[
  {
    "library": "django",
    "name": "is_counterclockwise",
    "source_code": "@property\ndef is_counterclockwise(self):\n    ret = c_byte()\n    if not capi.cs_is_ccw(self.ptr, byref(ret)):\n        raise GEOSException('Error encountered in GEOS C function \"%s\".' % capi.cs_is_ccw.func_name)\n    return ret.value == 1",
    "docstring": "Return whether this coordinate sequence is counterclockwise.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\geos\\coordseq.py",
    "ast_data": "FunctionDef name:is_counterclockwise arg:self arguments arg Assign Call If Call Call Raise Call Return return:yes Compare"
  },
  {
    "library": "scrapy",
    "name": "from_settings",
    "source_code": "def from_settings(settings):\n    pass",
    "docstring": "Return an instance of the class for the given settings",
    "type": "method",
    "file_path": "scrapy\\scrapy\\interfaces.py",
    "ast_data": "FunctionDef name:from_settings arg:settings arguments arg"
  },
  {
    "library": "scipy",
    "name": "_matvec",
    "source_code": "def _matvec(self, x):\n    x = x.reshape(self.shape[0], -1)\n    result_dtype = np.promote_types(x.dtype, self.dtype)\n    kx = np.zeros_like(x, dtype=result_dtype)\n    d1 = self._diag1\n    d0 = self._diag0\n    kx[0, :] = d0[0] * x[0, :] + d1[0] * x[1, :]\n    kx[-1, :] = d1[-1] * x[-2, :] + d0[-1] * x[-1, :]\n    kx[1:-1, :] = d1[:-1, None] * x[:-2, :] + d0[1:-1, None] * x[1:-1, :] + d1[1:, None] * x[2:, :]\n    return kx",
    "docstring": "Construct matrix-free callable banded-matrix-vector multiplication by the Mikota stiffness matrix without constructing or storing the matrix itself using the knowledge of its entries and the 3-diagonal format.",
    "type": "method",
    "file_path": "scipy\\scipy\\sparse\\linalg\\_special_sparse_arrays.py",
    "ast_data": "FunctionDef name:_matvec arg:self arg:x arguments arg arg Assign Call Assign Call Assign Call Assign Assign Assign Assign Assign Return return:yes"
  },
  {
    "library": "scipy",
    "name": "hilbert",
    "source_code": "def hilbert(n):\n    values = 1.0 / (1.0 + np.arange(2 * n - 1))\n    h = hankel(values[:n], r=values[n - 1:])\n    return h",
    "docstring": "Create a Hilbert matrix of order . Returns the by array with entries . Parameters ---------- n : int The size of the array to create. Returns ------- h : (n, n) ndarray The Hilbert matrix. See Also -------- invhilbert : Compute the inverse of a Hilbert matrix. Notes ----- .. versionadded:: 0.10.0 Examples -------- >>> from scipy.linalg import hilbert >>> hilbert(3) array([[ 1. , 0.5 , 0.33333333], [ 0.5 , 0.33333333, 0.25 ], [ 0.33333333, 0.25 , 0.2 ]])",
    "type": "function",
    "file_path": "scipy\\scipy\\linalg\\_special_matrices.py",
    "ast_data": "FunctionDef name:hilbert arg:n arguments arg Assign Call Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "RegressionOutput",
    "source_code": "class RegressionOutput(ExportOutput):\n\n    def __init__(self, value):\n        if not (isinstance(value, tensor.Tensor) and value.dtype.is_floating):\n            raise ValueError('Regression output value must be a float32 Tensor; got {}'.format(value))\n        self._value = value\n\n    @property\n    def value(self):\n        return self._value\n\n    def as_signature_def(self, receiver_tensors):\n        if len(receiver_tensors) != 1:\n            raise ValueError(f'Regression signatures can only accept a single tensor input of type tf.string. Please check to make sure that you have structured the serving_input_receiver_fn so that it creates a single string placeholder. If your model function expects multiple inputs, then use `tf.io.parse_example()` to parse the string into multiple tensors.\\n Received: {receiver_tensors}')\n        (_, examples), = receiver_tensors.items()\n        if dtypes.as_dtype(examples.dtype) != dtypes.string:\n            raise ValueError(f'Regression signatures can only accept a single tensor input of type tf.string. Please check to make sure that you have structured the serving_input_receiver_fn so that it creates a single string placeholder. If your model function expects multiple inputs, then use `tf.io.parse_example()` to parse the string into multiple tensors.\\n Received: {receiver_tensors}')\n        return signature_def_utils.regression_signature_def(examples, self.value)",
    "docstring": "Represents the output of a regression head.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\saved_model\\model_utils\\export_output.py",
    "ast_data": "ClassDef name:RegressionOutput FunctionDef name:__init__ arg:self arg:value arguments arg arg If BoolOp Call Raise Call Call Assign FunctionDef name:value arg:self arguments arg Return return:yes FunctionDef name:as_signature_def arg:self arg:receiver_tensors arguments arg arg If Compare Call Raise Call Assign Call If Compare Call Raise Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "_unregister_deepcopy_hook",
    "source_code": "def _unregister_deepcopy_hook(self, f):\n    assert callable(f), 'deepcopy hook must be a callable.'\n    self._deepcopy_hooks.remove(f)",
    "docstring": "Takes a callable which was previously registered to be called after deepcopy. This function will unregister that callable so it is no longer invoked on deepcopy.",
    "type": "method",
    "file_path": "pytorch\\torch\\fx\\graph_module.py",
    "ast_data": "FunctionDef name:_unregister_deepcopy_hook arg:self arg:f arguments arg arg Call Call"
  },
  {
    "library": "pytorch",
    "name": "is_embedding_node",
    "source_code": "def is_embedding_node(node: Node) -> bool:\n    if node.op == 'call_module':\n        submodule = self.graph_module\n        for atom in str(node.target).split('.'):\n            if not hasattr(submodule, atom):\n                raise RuntimeError(f'Module {submodule} has no attribute {atom}')\n            submodule = getattr(submodule, atom)\n            if 'Embedding' in str(submodule):\n                return True\n    return False",
    "docstring": "Check if a node is an embedding node",
    "type": "method",
    "file_path": "pytorch\\torch\\fx\\experimental\\accelerator_partitioner.py",
    "ast_data": "FunctionDef name:is_embedding_node arg:node arguments arg If Compare Assign For Call Call If Call Raise Call Assign Call If Compare Call Return return:yes Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_wait_for_computation_stream",
    "source_code": "def _wait_for_computation_stream(computation_stream: torch.Stream, unshard_stream: torch.Stream, pre_unshard_stream: torch.Stream):\n    if torch.distributed._functional_collectives.is_torchdynamo_compiling():\n        return\n    unshard_stream.wait_stream(computation_stream)\n    pre_unshard_stream.wait_stream(computation_stream)",
    "docstring": "Has the unshard and pre-unshard streams wait for the computation stream. For example, this should be called in the FSDP root's pre-forward to respect optimizer step computation.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\fsdp\\_runtime_utils.py",
    "ast_data": "FunctionDef name:_wait_for_computation_stream arg:computation_stream arg:unshard_stream arg:pre_unshard_stream arguments arg arg arg If Call Return return:no Call Call"
  },
  {
    "library": "pytorch",
    "name": "_is_compiled",
    "source_code": "def _is_compiled() -> bool:\n    return hasattr(torch._C, '_cuda_getDeviceCount')",
    "docstring": "Return true if compile with CUDA support.",
    "type": "function",
    "file_path": "pytorch\\torch\\cuda\\__init__.py",
    "ast_data": "FunctionDef name:_is_compiled arguments Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "get_bbox_to_anchor",
    "source_code": "def get_bbox_to_anchor(self):\n    if self._bbox_to_anchor is None:\n        return self.axes.bbox\n    else:\n        transform = self._bbox_to_anchor_transform\n        if transform is None:\n            return self._bbox_to_anchor\n        else:\n            return TransformedBbox(self._bbox_to_anchor, transform)",
    "docstring": "Return the bbox that the box is anchored to.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\offsetbox.py",
    "ast_data": "FunctionDef name:get_bbox_to_anchor arg:self arguments arg If Compare Return return:yes Assign If Compare Return return:yes Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "_record_memory_stats",
    "source_code": "@no_type_check\ndef _record_memory_stats(self, fn_name: str) -> None:\n    memory_allocated: float = torch.cuda.memory_allocated() / BYTES_PER_MB\n    memory_reserved: float = torch.cuda.memory_reserved() / BYTES_PER_MB\n    memory_active: float = torch.cuda.memory_stats().get('active_bytes.all.current', 0) / BYTES_PER_MB\n    self.memories_allocated[self._op_index] = (fn_name, memory_allocated)\n    self.memories_reserved[self._op_index] = (fn_name, memory_reserved)\n    self.memories_active[self._op_index] = (fn_name, memory_active)\n    self._op_index += 1",
    "docstring": "Record current memory allocated, current memory active and current memory reserved. The memory stats dict is indexed with ``.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\_tools\\memory_tracker.py",
    "ast_data": "FunctionDef name:_record_memory_stats arg:self arg:fn_name arguments arg arg Call Call Call Call Assign Assign Assign"
  },
  {
    "library": "django",
    "name": "__eq__",
    "source_code": "def __eq__(self, other):\n    return isinstance(other, OGRGeometry) and self.equals(other)",
    "docstring": "Is this Geometry equal to the other?",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\gdal\\geometries.py",
    "ast_data": "FunctionDef name:__eq__ arg:self arg:other arguments arg arg Return return:yes BoolOp Call Call"
  },
  {
    "library": "pytorch",
    "name": "named_parameters",
    "source_code": "def named_parameters(self, prefix: str='', recurse: bool=True, remove_duplicate: bool=True) -> Iterator[tuple[str, Parameter]]:\n    gen = self._named_members(lambda module: module._parameters.items(), prefix=prefix, recurse=recurse, remove_duplicate=remove_duplicate)\n    yield from gen",
    "docstring": "Return an iterator over module parameters, yielding both the name of the parameter as well as the parameter itself. Args: prefix (str): prefix to prepend to all parameter names. recurse (bool): if True, then yields parameters of this module and all submodules. Otherwise, yields only parameters that are direct members of this module. remove_duplicate (bool, optional): whether to remove the duplicated parameters in the result. Defaults to True. Yields: (str, Parameter): Tuple containing the name and parameter Example:: >>> # xdoctest: +SKIP(\"undefined vars\") >>> for name, param in self.named_parameters(): >>> if name in ['bias']: >>> print(param.size())",
    "type": "method",
    "file_path": "pytorch\\torch\\nn\\modules\\module.py",
    "ast_data": "FunctionDef name:named_parameters arg:self arg:prefix arg:recurse arg:remove_duplicate arguments arg arg arg arg Assign Call arguments arg Call"
  },
  {
    "library": "scipy",
    "name": "solve_bdf_system",
    "source_code": "def solve_bdf_system(fun, t_new, y_predict, c, psi, LU, solve_lu, scale, tol):\n    d = 0\n    y = y_predict.copy()\n    dy_norm_old = None\n    converged = False\n    for k in range(NEWTON_MAXITER):\n        f = fun(t_new, y)\n        if not np.all(np.isfinite(f)):\n            break\n        dy = solve_lu(LU, c * f - psi - d)\n        dy_norm = norm(dy / scale)\n        if dy_norm_old is None:\n            rate = None\n        else:\n            rate = dy_norm / dy_norm_old\n        if rate is not None and (rate >= 1 or rate ** (NEWTON_MAXITER - k) / (1 - rate) * dy_norm > tol):\n            break\n        y += dy\n        d += dy\n        if dy_norm == 0 or (rate is not None and rate / (1 - rate) * dy_norm < tol):\n            converged = True\n            break\n        dy_norm_old = dy_norm\n    return (converged, k + 1, y, d)",
    "docstring": "Solve the algebraic system resulting from BDF method.",
    "type": "function",
    "file_path": "scipy\\scipy\\integrate\\_ivp\\bdf.py",
    "ast_data": "FunctionDef name:solve_bdf_system arg:fun arg:t_new arg:y_predict arg:c arg:psi arg:LU arg:solve_lu arg:scale arg:tol arguments arg arg arg arg arg arg arg arg arg Assign Assign Call Assign Assign For Call Assign Call If Call Call Assign Call Assign Call If Compare Assign Assign If BoolOp Compare BoolOp Compare Compare If BoolOp Compare BoolOp Compare Compare Assign Assign Return return:yes"
  },
  {
    "library": "cherrypy",
    "name": "expired",
    "source_code": "def expired(self):\n    if self.timer.expired():\n        raise LockTimeout('Timeout acquiring lock for %(session_id)s' % vars(self))\n    return False",
    "docstring": "Check whether the lock checker has expired.",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\lib\\locking.py",
    "ast_data": "FunctionDef name:expired arg:self arguments arg If Call Raise Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "SimplifyIndexing",
    "source_code": "class SimplifyIndexing(V.WrapperHandler):\n\n    def __init__(self, inner, var_ranges: VarRanges) -> None:\n        super().__init__(inner)\n        self.name = 'SimplifyIndexing'\n        self._simplify: Callable[[Expr], Expr] = lambda index: V.graph.sizevars.simplify_with_ranges(index, var_ranges)\n\n    def load(self, name: str, index: sympy.Expr):\n        return self._inner.load(name, self._simplify(index))\n\n    def store(self, name, index, value, mode=None):\n        return self._inner.store(name, self._simplify(index), value, mode=mode)\n\n    def store_reduction(self, name, index, value):\n        return self._inner.store_reduction(name, self._simplify(index), value)\n\n    def index_expr(self, index, dtype):\n        return self._inner.index_expr(self._simplify(index), dtype)\n\n    def check_bounds(self, index, size, lower, upper):\n        return self._inner.check_bounds(self._simplify(index), size, lower, upper)",
    "docstring": "A wrapper around .virtualize.ops that uses var range information to simplify ModularIndexing/FloorDiv.",
    "type": "class",
    "file_path": "pytorch\\torch\\_inductor\\sizevars.py",
    "ast_data": "ClassDef name:SimplifyIndexing FunctionDef name:__init__ arg:self arg:inner arg:var_ranges arguments arg arg arg Call Call Assign arguments arg Call FunctionDef name:load arg:self arg:name arg:index arguments arg arg arg Return return:yes Call Call FunctionDef name:store arg:self arg:name arg:index arg:value arg:mode arguments arg arg arg arg arg Return return:yes Call Call FunctionDef name:store_reduction arg:self arg:name arg:index arg:value arguments arg arg arg arg Return return:yes Call Call FunctionDef name:index_expr arg:self arg:index arg:dtype arguments arg arg arg Return return:yes Call Call FunctionDef name:check_bounds arg:self arg:index arg:size arg:lower arg:upper arguments arg arg arg arg arg Return return:yes Call Call"
  },
  {
    "library": "matplotlib",
    "name": "set_boxstyle",
    "source_code": "@_docstring.interpd\ndef set_boxstyle(self, boxstyle=None, **kwargs):\n    if boxstyle is None:\n        return BoxStyle.pprint_styles()\n    self._bbox_transmuter = BoxStyle(boxstyle, **kwargs) if isinstance(boxstyle, str) else boxstyle\n    self.stale = True",
    "docstring": "Set the box style, possibly with further attributes. Attributes from the previous box style are not reused. Without argument (or with `~matplotlib.patches.BoxStyle.BoxStyle.BoxStyle` object, as documented in that class. The following box styles are available: %(BoxStyle:table_and_accepts)s **kwargs Additional attributes for the box style. See the table above for supported parameters. Examples -------- :: set_boxstyle(\"Round,pad=0.2\") set_boxstyle(\"round\", pad=0.2)",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\patches.py",
    "ast_data": "FunctionDef name:set_boxstyle arg:self arg:boxstyle arguments arg arg arg If Compare Return return:yes Call Assign Call Call Assign"
  },
  {
    "library": "authlib",
    "name": "validate_ui_locales_supported",
    "source_code": "def validate_ui_locales_supported(self):\n    validate_array_value(self, 'ui_locales_supported')",
    "docstring": "OPTIONAL. Languages and scripts supported for the user interface, represented as a JSON array of language tag values from BCP 47 [RFC5646]. If omitted, the set of supported languages and scripts is unspecified.",
    "type": "method",
    "file_path": "authlib\\authlib\\oauth2\\rfc8414\\models.py",
    "ast_data": "FunctionDef name:validate_ui_locales_supported arg:self arguments arg Call"
  },
  {
    "library": "tensorflow",
    "name": "_BatchGatherGrad",
    "source_code": "def _BatchGatherGrad(params_shape, values, indices, batch_dims, gather_dim_size):\n    indices_size = array_ops.expand_dims(array_ops.size(indices), 0)\n    if batch_dims:\n        values_shape = array_ops.shape(values)\n        outer_shape = values_shape[:batch_dims]\n        inner_shape = values_shape[batch_dims:][1:]\n        batch_size = gen_math_ops.prod(outer_shape, [0], False)\n        flat_values_shape = array_ops.concat([[-1], inner_shape], 0)\n        gather_dim_size *= batch_size\n        indices = _GetBatchIndices(params_shape, indices, batch_dims)\n        values = array_ops.reshape(_IndexedSlicesToTensorNoWarning(values), flat_values_shape)\n    indices = array_ops.reshape(indices, indices_size)\n    params_grad = math_ops.unsorted_segment_sum(values, indices, gather_dim_size)\n    if batch_dims:\n        params_grad = array_ops.reshape(params_grad, array_ops.concat([outer_shape, flat_values_shape], 0))\n    return params_grad",
    "docstring": "Returns the gradient of GatherV2 with batch dimensions.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\array_grad.py",
    "ast_data": "FunctionDef name:_BatchGatherGrad arg:params_shape arg:values arg:indices arg:batch_dims arg:gather_dim_size arguments arg arg arg arg arg Assign Call Call If Assign Call Assign Assign Assign Call Assign Call Assign Call Assign Call Call Assign Call Assign Call If Assign Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "MutationAwareDict",
    "source_code": "class MutationAwareDict(py_collections.OrderedDict):\n\n    def __init__(self, *args, **kwargs):\n        super().__init__(*args, **kwargs)\n        self._mutated = True\n\n    def pop(self, key, default=None):\n        self._mutated = True\n        return super().pop(key, default)\n\n    def __setitem__(self, key, value):\n        self._mutated = True\n        return super().__setitem__(key, value)\n\n    def __delitem__(self, key):\n        self._mutated = True\n        return super().__delitem__(key)\n\n    def clear(self):\n        self._mutated = True\n        return super().clear()\n\n    @property\n    def mutated(self):\n        return self._mutated\n\n    @mutated.setter\n    def mutated(self, value):\n        self._mutated = value",
    "docstring": "A dict with a mutation flag.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\core\\function\\capture\\capture_container.py",
    "ast_data": "ClassDef name:MutationAwareDict FunctionDef name:__init__ arg:self arguments arg arg arg Call Call Assign FunctionDef name:pop arg:self arg:key arg:default arguments arg arg arg Assign Return return:yes Call Call FunctionDef name:__setitem__ arg:self arg:key arg:value arguments arg arg arg Assign Return return:yes Call Call FunctionDef name:__delitem__ arg:self arg:key arguments arg arg Assign Return return:yes Call Call FunctionDef name:clear arg:self arguments arg Assign Return return:yes Call Call FunctionDef name:mutated arg:self arguments arg Return return:yes FunctionDef name:mutated arg:self arg:value arguments arg arg Assign"
  },
  {
    "library": "kornia",
    "name": "rotation",
    "source_code": "@property\ndef rotation(self) -> So3 | So2:\n    return self._dst_from_src.rotation",
    "docstring": "Rotation part of the pose.",
    "type": "method",
    "file_path": "kornia\\kornia\\geometry\\pose.py",
    "ast_data": "FunctionDef name:rotation arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "HessiansV2",
    "source_code": "@tf_export('hessians', v1=[])\ndef HessiansV2(ys, xs, gate_gradients=False, aggregation_method=None, name='hessians'):\n    return hessians(ys, xs, name=name, colocate_gradients_with_ops=True, gate_gradients=gate_gradients, aggregation_method=aggregation_method)",
    "docstring": "Constructs the Hessian of sum of with respect to in . adds ops to the graph to output the Hessian matrix of with respect to . It returns a list of of length where each tensor is the Hessian of . The Hessian is a matrix of second-order partial derivatives of a scalar tensor (see for more details). Args: ys: A or list of tensors to be differentiated. xs: A or list of tensors to be used for differentiation. gate_gradients: See documentation for details. aggregation_method: See documentation for details. name: Optional name to use for grouping all the gradient ops together. defaults to 'hessians'. Returns: A list of Hessian matrices of for each in . Raises: LookupError: if one of the operations between and does not have a registered gradient function.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\gradients_impl.py",
    "ast_data": "FunctionDef name:HessiansV2 arg:ys arg:xs arg:gate_gradients arg:aggregation_method arg:name arguments arg arg arg arg arg Return return:yes Call Call"
  },
  {
    "library": "django",
    "name": "PostgresOperatorLookup",
    "source_code": "class PostgresOperatorLookup(Lookup):\n    postgres_operator = None\n\n    def as_postgresql(self, compiler, connection):\n        lhs, lhs_params = self.process_lhs(compiler, connection)\n        rhs, rhs_params = self.process_rhs(compiler, connection)\n        params = tuple(lhs_params) + tuple(rhs_params)\n        return ('%s %s %s' % (lhs, self.postgres_operator, rhs), params)",
    "docstring": "Lookup defined by operators on PostgreSQL.",
    "type": "class",
    "file_path": "django\\django\\db\\models\\lookups.py",
    "ast_data": "ClassDef name:PostgresOperatorLookup Assign FunctionDef name:as_postgresql arg:self arg:compiler arg:connection arguments arg arg arg Assign Call Assign Call Assign Call Call Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "get_metadata_routing",
    "source_code": "def get_metadata_routing(self):\n    router = MetadataRouter(owner=self.__class__.__name__).add_self_request(self).add(scorer=self.scoring, method_mapping=MethodMapping().add(caller='fit', callee='score')).add(splitter=self.cv, method_mapping=MethodMapping().add(caller='fit', callee='split'))\n    return router",
    "docstring": "Get metadata routing of this object. Please check :ref: on how the routing mechanism works. .. versionadded:: 1.5 Returns ------- routing : MetadataRouter A :class: encapsulating routing information.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\linear_model\\_ridge.py",
    "ast_data": "FunctionDef name:get_metadata_routing arg:self arguments arg Assign Call Call Call Call Call Call Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "is_registered",
    "source_code": "def is_registered(self, prefix):\n    return self._resolve_prefix(prefix) is not None",
    "docstring": "Test if a command prefix or its alias is has a registered handler. Args: prefix: A prefix or its alias, as a str. Returns: True iff a handler is registered for prefix.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\debug\\cli\\debugger_cli_common.py",
    "ast_data": "FunctionDef name:is_registered arg:self arg:prefix arguments arg arg Return return:yes Compare Call"
  },
  {
    "library": "kornia",
    "name": "__dir__",
    "source_code": "def __dir__(self) -> List[str]:\n    self._load()\n    return dir(self.module)",
    "docstring": "Load the module (if not already loaded) and returns the list of attributes of the module. This method is called when the built-in dir() function is used on the LazyLoader instance. It ensures that the module is loaded and then returns the list of attributes of the module. Returns: list: The list of attributes of the loaded module.",
    "type": "method",
    "file_path": "kornia\\kornia\\core\\external.py",
    "ast_data": "FunctionDef name:__dir__ arg:self arguments arg Call Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "require_length_match",
    "source_code": "def require_length_match(data, index: Index) -> None:\n    if len(data) != len(index):\n        raise ValueError(f'Length of values ({len(data)}) does not match length of index ({len(index)})')",
    "docstring": "Check the length of data matches the length of the index.",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\common.py",
    "ast_data": "FunctionDef name:require_length_match arg:data arg:index arguments arg arg If Compare Call Call Raise Call Call Call"
  },
  {
    "library": "pandas",
    "name": "render_pep440_post_branch",
    "source_code": "def render_pep440_post_branch(pieces):\n    if pieces['closest-tag']:\n        rendered = pieces['closest-tag']\n        if pieces['distance'] or pieces['dirty']:\n            rendered += f'.post{pieces['distance']}'\n            if pieces['branch'] != 'master':\n                rendered += '.dev0'\n            rendered += plus_or_dot(pieces)\n            rendered += f'g{pieces['short']}'\n            if pieces['dirty']:\n                rendered += '.dirty'\n    else:\n        rendered = f'0.post{pieces['distance']}'\n        if pieces['branch'] != 'master':\n            rendered += '.dev0'\n        rendered += f'+g{pieces['short']}'\n        if pieces['dirty']:\n            rendered += '.dirty'\n    return rendered",
    "docstring": "TAG[.postDISTANCE[.dev0]+gHEX[.dirty]] . The \".dev0\" means not master branch. Exceptions: 1: no tags. 0.postDISTANCE[.dev0]+gHEX[.dirty]",
    "type": "function",
    "file_path": "pandas\\pandas\\_version.py",
    "ast_data": "FunctionDef name:render_pep440_post_branch arg:pieces arguments arg If Assign If BoolOp If Compare Call If Assign If Compare If Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "get_debug_quantized_model",
    "source_code": "def get_debug_quantized_model(self) -> bytes:\n    return self._get_quantized_model(is_debug=True)",
    "docstring": "Returns an instrumented quantized model. Convert the quantized model with the initialized converter and return bytes for model. The model will be instrumented with numeric verification operations and should only be used for debugging. Returns: Model bytes corresponding to the model. Raises: ValueError: if converter is not passed to the debugger.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\lite\\tools\\optimize\\debugging\\python\\debugger.py",
    "ast_data": "FunctionDef name:get_debug_quantized_model arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "_format_native_types",
    "source_code": "def _format_native_types(self, *, na_rep: str | float='NaT', date_format=None, **kwargs) -> npt.NDArray[np.object_]:\n    return libperiod.period_array_strftime(self.asi8, self.dtype._dtype_code, na_rep, date_format)",
    "docstring": "actually format my specific types",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\arrays\\period.py",
    "ast_data": "FunctionDef name:_format_native_types arg:self arguments arg arg arg arg Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "_validate_dialect",
    "source_code": "def _validate_dialect(dialect: csv.Dialect) -> None:\n    for param in MANDATORY_DIALECT_ATTRS:\n        if not hasattr(dialect, param):\n            raise ValueError(f'Invalid dialect {dialect} provided')",
    "docstring": "Validate csv dialect instance. Raises ------ ValueError If incorrect dialect is provided.",
    "type": "function",
    "file_path": "pandas\\pandas\\io\\parsers\\readers.py",
    "ast_data": "FunctionDef name:_validate_dialect arg:dialect arguments arg For If Call Raise Call"
  },
  {
    "library": "kornia",
    "name": "adjoint",
    "source_code": "def adjoint(self) -> Tensor:\n    rt = self.matrix()\n    rt[..., 0:2, 2] = stack((self.t.data[..., 1], -self.t.data[..., 0]), -1)\n    return rt",
    "docstring": "Return the adjoint matrix of shape :math:. Example: >>> s = Se2.identity() >>> s.adjoint() tensor([[1., -0., 0.], [0., 1., -0.], [0., 0., 1.]], grad_fn=)",
    "type": "method",
    "file_path": "kornia\\kornia\\geometry\\liegroup\\se2.py",
    "ast_data": "FunctionDef name:adjoint arg:self arguments arg Assign Call Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, compression_type=None, flush_mode=None, input_buffer_size=None, output_buffer_size=None, window_bits=None, compression_level=None, compression_method=None, mem_level=None, compression_strategy=None):\n    self.get_compression_type_string(compression_type)\n    self.compression_type = compression_type\n    self.flush_mode = flush_mode\n    self.input_buffer_size = input_buffer_size\n    self.output_buffer_size = output_buffer_size\n    self.window_bits = window_bits\n    self.compression_level = compression_level\n    self.compression_method = compression_method\n    self.mem_level = mem_level\n    self.compression_strategy = compression_strategy",
    "docstring": "Creates a instance. Options only effect TFRecordWriter when compression_type is not . Documentation, details, and defaults can be found in []( and in the [zlib manual]( Leaving an option as allows C++ to set a reasonable default. Args: compression_type: , , or (no compression). flush_mode: flush mode or , Default: Z_NO_FLUSH. input_buffer_size: int or . output_buffer_size: int or . window_bits: int or . compression_level: 0 to 9, or . compression_method: compression method or . mem_level: 1 to 9, or . compression_strategy: strategy or . Default: Z_DEFAULT_STRATEGY. Returns: A object. Raises: ValueError: If compression_type is invalid.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\lib\\io\\tf_record.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:compression_type arg:flush_mode arg:input_buffer_size arg:output_buffer_size arg:window_bits arg:compression_level arg:compression_method arg:mem_level arg:compression_strategy arguments arg arg arg arg arg arg arg arg arg arg Call Assign Assign Assign Assign Assign Assign Assign Assign Assign"
  },
  {
    "library": "tensorflow",
    "name": "peek_top_obj",
    "source_code": "def peek_top_obj(self) -> T:\n    return self._stack[-1].obj",
    "docstring": "Return the most recent stored object.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\traceable_stack.py",
    "ast_data": "FunctionDef name:peek_top_obj arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "inner_shape",
    "source_code": "@property\ndef inner_shape(self):\n    return self._inner_shape",
    "docstring": "The inner dimension sizes for this shape. Returns: A 1-D integer .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\dynamic_ragged_shape.py",
    "ast_data": "FunctionDef name:inner_shape arg:self arguments arg Return return:yes"
  },
  {
    "library": "seaborn",
    "name": "_nested_offsets",
    "source_code": "def _nested_offsets(self, width, dodge):\n    offsets = None\n    if 'hue' in self.variables and self._hue_map.levels is not None:\n        n_levels = len(self._hue_map.levels)\n        if dodge:\n            each_width = width / n_levels\n            offsets = np.linspace(0, width - each_width, n_levels)\n            offsets -= offsets.mean()\n        else:\n            offsets = np.zeros(n_levels)\n    return offsets",
    "docstring": "Return offsets for each hue level for dodged plots.",
    "type": "method",
    "file_path": "seaborn\\seaborn\\categorical.py",
    "ast_data": "FunctionDef name:_nested_offsets arg:self arg:width arg:dodge arguments arg arg arg Assign If BoolOp Compare Compare Assign Call If Assign Assign Call Call Assign Call Return return:yes"
  },
  {
    "library": "authlib",
    "name": "validate_token_endpoint_auth_methods_supported",
    "source_code": "def validate_token_endpoint_auth_methods_supported(self):\n    validate_array_value(self, 'token_endpoint_auth_methods_supported')",
    "docstring": "OPTIONAL. JSON array containing a list of client authentication methods supported by this token endpoint. Client authentication method values are used in the \"token_endpoint_auth_method\" parameter defined in Section 2 of [RFC7591]. If omitted, the default is \"client_secret_basic\" -- the HTTP Basic Authentication Scheme specified in Section 2.3.1 of OAuth 2.0 [RFC6749].",
    "type": "method",
    "file_path": "authlib\\authlib\\oauth2\\rfc8414\\models.py",
    "ast_data": "FunctionDef name:validate_token_endpoint_auth_methods_supported arg:self arguments arg Call"
  },
  {
    "library": "tensorflow",
    "name": "_ifft",
    "source_code": "def _ifft(self, x):\n    x_complex = _to_complex(x)\n    return _IFFT_OP[self.block_depth](x_complex)",
    "docstring": "IFFT along the last self.block_depth dimensions of x. Args: x: with floating or complex dtype. Should be in the form returned by self._vectorize_then_blockify. Returns: with .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\linalg\\linear_operator_circulant.py",
    "ast_data": "FunctionDef name:_ifft arg:self arg:x arguments arg arg Assign Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "abort_collective_ops",
    "source_code": "def abort_collective_ops(self, code, message):\n    self.ensure_initialized()\n    pywrap_tfe.TFE_AbortCollectiveOps(self._handle, code, message)",
    "docstring": "Abort the collective ops. This is intended to be used when a peer failure is detected, which allows the user to handle the case instead of hanging. This aborts all on-going collectives. After all subsequent collectives error immediately, and you need to reset_context() to use collectives again. Args: code: a error code. message: a string. The error message.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\context.py",
    "ast_data": "FunctionDef name:abort_collective_ops arg:self arg:code arg:message arguments arg arg arg Call Call"
  },
  {
    "library": "pytorch",
    "name": "complex_double",
    "source_code": "def complex_double(self):\n    _warn_typed_storage_removal()\n    return self._to(torch.cdouble)",
    "docstring": "Casts this storage to complex double type.",
    "type": "method",
    "file_path": "pytorch\\torch\\storage.py",
    "ast_data": "FunctionDef name:complex_double arg:self arguments arg Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "memory_efficient_fusion",
    "source_code": "def memory_efficient_fusion(fn: Union[Callable, nn.Module], **kwargs):\n    config = {'fw_compiler': ts_compile, 'bw_compiler': ts_compile, 'partition_fn': min_cut_rematerialization_partition, 'decompositions': default_decompositions}\n    config.update(kwargs)\n    if isinstance(fn, torch.nn.Module):\n        return aot_module(fn, **config)\n    else:\n        return aot_function(fn, **config)",
    "docstring": "Wrapper function over :func: and :func: to perform memory efficient fusion. It uses the :func: partitioner to perform efficient recomputation. It uses NVFuser to compile the generated forward and backward graphs. .. warning:: This API is experimental and likely to change. Args: fn (Union[Callable, nn.Module]): A Python function or a `fn`, but whose forward and backward graphs have gone through recomputation optimizations, and the graphs have been compiled with nvfuser.",
    "type": "function",
    "file_path": "pytorch\\torch\\_functorch\\compilers.py",
    "ast_data": "FunctionDef name:memory_efficient_fusion arg:fn arguments arg arg Assign Call If Call Return return:yes Call Return return:yes Call"
  },
  {
    "library": "django",
    "name": "do_if",
    "source_code": "@register.tag('if')\ndef do_if(parser, token):\n    bits = token.split_contents()[1:]\n    condition = TemplateIfParser(parser, bits).parse()\n    nodelist = parser.parse(('elif', 'else', 'endif'))\n    conditions_nodelists = [(condition, nodelist)]\n    token = parser.next_token()\n    while token.contents.startswith('elif'):\n        bits = token.split_contents()[1:]\n        condition = TemplateIfParser(parser, bits).parse()\n        nodelist = parser.parse(('elif', 'else', 'endif'))\n        conditions_nodelists.append((condition, nodelist))\n        token = parser.next_token()\n    if token.contents == 'else':\n        nodelist = parser.parse(('endif',))\n        conditions_nodelists.append((None, nodelist))\n        token = parser.next_token()\n    if token.contents != 'endif':\n        raise TemplateSyntaxError('Malformed template tag at line {}: \"{}\"'.format(token.lineno, token.contents))\n    return IfNode(conditions_nodelists)",
    "docstring": "Evaluate a variable, and if that variable is \"true\" (i.e., exists, is not empty, and is not a false boolean value), output the contents of the block: :: {% if athlete_list %} Number of athletes: {{ athlete_list|count }} {% elif athlete_in_locker_room_list %} Athletes should be out of the locker room soon! {% else %} No athletes. {% endif %} In the above, if ``. Operator precedence follows Python.",
    "type": "function",
    "file_path": "django\\django\\template\\defaulttags.py",
    "ast_data": "FunctionDef name:do_if arg:parser arg:token arguments arg arg Assign Call Assign Call Call Assign Call Assign Assign Call While Call Assign Call Assign Call Call Assign Call Call Assign Call If Compare Assign Call Call Assign Call If Compare Raise Call Call Return return:yes Call Call"
  },
  {
    "library": "django",
    "name": "auth_name",
    "source_code": "def auth_name(self, target):\n    return capi.get_auth_name(self.ptr, target if target is None else force_bytes(target))",
    "docstring": "Return the authority name for the given string target node.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\gdal\\srs.py",
    "ast_data": "FunctionDef name:auth_name arg:self arg:target arguments arg arg Return return:yes Call Compare Call"
  },
  {
    "library": "cherrypy",
    "name": "readline",
    "source_code": "def readline(self, size=None):\n    chunks = []\n    while size is None or size > 0:\n        chunksize = self.bufsize\n        if size is not None and size < self.bufsize:\n            chunksize = size\n        data = self.read(chunksize)\n        if not data:\n            break\n        pos = data.find(b'\\n') + 1\n        if pos:\n            chunks.append(data[:pos])\n            remainder = data[pos:]\n            self.buffer += remainder\n            self.bytes_read -= len(remainder)\n            break\n        else:\n            chunks.append(data)\n    return b''.join(chunks)",
    "docstring": "Read a line from the request body and return it.",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\_cpreqbody.py",
    "ast_data": "FunctionDef name:readline arg:self arg:size arguments arg arg Assign While BoolOp Compare Compare Assign If BoolOp Compare Compare Assign Assign Call If Assign Call If Call Assign Call Call Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "_get_lowers_and_uppers",
    "source_code": "def _get_lowers_and_uppers(self):\n    lowers = self._levels[:-1]\n    if self.zmin == lowers[0]:\n        lowers = lowers.copy()\n        if self.logscale:\n            lowers[0] = 0.99 * self.zmin\n        else:\n            lowers[0] -= 1\n    uppers = self._levels[1:]\n    return (lowers, uppers)",
    "docstring": "Return `` for filled contours.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\contour.py",
    "ast_data": "FunctionDef name:_get_lowers_and_uppers arg:self arguments arg Assign If Compare Assign Call If Assign Assign Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "get_under",
    "source_code": "def get_under(self):\n    if not self._isinit:\n        self._init()\n    return np.array(self._lut[self._i_under])",
    "docstring": "Get the color for low out-of-range values.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\colors.py",
    "ast_data": "FunctionDef name:get_under arg:self arguments arg If Call Return return:yes Call"
  },
  {
    "library": "django",
    "name": "can_filter",
    "source_code": "def can_filter(self):\n    return not self.is_sliced",
    "docstring": "Return True if adding filters to this instance is still possible. Typically, this means no limits or offsets have been put on the results.",
    "type": "method",
    "file_path": "django\\django\\db\\models\\sql\\query.py",
    "ast_data": "FunctionDef name:can_filter arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "read",
    "source_code": "def read(self, index, name=None):\n    del name\n    if isinstance(index, ops.EagerTensor):\n        index = index.numpy()\n    if index < 0:\n        raise errors_impl.OutOfRangeError(None, None, 'Reading from negative indices (index %d) is not allowed.' % index)\n    if index >= len(self._tensor_array):\n        raise errors_impl.OutOfRangeError(None, None, 'Tried to read from index %d but array size is: %d ' % (index, len(self._tensor_array)))\n    tensor = self._tensor_array[index]\n    if tensor is None:\n        if index in self._previously_read_indices:\n            raise errors_impl.InvalidArgumentError(None, None, 'Could not read index %d twice because it was cleared after a previous read (perhaps try setting clear_after_read = false?)' % index)\n        else:\n            tensor = self._maybe_zero(index)\n    if self._clear_after_read:\n        self._tensor_array[index] = None\n        self._previously_read_indices.append(index)\n    return tensor",
    "docstring": "See TensorArray.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\tensor_array_ops.py",
    "ast_data": "FunctionDef name:read arg:self arg:index arg:name arguments arg arg arg If Call Assign Call If Compare Raise Call If Compare Call Raise Call Call Assign If Compare If Compare Raise Call Assign Call If Assign Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "floor_to_int",
    "source_code": "def floor_to_int(self, x: T, dtype: torch.dtype) -> T:\n    raise NotImplementedError",
    "docstring": "Convert x to dtype with ceiling semantics. See also trunc_to_int.",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\ops_handler.py",
    "ast_data": "FunctionDef name:floor_to_int arg:self arg:x arg:dtype arguments arg arg arg Raise"
  },
  {
    "library": "tensorflow",
    "name": "create_dummy_tensor",
    "source_code": "def create_dummy_tensor(spec):\n    if hasattr(spec, '_create_empty_value'):\n        return spec._create_empty_value()\n    if isinstance(spec, ragged_tensor.RaggedTensorSpec):\n        feature_shape = spec._shape[:1].concatenate(spec._shape[1 + spec._ragged_rank:])\n        feature_type = spec._dtype\n    else:\n        feature_shape = spec.shape\n        feature_type = spec.dtype\n    dims = [dim if dim is not None else 0 for dim in feature_shape.as_list()] if feature_shape else []\n    if dims and (isinstance(spec, ragged_tensor.RaggedTensorSpec) or feature_shape.is_fully_defined()):\n        dims[0] = tensor_shape.Dimension(0)\n    if isinstance(spec, sparse_tensor.SparseTensorSpec):\n        return sparse_tensor.SparseTensor(values=array_ops.zeros(0, feature_type), indices=array_ops.zeros((0, len(dims)), dtypes.int64), dense_shape=dims)\n    dummy_tensor = array_ops.zeros(tensor_shape.TensorShape(dims), feature_type)\n    if isinstance(spec, ragged_tensor.RaggedTensorSpec):\n        row_splits = array_ops.zeros(1, spec._row_splits_dtype)\n        dummy_tensor = ragged_tensor.RaggedTensor.from_nested_row_splits(dummy_tensor, (row_splits,) * spec._ragged_rank, validate=False)\n    return dummy_tensor",
    "docstring": "Create a dummy tensor with possible batch dimensions set to 0.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\input_lib.py",
    "ast_data": "FunctionDef name:create_dummy_tensor arg:spec arguments arg If Call Return return:yes Call If Call Assign Call Assign Assign Assign Assign Compare Call If BoolOp BoolOp Call Call Assign Call If Call Return return:yes Call Call Call Call Assign Call Call If Call Assign Call Assign Call Return return:yes"
  },
  {
    "library": "numpy",
    "name": "diagonal",
    "source_code": "@array_function_dispatch(_diagonal_dispatcher)\ndef diagonal(x, /, *, offset=0):\n    return _core_diagonal(x, offset, axis1=-2, axis2=-1)",
    "docstring": "Returns specified diagonals of a matrix (or a stack of matrices) `numpy.diagonaloffsetnumpy.flipudnumpy.fliplr`. >>> a = np.arange(9).reshape(3, 3) >>> a array([[0, 1, 2], [3, 4, 5], [6, 7, 8]]) >>> np.linalg.diagonal(np.fliplr(a)) # Horizontal flip array([2, 4, 6]) >>> np.linalg.diagonal(np.flipud(a)) # Vertical flip array([6, 4, 2]) Note that the order in which the diagonal is retrieved varies depending on the flip function.",
    "type": "function",
    "file_path": "numpy\\numpy\\linalg\\_linalg.py",
    "ast_data": "FunctionDef name:diagonal arguments arg arg Return return:yes Call Call"
  },
  {
    "library": "authlib",
    "name": "validate_default_acr_values",
    "source_code": "def validate_default_acr_values(self):\n    self._validate_claim_value('default_acr_values')",
    "docstring": "Default requested Authentication Context Class Reference values. Array of strings that specifies the default acr values that the OP is being requested to use for processing requests from this Client, with the values appearing in order of preference. The Authentication Context Class satisfied by the authentication performed is returned as the acr Claim Value in the issued ID Token. The acr Claim is requested as a Voluntary Claim by this parameter. The acr_values_supported discovery element contains a list of the supported acr values supported by the OP. Values specified in the acr_values request parameter or an individual acr Claim request override these default values.",
    "type": "method",
    "file_path": "authlib\\authlib\\oidc\\registration\\claims.py",
    "ast_data": "FunctionDef name:validate_default_acr_values arg:self arguments arg Call"
  },
  {
    "library": "pytorch",
    "name": "list_options",
    "source_code": "def list_options() -> list[str]:\n    from torch._inductor import config\n    current_config: dict[str, Any] = config.get_config_copy()\n    return list(current_config.keys())",
    "docstring": "Returns a dictionary describing the optimizations and debug configurations that are available to . The options are documented in . Example:: >>> torch._inductor.list_options()",
    "type": "function",
    "file_path": "pytorch\\torch\\_inductor\\__init__.py",
    "ast_data": "FunctionDef name:list_options arguments Call Return return:yes Call Call"
  },
  {
    "library": "matplotlib",
    "name": "_get_dist_to_box",
    "source_code": "def _get_dist_to_box(self, rotation, x0, y0, figure_box):\n    if rotation > 270:\n        quad = rotation - 270\n        h1 = (y0 - figure_box.y0) / math.cos(math.radians(quad))\n        h2 = (figure_box.x1 - x0) / math.cos(math.radians(90 - quad))\n    elif rotation > 180:\n        quad = rotation - 180\n        h1 = (x0 - figure_box.x0) / math.cos(math.radians(quad))\n        h2 = (y0 - figure_box.y0) / math.cos(math.radians(90 - quad))\n    elif rotation > 90:\n        quad = rotation - 90\n        h1 = (figure_box.y1 - y0) / math.cos(math.radians(quad))\n        h2 = (x0 - figure_box.x0) / math.cos(math.radians(90 - quad))\n    else:\n        h1 = (figure_box.x1 - x0) / math.cos(math.radians(rotation))\n        h2 = (figure_box.y1 - y0) / math.cos(math.radians(90 - rotation))\n    return min(h1, h2)",
    "docstring": "Return the distance from the given points to the boundaries of a rotated box, in pixels.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\text.py",
    "ast_data": "FunctionDef name:_get_dist_to_box arg:self arg:rotation arg:x0 arg:y0 arg:figure_box arguments arg arg arg arg arg If Compare Assign Assign Call Call Assign Call Call If Compare Assign Assign Call Call Assign Call Call If Compare Assign Assign Call Call Assign Call Call Assign Call Call Assign Call Call Return return:yes Call"
  },
  {
    "library": "numpy",
    "name": "__ne__",
    "source_code": "def __ne__(self, other):\n    return self._comparison(other, operator.ne)",
    "docstring": "Check whether other does not equal self elementwise. When either of the elements is masked, the result is masked as well, but the underlying boolean data are still set, with self and other considered equal if both are masked, and unequal otherwise. For structured arrays, all fields are combined, with masked values ignored. The result is masked if all fields were masked, with self and other considered equal only if both were fully masked.",
    "type": "method",
    "file_path": "numpy\\numpy\\ma\\core.py",
    "ast_data": "FunctionDef name:__ne__ arg:self arg:other arguments arg arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_log_weights",
    "source_code": "def _log_weights(self, epoch):\n    with self._train_writer.as_default():\n        with summary_ops_v2.record_if(True):\n            for layer in self.model.layers:\n                for weight in layer.weights:\n                    weight_name = weight.name.replace(':', '_')\n                    summary_ops_v2.histogram(weight_name, weight, step=epoch)\n                    if self.write_images:\n                        self._log_weight_as_image(weight, weight_name, epoch)\n            self._train_writer.flush()",
    "docstring": "Logs the weights of the Model to TensorBoard.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\callbacks.py",
    "ast_data": "FunctionDef name:_log_weights arg:self arg:epoch arguments arg arg With Call With Call For For Assign Call Call If Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_get_ops_details",
    "source_code": "def _get_ops_details(self):\n    return [self._get_op_details(idx) for idx in range(self._interpreter.NumNodes())]",
    "docstring": "Gets op details for every node. Returns: A list of dictionaries containing arrays with lists of tensor ids for tensors involved in the op.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\lite\\python\\interpreter.py",
    "ast_data": "FunctionDef name:_get_ops_details arg:self arguments arg Return return:yes Call Call Call"
  },
  {
    "library": "scipy",
    "name": "special_ortho_group_gen",
    "source_code": "class special_ortho_group_gen(multi_rv_generic):\n\n    def __init__(self, seed=None):\n        super().__init__(seed)\n        self.__doc__ = doccer.docformat(self.__doc__)\n\n    def __call__(self, dim=None, seed=None):\n        return special_ortho_group_frozen(dim, seed=seed)\n\n    def _process_parameters(self, dim):\n        if dim is None or not np.isscalar(dim) or dim < 0 or (dim != int(dim)):\n            raise ValueError('Dimension of rotation must be specified,\\n                                and must be a scalar nonnegative integer.')\n        return dim\n\n    def rvs(self, dim, size=1, random_state=None):\n        random_state = self._get_random_state(random_state)\n        q = ortho_group.rvs(dim, size, random_state)\n        dets = np.linalg.det(q)\n        if dim:\n            q[..., 0, :] /= dets[..., np.newaxis]\n        return q",
    "docstring": "A Special Orthogonal matrix (SO(N)) random variable. Return a random rotation matrix, drawn from the Haar distribution (the only uniform distribution on SO(N)) with a determinant of +1. The keyword specifies the dimension N. Methods ------- rvs(dim=None, size=1, random_state=None) Draw random samples from SO(N). Parameters ---------- dim : scalar Dimension of matrices seed : {None, int, np.random.RandomState, np.random.Generator}, optional Used for drawing random variates. If is , the singleton is used. If is an int, a new `seedNoneortho_groupscipy.spatial.transform.Rotation.randomdim` parameter, returning a \"frozen\" special_ortho_group random variable: >>> rv = special_ortho_group(5) >>> # Frozen object with the same methods but holding the >>> # dimension parameter fixed. See Also -------- ortho_group, scipy.spatial.transform.Rotation.random",
    "type": "class",
    "file_path": "scipy\\scipy\\stats\\_multivariate.py",
    "ast_data": "ClassDef name:special_ortho_group_gen FunctionDef name:__init__ arg:self arg:seed arguments arg arg Call Call Assign Call FunctionDef name:__call__ arg:self arg:dim arg:seed arguments arg arg arg Return return:yes Call FunctionDef name:_process_parameters arg:self arg:dim arguments arg arg If BoolOp Compare Call Compare Compare Call Raise Call Return return:yes FunctionDef name:rvs arg:self arg:dim arg:size arg:random_state arguments arg arg arg arg Assign Call Assign Call Assign Call If Return return:yes"
  },
  {
    "library": "pygame",
    "name": "pixels_blue",
    "source_code": "def pixels_blue(surface):\n    return numpy.array(surface.get_view('B'), copy=False)",
    "docstring": "pygame.surfarray.pixels_blue(Surface): return array Reference pixel blue into a 2d array. Create a new 2D array that directly references the blue values in a Surface. Any changes to the array will affect the pixels in the Surface. This is a fast operation since no data is copied. This can only work on 24-bit or 32-bit Surfaces. The Surface this array references will remain locked for the lifetime of the array.",
    "type": "function",
    "file_path": "pygame\\src_py\\surfarray.py",
    "ast_data": "FunctionDef name:pixels_blue arg:surface arguments arg Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "codegen_cooperative_reduction_peer_combine",
    "source_code": "def codegen_cooperative_reduction_peer_combine(self, result_var, dtype, default_val):\n    xnumel = self.numels['x']\n    mask = 'xindex < xnumel' if not self._has_constant_xmask() else None\n    nbytes = xnumel * dtype.itemsize * self.max_rsplit()\n    ws_name, ws_offset = self.cooperative_reduction_workspace_cache.allocate(nbytes)\n    self.post_loop_combine.splice(f'\\n                {result_var}_ws = ({ws_name} + {self.index_to_str(ws_offset)}).to(tl.pointer_type({triton_type(dtype)}))\\n                tl.store({result_var}_ws + (xindex * RSPLIT + rsplit_id), {result_var}, {mask})\\n            ', strip=True)\n    self.post_loop_store.writeline(f\"{result_var}_peers = tl.load({result_var}_ws + (xindex * RSPLIT + rsplit_arange), rsplit_mask, eviction_policy='evict_first', other=triton_helpers.if_mask(rsplit_mask, {constant_repr(default_val)}))\")\n    return f'{result_var}_peers'",
    "docstring": "Generate code to save a [XBLOCK, RSPLIT] temporary workspace, where each thread block writes a different column. After the barrier, every thread block loads the completed value so that it can compute the final value independently.",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\codegen\\triton.py",
    "ast_data": "FunctionDef name:codegen_cooperative_reduction_peer_combine arg:self arg:result_var arg:dtype arg:default_val arguments arg arg arg arg Assign Assign Call Assign Call Assign Call Call Call Call Call Call Return return:yes"
  },
  {
    "library": "numpy",
    "name": "gpaths",
    "source_code": "def gpaths(paths, local_path='', include_non_existing=True):\n    if is_string(paths):\n        paths = (paths,)\n    return _fix_paths(paths, local_path, include_non_existing)",
    "docstring": "Apply glob to paths and prepend local_path if needed.",
    "type": "function",
    "file_path": "numpy\\numpy\\distutils\\misc_util.py",
    "ast_data": "FunctionDef name:gpaths arg:paths arg:local_path arg:include_non_existing arguments arg arg arg If Call Assign Return return:yes Call"
  },
  {
    "library": "kornia",
    "name": "extract_tensor_patches",
    "source_code": "def extract_tensor_patches(input: Tensor, window_size: Union[int, Tuple[int, int]], stride: Union[int, Tuple[int, int]]=1, padding: PadType=0, allow_auto_padding: bool=False) -> Tensor:\n    if not torch.is_tensor(input):\n        raise TypeError(f'Input input type is not a Tensor. Got {type(input)}')\n    if len(input.shape) != 4:\n        raise ValueError(f'Invalid input shape, we expect BxCxHxW. Got: {input.shape}')\n    window_size = cast(Tuple[int, int], _pair(window_size))\n    stride = cast(Tuple[int, int], _pair(stride))\n    original_size = (input.shape[-2], input.shape[-1])\n    if not padding:\n        if not _check_patch_fit(original_size, window_size, stride):\n            if not allow_auto_padding:\n                warn(f'The window will not fit into the image. \\nWindow size: {window_size}\\nStride: {stride}\\nImage size: {original_size}\\nThis means that the final incomplete patches will be dropped. By enabling `allow_auto_padding`, the input will be padded to fit the window and stride.', stacklevel=1)\n            else:\n                padding = compute_padding(original_size=original_size, window_size=window_size, stride=stride)\n    if padding:\n        padding = create_padding_tuple(padding)\n        input = pad(input, padding)\n    return _extract_tensor_patchesnd(input, window_size, stride)",
    "docstring": "Extract patches from tensors and stacks them. See :class: for details. Args: input: tensor image where to extract the patches with shape :math:. window_size: the size of the sliding window and the output patch size. stride: stride of the sliding window. padding: Zero-padding added to both side of the input. allow_auto_padding: whether to allow automatic padding if the window and stride do not fit into the image. Returns: the tensor with the extracted patches with shape :math:. Examples: >>> input = torch.arange(9.).view(1, 1, 3, 3) >>> patches = extract_tensor_patches(input, (2, 3)) >>> input tensor([[[[0., 1., 2.], [3., 4., 5.], [6., 7., 8.]]]]) >>> patches[:, -1] tensor([[[[3., 4., 5.], [6., 7., 8.]]]])",
    "type": "function",
    "file_path": "kornia\\kornia\\contrib\\extract_patches.py",
    "ast_data": "FunctionDef name:extract_tensor_patches arg:input arg:window_size arg:stride arg:padding arg:allow_auto_padding arguments arg arg arg arg arg If Call Raise Call Call If Compare Call Raise Call Assign Call Call Assign Call Call Assign If If Call If Call Assign Call If Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, performed_action, run_metadata=None, client_graph_def=None, tf_error=None):\n    _check_type(performed_action, str)\n    self.performed_action = performed_action\n    if run_metadata is not None:\n        _check_type(run_metadata, config_pb2.RunMetadata)\n    self.run_metadata = run_metadata\n    self.client_graph_def = client_graph_def\n    self.tf_error = tf_error",
    "docstring": "Constructor for . Args: performed_action: () Actually-performed action by the debug-wrapper session. run_metadata: run_metadata output from the run() call (if any). client_graph_def: (GraphDef) GraphDef from the client side, i.e., from the python front end of TensorFlow. Can be obtained with session.graph.as_graph_def(). tf_error: (errors.OpError subtypes) TensorFlow OpError that occurred during the run (if any).",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\debug\\wrappers\\framework.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:performed_action arg:run_metadata arg:client_graph_def arg:tf_error arguments arg arg arg arg arg Call Assign If Compare Call Assign Assign Assign"
  },
  {
    "library": "matplotlib",
    "name": "_update_glyph_map_defs",
    "source_code": "def _update_glyph_map_defs(self, glyph_map_new):\n    writer = self.writer\n    if glyph_map_new:\n        writer.start('defs')\n        for char_id, (vertices, codes) in glyph_map_new.items():\n            char_id = self._adjust_char_id(char_id)\n            path_data = self._convert_path(Path(vertices * 64, codes), simplify=False)\n            writer.element('path', id=char_id, d=path_data, transform=_generate_transform([('scale', (1 / 64,))]))\n        writer.end('defs')\n        self._glyph_map.update(glyph_map_new)",
    "docstring": "Emit definitions for not-yet-defined glyphs, and record them as having been defined.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\backends\\backend_svg.py",
    "ast_data": "FunctionDef name:_update_glyph_map_defs arg:self arg:glyph_map_new arguments arg arg Assign If Call For Call Assign Call Assign Call Call Call Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "untyped",
    "source_code": "def untyped(self):\n    _warn_typed_storage_removal()\n    return self._untyped_storage",
    "docstring": "Return the internal :class:.",
    "type": "method",
    "file_path": "pytorch\\torch\\storage.py",
    "ast_data": "FunctionDef name:untyped arg:self arguments arg Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_get_parent_graph",
    "source_code": "def _get_parent_graph(self, graph):\n    parent_graph = graph.outer_graph\n    if not isinstance(parent_graph, func_graph.FuncGraph) and ops.executing_eagerly_outside_functions():\n        return _DUMMY_EAGER_GRAPH.key\n    return parent_graph",
    "docstring": "Returns the parent graph or dummy eager object.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\backend.py",
    "ast_data": "FunctionDef name:_get_parent_graph arg:self arg:graph arguments arg arg Assign If BoolOp Call Call Return return:yes Return return:yes"
  },
  {
    "library": "django",
    "name": "get_context_object_name",
    "source_code": "def get_context_object_name(self, obj):\n    if self.context_object_name:\n        return self.context_object_name\n    elif isinstance(obj, models.Model):\n        return obj._meta.model_name\n    else:\n        return None",
    "docstring": "Get the name to use for the object.",
    "type": "method",
    "file_path": "django\\django\\views\\generic\\detail.py",
    "ast_data": "FunctionDef name:get_context_object_name arg:self arg:obj arguments arg arg If Return return:yes If Call Return return:yes Return return:no"
  },
  {
    "library": "scrapy",
    "name": "StartSpiderMiddleware",
    "source_code": "class StartSpiderMiddleware(BaseSpiderMiddleware):\n\n    def get_processed_request(self, request: Request, response: Response | None) -> Request | None:\n        if response is None:\n            request.meta.setdefault('is_start_request', True)\n        return request",
    "docstring": "Set :reqmeta:. .. reqmeta:: is_start_request is_start_request ---------------- :attr: key that is set to `start requests downloader middlewares `.",
    "type": "class",
    "file_path": "scrapy\\scrapy\\spidermiddlewares\\start.py",
    "ast_data": "ClassDef name:StartSpiderMiddleware FunctionDef name:get_processed_request arg:self arg:request arg:response arguments arg arg arg If Compare Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_post_order_apply",
    "source_code": "def _post_order_apply(root_module: nn.Module, fn: Callable[[nn.Module], Optional[nn.Module]]):\n    visited_modules: set[nn.Module] = {root_module}\n\n    def _post_order_apply_inner(module: nn.Module, module_name: str, parent_module: Optional[nn.Module]):\n        for child_module_name, child_module in module.named_children():\n            if child_module not in visited_modules:\n                visited_modules.add(child_module)\n                _post_order_apply_inner(child_module, child_module_name, module)\n        optional_module = fn(module)\n        if optional_module is not None:\n            assert isinstance(parent_module, nn.Module), f'Non-root modules should have their parent module set but got {parent_module} for {module}'\n            assert module_name, f'Non-root modules should have their module name set but got an empty module name for {module}'\n            assert isinstance(optional_module, nn.Module), f'fn should return None or an nn.Module but got {optional_module}'\n            setattr(parent_module, module_name, optional_module)\n    _post_order_apply_inner(root_module, '', None)",
    "docstring": "This applies `nn.Module`, in which case the module is not changed.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\fsdp\\wrap.py",
    "ast_data": "FunctionDef name:_post_order_apply arg:root_module arg:fn arguments arg arg FunctionDef name:_post_order_apply_inner arg:module arg:module_name arg:parent_module arguments arg arg arg For Call If Compare Call Call Assign Call If Compare Call Call Call Call"
  },
  {
    "library": "numpy",
    "name": "get_info",
    "source_code": "def get_info(self, *names):\n    from .system_info import get_info, dict_append\n    info_dict = {}\n    for a in names:\n        dict_append(info_dict, **get_info(a))\n    return info_dict",
    "docstring": "Get resources information. Return information (from system_info.get_info) for all of the names in the argument list in a single dictionary.",
    "type": "method",
    "file_path": "numpy\\numpy\\distutils\\misc_util.py",
    "ast_data": "FunctionDef name:get_info arg:self arguments arg arg Assign For Call Call Return return:yes"
  },
  {
    "library": "django",
    "name": "get_autocommit",
    "source_code": "def get_autocommit(using=None):\n    return get_connection(using).get_autocommit()",
    "docstring": "Get the autocommit status of the connection.",
    "type": "function",
    "file_path": "django\\django\\db\\transaction.py",
    "ast_data": "FunctionDef name:get_autocommit arg:using arguments arg Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "softplus",
    "source_code": "@dispatch.add_dispatch_support\n@doc_controls.do_not_generate_docs\ndef softplus(x):\n    return math_ops.softplus(x)",
    "docstring": "Softplus of a tensor. Args: x: A tensor or variable. Returns: A tensor.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\backend.py",
    "ast_data": "FunctionDef name:softplus arg:x arguments arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "asdict",
    "source_code": "def asdict(self) -> dict[str, Any]:\n    return {'name': self.name, 'max_abs_diff': self.max_abs_diff, 'max_rel_diff': self.max_rel_diff, 'abs_diff_hist': [self.abs_diff_hist[0].tolist(), self.abs_diff_hist[1].tolist()], 'rel_diff_hist': [self.rel_diff_hist[0].tolist(), self.rel_diff_hist[1].tolist()], 'expected_dtype': str(self.expected_dtype), 'actual_dtype': str(self.actual_dtype)}",
    "docstring": "Convert the VerificationInfo object to a dictionary. Returns: A dictionary representation of the VerificationInfo object.",
    "type": "method",
    "file_path": "pytorch\\torch\\onnx\\_internal\\exporter\\_verification.py",
    "ast_data": "FunctionDef name:asdict arg:self arguments arg Return return:yes Call Call Call Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "register_callback",
    "source_code": "def register_callback(self, output_file_path: str) -> Self:\n\n    def get_temp_uncompressed_file() -> str:\n        fp = tempfile.NamedTemporaryFile('w+b', suffix='.json', delete=False)\n        fp.close()\n        return fp.name\n    if not self._registered:\n        self.output_file_path = output_file_path\n        if output_file_path.endswith('.gz'):\n            output_file_path = get_temp_uncompressed_file()\n        self.output_file_path_observer = output_file_path\n        self._registered = _add_execution_trace_observer(output_file_path)\n    return self",
    "docstring": "Adds ET observer to record function callbacks. The data will be written to output_file_path.",
    "type": "method",
    "file_path": "pytorch\\torch\\profiler\\profiler.py",
    "ast_data": "FunctionDef name:register_callback arg:self arg:output_file_path arguments arg arg FunctionDef name:get_temp_uncompressed_file arguments Assign Call Call Return return:yes If Assign If Call Assign Call Assign Assign Call Return return:yes"
  },
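A hedged usage sketch of the observer this method belongs to (the public wrapper is `torch.profiler.ExecutionTraceObserver`; the output path here is arbitrary):

```python
import torch
from torch.profiler import ExecutionTraceObserver

et = ExecutionTraceObserver().register_callback("et_trace.json")
et.start()
torch.randn(4, 4) @ torch.randn(4, 4)  # work to be traced
et.stop()
et.unregister_callback()
```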
  {
    "library": "kornia",
    "name": "compile",
    "source_code": "def compile(self, *, fullgraph: bool=False, dynamic: bool=False, backend: str='inductor', mode: Optional[str]=None, options: Optional[dict[str, str | int | bool]]=None, disable: bool=False) -> None:\n    self.model = torch.compile(self.model, fullgraph=fullgraph, dynamic=dynamic, backend=backend, mode=mode, options=options, disable=disable)",
    "docstring": "Compile the internal object detection model with :py:func:.",
    "type": "method",
    "file_path": "kornia\\kornia\\models\\detection\\base.py",
    "ast_data": "FunctionDef name:compile arg:self arguments arg arg arg arg arg arg arg Assign Call"
  },
  {
    "library": "django",
    "name": "check_cs_op",
    "source_code": "def check_cs_op(result, func, cargs):\n    if result == 0:\n        raise GEOSException('Could not set value on coordinate sequence')\n    else:\n        return result",
    "docstring": "Check the status code of a coordinate sequence operation.",
    "type": "function",
    "file_path": "django\\django\\contrib\\gis\\geos\\prototypes\\coordseq.py",
    "ast_data": "FunctionDef name:check_cs_op arg:result arg:func arg:cargs arguments arg arg arg If Compare Raise Call Return return:yes"
  },
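This follows the ctypes `errcheck` convention: ctypes invokes the checker as `errcheck(result, func, cargs)` after each foreign call, and whatever it returns becomes the Python-level result. A generic sketch (the library and function names are hypothetical):

```python
import ctypes

def check_nonzero(result, func, cargs):
    # Called by ctypes after every invocation of the foreign function;
    # the returned value replaces the raw C result.
    if result == 0:
        raise RuntimeError("foreign call reported failure")
    return result

# lib = ctypes.CDLL("libsomething.so")     # hypothetical library
# lib.some_func.errcheck = check_nonzero   # attach the checker
```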
  {
    "library": "tensorflow",
    "name": "_merge_tensors",
    "source_code": "def _merge_tensors(t1, t2, name, validate):\n    if t1 is None:\n        return (t2, False)\n    elif t2 is None:\n        return (t1, False)\n    elif t1 is t2:\n        return (t1, True)\n    else:\n        err_msg = 'RowPartition._merge_precomputed_encodings: partitions have incompatible %s' % name\n        if not t1.shape.is_compatible_with(t2.shape):\n            raise ValueError(err_msg)\n        if validate:\n            checks = [check_ops.assert_equal(t1, t2, message=err_msg)]\n            return (control_flow_ops.with_dependencies(checks, t1), True)\n        else:\n            return (t1, False)",
    "docstring": "Merge two optional Tensors with equal values into a single Tensor. Args: t1: tf.Tensor or None t2: tf.Tensor or None name: A name for the tensors (for error messages) validate: If true, then check that is compatible with (if both are non-None). Returns: A pair : * is if it is not None; or otherwise. * is true if we validated that t1 and t2 are equal (either by adding a check, or because t1 is t2).",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\row_partition.py",
    "ast_data": "FunctionDef name:_merge_tensors arg:t1 arg:t2 arg:name arg:validate arguments arg arg arg arg If Compare Return return:yes If Compare Return return:yes If Compare Return return:yes Assign If Call Raise Call If Assign Call Return return:yes Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_kl_normal_normal",
    "source_code": "@kullback_leibler.RegisterKL(Normal, Normal)\ndef _kl_normal_normal(n_a, n_b, name=None):\n    with ops.name_scope(name, 'kl_normal_normal', [n_a.loc, n_b.loc]):\n        one = constant_op.constant(1, dtype=n_a.dtype)\n        two = constant_op.constant(2, dtype=n_a.dtype)\n        half = constant_op.constant(0.5, dtype=n_a.dtype)\n        s_a_squared = math_ops.square(n_a.scale)\n        s_b_squared = math_ops.square(n_b.scale)\n        ratio = s_a_squared / s_b_squared\n        return math_ops.squared_difference(n_a.loc, n_b.loc) / (two * s_b_squared) + half * (ratio - one - math_ops.log(ratio))",
    "docstring": "Calculate the batched KL divergence KL(n_a || n_b) with n_a and n_b Normal. Args: n_a: instance of a Normal distribution object. n_b: instance of a Normal distribution object. name: (optional) Name to use for created operations. default is \"kl_normal_normal\". Returns: Batchwise KL(n_a || n_b)",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\distributions\\normal.py",
    "ast_data": "FunctionDef name:_kl_normal_normal arg:n_a arg:n_b arg:name arguments arg arg arg With Call Assign Call Assign Call Assign Call Assign Call Assign Call Assign Return return:yes Call Call Call"
  },
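The closed form implemented above is KL(N(mu_a, s_a^2) || N(mu_b, s_b^2)) = (mu_a - mu_b)^2 / (2 s_b^2) + (s_a^2/s_b^2 - 1 - log(s_a^2/s_b^2)) / 2. A pure-NumPy sanity check of that formula (a sketch, independent of TensorFlow):

```python
import numpy as np

def kl_normal(mu_a, s_a, mu_b, s_b):
    ratio = s_a**2 / s_b**2
    return (mu_a - mu_b) ** 2 / (2 * s_b**2) + 0.5 * (ratio - 1 - np.log(ratio))

assert np.isclose(kl_normal(0.0, 1.0, 0.0, 1.0), 0.0)  # identical distributions
print(kl_normal(1.0, 1.0, 0.0, 2.0))                   # positive for distinct ones
```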
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, thunk):\n    self._thunk = thunk\n    self._master_tensor = thunk()",
    "docstring": "Initializes a _LazyEvalTensor object. Args: thunk: A callable. A thunk which computes the value of the tensor.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\variable_scope.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:thunk arguments arg arg Assign Assign Call"
  },
  {
    "library": "cherrypy",
    "name": "update",
    "source_code": "def update(self, d):\n    if not self.loaded:\n        self.load()\n    self._data.update(d)",
    "docstring": "Update multiple session-stored objects in one go. D.update(E) -> None. Update D from E: for k in E: D[k] = E[k].",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\lib\\sessions.py",
    "ast_data": "FunctionDef name:update arg:self arg:d arguments arg arg If Call Call"
  },
  {
    "library": "tensorflow",
    "name": "from_frozen_graph",
    "source_code": "@classmethod\n@_deprecation.deprecated(None, 'Use `lite.TFLiteConverter.from_frozen_graph` instead.')\ndef from_frozen_graph(cls, graph_def_file, input_arrays, output_arrays, input_shapes=None):\n    return TFLiteConverter.from_frozen_graph(graph_def_file, input_arrays, output_arrays, input_shapes)",
    "docstring": "Creates a TocoConverter class from a file containing a frozen graph.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\lite\\python\\lite.py",
    "ast_data": "FunctionDef name:from_frozen_graph arg:cls arg:graph_def_file arg:input_arrays arg:output_arrays arg:input_shapes arguments arg arg arg arg arg Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "uniform_",
    "source_code": "def uniform_(tensor: Tensor, a: float=0.0, b: float=1.0, generator: _Optional[torch.Generator]=None) -> Tensor:\n    if torch.overrides.has_torch_function_variadic(tensor):\n        return torch.overrides.handle_torch_function(uniform_, (tensor,), tensor=tensor, a=a, b=b, generator=generator)\n    return _no_grad_uniform_(tensor, a, b, generator)",
    "docstring": "Fill the input Tensor with values drawn from the uniform distribution. :math:. Args: tensor: an n-dimensional a: the lower bound of the uniform distribution b: the upper bound of the uniform distribution generator: the torch Generator to sample from (default: None) Examples: >>> w = torch.empty(3, 5) >>> nn.init.uniform_(w)",
    "type": "function",
    "file_path": "pytorch\\torch\\nn\\init.py",
    "ast_data": "FunctionDef name:uniform_ arg:tensor arg:a arg:b arg:generator arguments arg arg arg arg If Call Return return:yes Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "AppendDocstring",
    "source_code": "class AppendDocstring:\n\n    def __init__(self, additional_note='', kwargs_dict=None):\n        self._additional_note = additional_note\n        if kwargs_dict:\n            bullets = []\n            for key in sorted(kwargs_dict.keys()):\n                value = kwargs_dict[key]\n                if any((x.isspace() for x in key)):\n                    raise ValueError('Parameter name \"%s\" contains whitespace.' % key)\n                value = value.lstrip()\n                if '\\n' in value:\n                    raise ValueError('Parameter description for \"%s\" contains newlines.' % key)\n                bullets.append('*  `%s`: %s' % (key, value))\n            self._additional_note += '\\n\\n##### `kwargs`:\\n\\n' + '\\n'.join(bullets)\n\n    def __call__(self, fn):\n\n        @functools.wraps(fn)\n        def _fn(*args, **kwargs):\n            return fn(*args, **kwargs)\n        if _fn.__doc__ is None:\n            _fn.__doc__ = self._additional_note\n        else:\n            _fn.__doc__ += '\\n%s' % self._additional_note\n        return _fn",
    "docstring": "Helper class to promote private subclass docstring to public counterpart. Example: In this case, the decorator appends the to the docstring of (not ) and adds a new section with each dictionary item as a bullet-point. For a more detailed example, see .",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\distributions\\util.py",
    "ast_data": "ClassDef name:AppendDocstring FunctionDef name:__init__ arg:self arg:additional_note arg:kwargs_dict arguments arg arg arg Assign If Assign For Call Call Assign If Call Call Raise Call Assign Call If Compare Raise Call Call Call FunctionDef name:__call__ arg:self arg:fn arguments arg arg FunctionDef name:_fn arguments arg arg Return return:yes Call Call If Compare Assign Return return:yes"
  },
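A hedged sketch of how such a decorator is applied (the class and method names here are hypothetical; in TensorFlow the promotion from `_prob` to the public `prob` happens in the distribution base class):

```python
class _MyDistribution:
    @AppendDocstring(
        additional_note="A special note!",
        kwargs_dict={"foo": "An extra arg."},
    )
    def _prob(self, y, foo=None):
        """Base docstring."""
        return y

# _MyDistribution._prob.__doc__ now ends with the note and a `kwargs` bullet list.
```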
  {
    "library": "seaborn",
    "name": "_determine_grid_dimensions",
    "source_code": "def _determine_grid_dimensions(self, facet_spec: FacetSpec, pair_spec: PairSpec) -> None:\n    self.grid_dimensions: dict[str, list] = {}\n    for dim, axis in zip(['col', 'row'], ['x', 'y']):\n        facet_vars = facet_spec.get('variables', {})\n        if dim in facet_vars:\n            self.grid_dimensions[dim] = facet_spec['structure'][dim]\n        elif axis in pair_spec.get('structure', {}):\n            self.grid_dimensions[dim] = [None for _ in pair_spec.get('structure', {})[axis]]\n        else:\n            self.grid_dimensions[dim] = [None]\n        self.subplot_spec[f'n{dim}s'] = len(self.grid_dimensions[dim])\n    if not pair_spec.get('cross', True):\n        self.subplot_spec['nrows'] = 1\n    self.n_subplots = self.subplot_spec['ncols'] * self.subplot_spec['nrows']",
    "docstring": "Parse faceting and pairing information to define figure structure.",
    "type": "method",
    "file_path": "seaborn\\seaborn\\_core\\subplots.py",
    "ast_data": "FunctionDef name:_determine_grid_dimensions arg:self arg:facet_spec arg:pair_spec arguments arg arg arg For Call Assign Call If Compare Assign If Compare Call Assign Call Assign Assign Call If Call Assign Assign"
  },
  {
    "library": "django",
    "name": "send_messages",
    "source_code": "def send_messages(self, email_messages):\n    raise NotImplementedError('subclasses of BaseEmailBackend must override send_messages() method')",
    "docstring": "Send one or more EmailMessage objects and return the number of email messages sent.",
    "type": "method",
    "file_path": "django\\django\\core\\mail\\backends\\base.py",
    "ast_data": "FunctionDef name:send_messages arg:self arg:email_messages arguments arg arg Raise Call"
  },
  {
    "library": "django",
    "name": "c",
    "source_code": "def c(self):\n    return self.data.isoformat()",
    "docstring": "ISO 8601 Format Example : '2008-01-02T10:30:00.000123'",
    "type": "method",
    "file_path": "django\\django\\utils\\dateformat.py",
    "ast_data": "FunctionDef name:c arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "statically_known_false",
    "source_code": "def statically_known_false(x: BoolLikeType) -> bool:\n    if not isinstance(x, SymBool):\n        assert isinstance(x, bool)\n        return not x\n    result = _static_eval_sym_bool(x)\n    if result is None:\n        return False\n    return not result",
    "docstring": "Returns True if x can be simplified to a constant and is False. If x cannot be evaluated from static, we return False .. note:: This function doesn't introduce new guards, so the expression may end up evaluating to False at runtime even if this function returns False. Args: x (bool, SymBool): The expression to try statically evaluating",
    "type": "function",
    "file_path": "pytorch\\torch\\fx\\experimental\\symbolic_shapes.py",
    "ast_data": "FunctionDef name:statically_known_false arg:x arguments arg If Call Call Return return:yes Assign Call If Compare Return return:yes Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "get_updates_for",
    "source_code": "@doc_controls.do_not_generate_docs\ndef get_updates_for(self, inputs):\n    warnings.warn('`layer.get_updates_for` is deprecated and will be removed in a future version. Please use `layer.updates` method instead.')\n    return self.updates",
    "docstring": "Deprecated, do NOT use! Retrieves updates relevant to a specific set of inputs. Args: inputs: Input tensor or list/tuple of input tensors. Returns: List of update ops of the layer that depend on .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\base_layer.py",
    "ast_data": "FunctionDef name:get_updates_for arg:self arg:inputs arguments arg arg Call Return return:yes"
  },
  {
    "library": "scipy",
    "name": "_allowance",
    "source_code": "def _allowance(self, confidence_level: DecimalNumber=0.95, tol: DecimalNumber=0.001) -> float:\n    alpha = 1 - confidence_level\n\n    def pvalue_from_stat(statistic):\n        statistic = np.array(statistic)\n        sf = _pvalue_dunnett(rho=self._rho, df=self._df, statistic=statistic, alternative=self._alternative, rng=self._rng)\n        return abs(sf - alpha) / alpha\n    res = minimize_scalar(pvalue_from_stat, method='brent', tol=tol)\n    critical_value = res.x\n    if res.success is False or res.fun >= tol * 10:\n        warnings.warn(f'Computation of the confidence interval did not converge to the desired level. The confidence level corresponding with the returned interval is approximately {alpha * (1 + res.fun)}.', stacklevel=3)\n    allowance = critical_value * self._std * np.sqrt(1 / self._n_samples + 1 / self._n_control)\n    return abs(allowance)",
    "docstring": "Allowance. It is the quantity to add/subtract from the observed difference between the means of observed groups and the mean of the control group. The result gives confidence limits. Parameters ---------- confidence_level : float, optional Confidence level for the computed confidence interval. Default is .95. tol : float, optional A tolerance for numerical optimization: the allowance will produce a confidence within `` of the specified level, or a warning will be emitted. Tight tolerances may be impractical due to noisy evaluation of the objective. Default is 1e-3. Returns ------- allowance : float Allowance around the mean.",
    "type": "method",
    "file_path": "scipy\\scipy\\stats\\_multicomp.py",
    "ast_data": "FunctionDef name:_allowance arg:self arg:confidence_level arg:tol arguments arg arg arg Assign FunctionDef name:pvalue_from_stat arg:statistic arguments arg Assign Call Assign Call Return return:yes Call Assign Call Assign If BoolOp Compare Compare Call Assign Call Return return:yes Call"
  },
  {
    "library": "kornia",
    "name": "rgb255_to_rgb",
    "source_code": "def rgb255_to_rgb(image: Tensor) -> Tensor:\n    KORNIA_CHECK_IS_COLOR(image)\n    rgb = image / 255.0\n    return rgb",
    "docstring": "Convert an image from RGB [0, 255] to RGB for visualization purposes. Args: image: RGB Image to be converted to RGB of shape :math:. Returns: RGB version of the image with shape of shape :math:. Example: >>> input = torch.rand(2, 3, 4, 5) >>> output = rgb255_to_rgb(input) # 2x3x4x5",
    "type": "function",
    "file_path": "kornia\\kornia\\color\\rgb.py",
    "ast_data": "FunctionDef name:rgb255_to_rgb arg:image arguments arg Call Assign Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_reduce_op",
    "source_code": "class _reduce_op:\n\n    def __init__(self) -> None:\n        for k, v in ReduceOp.RedOpType.__members__.items():\n            setattr(self, k, v)\n        self.__members__ = ReduceOp.RedOpType.__members__\n\n    @deprecated('`torch.distributed.reduce_op` is deprecated, please use `torch.distributed.ReduceOp` instead', category=FutureWarning)\n    def __getattribute__(self, key):\n        return object.__getattribute__(self, key)",
    "docstring": "Deprecated enum-like class. For reduction operations: `~torch.distributed.ReduceOp` is recommended to use instead.",
    "type": "class",
    "file_path": "pytorch\\torch\\distributed\\distributed_c10d.py",
    "ast_data": "ClassDef name:_reduce_op FunctionDef name:__init__ arg:self arguments arg For Call Call Assign FunctionDef name:__getattribute__ arg:self arg:key arguments arg arg Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "RendezvousSettings",
    "source_code": "@dataclass(repr=False, eq=False, frozen=True)\nclass RendezvousSettings:\n    run_id: str\n    min_nodes: int\n    max_nodes: int\n    timeout: RendezvousTimeout\n    keep_alive_interval: timedelta\n    keep_alive_max_attempt: int",
    "docstring": "Hold the settings of the rendezvous. Attributes: run_id: The run id of the rendezvous. min_nodes: The minimum number of nodes to admit to the rendezvous. max_nodes: The maximum number of nodes to admit to the rendezvous. timeout: The timeout configuration of the rendezvous. keep_alive_interval: The amount of time a node waits before sending a heartbeat to keep it alive in the rendezvous. keep_alive_max_attempt: The maximum number of failed heartbeat attempts after which a node is considered dead.",
    "type": "class",
    "file_path": "pytorch\\torch\\distributed\\elastic\\rendezvous\\dynamic_rendezvous.py",
    "ast_data": "ClassDef name:RendezvousSettings Call"
  },
  {
    "library": "django",
    "name": "pagination",
    "source_code": "def pagination(cl):\n    pagination_required = (not cl.show_all or not cl.can_show_all) and cl.multi_page\n    page_range = cl.paginator.get_elided_page_range(cl.page_num) if pagination_required else []\n    need_show_all_link = cl.can_show_all and (not cl.show_all) and cl.multi_page\n    return {'cl': cl, 'pagination_required': pagination_required, 'show_all_url': need_show_all_link and cl.get_query_string({ALL_VAR: ''}), 'page_range': page_range, 'ALL_VAR': ALL_VAR, '1': 1}",
    "docstring": "Generate the series of links to the pages in a paginated list.",
    "type": "function",
    "file_path": "django\\django\\contrib\\admin\\templatetags\\admin_list.py",
    "ast_data": "FunctionDef name:pagination arg:cl arguments arg Assign BoolOp BoolOp Assign Call Assign BoolOp Return return:yes BoolOp Call"
  },
  {
    "library": "numpy",
    "name": "_opt_info",
    "source_code": "def _opt_info():\n    from numpy._core._multiarray_umath import __cpu_baseline__, __cpu_dispatch__, __cpu_features__\n    if len(__cpu_baseline__) == 0 and len(__cpu_dispatch__) == 0:\n        return ''\n    enabled_features = ' '.join(__cpu_baseline__)\n    for feature in __cpu_dispatch__:\n        if __cpu_features__[feature]:\n            enabled_features += f' {feature}*'\n        else:\n            enabled_features += f' {feature}?'\n    return enabled_features",
    "docstring": "Returns a string containing the CPU features supported by the current build. The format of the string can be explained as follows: - Dispatched features supported by the running machine end with . - Dispatched features not supported by the running machine end with . - Remaining features represent the baseline. Returns: str: A formatted string indicating the supported CPU features.",
    "type": "function",
    "file_path": "numpy\\numpy\\lib\\_utils_impl.py",
    "ast_data": "FunctionDef name:_opt_info arguments If BoolOp Compare Call Compare Call Return return:yes Assign Call For If Return return:yes"
  },
  {
    "library": "django",
    "name": "__str__",
    "source_code": "def __str__(self):\n    return str(self.tuple)",
    "docstring": "Return the string representation of the coordinate sequence.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\geos\\coordseq.py",
    "ast_data": "FunctionDef name:__str__ arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "_post_reduce_grad_callback",
    "source_code": "@no_type_check\ndef _post_reduce_grad_callback(state: _FSDPState, handle: FlatParamHandle, grad_to_offload: torch.Tensor):\n    _offload_grad(state, handle, grad_to_offload)\n    _post_backward_use_sharded_grad_views(handle)",
    "docstring": "This callback captures any logic to run after the gradient reduction finishes. Currently, this offloads the gradient to CPU if CPU offloading is enabled and uses sharded gradient views if ``.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\fsdp\\_runtime_utils.py",
    "ast_data": "FunctionDef name:_post_reduce_grad_callback arg:state arg:handle arg:grad_to_offload arguments arg arg arg Call Call"
  },
  {
    "library": "matplotlib",
    "name": "clf",
    "source_code": "def clf() -> None:\n    gcf().clear()",
    "docstring": "Clear the current figure.",
    "type": "function",
    "file_path": "matplotlib\\lib\\matplotlib\\pyplot.py",
    "ast_data": "FunctionDef name:clf arguments Call Call"
  },
  {
    "library": "pytorch",
    "name": "add_summary",
    "source_code": "def add_summary(self, summary, global_step=None, walltime=None):\n    event = event_pb2.Event(summary=summary)\n    self.add_event(event, global_step, walltime)",
    "docstring": "Add a protocol buffer to the event file. This method wraps the provided summary in an protocol buffer and adds it to the event file. Args: summary: A protocol buffer. global_step: Number. Optional global step value for training process to record with the summary. walltime: float. Optional walltime to override the default (current) walltime (from time.time()) seconds after epoch",
    "type": "method",
    "file_path": "pytorch\\torch\\utils\\tensorboard\\writer.py",
    "ast_data": "FunctionDef name:add_summary arg:self arg:summary arg:global_step arg:walltime arguments arg arg arg arg Assign Call Call"
  },
  {
    "library": "scipy",
    "name": "__new__",
    "source_code": "def __new__(cls, *system, **kwargs):\n    if cls is LinearTimeInvariant:\n        raise NotImplementedError('The LinearTimeInvariant class is not meant to be used directly, use `lti` or `dlti` instead.')\n    return super().__new__(cls)",
    "docstring": "Create a new object, don't allow direct instances.",
    "type": "method",
    "file_path": "scipy\\scipy\\signal\\_ltisys.py",
    "ast_data": "FunctionDef name:__new__ arg:cls arguments arg arg arg If Compare Raise Call Return return:yes Call Call"
  },
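The guard above means `LinearTimeInvariant()` raises, while the public subclasses work normally. A minimal sketch (assumes SciPy is installed):

```python
from scipy.signal import lti

sys = lti([1.0], [1.0, 2.0, 1.0])  # transfer function 1 / (s^2 + 2s + 1)
print(sys.poles)                   # both poles at -1
```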
  {
    "library": "scikit-learn",
    "name": "_bisect",
    "source_code": "def _bisect(self, X, x_squared_norms, sample_weight, cluster_to_bisect):\n    X = X[cluster_to_bisect.indices]\n    x_squared_norms = x_squared_norms[cluster_to_bisect.indices]\n    sample_weight = sample_weight[cluster_to_bisect.indices]\n    best_inertia = None\n    for _ in range(self.n_init):\n        centers_init = self._init_centroids(X, x_squared_norms=x_squared_norms, init=self.init, random_state=self._random_state, n_centroids=2, sample_weight=sample_weight)\n        labels, inertia, centers, _ = self._kmeans_single(X, sample_weight, centers_init, max_iter=self.max_iter, verbose=self.verbose, tol=self.tol, n_threads=self._n_threads)\n        if best_inertia is None or inertia < best_inertia * (1 - 1e-06):\n            best_labels = labels\n            best_centers = centers\n            best_inertia = inertia\n    if self.verbose:\n        print(f'New centroids from bisection: {best_centers}')\n    if self.bisecting_strategy == 'biggest_inertia':\n        scores = self._inertia_per_cluster(X, best_centers, best_labels, sample_weight)\n    else:\n        scores = np.bincount(best_labels, minlength=2)\n    cluster_to_bisect.split(best_labels, best_centers, scores)",
    "docstring": "Split a cluster into 2 subsclusters. Parameters ---------- X : {ndarray, csr_matrix} of shape (n_samples, n_features) Training instances to cluster. x_squared_norms : ndarray of shape (n_samples,) Squared euclidean norm of each data point. sample_weight : ndarray of shape (n_samples,) The weights for each observation in X. cluster_to_bisect : _BisectingTree node object The cluster node to split.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\cluster\\_bisect_k_means.py",
    "ast_data": "FunctionDef name:_bisect arg:self arg:X arg:x_squared_norms arg:sample_weight arg:cluster_to_bisect arguments arg arg arg arg arg Assign Assign Assign Assign For Call Assign Call Assign Call If BoolOp Compare Compare Assign Assign Assign If Call If Compare Assign Call Assign Call Call"
  },
  {
    "library": "pytorch",
    "name": "bucketize",
    "source_code": "def bucketize(self, values: CSEVariable, boundaries: tuple[str, sympy.Expr, sympy.Expr, sympy.Expr], boundary_indices: CSEVariable, indexing_dtype: torch.dtype, right: bool, sorter: Optional[tuple[str, sympy.Expr]]=None, sorter_indices: Optional[CSEVariable]=None) -> CSEVariable:\n    raise NotImplementedError",
    "docstring": "See [Note: Inductor bucketize op]",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\codegen\\common.py",
    "ast_data": "FunctionDef name:bucketize arg:self arg:values arg:boundaries arg:boundary_indices arg:indexing_dtype arg:right arg:sorter arg:sorter_indices arguments arg arg arg arg arg arg arg arg Raise"
  },
  {
    "library": "scikit-learn",
    "name": "KMeansBenchmark",
    "source_code": "class KMeansBenchmark(Predictor, Transformer, Estimator, Benchmark):\n    param_names = ['representation', 'algorithm', 'init']\n    params = (['dense', 'sparse'], ['lloyd', 'elkan'], ['random', 'k-means++'])\n\n    def setup_cache(self):\n        super().setup_cache()\n\n    def make_data(self, params):\n        representation, algorithm, init = params\n        if representation == 'sparse':\n            data = _20newsgroups_highdim_dataset(n_samples=8000)\n        else:\n            data = _blobs_dataset(n_clusters=20)\n        return data\n\n    def make_estimator(self, params):\n        representation, algorithm, init = params\n        max_iter = 30 if representation == 'sparse' else 100\n        estimator = KMeans(n_clusters=20, algorithm=algorithm, init=init, n_init=1, max_iter=max_iter, tol=0, random_state=0)\n        return estimator\n\n    def make_scorers(self):\n        self.train_scorer = lambda _, __: neg_mean_inertia(self.X, self.estimator.predict(self.X), self.estimator.cluster_centers_)\n        self.test_scorer = lambda _, __: neg_mean_inertia(self.X_val, self.estimator.predict(self.X_val), self.estimator.cluster_centers_)",
    "docstring": "Benchmarks for KMeans.",
    "type": "class",
    "file_path": "scikit-learn\\asv_benchmarks\\benchmarks\\cluster.py",
    "ast_data": "ClassDef name:KMeansBenchmark Assign Assign FunctionDef name:setup_cache arg:self arguments arg Call Call FunctionDef name:make_data arg:self arg:params arguments arg arg Assign If Compare Assign Call Assign Call Return return:yes FunctionDef name:make_estimator arg:self arg:params arguments arg arg Assign Assign Compare Assign Call Return return:yes FunctionDef name:make_scorers arg:self arguments arg Assign arguments arg arg Call Call Assign arguments arg arg Call Call"
  },
  {
    "library": "pytorch",
    "name": "add_submodule",
    "source_code": "@compatibility(is_backward_compatible=True)\ndef add_submodule(self, target: str, m: torch.nn.Module) -> bool:\n    *prefix, field = target.split('.')\n    mod: torch.nn.Module = self\n    for item in prefix:\n        submod = getattr(mod, item, None)\n        if submod is None:\n            submod = torch.nn.Module()\n            setattr(mod, item, submod)\n        if not isinstance(submod, torch.nn.Module):\n            return False\n        mod = submod\n    mod.add_module(field, m)\n    return True",
    "docstring": "Adds the given submodule to `` (not a parameter or other attribute)",
    "type": "method",
    "file_path": "pytorch\\torch\\fx\\graph_module.py",
    "ast_data": "FunctionDef name:add_submodule arg:self arg:target arg:m arguments arg arg arg Assign Call For Assign Call If Compare Assign Call Call If Call Return return:yes Assign Call Return return:yes Call"
  },
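A short sketch of the path-creating behavior (assumes a traceable module; empty Modules `a` and `a.b` are created on the way):

```python
import torch
from torch import fx, nn

class M(nn.Module):
    def forward(self, x):
        return x

gm = fx.symbolic_trace(M())
assert gm.add_submodule("a.b.linear", nn.Linear(2, 2))
print(gm.get_submodule("a.b.linear"))  # Linear(in_features=2, out_features=2, ...)
```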
  {
    "library": "numpy",
    "name": "__eq__",
    "source_code": "def __eq__(self, other):\n    return self._comparison(other, operator.eq)",
    "docstring": "Check whether other equals self elementwise. When either of the elements is masked, the result is masked as well, but the underlying boolean data are still set, with self and other considered equal if both are masked, and unequal otherwise. For structured arrays, all fields are combined, with masked values ignored. The result is masked if all fields were masked, with self and other considered equal only if both were fully masked.",
    "type": "method",
    "file_path": "numpy\\numpy\\ma\\core.py",
    "ast_data": "FunctionDef name:__eq__ arg:self arg:other arguments arg arg Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "fit",
    "source_code": "def fit(self, X, y, *args, **kwargs):\n    check_is_fitted(self.estimator)\n    return self",
    "docstring": "No-op. As a frozen estimator, calling has no effect. Parameters ---------- X : object Ignored. y : object Ignored. *args : tuple Additional positional arguments. Ignored, but present for API compatibility with . **kwargs : dict Additional keyword arguments. Ignored, but present for API compatibility with . Returns ------- self : object Returns the instance itself.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\frozen\\_frozen.py",
    "ast_data": "FunctionDef name:fit arg:self arg:X arg:y arguments arg arg arg arg arg Call Return return:yes"
  },
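A minimal usage sketch (assumes a scikit-learn version that ships `sklearn.frozen.FrozenEstimator`):

```python
from sklearn.datasets import make_classification
from sklearn.frozen import FrozenEstimator
from sklearn.linear_model import LogisticRegression

X, y = make_classification(random_state=0)
frozen = FrozenEstimator(LogisticRegression().fit(X, y))
frozen.fit(X, y)  # no-op: the wrapped, already-fitted estimator is untouched
```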
  {
    "library": "tensorflow",
    "name": "sequence_categorical_column_with_hash_bucket",
    "source_code": "@doc_controls.header(_FEATURE_COLUMN_DEPRECATION_WARNING)\n@tf_export('feature_column.sequence_categorical_column_with_hash_bucket')\n@deprecation.deprecated(None, _FEATURE_COLUMN_DEPRECATION_RUNTIME_WARNING)\ndef sequence_categorical_column_with_hash_bucket(key, hash_bucket_size, dtype=dtypes.string):\n    return fc.SequenceCategoricalColumn(fc.categorical_column_with_hash_bucket(key=key, hash_bucket_size=hash_bucket_size, dtype=dtype))",
    "docstring": "A sequence of categorical terms where ids are set by hashing. Pass this to or to convert sequence categorical data into dense representation for input to sequence NN, such as RNN. Example: Args: key: A unique string identifying the input feature. hash_bucket_size: An int > 1. The number of buckets. dtype: The type of features. Only string and integer types are supported. Returns: A . Raises: ValueError: is not greater than 1. ValueError: is neither string nor integer.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\feature_column\\sequence_feature_column.py",
    "ast_data": "FunctionDef name:sequence_categorical_column_with_hash_bucket arg:key arg:hash_bucket_size arg:dtype arguments arg arg arg Return return:yes Call Call Call Call Call"
  },
  {
    "library": "matplotlib",
    "name": "AxisScaleBase",
    "source_code": "class AxisScaleBase(ToolToggleBase):\n\n    def trigger(self, sender, event, data=None):\n        if event.inaxes is None:\n            return\n        super().trigger(sender, event, data)\n\n    def enable(self, event=None):\n        self.set_scale(event.inaxes, 'log')\n        self.figure.canvas.draw_idle()\n\n    def disable(self, event=None):\n        self.set_scale(event.inaxes, 'linear')\n        self.figure.canvas.draw_idle()",
    "docstring": "Base Tool to toggle between linear and logarithmic.",
    "type": "class",
    "file_path": "matplotlib\\lib\\matplotlib\\backend_tools.py",
    "ast_data": "ClassDef name:AxisScaleBase FunctionDef name:trigger arg:self arg:sender arg:event arg:data arguments arg arg arg arg If Compare Return return:no Call Call FunctionDef name:enable arg:self arg:event arguments arg arg Call Call FunctionDef name:disable arg:self arg:event arguments arg arg Call Call"
  },
  {
    "library": "pytorch",
    "name": "mark",
    "source_code": "def mark(msg):\n    return _itt.mark(msg)",
    "docstring": "Describe an instantaneous event that occurred at some point. Arguments: msg (str): ASCII message to associate with the event.",
    "type": "function",
    "file_path": "pytorch\\torch\\profiler\\itt.py",
    "ast_data": "FunctionDef name:mark arg:msg arguments arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "history_map",
    "source_code": "@property\ndef history_map(self):\n    return self._history_map",
    "docstring": "The map that records all the tensors needed for backprop.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\control_flow_state.py",
    "ast_data": "FunctionDef name:history_map arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "encoding_specs",
    "source_code": "@abc.abstractmethod\ndef encoding_specs(self, spec):\n    raise NotImplementedError(f'{type(self).__name__}.encoding_specs')",
    "docstring": "Returns a nest of (s) describing the encoding for . Args: spec: The TypeSpec whose encoding should be described. Returns: A nest (as defined by tf.TypeSpecself.encode(spec, ...)`. All TypeSpecs in this nest must be batchable.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\type_spec.py",
    "ast_data": "FunctionDef name:encoding_specs arg:self arg:spec arguments arg arg Raise Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_boundaries_to_sizes",
    "source_code": "def _boundaries_to_sizes(a, boundaries, axis):\n    if axis >= len(a.shape):\n        raise ValueError('axis %s is out of bound for shape %s' % (axis, a.shape))\n    total_size = a.shape[axis]\n    sizes = []\n    sizes_sum = 0\n    prev = 0\n    for i, b in enumerate(boundaries):\n        size = b - prev\n        if size < 0:\n            raise ValueError('The %s-th boundary %s is smaller than the previous boundary %s' % (i, b, prev))\n        size = builtins.min(size, builtins.max(0, total_size - sizes_sum))\n        sizes.append(size)\n        sizes_sum += size\n        prev = b\n    sizes.append(builtins.max(0, total_size - sizes_sum))\n    return sizes",
    "docstring": "Converting boundaries of splits to sizes of splits. Args: a: the array to be split. boundaries: the boundaries, as in np.split. axis: the axis along which to split. Returns: A list of sizes of the splits, as in tf.split.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\numpy_ops\\np_array_ops.py",
    "ast_data": "FunctionDef name:_boundaries_to_sizes arg:a arg:boundaries arg:axis arguments arg arg arg If Compare Call Raise Call Assign Assign Assign Assign For Call Assign If Compare Raise Call Assign Call Call Call Assign Call Call Return return:yes"
  },
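A worked example of the conversion (pure Python, mirroring the np.split semantics described above; `boundaries_to_sizes` here is a simplified stand-in for the private helper):

```python
def boundaries_to_sizes(total, boundaries):
    sizes, prev = [], 0
    for b in boundaries:
        sizes.append(min(b - prev, max(0, total - sum(sizes))))
        prev = b
    sizes.append(max(0, total - sum(sizes)))
    return sizes

# boundaries [2, 5] on an axis of length 8 -> splits of sizes [2, 3, 3]
assert boundaries_to_sizes(8, [2, 5]) == [2, 3, 3]
```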
  {
    "library": "seaborn",
    "name": "husl_palette",
    "source_code": "def husl_palette(n_colors=6, h=0.01, s=0.9, l=0.65, as_cmap=False):\n    if as_cmap:\n        n_colors = 256\n    hues = np.linspace(0, 1, int(n_colors) + 1)[:-1]\n    hues += h\n    hues %= 1\n    hues *= 359\n    s *= 99\n    l *= 99\n    palette = [_color_to_rgb((h_i, s, l), input='husl') for h_i in hues]\n    if as_cmap:\n        return mpl.colors.ListedColormap(palette, 'hsl')\n    else:\n        return _ColorPalette(palette)",
    "docstring": "Return hues with constant lightness and saturation in the HUSL system. The hues are evenly sampled along a circular path. The resulting palette will be appropriate for categorical or cyclical data. The , , and values should be between 0 and 1. This function is similar to :func:, but it uses a nonlinear color space that is more perceptually uniform. Parameters ---------- n_colors : int Number of colors in the palette. h : float The value of the first hue. l : float The lightness value. s : float The saturation intensity. as_cmap : bool If True, return a matplotlib colormap object. Returns ------- palette list of RGB tuples or :class: See Also -------- hls_palette : Make a palette using evenly spaced hues in the HSL system. Examples -------- .. include:: ../docstrings/husl_palette.rst",
    "type": "function",
    "file_path": "seaborn\\seaborn\\palettes.py",
    "ast_data": "FunctionDef name:husl_palette arg:n_colors arg:h arg:s arg:l arg:as_cmap arguments arg arg arg arg arg If Assign Assign Call Call Assign Call If Return return:yes Call Return return:yes Call"
  },
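A short usage sketch (assumes seaborn is installed; parameter names follow the signature above):

```python
import seaborn as sns

pal = sns.husl_palette(8)                # 8 evenly spaced HUSL hues
dark = sns.husl_palette(8, l=0.4)        # same hues, lower lightness
cmap = sns.husl_palette(as_cmap=True)    # 256-color ListedColormap
```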
  {
    "library": "scipy",
    "name": "variation",
    "source_code": "def variation(a, axis=0, ddof=0):\n    a, axis = _chk_asarray(a, axis)\n    return a.std(axis, ddof=ddof) / a.mean(axis)",
    "docstring": "Compute the coefficient of variation. The coefficient of variation is the standard deviation divided by the mean. This function is equivalent to:: np.std(x, axis=axis, ddof=ddof) / np.mean(x) The default for `avariationscipy.stats.variationscipy.stats.variation` except 'stats.mstats.variation' ignores masked array elements.",
    "type": "function",
    "file_path": "scipy\\scipy\\stats\\_mstats_basic.py",
    "ast_data": "FunctionDef name:variation arg:a arg:axis arg:ddof arguments arg arg arg Assign Call Return return:yes Call Call"
  },
  {
    "library": "scipy",
    "name": "complete",
    "source_code": "@lazy_cython\ndef complete(y):\n    return linkage(y, method='complete', metric='euclidean')",
    "docstring": "Perform complete/max/farthest point linkage on a condensed distance matrix. Parameters ---------- y : ndarray The upper triangular of the distance matrix. The result of `linkagescipy.cluster.hierarchy.linkagescipy.cluster.hierarchy.fclusterscipy.cluster.hierarchy.dendrogram` can be used to generate a plot of the dendrogram.",
    "type": "function",
    "file_path": "scipy\\scipy\\cluster\\hierarchy.py",
    "ast_data": "FunctionDef name:complete arg:y arguments arg Return return:yes Call"
  },
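A minimal end-to-end sketch (assumes SciPy is installed): condensed distances in, linkage matrix out, then flat cluster labels.

```python
import numpy as np
from scipy.cluster.hierarchy import complete, fcluster
from scipy.spatial.distance import pdist

X = np.random.default_rng(0).normal(size=(10, 2))
Z = complete(pdist(X))                          # condensed -> linkage matrix
labels = fcluster(Z, t=2, criterion="maxclust")
```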
  {
    "library": "pytorch",
    "name": "all_to_all_single",
    "source_code": "def all_to_all_single(output, input, output_split_sizes=None, input_split_sizes=None, group=group.WORLD):\n    return _AlltoAllSingle.apply(group, output, output_split_sizes, input_split_sizes, input)",
    "docstring": "Each process splits input tensor and then scatters the split list to all processes in a group. Then concatenate the received tensors from all the processes in the group and return single output tensor. Arguments: output (Tensor): Gathered concatenated output tensor. input (Tensor): Input tensor to scatter. output_split_sizes: (list[Int], optional): Output split sizes for dim 0 if specified None or empty, dim 0 of ``. Returns: Tensor: Output of the collective.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\nn\\functional.py",
    "ast_data": "FunctionDef name:all_to_all_single arg:output arg:input arg:output_split_sizes arg:input_split_sizes arg:group arguments arg arg arg arg arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_DTypeFromTensor",
    "source_code": "def _DTypeFromTensor(tensor):\n    dtype = tensor.dtype\n    if dtype.base_dtype == dtypes.variant:\n        if isinstance(tensor, ops.EagerTensor):\n            handle_data = tensor._handle_data\n        else:\n            handle_data = handle_data_util.get_resource_handle_data(tensor)\n        if handle_data is not None and handle_data.is_set and handle_data.shape_and_type:\n            first_type = handle_data.shape_and_type[0].dtype\n            if first_type != types_pb2.DT_INVALID and all((shape_and_type.dtype == first_type for shape_and_type in handle_data.shape_and_type)):\n                return first_type\n    return dtype",
    "docstring": "Extract either or the unanimous sub-type of a variant.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\backprop_util.py",
    "ast_data": "FunctionDef name:_DTypeFromTensor arg:tensor arguments arg Assign If Compare If Call Assign Assign Call If BoolOp Compare Assign If BoolOp Compare Call Compare Return return:yes Return return:yes"
  },
  {
    "library": "kornia",
    "name": "DistanceTransform",
    "source_code": "class DistanceTransform(nn.Module):\n\n    def __init__(self, kernel_size: int=3, h: float=0.35) -> None:\n        super().__init__()\n        self.kernel_size = kernel_size\n        self.h = h\n\n    def forward(self, image: torch.Tensor) -> torch.Tensor:\n        if image.shape[1] > 1:\n            image_in = image.view(-1, 1, image.shape[-2], image.shape[-1])\n        else:\n            image_in = image\n        return distance_transform(image_in, self.kernel_size, self.h).view_as(image)",
    "docstring": "Module that approximates the Manhattan (city block) distance transform of images using convolutions. Args: kernel_size: size of the convolution kernel. h: value that influence the approximation of the min function.",
    "type": "class",
    "file_path": "kornia\\kornia\\contrib\\distance_transform.py",
    "ast_data": "ClassDef name:DistanceTransform FunctionDef name:__init__ arg:self arg:kernel_size arg:h arguments arg arg arg Call Call Assign Assign FunctionDef name:forward arg:self arg:image arguments arg arg If Compare Assign Call Assign Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "are_ghstack_branches_in_sync",
    "source_code": "def are_ghstack_branches_in_sync(repo: GitRepo, head_ref: str, base_ref: Optional[str]=None) -> bool:\n    orig_ref = re.sub('/head$', '/orig', head_ref)\n    if base_ref is None:\n        base_ref = re.sub('/head$', '/base', head_ref)\n    orig_diff_sha = _shasum(repo.diff(f'{repo.remote}/{orig_ref}'))\n    head_diff_sha = _shasum(repo.diff(base_ref if is_commit_hash(base_ref) else f'{repo.remote}/{base_ref}', f'{repo.remote}/{head_ref}'))\n    return orig_diff_sha == head_diff_sha",
    "docstring": "Checks that diff between base and head is the same as diff between orig and its parent",
    "type": "function",
    "file_path": "pytorch\\.github\\scripts\\gitutils.py",
    "ast_data": "FunctionDef name:are_ghstack_branches_in_sync arg:repo arg:head_ref arg:base_ref arguments arg arg arg Assign Call If Compare Assign Call Assign Call Call Assign Call Call Call Return return:yes Compare"
  },
  {
    "library": "tensorflow",
    "name": "_is_qat_saved_model",
    "source_code": "def _is_qat_saved_model(saved_model_path: str):\n    saved_model_proto = saved_model_loader.parse_saved_model(saved_model_path)\n    for meta_graph in saved_model_proto.meta_graphs:\n        if any((node.op.startswith('FakeQuant') for node in meta_graph.graph_def.node)):\n            return True\n        for function in meta_graph.graph_def.library.function:\n            if any((node.op.startswith('FakeQuant') for node in function.node_def)):\n                return True\n    return False",
    "docstring": "Checks if the SavedModel is QAT-enabled by looking for 'FakeQuant' ops.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\compiler\\mlir\\quantization\\tensorflow\\python\\quantize_model.py",
    "ast_data": "FunctionDef name:_is_qat_saved_model arg:saved_model_path arguments arg Assign Call For If Call Call Return return:yes For If Call Call Return return:yes Return return:yes"
  },
  {
    "library": "django",
    "name": "__init__",
    "source_code": "def __init__(self, *args):\n    if len(args) == 1:\n        if isinstance(args[0], OGREnvelope):\n            self._envelope = args[0]\n        elif isinstance(args[0], (tuple, list)):\n            if len(args[0]) != 4:\n                raise GDALException('Incorrect number of tuple elements (%d).' % len(args[0]))\n            else:\n                self._from_sequence(args[0])\n        else:\n            raise TypeError('Incorrect type of argument: %s' % type(args[0]))\n    elif len(args) == 4:\n        self._from_sequence([float(a) for a in args])\n    else:\n        raise GDALException('Incorrect number (%d) of arguments.' % len(args))\n    if self.min_x > self.max_x:\n        raise GDALException('Envelope minimum X > maximum X.')\n    if self.min_y > self.max_y:\n        raise GDALException('Envelope minimum Y > maximum Y.')",
    "docstring": "The initialization function may take an OGREnvelope structure, 4-element tuple or list, or 4 individual arguments.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\gdal\\envelope.py",
    "ast_data": "FunctionDef name:__init__ arg:self arguments arg arg If Compare Call If Call Assign If Call If Compare Call Raise Call Call Call Raise Call Call If Compare Call Call Call Raise Call Call If Compare Raise Call If Compare Raise Call"
  },
  {
    "library": "scikit-learn",
    "name": "TargetTags",
    "source_code": "@dataclass(slots=True)\nclass TargetTags:\n    required: bool\n    one_d_labels: bool = False\n    two_d_labels: bool = False\n    positive_only: bool = False\n    multi_output: bool = False\n    single_output: bool = True",
    "docstring": "Tags for the target data. Parameters ---------- required : bool Whether the estimator requires y to be passed to , or methods. The tag is `~sklearn.base.RegressorMixin~sklearn.base.ClassifierMixinmulti-output` if the estimator supports only multi-output cases.",
    "type": "class",
    "file_path": "scikit-learn\\sklearn\\utils\\_tags.py",
    "ast_data": "ClassDef name:TargetTags Call"
  },
  {
    "library": "scipy",
    "name": "D",
    "source_code": "@property\ndef D(self):\n    return self._D",
    "docstring": "Feedthrough matrix of the system.",
    "type": "method",
    "file_path": "scipy\\scipy\\signal\\_ltisys.py",
    "ast_data": "FunctionDef name:D arg:self arguments arg Return return:yes"
  },
  {
    "library": "django",
    "name": "make_debug_cursor",
    "source_code": "def make_debug_cursor(self, cursor):\n    return utils.CursorDebugWrapper(cursor, self)",
    "docstring": "Create a cursor that logs all queries in self.queries_log.",
    "type": "method",
    "file_path": "django\\django\\db\\backends\\base\\base.py",
    "ast_data": "FunctionDef name:make_debug_cursor arg:self arg:cursor arguments arg arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_get_intermediates",
    "source_code": "def _get_intermediates(func_graph):\n    intermediates = []\n    reverse_captures = dict(((v.ref(), k) for k, v in func_graph.captures))\n    for op in func_graph.get_operations():\n        if op.type == 'Identity':\n            continue\n        if op.type == 'MutexLock':\n            continue\n        for o in op.outputs:\n            if o is not func_graph.inputs[0] and o.dtype != dtypes.resource and (_get_accumulator(o) is None) and (o.ref() not in reverse_captures):\n                intermediates.append(o)\n    return intermediates",
    "docstring": "Returns all tensors in that should be accumulated.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\while_v2.py",
    "ast_data": "FunctionDef name:_get_intermediates arg:func_graph arguments arg Assign Assign Call Call For Call If Compare If Compare For If BoolOp Compare Compare Compare Call Compare Call Call Return return:yes"
  },
  {
    "library": "pandas",
    "name": "_intersection_unique",
    "source_code": "def _intersection_unique(self, other: IntervalIndex) -> IntervalIndex:\n    lindexer = self.left.get_indexer(other.left)\n    rindexer = self.right.get_indexer(other.right)\n    match = (lindexer == rindexer) & (lindexer != -1)\n    indexer = lindexer.take(match.nonzero()[0])\n    indexer = unique(indexer)\n    return self.take(indexer)",
    "docstring": "Used when the IntervalIndex does not have any common endpoint, no matter left or right. Return the intersection with another IntervalIndex. Parameters ---------- other : IntervalIndex Returns ------- IntervalIndex",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\indexes\\interval.py",
    "ast_data": "FunctionDef name:_intersection_unique arg:self arg:other arguments arg arg Assign Call Assign Call Assign Compare Compare Assign Call Call Assign Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "get_compression_type_string",
    "source_code": "@classmethod\ndef get_compression_type_string(cls, options):\n    if not options:\n        return ''\n    elif isinstance(options, TFRecordOptions):\n        return cls.get_compression_type_string(options.compression_type)\n    elif isinstance(options, TFRecordCompressionType):\n        return cls.compression_type_map[options]\n    elif options in TFRecordOptions.compression_type_map:\n        return cls.compression_type_map[options]\n    elif options in TFRecordOptions.compression_type_map.values():\n        return options\n    else:\n        raise ValueError('Not a valid compression_type: \"{}\"'.format(options))",
    "docstring": "Convert various option types to a unified string. Args: options: , , or string. Returns: Compression type as string (e.g. , , or ). Raises: ValueError: If compression_type is invalid.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\lib\\io\\tf_record.py",
    "ast_data": "FunctionDef name:get_compression_type_string arg:cls arg:options arguments arg arg If Return return:yes If Call Return return:yes Call If Call Return return:yes If Compare Return return:yes If Compare Call Return return:yes Raise Call Call"
  },
  {
    "library": "pytorch",
    "name": "triton_kernel_kwargs",
    "source_code": "def triton_kernel_kwargs(self, kernel_cls: type[TritonKernel], features: SIMDKernelFeatures, groups: list[sympy.Expr], kernel_kwargs: dict[str, Any]) -> dict[str, Any]:\n    return kernel_kwargs",
    "docstring": "Hook to change the kwargs passed to TritonKernel, used to apply fixed configurations",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\choices.py",
    "ast_data": "FunctionDef name:triton_kernel_kwargs arg:self arg:kernel_cls arg:features arg:groups arg:kernel_kwargs arguments arg arg arg arg arg Return return:yes"
  },
  {
    "library": "numpy",
    "name": "get_library_dirs",
    "source_code": "@functools.lru_cache(maxsize=128)\ndef get_library_dirs(self):\n    opt = FCompiler.get_library_dirs(self)\n    flang_dir = dirname(self.executables['compiler_f77'][0])\n    opt.append(normpath(join(flang_dir, '..', 'lib')))\n    return opt",
    "docstring": "List of compiler library directories.",
    "type": "method",
    "file_path": "numpy\\numpy\\distutils\\fcompiler\\arm.py",
    "ast_data": "FunctionDef name:get_library_dirs arg:self arguments arg Assign Call Assign Call Call Call Call Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "__init__",
    "source_code": "def __init__(self, tz=None):\n    self.tz = _get_tzinfo(tz)",
    "docstring": "Parameters ---------- tz : str or , default: :rc: Ticks timezone. If a string, *tz* is passed to .",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\dates.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:tz arguments arg arg Assign Call"
  },
  {
    "library": "pytorch",
    "name": "huber_loss",
    "source_code": "def huber_loss(input: Tensor, target: Tensor, reduction: str='mean', delta: float=1.0, weight: Optional[Tensor]=None) -> Tensor:\n    if has_torch_function_variadic(input, target, weight):\n        return handle_torch_function(huber_loss, (input, target, weight), input, target, reduction=reduction, delta=delta, weight=weight)\n    if not target.size() == input.size():\n        warnings.warn(f'Using a target size ({target.size()}) that is different to the input size ({input.size()}). This will likely lead to incorrect results due to broadcasting. Please ensure they have the same size.', stacklevel=2)\n    expanded_input, expanded_target = torch.broadcast_tensors(input, target)\n    if weight is None:\n        return torch._C._nn.huber_loss(expanded_input, expanded_target, _Reduction.get_enum(reduction), delta)\n    else:\n        if weight.size() != input.size():\n            raise ValueError('Weights and input must have the same size.')\n        unweighted_loss = torch._C._nn.huber_loss(expanded_input, expanded_target, _Reduction.get_enum('none'), delta)\n        weighted_loss = unweighted_loss * weight\n        if reduction == 'none':\n            return weighted_loss\n        elif reduction == 'sum':\n            return torch.sum(weighted_loss)\n        elif reduction == 'mean':\n            return weighted_loss.mean()\n        else:\n            raise ValueError(f\"Invalid reduction mode: {reduction}. Expected one of 'none', 'mean', 'sum'.\")",
    "docstring": "Compute the Huber loss, with optional weighting. Function uses a squared term if the absolute element-wise error falls below delta and a delta-scaled L1 term otherwise. When delta equals 1, this loss is equivalent to SmoothL1Loss. In general, Huber loss differs from SmoothL1Loss by a factor of delta (AKA beta in Smooth L1). See :class: for details. Args: input (Tensor): Predicted values. target (Tensor): Ground truth values. reduction (str, optional): Specifies the reduction to apply to the output: 'none' | 'mean' | 'sum'. 'mean': the mean of the output is taken. 'sum': the output will be summed. 'none': no reduction will be applied. Default: 'mean'. delta (float, optional): The threshold at which to change between delta-scaled L1 and L2 loss. Default: 1.0. weight (Tensor, optional): Weights for each sample. Default: None. Returns: Tensor: Huber loss (optionally weighted).",
    "type": "function",
    "file_path": "pytorch\\torch\\nn\\functional.py",
    "ast_data": "FunctionDef name:huber_loss arg:input arg:target arg:reduction arg:delta arg:weight arguments arg arg arg arg arg If Call Return return:yes Call If Compare Call Call Call Call Call Assign Call If Compare Return return:yes Call Call If Compare Call Call Raise Call Assign Call Call Assign If Compare Return return:yes If Compare Return return:yes Call If Compare Return return:yes Call Raise Call"
  },
  {
    "library": "django",
    "name": "_check_pattern_name",
    "source_code": "def _check_pattern_name(self):\n    if self.pattern.name is not None and ':' in self.pattern.name:\n        warning = Warning(\"Your URL pattern {} has a name including a ':'. Remove the colon, to avoid ambiguous namespace references.\".format(self.pattern.describe()), id='urls.W003')\n        return [warning]\n    else:\n        return []",
    "docstring": "Check that the pattern name does not contain a colon.",
    "type": "method",
    "file_path": "django\\django\\urls\\resolvers.py",
    "ast_data": "FunctionDef name:_check_pattern_name arg:self arguments arg If BoolOp Compare Compare Assign Call Call Call Return return:yes Return return:no"
  },
  {
    "library": "tensorflow",
    "name": "set_model",
    "source_code": "def set_model(self, model):\n    self.model = model\n    self._log_write_dir = self._get_log_write_dir()\n    self._train_dir = os.path.join(self._log_write_dir, 'train')\n    self._train_step = self.model._train_counter\n    self._val_dir = os.path.join(self._log_write_dir, 'validation')\n    self._val_step = self.model._test_counter\n    self._writers = {}\n    self._should_write_train_graph = False\n    if self.write_graph:\n        self._write_keras_model_summary()\n        self._should_write_train_graph = True\n    if self.embeddings_freq:\n        self._configure_embeddings()",
    "docstring": "Sets Keras model and writes graph if specified.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\callbacks.py",
    "ast_data": "FunctionDef name:set_model arg:self arg:model arguments arg arg Assign Assign Call Assign Call Assign Assign Call Assign Assign Assign If Call Assign If Call"
  },
  {
    "library": "matplotlib",
    "name": "disable",
    "source_code": "def disable(self, event=None):\n    pass",
    "docstring": "Disable the toggle tool. call this method when is True. This can happen in different circumstances. * Click on the toolbar tool button. * Call to . * Another derived tool is triggered (from the same ).",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\backend_tools.py",
    "ast_data": "FunctionDef name:disable arg:self arg:event arguments arg arg"
  },
  {
    "library": "django",
    "name": "width",
    "source_code": "@property\ndef width(self):\n    return capi.get_band_xsize(self._ptr)",
    "docstring": "Width (X axis) in pixels of the band.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\gdal\\raster\\band.py",
    "ast_data": "FunctionDef name:width arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_dense_var_to_tensor",
    "source_code": "def _dense_var_to_tensor(self, dtype=None, name=None, as_ref=False):\n    if values_util.is_saving_non_distributed():\n        return ops.convert_to_tensor(self._primary, dtype=dtype, name=name, as_ref=as_ref)\n    with distribute_lib.enter_or_assert_strategy(self._distribute_strategy):\n        return ops.convert_to_tensor(self._get(), dtype=dtype, name=name, as_ref=as_ref)",
    "docstring": "Converts a variable to a tensor.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\values.py",
    "ast_data": "FunctionDef name:_dense_var_to_tensor arg:self arg:dtype arg:name arg:as_ref arguments arg arg arg arg If Call Return return:yes Call With Call Return return:yes Call Call"
  },
  {
    "library": "matplotlib",
    "name": "addfont",
    "source_code": "def addfont(self, path):\n    path = os.fsdecode(path)\n    if Path(path).suffix.lower() == '.afm':\n        with open(path, 'rb') as fh:\n            font = _afm.AFM(fh)\n        prop = afmFontProperty(path, font)\n        self.afmlist.append(prop)\n    else:\n        font = ft2font.FT2Font(path)\n        prop = ttfFontProperty(font)\n        self.ttflist.append(prop)\n    self._findfont_cached.cache_clear()",
    "docstring": "Cache the properties of the font at *path* to make it available to the . The type of font is inferred from the path suffix. Parameters ---------- path : str or path-like Notes ----- This method is useful for adding a custom font without installing it in your operating system. See the singleton instance for usage and caveats about this function.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\font_manager.py",
    "ast_data": "FunctionDef name:addfont arg:self arg:path arguments arg arg Assign Call If Compare Call Call With Call Assign Call Assign Call Call Assign Call Assign Call Call Call"
  },
  {
    "library": "pandas",
    "name": "_mode",
    "source_code": "def _mode(self, dropna: bool=True) -> Self:\n    result, _ = mode(self, dropna=dropna)\n    return result",
    "docstring": "Returns the mode(s) of the ExtensionArray. Always returns even if only one value. Parameters ---------- dropna : bool, default True Don't consider counts of NA values. Returns ------- same type as self Sorted, if possible.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\arrays\\base.py",
    "ast_data": "FunctionDef name:_mode arg:self arg:dropna arguments arg arg Assign Call Return return:yes"
  },
  {
    "library": "scrapy",
    "name": "download_error",
    "source_code": "def download_error(self, failure: Failure, request: Request, spider: Spider, errmsg: str | None=None) -> LogFormatterResult:\n    args: dict[str, Any] = {'request': request}\n    if errmsg:\n        msg = DOWNLOADERRORMSG_LONG\n        args['errmsg'] = errmsg\n    else:\n        msg = DOWNLOADERRORMSG_SHORT\n    return {'level': logging.ERROR, 'msg': msg, 'args': args}",
    "docstring": "Logs a download error message from a spider (typically coming from the engine). .. versionadded:: 2.0",
    "type": "method",
    "file_path": "scrapy\\scrapy\\logformatter.py",
    "ast_data": "FunctionDef name:download_error arg:self arg:failure arg:request arg:spider arg:errmsg arguments arg arg arg arg arg If Assign Assign Assign Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "disable",
    "source_code": "def disable(self, event=None):\n    self._cancel_action()\n    self.figure.canvas.widgetlock.release(self)\n    self.figure.canvas.mpl_disconnect(self._idPress)\n    self.figure.canvas.mpl_disconnect(self._idRelease)\n    self.figure.canvas.mpl_disconnect(self._idScroll)",
    "docstring": "Release the canvas and disconnect press/release events.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\backend_tools.py",
    "ast_data": "FunctionDef name:disable arg:self arg:event arguments arg arg Call Call Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "prepare_softmax_online",
    "source_code": "@register_lowering(inductor_prims.prepare_softmax_online, type_promotion_kind=None)\ndef prepare_softmax_online(x, dim):\n    kwargs = _make_reduction_inner(x, axis=dim, keepdims=True, dtype=None, override_return_dtype=None)\n    reduction_ranges = kwargs['reduction_ranges']\n    rnumel = V.graph.sizevars.simplify(sympy_product(reduction_ranges))\n    hint, num_split = ir.Reduction.num_splits(**kwargs, reduction_type='online_softmax_reduce', reduction_numel=rnumel)\n    if num_split == 1 and V.graph.sizevars.size_hint(rnumel) >= config.unroll_reductions_threshold:\n        max_tensor, sum_tensor = OnlineSoftmaxReduction.create(input_node=x, num_output=2, reduction_hint=hint, **kwargs)\n        return (max_tensor, sum_tensor)\n    else:\n        warnings.warn(textwrap.dedent('\\n            Online softmax is disabled on the fly since Inductor decides to\\n            split the reduction. Cut an issue to PyTorch if this is an\\n            important use case and you want to speed it up with online\\n            softmax.\\n            '))\n        amax = reduce_amax(x, dim, keepdims=True)\n        exp = lowerings[aten.exp](sub(x, amax))\n        xsum = sum_(exp, dim, keepdims=True)\n        return (amax, xsum)",
    "docstring": "Lowering inductor_prims.prepare_softmax_online to compute max/sum in one pass if no split is needed.",
    "type": "function",
    "file_path": "pytorch\\torch\\_inductor\\lowering.py",
    "ast_data": "FunctionDef name:prepare_softmax_online arg:x arg:dim arguments arg arg Assign Call Assign Assign Call Call Assign Call If BoolOp Compare Compare Call Assign Call Return return:yes Call Call Assign Call Assign Call Call Assign Call Return return:yes Call"
  },
  {
    "library": "authlib",
    "name": "check_client",
    "source_code": "def check_client(self, client):\n    raise NotImplementedError()",
    "docstring": "A method to check if this token is issued to the given client. For instance, `` is saved on token table:: def check_client(self, client): return self.client_id == client.client_id :return: bool",
    "type": "method",
    "file_path": "authlib\\authlib\\oauth2\\rfc6749\\models.py",
    "ast_data": "FunctionDef name:check_client arg:self arg:client arguments arg arg Raise Call"
  },
  {
    "library": "sphinx",
    "name": "build_navlist",
    "source_code": "def build_navlist(self, navnodes: list[dict[str, Any]]) -> list[NavPoint]:\n    navstack: list[NavPoint] = [NavPoint('', '', [])]\n    level = 0\n    for node in navnodes:\n        if not node['text']:\n            continue\n        file = node['refuri'].split('#')[0]\n        if file in self.ignored_files:\n            continue\n        if node['level'] > self.config.epub_tocdepth:\n            continue\n        navpoint = NavPoint(node['text'], node['refuri'], [])\n        if node['level'] == level:\n            navstack.pop()\n            navstack[-1].children.append(navpoint)\n            navstack.append(navpoint)\n        elif node['level'] == level + 1:\n            level += 1\n            navstack[-1].children.append(navpoint)\n            navstack.append(navpoint)\n        elif node['level'] < level:\n            while node['level'] < len(navstack):\n                navstack.pop()\n            level = node['level']\n            navstack[-1].children.append(navpoint)\n            navstack.append(navpoint)\n        else:\n            unreachable = 'Should never reach here. It might be a bug.'\n            raise RuntimeError(unreachable)\n    return navstack[0].children",
    "docstring": "Create the toc navigation structure. This method is almost same as build_navpoints method in epub.py. This is because the logical navigation structure of epub3 is not different from one of epub2. The difference from build_navpoints method is templates which are used when generating navigation documents.",
    "type": "method",
    "file_path": "sphinx\\sphinx\\builders\\epub3.py",
    "ast_data": "FunctionDef name:build_navlist arg:self arg:navnodes arguments arg arg Call Assign For If Assign Call If Compare If Compare Assign Call If Compare Call Call Call If Compare Call Call If Compare While Compare Call Call Assign Call Call Assign Raise Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "assert_is_compatible_with",
    "source_code": "def assert_is_compatible_with(self, other):\n    if not self.is_compatible_with(other):\n        raise ValueError('Shapes %s and %s are incompatible' % (self, other))",
    "docstring": "Raises exception if and do not represent the same shape. This method can be used to assert that there exists a shape that both and represent. Args: other: Another TensorShape. Raises: ValueError: If and do not represent the same shape.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\tensor_shape.py",
    "ast_data": "FunctionDef name:assert_is_compatible_with arg:self arg:other arguments arg arg If Call Raise Call"
  },
  {
    "library": "django",
    "name": "get_preserved_filters",
    "source_code": "def get_preserved_filters(self, request):\n    match = request.resolver_match\n    if self.preserve_filters and match:\n        current_url = '%s:%s' % (match.app_name, match.url_name)\n        changelist_url = 'admin:%s_%s_changelist' % (self.opts.app_label, self.opts.model_name)\n        if current_url == changelist_url:\n            preserved_filters = request.GET.urlencode()\n        else:\n            preserved_filters = request.GET.get('_changelist_filters')\n        if preserved_filters:\n            return urlencode({'_changelist_filters': preserved_filters})\n    return ''",
    "docstring": "Return the preserved filters querystring.",
    "type": "method",
    "file_path": "django\\django\\contrib\\admin\\options.py",
    "ast_data": "FunctionDef name:get_preserved_filters arg:self arg:request arguments arg arg Assign If BoolOp Assign Assign If Compare Assign Call Assign Call If Return return:yes Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_get_checkpoint_filename",
    "source_code": "def _get_checkpoint_filename(ckpt_dir_or_file):\n    if isinstance(ckpt_dir_or_file, os.PathLike):\n        ckpt_dir_or_file = os.fspath(ckpt_dir_or_file)\n    if gfile.IsDirectory(ckpt_dir_or_file):\n        return checkpoint_management.latest_checkpoint(ckpt_dir_or_file)\n    return ckpt_dir_or_file",
    "docstring": "Returns checkpoint filename given directory or specific checkpoint file.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\training\\checkpoint_utils.py",
    "ast_data": "FunctionDef name:_get_checkpoint_filename arg:ckpt_dir_or_file arguments arg If Call Assign Call If Call Return return:yes Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "get_default_qat_qconfig_mapping",
    "source_code": "def get_default_qat_qconfig_mapping(backend='x86', version=1) -> QConfigMapping:\n    return _get_default_qconfig_mapping(True, backend, version)",
    "docstring": "Return the default QConfigMapping for quantization aware training. Args: * `` (int) : the version for the default qconfig mapping",
    "type": "function",
    "file_path": "pytorch\\torch\\ao\\quantization\\qconfig_mapping.py",
    "ast_data": "FunctionDef name:get_default_qat_qconfig_mapping arg:backend arg:version arguments arg arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "_validate_all_indexes_accounted_for_in_provided_output",
    "source_code": "def _validate_all_indexes_accounted_for_in_provided_output(self, saved_nodes_idxs: list[int], recomputable_node_idxs: list[int]) -> None:\n    recomputable_node_idxs_set = set(recomputable_node_idxs)\n    saved_nodes_idxs_set = set(saved_nodes_idxs)\n    all_candidate_nodes_idxs = set(range(len(self._graph_info_provider.all_recomputable_banned_nodes)))\n    assert len(recomputable_node_idxs_set.intersection(saved_nodes_idxs_set)) == 0, 'Saved nodes and recomputable nodes cannot have any overlaps'\n    assert recomputable_node_idxs_set.union(saved_nodes_idxs_set) == all_candidate_nodes_idxs, 'All candidate nodes must be accounted for in the provided output'",
    "docstring": "Validate that all indexes are accounted for in the provided output. This function checks that the union of saved nodes and recomputable nodes covers all candidate nodes without any overlaps.",
    "type": "method",
    "file_path": "pytorch\\torch\\_functorch\\_activation_checkpointing\\knapsack_evaluator.py",
    "ast_data": "FunctionDef name:_validate_all_indexes_accounted_for_in_provided_output arg:self arg:saved_nodes_idxs arg:recomputable_node_idxs arguments arg arg arg Assign Call Assign Call Assign Call Call Call Compare Call Call Compare Call"
  },
  {
    "library": "django",
    "name": "_get_path_info_with_parent",
    "source_code": "def _get_path_info_with_parent(self, filtered_relation):\n    path = []\n    opts = self.remote_field.model._meta.concrete_model._meta\n    parent_opts = opts.get_field(self.object_id_field_name).model._meta\n    target = parent_opts.pk\n    path.append(PathInfo(from_opts=self.model._meta, to_opts=parent_opts, target_fields=(target,), join_field=self.remote_field, m2m=True, direct=False, filtered_relation=filtered_relation))\n    parent_field_chain = []\n    while parent_opts != opts:\n        field = opts.get_ancestor_link(parent_opts.model)\n        parent_field_chain.append(field)\n        opts = field.remote_field.model._meta\n    parent_field_chain.reverse()\n    for field in parent_field_chain:\n        path.extend(field.remote_field.path_infos)\n    return path",
    "docstring": "Return the path that joins the current model through any parent models. The idea is that if you have a GFK defined on a parent model then we need to join the parent model first, then the child model.",
    "type": "method",
    "file_path": "django\\django\\contrib\\contenttypes\\fields.py",
    "ast_data": "FunctionDef name:_get_path_info_with_parent arg:self arg:filtered_relation arguments arg arg Assign Assign Assign Call Assign Call Call Assign While Compare Assign Call Call Assign Call For Call Return return:yes"
  },
  {
    "library": "django",
    "name": "y",
    "source_code": "@y.setter\ndef y(self, value):\n    self._cs.setOrdinate(1, 0, value)",
    "docstring": "Set the Y component of the Point.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\geos\\point.py",
    "ast_data": "FunctionDef name:y arg:self arg:value arguments arg arg Call"
  },
  {
    "library": "scikit-learn",
    "name": "_eigen_decompose_covariance",
    "source_code": "def _eigen_decompose_covariance(self, X, y, sqrt_sw):\n    n_samples, n_features = X.shape\n    cov = np.empty((n_features + 1, n_features + 1), dtype=X.dtype)\n    cov[:-1, :-1], X_mean = self._compute_covariance(X, sqrt_sw)\n    if not self.fit_intercept:\n        cov = cov[:-1, :-1]\n    else:\n        cov[-1] = 0\n        cov[:, -1] = 0\n        cov[-1, -1] = sqrt_sw.dot(sqrt_sw)\n    nullspace_dim = max(0, n_features - n_samples)\n    eigvals, V = linalg.eigh(cov)\n    eigvals = eigvals[nullspace_dim:]\n    V = V[:, nullspace_dim:]\n    return (X_mean, eigvals, V, X)",
    "docstring": "Eigendecomposition of X^T.X, used when n_samples > n_features and X is sparse.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\linear_model\\_ridge.py",
    "ast_data": "FunctionDef name:_eigen_decompose_covariance arg:self arg:X arg:y arg:sqrt_sw arguments arg arg arg arg Assign Assign Call Assign Call If Assign Assign Assign Assign Call Assign Call Assign Call Assign Assign Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "iterate_till_fixed_point",
    "source_code": "def iterate_till_fixed_point(constraints, counter):\n    old_c = None\n    while old_c != constraints:\n        old_c = constraints\n        constraints, counter = transform_constraint(constraints, counter)\n    return (constraints, counter)",
    "docstring": "Transform constraints till reaching a fixed point",
    "type": "function",
    "file_path": "pytorch\\torch\\fx\\experimental\\migrate_gradual_types\\transform_to_z3.py",
    "ast_data": "FunctionDef name:iterate_till_fixed_point arg:constraints arg:counter arguments arg arg Assign While Compare Assign Assign Call Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "_ConstantPredictor",
    "source_code": "class _ConstantPredictor(BaseEstimator):\n\n    def fit(self, X, y):\n        check_params = dict(ensure_all_finite=False, dtype=None, ensure_2d=False, accept_sparse=True)\n        validate_data(self, X, y, reset=True, validate_separately=(check_params, check_params))\n        self.y_ = y\n        return self\n\n    def predict(self, X):\n        check_is_fitted(self)\n        validate_data(self, X, ensure_all_finite=False, dtype=None, accept_sparse=True, ensure_2d=False, reset=False)\n        return np.repeat(self.y_, _num_samples(X))\n\n    def decision_function(self, X):\n        check_is_fitted(self)\n        validate_data(self, X, ensure_all_finite=False, dtype=None, accept_sparse=True, ensure_2d=False, reset=False)\n        return np.repeat(self.y_, _num_samples(X))\n\n    def predict_proba(self, X):\n        check_is_fitted(self)\n        validate_data(self, X, ensure_all_finite=False, dtype=None, accept_sparse=True, ensure_2d=False, reset=False)\n        y_ = self.y_.astype(np.float64)\n        return np.repeat([np.hstack([1 - y_, y_])], _num_samples(X), axis=0)",
    "docstring": "Helper predictor to be used when only one class is present.",
    "type": "class",
    "file_path": "scikit-learn\\sklearn\\multiclass.py",
    "ast_data": "ClassDef name:_ConstantPredictor FunctionDef name:fit arg:self arg:X arg:y arguments arg arg arg Assign Call Call Assign Return return:yes FunctionDef name:predict arg:self arg:X arguments arg arg Call Call Return return:yes Call Call FunctionDef name:decision_function arg:self arg:X arguments arg arg Call Call Return return:yes Call Call FunctionDef name:predict_proba arg:self arg:X arguments arg arg Call Call Assign Call Return return:yes Call Call Call"
  },
  {
    "library": "scipy",
    "name": "ipython",
    "source_code": "@click.option('--pythonpath', '-p', metavar='PYTHONPATH', default=None, help='Paths to prepend to PYTHONPATH')\n@spin.util.extend_command(spin.cmds.meson.ipython)\ndef ipython(*, parent_callback, pythonpath, **kwargs):\n    _set_pythonpath(pythonpath)\n    parent_callback(**kwargs)",
    "docstring": "💻 Launch IPython shell with PYTHONPATH set OPTIONS are passed through directly to IPython, e.g.: spin ipython -i myscript.py",
    "type": "function",
    "file_path": "scipy\\.spin\\cmds.py",
    "ast_data": "FunctionDef name:ipython arguments arg arg arg Call Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "set_sharing_strategy",
    "source_code": "def set_sharing_strategy(new_strategy):\n    global _sharing_strategy\n    assert new_strategy in _all_sharing_strategies\n    _sharing_strategy = new_strategy",
    "docstring": "Set the strategy for sharing CPU tensors. Args: new_strategy (str): Name of the selected strategy. Should be one of the values returned by :func:.",
    "type": "function",
    "file_path": "pytorch\\torch\\multiprocessing\\__init__.py",
    "ast_data": "FunctionDef name:set_sharing_strategy arg:new_strategy arguments arg Compare Assign"
  },
  {
    "library": "pytorch",
    "name": "extract_results_from_loggers",
    "source_code": "def extract_results_from_loggers(model: GraphModule) -> dict[int, tuple[Optional[str], object, list[object]]]:\n    handles: dict[int, tuple[Optional[str], object, list[object]]] = {}\n    for _name, module in model.named_children():\n        if isinstance(module, OutputLogger) and len(module.stats) > 0:\n            handles[module.debug_handle] = (module.node_name, module.nn_module_stack, module.stats)\n    return handles",
    "docstring": "For a given model, extract the tensors stats and related information for each debug handle. The reason we have a list of object, instead of Tensor is because the output of node may not be a Tensor, it could be (nested) list, tuple or dict as well. Returns: A dict is keyed by the debug_handle id and the values are a list of object recorded in loggers",
    "type": "function",
    "file_path": "pytorch\\torch\\ao\\quantization\\pt2e\\_numeric_debugger.py",
    "ast_data": "FunctionDef name:extract_results_from_loggers arg:model arguments arg For Call If BoolOp Call Compare Call Assign Return return:yes"
  },
  {
    "library": "seaborn",
    "name": "_define_bin_edges",
    "source_code": "def _define_bin_edges(self, vals, weight, bins, binwidth, binrange, discrete):\n    vals = vals.replace(-np.inf, np.nan).replace(np.inf, np.nan).dropna()\n    if binrange is None:\n        start, stop = (vals.min(), vals.max())\n    else:\n        start, stop = binrange\n    if discrete:\n        bin_edges = np.arange(start - 0.5, stop + 1.5)\n    else:\n        if binwidth is not None:\n            bins = int(round((stop - start) / binwidth))\n        bin_edges = np.histogram_bin_edges(vals, bins, binrange, weight)\n    return bin_edges",
    "docstring": "Inner function that takes bin parameters as arguments.",
    "type": "method",
    "file_path": "seaborn\\seaborn\\_stats\\counting.py",
    "ast_data": "FunctionDef name:_define_bin_edges arg:self arg:vals arg:weight arg:bins arg:binwidth arg:binrange arg:discrete arguments arg arg arg arg arg arg arg Assign Call Call Call If Compare Assign Call Call Assign If Assign Call If Compare Assign Call Call Assign Call Return return:yes"
  },
  {
    "library": "cryptography",
    "name": "sign",
    "source_code": "@abc.abstractmethod\ndef sign(self, data: bytes, padding: AsymmetricPadding, algorithm: asym_utils.Prehashed | hashes.HashAlgorithm) -> bytes:\n    pass",
    "docstring": "Signs the data.",
    "type": "method",
    "file_path": "cryptography\\src\\cryptography\\hazmat\\primitives\\asymmetric\\rsa.py",
    "ast_data": "FunctionDef name:sign arg:self arg:data arg:padding arg:algorithm arguments arg arg arg arg"
  },
  {
    "library": "tensorflow",
    "name": "save_optimizer_weights_to_hdf5_group",
    "source_code": "def save_optimizer_weights_to_hdf5_group(hdf5_group, optimizer):\n    symbolic_weights = getattr(optimizer, 'weights')\n    if symbolic_weights:\n        weights_group = hdf5_group.create_group('optimizer_weights')\n        weight_names = [str(w.name).encode('utf8') for w in symbolic_weights]\n        save_attributes_to_hdf5_group(weights_group, 'weight_names', weight_names)\n        weight_values = backend.batch_get_value(symbolic_weights)\n        for name, val in zip(weight_names, weight_values):\n            param_dset = weights_group.create_dataset(name, val.shape, dtype=val.dtype)\n            if not val.shape:\n                param_dset[()] = val\n            else:\n                param_dset[:] = val",
    "docstring": "Saves optimizer weights of a optimizer to a HDF5 group. Args: hdf5_group: HDF5 group. optimizer: optimizer instance.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\saving\\hdf5_format.py",
    "ast_data": "FunctionDef name:save_optimizer_weights_to_hdf5_group arg:hdf5_group arg:optimizer arguments arg arg Assign Call If Assign Call Assign Call Call Call Assign Call For Call Assign Call If Assign Assign"
  },
  {
    "library": "pytorch",
    "name": "get_logical_id_to_device",
    "source_code": "def get_logical_id_to_device(devices: list[Device]) -> dict[int, Device]:\n    logical_id_to_device: dict[int, Device] = {}\n    for d in devices:\n        logical_id_to_device[d.logical_id] = d\n    return logical_id_to_device",
    "docstring": "Get a mapping from device logical ID to Device object.",
    "type": "function",
    "file_path": "pytorch\\torch\\fx\\experimental\\accelerator_partitioner.py",
    "ast_data": "FunctionDef name:get_logical_id_to_device arg:devices arguments arg For Assign Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "get_mouseover",
    "source_code": "def get_mouseover(self):\n    return self._mouseover",
    "docstring": "Return whether this artist is queried for custom context information when the mouse cursor moves over it.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\artist.py",
    "ast_data": "FunctionDef name:get_mouseover arg:self arguments arg Return return:yes"
  },
  {
    "library": "sphinx",
    "name": "DefaultDomain",
    "source_code": "class DefaultDomain(SphinxDirective):\n    has_content = False\n    required_arguments = 1\n    optional_arguments = 0\n    final_argument_whitespace = False\n    option_spec: ClassVar[OptionSpec] = {}\n\n    def run(self) -> list[Node]:\n        domain_name = self.arguments[0].lower()\n        default_domain = self.env.domains.get(domain_name)\n        self.env.current_document.default_domain = default_domain\n        return []",
    "docstring": "Directive to (re-)set the default domain for this source file.",
    "type": "class",
    "file_path": "sphinx\\sphinx\\directives\\__init__.py",
    "ast_data": "ClassDef name:DefaultDomain Assign Assign Assign Assign FunctionDef name:run arg:self arguments arg Assign Call Assign Call Assign Return return:no"
  },
  {
    "library": "django",
    "name": "list",
    "source_code": "def list(self, ignore_patterns):\n    for prefix, root in self.locations:\n        if os.path.isdir(root):\n            storage = self.storages[root]\n            for path in utils.get_files(storage, ignore_patterns):\n                yield (path, storage)",
    "docstring": "List all files in all locations.",
    "type": "method",
    "file_path": "django\\django\\contrib\\staticfiles\\finders.py",
    "ast_data": "FunctionDef name:list arg:self arg:ignore_patterns arguments arg arg For If Call Assign For Call"
  },
  {
    "library": "tensorflow",
    "name": "he_uniform",
    "source_code": "@tf_export(v1=['initializers.he_uniform'])\ndef he_uniform(seed=None):\n    return VarianceScaling(scale=2.0, mode='fan_in', distribution='uniform', seed=seed)",
    "docstring": "He uniform variance scaling initializer. It draws samples from a uniform distribution within [-limit, limit] where is where is the number of input units in the weight tensor. Args: seed: A Python integer. Used to seed the random generator. Returns: An initializer. References: [He et al., 2015] ( # pylint: disable=line-too-long ([pdf](",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\init_ops.py",
    "ast_data": "FunctionDef name:he_uniform arg:seed arguments arg Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "_post_backward_final_callback",
    "source_code": "@no_type_check\n@torch.no_grad()\ndef _post_backward_final_callback(state: _FSDPState, module: nn.Module):\n    _p_assert(state._is_root, 'The post-backward callback should only be called on the root FSDP instance')\n    root_state = state\n    if root_state._sync_gradients:\n        current_stream = state._device_handle.current_stream()\n        current_stream.wait_stream(root_state._post_backward_stream)\n        if root_state._all_reduce_stream is not current_stream:\n            current_stream.wait_stream(root_state._all_reduce_stream)\n        if root_state.cpu_offload.offload_params:\n            state._device_handle.current_stream().synchronize()\n    root_state._exec_order_data.next_iter()\n    for fsdp_state in state._all_fsdp_states:\n        _catch_all_reshard(fsdp_state)\n        _finalize_params(fsdp_state)\n        fsdp_state.training_state = TrainingState.IDLE\n        handle = fsdp_state._handle\n        if handle:\n            handle._ran_pre_backward_hook = False\n            handle._needs_pre_backward_unshard = False\n            handle._post_forward_index = None\n            handle._training_state = HandleTrainingState.IDLE\n            handle._prefetched = False\n    root_state._post_backward_callback_queued = False",
    "docstring": "This waits for the post-backward to finish and performs some final cleanup. This runs at the end of the entire backward pass and should only be called on the root FSDP instance.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\fsdp\\_runtime_utils.py",
    "ast_data": "FunctionDef name:_post_backward_final_callback arg:state arg:module arguments arg arg Call Assign If Assign Call Call If Compare Call If Call Call Call For Call Call Assign Assign If Assign Assign Assign Assign Assign Assign Call"
  },
  {
    "library": "matplotlib",
    "name": "_on_scroll",
    "source_code": "def _on_scroll(self, event):\n    pass",
    "docstring": "Mouse scroll event handler.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\widgets.py",
    "ast_data": "FunctionDef name:_on_scroll arg:self arg:event arguments arg arg"
  },
  {
    "library": "matplotlib",
    "name": "_get_aligned_offsets",
    "source_code": "def _get_aligned_offsets(yspans, height, align='baseline'):\n    _api.check_in_list(['baseline', 'left', 'top', 'right', 'bottom', 'center'], align=align)\n    if height is None:\n        height = max((y1 - y0 for y0, y1 in yspans))\n    if align == 'baseline':\n        yspan = (min((y0 for y0, y1 in yspans)), max((y1 for y0, y1 in yspans)))\n        offsets = [0] * len(yspans)\n    elif align in ['left', 'bottom']:\n        yspan = (0, height)\n        offsets = [-y0 for y0, y1 in yspans]\n    elif align in ['right', 'top']:\n        yspan = (0, height)\n        offsets = [height - y1 for y0, y1 in yspans]\n    elif align == 'center':\n        yspan = (0, height)\n        offsets = [(height - (y1 - y0)) * 0.5 - y0 for y0, y1 in yspans]\n    return (yspan, offsets)",
    "docstring": "Align boxes each specified by their `` is used without checking that it is actually large enough). descent The descent of the packing. offsets The bottom offsets of the boxes.",
    "type": "function",
    "file_path": "matplotlib\\lib\\matplotlib\\offsetbox.py",
    "ast_data": "FunctionDef name:_get_aligned_offsets arg:yspans arg:height arg:align arguments arg arg arg Call If Compare Assign Call If Compare Assign Call Call Assign Call If Compare Assign Assign If Compare Assign Assign If Compare Assign Assign Return return:yes"
  },
  {
    "library": "pandas",
    "name": "_value_with_fmt",
    "source_code": "def _value_with_fmt(self, val) -> tuple[int | float | bool | str | datetime.datetime | datetime.date, str | None]:\n    fmt = None\n    if is_integer(val):\n        val = int(val)\n    elif is_float(val):\n        val = float(val)\n    elif is_bool(val):\n        val = bool(val)\n    elif is_decimal(val):\n        val = Decimal(val)\n    elif isinstance(val, datetime.datetime):\n        fmt = self._datetime_format\n    elif isinstance(val, datetime.date):\n        fmt = self._date_format\n    elif isinstance(val, datetime.timedelta):\n        val = val.total_seconds() / 86400\n        fmt = '0'\n    else:\n        val = str(val)\n        if len(val) > 32767:\n            warnings.warn(f'Cell contents too long ({len(val)}), truncated to 32767 characters', UserWarning, stacklevel=find_stack_level())\n    return (val, fmt)",
    "docstring": "Convert numpy types to Python types for the Excel writers. Parameters ---------- val : object Value to be written into cells Returns ------- Tuple with the first element being the converted value and the second being an optional format",
    "type": "method",
    "file_path": "pandas\\pandas\\io\\excel\\_base.py",
    "ast_data": "FunctionDef name:_value_with_fmt arg:self arg:val arguments arg arg Assign If Call Assign Call If Call Assign Call If Call Assign Call If Call Assign Call If Call Assign If Call Assign If Call Assign Call Assign Assign Call If Compare Call Call Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "step",
    "source_code": "@_use_grad_for_differentiable\ndef step(self, closure=None):\n    loss = None\n    if closure is not None:\n        with torch.enable_grad():\n            loss = closure()\n    for group in self.param_groups:\n        params: list[Tensor] = []\n        grads: list[Tensor] = []\n        momentum_buffer_list: list[Optional[Tensor]] = []\n        has_sparse_grad = self._init_group(group, params, grads, momentum_buffer_list)\n        sgd(params, grads, momentum_buffer_list, weight_decay=group['weight_decay'], momentum=group['momentum'], lr=group['lr'], dampening=group['dampening'], nesterov=group['nesterov'], maximize=group['maximize'], has_sparse_grad=has_sparse_grad, foreach=group['foreach'], fused=group['fused'], grad_scale=getattr(self, 'grad_scale', None), found_inf=getattr(self, 'found_inf', None))\n        if group['momentum'] != 0:\n            for p, momentum_buffer in zip(params, momentum_buffer_list):\n                state = self.state[p]\n                state['momentum_buffer'] = momentum_buffer\n    return loss",
    "docstring": "Perform a single optimization step. Args: closure (Callable, optional): A closure that reevaluates the model and returns the loss.",
    "type": "method",
    "file_path": "pytorch\\torch\\optim\\sgd.py",
    "ast_data": "FunctionDef name:step arg:self arg:closure arguments arg arg Assign If Compare With Call Assign Call For Assign Call Call Call Call If Compare For Call Assign Assign Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_tolist",
    "source_code": "def _tolist(obj):\n    a1 = []\n    for elem in obj:\n        if isinstance(elem, (list, tuple)):\n            elem = _tolist(elem)\n        if isinstance(elem, ndarray):\n            a1.append(elem.tensor.tolist())\n        else:\n            a1.append(elem)\n    return a1",
    "docstring": "Recursively convert tensors into lists.",
    "type": "function",
    "file_path": "pytorch\\torch\\_numpy\\_ndarray.py",
    "ast_data": "FunctionDef name:_tolist arg:obj arguments arg Assign For If Call Assign Call If Call Call Call Call Return return:yes"
  },
  {
    "library": "kornia",
    "name": "__mul__",
    "source_code": "def __mul__(self, other: NamedPose) -> NamedPose:\n    if self._frame_src != other._frame_dst:\n        raise ValueError(f'Cannot compose {self} with {other}')\n    if isinstance(other.pose, Se2):\n        return NamedPose(self._dst_from_src._mul_se2(other.pose), other._frame_src, self._frame_dst)\n    elif isinstance(other.pose, Se3):\n        return NamedPose(self._dst_from_src._mul_se3(other.pose), other._frame_src, self._frame_dst)\n    else:\n        raise ValueError(f'Pose must be either Se2 or Se3, got {type(self._dst_from_src)}')",
    "docstring": "Compose two NamedPoses. Args: other: NamedPose to compose with. Returns: Composed NamedPose. Example: >>> b_from_a = NamedPose(Se3.identity(), frame_src=\"frame_a\", frame_dst=\"frame_b\") >>> c_from_b = NamedPose(Se3.identity(), frame_src=\"frame_b\", frame_dst=\"frame_c\") >>> c_from_b * b_from_a NamedPose(dst_from_src=rotation: Parameter containing: tensor([1., 0., 0., 0.], requires_grad=True) translation: x: 0.0 y: 0.0 z: 0.0, frame_src: frame_a -> frame_dst: frame_c)",
    "type": "method",
    "file_path": "kornia\\kornia\\geometry\\pose.py",
    "ast_data": "FunctionDef name:__mul__ arg:self arg:other arguments arg arg If Compare Raise Call If Call Return return:yes Call Call If Call Return return:yes Call Call Raise Call Call"
  },
  {
    "library": "matplotlib",
    "name": "_Base",
    "source_code": "class _Base:\n\n    @staticmethod\n    def ensure_quadratic_bezier(path):\n        segments = list(path.iter_segments())\n        if len(segments) != 2 or segments[0][1] != Path.MOVETO or segments[1][1] != Path.CURVE3:\n            raise ValueError(\"'path' is not a valid quadratic Bezier curve\")\n        return [*segments[0][0], *segments[1][0]]\n\n    def transmute(self, path, mutation_size, linewidth):\n        raise NotImplementedError('Derived must override')\n\n    def __call__(self, path, mutation_size, linewidth, aspect_ratio=1.0):\n        if aspect_ratio is not None:\n            vertices = path.vertices / [1, aspect_ratio]\n            path_shrunk = Path(vertices, path.codes)\n            path_mutated, fillable = self.transmute(path_shrunk, mutation_size, linewidth)\n            if np.iterable(fillable):\n                path_list = [Path(p.vertices * [1, aspect_ratio], p.codes) for p in path_mutated]\n                return (path_list, fillable)\n            else:\n                return (path_mutated, fillable)\n        else:\n            return self.transmute(path, mutation_size, linewidth)",
    "docstring": "Arrow Transmuter Base class ArrowTransmuterBase and its derivatives are used to make a fancy arrow around a given path. The __call__ method returns a path (which will be used to create a PathPatch instance) and a boolean value indicating the path is open therefore is not fillable. This class is not an artist and actual drawing of the fancy arrow is done by the FancyArrowPatch class.",
    "type": "class",
    "file_path": "matplotlib\\lib\\matplotlib\\patches.py",
    "ast_data": "ClassDef name:_Base FunctionDef name:ensure_quadratic_bezier arg:path arguments arg Assign Call Call If BoolOp Compare Call Compare Compare Raise Call Return return:yes FunctionDef name:transmute arg:self arg:path arg:mutation_size arg:linewidth arguments arg arg arg arg Raise Call FunctionDef name:__call__ arg:self arg:path arg:mutation_size arg:linewidth arg:aspect_ratio arguments arg arg arg arg arg If Compare Assign Assign Call Assign Call If Call Assign Call Return return:yes Return return:yes Return return:yes Call"
  },
  {
    "library": "django",
    "name": "format_value",
    "source_code": "def format_value(self, value):\n    if value == '' or value is None:\n        return None\n    if self.is_localized:\n        return formats.localize_input(value)\n    return str(value)",
    "docstring": "Return a value as it should appear when rendered in a template.",
    "type": "method",
    "file_path": "django\\django\\forms\\widgets.py",
    "ast_data": "FunctionDef name:format_value arg:self arg:value arguments arg arg If BoolOp Compare Compare Return return:no If Return return:yes Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, sess, watch_fn=None, thread_name_filter=None, pass_through_operrors=False):\n    BaseDebugWrapperSession.__init__(self, sess, thread_name_filter=thread_name_filter, pass_through_operrors=pass_through_operrors)\n    self._watch_fn = None\n    if watch_fn is not None:\n        if not callable(watch_fn):\n            raise TypeError('watch_fn is not callable')\n        self._watch_fn = watch_fn",
    "docstring": "Constructor of NonInteractiveDebugWrapperSession. Args: sess: The TensorFlow object being wrapped. watch_fn: () A Callable that maps the fetches and feeds of a debugged call to * Args: * : the fetches to the call. * : the feeds to the call. * Returns: () An object containing debug options including the debug ops to use, the node names, op types and/or tensor data types to watch, etc. See the documentation of for more details. thread_name_filter: Regular-expression white list for threads on which the wrapper session will be active. See doc of for more details. pass_through_operrors: If true, all captured OpErrors will be propagated. By default this captures all OpErrors. Raises: TypeError: If a non-None is specified and it is not callable.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\debug\\wrappers\\framework.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:sess arg:watch_fn arg:thread_name_filter arg:pass_through_operrors arguments arg arg arg arg arg Call Assign If Compare If Call Raise Call Assign"
  },
  {
    "library": "matplotlib",
    "name": "set_width",
    "source_code": "def set_width(self, width):\n    if width > min(self.a, self.b):\n        raise ValueError('Width of annulus must be less than or equal to semi-minor axis')\n    self._width = width\n    self._path = None\n    self.stale = True",
    "docstring": "Set the width (thickness) of the annulus ring. The width is measured inwards from the outer ellipse. Parameters ---------- width : float",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\patches.py",
    "ast_data": "FunctionDef name:set_width arg:self arg:width arguments arg arg If Compare Call Raise Call Assign Assign Assign"
  },
  {
    "library": "pytorch",
    "name": "get_param",
    "source_code": "def get_param(module, attr):\n    param = getattr(module, attr, None)\n    if callable(param):\n        return param()\n    else:\n        return param",
    "docstring": "Get the parameter given a module and attribute. Sometimes the weights/bias attribute gives you the raw tensor, but sometimes gives a function that will give you the raw tensor, this function takes care of that logic",
    "type": "function",
    "file_path": "pytorch\\torch\\ao\\quantization\\_correct_bias.py",
    "ast_data": "FunctionDef name:get_param arg:module arg:attr arguments arg arg Assign Call If Call Return return:yes Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_topological_sort_nodes",
    "source_code": "def _topological_sort_nodes(self) -> list[list[BaseSchedulerNode]]:\n    order = []\n    nodes = dict.fromkeys(self.nodes, 0)\n    children: dict[Any, Any] = {}\n    for node in self.nodes:\n        deps = self._get_unmet_dep_nodes(node)\n        nodes[node] = len(deps)\n        for dep in deps:\n            c = children.get(dep, [])\n            c.append(node)\n            children[dep] = c\n    zero_deg_nodes = [n for n, v in nodes.items() if v == 0]\n    while zero_deg_nodes:\n        order.append(zero_deg_nodes)\n        for n in zero_deg_nodes:\n            for user in children.get(n, []):\n                nodes[user] -= 1\n            nodes.pop(n)\n        zero_deg_nodes = [n for n, v in nodes.items() if v == 0]\n    assert not nodes, 'Topological sort failed!'\n    return order",
    "docstring": "Sort nodes by their topological order, return a list of node lists.",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\scheduler.py",
    "ast_data": "FunctionDef name:_topological_sort_nodes arg:self arguments arg Assign Assign Call For Assign Call Assign Call For Assign Call Call Assign Assign Call Compare While Call For For Call Call Assign Call Compare Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_MaybeDevice",
    "source_code": "@contextlib.contextmanager\ndef _MaybeDevice(device):\n    if device:\n        with ops.device(device):\n            yield\n    else:\n        yield",
    "docstring": "Applies the given device only if device is not None or empty.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\importer.py",
    "ast_data": "FunctionDef name:_MaybeDevice arg:device arguments arg If With Call"
  },
  {
    "library": "scipy",
    "name": "parse_structure",
    "source_code": "def parse_structure(astr):\n    spanlist = []\n    ind = 0\n    while True:\n        m = routine_start_re.search(astr, ind)\n        if m is None:\n            break\n        start = m.start()\n        if function_start_re.match(astr, start, m.end()):\n            while True:\n                i = astr.rfind('\\n', ind, start)\n                if i == -1:\n                    break\n                start = i\n                if astr[i:i + 7] != '\\n     $':\n                    break\n        start += 1\n        m = routine_end_re.search(astr, m.end())\n        ind = end = m and m.end() - 1 or len(astr)\n        spanlist.append((start, end))\n    return spanlist",
    "docstring": "Return a list of tuples for each function or subroutine each tuple is the start and end of a subroutine or function to be expanded.",
    "type": "function",
    "file_path": "scipy\\tools\\generate_f2pymod.py",
    "ast_data": "FunctionDef name:parse_structure arg:astr arguments arg Assign Assign While Assign Call If Compare Assign Call If Call Call While Assign Call If Compare Assign If Compare Assign Call Call Assign BoolOp BoolOp Call Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "name",
    "source_code": "@property\ndef name(self):\n    if context.executing_eagerly():\n        return self._name\n    return self._queue_ref.op.name",
    "docstring": "The name of the underlying queue.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\data_flow_ops.py",
    "ast_data": "FunctionDef name:name arg:self arguments arg If Call Return return:yes Return return:yes"
  },
  {
    "library": "scipy",
    "name": "Schwefel04",
    "source_code": "class Schwefel04(Benchmark):\n    change_dimensionality = True\n\n    def __init__(self, dimensions=2):\n        Benchmark.__init__(self, dimensions)\n        self._bounds = list(zip([0.0] * self.N, [10.0] * self.N))\n        self.custom_bounds = ([0.0, 2.0], [0.0, 2.0])\n        self.global_optimum = [[1.0 for _ in range(self.N)]]\n        self.fglob = 0.0\n\n    def fun(self, x, *args):\n        self.nfev += 1\n        return sum((x - 1.0) ** 2.0 + (x[0] - x ** 2.0) ** 2.0)",
    "docstring": "Schwefel 4 objective function. This class defines the Schwefel 4 [1]_ global optimization problem. This is a multimodal minimization problem defined as follows: .. math:: f_{\\text{Schwefel04}}(x) = \\sum_{i=1}^n \\left[(x_i - 1)^2 + (x_1 - x_i^2)^2 \\right] Here, :math: represents the number of dimensions and :math: for :math:. *Global optimum*: :math: for:math: for :math: .. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions For Global Optimization Problems Int. Journal of Mathematical Modelling and Numerical Optimisation, 2013, 4, 150-194.",
    "type": "class",
    "file_path": "scipy\\benchmarks\\benchmarks\\go_benchmark_functions\\go_funcs_S.py",
    "ast_data": "ClassDef name:Schwefel04 Assign FunctionDef name:__init__ arg:self arg:dimensions arguments arg arg Call Assign Call Call Assign Assign Call Assign FunctionDef name:fun arg:self arg:x arguments arg arg arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "run_fwd_maybe_bwd",
    "source_code": "def run_fwd_maybe_bwd(gm, args, only_fwd=False, disable_clone=False):\n    from .testing import collect_results, reduce_to_scalar_loss, requires_bwd_pass\n    gm = copy.deepcopy(gm)\n    if not disable_clone:\n        args = clone_inputs_retaining_gradness(args)\n    if hasattr(gm, 'zero_grad'):\n        gm.zero_grad(True)\n    out = gm(args) if getattr(gm, '_boxed_call', False) else gm(*args)\n    if only_fwd:\n        return out\n    if requires_bwd_pass(out):\n        loss = reduce_to_scalar_loss(out)\n        loss.backward()\n    return collect_results(gm, out, None, args)",
    "docstring": "Runs a forward and possibly backward iteration for a given mod and args. When disable_clone is True, we will use args as-is without cloning. This is higher fidelity but we may destroy the args in the process.",
    "type": "function",
    "file_path": "pytorch\\torch\\_dynamo\\debug_utils.py",
    "ast_data": "FunctionDef name:run_fwd_maybe_bwd arg:gm arg:args arg:only_fwd arg:disable_clone arguments arg arg arg arg Assign Call If Assign Call If Call Call Assign Call Call Call If Return return:yes If Call Assign Call Call Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "_Domain",
    "source_code": "class _Domain(ABC):\n    symbols = {np.inf: '\\\\infty', -np.inf: '-\\\\infty', np.pi: '\\\\pi', -np.pi: '-\\\\pi'}\n\n    @abstractmethod\n    def contains(self, x):\n        raise NotImplementedError()\n\n    @abstractmethod\n    def draw(self, n):\n        raise NotImplementedError()\n\n    @abstractmethod\n    def get_numerical_endpoints(self, x):\n        raise NotImplementedError()\n\n    @abstractmethod\n    def __str__(self):\n        raise NotImplementedError()",
    "docstring": "Representation of the applicable domain of a parameter or variable. A object is responsible for storing information about the domain of a parameter or variable, determining whether a value is within the domain (), and providing a text/mathematical representation of itself (). Because the domain of a parameter/variable can have a complicated relationship with other parameters and variables of a distribution, itself does not try to represent all possibilities; in fact, it has no implementation and is meant for subclassing. Attributes ---------- symbols : dict A map from special numerical values to symbols for use in Methods ------- contains(x) Determine whether the argument is contained within the domain (True) or not (False). Used for input validation. get_numerical_endpoints() Gets the numerical values of the domain endpoints, which may have been defined symbolically or through a callable. __str__() Returns a text representation of the domain (e.g. ``). Used for generating documentation.",
    "type": "class",
    "file_path": "scipy\\scipy\\stats\\_distribution_infrastructure.py",
    "ast_data": "ClassDef name:_Domain Assign FunctionDef name:contains arg:self arg:x arguments arg arg Raise Call FunctionDef name:draw arg:self arg:n arguments arg arg Raise Call FunctionDef name:get_numerical_endpoints arg:self arg:x arguments arg arg Raise Call FunctionDef name:__str__ arg:self arguments arg Raise Call"
  },
  {
    "library": "scipy",
    "name": "xmin",
    "source_code": "@property\ndef xmin(self):\n    return asarray([b[0] for b in self.bounds])",
    "docstring": "The lower bounds for the problem Returns ------- xmin : sequence The lower bounds for the problem",
    "type": "method",
    "file_path": "scipy\\benchmarks\\benchmarks\\go_benchmark_functions\\go_benchmark.py",
    "ast_data": "FunctionDef name:xmin arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "BarContainer",
    "source_code": "class BarContainer(Container):\n\n    def __init__(self, patches, errorbar=None, *, datavalues=None, orientation=None, **kwargs):\n        self.patches = patches\n        self.errorbar = errorbar\n        self.datavalues = datavalues\n        self.orientation = orientation\n        super().__init__(patches, **kwargs)",
    "docstring": "Container for the artists of bar plots (e.g. created by ). The container can be treated as a tuple of the *patches* themselves. Additionally, you can access these and further parameters by the attributes. Attributes ---------- patches : list of :class: The artists of the bars. errorbar : None or :class: A container for the error bar artists if error bars are present. *None* otherwise. datavalues : None or array-like The underlying data values corresponding to the bars. orientation : {'vertical', 'horizontal'}, default: None If 'vertical', the bars are assumed to be vertical. If 'horizontal', the bars are assumed to be horizontal.",
    "type": "class",
    "file_path": "matplotlib\\lib\\matplotlib\\container.py",
    "ast_data": "ClassDef name:BarContainer FunctionDef name:__init__ arg:self arg:patches arg:errorbar arguments arg arg arg arg arg arg Assign Assign Assign Assign Call Call"
  },
  {
    "library": "django",
    "name": "_lazy_re_compile",
    "source_code": "def _lazy_re_compile(regex, flags=0):\n\n    def _compile():\n        if isinstance(regex, (str, bytes)):\n            return re.compile(regex, flags)\n        else:\n            assert not flags, 'flags must be empty if regex is passed pre-compiled'\n            return regex\n    return SimpleLazyObject(_compile)",
    "docstring": "Lazily compile a regex with flags.",
    "type": "function",
    "file_path": "django\\django\\utils\\regex_helper.py",
    "ast_data": "FunctionDef name:_lazy_re_compile arg:regex arg:flags arguments arg arg FunctionDef name:_compile arguments If Call Return return:yes Call Return return:yes Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "_annotate_maxpool2d",
    "source_code": "def _annotate_maxpool2d(self, node: Node, quantization_config: Optional[QuantizationConfig]) -> None:\n    return",
    "docstring": "Here we skip the annotate logic for maxpool at XPU backend as the quantized::max_pool2d is only implemented for CPU.",
    "type": "method",
    "file_path": "pytorch\\torch\\ao\\quantization\\quantizer\\xpu_inductor_quantizer.py",
    "ast_data": "FunctionDef name:_annotate_maxpool2d arg:self arg:node arg:quantization_config arguments arg arg arg Return return:no"
  },
  {
    "library": "tensorflow",
    "name": "_philox_scramble_seed",
    "source_code": "def _philox_scramble_seed(seed):\n    key = constant_op.constant([163851598941452064], dtypes.uint64)\n    counter = math_ops.cast(seed, dtypes.uint64)\n    mix = gen_stateless_random_ops_v2.stateless_random_uniform_full_int_v2([4], key=key, counter=counter, dtype=dtypes.uint32, alg=Algorithm.PHILOX.value)\n    key = array_ops.reshape(_uint32s_to_uint64(mix[:2]), [1])\n    counter = array_ops_stack.stack([0, _uint32s_to_uint64(mix[2:])], axis=0)\n    return (key, counter)",
    "docstring": "Determines the key and counter for Philox PRNG with the given seed. Args: seed: An integer tensor of shape [2]. The seed to calculate the key and counter from. Returns: A pair (key, counter) suitable for V2 stateless RNG ops like .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\random_ops_util.py",
    "ast_data": "FunctionDef name:_philox_scramble_seed arg:seed arguments arg Assign Call Assign Call Assign Call Assign Call Call Assign Call Call Return return:yes"
  },
  {
    "library": "pandas",
    "name": "read_parquet",
    "source_code": "@doc(storage_options=_shared_docs['storage_options'])\ndef read_parquet(path: FilePath | ReadBuffer[bytes], engine: str='auto', columns: list[str] | None=None, storage_options: StorageOptions | None=None, dtype_backend: DtypeBackend | lib.NoDefault=lib.no_default, filesystem: Any=None, filters: list[tuple] | list[list[tuple]] | None=None, to_pandas_kwargs: dict | None=None, **kwargs) -> DataFrame:\n    impl = get_engine(engine)\n    check_dtype_backend(dtype_backend)\n    return impl.read(path, columns=columns, filters=filters, storage_options=storage_options, dtype_backend=dtype_backend, filesystem=filesystem, to_pandas_kwargs=to_pandas_kwargs, **kwargs)",
    "docstring": "Load a parquet object from the file path, returning a DataFrame. The function automatically handles reading the data from a parquet file and creates a DataFrame with the appropriate structure. Parameters ---------- path : str, path object or file-like object String, path object (implementing `DataFrameDataFrameArrowDtypeDataFramekwargsfilterspyarrowenginefilterspyarrow` engine, which can benefit from multithreading and also potentially be more economical in terms of memory. >>> sel = [(\"foo\", \">\", 2)] >>> restored_part = pd.read_parquet(BytesIO(df_parquet_bytes), filters=sel) >>> restored_part foo bar 0 3 8 1 4 9",
    "type": "function",
    "file_path": "pandas\\pandas\\io\\parquet.py",
    "ast_data": "FunctionDef name:read_parquet arg:path arg:engine arg:columns arg:storage_options arg:dtype_backend arg:filesystem arg:filters arg:to_pandas_kwargs arguments arg arg arg arg arg arg arg arg arg Assign Call Call Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "push",
    "source_code": "def push(self, module_meta: _ModuleMeta) -> None:\n    self._module_stack.append(module_meta)",
    "docstring": "Pushes a module meta to the stack.",
    "type": "method",
    "file_path": "pytorch\\torch\\onnx\\_internal\\fx\\passes\\modularization.py",
    "ast_data": "FunctionDef name:push arg:self arg:module_meta arguments arg arg Call"
  },
  {
    "library": "scikit-learn",
    "name": "_predict",
    "source_code": "def _predict(self, X, check_input=True):\n    y_pred = self._forward_pass_fast(X, check_input=check_input)\n    if y_pred.shape[1] == 1:\n        return y_pred.ravel()\n    return y_pred",
    "docstring": "Private predict method with optional input validation",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\neural_network\\_multilayer_perceptron.py",
    "ast_data": "FunctionDef name:_predict arg:self arg:X arg:check_input arguments arg arg arg Assign Call If Compare Return return:yes Call Return return:yes"
  },
  {
    "library": "cherrypy",
    "name": "serve",
    "source_code": "def serve(path=localFile, port=8080, root=None):\n    if coverage is None:\n        raise ImportError('The coverage module could not be imported.')\n    from coverage import coverage\n    cov = coverage(data_file=path)\n    cov.load()\n    cherrypy.config.update({'server.socket_port': int(port), 'server.thread_pool': 10, 'environment': 'production'})\n    cherrypy.quickstart(CoverStats(cov, root))",
    "docstring": "Serve the coverage app over HTTP.",
    "type": "function",
    "file_path": "cherrypy\\cherrypy\\lib\\covercp.py",
    "ast_data": "FunctionDef name:serve arg:path arg:port arg:root arguments arg arg arg If Compare Raise Call Assign Call Call Call Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "_TreeSpecSchema",
    "source_code": "@dataclasses.dataclass\nclass _TreeSpecSchema:\n    type: Optional[str]\n    context: DumpableContext\n    children_spec: list['_TreeSpecSchema']",
    "docstring": "_TreeSpecSchema is the schema used to serialize the TreeSpec It contains the following fields: - type: A string name of the type. null for the case of a LeafSpec. - context: Any format which is json dumpable - children_spec: A list of children serialized specs.",
    "type": "class",
    "file_path": "pytorch\\torch\\utils\\_pytree.py",
    "ast_data": "ClassDef name:_TreeSpecSchema"
  },
  {
    "library": "pytorch",
    "name": "bmm_flop",
    "source_code": "@register_flop_formula(aten.bmm)\ndef bmm_flop(a_shape, b_shape, out_shape=None, **kwargs) -> int:\n    b, m, k = a_shape\n    b2, k2, n = b_shape\n    assert b == b2\n    assert k == k2\n    flop = b * m * n * 2 * k\n    return flop",
    "docstring": "Count flops for the bmm operation.",
    "type": "function",
    "file_path": "pytorch\\torch\\utils\\flop_counter.py",
    "ast_data": "FunctionDef name:bmm_flop arg:a_shape arg:b_shape arg:out_shape arguments arg arg arg arg Assign Assign Compare Compare Assign Return return:yes Call"
  },
  {
    "library": "kornia",
    "name": "vflip",
    "source_code": "def vflip(input: Tensor) -> Tensor:\n    return input.flip(-2).contiguous()",
    "docstring": "Vertically flip a tensor image or a batch of tensor images. .. image:: _static/img/vflip.png Input must be a tensor of shape (C, H, W) or a batch of tensors :math:. Args: input: input tensor. Returns: The vertically flipped image tensor.",
    "type": "function",
    "file_path": "kornia\\kornia\\geometry\\transform\\flips.py",
    "ast_data": "FunctionDef name:vflip arg:input arguments arg Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "fold",
    "source_code": "def fold(input: Tensor, output_size: BroadcastingList2[int], kernel_size: BroadcastingList2[int], dilation: BroadcastingList2[int]=1, padding: BroadcastingList2[int]=0, stride: BroadcastingList2[int]=1) -> Tensor:\n    if has_torch_function_unary(input):\n        return handle_torch_function(fold, (input,), input, output_size, kernel_size, dilation=dilation, padding=padding, stride=stride)\n    return torch._C._nn.col2im(input, _pair(output_size), _pair(kernel_size), _pair(dilation), _pair(padding), _pair(stride))",
    "docstring": "Combine an array of sliding local blocks into a large containing tensor. .. warning:: Currently, only unbatched (3D) or batched (4D) image-like output tensors are supported. See :class: for details",
    "type": "function",
    "file_path": "pytorch\\torch\\nn\\functional.py",
    "ast_data": "FunctionDef name:fold arg:input arg:output_size arg:kernel_size arg:dilation arg:padding arg:stride arguments arg arg arg arg arg arg If Call Return return:yes Call Return return:yes Call Call Call Call Call Call"
  },
  {
    "library": "authlib",
    "name": "authenticate_token",
    "source_code": "def authenticate_token(self, request):\n    raise NotImplementedError()",
    "docstring": "Authenticate current credential who is requesting to register a client. Developers MUST implement this method in subclass:: def authenticate_token(self, request): auth = request.headers.get(\"Authorization\") return get_token_by_auth(auth) :return: token instance",
    "type": "method",
    "file_path": "authlib\\authlib\\oauth2\\rfc7592\\endpoint.py",
    "ast_data": "FunctionDef name:authenticate_token arg:self arg:request arguments arg arg Raise Call"
  },
  {
    "library": "pytorch",
    "name": "IndexExpression",
    "source_code": "class IndexExpression:\n\n    def __init__(self, maketuple):\n        self.maketuple = maketuple\n\n    def __getitem__(self, item):\n        if self.maketuple and (not isinstance(item, tuple)):\n            return (item,)\n        else:\n            return item",
    "docstring": "Written by Konrad Hinsen last revision: 1999-7-23 Cosmetic changes by T. Oliphant 2001",
    "type": "class",
    "file_path": "pytorch\\torch\\_numpy\\_funcs.py",
    "ast_data": "ClassDef name:IndexExpression FunctionDef name:__init__ arg:self arg:maketuple arguments arg arg Assign FunctionDef name:__getitem__ arg:self arg:item arguments arg arg If BoolOp Call Return return:yes Return return:yes"
  },
  {
    "library": "django",
    "name": "AltersData",
    "source_code": "class AltersData:\n\n    def __init_subclass__(cls, **kwargs):\n        for fn_name, fn in vars(cls).items():\n            if callable(fn) and (not hasattr(fn, 'alters_data')):\n                for base in cls.__bases__:\n                    if (base_fn := getattr(base, fn_name, None)):\n                        if hasattr(base_fn, 'alters_data'):\n                            fn.alters_data = base_fn.alters_data\n                        break\n        super().__init_subclass__(**kwargs)",
    "docstring": "Make subclasses preserve the alters_data attribute on overridden methods.",
    "type": "class",
    "file_path": "django\\django\\db\\models\\utils.py",
    "ast_data": "ClassDef name:AltersData FunctionDef name:__init_subclass__ arg:cls arguments arg arg For Call Call If BoolOp Call Call For If Call If Call Assign Call Call"
  },
  {
    "library": "scikit-learn",
    "name": "staged_predict",
    "source_code": "def staged_predict(self, X):\n    check_is_fitted(self)\n    X = self._check_X(X)\n    for i, _ in enumerate(self.estimators_, 1):\n        yield self._get_median_predict(X, limit=i)",
    "docstring": "Return staged predictions for X. The predicted regression value of an input sample is computed as the weighted median prediction of the regressors in the ensemble. This generator method yields the ensemble prediction after each iteration of boosting and therefore allows monitoring, such as to determine the prediction on a test set after each boost. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) The training input samples. Yields ------ y : generator of ndarray of shape (n_samples,) The predicted regression values.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\ensemble\\_weight_boosting.py",
    "ast_data": "FunctionDef name:staged_predict arg:self arg:X arguments arg arg Call Assign Call For Call Call"
  },
  {
    "library": "matplotlib",
    "name": "get_shared_y_axes",
    "source_code": "def get_shared_y_axes(self):\n    return cbook.GrouperView(self._shared_axes['y'])",
    "docstring": "Return an immutable view on the shared y-axes Grouper.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\axes\\_base.py",
    "ast_data": "FunctionDef name:get_shared_y_axes arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "put_variables",
    "source_code": "def put_variables(self, mdict, write_header=None):\n    if write_header is None:\n        write_header = self.file_stream.tell() == 0\n    if write_header:\n        self.write_file_header()\n    self._matrix_writer = VarWriter5(self)\n    for name, var in mdict.items():\n        if name[0] == '_':\n            msg = f'Starting field name with a underscore ({name}) is ignored'\n            warnings.warn(msg, MatWriteWarning, stacklevel=2)\n            continue\n        is_global = name in self.global_vars\n        if self.do_compression:\n            stream = BytesIO()\n            self._matrix_writer.file_stream = stream\n            self._matrix_writer.write_top(var, name.encode('latin1'), is_global)\n            out_str = zlib.compress(stream.getvalue())\n            tag = np.empty((), NDT_TAG_FULL)\n            tag['mdtype'] = miCOMPRESSED\n            tag['byte_count'] = len(out_str)\n            self.file_stream.write(tag.tobytes())\n            self.file_stream.write(out_str)\n        else:\n            self._matrix_writer.write_top(var, name.encode('latin1'), is_global)",
    "docstring": "Write variables in to stream Parameters ---------- mdict : mapping mapping with method `` is something writeable to a matlab file, such as a NumPy array. write_header : {None, True, False}, optional If True, then write the matlab file header before writing the variables. If None (the default) then write the file header if we are at position 0 in the stream. By setting False here, and setting the stream position to the end of the file, you can append variables to a matlab file",
    "type": "method",
    "file_path": "scipy\\scipy\\io\\matlab\\_mio5.py",
    "ast_data": "FunctionDef name:put_variables arg:self arg:mdict arg:write_header arguments arg arg arg If Compare Assign Compare Call If Call Assign Call For Call If Compare Assign Call Assign Compare If Assign Call Assign Call Call Assign Call Call Assign Call Assign Assign Call Call Call Call Call Call"
  },
  {
    "library": "scipy",
    "name": "_matmat",
    "source_code": "def _matmat(self, x):\n    return self._matvec(x)",
    "docstring": "Construct matrix-free callable matrix-matrix multiplication by the Mikota mass matrix without constructing or storing the matrix itself by reusing the ``.",
    "type": "method",
    "file_path": "scipy\\scipy\\sparse\\linalg\\_special_sparse_arrays.py",
    "ast_data": "FunctionDef name:_matmat arg:self arg:x arguments arg arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "reorder_kwargs",
    "source_code": "def reorder_kwargs(user_kwargs: dict[str, Any], spec: TreeSpec) -> dict[str, Any]:\n    assert spec.type is tuple\n    assert spec.num_children == 2\n    kwargs_spec = spec.children_specs[1]\n    assert kwargs_spec.type is dict\n    if set(user_kwargs) != set(kwargs_spec.context):\n        raise ValueError(f'Ran into a kwarg keyword mismatch: Got the following keywords {list(user_kwargs)} but expected {kwargs_spec.context}')\n    reordered_kwargs = {}\n    for kw in kwargs_spec.context:\n        reordered_kwargs[kw] = user_kwargs[kw]\n    return reordered_kwargs",
    "docstring": "Reorder user-provided kwargs to match the order in . is expected to be the in_spec of an exported program, i.e. the spec that results from flattening . We need this to provide consistent input ordering, such so that users can pass in foo(a=a, b=b) OR foo(b=b, a=a) and receive the same result.",
    "type": "function",
    "file_path": "pytorch\\torch\\export\\_tree_utils.py",
    "ast_data": "FunctionDef name:reorder_kwargs arg:user_kwargs arg:spec arguments arg arg Compare Compare Assign Compare If Compare Call Call Raise Call Call Assign For Assign Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "softmin",
    "source_code": "def softmin(input: Tensor, dim: Optional[int]=None, _stacklevel: int=3, dtype: Optional[DType]=None) -> Tensor:\n    if has_torch_function_unary(input):\n        return handle_torch_function(softmin, (input,), input, dim=dim, _stacklevel=_stacklevel, dtype=dtype)\n    if dim is None:\n        dim = _get_softmax_dim('softmin', input.dim(), _stacklevel)\n    if dtype is None:\n        ret = (-input).softmax(dim)\n    else:\n        ret = (-input).softmax(dim, dtype=dtype)\n    return ret",
    "docstring": "Apply a softmin function. Note that :math:. See softmax definition for mathematical formula. See :class: for more details. Args: input (Tensor): input dim (int): A dimension along which softmin will be computed (so every slice along dim will sum to 1). dtype (:class:, optional): the desired data type of returned tensor. If specified, the input tensor is casted to :attr: before the operation is performed. This is useful for preventing data type overflows. Default: None.",
    "type": "function",
    "file_path": "pytorch\\torch\\nn\\functional.py",
    "ast_data": "FunctionDef name:softmin arg:input arg:dim arg:_stacklevel arg:dtype arguments arg arg arg arg If Call Return return:yes Call If Compare Assign Call Call If Compare Assign Call Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "parse_single_example_v2",
    "source_code": "@tf_export('io.parse_single_example', v1=[])\n@dispatch.add_dispatch_support\ndef parse_single_example_v2(serialized, features, example_names=None, name=None):\n    if features is None:\n        raise ValueError('Invalid argument: features cannot be None.')\n    if not features:\n        raise ValueError('Invalid argument: features cannot be empty.')\n    with ops.name_scope(name, 'ParseSingleExample', [serialized, example_names]):\n        serialized = ops.convert_to_tensor(serialized, name='serialized')\n        serialized = _assert_scalar(serialized, 'serialized')\n        return parse_example_v2(serialized, features, example_names, name)",
    "docstring": "Parses a single proto. Similar to , except: For dense tensors, the returned is identical to the output of , except there is no batch dimension, the output shape is the same as the shape given in . For s, the first (batch) column of the indices matrix is removed (the indices matrix is a column vector), the values vector is unchanged, and the first () entry of the shape vector is removed (it is now a single element vector). One might see performance advantages by batching protos with instead of using this function directly. Args: serialized: A scalar string Tensor, a single serialized Example. features: A mapping of feature keys to or values. example_names: (Optional) A scalar string Tensor, the associated name. name: A name for this operation (optional). Returns: A mapping feature keys to and values. Raises: ValueError: if any feature is invalid.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\parsing_ops.py",
    "ast_data": "FunctionDef name:parse_single_example_v2 arg:serialized arg:features arg:example_names arg:name arguments arg arg arg arg If Compare Raise Call If Raise Call With Call Assign Call Assign Call Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_num_tasks",
    "source_code": "def _num_tasks(self) -> int:\n    return self._server.num_tasks()",
    "docstring": "Returns the number of tasks currently being executed on the worker.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\data\\experimental\\service\\server_lib.py",
    "ast_data": "FunctionDef name:_num_tasks arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "django",
    "name": "words",
    "source_code": "def words(self, num, truncate=None, html=False):\n    self._setup()\n    length = int(num)\n    if length <= 0:\n        return ''\n    if html:\n        parser = TruncateWordsHTMLParser(length=length, replacement=truncate)\n        parser.feed(self._wrapped)\n        parser.close()\n        return parser.output\n    return self._text_words(length, truncate)",
    "docstring": "Truncate a string after a certain number of words. specifies what should be used to notify that the string has been truncated, defaulting to ellipsis.",
    "type": "method",
    "file_path": "django\\django\\utils\\text.py",
    "ast_data": "FunctionDef name:words arg:self arg:num arg:truncate arg:html arguments arg arg arg arg Call Assign Call If Compare Return return:yes If Assign Call Call Call Return return:yes Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "in_y_true_range",
    "source_code": "def in_y_true_range(self, y):\n    return self.interval_y_true.includes(y)",
    "docstring": "Return True if y is in the valid range of y_true. Parameters ---------- y : ndarray",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\_loss\\loss.py",
    "ast_data": "FunctionDef name:in_y_true_range arg:self arg:y arguments arg arg Return return:yes Call"
  },
  {
    "library": "cryptography",
    "name": "__copy__",
    "source_code": "@abc.abstractmethod\ndef __copy__(self) -> X25519PrivateKey:\n    pass",
    "docstring": "Returns a copy.",
    "type": "method",
    "file_path": "cryptography\\src\\cryptography\\hazmat\\primitives\\asymmetric\\x25519.py",
    "ast_data": "FunctionDef name:__copy__ arg:self arguments arg"
  },
  {
    "library": "tensorflow",
    "name": "experimental_require_static_shapes",
    "source_code": "@property\ndef experimental_require_static_shapes(self):\n    return self._require_static_shapes",
    "docstring": "Returns if static shape is required; otherwise.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\distribute_lib.py",
    "ast_data": "FunctionDef name:experimental_require_static_shapes arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_Initializer",
    "source_code": "class _Initializer(resource.CapturableResource):\n\n    def __init__(self, init_fn, asset_paths):\n        super(_Initializer, self).__init__()\n        self._asset_paths = asset_paths\n        self._init_fn = init_fn\n\n    def _create_resource(self):\n        return constant_op.constant(1.0)\n\n    def _initialize(self):\n        return self._init_fn(*[path.asset_path for path in self._asset_paths])",
    "docstring": "Represents an initialization operation restored from a SavedModel. Without this object re-export of imported 1.x SavedModels would omit the original SavedModel's initialization procedure. Created when loads a TF 1.x-style SavedModel with an initialization op. This object holds a function that runs the initialization. It does not require any manual user intervention; will see this object and automatically add it to the exported SavedModel, and runs the initialization function automatically.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\saved_model\\load_v1_in_v2.py",
    "ast_data": "ClassDef name:_Initializer FunctionDef name:__init__ arg:self arg:init_fn arg:asset_paths arguments arg arg arg Call Call Assign Assign FunctionDef name:_create_resource arg:self arguments arg Return return:yes Call FunctionDef name:_initialize arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "kornia",
    "name": "rgb_to_yuv422",
    "source_code": "def rgb_to_yuv422(image: Tensor) -> tuple[Tensor, Tensor]:\n    if not isinstance(image, Tensor):\n        raise TypeError(f'Input type is not a Tensor. Got {type(image)}')\n    if len(image.shape) < 3 or image.shape[-3] != 3:\n        raise ValueError(f'Input size must have a shape of (*, 3, H, W). Got {image.shape}')\n    if len(image.shape) < 2 or image.shape[-2] % 2 == 1 or image.shape[-1] % 2 == 1:\n        raise ValueError(f'Input H&W must be evenly disible by 2. Got {image.shape}')\n    yuvimage = rgb_to_yuv(image)\n    return (yuvimage[..., :1, :, :], yuvimage[..., 1:3, :, :].unfold(-1, 2, 2).mean(-1))",
    "docstring": "Convert an RGB image to YUV 422 (subsampled). Input need to be padded to be evenly divisible by 2 vertical. The image data is assumed to be in the range of :math:. The range of the output is of :math: to luma and the ranges of U and V are :math: and :math:, respectively. The YUV model adopted here follows M/PAL values (see _, Table 2, items 2.5 and 2.6). Args: image: RGB Image to be converted to YUV with shape :math:. Returns: A Tensor containing the Y plane with shape :math: A Tensor containing the UV planes with shape :math: Example: >>> input = torch.rand(2, 3, 4, 6) >>> output = rgb_to_yuv420(input) # (2x1x4x6, 2x1x4x3)",
    "type": "function",
    "file_path": "kornia\\kornia\\color\\yuv.py",
    "ast_data": "FunctionDef name:rgb_to_yuv422 arg:image arguments arg If Call Raise Call Call If BoolOp Compare Call Compare Raise Call If BoolOp Compare Call Compare Compare Raise Call Assign Call Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "_get_current_process_group",
    "source_code": "def _get_current_process_group():\n    global _CURRENT_PROCESS_GROUP\n    if _CURRENT_PROCESS_GROUP is None:\n        return distributed_c10d._get_default_group()\n    else:\n        return _CURRENT_PROCESS_GROUP",
    "docstring": "Retrieves the current process group set by ``. If not set, it just returns the default group.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\_shard\\api.py",
    "ast_data": "FunctionDef name:_get_current_process_group arguments If Compare Return return:yes Call Return return:yes"
  },
  {
    "library": "numpy",
    "name": "log",
    "source_code": "@set_module('numpy.lib.scimath')\n@array_function_dispatch(_unary_dispatcher)\ndef log(x):\n    x = _fix_real_lt_zero(x)\n    return nx.log(x)",
    "docstring": "Compute the natural logarithm of . Return the \"principal value\" (for a description of this, see ) of :math:. For real , this is a real number (`xxoutx >> import numpy as np >>> np.emath.log(np.exp(1)) 1.0 Negative arguments are handled \"correctly\" (recall that ``x >> np.emath.log(-np.exp(1)) == (1 + np.pi * 1j) True",
    "type": "function",
    "file_path": "numpy\\numpy\\lib\\_scimath_impl.py",
    "ast_data": "FunctionDef name:log arg:x arguments arg Assign Call Return return:yes Call Call Call"
  },
  {
    "library": "scipy",
    "name": "Schaffer04",
    "source_code": "class Schaffer04(Benchmark):\n\n    def __init__(self, dimensions=2):\n        Benchmark.__init__(self, dimensions)\n        self._bounds = list(zip([-100.0] * self.N, [100.0] * self.N))\n        self.custom_bounds = [(-10, 10), (-10, 10)]\n        self.global_optimum = [[0.0, 1.253115]]\n        self.fglob = 0.292579\n\n    def fun(self, x, *args):\n        self.nfev += 1\n        num = cos(sin(abs(x[0] ** 2 - x[1] ** 2))) ** 2 - 0.5\n        den = (1 + 0.001 * (x[0] ** 2 + x[1] ** 2)) ** 2\n        return 0.5 + num / den",
    "docstring": "Schaffer 4 objective function. This class defines the Schaffer 4 [1]_ global optimization problem. This is a multimodal minimization problem defined as follows: .. math:: f_{\\text{Schaffer04}}(x) = 0.5 + \\frac{\\cos^2 \\left( \\sin(x_1^2 - x_2^2) \\right ) - 0.5}{1 + 0.001(x_1^2 + x_2^2)^2}^2 with :math: for :math:. *Global optimum*: :math: for :math: .. [1] Mishra, S. Some new test functions for global optimization and performance of repulsive particle swarm method. Munich Personal RePEc Archive, 2006, 2718",
    "type": "class",
    "file_path": "scipy\\benchmarks\\benchmarks\\go_benchmark_functions\\go_funcs_S.py",
    "ast_data": "ClassDef name:Schaffer04 FunctionDef name:__init__ arg:self arg:dimensions arguments arg arg Call Assign Call Call Assign Assign Assign FunctionDef name:fun arg:self arg:x arguments arg arg arg Assign Call Call Call Assign Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "if_mask",
    "source_code": "@triton_builtin\ndef if_mask(mask: Any, val, *, _builder: object=None) -> tl.constexpr:\n    if isinstance(mask, tl.constexpr) and mask.value is None:\n        return tl.constexpr(None)\n    return val",
    "docstring": "Work around triton compile error: othermask`valNone` depending on the value of mask.",
    "type": "function",
    "file_path": "pytorch\\torch\\_inductor\\runtime\\triton_helpers.py",
    "ast_data": "FunctionDef name:if_mask arg:mask arg:val arguments arg arg arg If BoolOp Call Compare Return return:yes Call Return return:yes"
  },
  {
    "library": "django",
    "name": "format_html",
    "source_code": "def format_html(format_string, *args, **kwargs):\n    if not (args or kwargs):\n        raise TypeError('args or kwargs must be provided.')\n    args_safe = map(conditional_escape, args)\n    kwargs_safe = {k: conditional_escape(v) for k, v in kwargs.items()}\n    return mark_safe(format_string.format(*args_safe, **kwargs_safe))",
    "docstring": "Similar to str.format, but pass all arguments through conditional_escape(), and call mark_safe() on the result. This function should be used instead of str.format or % interpolation to build up small HTML fragments.",
    "type": "function",
    "file_path": "django\\django\\utils\\html.py",
    "ast_data": "FunctionDef name:format_html arg:format_string arguments arg arg arg If BoolOp Raise Call Assign Call Assign Call Call Return return:yes Call Call"
  },
  {
    "library": "matplotlib",
    "name": "__init__",
    "source_code": "def __init__(self, transform=None, subs=None, linthresh=None, base=None):\n    if transform is not None:\n        self._base = transform.base\n        self._linthresh = transform.linthresh\n    elif linthresh is not None and base is not None:\n        self._base = base\n        self._linthresh = linthresh\n    else:\n        raise ValueError('Either transform, or both linthresh and base, must be provided.')\n    if subs is None:\n        self._subs = [1.0]\n    else:\n        self._subs = subs\n    self.numticks = 15",
    "docstring": "Parameters ---------- transform : , optional If set, defines the *base* and *linthresh* of the symlog transform. base, linthresh : float, optional The *base* and *linthresh* of the symlog transform, as documented for . These parameters are only used if *transform* is not set. subs : sequence of float, default: [1] The multiples of integer powers of the base where ticks are placed, i.e., ticks are placed at ``. Notes ----- Either *transform*, or both *base* and *linthresh*, must be given.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\ticker.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:transform arg:subs arg:linthresh arg:base arguments arg arg arg arg arg If Compare Assign Assign If BoolOp Compare Compare Assign Assign Raise Call If Compare Assign Assign Assign"
  },
  {
    "library": "matplotlib",
    "name": "add_line",
    "source_code": "def add_line(self, line):\n    _api.check_isinstance(mlines.Line2D, line=line)\n    self._set_artist_props(line)\n    if line.get_clip_path() is None:\n        line.set_clip_path(self.patch)\n    self._update_line_limits(line)\n    if not line.get_label():\n        line.set_label(f'_child{len(self._children)}')\n    self._children.append(line)\n    line._remove_method = self._children.remove\n    self.stale = True\n    return line",
    "docstring": "Add a to the Axes; return the line.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\axes\\_base.py",
    "ast_data": "FunctionDef name:add_line arg:self arg:line arguments arg arg Call Call If Compare Call Call Call If Call Call Call Call Assign Assign Return return:yes"
  },
  {
    "library": "scipy",
    "name": "powernorm_gen",
    "source_code": "class powernorm_gen(rv_continuous):\n\n    def _shape_info(self):\n        return [_ShapeInfo('c', False, (0, np.inf), (False, False))]\n\n    def _pdf(self, x, c):\n        return c * _norm_pdf(x) * _norm_cdf(-x) ** (c - 1.0)\n\n    def _logpdf(self, x, c):\n        return np.log(c) + _norm_logpdf(x) + (c - 1) * _norm_logcdf(-x)\n\n    def _cdf(self, x, c):\n        return -sc.expm1(self._logsf(x, c))\n\n    def _ppf(self, q, c):\n        return -_norm_ppf(pow(1.0 - q, 1.0 / c))\n\n    def _sf(self, x, c):\n        return np.exp(self._logsf(x, c))\n\n    def _logsf(self, x, c):\n        return c * _norm_logcdf(-x)\n\n    def _isf(self, q, c):\n        return -_norm_ppf(np.exp(np.log(q) / c))",
    "docstring": "A power normal continuous random variable. %(before_notes)s Notes ----- The probability density function for is: .. math:: f(x, c) = c \\phi(x) (\\Phi(-x))^{c-1} where :math: is the normal pdf, :math: is the normal cdf, :math: is any real, and :math: [1]_. takes `c`. %(after_notes)s References ---------- .. [1] NIST Engineering Statistics Handbook, Section 1.3.6.6.13, %(example)s",
    "type": "class",
    "file_path": "scipy\\scipy\\stats\\_continuous_distns.py",
    "ast_data": "ClassDef name:powernorm_gen FunctionDef name:_shape_info arg:self arguments arg Return return:yes Call FunctionDef name:_pdf arg:self arg:x arg:c arguments arg arg arg Return return:yes Call Call FunctionDef name:_logpdf arg:self arg:x arg:c arguments arg arg arg Return return:yes Call Call Call FunctionDef name:_cdf arg:self arg:x arg:c arguments arg arg arg Return return:yes Call Call FunctionDef name:_ppf arg:self arg:q arg:c arguments arg arg arg Return return:yes Call Call FunctionDef name:_sf arg:self arg:x arg:c arguments arg arg arg Return return:yes Call Call FunctionDef name:_logsf arg:self arg:x arg:c arguments arg arg arg Return return:yes Call FunctionDef name:_isf arg:self arg:q arg:c arguments arg arg arg Return return:yes Call Call Call"
  },
  {
    "library": "django",
    "name": "now",
    "source_code": "@register.tag\ndef now(parser, token):\n    bits = token.split_contents()\n    asvar = None\n    if len(bits) == 4 and bits[-2] == 'as':\n        asvar = bits[-1]\n        bits = bits[:-2]\n    if len(bits) != 2:\n        raise TemplateSyntaxError(\"'now' statement takes one argument\")\n    format_string = bits[1][1:-1]\n    return NowNode(format_string, asvar)",
    "docstring": "Display the date, formatted according to the given string. Use the same format as PHP's `` function; see for all the possible values. Sample usage:: It is {% now \"jS F Y H:i\" %}",
    "type": "function",
    "file_path": "django\\django\\template\\defaulttags.py",
    "ast_data": "FunctionDef name:now arg:parser arg:token arguments arg arg Assign Call Assign If BoolOp Compare Call Compare Assign Assign If Compare Call Raise Call Assign Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "compile_file",
    "source_code": "@indent_msg\ndef compile_file(self, path: Path, top_package_path: Path):\n    assert path.is_file()\n    if path.suffix != '.py':\n        self.msg(path, 'N')\n        return\n    if path.name in DENY_LIST:\n        self.msg(path, 'X')\n        return\n    self.msg(path, 'F')\n    module_qualname = self.get_module_qualname(path, top_package_path)\n    module_mangled_name = '__'.join(module_qualname)\n    c_name = 'M_' + module_mangled_name\n    with open(path) as src_file:\n        co = self.compile_string(src_file.read())\n    bytecode = marshal.dumps(co)\n    size = len(bytecode)\n    if path.name == '__init__.py':\n        size = -size\n    self.frozen_modules.append(FrozenModule('.'.join(module_qualname), c_name, size, bytecode))",
    "docstring": "Compile a Python source file to frozen bytecode. Append the result to .",
    "type": "method",
    "file_path": "pytorch\\torch\\utils\\_freeze.py",
    "ast_data": "FunctionDef name:compile_file arg:self arg:path arg:top_package_path arguments arg arg arg Call If Compare Call Return return:no If Compare Call Return return:no Call Assign Call Assign Call Assign With Call Assign Call Call Assign Call Assign Call If Compare Assign Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "collect_bw_donated_buffer_idxs",
    "source_code": "def collect_bw_donated_buffer_idxs(fw_module: torch.fx.GraphModule, bw_module: torch.fx.GraphModule, fw_metadata: ViewAndMutationMeta) -> list[int]:\n    if contain_metadata_mutation_ops(fw_module) or contain_metadata_mutation_ops(bw_module):\n        return []\n    fw_ins = fw_module.graph.find_nodes(op='placeholder')\n    bw_outs = next(reversed(bw_module.graph.find_nodes(op='output'))).args[0]\n    fw_outs = next(reversed(fw_module.graph.find_nodes(op='output'))).args[0]\n    fw_ins = [n.meta['val'] if hasattr(n, 'meta') and 'val' in n.meta else None for n in fw_ins]\n    fw_outs = [n.meta['val'] if hasattr(n, 'meta') and 'val' in n.meta else None for n in fw_outs]\n    bw_outs = [n.meta['val'] if hasattr(n, 'meta') and 'val' in n.meta else None for n in bw_outs]\n    user_fw_outs = fw_outs[:fw_metadata.num_forward]\n    saved_tensors = fw_outs[fw_metadata.tensors_saved_for_backwards_slice]\n    fw_donated_buffer = collect_fw_donated_buffer_idxs(fw_ins, user_fw_outs, bw_outs, saved_tensors)\n    assert fw_metadata.num_symints_saved_for_bw is not None\n    return [fw_metadata.num_symints_saved_for_bw + i for i in fw_donated_buffer]",
    "docstring": "Collects backward donated buffer indexes from fw_module and bw_module.",
    "type": "function",
    "file_path": "pytorch\\torch\\_functorch\\_aot_autograd\\jit_compile_runtime_wrappers.py",
    "ast_data": "FunctionDef name:collect_bw_donated_buffer_idxs arg:fw_module arg:bw_module arg:fw_metadata arguments arg arg arg If BoolOp Call Call Return return:no Assign Call Assign Call Call Call Assign Call Call Call Assign BoolOp Call Compare Assign BoolOp Call Compare Assign BoolOp Call Compare Assign Assign Assign Call Compare Return return:yes"
  },
  {
    "library": "django",
    "name": "target_field",
    "source_code": "@property\ndef target_field(self):\n    target_fields = self.path_infos[-1].target_fields\n    if len(target_fields) > 1:\n        raise exceptions.FieldError('The relation has multiple target fields, but only single target field was asked for')\n    return target_fields[0]",
    "docstring": "When filtering against this relation, return the field on the remote model against which the filtering should happen.",
    "type": "method",
    "file_path": "django\\django\\db\\models\\fields\\related.py",
    "ast_data": "FunctionDef name:target_field arg:self arguments arg Assign If Compare Call Raise Call Return return:yes"
  },
  {
    "library": "pandas",
    "name": "_set_dtype",
    "source_code": "def _set_dtype(self, dtype: CategoricalDtype) -> Self:\n    codes = recode_for_categories(self.codes, self.categories, dtype.categories)\n    return type(self)._simple_new(codes, dtype=dtype)",
    "docstring": "Internal method for directly updating the CategoricalDtype Parameters ---------- dtype : CategoricalDtype Notes ----- We don't do any validation here. It's assumed that the dtype is a (valid) instance of .",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\arrays\\categorical.py",
    "ast_data": "FunctionDef name:_set_dtype arg:self arg:dtype arguments arg arg Assign Call Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "update_state",
    "source_code": "def update_state(self, y_true, y_pred, sample_weight=None):\n    y_true = math_ops.cast(y_true, self._dtype)\n    y_pred = math_ops.cast(y_pred, self._dtype)\n    y_pred, y_true = losses_utils.squeeze_or_expand_dimensions(y_pred, y_true)\n    error_sq = math_ops.squared_difference(y_pred, y_true)\n    return super(RootMeanSquaredError, self).update_state(error_sq, sample_weight=sample_weight)",
    "docstring": "Accumulates root mean squared error statistics. Args: y_true: The ground truth values. y_pred: The predicted values. sample_weight: Optional weighting of each example. Defaults to 1. Can be a whose rank is either 0, or the same rank as , and must be broadcastable to . Returns: Update op.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\metrics.py",
    "ast_data": "FunctionDef name:update_state arg:self arg:y_true arg:y_pred arg:sample_weight arguments arg arg arg arg Assign Call Assign Call Assign Call Assign Call Return return:yes Call Call"
  },
  {
    "library": "pandas",
    "name": "itemsize",
    "source_code": "@property\ndef itemsize(self) -> int:\n    return self._dtype.itemsize",
    "docstring": "The element size of this data-type object.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\dtypes\\dtypes.py",
    "ast_data": "FunctionDef name:itemsize arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "transform",
    "source_code": "def transform(node, ctx, default_to_null_return=True):\n    node = qual_names.resolve(node)\n    node = activity.resolve(node, ctx, None)\n    node = ConditionalReturnRewriter(ctx).visit(node)\n    node = qual_names.resolve(node)\n    node = activity.resolve(node, ctx, None)\n    transformer = ReturnStatementsTransformer(ctx, allow_missing_return=default_to_null_return)\n    node = transformer.visit(node)\n    return node",
    "docstring": "Ensure a function has only a single return, at the end.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\autograph\\converters\\return_statements.py",
    "ast_data": "FunctionDef name:transform arg:node arg:ctx arg:default_to_null_return arguments arg arg arg Assign Call Assign Call Assign Call Call Assign Call Assign Call Assign Call Assign Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_ConfigAutoWrap",
    "source_code": "class _ConfigAutoWrap:\n    in_autowrap_context: bool = False\n    wrapper_cls: Optional[Callable] = None\n    kwargs: dict[str, Any] = {}\n\n    def __init__(self, **kwargs: dict[str, Any]):\n        self.kwargs = kwargs\n\n    @staticmethod\n    def enable_autowrap_context(kwargs: Any) -> None:\n        if _ConfigAutoWrap.in_autowrap_context:\n            raise NotImplementedError('You are already within an autowrap context and we currently do not supported nested autowrap.')\n        _ConfigAutoWrap.in_autowrap_context = True\n        assert 'wrapper_cls' in kwargs.keys(), 'Expected to pass in wrapper_cls arg into _ConfigAutoWrap.'\n        _ConfigAutoWrap.wrapper_cls = cast(Callable, kwargs['wrapper_cls'])\n        del kwargs['wrapper_cls']\n        _ConfigAutoWrap.kwargs = kwargs\n\n    @staticmethod\n    def disable_autowrap_context() -> None:\n        _ConfigAutoWrap.in_autowrap_context = False\n        _ConfigAutoWrap.wrapper_cls = None\n        _ConfigAutoWrap.kwargs = {}\n\n    def __enter__(self) -> None:\n        self.enable_autowrap_context(self.kwargs)\n\n    def __exit__(self, exc_type: Any, exc_val: Any, exc_tb: Any) -> None:\n        self.disable_autowrap_context()",
    "docstring": "Helper class to wrap modules based on default config args via a context manager. See :func: for more information.",
    "type": "class",
    "file_path": "pytorch\\torch\\distributed\\fsdp\\wrap.py",
    "ast_data": "ClassDef name:_ConfigAutoWrap FunctionDef name:__init__ arg:self arguments arg arg Assign FunctionDef name:enable_autowrap_context arg:kwargs arguments arg If Raise Call Assign Compare Call Assign Call Assign FunctionDef name:disable_autowrap_context arguments Assign Assign Assign FunctionDef name:__enter__ arg:self arguments arg Call FunctionDef name:__exit__ arg:self arg:exc_type arg:exc_val arg:exc_tb arguments arg arg arg arg Call"
  },
  {
    "library": "matplotlib",
    "name": "TimedAnimation",
    "source_code": "class TimedAnimation(Animation):\n\n    def __init__(self, fig, interval=200, repeat_delay=0, repeat=True, event_source=None, *args, **kwargs):\n        self._interval = interval\n        self._repeat_delay = repeat_delay if repeat_delay is not None else 0\n        self._repeat = repeat\n        if event_source is None:\n            event_source = fig.canvas.new_timer(interval=self._interval)\n        super().__init__(fig, *args, event_source=event_source, **kwargs)\n\n    def _step(self, *args):\n        still_going = super()._step(*args)\n        if not still_going:\n            if self._repeat:\n                self._init_draw()\n                self.frame_seq = self.new_frame_seq()\n                self.event_source.interval = self._repeat_delay\n                return True\n            else:\n                self.pause()\n                if self._blit:\n                    self._fig.canvas.mpl_disconnect(self._resize_id)\n                self._fig.canvas.mpl_disconnect(self._close_id)\n                self.event_source = None\n                return False\n        self.event_source.interval = self._interval\n        return True",
    "docstring": "subclass for time-based animation. A new frame is drawn every *interval* milliseconds. .. note:: You must store the created Animation in a variable that lives as long as the animation should run. Otherwise, the Animation object will be garbage-collected and the animation stops. Parameters ---------- fig : The figure object used to get needed events, such as draw or resize. interval : int, default: 200 Delay between frames in milliseconds. repeat_delay : int, default: 0 The delay in milliseconds between consecutive animation runs, if *repeat* is True. repeat : bool, default: True Whether the animation repeats when the sequence of frames is completed. blit : bool, default: False Whether blitting is used to optimize drawing.",
    "type": "class",
    "file_path": "matplotlib\\lib\\matplotlib\\animation.py",
    "ast_data": "ClassDef name:TimedAnimation FunctionDef name:__init__ arg:self arg:fig arg:interval arg:repeat_delay arg:repeat arg:event_source arguments arg arg arg arg arg arg arg arg Assign Assign Compare Assign If Compare Assign Call Call Call FunctionDef name:_step arg:self arguments arg arg Assign Call Call If If Call Assign Call Assign Return return:yes Call If Call Call Assign Return return:yes Assign Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "return_arg_list",
    "source_code": "def return_arg_list(arg_indices: list[int]) -> Callable[[Node], list[int]]:\n\n    def arg_indices_func(node: Node) -> list[int]:\n        return [i for i in arg_indices if i < len(node.args)]\n    return arg_indices_func",
    "docstring": "Constructs a function that takes a node as arg and returns the arg_indices that are valid for node.args",
    "type": "function",
    "file_path": "pytorch\\torch\\ao\\quantization\\fx\\utils.py",
    "ast_data": "FunctionDef name:return_arg_list arg:arg_indices arguments arg FunctionDef name:arg_indices_func arg:node arguments arg Return return:yes Compare Call Return return:yes"
  },
  {
    "library": "kornia",
    "name": "_compute_scaling_matrix",
    "source_code": "def _compute_scaling_matrix(scale: Tensor, center: Tensor) -> Tensor:\n    angle: Tensor = zeros(scale.shape[:1], device=scale.device, dtype=scale.dtype)\n    matrix: Tensor = get_rotation_matrix2d(center, angle, scale)\n    return matrix",
    "docstring": "Compute affine matrix for scaling.",
    "type": "function",
    "file_path": "kornia\\kornia\\geometry\\transform\\affwarp.py",
    "ast_data": "FunctionDef name:_compute_scaling_matrix arg:scale arg:center arguments arg arg Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_nt_quote_args",
    "source_code": "def _nt_quote_args(args: Optional[list[str]]) -> list[str]:\n    if not args:\n        return []\n    return [f'\"{arg}\"' if ' ' in arg else arg for arg in args]",
    "docstring": "Quote command-line arguments for DOS/Windows conventions. Just wraps every argument which contains blanks in double quotes, and returns a new argument list.",
    "type": "function",
    "file_path": "pytorch\\torch\\utils\\cpp_extension.py",
    "ast_data": "FunctionDef name:_nt_quote_args arg:args arguments arg If Return return:no Return return:yes Compare"
  },
  {
    "library": "pytorch",
    "name": "create_python_bindings",
    "source_code": "def create_python_bindings(fm: FileManager, pairs: Sequence[PythonSignatureNativeFunctionPair], pred: Callable[[NativeFunction], bool], module: str | None, filename: str, *, method: bool, symint: bool=True) -> None:\n    py_methods: list[str] = []\n    ops_headers: list[str] = []\n    py_method_defs: list[str] = []\n    py_forwards: list[str] = []\n    grouped = group_filter_overloads(pairs, pred)\n    for name in sorted(grouped.keys(), key=str):\n        overloads = grouped[name]\n        py_methods.append(method_impl(name, module, overloads, method=method, symint=symint))\n        py_method_defs.append(method_def(name, module, overloads, method=method))\n        py_forwards.extend(forward_decls(name, overloads, method=method))\n        ops_headers.append(f'#include <ATen/ops/{name.base}.h>')\n    fm.write_with_template(filename, filename, lambda: {'generated_comment': '@' + f'generated from {fm.template_dir_for_comments()}/{filename}', 'ops_headers': ops_headers, 'py_forwards': py_forwards, 'py_methods': py_methods, 'py_method_defs': py_method_defs})",
    "docstring": "Generates Python bindings to ATen functions",
    "type": "function",
    "file_path": "pytorch\\tools\\autograd\\gen_python_functions.py",
    "ast_data": "FunctionDef name:create_python_bindings arg:fm arg:pairs arg:pred arg:module arg:filename arguments arg arg arg arg arg arg arg Assign Call For Call Call Assign Call Call Call Call Call Call Call Call arguments Call"
  },
  {
    "library": "pytorch",
    "name": "device_count",
    "source_code": "def device_count() -> int:\n    return torch._C._mtia_getDeviceCount()",
    "docstring": "Return the number of MTIA devices available.",
    "type": "function",
    "file_path": "pytorch\\torch\\mtia\\__init__.py",
    "ast_data": "FunctionDef name:device_count arguments Return return:yes Call"
  },
  {
    "library": "numpy",
    "name": "center",
    "source_code": "def center(self, width, fillchar=' '):\n    return asarray(center(self, width, fillchar))",
    "docstring": "Return a copy of with its elements centered in a string of length . See Also -------- center",
    "type": "method",
    "file_path": "numpy\\numpy\\_core\\defchararray.py",
    "ast_data": "FunctionDef name:center arg:self arg:width arg:fillchar arguments arg arg arg Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "get_qconfig_info",
    "source_code": "@abstractmethod\ndef get_qconfig_info(self, model) -> dict[str, DetectorQConfigInfo]:\n    pass",
    "docstring": "Returns the DetectorQConfigInfo for each module_fqn relevant Args model (nn.Module or subclass): model to find observer insertion points Returns a Dict mapping from unique observer fqns (where we want to insert them) to: A DetectorQConfigInfo with the information to generate a QConfig for a specific module",
    "type": "method",
    "file_path": "pytorch\\torch\\ao\\quantization\\fx\\_model_report\\detector.py",
    "ast_data": "FunctionDef name:get_qconfig_info arg:self arg:model arguments arg arg"
  },
  {
    "library": "scikit-learn",
    "name": "indices_to_mask",
    "source_code": "def indices_to_mask(indices, mask_length):\n    if mask_length <= np.max(indices):\n        raise ValueError('mask_length must be greater than max(indices)')\n    mask = np.zeros(mask_length, dtype=bool)\n    mask[indices] = True\n    return mask",
    "docstring": "Convert list of indices to boolean mask. Parameters ---------- indices : list-like List of integers treated as indices. mask_length : int Length of boolean mask to be generated. This parameter must be greater than max(indices). Returns ------- mask : 1d boolean nd-array Boolean array that is True where indices are present, else False. Examples -------- >>> from sklearn.utils._mask import indices_to_mask >>> indices = [1, 2 , 3, 4] >>> indices_to_mask(indices, 5) array([False, True, True, True, True])",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\utils\\_mask.py",
    "ast_data": "FunctionDef name:indices_to_mask arg:indices arg:mask_length arguments arg arg If Compare Call Raise Call Assign Call Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "from_control_flow_context_def",
    "source_code": "def from_control_flow_context_def(context_def, import_scope=None):\n    if context_def.HasField('cond_ctxt'):\n        return CondContext.from_proto(context_def.cond_ctxt, import_scope=import_scope)\n    if context_def.HasField('while_ctxt'):\n        return WhileContext.from_proto(context_def.while_ctxt, import_scope=import_scope)\n    raise NotImplementedError('Unknown ControlFlowContextDef field: %s' % context_def.WhichOneof('ctxt'))",
    "docstring": "Deserializes into the appropriate ControlFlowContext. Args: context_def: ControlFlowContextDef proto import_scope: Optional . Name scope to add. Returns: A ControlFlowContext subclass",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\control_flow_ops.py",
    "ast_data": "FunctionDef name:from_control_flow_context_def arg:context_def arg:import_scope arguments arg arg If Call Return return:yes Call If Call Return return:yes Call Raise Call Call"
  },
  {
    "library": "numpy",
    "name": "array_function_dispatch",
    "source_code": "def array_function_dispatch(dispatcher=None, module=None, verify=True, docs_from_dispatcher=False):\n\n    def decorator(implementation):\n        if verify:\n            if dispatcher is not None:\n                verify_matching_signatures(implementation, dispatcher)\n            else:\n                co = implementation.__code__\n                last_arg = co.co_argcount + co.co_kwonlyargcount - 1\n                last_arg = co.co_varnames[last_arg]\n                if last_arg != 'like' or co.co_kwonlyargcount == 0:\n                    raise RuntimeError(f'__array_function__ expects `like=` to be the last argument and a keyword-only argument. {implementation} does not seem to comply.')\n        if docs_from_dispatcher:\n            add_docstring(implementation, dispatcher.__doc__)\n        public_api = _ArrayFunctionDispatcher(dispatcher, implementation)\n        public_api = functools.wraps(implementation)(public_api)\n        if module is not None:\n            public_api.__module__ = module\n        ARRAY_FUNCTIONS.add(public_api)\n        return public_api\n    return decorator",
    "docstring": "Decorator for adding dispatch with the __array_function__ protocol. See NEP-18 for example usage. Parameters ---------- dispatcher : callable or None Function that when called like `Nonelike=like=like`. docs_from_dispatcher : bool, optional If True, copy docs from the dispatcher function onto the dispatched function, rather than from the implementation. This is useful for functions defined in C, which otherwise don't have docstrings. Returns ------- Function suitable for decorating the implementation of a NumPy function.",
    "type": "function",
    "file_path": "numpy\\numpy\\_core\\overrides.py",
    "ast_data": "FunctionDef name:array_function_dispatch arg:dispatcher arg:module arg:verify arg:docs_from_dispatcher arguments arg arg arg arg FunctionDef name:decorator arg:implementation arguments arg If If Compare Call Assign Assign Assign If BoolOp Compare Compare Raise Call If Call Assign Call Assign Call Call If Compare Assign Call Return return:yes Return return:yes"
  },
  {
    "library": "numpy",
    "name": "FormatError",
    "source_code": "class FormatError(OSError):\n\n    def __init__(self, msg):\n        self.msg = msg\n\n    def __str__(self):\n        return self.msg",
    "docstring": "Exception thrown when there is a problem parsing a configuration file.",
    "type": "class",
    "file_path": "numpy\\numpy\\distutils\\npy_pkg_config.py",
    "ast_data": "ClassDef name:FormatError FunctionDef name:__init__ arg:self arg:msg arguments arg arg Assign FunctionDef name:__str__ arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "append",
    "source_code": "def append(self, value):\n    self._check_external_modification()\n    super().append(value)\n    self._update_snapshot()",
    "docstring": "Add a new trackable value.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\trackable\\data_structures.py",
    "ast_data": "FunctionDef name:append arg:self arg:value arguments arg arg Call Call Call Call"
  },
  {
    "library": "scipy",
    "name": "idctn",
    "source_code": "@_dispatch\ndef idctn(x, type=2, s=None, axes=None, norm=None, overwrite_x=False, workers=None, orthogonalize=None):\n    return (Dispatchable(x, np.ndarray),)",
    "docstring": "Return multidimensional Inverse Discrete Cosine Transform along the specified axes. Parameters ---------- x : array_like The input array. type : {1, 2, 3, 4}, optional Type of the DCT (see Notes). Default type is 2. s : int or array_like of ints or None, optional The shape of the result. If both and (see below) are None, is `saxess`s[i] >> import numpy as np >>> from scipy.fft import dctn, idctn >>> rng = np.random.default_rng() >>> y = rng.standard_normal((16, 16)) >>> np.allclose(y, idctn(dctn(y))) True",
    "type": "function",
    "file_path": "scipy\\scipy\\fft\\_realtransforms.py",
    "ast_data": "FunctionDef name:idctn arg:x arg:type arg:s arg:axes arg:norm arg:overwrite_x arg:workers arg:orthogonalize arguments arg arg arg arg arg arg arg arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "needs_keras_history",
    "source_code": "def needs_keras_history(tensors, ignore_call_context=False):\n    input_tensors = nest.flatten(tensors)\n    if call_context().in_call and (not ignore_call_context):\n        return False\n    if all((getattr(tensor, '_keras_history', None) is not None for tensor in input_tensors)):\n        return False\n    return uses_keras_history(tensors)",
    "docstring": "Check if any Tensors need to be wrapped in TensorFlowOpLayers. This will never return True inside a sublayer, because sublayers do not need to create Keras History. Otherwise, this returns True if one or more of originates from a and does not have set. Args: tensors: An arbitrary nested structure of Tensors. ignore_call_context: Whether to ignore the check of if currently outside of a context. This is when creating KerasHistory inside , where we always know that Tensors are being used with the Functional API. Returns: Bool, whether at least one Tensor needs to be wrapped.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\base_layer_utils.py",
    "ast_data": "FunctionDef name:needs_keras_history arg:tensors arg:ignore_call_context arguments arg arg Assign Call If BoolOp Call Return return:yes If Call Compare Call Return return:yes Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "_polynomial_coefficients_given_roots",
    "source_code": "def _polynomial_coefficients_given_roots(roots):\n    poly_order = roots.shape[-1]\n    poly_coeffs_shape = list(roots.shape)\n    poly_coeffs_shape[-1] += 2\n    poly_coeffs = roots.new_zeros(poly_coeffs_shape)\n    poly_coeffs[..., 0] = 1\n    poly_coeffs[..., -1] = 1\n    for i in range(1, poly_order + 1):\n        poly_coeffs_new = poly_coeffs.clone() if roots.requires_grad else poly_coeffs\n        out = poly_coeffs_new.narrow(-1, poly_order - i, i + 1)\n        out -= roots.narrow(-1, i - 1, 1) * poly_coeffs.narrow(-1, poly_order - i + 1, i + 1)\n        poly_coeffs = poly_coeffs_new\n    return poly_coeffs.narrow(-1, 1, poly_order + 1)",
    "docstring": "Given the of a polynomial, find the polynomial's coefficients. If roots = (r_1, ..., r_n), then the method returns coefficients (a_0, a_1, ..., a_n (== 1)) so that p(x) = (x - r_1) * ... * (x - r_n) = x^n + a_{n-1} * x^{n-1} + ... a_1 * x_1 + a_0 Note: for better performance requires writing a low-level kernel",
    "type": "function",
    "file_path": "pytorch\\torch\\_lobpcg.py",
    "ast_data": "FunctionDef name:_polynomial_coefficients_given_roots arg:roots arguments arg Assign Assign Call Assign Call Assign Assign For Call Assign Call Assign Call Call Call Assign Return return:yes Call"
  },
  {
    "library": "cryptography",
    "name": "Binding",
    "source_code": "class Binding:\n    lib: typing.ClassVar = None\n    ffi = _openssl.ffi\n    _lib_loaded = False\n    _init_lock = threading.Lock()\n\n    def __init__(self) -> None:\n        self._ensure_ffi_initialized()\n\n    @classmethod\n    def _ensure_ffi_initialized(cls) -> None:\n        with cls._init_lock:\n            if not cls._lib_loaded:\n                cls.lib = build_conditional_library(_openssl.lib, CONDITIONAL_NAMES)\n                cls._lib_loaded = True\n\n    @classmethod\n    def init_static_locks(cls) -> None:\n        cls._ensure_ffi_initialized()",
    "docstring": "OpenSSL API wrapper.",
    "type": "class",
    "file_path": "cryptography\\src\\cryptography\\hazmat\\bindings\\openssl\\binding.py",
    "ast_data": "ClassDef name:Binding Assign Assign Assign Call FunctionDef name:__init__ arg:self arguments arg Call FunctionDef name:_ensure_ffi_initialized arg:cls arguments arg With If Assign Call Assign FunctionDef name:init_static_locks arg:cls arguments arg Call"
  },
  {
    "library": "tensorflow",
    "name": "as_shape",
    "source_code": "def as_shape(shape) -> 'TensorShape':\n    if isinstance(shape, TensorShape):\n        return shape\n    else:\n        return TensorShape(shape)",
    "docstring": "Converts the given object to a TensorShape.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\tensor_shape.py",
    "ast_data": "FunctionDef name:as_shape arg:shape arguments arg If Call Return return:yes Return return:yes Call"
  },
  {
    "library": "authlib",
    "name": "validate_request_object_signing_alg_values_supported",
    "source_code": "def validate_request_object_signing_alg_values_supported(self):\n    values = self.get('request_object_signing_alg_values_supported')\n    if not values:\n        return\n    if not isinstance(values, list):\n        raise ValueError('\"request_object_signing_alg_values_supported\" MUST be JSON array')",
    "docstring": "OPTIONAL. JSON array containing a list of the JWS signing algorithms (alg values) supported by the OP for Request Objects, which are described in Section 6.1 of OpenID Connect Core 1.0. These algorithms are used both when the Request Object is passed by value (using the request parameter) and when it is passed by reference (using the request_uri parameter). Servers SHOULD support none and RS256.",
    "type": "method",
    "file_path": "authlib\\authlib\\oidc\\discovery\\models.py",
    "ast_data": "FunctionDef name:validate_request_object_signing_alg_values_supported arg:self arguments arg Assign Call If Return return:no If Call Raise Call"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, reduction=losses_utils.ReductionV2.AUTO, name='mean_squared_error'):\n    super().__init__(mean_squared_error, name=name, reduction=reduction)",
    "docstring": "Initializes instance. Args: reduction: Type of to apply to loss. Default value is . indicates that the reduction option will be determined by the usage context. For almost all cases this defaults to . When used with , outside of built-in training loops such as and , using or will raise an error. Please see this custom training [tutorial]( for more details. name: Optional name for the instance. Defaults to 'mean_squared_error'.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\losses.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:reduction arg:name arguments arg arg arg Call Call"
  },
  {
    "library": "kornia",
    "name": "ChannelDropoutGenerator",
    "source_code": "class ChannelDropoutGenerator(RandomGeneratorBase):\n\n    def __init__(self, num_drop_channels: int) -> None:\n        super().__init__()\n        self.num_drop_channels = num_drop_channels\n        self.drop_sampler: UniformDistribution\n\n    def __repr__(self) -> str:\n        repr_buf = f'num_drop_channels={self.num_drop_channels}'\n        return repr_buf\n\n    def make_samplers(self, device: torch.device, dtype: torch.dtype) -> None:\n        drop = _range_bound((0.0, 1.0), 'drop', device=device, dtype=dtype)\n        self.drop_sampler = UniformDistribution(drop[0], drop[1], validate_args=False)\n\n    def forward(self, batch_shape: tuple[int, ...], same_on_batch: bool=False) -> dict[str, Tensor]:\n        batch_size, channels, _, _ = batch_shape\n        _common_param_check(batch_size, same_on_batch)\n        _device, _dtype = (self.device, self.dtype)\n        batch_idx = torch.arange(batch_size, device=_device, dtype=torch.long).reshape(batch_size, 1)\n        channel_idx = torch.argsort(_adapted_rsampling((batch_size, channels), self.drop_sampler, same_on_batch), dim=1)[:, :self.num_drop_channels].to(torch.long)\n        return {'batch_idx': batch_idx, 'channel_idx': channel_idx}",
    "docstring": "Generate random dropout masks for channels in a batch of images. Args: num_drop_channels: The number of channels to drop randomly. Returns: A dictionary containing the dropout mask. - dropout_mask: Binary masks (bool) indicating the dropped channels with a shape of (B, C, H, W). Note: The generated random numbers are not reproducible across different devices and dtypes. By default, the parameters will be generated on CPU. This can be changed by calling ``.",
    "type": "class",
    "file_path": "kornia\\kornia\\augmentation\\random_generator\\_2d\\channel_dropout.py",
    "ast_data": "ClassDef name:ChannelDropoutGenerator FunctionDef name:__init__ arg:self arg:num_drop_channels arguments arg arg Call Call Assign FunctionDef name:__repr__ arg:self arguments arg Assign Return return:yes FunctionDef name:make_samplers arg:self arg:device arg:dtype arguments arg arg arg Assign Call Assign Call FunctionDef name:forward arg:self arg:batch_shape arg:same_on_batch arguments arg arg arg Assign Call Assign Assign Call Call Assign Call Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, tensors, every_n_iter=None, every_n_secs=None, at_end=False, formatter=None):\n    only_log_at_end = at_end and every_n_iter is None and (every_n_secs is None)\n    if not only_log_at_end and (every_n_iter is None) == (every_n_secs is None):\n        raise ValueError('either at_end and/or exactly one of every_n_iter and every_n_secs must be provided.')\n    if every_n_iter is not None and every_n_iter <= 0:\n        raise ValueError('invalid every_n_iter=%s.' % every_n_iter)\n    if not isinstance(tensors, dict):\n        self._tag_order = tensors\n        tensors = {item: item for item in tensors}\n    else:\n        self._tag_order = sorted(tensors.keys())\n    self._tensors = tensors\n    self._formatter = formatter\n    self._timer = NeverTriggerTimer() if only_log_at_end else SecondOrStepTimer(every_secs=every_n_secs, every_steps=every_n_iter)\n    self._log_at_end = at_end",
    "docstring": "Initializes a . Args: tensors: that maps string-valued tags to tensors/tensor names, or of tensors/tensor names. every_n_iter: , print the values of once every N local steps taken on the current worker. every_n_secs: or , print the values of once every N seconds. Exactly one of and should be provided. at_end: specifying whether to print the values of at the end of the run. formatter: function, takes dict of -> and returns a string. If uses default printing all tensors. Raises: ValueError: if is non-positive.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\training\\basic_session_run_hooks.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:tensors arg:every_n_iter arg:every_n_secs arg:at_end arg:formatter arguments arg arg arg arg arg arg Assign BoolOp Compare Compare If BoolOp Compare Compare Compare Raise Call If BoolOp Compare Compare Raise Call If Call Assign Assign Assign Call Call Assign Assign Assign Call Call Assign"
  },
  {
    "library": "pytorch",
    "name": "__call__",
    "source_code": "def __call__(self, *args, **kwargs) -> Sequence[torch.Tensor]:\n    import onnxruntime as ort\n    flatten_args = _process_args(args, kwargs)\n    if self._inference_session is None:\n        self.initialize_inference_session()\n    assert self._inference_session is not None\n    ort_input = {k.name: _to_ort_value(v) for k, v in zip(self.model.graph.inputs, flatten_args)}\n    run_options = ort.RunOptions()\n    run_options.log_severity_level = 3\n    logger.debug('Running the inference session with %s arguments.', len(ort_input))\n    outputs = self._inference_session.run_with_ort_values(None, ort_input, run_options=run_options)\n    logger.debug('Inference session run completed.')\n    return tuple((_from_ort_value(output) for output in outputs))",
    "docstring": "Run the ONNX model with the same arguments you would provide to the GraphModule.",
    "type": "method",
    "file_path": "pytorch\\torch\\onnx\\_internal\\exporter\\_onnx_program.py",
    "ast_data": "FunctionDef name:__call__ arg:self arguments arg arg arg Assign Call If Compare Call Compare Assign Call Call Assign Call Assign Call Call Assign Call Call Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "ignore_errors",
    "source_code": "@tf_export('data.experimental.ignore_errors')\n@deprecation.deprecated(None, 'Use `tf.data.Dataset.ignore_errors` instead.')\ndef ignore_errors(log_warning=False):\n\n    def _apply_fn(dataset):\n        return dataset.ignore_errors(log_warning)\n    return _apply_fn",
    "docstring": "Creates a from another and silently ignores any errors. Use this transformation to produce a dataset that contains the same elements as the input, but silently drops any elements that caused an error. For example: Args: log_warning: (Optional.) A 'tf.bool' scalar indicating whether ignored errors should be logged to stderr. Defaults to 'False'. Returns: A transformation function, which can be passed to .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\data\\experimental\\ops\\error_ops.py",
    "ast_data": "FunctionDef name:ignore_errors arg:log_warning arguments arg FunctionDef name:_apply_fn arg:dataset arguments arg Return return:yes Call Return return:yes Call Call"
  },
  {
    "library": "sphinx",
    "name": "warning",
    "source_code": "def warning(self, msg: object, *args: object, type: str | None=None, subtype: str | None=None, location: str | tuple[str | None, int | None] | Node | None=None, nonl: bool=True, color: str | None=None, once: bool=False, **kwargs: Any) -> None:\n    return super().warning(msg, *args, type=type, subtype=subtype, location=location, nonl=nonl, color=color, once=once, **kwargs)",
    "docstring": "Log a sphinx warning. It is recommended to include a `show_warning_typessuppress_warnings`.",
    "type": "method",
    "file_path": "sphinx\\sphinx\\util\\logging.py",
    "ast_data": "FunctionDef name:warning arg:self arg:msg arguments arg arg arg arg arg arg arg arg arg arg Return return:yes Call Call"
  },
  {
    "library": "scikit-learn",
    "name": "inverse_transform",
    "source_code": "def inverse_transform(self, X):\n    if not self.fit_inverse_transform:\n        raise NotFittedError('The fit_inverse_transform parameter was not set to True when instantiating and hence the inverse transform is not available.')\n    K = self._get_kernel(X, self.X_transformed_fit_)\n    return np.dot(K, self.dual_coef_)",
    "docstring": "Transform X back to original space. `~sklearn.decomposition.fit~sklearn.decomposition.PCA~sklearn.decomposition.PCA~sklearn.decomposition.KernelPCAn_samplesn_featuresBakır, Gökhan H., Jason Weston, and Bernhard Schölkopf. \"Learning to find pre-images.\" Advances in neural information processing systems 16 (2004): 449-456. `_",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\decomposition\\_kernel_pca.py",
    "ast_data": "FunctionDef name:inverse_transform arg:self arg:X arguments arg arg If Raise Call Assign Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_apply_sparse",
    "source_code": "def _apply_sparse(self, grad, var):\n    raise RuntimeError('This function should never be called')",
    "docstring": "This function should never be called.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\training\\experimental\\loss_scale_optimizer.py",
    "ast_data": "FunctionDef name:_apply_sparse arg:self arg:grad arg:var arguments arg arg arg Raise Call"
  },
  {
    "library": "cryptography",
    "name": "public_numbers",
    "source_code": "@abc.abstractmethod\ndef public_numbers(self) -> RSAPublicNumbers:\n    pass",
    "docstring": "Returns an RSAPublicNumbers",
    "type": "method",
    "file_path": "cryptography\\src\\cryptography\\hazmat\\primitives\\asymmetric\\rsa.py",
    "ast_data": "FunctionDef name:public_numbers arg:self arguments arg"
  },
  {
    "library": "tensorflow",
    "name": "_get_optional_partition_dtype",
    "source_code": "def _get_optional_partition_dtype(values):\n    if isinstance(values, RaggedTensor):\n        return values._row_partition.dtype\n    return None",
    "docstring": "Returns the partition dtype, or None if None exists.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\ragged_tensor.py",
    "ast_data": "FunctionDef name:_get_optional_partition_dtype arg:values arguments arg If Call Return return:yes Return return:no"
  },
  {
    "library": "scipy",
    "name": "ttest_ind_from_stats",
    "source_code": "@xp_capabilities(cpu_only=True, exceptions=['cupy', 'jax.numpy'])\ndef ttest_ind_from_stats(mean1, std1, nobs1, mean2, std2, nobs2, equal_var=True, alternative='two-sided'):\n    xp = array_namespace(mean1, std1, mean2, std2)\n    mean1 = xp.asarray(mean1)\n    std1 = xp.asarray(std1)\n    mean2 = xp.asarray(mean2)\n    std2 = xp.asarray(std2)\n    if equal_var:\n        df, denom = _equal_var_ttest_denom(std1 ** 2, nobs1, std2 ** 2, nobs2, xp=xp)\n    else:\n        df, denom = _unequal_var_ttest_denom(std1 ** 2, nobs1, std2 ** 2, nobs2, xp=xp)\n    res = _ttest_ind_from_stats(mean1, mean2, denom, df, alternative)\n    return Ttest_indResult(*res)",
    "docstring": "T-test for means of two independent samples from descriptive statistics. This is a test for the null hypothesis that two independent samples have identical average (expected) values. Parameters ---------- mean1 : array_like The mean(s) of sample 1. std1 : array_like The corrected sample standard deviation of sample 1 (i.e. `mean1mean2mean1mean2std1std2std1std2scipy.stats.ttest_ind\\hat{p}\\hat{p}(1-\\hat{p})scipy.stat.ttest_ind`, as above. >>> group1 = np.array([1]*30 + [0]*(150-30)) >>> group2 = np.array([1]*45 + [0]*(200-45)) >>> ttest_ind(group1, group2) TtestResult(statistic=-0.5627179589855622, pvalue=0.573989277115258, df=348.0)",
    "type": "function",
    "file_path": "scipy\\scipy\\stats\\_stats_py.py",
    "ast_data": "FunctionDef name:ttest_ind_from_stats arg:mean1 arg:std1 arg:nobs1 arg:mean2 arg:std2 arg:nobs2 arg:equal_var arg:alternative arguments arg arg arg arg arg arg arg arg Assign Call Assign Call Assign Call Assign Call Assign Call If Assign Call Assign Call Assign Call Return return:yes Call Call"
  },
  {
    "library": "django",
    "name": "get_permission_codename",
    "source_code": "def get_permission_codename(action, opts):\n    return '%s_%s' % (action, opts.model_name)",
    "docstring": "Return the codename of the permission for the specified action.",
    "type": "function",
    "file_path": "django\\django\\contrib\\auth\\__init__.py",
    "ast_data": "FunctionDef name:get_permission_codename arg:action arg:opts arguments arg arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "LayerVersionSelector",
    "source_code": "class LayerVersionSelector(object):\n\n    def __new__(cls, *args, **kwargs):\n        use_v2 = should_use_v2()\n        cls = swap_class(cls, base_layer.Layer, base_layer_v1.Layer, use_v2)\n        return super(LayerVersionSelector, cls).__new__(cls)",
    "docstring": "Chooses between Keras v1 and v2 Layer class.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\utils\\version_utils.py",
    "ast_data": "ClassDef name:LayerVersionSelector FunctionDef name:__new__ arg:cls arguments arg arg arg Assign Call Assign Call Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "load_library",
    "source_code": "def load_library(self, path):\n    torch.ops.load_library(path)",
    "docstring": "Loads a shared library from the given path into the current process. The library being loaded may run global initialization code to register custom classes with the PyTorch JIT runtime. This allows dynamically loading custom classes. For this, you should compile your class and the static registration code into a shared library object, and then call `` attribute, a set that may be inspected for the paths of all libraries loaded using this function. Args: path (str): A path to a shared library to load.",
    "type": "method",
    "file_path": "pytorch\\torch\\_classes.py",
    "ast_data": "FunctionDef name:load_library arg:self arg:path arguments arg arg Call"
  },
  {
    "library": "pytorch",
    "name": "flatten_args",
    "source_code": "def flatten_args(args):\n    flat_args = []\n\n    def extract_tensor_args(a):\n        nonlocal flat_args\n        flat_args.append(a)\n        return a\n    fx.node.map_aggregate(args, extract_tensor_args)\n    return flat_args",
    "docstring": "Flatten the args into a list form.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\pipelining\\_utils.py",
    "ast_data": "FunctionDef name:flatten_args arg:args arguments arg Assign FunctionDef name:extract_tensor_args arg:a arguments arg Call Return return:yes Call Return return:yes"
  },
  {
    "library": "kornia",
    "name": "vit_giant2",
    "source_code": "def vit_giant2(patch_size=16, **kwargs):\n    model = DinoVisionTransformer(patch_size=patch_size, embed_dim=1536, depth=40, num_heads=24, mlp_ratio=4, block_fn=partial(Block, attn_class=MemEffAttention), **kwargs)\n    return model",
    "docstring": "Close to ViT-giant, with embed-dim 1536 and 24 heads => embed-dim per head 64.",
    "type": "function",
    "file_path": "kornia\\kornia\\feature\\dedode\\transformer\\dinov2.py",
    "ast_data": "FunctionDef name:vit_giant2 arg:patch_size arguments arg arg Assign Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_validate_list_constructor",
    "source_code": "def _validate_list_constructor(elements, element_dtype, element_shape):\n    if element_dtype is not None and element_shape is not None:\n        return\n    if tensor_util.is_tf_type(elements):\n        return\n    if isinstance(elements, (list, tuple)):\n        if elements:\n            return\n        else:\n            raise ValueError('element_dtype and element_shape are required when elements are empty')\n    raise ValueError('unknown type for elements: {}; only Tensor, list and tuple are allowed'.format(type(elements)))",
    "docstring": "Validates the inputs of tensor_list.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\autograph\\lang\\special_functions.py",
    "ast_data": "FunctionDef name:_validate_list_constructor arg:elements arg:element_dtype arg:element_shape arguments arg arg arg If BoolOp Compare Compare Return return:no If Call Return return:no If Call If Return return:no Raise Call Raise Call Call Call"
  },
  {
    "library": "scrapy",
    "name": "logformatter_adapter",
    "source_code": "def logformatter_adapter(logkws: LogFormatterResult) -> tuple[int, str, dict[str, Any] | tuple[Any, ...]]:\n    level = logkws.get('level', logging.INFO)\n    message = logkws.get('msg') or ''\n    args = cast(dict[str, Any], logkws) if not logkws.get('args') else logkws['args']\n    return (level, message, args)",
    "docstring": "Helper that takes the dictionary output from the methods in LogFormatter and adapts it into a tuple of positional arguments for logger.log calls, handling backward compatibility as well.",
    "type": "function",
    "file_path": "scrapy\\scrapy\\utils\\log.py",
    "ast_data": "FunctionDef name:logformatter_adapter arg:logkws arguments arg Assign Call Assign BoolOp Call Assign Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "log_if",
    "source_code": "@tf_export(v1=['logging.log_if'])\ndef log_if(level, msg, condition, *args):\n    if condition:\n        vlog(level, msg, *args)",
    "docstring": "Log 'msg % args' at level 'level' only if condition is fulfilled.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\platform\\tf_logging.py",
    "ast_data": "FunctionDef name:log_if arg:level arg:msg arg:condition arguments arg arg arg arg If Call Call"
  },
  {
    "library": "cherrypy",
    "name": "default",
    "source_code": "@cherrypy.expose\ndef default(self, user):\n    if user == 'remi':\n        out = 'Remi Delon, CherryPy lead developer'\n    elif user == 'hendrik':\n        out = 'Hendrik Mans, CherryPy co-developer & crazy German'\n    elif user == 'lorenzo':\n        out = 'Lorenzo Lamas, famous actor and singer!'\n    else:\n        out = 'Unknown user. :-('\n    return '%s (<a href=\"./\">back</a>)' % out",
    "docstring": "Produce HTTP response body of the users app fallback URI.",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\tutorial\\tut06_default_method.py",
    "ast_data": "FunctionDef name:default arg:self arg:user arguments arg arg If Compare Assign If Compare Assign If Compare Assign Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "raise_exception_on_not_ok_status",
    "source_code": "@tf_export(v1=['errors.raise_exception_on_not_ok_status'])\nclass raise_exception_on_not_ok_status(object):\n\n    def __enter__(self):\n        self.status = c_api_util.ScopedTFStatus()\n        return self.status.status\n\n    def __exit__(self, type_arg, value_arg, traceback_arg):\n        try:\n            if c_api.TF_GetCode(self.status.status) != 0:\n                raise _make_specific_exception(None, None, compat.as_text(c_api.TF_Message(self.status.status)), c_api.TF_GetCode(self.status.status))\n        finally:\n            del self.status\n        return False",
    "docstring": "Context manager to check for C API status.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\errors_impl.py",
    "ast_data": "ClassDef name:raise_exception_on_not_ok_status FunctionDef name:__enter__ arg:self arguments arg Assign Call Return return:yes FunctionDef name:__exit__ arg:self arg:type_arg arg:value_arg arg:traceback_arg arguments arg arg arg arg Try If Compare Call Raise Call Call Call Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "OmittedLines",
    "source_code": "class OmittedLines:\n    omitted: set[int]\n\n    def __init__(self, lines: Sequence[str], linter_name: str) -> None:\n        self.lines = lines\n        suffix = f'# noqa: {linter_name}'\n        omitted = ((i, s.rstrip()) for i, s in enumerate(lines))\n        self.omitted = {i + 1 for i, s in omitted if s.endswith(suffix)}\n\n    def __call__(self, tokens: Sequence[TokenInfo], begin: int=0, end: int=NO_TOKEN) -> bool:\n        if end == NO_TOKEN:\n            end = len(tokens)\n        start = min((tokens[i].start[0] for i in range(begin, end)), default=0)\n        end = max((tokens[i].end[0] for i in range(begin, end)), default=-1)\n        return self.contains_lines(start, end)\n\n    def contains_lines(self, begin: int, end: int) -> bool:\n        return bool(self.omitted.intersection(range(begin, end + 1)))",
    "docstring": "Read lines textually and find comment lines that end in 'noqa {linter_name}'",
    "type": "class",
    "file_path": "pytorch\\tools\\linter\\adapters\\_linter.py",
    "ast_data": "ClassDef name:OmittedLines FunctionDef name:__init__ arg:self arg:lines arg:linter_name arguments arg arg arg Assign Assign Assign Call Call Assign Call FunctionDef name:__call__ arg:self arg:tokens arg:begin arg:end arguments arg arg arg arg If Compare Assign Call Assign Call Call Assign Call Call Return return:yes Call FunctionDef name:contains_lines arg:self arg:begin arg:end arguments arg arg arg Return return:yes Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "CodeObjectCache",
    "source_code": "class CodeObjectCache(_TransformedFnCache):\n\n    def _get_key(self, entity):\n        if hasattr(entity, '__code__'):\n            return entity.__code__\n        else:\n            return entity",
    "docstring": "A function cache based on code objects. Code objects are good proxies for the source code of a function. This cache efficiently handles functions that share code objects, such as functions defined in a loop, bound methods, etc. The cache falls back to the function object, if it doesn't have a code object.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\autograph\\pyct\\cache.py",
    "ast_data": "ClassDef name:CodeObjectCache FunctionDef name:_get_key arg:self arg:entity arguments arg arg If Call Return return:yes Return return:yes"
  },
  {
    "library": "pandas",
    "name": "TimedeltaProperties",
    "source_code": "@delegate_names(delegate=TimedeltaArray, accessors=TimedeltaArray._datetimelike_ops, typ='property')\n@delegate_names(delegate=TimedeltaArray, accessors=TimedeltaArray._datetimelike_methods, typ='method')\nclass TimedeltaProperties(Properties):\n\n    def to_pytimedelta(self) -> np.ndarray:\n        warnings.warn(f'The behavior of {type(self).__name__}.to_pytimedelta is deprecated, in a future version this will return a Series containing python datetime.timedelta objects instead of an ndarray. To retain the old behavior, call `np.array` on the result', FutureWarning, stacklevel=find_stack_level())\n        return self._get_values().to_pytimedelta()\n\n    @property\n    def components(self) -> DataFrame:\n        return self._get_values().components.set_index(self._parent.index).__finalize__(self._parent)\n\n    @property\n    def freq(self):\n        return self._get_values().inferred_freq",
    "docstring": "Accessor object for datetimelike properties of the Series values. Returns a Series indexed like the original Series. Raises TypeError if the Series does not contain datetimelike values. Examples -------- >>> seconds_series = pd.Series( ... pd.timedelta_range(start=\"1 second\", periods=3, freq=\"s\") ... ) >>> seconds_series 0 0 days 00:00:01 1 0 days 00:00:02 2 0 days 00:00:03 dtype: timedelta64[ns] >>> seconds_series.dt.seconds 0 1 1 2 2 3 dtype: int32",
    "type": "class",
    "file_path": "pandas\\pandas\\core\\indexes\\accessors.py",
    "ast_data": "ClassDef name:TimedeltaProperties FunctionDef name:to_pytimedelta arg:self arguments arg Call Call Call Return return:yes Call Call FunctionDef name:components arg:self arguments arg Return return:yes Call Call Call FunctionDef name:freq arg:self arguments arg Return return:yes Call Call Call"
  },
  {
    "library": "sphinx",
    "name": "visit_ImportFrom",
    "source_code": "def visit_ImportFrom(self, node: ast.ImportFrom) -> None:\n    for name in node.names:\n        self.add_entry(name.asname or name.name)\n        if node.module not in {'typing', 'typing_extensions'}:\n            continue\n        if name.name == 'final':\n            self.typing_final_names.add(name.asname or name.name)\n        elif name.name == 'overload':\n            self.typing_overload_names.add(name.asname or name.name)",
    "docstring": "Handles Import node and record the order of definitions.",
    "type": "method",
    "file_path": "sphinx\\sphinx\\pycode\\parser.py",
    "ast_data": "FunctionDef name:visit_ImportFrom arg:self arg:node arguments arg arg For Call BoolOp If Compare If Compare Call BoolOp If Compare Call BoolOp"
  },
  {
    "library": "django",
    "name": "sensitive_post_parameters",
    "source_code": "def sensitive_post_parameters(*parameters):\n    if len(parameters) == 1 and callable(parameters[0]):\n        raise TypeError('sensitive_post_parameters() must be called to use it as a decorator, e.g., use @sensitive_post_parameters(), not @sensitive_post_parameters.')\n\n    def decorator(view):\n        if iscoroutinefunction(view):\n\n            @wraps(view)\n            async def sensitive_post_parameters_wrapper(request, *args, **kwargs):\n                if not isinstance(request, HttpRequest):\n                    raise TypeError(\"sensitive_post_parameters didn't receive an HttpRequest object. If you are decorating a classmethod, make sure to use @method_decorator.\")\n                if parameters:\n                    request.sensitive_post_parameters = parameters\n                else:\n                    request.sensitive_post_parameters = '__ALL__'\n                return await view(request, *args, **kwargs)\n        else:\n\n            @wraps(view)\n            def sensitive_post_parameters_wrapper(request, *args, **kwargs):\n                if not isinstance(request, HttpRequest):\n                    raise TypeError(\"sensitive_post_parameters didn't receive an HttpRequest object. If you are decorating a classmethod, make sure to use @method_decorator.\")\n                if parameters:\n                    request.sensitive_post_parameters = parameters\n                else:\n                    request.sensitive_post_parameters = '__ALL__'\n                return view(request, *args, **kwargs)\n        return sensitive_post_parameters_wrapper\n    return decorator",
    "docstring": "Indicate which POST parameters used in the decorated view are sensitive, so that those parameters can later be treated in a special way, for example by hiding them when logging unhandled exceptions. Accept two forms: * with specified parameters: @sensitive_post_parameters('password', 'credit_card') def my_view(request): pw = request.POST['password'] cc = request.POST['credit_card'] ... * without any specified parameters, in which case consider all variables are sensitive: @sensitive_post_parameters() def my_view(request) ...",
    "type": "function",
    "file_path": "django\\django\\views\\decorators\\debug.py",
    "ast_data": "FunctionDef name:sensitive_post_parameters arguments arg If BoolOp Compare Call Call Raise Call FunctionDef name:decorator arg:view arguments arg If Call AsyncFunctionDef name:sensitive_post_parameters_wrapper arg:request arguments arg arg arg If Call Raise Call If Assign Assign Return return:yes Call Call FunctionDef name:sensitive_post_parameters_wrapper arg:request arguments arg arg arg If Call Raise Call If Assign Assign Return return:yes Call Call Return return:yes Return return:yes"
  },
  {
    "library": "numpy",
    "name": "_update_other_results",
    "source_code": "def _update_other_results(results, best):\n    best_con = best[1]\n    bx, by = best_con\n    mod_results = []\n    for cost, (x, y), con_sets in results:\n        if x in best_con or y in best_con:\n            continue\n        del con_sets[by - int(by > x) - int(by > y)]\n        del con_sets[bx - int(bx > x) - int(bx > y)]\n        con_sets.insert(-1, best[2][-1])\n        mod_con = (x - int(x > bx) - int(x > by), y - int(y > bx) - int(y > by))\n        mod_results.append((cost, mod_con, con_sets))\n    return mod_results",
    "docstring": "Update the positions and provisional input_sets of `` contraction.",
    "type": "function",
    "file_path": "numpy\\numpy\\_core\\einsumfunc.py",
    "ast_data": "FunctionDef name:_update_other_results arg:results arg:best arguments arg arg Assign Assign Assign For If BoolOp Compare Compare Call Compare Call Compare Call Compare Call Compare Call Assign Call Compare Call Compare Call Compare Call Compare Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "get_next",
    "source_code": "def get_next(self, name=None):\n    self._get_next_call_count += 1\n    if self._get_next_call_count > GET_NEXT_CALL_WARNING_THRESHOLD:\n        warnings.warn(GET_NEXT_CALL_WARNING_MESSAGE)\n    with ops.colocate_with(self._iterator_resource):\n        flat_ret = gen_dataset_ops.iterator_get_next(self._iterator_resource, output_types=self._flat_tensor_types, output_shapes=self._flat_tensor_shapes, name=name)\n        return structure.from_tensor_list(self._element_spec, flat_ret)",
    "docstring": "Returns the next element. In graph mode, you should typically call this method *once* and use its result as the input to another computation. A typical loop will then call on the result of that computation. The loop will terminate when the operation raises . The following skeleton shows how to use this method when building a training loop: NOTE: It is legitimate to call multiple times, e.g. when you are distributing different elements to multiple devices in a single step. However, a common pitfall arises when users call in each iteration of their training loop. adds ops to the graph, and executing each op allocates resources (including threads); as a consequence, invoking it in every iteration of a training loop causes slowdown and eventual resource exhaustion. To guard against this outcome, we log a warning when the number of uses crosses a fixed threshold of suspiciousness. Args: name: (Optional.) A name for the created operation. Returns: A (nested) structure of values matching .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\data\\ops\\iterator_ops.py",
    "ast_data": "FunctionDef name:get_next arg:self arg:name arguments arg arg If Compare Call With Call Assign Call Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "get_object",
    "source_code": "@classmethod\ndef get_object(cls, obj, transposed: bool):\n    return obj",
    "docstring": "return the data for this obj",
    "type": "method",
    "file_path": "pandas\\pandas\\io\\pytables.py",
    "ast_data": "FunctionDef name:get_object arg:cls arg:obj arg:transposed arguments arg arg arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "parents",
    "source_code": "@property\ndef parents(self):\n    return [self.categorical_column]",
    "docstring": "See 'FeatureColumn` base class.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\feature_column\\feature_column_v2.py",
    "ast_data": "FunctionDef name:parents arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "losses",
    "source_code": "@property\ndef losses(self):\n    aggregated = []\n    for layer in self.layers:\n        if hasattr(layer, 'losses'):\n            aggregated += layer.losses\n    return aggregated",
    "docstring": "Aggregate losses from any instances.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\trackable\\data_structures.py",
    "ast_data": "FunctionDef name:losses arg:self arguments arg Assign For If Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_add_summary_recording_cond_transformer",
    "source_code": "def _add_summary_recording_cond_transformer(parent, node, full_name, name, logs, cond):\n    node.args.append(pasta.parse(cond))\n    logs.append((ast_edits.INFO, node.lineno, node.col_offset, 'Adding `%s` argument to %s in anticipation of it being renamed to tf.compat.v2.summary.record_if()' % (cond, full_name or name)))\n    return node",
    "docstring": "Adds cond argument to tf.contrib.summary.xxx_record_summaries(). This is in anticipation of them being renamed to tf.summary.record_if(), which requires the cond argument.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\tools\\compatibility\\tf_upgrade_v2.py",
    "ast_data": "FunctionDef name:_add_summary_recording_cond_transformer arg:parent arg:node arg:full_name arg:name arg:logs arg:cond arguments arg arg arg arg arg arg Call Call Call BoolOp Return return:yes"
  },
  {
    "library": "scipy",
    "name": "Pointer",
    "source_code": "class Pointer:\n\n    def __init__(self, index):\n        self.index = index\n        return",
    "docstring": "Class used to define pointers",
    "type": "class",
    "file_path": "scipy\\scipy\\io\\_idl.py",
    "ast_data": "ClassDef name:Pointer FunctionDef name:__init__ arg:self arg:index arguments arg arg Assign Return return:no"
  },
  {
    "library": "tensorflow",
    "name": "schedule",
    "source_code": "def schedule(self, function, args, kwargs):\n    closure = Closure(function, self.closure_queue._cancellation_mgr, args=args, kwargs=kwargs)\n    ret = closure.build_output_remote_value()\n    self.closure_queue.put(closure)\n    return ret",
    "docstring": "Schedules to be dispatched to a worker for execution. Args: function: The function to be dispatched to a worker for execution asynchronously. args: Positional arguments for . kwargs: Keyword arguments for . Returns: A object.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\coordinator\\cluster_coordinator.py",
    "ast_data": "FunctionDef name:schedule arg:self arg:function arg:args arg:kwargs arguments arg arg arg arg Assign Call Assign Call Call Return return:yes"
  },
  {
    "library": "pandas",
    "name": "_values_for_factorize",
    "source_code": "def _values_for_factorize(self) -> tuple[np.ndarray, Any]:\n    values = self._pa_array.to_numpy()\n    return (values, self.dtype.na_value)",
    "docstring": "Return an array and missing value suitable for factorization. Returns ------- values : ndarray na_value : pd.NA Notes ----- The values returned by this method are also used in :func:.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\arrays\\arrow\\array.py",
    "ast_data": "FunctionDef name:_values_for_factorize arg:self arguments arg Assign Call Return return:yes"
  },
  {
    "library": "cherrypy",
    "name": "default_status",
    "source_code": "@classproperty\ndef default_status(cls):\n    return 303 if cherrypy.serving.request.protocol >= (1, 1) else 302",
    "docstring": "Redirect status for the request. This is the default handler. RFC 2616 indicates a 301 response code fits our goal; however, browser support for 301 is quite messy. Use 302/303 instead. See",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\_cperror.py",
    "ast_data": "FunctionDef name:default_status arg:cls arguments arg Return return:yes Compare"
  },
  {
    "library": "cherrypy",
    "name": "SimplePlugin",
    "source_code": "class SimplePlugin(object):\n    bus = None\n    'A :class:`Bus <cherrypy.process.wspbus.Bus>`, usually cherrypy.engine.\\n    '\n\n    def __init__(self, bus):\n        self.bus = bus\n\n    def subscribe(self):\n        for channel in self.bus.listeners:\n            method = getattr(self, channel, None)\n            if method is not None:\n                self.bus.subscribe(channel, method)\n\n    def unsubscribe(self):\n        for channel in self.bus.listeners:\n            method = getattr(self, channel, None)\n            if method is not None:\n                self.bus.unsubscribe(channel, method)",
    "docstring": "Plugin base class which auto-subscribes methods for known channels.",
    "type": "class",
    "file_path": "cherrypy\\cherrypy\\process\\plugins.py",
    "ast_data": "ClassDef name:SimplePlugin Assign FunctionDef name:__init__ arg:self arg:bus arguments arg arg Assign FunctionDef name:subscribe arg:self arguments arg For Assign Call If Compare Call FunctionDef name:unsubscribe arg:self arguments arg For Assign Call If Compare Call"
  },
  {
    "library": "tensorflow",
    "name": "is_literal",
    "source_code": "def is_literal(node):\n    if is_constant(node):\n        return True\n    if isinstance(node, gast.Name) and node.id in ['True', 'False', 'None']:\n        return True\n    return False",
    "docstring": "Tests whether node represents a Python literal.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\autograph\\pyct\\gast_util.py",
    "ast_data": "FunctionDef name:is_literal arg:node arguments arg If Call Return return:yes If BoolOp Call Compare Return return:yes Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "field_value",
    "source_code": "def field_value(self, field_name):\n    if isinstance(field_name, (list, tuple)):\n        value = self\n        for f in field_name:\n            if not isinstance(value, StructuredTensor):\n                raise KeyError('Field path {} not found in {}'.format(field_name, self))\n            value = value.field_value(f)\n        return value\n    return self._fields[field_name]",
    "docstring": "Returns the tensor value for the specified field or path. If is a , then it names a field directly owned by this . If this has shape , then the returned tensor will have shape , where the slice contains the field value for the structure at . If is a of , then it specifies a path to a field owned by nested . In particular, is equivalent to Args: field_name: or of : The field whose values should be returned. Returns: , , or . Raises: KeyError: If the given field_name is not found.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\structured\\structured_tensor.py",
    "ast_data": "FunctionDef name:field_value arg:self arg:field_name arguments arg arg If Call Assign For If Call Raise Call Call Assign Call Return return:yes Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "_to_unmasked_float_array",
    "source_code": "def _to_unmasked_float_array(x):\n    if hasattr(x, 'mask'):\n        return np.ma.asanyarray(x, float).filled(np.nan)\n    else:\n        return np.asanyarray(x, float)",
    "docstring": "Convert a sequence to a float array; if input was a masked array, masked values are converted to nans.",
    "type": "function",
    "file_path": "matplotlib\\lib\\matplotlib\\cbook.py",
    "ast_data": "FunctionDef name:_to_unmasked_float_array arg:x arguments arg If Call Return return:yes Call Call Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "__deepcopy__",
    "source_code": "@final\ndef __deepcopy__(self, memo=None) -> Self:\n    return self.copy(deep=True)",
    "docstring": "Parameters ---------- memo, default None Standard signature. Unused",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\generic.py",
    "ast_data": "FunctionDef name:__deepcopy__ arg:self arg:memo arguments arg arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "can_decode",
    "source_code": "def can_decode(self, value):\n    if value.HasField('type_spec_value'):\n        type_spec_class_enum = value.type_spec_value.type_spec_class\n        return type_spec_class_enum == self.type_spec_proto_enum\n    return False",
    "docstring": "Returns true if can be decoded into its built-in TypeSpec.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\saved_model\\nested_structure_coder.py",
    "ast_data": "FunctionDef name:can_decode arg:self arg:value arguments arg arg If Call Assign Return return:yes Compare Return return:yes"
  },
  {
    "library": "scipy",
    "name": "Tripod",
    "source_code": "class Tripod(Benchmark):\n\n    def __init__(self, dimensions=2):\n        Benchmark.__init__(self, dimensions)\n        self._bounds = list(zip([-100.0] * self.N, [100.0] * self.N))\n        self.global_optimum = [[0.0, -50.0]]\n        self.fglob = 0.0\n\n    def fun(self, x, *args):\n        self.nfev += 1\n        p1 = float(x[0] >= 0)\n        p2 = float(x[1] >= 0)\n        return p2 * (1.0 + p1) + abs(x[0] + 50.0 * p2 * (1.0 - 2.0 * p1)) + abs(x[1] + 50.0 * (1.0 - 2.0 * p2))",
    "docstring": "Tripod objective function. This class defines the Tripod [1]_ global optimization problem. This is a multimodal minimization problem defined as follows: .. math:: f_{\\text{Tripod}}(x) = p(x_2) \\left[1 + p(x_1) \\right] + \\lvert x_1 + 50p(x_2) \\left[1 - 2p(x_1) \\right] \\rvert + \\lvert x_2 + 50\\left[1 - 2p(x_2)\\right] \\rvert with :math: for :math:. *Global optimum*: :math: for :math: .. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions For Global Optimization Problems Int. Journal of Mathematical Modelling and Numerical Optimisation, 2013, 4, 150-194.",
    "type": "class",
    "file_path": "scipy\\benchmarks\\benchmarks\\go_benchmark_functions\\go_funcs_T.py",
    "ast_data": "ClassDef name:Tripod FunctionDef name:__init__ arg:self arg:dimensions arguments arg arg Call Assign Call Call Assign Assign FunctionDef name:fun arg:self arg:x arguments arg arg arg Assign Call Compare Assign Call Compare Return return:yes Call Call"
  },
  {
    "library": "django",
    "name": "InvalidSessionKey",
    "source_code": "class InvalidSessionKey(SuspiciousOperation):\n    pass",
    "docstring": "Invalid characters in session key",
    "type": "class",
    "file_path": "django\\django\\contrib\\sessions\\exceptions.py",
    "ast_data": "ClassDef name:InvalidSessionKey"
  },
  {
    "library": "django",
    "name": "identifier_converter",
    "source_code": "def identifier_converter(self, name):\n    return name",
    "docstring": "Apply a conversion to the identifier for the purposes of comparison. The default identifier converter is for case sensitive comparison.",
    "type": "method",
    "file_path": "django\\django\\db\\backends\\base\\introspection.py",
    "ast_data": "FunctionDef name:identifier_converter arg:self arg:name arguments arg arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_reset",
    "source_code": "def _reset(self):\n    if self.mode not in ['auto', 'min', 'max']:\n        logging.warning('Learning rate reduction mode %s is unknown, fallback to auto mode.', self.mode)\n        self.mode = 'auto'\n    if self.mode == 'min' or (self.mode == 'auto' and 'acc' not in self.monitor):\n        self.monitor_op = lambda a, b: np.less(a, b - self.min_delta)\n        self.best = np.inf\n    else:\n        self.monitor_op = lambda a, b: np.greater(a, b + self.min_delta)\n        self.best = -np.inf\n    self.cooldown_counter = 0\n    self.wait = 0",
    "docstring": "Resets wait counter and cooldown counter.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\callbacks.py",
    "ast_data": "FunctionDef name:_reset arg:self arguments arg If Compare Call Assign If BoolOp Compare BoolOp Compare Compare Assign arguments arg arg Call Assign Assign arguments arg arg Call Assign Assign Assign"
  },
  {
    "library": "tensorflow",
    "name": "_with_space_to_batch_base_paddings",
    "source_code": "def _with_space_to_batch_base_paddings(filter_shape, num_spatial_dims, rate_or_const_rate):\n    filter_spatial_shape = filter_shape[:num_spatial_dims]\n    pad_extra_shape = (filter_spatial_shape - 1) * rate_or_const_rate\n    pad_extra_start = pad_extra_shape // 2\n    pad_extra_end = pad_extra_shape - pad_extra_start\n    base_paddings = array_ops_stack.stack([[pad_extra_start[i], pad_extra_end[i]] for i in range(num_spatial_dims)])\n    return base_paddings",
    "docstring": "Helper function to compute base_paddings.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\nn_ops.py",
    "ast_data": "FunctionDef name:_with_space_to_batch_base_paddings arg:filter_shape arg:num_spatial_dims arg:rate_or_const_rate arguments arg arg arg Assign Assign Assign Assign Assign Call Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "graphviz_dump_transform",
    "source_code": "def graphviz_dump_transform(transform, dest, *, highlight=None):\n    if highlight is None:\n        highlight = [transform]\n    seen = set()\n\n    def recurse(root, buf):\n        if id(root) in seen:\n            return\n        seen.add(id(root))\n        props = {}\n        label = type(root).__name__\n        if root._invalid:\n            label = f'[{label}]'\n        if root in highlight:\n            props['style'] = 'bold'\n        props['shape'] = 'box'\n        props['label'] = '\"%s\"' % label\n        props = ' '.join(map('{0[0]}={0[1]}'.format, props.items()))\n        buf.write(f'{id(root)} [{props}];\\n')\n        for key, val in vars(root).items():\n            if isinstance(val, TransformNode) and id(root) in val._parents:\n                buf.write(f'\"{id(root)}\" -> \"{id(val)}\" [label=\"{key}\", fontsize=10];\\n')\n                recurse(val, buf)\n    buf = StringIO()\n    buf.write('digraph G {\\n')\n    recurse(transform, buf)\n    buf.write('}\\n')\n    subprocess.run(['dot', '-T', Path(dest).suffix[1:], '-o', dest], input=buf.getvalue().encode('utf-8'), check=True)",
    "docstring": "Generate a graphical representation of the transform tree for *transform* using the :program: program (which this function depends on). The output format (png, dot, etc.) is determined from the suffix of *dest*. Parameters ---------- transform : The represented transform. dest : str Output filename. The extension must be one of the formats supported by :program:, e.g. png, svg, dot, ... (see highlight : list of or None The transforms in the tree to be drawn in bold. If *None*, *transform* is highlighted.",
    "type": "function",
    "file_path": "matplotlib\\lib\\matplotlib\\_internal_utils.py",
    "ast_data": "FunctionDef name:graphviz_dump_transform arg:transform arg:dest arguments arg arg arg If Compare Assign Assign Call FunctionDef name:recurse arg:root arg:buf arguments arg arg If Compare Call Return return:no Call Call Assign Assign Call If Assign If Compare Assign Assign Assign Assign Call Call Call Call Call For Call Call If BoolOp Call Compare Call Call Call Call Call Assign Call Call Call Call Call Call Call Call"
  },
  {
    "library": "seaborn",
    "name": "format_data",
    "source_code": "def format_data(self, data, pivot_kws, z_score=None, standard_scale=None):\n    if pivot_kws is not None:\n        data2d = data.pivot(**pivot_kws)\n    else:\n        data2d = data\n    if z_score is not None and standard_scale is not None:\n        raise ValueError('Cannot perform both z-scoring and standard-scaling on data')\n    if z_score is not None:\n        data2d = self.z_score(data2d, z_score)\n    if standard_scale is not None:\n        data2d = self.standard_scale(data2d, standard_scale)\n    return data2d",
    "docstring": "Extract variables from data or use directly.",
    "type": "method",
    "file_path": "seaborn\\seaborn\\matrix.py",
    "ast_data": "FunctionDef name:format_data arg:self arg:data arg:pivot_kws arg:z_score arg:standard_scale arguments arg arg arg arg arg If Compare Assign Call Assign If BoolOp Compare Compare Raise Call If Compare Assign Call If Compare Assign Call Return return:yes"
  },
  {
    "library": "numpy",
    "name": "can_ccompiler_link",
    "source_code": "def can_ccompiler_link(self, ccompiler):\n    return True",
    "docstring": "Check if the given C compiler can link objects produced by this compiler.",
    "type": "method",
    "file_path": "numpy\\numpy\\distutils\\fcompiler\\__init__.py",
    "ast_data": "FunctionDef name:can_ccompiler_link arg:self arg:ccompiler arguments arg arg Return return:yes"
  },
  {
    "library": "django",
    "name": "ljust",
    "source_code": "@register.filter(is_safe=True)\n@stringfilter\ndef ljust(value, arg):\n    return value.ljust(int(arg))",
    "docstring": "Left-align the value in a field of a given width.",
    "type": "function",
    "file_path": "django\\django\\template\\defaultfilters.py",
    "ast_data": "FunctionDef name:ljust arg:value arg:arg arguments arg arg Return return:yes Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "set_default_dtype",
    "source_code": "def set_default_dtype(fp_dtype='numpy', int_dtype='numpy'):\n    if fp_dtype not in ['numpy', 'pytorch']:\n        fp_dtype = dtype(fp_dtype).torch_dtype\n    if int_dtype not in ['numpy', 'pytorch']:\n        int_dtype = dtype(int_dtype).torch_dtype\n    if fp_dtype == 'numpy':\n        float_dtype = torch.float64\n    elif fp_dtype == 'pytorch':\n        float_dtype = torch.float32\n    else:\n        float_dtype = fp_dtype\n    complex_dtype = {torch.float64: torch.complex128, torch.float32: torch.complex64, torch.float16: torch.complex64}[float_dtype]\n    if int_dtype in ['numpy', 'pytorch']:\n        int_dtype = torch.int64\n    else:\n        int_dtype = int_dtype\n    new_defaults = _dtypes_impl.DefaultDTypes(float_dtype=float_dtype, complex_dtype=complex_dtype, int_dtype=int_dtype)\n    old_defaults = _dtypes_impl.default_dtypes\n    _dtypes_impl._default_dtypes = new_defaults\n    return old_defaults",
    "docstring": "Set the (global) defaults for fp, complex, and int dtypes. The complex dtype is inferred from the float (fp) dtype. It has a width at least twice the width of the float dtype, i.e., it's complex128 for float64 and complex64 for float32. Parameters ---------- fp_dtype Allowed values are \"numpy\", \"pytorch\" or dtype_like things which can be converted into a DType instance. Default is \"numpy\" (i.e. float64). int_dtype Allowed values are \"numpy\", \"pytorch\" or dtype_like things which can be converted into a DType instance. Default is \"numpy\" (i.e. int64). Returns ------- The old default dtype state: a namedtuple with attributes ``. These attributes store *pytorch* dtypes. Notes ------------ This functions has a side effect: it sets the global state with the provided dtypes. The complex dtype has bit width of at least twice the width of the float dtype, i.e. it's complex128 for float64 and complex64 for float32.",
    "type": "function",
    "file_path": "pytorch\\torch\\_numpy\\_dtypes.py",
    "ast_data": "FunctionDef name:set_default_dtype arg:fp_dtype arg:int_dtype arguments arg arg If Compare Assign Call If Compare Assign Call If Compare Assign If Compare Assign Assign Assign If Compare Assign Assign Assign Call Assign Assign Return return:yes"
  },
  {
    "library": "kornia",
    "name": "sharpness",
    "source_code": "@perform_keep_shape_image\ndef sharpness(input: Tensor, factor: Union[float, Tensor]) -> Tensor:\n    if not isinstance(factor, Tensor):\n        factor = torch.as_tensor(factor, device=input.device, dtype=input.dtype)\n    if len(factor.size()) != 0 and factor.shape != torch.Size([input.size(0)]):\n        raise AssertionError(f'Input batch size shall match with factor size if factor is not a 0-dim tensor. Got {input.size(0)} and {factor.shape}')\n    kernel = torch.as_tensor([[1, 1, 1], [1, 5, 1], [1, 1, 1]], dtype=input.dtype, device=input.device).view(1, 1, 3, 3).repeat(input.size(1), 1, 1, 1) / 13\n    degenerate = torch.nn.functional.conv2d(input, kernel, bias=None, stride=1, groups=input.size(1))\n    degenerate = torch.clamp(degenerate, 0.0, 1.0)\n    mask = torch.ones_like(degenerate)\n    padded_mask = torch.nn.functional.pad(mask, [1, 1, 1, 1])\n    padded_degenerate = torch.nn.functional.pad(degenerate, [1, 1, 1, 1])\n    result = torch.where(padded_mask == 1, padded_degenerate, input)\n    if len(factor.size()) == 0:\n        return _blend_one(result, input, factor)\n    return torch.stack([_blend_one(result[i], input[i], factor[i]) for i in range(len(factor))])",
    "docstring": "Apply sharpness to the input tensor. .. image:: _static/img/sharpness.png Implemented Sharpness function from PIL using torch ops. This implementation refers to: Args: input: image tensor with shape :math: to sharpen. factor: factor of sharpness strength. Must be above 0. If float or one element tensor, input will be sharpened by the same factor across the whole batch. If 1-d tensor, input will be sharpened element-wisely, len(factor) == len(input). Returns: Sharpened image or images with shape :math:. Example: >>> x = torch.rand(1, 1, 5, 5) >>> sharpness(x, 0.5).shape torch.Size([1, 1, 5, 5])",
    "type": "function",
    "file_path": "kornia\\kornia\\enhance\\adjust.py",
    "ast_data": "FunctionDef name:sharpness arg:input arg:factor arguments arg arg If Call Assign Call If BoolOp Compare Call Call Compare Call Call Raise Call Call Assign Call Call Call Call Assign Call Call Assign Call Assign Call Assign Call Assign Call Assign Call Compare If Compare Call Call Return return:yes Call Return return:yes Call Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "fuse_linear_bn_weights",
    "source_code": "def fuse_linear_bn_weights(linear_w: torch.Tensor, linear_b: torch.Tensor | None, bn_rm: torch.Tensor, bn_rv: torch.Tensor, bn_eps: float, bn_w: torch.Tensor, bn_b: torch.Tensor) -> tuple[torch.nn.Parameter, torch.nn.Parameter]:\n    linear_weight_dtype = linear_w.dtype\n    linear_bias_dtype = linear_b.dtype if linear_b is not None else linear_weight_dtype\n    if linear_b is None:\n        linear_b = torch.zeros_like(bn_rm)\n    bn_scale = bn_w * torch.rsqrt(bn_rv + bn_eps)\n    fused_w = linear_w * bn_scale.unsqueeze(-1).to(dtype=linear_weight_dtype)\n    fused_b = ((linear_b - bn_rm) * bn_scale + bn_b).to(dtype=linear_bias_dtype)\n    return (torch.nn.Parameter(fused_w, linear_w.requires_grad), torch.nn.Parameter(fused_b, linear_b.requires_grad))",
    "docstring": "Fuse linear module parameters and BatchNorm module parameters into new linear module parameters. Args: linear_w (torch.Tensor): Linear weight. linear_b (Optional[torch.Tensor]): Linear bias. bn_rm (torch.Tensor): BatchNorm running mean. bn_rv (torch.Tensor): BatchNorm running variance. bn_eps (float): BatchNorm epsilon. bn_w (torch.Tensor): BatchNorm weight. bn_b (torch.Tensor): BatchNorm bias. Returns: Tuple[torch.nn.Parameter, torch.nn.Parameter]: Fused linear weight and bias.",
    "type": "function",
    "file_path": "pytorch\\torch\\nn\\utils\\fusion.py",
    "ast_data": "FunctionDef name:fuse_linear_bn_weights arg:linear_w arg:linear_b arg:bn_rm arg:bn_rv arg:bn_eps arg:bn_w arg:bn_b arguments arg arg arg arg arg arg arg Assign Assign Compare If Compare Assign Call Assign Call Assign Call Call Assign Call Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_should_save_on_batch",
    "source_code": "def _should_save_on_batch(self, batch):\n    if self.save_freq == 'epoch':\n        return False\n    if batch <= self._last_batch_seen:\n        add_batches = batch + 1\n    else:\n        add_batches = batch - self._last_batch_seen\n    self._batches_seen_since_last_saving += add_batches\n    self._last_batch_seen = batch\n    if self._batches_seen_since_last_saving >= self.save_freq:\n        self._batches_seen_since_last_saving = 0\n        return True\n    return False",
    "docstring": "Handles batch-level saving logic, supports steps_per_execution.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\callbacks.py",
    "ast_data": "FunctionDef name:_should_save_on_batch arg:self arg:batch arguments arg arg If Compare Return return:yes If Compare Assign Assign Assign If Compare Assign Return return:yes Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "MiniBatchKMeansBenchmark",
    "source_code": "class MiniBatchKMeansBenchmark(Predictor, Transformer, Estimator, Benchmark):\n    param_names = ['representation', 'init']\n    params = (['dense', 'sparse'], ['random', 'k-means++'])\n\n    def setup_cache(self):\n        super().setup_cache()\n\n    def make_data(self, params):\n        representation, init = params\n        if representation == 'sparse':\n            data = _20newsgroups_highdim_dataset()\n        else:\n            data = _blobs_dataset(n_clusters=20)\n        return data\n\n    def make_estimator(self, params):\n        representation, init = params\n        max_iter = 5 if representation == 'sparse' else 2\n        estimator = MiniBatchKMeans(n_clusters=20, init=init, n_init=1, max_iter=max_iter, batch_size=1000, max_no_improvement=None, compute_labels=False, random_state=0)\n        return estimator\n\n    def make_scorers(self):\n        self.train_scorer = lambda _, __: neg_mean_inertia(self.X, self.estimator.predict(self.X), self.estimator.cluster_centers_)\n        self.test_scorer = lambda _, __: neg_mean_inertia(self.X_val, self.estimator.predict(self.X_val), self.estimator.cluster_centers_)",
    "docstring": "Benchmarks for MiniBatchKMeans.",
    "type": "class",
    "file_path": "scikit-learn\\asv_benchmarks\\benchmarks\\cluster.py",
    "ast_data": "ClassDef name:MiniBatchKMeansBenchmark Assign Assign FunctionDef name:setup_cache arg:self arguments arg Call Call FunctionDef name:make_data arg:self arg:params arguments arg arg Assign If Compare Assign Call Assign Call Return return:yes FunctionDef name:make_estimator arg:self arg:params arguments arg arg Assign Assign Compare Assign Call Return return:yes FunctionDef name:make_scorers arg:self arguments arg Assign arguments arg arg Call Call Assign arguments arg arg Call Call"
  },
  {
    "library": "matplotlib",
    "name": "set_which",
    "source_code": "def set_which(self, which):\n    self._which = which",
    "docstring": "Select major or minor grid lines. Parameters ---------- which : {\"major\", \"minor\"}",
    "type": "method",
    "file_path": "matplotlib\\lib\\mpl_toolkits\\axisartist\\axis_artist.py",
    "ast_data": "FunctionDef name:set_which arg:self arg:which arguments arg arg Assign"
  },
  {
    "library": "numpy",
    "name": "getargspec",
    "source_code": "def getargspec(func):\n    if ismethod(func):\n        func = func.__func__\n    if not isfunction(func):\n        raise TypeError('arg is not a Python function')\n    args, varargs, varkw = getargs(func.__code__)\n    return (args, varargs, varkw, func.__defaults__)",
    "docstring": "Get the names and default values of a function's arguments. A tuple of four things is returned: (args, varargs, varkw, defaults). 'args' is a list of the argument names (it may contain nested lists). 'varargs' and 'varkw' are the names of the * and ** arguments or None. 'defaults' is an n-tuple of the default values of the last n arguments.",
    "type": "function",
    "file_path": "numpy\\numpy\\_utils\\_inspect.py",
    "ast_data": "FunctionDef name:getargspec arg:func arguments arg If Call Assign If Call Raise Call Assign Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "size_inference_rule",
    "source_code": "@register_inference_rule('size')\ndef size_inference_rule(n: Node, symbols, constraints, counter):\n    if len(n.args) == 1:\n        size, counter = gen_tvar(counter)\n        symbols[n] = size\n        input = symbols[n.args[0]]\n        c = BinConstraintT(input, size, op_eq)\n        return ([c], counter)\n    elif len(n.args) == 2:\n        if isinstance(n.args[1], int):\n            size_index, counter = gen_dvar(counter)\n            symbols[n] = size_index\n            input = symbols[n.args[0]]\n            c2 = [GetItem(i + 1, n.args[1], size_index, input) for i in range(MAX_TENSOR_RANK)]\n            c3 = BinConstraintD(0, size_index, op_leq)\n            input_dyn = BinConstraintT(input, Dyn, op_eq)\n            output_dyn = BinConstraintD(size_index, Dyn, op_eq)\n            c1 = Conj([input_dyn, output_dyn])\n            return ([Disj([c1, Conj([Disj(c2), c3])])], counter)\n        else:\n            raise NotImplementedError\n    else:\n        raise NotImplementedError",
    "docstring": "The constraint is just lhs = rhs. Ex: size = input_ids.size()",
    "type": "function",
    "file_path": "pytorch\\torch\\fx\\experimental\\migrate_gradual_types\\constraint_generator.py",
    "ast_data": "FunctionDef name:size_inference_rule arg:n arg:symbols arg:constraints arg:counter arguments arg arg arg arg If Compare Call Assign Call Assign Assign Assign Call Return return:yes If Compare Call If Call Assign Call Assign Assign Assign Call Call Assign Call Assign Call Assign Call Assign Call Return return:yes Call Call Call Raise Raise Call"
  },
  {
    "library": "django",
    "name": "copy",
    "source_code": "def copy(self):\n    return self.__deepcopy__({})",
    "docstring": "Return a mutable copy of this object.",
    "type": "method",
    "file_path": "django\\django\\http\\request.py",
    "ast_data": "FunctionDef name:copy arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "set_dashes",
    "source_code": "def set_dashes(self, seq):\n    if seq == (None, None) or len(seq) == 0:\n        self.set_linestyle('-')\n    else:\n        self.set_linestyle((0, seq))",
    "docstring": "Set the dash sequence. The dash sequence is a sequence of floats of even length describing the length of dashes and spaces in points. For example, (5, 2, 1, 2) describes a sequence of 5 point and 1 point dashes separated by 2 point spaces. See also , which allows those spaces to be filled with a color. Parameters ---------- seq : sequence of floats (on/off ink in points) or (None, None) If *seq* is empty or ``, the linestyle will be set to solid.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\lines.py",
    "ast_data": "FunctionDef name:set_dashes arg:self arg:seq arguments arg arg If BoolOp Compare Compare Call Call Call"
  },
  {
    "library": "pandas",
    "name": "_isna_array",
    "source_code": "def _isna_array(values: ArrayLike) -> npt.NDArray[np.bool_] | NDFrame:\n    dtype = values.dtype\n    result: npt.NDArray[np.bool_] | NDFrame\n    if not isinstance(values, np.ndarray):\n        result = values.isna()\n    elif isinstance(values, np.rec.recarray):\n        result = _isna_recarray_dtype(values)\n    elif is_string_or_object_np_dtype(values.dtype):\n        result = _isna_string_dtype(values)\n    elif dtype.kind in 'mM':\n        result = values.view('i8') == iNaT\n    else:\n        result = np.isnan(values)\n    return result",
    "docstring": "Return an array indicating which values of the input array are NaN / NA. Parameters ---------- obj: ndarray or ExtensionArray The input array whose elements are to be checked. Returns ------- array-like Array of boolean values denoting the NA status of each element.",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\dtypes\\missing.py",
    "ast_data": "FunctionDef name:_isna_array arg:values arguments arg Assign If Call Assign Call If Call Assign Call If Call Assign Call If Compare Assign Compare Call Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_flatten_non_variable_composites_with_tuple_path",
    "source_code": "def _flatten_non_variable_composites_with_tuple_path(structure, path_prefix=()):\n    for path, child in nest.flatten_with_tuple_paths(structure):\n        if isinstance(child, composite_tensor.CompositeTensor) and (not _is_variable(child)):\n            spec = child._type_spec\n            yield from _flatten_non_variable_composites_with_tuple_path(spec._to_components(child), path_prefix + path + (spec.value_type.__name__,))\n        else:\n            yield (path_prefix + path, child)",
    "docstring": "Flattens composite tensors with tuple path expect variables.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\module\\module.py",
    "ast_data": "FunctionDef name:_flatten_non_variable_composites_with_tuple_path arg:structure arg:path_prefix arguments arg arg For Call If BoolOp Call Call Assign Call Call"
  },
  {
    "library": "scikit-learn",
    "name": "inverse_transform",
    "source_code": "def inverse_transform(self, X):\n    check_is_fitted(self)\n    if not self.add_indicator:\n        raise ValueError(f\"'inverse_transform' works only when 'SimpleImputer' is instantiated with 'add_indicator=True'. Got 'add_indicator={self.add_indicator}' instead.\")\n    n_features_missing = len(self.indicator_.features_)\n    non_empty_feature_count = X.shape[1] - n_features_missing\n    array_imputed = X[:, :non_empty_feature_count].copy()\n    missing_mask = X[:, non_empty_feature_count:].astype(bool)\n    n_features_original = len(self.statistics_)\n    shape_original = (X.shape[0], n_features_original)\n    X_original = np.zeros(shape_original)\n    X_original[:, self.indicator_.features_] = missing_mask\n    full_mask = X_original.astype(bool)\n    imputed_idx, original_idx = (0, 0)\n    while imputed_idx < len(array_imputed.T):\n        if not np.all(X_original[:, original_idx]):\n            X_original[:, original_idx] = array_imputed.T[imputed_idx]\n            imputed_idx += 1\n            original_idx += 1\n        else:\n            original_idx += 1\n    X_original[full_mask] = self.missing_values\n    return X_original",
    "docstring": "Convert the data back to the original representation. Inverts the operation performed on an array. This operation can only be performed after :class: is instantiated with . Note that can only invert the transform in features that have binary indicators for missing values. If a feature has no missing values at time, the feature won't have a binary indicator, and the imputation done at time won't be inverted. .. versionadded:: 0.24 Parameters ---------- X : array-like of shape (n_samples, n_features + n_features_missing_indicator) The imputed data to be reverted to original data. It has to be an augmented array of imputed data and the missing indicator mask. Returns ------- X_original : ndarray of shape (n_samples, n_features) The original with missing values as it was prior to imputation.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\impute\\_base.py",
    "ast_data": "FunctionDef name:inverse_transform arg:self arg:X arguments arg arg Call If Raise Call Assign Call Assign Assign Call Assign Call Assign Call Assign Assign Call Assign Assign Call Assign While Compare Call If Call Assign Assign Return return:yes"
  },
  {
    "library": "django",
    "name": "_setup",
    "source_code": "def _setup(self):\n    raise NotImplementedError('subclasses of LazyObject must provide a _setup() method')",
    "docstring": "Must be implemented by subclasses to initialize the wrapped object.",
    "type": "method",
    "file_path": "django\\django\\utils\\functional.py",
    "ast_data": "FunctionDef name:_setup arg:self arguments arg Raise Call"
  },
  {
    "library": "kornia",
    "name": "shear",
    "source_code": "def shear(tensor: Tensor, shear: Tensor, mode: str='bilinear', padding_mode: str='zeros', align_corners: bool=False) -> Tensor:\n    if not isinstance(tensor, Tensor):\n        raise TypeError(f'Input tensor type is not a Tensor. Got {type(tensor)}')\n    if not isinstance(shear, Tensor):\n        raise TypeError(f'Input shear type is not a Tensor. Got {type(shear)}')\n    if len(tensor.shape) not in (3, 4):\n        raise ValueError(f'Invalid tensor shape, we expect CxHxW or BxCxHxW. Got: {tensor.shape}')\n    shear_matrix: Tensor = _compute_shear_matrix(shear)\n    return affine(tensor, shear_matrix[..., :2, :3], mode, padding_mode, align_corners)",
    "docstring": "Shear the tensor. .. image:: _static/img/shear.png Args: tensor: The image tensor to be skewed with shape of :math:. shear: tensor containing the angle to shear in the x and y direction. The tensor must have a shape of (B, 2), where B is batch size, last dimension contains shx shy. mode: interpolation mode to calculate output values ``. align_corners: interpolation flag. Returns: The skewed tensor with shape same as the input. Example: >>> img = torch.rand(1, 3, 4, 4) >>> shear_factor = torch.tensor([[0.5, 0.0]]) >>> out = shear(img, shear_factor) >>> print(out.shape) torch.Size([1, 3, 4, 4])",
    "type": "function",
    "file_path": "kornia\\kornia\\geometry\\transform\\affwarp.py",
    "ast_data": "FunctionDef name:shear arg:tensor arg:shear arg:mode arg:padding_mode arg:align_corners arguments arg arg arg arg arg If Call Raise Call Call If Call Raise Call Call If Compare Call Raise Call Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "experimental_map_outside_compilation",
    "source_code": "def experimental_map_outside_compilation(computation: Callable[..., Any], *args, **kwargs) -> Any:\n    return outside_compilation_impl(True, computation, *args, **kwargs)",
    "docstring": "Maps onto shards and puts it outside any current TPU replicate scope. maps onto the shards of , where is split-sharded. Each invocation of on a split occurs on the CPU that's associated with the TPU that owns the split. Example usage: should be called inside TPUReplicateContext. That is, should be called inside a function that is passed to -- this is implied when outside compilation is invoked inside a function passed to TPUStrategy . It is invalid to invoke outside of TPUReplicateContext. should input and output tensors that are located on the TPU. Internally, adds outside compilation attributes to all ops in and moves outside-compiled ops to a host-side graph. This is similar to . Send/recv ops from/to the TPU send each split directly to the TPU's host. Args: computation: A Python function that builds the computation to place on the host. *args: the positional arguments for the computation. **kwargs: the keyword arguments for the computation. Returns: The Tensors returned by computation.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\tpu\\tpu_replication.py",
    "ast_data": "FunctionDef name:experimental_map_outside_compilation arg:computation arguments arg arg arg Return return:yes Call"
  },
  {
    "library": "numpy",
    "name": "endswith",
    "source_code": "@set_module('numpy.strings')\ndef endswith(a, suffix, start=0, end=None):\n    end = end if end is not None else MAX\n    return _endswith_ufunc(a, suffix, start, end)",
    "docstring": "Returns a boolean array which is where the string element in `False`, stop comparing at that position. Returns ------- out : ndarray Output array of bools See Also -------- str.endswith Examples -------- >>> import numpy as np >>> s = np.array(['foo', 'bar']) >>> s array(['foo', 'bar'], dtype='>> np.strings.endswith(s, 'ar') array([False, True]) >>> np.strings.endswith(s, 'a', start=1, end=2) array([False, True])",
    "type": "function",
    "file_path": "numpy\\numpy\\_core\\strings.py",
    "ast_data": "FunctionDef name:endswith arg:a arg:suffix arg:start arg:end arguments arg arg arg arg Assign Compare Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "swap_memory",
    "source_code": "@property\ndef swap_memory(self):\n    return self._swap_memory",
    "docstring": "True iff GPU-CPU memory swap is enabled for this while loop.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\control_flow_ops.py",
    "ast_data": "FunctionDef name:swap_memory arg:self arguments arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "UnwrapUnspecArg",
    "source_code": "@dataclasses.dataclass\nclass UnwrapUnspecArg:\n    dtype: torch_dtype",
    "docstring": "Marker that we need to call .item() on the tensor",
    "type": "class",
    "file_path": "pytorch\\torch\\_inductor\\codegen\\cpp_wrapper_gpu.py",
    "ast_data": "ClassDef name:UnwrapUnspecArg"
  },
  {
    "library": "scipy",
    "name": "rvs",
    "source_code": "def rvs(self, *args, **kwds):\n    discrete = kwds.pop('discrete', None)\n    rndm = kwds.pop('random_state', None)\n    args, loc, scale, size = self._parse_args_rvs(*args, **kwds)\n    cond = logical_and(self._argcheck(*args), scale >= 0)\n    if not np.all(cond):\n        message = f'Domain error in arguments. The `scale` parameter must be positive for all distributions, and many distributions have restrictions on shape parameters. Please see the `scipy.stats.{self.name}` documentation for details.'\n        raise ValueError(message)\n    if np.all(scale == 0):\n        return loc * ones(size, 'd')\n    if rndm is not None:\n        random_state_saved = self._random_state\n        random_state = check_random_state(rndm)\n    else:\n        random_state = self._random_state\n    vals = self._rvs(*args, size=size, random_state=random_state)\n    vals = vals * scale + loc\n    if rndm is not None:\n        self._random_state = random_state_saved\n    if discrete and (not isinstance(self, rv_sample)):\n        if size == ():\n            vals = int(vals)\n        else:\n            vals = vals.astype(np.int64)\n    return vals",
    "docstring": "Random variates of given type. Parameters ---------- arg1, arg2, arg3,... : array_like The shape parameter(s) for the distribution (see docstring of the instance object for more information). loc : array_like, optional Location parameter (default=0). scale : array_like, optional Scale parameter (default=1). size : int or tuple of ints, optional Defining number of random variates (default is 1). random_state : {None, int, , }, optional If is None (or ), the singleton is used. If is an int, a new `random_staterandom_statesize`.",
    "type": "method",
    "file_path": "scipy\\scipy\\stats\\_distn_infrastructure.py",
    "ast_data": "FunctionDef name:rvs arg:self arguments arg arg arg Assign Call Assign Call Assign Call Assign Call Call Compare If Call Assign Raise Call If Call Compare Return return:yes Call If Compare Assign Assign Call Assign Assign Call Assign If Compare Assign If BoolOp Call If Compare Assign Call Assign Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_normalize_path",
    "source_code": "def _normalize_path(path):\n    parent, file_name = os.path.split(path)\n    if parent:\n        raise ValueError(f'{path!r} must be only a file name')\n    else:\n        return file_name",
    "docstring": "Normalize a path by ensuring it is a string. If the resulting string contains path separators, an exception is raised.",
    "type": "function",
    "file_path": "pytorch\\torch\\package\\_importlib.py",
    "ast_data": "FunctionDef name:_normalize_path arg:path arguments arg Assign Call If Raise Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "compute_mask",
    "source_code": "def compute_mask(self, t, default_mask):\n    _validate_structured_pruning(t)\n    _validate_pruning_dim(t, self.dim)\n    tensor_size = t.shape[self.dim]\n    nparams_toprune = _compute_nparams_toprune(self.amount, tensor_size)\n    nparams_tokeep = tensor_size - nparams_toprune\n    _validate_pruning_amount(nparams_toprune, tensor_size)\n    norm = _compute_norm(t, self.n, self.dim)\n    topk = torch.topk(norm, k=nparams_tokeep, largest=True)\n\n    def make_mask(t, dim, indices):\n        mask = torch.zeros_like(t)\n        slc = [slice(None)] * len(t.shape)\n        slc[dim] = indices\n        mask[slc] = 1\n        return mask\n    if nparams_toprune == 0:\n        mask = default_mask\n    else:\n        mask = make_mask(t, self.dim, topk.indices)\n        mask *= default_mask.to(dtype=mask.dtype)\n    return mask",
    "docstring": "Compute and returns a mask for the input tensor ``",
    "type": "method",
    "file_path": "pytorch\\torch\\nn\\utils\\prune.py",
    "ast_data": "FunctionDef name:compute_mask arg:self arg:t arg:default_mask arguments arg arg arg Call Call Assign Assign Call Assign Call Assign Call Assign Call FunctionDef name:make_mask arg:t arg:dim arg:indices arguments arg arg arg Assign Call Assign Call Call Assign Assign Return return:yes If Compare Assign Assign Call Call Return return:yes"
  },
  {
    "library": "django",
    "name": "quote_value",
    "source_code": "def quote_value(self, value):\n    raise NotImplementedError()",
    "docstring": "Return a quoted version of the value so it's safe to use in an SQL string. This is not safe against injection from user code; it is intended only for use in making SQL scripts or preparing default values for particularly tricky backends (defaults are not user-defined, though, so this is safe).",
    "type": "method",
    "file_path": "django\\django\\db\\backends\\base\\schema.py",
    "ast_data": "FunctionDef name:quote_value arg:self arg:value arguments arg arg Raise Call"
  },
  {
    "library": "pytorch",
    "name": "prod",
    "source_code": "def prod(values, base=1):\n    return functools.reduce(lambda x, y: int(x) * int(y), values, base)",
    "docstring": "np.prod can overflow, so for sizes the product should be done in Python. Even though np.prod type promotes to int64, it can still overflow in which case the negative value will pass the size check and OOM when attempting to actually allocate the Tensor.",
    "type": "function",
    "file_path": "pytorch\\torch\\utils\\benchmark\\utils\\fuzzer.py",
    "ast_data": "FunctionDef name:prod arg:values arg:base arguments arg arg Return return:yes Call arguments arg arg Call Call"
  },
  {
    "library": "matplotlib",
    "name": "__init__",
    "source_code": "def __init__(self, bysecond=None, interval=1, tz=None):\n    if bysecond is None:\n        bysecond = range(60)\n    rule = rrulewrapper(SECONDLY, bysecond=bysecond, interval=interval)\n    super().__init__(rule, tz=tz)",
    "docstring": "Parameters ---------- bysecond : int or list of int, default: all seconds Ticks will be placed on every second in *bysecond*. Default is `~datetime.tzinfotimezonedateutil.tz`.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\dates.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:bysecond arg:interval arg:tz arguments arg arg arg arg If Compare Assign Call Assign Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_write_tensor_list_section",
    "source_code": "def _write_tensor_list_section(self, graph_order):\n    self._write_report('%s %s\\n' % (_MARKER_SECTION_BEGIN, _SECTION_NAME_TENSOR_LIST))\n    self._write_report('%s %d\\n' % (_FIELD_NAME_NUM_TENSORS, len(graph_order.tensors)))\n    for i in range(0, len(graph_order.tensors)):\n        tensor = graph_order.tensors[i]\n        line = '%d \"%s\"' % (i, tensor.name)\n        consumers = tensor.consumers()\n        consumers.sort(key=lambda op: op.name)\n        for consumer_op in consumers:\n            if consumer_op.name not in graph_order.op_to_idx:\n                raise ValueError('consumer_op is not in op_to_idx.  got consumer_op={}, op_to_idx={}'.format(consumer_op.name, graph_order.op_to_idx))\n            line += ' %d' % graph_order.op_to_idx[consumer_op.name]\n        line += '\\n'\n        self._write_report(line)\n    self._write_report('%s %s\\n' % (_MARKER_SECTION_END, _SECTION_NAME_TENSOR_LIST))",
    "docstring": "Writes the tensor-list section of the report.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\tpu\\tensor_tracer_report.py",
    "ast_data": "FunctionDef name:_write_tensor_list_section arg:self arg:graph_order arguments arg arg Call Call Call For Call Call Assign Assign Assign Call Call arguments arg For If Compare Raise Call Call Call Call"
  },
  {
    "library": "pandas",
    "name": "pop_header_name",
    "source_code": "def pop_header_name(row: list[Hashable], index_col: int | Sequence[int]) -> tuple[Hashable | None, list[Hashable]]:\n    if is_list_like(index_col):\n        assert isinstance(index_col, Iterable)\n        i = max(index_col)\n    else:\n        assert not isinstance(index_col, Iterable)\n        i = index_col\n    header_name = row[i]\n    header_name = None if header_name == '' else header_name\n    return (header_name, row[:i] + [''] + row[i + 1:])",
    "docstring": "Pop the header name for MultiIndex parsing. Parameters ---------- row : list The data row to parse for the header name. index_col : int, list The index columns for our data. Assumed to be non-null. Returns ------- header_name : str The extracted header name. trimmed_row : list The original data row with the header name removed.",
    "type": "function",
    "file_path": "pandas\\pandas\\io\\excel\\_util.py",
    "ast_data": "FunctionDef name:pop_header_name arg:row arg:index_col arguments arg arg If Call Call Assign Call Call Assign Assign Assign Compare Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_normalize_path",
    "source_code": "def _normalize_path(path):\n    path = os.path.realpath(path)\n    if _is_windows():\n        path = path.replace('\\\\', '/')\n    return path",
    "docstring": "Returns normalized path, with forward slashes on Windows.",
    "type": "function",
    "file_path": "tensorflow\\third_party\\xla\\third_party\\gpus\\find_cuda_config.py",
    "ast_data": "FunctionDef name:_normalize_path arg:path arguments arg Assign Call If Call Assign Call Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "_fit_transformer",
    "source_code": "def _fit_transformer(self, y):\n    if self.transformer is not None and (self.func is not None or self.inverse_func is not None):\n        raise ValueError(\"'transformer' and functions 'func'/'inverse_func' cannot both be set.\")\n    elif self.transformer is not None:\n        self.transformer_ = clone(self.transformer)\n    else:\n        if self.func is not None and self.inverse_func is None or (self.func is None and self.inverse_func is not None):\n            lacking_param, existing_param = ('func', 'inverse_func') if self.func is None else ('inverse_func', 'func')\n            raise ValueError(f\"When '{existing_param}' is provided, '{lacking_param}' must also be provided. If {lacking_param} is supposed to be the default, you need to explicitly pass it the identity function.\")\n        self.transformer_ = FunctionTransformer(func=self.func, inverse_func=self.inverse_func, validate=True, check_inverse=self.check_inverse)\n        self.transformer_.set_output(transform='default')\n    self.transformer_.fit(y)\n    if self.check_inverse:\n        idx_selected = slice(None, None, max(1, y.shape[0] // 10))\n        y_sel = _safe_indexing(y, idx_selected)\n        y_sel_t = self.transformer_.transform(y_sel)\n        if not np.allclose(y_sel, self.transformer_.inverse_transform(y_sel_t)):\n            warnings.warn(\"The provided functions or transformer are not strictly inverse of each other. If you are sure you want to proceed regardless, set 'check_inverse=False'\", UserWarning)",
    "docstring": "Check transformer and fit transformer. Create the default transformer, fit it and make additional inverse check on a subset (optional).",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\compose\\_target.py",
    "ast_data": "FunctionDef name:_fit_transformer arg:self arg:y arguments arg arg If BoolOp Compare BoolOp Compare Compare Raise Call If Compare Assign Call If BoolOp BoolOp Compare Compare BoolOp Compare Compare Assign Compare Raise Call Assign Call Call Call If Assign Call Call Assign Call Assign Call If Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "clear_cublass_cache",
    "source_code": "def clear_cublass_cache() -> None:\n    torch._C._cuda_clearCublasWorkspaces()",
    "docstring": "Cublas keeps a persistent workspace allocation for running matmuls. This poses a problem for doing warmup within a CUDAGraph private pool because we do not want persistent allocations from one one run to the next. When we begin a new run of a cudagraphs path (generation), all tensors from the previous generation are freed. This frees them the memory pool, but not elsewhere. A tensor in the cublas workspace would continue to be in use the workspace but would also get allocated in the next run. The memory would be in use in two places. To solve this, we clear cublas caches before and after warming up or recording. If a workspace is required it will be allocated to the cudagraph private pool and accounted for in the allocator for the duration of the program. There is no overhead to this on replay since cudagraphs removes allocation overhead.",
    "type": "function",
    "file_path": "pytorch\\torch\\_inductor\\cudagraph_trees.py",
    "ast_data": "FunctionDef name:clear_cublass_cache arguments Call"
  },
  {
    "library": "scipy",
    "name": "lagrangian_hessian_s",
    "source_code": "def lagrangian_hessian_s(self, z, v):\n    s = self.get_slack(z)\n    primal = self.barrier_parameter\n    primal_dual = v[-self.n_ineq:] * s\n    return np.where(v[-self.n_ineq:] > 0, primal_dual, primal)",
    "docstring": "Returns scaled Lagrangian Hessian (in relation to) -> S Hs S",
    "type": "method",
    "file_path": "scipy\\scipy\\optimize\\_trustregion_constr\\tr_interior_point.py",
    "ast_data": "FunctionDef name:lagrangian_hessian_s arg:self arg:z arg:v arguments arg arg arg Assign Call Assign Assign Return return:yes Call Compare"
  },
  {
    "library": "pandas",
    "name": "_values_for_factorize",
    "source_code": "def _values_for_factorize(self) -> tuple[np.ndarray, Any]:\n    return (self.astype(object), np.nan)",
    "docstring": "Return an array and missing value suitable for factorization. Returns ------- values : ndarray An array suitable for factorization. This should maintain order and be a supported dtype (Float64, Int64, UInt64, String, Object). By default, the extension array is cast to object dtype. na_value : object The value in to consider missing. This will be treated as NA in the factorization routines, so it will be coded as and not included in . By default, `pandas.util.hash_pandas_object` method. Examples -------- >>> pd.array([1, 2, 3])._values_for_factorize() (array([1, 2, 3], dtype=object), nan)",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\arrays\\base.py",
    "ast_data": "FunctionDef name:_values_for_factorize arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "_maybe_mark",
    "source_code": "def _maybe_mark(estimator, check, expected_failed_checks: dict[str, str] | None=None, mark: Literal['xfail', 'skip', None]=None, pytest=None):\n    should_be_marked, reason = _should_be_skipped_or_marked(estimator, check, expected_failed_checks)\n    if not should_be_marked or mark is None:\n        return (estimator, check)\n    estimator_name = estimator.__class__.__name__\n    if mark == 'xfail':\n        return pytest.param(estimator, check, marks=pytest.mark.xfail(reason=reason))\n    else:\n\n        @wraps(check)\n        def wrapped(*args, **kwargs):\n            raise SkipTest(f'Skipping {_check_name(check)} for {estimator_name}: {reason}')\n        return (estimator, wrapped)",
    "docstring": "Mark the test as xfail or skip if needed. Parameters ---------- estimator : estimator object Estimator instance for which to generate checks. check : partial or callable Check to be marked. expected_failed_checks : dict[str, str], default=None Dictionary of the form {check_name: reason} for checks that are expected to fail. mark : \"xfail\" or \"skip\" or None Whether to mark the check as xfail or skip. pytest : pytest module, default=None Pytest module to use to mark the check. This is only needed if `\"xfail\"check_estimatorpytestparametrize_with_checks` only.",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\utils\\estimator_checks.py",
    "ast_data": "FunctionDef name:_maybe_mark arg:estimator arg:check arg:expected_failed_checks arg:mark arg:pytest arguments arg arg arg arg arg Assign Call If BoolOp Compare Return return:yes Assign If Compare Return return:yes Call Call FunctionDef name:wrapped arguments arg arg Raise Call Call Call Return return:yes"
  },
  {
    "library": "kornia",
    "name": "show",
    "source_code": "def show(self, n_row: Optional[int]=None, backend: str='pil', display: bool=True) -> Optional[Any]:\n    if self._output_image is None:\n        raise ValueError('No pre-computed images found. Needs to execute first.')\n    if len(self._output_image.shape) == 3:\n        out_image = self._output_image\n    elif len(self._output_image.shape) == 4:\n        if n_row is None:\n            n_row = math.ceil(self._output_image.shape[0] ** 0.5)\n        out_image = kornia.utils.image.make_grid(self._output_image, n_row, padding=2)\n    else:\n        raise ValueError\n    if backend == 'pil' and display:\n        Image.fromarray((out_image.permute(1, 2, 0).squeeze().numpy() * 255).astype(np.uint8)).show()\n        return None\n    if backend == 'pil':\n        return Image.fromarray((out_image.permute(1, 2, 0).squeeze().numpy() * 255).astype(np.uint8))\n    raise ValueError(f'Unsupported backend `{backend}`.')",
    "docstring": "Return PIL images. Args: n_row: Number of images displayed in each row of the grid. backend: visualization backend. Only PIL is supported now. display: whether to display the image.",
    "type": "method",
    "file_path": "kornia\\kornia\\core\\module.py",
    "ast_data": "FunctionDef name:show arg:self arg:n_row arg:backend arg:display arguments arg arg arg arg If Compare Raise Call If Compare Call Assign If Compare Call If Compare Assign Call Assign Call Raise If BoolOp Compare Call Call Call Call Call Call Return return:no If Compare Return return:yes Call Call Call Call Call Raise Call"
  },
  {
    "library": "authlib",
    "name": "request",
    "source_code": "def request(self, method, url, withhold_token=False, auth=USE_CLIENT_DEFAULT, **kwargs):\n    if not withhold_token and auth is USE_CLIENT_DEFAULT:\n        if not self.token or self.token.is_expired():\n            self.refresh_token()\n        auth = self.token_auth\n    return super().request(method, url, auth=auth, **kwargs)",
    "docstring": "Send request with auto refresh token feature.",
    "type": "method",
    "file_path": "authlib\\authlib\\integrations\\httpx_client\\assertion_client.py",
    "ast_data": "FunctionDef name:request arg:self arg:method arg:url arg:withhold_token arg:auth arguments arg arg arg arg arg arg If BoolOp Compare If BoolOp Call Call Assign Return return:yes Call Call"
  },
  {
    "library": "kornia",
    "name": "batched_dot_product",
    "source_code": "def batched_dot_product(x: Tensor, y: Tensor, keepdim: bool=False) -> Tensor:\n    KORNIA_CHECK_SHAPE(x, ['*', 'N'])\n    KORNIA_CHECK_SHAPE(y, ['*', 'N'])\n    return (x * y).sum(-1, keepdim)",
    "docstring": "Return a batched version of .dot().",
    "type": "function",
    "file_path": "kornia\\kornia\\geometry\\linalg.py",
    "ast_data": "FunctionDef name:batched_dot_product arg:x arg:y arg:keepdim arguments arg arg arg Call Call Return return:yes Call"
  },
  {
    "library": "django",
    "name": "_check_fix_default_value",
    "source_code": "def _check_fix_default_value(self):\n    if not self.has_default():\n        return []\n    value = self.default\n    if isinstance(value, datetime.datetime):\n        now = None\n    elif isinstance(value, datetime.time):\n        now = _get_naive_now()\n        value = datetime.datetime.combine(now.date(), value)\n    else:\n        return []\n    return self._check_if_value_fixed(value, now=now)",
    "docstring": "Warn that using an actual date or datetime value is probably wrong; it's only evaluated on server startup.",
    "type": "method",
    "file_path": "django\\django\\db\\models\\fields\\__init__.py",
    "ast_data": "FunctionDef name:_check_fix_default_value arg:self arguments arg If Call Return return:no Assign If Call Assign If Call Assign Call Assign Call Call Return return:no Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "_check_early_stopping_loss",
    "source_code": "def _check_early_stopping_loss(self, raw_predictions, y_train, sample_weight_train, raw_predictions_val, y_val, sample_weight_val, n_threads=1):\n    self.train_score_.append(-self._loss(y_true=y_train, raw_prediction=raw_predictions, sample_weight=sample_weight_train, n_threads=n_threads))\n    if self._use_validation_data:\n        self.validation_score_.append(-self._loss(y_true=y_val, raw_prediction=raw_predictions_val, sample_weight=sample_weight_val, n_threads=n_threads))\n        return self._should_stop(self.validation_score_)\n    else:\n        return self._should_stop(self.train_score_)",
    "docstring": "Check if fitting should be early-stopped based on loss. Scores are computed on validation data or on training data.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\ensemble\\_hist_gradient_boosting\\gradient_boosting.py",
    "ast_data": "FunctionDef name:_check_early_stopping_loss arg:self arg:raw_predictions arg:y_train arg:sample_weight_train arg:raw_predictions_val arg:y_val arg:sample_weight_val arg:n_threads arguments arg arg arg arg arg arg arg arg Call Call If Call Call Return return:yes Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "node_name_from_input",
    "source_code": "def node_name_from_input(node_name: str) -> str:\n    if node_name.startswith('^'):\n        node_name = node_name[1:]\n    m = re.search('(.*):\\\\d+$', node_name)\n    if m:\n        node_name = m.group(1)\n    return node_name",
    "docstring": "Strips off ports and other decorations to get the underlying node name.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\tools\\optimize_for_inference_lib.py",
    "ast_data": "FunctionDef name:node_name_from_input arg:node_name arguments arg If Call Assign Assign Call If Assign Call Return return:yes"
  },
  {
    "library": "scipy",
    "name": "__getitem__",
    "source_code": "def __getitem__(self, x):\n    if x not in self._indices:\n        raise KeyError(x)\n    parents = self._parents\n    while self._indices[x] != self._indices[parents[x]]:\n        parents[x] = parents[parents[x]]\n        x = parents[x]\n    return x",
    "docstring": "Find the root element of . Parameters ---------- x : hashable object Input element. Returns ------- root : hashable object Root element of .",
    "type": "method",
    "file_path": "scipy\\scipy\\_lib\\_disjoint_set.py",
    "ast_data": "FunctionDef name:__getitem__ arg:self arg:x arguments arg arg If Compare Raise Call Assign While Compare Assign Assign Return return:yes"
  },
  {
    "library": "kornia",
    "name": "project_points",
    "source_code": "def project_points(point_3d: torch.Tensor, camera_matrix: torch.Tensor) -> torch.Tensor:\n    xy_coords: torch.Tensor = convert_points_from_homogeneous(point_3d)\n    return denormalize_points_with_intrinsics(xy_coords, camera_matrix)",
    "docstring": "Project a 3d point onto the 2d camera plane. Args: point_3d: tensor containing the 3d points to be projected to the camera plane. The shape of the tensor can be :math:. camera_matrix: tensor containing the intrinsics camera matrix. The tensor shape must be :math:. Returns: tensor of (u, v) cam coordinates with shape :math:. Example: >>> _ = torch.manual_seed(0) >>> X = torch.rand(1, 3) >>> K = torch.eye(3)[None] >>> project_points(X, K) tensor([[5.6088, 8.6827]])",
    "type": "function",
    "file_path": "kornia\\kornia\\geometry\\camera\\perspective.py",
    "ast_data": "FunctionDef name:project_points arg:point_3d arg:camera_matrix arguments arg arg Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "cudnn_sdp_enabled",
    "source_code": "def cudnn_sdp_enabled():\n    return torch._C._get_cudnn_sdp_enabled()",
    "docstring": ".. warning:: This flag is beta and subject to change. Returns whether cuDNN scaled dot product attention is enabled or not.",
    "type": "function",
    "file_path": "pytorch\\torch\\backends\\cuda\\__init__.py",
    "ast_data": "FunctionDef name:cudnn_sdp_enabled arguments Return return:yes Call"
  },
  {
    "library": "django",
    "name": "aincr_version",
    "source_code": "async def aincr_version(self, key, delta=1, version=None):\n    if version is None:\n        version = self.version\n    value = await self.aget(key, self._missing_key, version=version)\n    if value is self._missing_key:\n        raise ValueError(\"Key '%s' not found\" % key)\n    await self.aset(key, value, version=version + delta)\n    await self.adelete(key, version=version)\n    return version + delta",
    "docstring": "See incr_version().",
    "type": "method",
    "file_path": "django\\django\\core\\cache\\backends\\base.py",
    "ast_data": "AsyncFunctionDef name:aincr_version arg:self arg:key arg:delta arg:version arguments arg arg arg arg If Compare Assign Assign Call If Compare Raise Call Call Call Return return:yes"
  },
  {
    "library": "scipy",
    "name": "ss_diff",
    "source_code": "def ss_diff(x, a, b, period=None, _cache=_cache):\n    if isinstance(_cache, threading.local):\n        if not hasattr(_cache, 'ss_diff_cache'):\n            _cache.ss_diff_cache = {}\n        _cache = _cache.ss_diff_cache\n    tmp = asarray(x)\n    if iscomplexobj(tmp):\n        return ss_diff(tmp.real, a, b, period, _cache) + 1j * ss_diff(tmp.imag, a, b, period, _cache)\n    if period is not None:\n        a = a * 2 * pi / period\n        b = b * 2 * pi / period\n    n = len(x)\n    omega = _cache.get((n, a, b))\n    if omega is None:\n        if len(_cache) > 20:\n            while _cache:\n                _cache.popitem()\n\n        def kernel(k, a=a, b=b):\n            if k:\n                return sinh(a * k) / sinh(b * k)\n            return float(a) / b\n        omega = convolve.init_convolution_kernel(n, kernel)\n        _cache[n, a, b] = omega\n    overwrite_x = _datacopied(tmp, x)\n    return convolve.convolve(tmp, omega, overwrite_x=overwrite_x)",
    "docstring": "Return (a,b)-sinh/sinh pseudo-derivative of a periodic sequence x. If x_j and y_j are Fourier coefficients of periodic functions x and y, respectively, then:: y_j = sinh(j*a*2*pi/period)/sinh(j*b*2*pi/period) * x_j y_0 = a/b * x_0 Parameters ---------- x : array_like The array to take the pseudo-derivative from. a,b Defines the parameters of the sinh/sinh pseudo-differential operator. period : float, optional The period of the sequence x. Default is ``",
    "type": "function",
    "file_path": "scipy\\scipy\\fftpack\\_pseudo_diffs.py",
    "ast_data": "FunctionDef name:ss_diff arg:x arg:a arg:b arg:period arg:_cache arguments arg arg arg arg arg If Call If Call Assign Assign Assign Call If Call Return return:yes Call Call If Compare Assign Assign Assign Call Assign Call If Compare If Compare Call While Call FunctionDef name:kernel arg:k arg:a arg:b arguments arg arg arg If Return return:yes Call Call Return return:yes Call Assign Call Assign Assign Call Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "decision_function",
    "source_code": "def decision_function(self, X):\n    return self.score_samples(X) - self.offset_",
    "docstring": "Average anomaly score of X of the base classifiers. The anomaly score of an input sample is computed as the mean anomaly score of the trees in the forest. The measure of normality of an observation given a tree is the depth of the leaf containing this observation, which is equivalent to the number of splittings required to isolate this point. In case of several observations n_left in the leaf, the average path length of a n_left samples isolation tree is added. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) The input samples. Internally, it will be converted to ``. This is because, calculating the score may actually be faster without parallelization for a small number of samples, such as for 1000 samples or less. The user can set the number of jobs in the joblib context to control the number of parallel jobs. .. code-block:: python from joblib import parallel_backend # Note, we use threading here as the decision_function method is # not CPU bound. with parallel_backend(\"threading\", n_jobs=4): model.decision_function(X)",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\ensemble\\_iforest.py",
    "ast_data": "FunctionDef name:decision_function arg:self arg:X arguments arg arg Return return:yes Call"
  },
  {
    "library": "django",
    "name": "_get_m2m_db_table",
    "source_code": "def _get_m2m_db_table(self, opts):\n    if self.remote_field.through is not None:\n        return self.remote_field.through._meta.db_table\n    elif self.db_table:\n        return self.db_table\n    else:\n        m2m_table_name = '%s_%s' % (utils.strip_quotes(opts.db_table), self.name)\n        return utils.truncate_name(m2m_table_name, connection.ops.max_name_length())",
    "docstring": "Function that can be curried to provide the m2m table name for this relation.",
    "type": "method",
    "file_path": "django\\django\\db\\models\\fields\\related.py",
    "ast_data": "FunctionDef name:_get_m2m_db_table arg:self arg:opts arguments arg arg If Compare Return return:yes If Return return:yes Assign Call Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_CustomReciprocal",
    "source_code": "def _CustomReciprocal(x):\n    return math_ops.div_no_nan(math_ops.cast(1.0, x.dtype), x)",
    "docstring": "Wrapper function around to perform a \"safe\" reciprocal incase the input is zero. Avoids divide by zero and NaNs. Input: x -> input tensor to be reciprocat-ed. Returns: x_reciprocal -> reciprocal of x without NaNs.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\image_grad.py",
    "ast_data": "FunctionDef name:_CustomReciprocal arg:x arguments arg Return return:yes Call Call"
  },
  {
    "library": "scipy",
    "name": "estimate_error",
    "source_code": "def estimate_error(self, f, a, b, args=()):\n    nodes, weights = self.nodes_and_weights\n    lower_nodes, lower_weights = self.lower_nodes_and_weights\n    if self.xp is None:\n        self.xp = array_namespace(nodes)\n    error_nodes = self.xp.concat([nodes, lower_nodes], axis=0)\n    error_weights = self.xp.concat([weights, -lower_weights], axis=0)\n    return self.xp.abs(_apply_fixed_rule(f, a, b, error_nodes, error_weights, args, self.xp))",
    "docstring": "Estimate the error of the approximation for the integral of in rectangular region described by corners and . Parameters ---------- f : callable Function to estimate error for. must have the signature:: f(x : ndarray, \\*args) -> ndarray should accept arrays of shape:: (npoints, ndim) and output arrays of shape:: (npoints, output_dim_1, ..., output_dim_n) In this case, will return arrays of shape:: (output_dim_1, ..., output_dim_n) a, b : ndarray Lower and upper limits of integration as rank-1 arrays specifying the left and right endpoints of the intervals being integrated over. Infinite limits are currently not supported. args : tuple, optional Additional positional args passed to , if any. Returns ------- err_est : ndarray Result of error estimation. If returns arrays of shape `est`.",
    "type": "method",
    "file_path": "scipy\\scipy\\integrate\\_rules\\_base.py",
    "ast_data": "FunctionDef name:estimate_error arg:self arg:f arg:a arg:b arg:args arguments arg arg arg arg arg Assign Assign If Compare Assign Call Assign Call Assign Call Return return:yes Call Call"
  },
  {
    "library": "matplotlib",
    "name": "get_configdir",
    "source_code": "@_logged_cached('CONFIGDIR=%s')\ndef get_configdir():\n    return _get_config_or_cache_dir(_get_xdg_config_dir)",
    "docstring": "Return the string path of the configuration directory. The directory is chosen as follows: 1. If the MPLCONFIGDIR environment variable is supplied, choose that. 2. On Linux, follow the XDG specification and look first in ``. 3. If the chosen directory exists and is writable, use that as the configuration directory. 4. Else, create a temporary directory, and use it as the configuration directory.",
    "type": "function",
    "file_path": "matplotlib\\lib\\matplotlib\\__init__.py",
    "ast_data": "FunctionDef name:get_configdir arguments Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "Static",
    "source_code": "class Static(NoValue):\n    IS_PARAM = 'Symbol is a parameter to the function being analyzed.'\n    SCOPE = 'The scope for the annotated node. See activity.py.'\n    ARGS_SCOPE = 'The scope for the argument list of a function call.'\n    COND_SCOPE = 'The scope for the test node of a conditional statement.'\n    BODY_SCOPE = 'The scope for the main body of a statement (True branch for if statements, main body for loops).'\n    ORELSE_SCOPE = 'The scope for the orelse body of a statement (False branch for if statements, orelse body for loops).'\n    DEFINITIONS = 'Reaching definition information. See reaching_definitions.py.'\n    ORIG_DEFINITIONS = 'The value of DEFINITIONS that applied to the original code before any conversion.'\n    DEFINED_FNS_IN = 'Local function definitions that may exist when exiting the node. See reaching_fndefs.py'\n    DEFINED_VARS_IN = 'Symbols defined when entering the node. See reaching_definitions.py.'\n    LIVE_VARS_OUT = 'Symbols live when exiting the node. See liveness.py.'\n    LIVE_VARS_IN = 'Symbols live when entering the node. See liveness.py.'\n    TYPES = 'Static type information. See type_inference.py.'\n    CLOSURE_TYPES = 'Types of closure symbols at each detected call site.'\n    VALUE = 'Static value information. See type_inference.py.'",
    "docstring": "Container for static analysis annotation keys. The enum values are used strictly for documentation purposes.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\autograph\\pyct\\anno.py",
    "ast_data": "ClassDef name:Static Assign Assign Assign Assign Assign Assign Assign Assign Assign Assign Assign Assign Assign Assign Assign"
  },
  {
    "library": "pytorch",
    "name": "flush",
    "source_code": "def flush(self):\n    if self.all_writers is None:\n        return\n    for writer in self.all_writers.values():\n        writer.flush()",
    "docstring": "Flushes the event file to disk. Call this method to make sure that all pending events have been written to disk.",
    "type": "method",
    "file_path": "pytorch\\torch\\utils\\tensorboard\\writer.py",
    "ast_data": "FunctionDef name:flush arg:self arguments arg If Compare Return return:no For Call Call"
  },
  {
    "library": "pytorch",
    "name": "_clear_non_serializable_cached_data",
    "source_code": "def _clear_non_serializable_cached_data(self):\n    if has_torch_function_unary(self):\n        return handle_torch_function(Tensor._clear_non_serializable_cached_data, (self,), self)\n    CACHED_SIZES_STRIDES_KEYS = ['_sym_sizes_capsule', '_sym_sizes_capsule_len', '_sym_strides_capsule', '_sym_strides_capsule_len']\n    for key in CACHED_SIZES_STRIDES_KEYS:\n        self.__dict__.pop(key, None)",
    "docstring": "Clears any data cached in the tensor's `` Additional data cleared within the override must be able to be re-cached transparently to avoid breaking subclass functionality.",
    "type": "method",
    "file_path": "pytorch\\torch\\_tensor.py",
    "ast_data": "FunctionDef name:_clear_non_serializable_cached_data arg:self arguments arg If Call Return return:yes Call Assign For Call"
  },
  {
    "library": "pytorch",
    "name": "reinforce_type",
    "source_code": "def reinforce_type(self, expected_type):\n    if isinstance(expected_type, tuple):\n        expected_type = tuple[expected_type]\n    _type_check(expected_type, msg=\"'expected_type' must be a type\")\n    if not issubtype(expected_type, self.type.param):\n        raise TypeError(f\"Expected 'expected_type' as subtype of {self.type}, but found {_type_repr(expected_type)}\")\n    self.type = _DataPipeType(expected_type)\n    return self",
    "docstring": "Reinforce the type for DataPipe instance. And the 'expected_type' is required to be a subtype of the original type hint to restrict the type requirement of DataPipe instance.",
    "type": "function",
    "file_path": "pytorch\\torch\\utils\\data\\datapipes\\_typing.py",
    "ast_data": "FunctionDef name:reinforce_type arg:self arg:expected_type arguments arg arg If Call Assign Call If Call Raise Call Call Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_raise_untracked_capture_error",
    "source_code": "def _raise_untracked_capture_error(function_name, capture, internal_capture=None, node_path=None):\n    msg = f\"Tried to export a function which references an 'untracked' resource. TensorFlow objects (e.g. tf.Variable) captured by functions must be 'tracked' by assigning them to an attribute of a tracked object or assigned to an attribute of the main object directly. See the information below:\\n\\tFunction name = {function_name}\"\n    if node_path is not None:\n        msg += f'\\n\\tPath to Function = {node_path}'\n    msg += f'\\n\\tCaptured Tensor = {capture}'\n    msg += f'\\n\\t{_get_trackable_parent_error_string(capture)}'\n    if internal_capture is not None:\n        msg += f'\\n\\tInternal Tensor = {internal_capture}'\n    raise AssertionError(msg)",
    "docstring": "Raises AssertionError due to being unable to export a function.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\polymorphic_function\\saved_model_exported_concrete.py",
    "ast_data": "FunctionDef name:_raise_untracked_capture_error arg:function_name arg:capture arg:internal_capture arg:node_path arguments arg arg arg arg Assign If Compare Call If Compare Raise Call"
  },
  {
    "library": "pytorch",
    "name": "_get_package",
    "source_code": "def _get_package(self, package):\n    if hasattr(package, '__spec__'):\n        if package.__spec__.submodule_search_locations is None:\n            raise TypeError(f'{package.__spec__.name!r} is not a package')\n        else:\n            return package\n    else:\n        module = self.import_module(package)\n        if module.__spec__.submodule_search_locations is None:\n            raise TypeError(f'{package!r} is not a package')\n        else:\n            return module",
    "docstring": "Take a package name or module object and return the module. If a name, the module is imported. If the passed or imported module object is not a package, raise an exception.",
    "type": "method",
    "file_path": "pytorch\\torch\\package\\package_importer.py",
    "ast_data": "FunctionDef name:_get_package arg:self arg:package arguments arg arg If Call If Compare Raise Call Return return:yes Assign Call If Compare Raise Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "GatherDimensionNumbers",
    "source_code": "class GatherDimensionNumbers:\n    __slots__ = ('offset_dims', 'collapsed_slice_dims', 'start_index_map', 'index_vector_dim')\n\n    def __init__(self):\n        self.offset_dims = []\n        self.collapsed_slice_dims = []\n        self.start_index_map = []\n        self.index_vector_dim = 0",
    "docstring": "Python representation of a xla.GatherDimensionNumbers protobuf.",
    "type": "class",
    "file_path": "tensorflow\\third_party\\xla\\xla\\python\\xla_client.py",
    "ast_data": "ClassDef name:GatherDimensionNumbers Assign FunctionDef name:__init__ arg:self arguments arg Assign Assign Assign Assign"
  },
  {
    "library": "tensorflow",
    "name": "_NumpyCodec",
    "source_code": "class _NumpyCodec:\n\n    def can_encode(self, pyobj):\n        return isinstance(pyobj, np.ndarray)\n\n    def do_encode(self, numpy_value, encode_fn):\n        del encode_fn\n        encoded_numpy = struct_pb2.StructuredValue()\n        encoded_numpy.numpy_value.CopyFrom(tensor_util.make_tensor_proto(numpy_value))\n        return encoded_numpy\n\n    def can_decode(self, value):\n        return value.HasField('numpy_value')\n\n    def do_decode(self, value, decode_fn):\n        del decode_fn\n        tensor_proto = value.numpy_value\n        numpy = tensor_util.MakeNdarray(tensor_proto)\n        return numpy",
    "docstring": "Codec for Numpy.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\constant_op.py",
    "ast_data": "ClassDef name:_NumpyCodec FunctionDef name:can_encode arg:self arg:pyobj arguments arg arg Return return:yes Call FunctionDef name:do_encode arg:self arg:numpy_value arg:encode_fn arguments arg arg arg Assign Call Call Call Return return:yes FunctionDef name:can_decode arg:self arg:value arguments arg arg Return return:yes Call FunctionDef name:do_decode arg:self arg:value arg:decode_fn arguments arg arg arg Assign Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "num_local_devices",
    "source_code": "@tf_export('experimental.dtensor.num_local_devices', v1=[])\ndef num_local_devices(device_type: str) -> int:\n    if device_type.upper() in ['CPU', 'GPU']:\n        context_config = context.get_config()\n        return context_config.device_count[device_type.upper()]\n    return len(tf_config.list_physical_devices(device_type))",
    "docstring": "Returns the number of devices of device_type configured on this client.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\dtensor\\python\\config.py",
    "ast_data": "FunctionDef name:num_local_devices arg:device_type arguments arg If Compare Call Assign Call Return return:yes Call Return return:yes Call Call Call"
  },
  {
    "library": "scipy",
    "name": "_one_eve",
    "source_code": "def _one_eve(self, k):\n    phi = [self._ev1d(j, n) for j, n in zip(k, self.grid_shape)]\n    result = phi[0]\n    for phi in phi[1:]:\n        result = np.tensordot(result, phi, axes=0)\n    return np.asarray(result).ravel()",
    "docstring": "Return 1 eigenvector in Nd with multi-index as a tensor product of the corresponding 1d eigenvectors.",
    "type": "method",
    "file_path": "scipy\\scipy\\sparse\\linalg\\_special_sparse_arrays.py",
    "ast_data": "FunctionDef name:_one_eve arg:self arg:k arguments arg arg Assign Call Call Assign For Assign Call Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_VariableScopeStore",
    "source_code": "class _VariableScopeStore(threading.local):\n\n    def __init__(self):\n        super(_VariableScopeStore, self).__init__()\n        self.current_scope = VariableScope(False)\n        self.variable_scopes_count = {}\n\n    def open_variable_scope(self, scope_name):\n        if scope_name in self.variable_scopes_count:\n            self.variable_scopes_count[scope_name] += 1\n        else:\n            self.variable_scopes_count[scope_name] = 1\n\n    def close_variable_subscopes(self, scope_name):\n        if scope_name is None:\n            for k in self.variable_scopes_count:\n                self.variable_scopes_count[k] = 0\n        else:\n            startswith_check = scope_name + '/'\n            startswith_len = len(startswith_check)\n            for k in self.variable_scopes_count:\n                if k[:startswith_len] == startswith_check:\n                    self.variable_scopes_count[k] = 0\n\n    def variable_scope_count(self, scope_name):\n        return self.variable_scopes_count.get(scope_name, 0)",
    "docstring": "A thread local store for the current variable scope and scope counts.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\variable_scope.py",
    "ast_data": "ClassDef name:_VariableScopeStore FunctionDef name:__init__ arg:self arguments arg Call Call Assign Call Assign FunctionDef name:open_variable_scope arg:self arg:scope_name arguments arg arg If Compare Assign FunctionDef name:close_variable_subscopes arg:self arg:scope_name arguments arg arg If Compare For Assign Assign Assign Call For If Compare Assign FunctionDef name:variable_scope_count arg:self arg:scope_name arguments arg arg Return return:yes Call"
  },
  {
    "library": "cherrypy",
    "name": "AnotherPage",
    "source_code": "class AnotherPage(Page):\n    title = 'Another Page'\n\n    @cherrypy.expose\n    def index(self):\n        return self.header() + '\\n            <p>\\n            And this is the amazing second page!\\n            </p>\\n        ' + self.footer()",
    "docstring": "Another page app.",
    "type": "class",
    "file_path": "cherrypy\\cherrypy\\tutorial\\tut05_derived_objects.py",
    "ast_data": "ClassDef name:AnotherPage Assign FunctionDef name:index arg:self arguments arg Return return:yes Call Call"
  },
  {
    "library": "scipy",
    "name": "Problem06",
    "source_code": "class Problem06(Benchmark):\n\n    def __init__(self, dimensions=1):\n        Benchmark.__init__(self, dimensions)\n        self._bounds = [(-10.0, 10.0)]\n        self.global_optimum = 0.67956\n        self.fglob = -0.824239\n\n    def fun(self, x, *args):\n        self.nfev += 1\n        x = x[0]\n        return -(x + sin(x)) * exp(-x ** 2.0)",
    "docstring": "Univariate Problem06 objective function. This class defines the Univariate Problem06 global optimization problem. This is a multimodal minimization problem defined as follows: .. math:: f_{\\text{Problem06}}(x) = - \\left[x + \\sin(x) \\right] e^{-x^2} Bound constraints: :math: .. figure:: figures/Problem06.png :alt: Univariate Problem06 function :align: center **Univariate Problem06 function** *Global optimum*: :math: for :math:",
    "type": "class",
    "file_path": "scipy\\benchmarks\\benchmarks\\go_benchmark_functions\\go_funcs_univariate.py",
    "ast_data": "ClassDef name:Problem06 FunctionDef name:__init__ arg:self arg:dimensions arguments arg arg Call Assign Assign Assign FunctionDef name:fun arg:self arg:x arguments arg arg arg Assign Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "__call__",
    "source_code": "def __call__(self):\n    return tensor_conversion.convert_to_tensor_v2_with_dispatch(self._current_loss_scale)",
    "docstring": "Returns the current loss scale as a scalar tensor.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\mixed_precision\\loss_scale_optimizer.py",
    "ast_data": "FunctionDef name:__call__ arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "is_fsspec_url",
    "source_code": "def is_fsspec_url(url: FilePath | BaseBuffer) -> bool:\n    return isinstance(url, str) and bool(_FSSPEC_URL_PATTERN.match(url)) and (not url.startswith(('http://', 'https://')))",
    "docstring": "Returns true if the given URL looks like something fsspec can handle",
    "type": "function",
    "file_path": "pandas\\pandas\\io\\common.py",
    "ast_data": "FunctionDef name:is_fsspec_url arg:url arguments arg Return return:yes BoolOp Call Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_copy_trackable_to_cpu",
    "source_code": "def _copy_trackable_to_cpu(self, object_map):\n    if self not in object_map:\n        object_map[self] = DenseHashTable(self._key_dtype, self._value_dtype, self._default_value, self._empty_key, self._deleted_key, self._initial_num_buckets, self._name, self._checkpoint, self._is_anonymous)\n    serialized = self._serialize_to_tensors()\n    object_map[self]._restore_from_tensors(serialized)",
    "docstring": "Implements checkpointing protocols for .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\lookup_ops.py",
    "ast_data": "FunctionDef name:_copy_trackable_to_cpu arg:self arg:object_map arguments arg arg If Compare Assign Call Assign Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_to_default_values",
    "source_code": "def _to_default_values(fullargspec):\n    if fullargspec.defaults is not None:\n        defaults = {name: value for name, value in zip(fullargspec.args[-len(fullargspec.defaults):], fullargspec.defaults)}\n    else:\n        defaults = {}\n    if fullargspec.kwonlydefaults is not None:\n        defaults.update(fullargspec.kwonlydefaults)\n    defaults = {function_type_lib.sanitize_arg_name(name): value for name, value in defaults.items()}\n    return defaults",
    "docstring": "Returns default values from the function's inspected fullargspec.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\polymorphic_function\\function_type_utils.py",
    "ast_data": "FunctionDef name:_to_default_values arg:fullargspec arguments arg If Compare Assign Call Call Assign If Compare Call Assign Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "conv_output_length",
    "source_code": "def conv_output_length(input_length, filter_size, padding, stride, dilation=1):\n    if input_length is None:\n        return None\n    assert padding in {'same', 'valid', 'full'}\n    dilated_filter_size = filter_size + (filter_size - 1) * (dilation - 1)\n    if padding == 'same':\n        output_length = input_length\n    elif padding == 'valid':\n        output_length = input_length - dilated_filter_size + 1\n    elif padding == 'full':\n        output_length = input_length + dilated_filter_size - 1\n    return (output_length + stride - 1) // stride",
    "docstring": "Determines output length of a convolution given input length. Args: input_length: integer. filter_size: integer. padding: one of \"same\", \"valid\", \"full\". stride: integer. dilation: dilation rate, integer. Returns: The output length (integer).",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\layers\\utils.py",
    "ast_data": "FunctionDef name:conv_output_length arg:input_length arg:filter_size arg:padding arg:stride arg:dilation arguments arg arg arg arg arg If Compare Return return:no Compare Assign If Compare Assign If Compare Assign If Compare Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_with_tensor_ranks_only",
    "source_code": "def _with_tensor_ranks_only(self) -> 'TypeSpec':\n\n    def relax(value):\n        if isinstance(value, TypeSpec):\n            return value._with_tensor_ranks_only()\n        elif isinstance(value, tensor_shape.TensorShape) and value.rank is not None:\n            return tensor_shape.TensorShape([None] * value.rank)\n        else:\n            return value\n    return self._deserialize(nest.map_structure(relax, self._serialize()))",
    "docstring": "Returns a TypeSpec compatible with , with tensor shapes relaxed. Returns: A that is compatible with , where any information has been relaxed to include only tensor rank (and not the dimension sizes for individual axes).",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\type_spec.py",
    "ast_data": "FunctionDef name:_with_tensor_ranks_only arg:self arguments arg FunctionDef name:relax arg:value arguments arg If Call Return return:yes Call If BoolOp Call Compare Return return:yes Call Return return:yes Return return:yes Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "placeholder_value",
    "source_code": "def placeholder_value(self, placeholder_context):\n    if placeholder_context.unnest_only:\n        return self\n    name = self.name or placeholder_context.naming_scope\n    context_graph = placeholder_context.context_graph\n    if placeholder_context.with_none_control_dependencies:\n        with context_graph.control_dependencies(None):\n            placeholder = self._graph_placeholder(context_graph, name=name)\n    else:\n        placeholder = self._graph_placeholder(context_graph, name=name)\n    if name is not None:\n        placeholder.op._set_attr('_user_specified_name', attr_value_pb2.AttrValue(s=compat.as_bytes(name)))\n    handle_data = self.dtype._handle_data\n    if handle_data is not None and handle_data.shape_inference.is_set and handle_data.shape_inference.shape_and_type:\n        handle_data_util.set_handle_data(placeholder, handle_data.shape_inference)\n    if placeholder_context.composite_device_name is not None:\n        placeholder.op._set_attr('_composite_device', attr_value_pb2.AttrValue(s=compat.as_bytes(placeholder_context.composite_device_name)))\n    return placeholder",
    "docstring": "Generates a graph placeholder with the given TensorSpec information.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\tensor.py",
    "ast_data": "FunctionDef name:placeholder_value arg:self arg:placeholder_context arguments arg arg If Return return:yes Assign BoolOp Assign If With Call Assign Call Assign Call If Compare Call Call Call Assign If BoolOp Compare Call If Compare Call Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_TensorCaches",
    "source_code": "class _TensorCaches(threading.local):\n    __slots__ = ['_ones_rank_cache', '_zeros_cache']\n\n    def __init__(self):\n        super().__init__()\n        self._ones_rank_cache = None\n        self._zeros_cache = None\n\n    @property\n    def ones_rank_cache(self):\n        if not self._ones_rank_cache:\n            self._ones_rank_cache = _EagerTensorCache()\n        return self._ones_rank_cache\n\n    @property\n    def zeros_cache(self):\n        if not self._zeros_cache:\n            self._zeros_cache = _EagerTensorCache()\n        return self._zeros_cache",
    "docstring": "Thread local tensor caches.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\context.py",
    "ast_data": "ClassDef name:_TensorCaches Assign FunctionDef name:__init__ arg:self arguments arg Call Call Assign Assign FunctionDef name:ones_rank_cache arg:self arguments arg If Assign Call Return return:yes FunctionDef name:zeros_cache arg:self arguments arg If Assign Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "replace_random_passes",
    "source_code": "def replace_random_passes(gm: torch.fx.GraphModule):\n    if config.fallback_random:\n        return 0\n    count = patterns.apply(gm)\n    with GraphTransformObserver(gm, 'fuse_seed_creation_pass'):\n        count += fuse_seed_creation_pass(gm.graph)\n    return count",
    "docstring": "Modify the given FX graph to use backend-native random ops",
    "type": "function",
    "file_path": "pytorch\\torch\\_inductor\\fx_passes\\replace_random.py",
    "ast_data": "FunctionDef name:replace_random_passes arg:gm arguments arg If Return return:yes Assign Call With Call Call Return return:yes"
  },
  {
    "library": "scrapy",
    "name": "MutableChain",
    "source_code": "class MutableChain(Iterable[_T]):\n\n    def __init__(self, *args: Iterable[_T]):\n        self.data: Iterator[_T] = chain.from_iterable(args)\n\n    def extend(self, *iterables: Iterable[_T]) -> None:\n        self.data = chain(self.data, chain.from_iterable(iterables))\n\n    def __iter__(self) -> Iterator[_T]:\n        return self\n\n    def __next__(self) -> _T:\n        return next(self.data)",
    "docstring": "Thin wrapper around itertools.chain, allowing to add iterables \"in-place\"",
    "type": "class",
    "file_path": "scrapy\\scrapy\\utils\\python.py",
    "ast_data": "ClassDef name:MutableChain FunctionDef name:__init__ arg:self arguments arg arg Call FunctionDef name:extend arg:self arguments arg arg Assign Call Call FunctionDef name:__iter__ arg:self arguments arg Return return:yes FunctionDef name:__next__ arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "AutoLocator",
    "source_code": "class AutoLocator(MaxNLocator):\n\n    def __init__(self):\n        if mpl.rcParams['_internal.classic_mode']:\n            nbins = 9\n            steps = [1, 2, 5, 10]\n        else:\n            nbins = 'auto'\n            steps = [1, 2, 2.5, 5, 10]\n        super().__init__(nbins=nbins, steps=steps)",
    "docstring": "Place evenly spaced ticks, with the step size and maximum number of ticks chosen automatically. This is a subclass of , with parameters *nbins = 'auto'* and *steps = [1, 2, 2.5, 5, 10]*.",
    "type": "class",
    "file_path": "matplotlib\\lib\\matplotlib\\ticker.py",
    "ast_data": "ClassDef name:AutoLocator FunctionDef name:__init__ arg:self arguments arg If Assign Assign Assign Assign Call Call"
  },
  {
    "library": "kornia",
    "name": "_get_new_batch_shape",
    "source_code": "def _get_new_batch_shape(param: ParamItem, batch_shape: torch.Size) -> torch.Size:\n    if param.data is None:\n        return batch_shape\n    if isinstance(param.data, list):\n        for p in param.data:\n            batch_shape = _get_new_batch_shape(p, batch_shape)\n    elif 'output_size' in param.data:\n        if not (param.data['batch_prob'] > 0.5)[0]:\n            return batch_shape\n        new_batch_shape = list(batch_shape)\n        new_batch_shape[-2:] = param.data['output_size'][0]\n        batch_shape = torch.Size(new_batch_shape)\n    return batch_shape",
    "docstring": "Get the new batch shape if the augmentation changes the image size. Note: Augmentations that change the image size must provide the parameter .",
    "type": "function",
    "file_path": "kornia\\kornia\\augmentation\\container\\image.py",
    "ast_data": "FunctionDef name:_get_new_batch_shape arg:param arg:batch_shape arguments arg arg If Compare Return return:yes If Call For Assign Call If Compare If Compare Return return:yes Assign Call Assign Assign Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "__init__",
    "source_code": "def __init__(self, accumulator_node_name: str, removed_buffers: OrderedSet[str]):\n    self.accumulator_node_name: str = accumulator_node_name\n    self.body: IndentedBuffer = IndentedBuffer(1)\n    self.var_counter: Iterator[int] = itertools.count()\n    self.store_name_to_value: dict[str, OpsValue] = dict()\n    self.reads: OrderedSet[str] = OrderedSet([])\n    self.var_name_to_buffer_name: dict[str, str] = {_ACCUMULATOR_ARG_NAME: accumulator_node_name}\n    self.removed_buffers: OrderedSet[str] = removed_buffers\n    self.cur_node: Optional[ComputedBuffer] = None\n    self.name_to_buffer = V.graph.name_to_buffer | V.graph.graph_inputs\n    self.is_D_assigned = False\n    self.D_var_name = None\n    if accumulator_node_name not in removed_buffers:\n        var = self._tmp_var()\n        self.body.writeline(f'{var} = {_ACCUMULATOR_ARG_NAME}')\n        self.store(accumulator_node_name, value=OpsValue(var))",
    "docstring": "Initializes a CutlassEVTEpilogueArgumentFormatter object. Do not instantiate directly. Use the CutlassEVTCodegen.ir_to_evt_python_code static method. Args: accumulator_node_name: The name of the accumulator node which should contain the Matmul result before fusion according to the IR graph. epilogue_nodes: The list of scheduler nodes to be fused into the epilogue",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\codegen\\cuda\\cutlass_python_evt.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:accumulator_node_name arg:removed_buffers arguments arg arg arg Call Call Call Call Assign Assign Assign If Compare Assign Call Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "sort_native_function",
    "source_code": "def sort_native_function(f: NativeFunctionsGroup | NativeFunction) -> str:\n    func = f.functional.func if isinstance(f, NativeFunctionsGroup) else f.func\n    return str(func.name.name)",
    "docstring": "We sort the native function because of the note in concat_map_codegen. TODO(alanwaketan): Remove this sorting hack once all ops are grouped properly.",
    "type": "function",
    "file_path": "pytorch\\torchgen\\gen_lazy_tensor.py",
    "ast_data": "FunctionDef name:sort_native_function arg:f arguments arg Assign Call Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "ensure_quadratic_bezier",
    "source_code": "@staticmethod\ndef ensure_quadratic_bezier(path):\n    segments = list(path.iter_segments())\n    if len(segments) != 2 or segments[0][1] != Path.MOVETO or segments[1][1] != Path.CURVE3:\n        raise ValueError(\"'path' is not a valid quadratic Bezier curve\")\n    return [*segments[0][0], *segments[1][0]]",
    "docstring": "Some ArrowStyle classes only works with a simple quadratic Bézier curve (created with or ). This static method checks if the provided path is a simple quadratic Bézier curve and returns its control points if true.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\patches.py",
    "ast_data": "FunctionDef name:ensure_quadratic_bezier arg:path arguments arg Assign Call Call If BoolOp Compare Call Compare Compare Raise Call Return return:yes"
  },
  {
    "library": "django",
    "name": "__init__",
    "source_code": "def __init__(self, expression, length, **extra):\n    if not hasattr(length, 'resolve_expression'):\n        if length < 1:\n            raise ValueError(\"'length' must be greater than 0.\")\n    super().__init__(expression, length, **extra)",
    "docstring": "expression: the name of a field, or an expression returning a string length: the number of characters to return from the start of the string",
    "type": "method",
    "file_path": "django\\django\\db\\models\\functions\\text.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:expression arg:length arguments arg arg arg arg If Call If Compare Raise Call Call Call"
  },
  {
    "library": "django",
    "name": "temporary_connection",
    "source_code": "@contextmanager\ndef temporary_connection(self):\n    must_close = self.connection is None\n    try:\n        with self.cursor() as cursor:\n            yield cursor\n    finally:\n        if must_close:\n            self.close()",
    "docstring": "Context manager that ensures that a connection is established, and if it opened one, closes it to avoid leaving a dangling connection. This is useful for operations outside of the request-response cycle. Provide a cursor: with self.temporary_connection() as cursor: ...",
    "type": "method",
    "file_path": "django\\django\\db\\backends\\base\\base.py",
    "ast_data": "FunctionDef name:temporary_connection arg:self arguments arg Assign Compare Try With Call If Call"
  },
  {
    "library": "pandas",
    "name": "fill_value",
    "source_code": "@property\ndef fill_value(self):\n    return self._fill_value",
    "docstring": "The fill value of the array. Converting the SparseArray to a dense ndarray will fill the array with this value. .. warning:: It's possible to end up with a SparseArray that has `` directly.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\dtypes\\dtypes.py",
    "ast_data": "FunctionDef name:fill_value arg:self arguments arg Return return:yes"
  },
  {
    "library": "scipy",
    "name": "add_result",
    "source_code": "def add_result(self, result, t, name):\n    result.time = t\n    result.name = name\n    if not hasattr(result, 'njev'):\n        result.njev = 0\n    if not hasattr(result, 'nhev'):\n        result.nhev = 0\n    self.results.append(result)",
    "docstring": "add a result to the list",
    "type": "method",
    "file_path": "scipy\\benchmarks\\benchmarks\\optimize.py",
    "ast_data": "FunctionDef name:add_result arg:self arg:result arg:t arg:name arguments arg arg arg arg Assign Assign If Call Assign If Call Assign Call"
  },
  {
    "library": "numpy",
    "name": "has_samewindow",
    "source_code": "def has_samewindow(self, other):\n    return np.all(self.window == other.window)",
    "docstring": "Check if windows match. Parameters ---------- other : class instance The other class must have the `` attribute. Returns ------- bool : boolean True if the windows are the same, False otherwise.",
    "type": "method",
    "file_path": "numpy\\numpy\\polynomial\\_polybase.py",
    "ast_data": "FunctionDef name:has_samewindow arg:self arg:other arguments arg arg Return return:yes Call Compare"
  },
  {
    "library": "pytorch",
    "name": "set_cpu_parent",
    "source_code": "def set_cpu_parent(self, parent):\n    assert self.device_type == DeviceType.CPU\n    assert isinstance(parent, FunctionEvent)\n    assert parent.device_type == DeviceType.CPU\n    self.cpu_parent = parent",
    "docstring": "Set the immediate CPU parent of type FunctionEvent. One profiling FunctionEvent should have only one CPU parent such that the child's range interval is completely inside the parent's. We use this connection to determine the event is from top-level op or not.",
    "type": "method",
    "file_path": "pytorch\\torch\\autograd\\profiler_util.py",
    "ast_data": "FunctionDef name:set_cpu_parent arg:self arg:parent arguments arg arg Compare Call Compare Assign"
  },
  {
    "library": "matplotlib",
    "name": "get_default_weight",
    "source_code": "def get_default_weight(self):\n    return self.__default_weight",
    "docstring": "Return the default font weight.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\font_manager.py",
    "ast_data": "FunctionDef name:get_default_weight arg:self arguments arg Return return:yes"
  },
  {
    "library": "scrapy",
    "name": "ftp_store_file",
    "source_code": "def ftp_store_file(*, path: str, file: IO[bytes], host: str, port: int, username: str, password: str, use_active_mode: bool=False, overwrite: bool=True) -> None:\n    with FTP() as ftp:\n        ftp.connect(host, port)\n        ftp.login(username, password)\n        if use_active_mode:\n            ftp.set_pasv(False)\n        file.seek(0)\n        dirname, filename = posixpath.split(path)\n        ftp_makedirs_cwd(ftp, dirname)\n        command = 'STOR' if overwrite else 'APPE'\n        ftp.storbinary(f'{command} {filename}', file)\n        file.close()",
    "docstring": "Opens a FTP connection with passed credentials,sets current directory to the directory extracted from given path, then uploads the file to server",
    "type": "function",
    "file_path": "scrapy\\scrapy\\utils\\ftp.py",
    "ast_data": "FunctionDef name:ftp_store_file arguments arg arg arg arg arg arg arg arg With Call Call Call If Call Call Assign Call Call Assign Call Call"
  },
  {
    "library": "matplotlib",
    "name": "compute_dz",
    "source_code": "def compute_dz(self):\n    el_geom_w = self.compute_geom_weights()\n    el_geom_grad = self.compute_geom_grads()\n    w_node_sum = np.bincount(np.ravel(self._triangles), weights=np.ravel(el_geom_w))\n    dfx_el_w = np.empty_like(el_geom_w)\n    dfy_el_w = np.empty_like(el_geom_w)\n    for iapex in range(3):\n        dfx_el_w[:, iapex] = el_geom_w[:, iapex] * el_geom_grad[:, 0]\n        dfy_el_w[:, iapex] = el_geom_w[:, iapex] * el_geom_grad[:, 1]\n    dfx_node_sum = np.bincount(np.ravel(self._triangles), weights=np.ravel(dfx_el_w))\n    dfy_node_sum = np.bincount(np.ravel(self._triangles), weights=np.ravel(dfy_el_w))\n    dfx_estim = dfx_node_sum / w_node_sum\n    dfy_estim = dfy_node_sum / w_node_sum\n    return np.vstack([dfx_estim, dfy_estim]).T",
    "docstring": "self.df is computed as weighted average of _triangles sharing a common node. On each triangle itri f is first assumed linear (= ~f), which allows to compute d~f[itri] Then the following approximation of df nodal values is then proposed: f[ipt] = SUM ( w[itri] x d~f[itri] , for itri sharing apex ipt) The weighted coeff. w[itri] are proportional to the angle of the triangle itri at apex ipt",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\tri\\_triinterpolate.py",
    "ast_data": "FunctionDef name:compute_dz arg:self arguments arg Assign Call Assign Call Assign Call Call Call Assign Call Assign Call For Call Assign Assign Assign Call Call Call Assign Call Call Call Assign Assign Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "_solve_check",
    "source_code": "def _solve_check(n, info, lamch=None, rcond=None):\n    if info < 0:\n        raise ValueError(f'LAPACK reported an illegal value in {-info}-th argument.')\n    elif 0 < info or rcond == 0:\n        raise LinAlgError('Matrix is singular.')\n    if lamch is None:\n        return\n    E = lamch('E')\n    if not rcond >= E:\n        warn(f'Ill-conditioned matrix (rcond={rcond:.6g}): result may not be accurate.', LinAlgWarning, stacklevel=3)",
    "docstring": "Check arguments during the different steps of the solution phase",
    "type": "function",
    "file_path": "scipy\\scipy\\linalg\\_basic.py",
    "ast_data": "FunctionDef name:_solve_check arg:n arg:info arg:lamch arg:rcond arguments arg arg arg arg If Compare Raise Call If BoolOp Compare Compare Raise Call If Compare Return return:no Assign Call If Compare Call"
  },
  {
    "library": "matplotlib",
    "name": "__iter__",
    "source_code": "def __iter__(self):\n    with _api.suppress_matplotlib_deprecation_warning():\n        yield from sorted(dict.__iter__(self))",
    "docstring": "Yield sorted list of keys.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\__init__.py",
    "ast_data": "FunctionDef name:__iter__ arg:self arguments arg With Call Call Call"
  },
  {
    "library": "authlib",
    "name": "get_client",
    "source_code": "def get_client(self) -> ClientMixin:\n    raise NotImplementedError()",
    "docstring": "A method to get the client object associated with this token: .. code-block:: def get_client(self): return Client.get(self.client_id)",
    "type": "method",
    "file_path": "authlib\\authlib\\oauth2\\rfc6749\\models.py",
    "ast_data": "FunctionDef name:get_client arg:self arguments arg Raise Call"
  },
  {
    "library": "matplotlib",
    "name": "font_is_installed",
    "source_code": "def font_is_installed(font):\n    return [fam for fam in QtGui.QFontDatabase().families() if str(fam) == font]",
    "docstring": "Check if font is installed",
    "type": "function",
    "file_path": "matplotlib\\lib\\matplotlib\\backends\\qt_editor\\_formlayout.py",
    "ast_data": "FunctionDef name:font_is_installed arg:font arguments arg Return return:yes Call Call Compare Call"
  },
  {
    "library": "tensorflow",
    "name": "_inside_op_range",
    "source_code": "def _inside_op_range(self, idx):\n    if idx < self._parameters.op_range[0]:\n        return False\n    return self._parameters.op_range[1] < 0 or idx <= self._parameters.op_range[1]",
    "docstring": "Return True if the given index is inside the selected range.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\tpu\\tensor_tracer.py",
    "ast_data": "FunctionDef name:_inside_op_range arg:self arg:idx arguments arg arg If Compare Return return:yes Return return:yes BoolOp Compare Compare"
  },
  {
    "library": "tensorflow",
    "name": "_set_handle_shapes_and_types",
    "source_code": "def _set_handle_shapes_and_types(tensor, handle_data, graph_mode):\n    tensor._handle_data = handle_data\n    if not graph_mode:\n        return\n    shapes, types = zip(*[(pair.shape, pair.dtype) for pair in handle_data.shape_and_type])\n    ranks = [len(s.dim) if not s.unknown_rank else -1 for s in shapes]\n    shapes = [[d.size for d in s.dim] if not s.unknown_rank else None for s in shapes]\n    with tensor._op.graph._c_graph.get() as c_graph:\n        pywrap_tf_session.TF_GraphSetOutputHandleShapesAndTypes_wrapper(c_graph, tensor._as_tf_output(), shapes, ranks, types)",
    "docstring": "Sets the shape inference result HandleData on tensor. Args: tensor: A or . handle_data: A . graph_mode: A python bool.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\resource_variable_ops.py",
    "ast_data": "FunctionDef name:_set_handle_shapes_and_types arg:tensor arg:handle_data arg:graph_mode arguments arg arg arg Assign If Return return:no Assign Call Assign Call Assign With Call Call Call"
  },
  {
    "library": "numpy",
    "name": "myeval",
    "source_code": "def myeval(e, g=None, l=None):\n    r = eval(e, g, l)\n    if type(r) in [int, float]:\n        return r\n    raise ValueError(f'r={r!r}')",
    "docstring": "Like but returns only integers and floats",
    "type": "function",
    "file_path": "numpy\\numpy\\f2py\\crackfortran.py",
    "ast_data": "FunctionDef name:myeval arg:e arg:g arg:l arguments arg arg arg Assign Call If Compare Call Return return:yes Raise Call"
  },
  {
    "library": "matplotlib",
    "name": "_mpltype_role",
    "source_code": "def _mpltype_role(name, rawtext, text, lineno, inliner, options=None, content=None):\n    mpltype = text\n    type_to_link_target = {'color': 'colors_def', 'hatch': 'hatch_def'}\n    if mpltype not in type_to_link_target:\n        raise ValueError(f'Unknown mpltype: {mpltype!r}')\n    node_list, messages = inliner.interpreted(mpltype, f'{mpltype} <{type_to_link_target[mpltype]}>', 'ref', lineno)\n    return (node_list, messages)",
    "docstring": "Sphinx role `:mpltype:color`",
    "type": "function",
    "file_path": "matplotlib\\lib\\matplotlib\\sphinxext\\roles.py",
    "ast_data": "FunctionDef name:_mpltype_role arg:name arg:rawtext arg:text arg:lineno arg:inliner arg:options arg:content arguments arg arg arg arg arg arg arg Assign Assign If Compare Raise Call Assign Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "cache_on_shape_env",
    "source_code": "def cache_on_shape_env(self) -> bool:\n    return bool(self.sym_node_lookup)",
    "docstring": "Returns true if the CacheKey needs to be cached on the ShapeEnv rather than the global cache. If our inputs contain a SymNode then we can't cache this operation on the global cache because the cached output will implicitly depend on guard values which might not be true on some other ShapeEnv. So unless we're also going to cache the guards we need to cache this operation on the ShapeEnv instead of globally.",
    "type": "method",
    "file_path": "pytorch\\torch\\_subclasses\\_fake_tensor_utils.py",
    "ast_data": "FunctionDef name:cache_on_shape_env arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "__eq__",
    "source_code": "def __eq__(self, other):\n    if tensor_lib.Tensor._USE_EQUALITY and ops.executing_eagerly_outside_functions():\n        return gen_math_ops.equal(self, other, incompatible_shape_error=False)\n    else:\n        return self is other",
    "docstring": "Compares two variables element-wise for equality.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\variables.py",
    "ast_data": "FunctionDef name:__eq__ arg:self arg:other arguments arg arg If BoolOp Call Return return:yes Call Return return:yes Compare"
  },
  {
    "library": "pandas",
    "name": "_validate_scalar",
    "source_code": "def _validate_scalar(self, fill_value):\n    if is_valid_na_for_dtype(fill_value, self.categories.dtype):\n        fill_value = -1\n    elif fill_value in self.categories:\n        fill_value = self._unbox_scalar(fill_value)\n    else:\n        raise TypeError(f'Cannot setitem on a Categorical with a new category ({fill_value}), set the categories first') from None\n    return fill_value",
    "docstring": "Convert a user-facing fill_value to a representation to use with our underlying ndarray, raising TypeError if this is not possible. Parameters ---------- fill_value : object Returns ------- fill_value : int Raises ------ TypeError",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\arrays\\categorical.py",
    "ast_data": "FunctionDef name:_validate_scalar arg:self arg:fill_value arguments arg arg If Call Assign If Compare Assign Call Raise Call Return return:yes"
  },
  {
    "library": "kornia",
    "name": "KORNIA_CHECK_SAME_SHAPE",
    "source_code": "def KORNIA_CHECK_SAME_SHAPE(x: Tensor, y: Tensor, raises: bool=True) -> bool:\n    if x.shape != y.shape:\n        if raises:\n            raise TypeError(f'Not same shape for tensors. Got: {x.shape} and {y.shape}')\n        return False\n    return True",
    "docstring": "Check whether two tensor have the same shape. Args: x: first tensor to evaluate. y: sencod tensor to evaluate. msg: message to show in the exception. raises: bool indicating whether an exception should be raised upon failure. Raises: TypeException: if the two tensors have not the same shape and raises is True. Example: >>> x1 = torch.rand(2, 3, 3) >>> x2 = torch.rand(2, 3, 3) >>> KORNIA_CHECK_SAME_SHAPE(x1, x2) True",
    "type": "function",
    "file_path": "kornia\\kornia\\core\\check.py",
    "ast_data": "FunctionDef name:KORNIA_CHECK_SAME_SHAPE arg:x arg:y arg:raises arguments arg arg arg If Compare If Raise Call Return return:yes Return return:yes"
  },
  {
    "library": "numpy",
    "name": "_from_ctypes_scalar",
    "source_code": "def _from_ctypes_scalar(t):\n    if getattr(t, '__ctype_be__', None) is t:\n        return np.dtype('>' + t._type_)\n    elif getattr(t, '__ctype_le__', None) is t:\n        return np.dtype('<' + t._type_)\n    else:\n        return np.dtype(t._type_)",
    "docstring": "Return the dtype type with endianness included if it's the case",
    "type": "function",
    "file_path": "numpy\\numpy\\_core\\_dtype_ctypes.py",
    "ast_data": "FunctionDef name:_from_ctypes_scalar arg:t arguments arg If Compare Call Return return:yes Call If Compare Call Return return:yes Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "get_remote_cache",
    "source_code": "@staticmethod\ndef get_remote_cache() -> Optional[RemoteCache[JsonDataTy]]:\n    cache_id = 'fx-graph-v1'\n    return create_cache(cache_id, config.is_fbcode(), 'FbRemoteFxGraphCache', 'RemoteFxGraphCache')",
    "docstring": "Attempts to load the remote cache, returns None on error.",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\codecache.py",
    "ast_data": "FunctionDef name:get_remote_cache arguments Assign Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_sort_dump_data_by",
    "source_code": "def _sort_dump_data_by(self, data, sort_by, reverse):\n    if sort_by == SORT_TENSORS_BY_TIMESTAMP:\n        return sorted(data, reverse=reverse, key=lambda x: x.timestamp)\n    elif sort_by == SORT_TENSORS_BY_DUMP_SIZE:\n        return sorted(data, reverse=reverse, key=lambda x: x.dump_size_bytes)\n    elif sort_by == SORT_TENSORS_BY_OP_TYPE:\n        return sorted(data, reverse=reverse, key=lambda x: self._debug_dump.node_op_type(x.node_name))\n    elif sort_by == SORT_TENSORS_BY_TENSOR_NAME:\n        return sorted(data, reverse=reverse, key=lambda x: '%s:%d' % (x.node_name, x.output_slot))\n    else:\n        raise ValueError('Unsupported key to sort tensors by: %s' % sort_by)",
    "docstring": "Sort a list of DebugTensorDatum in specified order. Args: data: (list of DebugTensorDatum) the data to be sorted. sort_by: The field to sort data by. reverse: (bool) Whether to use reversed (descending) order. Returns: (list of DebugTensorDatum) in sorted order. Raises: ValueError: given an invalid value of sort_by.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\debug\\cli\\analyzer_cli.py",
    "ast_data": "FunctionDef name:_sort_dump_data_by arg:self arg:data arg:sort_by arg:reverse arguments arg arg arg arg If Compare Return return:yes Call arguments arg If Compare Return return:yes Call arguments arg If Compare Return return:yes Call arguments arg Call If Compare Return return:yes Call arguments arg Raise Call"
  },
  {
    "library": "tensorflow",
    "name": "add_to_collection",
    "source_code": "def add_to_collection(self, name, value) -> None:\n    self._check_not_finalized()\n    with self._lock:\n        if name not in self._collections:\n            self._collections[name] = [value]\n        else:\n            self._collections[name].append(value)",
    "docstring": "Stores in the collection with the given . Note that collections are not sets, so it is possible to add a value to a collection several times. Args: name: The key for the collection. The class contains many standard names for collections. value: The value to add to the collection.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\ops.py",
    "ast_data": "FunctionDef name:add_to_collection arg:self arg:name arg:value arguments arg arg arg Call With If Compare Assign Call"
  },
  {
    "library": "scikit-learn",
    "name": "_build_y",
    "source_code": "def _build_y(self, X, y, sample_weight, trim_duplicates=True):\n    self._check_input_data_shape(X)\n    X = X.reshape(-1)\n    if self.increasing == 'auto':\n        self.increasing_ = check_increasing(X, y)\n    else:\n        self.increasing_ = self.increasing\n    sample_weight = _check_sample_weight(sample_weight, X, dtype=X.dtype)\n    mask = sample_weight > 0\n    X, y, sample_weight = (X[mask], y[mask], sample_weight[mask])\n    order = np.lexsort((y, X))\n    X, y, sample_weight = [array[order] for array in [X, y, sample_weight]]\n    unique_X, unique_y, unique_sample_weight = _make_unique(X, y, sample_weight)\n    X = unique_X\n    y = isotonic_regression(unique_y, sample_weight=unique_sample_weight, y_min=self.y_min, y_max=self.y_max, increasing=self.increasing_)\n    self.X_min_, self.X_max_ = (np.min(X), np.max(X))\n    if trim_duplicates:\n        keep_data = np.ones((len(y),), dtype=bool)\n        keep_data[1:-1] = np.logical_or(np.not_equal(y[1:-1], y[:-2]), np.not_equal(y[1:-1], y[2:]))\n        return (X[keep_data], y[keep_data])\n    else:\n        return (X, y)",
    "docstring": "Build the y_ IsotonicRegression.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\isotonic.py",
    "ast_data": "FunctionDef name:_build_y arg:self arg:X arg:y arg:sample_weight arg:trim_duplicates arguments arg arg arg arg arg Call Assign Call If Compare Assign Call Assign Assign Call Assign Compare Assign Assign Call Assign Assign Call Assign Assign Call Assign Call Call If Assign Call Call Assign Call Call Call Return return:yes Return return:yes"
  },
  {
    "library": "pandas",
    "name": "_get_label_or_level_values",
    "source_code": "@final\ndef _get_label_or_level_values(self, key: Level, axis: AxisInt=0) -> ArrayLike:\n    axis = self._get_axis_number(axis)\n    first_other_axes = next((ax for ax in range(self._AXIS_LEN) if ax != axis), None)\n    if self._is_label_reference(key, axis=axis):\n        self._check_label_or_level_ambiguity(key, axis=axis)\n        if first_other_axes is None:\n            raise ValueError('axis matched all axes')\n        values = self.xs(key, axis=first_other_axes)._values\n    elif self._is_level_reference(key, axis=axis):\n        values = self.axes[axis].get_level_values(key)._values\n    else:\n        raise KeyError(key)\n    if values.ndim > 1:\n        if first_other_axes is not None and isinstance(self._get_axis(first_other_axes), MultiIndex):\n            multi_message = '\\nFor a multi-index, the label must be a tuple with elements corresponding to each level.'\n        else:\n            multi_message = ''\n        label_axis_name = 'column' if axis == 0 else 'index'\n        raise ValueError(f\"The {label_axis_name} label '{key}' is not unique.{multi_message}\")\n    return values",
    "docstring": "Return a 1-D array of values associated with , a label or level from the given . Retrieval logic: - (axis=0): Return column values if matches a column label. Otherwise return index level values if matches an index level. - (axis=1): Return row values if matches an index label. Otherwise return column level values if 'key' matches a column level Parameters ---------- key : Hashable Label or level name. axis : int, default 0 Axis that levels are associated with (0 for index, 1 for columns) Returns ------- np.ndarray or ExtensionArray Raises ------ KeyError if matches neither a label nor a level ValueError if matches multiple labels",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\generic.py",
    "ast_data": "FunctionDef name:_get_label_or_level_values arg:self arg:key arg:axis arguments arg arg arg Assign Call Assign Call Call Compare If Call Call If Compare Raise Call Assign Call If Call Assign Call Raise Call If Compare If BoolOp Compare Call Call Assign Assign Assign Compare Raise Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "BlendedAffine2D",
    "source_code": "class BlendedAffine2D(_BlendedMixin, Affine2DBase):\n    is_separable = True\n\n    def __init__(self, x_transform, y_transform, **kwargs):\n        is_affine = x_transform.is_affine and y_transform.is_affine\n        is_separable = x_transform.is_separable and y_transform.is_separable\n        is_correct = is_affine and is_separable\n        if not is_correct:\n            raise ValueError('Both *x_transform* and *y_transform* must be 2D affine transforms')\n        Transform.__init__(self, **kwargs)\n        self._x = x_transform\n        self._y = y_transform\n        self.set_children(x_transform, y_transform)\n        Affine2DBase.__init__(self)\n        self._mtx = None\n\n    def get_matrix(self):\n        if self._invalid:\n            if self._x == self._y:\n                self._mtx = self._x.get_matrix()\n            else:\n                x_mtx = self._x.get_matrix()\n                y_mtx = self._y.get_matrix()\n                self._mtx = np.array([x_mtx[0], y_mtx[1], [0.0, 0.0, 1.0]])\n            self._inverted = None\n            self._invalid = 0\n        return self._mtx",
    "docstring": "A \"blended\" transform uses one transform for the *x*-direction, and another transform for the *y*-direction. This version is an optimization for the case where both child transforms are of type .",
    "type": "class",
    "file_path": "matplotlib\\lib\\matplotlib\\transforms.py",
    "ast_data": "ClassDef name:BlendedAffine2D Assign FunctionDef name:__init__ arg:self arg:x_transform arg:y_transform arguments arg arg arg arg Assign BoolOp Assign BoolOp Assign BoolOp If Raise Call Call Assign Assign Call Call Assign FunctionDef name:get_matrix arg:self arguments arg If If Compare Assign Call Assign Call Assign Call Assign Call Assign Assign Return return:yes"
  },
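A short sketch of how a blended affine transform is typically obtained in practice, via `blended_transform_factory` (which returns a `BlendedAffine2D` when both children are affine, the optimization this class provides):

```python
# Blend two affine transforms so x and y scale independently.
from matplotlib.transforms import Affine2D, BlendedAffine2D, blended_transform_factory

sx = Affine2D().scale(2.0, 1.0)   # governs the x-direction
sy = Affine2D().scale(1.0, 3.0)   # governs the y-direction
blend = blended_transform_factory(sx, sy)
assert isinstance(blend, BlendedAffine2D)
print(blend.transform([[1.0, 1.0]]))  # -> [[2. 3.]]
```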
  {
    "library": "pandas",
    "name": "_get_take_nd_function_cached",
    "source_code": "@functools.lru_cache\ndef _get_take_nd_function_cached(ndim: int, arr_dtype: np.dtype, out_dtype: np.dtype, axis: AxisInt):\n    tup = (arr_dtype.name, out_dtype.name)\n    if ndim == 1:\n        func = _take_1d_dict.get(tup, None)\n    elif ndim == 2:\n        if axis == 0:\n            func = _take_2d_axis0_dict.get(tup, None)\n        else:\n            func = _take_2d_axis1_dict.get(tup, None)\n    if func is not None:\n        return func\n    tup = (out_dtype.name, out_dtype.name)\n    if ndim == 1:\n        func = _take_1d_dict.get(tup, None)\n    elif ndim == 2:\n        if axis == 0:\n            func = _take_2d_axis0_dict.get(tup, None)\n        else:\n            func = _take_2d_axis1_dict.get(tup, None)\n    if func is not None:\n        func = _convert_wrapper(func, out_dtype)\n        return func\n    return None",
    "docstring": "Part of _get_take_nd_function below that doesn't need and thus can be cached (mask_info potentially contains a numpy ndarray which is not hashable and thus cannot be used as argument for cached function).",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\array_algos\\take.py",
    "ast_data": "FunctionDef name:_get_take_nd_function_cached arg:ndim arg:arr_dtype arg:out_dtype arg:axis arguments arg arg arg arg Assign If Compare Assign Call If Compare If Compare Assign Call Assign Call If Compare Return return:yes Assign If Compare Assign Call If Compare If Compare Assign Call Assign Call If Compare Assign Call Return return:yes Return return:no"
  },
  {
    "library": "tensorflow",
    "name": "GlobalPooling3D",
    "source_code": "class GlobalPooling3D(Layer):\n\n    def __init__(self, data_format=None, keepdims=False, **kwargs):\n        super(GlobalPooling3D, self).__init__(**kwargs)\n        self.data_format = conv_utils.normalize_data_format(data_format)\n        self.input_spec = InputSpec(ndim=5)\n        self.keepdims = keepdims\n\n    def compute_output_shape(self, input_shape):\n        input_shape = tensor_shape.TensorShape(input_shape).as_list()\n        if self.data_format == 'channels_last':\n            if self.keepdims:\n                return tensor_shape.TensorShape([input_shape[0], 1, 1, 1, input_shape[4]])\n            else:\n                return tensor_shape.TensorShape([input_shape[0], input_shape[4]])\n        elif self.keepdims:\n            return tensor_shape.TensorShape([input_shape[0], input_shape[1], 1, 1, 1])\n        else:\n            return tensor_shape.TensorShape([input_shape[0], input_shape[1]])\n\n    def call(self, inputs):\n        raise NotImplementedError\n\n    def get_config(self):\n        config = {'data_format': self.data_format, 'keepdims': self.keepdims}\n        base_config = super(GlobalPooling3D, self).get_config()\n        return dict(list(base_config.items()) + list(config.items()))",
    "docstring": "Abstract class for different global pooling 3D layers.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\layers\\pooling.py",
    "ast_data": "ClassDef name:GlobalPooling3D FunctionDef name:__init__ arg:self arg:data_format arg:keepdims arguments arg arg arg arg Call Call Assign Call Assign Call Assign FunctionDef name:compute_output_shape arg:self arg:input_shape arguments arg arg Assign Call Call If Compare If Return return:yes Call Return return:yes Call If Return return:yes Call Return return:yes Call FunctionDef name:call arg:self arg:inputs arguments arg arg Raise FunctionDef name:get_config arg:self arguments arg Assign Assign Call Call Return return:yes Call Call Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "TFLiteMetrics",
    "source_code": "class TFLiteMetrics(metrics_interface.TFLiteMetricsInterface):\n\n    def __init__(self, model_hash: Optional[Text]=None, model_path: Optional[Text]=None) -> None:\n        pass\n\n    def increase_counter_debugger_creation(self):\n        pass\n\n    def increase_counter_interpreter_creation(self):\n        pass\n\n    def increase_counter_converter_attempt(self):\n        pass\n\n    def increase_counter_converter_success(self):\n        pass\n\n    def set_converter_param(self, name, value):\n        pass\n\n    def set_converter_error(self, error_data):\n        pass\n\n    def set_converter_latency(self, value):\n        pass",
    "docstring": "TFLite metrics helper.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\lite\\python\\metrics\\metrics_portable.py",
    "ast_data": "ClassDef name:TFLiteMetrics FunctionDef name:__init__ arg:self arg:model_hash arg:model_path arguments arg arg arg FunctionDef name:increase_counter_debugger_creation arg:self arguments arg FunctionDef name:increase_counter_interpreter_creation arg:self arguments arg FunctionDef name:increase_counter_converter_attempt arg:self arguments arg FunctionDef name:increase_counter_converter_success arg:self arguments arg FunctionDef name:set_converter_param arg:self arg:name arg:value arguments arg arg arg FunctionDef name:set_converter_error arg:self arg:error_data arguments arg arg FunctionDef name:set_converter_latency arg:self arg:value arguments arg arg"
  },
  {
    "library": "tensorflow",
    "name": "_create_event",
    "source_code": "def _create_event(self, ph: str, category: str, name: str, pid: int, tid: int, timestamp: int) -> Dict[str, Union[str, int]]:\n    event = {}\n    event['ph'] = ph\n    event['cat'] = category\n    event['name'] = name\n    event['pid'] = pid\n    event['tid'] = tid\n    event['ts'] = timestamp\n    return event",
    "docstring": "Creates a new Chrome Trace event. For details of the file format, see: Args: ph: The type of event - usually a single character. category: The event category as a string. name: The event name as a string. pid: Identifier of the process generating this event as an integer. tid: Identifier of the thread generating this event as an integer. timestamp: The timestamp of this event as a long integer. Returns: A JSON compatible event object.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\client\\timeline.py",
    "ast_data": "FunctionDef name:_create_event arg:self arg:ph arg:category arg:name arg:pid arg:tid arg:timestamp arguments arg arg arg arg arg arg arg Assign Assign Assign Assign Assign Assign Assign Return return:yes"
  },
  {
    "library": "numpy",
    "name": "has_sametype",
    "source_code": "def has_sametype(self, other):\n    return isinstance(other, self.__class__)",
    "docstring": "Check if types match. Parameters ---------- other : object Class instance. Returns ------- bool : boolean True if other is same class as self",
    "type": "method",
    "file_path": "numpy\\numpy\\polynomial\\_polybase.py",
    "ast_data": "FunctionDef name:has_sametype arg:self arg:other arguments arg arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "mark_mixed_dtype_allowed_computation_ops",
    "source_code": "def mark_mixed_dtype_allowed_computation_ops(gm):\n    for target in [aten.convolution.default, aten.addmm.default, aten.mm.default]:\n        for node in gm.graph.find_nodes(op='call_function', target=target):\n            mark_mixed_dtype(node)",
    "docstring": "Mark convolutions/linear which we will binary fold even with mixed precision constants. We constant fold in the higher precision for better accuracy and then recover the original precision after.",
    "type": "function",
    "file_path": "pytorch\\torch\\_inductor\\fx_passes\\binary_folding.py",
    "ast_data": "FunctionDef name:mark_mixed_dtype_allowed_computation_ops arg:gm arguments arg For For Call Call"
  },
  {
    "library": "tensorflow",
    "name": "index_shuffle",
    "source_code": "@tf_export('random.experimental.index_shuffle')\n@dispatch.add_dispatch_support\ndef index_shuffle(index, seed, max_index):\n    seed = ops.convert_to_tensor(seed)\n    if seed.shape.rank is None:\n        paddings = [[1, 0]]\n    else:\n        paddings = [[1, 0]] + (seed.shape.rank - 1) * [[0, 0]]\n    seed = array_ops.pad(seed, paddings, constant_values=498247692)\n    return gen_random_index_shuffle_ops.random_index_shuffle(index, seed=seed, max_index=max_index, rounds=4)",
    "docstring": "Outputs the position of in a permutation of . For each possible and there is one pseudorandom permutation of the sequence . Instead of materializing the full array we can compute the new position of any integer (tf.datavectorvectortf.random.stateless_*seedseedseed[0, max_index]vS=[0, ..., max_index]eint32uint32int64uint64indexseedindexvSeseed`), then the output will be a vector of the same size which each element shuffled independently. Scalar values are broadcasted in this case.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\stateless_random_ops.py",
    "ast_data": "FunctionDef name:index_shuffle arg:index arg:seed arg:max_index arguments arg arg arg Assign Call If Compare Assign Assign Assign Call Return return:yes Call Call"
  },
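A hedged usage sketch (assumes a TensorFlow build that ships `tf.random.experimental.index_shuffle`): applying the op to a vector of indices yields a full permutation without ever materializing one.

```python
import tensorflow as tf

seed = [1, 42]   # stateless seed of shape [2]
max_index = 9    # permutes the sequence [0, ..., 9]
new_positions = tf.random.experimental.index_shuffle(
    tf.range(10), seed=seed, max_index=max_index)
# Same seed -> same permutation; every input index maps to a unique output.
assert sorted(new_positions.numpy().tolist()) == list(range(10))
```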
  {
    "library": "tensorflow",
    "name": "_initialize",
    "source_code": "def _initialize(self):\n    self._event_queue = CloseableQueue(self._max_queue)\n    self._worker = _EventLoggerThread(self._event_queue, self._ev_writer, self._flush_secs, self._flush_complete, self._flush_sentinel, self._close_sentinel)\n    self._worker.start()",
    "docstring": "Initializes or re-initializes the queue and writer thread. The EventsWriter itself does not need to be re-initialized explicitly, because it will auto-initialize itself if used after being closed.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\summary\\writer\\event_file_writer.py",
    "ast_data": "FunctionDef name:_initialize arg:self arguments arg Assign Call Assign Call Call"
  },
  {
    "library": "pandas",
    "name": "_sort_tuples",
    "source_code": "def _sort_tuples(values: np.ndarray) -> np.ndarray:\n    from pandas.core.internals.construction import to_arrays\n    from pandas.core.sorting import lexsort_indexer\n    arrays, _ = to_arrays(values, None)\n    indexer = lexsort_indexer(arrays, orders=True)\n    return values[indexer]",
    "docstring": "Convert array of tuples (1d) to array of arrays (2d). We need to keep the columns separately as they contain different types and nans (can't use as it may fail when str and nan are mixed in a column as types cannot be compared).",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\algorithms.py",
    "ast_data": "FunctionDef name:_sort_tuples arg:values arguments arg Assign Call Assign Call Return return:yes"
  },
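Since `_sort_tuples` is a pandas internal, here is a public-API sketch of the column-wise idea it relies on: keep the tuple columns separate and `np.lexsort` them, because `np.sort` on tuples mixing str and NaN can raise `TypeError`.

```python
import numpy as np

tuples = [("b", 1.0), ("a", float("nan")), ("a", 1.0)]
first = np.array([t[0] for t in tuples])   # string column
second = np.array([t[1] for t in tuples])  # float column (contains NaN)
order = np.lexsort((second, first))        # last key is the primary key
print([tuples[i] for i in order])          # [('a', 1.0), ('a', nan), ('b', 1.0)]
```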
  {
    "library": "tensorflow",
    "name": "TableInitializerBase",
    "source_code": "class TableInitializerBase(trackable_base.Trackable):\n\n    def __init__(self, key_dtype, value_dtype):\n        self._key_dtype = dtypes.as_dtype(key_dtype)\n        self._value_dtype = dtypes.as_dtype(value_dtype)\n\n    @property\n    def key_dtype(self):\n        return self._key_dtype\n\n    @property\n    def value_dtype(self):\n        return self._value_dtype\n\n    def initialize(self, table):\n        raise NotImplementedError\n\n    @property\n    def _shared_name(self):\n        shared_name = ''\n        if context.executing_eagerly():\n            shared_name += str(ops.uid())\n        return shared_name",
    "docstring": "Base class for lookup table initializers.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\lookup_ops.py",
    "ast_data": "ClassDef name:TableInitializerBase FunctionDef name:__init__ arg:self arg:key_dtype arg:value_dtype arguments arg arg arg Assign Call Assign Call FunctionDef name:key_dtype arg:self arguments arg Return return:yes FunctionDef name:value_dtype arg:self arguments arg Return return:yes FunctionDef name:initialize arg:self arg:table arguments arg arg Raise FunctionDef name:_shared_name arg:self arguments arg Assign If Call Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "OutputsAggregator",
    "source_code": "class OutputsAggregator(Aggregator):\n    _structure = None\n\n    def create(self, batch_outs):\n        self._structure = nest.get_traverse_shallow_structure(lambda x: not is_composite_or_composite_value(x), batch_outs)\n        batch_outs = nest.flatten_up_to(self._structure, batch_outs)\n        for batch_element in batch_outs:\n            if is_composite_or_composite_value(batch_element):\n                self.results.append(ConcatAggregator(self.batch_size))\n            elif isinstance(batch_element, np.ndarray):\n                self.results.append(ConcatAggregator(self.batch_size) if self.use_steps else SliceAggregator(self.num_samples, self.batch_size))\n            else:\n                raise RuntimeError('Attempted to aggregate unsupported object {}.'.format(batch_element))\n            self.results[-1].create(batch_element)\n\n    def aggregate(self, batch_outs, batch_start=None, batch_end=None):\n        batch_outs = nest.flatten_up_to(self._structure, batch_outs)\n        for batch_element, result in zip(batch_outs, self.results):\n            result.aggregate(batch_element, batch_start, batch_end)\n\n    def finalize(self):\n        for result in self.results:\n            result.finalize()\n        self.results = [i.results for i in self.results]\n        self.results = nest.pack_sequence_as(self._structure, self.results)",
    "docstring": "Aggregator that concatenates outputs.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\training_utils_v1.py",
    "ast_data": "ClassDef name:OutputsAggregator Assign FunctionDef name:create arg:self arg:batch_outs arguments arg arg Assign Call arguments arg Call Assign Call For If Call Call Call If Call Call Call Call Raise Call Call Call FunctionDef name:aggregate arg:self arg:batch_outs arg:batch_start arg:batch_end arguments arg arg arg arg Assign Call For Call Call FunctionDef name:finalize arg:self arguments arg For Call Assign Assign Call"
  },
  {
    "library": "sphinx",
    "name": "build_all",
    "source_code": "@final\ndef build_all(self) -> None:\n    self.compile_all_catalogs()\n    self.build(None, summary=__('all source files'), method='all')",
    "docstring": "Build all source files.",
    "type": "method",
    "file_path": "sphinx\\sphinx\\builders\\__init__.py",
    "ast_data": "FunctionDef name:build_all arg:self arguments arg Call Call Call"
  },
  {
    "library": "scikit-learn",
    "name": "_NoneConstraint",
    "source_code": "class _NoneConstraint(_Constraint):\n\n    def is_satisfied_by(self, val):\n        return val is None\n\n    def __str__(self):\n        return 'None'",
    "docstring": "Constraint representing the None singleton.",
    "type": "class",
    "file_path": "scikit-learn\\sklearn\\utils\\_param_validation.py",
    "ast_data": "ClassDef name:_NoneConstraint FunctionDef name:is_satisfied_by arg:self arg:val arguments arg arg Return return:yes Compare FunctionDef name:__str__ arg:self arguments arg Return return:yes"
  },
  {
    "library": "django",
    "name": "__init__",
    "source_code": "def __init__(self, producer, length=None):\n    self._producer = producer\n    self._empty = False\n    self._leftover = b''\n    self.length = length\n    self.position = 0\n    self._remaining = length\n    self._unget_history = []",
    "docstring": "Every LazyStream must have a producer when instantiated. A producer is an iterable that returns a string each time it is called.",
    "type": "method",
    "file_path": "django\\django\\http\\multipartparser.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:producer arg:length arguments arg arg arg Assign Assign Assign Assign Assign Assign Assign"
  },
  {
    "library": "tensorflow",
    "name": "variable_op",
    "source_code": "def variable_op(shape, dtype, name='Variable', set_shape=True, container='', shared_name=''):\n    if not set_shape:\n        shape = tensor_shape.unknown_shape()\n    ret = gen_state_ops.variable(shape=shape, dtype=dtype, name=name, container=container, shared_name=shared_name)\n    if set_shape:\n        ret.set_shape(shape)\n    return ret",
    "docstring": "Deprecated. Used variable_op_v2 instead.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\state_ops.py",
    "ast_data": "FunctionDef name:variable_op arg:shape arg:dtype arg:name arg:set_shape arg:container arg:shared_name arguments arg arg arg arg arg arg If Assign Call Assign Call If Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_reconstruct_all_models",
    "source_code": "def _reconstruct_all_models(self):\n    all_initialized_models = set()\n    while self._models_to_reconstruct:\n        model_id = self._models_to_reconstruct.pop(0)\n        all_initialized_models.add(model_id)\n        model, layers = self.model_layer_dependencies[model_id]\n        self._reconstruct_model(model_id, model, layers)\n        _finalize_config_layers([model])\n    if all_initialized_models != set(self.model_layer_dependencies.keys()):\n        uninitialized_model_ids = set(self.model_layer_dependencies.keys()) - all_initialized_models\n        uninitialized_model_names = [self.model_layer_dependencies[model_id][0].name for model_id in uninitialized_model_ids]\n        raise ValueError('Error when loading from SavedModel -- the following models could not be initialized: {}'.format(uninitialized_model_names))",
    "docstring": "Reconstructs the network structure of all models.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\saving\\saved_model\\load.py",
    "ast_data": "FunctionDef name:_reconstruct_all_models arg:self arguments arg Assign Call While Assign Call Call Assign Call Call If Compare Call Call Assign Call Call Assign Raise Call Call"
  },
  {
    "library": "tensorflow",
    "name": "EnterGradWhileContext",
    "source_code": "def EnterGradWhileContext(self, op, before):\n    grad_state = self.GetGradState(op, before)\n    if grad_state:\n        grad_state.grad_context.Enter()",
    "docstring": "Enter the WhileContext for gradient computation.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\control_flow_state.py",
    "ast_data": "FunctionDef name:EnterGradWhileContext arg:self arg:op arg:before arguments arg arg arg Assign Call If Call"
  },
  {
    "library": "authlib",
    "name": "validate_iss",
    "source_code": "def validate_iss(self):\n    self._validate_claim_value('iss')",
    "docstring": "The \"iss\" (issuer) claim identifies the principal that issued the JWT. The processing of this claim is generally application specific. The \"iss\" value is a case-sensitive string containing a StringOrURI value. Use of this claim is OPTIONAL.",
    "type": "method",
    "file_path": "authlib\\authlib\\jose\\rfc7519\\claims.py",
    "ast_data": "FunctionDef name:validate_iss arg:self arguments arg Call"
  },
  {
    "library": "cherrypy",
    "name": "log",
    "source_code": "def log(self, msg='', level=20, traceback=False):\n    if traceback:\n        msg += '\\n' + ''.join(_traceback.format_exception(*sys.exc_info()))\n    self.publish('log', msg, level)",
    "docstring": "Log the given message. Append the last traceback if requested.",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\process\\wspbus.py",
    "ast_data": "FunctionDef name:log arg:self arg:msg arg:level arg:traceback arguments arg arg arg arg If Call Call Call Call"
  },
  {
    "library": "django",
    "name": "set_group_by",
    "source_code": "def set_group_by(self, allow_aliases=True):\n    if allow_aliases and self.values_select:\n        group_by_annotations = {}\n        values_select = {}\n        for alias, expr in zip(self.values_select, self.select):\n            if isinstance(expr, Col):\n                values_select[alias] = expr\n            else:\n                group_by_annotations[alias] = expr\n        self.annotations = {**group_by_annotations, **self.annotations}\n        self.append_annotation_mask(group_by_annotations)\n        self.select = tuple(values_select.values())\n        self.values_select = tuple(values_select)\n        if self.selected is not None:\n            for index, value_select in enumerate(values_select):\n                self.selected[value_select] = index\n    group_by = list(self.select)\n    for alias, annotation in self.annotation_select.items():\n        if not (group_by_cols := annotation.get_group_by_cols()):\n            continue\n        if allow_aliases and (not annotation.contains_aggregate):\n            group_by.append(Ref(alias, annotation))\n        else:\n            group_by.extend(group_by_cols)\n    self.group_by = tuple(group_by)",
    "docstring": "Expand the GROUP BY clause required by the query. This will usually be the set of all non-aggregate fields in the return data. If the database backend supports grouping by the primary key, and the query would be equivalent, the optimization will be made automatically.",
    "type": "method",
    "file_path": "django\\django\\db\\models\\sql\\query.py",
    "ast_data": "FunctionDef name:set_group_by arg:self arg:allow_aliases arguments arg arg If BoolOp Assign Assign For Call If Call Assign Assign Assign Call Assign Call Call Assign Call If Compare For Call Assign Assign Call For Call If Call If BoolOp Call Call Call Assign Call"
  },
  {
    "library": "scipy",
    "name": "tobsr",
    "source_code": "def tobsr(self, blocksize=None, copy=False):\n    return self.tocsr(copy=False).tobsr(blocksize=blocksize, copy=copy)",
    "docstring": "Convert this array/matrix to Block Sparse Row format. With copy=False, the data/indices may be shared between this array/matrix and the resultant bsr_array/matrix. When blocksize=(R, C) is provided, it will be used for construction of the bsr_array/matrix.",
    "type": "method",
    "file_path": "scipy\\scipy\\sparse\\_base.py",
    "ast_data": "FunctionDef name:tobsr arg:self arg:blocksize arg:copy arguments arg arg arg Return return:yes Call Call"
  },
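A quick sketch of the conversion described above; the intermediate CSR may share data when `copy=False`, and the explicit `blocksize` must evenly divide the shape:

```python
import numpy as np
from scipy.sparse import coo_array

# A 6x6 block-diagonal matrix made of 2x2 blocks of ones.
a = coo_array(np.kron(np.eye(3), np.ones((2, 2))))
b = a.tobsr(blocksize=(2, 2))
print(b.blocksize)  # (2, 2)
```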
  {
    "library": "scrapy",
    "name": "replace_in_component_priority_dict",
    "source_code": "def replace_in_component_priority_dict(self, name: _SettingsKeyT, old_cls: type, new_cls: type, priority: int | None=None) -> None:\n    component_priority_dict = self.getdict(name)\n    old_priority = None\n    for cls_or_path in tuple(component_priority_dict):\n        if load_object(cls_or_path) != old_cls:\n            continue\n        if (old_priority := component_priority_dict.pop(cls_or_path)) is None:\n            break\n    if old_priority is None:\n        raise KeyError(f'{old_cls} not found in the {name} setting ({component_priority_dict!r}).')\n    component_priority_dict[new_cls] = old_priority if priority is None else priority\n    self.set(name, component_priority_dict, priority=self.getpriority(name) or 0)",
    "docstring": "Replace *old_cls* with *new_cls* in the *name* :ref:. If *old_cls* is missing, or has :data: as value, :exc: is raised. If *old_cls* was present as an import string, even more than once, those keys are dropped and replaced by *new_cls*. If *priority* is specified, that is the value assigned to *new_cls* in the component priority dictionary. Otherwise, the value of *old_cls* is used. If *old_cls* was present multiple times (possible with import strings) with different values, the value assigned to *new_cls* is one of them, with no guarantee about which one it is. This change is applied regardless of the priority of the *name* setting. The setting priority is not affected by this change either.",
    "type": "method",
    "file_path": "scrapy\\scrapy\\settings\\__init__.py",
    "ast_data": "FunctionDef name:replace_in_component_priority_dict arg:self arg:name arg:old_cls arg:new_cls arg:priority arguments arg arg arg arg arg Assign Call Assign For Call If Compare Call If Compare Call If Compare Raise Call Assign Compare Call BoolOp Call"
  },
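A hedged sketch of the replacement flow; `MyMiddleware` and `NewMiddleware` are hypothetical stand-ins for real components:

```python
from scrapy.settings import Settings

class MyMiddleware: ...   # hypothetical component
class NewMiddleware: ...  # hypothetical replacement

settings = Settings({"DOWNLOADER_MIDDLEWARES": {MyMiddleware: 543}})
settings.replace_in_component_priority_dict(
    "DOWNLOADER_MIDDLEWARES", MyMiddleware, NewMiddleware)
# NewMiddleware inherits the old priority (543).
print(settings.getdict("DOWNLOADER_MIDDLEWARES"))
```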
  {
    "library": "pytorch",
    "name": "_shards_get_overlap_region_wrt_saved_tensor",
    "source_code": "def _shards_get_overlap_region_wrt_saved_tensor(saved_shard: ChunkStorageMetadata, current_shard: ChunkStorageMetadata) -> list[tuple[int, int, int, int]]:\n    narrows = []\n    for dim, (saved_shard_offset, current_shard_offset, saved_shard_size, current_shard_size) in enumerate(zip(saved_shard.offsets, current_shard.offsets, saved_shard.sizes, current_shard.sizes)):\n        min_range_end = min(saved_shard_offset + saved_shard_size, current_shard_offset + current_shard_size)\n        length = min_range_end - max(current_shard_offset, saved_shard_offset)\n        if saved_shard_offset > current_shard_offset:\n            offset_for_saved_tensor = 0\n            offset_for_current_tensor = saved_shard_offset - current_shard_offset\n        else:\n            offset_for_saved_tensor = current_shard_offset - saved_shard_offset\n            offset_for_current_tensor = 0\n        narrows.append((dim, offset_for_saved_tensor, offset_for_current_tensor, length))\n    return narrows",
    "docstring": "Return the overlapping region between saved_shard and current_shard. There returned list has the same number of elements as the tensor's dimension. For each element, we produce a tuple with the following contents: (dimension, offset, offset, length) Offsets are relative to each shard.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\checkpoint\\resharding.py",
    "ast_data": "FunctionDef name:_shards_get_overlap_region_wrt_saved_tensor arg:saved_shard arg:current_shard arguments arg arg Assign For Call Call Assign Call Assign Call If Compare Assign Assign Assign Assign Call Return return:yes"
  },
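An illustrative call using the `ChunkStorageMetadata` dataclass (both it and the helper are internal `torch.distributed.checkpoint` APIs and may change): two 1-D shards covering `[0, 8)` and `[4, 12)` overlap in `[4, 8)`.

```python
import torch
from torch.distributed.checkpoint.metadata import ChunkStorageMetadata
from torch.distributed.checkpoint.resharding import (
    _shards_get_overlap_region_wrt_saved_tensor,
)

saved = ChunkStorageMetadata(offsets=torch.Size([0]), sizes=torch.Size([8]))
current = ChunkStorageMetadata(offsets=torch.Size([4]), sizes=torch.Size([8]))
# One (dim, saved_offset, current_offset, length) tuple per dimension:
print(_shards_get_overlap_region_wrt_saved_tensor(saved, current))
# -> [(0, 4, 0, 4)]
```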
  {
    "library": "authlib",
    "name": "validate_require_auth_time",
    "source_code": "def validate_require_auth_time(self):\n    self.setdefault('require_auth_time', False)\n    if self.get('require_auth_time') is not None and (not isinstance(self['require_auth_time'], bool)):\n        raise InvalidClaimError('require_auth_time')\n    self._validate_claim_value('require_auth_time')",
    "docstring": "Boolean value specifying whether the auth_time Claim in the ID Token is REQUIRED. It is REQUIRED when the value is true. (If this is false, the auth_time Claim can still be dynamically requested as an individual Claim for the ID Token using the claims request parameter described in Section 5.5.1 of OpenID Connect Core 1.0 [OpenID.Core].) If omitted, the default value is false.",
    "type": "method",
    "file_path": "authlib\\authlib\\oidc\\registration\\claims.py",
    "ast_data": "FunctionDef name:validate_require_auth_time arg:self arguments arg Call If BoolOp Compare Call Call Raise Call Call"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, filename, compression_type=None):\n    self._filename = ops.convert_to_tensor(filename, dtypes.string, name='filename')\n    self._compression_type = convert.optional_param_to_tensor('compression_type', compression_type, argument_default='', argument_dtype=dtypes.string)",
    "docstring": "Initializes a . Args: filename: a string path indicating where to write the TFRecord data. compression_type: (Optional.) a string indicating what type of compression to use when writing the file. See for what types of compression are available. Defaults to .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\data\\experimental\\ops\\writers.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:filename arg:compression_type arguments arg arg arg Assign Call Assign Call"
  },
  {
    "library": "cherrypy",
    "name": "run",
    "source_code": "def run(self):\n    request = cherrypy.serving.request\n    response = cherrypy.serving.response\n    path = request.path_info\n    if path.endswith('login_screen'):\n        self._debug_message('routing %(path)r to login_screen', locals())\n        response.body = self.login_screen()\n        return True\n    elif path.endswith('do_login'):\n        if request.method != 'POST':\n            response.headers['Allow'] = 'POST'\n            self._debug_message('do_login requires POST')\n            raise cherrypy.HTTPError(405)\n        self._debug_message('routing %(path)r to do_login', locals())\n        return self.do_login(**request.params)\n    elif path.endswith('do_logout'):\n        if request.method != 'POST':\n            response.headers['Allow'] = 'POST'\n            raise cherrypy.HTTPError(405)\n        self._debug_message('routing %(path)r to do_logout', locals())\n        return self.do_logout(**request.params)\n    else:\n        self._debug_message('No special path, running do_check')\n        return self.do_check()",
    "docstring": "Perform session authentication.",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\lib\\cptools.py",
    "ast_data": "FunctionDef name:run arg:self arguments arg Assign Assign Assign If Call Call Call Assign Call Return return:yes If Call If Compare Assign Call Raise Call Call Call Return return:yes Call If Call If Compare Assign Raise Call Call Call Return return:yes Call Call Return return:yes Call"
  },
  {
    "library": "django",
    "name": "fetch_returned_insert_columns",
    "source_code": "def fetch_returned_insert_columns(self, cursor, returning_params):\n    return cursor.fetchone()",
    "docstring": "Given a cursor object that has just performed an INSERT...RETURNING statement into a table, return the newly created data.",
    "type": "method",
    "file_path": "django\\django\\db\\backends\\base\\operations.py",
    "ast_data": "FunctionDef name:fetch_returned_insert_columns arg:self arg:cursor arg:returning_params arguments arg arg arg Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "set_dash_capstyle",
    "source_code": "@_docstring.interpd\ndef set_dash_capstyle(self, s):\n    cs = CapStyle(s)\n    if self._dashcapstyle != cs:\n        self.stale = True\n    self._dashcapstyle = cs",
    "docstring": "How to draw the end caps if the line is . The default capstyle is :rc:. Parameters ---------- s : or %(CapStyle)s",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\lines.py",
    "ast_data": "FunctionDef name:set_dash_capstyle arg:self arg:s arguments arg arg Assign Call If Compare Assign Assign"
  },
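A minimal usage sketch:

```python
import matplotlib.pyplot as plt

fig, ax = plt.subplots()
(line,) = ax.plot([0, 1], [0, 1], linestyle="--", linewidth=8)
line.set_dash_capstyle("round")  # one of 'butt', 'projecting', 'round'
```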
  {
    "library": "scipy",
    "name": "__init__",
    "source_code": "def __init__(self, df, scale, seed=None):\n    self._dist = invwishart_gen(seed)\n    self.dim, self.df, self.scale = self._dist._process_parameters(df, scale)\n    self.C = scipy.linalg.cholesky(self.scale, lower=True)\n    self.log_det_scale = 2 * np.sum(np.log(self.C.diagonal()))",
    "docstring": "Create a frozen inverse Wishart distribution. Parameters ---------- df : array_like Degrees of freedom of the distribution scale : array_like Scale matrix of the distribution seed : {None, int, }, optional If is None the singleton is used. If is an int, a new `seedseed` instance then that instance is used.",
    "type": "method",
    "file_path": "scipy\\scipy\\stats\\_multivariate.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:df arg:scale arg:seed arguments arg arg arg arg Assign Call Assign Call Assign Call Assign Call Call Call"
  },
  {
    "library": "django",
    "name": "response_post_save_add",
    "source_code": "def response_post_save_add(self, request, obj):\n    return self._response_post_save(request, obj)",
    "docstring": "Figure out where to redirect after the 'Save' button has been pressed when adding a new object.",
    "type": "method",
    "file_path": "django\\django\\contrib\\admin\\options.py",
    "ast_data": "FunctionDef name:response_post_save_add arg:self arg:request arg:obj arguments arg arg arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "indicator_column",
    "source_code": "@doc_controls.header(_FEATURE_COLUMN_DEPRECATION_WARNING)\n@tf_export('feature_column.indicator_column')\n@deprecation.deprecated(None, _FEATURE_COLUMN_DEPRECATION_RUNTIME_WARNING)\ndef indicator_column(categorical_column):\n    if not isinstance(categorical_column, (CategoricalColumn, fc_old._CategoricalColumn)):\n        raise ValueError('Unsupported input type. Input must be a CategoricalColumn. Given: {}'.format(categorical_column))\n    return IndicatorColumn(categorical_column)",
    "docstring": "Represents multi-hot representation of given categorical column. - For DNN model, can be used to wrap any (e.g., to feed to DNN). Consider to Use if the number of buckets/unique(values) are large. - For Wide (aka linear) model, is the internal representation for categorical column when passing categorical column directly (as any element in feature_columns) to . See for details. Args: categorical_column: A which is created by or functions. Returns: An . Raises: ValueError: If is not CategoricalColumn type.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\feature_column\\feature_column_v2.py",
    "ast_data": "FunctionDef name:indicator_column arg:categorical_column arguments arg If Call Raise Call Call Return return:yes Call Call Call Call"
  },
  {
    "library": "scipy",
    "name": "lagrange_inversion",
    "source_code": "def lagrange_inversion(a):\n    n = len(a)\n    f = sum((a[i] * x ** i for i in range(n)))\n    h = (x / f).series(x, 0, n).removeO()\n    hpower = [h ** 0]\n    for k in range(n):\n        hpower.append((hpower[-1] * h).expand())\n    b = [mp.mpf(0)]\n    for k in range(1, n):\n        b.append(hpower[k].coeff(x, k - 1) / k)\n    b = [mp.mpf(x) for x in b]\n    return b",
    "docstring": "Given a series f(x) = a[1]*x + a[2]*x**2 + ... + a[n-1]*x**(n - 1), use the Lagrange inversion formula to compute a series g(x) = b[1]*x + b[2]*x**2 + ... + b[n-1]*x**(n - 1) so that f(g(x)) = g(f(x)) = x mod x**n. We must have a[0] = 0, so necessarily b[0] = 0 too. The algorithm is naive and could be improved, but speed isn't an issue here and it's easy to read.",
    "type": "function",
    "file_path": "scipy\\scipy\\special\\_precompute\\utils.py",
    "ast_data": "FunctionDef name:lagrange_inversion arg:a arguments arg Assign Call Assign Call Call Assign Call Call Assign For Call Call Call Assign Call For Call Call Call Assign Call Return return:yes"
  },
  {
    "library": "kornia",
    "name": "trans_x",
    "source_code": "@classmethod\ndef trans_x(cls, x: Tensor) -> Se3:\n    zs = zeros_like(x)\n    return cls.trans(x, zs, zs)",
    "docstring": "Construct a x-axis translation. Args: x: the x-axis translation.",
    "type": "method",
    "file_path": "kornia\\kornia\\geometry\\liegroup\\se3.py",
    "ast_data": "FunctionDef name:trans_x arg:cls arg:x arguments arg arg Assign Call Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "inplace_tanh",
    "source_code": "def inplace_tanh(X):\n    np.tanh(X, out=X)",
    "docstring": "Compute the hyperbolic tan function inplace. Parameters ---------- X : {array-like, sparse matrix}, shape (n_samples, n_features) The input data.",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\neural_network\\_base.py",
    "ast_data": "FunctionDef name:inplace_tanh arg:X arguments arg Call"
  },
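A sketch of the in-place update this helper performs; passing `out=X` writes the result back into the input buffer and avoids an extra allocation:

```python
import numpy as np

X = np.array([[-2.0, 0.0, 2.0]])
np.tanh(X, out=X)  # X is overwritten in place
print(X)           # [[-0.96402758  0.          0.96402758]]
```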
  {
    "library": "pytorch",
    "name": "register_forward_hook",
    "source_code": "def register_forward_hook(self, hook: Union[Callable[[T, tuple[Any, ...], Any], Optional[Any]], Callable[[T, tuple[Any, ...], dict[str, Any], Any], Optional[Any]]], *, prepend: bool=False, with_kwargs: bool=False, always_call: bool=False) -> RemovableHandle:\n    handle = RemovableHandle(self._forward_hooks, extra_dict=[self._forward_hooks_with_kwargs, self._forward_hooks_always_called])\n    self._forward_hooks[handle.id] = hook\n    if with_kwargs:\n        self._forward_hooks_with_kwargs[handle.id] = True\n    if always_call:\n        self._forward_hooks_always_called[handle.id] = True\n    if prepend:\n        self._forward_hooks.move_to_end(handle.id, last=False)\n    return handle",
    "docstring": "Register a forward hook on the module. The hook will be called every time after :func: has computed an output. If `forwardtorch.nn.Moduletorch.nn.Moduleregister_module_forward_hooktorch.utils.hooks.RemovableHandle`",
    "type": "method",
    "file_path": "pytorch\\torch\\nn\\modules\\module.py",
    "ast_data": "FunctionDef name:register_forward_hook arg:self arg:hook arguments arg arg arg arg arg Assign Call Assign If Assign If Assign If Call Return return:yes"
  },
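A common usage sketch: capturing a module's output through a forward hook, then removing the hook once it is no longer needed.

```python
import torch
from torch import nn

feats = {}

def hook(module, args, output):
    # Called after forward(); stash a detached copy of the output.
    feats["linear"] = output.detach()

layer = nn.Linear(4, 2)
handle = layer.register_forward_hook(hook)
layer(torch.randn(1, 4))
handle.remove()
print(feats["linear"].shape)  # torch.Size([1, 2])
```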
  {
    "library": "tensorflow",
    "name": "_sample_random",
    "source_code": "def _sample_random():\n    new_center = array_ops.reshape(first_shard[0], [1, -1])\n    if self._distance_metric == COSINE_DISTANCE:\n        new_center = nn_impl.l2_normalize(new_center, dim=1)\n    return new_center",
    "docstring": "Returns a random point as a cluster center.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\clustering_ops.py",
    "ast_data": "FunctionDef name:_sample_random arguments Assign Call If Compare Assign Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "get_figwidth",
    "source_code": "def get_figwidth(self):\n    return self.bbox_inches.width",
    "docstring": "Return the figure width in inches.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\figure.py",
    "ast_data": "FunctionDef name:get_figwidth arg:self arguments arg Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "predict",
    "source_code": "def predict(self, X):\n    return self._get_predictions(X, output_method='predict')",
    "docstring": "Predict on the data matrix X using the ClassifierChain model. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) The input data. Returns ------- Y_pred : array-like of shape (n_samples, n_classes) The predicted values.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\multioutput.py",
    "ast_data": "FunctionDef name:predict arg:self arg:X arguments arg arg Return return:yes Call"
  },
  {
    "library": "pygame",
    "name": "get_arraytypes",
    "source_code": "def get_arraytypes():\n    warnings.warn(DeprecationWarning('only numpy arrays are now supported, this function will be removed in a future version of the module'))\n    return ('numpy',)",
    "docstring": "pygame.surfarray.get_arraytypes(): return tuple DEPRECATED - only numpy arrays are now supported.",
    "type": "function",
    "file_path": "pygame\\src_py\\surfarray.py",
    "ast_data": "FunctionDef name:get_arraytypes arguments Call Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "set_height",
    "source_code": "def set_height(self, h):\n    self._height = h\n    self.stale = True",
    "docstring": "Set the rectangle height. Parameters ---------- h : float",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\patches.py",
    "ast_data": "FunctionDef name:set_height arg:self arg:h arguments arg arg Assign Assign"
  },
  {
    "library": "tensorflow",
    "name": "parse_memory_interval",
    "source_code": "def parse_memory_interval(interval_str):\n    str_interval = _parse_interval(interval_str)\n    interval_start = 0\n    interval_end = float('inf')\n    if str_interval.start:\n        interval_start = parse_readable_size_str(str_interval.start)\n    if str_interval.end:\n        interval_end = parse_readable_size_str(str_interval.end)\n    if interval_start > interval_end:\n        raise ValueError('Invalid interval %s. Start of interval must be less than or equal to end of interval.' % interval_str)\n    return Interval(interval_start, str_interval.start_included, interval_end, str_interval.end_included)",
    "docstring": "Convert a human-readable memory interval to a tuple of start and end value. Args: interval_str: () A human-readable str representing an interval (e.g., \"[10kB, 20kB]\", \"100G\"). Only the units \"kB\", \"MB\", \"GB\" are supported. The \"B character at the end of the input may be omitted. Returns: object where start and end are in bytes. Raises: ValueError: if the input is not valid.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\debug\\cli\\command_parser.py",
    "ast_data": "FunctionDef name:parse_memory_interval arg:interval_str arguments arg Assign Call Assign Assign Call If Assign Call If Assign Call If Compare Raise Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "metrics_names",
    "source_code": "@property\ndef metrics_names(self):\n    metrics_names = ['loss']\n    if self._is_compiled:\n        if not hasattr(self, '_v1_compile_was_called'):\n            return super(Model, self).metrics_names\n        if len(self._training_endpoints) > 1:\n            metrics_names.extend([e.loss_name() for e in self._training_endpoints if not e.should_skip_target()])\n    metrics_names += [m.name for m in self.metrics]\n    return metrics_names",
    "docstring": "Returns the model's display labels for all outputs.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\training_v1.py",
    "ast_data": "FunctionDef name:metrics_names arg:self arguments arg Assign If If Call Return return:yes Call If Compare Call Call Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "start",
    "source_code": "def start(self):\n    self._server.start()",
    "docstring": "Starts this server. >>> dispatcher = tf.data.experimental.service.DispatchServer(start=False) >>> dispatcher.start() Raises: tf.errors.OpError: Or one of its subclasses if an error occurs while starting the server.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\data\\experimental\\service\\server_lib.py",
    "ast_data": "FunctionDef name:start arg:self arguments arg Call"
  },
  {
    "library": "pytorch",
    "name": "_maybe_promote_node",
    "source_code": "def _maybe_promote_node(self, node: torch.fx.Node, rule: TypePromotionRule) -> torch.fx.Node:\n    args, kwargs = self.fetch_args_kwargs_from_env(node)\n    type_promotion_info = rule.preview_type_promotion(args, kwargs)\n    new_args = []\n    new_kwargs = {}\n    for i, arg in enumerate(node.args):\n        new_args.append(self._maybe_promote_arg(node, arg, type_promotion_info.args_dtypes.get(i, None)))\n    for name, arg in node.kwargs.items():\n        new_kwargs[name] = self._maybe_promote_arg(node, arg, type_promotion_info.kwargs_dtypes.get(name, None))\n    new_args = tuple(new_args)\n    if node.args != new_args or node.kwargs != new_kwargs:\n        node.args = new_args\n        node.kwargs = new_kwargs\n        self._rerun_node_after_type_promotion(node, type_promotion_info.out_dtype)\n    return node",
    "docstring": "Promote node inputs and outputs according to type promotion rule.",
    "type": "method",
    "file_path": "pytorch\\torch\\onnx\\_internal\\fx\\passes\\type_promotion.py",
    "ast_data": "FunctionDef name:_maybe_promote_node arg:self arg:node arg:rule arguments arg arg arg Assign Call Assign Call Assign Assign For Call Call Call Call For Call Assign Call Call Assign Call If BoolOp Compare Compare Assign Assign Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "explicit_super",
    "source_code": "def explicit_super(code: types.CodeType, instructions: list[Instruction]) -> None:\n    cell_and_free = (code.co_cellvars or ()) + (code.co_freevars or ())\n    if not len(code.co_varnames):\n        return\n    output = []\n    for idx, inst in enumerate(instructions):\n        output.append(inst)\n        if inst.opname == 'LOAD_GLOBAL' and inst.argval == 'super':\n            nexti = instructions[idx + 1]\n            if nexti.arg == 0 and (sys.version_info >= (3, 12) and nexti.opname == 'CALL' or (sys.version_info >= (3, 11) and sys.version_info < (3, 12) and (nexti.opname == 'PRECALL')) or (sys.version_info < (3, 11) and nexti.opname == 'CALL_FUNCTION')):\n                assert '__class__' in cell_and_free\n                output.append(create_instruction('LOAD_DEREF', argval='__class__'))\n                first_var = code.co_varnames[0]\n                if first_var in cell_and_free:\n                    output.append(create_instruction('LOAD_DEREF', argval=first_var))\n                else:\n                    output.append(create_instruction('LOAD_FAST', argval=first_var))\n                nexti.arg = 2\n                nexti.argval = 2\n                if nexti.opname == 'PRECALL':\n                    call_inst = instructions[idx + 2]\n                    call_inst.arg = 2\n                    call_inst.argval = 2\n    instructions[:] = output",
    "docstring": "convert super() with no args into explicit arg form",
    "type": "function",
    "file_path": "pytorch\\torch\\_dynamo\\bytecode_transformation.py",
    "ast_data": "FunctionDef name:explicit_super arg:code arg:instructions arguments arg arg Assign BoolOp BoolOp If Call Return return:no Assign For Call Call If BoolOp Compare Compare Assign If BoolOp Compare BoolOp BoolOp Compare Compare BoolOp Compare Compare Compare BoolOp Compare Compare Compare Call Call Assign If Compare Call Call Call Call Assign Assign If Compare Assign Assign Assign Assign"
  },
  {
    "library": "tensorflow",
    "name": "variables",
    "source_code": "@property\ndef variables(self):\n    return self.weights",
    "docstring": "Returns the list of all layer variables/weights. Alias of . Returns: A list of variables.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\base_layer_v1.py",
    "ast_data": "FunctionDef name:variables arg:self arguments arg Return return:yes"
  },
  {
    "library": "sphinx",
    "name": "terminal_safe",
    "source_code": "def terminal_safe(s: str) -> str:\n    return s.encode('ascii', 'backslashreplace').decode('ascii')",
    "docstring": "Safely encode a string for printing to the terminal.",
    "type": "function",
    "file_path": "sphinx\\sphinx\\util\\console.py",
    "ast_data": "FunctionDef name:terminal_safe arg:s arguments arg Return return:yes Call Call"
  },
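A small sketch of the escaping behavior: non-ASCII characters are backslash-escaped so printing to a non-UTF-8 terminal cannot raise `UnicodeEncodeError`.

```python
from sphinx.util.console import terminal_safe

print(terminal_safe("héllo – wörld"))  # h\xe9llo \u2013 w\xf6rld
```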
  {
    "library": "tensorflow",
    "name": "_update",
    "source_code": "def _update(self, update_fn, value, **kwargs):\n    if values_util.is_saving_non_distributed():\n        return update_fn(self._primary, value, **kwargs)\n    with distribute_lib.enter_or_assert_strategy(self.distribute_strategy):\n        if distribute_lib.in_cross_replica_context():\n            update_replica_id = distribute_lib.get_update_replica_id()\n            if update_replica_id is not None:\n                replica_value = self._get_replica(update_replica_id)\n                return update_fn(replica_value, value, **kwargs)\n            return self._update_cross_replica(update_fn, value, **kwargs)\n        else:\n            values_util.assert_replica_context(self.distribute_strategy)\n            return self._update_replica(update_fn, value, **kwargs)",
    "docstring": "Applies updates depending on the context. The method calls in replica context, in cross replica context, and in update context. If is True, the method returns the updated Variable. If is False, the method returns the update . Args: update_fn: A callable to pass to to update the variable. It should have the same signature as . value: value to be passed to . **kwargs: keyword arguments to . Returns: Updated variable or .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\values.py",
    "ast_data": "FunctionDef name:_update arg:self arg:update_fn arg:value arguments arg arg arg arg If Call Return return:yes Call With Call If Call Assign Call If Compare Assign Call Return return:yes Call Return return:yes Call Call Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "is_complex_dtype",
    "source_code": "def is_complex_dtype(arr_or_dtype) -> bool:\n    return _is_dtype_type(arr_or_dtype, classes(np.complexfloating))",
    "docstring": "Check whether the provided array or dtype is of a complex dtype. Parameters ---------- arr_or_dtype : array-like or dtype The array or dtype to check. Returns ------- boolean Whether or not the array or dtype is of a complex dtype. See Also -------- api.types.is_complex: Return True if given object is complex. api.types.is_numeric_dtype: Check whether the provided array or dtype is of a numeric dtype. api.types.is_integer_dtype: Check whether the provided array or dtype is of an integer dtype. Examples -------- >>> from pandas.api.types import is_complex_dtype >>> is_complex_dtype(str) False >>> is_complex_dtype(int) False >>> is_complex_dtype(np.complex128) True >>> is_complex_dtype(np.array([\"a\", \"b\"])) False >>> is_complex_dtype(pd.Series([1, 2])) False >>> is_complex_dtype(np.array([1 + 1j, 5])) True",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\dtypes\\common.py",
    "ast_data": "FunctionDef name:is_complex_dtype arg:arr_or_dtype arguments arg Return return:yes Call Call"
  },
  {
    "library": "scikit-learn",
    "name": "_fit_binary",
    "source_code": "def _fit_binary(estimator, X, y, fit_params, classes=None):\n    unique_y = np.unique(y)\n    if len(unique_y) == 1:\n        if classes is not None:\n            if y[0] == -1:\n                c = 0\n            else:\n                c = y[0]\n            warnings.warn('Label %s is present in all training examples.' % str(classes[c]))\n        estimator = _ConstantPredictor().fit(X, unique_y)\n    else:\n        estimator = clone(estimator)\n        estimator.fit(X, y, **fit_params)\n    return estimator",
    "docstring": "Fit a single binary estimator.",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\multiclass.py",
    "ast_data": "FunctionDef name:_fit_binary arg:estimator arg:X arg:y arg:fit_params arg:classes arguments arg arg arg arg arg Assign Call If Compare Call If Compare If Compare Assign Assign Call Call Assign Call Call Assign Call Call Return return:yes"
  },
  {
    "library": "cherrypy",
    "name": "__lt__",
    "source_code": "def __lt__(self, other):\n    return self.value < other.value",
    "docstring": "Check if this header value is less than the other.",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\lib\\httputil.py",
    "ast_data": "FunctionDef name:__lt__ arg:self arg:other arguments arg arg Return return:yes Compare"
  },
  {
    "library": "scrapy",
    "name": "store",
    "source_code": "def store(self, file: IO[bytes]) -> Deferred[None] | None:\n    pass",
    "docstring": "Store the given file stream",
    "type": "method",
    "file_path": "scrapy\\scrapy\\extensions\\feedexport.py",
    "ast_data": "FunctionDef name:store arg:self arg:file arguments arg arg"
  },
  {
    "library": "pytorch",
    "name": "SyclExtension",
    "source_code": "def SyclExtension(name, sources, *args, **kwargs):\n    library_dirs = kwargs.get('library_dirs', [])\n    library_dirs += library_paths()\n    kwargs['library_dirs'] = library_dirs\n    libraries = kwargs.get('libraries', [])\n    libraries.append('c10')\n    libraries.append('c10_xpu')\n    libraries.append('torch')\n    libraries.append('torch_cpu')\n    if not kwargs.get('py_limited_api', False):\n        libraries.append('torch_python')\n    libraries.append('torch_xpu')\n    kwargs['libraries'] = libraries\n    include_dirs = kwargs.get('include_dirs', [])\n    include_dirs += include_paths()\n    kwargs['include_dirs'] = include_dirs\n    kwargs['language'] = 'c++'\n    return setuptools.Extension(name, sources, *args, **kwargs)",
    "docstring": "Creates a :class: for SYCL/C++. Convenience method that creates a :class: with the bare minimum (but often sufficient) arguments to build a SYCL/C++ extension. All arguments are forwarded to the :class: constructor. .. warning:: The PyTorch python API (as provided in libtorch_python) cannot be built with the flag `TORCH_XPU_ARCH_LIST` Note that while it's possible to include all supported archs, the more archs get included the slower the building process will be, as it will build a separate kernel image for each arch. Note: Ninja is required to build SyclExtension.",
    "type": "function",
    "file_path": "pytorch\\torch\\utils\\cpp_extension.py",
    "ast_data": "FunctionDef name:SyclExtension arg:name arg:sources arguments arg arg arg arg Assign Call Call Assign Assign Call Call Call Call Call If Call Call Call Assign Assign Call Call Assign Assign Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "@deprecation.deprecated(None, 'Queue-based input pipelines have been replaced by `tf.data`. Use `tf.contrib.data.LMDBDataset`.')\ndef __init__(self, name=None, options=None):\n    del options\n    rr = gen_io_ops.lmdb_reader(name=name)\n    super(LMDBReader, self).__init__(rr)",
    "docstring": "Create a LMDBReader. Args: name: A name for the operation (optional). options: A LMDBRecordOptions object (optional).",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\io_ops.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:name arg:options arguments arg arg arg Assign Call Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "start_queue_runners",
    "source_code": "@tf_export(v1=['train.queue_runner.start_queue_runners', 'train.start_queue_runners'])\n@deprecation.deprecated(None, _DEPRECATION_INSTRUCTION)\ndef start_queue_runners(sess=None, coord=None, daemon=True, start=True, collection=ops.GraphKeys.QUEUE_RUNNERS):\n    if context.executing_eagerly():\n        raise RuntimeError('Queues are not compatible with eager execution.')\n    if sess is None:\n        sess = ops.get_default_session()\n        if not sess:\n            raise ValueError('Cannot start queue runners: No default session is registered. Use `with sess.as_default()` or pass an explicit session to tf.start_queue_runners(sess=sess)')\n    if not isinstance(sess, session.SessionInterface):\n        if sess.__class__.__name__ in ['MonitoredSession', 'SingularMonitoredSession']:\n            return []\n        raise TypeError('sess must be a `tf.Session` object. Given class: {}'.format(sess.__class__))\n    queue_runners = ops.get_collection(collection)\n    if not queue_runners:\n        logging.warning('`tf.train.start_queue_runners()` was called when no queue runners were defined. You can safely remove the call to this deprecated function.')\n    with sess.graph.as_default():\n        threads = []\n        for qr in ops.get_collection(collection):\n            threads.extend(qr.create_threads(sess, coord=coord, daemon=daemon, start=start))\n    return threads",
    "docstring": "Starts all queue runners collected in the graph. This is a companion method to . It just starts threads for all queue runners collected in the graph. It returns the list of all threads. @compatibility(TF2) QueueRunners are not compatible with eager execution. Instead, please use [tf.data]( to get data into your model. @end_compatibility Args: sess: used to run the queue ops. Defaults to the default session. coord: Optional for coordinating the started threads. daemon: Whether the threads should be marked as , meaning they don't block program exit. start: Set to to only create the threads, not start them. collection: A specifying the graph collection to get the queue runners from. Defaults to . Raises: ValueError: if is None and there isn't any default session. TypeError: if is not a object. Returns: A list of threads. Raises: RuntimeError: If called with eager execution enabled. ValueError: If called without a default registered.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\training\\queue_runner_impl.py",
    "ast_data": "FunctionDef name:start_queue_runners arg:sess arg:coord arg:daemon arg:start arg:collection arguments arg arg arg arg arg If Call Raise Call If Compare Assign Call If Raise Call If Call If Compare Return return:no Raise Call Call Assign Call If Call With Call Assign For Call Call Call Return return:yes Call Call"
  },
  {
    "library": "scipy",
    "name": "__call__",
    "source_code": "def __call__(self, x, *args):\n    if self.vals is None or x != self.x:\n        fg = self.fun(x, *args)\n        self.x = x\n        self.n_calls += 1\n        self.vals = fg[:]\n    return self.vals[0]",
    "docstring": "Calculate f or use cached value if available",
    "type": "method",
    "file_path": "scipy\\scipy\\optimize\\_root_scalar.py",
    "ast_data": "FunctionDef name:__call__ arg:self arg:x arguments arg arg arg If BoolOp Compare Compare Assign Call Assign Assign Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "convert",
    "source_code": "@staticmethod\ndef convert(value, unit, axis):\n    if unit is None:\n        raise ValueError('Missing category information for StrCategoryConverter; this might be caused by unintendedly mixing categorical and numeric data')\n    StrCategoryConverter._validate_unit(unit)\n    values = np.atleast_1d(np.array(value, dtype=object))\n    unit.update(values)\n    s = np.vectorize(unit._mapping.__getitem__, otypes=[float])(values)\n    return s if not cbook.is_scalar_or_string(value) else s[0]",
    "docstring": "Convert strings in *value* to floats using mapping information stored in the *unit* object. Parameters ---------- value : str or iterable Value or list of values to be converted. unit : An object mapping strings to integers. axis : The axis on which the converted value is plotted. .. note:: *axis* is unused. Returns ------- float or of float",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\category.py",
    "ast_data": "FunctionDef name:convert arg:value arg:unit arg:axis arguments arg arg arg If Compare Raise Call Call Assign Call Call Call Assign Call Call Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "__init__",
    "source_code": "@docfiller\ndef __init__(self, file_stream, do_compression=False, unicode_strings=False, global_vars=None, long_field_names=False, oned_as='row'):\n    self.file_stream = file_stream\n    self.do_compression = do_compression\n    self.unicode_strings = unicode_strings\n    if global_vars:\n        self.global_vars = global_vars\n    else:\n        self.global_vars = []\n    self.long_field_names = long_field_names\n    self.oned_as = oned_as\n    self._matrix_writer = None",
    "docstring": "Initialize writer for matlab 5 format files Parameters ---------- %(do_compression)s %(unicode_strings)s global_vars : None or sequence of strings, optional Names of variables to be marked as global for matlab %(long_fields)s %(oned_as)s",
    "type": "method",
    "file_path": "scipy\\scipy\\io\\matlab\\_mio5.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:file_stream arg:do_compression arg:unicode_strings arg:global_vars arg:long_field_names arg:oned_as arguments arg arg arg arg arg arg arg Assign Assign Assign If Assign Assign Assign Assign Assign"
  },
  {
    "library": "django",
    "name": "__init__",
    "source_code": "def __init__(self, default_settings):\n    self.__dict__['_deleted'] = set()\n    self.default_settings = default_settings",
    "docstring": "Requests for configuration variables not in this class are satisfied from the module specified in default_settings (if possible).",
    "type": "method",
    "file_path": "django\\django\\conf\\__init__.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:default_settings arguments arg arg Assign Call Assign"
  },
  {
    "library": "pytorch",
    "name": "memory_stats",
    "source_code": "def memory_stats(device: _device_t=None) -> dict[str, Any]:\n    result = []\n\n    def _recurse_add_to_result(prefix: str, obj: Any) -> None:\n        if isinstance(obj, dict):\n            if len(prefix) > 0:\n                prefix += '.'\n            for k, v in obj.items():\n                _recurse_add_to_result(prefix + k, v)\n        else:\n            result.append((prefix, obj))\n    stats = memory_stats_as_nested_dict(device=device)\n    _recurse_add_to_result('', stats)\n    result.sort()\n    return collections.OrderedDict(result)",
    "docstring": "Return a dictionary of XPU memory allocator statistics for a given device. The return value of this function is a dictionary of statistics, each of which is a non-negative integer. Core statistics: - `~torch.xpu.current_devicedevice` (default).",
    "type": "function",
    "file_path": "pytorch\\torch\\xpu\\memory.py",
    "ast_data": "FunctionDef name:memory_stats arg:device arguments arg Assign FunctionDef name:_recurse_add_to_result arg:prefix arg:obj arguments arg arg If Call If Compare Call For Call Call Call Assign Call Call Call Return return:yes Call"
  },
  {
    "library": "django",
    "name": "datetime_extract_sql",
    "source_code": "def datetime_extract_sql(self, lookup_type, sql, params, tzname):\n    raise NotImplementedError('subclasses of BaseDatabaseOperations may require a datetime_extract_sql() method')",
    "docstring": "Given a lookup_type of 'year', 'month', 'day', 'hour', 'minute', or 'second', return the SQL that extracts a value from the given datetime field field_name.",
    "type": "method",
    "file_path": "django\\django\\db\\backends\\base\\operations.py",
    "ast_data": "FunctionDef name:datetime_extract_sql arg:self arg:lookup_type arg:sql arg:params arg:tzname arguments arg arg arg arg arg Raise Call"
  },
  {
    "library": "authlib",
    "name": "LoginRequiredError",
    "source_code": "class LoginRequiredError(OAuth2Error):\n    error = 'login_required'",
    "docstring": "The Authorization Server requires End-User authentication. This error MAY be returned when the prompt parameter value in the Authentication Request is none, but the Authentication Request cannot be completed without displaying a user interface for End-User authentication.",
    "type": "class",
    "file_path": "authlib\\authlib\\oidc\\core\\errors.py",
    "ast_data": "ClassDef name:LoginRequiredError Assign"
  },
  {
    "library": "tensorflow",
    "name": "_zeros_like",
    "source_code": "def _zeros_like(op_input, op_output):\n    if op_output.dtype == dtypes.resource:\n        return array_ops.zeros(gen_resource_variable_ops.variable_shape(op_output), dtype=default_gradient.get_zeros_dtype(op_input))\n    return array_ops.zeros_like(op_output)",
    "docstring": "Like array_ops.zeros_like() but also accepts resource var handles.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\while_v2.py",
    "ast_data": "FunctionDef name:_zeros_like arg:op_input arg:op_output arguments arg arg If Compare Return return:yes Call Call Call Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "hard_constraints",
    "source_code": "def hard_constraints(self):\n    for i in range(self.ncols):\n        hc = [self.rights[i] >= self.lefts[i], self.rights[i] - self.margins['right'][i] - self.margins['rightcb'][i] >= self.lefts[i] - self.margins['left'][i] - self.margins['leftcb'][i]]\n        for c in hc:\n            self.solver.addConstraint(c | 'required')\n    for i in range(self.nrows):\n        hc = [self.tops[i] >= self.bottoms[i], self.tops[i] - self.margins['top'][i] - self.margins['topcb'][i] >= self.bottoms[i] - self.margins['bottom'][i] - self.margins['bottomcb'][i]]\n        for c in hc:\n            self.solver.addConstraint(c | 'required')",
    "docstring": "These are the redundant constraints, plus ones that make the rest of the code easier.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\_layoutgrid.py",
    "ast_data": "FunctionDef name:hard_constraints arg:self arguments arg For Call Assign Compare Compare For Call For Call Assign Compare Compare For Call"
  },
  {
    "library": "pandas",
    "name": "has_dropped_na",
    "source_code": "@final\n@cache_readonly\ndef has_dropped_na(self) -> bool:\n    return bool((self.ids < 0).any())",
    "docstring": "Whether grouper has null value(s) that are dropped.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\groupby\\ops.py",
    "ast_data": "FunctionDef name:has_dropped_na arg:self arguments arg Return return:yes Call Call Compare"
  },
  {
    "library": "pytorch",
    "name": "check_same_dtype",
    "source_code": "def check_same_dtype(*args):\n    full_dtype = None\n    scalar_type = None\n    for arg in args:\n        if isinstance(arg, Number):\n            continue\n        elif isinstance(arg, TensorLike):\n            if full_dtype is None:\n                full_dtype = arg.dtype\n            if scalar_type is None:\n                scalar_type = dtype_to_type(arg.dtype)\n            if full_dtype is not arg.dtype:\n                msg = 'Tensor with dtype ' + str(arg.dtype) + ' is not the expected dtype of ' + str(full_dtype) + '!'\n                raise RuntimeError(msg)\n            arg_type = dtype_to_type(arg.dtype)\n            if arg_type is not scalar_type:\n                msg = 'Tensor with corresponding Python type ' + str(arg_type) + ' is not the expected type of ' + str(scalar_type) + '!'\n                raise RuntimeError(msg)\n        else:\n            msg = 'Unexpected type when checking for same dtype, ' + str(type(arg)) + '!'\n            raise RuntimeError(msg)",
    "docstring": "Checks that all Tensors in args have the same device and that all Numbers have the same corresponding Python type. Raises a RuntimeError when: - args contains an object whose type is not Tensor or Number - two Tensors objects in args have different dtypes - two Number objects in args have different types - there are Tensors and Numbers in args, and one of those Tensors corresponding Python types is different from the type of one of those Numbers",
    "type": "function",
    "file_path": "pytorch\\torch\\_prims_common\\__init__.py",
    "ast_data": "FunctionDef name:check_same_dtype arguments arg Assign Assign For If Call If Call If Compare Assign If Compare Assign Call If Compare Assign Call Call Raise Call Assign Call If Compare Assign Call Call Raise Call Assign Call Call Raise Call"
  },
  {
    "library": "pytorch",
    "name": "step",
    "source_code": "def step(self):\n    with torch.no_grad():\n        for name, configs in self.data_groups.items():\n            data = configs['data']\n            self.update_mask(name, data, configs)\n            self.data_groups[name].pop('data')",
    "docstring": "Internally calls the update_mask() function for each layer",
    "type": "method",
    "file_path": "pytorch\\torch\\ao\\pruning\\_experimental\\activation_sparsifier\\activation_sparsifier.py",
    "ast_data": "FunctionDef name:step arg:self arguments arg With Call For Call Assign Call Call"
  },
  {
    "library": "pytorch",
    "name": "rprop",
    "source_code": "@_disable_dynamo_if_unsupported(single_tensor_fn=_single_tensor_rprop)\ndef rprop(params: list[Tensor], grads: list[Tensor], prevs: list[Tensor], step_sizes: list[Tensor], state_steps: list[Tensor], foreach: Optional[bool]=None, capturable: bool=False, maximize: bool=False, differentiable: bool=False, has_complex: bool=False, *, step_size_min: float, step_size_max: float, etaminus: float, etaplus: float):\n    if not torch.compiler.is_compiling() and (not all((isinstance(t, torch.Tensor) for t in state_steps))):\n        raise RuntimeError('API has changed, `state_steps` argument must contain a list of singleton tensors')\n    if foreach is None:\n        _, foreach = _default_to_fused_or_foreach(params, differentiable, use_fused=False)\n    if foreach and torch.jit.is_scripting():\n        raise RuntimeError('torch.jit.script not supported with foreach optimizers')\n    if foreach and (not torch.jit.is_scripting()):\n        func = _multi_tensor_rprop\n    else:\n        func = _single_tensor_rprop\n    func(params, grads, prevs, step_sizes, state_steps, step_size_min=step_size_min, step_size_max=step_size_max, etaminus=etaminus, etaplus=etaplus, capturable=capturable, maximize=maximize, differentiable=differentiable, has_complex=has_complex)",
    "docstring": "Functional API that performs rprop algorithm computation. See :class: for details.",
    "type": "function",
    "file_path": "pytorch\\torch\\optim\\rprop.py",
    "ast_data": "FunctionDef name:rprop arg:params arg:grads arg:prevs arg:step_sizes arg:state_steps arg:foreach arg:capturable arg:maximize arg:differentiable arg:has_complex arguments arg arg arg arg arg arg arg arg arg arg arg arg arg arg If BoolOp Call Call Call Raise Call If Compare Assign Call If BoolOp Call Raise Call If BoolOp Call Assign Assign Call Call"
  },
  {
    "library": "pytorch",
    "name": "needs_unshard",
    "source_code": "def needs_unshard(self) -> bool:\n    if not self.uses_sharded_strategy:\n        return False\n    unsharded_flat_param = self._get_padded_unsharded_flat_param()\n    already_unsharded = _same_storage_size(unsharded_flat_param, unsharded_flat_param.numel())\n    return not already_unsharded",
    "docstring": "Return if the handle's flat parameter needs to be unsharded.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\fsdp\\_flat_param.py",
    "ast_data": "FunctionDef name:needs_unshard arg:self arguments arg If Return return:yes Assign Call Assign Call Call Return return:yes"
  },
  {
    "library": "pandas",
    "name": "generate_table",
    "source_code": "def generate_table(self) -> tuple[dict[str, tuple[int, int]], DataFrame]:\n    gso_table = self._gso_table\n    gso_df = self.df\n    columns = list(gso_df.columns)\n    selected = gso_df[self.columns]\n    col_index = [(col, columns.index(col)) for col in self.columns]\n    keys = np.empty(selected.shape, dtype=np.uint64)\n    for o, (idx, row) in enumerate(selected.iterrows()):\n        for j, (col, v) in enumerate(col_index):\n            val = row[col]\n            val = '' if isna(val) else val\n            key = gso_table.get(val, None)\n            if key is None:\n                key = (v + 1, o + 1)\n                gso_table[val] = key\n            keys[o, j] = self._convert_key(key)\n    for i, col in enumerate(self.columns):\n        gso_df[col] = keys[:, i]\n    return (gso_table, gso_df)",
    "docstring": "Generates the GSO lookup table for the DataFrame Returns ------- gso_table : dict Ordered dictionary using the string found as keys and their lookup position (v,o) as values gso_df : DataFrame DataFrame where strl columns have been converted to (v,o) values Notes ----- Modifies the DataFrame in-place. The DataFrame returned encodes the (v,o) values as uint64s. The encoding depends on the dta version, and can be expressed as enc = v + o * 2 ** (o_size * 8) so that v is stored in the lower bits and o is in the upper bits. o_size is * 117: 4 * 118: 6 * 119: 5",
    "type": "method",
    "file_path": "pandas\\pandas\\io\\stata.py",
    "ast_data": "FunctionDef name:generate_table arg:self arguments arg Assign Assign Assign Call Assign Assign Call Assign Call For Call Call For Call Assign Assign Call Assign Call If Compare Assign Assign Assign Call For Call Assign Return return:yes"
  },
  {
    "library": "django",
    "name": "get_alternative_name",
    "source_code": "def get_alternative_name(self, file_root, file_ext):\n    return '%s_%s%s' % (file_root, get_random_string(7), file_ext)",
    "docstring": "Return an alternative filename, by adding an underscore and a random 7 character alphanumeric string (before the file extension, if one exists) to the filename.",
    "type": "method",
    "file_path": "django\\django\\core\\files\\storage\\base.py",
    "ast_data": "FunctionDef name:get_alternative_name arg:self arg:file_root arg:file_ext arguments arg arg arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "weighted_metrics",
    "source_code": "@property\ndef weighted_metrics(self):\n    if not self._built:\n        return None\n    return nest.flatten(self._weighted_metrics)",
    "docstring": "Metrics in this container that should be passed .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\compile_utils.py",
    "ast_data": "FunctionDef name:weighted_metrics arg:self arguments arg If Return return:no Return return:yes Call"
  },
  {
    "library": "cherrypy",
    "name": "ErrorTool",
    "source_code": "class ErrorTool(Tool):\n\n    def __init__(self, callable, name=None):\n        Tool.__init__(self, None, callable, name)\n\n    def _wrapper(self):\n        self.callable(**self._merged_args())\n\n    def _setup(self):\n        cherrypy.serving.request.error_response = self._wrapper",
    "docstring": "Tool which is used to replace the default request.error_response.",
    "type": "class",
    "file_path": "cherrypy\\cherrypy\\_cptools.py",
    "ast_data": "ClassDef name:ErrorTool FunctionDef name:__init__ arg:self arg:callable arg:name arguments arg arg arg Call FunctionDef name:_wrapper arg:self arguments arg Call Call FunctionDef name:_setup arg:self arguments arg Assign"
  },
  {
    "library": "numpy",
    "name": "deprecate_with_doc",
    "source_code": "def deprecate_with_doc(msg):\n    warnings.warn('`deprecate` is deprecated, use `warn` with `DeprecationWarning` instead. (deprecated in NumPy 2.0)', DeprecationWarning, stacklevel=2)\n    return _Deprecate(message=msg)",
    "docstring": "Deprecates a function and includes the deprecation in its docstring. .. deprecated:: 2.0 Use with :exc: instead. This function is used as a decorator. It returns an object that can be used to issue a DeprecationWarning, by passing the to-be decorated function as argument, this adds warning to the to-be decorated function's docstring and returns the new function object. See Also -------- deprecate : Decorate a function such that it issues a :exc: Parameters ---------- msg : str Additional explanation of the deprecation. Displayed in the docstring after the warning. Returns ------- obj : object",
    "type": "function",
    "file_path": "numpy\\numpy\\lib\\_utils_impl.py",
    "ast_data": "FunctionDef name:deprecate_with_doc arg:msg arguments arg Call Return return:yes Call"
  },
  {
    "library": "django",
    "name": "touches",
    "source_code": "def touches(self, other):\n    return self._topology(capi.ogr_touches, other)",
    "docstring": "Return True if this geometry touches the other.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\gdal\\geometries.py",
    "ast_data": "FunctionDef name:touches arg:self arg:other arguments arg arg Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "py_vq",
    "source_code": "def py_vq(obs, code_book, check_finite=True):\n    xp = array_namespace(obs, code_book)\n    obs = _asarray(obs, xp=xp, check_finite=check_finite)\n    code_book = _asarray(code_book, xp=xp, check_finite=check_finite)\n    if obs.ndim != code_book.ndim:\n        raise ValueError('Observation and code_book should have the same rank')\n    if obs.ndim == 1:\n        obs = obs[:, xp.newaxis]\n        code_book = code_book[:, xp.newaxis]\n    dist = xp.asarray(cdist(obs, code_book))\n    code = xp.argmin(dist, axis=1)\n    min_dist = xp.min(dist, axis=1)\n    return (code, min_dist)",
    "docstring": "Python version of vq algorithm. The algorithm computes the Euclidean distance between each observation and every frame in the code_book. Parameters ---------- obs : ndarray Expects a rank 2 array. Each row is one observation. code_book : ndarray Code book to use. Same format than obs. Should have same number of features (e.g., columns) than obs. check_finite : bool, optional Whether to check that the input matrices contain only finite numbers. Disabling may give a performance gain, but may result in problems (crashes, non-termination) if the inputs do contain infinities or NaNs. Default: True Returns ------- code : ndarray code[i] gives the label of the ith obversation; its code is code_book[code[i]]. mind_dist : ndarray min_dist[i] gives the distance between the ith observation and its corresponding code. Notes ----- This function is slower than the C version but works for all input types. If the inputs have the wrong types for the C versions of the function, this one is called as a last resort. It is about 20 times slower than the C version.",
    "type": "function",
    "file_path": "scipy\\scipy\\cluster\\vq.py",
    "ast_data": "FunctionDef name:py_vq arg:obs arg:code_book arg:check_finite arguments arg arg arg Assign Call Assign Call Assign Call If Compare Raise Call If Compare Assign Assign Assign Call Call Assign Call Assign Call Return return:yes"
  },
  {
    "library": "cherrypy",
    "name": "parse_query_string",
    "source_code": "def parse_query_string(query_string, keep_blank_values=True, encoding='utf-8'):\n    if image_map_pattern.match(query_string):\n        pm = query_string.split(',')\n        pm = {'x': int(pm[0]), 'y': int(pm[1])}\n    else:\n        pm = _parse_qs(query_string, keep_blank_values, encoding=encoding)\n    return pm",
    "docstring": "Build a params dictionary from a query_string. Duplicate key/value pairs in the provided query_string will be returned as {'key': [val1, val2, ...]}. Single key/values will be returned as strings: {'key': 'value'}.",
    "type": "function",
    "file_path": "cherrypy\\cherrypy\\lib\\httputil.py",
    "ast_data": "FunctionDef name:parse_query_string arg:query_string arg:keep_blank_values arg:encoding arguments arg arg arg If Call Assign Call Assign Call Call Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "get_regularization_loss",
    "source_code": "@tf_export(v1=['losses.get_regularization_loss'])\ndef get_regularization_loss(scope=None, name='total_regularization_loss'):\n    losses = get_regularization_losses(scope)\n    if losses:\n        return math_ops.add_n(losses, name=name)\n    else:\n        return constant_op.constant(0.0)",
    "docstring": "Gets the total regularization loss. Args: scope: An optional scope name for filtering the losses to return. name: The name of the returned tensor. Returns: A scalar regularization loss.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\losses\\util.py",
    "ast_data": "FunctionDef name:get_regularization_loss arg:scope arg:name arguments arg arg Assign Call If Return return:yes Call Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_rewrite_grad_indexed_slices_output",
    "source_code": "def _rewrite_grad_indexed_slices_output(old_output_slices, new_input_slices):\n\n    def rewrite(old_output, new_input):\n        assert old_output.type == 'Identity'\n        concat_op = old_output.inputs[0].op\n        assert concat_op.type == 'ConcatV2'\n        old_concat_args = concat_op.inputs[:-1]\n        return array_ops.concat([new_input] + old_concat_args[1:], 0)\n    values = rewrite(old_output_slices.values.op, new_input_slices.values)\n    indices = rewrite(old_output_slices.indices.op, new_input_slices.indices)\n    return indexed_slices.IndexedSlices(values=values, indices=indices, dense_shape=new_input_slices.dense_shape)",
    "docstring": "Creates a new version of old_output_slices with new_input_slices as input. This method assumes that old_output_slices.{values,indices} are produced by concatenating the incoming gradient Tensor input with the IndexedSlices produced by the gradient computation of the while body. See backprop.aggregate_indexed_slices_gradients for where these concats are constructed. We build new concats that use new_input_slices instead of the original Tensor input. Args: old_output_slices: original IndexedSlices output of while gradient. new_input_slices: new IndexedSlices to use as input to while gradient. Returns: A new IndexedSlices to replace old_output_slices.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\while_v2_indexed_slices_rewriter.py",
    "ast_data": "FunctionDef name:_rewrite_grad_indexed_slices_output arg:old_output_slices arg:new_input_slices arguments arg arg FunctionDef name:rewrite arg:old_output arg:new_input arguments arg arg Compare Assign Compare Assign Return return:yes Call Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "left_multiply",
    "source_code": "def left_multiply(J, d, copy=True):\n    if copy and (not isinstance(J, LinearOperator)):\n        J = J.copy()\n    if issparse(J):\n        J.data *= np.repeat(d, np.diff(J.indptr))\n    elif isinstance(J, LinearOperator):\n        J = left_multiplied_operator(J, d)\n    else:\n        J *= d[:, np.newaxis]\n    return J",
    "docstring": "Compute diag(d) J. If is False, is modified in place (unless being LinearOperator).",
    "type": "function",
    "file_path": "scipy\\scipy\\optimize\\_lsq\\common.py",
    "ast_data": "FunctionDef name:left_multiply arg:J arg:d arg:copy arguments arg arg arg If BoolOp Call Assign Call If Call Call Call If Call Assign Call Return return:yes"
  },
  {
    "library": "sphinx",
    "name": "parse",
    "source_code": "def parse(self) -> None:\n    while (tok := self.fetch_token()) and (not tok.match([OP, '='], NEWLINE, COMMENT)):\n        assert tok\n    assert tok is not None\n    if tok == [OP, '=']:\n        self.fetch_rvalue()\n        tok = self.current\n        assert tok is not None\n    if tok == COMMENT:\n        self.comment = tok.value",
    "docstring": "Parse the code and obtain comment after assignment.",
    "type": "method",
    "file_path": "sphinx\\sphinx\\pycode\\parser.py",
    "ast_data": "FunctionDef name:parse arg:self arguments arg While BoolOp Call Call Compare If Compare Call Assign Compare If Compare Assign"
  },
  {
    "library": "authlib",
    "name": "get_oauth_token",
    "source_code": "def get_oauth_token(self):\n    raise NotImplementedError()",
    "docstring": "A method to get the value of ``:: def get_oauth_token(self): return self.oauth_token :return: A string",
    "type": "method",
    "file_path": "authlib\\authlib\\oauth1\\rfc5849\\models.py",
    "ast_data": "FunctionDef name:get_oauth_token arg:self arguments arg Raise Call"
  },
  {
    "library": "tensorflow",
    "name": "filesystem_set_configuration",
    "source_code": "def filesystem_set_configuration(scheme, key, value, name=None):\n    return _gen_filesystem_ops.file_system_set_configuration(scheme, key=key, value=value, name=name)",
    "docstring": "Set configuration of the file system. Args: scheme: File system scheme. key: The name of the configuration option. value: The value of the configuration option. name: A name for the operation (optional). Returns: None.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\filesystem_ops.py",
    "ast_data": "FunctionDef name:filesystem_set_configuration arg:scheme arg:key arg:value arg:name arguments arg arg arg arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "new_config",
    "source_code": "def new_config(self) -> ConfigType:\n    ret = {name: val if val != DEFAULT else self.fields[name].default for name, val in self.default.items()}\n    return ret",
    "docstring": "creates a new config from the default",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\fuzzer.py",
    "ast_data": "FunctionDef name:new_config arg:self arguments arg Assign Compare Call Return return:yes"
  },
  {
    "library": "cherrypy",
    "name": "check_app_config_entries_dont_start_with_script_name",
    "source_code": "def check_app_config_entries_dont_start_with_script_name(self):\n    for sn, app in cherrypy.tree.apps.items():\n        if not isinstance(app, cherrypy.Application):\n            continue\n        if not app.config:\n            continue\n        if sn == '':\n            continue\n        sn_atoms = sn.strip('/').split('/')\n        for key in app.config.keys():\n            key_atoms = key.strip('/').split('/')\n            if key_atoms[:len(sn_atoms)] == sn_atoms:\n                warnings.warn('The application mounted at %r has config entries that start with its script name: %r' % (sn, key))",
    "docstring": "Check for App config with sections that repeat script_name.",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\_cpchecker.py",
    "ast_data": "FunctionDef name:check_app_config_entries_dont_start_with_script_name arg:self arguments arg For Call If Call If If Compare Assign Call Call For Call Assign Call Call If Compare Call Call"
  },
  {
    "library": "scikit-learn",
    "name": "_estimate_log_prob_resp",
    "source_code": "def _estimate_log_prob_resp(self, X):\n    weighted_log_prob = self._estimate_weighted_log_prob(X)\n    log_prob_norm = logsumexp(weighted_log_prob, axis=1)\n    with np.errstate(under='ignore'):\n        log_resp = weighted_log_prob - log_prob_norm[:, np.newaxis]\n    return (log_prob_norm, log_resp)",
    "docstring": "Estimate log probabilities and responsibilities for each sample. Compute the log probabilities, weighted log probabilities per component and responsibilities for each sample in X with respect to the current state of the model. Parameters ---------- X : array-like of shape (n_samples, n_features) Returns ------- log_prob_norm : array, shape (n_samples,) log p(X) log_responsibilities : array, shape (n_samples, n_components) logarithm of the responsibilities",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\mixture\\_base.py",
    "ast_data": "FunctionDef name:_estimate_log_prob_resp arg:self arg:X arguments arg arg Assign Call Assign Call With Call Assign Return return:yes"
  },
  {
    "library": "django",
    "name": "item_attributes",
    "source_code": "def item_attributes(self, item):\n    return {}",
    "docstring": "Return extra attributes to place on each item (i.e. item/entry) element.",
    "type": "method",
    "file_path": "django\\django\\utils\\feedgenerator.py",
    "ast_data": "FunctionDef name:item_attributes arg:self arg:item arguments arg arg Return return:no"
  },
  {
    "library": "tensorflow",
    "name": "start_loop",
    "source_code": "def start_loop(self):\n    pass",
    "docstring": "Called when the thread starts.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\training\\coordinator.py",
    "ast_data": "FunctionDef name:start_loop arg:self arguments arg"
  },
  {
    "library": "pytorch",
    "name": "get_default_mmap_options",
    "source_code": "def get_default_mmap_options() -> Optional[int]:\n    from torch.utils.serialization import config\n    return config.load.mmap_flags",
    "docstring": "Get default mmap options for :func: with ``. Returns: default_mmap_options: int",
    "type": "function",
    "file_path": "pytorch\\torch\\serialization.py",
    "ast_data": "FunctionDef name:get_default_mmap_options arguments Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "on_compile_end",
    "source_code": "def on_compile_end(callback: Callable[[], None]) -> Callable[[], None]:\n    callback_handler.register_end_callback(callback)\n    return callback",
    "docstring": "Decorator to register a callback function for the end of the compilation.",
    "type": "function",
    "file_path": "pytorch\\torch\\_dynamo\\callback.py",
    "ast_data": "FunctionDef name:on_compile_end arg:callback arguments arg Call Return return:yes"
  },
  {
    "library": "numpy",
    "name": "Chebyshev",
    "source_code": "class Chebyshev(ABCPolyBase):\n    _add = staticmethod(chebadd)\n    _sub = staticmethod(chebsub)\n    _mul = staticmethod(chebmul)\n    _div = staticmethod(chebdiv)\n    _pow = staticmethod(chebpow)\n    _val = staticmethod(chebval)\n    _int = staticmethod(chebint)\n    _der = staticmethod(chebder)\n    _fit = staticmethod(chebfit)\n    _line = staticmethod(chebline)\n    _roots = staticmethod(chebroots)\n    _fromroots = staticmethod(chebfromroots)\n\n    @classmethod\n    def interpolate(cls, func, deg, domain=None, args=()):\n        if domain is None:\n            domain = cls.domain\n        xfunc = lambda x: func(pu.mapdomain(x, cls.window, domain), *args)\n        coef = chebinterpolate(xfunc, deg)\n        return cls(coef, domain=domain)\n    domain = np.array(chebdomain)\n    window = np.array(chebdomain)\n    basis_name = 'T'",
    "docstring": "A Chebyshev series class. The Chebyshev class provides the standard Python numerical methods '+', '-', '*', '//', '%', 'divmod', '**', and '()' as well as the attributes and methods listed below. Parameters ---------- coef : array_like Chebyshev coefficients in order of increasing degree, i.e., `domain` for its use. The default value is [-1., 1.]. symbol : str, optional Symbol used to represent the independent variable in string representations of the polynomial expression, e.g. for printing. The symbol must be a valid Python identifier. Default value is 'x'. .. versionadded:: 1.24",
    "type": "class",
    "file_path": "numpy\\numpy\\polynomial\\chebyshev.py",
    "ast_data": "ClassDef name:Chebyshev Assign Call Assign Call Assign Call Assign Call Assign Call Assign Call Assign Call Assign Call Assign Call Assign Call Assign Call Assign Call FunctionDef name:interpolate arg:cls arg:func arg:deg arg:domain arg:args arguments arg arg arg arg arg If Compare Assign Assign arguments arg Call Call Assign Call Return return:yes Call Assign Call Assign Call Assign"
  },
  {
    "library": "django",
    "name": "login_required",
    "source_code": "def login_required(function=None, redirect_field_name=REDIRECT_FIELD_NAME, login_url=None):\n    actual_decorator = user_passes_test(lambda u: u.is_authenticated, login_url=login_url, redirect_field_name=redirect_field_name)\n    if function:\n        return actual_decorator(function)\n    return actual_decorator",
    "docstring": "Decorator for views that checks that the user is logged in, redirecting to the log-in page if necessary.",
    "type": "function",
    "file_path": "django\\django\\contrib\\auth\\decorators.py",
    "ast_data": "FunctionDef name:login_required arg:function arg:redirect_field_name arg:login_url arguments arg arg arg Assign Call arguments arg If Return return:yes Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_recursive_apply",
    "source_code": "def _recursive_apply(tensors, apply_fn):\n    tensors_type = type(tensors)\n    if isinstance(tensors, tensor_lib.Tensor):\n        return apply_fn(tensors)\n    elif isinstance(tensors, variables.Variable):\n        return apply_fn(tensors.value())\n    elif isinstance(tensors, (list, tuple)):\n        tensors = [_recursive_apply(t, apply_fn) for t in tensors]\n        if tensors_type is list:\n            return list(tensors)\n        elif tensors_type is tuple:\n            return tuple(tensors)\n        return tensors_type(*tensors)\n    elif tensors_type is dict:\n        return dict(((k, _recursive_apply(v, apply_fn)) for k, v in tensors.items()))\n    else:\n        raise TypeError(f'_recursive_apply argument {tensors!r} has invalid type {tensors_type!r}')",
    "docstring": "Helper method to recursively apply a function to structure of tensors. The structure of the tensors should take the form similar to fetches in and includes single , , nested , , , or . Args: tensors: Single , , nested tuplenamedtupledictTensorTensorTypeError` if undefined type in the tensors structure.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\subscribe.py",
    "ast_data": "FunctionDef name:_recursive_apply arg:tensors arg:apply_fn arguments arg arg Assign Call If Call Return return:yes Call If Call Return return:yes Call Call If Call Assign Call If Compare Return return:yes Call If Compare Return return:yes Call Return return:yes Call If Compare Return return:yes Call Call Call Raise Call"
  },
  {
    "library": "pytorch",
    "name": "synchronize",
    "source_code": "def synchronize(device: Optional[_device_t]=None) -> None:\n    with torch.mtia.device(device):\n        return torch._C._mtia_deviceSynchronize()",
    "docstring": "Waits for all jobs in all streams on a MTIA device to complete.",
    "type": "function",
    "file_path": "pytorch\\torch\\mtia\\__init__.py",
    "ast_data": "FunctionDef name:synchronize arg:device arguments arg With Call Return return:yes Call"
  },
  {
    "library": "sphinx",
    "name": "new_serial_number",
    "source_code": "def new_serial_number(self, category: str='', /) -> int:\n    current = self._serial_numbers.get(category, 0)\n    self._serial_numbers[category] = current + 1\n    return current",
    "docstring": "Return a serial number, e.g. for index entry targets. The number is guaranteed to be unique in the current document & category.",
    "type": "method",
    "file_path": "sphinx\\sphinx\\environment\\__init__.py",
    "ast_data": "FunctionDef name:new_serial_number arguments arg arg Assign Call Assign Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "InconsistentVersionWarning",
    "source_code": "class InconsistentVersionWarning(UserWarning):\n\n    def __init__(self, *, estimator_name, current_sklearn_version, original_sklearn_version):\n        self.estimator_name = estimator_name\n        self.current_sklearn_version = current_sklearn_version\n        self.original_sklearn_version = original_sklearn_version\n\n    def __str__(self):\n        return f'Trying to unpickle estimator {self.estimator_name} from version {self.original_sklearn_version} when using version {self.current_sklearn_version}. This might lead to breaking code or invalid results. Use at your own risk. For more info please refer to:\\nhttps://scikit-learn.org/stable/model_persistence.html#security-maintainability-limitations'",
    "docstring": "Warning raised when an estimator is unpickled with an inconsistent version. Parameters ---------- estimator_name : str Estimator name. current_sklearn_version : str Current scikit-learn version. original_sklearn_version : str Original scikit-learn version.",
    "type": "class",
    "file_path": "scikit-learn\\sklearn\\exceptions.py",
    "ast_data": "ClassDef name:InconsistentVersionWarning FunctionDef name:__init__ arg:self arguments arg arg arg arg Assign Assign Assign FunctionDef name:__str__ arg:self arguments arg Return return:yes"
  },
  {
    "library": "scipy",
    "name": "integrate",
    "source_code": "def integrate(self, t, step=False, relax=False):\n    y = ode.integrate(self, t, step, relax)\n    return y[::2] + 1j * y[1::2]",
    "docstring": "Find y=y(t), set y as an initial condition, and return y. Parameters ---------- t : float The endpoint of the integration step. step : bool If True, and if the integrator supports the step method, then perform a single integration step and return. This parameter is provided in order to expose internals of the implementation, and should not be changed from its default value in most cases. relax : bool If True and if the integrator supports the run_relax method, then integrate until t_1 >= t and return. ``. This parameter is provided in order to expose internals of the implementation, and should not be changed from its default value in most cases. Returns ------- y : float The integrated value at t",
    "type": "method",
    "file_path": "scipy\\scipy\\integrate\\_ode.py",
    "ast_data": "FunctionDef name:integrate arg:self arg:t arg:step arg:relax arguments arg arg arg arg Assign Call Return return:yes"
  },
  {
    "library": "django",
    "name": "check_setting_language_code",
    "source_code": "@register(Tags.translation)\ndef check_setting_language_code(app_configs, **kwargs):\n    tag = settings.LANGUAGE_CODE\n    if not isinstance(tag, str) or not language_code_re.match(tag):\n        return [Error(E001.msg.format(tag), id=E001.id)]\n    return []",
    "docstring": "Error if LANGUAGE_CODE setting is invalid.",
    "type": "function",
    "file_path": "django\\django\\core\\checks\\translation.py",
    "ast_data": "FunctionDef name:check_setting_language_code arg:app_configs arguments arg arg Assign If BoolOp Call Call Return return:yes Call Call Return return:no Call"
  },
  {
    "library": "cherrypy",
    "name": "decompress",
    "source_code": "def decompress(body):\n    import gzip\n    zbuf = io.BytesIO()\n    zbuf.write(body)\n    zbuf.seek(0)\n    zfile = gzip.GzipFile(mode='rb', fileobj=zbuf)\n    data = zfile.read()\n    zfile.close()\n    return data",
    "docstring": "Decompress a blob of bytes.",
    "type": "function",
    "file_path": "cherrypy\\cherrypy\\lib\\encoding.py",
    "ast_data": "FunctionDef name:decompress arg:body arguments arg Assign Call Call Call Assign Call Assign Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_try_guard_against_uninitialized_dependencies",
    "source_code": "def _try_guard_against_uninitialized_dependencies(name, initial_value):\n    if not isinstance(initial_value, tensor_lib.Tensor):\n        raise TypeError('initial_value needs to be a Tensor: %s' % initial_value)\n    if _has_cycle(initial_value.op, state={}):\n        return initial_value\n    return _safe_initial_value_from_tensor(name, initial_value, op_cache={})",
    "docstring": "Attempt to guard against dependencies on uninitialized variables. Replace references to variables in with references to the variable's initialized values. The initialized values are essentially conditional TensorFlow graphs that return a variable's value if it is initialized or its if it hasn't been initialized. This replacement is done on a best effort basis: - If the graph contains cycles, we don't do any replacements for that graph. - If the variables that depends on are not present in the or we don't replace them. In these cases, it is up to the caller to ensure that the graph uses initialized variables or that they guard access to variables using their method. Args: name: Variable name. initial_value: . The initial value. Returns: A suitable to initialize a variable. Raises: TypeError: If is not a .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\variables.py",
    "ast_data": "FunctionDef name:_try_guard_against_uninitialized_dependencies arg:name arg:initial_value arguments arg arg If Call Raise Call If Call Return return:yes Return return:yes Call"
  },
  {
    "library": "kornia",
    "name": "infer_bbox_shape",
    "source_code": "def infer_bbox_shape(boxes: torch.Tensor) -> tuple[torch.Tensor, torch.Tensor]:\n    validate_bbox(boxes)\n    width: torch.Tensor = boxes[:, 1, 0] - boxes[:, 0, 0] + 1\n    height: torch.Tensor = boxes[:, 2, 1] - boxes[:, 0, 1] + 1\n    return (height, width)",
    "docstring": "Auto-infer the output sizes for the given 2D bounding boxes. Args: boxes: a tensor containing the coordinates of the bounding boxes to be extracted. The tensor must have the shape of Bx4x2, where each box is defined in the following `(B,)(B,)`. Example: >>> boxes = torch.tensor([[ ... [1., 1.], ... [2., 1.], ... [2., 2.], ... [1., 2.], ... ], [ ... [1., 1.], ... [3., 1.], ... [3., 2.], ... [1., 2.], ... ]]) # 2x4x2 >>> infer_bbox_shape(boxes) (tensor([2., 2.]), tensor([2., 3.]))",
    "type": "function",
    "file_path": "kornia\\kornia\\geometry\\bbox.py",
    "ast_data": "FunctionDef name:infer_bbox_shape arg:boxes arguments arg Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "prepare_loss_functions",
    "source_code": "def prepare_loss_functions(loss, output_names):\n    if isinstance(loss, collections.abc.Mapping):\n        generic_utils.check_for_unexpected_keys('loss', loss, output_names)\n        loss_functions = []\n        for name in output_names:\n            if name not in loss:\n                logging.warning('Output {0} missing from loss dictionary. We assume this was done on purpose. The fit and evaluate APIs will not be expecting any data to be passed to {0}.'.format(name))\n            loss_functions.append(get_loss_function(loss.get(name, None)))\n    elif isinstance(loss, str):\n        loss_functions = [get_loss_function(loss) for _ in output_names]\n    elif isinstance(loss, collections.abc.Sequence):\n        if len(loss) != len(output_names):\n            raise ValueError('When passing a list as loss, it should have one entry per model outputs. The model has {} outputs, but you passed loss={}'.format(len(output_names), loss))\n        loss_functions = nest.map_structure(get_loss_function, loss)\n    else:\n        loss_functions = [get_loss_function(loss) for _ in range(len(output_names))]\n    return loss_functions",
    "docstring": "Converts loss to a list of loss functions. Args: loss: String (name of objective function), objective function or instance. See . If the model has multiple outputs, you can use a different loss on each output by passing a dictionary or a list of losses. The loss value that will be minimized by the model will then be the sum of all individual losses. output_names: List of model output names. Returns: A list of loss objective functions. Raises: ValueError: If loss is a dict with keys not in model output names, or if loss is a list with len not equal to model outputs.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\training_utils_v1.py",
    "ast_data": "FunctionDef name:prepare_loss_functions arg:loss arg:output_names arguments arg arg If Call Call Assign For If Compare Call Call Call Call Call If Call Assign Call If Call If Compare Call Call Raise Call Call Call Assign Call Assign Call Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "unpackage_script_module",
    "source_code": "def unpackage_script_module(importer: PackageImporter, script_module_id: str) -> torch.nn.Module:\n    if not isinstance(importer.zip_reader, torch._C.PyTorchFileReader):\n        raise RuntimeError('Loading ScriptObjects from a PackageImporter created from a directory is not supported. Use a package archive file instead.')\n    cu = torch._C.CompilationUnit()\n    cpp_module = torch._C._import_ir_module_from_package(cu, importer.zip_reader, importer.storage_context, validate_map_location(importer.last_map_location), script_module_id)\n    return wrap_cpp_module(cpp_module)",
    "docstring": "Call by `` archive.",
    "type": "function",
    "file_path": "pytorch\\torch\\jit\\_script.py",
    "ast_data": "FunctionDef name:unpackage_script_module arg:importer arg:script_module_id arguments arg arg If Call Raise Call Assign Call Assign Call Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_list_node_dumps",
    "source_code": "def _list_node_dumps(self, node_name):\n    lines = []\n    font_attr_segs = {}\n    watch_keys = self._debug_dump.debug_watch_keys(node_name)\n    dump_count = 0\n    for watch_key in watch_keys:\n        debug_tensor_data = self._debug_dump.watch_key_to_data(watch_key)\n        for datum in debug_tensor_data:\n            line = '  Slot %d @ %s @ %.3f ms' % (datum.output_slot, datum.debug_op, (datum.timestamp - self._debug_dump.t0) / 1000.0)\n            lines.append(line)\n            command = 'pt %s:%d -n %d' % (node_name, datum.output_slot, dump_count)\n            font_attr_segs[len(lines) - 1] = [(2, len(line), debugger_cli_common.MenuItem(None, command))]\n            dump_count += 1\n    output = debugger_cli_common.RichTextLines(lines, font_attr_segs=font_attr_segs)\n    output_with_header = debugger_cli_common.RichTextLines(['%d dumped tensor(s):' % dump_count, ''])\n    output_with_header.extend(output)\n    return output_with_header",
    "docstring": "List dumped tensor data from a node. Args: node_name: Name of the node of which the attributes are to be listed. Returns: A RichTextLines object.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\debug\\cli\\analyzer_cli.py",
    "ast_data": "FunctionDef name:_list_node_dumps arg:self arg:node_name arguments arg arg Assign Assign Assign Call Assign For Assign Call For Assign Call Assign Assign Call Call Call Assign Call Assign Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "register_serializable",
    "source_code": "def register_serializable(package='Custom', name=None, predicate=None):\n\n    def decorator(arg):\n        nonlocal predicate\n        if not tf_inspect.isclass(arg):\n            raise TypeError('Registered serializable must be a class: {}'.format(arg))\n        class_name = name if name is not None else arg.__name__\n        if predicate is None:\n            predicate = lambda x: isinstance(x, arg)\n        _class_registry.register(package, class_name, predicate, arg)\n        return arg\n    return decorator",
    "docstring": "Decorator for registering a serializable class. THIS METHOD IS STILL EXPERIMENTAL AND MAY CHANGE AT ANY TIME. Registered classes will be saved with a name generated by combining the and arguments. When loading a SavedModel, modules saved with this registered name will be created using the method. By default, only direct instances of the registered class will be saved/ restored with the / methods. To extend the registration to subclasses, use the : Args: package: The package that this class belongs to. name: The name to serialize this class under in this package. If None, the class's name will be used. predicate: An optional function that takes a single Trackable argument, and determines whether that object should be serialized with this and . The default predicate checks whether the object's type exactly matches the registered class. Predicates are executed in the reverse order that they are added (later registrations are checked first). Returns: A decorator that registers the decorated class with the passed names and predicate.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\saved_model\\registration\\registration.py",
    "ast_data": "FunctionDef name:register_serializable arg:package arg:name arg:predicate arguments arg arg arg FunctionDef name:decorator arg:arg arguments arg If Call Raise Call Call Assign Compare If Compare Assign arguments arg Call Call Return return:yes Return return:yes"
  },
  {
    "library": "django",
    "name": "run_checks",
    "source_code": "def run_checks(self, app_configs=None, tags=None, include_deployment_checks=False, databases=None):\n    errors = []\n    checks = self.get_checks(include_deployment_checks)\n    if tags is not None:\n        checks = [check for check in checks if not set(check.tags).isdisjoint(tags)]\n    for check in checks:\n        new_errors = check(app_configs=app_configs, databases=databases)\n        if not isinstance(new_errors, Iterable):\n            raise TypeError('The function %r did not return a list. All functions registered with the checks registry must return a list.' % check)\n        errors.extend(new_errors)\n    return errors",
    "docstring": "Run all registered checks and return list of Errors and Warnings.",
    "type": "method",
    "file_path": "django\\django\\core\\checks\\registry.py",
    "ast_data": "FunctionDef name:run_checks arg:self arg:app_configs arg:tags arg:include_deployment_checks arg:databases arguments arg arg arg arg arg Assign Assign Call If Compare Assign Call Call For Assign Call If Call Raise Call Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "BracketCurve",
    "source_code": "@_register_style(_style_list, name=']->')\nclass BracketCurve(_Curve):\n    arrow = ']->'\n\n    def __init__(self, widthA=1.0, lengthA=0.2, angleA=None):\n        super().__init__(widthA=widthA, lengthA=lengthA, angleA=angleA)",
    "docstring": "An arrow with an outward square bracket at its start and a head at the end.",
    "type": "class",
    "file_path": "matplotlib\\lib\\matplotlib\\patches.py",
    "ast_data": "ClassDef name:BracketCurve Assign FunctionDef name:__init__ arg:self arg:widthA arg:lengthA arg:angleA arguments arg arg arg arg Call Call Call"
  },
  {
    "library": "pandas",
    "name": "_sizeof_fmt",
    "source_code": "def _sizeof_fmt(num: float, size_qualifier: str) -> str:\n    for x in ['bytes', 'KB', 'MB', 'GB', 'TB']:\n        if num < 1024.0:\n            return f'{num:3.1f}{size_qualifier} {x}'\n        num /= 1024.0\n    return f'{num:3.1f}{size_qualifier} PB'",
    "docstring": "Return size in human readable format. Parameters ---------- num : int Size in bytes. size_qualifier : str Either empty, or '+' (if lower bound). Returns ------- str Size in human readable format. Examples -------- >>> _sizeof_fmt(23028, \"\") '22.5 KB' >>> _sizeof_fmt(23028, \"+\") '22.5+ KB'",
    "type": "function",
    "file_path": "pandas\\pandas\\io\\formats\\info.py",
    "ast_data": "FunctionDef name:_sizeof_fmt arg:num arg:size_qualifier arguments arg arg For If Compare Return return:yes Return return:yes"
  },
  {
    "library": "scipy",
    "name": "_entropy",
    "source_code": "def _entropy(self, dim, df, log_det_scale):\n    return 0.5 * (dim + 1) * log_det_scale + 0.5 * dim * (dim + 1) * _LOG_2 + multigammaln(0.5 * df, dim) - 0.5 * (df - dim - 1) * np.sum([psi(0.5 * (df + 1 - (i + 1))) for i in range(dim)]) + 0.5 * df * dim",
    "docstring": "Compute the differential entropy of the Wishart. Parameters ---------- dim : int Dimension of the scale matrix df : int Degrees of freedom log_det_scale : float Logarithm of the determinant of the scale matrix Notes ----- As this function does no argument checking, it should not be called directly; use 'entropy' instead.",
    "type": "method",
    "file_path": "scipy\\scipy\\stats\\_multivariate.py",
    "ast_data": "FunctionDef name:_entropy arg:self arg:dim arg:df arg:log_det_scale arguments arg arg arg arg Return return:yes Call Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "assert_no_leak_if_all_possibly_except_one",
    "source_code": "@trace.trace_wrapper\ndef assert_no_leak_if_all_possibly_except_one(self):\n    snapshot_diffs = []\n    for i in range(0, len(self._snapshots) - 1):\n        snapshot_diffs.append(self._snapshot_diff(i, i + 1))\n    allocation_counter = collections.Counter()\n    for diff in snapshot_diffs:\n        for name, count in diff.items():\n            if count > 0:\n                allocation_counter[name] += 1\n    leaking_object_names = {name for name, count in allocation_counter.items() if count >= len(snapshot_diffs) - 1}\n    if leaking_object_names:\n        object_list_to_print = '\\n'.join([' - ' + name for name in leaking_object_names])\n        raise AssertionError(f'These Python objects were allocated in every snapshot possibly except one.\\n\\n{object_list_to_print}')",
    "docstring": "Raises an exception if a leak is detected. This algorithm classifies a series of allocations as a leak if it's the same type at every snapshot, but possibly except one snapshot.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\python_memory_checker.py",
    "ast_data": "FunctionDef name:assert_no_leak_if_all_possibly_except_one arg:self arguments arg Assign For Call Call Call Call Assign Call For For Call If Compare Assign Call Compare Call If Assign Call Raise Call"
  },
  {
    "library": "tensorflow",
    "name": "FileWriterCache",
    "source_code": "@tf_export(v1=['summary.FileWriterCache'])\nclass FileWriterCache(object):\n    _cache = {}\n    _lock = threading.RLock()\n\n    @staticmethod\n    def clear():\n        with FileWriterCache._lock:\n            for item in FileWriterCache._cache.values():\n                item.close()\n            FileWriterCache._cache = {}\n\n    @staticmethod\n    def get(logdir):\n        with FileWriterCache._lock:\n            if logdir not in FileWriterCache._cache:\n                FileWriterCache._cache[logdir] = FileWriter(logdir, graph=ops.get_default_graph())\n            return FileWriterCache._cache[logdir]",
    "docstring": "Cache for file writers. This class caches file writers, one per directory.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\summary\\writer\\writer_cache.py",
    "ast_data": "ClassDef name:FileWriterCache Assign Assign Call FunctionDef name:clear arguments With For Call Call Assign FunctionDef name:get arg:logdir arguments arg With If Compare Assign Call Call Return return:yes Call"
  },
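Usage is a one-liner per log directory. This is a TF1-era API reached through `tf.compat.v1` on TF2 installs; because `FileWriter` requires graph mode, the sketch below disables eager execution first.

```python
# Sketch of the per-directory caching behaviour (TF1-era, graph-mode API).
import tensorflow as tf

tf.compat.v1.disable_eager_execution()
w1 = tf.compat.v1.summary.FileWriterCache.get('/tmp/logs')
w2 = tf.compat.v1.summary.FileWriterCache.get('/tmp/logs')
assert w1 is w2                                # same cached writer per logdir
tf.compat.v1.summary.FileWriterCache.clear()   # closes and empties the cache
```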
  {
    "library": "authlib",
    "name": "TokenValidator",
    "source_code": "class TokenValidator:\n    TOKEN_TYPE = 'bearer'\n\n    def __init__(self, realm=None, **extra_attributes):\n        self.realm = realm\n        self.extra_attributes = extra_attributes\n\n    @staticmethod\n    def scope_insufficient(token_scopes, required_scopes):\n        if not required_scopes:\n            return False\n        token_scopes = scope_to_list(token_scopes)\n        if not token_scopes:\n            return True\n        token_scopes = set(token_scopes)\n        for scope in required_scopes:\n            resource_scopes = set(scope_to_list(scope))\n            if token_scopes.issuperset(resource_scopes):\n                return False\n        return True\n\n    def authenticate_token(self, token_string):\n        raise NotImplementedError()\n\n    def validate_request(self, request):\n        pass\n\n    def validate_token(self, token, scopes, request):\n        raise NotImplementedError()",
    "docstring": "Base token validator class. Subclass this validator to register into ResourceProtector instance.",
    "type": "class",
    "file_path": "authlib\\authlib\\oauth2\\rfc6749\\resource_protector.py",
    "ast_data": "ClassDef name:TokenValidator Assign FunctionDef name:__init__ arg:self arg:realm arguments arg arg arg Assign Assign FunctionDef name:scope_insufficient arg:token_scopes arg:required_scopes arguments arg arg If Return return:yes Assign Call If Return return:yes Assign Call For Assign Call Call If Call Return return:yes Return return:yes FunctionDef name:authenticate_token arg:self arg:token_string arguments arg arg Raise Call FunctionDef name:validate_request arg:self arg:request arguments arg arg FunctionDef name:validate_token arg:self arg:token arg:scopes arg:request arguments arg arg arg arg Raise Call"
  },
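A sketch of the intended subclassing pattern follows; `find_token` and the token's `is_expired()`/`get_scope()` methods are hypothetical stand-ins for an application's storage layer, and real implementations raise Authlib's OAuth2 error classes rather than `ValueError`.

```python
# Minimal sketch of a TokenValidator subclass; find_token() and the token's
# is_expired()/get_scope() methods are hypothetical application code.
from authlib.oauth2.rfc6749.resource_protector import TokenValidator


class MyBearerTokenValidator(TokenValidator):
    def authenticate_token(self, token_string):
        return find_token(token_string)  # hypothetical lookup; None if unknown

    def validate_token(self, token, scopes, request):
        if token is None or token.is_expired():
            raise ValueError('invalid token')  # placeholder for Authlib's OAuth2 errors
        if self.scope_insufficient(token.get_scope(), scopes):
            raise ValueError('insufficient scope')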
  {
    "library": "pytorch",
    "name": "_add_cutlass_gemm_choices",
    "source_code": "def _add_cutlass_gemm_choices(self, choices: list[ChoiceCaller], layout: ir.Layout, input_nodes: list[Buffer], alpha: Union[float, int]=1, beta: Union[float, int]=0, input_reorder: Optional[list[int]]=None, **extra_kwargs) -> None:\n    ops = self.gen_ops()\n    for name, op in ops:\n        for swizzle in inductor_cuda_config.cutlass_max_profiling_swizzle_options:\n            description = f'{name} swizzle={swizzle}'\n            self.maybe_append_choice(choices, description=description, op=op, swizzle=swizzle)\n    if len(ops) == 0:\n        input_layouts = [node.get_layout() for node in input_nodes]\n        input_strides = [node.get_stride() for node in input_nodes]\n        output_layout = layout\n        warning_msg = f'No suitable Cutlass GEMM configs found, fallbacks used ( len(ops)={len(ops)!r}, output_layout={output_layout!r}, input_layouts={input_layouts!r}, input_strides={input_strides!r} )'\n        log.warning(warning_msg)\n    log.debug('Added %d Cutlass gemm configs.', len(ops))",
    "docstring": "Adds Cutlass GEMM configurations choices to the auto-tuning list. This function mutates the passed list of choices by appending the choices for Cutlass GEMM configs to it. Args: choices (list): The list to which choices are appended. layout (ir.Layout): The layout configuration. input_nodes (list): The list of input nodes. alpha (float,int): Scaling factor, defaults to 1. beta (float,int): Offset, defaults to 0. input_reorder (list, optional): Order of the inputs, defaults to None. **extra_kwargs: Additional keyword arguments.",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\codegen\\cuda\\gemm_template.py",
    "ast_data": "FunctionDef name:_add_cutlass_gemm_choices arg:self arg:choices arg:layout arg:input_nodes arg:alpha arg:beta arg:input_reorder arguments arg arg arg arg arg arg arg arg Assign Call For For Assign Call If Compare Call Assign Call Assign Call Assign Assign Call Call Call Call"
  },
  {
    "library": "scipy",
    "name": "sign_round_up",
    "source_code": "@_blocked_elementwise\ndef sign_round_up(X):\n    Y = X.copy()\n    Y[Y == 0] = 1\n    Y /= np.abs(Y)\n    return Y",
    "docstring": "This should do the right thing for both real and complex matrices. From Higham and Tisseur: \"Everything in this section remains valid for complex matrices provided that sign(A) is redefined as the matrix (aij / |aij|) (and sign(0) = 1) transposes are replaced by conjugate transposes.\"",
    "type": "function",
    "file_path": "scipy\\scipy\\sparse\\linalg\\_onenormest.py",
    "ast_data": "FunctionDef name:sign_round_up arg:X arguments arg Assign Call Assign Compare Call Return return:yes"
  },
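The element-wise rule is easy to check by hand. A standalone sketch (the SciPy helper itself is private and additionally block-processes large arrays via the `_blocked_elementwise` decorator):

```python
# Standalone sketch of the element-wise rule: sign(0) = 1, otherwise a / |a|.
import numpy as np

X = np.array([[3.0, -2.0, 0.0],
              [0.5, -4.0, 1.0]])
Y = X.copy()
Y[Y == 0] = 1
Y /= np.abs(Y)
print(Y)   # [[ 1. -1.  1.]  [ 1. -1.  1.]]
```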
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, func, lower_control_flow, aggressive_inlining, variable_names_allowlist=None, variable_names_denylist=None):\n    self._func = func\n    graph_def = _run_inline_graph_optimization(func, lower_control_flow, aggressive_inlining)\n    super(_FunctionConverterData, self).__init__(graph_def, variable_names_allowlist=variable_names_allowlist, variable_names_denylist=variable_names_denylist)\n    self._build_tensor_data()",
    "docstring": "Creates the conversion data for the given function. Args: func: ConcreteFunction. lower_control_flow: Boolean indicating whether or not to lower control flow ops such as If and While. aggressive_inlining: Boolean indicating whether or not to do aggressive function inlining (might be unsafe if function has stateful ops, not properly connected to control outputs). variable_names_allowlist: The set of variable names to convert (by default, all variables are converted). variable_names_denylist: The set of variable names to omit converting to constants.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\convert_to_constants.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:func arg:lower_control_flow arg:aggressive_inlining arg:variable_names_allowlist arg:variable_names_denylist arguments arg arg arg arg arg arg Assign Assign Call Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_ceil_to_pow_of_n",
    "source_code": "def _ceil_to_pow_of_n(x, n):\n    x = math_ops.cast(x, dtypes.float32)\n    lognx = math_ops.log(x) / math_ops.log(n * 1.0)\n    lognx = math_ops.ceil(lognx)\n    result = math_ops.pow(n * 1.0, lognx)\n    result = math_ops.cast(result, dtypes.int32)\n    return result",
    "docstring": "Ceil input to power of .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\tpu\\tpu.py",
    "ast_data": "FunctionDef name:_ceil_to_pow_of_n arg:x arg:n arguments arg arg Assign Call Assign Call Call Assign Call Assign Call Assign Call Return return:yes"
  },
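The same computation in plain NumPy shows the idea; this is a sketch, not the TF graph-op version above.

```python
# Plain-NumPy sketch of the same rounding: the smallest power of n >= x.
import numpy as np

def ceil_to_pow_of_n(x, n):
    lognx = np.ceil(np.log(x) / np.log(float(n)))
    return int(n ** lognx)

print(ceil_to_pow_of_n(20, 2))   # 32
print(ceil_to_pow_of_n(80, 3))   # 81
```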
  {
    "library": "numpy",
    "name": "shrink_mask",
    "source_code": "def shrink_mask(self):\n    self._mask = _shrink_mask(self._mask)\n    return self",
    "docstring": "Reduce a mask to nomask when possible. Parameters ---------- None Returns ------- result : MaskedArray A :class: object. Examples -------- >>> import numpy as np >>> x = np.ma.array([[1,2 ], [3, 4]], mask=[0]*4) >>> x.mask array([[False, False], [False, False]]) >>> x.shrink_mask() masked_array( data=[[1, 2], [3, 4]], mask=False, fill_value=999999) >>> x.mask False",
    "type": "method",
    "file_path": "numpy\\numpy\\ma\\core.py",
    "ast_data": "FunctionDef name:shrink_mask arg:self arguments arg Assign Call Return return:yes"
  },
  {
    "library": "pandas",
    "name": "_get_ax_freq",
    "source_code": "def _get_ax_freq(ax: Axes):\n    ax_freq = getattr(ax, 'freq', None)\n    if ax_freq is None:\n        if hasattr(ax, 'left_ax'):\n            ax_freq = getattr(ax.left_ax, 'freq', None)\n        elif hasattr(ax, 'right_ax'):\n            ax_freq = getattr(ax.right_ax, 'freq', None)\n    if ax_freq is None:\n        shared_axes = ax.get_shared_x_axes().get_siblings(ax)\n        if len(shared_axes) > 1:\n            for shared_ax in shared_axes:\n                ax_freq = getattr(shared_ax, 'freq', None)\n                if ax_freq is not None:\n                    break\n    return ax_freq",
    "docstring": "Get the freq attribute of the ax object if set. Also checks shared axes (eg when using secondary yaxis, sharex=True or twinx)",
    "type": "function",
    "file_path": "pandas\\pandas\\plotting\\_matplotlib\\timeseries.py",
    "ast_data": "FunctionDef name:_get_ax_freq arg:ax arguments arg Assign Call If Compare If Call Assign Call If Call Assign Call If Compare Assign Call Call If Compare Call For Assign Call If Compare Return return:yes"
  },
  {
    "library": "django",
    "name": "GDALRasterBase",
    "source_code": "class GDALRasterBase(GDALBase):\n\n    @property\n    def metadata(self):\n        domain_list = ['DEFAULT']\n        meta_list = capi.get_ds_metadata_domain_list(self._ptr)\n        if meta_list:\n            counter = 0\n            domain = meta_list[counter]\n            while domain:\n                domain_list.append(domain.decode())\n                counter += 1\n                domain = meta_list[counter]\n        capi.free_dsl(meta_list)\n        result = {}\n        for domain in domain_list:\n            data = capi.get_ds_metadata(self._ptr, None if domain == 'DEFAULT' else domain.encode())\n            if not data:\n                continue\n            domain_meta = {}\n            counter = 0\n            item = data[counter]\n            while item:\n                key, val = item.decode().split('=')\n                domain_meta[key] = val\n                counter += 1\n                item = data[counter]\n            result[domain or 'DEFAULT'] = domain_meta\n        return result\n\n    @metadata.setter\n    def metadata(self, value):\n        for domain, metadata in value.items():\n            domain = None if domain == 'DEFAULT' else domain.encode()\n            for meta_name, meta_value in metadata.items():\n                capi.set_ds_metadata_item(self._ptr, meta_name.encode(), meta_value.encode() if meta_value else None, domain)",
    "docstring": "Attributes that exist on both GDALRaster and GDALBand.",
    "type": "class",
    "file_path": "django\\django\\contrib\\gis\\gdal\\raster\\base.py",
    "ast_data": "ClassDef name:GDALRasterBase FunctionDef name:metadata arg:self arguments arg Assign Assign Call If Assign Assign While Call Call Assign Call Assign For Assign Call Compare Call If Assign Assign Assign While Assign Call Call Assign Assign Assign BoolOp Return return:yes FunctionDef name:metadata arg:self arg:value arguments arg arg For Call Assign Compare Call For Call Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "_unify",
    "source_code": "@dispatch(slice, slice, dict)\ndef _unify(u, v, s):\n    return unify((u.start, u.stop, u.step), (v.start, v.stop, v.step), s)",
    "docstring": "Unify a Python `` object",
    "type": "function",
    "file_path": "pytorch\\torch\\fx\\experimental\\unification\\more.py",
    "ast_data": "FunctionDef name:_unify arg:u arg:v arg:s arguments arg arg arg Return return:yes Call Call"
  },
  {
    "library": "pandas",
    "name": "is_all_strings",
    "source_code": "def is_all_strings(value: ArrayLike) -> bool:\n    dtype = value.dtype\n    if isinstance(dtype, np.dtype):\n        if len(value) == 0:\n            return dtype == np.dtype('object')\n        else:\n            return dtype == np.dtype('object') and lib.is_string_array(np.asarray(value), skipna=False)\n    elif isinstance(dtype, CategoricalDtype):\n        return dtype.categories.inferred_type == 'string'\n    return dtype == 'string'",
    "docstring": "Check if this is an array of strings that we should try parsing. Includes object-dtype ndarray containing all-strings, StringArray, and Categorical with all-string categories. Does not include numpy string dtypes.",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\dtypes\\common.py",
    "ast_data": "FunctionDef name:is_all_strings arg:value arguments arg Assign If Call If Compare Call Return return:yes Compare Call Return return:yes BoolOp Compare Call Call Call If Call Return return:yes Compare Return return:yes Compare"
  },
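The predicate's behaviour on the accepted input shapes is sketched below; the import path mirrors the file_path above, but the helper is private and may move between pandas versions.

```python
# Sketch of the accepted inputs (private pandas helper; path may change).
import numpy as np
import pandas as pd
from pandas.core.dtypes.common import is_all_strings

print(is_all_strings(np.array(['a', 'b'], dtype=object)))   # True
print(is_all_strings(np.array(['a', 1], dtype=object)))     # False (mixed)
print(is_all_strings(pd.Categorical(['a', 'b'])))           # True (string categories)
print(is_all_strings(np.array(['a', 'b'])))                 # False (numpy str dtype)
```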
  {
    "library": "tensorflow",
    "name": "_is_known_unsigned_by_dtype",
    "source_code": "def _is_known_unsigned_by_dtype(dt):\n    return {dtypes.bool: True, dtypes.uint8: True, dtypes.uint16: True}.get(dt.base_dtype, False)",
    "docstring": "Helper returning True if dtype is known to be unsigned.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\distributions\\util.py",
    "ast_data": "FunctionDef name:_is_known_unsigned_by_dtype arg:dt arguments arg Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "set_axes_pad",
    "source_code": "def set_axes_pad(self, axes_pad):\n    self._horiz_pad_size.fixed_size = axes_pad[0]\n    self._vert_pad_size.fixed_size = axes_pad[1]",
    "docstring": "Set the padding between the axes. Parameters ---------- axes_pad : (float, float) The padding (horizontal pad, vertical pad) in inches.",
    "type": "method",
    "file_path": "matplotlib\\lib\\mpl_toolkits\\axes_grid1\\axes_grid.py",
    "ast_data": "FunctionDef name:set_axes_pad arg:self arg:axes_pad arguments arg arg Assign Assign"
  },
  {
    "library": "pytorch",
    "name": "copy_args_to_cpu_if_needed",
    "source_code": "def copy_args_to_cpu_if_needed(self, *args, **kwargs):\n    if not self.optimize_mem:\n        return {}\n    copies = {}\n    budget = torch.cuda.max_memory_allocated() - torch.cuda.memory_allocated()\n\n    def maybe_copy(name, arg):\n        if name in self.mutated_arg_names and arg.is_cuda:\n            nonlocal budget\n            assert isinstance(arg, torch.Tensor)\n            required_storage_length = compute_required_storage_length(arg.size(), arg.stride(), 0)\n            size = required_storage_length * arg.element_size()\n            if size > budget:\n                cpu_arg = torch.empty_strided((required_storage_length,), (1,), dtype=arg.dtype, device='cpu', pin_memory=True)\n                cpu_arg.copy_(arg.as_strided((required_storage_length,), (1,)), non_blocking=True)\n                copies[name] = (arg, cpu_arg)\n            else:\n                budget -= size\n    for name, arg in zip(self.fn.arg_names, args):\n        maybe_copy(name, arg)\n    for name, arg in kwargs.items():\n        maybe_copy(name, arg)\n    return copies",
    "docstring": "To support benchmarking in the presence of mutated args, we need to avoid autotuning contanminating them. We try to pass cloned args to the kernel. If those clones would increase the peak memory usage, however, we instead copy to cpu and restore them after each iteration. Figure out the args to be copied and do the copying.",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\runtime\\triton_heuristics.py",
    "ast_data": "FunctionDef name:copy_args_to_cpu_if_needed arg:self arguments arg arg arg If Return return:no Assign Assign Call Call FunctionDef name:maybe_copy arg:name arg:arg arguments arg arg If BoolOp Compare Call Assign Call Call Call Assign Call If Compare Assign Call Call Call Assign For Call Call For Call Call Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "fit",
    "source_code": "@_fit_context(prefer_skip_nested_validation=True)\ndef fit(self, X, y=None):\n    if self.affinity == 'precomputed':\n        X = validate_data(self, X, copy=self.copy, force_writeable=True)\n        self.affinity_matrix_ = X\n    else:\n        X = validate_data(self, X, accept_sparse='csr')\n        self.affinity_matrix_ = -euclidean_distances(X, squared=True)\n    if self.affinity_matrix_.shape[0] != self.affinity_matrix_.shape[1]:\n        raise ValueError(f'The matrix of similarities must be a square array. Got {self.affinity_matrix_.shape} instead.')\n    if self.preference is None:\n        preference = np.median(self.affinity_matrix_)\n    else:\n        preference = self.preference\n    preference = np.asarray(preference)\n    random_state = check_random_state(self.random_state)\n    self.cluster_centers_indices_, self.labels_, self.n_iter_ = _affinity_propagation(self.affinity_matrix_, max_iter=self.max_iter, convergence_iter=self.convergence_iter, preference=preference, damping=self.damping, verbose=self.verbose, return_n_iter=True, random_state=random_state)\n    if self.affinity != 'precomputed':\n        self.cluster_centers_ = X[self.cluster_centers_indices_].copy()\n    return self",
    "docstring": "Fit the clustering from features, or affinity matrix. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features), or array-like of shape (n_samples, n_samples) Training instances to cluster, or similarities / affinities between instances if ``. y : Ignored Not used, present here for API consistency by convention. Returns ------- self Returns the instance itself.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\cluster\\_affinity_propagation.py",
    "ast_data": "FunctionDef name:fit arg:self arg:X arg:y arguments arg arg arg If Compare Assign Call Assign Assign Call Assign Call If Compare Raise Call If Compare Assign Call Assign Assign Call Assign Call Assign Call If Compare Assign Call Return return:yes Call"
  },
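End-to-end use of the estimator this `fit` belongs to, following scikit-learn's toy example:

```python
# Public-API usage of AffinityPropagation.fit (toy data from the sklearn docs).
import numpy as np
from sklearn.cluster import AffinityPropagation

X = np.array([[1, 2], [1, 4], [1, 0],
              [4, 2], [4, 4], [4, 0]])
clustering = AffinityPropagation(random_state=5).fit(X)
print(clustering.labels_)            # e.g. [0 0 0 1 1 1]
print(clustering.cluster_centers_)   # one exemplar per cluster
```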
  {
    "library": "tensorflow",
    "name": "__add__",
    "source_code": "def __add__(self, other):\n    ret = RichLine()\n    if isinstance(other, str):\n        ret.text = self.text + other\n        ret.font_attr_segs = self.font_attr_segs[:]\n        return ret\n    elif isinstance(other, RichLine):\n        ret.text = self.text + other.text\n        ret.font_attr_segs = self.font_attr_segs[:]\n        old_len = len(self.text)\n        for start, end, font_attr in other.font_attr_segs:\n            ret.font_attr_segs.append((old_len + start, old_len + end, font_attr))\n        return ret\n    else:\n        raise TypeError('%r cannot be concatenated with a RichLine' % other)",
    "docstring": "Concatenate two chunks of maybe rich text to make a longer rich line. Does not modify self. Args: other: Another piece of text to concatenate with this one. If it is a plain str, it will be appended to this string with no attributes. If it is a RichLine, it will be appended to this string with its attributes preserved. Returns: A new RichLine comprising both chunks of text, with appropriate attributes applied to the corresponding substrings.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\debug\\cli\\debugger_cli_common.py",
    "ast_data": "FunctionDef name:__add__ arg:self arg:other arguments arg arg Assign Call If Call Assign Assign Return return:yes If Call Assign Assign Assign Call For Call Return return:yes Raise Call"
  },
  {
    "library": "pytorch",
    "name": "script_lstm",
    "source_code": "def script_lstm(input_size, hidden_size, num_layers, bias=True, batch_first=False, dropout=False, bidirectional=False):\n    assert bias\n    assert not batch_first\n    if bidirectional:\n        stack_type = StackedLSTM2\n        layer_type = BidirLSTMLayer\n        dirs = 2\n    elif dropout:\n        stack_type = StackedLSTMWithDropout\n        layer_type = LSTMLayer\n        dirs = 1\n    else:\n        stack_type = StackedLSTM\n        layer_type = LSTMLayer\n        dirs = 1\n    return stack_type(num_layers, layer_type, first_layer_args=[LSTMCell, input_size, hidden_size], other_layer_args=[LSTMCell, hidden_size * dirs, hidden_size])",
    "docstring": "Returns a ScriptModule that mimics a PyTorch native LSTM.",
    "type": "function",
    "file_path": "pytorch\\benchmarks\\fastrnns\\custom_lstms.py",
    "ast_data": "FunctionDef name:script_lstm arg:input_size arg:hidden_size arg:num_layers arg:bias arg:batch_first arg:dropout arg:bidirectional arguments arg arg arg arg arg arg arg If Assign Assign Assign If Assign Assign Assign Assign Assign Assign Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "make_batches",
    "source_code": "def make_batches(size, batch_size):\n    num_batches = int(np.ceil(size / float(batch_size)))\n    return [(i * batch_size, min(size, (i + 1) * batch_size)) for i in range(0, num_batches)]",
    "docstring": "Returns a list of batch indices (tuples of indices). Args: size: Integer, total size of the data to slice into batches. batch_size: Integer, batch size. Returns: A list of tuples of array indices.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\utils\\generic_utils.py",
    "ast_data": "FunctionDef name:make_batches arg:size arg:batch_size arguments arg arg Assign Call Call Call Return return:yes Call Call"
  },
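A worked example of the index tuples it yields, assuming `make_batches` above is in scope; note the final batch is truncated to the data size.

```python
# Assuming make_batches() from above is in scope.
print(make_batches(10, 3))   # [(0, 3), (3, 6), (6, 9), (9, 10)]
print(make_batches(6, 2))    # [(0, 2), (2, 4), (4, 6)]
```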
  {
    "library": "pytorch",
    "name": "get_param_groups",
    "source_code": "def get_param_groups(inputs: list[Node], params: list[Node], reverse_edges_dict) -> list[dict[str, Any]]:\n    inputs_closure, _ = reverse_closure(inputs, set(), reverse_edges_dict)\n    param_groups: dict[Node, dict[str, set]] = dict()\n    for param in params:\n        closure, intersected = reverse_closure([param], inputs_closure, reverse_edges_dict)\n        param_group: dict[str, set] = {'params': {param}, 'intermediates': intersected}\n        for input_node in intersected:\n            existing = param_groups.get(input_node, None)\n            if existing is not None:\n                existing['params'] = existing['params'].union(param_group['params'])\n                existing['intermediates'] = existing['intermediates'].union(param_group['intermediates'])\n                param_group = existing\n            else:\n                param_groups[input_node] = param_group\n    union_params: set[Node] = set()\n    seen_ids: set[int] = set()\n    unique_param_groups = []\n    for param_group in param_groups.values():\n        if id(param_group) not in seen_ids:\n            seen_ids.add(id(param_group))\n            unique_param_groups.append(param_group)\n            union_params = union_params.union(param_group['params'])\n    return unique_param_groups",
    "docstring": "Given a list of inputs and a list of parameters, return a list of parameter groups, where each group contains the parameters and the intermediates that are connected to the parameters. The returned list of parameter groups is a list of dictionaries, where each dictionary contains the following keys: - \"params\": a set of parameters - \"intermediates\": a set of intermediates The returned list of parameter groups is a list of dictionaries,",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\pipelining\\_backward.py",
    "ast_data": "FunctionDef name:get_param_groups arg:inputs arg:params arg:reverse_edges_dict arguments arg arg arg Assign Call Call Call For Assign Call For Assign Call If Compare Assign Call Assign Call Assign Assign Call Call Assign For Call If Compare Call Call Call Call Assign Call Return return:yes"
  },
  {
    "library": "kornia",
    "name": "line_map_to_segments",
    "source_code": "def line_map_to_segments(junctions: Tensor, line_map: Tensor) -> Tensor:\n    junc_loc1, junc_loc2 = where(torch.triu(line_map))\n    segments = stack([junctions[junc_loc1], junctions[junc_loc2]], 1)\n    return segments",
    "docstring": "Convert a junction connectivity map to a Nx2x2 tensor of segments.",
    "type": "function",
    "file_path": "kornia\\kornia\\feature\\sold2\\sold2_detector.py",
    "ast_data": "FunctionDef name:line_map_to_segments arg:junctions arg:line_map arguments arg arg Assign Call Call Assign Call Return return:yes"
  },
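A self-contained plain-torch sketch of the same conversion: junctions i and j are connected iff `line_map[i, j]` is nonzero, and `triu` keeps each undirected edge once.

```python
# Plain-torch sketch equivalent to the kornia helper above.
import torch

junctions = torch.tensor([[0., 0.], [2., 0.], [0., 2.]])
line_map = torch.tensor([[0, 1, 1],
                         [1, 0, 0],
                         [1, 0, 0]])
i, j = torch.where(torch.triu(line_map) > 0)
segments = torch.stack([junctions[i], junctions[j]], dim=1)
print(segments.shape)   # torch.Size([2, 2, 2]): N segments x 2 endpoints x 2 coords
```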
  {
    "library": "scipy",
    "name": "PearsonRResult",
    "source_code": "class PearsonRResult(PearsonRResultBase):\n\n    def __init__(self, statistic, pvalue, alternative, n, x, y, axis):\n        super().__init__(statistic, pvalue)\n        self._alternative = alternative\n        self._n = n\n        self._x = x\n        self._y = y\n        self._axis = axis\n        self.correlation = statistic\n\n    def confidence_interval(self, confidence_level=0.95, method=None):\n        if isinstance(method, BootstrapMethod):\n            xp = array_namespace(self._x)\n            message = '`method` must be `None` if `pearsonr` arguments were not NumPy arrays.'\n            if not is_numpy(xp):\n                raise ValueError(message)\n            ci = _pearsonr_bootstrap_ci(confidence_level, method, self._x, self._y, self._alternative, self._axis)\n        elif method is None:\n            ci = _pearsonr_fisher_ci(self.statistic, self._n, confidence_level, self._alternative)\n        else:\n            message = '`method` must be an instance of `BootstrapMethod` or None.'\n            raise ValueError(message)\n        return ci",
    "docstring": "Result of Attributes ---------- statistic : float Pearson product-moment correlation coefficient. pvalue : float The p-value associated with the chosen alternative. Methods ------- confidence_interval Computes the confidence interval of the correlation coefficient for the given confidence level.",
    "type": "class",
    "file_path": "scipy\\scipy\\stats\\_stats_py.py",
    "ast_data": "ClassDef name:PearsonRResult FunctionDef name:__init__ arg:self arg:statistic arg:pvalue arg:alternative arg:n arg:x arg:y arg:axis arguments arg arg arg arg arg arg arg arg Call Call Assign Assign Assign Assign Assign Assign FunctionDef name:confidence_interval arg:self arg:confidence_level arg:method arguments arg arg arg If Call Assign Call Assign If Call Raise Call Assign Call If Compare Assign Call Assign Raise Call Return return:yes"
  },
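This result object is what `scipy.stats.pearsonr` returns, so the confidence interval is reached as:

```python
# Public-API usage: pearsonr returns a PearsonRResult with confidence_interval().
import numpy as np
from scipy import stats

rng = np.random.default_rng(0)
x = rng.normal(size=100)
y = x + rng.normal(scale=0.5, size=100)
res = stats.pearsonr(x, y)
print(res.statistic, res.pvalue)
print(res.confidence_interval(confidence_level=0.95))   # Fisher-transform CI
```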
  {
    "library": "tensorflow",
    "name": "_tf_extension_type_with_packed",
    "source_code": "def _tf_extension_type_with_packed(self, value):\n    copy = _create_object_from_type_and_dict(type(self), self.__dict__)\n    copy.__dict__['_tf_extension_type_is_packed'] = value\n    return copy",
    "docstring": "Returns a copy of this with . Args: value: A boolean value. Returns: A copy of with .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\extension_type.py",
    "ast_data": "FunctionDef name:_tf_extension_type_with_packed arg:self arg:value arguments arg arg Assign Call Call Assign Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "__init__",
    "source_code": "def __init__(self, module):\n    super().__init__()\n    self.module = module",
    "docstring": "Collapses input of dim T*N*H to (T*N)*H, and applies to a module. Allows handling of variable sequence lengths and minibatch sizes. :param module: Module to apply input to.",
    "type": "method",
    "file_path": "pytorch\\benchmarks\\functional_autograd_benchmark\\torchaudio_models.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:module arguments arg arg Call Call Assign"
  },
  {
    "library": "numpy",
    "name": "__init__",
    "source_code": "def __init__(self, critical_value):\n    self.critical_value = critical_value",
    "docstring": "DomainGreaterEqual(v)(x) = true where x < v",
    "type": "method",
    "file_path": "numpy\\numpy\\ma\\core.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:critical_value arguments arg arg Assign"
  },
  {
    "library": "numpy",
    "name": "tolist",
    "source_code": "def tolist(self, fill_value=None):\n    _mask = self._mask\n    if _mask is nomask:\n        return self._data.tolist()\n    if fill_value is not None:\n        return self.filled(fill_value).tolist()\n    names = self.dtype.names\n    if names:\n        result = self._data.astype([(_, object) for _ in names])\n        for n in names:\n            result[n][_mask[n]] = None\n        return result.tolist()\n    if _mask is nomask:\n        return [None]\n    inishape = self.shape\n    result = np.array(self._data.ravel(), dtype=object)\n    result[_mask.ravel()] = None\n    result.shape = inishape\n    return result.tolist()",
    "docstring": "Return the data portion of the masked array as a hierarchical Python list. Data items are converted to the nearest compatible Python type. Masked values are converted to . If is None, the corresponding entries in the output list will be ``. Parameters ---------- fill_value : scalar, optional The value to use for invalid entries. Default is None. Returns ------- result : list The Python list representation of the masked array. Examples -------- >>> import numpy as np >>> x = np.ma.array([[1,2,3], [4,5,6], [7,8,9]], mask=[0] + [1,0]*4) >>> x.tolist() [[1, None, 3], [None, 5, None], [7, None, 9]] >>> x.tolist(-999) [[1, -999, 3], [-999, 5, -999], [7, -999, 9]]",
    "type": "method",
    "file_path": "numpy\\numpy\\ma\\core.py",
    "ast_data": "FunctionDef name:tolist arg:self arg:fill_value arguments arg arg Assign If Compare Return return:yes Call If Compare Return return:yes Call Call Assign If Assign Call For Assign Return return:yes Call If Compare Return return:no Assign Assign Call Call Assign Call Assign Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "frozen_saveables_and_savers",
    "source_code": "def frozen_saveables_and_savers(graph_view, object_map=None, to_graph=None, call_with_mapped_captures=None, saveables_cache=None):\n    if to_graph:\n        target_context = to_graph.as_default\n    else:\n        target_context = ops.NullContextmanager\n    with target_context():\n        named_saveable_objects, graph_proto, _, registered_savers = serialize_gathered_objects(graph_view, object_map, call_with_mapped_captures, saveables_cache)\n        with ops.device('/cpu:0'):\n            object_graph_tensor = constant_op.constant(graph_proto.SerializeToString(), dtype=dtypes.string)\n        named_saveable_objects.append(base.NoRestoreSaveable(tensor=object_graph_tensor, name=base.OBJECT_GRAPH_PROTO_KEY))\n    return (named_saveable_objects, registered_savers)",
    "docstring": "Generates SaveableObjects and registered savers in the frozen graph.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\checkpoint\\save_util_v1.py",
    "ast_data": "FunctionDef name:frozen_saveables_and_savers arg:graph_view arg:object_map arg:to_graph arg:call_with_mapped_captures arg:saveables_cache arguments arg arg arg arg arg If Assign Assign With Call Assign Call With Call Assign Call Call Call Call Return return:yes"
  },
  {
    "library": "django",
    "name": "error",
    "source_code": "def error(self, token, e):\n    if not isinstance(e, Exception):\n        e = TemplateSyntaxError(e)\n    if not hasattr(e, 'token'):\n        e.token = token\n    return e",
    "docstring": "Return an exception annotated with the originating token. Since the parser can be called recursively, check if a token is already set. This ensures the innermost token is highlighted if an exception occurs, e.g. a compile error within the body of an if statement.",
    "type": "method",
    "file_path": "django\\django\\template\\base.py",
    "ast_data": "FunctionDef name:error arg:self arg:token arg:e arguments arg arg arg If Call Assign Call If Call Assign Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_write_to_local_cache",
    "source_code": "@staticmethod\ndef _write_to_local_cache(key: str, content: bytes):\n    subdir = AOTAutogradCache._get_tmp_dir_for_key(key)\n    if not os.path.exists(subdir):\n        os.makedirs(subdir, exist_ok=True)\n    path = os.path.join(subdir, sha256_hash(content))\n    log.info('Writing AOTAutograd cache entry to %s', path)\n    write_atomic(path, content)",
    "docstring": "Write an entry to the local cache.",
    "type": "method",
    "file_path": "pytorch\\torch\\_functorch\\_aot_autograd\\autograd_cache.py",
    "ast_data": "FunctionDef name:_write_to_local_cache arg:key arg:content arguments arg arg Assign Call If Call Call Assign Call Call Call Call"
  },
  {
    "library": "cherrypy",
    "name": "__init__",
    "source_code": "def __init__(self, interval, function, args=[], kwargs={}, bus=None):\n    super(BackgroundTask, self).__init__()\n    self.interval = interval\n    self.function = function\n    self.args = args\n    self.kwargs = kwargs\n    self.running = False\n    self.bus = bus\n    self.daemon = True",
    "docstring": "Initialize a background task parameters.",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\process\\plugins.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:interval arg:function arg:args arg:kwargs arg:bus arguments arg arg arg arg arg arg Call Call Assign Assign Assign Assign Assign Assign Assign"
  },
  {
    "library": "pytorch",
    "name": "wait",
    "source_code": "def wait(self) -> T:\n    return super().wait()",
    "docstring": "Block until the value of this `` method will also throw an error.",
    "type": "method",
    "file_path": "pytorch\\torch\\futures\\__init__.py",
    "ast_data": "FunctionDef name:wait arg:self arguments arg Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "file_structure",
    "source_code": "def file_structure(self, *, include: 'GlobPattern'='**', exclude: 'GlobPattern'=()) -> Directory:\n    return _create_directory_from_file_list(self.filename, self.zip_reader.get_all_records(), include, exclude)",
    "docstring": "Returns a file structure representation of package's zipfile. Args: include (Union[List[str], str]): An optional string e.g. `PackageExporter.mockDirectory`",
    "type": "method",
    "file_path": "pytorch\\torch\\package\\package_importer.py",
    "ast_data": "FunctionDef name:file_structure arg:self arguments arg arg arg Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "__add__",
    "source_code": "def __add__(self, other: BlockParameters) -> BlockParameters:\n    cls = type(self)\n    a, b = tuple((dataclasses.asdict(x) for x in (self, other)))\n    return cls(**{key: a[key] + b[key] for key in a})",
    "docstring": "Concatenates block parameters.",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\codegen\\triton.py",
    "ast_data": "FunctionDef name:__add__ arg:self arg:other arguments arg arg Assign Call Assign Call Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "graph",
    "source_code": "@property\ndef graph(self):\n    if self._tf_sess() is None:\n        return None\n    return self._tf_sess().graph",
    "docstring": "The graph that was launched in this session.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\training\\monitored_session.py",
    "ast_data": "FunctionDef name:graph arg:self arguments arg If Compare Call Return return:no Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "draw",
    "source_code": "def draw(self, sizes=None, rng=None, proportions=None, region='domain'):\n    parameter_values = {}\n    if sizes is None or not len(sizes) or (not np.iterable(sizes[0])):\n        sizes = [sizes] * len(self.parameters)\n    for size, param in zip(sizes, self.parameters.values()):\n        parameter_values[param.name] = param.draw(size, rng=rng, proportions=proportions, parameter_values=parameter_values, region=region)\n    return parameter_values",
    "docstring": "Draw random values of all parameters for use in testing. Parameters ---------- sizes : iterable of shape tuples The size of the array to be generated for each parameter in the parameterization. Note that the order of sizes is arbitary; the size of the array generated for a specific parameter is not controlled individually as written. rng : NumPy Generator The generator used to draw random values. proportions : tuple A tuple of four non-negative numbers that indicate the expected relative proportion of elements that are within the parameter's domain, are on the boundary of the parameter's domain, are outside the parameter's domain, and have value NaN. For more information, see the method of the _Parameter subclasses. domain : str The domain of the from which to draw. Default is \"domain\" (the *full* domain); alternative is \"typical\". Returns ------- parameter_values : dict (string: array) A dictionary of parameter name/value pairs.",
    "type": "method",
    "file_path": "scipy\\scipy\\stats\\_distribution_infrastructure.py",
    "ast_data": "FunctionDef name:draw arg:self arg:sizes arg:rng arg:proportions arg:region arguments arg arg arg arg arg Assign If BoolOp Compare Call Call Assign Call For Call Call Assign Call Return return:yes"
  },
  {
    "library": "numpy",
    "name": "lower",
    "source_code": "def lower(self):\n    return asarray(lower(self))",
    "docstring": "Return an array with the elements of converted to lowercase. See Also -------- char.lower",
    "type": "method",
    "file_path": "numpy\\numpy\\_core\\defchararray.py",
    "ast_data": "FunctionDef name:lower arg:self arguments arg Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "mem_get_info",
    "source_code": "def mem_get_info(device: _device_t=None) -> tuple[int, int]:\n    device = _get_device_index(device, optional=True)\n    return torch._C._xpu_getMemoryInfo(device)",
    "docstring": "Return the global free and total GPU memory for a given device. Args: device (torch.device or int or str, optional): selected device. Returns statistic for the current device, given by :func:, if :attr: is `` (default). Returns: int: the memory available on the device in units of bytes. int: the total memory on the device in units of bytes",
    "type": "function",
    "file_path": "pytorch\\torch\\xpu\\memory.py",
    "ast_data": "FunctionDef name:mem_get_info arg:device arguments arg Assign Call Return return:yes Call"
  },
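Typical call, guarded on device availability; this assumes a PyTorch build with XPU support.

```python
# Sketch: query free/total memory on the current XPU device, if present.
import torch

if torch.xpu.is_available():
    free, total = torch.xpu.mem_get_info()   # bytes, current device by default
    print(f'{free / 2**30:.1f} GiB free of {total / 2**30:.1f} GiB')
```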
  {
    "library": "pytorch",
    "name": "BatchFusion",
    "source_code": "class BatchFusion(GroupBatchFusionBase):\n    pass",
    "docstring": "Fuse ops in a batch way, e.g, fuse mm/addmm of same input shapes with bmm.",
    "type": "class",
    "file_path": "pytorch\\torch\\_inductor\\fx_passes\\group_batch_fusion.py",
    "ast_data": "ClassDef name:BatchFusion"
  },
  {
    "library": "pytorch",
    "name": "_reconstruct",
    "source_code": "def _reconstruct(self, cpp_module):\n    self.__init__(cpp_module)\n    self._concrete_type = torch._C.ConcreteModuleType.from_jit_type(self._c._type())\n    modules = {}\n    for name, cpp_module in torch._C.ModuleDict(self._c).items():\n        modules[name] = wrap_cpp_module(cpp_module)\n    self._modules = OrderedModuleDict(self._c, modules)\n    self._parameters = OrderedDictWrapper(torch._C.ParameterDict(self._c))\n    self._buffers = OrderedDictWrapper(torch._C.BufferDict(self._c))\n    self.__dict__ = {k: v for k, v in self.__dict__.items() if not isinstance(v, torch._C.ScriptMethod)}\n    self.__dict__['_initializing'] = False",
    "docstring": "Re-construct an instance of RecursiveScriptModule using an instance of a C++ module. Args: cpp_module: The C++ module that this RecursiveScriptModule will be rebuilt around.",
    "type": "method",
    "file_path": "pytorch\\torch\\jit\\_script.py",
    "ast_data": "FunctionDef name:_reconstruct arg:self arg:cpp_module arguments arg arg Call Assign Call Call Assign For Call Call Assign Call Assign Call Assign Call Call Assign Call Call Assign Call Call Assign"
  },
  {
    "library": "tensorflow",
    "name": "ConcatAggregator",
    "source_code": "class ConcatAggregator(Aggregator):\n\n    def __init__(self, batch_size):\n        self.composite = None\n        super(ConcatAggregator, self).__init__(use_steps=True, num_samples=None, steps=None, batch_size=batch_size)\n\n    def create(self, batch_element):\n        self.composite = is_composite_or_composite_value(batch_element)\n\n    def aggregate(self, batch_element, batch_start=None, batch_end=None):\n        if self.batch_size and self.batch_size < batch_element.shape[0]:\n            raise ValueError('Mismatch between expected batch size and model output batch size. Output shape = {}, expected output shape = shape {}'.format(batch_element.shape, (self.batch_size,) + batch_element.shape[1:]))\n        self.results.append(batch_element)\n\n    def finalize(self):\n        if len(self.results) == 1:\n            self.results = self.results[0]\n        elif self.composite:\n            results = self.results[0]\n            for r in self.results[1:]:\n                results = _append_composite_tensor(results, r)\n            self.results = results\n        else:\n            self.results = np.concatenate(self.results, axis=0)",
    "docstring": "Combine tensor-likes which cannot be merged on the fly. This class expects to aggregate a single tensor-like rather than a nested structure of tensor-likes.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\training_utils_v1.py",
    "ast_data": "ClassDef name:ConcatAggregator FunctionDef name:__init__ arg:self arg:batch_size arguments arg arg Assign Call Call FunctionDef name:create arg:self arg:batch_element arguments arg arg Assign Call FunctionDef name:aggregate arg:self arg:batch_element arg:batch_start arg:batch_end arguments arg arg arg arg If BoolOp Compare Raise Call Call Call FunctionDef name:finalize arg:self arguments arg If Compare Call Assign If Assign For Assign Call Assign Assign Call"
  },
  {
    "library": "scikit-learn",
    "name": "_serialize",
    "source_code": "def _serialize(self):\n    output = dict()\n    for method in SIMPLE_METHODS:\n        mmr = getattr(self, method)\n        if len(mmr.requests):\n            output[method] = mmr._serialize()\n    return output",
    "docstring": "Serialize the object. Returns ------- obj : dict A serialized version of the instance in the form of a dictionary.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\utils\\_metadata_requests.py",
    "ast_data": "FunctionDef name:_serialize arg:self arguments arg Assign Call For Assign Call If Call Assign Call Return return:yes"
  },
  {
    "library": "django",
    "name": "_column_default_sql",
    "source_code": "def _column_default_sql(self, field):\n    return '%s'",
    "docstring": "Return the SQL to use in a DEFAULT clause. The resulting string should contain a '%s' placeholder for a default value.",
    "type": "method",
    "file_path": "django\\django\\db\\backends\\base\\schema.py",
    "ast_data": "FunctionDef name:_column_default_sql arg:self arg:field arguments arg arg Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "get_path",
    "source_code": "def get_path(self):\n    return self._path",
    "docstring": "Return a for the primary part of the marker. For unfilled markers this is the whole marker, for filled markers, this is the area to be drawn with *markerfacecolor*.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\markers.py",
    "ast_data": "FunctionDef name:get_path arg:self arguments arg Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "ToolBack",
    "source_code": "class ToolBack(ViewsPositionsBase):\n    description = 'Back to previous view'\n    image = 'mpl-data/images/back'\n    default_keymap = property(lambda self: mpl.rcParams['keymap.back'])\n    _on_trigger = 'back'",
    "docstring": "Move back up the view limits stack.",
    "type": "class",
    "file_path": "matplotlib\\lib\\matplotlib\\backend_tools.py",
    "ast_data": "ClassDef name:ToolBack Assign Assign Assign Call arguments arg Assign"
  },
  {
    "library": "tensorflow",
    "name": "_row_starts",
    "source_code": "def _row_starts(t, dtype):\n    if isinstance(t, ragged_tensor.RaggedTensor):\n        return math_ops.cast(t.row_starts(), dtype)\n    else:\n        t_shape = array_ops.shape(t, out_type=dtype)\n        return math_ops.range(t_shape[0]) * t_shape[1]",
    "docstring": "Returns the start indices for the rows in .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\ragged_gather_ops.py",
    "ast_data": "FunctionDef name:_row_starts arg:t arg:dtype arguments arg arg If Call Return return:yes Call Call Assign Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "external_captures",
    "source_code": "@property\ndef external_captures(self):\n    return list(self._function_captures.by_val_external.values())",
    "docstring": "External tensors captured by this function.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\func_graph.py",
    "ast_data": "FunctionDef name:external_captures arg:self arguments arg Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_make_train_step_fn",
    "source_code": "def _make_train_step_fn(model, mode, strategy, output_labels):\n\n    def _step_fn(ctx, inputs):\n        if isinstance(inputs, (tuple, list)) and len(inputs) == 2:\n            inputs, targets = inputs\n        else:\n            targets = None\n        if isinstance(inputs, dict):\n            inputs = [inputs[input_name] for input_name in model._feed_input_names]\n        _build_model(strategy, model, mode, inputs, targets)\n        grouped_inputs, grouped_outputs, grouped_updates, grouped_session_args = strategy.extended.call_for_each_replica(_per_replica_execution_function, args=(dist_utils.get_distributed_model(model, mode), mode))\n        all_inputs, all_outputs, all_updates, all_session_args = dist_utils.unwrap_values(strategy, grouped_inputs, grouped_outputs, grouped_updates, grouped_session_args)\n        combined_fn = backend.function(all_inputs, all_outputs, updates=all_updates, name='distributed_' + str(mode) + '_function', **all_session_args)\n        for label, output in zip(output_labels, combined_fn.outputs):\n            if label == 'loss':\n                reduce_op = ds_reduce_util.ReduceOp.SUM\n            else:\n                reduce_op = ds_reduce_util.ReduceOp.MEAN\n            ctx.set_last_step_output(label, output, reduce_op)\n        return combined_fn.updates_op\n    return _step_fn",
    "docstring": "Create step fn. Args: model: a Keras Model instance. mode: One of ModeKeys.TRAIN/ModeKeys.TEST/ModeKeys.PREDICT. strategy: a instance. output_labels: the output labels for the step function. Returns: A step function to run by .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\training_distributed_v1.py",
    "ast_data": "FunctionDef name:_make_train_step_fn arg:model arg:mode arg:strategy arg:output_labels arguments arg arg arg arg FunctionDef name:_step_fn arg:ctx arg:inputs arguments arg arg If BoolOp Call Compare Call Assign Assign If Call Assign Call Assign Call Call Assign Call Assign Call Call For Call If Compare Assign Assign Call Return return:yes Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "Node",
    "source_code": "class Node(object):\n\n    def __init__(self, next_, prev, ast_node):\n        self.next = next_\n        self.prev = prev\n        self.ast_node = ast_node\n\n    def freeze(self):\n        self.next = frozenset(self.next)\n        self.prev = weakref.WeakSet(self.prev)\n\n    def __repr__(self):\n        if isinstance(self.ast_node, gast.FunctionDef):\n            return 'def %s' % self.ast_node.name\n        elif isinstance(self.ast_node, gast.ClassDef):\n            return 'class %s' % self.ast_node.name\n        elif isinstance(self.ast_node, gast.withitem):\n            return astunparse.unparse(self.ast_node.context_expr).strip()\n        return astunparse.unparse(self.ast_node).strip()",
    "docstring": "A node in the CFG. Although new instances of this class are mutable, the objects that a user finds in the CFG are typically not. The nodes represent edges in the CFG graph, and maintain pointers to allow efficient walking in both forward and reverse order. The following property holds for all nodes: \"child in node.next\" iff \"node in child.prev\". Attributes: next: FrozenSet[Node, ...], the nodes that follow this node, in control flow order prev: FrozenSet[Node, ...], the nodes that precede this node, in reverse control flow order ast_node: ast.AST, the AST node corresponding to this CFG node",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\autograph\\pyct\\cfg.py",
    "ast_data": "ClassDef name:Node FunctionDef name:__init__ arg:self arg:next_ arg:prev arg:ast_node arguments arg arg arg arg Assign Assign Assign FunctionDef name:freeze arg:self arguments arg Assign Call Assign Call FunctionDef name:__repr__ arg:self arguments arg If Call Return return:yes If Call Return return:yes If Call Return return:yes Call Call Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "overlaps",
    "source_code": "def overlaps(self, other: LiveRanges):\n    left = collections.deque(self.ranges)\n    right = collections.deque(other.ranges)\n    while left and right:\n        if left[0].begin > right[0].begin:\n            left, right = (right, left)\n        assert left[0].begin <= right[0].begin\n        if left[0].end > right[0].begin:\n            return True\n        left.popleft()\n    return False",
    "docstring": "Check if any pair of ranges in self and other overlap",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\codegen\\memory_planning.py",
    "ast_data": "FunctionDef name:overlaps arg:self arg:other arguments arg arg Assign Call Assign Call While BoolOp If Compare Assign Compare If Compare Return return:yes Call Return return:yes"
  },
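A standalone sketch of the same two-pointer sweep over sorted, half-open ranges; `Range` below is a hypothetical stand-in for inductor's live-range objects.

```python
# Two-pointer overlap test over sorted half-open [begin, end) ranges.
import collections
from dataclasses import dataclass

@dataclass
class Range:            # hypothetical stand-in for the inductor LiveRange
    begin: int
    end: int

def overlaps(a, b):
    left, right = collections.deque(a), collections.deque(b)
    while left and right:
        if left[0].begin > right[0].begin:
            left, right = right, left      # ensure `left` starts first
        if left[0].end > right[0].begin:   # earliest range reaches into the other
            return True
        left.popleft()                     # otherwise it ends before `right` begins
    return False

print(overlaps([Range(0, 5)], [Range(5, 9)]))   # False: end is exclusive
print(overlaps([Range(0, 5)], [Range(4, 9)]))   # True
```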
  {
    "library": "tensorflow",
    "name": "_verify_static_batch_size_equality",
    "source_code": "def _verify_static_batch_size_equality(tensors, columns):\n    expected_batch_size = None\n    for i in range(0, len(tensors)):\n        batch_size = tensor_shape.Dimension(tensor_shape.dimension_value(tensors[i].shape[0]))\n        if batch_size.value is not None:\n            if expected_batch_size is None:\n                bath_size_column_index = i\n                expected_batch_size = batch_size\n            elif not expected_batch_size.is_compatible_with(batch_size):\n                raise ValueError('Batch size (first dimension) of each feature must be same. Batch size of columns ({}, {}): ({}, {})'.format(columns[bath_size_column_index].name, columns[i].name, expected_batch_size, batch_size))",
    "docstring": "Verify equality between static batch sizes. Args: tensors: iterable of input tensors. columns: Corresponding feature columns. Raises: ValueError: in case of mismatched batch sizes.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\feature_column\\feature_column_v2.py",
    "ast_data": "FunctionDef name:_verify_static_batch_size_equality arg:tensors arg:columns arguments arg arg Assign For Call Call Assign Call Call If Compare If Compare Assign Assign If Call Raise Call Call"
  },
  {
    "library": "scipy",
    "name": "jac",
    "source_code": "def jac(self, x):\n    raise NotImplementedError",
    "docstring": "Evaluate jacobian at point x. Parameters ---------- x : ndarray, shape (n,) Vector of residuals f(x). Returns ------- ndarray, shape (m, n) Jacobian matrix of at point .",
    "type": "method",
    "file_path": "scipy\\benchmarks\\benchmarks\\lsq_problems.py",
    "ast_data": "FunctionDef name:jac arg:self arg:x arguments arg arg Raise"
  },
  {
    "library": "pytorch",
    "name": "_maybe_propagate_dtype_for_node",
    "source_code": "def _maybe_propagate_dtype_for_node(node: Node, target_dtype: Union[torch.dtype, type], node_name_to_match_result_with_qconfig: dict[str, _MatchResultWithQConfig]) -> None:\n    node.meta['target_dtype_info']['input_act_obs_or_fq_ctr'] = None\n    node.meta['target_dtype_info']['output_act_obs_or_fq_ctr'] = None\n    _root_node, _, _pattern, qhandler, _qconfig = node_name_to_match_result_with_qconfig.get(node.name, (None, None, None, None, None))\n    if qhandler is not None and qhandler.is_general_tensor_value_op():\n        prev_node = node.args[0]\n        if isinstance(prev_node, Node):\n            _maybe_propagate_dtype_for_node(prev_node, target_dtype, node_name_to_match_result_with_qconfig)",
    "docstring": "Assigns to , setting to False. If is a general tensor shape op, also call this function recursively on the first argument, to propagate the dtype to the caller.",
    "type": "function",
    "file_path": "pytorch\\torch\\ao\\quantization\\fx\\prepare.py",
    "ast_data": "FunctionDef name:_maybe_propagate_dtype_for_node arg:node arg:target_dtype arg:node_name_to_match_result_with_qconfig arguments arg arg arg Assign Assign Assign Call If BoolOp Compare Call Assign If Call Call"
  },
  {
    "library": "scikit-learn",
    "name": "inplace_swap_column",
    "source_code": "def inplace_swap_column(X, m, n):\n    if m < 0:\n        m += X.shape[1]\n    if n < 0:\n        n += X.shape[1]\n    if sp.issparse(X) and X.format == 'csc':\n        inplace_swap_row_csr(X, m, n)\n    elif sp.issparse(X) and X.format == 'csr':\n        inplace_swap_row_csc(X, m, n)\n    else:\n        _raise_typeerror(X)",
    "docstring": "Swap two columns of a CSC/CSR matrix in-place. Parameters ---------- X : sparse matrix of shape (n_samples, n_features) Matrix whose two columns are to be swapped. It should be of CSR or CSC format. m : int Index of the column of X to be swapped. n : int Index of the column of X to be swapped. Examples -------- >>> from sklearn.utils import sparsefuncs >>> from scipy import sparse >>> import numpy as np >>> indptr = np.array([0, 2, 3, 3, 3]) >>> indices = np.array([0, 2, 2]) >>> data = np.array([8, 2, 5]) >>> csr = sparse.csr_matrix((data, indices, indptr)) >>> csr.todense() matrix([[8, 0, 2], [0, 0, 5], [0, 0, 0], [0, 0, 0]]) >>> sparsefuncs.inplace_swap_column(csr, 0, 1) >>> csr.todense() matrix([[0, 8, 2], [0, 0, 5], [0, 0, 0], [0, 0, 0]])",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\utils\\sparsefuncs.py",
    "ast_data": "FunctionDef name:inplace_swap_column arg:X arg:m arg:n arguments arg arg arg If Compare If Compare If BoolOp Call Compare Call If BoolOp Call Compare Call Call"
  },
  {
    "library": "virtualenv",
    "name": "run",
    "source_code": "@abstractmethod\ndef run(self):\n    raise NotImplementedError",
    "docstring": "Discovers an interpreter. :return: the interpreter ready to use for virtual environment creation",
    "type": "method",
    "file_path": "virtualenv\\src\\virtualenv\\discovery\\discover.py",
    "ast_data": "FunctionDef name:run arg:self arguments arg Raise"
  },
  {
    "library": "pytorch",
    "name": "set_stream",
    "source_code": "def set_stream(stream: torch.Stream) -> None:\n    torch._C._accelerator_setStream(stream)",
    "docstring": "Set the current stream to a given stream. Args: stream (torch.Stream): a given stream that must match the current :ref: device type. .. note:: This function will set the current device index to the device index of the given stream.",
    "type": "function",
    "file_path": "pytorch\\torch\\accelerator\\__init__.py",
    "ast_data": "FunctionDef name:set_stream arg:stream arguments arg Call"
  },
  {
    "library": "tensorflow",
    "name": "single_method_decorator",
    "source_code": "def single_method_decorator(f):\n\n    @parameterized.named_parameters(*params)\n    @functools.wraps(f)\n    def decorated(self, run_mode, *args, **kwargs):\n        if run_mode == 'v1_session':\n            _v1_session_test(f, self, config, *args, **kwargs)\n        elif run_mode == 'v2_eager':\n            _v2_eager_test(f, self, *args, **kwargs)\n        elif run_mode == 'v2_function':\n            _v2_function_test(f, self, *args, **kwargs)\n        else:\n            return ValueError('Unknown run mode %s' % run_mode)\n    return decorated",
    "docstring": "Decorator that constructs the test cases.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\keras_parameterized.py",
    "ast_data": "FunctionDef name:single_method_decorator arg:f arguments arg FunctionDef name:decorated arg:self arg:run_mode arguments arg arg arg arg If Compare Call If Compare Call If Compare Call Return return:yes Call Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "layer_norm",
    "source_code": "def layer_norm(input: Tensor, normalized_shape: list[int], weight: Optional[Tensor]=None, bias: Optional[Tensor]=None, eps: float=1e-05) -> Tensor:\n    if has_torch_function_variadic(input, weight, bias):\n        return handle_torch_function(layer_norm, (input, weight, bias), input, normalized_shape, weight=weight, bias=bias, eps=eps)\n    return torch.layer_norm(input, normalized_shape, weight, bias, eps, torch.backends.cudnn.enabled)",
    "docstring": "Apply Layer Normalization for last certain number of dimensions. See :class: for details.",
    "type": "function",
    "file_path": "pytorch\\torch\\nn\\functional.py",
    "ast_data": "FunctionDef name:layer_norm arg:input arg:normalized_shape arg:weight arg:bias arg:eps arguments arg arg arg arg arg If Call Return return:yes Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "f",
    "source_code": "def f(original_nodes):\n    del original_nodes\n    return saved_debug_info",
    "docstring": "Function to create for the given .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\lite\\python\\util.py",
    "ast_data": "FunctionDef name:f arg:original_nodes arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "map_defun",
    "source_code": "def map_defun(fn, elems, output_dtypes, output_shapes, max_intra_op_parallelism=1):\n    if not isinstance(elems, list):\n        raise ValueError(f'`elems` must be a list of tensors, but was {elems}.')\n    if not isinstance(output_dtypes, list):\n        raise ValueError(f'`output_dtypes` must be a list of `tf.DType` objects, but was {output_dtypes}.')\n    if not isinstance(output_shapes, list):\n        raise ValueError(f'`output_shapes` must be a list of `tf.TensorShape` objects, but was {output_shapes}.')\n    concrete_fn = fn.get_concrete_function()\n    elems = [ops.convert_to_tensor(e) for e in elems]\n    output_shapes = [tensor_shape.TensorShape(s) for s in output_shapes]\n    return gen_dataset_ops.map_defun(elems, concrete_fn.captured_inputs, output_dtypes, output_shapes, concrete_fn, max_intra_op_parallelism)",
    "docstring": "Map a function on the list of tensors unpacked from on dimension 0. Args: fn: A function () that takes a list of tensors and returns another list of tensors. The output list has the same types as output_dtypes. The elements of the output list have the same dimension 0 as , and the remaining dimensions correspond to those of . elems: A list of tensors. output_dtypes: A list of dtypes corresponding to the output types of the function. output_shapes: A list of s corresponding to the output shapes from each invocation of the function on slices of inputs. max_intra_op_parallelism: An integer. If positive, sets the max parallelism limit of each function call to this. Raises: ValueError: if any of the inputs are malformed. Returns: A list of objects with the same types as .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\data\\experimental\\ops\\map_defun.py",
    "ast_data": "FunctionDef name:map_defun arg:fn arg:elems arg:output_dtypes arg:output_shapes arg:max_intra_op_parallelism arguments arg arg arg arg arg If Call Raise Call If Call Raise Call If Call Raise Call Assign Call Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "add",
    "source_code": "def add(self, *, caller, callee):\n    if caller not in METHODS:\n        raise ValueError(f'Given caller:{caller} is not a valid method. Valid methods are: {METHODS}')\n    if callee not in METHODS:\n        raise ValueError(f'Given callee:{callee} is not a valid method. Valid methods are: {METHODS}')\n    self._routes.append(MethodPair(caller=caller, callee=callee))\n    return self",
    "docstring": "Add a method mapping. Parameters ---------- caller : str Parent estimator's method name in which the ``. Returns ------- self : MethodMapping Returns self.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\utils\\_metadata_requests.py",
    "ast_data": "FunctionDef name:add arg:self arguments arg arg arg If Compare Raise Call If Compare Raise Call Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "StandardSingleLossStep",
    "source_code": "class StandardSingleLossStep(StandardInputStep):\n\n    def __init__(self, dataset_fn, loss_fn, optimizer, distribution, iterations_per_step=1):\n        super(StandardSingleLossStep, self).__init__(dataset_fn, distribution)\n        self._loss_fn = loss_fn\n        self._optimizer = optimizer\n        self._iterations_per_step = iterations_per_step\n\n    def __call__(self):\n        with self._distribution.scope():\n\n            def step_fn(ctx, inputs):\n                gradients_fn = backprop.implicit_grad(self._loss_fn)\n                gradients_fn = optimizer_lib.get_filtered_grad_fn(gradients_fn)\n                grads_and_vars = self.distribution.extended.call_for_each_replica(gradients_fn, args=(ctx, inputs))\n                return self._optimizer._distributed_apply(self.distribution, grads_and_vars)\n            ctx = self.distribution.extended.experimental_run_steps_on_iterator(step_fn, self._iterator, self._iterations_per_step)\n            return ctx.run_op",
    "docstring": "A step function that implements a training step for a feed forward network. An instance of this class is intended to be used as a callable: Args: dataset_fn: a function that returns a tf.data Dataset that produces the input for the model. loss_fn: a function that takes a context and inputs as arguments. It returns the loss for those inputs. is an instance of that will be passed when is run. can be used to specify the outputs to be returned from , among other things. optimizer: an optimizer that implements an update rule. distribution: a object.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\step_fn.py",
    "ast_data": "ClassDef name:StandardSingleLossStep FunctionDef name:__init__ arg:self arg:dataset_fn arg:loss_fn arg:optimizer arg:distribution arg:iterations_per_step arguments arg arg arg arg arg arg Call Call Assign Assign Assign FunctionDef name:__call__ arg:self arguments arg With Call FunctionDef name:step_fn arg:ctx arg:inputs arguments arg arg Assign Call Assign Call Assign Call Return return:yes Call Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_full_name",
    "source_code": "def _full_name(self):\n    return 'projects/%s/locations/%s/nodes/%s' % (self._project, self._zone, self._tpu)",
    "docstring": "Returns the full Cloud name for this TPU.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\tpu\\client\\client.py",
    "ast_data": "FunctionDef name:_full_name arg:self arguments arg Return return:yes"
  },
  {
    "library": "django",
    "name": "_alter_column_null_sql",
    "source_code": "def _alter_column_null_sql(self, model, old_field, new_field):\n    if self.connection.features.interprets_empty_strings_as_nulls and new_field.empty_strings_allowed:\n        return\n    else:\n        new_db_params = new_field.db_parameters(connection=self.connection)\n        sql = self.sql_alter_column_null if new_field.null else self.sql_alter_column_not_null\n        return (sql % {'column': self.quote_name(new_field.column), 'type': new_db_params['type']}, [])",
    "docstring": "Hook to specialize column null alteration. Return a (sql, params) fragment to set a column to null or non-null as required by new_field, or None if no changes are required.",
    "type": "method",
    "file_path": "django\\django\\db\\backends\\base\\schema.py",
    "ast_data": "FunctionDef name:_alter_column_null_sql arg:self arg:model arg:old_field arg:new_field arguments arg arg arg arg If BoolOp Return return:no Assign Call Assign Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "ExecutionDigest",
    "source_code": "class ExecutionDigest(BaseDigest):\n\n    def __init__(self, wall_time, locator, op_type, output_tensor_device_ids=None):\n        super().__init__(wall_time, locator)\n        self._op_type = op_type\n        self._output_tensor_device_ids = _tuple_or_none(output_tensor_device_ids)\n\n    @property\n    def op_type(self):\n        return self._op_type\n\n    @property\n    def output_tensor_device_ids(self):\n        return self._output_tensor_device_ids\n\n    def to_json(self):\n        output = super().to_json()\n        output.update({'op_type': self.op_type, 'output_tensor_device_ids': self.output_tensor_device_ids})\n        return output",
    "docstring": "Light-weight digest summarizing top-level execution event. Use to load the more detailed data object concerning the execution event (). Properties: op_type: Type name of the executed op. In the case of the eager execution of an individual op, it is the name of the op (e.g., \"MatMul\"). In the case of the execution of a tf.function (FuncGraph), this is the internally-generated name of the function (e.g., \"__inference_my_func_123\"). output_tensor_device_ids: IDs of the devices on which the output tensors of the execution reside. For no-output execution, this is .",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\debug\\lib\\debug_events_reader.py",
    "ast_data": "ClassDef name:ExecutionDigest FunctionDef name:__init__ arg:self arg:wall_time arg:locator arg:op_type arg:output_tensor_device_ids arguments arg arg arg arg arg Call Call Assign Assign Call FunctionDef name:op_type arg:self arguments arg Return return:yes FunctionDef name:output_tensor_device_ids arg:self arguments arg Return return:yes FunctionDef name:to_json arg:self arguments arg Assign Call Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_StopAfterNEvalsHook",
    "source_code": "class _StopAfterNEvalsHook(session_run_hook.SessionRunHook):\n\n    def __init__(self, num_evals, log_progress=True):\n        self._num_evals = num_evals\n        self._evals_completed = None\n        self._log_progress = log_progress\n        self._log_frequency = 1 if num_evals is None or num_evals < 20 else math.floor(num_evals / 10.0)\n\n    def _set_evals_completed_tensor(self, updated_eval_step):\n        self._evals_completed = updated_eval_step\n\n    def before_run(self, run_context):\n        return session_run_hook.SessionRunArgs({'evals_completed': self._evals_completed})\n\n    def after_run(self, run_context, run_values):\n        evals_completed = run_values.results['evals_completed']\n        if self._log_progress:\n            if self._num_evals is None:\n                logging.info('Evaluation [%d]', evals_completed)\n            elif evals_completed % self._log_frequency == 0 or self._num_evals == evals_completed:\n                logging.info('Evaluation [%d/%d]', evals_completed, self._num_evals)\n        if self._num_evals is not None and evals_completed >= self._num_evals:\n            run_context.request_stop()",
    "docstring": "Run hook used by the evaluation routines to run the N times.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\training\\evaluation.py",
    "ast_data": "ClassDef name:_StopAfterNEvalsHook FunctionDef name:__init__ arg:self arg:num_evals arg:log_progress arguments arg arg arg Assign Assign Assign Assign BoolOp Compare Compare Call FunctionDef name:_set_evals_completed_tensor arg:self arg:updated_eval_step arguments arg arg Assign FunctionDef name:before_run arg:self arg:run_context arguments arg arg Return return:yes Call FunctionDef name:after_run arg:self arg:run_context arg:run_values arguments arg arg arg Assign If If Compare Call If BoolOp Compare Compare Call If BoolOp Compare Compare Call"
  },
  {
    "library": "scipy",
    "name": "isf",
    "source_code": "def isf(self, q, *args, **kwds):\n    args, loc, _ = self._parse_args(*args, **kwds)\n    q, loc = map(asarray, (q, loc))\n    args = tuple(map(asarray, args))\n    _a, _b = self._get_support(*args)\n    cond0 = self._argcheck(*args) & (loc == loc)\n    cond1 = (q > 0) & (q < 1)\n    cond2 = (q == 1) & cond0\n    cond3 = (q == 0) & cond0\n    cond = cond0 & cond1\n    output = np.full(shape(cond), fill_value=self.badvalue, dtype='d')\n    lower_bound = _a - 1 + loc\n    upper_bound = _b + loc\n    place(output, cond2 * (cond == cond), lower_bound)\n    place(output, cond3 * (cond == cond), upper_bound)\n    if np.any(cond):\n        goodargs = argsreduce(cond, *(q,) + args + (loc,))\n        loc, goodargs = (goodargs[-1], goodargs[:-1])\n        place(output, cond, self._isf(*goodargs) + loc)\n    if output.ndim == 0:\n        return output[()]\n    return output",
    "docstring": "Inverse survival function (inverse of ) at q of the given RV. Parameters ---------- q : array_like Upper tail probability. arg1, arg2, arg3,... : array_like The shape parameter(s) for the distribution (see docstring of the instance object for more information). loc : array_like, optional Location parameter (default=0). Returns ------- k : ndarray or scalar Quantile corresponding to the upper tail probability, q.",
    "type": "method",
    "file_path": "scipy\\scipy\\stats\\_distn_infrastructure.py",
    "ast_data": "FunctionDef name:isf arg:self arg:q arguments arg arg arg arg Assign Call Assign Call Assign Call Call Assign Call Assign Call Compare Assign Compare Compare Assign Compare Assign Compare Assign Assign Call Call Assign Assign Call Compare Call Compare If Call Assign Call Assign Call Call If Compare Return return:yes Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "custom_box_style",
    "source_code": "def custom_box_style(x0, y0, width, height, mutation_size):\n    mypad = 0.3\n    pad = mutation_size * mypad\n    width = width + 2 * pad\n    height = height + 2 * pad\n    x0, y0 = (x0 - pad, y0 - pad)\n    x1, y1 = (x0 + width, y0 + height)\n    return Path([(x0, y0), (x1, y0), (x1, y1), (x0, y1), (x0 - pad, (y0 + y1) / 2), (x0, y0), (x0, y0)], closed=True)",
    "docstring": "Given the location and size of the box, return the path of the box around it. Rotation is automatically taken care of. Parameters ---------- x0, y0, width, height : float Box location and size. mutation_size : float Mutation reference scale, typically the text font size.",
    "type": "function",
    "file_path": "matplotlib\\galleries\\users_explain\\text\\annotations.py",
    "ast_data": "FunctionDef name:custom_box_style arg:x0 arg:y0 arg:width arg:height arg:mutation_size arguments arg arg arg arg arg Assign Assign Assign Assign Assign Assign Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "_RemoveRuntimeAssertionsPass",
    "source_code": "class _RemoveRuntimeAssertionsPass(PassBase):\n\n    def call(self, graph_module: torch.fx.GraphModule) -> PassResult:\n        modified = False\n        for module in graph_module.modules():\n            if not isinstance(module, torch.fx.GraphModule):\n                continue\n            for node in module.graph.nodes:\n                if node.target in [torch.ops.aten._assert_async.msg, torch.ops.aten._assert_scalar.default, torch.ops.aten.sym_constrain_range_for_size.default, torch.ops.aten.sym_constrain_range.default, torch.ops.aten._assert_tensor_metadata.default]:\n                    assert_async_node = node\n                    if len(assert_async_node.users) > 0:\n                        continue\n                    module.graph.erase_node(assert_async_node)\n                    modified = True\n        return PassResult(graph_module, modified)",
    "docstring": "Remove runtime assertions inserted by the _AddRuntimeAssertionsForInlineConstraintsPass.",
    "type": "class",
    "file_path": "pytorch\\torch\\_export\\passes\\remove_runtime_assertions.py",
    "ast_data": "ClassDef name:_RemoveRuntimeAssertionsPass FunctionDef name:call arg:self arg:graph_module arguments arg arg Assign For Call If Call For If Compare Assign If Compare Call Call Assign Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "from_arrays",
    "source_code": "@classmethod\ndef from_arrays(cls, arrays, sortorder: int | None=None, names: Sequence[Hashable] | Hashable | lib.NoDefault=lib.no_default) -> MultiIndex:\n    error_msg = 'Input must be a list / sequence of array-likes.'\n    if not is_list_like(arrays):\n        raise TypeError(error_msg)\n    if is_iterator(arrays):\n        arrays = list(arrays)\n    for array in arrays:\n        if not is_list_like(array):\n            raise TypeError(error_msg)\n    for i in range(1, len(arrays)):\n        if len(arrays[i]) != len(arrays[i - 1]):\n            raise ValueError('all arrays must be same length')\n    codes, levels = factorize_from_iterables(arrays)\n    if names is lib.no_default:\n        names = [getattr(arr, 'name', None) for arr in arrays]\n    return cls(levels=levels, codes=codes, sortorder=sortorder, names=names, verify_integrity=False)",
    "docstring": "Convert arrays to MultiIndex. Parameters ---------- arrays : list / sequence of array-likes Each array-like gives one level's value for each data point. len(arrays) is the number of levels. sortorder : int or None Level of sortedness (must be lexicographically sorted by that level). names : list / sequence of str, optional Names for the levels in the index. Returns ------- MultiIndex See Also -------- MultiIndex.from_tuples : Convert list of tuples to MultiIndex. MultiIndex.from_product : Make a MultiIndex from cartesian product of iterables. MultiIndex.from_frame : Make a MultiIndex from a DataFrame. Examples -------- >>> arrays = [[1, 1, 2, 2], [\"red\", \"blue\", \"red\", \"blue\"]] >>> pd.MultiIndex.from_arrays(arrays, names=(\"number\", \"color\")) MultiIndex([(1, 'red'), (1, 'blue'), (2, 'red'), (2, 'blue')], names=['number', 'color'])",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\indexes\\multi.py",
    "ast_data": "FunctionDef name:from_arrays arg:cls arg:arrays arg:sortorder arg:names arguments arg arg arg arg Assign If Call Raise Call If Call Assign Call For If Call Raise Call For Call Call If Compare Call Call Raise Call Assign Call If Compare Assign Call Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "_divide_sparse",
    "source_code": "def _divide_sparse(self, other):\n    if other.shape != self.shape:\n        raise ValueError(f'inconsistent shapes {self.shape} and {other.shape}')\n    r = self._binopt(other, '_eldiv_')\n    if np.issubdtype(r.dtype, np.inexact):\n        out = np.empty(self.shape, dtype=self.dtype)\n        out.fill(np.nan)\n        coords = other.nonzero()\n        if self.ndim == 1:\n            coords = (coords[-1],)\n        out[coords] = 0\n        r = r.tocoo()\n        out[r.coords] = r.data\n        return self._container(out)\n    else:\n        out = r\n        return out",
    "docstring": "Divide this matrix by a second sparse matrix.",
    "type": "method",
    "file_path": "scipy\\scipy\\sparse\\_compressed.py",
    "ast_data": "FunctionDef name:_divide_sparse arg:self arg:other arguments arg arg If Compare Raise Call Assign Call If Call Assign Call Call Assign Call If Compare Assign Assign Assign Call Assign Return return:yes Call Assign Return return:yes"
  },
  {
    "library": "seaborn",
    "name": "categorical_order",
    "source_code": "def categorical_order(vector: Series, order: list | None=None) -> list:\n    if order is not None:\n        return order\n    if vector.dtype.name == 'category':\n        order = list(vector.cat.categories)\n    else:\n        order = list(filter(pd.notnull, vector.unique()))\n        if variable_type(pd.Series(order)) == 'numeric':\n            order.sort()\n    return order",
    "docstring": "Return a list of unique data values using seaborn's ordering rules. Parameters ---------- vector : Series Vector of \"categorical\" values order : list Desired order of category levels to override the order determined from the object. Returns ------- order : list Ordered list of category levels not including null values.",
    "type": "function",
    "file_path": "seaborn\\seaborn\\_core\\rules.py",
    "ast_data": "FunctionDef name:categorical_order arg:vector arg:order arguments arg arg If Compare Return return:yes If Compare Assign Call Assign Call Call Call If Compare Call Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "should_recreate_iterator",
    "source_code": "@abc.abstractmethod\ndef should_recreate_iterator(self):\n    raise NotImplementedError",
    "docstring": "Returns whether a new iterator should be created every epoch.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\data_adapter.py",
    "ast_data": "FunctionDef name:should_recreate_iterator arg:self arguments arg Raise"
  },
  {
    "library": "kornia",
    "name": "__call__",
    "source_code": "def __call__(self, *inputs: Any, input_names_to_handle: Optional[list[Any]]=None, output_type: str='tensor', **kwargs: Any) -> Any:\n    if not self._disable_features:\n        decorated_forward = self.convert_input_output(input_names_to_handle=input_names_to_handle, output_type=output_type)(super().__call__)\n        _output_image = decorated_forward(*inputs, **kwargs)\n        if output_type == 'tensor':\n            self._output_image = self._detach_tensor_to_cpu(_output_image)\n        else:\n            self._output_image = _output_image\n    else:\n        _output_image = super().__call__(*inputs, **kwargs)\n    return _output_image",
    "docstring": "Overwrite the __call__ function to handle various inputs. Args: inputs: Inputs to operate on. input_names_to_handle: List of input names to convert, if None, handle all inputs. output_type: Desired output type ('tensor', 'numpy', or 'pil'). kwargs: Additional arguments. Returns: Callable: Decorated function with converted input and output types.",
    "type": "method",
    "file_path": "kornia\\kornia\\core\\module.py",
    "ast_data": "FunctionDef name:__call__ arg:self arguments arg arg arg arg arg If Assign Call Call Call Assign Call If Compare Assign Call Assign Assign Call Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "_str_lower_equal",
    "source_code": "def _str_lower_equal(obj, s):\n    return isinstance(obj, str) and obj.lower() == s",
    "docstring": "Return whether *obj* is a string equal, when lowercased, to string *s*. This helper solely exists to handle the case where *obj* is a numpy array, because in such cases, a naive `` would yield an array, which cannot be used in a boolean context.",
    "type": "function",
    "file_path": "matplotlib\\lib\\matplotlib\\cbook.py",
    "ast_data": "FunctionDef name:_str_lower_equal arg:obj arg:s arguments arg arg Return return:yes BoolOp Call Compare Call"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, node_def, op, message, error_code, *args):\n    super(OpError, self).__init__()\n    self._node_def = node_def\n    self._op = op\n    self._message = message\n    self._error_code = error_code\n    if args:\n        self._experimental_payloads = args[0]\n    else:\n        self._experimental_payloads = {}",
    "docstring": "Creates a new indicating that a particular op failed. Args: node_def: The proto representing the op that failed, if known; otherwise None. op: The that failed, if known; otherwise None. During eager execution, this field is always . message: The message string describing the failure. error_code: The describing the error. *args: If not empty, it should contain a dictionary describing details about the error. This argument is inspired by Abseil payloads:",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\errors_impl.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:node_def arg:op arg:message arg:error_code arguments arg arg arg arg arg arg Call Call Assign Assign Assign Assign If Assign Assign"
  },
  {
    "library": "pytorch",
    "name": "_sorted_patterns_dict",
    "source_code": "def _sorted_patterns_dict(patterns_dict: dict[Pattern, QuantizeHandler]) -> dict[Pattern, QuantizeHandler]:\n\n    def get_len(pattern):\n        len = 0\n        if isinstance(pattern, tuple):\n            for item in pattern:\n                len += get_len(item)\n        else:\n            len += 1\n        return len\n    return OrderedDict(sorted(patterns_dict.items(), key=lambda kv: -get_len(kv[0]) if isinstance(kv[0], tuple) else 1))",
    "docstring": "Return a sorted version of the patterns dictionary such that longer patterns are matched first, e.g. match (F.relu, F.linear) before F.relu. This works for current use cases, but we may need to have a more clever way to sort things to address more complex patterns",
    "type": "function",
    "file_path": "pytorch\\torch\\ao\\quantization\\fx\\pattern_utils.py",
    "ast_data": "FunctionDef name:_sorted_patterns_dict arg:patterns_dict arguments arg FunctionDef name:get_len arg:pattern arguments arg Assign If Call For Call Return return:yes Return return:yes Call Call Call arguments arg Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_execution_digest_from_debug_event_proto",
    "source_code": "def _execution_digest_from_debug_event_proto(debug_event, locator):\n    return ExecutionDigest(debug_event.wall_time, locator, debug_event.execution.op_type, output_tensor_device_ids=debug_event.execution.output_tensor_device_ids or None)",
    "docstring": "Convert a DebugEvent proto into an ExecutionDigest data object.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\debug\\lib\\debug_events_reader.py",
    "ast_data": "FunctionDef name:_execution_digest_from_debug_event_proto arg:debug_event arg:locator arguments arg arg Return return:yes Call BoolOp"
  },
  {
    "library": "sphinx",
    "name": "getall",
    "source_code": "def getall(obj: Any) -> Sequence[str] | None:\n    __all__ = safe_getattr(obj, '__all__', None)\n    if __all__ is None:\n        return None\n    if isinstance(__all__, list | tuple) and all((isinstance(e, str) for e in __all__)):\n        return __all__\n    raise ValueError(__all__)",
    "docstring": "Get the `ValueError` is not a list or tuple of strings.",
    "type": "function",
    "file_path": "sphinx\\sphinx\\util\\inspect.py",
    "ast_data": "FunctionDef name:getall arg:obj arguments arg Assign Call If Compare Return return:no If BoolOp Call Call Call Return return:yes Raise Call"
  },
  {
    "library": "pandas",
    "name": "_get_metadata_path",
    "source_code": "def _get_metadata_path(self, key: str) -> str:\n    group = self.group._v_pathname\n    return f'{group}/meta/{key}/meta'",
    "docstring": "return the metadata pathname for this key",
    "type": "method",
    "file_path": "pandas\\pandas\\io\\pytables.py",
    "ast_data": "FunctionDef name:_get_metadata_path arg:self arg:key arguments arg arg Assign Return return:yes"
  },
  {
    "library": "pandas",
    "name": "_alert_malformed",
    "source_code": "def _alert_malformed(self, msg: str, row_num: int) -> None:\n    if self.on_bad_lines == self.BadLineHandleMethod.ERROR:\n        raise ParserError(msg)\n    if self.on_bad_lines == self.BadLineHandleMethod.WARN:\n        warnings.warn(f'Skipping line {row_num}: {msg}\\n', ParserWarning, stacklevel=find_stack_level())",
    "docstring": "Alert a user about a malformed row, depending on value of enum. If is ERROR, the alert will be . If is WARN, the alert will be printed out. Parameters ---------- msg: str The error message to display. row_num: int The row number where the parsing error occurred. Because this row number is displayed, we 1-index, even though we 0-index internally.",
    "type": "method",
    "file_path": "pandas\\pandas\\io\\parsers\\python_parser.py",
    "ast_data": "FunctionDef name:_alert_malformed arg:self arg:msg arg:row_num arguments arg arg arg If Compare Raise Call If Compare Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_update_sparse_core_buffer_size_after_table_stacking",
    "source_code": "def _update_sparse_core_buffer_size_after_table_stacking(self):\n    for table_name in self._stacked_table_to_tables:\n        if self._sparse_core_embedding_config.max_ids_per_table is None or table_name not in self._sparse_core_embedding_config.max_ids_per_table:\n            logging.warning('Table %s is not found in max_ids_per_table provided by SparseCoreEmbeddingConfig. Using default value 256.', table_name)\n            self._table_to_max_ids_per_sparse_core[table_name] = self.DEFAULT_MAX_IDS_PER_TABLE\n        else:\n            self._table_to_max_ids_per_sparse_core[table_name] = self._sparse_core_embedding_config.max_ids_per_table[table_name]\n        if self._sparse_core_embedding_config.max_unique_ids_per_table is None or table_name not in self._sparse_core_embedding_config.max_unique_ids_per_table:\n            logging.warning('Table %s is not found in max_unique_ids_per_table provided by SparseCoreEmbeddingConfig. Using default value 256.', table_name)\n            self._table_to_max_unique_ids_per_sparse_core[table_name] = self.DEFAULT_MAX_UNIQUE_IDS_PER_TABLE\n        else:\n            self._table_to_max_unique_ids_per_sparse_core[table_name] = self._sparse_core_embedding_config.max_unique_ids_per_table[table_name]",
    "docstring": "Update the sparse core buffer size after table stacking.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\tpu\\tpu_embedding_v3.py",
    "ast_data": "FunctionDef name:_update_sparse_core_buffer_size_after_table_stacking arg:self arguments arg For If BoolOp Compare Compare Call Assign Assign If BoolOp Compare Compare Call Assign Assign"
  },
  {
    "library": "pytorch",
    "name": "is_debug",
    "source_code": "def is_debug(self) -> bool:\n    return self.build_type_string == 'Debug'",
    "docstring": "Checks Debug build.",
    "type": "method",
    "file_path": "pytorch\\tools\\setup_helpers\\env.py",
    "ast_data": "FunctionDef name:is_debug arg:self arguments arg Return return:yes Compare"
  },
  {
    "library": "tensorflow",
    "name": "value_type",
    "source_code": "@abc.abstractproperty\ndef value_type(self):\n    raise NotImplementedError('%s.value_type' % type(self).__name__)",
    "docstring": "The Python type for values that are compatible with this TypeSpec. In particular, all values that are compatible with this TypeSpec must be an instance of this type.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\type_spec.py",
    "ast_data": "FunctionDef name:value_type arg:self arguments arg Raise Call Call"
  },
  {
    "library": "pytorch",
    "name": "_immutable_dict",
    "source_code": "def _immutable_dict(items):\n    from types import MappingProxyType\n    return MappingProxyType(dict(items))",
    "docstring": "Creates a mapping where items cannot be added, deleted, or updated. NOTE: The immutability is shallow (like tuple is an immutable collection).",
    "type": "function",
    "file_path": "pytorch\\torch\\export\\graph_signature.py",
    "ast_data": "FunctionDef name:_immutable_dict arg:items arguments arg Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "verify_onnx_program",
    "source_code": "def verify_onnx_program(onnx_program: _onnx_program.ONNXProgram, args: tuple[Any, ...] | None=None, kwargs: dict[str, Any] | None=None, compare_intermediates: bool=False) -> list[VerificationInfo]:\n    exported_program = onnx_program.exported_program\n    if exported_program is None:\n        raise ValueError('The ONNX program does not contain an exported_program. Please provide an exported_program to verify the ONNX program.')\n    if args is None and kwargs is None:\n        if exported_program.example_inputs is None:\n            raise ValueError('No example inputs provided and the exported_program does not contain example inputs. Please provide arguments to verify the ONNX program.')\n        args, kwargs = exported_program.example_inputs\n    if args is None:\n        args = ()\n    if kwargs is None:\n        kwargs = {}\n    flat_args, _ = exported_program._get_flat_args_with_check(args, kwargs)\n    if not compare_intermediates:\n        torch_outputs, _ = _pytree.tree_flatten(exported_program.module()(*args, **kwargs))\n        onnx_outputs = onnx_program(*flat_args)\n        results = []\n        for torch_output, onnx_output, output_val in zip(torch_outputs, onnx_outputs, onnx_program.model.graph.outputs):\n            results.append(VerificationInfo.from_tensors(name=str(output_val.name), expected=torch_output, actual=onnx_output))\n        return results\n    interpreter = _VerificationInterpreter(onnx_program)\n    interpreter.run(*flat_args)\n    return interpreter.verification_infos",
    "docstring": "Verify the ONNX model by comparing the values with the expected values from ExportedProgram. Args: onnx_program: The ONNX program to verify. args: The input arguments for the model. kwargs: The keyword arguments for the model. compare_intermediates: Whether to verify intermediate values. This is going to take longer time, so it is disabled by default. Returns: VerificationInfo objects containing the verification information for each value.",
    "type": "function",
    "file_path": "pytorch\\torch\\onnx\\_internal\\exporter\\_verification.py",
    "ast_data": "FunctionDef name:verify_onnx_program arg:onnx_program arg:args arg:kwargs arg:compare_intermediates arguments arg arg arg arg Assign If Compare Raise Call If BoolOp Compare Compare If Compare Raise Call Assign If Compare Assign If Compare Assign Assign Call If Assign Call Call Call Assign Call Assign For Call Call Call Call Return return:yes Assign Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "parse_results",
    "source_code": "def parse_results(lines):\n    idx = 0\n    batch, onednn, model = (None, None, None)\n    state = State.FIND_CONFIG_OR_MODEL\n    while idx < len(lines):\n        if state is State.FIND_CONFIG_OR_MODEL:\n            config = re.match(\"\\\\+ echo 'BATCH=(?P<batch>[\\\\d]+), ONEDNN=(?P<onednn>[\\\\d]+)\", lines[idx])\n            if config:\n                batch = int(config.group('batch'))\n                onednn = int(config.group('onednn'))\n                batch_sizes.add(batch)\n            else:\n                model_re = re.search('tf-graphs\\\\/(?P<model>[\\\\w\\\\d_-]+).pb', lines[idx])\n                assert model_re\n                model = model_re.group('model')\n                models.add(model)\n                state = State.FIND_RUNNING_TIME\n        elif state is State.FIND_RUNNING_TIME:\n            match = re.search('no stats: (?P<avg>[\\\\d.]+)', lines[idx])\n            state = State.FIND_CONFIG_OR_MODEL\n            if match:\n                avg = float(match.group('avg'))\n                key = (model, batch, onednn)\n                assert None not in key\n                db[key] = avg\n            else:\n                continue\n        else:\n            raise RuntimeError('Reached the unreachable code.')\n        idx = idx + 1",
    "docstring": "Parses benchmark results from run_onednn_benchmarks.sh. Stores results in a global dict. Args: lines: Array of strings corresponding to each line of the output from run_onednn_benchmarks.sh Raises: RuntimeError: If the program reaches an unknown state.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\tools\\benchmark\\parse_onednn_benchmarks.py",
    "ast_data": "FunctionDef name:parse_results arg:lines arguments arg Assign Assign Assign While Compare Call If Compare Assign Call If Assign Call Call Assign Call Call Call Assign Call Assign Call Call Assign If Compare Assign Call Assign If Assign Call Call Assign Compare Assign Raise Call Assign"
  },
  {
    "library": "tensorflow",
    "name": "_address",
    "source_code": "@property\ndef _address(self) -> str:\n    return 'localhost:{0}'.format(self._server.bound_port())",
    "docstring": "Returns the address of the server. The returned string will be in the form address:port, e.g. \"localhost:1000\".",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\data\\experimental\\service\\server_lib.py",
    "ast_data": "FunctionDef name:_address arg:self arguments arg Return return:yes Call Call"
  },
  {
    "library": "matplotlib",
    "name": "_offset",
    "source_code": "def _offset(self, ox, oy):\n    for c in self._cells.values():\n        x, y = (c.get_x(), c.get_y())\n        c.set_x(x + ox)\n        c.set_y(y + oy)",
    "docstring": "Move all the artists by ox, oy (axes coords).",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\table.py",
    "ast_data": "FunctionDef name:_offset arg:self arg:ox arg:oy arguments arg arg arg For Call Assign Call Call Call Call"
  },
  {
    "library": "scikit-learn",
    "name": "check_nonsquare_error",
    "source_code": "@ignore_warnings\ndef check_nonsquare_error(name, estimator_orig):\n    X, y = make_blobs(n_samples=20, n_features=10)\n    estimator = clone(estimator_orig)\n    with raises(ValueError, err_msg=f'The pairwise estimator {name} does not raise an error on non-square data'):\n        estimator.fit(X, y)",
    "docstring": "Test that error is thrown when non-square data provided.",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\utils\\estimator_checks.py",
    "ast_data": "FunctionDef name:check_nonsquare_error arg:name arg:estimator_orig arguments arg arg Assign Call Assign Call With Call Call"
  },
  {
    "library": "pandas",
    "name": "_set_default_format",
    "source_code": "def _set_default_format(self, vmin, vmax):\n    info = self.finder(vmin, vmax, self.freq)\n    if self.isminor:\n        format = np.compress(info['min'] & np.logical_not(info['maj']), info)\n    else:\n        format = np.compress(info['maj'], info)\n    self.formatdict = {x: f for x, _, _, f in format}\n    return self.formatdict",
    "docstring": "Returns the default ticks spacing.",
    "type": "method",
    "file_path": "pandas\\pandas\\plotting\\_matplotlib\\converter.py",
    "ast_data": "FunctionDef name:_set_default_format arg:self arg:vmin arg:vmax arguments arg arg arg Assign Call If Assign Call Call Assign Call Assign Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_set_pg_timeout",
    "source_code": "def _set_pg_timeout(timeout: timedelta, group: Optional[ProcessGroup]=None) -> None:\n    if group is None:\n        group = _get_default_group()\n    if _rank_not_in_group(group):\n        raise ValueError('Invalid process group specified')\n    assert isinstance(group, ProcessGroup)\n    devices = group._device_types\n    backends = set()\n    if torch.device('cpu') in devices and is_gloo_available():\n        backend = group._get_backend(torch.device('cpu'))\n        if isinstance(backend, ProcessGroupGloo):\n            backends.add(backend)\n    if torch.device('cuda') in devices:\n        backend = group._get_backend(torch.device('cuda'))\n        if is_nccl_available() and isinstance(backend, ProcessGroupNCCL):\n            backends.add(backend)\n        elif is_gloo_available() and isinstance(backend, ProcessGroupGloo):\n            backends.add(backend)\n    if len(backends) == 0:\n        warnings.warn('Set timeout is now only supported for either nccl or gloo.')\n    for backend in backends:\n        backend._set_default_timeout(timeout)",
    "docstring": "Set the timeout for the given process group when users want to use a different timeout instead of default values. Args: timeout (timedelta): Timeout for operations executed against the process group which users want to set. Default value is 10 minutes for NCCL and 30 minutes for other backends. This is the duration after which collectives will be aborted asynchronously and the process will crash. This is done since CUDA execution is async and it is no longer safe to continue executing user code since failed async NCCL operations might result in subsequent CUDA operations running on corrupted data. When TORCH_NCCL_BLOCKING_WAIT is set, the process will block and wait for this timeout. group (ProcessGroup, optional): The process group to work on. The default is the general main process group. If another specific group is specified, the calling process must be part of :attr:. Returns: None",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\distributed_c10d.py",
    "ast_data": "FunctionDef name:_set_pg_timeout arg:timeout arg:group arguments arg arg If Compare Assign Call If Call Raise Call Call Assign Assign Call If BoolOp Compare Call Call Assign Call Call If Call Call If Compare Call Assign Call Call If BoolOp Call Call Call If BoolOp Call Call Call If Compare Call Call For Call"
  },
  {
    "library": "pytorch",
    "name": "GLU",
    "source_code": "class GLU(Module):\n    __constants__ = ['dim']\n    dim: int\n\n    def __init__(self, dim: int=-1) -> None:\n        super().__init__()\n        self.dim = dim\n\n    def forward(self, input: Tensor) -> Tensor:\n        return F.glu(input, self.dim)\n\n    def extra_repr(self) -> str:\n        return f'dim={self.dim}'",
    "docstring": "Applies the gated linear unit function. :math: where :math: is the first half of the input matrices and :math: is the second half. Args: dim (int): the dimension on which to split the input. Default: -1 Shape: - Input: :math: where means, any number of additional dimensions - Output: :math: where :math: .. image:: ../scripts/activation_images/GLU.png Examples:: >>> m = nn.GLU() >>> input = torch.randn(4, 2) >>> output = m(input)",
    "type": "class",
    "file_path": "pytorch\\torch\\nn\\modules\\activation.py",
    "ast_data": "ClassDef name:GLU Assign FunctionDef name:__init__ arg:self arg:dim arguments arg arg Call Call Assign FunctionDef name:forward arg:self arg:input arguments arg arg Return return:yes Call FunctionDef name:extra_repr arg:self arguments arg Return return:yes"
  },
  {
    "library": "scipy",
    "name": "__call__",
    "source_code": "def __call__(self, t):\n    t = np.asarray(t)\n    if t.ndim > 1:\n        raise ValueError('`t` must be a float or a 1-D array.')\n    return self._call_impl(t)",
    "docstring": "Evaluate the interpolant. Parameters ---------- t : float or array_like with shape (n_points,) Points to evaluate the solution at. Returns ------- y : ndarray, shape (n,) or (n, n_points) Computed values. Shape depends on whether was a scalar or a 1-D array.",
    "type": "method",
    "file_path": "scipy\\scipy\\integrate\\_ivp\\base.py",
    "ast_data": "FunctionDef name:__call__ arg:self arg:t arguments arg arg Assign Call If Compare Raise Call Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "SASReader",
    "source_code": "class SASReader(Iterator['DataFrame'], ABC):\n\n    @abstractmethod\n    def read(self, nrows: int | None=None) -> DataFrame:\n        ...\n\n    @abstractmethod\n    def close(self) -> None:\n        ...\n\n    def __enter__(self) -> Self:\n        return self\n\n    def __exit__(self, exc_type: type[BaseException] | None, exc_value: BaseException | None, traceback: TracebackType | None) -> None:\n        self.close()",
    "docstring": "Abstract class for XportReader and SAS7BDATReader.",
    "type": "class",
    "file_path": "pandas\\pandas\\io\\sas\\sasreader.py",
    "ast_data": "ClassDef name:SASReader FunctionDef name:read arg:self arg:nrows arguments arg arg FunctionDef name:close arg:self arguments arg FunctionDef name:__enter__ arg:self arguments arg Return return:yes FunctionDef name:__exit__ arg:self arg:exc_type arg:exc_value arg:traceback arguments arg arg arg arg Call"
  },
  {
    "library": "pytorch",
    "name": "_unique_symbols",
    "source_code": "@classmethod\ndef _unique_symbols(cls, args, initial_set: Optional[set[sympy.core.symbol.Symbol]]=None) -> Optional[set[sympy.core.symbol.Symbol]]:\n    seen_symbols = set() if initial_set is None else initial_set\n    for arg in args:\n        for element in arg.atoms():\n            if not isinstance(element, sympy.core.symbol.Symbol):\n                return None\n            elif element in seen_symbols:\n                return None\n            else:\n                seen_symbols.add(element)\n    return seen_symbols",
    "docstring": "Return seen_symbols if all atoms in all args are all unique symbols, else returns None. initial_set can be used to represent initial value for seen_symbols",
    "type": "method",
    "file_path": "pytorch\\torch\\utils\\_sympy\\functions.py",
    "ast_data": "FunctionDef name:_unique_symbols arg:cls arg:args arg:initial_set arguments arg arg arg Assign Compare Call For For Call If Call Return return:no If Compare Return return:no Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_has_fully_defined_shape",
    "source_code": "def _has_fully_defined_shape(tensor):\n    return isinstance(tensor, ops.EagerTensor) or tensor.shape.is_fully_defined()",
    "docstring": "Returns true if tensor has a fully defined shape.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\math_ops.py",
    "ast_data": "FunctionDef name:_has_fully_defined_shape arg:tensor arguments arg Return return:yes BoolOp Call Call"
  },
  {
    "library": "tensorflow",
    "name": "output_types",
    "source_code": "@property\n@deprecation.deprecated(None, 'Use `tf.compat.v1.data.get_output_types(dataset)`.')\ndef output_types(self):\n    return nest.map_structure(lambda component_spec: component_spec._to_legacy_output_types(), self.element_spec)",
    "docstring": "Returns the type of each component of an element of this dataset. Returns: A (nested) structure of objects corresponding to each component of an element of this dataset.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\data\\ops\\dataset_ops.py",
    "ast_data": "FunctionDef name:output_types arg:self arguments arg Return return:yes Call arguments arg Call Call"
  },
  {
    "library": "pytorch",
    "name": "is_sparse",
    "source_code": "def is_sparse(A):\n    if isinstance(A, torch.Tensor):\n        return A.layout == torch.sparse_coo\n    error_str = 'expected Tensor'\n    if not torch.jit.is_scripting():\n        error_str += f' but got {type(A)}'\n    raise TypeError(error_str)",
    "docstring": "Check if tensor A is a sparse tensor",
    "type": "function",
    "file_path": "pytorch\\torch\\_linalg_utils.py",
    "ast_data": "FunctionDef name:is_sparse arg:A arguments arg If Call Return return:yes Compare Assign If Call Call Raise Call"
  },
  {
    "library": "django",
    "name": "get",
    "source_code": "def get(self, key, default=None, version=None):\n    raise NotImplementedError('subclasses of BaseCache must provide a get() method')",
    "docstring": "Fetch a given key from the cache. If the key does not exist, return default, which itself defaults to None.",
    "type": "method",
    "file_path": "django\\django\\core\\cache\\backends\\base.py",
    "ast_data": "FunctionDef name:get arg:self arg:key arg:default arg:version arguments arg arg arg arg Raise Call"
  },
  {
    "library": "tensorflow",
    "name": "GcpGpuTerminationConfig",
    "source_code": "class GcpGpuTerminationConfig(TerminationConfig):\n\n    def __init__(self, termination_watcher_fn=None, exit_fn=None, grace_period=None, save_fn=None):\n        self.termination_watcher_fn = termination_watcher_fn or failure_handling_util.termination_watcher_function_gce\n        self.exit_fn = exit_fn or failure_handling_util.gce_exit_fn\n        self.grace_period = grace_period if grace_period or grace_period == 0 else failure_handling_util.GRACE_PERIOD_GCE\n        self.save_fn = save_fn",
    "docstring": "Configurations for GCP GPU VM.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\failure_handling\\failure_handling.py",
    "ast_data": "ClassDef name:GcpGpuTerminationConfig FunctionDef name:__init__ arg:self arg:termination_watcher_fn arg:exit_fn arg:grace_period arg:save_fn arguments arg arg arg arg arg Assign BoolOp Assign BoolOp Assign BoolOp Compare Assign"
  },
  {
    "library": "matplotlib",
    "name": "MaxHeight",
    "source_code": "class MaxHeight(MaxExtent):\n\n    def __init__(self, artist_list):\n        super().__init__(artist_list, 'height')",
    "docstring": "Size whose absolute part is the largest height of the given *artist_list*.",
    "type": "class",
    "file_path": "matplotlib\\lib\\mpl_toolkits\\axes_grid1\\axes_size.py",
    "ast_data": "ClassDef name:MaxHeight FunctionDef name:__init__ arg:self arg:artist_list arguments arg arg Call Call"
  },
  {
    "library": "scrapy",
    "name": "media_to_download",
    "source_code": "@abstractmethod\ndef media_to_download(self, request: Request, info: SpiderInfo, *, item: Any=None) -> Deferred[FileInfo | None]:\n    raise NotImplementedError",
    "docstring": "Check request before starting download",
    "type": "method",
    "file_path": "scrapy\\scrapy\\pipelines\\media.py",
    "ast_data": "FunctionDef name:media_to_download arg:self arg:request arg:info arguments arg arg arg arg Raise"
  },
  {
    "library": "tensorflow",
    "name": "compute_gradients",
    "source_code": "def compute_gradients(self, *args, **kwargs):\n    return self._opt.compute_gradients(*args, **kwargs)",
    "docstring": "Compute gradients of \"loss\" for the variables in \"var_list\". This simply wraps the compute_gradients() from the real optimizer. The gradients will be aggregated in the apply_gradients() so that user can modify the gradients like clipping with per replica global norm if needed. The global norm with aggregated gradients can be bad as one replica's huge gradients can hurt the gradients from other replicas. Args: *args: Arguments for compute_gradients(). **kwargs: Keyword arguments for compute_gradients(). Returns: A list of (gradient, variable) pairs.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\training\\sync_replicas_optimizer.py",
    "ast_data": "FunctionDef name:compute_gradients arg:self arguments arg arg arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "inner_sharded",
    "source_code": "@classmethod\ndef inner_sharded(cls, mesh: Mesh, inner_dim: str, rank: int) -> 'Layout':\n    return cls.batch_sharded(mesh, inner_dim, rank, axis=rank - 1)",
    "docstring": "Returns a layout sharded on inner dimension.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\dtensor\\python\\layout.py",
    "ast_data": "FunctionDef name:inner_sharded arg:cls arg:mesh arg:inner_dim arg:rank arguments arg arg arg arg Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "get_scorer_names",
    "source_code": "def get_scorer_names():\n    return sorted(_SCORERS.keys())",
    "docstring": "Get the names of all available scorers. These names can be passed to :func: to retrieve the scorer object. Returns ------- list of str Names of all available scorers. Examples -------- >>> from sklearn.metrics import get_scorer_names >>> all_scorers = get_scorer_names() >>> type(all_scorers) >>> all_scorers[:3] ['accuracy', 'adjusted_mutual_info_score', 'adjusted_rand_score'] >>> \"roc_auc\" in all_scorers True",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\metrics\\_scorer.py",
    "ast_data": "FunctionDef name:get_scorer_names arguments Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "kl_divergence",
    "source_code": "@deprecation.deprecated('2019-01-01', 'The TensorFlow Distributions library has moved to TensorFlow Probability (https://github.com/tensorflow/probability). You should update all references to use `tfp.distributions` instead of `tf.distributions`.', warn_once=True)\n@tf_export(v1=['distributions.kl_divergence'])\ndef kl_divergence(distribution_a, distribution_b, allow_nan_stats=True, name=None):\n    kl_fn = _registered_kl(type(distribution_a), type(distribution_b))\n    if kl_fn is None:\n        raise NotImplementedError('No KL(distribution_a || distribution_b) registered for distribution_a type %s and distribution_b type %s' % (type(distribution_a).__name__, type(distribution_b).__name__))\n    with ops.name_scope('KullbackLeibler'):\n        kl_t = kl_fn(distribution_a, distribution_b, name=name)\n        if allow_nan_stats:\n            return kl_t\n        kl_t = array_ops.identity(kl_t, name='kl')\n        with ops.control_dependencies([control_flow_assert.Assert(math_ops.logical_not(math_ops.reduce_any(math_ops.is_nan(kl_t))), ['KL calculation between %s and %s returned NaN values (and was called with allow_nan_stats=False). Values:' % (distribution_a.name, distribution_b.name), kl_t])]):\n            return array_ops.identity(kl_t, name='checked_kl')",
    "docstring": "Get the KL-divergence KL(distribution_a || distribution_b). If there is no KL method registered specifically for and , then the class hierarchies of these types are searched. If one KL method is registered between any pairs of classes in these two parent hierarchies, it is used. If more than one such registered method exists, the method whose registered classes have the shortest sum MRO paths to the input types is used. If more than one such shortest path exists, the first method identified in the search is used (favoring a shorter MRO distance to ). Args: distribution_a: The first distribution. distribution_b: The second distribution. allow_nan_stats: Python , default . When , statistics (e.g., mean, mode, variance) use the value \"\" to indicate the result is undefined. When , an exception is raised if one or more of the statistic's batch members are undefined. name: Python name prefixed to Ops created by this class. Returns: A Tensor with the batchwise KL-divergence between and . Raises: NotImplementedError: If no KL method is defined for distribution types of and .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\distributions\\kullback_leibler.py",
    "ast_data": "FunctionDef name:kl_divergence arg:distribution_a arg:distribution_b arg:allow_nan_stats arg:name arguments arg arg arg arg Assign Call Call Call If Compare Raise Call Call Call With Call Assign Call If Return return:yes Assign Call With Call Call Call Call Call Return return:yes Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "kwargs",
    "source_code": "@property\ndef kwargs(self) -> dict[str, Argument]:\n    return self._kwargs",
    "docstring": "The dict of keyword arguments to this `Node` docstring for more information. Assignment to this property is allowed. All accounting of uses and users is updated automatically on assignment.",
    "type": "method",
    "file_path": "pytorch\\torch\\fx\\node.py",
    "ast_data": "FunctionDef name:kwargs arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "do_encode",
    "source_code": "def do_encode(self, tensor_value, encode_fn):\n    del encode_fn\n    encoded_tensor = struct_pb2.StructuredValue()\n    if isinstance(tensor_value, ops.EagerTensor):\n        encoded_tensor.tensor_value.CopyFrom(tensor_util.make_tensor_proto(tensor_value.numpy()))\n    elif tensor_value.op.type == 'Const':\n        encoded_tensor.tensor_value.CopyFrom(tensor_value.op.get_attr('value'))\n    else:\n        raise nested_structure_coder.NotEncodableError(f'No encoder for object {str(tensor_value)} of type {type(tensor_value)}.')\n    return encoded_tensor",
    "docstring": "Returns an encoded for the given .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\constant_op.py",
    "ast_data": "FunctionDef name:do_encode arg:self arg:tensor_value arg:encode_fn arguments arg arg arg Assign Call If Call Call Call Call If Compare Call Call Raise Call Call Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "_subclass_with_normal",
    "source_code": "def _subclass_with_normal(effect_class):\n\n    class withEffect(effect_class):\n\n        def draw_path(self, renderer, gc, tpath, affine, rgbFace):\n            super().draw_path(renderer, gc, tpath, affine, rgbFace)\n            renderer.draw_path(gc, tpath, affine, rgbFace)\n    withEffect.__name__ = f'with{effect_class.__name__}'\n    withEffect.__qualname__ = f'with{effect_class.__name__}'\n    withEffect.__doc__ = f'\\n    A shortcut PathEffect for applying `.{effect_class.__name__}` and then\\n    drawing the original Artist.\\n\\n    With this class you can use ::\\n\\n        artist.set_path_effects([patheffects.with{effect_class.__name__}()])\\n\\n    as a shortcut for ::\\n\\n        artist.set_path_effects([patheffects.{effect_class.__name__}(),\\n                                 patheffects.Normal()])\\n    '\n    withEffect.draw_path.__doc__ = effect_class.draw_path.__doc__\n    return withEffect",
    "docstring": "Create a PathEffect class combining *effect_class* and a normal draw.",
    "type": "function",
    "file_path": "matplotlib\\lib\\matplotlib\\patheffects.py",
    "ast_data": "FunctionDef name:_subclass_with_normal arg:effect_class arguments arg ClassDef name:withEffect FunctionDef name:draw_path arg:self arg:renderer arg:gc arg:tpath arg:affine arg:rgbFace arguments arg arg arg arg arg arg Call Call Call Assign Assign Assign Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_write_trace_points",
    "source_code": "def _write_trace_points(self, tensor_trace_points):\n    self._write_report('%s %s\\n' % (_MARKER_SECTION_BEGIN, _SECTION_NAME_TENSOR_TRACER_CHECKPOINT))\n    for tensor, checkpoint_name in tensor_trace_points:\n        self._write_report('%s %s\\n' % (tensor.name, checkpoint_name))\n    self._write_report('%s %s\\n' % (_MARKER_SECTION_END, _SECTION_NAME_TENSOR_TRACER_CHECKPOINT))",
    "docstring": "Writes the list of checkpoints.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\tpu\\tensor_tracer_report.py",
    "ast_data": "FunctionDef name:_write_trace_points arg:self arg:tensor_trace_points arguments arg arg Call For Call Call"
  },
  {
    "library": "tensorflow",
    "name": "initial_value",
    "source_code": "def initial_value(self, device):\n    return self.get_var_on_device(device).initial_value",
    "docstring": "Returns the Tensor used as the initial value for the variable.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\packed_distributed_variable.py",
    "ast_data": "FunctionDef name:initial_value arg:self arg:device arguments arg arg Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "_check_vocabulary",
    "source_code": "def _check_vocabulary(self):\n    if not hasattr(self, 'vocabulary_'):\n        self._validate_vocabulary()\n        if not self.fixed_vocabulary_:\n            raise NotFittedError('Vocabulary not fitted or provided')\n    if len(self.vocabulary_) == 0:\n        raise ValueError('Vocabulary is empty')",
    "docstring": "Check if vocabulary is empty or missing (not fitted)",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\feature_extraction\\text.py",
    "ast_data": "FunctionDef name:_check_vocabulary arg:self arguments arg If Call Call If Raise Call If Compare Call Raise Call"
  },
  {
    "library": "pytorch",
    "name": "box_area",
    "source_code": "def box_area(boxes):\n    return (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1])",
    "docstring": "Computes the area of a set of bounding boxes, which are specified by its (x1, y1, x2, y2) coordinates. Args: boxes (Tensor[N, 4]): boxes for which the area will be computed. They are expected to be in (x1, y1, x2, y2) format Returns: area (Tensor[N]): area for each box",
    "type": "function",
    "file_path": "pytorch\\benchmarks\\functional_autograd_benchmark\\torchvision_models.py",
    "ast_data": "FunctionDef name:box_area arg:boxes arguments arg Return return:yes"
  },
  {
    "library": "pandas",
    "name": "_maybe_align_series_as_frame",
    "source_code": "def _maybe_align_series_as_frame(self, series: Series, axis: AxisInt):\n    rvalues = series._values\n    if not isinstance(rvalues, np.ndarray):\n        if rvalues.dtype in ('datetime64[ns]', 'timedelta64[ns]'):\n            rvalues = np.asarray(rvalues)\n        else:\n            return series\n    if axis == 0:\n        rvalues = rvalues.reshape(-1, 1)\n    else:\n        rvalues = rvalues.reshape(1, -1)\n    rvalues = np.broadcast_to(rvalues, self.shape)\n    return self._constructor(rvalues, index=self.index, columns=self.columns, dtype=rvalues.dtype).__finalize__(series)",
    "docstring": "If the Series operand is not EA-dtype, we can broadcast to 2D and operate blockwise.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\frame.py",
    "ast_data": "FunctionDef name:_maybe_align_series_as_frame arg:self arg:series arg:axis arguments arg arg arg Assign If Call If Compare Assign Call Return return:yes If Compare Assign Call Assign Call Assign Call Return return:yes Call Call"
  },
  {
    "library": "matplotlib",
    "name": "_is_natively_supported",
    "source_code": "def _is_natively_supported(x):\n    if np.iterable(x):\n        for thisx in x:\n            if thisx is ma.masked:\n                continue\n            return isinstance(thisx, Number) and (not isinstance(thisx, Decimal))\n    else:\n        return isinstance(x, Number) and (not isinstance(x, Decimal))",
    "docstring": "Return whether *x* is of a type that Matplotlib natively supports or an array of objects of such types.",
    "type": "function",
    "file_path": "matplotlib\\lib\\matplotlib\\units.py",
    "ast_data": "FunctionDef name:_is_natively_supported arg:x arguments arg If Call For If Compare Return return:yes BoolOp Call Call Return return:yes BoolOp Call Call"
  },
  {
    "library": "tensorflow",
    "name": "get_flag_value",
    "source_code": "def get_flag_value(self, wanted_flag_name):\n    tensor_tracer_flags = self._env.get(FLAGS_ENV_VAR)\n    if not tensor_tracer_flags:\n        return (False, None)\n    pos = 0\n    while True:\n        match, has_value = TTParameters.match_next_flag(tensor_tracer_flags, pos)\n        if not match:\n            return (False, None)\n        flag_name = match.group(1)\n        if has_value:\n            flag_value = match.group(2)\n        else:\n            flag_value = None\n        if flag_name == wanted_flag_name:\n            return (True, flag_value)\n        pos = match.end()\n    raise RuntimeError('Invalid tensor tracer flag. Could not recognize %s.' % flag_name)",
    "docstring": "Returns the value of a TensorTracer flags. Args: wanted_flag_name: the name of the flag we are looking for. Returns: A pair where the first element indicates if the flag is found and the second element is the value of the flag. Raises: RuntimeError: If supposedly deadcode is reached.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\tpu\\tensor_tracer_flags.py",
    "ast_data": "FunctionDef name:get_flag_value arg:self arg:wanted_flag_name arguments arg arg Assign Call If Return return:yes Assign While Assign Call If Return return:yes Assign Call If Assign Call Assign If Compare Return return:yes Assign Call Raise Call"
  },
  {
    "library": "django",
    "name": "reset_sequences",
    "source_code": "def reset_sequences(self, connection, models):\n    sequence_sql = connection.ops.sequence_reset_sql(no_style(), models)\n    if sequence_sql:\n        if self.verbosity >= 2:\n            self.stdout.write('Resetting sequences')\n        with connection.cursor() as cursor:\n            for line in sequence_sql:\n                cursor.execute(line)",
    "docstring": "Reset database sequences for the given connection and models.",
    "type": "method",
    "file_path": "django\\django\\core\\management\\commands\\loaddata.py",
    "ast_data": "FunctionDef name:reset_sequences arg:self arg:connection arg:models arguments arg arg arg Assign Call Call If If Compare Call With Call For Call"
  },
  {
    "library": "django",
    "name": "centroid",
    "source_code": "@property\ndef centroid(self):\n    p = OGRGeometry(OGRGeomType('Point'))\n    capi.get_centroid(self.ptr, p.ptr)\n    return p",
    "docstring": "Return the centroid (a Point) of this Polygon.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\gdal\\geometries.py",
    "ast_data": "FunctionDef name:centroid arg:self arguments arg Assign Call Call Call Return return:yes"
  },
  {
    "library": "pandas",
    "name": "_subtype_with_str",
    "source_code": "@property\ndef _subtype_with_str(self):\n    if isinstance(self.fill_value, str):\n        return type(self.fill_value)\n    return self.subtype",
    "docstring": "Whether the SparseDtype's subtype should be considered ``, we need to be more specific, we need the actual underlying type. Returns ------- >>> SparseDtype(int, 1)._subtype_with_str dtype('int64') >>> SparseDtype(object, 1)._subtype_with_str dtype('O') >>> dtype = SparseDtype(str, \"\") >>> dtype.subtype dtype('O') >>> dtype._subtype_with_str",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\dtypes\\dtypes.py",
    "ast_data": "FunctionDef name:_subtype_with_str arg:self arguments arg If Call Return return:yes Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "contour",
    "source_code": "@_preprocess_data()\ndef contour(self, X, Y, Z, *args, extend3d=False, stride=5, zdir='z', offset=None, axlim_clip=False, **kwargs):\n    had_data = self.has_data()\n    jX, jY, jZ = art3d.rotate_axes(X, Y, Z, zdir)\n    cset = super().contour(jX, jY, jZ, *args, **kwargs)\n    self.add_contour_set(cset, extend3d, stride, zdir, offset, axlim_clip)\n    self.auto_scale_xyz(X, Y, Z, had_data)\n    return cset",
    "docstring": "Create a 3D contour plot. Parameters ---------- X, Y, Z : array-like, Input data. See for supported data shapes. extend3d : bool, default: False Whether to extend contour in 3D. stride : int, default: 5 Step size for extending contour. zdir : {'x', 'y', 'z'}, default: 'z' The direction to use. offset : float, optional If specified, plot a projection of the contour lines at this position in a plane normal to *zdir*. axlim_clip : bool, default: False Whether to hide lines with a vertex outside the axes view limits. .. versionadded:: 3.10 data : indexable object, optional DATA_PARAMETER_PLACEHOLDER *args, **kwargs Other arguments are forwarded to . Returns ------- matplotlib.contour.QuadContourSet",
    "type": "method",
    "file_path": "matplotlib\\lib\\mpl_toolkits\\mplot3d\\axes3d.py",
    "ast_data": "FunctionDef name:contour arg:self arg:X arg:Y arg:Z arguments arg arg arg arg arg arg arg arg arg arg arg Assign Call Assign Call Assign Call Call Call Call Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "_fill_zeros",
    "source_code": "def _fill_zeros(result: np.ndarray, x, y) -> np.ndarray:\n    if result.dtype.kind == 'f':\n        return result\n    is_variable_type = hasattr(y, 'dtype')\n    is_scalar_type = not isinstance(y, np.ndarray)\n    if not is_variable_type and (not is_scalar_type):\n        return result\n    if is_scalar_type:\n        y = np.array(y)\n    if y.dtype.kind in 'iu':\n        ymask = y == 0\n        if ymask.any():\n            mask = ymask & ~np.isnan(result)\n            result = result.astype('float64', copy=False)\n            np.putmask(result, mask, np.nan)\n    return result",
    "docstring": "If this is a reversed op, then flip x,y If we have an integer value (or array in y) and we have 0's, fill them with np.nan, return the result. Mask the nan's from x.",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\ops\\missing.py",
    "ast_data": "FunctionDef name:_fill_zeros arg:result arg:x arg:y arguments arg arg arg If Compare Return return:yes Assign Call Assign Call If BoolOp Return return:yes If Assign Call If Compare Assign Compare If Call Assign Call Assign Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "convert_conv3d_weight_memory_format",
    "source_code": "def convert_conv3d_weight_memory_format(module: _M, memory_format: torch.memory_format) -> _M:\n    if isinstance(module, (torch.nn.Conv3d, torch.nn.ConvTranspose3d)):\n        weight_data = module.weight.detach().clone(memory_format=memory_format)\n        module.weight.data = weight_data.resize_(weight_data.size(), memory_format=memory_format)\n    for child in module.children():\n        convert_conv3d_weight_memory_format(child, memory_format)\n    return module",
    "docstring": "Convert `` Example: >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_CUDA) >>> # xdoctest: +REQUIRES(env:CUBLAS_WORKSPACE_CONFIG) >>> input = torch.randint(1, 10, (2, 8, 4, 4, 4), dtype=torch.float16, device=\"cuda\") >>> model = nn.Sequential( >>> nn.Conv3d(8, 4, 3)).cuda().half() >>> # This is identical to: >>> # nn.utils.convert_conv3d_weight_memory_format(model, torch.channels_last_3d) >>> model = nn.utils.convert_conv3d_weight_memory_format(model, torch.channels_last_3d) >>> out = model(input)",
    "type": "function",
    "file_path": "pytorch\\torch\\nn\\utils\\memory_format.py",
    "ast_data": "FunctionDef name:convert_conv3d_weight_memory_format arg:module arg:memory_format arguments arg arg If Call Assign Call Call Assign Call Call For Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "call_op",
    "source_code": "def call_op(op_type: str, *args: ir.Value, _num_outputs: int=1, _domain: str='', **kwargs: int | float | str | bool | ir.Graph | ir.TensorProtocol) -> Sequence[ir.Value]:\n    from onnxscript.ir import convenience as ir_convenience\n    assert _core.current_tracer is not None\n    tracer = _core.current_tracer\n    inputs = list(args)\n    for input in reversed(inputs):\n        if input is not None:\n            break\n        inputs.pop()\n    attributes = [attr for attr in ir_convenience.convert_attributes(kwargs) if attr.value is not None]\n    tracer.nodes.append((node := ir.Node(_domain, op_type, inputs=inputs, attributes=attributes, num_outputs=_num_outputs, version=tracer.opset.version)))\n    return node.outputs",
    "docstring": "Call an operator with the given arguments and keyword arguments. Arguments are always inputs, while keyword arguments are attributes.",
    "type": "function",
    "file_path": "pytorch\\torch\\onnx\\_internal\\exporter\\_torchlib\\ops\\hop.py",
    "ast_data": "FunctionDef name:call_op arg:op_type arguments arg arg arg arg arg Compare Assign Assign Call For Call If Compare Call Assign Call Compare Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "AHOperation",
    "source_code": "class AHOperation:\n\n    def __init__(self, name: str, func: Callable[[Any], Value], is_categorical: bool=False) -> None:\n        self.name = name\n        self.func = func\n        self.is_categorical = is_categorical\n\n    def apply_operation(self, data: Any) -> None:\n        data[self.name] = self.func(data)",
    "docstring": "AHOperation can be used to augment the data collected by AutoHeuristic. One might for example store features like m, k, n, but also want to use features like m*n, or k*n, to learn a heuristic. Instead of storing features that can be created from the collected data, one can use AHOperation to create new features from the collected data.",
    "type": "class",
    "file_path": "pytorch\\torch\\_inductor\\autoheuristic\\autoheuristic_utils.py",
    "ast_data": "ClassDef name:AHOperation FunctionDef name:__init__ arg:self arg:name arg:func arg:is_categorical arguments arg arg arg arg Assign Assign Assign FunctionDef name:apply_operation arg:self arg:data arguments arg arg Assign Call"
  },
  {
    "library": "tensorflow",
    "name": "from_value_rowids",
    "source_code": "@classmethod\n@dispatch.add_dispatch_support\ndef from_value_rowids(cls, values, value_rowids, nrows=None, name=None, validate=True):\n    if not isinstance(validate, bool):\n        raise TypeError(f'Argument `validate` must have type bool. Received {validate}.')\n    with ops.name_scope(name, 'RaggedFromValueRowIds', [values, value_rowids, nrows]):\n        row_partition = RowPartition.from_value_rowids(value_rowids=value_rowids, nrows=nrows, validate=validate, dtype_hint=_get_optional_partition_dtype(values))\n        return cls._from_row_partition(values, row_partition, validate=validate)",
    "docstring": "Creates a with rows partitioned by . The returned corresponds with the python list defined by: Args: values: A potentially ragged tensor with shape . value_rowids: A 1-D integer tensor with shape , which corresponds one-to-one with , and specifies each value's row index. Must be nonnegative, and must be sorted in ascending order. nrows: An integer scalar specifying the number of rows. This should be specified if the may containing empty training rows. Must be greater than (or zero if is empty). Defaults to (or zero if is empty). name: A name prefix for the RaggedTensor (optional). validate: If true, then use assertions to check that the arguments form a valid . Note: these assertions incur a runtime cost, since they must be checked for each tensor value. Returns: A . . . Raises: ValueError: If is incompatible with . #### Example: >>> print(tf.RaggedTensor.from_value_rowids( ... values=[3, 1, 4, 1, 5, 9, 2, 6], ... value_rowids=[0, 0, 0, 0, 2, 2, 2, 3], ... nrows=5))",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\ragged_tensor.py",
    "ast_data": "FunctionDef name:from_value_rowids arg:cls arg:values arg:value_rowids arg:nrows arg:name arg:validate arguments arg arg arg arg arg arg If Call Raise Call With Call Assign Call Call Return return:yes Call"
  },
  {
    "library": "cryptography",
    "name": "__copy__",
    "source_code": "@abc.abstractmethod\ndef __copy__(self) -> DHPublicKey:\n    pass",
    "docstring": "Returns a copy.",
    "type": "method",
    "file_path": "cryptography\\src\\cryptography\\hazmat\\primitives\\asymmetric\\dh.py",
    "ast_data": "FunctionDef name:__copy__ arg:self arguments arg"
  },
  {
    "library": "pytorch",
    "name": "_reduce_fake_tensor",
    "source_code": "def _reduce_fake_tensor(self, t: Tensor) -> tuple[Callable[[T], T], tuple[TensorMetadata]]:\n    metadata = extract_tensor_metadata_for_cache_key(t)\n    return (_ident, (metadata,))",
    "docstring": "Custom reducer to pickle FakeTensors.",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\codecache.py",
    "ast_data": "FunctionDef name:_reduce_fake_tensor arg:self arg:t arguments arg arg Assign Call Return return:yes"
  },
  {
    "library": "django",
    "name": "update_session_auth_hash",
    "source_code": "def update_session_auth_hash(request, user):\n    request.session.cycle_key()\n    if hasattr(user, 'get_session_auth_hash') and request.user == user:\n        request.session[HASH_SESSION_KEY] = user.get_session_auth_hash()",
    "docstring": "Updating a user's password logs out all sessions for the user. Take the current request and the updated user object from which the new session hash will be derived and update the session hash appropriately to prevent a password change from logging out the session from which the password was changed.",
    "type": "function",
    "file_path": "django\\django\\contrib\\auth\\__init__.py",
    "ast_data": "FunctionDef name:update_session_auth_hash arg:request arg:user arguments arg arg Call If BoolOp Call Compare Assign Call"
  },
  {
    "library": "pytorch",
    "name": "_compare_onnx_pytorch_model",
    "source_code": "def _compare_onnx_pytorch_model(pt_model: _ModelType, onnx_model_f: str | io.BytesIO, input_args: _InputArgsType, input_kwargs: _InputKwargsType | None, additional_test_inputs: Sequence[_InputArgsType] | None, options: VerificationOptions):\n    onnx_session = _onnx_backend_session(onnx_model_f, options.backend)\n\n    def compare_onnx_pytorch_model_with_input(input_args, input_kwargs):\n        pt_args, pt_kwargs = _prepare_input_for_pytorch(input_args, input_kwargs)\n        pt_model_copy = _try_clone_model(pt_model)\n        pt_outs = pt_model_copy(*pt_args, **pt_kwargs)\n        onnx_inputs = _prepare_input_for_onnx(input_args, input_kwargs, options.remained_onnx_input_idx, options.flatten)\n        onnx_outs = _run_onnx(onnx_session, onnx_inputs)\n        _compare_onnx_pytorch_outputs(onnx_outs=onnx_outs, pt_outs=pt_outs, options=options)\n    compare_onnx_pytorch_model_with_input(input_args, input_kwargs)\n    if additional_test_inputs:\n        for test_input_args in additional_test_inputs:\n            compare_onnx_pytorch_model_with_input(test_input_args, {})",
    "docstring": "Compare outputs from ONNX model runs with outputs from PyTorch model runs. Args: pt_model: PyTorch model. onnx_model_f: ONNX model file path or file-like object. input_args: positional arguments for PyTorch model forward method. input_kwargs: keyword arguments for PyTorch model forward method. additional_test_inputs: additional positional arguments for PyTorch model forward method. options: options for verification. Raises: AssertionError: if outputs from ONNX model and PyTorch model are not equal up to specified precision.",
    "type": "function",
    "file_path": "pytorch\\torch\\onnx\\verification.py",
    "ast_data": "FunctionDef name:_compare_onnx_pytorch_model arg:pt_model arg:onnx_model_f arg:input_args arg:input_kwargs arg:additional_test_inputs arg:options arguments arg arg arg arg arg arg Assign Call FunctionDef name:compare_onnx_pytorch_model_with_input arg:input_args arg:input_kwargs arguments arg arg Assign Call Assign Call Assign Call Assign Call Assign Call Call Call If For Call"
  },
  {
    "library": "django",
    "name": "url",
    "source_code": "def url(self, name, force=False):\n    return self._url(self.stored_name, name, force)",
    "docstring": "Return the non-hashed URL in DEBUG mode.",
    "type": "method",
    "file_path": "django\\django\\contrib\\staticfiles\\storage.py",
    "ast_data": "FunctionDef name:url arg:self arg:name arg:force arguments arg arg arg Return return:yes Call"
  },
  {
    "library": "django",
    "name": "check_response",
    "source_code": "def check_response(self, response, callback, name=None):\n    if not (response is None or asyncio.iscoroutine(response)):\n        return\n    if not name:\n        if isinstance(callback, types.FunctionType):\n            name = 'The view %s.%s' % (callback.__module__, callback.__name__)\n        else:\n            name = 'The view %s.%s.__call__' % (callback.__module__, callback.__class__.__name__)\n    if response is None:\n        raise ValueError(\"%s didn't return an HttpResponse object. It returned None instead.\" % name)\n    elif asyncio.iscoroutine(response):\n        raise ValueError(\"%s didn't return an HttpResponse object. It returned an unawaited coroutine instead. You may need to add an 'await' into your view.\" % name)",
    "docstring": "Raise an error if the view returned None or an uncalled coroutine.",
    "type": "method",
    "file_path": "django\\django\\core\\handlers\\base.py",
    "ast_data": "FunctionDef name:check_response arg:self arg:response arg:callback arg:name arguments arg arg arg arg If BoolOp Compare Call Return return:no If If Call Assign Assign If Compare Raise Call If Call Raise Call"
  },
  {
    "library": "numpy",
    "name": "_DomainGreater",
    "source_code": "class _DomainGreater:\n\n    def __init__(self, critical_value):\n        self.critical_value = critical_value\n\n    def __call__(self, x):\n        with np.errstate(invalid='ignore'):\n            return umath.less_equal(x, self.critical_value)",
    "docstring": "DomainGreater(v)(x) is True where x <= v.",
    "type": "class",
    "file_path": "numpy\\numpy\\ma\\core.py",
    "ast_data": "ClassDef name:_DomainGreater FunctionDef name:__init__ arg:self arg:critical_value arguments arg arg Assign FunctionDef name:__call__ arg:self arg:x arguments arg arg With Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "device_name",
    "source_code": "@property\ndef device_name(self):\n    return self._thread_local_data.device_name",
    "docstring": "Returns the device name for the current thread.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\context.py",
    "ast_data": "FunctionDef name:device_name arg:self arguments arg Return return:yes"
  },
  {
    "library": "scipy",
    "name": "roots_chebys",
    "source_code": "def roots_chebys(n, mu=False):\n    x, w, m = roots_chebyu(n, True)\n    x *= 2\n    w *= 2\n    m *= 2\n    if mu:\n        return (x, w, m)\n    else:\n        return (x, w)",
    "docstring": "Gauss-Chebyshev (second kind) quadrature. Compute the sample points and weights for Gauss-Chebyshev quadrature. The sample points are the roots of the nth degree Chebyshev polynomial of the second kind, :math:. These sample points and weights correctly integrate polynomials of degree :math: or less over the interval :math: with weight function :math:. See 22.2.7 in [AS]_ for more details. Parameters ---------- n : int quadrature order mu : bool, optional If True, return the sum of the weights, optional. Returns ------- x : ndarray Sample points w : ndarray Weights mu : float Sum of the weights See Also -------- scipy.integrate.fixed_quad References ---------- .. [AS] Milton Abramowitz and Irene A. Stegun, eds. Handbook of Mathematical Functions with Formulas, Graphs, and Mathematical Tables. New York: Dover, 1972.",
    "type": "function",
    "file_path": "scipy\\scipy\\special\\_orthogonal.py",
    "ast_data": "FunctionDef name:roots_chebys arg:n arg:mu arguments arg arg Assign Call If Return return:yes Return return:yes"
  },
  {
    "library": "django",
    "name": "OSMWidget",
    "source_code": "class OSMWidget(OpenLayersWidget):\n    template_name = 'gis/openlayers-osm.html'\n    default_lon = 5\n    default_lat = 47\n    default_zoom = 12\n\n    def __init__(self, attrs=None):\n        super().__init__()\n        for key in ('default_lon', 'default_lat', 'default_zoom'):\n            self.attrs[key] = getattr(self, key)\n        if attrs:\n            self.attrs.update(attrs)",
    "docstring": "An OpenLayers/OpenStreetMap-based widget.",
    "type": "class",
    "file_path": "django\\django\\contrib\\gis\\forms\\widgets.py",
    "ast_data": "ClassDef name:OSMWidget Assign Assign Assign Assign FunctionDef name:__init__ arg:self arg:attrs arguments arg arg Call Call For Assign Call If Call"
  },
  {
    "library": "scikit-learn",
    "name": "_parse_local_version",
    "source_code": "def _parse_local_version(local: str) -> Optional[LocalType]:\n    if local is not None:\n        return tuple((part.lower() if not part.isdigit() else int(part) for part in _local_version_separators.split(local)))\n    return None",
    "docstring": "Takes a string like abc.1.twelve and turns it into (\"abc\", 1, \"twelve\").",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\externals\\_packaging\\version.py",
    "ast_data": "FunctionDef name:_parse_local_version arg:local arguments arg If Compare Return return:yes Call Call Call Call Call Return return:no"
  },
  {
    "library": "tensorflow",
    "name": "greater",
    "source_code": "def greater(a, b):\n    return _maybe_static(a) > _maybe_static(b)",
    "docstring": "A version of tf.greater that eagerly evaluates if possible.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\numpy_ops\\np_utils.py",
    "ast_data": "FunctionDef name:greater arg:a arg:b arguments arg arg Return return:yes Compare Call Call"
  },
  {
    "library": "cryptography",
    "name": "verify",
    "source_code": "@abc.abstractmethod\ndef verify(self, key_material: bytes, expected_key: bytes) -> None:\n    pass",
    "docstring": "Checks whether the key generated by the key material matches the expected derived key. Raises an exception if they do not match.",
    "type": "method",
    "file_path": "cryptography\\src\\cryptography\\hazmat\\primitives\\kdf\\__init__.py",
    "ast_data": "FunctionDef name:verify arg:self arg:key_material arg:expected_key arguments arg arg arg"
  },
  {
    "library": "tensorflow",
    "name": "with_min_memory",
    "source_code": "def with_min_memory(self, min_bytes=0, min_peak_bytes=0, min_residual_bytes=0, min_output_bytes=0):\n    self._options['min_bytes'] = min_bytes\n    self._options['min_peak_bytes'] = min_peak_bytes\n    self._options['min_residual_bytes'] = min_residual_bytes\n    self._options['min_output_bytes'] = min_output_bytes\n    return self",
    "docstring": "Only show profiler nodes consuming no less than 'min_bytes'. Args: min_bytes: Only show profiler nodes requested to allocate no less bytes than this. min_peak_bytes: Only show profiler nodes using no less than this bytes at peak (high watermark). For profiler nodes consist of multiple graph nodes, it sums the graph nodes' peak_bytes. min_residual_bytes: Only show profiler nodes have no less than this bytes not being de-allocated after Compute() ends. For profiler nodes consist of multiple graph nodes, it sums the graph nodes' residual_bytes. min_output_bytes: Only show profiler nodes have no less than this bytes output. The output are not necessarily allocated by this profiler nodes. Returns: self",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\profiler\\option_builder.py",
    "ast_data": "FunctionDef name:with_min_memory arg:self arg:min_bytes arg:min_peak_bytes arg:min_residual_bytes arg:min_output_bytes arguments arg arg arg arg arg Assign Assign Assign Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "on_epoch_end",
    "source_code": "def on_epoch_end(self):\n    pass",
    "docstring": "Method called at the end of every epoch.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\utils\\data_utils.py",
    "ast_data": "FunctionDef name:on_epoch_end arg:self arguments arg"
  },
  {
    "library": "pytorch",
    "name": "_get_amdsmi_device_index",
    "source_code": "def _get_amdsmi_device_index(device: Optional[Union[int, Device]]) -> int:\n    idx = _get_device_index(device, optional=True)\n    visible_devices = _parse_visible_devices()\n    if type(visible_devices[0]) is str:\n        uuids = _raw_device_uuid_amdsmi()\n        if uuids is None:\n            raise RuntimeError(\"Can't get device UUIDs\")\n        visible_devices_str = cast(list[str], visible_devices)\n        visible_devices = _transform_uuid_to_ordinals(visible_devices_str, uuids)\n    idx_map = dict(enumerate(cast(list[int], visible_devices)))\n    if idx not in idx_map:\n        raise RuntimeError(f'device {idx} is not visible (HIP_VISIBLE_DEVICES={visible_devices})')\n    return idx_map[idx]",
    "docstring": "Return the amdsmi index of the device, taking visible_devices into account.",
    "type": "function",
    "file_path": "pytorch\\torch\\cuda\\__init__.py",
    "ast_data": "FunctionDef name:_get_amdsmi_device_index arg:device arguments arg Assign Call Assign Call If Compare Call Assign Call If Compare Raise Call Assign Call Assign Call Assign Call Call Call If Compare Raise Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_",
    "source_code": "@define.register\ndef _(lib: Library, schema, alias_analysis=''):\n\n    def wrap(f):\n        name = lib.define(schema, alias_analysis)\n        lib.impl(name, f)\n        return f\n    return wrap",
    "docstring": "The old torch.library.define. We're keeping this around for BC reasons",
    "type": "function",
    "file_path": "pytorch\\torch\\library.py",
    "ast_data": "FunctionDef name:_ arg:lib arg:schema arg:alias_analysis arguments arg arg arg FunctionDef name:wrap arg:f arguments arg Assign Call Call Return return:yes Return return:yes"
  },
  {
    "library": "scipy",
    "name": "zeros",
    "source_code": "@property\ndef zeros(self):\n    return self.to_zpk().zeros",
    "docstring": "Zeros of the system.",
    "type": "method",
    "file_path": "scipy\\scipy\\signal\\_ltisys.py",
    "ast_data": "FunctionDef name:zeros arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "fit",
    "source_code": "@_fit_context(prefer_skip_nested_validation=False)\ndef fit(self, X, y=None):\n    self.fit_transform(X)\n    return self",
    "docstring": "Fit X into an embedded space. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) or (n_samples, n_samples) If the metric is 'precomputed' X must be a square distance matrix. Otherwise it contains a sample per row. If the method is 'exact', X may be a sparse matrix of type 'csr', 'csc' or 'coo'. If the method is 'barnes_hut' and the metric is 'precomputed', X may be a precomputed sparse graph. y : None Ignored. Returns ------- self : object Fitted estimator.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\manifold\\_t_sne.py",
    "ast_data": "FunctionDef name:fit arg:self arg:X arg:y arguments arg arg arg Call Return return:yes Call"
  },
  {
    "library": "kornia",
    "name": "normal_transform_pixel",
    "source_code": "def normal_transform_pixel(height: int, width: int, eps: float=1e-14, device: Optional[torch.device]=None, dtype: Optional[torch.dtype]=None) -> Tensor:\n    tr_mat = tensor([[1.0, 0.0, -1.0], [0.0, 1.0, -1.0], [0.0, 0.0, 1.0]], device=device, dtype=dtype)\n    width_denom: float = eps if width == 1 else width - 1.0\n    height_denom: float = eps if height == 1 else height - 1.0\n    tr_mat[0, 0] = tr_mat[0, 0] * 2.0 / width_denom\n    tr_mat[1, 1] = tr_mat[1, 1] * 2.0 / height_denom\n    return tr_mat.unsqueeze(0)",
    "docstring": "Compute the normalization matrix from image size in pixels to [-1, 1]. Args: height: image height. width: image width. eps: epsilon to prevent divide-by-zero errors device: device to place the result on. dtype: dtype of the result. Returns: normalized transform with shape :math:.",
    "type": "function",
    "file_path": "kornia\\kornia\\geometry\\conversions.py",
    "ast_data": "FunctionDef name:normal_transform_pixel arg:height arg:width arg:eps arg:device arg:dtype arguments arg arg arg arg arg Assign Call Compare Compare Assign Assign Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "_SACModMetadata",
    "source_code": "@dataclass\nclass _SACModMetadata:\n    start_idx: int\n    force_store_random: bool\n    sac_metadata: list[_SACMetadata]",
    "docstring": "Stores metadata for a module for SAC. Attributes: start_idx (int): The starting index of the module's operators. force_store_random (bool): Whether to force store random operators in the module. sac_metadata (List[_SACMetadata]): List of metadata for each operator in the module.",
    "type": "class",
    "file_path": "pytorch\\torch\\distributed\\_tools\\sac_estimator.py",
    "ast_data": "ClassDef name:_SACModMetadata"
  },
  {
    "library": "pytorch",
    "name": "reset_peak_memory_stats",
    "source_code": "def reset_peak_memory_stats(device: Optional[_device_t]=None) -> None:\n    if not is_initialized():\n        return\n    torch._C._mtia_resetPeakMemoryStats(_get_device_index(device, optional=True))",
    "docstring": "Reset the peak memory stats for a given device. Args: device (torch.device, str, or int, optional) selected device. Returns statistics for the current device, given by current_device(), if device is None (default).",
    "type": "function",
    "file_path": "pytorch\\torch\\mtia\\memory.py",
    "ast_data": "FunctionDef name:reset_peak_memory_stats arg:device arguments arg If Call Return return:no Call Call"
  },
  {
    "library": "pytorch",
    "name": "is_cuda",
    "source_code": "@property\ndef is_cuda(self) -> bool:\n    return self.data.is_cuda",
    "docstring": "Return true if stored on a gpu.",
    "type": "method",
    "file_path": "pytorch\\torch\\nn\\utils\\rnn.py",
    "ast_data": "FunctionDef name:is_cuda arg:self arguments arg Return return:yes"
  },
  {
    "library": "seaborn",
    "name": "dark_palette",
    "source_code": "def dark_palette(color, n_colors=6, reverse=False, as_cmap=False, input='rgb'):\n    rgb = _color_to_rgb(color, input)\n    hue, sat, _ = husl.rgb_to_husl(*rgb)\n    gray_s, gray_l = (0.15 * sat, 15)\n    gray = _color_to_rgb((hue, gray_s, gray_l), input='husl')\n    colors = [rgb, gray] if reverse else [gray, rgb]\n    return blend_palette(colors, n_colors, as_cmap)",
    "docstring": "Make a sequential palette that blends from dark to `choose_dark_palettematplotlib.colors.ListedColormapmatplotlib.colors.ListedColormap` See Also -------- light_palette : Create a sequential palette with bright low values. diverging_palette : Create a diverging palette with two colors. Examples -------- .. include:: ../docstrings/dark_palette.rst",
    "type": "function",
    "file_path": "seaborn\\seaborn\\palettes.py",
    "ast_data": "FunctionDef name:dark_palette arg:color arg:n_colors arg:reverse arg:as_cmap arg:input arguments arg arg arg arg arg Assign Call Assign Call Assign Assign Call Assign Return return:yes Call"
  },
  {
    "library": "django",
    "name": "FlatValuesListIterable",
    "source_code": "class FlatValuesListIterable(BaseIterable):\n\n    def __iter__(self):\n        queryset = self.queryset\n        compiler = queryset.query.get_compiler(queryset.db)\n        for row in compiler.results_iter(chunked_fetch=self.chunked_fetch, chunk_size=self.chunk_size):\n            yield row[0]",
    "docstring": "Iterable returned by QuerySet.values_list(flat=True) that yields single values.",
    "type": "class",
    "file_path": "django\\django\\db\\models\\query.py",
    "ast_data": "ClassDef name:FlatValuesListIterable FunctionDef name:__iter__ arg:self arguments arg Assign Assign Call For Call"
  },
  {
    "library": "pytorch",
    "name": "copy_fwd_metadata_to_bw_nodes",
    "source_code": "def copy_fwd_metadata_to_bw_nodes(fx_g):\n\n    def _is_forward_node_with_seq_nr(node):\n        return 'nn_module_stack' in node.meta and 'seq_nr' in node.meta\n\n    def _is_backward_node_with_seq_nr(node):\n        return 'nn_module_stack' not in node.meta and 'seq_nr' in node.meta\n    fwd_seq_nr_to_node = {}\n    for node in fx_g.graph.nodes:\n        if not _is_forward_node_with_seq_nr(node):\n            continue\n        seq_nr = node.meta['seq_nr']\n        if seq_nr in fwd_seq_nr_to_node:\n            continue\n        fwd_seq_nr_to_node[node.meta['seq_nr']] = node\n    for node in fx_g.graph.nodes:\n        if not _is_backward_node_with_seq_nr(node):\n            continue\n        fwd_node = fwd_seq_nr_to_node.get(node.meta['seq_nr'])\n        if fwd_node is not None:\n            node.meta['fwd_nn_module_stack'] = fwd_node.meta['nn_module_stack']\n            node.meta['fwd_source_fn_stack'] = fwd_node.meta.get('source_fn_stack')",
    "docstring": "Input: which contains the joint fwd+bwd FX graph created by aot_autograd. This function walks the graph and copies over metadata from forward nodes to backward nodes, using the field as a one-to-many mapping from forward node to backward node. This metadata is useful for performance profiling and debugging.",
    "type": "function",
    "file_path": "pytorch\\torch\\_functorch\\_aot_autograd\\utils.py",
    "ast_data": "FunctionDef name:copy_fwd_metadata_to_bw_nodes arg:fx_g arguments arg FunctionDef name:_is_forward_node_with_seq_nr arg:node arguments arg Return return:yes BoolOp Compare Compare FunctionDef name:_is_backward_node_with_seq_nr arg:node arguments arg Return return:yes BoolOp Compare Compare Assign For If Call Assign If Compare Assign For If Call Assign Call If Compare Assign Assign Call"
  },
  {
    "library": "tensorflow",
    "name": "get_checkpoint_mtimes",
    "source_code": "@deprecation.deprecated(date=None, instructions='Use standard file utilities to get mtimes.')\n@tf_export(v1=['train.get_checkpoint_mtimes'])\ndef get_checkpoint_mtimes(checkpoint_prefixes):\n    mtimes = []\n\n    def match_maybe_append(pathname):\n        fnames = file_io.get_matching_files(pathname)\n        if fnames:\n            mtimes.append(file_io.stat(fnames[0]).mtime_nsec / 1000000000.0)\n            return True\n        return False\n    for checkpoint_prefix in checkpoint_prefixes:\n        pathname = _prefix_to_checkpoint_path(checkpoint_prefix, saver_pb2.SaverDef.V2)\n        if match_maybe_append(pathname):\n            continue\n        match_maybe_append(checkpoint_prefix)\n    return mtimes",
    "docstring": "Returns the mtimes (modification timestamps) of the checkpoints. Globs for the checkpoints pointed to by . If the files exist, collect their mtime. Both V2 and V1 checkpoints are considered, in that priority. This is the recommended way to get the mtimes, since it takes into account the naming difference between V1 and V2 formats. Note: If not all checkpoints exist, the length of the returned mtimes list will be smaller than the length of list, so mapping checkpoints to corresponding mtimes will not be possible. Args: checkpoint_prefixes: a list of checkpoint paths, typically the results of or those of , regardless of sharded/non-sharded or V1/V2. Returns: A list of mtimes (in microseconds) of the found checkpoints.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\checkpoint\\checkpoint_management.py",
    "ast_data": "FunctionDef name:get_checkpoint_mtimes arg:checkpoint_prefixes arguments arg Assign FunctionDef name:match_maybe_append arg:pathname arguments arg Assign Call If Call Call Return return:yes Return return:yes For Assign Call If Call Call Return return:yes Call Call"
  },
  {
    "library": "django",
    "name": "BBOverlapsLookup",
    "source_code": "@BaseSpatialField.register_lookup\nclass BBOverlapsLookup(GISLookup):\n    lookup_name = 'bboverlaps'",
    "docstring": "The 'bboverlaps' operator returns true if A's bounding box overlaps B's bounding box.",
    "type": "class",
    "file_path": "django\\django\\contrib\\gis\\db\\models\\lookups.py",
    "ast_data": "ClassDef name:BBOverlapsLookup Assign"
  },
  {
    "library": "authlib",
    "name": "encode",
    "source_code": "def encode(self, header, payload, key, check=True):\n    header.setdefault('typ', 'JWT')\n    for k in ['exp', 'iat', 'nbf']:\n        claim = payload.get(k)\n        if isinstance(claim, datetime.datetime):\n            payload[k] = calendar.timegm(claim.utctimetuple())\n    if check:\n        self.check_sensitive_data(payload)\n    key = find_encode_key(key, header)\n    text = to_bytes(json_dumps(payload))\n    if 'enc' in header:\n        return self._jwe.serialize_compact(header, text, key)\n    else:\n        return self._jws.serialize_compact(header, text, key)",
    "docstring": "Encode a JWT with the given header, payload and key. :param header: A dict of JWS header :param payload: A dict to be encoded :param key: key used to sign the signature :param check: check if sensitive data in payload :return: bytes",
    "type": "method",
    "file_path": "authlib\\authlib\\jose\\rfc7519\\jwt.py",
    "ast_data": "FunctionDef name:encode arg:self arg:header arg:payload arg:key arg:check arguments arg arg arg arg arg Call For Assign Call If Call Assign Call Call If Call Assign Call Assign Call Call If Compare Return return:yes Call Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "Bird",
    "source_code": "class Bird(Benchmark):\n\n    def __init__(self, dimensions=2):\n        Benchmark.__init__(self, dimensions)\n        self._bounds = list(zip([-2.0 * pi] * self.N, [2.0 * pi] * self.N))\n        self.global_optimum = [[4.701055751981055, 3.152946019601391], [-1.582142172055011, -3.13024679963543]]\n        self.fglob = -106.7645367198034\n\n    def fun(self, x, *args):\n        self.nfev += 1\n        return sin(x[0]) * exp((1 - cos(x[1])) ** 2) + cos(x[1]) * exp((1 - sin(x[0])) ** 2) + (x[0] - x[1]) ** 2",
    "docstring": "Bird objective function. The Bird global optimization problem is a multimodal minimization problem defined as follows .. math:: f_{\\text{Bird}}(x) = \\left(x_1 - x_2\\right)^{2} + e^{\\left[1 - \\sin\\left(x_1\\right) \\right]^{2}} \\cos\\left(x_2\\right) + e^{\\left[1 - \\cos\\left(x_2\\right)\\right]^{2}} \\sin\\left(x_1\\right) with :math: *Global optimum*: :math: for :math: or :math: .. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions For Global Optimization Problems Int. Journal of Mathematical Modelling and Numerical Optimisation, 2013, 4, 150-194.",
    "type": "class",
    "file_path": "scipy\\benchmarks\\benchmarks\\go_benchmark_functions\\go_funcs_B.py",
    "ast_data": "ClassDef name:Bird FunctionDef name:__init__ arg:self arg:dimensions arguments arg arg Call Assign Call Call Assign Assign FunctionDef name:fun arg:self arg:x arguments arg arg arg Return return:yes Call Call Call Call Call Call"
  },
  {
    "library": "pandas",
    "name": "USFederalHolidayCalendar",
    "source_code": "class USFederalHolidayCalendar(AbstractHolidayCalendar):\n    rules = [Holiday(\"New Year's Day\", month=1, day=1, observance=nearest_workday), USMartinLutherKingJr, USPresidentsDay, USMemorialDay, Holiday('Juneteenth National Independence Day', month=6, day=19, start_date='2021-06-18', observance=nearest_workday), Holiday('Independence Day', month=7, day=4, observance=nearest_workday), USLaborDay, USColumbusDay, Holiday('Veterans Day', month=11, day=11, observance=nearest_workday), USThanksgivingDay, Holiday('Christmas Day', month=12, day=25, observance=nearest_workday)]",
    "docstring": "US Federal Government Holiday Calendar based on rules specified by:",
    "type": "class",
    "file_path": "pandas\\pandas\\tseries\\holiday.py",
    "ast_data": "ClassDef name:USFederalHolidayCalendar Assign Call Call Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "_get_current_quantization_mode",
    "source_code": "def _get_current_quantization_mode(self) -> _CurrentQuantizationMode:\n    qat_state = None\n    dynamic_state = None\n    for qconfig in list(self.module_name_qconfig.values()) + list(self.operator_type_qconfig.values()) + [self.global_config]:\n        if qconfig is not None:\n            if qat_state is None:\n                qat_state = qconfig.is_qat\n            else:\n                assert qat_state == qconfig.is_qat, f'All non-None quantization configs should have the same `is_qat`,but got {qat_state} and {qconfig.is_qat}.'\n            input_activation_spec = qconfig.input_activation\n            if input_activation_spec is not None:\n                if dynamic_state is None:\n                    dynamic_state = input_activation_spec.is_dynamic\n                else:\n                    assert dynamic_state == input_activation_spec.is_dynamic, f'All non-None `input_activation_spec` should have the same `is_dynamic`,but got {dynamic_state} and {input_activation_spec.is_dynamic}.'\n    return _CurrentQuantizationMode(qat_state=qat_state, dynamic_state=dynamic_state)",
    "docstring": "Retrieves the current quantization mode based on all configurations.",
    "type": "method",
    "file_path": "pytorch\\torch\\ao\\quantization\\quantizer\\x86_inductor_quantizer.py",
    "ast_data": "FunctionDef name:_get_current_quantization_mode arg:self arguments arg Assign Assign For Call Call Call Call If Compare If Compare Assign Compare Assign If Compare If Compare Assign Compare Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_try_load_par_source",
    "source_code": "def _try_load_par_source(source_file_path):\n    prefix_path = source_file_path\n    while True:\n        prefix_path, basename = os.path.split(prefix_path)\n        if not basename:\n            break\n        suffix_path = os.path.normpath(os.path.relpath(source_file_path, start=prefix_path))\n        if prefix_path.endswith('.par') and os.path.isfile(prefix_path):\n            with zipfile.ZipFile(prefix_path) as z:\n                norm_names = [os.path.normpath(name) for name in z.namelist()]\n                if suffix_path in norm_names:\n                    with z.open(z.namelist()[norm_names.index(suffix_path)]) as zf:\n                        source_text = zf.read().decode('utf-8')\n                        return source_text.split('\\n')",
    "docstring": "Try loading the source code inside a .par file. A .par file is a zip-compressed, self-contained Python executable. It contains the content of individual Python source files that can be read only through extracting from the zip file. Args: source_file_path: The full path to the file inside the .par file. This path should include the path to the .par file itself, followed by the intra-par path, e.g., \"/tmp/my_executable.par/org-tensorflow/tensorflow/python/foo/bar.py\". Returns: If successful, lines of the source file as a of s. Else, .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\debug\\lib\\source_utils.py",
    "ast_data": "FunctionDef name:_try_load_par_source arg:source_file_path arguments arg Assign While Assign Call If Assign Call Call If BoolOp Call Call With Call Assign Call Call If Compare With Call Call Call Assign Call Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "_is_conv_node",
    "source_code": "def _is_conv_node(n: Node):\n    return n.op == 'call_function' and n.target in [torch.ops.aten.conv1d.default, torch.ops.aten.conv2d.default]",
    "docstring": "Return whether the node refers to an aten conv op.",
    "type": "function",
    "file_path": "pytorch\\torch\\ao\\quantization\\pt2e\\utils.py",
    "ast_data": "FunctionDef name:_is_conv_node arg:n arguments arg Return return:yes BoolOp Compare Compare"
  },
  {
    "library": "scikit-learn",
    "name": "theta",
    "source_code": "@property\ndef theta(self):\n    theta = []\n    params = self.get_params()\n    for hyperparameter in self.hyperparameters:\n        if not hyperparameter.fixed:\n            theta.append(params[hyperparameter.name])\n    if len(theta) > 0:\n        return np.log(np.hstack(theta))\n    else:\n        return np.array([])",
    "docstring": "Returns the (flattened, log-transformed) non-fixed hyperparameters. Note that theta are typically the log-transformed values of the kernel's hyperparameters as this representation of the search space is more amenable for hyperparameter search, as hyperparameters like length-scales naturally live on a log-scale. Returns ------- theta : ndarray of shape (n_dims,) The non-fixed, log-transformed hyperparameters of the kernel",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\gaussian_process\\kernels.py",
    "ast_data": "FunctionDef name:theta arg:self arguments arg Assign Assign Call For If Call If Compare Call Return return:yes Call Call Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "hyperparameters",
    "source_code": "@property\ndef hyperparameters(self):\n    r = [Hyperparameter('k1__' + hyperparameter.name, hyperparameter.value_type, hyperparameter.bounds, hyperparameter.n_elements) for hyperparameter in self.k1.hyperparameters]\n    for hyperparameter in self.k2.hyperparameters:\n        r.append(Hyperparameter('k2__' + hyperparameter.name, hyperparameter.value_type, hyperparameter.bounds, hyperparameter.n_elements))\n    return r",
    "docstring": "Returns a list of all hyperparameter.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\gaussian_process\\kernels.py",
    "ast_data": "FunctionDef name:hyperparameters arg:self arguments arg Assign Call For Call Call Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "log_marginal_likelihood",
    "source_code": "def log_marginal_likelihood(self, theta=None, eval_gradient=False, clone_kernel=True):\n    check_is_fitted(self)\n    if theta is None:\n        if eval_gradient:\n            raise ValueError('Gradient can only be evaluated for theta!=None')\n        return self.log_marginal_likelihood_value_\n    theta = np.asarray(theta)\n    if self.n_classes_ == 2:\n        return self.base_estimator_.log_marginal_likelihood(theta, eval_gradient, clone_kernel=clone_kernel)\n    else:\n        if eval_gradient:\n            raise NotImplementedError('Gradient of log-marginal-likelihood not implemented for multi-class GPC.')\n        estimators = self.base_estimator_.estimators_\n        n_dims = estimators[0].kernel_.n_dims\n        if theta.shape[0] == n_dims:\n            return np.mean([estimator.log_marginal_likelihood(theta, clone_kernel=clone_kernel) for i, estimator in enumerate(estimators)])\n        elif theta.shape[0] == n_dims * self.classes_.shape[0]:\n            return np.mean([estimator.log_marginal_likelihood(theta[n_dims * i:n_dims * (i + 1)], clone_kernel=clone_kernel) for i, estimator in enumerate(estimators)])\n        else:\n            raise ValueError('Shape of theta must be either %d or %d. Obtained theta with shape %d.' % (n_dims, n_dims * self.classes_.shape[0], theta.shape[0]))",
    "docstring": "Return log-marginal likelihood of theta for training data. In the case of multi-class classification, the mean log-marginal likelihood of the one-versus-rest classifiers are returned. Parameters ---------- theta : array-like of shape (n_kernel_params,), default=None Kernel hyperparameters for which the log-marginal likelihood is evaluated. In the case of multi-class classification, theta may be the hyperparameters of the compound kernel or of an individual kernel. In the latter case, all individual kernel get assigned the same theta values. If None, the precomputed log_marginal_likelihood of `eval_gradient` is True.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\gaussian_process\\_gpc.py",
    "ast_data": "FunctionDef name:log_marginal_likelihood arg:self arg:theta arg:eval_gradient arg:clone_kernel arguments arg arg arg arg Call If Compare If Raise Call Return return:yes Assign Call If Compare Return return:yes Call If Raise Call Assign Assign If Compare Return return:yes Call Call Call If Compare Return return:yes Call Call Call Raise Call"
  },
  {
    "library": "cherrypy",
    "name": "__init__",
    "source_code": "def __init__(self, bus):\n    self.threads = {}\n    SimplePlugin.__init__(self, bus)\n    self.bus.listeners.setdefault('acquire_thread', set())\n    self.bus.listeners.setdefault('start_thread', set())\n    self.bus.listeners.setdefault('release_thread', set())\n    self.bus.listeners.setdefault('stop_thread', set())",
    "docstring": "Initialize the thread manager plugin.",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\process\\plugins.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:bus arguments arg arg Assign Call Call Call Call Call Call Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "_dump_chrometrace",
    "source_code": "def _dump_chrometrace(schedule, filename):\n    events = []\n    for rank in sorted(schedule):\n        for timestep, action in enumerate(schedule[rank]):\n            if action is None:\n                continue\n            events.append({'name': str(action), 'cat': 'computation' if action.computation_type in (F, B, W) else 'communication', 'ph': 'X', 'pid': rank, 'tid': rank, 'ts': timestep, 'dur': 1})\n    import json\n    with open(filename, 'w') as f:\n        json.dump({'traceEvents': events}, f)",
    "docstring": "This function dumps a schedule IR into a chrometrace format so it can be visualized. It is currently very basic and only serves as a graphical alternative to dumping the schedule IR as text. As future work we may extend this to include more accurate heuristics for durations, or let users input durations, add 'flow events' to let the UI show the connection between sends and recvs, and model cuda streams for comm/compute as separate streams on the chrometrace view.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\pipelining\\schedules.py",
    "ast_data": "FunctionDef name:_dump_chrometrace arg:schedule arg:filename arguments arg arg Assign For Call For Call If Compare Call Call Compare With Call Call"
  },
  {
    "library": "scipy",
    "name": "_count_paths_outside_method",
    "source_code": "def _count_paths_outside_method(m, n, g, h):\n    if m < n:\n        m, n = (n, m)\n    mg = m // g\n    ng = n // g\n    lxj = n + (mg - h) // mg\n    xj = [(h + mg * j + ng - 1) // ng for j in range(lxj)]\n    if lxj == 0:\n        return special.binom(m + n, n)\n    B = np.zeros(lxj)\n    B[0] = 1\n    for j in range(1, lxj):\n        Bj = special.binom(xj[j] + j, j)\n        for i in range(j):\n            bin = special.binom(xj[j] - xj[i] + j - i, j - i)\n            Bj -= bin * B[i]\n        B[j] = Bj\n    num_paths = 0\n    for j in range(lxj):\n        bin = special.binom(m - xj[j] + (n - j), n - j)\n        term = B[j] * bin\n        num_paths += term\n    return num_paths",
    "docstring": "Count the number of paths that pass outside the specified diagonal. Parameters ---------- m : integer m > 0 n : integer n > 0 g : integer g is greatest common divisor of m and n h : integer 0 <= h <= lcm(m,n) Returns ------- p : float The number of paths that go low. The calculation may overflow - check for a finite answer. Notes ----- Count the integer lattice paths from (0, 0) to (m, n), which at some point (x, y) along the path, satisfy: m*y <= n*x - h*g The paths make steps of size +1 in either positive x or positive y directions. We generally follow Hodges' treatment of Drion/Gnedenko/Korolyuk. Hodges, J.L. Jr., \"The Significance Probability of the Smirnov Two-Sample Test,\" Arkiv fiur Matematik, 3, No. 43 (1958), 469-86.",
    "type": "function",
    "file_path": "scipy\\scipy\\stats\\_stats_py.py",
    "ast_data": "FunctionDef name:_count_paths_outside_method arg:m arg:n arg:g arg:h arguments arg arg arg arg If Compare Assign Assign Assign Assign Assign Call If Compare Return return:yes Call Assign Call Assign For Call Assign Call For Call Assign Call Assign Assign For Call Assign Call Assign Return return:yes"
  },
  {
    "library": "pandas",
    "name": "__getitem__",
    "source_code": "def __getitem__(self, key: PositionalIndexer2D) -> Self | DTScalarOrNaT:\n    result = cast('Union[Self, DTScalarOrNaT]', super().__getitem__(key))\n    if lib.is_scalar(result):\n        return result\n    else:\n        result = cast(Self, result)\n    result._freq = self._get_getitem_freq(key)\n    return result",
    "docstring": "This getitem defers to the underlying array, which by-definition can only handle list-likes, slices, and integer scalars",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\arrays\\datetimelike.py",
    "ast_data": "FunctionDef name:__getitem__ arg:self arg:key arguments arg arg Assign Call Call Call If Call Return return:yes Assign Call Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, variables, name='TPUReplicatedVariable'):\n    if not isinstance(variables, abc.Sequence) or not variables or any((not isinstance(v, variables_lib.Variable) for v in variables)):\n        raise TypeError(f'Argument `variables` should be a non-empty list of `variables.Variable`s. Received {variables}')\n    if any((v.dtype != variables[0].dtype for v in variables)):\n        raise ValueError(f'All elements in argument `variables` must have the same dtype. Received dtypes: {[v.dtype for v in variables]}')\n    if any((v.shape != variables[0].shape for v in variables)):\n        raise ValueError(f'All elements in argument `variables` must have the same shape. Received shapes: {[v.shape for v in variables]}')\n    self._vars = variables\n    self._name = name\n    self._common_name = self._name.split(':')[0]\n    self._cached_value = None",
    "docstring": "Treats as a replicated list of s. Example: Args: variables: A list of s that comprise this replicated variable. Variables should not be shared between different objects. name: String. Name of this container. Defaults to \"TPUReplicatedVariable\".",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\tpu_replicated_variable.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:variables arg:name arguments arg arg arg If BoolOp Call Call Call Raise Call If Call Compare Raise Call If Call Compare Raise Call Assign Assign Assign Call Assign"
  },
  {
    "library": "matplotlib",
    "name": "axes",
    "source_code": "@property\ndef axes(self):\n    return self._axstack.as_list()",
    "docstring": "List of Axes in the Figure. You can access and modify the Axes in the Figure through this list. Do not modify the list itself. Instead, use , or to add or remove an Axes. Note: The property and method are equivalent.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\figure.py",
    "ast_data": "FunctionDef name:axes arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, input_dataset, num_replicas):\n\n    def recalculate_batch_size(type_spec):\n        output_shape = type_spec._to_legacy_output_shapes()\n        if not isinstance(output_shape, tensor_shape.TensorShape):\n            return None\n        if output_shape.rank is None:\n            return None\n        if len(output_shape) < 1:\n            raise ValueError('Invalid `input_dataset`. Expected a dataset whose elements have rank >= 1 but found a dataset whose elements are scalars. Fix the issue by adding the `batch` transformation to the dataset.')\n        output_dims = [d.value for d in output_shape.dims]\n        if output_dims[0] is not None and output_dims[0] % num_replicas == 0:\n            return output_dims[0] // num_replicas\n        return None\n\n    def rebatch(type_spec):\n        batch_size = recalculate_batch_size(type_spec)\n        return type_spec._unbatch()._batch(batch_size)\n    self._element_spec = nest.map_structure(rebatch, dataset_ops.get_structure(input_dataset))\n    input_dataset = dataset_ops.normalize_to_dense(input_dataset)\n    variant_tensor = ged_ops.rebatch_dataset(input_dataset._variant_tensor, num_replicas=num_replicas, **self._flat_structure)\n    super(_LegacyRebatchDataset, self).__init__(input_dataset, variant_tensor)",
    "docstring": "Creates a _LegacyRebatchDataset. Args: input_dataset: to rebatch. num_replicas: A scalar, representing the number of sub-batches to split each batch from into.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\data\\experimental\\ops\\distribute.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:input_dataset arg:num_replicas arguments arg arg arg FunctionDef name:recalculate_batch_size arg:type_spec arguments arg Assign Call If Call Return return:no If Compare Return return:no If Compare Call Raise Call Assign If BoolOp Compare Compare Return return:yes Return return:no FunctionDef name:rebatch arg:type_spec arguments arg Assign Call Return return:yes Call Call Assign Call Call Assign Call Assign Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "gelu",
    "source_code": "@tf_export('nn.gelu', v1=[])\n@dispatch.register_unary_elementwise_api\n@dispatch.add_dispatch_support\ndef gelu(features, approximate=False, name=None):\n    with ops.name_scope(name, 'Gelu', [features]):\n        features = ops.convert_to_tensor(features, name='features')\n        if not features.dtype.is_floating:\n            raise ValueError(f'`features.dtype` must be a floating point tensor.Received:features.dtype={features.dtype}')\n        if approximate:\n            coeff = math_ops.cast(0.044715, features.dtype)\n            return 0.5 * features * (1.0 + math_ops.tanh(0.7978845608028654 * (features + coeff * math_ops.pow(features, 3))))\n        else:\n            return 0.5 * features * math_ops.erfc(-features * math_ops.cast(0.7071067811865476, features.dtype))",
    "docstring": "Compute the Gaussian Error Linear Unit (GELU) activation function. Gaussian error linear unit (GELU) computes float TensorboolFalseTensorfeaturesfeaturesTensor`. References: [Gaussian Error Linear Units (GELUs)](",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\nn_ops.py",
    "ast_data": "FunctionDef name:gelu arg:features arg:approximate arg:name arguments arg arg arg With Call Assign Call If Raise Call If Assign Call Return return:yes Call Call Return return:yes Call Call Call"
  },
  {
    "library": "scrapy",
    "name": "update_vars",
    "source_code": "def update_vars(self, vars: dict[str, Any]) -> None:\n    pass",
    "docstring": "You can use this function to update the Scrapy objects that will be available in the shell",
    "type": "method",
    "file_path": "scrapy\\scrapy\\commands\\shell.py",
    "ast_data": "FunctionDef name:update_vars arg:self arg:vars arguments arg arg"
  },
  {
    "library": "pytorch",
    "name": "_Simplex",
    "source_code": "class _Simplex(Constraint):\n    event_dim = 1\n\n    def check(self, value):\n        return torch.all(value >= 0, dim=-1) & ((value.sum(-1) - 1).abs() < 1e-06)",
    "docstring": "Constrain to the unit simplex in the innermost (rightmost) dimension. Specifically: and .",
    "type": "class",
    "file_path": "pytorch\\torch\\distributions\\constraints.py",
    "ast_data": "ClassDef name:_Simplex Assign FunctionDef name:check arg:self arg:value arguments arg arg Return return:yes Call Compare Compare Call Call"
  },
  {
    "library": "scikit-learn",
    "name": "_fit_and_predict",
    "source_code": "def _fit_and_predict(estimator, X, y, train, test, fit_params, method):\n    fit_params = fit_params if fit_params is not None else {}\n    fit_params = _check_method_params(X, params=fit_params, indices=train)\n    X_train, y_train = _safe_split(estimator, X, y, train)\n    X_test, _ = _safe_split(estimator, X, y, test, train)\n    if y_train is None:\n        estimator.fit(X_train, **fit_params)\n    else:\n        estimator.fit(X_train, y_train, **fit_params)\n    func = getattr(estimator, method)\n    predictions = func(X_test)\n    encode = method in ['decision_function', 'predict_proba', 'predict_log_proba'] and y is not None\n    if encode:\n        if isinstance(predictions, list):\n            predictions = [_enforce_prediction_order(estimator.classes_[i_label], predictions[i_label], n_classes=len(set(y[:, i_label])), method=method) for i_label in range(len(predictions))]\n        else:\n            n_classes = len(set(y)) if y.ndim == 1 else y.shape[1]\n            predictions = _enforce_prediction_order(estimator.classes_, predictions, n_classes, method)\n    return predictions",
    "docstring": "Fit estimator and predict values for a given dataset split. Read more in the :ref:. Parameters ---------- estimator : estimator object implementing 'fit' and 'predict' The object to use to fit the data. X : array-like of shape (n_samples, n_features) The data to fit. .. versionchanged:: 0.20 X is only required to be an object with finite length or shape now y : array-like of shape (n_samples,) or (n_samples, n_outputs) or None The target variable to try to predict in the case of supervised learning. train : array-like of shape (n_train_samples,) Indices of training samples. test : array-like of shape (n_test_samples,) Indices of test samples. fit_params : dict or None Parameters that will be passed to ``. method : str Invokes the passed method name of the passed estimator. Returns ------- predictions : sequence Result of calling 'estimator.method'",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\model_selection\\_validation.py",
    "ast_data": "FunctionDef name:_fit_and_predict arg:estimator arg:X arg:y arg:train arg:test arg:fit_params arg:method arguments arg arg arg arg arg arg arg Assign Compare Assign Call Assign Call Assign Call If Compare Call Call Assign Call Assign Call Assign BoolOp Compare Compare If If Call Assign Call Call Call Call Call Assign Compare Call Call Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "get_shape",
    "source_code": "def get_shape(self) -> tensor_shape.TensorShape:\n    return self.shape",
    "docstring": "The statically known shape of this ragged tensor. Returns: A containing the statically known shape of this ragged tensor. Ragged dimensions have a size of . Alias for property. Examples: >>> tf.ragged.constant([[0], [1, 2]]).get_shape() TensorShape([2, None]) >>> tf.ragged.constant( ... [[[0, 1]], [[1, 2], [3, 4]]], ragged_rank=1).get_shape() TensorShape([2, None, 2])",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\ragged_tensor.py",
    "ast_data": "FunctionDef name:get_shape arg:self arguments arg Return return:yes"
  },
  {
    "library": "kornia",
    "name": "__mul__",
    "source_code": "def __mul__(self, right: Se3) -> Se3 | Vector3 | Tensor:\n    so3 = self.so3\n    t = self.t\n    if isinstance(right, Se3):\n        return self._mul_se3(right)\n    elif isinstance(right, (Vector3, Tensor)):\n        return so3 * right + t.data\n    else:\n        raise TypeError(f'Unsupported type: {type(right)}')",
    "docstring": "Compose two Se3 transformations. Args: right: the other Se3 transformation. Return: The resulting Se3 transformation.",
    "type": "method",
    "file_path": "kornia\\kornia\\geometry\\liegroup\\se3.py",
    "ast_data": "FunctionDef name:__mul__ arg:self arg:right arguments arg arg Assign Assign If Call Return return:yes Call If Call Return return:yes Raise Call Call"
  },
  {
    "library": "tensorflow",
    "name": "from_pyval",
    "source_code": "@classmethod\ndef from_pyval(cls, pyval, typespec=None):\n    return cls._from_pyval(pyval, typespec, ())",
    "docstring": "Constructs a StructuredTensor from a nested Python structure. >>> tf.experimental.StructuredTensor.from_pyval( ... {'a': [1, 2, 3], 'b': [[4, 5], [6, 7]]}) }, shape=())> Note that . Args: pyval: The nested Python structure that should be used to create the new . typespec: A specifying the expected type for each field. If not specified, then all nested dictionaries are turned into StructuredTensors, and all nested lists are turned into Tensors (if rank=2). Returns: A .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\structured\\structured_tensor.py",
    "ast_data": "FunctionDef name:from_pyval arg:cls arg:pyval arg:typespec arguments arg arg arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "cannot_be_moved",
    "source_code": "def cannot_be_moved(self, node: fx.Node) -> bool:\n    if node.target == 'output':\n        return not self.allow_outputs\n    if not (isinstance(node.target, torch._ops.OpOverload) and node.target.namespace in ('prims', 'aten')):\n        return True\n    if is_index_put_and_requires_h2d_sync_for_gpu_value(node):\n        return True\n    return False",
    "docstring": "Returns whether a node can be moved to the target device. If this function returns False, it means that this node and all of its users won't be moved into the target device.",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\fx_passes\\post_grad.py",
    "ast_data": "FunctionDef name:cannot_be_moved arg:self arg:node arguments arg arg If Compare Return return:yes If BoolOp Call Compare Return return:yes If Call Return return:yes Return return:yes"
  },
  {
    "library": "numpy",
    "name": "__randomstate_ctor",
    "source_code": "def __randomstate_ctor(bit_generator_name='MT19937', bit_generator_ctor=__bit_generator_ctor):\n    if isinstance(bit_generator_name, BitGenerator):\n        return RandomState(bit_generator_name)\n    return RandomState(bit_generator_ctor(bit_generator_name))",
    "docstring": "Pickling helper function that returns a legacy RandomState-like object Parameters ---------- bit_generator_name : str String containing the core BitGenerator's name bit_generator_ctor : callable, optional Callable function that takes bit_generator_name as its only argument and returns an instantized bit generator. Returns ------- rs : RandomState Legacy RandomState using the named core BitGenerator",
    "type": "function",
    "file_path": "numpy\\numpy\\random\\_pickle.py",
    "ast_data": "FunctionDef name:__randomstate_ctor arg:bit_generator_name arg:bit_generator_ctor arguments arg arg If Call Return return:yes Call Return return:yes Call Call"
  },
  {
    "library": "scikit-learn",
    "name": "__str__",
    "source_code": "def __str__(self) -> str:\n    return self.value",
    "docstring": "Return string representation (useful for pytest logs). Returns ------- str The operation's name.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\externals\\array_api_extra\\_lib\\_at.py",
    "ast_data": "FunctionDef name:__str__ arg:self arguments arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "is_dependent",
    "source_code": "def is_dependent(constraint):\n    return isinstance(constraint, _Dependent)",
    "docstring": "Checks if ``, False otherwise. Examples: >>> import torch >>> from torch.distributions import Bernoulli >>> from torch.distributions.constraints import is_dependent >>> dist = Bernoulli(probs=torch.tensor([0.6], requires_grad=True)) >>> constraint1 = dist.arg_constraints[\"probs\"] >>> constraint2 = dist.arg_constraints[\"logits\"] >>> for constraint in [constraint1, constraint2]: >>> if is_dependent(constraint): >>> continue",
    "type": "function",
    "file_path": "pytorch\\torch\\distributions\\constraints.py",
    "ast_data": "FunctionDef name:is_dependent arg:constraint arguments arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_dense_var_to_tensor",
    "source_code": "def _dense_var_to_tensor(self, dtype=None, name=None, as_ref=False):\n    if as_ref:\n        raise ValueError('You may be using variable created under distribute strategy in TF 1.x control flows. Try explicitly converting the variable to Tensor using variable.read_value(), or switch to TF 2.x.')\n    return ops.convert_to_tensor(self._get(), dtype=dtype, name=name, as_ref=as_ref)",
    "docstring": "Converts a variable to a tensor.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\values.py",
    "ast_data": "FunctionDef name:_dense_var_to_tensor arg:self arg:dtype arg:name arg:as_ref arguments arg arg arg arg If Raise Call Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_MatrixSetDiagGrad",
    "source_code": "@ops.RegisterGradient('MatrixSetDiag')\ndef _MatrixSetDiagGrad(op: ops.Operation, grad):\n    input_shape = op.inputs[0].get_shape().merge_with(grad.get_shape())\n    diag_shape = op.inputs[1].get_shape()\n    batch_shape = input_shape[:-2].merge_with(diag_shape[:-1])\n    matrix_shape = input_shape[-2:]\n    if batch_shape.is_fully_defined() and matrix_shape.is_fully_defined():\n        diag_shape = batch_shape.as_list() + [min(matrix_shape.as_list())]\n    else:\n        with ops.colocate_with(grad):\n            grad_shape = array_ops.shape(grad)\n            grad_rank = array_ops.rank(grad)\n            batch_shape = array_ops.slice(grad_shape, [0], [grad_rank - 2])\n            matrix_shape = array_ops.slice(grad_shape, [grad_rank - 2], [2])\n            min_dim = math_ops.reduce_min(matrix_shape)\n            diag_shape = array_ops.concat([batch_shape, [min_dim]], 0)\n    grad_input = array_ops.matrix_set_diag(grad, array_ops.zeros(diag_shape, dtype=grad.dtype))\n    grad_diag = array_ops.matrix_diag_part(grad)\n    return (grad_input, grad_diag)",
    "docstring": "Gradient for MatrixSetDiag.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\array_grad.py",
    "ast_data": "FunctionDef name:_MatrixSetDiagGrad arg:op arg:grad arguments arg arg Assign Call Call Call Assign Call Assign Call Assign If BoolOp Call Call Assign Call Call Call With Call Assign Call Assign Call Assign Call Assign Call Assign Call Assign Call Assign Call Call Assign Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "get",
    "source_code": "@contextlib.contextmanager\ndef get(self):\n    if self._obj is None:\n        raise AlreadyGarbageCollectedError(self.name, self.type_name)\n    yield self._obj",
    "docstring": "Yields the managed C-API Object, guaranteeing aliveness. This is a context manager. Inside the context the C-API object is guaranteed to be alive. Raises: AlreadyGarbageCollectedError: if the object is already deleted.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\c_api_util.py",
    "ast_data": "FunctionDef name:get arg:self arguments arg If Compare Raise Call"
  },
  {
    "library": "tensorflow",
    "name": "_create_grad_func",
    "source_code": "def _create_grad_func(func_graph, grads, name):\n    return func_graph_module.func_graph_from_py_func(name, lambda: _grad_fn(func_graph, grads), [], {}, func_graph=_CondGradFuncGraph(name, func_graph))",
    "docstring": "Returns the FuncGraph representation of _grad_fn.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\cond_v2.py",
    "ast_data": "FunctionDef name:_create_grad_func arg:func_graph arg:grads arg:name arguments arg arg arg Return return:yes Call arguments Call Call"
  },
  {
    "library": "pytorch",
    "name": "write",
    "source_code": "@classmethod\ndef write(cls, source_code: str, dst_file_ext: str) -> tuple[str, str]:\n    cuda_command = repr(rocm_compile_command(['dummy_input'], 'dummy_output', dst_file_ext))\n    key, input_path = write(source_code, cls._SOURCE_CODE_SUFFIX, extra=cuda_command)\n    return (key, input_path)",
    "docstring": "Writes source code into a file with dst_file_ext as the file extension. Returns the hash key of source code, and the path to the file.",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\codecache.py",
    "ast_data": "FunctionDef name:write arg:cls arg:source_code arg:dst_file_ext arguments arg arg arg Assign Call Call Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "RestoredResource",
    "source_code": "class RestoredResource(TrackableResource):\n\n    def __init__(self, device=''):\n        super().__init__(device=device)\n\n    @classmethod\n    def _deserialize_from_proto(cls, object_proto, dependencies, **unused_kwargs):\n        obj = cls(device=object_proto.resource.device)\n        resource_creator = dependencies.get('_create_resource')\n        if resource_creator is not None:\n            obj._create_resource = resource_creator\n        return obj\n\n    def _add_trackable_child(self, name, value):\n        setattr(self, name, value)\n        if isinstance(value, base.Trackable) and (not isinstance(value, def_function.Function)):\n            self._track_trackable(value, name)",
    "docstring": "Restored SavedResource.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\trackable\\resource.py",
    "ast_data": "ClassDef name:RestoredResource FunctionDef name:__init__ arg:self arg:device arguments arg arg Call Call FunctionDef name:_deserialize_from_proto arg:cls arg:object_proto arg:dependencies arguments arg arg arg arg Assign Call Assign Call If Compare Assign Return return:yes FunctionDef name:_add_trackable_child arg:self arg:name arg:value arguments arg arg arg Call If BoolOp Call Call Call"
  },
  {
    "library": "pandas",
    "name": "_items_overlap_with_suffix",
    "source_code": "def _items_overlap_with_suffix(left: Index, right: Index, suffixes: Suffixes) -> tuple[Index, Index]:\n    if not is_list_like(suffixes, allow_sets=False) or isinstance(suffixes, dict):\n        raise TypeError(f\"Passing 'suffixes' as a {type(suffixes)}, is not supported. Provide 'suffixes' as a tuple instead.\")\n    to_rename = left.intersection(right)\n    if len(to_rename) == 0:\n        return (left, right)\n    lsuffix, rsuffix = suffixes\n    if not lsuffix and (not rsuffix):\n        raise ValueError(f'columns overlap but no suffix specified: {to_rename}')\n\n    def renamer(x, suffix: str | None):\n        if x in to_rename and suffix is not None:\n            return f'{x}{suffix}'\n        return x\n    lrenamer = partial(renamer, suffix=lsuffix)\n    rrenamer = partial(renamer, suffix=rsuffix)\n    llabels = left._transform_index(lrenamer)\n    rlabels = right._transform_index(rrenamer)\n    dups = []\n    if not llabels.is_unique:\n        dups = llabels[llabels.duplicated() & ~left.duplicated()].tolist()\n    if not rlabels.is_unique:\n        dups.extend(rlabels[rlabels.duplicated() & ~right.duplicated()].tolist())\n    if dups:\n        raise MergeError(f\"Passing 'suffixes' which cause duplicate columns {set(dups)} is not allowed.\")\n    return (llabels, rlabels)",
    "docstring": "Suffixes type validation. If two indices overlap, add suffixes to overlapping entries. If corresponding suffix is empty, the entry is simply converted to string.",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\reshape\\merge.py",
    "ast_data": "FunctionDef name:_items_overlap_with_suffix arg:left arg:right arg:suffixes arguments arg arg arg If BoolOp Call Call Raise Call Call Assign Call If Compare Call Return return:yes Assign If BoolOp Raise Call FunctionDef name:renamer arg:x arg:suffix arguments arg arg If BoolOp Compare Compare Return return:yes Return return:yes Assign Call Assign Call Assign Call Assign Call Assign If Assign Call Call Call If Call Call Call Call If Raise Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_restore_from_tensors",
    "source_code": "def _restore_from_tensors(self, restored_tensors):\n    raise NotImplementedError",
    "docstring": "Restores checkpointed values to this . Please see the documentation for . Args: restored_tensors: A dictionary mapping names to tensors. The keys to this dictionary matches the names passed to _serialize_to_tensors. Returns: An op that runs the restoration.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\trackable\\base.py",
    "ast_data": "FunctionDef name:_restore_from_tensors arg:self arg:restored_tensors arguments arg arg Raise"
  },
  {
    "library": "tensorflow",
    "name": "assert_stmt",
    "source_code": "def assert_stmt(expression1, expression2):\n    if not callable(expression2):\n        raise ValueError('{} must be a callable'.format(expression2))\n    args, _, keywords, _ = tf_inspect.getargspec(expression2)\n    if args or keywords:\n        raise ValueError('{} may not have any arguments'.format(expression2))\n    if tensor_util.is_tf_type(expression1):\n        return _tf_assert_stmt(expression1, expression2)\n    else:\n        return _py_assert_stmt(expression1, expression2)",
    "docstring": "Functional form of an assert statement. This follows the semantics of the Python assert statement, however the concrete implementations may deviate from it. See the respective implementation for details. In general, the assert statement should not be used for control flow. Furthermore, it is encouraged that the assertion expressions should not have side effects. Args: expression1: Any expression2: Callable[[], Any], returns the expression to include in the error message when expression1 evaluates to False. When expression1 is True, the result of expression2 will not be evaluated, however, expression2 itself may be evaluated in some implementations. Returns: Any, implementation-dependent. Raises: ValueError: if any arguments are illegal.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\autograph\\operators\\exceptions.py",
    "ast_data": "FunctionDef name:assert_stmt arg:expression1 arg:expression2 arguments arg arg If Call Raise Call Call Assign Call If BoolOp Raise Call Call If Call Return return:yes Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "op",
    "source_code": "@property\ndef op(self) -> ops.Operation:\n    return self._values.op",
    "docstring": "The that produces as an output.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\sparse_tensor.py",
    "ast_data": "FunctionDef name:op arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_replace_tensors_for_gradient",
    "source_code": "def _replace_tensors_for_gradient(x, grad):\n    if not isinstance(x, composite_tensor.CompositeTensor):\n        return grad\n    if not isinstance(x, CompositeTensorGradientProtocol):\n        raise ValueError(f'Type {type(x).__name__} is not supported as a gradient source.')\n    composite_gradient = x.__composite_gradient__\n    x_components = composite_gradient.get_gradient_components(x)\n    if x_components is x:\n        grad_components = grad\n    else:\n        grad_components = nest.map_structure_up_to(x_components, _replace_tensors_for_gradient, x_components, grad)\n    if grad_components is None:\n        return None\n    return composite_gradient.replace_gradient_components(x, grad_components)",
    "docstring": "Replaces the tensors in that should be differentiated with . Args: x: A or . grad: A nested structure of , with the same structure as the value returned by . Returns: A or .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\composite_tensor_gradient.py",
    "ast_data": "FunctionDef name:_replace_tensors_for_gradient arg:x arg:grad arguments arg arg If Call Return return:yes If Call Raise Call Call Assign Assign Call If Compare Assign Assign Call If Compare Return return:no Return return:yes Call"
  },
  {
    "library": "django",
    "name": "do_filter",
    "source_code": "@register.tag('filter')\ndef do_filter(parser, token):\n    _, rest = token.contents.split(None, 1)\n    filter_expr = parser.compile_filter('var|%s' % rest)\n    for func, unused in filter_expr.filters:\n        filter_name = getattr(func, '_filter_name', None)\n        if filter_name in ('escape', 'safe'):\n            raise TemplateSyntaxError('\"filter %s\" is not permitted.  Use the \"autoescape\" tag instead.' % filter_name)\n    nodelist = parser.parse(('endfilter',))\n    parser.delete_first_token()\n    return FilterNode(filter_expr, nodelist)",
    "docstring": "Filter the contents of the block through variable filters. Filters can also be piped through each other, and they can have arguments -- just like in variable syntax. Sample usage:: {% filter force_escape|lower %} This text will be HTML-escaped, and will appear in lowercase. {% endfilter %} Note that the `` tag to manage autoescaping for blocks of template code.",
    "type": "function",
    "file_path": "django\\django\\template\\defaulttags.py",
    "ast_data": "FunctionDef name:do_filter arg:parser arg:token arguments arg arg Assign Call Assign Call For Assign Call If Compare Raise Call Assign Call Call Return return:yes Call Call"
  },
  {
    "library": "matplotlib",
    "name": "set_facecolor",
    "source_code": "def set_facecolor(self, color):\n    self._original_facecolor = color\n    self._set_facecolor(color)",
    "docstring": "Set the patch face color. Parameters ---------- color : :mpltype: or None",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\patches.py",
    "ast_data": "FunctionDef name:set_facecolor arg:self arg:color arguments arg arg Assign Call"
  },
  {
    "library": "tensorflow",
    "name": "build_ta_with_new_flow",
    "source_code": "def build_ta_with_new_flow(old_ta, flow):\n    impl = old_ta._implementation if isinstance(old_ta, TensorArray) else old_ta\n    if not context.executing_eagerly():\n        if not isinstance(impl, _GraphTensorArrayV2) and control_flow_util.EnableControlFlowV2(ops.get_default_graph()):\n            raise NotImplementedError('Attempting to build a graph-mode TF2-style TensorArray from either an eager-mode TensorArray or a TF1-style TensorArray.  This is not currently supported.  You may be attempting to capture a TensorArray inside a tf.function or tf.data map function. Instead, construct a new TensorArray inside the function.')\n    new_ta = TensorArray(dtype=impl.dtype, handle=impl.handle, flow=flow, infer_shape=impl._infer_shape, colocate_with_first_write_call=impl._colocate_with_first_write_call)\n    new_impl = new_ta._implementation\n    new_impl._dynamic_size = impl._dynamic_size\n    new_impl._size = impl._size\n    new_impl._colocate_with = impl._colocate_with\n    new_impl._element_shape = impl._element_shape\n    return new_ta",
    "docstring": "Builds a TensorArray with a new tensor.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\tensor_array_ops.py",
    "ast_data": "FunctionDef name:build_ta_with_new_flow arg:old_ta arg:flow arguments arg arg Assign Call If Call If BoolOp Call Call Call Raise Call Assign Call Assign Assign Assign Assign Assign Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "reduce_scatter_tensor_coalesced",
    "source_code": "def reduce_scatter_tensor_coalesced(inputs: list[torch.Tensor], reduceOp: str, scatter_dim: list[int], group: RANK_TYPES, tag: str='') -> list[torch.Tensor]:\n    group_name = _resolve_group_name(group, tag)\n    group_size = c10d._get_group_size_by_name(group_name)\n    assert len(scatter_dim) == len(inputs)\n    for idx, (dim, tensor) in enumerate(zip(scatter_dim, inputs)):\n        assert tensor.size(dim) % group_size == 0, f'input dimension {dim} ({tensor.size(dim)} must be a multiple of group_size {group_size} for tensor at index {idx}'\n        if dim != 0:\n            tensor_list = torch.chunk(tensor, group_size, dim=dim)\n            inputs[idx] = torch.cat(tensor_list)\n    tensor_list = torch.ops._c10d_functional.reduce_scatter_tensor_coalesced(inputs, reduceOp.lower(), group_size, group_name)\n    return list(map(_maybe_wrap_tensor, tensor_list))",
    "docstring": "Reduces a list of tensors across all machines in such a way that all get the final result, then scatter the results to corresponding ranks. The input tensors are left unmodified. Group can be one of: List[int]: ranks participating in the collective. List[List[int]]: 2D mesh of ranks taking part of this collective in MPMD. ProcessGroup: Will perform a collective using the ranks and tag of the PG. DeviceMesh: Do a SPMD collective over all ranks of the mesh (DeviceMesh, int): Do a MPMD collective over one dimension of the DeviceMesh :: N.B. If you pass a PG or a 1D list to perform a MPMD collective, the compiler won't be able to recover that information and perform collective algebraic optimization. Use other forms of input for that.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\_functional_collectives.py",
    "ast_data": "FunctionDef name:reduce_scatter_tensor_coalesced arg:inputs arg:reduceOp arg:scatter_dim arg:group arg:tag arguments arg arg arg arg arg Assign Call Assign Call Compare Call Call For Call Call Compare Call Call If Compare Assign Call Assign Call Assign Call Call Return return:yes Call Call"
  },
  {
    "library": "pandas",
    "name": "sort_index",
    "source_code": "def sort_index(self, *, axis: Axis=0, level: IndexLabel | None=None, ascending: bool | Sequence[bool]=True, inplace: bool=False, kind: SortKind='quicksort', na_position: NaPosition='last', sort_remaining: bool=True, ignore_index: bool=False, key: IndexKeyFunc | None=None) -> DataFrame | None:\n    return super().sort_index(axis=axis, level=level, ascending=ascending, inplace=inplace, kind=kind, na_position=na_position, sort_remaining=sort_remaining, ignore_index=ignore_index, key=key)",
    "docstring": "Sort object by labels (along an axis). Returns a new DataFrame sorted by label if argument is `numpy.sortmergesortstablefirstlastkeysortedkey` this is applied to each level separately. >>> df = pd.DataFrame({\"a\": [1, 2, 3, 4]}, index=[\"A\", \"b\", \"C\", \"d\"]) >>> df.sort_index(key=lambda x: x.str.lower()) a A 1 b 2 C 3 d 4",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\frame.py",
    "ast_data": "FunctionDef name:sort_index arg:self arguments arg arg arg arg arg arg arg arg arg arg Return return:yes Call Call"
  },
  {
    "library": "matplotlib",
    "name": "clearup_closed",
    "source_code": "def clearup_closed(self):\n    self.web_sockets = {socket for socket in self.web_sockets if socket.is_open()}\n    if len(self.web_sockets) == 0:\n        CloseEvent('close_event', self.canvas)._process()",
    "docstring": "Clear up any closed Comms.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\backends\\backend_nbagg.py",
    "ast_data": "FunctionDef name:clearup_closed arg:self arguments arg Assign Call If Compare Call Call Call"
  },
  {
    "library": "django",
    "name": "related_objects",
    "source_code": "@cached_property\ndef related_objects(self):\n    all_related_fields = self._get_fields(forward=False, reverse=True, include_hidden=True)\n    return make_immutable_fields_list('related_objects', (obj for obj in all_related_fields if not obj.hidden or obj.field.many_to_many))",
    "docstring": "Return all related objects pointing to the current model. The related objects can come from a one-to-one, one-to-many, or many-to-many field relation type. Private API intended only to be used by Django itself; get_fields() combined with filtering of field properties is the public API for obtaining this field list.",
    "type": "method",
    "file_path": "django\\django\\db\\models\\options.py",
    "ast_data": "FunctionDef name:related_objects arg:self arguments arg Assign Call Return return:yes Call BoolOp"
  },
  {
    "library": "pytorch",
    "name": "get_pip_packages",
    "source_code": "def get_pip_packages(run_lambda, patterns=None):\n    if patterns is None:\n        patterns = PIP_PATTERNS + COMMON_PATTERNS + NVIDIA_PATTERNS\n    pip_version = 'pip3' if sys.version_info.major == 3 else 'pip'\n    os.environ['PIP_DISABLE_PIP_VERSION_CHECK'] = '1'\n    out = run_and_read_all(run_lambda, [sys.executable, '-mpip', 'list', '--format=freeze'])\n    if out is None:\n        return (pip_version, out)\n    filtered_out = '\\n'.join((line for line in out.splitlines() if any((name in line for name in patterns))))\n    return (pip_version, filtered_out)",
    "docstring": "Return output. Note: will also find conda-installed pytorch and numpy packages.",
    "type": "function",
    "file_path": "pytorch\\torch\\utils\\collect_env.py",
    "ast_data": "FunctionDef name:get_pip_packages arg:run_lambda arg:patterns arguments arg arg If Compare Assign Assign Compare Assign Assign Call If Compare Return return:yes Assign Call Call Call Compare Return return:yes"
  },
  {
    "library": "sphinx",
    "name": "add_generic_role",
    "source_code": "def add_generic_role(self, name: str, nodeclass: type[Node], override: bool=False) -> None:\n    logger.debug('[app] adding generic role: %r', (name, nodeclass))\n    if not override and docutils.is_role_registered(name):\n        logger.warning(__('role %r is already registered and will not be overridden'), name, type='app', subtype='add_generic_role')\n    role = roles.GenericRole(name, nodeclass)\n    docutils.register_role(name, role)",
    "docstring": "Register a generic Docutils role. Register a Docutils role that does nothing but wrap its contents in the node given by *nodeclass*. :param override: If false, do not install it if another role is already installed as the same name If true, unconditionally install the role. .. versionadded:: 0.6 .. versionchanged:: 1.8 Add *override* keyword.",
    "type": "method",
    "file_path": "sphinx\\sphinx\\application.py",
    "ast_data": "FunctionDef name:add_generic_role arg:self arg:name arg:nodeclass arg:override arguments arg arg arg arg Call If BoolOp Call Call Call Assign Call Call"
  },
  {
    "library": "scipy",
    "name": "Brown",
    "source_code": "class Brown(Benchmark):\n    change_dimensionality = True\n\n    def __init__(self, dimensions=2):\n        Benchmark.__init__(self, dimensions)\n        self._bounds = list(zip([-1.0] * self.N, [4.0] * self.N))\n        self.custom_bounds = ([-1.0, 1.0], [-1.0, 1.0])\n        self.global_optimum = [[0 for _ in range(self.N)]]\n        self.fglob = 0.0\n\n    def fun(self, x, *args):\n        self.nfev += 1\n        x0 = x[:-1]\n        x1 = x[1:]\n        return sum((x0 ** 2.0) ** (x1 ** 2.0 + 1.0) + (x1 ** 2.0) ** (x0 ** 2.0 + 1.0))",
    "docstring": "Brown objective function. The Brown [1]_ global optimization problem is a multimodal minimization problem defined as follows: .. math:: f_{\\text{Brown}}(x) = \\sum_{i=1}^{n-1}\\left[ \\left(x_i^2\\right)^{x_{i + 1}^2 + 1} + \\left(x_{i + 1}^2\\right)^{x_i^2 + 1}\\right] with :math: for :math:. *Global optimum*: :math: for :math: for :math: .. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions For Global Optimization Problems Int. Journal of Mathematical Modelling and Numerical Optimisation, 2013, 4, 150-194.",
    "type": "class",
    "file_path": "scipy\\benchmarks\\benchmarks\\go_benchmark_functions\\go_funcs_B.py",
    "ast_data": "ClassDef name:Brown Assign FunctionDef name:__init__ arg:self arg:dimensions arguments arg arg Call Assign Call Call Assign Assign Call Assign FunctionDef name:fun arg:self arg:x arguments arg arg arg Assign Assign Return return:yes Call"
  },
  {
    "library": "django",
    "name": "CallbackFilter",
    "source_code": "class CallbackFilter(logging.Filter):\n\n    def __init__(self, callback):\n        self.callback = callback\n\n    def filter(self, record):\n        if self.callback(record):\n            return 1\n        return 0",
    "docstring": "A logging filter that checks the return value of a given callable (which takes the record-to-be-logged as its only parameter) to decide whether to log a record.",
    "type": "class",
    "file_path": "django\\django\\utils\\log.py",
    "ast_data": "ClassDef name:CallbackFilter FunctionDef name:__init__ arg:self arg:callback arguments arg arg Assign FunctionDef name:filter arg:self arg:record arguments arg arg If Call Return return:yes Return return:yes"
  },
  {
    "library": "sphinx",
    "name": "parse_definition",
    "source_code": "def parse_definition(self) -> None:\n    parser = DefinitionFinder(self.code.splitlines(True))\n    parser.parse()\n    self.definitions = parser.definitions",
    "docstring": "Parse the location of definitions from the code.",
    "type": "method",
    "file_path": "sphinx\\sphinx\\pycode\\parser.py",
    "ast_data": "FunctionDef name:parse_definition arg:self arguments arg Assign Call Call Call Assign"
  },
  {
    "library": "numpy",
    "name": "__str__",
    "source_code": "def __str__(self):\n    if self.size > 1:\n        mstr = [f'({','.join([str(i) for i in s])})' for s in zip(*[getattr(self, f) for f in self.dtype.names])]\n        return f'[{', '.join(mstr)}]'\n    else:\n        mstr = [f'{','.join([str(i) for i in s])}' for s in zip([getattr(self, f) for f in self.dtype.names])]\n        return f'({', '.join(mstr)})'",
    "docstring": "Calculates the string representation.",
    "type": "method",
    "file_path": "numpy\\numpy\\ma\\mrecords.py",
    "ast_data": "FunctionDef name:__str__ arg:self arguments arg If Compare Assign Call Call Call Call Return return:yes Call Assign Call Call Call Call Return return:yes Call"
  },
  {
    "library": "django",
    "name": "get_test_db_clone_settings",
    "source_code": "def get_test_db_clone_settings(self, suffix):\n    orig_settings_dict = self.connection.settings_dict\n    return {**orig_settings_dict, 'NAME': '{}_{}'.format(orig_settings_dict['NAME'], suffix)}",
    "docstring": "Return a modified connection settings dict for the n-th clone of a DB.",
    "type": "method",
    "file_path": "django\\django\\db\\backends\\base\\creation.py",
    "ast_data": "FunctionDef name:get_test_db_clone_settings arg:self arg:suffix arguments arg arg Assign Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "_pdf_single_value_cf_integrate",
    "source_code": "def _pdf_single_value_cf_integrate(Phi, x, alpha, beta, **kwds):\n    quad_eps = kwds.get('quad_eps', _QUAD_EPS)\n\n    def integrand1(t):\n        if t == 0:\n            return 0\n        return np.exp(-t ** alpha) * np.cos(beta * t ** alpha * Phi(alpha, t))\n\n    def integrand2(t):\n        if t == 0:\n            return 0\n        return np.exp(-t ** alpha) * np.sin(beta * t ** alpha * Phi(alpha, t))\n    with np.errstate(invalid='ignore'):\n        int1, *ret1 = integrate.quad(integrand1, 0, np.inf, weight='cos', wvar=x, limit=1000, epsabs=quad_eps, epsrel=quad_eps, full_output=1)\n        int2, *ret2 = integrate.quad(integrand2, 0, np.inf, weight='sin', wvar=x, limit=1000, epsabs=quad_eps, epsrel=quad_eps, full_output=1)\n    return (int1 + int2) / np.pi",
    "docstring": "To improve DNI accuracy convert characteristic function in to real valued integral using Euler's formula, then exploit cosine symmetry to change limits to [0, inf). Finally use cosine addition formula to split into two parts that can be handled by weighted quad pack.",
    "type": "function",
    "file_path": "scipy\\scipy\\stats\\_levy_stable\\__init__.py",
    "ast_data": "FunctionDef name:_pdf_single_value_cf_integrate arg:Phi arg:x arg:alpha arg:beta arguments arg arg arg arg arg Assign Call FunctionDef name:integrand1 arg:t arguments arg If Compare Return return:yes Return return:yes Call Call Call FunctionDef name:integrand2 arg:t arguments arg If Compare Return return:yes Return return:yes Call Call Call With Call Assign Call Assign Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "patch",
    "source_code": "def patch(self, frame_dict: dict[str, Any], name: str, new_fn: Callable, deduplicate: bool=True):\n    new_fn.__fx_already_patched = deduplicate\n    if name not in frame_dict and hasattr(builtins, name):\n        self.patches_made.append(_PatchedFnDel(frame_dict, name, None, new_fn))\n        self.patches_made[-1].patch()\n    elif getattr(frame_dict[name], '__fx_already_patched', False):\n        return\n    else:\n        self.patches_made.append(_PatchedFnSetItem(frame_dict, name, frame_dict[name], new_fn))\n        self.patches_made[-1].patch()",
    "docstring": "Replace frame_dict[name] with new_fn until we exit the context manager.",
    "type": "method",
    "file_path": "pytorch\\torch\\fx\\_symbolic_trace.py",
    "ast_data": "FunctionDef name:patch arg:self arg:frame_dict arg:name arg:new_fn arg:deduplicate arguments arg arg arg arg arg Assign If BoolOp Compare Call Call Call Call If Call Return return:no Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_process_expr",
    "source_code": "def _process_expr(self, node: ast.Expr) -> None:\n    if isinstance(node.value, ast.Call):\n        self._process_call(node.value)\n    elif isinstance(node.value, ast.Constant):\n        self._process_constant(node.value)\n    else:\n        self.visit(node)",
    "docstring": "Process top-level expression for potential symbol export.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\tools\\api\\generator2\\extractor\\extractor.py",
    "ast_data": "FunctionDef name:_process_expr arg:self arg:node arguments arg arg If Call Call If Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "is_registered",
    "source_code": "@property\ndef is_registered(self):\n    return self._registered",
    "docstring": "Returns True if the execution trace observer is registered, otherwise False.",
    "type": "method",
    "file_path": "pytorch\\torch\\profiler\\profiler.py",
    "ast_data": "FunctionDef name:is_registered arg:self arguments arg Return return:yes"
  },
  {
    "library": "cryptography",
    "name": "exchange",
    "source_code": "@abc.abstractmethod\ndef exchange(self, peer_public_key: X448PublicKey) -> bytes:\n    pass",
    "docstring": "Performs a key exchange operation using the provided peer's public key.",
    "type": "method",
    "file_path": "cryptography\\src\\cryptography\\hazmat\\primitives\\asymmetric\\x448.py",
    "ast_data": "FunctionDef name:exchange arg:self arg:peer_public_key arguments arg arg"
  },
  {
    "library": "authlib",
    "name": "register_token_validator",
    "source_code": "def register_token_validator(self, validator: TokenValidator):\n    if not self._default_auth_type:\n        self._default_realm = validator.realm\n        self._default_auth_type = validator.TOKEN_TYPE\n    if validator.TOKEN_TYPE not in self._token_validators:\n        self._token_validators[validator.TOKEN_TYPE] = validator",
    "docstring": "Register a token validator for a given Authorization type. Authlib has a built-in BearerTokenValidator per rfc6750.",
    "type": "method",
    "file_path": "authlib\\authlib\\oauth2\\rfc6749\\resource_protector.py",
    "ast_data": "FunctionDef name:register_token_validator arg:self arg:validator arguments arg arg If Assign Assign If Compare Assign"
  },
  {
    "library": "pytorch",
    "name": "_graph_partition_pivot",
    "source_code": "def _graph_partition_pivot(self) -> int:\n    included_node_indices = [i for i, n in enumerate(self.graph.nodes()) if n.kind() not in self._EXCLUDED_NODE_KINDS]\n    half_idx = len(included_node_indices) // 2 - 1\n    if half_idx >= 0 and len(included_node_indices) > half_idx:\n        return included_node_indices[half_idx] + 1\n    return -1",
    "docstring": "Find the pivot index to partition the graph. The pivot is the node that splits the graph into two parts. Each part should have the similar amount of nodes, excluding non essential ops, defined in , such as . If the graph has an odd number of nodes, the upper part will have one more node. If the graph does not have any node that can be partitioned, return -1. Returns: The index of the pivot node.",
    "type": "method",
    "file_path": "pytorch\\torch\\onnx\\verification.py",
    "ast_data": "FunctionDef name:_graph_partition_pivot arg:self arguments arg Assign Call Call Compare Call Assign Call If BoolOp Compare Compare Call Return return:yes Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "get_height_char",
    "source_code": "def get_height_char(self, c, isord=False):\n    if not isord:\n        c = ord(c)\n    return self._metrics[c].bbox[-1]",
    "docstring": "Get the bounding box (ink) height of character *c* (space is 0).",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\_afm.py",
    "ast_data": "FunctionDef name:get_height_char arg:self arg:c arg:isord arguments arg arg arg If Assign Call Return return:yes"
  },
  {
    "library": "kornia",
    "name": "vgg16",
    "source_code": "def vgg16(*, weights: Optional[Any]=None, **kwargs: Any) -> VGG:\n    return _vgg('D', False, weights, **kwargs)",
    "docstring": "VGG-16 from __. Args: weights (:class:, optional): The pretrained weights to use. See :class: below for more details, and possible values. By default, no pre-trained weights are used. progress (bool, optional): If True, displays a progress bar of the download to stderr. Default is True. **kwargs: parameters passed to the `source code `_ for more details about this class. .. autoclass:: torchvision.models.VGG16_Weights :members:",
    "type": "function",
    "file_path": "kornia\\kornia\\feature\\dedode\\vgg.py",
    "ast_data": "FunctionDef name:vgg16 arguments arg arg Return return:yes Call"
  },
  {
    "library": "django",
    "name": "DefaultStorageFinder",
    "source_code": "class DefaultStorageFinder(BaseStorageFinder):\n    storage = default_storage\n\n    def __init__(self, *args, **kwargs):\n        super().__init__(*args, **kwargs)\n        base_location = getattr(self.storage, 'base_location', empty)\n        if not base_location:\n            raise ImproperlyConfigured(\"The storage backend of the staticfiles finder %r doesn't have a valid location.\" % self.__class__)",
    "docstring": "A static files finder that uses the default storage backend.",
    "type": "class",
    "file_path": "django\\django\\contrib\\staticfiles\\finders.py",
    "ast_data": "ClassDef name:DefaultStorageFinder Assign FunctionDef name:__init__ arg:self arguments arg arg arg Call Call Assign Call If Raise Call"
  },
  {
    "library": "scipy",
    "name": "_root_scalar_toms748_doc",
    "source_code": "def _root_scalar_toms748_doc():\n    pass",
    "docstring": "Options ------- args : tuple, optional Extra arguments passed to the objective function. bracket: A sequence of 2 floats, optional An interval bracketing a root. `` must have different signs at the two endpoints. xtol : float, optional Tolerance (absolute) for termination. rtol : float, optional Tolerance (relative) for termination. maxiter : int, optional Maximum number of iterations. options: dict, optional Specifies any method-specific options not covered above.",
    "type": "function",
    "file_path": "scipy\\scipy\\optimize\\_root_scalar.py",
    "ast_data": "FunctionDef name:_root_scalar_toms748_doc arguments"
  },
  {
    "library": "tensorflow",
    "name": "resize_image_with_pad_v2",
    "source_code": "@tf_export('image.resize_with_pad', v1=[])\n@dispatch.add_dispatch_support\ndef resize_image_with_pad_v2(image, target_height, target_width, method=ResizeMethod.BILINEAR, antialias=False):\n\n    def _resize_fn(im, new_size):\n        return resize_images_v2(im, new_size, method, antialias=antialias)\n    return _resize_image_with_pad_common(image, target_height, target_width, _resize_fn)",
    "docstring": "Resizes and pads an image to a target width and height. Resizes an image to a target width and height by keeping the aspect ratio the same without distortion. If the target dimensions don't match the image dimensions, the image is resized and then padded with zeroes to match requested dimensions. Args: image: 4-D Tensor of shape or 3-D Tensor of shape . target_height: Target height. target_width: Target width. method: Method to use for resizing image. See antialias: Whether to use anti-aliasing when resizing. See 'image.resize()'. Raises: ValueError: if or are zero or negative. Returns: Resized and padded image. If was 4-D, a 4-D float Tensor of shape . If was 3-D, a 3-D float Tensor of shape .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\image_ops_impl.py",
    "ast_data": "FunctionDef name:resize_image_with_pad_v2 arg:image arg:target_height arg:target_width arg:method arg:antialias arguments arg arg arg arg arg FunctionDef name:_resize_fn arg:im arg:new_size arguments arg arg Return return:yes Call Return return:yes Call Call"
  },
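A minimal usage sketch for the public `tf.image.resize_with_pad` wrapper documented above; the input shape and target sizes are illustrative values, not part of the API.

```python
# Minimal sketch using the public tf.image.resize_with_pad API shown above.
import tensorflow as tf

image = tf.random.uniform((64, 48, 3))  # 3-D [height, width, channels]
padded = tf.image.resize_with_pad(image, target_height=32, target_width=32)
print(padded.shape)  # (32, 32, 3): aspect ratio preserved, zero-padded to fit
```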
  {
    "library": "pandas",
    "name": "get_level_values",
    "source_code": "def get_level_values(self, level) -> Index:\n    level = self._get_level_number(level)\n    values = self._get_level_values(level)\n    return values",
    "docstring": "Return vector of label values for requested level. Length of returned vector is equal to the length of the index. The method is a crucial utility for extracting specific level values from a . This function is particularly useful when working with multi-level data, allowing you to isolate and manipulate individual levels without having to deal with the complexity of the entire structure. It seamlessly handles both integer and string-based level access, providing flexibility in how you can interact with the data. Additionally, this method ensures that the returned maintains the integrity of the original data, even when missing values are present, by appropriately casting the result to a suitable data type. Parameters ---------- level : int or str `Index`. >>> pd.MultiIndex.from_arrays([[1, None, 2], [3, 4, 5]]).dtypes level_0 int64 level_1 int64 dtype: object >>> pd.MultiIndex.from_arrays([[1, None, 2], [3, 4, 5]]).get_level_values(0) Index([1.0, nan, 2.0], dtype='float64')",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\indexes\\multi.py",
    "ast_data": "FunctionDef name:get_level_values arg:self arg:level arguments arg arg Assign Call Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_postprocess_statement",
    "source_code": "def _postprocess_statement(self, node):\n    pop_uses = self.state[_Statement].pop_uses\n    if pop_uses:\n        replacements = []\n        for original_call_node, pop_var_name in pop_uses:\n            replacements.extend(self._generate_pop_operation(original_call_node, pop_var_name))\n        replacements.append(node)\n        node = replacements\n    self.state[_Statement].exit()\n    return (node, None)",
    "docstring": "Inserts any separate pop() calls that node may use.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\autograph\\converters\\lists.py",
    "ast_data": "FunctionDef name:_postprocess_statement arg:self arg:node arguments arg arg Assign If Assign For Call Call Call Assign Call Return return:yes"
  },
  {
    "library": "pandas",
    "name": "_get_trimming_maximums",
    "source_code": "def _get_trimming_maximums(rn, cn, max_elements, max_rows=None, max_cols=None, scaling_factor: float=0.8) -> tuple[int, int]:\n\n    def scale_down(rn, cn):\n        if cn >= rn:\n            return (rn, int(cn * scaling_factor))\n        else:\n            return (int(rn * scaling_factor), cn)\n    if max_rows:\n        rn = max_rows if rn > max_rows else rn\n    if max_cols:\n        cn = max_cols if cn > max_cols else cn\n    while rn * cn > max_elements:\n        rn, cn = scale_down(rn, cn)\n    return (rn, cn)",
    "docstring": "Recursively reduce the number of rows and columns to satisfy max elements. Parameters ---------- rn, cn : int The number of input rows / columns max_elements : int The number of allowable elements max_rows, max_cols : int, optional Directly specify an initial maximum rows or columns before compression. scaling_factor : float Factor at which to reduce the number of rows / columns to fit. Returns ------- rn, cn : tuple New rn and cn values that satisfy the max_elements constraint",
    "type": "function",
    "file_path": "pandas\\pandas\\io\\formats\\style_render.py",
    "ast_data": "FunctionDef name:_get_trimming_maximums arg:rn arg:cn arg:max_elements arg:max_rows arg:max_cols arg:scaling_factor arguments arg arg arg arg arg arg FunctionDef name:scale_down arg:rn arg:cn arguments arg arg If Compare Return return:yes Call Return return:yes Call If Assign Compare If Assign Compare While Compare Assign Call Return return:yes"
  },
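Since `_get_trimming_maximums` is a private pandas helper, here is a standalone re-sketch of the same scale-down loop to show the behavior; the `trim` name and the inputs are hypothetical.

```python
# Standalone re-sketch of the scale-down loop above (not the pandas API itself).
def trim(rn: int, cn: int, max_elements: int, scaling_factor: float = 0.8):
    while rn * cn > max_elements:
        if cn >= rn:
            cn = int(cn * scaling_factor)  # shrink the larger dimension first
        else:
            rn = int(rn * scaling_factor)
    return rn, cn

# Dimensions are reduced alternately until rows * cols <= max_elements.
print(trim(1000, 100, max_elements=5000))
```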
  {
    "library": "tensorflow",
    "name": "fixed_size_partitioner",
    "source_code": "@tf_export(v1=['fixed_size_partitioner'])\ndef fixed_size_partitioner(num_shards, axis=0):\n\n    def _partitioner(shape, **unused_args):\n        partitions_list = [1] * len(shape)\n        partitions_list[axis] = min(num_shards, shape.dims[axis].value)\n        return partitions_list\n    return _partitioner",
    "docstring": "Partitioner to specify a fixed number of shards along given axis. @compatibility(TF2) This API is deprecated in TF2. In TF2, partitioner is no longer part of the variable declaration via . [ParameterServer Training] ( handles partitioning of variables. The corresponding TF2 partitioner class of is . Check the [migration guide] ( on the differences in treatment of variables and losses between TF1 and TF2. Before: After: @end_compatibility Args: num_shards: , number of shards to partition variable. axis: , axis to partition on. Returns: A partition function usable as the argument to and .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\partitioned_variables.py",
    "ast_data": "FunctionDef name:fixed_size_partitioner arg:num_shards arg:axis arguments arg arg FunctionDef name:_partitioner arg:shape arguments arg arg Assign Call Assign Call Return return:yes Return return:yes Call"
  },
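A TF1-style usage sketch of this deprecated partitioner through `tf.compat.v1`; the scope name, variable name, and shape are illustrative.

```python
# TF1-style sketch of the deprecated fixed_size_partitioner (compat.v1 shown).
import tensorflow.compat.v1 as tf

tf.disable_v2_behavior()
partitioner = tf.fixed_size_partitioner(num_shards=4, axis=0)
with tf.variable_scope("scope", partitioner=partitioner):
    v = tf.get_variable("v", shape=[16, 8])  # stored as 4 shards along axis 0
```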
  {
    "library": "django",
    "name": "check_const_string",
    "source_code": "def check_const_string(result, func, cargs, offset=None, cpl=False):\n    if offset:\n        check_err(result, cpl=cpl)\n        ptr = ptr_byref(cargs, offset)\n        return ptr.value\n    else:\n        return result",
    "docstring": "Similar functionality to , but does not free the pointer.",
    "type": "function",
    "file_path": "django\\django\\contrib\\gis\\gdal\\prototypes\\errcheck.py",
    "ast_data": "FunctionDef name:check_const_string arg:result arg:func arg:cargs arg:offset arg:cpl arguments arg arg arg arg arg If Call Assign Call Return return:yes Return return:yes"
  },
  {
    "library": "numpy",
    "name": "all",
    "source_code": "@array_function_dispatch(_all_dispatcher)\ndef all(a, axis=None, out=None, keepdims=np._NoValue, *, where=np._NoValue):\n    return _wrapreduction_any_all(a, np.logical_and, 'all', axis, out, keepdims=keepdims, where=where)",
    "docstring": "Test whether all array elements along a given axis evaluate to True. Parameters ---------- a : array_like Input array or object that can be converted to an array. axis : None or int or tuple of ints, optional Axis or axes along which a logical AND reduction is performed. The default (`axisufuncs-output-typekeepdimsallndarraykeepdimsTrue~numpy.ufunc.reduceoutoutTrue`. Examples -------- >>> import numpy as np >>> np.all([[True,False],[True,True]]) False >>> np.all([[True,False],[True,True]], axis=0) array([ True, False]) >>> np.all([-1, 4, 5]) True >>> np.all([1.0, np.nan]) True >>> np.all([[True, True], [False, True]], where=[[True], [False]]) True >>> o=np.array(False) >>> z=np.all([-1, 4, 5], out=o) >>> id(z), id(o), z (28293632, 28293632, array(True)) # may vary",
    "type": "function",
    "file_path": "numpy\\numpy\\_core\\fromnumeric.py",
    "ast_data": "FunctionDef name:all arg:a arg:axis arg:out arg:keepdims arguments arg arg arg arg arg Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_find_versioned_file",
    "source_code": "def _find_versioned_file(base_paths, relative_paths, filepatterns, required_version, get_version):\n    if type(filepatterns) not in [list, tuple]:\n        filepatterns = [filepatterns]\n    for path in _cartesian_product(base_paths, relative_paths):\n        for filepattern in filepatterns:\n            for file in glob.glob(os.path.join(path, filepattern)):\n                actual_version = get_version(file)\n                if _matches_version(actual_version, required_version):\n                    return (file, actual_version)\n    raise _not_found_error(base_paths, relative_paths, ', '.join(filepatterns) + \" matching version '%s'\" % required_version)",
    "docstring": "Returns first valid path to a file that matches the requested version.",
    "type": "function",
    "file_path": "tensorflow\\third_party\\xla\\third_party\\gpus\\find_cuda_config.py",
    "ast_data": "FunctionDef name:_find_versioned_file arg:base_paths arg:relative_paths arg:filepatterns arg:required_version arg:get_version arguments arg arg arg arg arg If Compare Call Assign For Call For For Call Call Assign Call If Call Return return:yes Raise Call Call"
  },
  {
    "library": "pytorch",
    "name": "build",
    "source_code": "def build(self) -> None:\n    if self._use_relative_path:\n        return self.build_fbcode_re()\n    _create_if_dir_not_exist(self._output_dir)\n    _build_tmp_dir = os.path.join(self._output_dir, f'{self._name}_{_BUILD_TEMP_DIR}')\n    _create_if_dir_not_exist(_build_tmp_dir)\n    build_cmd = self.get_command_line()\n    run_compile_cmd(build_cmd, cwd=_build_tmp_dir)\n    _remove_dir(_build_tmp_dir)",
    "docstring": "It is must need a temperary directory to store object files in Windows. After build completed, delete the temperary directory to save disk space.",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\cpp_builder.py",
    "ast_data": "FunctionDef name:build arg:self arguments arg If Return return:yes Call Call Assign Call Call Assign Call Call Call"
  },
  {
    "library": "django",
    "name": "password_change_done",
    "source_code": "def password_change_done(self, request, extra_context=None):\n    from django.contrib.auth.views import PasswordChangeDoneView\n    defaults = {'extra_context': {**self.each_context(request), **(extra_context or {})}}\n    if self.password_change_done_template is not None:\n        defaults['template_name'] = self.password_change_done_template\n    request.current_app = self.name\n    return PasswordChangeDoneView.as_view(**defaults)(request)",
    "docstring": "Display the \"success\" page after a password change.",
    "type": "method",
    "file_path": "django\\django\\contrib\\admin\\sites.py",
    "ast_data": "FunctionDef name:password_change_done arg:self arg:request arg:extra_context arguments arg arg arg Assign Call BoolOp If Compare Assign Assign Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "_get_foreach_kernels_supported_devices",
    "source_code": "def _get_foreach_kernels_supported_devices() -> list[str]:\n    return ['cuda', 'xpu', torch._C._get_privateuse1_backend_name()]",
    "docstring": "Return the device type list that supports foreach kernels.",
    "type": "function",
    "file_path": "pytorch\\torch\\utils\\_foreach_utils.py",
    "ast_data": "FunctionDef name:_get_foreach_kernels_supported_devices arguments Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "series_small_a",
    "source_code": "def series_small_a():\n    order = 5\n    a, b, x, k = symbols('a b x k')\n    A = []\n    X = []\n    B = []\n    expression = Sum(x ** k / factorial(k) / gamma(a * k + b), (k, 0, S.Infinity))\n    expression = gamma(b) / sympy.exp(x) * expression\n    for n in range(0, order + 1):\n        term = expression.diff(a, n).subs(a, 0).simplify().doit()\n        x_part = term.subs(polygamma(0, b), 1).replace(polygamma, lambda *args: 0)\n        x_part *= (-1) ** n\n        A.append(a ** n / factorial(n))\n        X.append(horner(x_part))\n        B.append(horner((term / x_part).simplify()))\n    s = 'Tylor series expansion of Phi(a, b, x) in a=0 up to order 5.\\n'\n    s += 'Phi(a, b, x) = exp(x)/gamma(b) * sum(A[i] * X[i] * B[i], i=0..5)\\n'\n    for name, c in zip(['A', 'X', 'B'], [A, X, B]):\n        for i in range(len(c)):\n            s += f'\\n{name}[{i}] = ' + str(c[i])\n    return s",
    "docstring": "Tylor series expansion of Phi(a, b, x) in a=0 up to order 5.",
    "type": "function",
    "file_path": "scipy\\scipy\\special\\_precompute\\wright_bessel.py",
    "ast_data": "FunctionDef name:series_small_a arguments Assign Assign Call Assign Assign Assign Assign Call Call Call Assign Call Call For Call Assign Call Call Call Call Assign Call Call Call arguments arg Call Call Call Call Call Call Call Assign For Call For Call Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "find_first_unfusible",
    "source_code": "def find_first_unfusible(start_nodes: list[fx.Node], max_range: int) -> int:\n    sorted_nodes: list[tuple[int, fx.Node, bool]] = []\n    for n in start_nodes:\n        heapq.heappush(sorted_nodes, (node_info.get_fw_order(n), n, True))\n    while len(sorted_nodes) > 0:\n        _, node, node_is_fusible = heapq.heappop(sorted_nodes)\n        if not node_is_fusible:\n            return node_info.get_fw_order(node)\n        for user in node.users:\n            if node_info.is_required_fw(user):\n                if node_info.get_fw_order(user) > max_range:\n                    continue\n                val: tuple[int, fx.Node, bool] = (node_info.get_fw_order(user), user, is_fusible(node, user))\n                if val not in sorted_nodes:\n                    heapq.heappush(sorted_nodes, val)\n    return max_range",
    "docstring": "Finds the first unfusible node in the chain of nodes starting from and returns its position.",
    "type": "function",
    "file_path": "pytorch\\torch\\_functorch\\partitioners.py",
    "ast_data": "FunctionDef name:find_first_unfusible arg:start_nodes arg:max_range arguments arg arg For Call Call While Compare Call Assign Call If Return return:yes Call For If Call If Compare Call Call Call If Compare Call Return return:yes"
  },
  {
    "library": "django",
    "name": "flatten",
    "source_code": "def flatten(fields):\n    flat = []\n    for field in fields:\n        if isinstance(field, (list, tuple)):\n            flat.extend(field)\n        else:\n            flat.append(field)\n    return flat",
    "docstring": "Return a list which is a single level of flattening of the original list.",
    "type": "function",
    "file_path": "django\\django\\contrib\\admin\\utils.py",
    "ast_data": "FunctionDef name:flatten arg:fields arguments arg Assign For If Call Call Call Return return:yes"
  },
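A quick illustration of the single-level flatten above, inlined standalone so it runs without Django settings configuration; the field names are made up.

```python
# Single-level flatten, as in the Django admin helper above.
fields = ["name", ("first", "last"), ["email"]]
flat = []
for field in fields:
    if isinstance(field, (list, tuple)):
        flat.extend(field)   # unpack one level of nesting
    else:
        flat.append(field)
print(flat)  # ['name', 'first', 'last', 'email']
```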
  {
    "library": "authlib",
    "name": "create_bearer_token_validator",
    "source_code": "def create_bearer_token_validator(session, token_model):\n    from authlib.oauth2.rfc6750 import BearerTokenValidator\n\n    class _BearerTokenValidator(BearerTokenValidator):\n\n        def authenticate_token(self, token_string):\n            q = session.query(token_model)\n            return q.filter_by(access_token=token_string).first()\n    return _BearerTokenValidator",
    "docstring": "Create an bearer token validator class with SQLAlchemy session and token model. :param session: SQLAlchemy session :param token_model: Token model class",
    "type": "function",
    "file_path": "authlib\\authlib\\integrations\\sqla_oauth2\\functions.py",
    "ast_data": "FunctionDef name:create_bearer_token_validator arg:session arg:token_model arguments arg arg ClassDef name:_BearerTokenValidator FunctionDef name:authenticate_token arg:self arg:token_string arguments arg arg Assign Call Return return:yes Call Call Return return:yes"
  },
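A hedged wiring sketch: only `create_bearer_token_validator` and `ResourceProtector` come from authlib; the `session` and `Token` arguments stand in for your own SQLAlchemy session and token model.

```python
# Sketch: register the generated validator with a ResourceProtector.
from authlib.integrations.flask_oauth2 import ResourceProtector
from authlib.integrations.sqla_oauth2 import create_bearer_token_validator

def register_bearer(session, Token):
    # session: your SQLAlchemy session; Token: your token model class.
    require_oauth = ResourceProtector()
    validator_cls = create_bearer_token_validator(session, Token)
    require_oauth.register_token_validator(validator_cls())
    return require_oauth
```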
  {
    "library": "numpy",
    "name": "_collapse",
    "source_code": "def _collapse(self, axis):\n    if axis is None:\n        return self[0, 0]\n    else:\n        return self",
    "docstring": "A convenience function for operations that want to collapse to a scalar like _align, but are using keepdims=True",
    "type": "method",
    "file_path": "numpy\\numpy\\matrixlib\\defmatrix.py",
    "ast_data": "FunctionDef name:_collapse arg:self arg:axis arguments arg arg If Compare Return return:yes Return return:yes"
  },
  {
    "library": "django",
    "name": "process_response",
    "source_code": "def process_response(self, request, response):\n    if response.status_code == 404 and self.should_redirect_with_slash(request):\n        return self.response_redirect_class(self.get_full_path_with_slash(request))\n    if not response.streaming and (not response.has_header('Content-Length')):\n        response.headers['Content-Length'] = str(len(response.content))\n    return response",
    "docstring": "When the status code of the response is 404, it may redirect to a path with an appended slash if should_redirect_with_slash() returns True.",
    "type": "method",
    "file_path": "django\\django\\middleware\\common.py",
    "ast_data": "FunctionDef name:process_response arg:self arg:request arg:response arguments arg arg arg If BoolOp Compare Call Return return:yes Call Call If BoolOp Call Assign Call Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "_update_val_from_pos",
    "source_code": "def _update_val_from_pos(self, pos):\n    idx = np.argmin(np.abs(self.val - pos))\n    if idx == 0:\n        val = self._min_in_bounds(pos)\n        self.set_min(val)\n    else:\n        val = self._max_in_bounds(pos)\n        self.set_max(val)\n    if self._active_handle:\n        if self.orientation == 'vertical':\n            self._active_handle.set_ydata([val])\n        else:\n            self._active_handle.set_xdata([val])",
    "docstring": "Update the slider value based on a given position.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\widgets.py",
    "ast_data": "FunctionDef name:_update_val_from_pos arg:self arg:pos arguments arg arg Assign Call Call If Compare Assign Call Call Assign Call Call If If Compare Call Call"
  },
  {
    "library": "pytorch",
    "name": "method_def",
    "source_code": "def method_def(name: BaseOperatorName, module: str | None, overloads: Sequence[PythonSignatureNativeFunctionPair], *, method: bool) -> str:\n    pycname = get_pycname(name)\n    if name.dunder_method:\n        pycname = f'TypeError_to_NotImplemented_<{pycname}>'\n    if is_noarg(overloads):\n        flags = 'METH_NOARGS' if method else 'METH_VARARGS | METH_KEYWORDS'\n    else:\n        pycname = f'castPyCFunctionWithKeywords({pycname})'\n        flags = 'METH_VARARGS | METH_KEYWORDS'\n    if module == 'torch':\n        flags += ' | METH_STATIC'\n    return f'{{\"{name}\", {pycname}, {flags}, nullptr}},'",
    "docstring": "Generate method def entry.",
    "type": "function",
    "file_path": "pytorch\\tools\\autograd\\gen_python_functions.py",
    "ast_data": "FunctionDef name:method_def arg:name arg:module arg:overloads arguments arg arg arg arg Assign Call If Assign If Call Assign Assign Assign If Compare Return return:yes"
  },
  {
    "library": "cherrypy",
    "name": "start_with_callback",
    "source_code": "def start_with_callback(self, func, args=None, kwargs=None):\n    if args is None:\n        args = ()\n    if kwargs is None:\n        kwargs = {}\n    args = (func,) + args\n\n    def _callback(func, *a, **kw):\n        self.wait(states.STARTED)\n        func(*a, **kw)\n    t = threading.Thread(target=_callback, args=args, kwargs=kwargs)\n    t.name = 'Bus Callback ' + t.name\n    t.start()\n    self.start()\n    return t",
    "docstring": "Start 'func' in a new thread T, then start self (and return T).",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\process\\wspbus.py",
    "ast_data": "FunctionDef name:start_with_callback arg:self arg:func arg:args arg:kwargs arguments arg arg arg arg If Compare Assign If Compare Assign Assign FunctionDef name:_callback arg:func arguments arg arg arg Call Call Assign Call Assign Call Call Return return:yes"
  },
  {
    "library": "django",
    "name": "get_backend_timeout",
    "source_code": "def get_backend_timeout(self, timeout=DEFAULT_TIMEOUT):\n    if timeout == DEFAULT_TIMEOUT:\n        timeout = self.default_timeout\n    if timeout is None:\n        return 0\n    elif int(timeout) == 0:\n        timeout = -1\n    if timeout > 2592000:\n        timeout += int(time.time())\n    return int(timeout)",
    "docstring": "Memcached deals with long (> 30 days) timeouts in a special way. Call this function to obtain a safe value for your timeout.",
    "type": "method",
    "file_path": "django\\django\\core\\cache\\backends\\memcached.py",
    "ast_data": "FunctionDef name:get_backend_timeout arg:self arg:timeout arguments arg arg If Compare Assign If Compare Return return:yes If Compare Call Assign If Compare Call Call Return return:yes Call"
  },
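An arithmetic illustration of the 30-day rule (2592000 s = 30 days): memcached treats longer timeouts as absolute Unix timestamps. This is a standalone re-sketch of the method above, with the `DEFAULT_TIMEOUT` sentinel handling omitted.

```python
import time

def backend_timeout(timeout):
    # Re-sketch of the Django memcached helper above (sentinel handling omitted).
    if timeout is None:
        return 0                     # memcached: never expire
    elif int(timeout) == 0:
        timeout = -1                 # expire immediately
    if timeout > 2592000:            # 30 days in seconds
        timeout += int(time.time())  # send as an absolute epoch timestamp
    return int(timeout)

print(backend_timeout(60))                        # relative seconds: 60
print(backend_timeout(90 * 86400) > time.time())  # absolute timestamp: True
```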
  {
    "library": "authlib",
    "name": "validate_token_request",
    "source_code": "def validate_token_request(self):\n    device_code = self.request.payload.data.get('device_code')\n    if not device_code:\n        raise InvalidRequestError(\"Missing 'device_code' in payload\")\n    client = self.authenticate_token_endpoint_client()\n    if not client.check_grant_type(self.GRANT_TYPE):\n        raise UnauthorizedClientError(f\"The client is not authorized to use 'response_type={self.GRANT_TYPE}'\")\n    credential = self.query_device_credential(device_code)\n    if not credential:\n        raise InvalidRequestError(\"Invalid 'device_code' in payload\")\n    if credential.get_client_id() != client.get_client_id():\n        raise UnauthorizedClientError()\n    user = self.validate_device_credential(credential)\n    self.request.user = user\n    self.request.client = client\n    self.request.credential = credential",
    "docstring": "After displaying instructions to the user, the client creates an access token request and sends it to the token endpoint with the following parameters: grant_type REQUIRED. Value MUST be set to \"urn:ietf:params:oauth:grant-type:device_code\". device_code REQUIRED. The device verification code, \"device_code\" from the device authorization response. client_id REQUIRED if the client is not authenticating with the authorization server as described in Section 3.2.1. of [RFC6749]. The client identifier as described in Section 2.2 of [RFC6749]. For example, the client makes the following HTTPS request:: POST /token HTTP/1.1 Host: server.example.com Content-Type: application/x-www-form-urlencoded grant_type=urn%3Aietf%3Aparams%3Aoauth%3Agrant-type%3Adevice_code &device_code=GmRhmhcxhwAzkoEqiMEg_DnyEysNkuNhszIySk9eS &client_id=1406020730",
    "type": "method",
    "file_path": "authlib\\authlib\\oauth2\\rfc8628\\device_code.py",
    "ast_data": "FunctionDef name:validate_token_request arg:self arguments arg Assign Call If Raise Call Assign Call If Call Raise Call Assign Call If Raise Call If Compare Call Call Raise Call Assign Call Assign Assign Assign"
  },
  {
    "library": "scipy",
    "name": "_matvec",
    "source_code": "def _matvec(self, x):\n    return self.matmat(x.reshape(-1, 1))",
    "docstring": "Default matrix-vector multiplication handler. If self is a linear operator of shape (M, N), then this method will be called on a shape (N,) or (N, 1) ndarray, and should return a shape (M,) or (M, 1) ndarray. This default implementation falls back on _matmat, so defining that will define matrix-vector multiplication as well.",
    "type": "method",
    "file_path": "scipy\\scipy\\sparse\\linalg\\_interface.py",
    "ast_data": "FunctionDef name:_matvec arg:self arg:x arguments arg arg Return return:yes Call Call"
  },
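A sketch of the documented fallback: subclass `scipy.sparse.linalg.LinearOperator`, implement only `_matmat`, and `matvec` works through the default `_matvec` shown above. The `Doubler` operator is a made-up example.

```python
import numpy as np
from scipy.sparse.linalg import LinearOperator

class Doubler(LinearOperator):
    def __init__(self, n):
        super().__init__(dtype=np.float64, shape=(n, n))

    def _matmat(self, X):     # X: (n, k) block of column vectors
        return 2.0 * X

op = Doubler(3)
print(op.matvec(np.ones(3)))  # [2. 2. 2.], computed via the _matmat fallback
```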
  {
    "library": "tensorflow",
    "name": "after_create_session",
    "source_code": "def after_create_session(self, session, coord):\n    local_init_success, msg = session_manager._ready(self._ready_for_local_init_op, session, 'Model is not ready for SyncReplicasOptimizer local init.')\n    if not local_init_success:\n        raise RuntimeError('Init operations did not make model ready for SyncReplicasOptimizer local_init. Init op: %s, error: %s' % (self._local_init_op.name, msg))\n    session.run(self._local_init_op)\n    if self._init_tokens_op is not None:\n        session.run(self._init_tokens_op)\n    if self._q_runner is not None:\n        self._q_runner.create_threads(session, coord=coord, daemon=True, start=True)",
    "docstring": "Runs SyncReplicasOptimizer initialization ops.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\training\\sync_replicas_optimizer.py",
    "ast_data": "FunctionDef name:after_create_session arg:self arg:session arg:coord arguments arg arg arg Assign Call If Raise Call Call If Compare Call If Compare Call"
  },
  {
    "library": "tensorflow",
    "name": "convert_variables_to_tensors",
    "source_code": "def convert_variables_to_tensors(values):\n\n    def _convert_resource_variable_to_tensor(x):\n        if _pywrap_utils.IsResourceVariable(x):\n            return ops.convert_to_tensor(x)\n        elif isinstance(x, composite_tensor.CompositeTensor):\n            return composite_tensor.convert_variables_to_tensors(x)\n        else:\n            return x\n    return nest.map_structure(_convert_resource_variable_to_tensor, values)",
    "docstring": "Converts s in to s. If an object is a and overrides its method, its components will also be converted to s. Objects other than s in will be returned unchanged. Args: values: A nested structure of s, or any other objects. Returns: A new structure with s in converted to s.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\util\\variable_utils.py",
    "ast_data": "FunctionDef name:convert_variables_to_tensors arg:values arguments arg FunctionDef name:_convert_resource_variable_to_tensor arg:x arguments arg If Call Return return:yes Call If Call Return return:yes Call Return return:yes Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "op",
    "source_code": "@property\ndef op(self):\n    raise NotImplementedError",
    "docstring": "The of this variable.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\variables.py",
    "ast_data": "FunctionDef name:op arg:self arguments arg Raise"
  },
  {
    "library": "pandas",
    "name": "__repr__",
    "source_code": "def __repr__(self) -> str:\n    self.infer_axes()\n    s = self.shape\n    if s is not None:\n        if isinstance(s, (list, tuple)):\n            jshape = ','.join([pprint_thing(x) for x in s])\n            s = f'[{jshape}]'\n        return f'{self.pandas_type:12.12} (shape->{s})'\n    return self.pandas_type",
    "docstring": "return a pretty representation of myself",
    "type": "method",
    "file_path": "pandas\\pandas\\io\\pytables.py",
    "ast_data": "FunctionDef name:__repr__ arg:self arguments arg Call Assign If Compare If Call Assign Call Call Assign Return return:yes Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_check_signature",
    "source_code": "def _check_signature(api_signature, func):\n    func_argspec = tf_inspect.getargspec(func)\n    if func_argspec.varargs is not None and func_argspec.keywords is not None and (not func_argspec.args):\n        return\n    func_signature = tf_inspect.signature(func)\n    ok = len(api_signature.parameters) == len(func_signature.parameters)\n    if ok:\n        for param_1, param_2 in zip(api_signature.parameters.values(), func_signature.parameters.values()):\n            if param_1.name != param_2.name or param_1.kind != param_2.kind:\n                ok = False\n    if not ok:\n        raise ValueError(f\"Dispatch function's signature {func_signature} does not match API's signature {api_signature}.\")",
    "docstring": "Checks that a dispatch target's signature is compatible with an API. Args: api_signature: The signature of the TensorFlow API. func: The dispatch target. Raises: ValueError: if the signatures are incompatible. Two signatures are considered compatible if they have the same number of parameters, and all corresponding parameters have the same and . (Parameters are not required to have the same default value or the same annotation.)",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\util\\dispatch.py",
    "ast_data": "FunctionDef name:_check_signature arg:api_signature arg:func arguments arg arg Assign Call If BoolOp Compare Compare Return return:no Assign Call Assign Compare Call Call If For Call Call Call If BoolOp Compare Compare Assign If Raise Call"
  },
  {
    "library": "pytorch",
    "name": "save_config",
    "source_code": "def save_config(self) -> bytes:\n    ignored_keys = getattr(self, '_save_config_ignore', [])\n    return pickle.dumps(self._get_dict(ignored_keys=ignored_keys), protocol=2)",
    "docstring": "Convert config to a pickled blob",
    "type": "method",
    "file_path": "pytorch\\torch\\utils\\_config_module.py",
    "ast_data": "FunctionDef name:save_config arg:self arguments arg Assign Call Return return:yes Call Call"
  },
  {
    "library": "scipy",
    "name": "mmread",
    "source_code": "def mmread(source, *, spmatrix=True):\n    return MMFile().read(source, spmatrix=spmatrix)",
    "docstring": "Reads the contents of a Matrix Market file-like 'source' into a matrix. Parameters ---------- source : str or file-like Matrix Market filename (extensions .mtx, .mtz.gz) or open file-like object. spmatrix : bool, optional (default: True) If `` returns the data as sparse matrix in COO format. >>> m = mmread(StringIO(text), spmatrix=False) >>> m >>> m.toarray() array([[0., 0., 0., 0., 0.], [0., 0., 1., 0., 0.], [0., 0., 0., 2., 3.], [4., 5., 6., 7., 0.], [0., 0., 0., 0., 0.]])",
    "type": "function",
    "file_path": "scipy\\scipy\\io\\_mmio.py",
    "ast_data": "FunctionDef name:mmread arg:source arguments arg arg Return return:yes Call Call"
  },
  {
    "library": "seaborn",
    "name": "_define_support_grid",
    "source_code": "def _define_support_grid(self, x, bw, cut, clip, gridsize):\n    clip_lo = -np.inf if clip[0] is None else clip[0]\n    clip_hi = +np.inf if clip[1] is None else clip[1]\n    gridmin = max(x.min() - bw * cut, clip_lo)\n    gridmax = min(x.max() + bw * cut, clip_hi)\n    return np.linspace(gridmin, gridmax, gridsize)",
    "docstring": "Create the grid of evaluation points depending for vector x.",
    "type": "method",
    "file_path": "seaborn\\seaborn\\_statistics.py",
    "ast_data": "FunctionDef name:_define_support_grid arg:self arg:x arg:bw arg:cut arg:clip arg:gridsize arguments arg arg arg arg arg arg Assign Compare Assign Compare Assign Call Call Assign Call Call Return return:yes Call"
  },
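An equivalent standalone computation of the KDE support grid above; the bandwidth, cut, clip, and gridsize values are illustrative.

```python
import numpy as np

x = np.random.default_rng(0).normal(size=100)
bw, cut, clip, gridsize = 0.3, 3, (None, None), 200
clip_lo = -np.inf if clip[0] is None else clip[0]
clip_hi = +np.inf if clip[1] is None else clip[1]
grid = np.linspace(max(x.min() - bw * cut, clip_lo),
                   min(x.max() + bw * cut, clip_hi), gridsize)
print(grid.shape)  # (200,): evaluation points spanning the data +/- bw * cut
```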
  {
    "library": "tensorflow",
    "name": "Hints",
    "source_code": "@tf_export('distribute.experimental.CollectiveHints')\nclass Hints(object):\n\n    @deprecation.deprecated(None, 'use distribute.experimental.CommunicationOptions instead')\n    def __new__(cls, bytes_per_pack=0, timeout_seconds=None):\n        return Options(bytes_per_pack=bytes_per_pack, timeout_seconds=timeout_seconds)\n\n    def __init__(self, bytes_per_pack=0, timeout_seconds=None):\n        pass",
    "docstring": "Hints for collective operations like AllReduce. This can be passed to methods like to optimize collective operation performance. Note that these are only hints, which may or may not change the actual behavior. Some options only apply to certain strategy and are ignored by others. One common optimization is to break gradients all-reduce into multiple packs so that weight updates can overlap with gradient all-reduce. Examples: - bytes_per_pack - timeout_seconds",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\collective_util.py",
    "ast_data": "ClassDef name:Hints FunctionDef name:__new__ arg:cls arg:bytes_per_pack arg:timeout_seconds arguments arg arg arg Return return:yes Call Call FunctionDef name:__init__ arg:self arg:bytes_per_pack arg:timeout_seconds arguments arg arg arg Call"
  },
  {
    "library": "tensorflow",
    "name": "_convert",
    "source_code": "@staticmethod\ndef _convert(value, dtype=None):\n    result = numpy_compat.np_asarray(value, dtype=dtype, order='C')\n    if result.dtype.char == 'S' and result is not value:\n        return numpy_compat.np_asarray(value, order='C', dtype=object)\n    elif result.dtype.char == 'U' and result is not value:\n        value = np.vectorize(lambda x: x.encode('utf8'))(value)\n        return numpy_compat.np_asarray(value, order='C', dtype=object)\n    elif result.dtype.char == 'U':\n        return result.astype(np.bytes_)\n    else:\n        return result",
    "docstring": "Converts an arg to numpy, avoiding dangerous string and unicode dtypes. Numpy pads with zeros when using string and unicode dtypes if different components of a tensor have different lengths. This is bad: ignoring the padding is wrong for text data, and removing the padding is wrong for binary data. To avoid this bug, we redo the conversion using an object dtype. Additionally, we convert unicode strings to (byte-)strings for compatibility. Args: value: Value to convert to a numpy array. dtype: (Optional.) Desired NumPy type for the returned value. Returns: A numpy array.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\script_ops.py",
    "ast_data": "FunctionDef name:_convert arg:value arg:dtype arguments arg arg Assign Call If BoolOp Compare Compare Return return:yes Call If BoolOp Compare Compare Assign Call Call arguments arg Call Return return:yes Call If Compare Return return:yes Call Return return:yes"
  },
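The zero-padding hazard the docstring describes, shown with plain numpy: fixed-width `'S'` dtypes strip trailing NUL bytes, which corrupts binary data, while an object dtype preserves each byte string exactly. The values are illustrative.

```python
import numpy as np

vals = [b"ab\x00", b"abcd"]
arr = np.asarray(vals)                 # dtype='S4' (fixed-width bytes)
print(arr[0])                          # b'ab' -- trailing NUL byte lost
obj = np.asarray(vals, dtype=object)
print(obj[0])                          # b'ab\x00' -- preserved exactly
```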
  {
    "library": "pytorch",
    "name": "add_tensor",
    "source_code": "def add_tensor(self, tag, tensor, global_step=None, walltime=None):\n    torch._C._log_api_usage_once('tensorboard.logging.add_tensor')\n    summary = tensor_proto(tag, tensor)\n    self._get_file_writer().add_summary(summary, global_step, walltime)",
    "docstring": "Add tensor data to summary. Args: tag (str): Data identifier tensor (torch.Tensor): tensor to save global_step (int): Global step value to record Examples:: from torch.utils.tensorboard import SummaryWriter writer = SummaryWriter() x = torch.tensor([1,2,3]) writer.add_scalar('x', x) writer.close() Expected result: Summary::tensor::float_val [1,2,3] ::tensor::shape [3] ::tag 'x'",
    "type": "method",
    "file_path": "pytorch\\torch\\utils\\tensorboard\\writer.py",
    "ast_data": "FunctionDef name:add_tensor arg:self arg:tag arg:tensor arg:global_step arg:walltime arguments arg arg arg arg arg Call Assign Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "assert_existing_objects_matched",
    "source_code": "def assert_existing_objects_matched(self):\n    raise AssertionError('No checkpoint specified (save_path=None); nothing is being restored.')",
    "docstring": "Assertion for consistency with . Always fails.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\checkpoint\\checkpoint.py",
    "ast_data": "FunctionDef name:assert_existing_objects_matched arg:self arguments arg Raise Call"
  },
  {
    "library": "pytorch",
    "name": "force_save_collectives",
    "source_code": "def force_save_collectives(joint_module: fx.GraphModule) -> None:\n    for node in joint_module.graph.nodes:\n        if isinstance(node.target, torch._ops.OpOverload) and node.target.namespace == '_c10d_functional' and (not must_recompute(node)):\n            node.meta['recompute'] = CheckpointPolicy.MUST_SAVE",
    "docstring": "By default, the partitioner is not allowed to recompute collectives unless they come from a user-annotated AC region. See Note [Recomputing collectives in the partitioner]",
    "type": "function",
    "file_path": "pytorch\\torch\\_functorch\\partitioners.py",
    "ast_data": "FunctionDef name:force_save_collectives arg:joint_module arguments arg For If BoolOp Call Compare Call Assign"
  },
  {
    "library": "tensorflow",
    "name": "unstack",
    "source_code": "@tf_should_use.should_use_result\ndef unstack(self, value, name=None):\n    return self._implementation.unstack(value, name=name)",
    "docstring": "Unstack the values of a in the TensorArray. If input value shapes have rank-, then the output TensorArray will contain elements whose shapes are rank-. Args: value: (N+1)-D. Tensor of type . The Tensor to unstack. name: A name for the operation (optional). Returns: A new TensorArray object with flow that ensures the unstack occurs. Use this object for all subsequent operations. Raises: ValueError: if the shape inference fails.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\tensor_array_ops.py",
    "ast_data": "FunctionDef name:unstack arg:self arg:value arg:name arguments arg arg arg Return return:yes Call"
  },
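A public-API usage sketch: unstack a rank-2 Tensor into rank-1 TensorArray elements, keeping the returned object as the docstring instructs. The values are illustrative.

```python
import tensorflow as tf

ta = tf.TensorArray(tf.float32, size=3)
ta = ta.unstack(tf.constant([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]]))
print(ta.read(1))  # tf.Tensor([3. 4.], shape=(2,), dtype=float32)
```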
  {
    "library": "kornia",
    "name": "matrix",
    "source_code": "def matrix(self) -> Tensor:\n    row0 = stack((self.z.real, -self.z.imag), -1)\n    row1 = stack((self.z.imag, self.z.real), -1)\n    return stack((row0, row1), -2)",
    "docstring": "Convert the complex number to a rotation matrix of shape :math:. Example: >>> s = So2.identity() >>> m = s.matrix() >>> m tensor([[1., -0.], [0., 1.]], grad_fn=)",
    "type": "method",
    "file_path": "kornia\\kornia\\geometry\\liegroup\\so2.py",
    "ast_data": "FunctionDef name:matrix arg:self arguments arg Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "numpy",
    "name": "add_headers",
    "source_code": "def add_headers(self, *files):\n    headers = []\n    for path in files:\n        if is_string(path):\n            [headers.append((self.name, p)) for p in self.paths(path)]\n        else:\n            if not isinstance(path, (tuple, list)) or len(path) != 2:\n                raise TypeError(repr(path))\n            [headers.append((path[0], p)) for p in self.paths(path[1])]\n    dist = self.get_distribution()\n    if dist is not None:\n        if dist.headers is None:\n            dist.headers = []\n        dist.headers.extend(headers)\n    else:\n        self.headers.extend(headers)",
    "docstring": "Add installable headers to configuration. Add the given sequence of files to the beginning of the headers list. By default, headers will be installed under // directory. If an item of files is a tuple, then its first argument specifies the actual installation location relative to the path. Parameters ---------- files : str or seq Argument(s) can be either: * 2-sequence (,) * path(s) to header file(s) where python includedir suffix will default to package name.",
    "type": "method",
    "file_path": "numpy\\numpy\\distutils\\misc_util.py",
    "ast_data": "FunctionDef name:add_headers arg:self arguments arg arg Assign For If Call Call Call If BoolOp Call Compare Call Raise Call Call Call Call Assign Call If Compare If Compare Assign Call Call"
  },
  {
    "library": "pytorch",
    "name": "_error",
    "source_code": "def _error(self, pf: PythonFileT, result: LintResult) -> None:\n    pass",
    "docstring": "Called on files that are unparseable",
    "type": "method",
    "file_path": "pytorch\\tools\\linter\\adapters\\_linter.py",
    "ast_data": "FunctionDef name:_error arg:self arg:pf arg:result arguments arg arg arg"
  },
  {
    "library": "pytorch",
    "name": "_SymIntOutputStub",
    "source_code": "@dataclass_slots\n@dataclass\nclass _SymIntOutputStub:\n    value: Union[int, _DeconstructedSymNode]\n\n    def __init__(self, value: SymInt, key_path: Optional[int]) -> None:\n        if key_path is None:\n            self.value = _DeconstructedSymNode.from_node(value.node)\n        else:\n            self.value = key_path\n\n    def extract(self, key: _DispatchCacheKey, shape_env: ShapeEnv) -> SymInt:\n        if isinstance(self.value, _DeconstructedSymNode):\n            return SymInt(self.value.extract(shape_env))\n        else:\n            src = key.key[self.value]\n            assert isinstance(src, _PySymInputStub) and isinstance(src.value, SymInt)\n            return src.value\n\n    def __repr__(self) -> str:\n        return f'_SymIntOutputStub({self.value!r})'\n\n    def __eq__(self, other: object) -> bool:\n        raise NotImplementedError\n\n    def __hash__(self) -> int:\n        raise NotImplementedError",
    "docstring": "Represents a SymInt in the cached output.",
    "type": "class",
    "file_path": "pytorch\\torch\\_subclasses\\_fake_tensor_utils.py",
    "ast_data": "ClassDef name:_SymIntOutputStub FunctionDef name:__init__ arg:self arg:value arg:key_path arguments arg arg arg If Compare Assign Call Assign FunctionDef name:extract arg:self arg:key arg:shape_env arguments arg arg arg If Call Return return:yes Call Call Assign BoolOp Call Call Return return:yes FunctionDef name:__repr__ arg:self arguments arg Return return:yes FunctionDef name:__eq__ arg:self arg:other arguments arg arg Raise FunctionDef name:__hash__ arg:self arguments arg Raise"
  },
  {
    "library": "cryptography",
    "name": "__eq__",
    "source_code": "@abc.abstractmethod\ndef __eq__(self, other: object) -> bool:\n    pass",
    "docstring": "Checks equality.",
    "type": "method",
    "file_path": "cryptography\\src\\cryptography\\hazmat\\primitives\\asymmetric\\dh.py",
    "ast_data": "FunctionDef name:__eq__ arg:self arg:other arguments arg arg"
  },
  {
    "library": "scipy",
    "name": "orthogonality",
    "source_code": "def orthogonality(A, g):\n    norm_g = np.linalg.norm(g)\n    if issparse(A):\n        norm_A = scipy.sparse.linalg.norm(A, ord='fro')\n    else:\n        norm_A = np.linalg.norm(A, ord='fro')\n    if norm_g == 0 or norm_A == 0:\n        return 0\n    norm_A_g = np.linalg.norm(A.dot(g))\n    orth = norm_A_g / (norm_A * norm_g)\n    return orth",
    "docstring": "Measure orthogonality between a vector and the null space of a matrix. Compute a measure of orthogonality between the null space of the (possibly sparse) matrix ``. References ---------- .. [1] Gould, Nicholas IM, Mary E. Hribar, and Jorge Nocedal. \"On the solution of equality constrained quadratic programming problems arising in optimization.\" SIAM Journal on Scientific Computing 23.4 (2001): 1376-1395.",
    "type": "function",
    "file_path": "scipy\\scipy\\optimize\\_trustregion_constr\\projections.py",
    "ast_data": "FunctionDef name:orthogonality arg:A arg:g arguments arg arg Assign Call If Call Assign Call Assign Call If BoolOp Compare Compare Return return:yes Assign Call Call Assign Return return:yes"
  },
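A numeric check of the measure above with dense numpy: a vector in the row space of `A` scores high, while a vector in the null space scores 0. The matrix is an illustrative example.

```python
import numpy as np

A = np.array([[1.0, 0.0, 0.0],
              [0.0, 1.0, 0.0]])

def orth(A, g):
    # orth = norm(A g) / (norm(A, 'fro') * norm(g)), as in the docstring above
    return np.linalg.norm(A @ g) / (np.linalg.norm(A, "fro") * np.linalg.norm(g))

print(orth(A, np.array([1.0, 1.0, 0.0])))  # large: g lies in the row space
print(orth(A, np.array([0.0, 0.0, 1.0])))  # 0.0: g lies in the null space
```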
  {
    "library": "pandas",
    "name": "_simple_new",
    "source_code": "@classmethod\ndef _simple_new(cls, values: ArrayLike, name: Hashable | None=None, refs=None) -> Self:\n    assert isinstance(values, cls._data_cls), type(values)\n    result = object.__new__(cls)\n    result._data = values\n    result._name = name\n    result._cache = {}\n    result._reset_identity()\n    if refs is not None:\n        result._references = refs\n    else:\n        result._references = BlockValuesRefs()\n    result._references.add_index_reference(result)\n    return result",
    "docstring": "We require that we have a dtype compat for the values. If we are passed a non-dtype compat, then coerce using the constructor. Must be careful not to recurse.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\indexes\\base.py",
    "ast_data": "FunctionDef name:_simple_new arg:cls arg:values arg:name arg:refs arguments arg arg arg arg Call Call Assign Call Assign Assign Assign Call If Compare Assign Assign Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "get",
    "source_code": "def get(self, key):\n    if key in self._feature_tensors:\n        return self._feature_tensors[key]\n    if key in self._features:\n        feature_tensor = self._get_raw_feature_as_tensor(key)\n        self._feature_tensors[key] = feature_tensor\n        return feature_tensor\n    if isinstance(key, six.string_types):\n        raise ValueError('Feature {} is not in features dictionary.'.format(key))\n    if not isinstance(key, _FeatureColumn):\n        raise TypeError('\"key\" must be either a \"str\" or \"_FeatureColumn\". Provided: {}'.format(key))\n    column = key\n    logging.debug('Transforming feature_column %s.', column)\n    transformed = column._transform_feature(self)\n    if transformed is None:\n        raise ValueError('Column {} is not supported.'.format(column.name))\n    self._feature_tensors[column] = transformed\n    return transformed",
    "docstring": "Returns a for the given key. A key is used to access a base feature (not-transformed). When a is passed, the transformed feature is returned if it already exists, otherwise the given is asked to provide its transformed output, which is then cached. Args: key: a or a . Returns: The transformed corresponding to the . Raises: ValueError: if key is not found or a transformed cannot be computed.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\feature_column\\feature_column.py",
    "ast_data": "FunctionDef name:get arg:self arg:key arguments arg arg If Compare Return return:yes If Compare Assign Call Assign Return return:yes If Call Raise Call Call If Call Raise Call Call Assign Call Assign Call If Compare Raise Call Call Assign Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "vector_to_parameters",
    "source_code": "def vector_to_parameters(vec: torch.Tensor, parameters: Iterable[torch.Tensor]) -> None:\n    if not isinstance(vec, torch.Tensor):\n        raise TypeError(f'expected torch.Tensor, but got: {torch.typename(vec)}')\n    param_device = None\n    pointer = 0\n    for param in parameters:\n        param_device = _check_param_device(param, param_device)\n        num_param = param.numel()\n        param.data = vec[pointer:pointer + num_param].view_as(param).data\n        pointer += num_param",
    "docstring": "Copy slices of a vector into an iterable of parameters. Args: vec (Tensor): a single vector representing the parameters of a model. parameters (Iterable[Tensor]): an iterable of Tensors that are the parameters of a model.",
    "type": "function",
    "file_path": "pytorch\\torch\\nn\\utils\\convert_parameters.py",
    "ast_data": "FunctionDef name:vector_to_parameters arg:vec arg:parameters arguments arg arg If Call Raise Call Call Assign Assign For Assign Call Assign Call Assign Call"
  },
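A round trip with the companion `torch.nn.utils.parameters_to_vector` helper; the `nn.Linear` layer is an arbitrary example model.

```python
import torch
from torch import nn
from torch.nn.utils import parameters_to_vector, vector_to_parameters

model = nn.Linear(4, 2)
vec = parameters_to_vector(model.parameters())
vector_to_parameters(torch.zeros_like(vec), model.parameters())
print(model.weight.abs().sum().item())  # 0.0: parameters overwritten in place
```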
  {
    "library": "cryptography",
    "name": "verify",
    "source_code": "@abc.abstractmethod\ndef verify(self, signature: utils.Buffer, data: utils.Buffer, signature_algorithm: EllipticCurveSignatureAlgorithm) -> None:\n    pass",
    "docstring": "Verifies the signature of the data.",
    "type": "method",
    "file_path": "cryptography\\src\\cryptography\\hazmat\\primitives\\asymmetric\\ec.py",
    "ast_data": "FunctionDef name:verify arg:self arg:signature arg:data arg:signature_algorithm arguments arg arg arg arg"
  },
  {
    "library": "tensorflow",
    "name": "name",
    "source_code": "@property\ndef name(self):\n    return '{}_weighted_by_{}'.format(self.categorical_column.name, self.weight_feature_key)",
    "docstring": "See base class.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\feature_column\\feature_column_v2.py",
    "ast_data": "FunctionDef name:name arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "set_linewidth",
    "source_code": "def set_linewidth(self, w):\n    self._linewidth = float(w)",
    "docstring": "Set the linewidth in points.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\backend_bases.py",
    "ast_data": "FunctionDef name:set_linewidth arg:self arg:w arguments arg arg Assign Call"
  },
  {
    "library": "tensorflow",
    "name": "zeros_like",
    "source_code": "def zeros_like(t):\n    if t.dtype == dtypes.resource:\n        return array_ops.zeros(*shape_and_dtype(t))\n    else:\n        return array_ops.zeros_like(t)",
    "docstring": "Like array_ops.zeros_like, but respects resource handles.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\default_gradient.py",
    "ast_data": "FunctionDef name:zeros_like arg:t arguments arg If Compare Return return:yes Call Call Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "trimmed_mean",
    "source_code": "def trimmed_mean(a, limits=(0.1, 0.1), inclusive=(1, 1), relative=True, axis=None):\n    if not isinstance(limits, tuple) and isinstance(limits, float):\n        limits = (limits, limits)\n    if relative:\n        return trimr(a, limits=limits, inclusive=inclusive, axis=axis).mean(axis=axis)\n    else:\n        return trima(a, limits=limits, inclusive=inclusive).mean(axis=axis)",
    "docstring": "Returns the trimmed mean of the data along the given axis. %s",
    "type": "function",
    "file_path": "scipy\\scipy\\stats\\_mstats_basic.py",
    "ast_data": "FunctionDef name:trimmed_mean arg:a arg:limits arg:inclusive arg:relative arg:axis arguments arg arg arg arg arg If BoolOp Call Call Assign If Return return:yes Call Call Return return:yes Call Call"
  },
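Usage through the public `scipy.stats.mstats` namespace; the data values are illustrative. A scalar `limits` is expanded to `(limits, limits)`, matching the conversion in the code above.

```python
import numpy as np
from scipy.stats import mstats

data = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 100])
print(mstats.trimmed_mean(data))              # default (0.1, 0.1) trims the 100
print(mstats.trimmed_mean(data, limits=0.2))  # scalar limit applied to both tails
```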
  {
    "library": "scipy",
    "name": "_format_A_constraints",
    "source_code": "def _format_A_constraints(A, n_x, sparse_lhs=False):\n    if sparse_lhs:\n        return sps.coo_array((0, n_x) if A is None else A, dtype=float, copy=True)\n    elif A is None:\n        return np.zeros((0, n_x), dtype=float)\n    else:\n        return np.array(A, dtype=float, copy=True)",
    "docstring": "Format the left hand side of the constraints to a 2-D array Parameters ---------- A : 2-D array 2-D array such that `A_ubA_eq`.",
    "type": "function",
    "file_path": "scipy\\scipy\\optimize\\_linprog_util.py",
    "ast_data": "FunctionDef name:_format_A_constraints arg:A arg:n_x arg:sparse_lhs arguments arg arg arg If Return return:yes Call Compare If Compare Return return:yes Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_visit_internal",
    "source_code": "def _visit_internal(self, mode):\n    assert mode in (_WalkMode.FORWARD, _WalkMode.REVERSE)\n    if mode == _WalkMode.FORWARD:\n        open_ = [self.graph.entry]\n    elif mode == _WalkMode.REVERSE:\n        open_ = list(self.graph.exit)\n    closed = set()\n    while open_:\n        node = open_.pop(0)\n        closed.add(node)\n        should_revisit = self.visit_node(node)\n        if mode == _WalkMode.FORWARD:\n            children = node.next\n        elif mode == _WalkMode.REVERSE:\n            children = node.prev\n        for next_ in children:\n            if should_revisit or next_ not in closed:\n                open_.append(next_)",
    "docstring": "Visits the CFG, breadth-first.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\autograph\\pyct\\cfg.py",
    "ast_data": "FunctionDef name:_visit_internal arg:self arg:mode arguments arg arg Compare If Compare Assign If Compare Assign Call Assign Call While Assign Call Call Assign Call If Compare Assign If Compare Assign For If BoolOp Compare Call"
  },
  {
    "library": "pytorch",
    "name": "__repr__",
    "source_code": "def __repr__(self) -> str:\n    self._lazy_init()\n    skip_line, newline = ('MEASUREMENT_REPR_SKIP_LINE', '\\n')\n    n = len(self._sorted_times)\n    time_unit, time_scale = select_unit(self._median)\n    iqr_filter = '' if n >= 4 else skip_line\n    repr_str = f'\\n{super().__repr__()}\\n{self.task_spec.summarize()}\\n  {('Median: ' if n > 1 else '')}{self._median / time_scale:.2f} {time_unit}\\n  {iqr_filter}IQR:    {self.iqr / time_scale:.2f} {time_unit} ({self._p25 / time_scale:.2f} to {self._p75 / time_scale:.2f})\\n  {n} measurement{('s' if n > 1 else '')}, {self.number_per_run} runs {('per measurement,' if n > 1 else ',')} {self.num_threads} thread{('s' if self.num_threads > 1 else '')}\\n{newline.join(self._warnings)}'.strip()\n    return '\\n'.join((l for l in repr_str.splitlines(keepends=False) if skip_line not in l))",
    "docstring": "Example repr: Broadcasting add (4x8) Median: 5.73 us IQR: 2.25 us (4.01 to 6.26) 372 measurements, 100 runs per measurement, 1 thread WARNING: Interquartile range is 39.4% of the median measurement. This suggests significant environmental influence.",
    "type": "method",
    "file_path": "pytorch\\torch\\utils\\benchmark\\utils\\common.py",
    "ast_data": "FunctionDef name:__repr__ arg:self arguments arg Call Assign Assign Call Assign Call Assign Compare Assign Call Call Call Call Compare Compare Compare Compare Call Return return:yes Call Call Compare"
  },
  {
    "library": "tensorflow",
    "name": "_CheckNumericsV2Grad",
    "source_code": "@ops.RegisterGradient('CheckNumericsV2')\ndef _CheckNumericsV2Grad(op: ops.Operation, grad):\n    return array_ops.check_numerics_v2(grad, 'Not a number (NaN) or infinity (Inf) values detected in gradient. %s' % op.get_attr('message'))",
    "docstring": "Gradient for check_numerics op.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\array_grad.py",
    "ast_data": "FunctionDef name:_CheckNumericsV2Grad arg:op arg:grad arguments arg arg Return return:yes Call Call Call"
  },
  {
    "library": "cherrypy",
    "name": "after",
    "source_code": "@classmethod\ndef after(cls, elapsed):\n    return cls(datetime.datetime.now(datetime.timezone.utc) + elapsed)",
    "docstring": "Return a timer that will expire after passes.",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\lib\\locking.py",
    "ast_data": "FunctionDef name:after arg:cls arg:elapsed arguments arg arg Return return:yes Call Call"
  },
  {
    "library": "pandas",
    "name": "pyarrow_array_to_numpy_and_mask",
    "source_code": "def pyarrow_array_to_numpy_and_mask(arr, dtype: np.dtype) -> tuple[np.ndarray, np.ndarray]:\n    dtype = np.dtype(dtype)\n    if pyarrow.types.is_null(arr.type):\n        data = np.empty(len(arr), dtype=dtype)\n        mask = np.zeros(len(arr), dtype=bool)\n        return (data, mask)\n    buflist = arr.buffers()\n    offset = arr.offset * dtype.itemsize\n    length = len(arr) * dtype.itemsize\n    data_buf = buflist[1][offset:offset + length]\n    data = np.frombuffer(data_buf, dtype=dtype)\n    bitmask = buflist[0]\n    if bitmask is not None:\n        mask = pyarrow.BooleanArray.from_buffers(pyarrow.bool_(), len(arr), [None, bitmask], offset=arr.offset)\n        mask = np.asarray(mask)\n    else:\n        mask = np.ones(len(arr), dtype=bool)\n    return (data, mask)",
    "docstring": "Convert a primitive pyarrow.Array to a numpy array and boolean mask based on the buffers of the Array. At the moment pyarrow.BooleanArray is not supported. Parameters ---------- arr : pyarrow.Array dtype : numpy.dtype Returns ------- (data, mask) Tuple of two numpy arrays with the raw data (with specified dtype) and a boolean mask (validity mask, so False means missing)",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\arrays\\arrow\\_arrow_utils.py",
    "ast_data": "FunctionDef name:pyarrow_array_to_numpy_and_mask arg:arr arg:dtype arguments arg arg Assign Call If Call Assign Call Call Assign Call Call Return return:yes Assign Call Assign Assign Call Assign Assign Call Assign If Compare Assign Call Call Call Assign Call Assign Call Call Return return:yes"
  },
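A buffer-level view matching the helper above: a primitive pyarrow array is a validity bitmap plus a values buffer. The array contents are illustrative.

```python
import numpy as np
import pyarrow as pa

arr = pa.array([1, None, 3], type=pa.int64())
validity, values = arr.buffers()   # [validity bitmap, values buffer]
data = np.frombuffer(values, dtype=np.int64)[: len(arr)]
print(data)                        # slot 1 holds an undefined value
print(arr.is_valid().to_pylist())  # [True, False, True] -- the validity mask
```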
  {
    "library": "pandas",
    "name": "invert",
    "source_code": "def invert(self):\n    raise NotImplementedError('cannot use an invert condition when passing to numexpr')",
    "docstring": "invert the condition",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\computation\\pytables.py",
    "ast_data": "FunctionDef name:invert arg:self arguments arg Raise Call"
  },
  {
    "library": "cherrypy",
    "name": "CherryPyException",
    "source_code": "class CherryPyException(Exception):\n    pass",
    "docstring": "A base class for CherryPy exceptions.",
    "type": "class",
    "file_path": "cherrypy\\cherrypy\\_cperror.py",
    "ast_data": "ClassDef name:CherryPyException"
  },
  {
    "library": "scikit-learn",
    "name": "_radius_neighbors_reduce_func",
    "source_code": "def _radius_neighbors_reduce_func(self, dist, start, radius, return_distance):\n    neigh_ind = [np.where(d <= radius)[0] for d in dist]\n    if return_distance:\n        if self.effective_metric_ == 'euclidean':\n            dist = [np.sqrt(d[neigh_ind[i]]) for i, d in enumerate(dist)]\n        else:\n            dist = [d[neigh_ind[i]] for i, d in enumerate(dist)]\n        results = (dist, neigh_ind)\n    else:\n        results = neigh_ind\n    return results",
    "docstring": "Reduce a chunk of distances to the nearest neighbors. Callback to :func: Parameters ---------- dist : ndarray of shape (n_samples_chunk, n_samples) The distance matrix. start : int The index in X which the first row of dist corresponds to. radius : float The radius considered when making the nearest neighbors search. return_distance : bool Whether or not to return the distances. Returns ------- dist : list of ndarray of shape (n_samples_chunk,) Returned only if . neigh : list of ndarray of shape (n_samples_chunk,) The neighbors indices.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\neighbors\\_base.py",
    "ast_data": "FunctionDef name:_radius_neighbors_reduce_func arg:self arg:dist arg:start arg:radius arg:return_distance arguments arg arg arg arg arg Assign Call Compare If If Compare Assign Call Call Assign Call Assign Assign Return return:yes"
  },
  {
    "library": "kornia",
    "name": "reset_model",
    "source_code": "def reset_model(self) -> None:\n    torch.nn.init.eye_(self.model)",
    "docstring": "Initialize the model with identity transform.",
    "type": "method",
    "file_path": "kornia\\kornia\\geometry\\transform\\image_registrator.py",
    "ast_data": "FunctionDef name:reset_model arg:self arguments arg Call"
  },
  {
    "library": "tensorflow",
    "name": "_get_tensor_watch_key",
    "source_code": "def _get_tensor_watch_key(node_name, output_slot, debug_op):\n    return '%s:%s' % (_get_tensor_name(node_name, output_slot), debug_op)",
    "docstring": "Get the string representation of a debug watch on a tensor. Args: node_name: Name of the node by which the watched tensor is produced, as a string. output_slot: Output slot index of the tensor, as an integer. debug_op: Name of the debug op that is used to watch the tensor, as a string. Returns: A string representing the debug watch on the tensor (i.e., the \"watch key\").",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\debug\\lib\\debug_data.py",
    "ast_data": "FunctionDef name:_get_tensor_watch_key arg:node_name arg:output_slot arg:debug_op arguments arg arg arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "script_qconfig_dict",
    "source_code": "def script_qconfig_dict(qconfig_dict):\n    return {k: script_qconfig(v) if v else None for k, v in qconfig_dict.items()}",
    "docstring": "Helper function used by . Apply for all entries in that is not None.",
    "type": "function",
    "file_path": "pytorch\\torch\\ao\\quantization\\quantize_jit.py",
    "ast_data": "FunctionDef name:script_qconfig_dict arg:qconfig_dict arguments arg Return return:yes Call Call"
  },
  {
    "library": "django",
    "name": "has_module_perms",
    "source_code": "def has_module_perms(self, user_obj, app_label):\n    return user_obj.is_active and any((perm[:perm.index('.')] == app_label for perm in self.get_all_permissions(user_obj)))",
    "docstring": "Return True if user_obj has any permissions in the given app_label.",
    "type": "method",
    "file_path": "django\\django\\contrib\\auth\\backends.py",
    "ast_data": "FunctionDef name:has_module_perms arg:self arg:user_obj arg:app_label arguments arg arg arg Return return:yes BoolOp Call Compare Call Call"
  },
  {
    "library": "cherrypy",
    "name": "footer",
    "source_code": "def footer(self):\n    return '\\n            </body>\\n            </html>\\n        '",
    "docstring": "Render HTML layout footer.",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\tutorial\\tut05_derived_objects.py",
    "ast_data": "FunctionDef name:footer arg:self arguments arg Return return:yes"
  },
  {
    "library": "numpy",
    "name": "contents",
    "source_code": "@property\ndef contents(self):\n    full_dtype = np.dtype((self._dtype_, self._shape_))\n    full_ctype = ctypes.c_char * full_dtype.itemsize\n    buffer = ctypes.cast(self, ctypes.POINTER(full_ctype)).contents\n    return np.frombuffer(buffer, dtype=full_dtype).squeeze(axis=0)",
    "docstring": "Get an ndarray viewing the data pointed to by this pointer. This mirrors the attribute of a normal ctypes pointer",
    "type": "method",
    "file_path": "numpy\\numpy\\ctypeslib\\_ctypeslib.py",
    "ast_data": "FunctionDef name:contents arg:self arguments arg Assign Call Assign Assign Call Call Return return:yes Call Call"
  },
  {
    "library": "scipy",
    "name": "exp",
    "source_code": "def exp(X, /):\n    return MonotonicTransformedDistribution(X, g=np.exp, h=np.log, dh=lambda u: 1 / u, logdh=lambda u: -np.log(u))",
    "docstring": "Natural exponential of a random variable Parameters ---------- X : The random variable :math:. Returns ------- Y : A random variable :math:. Examples -------- Suppose we have a normally distributed random variable :math:: >>> import numpy as np >>> from scipy import stats >>> X = stats.Normal() We wish to have a lognormally distributed random variable :math:, a random variable whose natural logarithm is :math:. If :math: is to be the natural logarithm of :math:, then we must take :math: to be the natural exponential of :math:. >>> Y = stats.exp(X) To demonstrate that `Xlog(y)`')) >>> plt.show()",
    "type": "function",
    "file_path": "scipy\\scipy\\stats\\_distribution_infrastructure.py",
    "ast_data": "FunctionDef name:exp arguments arg Return return:yes Call arguments arg arguments arg Call"
  },
  {
    "library": "pandas",
    "name": "_maybe_infer_dtype_type",
    "source_code": "def _maybe_infer_dtype_type(element):\n    tipo = None\n    if hasattr(element, 'dtype'):\n        tipo = element.dtype\n    elif is_list_like(element):\n        element = np.asarray(element)\n        tipo = element.dtype\n    return tipo",
    "docstring": "Try to infer an object's dtype, for use in arithmetic ops. Uses if that's available. Objects implementing the iterator protocol are cast to a NumPy array, and from there the array's type is used. Parameters ---------- element : object Possibly has a attribute, and possibly the iterator protocol. Returns ------- tipo : type Examples -------- >>> from collections import namedtuple >>> Foo = namedtuple(\"Foo\", \"dtype\") >>> _maybe_infer_dtype_type(Foo(np.dtype(\"i8\"))) dtype('int64')",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\dtypes\\cast.py",
    "ast_data": "FunctionDef name:_maybe_infer_dtype_type arg:element arguments arg Assign If Call Assign If Call Assign Call Assign Return return:yes"
  },
  {
    "library": "kornia",
    "name": "STEFunction",
    "source_code": "class STEFunction(Function):\n\n    @staticmethod\n    def forward(ctx: Any, input: Tensor, output: Tensor, grad_fn: Optional[Callable[..., Any]]=None) -> Tensor:\n        ctx.in_shape = input.shape\n        ctx.out_shape = output.shape\n        ctx.grad_fn = grad_fn\n        return output\n\n    @staticmethod\n    def backward(ctx: Any, grad_output: Tensor) -> Tuple[Tensor, Tensor, None]:\n        if ctx.grad_fn is None:\n            return (grad_output.sum_to_size(ctx.in_shape), grad_output.sum_to_size(ctx.out_shape), None)\n        return (ctx.grad_fn(grad_output.sum_to_size(ctx.in_shape)), ctx.grad_fn(grad_output.sum_to_size(ctx.out_shape)), None)",
    "docstring": "Straight-Through Estimation (STE) function. STE bridges the gradients between the input tensor and output tensor as if the function was an identity function. Meanwhile, advanced gradient functions are also supported. e.g. the output gradients can be mapped into [-1, 1] with `` estimated from STE. >>> input = torch.randn(4, requires_grad = True) >>> output = torch.sign(input) >>> loss = output.mean() >>> loss.backward() >>> input.grad tensor([0., 0., 0., 0.]) >>> with torch.no_grad(): ... output = torch.sign(input) >>> out_est = STEFunction.apply(input, output) >>> loss = out_est.mean() >>> loss.backward() >>> input.grad tensor([0.2500, 0.2500, 0.2500, 0.2500])",
    "type": "class",
    "file_path": "kornia\\kornia\\grad_estimator\\ste.py",
    "ast_data": "ClassDef name:STEFunction FunctionDef name:forward arg:ctx arg:input arg:output arg:grad_fn arguments arg arg arg arg Assign Assign Assign Return return:yes FunctionDef name:backward arg:ctx arg:grad_output arguments arg arg If Compare Return return:yes Call Call Return return:yes Call Call Call Call"
  },
  {
    "library": "scipy",
    "name": "_create_binned_data",
    "source_code": "def _create_binned_data(bin_numbers, unique_bin_numbers, values, vv):\n    bin_map = dict()\n    for i in unique_bin_numbers:\n        bin_map[i] = []\n    for i in builtins.range(len(bin_numbers)):\n        bin_map[bin_numbers[i]].append(values[vv, i])\n    return bin_map",
    "docstring": "Create hashmap of bin ids to values in bins key: bin number value: list of binned data",
    "type": "function",
    "file_path": "scipy\\scipy\\stats\\_binned_statistic.py",
    "ast_data": "FunctionDef name:_create_binned_data arg:bin_numbers arg:unique_bin_numbers arg:values arg:vv arguments arg arg arg arg Assign Call For Assign For Call Call Call Return return:yes"
  },
  {
    "library": "django",
    "name": "success",
    "source_code": "def success(request, message, extra_tags='', fail_silently=False):\n    add_message(request, constants.SUCCESS, message, extra_tags=extra_tags, fail_silently=fail_silently)",
    "docstring": "Add a message with the `` level.",
    "type": "function",
    "file_path": "django\\django\\contrib\\messages\\api.py",
    "ast_data": "FunctionDef name:success arg:request arg:message arg:extra_tags arg:fail_silently arguments arg arg arg arg Call"
  },
  {
    "library": "pandas",
    "name": "get_chunks",
    "source_code": "@abstractmethod\ndef get_chunks(self, n_chunks: int | None=None) -> Iterable[Column]:\n    pass",
    "docstring": "Return an iterator yielding the chunks. See for details on ``.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\interchange\\dataframe_protocol.py",
    "ast_data": "FunctionDef name:get_chunks arg:self arg:n_chunks arguments arg arg"
  },
  {
    "library": "django",
    "name": "normalize_table_name",
    "source_code": "def normalize_table_name(self, table_name):\n    return re.sub('[^a-zA-Z0-9]', '', table_name.title())",
    "docstring": "Translate the table name to a Python-compatible model name.",
    "type": "method",
    "file_path": "django\\django\\core\\management\\commands\\inspectdb.py",
    "ast_data": "FunctionDef name:normalize_table_name arg:self arg:table_name arguments arg arg Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "_create_tensor_from_params",
    "source_code": "def _create_tensor_from_params(*size, local_device, tensor_properties: TensorProperties):\n    dtype = tensor_properties.dtype\n    layout = tensor_properties.layout\n    requires_grad = tensor_properties.requires_grad\n    memory_format = tensor_properties.memory_format\n    pin_memory = tensor_properties.pin_memory\n    return torch.empty(*size, dtype=dtype, layout=layout, device=local_device, requires_grad=requires_grad, memory_format=memory_format, pin_memory=pin_memory)",
    "docstring": "Helper to construct tensor from size, device and common params.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\_shard\\sharded_tensor\\api.py",
    "ast_data": "FunctionDef name:_create_tensor_from_params arguments arg arg arg Assign Assign Assign Assign Assign Return return:yes Call"
  },
  {
    "library": "scrapy",
    "name": "SpiderState",
    "source_code": "class SpiderState:\n\n    def __init__(self, jobdir: str | None=None):\n        self.jobdir: str | None = jobdir\n\n    @classmethod\n    def from_crawler(cls, crawler: Crawler) -> Self:\n        jobdir = job_dir(crawler.settings)\n        if not jobdir:\n            raise NotConfigured\n        obj = cls(jobdir)\n        crawler.signals.connect(obj.spider_closed, signal=signals.spider_closed)\n        crawler.signals.connect(obj.spider_opened, signal=signals.spider_opened)\n        return obj\n\n    def spider_closed(self, spider: Spider) -> None:\n        if self.jobdir:\n            with Path(self.statefn).open('wb') as f:\n                assert hasattr(spider, 'state')\n                pickle.dump(spider.state, f, protocol=4)\n\n    def spider_opened(self, spider: Spider) -> None:\n        if self.jobdir and Path(self.statefn).exists():\n            with Path(self.statefn).open('rb') as f:\n                spider.state = pickle.load(f)\n        else:\n            spider.state = {}\n\n    @property\n    def statefn(self) -> str:\n        assert self.jobdir\n        return str(Path(self.jobdir, 'spider.state'))",
    "docstring": "Store and load spider state during a scraping job",
    "type": "class",
    "file_path": "scrapy\\scrapy\\extensions\\spiderstate.py",
    "ast_data": "ClassDef name:SpiderState FunctionDef name:__init__ arg:self arg:jobdir arguments arg arg FunctionDef name:from_crawler arg:cls arg:crawler arguments arg arg Assign Call If Raise Assign Call Call Call Return return:yes FunctionDef name:spider_closed arg:self arg:spider arguments arg arg If With Call Call Call Call FunctionDef name:spider_opened arg:self arg:spider arguments arg arg If BoolOp Call Call With Call Call Assign Call Assign FunctionDef name:statefn arg:self arguments arg Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "TritonKernel",
    "source_code": "@dataclasses.dataclass\nclass TritonKernel:\n    tuner: CachingAutotuner\n    wrapped: TraceableTritonKernelWrapper",
    "docstring": "Stores metadata about Triton kernels for use in FX.",
    "type": "class",
    "file_path": "pytorch\\torch\\_inductor\\codegen\\wrapper_fxir.py",
    "ast_data": "ClassDef name:TritonKernel"
  },
  {
    "library": "tensorflow",
    "name": "register",
    "source_code": "def register(self, candidate, name=None):\n    if not name:\n        name = candidate.__name__\n    if name in self._registry:\n        frame = self._registry[name][_LOCATION_TAG]\n        raise KeyError(\"Registering two %s with name '%s'! (Previous registration was in %s %s:%d)\" % (self._name, name, frame.name, frame.filename, frame.lineno))\n    logging.vlog(1, 'Registering %s (%s) in %s.', name, candidate, self._name)\n    stack = traceback.extract_stack(limit=3)\n    stack_index = min(2, len(stack) - 1)\n    if stack_index >= 0:\n        location_tag = stack[stack_index]\n    else:\n        location_tag = ('UNKNOWN', 'UNKNOWN', 'UNKNOWN', 'UNKNOWN', 'UNKNOWN')\n    self._registry[name] = {_TYPE_TAG: candidate, _LOCATION_TAG: location_tag}",
    "docstring": "Registers a Python object \"candidate\" for the given \"name\". Args: candidate: The candidate object to add to the registry. name: An optional string specifying the registry key for the candidate. If None, candidate.__name__ will be used. Raises: KeyError: If same name is used twice.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\registry.py",
    "ast_data": "FunctionDef name:register arg:self arg:candidate arg:name arguments arg arg arg If Assign If Compare Assign Raise Call Call Assign Call Assign Call Call If Compare Assign Assign Assign"
  },
  {
    "library": "tensorflow",
    "name": "_get_slot_key_from_var",
    "source_code": "def _get_slot_key_from_var(var, slot_name):\n    name = _var_key(var)\n    return name + '/' + slot_name",
    "docstring": "Get the slot key for the variable: var_name/slot_name.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\optimizer_v2\\optimizer_v2.py",
    "ast_data": "FunctionDef name:_get_slot_key_from_var arg:var arg:slot_name arguments arg arg Assign Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "__init__",
    "source_code": "@_docstring.interpd\ndef __init__(self, center, r, theta1, theta2, *, width=None, **kwargs):\n    super().__init__(**kwargs)\n    self.center = center\n    self.r, self.width = (r, width)\n    self.theta1, self.theta2 = (theta1, theta2)\n    self._patch_transform = transforms.IdentityTransform()\n    self._recompute_path()",
    "docstring": "A wedge centered at *x*, *y* center with radius *r* that sweeps *theta1* to *theta2* (in degrees). If *width* is given, then a partial wedge is drawn from inner radius *r* - *width* to outer radius *r*. Valid keyword arguments are: %(Patch:kwdoc)s",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\patches.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:center arg:r arg:theta1 arg:theta2 arguments arg arg arg arg arg arg arg Call Call Assign Assign Assign Assign Call Call"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, all_reduce_alg='nccl', num_packs=1):\n    self._all_reduce_alg = all_reduce_alg\n    self._num_packs = num_packs\n    self._simple_cross_replica_ops = ReductionToOneDevice()\n    super(AllReduceCrossDeviceOps, self).__init__()",
    "docstring": "Initializes the object. Args: all_reduce_alg: the all-reduce algorithm to use, currently only \"nccl\" or \"hierarchical_copy\" are supported. num_packs: a non-negative integer. The number of packs to split values into. If zero, no packing will be done.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\cross_device_ops.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:all_reduce_alg arg:num_packs arguments arg arg arg Assign Assign Assign Call Call Call"
  },
  {
    "library": "pandas",
    "name": "_close",
    "source_code": "def _close(self) -> None:\n    if self._output_file is not None:\n        assert isinstance(self.handles.handle, BytesIO)\n        bio, self.handles.handle = (self.handles.handle, self._output_file)\n        self.handles.handle.write(bio.getvalue())",
    "docstring": "Close the file if it was created by the writer. If a buffer or file-like object was passed in, for example a GzipFile, then leave this file open for the caller to close.",
    "type": "method",
    "file_path": "pandas\\pandas\\io\\stata.py",
    "ast_data": "FunctionDef name:_close arg:self arguments arg If Compare Call Assign Call Call"
  },
  {
    "library": "pytorch",
    "name": "create_mask",
    "source_code": "def create_mask(mod_fn: Union[_score_mod_signature, _mask_mod_signature], B: Optional[int], H: Optional[int], Q_LEN: int, KV_LEN: int, device: DeviceLikeType='cuda') -> Tensor:\n    if B is None:\n        B = 1\n    if H is None:\n        H = 1\n    b = torch.arange(0, B, device=device)\n    h = torch.arange(0, H, device=device)\n    m = torch.arange(0, Q_LEN, device=device)\n    n = torch.arange(0, KV_LEN, device=device)\n    mod_type = _get_mod_type(mod_fn)\n    from torch._dynamo._trace_wrapped_higher_order_op import TransformGetItemToIndex\n    with TransformGetItemToIndex():\n        if mod_type == _ModificationType.SCORE_MOD:\n            score_mod = mod_fn\n            score_mod = _vmap_for_bhqkv(score_mod, prefix=(0,))\n            out = score_mod(torch.zeros(B, H, Q_LEN, KV_LEN, device=device), b, h, m, n)\n            mask = torch.where(torch.isneginf(out), False, True)\n            return mask\n        elif mod_type == _ModificationType.MASK_MOD:\n            mask_mod = mod_fn\n            mask_mod = _vmap_for_bhqkv(mask_mod, prefix=())\n            mask = mask_mod(b, h, m, n)\n            return mask\n        else:\n            raise AssertionError",
    "docstring": "This function creates a mask tensor from a mod_fn function. Args: mod_fn (Union[_score_mod_signature, _mask_mod_signature]): Function to modify attention scores. B (int): Batch size. H (int): Number of query heads. Q_LEN (int): Sequence length of query. KV_LEN (int): Sequence length of key/value. device (str): Device to run the mask creation on. Returns: mask (Tensor): A mask tensor with shape (B, H, M, N).",
    "type": "function",
    "file_path": "pytorch\\torch\\nn\\attention\\flex_attention.py",
    "ast_data": "FunctionDef name:create_mask arg:mod_fn arg:B arg:H arg:Q_LEN arg:KV_LEN arg:device arguments arg arg arg arg arg arg If Compare Assign If Compare Assign Assign Call Assign Call Assign Call Assign Call Assign Call With Call If Compare Assign Assign Call Assign Call Call Assign Call Call Return return:yes If Compare Assign Assign Call Assign Call Return return:yes Raise"
  },
  {
    "library": "scipy",
    "name": "_process_quantiles",
    "source_code": "def _process_quantiles(self, X, dims):\n    X = np.asarray(X, dtype=float)\n    if X.ndim == 2:\n        X = X[np.newaxis, :]\n    if X.shape[-2:] != dims:\n        raise ValueError('The shape of array `X` is not compatible with the distribution parameters.')\n    return X",
    "docstring": "Adjust quantiles array so that last two axes labels the components of each data point.",
    "type": "method",
    "file_path": "scipy\\scipy\\stats\\_multivariate.py",
    "ast_data": "FunctionDef name:_process_quantiles arg:self arg:X arg:dims arguments arg arg arg Assign Call If Compare Assign If Compare Raise Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "outer_context",
    "source_code": "@property\ndef outer_context(self):\n    return self._outer_context",
    "docstring": "Return the context containing this context.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\control_flow_ops.py",
    "ast_data": "FunctionDef name:outer_context arg:self arguments arg Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "AnchoredText",
    "source_code": "class AnchoredText(AnchoredOffsetbox):\n\n    def __init__(self, s, loc, *, pad=0.4, borderpad=0.5, prop=None, **kwargs):\n        if prop is None:\n            prop = {}\n        badkwargs = {'va', 'verticalalignment'}\n        if badkwargs & set(prop):\n            raise ValueError('Mixing verticalalignment with AnchoredText is not supported.')\n        self.txt = TextArea(s, textprops=prop)\n        fp = self.txt._text.get_fontproperties()\n        super().__init__(loc, pad=pad, borderpad=borderpad, child=self.txt, prop=fp, **kwargs)",
    "docstring": "AnchoredOffsetbox with Text.",
    "type": "class",
    "file_path": "matplotlib\\lib\\matplotlib\\offsetbox.py",
    "ast_data": "ClassDef name:AnchoredText FunctionDef name:__init__ arg:self arg:s arg:loc arguments arg arg arg arg arg arg arg If Compare Assign Assign If Call Raise Call Assign Call Assign Call Call Call"
  },
  {
    "library": "kornia",
    "name": "TrivialAugment",
    "source_code": "class TrivialAugment(PolicyAugmentBase):\n\n    def __init__(self, policy: Optional[List[SUBPOLICY_CONFIG]]=None, transformation_matrix_mode: str='silent') -> None:\n        if policy is None:\n            _policy = default_policy\n        else:\n            _policy = policy\n        super().__init__(_policy, transformation_matrix_mode=transformation_matrix_mode)\n        selection_weights = torch.tensor([1.0 / len(self)] * len(self))\n        self.rand_selector = Categorical(selection_weights)\n\n    def compose_subpolicy_sequential(self, subpolicy: SUBPOLICY_CONFIG) -> PolicySequential:\n        if len(subpolicy) != 1:\n            raise RuntimeError(f'Each policy must have only one operation for TrivialAugment. Got {len(subpolicy)}.')\n        name, low, high = subpolicy[0]\n        return PolicySequential(*[getattr(ops, name)(low, high)])\n\n    def get_forward_sequence(self, params: Optional[List[ParamItem]]=None) -> Iterator[Tuple[str, Module]]:\n        if params is None:\n            idx = self.rand_selector.sample((1,))\n            return self.get_children_by_indices(idx)\n        return self.get_children_by_params(params)",
    "docstring": "Apply TrivialAugment :cite: augmentation strategies. Args: policy: candidate transformations. If None, a default candidate list will be used. transformation_matrix_mode: computation mode for the chained transformation matrix, via attribute. If , transformation matrix will be computed silently and the non-rigid modules will be ignored as identity transformations. If , transformation matrix will be computed silently and the non-rigid modules will trigger errors. If , transformation matrix will be totally ignored. Examples: >>> import kornia.augmentation as K >>> in_tensor = torch.rand(5, 3, 30, 30) >>> aug = K.AugmentationSequential(TrivialAugment()) >>> aug(in_tensor).shape torch.Size([5, 3, 30, 30])",
    "type": "class",
    "file_path": "kornia\\kornia\\augmentation\\auto\\trivial_augment\\trivial_augment.py",
    "ast_data": "ClassDef name:TrivialAugment FunctionDef name:__init__ arg:self arg:policy arg:transformation_matrix_mode arguments arg arg arg If Compare Assign Assign Call Call Assign Call Call Call Assign Call FunctionDef name:compose_subpolicy_sequential arg:self arg:subpolicy arguments arg arg If Compare Call Raise Call Call Assign Return return:yes Call Call Call FunctionDef name:get_forward_sequence arg:self arg:params arguments arg arg If Compare Assign Call Return return:yes Call Return return:yes Call"
  },
  {
    "library": "django",
    "name": "_set_autocommit",
    "source_code": "def _set_autocommit(self, autocommit):\n    raise NotImplementedError('subclasses of BaseDatabaseWrapper may require a _set_autocommit() method')",
    "docstring": "Backend-specific implementation to enable or disable autocommit.",
    "type": "method",
    "file_path": "django\\django\\db\\backends\\base\\base.py",
    "ast_data": "FunctionDef name:_set_autocommit arg:self arg:autocommit arguments arg arg Raise Call"
  },
  {
    "library": "tensorflow",
    "name": "with_file_output",
    "source_code": "def with_file_output(self, outfile):\n    self._options['output'] = 'file:outfile=%s' % outfile\n    return self",
    "docstring": "Print the result to a file.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\profiler\\option_builder.py",
    "ast_data": "FunctionDef name:with_file_output arg:self arg:outfile arguments arg arg Assign Return return:yes"
  },
  {
    "library": "sphinx",
    "name": "centered",
    "source_code": "class centered(nodes.Part, nodes.TextElement):\n    pass",
    "docstring": "Deprecated.",
    "type": "class",
    "file_path": "sphinx\\sphinx\\addnodes.py",
    "ast_data": "ClassDef name:centered"
  },
  {
    "library": "scikit-learn",
    "name": "_check_parameters",
    "source_code": "@abstractmethod\ndef _check_parameters(self, n_samples):\n    pass",
    "docstring": "Validate parameters depending on the input data.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\cluster\\_bicluster.py",
    "ast_data": "FunctionDef name:_check_parameters arg:self arg:n_samples arguments arg arg"
  },
  {
    "library": "tensorflow",
    "name": "is_variable_initialized",
    "source_code": "@tf_export(v1=['is_variable_initialized'])\n@tf_should_use.should_use_result\ndef is_variable_initialized(variable):\n    from tensorflow.python.ops import state_ops\n    return state_ops.is_variable_initialized(variable)",
    "docstring": "Tests if a variable has been initialized. Args: variable: A . Returns: Returns a scalar boolean Tensor, if the variable has been initialized, otherwise.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\variable_v1.py",
    "ast_data": "FunctionDef name:is_variable_initialized arg:variable arguments arg Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "name",
    "source_code": "@property\ndef name(self):\n    self._create_definition_if_needed()\n    return self._func_name",
    "docstring": "Function name.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\function.py",
    "ast_data": "FunctionDef name:name arg:self arguments arg Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "__call__",
    "source_code": "def __call__(self, value, clip=None):\n    if clip is None:\n        clip = self.clip\n    xx, is_scalar = self.process_value(value)\n    mask = np.ma.getmaskarray(xx)\n    xx = np.atleast_1d(xx.filled(self.vmax + 1))\n    if clip:\n        np.clip(xx, self.vmin, self.vmax, out=xx)\n        max_col = self.Ncmap - 1\n    else:\n        max_col = self.Ncmap\n    iret = np.digitize(xx, self.boundaries) - 1 + self._offset\n    if self.Ncmap > self._n_regions:\n        if self._n_regions == 1:\n            iret[iret == 0] = (self.Ncmap - 1) // 2\n        else:\n            iret = (self.Ncmap - 1) / (self._n_regions - 1) * iret\n    iret = iret.astype(np.int16)\n    iret[xx < self.vmin] = -1\n    iret[xx >= self.vmax] = max_col\n    ret = np.ma.array(iret, mask=mask)\n    if is_scalar:\n        ret = int(ret[0])\n    return ret",
    "docstring": "This method behaves similarly to , except that it returns integers or arrays of int16.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\colors.py",
    "ast_data": "FunctionDef name:__call__ arg:self arg:value arg:clip arguments arg arg arg If Compare Assign Assign Call Assign Call Assign Call Call If Call Assign Assign Assign Call If Compare If Compare Assign Compare Assign Assign Call Assign Compare Assign Compare Assign Call If Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "row_lengths",
    "source_code": "def row_lengths(self):\n    if self._row_lengths is not None:\n        return self._row_lengths\n    splits = self._row_splits\n    return splits[1:] - splits[:-1]",
    "docstring": "Returns the lengths of rows in this . Returns: A 1-D integer Tensor with shape . The returned tensor is nonnegative. .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\row_partition.py",
    "ast_data": "FunctionDef name:row_lengths arg:self arguments arg If Compare Return return:yes Assign Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_format_param",
    "source_code": "def _format_param(name: str, optimizer: Optimizer, param):\n\n    def _copy(_param):\n        return _param.clone() if isinstance(_param, Tensor) else _param\n    if isinstance(param, (list, tuple)):\n        if len(param) != len(optimizer.param_groups):\n            raise ValueError(f'{name} must have the same length as optimizer.param_groups. {name} has {len(param)} values, param_groups has {len(optimizer.param_groups)}.')\n    else:\n        param = [param] * len(optimizer.param_groups)\n    return list(map(_copy, param))",
    "docstring": "Return correctly formatted lr/momentum for each param group.",
    "type": "function",
    "file_path": "pytorch\\torch\\optim\\lr_scheduler.py",
    "ast_data": "FunctionDef name:_format_param arg:name arg:optimizer arg:param arguments arg arg arg FunctionDef name:_copy arg:_param arguments arg Return return:yes Call Call If Call If Compare Call Call Raise Call Call Call Assign Call Return return:yes Call Call"
  },
  {
    "library": "pandas",
    "name": "name",
    "source_code": "@property\ndef name(self) -> str_type:\n    return str(self)",
    "docstring": "A string representation of the dtype.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\dtypes\\dtypes.py",
    "ast_data": "FunctionDef name:name arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "_clean_nans",
    "source_code": "def _clean_nans(scores):\n    scores = as_float_array(scores, copy=True)\n    scores[np.isnan(scores)] = np.finfo(scores.dtype).min\n    return scores",
    "docstring": "Fixes Issue #1240: NaNs can't be properly compared, so change them to the smallest value of scores's dtype. -inf seems to be unreliable.",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\feature_selection\\_univariate_selection.py",
    "ast_data": "FunctionDef name:_clean_nans arg:scores arguments arg Assign Call Assign Call Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "pstoeps",
    "source_code": "def pstoeps(tmpfile, bbox=None, rotated=False):\n    epsfile = tmpfile + '.eps'\n    with open(epsfile, 'wb') as epsh, open(tmpfile, 'rb') as tmph:\n        write = epsh.write\n        for line in tmph:\n            if line.startswith(b'%!PS'):\n                write(b'%!PS-Adobe-3.0 EPSF-3.0\\n')\n                if bbox:\n                    write(_get_bbox_header(bbox).encode('ascii') + b'\\n')\n            elif line.startswith(b'%%EndComments'):\n                write(line)\n                write(b'%%BeginProlog\\nsave\\ncountdictstack\\nmark\\nnewpath\\n/showpage {} def\\n/setpagedevice {pop} def\\n%%EndProlog\\n%%Page 1 1\\n')\n                if rotated:\n                    write(_get_rotate_command(bbox).encode('ascii') + b'\\n')\n                break\n            elif bbox and line.startswith((b'%%Bound', b'%%HiResBound', b'%%DocumentMedia', b'%%Pages')):\n                pass\n            else:\n                write(line)\n        for line in tmph:\n            if line.startswith(b'%%EOF'):\n                write(b'cleartomark\\ncountdictstack\\nexch sub { end } repeat\\nrestore\\nshowpage\\n%%EOF\\n')\n            elif line.startswith(b'%%PageBoundingBox'):\n                pass\n            else:\n                write(line)\n    os.remove(tmpfile)\n    shutil.move(epsfile, tmpfile)",
    "docstring": "Convert the postscript to encapsulated postscript. The bbox of the eps file will be replaced with the given *bbox* argument. If None, original bbox will be used.",
    "type": "function",
    "file_path": "matplotlib\\lib\\matplotlib\\backends\\backend_ps.py",
    "ast_data": "FunctionDef name:pstoeps arg:tmpfile arg:bbox arg:rotated arguments arg arg arg Assign With Call Call Assign For If Call Call If Call Call Call If Call Call Call If Call Call Call If BoolOp Call Call For If Call Call If Call Call Call Call"
  },
  {
    "library": "scikit-learn",
    "name": "_fill_or_add_to_diagonal",
    "source_code": "def _fill_or_add_to_diagonal(array, value, xp, add_value=True, wrap=False):\n    if array.ndim != 2:\n        raise ValueError(f'array should be 2-d. Got array with shape {tuple(array.shape)}')\n    value = xp.asarray(value, dtype=array.dtype, device=device(array))\n    end = None\n    step = array.shape[1] + 1\n    if not wrap:\n        end = array.shape[1] * array.shape[1]\n    array_flat = xp.reshape(array, (-1,))\n    if add_value:\n        array_flat[:end:step] += value\n    else:\n        array_flat[:end:step] = value",
    "docstring": "Implementation to facilitate adding or assigning specified values to the diagonal of a 2-d array. If `TrueTrue. This is currently only supported for 2-d arrays. The implementation is taken from the function:",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\utils\\_array_api.py",
    "ast_data": "FunctionDef name:_fill_or_add_to_diagonal arg:array arg:value arg:xp arg:add_value arg:wrap arguments arg arg arg arg arg If Compare Raise Call Call Assign Call Call Assign Assign If Assign Assign Call If Assign"
  },
  {
    "library": "pytorch",
    "name": "_supports_report_gen",
    "source_code": "def _supports_report_gen(self, module: nn.Module) -> bool:\n    return hasattr(module, self.DEFAULT_PRE_OBSERVER_NAME)",
    "docstring": "Returns whether the given module is supported for report generation Any module that has a model report pre-observer is supported Args module: The module to check and ensure is supported Returns True if the module is supported by observer, False otherwise",
    "type": "method",
    "file_path": "pytorch\\torch\\ao\\quantization\\fx\\_model_report\\detector.py",
    "ast_data": "FunctionDef name:_supports_report_gen arg:self arg:module arguments arg arg Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "_get_view",
    "source_code": "def _get_view(self):\n    return {'xlim': self.get_xlim(), 'autoscalex_on': self.get_autoscalex_on(), 'ylim': self.get_ylim(), 'autoscaley_on': self.get_autoscaley_on()}",
    "docstring": "Save information required to reproduce the current view. This method is called before a view is changed, such as during a pan or zoom initiated by the user. It returns an opaque object that describes the current view, in a format compatible with :meth:. The default implementation saves the view limits and autoscaling state. Subclasses may override this as needed, as long as :meth: is also adjusted accordingly.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\axes\\_base.py",
    "ast_data": "FunctionDef name:_get_view arg:self arguments arg Return return:yes Call Call Call Call"
  },
  {
    "library": "seaborn",
    "name": "_check_dimension_uniqueness",
    "source_code": "def _check_dimension_uniqueness(self, facet_spec: FacetSpec, pair_spec: PairSpec) -> None:\n    err = None\n    facet_vars = facet_spec.get('variables', {})\n    if facet_spec.get('wrap') and {'col', 'row'} <= set(facet_vars):\n        err = 'Cannot wrap facets when specifying both `col` and `row`.'\n    elif pair_spec.get('wrap') and pair_spec.get('cross', True) and (len(pair_spec.get('structure', {}).get('x', [])) > 1) and (len(pair_spec.get('structure', {}).get('y', [])) > 1):\n        err = 'Cannot wrap subplots when pairing on both `x` and `y`.'\n    collisions = {'x': ['columns', 'rows'], 'y': ['rows', 'columns']}\n    for pair_axis, (multi_dim, wrap_dim) in collisions.items():\n        if pair_axis not in pair_spec.get('structure', {}):\n            continue\n        elif multi_dim[:3] in facet_vars:\n            err = f'Cannot facet the {multi_dim} while pairing on `{pair_axis}``.'\n        elif wrap_dim[:3] in facet_vars and facet_spec.get('wrap'):\n            err = f'Cannot wrap the {wrap_dim} while pairing on `{pair_axis}``.'\n        elif wrap_dim[:3] in facet_vars and pair_spec.get('wrap'):\n            err = f'Cannot wrap the {multi_dim} while faceting the {wrap_dim}.'\n    if err is not None:\n        raise RuntimeError(err)",
    "docstring": "Reject specs that pair and facet on (or wrap to) same figure dimension.",
    "type": "method",
    "file_path": "seaborn\\seaborn\\_core\\subplots.py",
    "ast_data": "FunctionDef name:_check_dimension_uniqueness arg:self arg:facet_spec arg:pair_spec arguments arg arg arg Assign Assign Call If BoolOp Call Compare Call Assign If BoolOp Call Call Compare Call Call Call Compare Call Call Call Assign Assign For Call If Compare Call If Compare Assign If BoolOp Compare Call Assign If BoolOp Compare Call Assign If Compare Raise Call"
  },
  {
    "library": "pytorch",
    "name": "check_inst_exn_tab_entries_nested",
    "source_code": "def check_inst_exn_tab_entries_nested(tab: list[InstructionExnTabEntry], indexof) -> None:\n    entry_stack: list[tuple[int, int]] = []\n    for entry in tab:\n        key = (indexof[entry.start], indexof[entry.end])\n        while entry_stack and entry_stack[-1][1] < key[0]:\n            entry_stack.pop()\n        if entry_stack:\n            assert entry_stack[-1][0] <= key[0] <= key[1] <= entry_stack[-1][1]\n        entry_stack.append(key)",
    "docstring": "Checks is a properly sorted list of nested InstructionExnTabEntry's, i.e. no entries partially overlap. \"Properly sorted\" means entries are sorted by increasing starts, then decreasing ends.",
    "type": "function",
    "file_path": "pytorch\\torch\\_dynamo\\bytecode_transformation.py",
    "ast_data": "FunctionDef name:check_inst_exn_tab_entries_nested arg:tab arg:indexof arguments arg arg For Assign While BoolOp Compare Call If Compare Call"
  },
  {
    "library": "tensorflow",
    "name": "dtype_name",
    "source_code": "def dtype_name(dtype):\n    dtype = dtypes.as_dtype(dtype)\n    if hasattr(dtype, 'name'):\n        return dtype.name\n    if hasattr(dtype, '__name__'):\n        return dtype.__name__\n    return str(dtype)",
    "docstring": "Returns the string name for this .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\linalg\\linear_operator_util.py",
    "ast_data": "FunctionDef name:dtype_name arg:dtype arguments arg Assign Call If Call Return return:yes If Call Return return:yes Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "__init__",
    "source_code": "def __init__(self, dim=None, seed=None):\n    self._dist = special_ortho_group_gen(seed)\n    self.dim = self._dist._process_parameters(dim)",
    "docstring": "Create a frozen SO(N) distribution. Parameters ---------- dim : scalar Dimension of matrices seed : {None, int, , }, optional If is None (or ), the singleton is used. If is an int, a new `seedseed` instance then that instance is used. Examples -------- >>> from scipy.stats import special_ortho_group >>> g = special_ortho_group(5) >>> x = g.rvs()",
    "type": "method",
    "file_path": "scipy\\scipy\\stats\\_multivariate.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:dim arg:seed arguments arg arg arg Assign Call Assign Call"
  },
  {
    "library": "tensorflow",
    "name": "initialize",
    "source_code": "@deprecated(None, \"Use the iterator's `initializer` property instead.\")\ndef initialize(self):\n    return self._initializer",
    "docstring": "Initialize underlying iterators. Returns: A list of any initializer ops that should be run.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\v1\\input_lib.py",
    "ast_data": "FunctionDef name:initialize arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "trrad",
    "source_code": "def trrad(delta_in, dnorm, eta1, eta2, gamma1, gamma2, ratio):\n    if DEBUGGING:\n        assert delta_in >= dnorm > 0\n        assert 0 <= eta1 <= eta2 < 1\n        assert 0 < gamma1 < 1 < gamma2\n        assert not np.isnan(ratio)\n    if ratio <= eta1:\n        delta = gamma1 * dnorm\n    elif ratio <= eta2:\n        delta = max(gamma1 * delta_in, dnorm)\n    else:\n        delta = max(gamma1 * delta_in, gamma2 * dnorm)\n    if DEBUGGING:\n        assert delta > 0\n    return delta",
    "docstring": "This function updates the trust region radius according to RATIO and DNORM.",
    "type": "function",
    "file_path": "scipy\\scipy\\_lib\\pyprima\\pyprima\\src\\pyprima\\cobyla\\trustregion.py",
    "ast_data": "FunctionDef name:trrad arg:delta_in arg:dnorm arg:eta1 arg:eta2 arg:gamma1 arg:gamma2 arg:ratio arguments arg arg arg arg arg arg arg If Compare Compare Compare Call If Compare Assign If Compare Assign Call Assign Call If Compare Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "predict",
    "source_code": "def predict(self, X):\n    probs = self.predict_proba(X)\n    classes_ = self.classes_\n    if not self.outputs_2d_:\n        probs = [probs]\n        classes_ = [self.classes_]\n    n_outputs = len(classes_)\n    n_queries = probs[0].shape[0]\n    y_pred = np.empty((n_queries, n_outputs), dtype=classes_[0].dtype)\n    for k, prob in enumerate(probs):\n        max_prob_index = prob.argmax(axis=1)\n        y_pred[:, k] = classes_[k].take(max_prob_index)\n        outlier_zero_probs = (prob == 0).all(axis=1)\n        if outlier_zero_probs.any():\n            zero_prob_index = np.flatnonzero(outlier_zero_probs)\n            y_pred[zero_prob_index, k] = self.outlier_label_[k]\n    if not self.outputs_2d_:\n        y_pred = y_pred.ravel()\n    return y_pred",
    "docstring": "Predict the class labels for the provided data. Parameters ---------- X : {array-like, sparse matrix} of shape (n_queries, n_features), or (n_queries, n_indexed) if metric == 'precomputed', or None Test samples. If , predictions for all indexed points are returned; in this case, points are not considered their own neighbors. Returns ------- y : ndarray of shape (n_queries,) or (n_queries, n_outputs) Class labels for each data sample.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\neighbors\\_classification.py",
    "ast_data": "FunctionDef name:predict arg:self arg:X arguments arg arg Assign Call Assign If Assign Assign Assign Call Assign Assign Call For Call Assign Call Assign Call Assign Call Compare If Call Assign Call Assign If Assign Call Return return:yes"
  },
  {
    "library": "scipy",
    "name": "_rotvec_dot_to_angular_rate_matrix",
    "source_code": "def _rotvec_dot_to_angular_rate_matrix(rotvecs):\n    norm = np.linalg.norm(rotvecs, axis=1)\n    k1 = np.empty_like(norm)\n    k2 = np.empty_like(norm)\n    mask = norm > 0.0001\n    nm = norm[mask]\n    k1[mask] = (1 - np.cos(nm)) / nm ** 2\n    k2[mask] = (nm - np.sin(nm)) / nm ** 3\n    mask = ~mask\n    nm = norm[mask]\n    k1[mask] = 0.5 - nm ** 2 / 24\n    k2[mask] = 1 / 6 - nm ** 2 / 120\n    skew = _create_skew_matrix(rotvecs)\n    result = np.empty((len(rotvecs), 3, 3))\n    result[:] = np.identity(3)\n    result[:] -= k1[:, None, None] * skew\n    result[:] += k2[:, None, None] * np.matmul(skew, skew)\n    return result",
    "docstring": "Compute matrices to transform rot. vector derivatives to angular rates. The matrices depend on the current attitude represented as a rotation vector. Parameters ---------- rotvecs : ndarray, shape (n, 3) Set of rotation vectors. Returns ------- ndarray, shape (n, 3, 3)",
    "type": "function",
    "file_path": "scipy\\scipy\\spatial\\transform\\_rotation_spline.py",
    "ast_data": "FunctionDef name:_rotvec_dot_to_angular_rate_matrix arg:rotvecs arguments arg Assign Call Assign Call Assign Call Assign Compare Assign Assign Call Assign Call Assign Assign Assign Assign Assign Call Assign Call Call Assign Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_getargspec",
    "source_code": "def _getargspec(target):\n    fullargspecs = getfullargspec(target)\n    if hasattr(_inspect, 'ArgSpec'):\n        argspecs = ArgSpec(args=fullargspecs.args, varargs=fullargspecs.varargs, keywords=fullargspecs.varkw, defaults=fullargspecs.defaults)\n    else:\n        argspecs = FullArgSpec(args=fullargspecs.args, varargs=fullargspecs.varargs, varkw=fullargspecs.varkw, defaults=fullargspecs.defaults, kwonlyargs=[], kwonlydefaults=None, annotations={})\n    return argspecs",
    "docstring": "A python3 version of getargspec. Calls and assigns args, varargs, varkw, and defaults to a python 2/3 compatible . The parameter name 'varkw' is changed to 'keywords' to fit the struct. Args: target: the target object to inspect. Returns: An ArgSpec with args, varargs, keywords, and defaults parameters from FullArgSpec.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\utils\\tf_inspect.py",
    "ast_data": "FunctionDef name:_getargspec arg:target arguments arg Assign Call If Call Assign Call Assign Call Return return:yes"
  },
  {
    "library": "scipy",
    "name": "disc",
    "source_code": "def disc(t, k):\n    n = t.shape[0]\n    delta = t[n - k - 1] - t[k]\n    nrint = n - 2 * k - 1\n    matr = np.empty((nrint - 1, k + 2), dtype=float)\n    for jj in range(nrint - 1):\n        j = jj + k + 1\n        for ii in range(k + 2):\n            i = jj + ii\n            matr[jj, ii] = (t[i + k + 1] - t[i]) / prodd(t, i, j, k)\n    matr *= (delta / nrint) ** k\n    offset = np.array([i for i in range(nrint - 1)], dtype=np.int64)\n    nc = n - k - 1\n    return (matr, offset, nc)",
    "docstring": "Discontinuity matrix: jumps of k-th derivatives of b-splines at internal knots. See Eqs. (9)-(10) of Ref. [1], or, equivalently, Eq. (3.43) of Ref. [2]. This routine assumes internal knots are all simple (have multiplicity =1). Parameters ---------- t : ndarray, 1D, shape(n,) Knots. k : int The spline degree Returns ------- disc : ndarray, shape(n-2*k-1, k+2) The jumps of the k-th derivatives of b-splines at internal knots, `10.1016/0146-664X(82)90043-0` .. [2] Tom Lyche and Knut Morken, Spline methods,",
    "type": "function",
    "file_path": "scipy\\scipy\\interpolate\\_fitpack_repro.py",
    "ast_data": "FunctionDef name:disc arg:t arg:k arguments arg arg Assign Assign Assign Assign Call For Call Assign For Call Assign Assign Call Assign Call Call Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "df",
    "source_code": "@property\ndef df(self):\n    return self._df",
    "docstring": "Degrees of freedom in these Student's t distribution(s).",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\distributions\\student_t.py",
    "ast_data": "FunctionDef name:df arg:self arguments arg Return return:yes"
  },
  {
    "library": "scipy",
    "name": "_rand1",
    "source_code": "def _rand1(self, samples):\n    r0, r1, r2 = samples[..., :3].T\n    return self.population[r0] + self.scale * (self.population[r1] - self.population[r2])",
    "docstring": "rand1bin, rand1exp",
    "type": "method",
    "file_path": "scipy\\scipy\\optimize\\_differentialevolution.py",
    "ast_data": "FunctionDef name:_rand1 arg:self arg:samples arguments arg arg Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_lookup_dependency",
    "source_code": "def _lookup_dependency(self, name):\n    unconditional = super(_DynamicLossScaleState, self)._lookup_dependency(name)\n    if unconditional is not None:\n        return unconditional\n    if context.executing_eagerly():\n        graph_key = None\n    else:\n        graph = ops.get_default_graph()\n        graph_key = graph._graph_key\n    return self._weights.get((name, graph_key), None)",
    "docstring": "From Trackable. Find a weight in the current graph.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\mixed_precision\\loss_scale_optimizer.py",
    "ast_data": "FunctionDef name:_lookup_dependency arg:self arg:name arguments arg arg Assign Call Call If Compare Return return:yes If Call Assign Assign Call Assign Return return:yes Call"
  },
  {
    "library": "numpy",
    "name": "minimum_fill_value",
    "source_code": "def minimum_fill_value(obj):\n    return _extremum_fill_value(obj, min_filler, 'minimum')",
    "docstring": "Return the maximum value that can be represented by the dtype of an object. This function is useful for calculating a fill value suitable for taking the minimum of an array with a given dtype. Parameters ---------- obj : ndarray, dtype or scalar An object that can be queried for it's numeric type. Returns ------- val : scalar The maximum representable value. Raises ------ TypeError If isn't a suitable numeric type. See Also -------- maximum_fill_value : The inverse function. set_fill_value : Set the filling value of a masked array. MaskedArray.fill_value : Return current fill value. Examples -------- >>> import numpy as np >>> import numpy.ma as ma >>> a = np.int8() >>> ma.minimum_fill_value(a) 127 >>> a = np.int32() >>> ma.minimum_fill_value(a) 2147483647 An array of numeric data can also be passed. >>> a = np.array([1, 2, 3], dtype=np.int8) >>> ma.minimum_fill_value(a) 127 >>> a = np.array([1, 2, 3], dtype=np.float32) >>> ma.minimum_fill_value(a) inf",
    "type": "function",
    "file_path": "numpy\\numpy\\ma\\core.py",
    "ast_data": "FunctionDef name:minimum_fill_value arg:obj arguments arg Return return:yes Call"
  },
  {
    "library": "numpy",
    "name": "__init__",
    "source_code": "def __init__(self, version):\n    major, minor = version.split('.')\n    self.version = f'NPY_{major}_{minor}_API_VERSION'",
    "docstring": "Version should be the normal NumPy version, e.g. \"1.25\"",
    "type": "method",
    "file_path": "numpy\\numpy\\_core\\code_generators\\genapi.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:version arguments arg arg Assign Call Assign"
  },
  {
    "library": "tensorflow",
    "name": "get_min_max_value",
    "source_code": "def get_min_max_value(statistics: calib_stats_pb2.CalibrationStatistics, calib_opts: stablehlo_quant_config_pb2.CalibrationOptions) -> tuple[float, float]:\n    calib_method = calib_opts.calibration_method\n    if calib_method not in _REGISTRY:\n        raise ValueError(f'Unsupported calibration method: {calib_method}')\n    calibration_algorithm = _REGISTRY[calib_method](statistics, calib_opts)\n    return calibration_algorithm.get_min_max_value()",
    "docstring": "Calculates min and max from statistics using calibration options. Args: statistics: Collected calibration statistics. calib_opts: Calibration options used for calculating min and max. Returns: (min_value, max_value): Min and max calculated using calib_opts. Raises: ValueError: Unsupported calibration method is given.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\compiler\\mlir\\quantization\\tensorflow\\calibrator\\calibration_algorithm.py",
    "ast_data": "FunctionDef name:get_min_max_value arg:statistics arg:calib_opts arguments arg arg Assign If Compare Raise Call Assign Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "recursive_undo",
    "source_code": "def recursive_undo(self, sched=None):\n    scheds = self if sched is None else sched\n    if hasattr(scheds, '_schedulers'):\n        for s in scheds._schedulers:\n            self.recursive_undo(s)\n    elif hasattr(scheds, 'last_epoch'):\n        scheds.last_epoch -= 1",
    "docstring": "Recursively undo any step performed by the initialisation of schedulers.",
    "type": "method",
    "file_path": "pytorch\\torch\\optim\\lr_scheduler.py",
    "ast_data": "FunctionDef name:recursive_undo arg:self arg:sched arguments arg arg Assign Compare If Call For Call If Call"
  },
  {
    "library": "pytorch",
    "name": "commit_tensor",
    "source_code": "@abc.abstractmethod\ndef commit_tensor(self, read_item: ReadItem, tensor: torch.Tensor) -> None:\n    pass",
    "docstring": "Call once the StorageReader finished loading data into `` prior to copying it back to the one in the state_dict. The contents of tensor will follow its device synchronization model.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\checkpoint\\planner.py",
    "ast_data": "FunctionDef name:commit_tensor arg:self arg:read_item arg:tensor arguments arg arg arg"
  },
  {
    "library": "numpy",
    "name": "_arg_trim_zeros",
    "source_code": "def _arg_trim_zeros(filt):\n    nonzero = np.argwhere(filt) if filt.dtype != np.object_ else np.argwhere(filt != 0)\n    if nonzero.size == 0:\n        start = stop = np.array([], dtype=np.intp)\n    else:\n        start = nonzero.min(axis=0)\n        stop = nonzero.max(axis=0)\n    return (start, stop)",
    "docstring": "Return indices of the first and last non-zero element. Parameters ---------- filt : array_like Input array. Returns ------- start, stop : ndarray Two arrays containing the indices of the first and last non-zero element in each dimension. See also -------- trim_zeros Examples -------- >>> import numpy as np >>> _arg_trim_zeros(np.array([0, 0, 1, 1, 0])) (array([2]), array([3]))",
    "type": "function",
    "file_path": "numpy\\numpy\\lib\\_function_base_impl.py",
    "ast_data": "FunctionDef name:_arg_trim_zeros arg:filt arguments arg Assign Compare Call Call Compare If Compare Assign Call Assign Call Assign Call Return return:yes"
  },
  {
    "library": "pandas",
    "name": "raise_on_incompatible",
    "source_code": "def raise_on_incompatible(left, right) -> IncompatibleFrequency:\n    if isinstance(right, (np.ndarray, ABCTimedeltaArray)) or right is None:\n        other_freq = None\n    elif isinstance(right, BaseOffset):\n        with warnings.catch_warnings():\n            warnings.filterwarnings('ignore', 'PeriodDtype\\\\[B\\\\] is deprecated', category=FutureWarning)\n            other_freq = PeriodDtype(right)._freqstr\n    elif isinstance(right, (ABCPeriodIndex, PeriodArray, Period)):\n        other_freq = right.freqstr\n    else:\n        other_freq = delta_to_tick(Timedelta(right)).freqstr\n    own_freq = PeriodDtype(left.freq)._freqstr\n    msg = DIFFERENT_FREQ.format(cls=type(left).__name__, own_freq=own_freq, other_freq=other_freq)\n    return IncompatibleFrequency(msg)",
    "docstring": "Helper function to render a consistent error message when raising IncompatibleFrequency. Parameters ---------- left : PeriodArray right : None, DateOffset, Period, ndarray, or timedelta-like Returns ------- IncompatibleFrequency Exception to be raised by the caller.",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\arrays\\period.py",
    "ast_data": "FunctionDef name:raise_on_incompatible arg:left arg:right arguments arg arg If BoolOp Call Compare Assign If Call With Call Call Assign Call If Call Assign Assign Call Call Assign Call Assign Call Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "_fw_set_module_hook",
    "source_code": "def _fw_set_module_hook(self, mod, input, output):\n    if self.is_bw:\n        self.activation_checkpointing = True\n    else:\n        self.activation_checkpointing = False\n    if not self.activation_checkpointing:\n        self.parent_list.pop()\n        self.name = self.parent_list[-1]",
    "docstring": "Updates the current module after module finishes running and all other hooks are resolved",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\tensor\\debug\\_comm_mode.py",
    "ast_data": "FunctionDef name:_fw_set_module_hook arg:self arg:mod arg:input arg:output arguments arg arg arg arg If Assign Assign If Call Assign"
  },
  {
    "library": "tensorflow",
    "name": "with_options",
    "source_code": "def with_options(self, options, name=None) -> 'DatasetV2':\n    return _OptionsDataset(self, options, name=name)",
    "docstring": "Returns a new with the given options set. The options are \"global\" in the sense they apply to the entire dataset. If options are set multiple times, they are merged as long as different options do not use different non-default values. >>> ds = tf.data.Dataset.range(5) >>> ds = ds.interleave(lambda x: tf.data.Dataset.range(5), ... cycle_length=3, ... num_parallel_calls=3) >>> options = tf.data.Options() >>> # This will make the interleave order non-deterministic. >>> options.deterministic = False >>> ds = ds.with_options(options) Args: options: A that identifies the options the use. name: (Optional.) A name for the tf.data operation. Returns: A new with the transformation applied as described above. Raises: ValueError: when an option is set more than once to a non-default value",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\data\\ops\\dataset_ops.py",
    "ast_data": "FunctionDef name:with_options arg:self arg:options arg:name arguments arg arg arg Return return:yes Call"
  },
  {
    "library": "kornia",
    "name": "_jpeg_decode",
    "source_code": "def _jpeg_decode(input_y: Tensor, input_cb: Tensor, input_cr: Tensor, jpeg_quality: Tensor, H: int, W: int, quantization_table_y: Tensor, quantization_table_c: Tensor) -> Tensor:\n    input_y = _dequantize(input_y, jpeg_quality, quantization_table_y)\n    input_cb_cr = _dequantize(torch.cat((input_cb, input_cr), dim=1), jpeg_quality, quantization_table_c)\n    idct_y: Tensor = _idct_8x8(input_y)\n    idct_cb, idct_cr = _idct_8x8(input_cb_cr).chunk(2, dim=1)\n    image_y: Tensor = _unpatchify_8x8(idct_y, H, W)\n    image_cb: Tensor = _unpatchify_8x8(idct_cb, H // 2, W // 2)\n    image_cr: Tensor = _unpatchify_8x8(idct_cr, H // 2, W // 2)\n    image_cb = _chroma_upsampling(image_cb)\n    image_cr = _chroma_upsampling(image_cr)\n    image_ycbcr: Tensor = torch.stack((image_y, image_cb, image_cr), dim=1) / 255.0\n    rgb_decoded: Tensor = ycbcr_to_rgb(image_ycbcr)\n    return rgb_decoded",
    "docstring": "Perform JPEG decoding. Args: input_y (Tensor): Compressed Y component of the shape :math:. input_cb (Tensor): Compressed Cb component of the shape :math:. input_cr (Tensor): Compressed Cr component of the shape :math:. jpeg_quality (Tensor): Compression strength of the shape :math:. H (int): Original image height. W (int): Original image width. quantization_table_y (Tensor): Quantization table for Y channel. quantization_table_c (Tensor): Quantization table for C channels. Returns: rgb_decoded (Tensor): Decompressed RGB image of the shape :math:.",
    "type": "function",
    "file_path": "kornia\\kornia\\enhance\\jpeg.py",
    "ast_data": "FunctionDef name:_jpeg_decode arg:input_y arg:input_cb arg:input_cr arg:jpeg_quality arg:H arg:W arg:quantization_table_y arg:quantization_table_c arguments arg arg arg arg arg arg arg arg Assign Call Assign Call Call Call Assign Call Call Call Call Call Assign Call Assign Call Call Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "contains",
    "source_code": "def contains(self, mouseevent):\n    if self._different_canvas(mouseevent):\n        return (False, {})\n    inside = self.bbox.contains(mouseevent.x, mouseevent.y)\n    return (inside, {})",
    "docstring": "Test whether the mouse event occurred on the figure. Returns ------- bool, {}",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\figure.py",
    "ast_data": "FunctionDef name:contains arg:self arg:mouseevent arguments arg arg If Call Return return:yes Assign Call Return return:yes"
  },
  {
    "library": "django",
    "name": "as_string",
    "source_code": "def as_string(self, unixfrom=False, linesep='\\n'):\n    fp = StringIO()\n    g = generator.Generator(fp, mangle_from_=False)\n    g.flatten(self, unixfrom=unixfrom, linesep=linesep)\n    return fp.getvalue()",
    "docstring": "Return the entire formatted message as a string. Optional `unixfrom' when True, means include the Unix From_ envelope header. This overrides the default as_string() implementation to not mangle lines that begin with 'From '. See bug #13433 for details.",
    "type": "method",
    "file_path": "django\\django\\core\\mail\\message.py",
    "ast_data": "FunctionDef name:as_string arg:self arg:unixfrom arg:linesep arguments arg arg arg Assign Call Assign Call Call Return return:yes Call"
  },
  {
    "library": "cherrypy",
    "name": "default",
    "source_code": "@cherrypy.expose\ndef default(self, *args, **kwargs):\n    return 'args: %s kwargs: %s' % (args, kwargs)",
    "docstring": "Render catch-all args and kwargs.",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\scaffold\\__init__.py",
    "ast_data": "FunctionDef name:default arg:self arguments arg arg arg Return return:yes"
  },
  {
    "library": "kornia",
    "name": "ConvSoftArgmax2d",
    "source_code": "class ConvSoftArgmax2d(Module):\n\n    def __init__(self, kernel_size: tuple[int, int]=(3, 3), stride: tuple[int, int]=(1, 1), padding: tuple[int, int]=(1, 1), temperature: Tensor | float=1.0, normalized_coordinates: bool=True, eps: float=1e-08, output_value: bool=False) -> None:\n        super().__init__()\n        self.kernel_size = kernel_size\n        self.stride = stride\n        self.padding = padding\n        self.temperature = temperature\n        self.normalized_coordinates = normalized_coordinates\n        self.eps = eps\n        self.output_value = output_value\n\n    def __repr__(self) -> str:\n        return f'{self.__class__.__name__}(kernel_size={self.kernel_size}, stride={self.stride}, padding={self.padding}, temperature={self.temperature}, normalized_coordinates={self.normalized_coordinates}, eps={self.eps}, output_value={self.output_value})'\n\n    def forward(self, x: Tensor) -> Tensor | tuple[Tensor, Tensor]:\n        return conv_soft_argmax2d(x, self.kernel_size, self.stride, self.padding, self.temperature, self.normalized_coordinates, self.eps, self.output_value)",
    "docstring": "Module that calculates soft argmax 2d per window. See :func: for details.",
    "type": "class",
    "file_path": "kornia\\kornia\\geometry\\subpix\\spatial_soft_argmax.py",
    "ast_data": "ClassDef name:ConvSoftArgmax2d FunctionDef name:__init__ arg:self arg:kernel_size arg:stride arg:padding arg:temperature arg:normalized_coordinates arg:eps arg:output_value arguments arg arg arg arg arg arg arg arg Call Call Assign Assign Assign Assign Assign Assign Assign FunctionDef name:__repr__ arg:self arguments arg Return return:yes FunctionDef name:forward arg:self arg:x arguments arg arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "to_map",
    "source_code": "def to_map(val_or_map: Union[Std, dict[int, Std]], local_world_size: int) -> dict[int, Std]:\n    if isinstance(val_or_map, Std):\n        return dict.fromkeys(range(local_world_size), val_or_map)\n    else:\n        map = {}\n        for i in range(local_world_size):\n            map[i] = val_or_map.get(i, Std.NONE)\n        return map",
    "docstring": "Certain APIs take redirect settings either as a single value (e.g. apply to all local ranks) or as an explicit user-provided mapping. This method is a convenience method that converts a value or mapping into a mapping. Example: :: to_map(Std.OUT, local_world_size=2) # returns: {0: Std.OUT, 1: Std.OUT} to_map({1: Std.OUT}, local_world_size=2) # returns: {0: Std.NONE, 1: Std.OUT} to_map( {0: Std.OUT, 1: Std.OUT}, local_world_size=2 ) # returns: {0: Std.OUT, 1: Std.OUT}",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\elastic\\multiprocessing\\api.py",
    "ast_data": "FunctionDef name:to_map arg:val_or_map arg:local_world_size arguments arg arg If Call Return return:yes Call Call Assign For Call Assign Call Return return:yes"
  },
  {
    "library": "django",
    "name": "_checkindex",
    "source_code": "def _checkindex(self, index):\n    if not 0 <= index < self.size:\n        raise IndexError('invalid GEOS Geometry index: %s' % index)",
    "docstring": "Check the given index.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\geos\\coordseq.py",
    "ast_data": "FunctionDef name:_checkindex arg:self arg:index arguments arg arg If Compare Raise Call"
  },
  {
    "library": "pytorch",
    "name": "range",
    "source_code": "@contextmanager\ndef range(msg, *args, **kwargs):\n    range_push(msg.format(*args, **kwargs))\n    try:\n        yield\n    finally:\n        range_pop()",
    "docstring": "Context manager / decorator that pushes an ITT range at the beginning of its scope, and pops it at the end. If extra arguments are given, they are passed as arguments to msg.format(). Args: msg (str): message to associate with the range",
    "type": "function",
    "file_path": "pytorch\\torch\\profiler\\itt.py",
    "ast_data": "FunctionDef name:range arg:msg arguments arg arg arg Call Call Try Call"
  },
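A minimal usage sketch for the `range` context manager above. The message/format arguments are illustrative; emitting real ITT ranges additionally requires a PyTorch build with ITT (VTune) support.

```python
import torch.profiler.itt as itt

# Pushes an ITT range named "step_0/forward" for the duration of the block;
# range_pop() runs even if the body raises, thanks to the try/finally above.
with itt.range("step_{}/{}", 0, "forward"):
    pass  # hypothetical workload goes here
```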
  {
    "library": "scipy",
    "name": "pmf",
    "source_code": "def pmf(self, x, /, *, method=None):\n    raise NotImplementedError()",
    "docstring": "Probability mass function The probability mass function (\"PMF\"), denoted :math:, is the probability that the random variable :math: will assume the value :math:. .. math:: f(x) = P(X = x) accepts for :math:. Parameters ---------- x : array_like The argument of the PMF. method : {None, 'formula', 'logexp'} The strategy used to evaluate the PMF. By default (`methodmethodx{l, l+1, ..., r-1, r}0xxx rpmf` at all real arguments. References ---------- .. [1] Probability mass function, *Wikipedia*, Examples -------- Instantiate a distribution with the desired parameters: >>> from scipy import stats >>> X = stats.Binomial(n=10, p=0.5) Evaluate the PMF at the desired argument: >>> X.pmf(5) np.float64(0.24609375)",
    "type": "method",
    "file_path": "scipy\\scipy\\stats\\_probability_distribution.py",
    "ast_data": "FunctionDef name:pmf arguments arg arg arg Raise Call"
  },
  {
    "library": "pytorch",
    "name": "_Dependent",
    "source_code": "class _Dependent(Constraint):\n\n    def __init__(self, *, is_discrete=NotImplemented, event_dim=NotImplemented):\n        self._is_discrete = is_discrete\n        self._event_dim = event_dim\n        super().__init__()\n\n    @property\n    def is_discrete(self) -> bool:\n        if self._is_discrete is NotImplemented:\n            raise NotImplementedError('.is_discrete cannot be determined statically')\n        return self._is_discrete\n\n    @property\n    def event_dim(self) -> int:\n        if self._event_dim is NotImplemented:\n            raise NotImplementedError('.event_dim cannot be determined statically')\n        return self._event_dim\n\n    def __call__(self, *, is_discrete=NotImplemented, event_dim=NotImplemented):\n        if is_discrete is NotImplemented:\n            is_discrete = self._is_discrete\n        if event_dim is NotImplemented:\n            event_dim = self._event_dim\n        return _Dependent(is_discrete=is_discrete, event_dim=event_dim)\n\n    def check(self, x):\n        raise ValueError('Cannot determine validity of dependent constraint')",
    "docstring": "Placeholder for variables whose support depends on other variables. These variables obey no simple coordinate-wise constraints. Args: is_discrete (bool): Optional value of `` attribute will raise a NotImplementedError.",
    "type": "class",
    "file_path": "pytorch\\torch\\distributions\\constraints.py",
    "ast_data": "ClassDef name:_Dependent FunctionDef name:__init__ arg:self arguments arg arg arg Assign Assign Call Call FunctionDef name:is_discrete arg:self arguments arg If Compare Raise Call Return return:yes FunctionDef name:event_dim arg:self arguments arg If Compare Raise Call Return return:yes FunctionDef name:__call__ arg:self arguments arg arg arg If Compare Assign If Compare Assign Return return:yes Call FunctionDef name:check arg:self arg:x arguments arg arg Raise Call"
  },
  {
    "library": "tensorflow",
    "name": "_conv_2d_backprop_input_flops",
    "source_code": "@ops.RegisterStatistics('Conv2DBackpropInput', 'flops')\ndef _conv_2d_backprop_input_flops(graph, node):\n    _verify_conv_data_format(node)\n    out_shape = graph_util.tensor_shape_from_node_def_name(graph, node.name)\n    out_shape.assert_is_fully_defined()\n    kernel_shape = graph_util.tensor_shape_from_node_def_name(graph, node.input[1])\n    kernel_shape.assert_is_fully_defined()\n    strides_shape = list(node.attr['strides'].list.i)\n    strides_product = strides_shape[1] * strides_shape[2]\n    return ops.OpStats('flops', 2 * out_shape.num_elements() * kernel_shape.num_elements() / (out_shape.dims[-1].value * strides_product))",
    "docstring": "Compute flops for Conv2DBackpropInput operation.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\profiler\\internal\\flops_registry.py",
    "ast_data": "FunctionDef name:_conv_2d_backprop_input_flops arg:graph arg:node arguments arg arg Call Assign Call Call Assign Call Call Assign Call Assign Return return:yes Call Call Call Call"
  },
  {
    "library": "numpy",
    "name": "hermcompanion",
    "source_code": "def hermcompanion(c):\n    [c] = pu.as_series([c])\n    if len(c) < 2:\n        raise ValueError('Series must have maximum degree of at least 1.')\n    if len(c) == 2:\n        return np.array([[-0.5 * c[0] / c[1]]])\n    n = len(c) - 1\n    mat = np.zeros((n, n), dtype=c.dtype)\n    scl = np.hstack((1.0, 1.0 / np.sqrt(2.0 * np.arange(n - 1, 0, -1))))\n    scl = np.multiply.accumulate(scl)[::-1]\n    top = mat.reshape(-1)[1::n + 1]\n    bot = mat.reshape(-1)[n::n + 1]\n    top[...] = np.sqrt(0.5 * np.arange(1, n))\n    bot[...] = top\n    mat[:, -1] -= scl * c[:-1] / (2.0 * c[-1])\n    return mat",
    "docstring": "Return the scaled companion matrix of c. The basis polynomials are scaled so that the companion matrix is symmetric when is an Hermite basis polynomial. This provides better eigenvalue estimates than the unscaled case and for basis polynomials the eigenvalues are guaranteed to be real if is used to obtain them. Parameters ---------- c : array_like 1-D array of Hermite series coefficients ordered from low to high degree. Returns ------- mat : ndarray Scaled companion matrix of dimensions (deg, deg). Examples -------- >>> from numpy.polynomial.hermite import hermcompanion >>> hermcompanion([1, 0, 1]) array([[0. , 0.35355339], [0.70710678, 0. ]])",
    "type": "function",
    "file_path": "numpy\\numpy\\polynomial\\hermite.py",
    "ast_data": "FunctionDef name:hermcompanion arg:c arguments arg Assign Call If Compare Call Raise Call If Compare Call Return return:yes Call Assign Call Assign Call Assign Call Call Call Assign Call Assign Call Assign Call Assign Call Call Assign Return return:yes"
  },
  {
    "library": "django",
    "name": "aiterator",
    "source_code": "async def aiterator(self, chunk_size=2000):\n    if chunk_size <= 0:\n        raise ValueError('Chunk size must be strictly positive.')\n    use_chunked_fetch = not connections[self.db].settings_dict.get('DISABLE_SERVER_SIDE_CURSORS')\n    iterable = self._iterable_class(self, chunked_fetch=use_chunked_fetch, chunk_size=chunk_size)\n    if self._prefetch_related_lookups:\n        results = []\n        async for item in iterable:\n            results.append(item)\n            if len(results) >= chunk_size:\n                await aprefetch_related_objects(results, *self._prefetch_related_lookups)\n                for result in results:\n                    yield result\n                results.clear()\n        if results:\n            await aprefetch_related_objects(results, *self._prefetch_related_lookups)\n            for result in results:\n                yield result\n    else:\n        async for item in iterable:\n            yield item",
    "docstring": "An asynchronous iterator over the results from applying this QuerySet to the database.",
    "type": "method",
    "file_path": "django\\django\\db\\models\\query.py",
    "ast_data": "AsyncFunctionDef name:aiterator arg:self arg:chunk_size arguments arg arg If Compare Raise Call Assign Call Assign Call If Assign Call If Compare Call Call For Call If Call For"
  },
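A hedged sketch of consuming `aiterator` from async code; the `Author` model is hypothetical and a configured Django project is assumed.

```python
import asyncio
# from myapp.models import Author  # hypothetical model for illustration

async def dump_names():
    # Fetch rows in chunks of 100; with prefetch_related, prefetches
    # fire once per accumulated chunk, as in the implementation above.
    async for author in Author.objects.all().aiterator(chunk_size=100):
        print(author.name)

# asyncio.run(dump_names())  # run inside a project with Django configured
```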
  {
    "library": "pytorch",
    "name": "get_inputs",
    "source_code": "def get_inputs(input_data_path):\n    inputs = []\n    with open(input_data_path, 'rb') as f:\n        inputs_meta = pickle.load(f)\n        inputs = []\n        for meta in inputs_meta:\n            if len(meta) == 1:\n                type = meta\n                input = type(random.rand())\n            else:\n                type, shape, _stride, dtype, device = meta\n                if dtype in {torch.int, torch.int32, torch.int64, torch.bool, torch.int, torch.uint8, int, float}:\n                    input = torch.randint(0, 1, shape, dtype=dtype, device=device)\n                else:\n                    input = torch.rand(shape, dtype=dtype, device=device)\n            inputs.append(input)\n    return inputs",
    "docstring": "Return a random input for the given inputs meta generated from _save_fx_default.",
    "type": "function",
    "file_path": "pytorch\\torch\\_functorch\\compilers.py",
    "ast_data": "FunctionDef name:get_inputs arg:input_data_path arguments arg Assign With Call Assign Call Assign For If Compare Call Assign Assign Call Call Assign If Compare Assign Call Assign Call Call Return return:yes"
  },
  {
    "library": "kornia",
    "name": "invert",
    "source_code": "def invert(image: Tensor, max_val: Optional[Tensor]=None) -> Tensor:\n    if not isinstance(image, Tensor):\n        raise AssertionError(f'Input is not a Tensor. Got: {type(input)}')\n    if max_val is None:\n        _max_val = tensor([1.0])\n    else:\n        _max_val = max_val\n    if not isinstance(_max_val, Tensor):\n        raise AssertionError(f'max_val is not a Tensor. Got: {type(_max_val)}')\n    return _max_val.to(image) - image",
    "docstring": "Invert the values of an input image tensor by its maximum value. .. image:: _static/img/invert.png Args: image: The input tensor to invert with an arbitatry shape. max_val: The expected maximum value in the input tensor. The shape has to according to the input tensor shape, or at least has to work with broadcasting. Example: >>> img = torch.rand(1, 2, 4, 4) >>> invert(img).shape torch.Size([1, 2, 4, 4]) >>> img = 255. * torch.rand(1, 2, 3, 4, 4) >>> invert(img, torch.as_tensor(255.)).shape torch.Size([1, 2, 3, 4, 4]) >>> img = torch.rand(1, 3, 4, 4) >>> invert(img, torch.as_tensor([[[[1.]]]])).shape torch.Size([1, 3, 4, 4])",
    "type": "function",
    "file_path": "kornia\\kornia\\enhance\\adjust.py",
    "ast_data": "FunctionDef name:invert arg:image arg:max_val arguments arg arg If Call Raise Call Call If Compare Assign Call Assign If Call Raise Call Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_handle_weight_regularization",
    "source_code": "def _handle_weight_regularization(self, name, variable, regularizer):\n\n    def _loss_for_variable(v):\n        with backend.name_scope(name + '/Regularizer'):\n            regularization = regularizer(v)\n        return regularization\n    if base_layer_utils.is_split_variable(variable):\n        for v in variable:\n            self.add_loss(functools.partial(_loss_for_variable, v))\n    else:\n        self.add_loss(functools.partial(_loss_for_variable, variable))",
    "docstring": "Create lambdas which compute regularization losses.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\base_layer.py",
    "ast_data": "FunctionDef name:_handle_weight_regularization arg:self arg:name arg:variable arg:regularizer arguments arg arg arg arg FunctionDef name:_loss_for_variable arg:v arguments arg With Call Assign Call Return return:yes If Call For Call Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "addcmul",
    "source_code": "@register_decomposition(aten.addcmul)\n@out_wrapper()\n@elementwise_type_promotion_wrapper(type_promoting_args=('self', 'tensor1', 'tensor2'), type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT)\ndef addcmul(self: TensorLikeType, tensor1: TensorLikeType, tensor2: TensorLikeType, *, value: NumberType=1) -> TensorLikeType:\n    if value is not None:\n        dtype = self.dtype\n        python_type = utils.dtype_to_type(dtype)\n        torch._check_value(utils.is_weakly_lesser_type(type(value), python_type), lambda: f'value argument of type {type(value)} cannot be safely cast to type {python_type}!')\n    return self + value * tensor1 * tensor2",
    "docstring": "Reference implementation of torch.addcmul",
    "type": "function",
    "file_path": "pytorch\\torch\\_refs\\__init__.py",
    "ast_data": "FunctionDef name:addcmul arg:self arg:tensor1 arg:tensor2 arguments arg arg arg arg If Compare Assign Assign Call Call Call Call arguments Call Return return:yes Call Call Call"
  },
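To make the reference semantics concrete, a quick numerical check of `self + value * tensor1 * tensor2` against the public `torch.addcmul`:

```python
import torch

a = torch.randn(3)
t1 = torch.randn(3)
t2 = torch.randn(3)

# addcmul(a, t1, t2, value=v) == a + v * t1 * t2, matching the reference above
expected = a + 0.5 * t1 * t2
torch.testing.assert_close(torch.addcmul(a, t1, t2, value=0.5), expected)
```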
  {
    "library": "tensorflow",
    "name": "avg_pool2d",
    "source_code": "@tf_export('nn.avg_pool2d', v1=[])\n@dispatch.add_dispatch_support\ndef avg_pool2d(input, ksize, strides, padding, data_format='NHWC', name=None):\n    with ops.name_scope(name, 'AvgPool2D', [input]) as name:\n        if data_format is None:\n            data_format = 'NHWC'\n        channel_index = 1 if data_format.startswith('NC') else 3\n        ksize = _get_sequence(ksize, 2, channel_index, 'ksize')\n        strides = _get_sequence(strides, 2, channel_index, 'strides')\n        return gen_nn_ops.avg_pool(input, ksize=ksize, strides=strides, padding=padding, data_format=data_format, name=name)",
    "docstring": "Performs the average pooling on the input. Each entry in is the mean of the corresponding size window in . Args: input: A 4-D of shape and type , , , , or . ksize: An int or list of that has length , or . The size of the window for each dimension of the input tensor. strides: An int or list of that has length , or . The stride of the sliding window for each dimension of the input tensor. padding: A string, either or . The padding algorithm. See [here]( for more information. data_format: A string. 'NHWC' and 'NCHW' are supported. name: Optional name for the operation. Returns: A with the same type as . The average pooled output tensor.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\nn_ops.py",
    "ast_data": "FunctionDef name:avg_pool2d arg:input arg:ksize arg:strides arg:padding arg:data_format arg:name arguments arg arg arg arg arg arg With Call If Compare Assign Assign Call Assign Call Assign Call Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "unique_graph_id",
    "source_code": "def unique_graph_id(proxy_mode, prefix):\n    return unique_graph_name_with_root(proxy_mode.tracer.root, prefix)",
    "docstring": "Returns a unique name and id for a graph to be added to a proxy_mode tracer",
    "type": "function",
    "file_path": "pytorch\\torch\\_higher_order_ops\\utils.py",
    "ast_data": "FunctionDef name:unique_graph_id arg:proxy_mode arg:prefix arguments arg arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "RdzvEvent",
    "source_code": "@dataclass\nclass RdzvEvent:\n    name: str\n    run_id: str\n    message: str\n    hostname: str\n    pid: int\n    node_state: NodeState\n    master_endpoint: str = ''\n    rank: Optional[int] = None\n    local_id: Optional[int] = None\n    error_trace: str = ''\n\n    def __str__(self):\n        return self.serialize()\n\n    @staticmethod\n    def deserialize(data: Union[str, 'RdzvEvent']) -> 'RdzvEvent':\n        if isinstance(data, RdzvEvent):\n            return data\n        if isinstance(data, str):\n            data_dict = json.loads(data)\n        data_dict['node_state'] = NodeState[data_dict['node_state']]\n        return RdzvEvent(**data_dict)\n\n    def serialize(self) -> str:\n        return json.dumps(asdict(self))",
    "docstring": "Dataclass to represent any rendezvous event. Args: name: Event name. (E.g. Current action being performed) run_id: The run id of the rendezvous message: The message describing the event hostname: Hostname of the node pid: The process id of the node node_state: The state of the node (INIT, RUNNING, SUCCEEDED, FAILED) master_endpoint: The master endpoint for the rendezvous store, if known rank: The rank of the node, if known local_id: The local_id of the node, if defined in dynamic_rendezvous.py error_trace: Error stack trace, if this is an error event.",
    "type": "class",
    "file_path": "pytorch\\torch\\distributed\\elastic\\events\\api.py",
    "ast_data": "ClassDef name:RdzvEvent FunctionDef name:__str__ arg:self arguments arg Return return:yes Call FunctionDef name:deserialize arg:data arguments arg If Call Return return:yes If Call Assign Call Assign Return return:yes Call FunctionDef name:serialize arg:self arguments arg Return return:yes Call Call"
  },
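A serialize/deserialize round-trip sketch for `RdzvEvent`; the field values are made up, and the import path is taken from the file_path above.

```python
from torch.distributed.elastic.events.api import NodeState, RdzvEvent

event = RdzvEvent(
    name="join",
    run_id="run-42",
    message="worker joined rendezvous",
    hostname="host-0",
    pid=1234,
    node_state=NodeState.RUNNING,
)
payload = event.serialize()               # JSON string via dataclasses.asdict
restored = RdzvEvent.deserialize(payload) # node_state restored by enum name
assert restored.node_state is NodeState.RUNNING
```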
  {
    "library": "pytorch",
    "name": "warning_text",
    "source_code": "def warning_text(name, amb):\n    text = f'\\nAmbiguities exist in dispatched function {name}\\n\\n'\n    text += 'The following signatures may result in ambiguous behavior:\\n'\n    for pair in amb:\n        text += '\\t' + ', '.join(('[' + str_signature(s) + ']' for s in pair)) + '\\n'\n    text += '\\n\\nConsider making the following additions:\\n\\n'\n    text += '\\n\\n'.join(['@dispatch(' + str_signature(super_signature(s)) + f')\\ndef {name}(...)' for s in amb])\n    return text",
    "docstring": "The text for ambiguity warnings",
    "type": "function",
    "file_path": "pytorch\\torch\\fx\\experimental\\unification\\multipledispatch\\dispatcher.py",
    "ast_data": "FunctionDef name:warning_text arg:name arg:amb arguments arg arg Assign For Call Call Call Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "PlaceholderContext",
    "source_code": "class PlaceholderContext:\n    pass",
    "docstring": "Contains context information for generating placeholders within a scope.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\types\\trace.py",
    "ast_data": "ClassDef name:PlaceholderContext"
  },
  {
    "library": "pytorch",
    "name": "clamp_probs",
    "source_code": "def clamp_probs(probs):\n    eps = torch.finfo(probs.dtype).eps\n    return probs.clamp(min=eps, max=1 - eps)",
    "docstring": "Clamps the probabilities to be in the open interval . The probabilities would be clamped between and , and would be the smallest representable positive number for the input data type. Args: probs (Tensor): A tensor of probabilities. Returns: Tensor: The clamped probabilities. Examples: >>> probs = torch.tensor([0.0, 0.5, 1.0]) >>> clamp_probs(probs) tensor([1.1921e-07, 5.0000e-01, 1.0000e+00]) >>> probs = torch.tensor([0.0, 0.5, 1.0], dtype=torch.float64) >>> clamp_probs(probs) tensor([2.2204e-16, 5.0000e-01, 1.0000e+00], dtype=torch.float64)",
    "type": "function",
    "file_path": "pytorch\\torch\\distributions\\utils.py",
    "ast_data": "FunctionDef name:clamp_probs arg:probs arguments arg Assign Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "register_lowering_pattern",
    "source_code": "def register_lowering_pattern(pattern, extra_check=_return_true, pass_number=1) -> Callable[[Callable[_P, _T]], Callable[_P, _T]]:\n    return pattern_matcher.register_lowering_pattern(pattern, extra_check, pass_dict=pass_patterns[pass_number])",
    "docstring": "Register an aten to inductor IR replacement pattern",
    "type": "function",
    "file_path": "pytorch\\torch\\_inductor\\fx_passes\\post_grad.py",
    "ast_data": "FunctionDef name:register_lowering_pattern arg:pattern arg:extra_check arg:pass_number arguments arg arg arg Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "_minimize_dogleg",
    "source_code": "def _minimize_dogleg(fun, x0, args=(), jac=None, hess=None, **trust_region_options):\n    if jac is None:\n        raise ValueError('Jacobian is required for dogleg minimization')\n    if not callable(hess):\n        raise ValueError('Hessian is required for dogleg minimization')\n    return _minimize_trust_region(fun, x0, args=args, jac=jac, hess=hess, subproblem=DoglegSubproblem, **trust_region_options)",
    "docstring": "Minimization of scalar function of one or more variables using the dog-leg trust-region algorithm. Options ------- initial_trust_radius : float Initial trust-region radius. max_trust_radius : float Maximum value of the trust-region radius. No steps that are longer than this value will be proposed. eta : float Trust region related acceptance stringency for proposed steps. gtol : float Gradient norm must be less than before successful termination.",
    "type": "function",
    "file_path": "scipy\\scipy\\optimize\\_trustregion_dogleg.py",
    "ast_data": "FunctionDef name:_minimize_dogleg arg:fun arg:x0 arg:args arg:jac arg:hess arguments arg arg arg arg arg arg If Compare Raise Call If Call Raise Call Return return:yes Call"
  },
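`_minimize_dogleg` is normally reached through the public `scipy.optimize.minimize` interface; a small sketch on the Rosenbrock function (note both `jac` and `hess` are required, matching the checks above):

```python
import numpy as np
from scipy.optimize import minimize, rosen, rosen_der, rosen_hess

x0 = np.array([1.3, 0.7, 0.8, 1.9, 1.2])
res = minimize(rosen, x0, method="dogleg", jac=rosen_der, hess=rosen_hess)
print(res.x)  # converges toward the minimizer at all ones
```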
  {
    "library": "tensorflow",
    "name": "sparse_average_precision_at_k",
    "source_code": "@tf_export(v1=['metrics.sparse_average_precision_at_k'])\n@deprecated(None, 'Use average_precision_at_k instead')\ndef sparse_average_precision_at_k(labels, predictions, k, weights=None, metrics_collections=None, updates_collections=None, name=None):\n    return average_precision_at_k(labels=labels, predictions=predictions, k=k, weights=weights, metrics_collections=metrics_collections, updates_collections=updates_collections, name=name)",
    "docstring": "Renamed to , please use that method instead.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\metrics_impl.py",
    "ast_data": "FunctionDef name:sparse_average_precision_at_k arg:labels arg:predictions arg:k arg:weights arg:metrics_collections arg:updates_collections arg:name arguments arg arg arg arg arg arg arg Return return:yes Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "placeholder",
    "source_code": "@tf_export(v1=['ragged.placeholder'])\n@dispatch.add_dispatch_support\ndef placeholder(dtype, ragged_rank, value_shape=None, name=None):\n    if ragged_rank == 0:\n        return array_ops.placeholder(dtype, value_shape, name)\n    with ops.name_scope(name, 'RaggedPlaceholder', []):\n        flat_shape = tensor_shape.TensorShape([None]).concatenate(value_shape)\n        result = array_ops.placeholder(dtype, flat_shape, 'flat_values')\n        for i in reversed(range(ragged_rank)):\n            row_splits = array_ops.placeholder(dtypes.int64, [None], 'row_splits_%d' % i)\n            result = ragged_tensor.RaggedTensor.from_row_splits(result, row_splits, validate=False)\n        return result",
    "docstring": "Creates a placeholder for a that will always be fed. **Important**: This ragged tensor will produce an error if evaluated. Its value must be fed using the optional argument to , , or . Args: dtype: The data type for the . ragged_rank: The ragged rank for the value_shape: The shape for individual flat values in the . name: A name for the operation (optional). Returns: A that may be used as a handle for feeding a value, but not evaluated directly. Raises: RuntimeError: if eager execution is enabled @compatibility(TF2) This API is not compatible with eager execution and . To migrate to TF2, rewrite the code to be compatible with eager execution. Check the [migration guide]( on replacing calls. In TF2, you can just pass tensors directly into ops and layers. If you want to explicitly set up your inputs, also see [Keras functional API]( on how to use to replace . arguments also do the job of . For more details please read [Better performance with tf.function]( @end_compatibility",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\ragged_factory_ops.py",
    "ast_data": "FunctionDef name:placeholder arg:dtype arg:ragged_rank arg:value_shape arg:name arguments arg arg arg arg If Compare Return return:yes Call With Call Assign Call Call Assign Call For Call Call Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "ihfft2",
    "source_code": "def ihfft2(x, s=None, axes=(-2, -1), norm=None, overwrite_x=False, workers=None, *, plan=None):\n    if plan is not None:\n        raise NotImplementedError('Passing a precomputed plan is not yet supported by scipy.fft functions')\n    return ihfftn(x, s, axes, norm, overwrite_x, workers)",
    "docstring": "2-D discrete inverse Fourier transform of a Hermitian sequence",
    "type": "function",
    "file_path": "scipy\\scipy\\fft\\_pocketfft\\basic.py",
    "ast_data": "FunctionDef name:ihfft2 arg:x arg:s arg:axes arg:norm arg:overwrite_x arg:workers arguments arg arg arg arg arg arg arg If Compare Raise Call Return return:yes Call"
  },
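A hedged round-trip sketch using the public `scipy.fft` API: `ihfft2` produces the half-spectrum of a real array, and `hfft2` with the original shape should recover it.

```python
import numpy as np
from scipy.fft import hfft2, ihfft2

x = np.random.rand(8, 8)
spectrum = ihfft2(x)                     # complex, last axis reduced to n//2 + 1
recovered = hfft2(spectrum, s=x.shape)   # back to the real 8x8 array
np.testing.assert_allclose(recovered, x, atol=1e-10)
```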
  {
    "library": "pygame",
    "name": "__init__",
    "source_code": "def __init__(self, *args):\n    if len(args) == 0:\n        self.type = 'system'\n        self.data = (pygame.SYSTEM_CURSOR_ARROW,)\n    elif len(args) == 1 and args[0] in _cursor_id_table:\n        self.type = 'system'\n        self.data = (args[0],)\n    elif len(args) == 1 and isinstance(args[0], Cursor):\n        self.type = args[0].type\n        self.data = args[0].data\n    elif len(args) == 2 and len(args[0]) == 2 and isinstance(args[1], pygame.Surface):\n        self.type = 'color'\n        self.data = tuple(args)\n    elif len(args) == 4 and len(args[0]) == 2 and (len(args[1]) == 2):\n        self.type = 'bitmap'\n        self.data = tuple((tuple(arg) for arg in args))\n    else:\n        raise TypeError('Arguments must match a cursor specification')",
    "docstring": "Cursor(size, hotspot, xormasks, andmasks) -> Cursor Cursor(hotspot, Surface) -> Cursor Cursor(constant) -> Cursor Cursor(Cursor) -> copies the Cursor object passed as an argument Cursor() -> Cursor pygame object for representing cursors You can initialize a cursor from a system cursor or use the constructor on an existing Cursor object, which will copy it. Providing a Surface instance will render the cursor displayed as that Surface when used. These Surfaces may use other colors than black and white.",
    "type": "method",
    "file_path": "pygame\\src_py\\cursors.py",
    "ast_data": "FunctionDef name:__init__ arg:self arguments arg arg If Compare Call Assign Assign If BoolOp Compare Call Compare Assign Assign If BoolOp Compare Call Call Assign Assign If BoolOp Compare Call Compare Call Call Assign Assign Call If BoolOp Compare Call Compare Call Compare Call Assign Assign Call Call Raise Call"
  },
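A short sketch of the constructor variants the docstring lists; creating Surfaces needs no display, but actually applying a cursor with `pygame.mouse.set_cursor` does.

```python
import pygame

# System cursor: a single constant
arrow = pygame.cursors.Cursor(pygame.SYSTEM_CURSOR_ARROW)

# Color cursor: (hotspot, Surface)
surf = pygame.Surface((16, 16))
surf.fill((255, 0, 0))
red_cursor = pygame.cursors.Cursor((8, 8), surf)

# Copy constructor
red_copy = pygame.cursors.Cursor(red_cursor)
```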
  {
    "library": "tensorflow",
    "name": "task_type",
    "source_code": "@property\ndef task_type(self):\n    return self._task_type",
    "docstring": "Returns the role of the corresponding task.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\distribute\\distribute_coordinator_utils.py",
    "ast_data": "FunctionDef name:task_type arg:self arguments arg Return return:yes"
  },
  {
    "library": "kornia",
    "name": "generate_transformation_matrix",
    "source_code": "def generate_transformation_matrix(self, input: Tensor, params: Dict[str, Tensor], flags: Dict[str, Any]) -> Tensor:\n    batch_prob = params['batch_prob']\n    to_apply = batch_prob > 0.5\n    in_tensor = self.transform_tensor(input)\n    if not to_apply.any():\n        trans_matrix = self.identity_matrix(in_tensor)\n    elif to_apply.all():\n        trans_matrix = self.compute_transformation(in_tensor, params=params, flags=flags)\n    else:\n        trans_matrix_A = self.identity_matrix(in_tensor)\n        trans_matrix_B = self.compute_transformation(in_tensor[to_apply], params=params, flags=flags)\n        if is_autocast_enabled():\n            trans_matrix_A = trans_matrix_A.type(input.dtype)\n            trans_matrix_B = trans_matrix_B.type(input.dtype)\n        trans_matrix = trans_matrix_A.index_put((to_apply,), trans_matrix_B)\n    return trans_matrix",
    "docstring": "Generate transformation matrices with the given input and param settings.",
    "type": "method",
    "file_path": "kornia\\kornia\\augmentation\\_2d\\base.py",
    "ast_data": "FunctionDef name:generate_transformation_matrix arg:self arg:input arg:params arg:flags arguments arg arg arg arg Assign Assign Compare Assign Call If Call Assign Call If Call Assign Call Assign Call Assign Call If Call Assign Call Assign Call Assign Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "splity",
    "source_code": "def splity(self, *args):\n    yf = [0, *args, 1]\n    x0, y0, x1, y1 = self.extents\n    h = y1 - y0\n    return [Bbox([[x0, y0 + yf0 * h], [x1, y0 + yf1 * h]]) for yf0, yf1 in itertools.pairwise(yf)]",
    "docstring": "Return a list of new objects formed by splitting the original one with horizontal lines at fractional positions given by *args*.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\transforms.py",
    "ast_data": "FunctionDef name:splity arg:self arguments arg arg Assign Assign Assign Return return:yes Call Call"
  },
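A worked sketch of `splity` on the unit bounding box; the fractions 0.25 and 0.5 yield three stacked boxes.

```python
from matplotlib.transforms import Bbox

bbox = Bbox([[0, 0], [1, 1]])
bottom, middle, top = bbox.splity(0.25, 0.5)
print(bottom.extents)  # [0.   0.   1.   0.25]
print(middle.extents)  # [0.   0.25 1.   0.5 ]
print(top.extents)     # [0.   0.5  1.   1.  ]
```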
  {
    "library": "pytorch",
    "name": "node_replace_",
    "source_code": "def node_replace_(old_node: torch.fx.Node, new_node: torch.fx.Node) -> None:\n    old_node.replace_all_uses_with(new_node)\n    old_node.users.clear()\n    old_node.graph.erase_node(old_node)",
    "docstring": "Replace all uses of old_node with new_node.",
    "type": "function",
    "file_path": "pytorch\\torch\\_export\\utils.py",
    "ast_data": "FunctionDef name:node_replace_ arg:old_node arg:new_node arguments arg arg Call Call Call"
  },
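A sketch of `node_replace_` used for FX graph surgery, swapping an `add` for a `sub`; importing this private helper from the file_path above is an assumption.

```python
import operator

import torch
import torch.fx as fx
from torch._export.utils import node_replace_  # private helper, path assumed

def f(x):
    return (x + 1) * 2

gm = fx.symbolic_trace(f)
for node in list(gm.graph.nodes):
    if node.op == "call_function" and node.target is operator.add:
        with gm.graph.inserting_before(node):
            new_node = gm.graph.call_function(operator.sub, node.args, node.kwargs)
        node_replace_(node, new_node)  # rewires users, then erases the old node
gm.recompile()
assert gm(torch.tensor(3.0)) == 4.0  # (3 - 1) * 2
```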
  {
    "library": "pytorch",
    "name": "LSTMCell",
    "source_code": "class LSTMCell(RNNCellBase):\n\n    def __init__(self, *args, **kwargs):\n        super().__init__(*args, num_chunks=4, **kwargs)\n\n    def _get_name(self):\n        return 'DynamicQuantizedLSTMCell'\n\n    def forward(self, input: Tensor, hx: Optional[tuple[Tensor, Tensor]]=None) -> tuple[Tensor, Tensor]:\n        self.check_forward_input(input)\n        if hx is None:\n            zeros = torch.zeros(input.size(0), self.hidden_size, dtype=input.dtype, device=input.device)\n            hx = (zeros, zeros)\n        self.check_forward_hidden(input, hx[0], '[0]')\n        self.check_forward_hidden(input, hx[1], '[1]')\n        return torch.ops.quantized.quantized_lstm_cell_dynamic(input, hx, self._packed_weight_ih, self._packed_weight_hh, self.bias_ih, self.bias_hh)\n\n    @classmethod\n    def from_float(cls, mod, use_precomputed_fake_quant=False):\n        return super().from_float(mod, use_precomputed_fake_quant=use_precomputed_fake_quant)",
    "docstring": "A long short-term memory (LSTM) cell. A dynamic quantized LSTMCell module with floating point tensor as inputs and outputs. Weights are quantized to 8 bits. We adopt the same interface as , please see for documentation. Examples:: >>> # xdoctest: +SKIP >>> rnn = nn.LSTMCell(10, 20) >>> input = torch.randn(6, 3, 10) >>> hx = torch.randn(3, 20) >>> cx = torch.randn(3, 20) >>> output = [] >>> for i in range(6): ... hx, cx = rnn(input[i], (hx, cx)) ... output.append(hx)",
    "type": "class",
    "file_path": "pytorch\\torch\\ao\\nn\\quantized\\dynamic\\modules\\rnn.py",
    "ast_data": "ClassDef name:LSTMCell FunctionDef name:__init__ arg:self arguments arg arg arg Call Call FunctionDef name:_get_name arg:self arguments arg Return return:yes FunctionDef name:forward arg:self arg:input arg:hx arguments arg arg arg Call If Compare Assign Call Call Assign Call Call Return return:yes Call FunctionDef name:from_float arg:cls arg:mod arg:use_precomputed_fake_quant arguments arg arg arg Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_wrap_activity_regularizer",
    "source_code": "def _wrap_activity_regularizer(layer):\n    if isinstance(layer._activity_regularizer, def_function.Function):\n        return layer._activity_regularizer\n    return def_function.Function(layer._activity_regularizer, '{}_activity_regularizer'.format(layer.name), input_signature=[tensor_spec.TensorSpec(None, layer._compute_dtype or K.floatx())])",
    "docstring": "Wraps the activity regularizer.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\saving\\saved_model\\save_impl.py",
    "ast_data": "FunctionDef name:_wrap_activity_regularizer arg:layer arguments arg If Call Return return:yes Return return:yes Call Call Call BoolOp Call"
  },
  {
    "library": "pandas",
    "name": "compare_or_regex_search",
    "source_code": "def compare_or_regex_search(a: ArrayLike, b: Scalar | Pattern, regex: bool, mask: npt.NDArray[np.bool_]) -> ArrayLike:\n    if isna(b):\n        return ~mask\n\n    def _check_comparison_types(result: ArrayLike | bool, a: ArrayLike, b: Scalar | Pattern) -> None:\n        if is_bool(result) and isinstance(a, np.ndarray):\n            type_names = [type(a).__name__, type(b).__name__]\n            type_names[0] = f'ndarray(dtype={a.dtype})'\n            raise TypeError(f'Cannot compare types {type_names[0]!r} and {type_names[1]!r}')\n    if not regex or not should_use_regex(regex, b):\n        op = lambda x: operator.eq(x, b)\n    else:\n        op = np.vectorize(lambda x: bool(re.search(b, x)) if isinstance(x, str) and isinstance(b, (str, Pattern)) else False, otypes=[bool])\n    if isinstance(a, np.ndarray) and mask is not None:\n        a = a[mask]\n        result = op(a)\n        if isinstance(result, np.ndarray):\n            tmp = np.zeros(mask.shape, dtype=np.bool_)\n            np.place(tmp, mask, result)\n            result = tmp\n    else:\n        result = op(a)\n    _check_comparison_types(result, a, b)\n    return result",
    "docstring": "Compare two array-like inputs of the same shape or two scalar values Calls operator.eq or re.search, depending on regex argument. If regex is True, perform an element-wise regex matching. Parameters ---------- a : array-like b : scalar or regex pattern regex : bool mask : np.ndarray[bool] Returns ------- mask : array-like of bool",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\array_algos\\replace.py",
    "ast_data": "FunctionDef name:compare_or_regex_search arg:a arg:b arg:regex arg:mask arguments arg arg arg arg If Call Return return:yes FunctionDef name:_check_comparison_types arg:result arg:a arg:b arguments arg arg arg If BoolOp Call Call Assign Call Call Assign Raise Call If BoolOp Call Assign arguments arg Call Assign Call arguments arg BoolOp Call Call Call Call If BoolOp Call Compare Assign Assign Call If Call Assign Call Call Assign Assign Call Call Return return:yes"
  },
  {
    "library": "scipy",
    "name": "entropy",
    "source_code": "def entropy(self, rowcov=1, colcov=1):\n    dummy_mean = np.zeros((rowcov.shape[0], colcov.shape[0]))\n    dims, _, rowcov, colcov = self._process_parameters(dummy_mean, rowcov, colcov)\n    rowpsd = _PSD(rowcov, allow_singular=False)\n    colpsd = _PSD(colcov, allow_singular=False)\n    return self._entropy(dims, rowpsd.log_pdet, colpsd.log_pdet)",
    "docstring": "Log of the matrix normal probability density function. Parameters ---------- rowcov : array_like, optional Among-row covariance matrix of the distribution (default: ``) Returns ------- entropy : float Entropy of the distribution Notes ----- %(_matnorm_doc_callparams_note)s",
    "type": "method",
    "file_path": "scipy\\scipy\\stats\\_multivariate.py",
    "ast_data": "FunctionDef name:entropy arg:self arg:rowcov arg:colcov arguments arg arg arg Assign Call Assign Call Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "initializer",
    "source_code": "@property\ndef initializer(self):\n    raise NotImplementedError",
    "docstring": "The initializer operation for this variable.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\variables.py",
    "ast_data": "FunctionDef name:initializer arg:self arguments arg Raise"
  },
  {
    "library": "django",
    "name": "add",
    "source_code": "def add(self, objs, source=None, nullable=False, reverse_dependency=False):\n    if not objs:\n        return []\n    new_objs = []\n    model = objs[0].__class__\n    instances = self.data[model]\n    for obj in objs:\n        if obj not in instances:\n            new_objs.append(obj)\n    instances.update(new_objs)\n    if source is not None and (not nullable):\n        self.add_dependency(source, model, reverse_dependency=reverse_dependency)\n    return new_objs",
    "docstring": "Add 'objs' to the collection of objects to be deleted. If the call is the result of a cascade, 'source' should be the model that caused it, and 'nullable' should be set to True if the relation can be null. Return a list of all objects that were not already collected.",
    "type": "method",
    "file_path": "django\\django\\db\\models\\deletion.py",
    "ast_data": "FunctionDef name:add arg:self arg:objs arg:source arg:nullable arg:reverse_dependency arguments arg arg arg arg arg If Return return:no Assign Assign Assign For If Compare Call Call If BoolOp Compare Call Return return:yes"
  },
  {
    "library": "seaborn",
    "name": "Range",
    "source_code": "@document_properties\n@dataclass\nclass Range(Paths):\n\n    def _setup_segments(self, data, orient):\n        val = {'x': 'y', 'y': 'x'}[orient]\n        if not set(data.columns) & {f'{val}min', f'{val}max'}:\n            agg = {f'{val}min': (val, 'min'), f'{val}max': (val, 'max')}\n            data = data.groupby(orient).agg(**agg).reset_index()\n        cols = [orient, f'{val}min', f'{val}max']\n        data = data[cols].melt(orient, value_name=val)[['x', 'y']]\n        segments = [d.to_numpy() for _, d in data.groupby(orient)]\n        return segments",
    "docstring": "An oriented line mark drawn between min/max values. Examples -------- .. include:: ../docstrings/objects.Range.rst",
    "type": "class",
    "file_path": "seaborn\\seaborn\\_marks\\line.py",
    "ast_data": "ClassDef name:Range FunctionDef name:_setup_segments arg:self arg:data arg:orient arguments arg arg arg Assign If Call Assign Assign Call Call Call Assign Assign Call Assign Call Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "xpdf_distill",
    "source_code": "def xpdf_distill(tmpfile, eps=False, ptype='letter', bbox=None, rotated=False):\n    mpl._get_executable_info('gs')\n    mpl._get_executable_info('pdftops')\n    if eps:\n        paper_option = ['-dEPSCrop']\n    elif ptype == 'figure':\n        paper_option = [f'-dDEVICEWIDTHPOINTS#{bbox[2]}', f'-dDEVICEHEIGHTPOINTS#{bbox[3]}']\n    else:\n        paper_option = [f'-sPAPERSIZE#{ptype}']\n    with TemporaryDirectory() as tmpdir:\n        tmppdf = pathlib.Path(tmpdir, 'tmp.pdf')\n        tmpps = pathlib.Path(tmpdir, 'tmp.ps')\n        cbook._check_and_log_subprocess(['ps2pdf', '-dAutoFilterColorImages#false', '-dAutoFilterGrayImages#false', '-sAutoRotatePages#None', '-sGrayImageFilter#FlateEncode', '-sColorImageFilter#FlateEncode', *paper_option, tmpfile, tmppdf], _log)\n        cbook._check_and_log_subprocess(['pdftops', '-paper', 'match', '-level3', tmppdf, tmpps], _log)\n        shutil.move(tmpps, tmpfile)\n    if eps:\n        pstoeps(tmpfile)",
    "docstring": "Use ghostscript's ps2pdf and xpdf's/poppler's pdftops to distill a file. This yields smaller files without illegal encapsulated postscript operators. This distiller is preferred, generating high-level postscript output that treats text as text.",
    "type": "function",
    "file_path": "matplotlib\\lib\\matplotlib\\backends\\backend_ps.py",
    "ast_data": "FunctionDef name:xpdf_distill arg:tmpfile arg:eps arg:ptype arg:bbox arg:rotated arguments arg arg arg arg arg Call Call If Assign If Compare Assign Assign With Call Assign Call Assign Call Call Call Call If Call"
  },
  {
    "library": "sphinx",
    "name": "render",
    "source_code": "def render(self, template: str, context: dict[str, Any]) -> None:\n    msg = 'must be implemented in subclasses'\n    raise NotImplementedError(msg)",
    "docstring": "Called by the builder to render a template given as a filename with a specified context (a Python dictionary).",
    "type": "method",
    "file_path": "sphinx\\sphinx\\application.py",
    "ast_data": "FunctionDef name:render arg:self arg:template arg:context arguments arg arg arg Assign Raise Call"
  },
  {
    "library": "matplotlib",
    "name": "FontMetrics",
    "source_code": "class FontMetrics(NamedTuple):\n    advance: float\n    height: float\n    width: float\n    xmin: float\n    xmax: float\n    ymin: float\n    ymax: float\n    iceberg: float\n    slanted: bool",
    "docstring": "Metrics of a font. Attributes ---------- advance : float The advance distance (in points) of the glyph. height : float The height of the glyph in points. width : float The width of the glyph in points. xmin, xmax, ymin, ymax : float The ink rectangle of the glyph. iceberg : float The distance from the baseline to the top of the glyph. (This corresponds to TeX's definition of \"height\".) slanted : bool Whether the glyph should be considered as \"slanted\" (currently used for kerning sub/superscripts).",
    "type": "class",
    "file_path": "matplotlib\\lib\\matplotlib\\_mathtext.py",
    "ast_data": "ClassDef name:FontMetrics"
  },
  {
    "library": "tensorflow",
    "name": "_make_intermediates_match",
    "source_code": "def _make_intermediates_match(branch_graphs, branch_optionals):\n    new_branch_optionals = []\n    intermediates_size = max((len(o) for o in branch_optionals))\n    for i, branch_graph in enumerate(branch_graphs):\n        other_optionals = _create_none_optionals(branch_graph, intermediates_size - len(branch_optionals[i]))\n        new_branch_optionals.append(branch_optionals[i] + other_optionals)\n    return new_branch_optionals",
    "docstring": "Returns new optionals lists that have matching signatures. This is done by mirroring each list in the other using none optionals. There is no merging of like optionals. Args: branch_graphs: of . branch_optionals: of s of optional s from other branch_graphs Returns: A of s of s for each branch_graph. Each list has the same number of s, all of which will be optionals of the same shape/type.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\cond_v2.py",
    "ast_data": "FunctionDef name:_make_intermediates_match arg:branch_graphs arg:branch_optionals arguments arg arg Assign Assign Call Call For Call Assign Call Call Call Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "ClassNamePrefixFeaturesOutMixin",
    "source_code": "class ClassNamePrefixFeaturesOutMixin:\n\n    def get_feature_names_out(self, input_features=None):\n        check_is_fitted(self, '_n_features_out')\n        return _generate_get_feature_names_out(self, self._n_features_out, input_features=input_features)",
    "docstring": "Mixin class for transformers that generate their own names by prefixing. This mixin is useful when the transformer needs to generate its own feature names out, such as :class:. For example, if :class: outputs 3 features, then the generated feature names out are: . This mixin assumes that a attribute is defined when the transformer is fitted. is the number of output features that the transformer will return in of . Examples -------- >>> import numpy as np >>> from sklearn.base import ClassNamePrefixFeaturesOutMixin, BaseEstimator >>> class MyEstimator(ClassNamePrefixFeaturesOutMixin, BaseEstimator): ... def fit(self, X, y=None): ... self._n_features_out = X.shape[1] ... return self >>> X = np.array([[1, 2], [3, 4]]) >>> MyEstimator().fit(X).get_feature_names_out() array(['myestimator0', 'myestimator1'], dtype=object)",
    "type": "class",
    "file_path": "scikit-learn\\sklearn\\base.py",
    "ast_data": "ClassDef name:ClassNamePrefixFeaturesOutMixin FunctionDef name:get_feature_names_out arg:self arg:input_features arguments arg arg Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_compute_fans",
    "source_code": "def _compute_fans(shape):\n    if len(shape) < 1:\n        fan_in = fan_out = 1\n    elif len(shape) == 1:\n        fan_in = fan_out = shape[0]\n    elif len(shape) == 2:\n        fan_in = shape[0]\n        fan_out = shape[1]\n    else:\n        receptive_field_size = 1\n        for dim in shape[:-2]:\n            receptive_field_size *= dim\n        fan_in = shape[-2] * receptive_field_size\n        fan_out = shape[-1] * receptive_field_size\n    return (int(fan_in), int(fan_out))",
    "docstring": "Computes the number of input and output units for a weight shape. Args: shape: Integer shape tuple or TF tensor shape. Returns: A tuple of integer scalars (fan_in, fan_out).",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\initializers\\initializers_v2.py",
    "ast_data": "FunctionDef name:_compute_fans arg:shape arguments arg If Compare Call Assign If Compare Call Assign If Compare Call Assign Assign Assign For Assign Assign Return return:yes Call Call"
  },
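Worked examples of the fan computation above, assuming the private helper `_compute_fans` is in scope:

```python
# Dense kernel (in, out) = (64, 10): fan_in = 64, fan_out = 10
assert _compute_fans((64, 10)) == (64, 10)

# 3x3 conv kernel, 16 in / 32 out channels: receptive_field_size = 3 * 3 = 9,
# so fan_in = 16 * 9 = 144 and fan_out = 32 * 9 = 288
assert _compute_fans((3, 3, 16, 32)) == (144, 288)
```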
  {
    "library": "scipy",
    "name": "box_intersections",
    "source_code": "def box_intersections(z, d, lb, ub, entire_line=False):\n    z = np.asarray(z)\n    d = np.asarray(d)\n    lb = np.asarray(lb)\n    ub = np.asarray(ub)\n    if norm(d) == 0:\n        return (0, 0, False)\n    zero_d = d == 0\n    if (z[zero_d] < lb[zero_d]).any() or (z[zero_d] > ub[zero_d]).any():\n        intersect = False\n        return (0, 0, intersect)\n    not_zero_d = np.logical_not(zero_d)\n    z = z[not_zero_d]\n    d = d[not_zero_d]\n    lb = lb[not_zero_d]\n    ub = ub[not_zero_d]\n    t_lb = (lb - z) / d\n    t_ub = (ub - z) / d\n    ta = max(np.minimum(t_lb, t_ub))\n    tb = min(np.maximum(t_lb, t_ub))\n    if ta <= tb:\n        intersect = True\n    else:\n        intersect = False\n    if not entire_line:\n        if tb < 0 or ta > 1:\n            intersect = False\n            ta = 0\n            tb = 0\n        else:\n            ta = max(0, ta)\n            tb = min(1, tb)\n    return (ta, tb, intersect)",
    "docstring": "Find the intersection between segment (or line) and box constraints. Find the intersection between the segment (or line) defined by the parametric equation ``, there is no intersection.",
    "type": "function",
    "file_path": "scipy\\scipy\\optimize\\_trustregion_constr\\qp_subproblem.py",
    "ast_data": "FunctionDef name:box_intersections arg:z arg:d arg:lb arg:ub arg:entire_line arguments arg arg arg arg arg Assign Call Assign Call Assign Call Assign Call If Compare Call Return return:yes Assign Compare If BoolOp Call Compare Call Compare Assign Return return:yes Assign Call Assign Assign Assign Assign Assign Assign Assign Call Call Assign Call Call If Compare Assign Assign If If BoolOp Compare Compare Assign Assign Assign Assign Call Assign Call Return return:yes"
  },
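A numeric sketch of `box_intersections`, assuming the private function is in scope: the segment from the origin along `d = (2, 0)` lies inside the box `[1, 3] x [-1, 1]` for `t` in `[0.5, 1]` (the raw upper bound 1.5 is clipped to the segment's t <= 1).

```python
import numpy as np

z = np.array([0.0, 0.0])        # segment start: x(t) = z + t*d, t in [0, 1]
d = np.array([2.0, 0.0])
lb = np.array([1.0, -1.0])
ub = np.array([3.0, 1.0])

ta, tb, intersect = box_intersections(z, d, lb, ub)
print(ta, tb, intersect)        # 0.5 1.0 True
```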
  {
    "library": "pytorch",
    "name": "inputs_are_mutable",
    "source_code": "@classmethod\ndef inputs_are_mutable(cls, t: _ExtraFields_TorchOp) -> tuple[Optional[bool], ...]:\n    mutable: Optional[list[bool]] = None\n    for schema in cls.match_schemas(t):\n        mutable = mutable or [False for _ in schema.arguments]\n        for i, arg in enumerate(schema.arguments):\n            mutable[i] |= getattr(arg.alias_info, 'is_write', False)\n    return tuple(mutable or (None for _ in t.inputs))",
    "docstring": "Determine which inputs may have mutated based on function schema. Note that we don't need to resolve down to a single schema to perform this analysis. An input is mutable if it is mutable in any overload. In practice, however, it is overwhelmingly common to match a single overload. If we cannot find any valid schema then we must be conservative and assume all inputs are mutable.",
    "type": "method",
    "file_path": "pytorch\\torch\\profiler\\_memory_profiler.py",
    "ast_data": "FunctionDef name:inputs_are_mutable arg:cls arg:t arguments arg arg For Call Assign BoolOp For Call Call Return return:yes Call BoolOp"
  },
  {
    "library": "tensorflow",
    "name": "create_report",
    "source_code": "def create_report(self, tt_config, tt_parameters, tensor_trace_order, tensor_trace_points):\n    with OpenReportFile(tt_parameters) as self._report_file:\n        self._write_config_section(tt_config, tt_parameters)\n        self._write_op_list_section(tensor_trace_order.graph_order)\n        self._write_tensor_list_section(tensor_trace_order.graph_order)\n        self._write_trace_points(tensor_trace_points)\n        self._write_cache_index_map_section(tensor_trace_order)\n        self._write_reason_section()\n        self._write_graph_section(tensor_trace_order.graph_order)",
    "docstring": "Creates a report file and writes the trace information.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\tpu\\tensor_tracer_report.py",
    "ast_data": "FunctionDef name:create_report arg:self arg:tt_config arg:tt_parameters arg:tensor_trace_order arg:tensor_trace_points arguments arg arg arg arg arg With Call Call Call Call Call Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_backward",
    "source_code": "def _backward(accs, elems):\n    state_log_prob, cum_log_sum = accs\n    obs_log_prob, mask = elems\n    state_log_prob += obs_log_prob\n    state_log_prob = array_ops.expand_dims(state_log_prob, axis=1)\n    state_log_prob += bwd_state_trans_log_probs\n    state_log_prob = math_ops.reduce_logsumexp(state_log_prob, axis=-1)\n    log_prob_sum = math_ops.reduce_logsumexp(state_log_prob, axis=-1, keepdims=True)\n    state_log_prob -= log_prob_sum\n    cum_log_sum += array_ops.squeeze(log_prob_sum, axis=[-1]) * mask\n    batched_mask = array_ops.expand_dims(mask, axis=1)\n    out = state_log_prob * batched_mask\n    out += final_state_log_probs * (1.0 - batched_mask)\n    return (out, cum_log_sum)",
    "docstring": "Calculate log probs and cumulative sum masked for sequence length.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\ctc_ops.py",
    "ast_data": "FunctionDef name:_backward arg:accs arg:elems arguments arg arg Assign Assign Assign Call Assign Call Assign Call Call Assign Call Assign Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "linear_pointwise_strategy",
    "source_code": "def linear_pointwise_strategy(op_schema: OpSchema) -> StrategyType:\n    return pointwise_strategy(op_schema, linearity=True)",
    "docstring": "Linear pointwise operators can propagate pending reductions. For example, c = add(a, b); if a is pending sum, then c will be pending sum as well without any communication overhead.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\tensor\\_ops\\_pointwise_ops.py",
    "ast_data": "FunctionDef name:linear_pointwise_strategy arg:op_schema arguments arg Return return:yes Call"
  },
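A toy NumPy sketch (not DTensor code) of the linearity property the docstring relies on: for a linear op, the reduction over shards can be deferred past the op, so no communication is needed when the inputs are pending sums.

```python
import numpy as np

# a and b are each "pending sum" over two shards: a = sum(a_shards).
a_shards = [np.array([1.0, 2.0]), np.array([3.0, 4.0])]
b_shards = [np.array([10.0, 20.0]), np.array([30.0, 40.0])]

# Reduce first, then add:
eager = sum(a_shards) + sum(b_shards)
# Add shard-wise, keep the result pending, reduce later:
deferred = sum(ai + bi for ai, bi in zip(a_shards, b_shards))

assert np.allclose(eager, deferred)  # linearity: the two orders agree
```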
  {
    "library": "scrapy",
    "name": "peek",
    "source_code": "def peek(self) -> Request | None:\n    request = super().peek()\n    if not request:\n        return None\n    return request_from_dict(request, spider=self.spider)",
    "docstring": "Returns the next object to be returned by :meth:, but without removing it from the queue. Raises :exc: if the underlying queue class does not implement a `` method, which is optional for queues.",
    "type": "method",
    "file_path": "scrapy\\scrapy\\squeues.py",
    "ast_data": "FunctionDef name:peek arg:self arguments arg Assign Call Call If Return return:no Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "_reset_flat_param_grad_info_if_needed",
    "source_code": "def _reset_flat_param_grad_info_if_needed(self):\n    if not self._use_orig_params:\n        return\n    flat_param = self.flat_param\n    assert flat_param._params is not None\n    all_grad_none = True\n    requires_grad = False\n    for param in flat_param._params:\n        all_grad_none &= param.grad is None\n        requires_grad |= param.requires_grad\n    if all_grad_none:\n        flat_param.grad = None\n    flat_param.requires_grad = requires_grad",
    "docstring": "Reset `` call as possible.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\fsdp\\_flat_param.py",
    "ast_data": "FunctionDef name:_reset_flat_param_grad_info_if_needed arg:self arguments arg If Return return:no Assign Compare Assign Assign For Compare If Assign Assign"
  },
  {
    "library": "scipy",
    "name": "set_f_params",
    "source_code": "def set_f_params(self, *args):\n    self.f_params = args\n    return self",
    "docstring": "Set extra parameters for user-supplied function f.",
    "type": "method",
    "file_path": "scipy\\scipy\\integrate\\_ode.py",
    "ast_data": "FunctionDef name:set_f_params arg:self arguments arg arg Assign Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "clip",
    "source_code": "def clip(x: Array, /, min: float | Array | None=None, max: float | Array | None=None) -> Array:\n\n    def _isscalar(a: float | Array | None, /) -> TypeIs[float | None]:\n        return a is None or isinstance(a, (int, float))\n    min_shape = () if _isscalar(min) else min.shape\n    max_shape = () if _isscalar(max) else max.shape\n    result_shape = np.broadcast_shapes(x.shape, min_shape, max_shape)\n    if min is not None:\n        min = da.broadcast_to(da.asarray(min), result_shape)\n    if max is not None:\n        max = da.broadcast_to(da.asarray(max), result_shape)\n    if min is None and max is None:\n        return da.positive(x)\n    if min is None:\n        return astype(da.minimum(x, max), x.dtype)\n    if max is None:\n        return astype(da.maximum(x, min), x.dtype)\n    return astype(da.minimum(da.maximum(x, min), max), x.dtype)",
    "docstring": "Array API compatibility wrapper for clip(). See the corresponding documentation in the array library and/or the array API specification for more details.",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\externals\\array_api_compat\\dask\\array\\_aliases.py",
    "ast_data": "FunctionDef name:clip arg:min arg:max arguments arg arg arg FunctionDef name:_isscalar arguments arg Return return:yes BoolOp Compare Call Assign Call Assign Call Assign Call If Compare Assign Call Call If Compare Assign Call Call If BoolOp Compare Compare Return return:yes Call If Compare Return return:yes Call Call If Compare Return return:yes Call Call Return return:yes Call Call Call"
  },
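A small NumPy sketch of the broadcasting rule the wrapper applies before clipping; `np.broadcast_shapes` is the same helper used above, and scalars count as shape `()`:

```python
import numpy as np

x = np.arange(6.0).reshape(2, 3)
lo = np.array([[1.0], [2.0]])   # shape (2, 1), broadcasts over columns
print(np.broadcast_shapes(x.shape, lo.shape))  # (2, 3)

# Scalar bounds always broadcast:
print(np.clip(x, 1.0, 4.0))
# [[1. 1. 2.]
#  [3. 4. 4.]]
```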
  {
    "library": "scipy",
    "name": "_process_parameters",
    "source_code": "def _process_parameters(self, mean, cov, allow_singular=True):\n    if isinstance(cov, _covariance.Covariance):\n        return self._process_parameters_Covariance(mean, cov)\n    else:\n        dim, mean, cov = self._process_parameters_psd(None, mean, cov)\n        psd = _PSD(cov, allow_singular=allow_singular)\n        cov_object = _covariance.CovViaPSD(psd)\n        return (dim, mean, cov_object)",
    "docstring": "Infer dimensionality from mean or covariance matrix, ensure that mean and covariance are full vector resp. matrix.",
    "type": "method",
    "file_path": "scipy\\scipy\\stats\\_multivariate.py",
    "ast_data": "FunctionDef name:_process_parameters arg:self arg:mean arg:cov arg:allow_singular arguments arg arg arg arg If Call Return return:yes Call Assign Call Assign Call Assign Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "obj",
    "source_code": "@property\ndef obj(self):\n    return self._obj_ref()",
    "docstring": "Return object if alive, otherwise None.",
    "type": "method",
    "file_path": "pytorch\\torch\\sparse\\_triton_ops.py",
    "ast_data": "FunctionDef name:obj arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "matvec",
    "source_code": "def matvec(self, x):\n    x = np.asanyarray(x)\n    M, N = self.shape\n    if x.shape != (N,) and x.shape != (N, 1):\n        raise ValueError('dimension mismatch')\n    y = self._matvec(x)\n    if isinstance(x, np.matrix):\n        y = asmatrix(y)\n    else:\n        y = np.asarray(y)\n    if x.ndim == 1:\n        y = y.reshape(M)\n    elif x.ndim == 2:\n        y = y.reshape(M, 1)\n    else:\n        raise ValueError('invalid shape returned by user-defined matvec()')\n    return y",
    "docstring": "Matrix-vector multiplication. Performs the operation y=A@x where A is an MxN linear operator and x is a column vector or 1-d array. Parameters ---------- x : {matrix, ndarray} An array with shape (N,) or (N,1). Returns ------- y : {matrix, ndarray} A matrix or ndarray with shape (M,) or (M,1) depending on the type and shape of the x argument. Notes ----- This matvec wraps the user-specified matvec routine or overridden _matvec method to ensure that y has the correct shape and type.",
    "type": "method",
    "file_path": "scipy\\scipy\\sparse\\linalg\\_interface.py",
    "ast_data": "FunctionDef name:matvec arg:self arg:x arguments arg arg Assign Call Assign If BoolOp Compare Compare Raise Call Assign Call If Call Assign Call Assign Call If Compare Assign Call If Compare Assign Call Raise Call Return return:yes"
  },
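A minimal usage sketch with SciPy's public `LinearOperator`, showing the shape normalization this wrapper guarantees: `(N,)` input yields `(M,)` output, `(N, 1)` yields `(M, 1)`.

```python
import numpy as np
from scipy.sparse.linalg import LinearOperator

def mv(v):
    # A toy diagonal operator: scales the two components.
    return np.array([2.0 * v[0], 3.0 * v[1]])

A = LinearOperator((2, 2), matvec=mv)
print(A.matvec(np.array([1.0, 1.0])))   # [2. 3.], shape (2,)
print(A.matvec(np.ones((2, 1))).shape)  # (2, 1)
```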
  {
    "library": "pytorch",
    "name": "after_fork",
    "source_code": "def after_fork():\n    _pool_set.clear()\n    AsyncCompile.process_pool.cache_clear()",
    "docstring": "Reset pools to initial state without shutting them down",
    "type": "function",
    "file_path": "pytorch\\torch\\_inductor\\async_compile.py",
    "ast_data": "FunctionDef name:after_fork arguments Call Call"
  },
  {
    "library": "scrapy",
    "name": "_process_links",
    "source_code": "def _process_links(self, links: list[Link]) -> list[Link]:\n    return self._deduplicate_if_needed(links)",
    "docstring": "Normalize and filter extracted links The subclass should override it if necessary",
    "type": "method",
    "file_path": "scrapy\\scrapy\\linkextractors\\lxmlhtml.py",
    "ast_data": "FunctionDef name:_process_links arg:self arg:links arguments arg arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "_custom_getattr_fallback",
    "source_code": "def _custom_getattr_fallback(self, base, tx, name, obj_source):\n    if object_has_getattribute(base):\n        unimplemented_v2(gb_type='torch.nn.Module with a custom __getattribute__ defined', context=f'var_getattr {self} {name}', explanation='Dynamo does not support checking key existence on `nn.Module` instances that have a custom `__getattribute__` method defined.', hints=['Avoid defining `__getattribute__` in your module.', *graph_break_hints.SUPPORTABLE])\n    getattr_fn = get_custom_getattr(base, ignore_nn_module_getattr=True)\n    if getattr_fn is None:\n        return None\n    if not isinstance(getattr_fn, types.FunctionType):\n        unimplemented_v2(gb_type='torch.nn.Module with a non-function custom __getattr__', context=f'var_getattr {self} {name}', explanation='Dynamo detected a nn.Module object with a custom `__getattr__` method, but this method is not a standard Python function (e.g., it might be implemented in C/C++). Dynamo cannot currently trace into such non-standard `__getattr__` methods.', hints=['Avoid using objects with non-standard __getattr__ methods within the compiled region. If possible, implement __getattr__ as a standard Python function.', *graph_break_hints.SUPPORTABLE])\n    options = {'source': AttrSource(obj_source, '__getattr__')}\n    return variables.UserMethodVariable(getattr_fn, self, **options).call_function(tx, [variables.ConstantVariable.create(name)], {})",
    "docstring": "Check for a __getattr__ and handle it specially if it is implemented",
    "type": "method",
    "file_path": "pytorch\\torch\\_dynamo\\variables\\nn_module.py",
    "ast_data": "FunctionDef name:_custom_getattr_fallback arg:self arg:base arg:tx arg:name arg:obj_source arguments arg arg arg arg arg If Call Call Assign Call If Compare Return return:no If Call Call Assign Call Return return:yes Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_maybe_broadcast_to_outputs",
    "source_code": "def _maybe_broadcast_to_outputs(self, outputs, objects):\n    if not self._should_broadcast(objects):\n        return objects\n    should_copy_objects = len(nest.flatten(outputs)) > 1\n\n    def _broadcast_fn():\n        if should_copy_objects:\n            return nest.map_structure(self._copy_object, objects)\n        return objects\n    return nest.map_structure(lambda _: _broadcast_fn(), outputs)",
    "docstring": "Determines if losses / metrics should be applied to all outputs. NOTE: This method should only be called for Metrics / Losses, not for y_true / sample_weight. Args: outputs: Model predictions. objects: Arbitrary nested structure (e.g. of losses or metrics) Returns: Arbitrary nested structure of objects, maybe copied to each output. Applies a Loss / Metric to all outputs.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\compile_utils.py",
    "ast_data": "FunctionDef name:_maybe_broadcast_to_outputs arg:self arg:outputs arg:objects arguments arg arg arg If Call Return return:yes Assign Compare Call Call FunctionDef name:_broadcast_fn arguments If Return return:yes Call Return return:yes Return return:yes Call arguments arg Call"
  },
  {
    "library": "pytorch",
    "name": "create_placeholder",
    "source_code": "def create_placeholder(name: str, dtype: torch.dtype, device: torch.device, size: Optional[list[int]]=None) -> TensorBox:\n    input_buffer = InputBuffer(name=name, layout=FixedLayout(device, dtype, size if size else [], FlexibleLayout.contiguous_strides(size) if size else []))\n    return TensorBox.create(input_buffer)",
    "docstring": "Creates a placeholder input buffers for producing subgraph_output.",
    "type": "function",
    "file_path": "pytorch\\torch\\_inductor\\kernel\\flex_attention.py",
    "ast_data": "FunctionDef name:create_placeholder arg:name arg:dtype arg:device arg:size arguments arg arg arg arg Assign Call Call Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "RemoteValue",
    "source_code": "@tf_export('distribute.experimental.coordinator.RemoteValue', 'distribute.coordinator.RemoteValue', v1=[])\nclass RemoteValue(object):\n\n    def fetch(self):\n        raise NotImplementedError('Must be implemented in subclasses.')\n\n    def get(self):\n        raise NotImplementedError('Must be implemented in subclasses.')",
    "docstring": "An asynchronously available value of a scheduled function. This class is used as the return value of where the underlying value becomes available at a later time once the function has been executed. Using as an input to a subsequent function scheduled with is currently not supported. Example:",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\coordinator\\remote_value.py",
    "ast_data": "ClassDef name:RemoteValue FunctionDef name:fetch arg:self arguments arg Raise Call FunctionDef name:get arg:self arguments arg Raise Call Call"
  },
  {
    "library": "kornia",
    "name": "intrinsics_inverse",
    "source_code": "def intrinsics_inverse(self) -> Tensor:\n    return self.intrinsics.inverse()",
    "docstring": "Return the inverse of the 4x4 instrisics matrix. Returns: tensor of shape :math:.",
    "type": "method",
    "file_path": "kornia\\kornia\\geometry\\camera\\pinhole.py",
    "ast_data": "FunctionDef name:intrinsics_inverse arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "CppMicroGemmRef",
    "source_code": "class CppMicroGemmRef(CppMicroGemm):\n    TEMPLATE_ENTRY = '\\n{{declare_kernel}} {\\n    for (int64_t m = 0; m < M; ++m) {\\n        for (int64_t n = 0; n < N; ++n) {\\n            {{compute_t}} result = accum ? C[m * ldc + n] : 0;\\n            for (int64_t k = 0; k < K; ++k) {\\n                result += ({{compute_t}})A[m * lda + k] * ({{compute_t}})B[k * ldb + n] * {{alpha}};\\n            }\\n            C[m * ldc + n] = result;\\n        }\\n    }\\n}\\n'\n\n    def __init__(self, name, input_dtype, input2_dtype, output_dtype, compute_dtype, alpha) -> None:\n        super().__init__(name, input_dtype, input2_dtype, output_dtype, compute_dtype, GemmBlocking(1, 1, 1), alpha)\n\n    def codegen_define(self, kernel: CppTemplateKernel) -> str:\n        options = {'declare_kernel': self.get_kernel_declaration(), **self.get_common_options()}\n        return KernelTemplate._template_from_string(self.TEMPLATE_ENTRY).render(options)",
    "docstring": "A reference implementation of the CppMicroGemm class with naive C++ code. It is used for correctness debugging.",
    "type": "class",
    "file_path": "pytorch\\torch\\_inductor\\codegen\\cpp_micro_gemm.py",
    "ast_data": "ClassDef name:CppMicroGemmRef Assign FunctionDef name:__init__ arg:self arg:name arg:input_dtype arg:input2_dtype arg:output_dtype arg:compute_dtype arg:alpha arguments arg arg arg arg arg arg arg Call Call Call FunctionDef name:codegen_define arg:self arg:kernel arguments arg arg Assign Call Call Return return:yes Call Call"
  },
  {
    "library": "numpy",
    "name": "_commandline_dep_string",
    "source_code": "def _commandline_dep_string(cc_args, extra_postargs, pp_opts):\n    cmdline = 'commandline: '\n    cmdline += ' '.join(cc_args)\n    cmdline += ' '.join(extra_postargs)\n    cmdline += ' '.join(pp_opts) + '\\n'\n    return cmdline",
    "docstring": "Return commandline representation used to determine if a file needs to be recompiled",
    "type": "function",
    "file_path": "numpy\\numpy\\distutils\\misc_util.py",
    "ast_data": "FunctionDef name:_commandline_dep_string arg:cc_args arg:extra_postargs arg:pp_opts arguments arg arg arg Assign Call Call Call Return return:yes"
  },
  {
    "library": "uvicorn",
    "name": "signal_handler",
    "source_code": "def signal_handler(self, sig: int, frame: FrameType | None) -> None:\n    if sys.platform == 'win32' and self.is_restarting:\n        self.is_restarting = False\n    else:\n        self.should_exit.set()",
    "docstring": "A signal handler that is registered with the parent process.",
    "type": "method",
    "file_path": "uvicorn\\uvicorn\\supervisors\\basereload.py",
    "ast_data": "FunctionDef name:signal_handler arg:self arg:sig arg:frame arguments arg arg arg If BoolOp Compare Assign Call"
  },
  {
    "library": "scikit-learn",
    "name": "diag",
    "source_code": "def diag(self, X):\n    return np.vstack([kernel.diag(X) for kernel in self.kernels]).T",
    "docstring": "Returns the diagonal of the kernel k(X, X). The result of this method is identical to ; however, it can be evaluated more efficiently since only the diagonal is evaluated. Parameters ---------- X : array-like of shape (n_samples_X, n_features) or list of object Argument to the kernel. Returns ------- K_diag : ndarray of shape (n_samples_X, n_kernels) Diagonal of kernel k(X, X)",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\gaussian_process\\kernels.py",
    "ast_data": "FunctionDef name:diag arg:self arg:X arguments arg arg Return return:yes Call Call"
  },
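For a single kernel the relationship is easy to check with scikit-learn's public API; the method above simply stacks such per-kernel diagonals column-wise for a list of kernels:

```python
import numpy as np
from sklearn.gaussian_process.kernels import RBF

# diag(X) equals the diagonal of the full kernel matrix but skips
# computing the off-diagonal entries.
X = np.random.RandomState(0).rand(5, 2)
k = RBF(length_scale=1.0)
assert np.allclose(k.diag(X), np.diag(k(X)))  # all ones for an RBF kernel
```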
  {
    "library": "scikit-learn",
    "name": "_get_support_mask",
    "source_code": "@abstractmethod\ndef _get_support_mask(self):\n    pass",
    "docstring": "Get the boolean mask indicating which features are selected Returns ------- support : boolean array of shape [# input features] An element is True iff its corresponding feature is selected for retention.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\feature_selection\\_base.py",
    "ast_data": "FunctionDef name:_get_support_mask arg:self arguments arg"
  },
  {
    "library": "numpy",
    "name": "flatten_structured_array",
    "source_code": "def flatten_structured_array(a):\n\n    def flatten_sequence(iterable):\n        for elm in iter(iterable):\n            if hasattr(elm, '__iter__'):\n                yield from flatten_sequence(elm)\n            else:\n                yield elm\n    a = np.asanyarray(a)\n    inishape = a.shape\n    a = a.ravel()\n    if isinstance(a, MaskedArray):\n        out = np.array([tuple(flatten_sequence(d.item())) for d in a._data])\n        out = out.view(MaskedArray)\n        out._mask = np.array([tuple(flatten_sequence(d.item())) for d in getmaskarray(a)])\n    else:\n        out = np.array([tuple(flatten_sequence(d.item())) for d in a])\n    if len(inishape) > 1:\n        newshape = list(out.shape)\n        newshape[0] = inishape\n        out.shape = tuple(flatten_sequence(newshape))\n    return out",
    "docstring": "Flatten a structured array. The data type of the output is chosen such that it can represent all of the (nested) fields. Parameters ---------- a : structured array Returns ------- output : masked array or ndarray A flattened masked array if the input is a masked array, otherwise a standard ndarray. Examples -------- >>> import numpy as np >>> ndtype = [('a', int), ('b', float)] >>> a = np.array([(1, 1), (2, 2)], dtype=ndtype) >>> np.ma.flatten_structured_array(a) array([[1., 1.], [2., 2.]])",
    "type": "function",
    "file_path": "numpy\\numpy\\ma\\core.py",
    "ast_data": "FunctionDef name:flatten_structured_array arg:a arguments arg FunctionDef name:flatten_sequence arg:iterable arguments arg For Call If Call Call Assign Call Assign Assign Call If Call Assign Call Call Call Call Assign Call Assign Call Call Call Call Call Assign Call Call Call Call If Compare Call Assign Call Assign Assign Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "dfs_helper",
    "source_code": "def dfs_helper(node: Node, partition_latency) -> PartitionLatency:\n    node_latency = node_to_latency_mapping[node]\n    overall_latency_sec = partition_latency.overall_latency_sec + max(node_latency.computer_latency_sec, node_latency.mem_latency_sec)\n    mem_latency_sec = partition_latency.mem_latency_sec + node_latency.mem_latency_sec\n    computer_latency_sec = partition_latency.computer_latency_sec + node_latency.computer_latency_sec\n    users = set(node.users).intersection(partition.nodes)\n    if users:\n        max_latency = PartitionLatency(mem_latency_sec=0.0, computer_latency_sec=0.0, overall_latency_sec=0.0)\n        for n in users:\n            new_partition_latency = dfs_helper(n, PartitionLatency(mem_latency_sec, computer_latency_sec, overall_latency_sec))\n            if new_partition_latency.overall_latency_sec > max_latency.overall_latency_sec:\n                max_latency = new_partition_latency\n        return max_latency\n    return PartitionLatency(mem_latency_sec, computer_latency_sec, overall_latency_sec)",
    "docstring": "Given a top node of a partition, this function returns the latency of the critical path in the partition",
    "type": "function",
    "file_path": "pytorch\\torch\\fx\\experimental\\partitioner_utils.py",
    "ast_data": "FunctionDef name:dfs_helper arg:node arg:partition_latency arguments arg arg Assign Assign Call Assign Assign Assign Call Call If Assign Call For Assign Call Call If Compare Assign Return return:yes Return return:yes Call"
  },
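A stripped-down sketch of the same critical-path recursion over a toy graph. Node latencies are collapsed to single floats here, unlike the real `PartitionLatency` bookkeeping, which tracks memory and compute latency separately:

```python
# users maps each node to its consumers inside the partition.
users = {"a": ["b", "c"], "b": ["d"], "c": ["d"], "d": []}
latency = {"a": 1.0, "b": 5.0, "c": 2.0, "d": 1.0}

def critical_path(node, acc=0.0):
    # Accumulate this node's latency, then recurse into users and
    # keep the slowest path, exactly as dfs_helper does.
    acc += latency[node]
    if not users[node]:
        return acc
    return max(critical_path(u, acc) for u in users[node])

print(critical_path("a"))  # 7.0, via a -> b -> d
```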
  {
    "library": "pandas",
    "name": "fast_xs",
    "source_code": "def fast_xs(self, loc: int) -> SingleBlockManager:\n    if len(self.blocks) == 1:\n        result: np.ndarray | ExtensionArray = self.blocks[0].iget((slice(None), loc))\n        bp = BlockPlacement(slice(0, len(result)))\n        block = new_block(result, placement=bp, ndim=1, refs=self.blocks[0].refs)\n        return SingleBlockManager(block, self.axes[0])\n    dtype = interleaved_dtype([blk.dtype for blk in self.blocks])\n    n = len(self)\n    if isinstance(dtype, ExtensionDtype):\n        result = np.empty(n, dtype=object)\n    else:\n        result = np.empty(n, dtype=dtype)\n        result = ensure_wrapped_if_datetimelike(result)\n    for blk in self.blocks:\n        for i, rl in enumerate(blk.mgr_locs):\n            result[rl] = blk.iget((i, loc))\n    if isinstance(dtype, ExtensionDtype):\n        cls = dtype.construct_array_type()\n        result = cls._from_sequence(result, dtype=dtype)\n    bp = BlockPlacement(slice(0, len(result)))\n    block = new_block(result, placement=bp, ndim=1)\n    return SingleBlockManager(block, self.axes[0])",
    "docstring": "Return the array corresponding to . Parameters ---------- loc : int Returns ------- np.ndarray or ExtensionArray",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\internals\\managers.py",
    "ast_data": "FunctionDef name:fast_xs arg:self arg:loc arguments arg arg If Compare Call Call Call Assign Call Call Call Assign Call Return return:yes Call Assign Call Assign Call If Call Assign Call Assign Call Assign Call For For Call Assign Call If Call Assign Call Assign Call Assign Call Call Call Assign Call Return return:yes Call"
  },
  {
    "library": "cherrypy",
    "name": "setup",
    "source_code": "@classmethod\ndef setup(cls, **kwargs):\n    for k, v in kwargs.items():\n        setattr(cls, k, v)\n    import memcache\n    cls.cache = memcache.Client(cls.servers)",
    "docstring": "Set up the storage system for memcached-based sessions. This should only be called once per process; this will be done automatically when using sessions.init (as the built-in Tool does).",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\lib\\sessions.py",
    "ast_data": "FunctionDef name:setup arg:cls arguments arg arg For Call Call Assign Call"
  },
  {
    "library": "authlib",
    "name": "validate_id_token_encryption_enc_values_supported",
    "source_code": "def validate_id_token_encryption_enc_values_supported(self):\n    validate_array_value(self, 'id_token_encryption_enc_values_supported')",
    "docstring": "OPTIONAL. JSON array containing a list of the JWE encryption algorithms (enc values) supported by the OP for the ID Token to encode the Claims in a JWT.",
    "type": "method",
    "file_path": "authlib\\authlib\\oidc\\discovery\\models.py",
    "ast_data": "FunctionDef name:validate_id_token_encryption_enc_values_supported arg:self arguments arg Call"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self):\n    self._number_of_shards = None",
    "docstring": "Creates a new TpuContext.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\tpu\\tpu_function.py",
    "ast_data": "FunctionDef name:__init__ arg:self arguments arg Assign"
  },
  {
    "library": "pandas",
    "name": "PlanePlot",
    "source_code": "class PlanePlot(MPLPlot, ABC):\n    _layout_type = 'single'\n\n    def __init__(self, data, x, y, **kwargs) -> None:\n        MPLPlot.__init__(self, data, **kwargs)\n        if x is None or y is None:\n            raise ValueError(self._kind + ' requires an x and y column')\n        if is_integer(x) and (not holds_integer(self.data.columns)):\n            x = self.data.columns[x]\n        if is_integer(y) and (not holds_integer(self.data.columns)):\n            y = self.data.columns[y]\n        self.x = x\n        self.y = y\n\n    @final\n    def _get_nseries(self, data: Series | DataFrame) -> int:\n        return 1\n\n    @final\n    def _post_plot_logic(self, ax: Axes, data) -> None:\n        x, y = (self.x, self.y)\n        xlabel = self.xlabel if self.xlabel is not None else pprint_thing(x)\n        ylabel = self.ylabel if self.ylabel is not None else pprint_thing(y)\n        ax.set_xlabel(xlabel)\n        ax.set_ylabel(ylabel)\n\n    @final\n    def _plot_colorbar(self, ax: Axes, *, fig: Figure, **kwds):\n        img = ax.collections[-1]\n        return fig.colorbar(img, ax=ax, **kwds)",
    "docstring": "Abstract class for plotting on plane, currently scatter and hexbin.",
    "type": "class",
    "file_path": "pandas\\pandas\\plotting\\_matplotlib\\core.py",
    "ast_data": "ClassDef name:PlanePlot Assign FunctionDef name:__init__ arg:self arg:data arg:x arg:y arguments arg arg arg arg arg Call If BoolOp Compare Compare Raise Call If BoolOp Call Call Assign If BoolOp Call Call Assign Assign Assign FunctionDef name:_get_nseries arg:self arg:data arguments arg arg Return return:yes FunctionDef name:_post_plot_logic arg:self arg:ax arg:data arguments arg arg arg Assign Assign Compare Call Assign Compare Call Call Call FunctionDef name:_plot_colorbar arg:self arg:ax arguments arg arg arg arg Assign Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "get_xbound",
    "source_code": "def get_xbound(self):\n    left, right = self.get_xlim()\n    if left < right:\n        return (left, right)\n    else:\n        return (right, left)",
    "docstring": "Return the lower and upper x-axis bounds, in increasing order. See Also -------- set_xbound get_xlim, set_xlim invert_xaxis, xaxis_inverted",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\axes\\_base.py",
    "ast_data": "FunctionDef name:get_xbound arg:self arguments arg Assign Call If Compare Return return:yes Return return:yes"
  },
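A short usage example: unlike `get_xlim`, `get_xbound` always reports bounds in increasing order, even on an inverted axis.

```python
import matplotlib
matplotlib.use("Agg")  # headless backend for the example
import matplotlib.pyplot as plt

fig, ax = plt.subplots()
ax.set_xlim(5, 1)         # inverted x-axis
print(ax.get_xlim())      # (5.0, 1.0)
print(ax.get_xbound())    # (1.0, 5.0)
```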
  {
    "library": "pandas",
    "name": "autoscale",
    "source_code": "def autoscale(self):\n    vmin, vmax = self.axis.get_data_interval()\n    locs = self._get_default_locs(vmin, vmax)\n    vmin, vmax = locs[[0, -1]]\n    if vmin == vmax:\n        vmin -= 1\n        vmax += 1\n    return mpl.transforms.nonsingular(vmin, vmax)",
    "docstring": "Sets the view limits to the nearest multiples of base that contain the data.",
    "type": "method",
    "file_path": "pandas\\pandas\\plotting\\_matplotlib\\converter.py",
    "ast_data": "FunctionDef name:autoscale arg:self arguments arg Assign Call Assign Call Assign If Compare Return return:yes Call"
  },
  {
    "library": "kornia",
    "name": "Invert",
    "source_code": "class Invert(OperationBase):\n\n    def __init__(self, initial_probability: float=0.5, temperature: float=0.1) -> None:\n        super().__init__(K.RandomInvert(same_on_batch=False, p=initial_probability), initial_magnitude=None, temperature=temperature, symmetric_megnitude=False)",
    "docstring": "Apply invert operation. Args: initial_probability: the initial probability. If None, the augmentation will be randomly applied according to he augmentation sampling range. temperature: temperature for RelaxedBernoulli distribution used during training.",
    "type": "class",
    "file_path": "kornia\\kornia\\augmentation\\auto\\operations\\ops.py",
    "ast_data": "ClassDef name:Invert FunctionDef name:__init__ arg:self arg:initial_probability arg:temperature arguments arg arg arg Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "partition_graphs",
    "source_code": "def partition_graphs(self):\n    if not self._debug_graphs:\n        raise LookupError('No partition graphs have been loaded.')\n    return [self._debug_graphs[key].debug_graph_def for key in self._debug_graphs]",
    "docstring": "Get the partition graphs. Returns: Partition graphs as a list of GraphDef. Raises: LookupError: If no partition graphs have been loaded.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\debug\\lib\\debug_data.py",
    "ast_data": "FunctionDef name:partition_graphs arg:self arguments arg If Raise Call Return return:yes"
  },
  {
    "library": "django",
    "name": "BaseFormView",
    "source_code": "class BaseFormView(FormMixin, ProcessFormView):\n    pass",
    "docstring": "A base view for displaying a form.",
    "type": "class",
    "file_path": "django\\django\\views\\generic\\edit.py",
    "ast_data": "ClassDef name:BaseFormView"
  },
  {
    "library": "pytorch",
    "name": "valid_index_tensor",
    "source_code": "def valid_index_tensor(index, dims):\n    slice_count = 0\n    for s in index:\n        if isinstance(s, slice):\n            slice_count += 1\n    if slice_count > len(dims):\n        return F()\n    else:\n        return T()",
    "docstring": "if the slice instances exceed the length of the dimensions then this is a type error so we return False",
    "type": "function",
    "file_path": "pytorch\\torch\\fx\\experimental\\migrate_gradual_types\\constraint_transformation.py",
    "ast_data": "FunctionDef name:valid_index_tensor arg:index arg:dims arguments arg arg Assign For If Call If Compare Call Return return:yes Call Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "_set",
    "source_code": "def _set(self, key, val):\n    dict.__setitem__(self, key, val)",
    "docstring": "Directly write data bypassing deprecation and validation logic. Notes ----- As end user or downstream library you almost always should use ``, i.e. it is subject to Matplotlib's API and deprecation policy. :meta public:",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\__init__.py",
    "ast_data": "FunctionDef name:_set arg:self arg:key arg:val arguments arg arg arg Call"
  },
  {
    "library": "tensorflow",
    "name": "sequence_numeric_column",
    "source_code": "@doc_controls.header(_FEATURE_COLUMN_DEPRECATION_WARNING)\n@tf_export('feature_column.sequence_numeric_column')\n@deprecation.deprecated(None, _FEATURE_COLUMN_DEPRECATION_RUNTIME_WARNING)\ndef sequence_numeric_column(key, shape=(1,), default_value=0.0, dtype=dtypes.float32, normalizer_fn=None):\n    shape = fc._check_shape(shape=shape, key=key)\n    if not (dtype.is_integer or dtype.is_floating):\n        raise ValueError('dtype must be convertible to float. dtype: {}, key: {}'.format(dtype, key))\n    if normalizer_fn is not None and (not callable(normalizer_fn)):\n        raise TypeError('normalizer_fn must be a callable. Given: {}'.format(normalizer_fn))\n    return SequenceNumericColumn(key, shape=shape, default_value=default_value, dtype=dtype, normalizer_fn=normalizer_fn)",
    "docstring": "Returns a feature column that represents sequences of numeric data. Example: Args: key: A unique string identifying the input features. shape: The shape of the input data per sequence id. E.g. if , each example must contain values. default_value: A single value compatible with that is used for padding the sparse data into a dense . dtype: The type of values. normalizer_fn: If not , a function that can be used to normalize the value of the tensor after is applied for parsing. Normalizer function takes the input as its argument, and returns the output . (e.g. lambda x: (x - 3.0) / 4.2). Please note that even though the most common use case of this function is normalization, it can be used for any kind of Tensorflow transformations. Returns: A . Raises: TypeError: if any dimension in shape is not an int. ValueError: if any dimension in shape is not a positive integer. ValueError: if is not convertible to .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\feature_column\\sequence_feature_column.py",
    "ast_data": "FunctionDef name:sequence_numeric_column arg:key arg:shape arg:default_value arg:dtype arg:normalizer_fn arguments arg arg arg arg arg Assign Call If BoolOp Raise Call Call If BoolOp Compare Call Raise Call Call Return return:yes Call Call Call Call"
  },
  {
    "library": "django",
    "name": "cache_key_culling_sql",
    "source_code": "def cache_key_culling_sql(self):\n    cache_key = self.quote_name('cache_key')\n    return f'SELECT {cache_key} FROM %s ORDER BY {cache_key} LIMIT 1 OFFSET %%s'",
    "docstring": "Return an SQL query that retrieves the first cache key greater than the n smallest. This is used by the 'db' cache backend to determine where to start culling.",
    "type": "method",
    "file_path": "django\\django\\db\\backends\\base\\operations.py",
    "ast_data": "FunctionDef name:cache_key_culling_sql arg:self arguments arg Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "main_op",
    "source_code": "@tf_export(v1=['saved_model.main_op.main_op'])\n@deprecation.deprecated(None, _DEPRECATION_MSG)\ndef main_op():\n    init = variables.global_variables_initializer()\n    init_local = variables.local_variables_initializer()\n    init_tables = lookup_ops.tables_initializer()\n    return control_flow_ops.group(init, init_local, init_tables)",
    "docstring": "Returns a main op to init variables and tables. Returns the main op including the group of ops that initializes all variables, initializes local variables and initialize all tables. Returns: The set of ops to be run as part of the main op upon the load operation.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\saved_model\\main_op_impl.py",
    "ast_data": "FunctionDef name:main_op arguments Assign Call Assign Call Assign Call Return return:yes Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "output_size",
    "source_code": "@property\ndef output_size(self):\n    raise NotImplementedError('Abstract method')",
    "docstring": "Integer or TensorShape: size of outputs produced by this cell.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\layers\\recurrent.py",
    "ast_data": "FunctionDef name:output_size arg:self arguments arg Raise Call"
  },
  {
    "library": "tensorflow",
    "name": "_get_reader_key",
    "source_code": "@staticmethod\ndef _get_reader_key(handle):\n    handle_parts = str(handle).split(';')\n    return handle_parts[0] + ';' + handle_parts[-1]",
    "docstring": "The graph key for reader.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\session_ops.py",
    "ast_data": "FunctionDef name:_get_reader_key arg:handle arguments arg Assign Call Call Return return:yes"
  },
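A plain-Python illustration of the key construction; the handle string below is a made-up example of the `name;...;device` layout the code assumes, not an actual TensorFlow handle:

```python
# The reader key keeps only the first and last ';'-separated parts
# of the handle (the tensor name and the device string).
handle = "tensor_name:0;12345;/job:local/replica:0/task:0/device:CPU:0"
parts = str(handle).split(";")
print(parts[0] + ";" + parts[-1])
# tensor_name:0;/job:local/replica:0/task:0/device:CPU:0
```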
  {
    "library": "pytorch",
    "name": "emit_check_no_requires_grad",
    "source_code": "def emit_check_no_requires_grad(tensor_args: list[DifferentiableInput], args_with_derivatives: list[DifferentiableInput]) -> list[str]:\n    body: list[str] = []\n    for arg in tensor_args:\n        if arg in args_with_derivatives:\n            continue\n        arg_name = arg.name\n        if info and arg_name in info.non_differentiable_arg_names:\n            continue\n        if arg_name == 'output':\n            continue\n        body.append(f'check_no_requires_grad({arg_name}, \"{arg_name}\", \"{name}\");')\n    return body",
    "docstring": "Checks that arguments without derivatives don't require grad",
    "type": "function",
    "file_path": "pytorch\\tools\\autograd\\gen_variable_type.py",
    "ast_data": "FunctionDef name:emit_check_no_requires_grad arg:tensor_args arg:args_with_derivatives arguments arg arg For If Compare Assign If BoolOp Compare If Compare Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "current_stream",
    "source_code": "def current_stream(device: Optional[_device_t]=None) -> Stream:\n    return torch._C._mtia_getCurrentStream(_get_device_index(device, optional=True))",
    "docstring": "Return the currently selected :class: for a given device. Args: device (torch.device or int, optional): selected device. Returns the currently selected :class: for the current device, given by :func:, if :attr: is `` (default).",
    "type": "function",
    "file_path": "pytorch\\torch\\mtia\\__init__.py",
    "ast_data": "FunctionDef name:current_stream arg:device arguments arg Return return:yes Call Call"
  },
  {
    "library": "scipy",
    "name": "fill_space",
    "source_code": "def fill_space(self) -> np.ndarray:\n    return self.random(np.inf)",
    "docstring": "Draw `random` is very small-with respect to the dimensionality. Returns ------- sample : array_like (n, d) QMC sample.",
    "type": "method",
    "file_path": "scipy\\scipy\\stats\\_qmc.py",
    "ast_data": "FunctionDef name:fill_space arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "django",
    "name": "prepared",
    "source_code": "@property\ndef prepared(self):\n    return PreparedGeometry(self)",
    "docstring": "Return a PreparedGeometry corresponding to this geometry -- it is optimized for the contains, intersects, and covers operations.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\geos\\geometry.py",
    "ast_data": "FunctionDef name:prepared arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_get_sparse_tensors",
    "source_code": "@abc.abstractmethod\ndef _get_sparse_tensors(self, inputs, weight_collections=None, trainable=None):\n    pass",
    "docstring": "Returns an IdWeightPair. is a pair of s which represents ids and weights. is typically a x of . is either a of or to indicate all weights should be taken to be 1. If specified, must have exactly the same shape and indices as . Expected is same as parsing output of a which is a ragged matrix. Args: inputs: A as a cache to get input tensors required to create . weight_collections: List of graph collections to which variables (if any will be created) are added. trainable: If also add variables to the graph collection (see ).",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\feature_column\\feature_column.py",
    "ast_data": "FunctionDef name:_get_sparse_tensors arg:self arg:inputs arg:weight_collections arg:trainable arguments arg arg arg arg"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, step_stats: step_stats_pb2.StepStats, graph: Optional[Any]=None) -> None:\n    self._origin_step_stats = step_stats\n    self._step_stats = None\n    self._graph = graph\n    self._chrome_trace = _ChromeTraceFormatter()\n    self._next_pid = 0\n    self._device_pids = {}\n    self._tensor_pids = {}\n    self._tensors = {}\n    self._next_flow_id = 0\n    self._flow_starts = {}\n    self._alloc_times = {}\n    self._allocator_maximums = {}",
    "docstring": "Constructs a new Timeline. A 'Timeline' is used for visualizing the execution of a TensorFlow computation. It shows the timings and concurrency of execution at the granularity of TensorFlow Ops. This class is not thread safe. Args: step_stats: The 'step_stats_pb2.StepStats' proto recording execution times. graph: (Optional) The 'Graph' that was executed.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\client\\timeline.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:step_stats arg:graph arguments arg arg arg Assign Assign Assign Assign Call Assign Assign Assign Assign Assign Assign Assign Assign"
  },
  {
    "library": "matplotlib",
    "name": "set_title",
    "source_code": "def set_title(self, label, fontdict=None, loc=None, pad=None, *, y=None, **kwargs):\n    loc = mpl._val_or_rc(loc, 'axes.titlelocation').lower()\n    y = mpl._val_or_rc(y, 'axes.titley')\n    if y is None:\n        y = 1.0\n    else:\n        self._autotitlepos = False\n    kwargs['y'] = y\n    titles = {'left': self._left_title, 'center': self.title, 'right': self._right_title}\n    title = _api.check_getitem(titles, loc=loc)\n    default = {'fontsize': mpl.rcParams['axes.titlesize'], 'fontweight': mpl.rcParams['axes.titleweight'], 'verticalalignment': 'baseline', 'horizontalalignment': loc}\n    titlecolor = mpl.rcParams['axes.titlecolor']\n    if not cbook._str_lower_equal(titlecolor, 'auto'):\n        default['color'] = titlecolor\n    self._set_title_offset_trans(float(mpl._val_or_rc(pad, 'axes.titlepad')))\n    title.set_text(label)\n    title.update(default)\n    if fontdict is not None:\n        title.update(fontdict)\n    title._internal_update(kwargs)\n    return title",
    "docstring": "Set a title for the Axes. Set one of the three available Axes titles. The available titles are positioned above the Axes in the center, flush with the left edge, and flush with the right edge. Parameters ---------- label : str Text to use for the title fontdict : dict .. admonition:: Discouraged The use of *fontdict* is discouraged. Parameters should be passed as individual keyword arguments or using dictionary-unpacking `axes.titlelocationaxes.titleyaxes.titleyaxes.titlepad.Text~matplotlib.text.Text.Text` for a list of valid text properties.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\axes\\_axes.py",
    "ast_data": "FunctionDef name:set_title arg:self arg:label arg:fontdict arg:loc arg:pad arguments arg arg arg arg arg arg arg Assign Call Call Assign Call If Compare Assign Assign Assign Assign Assign Call Assign Assign If Call Assign Call Call Call Call Call If Compare Call Call Return return:yes"
  },
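A brief usage sketch of the three title slots selected via `loc`:

```python
import matplotlib
matplotlib.use("Agg")  # headless backend, so the example runs anywhere
import matplotlib.pyplot as plt

# loc selects which of the three Text objects (left/center/right)
# is updated; pad and font kwargs pass through to the Text.
fig, ax = plt.subplots()
ax.set_title("Centered title")
ax.set_title("Flush left", loc="left", fontsize=9)
ax.set_title("Flush right", loc="right", pad=12)
fig.savefig("titles.png")
```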
  {
    "library": "tensorflow",
    "name": "register_read_only_resource_op",
    "source_code": "def register_read_only_resource_op(op_type):\n    RESOURCE_READ_OPS.add(op_type)",
    "docstring": "Declares that does not update its touched resource.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\auto_control_deps_utils.py",
    "ast_data": "FunctionDef name:register_read_only_resource_op arg:op_type arguments arg Call"
  },
  {
    "library": "pytorch",
    "name": "CKTileTemplate",
    "source_code": "class CKTileTemplate(ROCmTemplate):\n    _TORCH_DTYPE_TO_CK = {torch.float32: 'F32', torch.float64: 'F64', torch.float16: 'F16', torch.bfloat16: 'BF16', torch.int32: 'I32', torch.int8: 'I8', torch.float8_e4m3fnuz: 'F8', torch.float8_e5m2fnuz: 'BF8'}\n    ck_dtype_to_size = {'FP16': 2, 'BF16': 2}\n\n    def header(self) -> IndentedBuffer:\n        res = super().header()\n        res.splice('\\n                // CK headers\\n                #include \"ck_tile/core.hpp\"\\n\\n            ')\n        return res\n\n    def globals(self) -> IndentedBuffer:\n        res = super().globals()\n        res.splice('\\n                using F8  = ck_tile::fp8_t;\\n                using BF8 = ck_tile::bf8_t;\\n                using F16 = ck_tile::half_t;\\n                using F32 = float;\\n                using BF16 = ck_tile::bfloat16_t;\\n            ')\n        return res\n\n    def torch_type_to_ck(self, node: IRNode, ptr: str) -> str:\n        if node is None:\n            return ptr\n        else:\n            return f'({self._TORCH_DTYPE_TO_CK.get(node.get_dtype())}*)({ptr})'",
    "docstring": "Base class for generating CK templates, has common, i.e. non-gemm-specific, code generation logic",
    "type": "class",
    "file_path": "pytorch\\torch\\_inductor\\codegen\\rocm\\ck_tile_template.py",
    "ast_data": "ClassDef name:CKTileTemplate Assign Assign FunctionDef name:header arg:self arguments arg Assign Call Call Call Return return:yes FunctionDef name:globals arg:self arguments arg Assign Call Call Call Return return:yes FunctionDef name:torch_type_to_ck arg:self arg:node arg:ptr arguments arg arg arg If Compare Return return:yes Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "FakeStructuredSparsity",
    "source_code": "class FakeStructuredSparsity(nn.Module):\n\n    def __init__(self, mask):\n        super().__init__()\n        self.register_buffer('mask', mask)\n\n    def forward(self, x):\n        assert isinstance(self.mask, torch.Tensor)\n        assert self.mask.shape[0] == x.shape[0]\n        shape = [1] * len(x.shape)\n        shape[0] = -1\n        return self.mask.reshape(shape) * x\n\n    def state_dict(self, *args, **kwargs):\n        return {}",
    "docstring": "Parametrization for Structured Pruning. Like FakeSparsity, this should be attached to the 'weight' or any other parameter that requires a mask. Instead of an element-wise bool mask, this parameterization uses a row-wise bool mask.",
    "type": "class",
    "file_path": "pytorch\\torch\\ao\\pruning\\_experimental\\pruner\\parametrization.py",
    "ast_data": "ClassDef name:FakeStructuredSparsity FunctionDef name:__init__ arg:self arg:mask arguments arg arg Call Call Call FunctionDef name:forward arg:self arg:x arguments arg arg Call Compare Assign Call Assign Return return:yes Call FunctionDef name:state_dict arg:self arguments arg arg arg Return return:no"
  },
  {
    "library": "pandas",
    "name": "DlpackDeviceType",
    "source_code": "class DlpackDeviceType(enum.IntEnum):\n    CPU = 1\n    CUDA = 2\n    CPU_PINNED = 3\n    OPENCL = 4\n    VULKAN = 7\n    METAL = 8\n    VPI = 9\n    ROCM = 10",
    "docstring": "Integer enum for device type codes matching DLPack.",
    "type": "class",
    "file_path": "pandas\\pandas\\core\\interchange\\dataframe_protocol.py",
    "ast_data": "ClassDef name:DlpackDeviceType Assign Assign Assign Assign Assign Assign Assign Assign"
  },
  {
    "library": "numpy",
    "name": "IntelEM64TCCompiler",
    "source_code": "class IntelEM64TCCompiler(UnixCCompiler):\n    compiler_type = 'intelem'\n    cc_exe = 'icc -m64'\n    cc_args = '-fPIC'\n\n    def __init__(self, verbose=0, dry_run=0, force=0):\n        UnixCCompiler.__init__(self, verbose, dry_run, force)\n        v = self.get_version()\n        mpopt = 'openmp' if v and v < '15' else 'qopenmp'\n        self.cc_exe = 'icc -std=c99 -m64 -fPIC -fp-model strict -O3 -fomit-frame-pointer -{}'.format(mpopt)\n        compiler = self.cc_exe\n        if platform.system() == 'Darwin':\n            shared_flag = '-Wl,-undefined,dynamic_lookup'\n        else:\n            shared_flag = '-shared'\n        self.set_executables(compiler=compiler, compiler_so=compiler, compiler_cxx=compiler, archiver='xiar' + ' cru', linker_exe=compiler + ' -shared-intel', linker_so=compiler + ' ' + shared_flag + ' -shared-intel')",
    "docstring": "A modified Intel x86_64 compiler compatible with a 64bit GCC-built Python.",
    "type": "class",
    "file_path": "numpy\\numpy\\distutils\\intelccompiler.py",
    "ast_data": "ClassDef name:IntelEM64TCCompiler Assign Assign Assign FunctionDef name:__init__ arg:self arg:verbose arg:dry_run arg:force arguments arg arg arg arg Call Assign Call Assign BoolOp Compare Assign Call Assign If Compare Call Assign Assign Call"
  },
  {
    "library": "pandas",
    "name": "to_sql",
    "source_code": "def to_sql(self, frame, name: str, if_exists: str='fail', index: bool=True, index_label=None, schema=None, chunksize: int | None=None, dtype: DtypeArg | None=None, method: Literal['multi'] | Callable | None=None, engine: str='auto', **engine_kwargs) -> int | None:\n    if dtype:\n        if not is_dict_like(dtype):\n            dtype = dict.fromkeys(frame, dtype)\n        else:\n            dtype = cast(dict, dtype)\n        for col, my_type in dtype.items():\n            if not isinstance(my_type, str):\n                raise ValueError(f'{col} ({my_type}) not a string')\n    table = SQLiteTable(name, self, frame=frame, index=index, if_exists=if_exists, index_label=index_label, dtype=dtype)\n    table.create()\n    return table.insert(chunksize, method)",
    "docstring": "Write records stored in a DataFrame to a SQL database. Parameters ---------- frame: DataFrame name: string Name of SQL table. if_exists: {'fail', 'replace', 'append', 'delete_rows'}, default 'fail' fail: If table exists, do nothing. replace: If table exists, drop it, recreate it, and insert data. append: If table exists, insert data. Create if it does not exist. delete_rows: If a table exists, delete all records and insert data. index : bool, default True Write DataFrame index as a column index_label : string or sequence, default None Column label for index column(s). If None is given (default) and is True, then the index names are used. A sequence should be given if the DataFrame uses MultiIndex. schema : string, default None Ignored parameter included for compatibility with SQLAlchemy version of `insert method `.",
    "type": "method",
    "file_path": "pandas\\pandas\\io\\sql.py",
    "ast_data": "FunctionDef name:to_sql arg:self arg:frame arg:name arg:if_exists arg:index arg:index_label arg:schema arg:chunksize arg:dtype arg:method arg:engine arguments arg arg arg arg arg arg arg arg arg arg arg arg If If Call Assign Call Assign Call For Call If Call Raise Call Assign Call Call Return return:yes Call"
  },
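A usage sketch through the public `DataFrame.to_sql` entry point; with a plain `sqlite3` DBAPI connection pandas routes to the SQLite implementation above:

```python
import sqlite3
import pandas as pd

con = sqlite3.connect(":memory:")
df = pd.DataFrame({"a": [1, 2], "b": [0.5, 1.5]})

# Creates the table (replacing any existing one) and inserts the rows.
df.to_sql("t", con, if_exists="replace", index=False)
print(con.execute("SELECT * FROM t").fetchall())  # [(1, 0.5), (2, 1.5)]
```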
  {
    "library": "tensorflow",
    "name": "set_output_all_intermediates",
    "source_code": "@tf_export(v1=['experimental.output_all_intermediates'])\ndef set_output_all_intermediates(state):\n    global _EXPERIMENTAL_OUTPUT_ALL_INTERMEDIATES_OVERRIDE\n    _EXPERIMENTAL_OUTPUT_ALL_INTERMEDIATES_OVERRIDE = state",
    "docstring": "Whether to output all intermediates from functional control flow ops. The \"default\" behavior to is to output all intermediates when using v2 control flow inside Keras models in graph mode. This is needed to support taking gradients of v2 control flow. In graph mode, Keras can sometimes freeze the forward graph before the gradient computation which does not work for v2 control flow since it requires updating the forward ops to output the needed intermediates. We work around this by proactively outputting the needed intermediates when building the forward pass itself. Ideally any such extra tensors should be pruned out at runtime. However, if for any reason this doesn't work for you or if you have an inference-only model you can turn this behavior off using . If with the default behavior you are still seeing errors of the form \"Connecting to invalid output X of source node Y which has Z outputs\" try setting and please file an issue at Args: state: True, False or None. None restores the default behavior.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\control_flow_util_v2.py",
    "ast_data": "FunctionDef name:set_output_all_intermediates arg:state arguments arg Assign Call"
  },
  {
    "library": "seaborn",
    "name": "__init__",
    "source_code": "def __init__(self, k_depth, outlier_prop, trust_alpha):\n    k_options = ['tukey', 'proportion', 'trustworthy', 'full']\n    if isinstance(k_depth, str):\n        _check_argument('k_depth', k_options, k_depth)\n    elif not isinstance(k_depth, int):\n        err = f'The `k_depth` parameter must be either an integer or string (one of {k_options}), not {k_depth!r}.'\n        raise TypeError(err)\n    self.k_depth = k_depth\n    self.outlier_prop = outlier_prop\n    self.trust_alpha = trust_alpha",
    "docstring": "Compute percentiles of a distribution using various tail stopping rules. Parameters ---------- k_depth: \"tukey\", \"proportion\", \"trustworthy\", or \"full\" Stopping rule for choosing tail percentiled to show: - tukey: Show a similar number of outliers as in a conventional boxplot. - proportion: Show approximately outliers. - trust_alpha: Use level for most extreme tail percentile. outlier_prop: float Parameter for setting the expected outlier rate. trust_alpha: float Parameter for setting the confidence threshold. Notes ----- Based on the proposal in this paper:",
    "type": "method",
    "file_path": "seaborn\\seaborn\\_statistics.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:k_depth arg:outlier_prop arg:trust_alpha arguments arg arg arg arg Assign If Call Call If Call Assign Raise Call Assign Assign Assign"
  },
  {
    "library": "kornia",
    "name": "rgb2short",
    "source_code": "def rgb2short(rgb: str) -> Tuple[str, str]:\n    rgb = _strip_hash(rgb)\n    incs = (0, 95, 135, 175, 215, 255)\n    parts = [int(h, 16) for h in re.split('(..)(..)(..)', rgb)[1:4]]\n    res = []\n    for part in parts:\n        i = 0\n        while i < len(incs) - 1:\n            s, b = (incs[i], incs[i + 1])\n            if s <= part <= b:\n                s1 = abs(s - part)\n                b1 = abs(b - part)\n                if s1 < b1:\n                    closest = s\n                else:\n                    closest = b\n                res.append(closest)\n                break\n            i += 1\n    _res = ''.join([f'{i:02x}' for i in res])\n    equiv = RGB2SHORT_DICT[_res]\n    return (equiv, _res)",
    "docstring": "Find the closest xterm-256 approximation to the given RGB value. Args: rgb: Hex code representing an RGB value, eg, 'abcdef'. Returns: String between 0 and 255, compatible with xterm. Example: >>> rgb2short('123456') ('23', '005f5f') >>> rgb2short('ffffff') ('231', 'ffffff') >>> rgb2short('0DADD6') # vimeo logo ('38', '00afd7')",
    "type": "function",
    "file_path": "kornia\\kornia\\utils\\image_print.py",
    "ast_data": "FunctionDef name:rgb2short arg:rgb arguments arg Assign Call Assign Assign Call Call Assign For Assign While Compare Call Assign If Compare Assign Call Assign Call If Compare Assign Assign Call Assign Call Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "RepresentativeDatasetSaver",
    "source_code": "class RepresentativeDatasetSaver:\n\n    def save(self, representative_dataset: RepresentativeDatasetMapping) -> Mapping[str, _RepresentativeDatasetFile]:\n        raise NotImplementedError('Method \"save\" is not implemented.')",
    "docstring": "Representative dataset saver. Exposes a single method that saves the provided representative dataset into files. This is useful when you would like to keep a snapshot of your representative dataset at a file system or when you need to pass the representative dataset as files.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\compiler\\mlir\\quantization\\tensorflow\\python\\representative_dataset.py",
    "ast_data": "ClassDef name:RepresentativeDatasetSaver FunctionDef name:save arg:self arg:representative_dataset arguments arg arg Raise Call"
  },
  {
    "library": "cryptography",
    "name": "serial_number",
    "source_code": "def serial_number(self, number: int) -> CertificateBuilder:\n    if not isinstance(number, int):\n        raise TypeError('Serial number must be of integral type.')\n    if self._serial_number is not None:\n        raise ValueError('The serial number may only be set once.')\n    if number <= 0:\n        raise ValueError('The serial number should be positive.')\n    if number.bit_length() >= 160:\n        raise ValueError('The serial number should not be more than 159 bits.')\n    return CertificateBuilder(self._issuer_name, self._subject_name, self._public_key, number, self._not_valid_before, self._not_valid_after, self._extensions)",
    "docstring": "Sets the certificate serial number.",
    "type": "method",
    "file_path": "cryptography\\src\\cryptography\\x509\\base.py",
    "ast_data": "FunctionDef name:serial_number arg:self arg:number arguments arg arg If Call Raise Call If Compare Raise Call If Compare Raise Call If Compare Call Raise Call Return return:yes Call"
  },
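A minimal usage sketch for this builder method: `x509.random_serial_number()` is the library's helper for generating a compliant serial, and the subject name below is illustrative.

```python
# Sketch: setting the serial while building a certificate. The serial must be
# positive and under 160 bits; random_serial_number() satisfies both.
from cryptography import x509
from cryptography.x509.oid import NameOID

builder = (
    x509.CertificateBuilder()
    .subject_name(x509.Name([x509.NameAttribute(NameOID.COMMON_NAME, "example.com")]))
    .serial_number(x509.random_serial_number())
)
# Calling .serial_number(...) a second time raises ValueError
# ("The serial number may only be set once.").
```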
  {
    "library": "django",
    "name": "ManifestStaticFilesStorage",
    "source_code": "class ManifestStaticFilesStorage(ManifestFilesMixin, StaticFilesStorage):\n    pass",
    "docstring": "A static file system storage backend which also saves hashed copies of the files it saves.",
    "type": "class",
    "file_path": "django\\django\\contrib\\staticfiles\\storage.py",
    "ast_data": "ClassDef name:ManifestStaticFilesStorage"
  },
  {
    "library": "django",
    "name": "get_relations",
    "source_code": "def get_relations(self, cursor, table_name):\n    cursor.execute('\\n            SELECT column_name, referenced_column_name, referenced_table_name\\n            FROM information_schema.key_column_usage\\n            WHERE table_name = %s\\n                AND table_schema = DATABASE()\\n                AND referenced_table_schema = DATABASE()\\n                AND referenced_table_name IS NOT NULL\\n                AND referenced_column_name IS NOT NULL\\n            ', [table_name])\n    return {field_name: (other_field, other_table) for field_name, other_field, other_table in cursor.fetchall()}",
    "docstring": "Return a dictionary of {field_name: (field_name_other_table, other_table)} representing all foreign keys in the given table.",
    "type": "method",
    "file_path": "django\\django\\db\\backends\\mysql\\introspection.py",
    "ast_data": "FunctionDef name:get_relations arg:self arg:cursor arg:table_name arguments arg arg arg Call Return return:yes Call"
  },
  {
    "library": "cherrypy",
    "name": "__call__",
    "source_code": "def __call__(self):\n    return self.callback(**self.kwargs)",
    "docstring": "Run self.callback(**self.kwargs).",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\_cprequest.py",
    "ast_data": "FunctionDef name:__call__ arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "consumes",
    "source_code": "def consumes(self, method, params):\n    return getattr(self, method)._consumes(params=params)",
    "docstring": "Check whether the given parameters are consumed by the given method. .. versionadded:: 1.4 Parameters ---------- method : str The name of the method to check. params : iterable of str An iterable of parameters to check. Returns ------- consumed : set of str A set of parameters which are consumed by the given method.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\utils\\_metadata_requests.py",
    "ast_data": "FunctionDef name:consumes arg:self arg:method arg:params arguments arg arg arg Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "children",
    "source_code": "def children(self) -> Iterator['Module']:\n    for _name, module in self.named_children():\n        yield module",
    "docstring": "Return an iterator over immediate children modules. Yields: Module: a child module",
    "type": "method",
    "file_path": "pytorch\\torch\\nn\\modules\\module.py",
    "ast_data": "FunctionDef name:children arg:self arguments arg For Call"
  },
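A small sketch of the iteration behavior: `children()` yields only immediate submodules, while the recursive walk is `modules()`. The toy network is illustrative.

```python
import torch.nn as nn

net = nn.Sequential(nn.Linear(4, 8), nn.Sequential(nn.ReLU(), nn.Linear(8, 2)))
for child in net.children():
    print(type(child).__name__)
# Linear
# Sequential  <- the nested ReLU/Linear are not yielded; use net.modules() for that
```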
  {
    "library": "kornia",
    "name": "get_grid",
    "source_code": "def get_grid(B: int, H: int, W: int, device: torch.device) -> torch.Tensor:\n    x1_n_ = torch.meshgrid(*[torch.linspace(-1 + 1 / n, 1 - 1 / n, n, device=device) for n in (B, H, W)], indexing='ij')\n    x1_n = torch.stack((x1_n_[2], x1_n_[1]), dim=-1).reshape(B, H * W, 2)\n    return x1_n",
    "docstring": "Get grid of provided layout.",
    "type": "function",
    "file_path": "kornia\\kornia\\feature\\dedode\\utils.py",
    "ast_data": "FunctionDef name:get_grid arg:B arg:H arg:W arg:device arguments arg arg arg arg Assign Call Call Assign Call Call Return return:yes"
  },
  {
    "library": "scipy",
    "name": "rvs",
    "source_code": "def rvs(self, df, scale, size=1, random_state=None):\n    n, shape = self._process_size(size)\n    dim, df, scale = self._process_parameters(df, scale)\n    C = scipy.linalg.cholesky(scale, lower=True)\n    out = self._rvs(n, shape, dim, df, C, random_state)\n    return _squeeze_output(out)",
    "docstring": "Draw random samples from an inverse Wishart distribution. Parameters ---------- %(_doc_default_callparams)s size : integer or iterable of integers, optional Number of samples to draw (default 1). %(_doc_random_state)s Returns ------- rvs : ndarray Random variates of shape () + (`` is the dimension of the scale matrix. Notes ----- %(_doc_callparams_note)s",
    "type": "method",
    "file_path": "scipy\\scipy\\stats\\_multivariate.py",
    "ast_data": "FunctionDef name:rvs arg:self arg:df arg:scale arg:size arg:random_state arguments arg arg arg arg arg Assign Call Assign Call Assign Call Assign Call Return return:yes Call"
  },
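A sketch through the public `scipy.stats.invwishart` front end, which dispatches to this `rvs` method; the parameter values are illustrative.

```python
import numpy as np
from scipy.stats import invwishart

# Four draws from a 3-dimensional inverse Wishart with 5 degrees of freedom.
samples = invwishart.rvs(df=5, scale=np.eye(3), size=4, random_state=0)
print(samples.shape)  # (4, 3, 3) -- (size,) + (dim, dim)
```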
  {
    "library": "numpy",
    "name": "_numeric_methods",
    "source_code": "def _numeric_methods(ufunc, name):\n    return (_binary_method(ufunc, name), _reflected_binary_method(ufunc, name), _inplace_binary_method(ufunc, name))",
    "docstring": "Implement forward, reflected and inplace binary methods with a ufunc.",
    "type": "function",
    "file_path": "numpy\\numpy\\lib\\mixins.py",
    "ast_data": "FunctionDef name:_numeric_methods arg:ufunc arg:name arguments arg arg Return return:yes Call Call Call"
  },
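`_numeric_methods` is one of the private helpers behind `np.lib.mixins.NDArrayOperatorsMixin`. A minimal sketch of consuming the public mixin, with the `Wrapped` class invented for illustration:

```python
import numpy as np

class Wrapped(np.lib.mixins.NDArrayOperatorsMixin):
    """Defers all arithmetic to numpy ufuncs via __array_ufunc__."""
    def __init__(self, value):
        self.value = np.asarray(value)

    def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
        arrays = [x.value if isinstance(x, Wrapped) else x for x in inputs]
        return Wrapped(getattr(ufunc, method)(*arrays, **kwargs))

w = Wrapped([1, 2, 3])
print((w + 10).value)  # [11 12 13] -- forward binary method supplied by the mixin
print((10 + w).value)  # [11 12 13] -- reflected method supplied by the mixin
```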
  {
    "library": "pytorch",
    "name": "_handle_row_wise_mask",
    "source_code": "def _handle_row_wise_mask(gather_inp, padding_idx, weight, world_size, rank):\n    start_pos, chunk_size = get_chunk_sharding_params(weight.size(0), world_size, weight._sharding_spec, rank)\n    mask = (gather_inp < start_pos) | (gather_inp >= start_pos + chunk_size)\n    lookup_input = gather_inp.clone() - start_pos\n    lookup_input[mask] = chunk_size\n    if padding_idx is not None and padding_idx >= start_pos and (padding_idx < start_pos + chunk_size):\n        padding_idx = padding_idx - start_pos\n    else:\n        padding_idx = None\n    padding_row = torch.zeros(1, weight.size(1), device=gather_inp.device, dtype=weight.dtype)\n    return (lookup_input, padding_idx, padding_row)",
    "docstring": "Mask the input for embedding look-up for IDs which are not stored on the current rank. This function also adjust the ``.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\_shard\\sharding_spec\\chunk_sharding_spec_ops\\_common.py",
    "ast_data": "FunctionDef name:_handle_row_wise_mask arg:gather_inp arg:padding_idx arg:weight arg:world_size arg:rank arguments arg arg arg arg arg Assign Call Call Assign Compare Compare Assign Call Assign If BoolOp Compare Compare Compare Assign Assign Assign Call Call Return return:yes"
  },
  {
    "library": "django",
    "name": "__len__",
    "source_code": "def __len__(self):\n    return len(self.children)",
    "docstring": "Return the number of children this node has.",
    "type": "method",
    "file_path": "django\\django\\utils\\tree.py",
    "ast_data": "FunctionDef name:__len__ arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "pygame",
    "name": "quit",
    "source_code": "def quit():\n    if _module_init():\n        _pypm.Terminate()\n        _module_init(False)",
    "docstring": "uninitialize the midi module pygame.midi.quit(): return None Called automatically atexit if you don't call it. It is safe to call this function more than once.",
    "type": "function",
    "file_path": "pygame\\src_py\\midi.py",
    "ast_data": "FunctionDef name:quit arguments If Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_init_from_proto",
    "source_code": "def _init_from_proto(self, context_def, import_scope=None):\n    assert isinstance(context_def, control_flow_pb2.CondContextDef)\n    g = ops.get_default_graph()\n    self._name = ops.prepend_name_scope(context_def.context_name, import_scope)\n    self._pred = g.as_graph_element(ops.prepend_name_scope(context_def.pred_name, import_scope))\n    self._pivot = g.as_graph_element(ops.prepend_name_scope(context_def.pivot_name, import_scope))\n    self._branch = context_def.branch\n    super(CondContext, self).__init__(values_def=context_def.values_def, import_scope=import_scope)",
    "docstring": "Creates a new from protocol buffer. Args: context_def: protocol buffer. import_scope: Optional . Name scope to add.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\control_flow_ops.py",
    "ast_data": "FunctionDef name:_init_from_proto arg:self arg:context_def arg:import_scope arguments arg arg arg Call Assign Call Assign Call Assign Call Call Assign Call Call Assign Call Call"
  },
  {
    "library": "tensorflow",
    "name": "GetWhileContext",
    "source_code": "def GetWhileContext(self):\n    if self._outer_context:\n        return self._outer_context.GetWhileContext()\n    return None",
    "docstring": "Return the while context containing this context.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\control_flow_ops.py",
    "ast_data": "FunctionDef name:GetWhileContext arg:self arguments arg If Return return:yes Call Return return:no"
  },
  {
    "library": "scrapy",
    "name": "send_catch_log_async",
    "source_code": "async def send_catch_log_async(self, signal: Any, **kwargs: Any) -> list[tuple[Any, Any]]:\n    kwargs.setdefault('sender', self.sender)\n    return await _signal.send_catch_log_async(signal, **kwargs)",
    "docstring": "Like :meth: but supports :ref:. Returns a coroutine that completes once all signal handlers have finished. Send a signal, catch exceptions and log them. The keyword arguments are passed to the signal handlers (connected through the :meth: method).",
    "type": "method",
    "file_path": "scrapy\\scrapy\\signalmanager.py",
    "ast_data": "AsyncFunctionDef name:send_catch_log_async arg:self arg:signal arguments arg arg arg Call Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "next_workday",
    "source_code": "def next_workday(dt: datetime) -> datetime:\n    dt += timedelta(days=1)\n    while dt.weekday() > 4:\n        dt += timedelta(days=1)\n    return dt",
    "docstring": "returns next workday used for observances",
    "type": "function",
    "file_path": "pandas\\pandas\\tseries\\holiday.py",
    "ast_data": "FunctionDef name:next_workday arg:dt arguments arg Call While Compare Call Call Return return:yes"
  },
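A short sketch of the weekend-skipping behavior; the dates are illustrative.

```python
from datetime import datetime
from pandas.tseries.holiday import next_workday

print(next_workday(datetime(2024, 3, 8)))   # Friday -> 2024-03-11 (Monday)
print(next_workday(datetime(2024, 3, 11)))  # Monday -> 2024-03-12 (Tuesday)
```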
  {
    "library": "matplotlib",
    "name": "get_vertices",
    "source_code": "def get_vertices(self):\n    if self.width < self.height:\n        ret = self.get_patch_transform().transform([(0, 1), (0, -1)])\n    else:\n        ret = self.get_patch_transform().transform([(1, 0), (-1, 0)])\n    return [tuple(x) for x in ret]",
    "docstring": "Return the vertices coordinates of the ellipse. The definition can be found _ .. versionadded:: 3.8",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\patches.py",
    "ast_data": "FunctionDef name:get_vertices arg:self arguments arg If Compare Assign Call Call Assign Call Call Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "get_dashes",
    "source_code": "def get_dashes(self):\n    return self._dashes",
    "docstring": "Return the dash style as an (offset, dash-list) pair. See for details. Default value is (None, None).",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\backend_bases.py",
    "ast_data": "FunctionDef name:get_dashes arg:self arguments arg Return return:yes"
  },
  {
    "library": "scipy",
    "name": "Problem04",
    "source_code": "class Problem04(Benchmark):\n\n    def __init__(self, dimensions=1):\n        Benchmark.__init__(self, dimensions)\n        self._bounds = [(1.9, 3.9)]\n        self.global_optimum = 2.868034\n        self.fglob = -3.85045\n\n    def fun(self, x, *args):\n        self.nfev += 1\n        x = x[0]\n        return -(16 * x ** 2 - 24 * x + 5) * exp(-x)",
    "docstring": "Univariate Problem04 objective function. This class defines the Univariate Problem04 global optimization problem. This is a multimodal minimization problem defined as follows: .. math:: f_{\\text{Problem04}}(x) = - \\left(16x^2 - 24x + 5 \\right) e^{-x} Bound constraints: :math: .. figure:: figures/Problem04.png :alt: Univariate Problem04 function :align: center **Univariate Problem04 function** *Global optimum*: :math: for :math:",
    "type": "class",
    "file_path": "scipy\\benchmarks\\benchmarks\\go_benchmark_functions\\go_funcs_univariate.py",
    "ast_data": "ClassDef name:Problem04 FunctionDef name:__init__ arg:self arg:dimensions arguments arg arg Call Assign Assign Assign FunctionDef name:fun arg:self arg:x arguments arg arg arg Assign Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "reduction",
    "source_code": "def reduction(self, dtype: torch.dtype, src_dtype: torch.dtype, reduction_type: ReductionType, value: Union[CSEVariable, tuple[CSEVariable, ...]]) -> Union[CSEVariable, tuple[CSEVariable, ...]]:\n    cache_key = (src_dtype, reduction_type, value)\n    if cache_key in self.cse.reduction_cache:\n        return self.cse.reduction_cache[cache_key]\n    result = self._reduction_nocache(dtype, src_dtype, reduction_type, value)\n    self.cse.reduction_cache[cache_key] = result\n    return result",
    "docstring": "Caching wrapper around _reduction_nocache",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\codegen\\mps.py",
    "ast_data": "FunctionDef name:reduction arg:self arg:dtype arg:src_dtype arg:reduction_type arg:value arguments arg arg arg arg arg Assign If Compare Return return:yes Assign Call Assign Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "chain_matmul",
    "source_code": "def chain_matmul(*matrices, out=None):\n    if has_torch_function(matrices):\n        return handle_torch_function(chain_matmul, matrices, *matrices)\n    if out is None:\n        return _VF.chain_matmul(matrices)\n    else:\n        return _VF.chain_matmul(matrices, out=out)",
    "docstring": "Returns the matrix product of the :math: 2-D tensors. This product is efficiently computed using the matrix chain order algorithm which selects the order in which incurs the lowest cost in terms of arithmetic operations (_). Note that since this is a function to compute the product, :math: needs to be greater than or equal to 2; if equal to 2 then a trivial matrix-matrix product is returned. If :math: is 1, then this is a no-op - the original matrix is returned as is. .. warning:: :func: is deprecated and will be removed in a future PyTorch release. Use :func: instead, which accepts a list of two or more tensors rather than multiple arguments. Args: matrices (Tensors...): a sequence of 2 or more 2-D tensors whose product is to be determined. out (Tensor, optional): the output tensor. Ignored if :attr: = `i^{th}p_{i} \\times p_{i + 1}p_{1} \\times p_{N + 1}[CLRS]`:",
    "type": "function",
    "file_path": "pytorch\\torch\\functional.py",
    "ast_data": "FunctionDef name:chain_matmul arguments arg arg If Call Return return:yes Call If Compare Return return:yes Call Return return:yes Call"
  },
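A sketch of the documented migration: `torch.linalg.multi_dot` takes a list and, like `chain_matmul`, picks the association order with the lowest arithmetic cost.

```python
import torch

a, b, c = torch.randn(3, 4), torch.randn(4, 5), torch.randn(5, 2)

out = torch.linalg.multi_dot([a, b, c])  # preferred over torch.chain_matmul(a, b, c)
print(out.shape)  # torch.Size([3, 2]) -- p_1 x p_{N+1}
```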
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, initial_learning_rate, decay_steps, end_learning_rate=0.0001, power=1.0, cycle=False, name=None):\n    super(PolynomialDecay, self).__init__()\n    self.initial_learning_rate = initial_learning_rate\n    self.decay_steps = decay_steps\n    self.end_learning_rate = end_learning_rate\n    self.power = power\n    self.cycle = cycle\n    self.name = name",
    "docstring": "Applies a polynomial decay to the learning rate. Args: initial_learning_rate: A scalar or or a Python number. The initial learning rate. decay_steps: A scalar or or a Python number. Must be positive. See the decay computation above. end_learning_rate: A scalar or or a Python number. The minimal end learning rate. power: A scalar or or a Python number. The power of the polynomial. Defaults to linear, 1.0. cycle: A boolean, whether or not it should cycle beyond decay_steps. name: String. Optional name of the operation. Defaults to 'PolynomialDecay'.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\optimizer_v2\\learning_rate_schedule.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:initial_learning_rate arg:decay_steps arg:end_learning_rate arg:power arg:cycle arg:name arguments arg arg arg arg arg arg arg Call Call Assign Assign Assign Assign Assign Assign"
  },
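A sketch of the schedule this constructor configures, going through the public `tf.keras.optimizers.schedules.PolynomialDecay` (assuming the public wrapper mirrors this legacy class); with power=1.0 the decay is linear.

```python
import tensorflow as tf

schedule = tf.keras.optimizers.schedules.PolynomialDecay(
    initial_learning_rate=0.1,
    decay_steps=1000,
    end_learning_rate=0.01,
    power=1.0,  # linear decay
)
# Halfway through: (0.1 - 0.01) * (1 - 500/1000)**1.0 + 0.01 = 0.055
print(float(schedule(500)))
```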
  {
    "library": "tensorflow",
    "name": "experimental_replicate_to_logical_devices",
    "source_code": "def experimental_replicate_to_logical_devices(self, tensor):\n    return xla_sharding.replicate(tensor, use_sharding_op=True)",
    "docstring": "Adds annotation that will be replicated to all logical devices. This adds an annotation to tensor specifying that operations on will be invoked on all logical devices. Args: tensor: Input tensor to annotate. Returns: Annotated tensor with identical value as .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\tpu_strategy.py",
    "ast_data": "FunctionDef name:experimental_replicate_to_logical_devices arg:self arg:tensor arguments arg arg Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "_make_estimator",
    "source_code": "def _make_estimator(self, append=True, random_state=None):\n    estimator = clone(self.estimator_)\n    estimator.set_params(**{p: getattr(self, p) for p in self.estimator_params})\n    if random_state is not None:\n        _set_random_states(estimator, random_state)\n    if append:\n        self.estimators_.append(estimator)\n    return estimator",
    "docstring": "Make and configure a copy of the attribute. Warning: This method should be used to properly instantiate new sub-estimators.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\ensemble\\_base.py",
    "ast_data": "FunctionDef name:_make_estimator arg:self arg:append arg:random_state arguments arg arg arg Assign Call Call Call If Compare Call If Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "start",
    "source_code": "def start(self) -> None:\n    self._server.start()",
    "docstring": "Starts this server. Raises: tf.errors.OpError: Or one of its subclasses if an error occurs while starting the server.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\data\\experimental\\service\\server_lib.py",
    "ast_data": "FunctionDef name:start arg:self arguments arg Call"
  },
  {
    "library": "pytorch",
    "name": "_lazy_init",
    "source_code": "def _lazy_init(self) -> None:\n    if self._is_root is not None:\n        return\n    self._is_root = True\n    if len(self._modules) > 1:\n        raise RuntimeError(f'FSDP requires a single root module but got {self._modules}')\n    detect_compiled_autograd()\n    root_module = self._modules[0]\n    visited_states: set[FSDPState] = set()\n    for module_name, module in root_module.named_modules():\n        if (state := _get_module_fsdp_state(module)) is None:\n            continue\n        if module is not root_module:\n            if state not in visited_states and state._is_root is not None:\n                raise RuntimeError(f'FSDP state has already been lazily initialized for {module_name}\\nFSDP requires running forward through the root module first')\n            state._is_root = False\n        self._state_ctx.all_states.append(state)\n        visited_states.add(state)\n    if self._fsdp_param_group:\n        self._fsdp_param_group.post_forward_mesh_info = None\n    self._init_fqns()\n    self._init_shared_state()\n    for state in self._state_ctx.all_states:\n        if state._fsdp_param_group:\n            state._fsdp_param_group.lazy_init()",
    "docstring": "Lazy initialization represents when all modules' parallelisms have finalized (e.g. FSDP has been applied to all desired modules). This means that we can determine which state is the root, and we do so by the 1st state to run forward.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\fsdp\\_fully_shard\\_fsdp_state.py",
    "ast_data": "FunctionDef name:_lazy_init arg:self arguments arg If Compare Return return:no Assign If Compare Call Raise Call Call Assign Call For Call If Compare Call If Compare If BoolOp Compare Compare Raise Call Assign Call Call If Assign Call Call For If Call"
  },
  {
    "library": "tensorflow",
    "name": "annotate_source",
    "source_code": "def annotate_source(dump, source_file_path, do_dumped_tensors=False, file_stack_top=False, min_line=None, max_line=None):\n    py_graph = dump.python_graph\n    if not py_graph:\n        raise ValueError('Cannot perform source annotation due to a lack of set Python graph in the dump object')\n    source_file_path = _norm_abs_path(source_file_path)\n    line_to_op_names = {}\n    for op in py_graph.get_operations():\n        for file_path, line_number, _, _ in reversed(dump.node_traceback(op.name)):\n            if min_line is not None and line_number < min_line or (max_line is not None and line_number >= max_line):\n                continue\n            if _norm_abs_path(file_path) != source_file_path:\n                continue\n            if do_dumped_tensors:\n                watch_keys = dump.debug_watch_keys(op.name)\n                items_to_append = list(set(map(_convert_watch_key_to_tensor_name, watch_keys)))\n            else:\n                items_to_append = [op.name]\n            if line_number in line_to_op_names:\n                line_to_op_names[line_number].extend(items_to_append)\n            else:\n                line_to_op_names[line_number] = items_to_append\n            if file_stack_top:\n                break\n    return line_to_op_names",
    "docstring": "Annotate a Python source file with a list of ops created at each line. (The annotation doesn't change the source file itself.) Args: dump: () A object of which the Python graph has been loaded. source_file_path: () Path to the source file being annotated. do_dumped_tensors: () Whether dumped Tensors, instead of ops are to be used to annotate the source file. file_stack_top: () Whether only the top stack trace in the specified source file is to be annotated. min_line: ( or ) The 1-based line to start annotate the source file from (inclusive). max_line: ( or ) The 1-based line number to end the annotation at (exclusive). Returns: A mapping 1-based line number to a list of op name(s) created at that line, or tensor names if is True. Raises: ValueError: If the dump object does not have a Python graph set.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\debug\\lib\\source_utils.py",
    "ast_data": "FunctionDef name:annotate_source arg:dump arg:source_file_path arg:do_dumped_tensors arg:file_stack_top arg:min_line arg:max_line arguments arg arg arg arg arg arg Assign If Raise Call Assign Call Assign For Call For Call Call If BoolOp BoolOp Compare Compare BoolOp Compare Compare If Compare Call If Assign Call Assign Call Call Call Assign If Compare Call Assign If Return return:yes"
  },
  {
    "library": "sphinx",
    "name": "production",
    "source_code": "class production(nodes.Part, nodes.Inline, nodes.FixedTextElement):\n    pass",
    "docstring": "Node for a single grammar production rule.",
    "type": "class",
    "file_path": "sphinx\\sphinx\\addnodes.py",
    "ast_data": "ClassDef name:production"
  },
  {
    "library": "matplotlib",
    "name": "_on_timer",
    "source_code": "def _on_timer(self):\n    for func, args, kwargs in self.callbacks:\n        ret = func(*args, **kwargs)\n        if ret == 0:\n            self.callbacks.remove((func, args, kwargs))\n    if len(self.callbacks) == 0:\n        self.stop()",
    "docstring": "Runs all function that have been registered as callbacks. Functions can return False (or 0) if they should not be called any more. If there are no callbacks, the timer is automatically stopped.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\backend_bases.py",
    "ast_data": "FunctionDef name:_on_timer arg:self arguments arg For Assign Call If Compare Call If Compare Call Call"
  },
  {
    "library": "tensorflow",
    "name": "matrix_solve_with_broadcast",
    "source_code": "def matrix_solve_with_broadcast(matrix, rhs, adjoint=False, name=None):\n    with ops.name_scope(name, 'MatrixSolveWithBroadcast', [matrix, rhs]):\n        matrix = tensor_conversion.convert_to_tensor_v2_with_dispatch(matrix, name='matrix')\n        rhs = tensor_conversion.convert_to_tensor_v2_with_dispatch(rhs, name='rhs', dtype=matrix.dtype)\n        matrix, rhs, reshape_inv, still_need_to_transpose = _reshape_for_efficiency(matrix, rhs, adjoint_a=adjoint)\n        matrix, rhs = broadcast_matrix_batch_dims([matrix, rhs])\n        solution = linalg_ops.matrix_solve(matrix, rhs, adjoint=adjoint and still_need_to_transpose)\n        return reshape_inv(solution)",
    "docstring": "Solve systems of linear equations.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\linalg\\linear_operator_util.py",
    "ast_data": "FunctionDef name:matrix_solve_with_broadcast arg:matrix arg:rhs arg:adjoint arg:name arguments arg arg arg arg With Call Assign Call Assign Call Assign Call Assign Call Assign Call BoolOp Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "get_status",
    "source_code": "def get_status(self):\n    a, b = self.ab[:2]\n    if np.isclose(a, b, rtol=self.rtol, atol=self.xtol):\n        return (_ECONVERGED, sum(self.ab) / 2.0)\n    if self.iterations >= self.maxiter:\n        return (_ECONVERR, sum(self.ab) / 2.0)\n    return (_EINPROGRESS, sum(self.ab) / 2.0)",
    "docstring": "Determine the current status.",
    "type": "method",
    "file_path": "scipy\\scipy\\optimize\\_zeros_py.py",
    "ast_data": "FunctionDef name:get_status arg:self arguments arg Assign If Call Return return:yes Call If Compare Return return:yes Call Return return:yes Call"
  },
  {
    "library": "django",
    "name": "flatatt",
    "source_code": "def flatatt(attrs):\n    key_value_attrs = []\n    boolean_attrs = []\n    for attr, value in attrs.items():\n        if isinstance(value, bool):\n            if value:\n                boolean_attrs.append((attr,))\n        elif value is not None:\n            key_value_attrs.append((attr, value))\n    return format_html_join('', ' {}=\"{}\"', sorted(key_value_attrs)) + format_html_join('', ' {}', sorted(boolean_attrs))",
    "docstring": "Convert a dictionary of attributes to a single string. The returned string will contain a leading space followed by key=\"value\", XML-style pairs. In the case of a boolean value, the key will appear without a value. It is assumed that the keys do not need to be XML-escaped. If the passed dictionary is empty, then return an empty string. The result is passed through 'mark_safe' (by way of 'format_html_join').",
    "type": "function",
    "file_path": "django\\django\\forms\\utils.py",
    "ast_data": "FunctionDef name:flatatt arg:attrs arguments arg Assign Assign For Call If Call If Call If Compare Call Return return:yes Call Call Call Call"
  },
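A sketch of the three value classes the loop above distinguishes: regular values, booleans (rendered bare), and None (dropped); the attribute names are illustrative.

```python
from django.forms.utils import flatatt

print(flatatt({"id": "header", "class": "news", "required": True, "title": None}))
# ' class="news" id="header" required'  (both groups emitted in sorted order)
```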
  {
    "library": "tensorflow",
    "name": "BorgTPUTerminationConfig",
    "source_code": "class BorgTPUTerminationConfig(TerminationConfig):\n\n    def __init__(self, termination_watcher_fn=None, exit_fn=None, grace_period=None, save_fn=None):\n        self.termination_watcher_fn = termination_watcher_fn\n        self.exit_fn = exit_fn or failure_handling_util.default_tpu_exit_fn\n        self.grace_period = grace_period or 0\n        self.save_fn = save_fn",
    "docstring": "Configurations for Borg.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\failure_handling\\failure_handling.py",
    "ast_data": "ClassDef name:BorgTPUTerminationConfig FunctionDef name:__init__ arg:self arg:termination_watcher_fn arg:exit_fn arg:grace_period arg:save_fn arguments arg arg arg arg arg Assign Assign BoolOp Assign BoolOp Assign"
  },
  {
    "library": "django",
    "name": "handle_token",
    "source_code": "@classmethod\ndef handle_token(cls, parser, token):\n    bits = token.split_contents()\n    if len(bits) < 2:\n        raise template.TemplateSyntaxError(\"'%s' takes at least one argument (path to file)\" % bits[0])\n    path = parser.compile_filter(bits[1])\n    if len(bits) >= 2 and bits[-2] == 'as':\n        varname = bits[3]\n    else:\n        varname = None\n    return cls(varname, path)",
    "docstring": "Class method to parse prefix node and return a Node.",
    "type": "method",
    "file_path": "django\\django\\templatetags\\static.py",
    "ast_data": "FunctionDef name:handle_token arg:cls arg:parser arg:token arguments arg arg arg Assign Call If Compare Call Raise Call Assign Call If BoolOp Compare Call Compare Assign Assign Return return:yes Call"
  },
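A sketch of the token layout that the `bits[-2]`/`bits[3]` indexing above relies on; plain `split()` stands in for `split_contents()`, which additionally keeps quoted arguments intact.

```python
bits = 'static "css/base.css" as style_url'.split()
# bits == ['static', '"css/base.css"', 'as', 'style_url']
# bits[1]  -> the path argument handed to parser.compile_filter()
# bits[-2] -> 'as', so the output variable name is bits[3] ('style_url')
```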
  {
    "library": "tensorflow",
    "name": "_replace_uniform_noise_node",
    "source_code": "def _replace_uniform_noise_node(parent, old_value):\n    uniform = ast.Str(s='uniform')\n    gaussian = ast.Str(s='gaussian')\n    new_value = ast.IfExp(body=uniform, test=old_value, orelse=gaussian)\n    pasta.ast_utils.replace_child(parent, old_value, new_value)\n    ast.copy_location(new_value, old_value)\n    pasta.base.formatting.set(new_value.test, 'prefix', '(')\n    pasta.base.formatting.set(new_value.test, 'suffix', ')')",
    "docstring": "Replaces old_value with 'uniform' or 'gaussian'.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\tools\\compatibility\\tf_upgrade_v2.py",
    "ast_data": "FunctionDef name:_replace_uniform_noise_node arg:parent arg:old_value arguments arg arg Assign Call Assign Call Assign Call Call Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "mark_dirty",
    "source_code": "def mark_dirty(self) -> None:\n    self._dirty = True",
    "docstring": "See base class. If the local rendezvous state is dirty, the next sync call will try to write the changes back to the backend. However this attempt might fail if another node, which had the same state, also made changes and wrote them before us.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\elastic\\rendezvous\\dynamic_rendezvous.py",
    "ast_data": "FunctionDef name:mark_dirty arg:self arguments arg Assign"
  },
  {
    "library": "pandas",
    "name": "_convert_to_border",
    "source_code": "@classmethod\ndef _convert_to_border(cls, border_dict):\n    from openpyxl.styles import Border\n    _border_key_map = {'diagonalup': 'diagonalUp', 'diagonaldown': 'diagonalDown'}\n    border_kwargs = {}\n    for k, v in border_dict.items():\n        k = _border_key_map.get(k, k)\n        if k == 'color':\n            v = cls._convert_to_color(v)\n        if k in ['left', 'right', 'top', 'bottom', 'diagonal']:\n            v = cls._convert_to_side(v)\n        border_kwargs[k] = v\n    return Border(**border_kwargs)",
    "docstring": "Convert `` to an openpyxl v2 Border object. Parameters ---------- border_dict : dict A dict with zero or more of the following keys (or their synonyms). 'left' 'right' 'top' 'bottom' 'diagonal' 'diagonal_direction' 'vertical' 'horizontal' 'diagonalUp' ('diagonalup') 'diagonalDown' ('diagonaldown') 'outline' Returns ------- border : openpyxl.styles.Border",
    "type": "method",
    "file_path": "pandas\\pandas\\io\\excel\\_openpyxl.py",
    "ast_data": "FunctionDef name:_convert_to_border arg:cls arg:border_dict arguments arg arg Assign Assign For Call Assign Call If Compare Assign Call If Compare Assign Call Assign Return return:yes Call"
  },
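A rough sketch of the mapping this classmethod performs, with an illustrative `border_dict`; the hand-built `Border` below is approximately what it returns.

```python
from openpyxl.styles import Border, Side

border_dict = {
    "left": {"style": "thin", "color": "FF000000"},
    "diagonalup": True,  # normalized to openpyxl's camelCase 'diagonalUp'
}
# Roughly equivalent openpyxl object:
border = Border(left=Side(style="thin", color="FF000000"), diagonalUp=True)
```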
  {
    "library": "scipy",
    "name": "BiggsExp05",
    "source_code": "class BiggsExp05(Benchmark):\n\n    def __init__(self, dimensions=5):\n        Benchmark.__init__(self, dimensions)\n        self._bounds = list(zip([0.0] * 5, [20.0] * 5))\n        self.global_optimum = [[1.0, 10.0, 1.0, 5.0, 4.0]]\n        self.fglob = 0\n\n    def fun(self, x, *args):\n        self.nfev += 1\n        t = arange(1, 12.0) * 0.1\n        y = exp(-t) - 5 * exp(-10 * t) + 3 * exp(-4 * t)\n        vec = (x[2] * exp(-t * x[0]) - x[3] * exp(-t * x[1]) + 3 * exp(-t * x[4]) - y) ** 2\n        return sum(vec)",
    "docstring": "BiggsExp05 objective function. The BiggsExp05 [1]_ global optimization problem is a multimodal minimization problem defined as follows .. math:: \\begin{matrix}\\ f_{\\text{BiggsExp05}}(x) = \\sum_{i=1}^{11} (x_3 e^{-t_i x_1} - x_4 e^{-t_i x_2} + 3 e^{-t_i x_5} - y_i)^2\\\\ t_i = 0.1i\\\\ y_i = e^{-t_i} - 5e^{-10 t_i} + 3e^{-4 t_i}\\\\ \\end{matrix} with :math: for :math:. *Global optimum*: :math: for :math: .. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions For Global Optimization Problems Int. Journal of Mathematical Modelling and Numerical Optimisation, 2013, 4, 150-194.",
    "type": "class",
    "file_path": "scipy\\benchmarks\\benchmarks\\go_benchmark_functions\\go_funcs_B.py",
    "ast_data": "ClassDef name:BiggsExp05 FunctionDef name:__init__ arg:self arg:dimensions arguments arg arg Call Assign Call Call Assign Assign FunctionDef name:fun arg:self arg:x arguments arg arg arg Assign Call Assign Call Call Call Assign Call Call Call Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "paint_path",
    "source_code": "@classmethod\ndef paint_path(cls, fill, stroke):\n    if stroke:\n        if fill:\n            return cls.fill_stroke\n        else:\n            return cls.stroke\n    elif fill:\n        return cls.fill\n    else:\n        return cls.endpath",
    "docstring": "Return the PDF operator to paint a path. Parameters ---------- fill : bool Fill the path with the fill color. stroke : bool Stroke the outline of the path with the line color.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\backends\\backend_pdf.py",
    "ast_data": "FunctionDef name:paint_path arg:cls arg:fill arg:stroke arguments arg arg arg If If Return return:yes Return return:yes If Return return:yes Return return:yes"
  },
  {
    "library": "numpy",
    "name": "__mod__",
    "source_code": "def __mod__(self, i):\n    return asarray(mod(self, i))",
    "docstring": "Return (self % i), that is pre-Python 2.6 string formatting (interpolation), element-wise for a pair of array_likes of or . See Also -------- mod",
    "type": "method",
    "file_path": "numpy\\numpy\\_core\\defchararray.py",
    "ast_data": "FunctionDef name:__mod__ arg:self arg:i arguments arg arg Return return:yes Call Call"
  },
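A sketch through the public `np.char.mod`, which performs the same element-wise interpolation as this `__mod__`:

```python
import numpy as np

print(np.char.mod(["%d bottles", "%.1f litres"], [99, 1.5]))
# ['99 bottles' '1.5 litres']
```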
  {
    "library": "django",
    "name": "extra",
    "source_code": "def extra(self, select=None, where=None, params=None, tables=None, order_by=None, select_params=None):\n    self._not_support_combined_queries('extra')\n    if self.query.is_sliced:\n        raise TypeError('Cannot change a query once a slice has been taken.')\n    clone = self._chain()\n    clone.query.add_extra(select, select_params, where, params, tables, order_by)\n    return clone",
    "docstring": "Add extra SQL fragments to the query.",
    "type": "method",
    "file_path": "django\\django\\db\\models\\query.py",
    "ast_data": "FunctionDef name:extra arg:self arg:select arg:where arg:params arg:tables arg:order_by arg:select_params arguments arg arg arg arg arg arg arg Call If Raise Call Assign Call Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "_decade_greater_equal",
    "source_code": "def _decade_greater_equal(x, base):\n    return x if x == 0 else -_decade_less_equal(-x, base) if x < 0 else base ** np.ceil(np.log(x) / np.log(base))",
    "docstring": "Return the smallest integer power of *base* that's greater or equal to *x*. If *x* is negative, the exponent will be *smaller*.",
    "type": "function",
    "file_path": "matplotlib\\lib\\matplotlib\\ticker.py",
    "ast_data": "FunctionDef name:_decade_greater_equal arg:x arg:base arguments arg arg Return return:yes Compare Compare Call Call Call Call"
  },
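A standalone sketch of the positive-x branch, showing the power-of-base rounding rule (the function name is invented for illustration):

```python
import numpy as np

def decade_ge(x, base):
    # Smallest integer power of `base` that is >= x (positive x only here).
    return base ** np.ceil(np.log(x) / np.log(base))

print(decade_ge(15, 10))  # 100.0 -- 10**2 is the first power of 10 >= 15
print(decade_ge(10, 10))  # 10.0  -- exact powers map to themselves
```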
  {
    "library": "kornia",
    "name": "forward",
    "source_code": "def forward(self, img: Tensor) -> Dict[str, Any]:\n    KORNIA_CHECK_SHAPE(img, ['B', '1', 'H', 'W'])\n    outputs = {}\n    net_outputs = self.model(img)\n    outputs['junction_heatmap'] = net_outputs['junctions']\n    outputs['line_heatmap'] = net_outputs['heatmap']\n    outputs['dense_desc'] = net_outputs['descriptors']\n    lines = []\n    for junc_prob, heatmap in zip(net_outputs['junctions'], net_outputs['heatmap']):\n        junctions = prob_to_junctions(junc_prob, self.grid_size, self.junc_detect_thresh, self.max_num_junctions)\n        line_map, junctions, _ = self.line_detector.detect(junctions, heatmap)\n        lines.append(line_map_to_segments(junctions, line_map))\n    outputs['line_segments'] = lines\n    return outputs",
    "docstring": "Run forward. Args: img: batched images with shape :math:. Returns: line_segments: list of N line segments in each of the B images :math:. junction_heatmap: raw junction heatmap of shape :math:. line_heatmap: raw line heatmap of shape :math:. dense_desc: the semi-dense descriptor map of shape :math:.",
    "type": "method",
    "file_path": "kornia\\kornia\\feature\\sold2\\sold2.py",
    "ast_data": "FunctionDef name:forward arg:self arg:img arguments arg arg Call Assign Assign Call Assign Assign Assign Assign For Call Assign Call Assign Call Call Call Assign Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "unflatten",
    "source_code": "def unflatten(module: ExportedProgram, flat_args_adapter: Optional[FlatArgsAdapter]=None) -> UnflattenedModule:\n    module = _remove_effect_tokens(module)\n    return UnflattenedModule(module, flat_args_adapter)",
    "docstring": "Unflatten an ExportedProgram, producing a module with the same module hierarchy as the original eager module. This can be useful if you are trying to use :mod: with another system that expects a module hierachy instead of the flat graph that :mod: usually produces. .. note:: The args/kwargs of unflattened modules will not necessarily match the eager module, so doing a module swap (e.g. :code:) will not necessarily work. If you need to swap a module out, you need to set the :code: parameter of :func:. Args: module (ExportedProgram): The ExportedProgram to unflatten. flat_args_adapter (Optional[FlatArgsAdapter]): Adapt flat args if input TreeSpec does not match with exported module's. Returns: An instance of :class:, which has the same module hierarchy as the original eager module pre-export.",
    "type": "function",
    "file_path": "pytorch\\torch\\export\\unflatten.py",
    "ast_data": "FunctionDef name:unflatten arg:module arg:flat_args_adapter arguments arg arg Assign Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_set_operation",
    "source_code": "def _set_operation(a, b, set_operation, validate_indices=True):\n    if isinstance(a, sparse_tensor.SparseTensor):\n        if isinstance(b, sparse_tensor.SparseTensor):\n            indices, values, shape = gen_set_ops.sparse_to_sparse_set_operation(a.indices, a.values, a.dense_shape, b.indices, b.values, b.dense_shape, set_operation, validate_indices)\n        else:\n            raise ValueError('Sparse,Dense is not supported, but Dense,Sparse is. Please flip the order of your inputs.')\n    elif isinstance(b, sparse_tensor.SparseTensor):\n        indices, values, shape = gen_set_ops.dense_to_sparse_set_operation(a, b.indices, b.values, b.dense_shape, set_operation, validate_indices)\n    else:\n        indices, values, shape = gen_set_ops.dense_to_dense_set_operation(a, b, set_operation, validate_indices)\n    return sparse_tensor.SparseTensor(indices, values, shape)",
    "docstring": "Compute set operation of elements in last dimension of and . All but the last dimension of and must match. Args: a: or of the same type as . If sparse, indices must be sorted in row-major order. b: or of the same type as . Must be if is . If sparse, indices must be sorted in row-major order. set_operation: String indicating set operation. See SetOperationOp::SetOperationFromContext for valid values. validate_indices: Whether to validate the order and range of sparse indices in and . Returns: A with the same rank as and , and all but the last dimension the same. Elements along the last dimension contain the results of the set operation. Raises: TypeError: If inputs are invalid types. ValueError: If is sparse and is dense.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\sets_impl.py",
    "ast_data": "FunctionDef name:_set_operation arg:a arg:b arg:set_operation arg:validate_indices arguments arg arg arg arg If Call If Call Assign Call Raise Call If Call Assign Call Assign Call Return return:yes Call"
  },
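A sketch through the public `tf.sets` API, which routes into `_set_operation`; sets live along the last dimension and the result comes back sparse.

```python
import tensorflow as tf

a = tf.constant([[1, 2, 3], [4, 5, 6]])
b = tf.constant([[2, 3, 9], [4, 0, 0]])

inter = tf.sets.intersection(a, b)        # SparseTensor result
print(tf.sparse.to_dense(inter).numpy())  # [[2 3]
                                          #  [4 0]] -- second row padded with 0
```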
  {
    "library": "pytorch",
    "name": "_graph_segment_str_at_line",
    "source_code": "def _graph_segment_str_at_line(self, line: int) -> str:\n    if line == 0:\n        result_str = self._node_count_segment_str()\n        result_str += ' ' * (self._max_segment_columns() - len(result_str))\n        return result_str\n    if line == 1:\n        result_str = self._graph_id_segment_str()\n        result_str += ' ' * (self._max_segment_columns() - len(result_str))\n        return result_str\n    if 0 <= line < self._total_rows():\n        return ' ' * self._max_segment_columns()\n    return ''",
    "docstring": "Get the string representation of the graph segment at the given line.",
    "type": "method",
    "file_path": "pytorch\\torch\\onnx\\verification.py",
    "ast_data": "FunctionDef name:_graph_segment_str_at_line arg:self arg:line arguments arg arg If Compare Assign Call Call Call Return return:yes If Compare Assign Call Call Call Return return:yes If Compare Call Return return:yes Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "dtype",
    "source_code": "@property\ndef dtype(self):\n    return self._values.dtype",
    "docstring": "The of elements in this tensor.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\sparse_tensor.py",
    "ast_data": "FunctionDef name:dtype arg:self arguments arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "burn_in_info",
    "source_code": "def burn_in_info(skeleton, info):\n    return skeleton.replace('BURNED_IN_MODEL_INFO = null', 'BURNED_IN_MODEL_INFO = ' + json.dumps(info, sort_keys=True).replace('/', '\\\\/'))",
    "docstring": "Burn model info into the HTML skeleton. The result will render the hard-coded model info and have no external network dependencies for code or data.",
    "type": "function",
    "file_path": "pytorch\\torch\\utils\\model_dump\\__init__.py",
    "ast_data": "FunctionDef name:burn_in_info arg:skeleton arg:info arguments arg arg Return return:yes Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "value_rowids",
    "source_code": "def value_rowids(self):\n    if self._value_rowids is not None:\n        return self._value_rowids\n    return segment_id_ops.row_splits_to_segment_ids(self._row_splits)",
    "docstring": "Returns the row indices for this row partition. specifies the row index fo reach value. In particular, is the row index for . Returns: A 1-D integer with shape . The returned tensor is nonnegative, and is sorted in ascending order.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\row_partition.py",
    "ast_data": "FunctionDef name:value_rowids arg:self arguments arg If Compare Return return:yes Return return:yes Call"
  },
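A sketch of the `row_splits` <-> `value_rowids` correspondence via `tf.RaggedTensor`, which stores its partitioning in a `RowPartition` like the one above:

```python
import tensorflow as tf

rt = tf.RaggedTensor.from_row_splits(values=[3, 1, 4, 1, 5], row_splits=[0, 2, 2, 5])
print(rt.value_rowids().numpy())  # [0 0 2 2 2] -- row 1 is empty, row 2 owns three values
```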
  {
    "library": "tensorflow",
    "name": "__exit__",
    "source_code": "def __exit__(self, typ, value, traceback):\n    if self._recording:\n        self._pop_tape()",
    "docstring": "Exits the recording context, no further operations are traced.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\backprop.py",
    "ast_data": "FunctionDef name:__exit__ arg:self arg:typ arg:value arg:traceback arguments arg arg arg arg If Call"
  },
  {
    "library": "seaborn",
    "name": "numeric_mapping",
    "source_code": "def numeric_mapping(self, data, palette, norm):\n    if isinstance(palette, dict):\n        levels = list(sorted(palette))\n        colors = [palette[k] for k in sorted(palette)]\n        cmap = mpl.colors.ListedColormap(colors)\n        lookup_table = palette.copy()\n    else:\n        levels = list(np.sort(remove_na(data.unique())))\n        palette = 'ch:' if palette is None else palette\n        if isinstance(palette, mpl.colors.Colormap):\n            cmap = palette\n        else:\n            cmap = color_palette(palette, as_cmap=True)\n        if norm is None:\n            norm = mpl.colors.Normalize()\n        elif isinstance(norm, tuple):\n            norm = mpl.colors.Normalize(*norm)\n        elif not isinstance(norm, mpl.colors.Normalize):\n            err = '``hue_norm`` must be None, tuple, or Normalize object.'\n            raise ValueError(err)\n        if not norm.scaled():\n            norm(np.asarray(data.dropna()))\n        lookup_table = dict(zip(levels, cmap(norm(levels))))\n    return (levels, lookup_table, norm, cmap)",
    "docstring": "Determine colors when the hue variable is quantitative.",
    "type": "method",
    "file_path": "seaborn\\seaborn\\_base.py",
    "ast_data": "FunctionDef name:numeric_mapping arg:self arg:data arg:palette arg:norm arguments arg arg arg arg If Call Assign Call Call Assign Call Assign Call Assign Call Assign Call Call Call Call Assign Compare If Call Assign Assign Call If Compare Assign Call If Call Assign Call If Call Assign Raise Call If Call Call Call Call Assign Call Call Call Call Return return:yes"
  },
  {
    "library": "kornia",
    "name": "_common_param_check",
    "source_code": "def _common_param_check(batch_size: int, same_on_batch: Optional[bool]=None) -> None:\n    if not (isinstance(batch_size, int) and batch_size >= 0):\n        raise AssertionError(f'`batch_size` shall be a positive integer. Got {batch_size}.')\n    if same_on_batch is not None and (not isinstance(same_on_batch, bool)):\n        raise AssertionError(f'`same_on_batch` shall be boolean. Got {same_on_batch}.')",
    "docstring": "Check valid batch_size and same_on_batch params.",
    "type": "function",
    "file_path": "kornia\\kornia\\augmentation\\utils\\param_validation.py",
    "ast_data": "FunctionDef name:_common_param_check arg:batch_size arg:same_on_batch arguments arg arg If BoolOp Call Compare Raise Call If BoolOp Compare Call Raise Call"
  },
  {
    "library": "seaborn",
    "name": "theme",
    "source_code": "@property\ndef theme(self) -> dict[str, Any]:\n    return self._theme",
    "docstring": "Dictionary of base theme parameters for :class:. Keys and values correspond to matplotlib rc params, as documented here:",
    "type": "method",
    "file_path": "seaborn\\seaborn\\_core\\plot.py",
    "ast_data": "FunctionDef name:theme arg:self arguments arg Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "set_constrained_layout",
    "source_code": "@_api.deprecated('3.6', alternative=\"set_layout_engine('constrained')\", pending=True)\ndef set_constrained_layout(self, constrained):\n    constrained = mpl._val_or_rc(constrained, 'figure.constrained_layout.use')\n    _constrained = 'constrained' if bool(constrained) else 'none'\n    _parameters = constrained if isinstance(constrained, dict) else {}\n    self.set_layout_engine(_constrained, **_parameters)\n    self.stale = True",
    "docstring": "Set whether `figure.constrained_layout.use` is the height padding. Parameters ---------- constrained : bool or dict or None",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\figure.py",
    "ast_data": "FunctionDef name:set_constrained_layout arg:self arg:constrained arguments arg arg Assign Call Assign Call Assign Call Call Assign Call"
  },
  {
    "library": "tensorflow",
    "name": "loop",
    "source_code": "def loop(self, timer_interval_secs, target, args=None, kwargs=None):\n    looper = coordinator.LooperThread(self._coord, timer_interval_secs, target=target, args=args, kwargs=kwargs)\n    looper.start()\n    return looper",
    "docstring": "Start a LooperThread that calls a function periodically. If is None the thread calls repeatedly. Otherwise it calls it every seconds. The thread terminates when a stop is requested. The started thread is added to the list of threads managed by the supervisor so it does not need to be passed to the method. Args: timer_interval_secs: Number. Time boundaries at which to call . target: A callable object. args: Optional arguments to pass to when calling it. kwargs: Optional keyword arguments to pass to when calling it. Returns: The started thread.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\training\\supervisor.py",
    "ast_data": "FunctionDef name:loop arg:self arg:timer_interval_secs arg:target arg:args arg:kwargs arguments arg arg arg arg arg Assign Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "gpu",
    "source_code": "@deprecation.deprecated(None, 'Use tf.identity instead.')\ndef gpu(self: EagerTensorType, gpu_index=0) -> EagerTensorType:\n    return self._copy(context.context(), 'GPU:' + str(gpu_index))",
    "docstring": "A copy of this Tensor with contents backed by memory on the GPU. Args: gpu_index: Identifies which GPU to place the contents on the returned Tensor in. Returns: A GPU-memory backed Tensor object initialized with the same contents as this Tensor.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\ops.py",
    "ast_data": "FunctionDef name:gpu arg:self arg:gpu_index arguments arg arg Return return:yes Call Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "print_dict",
    "source_code": "def print_dict(py_dict):\n    for gpu, cc in py_dict.items():\n        print('{:<25}{:<25}'.format(gpu, cc))",
    "docstring": "Prints dictionary with formatting (2 column table). Args: py_dict: Dictionary that is to be printed out in a table format.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\tools\\tensorflow_builder\\config_detector\\data\\cuda_compute_capability.py",
    "ast_data": "FunctionDef name:print_dict arg:py_dict arguments arg For Call Call Call"
  },
  {
    "library": "scikit-learn",
    "name": "predict",
    "source_code": "def predict(self, X):\n    check_is_fitted(self)\n    X = self._validate_X_predict(X)\n    n_jobs, _, _ = _partition_estimators(self.n_estimators, self.n_jobs)\n    if self.n_outputs_ > 1:\n        y_hat = np.zeros((X.shape[0], self.n_outputs_), dtype=np.float64)\n    else:\n        y_hat = np.zeros(X.shape[0], dtype=np.float64)\n    lock = threading.Lock()\n    Parallel(n_jobs=n_jobs, verbose=self.verbose, require='sharedmem')((delayed(_accumulate_prediction)(e.predict, X, [y_hat], lock) for e in self.estimators_))\n    y_hat /= len(self.estimators_)\n    return y_hat",
    "docstring": "Predict regression target for X. The predicted regression target of an input sample is computed as the mean predicted regression targets of the trees in the forest. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) The input samples. Internally, its dtype will be converted to ``. Returns ------- y : ndarray of shape (n_samples,) or (n_samples, n_outputs) The predicted values.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\ensemble\\_forest.py",
    "ast_data": "FunctionDef name:predict arg:self arg:X arguments arg arg Call Assign Call Assign Call If Compare Assign Call Assign Call Assign Call Call Call Call Call Call Return return:yes"
  },
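A sketch verifying the accumulate-then-divide logic above: the forest prediction equals the mean of the per-tree predictions (toy data, illustrative hyperparameters).

```python
import numpy as np
from sklearn.ensemble import RandomForestRegressor

rng = np.random.RandomState(0)
X = rng.rand(100, 3)
y = X @ np.array([1.0, 2.0, 3.0])

model = RandomForestRegressor(n_estimators=10, random_state=0).fit(X, y)
manual = np.mean([tree.predict(X) for tree in model.estimators_], axis=0)
assert np.allclose(model.predict(X), manual)
```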
  {
    "library": "tensorflow",
    "name": "graph_wrapped_for_higher_order_tape_gradients",
    "source_code": "def graph_wrapped_for_higher_order_tape_gradients(graph):\n    while graph is not None:\n        if 'cflow_gradient_wrapper' in getattr(graph, 'name', ''):\n            return True\n        graph = getattr(graph, 'outer_graph', None)\n    return False",
    "docstring": "Check if is wrapped by .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\control_flow_util_v2.py",
    "ast_data": "FunctionDef name:graph_wrapped_for_higher_order_tape_gradients arg:graph arguments arg While Compare If Compare Call Return return:yes Assign Call Return return:yes"
  },
  {
    "library": "django",
    "name": "get_order_by",
    "source_code": "def get_order_by(self):\n    result = []\n    seen = set()\n    for expr, is_ref in self._order_by_pairs():\n        resolved = expr.resolve_expression(self.query, allow_joins=True, reuse=None)\n        if not is_ref and self.query.combinator and self.select:\n            src = resolved.expression\n            expr_src = expr.expression\n            for sel_expr, _, col_alias in self.select:\n                if src == sel_expr:\n                    if self.query.has_select_fields and col_alias in self.query.annotation_select and (not (isinstance(expr_src, F) and col_alias == expr_src.name)):\n                        continue\n                    resolved.set_source_expressions([Ref(col_alias if col_alias else src.target.column, src)])\n                    break\n            else:\n                order_by_idx = len(self.query.select) + 1\n                col_alias = f'__orderbycol{order_by_idx}'\n                for q in self.query.combined_queries:\n                    if q.has_select_fields:\n                        raise DatabaseError('ORDER BY term does not match any column in the result set.')\n                    q.add_annotation(expr_src, col_alias)\n                self.query.add_select_col(resolved, col_alias)\n                resolved.set_source_expressions([Ref(col_alias, src)])\n        sql, params = self.compile(resolved)\n        without_ordering = self.ordering_parts.search(sql)[1]\n        params_hash = make_hashable(params)\n        if (without_ordering, params_hash) in seen:\n            continue\n        seen.add((without_ordering, params_hash))\n        result.append((resolved, (sql, params, is_ref)))\n    return result",
    "docstring": "Return a list of 2-tuples of the form (expr, (sql, params, is_ref)) for the ORDER BY clause. The order_by clause can alter the select clause (for example it can add aliases to clauses that do not yet have one, or it can add totally new select clauses).",
    "type": "method",
    "file_path": "django\\django\\db\\models\\sql\\compiler.py",
    "ast_data": "FunctionDef name:get_order_by arg:self arguments arg Assign Assign Call For Call Assign Call If BoolOp Assign Assign For If Compare If BoolOp Compare BoolOp Call Compare Call Call Assign Call Assign For If Raise Call Call Call Call Call Assign Call Assign Call Assign Call If Compare Call Call Return return:yes"
  },
  {
    "library": "cryptography",
    "name": "public_key",
    "source_code": "@abc.abstractmethod\ndef public_key(self) -> RSAPublicKey:\n    pass",
    "docstring": "The RSAPublicKey associated with this private key.",
    "type": "method",
    "file_path": "cryptography\\src\\cryptography\\hazmat\\primitives\\asymmetric\\rsa.py",
    "ast_data": "FunctionDef name:public_key arg:self arguments arg"
  },
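Since `public_key` is abstract here, a concrete key is obtained through key generation; a short sketch using cryptography's documented RSA API:

```python
from cryptography.hazmat.primitives.asymmetric import rsa

# Generate a concrete RSAPrivateKey; its public_key() implements the
# abstract method above and returns the matching RSAPublicKey.
private_key = rsa.generate_private_key(public_exponent=65537, key_size=2048)
public_key = private_key.public_key()
print(public_key.key_size)  # 2048
```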
  {
    "library": "pytorch",
    "name": "__init__",
    "source_code": "def __init__(self, f: FileLike, importer: Union[Importer, Sequence[Importer]]=sys_importer, debug: bool=False) -> None:\n    torch._C._log_api_usage_once('torch.package.PackageExporter')\n    self.debug = debug\n    if isinstance(f, (str, os.PathLike)):\n        f = os.fspath(f)\n        self.buffer: Optional[IO[bytes]] = None\n    else:\n        self.buffer = f\n    self.zip_file = torch._C.PyTorchFileWriter(f)\n    self.zip_file.set_min_version(6)\n    self._written_files: set[str] = set()\n    self.serialized_reduces: dict[int, Any] = {}\n    self.dependency_graph = DiGraph()\n    self.script_module_serializer = torch._C.ScriptModuleSerializer(self.zip_file)\n    self.storage_context = self.script_module_serializer.storage_context()\n    self._extern_hooks: OrderedDict = OrderedDict()\n    self._mock_hooks: OrderedDict = OrderedDict()\n    self._intern_hooks: OrderedDict = OrderedDict()\n    if isinstance(importer, Importer):\n        self.importer = importer\n    else:\n        if not isinstance(importer, collections.abc.Sequence):\n            raise TypeError(f'importer arg should be an Importer or a sequence of Importers, got {type(importer)} instead.')\n        self.importer = OrderedImporter(*importer)\n    self.patterns: dict[GlobGroup, _PatternInfo] = {}\n    self._unique_id = 0",
    "docstring": "Create an exporter. Args: f: The location to export to. Can be a `` will be constructed out of them. debug: If set to True, add path of broken modules to PackagingErrors.",
    "type": "method",
    "file_path": "pytorch\\torch\\package\\package_exporter.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:f arg:importer arg:debug arguments arg arg arg arg Call Assign If Call Assign Call Assign Assign Call Call Call Assign Call Assign Call Assign Call Call Call Call If Call Assign If Call Raise Call Call Assign Call Assign"
  },
  {
    "library": "tensorflow",
    "name": "StepContext",
    "source_code": "class StepContext:\n\n    def __init__(self, session, run_with_hooks_fn):\n        self._session = session\n        self._run_with_hooks_fn = run_with_hooks_fn\n\n    @property\n    def session(self):\n        return self._session\n\n    def run_with_hooks(self, *args, **kwargs):\n        return self._run_with_hooks_fn(*args, **kwargs)\n\n    def request_stop(self):\n        raise StopIteration('step_fn has requested the iterations to stop.')",
    "docstring": "Control flow instrument for the from . Users of may perform calls without running hooks by accessing the . A call with hooks may be performed using . Computation flow can be interrupted using .",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\training\\monitored_session.py",
    "ast_data": "ClassDef name:StepContext FunctionDef name:__init__ arg:self arg:session arg:run_with_hooks_fn arguments arg arg arg Assign Assign FunctionDef name:session arg:self arguments arg Return return:yes FunctionDef name:run_with_hooks arg:self arguments arg arg arg Return return:yes Call FunctionDef name:request_stop arg:self arguments arg Raise Call"
  },
  {
    "library": "django",
    "name": "number_format",
    "source_code": "def number_format(value, decimal_pos=None, use_l10n=None, force_grouping=False):\n    if use_l10n is None:\n        use_l10n = True\n    lang = get_language() if use_l10n else None\n    return numberformat.format(value, get_format('DECIMAL_SEPARATOR', lang, use_l10n=use_l10n), decimal_pos, get_format('NUMBER_GROUPING', lang, use_l10n=use_l10n), get_format('THOUSAND_SEPARATOR', lang, use_l10n=use_l10n), force_grouping=force_grouping, use_l10n=use_l10n)",
    "docstring": "Format a numeric value using localization settings. If use_l10n is provided and is not None, it forces the value to be localized (or not), otherwise it's always localized.",
    "type": "function",
    "file_path": "django\\django\\utils\\formats.py",
    "ast_data": "FunctionDef name:number_format arg:value arg:decimal_pos arg:use_l10n arg:force_grouping arguments arg arg arg arg If Compare Assign Assign Call Return return:yes Call Call Call Call"
  },
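A runnable sketch of `number_format`; standalone scripts must configure Django settings first, and the settings and expected output shown here are illustrative (the result depends on the active locale).

```python
import django
from django.conf import settings

# Minimal standalone configuration (illustrative values).
settings.configure(USE_I18N=True, USE_THOUSAND_SEPARATOR=True)
django.setup()

from django.utils.formats import number_format

# Localized under the default English locale.
print(number_format(1234567.891, decimal_pos=2, force_grouping=True))
# -> '1,234,567.89'
```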
  {
    "library": "matplotlib",
    "name": "Rule",
    "source_code": "class Rule(Box):\n\n    def __init__(self, width: float, height: float, depth: float, state: ParserState):\n        super().__init__(width, height, depth)\n        self.fontset = state.fontset\n\n    def render(self, output: Output, x: float, y: float, w: float, h: float) -> None:\n        self.fontset.render_rect_filled(output, x, y, x + w, y + h)",
    "docstring": "A solid black rectangle. It has *width*, *depth*, and *height* fields just as in an . However, if any of these dimensions is inf, the actual value will be determined by running the rule up to the boundary of the innermost enclosing box. This is called a \"running dimension\". The width is never running in an ; the height and depth are never running in a .",
    "type": "class",
    "file_path": "matplotlib\\lib\\matplotlib\\_mathtext.py",
    "ast_data": "ClassDef name:Rule FunctionDef name:__init__ arg:self arg:width arg:height arg:depth arg:state arguments arg arg arg arg arg Call Call Assign FunctionDef name:render arg:self arg:output arg:x arg:y arg:w arg:h arguments arg arg arg arg arg arg Call"
  },
  {
    "library": "tensorflow",
    "name": "scan_meta_graph_def",
    "source_code": "def scan_meta_graph_def(meta_graph_def, op_denylist):\n    ops_in_metagraph = set(meta_graph_lib.ops_used_by_graph_def(meta_graph_def.graph_def))\n    denylisted_ops = op_denylist & ops_in_metagraph\n    if denylisted_ops:\n        print('MetaGraph with tag set %s contains the following denylisted ops:' % meta_graph_def.meta_info_def.tags, denylisted_ops)\n    else:\n        print('MetaGraph with tag set %s does not contain the default denylisted ops:' % meta_graph_def.meta_info_def.tags, op_denylist)",
    "docstring": "Scans meta_graph_def and reports if there are ops on denylist. Print ops if they are on denylist, or print success if no denylisted ops found. Args: meta_graph_def: MetaGraphDef protocol buffer. op_denylist: set of ops to scan for.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\tools\\saved_model_cli.py",
    "ast_data": "FunctionDef name:scan_meta_graph_def arg:meta_graph_def arg:op_denylist arguments arg arg Assign Call Call Assign If Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_SoftmaxCrossEntropyWithLogitsGrad",
    "source_code": "@ops.RegisterGradient('SoftmaxCrossEntropyWithLogits')\ndef _SoftmaxCrossEntropyWithLogitsGrad(op: ops.Operation, grad_loss, grad_grad):\n    softmax_grad = op.outputs[1]\n    grad = _BroadcastMul(grad_loss, softmax_grad)\n    logits = op.inputs[0]\n    if grad_grad is not None and (not getattr(grad_grad, '_is_zeros_tensor', False)):\n        softmax = gen_nn_ops.softmax(logits)\n        grad += (grad_grad - array_ops.squeeze(math_ops.matmul(array_ops.expand_dims(grad_grad, 1), array_ops.expand_dims(softmax, 2)), axis=1)) * softmax\n    return (grad, _BroadcastMul(grad_loss, -gen_nn_ops.log_softmax(logits)))",
    "docstring": "Gradient function for SoftmaxCrossEntropyWithLogits.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\nn_grad.py",
    "ast_data": "FunctionDef name:_SoftmaxCrossEntropyWithLogitsGrad arg:op arg:grad_loss arg:grad_grad arguments arg arg arg Assign Assign Call Assign If BoolOp Compare Call Assign Call Call Call Call Call Return return:yes Call Call Call"
  },
  {
    "library": "scikit-learn",
    "name": "isdtype",
    "source_code": "def isdtype(dtype: DType, kind: Union[DType, str, Tuple[Union[DType, str], ...]], *, _tuple=True) -> bool:\n    if isinstance(kind, tuple) and _tuple:\n        return _builtin_any((isdtype(dtype, k, _tuple=False) for k in kind))\n    elif isinstance(kind, str):\n        if kind == 'bool':\n            return dtype == torch.bool\n        elif kind == 'signed integer':\n            return dtype in _int_dtypes and dtype.is_signed\n        elif kind == 'unsigned integer':\n            return dtype in _int_dtypes and (not dtype.is_signed)\n        elif kind == 'integral':\n            return dtype in _int_dtypes\n        elif kind == 'real floating':\n            return dtype.is_floating_point\n        elif kind == 'complex floating':\n            return dtype.is_complex\n        elif kind == 'numeric':\n            return isdtype(dtype, ('integral', 'real floating', 'complex floating'))\n        else:\n            raise ValueError(f'Unrecognized data type kind: {kind!r}')\n    else:\n        return dtype == kind",
    "docstring": "Returns a boolean indicating whether a provided dtype is of a specified data type ``. Note that outside of this function, this compat library does not yet fully support complex numbers. See for more details",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\externals\\array_api_compat\\torch\\_aliases.py",
    "ast_data": "FunctionDef name:isdtype arg:dtype arg:kind arguments arg arg arg If BoolOp Call Return return:yes Call Call If Call If Compare Return return:yes Compare If Compare Return return:yes BoolOp Compare If Compare Return return:yes BoolOp Compare If Compare Return return:yes Compare If Compare Return return:yes If Compare Return return:yes If Compare Return return:yes Call Raise Call Return return:yes Compare"
  },
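A condensed, self-contained sketch of the dispatch logic above; the `_int_dtypes` set is re-declared here because the original is module-internal, and the signed/unsigned branches are omitted for brevity.

```python
import torch

_int_dtypes = {torch.int8, torch.int16, torch.int32, torch.int64, torch.uint8}

def isdtype(dtype, kind):
    # Tuple kinds match if any member matches.
    if isinstance(kind, tuple):
        return any(isdtype(dtype, k) for k in kind)
    if kind == "bool":
        return dtype == torch.bool
    if kind == "integral":
        return dtype in _int_dtypes
    if kind == "real floating":
        return dtype.is_floating_point
    if kind == "complex floating":
        return dtype.is_complex
    if kind == "numeric":
        return isdtype(dtype, ("integral", "real floating", "complex floating"))
    return dtype == kind  # a concrete dtype compares directly

assert isdtype(torch.int32, "integral")
assert isdtype(torch.float64, ("bool", "numeric"))
assert not isdtype(torch.complex64, "real floating")
```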
  {
    "library": "django",
    "name": "parse_number",
    "source_code": "@classmethod\ndef parse_number(cls, name):\n    if (squashed_match := re.search('.*_squashed_(\\\\d+)', name)):\n        return int(squashed_match[1])\n    match = re.match('^\\\\d+', name)\n    if match:\n        return int(match[0])\n    return None",
    "docstring": "Given a migration name, try to extract a number from the beginning of it. For a squashed migration such as '0001_squashed_0004…', return the second number. If no number is found, return None.",
    "type": "method",
    "file_path": "django\\django\\db\\migrations\\autodetector.py",
    "ast_data": "FunctionDef name:parse_number arg:cls arg:name arguments arg arg If Call Return return:yes Call Assign Call If Return return:yes Call Return return:no"
  },
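The extraction rule is easy to check in isolation; a standalone transcription with a few expected values:

```python
import re

def parse_number(name):
    # Mirrors the Django classmethod above.
    if (squashed := re.search(r".*_squashed_(\d+)", name)):
        return int(squashed[1])
    match = re.match(r"^\d+", name)
    return int(match[0]) if match else None

assert parse_number("0001_initial") == 1
assert parse_number("0001_squashed_0004_auto") == 4   # the second number wins
assert parse_number("custom_migration") is None
```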
  {
    "library": "matplotlib",
    "name": "_single_shot_timer",
    "source_code": "def _single_shot_timer(self, callback):\n\n    def callback_func(callback, timer):\n        callback()\n        self._timers.remove(timer)\n    timer = self.new_timer(interval=0)\n    timer.single_shot = True\n    timer.add_callback(callback_func, callback, timer)\n    self._timers.add(timer)\n    timer.start()",
    "docstring": "Add a single shot timer with the given callback",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\backends\\backend_macosx.py",
    "ast_data": "FunctionDef name:_single_shot_timer arg:self arg:callback arguments arg arg FunctionDef name:callback_func arg:callback arg:timer arguments arg arg Call Call Assign Call Assign Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_UnsortedSegmentMinOrMaxGrad",
    "source_code": "def _UnsortedSegmentMinOrMaxGrad(op: ops.Operation, grad):\n    gathered_outputs, zero_clipped_indices, is_positive = _GatherDropNegatives(op.outputs[0], op.inputs[1])\n    is_selected = math_ops.equal(op.inputs[0], gathered_outputs)\n    is_selected = math_ops.logical_and(is_selected, is_positive)\n    num_selected = math_ops.unsorted_segment_sum(math_ops.cast(is_selected, grad.dtype), op.inputs[1], op.inputs[2])\n    weighted_grads = math_ops.divide(grad, num_selected)\n    gathered_grads, _, _ = _GatherDropNegatives(weighted_grads, None, zero_clipped_indices, is_positive)\n    zeros = array_ops.zeros_like(gathered_grads)\n    return (array_ops.where_v2(is_selected, gathered_grads, zeros), None, None)",
    "docstring": "Gradient for UnsortedSegmentMin and UnsortedSegmentMax.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\math_grad.py",
    "ast_data": "FunctionDef name:_UnsortedSegmentMinOrMaxGrad arg:op arg:grad arguments arg arg Assign Call Assign Call Assign Call Assign Call Call Assign Call Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "kornia",
    "name": "to",
    "source_code": "def to(self, *args: Any, **kwargs: Any) -> DISKFeatures:\n    return DISKFeatures(self.keypoints.to(*args, **kwargs), self.descriptors.to(*args, **kwargs), self.detection_scores.to(*args, **kwargs))",
    "docstring": "Call :func: on each tensor to move the keypoints, descriptors and detection scores to the specified device and/or data type. Args: *args: Arguments passed to :func:. **kwargs: Keyword arguments passed to :func:. Returns: A new DISKFeatures object with tensors of appropriate type and location.",
    "type": "method",
    "file_path": "kornia\\kornia\\feature\\disk\\structs.py",
    "ast_data": "FunctionDef name:to arg:self arguments arg arg arg Return return:yes Call Call Call Call"
  },
  {
    "library": "scipy",
    "name": "cov",
    "source_code": "def cov(self, m, n):\n    M, m, n, _, _, mncond = self._process_parameters(m, n)\n    if m.size != 0:\n        M = M[..., np.newaxis, np.newaxis]\n        n = n[..., np.newaxis, np.newaxis]\n    cond = (M == 0) & (M - 1 == 0)\n    M = np.ma.masked_array(M, mask=cond)\n    output = -n * (M - n) / (M - 1) * np.einsum('...i,...j->...ij', m, m) / M ** 2\n    if m.size != 0:\n        M, n = (M[..., 0, 0], n[..., 0, 0])\n        cond = cond[..., 0, 0]\n    dim = m.shape[-1]\n    for i in range(dim):\n        output[..., i, i] = n * (M - n) * m[..., i] * (M - m[..., i])\n        output[..., i, i] = output[..., i, i] / (M - 1)\n        output[..., i, i] = output[..., i, i] / M ** 2\n    if m.size != 0:\n        mncond = mncond[..., np.newaxis, np.newaxis] | np.zeros(output.shape, dtype=np.bool_)\n    return self._checkresult(output, mncond, np.nan)",
    "docstring": "Covariance matrix of the multivariate hypergeometric distribution. Parameters ---------- %(_doc_default_callparams)s Returns ------- cov : array_like The covariance matrix of the distribution",
    "type": "method",
    "file_path": "scipy\\scipy\\stats\\_multivariate.py",
    "ast_data": "FunctionDef name:cov arg:self arg:m arg:n arguments arg arg arg Assign Call If Compare Assign Assign Assign Compare Compare Assign Call Assign Call If Compare Assign Assign Assign For Call Assign Assign Assign If Compare Assign Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_RemoveExternalControlEdges",
    "source_code": "def _RemoveExternalControlEdges(self, op: ops.Operation) -> Tuple[List[ops.Operation], List[ops.Operation]]:\n    internal_control_inputs = []\n    external_control_inputs = []\n    for x in op.control_inputs:\n        is_internal_op = False\n        ctxt = x._get_control_flow_context()\n        while ctxt is not None:\n            if ctxt == self:\n                is_internal_op = True\n                break\n            ctxt = ctxt._outer_context\n        if is_internal_op:\n            internal_control_inputs.append(x)\n        else:\n            external_control_inputs.append(x)\n    op._remove_all_control_inputs()\n    op._add_control_inputs(internal_control_inputs)\n    return (internal_control_inputs, external_control_inputs)",
    "docstring": "Remove any external control dependency on this op.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\tpu\\tpu_replication.py",
    "ast_data": "FunctionDef name:_RemoveExternalControlEdges arg:self arg:op arguments arg arg Assign Assign For Assign Assign Call While Compare If Compare Assign Assign If Call Call Call Call Return return:yes"
  },
  {
    "library": "scipy",
    "name": "float_factorial",
    "source_code": "def float_factorial(n: int) -> float:\n    return float(math.factorial(n)) if n < 171 else np.inf",
    "docstring": "Compute the factorial and return as a float Returns infinity when result is too large for a double",
    "type": "function",
    "file_path": "scipy\\scipy\\_lib\\_util.py",
    "ast_data": "FunctionDef name:float_factorial arg:n arguments arg Return return:yes Compare Call Call"
  },
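The cutoff at 171 exists because 171! exceeds the largest IEEE-754 double (about 1.8e308); a transcription showing the boundary:

```python
import math
import numpy as np

def float_factorial(n):
    # 170! ~ 7.26e306 still fits in a double; 171! does not.
    return float(math.factorial(n)) if n < 171 else np.inf

print(float_factorial(170))  # 7.257415615307994e+306
print(float_factorial(171))  # inf
```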
  {
    "library": "pandas",
    "name": "_math_mode_with_dollar",
    "source_code": "def _math_mode_with_dollar(s: str) -> str:\n    s = s.replace('\\\\$', 'rt8§=§7wz')\n    pattern = re.compile('\\\\$.*?\\\\$')\n    pos = 0\n    ps = pattern.search(s, pos)\n    res = []\n    while ps:\n        res.append(_escape_latex(s[pos:ps.span()[0]]))\n        res.append(ps.group())\n        pos = ps.span()[1]\n        ps = pattern.search(s, pos)\n    res.append(_escape_latex(s[pos:len(s)]))\n    return ''.join(res).replace('rt8§=§7wz', '\\\\$')",
    "docstring": "All characters in LaTeX math mode are preserved. The substrings in LaTeX math mode, which start with the character ``, are preserved without escaping. Otherwise regular LaTeX escaping applies. Parameters ---------- s : str Input to be escaped Return ------ str : Escaped string",
    "type": "function",
    "file_path": "pandas\\pandas\\io\\formats\\style_render.py",
    "ast_data": "FunctionDef name:_math_mode_with_dollar arg:s arguments arg Assign Call Assign Call Assign Assign Call Assign While Call Call Call Call Call Assign Call Assign Call Call Call Call Return return:yes Call Call"
  },
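A self-contained sketch of the dollar-delimited scan above; the `_escape_latex` stand-in below escapes only `%` and `&` and is an assumption, since the real helper lives elsewhere in pandas.

```python
import re

def _escape_latex(s):
    # Hypothetical stand-in for pandas' full escaping helper.
    return s.replace("&", r"\&").replace("%", r"\%")

def math_mode_with_dollar(s):
    s = s.replace(r"\$", "rt8§=§7wz")        # hide pre-escaped dollars
    pattern = re.compile(r"\$.*?\$")
    pos, res = 0, []
    while (ps := pattern.search(s, pos)):
        res.append(_escape_latex(s[pos:ps.span()[0]]))  # escape outside math
        res.append(ps.group())                          # keep math verbatim
        pos = ps.span()[1]
    res.append(_escape_latex(s[pos:]))
    return "".join(res).replace("rt8§=§7wz", r"\$")

print(math_mode_with_dollar(r"100% of $x^2$ & \$5"))
# -> 100\% of $x^2$ \& \$5
```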
  {
    "library": "django",
    "name": "FieldDoesNotExist",
    "source_code": "class FieldDoesNotExist(Exception):\n    pass",
    "docstring": "The requested model field does not exist",
    "type": "class",
    "file_path": "django\\django\\core\\exceptions.py",
    "ast_data": "ClassDef name:FieldDoesNotExist"
  },
  {
    "library": "numpy",
    "name": "_hist_bin_stone",
    "source_code": "def _hist_bin_stone(x, range):\n    n = x.size\n    ptp_x = _ptp(x)\n    if n <= 1 or ptp_x == 0:\n        return 0\n\n    def jhat(nbins):\n        hh = ptp_x / nbins\n        p_k = np.histogram(x, bins=nbins, range=range)[0] / n\n        return (2 - (n + 1) * p_k.dot(p_k)) / hh\n    nbins_upper_bound = max(100, int(np.sqrt(n)))\n    nbins = min(_range(1, nbins_upper_bound + 1), key=jhat)\n    if nbins == nbins_upper_bound:\n        warnings.warn('The number of bins estimated may be suboptimal.', RuntimeWarning, stacklevel=3)\n    return ptp_x / nbins",
    "docstring": "Histogram bin estimator based on minimizing the estimated integrated squared error (ISE). The number of bins is chosen by minimizing the estimated ISE against the unknown true distribution. The ISE is estimated using cross-validation and can be regarded as a generalization of Scott's rule. This paper by Stone appears to be the origination of this rule. Parameters ---------- x : array_like Input data that is to be histogrammed, trimmed to range. May not be empty. range : (float, float) The lower and upper range of the bins. Returns ------- h : An estimate of the optimal bin width for the given data.",
    "type": "function",
    "file_path": "numpy\\numpy\\lib\\_histograms_impl.py",
    "ast_data": "FunctionDef name:_hist_bin_stone arg:x arg:range arguments arg arg Assign Assign Call If BoolOp Compare Compare Return return:yes FunctionDef name:jhat arg:nbins arguments arg Assign Assign Call Return return:yes Call Assign Call Call Call Assign Call Call If Compare Call Return return:yes"
  },
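The estimator is reachable through NumPy's public bin-selection API; a small sketch:

```python
import numpy as np

rng = np.random.default_rng(0)
x = rng.normal(size=1000)

# bins="stone" routes to the cross-validation estimator above.
edges = np.histogram_bin_edges(x, bins="stone")
print(len(edges) - 1, "bins, width ~", edges[1] - edges[0])
```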
  {
    "library": "django",
    "name": "savepoint_rollback_sql",
    "source_code": "def savepoint_rollback_sql(self, sid):\n    return 'ROLLBACK TO SAVEPOINT %s' % self.quote_name(sid)",
    "docstring": "Return the SQL for rolling back the given savepoint.",
    "type": "method",
    "file_path": "django\\django\\db\\backends\\base\\operations.py",
    "ast_data": "FunctionDef name:savepoint_rollback_sql arg:self arg:sid arguments arg arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "from_tensor",
    "source_code": "@classmethod\ndef from_tensor(cls, tensor):\n    if isinstance(tensor, tensor_lib.Tensor):\n        name = getattr(tensor, 'name', None)\n        type_spec = type_spec_module.type_spec_from_value(tensor)\n        inferred_value = None\n        if type_spec.dtype == dtypes.int32 and type_spec.shape.rank is not None and (type_spec.shape.rank < 2):\n            inferred_value = array_ops.ones(shape=tensor).shape\n            if inferred_value.dims:\n                inferred_value = inferred_value.as_list()\n                if len(inferred_value) > _MAX_TENSOR_RANK:\n                    inferred_value = None\n            else:\n                inferred_value = None\n        return KerasTensor(type_spec, inferred_value=inferred_value, name=name)\n    else:\n        name = getattr(tensor, 'name', None)\n        type_spec = type_spec_module.type_spec_from_value(tensor)\n        return cls(type_spec, name=name)",
    "docstring": "Convert a traced (composite)tensor to a representative KerasTensor.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\keras_tensor.py",
    "ast_data": "FunctionDef name:from_tensor arg:cls arg:tensor arguments arg arg If Call Assign Call Assign Call Assign If BoolOp Compare Compare Compare Assign Call If Assign Call If Compare Call Assign Assign Return return:yes Call Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "django",
    "name": "get_user_permissions",
    "source_code": "def get_user_permissions(self, obj=None):\n    return _user_get_permissions(self, obj, 'user')",
    "docstring": "Return a list of permission strings that this user has directly. Query all available auth backends. If an object is passed in, return only permissions matching this object.",
    "type": "method",
    "file_path": "django\\django\\contrib\\auth\\models.py",
    "ast_data": "FunctionDef name:get_user_permissions arg:self arg:obj arguments arg arg Return return:yes Call"
  },
  {
    "library": "virtualenv",
    "name": "set_pyenv_cfg",
    "source_code": "def set_pyenv_cfg(self):\n    super().set_pyenv_cfg()\n    self.pyenv_cfg['base-prefix'] = self.interpreter.system_prefix\n    self.pyenv_cfg['base-exec-prefix'] = self.interpreter.system_exec_prefix\n    self.pyenv_cfg['base-executable'] = self.interpreter.system_executable",
    "docstring": "We directly inject the base prefix and base exec prefix to avoid site.py needing to discover these from home (which usually is done within the interpreter itself).",
    "type": "method",
    "file_path": "virtualenv\\src\\virtualenv\\create\\via_global_ref\\builtin\\via_global_self_do.py",
    "ast_data": "FunctionDef name:set_pyenv_cfg arg:self arguments arg Call Call Assign Assign Assign"
  },
  {
    "library": "tensorflow",
    "name": "scatter_nd_min",
    "source_code": "def scatter_nd_min(self, indices, updates, name=None):\n    return gen_state_ops.scatter_nd_min(self._variable, indices, updates, use_locking=True, name=name)",
    "docstring": "Updates this variable with the min of and itself. is a with rank and is a of rank . must be integer tensor, containing indices into . It must be shape where . The innermost dimension of (with length ) corresponds to indices into elements (if ) or slices (if ) along the th dimension of . is of rank with shape: See for more details about how to make updates to slices. Args: indices: The indices to be used in the operation. updates: The values to be used in the operation. name: the name of the operation. Returns: A that will hold the new value of this variable after the scattered addition has completed.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\ref_variable.py",
    "ast_data": "FunctionDef name:scatter_nd_min arg:self arg:indices arg:updates arg:name arguments arg arg arg arg Return return:yes Call"
  },
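The ref-variable method is hard to demo outside a graph session, but TensorFlow exposes a functional analogue, `tf.tensor_scatter_nd_min`, with the same indexing semantics:

```python
import tensorflow as tf

t = tf.constant([10, 10, 10, 10])
indices = tf.constant([[1], [3]])   # K = 1 = rank(t): scatter into elements
updates = tf.constant([3, 25])

# Elementwise min at the scattered positions; other entries are unchanged.
print(tf.tensor_scatter_nd_min(t, indices, updates).numpy())  # [10  3 10 10]
```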
  {
    "library": "scikit-learn",
    "name": "predict",
    "source_code": "def predict(self, X):\n    check_is_fitted(self)\n    X = self._check_test_data(X)\n    sample_weight = np.ones(X.shape[0], dtype=X.dtype)\n    labels = _labels_inertia_threadpool_limit(X, sample_weight, self.cluster_centers_, n_threads=self._n_threads, return_inertia=False)\n    return labels",
    "docstring": "Predict the closest cluster each sample in X belongs to. In the vector quantization literature, is called the code book and each value returned by is the index of the closest code in the code book. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) New data to predict. Returns ------- labels : ndarray of shape (n_samples,) Index of the cluster each sample belongs to.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\cluster\\_kmeans.py",
    "ast_data": "FunctionDef name:predict arg:self arg:X arguments arg arg Call Assign Call Assign Call Assign Call Return return:yes"
  },
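Nearest-center assignment in action via the public KMeans API; the data is illustrative:

```python
import numpy as np
from sklearn.cluster import KMeans

X = np.array([[0.0, 0.0], [0.1, 0.0], [5.0, 5.0], [5.1, 5.0]])
km = KMeans(n_clusters=2, n_init=10, random_state=0).fit(X)

# Each label is the index of the closest entry in the "code book"
# (cluster_centers_), exactly what predict() above computes.
print(km.predict(np.array([[0.0, 0.1], [5.0, 4.9]])))
```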
  {
    "library": "tensorflow",
    "name": "foldl",
    "source_code": "@doc_controls.do_not_generate_docs\ndef foldl(fn, elems, initializer=None, name=None):\n    return functional_ops.foldl(fn, elems, initializer=initializer, name=name)",
    "docstring": "Reduce elems using fn to combine them from left to right. Args: fn: Callable that will be called upon each element in elems and an accumulator, for instance elems: tensor initializer: The first value used ( in case of None) name: A string name for the foldl node in the graph Returns: Tensor with same type and shape as .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\backend.py",
    "ast_data": "FunctionDef name:foldl arg:fn arg:elems arg:initializer arg:name arguments arg arg arg arg Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "_get_tex_source",
    "source_code": "@classmethod\ndef _get_tex_source(cls, tex, fontsize):\n    font_preamble, fontcmd = cls._get_font_preamble_and_command()\n    baselineskip = 1.25 * fontsize\n    return '\\n'.join(['\\\\documentclass{article}', '% Pass-through \\\\mathdefault, which is used in non-usetex mode', '% to use the default text font but was historically suppressed', '% in usetex mode.', '\\\\newcommand{\\\\mathdefault}[1]{#1}', font_preamble, '\\\\usepackage[utf8]{inputenc}', '\\\\DeclareUnicodeCharacter{2212}{\\\\ensuremath{-}}', '% geometry is loaded before the custom preamble as ', '% convert_psfrags relies on a custom preamble to change the ', '% geometry.', '\\\\usepackage[papersize=72in, margin=1in]{geometry}', cls.get_custom_preamble(), '% Use `underscore` package to take care of underscores in text.', '% The [strings] option allows to use underscores in file names.', _usepackage_if_not_loaded('underscore', option='strings'), '% Custom packages (e.g. newtxtext) may already have loaded ', '% textcomp with different options.', _usepackage_if_not_loaded('textcomp'), '\\\\pagestyle{empty}', '\\\\begin{document}', '% The empty hbox ensures that a page is printed even for empty', '% inputs, except when using psfrag which gets confused by it.', '% matplotlibbaselinemarker is used by dviread to detect the', \"% last line's baseline.\", f'\\\\fontsize{{{fontsize}}}{{{baselineskip}}}%', '\\\\ifdefined\\\\psfrag\\\\else\\\\hbox{}\\\\fi%', f'{{{fontcmd} {tex}}}%', '\\\\end{document}'])",
    "docstring": "Return the complete TeX source for processing a TeX string.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\texmanager.py",
    "ast_data": "FunctionDef name:_get_tex_source arg:cls arg:tex arg:fontsize arguments arg arg arg Assign Call Assign Return return:yes Call Call Call Call"
  },
  {
    "library": "authlib",
    "name": "validate_jwks_uri",
    "source_code": "def validate_jwks_uri(self):\n    self._validate_uri('jwks_uri')",
    "docstring": "URL string referencing the client's JSON Web Key (JWK) Set [RFC7517] document, which contains the client's public keys. The value of this field MUST point to a valid JWK Set document. These keys can be used by higher-level protocols that use signing or encryption. For instance, these keys might be used by some applications for validating signed requests made to the token endpoint when using JWTs for client authentication [RFC7523]. Use of this parameter is preferred over the \"jwks\" parameter, as it allows for easier key rotation. The \"jwks_uri\" and \"jwks\" parameters MUST NOT both be present in the same request or response.",
    "type": "method",
    "file_path": "authlib\\authlib\\oauth2\\rfc7591\\claims.py",
    "ast_data": "FunctionDef name:validate_jwks_uri arg:self arguments arg Call"
  },
  {
    "library": "scrapy",
    "name": "ScrapyHelpFormatter",
    "source_code": "class ScrapyHelpFormatter(argparse.HelpFormatter):\n\n    def __init__(self, prog: str, indent_increment: int=2, max_help_position: int=24, width: int | None=None):\n        super().__init__(prog, indent_increment=indent_increment, max_help_position=max_help_position, width=width)\n\n    def _join_parts(self, part_strings: Iterable[str]) -> str:\n        parts = self.format_part_strings(builtins.list(part_strings))\n        return super()._join_parts(parts)\n\n    def format_part_strings(self, part_strings: list[str]) -> list[str]:\n        if part_strings and part_strings[0].startswith('usage: '):\n            part_strings[0] = 'Usage\\n=====\\n  ' + part_strings[0][len('usage: '):]\n        headings = [i for i in range(len(part_strings)) if part_strings[i].endswith(':\\n')]\n        for index in headings[::-1]:\n            char = '-' if 'Global Options' in part_strings[index] else '='\n            part_strings[index] = part_strings[index][:-2].title()\n            underline = ''.join(['\\n', char * len(part_strings[index]), '\\n'])\n            part_strings.insert(index + 1, underline)\n        return part_strings",
    "docstring": "Help Formatter for scrapy command line help messages.",
    "type": "class",
    "file_path": "scrapy\\scrapy\\commands\\__init__.py",
    "ast_data": "ClassDef name:ScrapyHelpFormatter FunctionDef name:__init__ arg:self arg:prog arg:indent_increment arg:max_help_position arg:width arguments arg arg arg arg arg Call Call FunctionDef name:_join_parts arg:self arg:part_strings arguments arg arg Assign Call Call Return return:yes Call Call FunctionDef name:format_part_strings arg:self arg:part_strings arguments arg arg If BoolOp Call Assign Call Assign Call Call Call For Assign Compare Assign Call Assign Call Call Call Return return:yes"
  },
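The formatter plugs into a stock argparse parser; a sketch showing the retitled, underlined sections:

```python
import argparse
from scrapy.commands import ScrapyHelpFormatter

parser = argparse.ArgumentParser(
    prog="scrapy crawl", formatter_class=ScrapyHelpFormatter
)
parser.add_argument("--loglevel", help="log level")

# 'usage: ...' is rewritten to an underlined 'Usage' heading, and section
# titles ending in ':' are title-cased and underlined with '=' (or '-' for
# the Global Options section).
print(parser.format_help())
```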
  {
    "library": "pytorch",
    "name": "_check_index",
    "source_code": "def _check_index(cond, message=None):\n    _check_with(IndexError, cond, message)",
    "docstring": "Throws error containing an optional message if the specified condition is False. Error type: `bool`",
    "type": "function",
    "file_path": "pytorch\\torch\\__init__.py",
    "ast_data": "FunctionDef name:_check_index arg:cond arg:message arguments arg arg Call"
  },
  {
    "library": "authlib",
    "name": "acquire_token",
    "source_code": "def acquire_token(self, scopes=None, **kwargs):\n    request = FlaskJsonRequest(_req)\n    kwargs['scopes'] = scopes\n    for claim in kwargs:\n        if isinstance(kwargs[claim], str):\n            kwargs[claim] = [kwargs[claim]]\n    token = self.validate_request(request=request, **kwargs)\n    token_authenticated.send(self, token=token)\n    g.authlib_server_oauth2_token = token\n    return token",
    "docstring": "A method to acquire current valid token with the given scope. :param scopes: a list of scope values :return: token object",
    "type": "method",
    "file_path": "authlib\\authlib\\integrations\\flask_oauth2\\resource_protector.py",
    "ast_data": "FunctionDef name:acquire_token arg:self arg:scopes arguments arg arg arg Assign Call Assign For If Call Assign Assign Call Call Assign Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "BackwardSignature",
    "source_code": "@dataclass\nclass BackwardSignature:\n    gradients_to_parameters: dict[str, str]\n    gradients_to_user_inputs: dict[str, str]\n    loss_output: str",
    "docstring": "Provides information about the backward section of an exported joint forward-backward graph. For a particular fx GraphModule, this class contains information on: (1) A mapping from each gradient (backwards output) to the parameter it corresponds to (forward input) (2) A mapping from each gradient (backwards output) to the user input it corresponds to (forward input) (3) Which of the forward outputs corresponds to the loss, that we backprop on. Each string name is the of the corresponding node in the fx graph.",
    "type": "class",
    "file_path": "pytorch\\torch\\_functorch\\_aot_autograd\\schemas.py",
    "ast_data": "ClassDef name:BackwardSignature"
  },
  {
    "library": "tensorflow",
    "name": "_TypeBasedDispatcher",
    "source_code": "class _TypeBasedDispatcher(OpDispatcher):\n\n    def __init__(self, override_func, types):\n        self._types = types\n        self._override_func = override_func\n\n    def _handles(self, args, kwargs):\n        for arg in itertools.chain(args, kwargs.values()):\n            if isinstance(arg, self._types) or (isinstance(arg, (list, tuple)) and any((isinstance(elt, self._types) for elt in arg))):\n                return True\n        return False\n\n    def handle(self, args, kwargs):\n        if self._handles(args, kwargs):\n            return self._override_func(*args, **kwargs)\n        else:\n            return self.NOT_SUPPORTED",
    "docstring": "Dispatcher that handles op if any arguments have a specified type. Checks the types of the arguments and keyword arguments (including elements of lists or tuples), and if any argument values have the indicated type(s), then delegates to an override function.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\util\\dispatch.py",
    "ast_data": "ClassDef name:_TypeBasedDispatcher FunctionDef name:__init__ arg:self arg:override_func arg:types arguments arg arg arg Assign Assign FunctionDef name:_handles arg:self arg:args arg:kwargs arguments arg arg arg For Call Call If BoolOp Call BoolOp Call Call Call Return return:yes Return return:yes FunctionDef name:handle arg:self arg:args arg:kwargs arguments arg arg arg If Call Return return:yes Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "set_verticalalignment",
    "source_code": "def set_verticalalignment(self, align):\n    _api.check_in_list(['top', 'bottom', 'center', 'baseline', 'center_baseline'], align=align)\n    self._verticalalignment = align\n    self.stale = True",
    "docstring": "Set the vertical alignment relative to the anchor point. See also :doc:. Parameters ---------- align : {'baseline', 'bottom', 'center', 'center_baseline', 'top'}",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\text.py",
    "ast_data": "FunctionDef name:set_verticalalignment arg:self arg:align arguments arg arg Call Assign Assign"
  },
  {
    "library": "django",
    "name": "get_key_func",
    "source_code": "def get_key_func(key_func):\n    if key_func is not None:\n        if callable(key_func):\n            return key_func\n        else:\n            return import_string(key_func)\n    return default_key_func",
    "docstring": "Function to decide which key function to use. Default to ``.",
    "type": "function",
    "file_path": "django\\django\\core\\cache\\backends\\base.py",
    "ast_data": "FunctionDef name:get_key_func arg:key_func arguments arg If Compare If Call Return return:yes Return return:yes Call Return return:yes"
  },
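How the three branches behave; `my_key` is a hypothetical custom key function:

```python
from django.core.cache.backends.base import default_key_func, get_key_func

def my_key(key, key_prefix, version):
    # Hypothetical custom key function.
    return f"{key_prefix}!{version}!{key}"

assert get_key_func(my_key) is my_key          # callables pass through
assert get_key_func(None) is default_key_func  # None falls back to the default
# A dotted-path string would be resolved with import_string() instead.
```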
  {
    "library": "scipy",
    "name": "_evaluate_derivatives",
    "source_code": "def _evaluate_derivatives(self, x, der=None):\n    raise NotImplementedError()",
    "docstring": "Actually evaluate the derivatives. Parameters ---------- x : array_like 1D array of points at which to evaluate the derivatives der : integer, optional The number of derivatives to evaluate, from 'order 0' (der=1) to order der-1. If omitted, return all possibly-non-zero derivatives, ie 0 to order n-1. Returns ------- d : ndarray Array of shape `` containing the derivatives from 0 to der-1",
    "type": "method",
    "file_path": "scipy\\scipy\\interpolate\\_polyint.py",
    "ast_data": "FunctionDef name:_evaluate_derivatives arg:self arg:x arg:der arguments arg arg arg Raise Call"
  },
  {
    "library": "django",
    "name": "fields",
    "source_code": "@property\ndef fields(self):\n    return [force_str(capi.get_field_name(capi.get_field_defn(self._ldefn, i)), self._ds.encoding, strings_only=True) for i in range(self.num_fields)]",
    "docstring": "Return a list of string names corresponding to each of the Fields available in this Layer.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\gdal\\layer.py",
    "ast_data": "FunctionDef name:fields arg:self arguments arg Return return:yes Call Call Call Call"
  },
  {
    "library": "django",
    "name": "get_created_time",
    "source_code": "def get_created_time(self, name):\n    raise NotImplementedError('subclasses of Storage must provide a get_created_time() method')",
    "docstring": "Return the creation time (as a datetime) of the file specified by name. The datetime will be timezone-aware if USE_TZ=True.",
    "type": "method",
    "file_path": "django\\django\\core\\files\\storage\\base.py",
    "ast_data": "FunctionDef name:get_created_time arg:self arg:name arguments arg arg Raise Call"
  },
  {
    "library": "pytorch",
    "name": "pg_names",
    "source_code": "@property\ndef pg_names(self) -> dict[ProcessGroup, str]:\n    global _pg_names\n    return _pg_names",
    "docstring": "Process group's names, map from ProcessGroup to str. TODO don't expose the map, expose fine grained ops",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\distributed_c10d.py",
    "ast_data": "FunctionDef name:pg_names arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "concatenate",
    "source_code": "def concatenate(self, other):\n    other = as_shape(other)\n    if self.dims is None or other.dims is None:\n        return unknown_shape()\n    else:\n        return TensorShape(self.dims + other.dims)",
    "docstring": "Returns the concatenation of the dimension in and . *N.B.* If either or is completely unknown, concatenation will discard information about the other shape. In future, we might support concatenation that preserves this information for use with slicing. Args: other: Another . Returns: A whose dimensions are the concatenation of the dimensions in and .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\tensor_shape.py",
    "ast_data": "FunctionDef name:concatenate arg:self arg:other arguments arg arg Assign Call If BoolOp Compare Compare Return return:yes Call Return return:yes Call"
  },
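Shape concatenation and the unknown-shape short circuit, sketched with the public API:

```python
import tensorflow as tf

a = tf.TensorShape([2, 3])
b = tf.TensorShape([None, 5])

print(a.concatenate(b))                      # (2, 3, None, 5)
# If either side is completely unknown, all information is discarded:
print(a.concatenate(tf.TensorShape(None)))   # <unknown>
```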
  {
    "library": "tensorflow",
    "name": "recreate_saveable_objects",
    "source_code": "def recreate_saveable_objects(saveable_fn_by_name, temp_session):\n    names_and_slices = []\n    with ops.init_scope():\n        for save_fn, _ in saveable_fn_by_name.values():\n            for tensor_info in save_fn(''):\n                name = tensor_info['name']\n                slice_spec = tensor_info['slice_spec']\n                if not context.executing_eagerly():\n                    sess = ops.get_default_session()\n                    if sess is None:\n                        if temp_session[0] is not None:\n                            sess = temp_session[0]\n                        else:\n                            sess = temp_session[0] = session.Session()\n                    name, slice_spec = sess.run([name, slice_spec])\n                names_and_slices.append((_convert_to_string(name), _convert_to_string(slice_spec)))\n    saveable_factories = {}\n    for name, (save_fn, restore_fn) in saveable_fn_by_name.items():\n        saveable_factories[name] = functools.partial(RestoredSaveableObject, names_and_slices=names_and_slices, save_function=save_fn, restore_function=restore_fn)\n    return saveable_factories",
    "docstring": "Returns a dict of SaveableObject factories generated from loaded fns.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\training\\saving\\saveable_object_util.py",
    "ast_data": "FunctionDef name:recreate_saveable_objects arg:saveable_fn_by_name arg:temp_session arguments arg arg Assign With Call For Call For Call Assign Assign If Call Assign Call If Compare If Compare Assign Assign Call Assign Call Call Call Call Assign For Call Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "set_shape_and_handle_data_for_outputs",
    "source_code": "def set_shape_and_handle_data_for_outputs(_) -> None:\n    pass",
    "docstring": "No op. TODO(b/74620627): Remove this.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\ops.py",
    "ast_data": "FunctionDef name:set_shape_and_handle_data_for_outputs arg:_ arguments arg"
  },
  {
    "library": "kornia",
    "name": "RgbToGrayscale",
    "source_code": "class RgbToGrayscale(Module):\n    ONNX_DEFAULT_INPUTSHAPE: ClassVar[list[int]] = [-1, 3, -1, -1]\n    ONNX_DEFAULT_OUTPUTSHAPE: ClassVar[list[int]] = [-1, 1, -1, -1]\n\n    def __init__(self, rgb_weights: Optional[Tensor]=None) -> None:\n        super().__init__()\n        if rgb_weights is None:\n            rgb_weights = Tensor([0.299, 0.587, 0.114])\n        self.rgb_weights = rgb_weights\n\n    def forward(self, image: Tensor) -> Tensor:\n        return rgb_to_grayscale(image, rgb_weights=self.rgb_weights)",
    "docstring": "Module to convert a RGB image to grayscale version of image. The image data is assumed to be in the range of (0, 1). Shape: - image: :math: - output: :math: reference: Example: >>> input = torch.rand(2, 3, 4, 5) >>> gray = RgbToGrayscale() >>> output = gray(input) # 2x1x4x5",
    "type": "class",
    "file_path": "kornia\\kornia\\color\\gray.py",
    "ast_data": "ClassDef name:RgbToGrayscale FunctionDef name:__init__ arg:self arg:rgb_weights arguments arg arg Call Call If Compare Assign Call Assign FunctionDef name:forward arg:self arg:image arguments arg arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "apply_grad",
    "source_code": "def apply_grad(self, grad, local_step=0, name=None):\n    grad = ops.convert_to_tensor(grad, self._dtype)\n    grad.get_shape().assert_is_compatible_with(self._shape)\n    local_step = math_ops.cast(ops.convert_to_tensor(local_step), _dtypes.int64)\n    return gen_data_flow_ops.resource_accumulator_apply_gradient(self._accumulator_ref, local_step=local_step, gradient=grad, name=name)",
    "docstring": "Attempts to apply a gradient to the accumulator. The attempt is silently dropped if the gradient is stale, i.e., local_step is less than the accumulator's global time step. Args: grad: The gradient tensor to be applied. local_step: Time step at which the gradient was computed. name: Optional name for the operation. Returns: The operation that (conditionally) applies a gradient to the accumulator. Raises: ValueError: If grad is of the wrong shape",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\data_flow_ops.py",
    "ast_data": "FunctionDef name:apply_grad arg:self arg:grad arg:local_step arg:name arguments arg arg arg arg Assign Call Call Call Assign Call Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "normalize_to_dense",
    "source_code": "def normalize_to_dense(dataset: Dataset):\n    if structured_function._should_unpack(dataset.element_spec):\n\n        def normalize(*args):\n            return structure.to_batched_tensor_list(dataset.element_spec, tuple(args))\n    else:\n\n        def normalize(arg):\n            return structure.to_batched_tensor_list(dataset.element_spec, arg)\n    normalized_dataset = dataset.map(normalize)\n    return _RestructuredDataset(normalized_dataset, dataset.element_spec)",
    "docstring": "Normalizes non-tensor components in a dataset to dense representations. This is necessary for dataset transformations that slice along the batch dimension and are oblivious to non-tensors, e.g. , . Args: dataset: Dataset to normalize. Returns: A dataset whose sparse and ragged tensors have been normalized to their dense representations.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\data\\ops\\dataset_ops.py",
    "ast_data": "FunctionDef name:normalize_to_dense arg:dataset arguments arg If Call FunctionDef name:normalize arguments arg Return return:yes Call Call FunctionDef name:normalize arg:arg arguments arg Return return:yes Call Assign Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "name",
    "source_code": "@property\ndef name(self):\n    return self._name",
    "docstring": "Returns the name of this policy.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\mixed_precision\\policy.py",
    "ast_data": "FunctionDef name:name arg:self arguments arg Return return:yes"
  },
  {
    "library": "scipy",
    "name": "Zacharov",
    "source_code": "class Zacharov(Benchmark):\n    change_dimensionality = True\n\n    def __init__(self, dimensions=2):\n        Benchmark.__init__(self, dimensions)\n        self._bounds = list(zip([-5.0] * self.N, [10.0] * self.N))\n        self.custom_bounds = ([-1, 1], [-1, 1])\n        self.global_optimum = [[0 for _ in range(self.N)]]\n        self.fglob = 0.0\n\n    def fun(self, x, *args):\n        self.nfev += 1\n        u = sum(x ** 2)\n        v = sum(arange(1, self.N + 1) * x)\n        return u + (0.5 * v) ** 2 + (0.5 * v) ** 4",
    "docstring": "Zacharov objective function. This class defines the Zacharov [1]_ global optimization problem. This is a multimodal minimization problem defined as follows: .. math:: f_{\\text{Zacharov}}(x) = \\sum_{i=1}^{n} x_i^2 + \\left ( \\frac{1}{2} \\sum_{i=1}^{n} i x_i \\right )^2 + \\left ( \\frac{1}{2} \\sum_{i=1}^{n} i x_i \\right )^4 Here, :math: represents the number of dimensions and :math: for :math:. *Global optimum*: :math: for :math: for :math: .. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions For Global Optimization Problems Int. Journal of Mathematical Modelling and Numerical Optimisation, 2013, 4, 150-194.",
    "type": "class",
    "file_path": "scipy\\benchmarks\\benchmarks\\go_benchmark_functions\\go_funcs_Z.py",
    "ast_data": "ClassDef name:Zacharov Assign FunctionDef name:__init__ arg:self arg:dimensions arguments arg arg Call Assign Call Call Assign Assign Call Assign FunctionDef name:fun arg:self arg:x arguments arg arg arg Assign Call Assign Call Call Return return:yes"
  },
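A standalone transcription of `fun` confirming the global optimum at the origin:

```python
import numpy as np

def zacharov(x):
    # Direct transcription of the benchmark's fun() above.
    x = np.asarray(x, dtype=float)
    v = np.sum(np.arange(1, x.size + 1) * x)
    return np.sum(x**2) + (0.5 * v) ** 2 + (0.5 * v) ** 4

print(zacharov([0.0, 0.0]))  # 0.0, the global optimum
print(zacharov([1.0, 1.0]))  # 2 + 1.5**2 + 1.5**4 = 9.3125
```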
  {
    "library": "authlib",
    "name": "validate_revocation_endpoint_auth_signing_alg_values_supported",
    "source_code": "def validate_revocation_endpoint_auth_signing_alg_values_supported(self):\n    _validate_alg_values(self, 'revocation_endpoint_auth_signing_alg_values_supported', self.revocation_endpoint_auth_methods_supported)",
    "docstring": "OPTIONAL. JSON array containing a list of the JWS signing algorithms (\"alg\" values) supported by the revocation endpoint for the signature on the JWT [JWT] used to authenticate the client at the revocation endpoint for the \"private_key_jwt\" and \"client_secret_jwt\" authentication methods. This metadata entry MUST be present if either of these authentication methods are specified in the \"revocation_endpoint_auth_methods_supported\" entry. No default algorithms are implied if this entry is omitted. The value \"none\" MUST NOT be used.",
    "type": "method",
    "file_path": "authlib\\authlib\\oauth2\\rfc8414\\models.py",
    "ast_data": "FunctionDef name:validate_revocation_endpoint_auth_signing_alg_values_supported arg:self arguments arg Call"
  },
  {
    "library": "tensorflow",
    "name": "_print",
    "source_code": "def _print(self, *args):\n\n    def _format(name, arr):\n        title = '### All Compatibility %s ###' % str(name)\n        tlen = len(title)\n        print('-' * tlen)\n        print(title)\n        print('-' * tlen)\n        print(' Total # of %s: %s\\n' % (str(name), str(len(arr))))\n        if arr:\n            for item in arr:\n                detail = ''\n                if isinstance(item[1], list):\n                    for itm in item[1]:\n                        detail += str(itm) + ', '\n                    detail = detail[:-2]\n                else:\n                    detail = str(item[1])\n                print(\"  %s ('%s')\\n\" % (str(item[0]), detail))\n        else:\n            print('  No %s' % name)\n        print('\\n')\n    for p_item in args:\n        if p_item == 'failures':\n            _format('Failures', self.failures)\n        elif p_item == 'successes':\n            _format('Successes', self.successes)\n        elif p_item == 'failure_msgs':\n            _format('Failure Messages', self.error_msg)\n        elif p_item == 'warning_msgs':\n            _format('Warning Messages', self.warning_msg)\n        else:\n            raise Exception('[Error] Wrong input provided for %s.' % _get_func_name())",
    "docstring": "Prints compatibility check status and failure or warning messages. Prints to console without using . Args: *args: String(s) that is one of: [, # all failures , # all successes , # failure message(s) recorded upon failure(s) ] # warning message(s) recorded upon warning(s) Raises: Exception: If *args not in: [, , , ]",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\tools\\tensorflow_builder\\compat_checker\\compat_checker.py",
    "ast_data": "FunctionDef name:_print arg:self arguments arg arg FunctionDef name:_format arg:name arg:arr arguments arg arg Assign Call Assign Call Call Call Call Call Call Call Call If For Assign If Call For Call Assign Assign Call Call Call Call Call For If Compare Call If Compare Call If Compare Call If Compare Call Raise Call Call"
  },
  {
    "library": "pytorch",
    "name": "clear_cublas_manager",
    "source_code": "@contextlib.contextmanager\ndef clear_cublas_manager() -> Generator[None, None, None]:\n    clear_cublass_cache()\n    try:\n        yield\n    finally:\n        clear_cublass_cache()",
    "docstring": "Context manager around clearing cublas caches that will clear on enter and exit",
    "type": "function",
    "file_path": "pytorch\\torch\\_inductor\\cudagraph_trees.py",
    "ast_data": "FunctionDef name:clear_cublas_manager arguments Call Try Call"
  },
  {
    "library": "tensorflow",
    "name": "_is_acceptable_input_type",
    "source_code": "def _is_acceptable_input_type(x):\n    supported_composite_types = (indexed_slices.IndexedSlices, weak_tensor.WeakTensor, variables.Variable)\n    return isinstance(x, supported_composite_types) or not isinstance(x, composite_tensor.CompositeTensor)",
    "docstring": "Determines if x is an acceptable input type for auto dtype conversion semantics.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\flexible_dtypes.py",
    "ast_data": "FunctionDef name:_is_acceptable_input_type arg:x arguments arg Assign Return return:yes BoolOp Call Call"
  },
  {
    "library": "scikit-learn",
    "name": "_fit_multiclass",
    "source_code": "def _fit_multiclass(self, X, y, alpha, C, learning_rate, sample_weight, max_iter):\n    validation_mask = self._make_validation_split(y, sample_mask=sample_weight > 0)\n    random_state = check_random_state(self.random_state)\n    seeds = random_state.randint(MAX_INT, size=len(self.classes_))\n    result = Parallel(n_jobs=self.n_jobs, verbose=self.verbose, require='sharedmem')((delayed(fit_binary)(self, i, X, y, alpha, C, learning_rate, max_iter, self._expanded_class_weight[i], 1.0, sample_weight, validation_mask=validation_mask, random_state=seed) for i, seed in enumerate(seeds)))\n    n_iter_ = 0.0\n    for i, (_, intercept, n_iter_i) in enumerate(result):\n        self.intercept_[i] = intercept\n        n_iter_ = max(n_iter_, n_iter_i)\n    self.t_ += n_iter_ * X.shape[0]\n    self.n_iter_ = n_iter_\n    if self.average > 0:\n        if self.average <= self.t_ - 1.0:\n            self.coef_ = self._average_coef\n            self.intercept_ = self._average_intercept\n        else:\n            self.coef_ = self._standard_coef\n            self._standard_intercept = np.atleast_1d(self.intercept_)\n            self.intercept_ = self._standard_intercept",
    "docstring": "Fit a multi-class classifier by combining binary classifiers Each binary classifier predicts one class versus all others. This strategy is called OvA (One versus All) or OvR (One versus Rest).",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\linear_model\\_stochastic_gradient.py",
    "ast_data": "FunctionDef name:_fit_multiclass arg:self arg:X arg:y arg:alpha arg:C arg:learning_rate arg:sample_weight arg:max_iter arguments arg arg arg arg arg arg arg arg Assign Call Compare Assign Call Assign Call Call Assign Call Call Call Call Call Assign For Call Assign Assign Call Assign If Compare If Compare Assign Assign Assign Assign Call Assign"
  },
  {
    "library": "pytorch",
    "name": "state_dict",
    "source_code": "def state_dict(self) -> dict[str, Any]:\n    self._check_overlap_initialized()\n    if len(self._all_state_dicts) == 0:\n        raise RuntimeError(f'Optimizer state has not been consolidated on this rank. Please call `consolidate_state_dict(to={self.rank})` on all ranks beforehand if you meant to save the global state.')\n    state_dict = super().state_dict()\n    for rank, local_state_dict in enumerate(self._all_state_dicts):\n        local_param_groups = local_state_dict['param_groups']\n        global_param_groups = self._partition_parameters()[rank]\n        assert len(local_param_groups) == len(global_param_groups), 'Mismatch between number of local and global parameter groups'\n        for local_param_group, global_param_group in zip(local_param_groups, global_param_groups):\n            local_param_indices = local_param_group['params']\n            global_params = global_param_group['params']\n            assert len(local_param_indices) == len(global_params), 'Mismatch between number of local and global parameters in parameter group'\n            for local_param_index, global_param in zip(local_param_indices, global_params):\n                if local_param_index in local_state_dict['state']:\n                    global_param_index = self._param_to_index[global_param]\n                    state_dict['state'][global_param_index] = local_state_dict['state'][local_param_index]\n    state_dict['state'] = dict(sorted(state_dict['state'].items()))\n    return state_dict",
    "docstring": "Return the last global optimizer state known to this rank. .. warning: If the state has not been consolidated to this rank, this raises a runtime error, and even if it has, the state may not be up-to-date, depending on when :meth: was last called. Raises: RuntimeError: if `ZeroRedundancyOptimizerDistributedDataParallelconsolidate_state_dict`.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\optim\\zero_redundancy_optimizer.py",
    "ast_data": "FunctionDef name:state_dict arg:self arguments arg Call If Compare Call Raise Call Assign Call Call For Call Assign Assign Call Compare Call Call For Call Assign Assign Compare Call Call For Call If Compare Assign Assign Assign Call Call Call Return return:yes"
  },
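A hedged sketch of the consolidate-then-save flow this method expects; it assumes `torch.distributed` is already initialized (e.g. via `torchrun`) and that `model` is defined elsewhere:

```python
# Sketch only: gather the sharded optimizer state onto rank 0, then save it.
import torch
import torch.distributed as dist
from torch.distributed.optim import ZeroRedundancyOptimizer

def save_global_state(model, path="zero_opt.pt"):
    opt = ZeroRedundancyOptimizer(model.parameters(),
                                  optimizer_class=torch.optim.Adam, lr=1e-3)
    # ... training steps ...
    opt.consolidate_state_dict(to=0)        # must precede state_dict()
    if dist.get_rank() == 0:
        torch.save(opt.state_dict(), path)  # raises without consolidation
```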
  {
    "library": "pytorch",
    "name": "_rename_param_and_buffer",
    "source_code": "def _rename_param_and_buffer(self, nodes: Sequence[torch.fx.Node], new_name: str) -> None:\n    assert len(nodes) > 0, '`nodes` cannot be empty'\n    assert len({node.target for node in nodes}) == 1, '`nodes` must all have same `target`'\n    old_name = nodes[0].target\n    assert isinstance(old_name, str), f'Expected str, got type({old_name})'\n    normalized_name = new_name.replace('.', '/')\n    attr_value = getattr(self.module, old_name)\n    setattr(self.module, normalized_name, attr_value)\n    delattr(self.module, old_name)\n    for node in nodes:\n        with self.module.graph.inserting_before(node):\n            new_node = self.module.graph.get_attr(normalized_name)\n            new_node.meta = node.meta\n            node.replace_all_uses_with(new_node)\n            self.module.graph.erase_node(node)\n    logger.info(\"Renamed 'self.%s' to 'self.%s', normalized from original parameter name '%s'.\", old_name, normalized_name, new_name)",
    "docstring": "Rename the parameter/buffer and replace corresponding nodes with new nodes of updated target.",
    "type": "method",
    "file_path": "pytorch\\torch\\onnx\\_internal\\fx\\passes\\readability.py",
    "ast_data": "FunctionDef name:_rename_param_and_buffer arg:self arg:nodes arg:new_name arguments arg arg arg Compare Call Compare Call Assign Call Assign Call Assign Call Call Call For With Call Assign Call Assign Call Call Call"
  },
  {
    "library": "numpy",
    "name": "LapackILP64NotFoundError",
    "source_code": "class LapackILP64NotFoundError(NotFoundError):\n    pass",
    "docstring": "64-bit Lapack libraries not found. Known libraries in numpy/distutils/site.cfg file are: openblas64_, openblas_ilp64",
    "type": "class",
    "file_path": "numpy\\numpy\\distutils\\system_info.py",
    "ast_data": "ClassDef name:LapackILP64NotFoundError"
  },
  {
    "library": "pandas",
    "name": "_has_no_reference",
    "source_code": "def _has_no_reference(self, i: int=0) -> bool:\n    return not self.blocks[0].refs.has_reference()",
    "docstring": "Check for column if it has references. (whether it references another array or is itself being referenced) Returns True if the column has no references.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\internals\\managers.py",
    "ast_data": "FunctionDef name:_has_no_reference arg:self arg:i arguments arg arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "_sorted_batch_p2p",
    "source_code": "def _sorted_batch_p2p(p2p_ops: list[dist.P2POp], desc: Optional[str]=None) -> dict[int, list[dist.Work]]:\n    ops_by_peer: dict[int, list[dist.P2POp]] = defaultdict(list)\n    work_by_peer: dict[int, list[dist.Work]] = {}\n    if len(p2p_ops) == 0:\n        return work_by_peer\n    for op in p2p_ops:\n        ops_by_peer[op.peer].append(op)\n    for peer, ops in sorted(ops_by_peer.items()):\n        work_by_peer[peer] = _batch_p2p(ops, desc=desc)\n    return work_by_peer",
    "docstring": "Sorts the list of P2P ops by the peer rank, and then calls batch_isend_irecv. Return a dictionary of works by peer rank. This function helps us avoid hangs in case of skip connections.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\pipelining\\schedules.py",
    "ast_data": "FunctionDef name:_sorted_batch_p2p arg:p2p_ops arg:desc arguments arg arg Call If Compare Call Return return:yes For Call For Call Call Assign Call Return return:yes"
  },
  {
    "library": "kornia",
    "name": "to",
    "source_code": "def to(self, *args: Any, **kwargs: Any) -> '_BasicAugmentationBase':\n    device, dtype, _, _ = torch._C._nn._parse_to(*args, **kwargs)\n    self.set_rng_device_and_dtype(device, dtype)\n    return super().to(*args, **kwargs)",
    "docstring": "Set the device and dtype for the random number generator.",
    "type": "method",
    "file_path": "kornia\\kornia\\augmentation\\base.py",
    "ast_data": "FunctionDef name:to arg:self arguments arg arg arg Assign Call Call Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "unweighted_metrics",
    "source_code": "@property\ndef unweighted_metrics(self):\n    if not self._built:\n        return None\n    return nest.flatten(self._metrics)",
    "docstring": "Metrics in this container that should not be passed .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\compile_utils.py",
    "ast_data": "FunctionDef name:unweighted_metrics arg:self arguments arg If Return return:no Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_MapDataset",
    "source_code": "class _MapDataset(dataset_ops.UnaryDataset):\n\n    def __init__(self, input_dataset, map_func, force_synchronous=False, use_inter_op_parallelism=True, preserve_cardinality=True, use_legacy_function=False, name=None):\n        self._input_dataset = input_dataset\n        self._use_inter_op_parallelism = use_inter_op_parallelism\n        self._preserve_cardinality = preserve_cardinality\n        self._map_func = structured_function.StructuredFunctionWrapper(map_func, self._transformation_name(), dataset=input_dataset, use_legacy_function=use_legacy_function)\n        self._force_synchronous = force_synchronous\n        self._name = name\n        variant_tensor = gen_dataset_ops.map_dataset(input_dataset._variant_tensor, self._map_func.function.captured_inputs, f=self._map_func.function, use_inter_op_parallelism=self._use_inter_op_parallelism, preserve_cardinality=self._preserve_cardinality, force_synchronous=self._force_synchronous, **self._common_args)\n        super().__init__(input_dataset, variant_tensor)\n\n    def _functions(self):\n        return [self._map_func]\n\n    @property\n    def element_spec(self):\n        return self._map_func.output_structure\n\n    def _transformation_name(self):\n        return 'Dataset.map()'",
    "docstring": "A that maps a function over elements in its input.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\data\\ops\\map_op.py",
    "ast_data": "ClassDef name:_MapDataset FunctionDef name:__init__ arg:self arg:input_dataset arg:map_func arg:force_synchronous arg:use_inter_op_parallelism arg:preserve_cardinality arg:use_legacy_function arg:name arguments arg arg arg arg arg arg arg arg Assign Assign Assign Assign Call Call Assign Assign Assign Call Call Call FunctionDef name:_functions arg:self arguments arg Return return:yes FunctionDef name:element_spec arg:self arguments arg Return return:yes FunctionDef name:_transformation_name arg:self arguments arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "__repr__",
    "source_code": "def __repr__(self) -> str:\n    return f'Partial({self.reduce_op})'",
    "docstring": "machine readable representation of the Partial placement",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\tensor\\placement_types.py",
    "ast_data": "FunctionDef name:__repr__ arg:self arguments arg Return return:yes"
  },
  {
    "library": "scrapy",
    "name": "__len__",
    "source_code": "def __len__(self) -> int:\n    return len(self.dqs) + len(self.mqs) if self.dqs is not None else len(self.mqs)",
    "docstring": "Return the total amount of enqueued requests",
    "type": "method",
    "file_path": "scrapy\\scrapy\\core\\scheduler.py",
    "ast_data": "FunctionDef name:__len__ arg:self arguments arg Return return:yes Compare Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_tag_zeros_tensor",
    "source_code": "def _tag_zeros_tensor(fun):\n\n    def wrapped(*args, **kwargs):\n        tensor = fun(*args, **kwargs)\n        tensor._is_zeros_tensor = True\n        return tensor\n    return tf_decorator.make_decorator(fun, wrapped)",
    "docstring": "Tags the result of function by setting _is_zeros_tensor attribute. This is useful to compute Hessians of fused ops such as cross_entropy.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\array_ops.py",
    "ast_data": "FunctionDef name:_tag_zeros_tensor arg:fun arguments arg FunctionDef name:wrapped arguments arg arg Assign Call Assign Return return:yes Return return:yes Call"
  },
  {
    "library": "django",
    "name": "model_to_dict",
    "source_code": "def model_to_dict(instance, fields=None, exclude=None):\n    opts = instance._meta\n    data = {}\n    for f in chain(opts.concrete_fields, opts.private_fields, opts.many_to_many):\n        if not getattr(f, 'editable', False):\n            continue\n        if fields is not None and f.name not in fields:\n            continue\n        if exclude and f.name in exclude:\n            continue\n        data[f.name] = f.value_from_object(instance)\n    return data",
    "docstring": "Return a dict containing the data in `` argument.",
    "type": "function",
    "file_path": "django\\django\\forms\\models.py",
    "ast_data": "FunctionDef name:model_to_dict arg:instance arg:fields arg:exclude arguments arg arg arg Assign Assign For Call If Call If BoolOp Compare Compare If BoolOp Compare Assign Call Return return:yes"
  },
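A runnable sketch of the filtering behavior above, using a hypothetical `Author` model configured in standalone mode (the `demo` app label and field names are illustrative, not from the source):

```python
# Standalone Django setup so a model can be defined outside a project.
import django
from django.conf import settings

settings.configure()
django.setup()

from django.db import models
from django.forms.models import model_to_dict

class Author(models.Model):
    name = models.CharField(max_length=50)
    created = models.DateTimeField(auto_now_add=True)  # editable=False

    class Meta:
        app_label = "demo"  # hypothetical label for this sketch

author = Author(name="Ada")
# Non-editable fields (here `created`) are skipped; fields/exclude filter further.
print(model_to_dict(author, fields=["name"]))   # {'name': 'Ada'}
print(model_to_dict(author, exclude=["name"]))  # no 'name' key in the result
```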
  {
    "library": "scikit-learn",
    "name": "isdtype",
    "source_code": "def isdtype(dtype, kind, *, xp):\n    if isinstance(kind, tuple):\n        return any((_isdtype_single(dtype, k, xp=xp) for k in kind))\n    else:\n        return _isdtype_single(dtype, kind, xp=xp)",
    "docstring": "Returns a boolean indicating whether a provided dtype is of type \"kind\". Included in the v2022.12 of the Array API spec.",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\utils\\_array_api.py",
    "ast_data": "FunctionDef name:isdtype arg:dtype arg:kind arguments arg arg arg If Call Return return:yes Call Call Return return:yes Call"
  },
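The same "kind" semantics that this helper mirrors are exposed natively by NumPy >= 2.0 as `np.isdtype`; a short demonstration (assuming NumPy 2.x is installed):

```python
# Array API isdtype semantics: a tuple of kinds acts as a logical OR.
import numpy as np

print(np.isdtype(np.dtype(np.float32), "real floating"))              # True
print(np.isdtype(np.dtype(np.int64), "real floating"))                # False
print(np.isdtype(np.dtype(np.int64), ("integral", "real floating")))  # True
```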
  {
    "library": "sphinx",
    "name": "get_signatures",
    "source_code": "def get_signatures(self) -> list[str]:\n    lines = nl_escape_re.sub('', self.arguments[0]).split('\\n')\n    if self.config.strip_signature_backslash:\n        return [strip_backslash_re.sub('\\\\1', line.strip()) for line in lines]\n    else:\n        return [line.strip() for line in lines]",
    "docstring": "Retrieve the signatures to document from the directive arguments. By default, signatures are given as arguments, one per line.",
    "type": "method",
    "file_path": "sphinx\\sphinx\\directives\\__init__.py",
    "ast_data": "FunctionDef name:get_signatures arg:self arguments arg Assign Call Call If Return return:yes Call Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "run_init_ops",
    "source_code": "def run_init_ops(self, sess, tags, import_scope=None):\n    meta_graph_def = self.get_meta_graph_def_from_tags(tags)\n    with sess.graph.as_default():\n        asset_tensors_dictionary = get_asset_tensors(self._export_dir, meta_graph_def, import_scope=import_scope)\n        init_op = get_init_op(meta_graph_def, import_scope)\n        if init_op is not None:\n            sess.run(fetches=[init_op], feed_dict=asset_tensors_dictionary)",
    "docstring": "Run initialization ops defined in the . Args: sess: tf.compat.v1.Session to restore variable values. tags: a set of string tags identifying a MetaGraphDef. import_scope: Optional -- if specified, prepend this string followed by '/' to all loaded tensor names. This scope is applied to tensor instances loaded into the passed session, but it is *not* written through to the static protocol buffer that is returned.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\saved_model\\loader_impl.py",
    "ast_data": "FunctionDef name:run_init_ops arg:self arg:sess arg:tags arg:import_scope arguments arg arg arg arg Assign Call With Call Assign Call Assign Call If Compare Call"
  },
  {
    "library": "pytorch",
    "name": "export_stacks",
    "source_code": "def export_stacks(self, path: str, metric: str='self_cpu_time_total'):\n    assert self.profiler\n    return self.profiler.export_stacks(path, metric)",
    "docstring": "Save stack traces to a file Args: path (str): save stacks file to this location; metric (str): metric to use: \"self_cpu_time_total\" or \"self_cuda_time_total\"",
    "type": "method",
    "file_path": "pytorch\\torch\\profiler\\profiler.py",
    "ast_data": "FunctionDef name:export_stacks arg:self arg:path arg:metric arguments arg arg arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_call_wrapped_cell",
    "source_code": "def _call_wrapped_cell(self, inputs, state, cell_call_fn, **kwargs):\n    with ops.device(self._device):\n        return cell_call_fn(inputs, state, **kwargs)",
    "docstring": "Run the cell on specified device.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\layers\\legacy_rnn\\rnn_cell_wrapper_impl.py",
    "ast_data": "FunctionDef name:_call_wrapped_cell arg:self arg:inputs arg:state arg:cell_call_fn arguments arg arg arg arg arg With Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_lift_single_variable",
    "source_code": "def _lift_single_variable(old_variable, graph, variable_holder):\n    new_variable = resource_variable_ops.UninitializedVariable(shape=old_variable.shape, dtype=old_variable.dtype, name=old_variable.op.name, trainable=old_variable.trainable, extra_handle_data=old_variable.handle)\n    new_variable._initializer_op = old_variable._initializer_op\n    graph.add_capture(new_variable.handle, old_variable.handle)\n    graph.capture(new_variable.handle)\n    variable_name = new_variable.name.split(':')[0]\n    variable_holder._variables_by_name[variable_name] = new_variable\n    graph._weak_variables.append(weakref.ref(new_variable))\n    graph.watch_variable(new_variable)\n    return new_variable",
    "docstring": "Lifts out of the .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\wrap_function.py",
    "ast_data": "FunctionDef name:_lift_single_variable arg:old_variable arg:graph arg:variable_holder arguments arg arg arg Assign Call Assign Call Call Assign Call Assign Call Call Call Return return:yes"
  },
  {
    "library": "scrapy",
    "name": "_copytree",
    "source_code": "def _copytree(self, src: Path, dst: Path) -> None:\n    ignore = IGNORE\n    names = [x.name for x in src.iterdir()]\n    ignored_names = ignore(src, names)\n    if not dst.exists():\n        dst.mkdir(parents=True)\n    for name in names:\n        if name in ignored_names:\n            continue\n        srcname = src / name\n        dstname = dst / name\n        if srcname.is_dir():\n            self._copytree(srcname, dstname)\n        else:\n            copy2(srcname, dstname)\n            _make_writable(dstname)\n    copystat(src, dst)\n    _make_writable(dst)",
    "docstring": "Since the original function always creates the directory, to resolve the issue a new function had to be created. It's a simple copy and was reduced for this case. More info at:",
    "type": "method",
    "file_path": "scrapy\\scrapy\\commands\\startproject.py",
    "ast_data": "FunctionDef name:_copytree arg:self arg:src arg:dst arguments arg arg arg Assign Assign Call Assign Call If Call Call For If Compare Assign Assign If Call Call Call Call Call Call"
  },
  {
    "library": "django",
    "name": "DictWrapper",
    "source_code": "class DictWrapper(dict):\n\n    def __init__(self, data, func, prefix):\n        super().__init__(data)\n        self.func = func\n        self.prefix = prefix\n\n    def __getitem__(self, key):\n        use_func = key.startswith(self.prefix)\n        key = key.removeprefix(self.prefix)\n        value = super().__getitem__(key)\n        if use_func:\n            return self.func(value)\n        return value",
    "docstring": "Wrap accesses to a dictionary so that certain values (those starting with the specified prefix) are passed through a function before being returned. The prefix is removed before looking up the real value. Used by the SQL construction code to ensure that values are correctly quoted before being used.",
    "type": "class",
    "file_path": "django\\django\\utils\\datastructures.py",
    "ast_data": "ClassDef name:DictWrapper FunctionDef name:__init__ arg:self arg:data arg:func arg:prefix arguments arg arg arg arg Call Call Assign Assign FunctionDef name:__getitem__ arg:self arg:key arguments arg arg Assign Call Assign Call Assign Call Call If Return return:yes Call Return return:yes"
  },
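A short usage sketch of the wrapper described above: lookups under the prefix are routed through `func` with the prefix stripped before the real dict access.

```python
# Prefix-gated value transformation: "quoted_" keys go through the quoting func.
from django.utils.datastructures import DictWrapper

data = DictWrapper({"name": "users"}, lambda v: '"%s"' % v, "quoted_")
print(data["name"])         # users
print(data["quoted_name"])  # "users"
```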
  {
    "library": "tensorflow",
    "name": "log_survival_function",
    "source_code": "def log_survival_function(self, value, name='log_survival_function'):\n    return self._call_log_survival_function(value, name)",
    "docstring": "Log survival function. Given random variable , the survival function is defined: ``. Args: value: or . name: Python prepended to names of ops created by this function. Returns: of shape with values of type .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\distributions\\distribution.py",
    "ast_data": "FunctionDef name:log_survival_function arg:self arg:value arg:name arguments arg arg arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "is_symbolic_tensor",
    "source_code": "@tf_export('is_symbolic_tensor', v1=['is_symbolic_tensor'])\ndef is_symbolic_tensor(tensor) -> bool:\n    return isinstance(tensor, SymbolicTensor)",
    "docstring": "Test if is a symbolic Tensor. Args: tensor: a tensor-like object Returns: True if is a symbolic tensor (not an eager tensor).",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\ops.py",
    "ast_data": "FunctionDef name:is_symbolic_tensor arg:tensor arguments arg Return return:yes Call Call"
  },
  {
    "library": "django",
    "name": "to_python",
    "source_code": "def to_python(self, value):\n    if value in self.empty_values:\n        return ''\n    return str(value)",
    "docstring": "Return a string.",
    "type": "method",
    "file_path": "django\\django\\forms\\fields.py",
    "ast_data": "FunctionDef name:to_python arg:self arg:value arguments arg arg If Compare Return return:yes Return return:yes Call"
  },
  {
    "library": "django",
    "name": "PersistentRemoteUserMiddleware",
    "source_code": "class PersistentRemoteUserMiddleware(RemoteUserMiddleware):\n    force_logout_if_no_header = False",
    "docstring": "Middleware for web-server provided authentication on logon pages. Like RemoteUserMiddleware but keeps the user authenticated even if the `` key is not found in the request. Useful for setups when the external authentication is only expected to happen on some \"logon\" URL and the rest of the application wants to use Django's authentication mechanism.",
    "type": "class",
    "file_path": "django\\django\\contrib\\auth\\middleware.py",
    "ast_data": "ClassDef name:PersistentRemoteUserMiddleware Assign"
  },
  {
    "library": "cherrypy",
    "name": "login_screen",
    "source_code": "def login_screen(self, from_page='..', username='', error_msg='', **kwargs):\n    return (str('<html><body>\\nMessage: %(error_msg)s\\n<form method=\"post\" action=\"do_login\">\\n    Login: <input type=\"text\" name=\"username\" value=\"%(username)s\" size=\"10\" />\\n    <br />\\n    Password: <input type=\"password\" name=\"password\" size=\"10\" />\\n    <br />\\n    <input type=\"hidden\" name=\"from_page\" value=\"%(from_page)s\" />\\n    <br />\\n    <input type=\"submit\" />\\n</form>\\n</body></html>') % vars()).encode('utf-8')",
    "docstring": "Render the login HTML page.",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\lib\\cptools.py",
    "ast_data": "FunctionDef name:login_screen arg:self arg:from_page arg:username arg:error_msg arguments arg arg arg arg arg Return return:yes Call Call Call"
  },
  {
    "library": "pandas",
    "name": "construct_array_type",
    "source_code": "@classmethod\ndef construct_array_type(cls) -> type_t[SparseArray]:\n    from pandas.core.arrays.sparse.array import SparseArray\n    return SparseArray",
    "docstring": "Return the array type associated with this dtype. Returns ------- type",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\dtypes\\dtypes.py",
    "ast_data": "FunctionDef name:construct_array_type arg:cls arguments arg Return return:yes"
  },
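A brief usage note: the dtype carries a reference to its companion array class, which pandas uses to construct extension arrays generically from a dtype object.

```python
# The sparse dtype resolves to the SparseArray class.
import pandas as pd

dtype = pd.SparseDtype("float64")
cls = dtype.construct_array_type()   # pandas.arrays.SparseArray
print(cls([0.0, 1.0, 0.0]))          # sparse array; fill_value is nan for floats
```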
  {
    "library": "numpy",
    "name": "add_system_root",
    "source_code": "def add_system_root(library_root):\n    global default_lib_dirs\n    global default_include_dirs\n    library_root = os.path.normpath(library_root)\n    default_lib_dirs.extend((os.path.join(library_root, d) for d in _lib_dirs))\n    default_include_dirs.extend((os.path.join(library_root, d) for d in _include_dirs))",
    "docstring": "Add a package manager root to the include directories",
    "type": "function",
    "file_path": "numpy\\numpy\\distutils\\system_info.py",
    "ast_data": "FunctionDef name:add_system_root arg:library_root arguments arg Assign Call Call Call Call Call"
  },
  {
    "library": "pandas",
    "name": "new_axes",
    "source_code": "def new_axes(objs: list[Series | DataFrame], bm_axis: AxisInt, intersect: bool, sort: bool, keys: Iterable[Hashable] | None, names: list[HashableT] | None, axis: AxisInt, levels, verify_integrity: bool, ignore_index: bool) -> list[Index]:\n    return [_get_concat_axis_dataframe(objs, axis, ignore_index, keys, names, levels, verify_integrity) if i == bm_axis else get_objs_combined_axis(objs, axis=objs[0]._get_block_manager_axis(i), intersect=intersect, sort=sort) for i in range(2)]",
    "docstring": "Return the new [index, column] result for concat.",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\reshape\\concat.py",
    "ast_data": "FunctionDef name:new_axes arg:objs arg:bm_axis arg:intersect arg:sort arg:keys arg:names arg:axis arg:levels arg:verify_integrity arg:ignore_index arguments arg arg arg arg arg arg arg arg arg arg Return return:yes Compare Call Call Call Call"
  },
  {
    "library": "seaborn",
    "name": "Marker",
    "source_code": "class Marker(ObjectProperty):\n    null_value = MarkerStyle('')\n\n    def standardize(self, val: MarkerPattern) -> MarkerStyle:\n        return MarkerStyle(val)\n\n    def _default_values(self, n: int) -> list[MarkerStyle]:\n        markers = ['o', 'X', (4, 0, 45), 'P', (4, 0, 0), (4, 1, 0), '^', (4, 1, 45), 'v']\n        s = 5\n        while len(markers) < n:\n            a = 360 / (s + 1) / 2\n            markers.extend([(s + 1, 1, a), (s + 1, 0, a), (s, 1, 0), (s, 0, 0)])\n            s += 1\n        markers = [MarkerStyle(m) for m in markers[:n]]\n        return markers",
    "docstring": "Shape of points in scatter-type marks or lines with data points marked.",
    "type": "class",
    "file_path": "seaborn\\seaborn\\_core\\properties.py",
    "ast_data": "ClassDef name:Marker Assign Call FunctionDef name:standardize arg:self arg:val arguments arg arg Return return:yes Call FunctionDef name:_default_values arg:self arg:n arguments arg arg Assign Assign While Compare Call Assign Call Assign Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "_timer_set_interval",
    "source_code": "def _timer_set_interval(self):\n    pass",
    "docstring": "Used to set interval on underlying timer object.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\backend_bases.py",
    "ast_data": "FunctionDef name:_timer_set_interval arg:self arguments arg"
  },
  {
    "library": "kornia",
    "name": "polar_angle",
    "source_code": "@property\ndef polar_angle(self) -> Tensor:\n    return (self.scalar / self.norm()).acos()",
    "docstring": "Return the polar angle with shape :math:. Example: >>> q = Quaternion.identity() >>> q.polar_angle tensor(0., grad_fn=)",
    "type": "method",
    "file_path": "kornia\\kornia\\geometry\\quaternion.py",
    "ast_data": "FunctionDef name:polar_angle arg:self arguments arg Return return:yes Call Call"
  },
  {
    "library": "matplotlib",
    "name": "get_from_args_and_kwargs",
    "source_code": "@staticmethod\ndef get_from_args_and_kwargs(*args, **kwargs):\n    if isinstance(args[0], Triangulation):\n        triangulation, *args = args\n        if 'triangles' in kwargs:\n            _api.warn_external(\"Passing the keyword 'triangles' has no effect when also passing a Triangulation\")\n        if 'mask' in kwargs:\n            _api.warn_external(\"Passing the keyword 'mask' has no effect when also passing a Triangulation\")\n    else:\n        x, y, triangles, mask, args, kwargs = Triangulation._extract_triangulation_params(args, kwargs)\n        triangulation = Triangulation(x, y, triangles, mask)\n    return (triangulation, args, kwargs)",
    "docstring": "Return a Triangulation object from the args and kwargs, and the remaining args and kwargs with the consumed values removed. There are two alternatives: either the first argument is a Triangulation object, in which case it is returned, or the args and kwargs are sufficient to create a new Triangulation to return. In the latter case, see Triangulation.__init__ for the possible args and kwargs.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\tri\\_triangulation.py",
    "ast_data": "FunctionDef name:get_from_args_and_kwargs arguments arg arg If Call Assign If Compare Call If Compare Call Assign Call Assign Call Return return:yes"
  },
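Both calling conventions described above resolve to the same result; a small runnable sketch:

```python
# Either pass a pre-built Triangulation, or pass x/y and let one be created.
import numpy as np
from matplotlib.tri import Triangulation

x = np.array([0.0, 1.0, 0.0, 1.0])
y = np.array([0.0, 0.0, 1.0, 1.0])

tri = Triangulation(x, y)
t1, args, kwargs = Triangulation.get_from_args_and_kwargs(tri)
t2, args, kwargs = Triangulation.get_from_args_and_kwargs(x, y)
print(t1 is tri, type(t2).__name__)  # True Triangulation
```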
  {
    "library": "pytorch",
    "name": "_compute_distance",
    "source_code": "def _compute_distance(self, t):\n    dim = 0\n    size = t.size(dim)\n    slc = [slice(None)] * t.dim()\n    t_flatten = [t[tuple(slc[:dim] + [slice(i, i + 1)] + slc[dim + 1:])].reshape(-1) for i in range(size)]\n    t_flatten = torch.stack(t_flatten)\n    dist_matrix = self.dist_fn(t_flatten)\n    distance = torch.sum(torch.abs(dist_matrix), 1)\n    return distance",
    "docstring": "Compute distance across all entries in tensor along all dimension except for the one identified by dim. Args: t (torch.Tensor): tensor representing the parameter to prune Returns: distance (torch.Tensor): distance computed across filtters",
    "type": "method",
    "file_path": "pytorch\\torch\\ao\\pruning\\_experimental\\pruner\\FPGM_pruner.py",
    "ast_data": "FunctionDef name:_compute_distance arg:self arg:t arguments arg arg Assign Assign Call Assign Call Call Assign Call Call Call Call Assign Call Assign Call Assign Call Call Return return:yes"
  },
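A hedged sketch of the scoring idea behind `_compute_distance`, written with `torch.cdist` rather than the pruner's configurable `dist_fn`: filters whose summed distance to all other filters is smallest are the most redundant candidates.

```python
# Sketch only: flatten each filter, build a pairwise distance matrix, and sum
# |distances| per filter, mirroring the FPGM-style score described above.
import torch

weight = torch.randn(8, 4, 3, 3)             # e.g. a conv weight with 8 filters
flat = weight.reshape(weight.size(0), -1)    # one row per filter
dist_matrix = torch.cdist(flat, flat, p=2)   # pairwise L2 distances
scores = torch.abs(dist_matrix).sum(dim=1)   # total distance per filter
print(scores.argsort()[:2])                  # two filters closest to the rest
```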
  {
    "library": "tensorflow",
    "name": "not_equal",
    "source_code": "@dispatch.add_dispatch_support\n@doc_controls.do_not_generate_docs\ndef not_equal(x, y):\n    return math_ops.not_equal(x, y)",
    "docstring": "Element-wise inequality between two tensors. Args: x: Tensor or variable. y: Tensor or variable. Returns: A bool tensor.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\backend.py",
    "ast_data": "FunctionDef name:not_equal arg:x arg:y arguments arg arg Return return:yes Call"
  },
  {
    "library": "cherrypy",
    "name": "_Serving",
    "source_code": "class _Serving(_local):\n    request = _cprequest.Request(_httputil.Host('127.0.0.1', 80), _httputil.Host('127.0.0.1', 1111))\n    'The request object for the current thread.\\n\\n    In the main thread, and any threads which are not receiving HTTP\\n    requests, this is None.\\n    '\n    response = _cprequest.Response()\n    'The response object for the current thread.\\n\\n    In the main thread, and any threads which are not receiving HTTP\\n    requests, this is None.\\n    '\n\n    def load(self, request, response):\n        self.request = request\n        self.response = response\n\n    def clear(self):\n        self.__dict__.clear()",
    "docstring": "An interface for registering request and response objects. Rather than have a separate \"thread local\" object for the request and the response, this class works as a single threadlocal container for both objects (and any others which developers wish to define). In this way, we can easily dump those objects when we stop/start a new HTTP conversation, yet still refer to them as module-level globals in a thread-safe way.",
    "type": "class",
    "file_path": "cherrypy\\cherrypy\\__init__.py",
    "ast_data": "ClassDef name:_Serving Assign Call Call Call Assign Call FunctionDef name:load arg:self arg:request arg:response arguments arg arg arg Assign Assign FunctionDef name:clear arg:self arguments arg Call"
  },
  {
    "library": "pytorch",
    "name": "create_attach_sparsifier",
    "source_code": "def create_attach_sparsifier(model, **sparse_config):\n    data_norm_sparsifier = DataNormSparsifier(**sparse_config)\n    for name, parameter in model.named_parameters():\n        if 'emb_l' in name:\n            valid_name = get_valid_name(name)\n            data_norm_sparsifier.add_data(name=valid_name, data=parameter)\n    return data_norm_sparsifier",
    "docstring": "Create a DataNormSparsifier and the attach it to the model embedding layers Args: model (nn.Module) layer of the model that needs to be attached to the sparsifier sparse_config (Dict) Config to the DataNormSparsifier. Should contain the following keys: - sparse_block_shape - norm - sparsity_level",
    "type": "function",
    "file_path": "pytorch\\torch\\ao\\pruning\\_experimental\\data_sparsifier\\benchmarks\\evaluate_disk_savings.py",
    "ast_data": "FunctionDef name:create_attach_sparsifier arg:model arguments arg arg Assign Call For Call If Compare Assign Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "__init__",
    "source_code": "def __init__(self, products):\n    self.products = products",
    "docstring": ":param products: lists of dimensions to multiply",
    "type": "method",
    "file_path": "pytorch\\torch\\fx\\experimental\\migrate_gradual_types\\constraint.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:products arguments arg arg Assign"
  },
  {
    "library": "pytorch",
    "name": "forced_specializations",
    "source_code": "def forced_specializations(self) -> dict[str, sympy.Expr]:\n\n    def debug_name(src: Source) -> str:\n        name = src.name()\n        if self._dcp.source_name_to_debug_name:\n            return f'{self._dcp.source_name_to_debug_name[name]} = {name}'\n        else:\n            return name\n    return {debug_name(self._dcp.symbol_to_source[s][0]): val for s, val in self._substitutions.items() if s in self._marked_dynamic}",
    "docstring": "Returns a dictionary of the names of symbols to their specialized value",
    "type": "method",
    "file_path": "pytorch\\torch\\fx\\experimental\\symbolic_shapes.py",
    "ast_data": "FunctionDef name:forced_specializations arg:self arguments arg FunctionDef name:debug_name arg:src arguments arg Assign Call If Return return:yes Return return:yes Return return:yes Call Call Compare"
  },
  {
    "library": "pytorch",
    "name": "_annotate_conv_bn_relu",
    "source_code": "@register_annotator('conv_bn_relu')\ndef _annotate_conv_bn_relu(gm: torch.fx.GraphModule, quantization_config: Optional[QuantizationConfig], filter_fn: Optional[Callable[[Node], bool]]=None) -> Optional[list[list[Node]]]:\n    return _do_annotate_conv_bn(gm, quantization_config, filter_fn, has_relu=True)",
    "docstring": "Find conv + batchnorm + relu parititions Note: This is only used for QAT. In PTQ, batchnorm should already be fused into the conv.",
    "type": "function",
    "file_path": "pytorch\\torch\\ao\\quantization\\quantizer\\xnnpack_quantizer_utils.py",
    "ast_data": "FunctionDef name:_annotate_conv_bn_relu arg:gm arg:quantization_config arg:filter_fn arguments arg arg arg Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "forward",
    "source_code": "def forward(self, input: Tensor, offsets: Optional[Tensor]=None, per_sample_weights: Optional[Tensor]=None) -> Tensor:\n    return F.embedding_bag(input, self.weight, offsets, self.max_norm, self.norm_type, self.scale_grad_by_freq, self.mode, self.sparse, per_sample_weights, self.include_last_offset, self.padding_idx)",
    "docstring": "Forward pass of EmbeddingBag. Args: input (Tensor): Tensor containing bags of indices into the embedding matrix. offsets (Tensor, optional): Only used when :attr: is 1D. :attr: determines the starting index position of each bag (sequence) in :attr:. per_sample_weights (Tensor, optional): a tensor of float / double weights, or None to indicate all weights should be taken to be `per_sample_weightsoffsets(B, embedding_dim)inputoffsetsinput(B, N)modeoffsetsinput(N)offsetsinputoffsets(B)input` bags. Empty bags (i.e., having 0-length) will have returned vectors filled by zeros.",
    "type": "method",
    "file_path": "pytorch\\torch\\nn\\modules\\sparse.py",
    "ast_data": "FunctionDef name:forward arg:self arg:input arg:offsets arg:per_sample_weights arguments arg arg arg arg Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "_maybe_cast_slice_bound",
    "source_code": "def _maybe_cast_slice_bound(self, label, side: str_t):\n    if is_numeric_dtype(self.dtype):\n        return self._maybe_cast_indexer(label)\n    if (is_float(label) or is_integer(label)) and label not in self:\n        self._raise_invalid_indexer('slice', label)\n    return label",
    "docstring": "This function should be overloaded in subclasses that allow non-trivial casting on label-slice bounds, e.g. datetime-like indices allowing strings containing formatted datetimes. Parameters ---------- label : object side : {'left', 'right'} Returns ------- label : object Notes ----- Value of parameter should be validated in caller.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\indexes\\base.py",
    "ast_data": "FunctionDef name:_maybe_cast_slice_bound arg:self arg:label arg:side arguments arg arg arg If Call Return return:yes Call If BoolOp BoolOp Call Call Compare Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "create_autocast_variable",
    "source_code": "def create_autocast_variable(variable):\n    if not distributed_training_utils.is_distributed_variable(variable):\n        return AutoCastVariable(variable)\n\n    class AutoCastDistributedVariable(AutoCastVariable, variable.__class__):\n\n        def __repr__(self):\n            return '<AutoCastDistributedVariable dtype={v.dtype.name} dtype_to_cast_to={v._cast_dtype.name} inner_variable={v._variable}>'.format(v=self)\n    return AutoCastDistributedVariable(variable)",
    "docstring": "Creates an AutoCastVariable that wraps another variable. This typically just returns . But, if the variable is a DistributedVariable or one of its subclasses, we instead dynamically create a class that subclasses from both AutoCastVariable and variable.__class__. This is so the returned variable will still pass , which is required for DistributedVariables and its subclasses to work properly. Args: variable: A floating-point resource variable to wrap. Returns: An AutoCastVariable that wraps the variable.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\mixed_precision\\autocast_variable.py",
    "ast_data": "FunctionDef name:create_autocast_variable arg:variable arguments arg If Call Return return:yes Call ClassDef name:AutoCastDistributedVariable FunctionDef name:__repr__ arg:self arguments arg Return return:yes Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "ExecutionState",
    "source_code": "class ExecutionState(Enum):\n    NONE = auto()\n    WARMUP = auto()\n    RECORDING = auto()\n    EXECUTION = auto()",
    "docstring": "Represents the state of the CUDAGraph Tree. Will be None if there is no live current memory allocated in the cuda graph pool. Otherwise will reflect the state of the most recently executed node.",
    "type": "class",
    "file_path": "pytorch\\torch\\_inductor\\cudagraph_trees.py",
    "ast_data": "ClassDef name:ExecutionState Assign Call Assign Call Assign Call Assign Call"
  },
  {
    "library": "authlib",
    "name": "validate_acr_values_supported",
    "source_code": "def validate_acr_values_supported(self):\n    validate_array_value(self, 'acr_values_supported')",
    "docstring": "OPTIONAL. JSON array containing a list of the Authentication Context Class References that this OP supports.",
    "type": "method",
    "file_path": "authlib\\authlib\\oidc\\discovery\\models.py",
    "ast_data": "FunctionDef name:validate_acr_values_supported arg:self arguments arg Call"
  },
  {
    "library": "django",
    "name": "has_permission",
    "source_code": "def has_permission(self):\n    perms = self.get_permission_required()\n    return self.request.user.has_perms(perms)",
    "docstring": "Override this method to customize the way permissions are checked.",
    "type": "method",
    "file_path": "django\\django\\contrib\\auth\\mixins.py",
    "ast_data": "FunctionDef name:has_permission arg:self arguments arg Assign Call Return return:yes Call"
  },
  {
    "library": "django",
    "name": "__get__",
    "source_code": "def __get__(self, instance, cls=None):\n    if instance is None:\n        return self\n    res = instance.__dict__[self.name] = self.func(instance)\n    return res",
    "docstring": "Call the function and put the return value in instance.__dict__ so that subsequent attribute access on the instance returns the cached value instead of calling cached_property.__get__().",
    "type": "method",
    "file_path": "django\\django\\utils\\functional.py",
    "ast_data": "FunctionDef name:__get__ arg:self arg:instance arg:cls arguments arg arg arg If Compare Return return:yes Assign Call Return return:yes"
  },
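A short demonstration of the caching mechanism described above: after the first access the value lives in `instance.__dict__`, so the descriptor is never consulted again.

```python
# Second attribute access hits the instance dict, not __get__.
from django.utils.functional import cached_property

class Report:
    calls = 0

    @cached_property
    def total(self):
        Report.calls += 1
        return 42

r = Report()
print(r.total, r.total, Report.calls)  # 42 42 1
print("total" in r.__dict__)           # True: cached on the instance
```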
  {
    "library": "pytorch",
    "name": "tree_unflatten",
    "source_code": "def tree_unflatten(leaves: Iterable[Any], treespec: TreeSpec) -> PyTree:\n    if not isinstance(treespec, TreeSpec):\n        raise TypeError(f'tree_unflatten(leaves, treespec): Expected `treespec` to be instance of TreeSpec but got item of type {type(treespec)}.')\n    return treespec.unflatten(leaves)",
    "docstring": "Given a list of values and a TreeSpec, builds a pytree. This is the inverse operation of .",
    "type": "function",
    "file_path": "pytorch\\torch\\utils\\_pytree.py",
    "ast_data": "FunctionDef name:tree_unflatten arg:leaves arg:treespec arguments arg arg If Call Raise Call Call Return return:yes Call"
  },
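A round-trip sketch of the flatten/unflatten pair: flatten a pytree to leaves plus a spec, transform the leaves, then rebuild the original container structure.

```python
# tree_unflatten inverts tree_flatten given the recorded TreeSpec.
from torch.utils._pytree import tree_flatten, tree_unflatten

tree = {"a": [1, 2], "b": (3, 4)}
leaves, spec = tree_flatten(tree)
print(leaves)  # [1, 2, 3, 4]
print(tree_unflatten([x * 10 for x in leaves], spec))
# {'a': [10, 20], 'b': (30, 40)}
```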
  {
    "library": "tensorflow",
    "name": "_convert_anonymous_fields",
    "source_code": "def _convert_anonymous_fields(value, for_spec=False):\n    if isinstance(value, (int, float, bool, str, bytes, type(None), dtypes.DType, tensor_shape.TensorShape)):\n        return value\n    if isinstance(value, tuple):\n        return tuple((_convert_anonymous_fields(v, for_spec) for v in value))\n    if isinstance(value, typing.Mapping):\n        return immutable_dict.ImmutableDict([(_convert_anonymous_fields(k, for_spec), _convert_anonymous_fields(v, for_spec)) for k, v in value.items()])\n    if isinstance(value, (tensor.Tensor, composite_tensor.CompositeTensor)) and (not for_spec):\n        return value\n    if isinstance(value, type_spec.TypeSpec) and for_spec:\n        return value\n    raise ValueError(f'Cannot convert anonymous fields from an unsupported `value` argument: {value!r}.')",
    "docstring": "Type-checks and converts for inclusion in an AnonymousExtensionType.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\extension_type.py",
    "ast_data": "FunctionDef name:_convert_anonymous_fields arg:value arg:for_spec arguments arg arg If Call Call Return return:yes If Call Return return:yes Call Call If Call Return return:yes Call Call Call Call If BoolOp Call Return return:yes If BoolOp Call Return return:yes Raise Call"
  },
  {
    "library": "tensorflow",
    "name": "start",
    "source_code": "def start(self, workers=1, max_queue_size=10):\n    if self.use_multiprocessing:\n        self.executor_fn = self._get_executor_init(workers)\n    else:\n        self.executor_fn = lambda _: get_pool_class(False)(workers)\n    self.workers = workers\n    self.queue = queue.Queue(max_queue_size)\n    self.stop_signal = threading.Event()\n    self.run_thread = threading.Thread(target=self._run)\n    self.run_thread.daemon = True\n    self.run_thread.start()",
    "docstring": "Starts the handler's workers. Args: workers: Number of workers. max_queue_size: queue size (when full, workers could block on )",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\utils\\data_utils.py",
    "ast_data": "FunctionDef name:start arg:self arg:workers arg:max_queue_size arguments arg arg arg If Assign Call Assign arguments arg Call Call Assign Assign Call Assign Call Assign Call Assign Call"
  },
  {
    "library": "scipy",
    "name": "_read_int16",
    "source_code": "def _read_int16(f):\n    return np.int16(struct.unpack('>h', f.read(4)[2:4])[0])",
    "docstring": "Read a signed 16-bit integer",
    "type": "function",
    "file_path": "scipy\\scipy\\io\\_idl.py",
    "ast_data": "FunctionDef name:_read_int16 arg:f arguments arg Return return:yes Call Call Call"
  },
  {
    "library": "matplotlib",
    "name": "list_gui_frameworks",
    "source_code": "def list_gui_frameworks(self):\n    return [k for k in self._GUI_FRAMEWORK_TO_BACKEND if k != 'headless']",
    "docstring": "Return list of GUI frameworks used by Matplotlib backends. Returns ------- list of str GUI framework names.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\backends\\registry.py",
    "ast_data": "FunctionDef name:list_gui_frameworks arg:self arguments arg Return return:yes Compare"
  },
  {
    "library": "matplotlib",
    "name": "get_clip_box",
    "source_code": "def get_clip_box(self):\n    return self.clipbox",
    "docstring": "Return the clipbox.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\artist.py",
    "ast_data": "FunctionDef name:get_clip_box arg:self arguments arg Return return:yes"
  },
  {
    "library": "django",
    "name": "has_curve",
    "source_code": "@property\ndef has_curve(self):\n    return capi.has_curve_geom(self.ptr, 0)",
    "docstring": "Return True if the geometry is or has curve geometry.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\gdal\\geometries.py",
    "ast_data": "FunctionDef name:has_curve arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_unfold_continuations",
    "source_code": "def _unfold_continuations(code_string):\n    return code_string.replace('\\\\\\n', '')",
    "docstring": "Removes any backslash line continuations from the code.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\autograph\\pyct\\parser.py",
    "ast_data": "FunctionDef name:_unfold_continuations arg:code_string arguments arg Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "__init__",
    "source_code": "def __init__(self, boxin, boxout, **kwargs):\n    _api.check_isinstance(BboxBase, boxin=boxin, boxout=boxout)\n    super().__init__(**kwargs)\n    self._boxin = boxin\n    self._boxout = boxout\n    self.set_children(boxin, boxout)\n    self._mtx = None\n    self._inverted = None",
    "docstring": "Create a new that linearly transforms points from *boxin* to *boxout*.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\transforms.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:boxin arg:boxout arguments arg arg arg arg Call Call Call Assign Assign Call Assign Assign"
  },
  {
    "library": "pytorch",
    "name": "driver_allocated_memory",
    "source_code": "def driver_allocated_memory() -> int:\n    return torch._C._mps_driverAllocatedMemory()",
    "docstring": "Returns total GPU memory allocated by Metal driver for the process in bytes. .. note:: The returned size includes cached allocations in MPSAllocator pools as well as allocations from MPS/MPSGraph frameworks.",
    "type": "function",
    "file_path": "pytorch\\torch\\mps\\__init__.py",
    "ast_data": "FunctionDef name:driver_allocated_memory arguments Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "_write_map",
    "source_code": "def _write_map(self) -> None:\n    pass",
    "docstring": "No-op, future compatibility",
    "type": "method",
    "file_path": "pandas\\pandas\\io\\stata.py",
    "ast_data": "FunctionDef name:_write_map arg:self arguments arg"
  },
  {
    "library": "pytorch",
    "name": "get_growth_factor",
    "source_code": "def get_growth_factor(self) -> float:\n    return self._growth_factor",
    "docstring": "Return a Python float containing the scale growth factor.",
    "type": "method",
    "file_path": "pytorch\\torch\\amp\\grad_scaler.py",
    "ast_data": "FunctionDef name:get_growth_factor arg:self arguments arg Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "get_picker",
    "source_code": "def get_picker(self):\n    return self._picker",
    "docstring": "Return the picking behavior of the artist. The possible values are described in . See Also -------- .Artist.set_picker, .Artist.pickable, .Artist.pick",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\artist.py",
    "ast_data": "FunctionDef name:get_picker arg:self arguments arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "FxNetMinimizerRunFuncError",
    "source_code": "@compatibility(is_backward_compatible=False)\nclass FxNetMinimizerRunFuncError(Exception):\n    pass",
    "docstring": "Raised if error occurs during run_a or run_b functions",
    "type": "class",
    "file_path": "pytorch\\torch\\fx\\passes\\net_min_base.py",
    "ast_data": "ClassDef name:FxNetMinimizerRunFuncError Call"
  },
  {
    "library": "scrapy",
    "name": "next_request",
    "source_code": "@abstractmethod\ndef next_request(self) -> Request | None:\n    raise NotImplementedError",
    "docstring": "Return the next :class: to be processed, or ``.",
    "type": "method",
    "file_path": "scrapy\\scrapy\\core\\scheduler.py",
    "ast_data": "FunctionDef name:next_request arg:self arguments arg Raise"
  },
  {
    "library": "tensorflow",
    "name": "ConversionError",
    "source_code": "class ConversionError(AutoGraphError):\n    pass",
    "docstring": "Raised during the conversion process.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\autograph\\impl\\api.py",
    "ast_data": "ClassDef name:ConversionError"
  },
  {
    "library": "tensorflow",
    "name": "_CapturedObject",
    "source_code": "class _CapturedObject(object):\n\n    def __init__(self):\n        self._object = None\n\n    def capture(self, o):\n        if self._object:\n            raise RuntimeError('InternalError: _CapturedObject can capture only once. Please file bug.')\n        self._object = o\n\n    def get(self):\n        return self._object",
    "docstring": "A placeholder to capture an object.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\compiler\\xla\\xla.py",
    "ast_data": "ClassDef name:_CapturedObject FunctionDef name:__init__ arg:self arguments arg Assign FunctionDef name:capture arg:self arg:o arguments arg arg If Raise Call Assign FunctionDef name:get arg:self arguments arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "byte",
    "source_code": "def byte(self):\n    return self._to(torch.uint8)",
    "docstring": "Casts this storage to byte type.",
    "type": "method",
    "file_path": "pytorch\\torch\\storage.py",
    "ast_data": "FunctionDef name:byte arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "django",
    "name": "handle_token",
    "source_code": "@classmethod\ndef handle_token(cls, parser, token, name):\n    tokens = token.contents.split()\n    if len(tokens) > 1 and tokens[1] != 'as':\n        raise template.TemplateSyntaxError(\"First argument in '%s' must be 'as'\" % tokens[0])\n    if len(tokens) > 1:\n        varname = tokens[2]\n    else:\n        varname = None\n    return cls(varname, name)",
    "docstring": "Class method to parse prefix node and return a Node.",
    "type": "method",
    "file_path": "django\\django\\templatetags\\static.py",
    "ast_data": "FunctionDef name:handle_token arg:cls arg:parser arg:token arg:name arguments arg arg arg arg Assign Call If BoolOp Compare Call Compare Raise Call If Compare Call Assign Assign Return return:yes Call"
  },
  {
    "library": "numpy",
    "name": "ones",
    "source_code": "def ones(shape, dtype=None, order='C'):\n    a = ndarray.__new__(matrix, shape, dtype, order=order)\n    a.fill(1)\n    return a",
    "docstring": "Matrix of ones. Return a matrix of given shape and type, filled with ones. Parameters ---------- shape : {sequence of ints, int} Shape of the matrix dtype : data-type, optional The desired data-type for the matrix, default is np.float64. order : {'C', 'F'}, optional Whether to store matrix in C- or Fortran-contiguous order, default is 'C'. Returns ------- out : matrix Matrix of ones of given shape, dtype, and order. See Also -------- ones : Array of ones. matlib.zeros : Zero matrix. Notes ----- If has length one i.e. `out`. Examples -------- >>> np.matlib.ones((2,3)) matrix([[1., 1., 1.], [1., 1., 1.]]) >>> np.matlib.ones(2) matrix([[1., 1.]])",
    "type": "function",
    "file_path": "numpy\\numpy\\matlib.py",
    "ast_data": "FunctionDef name:ones arg:shape arg:dtype arg:order arguments arg arg arg Assign Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "predict_generator",
    "source_code": "def predict_generator(self, generator, steps=None, callbacks=None, max_queue_size=10, workers=1, use_multiprocessing=False, verbose=0):\n    warnings.warn('`Model.predict_generator` is deprecated and will be removed in a future version. Please use `Model.predict`, which supports generators.')\n    return self.predict(generator, steps=steps, max_queue_size=max_queue_size, workers=workers, use_multiprocessing=use_multiprocessing, verbose=verbose, callbacks=callbacks)",
    "docstring": "Generates predictions for the input samples from a data generator. DEPRECATED: now supports generators, so there is no longer any need to use this endpoint.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\training_v1.py",
    "ast_data": "FunctionDef name:predict_generator arg:self arg:generator arg:steps arg:callbacks arg:max_queue_size arg:workers arg:use_multiprocessing arg:verbose arguments arg arg arg arg arg arg arg arg Call Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "__len__",
    "source_code": "def __len__(self) -> int:\n    return len(self._info_axis)",
    "docstring": "Returns length of info axis",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\generic.py",
    "ast_data": "FunctionDef name:__len__ arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "pygame",
    "name": "_get_visible",
    "source_code": "def _get_visible(self):\n    return self._visible",
    "docstring": "return the visible value of that sprite",
    "type": "method",
    "file_path": "pygame\\src_py\\sprite.py",
    "ast_data": "FunctionDef name:_get_visible arg:self arguments arg Return return:yes"
  },
  {
    "library": "scipy",
    "name": "__call__",
    "source_code": "def __call__(self, dim=None, seed=None):\n    return ortho_group_frozen(dim, seed=seed)",
    "docstring": "Create a frozen O(N) distribution. See for more information.",
    "type": "method",
    "file_path": "scipy\\scipy\\stats\\_multivariate.py",
    "ast_data": "FunctionDef name:__call__ arg:self arg:dim arg:seed arguments arg arg arg Return return:yes Call"
  },
  {
    "library": "cryptography",
    "name": "rsa_recover_prime_factors",
    "source_code": "def rsa_recover_prime_factors(n: int, e: int, d: int) -> tuple[int, int]:\n    if d <= 1 or e <= 1:\n        raise ValueError(\"d, e can't be <= 1\")\n    if 17 != pow(17, e * d, n):\n        raise ValueError(\"n, d, e don't match\")\n    ktot = d * e - 1\n    t = ktot\n    while t % 2 == 0:\n        t = t // 2\n    spotted = False\n    tries = 0\n    while not spotted and tries < _MAX_RECOVERY_ATTEMPTS:\n        a = random.randint(2, n - 1)\n        tries += 1\n        k = t\n        while k < ktot:\n            cand = pow(a, k, n)\n            if cand != 1 and cand != n - 1 and (pow(cand, 2, n) == 1):\n                p = gcd(cand + 1, n)\n                spotted = True\n                break\n            k *= 2\n    if not spotted:\n        raise ValueError('Unable to compute factors p and q from exponent d.')\n    q, r = divmod(n, p)\n    assert r == 0\n    p, q = sorted((p, q), reverse=True)\n    return (p, q)",
    "docstring": "Compute factors p and q from the private exponent d. We assume that n has no more than two factors. This function is adapted from code in PyCrypto.",
    "type": "function",
    "file_path": "cryptography\\src\\cryptography\\hazmat\\primitives\\asymmetric\\rsa.py",
    "ast_data": "FunctionDef name:rsa_recover_prime_factors arg:n arg:e arg:d arguments arg arg arg If BoolOp Compare Compare Raise Call If Compare Call Raise Call Assign Assign While Compare Assign Assign Assign While BoolOp Compare Assign Call Assign While Compare Assign Call If BoolOp Compare Compare Compare Call Assign Call Assign If Raise Call Assign Call Compare Assign Call Return return:yes"
  },
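A round-trip check of the recovery routine above: generate a key, then reconstruct p and q from (n, e, d) alone.

```python
# rsa_recover_prime_factors returns (p, q) with p > q; compare as a set.
from cryptography.hazmat.primitives.asymmetric import rsa

key = rsa.generate_private_key(public_exponent=65537, key_size=2048)
priv = key.private_numbers()
pub = priv.public_numbers

p, q = rsa.rsa_recover_prime_factors(pub.n, pub.e, priv.d)
assert p * q == pub.n
assert {p, q} == {priv.p, priv.q}
```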
  {
    "library": "sphinx",
    "name": "PreserveTranslatableMessages",
    "source_code": "class PreserveTranslatableMessages(SphinxTransform):\n    default_priority = 10\n\n    def apply(self, **kwargs: Any) -> None:\n        for node in self.document.findall(addnodes.translatable):\n            node.preserve_original_messages()",
    "docstring": "Preserve original translatable messages before translation",
    "type": "class",
    "file_path": "sphinx\\sphinx\\transforms\\i18n.py",
    "ast_data": "ClassDef name:PreserveTranslatableMessages Assign FunctionDef name:apply arg:self arguments arg arg For Call Call"
  },
  {
    "library": "pytorch",
    "name": "_get_backend_config",
    "source_code": "def _get_backend_config(obj: Any, dict_key: str) -> Optional[BackendConfig]:\n    if isinstance(obj, BackendConfig) or obj is None:\n        return obj\n    if isinstance(obj, dict):\n        return BackendConfig.from_dict(obj)\n    raise ValueError(f\"\"\"Expected BackendConfig in prepare_custom_config_dict[\"{dict_key}\"], got '{type(obj)}'\"\"\")",
    "docstring": "Convert the given object into a BackendConfig if possible, else throw an exception.",
    "type": "method",
    "file_path": "pytorch\\torch\\ao\\quantization\\fx\\custom_config.py",
    "ast_data": "FunctionDef name:_get_backend_config arg:obj arg:dict_key arguments arg arg If BoolOp Call Compare Return return:yes If Call Return return:yes Call Raise Call Call"
  },
  {
    "library": "kornia",
    "name": "xmin",
    "source_code": "@property\ndef xmin(self) -> torch.Tensor:\n    return self._data[..., 0]",
    "docstring": "The bounding box top-left x-coordinate.",
    "type": "method",
    "file_path": "kornia\\kornia\\contrib\\face_detection.py",
    "ast_data": "FunctionDef name:xmin arg:self arguments arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "evaluate_guards_for_args",
    "source_code": "def evaluate_guards_for_args(self, placeholders: Sequence[FakeTensor], args: Sequence[Tensor], *, ignore_static: bool=True) -> bool:\n    code = self.produce_guards_expression(placeholders, ignore_static=ignore_static)\n    if code:\n        return self.evaluate_guards_expression(code, args)\n    return True",
    "docstring": "Generate guards for a graph's placeholder values and evaluate the guards with args",
    "type": "method",
    "file_path": "pytorch\\torch\\fx\\experimental\\symbolic_shapes.py",
    "ast_data": "FunctionDef name:evaluate_guards_for_args arg:self arg:placeholders arg:args arguments arg arg arg arg Assign Call If Return return:yes Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, proto=None):\n    self._proto = proto",
    "docstring": "Do not use this constructor; use the factory functions below.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\compiler\\xla\\experimental\\xla_sharding.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:proto arguments arg arg Assign"
  },
  {
    "library": "scikit-learn",
    "name": "svd_flip",
    "source_code": "def svd_flip(u, v, u_based_decision=True):\n    xp, _ = get_namespace(*[a for a in [u, v] if a is not None])\n    if u_based_decision:\n        max_abs_u_cols = xp.argmax(xp.abs(u.T), axis=1)\n        shift = xp.arange(u.T.shape[0], device=device(u))\n        indices = max_abs_u_cols + shift * u.T.shape[1]\n        signs = xp.sign(xp.take(xp.reshape(u.T, (-1,)), indices, axis=0))\n        u *= signs[np.newaxis, :]\n        if v is not None:\n            v *= signs[:, np.newaxis]\n    else:\n        max_abs_v_rows = xp.argmax(xp.abs(v), axis=1)\n        shift = xp.arange(v.shape[0], device=device(v))\n        indices = max_abs_v_rows + shift * v.shape[1]\n        signs = xp.sign(xp.take(xp.reshape(v, (-1,)), indices, axis=0))\n        if u is not None:\n            u *= signs[np.newaxis, :]\n        v *= signs[:, np.newaxis]\n    return (u, v)",
    "docstring": "Sign correction to ensure deterministic output from SVD. Adjusts the columns of u and the rows of v such that the loadings in the columns in u that are largest in absolute value are always positive. If u_based_decision is False, then the same sign correction is applied to so that the rows in v that are largest in absolute value are always positive. Parameters ---------- u : ndarray Parameters u and v are the output of or :func:, with matching inner dimensions so one can compute . u can be None if is False. v : ndarray Parameters u and v are the output of or :func:, with matching inner dimensions so one can compute . The input v should really be called vt to be consistent with scipy's output. v can be None if is True. u_based_decision : bool, default=True If True, use the columns of u as the basis for sign flipping. Otherwise, use the rows of v. The choice of which variable to base the decision on is generally algorithm dependent. Returns ------- u_adjusted : ndarray Array u with adjusted columns and the same dimensions as u. v_adjusted : ndarray Array v with adjusted rows and the same dimensions as v.",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\utils\\extmath.py",
    "ast_data": "FunctionDef name:svd_flip arg:u arg:v arg:u_based_decision arguments arg arg arg Assign Call Compare If Assign Call Call Assign Call Call Assign Assign Call Call Call If Compare Assign Call Call Assign Call Call Assign Assign Call Call Call If Compare Return return:yes"
  },
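  {
    "library": "scikit-learn",
    "name": "svd_flip_usage_sketch",
    "note": "Editor's addition, not part of the scikit-learn sources: a short runnable sketch showing that svd_flip fixes the sign ambiguity of an SVD without changing the reconstruction.",
    "example_code": "# Hedged usage sketch (editor's addition).\nimport numpy as np\nfrom sklearn.utils.extmath import svd_flip\n\nrng = np.random.default_rng(0)\nX = rng.normal(size=(6, 4))\nU, S, Vt = np.linalg.svd(X, full_matrices=False)\nU2, Vt2 = svd_flip(U, Vt)  # u-based decision (default)\n# The sign flips cancel out, so the reconstruction is unchanged:\nassert np.allclose((U2 * S) @ Vt2, X)\n# The largest-magnitude entry of each column of U2 is now non-negative:\nidx = np.argmax(np.abs(U2), axis=0)\nassert np.all(U2[idx, np.arange(U2.shape[1])] >= 0)",
    "type": "usage_example"
  },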
  {
    "library": "seaborn",
    "name": "default",
    "source_code": "@property\ndef default(self) -> Any:\n    if self._val is not None:\n        return self._val\n    elif self._rc is not None:\n        return mpl.rcParams.get(self._rc)",
    "docstring": "Get the default value for this feature, or access the relevant rcParam.",
    "type": "method",
    "file_path": "seaborn\\seaborn\\_marks\\base.py",
    "ast_data": "FunctionDef name:default arg:self arguments arg If Compare Return return:yes If Compare Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "shape",
    "source_code": "@property\ndef shape(self):\n    return self._shape",
    "docstring": "The overall shape, combining all shards along axis .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\sharded_variable.py",
    "ast_data": "FunctionDef name:shape arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "is_copy_node",
    "source_code": "def is_copy_node(node_name):\n    return node_name.startswith('__copy_')",
    "docstring": "Determine whether a node name is that of a debug Copy node. Such nodes are inserted by TensorFlow core upon request in RunOptions.debug_options.debug_tensor_watch_opts. Args: node_name: Name of the node. Returns: A bool indicating whether the input argument is the name of a debug Copy node.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\debug\\lib\\debug_graphs.py",
    "ast_data": "FunctionDef name:is_copy_node arg:node_name arguments arg Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "_attach_methods",
    "source_code": "def _attach_methods(self):\n    self._cdfvec = vectorize(self._cdf_single, otypes='d')\n    self.vecentropy = vectorize(self._entropy)\n    self._attach_argparser_methods()\n    _vec_generic_moment = vectorize(_drv2_moment, otypes='d')\n    _vec_generic_moment.nin = self.numargs + 2\n    self.generic_moment = types.MethodType(_vec_generic_moment, self)\n    _vppf = vectorize(_drv2_ppfsingle, otypes='d')\n    _vppf.nin = self.numargs + 2\n    self._ppfvec = types.MethodType(_vppf, self)\n    self._cdfvec.nin = self.numargs + 1",
    "docstring": "Attaches dynamically created methods to the rv_discrete instance.",
    "type": "method",
    "file_path": "scipy\\scipy\\stats\\_distn_infrastructure.py",
    "ast_data": "FunctionDef name:_attach_methods arg:self arguments arg Assign Call Assign Call Call Assign Call Assign Assign Call Assign Call Assign Assign Call Assign"
  },
  {
    "library": "pandas",
    "name": "_ensure_str",
    "source_code": "def _ensure_str(name):\n    if isinstance(name, str):\n        name = str(name)\n    return name",
    "docstring": "Ensure that an index / column name is a str (python 3); otherwise they may be np.string dtype. Non-string dtypes are passed through unchanged.",
    "type": "function",
    "file_path": "pandas\\pandas\\io\\pytables.py",
    "ast_data": "FunctionDef name:_ensure_str arg:name arguments arg If Call Assign Call Return return:yes"
  },
  {
    "library": "sphinx",
    "name": "DefaultValue",
    "source_code": "class DefaultValue:\n\n    def __init__(self, value: str) -> None:\n        self.value = value\n\n    def __eq__(self, other: object) -> bool:\n        return self.value == other\n\n    def __hash__(self) -> int:\n        return hash(self.value)\n\n    def __repr__(self) -> str:\n        return self.value",
    "docstring": "A simple wrapper for default value of the parameters of overload functions.",
    "type": "class",
    "file_path": "sphinx\\sphinx\\util\\inspect.py",
    "ast_data": "ClassDef name:DefaultValue FunctionDef name:__init__ arg:self arg:value arguments arg arg Assign FunctionDef name:__eq__ arg:self arg:other arguments arg arg Return return:yes Compare FunctionDef name:__hash__ arg:self arguments arg Return return:yes Call FunctionDef name:__repr__ arg:self arguments arg Return return:yes"
  },
  {
    "library": "django",
    "name": "get_redirect_field_name",
    "source_code": "def get_redirect_field_name(self):\n    return self.redirect_field_name",
    "docstring": "Override this method to override the redirect_field_name attribute.",
    "type": "method",
    "file_path": "django\\django\\contrib\\auth\\mixins.py",
    "ast_data": "FunctionDef name:get_redirect_field_name arg:self arguments arg Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "atleast_nd",
    "source_code": "def atleast_nd(x: Array, /, *, ndim: int, xp: ModuleType | None=None) -> Array:\n    if xp is None:\n        xp = array_namespace(x)\n    if x.ndim < ndim:\n        x = xp.expand_dims(x, axis=0)\n        x = atleast_nd(x, ndim=ndim, xp=xp)\n    return x",
    "docstring": "Recursively expand the dimension of an array to at least . Parameters ---------- x : array Input array. ndim : int The minimum number of dimensions for the result. xp : array_namespace, optional The standard-compatible namespace for . Default: infer. Returns ------- array An array with `ndimndimx` >> import array_api_strict as xp >>> import array_api_extra as xpx >>> x = xp.asarray([1]) >>> xpx.atleast_nd(x, ndim=3, xp=xp) Array([[[1]]], dtype=array_api_strict.int64) >>> x = xp.asarray([[[1, 2], ... [3, 4]]]) >>> xpx.atleast_nd(x, ndim=1, xp=xp) is x True",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\externals\\array_api_extra\\_lib\\_funcs.py",
    "ast_data": "FunctionDef name:atleast_nd arguments arg arg arg If Compare Assign Call If Compare Assign Call Assign Call Return return:yes"
  },
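  {
    "library": "scikit-learn",
    "name": "atleast_nd_behavior_sketch",
    "note": "Editor's addition: the same recursive dimension expansion restated with plain NumPy, since the function above lives in a vendored array-API module. The name np_atleast_nd is invented for this sketch.",
    "example_code": "# Hedged behavioral sketch (editor's addition), plain NumPy.\nimport numpy as np\n\ndef np_atleast_nd(x, ndim):\n    # Prepend size-1 axes until the array has at least `ndim` dimensions.\n    while x.ndim < ndim:\n        x = np.expand_dims(x, axis=0)\n    return x\n\nassert np_atleast_nd(np.asarray([1]), ndim=3).shape == (1, 1, 1)\nx = np.ones((2, 2))\nassert np_atleast_nd(x, ndim=1) is x  # already wide enough: returned as-is",
    "type": "usage_example"
  },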
  {
    "library": "kornia",
    "name": "batch_2x2_det",
    "source_code": "def batch_2x2_det(m: Tensor) -> Tensor:\n    a = m[..., 0, 0]\n    b = m[..., 0, 1]\n    c = m[..., 1, 0]\n    d = m[..., 1, 1]\n    return a * d - b * c",
    "docstring": "Returns determinant of batch of 2x2 matrices.",
    "type": "function",
    "file_path": "kornia\\kornia\\feature\\adalam\\utils.py",
    "ast_data": "FunctionDef name:batch_2x2_det arg:m arguments arg Assign Assign Assign Assign Return return:yes"
  },
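  {
    "library": "kornia",
    "name": "batch_2x2_det_check_sketch",
    "note": "Editor's addition: the closed-form 2x2 determinant ad - bc checked against torch.linalg.det on a random batch; the local re-definition mirrors the function above.",
    "example_code": "# Hedged sketch (editor's addition).\nimport torch\n\ndef batch_2x2_det(m):\n    return m[..., 0, 0] * m[..., 1, 1] - m[..., 0, 1] * m[..., 1, 0]\n\nm = torch.randn(5, 2, 2, dtype=torch.float64)\nassert torch.allclose(batch_2x2_det(m), torch.linalg.det(m))",
    "type": "usage_example"
  },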
  {
    "library": "tensorflow",
    "name": "huber",
    "source_code": "@dispatch.add_dispatch_support\ndef huber(y_true, y_pred, delta=1.0):\n    y_pred = math_ops.cast(y_pred, dtype=backend.floatx())\n    y_true = math_ops.cast(y_true, dtype=backend.floatx())\n    delta = math_ops.cast(delta, dtype=backend.floatx())\n    error = math_ops.subtract(y_pred, y_true)\n    abs_error = math_ops.abs(error)\n    half = tensor_conversion.convert_to_tensor_v2_with_dispatch(0.5, dtype=abs_error.dtype)\n    return backend.mean(array_ops.where_v2(abs_error <= delta, half * math_ops.square(error), delta * abs_error - half * math_ops.square(delta)), axis=-1)",
    "docstring": "Computes Huber loss value. For each value x in : where d is . See: Args: y_true: tensor of true targets. y_pred: tensor of predicted targets. delta: A float, the point where the Huber loss function changes from a quadratic to linear. Returns: Tensor with one scalar loss entry per sample.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\losses.py",
    "ast_data": "FunctionDef name:huber arg:y_true arg:y_pred arg:delta arguments arg arg arg Assign Call Call Assign Call Call Assign Call Call Assign Call Assign Call Assign Call Return return:yes Call Call Compare Call Call"
  },
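  {
    "library": "tensorflow",
    "name": "huber_formula_sketch",
    "note": "Editor's addition: the piecewise Huber formula from the docstring restated in plain NumPy (mean over the last axis, matching the reduction in the function above).",
    "example_code": "# Hedged sketch (editor's addition), plain NumPy.\nimport numpy as np\n\ndef huber_np(y_true, y_pred, delta=1.0):\n    error = np.asarray(y_pred, float) - np.asarray(y_true, float)\n    abs_error = np.abs(error)\n    quadratic = 0.5 * np.square(error)           # used where |error| <= delta\n    linear = delta * abs_error - 0.5 * delta**2  # used where |error| > delta\n    return np.mean(np.where(abs_error <= delta, quadratic, linear), axis=-1)\n\n# errors 0.5 (quadratic branch) and 3.0 (linear branch):\nprint(huber_np([0.0, 0.0], [0.5, 3.0]))  # (0.125 + 2.5) / 2 = 1.3125",
    "type": "usage_example"
  },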
  {
    "library": "pytorch",
    "name": "_get_object_coll_device",
    "source_code": "def _get_object_coll_device(group: Optional[ProcessGroup]=None) -> str:\n    group = group or _get_default_group()\n    if not isinstance(group, ProcessGroup):\n        warnings.warn(f'You are using a Backend {type(group)} as a ProcessGroup. This usage is deprecated since PyTorch 2.0. Please use a public API of PyTorch Distributed instead.')\n        if isinstance(group, ProcessGroupGloo):\n            return 'cpu'\n        else:\n            raise ValueError(f'Expecting a ProcessGroup, but got a {type(group)}.')\n    '\\n    ``group._device_types`` is a property pybind that returns the devices\\n    (\"cpu\", \"cuda\", etc) supported by ``group``. Can be multiple if the\\n    ``group`` supports multiple devices.\\n    '\n    devices = group._device_types\n    if len(devices) == 1:\n        return devices[0].type\n    elif len(devices) == 0:\n        return 'cpu'\n    elif torch.device('cpu') in devices:\n        return 'cpu'\n    else:\n        return devices[0].type",
    "docstring": ".. note:: This is an internal helper and does not have backward compatibility, please use with caution. Return the device type to use with ``.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\distributed_c10d.py",
    "ast_data": "FunctionDef name:_get_object_coll_device arg:group arguments arg Assign BoolOp Call If Call Call Call If Call Return return:yes Raise Call Call Assign If Compare Call Return return:yes If Compare Call Return return:yes If Compare Call Return return:yes Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "compute_values",
    "source_code": "def compute_values(self, value_names: Sequence[str], args=(), kwargs=None) -> Sequence[torch.Tensor]:\n    if kwargs is None:\n        kwargs = {}\n    self.release()\n    values = _create_value_mapping(self.model.graph)\n    for name in value_names:\n        if name not in values:\n            raise ValueError(f\"Value '{name}' not found in the model. Please provide a valid value name.\")\n    temporary_outputs = [values[name] for name in value_names]\n    with _set_graph_outputs(self.model.graph, temporary_outputs):\n        try:\n            result = self(*args, **kwargs)\n        finally:\n            self.release()\n    return result",
    "docstring": "Compute the values of the specified names in the ONNX model. This method is used to compute the values of the specified names in the ONNX model. The values are returned as a dictionary mapping names to tensors. Args: value_names: The names of the values to compute. Returns: A dictionary mapping names to tensors.",
    "type": "method",
    "file_path": "pytorch\\torch\\onnx\\_internal\\exporter\\_onnx_program.py",
    "ast_data": "FunctionDef name:compute_values arg:self arg:value_names arg:args arg:kwargs arguments arg arg arg arg If Compare Assign Call Assign Call For If Compare Raise Call Assign With Call Try Assign Call Call Return return:yes"
  },
  {
    "library": "seaborn",
    "name": "set_color_codes",
    "source_code": "def set_color_codes(palette='deep'):\n    if palette == 'reset':\n        colors = [(0.0, 0.0, 1.0), (0.0, 0.5, 0.0), (1.0, 0.0, 0.0), (0.75, 0.0, 0.75), (0.75, 0.75, 0.0), (0.0, 0.75, 0.75), (0.0, 0.0, 0.0)]\n    elif not isinstance(palette, str):\n        err = 'set_color_codes requires a named seaborn palette'\n        raise TypeError(err)\n    elif palette in SEABORN_PALETTES:\n        if not palette.endswith('6'):\n            palette = palette + '6'\n        colors = SEABORN_PALETTES[palette] + [(0.1, 0.1, 0.1)]\n    else:\n        err = f\"Cannot set colors with palette '{palette}'\"\n        raise ValueError(err)\n    for code, color in zip('bgrmyck', colors):\n        rgb = mpl.colors.colorConverter.to_rgb(color)\n        mpl.colors.colorConverter.colors[code] = rgb",
    "docstring": "Change how matplotlib color shorthands are interpreted. Calling this will change how shorthand codes like \"b\" or \"g\" are interpreted by matplotlib in subsequent plots. Parameters ---------- palette : {deep, muted, pastel, dark, bright, colorblind} Named seaborn palette to use as the source of colors. See Also -------- set : Color codes can be set through the high-level seaborn style manager. set_palette : Color codes can also be set through the function that sets the matplotlib color cycle.",
    "type": "function",
    "file_path": "seaborn\\seaborn\\palettes.py",
    "ast_data": "FunctionDef name:set_color_codes arg:palette arguments arg If Compare Assign If Call Assign Raise Call If Compare If Call Assign Assign Assign Raise Call For Call Assign Call Assign"
  },
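  {
    "library": "seaborn",
    "name": "set_color_codes_usage_sketch",
    "note": "Editor's addition: a minimal usage sketch remapping matplotlib's one-letter color codes to a seaborn palette and then restoring the defaults.",
    "example_code": "# Hedged usage sketch (editor's addition).\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\nsns.set_color_codes('muted')\nplt.plot([0, 1], [0, 1], 'b')  # 'b' now draws the muted-palette blue\nsns.set_color_codes('reset')   # restore matplotlib's original codes",
    "type": "usage_example"
  },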
  {
    "library": "tensorflow",
    "name": "_check_num_rows_possibly_add_asserts",
    "source_code": "def _check_num_rows_possibly_add_asserts(self):\n    if self._assert_proper_shapes:\n        self._num_rows = control_flow_ops.with_dependencies([check_ops.assert_rank(self._num_rows, 0, message='Argument num_rows must be a 0-D Tensor.'), check_ops.assert_non_negative(self._num_rows, message='Argument num_rows must be non-negative.')], self._num_rows)\n    if not self._num_rows.dtype.is_integer:\n        raise TypeError('Argument num_rows must be integer type.  Found: %s' % self._num_rows)\n    num_rows_static = self._num_rows_static\n    if num_rows_static is None:\n        return\n    if num_rows_static.ndim != 0:\n        raise ValueError('Argument num_rows must be a 0-D Tensor.  Found: %s' % num_rows_static)\n    if num_rows_static < 0:\n        raise ValueError('Argument num_rows must be non-negative.  Found: %s' % num_rows_static)",
    "docstring": "Static check of init arg , possibly add asserts.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\linalg\\linear_operator_identity.py",
    "ast_data": "FunctionDef name:_check_num_rows_possibly_add_asserts arg:self arguments arg If Assign Call Call Call If Raise Call Assign If Compare Return return:no If Compare Raise Call If Compare Raise Call"
  },
  {
    "library": "numpy",
    "name": "BlasSrcNotFoundError",
    "source_code": "class BlasSrcNotFoundError(BlasNotFoundError):\n    pass",
    "docstring": "Blas ( sources not found. Directories to search for the sources can be specified in the numpy/distutils/site.cfg file (section [blas_src]) or by setting the BLAS_SRC environment variable.",
    "type": "class",
    "file_path": "numpy\\numpy\\distutils\\system_info.py",
    "ast_data": "ClassDef name:BlasSrcNotFoundError"
  },
  {
    "library": "django",
    "name": "datetime_cast_time_sql",
    "source_code": "def datetime_cast_time_sql(self, sql, params, tzname):\n    raise NotImplementedError('subclasses of BaseDatabaseOperations may require a datetime_cast_time_sql() method')",
    "docstring": "Return the SQL to cast a datetime value to time value.",
    "type": "method",
    "file_path": "django\\django\\db\\backends\\base\\operations.py",
    "ast_data": "FunctionDef name:datetime_cast_time_sql arg:self arg:sql arg:params arg:tzname arguments arg arg arg arg Raise Call"
  },
  {
    "library": "django",
    "name": "distinct_sql",
    "source_code": "def distinct_sql(self, fields, params):\n    if fields:\n        raise NotSupportedError('DISTINCT ON fields is not supported by this database backend')\n    else:\n        return (['DISTINCT'], [])",
    "docstring": "Return an SQL DISTINCT clause which removes duplicate rows from the result set. If any fields are given, only check the given fields for duplicates.",
    "type": "method",
    "file_path": "django\\django\\db\\backends\\base\\operations.py",
    "ast_data": "FunctionDef name:distinct_sql arg:self arg:fields arg:params arguments arg arg arg If Raise Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "update_add",
    "source_code": "@doc_controls.do_not_generate_docs\ndef update_add(x, increment):\n    return state_ops.assign_add(x, increment)",
    "docstring": "Update the value of by adding . Args: x: A Variable. increment: A tensor of same shape as . Returns: The variable updated.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\backend.py",
    "ast_data": "FunctionDef name:update_add arg:x arg:increment arguments arg arg Return return:yes Call"
  },
  {
    "library": "kornia",
    "name": "__getattr__",
    "source_code": "def __getattr__(self, item: str) -> object:\n    self._load()\n    return getattr(self.module, item)",
    "docstring": "Load the module (if not already loaded) and returns the requested attribute. This method is called when an attribute of the LazyLoader instance is accessed. It ensures that the module is loaded and then returns the requested attribute. Args: item: The name of the attribute to be accessed. Returns: The requested attribute of the loaded module.",
    "type": "method",
    "file_path": "kornia\\kornia\\core\\external.py",
    "ast_data": "FunctionDef name:__getattr__ arg:self arg:item arguments arg arg Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "process_update",
    "source_code": "def process_update(x):\n    if callable(x):\n        update = lambda: process_update(x())\n        return update()\n    elif isinstance(x, ops.Operation):\n        update = x\n    elif hasattr(x, 'op'):\n        update = x.op\n    else:\n        update = tensor_conversion.convert_to_tensor_v2_with_dispatch(x)\n    reachable = tf_utils.get_reachable_from_inputs(relevant_inputs, [update])\n    update._unconditional_update = update not in reachable\n    return update",
    "docstring": "Standardize update ops. Args: x: Tensor, op, or callable. Returns: An update op.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\base_layer_v1.py",
    "ast_data": "FunctionDef name:process_update arg:x arguments arg If Call Assign arguments Call Call Return return:yes Call If Call Assign If Call Assign Assign Call Assign Call Assign Compare Return return:yes"
  },
  {
    "library": "django",
    "name": "comment",
    "source_code": "@register.tag\ndef comment(parser, token):\n    parser.skip_past('endcomment')\n    return CommentNode()",
    "docstring": "Ignore everything between ``.",
    "type": "function",
    "file_path": "django\\django\\template\\defaulttags.py",
    "ast_data": "FunctionDef name:comment arg:parser arg:token arguments arg arg Call Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "time_cdist",
    "source_code": "def time_cdist(self, num_points, metric):\n    distance.cdist(self.points, self.points, self.metric, **self.kwargs)",
    "docstring": "Time scipy.spatial.distance.cdist over a range of input data sizes and metrics.",
    "type": "method",
    "file_path": "scipy\\benchmarks\\benchmarks\\spatial.py",
    "ast_data": "FunctionDef name:time_cdist arg:self arg:num_points arg:metric arguments arg arg arg Call"
  },
  {
    "library": "django",
    "name": "execute_sql_flush",
    "source_code": "def execute_sql_flush(self, sql_list):\n    with transaction.atomic(using=self.connection.alias, savepoint=self.connection.features.can_rollback_ddl):\n        with self.connection.cursor() as cursor:\n            for sql in sql_list:\n                cursor.execute(sql)",
    "docstring": "Execute a list of SQL statements to flush the database.",
    "type": "method",
    "file_path": "django\\django\\db\\backends\\base\\operations.py",
    "ast_data": "FunctionDef name:execute_sql_flush arg:self arg:sql_list arguments arg arg With Call With Call For Call"
  },
  {
    "library": "pytorch",
    "name": "bfloat16",
    "source_code": "def bfloat16(self) -> Self:\n    return self._apply(lambda t: t.bfloat16() if t.is_floating_point() else t)",
    "docstring": "Casts all floating point parameters and buffers to `` datatype. .. note:: This method modifies the module in-place. Returns: Module: self",
    "type": "method",
    "file_path": "pytorch\\torch\\nn\\modules\\module.py",
    "ast_data": "FunctionDef name:bfloat16 arg:self arguments arg Return return:yes Call arguments arg Call Call"
  },
  {
    "library": "tensorflow",
    "name": "static_nvals",
    "source_code": "@property\ndef static_nvals(self):\n    if self._nvals is not None:\n        nvals = tensor_util.constant_value(self._nvals)\n        if nvals is not None:\n            return nvals\n    if self._value_rowids is not None:\n        nvals = tensor_shape.dimension_at_index(self._value_rowids.shape, 0)\n        if nvals.value is not None:\n            return nvals.value\n    return None",
    "docstring": "The number of values in this partition, if statically known. Returns: The number of values in this partition as an (if statically known); or (otherwise).",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\row_partition.py",
    "ast_data": "FunctionDef name:static_nvals arg:self arguments arg If Compare Assign Call If Compare Return return:yes If Compare Assign Call If Compare Return return:yes Return return:no"
  },
  {
    "library": "matplotlib",
    "name": "__init__",
    "source_code": "def __init__(self, ax, *, interpolation='nearest', **kwargs):\n    super().__init__(ax, **kwargs)\n    self.set_interpolation(interpolation)",
    "docstring": "Parameters ---------- ax : The Axes the image will belong to. interpolation : {'nearest', 'bilinear'}, default: 'nearest' The interpolation scheme used in the resampling. **kwargs All other keyword arguments are identical to those of .",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\image.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:ax arguments arg arg arg arg Call Call Call"
  },
  {
    "library": "matplotlib",
    "name": "copper",
    "source_code": "def copper() -> None:\n    set_cmap('copper')",
    "docstring": "Set the colormap to 'copper'. This changes the default colormap as well as the colormap of the current image if there is one. See `` for more information.",
    "type": "function",
    "file_path": "matplotlib\\lib\\matplotlib\\pyplot.py",
    "ast_data": "FunctionDef name:copper arguments Call"
  },
  {
    "library": "tensorflow",
    "name": "on_train_begin",
    "source_code": "@doc_controls.for_subclass_implementers\ndef on_train_begin(self, logs=None):\n    pass",
    "docstring": "Called at the beginning of training. Subclasses should override for any actions to run. Args: logs: Dict. Currently no data is passed to this argument for this method but that may change in the future.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\callbacks.py",
    "ast_data": "FunctionDef name:on_train_begin arg:self arg:logs arguments arg arg"
  },
  {
    "library": "pytorch",
    "name": "unfold3d",
    "source_code": "def unfold3d(tensor, kernel_size, padding, stride, dilation):\n    import numpy as np\n    if len(tensor.shape) != 5:\n        raise ValueError(f'Input tensor must be of the shape [B, C, D, H, W]. Got{tensor.shape}')\n    if dilation != (1, 1, 1):\n        raise NotImplementedError(f'dilation={dilation} not supported.')\n    batch_size, channels, _, _, _ = tensor.shape\n    tensor = F.pad(tensor, (padding[2], padding[2], padding[1], padding[1], padding[0], padding[0]))\n    tensor = tensor.unfold(dimension=2, size=kernel_size[0], step=stride[0])\n    tensor = tensor.unfold(dimension=3, size=kernel_size[1], step=stride[1])\n    tensor = tensor.unfold(dimension=4, size=kernel_size[2], step=stride[2])\n    tensor = tensor.permute(0, 2, 3, 4, 1, 5, 6, 7)\n    tensor = tensor.reshape(batch_size, -1, channels * np.prod(kernel_size)).transpose(1, 2)\n    return tensor",
    "docstring": "Extract sliding local blocks from an batched input tensor. :class: only supports 4D inputs (batched image-like tensors). This method implements the same action for 5D inputs Args: tensor: An input tensor of shape `torch.nn.Unfold` for more details Example: >>> # xdoctest: +SKIP >>> B, C, D, H, W = 3, 4, 5, 6, 7 >>> tensor = torch.arange(1, B * C * D * H * W + 1.).view(B, C, D, H, W) >>> unfold3d(tensor, kernel_size=2, padding=0, stride=1).shape torch.Size([3, 32, 120])",
    "type": "function",
    "file_path": "pytorch\\torch\\nn\\utils\\_expanded_weights\\conv_utils.py",
    "ast_data": "FunctionDef name:unfold3d arg:tensor arg:kernel_size arg:padding arg:stride arg:dilation arguments arg arg arg arg arg If Compare Call Raise Call If Compare Raise Call Assign Assign Call Assign Call Assign Call Assign Call Assign Call Assign Call Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "isinstance",
    "source_code": "def isinstance(obj, target_type):\n    return _isinstance(obj, target_type)",
    "docstring": "Provide container type refinement in TorchScript. It can refine parameterized containers of the List, Dict, Tuple, and Optional types. E.g. `` for type refinement): .. testcode:: import torch from typing import Any, Dict, List class MyModule(torch.nn.Module): def __init__(self) -> None: super().__init__() def forward(self, input: Any): # note the Any type if torch.jit.isinstance(input, List[torch.Tensor]): for t in input: y = t.clamp(0, 0.5) elif torch.jit.isinstance(input, Dict[str, str]): for val in input.values(): print(val) m = torch.jit.script(MyModule()) x = [torch.rand(3,3), torch.rand(4,3)] m(x) y = {\"key1\":\"val1\",\"key2\":\"val2\"} m(y)",
    "type": "function",
    "file_path": "pytorch\\torch\\jit\\__init__.py",
    "ast_data": "FunctionDef name:isinstance arg:obj arg:target_type arguments arg arg Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "fit_transform",
    "source_code": "def fit_transform(self, X, y=None):\n    return self.fit(X, y).transform(X, y)",
    "docstring": "Learn and apply the dimension reduction on the train data. Parameters ---------- X : array-like of shape (n_samples, n_features) Training vectors, where is the number of samples and is the number of predictors. y : array-like of shape (n_samples, n_targets), default=None Target vectors, where is the number of samples and is the number of response variables. Returns ------- self : ndarray of shape (n_samples, n_components) Return if is not given, otherwise.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\cross_decomposition\\_pls.py",
    "ast_data": "FunctionDef name:fit_transform arg:self arg:X arg:y arguments arg arg arg Return return:yes Call Call"
  },
  {
    "library": "pandas",
    "name": "_gen_rows",
    "source_code": "def _gen_rows(self) -> Iterator[Sequence[str]]:\n    if self.with_counts:\n        return self._gen_rows_with_counts()\n    else:\n        return self._gen_rows_without_counts()",
    "docstring": "Generator function yielding rows content. Each element represents a row comprising a sequence of strings.",
    "type": "method",
    "file_path": "pandas\\pandas\\io\\formats\\info.py",
    "ast_data": "FunctionDef name:_gen_rows arg:self arguments arg If Return return:yes Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "finalize_state",
    "source_code": "def finalize_state(self):\n    pass",
    "docstring": "Finalize the statistics for the preprocessing layer. This method is called at the end of or after restoring a serialized preprocessing layer's state. This method handles any one-time operations that should occur on the layer's state before .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\base_preprocessing_layer.py",
    "ast_data": "FunctionDef name:finalize_state arg:self arguments arg"
  },
  {
    "library": "matplotlib",
    "name": "_get_backend_or_none",
    "source_code": "def _get_backend_or_none(self):\n    backend = self._get('backend')\n    return None if backend is rcsetup._auto_backend_sentinel else backend",
    "docstring": "Get the requested backend, if any, without triggering resolution.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\__init__.py",
    "ast_data": "FunctionDef name:_get_backend_or_none arg:self arguments arg Assign Call Return return:yes Compare"
  },
  {
    "library": "matplotlib",
    "name": "BboxTransformFrom",
    "source_code": "class BboxTransformFrom(Affine2DBase):\n    is_separable = True\n\n    def __init__(self, boxin, **kwargs):\n        _api.check_isinstance(BboxBase, boxin=boxin)\n        super().__init__(**kwargs)\n        self._boxin = boxin\n        self.set_children(boxin)\n        self._mtx = None\n        self._inverted = None\n    __str__ = _make_str_method('_boxin')\n\n    def get_matrix(self):\n        if self._invalid:\n            inl, inb, inw, inh = self._boxin.bounds\n            if DEBUG and (inw == 0 or inh == 0):\n                raise ValueError('Transforming from a singular bounding box.')\n            x_scale = 1.0 / inw\n            y_scale = 1.0 / inh\n            self._mtx = np.array([[x_scale, 0.0, -inl * x_scale], [0.0, y_scale, -inb * y_scale], [0.0, 0.0, 1.0]], float)\n            self._inverted = None\n            self._invalid = 0\n        return self._mtx",
    "docstring": "linearly transforms points from a given to the unit bounding box.",
    "type": "class",
    "file_path": "matplotlib\\lib\\matplotlib\\transforms.py",
    "ast_data": "ClassDef name:BboxTransformFrom Assign FunctionDef name:__init__ arg:self arg:boxin arguments arg arg arg Call Call Call Assign Call Assign Assign Assign Call FunctionDef name:get_matrix arg:self arguments arg If Assign If BoolOp BoolOp Compare Compare Raise Call Assign Assign Assign Call Assign Assign Return return:yes"
  },
  {
    "library": "pandas",
    "name": "_is_boolean",
    "source_code": "@property\ndef _is_boolean(self) -> bool:\n    return pa.types.is_boolean(self.pyarrow_dtype)",
    "docstring": "Whether this dtype should be considered boolean.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\dtypes\\dtypes.py",
    "ast_data": "FunctionDef name:_is_boolean arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "format_index",
    "source_code": "def format_index(self, formatter: ExtFormatter | None=None, axis: Axis=0, level: Level | list[Level] | None=None, na_rep: str | None=None, precision: int | None=None, decimal: str='.', thousands: str | None=None, escape: str | None=None, hyperlinks: str | None=None) -> StylerRenderer:\n    axis = self.data._get_axis_number(axis)\n    if axis == 0:\n        display_funcs_, obj = (self._display_funcs_index, self.index)\n    else:\n        display_funcs_, obj = (self._display_funcs_columns, self.columns)\n    levels_ = refactor_levels(level, obj)\n    if all((formatter is None, level is None, precision is None, decimal == '.', thousands is None, na_rep is None, escape is None, hyperlinks is None)):\n        display_funcs_.clear()\n        return self\n    if not isinstance(formatter, dict):\n        formatter = dict.fromkeys(levels_, formatter)\n    else:\n        formatter = {obj._get_level_number(level): formatter_ for level, formatter_ in formatter.items()}\n    for lvl in levels_:\n        format_func = _maybe_wrap_formatter(formatter.get(lvl), na_rep=na_rep, precision=precision, decimal=decimal, thousands=thousands, escape=escape, hyperlinks=hyperlinks)\n        for idx in [(i, lvl) if axis == 0 else (lvl, i) for i in range(len(obj))]:\n            display_funcs_[idx] = format_func\n    return self",
    "docstring": "Format the text display value of index labels or column headers. .. versionadded:: 1.4.0 Parameters ---------- formatter : str, callable, dict or None Object to define how values are displayed. See notes. axis : {0, \"index\", 1, \"columns\"} Whether to apply the formatter to the index or column headers. level : int, str, list The level(s) over which to apply the generic formatter. na_rep : str, optional Representation for missing values. If ```ValueErrorStyler.format_indexStyler.to_excelnumber-formatStyler.format`. >>> df = pd.DataFrame([[1, 2, 3]], columns=[\"123\", \"~\", \"$%#\"]) >>> df.style.format_index(\"\\\\textbf{{{}}}\", escape=\"latex\", axis=1).to_latex() ... # doctest: +SKIP \\begin{tabular}{lrrr} {} & {\\textbf{123}} & {\\textbf{\\textasciitilde }} & {\\textbf{\\$\\%\\#}} \\\\ 0 & 1 & 2 & 3 \\\\ \\end{tabular}",
    "type": "method",
    "file_path": "pandas\\pandas\\io\\formats\\style_render.py",
    "ast_data": "FunctionDef name:format_index arg:self arg:formatter arg:axis arg:level arg:na_rep arg:precision arg:decimal arg:thousands arg:escape arg:hyperlinks arguments arg arg arg arg arg arg arg arg arg arg Assign Call If Compare Assign Assign Assign Call If Call Compare Compare Compare Compare Compare Compare Compare Compare Call Return return:yes If Call Assign Call Assign Call Call For Assign Call Call For Compare Call Call Assign Return return:yes"
  },
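  {
    "library": "pandas",
    "name": "format_index_usage_sketch",
    "note": "Editor's addition: a short usage sketch of Styler.format_index, upper-casing column headers and applying a float format string to the index.",
    "example_code": "# Hedged usage sketch (editor's addition).\nimport pandas as pd\n\ndf = pd.DataFrame([[1, 2], [3, 4]],\n                  index=[0.1234, 5.6789], columns=['alpha', 'beta'])\nstyler = df.style.format_index(str.upper, axis=1)  # column headers -> ALPHA, BETA\nstyler = styler.format_index('{:.2f}', axis=0)     # index labels -> 0.12, 5.68\nhtml = styler.to_html()\nassert 'ALPHA' in html and '0.12' in html",
    "type": "usage_example"
  },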
  {
    "library": "tensorflow",
    "name": "Counter",
    "source_code": "class Counter(Metric):\n    __slots__ = []\n\n    def __init__(self, name, description, *labels):\n        super(Counter, self).__init__('Counter', _counter_methods, len(labels), name, description, *labels)\n\n    def get_cell(self, *labels):\n        return CounterCell(super(Counter, self).get_cell(*labels))",
    "docstring": "A stateful class for updating a cumulative integer metric. This class encapsulates a set of values (or a single value for a label-less metric). Each value is identified by a tuple of labels. The class allows the user to increment each value.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\monitoring.py",
    "ast_data": "ClassDef name:Counter Assign FunctionDef name:__init__ arg:self arg:name arg:description arguments arg arg arg arg Call Call Call FunctionDef name:get_cell arg:self arguments arg arg Return return:yes Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "PlacementSpec",
    "source_code": "class PlacementSpec(ABC):\n    pass",
    "docstring": "Base class representing the placement of an entity. Subclasses of this class can be used to specify customized placements which might not be covered by existing APIs.",
    "type": "class",
    "file_path": "pytorch\\torch\\distributed\\_shard\\sharding_spec\\api.py",
    "ast_data": "ClassDef name:PlacementSpec"
  },
  {
    "library": "tensorflow",
    "name": "initializer",
    "source_code": "@property\ndef initializer(self):\n    return self.initialize()",
    "docstring": "Returns a list of ops that initialize the iterator.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\distribute_lib.py",
    "ast_data": "FunctionDef name:initializer arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "on_text_change",
    "source_code": "def on_text_change(self, func):\n    return self._observers.connect('change', lambda text: func(text))",
    "docstring": "When the text changes, call this *func* with event. A connection id is returned which can be used to disconnect.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\widgets.py",
    "ast_data": "FunctionDef name:on_text_change arg:self arg:func arguments arg arg Return return:yes Call arguments arg Call"
  },
  {
    "library": "scrapy",
    "name": "BaseSchedulerMeta",
    "source_code": "class BaseSchedulerMeta(type):\n\n    def __instancecheck__(cls, instance: Any) -> bool:\n        return cls.__subclasscheck__(type(instance))\n\n    def __subclasscheck__(cls, subclass: type) -> bool:\n        return hasattr(subclass, 'has_pending_requests') and callable(subclass.has_pending_requests) and hasattr(subclass, 'enqueue_request') and callable(subclass.enqueue_request) and hasattr(subclass, 'next_request') and callable(subclass.next_request)",
    "docstring": "Metaclass to check scheduler classes against the necessary interface",
    "type": "class",
    "file_path": "scrapy\\scrapy\\core\\scheduler.py",
    "ast_data": "ClassDef name:BaseSchedulerMeta FunctionDef name:__instancecheck__ arg:cls arg:instance arguments arg arg Return return:yes Call Call FunctionDef name:__subclasscheck__ arg:cls arg:subclass arguments arg arg Return return:yes BoolOp Call Call Call Call Call Call"
  },
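  {
    "library": "scrapy",
    "name": "base_scheduler_meta_ducktyping_sketch",
    "note": "Editor's addition: a condensed restatement of the metaclass above, showing that isinstance succeeds for any object exposing the three scheduler methods, with no inheritance required. DuckScheduler is a name invented for this sketch.",
    "example_code": "# Hedged sketch (editor's addition): structural isinstance via a metaclass.\nclass BaseSchedulerMeta(type):\n    def __instancecheck__(cls, instance):\n        return cls.__subclasscheck__(type(instance))\n\n    def __subclasscheck__(cls, subclass):\n        required = ('has_pending_requests', 'enqueue_request', 'next_request')\n        return all(callable(getattr(subclass, name, None)) for name in required)\n\nclass BaseScheduler(metaclass=BaseSchedulerMeta):\n    pass\n\nclass DuckScheduler:  # note: does not inherit from BaseScheduler\n    def has_pending_requests(self):\n        return False\n    def enqueue_request(self, request):\n        return False\n    def next_request(self):\n        return None\n\nassert isinstance(DuckScheduler(), BaseScheduler)\nassert not isinstance(object(), BaseScheduler)",
    "type": "usage_example"
  },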
  {
    "library": "pytorch",
    "name": "disallow_in_graph",
    "source_code": "def disallow_in_graph(fn):\n    return _disallow_in_graph_helper(throw_if_not_allowed=True)(fn)",
    "docstring": "Customize which functions TorchDynamo will exclude in the generated graph and force a graph break on. :: torch._dynamo.disallow_in_graph(torch.sub) @torch._dynamo.optimize(...) def fn(a): x = torch.add(x, 1) x = torch.sub(x, 1) x = torch.add(x, 1) return x fn(...) Will break the graph on , and give two graphs each with a single op.",
    "type": "function",
    "file_path": "pytorch\\torch\\_dynamo\\decorators.py",
    "ast_data": "FunctionDef name:disallow_in_graph arg:fn arguments arg Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_element_to_string",
    "source_code": "def _element_to_string(x):\n    if x is Ellipsis:\n        return '...'\n    if isinstance(x, str):\n        return \"'\" + x + \"'\"\n    return str(x)",
    "docstring": "element to a string within a list.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\dynamic_ragged_shape.py",
    "ast_data": "FunctionDef name:_element_to_string arg:x arguments arg If Compare Return return:yes If Call Return return:yes Return return:yes Call"
  },
  {
    "library": "cherrypy",
    "name": "start",
    "source_code": "def start():\n    the_coverage.start()",
    "docstring": "Start collecting coverage.",
    "type": "function",
    "file_path": "cherrypy\\cherrypy\\lib\\covercp.py",
    "ast_data": "FunctionDef name:start arguments Call"
  },
  {
    "library": "pytorch",
    "name": "boxed_run",
    "source_code": "@compatibility(is_backward_compatible=True)\ndef boxed_run(self, args_list):\n    args_iter = iter(args_list)\n    env = {}\n    for n in self.graph.nodes:\n        if n.op == 'placeholder':\n            env[n] = next(args_iter)\n    args_list.clear()\n    return self.run(initial_env=env)",
    "docstring": "Run via interpretation and return the result. This uses the \"boxed\" calling convention, where you pass a list of arguments, which will be cleared by the interpreter. This ensures that input tensors are promptly deallocated.",
    "type": "method",
    "file_path": "pytorch\\torch\\fx\\interpreter.py",
    "ast_data": "FunctionDef name:boxed_run arg:self arg:args_list arguments arg arg Assign Call Assign For If Compare Assign Call Call Return return:yes Call Call"
  },
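  {
    "library": "pytorch",
    "name": "boxed_run_usage_sketch",
    "note": "Editor's addition: a minimal usage sketch of Interpreter.boxed_run on a symbolically traced function, showing that the argument list is emptied after the call.",
    "example_code": "# Hedged usage sketch (editor's addition).\nimport torch\nfrom torch.fx import Interpreter, symbolic_trace\n\ndef f(x, y):\n    return x + y\n\ngm = symbolic_trace(f)\nargs = [torch.ones(2), torch.full((2,), 2.0)]\nout = Interpreter(gm).boxed_run(args)\nassert torch.equal(out, torch.full((2,), 3.0))\nassert args == []  # boxed_run cleared the list, dropping its input references",
    "type": "usage_example"
  },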
  {
    "library": "kornia",
    "name": "exp",
    "source_code": "@staticmethod\ndef exp(theta: Tensor) -> So2:\n    check_so2_theta_shape(theta)\n    return So2(complex(theta.cos(), theta.sin()))",
    "docstring": "Convert elements of lie algebra to elements of lie group. Args: theta: angle in radians of shape :math: or :math:. Example: >>> v = torch.tensor([3.1415/2]) >>> s = So2.exp(v) >>> s Parameter containing: tensor([4.6329e-05+1.j], requires_grad=True)",
    "type": "method",
    "file_path": "kornia\\kornia\\geometry\\liegroup\\so2.py",
    "ast_data": "FunctionDef name:exp arg:theta arguments arg Call Return return:yes Call Call Call Call"
  },
  {
    "library": "scipy",
    "name": "_construct_default_doc",
    "source_code": "def _construct_default_doc(self, longname=None, docdict=None, discrete='continuous'):\n    if longname is None:\n        longname = 'A'\n    self.__doc__ = ''.join([f'{longname} {discrete} random variable.', '\\n\\n%(before_notes)s\\n', docheaders['notes'], '\\n%(example)s'])\n    self._construct_doc(docdict)",
    "docstring": "Construct instance docstring from the default template.",
    "type": "method",
    "file_path": "scipy\\scipy\\stats\\_distn_infrastructure.py",
    "ast_data": "FunctionDef name:_construct_default_doc arg:self arg:longname arg:docdict arg:discrete arguments arg arg arg arg If Compare Assign Assign Call Call"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, action):\n    _check_type(action, str)\n    self.action = action",
    "docstring": "Constructor. Args: action: () Debugger action to take on session init.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\debug\\wrappers\\framework.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:action arguments arg arg Call Assign"
  },
  {
    "library": "scipy",
    "name": "_nbytes_full",
    "source_code": "def _nbytes_full(fmt, nlines):\n    return (fmt.repeat * fmt.width + 1) * (nlines - 1)",
    "docstring": "Return the number of bytes to read to get every full lines for the given parsed fortran format.",
    "type": "function",
    "file_path": "scipy\\scipy\\io\\_harwell_boeing\\hb.py",
    "ast_data": "FunctionDef name:_nbytes_full arg:fmt arg:nlines arguments arg arg Return return:yes"
  },
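  {
    "library": "scipy",
    "name": "nbytes_full_arithmetic_sketch",
    "note": "Editor's addition: the byte-count arithmetic above worked through on an invented stand-in for the parsed fortran format object (Fmt is hypothetical; the attribute names repeat and width come from the function body).",
    "example_code": "# Hedged sketch (editor's addition).\nfrom dataclasses import dataclass\n\n@dataclass\nclass Fmt:        # hypothetical stand-in for scipy's parsed format\n    repeat: int   # values per line\n    width: int    # characters per value\n\ndef nbytes_full(fmt, nlines):\n    # Each full line holds repeat * width characters plus one newline;\n    # the last of the nlines lines may be partial, so it is excluded.\n    return (fmt.repeat * fmt.width + 1) * (nlines - 1)\n\n# e.g. format (3E24.16) over 5 lines: 4 full lines of 3*24 + 1 = 73 bytes\nassert nbytes_full(Fmt(repeat=3, width=24), nlines=5) == 292",
    "type": "usage_example"
  },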
  {
    "library": "pandas",
    "name": "dtype",
    "source_code": "@property\ndef dtype(self) -> ArrowDtype:\n    return self._dtype",
    "docstring": "An instance of 'ExtensionDtype'.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\arrays\\arrow\\array.py",
    "ast_data": "FunctionDef name:dtype arg:self arguments arg Return return:yes"
  },
  {
    "library": "scipy",
    "name": "JennrichSampson",
    "source_code": "class JennrichSampson(Benchmark):\n\n    def __init__(self, dimensions=2):\n        Benchmark.__init__(self, dimensions)\n        self._bounds = list(zip([-1.0] * self.N, [1.0] * self.N))\n        self.global_optimum = [[0.257825, 0.257825]]\n        self.custom_bounds = [(-1, 0.34), (-1, 0.34)]\n        self.fglob = 124.3621824\n\n    def fun(self, x, *args):\n        self.nfev += 1\n        i = arange(1, 11)\n        return sum((2 + 2 * i - (exp(i * x[0]) + exp(i * x[1]))) ** 2)",
    "docstring": "Jennrich-Sampson objective function. This class defines the Jennrich-Sampson [1]_ global optimization problem. This is a multimodal minimization problem defined as follows: .. math:: f_{\\text{JennrichSampson}}(x) = \\sum_{i=1}^{10} \\left [2 + 2i - (e^{ix_1} + e^{ix_2}) \\right ]^2 with :math: for :math:. *Global optimum*: :math: for :math:. .. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions For Global Optimization Problems Int. Journal of Mathematical Modelling and Numerical Optimisation, 2013, 4, 150-194.",
    "type": "class",
    "file_path": "scipy\\benchmarks\\benchmarks\\go_benchmark_functions\\go_funcs_J.py",
    "ast_data": "ClassDef name:JennrichSampson FunctionDef name:__init__ arg:self arg:dimensions arguments arg arg Call Assign Call Call Assign Assign Assign FunctionDef name:fun arg:self arg:x arguments arg arg arg Assign Call Return return:yes Call Call Call"
  },
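  {
    "library": "scipy",
    "name": "jennrich_sampson_evaluation_sketch",
    "note": "Editor's addition: evaluating the Jennrich-Sampson sum at the tabulated optimum x = (0.257825, 0.257825); the rounded optimum reproduces fglob = 124.3621824 to within a small tolerance.",
    "example_code": "# Hedged sketch (editor's addition).\nimport numpy as np\n\ndef jennrich_sampson(x):\n    i = np.arange(1, 11)\n    return np.sum((2 + 2 * i - (np.exp(i * x[0]) + np.exp(i * x[1]))) ** 2)\n\nval = jennrich_sampson([0.257825, 0.257825])\nassert abs(val - 124.3621824) < 0.05  # rounding of the tabulated optimum\nprint(val)",
    "type": "usage_example"
  },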
  {
    "library": "matplotlib",
    "name": "fully_containsx",
    "source_code": "def fully_containsx(self, x):\n    x0, x1 = self.intervalx\n    return x0 < x < x1 or x0 > x > x1",
    "docstring": "Return whether *x* is in the open (:attr:, :attr:) interval.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\transforms.py",
    "ast_data": "FunctionDef name:fully_containsx arg:self arg:x arguments arg arg Assign Return return:yes BoolOp Compare Compare"
  },
  {
    "library": "pytorch",
    "name": "store_reduction",
    "source_code": "def store_reduction(self, name: str, index: sympy.Expr, value: T) -> None:\n    raise NotImplementedError",
    "docstring": "Store the fully accumulated result of 'reduction' to the memory location 'name' offset by 'expr'.",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\ops_handler.py",
    "ast_data": "FunctionDef name:store_reduction arg:self arg:name arg:index arg:value arguments arg arg arg arg Raise"
  },
  {
    "library": "pytorch",
    "name": "to_bool",
    "source_code": "@compatibility(is_backward_compatible=True)\ndef to_bool(self, obj: 'Proxy') -> bool:\n    raise TraceError('symbolically traced variables cannot be used as inputs to control flow')",
    "docstring": "Called when a proxy object is being converted to a boolean, such as when used in control flow. Normally we don't know what to do because we don't know the value of the proxy, but a custom tracer can attach more information to the graph node using create_node and can choose to return a value.",
    "type": "method",
    "file_path": "pytorch\\torch\\fx\\proxy.py",
    "ast_data": "FunctionDef name:to_bool arg:self arg:obj arguments arg arg Raise Call Call"
  },
  {
    "library": "tensorflow",
    "name": "update",
    "source_code": "def update(self):\n    self._load_source_files()\n    self._load_stack_frames()\n    self._load_graphs()\n    self._load_graph_execution_traces()\n    self._load_execution()",
    "docstring": "Perform incremental read of the file set.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\debug\\lib\\debug_events_reader.py",
    "ast_data": "FunctionDef name:update arg:self arguments arg Call Call Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "_build_param_buckets",
    "source_code": "def _build_param_buckets(self) -> None:\n    if not self.parameters_as_bucket_view or self._overlap_with_ddp:\n        return\n    num_devices = len(self._device_to_params_per_rank)\n    self._buckets = [[] for _ in range(num_devices)]\n    for dev_i, (device, params_per_rank) in enumerate(self._device_to_params_per_rank.items()):\n        for params in params_per_rank:\n            bucket_size = 0\n            dtype = None\n            trainable_params = []\n            for param in params:\n                if not _is_trainable(param):\n                    param.data = param.data.detach().clone()\n                else:\n                    bucket_size += param.numel()\n                    trainable_params.append(param)\n                dtype = param.dtype\n            if bucket_size == 0:\n                bucket = torch.zeros(1, device=device)\n            else:\n                bucket = torch.empty(bucket_size, dtype=dtype, device=device)\n                offset = 0\n                for param in trainable_params:\n                    offset_next = offset + param.numel()\n                    bucket[offset:offset_next].copy_(param.data.flatten())\n                    param.data = bucket[offset:offset_next].view_as(param.data)\n                    offset = offset_next\n            self._buckets[dev_i].append(bucket)",
    "docstring": "Build parameter buckets if ``. For each device that stores this rank's parameters, there is a bucket (represented as a tensor) containing all of the parameters on that device that are assigned to a given rank in the parameter update partition. This method is called in the constructor and any time parameter trainability is changed. .. warning:: The current implementation assumes that all of the parameters in a bucket are of the same dense type when allocating the bucket's tensor. .. warning:: If the model parameters are stored across more than one device, then the storage partitioning must be the same across all processes in order for parameter synchronization to work.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\optim\\zero_redundancy_optimizer.py",
    "ast_data": "FunctionDef name:_build_param_buckets arg:self arguments arg If BoolOp Return return:no Assign Call Assign Call For Call Call For Assign Assign Assign For If Call Assign Call Call Call Call Assign If Compare Assign Call Assign Call Assign For Assign Call Call Call Assign Call Assign Call"
  },
  {
    "library": "matplotlib",
    "name": "set_agg_filter",
    "source_code": "def set_agg_filter(self, filter_func):\n    self._agg_filter = filter_func\n    self.stale = True",
    "docstring": "Set the agg filter. Parameters ---------- filter_func : callable A filter function, which takes a (m, n, depth) float array and a dpi value, and returns a (m, n, depth) array and two offsets from the bottom left corner of the image .. ACCEPTS: a filter function, which takes a (m, n, 3) float array and a dpi value, and returns a (m, n, 3) array and two offsets from the bottom left corner of the image",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\artist.py",
    "ast_data": "FunctionDef name:set_agg_filter arg:self arg:filter_func arguments arg arg Assign Assign"
  },
  {
    "library": "pytorch",
    "name": "_start_record_function",
    "source_code": "def _start_record_function(exec_type, func_name, current_worker_name, dest_worker_name):\n    assert torch.autograd._profiler_enabled(), 'Autograd profiler should be enabled.'\n    profile_key = f'rpc_{exec_type.value}#{str(func_name)}({current_worker_name} -> {dest_worker_name})'\n    rf = torch.autograd._RecordFunction()\n    torch.autograd._run_before_callbacks(rf, profile_key)\n    return rf",
    "docstring": "This function should be called from RPC/RRef functions to create a RecordFunction object for profiling. This function also runs the before callbacks that start the profiling, though the user is responsible for running the appropriate callbacks when the function to be profiled finishes. Args: exec_type (RPCExecMode): Type of RPC/RRef call func_name (str): Name of function being profiled. current_worker_name (str): Name of current worker. dest_worker_name (str): Name of the destination worker. Returns: An instance of .",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\rpc\\internal.py",
    "ast_data": "FunctionDef name:_start_record_function arg:exec_type arg:func_name arg:current_worker_name arg:dest_worker_name arguments arg arg arg arg Call Assign Call Assign Call Call Return return:yes"
  },
  {
    "library": "numpy",
    "name": "argmax",
    "source_code": "def argmax(self, axis=None, fill_value=None, out=None, *, keepdims=np._NoValue):\n    if fill_value is None:\n        fill_value = maximum_fill_value(self._data)\n    d = self.filled(fill_value).view(ndarray)\n    keepdims = False if keepdims is np._NoValue else bool(keepdims)\n    return d.argmax(axis, out=out, keepdims=keepdims)",
    "docstring": "Returns array of indices of the maximum values along the given axis. Masked values are treated as if they had the value fill_value. Parameters ---------- axis : {None, integer} If None, the index is into the flattened array, otherwise along the specified axis fill_value : scalar or None, optional Value used to fill in the masked values. If None, the output of maximum_fill_value(self._data) is used instead. out : {None, array}, optional Array into which the result can be placed. Its type is preserved and it must be of the right shape to hold the output. Returns ------- index_array : {integer_array} Examples -------- >>> import numpy as np >>> a = np.arange(6).reshape(2,3) >>> a.argmax() 5 >>> a.argmax(0) array([1, 1, 1]) >>> a.argmax(1) array([2, 2])",
    "type": "method",
    "file_path": "numpy\\numpy\\ma\\core.py",
    "ast_data": "FunctionDef name:argmax arg:self arg:axis arg:fill_value arg:out arguments arg arg arg arg arg If Compare Assign Call Assign Call Call Assign Compare Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "is_registered_op",
    "source_code": "def is_registered_op(self, namespace: str, op_name: str, overload: str | None=None) -> bool:\n    functions = self.get_op_functions(namespace=namespace, op_name=op_name, overload=overload)\n    return functions is not None",
    "docstring": "Returns whether the given op is registered: torch.ops.... Args: namespace: The namespace of the operator to check. op_name: The name of the operator to check. overload: The overload of the operator to check. If it's default overload, leave it to None. Returns: True if the given op is registered, otherwise False.",
    "type": "method",
    "file_path": "pytorch\\torch\\onnx\\_internal\\_exporter_legacy.py",
    "ast_data": "FunctionDef name:is_registered_op arg:self arg:namespace arg:op_name arg:overload arguments arg arg arg arg Assign Call Return return:yes Compare"
  },
  {
    "library": "pytorch",
    "name": "_get_standalone_module_configs",
    "source_code": "def _get_standalone_module_configs(node: Node, named_modules: dict[str, torch.nn.Module], prepare_custom_config: PrepareCustomConfig, parent_qconfig: QConfigAny, parent_backend_config: Optional[BackendConfig]) -> tuple[QConfigMapping, tuple[Any, ...], PrepareCustomConfig, Optional[BackendConfig]]:\n    module_name = str(node.target)\n    module_type = type(named_modules[module_name])\n    config_entry = StandaloneModuleConfigEntry(None, (), None, None)\n    config_entry = prepare_custom_config.standalone_module_classes.get(module_type, config_entry)\n    config_entry = prepare_custom_config.standalone_module_names.get(module_name, config_entry)\n    qconfig_mapping = config_entry.qconfig_mapping or QConfigMapping().set_global(parent_qconfig)\n    example_inputs = config_entry.example_inputs\n    prepare_custom_config = config_entry.prepare_custom_config or PrepareCustomConfig()\n    backend_config = config_entry.backend_config or parent_backend_config\n    return (qconfig_mapping, example_inputs, prepare_custom_config, backend_config)",
    "docstring": "Returns the standalone module QConfigMapping and PrepareCustomConfig for , assuming that the module pointed to by is a standalone modules.",
    "type": "function",
    "file_path": "pytorch\\torch\\ao\\quantization\\fx\\prepare.py",
    "ast_data": "FunctionDef name:_get_standalone_module_configs arg:node arg:named_modules arg:prepare_custom_config arg:parent_qconfig arg:parent_backend_config arguments arg arg arg arg arg Assign Call Assign Call Assign Call Assign Call Assign Call Assign BoolOp Call Call Assign Assign BoolOp Call Assign BoolOp Return return:yes"
  },
  {
    "library": "cherrypy",
    "name": "record_start",
    "source_code": "def record_start(self):\n    request = cherrypy.serving.request\n    if not hasattr(request.rfile, 'bytes_read'):\n        request.rfile = ByteCountWrapper(request.rfile)\n        request.body.fp = request.rfile\n    r = request.remote\n    appstats['Current Requests'] += 1\n    appstats['Total Requests'] += 1\n    appstats['Requests'][_get_threading_ident()] = {'Bytes Read': None, 'Bytes Written': None, 'Client': lambda s: '%s:%s' % (r.ip, r.port), 'End Time': None, 'Processing Time': proc_time, 'Request-Line': request.request_line, 'Response Status': None, 'Start Time': time.time()}",
    "docstring": "Record the beginning of a request.",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\lib\\cpstats.py",
    "ast_data": "FunctionDef name:record_start arg:self arguments arg Assign If Call Assign Call Assign Assign Assign Call arguments arg Call"
  },
  {
    "library": "scikit-learn",
    "name": "is_outlier_detector",
    "source_code": "def is_outlier_detector(estimator):\n    if isinstance(estimator, type):\n        warnings.warn(f'passing a class to {print(inspect.stack()[0][3])} is deprecated and will be removed in 1.8. Use an instance of the class instead.', FutureWarning)\n        return getattr(estimator, '_estimator_type', None) == 'outlier_detector'\n    return get_tags(estimator).estimator_type == 'outlier_detector'",
    "docstring": "Return True if the given estimator is (probably) an outlier detector. Parameters ---------- estimator : estimator instance Estimator object to test. Returns ------- out : bool True if estimator is an outlier detector and False otherwise.",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\base.py",
    "ast_data": "FunctionDef name:is_outlier_detector arg:estimator arguments arg If Call Call Call Call Return return:yes Compare Call Return return:yes Compare Call"
  },
  {
    "library": "scipy",
    "name": "time_build",
    "source_code": "def time_build(self, mnr, cls_name):\n    m, n, r = mnr\n    if cls_name == 'cKDTree_flat':\n        self.T = self.cls(self.data, leafsize=n)\n    else:\n        self.cls(self.data)",
    "docstring": "Constructing kd-tree ======================= dim | # points | time",
    "type": "method",
    "file_path": "scipy\\benchmarks\\benchmarks\\spatial.py",
    "ast_data": "FunctionDef name:time_build arg:self arg:mnr arg:cls_name arguments arg arg arg Assign If Compare Assign Call Call"
  },
  {
    "library": "matplotlib",
    "name": "BivarColormapFromImage",
    "source_code": "class BivarColormapFromImage(BivarColormap):\n\n    def __init__(self, lut, shape='square', origin=(0, 0), name='from image'):\n        lut = np.array(lut, copy=True)\n        if lut.ndim != 3 or lut.shape[2] not in (3, 4):\n            raise ValueError('The lut must be an array of shape (n, m, 3) or (n, m, 4)', ' or a PIL.image encoded as RGB or RGBA')\n        if lut.dtype == np.uint8:\n            lut = lut.astype(np.float32) / 255\n        if lut.shape[2] == 3:\n            new_lut = np.empty((lut.shape[0], lut.shape[1], 4), dtype=lut.dtype)\n            new_lut[:, :, :3] = lut\n            new_lut[:, :, 3] = 1.0\n            lut = new_lut\n        self._lut = lut\n        super().__init__(lut.shape[0], lut.shape[1], shape, origin, name=name)\n\n    def _init(self):\n        self._isinit = True",
    "docstring": "BivarColormap object generated by supersampling a regular grid. Parameters ---------- lut : nparray of shape (N, M, 3) or (N, M, 4) The look-up-table shape: {'square', 'circle', 'ignore', 'circleignore'} - If 'square' each variate is clipped to [0,1] independently - If 'circle' the variates are clipped radially to the center of the colormap, and a circular mask is applied when the colormap is displayed - If 'ignore' the variates are not clipped, but instead assigned the 'outside' color - If 'circleignore' a circular mask is applied, but the data is not clipped origin: (float, float) The relative origin of the colormap. Typically (0, 0), for colormaps that are linear on both axis, and (.5, .5) for circular colormaps. Used when getting 1D colormaps from 2D colormaps. name : str, optional The name of the colormap.",
    "type": "class",
    "file_path": "matplotlib\\lib\\matplotlib\\colors.py",
    "ast_data": "ClassDef name:BivarColormapFromImage FunctionDef name:__init__ arg:self arg:lut arg:shape arg:origin arg:name arguments arg arg arg arg arg Assign Call If BoolOp Compare Compare Raise Call If Compare Assign Call If Compare Assign Call Assign Assign Assign Assign Call Call FunctionDef name:_init arg:self arguments arg Assign"
  },
  {
    "library": "pandas",
    "name": "after_nearest_workday",
    "source_code": "def after_nearest_workday(dt: datetime) -> datetime:\n    return next_workday(nearest_workday(dt))",
    "docstring": "returns next workday after nearest workday needed for Boxing day or multiple holidays in a series",
    "type": "function",
    "file_path": "pandas\\pandas\\tseries\\holiday.py",
    "ast_data": "FunctionDef name:after_nearest_workday arg:dt arguments arg Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "from_config",
    "source_code": "@classmethod\ndef from_config(cls, config, custom_objects=None, columns_by_name=None):\n    return cls._from_config(config, custom_objects, columns_by_name)",
    "docstring": "Creates a FeatureColumn from its config. This method should be the reverse of , capable of instantiating the same FeatureColumn from the config dictionary. See for an example of common (de)serialization practices followed in this file. TODO(b/118939620): This is a private method until consensus is reached on supporting object deserialization deduping within Keras. Args: config: A Dict config acquired with . custom_objects: Optional dictionary mapping names (strings) to custom classes or functions to be considered during deserialization. columns_by_name: A Dict[String, FeatureColumn] of existing columns in order to avoid duplication. Should be passed to any calls to deserialize_feature_column(). Returns: A FeatureColumn for the input config.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\feature_column\\feature_column_v2_types.py",
    "ast_data": "FunctionDef name:from_config arg:cls arg:config arg:custom_objects arg:columns_by_name arguments arg arg arg arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_add_np_doc",
    "source_code": "def _add_np_doc(doc, np_fun_name, np_f, link):\n    flag = get_np_doc_form()\n    if flag == 'inlined':\n        if _has_docstring(np_f):\n            doc += 'Documentation for `numpy.%s`:\\n\\n' % np_fun_name\n            doc += np_f.__doc__.replace('>>>', '>')\n    elif isinstance(flag, str):\n        if link is None:\n            url = generate_link(flag, np_fun_name)\n        elif isinstance(link, AliasOf):\n            url = generate_link(flag, link.value)\n        elif isinstance(link, Link):\n            url = link.value\n        else:\n            url = None\n        if url is not None:\n            if is_check_link():\n                import requests\n                r = requests.head(url)\n                if r.status_code != 200:\n                    raise ValueError(f'Check link failed at [{url}] with status code {r.status_code}. Argument `np_fun_name` is {np_fun_name}.')\n            doc += 'See the NumPy documentation for [`numpy.%s`](%s).' % (np_fun_name, url)\n    return doc",
    "docstring": "Appends the numpy docstring to , according to . See for how it controls the form of the numpy docstring. Args: doc: the docstring to be appended to. np_fun_name: the name of the numpy function. np_f: (optional) the numpy function. link: (optional) which link to use. See for details. Returns: with numpy docstring appended.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\numpy_ops\\np_utils.py",
    "ast_data": "FunctionDef name:_add_np_doc arg:doc arg:np_fun_name arg:np_f arg:link arguments arg arg arg arg Assign Call If Compare If Call Call If Call If Compare Assign Call If Call Assign Call If Call Assign Assign If Compare If Call Assign Call If Compare Raise Call Return return:yes"
  },
  {
    "library": "sphinx",
    "name": "math_reference",
    "source_code": "class math_reference(nodes.Inline, nodes.Referential, nodes.TextElement):\n    pass",
    "docstring": "A node for a reference for equation.",
    "type": "class",
    "file_path": "sphinx\\sphinx\\builders\\latex\\nodes.py",
    "ast_data": "ClassDef name:math_reference"
  },
  {
    "library": "kornia",
    "name": "get_attn_bias_and_cat",
    "source_code": "def get_attn_bias_and_cat(x_list, branges=None):\n    batch_sizes = [b.shape[0] for b in branges] if branges is not None else [x.shape[0] for x in x_list]\n    all_shapes = tuple(((b, x.shape[1]) for b, x in zip(batch_sizes, x_list)))\n    if all_shapes not in attn_bias_cache.keys():\n        seqlens = []\n        for b, x in zip(batch_sizes, x_list):\n            for _ in range(b):\n                seqlens.append(x.shape[1])\n        attn_bias = fmha.BlockDiagonalMask.from_seqlens(seqlens)\n        attn_bias._batch_sizes = batch_sizes\n        attn_bias_cache[all_shapes] = attn_bias\n    if branges is not None:\n        cat_tensors = index_select_cat([x.flatten(1) for x in x_list], branges).view(1, -1, x_list[0].shape[-1])\n    else:\n        tensors_bs1 = tuple((x.reshape([1, -1, *x.shape[2:]]) for x in x_list))\n        cat_tensors = torch.cat(tensors_bs1, dim=1)\n    return (attn_bias_cache[all_shapes], cat_tensors)",
    "docstring": "Perform the index select, cat the tensors, and provide the attn_bias from cache.",
    "type": "function",
    "file_path": "kornia\\kornia\\feature\\dedode\\transformer\\layers\\block.py",
    "ast_data": "FunctionDef name:get_attn_bias_and_cat arg:x_list arg:branges arguments arg arg Assign Compare Assign Call Call If Compare Call Assign For Call For Call Call Assign Call Assign Assign If Compare Assign Call Call Call Assign Call Call Assign Call Return return:yes"
  },
  {
    "library": "django",
    "name": "_do_insert",
    "source_code": "def _do_insert(self, manager, using, fields, returning_fields, raw):\n    return manager._insert([self], fields=fields, returning_fields=returning_fields, using=using, raw=raw)",
    "docstring": "Do an INSERT. If returning_fields is defined then this method should return the newly created data for the model.",
    "type": "method",
    "file_path": "django\\django\\db\\models\\base.py",
    "ast_data": "FunctionDef name:_do_insert arg:self arg:manager arg:using arg:fields arg:returning_fields arg:raw arguments arg arg arg arg arg arg Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "factorial2",
    "source_code": "def factorial2(n, exact=False, extend='zero'):\n    return _factorialx_wrapper('factorial2', n, k=2, exact=exact, extend=extend)",
    "docstring": "Double factorial. This is the factorial with every second value skipped. E.g., ``n >> from scipy.special import factorial2 >>> factorial2(7, exact=False) np.float64(105.00000000000001) >>> factorial2(7, exact=True) 105 References ---------- .. [1] Complex extension to double factorial",
    "type": "function",
    "file_path": "scipy\\scipy\\special\\_basic.py",
    "ast_data": "FunctionDef name:factorial2 arg:n arg:exact arg:extend arguments arg arg arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "sqrt",
    "source_code": "@dispatch.add_dispatch_support\n@doc_controls.do_not_generate_docs\ndef sqrt(x):\n    zero = _constant_to_tensor(0.0, x.dtype.base_dtype)\n    x = math_ops.maximum(x, zero)\n    return math_ops.sqrt(x)",
    "docstring": "Element-wise square root. This function clips negative tensor values to 0 before computing the square root. Args: x: Tensor or variable. Returns: A tensor.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\backend.py",
    "ast_data": "FunctionDef name:sqrt arg:x arguments arg Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "copy_properties",
    "source_code": "def copy_properties(self, other):\n    super().copy_properties(other)\n    fillcolor = getattr(other, '_fillcolor', self._fillcolor)\n    effective_alphas = getattr(other, '_effective_alphas', self._effective_alphas)\n    self._fillcolor = fillcolor\n    self._effective_alphas = effective_alphas",
    "docstring": "Copy properties of other into self.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\backends\\backend_pdf.py",
    "ast_data": "FunctionDef name:copy_properties arg:self arg:other arguments arg arg Call Call Assign Call Assign Call Assign Assign"
  },
  {
    "library": "pytorch",
    "name": "get_overlapping_candidate",
    "source_code": "def get_overlapping_candidate():\n    candidates = [x for x in ready if not contains_collective(x.snode) and (not contains_wait(x.snode))]\n    if len(candidates) == 0:\n        return None\n    return min(candidates, key=lambda x: x.score)",
    "docstring": "Return the next node in the ready queue that's neither a collective or a wait.",
    "type": "function",
    "file_path": "pytorch\\torch\\_inductor\\comms.py",
    "ast_data": "FunctionDef name:get_overlapping_candidate arguments Assign BoolOp Call Call If Compare Call Return return:no Return return:yes Call arguments arg"
  },
  {
    "library": "tensorflow",
    "name": "from_shape",
    "source_code": "@classmethod\ndef from_shape(cls, ragged_shape: dynamic_ragged_shape.DynamicRaggedShape) -> 'StructuredTensor':\n    return StructuredTensor(fields={}, ragged_shape=ragged_shape)",
    "docstring": "Creates a with no fields and ragged_shape. Args: ragged_shape: the shape of the structured tensor. Returns: a StructuredTensor with no fields and ragged_shape.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\structured\\structured_tensor.py",
    "ast_data": "FunctionDef name:from_shape arg:cls arg:ragged_shape arguments arg arg Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "_round_to_power_of_two",
    "source_code": "def _round_to_power_of_two(x):\n    return 2 ** np.around(np.log2(x))",
    "docstring": "Round elements of the array to the nearest power of two.",
    "type": "function",
    "file_path": "scipy\\scipy\\optimize\\_linprog_util.py",
    "ast_data": "FunctionDef name:_round_to_power_of_two arg:x arguments arg Return return:yes Call Call"
  },
  {
    "library": "pandas",
    "name": "as_ordered",
    "source_code": "def as_ordered(self) -> Self:\n    return self.set_ordered(True)",
    "docstring": "Set the Categorical to be ordered. Returns ------- Categorical Ordered Categorical. See Also -------- as_unordered : Set the Categorical to be unordered. Examples -------- For :class:: >>> ser = pd.Series([\"a\", \"b\", \"c\", \"a\"], dtype=\"category\") >>> ser.cat.ordered False >>> ser = ser.cat.as_ordered() >>> ser.cat.ordered True For :class:: >>> ci = pd.CategoricalIndex([\"a\", \"b\", \"c\", \"a\"]) >>> ci.ordered False >>> ci = ci.as_ordered() >>> ci.ordered True",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\arrays\\categorical.py",
    "ast_data": "FunctionDef name:as_ordered arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "Status",
    "source_code": "class Status(Enum):\n    SKIPPED = 'skipped'\n    PASSED = 'passed'\n    FAILED_COMPILE = 'failed_compile'\n    FAILED_RUN_COMPILE_EXCEPTION = 'failed_run_compile_exception'\n    FAILED_RUN_EAGER_EXCEPTION = 'failed_run_eager_exception'\n    FAILED_RUN_RETURN = 'failed_run_return'\n\n    def failing(self) -> bool:\n        return self == Status.FAILED_COMPILE or self == Status.FAILED_RUN_EAGER_EXCEPTION or self == Status.FAILED_RUN_COMPILE_EXCEPTION or (self == Status.FAILED_RUN_RETURN)",
    "docstring": "The Status return value enum for Config Fuzzer",
    "type": "class",
    "file_path": "pytorch\\torch\\_inductor\\fuzzer.py",
    "ast_data": "ClassDef name:Status Assign Assign Assign Assign Assign Assign FunctionDef name:failing arg:self arguments arg Return return:yes BoolOp Compare Compare Compare Compare"
  },
  {
    "library": "seaborn",
    "name": "plot",
    "source_code": "def plot(self, ax, scatter_kws, line_kws):\n    if self.scatter:\n        scatter_kws['label'] = self.label\n    else:\n        line_kws['label'] = self.label\n    if self.color is None:\n        lines, = ax.plot([], [])\n        color = lines.get_color()\n        lines.remove()\n    else:\n        color = self.color\n    color = mpl.colors.rgb2hex(mpl.colors.colorConverter.to_rgb(color))\n    scatter_kws.setdefault('color', color)\n    line_kws.setdefault('color', color)\n    if self.scatter:\n        self.scatterplot(ax, scatter_kws)\n    if self.fit_reg:\n        self.lineplot(ax, line_kws)\n    if hasattr(self.x, 'name'):\n        ax.set_xlabel(self.x.name)\n    if hasattr(self.y, 'name'):\n        ax.set_ylabel(self.y.name)",
    "docstring": "Draw the full plot.",
    "type": "method",
    "file_path": "seaborn\\seaborn\\regression.py",
    "ast_data": "FunctionDef name:plot arg:self arg:ax arg:scatter_kws arg:line_kws arguments arg arg arg arg If Assign Assign If Compare Assign Call Assign Call Call Assign Assign Call Call Call Call If Call If Call If Call Call If Call Call"
  },
  {
    "library": "sphinx",
    "name": "convert_locale_to_language_tag",
    "source_code": "def convert_locale_to_language_tag(locale: str | None) -> str | None:\n    if locale:\n        return locale.replace('_', '-')\n    else:\n        return None",
    "docstring": "Convert a locale string to a language tag (ex. en_US -> en-US). refs: BCP 47 (:rfc:)",
    "type": "function",
    "file_path": "sphinx\\sphinx\\builders\\html\\__init__.py",
    "ast_data": "FunctionDef name:convert_locale_to_language_tag arg:locale arguments arg If Return return:yes Call Return return:no"
  },
  {
    "library": "django",
    "name": "SessionStorage",
    "source_code": "class SessionStorage(BaseStorage):\n    session_key = '_messages'\n\n    def __init__(self, request, *args, **kwargs):\n        if not hasattr(request, 'session'):\n            raise ImproperlyConfigured('The session-based temporary message storage requires session middleware to be installed, and come before the message middleware in the MIDDLEWARE list.')\n        super().__init__(request, *args, **kwargs)\n\n    def _get(self, *args, **kwargs):\n        return (self.deserialize_messages(self.request.session.get(self.session_key)), True)\n\n    def _store(self, messages, response, *args, **kwargs):\n        if messages:\n            self.request.session[self.session_key] = self.serialize_messages(messages)\n        else:\n            self.request.session.pop(self.session_key, None)\n        return []\n\n    def serialize_messages(self, messages):\n        encoder = MessageEncoder()\n        return encoder.encode(messages)\n\n    def deserialize_messages(self, data):\n        if data and isinstance(data, str):\n            return json.loads(data, cls=MessageDecoder)\n        return data",
    "docstring": "Store messages in the session (that is, django.contrib.sessions).",
    "type": "class",
    "file_path": "django\\django\\contrib\\messages\\storage\\session.py",
    "ast_data": "ClassDef name:SessionStorage Assign FunctionDef name:__init__ arg:self arg:request arguments arg arg arg arg If Call Raise Call Call Call FunctionDef name:_get arg:self arguments arg arg arg Return return:yes Call Call FunctionDef name:_store arg:self arg:messages arg:response arguments arg arg arg arg arg If Assign Call Call Return return:no FunctionDef name:serialize_messages arg:self arg:messages arguments arg arg Assign Call Return return:yes Call FunctionDef name:deserialize_messages arg:self arg:data arguments arg arg If BoolOp Call Return return:yes Call Return return:yes"
  },
  {
    "library": "authlib",
    "name": "authenticate_client",
    "source_code": "def authenticate_client(self, request):\n    raise NotImplementedError()",
    "docstring": "Read a client from the request payload. Developers MUST implement this method in subclass:: def authenticate_client(self, request): client_id = request.payload.data.get(\"client_id\") return Client.get(client_id=client_id) :return: client instance",
    "type": "method",
    "file_path": "authlib\\authlib\\oauth2\\rfc7592\\endpoint.py",
    "ast_data": "FunctionDef name:authenticate_client arg:self arg:request arguments arg arg Raise Call"
  },
  {
    "library": "pytorch",
    "name": "has_file",
    "source_code": "def has_file(self, filename: str) -> bool:\n    lineage = filename.split('/', maxsplit=1)\n    child = lineage[0]\n    grandchildren = lineage[1] if len(lineage) > 1 else None\n    if child in self.children.keys():\n        if grandchildren is None:\n            return True\n        else:\n            return self.children[child].has_file(grandchildren)\n    return False",
    "docstring": "Checks if a file is present in a :class:. Args: filename (str): Path of file to search for. Returns: bool: If a :class: contains the specified file.",
    "type": "method",
    "file_path": "pytorch\\torch\\package\\file_structure_representation.py",
    "ast_data": "FunctionDef name:has_file arg:self arg:filename arguments arg arg Assign Call Assign Assign Compare Call If Compare Call If Compare Return return:yes Return return:yes Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "from_str",
    "source_code": "@staticmethod\ndef from_str(action_string: str):\n    action_string = action_string.strip()\n    if (match := _action_regex.match(action_string)):\n        stage_index, computation_type, microbatch_index = match.groups()\n        return _Action(int(stage_index), _ComputationType.from_str(computation_type), int(microbatch_index) if len(microbatch_index) else None)\n    elif action_string == '':\n        return None\n    raise RuntimeError(f'Invalid action string: {action_string}, should be formatted as [stage][action type][(microbatch)] e.g. 2F0')",
    "docstring": "Reverse of __repr__ String should be formatted as [stage][action type][(microbatch)] e.g. , ,",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\pipelining\\schedules.py",
    "ast_data": "FunctionDef name:from_str arg:action_string arguments arg Assign Call If Call Assign Call Return return:yes Call Call Call Call Call If Compare Return return:no Raise Call"
  },
  {
    "library": "pandas",
    "name": "get_cython_func",
    "source_code": "def get_cython_func(arg: Callable) -> str | None:\n    return _cython_table.get(arg)",
    "docstring": "if we define an internal function for this argument, return it",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\common.py",
    "ast_data": "FunctionDef name:get_cython_func arg:arg arguments arg Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "astype",
    "source_code": "def astype(self, dtype, casting='unsafe', copy=True):\n    dtype = getdtype(dtype)\n    if self.dtype != dtype:\n        return self.tocsr().astype(dtype, casting=casting, copy=copy).asformat(self.format)\n    elif copy:\n        return self.copy()\n    else:\n        return self",
    "docstring": "Cast the array/matrix elements to a specified type. Parameters ---------- dtype : string or numpy dtype Typecode or data-type to which to cast the data. casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional Controls what kind of data casting may occur. Defaults to 'unsafe' for backwards compatibility. 'no' means the data types should not be cast at all. 'equiv' means only byte-order changes are allowed. 'safe' means only casts which can preserve values are allowed. 'same_kind' means only safe casts or casts within a kind, like float64 to float32, are allowed. 'unsafe' means any data conversions may be done. copy : bool, optional If is , the result might share some memory with this array/matrix. If is , it is guaranteed that the result and this array/matrix do not share any memory.",
    "type": "method",
    "file_path": "scipy\\scipy\\sparse\\_base.py",
    "ast_data": "FunctionDef name:astype arg:self arg:dtype arg:casting arg:copy arguments arg arg arg arg Assign Call If Compare Return return:yes Call Call Call If Return return:yes Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_get_dense_tensor",
    "source_code": "def _get_dense_tensor(self, inputs, weight_collections=None, trainable=None):\n    del weight_collections\n    del trainable\n    return inputs.get(self)",
    "docstring": "Returns dense representing numeric feature. Args: inputs: A object to access inputs. weight_collections: Unused since no variables are created in this function. trainable: Unused bool since no variables are created in this function. Returns: Dense created within .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\feature_column\\feature_column.py",
    "ast_data": "FunctionDef name:_get_dense_tensor arg:self arg:inputs arg:weight_collections arg:trainable arguments arg arg arg arg Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "is_leaf",
    "source_code": "def is_leaf(self):\n    return self.left is None",
    "docstring": "Return True if the target node is a leaf. Returns ------- leafness : bool True if the target node is a leaf node.",
    "type": "method",
    "file_path": "scipy\\scipy\\cluster\\hierarchy.py",
    "ast_data": "FunctionDef name:is_leaf arg:self arguments arg Return return:yes Compare"
  },
  {
    "library": "kornia",
    "name": "match_nn",
    "source_code": "def match_nn(desc1: Tensor, desc2: Tensor, dm: Optional[Tensor]=None) -> Tuple[Tensor, Tensor]:\n    KORNIA_CHECK_SHAPE(desc1, ['B', 'DIM'])\n    KORNIA_CHECK_SHAPE(desc2, ['B', 'DIM'])\n    if len(desc1) == 0 or len(desc2) == 0:\n        return _no_match(desc1)\n    distance_matrix = _get_lazy_distance_matrix(desc1, desc2, dm)\n    match_dists, idxs_in_2 = torch.min(distance_matrix, dim=1)\n    idxs_in1 = torch.arange(0, idxs_in_2.size(0), device=idxs_in_2.device)\n    matches_idxs = concatenate([idxs_in1.view(-1, 1), idxs_in_2.view(-1, 1)], 1)\n    return (match_dists.view(-1, 1), matches_idxs.view(-1, 2))",
    "docstring": "Find nearest neighbors in desc2 for each vector in desc1. If the distance matrix dm is not provided, :py:func: is used. Args: desc1: Batch of descriptors of a shape :math:. desc2: Batch of descriptors of a shape :math:. dm: Tensor containing the distances from each descriptor in desc1 to each descriptor in desc2, shape of :math:. Returns: - Descriptor distance of matching descriptors, shape of :math:. - Long tensor indexes of matching descriptors in desc1 and desc2, shape of :math:.",
    "type": "function",
    "file_path": "kornia\\kornia\\feature\\matching.py",
    "ast_data": "FunctionDef name:match_nn arg:desc1 arg:desc2 arg:dm arguments arg arg arg Call Call If BoolOp Compare Call Compare Call Return return:yes Call Assign Call Assign Call Assign Call Call Assign Call Call Call Return return:yes Call Call"
  },
  {
    "library": "authlib",
    "name": "authorize_redirect",
    "source_code": "def authorize_redirect(self, redirect_uri=None, **kwargs):\n    rv = self.create_authorization_url(redirect_uri, **kwargs)\n    self.save_authorize_data(redirect_uri=redirect_uri, **rv)\n    return redirect(rv['url'])",
    "docstring": "Create a HTTP Redirect for Authorization Endpoint. :param redirect_uri: Callback or redirect URI for authorization. :param kwargs: Extra parameters to include. :return: A HTTP redirect response.",
    "type": "method",
    "file_path": "authlib\\authlib\\integrations\\flask_client\\apps.py",
    "ast_data": "FunctionDef name:authorize_redirect arg:self arg:redirect_uri arguments arg arg arg Assign Call Call Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "decision_function",
    "source_code": "@available_if(_estimators_has('decision_function'))\ndef decision_function(self, X):\n    check_is_fitted(self)\n    if len(self.estimators_) == 1:\n        return self.estimators_[0].decision_function(X)\n    return np.array([est.decision_function(X).ravel() for est in self.estimators_]).T",
    "docstring": "Decision function for the OneVsRestClassifier. Return the distance of each sample from the decision boundary for each class. This can only be used with estimators which implement the method. Parameters ---------- X : array-like of shape (n_samples, n_features) Input data. Returns ------- T : array-like of shape (n_samples, n_classes) or (n_samples,) for binary classification. Result of calling on the final estimator. .. versionchanged:: 0.19 output shape changed to `` to conform to scikit-learn conventions for binary classification.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\multiclass.py",
    "ast_data": "FunctionDef name:decision_function arg:self arg:X arguments arg arg Call If Compare Call Return return:yes Call Return return:yes Call Call Call Call Call"
  },
  {
    "library": "django",
    "name": "postgis_version_tuple",
    "source_code": "def postgis_version_tuple(self):\n    version = self.postgis_lib_version()\n    return (version, *get_version_tuple(version))",
    "docstring": "Return the PostGIS version as a tuple (version string, major, minor, subminor).",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\db\\backends\\postgis\\operations.py",
    "ast_data": "FunctionDef name:postgis_version_tuple arg:self arguments arg Assign Call Return return:yes Call"
  },
  {
    "library": "uvicorn",
    "name": "timeout_keep_alive_handler",
    "source_code": "def timeout_keep_alive_handler(self) -> None:\n    if not self.transport.is_closing():\n        event = h11.ConnectionClosed()\n        self.conn.send(event)\n        self.transport.close()",
    "docstring": "Called on a keep-alive connection if no new data is received after a short delay.",
    "type": "method",
    "file_path": "uvicorn\\uvicorn\\protocols\\http\\h11_impl.py",
    "ast_data": "FunctionDef name:timeout_keep_alive_handler arg:self arguments arg If Call Assign Call Call Call"
  },
  {
    "library": "matplotlib",
    "name": "get_usetex",
    "source_code": "def get_usetex(self):\n    return self._usetex",
    "docstring": "Return whether TeX's math mode is enabled for rendering.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\ticker.py",
    "ast_data": "FunctionDef name:get_usetex arg:self arguments arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "foreach",
    "source_code": "def foreach(triton_meta, num_warps, filename=None, inductor_meta=None):\n    return cached_autotune(None, [triton.Config({}, num_stages=1, num_warps=num_warps)], triton_meta=triton_meta, inductor_meta=inductor_meta, heuristic_type=HeuristicType.TEMPLATE, filename=filename)",
    "docstring": "Compile a triton foreach kernel",
    "type": "function",
    "file_path": "pytorch\\torch\\_inductor\\runtime\\triton_heuristics.py",
    "ast_data": "FunctionDef name:foreach arg:triton_meta arg:num_warps arg:filename arg:inductor_meta arguments arg arg arg arg Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "scatter_add",
    "source_code": "def scatter_add(self, sparse_delta, use_locking=False, name=None):\n    if not isinstance(sparse_delta, indexed_slices.IndexedSlices):\n        raise TypeError('sparse_delta is not IndexedSlices: %s' % sparse_delta)\n    return gen_state_ops.scatter_add(self._variable, sparse_delta.indices, sparse_delta.values, use_locking=use_locking, name=name)",
    "docstring": "Adds to this variable. Args: sparse_delta: to be added to this variable. use_locking: If , use locking during the operation. name: the name of the operation. Returns: A that will hold the new value of this variable after the scattered addition has completed. Raises: TypeError: if is not an .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\ref_variable.py",
    "ast_data": "FunctionDef name:scatter_add arg:self arg:sparse_delta arg:use_locking arg:name arguments arg arg arg arg If Call Raise Call Return return:yes Call"
  },
  {
    "library": "cryptography",
    "name": "openssl_version_text",
    "source_code": "def openssl_version_text(self) -> str:\n    return rust_openssl.openssl_version_text()",
    "docstring": "Friendly string name of the loaded OpenSSL library. This is not necessarily the same version as it was compiled against. Example: OpenSSL 3.2.1 30 Jan 2024",
    "type": "method",
    "file_path": "cryptography\\src\\cryptography\\hazmat\\backends\\openssl\\backend.py",
    "ast_data": "FunctionDef name:openssl_version_text arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "flatten",
    "source_code": "def flatten(x: torch.fx.node.Argument) -> NodeList:\n    r: NodeList = []\n    map_arg(x, r.append)\n    return r",
    "docstring": "Stores nodes in x to a list and returns the list.",
    "type": "function",
    "file_path": "pytorch\\torch\\fx\\passes\\split_utils.py",
    "ast_data": "FunctionDef name:flatten arg:x arguments arg Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "get_completions",
    "source_code": "def get_completions(self, context_word, prefix):\n    if context_word not in self._comp_dict:\n        return (None, None)\n    comp_items = self._comp_dict[context_word]\n    comp_items = sorted([item for item in comp_items if item.startswith(prefix)])\n    return (comp_items, self._common_prefix(comp_items))",
    "docstring": "Get the tab completions given a context word and a prefix. Args: context_word: The context word. prefix: The prefix of the incomplete word. Returns: (1) None if no registered context matches the context_word. A list of str for the matching completion items. Can be an empty list of a matching context exists, but no completion item matches the prefix. (2) Common prefix of all the words in the first return value. If the first return value is None, this return value will be None, too. If the first return value is not None, i.e., a list, this return value will be a str, which can be an empty str if there is no common prefix among the items of the list.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\debug\\cli\\debugger_cli_common.py",
    "ast_data": "FunctionDef name:get_completions arg:self arg:context_word arg:prefix arguments arg arg arg If Compare Return return:no Assign Assign Call Call Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "_is_memory_usage_qualified",
    "source_code": "@cache_readonly\ndef _is_memory_usage_qualified(self) -> bool:\n    return is_object_dtype(self.dtype) or (is_string_dtype(self.dtype) and self.dtype.storage == 'python')",
    "docstring": "Return a boolean if we need a qualified .info display.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\indexes\\base.py",
    "ast_data": "FunctionDef name:_is_memory_usage_qualified arg:self arguments arg Return return:yes BoolOp Call BoolOp Call Compare"
  },
  {
    "library": "tensorflow",
    "name": "_from_pylist_of_empty_dict",
    "source_code": "@classmethod\ndef _from_pylist_of_empty_dict(cls, pyval, rank):\n    if rank == 0:\n        return StructuredTensor.from_fields(fields={}, shape=(), validate=False)\n    elif rank == 1:\n        nrows = len(pyval)\n        shape = (nrows,)\n        return StructuredTensor.from_fields(fields={}, shape=shape, nrows=nrows)\n    elif rank > 1:\n        ragged_zeros = ragged_factory_ops.constant(_dicts_to_zeros(pyval))\n        nrows = len(pyval)\n        shape = tensor_shape.TensorShape([len(pyval)] + [None] * (rank - 1))\n        return StructuredTensor.from_fields(fields={}, shape=shape, row_partitions=ragged_zeros._nested_row_partitions, nrows=nrows)",
    "docstring": "Converts a pylist of empty dictionaries to StructuredTensors.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\structured\\structured_tensor.py",
    "ast_data": "FunctionDef name:_from_pylist_of_empty_dict arg:cls arg:pyval arg:rank arguments arg arg arg If Compare Return return:yes Call If Compare Assign Call Assign Return return:yes Call If Compare Assign Call Call Assign Call Assign Call Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "matmul",
    "source_code": "def matmul(self, x, adjoint=False, adjoint_arg=False, name='matmul'):\n    if isinstance(x, LinearOperator):\n        left_operator = self.adjoint() if adjoint else self\n        right_operator = x.adjoint() if adjoint_arg else x\n        if right_operator.range_dimension is not None and left_operator.domain_dimension is not None and (right_operator.range_dimension != left_operator.domain_dimension):\n            raise ValueError('Operators are incompatible. Expected `x` to have dimension {} but got {}.'.format(left_operator.domain_dimension, right_operator.range_dimension))\n        with self._name_scope(name):\n            return self._linop_matmul(left_operator, right_operator)\n    with self._name_scope(name):\n        x = tensor_conversion.convert_to_tensor_v2_with_dispatch(x, name='x')\n        self._check_input_dtype(x)\n        self_dim = -2 if adjoint else -1\n        arg_dim = -1 if adjoint_arg else -2\n        tensor_shape.dimension_at_index(self.shape, self_dim).assert_is_compatible_with(x.shape[arg_dim])\n        return self._matmul(x, adjoint=adjoint, adjoint_arg=adjoint_arg)",
    "docstring": "Transform [batch] matrix with left multiplication: . Args: x: or with compatible shape and same as . See class docstring for definition of compatibility. adjoint: Python . If , left multiply by the adjoint: . adjoint_arg: Python . If , compute where is the hermitian transpose (transposition and complex conjugation). name: A name for this . Returns: A or with shape and same as .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\linalg\\linear_operator.py",
    "ast_data": "FunctionDef name:matmul arg:self arg:x arg:adjoint arg:adjoint_arg arg:name arguments arg arg arg arg arg If Call Assign Call Assign Call If BoolOp Compare Compare Compare Raise Call Call With Call Return return:yes Call With Call Assign Call Call Assign Assign Call Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "analyze",
    "source_code": "@staticmethod\ndef analyze(model_path=None, model_content=None, gpu_compatibility=False, **kwargs):\n    if not model_path and (not model_content):\n        raise ValueError('neither `model_path` nor `model_content` is provided')\n    if model_path:\n        print(f'=== {model_path} ===\\n')\n        tflite_model = model_path\n        input_is_filepath = True\n    else:\n        print('=== TFLite ModelAnalyzer ===\\n')\n        tflite_model = model_content\n        input_is_filepath = False\n    if kwargs.get('experimental_use_mlir', False):\n        print(wrap_converter.wrapped_flat_buffer_file_to_mlir(tflite_model, input_is_filepath))\n    else:\n        print(_analyzer_wrapper.ModelAnalyzer(tflite_model, input_is_filepath, gpu_compatibility))",
    "docstring": "Analyzes the given tflite_model with dumping model structure. This tool provides a way to understand users' TFLite flatbuffer model by dumping internal graph structure. It also provides additional features like checking GPU delegate compatibility. WARNING: Experimental interface, subject to change. The output format is not guaranteed to stay stable, so don't write scripts to this. Args: model_path: TFLite flatbuffer model path. model_content: TFLite flatbuffer model object. gpu_compatibility: Whether to check GPU delegate compatibility. **kwargs: Experimental keyword arguments to analyze API. Returns: Print analyzed report via console output.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\lite\\python\\analyzer.py",
    "ast_data": "FunctionDef name:analyze arg:model_path arg:model_content arg:gpu_compatibility arguments arg arg arg arg If BoolOp Raise Call If Call Assign Assign Call Assign Assign If Call Call Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "profile",
    "source_code": "@contextlib.contextmanager\ndef profile():\n    try:\n        start()\n        yield\n    finally:\n        stop()",
    "docstring": "Enable profiling. Context Manager to enabling profile collection by the active profiling tool from CUDA backend. Example: >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_CUDA) >>> import torch >>> model = torch.nn.Linear(20, 30).cuda() >>> inputs = torch.randn(128, 20).cuda() >>> with torch.cuda.profiler.profile() as prof: ... model(inputs)",
    "type": "function",
    "file_path": "pytorch\\torch\\cuda\\profiler.py",
    "ast_data": "FunctionDef name:profile arguments Try Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_get_ops_from_graphdef",
    "source_code": "def _get_ops_from_graphdef(graph_def):\n    ops = set()\n    ops.update(_get_ops_from_nodedefs(graph_def.node))\n    for function in graph_def.library.function:\n        ops.update(_get_ops_from_nodedefs(function.node_def))\n    return ops",
    "docstring": "Gets the ops and kernels needed from the tensorflow model.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\tools\\selective_registration_header_lib.py",
    "ast_data": "FunctionDef name:_get_ops_from_graphdef arg:graph_def arguments arg Assign Call Call Call For Call Call Return return:yes"
  },
  {
    "library": "scipy",
    "name": "_process_quantiles",
    "source_code": "def _process_quantiles(self, x, dim):\n    x = np.asarray(x, dtype=float)\n    if x.ndim == 0:\n        x = x[np.newaxis]\n    elif x.ndim == 1:\n        if dim == 1:\n            x = x[:, np.newaxis]\n        else:\n            x = x[np.newaxis, :]\n    return x",
    "docstring": "Adjust quantiles array so that last axis labels the components of each data point.",
    "type": "method",
    "file_path": "scipy\\scipy\\stats\\_multivariate.py",
    "ast_data": "FunctionDef name:_process_quantiles arg:self arg:x arg:dim arguments arg arg arg Assign Call If Compare Assign If Compare If Compare Assign Assign Return return:yes"
  },
  {
    "library": "numpy",
    "name": "polyfit",
    "source_code": "def polyfit(x, y, deg, rcond=None, full=False, w=None, cov=False):\n    x = asarray(x)\n    y = asarray(y)\n    m = getmask(x)\n    if y.ndim == 1:\n        m = mask_or(m, getmask(y))\n    elif y.ndim == 2:\n        my = getmask(mask_rows(y))\n        if my is not nomask:\n            m = mask_or(m, my[:, 0])\n    else:\n        raise TypeError('Expected a 1D or 2D array for y!')\n    if w is not None:\n        w = asarray(w)\n        if w.ndim != 1:\n            raise TypeError('expected a 1-d array for weights')\n        if w.shape[0] != y.shape[0]:\n            raise TypeError('expected w and y to have the same length')\n        m = mask_or(m, getmask(w))\n    if m is not nomask:\n        not_m = ~m\n        if w is not None:\n            w = w[not_m]\n        return np.polyfit(x[not_m], y[not_m], deg, rcond, full, w, cov)\n    else:\n        return np.polyfit(x, y, deg, rcond, full, w, cov)",
    "docstring": "Any masked values in x is propagated in y, and vice-versa.",
    "type": "function",
    "file_path": "numpy\\numpy\\ma\\extras.py",
    "ast_data": "FunctionDef name:polyfit arg:x arg:y arg:deg arg:rcond arg:full arg:w arg:cov arguments arg arg arg arg arg arg arg Assign Call Assign Call Assign Call If Compare Assign Call Call If Compare Assign Call Call If Compare Assign Call Raise Call If Compare Assign Call If Compare Raise Call If Compare Raise Call Assign Call Call If Compare Assign If Compare Assign Return return:yes Call Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "cluster_optics_dbscan",
    "source_code": "@validate_params({'reachability': [np.ndarray], 'core_distances': [np.ndarray], 'ordering': [np.ndarray], 'eps': [Interval(Real, 0, None, closed='both')]}, prefer_skip_nested_validation=True)\ndef cluster_optics_dbscan(*, reachability, core_distances, ordering, eps):\n    n_samples = len(core_distances)\n    labels = np.zeros(n_samples, dtype=int)\n    far_reach = reachability > eps\n    near_core = core_distances <= eps\n    labels[ordering] = np.cumsum(far_reach[ordering] & near_core[ordering]) - 1\n    labels[far_reach & ~near_core] = -1\n    return labels",
    "docstring": "Perform DBSCAN extraction for an arbitrary epsilon. Extracting the clusters runs in linear time. Note that this results in `~sklearn.cluster.DBSCAN` parameter. Must be set to >> import numpy as np >>> from sklearn.cluster import cluster_optics_dbscan, compute_optics_graph >>> X = np.array([[1, 2], [2, 5], [3, 6], ... [8, 7], [8, 8], [7, 3]]) >>> ordering, core_distances, reachability, predecessor = compute_optics_graph( ... X, ... min_samples=2, ... max_eps=np.inf, ... metric=\"minkowski\", ... p=2, ... metric_params=None, ... algorithm=\"auto\", ... leaf_size=30, ... n_jobs=None, ... ) >>> eps = 4.5 >>> labels = cluster_optics_dbscan( ... reachability=reachability, ... core_distances=core_distances, ... ordering=ordering, ... eps=eps, ... ) >>> labels array([0, 0, 0, 1, 1, 1])",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\cluster\\_optics.py",
    "ast_data": "FunctionDef name:cluster_optics_dbscan arguments arg arg arg arg Assign Call Assign Call Assign Compare Assign Compare Assign Call Assign Return return:yes Call Call"
  },
  {
    "library": "scipy",
    "name": "csv_sniffer_has_bug_last_field",
    "source_code": "def csv_sniffer_has_bug_last_field():\n    has_bug = getattr(csv_sniffer_has_bug_last_field, 'has_bug', None)\n    if has_bug is None:\n        dialect = csv.Sniffer().sniff(\"3, 'a'\")\n        csv_sniffer_has_bug_last_field.has_bug = dialect.quotechar != \"'\"\n        has_bug = csv_sniffer_has_bug_last_field.has_bug\n    return has_bug",
    "docstring": "Checks if the bug is unpatched.",
    "type": "function",
    "file_path": "scipy\\scipy\\io\\arff\\_arffread.py",
    "ast_data": "FunctionDef name:csv_sniffer_has_bug_last_field arguments Assign Call If Compare Assign Call Call Assign Compare Assign Return return:yes"
  },
  {
    "library": "pandas",
    "name": "isin",
    "source_code": "def isin(self, values: ArrayLike) -> npt.NDArray[np.bool_]:\n    return isin(np.asarray(self), values)",
    "docstring": "Pointwise comparison for set containment in the given values. Roughly equivalent to Parameters ---------- values : np.ndarray or ExtensionArray Values to compare every element in the array against. Returns ------- np.ndarray[bool] With true at indices where value is in . See Also -------- DataFrame.isin: Whether each element in the DataFrame is contained in values. Index.isin: Return a boolean array where the index values are in values. Series.isin: Whether elements in Series are contained in values. Examples -------- >>> arr = pd.array([1, 2, 3]) >>> arr.isin([1]) [True, False, False] Length: 3, dtype: boolean",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\arrays\\base.py",
    "ast_data": "FunctionDef name:isin arg:self arg:values arguments arg arg Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_should_trace_in_control_flow",
    "source_code": "def _should_trace_in_control_flow(self):\n    if self._use_temp_cache():\n        return False\n    elif self._tt_config.device_type == _DEVICE_TYPE_TPU:\n        return self._use_tensor_values_cache() or self._use_tensor_buffer()\n    return True",
    "docstring": "Returns false incase it is not safe to trace ops in tf.cond or tf.while_loop.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\tpu\\tensor_tracer.py",
    "ast_data": "FunctionDef name:_should_trace_in_control_flow arg:self arguments arg If Call Return return:yes If Compare Return return:yes BoolOp Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_merge_with_spec",
    "source_code": "def _merge_with_spec(self, other: 'DynamicRaggedShape.Spec') -> 'DynamicRaggedShape':\n    max_num_row_partitions = max(self.num_row_partitions, other.num_row_partitions)\n    a = self._with_num_row_partitions(max_num_row_partitions)\n    b = other._with_num_row_partitions(max_num_row_partitions)\n    new_row_partitions = [rp_a._merge_with_spec(rp_b) for rp_a, rp_b in zip(a._row_partitions, b._row_partitions)]\n    new_dtype = b.dtype if a.dtype == dtypes.int32 else dtypes.int64\n    new_static_inner_shape = a._static_inner_shape.merge_with(b._static_inner_shape)\n    new_inner_shape = a._inner_shape\n    return DynamicRaggedShape(new_row_partitions, new_inner_shape, new_dtype, True, new_static_inner_shape)",
    "docstring": "Merge a spec with a DynamicRaggedShape.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\dynamic_ragged_shape.py",
    "ast_data": "FunctionDef name:_merge_with_spec arg:self arg:other arguments arg arg Assign Call Assign Call Assign Call Assign Call Call Assign Compare Assign Call Assign Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "MiniBatchDictionaryLearningBenchmark",
    "source_code": "class MiniBatchDictionaryLearningBenchmark(Transformer, Estimator, Benchmark):\n    param_names = ['fit_algorithm', 'n_jobs']\n    params = (['lars', 'cd'], Benchmark.n_jobs_vals)\n\n    def setup_cache(self):\n        super().setup_cache()\n\n    def make_data(self, params):\n        return _olivetti_faces_dataset()\n\n    def make_estimator(self, params):\n        fit_algorithm, n_jobs = params\n        estimator = MiniBatchDictionaryLearning(n_components=15, fit_algorithm=fit_algorithm, alpha=0.1, batch_size=3, random_state=0, n_jobs=n_jobs)\n        return estimator\n\n    def make_scorers(self):\n        make_dict_learning_scorers(self)",
    "docstring": "Benchmarks for MiniBatchDictionaryLearning",
    "type": "class",
    "file_path": "scikit-learn\\asv_benchmarks\\benchmarks\\decomposition.py",
    "ast_data": "ClassDef name:MiniBatchDictionaryLearningBenchmark Assign Assign FunctionDef name:setup_cache arg:self arguments arg Call Call FunctionDef name:make_data arg:self arg:params arguments arg arg Return return:yes Call FunctionDef name:make_estimator arg:self arg:params arguments arg arg Assign Assign Call Return return:yes FunctionDef name:make_scorers arg:self arguments arg Call"
  },
  {
    "library": "tensorflow",
    "name": "_get_sequence_dense_tensor",
    "source_code": "@abc.abstractmethod\ndef _get_sequence_dense_tensor(self, inputs, weight_collections=None, trainable=None):\n    pass",
    "docstring": "Returns a .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\feature_column\\feature_column.py",
    "ast_data": "FunctionDef name:_get_sequence_dense_tensor arg:self arg:inputs arg:weight_collections arg:trainable arguments arg arg arg arg"
  },
  {
    "library": "scipy",
    "name": "logpmf",
    "source_code": "def logpmf(self, x, n, p):\n    n, p, npcond = self._process_parameters(n, p)\n    x, xcond = self._process_quantiles(x, n, p)\n    result = self._logpmf(x, n, p)\n    xcond_ = xcond | np.zeros(npcond.shape, dtype=np.bool_)\n    result = self._checkresult(result, xcond_, -np.inf)\n    npcond_ = npcond | np.zeros(xcond.shape, dtype=np.bool_)\n    return self._checkresult(result, npcond_, np.nan)",
    "docstring": "Log of the Multinomial probability mass function. Parameters ---------- x : array_like Quantiles, with the last axis of denoting the components. %(_doc_default_callparams)s Returns ------- logpmf : ndarray or scalar Log of the probability mass function evaluated at Notes ----- %(_doc_callparams_note)s",
    "type": "method",
    "file_path": "scipy\\scipy\\stats\\_multivariate.py",
    "ast_data": "FunctionDef name:logpmf arg:self arg:x arg:n arg:p arguments arg arg arg arg Assign Call Assign Call Assign Call Assign Call Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "kornia",
    "name": "_get_batch_size",
    "source_code": "def _get_batch_size(self, points: Optional[tuple[Tensor, Tensor]], boxes: Optional[Tensor], masks: Optional[Tensor]) -> int:\n    if points is not None:\n        return points[0].shape[0]\n    elif boxes is not None:\n        return boxes.shape[0]\n    elif masks is not None:\n        return masks.shape[0]\n    else:\n        return 1",
    "docstring": "Get the batch size of the output given the batch size of the input prompts.",
    "type": "method",
    "file_path": "kornia\\kornia\\contrib\\models\\sam\\architecture\\prompt_encoder.py",
    "ast_data": "FunctionDef name:_get_batch_size arg:self arg:points arg:boxes arg:masks arguments arg arg arg arg If Compare Return return:yes If Compare Return return:yes If Compare Return return:yes Return return:yes"
  },
  {
    "library": "numpy",
    "name": "__lt__",
    "source_code": "def __lt__(self, other):\n    return less(self, other)",
    "docstring": "Return (self < other) element-wise. See Also -------- less",
    "type": "method",
    "file_path": "numpy\\numpy\\_core\\defchararray.py",
    "ast_data": "FunctionDef name:__lt__ arg:self arg:other arguments arg arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "find_closure_group",
    "source_code": "def find_closure_group(input_string, start, group):\n    inside_parenthesis = False\n    parens = 0\n    pos = start\n    p_start, p_end = (-1, -1)\n    while pos < len(input_string):\n        if input_string[pos] == group[0]:\n            if inside_parenthesis is False:\n                inside_parenthesis = True\n                parens = 1\n                p_start = pos\n            else:\n                parens += 1\n        elif input_string[pos] == group[1] and inside_parenthesis:\n            parens -= 1\n            if parens == 0:\n                p_end = pos\n                return (p_start, p_end)\n        pos += 1\n    return (None, None)",
    "docstring": "Generalization for finding a balancing closure group if group = [\"(\", \")\"], then finds the first balanced parentheses. if group = [\"{\", \"}\"], then finds the first balanced bracket. Given an input string, a starting position in the input string, and the group type, find_closure_group returns the positions of group[0] and group[1] as a tuple. Example: >>> find_closure_group(\"(hi)\", 0, [\"(\", \")\"]) (0, 3)",
    "type": "function",
    "file_path": "pytorch\\torch\\utils\\hipify\\hipify_python.py",
    "ast_data": "FunctionDef name:find_closure_group arg:input_string arg:start arg:group arguments arg arg arg Assign Assign Assign Assign While Compare Call If Compare If Compare Assign Assign Assign If BoolOp Compare If Compare Assign Return return:yes Return return:no"
  },
  {
    "library": "django",
    "name": "_from_sequence",
    "source_code": "def _from_sequence(self, seq):\n    self._envelope = OGREnvelope()\n    self._envelope.MinX = seq[0]\n    self._envelope.MinY = seq[1]\n    self._envelope.MaxX = seq[2]\n    self._envelope.MaxY = seq[3]",
    "docstring": "Initialize the C OGR Envelope structure from the given sequence.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\gdal\\envelope.py",
    "ast_data": "FunctionDef name:_from_sequence arg:self arg:seq arguments arg arg Assign Call Assign Assign Assign Assign"
  },
  {
    "library": "tensorflow",
    "name": "_SegmentProdGrad",
    "source_code": "@ops.RegisterGradient('SegmentProd')\ndef _SegmentProdGrad(op: ops.Operation, grad):\n    data = op.inputs[0]\n    segment_ids = op.inputs[1]\n    is_zero = math_ops.equal(data, 0)\n    num_zeros = gen_math_ops.segment_sum(math_ops.cast(is_zero, dtype=dtypes.int32), segment_ids)\n    grad = array_ops.where_v2(math_ops.greater(num_zeros, 1), array_ops.zeros_like(grad), grad)\n    non_zero_data = array_ops.where_v2(is_zero, array_ops.ones_like(data), data)\n    non_zero_prod = gen_math_ops.segment_prod(non_zero_data, segment_ids)\n    gathered_prod = array_ops.gather(op.outputs[0], segment_ids)\n    gathered_non_zero_prod = array_ops.gather(non_zero_prod, segment_ids)\n    prod_divided_by_el = gathered_prod / non_zero_data\n    partial_derivative = array_ops.where_v2(is_zero, gathered_non_zero_prod, prod_divided_by_el)\n    gathered_grad = array_ops.gather(grad, segment_ids)\n    return (gathered_grad * partial_derivative, None)",
    "docstring": "Gradient for SegmentProd. The gradient can be expressed for each segment by dividing the segment's product by each element of the segment input tensor, but this approach can't deal with zeros in the input. Unlike reduce_prod we can't use cumsum here as individual segments may have a different number of elements. Therefore we consider three cases: 1) A segment input contains no zeros and we can safely divide by the input tensor. 2) A segment contains exactly one zero. Then the gradient of each input of the segment is zero except for the 0-input, there the gradient is the product of the remaining segment entries. 3) A segment contains at least two zeros. The gradient is zero for all segment inputs.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\math_grad.py",
    "ast_data": "FunctionDef name:_SegmentProdGrad arg:op arg:grad arguments arg arg Assign Assign Assign Call Assign Call Call Assign Call Call Call Assign Call Call Assign Call Assign Call Assign Call Assign Assign Call Assign Call Return return:yes Call"
  },
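The three cases in the docstring above can be checked with a small NumPy re-derivation of the same rule. This is an illustrative sketch, not the TensorFlow implementation; all names here are local.

import numpy as np

data = np.array([2., 3., 0., 5., 0., 0.])   # segments: [2, 3], [0, 5], [0, 0]
seg = np.array([0, 0, 1, 1, 2, 2])
n_seg = 3

is_zero = data == 0
num_zeros = np.bincount(seg, weights=is_zero, minlength=n_seg)
non_zero_data = np.where(is_zero, 1.0, data)

prod = np.ones(n_seg)
np.multiply.at(prod, seg, data)              # true per-segment products
non_zero_prod = np.ones(n_seg)
np.multiply.at(non_zero_prod, seg, non_zero_data)

grad = np.ones_like(data)                    # upstream gradient of ones
grad = np.where(num_zeros[seg] > 1, 0.0, grad)          # case 3: >= 2 zeros
partial = np.where(is_zero,
                   non_zero_prod[seg],                  # case 2: at the zero
                   prod[seg] / non_zero_data)           # case 1: no zero here
print(grad * partial)                        # [3. 2. 5. 0. 0. 0.]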
  {
    "library": "scikit-learn",
    "name": "clean",
    "source_code": "@click.command()\ndef clean():\n    util.run([sys.executable, '-m', 'pip', 'uninstall', 'scikit-learn', '-y'])\n    default_meson_build_dir = f'build/cp{sys.version_info.major}{sys.version_info.minor}'\n    click.secho(f'removing default Meson build dir: {default_meson_build_dir}', bold=True, fg='bright_blue')\n    shutil.rmtree(default_meson_build_dir, ignore_errors=True)",
    "docstring": "🪥 Clean build folder. Very rarely needed since meson-python recompiles as needed when sklearn is imported. One known use case where \"spin clean\" is useful: avoid compilation errors when switching from numpy=2 in the same conda environment or virtualenv.",
    "type": "function",
    "file_path": "scikit-learn\\.spin\\cmds.py",
    "ast_data": "FunctionDef name:clean arguments Call Assign Call Call Call"
  },
  {
    "library": "matplotlib",
    "name": "update_position",
    "source_code": "def update_position(self, loc):\n    raise NotImplementedError('Derived must override')",
    "docstring": "Set the location of tick in data coords with scalar *loc*.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\axis.py",
    "ast_data": "FunctionDef name:update_position arg:self arg:loc arguments arg arg Raise Call"
  },
  {
    "library": "scikit-learn",
    "name": "fit",
    "source_code": "@_fit_context(prefer_skip_nested_validation=True)\ndef fit(self, X, y, sample_weight=None):\n    X, y = validate_data(self, X, y, accept_sparse='csr', dtype=np.float64, order='C', accept_large_sparse=False)\n    check_classification_targets(y)\n    self.classes_ = np.unique(y)\n    _dual = _validate_dual_parameter(self.dual, self.loss, self.penalty, self.multi_class, X)\n    self.coef_, self.intercept_, n_iter_ = _fit_liblinear(X, y, self.C, self.fit_intercept, self.intercept_scaling, self.class_weight, self.penalty, _dual, self.verbose, self.max_iter, self.tol, self.random_state, self.multi_class, self.loss, sample_weight=sample_weight)\n    self.n_iter_ = n_iter_.max().item()\n    if self.multi_class == 'crammer_singer' and len(self.classes_) == 2:\n        self.coef_ = (self.coef_[1] - self.coef_[0]).reshape(1, -1)\n        if self.fit_intercept:\n            intercept = self.intercept_[1] - self.intercept_[0]\n            self.intercept_ = np.array([intercept])\n    return self",
    "docstring": "Fit the model according to the given training data. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) Training vector, where is the number of samples and is the number of features. y : array-like of shape (n_samples,) Target vector relative to X. sample_weight : array-like of shape (n_samples,), default=None Array of weights that are assigned to individual samples. If not provided, then each sample is given unit weight. .. versionadded:: 0.18 Returns ------- self : object An instance of the estimator.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\svm\\_classes.py",
    "ast_data": "FunctionDef name:fit arg:self arg:X arg:y arg:sample_weight arguments arg arg arg arg Assign Call Call Assign Call Assign Call Assign Call Assign Call Call If BoolOp Compare Compare Call Assign Call If Assign Assign Call Return return:yes Call"
  },
  {
    "library": "django",
    "name": "root_nodes",
    "source_code": "def root_nodes(self, app=None):\n    roots = set()\n    for node in self.nodes:\n        if all((key[0] != node[0] for key in self.node_map[node].parents)) and (not app or app == node[0]):\n            roots.add(node)\n    return sorted(roots)",
    "docstring": "Return all root nodes - that is, nodes with no dependencies inside their app. These are the starting point for an app.",
    "type": "method",
    "file_path": "django\\django\\db\\migrations\\graph.py",
    "ast_data": "FunctionDef name:root_nodes arg:self arg:app arguments arg arg Assign Call For If BoolOp Call Compare BoolOp Compare Call Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "_qmvt",
    "source_code": "def _qmvt(m, nu, covar, low, high, rng, lattice='cbc', n_batches=10):\n    sn = max(1.0, np.sqrt(nu))\n    low = np.asarray(low, dtype=np.float64)\n    high = np.asarray(high, dtype=np.float64)\n    cho, lo, hi = _permuted_cholesky(covar, low / sn, high / sn)\n    n = cho.shape[0]\n    q, n_qmc_samples = _cbc_lattice(n, max(m // n_batches, 1))\n    rndm = rng.random(size=(n_batches, n))\n    prob, est_error, n_samples = _qmvt_inner(q, rndm, int(n_qmc_samples), int(n_batches), cho, lo, hi, float(nu))\n    return (prob, est_error, n_samples)",
    "docstring": "Multivariate t integration over box bounds. Parameters ---------- m : int > n_batches The number of points to sample. This number will be divided into batches that apply random offsets of the sampling lattice for each batch in order to estimate the error. nu : float >= 0 The shape parameter of the multivariate t distribution. covar : (n, n) float array Possibly singular, positive semidefinite symmetric covariance matrix. low, high : (n,) float array The low and high integration bounds. rng : Generator, optional default_rng(), yada, yada lattice : 'cbc' or callable The type of lattice rule to use to construct the integration points. n_batches : int > 0, optional The number of QMC batches to apply. Returns ------- prob : float The estimated probability mass within the bounds. est_error : float 3 times the standard error of the batch estimates. n_samples : int The number of samples actually used.",
    "type": "function",
    "file_path": "scipy\\scipy\\stats\\_qmvnt.py",
    "ast_data": "FunctionDef name:_qmvt arg:m arg:nu arg:covar arg:low arg:high arg:rng arg:lattice arg:n_batches arguments arg arg arg arg arg arg arg arg Assign Call Call Assign Call Assign Call Assign Call Assign Assign Call Call Assign Call Assign Call Call Call Call Return return:yes"
  },
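The batching scheme described in the docstring above (randomly shifted lattice batches, with est_error reported as 3 times the standard error across batches) can be seen on a toy 1-D integral. This is a generic randomized-QMC sketch, not scipy's _cbc_lattice machinery.

import numpy as np

rng = np.random.default_rng(0)
n_batches, n_per_batch = 10, 1024
estimates = []
for _ in range(n_batches):
    # 1-D rank-1 lattice with a random (Cranley-Patterson) shift per batch
    u = (np.arange(n_per_batch) / n_per_batch + rng.random()) % 1.0
    estimates.append(np.mean(u ** 2))        # integral of x^2 over [0, 1] is 1/3
estimates = np.asarray(estimates)
prob = estimates.mean()
est_error = 3.0 * estimates.std(ddof=1) / np.sqrt(n_batches)
print(prob, est_error)                       # ~0.3333 with a very small error bar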
  {
    "library": "matplotlib",
    "name": "Roundtooth",
    "source_code": "@_register_style(_style_list)\nclass Roundtooth(Sawtooth):\n\n    def __call__(self, x0, y0, width, height, mutation_size):\n        saw_vertices = self._get_sawtooth_vertices(x0, y0, width, height, mutation_size)\n        saw_vertices = np.concatenate([saw_vertices, [saw_vertices[0]]])\n        codes = [Path.MOVETO] + [Path.CURVE3, Path.CURVE3] * ((len(saw_vertices) - 1) // 2) + [Path.CLOSEPOLY]\n        return Path(saw_vertices, codes)",
    "docstring": "A box with a rounded sawtooth outline.",
    "type": "class",
    "file_path": "matplotlib\\lib\\matplotlib\\patches.py",
    "ast_data": "ClassDef name:Roundtooth FunctionDef name:__call__ arg:self arg:x0 arg:y0 arg:width arg:height arg:mutation_size arguments arg arg arg arg arg arg Assign Call Assign Call Assign Call Return return:yes Call Call"
  },
  {
    "library": "kornia",
    "name": "convert_points_to_homogeneous",
    "source_code": "def convert_points_to_homogeneous(points: Tensor) -> Tensor:\n    if not isinstance(points, Tensor):\n        raise TypeError(f'Input type is not a Tensor. Got {type(points)}')\n    if len(points.shape) < 2:\n        raise ValueError(f'Input must be at least a 2D tensor. Got {points.shape}')\n    return pad(points, [0, 1], 'constant', 1.0)",
    "docstring": "Convert points from Euclidean to homogeneous space. Args: points: the points to be transformed with shape :math:. Returns: the points in homogeneous coordinates :math:. Examples: >>> input = tensor([[0., 0.]]) >>> convert_points_to_homogeneous(input) tensor([[0., 0., 1.]])",
    "type": "function",
    "file_path": "kornia\\kornia\\geometry\\conversions.py",
    "ast_data": "FunctionDef name:convert_points_to_homogeneous arg:points arguments arg If Call Raise Call Call If Compare Call Raise Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "_replace_by_prefix",
    "source_code": "def _replace_by_prefix(state_dict: dict[str, Any], old_prefix: str, new_prefix: str) -> None:\n    if old_prefix == new_prefix:\n        raise ValueError('old_prefix and new_prefix must be distinct')\n    for key in list(state_dict.keys()):\n        if not key.startswith(old_prefix):\n            continue\n        new_key = new_prefix + key[len(old_prefix):]\n        state_dict[new_key] = state_dict[key]\n        del state_dict[key]",
    "docstring": "Replace all keys that match a given old_prefix with a new_prefix (in-place). Usage:: state_dict = {\"layer.xyz\": torch.tensor(1)} replace_by_prefix_(state_dict, \"layer.\", \"module.layer.\") assert state_dict == {\"module.layer.xyz\": torch.tensor(1)}",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\utils.py",
    "ast_data": "FunctionDef name:_replace_by_prefix arg:state_dict arg:old_prefix arg:new_prefix arguments arg arg arg If Compare Raise Call For Call Call If Call Assign Call Assign"
  },
  {
    "library": "pytorch",
    "name": "_is_output_dtype_supported_by_backend",
    "source_code": "def _is_output_dtype_supported_by_backend(node: Node, qconfig: QConfigAny, dtype_config: DTypeConfig) -> bool:\n    backend_config_output_dtype = dtype_config.output_dtype\n    qconfig_output_dtype = None\n    output_act_obs_or_fq_ctr = node.meta['target_dtype_info'].get('output_act_obs_or_fq_ctr', _DEFAULT_FP32_OBS_OR_FQ_CTR)\n    output_act_obs_or_fq = output_act_obs_or_fq_ctr() if output_act_obs_or_fq_ctr else None\n    qconfig_output_dtype, qconfig_output_is_dynamic = _get_dtype_and_is_dynamic(output_act_obs_or_fq)\n    if qconfig_output_is_dynamic:\n        qconfig_output_dtype = torch.float32\n    dtype_matches = qconfig_output_dtype == backend_config_output_dtype\n    qconfig_satisfies_constraints = _qconfig_satisfies_dtype_config_constraints(qconfig, dtype_config.output_dtype_with_constraints)\n    return backend_config_output_dtype is None or (dtype_matches and qconfig_satisfies_constraints)",
    "docstring": "Check if the configured qconfig for the output is supported by the backend or not",
    "type": "function",
    "file_path": "pytorch\\torch\\ao\\quantization\\fx\\prepare.py",
    "ast_data": "FunctionDef name:_is_output_dtype_supported_by_backend arg:node arg:qconfig arg:dtype_config arguments arg arg arg Assign Assign Assign Call Assign Call Assign Call If Assign Assign Compare Assign Call Return return:yes BoolOp Compare BoolOp"
  },
  {
    "library": "matplotlib",
    "name": "set_y",
    "source_code": "def set_y(self, y):\n    self._y = y\n    self.stale = True",
    "docstring": "Set the *y* position of the text. Parameters ---------- y : float",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\text.py",
    "ast_data": "FunctionDef name:set_y arg:self arg:y arguments arg arg Assign Assign"
  },
  {
    "library": "tensorflow",
    "name": "stack",
    "source_code": "def stack(self, name=None):\n    if self._tensor_array:\n        for ix in range(len(self._tensor_array)):\n            self._maybe_zero(ix)\n    if not self._tensor_array and self._element_shape.is_fully_defined():\n        return ops.convert_to_tensor(np.ndarray([0] + self._element_shape), name=name, dtype=self._dtype)\n    else:\n        return ops.convert_to_tensor(self._tensor_array, name=name, dtype=self._dtype)",
    "docstring": "See TensorArray.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\tensor_array_ops.py",
    "ast_data": "FunctionDef name:stack arg:self arg:name arguments arg arg If For Call Call Call If BoolOp Call Return return:yes Call Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "triton_type",
    "source_code": "def triton_type(dtype: torch.dtype) -> str:\n    triton_type_name = _triton_type_re.sub('tl.', str(dtype))\n    return _triton_type_mapping.get(triton_type_name, triton_type_name)",
    "docstring": "Convert torch.dtype to triton type",
    "type": "function",
    "file_path": "pytorch\\torch\\_inductor\\utils.py",
    "ast_data": "FunctionDef name:triton_type arg:dtype arguments arg Assign Call Call Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "fit_intercept_only",
    "source_code": "def fit_intercept_only(self, y_true, sample_weight=None):\n    out = np.zeros(self.n_classes, dtype=y_true.dtype)\n    eps = np.finfo(y_true.dtype).eps\n    for k in range(self.n_classes):\n        out[k] = np.average(y_true == k, weights=sample_weight, axis=0)\n        out[k] = np.clip(out[k], eps, 1 - eps)\n    return self.link.link(out[None, :]).reshape(-1)",
    "docstring": "Compute raw_prediction of an intercept-only model. This is the softmax of the weighted average of the target, i.e. over the samples axis=0.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\_loss\\loss.py",
    "ast_data": "FunctionDef name:fit_intercept_only arg:self arg:y_true arg:sample_weight arguments arg arg arg Assign Call Assign Call For Call Assign Call Compare Assign Call Return return:yes Call Call"
  },
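In isolation, the intercept-only computation above is: per-class (weighted) frequencies of the target, clipped away from 0 and 1, then mapped through the multiclass link. A sketch using a plain log as a stand-in for scikit-learn's multinomial link object:

import numpy as np

y_true = np.array([0, 0, 1, 2, 2, 2], dtype=np.float64)
n_classes = 3
eps = np.finfo(y_true.dtype).eps

out = np.array([np.average(y_true == k) for k in range(n_classes)])
out = np.clip(out, eps, 1 - eps)             # keep the link finite
raw_prediction = np.log(out)                 # stand-in for self.link.link(...)
print(out)                                   # [0.3333... 0.1666... 0.5]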
  {
    "library": "pytorch",
    "name": "host_memory_stats_as_nested_dict",
    "source_code": "def host_memory_stats_as_nested_dict() -> dict[str, Any]:\n    if not is_initialized():\n        return {}\n    return torch._C._cuda_hostMemoryStats()",
    "docstring": "Return the result of :func: as a nested dictionary.",
    "type": "function",
    "file_path": "pytorch\\torch\\cuda\\memory.py",
    "ast_data": "FunctionDef name:host_memory_stats_as_nested_dict arguments If Call Return return:no Return return:yes Call"
  },
  {
    "library": "django",
    "name": "describe",
    "source_code": "def describe(self):\n    description = \"'{}'\".format(self)\n    if self.name:\n        description += \" [name='{}']\".format(self.name)\n    return description",
    "docstring": "Format the URL pattern for display in warning messages.",
    "type": "method",
    "file_path": "django\\django\\urls\\resolvers.py",
    "ast_data": "FunctionDef name:describe arg:self arguments arg Assign Call If Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "load",
    "source_code": "def load(self, name: str, index: sympy.Expr) -> CSEVariable:\n    var = self.args.input(name)\n    index = self.prepare_indexing(index)\n    dtype = V.graph.get_dtype(name)\n    line = f'{var}[{self.index_to_str(index)}]'\n    if dtype in [torch.float16, torch.bfloat16]:\n        line = f'static_cast<float>({line})'\n        dtype = torch.float32\n    return self.cse.generate(self.loads, line, dtype=dtype)",
    "docstring": "Codegen a load from an InputBuffer",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\codegen\\mps.py",
    "ast_data": "FunctionDef name:load arg:self arg:name arg:index arguments arg arg arg Assign Call Assign Call Assign Call Assign Call If Compare Assign Assign Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "build",
    "source_code": "@staticmethod\ndef build(kernel: CppKernel):\n    itervars = kernel.itervars\n    ranges = kernel.ranges\n    reduction_depth = kernel.reduction_depth\n    assert reduction_depth is not None\n    loops: Optional[list[LoopLevel]] = None\n    for loop_idx, (var, size) in enumerate(zip(itervars, ranges)):\n        loop = LoopLevel(var, size)\n        if not loops:\n            loops = [loop]\n        else:\n            loops.append(loop)\n        if loop_idx >= reduction_depth:\n            loop.is_reduction = kernel.is_reduction\n    loop_nest = LoopNest(loops)\n    return loop_nest",
    "docstring": "Build a LoopNest with the given as the leaf",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\codegen\\cpp.py",
    "ast_data": "FunctionDef name:build arg:kernel arguments arg Assign Assign Assign Compare For Call Call Assign Call If Assign Call If Compare Assign Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "copy_function_def_to_graph_def_recursively",
    "source_code": "def copy_function_def_to_graph_def_recursively(func_name, graph_def, copied_functions, default_graph=None):\n    if func_name and (not is_function(func_name, default_graph)):\n        raise ValueError(f'Function {func_name} was not found. Please make sure the FunctionDef `fdef` is correct.')\n    if func_name in copied_functions:\n        return\n    copied_functions.add(func_name)\n    func_def = get_function_def(func_name, default_graph)\n    graph_def.library.function.add().CopyFrom(func_def)\n    for node_def in func_def.node_def:\n        op_def = default_graph.op_def_for_type(node_def.op)\n        for attr in op_def.attr:\n            if attr.type == 'func':\n                func_name = node_def.attr[attr.name].func.name\n                copy_function_def_to_graph_def_recursively(func_name, graph_def, copied_functions, default_graph)\n            elif attr.type == 'list(func)':\n                for fn in node_def.attr[attr.name].list.func:\n                    func_name = fn.name\n                    copy_function_def_to_graph_def_recursively(func_name, graph_def, copied_functions, default_graph)",
    "docstring": "Recursively copies s to . It copies the outermost and all nested s to . The enforces that every will be copied at most once. The s will be found from if this function was called in graph mode or from eager context if this function was called in eager mode. Args: func_name: The signature name of FunctionDef to be copied to . graph_def: The GraphDef that will contain all s in its library. copied_functions: A set contains all copied function names. default_graph: The where all s will be found in graph mode. Not used in eager mode.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\function_def_to_graph.py",
    "ast_data": "FunctionDef name:copy_function_def_to_graph_def_recursively arg:func_name arg:graph_def arg:copied_functions arg:default_graph arguments arg arg arg arg If BoolOp Call Raise Call If Compare Return return:no Call Assign Call Call Call For Assign Call For If Compare Assign Call If Compare For Assign Call"
  },
  {
    "library": "tensorflow",
    "name": "_RefVariableProcessor",
    "source_code": "class _RefVariableProcessor(_OptimizableVariable):\n\n    def __init__(self, v):\n        self._v = v\n\n    def __str__(self):\n        return '<_RefVariableProcessor(%s)>' % self._v\n\n    def target(self):\n        return self._v._ref()\n\n    def update_op(self, optimizer, g):\n        if isinstance(g, tensor.Tensor):\n            update_op = optimizer._apply_dense(g, self._v)\n            if self._v.constraint is not None:\n                with ops.control_dependencies([update_op]):\n                    return self._v.assign(self._v.constraint(self._v))\n            else:\n                return update_op\n        else:\n            assert isinstance(g, indexed_slices.IndexedSlices), ('Gradient ', g, ' is neither a tensor nor IndexedSlices.')\n            if self._v.constraint is not None:\n                raise RuntimeError('Cannot use a constraint function on a sparse variable.')\n            return optimizer._apply_sparse_duplicate_indices(g, self._v)",
    "docstring": "Processor for Variable.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\training\\optimizer.py",
    "ast_data": "ClassDef name:_RefVariableProcessor FunctionDef name:__init__ arg:self arg:v arguments arg arg Assign FunctionDef name:__str__ arg:self arguments arg Return return:yes FunctionDef name:target arg:self arguments arg Return return:yes Call FunctionDef name:update_op arg:self arg:optimizer arg:g arguments arg arg arg If Call Assign Call If Compare With Call Return return:yes Call Call Return return:yes Call If Compare Raise Call Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "__init__",
    "source_code": "def __init__(self, filename, *, metadata=None):\n    self._output_name = filename\n    self._n_figures = 0\n    self._metadata = (metadata or {}).copy()\n    self._info_dict = _create_pdf_info_dict('pgf', self._metadata)\n    self._file = BytesIO()",
    "docstring": "Create a new PdfPages object. Parameters ---------- filename : str or path-like Plots using will be written to a file at this location. Any older file with the same name is overwritten. metadata : dict, optional Information dictionary object (see PDF reference section 10.2.1 'Document Information Dictionary'), e.g.: `None`. Note that some versions of LaTeX engines may ignore the 'Producer' key and set it to themselves.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\backends\\backend_pgf.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:filename arguments arg arg arg Assign Assign Assign Call BoolOp Assign Call Assign Call"
  },
  {
    "library": "tensorflow",
    "name": "assert_no_new_python_objects",
    "source_code": "@trace.trace_wrapper\ndef assert_no_new_python_objects(self, threshold=None):\n    self._python_memory_checker.assert_no_new_objects(threshold=threshold)",
    "docstring": "Raises an exception if there are new Python objects created. It computes the number of new Python objects per type using the first and the last snapshots. Args: threshold: A dictionary of [Type name string], [count] pair. It won't raise an exception if the new Python objects are under this threshold.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\memory_checker.py",
    "ast_data": "FunctionDef name:assert_no_new_python_objects arg:self arg:threshold arguments arg arg Call"
  },
  {
    "library": "tensorflow",
    "name": "_split_cluster_for_evaluator",
    "source_code": "def _split_cluster_for_evaluator(cluster_spec, task_type):\n    new_cluster_spec = multi_worker_util.normalize_cluster_spec(cluster_spec).as_dict()\n    if task_type == _TaskType.EVALUATOR:\n        assert _TaskType.EVALUATOR in new_cluster_spec\n        new_cluster_spec = {_TaskType.EVALUATOR: new_cluster_spec[_TaskType.EVALUATOR]}\n    else:\n        new_cluster_spec.pop(_TaskType.EVALUATOR, None)\n    return multi_worker_util.normalize_cluster_spec(new_cluster_spec)",
    "docstring": "Split the cluster for evaluator since it needn't talk to other tasks.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\distribute_coordinator.py",
    "ast_data": "FunctionDef name:_split_cluster_for_evaluator arg:cluster_spec arg:task_type arguments arg arg Assign Call Call If Compare Compare Assign Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "custom_sharding_spec_op",
    "source_code": "def custom_sharding_spec_op(sharding_spec_class, func):\n    class_name = sharding_spec_class.__qualname__\n    if class_name not in _CUSTOM_SHARDING_SPEC_OPS:\n        _CUSTOM_SHARDING_SPEC_OPS[class_name] = {}\n    return functools.partial(_decorator_func, op=func, op_table=_CUSTOM_SHARDING_SPEC_OPS[class_name])",
    "docstring": "Decorator to allow custom registration of ops. Args: sharding_spec_class(type): The ShardingSpec for which we need to add this custom op. func(Callable): The op to override (ex: torch.bmm)",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\_shard\\sharding_spec\\api.py",
    "ast_data": "FunctionDef name:custom_sharding_spec_op arg:sharding_spec_class arg:func arguments arg arg Assign If Compare Assign Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "__init__",
    "source_code": "def __init__(self, x_transform, y_transform, **kwargs):\n    Transform.__init__(self, **kwargs)\n    self._x = x_transform\n    self._y = y_transform\n    self.set_children(x_transform, y_transform)\n    self._affine = None",
    "docstring": "Create a new \"blended\" transform using *x_transform* to transform the *x*-axis and *y_transform* to transform the *y*-axis. You will generally not call this constructor directly but use the function instead, which can determine automatically which kind of blended transform to create.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\transforms.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:x_transform arg:y_transform arguments arg arg arg arg Call Assign Assign Call Assign"
  },
  {
    "library": "matplotlib",
    "name": "semilogx",
    "source_code": "@_docstring.interpd\ndef semilogx(self, *args, **kwargs):\n    d = {k: v for k, v in kwargs.items() if k in ['base', 'subs', 'nonpositive', 'basex', 'subsx', 'nonposx']}\n    self.set_xscale('log', **d)\n    return self.plot(*args, **{k: v for k, v in kwargs.items() if k not in d})",
    "docstring": "Make a plot with log scaling on the x-axis. Call signatures:: semilogx([x], y, [fmt], data=None, **kwargs) semilogx([x], y, [fmt], [x2], y2, [fmt2], ..., **kwargs) This is just a thin wrapper around which additionally changes the x-axis to log scaling. All the concepts and parameters of plot can be used here as well. The additional parameters *base*, *subs*, and *nonpositive* control the x-axis properties. They are just forwarded to . Parameters ---------- base : float, default: 10 Base of the x logarithm. subs : array-like, optional The location of the minor xticks. If *None*, reasonable locations are automatically chosen depending on the number of decades in the plot. See for details. nonpositive : {'mask', 'clip'}, default: 'clip' Non-positive values in x can be masked as invalid, or clipped to a very small positive number. **kwargs All parameters supported by . Returns ------- list of Objects representing the plotted data.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\axes\\_axes.py",
    "ast_data": "FunctionDef name:semilogx arg:self arguments arg arg arg Assign Call Compare Call Return return:yes Call Call Compare"
  },
  {
    "library": "matplotlib",
    "name": "clear",
    "source_code": "def clear(self):\n    self._checks.set_facecolor(['none'] * len(self._active_check_colors))\n    if hasattr(self, '_lines'):\n        for l1, l2 in self._lines:\n            l1.set_visible(False)\n            l2.set_visible(False)\n    if self.drawon:\n        self.canvas.draw()\n    if self.eventson:\n        self._observers.process('clicked', None)",
    "docstring": "Uncheck all checkboxes.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\widgets.py",
    "ast_data": "FunctionDef name:clear arg:self arguments arg Call Call If Call For Call Call If Call If Call"
  },
  {
    "library": "pygame",
    "name": "draw_line",
    "source_code": "def draw_line(surf, color, from_point, to_point, width=1):\n    line = [from_point[0], from_point[1], to_point[0], to_point[1]]\n    return _clip_and_draw_line_width(surf, surf.get_clip(), color, line, width)",
    "docstring": "draw anti-aliased line between two endpoints.",
    "type": "function",
    "file_path": "pygame\\src_py\\draw_py.py",
    "ast_data": "FunctionDef name:draw_line arg:surf arg:color arg:from_point arg:to_point arg:width arguments arg arg arg arg arg Assign Return return:yes Call Call"
  },
  {
    "library": "scikit-learn",
    "name": "GradientBoostingClassifierBenchmark",
    "source_code": "class GradientBoostingClassifierBenchmark(Predictor, Estimator, Benchmark):\n    param_names = ['representation']\n    params = (['dense', 'sparse'],)\n\n    def setup_cache(self):\n        super().setup_cache()\n\n    def make_data(self, params):\n        representation, = params\n        if representation == 'sparse':\n            data = _20newsgroups_highdim_dataset()\n        else:\n            data = _20newsgroups_lowdim_dataset()\n        return data\n\n    def make_estimator(self, params):\n        representation, = params\n        n_estimators = 100 if Benchmark.data_size == 'large' else 10\n        estimator = GradientBoostingClassifier(n_estimators=n_estimators, max_features='log2', subsample=0.5, random_state=0)\n        return estimator\n\n    def make_scorers(self):\n        make_gen_classif_scorers(self)",
    "docstring": "Benchmarks for GradientBoostingClassifier.",
    "type": "class",
    "file_path": "scikit-learn\\asv_benchmarks\\benchmarks\\ensemble.py",
    "ast_data": "ClassDef name:GradientBoostingClassifierBenchmark Assign Assign FunctionDef name:setup_cache arg:self arguments arg Call Call FunctionDef name:make_data arg:self arg:params arguments arg arg Assign If Compare Assign Call Assign Call Return return:yes FunctionDef name:make_estimator arg:self arg:params arguments arg arg Assign Assign Compare Assign Call Return return:yes FunctionDef name:make_scorers arg:self arguments arg Call"
  },
  {
    "library": "kornia",
    "name": "lab_to_rgb",
    "source_code": "def lab_to_rgb(image: torch.Tensor, clip: bool=True) -> torch.Tensor:\n    if not isinstance(image, torch.Tensor):\n        raise TypeError(f'Input type is not a torch.Tensor. Got {type(image)}')\n    if len(image.shape) < 3 or image.shape[-3] != 3:\n        raise ValueError(f'Input size must have a shape of (*, 3, H, W). Got {image.shape}')\n    L: torch.Tensor = image[..., 0, :, :]\n    a: torch.Tensor = image[..., 1, :, :]\n    _b: torch.Tensor = image[..., 2, :, :]\n    fy = (L + 16.0) / 116.0\n    fx = a / 500.0 + fy\n    fz = fy - _b / 200.0\n    fz = fz.clamp(min=0.0)\n    fxyz = torch.stack([fx, fy, fz], dim=-3)\n    power = torch.pow(fxyz, 3.0)\n    scale = (fxyz - 4.0 / 29.0) / 7.787\n    xyz = torch.where(fxyz > 0.2068966, power, scale)\n    xyz_ref_white = torch.tensor([0.95047, 1.0, 1.08883], device=xyz.device, dtype=xyz.dtype)[..., :, None, None]\n    xyz_im = xyz * xyz_ref_white\n    rgbs_im: torch.Tensor = xyz_to_rgb(xyz_im)\n    rgb_im = linear_rgb_to_rgb(rgbs_im)\n    if clip:\n        rgb_im = torch.clamp(rgb_im, min=0.0, max=1.0)\n    return rgb_im",
    "docstring": "Convert a Lab image to RGB. The L channel is assumed to be in the range of :math:. a and b channels are in the range of :math:. Args: image: Lab image to be converted to RGB with shape :math:. clip: Whether to apply clipping to insure output RGB values in range :math:. Returns: Lab version of the image with shape :math:. The output RGB image are in the range of :math:. Example: >>> input = torch.rand(2, 3, 4, 5) >>> output = lab_to_rgb(input) # 2x3x4x5",
    "type": "function",
    "file_path": "kornia\\kornia\\color\\lab.py",
    "ast_data": "FunctionDef name:lab_to_rgb arg:image arg:clip arguments arg arg If Call Raise Call Call If BoolOp Compare Call Compare Raise Call Assign Assign Assign Assign Call Assign Call Assign Call Assign Assign Call Compare Assign Call Assign Call Assign Call If Assign Call Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "_parallel_build_trees",
    "source_code": "def _parallel_build_trees(tree, bootstrap, X, y, sample_weight, tree_idx, n_trees, verbose=0, class_weight=None, n_samples_bootstrap=None, missing_values_in_feature_mask=None):\n    if verbose > 1:\n        print('building tree %d of %d' % (tree_idx + 1, n_trees))\n    if bootstrap:\n        n_samples = X.shape[0]\n        if sample_weight is None:\n            curr_sample_weight = np.ones((n_samples,), dtype=np.float64)\n        else:\n            curr_sample_weight = sample_weight.copy()\n        indices = _generate_sample_indices(tree.random_state, n_samples, n_samples_bootstrap)\n        sample_counts = np.bincount(indices, minlength=n_samples)\n        curr_sample_weight *= sample_counts\n        if class_weight == 'subsample':\n            with catch_warnings():\n                simplefilter('ignore', DeprecationWarning)\n                curr_sample_weight *= compute_sample_weight('auto', y, indices=indices)\n        elif class_weight == 'balanced_subsample':\n            curr_sample_weight *= compute_sample_weight('balanced', y, indices=indices)\n        tree._fit(X, y, sample_weight=curr_sample_weight, check_input=False, missing_values_in_feature_mask=missing_values_in_feature_mask)\n    else:\n        tree._fit(X, y, sample_weight=sample_weight, check_input=False, missing_values_in_feature_mask=missing_values_in_feature_mask)\n    return tree",
    "docstring": "Private function used to fit a single tree in parallel.",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\ensemble\\_forest.py",
    "ast_data": "FunctionDef name:_parallel_build_trees arg:tree arg:bootstrap arg:X arg:y arg:sample_weight arg:tree_idx arg:n_trees arg:verbose arg:class_weight arg:n_samples_bootstrap arg:missing_values_in_feature_mask arguments arg arg arg arg arg arg arg arg arg arg arg If Compare Call If Assign If Compare Assign Call Assign Call Assign Call Assign Call If Compare With Call Call Call If Compare Call Call Call Return return:yes"
  },
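The bootstrap-as-weights trick used above is worth seeing in isolation: instead of materializing the resampled matrix, draw indices with replacement and multiply each sample's weight by its draw count. The snippet below is a minimal sketch of just that step.

import numpy as np

rng = np.random.RandomState(0)
n_samples = 6
indices = rng.randint(0, n_samples, n_samples)        # sample with replacement
sample_counts = np.bincount(indices, minlength=n_samples)

curr_sample_weight = np.ones(n_samples) * sample_counts
# Rows drawn multiple times get proportionally larger weight; rows never
# drawn get weight 0, exactly as if the tree were fit on the resampled rows.
print(sample_counts, curr_sample_weight)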
  {
    "library": "numpy",
    "name": "disp",
    "source_code": "def disp(mesg, device=None, linefeed=True):\n    warnings.warn('`disp` is deprecated, use your own printing function instead. (deprecated in NumPy 2.0)', DeprecationWarning, stacklevel=2)\n    if device is None:\n        device = sys.stdout\n    if linefeed:\n        device.write(f'{mesg}\\n')\n    else:\n        device.write(f'{mesg}')\n    device.flush()",
    "docstring": "Display a message on a device. .. deprecated:: 2.0 Use your own printing function instead. Parameters ---------- mesg : str Message to display. device : object Device to write message. If None, defaults to `devicedevice`, a file-like object can also be used as it has both required methods: >>> from io import StringIO >>> buf = StringIO() >>> np.disp('\"Display\" in a file', device=buf) >>> buf.getvalue() '\"Display\" in a file\\n'",
    "type": "function",
    "file_path": "numpy\\numpy\\lib\\_function_base_impl.py",
    "ast_data": "FunctionDef name:disp arg:mesg arg:device arg:linefeed arguments arg arg arg Call If Compare Assign If Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "is_registered_op",
    "source_code": "def is_registered_op(self, name: str, version: int) -> bool:\n    functions = self.get_function_group(name)\n    if functions is None:\n        return False\n    return functions.get(version) is not None",
    "docstring": "Returns whether the given op is registered for the given opset version.",
    "type": "method",
    "file_path": "pytorch\\torch\\onnx\\_internal\\registration.py",
    "ast_data": "FunctionDef name:is_registered_op arg:self arg:name arg:version arguments arg arg arg Assign Call If Compare Return return:yes Return return:yes Compare Call"
  },
  {
    "library": "pytorch",
    "name": "pop",
    "source_code": "def pop():\n    nonlocal nexti\n    if key_stack:\n        key = key_stack.pop()\n        if nexti <= key[1]:\n            exn_tab.append(ExceptionTableEntry(max(key[0], nexti), key[1], *exn_dict[key]))\n            nexti = key[1] + 2",
    "docstring": "Pop the key_stack and append an exception table entry if possible.",
    "type": "function",
    "file_path": "pytorch\\torch\\_dynamo\\bytecode_transformation.py",
    "ast_data": "FunctionDef name:pop arguments If Assign Call If Compare Call Call Call Assign"
  },
  {
    "library": "pytorch",
    "name": "friendly_debug_info",
    "source_code": "def friendly_debug_info(v: object) -> Argument:\n    if isinstance(v, torch.Tensor):\n        return f'Tensor({v.shape}, grad={v.requires_grad}, dtype={v.dtype})'\n    else:\n        return str(v)",
    "docstring": "Helper function to print out debug info in a friendly way.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\pipelining\\_debug.py",
    "ast_data": "FunctionDef name:friendly_debug_info arg:v arguments arg If Call Return return:yes Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "StorageWeakRef",
    "source_code": "class StorageWeakRef:\n    __slots__ = ['cdata', '_free_weak_ref']\n\n    def __init__(self, storage):\n        self.cdata = storage._weak_ref()\n        self._free_weak_ref = torch.Storage._free_weak_ref\n\n    @classmethod\n    def from_weakref(cls, cdata):\n        instance = cls.__new__(cls)\n        instance.cdata = cdata\n        instance._free_weak_ref = torch.Storage._free_weak_ref\n        return instance\n\n    def expired(self):\n        return torch.Storage._expired(self.cdata)\n\n    def __del__(self):\n        self._free_weak_ref(self.cdata)\n\n    def __hash__(self):\n        return self.cdata\n\n    def __eq__(self, other):\n        if id(self) == id(other):\n            return True\n        return self.cdata == other.cdata",
    "docstring": "A weak reference to a Storage. The cdata member is a Python number containing the integer representation of the Storage pointer.",
    "type": "class",
    "file_path": "pytorch\\torch\\multiprocessing\\reductions.py",
    "ast_data": "ClassDef name:StorageWeakRef Assign FunctionDef name:__init__ arg:self arg:storage arguments arg arg Assign Call Assign FunctionDef name:from_weakref arg:cls arg:cdata arguments arg arg Assign Call Assign Assign Return return:yes FunctionDef name:expired arg:self arguments arg Return return:yes Call FunctionDef name:__del__ arg:self arguments arg Call FunctionDef name:__hash__ arg:self arguments arg Return return:yes FunctionDef name:__eq__ arg:self arg:other arguments arg arg If Compare Call Call Return return:yes Return return:yes Compare"
  },
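A small usage sketch of the class above. It assumes CPython's refcount-based freeing, so the storage dies as soon as the last tensor referencing it does; exact storage APIs vary a little across torch versions.

import torch
from torch.multiprocessing.reductions import StorageWeakRef

t = torch.ones(4)
ref = StorageWeakRef(t.untyped_storage())
print(ref.expired())   # False: `t` still keeps the storage alive
del t
print(ref.expired())   # True: the weak ref observed the storage being freed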
  {
    "library": "cherrypy",
    "name": "iso_format",
    "source_code": "def iso_format(v):\n    return time.strftime('%Y-%m-%d %H:%M:%S', time.gmtime(v))",
    "docstring": "Format given date as ISO string.",
    "type": "function",
    "file_path": "cherrypy\\cherrypy\\lib\\cpstats.py",
    "ast_data": "FunctionDef name:iso_format arg:v arguments arg Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "with_options",
    "source_code": "@classmethod\ndef with_options(cls, **options):\n\n    class cls_with_options(cls):\n\n        def __init__(self, *args, **kwargs):\n            kwargs.update(options)\n            super().__init__(*args, **kwargs)\n    return cls_with_options",
    "docstring": "Return a subclass with alternative constructor that extends any original keyword arguments to the original constructor with the given options.",
    "type": "method",
    "file_path": "pytorch\\torch\\utils\\cpp_extension.py",
    "ast_data": "FunctionDef name:with_options arg:cls arguments arg arg ClassDef name:cls_with_options FunctionDef name:__init__ arg:self arguments arg arg arg Call Call Call Return return:yes"
  },
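The factory above is independent of cpp_extension; here is the same pattern applied to a toy class (the Greeter class is hypothetical, purely for illustration):

class Greeter:
    def __init__(self, name, punctuation="!"):
        self.message = f"Hello, {name}{punctuation}"

    @classmethod
    def with_options(cls, **options):
        class cls_with_options(cls):
            def __init__(self, *args, **kwargs):
                kwargs.update(options)       # baked-in options take precedence
                super().__init__(*args, **kwargs)
        return cls_with_options

QuietGreeter = Greeter.with_options(punctuation=".")
print(QuietGreeter("Ada").message)           # Hello, Ada.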
  {
    "library": "tensorflow",
    "name": "_get_or_create_assets_dir",
    "source_code": "def _get_or_create_assets_dir(export_dir):\n    assets_destination_dir = _get_assets_dir(export_dir)\n    file_io.recursive_create_dir(assets_destination_dir)\n    return assets_destination_dir",
    "docstring": "Return assets sub-directory, or create one if it doesn't exist.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\saving\\saved_model_experimental.py",
    "ast_data": "FunctionDef name:_get_or_create_assets_dir arg:export_dir arguments arg Assign Call Call Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "MissingValues",
    "source_code": "class MissingValues(NamedTuple):\n    nan: bool\n    none: bool\n\n    def to_list(self):\n        output = []\n        if self.none:\n            output.append(None)\n        if self.nan:\n            output.append(np.nan)\n        return output",
    "docstring": "Data class for missing data information",
    "type": "class",
    "file_path": "scikit-learn\\sklearn\\utils\\_encode.py",
    "ast_data": "ClassDef name:MissingValues FunctionDef name:to_list arg:self arguments arg Assign If Call If Call Return return:yes"
  },
  {
    "library": "kornia",
    "name": "ImageModule",
    "source_code": "class ImageModule(Module, ImageModuleMixIn, ONNXExportMixin):\n\n    def __init__(self, *args: Any, **kwargs: Any) -> None:\n        super().__init__(*args, **kwargs)\n        self._disable_features: bool = False\n\n    @property\n    def disable_features(self) -> bool:\n        return self._disable_features\n\n    @disable_features.setter\n    def disable_features(self, value: bool=True) -> None:\n        self._disable_features = value\n\n    def __call__(self, *inputs: Any, input_names_to_handle: Optional[list[Any]]=None, output_type: str='tensor', **kwargs: Any) -> Any:\n        if not self._disable_features:\n            decorated_forward = self.convert_input_output(input_names_to_handle=input_names_to_handle, output_type=output_type)(super().__call__)\n            _output_image = decorated_forward(*inputs, **kwargs)\n            if output_type == 'tensor':\n                self._output_image = self._detach_tensor_to_cpu(_output_image)\n            else:\n                self._output_image = _output_image\n        else:\n            _output_image = super().__call__(*inputs, **kwargs)\n        return _output_image",
    "docstring": "Handles image-based operations. This modules accepts multiple input and output data types, provides end-to-end visualization, file saving features. Note that this module fits the classes that return one image tensor only. Note: The additional add-on features increase the use of memories. To restore the original behaviour, you may set .",
    "type": "class",
    "file_path": "kornia\\kornia\\core\\module.py",
    "ast_data": "ClassDef name:ImageModule FunctionDef name:__init__ arg:self arguments arg arg arg Call Call FunctionDef name:disable_features arg:self arguments arg Return return:yes FunctionDef name:disable_features arg:self arg:value arguments arg arg Assign FunctionDef name:__call__ arg:self arguments arg arg arg arg arg If Assign Call Call Call Assign Call If Compare Assign Call Assign Assign Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_format_origin_stack",
    "source_code": "def _format_origin_stack(origin_stack, call_traceback_proto):\n    string_to_id = {}\n    string_to_id[None] = 0\n    for frame in origin_stack:\n        file_path, lineno, func_name, line_text = frame\n        call_traceback_proto.origin_stack.traces.add(file_id=_string_to_id(file_path, string_to_id), lineno=lineno, function_id=_string_to_id(func_name, string_to_id), line_id=_string_to_id(line_text, string_to_id))\n    id_to_string = call_traceback_proto.origin_id_to_string\n    for key, value in string_to_id.items():\n        id_to_string[value] = key if key is not None else ''",
    "docstring": "Format a traceback stack for a proto. Args: origin_stack: The stack list as returned by . call_traceback_proto: A proto whose fields are to be populated.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\debug\\lib\\source_remote.py",
    "ast_data": "FunctionDef name:_format_origin_stack arg:origin_stack arg:call_traceback_proto arguments arg arg Assign Assign For Assign Call Call Call Call Assign For Call Assign Compare"
  },
  {
    "library": "scikit-learn",
    "name": "get_feature_names_out",
    "source_code": "def get_feature_names_out(self, input_features=None):\n    check_is_fitted(self, 'n_features_in_')\n    input_features = _check_feature_names_in(self, input_features)\n    non_missing_mask = np.logical_not(_get_mask(self.statistics_, np.nan))\n    names = input_features[non_missing_mask]\n    return self._concatenate_indicator_feature_names_out(names, input_features)",
    "docstring": "Get output feature names for transformation. Parameters ---------- input_features : array-like of str or None, default=None Input features. - If is , then is used as feature names in. If is not defined, then the following input feature names are generated: . - If is an array-like, then must match if is defined. Returns ------- feature_names_out : ndarray of str objects Transformed feature names.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\impute\\_base.py",
    "ast_data": "FunctionDef name:get_feature_names_out arg:self arg:input_features arguments arg arg Call Assign Call Assign Call Call Assign Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "_ensure_2d",
    "source_code": "def _ensure_2d(values: np.ndarray) -> np.ndarray:\n    if values.ndim == 1:\n        values = values.reshape((values.shape[0], 1))\n    elif values.ndim != 2:\n        raise ValueError(f'Must pass 2-d input. shape={values.shape}')\n    return values",
    "docstring": "Reshape 1D values, raise on anything else other than 2D.",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\internals\\construction.py",
    "ast_data": "FunctionDef name:_ensure_2d arg:values arguments arg If Compare Assign Call If Compare Raise Call Return return:yes"
  },
  {
    "library": "numpy",
    "name": "array_str",
    "source_code": "@array_function_dispatch(_array_str_dispatcher, module='numpy')\ndef array_str(a, max_line_width=None, precision=None, suppress_small=None):\n    return _array_str_implementation(a, max_line_width, precision, suppress_small)",
    "docstring": "Return a string representation of the data in an array. The data in the array is returned as a single string. This function is similar to , the difference being that also returns information on the kind of array and its data type. Parameters ---------- a : ndarray Input array. max_line_width : int, optional Inserts newlines if text is longer than . Defaults to ``. See Also -------- array2string, array_repr, set_printoptions Examples -------- >>> import numpy as np >>> np.array_str(np.arange(3)) '[0 1 2]'",
    "type": "function",
    "file_path": "numpy\\numpy\\_core\\arrayprint.py",
    "ast_data": "FunctionDef name:array_str arg:a arg:max_line_width arg:precision arg:suppress_small arguments arg arg arg arg Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "build_wheel",
    "source_code": "def build_wheel(dir_path: str, cwd: str, project_name: str, platform: str, collab: str=False) -> None:\n    env = os.environ.copy()\n    if is_windows():\n        env['HOMEPATH'] = 'C:'\n    env['project_name'] = project_name\n    if collab == 'True':\n        env['collaborator_build'] = True\n    subprocess.run([sys.executable, 'tensorflow/tools/pip_package/setup.py', 'bdist_wheel', f'--dist-dir={dir_path}', f'--plat-name={platform}'], check=True, cwd=cwd, env=env)",
    "docstring": "Build the wheel in the target directory. Args: dir_path: directory where the wheel will be stored cwd: path to directory with wheel source files project_name: name to pass to setup.py. platform: platform name to pass to setup.py. collab: defines if this is a collab build",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\tools\\pip_package\\build_pip_package.py",
    "ast_data": "FunctionDef name:build_wheel arg:dir_path arg:cwd arg:project_name arg:platform arg:collab arguments arg arg arg arg arg Assign Call If Call Assign Assign If Compare Assign Call"
  },
  {
    "library": "tensorflow",
    "name": "softmax",
    "source_code": "@dispatch.dispatch_for_api(nn_ops.softmax_v2)\ndef softmax(logits: ragged_tensor.Ragged, axis=None, name=None):\n    if axis is None:\n        axis = -1\n    with ops.name_scope(name, 'RaggedSoftmax', [logits]) as name:\n        max_input = reduce_max(logits, axis=axis, keepdims=True)\n        logits_exp = math_ops.exp(math_ops.subtract(logits, max_input))\n        denominator = reduce_sum(logits_exp, axis=axis, keepdims=True)\n        return math_ops.divide(logits_exp, denominator)",
    "docstring": "Computes softmax activations. Used for multi-class predictions. The sum of all outputs generated by softmax is 1. This function performs the equivalent of softmax = tf.exp(logits) / tf.reduce_sum(tf.exp(logits), axis) Example usage: >>> softmax = tf.nn.softmax([-1, 0., 1.]) >>> softmax >>> sum(softmax) Args: logits: A non-empty . Must be one of the following types: , , . axis: The dimension softmax would be performed on. The default is -1 which indicates the last dimension. name: A name for the operation (optional). Returns: A . Has the same type and shape as . Raises: InvalidArgumentError: if is empty or is beyond the last dimension of .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\ragged_math_ops.py",
    "ast_data": "FunctionDef name:softmax arg:logits arg:axis arg:name arguments arg arg arg If Compare Assign With Call Assign Call Assign Call Call Assign Call Return return:yes Call Call"
  },
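The identity stated in the docstring above, checked numerically in plain Python. The ragged implementation also subtracts the per-axis max before exponentiating, which changes nothing mathematically but avoids overflow for large logits.

import math

logits = [-1.0, 0.0, 1.0]
exps = [math.exp(v) for v in logits]
softmax = [e / sum(exps) for e in exps]
print([round(s, 4) for s in softmax])   # [0.09, 0.2447, 0.6652]
print(round(sum(softmax), 6))           # 1.0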
  {
    "library": "pytorch",
    "name": "extract_weights",
    "source_code": "def extract_weights(mod: nn.Module) -> tuple[tuple[Tensor, ...], list[str]]:\n    orig_params = tuple(mod.parameters())\n    names = []\n    for name, p in list(mod.named_parameters()):\n        _del_nested_attr(mod, name.split('.'))\n        names.append(name)\n    params = tuple((p.detach().requires_grad_() for p in orig_params))\n    return (params, names)",
    "docstring": "This function removes all the Parameters from the model and return them as a tuple as well as their original attribute names. The weights must be re-loaded with before the model can be used again. Note that this function modifies the model in place and after this call, mod.parameters() will be empty.",
    "type": "function",
    "file_path": "pytorch\\benchmarks\\functional_autograd_benchmark\\utils.py",
    "ast_data": "FunctionDef name:extract_weights arg:mod arguments arg Assign Call Call Assign For Call Call Call Call Call Assign Call Call Call Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "distance_metrics",
    "source_code": "def distance_metrics():\n    return PAIRWISE_DISTANCE_FUNCTIONS",
    "docstring": "Valid metrics for pairwise_distances. This function simply returns the valid pairwise distance metrics. It exists to allow for a description of the mapping for each of the valid strings. The valid distance metrics, and the function they map to, are: =============== ======================================== metric Function =============== ======================================== 'cityblock' metrics.pairwise.manhattan_distances 'cosine' metrics.pairwise.cosine_distances 'euclidean' metrics.pairwise.euclidean_distances 'haversine' metrics.pairwise.haversine_distances 'l1' metrics.pairwise.manhattan_distances 'l2' metrics.pairwise.euclidean_distances 'manhattan' metrics.pairwise.manhattan_distances 'nan_euclidean' metrics.pairwise.nan_euclidean_distances =============== ======================================== Read more in the :ref:. Returns ------- distance_metrics : dict Returns valid metrics for pairwise_distances.",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\metrics\\pairwise.py",
    "ast_data": "FunctionDef name:distance_metrics arguments Return return:yes"
  },
  {
    "library": "numpy",
    "name": "hermvander3d",
    "source_code": "def hermvander3d(x, y, z, deg):\n    return pu._vander_nd_flat((hermvander, hermvander, hermvander), (x, y, z), deg)",
    "docstring": "Pseudo-Vandermonde matrix of given degrees. Returns the pseudo-Vandermonde matrix of degrees and sample points `lmnxyz`0 >> from numpy.polynomial.hermite import hermvander3d >>> x = np.array([-1, 0, 1]) >>> y = np.array([-1, 0, 1]) >>> z = np.array([-1, 0, 1]) >>> hermvander3d(x, y, z, [0, 1, 2]) array([[ 1., -2., 2., -2., 4., -4.], [ 1., 0., -2., 0., 0., -0.], [ 1., 2., 2., 2., 4., 4.]])",
    "type": "function",
    "file_path": "numpy\\numpy\\polynomial\\hermite.py",
    "ast_data": "FunctionDef name:hermvander3d arg:x arg:y arg:z arg:deg arguments arg arg arg arg Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "_validate_distribution",
    "source_code": "def _validate_distribution(values, weights):\n    values = np.asarray(values, dtype=float)\n    if len(values) == 0:\n        raise ValueError(\"Distribution can't be empty.\")\n    if weights is not None:\n        weights = np.asarray(weights, dtype=float)\n        if len(weights) != len(values):\n            raise ValueError('Value and weight array-likes for the same empirical distribution must be of the same size.')\n        if np.any(weights < 0):\n            raise ValueError('All weights must be non-negative.')\n        if not 0 < np.sum(weights) < np.inf:\n            raise ValueError('Weight array-like sum must be positive and finite. Set as None for an equal distribution of weight.')\n        return (values, weights)\n    return (values, None)",
    "docstring": "Validate the values and weights from a distribution input of and return them as ndarray objects. Parameters ---------- values : array_like Values observed in the (empirical) distribution. weights : array_like Weight for each value. Returns ------- values : ndarray Values as ndarray. weights : ndarray Weights as ndarray.",
    "type": "function",
    "file_path": "scipy\\scipy\\stats\\_stats_py.py",
    "ast_data": "FunctionDef name:_validate_distribution arg:values arg:weights arguments arg arg Assign Call If Compare Call Raise Call If Compare Assign Call If Compare Call Call Raise Call If Call Compare Raise Call If Compare Call Raise Call Return return:yes Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "set_max_tuning_iterations",
    "source_code": "def set_max_tuning_iterations(iterations: int) -> None:\n    torch._C._cuda_tunableop_set_max_tuning_iterations(iterations)",
    "docstring": "Set max number of iterations to spend tuning a given solution. If both max tuning duration and iterations are set, the smaller of the two will be honored. At minimum 1 tuning iteration will always be run.",
    "type": "function",
    "file_path": "pytorch\\torch\\cuda\\tunable.py",
    "ast_data": "FunctionDef name:set_max_tuning_iterations arg:iterations arguments arg Call"
  },
  {
    "library": "pytorch",
    "name": "close",
    "source_code": "def close(self):\n    self._execute_dependency_graph()\n    self._write_python_version()\n    self.script_module_serializer.write_files()\n    self._finalize_zip()",
    "docstring": "Write the package to the filesystem. Any calls after :meth: are now invalid. It is preferable to use resource guard syntax instead:: with PackageExporter(\"file.zip\") as e: ...",
    "type": "method",
    "file_path": "pytorch\\torch\\package\\package_exporter.py",
    "ast_data": "FunctionDef name:close arg:self arguments arg Call Call Call Call"
  },
  {
    "library": "kornia",
    "name": "slerp",
    "source_code": "def slerp(self, q1: 'Quaternion', t: float) -> 'Quaternion':\n    KORNIA_CHECK_TYPE(q1, Quaternion)\n    q0 = self.normalize()\n    q1 = q1.normalize()\n    return q0 * (q0.inv() * q1) ** t",
    "docstring": "Return a unit quaternion spherically interpolated between quaternions self.q and q1. See more: Args: q1: second quaternion to be interpolated between. t: interpolation ratio, range [0-1] Example: >>> q0 = Quaternion.identity() >>> q1 = Quaternion(torch.tensor([1., .5, 0., 0.])) >>> q2 = q0.slerp(q1, .3)",
    "type": "method",
    "file_path": "kornia\\kornia\\geometry\\quaternion.py",
    "ast_data": "FunctionDef name:slerp arg:self arg:q1 arg:t arguments arg arg arg Call Assign Call Assign Call Return return:yes Call"
  },
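The q0 * (q0.inv() * q1) ** t form above is algebraically equivalent to the classical angle-based slerp. A plain-NumPy sketch of the latter on (w, x, y, z) arrays; the slerp helper here is hypothetical, not kornia API:

import numpy as np

def slerp(q0, q1, t):
    q0 = q0 / np.linalg.norm(q0)
    q1 = q1 / np.linalg.norm(q1)
    theta = np.arccos(np.clip(np.dot(q0, q1), -1.0, 1.0))
    if np.isclose(theta, 0.0):
        return q0                            # nearly identical quaternions
    return (np.sin((1 - t) * theta) * q0 + np.sin(t * theta) * q1) / np.sin(theta)

q0 = np.array([1.0, 0.0, 0.0, 0.0])          # identity quaternion
q1 = np.array([1.0, 0.5, 0.0, 0.0])
print(slerp(q0, q1, 0.3))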
  {
    "library": "tensorflow",
    "name": "converted_self",
    "source_code": "def converted_self(self):\n    if self._converted_self is None:\n        old_name = self.function.signature.name\n        new_name = self._enclosing_graph.converted_function_names[old_name]\n        self.converted_enclosing_graph.rename_function(old_name, new_name)\n        self._converted_self = self.converted_enclosing_graph.functions[new_name]\n    return self._converted_self",
    "docstring": "The Function copy to be converted. The copy will be renamed according to the graph's converted_function_name map, to ensure the name does not match anything currently in TensorFlow's function cache. Returns: The function instance to be converted.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\convert_to_constants.py",
    "ast_data": "FunctionDef name:converted_self arg:self arguments arg If Compare Assign Assign Call Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "write",
    "source_code": "def write(self, record):\n    super(TFRecordWriter, self).write(record)",
    "docstring": "Write a string record to the file. Args: record: str",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\lib\\io\\tf_record.py",
    "ast_data": "FunctionDef name:write arg:self arg:record arguments arg arg Call Call"
  },
  {
    "library": "tensorflow",
    "name": "read_model_with_mutable_tensors",
    "source_code": "def read_model_with_mutable_tensors(input_tflite_file):\n    return copy.deepcopy(read_model(input_tflite_file))",
    "docstring": "Reads a tflite model as a python object with mutable tensors. Similar to read_model() with the addition that the returned object has mutable tensors (read_model() returns an object with immutable tensors). NOTE: This API only works for TFLite generated with _experimental_use_buffer_offset=false Args: input_tflite_file: Full path name to the input tflite file Raises: RuntimeError: If input_tflite_file path is invalid. IOError: If input_tflite_file cannot be opened. Returns: A mutable python object corresponding to the input tflite file.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\lite\\tools\\flatbuffer_utils.py",
    "ast_data": "FunctionDef name:read_model_with_mutable_tensors arg:input_tflite_file arguments arg Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_check_shape",
    "source_code": "def _check_shape(shape, key):\n    assert shape is not None\n    if not nest.is_nested(shape):\n        shape = [shape]\n    shape = tuple(shape)\n    for dimension in shape:\n        if not isinstance(dimension, int):\n            raise TypeError('shape dimensions must be integer. shape: {}, key: {}'.format(shape, key))\n        if dimension < 1:\n            raise ValueError('shape dimensions must be greater than 0. shape: {}, key: {}'.format(shape, key))\n    return shape",
    "docstring": "Returns shape if it's valid, raises error otherwise.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\feature_column\\feature_column_v2.py",
    "ast_data": "FunctionDef name:_check_shape arg:shape arg:key arguments arg arg Compare If Call Assign Assign Call For If Call Raise Call Call If Compare Raise Call Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "__init__",
    "source_code": "def __init__(self, positions, orientation='horizontal', *, lineoffset=0, linelength=1, linewidth=None, color=None, linestyle='solid', antialiased=None, **kwargs):\n    super().__init__([], linewidths=linewidth, linestyles=linestyle, colors=color, antialiaseds=antialiased, **kwargs)\n    self._is_horizontal = True\n    self._linelength = linelength\n    self._lineoffset = lineoffset\n    self.set_orientation(orientation)\n    self.set_positions(positions)",
    "docstring": "Parameters ---------- positions : 1D array-like Each value is an event. orientation : {'horizontal', 'vertical'}, default: 'horizontal' The sequence of events is plotted along this direction. The marker lines of the single events are along the orthogonal direction. lineoffset : float, default: 0 The offset of the center of the markers from the origin, in the direction orthogonal to *orientation*. linelength : float, default: 1 The total height of the marker (i.e. the marker stretches from `lines.linewidthcolorcolorlines.colorlines.antialiased.LineCollection`. Examples -------- .. plot:: gallery/lines_bars_and_markers/eventcollection_demo.py",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\collections.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:positions arg:orientation arguments arg arg arg arg arg arg arg arg arg arg Call Call Assign Assign Assign Call Call"
  },
  {
    "library": "tensorflow",
    "name": "size",
    "source_code": "def size(self, name=None):\n    with ops.name_scope(name, '%s_Size' % self.name):\n        if self._table:\n            tsize = self._table.size()\n        else:\n            tsize = ops.convert_to_tensor(0, dtype=dtypes.int64)\n        return tsize + self._num_oov_buckets",
    "docstring": "Compute the number of elements in this table.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\lookup_ops.py",
    "ast_data": "FunctionDef name:size arg:self arg:name arguments arg arg With Call If Assign Call Assign Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "union",
    "source_code": "@staticmethod\ndef union(bboxes):\n    if not len(bboxes):\n        raise ValueError(\"'bboxes' cannot be empty\")\n    x0 = np.min([bbox.xmin for bbox in bboxes])\n    x1 = np.max([bbox.xmax for bbox in bboxes])\n    y0 = np.min([bbox.ymin for bbox in bboxes])\n    y1 = np.max([bbox.ymax for bbox in bboxes])\n    return Bbox([[x0, y0], [x1, y1]])",
    "docstring": "Return a that contains all of the given *bboxes*.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\transforms.py",
    "ast_data": "FunctionDef name:union arg:bboxes arguments arg If Call Raise Call Assign Call Assign Call Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "numpy",
    "name": "import_file",
    "source_code": "def import_file(folder, module_name):\n    import importlib\n    import pathlib\n    fname = pathlib.Path(folder) / f'{module_name}.py'\n    spec = importlib.util.spec_from_file_location(module_name, str(fname))\n    module = importlib.util.module_from_spec(spec)\n    spec.loader.exec_module(module)\n    return module",
    "docstring": "Import a file directly, avoiding importing scipy",
    "type": "function",
    "file_path": "numpy\\numpy\\_build_utils\\__init__.py",
    "ast_data": "FunctionDef name:import_file arg:folder arg:module_name arguments arg arg Assign Call Assign Call Call Assign Call Call Return return:yes"
  },
  {
    "library": "scipy",
    "name": "_asfarray",
    "source_code": "def _asfarray(x):\n    if not hasattr(x, 'dtype'):\n        x = np.asarray(x)\n    if x.dtype == np.float16:\n        return np.asarray(x, np.float32)\n    elif x.dtype.kind not in 'fc':\n        return np.asarray(x, np.float64)\n    dtype = x.dtype.newbyteorder('=')\n    copy = True if not x.flags['ALIGNED'] else copy_if_needed\n    return np.array(x, dtype=dtype, copy=copy)",
    "docstring": "Convert to array with floating or complex dtype. float16 values are also promoted to float32.",
    "type": "function",
    "file_path": "scipy\\scipy\\fft\\_pocketfft\\helper.py",
    "ast_data": "FunctionDef name:_asfarray arg:x arguments arg If Call Assign Call If Compare Return return:yes Call If Compare Return return:yes Call Assign Call Assign Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "default_configs",
    "source_code": "@staticmethod\ndef default_configs():\n    raise ValueError('this method should be reimplemented by subclass')",
    "docstring": "return a list of defualt configs for this benchmark",
    "type": "method",
    "file_path": "pytorch\\benchmarks\\tensorexpr\\benchmark.py",
    "ast_data": "FunctionDef name:default_configs arguments Raise Call"
  },
  {
    "library": "tensorflow",
    "name": "get_input_details",
    "source_code": "def get_input_details(self):\n    result = {}\n    for input_name, tensor_index in self._inputs.items():\n        result[input_name] = self._interpreter._get_tensor_details(tensor_index, self._subgraph_index)\n    return result",
    "docstring": "Gets input tensor details. Returns: A dictionary from input name to tensor details where each item is a dictionary with details about an input tensor. Each dictionary contains the following fields that describe the tensor: + : The tensor name. + : The tensor index in the interpreter. + : The shape of the tensor. + : Same as for models with known/fixed shapes. If any dimension sizes are unknown, they are indicated with . + : The numpy data type (such as or ). + : Deprecated, use . This field only works for per-tensor quantization, whereas works in all cases. + : A dictionary of parameters used to quantize the tensor: ~ : List of scales (one if per-tensor quantization). ~ : List of zero_points (one if per-tensor quantization). ~ : Specifies the dimension of per-axis quantization, in the case of multiple scales/zero_points. + : A dictionary of parameters used to encode a sparse tensor. This is empty if the tensor is dense.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\lite\\python\\interpreter.py",
    "ast_data": "FunctionDef name:get_input_details arg:self arguments arg Assign For Call Assign Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_make_elementwise_unary_prim",
    "source_code": "def _make_elementwise_unary_prim(name: str, *, type_promotion: ELEMENTWISE_PRIM_TYPE_PROMOTION_KIND, **kwargs):\n    return _make_prim(schema=f'{name}(Tensor self) -> Tensor', meta=partial(_prim_elementwise_meta, type_promotion=type_promotion), return_type=RETURN_TYPE.NEW, **kwargs)",
    "docstring": "Creates an elementwise unary prim.",
    "type": "function",
    "file_path": "pytorch\\torch\\_prims\\__init__.py",
    "ast_data": "FunctionDef name:_make_elementwise_unary_prim arg:name arguments arg arg arg Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "TensorBoardDebugWrapperSession",
    "source_code": "class TensorBoardDebugWrapperSession(GrpcDebugWrapperSession):\n\n    def __init__(self, sess, grpc_debug_server_addresses, thread_name_filter=None, send_traceback_and_source_code=True):\n\n        def _gated_grpc_watch_fn(fetches, feeds):\n            del fetches, feeds\n            return framework.WatchOptions(debug_ops=['DebugIdentity(gated_grpc=true)'])\n        super().__init__(sess, grpc_debug_server_addresses, watch_fn=_gated_grpc_watch_fn, thread_name_filter=thread_name_filter)\n        self._send_traceback_and_source_code = send_traceback_and_source_code\n        self._sent_graph_version = -1\n        register_signal_handler()\n\n    def run(self, fetches, feed_dict=None, options=None, run_metadata=None, callable_runner=None, callable_runner_args=None, callable_options=None):\n        if self._send_traceback_and_source_code:\n            self._sent_graph_version = publish_traceback(self._grpc_debug_server_urls, self.graph, feed_dict, fetches, self._sent_graph_version)\n        return super().run(fetches, feed_dict=feed_dict, options=options, run_metadata=run_metadata, callable_runner=callable_runner, callable_runner_args=callable_runner_args, callable_options=callable_options)",
    "docstring": "A tfdbg Session wrapper that can be used with TensorBoard Debugger Plugin. This wrapper is the same as , except that it uses a predefined that 1) uses debug ops with the attribute set to to allow the interactive enabling and disabling of tensor breakpoints. 2) watches all tensors in the graph. This saves the need for the user to define a .",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\debug\\wrappers\\grpc_wrapper.py",
    "ast_data": "ClassDef name:TensorBoardDebugWrapperSession FunctionDef name:__init__ arg:self arg:sess arg:grpc_debug_server_addresses arg:thread_name_filter arg:send_traceback_and_source_code arguments arg arg arg arg arg FunctionDef name:_gated_grpc_watch_fn arg:fetches arg:feeds arguments arg arg Return return:yes Call Call Call Assign Assign Call FunctionDef name:run arg:self arg:fetches arg:feed_dict arg:options arg:run_metadata arg:callable_runner arg:callable_runner_args arg:callable_options arguments arg arg arg arg arg arg arg arg If Assign Call Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_get_or_create_global_step_read",
    "source_code": "def _get_or_create_global_step_read(graph=None):\n    graph = graph or ops.get_default_graph()\n    global_step_read_tensor = _get_global_step_read(graph)\n    if global_step_read_tensor is not None:\n        return global_step_read_tensor\n    global_step_tensor = get_global_step(graph)\n    if global_step_tensor is None:\n        return None\n    with graph.as_default() as g, g.name_scope(None):\n        with g.name_scope(global_step_tensor.op.name + '/'):\n            if isinstance(global_step_tensor, variables.Variable):\n                global_step_value = cond.cond(variable_v1.is_variable_initialized(global_step_tensor), global_step_tensor.read_value, lambda: global_step_tensor.initial_value)\n            else:\n                global_step_value = global_step_tensor\n            global_step_read_tensor = global_step_value + 0\n            ops.add_to_collection(GLOBAL_STEP_READ_KEY, global_step_read_tensor)\n    return _get_global_step_read(graph)",
    "docstring": "Gets or creates global step read tensor in graph. Args: graph: The graph in which to create the global step read tensor. If missing, use default graph. Returns: Global step read tensor if there is global_step_tensor else return None.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\training\\training_util.py",
    "ast_data": "FunctionDef name:_get_or_create_global_step_read arg:graph arguments arg Assign BoolOp Call Assign Call If Compare Return return:yes Assign Call If Compare Return return:no With Call Call With Call If Call Assign Call Call arguments Assign Assign Call Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "_get_param_names",
    "source_code": "def _get_param_names(self, method, return_alias, ignore_self_request=None):\n    return getattr(self, method)._get_param_names(return_alias=return_alias)",
    "docstring": "Get names of all metadata that can be consumed or routed by specified method. This method returns the names of all metadata, even the ``, aliases are ignored and original names are returned. ignore_self_request : bool Ignored. Present for API compatibility. Returns ------- names : set of str A set of strings with the names of all parameters.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\utils\\_metadata_requests.py",
    "ast_data": "FunctionDef name:_get_param_names arg:self arg:method arg:return_alias arg:ignore_self_request arguments arg arg arg arg Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_max_condition_number_to_be_non_singular",
    "source_code": "def _max_condition_number_to_be_non_singular(self):\n    with ops.name_scope('max_nonsingular_condition_number'):\n        dtype_eps = np.finfo(self.dtype.as_numpy_dtype).eps\n        eps = math_ops.cast(math_ops.reduce_max([100.0, math_ops.cast(self.range_dimension_tensor(), self.dtype), math_ops.cast(self.domain_dimension_tensor(), self.dtype)]), self.dtype) * dtype_eps\n        return 1.0 / eps",
    "docstring": "Return the maximum condition number that we consider nonsingular.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\linalg\\linear_operator.py",
    "ast_data": "FunctionDef name:_max_condition_number_to_be_non_singular arg:self arguments arg With Call Assign Call Assign Call Call Call Call Call Call Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "fit",
    "source_code": "@_fit_context(prefer_skip_nested_validation=False)\ndef fit(self, X, y, **params):\n    _raise_for_params(params, self, None)\n    X, y = indexable(X, y)\n    y_type = type_of_target(y, input_name='y')\n    if y_type != 'binary':\n        raise ValueError(f'Only binary classification is supported. Unknown label type: {y_type}')\n    self._fit(X, y, **params)\n    if hasattr(self.estimator_, 'n_features_in_'):\n        self.n_features_in_ = self.estimator_.n_features_in_\n    if hasattr(self.estimator_, 'feature_names_in_'):\n        self.feature_names_in_ = self.estimator_.feature_names_in_\n    return self",
    "docstring": "Fit the classifier. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) Training data. y : array-like of shape (n_samples,) Target values. **params : dict Parameters to pass to the method of the underlying classifier. Returns ------- self : object Returns an instance of self.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\model_selection\\_classification_threshold.py",
    "ast_data": "FunctionDef name:fit arg:self arg:X arg:y arguments arg arg arg arg Call Assign Call Assign Call If Compare Raise Call Call If Call Assign If Call Assign Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "reorder_arrays",
    "source_code": "def reorder_arrays(arrays: list[ArrayLike], arr_columns: Index, columns: Index | None, length: int) -> tuple[list[ArrayLike], Index]:\n    if columns is not None:\n        if not columns.equals(arr_columns):\n            new_arrays: list[ArrayLike] = []\n            indexer = arr_columns.get_indexer(columns)\n            for i, k in enumerate(indexer):\n                if k == -1:\n                    arr = np.empty(length, dtype=object)\n                    arr.fill(np.nan)\n                else:\n                    arr = arrays[k]\n                new_arrays.append(arr)\n            arrays = new_arrays\n            arr_columns = columns\n    return (arrays, arr_columns)",
    "docstring": "Preemptively (cheaply) reindex arrays with new columns.",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\internals\\construction.py",
    "ast_data": "FunctionDef name:reorder_arrays arg:arrays arg:arr_columns arg:columns arg:length arguments arg arg arg arg If Compare If Call Assign Call For Call If Compare Assign Call Call Assign Call Assign Assign Return return:yes"
  },
  {
    "library": "numpy",
    "name": "get_flags_f77",
    "source_code": "def get_flags_f77(self):\n    return self._get_command_flags('compiler_f77')",
    "docstring": "List of Fortran 77 specific flags.",
    "type": "method",
    "file_path": "numpy\\numpy\\distutils\\fcompiler\\__init__.py",
    "ast_data": "FunctionDef name:get_flags_f77 arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "from_cardan_angles",
    "source_code": "@classmethod\ndef from_cardan_angles(cls, elev, azim, roll):\n    ca, sa = (np.cos(azim / 2), np.sin(azim / 2))\n    ce, se = (np.cos(elev / 2), np.sin(elev / 2))\n    cr, sr = (np.cos(roll / 2), np.sin(roll / 2))\n    qw = ca * ce * cr + sa * se * sr\n    qx = ca * ce * sr - sa * se * cr\n    qy = ca * se * cr + sa * ce * sr\n    qz = ca * se * sr - sa * ce * cr\n    return cls(qw, [qx, qy, qz])",
    "docstring": "Converts the angles to a quaternion q = exp((roll/2)*e_x)*exp((elev/2)*e_y)*exp((-azim/2)*e_z) i.e., the angles are a kind of Tait-Bryan angles, -z,y',x\". The angles should be given in radians, not degrees.",
    "type": "method",
    "file_path": "matplotlib\\lib\\mpl_toolkits\\mplot3d\\axes3d.py",
    "ast_data": "FunctionDef name:from_cardan_angles arg:cls arg:elev arg:azim arg:roll arguments arg arg arg arg Assign Call Call Assign Call Call Assign Call Call Assign Assign Assign Assign Return return:yes Call"
  },
  {
    "library": "seaborn",
    "name": "_get_win_folder_from_registry",
    "source_code": "def _get_win_folder_from_registry(csidl_name):\n    import winreg as _winreg\n    shell_folder_name = {'CSIDL_APPDATA': 'AppData', 'CSIDL_COMMON_APPDATA': 'Common AppData', 'CSIDL_LOCAL_APPDATA': 'Local AppData'}[csidl_name]\n    key = _winreg.OpenKey(_winreg.HKEY_CURRENT_USER, 'Software\\\\Microsoft\\\\Windows\\\\CurrentVersion\\\\Explorer\\\\Shell Folders')\n    dir, type = _winreg.QueryValueEx(key, shell_folder_name)\n    return dir",
    "docstring": "This is a fallback technique at best. I'm not sure if using the registry for this guarantees us the correct answer for all CSIDL_* names.",
    "type": "function",
    "file_path": "seaborn\\seaborn\\external\\appdirs.py",
    "ast_data": "FunctionDef name:_get_win_folder_from_registry arg:csidl_name arguments arg Assign Assign Call Assign Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "defines",
    "source_code": "@staticmethod\ndef defines(args: list[str], **kwargs: CMakeValue) -> None:\n    for key, value in sorted(kwargs.items()):\n        if value is not None:\n            args.append(f'-D{key}={value}')",
    "docstring": "Adds definitions to a cmake argument list.",
    "type": "method",
    "file_path": "pytorch\\tools\\setup_helpers\\cmake.py",
    "ast_data": "FunctionDef name:defines arg:args arguments arg arg For Call Call If Compare Call"
  },
  {
    "library": "matplotlib",
    "name": "_get_ticklabel_bboxes",
    "source_code": "def _get_ticklabel_bboxes(self, ticks, renderer=None):\n    if renderer is None:\n        renderer = self.get_figure(root=True)._get_renderer()\n    return ([tick.label1.get_window_extent(renderer) for tick in ticks if tick.label1.get_visible()], [tick.label2.get_window_extent(renderer) for tick in ticks if tick.label2.get_visible()])",
    "docstring": "Return lists of bboxes for ticks' label1's and label2's.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\axis.py",
    "ast_data": "FunctionDef name:_get_ticklabel_bboxes arg:self arg:ticks arg:renderer arguments arg arg arg If Compare Assign Call Call Return return:yes Call Call Call Call"
  },
  {
    "library": "django",
    "name": "_check_fix_default_value",
    "source_code": "def _check_fix_default_value(self):\n    if not self.has_default():\n        return []\n    value = self.default\n    if isinstance(value, (datetime.datetime, datetime.date)):\n        return self._check_if_value_fixed(value)\n    return []",
    "docstring": "Warn that using an actual date or datetime value is probably wrong; it's only evaluated on server startup.",
    "type": "method",
    "file_path": "django\\django\\db\\models\\fields\\__init__.py",
    "ast_data": "FunctionDef name:_check_fix_default_value arg:self arguments arg If Call Return return:no Assign If Call Return return:yes Call Return return:no"
  },
  {
    "library": "cherrypy",
    "name": "getargspec",
    "source_code": "def getargspec(callable):\n    return inspect.getfullargspec(callable)[:4]",
    "docstring": "Get argument specification using :mod:.",
    "type": "function",
    "file_path": "cherrypy\\cherrypy\\_cpdispatch.py",
    "ast_data": "FunctionDef name:getargspec arg:callable arguments arg Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "_repr_html_",
    "source_code": "@property\ndef _repr_html_(self):\n    if get_config()['display'] != 'diagram':\n        raise AttributeError(\"_repr_html_ is only defined when the 'display' configuration option is set to 'diagram'\")\n    return self._repr_html_inner",
    "docstring": "HTML representation of estimator. This is redundant with the logic of . The latter should be favored in the long term, is only implemented for consumers who do not interpret .",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\utils\\_repr_html\\base.py",
    "ast_data": "FunctionDef name:_repr_html_ arg:self arguments arg If Compare Call Raise Call Return return:yes"
  },
  {
    "library": "numpy",
    "name": "_normed_hermite_e_n",
    "source_code": "def _normed_hermite_e_n(x, n):\n    if n == 0:\n        return np.full(x.shape, 1 / np.sqrt(np.sqrt(2 * np.pi)))\n    c0 = 0.0\n    c1 = 1.0 / np.sqrt(np.sqrt(2 * np.pi))\n    nd = float(n)\n    for i in range(n - 1):\n        tmp = c0\n        c0 = -c1 * np.sqrt((nd - 1.0) / nd)\n        c1 = tmp + c1 * x * np.sqrt(1.0 / nd)\n        nd = nd - 1.0\n    return c0 + c1 * x",
    "docstring": "Evaluate a normalized HermiteE polynomial. Compute the value of the normalized HermiteE polynomial of degree ``. Parameters ---------- x : ndarray of double. Points at which to evaluate the function n : int Degree of the normalized HermiteE function to be evaluated. Returns ------- values : ndarray The shape of the return value is described above. Notes ----- This function is needed for finding the Gauss points and integration weights for high degrees. The values of the standard HermiteE functions overflow when n >= 207.",
    "type": "function",
    "file_path": "numpy\\numpy\\polynomial\\hermite_e.py",
    "ast_data": "FunctionDef name:_normed_hermite_e_n arg:x arg:n arguments arg arg If Compare Return return:yes Call Call Call Assign Assign Call Call Assign Call For Call Assign Assign Call Assign Call Assign Return return:yes"
  },
  {
    "library": "scipy",
    "name": "_Dij",
    "source_code": "def _Dij(A, i, j):\n    return A[i + 1:, :j].sum() + A[:i, j + 1:].sum()",
    "docstring": "Sum of lower-left and upper-right blocks of contingency table.",
    "type": "function",
    "file_path": "scipy\\scipy\\stats\\_stats_pythran.py",
    "ast_data": "FunctionDef name:_Dij arg:A arg:i arg:j arguments arg arg arg Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "rot90",
    "source_code": "@tf_export('image.rot90')\n@dispatch.add_dispatch_support\ndef rot90(image, k=1, name=None):\n    with ops.name_scope(name, 'rot90', [image, k]) as scope:\n        image = ops.convert_to_tensor(image, name='image')\n        image = _AssertAtLeast3DImage(image)\n        k = ops.convert_to_tensor(k, dtype=dtypes.int32, name='k')\n        k.get_shape().assert_has_rank(0)\n        k = math_ops.mod(k, 4)\n        shape = image.get_shape()\n        if shape.ndims is None:\n            rank = array_ops.rank(image)\n\n            def f_rank3():\n                return _rot90_3D(image, k, scope)\n\n            def f_rank4():\n                return _rot90_4D(image, k, scope)\n            return tf_cond.cond(math_ops.equal(rank, 3), f_rank3, f_rank4)\n        elif shape.ndims == 3:\n            return _rot90_3D(image, k, scope)\n        elif shape.ndims == 4:\n            return _rot90_4D(image, k, scope)\n        else:\n            raise ValueError(\"'image' (shape %s) must have either 3 or 4 dimensions.\" % shape)",
    "docstring": "Rotate image(s) by 90 degrees. For example: >>> a=tf.constant([[[1],[2]], ... [[3],[4]]]) >>> # rotating counter clockwise by 90 degrees >>> a_rot=tf.image.rot90(a) >>> print(a_rot[...,0].numpy()) [[2 4] [1 3]] >>> # rotating counter clockwise by 270 degrees >>> a_rot=tf.image.rot90(a, k=3) >>> print(a_rot[...,0].numpy()) [[3 1] [4 2]] >>> # rotating clockwise by 180 degrees >>> a_rot=tf.image.rot90(a, k=-2) >>> print(a_rot[...,0].numpy()) [[4 3] [2 1]] Args: image: 4-D Tensor of shape or 3-D Tensor of shape . k: A scalar integer tensor. The number of times the image(s) are rotated by 90 degrees. name: A name for this operation (optional). Returns: A rotated tensor of the same type and shape as . Raises: ValueError: if the shape of not supported.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\image_ops_impl.py",
    "ast_data": "FunctionDef name:rot90 arg:image arg:k arg:name arguments arg arg arg With Call Assign Call Assign Call Assign Call Call Call Assign Call Assign Call If Compare Assign Call FunctionDef name:f_rank3 arguments Return return:yes Call FunctionDef name:f_rank4 arguments Return return:yes Call Return return:yes Call Call If Compare Return return:yes Call If Compare Return return:yes Call Raise Call Call"
  },
  {
    "library": "pytorch",
    "name": "sdpa_backward_flop",
    "source_code": "@register_flop_formula([aten._scaled_dot_product_efficient_attention_backward, aten._scaled_dot_product_flash_attention_backward, aten._scaled_dot_product_cudnn_attention_backward])\ndef sdpa_backward_flop(grad_out_shape, query_shape, key_shape, value_shape, *args, out_shape=None, **kwargs) -> int:\n    return sdpa_backward_flop_count(grad_out_shape, query_shape, key_shape, value_shape)",
    "docstring": "Count flops for self-attention backward.",
    "type": "function",
    "file_path": "pytorch\\torch\\utils\\flop_counter.py",
    "ast_data": "FunctionDef name:sdpa_backward_flop arg:grad_out_shape arg:query_shape arg:key_shape arg:value_shape arguments arg arg arg arg arg arg arg Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "_check",
    "source_code": "def _check(cond, message=None):\n    _check_with(RuntimeError, cond, message)",
    "docstring": "Throws error containing an optional message if the specified condition is False. Error type: `bool`",
    "type": "function",
    "file_path": "pytorch\\torch\\__init__.py",
    "ast_data": "FunctionDef name:_check arg:cond arg:message arguments arg arg Call"
  },
  {
    "library": "tensorflow",
    "name": "_disable_summary_context",
    "source_code": "@contextlib.contextmanager\ndef _disable_summary_context():\n    original_skip_summary_func = summary_op_util.skip_summary\n    summary_op_util.skip_summary = lambda: True\n    try:\n        yield\n    finally:\n        summary_op_util.skip_summary = original_skip_summary_func",
    "docstring": "Enters a context where all summary ops are skipped. Summaries are not yet supported in xla.compile(). So we provide this context manager that can skip creating summary ops. This is a temporary workaround due to XLA not supporting summary ops. Yields: None.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\compiler\\xla\\xla.py",
    "ast_data": "FunctionDef name:_disable_summary_context arguments Assign Assign arguments Try Assign"
  },
  {
    "library": "matplotlib",
    "name": "my_plotter",
    "source_code": "def my_plotter(ax, data1, data2, param_dict):\n    out = ax.plot(data1, data2, **param_dict)\n    return out",
    "docstring": "A helper function to make a graph.",
    "type": "function",
    "file_path": "matplotlib\\galleries\\users_explain\\quick_start.py",
    "ast_data": "FunctionDef name:my_plotter arg:ax arg:data1 arg:data2 arg:param_dict arguments arg arg arg arg Assign Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_cache_key",
    "source_code": "def _cache_key(self, state: _CacheKeyState, func: OpOverload, args: Sequence[object], kwargs: Mapping[str, object]) -> _DispatchCacheKey:\n    key_values = [func, torch.get_default_dtype(), torch._C._get_default_device(), torch.is_inference_mode_enabled(), self.shape_env.settings if self.shape_env else None]\n    if state.known_symbols:\n        key_values.append(self.epoch)\n    id_hashed_objects: list[object] = []\n    if args:\n        self._prep_args_for_hash(key_values, args, state, id_hashed_objects)\n    if kwargs:\n        self._prep_args_for_hash(key_values, kwargs, state, id_hashed_objects)\n    key = _DispatchCacheKey(tuple(key_values))\n    for id_hashed_obj in id_hashed_objects:\n        weakref.finalize(id_hashed_obj, functools.partial(evict_fake_tensor_cache_key, key=key))\n    id_hashed_objects.clear()\n    return key",
    "docstring": "Create a cache key given the dispatch args. Raises _BypassDispatchCache for any situation that precludes caching.",
    "type": "method",
    "file_path": "pytorch\\torch\\_subclasses\\fake_tensor.py",
    "ast_data": "FunctionDef name:_cache_key arg:self arg:state arg:func arg:args arg:kwargs arguments arg arg arg arg arg Assign Call Call Call If Call If Call If Call Assign Call Call For Call Call Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "set_dashes",
    "source_code": "def set_dashes(self, dash_offset, dash_list):\n    if dash_list is not None:\n        dl = np.asarray(dash_list)\n        if np.any(dl < 0.0):\n            raise ValueError('All values in the dash list must be non-negative')\n        if dl.size and (not np.any(dl > 0.0)):\n            raise ValueError('At least one value in the dash list must be positive')\n    self._dashes = (dash_offset, dash_list)",
    "docstring": "Set the dash style for the gc. Parameters ---------- dash_offset : float Distance, in points, into the dash pattern at which to start the pattern. It is usually set to 0. dash_list : array-like or None The on-off sequence as points. None specifies a solid line. All values must otherwise be non-negative (:math:). Notes ----- See p. 666 of the PostScript _ for more info.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\backend_bases.py",
    "ast_data": "FunctionDef name:set_dashes arg:self arg:dash_offset arg:dash_list arguments arg arg arg If Compare Assign Call If Call Compare Raise Call If BoolOp Call Compare Raise Call Assign"
  },
  {
    "library": "pytorch",
    "name": "sync_state",
    "source_code": "def sync_state(*wrapped_method_modules):\n    if wrapped_method_modules:\n        m, *other_ms = wrapped_method_modules\n        for other_m in other_ms:\n            _sync_state(m, other_m)",
    "docstring": "Sync state between exported modules corresponding to wrapped methods. This might be necessary after serializing/deserializing due to copying.",
    "type": "function",
    "file_path": "pytorch\\torch\\_export\\utils.py",
    "ast_data": "FunctionDef name:sync_state arguments arg If Assign For Call"
  },
  {
    "library": "pandas",
    "name": "idelete",
    "source_code": "def idelete(self, indexer) -> SingleBlockManager:\n    nb = self._block.delete(indexer)[0]\n    self.blocks = (nb,)\n    self.axes[0] = self.axes[0].delete(indexer)\n    self._reset_cache()\n    return self",
    "docstring": "Delete single location from SingleBlockManager. Ensures that self.blocks doesn't become empty.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\internals\\managers.py",
    "ast_data": "FunctionDef name:idelete arg:self arg:indexer arguments arg arg Assign Call Assign Assign Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "size",
    "source_code": "@dispatch.dispatch_for_api(array_ops.size_v2)\ndef size(input: ragged_tensor.Ragged, out_type=dtypes.int32, name=None):\n    if ragged_tensor.is_ragged(input):\n        return array_ops.size(input.flat_values, out_type=out_type, name=name)\n    else:\n        return array_ops.size(input, out_type=out_type, name=name)",
    "docstring": "Returns the size of a potentially ragged tensor. The size of a ragged tensor is the size of its inner values. #### Example: >>> tf.size(tf.ragged.constant([[1, 2], [3]])).numpy().item() 3 Args: input: A potentially ragged . out_type: The numeric output type for the operation. name: A name for the operation (optional). Returns: A Tensor of type .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\ragged_array_ops.py",
    "ast_data": "FunctionDef name:size arg:input arg:out_type arg:name arguments arg arg arg If Call Return return:yes Call Return return:yes Call Call"
  },
  {
    "library": "matplotlib",
    "name": "get_mutation_scale",
    "source_code": "def get_mutation_scale(self):\n    return self._mutation_scale",
    "docstring": "Return the mutation scale.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\patches.py",
    "ast_data": "FunctionDef name:get_mutation_scale arg:self arguments arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "annotate",
    "source_code": "def annotate(self, model: torch.fx.GraphModule) -> torch.fx.GraphModule:\n    self._annotate_embedding_ops(model.graph)\n    return model",
    "docstring": "just handling global spec for now",
    "type": "method",
    "file_path": "pytorch\\torch\\ao\\quantization\\quantizer\\embedding_quantizer.py",
    "ast_data": "FunctionDef name:annotate arg:self arg:model arguments arg arg Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_validate_inputs",
    "source_code": "def _validate_inputs(concrete_function):\n    if any((isinstance(inp, resource_variable_ops.VariableSpec) for inp in nest.flatten(concrete_function.structured_input_signature))):\n        raise ValueError(f\"Unable to serialize concrete_function '{concrete_function.name}'with tf.Variable input. Functions that expect tf.Variable inputs cannot be exported as signatures.\")",
    "docstring": "Raises error if input type is tf.Variable.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\saved_model\\signature_serialization.py",
    "ast_data": "FunctionDef name:_validate_inputs arg:concrete_function arguments arg If Call Call Call Raise Call"
  },
  {
    "library": "pytorch",
    "name": "_add_docstr",
    "source_code": "def _add_docstr(*args: str) -> Callable[[_T], _T]:\n\n    def decorator(o: _T) -> _T:\n        o.__doc__ = ''.join(args)\n        return o\n    return decorator",
    "docstring": "Adds docstrings to a given decorated function. Specially useful when then docstrings needs string interpolation, e.g., with str.format(). REMARK: Do not use this function if the docstring doesn't need string interpolation, just write a conventional docstring. Args: args (str):",
    "type": "function",
    "file_path": "pytorch\\torch\\signal\\windows\\windows.py",
    "ast_data": "FunctionDef name:_add_docstr arguments arg FunctionDef name:decorator arg:o arguments arg Assign Call Return return:yes Return return:yes"
  },
  {
    "library": "numpy",
    "name": "parse_targets",
    "source_code": "def parse_targets(self, source):\n    self.dist_log(\"looking for '@targets' inside -> \", source)\n    with open(source) as fd:\n        tokens = ''\n        max_to_reach = 1000\n        start_with = '@targets'\n        start_pos = -1\n        end_with = '*/'\n        end_pos = -1\n        for current_line, line in enumerate(fd):\n            if current_line == max_to_reach:\n                self.dist_fatal('reached the max of lines')\n                break\n            if start_pos == -1:\n                start_pos = line.find(start_with)\n                if start_pos == -1:\n                    continue\n                start_pos += len(start_with)\n            tokens += line\n            end_pos = line.find(end_with)\n            if end_pos != -1:\n                end_pos += len(tokens) - len(line)\n                break\n    if start_pos == -1:\n        self.dist_fatal(\"expected to find '%s' within a C comment\" % start_with)\n    if end_pos == -1:\n        self.dist_fatal(\"expected to end with '%s'\" % end_with)\n    tokens = tokens[start_pos:end_pos]\n    return self._parse_target_tokens(tokens)",
    "docstring": "Fetch and parse configuration statements that required for defining the targeted CPU features, statements should be declared in the top of source in between **C** comment and start with a special mark **@targets**. Configuration statements are sort of keywords representing CPU features names, group of statements and policies, combined together to determine the required optimization. Parameters ---------- source : str the path of **C** source file. Returns ------- - bool, True if group has the 'baseline' option - list, list of CPU features - list, list of extra compiler flags",
    "type": "method",
    "file_path": "numpy\\numpy\\distutils\\ccompiler_opt.py",
    "ast_data": "FunctionDef name:parse_targets arg:self arg:source arguments arg arg Call With Call Assign Assign Assign Assign Assign Assign For Call If Compare Call If Compare Assign Call If Compare Call Assign Call If Compare Call Call If Compare Call If Compare Call Assign Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_tracking_metadata",
    "source_code": "@property\ndef _tracking_metadata(self):\n    return self._trackable_saved_model_saver.tracking_metadata",
    "docstring": "Info about this layer to be saved into the SavedModel.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\base_layer.py",
    "ast_data": "FunctionDef name:_tracking_metadata arg:self arguments arg Return return:yes"
  },
  {
    "library": "cryptography",
    "name": "public_bytes_raw",
    "source_code": "@abc.abstractmethod\ndef public_bytes_raw(self) -> bytes:\n    pass",
    "docstring": "The raw bytes of the public key. Equivalent to public_bytes(Raw, Raw).",
    "type": "method",
    "file_path": "cryptography\\src\\cryptography\\hazmat\\primitives\\asymmetric\\x448.py",
    "ast_data": "FunctionDef name:public_bytes_raw arg:self arguments arg"
  },
  {
    "library": "tensorflow",
    "name": "to_tensor_list",
    "source_code": "def to_tensor_list(element_spec, element):\n    return _to_tensor_list_helper(lambda state, spec, component: state + spec._to_tensor_list(component), element_spec, element)",
    "docstring": "Returns a tensor list representation of the element. Args: element_spec: A nested structure of objects representing to element type specification. element: The element to convert to tensor list representation. Returns: A tensor list representation of . Raises: ValueError: If and do not have the same number of elements or if the two structures are not nested in the same way. TypeError: If and differ in the type of sequence in any of their substructures.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\data\\util\\structure.py",
    "ast_data": "FunctionDef name:to_tensor_list arg:element_spec arg:element arguments arg arg Return return:yes Call arguments arg arg arg Call"
  },
  {
    "library": "scikit-learn",
    "name": "predict_proba",
    "source_code": "@available_if(_estimator_has('predict_proba'))\ndef predict_proba(self, X, **params):\n    check_is_fitted(self)\n    _raise_for_params(params, self, 'predict_proba')\n    if _routing_enabled():\n        routed_params = process_routing(self, 'predict_proba', **params)\n    else:\n        routed_params = Bunch(estimator=Bunch(predict_proba={}))\n    X = validate_data(self, X, accept_sparse=True, ensure_all_finite=False, reset=False)\n    return self.estimator_.predict_proba(X, **routed_params.estimator.predict_proba)",
    "docstring": "Predict probability for each possible outcome. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) Array representing the data. **params : dict of str -> object Parameters to pass to the underlying estimator's `enable_metadata_routing=TrueMetadata Routing User Guide ` for more details. Returns ------- y : ndarray of shape (n_samples, n_features) Array with prediction probabilities.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\semi_supervised\\_self_training.py",
    "ast_data": "FunctionDef name:predict_proba arg:self arg:X arguments arg arg arg Call Call If Call Assign Call Assign Call Call Assign Call Return return:yes Call Call Call"
  },
  {
    "library": "scikit-learn",
    "name": "_despine",
    "source_code": "def _despine(ax):\n    for s in ['top', 'right']:\n        ax.spines[s].set_visible(False)\n    for s in ['bottom', 'left']:\n        ax.spines[s].set_bounds(0, 1)",
    "docstring": "Remove the top and right spines of the plot. Parameters ---------- ax : matplotlib.axes.Axes The axes of the plot to despine.",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\utils\\_plotting.py",
    "ast_data": "FunctionDef name:_despine arg:ax arguments arg For Call For Call"
  },
  {
    "library": "kornia",
    "name": "_joint_range_check",
    "source_code": "def _joint_range_check(ranged_factor: Tensor, name: str, bounds: Optional[Tuple[float, float]]=None) -> None:\n    if bounds is None:\n        bounds = (float('-inf'), float('inf'))\n    if ranged_factor.dim() == 1 and len(ranged_factor) == 2:\n        if not bounds[0] <= ranged_factor[0] or not bounds[1] >= ranged_factor[1]:\n            raise ValueError(f'{name} out of bounds. Expected inside {bounds}, got {ranged_factor}.')\n        if not bounds[0] <= ranged_factor[0] <= ranged_factor[1] <= bounds[1]:\n            raise ValueError(f'{name}[0] should be smaller than {name}[1] got {ranged_factor}')\n    else:\n        raise TypeError(f'{name} should be a tensor with length 2 whose values between {bounds}. Got {ranged_factor}.')",
    "docstring": "Check if bounds[0] <= ranged_factor[0] <= ranged_factor[1] <= bounds[1].",
    "type": "function",
    "file_path": "kornia\\kornia\\augmentation\\utils\\param_validation.py",
    "ast_data": "FunctionDef name:_joint_range_check arg:ranged_factor arg:name arg:bounds arguments arg arg arg If Compare Assign Call Call If BoolOp Compare Call Compare Call If BoolOp Compare Compare Raise Call If Compare Raise Call Raise Call"
  },
  {
    "library": "tensorflow",
    "name": "reuse_variables",
    "source_code": "def reuse_variables(self):\n    self._reuse = True",
    "docstring": "Reuse variables in this scope.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\variable_scope.py",
    "ast_data": "FunctionDef name:reuse_variables arg:self arguments arg Assign"
  },
  {
    "library": "kornia",
    "name": "_get_base_url",
    "source_code": "def _get_base_url(model_type: Literal['b1', 'b2', 'b3']='b1', resolution: Literal[224, 256, 288]=224) -> str:\n    return f'https://huggingface.co/kornia/efficientvit_imagenet_{model_type}_r{resolution}/resolve/main/{model_type}-r{resolution}.pt'",
    "docstring": "Return the base URL of the model weights.",
    "type": "function",
    "file_path": "kornia\\kornia\\contrib\\models\\efficient_vit\\model.py",
    "ast_data": "FunctionDef name:_get_base_url arg:model_type arg:resolution arguments arg arg Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "clip_children",
    "source_code": "@property\ndef clip_children(self):\n    return self._clip_children",
    "docstring": "If the children of this DrawingArea should be clipped by DrawingArea bounding box.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\offsetbox.py",
    "ast_data": "FunctionDef name:clip_children arg:self arguments arg Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "_repr_mimebundle_",
    "source_code": "def _repr_mimebundle_(self, **kwargs):\n    output = {'text/plain': repr(self)}\n    if get_config()['display'] == 'diagram':\n        output['text/html'] = self._html_repr()\n    return output",
    "docstring": "Mime bundle used by jupyter kernels to display estimator",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\utils\\_repr_html\\base.py",
    "ast_data": "FunctionDef name:_repr_mimebundle_ arg:self arguments arg arg Assign Call If Compare Call Assign Call Return return:yes"
  },
  {
    "library": "scipy",
    "name": "read_var_array",
    "source_code": "def read_var_array(self, header, process=True):\n    return self._matrix_reader.array_from_header(header, process)",
    "docstring": "Read array, given Parameters ---------- header : header object object with fields defining variable header process : {True, False}, optional If True, apply recursive post-processing during loading of array. Returns ------- arr : array array with post-processing applied or not according to .",
    "type": "method",
    "file_path": "scipy\\scipy\\io\\matlab\\_mio4.py",
    "ast_data": "FunctionDef name:read_var_array arg:self arg:header arg:process arguments arg arg arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_graph_string",
    "source_code": "def _graph_string(graph):\n    if graph:\n        return graph.as_graph_def(add_shapes=True).SerializeToString()\n    else:\n        return b''",
    "docstring": "Helper to serialize a graph to string.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\profiler\\model_analyzer.py",
    "ast_data": "FunctionDef name:_graph_string arg:graph arguments arg If Return return:yes Call Call Return return:yes"
  },
  {
    "library": "scrapy",
    "name": "project_data_dir",
    "source_code": "def project_data_dir(project: str='default') -> str:\n    if not inside_project():\n        raise NotConfigured('Not inside a project')\n    cfg = get_config()\n    if cfg.has_option(DATADIR_CFG_SECTION, project):\n        d = Path(cfg.get(DATADIR_CFG_SECTION, project))\n    else:\n        scrapy_cfg = closest_scrapy_cfg()\n        if not scrapy_cfg:\n            raise NotConfigured('Unable to find scrapy.cfg file to infer project data dir')\n        d = (Path(scrapy_cfg).parent / '.scrapy').resolve()\n    if not d.exists():\n        d.mkdir(parents=True)\n    return str(d)",
    "docstring": "Return the current project data dir, creating it if it doesn't exist",
    "type": "function",
    "file_path": "scrapy\\scrapy\\utils\\project.py",
    "ast_data": "FunctionDef name:project_data_dir arg:project arguments arg If Call Raise Call Assign Call If Call Assign Call Call Assign Call If Raise Call Assign Call Call If Call Call Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "fit",
    "source_code": "@_fit_context(prefer_skip_nested_validation=True)\ndef fit(self, X, y, sample_weight=None):\n    X, y = self._check_X_y(X, y)\n    _, n_features = X.shape\n    labelbin = LabelBinarizer()\n    Y = labelbin.fit_transform(y)\n    self.classes_ = labelbin.classes_\n    if Y.shape[1] == 1:\n        if len(self.classes_) == 2:\n            Y = np.concatenate((1 - Y, Y), axis=1)\n        else:\n            Y = np.ones_like(Y)\n    if sample_weight is not None:\n        Y = Y.astype(np.float64, copy=False)\n        sample_weight = _check_sample_weight(sample_weight, X)\n        sample_weight = np.atleast_2d(sample_weight)\n        Y *= sample_weight.T\n    class_prior = self.class_prior\n    n_classes = Y.shape[1]\n    self._init_counters(n_classes, n_features)\n    self._count(X, Y)\n    alpha = self._check_alpha()\n    self._update_feature_log_prob(alpha)\n    self._update_class_log_prior(class_prior=class_prior)\n    return self",
    "docstring": "Fit Naive Bayes classifier according to X, y. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) Training vectors, where is the number of samples and is the number of features. y : array-like of shape (n_samples,) Target values. sample_weight : array-like of shape (n_samples,), default=None Weights applied to individual samples (1. for unweighted). Returns ------- self : object Returns the instance itself.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\naive_bayes.py",
    "ast_data": "FunctionDef name:fit arg:self arg:X arg:y arg:sample_weight arguments arg arg arg arg Assign Call Assign Assign Call Assign Call Assign If Compare If Compare Call Assign Call Assign Call If Compare Assign Call Assign Call Assign Call Assign Assign Call Call Assign Call Call Call Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "_update_view",
    "source_code": "def _update_view(self):\n    nav_info = self._nav_stack()\n    if nav_info is None:\n        return\n    items = list(nav_info.items())\n    for ax, (view, (pos_orig, pos_active)) in items:\n        ax._set_view(view)\n        ax._set_position(pos_orig, 'original')\n        ax._set_position(pos_active, 'active')\n    self.canvas.draw_idle()",
    "docstring": "Update the viewlim and position from the view and position stack for each Axes.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\backend_bases.py",
    "ast_data": "FunctionDef name:_update_view arg:self arguments arg Assign Call If Compare Return return:no Assign Call Call For Call Call Call Call"
  },
  {
    "library": "matplotlib",
    "name": "buffer_rgba",
    "source_code": "def buffer_rgba(self):\n    return self.renderer.buffer_rgba()",
    "docstring": "Get the image as a to the renderer's buffer. must be called at least once before this function will work and to update the renderer for any subsequent changes to the Figure.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\backends\\backend_agg.py",
    "ast_data": "FunctionDef name:buffer_rgba arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "remove_dropout",
    "source_code": "def remove_dropout(model: nn.Module) -> nn.Module:\n    fx_model = fx.symbolic_trace(model)\n\n    class DropoutRemover(torch.fx.Transformer):\n\n        def call_module(self, target: Target, args: tuple[Argument, ...], kwargs: dict[str, Any]) -> Any:\n            if isinstance(self.submodules[target], nn.Dropout):\n                assert len(args) == 1\n                return args[0]\n            else:\n                return super().call_module(target, args, kwargs)\n    return DropoutRemover(fx_model).transform()",
    "docstring": "Removes all dropout layers from the module.",
    "type": "function",
    "file_path": "pytorch\\torch\\fx\\experimental\\optimization.py",
    "ast_data": "FunctionDef name:remove_dropout arg:model arguments arg Assign Call ClassDef name:DropoutRemover FunctionDef name:call_module arg:self arg:target arg:args arg:kwargs arguments arg arg arg arg If Call Compare Call Return return:yes Return return:yes Call Call Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "_conv_add_extra_inputs_getter_left",
    "source_code": "def _conv_add_extra_inputs_getter_left(pattern):\n    _, _conv, extra_input = pattern\n    return [extra_input]",
    "docstring": "get inputs pattern for extra inputs, inputs for root node are assumed to be copied over from root node to the fused node",
    "type": "function",
    "file_path": "pytorch\\torch\\ao\\quantization\\backend_config\\onednn.py",
    "ast_data": "FunctionDef name:_conv_add_extra_inputs_getter_left arg:pattern arguments arg Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "wait",
    "source_code": "def wait(self):\n    self._local_sense.value = not self._flag\n    with self._lock:\n        self._counter += 1\n        if self._counter == self._num_participants:\n            self._counter = 0\n            self._flag = self._local_sense.value\n    with self._condition:\n        while self._flag != self._local_sense.value:\n            self._condition.wait()\n        self._condition.notify_all()",
    "docstring": "Waits until all other callers reach the same wait call.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\distribute_coordinator.py",
    "ast_data": "FunctionDef name:wait arg:self arguments arg Assign With If Compare Assign Assign With While Compare Call Call"
  },
  {
    "library": "pandas",
    "name": "_from_sequence",
    "source_code": "@classmethod\ndef _from_sequence(cls, scalars, *, dtype: Dtype | None=None, copy: bool=False) -> Self:\n    raise AbstractMethodError(cls)",
    "docstring": "Construct a new ExtensionArray from a sequence of scalars. Parameters ---------- scalars : Sequence Each element will be an instance of the scalar type for this array, `` or be converted into this type in this method. dtype : dtype, optional Construct for this particular dtype. This should be a Dtype compatible with the ExtensionArray. copy : bool, default False If True, copy the underlying data. Returns ------- ExtensionArray See Also -------- api.extensions.ExtensionArray._from_sequence_of_strings : Construct a new ExtensionArray from a sequence of strings. api.extensions.ExtensionArray._hash_pandas_object : Hook for hash_pandas_object. Examples -------- >>> pd.arrays.IntegerArray._from_sequence([4, 5]) [4, 5] Length: 2, dtype: Int64",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\arrays\\base.py",
    "ast_data": "FunctionDef name:_from_sequence arg:cls arg:scalars arguments arg arg arg arg Raise Call"
  },
  {
    "library": "tensorflow",
    "name": "get_variable_scope",
    "source_code": "@tf_export(v1=['get_variable_scope'])\ndef get_variable_scope():\n    return get_variable_scope_store().current_scope",
    "docstring": "Returns the current variable scope. @compatibility(TF2) Although it is a legacy api, is compatible with eager execution and However, to maintain variable-scope based variable reuse you will need to combine it with . (Though it will behave as if reuse is always set to .) See the [migration guide]( for more info. The TF2 equivalent, if you are just trying to track variable name prefixes and not control -based variable reuse, would be to use and capture the output of opening the scope (which represents the current name prefix). For example: @end_compatibility",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\variable_scope.py",
    "ast_data": "FunctionDef name:get_variable_scope arguments Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "GpuData",
    "source_code": "@dataclasses.dataclass\nclass GpuData:\n    uuid: str\n    utilization: float\n    mem_utilization: float",
    "docstring": "Dataclass for storing gpu data. This is the data that will be logged to the usage_log file.",
    "type": "class",
    "file_path": "pytorch\\tools\\stats\\monitor.py",
    "ast_data": "ClassDef name:GpuData"
  },
  {
    "library": "django",
    "name": "__init__",
    "source_code": "def __init__(self, message, code=None, params=None):\n    super().__init__(message, code, params)\n    if isinstance(message, ValidationError):\n        if hasattr(message, 'error_dict'):\n            message = message.error_dict\n        elif not hasattr(message, 'message'):\n            message = message.error_list\n        else:\n            message, code, params = (message.message, message.code, message.params)\n    if isinstance(message, dict):\n        self.error_dict = {}\n        for field, messages in message.items():\n            if not isinstance(messages, ValidationError):\n                messages = ValidationError(messages)\n            self.error_dict[field] = messages.error_list\n    elif isinstance(message, list):\n        self.error_list = []\n        for message in message:\n            if not isinstance(message, ValidationError):\n                message = ValidationError(message)\n            if hasattr(message, 'error_dict'):\n                self.error_list.extend(sum(message.error_dict.values(), []))\n            else:\n                self.error_list.extend(message.error_list)\n    else:\n        self.message = message\n        self.code = code\n        self.params = params\n        self.error_list = [self]",
    "docstring": "The argument can be a single error, a list of errors, or a dictionary that maps field names to lists of errors. What we define as an \"error\" can be either a simple string or an instance of ValidationError with its message attribute set, and what we define as list or dictionary can be an actual or or an instance of ValidationError with its or attribute set.",
    "type": "method",
    "file_path": "django\\django\\core\\exceptions.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:message arg:code arg:params arguments arg arg arg arg Call Call If Call If Call Assign If Call Assign Assign If Call Assign For Call If Call Assign Call Assign If Call Assign For If Call Assign Call If Call Call Call Call Call Assign Assign Assign Assign"
  },
  {
    "library": "tensorflow",
    "name": "serialize_bare_concrete_function",
    "source_code": "def serialize_bare_concrete_function(concrete_function):\n    proto = saved_object_graph_pb2.SavedBareConcreteFunction(concrete_function_name=concrete_function.name, allowed_positional_arguments=concrete_function._num_positional_args, argument_keywords=concrete_function._arg_keywords)\n    function_spec = get_preinitialized_function_spec(concrete_function)\n    if function_spec is not None:\n        proto.function_spec.CopyFrom(_serialize_function_spec(function_spec))\n    return proto",
    "docstring": "Build a SavedBareConcreteFunction.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\saved_model\\function_serialization.py",
    "ast_data": "FunctionDef name:serialize_bare_concrete_function arg:concrete_function arguments arg Assign Call Assign Call If Compare Call Call Return return:yes"
  },
  {
    "library": "numpy",
    "name": "trace",
    "source_code": "@array_function_dispatch(_trace_dispatcher)\ndef trace(a, offset=0, axis1=0, axis2=1, dtype=None, out=None):\n    if isinstance(a, np.matrix):\n        return asarray(a).trace(offset=offset, axis1=axis1, axis2=axis2, dtype=dtype, out=out)\n    else:\n        return asanyarray(a).trace(offset=offset, axis1=axis1, axis2=axis2, dtype=dtype, out=out)",
    "docstring": "Return the sum along diagonals of the array. If is 2-D, the sum along its diagonal with the given offset is returned, i.e., the sum of elements `aaaxis1axis2aaaaa` has larger dimensions, then an array of sums along diagonals is returned. See Also -------- diag, diagonal, diagflat Examples -------- >>> import numpy as np >>> np.trace(np.eye(3)) 3.0 >>> a = np.arange(8).reshape((2,2,2)) >>> np.trace(a) array([6, 8]) >>> a = np.arange(24).reshape((2,2,2,3)) >>> np.trace(a).shape (2, 3)",
    "type": "function",
    "file_path": "numpy\\numpy\\_core\\fromnumeric.py",
    "ast_data": "FunctionDef name:trace arg:a arg:offset arg:axis1 arg:axis2 arg:dtype arg:out arguments arg arg arg arg arg arg If Call Return return:yes Call Call Return return:yes Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "actual_grad_fn",
    "source_code": "def actual_grad_fn(*result_grad_components):\n    result_grads = composite_tensor_gradient.replace_flat_tensors_for_gradients(nest.flatten(result), result_grad_components)\n    if not isinstance(result_grads, (list, tuple)):\n        result_grads = [result_grads]\n    if variables:\n        input_grads, variable_grads = grad_fn(*result_grads, variables=variables)\n        if len(variable_grads) != len(variables):\n            raise ValueError('Must return gradient for each variable from @custom_gradient grad_fn.')\n    else:\n        input_grads = grad_fn(*result_grads)\n        variable_grads = []\n    flat_grads = composite_tensor_gradient.get_flat_tensors_for_gradients(nest.flatten(input_grads))\n    if len(flat_grads) != arg_count:\n        raise ValueError(f'custom_gradient function expected to return {arg_count} gradients, but returned {len(flat_grads)} instead.')\n    return flat_grads + variable_grads",
    "docstring": "Custom grad fn wrapper.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\custom_gradient.py",
    "ast_data": "FunctionDef name:actual_grad_fn arguments arg Assign Call Call If Call Assign If Assign Call If Compare Call Call Raise Call Assign Call Assign Assign Call Call If Compare Call Raise Call Call Return return:yes"
  },
  {
    "library": "django",
    "name": "gettext_noop",
    "source_code": "def gettext_noop(message):\n    return message",
    "docstring": "Mark strings for translation but don't translate them now. This can be used to store strings in global variables that should stay in the base language (because they might be used externally) and will be translated later.",
    "type": "function",
    "file_path": "django\\django\\utils\\translation\\trans_real.py",
    "ast_data": "FunctionDef name:gettext_noop arg:message arguments arg Return return:yes"
  },
  {
    "library": "pandas",
    "name": "_update_map",
    "source_code": "def _update_map(self, tag: str) -> None:\n    assert self.handles.handle is not None\n    self._map[tag] = self.handles.handle.tell()",
    "docstring": "Update map location for tag with file position",
    "type": "method",
    "file_path": "pandas\\pandas\\io\\stata.py",
    "ast_data": "FunctionDef name:_update_map arg:self arg:tag arguments arg arg Compare Assign Call"
  },
  {
    "library": "kornia",
    "name": "StraightThroughEstimator",
    "source_code": "class StraightThroughEstimator(nn.Module):\n\n    def __init__(self, target_fn: nn.Module, grad_fn: Optional[Callable[..., Any]]=None) -> None:\n        super().__init__()\n        self.target_fn = target_fn\n        self.grad_fn = grad_fn\n\n    def __repr__(self) -> str:\n        return f'{self.__class__.__name__}(target_fn={self.target_fn}, grad_fn={self.grad_fn})'\n\n    def forward(self, input: Tensor) -> Tensor:\n        out = self.target_fn(input)\n        if not isinstance(out, Tensor):\n            raise NotImplementedError('Only Tensor is supported at the moment. Feel free to contribute to https://github.com/kornia/kornia.')\n        output = STEFunction.apply(input, out, self.grad_fn)\n        return output",
    "docstring": "Straight-Through Estimation (STE) module. STE wraps the `` block. >>> import kornia.augmentation as K >>> input = torch.randn(1, 1, 4, 4, requires_grad = True) >>> aug = K.ImageSequential( ... K.RandomAffine((77, 77)), ... StraightThroughEstimator(K.RandomPosterize(3, p=1.), grad_fn=None), ... K.RandomRotation((15, 15)), ... ) >>> aug(input).mean().backward() >>> input.grad tensor([[[[0.0422, 0.0626, 0.0566, 0.0422], [0.0566, 0.0626, 0.0626, 0.0626], [0.0626, 0.0626, 0.0626, 0.0566], [0.0422, 0.0566, 0.0626, 0.0422]]]])",
    "type": "class",
    "file_path": "kornia\\kornia\\grad_estimator\\ste.py",
    "ast_data": "ClassDef name:StraightThroughEstimator FunctionDef name:__init__ arg:self arg:target_fn arg:grad_fn arguments arg arg arg Call Call Assign Assign FunctionDef name:__repr__ arg:self arguments arg Return return:yes FunctionDef name:forward arg:self arg:input arguments arg arg Assign Call If Call Raise Call Assign Call Return return:yes"
  },
  {
    "library": "authlib",
    "name": "get_verification_uri",
    "source_code": "def get_verification_uri(self):\n    raise NotImplementedError()",
    "docstring": "Define the `` of device authorization endpoint. Developers MUST implement this method in subclass:: def get_verification_uri(self): return \"",
    "type": "method",
    "file_path": "authlib\\authlib\\oauth2\\rfc8628\\endpoint.py",
    "ast_data": "FunctionDef name:get_verification_uri arg:self arguments arg Raise Call"
  },
  {
    "library": "tensorflow",
    "name": "decorator",
    "source_code": "def decorator(arg):\n    nonlocal predicate\n    if not tf_inspect.isclass(arg):\n        raise TypeError('Registered serializable must be a class: {}'.format(arg))\n    class_name = name if name is not None else arg.__name__\n    if predicate is None:\n        predicate = lambda x: isinstance(x, arg)\n    _class_registry.register(package, class_name, predicate, arg)\n    return arg",
    "docstring": "Registers a class with the serialization framework.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\saved_model\\registration\\registration.py",
    "ast_data": "FunctionDef name:decorator arg:arg arguments arg If Call Raise Call Call Assign Compare If Compare Assign arguments arg Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_inplace_helper",
    "source_code": "def _inplace_helper(x, i, v, op):\n    x = ops.convert_to_tensor(x)\n    v = ops.convert_to_tensor(v, x.dtype)\n    if i is None:\n        return array_ops.reshape(op(array_ops.reshape(x, [1, -1]), [0], array_ops.reshape(v, [1, -1])), array_ops.shape(x))\n    i = math_ops.cast(i, dtypes.int32)\n    if i.get_shape().ndims == 0:\n        return op(x, array_ops.reshape(i, [1]), array_ops.expand_dims(v, 0))\n    return op(x, i, v)",
    "docstring": "Applies an inplace op on (x, i, v). op is one of gen_array_ops.alias_inplace_update, gen_array_ops.alias_inplace_add, or gen_array_ops.alias_inplace_sub. If i is None, x and v must be the same shape. Computes x op v; If i is a scalar, x has a rank 1 higher than v's. Computes x[i, :] op v; Otherwise, x and v must have the same rank. Computes x[i, :] op v; Args: x: A Tensor. i: None, a scalar or a vector. v: A Tensor. op: alias_inplace_update, alias_inplace_add, or alias_inplace_sub. Returns: Returns x.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\inplace_ops.py",
    "ast_data": "FunctionDef name:_inplace_helper arg:x arg:i arg:v arg:op arguments arg arg arg arg Assign Call Assign Call If Compare Return return:yes Call Call Call Call Call Assign Call If Compare Call Return return:yes Call Call Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "bfs_trace_with_node_process",
    "source_code": "def bfs_trace_with_node_process(model: Union[ExportedProgram, torch.fx.GraphModule], node_op: Callable) -> None:\n    assert isinstance(model, (ExportedProgram, torch.fx.GraphModule)), f'Expected GraphModule or ExportedProgram, got {type(model)}'\n    gm = model.graph_module if isinstance(model, ExportedProgram) else model\n    queue = [gm]\n    while queue:\n        current_graph_module = queue.pop(0)\n        for node in current_graph_module.graph.nodes:\n            if node.op in ['output', 'placeholder']:\n                continue\n            node_op(node)\n        control_flow_submodules = [submodule for _, submodule, _ in _get_control_flow_submodules(current_graph_module)]\n        queue.extend(control_flow_submodules)",
    "docstring": "Traverse the graph module and apply node_op to each node.",
    "type": "function",
    "file_path": "pytorch\\torch\\ao\\quantization\\pt2e\\graph_utils.py",
    "ast_data": "FunctionDef name:bfs_trace_with_node_process arg:model arg:node_op arguments arg arg Call Call Assign Call Assign While Assign Call For If Compare Call Assign Call Call"
  },
  {
    "library": "pandas",
    "name": "dispatch_reduction_ufunc",
    "source_code": "def dispatch_reduction_ufunc(self, ufunc: np.ufunc, method: str, *inputs, **kwargs):\n    assert method == 'reduce'\n    if len(inputs) != 1 or inputs[0] is not self:\n        return NotImplemented\n    if ufunc.__name__ not in REDUCTION_ALIASES:\n        return NotImplemented\n    method_name = REDUCTION_ALIASES[ufunc.__name__]\n    if not hasattr(self, method_name):\n        return NotImplemented\n    if self.ndim > 1:\n        if isinstance(self, ABCNDFrame):\n            kwargs['numeric_only'] = False\n        if 'axis' not in kwargs:\n            kwargs['axis'] = 0\n    return getattr(self, method_name)(skipna=False, **kwargs)",
    "docstring": "Dispatch ufunc reductions to self's reduction methods.",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\arraylike.py",
    "ast_data": "FunctionDef name:dispatch_reduction_ufunc arg:self arg:ufunc arg:method arguments arg arg arg arg arg Compare If BoolOp Compare Call Compare Return return:yes If Compare Return return:yes Assign If Call Return return:yes If Compare If Call Assign If Compare Assign Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_group_value_by_device",
    "source_code": "def _group_value_by_device(per_replica_values):\n    destinations = per_replica_values[0]._devices\n    grouped = [[] for _ in range(len(destinations))]\n    for per_replica_value in per_replica_values:\n        for i, v in enumerate(per_replica_value.values):\n            assert per_replica_value._devices == destinations\n            grouped[i].append((v, None))\n    return grouped",
    "docstring": "Group values into sublists by their devices. This grouping is needed to call the all-reduce library because it expects a list of the following form: [[(grad0_gpu0, v0_gpu0), (grad1_gpu0, v1_gpu0), (grad2_gpu0, v2_gpu0) ...], [(grad0_gpu1, v0_gpu1), (grad1_gpu1, v1_gpu1), (grad2_gpu1, v2_gpu1) ...], [(grad0_gpu2, v0_gpu2), (grad1_gpu0, v1_gpu2), (grad2_gpu0, v2_gpu2) ...], ... ] Args: per_replica_values: a list of PerReplica objects. Returns: a list of lists, each sublist has components for its corresponding device of PerReplica objects, paired with a None.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\cross_device_ops.py",
    "ast_data": "FunctionDef name:_group_value_by_device arg:per_replica_values arguments arg Assign Assign Call Call For For Call Compare Call Return return:yes"
  },
  {
    "library": "scrapy",
    "name": "print_live_refs",
    "source_code": "def print_live_refs(*a: Any, **kw: Any) -> None:\n    print(format_live_refs(*a, **kw))",
    "docstring": "Print tracked objects",
    "type": "function",
    "file_path": "scrapy\\scrapy\\utils\\trackref.py",
    "ast_data": "FunctionDef name:print_live_refs arguments arg arg Call Call"
  },
  {
    "library": "numpy",
    "name": "_align",
    "source_code": "def _align(self, axis):\n    if axis is None:\n        return self[0, 0]\n    elif axis == 0:\n        return self\n    elif axis == 1:\n        return self.transpose()\n    else:\n        raise ValueError('unsupported axis')",
    "docstring": "A convenience function for operations that need to preserve axis orientation.",
    "type": "method",
    "file_path": "numpy\\numpy\\matrixlib\\defmatrix.py",
    "ast_data": "FunctionDef name:_align arg:self arg:axis arguments arg arg If Compare Return return:yes If Compare Return return:yes If Compare Return return:yes Call Raise Call"
  },
  {
    "library": "tensorflow",
    "name": "get_visible_devices",
    "source_code": "@tf_export('config.get_visible_devices', 'config.experimental.get_visible_devices')\n@deprecation.deprecated_endpoints('config.experimental.get_visible_devices')\ndef get_visible_devices(device_type=None):\n    return context.context().get_visible_devices(device_type)",
    "docstring": "Get the list of visible physical devices. Returns the list of s currently marked as visible to the runtime. A visible device will have at least one associated with it once the runtime is initialized. The following example verifies all visible GPUs have been disabled: >>> physical_devices = tf.config.list_physical_devices('GPU') >>> try: ... # Disable all GPUS ... tf.config.set_visible_devices([], 'GPU') ... visible_devices = tf.config.get_visible_devices() ... for device in visible_devices: ... assert device.device_type != 'GPU' ... except: ... # Invalid device or cannot modify virtual devices once initialized. ... pass Args: device_type: (optional string) Only include devices matching this device type. For example \"CPU\" or \"GPU\". Returns: List of visible s",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\config.py",
    "ast_data": "FunctionDef name:get_visible_devices arg:device_type arguments arg Return return:yes Call Call Call Call"
  },
  {
    "library": "seaborn",
    "name": "__init__",
    "source_code": "def __init__(self, stat='count', bins='auto', binwidth=None, binrange=None, discrete=False, cumulative=False):\n    stat_choices = ['count', 'frequency', 'density', 'probability', 'proportion', 'percent']\n    _check_argument('stat', stat_choices, stat)\n    self.stat = stat\n    self.bins = bins\n    self.binwidth = binwidth\n    self.binrange = binrange\n    self.discrete = discrete\n    self.cumulative = cumulative\n    self.bin_kws = None",
    "docstring": "Initialize the estimator with its parameters. Parameters ---------- stat : str Aggregate statistic to compute in each bin. - : show the number of observations in each bin - : show the number of observations divided by the bin width - or : normalize such that bar heights sum to 1 - : normalize such that bar heights sum to 100 - : normalize such that the total area of the histogram equals 1 bins : str, number, vector, or a pair of such values Generic bin parameter that can be the name of a reference rule, the number of bins, or the breaks of the bins. Passed to :func:. binwidth : number or pair of numbers Width of each bin, overrides `` such that bin edges cover integer values in the dataset. cumulative : bool If True, return the cumulative statistic.",
    "type": "method",
    "file_path": "seaborn\\seaborn\\_statistics.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:stat arg:bins arg:binwidth arg:binrange arg:discrete arg:cumulative arguments arg arg arg arg arg arg arg Assign Call Assign Assign Assign Assign Assign Assign Assign"
  },
  {
    "library": "django",
    "name": "StaticFilesStorage",
    "source_code": "class StaticFilesStorage(FileSystemStorage):\n\n    def __init__(self, location=None, base_url=None, *args, **kwargs):\n        if location is None:\n            location = settings.STATIC_ROOT\n        if base_url is None:\n            base_url = settings.STATIC_URL\n        check_settings(base_url)\n        super().__init__(location, base_url, *args, **kwargs)\n        if not location:\n            self.base_location = None\n            self.location = None\n\n    def path(self, name):\n        if not self.location:\n            raise ImproperlyConfigured(\"You're using the staticfiles app without having set the STATIC_ROOT setting to a filesystem path.\")\n        return super().path(name)",
    "docstring": "Standard file system storage for static files. The defaults for ``.",
    "type": "class",
    "file_path": "django\\django\\contrib\\staticfiles\\storage.py",
    "ast_data": "ClassDef name:StaticFilesStorage FunctionDef name:__init__ arg:self arg:location arg:base_url arguments arg arg arg arg arg If Compare Assign If Compare Assign Call Call Call If Assign Assign FunctionDef name:path arg:self arg:name arguments arg arg If Raise Call Return return:yes Call Call"
  },
  {
    "library": "scipy",
    "name": "add_knot",
    "source_code": "def add_knot(x, t, k, residuals):\n    new_knot = _dierckx.fpknot(x, t, k, residuals)\n    idx_t = np.searchsorted(t, new_knot)\n    t_new = np.r_[t[:idx_t], new_knot, t[idx_t:]]\n    return t_new",
    "docstring": "Add a new knot. (Approximately) replicate FITPACK's logic: 1. split the array into knot intervals, `xx`*. This routine is an analog of (cf _split function) and",
    "type": "function",
    "file_path": "scipy\\scipy\\interpolate\\_fitpack_repro.py",
    "ast_data": "FunctionDef name:add_knot arg:x arg:t arg:k arg:residuals arguments arg arg arg arg Assign Call Assign Call Assign Return return:yes"
  },
  {
    "library": "scipy",
    "name": "SixHumpCamel",
    "source_code": "class SixHumpCamel(Benchmark):\n\n    def __init__(self, dimensions=2):\n        Benchmark.__init__(self, dimensions)\n        self._bounds = list(zip([-5.0] * self.N, [5.0] * self.N))\n        self.custom_bounds = [(-2, 2), (-1.5, 1.5)]\n        self.global_optimum = [(0.08984201368301331, -0.7126564032704135), (-0.08984201368301331, 0.7126564032704135)]\n        self.fglob = -1.031628\n\n    def fun(self, x, *args):\n        self.nfev += 1\n        return (4 - 2.1 * x[0] ** 2 + x[0] ** 4 / 3) * x[0] ** 2 + x[0] * x[1] + (4 * x[1] ** 2 - 4) * x[1] ** 2",
    "docstring": "Six Hump Camel objective function. This class defines the Six Hump Camel [1]_ global optimization problem. This is a multimodal minimization problem defined as follows: .. math:: f_{\\text{SixHumpCamel}}(x) = 4x_1^2+x_1x_2-4x_2^2-2.1x_1^4+ 4x_2^4+\\frac{1}{3}x_1^6 with :math: for :math:. *Global optimum*: :math: for :math: or :math: .. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions For Global Optimization Problems Int. Journal of Mathematical Modelling and Numerical Optimisation, 2013, 4, 150-194.",
    "type": "class",
    "file_path": "scipy\\benchmarks\\benchmarks\\go_benchmark_functions\\go_funcs_S.py",
    "ast_data": "ClassDef name:SixHumpCamel FunctionDef name:__init__ arg:self arg:dimensions arguments arg arg Call Assign Call Call Assign Assign Assign FunctionDef name:fun arg:self arg:x arguments arg arg arg Return return:yes"
  },
  {
    "library": "scipy",
    "name": "parse_data",
    "source_code": "def parse_data(self, data_str):\n    if data_str in self.values:\n        return data_str\n    elif data_str == '?':\n        return data_str\n    else:\n        raise ValueError(f'{str(data_str)} value not in {str(self.values)}')",
    "docstring": "Parse a value of this type.",
    "type": "method",
    "file_path": "scipy\\scipy\\io\\arff\\_arffread.py",
    "ast_data": "FunctionDef name:parse_data arg:self arg:data_str arguments arg arg If Compare Return return:yes If Compare Return return:yes Raise Call Call Call"
  },
  {
    "library": "scrapy",
    "name": "inthread",
    "source_code": "def inthread(func: Callable[_P, _T]) -> Callable[_P, Deferred[_T]]:\n\n    @wraps(func)\n    def wrapped(*a: _P.args, **kw: _P.kwargs) -> Deferred[_T]:\n        return deferToThread(func, *a, **kw)\n    return wrapped",
    "docstring": "Decorator to call a function in a thread and return a deferred with the result",
    "type": "function",
    "file_path": "scrapy\\scrapy\\utils\\decorators.py",
    "ast_data": "FunctionDef name:inthread arg:func arguments arg FunctionDef name:wrapped arguments arg arg Return return:yes Call Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "connect_default_events",
    "source_code": "def connect_default_events(self):\n    self.connect_event('motion_notify_event', self.onmove)\n    self.connect_event('button_press_event', self.press)\n    self.connect_event('button_release_event', self.release)\n    self.connect_event('draw_event', self.update_background)\n    self.connect_event('key_press_event', self.on_key_press)\n    self.connect_event('key_release_event', self.on_key_release)\n    self.connect_event('scroll_event', self.on_scroll)",
    "docstring": "Connect the major canvas events to methods.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\widgets.py",
    "ast_data": "FunctionDef name:connect_default_events arg:self arguments arg Call Call Call Call Call Call Call"
  },
  {
    "library": "numpy",
    "name": "masked_greater_equal",
    "source_code": "def masked_greater_equal(x, value, copy=True):\n    return masked_where(greater_equal(x, value), x, copy=copy)",
    "docstring": "Mask an array where greater than or equal to a given value. This function is a shortcut to `condition` = (x >= value). See Also -------- masked_where : Mask where a condition is met. Examples -------- >>> import numpy as np >>> import numpy.ma as ma >>> a = np.arange(4) >>> a array([0, 1, 2, 3]) >>> ma.masked_greater_equal(a, 2) masked_array(data=[0, 1, --, --], mask=[False, False, True, True], fill_value=999999)",
    "type": "function",
    "file_path": "numpy\\numpy\\ma\\core.py",
    "ast_data": "FunctionDef name:masked_greater_equal arg:x arg:value arg:copy arguments arg arg arg Return return:yes Call Call"
  },
  {
    "library": "numpy",
    "name": "unshare_mask",
    "source_code": "def unshare_mask(self):\n    if self._sharedmask:\n        self._mask = self._mask.copy()\n        self._sharedmask = False\n    return self",
    "docstring": "Copy the mask and set the flag to `sharedmaskunshare_mask` ensures the mask is not shared. A copy of the mask is only made if it was shared. See Also -------- sharedmask",
    "type": "method",
    "file_path": "numpy\\numpy\\ma\\core.py",
    "ast_data": "FunctionDef name:unshare_mask arg:self arguments arg If Assign Call Assign Return return:yes"
  },
  {
    "library": "kornia",
    "name": "ImageSize",
    "source_code": "@dataclass(frozen=True)\nclass ImageSize:\n    height: int | Tensor\n    width: int | Tensor",
    "docstring": "Data class to represent image shape. Args: height: image height. width: image width. Example: >>> size = ImageSize(3, 4) >>> size.height 3 >>> size.width 4",
    "type": "class",
    "file_path": "kornia\\kornia\\image\\base.py",
    "ast_data": "ClassDef name:ImageSize Call"
  },
  {
    "library": "numpy",
    "name": "__rfloordiv__",
    "source_code": "def __rfloordiv__(self, other):\n    return floor_divide(other, self)",
    "docstring": "Divide self into other, and return a new masked array.",
    "type": "method",
    "file_path": "numpy\\numpy\\ma\\core.py",
    "ast_data": "FunctionDef name:__rfloordiv__ arg:self arg:other arguments arg arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_snapshot_colocation_stack_metadata",
    "source_code": "def _snapshot_colocation_stack_metadata(self) -> dict[str, traceable_stack.TraceableObject]:\n    return {traceable_obj.obj.name: traceable_obj.copy_metadata() for traceable_obj in self._colocation_stack.peek_traceable_objs()}",
    "docstring": "Return colocation stack metadata as a dictionary.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\ops.py",
    "ast_data": "FunctionDef name:_snapshot_colocation_stack_metadata arg:self arguments arg Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_next_func",
    "source_code": "@def_function.function(input_signature=[tensor_spec.TensorSpec([], dtypes.string)])\ndef _next_func(string_handle):\n    with ops.device(self._source_device_string):\n        iterator = iterator_ops.Iterator.from_string_handle(string_handle, dataset_ops.get_legacy_output_types(self), dataset_ops.get_legacy_output_shapes(self), dataset_ops.get_legacy_output_classes(self))\n    return structure.to_tensor_list(self.element_spec, iterator.get_next())",
    "docstring": "Calls get_next for created iterator. Args: string_handle: An iterator string handle created by _init_func Returns: The elements generated from",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\data\\experimental\\ops\\prefetching_ops.py",
    "ast_data": "FunctionDef name:_next_func arg:string_handle arguments arg With Call Assign Call Call Call Call Return return:yes Call Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "__str__",
    "source_code": "def __str__(self) -> str:\n    parts = []\n    if self.epoch != 0:\n        parts.append(f'{self.epoch}!')\n    parts.append('.'.join((str(x) for x in self.release)))\n    if self.pre is not None:\n        parts.append(''.join((str(x) for x in self.pre)))\n    if self.post is not None:\n        parts.append(f'.post{self.post}')\n    if self.dev is not None:\n        parts.append(f'.dev{self.dev}')\n    if self.local is not None:\n        parts.append(f'+{self.local}')\n    return ''.join(parts)",
    "docstring": "A string representation of the version that can be rounded-tripped. >>> str(Version(\"1.0a5\")) '1.0a5'",
    "type": "method",
    "file_path": "pytorch\\torch\\_vendor\\packaging\\version.py",
    "ast_data": "FunctionDef name:__str__ arg:self arguments arg Assign If Compare Call Call Call Call If Compare Call Call Call If Compare Call If Compare Call If Compare Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_set_mutable",
    "source_code": "def _set_mutable(self, mutable):\n    object.__setattr__(self, '_mutable', mutable)\n    self.autotune._set_mutable(mutable)\n    self.experimental_distribute._set_mutable(mutable)\n    self.experimental_optimization._set_mutable(mutable)\n    self.threading._set_mutable(mutable)",
    "docstring": "Change the mutability value to on this options and children.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\data\\ops\\options.py",
    "ast_data": "FunctionDef name:_set_mutable arg:self arg:mutable arguments arg arg Call Call Call Call Call"
  },
  {
    "library": "virtualenv",
    "name": "reset",
    "source_code": "@abstractmethod\ndef reset(self):\n    pass",
    "docstring": "Called when the user passes in the reset app data.",
    "type": "method",
    "file_path": "virtualenv\\src\\virtualenv\\app_data\\base.py",
    "ast_data": "FunctionDef name:reset arg:self arguments arg"
  },
  {
    "library": "pytorch",
    "name": "SymIntEqByExpr",
    "source_code": "class SymIntEqByExpr:\n    val: Union[torch.SymInt, int]\n\n    def __init__(self, val: Union[torch.SymInt, int]) -> None:\n        self.val = val\n\n    def __repr__(self) -> str:\n        return repr(self.val)\n\n    def _extract(self) -> sympy.Expr:\n        if isinstance(self.val, torch.SymInt):\n            return self.val.node.expr\n        else:\n            return sympy.Integer(self.val)\n\n    def __eq__(self, other: object) -> bool:\n        assert isinstance(other, SymIntEqByExpr)\n        if type(self.val) is int and type(other.val) is int:\n            return self.val == other.val\n        return self._extract() == other._extract()\n\n    def __hash__(self) -> int:\n        return hash(self._extract())",
    "docstring": "This is a wrapper around SymInt which has alternative semantics for equality. Specifically, instead of erroring or guarding, we instead will hash/compare equality based on the underlying sympy expression; e.g., s0 and s1 will always compare as False. NB: This does NOT do fancy analysis that maybe_evaluate_static does; we can only reason through equalities that occur because to expressions canonicalize to the same expression via regular simplification.",
    "type": "class",
    "file_path": "pytorch\\torch\\fx\\experimental\\symbolic_shapes.py",
    "ast_data": "ClassDef name:SymIntEqByExpr FunctionDef name:__init__ arg:self arg:val arguments arg arg Assign FunctionDef name:__repr__ arg:self arguments arg Return return:yes Call FunctionDef name:_extract arg:self arguments arg If Call Return return:yes Return return:yes Call FunctionDef name:__eq__ arg:self arg:other arguments arg arg Call If BoolOp Compare Call Compare Call Return return:yes Compare Return return:yes Compare Call Call FunctionDef name:__hash__ arg:self arguments arg Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "IteratorDecorator",
    "source_code": "class IteratorDecorator:\n\n    def __init__(self, iterator, datapipe, iterator_id, has_next_method):\n        self.iterator = iterator\n        self.datapipe = datapipe\n        self.iterator_id = iterator_id\n        self._profiler_enabled = torch.autograd._profiler_enabled()\n        self.self_and_has_next_method = self.iterator is self.datapipe and has_next_method\n\n    def __iter__(self):\n        return self\n\n    def _get_next(self):\n        _check_iterator_valid(self.datapipe, self.iterator_id)\n        result = next(self.iterator)\n        if not self.self_and_has_next_method:\n            self.datapipe._number_of_samples_yielded += 1\n        return result\n\n    def __next__(self):\n        if self._profiler_enabled:\n            with profiler_record_fn_context(self.datapipe):\n                return self._get_next()\n        else:\n            return self._get_next()\n\n    def __getattr__(self, name):\n        return getattr(self.iterator, name)",
    "docstring": "Wrap the iterator and modifying its method. This decorator is applied to DataPipes of which method is NOT a generator function. Those method commonly returns but not necessarily.",
    "type": "class",
    "file_path": "pytorch\\torch\\utils\\data\\datapipes\\_hook_iterator.py",
    "ast_data": "ClassDef name:IteratorDecorator FunctionDef name:__init__ arg:self arg:iterator arg:datapipe arg:iterator_id arg:has_next_method arguments arg arg arg arg arg Assign Assign Assign Assign Call Assign BoolOp Compare FunctionDef name:__iter__ arg:self arguments arg Return return:yes FunctionDef name:_get_next arg:self arguments arg Call Assign Call If Return return:yes FunctionDef name:__next__ arg:self arguments arg If With Call Return return:yes Call Return return:yes Call FunctionDef name:__getattr__ arg:self arg:name arguments arg arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_dict_to_tensor",
    "source_code": "def _dict_to_tensor(self, x, k1, k2, k3):\n    return array_ops_stack.stack([array_ops_stack.stack([array_ops_stack.stack([x[i, j, k] for k in range(k3)]) for j in range(k2)]) for i in range(k1)])",
    "docstring": "Convert a dictionary to a tensor. Args: x: A k1 * k2 dictionary. k1: First dimension of x. k2: Second dimension of x. k3: Third dimension of x. Returns: A k1 * k2 * k3 tensor.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\init_ops.py",
    "ast_data": "FunctionDef name:_dict_to_tensor arg:self arg:x arg:k1 arg:k2 arg:k3 arguments arg arg arg arg arg Return return:yes Call Call Call Call Call Call"
  },
  {
    "library": "scrapy",
    "name": "InitSpider",
    "source_code": "class InitSpider(Spider):\n\n    def __init__(self, *args, **kwargs):\n        super().__init__(*args, **kwargs)\n        warnings.warn(\"InitSpider is deprecated. Copy its code from Scrapy's source if needed. Will be removed in a future version.\", ScrapyDeprecationWarning, stacklevel=2)\n\n    async def start(self) -> AsyncIterator[Any]:\n        with warnings.catch_warnings():\n            warnings.filterwarnings('ignore', category=ScrapyDeprecationWarning, module='^scrapy\\\\.spiders$')\n            for item_or_request in self.start_requests():\n                yield item_or_request\n\n    def start_requests(self) -> Iterable[Request]:\n        self._postinit_reqs: Iterable[Request] = super().start_requests()\n        return cast(Iterable[Request], iterate_spider_output(self.init_request()))\n\n    def initialized(self, response: Response | None=None) -> Any:\n        return self.__dict__.pop('_postinit_reqs')\n\n    def init_request(self) -> Any:\n        return self.initialized()",
    "docstring": "Base Spider with initialization facilities .. warning:: This class is deprecated. Copy its code into your project if needed. It will be removed in a future Scrapy version.",
    "type": "class",
    "file_path": "scrapy\\scrapy\\spiders\\init.py",
    "ast_data": "ClassDef name:InitSpider FunctionDef name:__init__ arg:self arguments arg arg arg Call Call Call AsyncFunctionDef name:start arg:self arguments arg With Call Call For Call FunctionDef name:start_requests arg:self arguments arg Call Call Return return:yes Call Call Call FunctionDef name:initialized arg:self arg:response arguments arg arg Return return:yes Call FunctionDef name:init_request arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "broadcast",
    "source_code": "def broadcast(self, rt):\n    flat_values = self.broadcast_flat_values(rt)\n    return self.target_shape._add_row_partitions(flat_values)",
    "docstring": "Broadcast a tensor of source_shape to target_shape.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\dynamic_ragged_shape.py",
    "ast_data": "FunctionDef name:broadcast arg:self arg:rt arguments arg arg Assign Call Return return:yes Call"
  },
  {
    "library": "django",
    "name": "setup",
    "source_code": "def setup(self, request, *args, **kwargs):\n    if hasattr(self, 'get') and (not hasattr(self, 'head')):\n        self.head = self.get\n    self.request = request\n    self.args = args\n    self.kwargs = kwargs",
    "docstring": "Initialize attributes shared by all view methods.",
    "type": "method",
    "file_path": "django\\django\\views\\generic\\base.py",
    "ast_data": "FunctionDef name:setup arg:self arg:request arguments arg arg arg arg If BoolOp Call Call Assign Assign Assign Assign"
  },
  {
    "library": "matplotlib",
    "name": "get_forced_alpha",
    "source_code": "def get_forced_alpha(self):\n    return self._forced_alpha",
    "docstring": "Return whether the value given by get_alpha() should be used to override any other alpha-channel values.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\backend_bases.py",
    "ast_data": "FunctionDef name:get_forced_alpha arg:self arguments arg Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "_setattr_cm",
    "source_code": "@contextlib.contextmanager\ndef _setattr_cm(obj, **kwargs):\n    sentinel = object()\n    origs = {}\n    for attr in kwargs:\n        orig = getattr(obj, attr, sentinel)\n        if attr in obj.__dict__ or orig is sentinel:\n            origs[attr] = orig\n        else:\n            cls_orig = getattr(type(obj), attr)\n            if isinstance(cls_orig, property):\n                origs[attr] = orig\n            else:\n                origs[attr] = sentinel\n    try:\n        for attr, val in kwargs.items():\n            setattr(obj, attr, val)\n        yield\n    finally:\n        for attr, orig in origs.items():\n            if orig is sentinel:\n                delattr(obj, attr)\n            else:\n                setattr(obj, attr, orig)",
    "docstring": "Temporarily set some attributes; restore original state at context exit.",
    "type": "function",
    "file_path": "matplotlib\\lib\\matplotlib\\cbook.py",
    "ast_data": "FunctionDef name:_setattr_cm arg:obj arguments arg arg Assign Call Assign For Assign Call If BoolOp Compare Compare Assign Assign Call Call If Call Assign Assign Try For Call Call For Call If Compare Call Call"
  },
  {
    "library": "matplotlib",
    "name": "set_pickradius",
    "source_code": "def set_pickradius(self, pickradius):\n    if not isinstance(pickradius, Real):\n        raise ValueError(f'pickradius must be a real-valued number, not {pickradius!r}')\n    self._pickradius = pickradius",
    "docstring": "Set the pick radius used for containment tests. Parameters ---------- pickradius : float Pick radius, in points.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\collections.py",
    "ast_data": "FunctionDef name:set_pickradius arg:self arg:pickradius arguments arg arg If Call Raise Call Assign"
  },
  {
    "library": "pandas",
    "name": "size",
    "source_code": "@property\ndef size(self) -> int:\n    return len(self._values)",
    "docstring": "Return the number of elements in the underlying data. See Also -------- Series.ndim: Number of dimensions of the underlying data, by definition 1. Series.shape: Return a tuple of the shape of the underlying data. Series.dtype: Return the dtype object of the underlying data. Series.values: Return Series as ndarray or ndarray-like depending on the dtype. Examples -------- For Series: >>> s = pd.Series([\"Ant\", \"Bear\", \"Cow\"]) >>> s 0 Ant 1 Bear 2 Cow dtype: object >>> s.size 3 For Index: >>> idx = pd.Index([1, 2, 3]) >>> idx Index([1, 2, 3], dtype='int64') >>> idx.size 3",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\base.py",
    "ast_data": "FunctionDef name:size arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "m_num",
    "source_code": "@property\ndef m_num(self) -> int:\n    return len(self.win)",
    "docstring": "Number of samples in window . Note that the FFT can be oversampled by zero-padding. This is achieved by setting the property. See Also -------- m_num_mid: Center index of window . mfft: Length of input for the FFT used - may be larger than . hop: Time increment in signal samples for sliding window. win: Window function as real- or complex-valued 1d array. ShortTimeFFT: Class this property belongs to.",
    "type": "method",
    "file_path": "scipy\\scipy\\signal\\_short_time_fft.py",
    "ast_data": "FunctionDef name:m_num arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "make_dataset",
    "source_code": "def make_dataset(X, y, sample_weight, random_state=None):\n    rng = check_random_state(random_state)\n    seed = rng.randint(1, np.iinfo(np.int32).max)\n    if X.dtype == np.float32:\n        CSRData = CSRDataset32\n        ArrayData = ArrayDataset32\n    else:\n        CSRData = CSRDataset64\n        ArrayData = ArrayDataset64\n    if sp.issparse(X):\n        dataset = CSRData(X.data, X.indptr, X.indices, y, sample_weight, seed=seed)\n        intercept_decay = SPARSE_INTERCEPT_DECAY\n    else:\n        X = np.ascontiguousarray(X)\n        dataset = ArrayData(X, y, sample_weight, seed=seed)\n        intercept_decay = 1.0\n    return (dataset, intercept_decay)",
    "docstring": "Create `Glossary ` abstraction intercept_decay The intercept decay",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\linear_model\\_base.py",
    "ast_data": "FunctionDef name:make_dataset arg:X arg:y arg:sample_weight arg:random_state arguments arg arg arg arg Assign Call Assign Call Call If Compare Assign Assign Assign Assign If Call Assign Call Assign Assign Call Assign Call Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, target_shape, **kwargs):\n    super(Reshape, self).__init__(**kwargs)\n    self.target_shape = tuple(target_shape)",
    "docstring": "Creates a layer instance. Args: target_shape: Target shape. Tuple of integers, does not include the samples dimension (batch size). **kwargs: Any additional layer keyword arguments.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\layers\\core.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:target_shape arguments arg arg arg Call Call Assign Call"
  },
  {
    "library": "django",
    "name": "TemplateView",
    "source_code": "class TemplateView(TemplateResponseMixin, ContextMixin, View):\n\n    def get(self, request, *args, **kwargs):\n        context = self.get_context_data(**kwargs)\n        return self.render_to_response(context)",
    "docstring": "Render a template. Pass keyword arguments from the URLconf to the context.",
    "type": "class",
    "file_path": "django\\django\\views\\generic\\base.py",
    "ast_data": "ClassDef name:TemplateView FunctionDef name:get arg:self arg:request arguments arg arg arg arg Assign Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "type_before_parametrizations",
    "source_code": "def type_before_parametrizations(module: Module) -> type:\n    if is_parametrized(module):\n        return module.__class__.__bases__[0]\n    else:\n        return type(module)",
    "docstring": "Return the module type before parametrizations were applied and if not, then it returns the module type. Args: module (nn.Module): module to get type of",
    "type": "function",
    "file_path": "pytorch\\torch\\nn\\utils\\parametrize.py",
    "ast_data": "FunctionDef name:type_before_parametrizations arg:module arguments arg If Call Return return:yes Return return:yes Call"
  },
  {
    "library": "django",
    "name": "check_sized_string",
    "source_code": "def check_sized_string(result, func, cargs):\n    if not result:\n        raise GEOSException('Invalid string pointer returned by GEOS C function \"%s\"' % func.__name__)\n    s = string_at(result, last_arg_byref(cargs))\n    free(result)\n    return s",
    "docstring": "Error checking for routines that return explicitly sized strings. This frees the memory allocated by GEOS at the result pointer.",
    "type": "function",
    "file_path": "django\\django\\contrib\\gis\\geos\\prototypes\\errcheck.py",
    "ast_data": "FunctionDef name:check_sized_string arg:result arg:func arg:cargs arguments arg arg arg If Raise Call Assign Call Call Call Return return:yes"
  },
  {
    "library": "django",
    "name": "is_protected_type",
    "source_code": "def is_protected_type(obj):\n    return isinstance(obj, _PROTECTED_TYPES)",
    "docstring": "Determine if the object instance is of a protected type. Objects of protected types are preserved as-is when passed to force_str(strings_only=True).",
    "type": "function",
    "file_path": "django\\django\\utils\\encoding.py",
    "ast_data": "FunctionDef name:is_protected_type arg:obj arguments arg Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "add_child_axes",
    "source_code": "def add_child_axes(self, ax):\n    ax._axes = self\n    ax.stale_callback = martist._stale_axes_callback\n    self.child_axes.append(ax)\n    ax._remove_method = functools.partial(self.get_figure(root=False)._remove_axes, owners=[self.child_axes])\n    self.stale = True\n    return ax",
    "docstring": "Add an to the Axes' children; return the child Axes. This is the lowlevel version. See .",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\axes\\_base.py",
    "ast_data": "FunctionDef name:add_child_axes arg:self arg:ax arguments arg arg Assign Assign Call Assign Call Call Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_scalar",
    "source_code": "def _scalar(tf_fn, x, promote_to_float=False):\n    x = np_array_ops.asarray(x)\n    if promote_to_float and (not np.issubdtype(x.dtype.as_numpy_dtype, np.inexact)):\n        x = x.astype(np_utils.result_type(float))\n    return tf_fn(x)",
    "docstring": "Computes the tf_fn(x) for each element in . Args: tf_fn: function that takes a single Tensor argument. x: array_like. Could be an ndarray, a Tensor or any object that can be converted to a Tensor using . promote_to_float: whether to cast the argument to a float dtype if it is not already. Returns: An ndarray with the same shape as . The default output dtype is determined by , unless x is an ndarray with a floating point type, in which case the output type is same as x.dtype.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\numpy_ops\\np_math_ops.py",
    "ast_data": "FunctionDef name:_scalar arg:tf_fn arg:x arg:promote_to_float arguments arg arg arg Assign Call If BoolOp Call Assign Call Call Return return:yes Call"
  },
  {
    "library": "scrapy",
    "name": "SelectorList",
    "source_code": "class SelectorList(_ParselSelector.selectorlist_cls, object_ref):\n    pass",
    "docstring": "The :class: class is a subclass of the builtin `` class, which provides a few additional methods.",
    "type": "class",
    "file_path": "scrapy\\scrapy\\selector\\unified.py",
    "ast_data": "ClassDef name:SelectorList"
  },
  {
    "library": "pytorch",
    "name": "update_mask",
    "source_code": "def update_mask(self, name, data, configs):\n    mask = self.get_mask(name)\n    sparse_config = configs['sparse_config']\n    features = configs['features']\n    reduce_fn = configs['reduce_fn']\n    mask_fn = configs['mask_fn']\n    if features is None:\n        data = reduce_fn(data)\n        mask.data = mask_fn(data, **sparse_config)\n    else:\n        for feature_idx in range(len(features)):\n            data_feature = reduce_fn(data[feature_idx])\n            mask[feature_idx].data = mask_fn(data_feature, **sparse_config)",
    "docstring": "Called for each registered layer and does the following- 1. apply reduce_fn on the aggregated activations 2. use mask_fn to compute the sparsification mask Note: the reduce_fn and mask_fn is called for each feature, dim over the data",
    "type": "method",
    "file_path": "pytorch\\torch\\ao\\pruning\\_experimental\\activation_sparsifier\\activation_sparsifier.py",
    "ast_data": "FunctionDef name:update_mask arg:self arg:name arg:data arg:configs arguments arg arg arg arg Assign Call Assign Assign Assign Assign If Compare Assign Call Assign Call For Call Call Assign Call Assign Call"
  },
  {
    "library": "cherrypy",
    "name": "validate_since",
    "source_code": "def validate_since():\n    response = cherrypy.serving.response\n    lastmod = response.headers.get('Last-Modified')\n    if lastmod:\n        status, reason, msg = _httputil.valid_status(response.status)\n        request = cherrypy.serving.request\n        since = request.headers.get('If-Unmodified-Since')\n        if since and since != lastmod:\n            if status >= 200 and status <= 299 or status == 412:\n                raise cherrypy.HTTPError(412)\n        since = request.headers.get('If-Modified-Since')\n        if since and since == lastmod:\n            if status >= 200 and status <= 299 or status == 304:\n                if request.method in ('GET', 'HEAD'):\n                    raise cherrypy.HTTPRedirect([], 304)\n                else:\n                    raise cherrypy.HTTPError(412)",
    "docstring": "Validate the current Last-Modified against If-Modified-Since headers. If no code has set the Last-Modified response header, then no validation will be performed.",
    "type": "function",
    "file_path": "cherrypy\\cherrypy\\lib\\cptools.py",
    "ast_data": "FunctionDef name:validate_since arguments Assign Assign Call If Assign Call Assign Assign Call If BoolOp Compare If BoolOp BoolOp Compare Compare Compare Raise Call Assign Call If BoolOp Compare If BoolOp BoolOp Compare Compare Compare If Compare Raise Call Raise Call"
  },
  {
    "library": "scikit-learn",
    "name": "_predict",
    "source_code": "def _predict(self, X):\n    kwargs = {'Y_norm_squared': self._subcluster_norms}\n    with config_context(assume_finite=True):\n        argmin = pairwise_distances_argmin(X, self.subcluster_centers_, metric_kwargs=kwargs)\n    return self.subcluster_labels_[argmin]",
    "docstring": "Predict data using the `` of subclusters.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\cluster\\_birch.py",
    "ast_data": "FunctionDef name:_predict arg:self arg:X arguments arg arg Assign With Call Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_Case",
    "source_code": "class _Case(_FunctionCaller):\n\n    def __init__(self, node, function, enclosing_graph):\n        super(_Case, self).__init__(node, function, enclosing_graph, first_function_input=1, type_attribute='Tin', function_attributes=['branches'])",
    "docstring": "Specialization of _Node to Case-like operations.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\convert_to_constants.py",
    "ast_data": "ClassDef name:_Case FunctionDef name:__init__ arg:self arg:node arg:function arg:enclosing_graph arguments arg arg arg arg Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_prepare_run_watch_config",
    "source_code": "def _prepare_run_watch_config(self, fetches, feed_dict):\n    debug_urls = self.prepare_run_debug_urls(fetches, feed_dict)\n    if self._watch_fn is None:\n        watch_options = WatchOptions()\n    else:\n        watch_options = self._watch_fn(fetches, feed_dict)\n        if isinstance(watch_options, tuple):\n            watch_options = WatchOptions(*watch_options)\n    return (debug_urls, watch_options)",
    "docstring": "Get the debug_urls, and node/op allowlists for the current run() call. Args: fetches: Same as the argument to . feed_dict: Same as the to . Returns: debug_urls: (str or list of str) Debug URLs for the current run() call. Currently, the list consists of only one URL that is a file:// URL. watch_options: (WatchOptions) The return value of a watch_fn, containing options including debug_ops, and allowlists.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\debug\\wrappers\\framework.py",
    "ast_data": "FunctionDef name:_prepare_run_watch_config arg:self arg:fetches arg:feed_dict arguments arg arg arg Assign Call If Compare Assign Call Assign Call If Call Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, name=None, shared_name=None, critical_section_def=None, import_scope=None):\n    context.ensure_initialized()\n    if critical_section_def and name is not None:\n        raise ValueError(f'Arguments critical_section_def={critical_section_def} and shared_name={shared_name} are mutually exclusive. Please only specify one of them.')\n    if critical_section_def:\n        raise ValueError('Argument `critical_section_def` is not supported.')\n    else:\n        self._init_from_args(name, shared_name)",
    "docstring": "Creates a critical section.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\critical_section_ops.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:name arg:shared_name arg:critical_section_def arg:import_scope arguments arg arg arg arg arg Call If BoolOp Compare Raise Call If Raise Call Call"
  },
  {
    "library": "tensorflow",
    "name": "call",
    "source_code": "@doc_controls.for_subclass_implementers\ndef call(self, inputs, **kwargs):\n    return inputs",
    "docstring": "This is where the layer's logic lives. Args: inputs: Input tensor, or list/tuple of input tensors. **kwargs: Additional keyword arguments. Returns: A tensor or list/tuple of tensors.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\base_layer_v1.py",
    "ast_data": "FunctionDef name:call arg:self arg:inputs arguments arg arg arg Return return:yes"
  },
  {
    "library": "scipy",
    "name": "close",
    "source_code": "def close(self):\n    if hasattr(self, 'fp') and (not self.fp.closed):\n        try:\n            self.flush()\n        finally:\n            self.variables = {}\n            if self._mm_buf is not None:\n                ref = weakref.ref(self._mm_buf)\n                self._mm_buf = None\n                if ref() is None:\n                    self._mm.close()\n                else:\n                    warnings.warn('Cannot close a netcdf_file opened with mmap=True, when netcdf_variables or arrays referring to its data still exist. All data arrays obtained from such files refer directly to data on disk, and must be copied before the file can be cleanly closed. (See netcdf_file docstring for more information on mmap.)', category=RuntimeWarning, stacklevel=2)\n            self._mm = None\n            self.fp.close()",
    "docstring": "Closes the NetCDF file.",
    "type": "method",
    "file_path": "scipy\\scipy\\io\\_netcdf.py",
    "ast_data": "FunctionDef name:close arg:self arguments arg If BoolOp Call Try Call Assign If Compare Assign Call Assign If Compare Call Call Call Assign Call"
  },
  {
    "library": "tensorflow",
    "name": "_get_trainable_state",
    "source_code": "def _get_trainable_state(self):\n    trainable_state = weakref.WeakKeyDictionary()\n    for layer in self._flatten_layers():\n        trainable_state[layer] = layer.trainable\n    return trainable_state",
    "docstring": "Get the state of each sublayer. Returns: A dict mapping all sublayers to their value.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\base_layer.py",
    "ast_data": "FunctionDef name:_get_trainable_state arg:self arguments arg Assign Call For Call Assign Return return:yes"
  },
  {
    "library": "pandas",
    "name": "_summary",
    "source_code": "def _summary(self, name=None) -> str_t:\n    if len(self) > 0:\n        head = self[0]\n        if hasattr(head, 'format') and (not isinstance(head, str)):\n            head = head.format()\n        elif needs_i8_conversion(self.dtype):\n            head = self._formatter_func(head).replace(\"'\", '')\n        tail = self[-1]\n        if hasattr(tail, 'format') and (not isinstance(tail, str)):\n            tail = tail.format()\n        elif needs_i8_conversion(self.dtype):\n            tail = self._formatter_func(tail).replace(\"'\", '')\n        index_summary = f', {head} to {tail}'\n    else:\n        index_summary = ''\n    if name is None:\n        name = type(self).__name__\n    return f'{name}: {len(self)} entries{index_summary}'",
    "docstring": "Return a summarized representation. Parameters ---------- name : str name to use in the summary representation Returns ------- String with a summarized representation of the index",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\indexes\\base.py",
    "ast_data": "FunctionDef name:_summary arg:self arg:name arguments arg arg If Compare Call Assign If BoolOp Call Call Assign Call If Call Assign Call Call Assign If BoolOp Call Call Assign Call If Call Assign Call Call Assign Assign If Compare Assign Call Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "generate_fontconfig_pattern",
    "source_code": "def generate_fontconfig_pattern(d):\n    kvs = [(k, getattr(d, f'get_{k}')()) for k in ['style', 'variant', 'weight', 'stretch', 'file', 'size']]\n    return ','.join((_family_escape(f) for f in d.get_family())) + ''.join((f':{k}={_value_escape(str(v))}' for k, v in kvs if v is not None))",
    "docstring": "Convert a to a fontconfig pattern string.",
    "type": "function",
    "file_path": "matplotlib\\lib\\matplotlib\\_fontconfig_pattern.py",
    "ast_data": "FunctionDef name:generate_fontconfig_pattern arg:d arguments arg Assign Call Call Return return:yes Call Call Call Call Call Call Compare"
  },
  {
    "library": "matplotlib",
    "name": "set_data_interval",
    "source_code": "def set_data_interval(self, vmin, vmax, ignore=False):\n    raise NotImplementedError('Derived must override')",
    "docstring": "Set the axis data limits. This method is for internal use. If *ignore* is False (the default), this method will never reduce the preexisting data limits, only expand them if *vmin* or *vmax* are not within them. Moreover, the order of *vmin* and *vmax* does not matter; the orientation of the axis will not change. If *ignore* is True, the data limits will be set exactly to `` in that order.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\axis.py",
    "ast_data": "FunctionDef name:set_data_interval arg:self arg:vmin arg:vmax arg:ignore arguments arg arg arg arg Raise Call"
  },
  {
    "library": "pytorch",
    "name": "sym_int",
    "source_code": "def sym_int(a):\n    if overrides.has_torch_function_unary(a):\n        return overrides.handle_torch_function(sym_int, (a,), a)\n    if isinstance(a, SymInt):\n        return a\n    elif isinstance(a, SymFloat):\n        return math.trunc(a)\n    return builtins.int(a)",
    "docstring": "SymInt-aware utility for int casting. Args: a (SymInt, SymFloat, or object): Object to cast",
    "type": "function",
    "file_path": "pytorch\\torch\\__init__.py",
    "ast_data": "FunctionDef name:sym_int arg:a arguments arg If Call Return return:yes Call If Call Return return:yes If Call Return return:yes Call Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "apply",
    "source_code": "def apply(self, X):\n    X = self._validate_X_predict(X)\n    results = Parallel(n_jobs=self.n_jobs, verbose=self.verbose, prefer='threads')((delayed(tree.apply)(X, check_input=False) for tree in self.estimators_))\n    return np.array(results).T",
    "docstring": "Apply trees in the forest to X, return leaf indices. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) The input samples. Internally, its dtype will be converted to ``. Returns ------- X_leaves : ndarray of shape (n_samples, n_estimators) For each datapoint x in X and for each tree in the forest, return the index of the leaf x ends up in.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\ensemble\\_forest.py",
    "ast_data": "FunctionDef name:apply arg:self arg:X arguments arg arg Assign Call Assign Call Call Call Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "add_meta_graph",
    "source_code": "def add_meta_graph(self, meta_graph_def, global_step=None):\n    if not isinstance(meta_graph_def, meta_graph_pb2.MetaGraphDef):\n        raise TypeError('meta_graph_def must be type MetaGraphDef, saw type: %s' % type(meta_graph_def))\n    meta_graph_bytes = meta_graph_def.SerializeToString()\n    event = event_pb2.Event(meta_graph_def=meta_graph_bytes)\n    self._add_event(event, global_step)",
    "docstring": "Adds a to the event file. The allows running the given graph via . Args: meta_graph_def: A object, often as returned by . global_step: Number. Optional global step counter to record with the graph. Raises: TypeError: If both is not an instance of .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\summary\\writer\\writer.py",
    "ast_data": "FunctionDef name:add_meta_graph arg:self arg:meta_graph_def arg:global_step arguments arg arg arg If Call Raise Call Call Assign Call Assign Call Call"
  },
  {
    "library": "tensorflow",
    "name": "ClassificationOutput",
    "source_code": "class ClassificationOutput(ExportOutput):\n\n    def __init__(self, scores=None, classes=None):\n        if scores is not None and (not (isinstance(scores, tensor.Tensor) and scores.dtype.is_floating)):\n            raise ValueError('Classification scores must be a float32 Tensor; got {}'.format(scores))\n        if classes is not None and (not (isinstance(classes, tensor.Tensor) and dtypes.as_dtype(classes.dtype) == dtypes.string)):\n            raise ValueError('Classification classes must be a string Tensor; got {}'.format(classes))\n        if scores is None and classes is None:\n            raise ValueError('At least one of scores and classes must be set.')\n        self._scores = scores\n        self._classes = classes\n\n    @property\n    def scores(self):\n        return self._scores\n\n    @property\n    def classes(self):\n        return self._classes\n\n    def as_signature_def(self, receiver_tensors):\n        if len(receiver_tensors) != 1:\n            raise ValueError('Classification input must be a single string Tensor; got {}'.format(receiver_tensors))\n        (_, examples), = receiver_tensors.items()\n        if dtypes.as_dtype(examples.dtype) != dtypes.string:\n            raise ValueError('Classification input must be a single string Tensor; got {}'.format(receiver_tensors))\n        return signature_def_utils.classification_signature_def(examples, self.classes, self.scores)",
    "docstring": "Represents the output of a classification head. Either classes or scores or both must be set. The classes must provide string labels, not integer class IDs. If only classes is set, it is interpreted as providing top-k results in descending order. If only scores is set, it is interpreted as providing a score for every class in order of class ID. If both classes and scores are set, they are interpreted as zipped, so each score corresponds to the class at the same index. Clients should not depend on the order of the entries.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\saving\\utils_v1\\export_output.py",
    "ast_data": "ClassDef name:ClassificationOutput FunctionDef name:__init__ arg:self arg:scores arg:classes arguments arg arg arg If BoolOp Compare BoolOp Call Raise Call Call If BoolOp Compare BoolOp Call Compare Call Raise Call Call If BoolOp Compare Compare Raise Call Assign Assign FunctionDef name:scores arg:self arguments arg Return return:yes FunctionDef name:classes arg:self arguments arg Return return:yes FunctionDef name:as_signature_def arg:self arg:receiver_tensors arguments arg arg If Compare Call Raise Call Call Assign Call If Compare Call Raise Call Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "PostTrainingDataSparsity",
    "source_code": "class PostTrainingDataSparsity(pl.callbacks.Callback):\n\n    def __init__(self, data_sparsifier_class, data_sparsifier_args):\n        super().__init__()\n        self.data_sparsifier_class = data_sparsifier_class\n        self.data_sparsifier_args = data_sparsifier_args\n        self.data_sparsifier: Any = None\n        self.sparsified: Optional[torch.nn.Module] = None\n\n    def on_fit_end(self, trainer, pl_module) -> None:\n        self.sparsified = deepcopy(pl_module.model).eval()\n        self.data_sparsifier = self.data_sparsifier_class(**self.data_sparsifier_args)\n        _attach_model_to_data_sparsifier(self.sparsified, self.data_sparsifier)\n        self.data_sparsifier.step()\n        self.data_sparsifier.squash_mask()\n        _log_sparsified_level(self.sparsified, self.data_sparsifier)",
    "docstring": "Lightning callback that enables post-training sparsity. This callback aims to sparsify the model inside lightning module after training. **Note that the model is copied and then sparsified, so the existing model is not modified** The sparsified model can be used for comparison and can be accessed using .sparsified Args: data_sparsifier_class (some implemented class of BaseDataSparsifier) The data sparsifier object of this class is created when the training starts. Note: Objects should not be passed in here as they are created once the training completes. data_sparsifier_args (Dict) Dictionary of args to be passed to the data sparsifier. Note: data_list arg should be ignored Hooks implemented: on_fit_end() 1. copies the model and attaches it to the sparsifier 2. sparsier step() is called 3. squashes the mask()",
    "type": "class",
    "file_path": "pytorch\\torch\\ao\\pruning\\_experimental\\data_sparsifier\\lightning\\callbacks\\data_sparsity.py",
    "ast_data": "ClassDef name:PostTrainingDataSparsity FunctionDef name:__init__ arg:self arg:data_sparsifier_class arg:data_sparsifier_args arguments arg arg arg Call Call Assign Assign FunctionDef name:on_fit_end arg:self arg:trainer arg:pl_module arguments arg arg arg Assign Call Call Assign Call Call Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "get_decision",
    "source_code": "def get_decision(self) -> Optional[Choice]:\n    heuristics = self.get_heuristics(self.metadata.name)\n    for heuristic in heuristics:\n        if heuristic.check_precondition(self.metadata, self.context):\n            return heuristic.get_decision(self.context, self.metadata.choices)\n    return None",
    "docstring": "Returns the decision made by the learned heuristic or None if no heuristic was found or the heuristic is unsure which choice to make.",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\autoheuristic\\learned_heuristic_controller.py",
    "ast_data": "FunctionDef name:get_decision arg:self arguments arg Assign Call For If Call Return return:yes Call Return return:no"
  },
  {
    "library": "scrapy",
    "name": "to_dict",
    "source_code": "def to_dict(self, *, spider: scrapy.Spider | None=None) -> dict[str, Any]:\n    d = {'url': self.url, 'callback': _find_method(spider, self.callback) if callable(self.callback) else self.callback, 'errback': _find_method(spider, self.errback) if callable(self.errback) else self.errback, 'headers': dict(self.headers)}\n    for attr in self.attributes:\n        d.setdefault(attr, getattr(self, attr))\n    if type(self) is not Request:\n        d['_class'] = self.__module__ + '.' + self.__class__.__name__\n    return d",
    "docstring": "Return a dictionary containing the Request's data. Use :func: to convert back into a :class: object. If a spider is given, this method will try to find out the name of the spider methods used as callback and errback and include them in the output dict, raising an exception if they cannot be found.",
    "type": "method",
    "file_path": "scrapy\\scrapy\\http\\request\\__init__.py",
    "ast_data": "FunctionDef name:to_dict arg:self arguments arg arg Assign Call Call Call Call Call For Call Call If Compare Call Assign Return return:yes"
  },
  {
    "library": "sphinx",
    "name": "default_latex_docclass",
    "source_code": "def default_latex_docclass(config: Config) -> dict[str, str]:\n    if config.language == 'ja':\n        if config.latex_engine == 'uplatex':\n            return {'manual': 'ujbook', 'howto': 'ujreport'}\n        else:\n            return {'manual': 'jsbook', 'howto': 'jreport'}\n    else:\n        return {}",
    "docstring": "Better default latex_docclass settings for specific languages.",
    "type": "function",
    "file_path": "sphinx\\sphinx\\builders\\latex\\__init__.py",
    "ast_data": "FunctionDef name:default_latex_docclass arg:config arguments arg If Compare If Compare Return return:yes Return return:yes Return return:no"
  },
  {
    "library": "pandas",
    "name": "searchsorted",
    "source_code": "def searchsorted(self, value: NumpyValueArrayLike | ExtensionArray, side: Literal['left', 'right']='left', sorter: NumpySorter | None=None) -> npt.NDArray[np.intp] | np.intp:\n    arr = self.astype(object)\n    if isinstance(value, ExtensionArray):\n        value = value.astype(object)\n    return arr.searchsorted(value, side=side, sorter=sorter)",
    "docstring": "Find indices where elements should be inserted to maintain order. Find the indices into a sorted array (a) such that, if the corresponding elements in were inserted before the indices, the order of would be preserved. Assuming that is sorted: ====== ================================ returned index satisfies ====== ================================ left ``self[i-1] >> arr = pd.array([1, 2, 3, 5]) >>> arr.searchsorted([4]) array([3])",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\arrays\\base.py",
    "ast_data": "FunctionDef name:searchsorted arg:self arg:value arg:side arg:sorter arguments arg arg arg arg Assign Call If Call Assign Call Return return:yes Call"
  },
  {
    "library": "sphinx",
    "name": "RefOnlyListChecker",
    "source_code": "class RefOnlyListChecker(nodes.GenericNodeVisitor):\n\n    def default_visit(self, node: Node) -> None:\n        raise nodes.NodeFound\n\n    def visit_bullet_list(self, node: nodes.bullet_list) -> None:\n        pass\n\n    def visit_list_item(self, node: nodes.list_item) -> None:\n        children: list[Node] = [child for child in node.children if not isinstance(child, nodes.Invisible)]\n        if len(children) != 1:\n            raise nodes.NodeFound\n        if not isinstance(children[0], nodes.paragraph):\n            raise nodes.NodeFound\n        para = children[0]\n        if len(para) != 1:\n            raise nodes.NodeFound\n        if not isinstance(para[0], addnodes.pending_xref):\n            raise nodes.NodeFound\n        raise nodes.SkipChildren\n\n    def invisible_visit(self, node: Node) -> None:\n        pass",
    "docstring": "Raise if non-simple list item is encountered. Here 'simple' means a list item containing only a paragraph with a single reference in it.",
    "type": "class",
    "file_path": "sphinx\\sphinx\\transforms\\compact_bullet_list.py",
    "ast_data": "ClassDef name:RefOnlyListChecker FunctionDef name:default_visit arg:self arg:node arguments arg arg Raise FunctionDef name:visit_bullet_list arg:self arg:node arguments arg arg FunctionDef name:visit_list_item arg:self arg:node arguments arg arg Call If Compare Call Raise If Call Raise Assign If Compare Call Raise If Call Raise Raise FunctionDef name:invisible_visit arg:self arg:node arguments arg arg"
  },
  {
    "library": "matplotlib",
    "name": "set_offsets",
    "source_code": "def set_offsets(self, offsets):\n    offsets = np.asanyarray(offsets)\n    if offsets.shape == (2,):\n        offsets = offsets[None, :]\n    cstack = np.ma.column_stack if isinstance(offsets, np.ma.MaskedArray) else np.column_stack\n    self._offsets = cstack((np.asanyarray(self.convert_xunits(offsets[:, 0]), float), np.asanyarray(self.convert_yunits(offsets[:, 1]), float)))\n    self.stale = True",
    "docstring": "Set the offsets for the collection. Parameters ---------- offsets : (N, 2) or (2,) array-like",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\collections.py",
    "ast_data": "FunctionDef name:set_offsets arg:self arg:offsets arguments arg arg Assign Call If Compare Assign Assign Call Assign Call Call Call Call Call Assign"
  },
  {
    "library": "pandas",
    "name": "close",
    "source_code": "def close(self) -> None:\n    if self._handle is not None:\n        self._handle.close()\n    self._handle = None",
    "docstring": "Close the PyTables file handle",
    "type": "method",
    "file_path": "pandas\\pandas\\io\\pytables.py",
    "ast_data": "FunctionDef name:close arg:self arguments arg If Compare Call Assign"
  },
  {
    "library": "pytorch",
    "name": "confirm_phase",
    "source_code": "def confirm_phase(self, expected_version, this_rank):\n    logger.info('All peers arrived. Confirming membership.')\n    self.confirm_membership(expected_version, this_rank)\n    logger.info('Waiting for confirmations from all peers.')\n    active_version = self.wait_for_final(expected_version)\n    state = json.loads(active_version.value)\n    logger.info('Rendezvous version %s is complete. Final state: %s', state['version'], state)\n    return (state['version'], this_rank, len(state['participants']))",
    "docstring": "Once the rendezvous state transitions from 'joinable' to 'frozen', we have every participant confirm their membership and setup per-member keep-alive TTL keys, and then wait for all other participants to confirm, which would then successfully conclude this rendezvous.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\elastic\\rendezvous\\etcd_rendezvous.py",
    "ast_data": "FunctionDef name:confirm_phase arg:self arg:expected_version arg:this_rank arguments arg arg arg Call Call Call Assign Call Assign Call Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "setup_onnx_logging",
    "source_code": "@deprecated('The feature will be removed. Please remove usage of this function')\n@contextlib.contextmanager\ndef setup_onnx_logging(verbose: bool):\n    is_originally_enabled = _C._jit_is_onnx_log_enabled\n    if is_originally_enabled or verbose:\n        _C._jit_set_onnx_log_enabled(True)\n    try:\n        yield\n    finally:\n        if not is_originally_enabled:\n            _C._jit_set_onnx_log_enabled(False)",
    "docstring": "A context manager to temporarily set the ONNX logging verbosity. .. deprecated:: 2.7 Please remove usage of this function.",
    "type": "function",
    "file_path": "pytorch\\torch\\onnx\\utils.py",
    "ast_data": "FunctionDef name:setup_onnx_logging arg:verbose arguments arg Assign If BoolOp Call Try If Call Call"
  },
  {
    "library": "pytorch",
    "name": "_set_main_datapipe_valid_iterator_id",
    "source_code": "def _set_main_datapipe_valid_iterator_id(self) -> int:\n    if self.main_datapipe._valid_iterator_id is None:\n        self.main_datapipe._valid_iterator_id = 0\n    elif self.main_datapipe._valid_iterator_id == self._valid_iterator_id:\n        self.main_datapipe._valid_iterator_id += 1\n        if not self.main_datapipe.is_every_instance_exhausted():\n            warnings.warn('Some child DataPipes are not exhausted when __iter__ is called. We are resetting the buffer and each child DataPipe will read from the start again.', UserWarning)\n        self.main_datapipe.reset()\n    self._valid_iterator_id = self.main_datapipe._valid_iterator_id\n    return self._valid_iterator_id",
    "docstring": "Update the valid iterator ID for both this DataPipe object and . is called when the ID is incremented to a new generation.",
    "type": "method",
    "file_path": "pytorch\\torch\\utils\\data\\datapipes\\iter\\combining.py",
    "ast_data": "FunctionDef name:_set_main_datapipe_valid_iterator_id arg:self arguments arg If Compare Assign If Compare If Call Call Call Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "from_tensor",
    "source_code": "@classmethod\ndef from_tensor(cls, tensor):\n    if isinstance(tensor, core.Value):\n        return EagerWeakTensor(tensor)\n    if isinstance(tensor, core.Symbol):\n        return GraphWeakTensor(tensor)\n    raise errors.InvalidArgumentError(None, None, f'WeakTensor can only be constructed from tf.Tensor or tf.WeakTensor, but {type(tensor)} was given.')",
    "docstring": "Converts a 'tf.Tensor' into a 'WeakTensor'. This should be the standard way of creating a WeakTensor instead of directly calling the WeakTensor constructor. Args: tensor: The that should be converted into a 'WeakTensor'. Returns: A or 'GraphWeakTensor' that holds the .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\weak_tensor.py",
    "ast_data": "FunctionDef name:from_tensor arg:cls arg:tensor arguments arg arg If Call Return return:yes Call If Call Return return:yes Call Raise Call Call"
  },
  {
    "library": "tensorflow",
    "name": "ClosureAbortedError",
    "source_code": "class ClosureAbortedError(Exception):\n\n    def __init__(self, original_exception):\n        if isinstance(original_exception, (ClosureInputError, ClosureAbortedError)):\n            self.original_exception = original_exception.original_exception\n        else:\n            self.original_exception = original_exception\n        message = 'Other function has an execution error, as a result, the current value is not available. The original exception is %r, error message is %s.' % (self.original_exception, str(self.original_exception))\n        super().__init__(message)\n        self.with_traceback(original_exception.__traceback__)",
    "docstring": "Wrapper for errors from training closures, to attach to resource closures. This wrapper is used when a dependent training closure fails to set errors on its required resource closures. Attributes: original_exception: The Exception to wrap",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\coordinator\\cluster_coordinator.py",
    "ast_data": "ClassDef name:ClosureAbortedError FunctionDef name:__init__ arg:self arg:original_exception arguments arg arg If Call Assign Assign Assign Call Call Call Call"
  },
  {
    "library": "pandas",
    "name": "_repr_categories",
    "source_code": "def _repr_categories(self) -> list[str]:\n    max_categories = 10 if get_option('display.max_categories') == 0 else get_option('display.max_categories')\n    from pandas.io.formats import format as fmt\n    format_array = partial(fmt.format_array, formatter=None, quoting=QUOTE_NONNUMERIC)\n    if len(self.categories) > max_categories:\n        num = max_categories // 2\n        head = format_array(self.categories[:num]._values)\n        tail = format_array(self.categories[-num:]._values)\n        category_strs = head + ['...'] + tail\n    else:\n        category_strs = format_array(self.categories._values)\n    category_strs = [x.strip() for x in category_strs]\n    return category_strs",
    "docstring": "return the base repr for the categories",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\arrays\\categorical.py",
    "ast_data": "FunctionDef name:_repr_categories arg:self arguments arg Assign Compare Call Call Assign Call If Compare Call Assign Assign Call Assign Call Assign Assign Call Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_ProcessReturnElementsParam",
    "source_code": "def _ProcessReturnElementsParam(return_elements):\n    if return_elements is None:\n        return None\n    if not all((isinstance(x, compat.bytes_or_text_types) for x in return_elements)):\n        raise TypeError(f'Argument `return_elements` must be a list of strings. Obtained {return_elements}.')\n    return tuple((compat.as_str(x) for x in return_elements))",
    "docstring": "Type-checks and possibly canonicalizes .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\importer.py",
    "ast_data": "FunctionDef name:_ProcessReturnElementsParam arg:return_elements arguments arg If Compare Return return:no If Call Call Raise Call Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "find_rocm_config",
    "source_code": "def find_rocm_config():\n    rocm_install_path = _get_rocm_install_path()\n    if not os.path.exists(rocm_install_path):\n        raise ConfigError('Specified ROCM_PATH \"{}\" does not exist'.format(rocm_install_path))\n    result = {}\n    result['rocm_toolkit_path'] = rocm_install_path\n    result.update(_find_rocm_config(rocm_install_path))\n    result.update(_find_hipruntime_config(rocm_install_path))\n    result.update(_find_miopen_config(rocm_install_path))\n    result.update(_find_rocblas_config(rocm_install_path))\n    result.update(_find_rocrand_config(rocm_install_path))\n    result.update(_find_rocfft_config(rocm_install_path))\n    if result['rocm_version_number'] >= 40100:\n        result.update(_find_hipfft_config(rocm_install_path))\n    result.update(_find_roctracer_config(rocm_install_path))\n    result.update(_find_hipsparse_config(rocm_install_path))\n    if result['rocm_version_number'] >= 40500:\n        result.update(_find_hipsolver_config(rocm_install_path))\n    result.update(_find_rocsolver_config(rocm_install_path))\n    return result",
    "docstring": "Returns a dictionary of ROCm components config info.",
    "type": "function",
    "file_path": "tensorflow\\third_party\\xla\\third_party\\gpus\\find_rocm_config.py",
    "ast_data": "FunctionDef name:find_rocm_config arguments Assign Call If Call Raise Call Call Assign Assign Call Call Call Call Call Call Call Call Call Call Call Call If Compare Call Call Call Call Call Call If Compare Call Call Call Call Return return:yes"
  },
  {
    "library": "kornia",
    "name": "Points2D_FlatTensors",
    "source_code": "class Points2D_FlatTensors:\n\n    def __init__(self) -> None:\n        self._x: Tensor\n        self._y: Tensor\n        self._camera_ids: List[int] = []",
    "docstring": "Class to hold x/y pixel coordinates for each ray, and its scene camera id.",
    "type": "class",
    "file_path": "kornia\\kornia\\nerf\\samplers.py",
    "ast_data": "ClassDef name:Points2D_FlatTensors FunctionDef name:__init__ arg:self arguments arg"
  },
  {
    "library": "tensorflow",
    "name": "_add_weight",
    "source_code": "def _add_weight(self, name, initial_value, dtype=None):\n    variable = variable_v1.VariableV1(initial_value=initial_value, name=name, dtype=dtype, trainable=False, use_resource=True, synchronization=variables.VariableSynchronization.AUTO, aggregation=variables.VariableAggregation.NONE)\n    if context.executing_eagerly():\n        graph_key = None\n    else:\n        graph = ops.get_default_graph()\n        graph_key = graph._graph_key\n    key = (name, graph_key)\n    if self._weights.get(key, None) is not None:\n        raise RuntimeError('Duplicate variables detected. {}'.format(key))\n    self._weights[key] = variable\n    self._handle_deferred_dependencies(name=name, trackable=variable)\n    return variable",
    "docstring": "Adds a weight to this loss scale. Args: name: Variable name. initial_value: The variable's initial value. dtype: The type of the variable. Returns: A variable. Raises: RuntimeError: If a weight with has already been added.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\training\\experimental\\loss_scale.py",
    "ast_data": "FunctionDef name:_add_weight arg:self arg:name arg:initial_value arg:dtype arguments arg arg arg arg Assign Call If Call Assign Assign Call Assign Assign If Compare Call Raise Call Call Assign Call Return return:yes"
  },
  {
    "library": "django",
    "name": "no_append_slash",
    "source_code": "def no_append_slash(view_func):\n    if iscoroutinefunction(view_func):\n\n        async def _view_wrapper(request, *args, **kwargs):\n            return await view_func(request, *args, **kwargs)\n    else:\n\n        def _view_wrapper(request, *args, **kwargs):\n            return view_func(request, *args, **kwargs)\n    _view_wrapper.should_append_slash = False\n    return wraps(view_func)(_view_wrapper)",
    "docstring": "Mark a view function as excluded from CommonMiddleware's APPEND_SLASH redirection.",
    "type": "function",
    "file_path": "django\\django\\views\\decorators\\common.py",
    "ast_data": "FunctionDef name:no_append_slash arg:view_func arguments arg If Call AsyncFunctionDef name:_view_wrapper arg:request arguments arg arg arg Return return:yes Call FunctionDef name:_view_wrapper arg:request arguments arg arg arg Return return:yes Call Assign Return return:yes Call Call"
  },
  {
    "library": "authlib",
    "name": "ExpiredTokenError",
    "source_code": "class ExpiredTokenError(OAuth2Error):\n    error = 'expired_token'",
    "docstring": "The \"device_code\" has expired, and the device authorization session has concluded. The client MAY commence a new device authorization request but SHOULD wait for user interaction before restarting to avoid unnecessary polling.",
    "type": "class",
    "file_path": "authlib\\authlib\\oauth2\\rfc8628\\errors.py",
    "ast_data": "ClassDef name:ExpiredTokenError Assign"
  },
  {
    "library": "tensorflow",
    "name": "from_variant",
    "source_code": "@tf_export('data.experimental.from_variant')\ndef from_variant(variant, structure):\n    return _VariantDataset(variant, structure)",
    "docstring": "Constructs a dataset from the given variant and (nested) structure. Args: variant: A scalar tensor representing a dataset. structure: A (nested) structure of objects representing the structure of each element in the dataset. Returns: A instance.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\data\\ops\\dataset_ops.py",
    "ast_data": "FunctionDef name:from_variant arg:variant arg:structure arguments arg arg Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "IntArrayRefRepr",
    "source_code": "class IntArrayRefRepr(gdb.Command):\n\n    def __init__(self) -> None:\n        gdb.Command.__init__(self, 'torch-int-array-ref-repr', gdb.COMMAND_USER, gdb.COMPLETE_EXPRESSION)\n\n    def invoke(self, args: str, from_tty: bool) -> None:\n        args = gdb.string_to_argv(args)\n        if len(args) != 1:\n            print('Usage: torch-int-array-ref-repr EXP')\n            return\n        name = args[0]\n        with DisableBreakpoints():\n            res = gdb.parse_and_eval(f'torch::gdb::int_array_ref_string({name})')\n            res = str(res)\n            print(res[res.find('\"') + 1:-1])",
    "docstring": "Print human readable representation of c10::IntArrayRef",
    "type": "class",
    "file_path": "pytorch\\tools\\gdb\\pytorch-gdb.py",
    "ast_data": "ClassDef name:IntArrayRefRepr FunctionDef name:__init__ arg:self arguments arg Call FunctionDef name:invoke arg:self arg:args arg:from_tty arguments arg arg arg Assign Call If Compare Call Call Return return:no Assign With Call Assign Call Assign Call Call Call"
  },
  {
    "library": "scipy",
    "name": "_random",
    "source_code": "def _random(self, n: IntNumber=1, *, workers: IntNumber=1) -> np.ndarray:\n    sample: np.ndarray = np.empty((n, self.d), dtype=np.float64)\n    if n == 0:\n        return sample\n    total_n = self.num_generated + n\n    if total_n > self.maxn:\n        msg = f'At most 2**{self.bits}={self.maxn} distinct points can be generated. {self.num_generated} points have been previously generated, then: n={self.num_generated}+{n}={total_n}. '\n        if self.bits != 64:\n            msg += 'Consider increasing `bits`.'\n        raise ValueError(msg)\n    if self.num_generated == 0:\n        if not n & n - 1 == 0:\n            warnings.warn(\"The balance properties of Sobol' points require n to be a power of 2.\", stacklevel=3)\n        if n == 1:\n            sample = self._first_point\n        else:\n            _draw(n=n - 1, num_gen=self.num_generated, dim=self.d, scale=self._scale, sv=self._sv, quasi=self._quasi, sample=sample)\n            sample = np.concatenate([self._first_point, sample])[:n]\n    else:\n        _draw(n=n, num_gen=self.num_generated - 1, dim=self.d, scale=self._scale, sv=self._sv, quasi=self._quasi, sample=sample)\n    return sample",
    "docstring": "Draw next point(s) in the Sobol' sequence. Parameters ---------- n : int, optional Number of samples to generate in the parameter space. Default is 1. Returns ------- sample : array_like (n, d) Sobol' sample.",
    "type": "method",
    "file_path": "scipy\\scipy\\stats\\_qmc.py",
    "ast_data": "FunctionDef name:_random arg:self arg:n arguments arg arg arg Call If Compare Return return:yes Assign If Compare Assign If Compare Raise Call If Compare If Compare Call If Compare Assign Call Assign Call Call Return return:yes"
  },
  {
    "library": "django",
    "name": "escapejs_filter",
    "source_code": "@register.filter('escapejs')\n@stringfilter\ndef escapejs_filter(value):\n    return escapejs(value)",
    "docstring": "Hex encode characters for use in JavaScript strings.",
    "type": "function",
    "file_path": "django\\django\\template\\defaultfilters.py",
    "ast_data": "FunctionDef name:escapejs_filter arg:value arguments arg Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "WrapperFxCodegen",
    "source_code": "class WrapperFxCodegen(PythonWrapperCodegen):\n    supports_caching = False\n\n    def _generate(self, is_inference: bool) -> tuple[FileBackedGraphModule, None]:\n        self.run_wrapper_ir_passes(is_inference)\n        prologue = '\\n'.join([self.imports.getvalue(), self.header.getvalue()])\n        gm = FxConverter(lines=self.lines, prologue=prologue).generate()\n        compiled_fn = self.compile_graph(gm)\n        return (FileBackedGraphModule(gm, compiled_fn), None)\n\n    def compile_graph(self, gm: GraphModule) -> Callable[..., Any]:\n        return gm.forward\n\n    @classmethod\n    def create(cls, is_subgraph: bool, subgraph_name: Optional[str], parent_wrapper: Optional[PythonWrapperCodegen], partition_signatures: Optional[ir.GraphPartitionSignature]=None) -> 'WrapperFxCodegen':\n        if is_subgraph:\n            raise NotImplementedError('Subgraphs are not yet supported by FX conversion')\n        return cls()",
    "docstring": "Backend to generate wrapper code as an FX IR graph.",
    "type": "class",
    "file_path": "pytorch\\torch\\_inductor\\codegen\\wrapper_fxir.py",
    "ast_data": "ClassDef name:WrapperFxCodegen Assign FunctionDef name:_generate arg:self arg:is_inference arguments arg arg Call Assign Call Call Call Assign Call Call Assign Call Return return:yes Call FunctionDef name:compile_graph arg:self arg:gm arguments arg arg Return return:yes FunctionDef name:create arg:cls arg:is_subgraph arg:subgraph_name arg:parent_wrapper arg:partition_signatures arguments arg arg arg arg arg If Raise Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "prepend",
    "source_code": "@compatibility(is_backward_compatible=True)\ndef prepend(self, x: 'Node') -> None:\n    assert self.graph == x.graph, 'Attempting to move a Node into a different Graph'\n    if self == x:\n        log.debug('Trying to prepend a node to itself. This behavior has no effect on the graph.')\n        return\n    x._remove_from_list()\n    p = self._prev\n    p._next, x._prev = (x, p)\n    x._next, self._prev = (self, x)\n    psk = x._prev._sort_key\n    nsk = x._next._sort_key\n    if len(psk) > len(nsk):\n        idx: int\n        *prefix, idx = psk[:len(nsk) + 1]\n        x._sort_key = (*prefix, idx + 1)\n    elif len(psk) < len(nsk):\n        *prefix, idx = nsk[:len(psk) + 1]\n        x._sort_key = (*prefix, idx - 1)\n    else:\n        x._sort_key = (*psk, 0)",
    "docstring": "Insert x before this node in the list of nodes in the graph. Example:: Before: p -> self bx -> x -> ax After: p -> x -> self bx -> ax Args: x (Node): The node to put before this node. Must be a member of the same graph.",
    "type": "method",
    "file_path": "pytorch\\torch\\fx\\node.py",
    "ast_data": "FunctionDef name:prepend arg:self arg:x arguments arg arg Compare If Compare Call Return return:no Call Assign Assign Assign Assign Assign If Compare Call Call Assign Call Assign If Compare Call Call Assign Call Assign Assign Call"
  },
  {
    "library": "matplotlib",
    "name": "_contains",
    "source_code": "def _contains(self, event):\n    return self._selection_artist.contains(event, radius=0)[0]",
    "docstring": "Return True if event is within the patch.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\widgets.py",
    "ast_data": "FunctionDef name:_contains arg:self arg:event arguments arg arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "is_prim",
    "source_code": "def is_prim(domain: str) -> bool:\n    return domain == 'prim'",
    "docstring": "Check if the domain is official.",
    "type": "function",
    "file_path": "pytorch\\torch\\onnx\\_internal\\jit_utils.py",
    "ast_data": "FunctionDef name:is_prim arg:domain arguments arg Return return:yes Compare"
  },
  {
    "library": "pandas",
    "name": "validate_bool_kwarg",
    "source_code": "def validate_bool_kwarg(value: BoolishNoneT, arg_name: str, none_allowed: bool=True, int_allowed: bool=False) -> BoolishNoneT:\n    good_value = is_bool(value)\n    if none_allowed:\n        good_value = good_value or value is None\n    if int_allowed:\n        good_value = good_value or isinstance(value, int)\n    if not good_value:\n        raise ValueError(f'For argument \"{arg_name}\" expected type bool, received type {type(value).__name__}.')\n    return value",
    "docstring": "Ensure that argument passed in arg_name can be interpreted as boolean. Parameters ---------- value : bool Value to be validated. arg_name : str Name of the argument. To be reflected in the error message. none_allowed : bool, default True Whether to consider None to be a valid boolean. int_allowed : bool, default False Whether to consider integer value to be a valid boolean. Returns ------- value The same value as input. Raises ------ ValueError If the value is not a valid boolean.",
    "type": "function",
    "file_path": "pandas\\pandas\\util\\_validators.py",
    "ast_data": "FunctionDef name:validate_bool_kwarg arg:value arg:arg_name arg:none_allowed arg:int_allowed arguments arg arg arg arg Assign Call If Assign BoolOp Compare If Assign BoolOp Call If Raise Call Call Return return:yes"
  },
  {
    "library": "pandas",
    "name": "empty_value",
    "source_code": "@property\ndef empty_value(self) -> str:\n    return ''",
    "docstring": "Property for compat with other readers.",
    "type": "method",
    "file_path": "pandas\\pandas\\io\\excel\\_odfreader.py",
    "ast_data": "FunctionDef name:empty_value arg:self arguments arg Return return:yes"
  },
  {
    "library": "django",
    "name": "NonCapture",
    "source_code": "class NonCapture(list):\n    pass",
    "docstring": "Represent a non-capturing group in the pattern string.",
    "type": "class",
    "file_path": "django\\django\\utils\\regex_helper.py",
    "ast_data": "ClassDef name:NonCapture"
  },
  {
    "library": "pytorch",
    "name": "join",
    "source_code": "def join(self, other: LiveRange):\n    return LiveRange(min(self.begin, other.begin), max(self.end, other.end))",
    "docstring": "Combine two ranges using a union operation",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\codegen\\memory_planning.py",
    "ast_data": "FunctionDef name:join arg:self arg:other arguments arg arg Return return:yes Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "_make_sure_a_list",
    "source_code": "def _make_sure_a_list(self, var):\n    n = len(self.sparsifier.groups)\n    if not isinstance(var, (list, tuple)):\n        return [var] * n\n    else:\n        if len(var) != n:\n            raise ValueError(f'Expected variable of length {n}, but got {len(var)}')\n        return list(var)",
    "docstring": "Utility that extends it to the same length as the .groups, ensuring it is a list",
    "type": "method",
    "file_path": "pytorch\\torch\\ao\\pruning\\scheduler\\base_scheduler.py",
    "ast_data": "FunctionDef name:_make_sure_a_list arg:self arg:var arguments arg arg Assign Call If Call Return return:yes If Compare Call Raise Call Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "as_storage_and_layout",
    "source_code": "def as_storage_and_layout(x: IRNode, freeze: bool=True, want_contiguous: bool=False, stride_order: Optional[Sequence[Union[int, Integer]]]=None, allow_padding: bool=False, exact_strides: Optional[Sequence[Union[int, Integer]]]=None) -> tuple[StorageBox, Layout]:\n    if isinstance(x, TensorBox):\n        return as_storage_and_layout(x.data, freeze=freeze, want_contiguous=want_contiguous, stride_order=stride_order, allow_padding=allow_padding, exact_strides=exact_strides)\n    if isinstance(x, StorageBox):\n        _, layout = as_storage_and_layout(x.data, freeze=freeze, want_contiguous=want_contiguous, stride_order=stride_order, allow_padding=allow_padding, exact_strides=exact_strides)\n        return (x, x.data.get_layout())\n    if isinstance(x, Buffer):\n        if freeze:\n            if want_contiguous:\n                x.freeze_layout()\n                assert x.get_layout().is_contiguous()\n            elif stride_order is not None:\n                x.freeze_layout_with_stride_order(stride_order, allow_padding=allow_padding)\n            elif exact_strides is not None:\n                x.freeze_layout_with_exact_strides(exact_strides, allow_padding=allow_padding)\n            else:\n                x.decide_layout()\n        return (StorageBox(x), x.get_layout())\n    if isinstance(x, ReinterpretView):\n        buffer, _ = as_storage_and_layout(x.data, freeze=freeze)\n        return (buffer, x.layout)\n    raise NotImplementedError",
    "docstring": "Try to simplify x into a StorageBox and a Layout. allow_padding only affect how we apply stride_order. When allow_padding is True, we have the freedom to add padding when applying the stride_order.",
    "type": "function",
    "file_path": "pytorch\\torch\\_inductor\\ir.py",
    "ast_data": "FunctionDef name:as_storage_and_layout arg:x arg:freeze arg:want_contiguous arg:stride_order arg:allow_padding arg:exact_strides arguments arg arg arg arg arg arg If Call Return return:yes Call If Call Assign Call Return return:yes Call If Call If If Call Call Call If Compare Call If Compare Call Call Return return:yes Call Call If Call Assign Call Return return:yes Raise"
  },
  {
    "library": "pandas",
    "name": "dtypes",
    "source_code": "@property\ndef dtypes(self) -> Series:\n    from pandas import Index, Series\n    pa_type = self._data.dtype.pyarrow_dtype\n    types = [ArrowDtype(struct.type) for struct in pa_type]\n    names = [struct.name for struct in pa_type]\n    return Series(types, index=Index(names))",
    "docstring": "Return the dtype object of each child field of the struct. Returns ------- pandas.Series The data type of each child field. See Also -------- Series.dtype: Return the dtype object of the underlying data. Examples -------- >>> import pyarrow as pa >>> s = pd.Series( ... [ ... {\"version\": 1, \"project\": \"pandas\"}, ... {\"version\": 2, \"project\": \"pandas\"}, ... {\"version\": 1, \"project\": \"numpy\"}, ... ], ... dtype=pd.ArrowDtype( ... pa.struct([(\"version\", pa.int64()), (\"project\", pa.string())]) ... ), ... ) >>> s.struct.dtypes version int64[pyarrow] project string[pyarrow] dtype: object",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\arrays\\arrow\\accessors.py",
    "ast_data": "FunctionDef name:dtypes arg:self arguments arg Assign Assign Call Assign Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "batch_shape_tensor",
    "source_code": "def batch_shape_tensor(self, name='batch_shape_tensor'):\n    with self._name_scope(name):\n        return self._batch_shape_tensor()",
    "docstring": "Shape of batch dimensions of this operator, determined at runtime. If this operator acts like the batch matrix with , then this returns a holding . Args: name: A name for this . Returns:",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\linalg\\linear_operator.py",
    "ast_data": "FunctionDef name:batch_shape_tensor arg:self arg:name arguments arg arg With Call Return return:yes Call"
  },
  {
    "library": "numpy",
    "name": "left_shift",
    "source_code": "def left_shift(a, n):\n    m = getmask(a)\n    if m is nomask:\n        d = umath.left_shift(filled(a), n)\n        return masked_array(d)\n    else:\n        d = umath.left_shift(filled(a, 0), n)\n        return masked_array(d, mask=m)",
    "docstring": "Shift the bits of an integer to the left. This is the masked array version of , for details see that function. See Also -------- numpy.left_shift Examples -------- Shift with a masked array: >>> arr = np.ma.array([10, 20, 30], mask=[False, True, False]) >>> np.ma.left_shift(arr, 1) masked_array(data=[20, --, 60], mask=[False, True, False], fill_value=999999) Large shift: >>> np.ma.left_shift(10, 10) masked_array(data=10240, mask=False, fill_value=999999) Shift with a scalar and an array: >>> scalar = 10 >>> arr = np.ma.array([1, 2, 3], mask=[False, True, False]) >>> np.ma.left_shift(scalar, arr) masked_array(data=[20, --, 80], mask=[False, True, False], fill_value=999999)",
    "type": "function",
    "file_path": "numpy\\numpy\\ma\\core.py",
    "ast_data": "FunctionDef name:left_shift arg:a arg:n arguments arg arg Assign Call If Compare Assign Call Call Return return:yes Call Assign Call Call Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "PossiblePrecisionLoss",
    "source_code": "class PossiblePrecisionLoss(Warning):\n    pass",
    "docstring": "Warning raised by to_stata on a column with a value outside or equal to int64. When the column value is outside or equal to the int64 value the column is converted to a float64 dtype. See Also -------- DataFrame.to_stata : Export DataFrame object to Stata dta format. Examples -------- >>> df = pd.DataFrame({\"s\": pd.Series([1, 2**53], dtype=np.int64)}) >>> df.to_stata(\"test\") # doctest: +SKIP",
    "type": "class",
    "file_path": "pandas\\pandas\\errors\\__init__.py",
    "ast_data": "ClassDef name:PossiblePrecisionLoss"
  },
  {
    "library": "tensorflow",
    "name": "strtobool",
    "source_code": "def strtobool(val_str):\n    if val_str in ('True', 'true'):\n        return True\n    elif val_str in ('False', 'false'):\n        return False\n    else:\n        tf_logging.warning('Wrong string values.       Supports False/false or True/true only. val_str = ', val_str)\n        return False",
    "docstring": "Return boolean value of it's equivalent string representation",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\tools\\optimize_for_inference_lib.py",
    "ast_data": "FunctionDef name:strtobool arg:val_str arguments arg If Compare Return return:yes If Compare Return return:yes Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_convert_values_and_partition",
    "source_code": "@classmethod\ndef _convert_values_and_partition(cls, values, row_partition, name):\n    if not isinstance(row_partition, RowPartition):\n        raise TypeError(f'Argument `row_partition` must be a RowPartition. Received {row_partition}.')\n    if isinstance(values, RaggedTensor):\n        if values._row_partition.dtype != row_partition.dtype:\n            if not ragged_config.auto_cast_partition_dtype():\n                raise ValueError(f'Argument `row_partition` of RaggedTensor with name: {name} must have same dtype as Argument `values`. ({row_partition.dtype} vs. {values._row_partition.dtype}).')\n            values = values.with_row_splits_dtype(row_partition.dtype)\n    else:\n        values = _convert_to_ragged_tensor_values(values)\n    return (values, row_partition)",
    "docstring": "Converts and to Tensors. If is a , then converts and to have compatible row-partitioning dtypes. In particular, if any of the row partitioning tensors are , then all of the other row partitioning tensors will be cast to (if auto_cast_partition_dtype() is true) or an error will be raised (if auto_cast_partition_dtype() is false). Args: values: The for the being constructed. row_partition: A RowPartition object for the being constructed. name: The name of the RowPartition object. Returns: A tuple (values, partition).",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\ragged_tensor.py",
    "ast_data": "FunctionDef name:_convert_values_and_partition arg:cls arg:values arg:row_partition arg:name arguments arg arg arg arg If Call Raise Call If Call If Compare If Call Raise Call Assign Call Assign Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_test_results_re",
    "source_code": "def _test_results_re() -> None:\n    pass",
    "docstring": ">>> def t(s): ... return RESULTS_RE.search(s).groupdict() >>> t(r\"file.py:80:1: E302 expected 2 blank lines, found 1\") ... # doctest: +NORMALIZE_WHITESPACE {'file': 'file.py', 'line': '80', 'column': '1', 'code': 'E302', 'message': 'expected 2 blank lines, found 1'} >>> t(r\"file.py:7:1: P201: Resource is acquired but not always released.\") ... # doctest: +NORMALIZE_WHITESPACE {'file': 'file.py', 'line': '7', 'column': '1', 'code': 'P201', 'message': 'Resource is acquired but not always released.'} >>> t(r\"file.py:8:-10: W605 invalid escape sequence '/'\") ... # doctest: +NORMALIZE_WHITESPACE {'file': 'file.py', 'line': '8', 'column': '-10', 'code': 'W605', 'message': \"invalid escape sequence '/'\"}",
    "type": "function",
    "file_path": "pytorch\\tools\\linter\\adapters\\flake8_linter.py",
    "ast_data": "FunctionDef name:_test_results_re arguments"
  },
  {
    "library": "numpy",
    "name": "prod",
    "source_code": "def prod(self, axis=None, dtype=None, out=None, keepdims=np._NoValue):\n    kwargs = {} if keepdims is np._NoValue else {'keepdims': keepdims}\n    _mask = self._mask\n    newmask = _check_mask_axis(_mask, axis, **kwargs)\n    if out is None:\n        result = self.filled(1).prod(axis, dtype=dtype, **kwargs)\n        rndim = getattr(result, 'ndim', 0)\n        if rndim:\n            result = result.view(type(self))\n            result.__setmask__(newmask)\n        elif newmask:\n            result = masked\n        return result\n    result = self.filled(1).prod(axis, dtype=dtype, out=out, **kwargs)\n    if isinstance(out, MaskedArray):\n        outmask = getmask(out)\n        if outmask is nomask:\n            outmask = out._mask = make_mask_none(out.shape)\n        outmask.flat = newmask\n    return out",
    "docstring": "Return the product of the array elements over the given axis. Masked elements are set to 1 internally for computation. Refer to for full documentation. Notes ----- Arithmetic is modular when using integer types, and no error is raised on overflow. See Also -------- numpy.ndarray.prod : corresponding function for ndarrays numpy.prod : equivalent function",
    "type": "method",
    "file_path": "numpy\\numpy\\ma\\core.py",
    "ast_data": "FunctionDef name:prod arg:self arg:axis arg:dtype arg:out arg:keepdims arguments arg arg arg arg arg Assign Compare Assign Assign Call If Compare Assign Call Call Assign Call If Assign Call Call Call If Assign Return return:yes Assign Call Call If Call Assign Call If Compare Assign Call Assign Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "get_ref_artist",
    "source_code": "def get_ref_artist(self):\n    raise RuntimeError('get_ref_artist must overridden')",
    "docstring": "Return the underlying artist that actually defines some properties (e.g., color) of this artist.",
    "type": "method",
    "file_path": "matplotlib\\lib\\mpl_toolkits\\axisartist\\axis_artist.py",
    "ast_data": "FunctionDef name:get_ref_artist arg:self arguments arg Raise Call"
  },
  {
    "library": "django",
    "name": "aget_object_list",
    "source_code": "async def aget_object_list(self):\n    if not isinstance(self.object_list, list):\n        if hasattr(self.object_list, '__aiter__'):\n            self.object_list = [obj async for obj in self.object_list]\n        else:\n            self.object_list = await sync_to_async(list)(self.object_list)\n    return self.object_list",
    "docstring": "Returns self.object_list as a list. This method must be awaited before AsyncPage can be treated as a sequence of self.object_list.",
    "type": "method",
    "file_path": "django\\django\\core\\paginator.py",
    "ast_data": "AsyncFunctionDef name:aget_object_list arg:self arguments arg If Call If Call Assign Assign Call Call Return return:yes"
  },
  {
    "library": "django",
    "name": "rename_column_references",
    "source_code": "def rename_column_references(self, table, old_column, new_column):\n    pass",
    "docstring": "Rename all references to the old_column to the new_column.",
    "type": "method",
    "file_path": "django\\django\\db\\backends\\ddl_references.py",
    "ast_data": "FunctionDef name:rename_column_references arg:self arg:table arg:old_column arg:new_column arguments arg arg arg arg"
  },
  {
    "library": "scikit-learn",
    "name": "transform",
    "source_code": "def transform(self, X):\n    check_is_fitted(self)\n    X = validate_data(self, X, reset=False)\n    X = X - self.mean_\n    U = ridge_regression(self.components_.T, X.T, self.ridge_alpha, solver='cholesky')\n    return U",
    "docstring": "Least Squares projection of the data onto the sparse components. To avoid instability issues in case the system is under-determined, regularization can be applied (Ridge regression) via the parameter. Note that Sparse PCA components orthogonality is not enforced as in PCA hence one cannot use a simple linear projection. Parameters ---------- X : ndarray of shape (n_samples, n_features) Test data to be transformed, must have the same number of features as the data used to train the model. Returns ------- X_new : ndarray of shape (n_samples, n_components) Transformed data.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\decomposition\\_sparse_pca.py",
    "ast_data": "FunctionDef name:transform arg:self arg:X arguments arg arg Call Assign Call Assign Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "unique",
    "source_code": "@tf_export('unique')\n@dispatch.add_dispatch_support\ndef unique(x, out_idx=dtypes.int32, name=None):\n    return gen_array_ops.unique(x, out_idx, name)",
    "docstring": "Finds unique elements in a 1-D tensor. See also . This operation returns a tensor containing all the unique elements of sorted in the same order that they occur in . This operation also returns a tensor the same size as that contains the index of each value of in the unique output . In other words: y[idx[i]] = x[i] for i in [0, 1,...,rank(x) - 1] Example usage: >>> x = tf.constant([1, 1, 2, 4, 4, 4, 7, 8, 8]) >>> y, idx = unique(x) >>> y >>> idx Args: x: A Tensor. 1-D. out_idx: An optional tf.DType from: tf.int32, tf.int64. Defaults to tf.int32. name: A name for the operation (optional). Returns: A tuple of Tensor objects (y, idx). y: A Tensor. Has the same type as x. idx: A Tensor of type out_idx.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\array_ops.py",
    "ast_data": "FunctionDef name:unique arg:x arg:out_idx arg:name arguments arg arg arg Return return:yes Call Call"
  },
  {
    "library": "django",
    "name": "get_slug_field",
    "source_code": "def get_slug_field(self):\n    return self.slug_field",
    "docstring": "Get the name of a slug field to be used to look up by slug.",
    "type": "method",
    "file_path": "django\\django\\views\\generic\\detail.py",
    "ast_data": "FunctionDef name:get_slug_field arg:self arguments arg Return return:yes"
  },
  {
    "library": "pandas",
    "name": "take",
    "source_code": "def take(self, indices: TakeIndexer, allow_fill: bool=False, fill_value: Any=None) -> ArrowExtensionArray:\n    indices_array = np.asanyarray(indices)\n    if len(self._pa_array) == 0 and (indices_array >= 0).any():\n        raise IndexError('cannot do a non-empty take')\n    if indices_array.size > 0 and indices_array.max() >= len(self._pa_array):\n        raise IndexError(\"out of bounds value in 'indices'.\")\n    if allow_fill:\n        fill_mask = indices_array < 0\n        if fill_mask.any():\n            validate_indices(indices_array, len(self._pa_array))\n            indices_array = pa.array(indices_array, mask=fill_mask)\n            result = self._pa_array.take(indices_array)\n            if isna(fill_value):\n                return type(self)(result)\n            result = type(self)(result)\n            result[fill_mask] = fill_value\n            return result\n        else:\n            return type(self)(self._pa_array.take(indices))\n    else:\n        if (indices_array < 0).any():\n            indices_array = np.copy(indices_array)\n            indices_array[indices_array < 0] += len(self._pa_array)\n        return type(self)(self._pa_array.take(indices_array))",
    "docstring": "Take elements from an array. Parameters ---------- indices : sequence of int or one-dimensional np.ndarray of int Indices to be taken. allow_fill : bool, default False How to handle negative values in . * False: negative values in indicate positional indices from the right (the default). This is similar to :func:. * True: negative values in indicate missing values. These values are set to . Any other other negative values raise a `allow_fillfill_valuefill_valueindicesallow_fillindicesSeries.reindexfill_value`.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\arrays\\arrow\\array.py",
    "ast_data": "FunctionDef name:take arg:self arg:indices arg:allow_fill arg:fill_value arguments arg arg arg arg Assign Call If BoolOp Compare Call Call Compare Raise Call If BoolOp Compare Compare Call Call Raise Call If Assign Compare If Call Call Call Assign Call Assign Call If Call Return return:yes Call Call Assign Call Call Assign Return return:yes Return return:yes Call Call Call If Call Compare Assign Call Compare Call Return return:yes Call Call Call"
  },
  {
    "library": "scipy",
    "name": "_minor_slice",
    "source_code": "def _minor_slice(self, idx, copy=False):\n    if idx == slice(None):\n        return self.copy() if copy else self\n    M, N = self._swap(self._shape_as_2d)\n    start, stop, step = idx.indices(N)\n    N = len(range(start, stop, step))\n    if N == 0:\n        return self.__class__(self._swap((M, N)), dtype=self.dtype)\n    if step == 1:\n        return self._get_submatrix(minor=idx, copy=copy)\n    return self._minor_index_fancy(np.arange(start, stop, step))",
    "docstring": "Index along the minor axis where idx is a slice object.",
    "type": "method",
    "file_path": "scipy\\scipy\\sparse\\_compressed.py",
    "ast_data": "FunctionDef name:_minor_slice arg:self arg:idx arg:copy arguments arg arg arg If Compare Call Return return:yes Call Assign Call Assign Call Assign Call Call If Compare Return return:yes Call Call If Compare Return return:yes Call Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_weight_only_quantize",
    "source_code": "def _weight_only_quantize(src_saved_model_path: str, dst_saved_model_path: str, quantization_options: quant_opts_pb2.QuantizationOptions) -> autotrackable.AutoTrackable:\n    mode_str = 'weight-only quantization'\n    if _is_qat_saved_model(src_saved_model_path):\n        raise ValueError('The models trained with quantization-aware training (QAT) is not supported for %s.' % mode_str)\n    logging.info('Running post-training %s on model: %s', mode_str, src_saved_model_path)\n    logging.info('QuantizationOptions: \\n%s', quantization_options)\n    signature_def_map = save_model.get_signatures_from_saved_model(src_saved_model_path, list(quantization_options.signature_keys), set(quantization_options.tags))\n    pywrap_quantize_model.quantize_weight_only(src_saved_model_path, dst_saved_model_path, quantization_options_serialized=quantization_options.SerializeToString(), signature_def_map_serialized=_serialize_signature_def_map(signature_def_map), py_function_library=py_function_lib.PyFunctionLibrary())\n    return saved_model_load.load(dst_saved_model_path)",
    "docstring": "Quantizes the given SavedModel via weight-only quantization. Args: src_saved_model_path: Path to the saved model. dst_saved_model_path: The path to save the output SavedModel. The directory will be overwritten if not empty. quantization_options: QuantizationOptions proto describing quantization related config. Returns: A SavedModel object with TF quantization applied. Raises: ValueError: when the model is QAT model.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\compiler\\mlir\\quantization\\tensorflow\\python\\quantize_model.py",
    "ast_data": "FunctionDef name:_weight_only_quantize arg:src_saved_model_path arg:dst_saved_model_path arg:quantization_options arguments arg arg arg Assign If Call Raise Call Call Call Assign Call Call Call Call Call Call Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "_enforce_mem_layouts",
    "source_code": "def _enforce_mem_layouts(query: Tensor, key: Tensor, value: Tensor) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]:\n\n    def is_row_major(tensor: Tensor) -> bool:\n        return tensor.stride()[-1] == 1\n\n    def is_col_major(tensor: Tensor) -> bool:\n        return tensor.stride()[-2] == 1\n    fp8_dtypes = (torch.float8_e4m3fn, torch.float8_e5m2)\n    gemm_precision = query.dtype\n    should_enforce_mem_layout = gemm_precision in fp8_dtypes and torch.version.cuda is not None and (torch.cuda.get_device_capability('cuda') >= (8, 9)) and (torch.cuda.get_device_capability('cuda') < (10, 0))\n    if not should_enforce_mem_layout:\n        return (query, key, value)\n    if not is_row_major(query):\n        query = query.contiguous()\n    if not is_row_major(key):\n        key = key.contiguous()\n    if not is_col_major(value):\n        value = value.transpose(-2, -1).contiguous().transpose(-2, -1)\n    return (query, key, value)",
    "docstring": "Enforce memory layouts for query, key, and value tensors. For non-FP8 dtypes, no action is taken. For FP8 dtypes, we enforce the following memory layouts: - Query tensor must be in row-major memory layout, as it will be the left-operand in the FP8 GEMM . - Key tensor must be in row-major memory layout, as it will be transposed when used as the right-operand in the FP8 GEMM , meaning it will correctly be in column-major memory layout for the GEMM. - Value tensor must be in column-major memory layout, as it will be the right-operand in the FP8 GEMM . Returns the query, key, and value tensors with the enforced memory layouts.",
    "type": "function",
    "file_path": "pytorch\\torch\\nn\\attention\\flex_attention.py",
    "ast_data": "FunctionDef name:_enforce_mem_layouts arg:query arg:key arg:value arguments arg arg arg FunctionDef name:is_row_major arg:tensor arguments arg Return return:yes Compare Call FunctionDef name:is_col_major arg:tensor arguments arg Return return:yes Compare Call Assign Assign Assign BoolOp Compare Compare Compare Call Compare Call If Return return:yes If Call Assign Call If Call Assign Call If Call Assign Call Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "PartitionResult",
    "source_code": "class PartitionResult(NamedTuple):\n    dag: DAG\n    module_with_submodules: GraphModule",
    "docstring": "NameTuple used for returning DAG and a new fx module",
    "type": "class",
    "file_path": "pytorch\\torch\\fx\\experimental\\accelerator_partitioner.py",
    "ast_data": "ClassDef name:PartitionResult"
  },
  {
    "library": "django",
    "name": "_get_edited_object_pks",
    "source_code": "def _get_edited_object_pks(self, request, prefix):\n    pk_pattern = re.compile('{}-\\\\d+-{}$'.format(re.escape(prefix), self.opts.pk.name))\n    return [value for key, value in request.POST.items() if pk_pattern.match(key)]",
    "docstring": "Return POST data values of list_editable primary keys.",
    "type": "method",
    "file_path": "django\\django\\contrib\\admin\\options.py",
    "ast_data": "FunctionDef name:_get_edited_object_pks arg:self arg:request arg:prefix arguments arg arg arg Assign Call Call Call Return return:yes Call Call"
  },
  {
    "library": "scikit-learn",
    "name": "asarrays",
    "source_code": "def asarrays(a: Array | complex, b: Array | complex, xp: ModuleType) -> tuple[Array, Array]:\n    a_scalar = is_python_scalar(a)\n    b_scalar = is_python_scalar(b)\n    if not a_scalar and (not b_scalar):\n        return (a, b)\n    swap = False\n    if a_scalar:\n        swap = True\n        b, a = (a, b)\n    if is_array_api_obj(a):\n        xa = a\n        same_dtype = {bool: 'bool', int: ('integral', 'real floating', 'complex floating'), float: ('real floating', 'complex floating'), complex: 'complex floating'}\n        kind = same_dtype[type(cast(complex, b))]\n        if xp.isdtype(a.dtype, kind):\n            xb = xp.asarray(b, dtype=a.dtype)\n        else:\n            xb = xp.asarray(b)\n    else:\n        xa, xb = (xp.asarray(a), xp.asarray(b))\n    return (xb, xa) if swap else (xa, xb)",
    "docstring": "Ensure both and are arrays. If is a python scalar, it is converted to the same dtype as , and vice versa. Behavior is not specified when mixing a Python `x`. Default: infer. Returns ------- Array, Array The input arrays, possibly converted to arrays if they were scalars. See Also -------- mixing-arrays-with-python-scalars : Array API specification for the behavior.",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\externals\\array_api_extra\\_lib\\_utils\\_helpers.py",
    "ast_data": "FunctionDef name:asarrays arg:a arg:b arg:xp arguments arg arg arg Assign Call Assign Call If BoolOp Return return:yes Assign If Assign Assign If Call Assign Assign Assign Call Call If Call Assign Call Assign Call Assign Call Call Return return:yes"
  },
  {
    "library": "scrapy",
    "name": "replace",
    "source_code": "def replace(self, *args: Any, cls: type[Request] | None=None, **kwargs: Any) -> Request:\n    for x in self.attributes:\n        kwargs.setdefault(x, getattr(self, x))\n    if cls is None:\n        cls = self.__class__\n    return cls(*args, **kwargs)",
    "docstring": "Create a new Request with the same attributes except for those given new values",
    "type": "method",
    "file_path": "scrapy\\scrapy\\http\\request\\__init__.py",
    "ast_data": "FunctionDef name:replace arg:self arguments arg arg arg arg For Call Call If Compare Assign Return return:yes Call"
  },
  {
    "library": "scrapy",
    "name": "stop",
    "source_code": "@inlineCallbacks\ndef stop(self) -> Generator[Deferred[Any], Any, None]:\n    if self.crawling:\n        self.crawling = False\n        assert self.engine\n        yield self.engine.stop()",
    "docstring": "Starts a graceful stop of the crawler and returns a deferred that is fired when the crawler is stopped.",
    "type": "method",
    "file_path": "scrapy\\scrapy\\crawler.py",
    "ast_data": "FunctionDef name:stop arg:self arguments arg If Assign Call"
  },
  {
    "library": "tensorflow",
    "name": "as_frame",
    "source_code": "def as_frame(self):\n    return (self.loc.filename, self.loc.lineno, self.function_name, self.source_code_line)",
    "docstring": "Returns a 4-tuple consistent with the return of traceback.extract_tb.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\autograph\\pyct\\origin_info.py",
    "ast_data": "FunctionDef name:as_frame arg:self arguments arg Return return:yes"
  },
  {
    "library": "kornia",
    "name": "KeyNet",
    "source_code": "class KeyNet(Module):\n\n    def __init__(self, pretrained: bool=False, keynet_conf: KeyNet_conf=keynet_default_config) -> None:\n        super().__init__()\n        num_filters = keynet_conf['num_filters']\n        self.num_levels = keynet_conf['num_levels']\n        kernel_size = keynet_conf['kernel_size']\n        padding = kernel_size // 2\n        self.feature_extractor = _FeatureExtractor()\n        self.last_conv = nn.Sequential(nn.Conv2d(in_channels=num_filters * self.num_levels, out_channels=1, kernel_size=kernel_size, padding=padding), nn.ReLU(inplace=True))\n        if pretrained:\n            pretrained_dict = torch.hub.load_state_dict_from_url(KeyNet_URL, map_location=torch.device('cpu'))\n            self.load_state_dict(pretrained_dict['state_dict'], strict=True)\n        self.eval()\n\n    def forward(self, x: Tensor) -> Tensor:\n        shape_im = x.shape\n        feats: List[Tensor] = [self.feature_extractor(x)]\n        for _ in range(1, self.num_levels):\n            x = pyrdown(x, factor=1.2)\n            feats_i = self.feature_extractor(x)\n            feats_i = F.interpolate(feats_i, size=(shape_im[2], shape_im[3]), mode='bilinear')\n            feats.append(feats_i)\n        scores = self.last_conv(concatenate(feats, 1))\n        return scores",
    "docstring": "Key.Net model definition -- local feature detector (response function). This is based on the original code from paper \"Key.Net: Keypoint Detection by Handcrafted and Learned CNN Filters\". See :cite: for more details. .. image:: _static/img/KeyNet.png Args: pretrained: Download and set pretrained weights to the model. keynet_conf: Dict with initialization parameters. Do not pass it, unless you know what you are doing(B, 1, H, W)(B, 1, H, W)`",
    "type": "class",
    "file_path": "kornia\\kornia\\feature\\keynet.py",
    "ast_data": "ClassDef name:KeyNet FunctionDef name:__init__ arg:self arg:pretrained arg:keynet_conf arguments arg arg arg Call Call Assign Assign Assign Assign Assign Call Assign Call Call Call If Assign Call Call Call Call FunctionDef name:forward arg:self arg:x arguments arg arg Assign Call For Call Assign Call Assign Call Assign Call Call Assign Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "PrecisionConfig",
    "source_code": "class PrecisionConfig:\n    __slots__ = ('operand_precision',)\n    Precision = ops.PrecisionConfig_Precision\n\n    def __init__(self):\n        self.operand_precision = []",
    "docstring": "Python representation of a xla.PrecisionConfig protobuf.",
    "type": "class",
    "file_path": "tensorflow\\third_party\\xla\\xla\\python\\xla_client.py",
    "ast_data": "ClassDef name:PrecisionConfig Assign Assign FunctionDef name:__init__ arg:self arguments arg Assign"
  },
  {
    "library": "numpy",
    "name": "recfromtxt",
    "source_code": "def recfromtxt(fname, **kwargs):\n    warnings.warn('`recfromtxt` is deprecated, use `numpy.genfromtxt` instead.(deprecated in NumPy 2.0)', DeprecationWarning, stacklevel=2)\n    kwargs.setdefault('dtype', None)\n    usemask = kwargs.get('usemask', False)\n    output = genfromtxt(fname, **kwargs)\n    if usemask:\n        from numpy.ma.mrecords import MaskedRecords\n        output = output.view(MaskedRecords)\n    else:\n        output = output.view(np.recarray)\n    return output",
    "docstring": "Load ASCII data from a file and return it in a record array. If `recarraynumpy.genfromtxtgenfromtxtdtype` is None, which means that the data-type of the output array will be determined from the data.",
    "type": "function",
    "file_path": "numpy\\numpy\\lib\\_npyio_impl.py",
    "ast_data": "FunctionDef name:recfromtxt arg:fname arguments arg arg Call Call Assign Call Assign Call If Assign Call Assign Call Return return:yes"
  },
  {
    "library": "django",
    "name": "permission_required",
    "source_code": "def permission_required(perm, login_url=None, raise_exception=False):\n    if isinstance(perm, str):\n        perms = (perm,)\n    else:\n        perms = perm\n\n    def decorator(view_func):\n        if iscoroutinefunction(view_func):\n\n            async def check_perms(user):\n                if await user.ahas_perms(perms):\n                    return True\n                if raise_exception:\n                    raise PermissionDenied\n                return False\n        else:\n\n            def check_perms(user):\n                if user.has_perms(perms):\n                    return True\n                if raise_exception:\n                    raise PermissionDenied\n                return False\n        return user_passes_test(check_perms, login_url=login_url)(view_func)\n    return decorator",
    "docstring": "Decorator for views that checks whether a user has a particular permission enabled, redirecting to the log-in page if necessary. If the raise_exception parameter is given the PermissionDenied exception is raised.",
    "type": "function",
    "file_path": "django\\django\\contrib\\auth\\decorators.py",
    "ast_data": "FunctionDef name:permission_required arg:perm arg:login_url arg:raise_exception arguments arg arg arg If Call Assign Assign FunctionDef name:decorator arg:view_func arguments arg If Call AsyncFunctionDef name:check_perms arg:user arguments arg If Call Return return:yes If Raise Return return:yes FunctionDef name:check_perms arg:user arguments arg If Call Return return:yes If Raise Return return:yes Return return:yes Call Call Return return:yes"
  },
  {
    "library": "scipy",
    "name": "_conv",
    "source_code": "def _conv(obj, dtype=None):\n    if obj is None:\n        return obj\n    else:\n        if dtype is None:\n            obj = np.asarray(obj)\n        else:\n            obj = np.asarray(obj, dtype)\n        if obj.shape == ():\n            return obj.dtype.type(obj)\n        else:\n            return obj",
    "docstring": "Convert an object to the preferred form for input to the odr routine.",
    "type": "function",
    "file_path": "scipy\\scipy\\odr\\_odrpack.py",
    "ast_data": "FunctionDef name:_conv arg:obj arg:dtype arguments arg arg If Compare Return return:yes If Compare Assign Call Assign Call If Compare Return return:yes Call Return return:yes"
  },
  {
    "library": "kornia",
    "name": "scale_pinhole",
    "source_code": "def scale_pinhole(pinholes: Tensor, scale: Tensor) -> Tensor:\n    if not (len(pinholes.shape) == 2 and pinholes.shape[1] == 12):\n        raise AssertionError(pinholes.shape)\n    if len(scale.shape) != 1:\n        raise AssertionError(scale.shape)\n    pinholes_scaled = pinholes.clone()\n    pinholes_scaled[..., :6] = pinholes[..., :6] * scale.unsqueeze(-1)\n    return pinholes_scaled",
    "docstring": "Scale the pinhole matrix for each pinhole model. .. note:: This method is going to be deprecated in version 0.2 in favour of :attr:. Args: pinholes: tensor with the pinhole model. scale: tensor of scales. Returns: tensor of scaled pinholes. Shape: - Input: :math: and :math: - Output: :math: Example: >>> rng = torch.manual_seed(0) >>> pinhole_i = torch.rand(1, 12) # Nx12 >>> scales = 2.0 * torch.ones(1) # N >>> scale_pinhole(pinhole_i, scales) # Nx12 tensor([[0.9925, 1.5364, 0.1770, 0.2641, 0.6148, 1.2682, 0.4901, 0.8964, 0.4556, 0.6323, 0.3489, 0.4017]])",
    "type": "function",
    "file_path": "kornia\\kornia\\geometry\\camera\\pinhole.py",
    "ast_data": "FunctionDef name:scale_pinhole arg:pinholes arg:scale arguments arg arg If BoolOp Compare Call Compare Raise Call If Compare Call Raise Call Assign Call Assign Call Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "transform",
    "source_code": "def transform(self, X):\n    check_is_fitted(self)\n    X = self._check_input(X, in_fit=False, check_positive=True, check_shape=True)\n    transform_function = {'box-cox': boxcox, 'yeo-johnson': self._yeo_johnson_transform}[self.method]\n    for i, lmbda in enumerate(self.lambdas_):\n        with np.errstate(invalid='ignore'):\n            X[:, i] = transform_function(X[:, i], lmbda)\n    if self.standardize:\n        X = self._scaler.transform(X)\n    return X",
    "docstring": "Apply the power transform to each feature using the fitted lambdas. Parameters ---------- X : array-like of shape (n_samples, n_features) The data to be transformed using a power transformation. Returns ------- X_trans : ndarray of shape (n_samples, n_features) The transformed data.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\preprocessing\\_data.py",
    "ast_data": "FunctionDef name:transform arg:self arg:X arguments arg arg Call Assign Call Assign For Call With Call Assign Call If Assign Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "BatcherIterDataPipe",
    "source_code": "@functional_datapipe('batch')\nclass BatcherIterDataPipe(IterDataPipe[DataChunk]):\n    datapipe: IterDataPipe\n    batch_size: int\n    drop_last: bool\n\n    def __init__(self, datapipe: IterDataPipe, batch_size: int, drop_last: bool=False, wrapper_class: type[DataChunk]=DataChunk) -> None:\n        assert batch_size > 0, 'Batch size is required to be larger than 0!'\n        super().__init__()\n        self.datapipe = datapipe\n        self.batch_size = batch_size\n        self.drop_last = drop_last\n        self.wrapper_class = wrapper_class\n\n    def __iter__(self) -> Iterator[DataChunk]:\n        batch: list = []\n        for x in self.datapipe:\n            batch.append(x)\n            if len(batch) == self.batch_size:\n                yield self.wrapper_class(batch)\n                batch = []\n        if len(batch) > 0:\n            if not self.drop_last:\n                yield self.wrapper_class(batch)\n\n    def __len__(self) -> int:\n        if isinstance(self.datapipe, Sized):\n            if self.drop_last:\n                return len(self.datapipe) // self.batch_size\n            else:\n                return (len(self.datapipe) + self.batch_size - 1) // self.batch_size\n        else:\n            raise TypeError(f\"{type(self).__name__} instance doesn't have valid length\")",
    "docstring": "Creates mini-batches of data (functional name: `` Example: >>> # xdoctest: +SKIP >>> from torchdata.datapipes.iter import IterableWrapper >>> dp = IterableWrapper(range(10)) >>> dp = dp.batch(batch_size=3, drop_last=True) >>> list(dp) [[0, 1, 2], [3, 4, 5], [6, 7, 8]]",
    "type": "class",
    "file_path": "pytorch\\torch\\utils\\data\\datapipes\\iter\\grouping.py",
    "ast_data": "ClassDef name:BatcherIterDataPipe FunctionDef name:__init__ arg:self arg:datapipe arg:batch_size arg:drop_last arg:wrapper_class arguments arg arg arg arg arg Compare Call Call Assign Assign Assign Assign FunctionDef name:__iter__ arg:self arguments arg For Call If Compare Call Call Assign If Compare Call If Call FunctionDef name:__len__ arg:self arguments arg If Call If Return return:yes Call Return return:yes Call Raise Call Call Call"
  },
  {
    "library": "scipy",
    "name": "_missing_warn",
    "source_code": "def _missing_warn():\n    warnings.warn('One of the clusters is empty. Re-run kmeans with a different initialization.', stacklevel=3)",
    "docstring": "Print a warning when called.",
    "type": "function",
    "file_path": "scipy\\scipy\\cluster\\vq.py",
    "ast_data": "FunctionDef name:_missing_warn arguments Call"
  },
  {
    "library": "scipy",
    "name": "_read_long",
    "source_code": "def _read_long(f):\n    return np.int32(struct.unpack('>l', f.read(4))[0])",
    "docstring": "Read a signed 32-bit integer",
    "type": "function",
    "file_path": "scipy\\scipy\\io\\_idl.py",
    "ast_data": "FunctionDef name:_read_long arg:f arguments arg Return return:yes Call Call Call"
  },
  {
    "library": "scikit-learn",
    "name": "predict_proba",
    "source_code": "def predict_proba(self, X):\n    check_is_fitted(self)\n    y_pred = self._forward_pass_fast(X)\n    if self.n_outputs_ == 1:\n        y_pred = y_pred.ravel()\n    if y_pred.ndim == 1:\n        return np.vstack([1 - y_pred, y_pred]).T\n    else:\n        return y_pred",
    "docstring": "Probability estimates. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) The input data. Returns ------- y_prob : ndarray of shape (n_samples, n_classes) The predicted probability of the sample for each class in the model, where classes are ordered as they are in .",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\neural_network\\_multilayer_perceptron.py",
    "ast_data": "FunctionDef name:predict_proba arg:self arg:X arguments arg arg Call Assign Call If Compare Assign Call If Compare Return return:yes Call Return return:yes"
  },
  {
    "library": "django",
    "name": "sequence_reset_sql",
    "source_code": "def sequence_reset_sql(self, style, model_list):\n    return []",
    "docstring": "Return a list of the SQL statements required to reset sequences for the given models. The argument is a Style object as returned by either color_style() or no_style() in django.core.management.color.",
    "type": "method",
    "file_path": "django\\django\\db\\backends\\base\\operations.py",
    "ast_data": "FunctionDef name:sequence_reset_sql arg:self arg:style arg:model_list arguments arg arg arg Return return:no"
  },
  {
    "library": "pytorch",
    "name": "is_target_div_by_dim",
    "source_code": "def is_target_div_by_dim(target: list[int], dim: list[DVar]):\n    return BinConstraintD(BinConstraintD(Prod(target), dim, op_mod), 0, op_eq)",
    "docstring": "Generate constraints to check if the target dimensions are divisible by the input dimensions Args: target: Target dimensions dim: Input dimensions Returns: Constraints to check divisibility",
    "type": "function",
    "file_path": "pytorch\\torch\\fx\\experimental\\migrate_gradual_types\\constraint_transformation.py",
    "ast_data": "FunctionDef name:is_target_div_by_dim arg:target arg:dim arguments arg arg Return return:yes Call Call Call"
  },
  {
    "library": "authlib",
    "name": "generate_device_code",
    "source_code": "def generate_device_code(self):\n    return generate_token(42)",
    "docstring": "A method to generate ``.",
    "type": "method",
    "file_path": "authlib\\authlib\\oauth2\\rfc8628\\endpoint.py",
    "ast_data": "FunctionDef name:generate_device_code arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "_check_label_or_level_ambiguity",
    "source_code": "@final\ndef _check_label_or_level_ambiguity(self, key: Level, axis: Axis=0) -> None:\n    axis_int = self._get_axis_number(axis)\n    other_axes = (ax for ax in range(self._AXIS_LEN) if ax != axis_int)\n    if key is not None and is_hashable(key) and (key in self.axes[axis_int].names) and any((key in self.axes[ax] for ax in other_axes)):\n        level_article, level_type = ('an', 'index') if axis_int == 0 else ('a', 'column')\n        label_article, label_type = ('a', 'column') if axis_int == 0 else ('an', 'index')\n        msg = f\"'{key}' is both {level_article} {level_type} level and {label_article} {label_type} label, which is ambiguous.\"\n        raise ValueError(msg)",
    "docstring": "Check whether is ambiguous. By ambiguous, we mean that it matches both a level of the input and a label of the other axis. Parameters ---------- key : Hashable Label or level name. axis : int, default 0 Axis that levels are associated with (0 for index, 1 for columns). Raises ------ ValueError: is ambiguous",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\generic.py",
    "ast_data": "FunctionDef name:_check_label_or_level_ambiguity arg:self arg:key arg:axis arguments arg arg arg Assign Call Assign Call Compare If BoolOp Compare Call Compare Call Compare Assign Compare Assign Compare Assign Raise Call"
  },
  {
    "library": "pytorch",
    "name": "make_functional_with_buffers_deprecated_v1",
    "source_code": "def make_functional_with_buffers_deprecated_v1(model: nn.Module):\n    weights, weight_descriptors, _ = extract_weights(model)\n    buffers, buf_descriptors, _ = extract_buffers(model)\n\n    def fun(weights, buffers, data):\n        mutable_model = copy.deepcopy(model)\n        load_weights(mutable_model, weight_descriptors, weights)\n        load_buffers(mutable_model, buf_descriptors, buffers)\n        return mutable_model(*data)\n    return (weights, buffers, fun, weight_descriptors, buf_descriptors)",
    "docstring": "make_functional_with_buffers_deprecated_v1(model) -> weights, buffers, func, weight_names, buffer_names Given an nn.Module, make_functional_with_buffers_deprecated_v1 extracts the state (weights and buffers) and returns a functional version of the model, . can be invoked as follows: And here is an example of applying the grad transform: To put the state back into a model, use .",
    "type": "function",
    "file_path": "pytorch\\torch\\_functorch\\make_functional.py",
    "ast_data": "FunctionDef name:make_functional_with_buffers_deprecated_v1 arg:model arguments arg Assign Call Assign Call FunctionDef name:fun arg:weights arg:buffers arg:data arguments arg arg arg Assign Call Call Call Return return:yes Call Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "DictionaryLearningBenchmark",
    "source_code": "class DictionaryLearningBenchmark(Transformer, Estimator, Benchmark):\n    param_names = ['fit_algorithm', 'n_jobs']\n    params = (['lars', 'cd'], Benchmark.n_jobs_vals)\n\n    def setup_cache(self):\n        super().setup_cache()\n\n    def make_data(self, params):\n        return _olivetti_faces_dataset()\n\n    def make_estimator(self, params):\n        fit_algorithm, n_jobs = params\n        estimator = DictionaryLearning(n_components=15, fit_algorithm=fit_algorithm, alpha=0.1, transform_alpha=1, max_iter=20, tol=1e-16, random_state=0, n_jobs=n_jobs)\n        return estimator\n\n    def make_scorers(self):\n        make_dict_learning_scorers(self)",
    "docstring": "Benchmarks for DictionaryLearning.",
    "type": "class",
    "file_path": "scikit-learn\\asv_benchmarks\\benchmarks\\decomposition.py",
    "ast_data": "ClassDef name:DictionaryLearningBenchmark Assign Assign FunctionDef name:setup_cache arg:self arguments arg Call Call FunctionDef name:make_data arg:self arg:params arguments arg arg Return return:yes Call FunctionDef name:make_estimator arg:self arg:params arguments arg arg Assign Assign Call Return return:yes FunctionDef name:make_scorers arg:self arguments arg Call"
  },
  {
    "library": "scipy",
    "name": "_calculate_null_pairings",
    "source_code": "def _calculate_null_pairings(data, statistic, n_permutations, batch, rng=None):\n    n_samples = len(data)\n    n_obs_sample = data[0].shape[-1]\n    n_max = factorial(n_obs_sample) ** n_samples\n    if n_permutations >= n_max:\n        exact_test = True\n        n_permutations = n_max\n        batch = batch or int(n_permutations)\n        perm_generator = product(*(permutations(range(n_obs_sample)) for i in range(n_samples)))\n        batched_perm_generator = _batch_generator(perm_generator, batch=batch)\n    else:\n        exact_test = False\n        batch = batch or int(n_permutations)\n        args = (n_permutations, n_samples, n_obs_sample, batch, rng)\n        batched_perm_generator = _pairings_permutations_gen(*args)\n    null_distribution = []\n    for indices in batched_perm_generator:\n        indices = np.array(indices)\n        indices = np.swapaxes(indices, 0, 1)\n        data_batch = [None] * n_samples\n        for i in range(n_samples):\n            data_batch[i] = data[i][..., indices[i]]\n            data_batch[i] = np.moveaxis(data_batch[i], -2, 0)\n        null_distribution.append(statistic(*data_batch, axis=-1))\n    null_distribution = np.concatenate(null_distribution, axis=0)\n    return (null_distribution, n_permutations, exact_test)",
    "docstring": "Calculate null distribution for association tests.",
    "type": "function",
    "file_path": "scipy\\scipy\\stats\\_resampling.py",
    "ast_data": "FunctionDef name:_calculate_null_pairings arg:data arg:statistic arg:n_permutations arg:batch arg:rng arguments arg arg arg arg arg Assign Call Assign Assign Call If Compare Assign Assign Assign BoolOp Call Assign Call Call Call Call Assign Call Assign Assign BoolOp Call Assign Assign Call Assign For Assign Call Assign Call Assign For Call Assign Assign Call Call Call Assign Call Return return:yes"
  },
  {
    "library": "django",
    "name": "is_usable",
    "source_code": "def is_usable(self):\n    raise NotImplementedError('subclasses of BaseDatabaseWrapper may require an is_usable() method')",
    "docstring": "Test if the database connection is usable. This method may assume that self.connection is not None. Actual implementations should take care not to raise exceptions as that may prevent Django from recycling unusable connections.",
    "type": "method",
    "file_path": "django\\django\\db\\backends\\base\\base.py",
    "ast_data": "FunctionDef name:is_usable arg:self arguments arg Raise Call"
  },
  {
    "library": "scipy",
    "name": "_read_uint64",
    "source_code": "def _read_uint64(f):\n    return np.uint64(struct.unpack('>Q', f.read(8))[0])",
    "docstring": "Read an unsigned 64-bit integer",
    "type": "function",
    "file_path": "scipy\\scipy\\io\\_idl.py",
    "ast_data": "FunctionDef name:_read_uint64 arg:f arguments arg Return return:yes Call Call Call"
  },
  {
    "library": "matplotlib",
    "name": "get_fontvariant",
    "source_code": "def get_fontvariant(self):\n    return self._fontproperties.get_variant()",
    "docstring": "Return the font variant as a string. See Also -------- .font_manager.FontProperties.get_variant",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\text.py",
    "ast_data": "FunctionDef name:get_fontvariant arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "_check_non_neg_array",
    "source_code": "def _check_non_neg_array(self, X, reset_n_features, whom):\n    dtype = [np.float64, np.float32] if reset_n_features else self.components_.dtype\n    X = validate_data(self, X, reset=reset_n_features, accept_sparse='csr', dtype=dtype)\n    check_non_negative(X, whom)\n    return X",
    "docstring": "check X format check X format and make sure no negative value in X. Parameters ---------- X : array-like or sparse matrix",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\decomposition\\_lda.py",
    "ast_data": "FunctionDef name:_check_non_neg_array arg:self arg:X arg:reset_n_features arg:whom arguments arg arg arg arg Assign Assign Call Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "subfigures",
    "source_code": "def subfigures(self, nrows=1, ncols=1, squeeze=True, wspace=None, hspace=None, width_ratios=None, height_ratios=None, **kwargs):\n    gs = GridSpec(nrows=nrows, ncols=ncols, figure=self, wspace=wspace, hspace=hspace, width_ratios=width_ratios, height_ratios=height_ratios, left=0, right=1, bottom=0, top=1)\n    sfarr = np.empty((nrows, ncols), dtype=object)\n    for i in range(nrows):\n        for j in range(ncols):\n            sfarr[i, j] = self.add_subfigure(gs[i, j], **kwargs)\n    if self.get_layout_engine() is None and (wspace is not None or hspace is not None):\n        bottoms, tops, lefts, rights = gs.get_grid_positions(self)\n        for sfrow, bottom, top in zip(sfarr, bottoms, tops):\n            for sf, left, right in zip(sfrow, lefts, rights):\n                bbox = Bbox.from_extents(left, bottom, right, top)\n                sf._redo_transform_rel_fig(bbox=bbox)\n    if squeeze:\n        return sfarr.item() if sfarr.size == 1 else sfarr.squeeze()\n    else:\n        return sfarr",
    "docstring": "Add a set of subfigures to this figure or subfigure. A subfigure has the same artist methods as a figure, and is logically the same as a figure, but cannot print itself. See :doc:. .. versionchanged:: 3.10 subfigures are now added in row-major order. Parameters ---------- nrows, ncols : int, default: 1 Number of rows/columns of the subfigure grid. squeeze : bool, default: True If True, extra dimensions are squeezed out from the returned array of subfigures. wspace, hspace : float, default: None The amount of width/height reserved for space between subfigures, expressed as a fraction of the average subfigure width/height. If not given, the values will be inferred from rcParams if using constrained layout (see ), or zero if not using a layout engine. width_ratios : array-like of length *ncols*, optional Defines the relative widths of the columns. Each column gets a relative width of ``. If not given, all rows will have the same height.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\figure.py",
    "ast_data": "FunctionDef name:subfigures arg:self arg:nrows arg:ncols arg:squeeze arg:wspace arg:hspace arg:width_ratios arg:height_ratios arguments arg arg arg arg arg arg arg arg arg Assign Call Assign Call For Call For Call Assign Call If BoolOp Compare Call BoolOp Compare Compare Assign Call For Call For Call Assign Call Call If Return return:yes Compare Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_get_profile_data_generator",
    "source_code": "def _get_profile_data_generator(self):\n    node_to_traceback = defaultdict(list)\n    node_to_op_type = defaultdict(str)\n    for op in self._graph.get_operations():\n        node_to_traceback[op.name] = op.traceback\n        node_to_op_type[op.name] = op.type\n\n    def profile_data_generator(device_step_stats):\n        for node_stats in device_step_stats.node_stats:\n            if node_stats.node_name == '_SOURCE' or node_stats.node_name == '_SINK':\n                continue\n            yield ProfileDatum(node_stats, node_to_op_type[node_stats.node_name], node_to_traceback[node_stats.node_name])\n    return profile_data_generator",
    "docstring": "Get function that generates objects. Returns: A function that generates objects.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\profiler\\pprof_profiler.py",
    "ast_data": "FunctionDef name:_get_profile_data_generator arg:self arguments arg Assign Call Assign Call For Call Assign Assign FunctionDef name:profile_data_generator arg:device_step_stats arguments arg For If BoolOp Compare Compare Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "from_args_list",
    "source_code": "@classmethod\ndef from_args_list(cls, args_list: list[str]) -> 'CompileCommand':\n    cc_file = None\n    filtered_args = []\n    for arg in args_list:\n        if arg in _DISALLOWED_ARGS:\n            continue\n        if arg.endswith('.cc'):\n            cc_file = arg\n        filtered_args.append(arg)\n    return cls(cc_file, filtered_args)",
    "docstring": "Alternative constructor which uses the args_list from . This collects arguments and the file being run on from the output of . Also filters out arguments which break clang-tidy. Arguments: args_list: List of arguments generated by Returns: The corresponding ClangTidyCommand.",
    "type": "method",
    "file_path": "tensorflow\\third_party\\xla\\build_tools\\lint\\generate_compile_commands.py",
    "ast_data": "FunctionDef name:from_args_list arg:cls arg:args_list arguments arg arg Assign Assign For If Compare If Call Assign Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "serialize_hoo_inputs",
    "source_code": "def serialize_hoo_inputs(self, args, kwargs) -> list[NamedArgument]:\n    inputs = [NamedArgument(name='', arg=self.serialize_input(a), kind=ArgumentKind.POSITIONAL) for a in args]\n    inputs.extend([NamedArgument(name=name, arg=self.serialize_input(a), kind=ArgumentKind.KEYWORD) for name, a in kwargs.items()])\n    return inputs",
    "docstring": "For serializing HOO inputs since HOOs do not have a schema.",
    "type": "method",
    "file_path": "pytorch\\torch\\_export\\serde\\serialize.py",
    "ast_data": "FunctionDef name:serialize_hoo_inputs arg:self arg:args arg:kwargs arguments arg arg arg Assign Call Call Call Call Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "type_inference_rule",
    "source_code": "@register_inference_rule('type_as')\ndef type_inference_rule(n: Node, symbols, constraints, counter):\n    assert isinstance(n.args[0], Node)\n    assert isinstance(n.args[1], Node)\n    output, counter = gen_tvar(counter)\n    symbols[n] = output\n    from_arg = symbols[n.args[0]]\n    to_arg = symbols[n.args[1]]\n    assert isinstance(from_arg, TVar)\n    assert isinstance(to_arg, TVar)\n    return ([BinConstraintT(from_arg, to_arg, op_consistency), BinConstraintT(output, to_arg, op_eq)], counter)",
    "docstring": "We generate the constraint: input = output",
    "type": "function",
    "file_path": "pytorch\\torch\\fx\\experimental\\migrate_gradual_types\\constraint_generator.py",
    "ast_data": "FunctionDef name:type_inference_rule arg:n arg:symbols arg:constraints arg:counter arguments arg arg arg arg Call Call Assign Call Assign Assign Assign Call Call Return return:yes Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "get_writer",
    "source_code": "def get_writer(self):\n    if not self._writer:\n        self._writer = debug_events_writer.DebugEventsWriter(self._dump_root, self._tfdbg_run_id, circular_buffer_size=self._circular_buffer_size)\n    return self._writer",
    "docstring": "Get the debug events writer for the currently configured dump root.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\debug\\lib\\dumping_callback.py",
    "ast_data": "FunctionDef name:get_writer arg:self arguments arg If Assign Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "FullOptimStateDictConfig",
    "source_code": "@dataclass\nclass FullOptimStateDictConfig(OptimStateDictConfig):\n    rank0_only: bool = False",
    "docstring": "Attributes: rank0_only (bool): If ``)",
    "type": "class",
    "file_path": "pytorch\\torch\\distributed\\fsdp\\api.py",
    "ast_data": "ClassDef name:FullOptimStateDictConfig"
  },
  {
    "library": "pytorch",
    "name": "set_global",
    "source_code": "def set_global(self, global_qconfig: QConfigAny) -> QConfigMapping:\n    self.global_qconfig = global_qconfig\n    return self",
    "docstring": "Set the global (default) QConfig.",
    "type": "method",
    "file_path": "pytorch\\torch\\ao\\quantization\\qconfig_mapping.py",
    "ast_data": "FunctionDef name:set_global arg:self arg:global_qconfig arguments arg arg Assign Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "inline_generator_function",
    "source_code": "def inline_generator_function(self, fn, args, kwargs):\n    if not isinstance(fn, LocalGeneratorFunctionVariable):\n        fn = LocalGeneratorFunctionVariable(fn)\n    return fn.call_function(self, args, kwargs)",
    "docstring": "Redirect the call to the generator \"call_function\"",
    "type": "method",
    "file_path": "pytorch\\torch\\_dynamo\\symbolic_convert.py",
    "ast_data": "FunctionDef name:inline_generator_function arg:self arg:fn arg:args arg:kwargs arguments arg arg arg arg If Call Assign Call Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "astype",
    "source_code": "def astype(x: Array, dtype: DType, /, *, copy: py_bool=True, device: Device | None=None) -> Array:\n    _helpers._check_device(da, device)\n    if not copy and dtype == x.dtype:\n        return x\n    x = x.astype(dtype)\n    return x.copy() if copy else x",
    "docstring": "Array API compatibility wrapper for astype(). See the corresponding documentation in the array library and/or the array API specification for more details.",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\externals\\array_api_compat\\dask\\array\\_aliases.py",
    "ast_data": "FunctionDef name:astype arguments arg arg arg arg Call If BoolOp Compare Return return:yes Assign Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "build",
    "source_code": "def build(self, inputs_shape):\n    self.cell.build(inputs_shape)\n    self.built = True",
    "docstring": "Builds the wrapped cell.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\layers\\rnn_cell_wrapper_v2.py",
    "ast_data": "FunctionDef name:build arg:self arg:inputs_shape arguments arg arg Call Assign"
  },
  {
    "library": "tensorflow",
    "name": "create_zeros_slot",
    "source_code": "def create_zeros_slot(primary, name, dtype=None, colocate_with_primary=True, *, copy_xla_sharding=False):\n    if dtype is None:\n        dtype = primary.dtype\n    slot_shape = primary.get_shape()\n    if slot_shape.is_fully_defined():\n        initializer = init_ops.zeros_initializer()\n        return create_slot_with_initializer(primary, initializer, slot_shape, dtype, name, colocate_with_primary=colocate_with_primary, copy_xla_sharding=copy_xla_sharding)\n    else:\n        if isinstance(primary, variables.Variable):\n            slot_shape = array_ops.shape(cond.cond(variable_v1.is_variable_initialized(primary), primary.read_value, lambda: primary.initial_value))\n        else:\n            slot_shape = array_ops.shape(primary)\n        val = array_ops.zeros(slot_shape, dtype=dtype)\n        return create_slot(primary, val, name, colocate_with_primary=colocate_with_primary, copy_xla_sharding=copy_xla_sharding)",
    "docstring": "Create a slot initialized to 0 with same shape as the primary object. Args: primary: The primary or . name: Name to use for the slot variable. dtype: Type of the slot variable. Defaults to the type of . colocate_with_primary: Boolean. If True the slot is located on the same device as . copy_xla_sharding: Boolean. If True also copies XLA sharding from primary. Returns: A object.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\training\\slot_creator.py",
    "ast_data": "FunctionDef name:create_zeros_slot arg:primary arg:name arg:dtype arg:colocate_with_primary arguments arg arg arg arg arg If Compare Assign Assign Call If Call Assign Call Return return:yes Call If Call Assign Call Call Call arguments Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "Metadata",
    "source_code": "@dataclass\nclass Metadata:\n    state_dict_metadata: dict[str, STORAGE_TYPES]\n    planner_data: Any = None\n    storage_data: Any = None\n    storage_meta: Optional[StorageMeta] = None",
    "docstring": "This class represents the metadata of the checkpoint.",
    "type": "class",
    "file_path": "pytorch\\torch\\distributed\\checkpoint\\metadata.py",
    "ast_data": "ClassDef name:Metadata"
  },
  {
    "library": "scikit-learn",
    "name": "fit",
    "source_code": "@_fit_context(prefer_skip_nested_validation=False)\ndef fit(self, X, y, **fit_params):\n    if _routing_enabled():\n        routed_params = process_routing(self, 'fit', **fit_params)\n    else:\n        routed_params = Bunch(estimator=Bunch(fit=fit_params))\n    return self._fit(X, y, **routed_params.estimator.fit)",
    "docstring": "Fit the RFE model and then the underlying estimator on the selected features. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) The training input samples. y : array-like of shape (n_samples,) The target values. **fit_params : dict - If (default): Parameters directly passed to the `enable_metadata_routing=TrueMetadata Routing User Guide ` for more details. Returns ------- self : object Fitted estimator.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\feature_selection\\_rfe.py",
    "ast_data": "FunctionDef name:fit arg:self arg:X arg:y arguments arg arg arg arg If Call Assign Call Assign Call Call Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "get_num_sms",
    "source_code": "def get_num_sms() -> int:\n    carveout = torch._C._get_sm_carveout_experimental()\n    return get_max_num_sms() - (carveout if carveout is not None else 0)",
    "docstring": "Handle experimental carveout if set otherwise return hardware SM count",
    "type": "function",
    "file_path": "pytorch\\torch\\_inductor\\utils.py",
    "ast_data": "FunctionDef name:get_num_sms arguments Assign Call Return return:yes Call Compare"
  },
  {
    "library": "django",
    "name": "datatype",
    "source_code": "def datatype(self, as_string=False):\n    dtype = capi.get_band_datatype(self._ptr)\n    if as_string:\n        dtype = GDAL_PIXEL_TYPES[dtype]\n    return dtype",
    "docstring": "Return the GDAL Pixel Datatype for this band.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\gdal\\raster\\band.py",
    "ast_data": "FunctionDef name:datatype arg:self arg:as_string arguments arg arg Assign Call If Assign Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "tricontour",
    "source_code": "@_preprocess_data()\ndef tricontour(self, *args, extend3d=False, stride=5, zdir='z', offset=None, axlim_clip=False, **kwargs):\n    had_data = self.has_data()\n    tri, args, kwargs = Triangulation.get_from_args_and_kwargs(*args, **kwargs)\n    X = tri.x\n    Y = tri.y\n    if 'Z' in kwargs:\n        Z = kwargs.pop('Z')\n    else:\n        Z, *args = args\n    jX, jY, jZ = art3d.rotate_axes(X, Y, Z, zdir)\n    tri = Triangulation(jX, jY, tri.triangles, tri.mask)\n    cset = super().tricontour(tri, jZ, *args, **kwargs)\n    self.add_contour_set(cset, extend3d, stride, zdir, offset, axlim_clip)\n    self.auto_scale_xyz(X, Y, Z, had_data)\n    return cset",
    "docstring": "Create a 3D contour plot. .. note:: This method currently produces incorrect output due to a longstanding bug in 3D PolyCollection rendering. Parameters ---------- X, Y, Z : array-like Input data. See for supported data shapes. extend3d : bool, default: False Whether to extend contour in 3D. stride : int, default: 5 Step size for extending contour. zdir : {'x', 'y', 'z'}, default: 'z' The direction to use. offset : float, optional If specified, plot a projection of the contour lines at this position in a plane normal to *zdir*. axlim_clip : bool, default: False Whether to hide lines with a vertex outside the axes view limits. .. versionadded:: 3.10 data : indexable object, optional DATA_PARAMETER_PLACEHOLDER *args, **kwargs Other arguments are forwarded to . Returns ------- matplotlib.tri._tricontour.TriContourSet",
    "type": "method",
    "file_path": "matplotlib\\lib\\mpl_toolkits\\mplot3d\\axes3d.py",
    "ast_data": "FunctionDef name:tricontour arg:self arguments arg arg arg arg arg arg arg arg Assign Call Assign Call Assign Assign If Compare Assign Call Assign Assign Call Assign Call Assign Call Call Call Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "add",
    "source_code": "def add(self, template_code: str, *, base_name='_triton_helper_fn') -> str:\n    existing_name = self._templates_seen.get(template_code)\n    if existing_name is not None:\n        return existing_name\n    name = f'{base_name}{len(self.finalized_helpers)}'\n    self._templates_seen[template_code] = name\n    self.finalized_helpers.append(template_code.format(name=name))\n    return name",
    "docstring": "This accepts a function definition with the function name left as a format specifier e.g. @triton.jit def {name}(arg0, arg1): return arg0 + arg1 We add the templated code to the function set and return the name assigned to that function.",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\codegen\\triton.py",
    "ast_data": "FunctionDef name:add arg:self arg:template_code arguments arg arg arg Assign Call If Compare Return return:yes Assign Call Assign Call Call Return return:yes"
  },
  {
    "library": "django",
    "name": "find",
    "source_code": "def find(path, find_all=False, **kwargs):\n    if kwargs:\n        find_all = _check_deprecated_find_param(find_all=find_all, **kwargs)\n    searched_locations[:] = []\n    matches = []\n    for finder in get_finders():\n        result = finder.find(path, find_all=find_all)\n        if not find_all and result:\n            return result\n        if not isinstance(result, (list, tuple)):\n            result = [result]\n        matches.extend(result)\n    if matches:\n        return matches\n    return [] if find_all else None",
    "docstring": "Find a static file with the given path using all enabled finders. If `` if no match). Otherwise return a list.",
    "type": "function",
    "file_path": "django\\django\\contrib\\staticfiles\\finders.py",
    "ast_data": "FunctionDef name:find arg:path arg:find_all arguments arg arg arg If Assign Call Assign Assign For Call Assign Call If BoolOp Return return:yes If Call Assign Call If Return return:yes Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "change_func_globals",
    "source_code": "def change_func_globals(f, globals):\n    g = FunctionType(f.__code__, globals, name=f.__name__, argdefs=f.__defaults__, closure=f.__closure__)\n    g = functools.update_wrapper(g, f)\n    g.__kwdefaults__ = copy.copy(f.__kwdefaults__)\n    return g",
    "docstring": "Based on (@unutbu)",
    "type": "method",
    "file_path": "pytorch\\torch\\fx\\experimental\\rewriter.py",
    "ast_data": "FunctionDef name:change_func_globals arg:f arg:globals arguments arg arg Assign Call Assign Call Assign Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "CheckpointWrapper",
    "source_code": "class CheckpointWrapper(ActivationWrapper):\n\n    def __init__(self, mod: torch.nn.Module, checkpoint_impl: CheckpointImpl=CheckpointImpl.NO_REENTRANT, checkpoint_fn=None, **checkpoint_fn_kwargs):\n        super().__init__(mod)\n        self.checkpoint_impl = checkpoint_impl\n        if checkpoint_fn is None:\n            self.checkpoint_fn = partial(torch_utils_checkpoint, use_reentrant=self.checkpoint_impl == CheckpointImpl.REENTRANT, **checkpoint_fn_kwargs)\n        else:\n            self.checkpoint_fn = partial(checkpoint_fn, **checkpoint_fn_kwargs)\n\n    def forward(self, *args, **kwargs):\n        if self.checkpoint_impl == CheckpointImpl.REENTRANT and kwargs != {}:\n            flat_args, kwarg_keys = _pack_kwargs(*args, **kwargs)\n\n            def my_function(*inputs):\n                unpacked_args, unpacked_kwargs = _unpack_kwargs(inputs, kwarg_keys)\n                return self._checkpoint_wrapped_module(*unpacked_args, **unpacked_kwargs)\n            return self.checkpoint_fn(my_function, *flat_args)\n        else:\n            return self.checkpoint_fn(self._checkpoint_wrapped_module, *args, **kwargs)",
    "docstring": "An `` function.",
    "type": "class",
    "file_path": "pytorch\\torch\\distributed\\algorithms\\_checkpoint\\checkpoint_wrapper.py",
    "ast_data": "ClassDef name:CheckpointWrapper FunctionDef name:__init__ arg:self arg:mod arg:checkpoint_impl arg:checkpoint_fn arguments arg arg arg arg arg Call Call Assign If Compare Assign Call Compare Assign Call FunctionDef name:forward arg:self arguments arg arg arg If BoolOp Compare Compare Assign Call FunctionDef name:my_function arguments arg Assign Call Return return:yes Call Return return:yes Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_watch",
    "source_code": "def _watch(self, primals, tangents):\n\n    def _watch(primal, tangent):\n        if not primal.dtype.is_floating:\n            logging.log_first_n(logging.WARN, 'The dtype of the watched primal must be floating (e.g. tf.float32), got %r', 5, primal.dtype)\n        tangent = ops.convert_to_tensor(tangent, dtype=primal.dtype)\n        if hasattr(primal, 'handle'):\n            primal = ops.convert_to_tensor(primal.handle)\n        pywrap_tfe.TFE_Py_ForwardAccumulatorWatch(self._accumulator, primal, tangent)\n    nest.map_structure(_watch, primals, tangents)",
    "docstring": "Ensures that are being traced by this accumulator. Mathematically, is a vector right-multiplying the Jacobian matrix (a Jacobian-vector product) for the function computed while this accumulator is active. Since JVPs are computed in forward mode as the computation happens, this vector must be supplied in advance. Watching a single tensor multiple times sums each of its . Any un-watched tensor has zeros for its tangent vector. Args: primals: A Tensor or list of Tensors. tangents: A Tensor or list of Tensors matching .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\forwardprop.py",
    "ast_data": "FunctionDef name:_watch arg:self arg:primals arg:tangents arguments arg arg arg FunctionDef name:_watch arg:primal arg:tangent arguments arg arg If Call Assign Call If Call Assign Call Call Call"
  },
  {
    "library": "scipy",
    "name": "download_wheels",
    "source_code": "def download_wheels(version, wheelhouse):\n    http = http_manager()\n    wheel_names = get_wheel_names(version)\n    for i, wheel_name in enumerate(wheel_names):\n        wheel_url = f'{STAGING_URL}/{version}/download/{wheel_name}'\n        wheel_path = os.path.join(wheelhouse, wheel_name)\n        with open(wheel_path, 'wb') as f:\n            with http.request('GET', wheel_url, preload_content=False) as r:\n                print(f'{i + 1:<4}{wheel_name}')\n                shutil.copyfileobj(r, f)\n    print(f'\\nTotal files downloaded: {len(wheel_names)}')",
    "docstring": "Download release wheels. The release wheels for the given SciPy version are downloaded into the given directory. Parameters ---------- version : str The release version. For instance, \"1.5.0\". wheelhouse : str Directory in which to download the wheels.",
    "type": "function",
    "file_path": "scipy\\tools\\download-wheels.py",
    "ast_data": "FunctionDef name:download_wheels arg:version arg:wheelhouse arguments arg arg Assign Call Assign Call For Call Assign Assign Call With Call With Call Call Call Call Call"
  },
  {
    "library": "scipy",
    "name": "num_jac",
    "source_code": "def num_jac(fun, t, y, f, threshold, factor, sparsity=None):\n    y = np.asarray(y)\n    n = y.shape[0]\n    if n == 0:\n        return (np.empty((0, 0)), factor)\n    if factor is None:\n        factor = np.full(n, EPS ** 0.5)\n    else:\n        factor = factor.copy()\n    f_sign = 2 * (np.real(f) >= 0).astype(float) - 1\n    y_scale = f_sign * np.maximum(threshold, np.abs(y))\n    h = y + factor * y_scale - y\n    for i in np.nonzero(h == 0)[0]:\n        while h[i] == 0:\n            factor[i] *= 10\n            h[i] = y[i] + factor[i] * y_scale[i] - y[i]\n    if sparsity is None:\n        return _dense_num_jac(fun, t, y, f, h, factor, y_scale)\n    else:\n        structure, groups = sparsity\n        return _sparse_num_jac(fun, t, y, f, h, factor, y_scale, structure, groups)",
    "docstring": "Finite differences Jacobian approximation tailored for ODE solvers. This function computes finite difference approximation to the Jacobian matrix of with respect to using forward differences. The Jacobian matrix has shape (n, n) and its element (i, j) is equal to `ythresholdstructurefactor` for the next evaluation.",
    "type": "function",
    "file_path": "scipy\\scipy\\integrate\\_ivp\\common.py",
    "ast_data": "FunctionDef name:num_jac arg:fun arg:t arg:y arg:f arg:threshold arg:factor arg:sparsity arguments arg arg arg arg arg arg arg Assign Call Assign If Compare Return return:yes Call If Compare Assign Call Assign Call Assign Call Compare Call Assign Call Call Assign For Call Compare While Compare Assign If Compare Return return:yes Call Assign Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "create",
    "source_code": "@property\ndef create(self):\n    if not self._in_graph_mode:\n        raise RuntimeError('This operation is not supported when eager execution is enabled.')\n    return self._initializer_op",
    "docstring": "The op responsible for initializing this variable.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\resource_variable_ops.py",
    "ast_data": "FunctionDef name:create arg:self arguments arg If Raise Call Return return:yes"
  },
  {
    "library": "pandas",
    "name": "from_array",
    "source_code": "@classmethod\ndef from_array(cls, array: ArrayLike, index: Index, refs: BlockValuesRefs | None=None) -> SingleBlockManager:\n    array = maybe_coerce_values(array)\n    bp = BlockPlacement(slice(0, len(index)))\n    block = new_block(array, placement=bp, ndim=1, refs=refs)\n    return cls(block, index)",
    "docstring": "Constructor for if we have an array that is not yet a Block.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\internals\\managers.py",
    "ast_data": "FunctionDef name:from_array arg:cls arg:array arg:index arg:refs arguments arg arg arg arg Assign Call Assign Call Call Call Assign Call Return return:yes Call"
  },
  {
    "library": "authlib",
    "name": "decompress",
    "source_code": "def decompress(self, s):\n    return zlib.decompress(s, -zlib.MAX_WBITS)",
    "docstring": "Decompress DEFLATE bytes data.",
    "type": "method",
    "file_path": "authlib\\authlib\\jose\\rfc7518\\jwe_zips.py",
    "ast_data": "FunctionDef name:decompress arg:self arg:s arguments arg arg Return return:yes Call"
  },
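A round-trip sketch for the raw-DEFLATE convention used by `decompress` above: a negative `wbits` value tells zlib to omit the zlib header and checksum (RFC 1951), which is what the JWE `DEF` compression algorithm expects.

```python
# Raw DEFLATE round trip: negative wbits selects headerless streams.
import zlib

data = b"payload " * 64
compressor = zlib.compressobj(wbits=-zlib.MAX_WBITS)
compressed = compressor.compress(data) + compressor.flush()
assert zlib.decompress(compressed, -zlib.MAX_WBITS) == data
```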
  {
    "library": "tensorflow",
    "name": "_DenseToCSRSparseMatrixGrad",
    "source_code": "@ops.RegisterGradient('DenseToCSRSparseMatrix')\ndef _DenseToCSRSparseMatrixGrad(op: ops.Operation, grad):\n    grad_values = sparse_csr_matrix_ops.csr_sparse_matrix_to_dense(grad, type=op.get_attr('T'))\n    return (grad_values, None)",
    "docstring": "Gradient for dense_to_csr_sparse_matrix op.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\linalg\\sparse\\sparse_csr_matrix_grad.py",
    "ast_data": "FunctionDef name:_DenseToCSRSparseMatrixGrad arg:op arg:grad arguments arg arg Assign Call Call Return return:yes Call"
  },
  {
    "library": "scrapy",
    "name": "process_parallel",
    "source_code": "def process_parallel(callbacks: Iterable[Callable[Concatenate[_T, _P], _T2]], input: _T, *a: _P.args, **kw: _P.kwargs) -> Deferred[list[_T2]]:\n    dfds = [succeed(input).addCallback(x, *a, **kw) for x in callbacks]\n    d: Deferred[list[tuple[bool, _T2]]] = DeferredList(dfds, fireOnOneErrback=True, consumeErrors=True)\n    d2: Deferred[list[_T2]] = d.addCallback(lambda r: [x[1] for x in r])\n\n    def eb(failure: Failure) -> Failure:\n        return failure.value.subFailure\n    d2.addErrback(eb)\n    return d2",
    "docstring": "Return a Deferred with the output of all successful calls to the given callbacks",
    "type": "function",
    "file_path": "scrapy\\scrapy\\utils\\defer.py",
    "ast_data": "FunctionDef name:process_parallel arg:callbacks arg:input arguments arg arg arg arg Assign Call Call Call Call arguments arg FunctionDef name:eb arg:failure arguments arg Return return:yes Call Return return:yes"
  },
  {
    "library": "authlib",
    "name": "fetch_access_token",
    "source_code": "def fetch_access_token(self, request_token=None, **kwargs):\n    with self._get_oauth_client() as client:\n        if request_token is None:\n            raise MissingRequestTokenError()\n        token = {}\n        token.update(request_token)\n        token.update(kwargs)\n        client.token = token\n        params = self.access_token_params or {}\n        token = client.fetch_access_token(self.access_token_url, **params)\n    return token",
    "docstring": "Fetch access token in one step. :param request_token: A previous request token for OAuth 1. :param kwargs: Extra parameters to fetch access token. :return: A token dict.",
    "type": "method",
    "file_path": "authlib\\authlib\\integrations\\base_client\\sync_app.py",
    "ast_data": "FunctionDef name:fetch_access_token arg:self arg:request_token arguments arg arg arg With Call If Compare Raise Call Assign Call Call Assign Assign BoolOp Assign Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "log_abs_det_jacobian",
    "source_code": "def log_abs_det_jacobian(self, x, y):\n    raise NotImplementedError",
    "docstring": "Computes the log det jacobian given input and output.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributions\\transforms.py",
    "ast_data": "FunctionDef name:log_abs_det_jacobian arg:self arg:x arg:y arguments arg arg arg Raise"
  },
  {
    "library": "tensorflow",
    "name": "_VarHandle",
    "source_code": "class _VarHandle(_Node):\n\n    def convert_variable_to_constant(self, incoming_edge, tensor_data):\n        tensor_proto = tensor_util.make_tensor_proto(tensor_data.numpy, tensor_data.dtype, tensor_data.numpy.shape)\n        node = self.converted_self().node\n        node.Clear()\n        node.name = self._node.name\n        node.op = 'Const'\n        node.attr['dtype'].CopyFrom(tensor_data.dtype_attr)\n        node.attr['value'].tensor.CopyFrom(tensor_proto)\n        for edge in self.outgoing_edges:\n            edge.destination.convertible.convert_variable_to_constant(edge, tensor_data)",
    "docstring": "Specialization of _Node to VarHandleOp.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\convert_to_constants.py",
    "ast_data": "ClassDef name:_VarHandle FunctionDef name:convert_variable_to_constant arg:self arg:incoming_edge arg:tensor_data arguments arg arg arg Assign Call Assign Call Call Assign Assign Call Call For Call"
  },
  {
    "library": "pandas",
    "name": "axes",
    "source_code": "@property\ndef axes(self) -> list[Index]:\n    return [self._get_axis(a) for a in self._AXIS_ORDERS]",
    "docstring": "Return index label(s) of the internal NDFrame",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\generic.py",
    "ast_data": "FunctionDef name:axes arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_call_with_structured_signature",
    "source_code": "def _call_with_structured_signature(self, args, kwargs):\n    bound_args = function_type_utils.canonicalize_function_inputs(args, kwargs, self.function_type)\n    filtered_flat_args = self.function_type.unpack_inputs(bound_args)\n    return self._call_flat(filtered_flat_args, captured_inputs=self.captured_inputs)",
    "docstring": "Executes the wrapped function with the structured signature. Args: args: Positional arguments to the concrete function. kwargs: Keyword arguments to the concrete function. Returns: The result of applying the function on the Tensors/Variables contained in and . Raises: TypeError: if and do not match the structured signature of this .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\polymorphic_function\\concrete_function.py",
    "ast_data": "FunctionDef name:_call_with_structured_signature arg:self arg:args arg:kwargs arguments arg arg arg Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "variables",
    "source_code": "@property\ndef variables(self):\n    return self.global_variables + self.local_variables",
    "docstring": "Returns the list of global and local variables created by the Template.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\template.py",
    "ast_data": "FunctionDef name:variables arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "shape",
    "source_code": "@property\ndef shape(self):\n    raise NotImplementedError",
    "docstring": "The of this variable. Returns: A .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\variables.py",
    "ast_data": "FunctionDef name:shape arg:self arguments arg Raise"
  },
  {
    "library": "scipy",
    "name": "_cplxpair",
    "source_code": "def _cplxpair(z, tol=None):\n    z = atleast_1d(z)\n    if z.size == 0 or np.isrealobj(z):\n        return np.sort(z)\n    if z.ndim != 1:\n        raise ValueError('z must be 1-D')\n    zc, zr = _cplxreal(z, tol)\n    zc = np.dstack((zc.conj(), zc)).flatten()\n    z = np.append(zc, zr)\n    return z",
    "docstring": "Sort into pairs of complex conjugates. Complex conjugates in are sorted by increasing real part. In each pair, the number with negative imaginary part appears first. If pairs have identical real parts, they are sorted by increasing imaginary magnitude. Two complex numbers are considered a conjugate pair if their real and imaginary parts differ in magnitude by less than `tolzz` for which a conjugate cannot be found. See Also -------- _cplxreal Examples -------- >>> from scipy.signal._filter_design import _cplxpair >>> a = [4, 3, 1, 2-2j, 2+2j, 2-1j, 2+1j, 2-1j, 2+1j, 1+1j, 1-1j] >>> z = _cplxpair(a) >>> print(z) [ 1.-1.j 1.+1.j 2.-1.j 2.+1.j 2.-1.j 2.+1.j 2.-2.j 2.+2.j 1.+0.j 3.+0.j 4.+0.j]",
    "type": "function",
    "file_path": "scipy\\scipy\\signal\\_filter_design.py",
    "ast_data": "FunctionDef name:_cplxpair arg:z arg:tol arguments arg arg Assign Call If BoolOp Compare Call Return return:yes Call If Compare Raise Call Assign Call Assign Call Call Call Assign Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "call_function",
    "source_code": "@compatibility(is_backward_compatible=True)\ndef call_function(self, the_function: Callable[..., Any], args: Optional[tuple['Argument', ...]]=None, kwargs: Optional[dict[str, 'Argument']]=None, type_expr: Optional[Any]=None, name: Optional[str]=None) -> Node:\n    return self.create_node('call_function', the_function, args, kwargs, name=name, type_expr=type_expr)",
    "docstring": "Insert a `Graph.create_node`.",
    "type": "method",
    "file_path": "pytorch\\torch\\fx\\graph.py",
    "ast_data": "FunctionDef name:call_function arg:self arg:the_function arg:args arg:kwargs arg:type_expr arg:name arguments arg arg arg arg arg arg Return return:yes Call Call"
  },
  {
    "library": "kornia",
    "name": "forward",
    "source_code": "@torch.inference_mode()\ndef forward(self, images: Union[Tensor, List[Tensor]]) -> Union[Tensor, List[Tensor]]:\n    output = self.pre_processor(images)\n    if isinstance(output, (list, tuple)):\n        images = output[0]\n    else:\n        images = output\n    if isinstance(images, list):\n        out_images = [self.model(image[None])[0] for image in images]\n    else:\n        out_images = self.model(images)\n    return self.post_processor(out_images)",
    "docstring": "Forward pass of the super resolution model. Args: images: If list of RGB images. Each image is a Tensor with shape :math:. If Tensor, a Tensor with shape :math:. Returns: output tensor.",
    "type": "method",
    "file_path": "kornia\\kornia\\models\\super_resolution\\base.py",
    "ast_data": "FunctionDef name:forward arg:self arg:images arguments arg arg Assign Call If Call Assign Assign If Call Assign Call Assign Call Return return:yes Call Call"
  },
  {
    "library": "kornia",
    "name": "_load_image_to_tensor",
    "source_code": "def _load_image_to_tensor(path_file: Path, device: Device) -> Tensor:\n    if path_file.suffix.lower() in ['.jpg', '.jpeg']:\n        img = kornia_rs.read_image_jpegturbo(str(path_file))\n    else:\n        img = kornia_rs.read_image_any(str(path_file))\n    img_t = image_to_tensor(img, keepdim=True)\n    dev = device if isinstance(device, torch.device) or device is None else torch.device(device)\n    return img_t.to(device=dev)",
    "docstring": "Read an image file and decode using the Kornia Rust backend. The decoded image is returned as numpy array with shape HxWxC. Args: path_file: Path to a valid image file. device: the device where you want to get your image placed. Return: Image tensor with shape :math:.",
    "type": "function",
    "file_path": "kornia\\kornia\\io\\io.py",
    "ast_data": "FunctionDef name:_load_image_to_tensor arg:path_file arg:device arguments arg arg If Compare Call Assign Call Call Assign Call Call Assign Call Assign BoolOp Call Compare Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "is_valid_signature",
    "source_code": "@tf_export(v1=['saved_model.is_valid_signature', 'saved_model.signature_def_utils.is_valid_signature'])\n@deprecation.deprecated_endpoints('saved_model.signature_def_utils.is_valid_signature')\ndef is_valid_signature(signature_def):\n    if signature_def is None:\n        return False\n    return _is_valid_classification_signature(signature_def) or _is_valid_regression_signature(signature_def) or _is_valid_predict_signature(signature_def)",
    "docstring": "Determine whether a SignatureDef can be served by TensorFlow Serving.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\saved_model\\signature_def_utils_impl.py",
    "ast_data": "FunctionDef name:is_valid_signature arg:signature_def arguments arg If Compare Return return:yes Return return:yes BoolOp Call Call Call Call Call"
  },
  {
    "library": "scipy",
    "name": "_smooth_mgc_map",
    "source_code": "def _smooth_mgc_map(sig_connect, stat_mgc_map):\n    m, n = stat_mgc_map.shape\n    stat = stat_mgc_map[m - 1][n - 1]\n    opt_scale = [m, n]\n    if np.linalg.norm(sig_connect) != 0:\n        if np.sum(sig_connect) >= np.ceil(0.02 * max(m, n)) * min(m, n):\n            max_corr = max(stat_mgc_map[sig_connect])\n            max_corr_index = np.where((stat_mgc_map >= max_corr) & sig_connect)\n            if max_corr >= stat:\n                stat = max_corr\n                k, l = max_corr_index\n                one_d_indices = k * n + l\n                k = np.max(one_d_indices) // n\n                l = np.max(one_d_indices) % n\n                opt_scale = [k + 1, l + 1]\n    return (stat, opt_scale)",
    "docstring": "Finds the smoothed maximal within the significant region R. If area of R is too small it returns the last local correlation. Otherwise, returns the maximum within significant_connected_region. Parameters ---------- sig_connect : ndarray A binary matrix with 1's indicating the significant region. stat_mgc_map : ndarray All local correlations within `` pair.",
    "type": "function",
    "file_path": "scipy\\scipy\\stats\\_mgc.py",
    "ast_data": "FunctionDef name:_smooth_mgc_map arg:sig_connect arg:stat_mgc_map arguments arg arg Assign Assign Assign If Compare Call If Compare Call Call Call Call Assign Call Assign Call Compare If Compare Assign Assign Assign Assign Call Assign Call Assign Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_get_param_to_fqn",
    "source_code": "def _get_param_to_fqn(model: torch.nn.Module) -> dict[torch.nn.Parameter, str]:\n    param_to_param_names = _get_param_to_fqns(model)\n    for param_names in param_to_param_names.values():\n        assert len(param_names) > 0, '`_get_param_to_fqns()` should not construct empty lists'\n        if len(param_names) > 1:\n            raise RuntimeError(f'Each parameter should only map to one parameter name but got {len(param_names)}: {param_names}')\n    param_to_param_name = {param: param_names[0] for param, param_names in param_to_param_names.items()}\n    return param_to_param_name",
    "docstring": "Construct a mapping from parameters to their parameter names. The `FullyShardedDataParallel_get_param_to_fqnslistFullyShardedDataParallel` instances.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\fsdp\\fully_sharded_data_parallel.py",
    "ast_data": "FunctionDef name:_get_param_to_fqn arg:model arguments arg Assign Call For Call Compare Call If Compare Call Raise Call Call Assign Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "_pass_image_data",
    "source_code": "@staticmethod\ndef _pass_image_data(x, alpha=None, bytes=False, norm=True):\n    if x.shape[2] == 3:\n        if alpha is None:\n            alpha = 1\n        if x.dtype == np.uint8:\n            alpha = np.uint8(alpha * 255)\n        m, n = x.shape[:2]\n        xx = np.empty(shape=(m, n, 4), dtype=x.dtype)\n        xx[:, :, :3] = x\n        xx[:, :, 3] = alpha\n    elif x.shape[2] == 4:\n        xx = x\n    else:\n        raise ValueError('Third dimension must be 3 or 4')\n    if xx.dtype.kind == 'f':\n        if np.any((nans := np.isnan(x))):\n            if x.shape[2] == 4:\n                xx = xx.copy()\n            xx[np.any(nans, axis=2), :] = 0\n        if norm and (xx.max() > 1 or xx.min() < 0):\n            raise ValueError('Floating point image RGB values must be in the 0..1 range.')\n        if bytes:\n            xx = (xx * 255).astype(np.uint8)\n    elif xx.dtype == np.uint8:\n        if not bytes:\n            xx = xx.astype(np.float32) / 255\n    else:\n        raise ValueError('Image RGB array must be uint8 or floating point; found %s' % xx.dtype)\n    if np.ma.is_masked(x):\n        xx[np.any(np.ma.getmaskarray(x), axis=2), 3] = 0\n    return xx",
    "docstring": "Helper function to pass ndarray of shape (...,3) or (..., 4) through , see for docstring.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\colorizer.py",
    "ast_data": "FunctionDef name:_pass_image_data arg:x arg:alpha arg:bytes arg:norm arguments arg arg arg arg If Compare If Compare Assign If Compare Assign Call Assign Assign Call Assign Assign If Compare Assign Raise Call If Compare If Call Call If Compare Assign Call Assign Call If BoolOp BoolOp Compare Call Compare Call Raise Call If Assign Call If Compare If Assign Call Raise Call If Call Assign Call Call Return return:yes"
  },
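The RGB-to-RGBA padding step in `_pass_image_data` can be reproduced standalone with plain NumPy; this is an illustrative sketch of the same channel layout, not matplotlib's implementation.

```python
# Pad an RGB float image with an opaque alpha channel, as the helper
# above does when the third dimension is 3 and alpha is None.
import numpy as np

rgb = np.random.rand(4, 5, 3)                 # float RGB image in 0..1
rgba = np.empty((*rgb.shape[:2], 4), dtype=rgb.dtype)
rgba[..., :3] = rgb                           # copy the color channels
rgba[..., 3] = 1.0                            # fully opaque alpha
assert rgba.shape == (4, 5, 4)
```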
  {
    "library": "pytorch",
    "name": "_generate_graph_inputs",
    "source_code": "def _generate_graph_inputs(self) -> None:\n    for ir_node in V.graph.graph_inputs.values():\n        buffer = self._get_buffer(ir_node)\n        node = self.gm.graph.placeholder(buffer.get_name())\n        self._create_meta_from_buffer(node, buffer)\n        self._record_allocation(buffer, node)",
    "docstring": "Converts graph inputs to FX placeholders.",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\codegen\\wrapper_fxir.py",
    "ast_data": "FunctionDef name:_generate_graph_inputs arg:self arguments arg For Call Assign Call Assign Call Call Call Call"
  },
  {
    "library": "cherrypy",
    "name": "__iter__",
    "source_code": "def __iter__(self):\n    return self",
    "docstring": "Make a UTF-8-encoded stream iterator.",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\lib\\encoding.py",
    "ast_data": "FunctionDef name:__iter__ arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "get_name_scope",
    "source_code": "def get_name_scope(self) -> str:\n    return self._name_stack",
    "docstring": "Returns the current name scope. For example: would print the string . Returns: A string representing the current name scope.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\ops.py",
    "ast_data": "FunctionDef name:get_name_scope arg:self arguments arg Return return:yes"
  },
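A hedged illustration of the nested-scope behavior described in the `get_name_scope` docstring above, using the TF1-compatible graph API; the scope names are arbitrary.

```python
# Nested name scopes accumulate into a slash-separated path.
import tensorflow as tf

with tf.Graph().as_default():
    with tf.name_scope("scope1"):
        with tf.name_scope("scope2"):
            # Expected to print: scope1/scope2
            print(tf.compat.v1.get_default_graph().get_name_scope())
```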
  {
    "library": "tensorflow",
    "name": "_colocate_with_for_gradient",
    "source_code": "def _colocate_with_for_gradient(op, gradient_uid, ignore_existing=False) -> ContextManager[None]:\n    if context.executing_eagerly():\n        if op is not None:\n            if not hasattr(op, 'device'):\n                op = convert_to_tensor(op)\n            return device(op.device)\n        else:\n            return NullContextmanager()\n    else:\n        default_graph = get_default_graph()\n        if isinstance(op, EagerTensor):\n            if default_graph.building_function:\n                return default_graph.device(op.device)\n            else:\n                raise ValueError('Encountered an Eager-defined Tensor during graph construction, but a function was not being built.')\n        return default_graph._colocate_with_for_gradient(op, gradient_uid=gradient_uid, ignore_existing=ignore_existing)",
    "docstring": "Returns a context manager for colocating op gradients with an op. Internal API. In eager mode, returns a context manager that sets the default device for new ops to the same device as the given op. Does the same if a function is currently being built (i.e. the current mode is graph, but the overall mode is eager). In all other cases, returns a context manager, optionally accounting for gradients (if a gradient UID is specified). Args: op: Operation or Tensor with which to colocate. gradient_uid: Optional gradient UID to enable colocation of gradients during compilation. ignore_existing: See . Returns: A context manager used to colocate ops and gradients with the specified operation.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\ops.py",
    "ast_data": "FunctionDef name:_colocate_with_for_gradient arg:op arg:gradient_uid arg:ignore_existing arguments arg arg arg If Call If Compare If Call Assign Call Return return:yes Call Return return:yes Call Assign Call If Call If Return return:yes Call Raise Call Return return:yes Call"
  },
  {
    "library": "django",
    "name": "receiver",
    "source_code": "def receiver(signal, **kwargs):\n\n    def _decorator(func):\n        if isinstance(signal, (list, tuple)):\n            for s in signal:\n                s.connect(func, **kwargs)\n        else:\n            signal.connect(func, **kwargs)\n        return func\n    return _decorator",
    "docstring": "A decorator for connecting receivers to signals. Used by passing in the signal (or list of signals) and keyword arguments to connect:: @receiver(post_save, sender=MyModel) def signal_receiver(sender, **kwargs): ... @receiver([post_save, post_delete], sender=MyModel) def signals_receiver(sender, **kwargs): ...",
    "type": "function",
    "file_path": "django\\django\\dispatch\\dispatcher.py",
    "ast_data": "FunctionDef name:receiver arg:signal arguments arg arg FunctionDef name:_decorator arg:func arguments arg If Call For Call Call Return return:yes Return return:yes"
  },
  {
    "library": "kornia",
    "name": "camtoworld_vision_to_graphics_4x4",
    "source_code": "def camtoworld_vision_to_graphics_4x4(extrinsics_vision: Tensor) -> Tensor:\n    KORNIA_CHECK_SHAPE(extrinsics_vision, ['B', '4', '4'])\n    invert_yz = tensor([[[1, 0, 0, 0], [0, -1, 0, 0], [0, 0, -1, 0], [0, 0, 0, 1.0]]], dtype=extrinsics_vision.dtype, device=extrinsics_vision.device)\n    return extrinsics_vision @ invert_yz",
    "docstring": "Convert vision coordinate frame (e.g. OpenCV) to graphics coordinate frame (e.g. OpenGK.). I.e. flips y and z axis Graphics convention: [+x, +y, +z] == [right, up, backwards]. Vision convention: [+x, +y, +z] == [right, down, forwards]. Args: extrinsics_vision: pose matrix :math:. Returns: extrinsics: pose matrix :math:. Example: >>> ext = torch.eye(4)[None] >>> camtoworld_vision_to_graphics_4x4(ext) tensor([[[ 1., 0., 0., 0.], [ 0., -1., 0., 0.], [ 0., 0., -1., 0.], [ 0., 0., 0., 1.]]])",
    "type": "function",
    "file_path": "kornia\\kornia\\geometry\\conversions.py",
    "ast_data": "FunctionDef name:camtoworld_vision_to_graphics_4x4 arg:extrinsics_vision arguments arg Call Assign Call Return return:yes"
  },
  {
    "library": "scrapy",
    "name": "store_response",
    "source_code": "def store_response(self, spider: Spider, request: Request, response: Response) -> None:\n    rpath = Path(self._get_request_path(spider, request))\n    if not rpath.exists():\n        rpath.mkdir(parents=True)\n    metadata = {'url': request.url, 'method': request.method, 'status': response.status, 'response_url': response.url, 'timestamp': time()}\n    with self._open(rpath / 'meta', 'wb') as f:\n        f.write(to_bytes(repr(metadata)))\n    with self._open(rpath / 'pickled_meta', 'wb') as f:\n        pickle.dump(metadata, f, protocol=4)\n    with self._open(rpath / 'response_headers', 'wb') as f:\n        f.write(headers_dict_to_raw(response.headers))\n    with self._open(rpath / 'response_body', 'wb') as f:\n        f.write(response.body)\n    with self._open(rpath / 'request_headers', 'wb') as f:\n        f.write(headers_dict_to_raw(request.headers))\n    with self._open(rpath / 'request_body', 'wb') as f:\n        f.write(request.body)",
    "docstring": "Store the given response in the cache.",
    "type": "method",
    "file_path": "scrapy\\scrapy\\extensions\\httpcache.py",
    "ast_data": "FunctionDef name:store_response arg:self arg:spider arg:request arg:response arguments arg arg arg arg Assign Call Call If Call Call Assign Call With Call Call Call Call With Call Call With Call Call Call With Call Call With Call Call Call With Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_require_strategy_scope_strategy",
    "source_code": "def _require_strategy_scope_strategy(strategy):\n    context = _get_per_thread_mode()\n    if context.strategy is strategy:\n        return\n    _wrong_strategy_scope(strategy, context)",
    "docstring": "Verify in a in this thread.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\distribute_lib.py",
    "ast_data": "FunctionDef name:_require_strategy_scope_strategy arg:strategy arguments arg Assign Call If Compare Return return:no Call"
  },
  {
    "library": "pandas",
    "name": "_simple_json_normalize",
    "source_code": "def _simple_json_normalize(ds: dict | list[dict], sep: str='.') -> dict | list[dict] | Any:\n    normalized_json_object = {}\n    if isinstance(ds, dict):\n        normalized_json_object = _normalize_json_ordered(data=ds, separator=sep)\n    elif isinstance(ds, list):\n        normalized_json_list = [_simple_json_normalize(row, sep=sep) for row in ds]\n        return normalized_json_list\n    return normalized_json_object",
    "docstring": "A optimized basic json_normalize Converts a nested dict into a flat dict (\"record\"), unlike json_normalize and nested_to_record it doesn't do anything clever. But for the most basic use cases it enhances performance. E.g. pd.json_normalize(data) Parameters ---------- ds : dict or list of dicts sep : str, default '.' Nested records will generate names separated by sep, e.g., for sep='.', { 'foo' : { 'bar' : 0 } } -> foo.bar Returns ------- frame : DataFrame d - dict or list of dicts, matching Examples -------- >>> _simple_json_normalize( ... { ... \"flat1\": 1, ... \"dict1\": {\"c\": 1, \"d\": 2}, ... \"nested\": {\"e\": {\"c\": 1, \"d\": 2}, \"d\": 2}, ... } ... ) {'flat1': 1, 'dict1.c': 1, 'dict1.d': 2, 'nested.e.c': 1, 'nested.e.d': 2, 'nested.d': 2}",
    "type": "function",
    "file_path": "pandas\\pandas\\io\\json\\_normalize.py",
    "ast_data": "FunctionDef name:_simple_json_normalize arg:ds arg:sep arguments arg arg Assign If Call Assign Call If Call Assign Call Return return:yes Return return:yes"
  },
  {
    "library": "django",
    "name": "get_models",
    "source_code": "def get_models(self, include_auto_created=False, include_swapped=False):\n    self.apps.check_models_ready()\n    for model in self.models.values():\n        if model._meta.auto_created and (not include_auto_created):\n            continue\n        if model._meta.swapped and (not include_swapped):\n            continue\n        yield model",
    "docstring": "Return an iterable of models. By default, the following models aren't included: - auto-created models for many-to-many relations without an explicit intermediate table, - models that have been swapped out. Set the corresponding keyword argument to True to include such models. Keyword arguments aren't documented; they're a private API.",
    "type": "method",
    "file_path": "django\\django\\apps\\config.py",
    "ast_data": "FunctionDef name:get_models arg:self arg:include_auto_created arg:include_swapped arguments arg arg arg Call For Call If BoolOp If BoolOp"
  },
  {
    "library": "django",
    "name": "uninstall_if_needed",
    "source_code": "def uninstall_if_needed(setting, value, enter, **kwargs):\n    if not enter and setting == 'INSTALLED_APPS' and ('django.contrib.postgres' not in set(value)):\n        connection_created.disconnect(register_type_handlers)\n        CharField._unregister_lookup(Unaccent)\n        TextField._unregister_lookup(Unaccent)\n        CharField._unregister_lookup(SearchLookup)\n        TextField._unregister_lookup(SearchLookup)\n        CharField._unregister_lookup(TrigramSimilar)\n        TextField._unregister_lookup(TrigramSimilar)\n        CharField._unregister_lookup(TrigramWordSimilar)\n        TextField._unregister_lookup(TrigramWordSimilar)\n        CharField._unregister_lookup(TrigramStrictWordSimilar)\n        TextField._unregister_lookup(TrigramStrictWordSimilar)\n        setting_changed.disconnect(uninstall_if_needed)\n        MigrationWriter.unregister_serializer(RANGE_TYPES)",
    "docstring": "Undo the effects of PostgresConfig.ready() when django.contrib.postgres is \"uninstalled\" by override_settings().",
    "type": "function",
    "file_path": "django\\django\\contrib\\postgres\\apps.py",
    "ast_data": "FunctionDef name:uninstall_if_needed arg:setting arg:value arg:enter arguments arg arg arg arg If BoolOp Compare Compare Call Call Call Call Call Call Call Call Call Call Call Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "indirect_indexing",
    "source_code": "def indirect_indexing(self, index_var: str, size, check, wrap_neg=True):\n    return sympy_index_symbol(str(index_var))",
    "docstring": "Convert index variable to symbolic form.",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\select_algorithm.py",
    "ast_data": "FunctionDef name:indirect_indexing arg:self arg:index_var arg:size arg:check arg:wrap_neg arguments arg arg arg arg arg Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "validate_input_types",
    "source_code": "def validate_input_types(inp, orig_inp, allow_dict=True, field_name='inputs'):\n    if isinstance(inp, (list, tuple)):\n        if not all((isinstance(v, np.ndarray) or tensor_util.is_tf_type(v) for v in inp)):\n            raise ValueError('Please provide as model inputs either a single array or a list of arrays. You passed: {}={}'.format(field_name, str(orig_inp)))\n    elif isinstance(inp, dict):\n        if not allow_dict:\n            raise ValueError('You cannot pass a dictionary as model {}.'.format(field_name))\n    elif not isinstance(inp, np.ndarray) and (not tensor_util.is_tf_type(inp)):\n        raise ValueError('Please provide as model inputs either a single array or a list of arrays. You passed: {}={}'.format(field_name, orig_inp))",
    "docstring": "Helper function to validate either inputs or targets.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\training_utils_v1.py",
    "ast_data": "FunctionDef name:validate_input_types arg:inp arg:orig_inp arg:allow_dict arg:field_name arguments arg arg arg arg If Call If Call BoolOp Call Call Raise Call Call Call If Call If Raise Call Call If BoolOp Call Call Raise Call Call"
  },
  {
    "library": "scikit-learn",
    "name": "fit",
    "source_code": "@_fit_context(prefer_skip_nested_validation=False)\ndef fit(self, X, y=None, sample_weight=None):\n    algorithm = self._choose_algorithm(self.algorithm, self.metric)\n    if isinstance(self.bandwidth, str):\n        if self.bandwidth == 'scott':\n            self.bandwidth_ = X.shape[0] ** (-1 / (X.shape[1] + 4))\n        elif self.bandwidth == 'silverman':\n            self.bandwidth_ = (X.shape[0] * (X.shape[1] + 2) / 4) ** (-1 / (X.shape[1] + 4))\n    else:\n        self.bandwidth_ = self.bandwidth\n    X = validate_data(self, X, order='C', dtype=np.float64)\n    if sample_weight is not None:\n        sample_weight = _check_sample_weight(sample_weight, X, dtype=np.float64, ensure_non_negative=True)\n    kwargs = self.metric_params\n    if kwargs is None:\n        kwargs = {}\n    self.tree_ = TREE_DICT[algorithm](X, metric=self.metric, leaf_size=self.leaf_size, sample_weight=sample_weight, **kwargs)\n    return self",
    "docstring": "Fit the Kernel Density model on the data. Parameters ---------- X : array-like of shape (n_samples, n_features) List of n_features-dimensional data points. Each row corresponds to a single data point. y : None Ignored. This parameter exists only for compatibility with :class:. sample_weight : array-like of shape (n_samples,), default=None List of sample weights attached to the data X. .. versionadded:: 0.20 Returns ------- self : object Returns the instance itself.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\neighbors\\_kde.py",
    "ast_data": "FunctionDef name:fit arg:self arg:X arg:y arg:sample_weight arguments arg arg arg arg Assign Call If Call If Compare Assign If Compare Assign Assign Assign Call If Compare Assign Call Assign If Compare Assign Assign Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_maybe_add_default_serving_output",
    "source_code": "def _maybe_add_default_serving_output(export_outputs):\n    if len(export_outputs) == 1:\n        (key, value), = export_outputs.items()\n        if key != signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY:\n            export_outputs[signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY] = value\n    if len(export_outputs) > 1:\n        if signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY not in export_outputs:\n            raise ValueError('Multiple `export_outputs` were provided, but none of them are specified as the default. Use`tf.saved_model.DEFAULT_SERVING_SIGNATURE_DEF_KEY` to specify a default.')\n    return export_outputs",
    "docstring": "Add a default serving output to the export_outputs if not present. Args: export_outputs: Describes the output signatures to be exported to and used during serving. Should be a dict. Returns: export_outputs dict with default serving signature added if necessary Raises: ValueError: if multiple export_outputs were provided without a default serving key.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\saved_model\\model_utils\\export_utils.py",
    "ast_data": "FunctionDef name:_maybe_add_default_serving_output arg:export_outputs arguments arg If Compare Call Assign Call If Compare Assign If Compare Call If Compare Raise Call Return return:yes"
  },
  {
    "library": "scipy",
    "name": "_get_step",
    "source_code": "def _get_step(x, d_x, z, d_z, tau, d_tau, kappa, d_kappa, alpha0):\n    i_x = d_x < 0\n    i_z = d_z < 0\n    alpha_x = alpha0 * np.min(x[i_x] / -d_x[i_x]) if np.any(i_x) else 1\n    alpha_tau = alpha0 * tau / -d_tau if d_tau < 0 else 1\n    alpha_z = alpha0 * np.min(z[i_z] / -d_z[i_z]) if np.any(i_z) else 1\n    alpha_kappa = alpha0 * kappa / -d_kappa if d_kappa < 0 else 1\n    alpha = np.min([1, alpha_x, alpha_tau, alpha_z, alpha_kappa])\n    return alpha",
    "docstring": "An implementation of [4] equation 8.21 References ---------- .. [4] Andersen, Erling D., and Knud D. Andersen. \"The MOSEK interior point optimizer for linear programming: an implementation of the homogeneous algorithm.\" High performance optimization. Springer US, 2000. 197-232.",
    "type": "function",
    "file_path": "scipy\\scipy\\optimize\\_linprog_ip.py",
    "ast_data": "FunctionDef name:_get_step arg:x arg:d_x arg:z arg:d_z arg:tau arg:d_tau arg:kappa arg:d_kappa arg:alpha0 arguments arg arg arg arg arg arg arg arg arg Assign Compare Assign Compare Assign Call Call Assign Compare Assign Call Call Assign Compare Assign Call Return return:yes"
  },
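A self-contained numeric check of the step-to-boundary rule implemented by `_get_step` above: the step length is capped so the iterates stay positive, scaled by `alpha0`. `max_step` is a local helper written for this sketch, not part of SciPy.

```python
# Largest alpha keeping v + alpha*d_v > 0, scaled by alpha0 (per [4], eq. 8.21).
import numpy as np

def max_step(v, d_v, alpha0):
    i = d_v < 0  # only decreasing components constrain the step
    return alpha0 * np.min(v[i] / -d_v[i]) if np.any(i) else 1

x = np.array([1.0, 2.0]); d_x = np.array([-0.5, 1.0])
z = np.array([1.0, 1.0]); d_z = np.array([-2.0, -0.25])
alpha = min(1, max_step(x, d_x, 0.99), max_step(z, d_z, 0.99))
# binding ratio is z[0]/2 = 0.5, so alpha = 0.99 * 0.5 = 0.495
assert np.isclose(alpha, 0.495)
```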
  {
    "library": "tensorflow",
    "name": "manage_all_configs",
    "source_code": "def manage_all_configs(save_results, filename):\n    all_configs = get_all_configs()\n    print_all_configs(all_configs[0], all_configs[1], all_configs[2])\n    if save_results:\n        save_to_file(all_configs[3], filename)",
    "docstring": "Manages configuration detection and retrieval based on user input. Args: save_results: Boolean indicating whether to save the results to a file. filename: String that is the name of the output JSON file.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\tools\\tensorflow_builder\\config_detector\\config_detector.py",
    "ast_data": "FunctionDef name:manage_all_configs arg:save_results arg:filename arguments arg arg Assign Call Call If Call"
  },
  {
    "library": "cherrypy",
    "name": "__copy__",
    "source_code": "def __copy__(self):\n    newobj = self.__class__()\n    newobj.update(self)\n    return newobj",
    "docstring": "Make a copy of this instance.",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\lib\\reprconf.py",
    "ast_data": "FunctionDef name:__copy__ arg:self arguments arg Assign Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_reduce_unsupported",
    "source_code": "def _reduce_unsupported(self, s: Any) -> NoReturn:\n    raise BypassFxGraphCache('Reduce unsupported')",
    "docstring": "Custom reducer to handle any objects that we don't support and therefore raise to bypass caching.",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\codecache.py",
    "ast_data": "FunctionDef name:_reduce_unsupported arg:self arg:s arguments arg arg Raise Call"
  },
  {
    "library": "pytorch",
    "name": "_Await",
    "source_code": "class _Await(torch._C._Await, Generic[W], metaclass=_PyAwaitMeta):\n    pass",
    "docstring": "Wrapper around a `Await[W]` call will be transparently added.",
    "type": "class",
    "file_path": "pytorch\\torch\\_awaits\\__init__.py",
    "ast_data": "ClassDef name:_Await"
  },
  {
    "library": "tensorflow",
    "name": "handle",
    "source_code": "@property\ndef handle(self):\n    return self._handle",
    "docstring": "The handle by which this variable can be accessed.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\resource_variable_ops.py",
    "ast_data": "FunctionDef name:handle arg:self arguments arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "bucketize",
    "source_code": "def bucketize(self, values: T, boundaries: tuple[str, sympy.Expr, sympy.Expr, sympy.Expr], boundary_indices: T, indexing_dtype: torch.dtype, right: bool, sorter: Optional[tuple[str, sympy.Expr]]=None, sorter_indices: Optional[T]=None) -> T:\n    boundaries = (boundaries[0], self._add_index(boundaries[1], MemoryUsageType.BUCKETIZE, buffer_name=boundaries[0]), self._add_index(boundaries[2], MemoryUsageType.BUCKETIZE, buffer_name=boundaries[0]), self._add_index(boundaries[3], MemoryUsageType.BUCKETIZE, buffer_name=boundaries[0]))\n    if sorter is not None:\n        sorter = (sorter[0], self._add_index(sorter[1], MemoryUsageType.BUCKETIZE, buffer_name=sorter[0]))\n    return self._inner.bucketize(values, boundaries, boundary_indices, indexing_dtype, right, sorter, sorter_indices)",
    "docstring": "See [Note: Inductor bucketize op]",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\loop_body.py",
    "ast_data": "FunctionDef name:bucketize arg:self arg:values arg:boundaries arg:boundary_indices arg:indexing_dtype arg:right arg:sorter arg:sorter_indices arguments arg arg arg arg arg arg arg arg Assign Call Call Call If Compare Assign Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "__init__",
    "source_code": "def __init__(self, named_params: Mapping[str, Union[Tensor, ShardedTensor]], optimizer_class, *optimizer_args, **optimizer_kwargs):\n    tensors: list[Tensor] = []\n    for value in named_params.values():\n        if isinstance(value, ShardedTensor):\n            tensors.extend((local_shard.tensor for local_shard in value.local_shards()))\n        else:\n            tensors.append(value)\n    self.named_params = named_params\n    self._optim = optimizer_class(tensors, *optimizer_args, **optimizer_kwargs)\n    self.param_groups = self._optim.param_groups\n    self.state = self._optim.state",
    "docstring": "ShardedOptimizer collects all tensors and local shard tensors of ShardedTensor, then use these tensors as `` for optimizers Args: named_params (Dict[str, Union[Tensor, ShardedTensor]]) : a Dict of parameters, where key is the parameter key, value is either Tensor or ShardedTensor parameter. optimizer_class (torch.optim.Optimizer): the Optimizer to use locally, i.e. torch.optim.SGD, torch.optim.Adagrad, etc. *optimizer_args: the arguments to initialize the optimizer. **optimizer_kwargs: the key-word arguments to initialize the optimizer.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\_shard\\sharded_optim\\api.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:named_params arg:optimizer_class arguments arg arg arg arg arg For Call If Call Call Call Call Assign Assign Call Assign Assign"
  },
  {
    "library": "tensorflow",
    "name": "before_run",
    "source_code": "def before_run(self, run_context):\n    if not self._grpc_debug_wrapper_session:\n        self._grpc_debug_wrapper_session = grpc_wrapper.GrpcDebugWrapperSession(run_context.session, self._grpc_debug_server_addresses, watch_fn=self._watch_fn, thread_name_filter=self._thread_name_filter)\n    fetches = run_context.original_args.fetches\n    feed_dict = run_context.original_args.feed_dict\n    watch_options = self._watch_fn(fetches, feed_dict)\n    run_options = config_pb2.RunOptions()\n    debug_utils.watch_graph(run_options, run_context.session.graph, debug_urls=self._grpc_debug_wrapper_session.prepare_run_debug_urls(fetches, feed_dict), debug_ops=watch_options.debug_ops, node_name_regex_allowlist=watch_options.node_name_regex_allowlist, op_type_regex_allowlist=watch_options.op_type_regex_allowlist, tensor_dtype_regex_allowlist=watch_options.tensor_dtype_regex_allowlist, tolerate_debug_op_creation_failures=watch_options.tolerate_debug_op_creation_failures)\n    return session_run_hook.SessionRunArgs(None, feed_dict=None, options=run_options)",
    "docstring": "Called right before a session is run. Args: run_context: A session_run_hook.SessionRunContext. Encapsulates information on the run. Returns: A session_run_hook.SessionRunArgs object.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\debug\\wrappers\\hooks.py",
    "ast_data": "FunctionDef name:before_run arg:self arg:run_context arguments arg arg If Assign Call Assign Assign Assign Call Assign Call Call Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "eigvals",
    "source_code": "@tf_export('linalg.eigvals', 'eigvals', v1=[])\n@dispatch.add_dispatch_support\ndef eigvals(tensor, name=None):\n    if tensor.dtype == dtypes.float32 or tensor.dtype == dtypes.complex64:\n        out_dtype = dtypes.complex64\n    elif tensor.dtype == dtypes.float64 or tensor.dtype == dtypes.complex128:\n        out_dtype = dtypes.complex128\n    e, _ = gen_linalg_ops.eig(tensor, Tout=out_dtype, compute_v=False, name=name)\n    return e",
    "docstring": "Computes the eigenvalues of one or more matrices. Note: If your program backpropagates through this function, you should replace it with a call to tf.linalg.eig (possibly ignoring the second output) to avoid computing the eigen decomposition twice. This is because the eigenvectors are used to compute the gradient w.r.t. the eigenvalues. See _SelfAdjointEigV2Grad in linalg_grad.py. Args: tensor: of shape . name: string, optional name of the operation. Returns: e: Eigenvalues. Shape is . The vector contains the eigenvalues of .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\linalg_ops.py",
    "ast_data": "FunctionDef name:eigvals arg:tensor arg:name arguments arg arg If BoolOp Compare Compare Assign If BoolOp Compare Compare Assign Assign Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "manual_seed",
    "source_code": "def manual_seed(seed: int, device_mesh: DeviceMesh) -> None:\n    if not is_rng_supported_mesh(device_mesh):\n        warnings.warn(f'DTensor manual_seed() may not have complete support on {device_mesh.device_type} device mesh')\n        return\n    global _rng_tracker\n    if not _rng_tracker:\n        _rng_tracker = OffsetBasedRNGTracker(device_mesh, run_state_sync=False)\n    if device_mesh.get_coordinate() is not None:\n        _rng_tracker._manual_seed(seed)\n    else:\n        raise RuntimeError('manual_seed requires the current rank to be a part of the device mesh otherwise DTensor RNG state on the rank will not be initialized and the behavior of DTensor random ops is undefined.')",
    "docstring": "Sets the seed for generating random numbers for the calling rank. Args: seed (int): The desired seed. device_mesh (:class:): The device mesh to set the seed. It is required that the `manual_seed` will throw an error. Current implementation only supports a GPU device mesh.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\tensor\\_random.py",
    "ast_data": "FunctionDef name:manual_seed arg:seed arg:device_mesh arguments arg arg If Call Call Return return:no If Assign Call If Compare Call Call Raise Call"
  },
  {
    "library": "pandas",
    "name": "add_prefix",
    "source_code": "@final\ndef add_prefix(self, prefix: str, axis: Axis | None=None) -> Self:\n    f = lambda x: f'{prefix}{x}'\n    axis_name = self._info_axis_name\n    if axis is not None:\n        axis_name = self._get_axis_name(axis)\n    mapper = {axis_name: f}\n    return self._rename(**mapper)",
    "docstring": "Prefix labels with string . For Series, the row labels are prefixed. For DataFrame, the column labels are prefixed. Parameters ---------- prefix : str The string to add before each label. axis : {0 or 'index', 1 or 'columns', None}, default None Axis to add prefix on .. versionadded:: 2.0.0 Returns ------- Series or DataFrame New Series or DataFrame with updated labels. See Also -------- Series.add_suffix: Suffix row labels with string . DataFrame.add_suffix: Suffix column labels with string . Examples -------- >>> s = pd.Series([1, 2, 3, 4]) >>> s 0 1 1 2 2 3 3 4 dtype: int64 >>> s.add_prefix(\"item_\") item_0 1 item_1 2 item_2 3 item_3 4 dtype: int64 >>> df = pd.DataFrame({\"A\": [1, 2, 3, 4], \"B\": [3, 4, 5, 6]}) >>> df A B 0 1 3 1 2 4 2 3 5 3 4 6 >>> df.add_prefix(\"col_\") col_A col_B 0 1 3 1 2 4 2 3 5 3 4 6",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\generic.py",
    "ast_data": "FunctionDef name:add_prefix arg:self arg:prefix arg:axis arguments arg arg arg Assign arguments arg Assign If Compare Assign Call Assign Return return:yes Call"
  },
  {
    "library": "django",
    "name": "load_command_class",
    "source_code": "def load_command_class(app_name, name):\n    module = import_module('%s.management.commands.%s' % (app_name, name))\n    return module.Command()",
    "docstring": "Given a command name and an application name, return the Command class instance. Allow all errors raised by the import process (ImportError, AttributeError) to propagate.",
    "type": "function",
    "file_path": "django\\django\\core\\management\\__init__.py",
    "ast_data": "FunctionDef name:load_command_class arg:app_name arg:name arguments arg arg Assign Call Return return:yes Call"
  },
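Usage sketch for `load_command_class`: Django's own management entry point passes 'django.core' as the app name for built-in commands, so the following should resolve the built-in `check` command (assuming Django is importable).

```python
# Load a built-in management command class by app name and command name.
from django.core.management import load_command_class

command = load_command_class("django.core", "check")
print(type(command).__name__)  # -> Command
```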
  {
    "library": "tensorflow",
    "name": "step_function",
    "source_code": "def step_function(model, iterator):\n\n    def run_step(data):\n        outputs = model.predict_step(data)\n        with ops.control_dependencies(_minimum_control_deps(outputs)):\n            model._predict_counter.assign_add(1)\n        return outputs\n    data = next(iterator)\n    outputs = model.distribute_strategy.run(run_step, args=(data,))\n    outputs = reduce_per_replica(outputs, self.distribute_strategy, reduction='concat')\n    return outputs",
    "docstring": "Runs a single evaluation step.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\training.py",
    "ast_data": "FunctionDef name:step_function arg:model arg:iterator arguments arg arg FunctionDef name:run_step arg:data arguments arg Assign Call With Call Call Call Return return:yes Assign Call Assign Call Assign Call Return return:yes"
  },
  {
    "library": "pandas",
    "name": "nanall",
    "source_code": "def nanall(values: np.ndarray, *, axis: AxisInt | None=None, skipna: bool=True, mask: npt.NDArray[np.bool_] | None=None) -> bool:\n    if values.dtype.kind in 'iub' and mask is None:\n        return values.all(axis)\n    if values.dtype.kind == 'M':\n        raise TypeError(\"datetime64 type does not support operation 'all'\")\n    values, _ = _get_values(values, skipna, fill_value=True, mask=mask)\n    if values.dtype == object:\n        values = values.astype(bool)\n    return values.all(axis)",
    "docstring": "Check if all elements along an axis evaluate to True. Parameters ---------- values : ndarray axis : int, optional skipna : bool, default True mask : ndarray[bool], optional nan-mask if known Returns ------- result : bool Examples -------- >>> from pandas.core import nanops >>> s = pd.Series([1, 2, np.nan]) >>> nanops.nanall(s.values) np.True_ >>> from pandas.core import nanops >>> s = pd.Series([1, 0]) >>> nanops.nanall(s.values) np.False_",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\nanops.py",
    "ast_data": "FunctionDef name:nanall arg:values arguments arg arg arg arg If BoolOp Compare Compare Return return:yes Call If Compare Raise Call Assign Call If Compare Assign Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "_calculate_range_stats",
    "source_code": "def _calculate_range_stats(self, x_copy):\n    min_val_cur, max_val_cur = torch.aminmax(x_copy)\n    epoch_min_val = torch.min(self.epoch_activation_min, min_val_cur)\n    epoch_max_val = torch.max(self.epoch_activation_max, max_val_cur)\n    self.epoch_activation_min.copy_(epoch_min_val)\n    self.epoch_activation_max.copy_(epoch_max_val)\n    current_batch_range = max_val_cur - min_val_cur\n    new_range = (self.average_batch_activation_range * self.num_batches_tracked + current_batch_range) / (self.num_batches_tracked + 1)\n    self.average_batch_activation_range = new_range\n    self.num_batches_tracked += 1\n    return x_copy",
    "docstring": "Calculates and stores range stats with forward values. Args x_copy: A copy of the forward data Returns the passed in x_copy",
    "type": "method",
    "file_path": "pytorch\\torch\\ao\\quantization\\fx\\_model_report\\model_report_observer.py",
    "ast_data": "FunctionDef name:_calculate_range_stats arg:self arg:x_copy arguments arg arg Assign Call Assign Call Assign Call Call Call Assign Assign Assign Return return:yes"
  },
  {
    "library": "pandas",
    "name": "_wrap_result",
    "source_code": "def _wrap_result(self, result):\n    if isinstance(result, ABCSeries) and self._selection is not None:\n        result.name = self._selection\n    if isinstance(result, ABCSeries) and result.empty:\n        obj = self.obj\n        result.index = _asfreq_compat(obj.index[:0], freq=self.freq)\n        result.name = getattr(obj, 'name', None)\n    if self._timegrouper._arrow_dtype is not None:\n        result.index = result.index.astype(self._timegrouper._arrow_dtype)\n        result.index.name = self.obj.index.name\n    return result",
    "docstring": "Potentially wrap any results.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\resample.py",
    "ast_data": "FunctionDef name:_wrap_result arg:self arg:result arguments arg arg If BoolOp Call Compare Assign If BoolOp Call Assign Assign Call Assign Call If Compare Assign Call Assign Return return:yes"
  },
  {
    "library": "seaborn",
    "name": "_cmap_from_color",
    "source_code": "def _cmap_from_color(self, color):\n    r, g, b, _ = to_rgba(color)\n    h, s, _ = husl.rgb_to_husl(r, g, b)\n    xx = np.linspace(-1, 1, int(1.15 * 256))[:256]\n    ramp = np.zeros((256, 3))\n    ramp[:, 0] = h\n    ramp[:, 1] = s * np.cos(xx)\n    ramp[:, 2] = np.linspace(35, 80, 256)\n    colors = np.clip([husl.husl_to_rgb(*hsl) for hsl in ramp], 0, 1)\n    return mpl.colors.ListedColormap(colors[::-1])",
    "docstring": "Return a sequential colormap given a color seed.",
    "type": "method",
    "file_path": "seaborn\\seaborn\\distributions.py",
    "ast_data": "FunctionDef name:_cmap_from_color arg:self arg:color arguments arg arg Assign Call Assign Call Assign Call Call Assign Call Assign Assign Call Assign Call Assign Call Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "rename_custom_ops",
    "source_code": "def rename_custom_ops(model, map_custom_op_renames):\n    for op_code in model.operatorCodes:\n        if op_code.customCode:\n            op_code_str = op_code.customCode.decode('ascii')\n            if op_code_str in map_custom_op_renames:\n                op_code.customCode = map_custom_op_renames[op_code_str].encode('ascii')",
    "docstring": "Rename custom ops so they use the same naming style as builtin ops. Args: model: The input tflite model. map_custom_op_renames: A mapping from old to new custom op names.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\lite\\tools\\flatbuffer_utils.py",
    "ast_data": "FunctionDef name:rename_custom_ops arg:model arg:map_custom_op_renames arguments arg arg For If Assign Call If Compare Assign Call"
  },
  {
    "library": "tensorflow",
    "name": "_tag_and_add_meta_graph",
    "source_code": "def _tag_and_add_meta_graph(self, meta_graph_def, tags, signature_def_map):\n    for tag in tags:\n        meta_graph_def.meta_info_def.tags.append(tag)\n    if signature_def_map is not None:\n        for key in signature_def_map:\n            meta_graph_def.signature_def[key].CopyFrom(signature_def_map[key])\n    proto_meta_graph_def = self._saved_model.meta_graphs.add()\n    proto_meta_graph_def.CopyFrom(meta_graph_def)",
    "docstring": "Tags the meta graph def and adds it to the SavedModel. Tags the meta graph def with the supplied tags, adds signature defs to it if provided and appends the meta graph def to the SavedModel proto. Args: meta_graph_def: The meta graph def to add to the SavedModel. tags: The set of tags to annotate the meta graph def with. signature_def_map: The map of signature defs to be added to the meta graph def.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\saved_model\\builder_impl.py",
    "ast_data": "FunctionDef name:_tag_and_add_meta_graph arg:self arg:meta_graph_def arg:tags arg:signature_def_map arguments arg arg arg arg For Call If Compare For Call Assign Call Call"
  },
  {
    "library": "tensorflow",
    "name": "put",
    "source_code": "def put(self, item):\n    with self._not_full:\n        if self._closed:\n            raise QueueClosedError()\n        if self._maxsize > 0:\n            while len(self._queue) == self._maxsize:\n                self._not_full.wait()\n                if self._closed:\n                    raise QueueClosedError()\n        self._queue.append(item)\n        self._not_empty.notify()",
    "docstring": "Put an item into the queue. If the queue is closed, fails immediately. If the queue is full, blocks until space is available or until the queue is closed by a call to close(), at which point this call fails. Args: item: an item to add to the queue Raises: QueueClosedError: if insertion failed because the queue is closed",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\summary\\writer\\event_file_writer.py",
    "ast_data": "FunctionDef name:put arg:self arg:item arguments arg arg With If Raise Call If Compare While Compare Call Call If Raise Call Call Call"
  },
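A self-contained sketch of the pattern used by `put` above: a bounded queue built on `threading.Condition` whose `put` blocks when full and fails once closed. The `close` method here is a hypothetical counterpart implied by the docstring, not the library's exact implementation:

```python
import threading
from collections import deque

class QueueClosedError(Exception):
    pass

class ClosableQueue:
    def __init__(self, maxsize=0):
        self._queue = deque()
        self._maxsize = maxsize
        self._closed = False
        lock = threading.Lock()
        self._not_full = threading.Condition(lock)
        self._not_empty = threading.Condition(lock)

    def put(self, item):
        with self._not_full:
            if self._closed:
                raise QueueClosedError()
            if self._maxsize > 0:
                # wait until space frees up, re-checking closure after each wake-up
                while len(self._queue) == self._maxsize:
                    self._not_full.wait()
                    if self._closed:
                        raise QueueClosedError()
            self._queue.append(item)
            self._not_empty.notify()

    def close(self):
        with self._not_full:
            self._closed = True
            self._not_full.notify_all()
            self._not_empty.notify_all()
```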
  {
    "library": "numpy",
    "name": "as_ref",
    "source_code": "def as_ref(expr):\n    return Expr(Op.REF, expr)",
    "docstring": "Return object as referencing expression.",
    "type": "function",
    "file_path": "numpy\\numpy\\f2py\\symbolic.py",
    "ast_data": "FunctionDef name:as_ref arg:expr arguments arg Return return:yes Call"
  },
  {
    "library": "django",
    "name": "MiddlewareNotUsed",
    "source_code": "class MiddlewareNotUsed(Exception):\n    pass",
    "docstring": "This middleware is not used in this server configuration",
    "type": "class",
    "file_path": "django\\django\\core\\exceptions.py",
    "ast_data": "ClassDef name:MiddlewareNotUsed"
  },
  {
    "library": "kornia",
    "name": "Resize",
    "source_code": "class Resize(Module):\n\n    def __init__(self, size: Union[int, Tuple[int, int]], interpolation: str='bilinear', align_corners: Optional[bool]=None, side: str='short', antialias: bool=False) -> None:\n        super().__init__()\n        self.size: Union[int, Tuple[int, int]] = size\n        self.interpolation: str = interpolation\n        self.align_corners: Optional[bool] = align_corners\n        self.side: str = side\n        self.antialias: bool = antialias\n\n    def forward(self, input: Tensor) -> Tensor:\n        return resize(input, self.size, self.interpolation, align_corners=self.align_corners, side=self.side, antialias=self.antialias)",
    "docstring": "Resize the input Tensor to the given size. Args: size: Desired output size. If size is a sequence like (h, w), output size will be matched to this. If size is an int, smaller edge of the image will be matched to this number. i.e, if height > width, then image will be rescaled to (size * height / width, size) interpolation: algorithm used for upsampling: ``. antialias: if True, then image will be filtered with Gaussian before downscaling. No effect for upscaling. Returns: The resized tensor with the shape of the given size. Example: >>> img = torch.rand(1, 3, 4, 4) >>> out = Resize((6, 8))(img) >>> print(out.shape) torch.Size([1, 3, 6, 8]) .. raw:: html",
    "type": "class",
    "file_path": "kornia\\kornia\\geometry\\transform\\affwarp.py",
    "ast_data": "ClassDef name:Resize FunctionDef name:__init__ arg:self arg:size arg:interpolation arg:align_corners arg:side arg:antialias arguments arg arg arg arg arg arg Call Call FunctionDef name:forward arg:self arg:input arguments arg arg Return return:yes Call"
  },
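Usage following the docstring's own example, plus the `side` parameter from the signature:

```python
import torch
from kornia.geometry.transform import Resize

img = torch.rand(1, 3, 4, 4)
out = Resize((6, 8))(img)            # match an exact (h, w)
print(out.shape)                     # torch.Size([1, 3, 6, 8])

out2 = Resize(6, side="short")(img)  # match the shorter edge to 6 instead
print(out2.shape)
```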
  {
    "library": "matplotlib",
    "name": "__init__",
    "source_code": "def __init__(self, path, transform):\n    _api.check_isinstance(Transform, transform=transform)\n    super().__init__()\n    self._path = path\n    self._transform = transform\n    self.set_children(transform)\n    self._transformed_path = None\n    self._transformed_points = None",
    "docstring": "Parameters ---------- path : transform :",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\transforms.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:path arg:transform arguments arg arg arg Call Call Call Assign Assign Call Assign Assign"
  },
  {
    "library": "pytorch",
    "name": "_generate_input_args_string",
    "source_code": "def _generate_input_args_string(obj):\n    signature = inspect.signature(obj.__class__)\n    input_param_names = set(signature.parameters.keys())\n    result = []\n    for name, value in inspect.getmembers(obj):\n        if name in input_param_names:\n            result.append((name, _simplify_obj_name(value)))\n    return ', '.join([f'{name}={value}' for name, value in result])",
    "docstring": "Generate a string for the input arguments of an object.",
    "type": "function",
    "file_path": "pytorch\\torch\\utils\\data\\datapipes\\_hook_iterator.py",
    "ast_data": "FunctionDef name:_generate_input_args_string arg:obj arguments arg Assign Call Assign Call Call Assign For Call If Compare Call Call Return return:yes Call"
  },
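A standalone sketch of the same introspection trick used by `_generate_input_args_string`: intersect an object's members with its constructor's parameter names. `_simplify` is a hypothetical stand-in for the library's `_simplify_obj_name` helper:

```python
import inspect

def _simplify(value):
    return repr(value)  # placeholder for _simplify_obj_name

class Window:
    def __init__(self, size, step=1):
        self.size = size
        self.step = step

obj = Window(4, step=2)
param_names = set(inspect.signature(obj.__class__).parameters)
pairs = [(n, _simplify(v)) for n, v in inspect.getmembers(obj) if n in param_names]
print(", ".join(f"{name}={value}" for name, value in pairs))  # size=4, step=2
```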
  {
    "library": "tensorflow",
    "name": "_log_sample_num_for_calibration",
    "source_code": "def _log_sample_num_for_calibration(representative_dataset: rd.RepresentativeDataset) -> rd.RepresentativeDataset:\n    num_samples: Optional[int] = rd.get_num_samples(representative_dataset)\n    if num_samples is None:\n        total_num_samples = '?'\n        logging.info('Representative dataset size unknown.')\n    else:\n        total_num_samples = str(num_samples)\n        logging.info('Using representative dataset of size: %s', total_num_samples)\n    sample_num = 0\n    for sample in representative_dataset:\n        sample_num += 1\n        logging.log_every_n(logging.DEBUG, 'Running representative sample for calibration: %d / %s', 5, sample_num, total_num_samples)\n        yield sample\n    logging.info('Running representative samples complete: %d / %s', sample_num, total_num_samples)",
    "docstring": "Logs the sample number for calibration. If in debug logging level, the \"sample number / total num samples\" is logged for every 5 iterations. This is often useful when tracking the progress of the calibration step which is often slow and may look stale if there's no logs being printed. Args: representative_dataset: The representative dataset. Yields: The representative samples from without any modification.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\compiler\\mlir\\quantization\\tensorflow\\python\\py_function_lib.py",
    "ast_data": "FunctionDef name:_log_sample_num_for_calibration arg:representative_dataset arguments arg Call If Compare Assign Call Assign Call Call Assign For Call Call"
  },
  {
    "library": "pandas",
    "name": "__array__",
    "source_code": "def __array__(self, dtype: NpDtype | None=None, copy: bool | None=None) -> np.ndarray:\n    if copy is False:\n        raise ValueError('Unable to avoid copy while creating an array as requested.')\n    left = self._left\n    right = self._right\n    mask = self.isna()\n    closed = self.closed\n    result = np.empty(len(left), dtype=object)\n    for i, left_value in enumerate(left):\n        if mask[i]:\n            result[i] = np.nan\n        else:\n            result[i] = Interval(left_value, right[i], closed)\n    return result",
    "docstring": "Return the IntervalArray's data as a numpy array of Interval objects (with dtype='object')",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\arrays\\interval.py",
    "ast_data": "FunctionDef name:__array__ arg:self arg:dtype arg:copy arguments arg arg arg If Compare Raise Call Assign Assign Assign Call Assign Assign Call Call For Call If Assign Assign Call Return return:yes"
  },
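A usage sketch: `np.asarray` on an `IntervalArray` goes through the `__array__` method above and yields an object-dtype array of `Interval` objects:

```python
import numpy as np
import pandas as pd

arr = pd.arrays.IntervalArray.from_breaks([0, 1, 2, 3])
objs = np.asarray(arr)   # invokes IntervalArray.__array__
print(objs.dtype)        # object
print(objs[0])           # (0, 1]
```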
  {
    "library": "kornia",
    "name": "height",
    "source_code": "@property\ndef height(self) -> int:\n    return int(self.layout.image_size.height)",
    "docstring": "Return the image height (columns).",
    "type": "method",
    "file_path": "kornia\\kornia\\image\\image.py",
    "ast_data": "FunctionDef name:height arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "bar",
    "source_code": "@Substitution(subset=subset_args)\ndef bar(self, subset: Subset | None=None, axis: Axis | None=0, *, color: str | list | tuple | None=None, cmap: Any | None=None, width: float=100, height: float=100, align: str | float | Callable='mid', vmin: float | None=None, vmax: float | None=None, props: str='width: 10em;') -> Styler:\n    if color is None and cmap is None:\n        color = '#d65f5f'\n    elif color is not None and cmap is not None:\n        raise ValueError('`color` and `cmap` cannot both be given')\n    elif color is not None:\n        if isinstance(color, (list, tuple)) and len(color) > 2 or not isinstance(color, (str, list, tuple)):\n            raise ValueError(\"`color` must be string or list or tuple of 2 strings,(eg: color=['#d65f5f', '#5fba7d'])\")\n    if not 0 <= width <= 100:\n        raise ValueError(f'`width` must be a value in [0, 100], got {width}')\n    if not 0 <= height <= 100:\n        raise ValueError(f'`height` must be a value in [0, 100], got {height}')\n    if subset is None:\n        subset = self._get_numeric_subset_default()\n    self.apply(_bar, subset=subset, axis=axis, align=align, colors=color, cmap=cmap, width=width / 100, height=height / 100, vmin=vmin, vmax=vmax, base_css=props)\n    return self",
    "docstring": "Draw bar chart in the cell backgrounds. .. versionchanged:: 1.4.0 Parameters ---------- %(subset)s axis : {0 or 'index', 1 or 'columns', None}, default 0 Apply to each column (`vminvmax\"width: 10em;\"Table Visualization `_ gives a number of examples for different settings and color coordination. Examples -------- >>> df = pd.DataFrame({\"A\": [1, 2, 3, 4], \"B\": [3, 4, 5, 6]}) >>> df.style.bar(subset=[\"A\"], color=\"gray\") # doctest: +SKIP",
    "type": "method",
    "file_path": "pandas\\pandas\\io\\formats\\style.py",
    "ast_data": "FunctionDef name:bar arg:self arg:subset arg:axis arguments arg arg arg arg arg arg arg arg arg arg arg If BoolOp Compare Compare Assign If BoolOp Compare Compare Raise Call If Compare If BoolOp BoolOp Call Compare Call Call Raise Call If Compare Raise Call If Compare Raise Call If Compare Assign Call Call Return return:yes Call"
  },
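Usage following the docstring example; rendering the styled output requires jinja2:

```python
import pandas as pd

df = pd.DataFrame({"A": [1, 2, 3, 4], "B": [3, 4, 5, 6]})
styler = df.style.bar(subset=["A"], color="gray")  # bars only in column A
html = styler.to_html()                            # render to inline-styled HTML
```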
  {
    "library": "matplotlib",
    "name": "simple_linear_interpolation",
    "source_code": "def simple_linear_interpolation(a, steps):\n    fps = a.reshape((len(a), -1))\n    xp = np.arange(len(a)) * steps\n    x = np.arange((len(a) - 1) * steps + 1)\n    return np.column_stack([np.interp(x, xp, fp) for fp in fps.T]).reshape((len(x),) + a.shape[1:])",
    "docstring": "Resample an array with ``",
    "type": "function",
    "file_path": "matplotlib\\lib\\matplotlib\\cbook.py",
    "ast_data": "FunctionDef name:simple_linear_interpolation arg:a arg:steps arguments arg arg Assign Call Call Assign Call Call Assign Call Call Return return:yes Call Call Call Call"
  },
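A small worked example: with `steps=2`, one interpolated point is inserted between each original pair:

```python
import numpy as np
from matplotlib.cbook import simple_linear_interpolation

a = np.array([[0.0, 0.0], [1.0, 2.0]])
print(simple_linear_interpolation(a, 2))
# [[0.  0. ]
#  [0.5 1. ]
#  [1.  2. ]]
```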
  {
    "library": "kornia",
    "name": "DescriptorMatcher",
    "source_code": "class DescriptorMatcher(Module):\n\n    def __init__(self, match_mode: str='snn', th: float=0.8) -> None:\n        super().__init__()\n        _match_mode: str = match_mode.lower()\n        self.known_modes = ['nn', 'mnn', 'snn', 'smnn']\n        if _match_mode not in self.known_modes:\n            raise NotImplementedError(f'{match_mode} is not supported. Try one of {self.known_modes}')\n        self.match_mode = _match_mode\n        self.th = th\n\n    def forward(self, desc1: Tensor, desc2: Tensor) -> Tuple[Tensor, Tensor]:\n        if self.match_mode == 'nn':\n            out = match_nn(desc1, desc2)\n        elif self.match_mode == 'mnn':\n            out = match_mnn(desc1, desc2)\n        elif self.match_mode == 'snn':\n            out = match_snn(desc1, desc2, self.th)\n        elif self.match_mode == 'smnn':\n            out = match_smnn(desc1, desc2, self.th)\n        else:\n            raise NotImplementedError\n        return out",
    "docstring": "Module version of matching functions. See :func:, :func:, :func: or :func: for more details. Args: match_mode: type of matching, can be , , , . th: threshold on distance ratio, or other quality measure.",
    "type": "class",
    "file_path": "kornia\\kornia\\feature\\matching.py",
    "ast_data": "ClassDef name:DescriptorMatcher FunctionDef name:__init__ arg:self arg:match_mode arg:th arguments arg arg arg Call Call Call Assign If Compare Raise Call Assign Assign FunctionDef name:forward arg:self arg:desc1 arg:desc2 arguments arg arg arg If Compare Assign Call If Compare Assign Call If Compare Assign Call If Compare Assign Call Raise Return return:yes"
  },
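A usage sketch, assuming `DescriptorMatcher` is importable from `kornia.feature` (it lives in `kornia/feature/matching.py`):

```python
import torch
from kornia.feature import DescriptorMatcher

desc1 = torch.rand(10, 128)
desc2 = torch.rand(12, 128)
matcher = DescriptorMatcher(match_mode="snn", th=0.8)
dists, idxs = matcher(desc1, desc2)  # match distances and index pairs
print(dists.shape, idxs.shape)
```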
  {
    "library": "numpy",
    "name": "union1d",
    "source_code": "def union1d(ar1, ar2):\n    return unique(ma.concatenate((ar1, ar2), axis=None))",
    "docstring": "Union of two arrays. The output is always a masked array. See for more details. See Also -------- numpy.union1d : Equivalent function for ndarrays. Examples -------- >>> import numpy as np >>> ar1 = np.ma.array([1, 2, 3, 4]) >>> ar2 = np.ma.array([3, 4, 5, 6]) >>> np.ma.union1d(ar1, ar2) masked_array(data=[1, 2, 3, 4, 5, 6], mask=False, fill_value=999999)",
    "type": "function",
    "file_path": "numpy\\numpy\\ma\\extras.py",
    "ast_data": "FunctionDef name:union1d arg:ar1 arg:ar2 arguments arg arg Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "register_module_forward_hook",
    "source_code": "def register_module_forward_hook(hook: Callable[..., None], *, with_kwargs: bool=False, always_call: bool=False) -> RemovableHandle:\n    handle = RemovableHandle(_global_forward_hooks, extra_dict=_global_forward_hooks_always_called)\n    _global_forward_hooks[handle.id] = hook\n    if with_kwargs:\n        _global_forward_hooks_with_kwargs[handle.id] = True\n    if always_call:\n        _global_forward_hooks_always_called[handle.id] = True\n    return handle",
    "docstring": "Register a global forward hook for all the modules. .. warning :: This adds global state to the module and it is only intended for debugging/profiling purposes. The hook will be called every time after :func: has computed an output. It should have the following signature:: hook(module, input, output) -> None or modified output The input contains only the positional arguments given to the module. Keyword arguments won't be passed to the hooks and only to the `forwardtorch.utils.hooks.RemovableHandle`.",
    "type": "function",
    "file_path": "pytorch\\torch\\nn\\modules\\module.py",
    "ast_data": "FunctionDef name:register_module_forward_hook arg:hook arguments arg arg arg Assign Call Assign If Assign If Assign Return return:yes"
  },
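A usage sketch; the import path follows the file_path above:

```python
import torch
import torch.nn as nn
from torch.nn.modules.module import register_module_forward_hook

def log_shapes(module, inputs, output):
    print(type(module).__name__, tuple(output.shape))

handle = register_module_forward_hook(log_shapes)
nn.Linear(4, 2)(torch.randn(1, 4))  # prints: Linear (1, 2)
handle.remove()                     # deregister the global hook
```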
  {
    "library": "scipy",
    "name": "siegelslopes",
    "source_code": "@_axis_nan_policy_factory(SiegelslopesResult, default_axis=None, n_outputs=2, n_samples=_n_samples_optional_x, result_to_tuple=lambda x, _: tuple(x), paired=True, too_small=1)\ndef siegelslopes(y, x=None, method='hierarchical'):\n    if method not in ['hierarchical', 'separate']:\n        raise ValueError(\"method can only be 'hierarchical' or 'separate'\")\n    y = np.asarray(y).ravel()\n    if x is None:\n        x = np.arange(len(y), dtype=float)\n    else:\n        x = np.asarray(x, dtype=float).ravel()\n        if len(x) != len(y):\n            raise ValueError('Array shapes are incompatible for broadcasting.')\n    if len(x) < 2:\n        raise ValueError('`x` and `y` must have length at least 2.')\n    dtype = np.result_type(x, y, np.float32)\n    y, x = (y.astype(dtype), x.astype(dtype))\n    medslope, medinter = siegelslopes_pythran(y, x, method)\n    medslope, medinter = (np.asarray(medslope)[()], np.asarray(medinter)[()])\n    return SiegelslopesResult(slope=medslope, intercept=medinter)",
    "docstring": "Computes the Siegel estimator for a set of points (x, y). implements a method for robust linear regression using repeated medians (see [1]_) to fit a line to the points (x, y). The method is robust to outliers with an asymptotic breakdown point of 50%. Parameters ---------- y : array_like Dependent variable. x : array_like or None, optional Independent variable. If None, use `n-1n-1nnlinregress`: >>> res = stats.siegelslopes(y, x) >>> lsq_res = stats.linregress(x, y) Plot the results. The Siegel regression line is shown in red. The green line shows the least-squares fit for comparison. >>> fig = plt.figure() >>> ax = fig.add_subplot(111) >>> ax.plot(x, y, 'b.') >>> ax.plot(x, res[1] + res[0] * x, 'r-') >>> ax.plot(x, lsq_res[1] + lsq_res[0] * x, 'g-') >>> plt.show()",
    "type": "function",
    "file_path": "scipy\\scipy\\stats\\_stats_mstats_common.py",
    "ast_data": "FunctionDef name:siegelslopes arg:y arg:x arg:method arguments arg arg arg If Compare Raise Call Assign Call Call If Compare Assign Call Call Assign Call Call If Compare Call Call Raise Call If Compare Call Raise Call Assign Call Assign Call Call Assign Call Assign Call Call Return return:yes Call Call arguments arg arg Call"
  },
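A minimal end-to-end example on noisy linear data:

```python
import numpy as np
from scipy import stats

rng = np.random.default_rng(0)
x = np.linspace(-5, 5, 100)
y = 2.0 * x + 1.0 + rng.standard_normal(100)
res = stats.siegelslopes(y, x)
print(res.slope, res.intercept)  # close to 2.0 and 1.0
```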
  {
    "library": "sphinx",
    "name": "handle_signature",
    "source_code": "def handle_signature(self, sig: str, signode: desc_signature) -> ObjDescT:\n    raise ValueError",
    "docstring": "Parse the signature *sig*. The individual nodes are then appended to *signode*. If ValueError is raised, parsing is aborted and the whole *sig* is put into a single desc_name node. The return value should be a value that identifies the object. It is passed to :meth: unchanged, and otherwise only used to skip duplicates.",
    "type": "method",
    "file_path": "sphinx\\sphinx\\directives\\__init__.py",
    "ast_data": "FunctionDef name:handle_signature arg:self arg:sig arg:signode arguments arg arg arg Raise"
  },
  {
    "library": "tensorflow",
    "name": "from_string",
    "source_code": "@classmethod\ndef from_string(cls, spec):\n    return cls(*cls._string_to_components(spec))",
    "docstring": "Construct a from a string. Args: spec: a string of the form /job:/replica:/task:/device:CPU: or /job:/replica:/task:/device:GPU: as cpu and gpu are mutually exclusive. All entries are optional. Returns: A DeviceSpec.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\device_spec.py",
    "ast_data": "FunctionDef name:from_string arg:cls arg:spec arguments arg arg Return return:yes Call Call"
  },
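Usage via the public `tf.DeviceSpec` alias, which this classmethod backs:

```python
import tensorflow as tf

spec = tf.DeviceSpec.from_string("/job:worker/replica:0/task:1/device:GPU:0")
print(spec.job, spec.replica, spec.task, spec.device_type, spec.device_index)
# worker 0 1 GPU 0
```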
  {
    "library": "tensorflow",
    "name": "add_exit_node",
    "source_code": "def add_exit_node(self, ast_node, section_id, guards):\n    node = self._add_jump_node(ast_node, guards)\n    self.exits[section_id].add(node)\n    return node",
    "docstring": "Grows the graph by adding an exit node. This node becomes an exit for the current section. Args: ast_node: ast.AST section_id: Hashable, the node for which ast_node should be considered to be an exit node guards: Tuple[ast.AST, ...], the finally sections that guard ast_node Returns: Node",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\autograph\\pyct\\cfg.py",
    "ast_data": "FunctionDef name:add_exit_node arg:self arg:ast_node arg:section_id arg:guards arguments arg arg arg arg Assign Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_generate_object_paths",
    "source_code": "def _generate_object_paths(object_graph_def):\n    paths = {0: 'root'}\n    nodes_to_visit = [0]\n    while nodes_to_visit:\n        current_node = nodes_to_visit.pop()\n        current_path = paths[current_node]\n        for reference in object_graph_def.nodes[current_node].children:\n            if reference.node_id in paths:\n                continue\n            paths[reference.node_id] = '{}.{}'.format(current_path, reference.local_name)\n            nodes_to_visit.append(reference.node_id)\n    return paths",
    "docstring": "Traverses through an ObjectGraphDef and builds a map of all node paths.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\saving\\saved_model\\load.py",
    "ast_data": "FunctionDef name:_generate_object_paths arg:object_graph_def arguments arg Assign Assign While Assign Call Assign For If Compare Assign Call Call Return return:yes"
  },
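The traversal in `_generate_object_paths` can be sketched standalone, with a hypothetical child map standing in for the ObjectGraphDef proto:

```python
# Hypothetical stand-in graph: node id -> [(child_id, local_name), ...]
children = {
    0: [(1, "layer1"), (2, "optimizer")],
    1: [(3, "kernel")],
    2: [],
    3: [],
}

paths = {0: "root"}
nodes_to_visit = [0]
while nodes_to_visit:
    current = nodes_to_visit.pop()
    for child_id, local_name in children[current]:
        if child_id in paths:  # already reached via another path
            continue
        paths[child_id] = f"{paths[current]}.{local_name}"
        nodes_to_visit.append(child_id)

print(paths)
# {0: 'root', 1: 'root.layer1', 2: 'root.optimizer', 3: 'root.layer1.kernel'}
```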
  {
    "library": "tensorflow",
    "name": "disable_run_metadata",
    "source_code": "def disable_run_metadata():\n    context().disable_run_metadata()",
    "docstring": "Disables tracing of op execution via RunMetadata.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\context.py",
    "ast_data": "FunctionDef name:disable_run_metadata arguments Call Call"
  },
  {
    "library": "sphinx",
    "name": "builtin_resolver",
    "source_code": "def builtin_resolver(app: Sphinx, env: BuildEnvironment, node: pending_xref, contnode: Element) -> Element | None:\n\n    def istyping(s: str) -> bool:\n        if s.startswith('typing.'):\n            s = s.split('.', 1)[1]\n        return s in typing.__all__\n    if node.get('refdomain') != 'py':\n        return None\n    elif node.get('reftype') in {'class', 'obj'} and node.get('reftarget') == 'None':\n        return contnode\n    elif node.get('reftype') in {'class', 'obj', 'exc'}:\n        reftarget = node.get('reftarget')\n        if inspect.isclass(getattr(builtins, reftarget, None)):\n            return contnode\n        if istyping(reftarget):\n            return contnode\n    return None",
    "docstring": "Do not emit nitpicky warnings for built-in types.",
    "type": "function",
    "file_path": "sphinx\\sphinx\\domains\\python\\__init__.py",
    "ast_data": "FunctionDef name:builtin_resolver arg:app arg:env arg:node arg:contnode arguments arg arg arg arg FunctionDef name:istyping arg:s arguments arg If Call Assign Call Return return:yes Compare If Compare Call Return return:no If BoolOp Compare Call Compare Call Return return:yes If Compare Call Assign Call If Call Call Return return:yes If Call Return return:yes Return return:no"
  },
  {
    "library": "authlib",
    "name": "ClientSecretJWT",
    "source_code": "class ClientSecretJWT:\n    name = 'client_secret_jwt'\n    alg = 'HS256'\n\n    def __init__(self, token_endpoint=None, claims=None, headers=None, alg=None):\n        self.token_endpoint = token_endpoint\n        self.claims = claims\n        self.headers = headers\n        if alg is not None:\n            self.alg = alg\n\n    def sign(self, auth, token_endpoint):\n        return client_secret_jwt_sign(auth.client_secret, client_id=auth.client_id, token_endpoint=token_endpoint, claims=self.claims, header=self.headers, alg=self.alg)\n\n    def __call__(self, auth, method, uri, headers, body):\n        token_endpoint = self.token_endpoint\n        if not token_endpoint:\n            token_endpoint = uri\n        client_assertion = self.sign(auth, token_endpoint)\n        body = add_params_to_qs(body or '', [('client_assertion_type', ASSERTION_TYPE), ('client_assertion', client_assertion)])\n        return (uri, headers, body)",
    "docstring": "Authentication method for OAuth 2.0 Client. This authentication method is called `` value, default is HS256",
    "type": "class",
    "file_path": "authlib\\authlib\\oauth2\\rfc7523\\auth.py",
    "ast_data": "ClassDef name:ClientSecretJWT Assign Assign FunctionDef name:__init__ arg:self arg:token_endpoint arg:claims arg:headers arg:alg arguments arg arg arg arg arg Assign Assign Assign If Compare Assign FunctionDef name:sign arg:self arg:auth arg:token_endpoint arguments arg arg arg Return return:yes Call FunctionDef name:__call__ arg:self arg:auth arg:method arg:uri arg:headers arg:body arguments arg arg arg arg arg arg Assign If Assign Assign Call Assign Call BoolOp Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "_hover",
    "source_code": "def _hover(self, event):\n    if self.ignore(event):\n        return\n    if self._active_handle is not None or not self._selection_completed:\n        return\n    _, e_dist = self._edge_handles.closest(event.x, event.y)\n    self._set_cursor(e_dist <= self.grab_range)",
    "docstring": "Update the canvas cursor if it's over a handle.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\widgets.py",
    "ast_data": "FunctionDef name:_hover arg:self arg:event arguments arg arg If Call Return return:no If BoolOp Compare Return return:no Assign Call Call Compare"
  },
  {
    "library": "django",
    "name": "addslashes",
    "source_code": "@register.filter(is_safe=True)\n@stringfilter\ndef addslashes(value):\n    return value.replace('\\\\', '\\\\\\\\').replace('\"', '\\\\\"').replace(\"'\", \"\\\\'\")",
    "docstring": "Add slashes before quotes. Useful for escaping strings in CSV, for example. Less useful for escaping JavaScript; use the `` filter instead.",
    "type": "function",
    "file_path": "django\\django\\template\\defaultfilters.py",
    "ast_data": "FunctionDef name:addslashes arg:value arguments arg Return return:yes Call Call Call Call"
  },
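The filter body boils down to three chained `str.replace` calls; shown standalone here to avoid Django settings setup:

```python
def add_slashes(value):
    # backslashes first, so escaped quotes are not double-escaped
    return value.replace("\\", "\\\\").replace('"', '\\"').replace("'", "\\'")

print(add_slashes("I'm \"quoted\""))  # I\'m \"quoted\"
```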
  {
    "library": "tensorflow",
    "name": "op",
    "source_code": "@property\ndef op(self):\n    return self._op",
    "docstring": "The operation that failed, if known. *N.B.* If the failed op was synthesized at runtime, e.g. a or op, there will be no corresponding object. In that case, this will return , and you should instead use the to discover information about the op. Returns: The that failed, or None.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\errors_impl.py",
    "ast_data": "FunctionDef name:op arg:self arguments arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "is_pinned",
    "source_code": "def is_pinned(self) -> bool:\n    return self.data.is_pinned()",
    "docstring": "Return true if stored on in pinned memory.",
    "type": "method",
    "file_path": "pytorch\\torch\\nn\\utils\\rnn.py",
    "ast_data": "FunctionDef name:is_pinned arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "enable_strict_mode",
    "source_code": "@tf_export('experimental.enable_strict_mode')\ndef enable_strict_mode():\n    global STRICT_MODE\n    STRICT_MODE = True",
    "docstring": "If called, enables strict mode for all behaviors. Used to switch all deprecation warnings to raise errors instead.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\strict_mode.py",
    "ast_data": "FunctionDef name:enable_strict_mode arguments Assign Call"
  },
  {
    "library": "matplotlib",
    "name": "same_color",
    "source_code": "def same_color(c1, c2):\n    c1 = to_rgba_array(c1)\n    c2 = to_rgba_array(c2)\n    n1 = max(c1.shape[0], 1)\n    n2 = max(c2.shape[0], 1)\n    if n1 != n2:\n        raise ValueError('Different number of elements passed.')\n    return c1.shape == c2.shape and (c1 == c2).all()",
    "docstring": "Return whether the colors *c1* and *c2* are the same. *c1*, *c2* can be single colors or lists/arrays of colors.",
    "type": "function",
    "file_path": "matplotlib\\lib\\matplotlib\\colors.py",
    "ast_data": "FunctionDef name:same_color arg:c1 arg:c2 arguments arg arg Assign Call Assign Call Assign Call Assign Call If Compare Raise Call Return return:yes BoolOp Compare Call Compare"
  },
  {
    "library": "scipy",
    "name": "hilbert2",
    "source_code": "def hilbert2(x, N=None):\n    xp = array_namespace(x)\n    x = xpx.atleast_nd(xp.asarray(x), ndim=2, xp=xp)\n    if x.ndim > 2:\n        raise ValueError('x must be 2-D.')\n    if xp.isdtype(x.dtype, 'complex floating'):\n        raise ValueError('x must be real.')\n    if N is None:\n        N = x.shape\n    elif isinstance(N, int):\n        if N <= 0:\n            raise ValueError('N must be positive.')\n        N = (N, N)\n    elif len(N) != 2 or xp.any(xp.asarray(N) <= 0):\n        raise ValueError('When given as a tuple, N must hold exactly two positive integers')\n    Xf = sp_fft.fft2(x, N, axes=(0, 1))\n    h1 = xp.zeros(N[0], dtype=Xf.dtype)\n    h2 = xp.zeros(N[1], dtype=Xf.dtype)\n    for h in (h1, h2):\n        N1 = h.shape[0]\n        if N1 % 2 == 0:\n            h[0] = h[N1 // 2] = 1\n            h[1:N1 // 2] = 2\n        else:\n            h[0] = 1\n            h[1:(N1 + 1) // 2] = 2\n    h = h1[:, xp.newaxis] * h2[xp.newaxis, :]\n    k = x.ndim\n    while k > 2:\n        h = h[:, xp.newaxis]\n        k -= 1\n    x = sp_fft.ifft2(Xf * h, axes=(0, 1))\n    return x",
    "docstring": "Compute the '2-D' analytic signal of Parameters ---------- x : array_like 2-D signal data. N : int or tuple of two ints, optional Number of Fourier components. Default is `x` taken along axes (0,1). References ---------- .. [1] Wikipedia, \"Analytic signal\",",
    "type": "function",
    "file_path": "scipy\\scipy\\signal\\_signaltools.py",
    "ast_data": "FunctionDef name:hilbert2 arg:x arg:N arguments arg arg Assign Call Assign Call Call If Compare Raise Call If Call Raise Call If Compare Assign If Call If Compare Raise Call Assign If BoolOp Compare Call Call Compare Call Raise Call Assign Call Assign Call Assign Call For Assign If Compare Assign Assign Assign Assign Assign Assign While Compare Assign Assign Call Return return:yes"
  },
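A quick check of the analytic-signal property: the real part of the output recovers the input:

```python
import numpy as np
from scipy.signal import hilbert2

x = np.random.default_rng(0).standard_normal((8, 8))
xa = hilbert2(x)
print(xa.shape, xa.dtype)       # (8, 8) complex128
print(np.allclose(xa.real, x))  # True
```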
  {
    "library": "pytorch",
    "name": "preferred_rocm_fa_library",
    "source_code": "def preferred_rocm_fa_library(backend: Union[None, str, torch._C._ROCmFABackend]=None) -> torch._C._ROCmFABackend:\n    if backend is None:\n        pass\n    elif isinstance(backend, str):\n        if backend not in _ROCmFABackends:\n            raise RuntimeError(f'Unknown input value. Choose from: {_ROCmFABackends_str}.')\n        torch._C._set_rocm_fa_preferred_backend(_ROCmFABackends[backend])\n    elif isinstance(backend, torch._C._ROCmFABackend):\n        torch._C._set_rocm_fa_preferred_backend(backend)\n    else:\n        raise ValueError(f'Unknown input value. Choose from: {_ROCmFABackends_str}.')\n    return torch._C._get_rocm_fa_preferred_backend()",
    "docstring": "[ROCm-only] Override the backend PyTorch uses in ROCm environments for Flash Attention. Choose between AOTriton and CK .. warning:: This flag is experimeental and subject to change. When Flash Attention is enabled and desired, PyTorch defaults to using AOTriton as the backend. This flag (a :class:) allows users to override this backend to use composable_kernel * If is set then the default backend will be used wherever possible. Currently AOTriton. * If is set then AOTriton will be used wherever possible. * If is set then CK will be used wherever possible. * When no input is given, this function returns the currently preferred library. * User may use the environment variable TORCH_ROCM_FA_PREFER_CK=1 to set the preferred library to CK globally. Note: When a library is preferred other libraries may still be used if the preferred library doesn't implement the operation(s) called. This flag may achieve better performance if PyTorch's library selection is incorrect for your application's inputs.",
    "type": "function",
    "file_path": "pytorch\\torch\\backends\\cuda\\__init__.py",
    "ast_data": "FunctionDef name:preferred_rocm_fa_library arg:backend arguments arg If Compare If Call If Compare Raise Call Call If Call Call Raise Call Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "from_blocks",
    "source_code": "@classmethod\ndef from_blocks(cls, blocks: list[Block], axes: list[Index]) -> Self:\n    return cls(blocks, axes, verify_integrity=False)",
    "docstring": "Constructor for BlockManager and SingleBlockManager with same signature.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\internals\\managers.py",
    "ast_data": "FunctionDef name:from_blocks arg:cls arg:blocks arg:axes arguments arg arg arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "lookup",
    "source_code": "def lookup(self, keys, name=None):\n    with ops.name_scope(name, '%s_lookup_table_find' % self.name, [self.resource_handle, keys]):\n        keys = ops.convert_to_tensor(keys, dtype=self._key_dtype, name='keys')\n        with ops.colocate_with(self.resource_handle):\n            values = gen_lookup_ops.lookup_table_find_v2(self.resource_handle, keys, self._default_value)\n    return values",
    "docstring": "Looks up in a table, outputs the corresponding values. The is used for keys not present in the table. Args: keys: Keys to look up. Can be a tensor of any shape. Must match the table's key_dtype. name: A name for the operation (optional). Returns: A tensor containing the values in the same shape as using the table's value type. Raises: TypeError: when do not match the table data types.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\lookup_ops.py",
    "ast_data": "FunctionDef name:lookup arg:self arg:keys arg:name arguments arg arg arg With Call Assign Call With Call Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "accumulator_ref",
    "source_code": "@property\ndef accumulator_ref(self):\n    return self._accumulator_ref",
    "docstring": "The underlying accumulator reference.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\data_flow_ops.py",
    "ast_data": "FunctionDef name:accumulator_ref arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_LogSoftmaxGrad",
    "source_code": "@ops.RegisterGradient('LogSoftmax')\ndef _LogSoftmaxGrad(op: ops.Operation, grad):\n    softmax = math_ops.exp(op.outputs[0])\n    return grad - math_ops.reduce_sum(grad, -1, keepdims=True) * softmax",
    "docstring": "The gradient for log_softmax. log_softmax = input - log(sum(exp(input)) dlog_softmax/dinput = diag - softmax(input) Args: op: The log softmax op. grad: The tensor representing the gradient w.r.t. the output. Returns: The gradients w.r.t. the input.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\nn_grad.py",
    "ast_data": "FunctionDef name:_LogSoftmaxGrad arg:op arg:grad arguments arg arg Assign Call Return return:yes Call Call"
  },
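The registered gradient formula can be verified numerically; everything below is an illustrative NumPy re-derivation with central finite differences, not TensorFlow code:

```python
import numpy as np

def log_softmax(x):
    return x - np.log(np.sum(np.exp(x), axis=-1, keepdims=True))

rng = np.random.default_rng(0)
x, grad_out = rng.standard_normal(5), rng.standard_normal(5)

# the backward rule above: grad - sum(grad) * softmax
softmax = np.exp(log_softmax(x))
grad_in = grad_out - np.sum(grad_out, axis=-1, keepdims=True) * softmax

# central finite differences of f(x) = sum(grad_out * log_softmax(x))
eps, num = 1e-6, np.zeros(5)
for i in range(5):
    d = np.zeros(5)
    d[i] = eps
    num[i] = np.sum(grad_out * (log_softmax(x + d) - log_softmax(x - d))) / (2 * eps)

print(np.allclose(grad_in, num, atol=1e-5))  # True
```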
  {
    "library": "kornia",
    "name": "torch_version",
    "source_code": "def torch_version() -> str:\n    return torch.__version__.split('+')[0]",
    "docstring": "Parse the variable and removes +cu*/cpu.",
    "type": "function",
    "file_path": "kornia\\kornia\\utils\\_compat.py",
    "ast_data": "FunctionDef name:torch_version arguments Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "make_sample_test_batch",
    "source_code": "def make_sample_test_batch(raw_data_path, processed_data_path, device):\n    test_data_loader = make_test_data_loader(raw_data_path, processed_data_path)\n    test_iter = iter(test_data_loader)\n    test_batch = next(test_iter)\n    X_test, lS_o_test, lS_i_test, _, _, _ = unpack_batch(test_batch)\n    X, lS_o, lS_i = dlrm_wrap(X_test, lS_o_test, lS_i_test, device)\n    batch = {'X': X, 'lS_o': lS_o, 'lS_i': lS_i}\n    return batch",
    "docstring": "Create the test_data_loader and sample a batch from it. This batch will be used to measure the forward pass of the model throughout this experiment.",
    "type": "function",
    "file_path": "pytorch\\torch\\ao\\pruning\\_experimental\\data_sparsifier\\benchmarks\\evaluate_forward_time.py",
    "ast_data": "FunctionDef name:make_sample_test_batch arg:raw_data_path arg:processed_data_path arg:device arguments arg arg arg Assign Call Assign Call Assign Call Assign Call Assign Call Assign Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "HCentered",
    "source_code": "class HCentered(Hlist):\n\n    def __init__(self, elements: list[Node]):\n        super().__init__([Glue('ss'), *elements, Glue('ss')], do_kern=False)",
    "docstring": "A convenience class to create an whose contents are centered within its enclosing box.",
    "type": "class",
    "file_path": "matplotlib\\lib\\matplotlib\\_mathtext.py",
    "ast_data": "ClassDef name:HCentered FunctionDef name:__init__ arg:self arg:elements arguments arg arg Call Call Call Call"
  },
  {
    "library": "scipy",
    "name": "pmf",
    "source_code": "def pmf(self, k, *args, **kwds):\n    args, loc, _ = self._parse_args(*args, **kwds)\n    k, loc = map(asarray, (k, loc))\n    args = tuple(map(asarray, args))\n    _a, _b = self._get_support(*args)\n    k = asarray(k - loc)\n    cond0 = self._argcheck(*args)\n    cond1 = (k >= _a) & (k <= _b)\n    if not isinstance(self, rv_sample):\n        cond1 = cond1 & self._nonzero(k, *args)\n    cond = cond0 & cond1\n    output = zeros(shape(cond), 'd')\n    place(output, 1 - cond0 + np.isnan(k), self.badvalue)\n    if np.any(cond):\n        goodargs = argsreduce(cond, *(k,) + args)\n        place(output, cond, np.clip(self._pmf(*goodargs), 0, 1))\n    if output.ndim == 0:\n        return output[()]\n    return output",
    "docstring": "Probability mass function at k of the given RV. Parameters ---------- k : array_like Quantiles. arg1, arg2, arg3,... : array_like The shape parameter(s) for the distribution (see docstring of the instance object for more information) loc : array_like, optional Location parameter (default=0). Returns ------- pmf : array_like Probability mass function evaluated at k",
    "type": "method",
    "file_path": "scipy\\scipy\\stats\\_distn_infrastructure.py",
    "ast_data": "FunctionDef name:pmf arg:self arg:k arguments arg arg arg arg Assign Call Assign Call Assign Call Call Assign Call Assign Call Assign Call Assign Compare Compare If Call Assign Call Assign Assign Call Call Call Call If Call Assign Call Call Call Call If Compare Return return:yes Return return:yes"
  },
  {
    "library": "sphinx",
    "name": "TypeAliasNamespace",
    "source_code": "class TypeAliasNamespace(Mapping[str, Any]):\n\n    def __init__(self, mapping: Mapping[str, str]) -> None:\n        super().__init__()\n        self.__mapping = mapping\n\n    def __getitem__(self, key: object) -> Any:\n        if not isinstance(key, str):\n            raise KeyError\n        if key in self.__mapping:\n            return TypeAliasForwardRef(self.__mapping[key])\n        else:\n            prefix = key + '.'\n            nested = {k: v for k, v in self.__mapping.items() if k.startswith(prefix)}\n            if nested:\n                return TypeAliasModule(key, nested)\n            else:\n                raise KeyError\n\n    def __contains__(self, key: object) -> bool:\n        if not isinstance(key, str):\n            return False\n        ns = self.__mapping\n        prefix = f'{key}.'\n        return key in ns or any((k.startswith(prefix) for k in ns))\n\n    def __iter__(self) -> Iterator[str]:\n        for k in self.__mapping:\n            yield k\n            for i in range(k.count('.')):\n                yield k.rsplit('.', i + 1)[0]\n\n    def __len__(self) -> int:\n        return sum((k.count('.') + 1 for k in self.__mapping))",
    "docstring": "Pseudo namespace class for :confval:. Useful for looking up nested objects via ``.",
    "type": "class",
    "file_path": "sphinx\\sphinx\\util\\inspect.py",
    "ast_data": "ClassDef name:TypeAliasNamespace FunctionDef name:__init__ arg:self arg:mapping arguments arg arg Call Call Assign FunctionDef name:__getitem__ arg:self arg:key arguments arg arg If Call Raise If Compare Return return:yes Call Assign Assign Call Call If Return return:yes Call Raise FunctionDef name:__contains__ arg:self arg:key arguments arg arg If Call Return return:yes Assign Assign Return return:yes BoolOp Compare Call Call FunctionDef name:__iter__ arg:self arguments arg For For Call Call Call FunctionDef name:__len__ arg:self arguments arg Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "check_collective_ops_peer_health",
    "source_code": "def check_collective_ops_peer_health(self, task, timeout_in_ms):\n    self.ensure_initialized()\n    pywrap_tfe.TFE_CollectiveOpsCheckPeerHealth(self._handle, task, timeout_in_ms)",
    "docstring": "Check collective peer health. This probes each task to see if they're still alive. Note that restarted tasks are considered a different one, and they're considered not healthy. This should only be used in multi client multi worker training. Args: task: a task string, must be in the format of /job:xxx/replica:0/task:N. timeout_in_ms: an integer, the timeout. If zero, there's no timeout. Raises: tf.errors.UnavailableError: when a peer is down. tf.errors.FailedPreconditionError: when a peer is a different one from the one this task has talked to, e.g. the peer has restarted. tf.errors.InvalidArgumentError: when the task string is invalid.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\context.py",
    "ast_data": "FunctionDef name:check_collective_ops_peer_health arg:self arg:task arg:timeout_in_ms arguments arg arg arg Call Call"
  },
  {
    "library": "kornia",
    "name": "mean_iou",
    "source_code": "def mean_iou(pred: torch.Tensor, target: torch.Tensor, num_classes: int, eps: float=1e-06) -> torch.Tensor:\n    if not torch.is_tensor(pred) and pred.dtype is not torch.int64:\n        raise TypeError(f'Input pred type is not a torch.Tensor with torch.int64 dtype. Got {type(pred)}')\n    if not torch.is_tensor(target) and target.dtype is not torch.int64:\n        raise TypeError(f'Input target type is not a torch.Tensor with torch.int64 dtype. Got {type(target)}')\n    if not pred.shape == target.shape:\n        raise ValueError(f'Inputs pred and target must have the same shape. Got: {pred.shape} and {target.shape}')\n    if not pred.device == target.device:\n        raise ValueError(f'Inputs must be in the same device. Got: {pred.device} - {target.device}')\n    if not isinstance(num_classes, int) or num_classes < 2:\n        raise ValueError(f'The number of classes must be an integer bigger than two. Got: {num_classes}')\n    conf_mat: torch.Tensor = confusion_matrix(pred, target, num_classes)\n    sum_over_row = torch.sum(conf_mat, dim=1)\n    sum_over_col = torch.sum(conf_mat, dim=2)\n    conf_mat_diag = torch.diagonal(conf_mat, dim1=-2, dim2=-1)\n    denominator = sum_over_row + sum_over_col - conf_mat_diag\n    ious = (conf_mat_diag + eps) / (denominator + eps)\n    return ious",
    "docstring": "Calculate mean Intersection-Over-Union (mIOU). The function internally computes the confusion matrix. Args: pred : tensor with estimated targets returned by a classifier. The shape can be :math: and must contain integer values between 0 and K-1. target: tensor with ground truth (correct) target values. The shape can be :math: and must contain integer values between 0 and K-1, where targets are assumed to be provided as one-hot vectors. num_classes: total possible number of classes in target. eps: epsilon for numerical stability. Returns: a tensor representing the mean intersection-over union with shape :math: where K is the number of classes. Example: >>> logits = torch.tensor([[0, 1, 0]]) >>> target = torch.tensor([[0, 1, 0]]) >>> mean_iou(logits, target, num_classes=3) tensor([[1., 1., 1.]])",
    "type": "function",
    "file_path": "kornia\\kornia\\metrics\\mean_iou.py",
    "ast_data": "FunctionDef name:mean_iou arg:pred arg:target arg:num_classes arg:eps arguments arg arg arg arg If BoolOp Call Compare Raise Call Call If BoolOp Call Compare Raise Call Call If Compare Raise Call If Compare Raise Call If BoolOp Call Compare Raise Call Call Assign Call Assign Call Assign Call Assign Assign Return return:yes"
  },
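Usage following the docstring example:

```python
import torch
from kornia.metrics import mean_iou

pred = torch.tensor([[0, 1, 0]])
target = torch.tensor([[0, 1, 0]])
print(mean_iou(pred, target, num_classes=3))  # tensor([[1., 1., 1.]])
```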
  {
    "library": "authlib",
    "name": "post",
    "source_code": "def post(self, url, **kwargs):\n    return self.request('POST', url, **kwargs)",
    "docstring": "Invoke POST http request. If `` configured, shortcut is available:: client.post(\"timeline\", json={\"text\": \"Hi\"})",
    "type": "method",
    "file_path": "authlib\\authlib\\integrations\\base_client\\sync_app.py",
    "ast_data": "FunctionDef name:post arg:self arg:url arguments arg arg arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_process_watchdog",
    "source_code": "def _process_watchdog(self):\n    while True:\n        time.sleep(1)\n        with self._process_lock:\n            chief = self._processes.get(('chief', 0), None)\n            if chief and self._dependence_on_chief and (chief.exitcode is not None):\n                if chief.exitcode == 0 or not self._auto_restart:\n                    for p in self._processes.values():\n                        p.join(timeout=3)\n                    self._terminate_all()\n                    for p in self._processes.values():\n                        p.join()\n                    return\n            if self._auto_restart:\n                has_failure = False\n                for (task_type, task_id), p in self._processes.items():\n                    if p.exitcode is not None and p.exitcode != 0:\n                        has_failure = True\n                        logging.info('Restarting failed %s-%d', task_type, task_id)\n                        self._start_subprocess_and_reading_thread(task_type, task_id)\n                if has_failure:\n                    continue\n            if all((p.exitcode is not None for p in self._processes.values())):\n                return",
    "docstring": "Simulates a cluster management system. - If auto_restart is True, it restarts processes that exit with a non-zero exit code. Note that when join() times out it overrides auto_restart to False. - If dependence_on_chief is True, it terminates all processes once the chief exits. If auto_restart is also True, it only terminates all processes if the chief exit with a zero exit code, otherwise it restarts the chief. This runs in self._watchdog_thread.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\multi_process_runner.py",
    "ast_data": "FunctionDef name:_process_watchdog arg:self arguments arg While Call With Assign Call If BoolOp Compare If BoolOp Compare For Call Call Call For Call Call Return return:no If Assign For Call If BoolOp Compare Compare Assign Call Call If If Call Compare Call Return return:no"
  },
  {
    "library": "tensorflow",
    "name": "on_train_batch_begin",
    "source_code": "def on_train_batch_begin(self, batch, logs=None):\n    if self._should_call_train_batch_hooks:\n        self._call_batch_hook(ModeKeys.TRAIN, 'begin', batch, logs=logs)",
    "docstring": "Calls the methods of its callbacks. Args: batch: Integer, index of batch within the current epoch. logs: Dict, contains the return value of . Typically, the values of the 's metrics are returned. Example: .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\callbacks.py",
    "ast_data": "FunctionDef name:on_train_batch_begin arg:self arg:batch arg:logs arguments arg arg arg If Call"
  },
  {
    "library": "pandas",
    "name": "get_indexer_dict",
    "source_code": "def get_indexer_dict(label_list: list[np.ndarray], keys: list[Index]) -> dict[Hashable, npt.NDArray[np.intp]]:\n    shape = tuple((len(x) for x in keys))\n    group_index = get_group_index(label_list, shape, sort=True, xnull=True)\n    if np.all(group_index == -1):\n        return {}\n    ngroups = (group_index.size and group_index.max()) + 1 if is_int64_overflow_possible(shape) else np.prod(shape, dtype='i8')\n    sorter = get_group_index_sorter(group_index, ngroups)\n    sorted_labels = [lab.take(sorter) for lab in label_list]\n    group_index = group_index.take(sorter)\n    return lib.indices_fast(sorter, group_index, keys, sorted_labels)",
    "docstring": "Returns ------- dict: Labels mapped to indexers.",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\sorting.py",
    "ast_data": "FunctionDef name:get_indexer_dict arg:label_list arg:keys arguments arg arg Assign Call Call Assign Call If Call Compare Return return:no Assign Call BoolOp Call Call Assign Call Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_validate_cluster_spec",
    "source_code": "def _validate_cluster_spec(cluster_spec, task_type, task_id):\n    allowed_task_types = ('chief', 'worker', 'evaluator', 'ps', None)\n    cluster_spec = normalize_cluster_spec(cluster_spec)\n    if any((job not in allowed_task_types for job in cluster_spec.jobs)):\n        raise ValueError('Disallowed task type found in cluster spec. Allowed types are {} and the cluster spec is {}.'.format(allowed_task_types, cluster_spec))\n    if task_type not in allowed_task_types:\n        raise ValueError('Unrecognized task_type: {}, valid task types are: {}'.format(task_type, allowed_task_types))\n    if task_type and task_type not in cluster_spec.jobs and (task_type != 'evaluator'):\n        raise ValueError('`task_type` %r not found in cluster_spec.' % task_type)\n    if task_count(cluster_spec, 'chief') > 1:\n        raise ValueError(\"There must be at most one 'chief' job.\")\n    if task_count(cluster_spec, 'evaluator') > 1:\n        raise ValueError(\"There must be at most one 'evaluator' job.\")\n    if task_type in cluster_spec.jobs and task_id >= task_count(cluster_spec, task_type):\n        raise ValueError('The `task_id` %d exceeds the maximum id of %s.' % (task_id, task_type))",
    "docstring": "Validates . It checks: 1) task type is one of \"chief\", \"worker\", \"ps\", \"evaluator\", or not provided (None). 2) whether there is such a task type as in the . The only exception is . In other words, it is still a valid configuration when is but it doesn't appear in . 3) whether there is at most one \"chief\" job. 4) whether there is at most one \"evaluator\" job. 5) whether the is smaller than the number of tasks for that particular . Args: cluster_spec: a dict, or object to be validated. task_type: string indicating the type of the task. task_id: the id of the in this cluster. Raises: ValueError: if fails any check.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\multi_worker_util.py",
    "ast_data": "FunctionDef name:_validate_cluster_spec arg:cluster_spec arg:task_type arg:task_id arguments arg arg arg Assign Assign Call If Call Compare Raise Call Call If Compare Raise Call Call If BoolOp Compare Compare Raise Call If Compare Call Raise Call If Compare Call Raise Call If BoolOp Compare Compare Call Raise Call"
  },
  {
    "library": "tensorflow",
    "name": "value",
    "source_code": "@property\ndef value(self):\n    return self._value",
    "docstring": "The value of this dimension, or None if it is unknown.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\tensor_shape.py",
    "ast_data": "FunctionDef name:value arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_cross_suppression",
    "source_code": "def _cross_suppression(boxes, box_slice, iou_threshold, inner_idx, tile_size):\n    batch_size = array_ops.shape(boxes)[0]\n    new_slice = array_ops.slice(boxes, [0, inner_idx * tile_size, 0], [batch_size, tile_size, 4])\n    iou = _bbox_overlap(new_slice, box_slice)\n    box_slice_after_suppression = array_ops.expand_dims(math_ops.cast(math_ops.reduce_all(iou < iou_threshold, [1]), box_slice.dtype), 2) * box_slice\n    return (boxes, box_slice_after_suppression, iou_threshold, inner_idx + 1)",
    "docstring": "Suppress boxes between different tiles. Args: boxes: a tensor of shape [batch_size, num_boxes_with_padding, 4] box_slice: a tensor of shape [batch_size, tile_size, 4] iou_threshold: a scalar tensor inner_idx: a scalar tensor representing the tile index of the tile that is used to supress box_slice tile_size: an integer representing the number of boxes in a tile Returns: boxes: unchanged boxes as input box_slice_after_suppression: box_slice after suppression iou_threshold: unchanged",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\image_ops_impl.py",
    "ast_data": "FunctionDef name:_cross_suppression arg:boxes arg:box_slice arg:iou_threshold arg:inner_idx arg:tile_size arguments arg arg arg arg arg Assign Call Assign Call Assign Call Assign Call Call Call Compare Return return:yes"
  },
  {
    "library": "kornia",
    "name": "sample_is_valid_for_homography",
    "source_code": "def sample_is_valid_for_homography(points1: Tensor, points2: Tensor) -> Tensor:\n    if points1.shape != points2.shape:\n        raise AssertionError(points1.shape)\n    KORNIA_CHECK_SHAPE(points1, ['B', '4', '2'])\n    KORNIA_CHECK_SHAPE(points2, ['B', '4', '2'])\n    device = points1.device\n    idx_perm = torch.tensor([[0, 1, 2], [0, 1, 3], [0, 2, 3], [1, 2, 3]], dtype=torch.long, device=device)\n    points_src_h = convert_points_to_homogeneous(points1)\n    points_dst_h = convert_points_to_homogeneous(points2)\n    src_perm = points_src_h[:, idx_perm]\n    dst_perm = points_dst_h[:, idx_perm]\n    left_sign = (torch.cross(src_perm[..., 1:2, :], src_perm[..., 2:3, :]) @ src_perm[..., 0:1, :].permute(0, 1, 3, 2)).sign()\n    right_sign = (torch.cross(dst_perm[..., 1:2, :], dst_perm[..., 2:3, :]) @ dst_perm[..., 0:1, :].permute(0, 1, 3, 2)).sign()\n    sample_is_valid = (left_sign == right_sign).view(-1, 4).min(dim=1)[0]\n    return sample_is_valid",
    "docstring": "Implement oriented constraint check from :cite:. Analogous to Args: points1: A set of points in the first image with a tensor shape :math:. points2: A set of points in the second image with a tensor shape :math:. Returns: Mask with the minimal sample is good for homography estimation:math:.",
    "type": "function",
    "file_path": "kornia\\kornia\\geometry\\homography.py",
    "ast_data": "FunctionDef name:sample_is_valid_for_homography arg:points1 arg:points2 arguments arg arg If Compare Raise Call Call Call Assign Assign Call Assign Call Assign Call Assign Assign Assign Call Call Call Assign Call Call Call Assign Call Call Compare Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "_process_colors",
    "source_code": "def _process_colors(self):\n    self.monochrome = self.cmap.monochrome\n    if self.colors is not None:\n        i0, i1 = (0, len(self.levels))\n        if self.filled:\n            i1 -= 1\n            if self.extend in ('both', 'min'):\n                i0 -= 1\n            if self.extend in ('both', 'max'):\n                i1 += 1\n        self.cvalues = list(range(i0, i1))\n        self.set_norm(mcolors.NoNorm())\n    else:\n        self.cvalues = self.layers\n    self.norm.autoscale_None(self.levels)\n    self.set_array(self.cvalues)\n    self.update_scalarmappable()\n    if self.extend in ('both', 'max', 'min'):\n        self.norm.clip = False",
    "docstring": "Color argument processing for contouring. Note that we base the colormapping on the contour levels and layers, not on the actual range of the Z values. This means we don't have to worry about bad values in Z, and we always have the full dynamic range available for the selected levels. The color is based on the midpoint of the layer, except for extended end layers. By default, the norm vmin and vmax are the extreme values of the non-extended levels. Hence, the layer color extremes are not the extreme values of the colormap itself, but approach those values as the number of levels increases. An advantage of this scheme is that line contours, when added to filled contours, take on colors that are consistent with those of the filled regions; for example, a contour line on the boundary between two regions will have a color intermediate between those of the regions.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\contour.py",
    "ast_data": "FunctionDef name:_process_colors arg:self arguments arg Assign If Compare Assign Call If If Compare If Compare Assign Call Call Call Call Assign Call Call Call If Compare Assign"
  },
  {
    "library": "numpy",
    "name": "atleast_2d",
    "source_code": "@array_function_dispatch(_atleast_2d_dispatcher)\ndef atleast_2d(*arys):\n    res = []\n    for ary in arys:\n        ary = asanyarray(ary)\n        if ary.ndim == 0:\n            result = ary.reshape(1, 1)\n        elif ary.ndim == 1:\n            result = ary[_nx.newaxis, :]\n        else:\n            result = ary\n        res.append(result)\n    if len(res) == 1:\n        return res[0]\n    else:\n        return tuple(res)",
    "docstring": "View inputs as arrays with at least two dimensions. Parameters ---------- arys1, arys2, ... : array_like One or more array-like sequences. Non-array inputs are converted to arrays. Arrays that already have two or more dimensions are preserved. Returns ------- res, res2, ... : ndarray An array, or tuple of arrays, each with ``. Copies are avoided where possible, and views with two or more dimensions are returned. See Also -------- atleast_1d, atleast_3d Examples -------- >>> import numpy as np >>> np.atleast_2d(3.0) array([[3.]]) >>> x = np.arange(3.0) >>> np.atleast_2d(x) array([[0., 1., 2.]]) >>> np.atleast_2d(x).base is x True >>> np.atleast_2d(1, [1, 2], [[1, 2]]) (array([[1]]), array([[1, 2]]), array([[1, 2]]))",
    "type": "function",
    "file_path": "numpy\\numpy\\_core\\shape_base.py",
    "ast_data": "FunctionDef name:atleast_2d arguments arg Assign For Assign Call If Compare Assign Call If Compare Assign Assign Call If Compare Call Return return:yes Return return:yes Call Call"
  },
  {
    "library": "kornia",
    "name": "projection_from_KRt",
    "source_code": "def projection_from_KRt(K: Tensor, R: Tensor, t: Tensor) -> Tensor:\n    KORNIA_CHECK_SHAPE(K, ['*', '3', '3'])\n    KORNIA_CHECK_SHAPE(R, ['*', '3', '3'])\n    KORNIA_CHECK_SHAPE(t, ['*', '3', '1'])\n    if not len(K.shape) == len(R.shape) == len(t.shape):\n        raise AssertionError\n    Rt = concatenate([R, t], dim=-1)\n    Rt_h = pad(Rt, [0, 0, 0, 1], 'constant', 0.0)\n    Rt_h[..., -1, -1] += 1.0\n    K_h = pad(K, [0, 1, 0, 1], 'constant', 0.0)\n    K_h[..., -1, -1] += 1.0\n    return K @ Rt",
    "docstring": "Get the projection matrix P from K, R and t. This function estimate the projection matrix by solving the following equation: :math:. Args: K: the camera matrix with the intrinsics with shape :math:. R: The rotation matrix with shape :math:. t: The translation vector with shape :math:. Returns: The projection matrix P with shape :math:.",
    "type": "function",
    "file_path": "kornia\\kornia\\geometry\\epipolar\\projection.py",
    "ast_data": "FunctionDef name:projection_from_KRt arg:K arg:R arg:t arguments arg arg arg Call Call Call If Compare Call Call Call Raise Assign Call Assign Call Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_static_inner_shape_as_list",
    "source_code": "def _static_inner_shape_as_list(self, truncate_first):\n    if self._static_inner_shape.rank is None:\n        return [...]\n    result = self._static_inner_shape.as_list()\n    if truncate_first:\n        return result[1:]\n    return result",
    "docstring": "Returns the lengths of the inner shape (if rank known), or [...].",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\dynamic_ragged_shape.py",
    "ast_data": "FunctionDef name:_static_inner_shape_as_list arg:self arg:truncate_first arguments arg arg If Compare Return return:yes Assign Call If Return return:yes Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "reset",
    "source_code": "def reset() -> None:\n    import logging\n    log = logging.getLogger(__name__)\n    log.info('torch._dynamo.reset')\n    with convert_frame.compile_lock:\n        reset_code_caches()\n        convert_frame.input_codes.clear()\n        reset_code_state()\n        convert_frame.output_codes.clear()\n        orig_code_map.clear()\n        guard_failures.clear()\n        graph_break_reasons.clear()\n        resume_execution.ContinueExecutionCache.cache.clear()\n        _reset_guarded_backend_cache()\n        reset_frame_count()\n        torch._dynamo.compiled_autograd.reset()\n        convert_frame.FRAME_COUNTER = 0\n        convert_frame.FRAME_COMPILE_COUNTER.clear()\n        callback_handler.clear()\n        GenerationTracker.clear()\n        TensorifyState.clear()\n        torch._dynamo.utils.warn_once_cache.clear()\n        torch._dynamo.utils.user_obj_id_to_weakref.clear()\n        torch._C._autograd._saved_tensors_hooks_set_tracing(False)",
    "docstring": "Clear all compile caches and restore initial state. This function is intended to reset Dynamo's state *as if* you had started a fresh process invocation, which makes it good for testing scenarios where you want to behave as if you started a new process. It does NOT affect any file system caches. NB: this does NOT reset logging state. Don't use this to test logging initialization/reinitialization.",
    "type": "function",
    "file_path": "pytorch\\torch\\_dynamo\\__init__.py",
    "ast_data": "FunctionDef name:reset arguments Assign Call Call With Call Call Call Call Call Call Call Call Call Call Call Assign Call Call Call Call Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_make_new_nodes",
    "source_code": "def _make_new_nodes(nodes_by_depth, layer_fn, layer_map, tensor_map):\n    new_nodes = set()\n    depth_keys = list(nodes_by_depth.keys())\n    depth_keys.sort(reverse=True)\n    for depth in depth_keys:\n        nodes = nodes_by_depth[depth]\n        for node in nodes:\n            layer = node.outbound_layer\n            if layer not in layer_map:\n                new_layer = layer_fn(layer)\n                layer_map[layer] = new_layer\n                layer = new_layer\n            else:\n                layer = layer_map[layer]\n                if isinstance(layer, InputLayer):\n                    continue\n            if all((tensor in tensor_map for tensor in nest.flatten(node.input_tensors))):\n                args = nest.map_structure(lambda t: tensor_map.get(t, t), node.call_args)\n                kwargs = nest.map_structure(lambda t: tensor_map.get(t, t), node.call_kwargs)\n                output_tensors = layer(*args, **kwargs)\n                first_output_tensor = nest.flatten(output_tensors)[0]\n                new_nodes.add(layer._inbound_nodes[first_output_tensor._keras_history.node_index])\n                for x, y in zip(nest.flatten(node.output_tensors), nest.flatten(output_tensors)):\n                    tensor_map[x] = y\n    return new_nodes",
    "docstring": "Uses the layers in to make new nodes based on . Args: nodes_by_depth: Provides structure information to create new nodes. layer_fn: Function to clone layers. layer_map: Map from layers in to new layers. tensor_map: Map from tensors in to newly compute tensors. Returns: A set of new nodes. and are updated.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\models.py",
    "ast_data": "FunctionDef name:_make_new_nodes arg:nodes_by_depth arg:layer_fn arg:layer_map arg:tensor_map arguments arg arg arg arg Assign Call Assign Call Call Call For Assign For Assign If Compare Assign Call Assign Assign Assign If Call If Call Compare Call Assign Call arguments arg Call Assign Call arguments arg Call Assign Call Assign Call Call For Call Call Call Assign Return return:yes"
  },
  {
    "library": "pandas",
    "name": "is_re",
    "source_code": "def is_re(obj: object) -> TypeGuard[Pattern]:\n    return isinstance(obj, Pattern)",
    "docstring": "Check if the object is a regex pattern instance. Parameters ---------- obj : object The object to check for being a regex pattern. Typically, this would be an object that you expect to be a compiled pattern from the module. Returns ------- bool Whether is a regex pattern. See Also -------- api.types.is_float : Return True if given object is float. api.types.is_iterator : Check if the object is an iterator. api.types.is_integer : Return True if given object is integer. api.types.is_re_compilable : Check if the object can be compiled into a regex pattern instance. Examples -------- >>> from pandas.api.types import is_re >>> import re >>> is_re(re.compile(\".*\")) True >>> is_re(\"foo\") False",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\dtypes\\inference.py",
    "ast_data": "FunctionDef name:is_re arg:obj arguments arg Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "_score_with_function",
    "source_code": "def _score_with_function(self, X, y, sample_weight, score_function):\n    y_pred = self._predict(X, check_input=False)\n    if np.isnan(y_pred).any() or np.isinf(y_pred).any():\n        return np.nan\n    return score_function(y, y_pred, sample_weight=sample_weight)",
    "docstring": "Private score method without input validation.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\neural_network\\_multilayer_perceptron.py",
    "ast_data": "FunctionDef name:_score_with_function arg:self arg:X arg:y arg:sample_weight arg:score_function arguments arg arg arg arg arg Assign Call If BoolOp Call Call Call Call Return return:yes Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_tf_tensor_string_get_item",
    "source_code": "def _tf_tensor_string_get_item(target, i):\n    x = gen_string_ops.substr(target, i, 1)\n    return x",
    "docstring": "Overload of get_item that stages a Tensor string read.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\autograph\\operators\\slices.py",
    "ast_data": "FunctionDef name:_tf_tensor_string_get_item arg:target arg:i arguments arg arg Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "cumprod",
    "source_code": "@tf_export('math.cumprod', v1=['math.cumprod', 'cumprod'])\n@dispatch.add_dispatch_support\n@deprecation.deprecated_endpoints('cumprod')\ndef cumprod(x, axis=0, exclusive=False, reverse=False, name=None):\n    with ops.name_scope(name, 'Cumprod', [x]) as name:\n        x = ops.convert_to_tensor(x, name='x')\n        return gen_math_ops.cumprod(x, axis, exclusive=exclusive, reverse=reverse, name=name)",
    "docstring": "Compute the cumulative product of the tensor along . By default, this op performs an inclusive cumprod, which means that the first element of the input is identical to the first element of the output: By setting the kwarg to , an exclusive cumprod is performed instead: By setting the kwarg to , the cumprod is performed in the opposite direction: This is more efficient than using separate ops. The and kwargs can also be combined: Args: x: A . Must be one of the following types: , , , , , , , , , , , , , . axis: A of type (default: 0). Must be in the range . exclusive: If , perform exclusive cumprod. reverse: A (default: False). name: A name for the operation (optional). Returns: A . Has the same type as .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\math_ops.py",
    "ast_data": "FunctionDef name:cumprod arg:x arg:axis arg:exclusive arg:reverse arg:name arguments arg arg arg arg arg With Call Assign Call Return return:yes Call Call Call"
  },
  {
    "library": "django",
    "name": "angular_name",
    "source_code": "@property\ndef angular_name(self):\n    return self.srs.angular_name",
    "docstring": "Return the name of the angular units.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\db\\backends\\base\\models.py",
    "ast_data": "FunctionDef name:angular_name arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "write",
    "source_code": "def write(self, file_prefix, session=None, options=None):\n    return self._write(file_prefix, session, options=options)",
    "docstring": "Writes a training checkpoint. The checkpoint includes variables created by this object and any trackable objects it depends on at the time is called. does not number checkpoints, increment , or update the metadata used by . It is primarily intended for use by higher level checkpoint management utilities. provides a very basic implementation of these features. Args: file_prefix: A prefix to use for the checkpoint filenames (/path/to/directory/and_a_prefix). session: The session to evaluate variables in. Ignored when executing eagerly. If not provided when graph building, the default session is used. options: Optional object. Returns: The full path to the checkpoint (i.e. ).",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\checkpoint\\checkpoint.py",
    "ast_data": "FunctionDef name:write arg:self arg:file_prefix arg:session arg:options arguments arg arg arg arg Return return:yes Call"
  },
  {
    "library": "cherrypy",
    "name": "__init__",
    "source_code": "def __init__(self, fp, headers, params=None, request_params=None):\n    Entity.__init__(self, fp, headers, params)\n    if self.content_type.value.startswith('text/'):\n        for c in ('ISO-8859-1', 'iso-8859-1', 'Latin-1', 'latin-1'):\n            if c in self.attempt_charsets:\n                break\n        else:\n            self.attempt_charsets.append('ISO-8859-1')\n    self.processors['multipart'] = _old_process_multipart\n    if request_params is None:\n        request_params = {}\n    self.request_params = request_params",
    "docstring": "Initialize a request body entity.",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\_cpreqbody.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:fp arg:headers arg:params arg:request_params arguments arg arg arg arg arg Call If Call For If Compare Call Assign If Compare Assign Assign"
  },
  {
    "library": "tensorflow",
    "name": "weights",
    "source_code": "@property\ndef weights(self):\n    return self.trainable_weights + self.non_trainable_weights",
    "docstring": "Returns the list of all layer variables/weights. Returns: A list of variables.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\base_layer.py",
    "ast_data": "FunctionDef name:weights arg:self arguments arg Return return:yes"
  },
  {
    "library": "django",
    "name": "value",
    "source_code": "@property\ndef value(self):\n    return self.as_int(self._bit64)",
    "docstring": "Return an integer contained in this field.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\gdal\\field.py",
    "ast_data": "FunctionDef name:value arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "stateless_random_hue",
    "source_code": "@tf_export('image.stateless_random_hue', v1=[])\n@dispatch.add_dispatch_support\ndef stateless_random_hue(image, max_delta, seed):\n    if max_delta > 0.5:\n        raise ValueError('max_delta must be <= 0.5.')\n    if max_delta < 0:\n        raise ValueError('max_delta must be non-negative.')\n    delta = stateless_random_ops.stateless_random_uniform(shape=[], minval=-max_delta, maxval=max_delta, seed=seed)\n    return adjust_hue(image, delta)",
    "docstring": "Adjust the hue of RGB images by a random factor deterministically. Equivalent to but uses a randomly picked in the interval . Guarantees the same results given the same independent of how many times the function is called, and independent of global seed settings (e.g. ). must be in the interval . Usage Example: >>> x = [[[1.0, 2.0, 3.0], ... [4.0, 5.0, 6.0]], ... [[7.0, 8.0, 9.0], ... [10.0, 11.0, 12.0]]] >>> seed = (1, 2) >>> tf.image.stateless_random_hue(x, 0.2, seed) Args: image: RGB image or images. The size of the last dimension must be 3. max_delta: float. The maximum value for the random delta. seed: A shape [2] Tensor, the seed to the random number generator. Must have dtype or . Returns: Adjusted image(s), same shape and DType as . Raises: ValueError: if is invalid.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\image_ops_impl.py",
    "ast_data": "FunctionDef name:stateless_random_hue arg:image arg:max_delta arg:seed arguments arg arg arg If Compare Raise Call If Compare Raise Call Assign Call Return return:yes Call Call"
  },
  {
    "library": "kornia",
    "name": "get_single_level_loss",
    "source_code": "def get_single_level_loss(self, img_src: Tensor, img_dst: Tensor, transform_model: Tensor) -> Tensor:\n    if img_src.shape != img_dst.shape:\n        raise ValueError(f'Cannot register images of different shapes                              {img_src.shape} {img_dst.shape:} ')\n    _height, _width = img_dst.shape[-2:]\n    warper = self.warper(_height, _width)\n    img_src_to_dst = warper(img_src, transform_model)\n    loss = self.loss_fn(img_src_to_dst, img_dst, reduction='none')\n    ones = warper(torch.ones_like(img_src), transform_model)\n    loss = loss.masked_select(ones > 0.9).mean()\n    return loss",
    "docstring": "Warp img_src into img_dst with transform_model and returns loss.",
    "type": "method",
    "file_path": "kornia\\kornia\\geometry\\transform\\image_registrator.py",
    "ast_data": "FunctionDef name:get_single_level_loss arg:self arg:img_src arg:img_dst arg:transform_model arguments arg arg arg arg If Compare Raise Call Assign Assign Call Assign Call Assign Call Assign Call Call Assign Call Call Compare Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "__torch_function__",
    "source_code": "@classmethod\ndef __torch_function__(cls, func, types, args=(), kwargs=None):\n    if kwargs is None:\n        kwargs = {}\n    if not all((issubclass(cls, t) for t in types)):\n        return NotImplemented\n    with _C.DisableTorchFunctionSubclass():\n        ret = func(*args, **kwargs)\n        if func in get_default_nowrap_functions():\n            return ret\n        else:\n            return _convert(ret, cls)",
    "docstring": "This __torch_function__ implementation wraps subclasses such that methods called on subclasses return a subclass instance instead of a `__torch_function__` a classmethod.",
    "type": "method",
    "file_path": "pytorch\\torch\\_tensor.py",
    "ast_data": "FunctionDef name:__torch_function__ arg:cls arg:func arg:types arg:args arg:kwargs arguments arg arg arg arg arg If Compare Assign If Call Call Return return:yes With Call Assign Call If Compare Call Return return:yes Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "transform",
    "source_code": "def transform(self, X, y=None):\n    check_is_fitted(self)\n    X = validate_data(self, X, dtype=np.float64, reset=False)\n    Xr = (X - self._x_mean) / self._x_std\n    x_scores = np.dot(Xr, self.x_weights_)\n    if y is not None:\n        y = check_array(y, input_name='y', ensure_2d=False, dtype=np.float64)\n        if y.ndim == 1:\n            y = y.reshape(-1, 1)\n        yr = (y - self._y_mean) / self._y_std\n        y_scores = np.dot(yr, self.y_weights_)\n        return (x_scores, y_scores)\n    return x_scores",
    "docstring": "Apply the dimensionality reduction. Parameters ---------- X : array-like of shape (n_samples, n_features) Samples to be transformed. y : array-like of shape (n_samples,) or (n_samples, n_targets), default=None Targets. Returns ------- x_scores : array-like or tuple of array-like The transformed data if , otherwise.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\cross_decomposition\\_pls.py",
    "ast_data": "FunctionDef name:transform arg:self arg:X arg:y arguments arg arg arg Call Assign Call Assign Assign Call If Compare Assign Call If Compare Assign Call Assign Assign Call Return return:yes Return return:yes"
  },
  {
    "library": "cherrypy",
    "name": "patched_path",
    "source_code": "def patched_path(path):\n    if not path.endswith('/'):\n        path += '/'\n    if path.startswith('/RPC2/'):\n        path = path[5:]\n    return path",
    "docstring": "Return 'path', doctored for RPC.",
    "type": "function",
    "file_path": "cherrypy\\cherrypy\\lib\\xmlrpcutil.py",
    "ast_data": "FunctionDef name:patched_path arg:path arguments arg If Call If Call Assign Return return:yes"
  },
  {
    "library": "numpy",
    "name": "feature_is_exist",
    "source_code": "def feature_is_exist(self, name):\n    assert name.isupper()\n    return name in self.conf_features",
    "docstring": "Returns True if a certain feature is exist and covered within ``. Parameters ---------- 'name': str feature name in uppercase.",
    "type": "method",
    "file_path": "numpy\\numpy\\distutils\\ccompiler_opt.py",
    "ast_data": "FunctionDef name:feature_is_exist arg:self arg:name arguments arg arg Call Return return:yes Compare"
  },
  {
    "library": "kornia",
    "name": "heatmap_to_keypoints",
    "source_code": "def heatmap_to_keypoints(heatmap: Tensor, n: Optional[int]=None, window_size: int=5, score_threshold: float=0.0) -> list[Keypoints]:\n    heatmap = heatmap.squeeze(1)\n    nmsed = nms(heatmap, window_size=window_size, cutoff=score_threshold)\n    keypoints = []\n    for b in range(heatmap.shape[0]):\n        yx = nmsed[b].nonzero(as_tuple=False)\n        detection_logp = heatmap[b][nmsed[b]]\n        xy = yx.flip((1,))\n        if n is not None:\n            n_ = min(n + 1, detection_logp.numel())\n            minus_threshold, _indices = torch.kthvalue(-detection_logp, n_)\n            mask = detection_logp > -minus_threshold\n            xy = xy[mask]\n            detection_logp = detection_logp[mask]\n            xy = xy[:n]\n            detection_logp = detection_logp[:n]\n        keypoints.append(Keypoints(xy, detection_logp))\n    return keypoints",
    "docstring": "Inference-time nms-based detection protocol.",
    "type": "function",
    "file_path": "kornia\\kornia\\feature\\disk\\detector.py",
    "ast_data": "FunctionDef name:heatmap_to_keypoints arg:heatmap arg:n arg:window_size arg:score_threshold arguments arg arg arg arg Assign Call Assign Call Assign For Call Assign Call Assign Assign Call If Compare Assign Call Call Assign Call Assign Compare Assign Assign Assign Assign Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "shutdown_system",
    "source_code": "@tf_export(v1=['tpu.shutdown_system'])\ndef shutdown_system(job: Optional[Text]=None) -> ops.Operation:\n    with ops.device(_tpu_system_device_name(job)):\n        shutdown_distributed_tpu = tpu_ops.shutdown_distributed_tpu()\n    return shutdown_distributed_tpu",
    "docstring": "Shuts down a running a distributed TPU system. Args: job: The job (the XXX in TensorFlow device specification /job:XXX) that contains the TPU devices that will be shutdown. If job=None it is assumed there is only one job in the TensorFlow flock, and an error will be returned if this assumption does not hold.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\tpu\\tpu.py",
    "ast_data": "FunctionDef name:shutdown_system arg:job arguments arg With Call Call Assign Call Return return:yes Call"
  },
  {
    "library": "seaborn",
    "name": "_eval_bivariate",
    "source_code": "def _eval_bivariate(self, x1, x2, weights):\n    raise NotImplementedError('Bivariate ECDF is not implemented')",
    "docstring": "Inner function for ECDF of two variables.",
    "type": "method",
    "file_path": "seaborn\\seaborn\\_statistics.py",
    "ast_data": "FunctionDef name:_eval_bivariate arg:self arg:x1 arg:x2 arg:weights arguments arg arg arg arg Raise Call"
  },
  {
    "library": "kornia",
    "name": "exp",
    "source_code": "@staticmethod\ndef exp(v: Tensor) -> Se2:\n    check_v_shape(v)\n    theta = v[..., 2]\n    so2 = So2.exp(theta)\n    z = tensor(0.0, device=v.device, dtype=v.dtype)\n    theta_nonzeros = theta != 0.0\n    a = where(theta_nonzeros, so2.z.imag / theta, z)\n    b = where(theta_nonzeros, (1.0 - so2.z.real) / theta, z)\n    x = v[..., 0]\n    y = v[..., 1]\n    t = stack((a * x - b * y, b * x + a * y), -1)\n    return Se2(so2, t)",
    "docstring": "Convert elements of lie algebra to elements of lie group. Args: v: vector of shape :math:. Example: >>> v = torch.ones((1, 3)) >>> s = Se2.exp(v) >>> s.r Parameter containing: tensor([0.5403+0.8415j], requires_grad=True) >>> s.t Parameter containing: tensor([[0.3818, 1.3012]], requires_grad=True)",
    "type": "method",
    "file_path": "kornia\\kornia\\geometry\\liegroup\\se2.py",
    "ast_data": "FunctionDef name:exp arg:v arguments arg Call Assign Assign Call Assign Call Assign Compare Assign Call Assign Call Assign Assign Assign Call Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "_positional_selector",
    "source_code": "@cache_readonly\ndef _positional_selector(self) -> GroupByPositionalSelector:\n    if TYPE_CHECKING:\n        groupby_self = cast(groupby.GroupBy, self)\n    else:\n        groupby_self = self\n    return GroupByPositionalSelector(groupby_self)",
    "docstring": "Return positional selection for each group. `~GroupBy.head~GroupBy.tail~GroupBy.head~GroupBy.tail~GroupBy.nth` parameter. Examples -------- >>> df = pd.DataFrame( ... [[\"a\", 1], [\"a\", 2], [\"a\", 3], [\"b\", 4], [\"b\", 5]], columns=[\"A\", \"B\"] ... ) >>> df.groupby(\"A\")._positional_selector[1:2] A B 1 a 2 4 b 5 >>> df.groupby(\"A\")._positional_selector[1, -1] A B 1 a 2 2 a 3 4 b 5",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\groupby\\indexing.py",
    "ast_data": "FunctionDef name:_positional_selector arg:self arguments arg If Assign Call Assign Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "entropy",
    "source_code": "def entropy(self, mu=None, kappa=1):\n    dim, _, kappa = self._process_parameters(mu, kappa)\n    return self._entropy(dim, kappa)",
    "docstring": "Compute the differential entropy of the von Mises-Fisher distribution. Parameters ---------- mu : array_like, default: None Mean direction of the distribution. Must be a one-dimensional unit vector of norm 1. kappa : float, default: 1 Concentration parameter. Must be positive. Returns ------- h : scalar Entropy of the von Mises-Fisher distribution.",
    "type": "method",
    "file_path": "scipy\\scipy\\stats\\_multivariate.py",
    "ast_data": "FunctionDef name:entropy arg:self arg:mu arg:kappa arguments arg arg arg Assign Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, feed_fn):\n    self.feed_fn = feed_fn",
    "docstring": "Initializes a . Args: feed_fn: function that takes no arguments and returns of to feed.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\training\\basic_session_run_hooks.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:feed_fn arguments arg arg Assign"
  },
  {
    "library": "numpy",
    "name": "get_strides",
    "source_code": "def get_strides(self):\n    warnings.warn('\"get_strides\" is deprecated. Use \"strides\" instead', DeprecationWarning, stacklevel=2)\n    return self.strides",
    "docstring": "Deprecated getter for the property. .. deprecated:: 1.21",
    "type": "method",
    "file_path": "numpy\\numpy\\_core\\_internal.py",
    "ast_data": "FunctionDef name:get_strides arg:self arguments arg Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_count_fx_targets",
    "source_code": "def _count_fx_targets(exported_program: torch.export.ExportedProgram) -> defaultdict[str, int]:\n    fx_node_target_count: defaultdict[str, int] = defaultdict(int)\n    for node in exported_program.graph.nodes:\n        if node.op == 'call_function':\n            fx_node_target_count[str(node.target)] += 1\n    return fx_node_target_count",
    "docstring": "Count the number of targets for each node in the exported program.",
    "type": "function",
    "file_path": "pytorch\\torch\\onnx\\_internal\\exporter\\_analysis.py",
    "ast_data": "FunctionDef name:_count_fx_targets arg:exported_program arguments arg Call For If Compare Call Return return:yes"
  },
  {
    "library": "django",
    "name": "phone2numeric_filter",
    "source_code": "@register.filter('phone2numeric', is_safe=True)\ndef phone2numeric_filter(value):\n    return phone2numeric(value)",
    "docstring": "Take a phone number and converts it in to its numerical equivalent.",
    "type": "function",
    "file_path": "django\\django\\template\\defaultfilters.py",
    "ast_data": "FunctionDef name:phone2numeric_filter arg:value arguments arg Return return:yes Call Call"
  },
  {
    "library": "matplotlib",
    "name": "autoscale_None",
    "source_code": "def autoscale_None(self, A):\n    super().autoscale_None(A)\n    if self.vmin >= self.vcenter:\n        self.vmin = self.vcenter - (self.vmax - self.vcenter)\n    if self.vmax <= self.vcenter:\n        self.vmax = self.vcenter + (self.vcenter - self.vmin)",
    "docstring": "Get vmin and vmax. If vcenter isn't in the range [vmin, vmax], either vmin or vmax is expanded so that vcenter lies in the middle of the modified range [vmin, vmax].",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\colors.py",
    "ast_data": "FunctionDef name:autoscale_None arg:self arg:A arguments arg arg Call Call If Compare Assign If Compare Assign"
  },
  {
    "library": "matplotlib",
    "name": "contains_branch",
    "source_code": "def contains_branch(self, other):\n    if self.depth < other.depth:\n        return False\n    for _, sub_tree in self._iter_break_from_left_to_right():\n        if sub_tree == other:\n            return True\n    return False",
    "docstring": "Return whether the given transform is a sub-tree of this transform. This routine uses transform equality to identify sub-trees, therefore in many situations it is object id which will be used. For the case where the given transform represents the whole of this transform, returns True.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\transforms.py",
    "ast_data": "FunctionDef name:contains_branch arg:self arg:other arguments arg arg If Compare Return return:yes For Call If Compare Return return:yes Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_vmap_for_bhqkv",
    "source_code": "def _vmap_for_bhqkv(fn: Callable, prefix: tuple[Optional[int], ...], suffix: tuple[Optional[int], ...]=(), out_dims: Union[int, list[Optional[int]]]=0, group_dim: bool=False):\n    dimensions: list[tuple[None | int, None | int, None | int, None | int]] = []\n    dimensions = [(None, None, None, 0), (None, None, 0, None), (None, 0, None, None)]\n    if group_dim:\n        dimensions += [(None, 0, None, None)]\n    dimensions += [(0, None, None, None)]\n    for dims in dimensions:\n        fn = torch.vmap(fn, in_dims=prefix + dims + suffix, out_dims=out_dims)\n    return fn",
    "docstring": "Used to vmap both score_mods and mask_mods over 4-dimensional/5-dimension inputs. Mapping over the [b, hq, q_idx, kv_idx] or [b, hkv, g, q_idx, kv_idx] dimensions. Args: fn (callable): The function to vmap. prefix (tuple): The prefix of the vmap. For score mod functions, this should be set to (0,). For mask_mods = () suffix (tuple): We need to add (0,) if gradOut is being mapped over, and (None,) * len(other_buffers). out_dims (tuple): For forward cases, keep this as the default 0 since we are only returning 1 output. For backwards, the joint graph returns grads for B, H, Q_idx, KV_idx and other_buffers, so we set this to (0, None, None, None, None) + (None,) * len(other_buffers). Returns: callable: The vmapped function.",
    "type": "function",
    "file_path": "pytorch\\torch\\nn\\attention\\flex_attention.py",
    "ast_data": "FunctionDef name:_vmap_for_bhqkv arg:fn arg:prefix arg:suffix arg:out_dims arg:group_dim arguments arg arg arg arg arg Assign If For Assign Call Return return:yes"
  },
  {
    "library": "numpy",
    "name": "process_and_write_file",
    "source_code": "def process_and_write_file(fromfile, outfile):\n    process_file = get_processor()\n    content = process_file(fromfile)\n    with open(outfile, 'w') as f:\n        f.write(content)",
    "docstring": "Process tempita templated file and write out the result. The template file is expected to end in (e.g., or ). Processing generates .",
    "type": "function",
    "file_path": "numpy\\numpy\\_build_utils\\process_src_template.py",
    "ast_data": "FunctionDef name:process_and_write_file arg:fromfile arg:outfile arguments arg arg Assign Call Assign Call With Call Call"
  },
  {
    "library": "numpy",
    "name": "decode",
    "source_code": "@set_module('numpy.strings')\n@array_function_dispatch(_code_dispatcher)\ndef decode(a, encoding=None, errors=None):\n    return _to_bytes_or_str_array(_vec_string(a, np.object_, 'decode', _clean_args(encoding, errors)), np.str_(''))",
    "docstring": "Calls :meth: element-wise. The set of available codecs comes from the Python standard library, and may be extended at runtime. For more information, see the :mod: module. Parameters ---------- a : array_like, with `bytes.decode` Notes ----- The type of the result will depend on the encoding specified. Examples -------- >>> import numpy as np >>> c = np.array([b'\\x81\\xc1\\x81\\xc1\\x81\\xc1', b'@@\\x81\\xc1@@', ... b'\\x81\\x82\\xc2\\xc1\\xc2\\x82\\x81']) >>> c array([b'\\x81\\xc1\\x81\\xc1\\x81\\xc1', b'@@\\x81\\xc1@@', b'\\x81\\x82\\xc2\\xc1\\xc2\\x82\\x81'], dtype='|S7') >>> np.strings.decode(c, encoding='cp037') array(['aAaAaA', ' aA ', 'abBABba'], dtype='<U7')",
    "type": "function",
    "file_path": "numpy\\numpy\\_core\\strings.py",
    "ast_data": "FunctionDef name:decode arg:a arg:encoding arg:errors arguments arg arg arg Return return:yes Call Call Call Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "statically_known_lt",
    "source_code": "def statically_known_lt(self, left: Expr, right: Union[Expr, int]) -> bool:\n    expr = left < right\n    return self.is_expr_static_and_true(expr)",
    "docstring": "Returns a bool indicating if it is sound to optimize as if left is less than right.",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\sizevars.py",
    "ast_data": "FunctionDef name:statically_known_lt arg:self arg:left arg:right arguments arg arg arg Assign Compare Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "transform",
    "source_code": "def transform(self, X, copy=True):\n    check_is_fitted(self)\n    X = validate_data(self, X, copy=copy and self.whiten, dtype=[np.float64, np.float32], reset=False)\n    if self.whiten:\n        X -= self.mean_\n    return np.dot(X, self.components_.T)",
    "docstring": "Recover the sources from X (apply the unmixing matrix). Parameters ---------- X : array-like of shape (n_samples, n_features) Data to transform, where is the number of samples and is the number of features. copy : bool, default=True If False, data passed to fit can be overwritten. Defaults to True. Returns ------- X_new : ndarray of shape (n_samples, n_components) Estimated sources obtained by transforming the data with the estimated unmixing matrix.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\decomposition\\_fastica.py",
    "ast_data": "FunctionDef name:transform arg:self arg:X arg:copy arguments arg arg arg Call Assign Call BoolOp If Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "is_available",
    "source_code": "def is_available():\n    return torch._C._has_mkldnn",
    "docstring": "Return whether PyTorch is built with MKL-DNN support.",
    "type": "function",
    "file_path": "pytorch\\torch\\backends\\mkldnn\\__init__.py",
    "ast_data": "FunctionDef name:is_available arguments Return return:yes"
  },
  {
    "library": "sphinx",
    "name": "desc_type_parameter_list",
    "source_code": "class desc_type_parameter_list(nodes.Part, nodes.Inline, nodes.FixedTextElement):\n    child_text_separator = ', '\n\n    def astext(self) -> str:\n        return f'[{super().astext()}]'",
    "docstring": "Node for a general type parameter list. As default the type parameters list is written in line with the rest of the signature. Set `` is True.",
    "type": "class",
    "file_path": "sphinx\\sphinx\\addnodes.py",
    "ast_data": "ClassDef name:desc_type_parameter_list Assign FunctionDef name:astext arg:self arguments arg Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "dead_node_elimination",
    "source_code": "def dead_node_elimination(self) -> None:\n    updated_nodes = []\n    for node in reversed(self.nodes):\n\n        def can_eliminate_user(user: NodeUser) -> bool:\n            return user.is_weak or user.get_name() in V.graph.removed_operations\n        active_buffers = False\n        for buf in node.get_outputs():\n            can_eliminate = all((can_eliminate_user(u) for u in buf.users))\n            if can_eliminate:\n                log.debug('removed dead buffer: %s', buf.get_name())\n                V.graph.removed_buffers.add(buf.get_name())\n            else:\n                active_buffers = True\n        can_eliminate = not node.has_side_effects() and (not active_buffers)\n        if not can_eliminate:\n            updated_nodes.append(node)\n        else:\n            log.debug('removed dead operation: %s', node.get_name())\n            V.graph.removed_operations.add(node.get_name())\n            for read in node.read_writes.reads:\n                if read.name in self.name_to_buf:\n                    users = self.name_to_buf[read.name].users\n                    self.name_to_buf[read.name].users = [u for u in users if u.node.get_name() != node.get_name()]\n    self.nodes = list(reversed(updated_nodes))\n    for node in self.nodes:\n        node.prune_weak_deps()",
    "docstring": "Remove any nodes without users",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\scheduler.py",
    "ast_data": "FunctionDef name:dead_node_elimination arg:self arguments arg Assign For Call FunctionDef name:can_eliminate_user arg:user arguments arg Return return:yes BoolOp Compare Call Assign For Call Assign Call Call If Call Call Call Call Assign Assign BoolOp Call If Call Call Call Call Call For If Compare Assign Assign Compare Call Call Assign Call Call For Call"
  },
  {
    "library": "sphinx",
    "name": "ApplySourceWorkaround",
    "source_code": "class ApplySourceWorkaround(SphinxTransform):\n    default_priority = 10\n\n    def apply(self, **kwargs: Any) -> None:\n        for node in self.document.findall():\n            if isinstance(node, nodes.TextElement | nodes.image | nodes.topic):\n                apply_source_workaround(node)",
    "docstring": "Update source and rawsource attributes",
    "type": "class",
    "file_path": "sphinx\\sphinx\\transforms\\__init__.py",
    "ast_data": "ClassDef name:ApplySourceWorkaround Assign FunctionDef name:apply arg:self arguments arg arg For Call If Call Call"
  },
  {
    "library": "scipy",
    "name": "_wrapped_fun",
    "source_code": "def _wrapped_fun(*fargs):\n    nonlocal nfev\n    nfev += 1\n    return fun(*fargs)",
    "docstring": "Wrapped to track the number of times the function has been called.",
    "type": "function",
    "file_path": "scipy\\scipy\\optimize\\_root.py",
    "ast_data": "FunctionDef name:_wrapped_fun arguments arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "_get_generator",
    "source_code": "def _get_generator(device: torch.device) -> torch._C.Generator:\n    idx = device.index\n    if idx is None:\n        idx = current_device()\n    return torch.cuda.default_generators[idx]",
    "docstring": "Return the CUDA Generator object for the given device. Args: device (torch.device): selected device.",
    "type": "function",
    "file_path": "pytorch\\torch\\cuda\\__init__.py",
    "ast_data": "FunctionDef name:_get_generator arg:device arguments arg Assign If Compare Assign Call Return return:yes"
  },
  {
    "library": "kornia",
    "name": "init_images_for_training",
    "source_code": "def init_images_for_training(self, imgs: Images) -> None:\n    self._check_image_type_consistency(imgs)\n    if _is_list_of_str(imgs):\n        images = self._load_images(imgs)\n    elif _is_list_of_tensors(imgs):\n        images = imgs\n    else:\n        raise TypeError(f'Expected a list of image tensors or image paths. Gotcha {type(imgs)}.')\n    self._check_dimensions(images)\n    self._imgs = [img.to(self._device) for img in images]",
    "docstring": "Initialize images for training. Images can be either a list of tensors, or a list of paths to image disk locations. Args: imgs: List of image tensors or image paths: Images",
    "type": "method",
    "file_path": "kornia\\kornia\\nerf\\data_utils.py",
    "ast_data": "FunctionDef name:init_images_for_training arg:self arg:imgs arguments arg arg Call If Call Assign Call If Call Assign Raise Call Call Call Assign Call"
  },
  {
    "library": "tensorflow",
    "name": "create_local_server",
    "source_code": "@staticmethod\ndef create_local_server(config=None, start=True):\n    return Server({'localhost': ['localhost:0']}, protocol='grpc', config=config, start=start)",
    "docstring": "Creates a new single-process cluster running on the local host. This method is a convenience wrapper for creating a with a that specifies a single-process cluster containing a single task in a job called . Args: config: (Options.) A that specifies default configuration options for all sessions that run on this server. start: (Optional.) Boolean, indicating whether to start the server after creating it. Defaults to . Returns: A local .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\training\\server_lib.py",
    "ast_data": "FunctionDef name:create_local_server arg:config arg:start arguments arg arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "BackwardPrefetch",
    "source_code": "class BackwardPrefetch(Enum):\n    BACKWARD_PRE = auto()\n    BACKWARD_POST = auto()",
    "docstring": "This configures explicit backward prefetching, which improves throughput by enabling communication and computation overlap in the backward pass at the cost of slightly increased memory usage. - ``, which disables the backward prefetching altogether. This has no overlap and does not increase memory usage. In general, we do not recommend this setting since it may degrade throughput significantly. For more technical context: For a single process group using NCCL backend, any collectives, even if issued from different streams, contend for the same per-device NCCL stream, which implies that the relative order in which the collectives are issued matters for overlapping. The two backward prefetching values correspond to different issue orders.",
    "type": "class",
    "file_path": "pytorch\\torch\\distributed\\fsdp\\api.py",
    "ast_data": "ClassDef name:BackwardPrefetch Assign Call Assign Call"
  },
  {
    "library": "numpy",
    "name": "indices",
    "source_code": "@set_module('numpy')\ndef indices(dimensions, dtype=int, sparse=False):\n    dimensions = tuple(dimensions)\n    N = len(dimensions)\n    shape = (1,) * N\n    if sparse:\n        res = ()\n    else:\n        res = empty((N,) + dimensions, dtype=dtype)\n    for i, dim in enumerate(dimensions):\n        idx = arange(dim, dtype=dtype).reshape(shape[:i] + (dim,) + shape[i + 1:])\n        if sparse:\n            res = res + (idx,)\n        else:\n            res[i] = idx\n    return res",
    "docstring": "Return an array representing the indices of a grid. Compute an array where the subarrays contain index values 0, 1, ... varying only along the corresponding axis. Parameters ---------- dimensions : sequence of ints The shape of the grid. dtype : dtype, optional Data type of the result. sparse : boolean, optional Return a sparse representation of the grid instead of a dense representation. Default is False. Returns ------- grid : one ndarray or tuple of ndarrays If sparse is False: Returns one array of grid indices, `dimensions`. If sparse is set to true, the grid will be returned in a sparse representation. >>> i, j = np.indices((2, 3), sparse=True) >>> i.shape (2, 1) >>> j.shape (1, 3) >>> i # row indices array([[0], [1]]) >>> j # column indices array([[0, 1, 2]])",
    "type": "function",
    "file_path": "numpy\\numpy\\_core\\numeric.py",
    "ast_data": "FunctionDef name:indices arg:dimensions arg:dtype arg:sparse arguments arg arg arg Assign Call Assign Call Assign If Assign Assign Call For Call Assign Call Call If Assign Assign Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "bincount_v1",
    "source_code": "@tf_export(v1=['math.bincount', 'bincount'])\n@deprecation.deprecated_endpoints('bincount')\ndef bincount_v1(arr, weights=None, minlength=None, maxlength=None, dtype=dtypes.int32):\n    return bincount(arr, weights, minlength, maxlength, dtype)",
    "docstring": "Counts the number of occurrences of each value in an integer array. If and are not given, returns a vector with length if is non-empty, and length 0 otherwise. If are non-None, then index of the output stores the sum of the value in at each index where the corresponding value in is . Args: arr: An int32 tensor of non-negative values. weights: If non-None, must be the same shape as arr. For each value in , the bin will be incremented by the corresponding weight instead of 1. minlength: If given, ensures the output has length at least , padding with zeros at the end if necessary. maxlength: If given, skips values in that are equal or greater than , ensuring that the output has length at most . dtype: If is None, determines the type of the output bins. Returns: A vector with the same dtype as or the given . The bin values.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\bincount_ops.py",
    "ast_data": "FunctionDef name:bincount_v1 arg:arr arg:weights arg:minlength arg:maxlength arg:dtype arguments arg arg arg arg arg Return return:yes Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "Subset",
    "source_code": "class Subset(Dataset[_T_co]):\n    dataset: Dataset[_T_co]\n    indices: Sequence[int]\n\n    def __init__(self, dataset: Dataset[_T_co], indices: Sequence[int]) -> None:\n        self.dataset = dataset\n        self.indices = indices\n\n    def __getitem__(self, idx):\n        if isinstance(idx, list):\n            return self.dataset[[self.indices[i] for i in idx]]\n        return self.dataset[self.indices[idx]]\n\n    def __getitems__(self, indices: list[int]) -> list[_T_co]:\n        if callable(getattr(self.dataset, '__getitems__', None)):\n            return self.dataset.__getitems__([self.indices[idx] for idx in indices])\n        else:\n            return [self.dataset[self.indices[idx]] for idx in indices]\n\n    def __len__(self):\n        return len(self.indices)",
    "docstring": "Subset of a dataset at specified indices. Args: dataset (Dataset): The whole Dataset indices (sequence): Indices in the whole set selected for subset",
    "type": "class",
    "file_path": "pytorch\\torch\\utils\\data\\dataset.py",
    "ast_data": "ClassDef name:Subset FunctionDef name:__init__ arg:self arg:dataset arg:indices arguments arg arg arg Assign Assign FunctionDef name:__getitem__ arg:self arg:idx arguments arg arg If Call Return return:yes Return return:yes FunctionDef name:__getitems__ arg:self arg:indices arguments arg arg If Call Call Return return:yes Call Return return:yes FunctionDef name:__len__ arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "set_configuration_from_sharded_input_tensors",
    "source_code": "def set_configuration_from_sharded_input_tensors(self, input_tensors):\n    if not self._frozen:\n        self._tuple_shapes = None\n    number_of_shards = len(input_tensors)\n    self.set_number_of_shards(number_of_shards)\n    for t in input_tensors:\n        if len(t) != self.number_of_tuple_elements:\n            raise ValueError(f'input_tensors is {str(input_tensors)} but must be a list of lists, where each inner list has length number_of_tuple_elements={self.number_of_tuple_elements}')\n    sharded_shapes = [[t[i].shape for t in input_tensors] for i in range(self.number_of_tuple_elements)]\n    unsharded_shapes = [policy.get_unsharded_shape(s) for policy, s in zip(self._sharding_policies, sharded_shapes)]\n    self.set_tuple_shapes(unsharded_shapes)\n    for i in range(1, self.number_of_shards):\n        for t1, t2 in zip(input_tensors[0], input_tensors[i]):\n            if t1.dtype != t2.dtype:\n                raise TypeError(f'types of the tuple elements of input_tensors {str(input_tensors)} are not consistent')\n    self.set_tuple_types([t.dtype for t in input_tensors[0]])",
    "docstring": "Sets the shapes and types of the queue tuple elements. input_tensors is a list of lists of Tensors whose types and shapes are used to set the queue configuration. The length of the outer list is the number of shards required, and each inner list is the tuple of Tensors to use to determine the types and shapes of the corresponding shard. This method depends on the shard dimension, and calling it freezes the shard policy. Args: input_tensors: list of lists of Tensors. The outer list length corresponds to the desired number of shards, and each inner list is the size and shape of the desired configuration of the corresponding shard. Raises: ValueError: if any inner list is not a list of length self.number_of_tuple_elements; or the inner lists do not combine to form a consistent unsharded shape. TypeError: if the types of the Tensors in the inner lists do not match.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\tpu\\tpu_feed.py",
    "ast_data": "FunctionDef name:set_configuration_from_sharded_input_tensors arg:self arg:input_tensors arguments arg arg If Assign Assign Call Call For If Compare Call Raise Call Call Assign Call Assign Call Call Call For Call For Call If Compare Raise Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "from_proto",
    "source_code": "@staticmethod\ndef from_proto(queue_runner_def, import_scope=None):\n    return QueueRunner(queue_runner_def=queue_runner_def, import_scope=import_scope)",
    "docstring": "Returns a object created from .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\training\\queue_runner_impl.py",
    "ast_data": "FunctionDef name:from_proto arg:queue_runner_def arg:import_scope arguments arg arg Return return:yes Call"
  },
  {
    "library": "authlib",
    "name": "thumbprint",
    "source_code": "def thumbprint(self):\n    fields = list(self.REQUIRED_JSON_FIELDS)\n    fields.append('kty')\n    fields.sort()\n    data = OrderedDict()\n    for k in fields:\n        data[k] = self.tokens[k]\n    json_data = json_dumps(data)\n    digest_data = hashlib.sha256(to_bytes(json_data)).digest()\n    return to_unicode(urlsafe_b64encode(digest_data))",
    "docstring": "Implementation of RFC7638 JSON Web Key (JWK) Thumbprint.",
    "type": "method",
    "file_path": "authlib\\authlib\\jose\\rfc7517\\base_key.py",
    "ast_data": "FunctionDef name:thumbprint arg:self arguments arg Assign Call Call Call Assign Call For Assign Assign Call Assign Call Call Call Return return:yes Call Call"
  },
  {
    "library": "pandas",
    "name": "_shallow_copy",
    "source_code": "def _shallow_copy(self, left, right) -> Self:\n    dtype = IntervalDtype(left.dtype, closed=self.closed)\n    left, right, dtype = self._ensure_simple_new_inputs(left, right, dtype=dtype)\n    return self._simple_new(left, right, dtype=dtype)",
    "docstring": "Return a new IntervalArray with the replacement attributes Parameters ---------- left : Index Values to be used for the left-side of the intervals. right : Index Values to be used for the right-side of the intervals.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\arrays\\interval.py",
    "ast_data": "FunctionDef name:_shallow_copy arg:self arg:left arg:right arguments arg arg arg Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "can_fuse_vertical",
    "source_code": "@staticmethod\ndef can_fuse_vertical(scheduler: Scheduler, node1: BaseSchedulerNode, node2: BaseSchedulerNode, shared_data_score: int) -> bool:\n    return True",
    "docstring": "Hook for heuristics to prevent vertical (producer/consumer) fusions",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\choices.py",
    "ast_data": "FunctionDef name:can_fuse_vertical arg:scheduler arg:node1 arg:node2 arg:shared_data_score arguments arg arg arg arg Return return:yes"
  },
  {
    "library": "pandas",
    "name": "has_table",
    "source_code": "def has_table(table_name: str, con, schema: str | None=None) -> bool:\n    with pandasSQL_builder(con, schema=schema) as pandas_sql:\n        return pandas_sql.has_table(table_name)",
    "docstring": "Check if DataBase has named table. Parameters ---------- table_name: string Name of SQL table. con: ADBC Connection, SQLAlchemy connectable, str, or sqlite3 connection ADBC provides high performance I/O with native type support, where available. Using SQLAlchemy makes it possible to use any DB supported by that library. If a DBAPI2 object, only sqlite3 is supported. schema : string, default None Name of SQL schema in database to write to (if database flavor supports this). If None, use default schema (default). Returns ------- boolean",
    "type": "function",
    "file_path": "pandas\\pandas\\io\\sql.py",
    "ast_data": "FunctionDef name:has_table arg:table_name arg:con arg:schema arguments arg arg arg With Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "format_all",
    "source_code": "@staticmethod\ndef format_all(tbs):\n    import torch._C._profiler\n    rs: list[Optional[list[str]]] = []\n    delayed_idxs = []\n    for i, tb in enumerate(tbs):\n        if tb.tb is None:\n            rs.append([])\n        else:\n            rs.append(None)\n            delayed_idxs.append(i)\n    torch._C._profiler.symbolize_tracebacks([tbs[i].tb for i in delayed_idxs])\n    for i in delayed_idxs:\n        rs[i] = traceback.format_list(tbs[i].summary())\n    return rs",
    "docstring": "Bulk version of CapturedTraceback.format. Returns a list of list of strings.",
    "type": "method",
    "file_path": "pytorch\\torch\\utils\\_traceback.py",
    "ast_data": "FunctionDef name:format_all arg:tbs arguments arg Assign For Call If Compare Call Call Call Call For Assign Call Call Return return:yes"
  },
  {
    "library": "scipy",
    "name": "_arc_jac_sn",
    "source_code": "def _arc_jac_sn(w, m):\n\n    def _complement(kx):\n        return ((1 - kx) * (1 + kx)) ** 0.5\n    k = m ** 0.5\n    if k > 1:\n        return np.nan\n    elif k == 1:\n        return np.arctanh(w)\n    ks = [k]\n    niter = 0\n    while ks[-1] != 0:\n        k_ = ks[-1]\n        k_p = _complement(k_)\n        ks.append((1 - k_p) / (1 + k_p))\n        niter += 1\n        if niter > _ARC_JAC_SN_MAXITER:\n            raise ValueError('Landen transformation not converging')\n    K = np.prod(1 + np.array(ks[1:])) * np.pi / 2\n    wns = [w]\n    for kn, knext in zip(ks[:-1], ks[1:]):\n        wn = wns[-1]\n        wnext = 2 * wn / ((1 + knext) * (1 + _complement(kn * wn)))\n        wns.append(wnext)\n    u = 2 / np.pi * np.arcsin(wns[-1])\n    z = K * u\n    return z",
    "docstring": "Inverse Jacobian elliptic sn Solve for z in w = sn(z, m) Parameters ---------- w : complex scalar argument m : scalar modulus; in interval [0, 1] See [1], Eq. (56) References ---------- .. [1] Orfanidis, \"Lecture Notes on Elliptic Filter Design\",",
    "type": "function",
    "file_path": "scipy\\scipy\\signal\\_filter_design.py",
    "ast_data": "FunctionDef name:_arc_jac_sn arg:w arg:m arguments arg arg FunctionDef name:_complement arg:kx arguments arg Return return:yes Assign If Compare Return return:yes If Compare Return return:yes Call Assign Assign While Compare Assign Assign Call Call If Compare Raise Call Assign Call Call Assign For Call Assign Assign Call Call Assign Call Assign Return return:yes"
  },
  {
    "library": "django",
    "name": "LogoutView",
    "source_code": "class LogoutView(RedirectURLMixin, TemplateView):\n    http_method_names = ['post', 'options']\n    template_name = 'registration/logged_out.html'\n    extra_context = None\n\n    @method_decorator(csrf_protect)\n    @method_decorator(never_cache)\n    def dispatch(self, request, *args, **kwargs):\n        return super().dispatch(request, *args, **kwargs)\n\n    def post(self, request, *args, **kwargs):\n        auth_logout(request)\n        redirect_to = self.get_success_url()\n        if redirect_to != request.get_full_path():\n            return HttpResponseRedirect(redirect_to)\n        return super().get(request, *args, **kwargs)\n\n    def get_default_redirect_url(self):\n        if self.next_page:\n            return resolve_url(self.next_page)\n        elif settings.LOGOUT_REDIRECT_URL:\n            return resolve_url(settings.LOGOUT_REDIRECT_URL)\n        else:\n            return self.request.path\n\n    def get_context_data(self, **kwargs):\n        context = super().get_context_data(**kwargs)\n        current_site = get_current_site(self.request)\n        context.update({'site': current_site, 'site_name': current_site.name, 'title': _('Logged out'), 'subtitle': None, **(self.extra_context or {})})\n        return context",
    "docstring": "Log out the user and display the 'You are logged out' message.",
    "type": "class",
    "file_path": "django\\django\\contrib\\auth\\views.py",
    "ast_data": "ClassDef name:LogoutView Assign Assign Assign FunctionDef name:dispatch arg:self arg:request arguments arg arg arg arg Return return:yes Call Call Call Call FunctionDef name:post arg:self arg:request arguments arg arg arg arg Call Assign Call If Compare Call Return return:yes Call Return return:yes Call Call FunctionDef name:get_default_redirect_url arg:self arguments arg If Return return:yes Call If Return return:yes Call Return return:yes FunctionDef name:get_context_data arg:self arguments arg arg Assign Call Call Assign Call Call Call BoolOp Return return:yes"
  },
  {
    "library": "sphinx",
    "name": "isenumclass",
    "source_code": "def isenumclass(x: Any) -> TypeIs[type[enum.Enum]]:\n    return isclass(x) and issubclass(x, enum.Enum)",
    "docstring": "Check if the object is an :class:.",
    "type": "function",
    "file_path": "sphinx\\sphinx\\util\\inspect.py",
    "ast_data": "FunctionDef name:isenumclass arg:x arguments arg Return return:yes BoolOp Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_SparseAddGrad",
    "source_code": "@ops.RegisterGradient('SparseAdd')\ndef _SparseAddGrad(op: ops.Operation, *grads):\n    val_grad = grads[1]\n    a_indices = op.inputs[0]\n    b_indices = op.inputs[3]\n    sum_indices = op.outputs[0]\n    a_val_grad, b_val_grad = gen_sparse_ops.sparse_add_grad(val_grad, a_indices, b_indices, sum_indices)\n    a_val_grad.set_shape(op.inputs[1].get_shape())\n    b_val_grad.set_shape(op.inputs[4].get_shape())\n    return (None, a_val_grad, None, None, b_val_grad, None, None)",
    "docstring": "The backward operator for the SparseAdd op. The SparseAdd op calculates A + B, where A, B, and the sum are all represented as objects. This op takes in the upstream gradient w.r.t. non-empty values of the sum, and outputs the gradients w.r.t. the non-empty values of A and B. Args: op: the SparseAdd op *grads: the incoming gradients, one element per output of Returns: Gradient for each of the 6 input tensors of SparseAdd: (a_indices, a_values, a_shape, b_indices, b_values, b_shape, thresh) The gradients for the indices, shapes, and the threshold are None.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\sparse_grad.py",
    "ast_data": "FunctionDef name:_SparseAddGrad arg:op arguments arg arg Assign Assign Assign Assign Assign Call Call Call Call Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "standard_kwargs",
    "source_code": "def standard_kwargs(kwarg_names, expanded_args):\n    kwarg_values = expanded_args[len(expanded_args) - len(kwarg_names):]\n    expanded_args_without_kwargs = expanded_args[:len(expanded_args) - len(kwarg_names)]\n    expanded_kwargs = dict(zip(kwarg_names, kwarg_values))\n    return (expanded_args_without_kwargs, expanded_kwargs)",
    "docstring": "Separate args and kwargs from s that standardize kwargs. Most s standardize the kwargs that they give, so this will separate the args and kwargs they pass. Functions that don't are linear and convND.",
    "type": "function",
    "file_path": "pytorch\\torch\\nn\\utils\\_expanded_weights\\expanded_weights_utils.py",
    "ast_data": "FunctionDef name:standard_kwargs arg:kwarg_names arg:expanded_args arguments arg arg Assign Call Call Assign Call Call Assign Call Call Return return:yes"
  },
  {
    "library": "authlib",
    "name": "should_slow_down",
    "source_code": "def should_slow_down(self, credential):\n    raise NotImplementedError()",
    "docstring": "The authorization request is still pending and polling should continue, but the interval MUST be increased by 5 seconds for this and all subsequent requests.",
    "type": "method",
    "file_path": "authlib\\authlib\\oauth2\\rfc8628\\device_code.py",
    "ast_data": "FunctionDef name:should_slow_down arg:self arg:credential arguments arg arg Raise Call"
  },
  {
    "library": "tensorflow",
    "name": "_build_recursive_hd_scatter",
    "source_code": "def _build_recursive_hd_scatter(input_tensors, devices):\n    num_devices = len(devices)\n    num_hops = int(math.log(num_devices, 2))\n    assert num_devices == 2 ** num_hops, 'num_devices must be a power of 2'\n    chunks = input_tensors\n    for h in reversed(range(0, num_hops)):\n        span = 2 ** h\n        group_size = span * 2\n        new_chunks = [[] for _ in devices]\n        for d in range(0, num_devices):\n            if d % group_size >= group_size / 2:\n                continue\n            left_idx = d\n            right_idx = d + span\n            left_dev = devices[left_idx]\n            right_dev = devices[right_idx]\n            with ops.device(left_dev):\n                new_chunks[left_idx] = array_ops.concat([chunks[left_idx], chunks[right_idx]], 0)\n            with ops.device(right_dev):\n                new_chunks[right_idx] = array_ops.concat([chunks[left_idx], chunks[right_idx]], 0)\n        chunks = new_chunks\n    return chunks",
    "docstring": "Construct the scatter phase of recursive halving-doubling all-reduce. Args: input_tensors: list of that are fully-reduced shards. devices: a list of strings naming the devices on which the reconstituted full tensors should be placed. Returns: list of which are the fully reduced tensors.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\v1\\all_reduce.py",
    "ast_data": "FunctionDef name:_build_recursive_hd_scatter arg:input_tensors arg:devices arguments arg arg Assign Call Assign Call Call Compare Assign For Call Call Assign Assign Assign For Call If Compare Assign Assign Assign Assign With Call Assign Call With Call Assign Call Assign Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "FigureManagerTemplate",
    "source_code": "class FigureManagerTemplate(FigureManagerBase):\n    pass",
    "docstring": "Helper class for pyplot mode, wraps everything up into a neat bundle. For non-interactive backends, the base class is sufficient. For interactive backends, see the documentation of the class for the list of methods that can/should be overridden.",
    "type": "class",
    "file_path": "matplotlib\\lib\\matplotlib\\backends\\backend_template.py",
    "ast_data": "ClassDef name:FigureManagerTemplate"
  },
  {
    "library": "pytorch",
    "name": "set_optimizer_state_dict",
    "source_code": "def set_optimizer_state_dict(model: nn.Module, optimizers: Union[torch.optim.Optimizer, Iterable[torch.optim.Optimizer]], optim_state_dict: OptimizerStateType, *, options: Optional[StateDictOptions]=None) -> None:\n    with _gc_context():\n        optimizers = (optimizers,) if isinstance(optimizers, torch.optim.Optimizer) else tuple(optimizers)\n        info = _verify_options(model, optimizers, optim_only=True, options=options)\n        _verify_state_dict({}, optim_state_dict, info)\n        _load_optim_state_dict(model, optimizers, optim_state_dict, info)",
    "docstring": "Load the optimizers state_dict. The counterpart of `StateDictOptions` for the details. Returns: None :type optim_state_dict: typing.OptimizerStateType",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\checkpoint\\state_dict.py",
    "ast_data": "FunctionDef name:set_optimizer_state_dict arg:model arg:optimizers arg:optim_state_dict arguments arg arg arg arg With Call Assign Call Call Assign Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "remove_tensor_overload_for_qdq_ops",
    "source_code": "def remove_tensor_overload_for_qdq_ops(match_pattern: GraphModule) -> None:\n    _MAP = {torch.ops.quantized_decomposed.quantize_per_tensor.default: torch.ops.quantized_decomposed.quantize_per_tensor, torch.ops.quantized_decomposed.dequantize_per_tensor.default: torch.ops.quantized_decomposed.dequantize_per_tensor, torch.ops.quantized_decomposed.quantize_per_tensor.tensor: torch.ops.quantized_decomposed.quantize_per_tensor, torch.ops.quantized_decomposed.dequantize_per_tensor.tensor: torch.ops.quantized_decomposed.dequantize_per_tensor, torch.ops.quantized_decomposed.quantize_per_tensor.tensor2: torch.ops.quantized_decomposed.quantize_per_tensor, torch.ops.quantized_decomposed.dequantize_per_tensor.tensor2: torch.ops.quantized_decomposed.dequantize_per_tensor, torch.ops.quantized_decomposed.quantize_per_channel.default: torch.ops.quantized_decomposed.quantize_per_channel, torch.ops.quantized_decomposed.dequantize_per_channel.default: torch.ops.quantized_decomposed.dequantize_per_channel, torch.ops.aten.clamp.Tensor: torch.ops.aten.clamp}\n    for n in match_pattern.graph.nodes:\n        if n.op != 'call_function':\n            continue\n        if n.target in _MAP:\n            n.target = _MAP[n.target]",
    "docstring": "Remove .tensor overload for quantize/dequantize ops so that we can use the match_pattern that we get from torchdynamo export to match the output of convert_pt2e",
    "type": "function",
    "file_path": "pytorch\\torch\\ao\\quantization\\pt2e\\utils.py",
    "ast_data": "FunctionDef name:remove_tensor_overload_for_qdq_ops arg:match_pattern arguments arg Assign For If Compare If Compare Assign"
  },
  {
    "library": "django",
    "name": "_if_none_match_passes",
    "source_code": "def _if_none_match_passes(target_etag, etags):\n    if not target_etag:\n        return True\n    elif etags == ['*']:\n        return False\n    else:\n        target_etag = target_etag.strip('W/')\n        etags = (etag.strip('W/') for etag in etags)\n        return target_etag not in etags",
    "docstring": "Test the If-None-Match comparison as defined in RFC 9110 Section 13.1.2.",
    "type": "function",
    "file_path": "django\\django\\utils\\cache.py",
    "ast_data": "FunctionDef name:_if_none_match_passes arg:target_etag arg:etags arguments arg arg If Return return:yes If Compare Return return:yes Assign Call Assign Call Return return:yes Compare"
  },
  {
    "library": "kornia",
    "name": "RgbToRgba",
    "source_code": "class RgbToRgba(Module):\n    ONNX_DEFAULT_INPUTSHAPE: ClassVar[list[int]] = [-1, 3, -1, -1]\n    ONNX_DEFAULT_OUTPUTSHAPE: ClassVar[list[int]] = [-1, 4, -1, -1]\n\n    def __init__(self, alpha_val: Union[float, Tensor]) -> None:\n        super().__init__()\n        self.alpha_val = alpha_val\n\n    def forward(self, image: Tensor) -> Tensor:\n        return rgb_to_rgba(image, self.alpha_val)",
    "docstring": "Convert an image from RGB to RGBA. Add an alpha channel to existing RGB image. Args: alpha_val: A float number for the alpha value or a tensor of shape :math:. Returns: Tensor: RGBA version of the image with shape :math:. Shape: - image: :math: - output: :math: .. note:: The current functionality is NOT supported by Torchscript. Example: >>> input = torch.rand(2, 3, 4, 5) >>> rgba = RgbToRgba(1.) >>> output = rgba(input) # 2x4x4x5",
    "type": "class",
    "file_path": "kornia\\kornia\\color\\rgb.py",
    "ast_data": "ClassDef name:RgbToRgba FunctionDef name:__init__ arg:self arg:alpha_val arguments arg arg Call Call Assign FunctionDef name:forward arg:self arg:image arguments arg arg Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "get_text_bounds",
    "source_code": "def get_text_bounds(self, renderer):\n    return self._text.get_window_extent(renderer).transformed(self.get_data_transform().inverted()).bounds",
    "docstring": "Return the text bounds as *(x, y, width, height)* in table coordinates.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\table.py",
    "ast_data": "FunctionDef name:get_text_bounds arg:self arg:renderer arguments arg arg Return return:yes Call Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_init_from_proto",
    "source_code": "def _init_from_proto(self, queue_runner_def, import_scope=None):\n    assert isinstance(queue_runner_def, queue_runner_pb2.QueueRunnerDef)\n    g = ops.get_default_graph()\n    self._queue = g.as_graph_element(ops.prepend_name_scope(queue_runner_def.queue_name, import_scope))\n    self._enqueue_ops = [g.as_graph_element(ops.prepend_name_scope(op, import_scope)) for op in queue_runner_def.enqueue_op_name]\n    self._close_op = g.as_graph_element(ops.prepend_name_scope(queue_runner_def.close_op_name, import_scope))\n    self._cancel_op = g.as_graph_element(ops.prepend_name_scope(queue_runner_def.cancel_op_name, import_scope))\n    self._queue_closed_exception_types = tuple((errors.exception_type_from_error_code(code) for code in queue_runner_def.queue_closed_exception_types))\n    if not self._queue_closed_exception_types:\n        self._queue_closed_exception_types = (errors.OutOfRangeError,)",
    "docstring": "Create a QueueRunner from . Args: queue_runner_def: Optional protocol buffer. import_scope: Optional . Name scope to add.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\training\\queue_runner_impl.py",
    "ast_data": "FunctionDef name:_init_from_proto arg:self arg:queue_runner_def arg:import_scope arguments arg arg arg Call Assign Call Assign Call Call Assign Call Call Assign Call Call Assign Call Call Assign Call Call If Assign"
  },
  {
    "library": "seaborn",
    "name": "label",
    "source_code": "def label(self, formatter: Formatter | None=None, *, concise: bool=False) -> Temporal:\n    new = copy(self)\n    new._label_params = {'formatter': formatter, 'concise': concise}\n    return new",
    "docstring": "Configure the appearance of tick labels for the scale's axis or legend. .. note:: This API is under construction and will be enhanced over time. Parameters ---------- formatter : :class: subclass Pre-configured formatter to use; other parameters will be ignored. concise : bool If True, use :class: to make the tick labels as compact as possible. Returns ------- scale Copy of self with new label configuration.",
    "type": "method",
    "file_path": "seaborn\\seaborn\\_core\\scales.py",
    "ast_data": "FunctionDef name:label arg:self arg:formatter arguments arg arg arg Assign Call Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "disable_run_metadata",
    "source_code": "def disable_run_metadata(self):\n    if not self._context_handle:\n        return\n    pywrap_tfe.TFE_ContextDisableRunMetadata(self._context_handle)",
    "docstring": "Disables tracing of op execution via RunMetadata.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\context.py",
    "ast_data": "FunctionDef name:disable_run_metadata arg:self arguments arg If Return return:no Call"
  },
  {
    "library": "matplotlib",
    "name": "__init__",
    "source_code": "def __init__(self, widthA=1.0, lengthA=0.2, angleA=None):\n    super().__init__(widthA=widthA, lengthA=lengthA, angleA=angleA)",
    "docstring": "Parameters ---------- widthA : float, default: 1.0 Width of the bracket. lengthA : float, default: 0.2 Length of the bracket. angleA : float, default: 0 degrees Orientation of the bracket, as a counterclockwise angle. 0 degrees means perpendicular to the line.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\patches.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:widthA arg:lengthA arg:angleA arguments arg arg arg arg Call Call"
  },
  {
    "library": "virtualenv",
    "name": "verbosity",
    "source_code": "@property\ndef verbosity(self):\n    return self._verbosity",
    "docstring": "The verbosity of the run.",
    "type": "method",
    "file_path": "virtualenv\\src\\virtualenv\\run\\session.py",
    "ast_data": "FunctionDef name:verbosity arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "stateless_random_jpeg_quality",
    "source_code": "@tf_export('image.stateless_random_jpeg_quality', v1=[])\n@dispatch.add_dispatch_support\ndef stateless_random_jpeg_quality(image, min_jpeg_quality, max_jpeg_quality, seed):\n    if min_jpeg_quality < 0 or max_jpeg_quality < 0 or min_jpeg_quality > 100 or (max_jpeg_quality > 100):\n        raise ValueError('jpeg encoding range must be between 0 and 100.')\n    if min_jpeg_quality >= max_jpeg_quality:\n        raise ValueError('`min_jpeg_quality` must be less than `max_jpeg_quality`.')\n    jpeg_quality = stateless_random_ops.stateless_random_uniform(shape=[], minval=min_jpeg_quality, maxval=max_jpeg_quality, seed=seed, dtype=dtypes.int32)\n    return adjust_jpeg_quality(image, jpeg_quality)",
    "docstring": "Deterministically radomize jpeg encoding quality for inducing jpeg noise. Guarantees the same results given the same independent of how many times the function is called, and independent of global seed settings (e.g. ). must be in the interval and less than . must be in the interval . Usage Example: >>> x = tf.constant([[[1, 2, 3], ... [4, 5, 6]], ... [[7, 8, 9], ... [10, 11, 12]]], dtype=tf.uint8) >>> seed = (1, 2) >>> tf.image.stateless_random_jpeg_quality(x, 75, 95, seed) Args: image: 3D image. Size of the last dimension must be 1 or 3. min_jpeg_quality: Minimum jpeg encoding quality to use. max_jpeg_quality: Maximum jpeg encoding quality to use. seed: A shape [2] Tensor, the seed to the random number generator. Must have dtype or . (When using XLA, only is allowed.) Returns: Adjusted image(s), same shape and DType as . Raises: ValueError: if or is invalid.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\image_ops_impl.py",
    "ast_data": "FunctionDef name:stateless_random_jpeg_quality arg:image arg:min_jpeg_quality arg:max_jpeg_quality arg:seed arguments arg arg arg arg If BoolOp Compare Compare Compare Compare Raise Call If Compare Raise Call Assign Call Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "_configure_outputs_meta",
    "source_code": "def _configure_outputs_meta(self, outputs_meta: tuple[torch.Tensor, ...]):\n    assert self._outputs_meta is None, 'Attempting to reconfigure output_meta, which is not supported'\n    self._outputs_meta = tuple(outputs_meta)",
    "docstring": "Track the output shapes/dtype of this stage since they determine the send operation(s) which must match recv operations of the next stage. The next stage _will_ be freezing its recv buffers based on its initial configuration, so it's important to also freeze/validate the output side to avoid any send/recv mismatches which could show up as hangs, silent corruption, or other errors.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\pipelining\\stage.py",
    "ast_data": "FunctionDef name:_configure_outputs_meta arg:self arg:outputs_meta arguments arg arg Compare Assign Call"
  },
  {
    "library": "matplotlib",
    "name": "CurveA",
    "source_code": "@_register_style(_style_list, name='<-')\nclass CurveA(_Curve):\n    arrow = '<-'",
    "docstring": "An arrow with a head at its start point.",
    "type": "class",
    "file_path": "matplotlib\\lib\\matplotlib\\patches.py",
    "ast_data": "ClassDef name:CurveA Assign Call"
  },
  {
    "library": "pandas",
    "name": "_interp_limit",
    "source_code": "def _interp_limit(invalid: npt.NDArray[np.bool_], fw_limit: int | None, bw_limit: int | None) -> np.ndarray:\n    N = len(invalid)\n    f_idx = np.array([], dtype=np.int64)\n    b_idx = np.array([], dtype=np.int64)\n    assume_unique = True\n\n    def inner(invalid, limit: int):\n        limit = min(limit, N)\n        windowed = np.lib.stride_tricks.sliding_window_view(invalid, limit + 1).all(1)\n        idx = np.union1d(np.where(windowed)[0] + limit, np.where((~invalid[:limit + 1]).cumsum() == 0)[0])\n        return idx\n    if fw_limit is not None:\n        if fw_limit == 0:\n            f_idx = np.where(invalid)[0]\n            assume_unique = False\n        else:\n            f_idx = inner(invalid, fw_limit)\n    if bw_limit is not None:\n        if bw_limit == 0:\n            return f_idx\n        else:\n            b_idx = N - 1 - inner(invalid[::-1], bw_limit)\n            if fw_limit == 0:\n                return b_idx\n    return np.intersect1d(f_idx, b_idx, assume_unique=assume_unique)",
    "docstring": "Get indexers of values that won't be filled because they exceed the limits. Parameters ---------- invalid : np.ndarray[bool] fw_limit : int or None forward limit to index bw_limit : int or None backward limit to index Returns ------- set of indexers Notes ----- This is equivalent to the more readable, but slower .. code-block:: python def _interp_limit(invalid, fw_limit, bw_limit): for x in np.where(invalid)[0]: if invalid[max(0, x - fw_limit) : x + bw_limit + 1].all(): yield x",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\missing.py",
    "ast_data": "FunctionDef name:_interp_limit arg:invalid arg:fw_limit arg:bw_limit arguments arg arg arg Assign Call Assign Call Assign Call Assign FunctionDef name:inner arg:invalid arg:limit arguments arg arg Assign Call Assign Call Call Assign Call Call Call Compare Call Return return:yes If Compare If Compare Assign Call Assign Assign Call If Compare If Compare Return return:yes Assign Call If Compare Return return:yes Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "get_soft_device_placement",
    "source_code": "@tf_export('config.get_soft_device_placement')\ndef get_soft_device_placement():\n    return context.context().soft_device_placement",
    "docstring": "Return status of soft device placement flag. If enabled, ops can be placed on different devices than the device explicitly assigned by the user. This potentially has a large performance cost due to an increase in data communication between devices. Some cases where soft_device_placement would modify device assignment are: 1. no GPU/TPU implementation for the OP 2. no GPU devices are known or registered 3. need to co-locate with reftype input(s) which are from CPU 4. an OP can not be compiled by XLA. Common for TPU which always requires the XLA compiler. For TPUs, if this option is true, a feature called automatic outside compilation is enabled. Automatic outside compilation will move uncompilable ops within a TPU program to instead run on the host. This can be used when encountering compilation failures due to unsupported ops. Returns: A boolean indicating if soft placement is enabled.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\config.py",
    "ast_data": "FunctionDef name:get_soft_device_placement arguments Return return:yes Call Call"
  },
  {
    "library": "scipy",
    "name": "write",
    "source_code": "def write(self, arr, name):\n    if scipy.sparse.issparse(arr):\n        self.write_sparse(arr, name)\n        return\n    arr = np.asarray(arr)\n    dt = arr.dtype\n    if not dt.isnative:\n        arr = arr.astype(dt.newbyteorder('='))\n    dtt = dt.type\n    if dtt is np.object_:\n        raise TypeError('Cannot save object arrays in Mat4')\n    elif dtt is np.void:\n        raise TypeError('Cannot save void type arrays')\n    elif dtt in (np.str_, np.bytes_):\n        self.write_char(arr, name)\n        return\n    self.write_numeric(arr, name)",
    "docstring": "Write matrix , with name Parameters ---------- arr : array_like array to write name : str name in matlab workspace",
    "type": "method",
    "file_path": "scipy\\scipy\\io\\matlab\\_mio4.py",
    "ast_data": "FunctionDef name:write arg:self arg:arr arg:name arguments arg arg arg If Call Call Return return:no Assign Call Assign If Assign Call Call Assign If Compare Raise Call If Compare Raise Call If Compare Call Return return:no Call"
  },
  {
    "library": "matplotlib",
    "name": "dviFontName",
    "source_code": "def dviFontName(self, dvifont):\n    dvi_info = self._dviFontInfo.get(dvifont.texname)\n    if dvi_info is not None:\n        return dvi_info.pdfname\n    tex_font_map = dviread.PsfontsMap(dviread.find_tex_file('pdftex.map'))\n    psfont = tex_font_map[dvifont.texname]\n    if psfont.filename is None:\n        raise ValueError('No usable font file found for {} (TeX: {}); the font may lack a Type-1 version'.format(psfont.psname, dvifont.texname))\n    pdfname = next(self._internal_font_seq)\n    _log.debug('Assigning font %s = %s (dvi)', pdfname, dvifont.texname)\n    self._dviFontInfo[dvifont.texname] = types.SimpleNamespace(dvifont=dvifont, pdfname=pdfname, fontfile=psfont.filename, basefont=psfont.psname, encodingfile=psfont.encoding, effects=psfont.effects)\n    return pdfname",
    "docstring": "Given a dvi font object, return a name suitable for Op.selectfont. This registers the font information internally (in ``) if not yet registered.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\backends\\backend_pdf.py",
    "ast_data": "FunctionDef name:dviFontName arg:self arg:dvifont arguments arg arg Assign Call If Compare Return return:yes Assign Call Call Assign If Compare Raise Call Call Assign Call Call Assign Call Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "named_steps",
    "source_code": "@property\ndef named_steps(self):\n    return Bunch(**dict(self.steps))",
    "docstring": "Access the steps by name. Read-only attribute to access any step by given name. Keys are steps names and values are the steps objects.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\pipeline.py",
    "ast_data": "FunctionDef name:named_steps arg:self arguments arg Return return:yes Call Call"
  },
  {
    "library": "django",
    "name": "encoding",
    "source_code": "@encoding.setter\ndef encoding(self, val):\n    self._encoding = val\n    if hasattr(self, 'GET'):\n        del self.GET\n    if hasattr(self, '_post'):\n        del self._post",
    "docstring": "Set the encoding used for GET/POST accesses. If the GET or POST dictionary has already been created, remove and recreate it on the next access (so that it is decoded correctly).",
    "type": "method",
    "file_path": "django\\django\\http\\request.py",
    "ast_data": "FunctionDef name:encoding arg:self arg:val arguments arg arg Assign If Call If Call"
  },
  {
    "library": "scipy",
    "name": "Cube",
    "source_code": "class Cube(Benchmark):\n\n    def __init__(self, dimensions=2):\n        Benchmark.__init__(self, dimensions)\n        self._bounds = list(zip([-10.0] * self.N, [10.0] * self.N))\n        self.custom_bounds = ([0, 2], [0, 2])\n        self.global_optimum = [[1.0, 1.0]]\n        self.fglob = 0.0\n\n    def fun(self, x, *args):\n        self.nfev += 1\n        return 100.0 * (x[1] - x[0] ** 3.0) ** 2.0 + (1.0 - x[0]) ** 2.0",
    "docstring": "Cube objective function. This class defines the Cube global optimization problem. This is a multimodal minimization problem defined as follows: .. math:: f_{\\text{Cube}}(x) = 100(x_2 - x_1^3)^2 + (1 - x1)^2 Here, :math: represents the number of dimensions and :math: for :math:. *Global optimum*: :math: for :math: .. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions For Global Optimization Problems Int. Journal of Mathematical Modelling and Numerical Optimisation, 2013, 4, 150-194. TODO: jamil#41 has the wrong solution.",
    "type": "class",
    "file_path": "scipy\\benchmarks\\benchmarks\\go_benchmark_functions\\go_funcs_C.py",
    "ast_data": "ClassDef name:Cube FunctionDef name:__init__ arg:self arg:dimensions arguments arg arg Call Assign Call Call Assign Assign Assign FunctionDef name:fun arg:self arg:x arguments arg arg arg Return return:yes"
  },
  {
    "library": "django",
    "name": "check_query_object_type",
    "source_code": "def check_query_object_type(self, value, opts, field):\n    if hasattr(value, '_meta'):\n        if not check_rel_lookup_compatibility(value._meta.model, opts, field):\n            raise ValueError('Cannot query \"%s\": Must be \"%s\" instance.' % (value, opts.object_name))",
    "docstring": "Check whether the object passed while querying is of the correct type. If not, raise a ValueError specifying the wrong object.",
    "type": "method",
    "file_path": "django\\django\\db\\models\\sql\\query.py",
    "ast_data": "FunctionDef name:check_query_object_type arg:self arg:value arg:opts arg:field arguments arg arg arg arg If Call If Call Raise Call"
  },
  {
    "library": "virtualenv",
    "name": "quote",
    "source_code": "@staticmethod\ndef quote(string):\n    return shlex.quote(string)",
    "docstring": "Quote strings in the activation script. :param string: the string to quote :return: quoted string that works in the activation script",
    "type": "method",
    "file_path": "virtualenv\\src\\virtualenv\\activation\\via_template.py",
    "ast_data": "FunctionDef name:quote arg:string arguments arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "memory_allocated",
    "source_code": "def memory_allocated(device: 'Device'=None) -> int:\n    return memory_stats(device=device).get('allocated_bytes.all.current', 0)",
    "docstring": "Return the current GPU memory occupied by tensors in bytes for a given device. Args: device (torch.device or int, optional): selected device. Returns statistic for the current device, given by :func:, if :attr: is `nvidia-smicuda-memory-management` for more details about GPU memory management.",
    "type": "function",
    "file_path": "pytorch\\torch\\cuda\\memory.py",
    "ast_data": "FunctionDef name:memory_allocated arg:device arguments arg Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "ScriptObjectMeta",
    "source_code": "@dataclass\nclass ScriptObjectMeta:\n    constant_name: str\n    class_fqn: str",
    "docstring": "Metadata which is stored on nodes representing ScriptObjects.",
    "type": "class",
    "file_path": "pytorch\\torch\\export\\custom_obj.py",
    "ast_data": "ClassDef name:ScriptObjectMeta"
  },
  {
    "library": "tensorflow",
    "name": "tanh",
    "source_code": "@dispatch.add_dispatch_support\n@doc_controls.do_not_generate_docs\ndef tanh(x):\n    return nn.tanh(x)",
    "docstring": "Element-wise tanh. Args: x: A tensor or variable. Returns: A tensor.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\backend.py",
    "ast_data": "FunctionDef name:tanh arg:x arguments arg Return return:yes Call"
  },
  {
    "library": "sphinx",
    "name": "check_consistency",
    "source_code": "def check_consistency(self) -> None:\n    included = set().union(*self.included.values())\n    for docname in sorted(self.all_docs):\n        if docname not in self.files_to_rebuild:\n            if docname == self.config.root_doc:\n                continue\n            if docname in included:\n                continue\n            if 'orphan' in self.metadata[docname]:\n                continue\n            logger.warning(__(\"document isn't included in any toctree\"), location=docname, type='toc', subtype='not_included')\n    _check_toc_parents(self.toctree_includes)\n    self.domains._check_consistency()\n    self.events.emit('env-check-consistency', self)",
    "docstring": "Do consistency checks.",
    "type": "method",
    "file_path": "sphinx\\sphinx\\environment\\__init__.py",
    "ast_data": "FunctionDef name:check_consistency arg:self arguments arg Assign Call Call Call For Call If Compare If Compare If Compare If Compare Call Call Call Call Call"
  },
  {
    "library": "django",
    "name": "time_extract_sql",
    "source_code": "def time_extract_sql(self, lookup_type, sql, params):\n    return self.date_extract_sql(lookup_type, sql, params)",
    "docstring": "Given a lookup_type of 'hour', 'minute', or 'second', return the SQL that extracts a value from the given time field field_name.",
    "type": "method",
    "file_path": "django\\django\\db\\backends\\base\\operations.py",
    "ast_data": "FunctionDef name:time_extract_sql arg:self arg:lookup_type arg:sql arg:params arguments arg arg arg arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "serialize",
    "source_code": "@classmethod\ndef serialize(cls) -> _VirtualizedSerializer:\n    kwargs = {}\n    for f in dataclasses.fields(cls):\n        kwargs[f.name] = getattr(V, f.name)\n    return _VirtualizedSerializer(**kwargs)",
    "docstring": "Turn the current state of torch._inductor.virtualized.V into a serializable structure.",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\compile_fx_ext.py",
    "ast_data": "FunctionDef name:serialize arg:cls arguments arg Assign For Call Assign Call Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "_m_step",
    "source_code": "def _m_step(self, X, log_resp):\n    self.weights_, self.means_, self.covariances_ = _estimate_gaussian_parameters(X, np.exp(log_resp), self.reg_covar, self.covariance_type)\n    self.weights_ /= self.weights_.sum()\n    self.precisions_cholesky_ = _compute_precision_cholesky(self.covariances_, self.covariance_type)",
    "docstring": "M step. Parameters ---------- X : array-like of shape (n_samples, n_features) log_resp : array-like of shape (n_samples, n_components) Logarithm of the posterior probabilities (or responsibilities) of the point of each sample in X.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\mixture\\_gaussian_mixture.py",
    "ast_data": "FunctionDef name:_m_step arg:self arg:X arg:log_resp arguments arg arg arg Assign Call Call Call Assign Call"
  },
  {
    "library": "scrapy",
    "name": "url_has_any_extension",
    "source_code": "def url_has_any_extension(url: UrlT, extensions: Iterable[str]) -> bool:\n    lowercase_path = _parse_url(url).path.lower()\n    return any((lowercase_path.endswith(ext) for ext in extensions))",
    "docstring": "Return True if the url ends with one of the extensions provided",
    "type": "function",
    "file_path": "scrapy\\scrapy\\utils\\url.py",
    "ast_data": "FunctionDef name:url_has_any_extension arg:url arg:extensions arguments arg arg Assign Call Call Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "__call__",
    "source_code": "def __call__(self, shape, dtype=dtypes.float32, **kwargs):\n    self._validate_kwargs(kwargs)\n    dtype = _assert_float_dtype(dtype)\n    if _PARTITION_SHAPE in kwargs:\n        shape = kwargs[_PARTITION_SHAPE]\n    return self._random_generator.truncated_normal(shape, self.mean, self.stddev, dtype)",
    "docstring": "Returns a tensor object initialized as specified by the initializer. Args: shape: Shape of the tensor. dtype: Optional dtype of the tensor. Only floating point types are supported. **kwargs: Additional keyword arguments. Raises: ValueError: If the dtype is not floating point",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\init_ops_v2.py",
    "ast_data": "FunctionDef name:__call__ arg:self arg:shape arg:dtype arguments arg arg arg arg Call Assign Call If Compare Assign Return return:yes Call"
  },
  {
    "library": "django",
    "name": "get_language_bidi",
    "source_code": "def get_language_bidi():\n    lang = get_language()\n    if lang is None:\n        return False\n    else:\n        base_lang = get_language().split('-')[0]\n        return base_lang in settings.LANGUAGES_BIDI",
    "docstring": "Return selected language's BiDi layout. * False = left-to-right layout * True = right-to-left layout",
    "type": "function",
    "file_path": "django\\django\\utils\\translation\\trans_real.py",
    "ast_data": "FunctionDef name:get_language_bidi arguments Assign Call If Compare Return return:yes Assign Call Call Return return:yes Compare"
  },
  {
    "library": "tensorflow",
    "name": "add_type_based_api_dispatcher",
    "source_code": "def add_type_based_api_dispatcher(target):\n    if hasattr(target, TYPE_BASED_DISPATCH_ATTR):\n        raise ValueError(f'{target} already has a type-based API dispatcher.')\n    _, unwrapped = tf_decorator.unwrap(target)\n    target_argspec = tf_inspect.getargspec(unwrapped)\n    if target_argspec.varargs or target_argspec.keywords:\n        return target\n    setattr(target, TYPE_BASED_DISPATCH_ATTR, _api_dispatcher.PythonAPIDispatcher(unwrapped.__name__, target_argspec.args, target_argspec.defaults))\n    _TYPE_BASED_DISPATCH_SIGNATURES[target] = collections.defaultdict(list)\n    return target",
    "docstring": "Adds a PythonAPIDispatcher to the given TensorFlow API function.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\util\\dispatch.py",
    "ast_data": "FunctionDef name:add_type_based_api_dispatcher arg:target arguments arg If Call Raise Call Assign Call Assign Call If BoolOp Return return:yes Call Call Assign Call Return return:yes"
  },
  {
    "library": "scipy",
    "name": "sum_labels",
    "source_code": "def sum_labels(input, labels=None, index=None):\n    count, sum = _stats(input, labels, index)\n    return sum",
    "docstring": "Calculate the sum of the values of the array. Parameters ---------- input : array_like Values of inside the regions defined by are summed together. labels : array_like of ints, optional Assign labels to the values of the array. Has to have the same shape as . index : array_like, optional A single label number or a sequence of label numbers of the objects to be measured. Returns ------- sum : ndarray or scalar An array of the sums of values of inside the regions defined by with the same shape as . If 'index' is None or scalar, a scalar is returned. See Also -------- mean, median Examples -------- >>> from scipy import ndimage >>> input = [0,1,2,3] >>> labels = [1,1,2,2] >>> ndimage.sum_labels(input, labels, index=[1,2]) [1.0, 5.0] >>> ndimage.sum_labels(input, labels, index=1) 1 >>> ndimage.sum_labels(input, labels) 6",
    "type": "function",
    "file_path": "scipy\\scipy\\ndimage\\_measurements.py",
    "ast_data": "FunctionDef name:sum_labels arg:input arg:labels arg:index arguments arg arg arg Assign Call Return return:yes"
  },
  {
    "library": "django",
    "name": "salt",
    "source_code": "def salt(self):\n    char_count = math.ceil(self.salt_entropy / math.log2(len(RANDOM_STRING_CHARS)))\n    return get_random_string(char_count, allowed_chars=RANDOM_STRING_CHARS)",
    "docstring": "Generate a cryptographically secure nonce salt in ASCII with an entropy of at least bits.",
    "type": "method",
    "file_path": "django\\django\\contrib\\auth\\hashers.py",
    "ast_data": "FunctionDef name:salt arg:self arguments arg Assign Call Call Call Return return:yes Call"
  },
  {
    "library": "kornia",
    "name": "__repr__",
    "source_code": "def __repr__(self) -> str:\n    repr_buf = f'num_drop_channels={self.num_drop_channels}'\n    return repr_buf",
    "docstring": "Return a string representation of the object.",
    "type": "method",
    "file_path": "kornia\\kornia\\augmentation\\random_generator\\_2d\\channel_dropout.py",
    "ast_data": "FunctionDef name:__repr__ arg:self arguments arg Assign Return return:yes"
  },
  {
    "library": "django",
    "name": "remove_field",
    "source_code": "def remove_field(self, model, field):\n    if field.many_to_many and field.remote_field.through._meta.auto_created:\n        return self.delete_model(field.remote_field.through)\n    if field.db_parameters(connection=self.connection)['type'] is None:\n        return\n    if field.remote_field:\n        fk_names = self._constraint_names(model, [field.column], foreign_key=True)\n        for fk_name in fk_names:\n            self.execute(self._delete_fk_sql(model, fk_name))\n    sql = self.sql_delete_column % {'table': self.quote_name(model._meta.db_table), 'column': self.quote_name(field.column)}\n    self.execute(sql)\n    if self.connection.features.connection_persists_old_columns:\n        self.connection.close()\n    for sql in list(self.deferred_sql):\n        if isinstance(sql, Statement) and sql.references_column(model._meta.db_table, field.column):\n            self.deferred_sql.remove(sql)",
    "docstring": "Remove a field from a model. Usually involves deleting a column, but for M2Ms may involve deleting a table.",
    "type": "method",
    "file_path": "django\\django\\db\\backends\\base\\schema.py",
    "ast_data": "FunctionDef name:remove_field arg:self arg:model arg:field arguments arg arg arg If BoolOp Return return:yes Call If Compare Call Return return:no If Assign Call For Call Call Assign Call Call Call If Call For Call If BoolOp Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "_is_annotated",
    "source_code": "def _is_annotated(nodes: list[Node]):\n    annotated = False\n    for node in nodes:\n        annotated = annotated or ('quantization_annotation' in node.meta and node.meta['quantization_annotation']._annotated)\n    return annotated",
    "docstring": "Given a list of nodes (that represents an operator pattern), check if any of the node is annotated, return True if any of the node is annotated, otherwise return False",
    "type": "function",
    "file_path": "pytorch\\torch\\ao\\quantization\\quantizer\\xnnpack_quantizer_utils.py",
    "ast_data": "FunctionDef name:_is_annotated arg:nodes arguments arg Assign For Assign BoolOp BoolOp Compare Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "Component",
    "source_code": "class Component(enum.Enum):\n    PREPARE_TF_MODEL = 'PREPARE_TF_MODEL'\n    CONVERT_TF_TO_TFLITE_MODEL = 'CONVERT_TF_TO_TFLITE_MODEL'\n    OPTIMIZE_TFLITE_MODEL = 'OPTIMIZE_TFLITE_MODEL'",
    "docstring": "Enum class defining name of the converter components.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\lite\\python\\convert_phase.py",
    "ast_data": "ClassDef name:Component Assign Assign Assign"
  },
  {
    "library": "django",
    "name": "add_georss_point",
    "source_code": "def add_georss_point(self, handler, coords, w3c_geo=False):\n    if w3c_geo:\n        lon, lat = coords[:2]\n        handler.addQuickElement('geo:lat', '%f' % lat)\n        handler.addQuickElement('geo:lon', '%f' % lon)\n    else:\n        handler.addQuickElement('georss:point', self.georss_coords((coords,)))",
    "docstring": "Adds a GeoRSS point with the given coords using the given handler. Handles the differences between simple GeoRSS and the more popular W3C Geo specification.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\feeds.py",
    "ast_data": "FunctionDef name:add_georss_point arg:self arg:handler arg:coords arg:w3c_geo arguments arg arg arg arg If Assign Call Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "get_status",
    "source_code": "@property\ndef get_status(self):\n    return (self._initialized, self._error_message)",
    "docstring": "Get status of initialization. Returns: Tuple (Boolean indicating initialization status, List of error messages, if any)",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\tools\\tensorflow_builder\\compat_checker\\compat_checker.py",
    "ast_data": "FunctionDef name:get_status arg:self arguments arg Return return:yes"
  },
  {
    "library": "django",
    "name": "get_forward_related_filter",
    "source_code": "def get_forward_related_filter(self, obj):\n    return {self.fk_field: obj.pk, self.ct_field: ContentType.objects.get_for_model(obj).pk}",
    "docstring": "See corresponding method on RelatedField",
    "type": "method",
    "file_path": "django\\django\\contrib\\contenttypes\\fields.py",
    "ast_data": "FunctionDef name:get_forward_related_filter arg:self arg:obj arguments arg arg Return return:yes Call"
  },
  {
    "library": "numpy",
    "name": "TD",
    "source_code": "def TD(types, f=None, astype=None, in_=None, out=None, cfunc_alias=None, dispatch=None):\n    if f is not None:\n        if isinstance(f, str):\n            func_data = build_func_data(types, f)\n        elif len(f) != len(types):\n            raise ValueError('Number of types and f do not match')\n        else:\n            func_data = f\n    else:\n        func_data = (None,) * len(types)\n    if isinstance(in_, str):\n        in_ = (in_,) * len(types)\n    elif in_ is None:\n        in_ = (None,) * len(types)\n    elif len(in_) != len(types):\n        raise ValueError('Number of types and inputs do not match')\n    if isinstance(out, str):\n        out = (out,) * len(types)\n    elif out is None:\n        out = (None,) * len(types)\n    elif len(out) != len(types):\n        raise ValueError('Number of types and outputs do not match')\n    tds = []\n    for t, fd, i, o in zip(types, func_data, in_, out):\n        if dispatch:\n            dispt = ([k for k, v in dispatch if t in v] + [None])[0]\n        else:\n            dispt = None\n        tds.append(TypeDescription(t, f=fd, in_=i, out=o, astype=astype, cfunc_alias=cfunc_alias, dispatch=dispt))\n    return tds",
    "docstring": "Generate a TypeDescription instance for each item in types",
    "type": "function",
    "file_path": "numpy\\numpy\\_core\\code_generators\\generate_umath.py",
    "ast_data": "FunctionDef name:TD arg:types arg:f arg:astype arg:in_ arg:out arg:cfunc_alias arg:dispatch arguments arg arg arg arg arg arg arg If Compare If Call Assign Call If Compare Call Call Raise Call Assign Assign Call If Call Assign Call If Compare Assign Call If Compare Call Call Raise Call If Call Assign Call If Compare Assign Call If Compare Call Call Raise Call Assign For Call If Assign Compare Assign Call Call Return return:yes"
  },
  {
    "library": "scipy",
    "name": "time_count_neighbors",
    "source_code": "def time_count_neighbors(self, mn1n2, p, probe_radius, boxsize, leafsize, cls):\n    if cls != 'cKDTree_weighted':\n        self.T1.count_neighbors(self.T2, probe_radius, p=p)\n    else:\n        self.T1.count_neighbors(self.T2, probe_radius, weights=(self.w1, self.w2), p=p)",
    "docstring": "Count neighbors kd-tree dim | # points T1 | # points T2 | p | probe radius | BoxSize | LeafSize | cls",
    "type": "method",
    "file_path": "scipy\\benchmarks\\benchmarks\\spatial.py",
    "ast_data": "FunctionDef name:time_count_neighbors arg:self arg:mn1n2 arg:p arg:probe_radius arg:boxsize arg:leafsize arg:cls arguments arg arg arg arg arg arg arg If Compare Call Call"
  },
  {
    "library": "tensorflow",
    "name": "get_config",
    "source_code": "def get_config(self):\n    config = {'input_keep_prob': self._input_keep_prob, 'output_keep_prob': self._output_keep_prob, 'state_keep_prob': self._state_keep_prob, 'variational_recurrent': self._variational_recurrent, 'input_size': self._input_size, 'seed': self._seed}\n    if self._dropout_state_filter != _default_dropout_state_filter_visitor:\n        function, function_type, function_module = _serialize_function_to_config(self._dropout_state_filter)\n        config.update({'dropout_fn': function, 'dropout_fn_type': function_type, 'dropout_fn_module': function_module})\n    base_config = super(DropoutWrapperBase, self).get_config()\n    return dict(list(base_config.items()) + list(config.items()))",
    "docstring": "Returns the config of the dropout wrapper.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\layers\\legacy_rnn\\rnn_cell_wrapper_impl.py",
    "ast_data": "FunctionDef name:get_config arg:self arguments arg Assign If Compare Assign Call Call Assign Call Call Return return:yes Call Call Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "_reverse_repeat_tuple",
    "source_code": "def _reverse_repeat_tuple(t, n):\n    return tuple((x for x in reversed(t) for _ in range(n)))",
    "docstring": "Reverse the order of and repeat each element for times. This can be used to translate padding arg used by Conv and Pooling modules to the ones used by .",
    "type": "function",
    "file_path": "pytorch\\torch\\nn\\modules\\utils.py",
    "ast_data": "FunctionDef name:_reverse_repeat_tuple arg:t arg:n arguments arg arg Return return:yes Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "KLDivergence",
    "source_code": "class KLDivergence(MeanMetricWrapper):\n\n    def __init__(self, name='kullback_leibler_divergence', dtype=None):\n        super(KLDivergence, self).__init__(kullback_leibler_divergence, name, dtype=dtype)",
    "docstring": "Computes Kullback-Leibler divergence metric between and . Args: name: (Optional) string name of the metric instance. dtype: (Optional) data type of the metric result. Standalone usage: >>> m = tf.keras.metrics.KLDivergence() >>> m.update_state([[0, 1], [0, 0]], [[0.6, 0.4], [0.4, 0.6]]) >>> m.result().numpy() 0.45814306 >>> m.reset_state() >>> m.update_state([[0, 1], [0, 0]], [[0.6, 0.4], [0.4, 0.6]], ... sample_weight=[1, 0]) >>> m.result().numpy() 0.9162892 Usage with API:",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\metrics.py",
    "ast_data": "ClassDef name:KLDivergence FunctionDef name:__init__ arg:self arg:name arg:dtype arguments arg arg arg Call Call"
  },
  {
    "library": "pytorch",
    "name": "all_gather_dtensor",
    "source_code": "@abstractmethod\ndef all_gather_dtensor(self, tensor: DTensor, parent_mesh: Optional[DeviceMesh]) -> torch.Tensor:\n    ...",
    "docstring": "This is to be called before loading a *sharded* DTensor state dict. This gathers tensor in FSDP dimension and returns local tensor of TP DTensor.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\fsdp\\_fsdp_extensions.py",
    "ast_data": "FunctionDef name:all_gather_dtensor arg:self arg:tensor arg:parent_mesh arguments arg arg arg"
  },
  {
    "library": "scipy",
    "name": "run_basinhopping",
    "source_code": "def run_basinhopping(self):\n    kwargs = self.minimizer_kwargs\n    if hasattr(self.fun, 'temperature'):\n        kwargs['T'] = self.function.temperature\n    if hasattr(self.fun, 'stepsize'):\n        kwargs['stepsize'] = self.function.stepsize\n    minimizer_kwargs = {'method': 'L-BFGS-B'}\n    x0 = self.function.initial_vector()\n    minimizer_kwargs['jac'] = False\n    self.function.nfev = 0\n    t0 = time.time()\n    res = basinhopping(self.fun, x0, accept_test=self.accept_test, minimizer_kwargs=minimizer_kwargs, **kwargs)\n    t1 = time.time()\n    res.success = self.function.success(res.x)\n    res.nfev = self.function.nfev\n    self.add_result(res, t1 - t0, 'basinh.')",
    "docstring": "Do an optimization run for basinhopping",
    "type": "method",
    "file_path": "scipy\\benchmarks\\benchmarks\\optimize.py",
    "ast_data": "FunctionDef name:run_basinhopping arg:self arguments arg Assign If Call Assign If Call Assign Assign Assign Call Assign Assign Assign Call Assign Call Assign Call Assign Call Assign Call"
  },
  {
    "library": "tensorflow",
    "name": "stop",
    "source_code": "def stop(self):\n    self._should_preemption_thread_run = False\n    with self._cluster_update_lock:\n        self._cluster_due_for_update_or_finish.set()",
    "docstring": "Ensure the worker preemption thread is closed.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\coordinator\\cluster_coordinator.py",
    "ast_data": "FunctionDef name:stop arg:self arguments arg Assign With Call"
  },
  {
    "library": "tensorflow",
    "name": "broadcast_shape",
    "source_code": "def broadcast_shape(shape_x, shape_y):\n    if shape_x.ndims is None or shape_y.ndims is None:\n        return tensor_shape.unknown_shape()\n    return_dims = _broadcast_shape_helper(shape_x, shape_y)\n    if return_dims is None:\n        raise ValueError(f'Incompatible shapes for broadcasting. Two shapes are compatible if for each dimension pair they are either equal or one of them is 1. Received: {shape_x} and {shape_y}.')\n    return tensor_shape.TensorShape(return_dims)",
    "docstring": "Returns the broadcasted shape between and . Args: shape_x: A shape_y: A Returns: A representing the broadcasted shape. Raises: ValueError: If the two shapes can not be broadcasted.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\common_shapes.py",
    "ast_data": "FunctionDef name:broadcast_shape arg:shape_x arg:shape_y arguments arg arg If BoolOp Compare Compare Return return:yes Call Assign Call If Compare Raise Call Return return:yes Call"
  },
  {
    "library": "seaborn",
    "name": "_default_values",
    "source_code": "def _default_values(self, n: int) -> list:\n    if n > 2:\n        msg = ' '.join([f'The variable assigned to {self.variable} has more than two levels,', f'so {self.variable} values will cycle and may be uninterpretable'])\n        warnings.warn(msg, UserWarning)\n    return [x for x, _ in zip(itertools.cycle([True, False]), range(n))]",
    "docstring": "Return a list of n values, alternating True and False.",
    "type": "method",
    "file_path": "seaborn\\seaborn\\_core\\properties.py",
    "ast_data": "FunctionDef name:_default_values arg:self arg:n arguments arg arg If Compare Assign Call Call Return return:yes Call Call Call"
  },
  {
    "library": "matplotlib",
    "name": "set_alpha",
    "source_code": "def set_alpha(self, alpha):\n    martist.Artist._set_alpha_for_array(self, alpha)\n    if np.ndim(alpha) not in (0, 2):\n        raise TypeError('alpha must be a float, two-dimensional array, or None')\n    self._imcache = None",
    "docstring": "Set the alpha value used for blending - not supported on all backends. Parameters ---------- alpha : float or 2D array-like or None",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\image.py",
    "ast_data": "FunctionDef name:set_alpha arg:self arg:alpha arguments arg arg Call If Compare Call Raise Call Assign"
  },
  {
    "library": "scikit-learn",
    "name": "fit_predict",
    "source_code": "def fit_predict(self, X, y=None):\n    return super().fit_predict(X, y)",
    "docstring": "Perform spectral clustering on and return cluster labels. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) or (n_samples, n_samples) Training instances to cluster, similarities / affinities between instances if ``. y : Ignored Not used, present here for API consistency by convention. Returns ------- labels : ndarray of shape (n_samples,) Cluster labels.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\cluster\\_spectral.py",
    "ast_data": "FunctionDef name:fit_predict arg:self arg:X arg:y arguments arg arg arg Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_FakeServer",
    "source_code": "class _FakeServer(object):\n\n    def start(self):\n        logging.info('Creating a remote session to start a TensorFlow server, target = %r, session_config=%r', target, session_config)\n        session.Session(target=target, config=session_config)\n\n    def join(self):\n        while True:\n            time.sleep(5)",
    "docstring": "A fake server that runs a master session.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\distribute\\distribute_coordinator_utils.py",
    "ast_data": "ClassDef name:_FakeServer FunctionDef name:start arg:self arguments arg Call Call FunctionDef name:join arg:self arguments arg While Call"
  },
  {
    "library": "scipy",
    "name": "resample",
    "source_code": "def resample(self, size=None, seed=None):\n    if size is None:\n        size = int(self.neff)\n    random_state = check_random_state(seed)\n    norm = transpose(random_state.multivariate_normal(zeros((self.d,), float), self.covariance, size=size))\n    indices = random_state.choice(self.n, size=size, p=self.weights)\n    means = self.dataset[:, indices]\n    return means + norm",
    "docstring": "Randomly sample a dataset from the estimated pdf. Parameters ---------- size : int, optional The number of samples to draw. If not provided, then the size is the same as the effective number of samples in the underlying dataset. seed : {None, int, , }, optional If is None (or ), the singleton is used. If is an int, a new `seedseedsize`) ndarray The sampled dataset.",
    "type": "method",
    "file_path": "scipy\\scipy\\stats\\_kde.py",
    "ast_data": "FunctionDef name:resample arg:self arg:size arg:seed arguments arg arg arg If Compare Assign Call Assign Call Assign Call Call Call Assign Call Assign Return return:yes"
  },
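A usage sketch for `resample` via the public `scipy.stats.gaussian_kde` API; the data and sample counts are illustrative:

```python
import numpy as np
from scipy.stats import gaussian_kde

rng = np.random.default_rng(0)
data = rng.normal(size=200)   # 1-D dataset
kde = gaussian_kde(data)

# Draw 100 new points from the estimated pdf; output is (d, size).
samples = kde.resample(size=100, seed=rng)
print(samples.shape)  # (1, 100)
```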
  {
    "library": "kornia",
    "name": "AdjustBrightness",
    "source_code": "class AdjustBrightness(Module):\n\n    def __init__(self, brightness_factor: Union[float, Tensor]) -> None:\n        super().__init__()\n        self.brightness_factor: Union[float, Tensor] = brightness_factor\n\n    def forward(self, input: Tensor) -> Tensor:\n        return adjust_brightness(input, self.brightness_factor)",
    "docstring": "Adjust Brightness of an image. This implementation aligns OpenCV, not PIL. Hence, the output differs from TorchVision. The input image is expected to be in the range of [0, 1]. Args: brightness_factor: Brightness adjust factor per element in the batch. 0 does not modify the input image while any other number modify the brightness. Shape: - Input: Image/Input to be adjusted in the shape of :math:. - Output: Adjusted image in the shape of :math:. Example: >>> x = torch.ones(1, 1, 3, 3) >>> AdjustBrightness(1.)(x) tensor([[[[1., 1., 1.], [1., 1., 1.], [1., 1., 1.]]]]) >>> x = torch.ones(2, 5, 3, 3) >>> y = torch.ones(2) >>> AdjustBrightness(y)(x).shape torch.Size([2, 5, 3, 3])",
    "type": "class",
    "file_path": "kornia\\kornia\\enhance\\adjust.py",
    "ast_data": "ClassDef name:AdjustBrightness FunctionDef name:__init__ arg:self arg:brightness_factor arguments arg arg Call Call FunctionDef name:forward arg:self arg:input arguments arg arg Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "validate_udf",
    "source_code": "def validate_udf(func: Callable) -> None:\n    if not callable(func):\n        raise NotImplementedError('Numba engine can only be used with a single function.')\n    udf_signature = list(inspect.signature(func).parameters.keys())\n    expected_args = ['values', 'index']\n    min_number_args = len(expected_args)\n    if len(udf_signature) < min_number_args or udf_signature[:min_number_args] != expected_args:\n        raise NumbaUtilError(f'The first {min_number_args} arguments to {func.__name__} must be {expected_args}')",
    "docstring": "Validate user defined function for ops when using Numba with groupby ops. The first signature arguments should include: def f(values, index, ...): ... Parameters ---------- func : function, default False user defined function Returns ------- None Raises ------ NumbaUtilError",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\groupby\\numba_.py",
    "ast_data": "FunctionDef name:validate_udf arg:func arguments arg If Call Raise Call Assign Call Call Call Assign Assign Call If BoolOp Compare Call Compare Raise Call"
  },
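What `validate_udf` accepts and rejects, shown with `inspect.signature`; the UDF names here are hypothetical:

```python
import inspect

def good_udf(values, index):   # first two args match ['values', 'index']
    return values.sum()

def bad_udf(data):             # wrong leading argument names -> NumbaUtilError
    return data.sum()

print(list(inspect.signature(good_udf).parameters)[:2])  # ['values', 'index']
print(list(inspect.signature(bad_udf).parameters)[:2])   # ['data']
```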
  {
    "library": "tensorflow",
    "name": "publish_traceback",
    "source_code": "def publish_traceback(debug_server_urls, graph, feed_dict, fetches, old_graph_version):\n    from tensorflow.python.debug.lib import source_remote\n    if graph.version > old_graph_version:\n        run_key = common.get_run_key(feed_dict, fetches)\n        source_remote.send_graph_tracebacks(debug_server_urls, run_key, traceback.extract_stack(), graph, send_source=True)\n        return graph.version\n    else:\n        return old_graph_version",
    "docstring": "Publish traceback and source code if graph version is new. is compared with . If the former is higher (i.e., newer), the graph traceback and the associated source code is sent to the debug server at the specified gRPC URLs. Args: debug_server_urls: A single gRPC debug server URL as a or a of debug server URLs. graph: A Python object. feed_dict: Feed dictionary given to the call. fetches: Fetches from the call. old_graph_version: Old graph version to compare to. Returns: If , the new graph version as an . Else, the is returned.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\debug\\wrappers\\grpc_wrapper.py",
    "ast_data": "FunctionDef name:publish_traceback arg:debug_server_urls arg:graph arg:feed_dict arg:fetches arg:old_graph_version arguments arg arg arg arg arg If Compare Assign Call Call Call Return return:yes Return return:yes"
  },
  {
    "library": "kornia",
    "name": "trans",
    "source_code": "@classmethod\ndef trans(cls, x: Tensor, y: Tensor) -> Se2:\n    KORNIA_CHECK(x.shape == y.shape)\n    KORNIA_CHECK_SAME_DEVICES([x, y])\n    batch_size = x.shape[0] if len(x.shape) > 0 else None\n    rotation = So2.identity(batch_size, x.device, x.dtype)\n    return cls(rotation, stack((x, y), -1))",
    "docstring": "Construct a translation only Se2 instance. Args: x: the x-axis translation. y: the y-axis translation.",
    "type": "method",
    "file_path": "kornia\\kornia\\geometry\\liegroup\\se2.py",
    "ast_data": "FunctionDef name:trans arg:cls arg:x arg:y arguments arg arg arg Call Compare Call Assign Compare Call Assign Call Return return:yes Call Call"
  },
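A usage sketch, assuming `Se2` is importable from `kornia.geometry.liegroup` (matching the file_path above) and exposes a `t` translation accessor:

```python
import torch
from kornia.geometry.liegroup import Se2

# Batch of two pure translations; rotation defaults to the identity.
x = torch.tensor([1.0, 2.0])
y = torch.tensor([3.0, 4.0])
se2 = Se2.trans(x, y)
print(se2.t)  # translation part, shape (2, 2)
```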
  {
    "library": "scikit-learn",
    "name": "_parallel_func",
    "source_code": "def _parallel_func(self, X, y, func, routed_params):\n    self.transformer_list = list(self.transformer_list)\n    self._validate_transformers()\n    self._validate_transformer_weights()\n    transformers = list(self._iter())\n    return Parallel(n_jobs=self.n_jobs)((delayed(func)(transformer, X, y, weight, message_clsname='FeatureUnion', message=self._log_message(name, idx, len(transformers)), params=routed_params[name]) for idx, (name, transformer, weight) in enumerate(transformers, 1)))",
    "docstring": "Runs func in parallel on X and y",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\pipeline.py",
    "ast_data": "FunctionDef name:_parallel_func arg:self arg:X arg:y arg:func arg:routed_params arguments arg arg arg arg arg Assign Call Call Call Assign Call Call Return return:yes Call Call Call Call Call Call Call"
  },
  {
    "library": "pandas",
    "name": "_validate_setitem_value",
    "source_code": "def _validate_setitem_value(self, value):\n    kind = self.dtype.kind\n    if kind == 'b':\n        if lib.is_bool(value):\n            return value\n    elif kind == 'f':\n        if lib.is_integer(value) or lib.is_float(value):\n            return value\n    elif lib.is_integer(value) or (lib.is_float(value) and value.is_integer()):\n        return value\n    raise TypeError(f\"Invalid value '{value!s}' for dtype '{self.dtype}'\")",
    "docstring": "Check if we have a scalar that we can cast losslessly. Raises ------ TypeError",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\arrays\\masked.py",
    "ast_data": "FunctionDef name:_validate_setitem_value arg:self arg:value arguments arg arg Assign If Compare If Call Return return:yes If Compare If BoolOp Call Call Return return:yes If BoolOp Call BoolOp Call Call Return return:yes Raise Call"
  },
  {
    "library": "django",
    "name": "crs",
    "source_code": "@property\ndef crs(self):\n    return self.srs",
    "docstring": "Alias for property.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\geos\\geometry.py",
    "ast_data": "FunctionDef name:crs arg:self arguments arg Return return:yes"
  },
  {
    "library": "numpy",
    "name": "rollaxis",
    "source_code": "@array_function_dispatch(_rollaxis_dispatcher)\ndef rollaxis(a, axis, start=0):\n    n = a.ndim\n    axis = normalize_axis_index(axis, n)\n    if start < 0:\n        start += n\n    msg = \"'%s' arg requires %d <= %s < %d, but %d was passed in\"\n    if not 0 <= start < n + 1:\n        raise AxisError(msg % ('start', -n, 'start', n + 1, start))\n    if axis < start:\n        start -= 1\n    if axis == start:\n        return a[...]\n    axes = list(range(n))\n    axes.remove(axis)\n    axes.insert(start, axis)\n    return a.transpose(axes)",
    "docstring": "Roll the specified axis backwards, until it lies in a given position. This function continues to be supported for backward compatibility, but you should prefer . The function was added in NumPy 1.11. Parameters ---------- a : ndarray Input array. axis : int The axis to be rolled. The positions of the other axes do not change relative to one another. start : int, optional When `aa` is returned only if the order of the axes is changed, otherwise the input array is returned. See Also -------- moveaxis : Move array axes to new positions. roll : Roll the elements of an array by a number of positions along a given axis. Examples -------- >>> import numpy as np >>> a = np.ones((3,4,5,6)) >>> np.rollaxis(a, 3, 1).shape (3, 6, 4, 5) >>> np.rollaxis(a, 2).shape (5, 3, 4, 6) >>> np.rollaxis(a, 1, 4).shape (3, 5, 6, 4)",
    "type": "function",
    "file_path": "numpy\\numpy\\_core\\numeric.py",
    "ast_data": "FunctionDef name:rollaxis arg:a arg:axis arg:start arguments arg arg arg Assign Assign Call If Compare Assign If Compare Raise Call If Compare If Compare Return return:yes Assign Call Call Call Call Return return:yes Call Call"
  },
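The docstring's recommendation in practice: `moveaxis` reproduces the common `rollaxis` call directly:

```python
import numpy as np

a = np.ones((3, 4, 5, 6))
# Both move axis 3 so it lands at position 1.
print(np.rollaxis(a, 3, 1).shape)  # (3, 6, 4, 5)
print(np.moveaxis(a, 3, 1).shape)  # (3, 6, 4, 5) -- preferred spelling
```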
  {
    "library": "pytorch",
    "name": "poisson_nll_loss",
    "source_code": "def poisson_nll_loss(input: Tensor, target: Tensor, log_input: bool=True, full: bool=False, size_average: Optional[bool]=None, eps: float=1e-08, reduce: Optional[bool]=None, reduction: str='mean') -> Tensor:\n    if has_torch_function_variadic(input, target):\n        return handle_torch_function(poisson_nll_loss, (input, target), input, target, log_input=log_input, full=full, size_average=size_average, eps=eps, reduce=reduce, reduction=reduction)\n    if size_average is not None or reduce is not None:\n        reduction = _Reduction.legacy_get_string(size_average, reduce)\n    if reduction != 'none' and reduction != 'mean' and (reduction != 'sum'):\n        ret = input\n        raise ValueError(reduction + ' is not a valid value for reduction')\n    ret = torch.poisson_nll_loss(input, target, log_input, full, eps, _Reduction.get_enum(reduction))\n    return ret",
    "docstring": "Compute the Poisson negative log likelihood loss. See :class: for details. Args: input: Expectation of underlying Poisson distribution. target: Random sample :math:. log_input: If `\\exp(\\text{input}) - \\text{target} * \\text{input}\\text{input} - \\text{target} * \\log(\\text{input}+\\text{eps})\\text{target} * \\log(\\text{target}) - \\text{target} + 0.5 * \\log(2 * \\pi * \\text{target})reduction\\log(0)log_inputreductionsize_averagereducereduction`",
    "type": "function",
    "file_path": "pytorch\\torch\\nn\\functional.py",
    "ast_data": "FunctionDef name:poisson_nll_loss arg:input arg:target arg:log_input arg:full arg:size_average arg:eps arg:reduce arg:reduction arguments arg arg arg arg arg arg arg arg If Call Return return:yes Call If BoolOp Compare Compare Assign Call If BoolOp Compare Compare Compare Assign Raise Call Assign Call Call Return return:yes"
  },
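A usage sketch of the functional form; the tensors are illustrative:

```python
import torch
import torch.nn.functional as F

# With log_input=True the input is a log-rate, so the per-element
# loss is exp(input) - target * input.
log_rate = torch.randn(4, requires_grad=True)
target = torch.poisson(torch.rand(4) * 5)
loss = F.poisson_nll_loss(log_rate, target, log_input=True, reduction='mean')
loss.backward()
print(loss.item())
```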
  {
    "library": "seaborn",
    "name": "_reorder_columns",
    "source_code": "def _reorder_columns(self, res, data):\n    cols = [c for c in data if c in res]\n    cols += [c for c in res if c not in data]\n    return res.reindex(columns=pd.Index(cols))",
    "docstring": "Reorder result columns to match original order with new columns appended.",
    "type": "method",
    "file_path": "seaborn\\seaborn\\_core\\groupby.py",
    "ast_data": "FunctionDef name:_reorder_columns arg:self arg:res arg:data arguments arg arg arg Assign Compare Compare Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "disable_apex_o2_state_dict_hook",
    "source_code": "@deprecated('Please remove usage of this function. Copy its logic if it is required in user code', category=None)\n@contextlib.contextmanager\ndef disable_apex_o2_state_dict_hook(model: torch.nn.Module | torch.jit.ScriptFunction):\n    if not isinstance(model, torch.jit.ScriptFunction):\n        model_hooks = {}\n        for module in model.modules():\n            for key, hook in module._state_dict_hooks.items():\n                if type(hook).__name__ == 'O2StateDictHook':\n                    if module not in model_hooks:\n                        model_hooks[module] = {}\n                    model_hooks[module][key] = hook\n            if module in model_hooks:\n                for key in model_hooks[module]:\n                    module._state_dict_hooks.pop(key)\n        try:\n            yield\n        finally:\n            for module, m_map in model_hooks.items():\n                for key, hook in m_map.items():\n                    module._state_dict_hooks[key] = hook\n    else:\n        try:\n            yield\n        finally:\n            pass",
    "docstring": "A context manager to temporarily disable the Apex O2 hook that returns. .. deprecated:: 2.7 Please remove usage of this function.",
    "type": "function",
    "file_path": "pytorch\\torch\\onnx\\utils.py",
    "ast_data": "FunctionDef name:disable_apex_o2_state_dict_hook arg:model arguments arg If Call Assign For Call For Call If Compare Call If Compare Assign Assign If Compare For Call Try For Call For Call Assign Try Call"
  },
  {
    "library": "pytorch",
    "name": "set_rng_state_all",
    "source_code": "def set_rng_state_all(new_states: Iterable[Tensor]) -> None:\n    for i, state in enumerate(new_states):\n        set_rng_state(state, i)",
    "docstring": "Set the random number generator state of all devices. Args: new_states (Iterable of torch.ByteTensor): The desired state for each device.",
    "type": "function",
    "file_path": "pytorch\\torch\\xpu\\random.py",
    "ast_data": "FunctionDef name:set_rng_state_all arg:new_states arguments arg For Call Call"
  },
  {
    "library": "kornia",
    "name": "set_session",
    "source_code": "def set_session(self, session: ort.InferenceSession) -> None:\n    self._session = session",
    "docstring": "Set a custom ONNXRuntime InferenceSession. Args: session: ort.InferenceSession The custom ONNXRuntime session to be set for inference.",
    "type": "method",
    "file_path": "kornia\\kornia\\core\\mixin\\onnx.py",
    "ast_data": "FunctionDef name:set_session arg:self arg:session arguments arg arg Assign"
  },
  {
    "library": "tensorflow",
    "name": "random_poisson_v2",
    "source_code": "@tf_export('random.poisson', v1=[])\n@dispatch.add_dispatch_support\ndef random_poisson_v2(shape, lam, dtype=dtypes.float32, seed=None, name=None):\n    with ops.name_scope(name, 'random_poisson', [lam, shape]):\n        shape = ops.convert_to_tensor(shape, name='shape', dtype=dtypes.int32)\n        seed1, seed2 = random_seed.get_seed(seed)\n        result = gen_random_ops.random_poisson_v2(shape, lam, dtype=dtype, seed=seed1, seed2=seed2)\n        _maybe_set_static_shape_helper(result, shape, lam)\n        return result",
    "docstring": "Draws samples from each of the given Poisson distribution(s). is the rate parameter describing the distribution(s). Example: Args: shape: A 1-D integer Tensor or Python array. The shape of the output samples to be drawn per \"rate\"-parameterized distribution. lam: A Tensor or Python value or N-D array of type . provides the rate parameter(s) describing the poisson distribution(s) to sample. dtype: The type of the output: , , , or . seed: A Python integer. Used to create a random seed for the distributions. See for behavior. name: Optional name for the operation. Returns: samples: a of shape with values of type .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\random_ops.py",
    "ast_data": "FunctionDef name:random_poisson_v2 arg:shape arg:lam arg:dtype arg:seed arg:name arguments arg arg arg arg arg With Call Assign Call Assign Call Assign Call Call Return return:yes Call"
  },
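A usage sketch via the exported `tf.random.poisson` name; the output shape is the sample shape concatenated with the shape of `lam`:

```python
import tensorflow as tf

# 10 draws from each of two Poisson distributions (rates 0.5 and 1.5).
samples = tf.random.poisson(shape=[10], lam=[0.5, 1.5], seed=1)
print(samples.shape)  # (10, 2)
```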
  {
    "library": "pytorch",
    "name": "add_equality",
    "source_code": "def add_equality(self, source: Source, expr: sympy.Expr) -> None:\n    if expr.is_number:\n        self._static_results.add(f'{source.name()} == {expr}')\n    else:\n        self._symbolic_equivalences.append((source, expr))",
    "docstring": "Add an equality constraint",
    "type": "method",
    "file_path": "pytorch\\torch\\fx\\experimental\\symbolic_shapes.py",
    "ast_data": "FunctionDef name:add_equality arg:self arg:source arg:expr arguments arg arg arg If Call Call Call"
  },
  {
    "library": "matplotlib",
    "name": "PlaceHolderLayoutEngine",
    "source_code": "class PlaceHolderLayoutEngine(LayoutEngine):\n\n    def __init__(self, adjust_compatible, colorbar_gridspec, **kwargs):\n        self._adjust_compatible = adjust_compatible\n        self._colorbar_gridspec = colorbar_gridspec\n        super().__init__(**kwargs)\n\n    def execute(self, fig):\n        return",
    "docstring": "This layout engine does not adjust the figure layout at all. The purpose of this is to act as a placeholder when the user removes a layout engine to ensure an incompatible cannot be set later. Parameters ---------- adjust_compatible, colorbar_gridspec : bool Allow the PlaceHolderLayoutEngine to mirror the behavior of whatever layout engine it is replacing.",
    "type": "class",
    "file_path": "matplotlib\\lib\\matplotlib\\layout_engine.py",
    "ast_data": "ClassDef name:PlaceHolderLayoutEngine FunctionDef name:__init__ arg:self arg:adjust_compatible arg:colorbar_gridspec arguments arg arg arg arg Assign Assign Call Call FunctionDef name:execute arg:self arg:fig arguments arg arg Return return:no"
  },
  {
    "library": "scikit-learn",
    "name": "SkipTestWarning",
    "source_code": "class SkipTestWarning(UserWarning):\n    pass",
    "docstring": "Warning class used to notify the user of a test that was skipped. For example, one of the estimator checks requires a pandas import. If the pandas package cannot be imported, the test will be skipped rather than register as a failure.",
    "type": "class",
    "file_path": "scikit-learn\\sklearn\\exceptions.py",
    "ast_data": "ClassDef name:SkipTestWarning"
  },
  {
    "library": "matplotlib",
    "name": "set_color",
    "source_code": "def set_color(self, c):\n    self.set_edgecolor(c)",
    "docstring": "Set the edgecolor(s) of the LineCollection. Parameters ---------- c : :mpltype: or list of :mpltype: Single color (all lines have same color), or a sequence of RGBA tuples; if it is a sequence the lines will cycle through the sequence.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\collections.py",
    "ast_data": "FunctionDef name:set_color arg:self arg:c arguments arg arg Call"
  },
  {
    "library": "tensorflow",
    "name": "variables",
    "source_code": "@property\ndef variables(self):\n\n    def deref(weak_v):\n        v = weak_v()\n        if v is None:\n            raise AssertionError('Called a function referencing variables which have been deleted. This likely means that function-local variables were created and not referenced elsewhere in the program. This is generally a mistake; consider storing variables in an object attribute on first call.')\n        return v\n    return tuple((deref(v) for v in self._weak_variables))",
    "docstring": "A sequence of variables accessed by this FuncGraph. Note that functions keep only weak references to variables. Calling the function after a variable it accesses has been deleted is an error. Returns: Sequence of variables for this func graph.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\func_graph.py",
    "ast_data": "FunctionDef name:variables arg:self arguments arg FunctionDef name:deref arg:weak_v arguments arg Assign Call If Compare Raise Call Return return:yes Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "limit_string_length",
    "source_code": "def limit_string_length(string, max_len=50):\n    if max_len is None or len(string) <= max_len:\n        return string\n    else:\n        return '...' + string[len(string) - max_len:]",
    "docstring": "Limit the length of input string. Args: string: Input string. max_len: (int or None) If int, the length limit. If None, no limit. Returns: Possibly length-limited string.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\debug\\lib\\check_numerics_callback.py",
    "ast_data": "FunctionDef name:limit_string_length arg:string arg:max_len arguments arg arg If BoolOp Compare Compare Call Return return:yes Return return:yes Call"
  },
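The truncation behavior, reproduced standalone so the expected outputs are visible (the body is a copy of the helper above):

```python
def limit_string_length(string, max_len=50):
    if max_len is None or len(string) <= max_len:
        return string
    return '...' + string[len(string) - max_len:]

print(limit_string_length("abcdefgh", max_len=4))   # ...efgh (keeps the tail)
print(limit_string_length("short", max_len=None))   # short (no limit)
```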
  {
    "library": "tensorflow",
    "name": "_export_metrics",
    "source_code": "def _export_metrics(convert_func):\n\n    @functools.wraps(convert_func)\n    def wrapper(self, *args, **kwargs):\n        return self._convert_and_export_metrics(convert_func, *args, **kwargs)\n    return wrapper",
    "docstring": "The decorator around convert function to export metrics.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\lite\\python\\lite.py",
    "ast_data": "FunctionDef name:_export_metrics arg:convert_func arguments arg FunctionDef name:wrapper arg:self arguments arg arg arg Return return:yes Call Call Return return:yes"
  },
  {
    "library": "scipy",
    "name": "guess_byte_order",
    "source_code": "def guess_byte_order(self):\n    return boc.native_code",
    "docstring": "As we do not know what file type we have, assume native",
    "type": "method",
    "file_path": "scipy\\scipy\\io\\matlab\\_miobase.py",
    "ast_data": "FunctionDef name:guess_byte_order arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "size_internal",
    "source_code": "def size_internal(input, name=None, optimize=True, out_type=dtypes.int32):\n    if context.executing_eagerly() and (not hasattr(input, 'graph')) and (not isinstance(input, (sparse_tensor.SparseTensor, sparse_tensor.SparseTensorValue))):\n        input = ops.convert_to_tensor(input)\n        np_out_type = out_type.as_numpy_dtype\n        num_elements = np.prod(input._shape_tuple(), dtype=np_out_type)\n        return ops.convert_to_tensor(num_elements, dtype=out_type)\n    with ops.name_scope(name, 'Size', [input]) as name:\n        if isinstance(input, (sparse_tensor.SparseTensor, sparse_tensor.SparseTensorValue)):\n            return gen_math_ops.prod(gen_math_ops.cast(input.dense_shape, out_type), 0, name=name)\n        else:\n            input = ops.convert_to_tensor(input)\n            input_shape = input.get_shape()\n            if optimize:\n                if input_shape.is_fully_defined():\n                    return constant(input_shape.num_elements(), out_type, name=name)\n                if input_shape.dims and any((dim == 0 for dim in input_shape.dims)):\n                    return constant(0, out_type, name=name)\n            return gen_array_ops.size(input, name=name, out_type=out_type)",
    "docstring": "Returns the size of a tensor. Args: input: A or . name: A name for the operation (optional). optimize: if true, encode the size as a constant when possible. out_type: (Optional) The specified non-quantized numeric output type of the operation. Defaults to . Returns: A of type . Defaults to .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\array_ops.py",
    "ast_data": "FunctionDef name:size_internal arg:input arg:name arg:optimize arg:out_type arguments arg arg arg arg If BoolOp Call Call Call Assign Call Assign Assign Call Call Return return:yes Call With Call If Call Return return:yes Call Call Assign Call Assign Call If If Call Return return:yes Call Call If BoolOp Call Compare Return return:yes Call Return return:yes Call"
  },
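`size_internal` backs the public `tf.size`; a usage sketch showing the element count and the `out_type` parameter:

```python
import tensorflow as tf

t = tf.constant([[1, 2, 3], [4, 5, 6]])
print(tf.size(t))                     # 6, dtype int32 (the default out_type)
print(tf.size(t, out_type=tf.int64))  # 6, dtype int64
```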
  {
    "library": "pytorch",
    "name": "get_rng_state_all",
    "source_code": "def get_rng_state_all() -> list[Tensor]:\n    results = [get_rng_state(i) for i in range(device_count())]\n    return results",
    "docstring": "Return a list of ByteTensor representing the random number states of all devices.",
    "type": "function",
    "file_path": "pytorch\\torch\\xpu\\random.py",
    "ast_data": "FunctionDef name:get_rng_state_all arguments Assign Call Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "last_unref",
    "source_code": "@property\ndef last_unref(self) -> int:\n    return max(self._unref_times)",
    "docstring": "Last unreference timestamp of this tensor (long integer).",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\client\\timeline.py",
    "ast_data": "FunctionDef name:last_unref arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "sparse_categorical_accuracy",
    "source_code": "@dispatch.add_dispatch_support\ndef sparse_categorical_accuracy(y_true, y_pred):\n    y_pred = tensor_conversion.convert_to_tensor_v2_with_dispatch(y_pred)\n    y_true = tensor_conversion.convert_to_tensor_v2_with_dispatch(y_true)\n    y_pred_rank = y_pred.shape.ndims\n    y_true_rank = y_true.shape.ndims\n    if y_true_rank is not None and y_pred_rank is not None and (len(backend.int_shape(y_true)) == len(backend.int_shape(y_pred))):\n        y_true = array_ops.squeeze(y_true, [-1])\n    y_pred = math_ops.argmax(y_pred, axis=-1)\n    if backend.dtype(y_pred) != backend.dtype(y_true):\n        y_pred = math_ops.cast(y_pred, backend.dtype(y_true))\n    return math_ops.cast(math_ops.equal(y_true, y_pred), backend.floatx())",
    "docstring": "Calculates how often predictions match integer labels. Standalone usage: >>> y_true = [2, 1] >>> y_pred = [[0.1, 0.9, 0.8], [0.05, 0.95, 0]] >>> m = tf.keras.metrics.sparse_categorical_accuracy(y_true, y_pred) >>> assert m.shape == (2,) >>> m.numpy() array([0., 1.], dtype=float32) You can provide logits of classes as , since argmax of logits and probabilities are same. Args: y_true: Integer ground truth values. y_pred: The prediction values. Returns: Sparse categorical accuracy values.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\metrics.py",
    "ast_data": "FunctionDef name:sparse_categorical_accuracy arg:y_true arg:y_pred arguments arg arg Assign Call Assign Call Assign Assign If BoolOp Compare Compare Compare Call Call Call Call Assign Call Assign Call If Compare Call Call Assign Call Call Return return:yes Call Call Call"
  },
  {
    "library": "matplotlib",
    "name": "set_cmap",
    "source_code": "def set_cmap(cmap: Colormap | str) -> None:\n    cmap = get_cmap(cmap)\n    rc('image', cmap=cmap.name)\n    im = gci()\n    if im is not None:\n        im.set_cmap(cmap)",
    "docstring": "Set the default colormap, and applies it to the current image if any. Parameters ---------- cmap : or str A colormap instance or the name of a registered colormap. See Also -------- colormaps get_cmap",
    "type": "function",
    "file_path": "matplotlib\\lib\\matplotlib\\pyplot.py",
    "ast_data": "FunctionDef name:set_cmap arg:cmap arguments arg Assign Call Call Assign Call If Compare Call"
  },
  {
    "library": "pytorch",
    "name": "named_buffers",
    "source_code": "@compatibility(is_backward_compatible=False)\ndef named_buffers(self) -> Iterator[tuple[str, torch.Tensor]]:\n    non_persistent_buffers = set(self.graph_signature.non_persistent_buffers)\n    for buffer_name in self.graph_signature.buffers:\n        if buffer_name in non_persistent_buffers:\n            yield (buffer_name, self.constants[buffer_name])\n        else:\n            yield (buffer_name, self.state_dict[buffer_name])",
    "docstring": "Returns an iterator over original module buffers, yielding both the name of the buffer as well as the buffer itself.",
    "type": "method",
    "file_path": "pytorch\\torch\\export\\exported_program.py",
    "ast_data": "FunctionDef name:named_buffers arg:self arguments arg Assign Call For If Compare Call"
  },
  {
    "library": "pytorch",
    "name": "_fft_c2r",
    "source_code": "def _fft_c2r(func_name: str, input: TensorLikeType, n: Optional[int], dim: int, norm: NormType, forward: bool) -> TensorLikeType:\n    input = _maybe_promote_tensor_fft(input, require_complex=True)\n    dims = (utils.canonicalize_dim(input.ndim, dim, wrap_scalar=False),)\n    last_dim_size = n if n is not None else 2 * (input.shape[dim] - 1)\n    torch._check(last_dim_size >= 1, lambda: f'Invalid number of data points ({last_dim_size}) specified')\n    if n is not None:\n        input = _resize_fft_input(input, dims=dims, sizes=(last_dim_size // 2 + 1,))\n    if forward:\n        input = torch.conj(input)\n    output = prims.fft_c2r(input, dim=dims, last_dim_size=last_dim_size)\n    return _apply_norm(output, norm=norm, signal_numel=last_dim_size, forward=forward)",
    "docstring": "Common code for performing any complex to real FFT (irfft or hfft)",
    "type": "function",
    "file_path": "pytorch\\torch\\_refs\\fft.py",
    "ast_data": "FunctionDef name:_fft_c2r arg:func_name arg:input arg:n arg:dim arg:norm arg:forward arguments arg arg arg arg arg arg Assign Call Assign Call Assign Compare Call Compare arguments If Compare Assign Call If Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "cs_diff",
    "source_code": "def cs_diff(x, a, b, period=None, _cache=_cache):\n    if isinstance(_cache, threading.local):\n        if not hasattr(_cache, 'cs_diff_cache'):\n            _cache.cs_diff_cache = {}\n        _cache = _cache.cs_diff_cache\n    tmp = asarray(x)\n    if iscomplexobj(tmp):\n        return cs_diff(tmp.real, a, b, period, _cache) + 1j * cs_diff(tmp.imag, a, b, period, _cache)\n    if period is not None:\n        a = a * 2 * pi / period\n        b = b * 2 * pi / period\n    n = len(x)\n    omega = _cache.get((n, a, b))\n    if omega is None:\n        if len(_cache) > 20:\n            while _cache:\n                _cache.popitem()\n\n        def kernel(k, a=a, b=b):\n            if k:\n                return -cosh(a * k) / sinh(b * k)\n            return 0\n        omega = convolve.init_convolution_kernel(n, kernel, d=1)\n        _cache[n, a, b] = omega\n    overwrite_x = _datacopied(tmp, x)\n    return convolve.convolve(tmp, omega, swap_real_imag=1, overwrite_x=overwrite_x)",
    "docstring": "Return (a,b)-cosh/sinh pseudo-derivative of a periodic sequence. If `xxx` is taken as zero.",
    "type": "function",
    "file_path": "scipy\\scipy\\fftpack\\_pseudo_diffs.py",
    "ast_data": "FunctionDef name:cs_diff arg:x arg:a arg:b arg:period arg:_cache arguments arg arg arg arg arg If Call If Call Assign Assign Assign Call If Call Return return:yes Call Call If Compare Assign Assign Assign Call Assign Call If Compare If Compare Call While Call FunctionDef name:kernel arg:k arg:a arg:b arguments arg arg arg If Return return:yes Call Call Return return:yes Assign Call Assign Assign Call Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "reshape",
    "source_code": "def reshape(self, *args, **kwargs):\n    shape = check_shape(args, self.shape, allow_nd=range(1, 65))\n    order, copy = check_reshape_kwargs(kwargs)\n    if shape == self.shape:\n        if copy:\n            return self.copy()\n        else:\n            return self\n    return self.tocoo(copy=copy).reshape(shape, order=order, copy=False)",
    "docstring": "reshape(self, shape, order='C', copy=False) Gives a new shape to a sparse array/matrix without changing its data. Parameters ---------- shape : tuple of ints The new shape should be compatible with the original shape. order : {'C', 'F'}, optional Read the elements using this index order. 'C' means to read and write the elements using C-like index order; e.g., read entire first row, then second row, etc. 'F' means to read and write the elements using Fortran-like index order; e.g., read entire first column, then second column, etc. copy : bool, optional Indicates whether or not attributes of self should be copied whenever possible. The degree to which attributes are copied varies depending on the type of sparse array being used. Returns ------- reshaped : sparse array/matrix A sparse array/matrix with the given , not necessarily of the same format as the current object. See Also -------- numpy.reshape : NumPy's implementation of 'reshape' for ndarrays",
    "type": "method",
    "file_path": "scipy\\scipy\\sparse\\_base.py",
    "ast_data": "FunctionDef name:reshape arg:self arguments arg arg arg Assign Call Call Assign Call If Compare If Return return:yes Call Return return:yes Return return:yes Call Call"
  },
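A usage sketch with a COO array (other formats round-trip through `tocoo`, as the code above shows):

```python
import numpy as np
from scipy.sparse import coo_array

a = coo_array(np.arange(12).reshape(3, 4))
b = a.reshape((6, 2), order='C')   # same data, new compatible shape
print(b.shape)          # (6, 2)
print(b.toarray()[0])   # [0 1]
```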
  {
    "library": "tensorflow",
    "name": "get_save_options",
    "source_code": "def get_save_options():\n    return _save_context.options()",
    "docstring": "Returns the save options if under a save context.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\saved_model\\save_context.py",
    "ast_data": "FunctionDef name:get_save_options arguments Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "tzinfo",
    "source_code": "@property\ndef tzinfo(self) -> tzinfo | None:\n    return self.tz",
    "docstring": "Alias for tz attribute",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\arrays\\datetimes.py",
    "ast_data": "FunctionDef name:tzinfo arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "pool3d",
    "source_code": "@dispatch.add_dispatch_support\n@doc_controls.do_not_generate_docs\ndef pool3d(x, pool_size, strides=(1, 1, 1), padding='valid', data_format=None, pool_mode='max'):\n    if data_format is None:\n        data_format = image_data_format()\n    if data_format not in {'channels_first', 'channels_last'}:\n        raise ValueError('Unknown data_format: ' + str(data_format))\n    x, tf_data_format = _preprocess_conv3d_input(x, data_format)\n    padding = _preprocess_padding(padding)\n    if tf_data_format == 'NDHWC':\n        strides = (1,) + strides + (1,)\n        pool_size = (1,) + pool_size + (1,)\n    else:\n        strides = (1, 1) + strides\n        pool_size = (1, 1) + pool_size\n    if pool_mode == 'max':\n        x = nn.max_pool3d(x, pool_size, strides, padding=padding, data_format=tf_data_format)\n    elif pool_mode == 'avg':\n        x = nn.avg_pool3d(x, pool_size, strides, padding=padding, data_format=tf_data_format)\n    else:\n        raise ValueError('Invalid pooling mode: ' + str(pool_mode))\n    if data_format == 'channels_first' and tf_data_format == 'NDHWC':\n        x = array_ops.transpose(x, (0, 4, 1, 2, 3))\n    return x",
    "docstring": "3D Pooling. Args: x: Tensor or variable. pool_size: tuple of 3 integers. strides: tuple of 3 integers. padding: string, or . data_format: string, or . pool_mode: string, or . Returns: A tensor, result of 3D pooling. Raises: ValueError: if is neither or . ValueError: if is neither or .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\backend.py",
    "ast_data": "FunctionDef name:pool3d arg:x arg:pool_size arg:strides arg:padding arg:data_format arg:pool_mode arguments arg arg arg arg arg arg If Compare Assign Call If Compare Raise Call Call Assign Call Assign Call If Compare Assign Assign Assign Assign If Compare Assign Call If Compare Assign Call Raise Call Call If BoolOp Compare Compare Assign Call Return return:yes"
  },
  {
    "library": "pandas",
    "name": "operate_blockwise",
    "source_code": "def operate_blockwise(self, other: BlockManager, array_op) -> BlockManager:\n    return operate_blockwise(self, other, array_op)",
    "docstring": "Apply array_op blockwise with another (aligned) BlockManager.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\internals\\managers.py",
    "ast_data": "FunctionDef name:operate_blockwise arg:self arg:other arg:array_op arguments arg arg arg Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "_calc_view_axes",
    "source_code": "def _calc_view_axes(self, eye):\n    elev_rad = np.deg2rad(art3d._norm_angle(self.elev))\n    roll_rad = np.deg2rad(art3d._norm_angle(self.roll))\n    R = 0.5 * self._roll_to_vertical(self._box_aspect)\n    V = np.zeros(3)\n    V[self._vertical_axis] = -1 if abs(elev_rad) > np.pi / 2 else 1\n    u, v, w = proj3d._view_axes(eye, R, V, roll_rad)\n    return (u, v, w)",
    "docstring": "Get the unit vectors for the viewing axes in data coordinates. is towards the right of the screen is towards the top of the screen is out of the screen",
    "type": "method",
    "file_path": "matplotlib\\lib\\mpl_toolkits\\mplot3d\\axes3d.py",
    "ast_data": "FunctionDef name:_calc_view_axes arg:self arg:eye arguments arg arg Assign Call Call Assign Call Call Assign Call Assign Call Assign Compare Call Assign Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "warning_once",
    "source_code": "@functools.lru_cache(None)\ndef warning_once(logger_obj, *args, **kwargs):\n    logger_obj.warning(*args, **kwargs)",
    "docstring": "This function is similar to , but will emit the warning with the same message only once Note: The cache is for the function arguments, so 2 different callers using the same arguments will hit the cache. The assumption here is that all warning messages are unique across the code. If they aren't then need to switch to another type of cache that includes the caller frame information in the hashing function.",
    "type": "function",
    "file_path": "pytorch\\torch\\_logging\\_internal.py",
    "ast_data": "FunctionDef name:warning_once arg:logger_obj arguments arg arg arg Call Call"
  },
  {
    "library": "pytorch",
    "name": "get_params_to_average",
    "source_code": "def get_params_to_average(params: Union[Iterable[torch.nn.Parameter], Iterable[dict[str, torch.nn.Parameter]]]):\n    filtered_params = []\n    for param in params:\n        if isinstance(param, torch.nn.Parameter):\n            param_data = param\n            if param_data.grad is not None:\n                filtered_params.append(param_data)\n        elif isinstance(param, dict):\n            for param_data in param['params']:\n                if param_data.grad is not None:\n                    filtered_params.append(param_data)\n        else:\n            raise NotImplementedError(f'Parameter input of type {type(param)} is not supported')\n    return filtered_params",
    "docstring": "Return a list of parameters that need to average. This filters out the parameters that do not contain any gradients. Args: params: The parameters of a model or parameter groups of an optimizer.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\algorithms\\model_averaging\\utils.py",
    "ast_data": "FunctionDef name:get_params_to_average arg:params arguments arg Assign For If Call Assign If Compare Call If Call For If Compare Call Raise Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_cache",
    "source_code": "def _cache(self, mapping):\n    mapping = mapping.merge(mapping=self._lookup(mapping.x, mapping.y, mapping.kwargs))\n    if mapping.x is None and mapping.y is None:\n        raise ValueError('Caching expects at least one of (x,y) to be known, i.e., not None.')\n    self._from_x[mapping.x_key] = mapping\n    self._from_y[mapping.y_key] = mapping",
    "docstring": "Helper which stores mapping info in forward/inverse dicts.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\distributions\\bijector_impl.py",
    "ast_data": "FunctionDef name:_cache arg:self arg:mapping arguments arg arg Assign Call Call If BoolOp Compare Compare Raise Call Assign Assign"
  },
  {
    "library": "tensorflow",
    "name": "abs",
    "source_code": "@tf_export('math.abs', 'abs')\n@dispatch.register_unary_elementwise_api\n@dispatch.add_dispatch_support\ndef abs(x, name=None):\n    with ops.name_scope(name, 'Abs', [x]) as name:\n        x = ops.convert_to_tensor(x, name='x')\n        if x.dtype.is_complex:\n            return gen_math_ops.complex_abs(x, Tout=x.dtype.real_dtype, name=name)\n        return gen_math_ops._abs(x, name=name)",
    "docstring": "Computes the absolute value of a tensor. Given a tensor of integer or floating-point values, this operation returns a tensor of the same type, where each element contains the absolute value of the corresponding element in the input. Given a tensor of complex numbers, this operation returns a tensor of type or that is the absolute value of each element in . For a complex number \\\\(a + bj\\\\), its absolute value is computed as \\\\(\\sqrt{a^2 + b^2}\\\\). For example: >>> # real number >>> x = tf.constant([-2.25, 3.25]) >>> tf.abs(x) >>> # complex number >>> x = tf.constant([[-2.25 + 4.75j], [-3.25 + 5.75j]]) >>> tf.abs(x) Args: x: A or of type , , , , , or . name: A name for the operation (optional). Returns: A or of the same size, type and sparsity as , with absolute values. Note, for or input, the returned will be of type or , respectively.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\math_ops.py",
    "ast_data": "FunctionDef name:abs arg:x arg:name arguments arg arg With Call Assign Call If Return return:yes Call Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_shared_name",
    "source_code": "@property\ndef _shared_name(self):\n    shared_name = ''\n    if context.executing_eagerly():\n        shared_name += str(ops.uid())\n    return shared_name",
    "docstring": "Returns a shared name to be used by the table.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\lookup_ops.py",
    "ast_data": "FunctionDef name:_shared_name arg:self arguments arg Assign If Call Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_to_tensor_list",
    "source_code": "def _to_tensor_list(self, value) -> List['core_types.Symbol']:\n    return nest.flatten(self._to_components(value), expand_composites=True)",
    "docstring": "Encodes as a flat list of . By default, this just flattens using . However, subclasses may override this to return a different tensor encoding for values. In particular, some subclasses of override this method to return a \"boxed\" encoding for values, which then can be batched or unbatched. See for more details. Args: value: A value with compatible this . (Caller is responsible for ensuring compatibility.) Returns: A list of , compatible with , which can be used to reconstruct .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\type_spec.py",
    "ast_data": "FunctionDef name:_to_tensor_list arg:self arg:value arguments arg arg Return return:yes Call Call"
  },
  {
    "library": "django",
    "name": "is_ignorable_request",
    "source_code": "def is_ignorable_request(self, request, uri, domain, referer):\n    if not referer:\n        return True\n    if settings.APPEND_SLASH and uri.endswith('/') and (referer == uri[:-1]):\n        return True\n    if not self.is_internal_request(domain, referer) and '?' in referer:\n        return True\n    parsed_referer = urlsplit(referer)\n    if parsed_referer.netloc in ['', domain] and parsed_referer.path == uri:\n        return True\n    return any((pattern.search(uri) for pattern in settings.IGNORABLE_404_URLS))",
    "docstring": "Return True if the given request *shouldn't* notify the site managers according to project settings or in situations outlined by the inline comments.",
    "type": "method",
    "file_path": "django\\django\\middleware\\common.py",
    "ast_data": "FunctionDef name:is_ignorable_request arg:self arg:request arg:uri arg:domain arg:referer arguments arg arg arg arg arg If Return return:yes If BoolOp Call Compare Return return:yes If BoolOp Call Compare Return return:yes Assign Call If BoolOp Compare Compare Return return:yes Return return:yes Call Call"
  },
  {
    "library": "cherrypy",
    "name": "__init__",
    "source_code": "def __init__(self, bus):\n    self.bus = bus\n    self.handlers = {'SIGTERM': self.bus.exit, 'SIGHUP': self.handle_SIGHUP, 'SIGUSR1': self.bus.graceful}\n    if sys.platform[:4] == 'java':\n        del self.handlers['SIGUSR1']\n        self.handlers['SIGUSR2'] = self.bus.graceful\n        self.bus.log('SIGUSR1 cannot be set on the JVM platform. Using SIGUSR2 instead.')\n        self.handlers['SIGINT'] = self._jython_SIGINT_handler\n    self._previous_handlers = {}\n    self._original_pid = os.getpid()",
    "docstring": "Initialize a signal handler plugin.",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\process\\plugins.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:bus arguments arg arg Assign Assign If Compare Assign Call Assign Assign Assign Call"
  },
  {
    "library": "tensorflow",
    "name": "_flatten_tensors",
    "source_code": "def _flatten_tensors(tensors):\n    if not tensors:\n        raise ValueError('tensors cannot be empty')\n    shape = tensors[0].shape\n    for tensor in tensors:\n        shape = shape.merge_with(tensor.shape)\n    if not shape.is_fully_defined():\n        raise ValueError('Tensors must have statically known shape.')\n    if len(shape) != 1:\n        reshaped = []\n        for t in tensors:\n            with ops.colocate_with(t):\n                reshaped.append(array_ops.reshape(t, [-1]))\n        tensors = reshaped\n    return (tensors, shape)",
    "docstring": "Check tensors for isomorphism and flatten. Args: tensors: list of which must all have the same shape. Returns: tensors: a list of which are flattened (1D) views of tensors shape: the original shape of each element of input tensors Raises: ValueError: tensors are empty or non-isomorphic or have unknown shape.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\v1\\all_reduce.py",
    "ast_data": "FunctionDef name:_flatten_tensors arg:tensors arguments arg If Raise Call Assign For Assign Call If Call Raise Call If Compare Call Assign For With Call Call Call Assign Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "get_fullname",
    "source_code": "def get_fullname(self):\n    name = self._header.get(b'FullName')\n    if name is None:\n        name = self._header[b'FontName']\n    return name",
    "docstring": "Return the font full name, e.g., 'Times-Roman'.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\_afm.py",
    "ast_data": "FunctionDef name:get_fullname arg:self arguments arg Assign Call If Compare Assign Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_validate_dynamic_axes",
    "source_code": "def _validate_dynamic_axes(dynamic_axes, model, input_names, output_names):\n    if len(dynamic_axes) == 0:\n        return\n    if hasattr(model, 'graph'):\n        if input_names is None or len(input_names) == 0:\n            input_names = [x.debugName() for x in model.graph.inputs()]\n        if output_names is None or len(output_names) == 0:\n            output_names = [y.debugName() for y in model.graph.outputs()]\n    valid_names = set((input_names or []) + (output_names or []))\n    for key, value in dynamic_axes.items():\n        if key not in valid_names:\n            warnings.warn(f'Provided key {key} for dynamic axes is not a valid input/output name')\n        if isinstance(value, list):\n            warnings.warn(f'No names were found for specified dynamic axes of provided input.Automatically generated names will be applied to each dynamic axes of input {key}')\n            value_dict = {}\n            for i, x in enumerate(value):\n                if not isinstance(x, int):\n                    raise ValueError('The type of axis index is expected to be an integer')\n                if x in value_dict:\n                    warnings.warn(f'Duplicate dynamic axis index {x} was provided for input {key}.')\n                else:\n                    value_dict[x] = str(key) + '_dynamic_axes_' + str(i + 1)\n            dynamic_axes[key] = value_dict",
    "docstring": "Ensures dynamic axes argument is follows the expected format.",
    "type": "function",
    "file_path": "pytorch\\torch\\onnx\\utils.py",
    "ast_data": "FunctionDef name:_validate_dynamic_axes arg:dynamic_axes arg:model arg:input_names arg:output_names arguments arg arg arg arg If Compare Call Return return:no If Call If BoolOp Compare Compare Call Assign Call Call If BoolOp Compare Compare Call Assign Call Call Assign Call BoolOp BoolOp For Call If Compare Call If Call Call Assign For Call If Call Raise Call If Compare Call Assign Call Call Assign"
  },
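The two argument forms this validator accepts, as a sketch (the tensor names are illustrative):

```python
# Passed as torch.onnx.export(..., dynamic_axes=dynamic_axes)
dynamic_axes = {
    "input": {0: "batch", 2: "height", 3: "width"},  # explicit axis names
    "output": [0],  # bare indices: names are auto-generated with a warning
}
```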
  {
    "library": "tensorflow",
    "name": "_parse_and_analyze",
    "source_code": "def _parse_and_analyze(func):\n    node, source = parser.parse_entity(func, future_features=())\n    node = qual_names.resolve(node)\n    entity_info = transformer.EntityInfo(name=func.__name__, source_code=source, source_file=None, future_features=(), namespace={})\n    namer = naming.Namer({})\n    ctx = transformer.Context(entity_info, namer, None)\n    node = activity.resolve(node, ctx)\n    return node",
    "docstring": "Parse and analyze Python Function code.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\core\\function\\capture\\free_vars_detect.py",
    "ast_data": "FunctionDef name:_parse_and_analyze arg:func arguments arg Assign Call Assign Call Assign Call Assign Call Assign Call Assign Call Return return:yes"
  },
  {
    "library": "numpy",
    "name": "dtype_short_repr",
    "source_code": "def dtype_short_repr(dtype):\n    if type(dtype).__repr__ != np.dtype.__repr__:\n        return repr(dtype)\n    if dtype.names is not None:\n        return str(dtype)\n    elif issubclass(dtype.type, flexible):\n        return f\"'{str(dtype)}'\"\n    typename = dtype.name\n    if not dtype.isnative:\n        return f\"'{str(dtype)}'\"\n    if typename and (not (typename[0].isalpha() and typename.isalnum())):\n        typename = repr(typename)\n    return typename",
    "docstring": "Convert a dtype to a short form which evaluates to the same dtype. The intent is roughly that the following holds >>> from numpy import * >>> dt = np.int64([1, 2]).dtype >>> assert eval(dtype_short_repr(dt)) == dt",
    "type": "function",
    "file_path": "numpy\\numpy\\_core\\arrayprint.py",
    "ast_data": "FunctionDef name:dtype_short_repr arg:dtype arguments arg If Compare Call Return return:yes Call If Compare Return return:yes Call If Call Return return:yes Call Assign If Return return:yes Call If BoolOp BoolOp Call Call Assign Call Return return:yes"
  },
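The round-trip property from the docstring, exercised directly; the module path is private and taken from the file_path above, so it may move between NumPy versions:

```python
import numpy as np
from numpy._core.arrayprint import dtype_short_repr  # private helper

dt = np.dtype(np.int64)
s = dtype_short_repr(dt)                      # 'int64'
print(s, np.dtype(eval(s, {}, vars(np))) == dt)  # int64 True
```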
  {
    "library": "matplotlib",
    "name": "is_dashed",
    "source_code": "def is_dashed(self):\n    return self._linestyle in ('--', '-.', ':')",
    "docstring": "Return whether line has a dashed linestyle. A custom linestyle is assumed to be dashed, we do not inspect the `~.Line2D.set_linestyle`.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\lines.py",
    "ast_data": "FunctionDef name:is_dashed arg:self arguments arg Return return:yes Compare"
  },
  {
    "library": "django",
    "name": "get_db_prep_save",
    "source_code": "def get_db_prep_save(self, value, connection):\n    if hasattr(value, 'as_sql'):\n        return value\n    return self.get_db_prep_value(value, connection=connection, prepared=False)",
    "docstring": "Return field's value prepared for saving into a database.",
    "type": "method",
    "file_path": "django\\django\\db\\models\\fields\\__init__.py",
    "ast_data": "FunctionDef name:get_db_prep_save arg:self arg:value arg:connection arguments arg arg arg If Call Return return:yes Return return:yes Call"
  },
  {
    "library": "kornia",
    "name": "forward",
    "source_code": "def forward(self, desc1: Tensor, desc2: Tensor) -> Tuple[Tensor, Tensor]:\n    if self.match_mode == 'nn':\n        out = match_nn(desc1, desc2)\n    elif self.match_mode == 'mnn':\n        out = match_mnn(desc1, desc2)\n    elif self.match_mode == 'snn':\n        out = match_snn(desc1, desc2, self.th)\n    elif self.match_mode == 'smnn':\n        out = match_smnn(desc1, desc2, self.th)\n    else:\n        raise NotImplementedError\n    return out",
    "docstring": "Run forward. Args: desc1: Batch of descriptors of a shape :math:. desc2: Batch of descriptors of a shape :math:. Returns: - Descriptor distance of matching descriptors, shape of :math:. - Long tensor indexes of matching descriptors in desc1 and desc2, shape of :math: where :math:.",
    "type": "method",
    "file_path": "kornia\\kornia\\feature\\matching.py",
    "ast_data": "FunctionDef name:forward arg:self arg:desc1 arg:desc2 arguments arg arg arg If Compare Assign Call If Compare Assign Call If Compare Assign Call If Compare Assign Call Raise Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "save_attributes_to_hdf5_group",
    "source_code": "def save_attributes_to_hdf5_group(group, name, data):\n    bad_attributes = [x for x in data if len(x) > HDF5_OBJECT_HEADER_LIMIT]\n    if bad_attributes:\n        raise RuntimeError('The following attributes cannot be saved to HDF5 file because they are larger than %d bytes: %s' % (HDF5_OBJECT_HEADER_LIMIT, ', '.join(bad_attributes)))\n    data_npy = np.asarray(data)\n    num_chunks = 1\n    chunked_data = np.array_split(data_npy, num_chunks)\n    while any((x.nbytes > HDF5_OBJECT_HEADER_LIMIT for x in chunked_data)):\n        num_chunks += 1\n        chunked_data = np.array_split(data_npy, num_chunks)\n    if num_chunks > 1:\n        for chunk_id, chunk_data in enumerate(chunked_data):\n            group.attrs['%s%d' % (name, chunk_id)] = chunk_data\n    else:\n        group.attrs[name] = data",
    "docstring": "Saves attributes (data) of the specified name into the HDF5 group. This method deals with an inherent problem of HDF5 file which is not able to store data larger than HDF5_OBJECT_HEADER_LIMIT bytes. Args: group: A pointer to a HDF5 group. name: A name of the attributes to save. data: Attributes data to store. Raises: RuntimeError: If any single attribute is too large to be saved.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\saving\\hdf5_format.py",
    "ast_data": "FunctionDef name:save_attributes_to_hdf5_group arg:group arg:name arg:data arguments arg arg arg Assign Compare Call If Raise Call Call Assign Call Assign Assign Call While Call Compare Assign Call If Compare For Call Assign Assign"
  },
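The chunk-splitting loop standalone, with a toy limit so the behavior is visible (the real HDF5_OBJECT_HEADER_LIMIT constant lives in the hdf5_format module):

```python
import numpy as np

HEADER_LIMIT = 64  # bytes; illustrative value, far below the real limit
names = np.asarray([b'layer_%d' % i for i in range(40)])  # dtype S8, 320 bytes

# Keep splitting until every chunk fits under the header limit.
num_chunks = 1
chunks = np.array_split(names, num_chunks)
while any(x.nbytes > HEADER_LIMIT for x in chunks):
    num_chunks += 1
    chunks = np.array_split(names, num_chunks)
print(num_chunks, [c.nbytes for c in chunks])  # 5 chunks of 64 bytes each
```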
  {
    "library": "pytorch",
    "name": "set",
    "source_code": "def set(self, metric: str, value: Any, overwrite: bool=False) -> None:\n    if self._level == 0:\n        raise RuntimeError(f'Cannot set {metric} outside of a MetricsContext')\n    if metric in self._metrics and (not overwrite):\n        raise RuntimeError(f\"Metric '{metric}' has already been set in the current context\")\n    self._metrics[metric] = value",
    "docstring": "Set a metric to a given value. Raises if the metric has been assigned previously in the current context.",
    "type": "method",
    "file_path": "pytorch\\torch\\_dynamo\\metrics_context.py",
    "ast_data": "FunctionDef name:set arg:self arg:metric arg:value arg:overwrite arguments arg arg arg arg If Compare Raise Call If BoolOp Compare Raise Call Assign"
  },
  {
    "library": "sphinx",
    "name": "desc_sig_operator",
    "source_code": "class desc_sig_operator(desc_sig_element, _sig_element=True):\n    classes = ['o']",
    "docstring": "Node for an operator in a signature.",
    "type": "class",
    "file_path": "sphinx\\sphinx\\addnodes.py",
    "ast_data": "ClassDef name:desc_sig_operator Assign"
  },
  {
    "library": "tensorflow",
    "name": "combined_non_singular_hint",
    "source_code": "def combined_non_singular_hint(operator_a, operator_b):\n    if operator_a.is_non_singular is False or operator_b.is_non_singular is False:\n        return False\n    return operator_a.is_non_singular and operator_b.is_non_singular",
    "docstring": "Get combined hint for when .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\linalg\\property_hint_util.py",
    "ast_data": "FunctionDef name:combined_non_singular_hint arg:operator_a arg:operator_b arguments arg arg If BoolOp Compare Compare Return return:yes Return return:yes BoolOp"
  },
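The hint uses three-valued logic: a definite `False` from either operand wins, `True` requires both operands to be known non-singular, and anything else stays unknown (`None`). A tiny illustration with stand-in objects:

```python
class Op:
    def __init__(self, is_non_singular):
        self.is_non_singular = is_non_singular  # True, False, or None

def hint(a, b):
    if a.is_non_singular is False or b.is_non_singular is False:
        return False  # a known-singular factor makes the product singular
    return a.is_non_singular and b.is_non_singular  # True, or None if unknown

print(hint(Op(True), Op(True)))   # True
print(hint(Op(True), Op(None)))   # None -> no hint either way
print(hint(Op(None), Op(False)))  # False
```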
  {
    "library": "tensorflow",
    "name": "do_encode",
    "source_code": "def do_encode(self, type_spec_value, encode_fn):\n    type_state = type_spec_value._serialize()\n    num_flat_components = len(nest.flatten(type_spec_value._component_specs, expand_composites=True))\n    encoded_type_spec = struct_pb2.StructuredValue()\n    encoded_type_spec.type_spec_value.CopyFrom(struct_pb2.TypeSpecProto(type_spec_class=self.type_spec_proto_enum, type_state=encode_fn(type_state), type_spec_class_name=self.type_spec_class.__name__, num_flat_components=num_flat_components))\n    return encoded_type_spec",
    "docstring": "Returns an encoded proto for the given built-in TypeSpec.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\saved_model\\nested_structure_coder.py",
    "ast_data": "FunctionDef name:do_encode arg:self arg:type_spec_value arg:encode_fn arguments arg arg arg Assign Call Assign Call Call Assign Call Call Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_choices_default",
    "source_code": "def _choices_default():\n    from torch._inductor.choices import InductorChoices\n    rv = InductorChoices()\n    setattr(threadlocal, _choices._key, rv)\n    return rv",
    "docstring": "Lazy init the global choices handler We virtualize InductorChoices to allow changing inductor heuristics from out of tree.",
    "type": "function",
    "file_path": "pytorch\\torch\\_inductor\\virtualized.py",
    "ast_data": "FunctionDef name:_choices_default arguments Assign Call Call Return return:yes"
  },
  {
    "library": "numpy",
    "name": "_ensure_ndmin_ndarray",
    "source_code": "def _ensure_ndmin_ndarray(a, *, ndmin: int):\n    if a.ndim > ndmin:\n        a = np.squeeze(a)\n    if a.ndim < ndmin:\n        if ndmin == 1:\n            a = np.atleast_1d(a)\n        elif ndmin == 2:\n            a = np.atleast_2d(a).T\n    return a",
    "docstring": "This is a helper function of loadtxt and genfromtxt to ensure proper minimum dimension as requested ndim : int. Supported values 1, 2, 3 ^^ whenever this changes, keep in sync with _ensure_ndmin_ndarray_check_param",
    "type": "function",
    "file_path": "numpy\\numpy\\lib\\_npyio_impl.py",
    "ast_data": "FunctionDef name:_ensure_ndmin_ndarray arg:a arguments arg arg If Compare Assign Call If Compare If Compare Assign Call If Compare Assign Call Return return:yes"
  },
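The effect is easiest to see through `np.loadtxt`, which uses this helper: a single-row file collapses to 1-D unless `ndmin=2` is requested.

```python
from io import StringIO
import numpy as np

print(np.loadtxt(StringIO("1 2 3")).shape)           # (3,)
print(np.loadtxt(StringIO("1 2 3"), ndmin=2).shape)  # (1, 3)
print(np.loadtxt(StringIO("7"), ndmin=1).shape)      # (1,)
```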
  {
    "library": "django",
    "name": "num_interior_rings",
    "source_code": "@property\ndef num_interior_rings(self):\n    return capi.get_nrings(self.ptr)",
    "docstring": "Return the number of interior rings.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\geos\\polygon.py",
    "ast_data": "FunctionDef name:num_interior_rings arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "AutoFunctionalizedV2",
    "source_code": "class AutoFunctionalizedV2(HigherOrderOperator):\n\n    def __init__(self) -> None:\n        super().__init__('auto_functionalized_v2', cacheable=True)\n\n    def __call__(self, /, _mutable_op: _MutableOpType, **kwargs: Any) -> tuple[Any, tuple[Tensor, ...]]:\n        _op_to_check: Optional[Union[OpOverload, HopInstance]] = None\n        if isinstance(_mutable_op, HigherOrderOperator):\n            _op_to_check = HopInstance(_mutable_op, SchemaHolder.from_tree_spec(kwargs.get('_op_schema', None)).schema)\n        else:\n            _op_to_check = _mutable_op\n        assert _op_to_check is not None\n        assert can_auto_functionalize(_op_to_check)\n        assert isinstance(kwargs, dict)\n        return super().__call__(_mutable_op, **kwargs)",
    "docstring": "auto_functionalized_v2(_mutable_op, **kwargs) This HOP runs a \"functional\" version of _mutable_op. Unlike AutoFunctionalized, this version is improved to better handle view tensors. This version is only used in non export mode.",
    "type": "class",
    "file_path": "pytorch\\torch\\_higher_order_ops\\auto_functionalize.py",
    "ast_data": "ClassDef name:AutoFunctionalizedV2 FunctionDef name:__init__ arg:self arguments arg Call Call FunctionDef name:__call__ arg:_mutable_op arguments arg arg arg If Call Assign Call Call Call Assign Compare Call Call Return return:yes Call Call"
  },
  {
    "library": "pandas",
    "name": "_get_cmap_instance",
    "source_code": "def _get_cmap_instance(colormap: str | Colormap) -> Colormap:\n    if isinstance(colormap, str):\n        cmap = colormap\n        colormap = mpl.colormaps[colormap]\n        if colormap is None:\n            raise ValueError(f'Colormap {cmap} is not recognized')\n    return colormap",
    "docstring": "Get instance of matplotlib colormap.",
    "type": "function",
    "file_path": "pandas\\pandas\\plotting\\_matplotlib\\style.py",
    "ast_data": "FunctionDef name:_get_cmap_instance arg:colormap arguments arg If Call Assign Assign If Compare Raise Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "not_equal",
    "source_code": "@tf_export('math.not_equal', 'not_equal')\n@dispatch.register_binary_elementwise_api\n@dispatch.add_dispatch_support\ndef not_equal(x, y, name=None):\n    return gen_math_ops.not_equal(x, y, name=name)",
    "docstring": "Returns the truth value of (x != y) element-wise. Performs a [broadcast]( with the arguments and then an element-wise inequality comparison, returning a Tensor of boolean values. For example: >>> x = tf.constant([2, 4]) >>> y = tf.constant(2) >>> tf.math.not_equal(x, y) >>> x = tf.constant([2, 4]) >>> y = tf.constant([2, 4]) >>> tf.math.not_equal(x, y) Args: x: A . y: A . name: A name for the operation (optional). Returns: A of type bool with the same size as that of x or y. Raises: : If shapes of arguments are incompatible",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\math_ops.py",
    "ast_data": "FunctionDef name:not_equal arg:x arg:y arg:name arguments arg arg arg Return return:yes Call Call"
  },
  {
    "library": "matplotlib",
    "name": "get_coordinates",
    "source_code": "def get_coordinates(self):\n    return self._coordinates",
    "docstring": "Return the vertices of the mesh as an (M+1, N+1, 2) array. M, N are the number of quadrilaterals in the rows / columns of the mesh, corresponding to (M+1, N+1) vertices. The last dimension specifies the components (x, y).",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\collections.py",
    "ast_data": "FunctionDef name:get_coordinates arg:self arguments arg Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "set_data",
    "source_code": "def set_data(self, x, y, A):\n    A = self._normalize_image_array(A)\n    x = np.array(x, np.float32)\n    y = np.array(y, np.float32)\n    if not (x.ndim == y.ndim == 1 and A.shape[:2] == y.shape + x.shape):\n        raise TypeError(\"Axes don't match array shape\")\n    self._A = A\n    self._Ax = x\n    self._Ay = y\n    self._imcache = None\n    self.stale = True",
    "docstring": "Set the grid for the pixel centers, and the pixel values. Parameters ---------- x, y : 1D array-like Monotonic arrays of shapes (N,) and (M,), respectively, specifying pixel centers. A : array-like (M, N) or masked array of values to be colormapped, or (M, N, 3) RGB array, or (M, N, 4) RGBA array.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\image.py",
    "ast_data": "FunctionDef name:set_data arg:self arg:x arg:y arg:A arguments arg arg arg arg Assign Call Assign Call Assign Call If BoolOp Compare Compare Raise Call Assign Assign Assign Assign Assign"
  },
  {
    "library": "sphinx",
    "name": "Cell",
    "source_code": "class Cell:\n\n    def __init__(self, text: str='', rowspan: int=1, colspan: int=1) -> None:\n        self.text = text\n        self.wrapped: list[str] = []\n        self.rowspan = rowspan\n        self.colspan = colspan\n        self.col: int | None = None\n        self.row: int | None = None\n\n    def __repr__(self) -> str:\n        return f'<Cell {self.text!r} {self.row}v{self.rowspan}/{self.col}>{self.colspan}>'\n\n    def __hash__(self) -> int:\n        return hash((self.col, self.row))\n\n    def __bool__(self) -> bool:\n        return bool(self.text) and self.col is not None and (self.row is not None)\n\n    def wrap(self, width: int) -> None:\n        self.wrapped = my_wrap(self.text, width)",
    "docstring": "Represents a cell in a table. It can span multiple columns or multiple lines.",
    "type": "class",
    "file_path": "sphinx\\sphinx\\writers\\text.py",
    "ast_data": "ClassDef name:Cell FunctionDef name:__init__ arg:self arg:text arg:rowspan arg:colspan arguments arg arg arg arg Assign Assign Assign FunctionDef name:__repr__ arg:self arguments arg Return return:yes FunctionDef name:__hash__ arg:self arguments arg Return return:yes Call FunctionDef name:__bool__ arg:self arguments arg Return return:yes BoolOp Call Compare Compare FunctionDef name:wrap arg:self arg:width arguments arg arg Assign Call"
  },
  {
    "library": "pytorch",
    "name": "Max",
    "source_code": "class Max(MinMaxBase, Application):\n    zero = S.Infinity\n    identity = S.NegativeInfinity\n\n    def _eval_is_positive(self):\n        return fuzzy_or((a.is_positive for a in self.args))\n\n    def _eval_is_nonnegative(self):\n        return fuzzy_or((a.is_nonnegative for a in self.args))\n\n    def _eval_is_negative(self):\n        return fuzzy_and((a.is_negative for a in self.args))",
    "docstring": "Return, if possible, the maximum value of the list.",
    "type": "class",
    "file_path": "pytorch\\torch\\utils\\_sympy\\functions.py",
    "ast_data": "ClassDef name:Max Assign Assign FunctionDef name:_eval_is_positive arg:self arguments arg Return return:yes Call FunctionDef name:_eval_is_nonnegative arg:self arguments arg Return return:yes Call FunctionDef name:_eval_is_negative arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_Identity",
    "source_code": "def _Identity(tensor, name=None):\n    tensor = ops.internal_convert_to_tensor_or_composite(tensor, as_ref=True)\n    tensor = variable_utils.convert_variables_to_tensors(tensor)\n    if isinstance(tensor, tensor_lib.Tensor):\n        if tensor.dtype._is_ref_dtype:\n            return gen_array_ops.ref_identity(tensor, name=name)\n        else:\n            return array_ops.identity(tensor, name=name)\n    elif isinstance(tensor, composite_tensor.CompositeTensor):\n        return nest.map_structure(_Identity, tensor, expand_composites=True)\n    else:\n        raise TypeError(f\"'tensor' must be a Tensor or CompositeTensor. Received: {type(tensor)}.\")",
    "docstring": "Return a tensor with the same shape and contents as the input tensor. Args: tensor: A Tensor. name: A name for this operation (optional). Returns: A Tensor with the same type and value as the input Tensor.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\control_flow_ops.py",
    "ast_data": "FunctionDef name:_Identity arg:tensor arg:name arguments arg arg Assign Call Assign Call If Call If Return return:yes Call Return return:yes Call If Call Return return:yes Call Raise Call Call"
  },
  {
    "library": "tensorflow",
    "name": "true_negatives_at_thresholds",
    "source_code": "@tf_export(v1=['metrics.true_negatives_at_thresholds'])\ndef true_negatives_at_thresholds(labels, predictions, thresholds, weights=None, metrics_collections=None, updates_collections=None, name=None):\n    if context.executing_eagerly():\n        raise RuntimeError('tf.metrics.true_negatives_at_thresholds is not supported when eager execution is enabled.')\n    with variable_scope.variable_scope(name, 'true_negatives', (predictions, labels, weights)):\n        values, update_ops = _confusion_matrix_at_thresholds(labels, predictions, thresholds, weights=weights, includes=('tn',))\n        tn_value = _aggregate_variable(values['tn'], metrics_collections)\n        if updates_collections:\n            ops.add_to_collections(updates_collections, update_ops['tn'])\n        return (tn_value, update_ops['tn'])",
    "docstring": "Computes true negatives at provided threshold values. If is , weights default to 1. Use weights of 0 to mask values. Args: labels: A whose shape matches . Will be cast to . predictions: A floating point of arbitrary shape and whose values are in the range . thresholds: A python list or tuple of float thresholds in . weights: Optional whose rank is either 0, or the same rank as , and must be broadcastable to (i.e., all dimensions must be either , or the same as the corresponding dimension). metrics_collections: An optional list of collections that should be added to. updates_collections: An optional list of collections that should be added to. name: An optional variable_scope name. Returns: true_negatives: A float of shape . update_op: An operation that updates the variable and returns its current value. Raises: ValueError: If and have mismatched shapes, or if is not and its shape doesn't match , or if either or are not a list or tuple. RuntimeError: If eager execution is enabled.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\metrics_impl.py",
    "ast_data": "FunctionDef name:true_negatives_at_thresholds arg:labels arg:predictions arg:thresholds arg:weights arg:metrics_collections arg:updates_collections arg:name arguments arg arg arg arg arg arg arg If Call Raise Call With Call Assign Call Assign Call If Call Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "_text_getter",
    "source_code": "def _text_getter(self, obj):\n    raise AbstractMethodError(self)",
    "docstring": "Return the text of an individual DOM node. Parameters ---------- obj : node-like A DOM node. Returns ------- text : str or unicode The text from an individual DOM node.",
    "type": "method",
    "file_path": "pandas\\pandas\\io\\html.py",
    "ast_data": "FunctionDef name:_text_getter arg:self arg:obj arguments arg arg Raise Call"
  },
  {
    "library": "tensorflow",
    "name": "_with_num_row_partitions",
    "source_code": "def _with_num_row_partitions(self, num_row_partitions):\n    rank = self.rank\n    if rank is None:\n        raise ValueError('Rank must be known to adjust num_row_partitions')\n    if not isinstance(num_row_partitions, int):\n        raise ValueError('num_row_partitions must be an int')\n    if num_row_partitions < 0:\n        raise ValueError('num_row_partitions must be nonnegative')\n    if num_row_partitions == self.num_row_partitions:\n        return self\n    if num_row_partitions >= rank:\n        raise ValueError('num_row_partitions must be less than rank')\n    if num_row_partitions > self.num_row_partitions:\n        num_row_partitions_diff = num_row_partitions - self.num_row_partitions\n        new_inner_rank = self.rank - num_row_partitions\n        nvals = self._inner_shape_dim(0)\n        more_rp = []\n        for i in range(num_row_partitions_diff):\n            nrows = nvals\n            row_length = self._inner_shape_dim(i + 1)\n            nvals = nrows * row_length\n            rp = RowPartition.from_uniform_row_length(row_length, nrows=nrows, dtype=self.dtype)\n            more_rp.append(rp)\n        alt_inner = self._alt_inner_shape(new_inner_rank)\n        return DynamicRaggedShape(list(self.row_partitions) + more_rp, alt_inner)\n    else:\n        assert num_row_partitions < self.num_row_partitions\n        return DynamicRaggedShape(self.row_partitions[:num_row_partitions], self._alt_inner_shape(self.rank - num_row_partitions))",
    "docstring": "Creates an identical shape with the given num_row_partitions. Note that the shape must be statically refactorable to this rank. In particular: * rank must be known. * num_row_partitions must be a nonnegative int. * num_row_partitions must be less than the rank of the shape * num_row_partitions must be greater or equal to the index of any ragged dimension. Note that if the num_row_partitions is the same, self is returned. Args: num_row_partitions: the target num_row_partitions (must be a nonnegative int). Returns: a shape with a (possibly) different num_row_partitions. Raises: ValueError: if the rank is unknown, the argument is not a nonnegative int, or there is a dimension that is nonuniform.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\dynamic_ragged_shape.py",
    "ast_data": "FunctionDef name:_with_num_row_partitions arg:self arg:num_row_partitions arguments arg arg Assign If Compare Raise Call If Call Raise Call If Compare Raise Call If Compare Return return:yes If Compare Raise Call If Compare Assign Assign Assign Call Assign For Call Assign Assign Call Assign Assign Call Call Assign Call Return return:yes Call Call Compare Return return:yes Call Call"
  },
  {
    "library": "scipy",
    "name": "cophenet",
    "source_code": "@lazy_cython\ndef cophenet(Z, Y=None):\n    xp = array_namespace(Z, Y)\n    Z = _asarray(Z, order='C', dtype=xp.float64, xp=xp)\n    _is_valid_linkage(Z, throw=True, name='Z', xp=xp)\n\n    def cy_cophenet(Z, validate):\n        if validate:\n            _is_valid_linkage(Z, throw=True, name='Z', xp=np)\n        n = Z.shape[0] + 1\n        zz = np.zeros(n * (n - 1) // 2, dtype=np.float64)\n        _hierarchy.cophenetic_distances(Z, zz, n)\n        return zz\n    n = Z.shape[0] + 1\n    zz = xpx.lazy_apply(cy_cophenet, Z, validate=is_lazy_array(Z), shape=(n * (n - 1) // 2,), dtype=xp.float64, as_numpy=True, xp=xp)\n    if Y is None:\n        return zz\n    Y = _asarray(Y, order='C', xp=xp)\n    distance.is_valid_y(Y, throw=True, name='Y')\n    z = xp.mean(zz)\n    y = xp.mean(Y)\n    Yy = Y - y\n    Zz = zz - z\n    numerator = Yy * Zz\n    denomA = Yy ** 2\n    denomB = Zz ** 2\n    c = xp.sum(numerator) / xp.sqrt(xp.sum(denomA) * xp.sum(denomB))\n    return (c, zz)",
    "docstring": "Calculate the cophenetic distances between each observation in the hierarchical clustering defined by the linkage `linkageZnmYZijijscipy.cluster.hierarchy.cophenetscipy.spatial.distance.squareform` that are very close (i.e., in the same corner) is 1. For other pairs of points is 2, because the points will be located in clusters at different corners - thus, the distance between these clusters will be larger.",
    "type": "function",
    "file_path": "scipy\\scipy\\cluster\\hierarchy.py",
    "ast_data": "FunctionDef name:cophenet arg:Z arg:Y arguments arg arg Assign Call Assign Call Call FunctionDef name:cy_cophenet arg:Z arg:validate arguments arg arg If Call Assign Assign Call Call Return return:yes Assign Assign Call Call If Compare Return return:yes Assign Call Call Assign Call Assign Call Assign Assign Assign Assign Assign Assign Call Call Call Call Return return:yes"
  },
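Typical usage pairs `cophenet` with a linkage and the original condensed distances to get the cophenetic correlation coefficient:

```python
import numpy as np
from scipy.cluster.hierarchy import linkage, cophenet
from scipy.spatial.distance import pdist

X = np.random.default_rng(0).normal(size=(10, 2))
Z = linkage(X, method="average")
c, coph_dists = cophenet(Z, pdist(X))
print(float(c))          # cophenetic correlation coefficient
print(coph_dists.shape)  # (45,) -- condensed form for 10 observations
```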
  {
    "library": "django",
    "name": "serialize",
    "source_code": "def serialize(format, queryset, **options):\n    s = get_serializer(format)()\n    s.serialize(queryset, **options)\n    return s.getvalue()",
    "docstring": "Serialize a queryset (or any iterator that returns database objects) using a certain serializer.",
    "type": "function",
    "file_path": "django\\django\\core\\serializers\\__init__.py",
    "ast_data": "FunctionDef name:serialize arg:format arg:queryset arguments arg arg arg Assign Call Call Call Return return:yes Call"
  },
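A typical call site for the serializer registry (this assumes a configured Django project; `MyModel` is a hypothetical model used only for illustration):

```python
from django.core import serializers

# Serialize a queryset to JSON; other registered formats ("xml", "yaml", ...)
# work the same way through the same entry point.
payload = serializers.serialize("json", MyModel.objects.all())
```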
  {
    "library": "numpy",
    "name": "all",
    "source_code": "def all(self, axis=None, out=None, keepdims=np._NoValue):\n    kwargs = {} if keepdims is np._NoValue else {'keepdims': keepdims}\n    mask = _check_mask_axis(self._mask, axis, **kwargs)\n    if out is None:\n        d = self.filled(True).all(axis=axis, **kwargs).view(type(self))\n        if d.ndim:\n            d.__setmask__(mask)\n        elif mask:\n            return masked\n        return d\n    self.filled(True).all(axis=axis, out=out, **kwargs)\n    if isinstance(out, MaskedArray):\n        if out.ndim or mask:\n            out.__setmask__(mask)\n    return out",
    "docstring": "Returns True if all elements evaluate to True. The output array is masked where all the values along the given axis are masked: if the output would have been a scalar and that all the values are masked, then the output is . Refer to for full documentation. See Also -------- numpy.ndarray.all : corresponding function for ndarrays numpy.all : equivalent function Examples -------- >>> import numpy as np >>> np.ma.array([1,2,3]).all() True >>> a = np.ma.array([1,2,3], mask=True) >>> (a.all() is np.ma.masked) True",
    "type": "method",
    "file_path": "numpy\\numpy\\ma\\core.py",
    "ast_data": "FunctionDef name:all arg:self arg:axis arg:out arg:keepdims arguments arg arg arg arg Assign Compare Assign Call If Compare Assign Call Call Call Call If Call If Return return:yes Return return:yes Call Call If Call If BoolOp Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "isgeneratorfunction",
    "source_code": "def isgeneratorfunction(object):\n    return _inspect.isgeneratorfunction(tf_decorator.unwrap(object)[1])",
    "docstring": "TFDecorator-aware replacement for inspect.isgeneratorfunction.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\util\\tf_inspect.py",
    "ast_data": "FunctionDef name:isgeneratorfunction arg:object arguments arg Return return:yes Call Call"
  },
  {
    "library": "pandas",
    "name": "_concat_same_type",
    "source_code": "@classmethod\ndef _concat_same_type(cls, to_concat) -> Self:\n    chunks = [array for ea in to_concat for array in ea._pa_array.iterchunks()]\n    if to_concat[0].dtype == 'string':\n        pa_dtype = pa.large_string()\n    else:\n        pa_dtype = to_concat[0].dtype.pyarrow_dtype\n    arr = pa.chunked_array(chunks, type=pa_dtype)\n    return cls(arr)",
    "docstring": "Concatenate multiple ArrowExtensionArrays. Parameters ---------- to_concat : sequence of ArrowExtensionArrays Returns ------- ArrowExtensionArray",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\arrays\\arrow\\array.py",
    "ast_data": "FunctionDef name:_concat_same_type arg:cls arg:to_concat arguments arg arg Assign Call If Compare Assign Call Assign Assign Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "make_one_shot_iterator",
    "source_code": "@deprecation.deprecated(None, 'This is a deprecated API that should only be used in TF 1 graph mode and legacy TF 2 graph mode available through `tf.compat.v1`. In all other situations -- namely, eager mode and inside `tf.function` -- you can consume dataset elements using `for elem in dataset: ...` or by explicitly creating iterator via `iterator = iter(dataset)` and fetching its elements via `values = next(iterator)`. Furthermore, this API is not available in TF 2. During the transition from TF 1 to TF 2 you can use `tf.compat.v1.data.make_one_shot_iterator(dataset)` to create a TF 1 graph mode style iterator for a dataset created through TF 2 APIs. Note that this should be a transient state of your code base as there are in general no guarantees about the interoperability of TF 1 and TF 2 code.')\ndef make_one_shot_iterator(self) -> Union[iterator_ops.Iterator, iterator_ops.OwnedIterator]:\n    return self._make_one_shot_iterator()",
    "docstring": "Creates an iterator for elements of this dataset. Note: The returned iterator will be initialized automatically. A \"one-shot\" iterator does not currently support re-initialization. For that see . Example: Returns: An for elements of this dataset.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\data\\ops\\dataset_ops.py",
    "ast_data": "FunctionDef name:make_one_shot_iterator arg:self arguments arg Return return:yes Call Call"
  },
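A TF1 graph-mode sketch of the compat entry point the deprecation message points to:

```python
import tensorflow as tf

tf.compat.v1.disable_eager_execution()
dataset = tf.data.Dataset.range(3)
iterator = tf.compat.v1.data.make_one_shot_iterator(dataset)
next_elem = iterator.get_next()

with tf.compat.v1.Session() as sess:
    print(sess.run(next_elem))  # 0
    print(sess.run(next_elem))  # 1
```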
  {
    "library": "pytorch",
    "name": "float8_e4m3fn",
    "source_code": "def float8_e4m3fn(self):\n    _warn_typed_storage_removal()\n    return self._to(torch.float8_e4m3fn)",
    "docstring": "Casts this storage to float8_e4m3fn type",
    "type": "method",
    "file_path": "pytorch\\torch\\storage.py",
    "ast_data": "FunctionDef name:float8_e4m3fn arg:self arguments arg Call Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "_add_nat",
    "source_code": "@final\ndef _add_nat(self) -> Self:\n    if isinstance(self.dtype, PeriodDtype):\n        raise TypeError(f'Cannot add {type(self).__name__} and {type(NaT).__name__}')\n    result = np.empty(self.shape, dtype=np.int64)\n    result.fill(iNaT)\n    result = result.view(self._ndarray.dtype)\n    return type(self)._simple_new(result, dtype=self.dtype, freq=None)",
    "docstring": "Add pd.NaT to self",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\arrays\\datetimelike.py",
    "ast_data": "FunctionDef name:_add_nat arg:self arguments arg If Call Raise Call Call Call Assign Call Call Assign Call Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "parse_debug_node_name",
    "source_code": "def parse_debug_node_name(node_name):\n    prefix = '__dbg_'\n    name = node_name\n    if not name.startswith(prefix):\n        raise ValueError(\"Invalid prefix in debug node name: '%s'\" % node_name)\n    name = name[len(prefix):]\n    if name.count('_') < 2:\n        raise ValueError(\"Invalid debug node name: '%s'\" % node_name)\n    debug_op = name[name.rindex('_') + 1:]\n    name = name[:name.rindex('_')]\n    debug_op_index = int(name[name.rindex('_') + 1:])\n    name = name[:name.rindex('_')]\n    if name.count(':') != 1:\n        raise ValueError(\"Invalid tensor name in debug node name: '%s'\" % node_name)\n    watched_node_name = name[:name.index(':')]\n    watched_output_slot = int(name[name.index(':') + 1:])\n    return (watched_node_name, watched_output_slot, debug_op_index, debug_op)",
    "docstring": "Parse the name of a debug node. Args: node_name: Name of the debug node. Returns: 1. Name of the watched node, as a str. 2. Output slot index of the watched tensor, as an int. 3. Index of the debug node, as an int. 4. Name of the debug op, as a str, e.g, \"DebugIdentity\". Raises: ValueError: If the input node name is not a valid debug node name.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\debug\\lib\\debug_graphs.py",
    "ast_data": "FunctionDef name:parse_debug_node_name arg:node_name arguments arg Assign Assign If Call Raise Call Assign Call If Compare Call Raise Call Assign Call Assign Call Assign Call Call Assign Call If Compare Call Raise Call Assign Call Assign Call Call Return return:yes"
  },
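Working backwards from the parser, a debug node name has the form `__dbg_<node>:<slot>_<debug_op_index>_<debug_op>`. A worked example that mirrors the slicing the function performs:

```python
name = "__dbg_hidden/Weights:0_0_DebugIdentity"
body = name[len("__dbg_"):]            # "hidden/Weights:0_0_DebugIdentity"
body, _, debug_op = body.rpartition("_")   # debug op name
body, _, op_index = body.rpartition("_")   # debug op index
node, _, slot = body.partition(":")        # watched node and output slot
print(node, int(slot), int(op_index), debug_op)
# hidden/Weights 0 0 DebugIdentity
```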
  {
    "library": "pandas",
    "name": "to_frame",
    "source_code": "def to_frame(self, name: Hashable=lib.no_default) -> DataFrame:\n    columns: Index\n    if name is lib.no_default:\n        name = self.name\n        if name is None:\n            columns = default_index(1)\n        else:\n            columns = Index([name])\n    else:\n        columns = Index([name])\n    mgr = self._mgr.to_2d_mgr(columns)\n    df = self._constructor_expanddim_from_mgr(mgr, axes=mgr.axes)\n    return df.__finalize__(self, method='to_frame')",
    "docstring": "Convert Series to DataFrame. Parameters ---------- name : object, optional The passed name should substitute for the series name (if it has one). Returns ------- DataFrame DataFrame representation of Series. See Also -------- Series.to_dict : Convert Series to dict object. Examples -------- >>> s = pd.Series([\"a\", \"b\", \"c\"], name=\"vals\") >>> s.to_frame() vals 0 a 1 b 2 c",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\series.py",
    "ast_data": "FunctionDef name:to_frame arg:self arg:name arguments arg arg If Compare Assign If Compare Assign Call Assign Call Assign Call Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "kornia",
    "name": "forward_inverse",
    "source_code": "def forward_inverse(self) -> Tensor:\n    return torch.unsqueeze(torch.inverse(self.model), dim=0)",
    "docstring": "Interted Single-batch homography\". Returns: Homography martix with shape :math:.",
    "type": "method",
    "file_path": "kornia\\kornia\\geometry\\transform\\image_registrator.py",
    "ast_data": "FunctionDef name:forward_inverse arg:self arguments arg Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "_assert_state",
    "source_code": "def _assert_state(self, state: Union[TrainingState, list[TrainingState]]) -> None:\n    if isinstance(state, TrainingState):\n        state = [state]\n    if self.training_state not in state:\n        msg = f'expected to be in states {state} but current state is {self.training_state}'\n        if self.rank == 0:\n            print(f'Asserting FSDP instance is: {self}')\n            print(f'ERROR: {msg}')\n            traceback.print_stack()\n        raise ValueError(msg)",
    "docstring": "Assert we are in the given state.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\fsdp\\fully_sharded_data_parallel.py",
    "ast_data": "FunctionDef name:_assert_state arg:self arg:state arguments arg arg If Call Assign If Compare Assign If Compare Call Call Call Raise Call"
  },
  {
    "library": "django",
    "name": "table_names",
    "source_code": "def table_names(self, cursor=None, include_views=False):\n\n    def get_names(cursor):\n        return sorted((ti.name for ti in self.get_table_list(cursor) if include_views or ti.type == 't'))\n    if cursor is None:\n        with self.connection.cursor() as cursor:\n            return get_names(cursor)\n    return get_names(cursor)",
    "docstring": "Return a list of names of all tables that exist in the database. Sort the returned table list by Python's default sorting. Do NOT use the database's ORDER BY here to avoid subtle differences in sorting order between databases.",
    "type": "method",
    "file_path": "django\\django\\db\\backends\\base\\introspection.py",
    "ast_data": "FunctionDef name:table_names arg:self arg:cursor arg:include_views arguments arg arg arg FunctionDef name:get_names arg:cursor arguments arg Return return:yes Call Call BoolOp Compare If Compare With Call Return return:yes Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "ComptimeVariable",
    "source_code": "class ComptimeVariable(VariableTracker):\n\n    def reconstruct(self, codegen: 'PyCodegen'):\n        raise NotImplementedError('comptime is special form')\n\n    def var_getattr(self, tx: 'InstructionTranslator', name: str) -> 'VariableTracker':\n        from ..comptime import comptime\n        from .functions import UserFunctionVariable\n        return UserFunctionVariable(getattr(comptime, name), source=AttrSource(self.source, name))\n\n    def call_function(self, tx: 'InstructionTranslator', args: 'list[VariableTracker]', kwargs: 'dict[str, VariableTracker]') -> 'VariableTracker':\n        from ..comptime import ComptimeContext\n        assert not kwargs\n        assert len(args) <= 2\n        fn = args[0]\n        if isinstance(fn, UserFunctionVariable):\n            fn.get_function()(ComptimeContext(tx))\n        elif isinstance(fn, NestedUserFunctionVariable):\n            code = fn.get_code()\n            assert not fn.closure, f'comptime function must not have free variables, but these variables were free: {code.co_freevars}'\n            func = types.FunctionType(code, fn.f_globals, fn.fn_name.as_python_constant(), tuple(fn.defaults.items) if fn.defaults else None, ())\n            func(ComptimeContext(tx))\n        else:\n            raise RuntimeError(f'unsupported argument to comptime: {type(fn)}')\n        return variables.ConstantVariable.create(None)",
    "docstring": "This variable is special, it lets you execute arbitrary code at Dynamo compile time",
    "type": "class",
    "file_path": "pytorch\\torch\\_dynamo\\variables\\misc.py",
    "ast_data": "ClassDef name:ComptimeVariable FunctionDef name:reconstruct arg:self arg:codegen arguments arg arg Raise Call FunctionDef name:var_getattr arg:self arg:tx arg:name arguments arg arg arg Return return:yes Call Call Call FunctionDef name:call_function arg:self arg:tx arg:args arg:kwargs arguments arg arg arg arg Compare Call Assign If Call Call Call Call If Call Assign Call Assign Call Call Call Call Call Raise Call Call Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "DrawEvent",
    "source_code": "class DrawEvent(Event):\n\n    def __init__(self, name, canvas, renderer):\n        super().__init__(name, canvas)\n        self.renderer = renderer",
    "docstring": "An event triggered by a draw operation on the canvas. In most backends, callbacks subscribed to this event will be fired after the rendering is complete but before the screen is updated. Any extra artists drawn to the canvas's renderer will be reflected without an explicit call to `EventRendererBase` The renderer for the draw event.",
    "type": "class",
    "file_path": "matplotlib\\lib\\matplotlib\\backend_bases.py",
    "ast_data": "ClassDef name:DrawEvent FunctionDef name:__init__ arg:self arg:name arg:canvas arg:renderer arguments arg arg arg arg Call Call Assign"
  },
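Subscribing to the event through the canvas uses the standard Matplotlib event API:

```python
import matplotlib.pyplot as plt

fig, ax = plt.subplots()
ax.plot([0, 1], [0, 1])

def on_draw(event):
    # event is a DrawEvent; event.renderer is the backend renderer
    print("draw completed by", type(event.renderer).__name__)

cid = fig.canvas.mpl_connect("draw_event", on_draw)
fig.canvas.draw()
```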
  {
    "library": "pandas",
    "name": "datetime_format",
    "source_code": "@property\ndef datetime_format(self) -> str:\n    return self._datetime_format",
    "docstring": "Format string for dates written into Excel files (e.g. 'YYYY-MM-DD').",
    "type": "method",
    "file_path": "pandas\\pandas\\io\\excel\\_base.py",
    "ast_data": "FunctionDef name:datetime_format arg:self arguments arg Return return:yes"
  },
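The property mirrors the `datetime_format` argument passed when constructing an `ExcelWriter` (running this needs an Excel engine such as openpyxl installed):

```python
import pandas as pd

df = pd.DataFrame({"when": pd.to_datetime(["2024-01-02", "2024-03-04"])})
with pd.ExcelWriter("out.xlsx", datetime_format="YYYY-MM-DD") as writer:
    df.to_excel(writer)            # datetimes rendered as YYYY-MM-DD
    print(writer.datetime_format)  # "YYYY-MM-DD"
```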
  {
    "library": "kornia",
    "name": "rotation",
    "source_code": "@property\ndef rotation(self) -> So2:\n    return self._rotation",
    "docstring": "Return the underlying .",
    "type": "method",
    "file_path": "kornia\\kornia\\geometry\\liegroup\\se2.py",
    "ast_data": "FunctionDef name:rotation arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "get_config",
    "source_code": "def get_config(self):\n    from tensorflow.python.feature_column.serialization import serialize_feature_column\n    config = dict(zip(self._fields, self))\n    config['source_column'] = serialize_feature_column(self.source_column)\n    return config",
    "docstring": "See 'FeatureColumn` base class.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\feature_column\\feature_column_v2.py",
    "ast_data": "FunctionDef name:get_config arg:self arguments arg Assign Call Call Assign Call Return return:yes"
  },
  {
    "library": "sphinx",
    "name": "_cleanup",
    "source_code": "def _cleanup(self) -> None:\n    for tmp_dir in self._tmp_dirs:\n        with contextlib.suppress(Exception):\n            shutil.rmtree(tmp_dir)",
    "docstring": "Remove temporary directories.",
    "type": "method",
    "file_path": "sphinx\\sphinx\\theming.py",
    "ast_data": "FunctionDef name:_cleanup arg:self arguments arg For With Call Call"
  },
  {
    "library": "tensorflow",
    "name": "request_unwatch",
    "source_code": "def request_unwatch(self, node_name, output_slot, debug_op):\n    self._debug_ops_state_change_queue.put(_state_change(debug_service_pb2.EventReply.DebugOpStateChange.DISABLED, node_name, output_slot, debug_op))",
    "docstring": "Request disabling a debug tensor watchpoint or breakpoint. This is the opposite of . Args: node_name: () name of the node that the to-be-watched tensor belongs to, e.g., \"hidden/Weights\". output_slot: () output slot index of the tensor to watch. debug_op: () name of the debug op to enable. This should not include any attribute substrings.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\debug\\lib\\grpc_debug_server.py",
    "ast_data": "FunctionDef name:request_unwatch arg:self arg:node_name arg:output_slot arg:debug_op arguments arg arg arg arg Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_regular_normalize_batch_in_training",
    "source_code": "def _regular_normalize_batch_in_training(x, gamma, beta, reduction_axes, epsilon=0.001):\n    mean, var = nn.moments(x, reduction_axes, None, None, False)\n    normed = nn.batch_normalization(x, mean, var, beta, gamma, epsilon)\n    return (normed, mean, var)",
    "docstring": "Non-fused version of . Args: x: Input tensor or variable. gamma: Tensor by which to scale the input. beta: Tensor with which to center the input. reduction_axes: iterable of integers, axes over which to normalize. epsilon: Fuzz factor. Returns: A tuple length of 3, .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\backend.py",
    "ast_data": "FunctionDef name:_regular_normalize_batch_in_training arg:x arg:gamma arg:beta arg:reduction_axes arg:epsilon arguments arg arg arg arg arg Assign Call Assign Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "VectorParse",
    "source_code": "class VectorParse(NamedTuple):\n    width: float\n    height: float\n    depth: float\n    glyphs: list[tuple[FT2Font, float, int, float, float]]\n    rects: list[tuple[float, float, float, float]]",
    "docstring": "The namedtuple type returned by ``. Attributes ---------- width, height, depth : float The global metrics. glyphs : list The glyphs including their positions. rect : list The list of rectangles.",
    "type": "class",
    "file_path": "matplotlib\\lib\\matplotlib\\_mathtext.py",
    "ast_data": "ClassDef name:VectorParse"
  },
  {
    "library": "matplotlib",
    "name": "patch_collection_2d_to_3d",
    "source_code": "def patch_collection_2d_to_3d(col, zs=0, zdir='z', depthshade=None, axlim_clip=False, *args, depthshade_minalpha=None):\n    if isinstance(col, PathCollection):\n        col.__class__ = Path3DCollection\n        col._offset_zordered = None\n    elif isinstance(col, PatchCollection):\n        col.__class__ = Patch3DCollection\n    if depthshade is None:\n        depthshade = rcParams['axes3d.depthshade']\n    if depthshade_minalpha is None:\n        depthshade_minalpha = rcParams['axes3d.depthshade_minalpha']\n    col._depthshade = depthshade\n    col._depthshade_minalpha = depthshade_minalpha\n    col._in_draw = False\n    col.set_3d_properties(zs, zdir, axlim_clip)",
    "docstring": "Convert a into a object (or a into a object). Parameters ---------- col : or The collection to convert. zs : float or array of floats The location or locations to place the patches in the collection along the *zdir* axis. Default: 0. zdir : {'x', 'y', 'z'} The axis in which to place the patches. Default: \"z\". See for a description of the values. depthshade : bool, default: None Whether to shade the patches to give a sense of depth. If None, use the value from rcParams['axes3d.depthshade']. axlim_clip : bool, default: False Whether to hide patches with a vertex outside the axes view limits. .. versionadded:: 3.10 depthshade_minalpha : float, default: None Sets the minimum alpha value used by depth-shading. If None, use the value from rcParams['axes3d.depthshade_minalpha']. .. versionadded:: 3.11",
    "type": "function",
    "file_path": "matplotlib\\lib\\mpl_toolkits\\mplot3d\\art3d.py",
    "ast_data": "FunctionDef name:patch_collection_2d_to_3d arg:col arg:zs arg:zdir arg:depthshade arg:axlim_clip arguments arg arg arg arg arg arg arg If Call Assign Assign If Call Assign If Compare Assign If Compare Assign Assign Assign Assign Call"
  },
  {
    "library": "pandas",
    "name": "_equal_values",
    "source_code": "def _equal_values(self, other: Self) -> bool:\n    raise AbstractMethodError(self)",
    "docstring": "To be implemented by the subclasses. Only check the column values assuming shape and indexes have already been checked.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\internals\\managers.py",
    "ast_data": "FunctionDef name:_equal_values arg:self arg:other arguments arg arg Raise Call"
  },
  {
    "library": "pytorch",
    "name": "_get_fqn_to_param",
    "source_code": "def _get_fqn_to_param(model: torch.nn.Module) -> dict[str, torch.nn.Parameter]:\n    param_to_param_name = _get_param_to_fqn(model)\n    return dict(zip(param_to_param_name.values(), param_to_param_name.keys()))",
    "docstring": "Construct the inverse mapping of :meth:.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\fsdp\\fully_sharded_data_parallel.py",
    "ast_data": "FunctionDef name:_get_fqn_to_param arg:model arguments arg Assign Call Return return:yes Call Call Call Call"
  },
  {
    "library": "seaborn",
    "name": "Dots",
    "source_code": "@document_properties\n@dataclass\nclass Dots(DotBase):\n    marker: MappableString = Mappable(rc='scatter.marker', grouping=False)\n    pointsize: MappableFloat = Mappable(4, grouping=False)\n    stroke: MappableFloat = Mappable(0.75, grouping=False)\n    color: MappableColor = Mappable('C0', grouping=False)\n    alpha: MappableFloat = Mappable(1, grouping=False)\n    fill: MappableBool = Mappable(True, grouping=False)\n    fillcolor: MappableColor = Mappable(depend='color', grouping=False)\n    fillalpha: MappableFloat = Mappable(0.2, grouping=False)\n\n    def _resolve_properties(self, data, scales):\n        resolved = super()._resolve_properties(data, scales)\n        resolved['linewidth'] = resolved.pop('stroke')\n        resolved['facecolor'] = resolve_color(self, data, 'fill', scales)\n        resolved['edgecolor'] = resolve_color(self, data, '', scales)\n        resolved.setdefault('edgestyle', (0, None))\n        fc = resolved['facecolor']\n        if isinstance(fc, tuple):\n            resolved['facecolor'] = (fc[0], fc[1], fc[2], fc[3] * resolved['fill'])\n        else:\n            fc[:, 3] = fc[:, 3] * resolved['fill']\n            resolved['facecolor'] = fc\n        return resolved",
    "docstring": "A dot mark defined by strokes to better handle overplotting. See also -------- Dot : A mark suitable for dot plots or less-dense scatterplots. Examples -------- .. include:: ../docstrings/objects.Dots.rst",
    "type": "class",
    "file_path": "seaborn\\seaborn\\_marks\\dot.py",
    "ast_data": "ClassDef name:Dots Call Call Call Call Call Call Call Call FunctionDef name:_resolve_properties arg:self arg:data arg:scales arguments arg arg arg Assign Call Call Assign Call Assign Call Assign Call Call Assign If Call Assign Assign Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "rewrite_fn",
    "source_code": "def rewrite_fn(*args):\n    del args\n    per_replica_inputs = multi_worker_iterator.get_next()\n    replicate_inputs = []\n    for replica_id in range(self._num_replicas_in_sync):\n        select_replica = lambda x: distribute_utils.select_replica(replica_id, x)\n        replicate_inputs.append((nest.map_structure(select_replica, per_replica_inputs),))\n    replicate_outputs = tpu.replicate(run_fn, replicate_inputs, device_assignment=self._device_assignment, xla_options=tpu.XLAOptions(use_spmd_for_xla_partitioning=self._use_spmd_for_xla_partitioning))\n    if isinstance(replicate_outputs[0], list):\n        replicate_outputs = nest.flatten(replicate_outputs)\n    return replicate_outputs",
    "docstring": "The rewritten step fn running on TPU.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\tpu_strategy.py",
    "ast_data": "FunctionDef name:rewrite_fn arguments arg Assign Call Assign For Call Assign arguments arg Call Call Call Assign Call Call If Call Assign Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_qat_conv_bn_pattern",
    "source_code": "def _qat_conv_bn_pattern(x: torch.Tensor, conv_weight: torch.Tensor, conv_bias: torch.Tensor, bn_weight: torch.Tensor, bn_bias: torch.Tensor, bn_running_mean: torch.Tensor, bn_running_var: torch.Tensor) -> torch.Tensor:\n    bn_eps = 1e-05\n    running_std = torch.sqrt(bn_running_var + bn_eps)\n    scale_factor = bn_weight / running_std\n    weight_shape = [1] * len(conv_weight.shape)\n    weight_in_channel_axis = 1 if _is_conv_transpose_fn(conv_fn) else 0\n    weight_shape[weight_in_channel_axis] = -1\n    bias_shape = [1] * len(conv_weight.shape)\n    bias_shape[1] = -1\n    scaled_weight = conv_weight * scale_factor.reshape(weight_shape)\n    zero_bias = torch.zeros_like(conv_bias, dtype=x.dtype)\n    x = conv_fn(x, scaled_weight, zero_bias)\n    x = x / scale_factor.reshape(bias_shape)\n    x = x + conv_bias.reshape(bias_shape)\n    x = F.batch_norm(x, bn_running_mean, bn_running_var, bn_weight, bn_bias, training=True, eps=bn_eps)\n    return x",
    "docstring": "Approximated method to fuse conv and bn. It requires only one forward pass. conv_orig = conv / scale_factor where scale_factor = bn.weight / running_std. This is based on .",
    "type": "function",
    "file_path": "pytorch\\torch\\ao\\quantization\\pt2e\\qat_utils.py",
    "ast_data": "FunctionDef name:_qat_conv_bn_pattern arg:x arg:conv_weight arg:conv_bias arg:bn_weight arg:bn_bias arg:bn_running_mean arg:bn_running_var arguments arg arg arg arg arg arg arg Assign Assign Call Assign Assign Call Assign Call Assign Assign Call Assign Assign Call Assign Call Assign Call Assign Call Assign Call Assign Call Return return:yes"
  },
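The pattern relies on per-output-channel linearity of convolution: scaling the weight by `s = bn_weight / running_std` and dividing the output by `s` is a no-op. A quick numeric check of that identity:

```python
import torch
import torch.nn.functional as F

x = torch.randn(1, 3, 8, 8)
w = torch.randn(4, 3, 3, 3)  # 4 output channels
s = torch.rand(4) + 0.5      # stand-in for bn_weight / running_std

# Scale the weight per output channel, then undo the scale on the output.
lhs = F.conv2d(x, w * s.reshape(-1, 1, 1, 1)) / s.reshape(1, -1, 1, 1)
rhs = F.conv2d(x, w)
print(torch.allclose(lhs, rhs, atol=1e-5))  # True
```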
  {
    "library": "tensorflow",
    "name": "from_row_splits",
    "source_code": "@classmethod\ndef from_row_splits(cls, row_splits, validate=True, dtype=None, dtype_hint=None):\n    if not isinstance(validate, bool):\n        raise TypeError('validate must have type bool')\n    if isinstance(row_splits, (list, tuple)) and (not row_splits):\n        raise ValueError('row_splits tensor may not be empty.')\n    if isinstance(row_splits, tensor_lib.TensorSpec):\n        return cls(row_splits=row_splits, internal=_row_partition_factory_key)\n    with ops.name_scope(None, 'RowPartitionFromRowSplits', [row_splits]):\n        row_splits = cls._convert_row_partition(row_splits, 'row_splits', dtype_hint=dtype_hint, dtype=dtype)\n        row_splits.shape.assert_has_rank(1)\n        if validate:\n            msg = 'Arguments to from_row_splits do not form a valid RaggedTensor:'\n            checks = [check_ops.assert_rank(row_splits, 1, message=msg + 'rank'), _assert_zero(row_splits[0], message=msg + 'zero'), _assert_monotonic_increasing(row_splits, message=msg + 'monotonic')]\n            row_splits = control_flow_ops.with_dependencies(checks, row_splits)\n        return cls(row_splits=row_splits, internal=_row_partition_factory_key)",
    "docstring": "Creates a with rows partitioned by . This divides a sequence into rows by indicating where each row begins and ends: Args: row_splits: A 1-D integer tensor with shape . Must not be empty, and must be sorted in ascending order. must be zero. validate: If true, then use assertions to check that the arguments form a valid . dtype: Optional dtype for the RowPartition. If missing, the type is inferred from the type of , dtype_hint, or tf.int64. dtype_hint: Optional dtype for the RowPartition, used when dtype is None. In some cases, a caller may not have a dtype in mind when converting to a tensor, so dtype_hint can be used as a soft preference. If the conversion to is not possible, this argument has no effect. Returns: A . Raises: ValueError: If is an empty list.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\row_partition.py",
    "ast_data": "FunctionDef name:from_row_splits arg:cls arg:row_splits arg:validate arg:dtype arg:dtype_hint arguments arg arg arg arg arg If Call Raise Call If BoolOp Call Raise Call If Call Return return:yes Call With Call Assign Call Call If Assign Assign Call Call Call Assign Call Return return:yes Call"
  },
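The same row-splits scheme is visible through the public `tf.RaggedTensor` factory, which partitions values the same way:

```python
import tensorflow as tf

rt = tf.RaggedTensor.from_row_splits(
    values=[3, 1, 4, 1, 5, 9, 2, 6],
    row_splits=[0, 4, 4, 7, 8, 8])
print(rt)  # <tf.RaggedTensor [[3, 1, 4, 1], [], [5, 9, 2], [6], []]>
```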
  {
    "library": "tensorflow",
    "name": "node_attributes",
    "source_code": "def node_attributes(self, node_name, device_name=None):\n    if not self._debug_graphs:\n        raise LookupError('No partition graphs have been loaded.')\n    device_name = self._infer_device_name(device_name, node_name)\n    return self._debug_graphs[device_name].node_attributes[node_name]",
    "docstring": "Get the attributes of a node. Args: node_name: Name of the node in question. device_name: () name of the device. If there is only one device or if node_name exists on only one device, this argument is optional. Returns: Attributes of the node. Raises: LookupError: If no partition graphs have been loaded.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\debug\\lib\\debug_data.py",
    "ast_data": "FunctionDef name:node_attributes arg:self arg:node_name arg:device_name arguments arg arg arg If Raise Call Assign Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "triton_builtin",
    "source_code": "def triton_builtin(f: _T) -> _T:\n    f.__triton_builtin__ = True\n    return f",
    "docstring": "Decorator to mark a function as a Triton built-in function. These functions are evaluated at compile time. Args: f (function): The function to be marked as a Triton built-in. Returns: function: The same function, marked as a Triton built-in.",
    "type": "function",
    "file_path": "pytorch\\torch\\_inductor\\runtime\\triton_helpers.py",
    "ast_data": "FunctionDef name:triton_builtin arg:f arguments arg Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "is_diag_update_positive",
    "source_code": "@property\ndef is_diag_update_positive(self):\n    return self._is_diag_update_positive",
    "docstring": "If this operator is , this hints elementwise.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\linalg\\linear_operator_low_rank_update.py",
    "ast_data": "FunctionDef name:is_diag_update_positive arg:self arguments arg Return return:yes"
  },
  {
    "library": "django",
    "name": "pre_save_val",
    "source_code": "def pre_save_val(self, field, obj):\n    if self.query.raw:\n        return getattr(obj, field.attname)\n    return field.pre_save(obj, add=True)",
    "docstring": "Get the given field's value off the given obj. pre_save() is used for things like auto_now on DateTimeField. Skip it if this is a raw query.",
    "type": "method",
    "file_path": "django\\django\\db\\models\\sql\\compiler.py",
    "ast_data": "FunctionDef name:pre_save_val arg:self arg:field arg:obj arguments arg arg arg If Return return:yes Call Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "to_list",
    "source_code": "def to_list(self):\n    output = []\n    if self.none:\n        output.append(None)\n    if self.nan:\n        output.append(np.nan)\n    return output",
    "docstring": "Convert tuple to a list where None is always first.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\utils\\_encode.py",
    "ast_data": "FunctionDef name:to_list arg:self arguments arg Assign If Call If Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "tensor_inference_rule",
    "source_code": "@register_inference_rule(torch.tensor)\ndef tensor_inference_rule(n: Node, symbols, constraints, counter):\n    return ([], counter)",
    "docstring": "If the tensor is a scalar, we will skip it since we do not support scalars yet. We will add support in the future if it's needed. For our examples so far, scalars are not needed.",
    "type": "function",
    "file_path": "pytorch\\torch\\fx\\experimental\\migrate_gradual_types\\constraint_generator.py",
    "ast_data": "FunctionDef name:tensor_inference_rule arg:n arg:symbols arg:constraints arg:counter arguments arg arg arg arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "id",
    "source_code": "@property\ndef id(self) -> tuple[int, int]:\n    return super().id",
    "docstring": "Returns the ID of this pool as a tuple of two ints.",
    "type": "method",
    "file_path": "pytorch\\torch\\cuda\\memory.py",
    "ast_data": "FunctionDef name:id arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "FixedFormatter",
    "source_code": "class FixedFormatter(Formatter):\n\n    def __init__(self, seq):\n        self.seq = seq\n        self.offset_string = ''\n\n    def __call__(self, x, pos=None):\n        if pos is None or pos >= len(self.seq):\n            return ''\n        else:\n            return self.seq[pos]\n\n    def get_offset(self):\n        return self.offset_string\n\n    def set_offset_string(self, ofs):\n        self.offset_string = ofs",
    "docstring": "Return fixed strings for tick labels based only on position, not value. .. note:: should only be used together with . Otherwise, the labels may end up in unexpected positions.",
    "type": "class",
    "file_path": "matplotlib\\lib\\matplotlib\\ticker.py",
    "ast_data": "ClassDef name:FixedFormatter FunctionDef name:__init__ arg:self arg:seq arguments arg arg Assign Assign FunctionDef name:__call__ arg:self arg:x arg:pos arguments arg arg arg If BoolOp Compare Compare Call Return return:yes Return return:yes FunctionDef name:get_offset arg:self arguments arg Return return:yes FunctionDef name:set_offset_string arg:self arg:ofs arguments arg arg Assign"
  },
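As the note says, pair it with `FixedLocator` so each label string lands on a known tick position:

```python
import matplotlib.pyplot as plt
from matplotlib import ticker

fig, ax = plt.subplots()
ax.plot(range(4))
ax.xaxis.set_major_locator(ticker.FixedLocator([0, 1, 2, 3]))
ax.xaxis.set_major_formatter(
    ticker.FixedFormatter(["zero", "one", "two", "three"]))
```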
  {
    "library": "tensorflow",
    "name": "HiddenTfApiAttribute",
    "source_code": "class HiddenTfApiAttribute(property):\n\n    def __init__(self, deprecation_message):\n\n        def raise_error(unused_self):\n            raise AttributeError(deprecation_message)\n        super(HiddenTfApiAttribute, self).__init__(raise_error)",
    "docstring": "Hides a class attribute from the public API. Attributes in public classes can be hidden from the API by having an '_' in front of the name (e.g. ClassName._variables). This doesn't work when attributes or methods are inherited from a parent class. To hide inherited attributes, set their values to be .",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\util\\deprecation.py",
    "ast_data": "ClassDef name:HiddenTfApiAttribute FunctionDef name:__init__ arg:self arg:deprecation_message arguments arg arg FunctionDef name:raise_error arg:unused_self arguments arg Raise Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "float8_e4m3fnuz",
    "source_code": "def float8_e4m3fnuz(self):\n    return self._to(torch.float8_e4m3fnuz)",
    "docstring": "Casts this storage to float8_e4m3fnuz type",
    "type": "method",
    "file_path": "pytorch\\torch\\storage.py",
    "ast_data": "FunctionDef name:float8_e4m3fnuz arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "LinearMixing",
    "source_code": "class LinearMixing(GenericBroyden):\n\n    def __init__(self, alpha=None):\n        GenericBroyden.__init__(self)\n        self.alpha = alpha\n\n    def solve(self, f, tol=0):\n        return -f * self.alpha\n\n    def matvec(self, f):\n        return -f / self.alpha\n\n    def rsolve(self, f, tol=0):\n        return -f * np.conj(self.alpha)\n\n    def rmatvec(self, f):\n        return -f / np.conj(self.alpha)\n\n    def todense(self):\n        return np.diag(np.full(self.shape[0], -1 / self.alpha))\n\n    def _update(self, x, f, dx, df, dx_norm, df_norm):\n        pass",
    "docstring": "Find a root of a function, using a scalar Jacobian approximation. .. warning:: This algorithm may be useful for specific problems, but whether it will work may depend strongly on the problem. Parameters ---------- %(params_basic)s alpha : float, optional The Jacobian approximation is (-1/alpha). %(params_extra)s See Also -------- root : Interface to root finding algorithms for multivariate functions. See `` in particular.",
    "type": "class",
    "file_path": "scipy\\scipy\\optimize\\_nonlin.py",
    "ast_data": "ClassDef name:LinearMixing FunctionDef name:__init__ arg:self arg:alpha arguments arg arg Call Assign FunctionDef name:solve arg:self arg:f arg:tol arguments arg arg arg Return return:yes FunctionDef name:matvec arg:self arg:f arguments arg arg Return return:yes FunctionDef name:rsolve arg:self arg:f arg:tol arguments arg arg arg Return return:yes Call FunctionDef name:rmatvec arg:self arg:f arguments arg arg Return return:yes Call FunctionDef name:todense arg:self arguments arg Return return:yes Call Call FunctionDef name:_update arg:self arg:x arg:f arg:dx arg:df arg:dx_norm arg:df_norm arguments arg arg arg arg arg arg arg"
  },
  {
    "library": "django",
    "name": "SignatureExpired",
    "source_code": "class SignatureExpired(BadSignature):\n    pass",
    "docstring": "Signature timestamp is older than required max_age.",
    "type": "class",
    "file_path": "django\\django\\core\\signing.py",
    "ast_data": "ClassDef name:SignatureExpired"
  },
  {
    "library": "pytorch",
    "name": "get",
    "source_code": "@abc.abstractmethod\ndef get(self, size: int, timeout: float) -> list[TimerRequest]:\n    pass",
    "docstring": "Gets up to `` seconds).",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\elastic\\timer\\api.py",
    "ast_data": "FunctionDef name:get arg:self arg:size arg:timeout arguments arg arg arg"
  },
  {
    "library": "pytorch",
    "name": "_SACMetadata",
    "source_code": "@dataclass\nclass _SACMetadata:\n    func: Any\n    time_taken: float\n    memory_used: float\n    curr_idx: int\n    output_ids: tuple[int, ...]\n    inplace_info: tuple[int, ...]\n    is_view_like: bool\n    is_rand_op: bool",
    "docstring": "Stores metadata for a single operator for SAC. Attributes: func (Any): The operator function. time_taken (float): The time taken by the operator. memory_used (float): The memory used by the operator. curr_idx (int): The current operator index. output_ids (Tuple[int, ...]): The storage IDs of the operator's outputs. inplace_info (Tuple[int, ...]): Tuple of self and parent operator for in-place operator. is_view_like (bool): Whether the operator is view-like. is_rand_op (bool): Whether the operator is a random operator.",
    "type": "class",
    "file_path": "pytorch\\torch\\distributed\\_tools\\sac_estimator.py",
    "ast_data": "ClassDef name:_SACMetadata"
  },
  {
    "library": "pytorch",
    "name": "__init__",
    "source_code": "def __init__(self, cache_staged_state_dict: bool=False, type_check: bool=False):\n    self.cache_staged_state_dict = cache_staged_state_dict\n    self.type_check = type_check\n    self.state_dict_cache: Optional[STATE_DICT_TYPE] = None",
    "docstring": "Initializes the BlockingAsyncStager. Args: cache_staged_state_dict: Whether to cache the staged state_dict. This option decreases staging latency at the cost of increases memory usage. Additionally, if this parameter is set to True, it's the expectation that the stager is maintained and re-used for multiple dcp.async_save calls. Default to False. type_check: Whether to perform a type check during cpu_offload. Defaults to False.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\checkpoint\\staging.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:cache_staged_state_dict arg:type_check arguments arg arg arg Assign Assign"
  },
  {
    "library": "pytorch",
    "name": "InvalidVersion",
    "source_code": "class InvalidVersion(ValueError):\n    pass",
    "docstring": "Raised when a version string is not a valid version. >>> Version(\"invalid\") Traceback (most recent call last): ... packaging.version.InvalidVersion: Invalid version: 'invalid'",
    "type": "class",
    "file_path": "pytorch\\torch\\_vendor\\packaging\\version.py",
    "ast_data": "ClassDef name:InvalidVersion"
  },
  {
    "library": "tensorflow",
    "name": "get_gpu_count",
    "source_code": "def get_gpu_count():\n    key = 'gpu_count_no_sudo'\n    out, err = run_shell_cmd(cmds_all[PLATFORM][key])\n    if err and FLAGS.debug:\n        print('Error in detecting GPU count:\\n %s' % str(err))\n    return out.strip(b'\\n')",
    "docstring": "Retrieves total number of GPU's available in the system. Returns: Integer that is the total # of GPU's found.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\tools\\tensorflow_builder\\config_detector\\config_detector.py",
    "ast_data": "FunctionDef name:get_gpu_count arguments Assign Assign Call If BoolOp Call Call Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "_skip_if_lti",
    "source_code": "def _skip_if_lti(arg):\n    if isinstance(arg, tuple):\n        return arg\n    else:\n        return (None,)",
    "docstring": "Handle arg overloads. ATM, only pass tuples through. Consider updating when cupyx.lti class is supported.",
    "type": "function",
    "file_path": "scipy\\scipy\\signal\\_delegators.py",
    "ast_data": "FunctionDef name:_skip_if_lti arg:arg arguments arg If Call Return return:yes Return return:no"
  },
  {
    "library": "tensorflow",
    "name": "_convert_as_saved_model",
    "source_code": "def _convert_as_saved_model(self):\n    temp_dir = tempfile.mkdtemp()\n    try:\n        graph_def, input_tensors, _ = self._convert_concrete_functions_to_saved_model(temp_dir)\n        if self.saved_model_dir:\n            self._validate_inputs(graph_def, input_tensors)\n            return self._convert_from_saved_model(graph_def)\n    finally:\n        shutil.rmtree(temp_dir, True)\n    return None",
    "docstring": "Converts the given concrete functions as a saved model format. Returns: The converted data in serialized format.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\lite\\python\\lite.py",
    "ast_data": "FunctionDef name:_convert_as_saved_model arg:self arguments arg Assign Call Try Assign Call If Call Return return:yes Call Call Return return:no"
  },
  {
    "library": "pytorch",
    "name": "get_param",
    "source_code": "def get_param(program: 'ExportedProgram', node: torch.fx.Node) -> Optional[torch.nn.Parameter]:\n    if is_param(program, node):\n        parameter_name = program.graph_signature.inputs_to_parameters[node.name]\n        return program.state_dict[parameter_name]\n    return None",
    "docstring": "Returns the parameter associated with the given node in the exported program. Returns None if the node is not a parameter within the exported program",
    "type": "function",
    "file_path": "pytorch\\torch\\_export\\utils.py",
    "ast_data": "FunctionDef name:get_param arg:program arg:node arguments arg arg If Call Assign Return return:yes Return return:no"
  },
  {
    "library": "scikit-learn",
    "name": "_validate_params",
    "source_code": "def _validate_params(self):\n    validate_parameter_constraints(self._parameter_constraints, self.get_params(deep=False), caller_name=self.__class__.__name__)",
    "docstring": "Validate types and values of constructor parameters The expected type and values must be defined in the class attribute, which is a dictionary . See the docstring of for a description of the accepted constraints.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\base.py",
    "ast_data": "FunctionDef name:_validate_params arg:self arguments arg Call Call"
  },
  {
    "library": "django",
    "name": "render_annotated",
    "source_code": "def render_annotated(self, context):\n    return self.s",
    "docstring": "Return the given value. The default implementation of this method handles exceptions raised during rendering, which is not necessary for text nodes.",
    "type": "method",
    "file_path": "django\\django\\template\\base.py",
    "ast_data": "FunctionDef name:render_annotated arg:self arg:context arguments arg arg Return return:yes"
  },
  {
    "library": "django",
    "name": "handle_one_request",
    "source_code": "def handle_one_request(self):\n    self.raw_requestline = self.rfile.readline(65537)\n    if len(self.raw_requestline) > 65536:\n        self.requestline = ''\n        self.request_version = ''\n        self.command = ''\n        self.send_error(414)\n        return\n    if not self.parse_request():\n        return\n    handler = ServerHandler(self.rfile, self.wfile, self.get_stderr(), self.get_environ())\n    handler.request_handler = self\n    handler.run(self.server.get_app())",
    "docstring": "Copy of WSGIRequestHandler.handle() but with different ServerHandler",
    "type": "method",
    "file_path": "django\\django\\core\\servers\\basehttp.py",
    "ast_data": "FunctionDef name:handle_one_request arg:self arguments arg Assign Call If Compare Call Assign Assign Assign Call Return return:no If Call Return return:no Assign Call Call Call Assign Call Call"
  },
  {
    "library": "pytorch",
    "name": "_pre_dp_module_transform",
    "source_code": "def _pre_dp_module_transform(module: nn.Module):\n    _localize_dtensor(module, None, None)\n    module.register_forward_pre_hook(_reconstruct_dtensor)\n    module.register_forward_hook(_localize_dtensor)",
    "docstring": "Enable the composability between Tensor Parallelism (TP) and Data Parallelism(DP) in PyTorch when using DDP. We need to convert Parameters which are DTensors to local tensors before wrapping with data parallelism API. We then register two hooks, one for converting local tensors back to DTensor preforward and one to convert DTensors back to tensors after Forward. By integrating this way, we avoid any special handling of DTensor parameters by DDP and get DTensor's gradients propagated back to DP, e.g. gradient buckets of DDP. For now, this API only works with `nn.Module`): Module which has been applied TP on. Example:: >>> # xdoctest: +SKIP(\"distributed\") >>> from torch.distributed.tensor.parallel import parallelize_module, PairwiseParallel >>> from torch.nn.parallel import DistributedDataParallel as DDP >>> from torch.distributed.tensor.parallel.ddp import pre_dp_module_transform >>> >>> # Define the module. >>> m = module(...) >>> parallelize_module(m, PairwiseParallel()) >>> m = pre_dp_module_transform(m) >>> m = DDP(m) >>>",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\tensor\\parallel\\ddp.py",
    "ast_data": "FunctionDef name:_pre_dp_module_transform arg:module arguments arg Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "set_last_checkpoints",
    "source_code": "def set_last_checkpoints(self, last_checkpoints):\n    assert isinstance(last_checkpoints, list)\n    self._last_checkpoints = [(s, np.inf) for s in last_checkpoints]",
    "docstring": "DEPRECATED: Use set_last_checkpoints_with_time. Sets the list of old checkpoint filenames. Args: last_checkpoints: A list of checkpoint filenames. Raises: AssertionError: If last_checkpoints is not a list.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\training\\saver.py",
    "ast_data": "FunctionDef name:set_last_checkpoints arg:self arg:last_checkpoints arguments arg arg Call Assign"
  },
  {
    "library": "tensorflow",
    "name": "set_number_of_partitions",
    "source_code": "def set_number_of_partitions(self, number_of_partitions):\n    if self._frozen:\n        if self._number_of_partitions != number_of_partitions:\n            raise ValueError(f\"Can't set number_of_partitions to {number_of_partitions} since it has been frozen to use {self._number_of_partitions}.\")\n    else:\n        self._number_of_partitions = number_of_partitions",
    "docstring": "Sets the number of partitions for the current policy. If the policy has been frozen then shard_dimension must match the existing setting. Args: number_of_partitions: The number of partitions to use in the policy. Raises: ValueError: If the policy has been frozen and shard_dimension differs from the frozen value.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\tpu\\tpu_sharding.py",
    "ast_data": "FunctionDef name:set_number_of_partitions arg:self arg:number_of_partitions arguments arg arg If If Compare Raise Call Assign"
  },
  {
    "library": "kornia",
    "name": "unproject_points_orthographic",
    "source_code": "def unproject_points_orthographic(points_in_camera: Tensor, extension: Tensor) -> Tensor:\n    KORNIA_CHECK_SHAPE(points_in_camera, ['*', '2'])\n    if len(points_in_camera.shape) != len(extension.shape):\n        extension = extension[..., None]\n    return ops.concatenate([points_in_camera, extension], dim=-1)",
    "docstring": "Unproject one or more points from the canonical z=1 plane into the camera frame. .. math:: \\begin{bmatrix} x \\\\ y \\\\ z \\end{bmatrix} = \\begin{bmatrix} u \\\\ v \\\\ w \\end{bmatrix} Args: points_in_camera: Tensor representing the points to unproject with shape (..., 2). extension: Tensor representing the extension of the points to unproject with shape (..., 1). Returns: Tensor representing the unprojected points with shape (..., 3). Example: >>> points = torch.tensor([1., 2.]) >>> extension = torch.tensor([3.]) >>> unproject_points_orthographic(points, extension) tensor([1., 2., 3.])",
    "type": "function",
    "file_path": "kornia\\kornia\\geometry\\camera\\projection_orthographic.py",
    "ast_data": "FunctionDef name:unproject_points_orthographic arg:points_in_camera arg:extension arguments arg arg Call If Compare Call Call Assign Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "extract_bool_array",
    "source_code": "def extract_bool_array(mask: ArrayLike) -> npt.NDArray[np.bool_]:\n    if isinstance(mask, ExtensionArray):\n        mask = mask.to_numpy(dtype=bool, na_value=False)\n    mask = np.asarray(mask, dtype=bool)\n    return mask",
    "docstring": "If we have a SparseArray or BooleanArray, convert it to ndarray[bool].",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\array_algos\\putmask.py",
    "ast_data": "FunctionDef name:extract_bool_array arg:mask arguments arg If Call Assign Call Assign Call Return return:yes"
  },
  {
    "library": "scipy",
    "name": "residual",
    "source_code": "def residual(self, x):\n    return (self.A @ x - self.lb, self.ub - self.A @ x)",
    "docstring": "Calculate the residual between the constraint function and the limits For a linear constraint of the form:: lb <= A@x <= ub the lower and upper residuals between `` indicates that the corresponding element of the constraint is not satisfied. Parameters ---------- x: array_like Vector of independent variables Returns ------- sl, sb : array-like The lower and upper residuals",
    "type": "method",
    "file_path": "scipy\\scipy\\optimize\\_constraints.py",
    "ast_data": "FunctionDef name:residual arg:self arg:x arguments arg arg Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "get_axes_locator",
    "source_code": "def get_axes_locator(self):\n    return self._axes_locator",
    "docstring": "Return the axes_locator.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\axes\\_base.py",
    "ast_data": "FunctionDef name:get_axes_locator arg:self arguments arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "__init__",
    "source_code": "def __init__(self, name: str, size: tuple[Union[str, int], ...], min_elements: Optional[int]=None, max_elements: Optional[int]=None, dim_parameter: Optional[str]=None, sparse_dim: Optional[str]=None, nnz: Optional[str]=None, density: Optional[str]=None, coalesced: Optional[str]=None, dtype=torch.float32, cuda=False):\n    super().__init__(name=name, size=size, min_elements=min_elements, max_elements=max_elements, dim_parameter=dim_parameter, dtype=dtype, cuda=cuda)\n    self._density = density\n    self._coalesced = coalesced\n    self._sparse_dim = sparse_dim",
    "docstring": "Args: name: A string identifier for the generated Tensor. size: A tuple of integers or strings specifying the size of the generated Tensor. String values will replaced with a concrete int during the generation process, while ints are simply passed as literals. min_elements: The minimum number of parameters that this Tensor must have for a set of parameters to be valid. (Otherwise they are resampled.) max_elements: Like , but setting an upper bound. dim_parameter: The length of will be truncated to this value. This allows Tensors of varying dimensions to be generated by the Fuzzer. sparse_dim: The number of sparse dimensions in a sparse tensor. density: This value allows tensors of varying sparsities to be generated by the Fuzzer. coalesced: The sparse tensor format permits uncoalesced sparse tensors, where there may be duplicate coordinates in the indices. dtype: The PyTorch dtype of the generated Tensor. cuda: Whether to place the Tensor on a GPU.",
    "type": "method",
    "file_path": "pytorch\\torch\\utils\\benchmark\\utils\\sparse_fuzzer.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:name arg:size arg:min_elements arg:max_elements arg:dim_parameter arg:sparse_dim arg:nnz arg:density arg:coalesced arg:dtype arg:cuda arguments arg arg arg arg arg arg arg arg arg arg arg arg Call Call Assign Assign Assign"
  },
  {
    "library": "pandas",
    "name": "format",
    "source_code": "def format(self):\n    return self.condition",
    "docstring": "return the actual ne format",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\computation\\pytables.py",
    "ast_data": "FunctionDef name:format arg:self arguments arg Return return:yes"
  },
  {
    "library": "scipy",
    "name": "init_population_qmc",
    "source_code": "def init_population_qmc(self, qmc_engine):\n    from scipy.stats import qmc\n    rng = self.random_number_generator\n    if qmc_engine == 'latinhypercube':\n        sampler = qmc.LatinHypercube(d=self.parameter_count, seed=rng)\n    elif qmc_engine == 'sobol':\n        sampler = qmc.Sobol(d=self.parameter_count, seed=rng)\n    elif qmc_engine == 'halton':\n        sampler = qmc.Halton(d=self.parameter_count, seed=rng)\n    else:\n        raise ValueError(self.__init_error_msg)\n    self.population = sampler.random(n=self.num_population_members)\n    self.population_energies = np.full(self.num_population_members, np.inf)\n    self._nfev = 0",
    "docstring": "Initializes the population with a QMC method. QMC methods ensures that each parameter is uniformly sampled over its range. Parameters ---------- qmc_engine : str The QMC method to use for initialization. Can be one of ``.",
    "type": "method",
    "file_path": "scipy\\scipy\\optimize\\_differentialevolution.py",
    "ast_data": "FunctionDef name:init_population_qmc arg:self arg:qmc_engine arguments arg arg Assign If Compare Assign Call If Compare Assign Call If Compare Assign Call Raise Call Assign Call Assign Call Assign"
  },
  {
    "library": "tensorflow",
    "name": "_descending_sort",
    "source_code": "def _descending_sort(values, axis, return_argsort=False):\n    k = array_ops.shape(values)[axis]\n    rank = array_ops.rank(values)\n    static_rank = values.shape.ndims\n    if axis == -1 or axis + 1 == values.get_shape().ndims:\n        top_k_input = values\n        transposition = None\n    else:\n        if axis < 0:\n            axis += static_rank or rank\n        if static_rank is not None:\n            transposition = constant_op.constant(np.r_[np.arange(axis), [static_rank - 1], np.arange(axis + 1, static_rank - 1), [axis]], name='transposition')\n        else:\n            transposition = array_ops.tensor_scatter_update(math_ops.range(rank), [[axis], [rank - 1]], [rank - 1, axis])\n        top_k_input = array_ops.transpose(values, transposition)\n    values, indices = nn_ops.top_k(top_k_input, k)\n    return_value = indices if return_argsort else values\n    if transposition is not None:\n        return_value = array_ops.transpose(return_value, transposition)\n    return return_value",
    "docstring": "Sorts values in reverse using . Args: values: Tensor of numeric values. axis: Index of the axis which values should be sorted along. return_argsort: If False, return the sorted values. If True, return the indices that would sort the values. Returns: The sorted values.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\sort_ops.py",
    "ast_data": "FunctionDef name:_descending_sort arg:values arg:axis arg:return_argsort arguments arg arg arg Assign Call Assign Call Assign If BoolOp Compare Compare Call Assign Assign If Compare BoolOp If Compare Assign Call Call Call Assign Call Call Assign Call Assign Call Assign If Compare Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_expand_st_row_partitions",
    "source_code": "def _expand_st_row_partitions(st, axis):\n    if axis == 0:\n        if st.shape.rank == 0:\n            return ()\n        nvals = st.nrows()\n        new_partition = RowPartition.from_uniform_row_length(nvals, nvals, nrows=1, validate=False)\n        return (new_partition,) + st.row_partitions\n    elif axis == st.rank:\n        nvals = st.row_partitions[axis - 2].nvals() if axis - 2 >= 0 else st.nrows()\n        return st.row_partitions + (RowPartition.from_uniform_row_length(1, nvals, nrows=nvals, validate=False),)\n    else:\n        nvals = st.row_partitions[axis - 1].nrows() if axis - 1 >= 0 else st.nrows()\n        return st.row_partitions[:axis - 1] + (RowPartition.from_uniform_row_length(1, nvals, nrows=nvals, validate=False),) + st.row_partitions[axis - 1:]",
    "docstring": "Create the row_partitions for expand_dims.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\structured\\structured_array_ops.py",
    "ast_data": "FunctionDef name:_expand_st_row_partitions arg:st arg:axis arguments arg arg If Compare If Compare Return return:no Assign Call Assign Call Return return:yes If Compare Assign Compare Call Call Return return:yes Call Assign Compare Call Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "trace",
    "source_code": "def trace(self, graph_element_name):\n    self._depth_count += 1\n    node_name = get_node_name(graph_element_name)\n    if node_name == self._destination_node_name:\n        raise GraphTracingReachedDestination()\n    if node_name in self._skip_node_names:\n        return\n    if node_name in self._visited_nodes:\n        return\n    self._visited_nodes.append(node_name)\n    for input_list in self._input_lists:\n        if node_name not in input_list:\n            continue\n        for inp in input_list[node_name]:\n            if get_node_name(inp) in self._visited_nodes:\n                continue\n            self._inputs.append(inp)\n            self._depth_list.append(self._depth_count)\n            self.trace(inp)\n    self._depth_count -= 1",
    "docstring": "Trace inputs. Args: graph_element_name: Name of the node or an output tensor of the node, as a str. Raises: GraphTracingReachedDestination: if destination_node_name of this tracer object is not None and the specified node is reached.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\debug\\lib\\debug_graphs.py",
    "ast_data": "FunctionDef name:trace arg:self arg:graph_element_name arguments arg arg Assign Call If Compare Raise Call If Compare Return return:no If Compare Return return:no Call For If Compare For If Compare Call Call Call Call"
  },
  {
    "library": "authlib",
    "name": "create_query_client_func",
    "source_code": "def create_query_client_func(session, client_model):\n\n    def query_client(client_id):\n        q = session.query(client_model)\n        return q.filter_by(client_id=client_id).first()\n    return query_client",
    "docstring": "Create an `` function that can be used in authorization server. :param session: SQLAlchemy session :param client_model: Client model class",
    "type": "function",
    "file_path": "authlib\\authlib\\integrations\\sqla_oauth2\\functions.py",
    "ast_data": "FunctionDef name:create_query_client_func arg:session arg:client_model arguments arg arg FunctionDef name:query_client arg:client_id arguments arg Assign Call Return return:yes Call Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "get_xheight",
    "source_code": "def get_xheight(self, font: str, fontsize: float, dpi: float) -> float:\n    raise NotImplementedError()",
    "docstring": "Get the xheight for the given *font* and *fontsize*.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\_mathtext.py",
    "ast_data": "FunctionDef name:get_xheight arg:self arg:font arg:fontsize arg:dpi arguments arg arg arg arg Raise Call"
  },
  {
    "library": "pytorch",
    "name": "xpu",
    "source_code": "def xpu(self, device: Optional[Union[int, device]]=None) -> Self:\n    return self._apply(lambda t: t.xpu(device))",
    "docstring": "Move all model parameters and buffers to the XPU. This also makes associated parameters and buffers different objects. So it should be called before constructing optimizer if the module will live on XPU while being optimized. .. note:: This method modifies the module in-place. Arguments: device (int, optional): if specified, all parameters will be copied to that device Returns: Module: self",
    "type": "method",
    "file_path": "pytorch\\torch\\nn\\modules\\module.py",
    "ast_data": "FunctionDef name:xpu arg:self arg:device arguments arg arg Return return:yes Call arguments arg Call"
  },
  {
    "library": "tensorflow",
    "name": "_get_current_tf_device",
    "source_code": "def _get_current_tf_device():\n    graph = get_graph()\n    op = _TfDeviceCaptureOp()\n    graph._apply_device_functions(op)\n    if tf2.enabled():\n        return device_spec.DeviceSpecV2.from_string(op.device)\n    else:\n        return device_spec.DeviceSpecV1.from_string(op.device)",
    "docstring": "Return explicit device of current context, otherwise returns . Returns: If the current device scope is explicitly set, it returns a string with the device ( or ). If the scope is not explicitly set, it will return .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\backend.py",
    "ast_data": "FunctionDef name:_get_current_tf_device arguments Assign Call Assign Call Call If Call Return return:yes Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "instant",
    "source_code": "@staticmethod\ndef instant(event_name: str, metadata: dict[str, Any], time_ns: Optional[int]=None):\n    CompileEventLogger.log_instant_event(event_name, metadata, time_ns, CompileEventLogLevel.CHROMIUM)",
    "docstring": "Log an instant event to chromium logs with name at time . The field in Perfetto will point to metadata. should be a value obtained from time.time_ns().",
    "type": "method",
    "file_path": "pytorch\\torch\\_dynamo\\utils.py",
    "ast_data": "FunctionDef name:instant arg:event_name arg:metadata arg:time_ns arguments arg arg arg Call"
  },
  {
    "library": "pytorch",
    "name": "set_alignment",
    "source_code": "@staticmethod\ndef set_alignment(torch_layout, op_element) -> bool:\n    alignment = cutlass_utils.get_max_alignment(torch_layout)\n    cuda_arch = cutlass_utils.get_cuda_arch()\n    if cuda_arch and int(cuda_arch) >= 90 and (alignment < op_element.alignment):\n        return False\n    else:\n        op_element.alignment = alignment\n        return True",
    "docstring": "Helper method to update the alignment of a given CUTLASS GEMM op operand's element. This method modifies the alignment of the given Cutlass GEMM op operand's element to match the layout of the corresponding ir.Buffer node. Args: torch_layout: The layout of the corresponding ir.Buffer node. op_element: The Cutlass GEMM op operand's element whose alignment is to be updated. Returns: bool: True if the alignment was successfully updated, False otherwise.",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\codegen\\cuda\\gemm_template.py",
    "ast_data": "FunctionDef name:set_alignment arg:torch_layout arg:op_element arguments arg arg Assign Call Assign Call If BoolOp Compare Call Compare Return return:yes Assign Return return:yes"
  },
  {
    "library": "django",
    "name": "setY",
    "source_code": "def setY(self, index, value):\n    self.setOrdinate(1, index, value)",
    "docstring": "Set Y with the value at the given index.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\geos\\coordseq.py",
    "ast_data": "FunctionDef name:setY arg:self arg:index arg:value arguments arg arg arg Call"
  },
  {
    "library": "tensorflow",
    "name": "param_static_shapes",
    "source_code": "@classmethod\ndef param_static_shapes(cls, sample_shape):\n    if isinstance(sample_shape, tensor_shape.TensorShape):\n        if not sample_shape.is_fully_defined():\n            raise ValueError('TensorShape sample_shape must be fully defined')\n        sample_shape = sample_shape.as_list()\n    params = cls.param_shapes(sample_shape)\n    static_params = {}\n    for name, shape in params.items():\n        static_shape = tensor_util.constant_value(shape)\n        if static_shape is None:\n            raise ValueError('sample_shape must be a fully-defined TensorShape or list/tuple')\n        static_params[name] = tensor_shape.TensorShape(static_shape)\n    return static_params",
    "docstring": "param_shapes with static (i.e. ) shapes. This is a class method that describes what key/value arguments are required to instantiate the given so that a particular shape is returned for that instance's call to . Assumes that the sample's shape is known statically. Subclasses should override class method to return constant-valued tensors when constant values are fed. Args: sample_shape: or python list/tuple. Desired shape of a call to . Returns: of parameter name to . Raises: ValueError: if is a and is not fully defined.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\distributions\\distribution.py",
    "ast_data": "FunctionDef name:param_static_shapes arg:cls arg:sample_shape arguments arg arg If Call If Call Raise Call Assign Call Assign Call Assign For Call Assign Call If Compare Raise Call Assign Call Return return:yes"
  },
  {
    "library": "kornia",
    "name": "ModelBase",
    "source_code": "class ModelBase(Module, ONNXExportMixin, ModelBaseMixin):\n\n    def __init__(self, model: Module, pre_processor: Module, post_processor: Module, name: Optional[str]=None) -> None:\n        super().__init__()\n        self.model = model.eval()\n        self.pre_processor = pre_processor.eval()\n        self.post_processor = post_processor.eval()\n        if name is not None:\n            self.name = name",
    "docstring": "Wrap a model and perform pre-processing and post-processing.",
    "type": "class",
    "file_path": "kornia\\kornia\\models\\base.py",
    "ast_data": "ClassDef name:ModelBase FunctionDef name:__init__ arg:self arg:model arg:pre_processor arg:post_processor arg:name arguments arg arg arg arg arg Call Call Assign Call Assign Call Assign Call If Compare Assign"
  },
  {
    "library": "pytorch",
    "name": "_validate_pruning_amount_init",
    "source_code": "def _validate_pruning_amount_init(amount):\n    if not isinstance(amount, numbers.Real):\n        raise TypeError(f'Invalid type for amount: {amount}. Must be int or float.')\n    if isinstance(amount, numbers.Integral) and amount < 0 or (not isinstance(amount, numbers.Integral) and (float(amount) > 1.0 or float(amount) < 0.0)):\n        raise ValueError(f'amount={amount} should either be a float in the range [0, 1] or a non-negative integer')",
    "docstring": "Validate helper to check the range of amount at init. Args: amount (int or float): quantity of parameters to prune. If float, should be between 0.0 and 1.0 and represent the fraction of parameters to prune. If int, it represents the absolute number of parameters to prune. Raises: ValueError: if amount is a float not in [0, 1], or if it's a negative integer. TypeError: if amount is neither a float nor an integer. Note: This does not take into account the number of parameters in the tensor to be pruned, which is known only at prune.",
    "type": "function",
    "file_path": "pytorch\\torch\\nn\\utils\\prune.py",
    "ast_data": "FunctionDef name:_validate_pruning_amount_init arg:amount arguments arg If Call Raise Call If BoolOp BoolOp Call Compare BoolOp Call BoolOp Compare Call Compare Call Raise Call"
  },
  {
    "library": "pytorch",
    "name": "delete_unused_values",
    "source_code": "def delete_unused_values(user: Node):\n    if user.op == 'placeholder':\n        return\n    if user.op == 'output':\n        body.append('\\n')\n        return\n    nodes_to_delete = user_to_last_uses.get(user, [])\n    if len(user.users.keys()) == 0:\n        nodes_to_delete.append(user)\n    if len(nodes_to_delete):\n        to_delete_str = ' = '.join([repr(n) for n in nodes_to_delete] + ['None'])\n        body.append(f';  {dim(to_delete_str)}\\n')\n    else:\n        body.append('\\n')",
    "docstring": "Delete values after their last use. This ensures that values that are not used in the remainder of the code are freed and the memory usage of the code is optimal.",
    "type": "method",
    "file_path": "pytorch\\torch\\fx\\graph.py",
    "ast_data": "FunctionDef name:delete_unused_values arg:user arguments arg If Compare Return return:no If Compare Call Return return:no Assign Call If Compare Call Call Call If Call Assign Call Call Call Call Call"
  },
  {
    "library": "scipy",
    "name": "fhtcoeff",
    "source_code": "def fhtcoeff(n, dln, mu, offset=0.0, bias=0.0, inverse=False):\n    lnkr, q = (offset, bias)\n    xp = (mu + 1 + q) / 2\n    xm = (mu + 1 - q) / 2\n    y = np.linspace(0, np.pi * (n // 2) / (n * dln), n // 2 + 1)\n    u = np.empty(n // 2 + 1, dtype=complex)\n    v = np.empty(n // 2 + 1, dtype=complex)\n    u.imag[:] = y\n    u.real[:] = xm\n    loggamma(u, out=v)\n    u.real[:] = xp\n    loggamma(u, out=u)\n    y *= 2 * (LN_2 - lnkr)\n    u.real -= v.real\n    u.real += LN_2 * q\n    u.imag += v.imag\n    u.imag += y\n    np.exp(u, out=u)\n    if n % 2 == 0:\n        u.imag[-1] = 0\n    if not np.isfinite(u[0]):\n        u[0] = 2 ** q * poch(xm, xp - xm)\n    if np.isinf(u[0]) and (not inverse):\n        warn('singular transform; consider changing the bias', stacklevel=3)\n        u = np.copy(u)\n        u[0] = 0\n    elif u[0] == 0 and inverse:\n        warn('singular inverse transform; consider changing the bias', stacklevel=3)\n        u = np.copy(u)\n        u[0] = np.inf\n    return u",
    "docstring": "Compute the coefficient array for a fast Hankel transform.",
    "type": "function",
    "file_path": "scipy\\scipy\\fft\\_fftlog_backend.py",
    "ast_data": "FunctionDef name:fhtcoeff arg:n arg:dln arg:mu arg:offset arg:bias arg:inverse arguments arg arg arg arg arg arg Assign Assign Assign Assign Call Assign Call Assign Call Assign Assign Call Assign Call Call If Compare Assign If Call Assign Call If BoolOp Call Call Assign Call Assign If BoolOp Compare Call Assign Call Assign Return return:yes"
  },
  {
    "library": "pandas",
    "name": "array",
    "source_code": "@property\ndef array(self) -> ExtensionArray:\n    raise AbstractMethodError(self)",
    "docstring": "The ExtensionArray of the data backing this Series or Index. This property provides direct access to the underlying array data of a Series or Index without requiring conversion to a NumPy array. It returns an ExtensionArray, which is the native storage format for pandas extension dtypes. Returns ------- ExtensionArray An ExtensionArray of the values stored within. For extension types, this is the actual array. For NumPy native types, this is a thin (no copy) wrapper around :class:. `arrays.NumpyExtensionArraySeries.to_numpy` instead. Examples -------- For regular NumPy types like int, and float, a NumpyExtensionArray is returned. >>> pd.Series([1, 2, 3]).array [1, 2, 3] Length: 3, dtype: int64 For extension types, like Categorical, the actual ExtensionArray is returned >>> ser = pd.Series(pd.Categorical([\"a\", \"b\", \"a\"])) >>> ser.array ['a', 'b', 'a'] Categories (2, object): ['a', 'b']",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\base.py",
    "ast_data": "FunctionDef name:array arg:self arguments arg Raise Call"
  },
  {
    "library": "matplotlib",
    "name": "get_transformed_points_and_affine",
    "source_code": "def get_transformed_points_and_affine(self):\n    self._revalidate()\n    return (self._transformed_points, self.get_affine())",
    "docstring": "Return a copy of the child path, with the non-affine part of the transform already applied, along with the affine part of the path necessary to complete the transformation. Unlike :meth:, no interpolation will be performed.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\transforms.py",
    "ast_data": "FunctionDef name:get_transformed_points_and_affine arg:self arguments arg Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "cdf",
    "source_code": "def cdf(self, value):\n    for transform in self.transforms[::-1]:\n        value = transform.inv(value)\n    if self._validate_args:\n        self.base_dist._validate_sample(value)\n    value = self.base_dist.cdf(value)\n    value = self._monotonize_cdf(value)\n    return value",
    "docstring": "Computes the cumulative distribution function by inverting the transform(s) and computing the score of the base distribution.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributions\\transformed_distribution.py",
    "ast_data": "FunctionDef name:cdf arg:self arg:value arguments arg arg For Assign Call If Call Assign Call Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_StridedSliceGradGrad",
    "source_code": "@ops.RegisterGradient('StridedSliceGrad')\ndef _StridedSliceGradGrad(op: ops.Operation, grad):\n    begin = op.inputs[1]\n    end = op.inputs[2]\n    strides = op.inputs[3]\n    return (None, None, None, None, array_ops.strided_slice(grad, begin, end, strides, begin_mask=op.get_attr('begin_mask'), end_mask=op.get_attr('end_mask'), ellipsis_mask=op.get_attr('ellipsis_mask'), new_axis_mask=op.get_attr('new_axis_mask'), shrink_axis_mask=op.get_attr('shrink_axis_mask')))",
    "docstring": "Gradient for StridedSliceGrad op.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\array_grad.py",
    "ast_data": "FunctionDef name:_StridedSliceGradGrad arg:op arg:grad arguments arg arg Assign Assign Assign Return return:yes Call Call Call Call Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "_infer_ep_from_device",
    "source_code": "def _infer_ep_from_device(*args) -> tuple[str, ...]:\n    eps = []\n    for arg in args:\n        if hasattr(arg, 'device'):\n            device = arg.device\n            if device.type == 'cuda':\n                eps.append('CUDAExecutionProvider')\n            elif device.type == 'cpu':\n                eps.append('CPUExecutionProvider')\n    return tuple(eps)",
    "docstring": "Return the first valid device (i.e., GPU or CPU) in argument list.",
    "type": "function",
    "file_path": "pytorch\\torch\\onnx\\_internal\\onnxruntime.py",
    "ast_data": "FunctionDef name:_infer_ep_from_device arguments arg Assign For If Call Assign If Compare Call If Compare Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "add",
    "source_code": "def add(self, profile_datum):\n    self.total_op_time += profile_datum.op_time\n    self.total_exec_time += profile_datum.exec_time\n    device_and_node = '%s:%s' % (profile_datum.device_name, profile_datum.node_exec_stats.node_name)\n    device_and_node = '%s:%s' % (profile_datum.device_name, profile_datum.node_exec_stats.node_name)\n    if device_and_node in self._node_to_exec_count:\n        self._node_to_exec_count[device_and_node] += 1\n    else:\n        self._node_to_exec_count[device_and_node] = 1",
    "docstring": "Accumulate a new instance of ProfileDatum. Args: profile_datum: () an instance of to accumulate to this object.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\debug\\lib\\profiling.py",
    "ast_data": "FunctionDef name:add arg:self arg:profile_datum arguments arg arg Assign Assign If Compare Assign"
  },
  {
    "library": "seaborn",
    "name": "_freedman_diaconis_bins",
    "source_code": "def _freedman_diaconis_bins(a):\n    a = np.asarray(a)\n    if len(a) < 2:\n        return 1\n    iqr = np.subtract.reduce(np.nanpercentile(a, [75, 25]))\n    h = 2 * iqr / len(a) ** (1 / 3)\n    if h == 0:\n        return int(np.sqrt(a.size))\n    else:\n        return int(np.ceil((a.max() - a.min()) / h))",
    "docstring": "Calculate number of hist bins using Freedman-Diaconis rule.",
    "type": "function",
    "file_path": "seaborn\\seaborn\\distributions.py",
    "ast_data": "FunctionDef name:_freedman_diaconis_bins arg:a arguments arg Assign Call If Compare Call Return return:yes Assign Call Call Assign Call If Compare Return return:yes Call Call Return return:yes Call Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "row_starts",
    "source_code": "def row_starts(self, name=None):\n    with ops.name_scope(name, 'RaggedRowStarts', [self]):\n        return self._row_partition.row_starts()",
    "docstring": "Returns the start indices for rows in this ragged tensor. These indices specify where the values for each row begin in . is equal to . Args: name: A name prefix for the returned tensor (optional). Returns: A 1-D integer Tensor with shape . The returned tensor is nonnegative, and is sorted in ascending order. #### Example: >>> rt = tf.ragged.constant([[3, 1, 4, 1], [], [5, 9, 2], [6], []]) >>> print(rt.values) tf.Tensor([3 1 4 1 5 9 2 6], shape=(8,), dtype=int32) >>> print(rt.row_starts()) # indices of row starts in rt.values tf.Tensor([0 4 4 7 8], shape=(5,), dtype=int64)",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\ragged_tensor.py",
    "ast_data": "FunctionDef name:row_starts arg:self arg:name arguments arg arg With Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "get_memory_info",
    "source_code": "def get_memory_info(self, dev):\n    self._initialize_physical_devices()\n    self.ensure_initialized()\n    return pywrap_tfe.TFE_GetMemoryInfo(self._context_handle, dev)",
    "docstring": "Returns a dict of memory info for the device.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\context.py",
    "ast_data": "FunctionDef name:get_memory_info arg:self arg:dev arguments arg arg Call Call Return return:yes Call"
  },
  {
    "library": "django",
    "name": "db",
    "source_code": "@property\ndef db(self):\n    if self._for_write:\n        return self._db or router.db_for_write(self.model, **self._hints)\n    return self._db or router.db_for_read(self.model, **self._hints)",
    "docstring": "Return the database used if this query is executed now.",
    "type": "method",
    "file_path": "django\\django\\db\\models\\query.py",
    "ast_data": "FunctionDef name:db arg:self arguments arg If Return return:yes BoolOp Call Return return:yes BoolOp Call"
  },
  {
    "library": "pandas",
    "name": "encode",
    "source_code": "@forbid_nonstring_types(['bytes'])\ndef encode(self, encoding, errors: str='strict'):\n    result = self._data.array._str_encode(encoding, errors)\n    return self._wrap_result(result, returns_string=False)",
    "docstring": "Encode character string in the Series/Index using indicated encoding. Equivalent to :meth:. Parameters ---------- encoding : str Specifies the encoding to be used. errors : str, optional Specifies the error handling scheme. Possible values are those supported by :meth:. Returns ------- Series/Index of objects A Series or Index with strings encoded into bytes. See Also -------- Series.str.decode : Decodes bytes into strings in a Series/Index. Examples -------- >>> ser = pd.Series([\"cow\", \"123\", \"()\"]) >>> ser.str.encode(encoding=\"ascii\") 0 b'cow' 1 b'123' 2 b'()' dtype: object",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\strings\\accessor.py",
    "ast_data": "FunctionDef name:encode arg:self arg:encoding arg:errors arguments arg arg arg Assign Call Return return:yes Call Call"
  },
  {
    "library": "virtualenv",
    "name": "create",
    "source_code": "@abstractmethod\ndef create(self):\n    raise NotImplementedError",
    "docstring": "Perform the virtual environment creation.",
    "type": "method",
    "file_path": "virtualenv\\src\\virtualenv\\create\\creator.py",
    "ast_data": "FunctionDef name:create arg:self arguments arg Raise"
  },
  {
    "library": "cryptography",
    "name": "public_bytes",
    "source_code": "@abc.abstractmethod\ndef public_bytes(self, encoding: _serialization.Encoding, format: _serialization.PublicFormat) -> bytes:\n    pass",
    "docstring": "The serialized bytes of the public key.",
    "type": "method",
    "file_path": "cryptography\\src\\cryptography\\hazmat\\primitives\\asymmetric\\ed448.py",
    "ast_data": "FunctionDef name:public_bytes arg:self arg:encoding arg:format arguments arg arg arg"
  },
  {
    "library": "kornia",
    "name": "box_blur",
    "source_code": "def box_blur(input: Tensor, kernel_size: tuple[int, int] | int, border_type: str='reflect', separable: bool=False) -> Tensor:\n    KORNIA_CHECK_IS_TENSOR(input)\n    if separable:\n        ky, kx = _unpack_2d_ks(kernel_size)\n        kernel_y = get_box_kernel1d(ky, device=input.device, dtype=input.dtype)\n        kernel_x = get_box_kernel1d(kx, device=input.device, dtype=input.dtype)\n        out = filter2d_separable(input, kernel_x, kernel_y, border_type)\n    else:\n        kernel = get_box_kernel2d(kernel_size, device=input.device, dtype=input.dtype)\n        out = filter2d(input, kernel, border_type)\n    return out",
    "docstring": "Blur an image using the box filter. .. image:: _static/img/box_blur.png The function smooths an image using the kernel: .. math:: K = \\frac{1}{\\text{kernel_size}_x * \\text{kernel_size}_y} \\begin{bmatrix} 1 & 1 & 1 & \\cdots & 1 & 1 \\\\ 1 & 1 & 1 & \\cdots & 1 & 1 \\\\ \\vdots & \\vdots & \\vdots & \\ddots & \\vdots & \\vdots \\\\ 1 & 1 & 1 & \\cdots & 1 & 1 \\\\ \\end{bmatrix} Args: input: the image to blur with shape :math:. kernel_size: the blurring kernel size. border_type: the padding mode to be applied before convolving. The expected modes are: `(B,C,H,W)here `__. Example: >>> input = torch.rand(2, 4, 5, 7) >>> output = box_blur(input, (3, 3)) # 2x4x5x7 >>> output.shape torch.Size([2, 4, 5, 7])",
    "type": "function",
    "file_path": "kornia\\kornia\\filters\\blur.py",
    "ast_data": "FunctionDef name:box_blur arg:input arg:kernel_size arg:border_type arg:separable arguments arg arg arg arg Call If Assign Call Assign Call Assign Call Assign Call Assign Call Assign Call Return return:yes"
  },
  {
    "library": "django",
    "name": "_setup_joins",
    "source_code": "def _setup_joins(self, pieces, opts, alias):\n    alias = alias or self.query.get_initial_alias()\n    field, targets, opts, joins, path, transform_function = self.query.setup_joins(pieces, opts, alias)\n    alias = joins[-1]\n    return (field, targets, alias, joins, path, opts, transform_function)",
    "docstring": "Helper method for get_order_by() and get_distinct(). get_ordering() and get_distinct() must produce same target columns on same input, as the prefixes of get_ordering() and get_distinct() must match. Executing SQL where this is not true is an error.",
    "type": "method",
    "file_path": "django\\django\\db\\models\\sql\\compiler.py",
    "ast_data": "FunctionDef name:_setup_joins arg:self arg:pieces arg:opts arg:alias arguments arg arg arg arg Assign BoolOp Call Assign Call Assign Return return:yes"
  },
  {
    "library": "pandas",
    "name": "max",
    "source_code": "def max(self, axis: AxisInt | None=None, skipna: bool=True, *args, **kwargs):\n    nv.validate_max(args, kwargs)\n    nv.validate_minmax_axis(axis)\n    if not len(self):\n        return self._na_value\n    if len(self) and self.is_monotonic_increasing:\n        last = self[-1]\n        if not isna(last):\n            return last\n    if not self._is_multi and self.hasnans:\n        mask = self._isnan\n        if not skipna or mask.all():\n            return self._na_value\n    if not self._is_multi and (not isinstance(self._values, np.ndarray)):\n        return self._values._reduce(name='max', skipna=skipna)\n    return nanops.nanmax(self._values, skipna=skipna)",
    "docstring": "Return the maximum value of the Index. Parameters ---------- axis : int, optional For compatibility with NumPy. Only 0 or None are allowed. skipna : bool, default True Exclude NA/null values when showing the result. *args, **kwargs Additional arguments and keywords for compatibility with NumPy. Returns ------- scalar Maximum value. See Also -------- Index.min : Return the minimum value in an Index. Series.max : Return the maximum value in a Series. DataFrame.max : Return the maximum values in a DataFrame. Examples -------- >>> idx = pd.Index([3, 2, 1]) >>> idx.max() 3 >>> idx = pd.Index([\"c\", \"b\", \"a\"]) >>> idx.max() 'c' For a MultiIndex, the maximum is determined lexicographically. >>> idx = pd.MultiIndex.from_product([(\"a\", \"b\"), (2, 1)]) >>> idx.max() ('b', 2)",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\indexes\\base.py",
    "ast_data": "FunctionDef name:max arg:self arg:axis arg:skipna arguments arg arg arg arg arg Call Call If Call Return return:yes If BoolOp Call Assign If Call Return return:yes If BoolOp Assign If BoolOp Call Return return:yes If BoolOp Call Return return:yes Call Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "_scale_axis_limits",
    "source_code": "def _scale_axis_limits(self, scale_x, scale_y, scale_z):\n    cx, cy, cz, dx, dy, dz = self._get_w_centers_ranges()\n    self.set_xlim3d(cx - dx * scale_x / 2, cx + dx * scale_x / 2, auto=None)\n    self.set_ylim3d(cy - dy * scale_y / 2, cy + dy * scale_y / 2, auto=None)\n    self.set_zlim3d(cz - dz * scale_z / 2, cz + dz * scale_z / 2, auto=None)",
    "docstring": "Keeping the center of the x, y, and z data axes fixed, scale their limits by scale factors. A scale factor > 1 zooms out and a scale factor < 1 zooms in. Parameters ---------- scale_x : float Scale factor for the x data axis. scale_y : float Scale factor for the y data axis. scale_z : float Scale factor for the z data axis.",
    "type": "method",
    "file_path": "matplotlib\\lib\\mpl_toolkits\\mplot3d\\axes3d.py",
    "ast_data": "FunctionDef name:_scale_axis_limits arg:self arg:scale_x arg:scale_y arg:scale_z arguments arg arg arg arg Assign Call Call Call Call"
  },
  {
    "library": "django",
    "name": "swappable_setting",
    "source_code": "@property\ndef swappable_setting(self):\n    if self.swappable:\n        if isinstance(self.remote_field.model, str):\n            to_string = self.remote_field.model\n        else:\n            to_string = self.remote_field.model._meta.label\n        return apps.get_swappable_settings_name(to_string)\n    return None",
    "docstring": "Get the setting that this is powered from for swapping, or None if it's not swapped in / marked with swappable=False.",
    "type": "method",
    "file_path": "django\\django\\db\\models\\fields\\related.py",
    "ast_data": "FunctionDef name:swappable_setting arg:self arguments arg If If Call Assign Assign Return return:yes Call Return return:no"
  },
  {
    "library": "matplotlib",
    "name": "update_keymap",
    "source_code": "def update_keymap(self, name, key):\n    if name not in self._tools:\n        raise KeyError(f'{name!r} not in Tools')\n    self._remove_keys(name)\n    if isinstance(key, str):\n        key = [key]\n    for k in key:\n        if k in self._keys:\n            _api.warn_external(f'Key {k} changed from {self._keys[k]} to {name}')\n        self._keys[k] = name",
    "docstring": "Set the keymap to associate with the specified tool. Parameters ---------- name : str Name of the Tool. key : str or list of str Keys to associate with the tool.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\backend_managers.py",
    "ast_data": "FunctionDef name:update_keymap arg:self arg:name arg:key arguments arg arg arg If Compare Raise Call Call If Call Assign For If Compare Call Assign"
  },
  {
    "library": "tensorflow",
    "name": "default_float_type",
    "source_code": "def default_float_type():\n    if not is_prefer_float32() and is_allow_float64():\n        return float64\n    else:\n        return float32",
    "docstring": "Gets the default float type. Returns: If is false and is true, returns float64; otherwise returns float32.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\numpy_ops\\np_dtypes.py",
    "ast_data": "FunctionDef name:default_float_type arguments If BoolOp Call Call Return return:yes Return return:yes"
  },
  {
    "library": "pandas",
    "name": "is_unique",
    "source_code": "@cache_readonly\ndef is_unique(self) -> bool:\n    return self._engine.is_unique",
    "docstring": "Return if the index has unique values. Returns ------- bool See Also -------- Index.has_duplicates : Inverse method that checks if it has duplicate values. Examples -------- >>> idx = pd.Index([1, 5, 7, 7]) >>> idx.is_unique False >>> idx = pd.Index([1, 5, 7]) >>> idx.is_unique True >>> idx = pd.Index([\"Watermelon\", \"Orange\", \"Apple\", \"Watermelon\"]).astype( ... \"category\" ... ) >>> idx.is_unique False >>> idx = pd.Index([\"Orange\", \"Apple\", \"Watermelon\"]).astype(\"category\") >>> idx.is_unique True",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\indexes\\base.py",
    "ast_data": "FunctionDef name:is_unique arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, filenames, record_bytes, header_bytes=None, footer_bytes=None, buffer_size=None, compression_type=None, name=None):\n    self._filenames = filenames\n    self._record_bytes = ops.convert_to_tensor(record_bytes, dtype=dtypes.int64, name='record_bytes')\n    self._header_bytes = convert.optional_param_to_tensor('header_bytes', header_bytes)\n    self._footer_bytes = convert.optional_param_to_tensor('footer_bytes', footer_bytes)\n    self._buffer_size = convert.optional_param_to_tensor('buffer_size', buffer_size, _DEFAULT_READER_BUFFER_SIZE_BYTES)\n    self._compression_type = convert.optional_param_to_tensor('compression_type', compression_type, argument_default='', argument_dtype=dtypes.string)\n    self._name = name\n    variant_tensor = gen_dataset_ops.fixed_length_record_dataset_v2(self._filenames, self._header_bytes, self._record_bytes, self._footer_bytes, self._buffer_size, self._compression_type, metadata=self._metadata.SerializeToString())\n    super(_FixedLengthRecordDataset, self).__init__(variant_tensor)",
    "docstring": "Creates a . Args: filenames: A tensor containing one or more filenames. record_bytes: A scalar representing the number of bytes in each record. header_bytes: (Optional.) A scalar representing the number of bytes to skip at the start of a file. footer_bytes: (Optional.) A scalar representing the number of bytes to ignore at the end of a file. buffer_size: (Optional.) A scalar representing the number of bytes to buffer when reading. compression_type: (Optional.) A scalar evaluating to one of (no compression), , or . name: (Optional.) A name for the tf.data operation.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\data\\ops\\readers.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:filenames arg:record_bytes arg:header_bytes arg:footer_bytes arg:buffer_size arg:compression_type arg:name arguments arg arg arg arg arg arg arg arg Assign Assign Call Assign Call Assign Call Assign Call Assign Call Assign Assign Call Call Call Call"
  },
  {
    "library": "scrapy",
    "name": "DontCloseSpider",
    "source_code": "class DontCloseSpider(Exception):\n    pass",
    "docstring": "Request the spider not to be closed yet",
    "type": "class",
    "file_path": "scrapy\\scrapy\\exceptions.py",
    "ast_data": "ClassDef name:DontCloseSpider"
  },
  {
    "library": "matplotlib",
    "name": "__init__",
    "source_code": "@_docstring.interpd\ndef __init__(self, values, edges, *, orientation='vertical', baseline=0, **kwargs):\n    self.orientation = orientation\n    self._edges = np.asarray(edges)\n    self._values = np.asarray(values)\n    self._baseline = np.asarray(baseline) if baseline is not None else None\n    self._update_path()\n    super().__init__(self._path, **kwargs)",
    "docstring": "Parameters ---------- values : array-like The step heights. edges : array-like The edge positions, with `Patch` properties: %(Patch:kwdoc)s",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\patches.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:values arg:edges arguments arg arg arg arg arg arg Assign Assign Call Assign Call Assign Compare Call Call Call Call"
  },
  {
    "library": "django",
    "name": "get_fields",
    "source_code": "def get_fields(self, request, obj=None):\n    if self.fields:\n        return self.fields\n    form = self._get_form_for_get_fields(request, obj)\n    return [*form.base_fields, *self.get_readonly_fields(request, obj)]",
    "docstring": "Hook for specifying fields.",
    "type": "method",
    "file_path": "django\\django\\contrib\\admin\\options.py",
    "ast_data": "FunctionDef name:get_fields arg:self arg:request arg:obj arguments arg arg arg If Return return:yes Assign Call Return return:yes Call"
  },
  {
    "library": "django",
    "name": "__getitem__",
    "source_code": "def __getitem__(self, key):\n    return super().__getitem__(key.replace('_', '-'))",
    "docstring": "Allow header lookup using underscores in place of hyphens.",
    "type": "method",
    "file_path": "django\\django\\http\\request.py",
    "ast_data": "FunctionDef name:__getitem__ arg:self arg:key arguments arg arg Return return:yes Call Call Call"
  },
  {
    "library": "numpy",
    "name": "mask_indices",
    "source_code": "@set_module('numpy')\ndef mask_indices(n, mask_func, k=0):\n    m = ones((n, n), int)\n    a = mask_func(m, k)\n    return nonzero(a != 0)",
    "docstring": "Return the indices to access (n, n) arrays, given a masking function. Assume is a function that, for a square array a of size `ktriutriltriutrilxkmask_functriutrilna` is a 3x3 array: >>> a = np.arange(9).reshape(3, 3) >>> a array([[0, 1, 2], [3, 4, 5], [6, 7, 8]]) >>> a[iu] array([0, 1, 2, 4, 5, 8]) An offset can be passed also to the masking function. This gets us the indices starting on the first diagonal right of the main one: >>> iu1 = np.mask_indices(3, np.triu, 1) with which we now extract only three elements: >>> a[iu1] array([1, 2, 5])",
    "type": "function",
    "file_path": "numpy\\numpy\\lib\\_twodim_base_impl.py",
    "ast_data": "FunctionDef name:mask_indices arg:n arg:mask_func arg:k arguments arg arg arg Assign Call Assign Call Return return:yes Call Compare Call"
  },
  {
    "library": "matplotlib",
    "name": "Fraction",
    "source_code": "class Fraction(_Base):\n\n    def __init__(self, fraction, ref_size):\n        _api.check_isinstance(Real, fraction=fraction)\n        self._fraction_ref = ref_size\n        self._fraction = fraction\n\n    def get_size(self, renderer):\n        if self._fraction_ref is None:\n            return (self._fraction, 0.0)\n        else:\n            r, a = self._fraction_ref.get_size(renderer)\n            rel_size = r * self._fraction\n            abs_size = a * self._fraction\n            return (rel_size, abs_size)",
    "docstring": "An instance whose size is a *fraction* of the *ref_size*. >>> s = Fraction(0.3, AxesX(ax))",
    "type": "class",
    "file_path": "matplotlib\\lib\\mpl_toolkits\\axes_grid1\\axes_size.py",
    "ast_data": "ClassDef name:Fraction FunctionDef name:__init__ arg:self arg:fraction arg:ref_size arguments arg arg arg Call Assign Assign FunctionDef name:get_size arg:self arg:renderer arguments arg arg If Compare Return return:yes Assign Call Assign Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "experimental_type_proto",
    "source_code": "@classmethod\ndef experimental_type_proto(cls) -> Type[struct_pb2.TypeSpecProto]:\n    return struct_pb2.TypeSpecProto",
    "docstring": "Returns the type of proto associated with TypeSpec serialization. Do NOT override for custom non-TF types.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\type_spec.py",
    "ast_data": "FunctionDef name:experimental_type_proto arg:cls arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "concatenate",
    "source_code": "def concatenate(inputs, axis=-1, **kwargs):\n    return Concatenate(axis=axis, **kwargs)(inputs)",
    "docstring": "Functional interface to the layer. >>> x = np.arange(20).reshape(2, 2, 5) >>> print(x) [[[ 0 1 2 3 4] [ 5 6 7 8 9]] [[10 11 12 13 14] [15 16 17 18 19]]] >>> y = np.arange(20, 30).reshape(2, 1, 5) >>> print(y) [[[20 21 22 23 24]] [[25 26 27 28 29]]] >>> tf.keras.layers.concatenate([x, y], ... axis=1) Args: inputs: A list of input tensors (at least 2). axis: Concatenation axis. **kwargs: Standard layer keyword arguments. Returns: A tensor, the concatenation of the inputs alongside axis .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\layers\\merge.py",
    "ast_data": "FunctionDef name:concatenate arg:inputs arg:axis arguments arg arg arg Return return:yes Call Call"
  },
  {
    "library": "seaborn",
    "name": "desaturate",
    "source_code": "def desaturate(color, prop):\n    if not 0 <= prop <= 1:\n        raise ValueError('prop must be between 0 and 1')\n    rgb = to_rgb(color)\n    if prop == 1:\n        return rgb\n    h, l, s = colorsys.rgb_to_hls(*rgb)\n    s *= prop\n    new_color = colorsys.hls_to_rgb(h, l, s)\n    return new_color",
    "docstring": "Decrease the saturation channel of a color by some percent. Parameters ---------- color : matplotlib color hex, rgb-tuple, or html color name prop : float saturation channel of color will be multiplied by this value Returns ------- new_color : rgb tuple desaturated color code in RGB tuple representation",
    "type": "function",
    "file_path": "seaborn\\seaborn\\utils.py",
    "ast_data": "FunctionDef name:desaturate arg:color arg:prop arguments arg arg If Compare Raise Call Assign Call If Compare Return return:yes Assign Call Assign Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "embedding_strategy",
    "source_code": "@register_op_strategy(aten.embedding.default)\ndef embedding_strategy(op_schema: OpSchema) -> StrategyType:\n    weight_strategy = cast(OpStrategy, op_schema.args_schema[0])\n    indices_strategy = cast(OpStrategy, op_schema.args_schema[1])\n    mesh = op_schema.get_mesh_from_args()\n    weight_shape = weight_strategy.shape\n    indices_shape = indices_strategy.shape\n    output_emd_dim = len(indices_shape)\n    single_mesh_dim_strategies = []\n    all_replicate: PlacementList = [Replicate()] * 3\n    single_mesh_dim_strategies.append(all_replicate)\n    colwise_sharding: PlacementList = [Shard(output_emd_dim), Shard(1), Replicate()]\n    single_mesh_dim_strategies.append(colwise_sharding)\n    embedding_partial_placement = _MaskPartial(offset_shape=weight_shape, offset_dim=0)\n    rowwise_sharding: PlacementList = [embedding_partial_placement, Shard(0), embedding_partial_placement]\n    single_mesh_dim_strategies.append(rowwise_sharding)\n    for input_dim in range(len(indices_shape)):\n        batch_sharding: PlacementList = [Shard(input_dim), Replicate(), Shard(input_dim)]\n        single_mesh_dim_strategies.append(batch_sharding)\n    return expand_to_full_mesh_op_strategy(mesh, op_schema, single_mesh_dim_strategies)",
    "docstring": "This strategy handles embedding op. We have two possible embedding shardings: rowwise and colwise",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\tensor\\_ops\\_embedding_ops.py",
    "ast_data": "FunctionDef name:embedding_strategy arg:op_schema arguments arg Assign Call Assign Call Assign Call Assign Assign Assign Call Assign Call Call Call Call Call Call Assign Call Call Call For Call Call Call Call Call Call Return return:yes Call Call"
  },
  {
    "library": "pandas",
    "name": "iget",
    "source_code": "def iget(self, i: int, track_ref: bool=True) -> SingleBlockManager:\n    block = self.blocks[self.blknos[i]]\n    values = block.iget(self.blklocs[i])\n    bp = BlockPlacement(slice(0, len(values)))\n    nb = type(block)(values, placement=bp, ndim=1, refs=block.refs if track_ref else None)\n    return SingleBlockManager(nb, self.axes[1])",
    "docstring": "Return the data as a SingleBlockManager.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\internals\\managers.py",
    "ast_data": "FunctionDef name:iget arg:self arg:i arg:track_ref arguments arg arg arg Assign Assign Call Assign Call Call Call Assign Call Call Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "rvs",
    "source_code": "def rvs(self, mean=None, cov=1, size=1, random_state=None):\n    dim, mean, cov_object = self._process_parameters(mean, cov)\n    random_state = self._get_random_state(random_state)\n    if isinstance(cov_object, _covariance.CovViaPSD):\n        cov = cov_object.covariance\n        out = random_state.multivariate_normal(mean, cov, size)\n        out = _squeeze_output(out)\n    else:\n        size = size or tuple()\n        if not np.iterable(size):\n            size = (size,)\n        shape = tuple(size) + (cov_object.shape[-1],)\n        x = random_state.normal(size=shape)\n        out = mean + cov_object.colorize(x)\n    return out",
    "docstring": "Draw random samples from a multivariate normal distribution. Parameters ---------- %(_mvn_doc_default_callparams)s size : integer, optional Number of samples to draw (default 1). %(_doc_random_state)s Returns ------- rvs : ndarray or scalar Random variates of size (, ), where is the dimension of the random variable. Notes ----- %(_mvn_doc_callparams_note)s",
    "type": "method",
    "file_path": "scipy\\scipy\\stats\\_multivariate.py",
    "ast_data": "FunctionDef name:rvs arg:self arg:mean arg:cov arg:size arg:random_state arguments arg arg arg arg arg Assign Call Assign Call If Call Assign Assign Call Assign Call Assign BoolOp Call If Call Assign Assign Call Assign Call Assign Call Return return:yes"
  },
  {
    "library": "sphinx",
    "name": "generate",
    "source_code": "@abstractmethod\ndef generate(self, docnames: Iterable[str] | None=None) -> tuple[list[tuple[str, list[IndexEntry]]], bool]:\n    raise NotImplementedError",
    "docstring": "Get entries for the index. If `IndexEntry`.",
    "type": "method",
    "file_path": "sphinx\\sphinx\\domains\\_index.py",
    "ast_data": "FunctionDef name:generate arg:self arg:docnames arguments arg arg Raise"
  },
  {
    "library": "tensorflow",
    "name": "apply_func_graph_transforms",
    "source_code": "def apply_func_graph_transforms(func_graph):\n    for transform in FUNC_GRAPH_TRANSFORMS:\n        transform(func_graph)",
    "docstring": "Applies registered transformations to FuncGraph.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\polymorphic_function\\transform.py",
    "ast_data": "FunctionDef name:apply_func_graph_transforms arg:func_graph arguments arg For Call"
  },
  {
    "library": "pandas",
    "name": "_parse_temporal_dtype_string",
    "source_code": "@classmethod\ndef _parse_temporal_dtype_string(cls, string: str) -> ArrowDtype:\n    head, tail = string.split('[', 1)\n    if not tail.endswith(']'):\n        raise ValueError\n    tail = tail[:-1]\n    if head == 'timestamp':\n        assert ',' in tail\n        unit, tz = tail.split(',', 1)\n        unit = unit.strip()\n        tz = tz.strip()\n        if tz.startswith('tz='):\n            tz = tz[3:]\n        pa_type = pa.timestamp(unit, tz=tz)\n        dtype = cls(pa_type)\n        return dtype\n    raise NotImplementedError(string)",
    "docstring": "Construct a temporal ArrowDtype from string.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\dtypes\\dtypes.py",
    "ast_data": "FunctionDef name:_parse_temporal_dtype_string arg:cls arg:string arguments arg arg Assign Call If Call Raise Assign If Compare Compare Assign Call Assign Call Assign Call If Call Assign Assign Call Assign Call Return return:yes Raise Call"
  },
  {
    "library": "tensorflow",
    "name": "_restore_from_tensors",
    "source_code": "def _restore_from_tensors(self, restored_tensors):\n    with ops.name_scope('%s_table_restore' % self._name):\n        with ops.colocate_with(self.resource_handle):\n            return gen_lookup_ops.lookup_table_import_v2(self.resource_handle, restored_tensors['-keys'], restored_tensors['-values'])",
    "docstring": "Implements checkpointing interface in .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\lookup_ops.py",
    "ast_data": "FunctionDef name:_restore_from_tensors arg:self arg:restored_tensors arguments arg arg With Call With Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "experimental_should_init",
    "source_code": "@property\ndef experimental_should_init(self):\n    raise NotImplementedError('must be implemented in descendants')",
    "docstring": "Whether initialization is needed.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\distribute_lib.py",
    "ast_data": "FunctionDef name:experimental_should_init arg:self arguments arg Raise Call"
  },
  {
    "library": "django",
    "name": "__str__",
    "source_code": "def __str__(self):\n    return self.pretty_wkt",
    "docstring": "Use 'pretty' WKT.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\gdal\\srs.py",
    "ast_data": "FunctionDef name:__str__ arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "unstack",
    "source_code": "@tf_should_use.should_use_result\ndef unstack(self, value, name=None):\n    with ops.name_scope(name, 'TensorArrayUnstack', [self._handle, value]):\n        num_elements = array_ops.shape(value)[0]\n        return self.scatter(indices=math_ops.range(0, num_elements), value=value, name=name)",
    "docstring": "See TensorArray.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\tensor_array_ops.py",
    "ast_data": "FunctionDef name:unstack arg:self arg:value arg:name arguments arg arg arg With Call Assign Call Return return:yes Call Call"
  },
  {
    "library": "django",
    "name": "unregister",
    "source_code": "def unregister(self, model_or_iterable):\n    if isinstance(model_or_iterable, ModelBase):\n        model_or_iterable = [model_or_iterable]\n    for model in model_or_iterable:\n        if not self.is_registered(model):\n            raise NotRegistered('The model %s is not registered' % model.__name__)\n        del self._registry[model]",
    "docstring": "Unregister the given model(s). If a model isn't already registered, raise NotRegistered.",
    "type": "method",
    "file_path": "django\\django\\contrib\\admin\\sites.py",
    "ast_data": "FunctionDef name:unregister arg:self arg:model_or_iterable arguments arg arg If Call Assign For If Call Raise Call"
  },
  {
    "library": "pandas",
    "name": "num_rows",
    "source_code": "@abstractmethod\ndef num_rows(self) -> int | None:\n    pass",
    "docstring": "Return the number of rows in the DataFrame, if available.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\interchange\\dataframe_protocol.py",
    "ast_data": "FunctionDef name:num_rows arg:self arguments arg"
  },
  {
    "library": "scikit-learn",
    "name": "cosine_distances",
    "source_code": "@validate_params({'X': ['array-like', 'sparse matrix'], 'Y': ['array-like', 'sparse matrix', None]}, prefer_skip_nested_validation=True)\ndef cosine_distances(X, Y=None):\n    xp, _ = get_namespace(X, Y)\n    S = cosine_similarity(X, Y)\n    S *= -1\n    S += 1\n    S = xp.clip(S, 0.0, 2.0)\n    if X is Y or Y is None:\n        _fill_or_add_to_diagonal(S, 0.0, xp, add_value=False)\n    return S",
    "docstring": "Compute cosine distance between samples in X and Y. Cosine distance is defined as 1.0 minus the cosine similarity. Read more in the :ref:. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples_X, n_features) Matrix . Y : {array-like, sparse matrix} of shape (n_samples_Y, n_features), default=None Matrix . Returns ------- distances : ndarray of shape (n_samples_X, n_samples_Y) Returns the cosine distance between samples in X and Y. See Also -------- cosine_similarity : Compute cosine similarity between samples in X and Y. scipy.spatial.distance.cosine : Dense matrices only. Examples -------- >>> from sklearn.metrics.pairwise import cosine_distances >>> X = [[0, 0, 0], [1, 1, 1]] >>> Y = [[1, 0, 0], [1, 1, 0]] >>> cosine_distances(X, Y) array([[1. , 1. ], [0.422, 0.183]])",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\metrics\\pairwise.py",
    "ast_data": "FunctionDef name:cosine_distances arg:X arg:Y arguments arg arg Assign Call Assign Call Assign Call If BoolOp Compare Compare Call Return return:yes Call"
  },
  {
    "library": "kornia",
    "name": "_transform_output_shape",
    "source_code": "def _transform_output_shape(output: Tensor, shape: Tuple[int, ...], *, reference_shape: Optional[Tensor]=None) -> Tensor:\n    out_tensor = output.clone()\n    for dim in range(len(out_tensor.shape) - len(shape)):\n        idx = 0\n        if reference_shape is not None and out_tensor.shape[0] == reference_shape[0] != 1 and (len(shape) > 2):\n            idx = 1\n        if out_tensor.shape[idx] != 1:\n            raise AssertionError(f'Dimension {dim} of input is expected to be 1, got {out_tensor.shape[idx]}')\n        out_tensor = out_tensor.squeeze(idx)\n    return out_tensor",
    "docstring": "Collapse the broadcasted batch dimensions an input tensor to be the specified shape. Args: output: Tensor shape: List/tuple of int reference_shape: Tensor representation of shape to control which dimensions are collapsed. Returns: Tensor",
    "type": "function",
    "file_path": "kornia\\kornia\\augmentation\\utils\\helpers.py",
    "ast_data": "FunctionDef name:_transform_output_shape arg:output arg:shape arguments arg arg arg Assign Call For Call Call Call Assign If BoolOp Compare Compare Compare Call Assign If Compare Raise Call Assign Call Return return:yes"
  },
  {
    "library": "kornia",
    "name": "BlobDoG",
    "source_code": "class BlobDoG(Module):\n\n    def __init__(self) -> None:\n        super().__init__()\n\n    def __repr__(self) -> str:\n        return self.__class__.__name__\n\n    def forward(self, input: Tensor, sigmas: Optional[Tensor]=None) -> Tensor:\n        return dog_response(input)",
    "docstring": "Module that calculates Difference-of-Gaussians blobs. See :func: for details.",
    "type": "class",
    "file_path": "kornia\\kornia\\feature\\responses.py",
    "ast_data": "ClassDef name:BlobDoG FunctionDef name:__init__ arg:self arguments arg Call Call FunctionDef name:__repr__ arg:self arguments arg Return return:yes FunctionDef name:forward arg:self arg:input arg:sigmas arguments arg arg arg Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "_arith_method_with_reindex",
    "source_code": "def _arith_method_with_reindex(self, right: DataFrame, op) -> DataFrame:\n    left = self\n    cols, lcol_indexer, rcol_indexer = left.columns.join(right.columns, how='inner', return_indexers=True)\n    new_left = left if lcol_indexer is None else left.iloc[:, lcol_indexer]\n    new_right = right if rcol_indexer is None else right.iloc[:, rcol_indexer]\n    if isinstance(cols, MultiIndex):\n        new_left = new_left.copy(deep=False)\n        new_right = new_right.copy(deep=False)\n        new_left.columns = cols\n        new_right.columns = cols\n    result = op(new_left, new_right)\n    join_columns = left.columns.join(right.columns, how='outer')\n    if result.columns.has_duplicates:\n        indexer, _ = result.columns.get_indexer_non_unique(join_columns)\n        indexer = algorithms.unique1d(indexer)\n        result = result._reindex_with_indexers({1: [join_columns, indexer]}, allow_dups=True)\n    else:\n        result = result.reindex(join_columns, axis=1)\n    return result",
    "docstring": "For DataFrame-with-DataFrame operations that require reindexing, operate only on shared columns, then reindex. Parameters ---------- right : DataFrame op : binary operator Returns ------- DataFrame",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\frame.py",
    "ast_data": "FunctionDef name:_arith_method_with_reindex arg:self arg:right arg:op arguments arg arg arg Assign Assign Call Assign Compare Assign Compare If Call Assign Call Assign Call Assign Assign Assign Call Assign Call If Assign Call Assign Call Assign Call Assign Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_get_recv_ops",
    "source_code": "def _get_recv_ops(self, recv_infos: tuple[InputInfo, ...]) -> list[dist.P2POp]:\n    ops: list[dist.P2POp] = []\n    for info in recv_infos:\n        if not isinstance(info, _RecvInfo):\n            continue\n        peer_rank = self.stage_index_to_group_rank[info.source]\n        peer_global_rank = peer_rank if self.group is None else dist.get_global_rank(self.group, peer_rank)\n        ops.append(dist.P2POp(dist.irecv, info.buffer, peer_global_rank, self.group))\n    return ops",
    "docstring": "Helper function shared by and . Returns a list of ops that correspond to the recv infos.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\pipelining\\stage.py",
    "ast_data": "FunctionDef name:_get_recv_ops arg:self arg:recv_infos arguments arg arg For If Call Assign Assign Compare Call Call Call Return return:yes"
  },
  {
    "library": "pandas",
    "name": "duplicated",
    "source_code": "def duplicated(self, keep: DropKeep='first') -> npt.NDArray[np.bool_]:\n    if self.is_unique:\n        return np.zeros(len(self), dtype=bool)\n    return self._duplicated(keep=keep)",
    "docstring": "Indicate duplicate index values. Duplicated values are indicated as ``, all duplicates are True: >>> idx.duplicated(keep=False) array([ True, False, True, False, True])",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\indexes\\base.py",
    "ast_data": "FunctionDef name:duplicated arg:self arg:keep arguments arg arg If Return return:yes Call Call Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "set_fontsize",
    "source_code": "def set_fontsize(self, size):\n    self._text.set_fontsize(size)\n    self.stale = True",
    "docstring": "Set the text fontsize.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\table.py",
    "ast_data": "FunctionDef name:set_fontsize arg:self arg:size arguments arg arg Call Assign"
  },
  {
    "library": "pytorch",
    "name": "data_ptrs_dead_since_invocation",
    "source_code": "def data_ptrs_dead_since_invocation(self) -> list[int]:\n    curr_liveness = self._get_liveness(self.path_weakrefs)\n    _get_different_indices = self._get_different_indices(self.recorded_liveness_after_graph, curr_liveness)\n    path = list(self._path_from_root)\n    ptrs_to_deallocate = []\n    for depth, output_index in _get_different_indices:\n        ptrs_to_deallocate.append(path[depth].outputs_metadata[output_index]['data_ptr'])\n    return ptrs_to_deallocate",
    "docstring": "Since this node was invoked, return data ptrs of all tensor outputs that have died in the current executing tree path.",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\cudagraph_trees.py",
    "ast_data": "FunctionDef name:data_ptrs_dead_since_invocation arg:self arguments arg Assign Call Assign Call Assign Call Assign For Call Return return:yes"
  },
  {
    "library": "pandas",
    "name": "_adjust_binner_for_upsample",
    "source_code": "def _adjust_binner_for_upsample(self, binner):\n    if self.closed == 'right':\n        binner = binner[1:]\n    else:\n        binner = binner[:-1]\n    return binner",
    "docstring": "Adjust our binner when upsampling. The range of a new index should not be outside specified range",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\resample.py",
    "ast_data": "FunctionDef name:_adjust_binner_for_upsample arg:self arg:binner arguments arg arg If Compare Assign Assign Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_get_control_flow_submodules",
    "source_code": "def _get_control_flow_submodules(graph_module: torch.fx.GraphModule) -> list[tuple[str, torch.nn.Module, torch.fx.Node]]:\n    control_flow_submodules = []\n    for node in graph_module.graph.nodes:\n        if node.op != 'call_function':\n            continue\n        if node.target is torch.ops.higher_order.cond:\n            control_flow_submodules.append(_get_submodule(graph_module, node, 1))\n            control_flow_submodules.append(_get_submodule(graph_module, node, 2))\n        if node.target is torch.ops.higher_order.map_impl:\n            control_flow_submodules.append(_get_submodule(graph_module, node, 0))\n    return control_flow_submodules",
    "docstring": "Returns a list of submodules used for control flow operations (torch.ops.higher_order.cond/map) that are in the given toplevel graph (does not look into submodules). Specifically, the returned value is a list containing a tuple of (name of the submodule that's stored in the graph module, the submodule itself, and the fx node that uses this submodule).",
    "type": "function",
    "file_path": "pytorch\\torch\\ao\\quantization\\pt2e\\graph_utils.py",
    "ast_data": "FunctionDef name:_get_control_flow_submodules arg:graph_module arguments arg Assign For If Compare If Compare Call Call Call Call If Compare Call Call Return return:yes"
  },
  {
    "library": "scipy",
    "name": "_matmat",
    "source_code": "def _matmat(self, x):\n    return self._matvec(x)",
    "docstring": "Construct matrix-free callable matrix-matrix multiplication by the Stiffness mass matrix without constructing or storing the matrix itself by reusing the ``.",
    "type": "method",
    "file_path": "scipy\\scipy\\sparse\\linalg\\_special_sparse_arrays.py",
    "ast_data": "FunctionDef name:_matmat arg:self arg:x arguments arg arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "log_hook",
    "source_code": "def log_hook(fn: Callable, level=logging.INFO) -> Callable:\n\n    @wraps(fn)\n    def wrapped_fn(gm):\n        val = fn(gm)\n        logger.log(level, 'Ran pass %s\\t Return value: %s', fn, val)\n        return val\n    return wrapped_fn",
    "docstring": "Logs callable output. This is useful for logging output of passes. Note inplace_wrapper replaces the pass output with the modified object. If we want to log the original output, apply this wrapper before inplace_wrapper. Args: fn (Callable[Type1, Type2]) level: logging level (e.g. logging.INFO) Returns: wrapped_fn (Callable[Type1, Type2])",
    "type": "function",
    "file_path": "pytorch\\torch\\fx\\passes\\pass_manager.py",
    "ast_data": "FunctionDef name:log_hook arg:fn arg:level arguments arg arg FunctionDef name:wrapped_fn arg:gm arguments arg Assign Call Call Return return:yes Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "get_sparse_tensors",
    "source_code": "def get_sparse_tensors(self, transformation_cache, state_manager):\n    return CategoricalColumn.IdWeightPair(transformation_cache.get(self, state_manager), None)",
    "docstring": "See base class.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\feature_column\\feature_column_v2.py",
    "ast_data": "FunctionDef name:get_sparse_tensors arg:self arg:transformation_cache arg:state_manager arguments arg arg arg Return return:yes Call Call"
  },
  {
    "library": "numpy",
    "name": "BlasNotFoundError",
    "source_code": "class BlasNotFoundError(NotFoundError):\n    pass",
    "docstring": "Blas ( libraries not found. Directories to search for the libraries can be specified in the numpy/distutils/site.cfg file (section [blas]) or by setting the BLAS environment variable.",
    "type": "class",
    "file_path": "numpy\\numpy\\distutils\\system_info.py",
    "ast_data": "ClassDef name:BlasNotFoundError"
  },
  {
    "library": "pytorch",
    "name": "should_reinplace_scatter",
    "source_code": "def should_reinplace_scatter(node: torch.fx.Node) -> bool:\n    inp, _src, _view_ops = node.args\n    if scatter_always_uses_mutation(node):\n        return True\n    if is_node_realized(inp) and is_node_realized(node):\n        return True\n    if inp.op in ('placeholder', 'get_attr') and any((user.target is aten.copy_.default and user.args[0] is inp for user in node.users)):\n        return True\n    return False",
    "docstring": "Choose between mutating and functional scatter decompositions Reinplacing view scatter ops can be pessimising as it blocks fusion with the input or output tensor computations. However, it is still profitable if the input and output would have been realized anyway.",
    "type": "function",
    "file_path": "pytorch\\torch\\_inductor\\fx_passes\\reinplace.py",
    "ast_data": "FunctionDef name:should_reinplace_scatter arg:node arguments arg Assign If Call Return return:yes If BoolOp Call Call Return return:yes If BoolOp Compare Call BoolOp Compare Compare Return return:yes Return return:yes"
  },
  {
    "library": "scipy",
    "name": "_get_funcs",
    "source_code": "def _get_funcs(names, arrays, dtype, lib_name, fmodule, cmodule, fmodule_name, cmodule_name, alias, ilp64=False):\n    funcs = []\n    unpack = False\n    dtype = np.dtype(dtype)\n    module1 = (cmodule, cmodule_name)\n    module2 = (fmodule, fmodule_name)\n    if isinstance(names, str):\n        names = (names,)\n        unpack = True\n    prefix, dtype, prefer_fortran = find_best_blas_type(arrays, dtype)\n    if prefer_fortran:\n        module1, module2 = (module2, module1)\n    for name in names:\n        func_name = prefix + name\n        func_name = alias.get(func_name, func_name)\n        func = getattr(module1[0], func_name, None)\n        module_name = module1[1]\n        if func is None:\n            func = getattr(module2[0], func_name, None)\n            module_name = module2[1]\n        if func is None:\n            raise ValueError(f'{lib_name} function {func_name} could not be found')\n        func.module_name, func.typecode = (module_name, prefix)\n        func.dtype = dtype\n        if not ilp64:\n            func.int_dtype = np.dtype(np.intc)\n        else:\n            func.int_dtype = np.dtype(np.int64)\n        func.prefix = prefix\n        funcs.append(func)\n    if unpack:\n        return funcs[0]\n    else:\n        return funcs",
    "docstring": "Return available BLAS/LAPACK functions. Used also in lapack.py. See get_blas_funcs for docstring.",
    "type": "function",
    "file_path": "scipy\\scipy\\linalg\\blas.py",
    "ast_data": "FunctionDef name:_get_funcs arg:names arg:arrays arg:dtype arg:lib_name arg:fmodule arg:cmodule arg:fmodule_name arg:cmodule_name arg:alias arg:ilp64 arguments arg arg arg arg arg arg arg arg arg arg Assign Assign Assign Call Assign Assign If Call Assign Assign Assign Call If Assign For Assign Assign Call Assign Call Assign If Compare Assign Call Assign If Compare Raise Call Assign Assign If Assign Call Assign Call Assign Call If Return return:yes Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_clone_helper",
    "source_code": "def _clone_helper(op_to_clone, variant_tensor_ops):\n    remap_dict = {}\n    for input_tensor in op_to_clone.inputs:\n        input_tensor_op = input_tensor.op\n        if input_tensor_op in variant_tensor_ops:\n            recursive_map = _clone_helper(input_tensor_op, variant_tensor_ops)\n            remap_dict.update(recursive_map)\n    inputs_list = []\n    for input_tensor in op_to_clone.inputs:\n        input_tensor_op = input_tensor.op\n        if input_tensor_op in remap_dict:\n            remapped_input = remap_dict[input_tensor_op].outputs[0]\n            inputs_list.append(remapped_input)\n        else:\n            inputs_list.append(input_tensor_op.outputs[input_tensor.value_index])\n    g = ops.get_default_graph()\n    new_op = g.create_op(op_to_clone.type, inputs_list, [o.dtype for o in op_to_clone.outputs], name=op_to_clone.name, attrs=op_to_clone.node_def.attr, op_def=_get_op_def(op_to_clone))\n    remap_dict[op_to_clone] = new_op\n    return remap_dict",
    "docstring": "Helper method that recursively clones . Args: op_to_clone: The op we want to clone. variant_tensor_ops: A list of ops that we have to clone along the way. Returns: A dictionary mapping old_ops to new_ops created. Includes op_to_clone as a key.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\input_ops.py",
    "ast_data": "FunctionDef name:_clone_helper arg:op_to_clone arg:variant_tensor_ops arguments arg arg Assign For Assign If Compare Assign Call Call Assign For Assign If Compare Assign Call Call Assign Call Assign Call Call Assign Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "swap_submodule",
    "source_code": "def swap_submodule(self, path: str, value: 'torch.nn.Module') -> 'torch.nn.Module':\n    prefix, _, attr = path.rpartition('.')\n    return swap_submodule(self.get_submodule(prefix), attr, value)",
    "docstring": "Swap the submodule specified by the given ``.",
    "type": "method",
    "file_path": "pytorch\\torch\\nn\\utils\\_named_member_accessor.py",
    "ast_data": "FunctionDef name:swap_submodule arg:self arg:path arg:value arguments arg arg arg Assign Call Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_SparseSegmentSqrtNGrad",
    "source_code": "@ops.RegisterGradient('SparseSegmentSqrtN')\ndef _SparseSegmentSqrtNGrad(op: ops.Operation, grad):\n    if _GetOpAttrOrNone(op, 'sparse_gradient'):\n        return (_SparseSegmentReduceGradV2(op, grad, 'sqrtn'), None, None)\n    dim0 = array_ops.shape(op.inputs[0])[0]\n    return (math_ops.sparse_segment_sqrt_n_grad(grad, op.inputs[1], op.inputs[2], dim0), None, None)",
    "docstring": "Gradient for SparseSegmentSqrtN.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\math_grad.py",
    "ast_data": "FunctionDef name:_SparseSegmentSqrtNGrad arg:op arg:grad arguments arg arg If Call Return return:yes Call Assign Call Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "loss_boxes",
    "source_code": "def loss_boxes(self, outputs, targets, indices, num_boxes):\n    assert 'pred_boxes' in outputs\n    idx = self._get_src_permutation_idx(indices)\n    src_boxes = outputs['pred_boxes'][idx]\n    target_boxes = torch.cat([t['boxes'][i] for t, (_, i) in zip(targets, indices)], dim=0)\n    loss_bbox = F.l1_loss(src_boxes, target_boxes, reduction='none')\n    losses = {}\n    losses['loss_bbox'] = loss_bbox.sum() / num_boxes\n    loss_giou = 1 - torch.diag(generalized_box_iou(box_cxcywh_to_xyxy(src_boxes), box_cxcywh_to_xyxy(target_boxes)))\n    losses['loss_giou'] = loss_giou.sum() / num_boxes\n    return losses",
    "docstring": "Compute the losses related to the bounding boxes, the L1 regression loss and the GIoU loss targets dicts must contain the key \"boxes\" containing a tensor of dim [nb_target_boxes, 4] The target boxes are expected in format (center_x, center_y, h, w), normalized by the image size.",
    "type": "method",
    "file_path": "pytorch\\benchmarks\\functional_autograd_benchmark\\torchvision_models.py",
    "ast_data": "FunctionDef name:loss_boxes arg:self arg:outputs arg:targets arg:indices arg:num_boxes arguments arg arg arg arg arg Compare Assign Call Assign Assign Call Call Assign Call Assign Assign Call Assign Call Call Call Call Assign Call Return return:yes"
  },
  {
    "library": "django",
    "name": "_get_current_day",
    "source_code": "def _get_current_day(self, date):\n    return date",
    "docstring": "Return the start date of the current interval.",
    "type": "method",
    "file_path": "django\\django\\views\\generic\\dates.py",
    "ast_data": "FunctionDef name:_get_current_day arg:self arg:date arguments arg arg Return return:yes"
  },
  {
    "library": "scipy",
    "name": "sh_chebyu",
    "source_code": "def sh_chebyu(n, monic=False):\n    base = sh_jacobi(n, 2.0, 1.5, monic=monic)\n    if monic:\n        return base\n    factor = 4 ** n\n    base._scale(factor)\n    return base",
    "docstring": "Shifted Chebyshev polynomial of the second kind. Defined as :math: for :math: the nth Chebyshev polynomial of the second kind. Parameters ---------- n : int Degree of the polynomial. monic : bool, optional If , scale the leading coefficient to be 1. Default is . Returns ------- U : orthopoly1d Shifted Chebyshev polynomial of the second kind. Notes ----- The polynomials :math: are orthogonal over :math: with weight function :math:.",
    "type": "function",
    "file_path": "scipy\\scipy\\special\\_orthogonal.py",
    "ast_data": "FunctionDef name:sh_chebyu arg:n arg:monic arguments arg arg Assign Call If Return return:yes Assign Call Return return:yes"
  },
  {
    "library": "numpy",
    "name": "ptp",
    "source_code": "def ptp(self, axis=None, out=None, fill_value=None, keepdims=False):\n    if out is None:\n        result = self.max(axis=axis, fill_value=fill_value, keepdims=keepdims)\n        result -= self.min(axis=axis, fill_value=fill_value, keepdims=keepdims)\n        return result\n    out.flat = self.max(axis=axis, out=out, fill_value=fill_value, keepdims=keepdims)\n    min_value = self.min(axis=axis, fill_value=fill_value, keepdims=keepdims)\n    np.subtract(out, min_value, out=out, casting='unsafe')\n    return out",
    "docstring": "Return (maximum - minimum) along the given dimension (i.e. peak-to-peak value). .. warning:: preserves the data type of the array. This means the return value for an input of signed integers with n bits (e.g. , , etc) is also a signed integer with n bits. In that case, peak-to-peak values greater than `view()` method to view the result as unsigned integers with the same bit width: >>> y.ptp(axis=1).view(np.uint8) masked_array(data=[126, 127, 128, 129], mask=False, fill_value=np.uint64(999999), dtype=uint8)",
    "type": "method",
    "file_path": "numpy\\numpy\\ma\\core.py",
    "ast_data": "FunctionDef name:ptp arg:self arg:axis arg:out arg:fill_value arg:keepdims arguments arg arg arg arg arg If Compare Assign Call Call Return return:yes Assign Call Assign Call Call Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "_validate_targets",
    "source_code": "def _validate_targets(self, y):\n    return column_or_1d(y, warn=True).astype(np.float64, copy=False)",
    "docstring": "Validation of y and class_weight. Default implementation for SVR and one-class; overridden in BaseSVC.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\svm\\_base.py",
    "ast_data": "FunctionDef name:_validate_targets arg:self arg:y arguments arg arg Return return:yes Call Call"
  },
  {
    "library": "numpy",
    "name": "split_by_unquoted",
    "source_code": "def split_by_unquoted(line, characters):\n    assert not set('\"\\'') & set(characters), 'cannot split by unquoted quotes'\n    r = re.compile('\\\\A(?P<before>({single_quoted}|{double_quoted}|{not_quoted})*)(?P<after>{char}.*)\\\\Z'.format(not_quoted=f\"\"\"[^\"'{re.escape(characters)}]\"\"\", char=f'[{re.escape(characters)}]', single_quoted=\"('([^'\\\\\\\\]|(\\\\\\\\.))*')\", double_quoted='(\"([^\"\\\\\\\\]|(\\\\\\\\.))*\")'))\n    m = r.match(line)\n    if m:\n        d = m.groupdict()\n        return (d['before'], d['after'])\n    return (line, '')",
    "docstring": "Splits the line into (line[:i], line[i:]), where i is the index of first occurrence of one of the characters not within quotes, or len(line) if no such index exists",
    "type": "function",
    "file_path": "numpy\\numpy\\f2py\\crackfortran.py",
    "ast_data": "FunctionDef name:split_by_unquoted arg:line arg:characters arguments arg arg Call Call Assign Call Call Call Call Assign Call If Assign Call Return return:yes Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "should_skip_lowering",
    "source_code": "def should_skip_lowering(op: torch.fx.node.Node, qconfig_map: dict[str, QConfigAny]):\n    return op.name in qconfig_map and qconfig_map[op.name] is None",
    "docstring": "Return True if the op is configured with a None qconfig, False otherwise. Note: maybe need to generalize this to also check for the dtype, and we only lower when dtype matches, but right now fbgemm/qnnpack only support a single dtype, so it is OK for now.",
    "type": "function",
    "file_path": "pytorch\\torch\\ao\\quantization\\fx\\_lower_to_native_backend.py",
    "ast_data": "FunctionDef name:should_skip_lowering arg:op arg:qconfig_map arguments arg arg Return return:yes BoolOp Compare Compare"
  },
  {
    "library": "tensorflow",
    "name": "partition_outer_dimension",
    "source_code": "def partition_outer_dimension(self, row_partition):\n    if not isinstance(row_partition, RowPartition):\n        raise TypeError('row_partition must be a RowPartition.')\n    if self.shape.rank == 0:\n        raise ValueError('Shape %s must have rank at least 1' % self.shape)\n    return _partition_outer_dimension(self, row_partition)",
    "docstring": "Partitions the outer dimension of this StructuredTensor. Returns a new with the same values as , where the outer dimension is partitioned into two (possibly ragged) dimensions. Requires that this StructuredTensor have an outer dimension (i.e., ). >>> st = tf.experimental.StructuredTensor.from_pyval( ... [{'foo': 12}, {'foo': 33}, {'foo': 99}]) >>> partition = RowPartition.from_row_lengths([2, 0, 1]) >>> st.partition_outer_dimension(partition) }, shape=(3, None))> Args: row_partition: A . Returns: A with rank .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\structured\\structured_tensor.py",
    "ast_data": "FunctionDef name:partition_outer_dimension arg:self arg:row_partition arguments arg arg If Call Raise Call If Compare Raise Call Return return:yes Call"
  },
  {
    "library": "kornia",
    "name": "__init__",
    "source_code": "def __init__(self, module_name: str, dev_dependency: bool=False) -> None:\n    self.module_name = module_name\n    self.module: Optional[ModuleType] = None\n    self.dev_dependency = dev_dependency",
    "docstring": "Initialize the LazyLoader with the name of the module. Args: module_name: The name of the module to be lazily loaded. dev_dependency: If the dependency is required in the dev environment. If True, the module will be loaded in the dev environment. If False, the module will not be loaded in the dev environment.",
    "type": "method",
    "file_path": "kornia\\kornia\\core\\external.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:module_name arg:dev_dependency arguments arg arg arg Assign Assign"
  },
  {
    "library": "scipy",
    "name": "Whitley",
    "source_code": "class Whitley(Benchmark):\n    change_dimensionality = True\n\n    def __init__(self, dimensions=2):\n        Benchmark.__init__(self, dimensions)\n        self._bounds = list(zip([-10.24] * self.N, [10.24] * self.N))\n        self.custom_bounds = ([-1, 2], [-1, 2])\n        self.global_optimum = [[1.0 for _ in range(self.N)]]\n        self.fglob = 0.0\n\n    def fun(self, x, *args):\n        self.nfev += 1\n        XI = x\n        XJ = atleast_2d(x).T\n        temp = 100.0 * (XI ** 2.0 - XJ) + (1.0 - XJ) ** 2.0\n        inner = temp ** 2.0 / 4000.0 - cos(temp) + 1.0\n        return sum(sum(inner, axis=0))",
    "docstring": "Whitley objective function. This class defines the Whitley [1]_ global optimization problem. This is a multimodal minimization problem defined as follows: .. math:: f_{\\text{Whitley}}(x) = \\sum_{i=1}^n \\sum_{j=1}^n \\left[\\frac{(100(x_i^2-x_j)^2 + (1-x_j)^2)^2}{4000} - \\cos(100(x_i^2-x_j)^2 + (1-x_j)^2)+1 \\right] Here, :math: represents the number of dimensions and :math: for :math:. *Global optimum*: :math: for :math: for :math: .. [1] Gavana, A. Global Optimization Benchmarks and AMPGO retrieved 2015 TODO Jamil#167 has '+ 1' inside the cos term, when it should be outside it.",
    "type": "class",
    "file_path": "scipy\\benchmarks\\benchmarks\\go_benchmark_functions\\go_funcs_W.py",
    "ast_data": "ClassDef name:Whitley Assign FunctionDef name:__init__ arg:self arg:dimensions arguments arg arg Call Assign Call Call Assign Assign Call Assign FunctionDef name:fun arg:self arg:x arguments arg arg arg Assign Assign Call Assign Assign Call Return return:yes Call Call"
  },
  {
    "library": "django",
    "name": "__and__",
    "source_code": "def __and__(self, other):\n    return self.intersection(other)",
    "docstring": "Return the intersection of this Geometry and the other.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\geos\\geometry.py",
    "ast_data": "FunctionDef name:__and__ arg:self arg:other arguments arg arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "backend_registered",
    "source_code": "def backend_registered(backend_name):\n    return backend_name in BackendType.__members__.keys()",
    "docstring": "Checks if backend_name is registered as an RPC backend. Args: backend_name (str): string to identify the RPC backend. Returns: True if the backend has been registered with ``, else False.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\rpc\\backend_registry.py",
    "ast_data": "FunctionDef name:backend_registered arg:backend_name arguments arg Return return:yes Compare Call"
  },
  {
    "library": "tensorflow",
    "name": "inverse_event_shape_tensor",
    "source_code": "def inverse_event_shape_tensor(self, output_shape, name='inverse_event_shape_tensor'):\n    with self._name_scope(name, [output_shape]):\n        output_shape = ops.convert_to_tensor(output_shape, dtype=dtypes.int32, name='output_shape')\n        return self._inverse_event_shape_tensor(output_shape)",
    "docstring": "Shape of a single sample from a single batch as an 1D . Args: output_shape: , vector indicating event-portion shape passed into function. name: name to give to the op Returns: inverse_event_shape_tensor: , vector indicating event-portion shape after applying .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\distributions\\bijector_impl.py",
    "ast_data": "FunctionDef name:inverse_event_shape_tensor arg:self arg:output_shape arg:name arguments arg arg arg With Call Assign Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "_SnapshotState",
    "source_code": "class _SnapshotState(Enum):\n    NotStarted = 0\n    Restored = 1\n    Iterating = 2",
    "docstring": "These are the snapshotting-related states that IterDataPipes can be in. - allows you to restore a snapshot and create an iterator with reset - cannot restore again, allows you to create an iterator without resetting the DataPipe - can restore, will reset if you create a new iterator",
    "type": "class",
    "file_path": "pytorch\\torch\\utils\\data\\datapipes\\_hook_iterator.py",
    "ast_data": "ClassDef name:_SnapshotState Assign Assign Assign"
  },
  {
    "library": "matplotlib",
    "name": "_update_gc",
    "source_code": "def _update_gc(self, gc, new_gc_dict):\n    new_gc_dict = new_gc_dict.copy()\n    dashes = new_gc_dict.pop('dashes', None)\n    if dashes:\n        gc.set_dashes(**dashes)\n    for k, v in new_gc_dict.items():\n        set_method = getattr(gc, 'set_' + k, None)\n        if not callable(set_method):\n            raise AttributeError(f'Unknown property {k}')\n        set_method(v)\n    return gc",
    "docstring": "Update the given GraphicsContext with the given dict of properties. The keys in the dictionary are used to identify the appropriate `` method on the *gc*.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\patheffects.py",
    "ast_data": "FunctionDef name:_update_gc arg:self arg:gc arg:new_gc_dict arguments arg arg arg Assign Call Assign Call If Call For Call Assign Call If Call Raise Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_materialize",
    "source_code": "def _materialize(self, device: Optional[torch.device]=None) -> torch.Tensor:\n    if device is None:\n        device = torch.device('cpu')\n    if self.variant == CausalVariant.UPPER_LEFT:\n        return self._upper_left(device)\n    elif self.variant == CausalVariant.LOWER_RIGHT:\n        return self._lower_right(device)",
    "docstring": "Materializes the causal bias into a tensor form. Depending on the variant, this method generates either an upper-left or lower-right triangular matrix to represent the causal bias. Args: device (Optional[torch.device]): The device on which to create the tensor. Defaults to CPU. Returns: torch.Tensor: The materialized bias tensor.",
    "type": "method",
    "file_path": "pytorch\\torch\\nn\\attention\\bias.py",
    "ast_data": "FunctionDef name:_materialize arg:self arg:device arguments arg arg If Compare Assign Call If Compare Return return:yes Call If Compare Return return:yes Call"
  },
  {
    "library": "django",
    "name": "leaf_nodes",
    "source_code": "def leaf_nodes(self, app=None):\n    leaves = set()\n    for node in self.nodes:\n        if all((key[0] != node[0] for key in self.node_map[node].children)) and (not app or app == node[0]):\n            leaves.add(node)\n    return sorted(leaves)",
    "docstring": "Return all leaf nodes - that is, nodes with no dependents in their app. These are the \"most current\" version of an app's schema. Having more than one per app is technically an error, but one that gets handled further up, in the interactive command - it's usually the result of a VCS merge and needs some user input.",
    "type": "method",
    "file_path": "django\\django\\db\\migrations\\graph.py",
    "ast_data": "FunctionDef name:leaf_nodes arg:self arg:app arguments arg arg Assign Call For If BoolOp Call Compare BoolOp Compare Call Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "_short_axis",
    "source_code": "def _short_axis(self):\n    if self.orientation == 'vertical':\n        return self.ax.xaxis\n    return self.ax.yaxis",
    "docstring": "Return the short axis",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\colorbar.py",
    "ast_data": "FunctionDef name:_short_axis arg:self arguments arg If Compare Return return:yes Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_InterleaveDataset",
    "source_code": "class _InterleaveDataset(dataset_ops.UnaryDataset):\n\n    def __init__(self, input_dataset, map_func, cycle_length, block_length, name=None):\n        self._input_dataset = input_dataset\n        self._map_func = structured_function.StructuredFunctionWrapper(map_func, self._transformation_name(), dataset=input_dataset)\n        if not isinstance(self._map_func.output_structure, dataset_ops.DatasetSpec):\n            raise TypeError(f'The `map_func` argument must return a `Dataset` object. Got {dataset_ops.get_type(self._map_func.output_structure)!r}.')\n        self._structure = self._map_func.output_structure._element_spec\n        self._cycle_length = ops.convert_to_tensor(cycle_length, dtype=dtypes.int64, name='cycle_length')\n        self._block_length = ops.convert_to_tensor(block_length, dtype=dtypes.int64, name='block_length')\n        self._name = name\n        variant_tensor = gen_dataset_ops.interleave_dataset(input_dataset._variant_tensor, self._map_func.function.captured_inputs, self._cycle_length, self._block_length, f=self._map_func.function, **self._common_args)\n        super().__init__(input_dataset, variant_tensor)\n\n    def _functions(self):\n        return [self._map_func]\n\n    @property\n    def element_spec(self):\n        return self._structure\n\n    def _transformation_name(self):\n        return 'Dataset.interleave()'",
    "docstring": "A that interleaves the result of transformed inputs.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\data\\ops\\interleave_op.py",
    "ast_data": "ClassDef name:_InterleaveDataset FunctionDef name:__init__ arg:self arg:input_dataset arg:map_func arg:cycle_length arg:block_length arg:name arguments arg arg arg arg arg arg Assign Assign Call Call If Call Raise Call Call Assign Assign Call Assign Call Assign Assign Call Call Call FunctionDef name:_functions arg:self arguments arg Return return:yes FunctionDef name:element_spec arg:self arguments arg Return return:yes FunctionDef name:_transformation_name arg:self arguments arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "overridden",
    "source_code": "def overridden(self, key: _K) -> bool:\n    return key in self._overrides",
    "docstring": "Checks if a key-value pair is overridden.",
    "type": "method",
    "file_path": "pytorch\\torch\\onnx\\_internal\\registration.py",
    "ast_data": "FunctionDef name:overridden arg:self arg:key arguments arg arg Return return:yes Compare"
  },
  {
    "library": "django",
    "name": "_get_page",
    "source_code": "def _get_page(self, *args, **kwargs):\n    return Page(*args, **kwargs)",
    "docstring": "Return an instance of a single page. This hook can be used by subclasses to use an alternative to the standard :cls: object.",
    "type": "method",
    "file_path": "django\\django\\core\\paginator.py",
    "ast_data": "FunctionDef name:_get_page arg:self arguments arg arg arg Return return:yes Call"
  },
  {
    "library": "kornia",
    "name": "KORNIA_CHECK_DM_DESC",
    "source_code": "def KORNIA_CHECK_DM_DESC(desc1: Tensor, desc2: Tensor, dm: Tensor, raises: bool=True) -> bool:\n    if not (dm.size(0) == desc1.size(0) and dm.size(1) == desc2.size(0)):\n        if raises:\n            raise TypeError(f'distance matrix shape {dm.shape} is not onsistent with descriptors shape: desc1 {desc1.shape} desc2 {desc2.shape}')\n        return False\n    return True",
    "docstring": "Check whether the provided descriptors match with a distance matrix. Args: desc1: first descriptor tensor to evaluate. desc2: second descriptor tensor to evaluate. dm: distance matrix tensor to evaluate. raises: bool indicating whether an exception should be raised upon failure. Raises: TypeException: if the descriptors shape do not match with the distance matrix and raises is True. Example: >>> desc1 = torch.rand(4) >>> desc2 = torch.rand(8) >>> dm = torch.rand(4, 8) >>> KORNIA_CHECK_DM_DESC(desc1, desc2, dm) True",
    "type": "function",
    "file_path": "kornia\\kornia\\core\\check.py",
    "ast_data": "FunctionDef name:KORNIA_CHECK_DM_DESC arg:desc1 arg:desc2 arg:dm arg:raises arguments arg arg arg arg If BoolOp Compare Call Call Compare Call Call If Raise Call Return return:yes Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "caption_to_item",
    "source_code": "def caption_to_item(self, caption):\n    captions = self.captions()\n    if caption not in captions:\n        raise LookupError('There is no menu item with the caption \"%s\"' % caption)\n    return self._items[captions.index(caption)]",
    "docstring": "Get a MenuItem from the caption. Args: caption: (str) The caption to look up. Returns: (MenuItem) The first-match menu item with the caption, if any. Raises: LookupError: If a menu item with the caption does not exist.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\debug\\cli\\debugger_cli_common.py",
    "ast_data": "FunctionDef name:caption_to_item arg:self arg:caption arguments arg arg Assign Call If Compare Raise Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "ProcessGroupState",
    "source_code": "@dataclass\nclass ProcessGroupState:\n    local_rank: int\n    global_rank: int\n    local_world_size: int\n    global_world_size: int",
    "docstring": "State for ser-de of process group",
    "type": "class",
    "file_path": "pytorch\\torch\\distributed\\_shard\\sharded_tensor\\api.py",
    "ast_data": "ClassDef name:ProcessGroupState"
  },
  {
    "library": "matplotlib",
    "name": "get_minor_ticks",
    "source_code": "def get_minor_ticks(self, numticks=None):\n    if numticks is None:\n        numticks = len(self.get_minorticklocs())\n    while len(self.minorTicks) < numticks:\n        tick = self._get_tick(major=False)\n        self.minorTicks.append(tick)\n        self._copy_tick_props(self.minorTicks[0], tick)\n    return self.minorTicks[:numticks]",
    "docstring": "Return the list of minor \\s. .. warning:: Ticks are not guaranteed to be persistent. Various operations can create, delete and modify the Tick instances. There is an imminent risk that changes to individual ticks will not survive if you work on the figure further (including also panning/zooming on a displayed figure). Working on the individual ticks is a method of last resort. Use instead if possible.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\axis.py",
    "ast_data": "FunctionDef name:get_minor_ticks arg:self arg:numticks arguments arg arg If Compare Assign Call Call While Compare Call Assign Call Call Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "set_bounds",
    "source_code": "def set_bounds(self, low=None, high=None):\n    if self.spine_type == 'circle':\n        raise ValueError('set_bounds() method incompatible with circular spines')\n    if high is None and np.iterable(low):\n        low, high = low\n    old_low, old_high = self.get_bounds() or (None, None)\n    if low is None:\n        low = old_low\n    if high is None:\n        high = old_high\n    self._bounds = (low, high)\n    self.stale = True",
    "docstring": "Set the spine bounds. Parameters ---------- low : float or None, optional The lower spine bound. Passing *None* leaves the limit unchanged. The bounds may also be passed as the tuple (*low*, *high*) as the first positional argument. .. ACCEPTS: (low: float, high: float) high : float or None, optional The higher spine bound. Passing *None* leaves the limit unchanged.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\spines.py",
    "ast_data": "FunctionDef name:set_bounds arg:self arg:low arg:high arguments arg arg arg If Compare Raise Call If BoolOp Compare Call Assign Assign BoolOp Call If Compare Assign If Compare Assign Assign Assign"
  },
  {
    "library": "matplotlib",
    "name": "convert",
    "source_code": "@staticmethod\ndef convert(obj, unit, axis):\n    return obj",
    "docstring": "Convert *obj* using *unit* for the specified *axis*. If *obj* is a sequence, return the converted sequence. The output must be a sequence of scalars that can be used by the numpy array layer.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\units.py",
    "ast_data": "FunctionDef name:convert arg:obj arg:unit arg:axis arguments arg arg arg Return return:yes"
  },
  {
    "library": "scipy",
    "name": "nnls",
    "source_code": "@_deprecate_positional_args(version='1.18.0', deprecated_args={'atol'})\ndef nnls(A, b, *, maxiter=None, atol=_NoValue):\n    A = np.asarray_chkfinite(A, dtype=np.float64, order='C')\n    b = np.asarray_chkfinite(b, dtype=np.float64)\n    if len(A.shape) != 2:\n        raise ValueError(f'Expected a 2D array, but the shape of A is {A.shape}')\n    if b.ndim > 2 or (b.ndim == 2 and b.shape[1] != 1):\n        raise ValueError(f'Expected a 1D array,(or 2D with one column), but the, shape of b is {b.shape}')\n    elif b.ndim == 2 and b.shape[1] == 1:\n        b = b.ravel()\n    m, n = A.shape\n    if m != b.shape[0]:\n        raise ValueError('Incompatible dimensions. The first dimension of ' + f'A is {m}, while the shape of b is {(b.shape[0],)}')\n    if not maxiter:\n        maxiter = 3 * n\n    x, rnorm, info = _nnls(A, b, maxiter)\n    if info == 3:\n        raise RuntimeError('Maximum number of iterations reached.')\n    return (x, rnorm)",
    "docstring": "Solve `10.1137/1.9781611971217` Examples -------- >>> import numpy as np >>> from scipy.optimize import nnls ... >>> A = np.array([[1, 0], [1, 0], [0, 1]]) >>> b = np.array([2, 1, 1]) >>> nnls(A, b) (array([1.5, 1. ]), 0.7071067811865475) >>> b = np.array([-1, -1, -1]) >>> nnls(A, b) (array([0., 0.]), 1.7320508075688772)",
    "type": "function",
    "file_path": "scipy\\scipy\\optimize\\_nnls.py",
    "ast_data": "FunctionDef name:nnls arg:A arg:b arguments arg arg arg arg Assign Call Assign Call If Compare Call Raise Call If BoolOp Compare BoolOp Compare Compare Raise Call If BoolOp Compare Compare Assign Call Assign If Compare Raise Call If Assign Assign Call If Compare Raise Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "is_callable_type",
    "source_code": "def is_callable_type(type_hint) -> bool:\n    return type_hint.__name__ == 'Callable'",
    "docstring": "Special Case of is_type.",
    "type": "function",
    "file_path": "pytorch\\torch\\_inductor\\fuzzer.py",
    "ast_data": "FunctionDef name:is_callable_type arg:type_hint arguments arg Return return:yes Compare"
  },
  {
    "library": "pygame",
    "name": "samples",
    "source_code": "def samples(sound):\n    return numpy.array(sound, copy=False)",
    "docstring": "pygame.sndarray.samples(Sound): return array Reference Sound samples into an array. Creates a new array that directly references the samples in a Sound object. Modifying the array will change the Sound. The array will always be in the format returned from pygame.mixer.get_init().",
    "type": "function",
    "file_path": "pygame\\src_py\\sndarray.py",
    "ast_data": "FunctionDef name:samples arg:sound arguments arg Return return:yes Call"
  },
  {
    "library": "django",
    "name": "get_context_data",
    "source_code": "def get_context_data(self, **kwargs):\n    if 'form' not in kwargs:\n        kwargs['form'] = self.get_form()\n    return super().get_context_data(**kwargs)",
    "docstring": "Insert the form into the context dict.",
    "type": "method",
    "file_path": "django\\django\\views\\generic\\edit.py",
    "ast_data": "FunctionDef name:get_context_data arg:self arguments arg arg If Compare Assign Call Return return:yes Call Call"
  },
  {
    "library": "scikit-learn",
    "name": "fit",
    "source_code": "@_fit_context(prefer_skip_nested_validation=True)\ndef fit(self, X, y, sample_weight=None):\n    y = validate_data(self, y=y)\n    return self._partial_fit(X, y, np.unique(y), _refit=True, sample_weight=sample_weight)",
    "docstring": "Fit Gaussian Naive Bayes according to X, y. Parameters ---------- X : array-like of shape (n_samples, n_features) Training vectors, where is the number of samples and is the number of features. y : array-like of shape (n_samples,) Target values. sample_weight : array-like of shape (n_samples,), default=None Weights applied to individual samples (1. for unweighted). .. versionadded:: 0.17 Gaussian Naive Bayes supports fitting with *sample_weight*. Returns ------- self : object Returns the instance itself.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\naive_bayes.py",
    "ast_data": "FunctionDef name:fit arg:self arg:X arg:y arg:sample_weight arguments arg arg arg arg Assign Call Return return:yes Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "_conv_bn_add_relu_extra_inputs_getter_left",
    "source_code": "def _conv_bn_add_relu_extra_inputs_getter_left(pattern):\n    _relu, add_pattern = pattern\n    _, _bn_conv, extra_input = add_pattern\n    return [extra_input]",
    "docstring": "get inputs pattern for extra inputs, inputs for root node are assumed to be copied over from root node to the fused node",
    "type": "function",
    "file_path": "pytorch\\torch\\ao\\quantization\\backend_config\\onednn.py",
    "ast_data": "FunctionDef name:_conv_bn_add_relu_extra_inputs_getter_left arg:pattern arguments arg Assign Assign Return return:yes"
  },
  {
    "library": "django",
    "name": "adapt_datefield_value",
    "source_code": "def adapt_datefield_value(self, value):\n    return value",
    "docstring": "Transform a date value to an object compatible with what is expected by the backend driver for date columns. The default implementation transforms the date to text, but that is not necessary for Oracle.",
    "type": "method",
    "file_path": "django\\django\\db\\backends\\oracle\\operations.py",
    "ast_data": "FunctionDef name:adapt_datefield_value arg:self arg:value arguments arg arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "from_row_starts",
    "source_code": "@classmethod\n@dispatch.add_dispatch_support\ndef from_row_starts(cls, values, row_starts, name=None, validate=True):\n    if not isinstance(validate, bool):\n        raise TypeError(f'Argument `validate` must have type bool. Received {validate}.')\n    with ops.name_scope(name, 'RaggedFromRowStarts', [values, row_starts]):\n        values = _convert_to_ragged_tensor_values(values)\n        row_partition = RowPartition.from_row_starts(row_starts=row_starts, nvals=_nrows(values), validate=validate, dtype_hint=_get_optional_partition_dtype(values))\n        return cls._from_row_partition(values, row_partition, validate=validate)",
    "docstring": "Creates a with rows partitioned by . Equivalent to: . Args: values: A potentially ragged tensor with shape . row_starts: A 1-D integer tensor with shape . Must be nonnegative and sorted in ascending order. If , then must be zero. name: A name prefix for the RaggedTensor (optional). validate: If true, then use assertions to check that the arguments form a valid . Note: these assertions incur a runtime cost, since they must be checked for each tensor value. Returns: A . . . #### Example: >>> print(tf.RaggedTensor.from_row_starts( ... values=[3, 1, 4, 1, 5, 9, 2, 6], ... row_starts=[0, 4, 4, 7, 8]))",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\ragged_tensor.py",
    "ast_data": "FunctionDef name:from_row_starts arg:cls arg:values arg:row_starts arg:name arg:validate arguments arg arg arg arg arg If Call Raise Call With Call Assign Call Assign Call Call Call Return return:yes Call"
  },
  {
    "library": "virtualenv",
    "name": "path_exe_finder",
    "source_code": "def path_exe_finder(spec: PythonSpec) -> Callable[[Path], Generator[tuple[Path, bool], None, None]]:\n    pat = spec.generate_re(windows=sys.platform == 'win32')\n    direct = spec.str_spec\n    if sys.platform == 'win32':\n        direct = f'{direct}.exe'\n\n    def path_exes(path: Path) -> Generator[tuple[Path, bool], None, None]:\n        direct_path = path / direct\n        if direct_path.exists():\n            yield (direct_path, False)\n        for exe in path.iterdir():\n            match = pat.fullmatch(exe.name)\n            if match:\n                yield (exe.absolute(), match['impl'] == 'python')\n    return path_exes",
    "docstring": "Given a spec, return a function that can be called on a path to find all matching files in it.",
    "type": "function",
    "file_path": "virtualenv\\src\\virtualenv\\discovery\\builtin.py",
    "ast_data": "FunctionDef name:path_exe_finder arg:spec arguments arg Assign Call Compare Assign If Compare Assign FunctionDef name:path_exes arg:path arguments arg Assign If Call For Call Assign Call If Call Compare Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "finalize_prefix",
    "source_code": "def finalize_prefix(self):\n    old_prefix = self.prefix\n    self.prefix = IndentedBuffer()\n    super().finalize_prefix()\n    for kernel in self._triton_call_wrappers.values():\n        self.prefix.writeline('\\n')\n        kernel.generate(self)\n    self.prefix.writeline('\\n')\n    self.prefix.splice(old_prefix)",
    "docstring": "Define the triton kernels now that autotuning is finished",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\codegen\\cpp_wrapper_gpu.py",
    "ast_data": "FunctionDef name:finalize_prefix arg:self arguments arg Assign Assign Call Call Call For Call Call Call Call Call"
  },
  {
    "library": "kornia",
    "name": "cx",
    "source_code": "@property\ndef cx(self) -> Tensor:\n    return self._params[..., 2]",
    "docstring": "Returns the principal point in x direction.",
    "type": "method",
    "file_path": "kornia\\kornia\\sensors\\camera\\camera_model.py",
    "ast_data": "FunctionDef name:cx arg:self arguments arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "flatten_graph_inputs",
    "source_code": "def flatten_graph_inputs(gm: torch.fx.GraphModule, inputs, compile_gm):\n    inputs_idx_to_clear = [i for i, node in enumerate(gm.graph.nodes) if node.op == 'placeholder' and node.meta.get('steal_arg', False)]\n    if torch._dynamo.compiled_autograd.in_compiled_autograd_region:\n        assert inputs_idx_to_clear == [0]\n        assert isinstance(inputs[0], list)\n        boxed_inputs_count = len(inputs[0])\n\n        def flatten_fn(args):\n            return args[0] + list(args[1:])\n\n        def unflatten_fn(flat_args):\n            return (flat_args[:boxed_inputs_count], *flat_args[boxed_inputs_count:])\n        compiled_fn = compile_gm(GmWrapper(gm, unflatten_fn), flatten_fn(inputs))\n    else:\n        flat_inputs, spec = pytree.tree_flatten(inputs)\n        unflatten_fn = functools.partial(pytree.tree_unflatten, treespec=spec)\n        compiled_fn = compile_gm(GmWrapper(gm, unflatten_fn), flat_inputs)\n        flatten_fn = pytree.arg_tree_leaves\n\n    def wrapper(*args):\n        flat_args = flatten_fn(args)\n        for i in inputs_idx_to_clear:\n            args[i].clear()\n        return compiled_fn(flat_args)\n    return wrapper",
    "docstring": "Mutate inputs so that they are flat and wrap gm such that it accepts those inputs. This is needed for graphs that take bumpy inputs.",
    "type": "function",
    "file_path": "pytorch\\torch\\_dynamo\\utils.py",
    "ast_data": "FunctionDef name:flatten_graph_inputs arg:gm arg:inputs arg:compile_gm arguments arg arg arg Assign Call BoolOp Compare Call If Compare Call Assign Call FunctionDef name:flatten_fn arg:args arguments arg Return return:yes Call FunctionDef name:unflatten_fn arg:flat_args arguments arg Return return:yes Assign Call Call Call Assign Call Assign Call Assign Call Call Assign FunctionDef name:wrapper arguments arg Assign Call For Call Return return:yes Call Return return:yes"
  },
  {
    "library": "sphinx",
    "name": "desc_addname",
    "source_code": "class desc_addname(_desc_classes_injector, nodes.Part, nodes.Inline, nodes.FixedTextElement):\n    classes = ['sig-prename', 'descclassname']",
    "docstring": "Node for additional name parts for an object. For example, in the declaration of a Python class ``.",
    "type": "class",
    "file_path": "sphinx\\sphinx\\addnodes.py",
    "ast_data": "ClassDef name:desc_addname Assign"
  },
  {
    "library": "seaborn",
    "name": "map",
    "source_code": "def map(self, func, **kwargs):\n    row_indices, col_indices = np.indices(self.axes.shape)\n    indices = zip(row_indices.flat, col_indices.flat)\n    self._map_bivariate(func, indices, **kwargs)\n    return self",
    "docstring": "Plot with the same function in every subplot. Parameters ---------- func : callable plotting function Must take x, y arrays as positional arguments and draw onto the \"currently active\" matplotlib Axes. Also needs to accept kwargs called ``.",
    "type": "method",
    "file_path": "seaborn\\seaborn\\axisgrid.py",
    "ast_data": "FunctionDef name:map arg:self arg:func arguments arg arg arg Assign Call Assign Call Call Return return:yes"
  },
  {
    "library": "kornia",
    "name": "so3",
    "source_code": "@property\ndef so3(self) -> So3:\n    return self._rotation",
    "docstring": "Return the underlying rotation(So3).",
    "type": "method",
    "file_path": "kornia\\kornia\\geometry\\liegroup\\se3.py",
    "ast_data": "FunctionDef name:so3 arg:self arguments arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "is_initialized",
    "source_code": "def is_initialized():\n    return _initialized and (not _is_in_bad_fork())",
    "docstring": "Return whether PyTorch's XPU state has been initialized.",
    "type": "function",
    "file_path": "pytorch\\torch\\xpu\\__init__.py",
    "ast_data": "FunctionDef name:is_initialized arguments Return return:yes BoolOp Call"
  },
  {
    "library": "pytorch",
    "name": "ConvAdd2d",
    "source_code": "class ConvAdd2d(_FusedModule):\n\n    def __init__(self, conv, add):\n        super().__init__(conv)\n        self.add = add\n\n    def forward(self, x1, x2):\n        return self.add(self[0](x1), x2)",
    "docstring": "This is a sequential container which calls the Conv2d modules with extra Add. During quantization this will be replaced with the corresponding fused module.",
    "type": "class",
    "file_path": "pytorch\\torch\\ao\\nn\\intrinsic\\modules\\fused.py",
    "ast_data": "ClassDef name:ConvAdd2d FunctionDef name:__init__ arg:self arg:conv arg:add arguments arg arg arg Call Call Assign FunctionDef name:forward arg:self arg:x1 arg:x2 arguments arg arg arg Return return:yes Call Call"
  },
  {
    "library": "scipy",
    "name": "build_u_freqs_array",
    "source_code": "def build_u_freqs_array(self, maxu):\n    n1, n2 = (self.n1, self.n2)\n    total = special.binom(n1 + n2, n1)\n    if maxu + 1 <= self.configurations.size:\n        return self.configurations[:maxu + 1] / total\n    s_array = self.build_sigma_array(maxu)\n    configurations = np.zeros(maxu + 1, dtype=np.uint64)\n    configurations_is_uint = True\n    uint_max = np.iinfo(np.uint64).max\n    configurations[0] = 1\n    for u in np.arange(1, maxu + 1):\n        coeffs = s_array[u - 1::-1]\n        new_val = np.dot(configurations[:u], coeffs) / u\n        if new_val > uint_max and configurations_is_uint:\n            configurations = configurations.astype(float)\n            configurations_is_uint = False\n        configurations[u] = new_val\n    self.configurations = configurations\n    return configurations / total",
    "docstring": "Build all the array of frequencies for u from 0 to maxu. Assumptions: n1 <= n2 maxu <= n1 * n2 / 2",
    "type": "method",
    "file_path": "scipy\\scipy\\stats\\_mannwhitneyu.py",
    "ast_data": "FunctionDef name:build_u_freqs_array arg:self arg:maxu arguments arg arg Assign Assign Call If Compare Return return:yes Assign Call Assign Call Assign Assign Call Assign For Call Assign Assign Call If BoolOp Compare Assign Call Assign Assign Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "convert_to_tensor_or_indexed_slices",
    "source_code": "@tf_export(v1=['convert_to_tensor_or_indexed_slices'])\ndef convert_to_tensor_or_indexed_slices(value, dtype=None, name=None):\n    return internal_convert_to_tensor_or_indexed_slices(value=value, dtype=dtype, name=name, as_ref=False)",
    "docstring": "Converts the given object to a or an . If is an or it is returned unmodified. Otherwise, it is converted to a using . Args: value: An , , or an object that can be consumed by . dtype: (Optional.) The required of the returned or . name: (Optional.) A name to use if a new is created. Returns: A , , or based on . Raises: ValueError: If does not match the element type of .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\indexed_slices.py",
    "ast_data": "FunctionDef name:convert_to_tensor_or_indexed_slices arg:value arg:dtype arg:name arguments arg arg arg Return return:yes Call Call"
  },
  {
    "library": "seaborn",
    "name": "_patch_colormap_display",
    "source_code": "def _patch_colormap_display():\n\n    def _repr_png_(self):\n        import io\n        from PIL import Image\n        import numpy as np\n        IMAGE_SIZE = (400, 50)\n        X = np.tile(np.linspace(0, 1, IMAGE_SIZE[0]), (IMAGE_SIZE[1], 1))\n        pixels = self(X, bytes=True)\n        png_bytes = io.BytesIO()\n        Image.fromarray(pixels).save(png_bytes, format='png')\n        return png_bytes.getvalue()\n\n    def _repr_html_(self):\n        import base64\n        png_bytes = self._repr_png_()\n        png_base64 = base64.b64encode(png_bytes).decode('ascii')\n        return '<img ' + 'alt=\"' + self.name + ' color map\" ' + 'title=\"' + self.name + '\"' + 'src=\"data:image/png;base64,' + png_base64 + '\">'\n    mpl.colors.Colormap._repr_png_ = _repr_png_\n    mpl.colors.Colormap._repr_html_ = _repr_html_",
    "docstring": "Simplify the rich display of matplotlib color maps in a notebook.",
    "type": "function",
    "file_path": "seaborn\\seaborn\\palettes.py",
    "ast_data": "FunctionDef name:_patch_colormap_display arguments FunctionDef name:_repr_png_ arg:self arguments arg Assign Assign Call Call Assign Call Assign Call Call Call Return return:yes Call FunctionDef name:_repr_html_ arg:self arguments arg Assign Call Assign Call Call Return return:yes Assign Assign"
  },
  {
    "library": "django",
    "name": "main_help_text",
    "source_code": "def main_help_text(self, commands_only=False):\n    if commands_only:\n        usage = sorted(get_commands())\n    else:\n        usage = ['', \"Type '%s help <subcommand>' for help on a specific subcommand.\" % self.prog_name, '', 'Available subcommands:']\n        commands_dict = defaultdict(lambda: [])\n        for name, app in get_commands().items():\n            if app == 'django.core':\n                app = 'django'\n            else:\n                app = app.rpartition('.')[-1]\n            commands_dict[app].append(name)\n        style = color_style()\n        for app in sorted(commands_dict):\n            usage.append('')\n            usage.append(style.NOTICE('[%s]' % app))\n            for name in sorted(commands_dict[app]):\n                usage.append('    %s' % name)\n        if self.settings_exception is not None:\n            usage.append(style.NOTICE('Note that only Django core commands are listed as settings are not properly configured (error: %s).' % self.settings_exception))\n    return '\\n'.join(usage)",
    "docstring": "Return the script's main help text, as a string.",
    "type": "method",
    "file_path": "django\\django\\core\\management\\__init__.py",
    "ast_data": "FunctionDef name:main_help_text arg:self arg:commands_only arguments arg arg If Assign Call Call Assign Assign Call arguments For Call Call If Compare Assign Assign Call Call Assign Call For Call Call Call Call For Call Call If Compare Call Call Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "_vectorize_rvs_over_shapes",
    "source_code": "def _vectorize_rvs_over_shapes(_rvs1):\n\n    def _rvs(*args, size, random_state):\n        _rvs1_size, _rvs1_indices = _check_shape(args[0].shape, size)\n        size = np.array(size)\n        _rvs1_size = np.array(_rvs1_size)\n        _rvs1_indices = np.array(_rvs1_indices)\n        if np.all(_rvs1_indices):\n            return _rvs1(*args, size, random_state)\n        out = np.empty(size)\n        j0 = np.arange(out.ndim)\n        j1 = np.hstack((j0[~_rvs1_indices], j0[_rvs1_indices]))\n        out = np.moveaxis(out, j1, j0)\n        for i in np.ndindex(*size[~_rvs1_indices]):\n            out[i] = _rvs1(*[np.squeeze(arg)[i] for arg in args], _rvs1_size, random_state)\n        return np.moveaxis(out, j0, j1)\n    return _rvs",
    "docstring": "Decorator that vectorizes _rvs method to work on ndarray shapes",
    "type": "function",
    "file_path": "scipy\\scipy\\stats\\_distn_infrastructure.py",
    "ast_data": "FunctionDef name:_vectorize_rvs_over_shapes arg:_rvs1 arguments arg FunctionDef name:_rvs arguments arg arg arg Assign Call Assign Call Assign Call Assign Call If Call Return return:yes Call Assign Call Assign Call Assign Call Assign Call For Call Assign Call Call Return return:yes Call Return return:yes"
  },
  {
    "library": "pygame",
    "name": "__init__",
    "source_code": "def __init__(self, *sprites, **kwargs):\n    self._spritelayers = {}\n    self._spritelist = []\n    AbstractGroup.__init__(self)\n    self._default_layer = kwargs.get('default_layer', 0)\n    self.add(*sprites, **kwargs)",
    "docstring": "initialize an instance of LayeredUpdates with the given attributes You can set the default layer through kwargs using 'default_layer' and an integer for the layer. The default layer is 0. If the sprite you add has an attribute _layer, then that layer will be used. If **kwarg contains 'layer', then the passed sprites will be added to that layer (overriding the sprite._layer attribute). If neither the sprite nor **kwarg has a 'layer', then the default layer is used to add the sprites.",
    "type": "method",
    "file_path": "pygame\\src_py\\sprite.py",
    "ast_data": "FunctionDef name:__init__ arg:self arguments arg arg arg Assign Assign Call Assign Call Call"
  },
  {
    "library": "sphinx",
    "name": "fix_genindex",
    "source_code": "def fix_genindex(self, tree: list[tuple[str, list[tuple[str, Any]]]]) -> None:\n    for _key, columns in tree:\n        for _entryname, (links, subitems, _key) in columns:\n            for i, (ismain, link) in enumerate(links):\n                if (m := self.refuri_re.match(link)):\n                    links[i] = (ismain, self.fix_fragment(m.group(1), m.group(2)))\n            for _subentryname, subentrylinks in subitems:\n                for i, (ismain, link) in enumerate(subentrylinks):\n                    if (m := self.refuri_re.match(link)):\n                        subentrylinks[i] = (ismain, self.fix_fragment(m.group(1), m.group(2)))",
    "docstring": "Fix href attributes for genindex pages.",
    "type": "method",
    "file_path": "sphinx\\sphinx\\builders\\_epub_base.py",
    "ast_data": "FunctionDef name:fix_genindex arg:self arg:tree arguments arg arg For For For Call If Call Assign Call Call Call For For Call If Call Assign Call Call Call"
  },
  {
    "library": "django",
    "name": "describe_operation",
    "source_code": "@staticmethod\ndef describe_operation(operation, backwards):\n    prefix = ''\n    is_error = False\n    if hasattr(operation, 'code'):\n        code = operation.reverse_code if backwards else operation.code\n        action = code.__doc__ or '' if code else None\n    elif hasattr(operation, 'sql'):\n        action = operation.reverse_sql if backwards else operation.sql\n    else:\n        action = ''\n        if backwards:\n            prefix = 'Undo '\n    if action is not None:\n        action = str(action).replace('\\n', '')\n    elif backwards:\n        action = 'IRREVERSIBLE'\n        is_error = True\n    if action:\n        action = ' -> ' + action\n    truncated = Truncator(action)\n    return (prefix + operation.describe() + truncated.chars(40), is_error)",
    "docstring": "Return a string that describes a migration operation for --plan.",
    "type": "method",
    "file_path": "django\\django\\core\\management\\commands\\migrate.py",
    "ast_data": "FunctionDef name:describe_operation arg:operation arg:backwards arguments arg arg Assign Assign If Call Assign Assign BoolOp If Call Assign Assign If Assign If Compare Assign Call Call If Assign Assign If Assign Assign Call Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "get_comm_counts",
    "source_code": "def get_comm_counts(self) -> dict[Any, int]:\n    return self.comm_counts",
    "docstring": "Returns the communication counts as a dictionary. Returns: Dict[Any, int]: The communication counts as a dictionary.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\tensor\\debug\\_comm_mode.py",
    "ast_data": "FunctionDef name:get_comm_counts arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_sanitize_slices",
    "source_code": "def _sanitize_slices(slices, intended_shape, deficient_shape):\n    sanitized_slices = []\n    idx = 0\n    for slc in slices:\n        if slc is Ellipsis:\n            if idx < 0:\n                raise ValueError('Found multiple `...` in slices {}'.format(slices))\n            num_remaining_non_newaxis_slices = sum((s is not array_ops.newaxis for s in slices[slices.index(Ellipsis) + 1:]))\n            idx = -num_remaining_non_newaxis_slices\n        elif slc is array_ops.newaxis:\n            pass\n        else:\n            is_broadcast = intended_shape[idx] > deficient_shape[idx]\n            if isinstance(slc, slice):\n                start, stop, step = (slc.start, slc.stop, slc.step)\n                if start is not None:\n                    start = _prefer_static_where(is_broadcast, 0, start)\n                if stop is not None:\n                    stop = _prefer_static_where(is_broadcast, 1, stop)\n                if step is not None:\n                    step = _prefer_static_where(is_broadcast, 1, step)\n                slc = slice(start, stop, step)\n            else:\n                slc = _prefer_static_where(is_broadcast, 0, slc)\n            idx += 1\n        sanitized_slices.append(slc)\n    return sanitized_slices",
    "docstring": "Restricts slices to avoid overflowing size-1 (broadcast) dimensions. Args: slices: iterable of slices received by . intended_shape: int shape for which the slices were intended. deficient_shape: int shape to which the slices will be applied. Must have the same rank as . Returns: sanitized_slices: Python of slice objects.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\linalg\\slicing.py",
    "ast_data": "FunctionDef name:_sanitize_slices arg:slices arg:intended_shape arg:deficient_shape arguments arg arg arg Assign Assign For If Compare If Compare Raise Call Call Assign Call Compare Call Assign If Compare Assign Compare If Call Assign If Compare Assign Call If Compare Assign Call If Compare Assign Call Assign Call Assign Call Call Return return:yes"
  },
  {
    "library": "django",
    "name": "geometry_field",
    "source_code": "def geometry_field(self):\n    opts = self.model._meta\n    return opts.get_field(self.geom_field)",
    "docstring": "Return the GeometryField instance associated with the geographic column.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\utils\\layermapping.py",
    "ast_data": "FunctionDef name:geometry_field arg:self arguments arg Assign Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_copy_weights_to_original_model",
    "source_code": "def _copy_weights_to_original_model(model, mode):\n    if model._distribution_strategy and mode == ModeKeys.TRAIN:\n        distributed_model = get_distributed_model(model, mode)\n        updated_weights = model._distribution_strategy.unwrap(distributed_model)[0].get_weights()\n        model.set_weights(updated_weights)",
    "docstring": "Copies weights from first distributed model back to original model.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\distribute\\distributed_training_utils_v1.py",
    "ast_data": "FunctionDef name:_copy_weights_to_original_model arg:model arg:mode arguments arg arg If BoolOp Compare Assign Call Assign Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "ts_compile",
    "source_code": "@make_boxed_compiler\ndef ts_compile(fx_g: fx.GraphModule, inps) -> Callable:\n    with _disable_jit_autocast():\n        strip_overloads(fx_g)\n        for node in fx_g.graph.find_nodes(op='call_function', target=torch.ops.aten._to_copy):\n            if len(node.args) == 1 and len(node.kwargs) == 1 and ('dtype' in node.kwargs):\n                node.target = torch.ops.aten.to\n        for node in fx_g.graph.nodes:\n            new_kwargs = {}\n            for k, v in node.kwargs.items():\n                if isinstance(v, torch.device):\n                    v = v.type\n                new_kwargs[k] = v\n            node.kwargs = new_kwargs\n        fx_g.graph.lint()\n        fx_g.recompile()\n        f = torch.jit.script(fx_g)\n        torch._C._jit_pass_remove_mutation(f.graph)\n        f = torch.jit.freeze(f.eval())\n        f = torch.jit.optimize_for_inference(f)\n        if not any((isinstance(t, torch._subclasses.FakeTensor) for t in inps)):\n            f(*inps)\n    return f",
    "docstring": "Compiles the :attr: with Torchscript compiler. .. warning:: This API is experimental and likely to change. Args: fx_g(fx.GraphModule): The input Fx graph module to be compiled. Returns: Torch scripted model.",
    "type": "function",
    "file_path": "pytorch\\torch\\_functorch\\compilers.py",
    "ast_data": "FunctionDef name:ts_compile arg:fx_g arg:inps arguments arg arg With Call Call For Call If BoolOp Compare Call Compare Call Compare Assign For Assign For Call If Call Assign Assign Assign Call Call Assign Call Call Assign Call Call Assign Call If Call Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "set_element",
    "source_code": "def set_element(root_dict: STATE_DICT_TYPE, path: OBJ_PATH, value: STATE_DICT_ITEM) -> None:\n    cur_container = cast(CONTAINER_TYPE, root_dict)\n\n    def extend_list(lst: list[STATE_DICT_ITEM], idx: int) -> None:\n        while len(lst) <= idx:\n            lst.append(None)\n    for i in range(1, len(path)):\n        prev_key = path[i - 1]\n        key = path[i]\n        def_val = cast(STATE_DICT_ITEM, {} if type(key) == str else [])\n        if isinstance(cur_container, Mapping):\n            cur_container = cast(CONTAINER_TYPE, cur_container.setdefault(prev_key, def_val))\n        else:\n            extend_list(cur_container, prev_key)\n            if cur_container[prev_key] is None:\n                cur_container[prev_key] = def_val\n            cur_container = cur_container[prev_key]\n    key = path[-1]\n    if type(key) == int:\n        extend_list(cast(list[STATE_DICT_ITEM], cur_container), key)\n    cur_container[key] = value",
    "docstring": "Set `` object path.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\checkpoint\\_traverse.py",
    "ast_data": "FunctionDef name:set_element arg:root_dict arg:path arg:value arguments arg arg arg Assign Call FunctionDef name:extend_list arg:lst arg:idx arguments arg arg While Compare Call Call For Call Call Assign Assign Assign Call Compare Call If Call Assign Call Call Call If Compare Assign Assign Assign If Compare Call Call Call Assign"
  },
  {
    "library": "numpy",
    "name": "feature_names",
    "source_code": "def feature_names(self, names=None, force_flags=None, macros=[]):\n    assert names is None or (not isinstance(names, str) and hasattr(names, '__iter__'))\n    assert force_flags is None or isinstance(force_flags, list)\n    if names is None:\n        names = self.feature_supported.keys()\n    supported_names = set()\n    for f in names:\n        if self.feature_is_supported(f, force_flags=force_flags, macros=macros):\n            supported_names.add(f)\n    return supported_names",
    "docstring": "Returns a set of CPU feature names that supported by platform and the **C** compiler. Parameters ---------- names : sequence or None, optional Specify certain CPU features to test it against the **C** compiler. if None(default), it will test all current supported features. **Note**: feature names must be in upper-case. force_flags : list or None, optional If None(default), default compiler flags for every CPU feature will be used during the test. macros : list of tuples, optional A list of C macro definitions.",
    "type": "method",
    "file_path": "numpy\\numpy\\distutils\\ccompiler_opt.py",
    "ast_data": "FunctionDef name:feature_names arg:self arg:names arg:force_flags arg:macros arguments arg arg arg arg BoolOp Compare BoolOp Call Call BoolOp Compare Call If Compare Assign Call Assign Call For If Call Call Return return:yes"
  },
  {
    "library": "pandas",
    "name": "nunique",
    "source_code": "def nunique(self, dropna: bool=True) -> DataFrame:\n    return self._apply_to_column_groupbys(lambda sgb: sgb.nunique(dropna))",
    "docstring": "Return DataFrame with counts of unique elements in each position. Parameters ---------- dropna : bool, default True Don't include NaN in the counts. Returns ------- nunique: DataFrame Counts of unique elements in each position. See Also -------- DataFrame.nunique : Count number of distinct elements in specified axis. Examples -------- >>> df = pd.DataFrame( ... { ... \"id\": [\"spam\", \"egg\", \"egg\", \"spam\", \"ham\", \"ham\"], ... \"value1\": [1, 5, 5, 2, 5, 5], ... \"value2\": list(\"abbaxy\"), ... } ... ) >>> df id value1 value2 0 spam 1 a 1 egg 5 b 2 egg 5 b 3 spam 2 a 4 ham 5 x 5 ham 5 y >>> df.groupby(\"id\").nunique() value1 value2 id egg 1 1 ham 1 2 spam 2 1 Check for rows with the same id but conflicting values: >>> df.groupby(\"id\").filter(lambda g: (g.nunique() > 1).any()) id value1 value2 0 spam 1 a 3 spam 2 a 4 ham 5 x 5 ham 5 y",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\groupby\\generic.py",
    "ast_data": "FunctionDef name:nunique arg:self arg:dropna arguments arg arg Return return:yes Call arguments arg Call"
  },
  {
    "library": "seaborn",
    "name": "_lookup_single",
    "source_code": "def _lookup_single(self, key):\n    return self.lookup_table[key]",
    "docstring": "Apply the mapping to a single data value.",
    "type": "method",
    "file_path": "seaborn\\seaborn\\_base.py",
    "ast_data": "FunctionDef name:_lookup_single arg:self arg:key arguments arg arg Return return:yes"
  },
  {
    "library": "cherrypy",
    "name": "delete",
    "source_code": "def delete(self):\n    raise NotImplementedError",
    "docstring": "Remove ALL cached variants of the current resource.",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\lib\\caching.py",
    "ast_data": "FunctionDef name:delete arg:self arguments arg Raise"
  },
  {
    "library": "pytorch",
    "name": "preview_type_promotion",
    "source_code": "@abc.abstractmethod\ndef preview_type_promotion(self, args: tuple, kwargs: dict) -> TypePromotionSnapshot:\n    ...",
    "docstring": "Preview type promotion results for provided set of args and kwargs. Returns a TypePromotionSnapshot object that contains the promoted dtypes for the arguments and the expected output dtype.",
    "type": "method",
    "file_path": "pytorch\\torch\\onnx\\_internal\\fx\\passes\\type_promotion.py",
    "ast_data": "FunctionDef name:preview_type_promotion arg:self arg:args arg:kwargs arguments arg arg arg"
  },
  {
    "library": "matplotlib",
    "name": "set_axis_direction",
    "source_code": "def set_axis_direction(self, axis_direction):\n    self.major_ticklabels.set_axis_direction(axis_direction)\n    self.label.set_axis_direction(axis_direction)\n    self._axis_direction = axis_direction\n    if axis_direction in ['left', 'top']:\n        self.set_ticklabel_direction('-')\n        self.set_axislabel_direction('-')\n    else:\n        self.set_ticklabel_direction('+')\n        self.set_axislabel_direction('+')",
    "docstring": "Adjust the direction, text angle, and text alignment of tick labels and axis labels following the Matplotlib convention for the rectangle axes. The *axis_direction* must be one of [left, right, bottom, top]. ===================== ========== ========= ========== ========== Property left bottom right top ===================== ========== ========= ========== ========== ticklabel direction \"-\" \"+\" \"+\" \"-\" axislabel direction \"-\" \"+\" \"+\" \"-\" ticklabel angle 90 0 -90 180 ticklabel va center baseline center baseline ticklabel ha right center right center axislabel angle 180 0 0 180 axislabel va center top center bottom axislabel ha right center right center ===================== ========== ========= ========== ========== Note that the direction \"+\" and \"-\" are relative to the direction of the increasing coordinate. Also, the text angles are actually relative to (90 + angle of the direction to the ticklabel), which gives 0 for bottom axis. Parameters ---------- axis_direction : {\"left\", \"bottom\", \"right\", \"top\"}",
    "type": "method",
    "file_path": "matplotlib\\lib\\mpl_toolkits\\axisartist\\axis_artist.py",
    "ast_data": "FunctionDef name:set_axis_direction arg:self arg:axis_direction arguments arg arg Call Call Assign If Compare Call Call Call Call"
  },
  {
    "library": "numpy",
    "name": "wrap_unlinkable_objects",
    "source_code": "def wrap_unlinkable_objects(self, objects, output_dir, extra_dll_dir):\n    if self.c_compiler.compiler_type == 'msvc':\n        archives = []\n        plain_objects = []\n        for obj in objects:\n            if obj.lower().endswith('.a'):\n                archives.append(obj)\n            else:\n                plain_objects.append(obj)\n        chained_libs = []\n        chained_dlls = []\n        for archive in archives[::-1]:\n            lib, dll = self._link_wrapper_lib([archive], output_dir, extra_dll_dir, chained_dlls=chained_dlls, is_archive=True)\n            chained_libs.insert(0, lib)\n            chained_dlls.insert(0, dll)\n        if not plain_objects:\n            return chained_libs\n        lib, dll = self._link_wrapper_lib(plain_objects, output_dir, extra_dll_dir, chained_dlls=chained_dlls, is_archive=False)\n        return [lib] + chained_libs\n    else:\n        raise ValueError('Unsupported C compiler')",
    "docstring": "Convert a set of object files that are not compatible with the default linker, to a file that is compatible.",
    "type": "method",
    "file_path": "numpy\\numpy\\distutils\\fcompiler\\gnu.py",
    "ast_data": "FunctionDef name:wrap_unlinkable_objects arg:self arg:objects arg:output_dir arg:extra_dll_dir arguments arg arg arg arg If Compare Assign Assign For If Call Call Call Call Assign Assign For Assign Call Call Call If Return return:yes Assign Call Return return:yes Raise Call"
  },
  {
    "library": "tensorflow",
    "name": "_create_mlir_loc",
    "source_code": "def _create_mlir_loc(self, loc):\n    if loc is not None and loc.loc.filename:\n        file_name = os.path.basename(loc.loc.filename)\n        return 'loc(\"{}\":{}:{})'.format(file_name, loc.loc.lineno, loc.loc.col_offset)\n    else:\n        return 'loc(unknown)'",
    "docstring": "Creates mlir location from autograph ORIGIN value. Args: loc: OriginInfo Returns: A serialized mlir location string.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\compiler\\mlir\\tfr\\python\\tfr_gen.py",
    "ast_data": "FunctionDef name:_create_mlir_loc arg:self arg:loc arguments arg arg If BoolOp Compare Assign Call Return return:yes Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_Int64Codec",
    "source_code": "class _Int64Codec:\n\n    def can_encode(self, pyobj):\n        return not isinstance(pyobj, bool) and isinstance(pyobj, int)\n\n    def do_encode(self, int_value, encode_fn):\n        del encode_fn\n        value = struct_pb2.StructuredValue()\n        value.int64_value = int_value\n        return value\n\n    def can_decode(self, value):\n        return value.HasField('int64_value')\n\n    def do_decode(self, value, decode_fn):\n        del decode_fn\n        return int(value.int64_value)",
    "docstring": "Codec for Python integers (limited to 64 bit values).",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\saved_model\\nested_structure_coder.py",
    "ast_data": "ClassDef name:_Int64Codec FunctionDef name:can_encode arg:self arg:pyobj arguments arg arg Return return:yes BoolOp Call Call FunctionDef name:do_encode arg:self arg:int_value arg:encode_fn arguments arg arg arg Assign Call Assign Return return:yes FunctionDef name:can_decode arg:self arg:value arguments arg arg Return return:yes Call FunctionDef name:do_decode arg:self arg:value arg:decode_fn arguments arg arg arg Return return:yes Call"
  },
  {
    "library": "pygame",
    "name": "resource_exists",
    "source_code": "def resource_exists(_package_or_requirement, _resource_name):\n    return False",
    "docstring": "A stub for when we fail to import this function. :return: Always returns False",
    "type": "function",
    "file_path": "pygame\\src_py\\pkgdata.py",
    "ast_data": "FunctionDef name:resource_exists arg:_package_or_requirement arg:_resource_name arguments arg arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_summarize_eager",
    "source_code": "def _summarize_eager(tensor, summarize=None):\n    if summarize is None:\n        summarize = 3\n    elif summarize < 0:\n        summarize = array_ops.size(tensor)\n    if tensor._rank():\n        flat = tensor.numpy().reshape((-1,))\n        lst = [str(x) for x in flat[:summarize]]\n        if len(lst) < flat.size:\n            lst.append('...')\n    elif gen_math_ops.not_equal(summarize, 0):\n        lst = [str(tensor.numpy())]\n    else:\n        lst = []\n    return ', '.join(lst)",
    "docstring": "Returns a summarized string representation of eager . Args: tensor: EagerTensor to summarize summarize: Include these many first elements of",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\control_flow_assert.py",
    "ast_data": "FunctionDef name:_summarize_eager arg:tensor arg:summarize arguments arg arg If Compare Assign If Compare Assign Call If Call Assign Call Call Assign Call If Compare Call Call If Call Assign Call Call Assign Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "executions",
    "source_code": "def executions(self, digest=False, begin=None, end=None):\n    digests = self._execution_digests\n    if begin is not None or end is not None:\n        begin = begin or 0\n        end = end or len(digests)\n        digests = digests[begin:end]\n    if digest:\n        return digests\n    else:\n        return [self.read_execution(digest) for digest in digests]",
    "docstring": "Get s or s this reader has read so far. Args: digest: Whether the results are returned in a digest form, i.e., format, instead of the more detailed format. begin: Optional beginning index for the requested execution data objects or their digests. Python-style negative indices are supported. end: Optional ending index for the requested execution data objects or their digests. Python-style negative indices are supported. Returns: If : a of objects. Else: a of objects.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\debug\\lib\\debug_events_reader.py",
    "ast_data": "FunctionDef name:executions arg:self arg:digest arg:begin arg:end arguments arg arg arg arg Assign If BoolOp Compare Compare Assign BoolOp Assign BoolOp Call Assign If Return return:yes Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "_get_optimizer_constructor",
    "source_code": "def _get_optimizer_constructor(self, optimizer_class: Any) -> Any:\n    functional_optims = functional_optim_map.values()\n    if not self._overlap_with_ddp:\n        if optimizer_class in functional_optims:\n            raise ValueError(f'Passing in a functional optimizer {optimizer_class} when `overlap_with_ddp=False`')\n        else:\n            return optimizer_class\n    elif optimizer_class in functional_optims:\n        return optimizer_class\n    elif optimizer_class in functional_optim_map:\n        optim_constructor = functional_optim_map[optimizer_class]\n        logger.info('Using the functional optimizer %s instead of %s since `overlap_with_ddp=True`', optim_constructor, optimizer_class)\n        return optim_constructor\n    else:\n        raise ValueError(f'Using `ddp_with_overlap=True` requires using a functional optimizer, but there is no supported functional optimizer equivalent for {optimizer_class}')",
    "docstring": "Return the optimizer constructor using validation and transformation depending on `` is a functional optimizer.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\optim\\zero_redundancy_optimizer.py",
    "ast_data": "FunctionDef name:_get_optimizer_constructor arg:self arg:optimizer_class arguments arg arg Assign Call If If Compare Raise Call Return return:yes If Compare Return return:yes If Compare Assign Call Return return:yes Raise Call"
  },
  {
    "library": "tensorflow",
    "name": "handle",
    "source_code": "def handle(self, args, kwargs):\n    return self.NOT_SUPPORTED",
    "docstring": "Handle this dispatcher's operation with the specified arguments. If this operation dispatcher can handle the given arguments, then return an appropriate value (or raise an appropriate exception). Args: args: The arguments to the operation. kwargs: They keyword arguments to the operation. Returns: The result of the operation, or if this dispatcher can not handle the given arguments.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\util\\dispatch.py",
    "ast_data": "FunctionDef name:handle arg:self arg:args arg:kwargs arguments arg arg arg Return return:yes"
  },
  {
    "library": "pandas",
    "name": "_dispatch_frame_op",
    "source_code": "def _dispatch_frame_op(self, right, func: Callable, axis: AxisInt | None=None) -> DataFrame:\n    array_op = ops.get_array_op(func)\n    right = lib.item_from_zerodim(right)\n    if not is_list_like(right):\n        bm = self._mgr.apply(array_op, right=right)\n        return self._constructor_from_mgr(bm, axes=bm.axes)\n    elif isinstance(right, DataFrame):\n        assert self.index.equals(right.index)\n        assert self.columns.equals(right.columns)\n        bm = self._mgr.operate_blockwise(right._mgr, array_op)\n        return self._constructor_from_mgr(bm, axes=bm.axes)\n    elif isinstance(right, Series) and axis == 1:\n        assert right.index.equals(self.columns)\n        right = right._values\n        assert not isinstance(right, np.ndarray)\n        arrays = [array_op(_left, _right) for _left, _right in zip(self._iter_column_arrays(), right)]\n    elif isinstance(right, Series):\n        assert right.index.equals(self.index)\n        right = right._values\n        arrays = [array_op(left, right) for left in self._iter_column_arrays()]\n    else:\n        raise NotImplementedError(right)\n    return type(self)._from_arrays(arrays, self.columns, self.index, verify_integrity=False)",
    "docstring": "Evaluate the frame operation func(left, right) by evaluating column-by-column, dispatching to the Series implementation. Parameters ---------- right : scalar, Series, or DataFrame func : arithmetic or comparison operator axis : {None, 0, 1} Returns ------- DataFrame Notes ----- Caller is responsible for setting np.errstate where relevant.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\frame.py",
    "ast_data": "FunctionDef name:_dispatch_frame_op arg:self arg:right arg:func arg:axis arguments arg arg arg arg Assign Call Assign Call If Call Assign Call Return return:yes Call If Call Call Call Assign Call Return return:yes Call If BoolOp Call Compare Call Assign Call Assign Call Call Call If Call Call Assign Assign Call Call Raise Call Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "load_source",
    "source_code": "def load_source(source_file_path):\n    if os.path.isfile(source_file_path):\n        with open(source_file_path, 'rb') as f:\n            source_text = f.read().decode('utf-8')\n        source_lines = source_text.split('\\n')\n    else:\n        source_lines = _try_load_par_source(source_file_path)\n        if source_lines is None:\n            raise IOError('Source path neither exists nor can be loaded as a .par file: %s' % source_file_path)\n    line_num_width = int(np.ceil(np.log10(len(source_lines)))) + 3\n    return (source_lines, line_num_width)",
    "docstring": "Load the content of a Python source code file. This function covers the following case: 1. source_file_path points to an existing Python (.py) file on the file system. 2. source_file_path is a path within a .par file (i.e., a zip-compressed, self-contained Python executable). Args: source_file_path: Path to the Python source file to read. Returns: A length-2 tuple: - Lines of the source file, as a of s. - The width of the string needed to show the line number in the file. This is calculated based on the number of lines in the source file. Raises: IOError: if loading is unsuccessful.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\debug\\lib\\source_utils.py",
    "ast_data": "FunctionDef name:load_source arg:source_file_path arguments arg If Call With Call Assign Call Call Assign Call Assign Call If Compare Raise Call Assign Call Call Call Call Return return:yes"
  },
  {
    "library": "kornia",
    "name": "LinearRgbToRgb",
    "source_code": "class LinearRgbToRgb(Module):\n    ONNX_DEFAULT_INPUTSHAPE: ClassVar[list[int]] = [-1, 3, -1, -1]\n    ONNX_DEFAULT_OUTPUTSHAPE: ClassVar[list[int]] = [-1, 3, -1, -1]\n\n    def forward(self, image: Tensor) -> Tensor:\n        return linear_rgb_to_rgb(image)",
    "docstring": "Convert a linear RGB image to sRGB. Applies gamma correction to linear RGB values, at the end of colorspace conversions, to get sRGB. Returns: sRGB version of the image. Shape: - image: :math: - output: :math: Example: >>> input = torch.rand(2, 3, 4, 5) >>> srgb = LinearRgbToRgb() >>> output = srgb(input) # 2x3x4x5 References: [1] [2] [3]",
    "type": "class",
    "file_path": "kornia\\kornia\\color\\rgb.py",
    "ast_data": "ClassDef name:LinearRgbToRgb FunctionDef name:forward arg:self arg:image arguments arg arg Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "fit_intercept_only",
    "source_code": "def fit_intercept_only(self, y_true, sample_weight=None):\n    if sample_weight is None:\n        return np.median(y_true, axis=0)\n    else:\n        return _weighted_percentile(y_true, sample_weight, 50)",
    "docstring": "Compute raw_prediction of an intercept-only model. This is the weighted median of the target, i.e. over the samples axis=0.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\_loss\\loss.py",
    "ast_data": "FunctionDef name:fit_intercept_only arg:self arg:y_true arg:sample_weight arguments arg arg arg If Compare Return return:yes Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "_SubprocExceptionInfo",
    "source_code": "class _SubprocExceptionInfo:\n\n    def __init__(self, details: str) -> None:\n        self.details = details",
    "docstring": "Carries exception info from subprocesses across the wire. traceback objects are not pickleable, so we store the trace as a string and use it for the message in the exception thrown in the main process.",
    "type": "class",
    "file_path": "pytorch\\torch\\_inductor\\compile_worker\\subproc_pool.py",
    "ast_data": "ClassDef name:_SubprocExceptionInfo FunctionDef name:__init__ arg:self arg:details arguments arg arg Assign"
  },
  {
    "library": "pandas",
    "name": "_update_strl_names",
    "source_code": "def _update_strl_names(self) -> None:\n    pass",
    "docstring": "No-op, forward compatibility",
    "type": "method",
    "file_path": "pandas\\pandas\\io\\stata.py",
    "ast_data": "FunctionDef name:_update_strl_names arg:self arguments arg"
  },
  {
    "library": "tensorflow",
    "name": "_composition_must_be_self_adjoint",
    "source_code": "def _composition_must_be_self_adjoint(operators):\n    if len(operators) == 1 and operators[0].is_self_adjoint:\n        return True\n    if linear_operator_util.is_aat_form(operators):\n        return True\n    return False",
    "docstring": "Runs some checks to see if composition operators must be SA. Args: operators: List of LinearOperators. Returns: True if the composition must be SA. False if it is not SA OR if we did not determine whether the composition is SA.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\linalg\\linear_operator_composition.py",
    "ast_data": "FunctionDef name:_composition_must_be_self_adjoint arg:operators arguments arg If BoolOp Compare Call Return return:yes If Call Return return:yes Return return:yes"
  },
  {
    "library": "pandas",
    "name": "set_test_mode",
    "source_code": "def set_test_mode(v: bool=True) -> None:\n    global _TEST_MODE, _TEST_RESULT\n    _TEST_MODE = v\n    _TEST_RESULT = []",
    "docstring": "Keeps track of whether numexpr was used. Stores an additional ``.",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\computation\\expressions.py",
    "ast_data": "FunctionDef name:set_test_mode arg:v arguments arg Assign Assign"
  },
  {
    "library": "matplotlib",
    "name": "LambertTransform",
    "source_code": "class LambertTransform(_GeoTransform):\n\n    def __init__(self, center_longitude, center_latitude, resolution):\n        _GeoTransform.__init__(self, resolution)\n        self._center_longitude = center_longitude\n        self._center_latitude = center_latitude\n\n    def transform_non_affine(self, values):\n        longitude, latitude = values.T\n        clong = self._center_longitude\n        clat = self._center_latitude\n        cos_lat = np.cos(latitude)\n        sin_lat = np.sin(latitude)\n        diff_long = longitude - clong\n        cos_diff_long = np.cos(diff_long)\n        inner_k = np.maximum(1 + np.sin(clat) * sin_lat + np.cos(clat) * cos_lat * cos_diff_long, 1e-15)\n        k = np.sqrt(2 / inner_k)\n        x = k * cos_lat * np.sin(diff_long)\n        y = k * (np.cos(clat) * sin_lat - np.sin(clat) * cos_lat * cos_diff_long)\n        return np.column_stack([x, y])\n\n    def inverted(self):\n        return LambertAxes.InvertedLambertTransform(self._center_longitude, self._center_latitude, self._resolution)",
    "docstring": "The base Lambert transform.",
    "type": "class",
    "file_path": "matplotlib\\lib\\matplotlib\\projections\\geo.py",
    "ast_data": "ClassDef name:LambertTransform FunctionDef name:__init__ arg:self arg:center_longitude arg:center_latitude arg:resolution arguments arg arg arg arg Call Assign Assign FunctionDef name:transform_non_affine arg:self arg:values arguments arg arg Assign Assign Assign Assign Call Assign Call Assign Assign Call Assign Call Call Call Assign Call Assign Call Assign Call Call Return return:yes Call FunctionDef name:inverted arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "xyann",
    "source_code": "@property\ndef xyann(self):\n    return self.get_position()",
    "docstring": "The text position. See also *xytext* in .",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\text.py",
    "ast_data": "FunctionDef name:xyann arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "kornia",
    "name": "KeyNetHardNet",
    "source_code": "class KeyNetHardNet(LocalFeature):\n\n    def __init__(self, num_features: int=8000, upright: bool=False, device: Optional[Device]=None, scale_laf: float=1.0) -> None:\n        if device is None:\n            device = torch.device('cpu')\n        ori_module = PassLAF() if upright else LAFOrienter(angle_detector=OriNet(True))\n        detector = KeyNetDetector(True, num_features=num_features, ori_module=ori_module).to(device)\n        descriptor = LAFDescriptor(None, patch_size=32, grayscale_descriptor=True).to(device)\n        super().__init__(detector, descriptor, scale_laf)",
    "docstring": "Convenience module, which implements KeyNet detector + HardNet descriptor.",
    "type": "class",
    "file_path": "kornia\\kornia\\feature\\integrated.py",
    "ast_data": "ClassDef name:KeyNetHardNet FunctionDef name:__init__ arg:self arg:num_features arg:upright arg:device arg:scale_laf arguments arg arg arg arg arg If Compare Assign Call Assign Call Call Call Assign Call Call Assign Call Call Call Call"
  },
  {
    "library": "matplotlib",
    "name": "__init__",
    "source_code": "def __init__(self, vertices, codes=None, _interpolation_steps=1, closed=False, readonly=False):\n    vertices = _to_unmasked_float_array(vertices)\n    _api.check_shape((None, 2), vertices=vertices)\n    if codes is not None and len(vertices):\n        codes = np.asarray(codes, self.code_type)\n        if codes.ndim != 1 or len(codes) != len(vertices):\n            raise ValueError(f\"'codes' must be a 1D list or array with the same length of 'vertices'. Your vertices have shape {vertices.shape} but your codes have shape {codes.shape}\")\n        if len(codes) and codes[0] != self.MOVETO:\n            raise ValueError(f\"The first element of 'code' must be equal to 'MOVETO' ({self.MOVETO}).  Your first code is {codes[0]}\")\n    elif closed and len(vertices):\n        codes = np.empty(len(vertices), dtype=self.code_type)\n        codes[0] = self.MOVETO\n        codes[1:-1] = self.LINETO\n        codes[-1] = self.CLOSEPOLY\n    self._vertices = vertices\n    self._codes = codes\n    self._interpolation_steps = _interpolation_steps\n    self._update_values()\n    if readonly:\n        self._vertices.flags.writeable = False\n        if self._codes is not None:\n            self._codes.flags.writeable = False\n        self._readonly = True\n    else:\n        self._readonly = False",
    "docstring": "Create a new path with the given vertices and codes. Parameters ---------- vertices : (N, 2) array-like The path vertices, as an array, masked array or sequence of pairs. Masked values, if any, will be converted to NaNs, which are then handled correctly by the Agg PathIterator and other consumers of path data, such as :meth:. codes : array-like or None, optional N-length array of integers representing the codes of the path. If not None, codes must be the same length as vertices. If None, *vertices* will be treated as a series of line segments. _interpolation_steps : int, optional Used as a hint to certain projections, such as Polar, that this path should be linearly interpolated immediately before drawing. This attribute is primarily an implementation detail and is not intended for public use. closed : bool, optional If *codes* is None and closed is True, vertices will be treated as line segments of a closed polygon. Note that the last vertex will then be ignored (as the corresponding code will be set to ). readonly : bool, optional Makes the path behave in an immutable way and sets the vertices and codes as read-only arrays.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\path.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:vertices arg:codes arg:_interpolation_steps arg:closed arg:readonly arguments arg arg arg arg arg arg Assign Call Call If BoolOp Compare Call Assign Call If BoolOp Compare Compare Call Call Raise Call If BoolOp Call Compare Raise Call If BoolOp Call Assign Call Call Assign Assign Assign Assign Assign Assign Call If Assign If Compare Assign Assign Assign"
  },
  {
    "library": "tensorflow",
    "name": "tuple_shapes",
    "source_code": "@property\ndef tuple_shapes(self):\n    return self._tuple_shapes",
    "docstring": "Returns the shapes of the InfeedQueue tuple elements.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\tpu\\tpu_feed.py",
    "ast_data": "FunctionDef name:tuple_shapes arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "request_stop",
    "source_code": "def request_stop(self, ex=None):\n    self._coord.request_stop(ex=ex)",
    "docstring": "Request that the coordinator stop the threads. See . Args: ex: Optional , or Python tuple as returned by . If this is the first call to the corresponding exception is recorded and re-raised from .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\training\\supervisor.py",
    "ast_data": "FunctionDef name:request_stop arg:self arg:ex arguments arg arg Call"
  },
  {
    "library": "scipy",
    "name": "_onenorm_matrix_power_nnm",
    "source_code": "def _onenorm_matrix_power_nnm(A, p):\n    if int(p) != p or p < 0:\n        raise ValueError('expected non-negative integer p')\n    p = int(p)\n    if len(A.shape) != 2 or A.shape[0] != A.shape[1]:\n        raise ValueError('expected A to be like a square matrix')\n    v = np.ones((A.shape[0], 1), dtype=float)\n    M = A.T\n    for i in range(p):\n        v = M.dot(v)\n    return np.max(v)",
    "docstring": "Compute the 1-norm of a non-negative integer power of a non-negative matrix. Parameters ---------- A : a square ndarray or matrix or sparse arrays Input matrix with non-negative entries. p : non-negative integer The power to which the matrix is to be raised. Returns ------- out : float The 1-norm of the matrix power p of A.",
    "type": "function",
    "file_path": "scipy\\scipy\\sparse\\linalg\\_matfuncs.py",
    "ast_data": "FunctionDef name:_onenorm_matrix_power_nnm arg:A arg:p arguments arg arg If BoolOp Compare Call Compare Raise Call Assign Call If BoolOp Compare Call Compare Raise Call Assign Call Assign For Call Assign Call Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "export",
    "source_code": "def export(self) -> dict[str, Any]:\n    return {'apply': copy.copy(self._todo), 'table_attributes': self.table_attributes, 'table_styles': copy.copy(self.table_styles), 'hide_index': all(self.hide_index_), 'hide_columns': all(self.hide_columns_), 'hide_index_names': self.hide_index_names, 'hide_column_names': self.hide_column_names, 'css': copy.copy(self.css)}",
    "docstring": "Export the styles applied to the current Styler. Can be applied to a second Styler with `` Examples -------- >>> styler = pd.DataFrame([[1, 2], [3, 4]]).style >>> styler2 = pd.DataFrame([[9, 9, 9]]).style >>> styler.hide(axis=0).highlight_max(axis=1) # doctest: +SKIP >>> export = styler.export() >>> styler2.use(export) # doctest: +SKIP",
    "type": "method",
    "file_path": "pandas\\pandas\\io\\formats\\style.py",
    "ast_data": "FunctionDef name:export arg:self arguments arg Return return:yes Call Call Call Call Call"
  },
  {
    "library": "pandas",
    "name": "get_rows",
    "source_code": "def get_rows(self, infer_nrows: int, skiprows: set[int] | None=None) -> list[str]:\n    if skiprows is None:\n        skiprows = set()\n    buffer_rows = []\n    detect_rows = []\n    for i, row in enumerate(self.f):\n        if i not in skiprows:\n            detect_rows.append(row)\n        buffer_rows.append(row)\n        if len(detect_rows) >= infer_nrows:\n            break\n    self.buffer = iter(buffer_rows)\n    return detect_rows",
    "docstring": "Read rows from self.f, skipping as specified. We distinguish buffer_rows (the first <= infer_nrows lines) from the rows returned to detect_colspecs because it's simpler to leave the other locations with skiprows logic alone than to modify them to deal with the fact we skipped some rows here as well. Parameters ---------- infer_nrows : int Number of rows to read from self.f, not counting rows that are skipped. skiprows: set, optional Indices of rows to skip. Returns ------- detect_rows : list of str A list containing the rows to read.",
    "type": "method",
    "file_path": "pandas\\pandas\\io\\parsers\\python_parser.py",
    "ast_data": "FunctionDef name:get_rows arg:self arg:infer_nrows arg:skiprows arguments arg arg arg If Compare Assign Call Assign Assign For Call If Compare Call Call If Compare Call Assign Call Return return:yes"
  },
  {
    "library": "numpy",
    "name": "NVHPCFCompiler",
    "source_code": "class NVHPCFCompiler(FCompiler):\n    compiler_type = 'nv'\n    description = 'NVIDIA HPC SDK'\n    version_pattern = '\\\\s*(nvfortran|.+ \\\\(aka nvfortran\\\\)) (?P<version>[\\\\d.-]+).*'\n    executables = {'version_cmd': ['<F90>', '-V'], 'compiler_f77': ['nvfortran'], 'compiler_fix': ['nvfortran', '-Mfixed'], 'compiler_f90': ['nvfortran'], 'linker_so': ['<F90>'], 'archiver': ['ar', '-cr'], 'ranlib': ['ranlib']}\n    pic_flags = ['-fpic']\n    module_dir_switch = '-module '\n    module_include_switch = '-I'\n\n    def get_flags(self):\n        opt = ['-Minform=inform', '-Mnosecond_underscore']\n        return self.pic_flags + opt\n\n    def get_flags_opt(self):\n        return ['-fast']\n\n    def get_flags_debug(self):\n        return ['-g']\n\n    def get_flags_linker_so(self):\n        return ['-shared', '-fpic']\n\n    def runtime_library_dir_option(self, dir):\n        return '-R%s' % dir",
    "docstring": "NVIDIA High Performance Computing (HPC) SDK Fortran Compiler Since august 2020 the NVIDIA HPC SDK includes the compilers formerly known as The Portland Group compilers, See also .",
    "type": "class",
    "file_path": "numpy\\numpy\\distutils\\fcompiler\\nv.py",
    "ast_data": "ClassDef name:NVHPCFCompiler Assign Assign Assign Assign Assign Assign Assign FunctionDef name:get_flags arg:self arguments arg Assign Return return:yes FunctionDef name:get_flags_opt arg:self arguments arg Return return:yes FunctionDef name:get_flags_debug arg:self arguments arg Return return:yes FunctionDef name:get_flags_linker_so arg:self arguments arg Return return:yes FunctionDef name:runtime_library_dir_option arg:self arg:dir arguments arg arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "summary_writer",
    "source_code": "@property\ndef summary_writer(self):\n    return self._summary_writer",
    "docstring": "Return the SummaryWriter used by the chief supervisor. Returns: A SummaryWriter.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\training\\supervisor.py",
    "ast_data": "FunctionDef name:summary_writer arg:self arguments arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_SequentialSharder",
    "source_code": "class _SequentialSharder(_LoadBalancer):\n\n    @classmethod\n    def shard(cls, buffer: torch.Tensor, mesh: DeviceMesh, seq_dim: int) -> torch.Tensor:\n        assert buffer.size()[seq_dim] % mesh.size() == 0\n        return buffer.chunk(mesh.size(), dim=seq_dim)[mesh.get_local_rank()]\n\n    @classmethod\n    def unshard(cls, buffer: torch.Tensor, mesh: DeviceMesh, seq_dim: int) -> torch.Tensor:\n        buffer = buffer.contiguous()\n        all_buffers = [torch.empty_like(buffer) for _ in range(mesh.size())]\n        ft_c.all_gather_inplace(all_buffers, buffer, mesh)\n        return torch.cat(all_buffers, dim=seq_dim)",
    "docstring": "This load balancer chunks the buffer into cp_world_size and rank0 gets 0th shard, rank1 gets 1st shard, ... So this doesn't have any load balancing effect when using the causal masking.",
    "type": "class",
    "file_path": "pytorch\\torch\\distributed\\tensor\\experimental\\_attention.py",
    "ast_data": "ClassDef name:_SequentialSharder FunctionDef name:shard arg:cls arg:buffer arg:mesh arg:seq_dim arguments arg arg arg arg Compare Call Call Return return:yes Call Call Call FunctionDef name:unshard arg:cls arg:buffer arg:mesh arg:seq_dim arguments arg arg arg arg Assign Call Assign Call Call Call Call Return return:yes Call"
  },
  {
    "library": "sphinx",
    "name": "get_all_class_names",
    "source_code": "def get_all_class_names(self) -> list[str]:\n    return [fullname for _, fullname, _, _ in self.class_info]",
    "docstring": "Get all of the class names involved in the graph.",
    "type": "method",
    "file_path": "sphinx\\sphinx\\ext\\inheritance_diagram.py",
    "ast_data": "FunctionDef name:get_all_class_names arg:self arguments arg Return return:yes"
  },
  {
    "library": "pandas",
    "name": "sheets",
    "source_code": "@property\ndef sheets(self) -> dict[str, Any]:\n    result = {name: self.book[name] for name in self.book.sheetnames}\n    return result",
    "docstring": "Mapping of sheet names to sheet objects.",
    "type": "method",
    "file_path": "pandas\\pandas\\io\\excel\\_openpyxl.py",
    "ast_data": "FunctionDef name:sheets arg:self arguments arg Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, sess, grpc_debug_server_addresses, thread_name_filter=None, send_traceback_and_source_code=True):\n\n    def _gated_grpc_watch_fn(fetches, feeds):\n        del fetches, feeds\n        return framework.WatchOptions(debug_ops=['DebugIdentity(gated_grpc=true)'])\n    super().__init__(sess, grpc_debug_server_addresses, watch_fn=_gated_grpc_watch_fn, thread_name_filter=thread_name_filter)\n    self._send_traceback_and_source_code = send_traceback_and_source_code\n    self._sent_graph_version = -1\n    register_signal_handler()",
    "docstring": "Constructor of TensorBoardDebugWrapperSession. Args: sess: The instance to be wrapped. grpc_debug_server_addresses: gRPC address(es) of debug server(s), as a or a of s. E.g., \"localhost:2333\", \"grpc://localhost:2333\", [\"192.168.0.7:2333\", \"192.168.0.8:2333\"]. thread_name_filter: Optional filter for thread names. send_traceback_and_source_code: Whether traceback of graph elements and the source code are to be sent to the debug server(s).",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\debug\\wrappers\\grpc_wrapper.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:sess arg:grpc_debug_server_addresses arg:thread_name_filter arg:send_traceback_and_source_code arguments arg arg arg arg arg FunctionDef name:_gated_grpc_watch_fn arg:fetches arg:feeds arguments arg arg Return return:yes Call Call Call Assign Assign Call"
  },
  {
    "library": "scrapy",
    "name": "follow",
    "source_code": "def follow(self, url: str | Link, callback: CallbackT | None=None, method: str='GET', headers: Mapping[AnyStr, Any] | Iterable[tuple[AnyStr, Any]] | None=None, body: bytes | str | None=None, cookies: CookiesT | None=None, meta: dict[str, Any] | None=None, encoding: str | None='utf-8', priority: int=0, dont_filter: bool=False, errback: Callable[[Failure], Any] | None=None, cb_kwargs: dict[str, Any] | None=None, flags: list[str] | None=None) -> Request:\n    if encoding is None:\n        raise ValueError(\"encoding can't be None\")\n    if isinstance(url, Link):\n        url = url.url\n    elif url is None:\n        raise ValueError(\"url can't be None\")\n    url = self.urljoin(url)\n    return Request(url=url, callback=callback, method=method, headers=headers, body=body, cookies=cookies, meta=meta, encoding=encoding, priority=priority, dont_filter=dont_filter, errback=errback, cb_kwargs=cb_kwargs, flags=flags)",
    "docstring": "Return a :class: instance to follow a link `~scrapy.link.Link~.TextResponse~.TextResponse.follow` method which supports selectors in addition to absolute/relative URLs and Link objects. .. versionadded:: 2.0 The *flags* parameter.",
    "type": "method",
    "file_path": "scrapy\\scrapy\\http\\response\\__init__.py",
    "ast_data": "FunctionDef name:follow arg:self arg:url arg:callback arg:method arg:headers arg:body arg:cookies arg:meta arg:encoding arg:priority arg:dont_filter arg:errback arg:cb_kwargs arg:flags arguments arg arg arg arg arg arg arg arg arg arg arg arg arg arg If Compare Raise Call If Call Assign If Compare Raise Call Assign Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "disable_observer",
    "source_code": "def disable_observer(mod):\n    if isinstance(mod, FakeQuantizeBase) or _is_fake_quant_script_module(mod):\n        mod.disable_observer()",
    "docstring": "Disable observation for this module. Disable observation for this module, if applicable. Example usage:: # model is any PyTorch model model.apply(torch.ao.quantization.disable_observer)",
    "type": "function",
    "file_path": "pytorch\\torch\\ao\\quantization\\fake_quantize.py",
    "ast_data": "FunctionDef name:disable_observer arg:mod arguments arg If BoolOp Call Call Call"
  },
  {
    "library": "sphinx",
    "name": "height",
    "source_code": "@property\ndef height(self) -> int:\n    height = 0\n    while self.table.cells[self.row + height, self.col] == self.cell_id:\n        height += 1\n    return height",
    "docstring": "Returns the cell height.",
    "type": "method",
    "file_path": "sphinx\\sphinx\\writers\\latex.py",
    "ast_data": "FunctionDef name:height arg:self arguments arg Assign While Compare Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "skip_summary",
    "source_code": "def skip_summary():\n    replica_context = distribute_lib.get_replica_context()\n    if not replica_context:\n        return False\n    replica_id = replica_context.replica_id_in_sync_group\n    if isinstance(replica_id, tensor.Tensor):\n        replica_id = tensor_util.constant_value(replica_id)\n    return replica_id and replica_id > 0",
    "docstring": "Determines if summary should be skipped. If using multiple replicas in distributed strategy, skip summaries on all replicas except the first one (replica_id=0). Returns: True if the summary is skipped; False otherwise.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\summary_op_util.py",
    "ast_data": "FunctionDef name:skip_summary arguments Assign Call If Return return:yes Assign If Call Assign Call Return return:yes BoolOp Compare"
  },
  {
    "library": "tensorflow",
    "name": "_replica_id",
    "source_code": "@property\ndef _replica_id(self):\n    return self._local_replica_id",
    "docstring": "This is the local replica id in a given sync group.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\distribute_lib.py",
    "ast_data": "FunctionDef name:_replica_id arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "build",
    "source_code": "def build(self):\n\n    def _create_per_worker_dataset():\n        dataset = self._dataset_fn()\n        return dataset\n    per_worker_dataset = self._coordinator._create_per_worker_resources(_create_per_worker_dataset)\n    dataset_fn_output_type_spec = self._dataset_fn.structured_outputs._type_spec\n    for dataset_remote_value in per_worker_dataset._values:\n        dataset_remote_value._type_spec = dataset_fn_output_type_spec\n    return per_worker_dataset",
    "docstring": "Trigger dataset creation on workers without creating an iterator. Returns: A PerWorkerValues object containing a tuple of RemoteValues, themselves containing the built Dataset for each worker",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\coordinator\\values.py",
    "ast_data": "FunctionDef name:build arg:self arguments arg FunctionDef name:_create_per_worker_dataset arguments Assign Call Return return:yes Assign Call Assign For Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_MirroredSaveable",
    "source_code": "class _MirroredSaveable(saveable_object.SaveableObject):\n\n    def __init__(self, mirrored_variable, primary_variable, name):\n        self._mirrored_variable = mirrored_variable\n        tensor, spec = values_util.get_on_write_saveable(self._mirrored_variable, primary_variable, name)\n        super(_MirroredSaveable, self).__init__(tensor, spec, name)\n\n    def restore(self, restored_tensors, restored_shapes):\n        tensor, = restored_tensors\n        return values_util.get_on_write_restore_ops(self._mirrored_variable, tensor)",
    "docstring": "Class for defining how to restore a MirroredVariable.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\values.py",
    "ast_data": "ClassDef name:_MirroredSaveable FunctionDef name:__init__ arg:self arg:mirrored_variable arg:primary_variable arg:name arguments arg arg arg arg Assign Assign Call Call Call FunctionDef name:restore arg:self arg:restored_tensors arg:restored_shapes arguments arg arg arg Assign Return return:yes Call"
  },
  {
    "library": "cherrypy",
    "name": "md5_hex",
    "source_code": "def md5_hex(s):\n    return md5(ntob(s, 'utf-8')).hexdigest()",
    "docstring": "Return hexdigest of md5sum.",
    "type": "function",
    "file_path": "cherrypy\\cherrypy\\lib\\auth_digest.py",
    "ast_data": "FunctionDef name:md5_hex arg:s arguments arg Return return:yes Call Call Call"
  },
  {
    "library": "scipy",
    "name": "_drv2_moment",
    "source_code": "def _drv2_moment(self, n, *args):\n\n    def fun(x):\n        return np.power(x, n) * self._pmf(x, *args)\n    _a, _b = self._get_support(*args)\n    return _expect(fun, _a, _b, self._ppf(0.5, *args), self.inc)",
    "docstring": "Non-central moment of discrete distribution.",
    "type": "function",
    "file_path": "scipy\\scipy\\stats\\_distn_infrastructure.py",
    "ast_data": "FunctionDef name:_drv2_moment arg:self arg:n arguments arg arg arg FunctionDef name:fun arg:x arguments arg Return return:yes Call Call Assign Call Return return:yes Call Call"
  },
  {
    "library": "django",
    "name": "wkt",
    "source_code": "@property\ndef wkt(self):\n    return capi.to_wkt(self.ptr, byref(c_char_p()))",
    "docstring": "Return the WKT representation of this Spatial Reference.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\gdal\\srs.py",
    "ast_data": "FunctionDef name:wkt arg:self arguments arg Return return:yes Call Call Call"
  },
  {
    "library": "scikit-learn",
    "name": "check_number_of_labels",
    "source_code": "def check_number_of_labels(n_labels, n_samples):\n    if not 1 < n_labels < n_samples:\n        raise ValueError('Number of labels is %d. Valid values are 2 to n_samples - 1 (inclusive)' % n_labels)",
    "docstring": "Check that number of labels are valid. Parameters ---------- n_labels : int Number of labels. n_samples : int Number of samples.",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\metrics\\cluster\\_unsupervised.py",
    "ast_data": "FunctionDef name:check_number_of_labels arg:n_labels arg:n_samples arguments arg arg If Compare Raise Call"
  },
  {
    "library": "tensorflow",
    "name": "_convert_fields",
    "source_code": "def _convert_fields(fields, field_values, context):\n    converted = {}\n    if len(fields) != len(field_values):\n        _report_field_mismatches(fields, field_values)\n    for field in fields:\n        if field.name not in field_values:\n            _report_field_mismatches(fields, field_values)\n        field_value = field_values[field.name]\n        converted[field.name] = _convert_value(field_value, field.value_type, (field.name,), context)\n    field_values.update(converted)",
    "docstring": "Type-checks and converts each field in (in place). Args: fields: A list of objects. field_values: A mapping field names to values. Must contain an entry for each field. I.e., must be equal to . context: _ConversionContext, indicates what kind of value we are converting. Raises: ValueError: If the keys of do not match the names of the fields in . TypeError: If any value in does not have the type indicated by the corresponding object.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\extension_type_field.py",
    "ast_data": "FunctionDef name:_convert_fields arg:fields arg:field_values arg:context arguments arg arg arg Assign If Compare Call Call Call For If Compare Call Assign Assign Call Call"
  },
  {
    "library": "matplotlib",
    "name": "demo",
    "source_code": "@staticmethod\ndef demo():\n    import numpy as np\n    import matplotlib.pyplot as plt\n\n    def plot_angle(ax, x, y, angle, style):\n        phi = np.radians(angle)\n        xx = [x + 0.5, x, x + 0.5 * np.cos(phi)]\n        yy = [y, y, y + 0.5 * np.sin(phi)]\n        ax.plot(xx, yy, lw=12, color='tab:blue', solid_joinstyle=style)\n        ax.plot(xx, yy, lw=1, color='black')\n        ax.plot(xx[1], yy[1], 'o', color='tab:red', markersize=3)\n    fig, ax = plt.subplots(figsize=(5, 4), constrained_layout=True)\n    ax.set_title('Join style')\n    for x, style in enumerate(['miter', 'round', 'bevel']):\n        ax.text(x, 5, style)\n        for y, angle in enumerate([20, 45, 60, 90, 120]):\n            plot_angle(ax, x, y, angle, style)\n            if x == 0:\n                ax.text(-1.3, y, f'{angle} degrees')\n    ax.set_xlim(-1.5, 2.75)\n    ax.set_ylim(-0.5, 5.5)\n    ax.set_axis_off()\n    fig.show()",
    "docstring": "Demonstrate how each JoinStyle looks for various join angles.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\_enums.py",
    "ast_data": "FunctionDef name:demo arguments FunctionDef name:plot_angle arg:ax arg:x arg:y arg:angle arg:style arguments arg arg arg arg arg Assign Call Assign Call Assign Call Call Call Call Assign Call Call For Call Call For Call Call If Compare Call Call Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "create_node_from_old_node_preserve_meta",
    "source_code": "def create_node_from_old_node_preserve_meta(quantized_graph: Graph, create_node_args: tuple[Any, ...], old_node: Node) -> Node:\n    new_node = quantized_graph.create_node(*create_node_args)\n    new_node.stack_trace = old_node.stack_trace\n    return new_node",
    "docstring": "Creates and copies the necessary metadata to it from .",
    "type": "function",
    "file_path": "pytorch\\torch\\ao\\quantization\\fx\\utils.py",
    "ast_data": "FunctionDef name:create_node_from_old_node_preserve_meta arg:quantized_graph arg:create_node_args arg:old_node arguments arg arg arg Assign Call Assign Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "range_pop",
    "source_code": "def range_pop():\n    return _itt.rangePop()",
    "docstring": "Pops a range off of a stack of nested range spans. Returns the zero-based depth of the range that is ended.",
    "type": "function",
    "file_path": "pytorch\\torch\\profiler\\itt.py",
    "ast_data": "FunctionDef name:range_pop arguments Return return:yes Call"
  },
  {
    "library": "django",
    "name": "_clear_cached_properties",
    "source_code": "def _clear_cached_properties(self, setting, **kwargs):\n    if setting == 'MEDIA_ROOT':\n        self.__dict__.pop('base_location', None)\n        self.__dict__.pop('location', None)\n    elif setting == 'MEDIA_URL':\n        self.__dict__.pop('base_url', None)\n    elif setting == 'FILE_UPLOAD_PERMISSIONS':\n        self.__dict__.pop('file_permissions_mode', None)\n    elif setting == 'FILE_UPLOAD_DIRECTORY_PERMISSIONS':\n        self.__dict__.pop('directory_permissions_mode', None)",
    "docstring": "Reset setting based property values.",
    "type": "method",
    "file_path": "django\\django\\core\\files\\storage\\mixins.py",
    "ast_data": "FunctionDef name:_clear_cached_properties arg:self arg:setting arguments arg arg arg If Compare Call Call If Compare Call If Compare Call If Compare Call"
  },
  {
    "library": "pytorch",
    "name": "size_based_auto_wrap_policy",
    "source_code": "def size_based_auto_wrap_policy(module: nn.Module, recurse: bool, nonwrapped_numel: int, min_num_params: int=int(100000000.0), force_leaf_modules: Optional[set[type[nn.Module]]]=None, exclude_wrap_modules: Optional[set[type[nn.Module]]]=None) -> bool:\n    force_leaf_modules = size_based_auto_wrap_policy.FORCE_LEAF_MODULES if force_leaf_modules is None else force_leaf_modules\n    exclude_wrap_modules = size_based_auto_wrap_policy.EXCLUDE_WRAP_MODULES if exclude_wrap_modules is None else exclude_wrap_modules\n    min_nonwrapped_numel = min_num_params\n    is_large = nonwrapped_numel >= min_nonwrapped_numel\n    if recurse:\n        return is_large and (not isinstance(module, tuple(force_leaf_modules)))\n    else:\n        return is_large and (not isinstance(module, tuple(exclude_wrap_modules)))",
    "docstring": "A size-based auto wrap policy. Args: module (nn.Module): Current module being considered. recurse (bool): If `` should be wrapped.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\fsdp\\wrap.py",
    "ast_data": "FunctionDef name:size_based_auto_wrap_policy arg:module arg:recurse arg:nonwrapped_numel arg:min_num_params arg:force_leaf_modules arg:exclude_wrap_modules arguments arg arg arg arg arg arg Call Assign Compare Assign Compare Assign Assign Compare If Return return:yes BoolOp Call Call Return return:yes BoolOp Call Call"
  },
  {
    "library": "scikit-learn",
    "name": "_fix_connectivity",
    "source_code": "def _fix_connectivity(X, connectivity, affinity):\n    n_samples = X.shape[0]\n    if connectivity.shape[0] != n_samples or connectivity.shape[1] != n_samples:\n        raise ValueError('Wrong shape for connectivity matrix: %s when X is %s' % (connectivity.shape, X.shape))\n    connectivity = connectivity + connectivity.T\n    if not sparse.issparse(connectivity):\n        connectivity = sparse.lil_matrix(connectivity)\n    if connectivity.format != 'lil':\n        connectivity = connectivity.tolil()\n    n_connected_components, labels = connected_components(connectivity)\n    if n_connected_components > 1:\n        warnings.warn('the number of connected components of the connectivity matrix is %d > 1. Completing it to avoid stopping the tree early.' % n_connected_components, stacklevel=2)\n        connectivity = _fix_connected_components(X=X, graph=connectivity, n_connected_components=n_connected_components, component_labels=labels, metric=affinity, mode='connectivity')\n    return (connectivity, n_connected_components)",
    "docstring": "Fixes the connectivity matrix. The different steps are: - copies it - makes it symmetric - converts it to LIL if necessary - completes it if necessary. Parameters ---------- X : array-like of shape (n_samples, n_features) Feature matrix representing samples to be clustered. connectivity : sparse matrix, default=None Connectivity matrix. Defines for each sample the neighboring samples following a given structure of the data. The matrix is assumed to be symmetric and only the upper triangular half is used. Default is , i.e, the Ward algorithm is unstructured. affinity : {\"euclidean\", \"precomputed\"}, default=\"euclidean\" Which affinity to use. At the moment and `euclidean` uses the negative squared Euclidean distance between points. Returns ------- connectivity : sparse matrix The fixed connectivity matrix. n_connected_components : int The number of connected components in the graph.",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\cluster\\_agglomerative.py",
    "ast_data": "FunctionDef name:_fix_connectivity arg:X arg:connectivity arg:affinity arguments arg arg arg Assign If BoolOp Compare Compare Raise Call Assign If Call Assign Call If Compare Assign Call Assign Call If Compare Call Assign Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "synchronize",
    "source_code": "def synchronize() -> None:\n    return torch._C._mps_deviceSynchronize()",
    "docstring": "Waits for all kernels in all streams on a MPS device to complete.",
    "type": "function",
    "file_path": "pytorch\\torch\\mps\\__init__.py",
    "ast_data": "FunctionDef name:synchronize arguments Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_CachedClassProperty",
    "source_code": "class _CachedClassProperty(object):\n\n    def __init__(self, func):\n        self._func = func\n        self._cache = {}\n\n    def __get__(self, obj, objtype):\n        if objtype not in self._cache:\n            self._cache[objtype] = self._func(objtype)\n        return self._cache[objtype]\n\n    def __set__(self, obj, value):\n        raise AttributeError('property %s is read-only' % self._func.__name__)\n\n    def __delete__(self, obj):\n        raise AttributeError('property %s is read-only' % self._func.__name__)",
    "docstring": "Cached class property decorator. Transforms a class method into a property whose value is computed once and then cached as a normal attribute for the life of the class. Example usage: >>> class MyClass(object): ... @cached_classproperty ... def value(cls): ... print(\"Computing value\") ... return '' % cls.__name__ >>> class MySubclass(MyClass): ... pass >>> MyClass.value Computing value '' >>> MyClass.value # uses cached value '' >>> MySubclass.value Computing value '' This decorator is similar to , but it adds a property to the class, not to individual instances.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\util\\decorator_utils.py",
    "ast_data": "ClassDef name:_CachedClassProperty FunctionDef name:__init__ arg:self arg:func arguments arg arg Assign Assign FunctionDef name:__get__ arg:self arg:obj arg:objtype arguments arg arg arg If Compare Assign Call Return return:yes FunctionDef name:__set__ arg:self arg:obj arg:value arguments arg arg arg Raise Call FunctionDef name:__delete__ arg:self arg:obj arguments arg arg Raise Call"
  },
  {
    "library": "pygame",
    "name": "_parse_font_entry_win",
    "source_code": "def _parse_font_entry_win(name, font, fonts):\n    true_type_suffix = '(TrueType)'\n    mods = ('demibold', 'narrow', 'light', 'unicode', 'bt', 'mt')\n    if name.endswith(true_type_suffix):\n        name = name.rstrip(true_type_suffix).rstrip()\n    name = name.lower().split()\n    bold = italic = False\n    for mod in mods:\n        if mod in name:\n            name.remove(mod)\n    if 'bold' in name:\n        name.remove('bold')\n        bold = True\n    if 'italic' in name:\n        name.remove('italic')\n        italic = True\n    name = ''.join(name)\n    name = _simplename(name)\n    _addfont(name, bold, italic, font, fonts)",
    "docstring": "Parse out a simpler name and the font style from the initial file name. :param name: The font name :param font: The font file path :param fonts: The pygame font dictionary",
    "type": "function",
    "file_path": "pygame\\src_py\\sysfont.py",
    "ast_data": "FunctionDef name:_parse_font_entry_win arg:name arg:font arg:fonts arguments arg arg arg Assign Assign If Call Assign Call Call Assign Call Call Assign For If Compare Call If Compare Call Assign If Compare Call Assign Assign Call Assign Call Call"
  },
  {
    "library": "pandas",
    "name": "flatten",
    "source_code": "def flatten(self) -> Series:\n    from pandas import Series\n    counts = pa.compute.list_value_length(self._pa_array)\n    flattened = pa.compute.list_flatten(self._pa_array)\n    index = self._data.index.repeat(counts.fill_null(pa.scalar(0, counts.type)))\n    return Series(flattened, dtype=ArrowDtype(flattened.type), index=index, name=self._data.name)",
    "docstring": "Flatten list values. Returns ------- pandas.Series The data from all lists in the series flattened. See Also -------- ListAccessor.__getitem__ : Index or slice values in the Series. Examples -------- >>> import pyarrow as pa >>> s = pd.Series( ... [ ... [1, 2, 3], ... [3], ... ], ... dtype=pd.ArrowDtype(pa.list_(pa.int64())), ... ) >>> s.list.flatten() 0 1 0 2 0 3 1 3 dtype: int64[pyarrow]",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\arrays\\arrow\\accessors.py",
    "ast_data": "FunctionDef name:flatten arg:self arguments arg Assign Call Assign Call Assign Call Call Call Return return:yes Call Call"
  },
  {
    "library": "scipy",
    "name": "_pre_padding",
    "source_code": "@cache\ndef _pre_padding(self) -> tuple[int, int]:\n    w2 = self.win.real ** 2 + self.win.imag ** 2\n    n0 = -self.m_num_mid\n    for q_, n_ in enumerate(range(n0, n0 - self.m_num - 1, -self.hop)):\n        n_next = n_ - self.hop\n        if n_next + self.m_num <= 0 or all(w2[n_next:] == 0):\n            return (n_, -q_)\n    raise RuntimeError('This is code line should not have been reached!')",
    "docstring": "Smallest signal index and slice index due to padding. Since, per convention, for time t=0, n,q is zero, the returned values are negative or zero.",
    "type": "method",
    "file_path": "scipy\\scipy\\signal\\_short_time_fft.py",
    "ast_data": "FunctionDef name:_pre_padding arg:self arguments arg Assign Assign For Call Call Assign If BoolOp Compare Call Compare Return return:yes Raise Call"
  },
  {
    "library": "pytorch",
    "name": "_basic_validation",
    "source_code": "def _basic_validation(op, args=(), kwargs=None):\n    from torch.distributed._shard.sharded_tensor import ShardedTensor\n    if len(args) == 0 and (kwargs is None or len(kwargs) == 0):\n        raise ValueError(f\" No input for '{op.__name__}'!\")\n    has_distributed_tensor = False\n\n    def is_distributed_tensor(e):\n        nonlocal has_distributed_tensor\n        if isinstance(e, ShardedTensor):\n            has_distributed_tensor = True\n    pytree.tree_map_(is_distributed_tensor, args)\n    pytree.tree_map_(is_distributed_tensor, kwargs)\n    if not has_distributed_tensor:\n        raise TypeError(f\"torch function '{op.__name__}', with args: {args} and kwargs: {kwargs} are called without any distributed tensor!\")\n    cur_pg: Optional[torch.distributed.ProcessGroup] = None\n\n    def validate_pg(e):\n        nonlocal cur_pg\n        if isinstance(e, ShardedTensor):\n            if cur_pg is not None and e._process_group is not cur_pg:\n                raise RuntimeError('All distributed tensors should use the same ProcessGroup if used together in an op.')\n            cur_pg = e._process_group\n    pytree.tree_map_(validate_pg, args)\n    pytree.tree_map_(validate_pg, kwargs)",
    "docstring": "Common validation across all ops go in here.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\_shard\\common_op_utils.py",
    "ast_data": "FunctionDef name:_basic_validation arg:op arg:args arg:kwargs arguments arg arg arg If BoolOp Compare Call BoolOp Compare Compare Call Raise Call Assign FunctionDef name:is_distributed_tensor arg:e arguments arg If Call Assign Call Call If Raise Call FunctionDef name:validate_pg arg:e arguments arg If Call If BoolOp Compare Compare Raise Call Assign Call Call"
  },
  {
    "library": "pandas",
    "name": "get_na_values",
    "source_code": "def get_na_values(col, na_values, na_fvalues, keep_default_na: bool):\n    if isinstance(na_values, dict):\n        if col in na_values:\n            return (na_values[col], na_fvalues[col])\n        else:\n            if keep_default_na:\n                return (STR_NA_VALUES, set())\n            return (set(), set())\n    else:\n        return (na_values, na_fvalues)",
    "docstring": "Get the NaN values for a given column. Parameters ---------- col : str The name of the column. na_values : array-like, dict The object listing the NaN values as strings. na_fvalues : array-like, dict The object listing the NaN values as floats. keep_default_na : bool If is a dict, and the column is not mapped in the dictionary, whether to return the default NaN values or the empty set. Returns ------- nan_tuple : A length-two tuple composed of 1) na_values : the string NaN values for that column. 2) na_fvalues : the float NaN values for that column.",
    "type": "function",
    "file_path": "pandas\\pandas\\io\\parsers\\base_parser.py",
    "ast_data": "FunctionDef name:get_na_values arg:col arg:na_values arg:na_fvalues arg:keep_default_na arguments arg arg arg arg If Call If Compare Return return:yes If Return return:yes Call Return return:yes Call Call Return return:yes"
  },
  {
    "library": "django",
    "name": "partition",
    "source_code": "def partition(predicate, values):\n    results = ([], [])\n    for item in values:\n        results[predicate(item)].append(item)\n    return results",
    "docstring": "Split the values into two sets, based on the return value of the function (True/False). e.g.: >>> partition(lambda x: x > 3, range(5)) [0, 1, 2, 3], [4]",
    "type": "function",
    "file_path": "django\\django\\utils\\functional.py",
    "ast_data": "FunctionDef name:partition arg:predicate arg:values arguments arg arg Assign For Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "int_truediv",
    "source_code": "def int_truediv(self, x0: T, x1: T) -> T:\n    raise NotImplementedError",
    "docstring": "True division between integers. This is NOT the same as promoting to float and doing integer division, there is a bespoke algorithm for doing the division in higher precision than the above.",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\ops_handler.py",
    "ast_data": "FunctionDef name:int_truediv arg:self arg:x0 arg:x1 arguments arg arg arg Raise"
  },
  {
    "library": "scikit-learn",
    "name": "__call__",
    "source_code": "def __call__(self, X, Y=None, eval_gradient=False):\n    if Y is None:\n        Y = X\n    elif eval_gradient:\n        raise ValueError('Gradient can only be evaluated when Y is None.')\n    K = np.full((_num_samples(X), _num_samples(Y)), self.constant_value, dtype=np.array(self.constant_value).dtype)\n    if eval_gradient:\n        if not self.hyperparameter_constant_value.fixed:\n            return (K, np.full((_num_samples(X), _num_samples(X), 1), self.constant_value, dtype=np.array(self.constant_value).dtype))\n        else:\n            return (K, np.empty((_num_samples(X), _num_samples(X), 0)))\n    else:\n        return K",
    "docstring": "Return the kernel k(X, Y) and optionally its gradient. Parameters ---------- X : array-like of shape (n_samples_X, n_features) or list of object Left argument of the returned kernel k(X, Y) Y : array-like of shape (n_samples_X, n_features) or list of object, default=None Right argument of the returned kernel k(X, Y). If None, k(X, X) is evaluated instead. eval_gradient : bool, default=False Determines whether the gradient with respect to the log of the kernel hyperparameter is computed. Only supported when Y is None. Returns ------- K : ndarray of shape (n_samples_X, n_samples_Y) Kernel k(X, Y) K_gradient : ndarray of shape (n_samples_X, n_samples_X, n_dims), optional The gradient of the kernel k(X, X) with respect to the log of the hyperparameter of the kernel. Only returned when eval_gradient is True.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\gaussian_process\\kernels.py",
    "ast_data": "FunctionDef name:__call__ arg:self arg:X arg:Y arg:eval_gradient arguments arg arg arg arg If Compare Assign If Raise Call Assign Call Call Call Call If If Return return:yes Call Call Call Call Return return:yes Call Call Call Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "bounds",
    "source_code": "@property\ndef bounds(self):\n    bounds = [hyperparameter.bounds for hyperparameter in self.hyperparameters if not hyperparameter.fixed]\n    if len(bounds) > 0:\n        return np.log(np.vstack(bounds))\n    else:\n        return np.array([])",
    "docstring": "Returns the log-transformed bounds on the theta. Returns ------- bounds : ndarray of shape (n_dims, 2) The log-transformed bounds on the kernel's hyperparameters theta",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\gaussian_process\\kernels.py",
    "ast_data": "FunctionDef name:bounds arg:self arguments arg Assign If Compare Call Return return:yes Call Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "replace_all_uses",
    "source_code": "def replace_all_uses(self, old: str, new: str):\n    assert isinstance(old, str)\n    assert isinstance(new, str)\n    arg_types = (TensorArgument, SymIntArgument, SymFloatArgument, SymBoolArgument, CustomObjArgument, TokenArgument)\n    for o in self.output_specs:\n        if isinstance(o.arg, arg_types):\n            if o.arg.name == old:\n                o.arg.name = new\n    for i in self.input_specs:\n        if isinstance(i.arg, arg_types):\n            if i.arg.name == old:\n                i.arg.name = new",
    "docstring": "Replace all uses of the old name with new name in the signature.",
    "type": "method",
    "file_path": "pytorch\\torch\\export\\graph_signature.py",
    "ast_data": "FunctionDef name:replace_all_uses arg:self arg:old arg:new arguments arg arg arg Call Call Assign For If Call If Compare Assign For If Call If Compare Assign"
  },
  {
    "library": "scipy",
    "name": "solve_triangular",
    "source_code": "@_apply_over_batch(('a', 2), ('b', '1|2'))\ndef solve_triangular(a, b, trans=0, lower=False, unit_diagonal=False, overwrite_b=False, check_finite=True):\n    a1 = _asarray_validated(a, check_finite=check_finite)\n    b1 = _asarray_validated(b, check_finite=check_finite)\n    if len(a1.shape) != 2 or a1.shape[0] != a1.shape[1]:\n        raise ValueError('expected square matrix')\n    if a1.shape[0] != b1.shape[0]:\n        raise ValueError(f'shapes of a {a1.shape} and b {b1.shape} are incompatible')\n    if b1.size == 0:\n        dt_nonempty = solve_triangular(np.eye(2, dtype=a1.dtype), np.ones(2, dtype=b1.dtype)).dtype\n        return np.empty_like(b1, dtype=dt_nonempty)\n    overwrite_b = overwrite_b or _datacopied(b1, b)\n    x, _ = _solve_triangular(a1, b1, trans, lower, unit_diagonal, overwrite_b)\n    return x",
    "docstring": "Solve the equation `aaabba` is singular Notes ----- .. versionadded:: 0.9.0 Examples -------- Solve the lower triangular system a x = b, where:: [3 0 0 0] [4] a = [2 1 0 0] b = [2] [1 0 1 0] [4] [1 1 1 1] [2] >>> import numpy as np >>> from scipy.linalg import solve_triangular >>> a = np.array([[3, 0, 0, 0], [2, 1, 0, 0], [1, 0, 1, 0], [1, 1, 1, 1]]) >>> b = np.array([4, 2, 4, 2]) >>> x = solve_triangular(a, b, lower=True) >>> x array([ 1.33333333, -0.66666667, 2.66666667, -1.33333333]) >>> a.dot(x) # Check the result array([ 4., 2., 4., 2.])",
    "type": "function",
    "file_path": "scipy\\scipy\\linalg\\_basic.py",
    "ast_data": "FunctionDef name:solve_triangular arg:a arg:b arg:trans arg:lower arg:unit_diagonal arg:overwrite_b arg:check_finite arguments arg arg arg arg arg arg arg Assign Call Assign Call If BoolOp Compare Call Compare Raise Call If Compare Raise Call If Compare Assign Call Call Call Return return:yes Call Assign BoolOp Call Assign Call Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "argsort",
    "source_code": "def argsort(self, *args, **kwargs) -> npt.NDArray[np.intp]:\n    ascending = kwargs.pop('ascending', True)\n    kwargs.pop('kind', None)\n    nv.validate_argsort(args, kwargs)\n    start, stop, step = (None, None, None)\n    if self._range.step > 0:\n        if ascending:\n            start = len(self)\n        else:\n            start, stop, step = (len(self) - 1, -1, -1)\n    elif ascending:\n        start, stop, step = (len(self) - 1, -1, -1)\n    else:\n        start = len(self)\n    return np.arange(start, stop, step, dtype=np.intp)",
    "docstring": "Returns the indices that would sort the index and its underlying data. Returns ------- np.ndarray[np.intp] See Also -------- numpy.ndarray.argsort",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\indexes\\range.py",
    "ast_data": "FunctionDef name:argsort arg:self arguments arg arg arg Assign Call Call Call Assign If Compare If Assign Call Assign Call If Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "cherrypy",
    "name": "response_headers",
    "source_code": "def response_headers(headers=None, debug=False):\n    if debug:\n        cherrypy.log('Setting response headers: %s' % repr(headers), 'TOOLS.RESPONSE_HEADERS')\n    for name, value in headers or []:\n        cherrypy.serving.response.headers[name] = value",
    "docstring": "Set headers on the response.",
    "type": "function",
    "file_path": "cherrypy\\cherrypy\\lib\\cptools.py",
    "ast_data": "FunctionDef name:response_headers arg:headers arg:debug arguments arg arg If Call Call For BoolOp Assign"
  },
  {
    "library": "pytorch",
    "name": "precompute_methods",
    "source_code": "def precompute_methods(obj: Any, methods: list[str]) -> None:\n    for method in methods:\n        precompute_method(obj, method)",
    "docstring": "Replace methods with new methods that returns a precomputed constants.",
    "type": "function",
    "file_path": "pytorch\\torch\\_inductor\\utils.py",
    "ast_data": "FunctionDef name:precompute_methods arg:obj arg:methods arguments arg arg For Call"
  },
  {
    "library": "tensorflow",
    "name": "_get_function",
    "source_code": "def _get_function(self, name):\n    return self._functions.get(compat.as_str(name), None)",
    "docstring": "Returns the function definition for 'name'. Args: name: string function name. Returns: The function def proto.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\ops.py",
    "ast_data": "FunctionDef name:_get_function arg:self arg:name arguments arg arg Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "record_gradient",
    "source_code": "def record_gradient(unused_op_name, unused_inputs, unused_attrs, unused_outputs):\n    pass",
    "docstring": "Import backprop if you want gradients recorded.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\execute.py",
    "ast_data": "FunctionDef name:record_gradient arg:unused_op_name arg:unused_inputs arg:unused_attrs arg:unused_outputs arguments arg arg arg arg"
  },
  {
    "library": "django",
    "name": "__repr__",
    "source_code": "def __repr__(self):\n    return '<%s object at %s>' % (self.geom_type, hex(addressof(self.ptr)))",
    "docstring": "Short-hand representation because WKT may be very large.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\geos\\geometry.py",
    "ast_data": "FunctionDef name:__repr__ arg:self arguments arg Return return:yes Call Call"
  },
  {
    "library": "django",
    "name": "addQuickElement",
    "source_code": "def addQuickElement(self, name, contents=None, attrs=None):\n    if attrs is None:\n        attrs = {}\n    self.startElement(name, attrs)\n    if contents is not None:\n        self.characters(contents)\n    self.endElement(name)",
    "docstring": "Convenience method for adding an element with no children",
    "type": "method",
    "file_path": "django\\django\\utils\\xmlutils.py",
    "ast_data": "FunctionDef name:addQuickElement arg:self arg:name arg:contents arg:attrs arguments arg arg arg arg If Compare Assign Call If Compare Call Call"
  },
  {
    "library": "tensorflow",
    "name": "dismantle_func_graph",
    "source_code": "def dismantle_func_graph(func_graph):\n    func_graph._function_captures.clear()\n    ops.dismantle_graph(func_graph)",
    "docstring": "Removes reference cycles in FuncGraph. Helpful for making sure the garbage collector doesn't need to run when the FuncGraph goes out of scope, e.g. in tests using defun with @test_util.run_in_graph_and_eager_modes(assert_no_eager_garbage=True). Args: func_graph: A object to destroy. is unusable after this function.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\func_graph.py",
    "ast_data": "FunctionDef name:dismantle_func_graph arg:func_graph arguments arg Call Call"
  },
  {
    "library": "cherrypy",
    "name": "_setup",
    "source_code": "def _setup(self):\n    if appstats.get('Enabled', False):\n        cherrypy.Tool._setup(self)\n        self.record_start()",
    "docstring": "Plug this tool into ``. The standard CherryPy request object will automatically call this method when the tool is \"turned on\" in config.",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\lib\\cpstats.py",
    "ast_data": "FunctionDef name:_setup arg:self arguments arg If Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "ndims",
    "source_code": "@property\ndef ndims(self):\n    return self.rank",
    "docstring": "Deprecated accessor for .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\tensor_shape.py",
    "ast_data": "FunctionDef name:ndims arg:self arguments arg Return return:yes"
  },
  {
    "library": "sphinx",
    "name": "handle_finish",
    "source_code": "def handle_finish(self) -> None:\n    self.get_toc()\n    self.build_mimetype()\n    self.build_container()\n    self.build_content()\n    self.build_navigation_doc()\n    self.build_toc()\n    self.build_epub()",
    "docstring": "Create the metainfo files and finally the epub.",
    "type": "method",
    "file_path": "sphinx\\sphinx\\builders\\epub3.py",
    "ast_data": "FunctionDef name:handle_finish arg:self arguments arg Call Call Call Call Call Call Call"
  },
  {
    "library": "scrapy",
    "name": "_log_warnsize",
    "source_code": "@property\ndef _log_warnsize(self) -> bool:\n    content_length_header = int(self._response['headers'].get(b'Content-Length', -1))\n    return self._download_warnsize and (self._response['flow_controlled_size'] > self._download_warnsize or content_length_header > self._download_warnsize) and (not self.metadata['reached_warnsize'])",
    "docstring": "Checks if we have received data which exceeds the download warnsize and whether we have not already logged about it. Returns: True if both the above conditions hold true False if any of the conditions is false",
    "type": "method",
    "file_path": "scrapy\\scrapy\\core\\http2\\stream.py",
    "ast_data": "FunctionDef name:_log_warnsize arg:self arguments arg Assign Call Call Return return:yes BoolOp BoolOp Compare Compare"
  },
  {
    "library": "tensorflow",
    "name": "_has_quantization_method",
    "source_code": "def _has_quantization_method(quantization_specs: qc.QuantizationSpecs, method: str) -> bool:\n    for spec in quantization_specs.specs:\n        if spec.method.HasField(method):\n            return True\n    return False",
    "docstring": "Returns whether a given QuantizationSpecs has the given quantization method.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\compiler\\mlir\\quantization\\stablehlo\\python\\quantization.py",
    "ast_data": "FunctionDef name:_has_quantization_method arg:quantization_specs arg:method arguments arg arg For If Call Return return:yes Return return:yes"
  },
  {
    "library": "cherrypy",
    "name": "__call__",
    "source_code": "def __call__(self):\n    raise self",
    "docstring": "Use this exception as a request.handler (raise self).",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\_cperror.py",
    "ast_data": "FunctionDef name:__call__ arg:self arguments arg Raise"
  },
  {
    "library": "tensorflow",
    "name": "_GetReducedSubscripts",
    "source_code": "def _GetReducedSubscripts(reduced_label_set, input_shape, subscripts):\n    reduced_subs = ''.join(list(reduced_label_set))\n    reduced_axes = [_GetAxisFromLabel(subscripts, s) for s in reduced_subs]\n    reduced_dims = array_ops_stack.stack([input_shape[ax] for ax in reduced_axes])\n    return (reduced_subs, reduced_dims, reduced_axes)",
    "docstring": "Returns reduced subscripts and their corresponding dimensions and axes. Given a set of axis labels, returns their concatenated subscript, their corresponding dimensions from input_shape, and their corresponding axes. Note that the concatenated subscript may have axis labels from in any order. For example, for the reduced label set , subscripts and input shape , returns subscripts , dimensions and axes . Args: reduced_label_set: Set of axis labels which appear in . input_shape: A representing the shape of the einsum operand corresponding to . subscripts: A string denoting the einsum subscript. Returns: reduced_subs: Subscripts formed by a concatenation of labels in . reduced_dims: Dimensions from corresponding to each label in . reduced_axes: Axes described by corresponding to each label in . If there are multiple occurrences in , we consider only the leftmost one.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\linalg_grad.py",
    "ast_data": "FunctionDef name:_GetReducedSubscripts arg:reduced_label_set arg:input_shape arg:subscripts arguments arg arg arg Assign Call Call Assign Call Assign Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "__init__",
    "source_code": "@_docstring.interpd\ndef __init__(self, path, **kwargs):\n    super().__init__(**kwargs)\n    self._path = path",
    "docstring": "*path* is a object. Valid keyword arguments are: %(Patch:kwdoc)s",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\patches.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:path arguments arg arg arg Call Call Assign"
  },
  {
    "library": "tensorflow",
    "name": "with_flat_values",
    "source_code": "def with_flat_values(self, new_values):\n    if isinstance(self._values, RaggedTensor):\n        return self.with_values(self.values.with_flat_values(new_values))\n    else:\n        new_values = _convert_to_ragged_tensor_values(new_values)\n    return self.with_values(new_values)",
    "docstring": "Returns a copy of with replaced by . Preserves cached row-partitioning tensors such as and if they have values. Args: new_values: Potentially ragged tensor that should replace . Must have , and must have the same number of rows as . Returns: A . . .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\ragged_tensor.py",
    "ast_data": "FunctionDef name:with_flat_values arg:self arg:new_values arguments arg arg If Call Return return:yes Call Call Assign Call Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "_nonlin_wrapper",
    "source_code": "def _nonlin_wrapper(name, jac):\n    signature = _getfullargspec(jac.__init__)\n    args, varargs, varkw, defaults, kwonlyargs, kwdefaults, _ = signature\n    kwargs = list(zip(args[-len(defaults):], defaults))\n    kw_str = ', '.join([f'{k}={v!r}' for k, v in kwargs])\n    if kw_str:\n        kw_str = ', ' + kw_str\n    kwkw_str = ', '.join([f'{k}={k}' for k, v in kwargs])\n    if kwkw_str:\n        kwkw_str = kwkw_str + ', '\n    if kwonlyargs:\n        raise ValueError(f'Unexpected signature {signature}')\n    wrapper = \"\\ndef %(name)s(F, xin, iter=None %(kw)s, verbose=False, maxiter=None,\\n             f_tol=None, f_rtol=None, x_tol=None, x_rtol=None,\\n             tol_norm=None, line_search='armijo', callback=None, **kw):\\n    jac = %(jac)s(%(kwkw)s **kw)\\n    return nonlin_solve(F, xin, jac, iter, verbose, maxiter,\\n                        f_tol, f_rtol, x_tol, x_rtol, tol_norm, line_search,\\n                        callback)\\n\"\n    wrapper = wrapper % dict(name=name, kw=kw_str, jac=jac.__name__, kwkw=kwkw_str)\n    ns = {}\n    ns.update(globals())\n    exec(wrapper, ns)\n    func = ns[name]\n    func.__doc__ = jac.__doc__\n    _set_doc(func)\n    return func",
    "docstring": "Construct a solver wrapper with given name and Jacobian approx. It inspects the keyword arguments of `nonlin_solve`",
    "type": "function",
    "file_path": "scipy\\scipy\\optimize\\_nonlin.py",
    "ast_data": "FunctionDef name:_nonlin_wrapper arg:name arg:jac arguments arg arg Assign Call Assign Assign Call Call Call Assign Call If Assign Assign Call If Assign If Raise Call Assign Assign Call Assign Call Call Call Assign Assign Call Return return:yes"
  },
  {
    "library": "numpy",
    "name": "assemble_flags",
    "source_code": "def assemble_flags(self, in_flags):\n    if in_flags is None:\n        return []\n    out_flags = []\n    for in_flag in in_flags:\n        if callable(in_flag):\n            out_flags += in_flag(self)\n        else:\n            out_flags.append(in_flag)\n    return out_flags",
    "docstring": "Assemble flags from flag list Parameters ---------- in_flags : None or sequence None corresponds to empty list. Sequence elements can be strings or callables that return lists of strings. Callable takes as single parameter. Returns ------- out_flags : list",
    "type": "method",
    "file_path": "numpy\\numpy\\distutils\\command\\build_clib.py",
    "ast_data": "FunctionDef name:assemble_flags arg:self arg:in_flags arguments arg arg If Compare Return return:no Assign For If Call Call Call Return return:yes"
  },
  {
    "library": "kornia",
    "name": "differentiable_polynomial_rounding",
    "source_code": "def differentiable_polynomial_rounding(input: Tensor) -> Tensor:\n    input_round = input.round()\n    output: Tensor = input_round + (input - input_round) ** 3\n    return output",
    "docstring": "Differentiable rounding. Args: input (Tensor): Input tensor of any shape to be rounded. Returns: output (Tensor): Pseudo rounded tensor of the same shape as input tensor.",
    "type": "function",
    "file_path": "kornia\\kornia\\utils\\misc.py",
    "ast_data": "FunctionDef name:differentiable_polynomial_rounding arg:input arguments arg Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_analyze_tensors",
    "source_code": "def _analyze_tensors(self, show_memory: bool) -> None:\n    for dev_stats in self._step_stats.dev_stats:\n        device_pid = self._device_pids[dev_stats.device]\n        tensors_pid = self._tensor_pids[dev_stats.device]\n        for node_stats in dev_stats.node_stats:\n            tid = node_stats.thread_id\n            node_name = node_stats.node_name\n            start_time = node_stats.all_start_micros\n            end_time = node_stats.all_start_micros + node_stats.all_end_rel_micros\n            for index, output in enumerate(node_stats.output):\n                if index:\n                    output_name = '%s:%d' % (node_name, index)\n                else:\n                    output_name = node_name\n                allocation = output.tensor_description.allocation_description\n                num_bytes = allocation.requested_bytes\n                allocator_name = allocation.allocator_name\n                tensor = self._produce_tensor(output_name, start_time, tensors_pid, allocator_name, num_bytes)\n                tensor.add_ref(start_time)\n                tensor.add_unref(end_time)\n                self._flow_starts[output_name] = (end_time, device_pid, tid)\n                if show_memory:\n                    self._chrome_trace.emit_obj_create('Tensor', output_name, start_time, tensors_pid, tid, tensor.object_id)\n                    self._emit_tensor_snapshot(tensor, end_time - 1, tensors_pid, tid, output)",
    "docstring": "Analyze tensor references to track dataflow.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\client\\timeline.py",
    "ast_data": "FunctionDef name:_analyze_tensors arg:self arg:show_memory arguments arg arg For Assign Assign For Assign Assign Assign Assign For Call If Assign Assign Assign Assign Assign Assign Call Call Call Assign If Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_inverse_event_shape_tensor",
    "source_code": "def _inverse_event_shape_tensor(self, output_shape):\n    return output_shape",
    "docstring": "Subclass implementation for function.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\distributions\\bijector_impl.py",
    "ast_data": "FunctionDef name:_inverse_event_shape_tensor arg:self arg:output_shape arguments arg arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_in_multi_worker_mode",
    "source_code": "def _in_multi_worker_mode(self):\n    return self._num_workers > 1",
    "docstring": "Whether this strategy indicates working in multi-worker settings.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\collective_all_reduce_strategy.py",
    "ast_data": "FunctionDef name:_in_multi_worker_mode arg:self arguments arg Return return:yes Compare"
  },
  {
    "library": "tensorflow",
    "name": "_ragged_op_signature",
    "source_code": "def _ragged_op_signature(op, ragged_args, ragged_varargs=False):\n    op_name = tf_export.get_canonical_name_for_symbol(op)\n    argspec = tf_inspect.getfullargspec(op)\n    arg_names = argspec.args\n    for pos in ragged_args:\n        arg_names[pos] = '**' + arg_names[pos] + '**'\n    if argspec.defaults is not None:\n        for pos in range(-1, -len(argspec.defaults) - 1, -1):\n            arg_names[pos] += '=`{!r}`'.format(argspec.defaults[pos])\n    if argspec.varargs:\n        if ragged_varargs:\n            arg_names.append('***' + argspec.varargs + '**')\n        else:\n            arg_names.append('*' + argspec.varargs)\n    if argspec.varkw:\n        arg_names.append('**' + argspec.varkw)\n    return '* `tf.{}`({})'.format(op_name, ', '.join(arg_names))",
    "docstring": "Returns a signature for the given op, marking ragged args in bold.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\ragged_dispatch.py",
    "ast_data": "FunctionDef name:_ragged_op_signature arg:op arg:ragged_args arg:ragged_varargs arguments arg arg arg Assign Call Assign Call Assign For Assign If Compare For Call Call Call If If Call Call If Call Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "number_of_tuple_elements",
    "source_code": "@property\ndef number_of_tuple_elements(self):\n    return len(self._sharding_policies)",
    "docstring": "Returns the number of InfeedQueue tuple elements.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\tpu\\tpu_feed.py",
    "ast_data": "FunctionDef name:number_of_tuple_elements arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "deserialize_feature_columns",
    "source_code": "def deserialize_feature_columns(configs, custom_objects=None):\n    columns_by_name = {}\n    return [deserialize_feature_column(c, custom_objects, columns_by_name) for c in configs]",
    "docstring": "Deserializes a list of FeatureColumns configs. Returns a list of FeatureColumns given a list of config dicts acquired by . Args: configs: A list of Dicts with the serialization of feature columns acquired by . custom_objects: A Dict from custom_object name to the associated keras serializable objects (FeatureColumns, classes or functions). Returns: FeatureColumn objects corresponding to the input configs. Raises: ValueError if called with input that is not a list of FeatureColumns.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\feature_column\\serialization.py",
    "ast_data": "FunctionDef name:deserialize_feature_columns arg:configs arg:custom_objects arguments arg arg Assign Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "_is_valid_shortcut",
    "source_code": "def _is_valid_shortcut(self, key):\n    return 'cmd+' not in key and (not key.startswith('MouseButton.'))",
    "docstring": "Check for a valid shortcut to be displayed. - GTK will never send 'cmd+' (see ). - The shortcut window only shows keyboard shortcuts, not mouse buttons.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\backends\\backend_gtk4.py",
    "ast_data": "FunctionDef name:_is_valid_shortcut arg:self arg:key arguments arg arg Return return:yes BoolOp Compare Call"
  },
  {
    "library": "tensorflow",
    "name": "_get_serialized_attributes",
    "source_code": "def _get_serialized_attributes(self, serialization_cache):\n    keras_cache = serialization_cache.setdefault(constants.KERAS_CACHE_KEY, {})\n    if self.obj in keras_cache:\n        return keras_cache[self.obj]\n    serialized_attr = keras_cache[self.obj] = serialized_attributes.SerializedAttributes.new(self.obj)\n    if save_impl.should_skip_serialization(self.obj) or self.obj._must_restore_from_config:\n        return serialized_attr\n    object_dict, function_dict = self._get_serialized_attributes_internal(serialization_cache)\n    serialized_attr.set_and_validate_objects(object_dict)\n    serialized_attr.set_and_validate_functions(function_dict)\n    return serialized_attr",
    "docstring": "Generates or retrieves serialized attributes from cache.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\saving\\saved_model\\layer_serialization.py",
    "ast_data": "FunctionDef name:_get_serialized_attributes arg:self arg:serialization_cache arguments arg arg Assign Call If Compare Return return:yes Assign Call If BoolOp Call Return return:yes Assign Call Call Call Return return:yes"
  },
  {
    "library": "pandas",
    "name": "get_indexer_indexer",
    "source_code": "def get_indexer_indexer(target: Index, level: Level | list[Level] | None, ascending: list[bool] | bool, kind: SortKind, na_position: NaPosition, sort_remaining: bool, key: IndexKeyFunc) -> npt.NDArray[np.intp] | None:\n    target = ensure_key_mapped(target, key, levels=level)\n    target = target._sort_levels_monotonic()\n    if level is not None:\n        _, indexer = target.sortlevel(level, ascending=ascending, sort_remaining=sort_remaining, na_position=na_position)\n    elif np.all(ascending) and target.is_monotonic_increasing or (not np.any(ascending) and target.is_monotonic_decreasing):\n        return None\n    elif isinstance(target, ABCMultiIndex):\n        codes = [lev.codes for lev in target._get_codes_for_sorting()]\n        indexer = lexsort_indexer(codes, orders=ascending, na_position=na_position, codes_given=True)\n    else:\n        indexer = nargsort(target, kind=kind, ascending=cast(bool, ascending), na_position=na_position)\n    return indexer",
    "docstring": "Helper method that return the indexer according to input parameters for the sort_index method of DataFrame and Series. Parameters ---------- target : Index level : int or level name or list of ints or list of level names ascending : bool or list of bools, default True kind : {'quicksort', 'mergesort', 'heapsort', 'stable'} na_position : {'first', 'last'} sort_remaining : bool key : callable, optional Returns ------- Optional[ndarray[intp]] The indexer for the new index.",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\sorting.py",
    "ast_data": "FunctionDef name:get_indexer_indexer arg:target arg:level arg:ascending arg:kind arg:na_position arg:sort_remaining arg:key arguments arg arg arg arg arg arg arg Assign Call Assign Call If Compare Assign Call If BoolOp BoolOp Call BoolOp Call Return return:no If Call Assign Call Assign Call Assign Call Call Return return:yes"
  },
  {
    "library": "scrapy",
    "name": "urlparse_cached",
    "source_code": "def urlparse_cached(request_or_response: Request | Response) -> ParseResult:\n    if request_or_response not in _urlparse_cache:\n        _urlparse_cache[request_or_response] = urlparse(request_or_response.url)\n    return _urlparse_cache[request_or_response]",
    "docstring": "Return urlparse.urlparse caching the result, where the argument can be a Request or Response object",
    "type": "function",
    "file_path": "scrapy\\scrapy\\utils\\httpobj.py",
    "ast_data": "FunctionDef name:urlparse_cached arg:request_or_response arguments arg If Compare Assign Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "set_label_position",
    "source_code": "def set_label_position(self, position):\n    self.label.set_verticalalignment(_api.check_getitem({'top': 'baseline', 'bottom': 'top'}, position=position))\n    self.label_position = position\n    self.stale = True",
    "docstring": "Set the label position (top or bottom) Parameters ---------- position : {'top', 'bottom'}",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\axis.py",
    "ast_data": "FunctionDef name:set_label_position arg:self arg:position arguments arg arg Call Call Assign Assign"
  },
  {
    "library": "authlib",
    "name": "get_oauth_token_secret",
    "source_code": "def get_oauth_token_secret(self):\n    raise NotImplementedError()",
    "docstring": "A method to get the value of ``:: def get_oauth_token_secret(self): return self.oauth_token_secret :return: A string",
    "type": "method",
    "file_path": "authlib\\authlib\\oauth1\\rfc5849\\models.py",
    "ast_data": "FunctionDef name:get_oauth_token_secret arg:self arguments arg Raise Call"
  },
  {
    "library": "tensorflow",
    "name": "concat",
    "source_code": "def concat(self, name=None):\n    if self.element_shape:\n        element_shape = [None] + self.element_shape.dims[1:]\n    else:\n        element_shape = None\n    value = list_ops.tensor_list_concat(input_handle=self._flow, element_dtype=self._dtype, element_shape=element_shape, name=name)\n    return value",
    "docstring": "See TensorArray.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\tensor_array_ops.py",
    "ast_data": "FunctionDef name:concat arg:self arg:name arguments arg arg If Assign Assign Assign Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "items",
    "source_code": "@_copy_to_script_wrapper\ndef items(self) -> Iterable[tuple[str, Module]]:\n    return self._modules.items()",
    "docstring": "Return an iterable of the ModuleDict key/value pairs.",
    "type": "method",
    "file_path": "pytorch\\torch\\nn\\modules\\container.py",
    "ast_data": "FunctionDef name:items arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_PerformUpgrade",
    "source_code": "def _PerformUpgrade(self, data):\n    while data['version'] < self._new_version:\n        self._upgrade_dispatch[data['version']](data)\n        data['version'] += 1",
    "docstring": "Manipulate the (parsed JSON) based on changes in format. This incrementally will upgrade from version to version within data. Args: data: Dictionary representing the TensorFlow data. This will be upgraded in place.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\lite\\schema\\upgrade_schema.py",
    "ast_data": "FunctionDef name:_PerformUpgrade arg:self arg:data arguments arg arg While Compare Call"
  },
  {
    "library": "tensorflow",
    "name": "keywords_to_dict",
    "source_code": "def keywords_to_dict(keywords):\n    keys = []\n    values = []\n    for kw in keywords:\n        keys.append(gast.Constant(kw.arg, kind=None))\n        values.append(kw.value)\n    return gast.Dict(keys=keys, values=values)",
    "docstring": "Converts a list of ast.keyword objects to a dict.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\autograph\\pyct\\ast_util.py",
    "ast_data": "FunctionDef name:keywords_to_dict arg:keywords arguments arg Assign Assign For Call Call Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "channel_shuffle",
    "source_code": "@register_decomposition(aten.channel_shuffle)\n@out_wrapper()\ndef channel_shuffle(input: TensorLikeType, groups: int) -> TensorLikeType:\n    from torch._meta_registrations import device_hint\n    torch._check(input.dim() > 2, lambda: f'channel_shuffle expects input with > 2 dims, but got input with sizes {list(input.size())}')\n    c = input.shape[1]\n    torch._check(groups > 0, lambda: f'Number of groups to divide channels in must be positive. Value of groups:{groups}')\n    torch._check(c % groups == 0, lambda: f'Number of channels must be divisible by groups. Got {c} channels and {groups} groups.')\n    n = input.shape[0]\n    cg = c // groups\n    dhw = input.shape[2:]\n    if input.numel() == 0 or (device_hint(input) == 'cuda' and (groups == 1 or groups == c)):\n        return input.view(input.shape)\n    return input.reshape(n, groups, cg, *dhw).transpose(1, 2).reshape(input.shape).contiguous()",
    "docstring": "Reference implementation of :func:.",
    "type": "function",
    "file_path": "pytorch\\torch\\_refs\\nn\\functional\\__init__.py",
    "ast_data": "FunctionDef name:channel_shuffle arg:input arg:groups arguments arg arg Call Compare Call arguments Call Call Assign Call Compare arguments Call Compare arguments Assign Assign Assign If BoolOp Compare Call BoolOp Compare Call BoolOp Compare Compare Return return:yes Call Return return:yes Call Call Call Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, graph=None, op_log=None):\n    if not graph and (not context.executing_eagerly()):\n        graph = ops.get_default_graph()\n    self._coverage = 0.0\n    self._graph = graph\n    op_log = tfprof_logger.merge_default_with_oplog(self._graph, op_log=op_log)\n    print_mdl.NewProfiler(_graph_string(self._graph), op_log.SerializeToString())",
    "docstring": "Constructor. Args: graph: tf.Graph. If None and eager execution is not enabled, use default graph. op_log: optional. tensorflow::tfprof::OpLogProto proto. Used to define extra op types.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\profiler\\model_analyzer.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:graph arg:op_log arguments arg arg arg If BoolOp Call Assign Call Assign Assign Assign Call Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "is_adapted",
    "source_code": "@property\ndef is_adapted(self):\n    return self._is_adapted",
    "docstring": "Whether the layer has been fit to data already.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\base_preprocessing_layer.py",
    "ast_data": "FunctionDef name:is_adapted arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "record_snapshot",
    "source_code": "def record_snapshot(self):\n    self._python_memory_checker.record_snapshot()",
    "docstring": "Take a memory snapshot for later analysis. must be called once every iteration at the same location. This is because the detection algorithm relies on the assumption that if there is a leak, it's happening similarly on every snapshot. The recommended number of call depends on the testing code complexity and the allocation pattern.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\memory_checker.py",
    "ast_data": "FunctionDef name:record_snapshot arg:self arguments arg Call"
  },
  {
    "library": "kornia",
    "name": "visualize",
    "source_code": "def visualize(self, images: Union[Tensor, list[Tensor]], detections: Optional[Tensor]=None, output_type: str='torch') -> Union[Tensor, list[Tensor], list[Image.Image]]:\n    dets = detections or self.forward(images)\n    output = []\n    for image, detection in zip(images, dets):\n        out_img = image[None].clone()\n        for out in detection:\n            out_img = draw_rectangle(out_img, torch.Tensor([[[out[-4], out[-3], out[-4] + out[-2], out[-3] + out[-1]]]]))\n        output.append(out_img[0])\n    return self._tensor_to_type(output, output_type, is_batch=isinstance(images, Tensor))",
    "docstring": "Very simple drawing. Needs to be more fancy later.",
    "type": "method",
    "file_path": "kornia\\kornia\\models\\detection\\base.py",
    "ast_data": "FunctionDef name:visualize arg:self arg:images arg:detections arg:output_type arguments arg arg arg arg Assign BoolOp Call Assign For Call Assign Call For Assign Call Call Call Return return:yes Call Call"
  },
  {
    "library": "seaborn",
    "name": "ci",
    "source_code": "def ci(a, which=95, axis=None):\n    p = (50 - which / 2, 50 + which / 2)\n    return np.nanpercentile(a, p, axis)",
    "docstring": "Return a percentile range from an array of values.",
    "type": "function",
    "file_path": "seaborn\\seaborn\\utils.py",
    "ast_data": "FunctionDef name:ci arg:a arg:which arg:axis arguments arg arg arg Assign Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "compute_geom_grads",
    "source_code": "def compute_geom_grads(self):\n    tris_pts = self._tris_pts\n    tris_f = self.z[self._triangles]\n    dM1 = tris_pts[:, 1, :] - tris_pts[:, 0, :]\n    dM2 = tris_pts[:, 2, :] - tris_pts[:, 0, :]\n    dM = np.dstack([dM1, dM2])\n    dM_inv = _safe_inv22_vectorized(dM)\n    dZ1 = tris_f[:, 1] - tris_f[:, 0]\n    dZ2 = tris_f[:, 2] - tris_f[:, 0]\n    dZ = np.vstack([dZ1, dZ2]).T\n    df = np.empty_like(dZ)\n    df[:, 0] = dZ[:, 0] * dM_inv[:, 0, 0] + dZ[:, 1] * dM_inv[:, 1, 0]\n    df[:, 1] = dZ[:, 0] * dM_inv[:, 0, 1] + dZ[:, 1] * dM_inv[:, 1, 1]\n    return df",
    "docstring": "Compute the (global) gradient component of f assumed linear (~f). returns array df of shape (nelems, 2) df[ielem].dM[ielem] = dz[ielem] i.e. df = dz x dM = dM.T^-1 x dz",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\tri\\_triinterpolate.py",
    "ast_data": "FunctionDef name:compute_geom_grads arg:self arguments arg Assign Assign Assign Assign Assign Call Assign Call Assign Assign Assign Call Assign Call Assign Assign Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "check_classification_targets",
    "source_code": "def check_classification_targets(y):\n    y_type = type_of_target(y, input_name='y')\n    if y_type not in ['binary', 'multiclass', 'multiclass-multioutput', 'multilabel-indicator', 'multilabel-sequences']:\n        raise ValueError(f'Unknown label type: {y_type}. Maybe you are trying to fit a classifier, which expects discrete classes on a regression target with continuous values.')",
    "docstring": "Ensure that target y is of a non-regression type. Only the following target types (as defined in type_of_target) are allowed: 'binary', 'multiclass', 'multiclass-multioutput', 'multilabel-indicator', 'multilabel-sequences' Parameters ---------- y : array-like Target values.",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\utils\\multiclass.py",
    "ast_data": "FunctionDef name:check_classification_targets arg:y arguments arg Assign Call If Compare Raise Call"
  },
  {
    "library": "sphinx",
    "name": "_format_rfc3339_microseconds",
    "source_code": "def _format_rfc3339_microseconds(timestamp: int, /) -> str:\n    seconds, fraction = divmod(timestamp, 10 ** 6)\n    time_tuple = time.gmtime(seconds)\n    return time.strftime('%Y-%m-%d %H:%M:%S', time_tuple) + f'.{fraction // 1000}'",
    "docstring": "Return an RFC 3339 formatted string representing the given timestamp. :param timestamp: The timestamp to format, in microseconds.",
    "type": "function",
    "file_path": "sphinx\\sphinx\\util\\_timestamps.py",
    "ast_data": "FunctionDef name:_format_rfc3339_microseconds arguments arg Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "print_outlier_ratio",
    "source_code": "def print_outlier_ratio(y):\n    uniq, cnt = np.unique(y, return_counts=True)\n    print('----- Target count values: ')\n    for u, c in zip(uniq, cnt):\n        print('------ %s -> %d occurrences' % (str(u), c))\n    print('----- Outlier ratio: %.5f' % (np.min(cnt) / len(y)))",
    "docstring": "Helper function to show the distinct value count of element in the target. Useful indicator for the datasets used in bench_isolation_forest.py.",
    "type": "function",
    "file_path": "scikit-learn\\benchmarks\\bench_online_ocsvm.py",
    "ast_data": "FunctionDef name:print_outlier_ratio arg:y arguments arg Assign Call Call For Call Call Call Call Call Call"
  },
  {
    "library": "authlib",
    "name": "get_authorization_grant",
    "source_code": "@hooked\ndef get_authorization_grant(self, request):\n    for grant_cls, extensions in self._authorization_grants:\n        if grant_cls.check_authorization_endpoint(request):\n            return _create_grant(grant_cls, extensions, request, self)\n    raise UnsupportedResponseTypeError(f\"The response type '{request.payload.response_type}' is not supported by the server.\", request.payload.response_type, redirect_uri=request.payload.redirect_uri)",
    "docstring": "Find the authorization grant for current request. :param request: OAuth2Request instance. :return: grant instance",
    "type": "method",
    "file_path": "authlib\\authlib\\oauth2\\rfc6749\\authorization_server.py",
    "ast_data": "FunctionDef name:get_authorization_grant arg:self arg:request arguments arg arg For If Call Return return:yes Call Raise Call"
  },
  {
    "library": "tensorflow",
    "name": "convert_saved_model_v1",
    "source_code": "@tf_export('mlir.experimental.convert_saved_model_v1')\ndef convert_saved_model_v1(saved_model_path, exported_names, tags, lift_variables, include_variables_in_initializers, upgrade_legacy=True, show_debug_info=False):\n    return pywrap_mlir.experimental_convert_saved_model_v1_to_mlir(saved_model_path, exported_names, tags, lift_variables, include_variables_in_initializers, upgrade_legacy, show_debug_info)",
    "docstring": "Converts a v1 SavedModel to MLIR module. Args: saved_model_path: Path to SavedModel. exported_names: Names to export. tags: MetaGraphDef to be loaded is identified by the supplied tags. lift_variables: Whether to promote tf.VarHandleOp to resource arguments. include_variables_in_initializers: Keeps the variables in initializers before lifting variables. upgrade_legacy: Functionalize the input graph before importing. show_debug_info: Whether to include locations in the emitted textual form. Returns: A textual representation of the MLIR module corresponding to the SavedModule.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\compiler\\mlir\\mlir.py",
    "ast_data": "FunctionDef name:convert_saved_model_v1 arg:saved_model_path arg:exported_names arg:tags arg:lift_variables arg:include_variables_in_initializers arg:upgrade_legacy arg:show_debug_info arguments arg arg arg arg arg arg arg Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "allreduce_hook",
    "source_code": "def allreduce_hook(state: DefaultState, grad: torch.Tensor):\n    if state.gradient_predivide_factor > 1:\n        grad.div_(state.gradient_predivide_factor)\n    dist.all_reduce(grad, group=state.process_group)\n    if state.gradient_postdivide_factor > 1:\n        grad.div_(state.gradient_postdivide_factor)",
    "docstring": "Implement the FSDP communication hook for `` algorithm and a necessary pre- and post-division of gradients. Args: state (DefaultState): State information, configures pre- and post-division factors. grad (torch.Tensor): A gradient for the local batch that needs to be communicated across ranks.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\algorithms\\_comm_hooks\\default_hooks.py",
    "ast_data": "FunctionDef name:allreduce_hook arg:state arg:grad arguments arg arg If Compare Call Call If Compare Call"
  },
  {
    "library": "authlib",
    "name": "encrypt",
    "source_code": "def encrypt(self, msg, aad, iv, key):\n    raise NotImplementedError",
    "docstring": "Encrypt the given \"msg\" text. :param msg: text to be encrypt in bytes :param aad: additional authenticated data in bytes :param iv: initialization vector in bytes :param key: encrypted key in bytes :return: (ciphertext, tag)",
    "type": "method",
    "file_path": "authlib\\authlib\\jose\\rfc7516\\models.py",
    "ast_data": "FunctionDef name:encrypt arg:self arg:msg arg:aad arg:iv arg:key arguments arg arg arg arg arg Raise"
  },
  {
    "library": "pandas",
    "name": "to_string",
    "source_code": "@Substitution(buf=buffering_args, encoding=encoding_args)\ndef to_string(self, buf: FilePath | WriteBuffer[str] | None=None, *, encoding: str | None=None, sparse_index: bool | None=None, sparse_columns: bool | None=None, max_rows: int | None=None, max_columns: int | None=None, delimiter: str=' ') -> str | None:\n    obj = self._copy(deepcopy=True)\n    if sparse_index is None:\n        sparse_index = get_option('styler.sparse.index')\n    if sparse_columns is None:\n        sparse_columns = get_option('styler.sparse.columns')\n    text = obj._render_string(sparse_columns=sparse_columns, sparse_index=sparse_index, max_rows=max_rows, max_cols=max_columns, delimiter=delimiter)\n    return save_to_buffer(text, buf=buf, encoding=encoding if buf is not None else None)",
    "docstring": "Write Styler to a file, buffer or string in text format. .. versionadded:: 1.5.0 Parameters ---------- %(buf)s %(encoding)s sparse_index : bool, optional Whether to sparsify the display of a hierarchical index. Setting to False will display each explicit level element in a hierarchical key for each row. Defaults to `bufNone`. See Also -------- DataFrame.to_string : Render a DataFrame to a console-friendly tabular output. Examples -------- >>> df = pd.DataFrame({\"A\": [1, 2], \"B\": [3, 4]}) >>> df.style.to_string() ' A B\\n0 1 3\\n1 2 4\\n'",
    "type": "method",
    "file_path": "pandas\\pandas\\io\\formats\\style.py",
    "ast_data": "FunctionDef name:to_string arg:self arg:buf arguments arg arg arg arg arg arg arg arg Assign Call If Compare Assign Call If Compare Assign Call Assign Call Return return:yes Call Compare Call"
  },
  {
    "library": "matplotlib",
    "name": "axisinfo",
    "source_code": "@staticmethod\ndef axisinfo(unit, axis):\n    StrCategoryConverter._validate_unit(unit)\n    majloc = StrCategoryLocator(unit._mapping)\n    majfmt = StrCategoryFormatter(unit._mapping)\n    return units.AxisInfo(majloc=majloc, majfmt=majfmt)",
    "docstring": "Set the default axis ticks and labels. Parameters ---------- unit : object string unit information for value axis : axis for which information is being set .. note:: *axis* is not used Returns ------- Information to support default tick labeling",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\category.py",
    "ast_data": "FunctionDef name:axisinfo arg:unit arg:axis arguments arg arg Call Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "_sample_uniform_direction",
    "source_code": "def _sample_uniform_direction(dim, size, random_state):\n    samples_shape = np.append(size, dim)\n    samples = random_state.standard_normal(samples_shape)\n    samples /= np.linalg.norm(samples, axis=-1, keepdims=True)\n    return samples",
    "docstring": "Private method to generate uniform directions Reference: Marsaglia, G. (1972). \"Choosing a Point from the Surface of a Sphere\". Annals of Mathematical Statistics. 43 (2): 645-646.",
    "type": "function",
    "file_path": "scipy\\scipy\\stats\\_multivariate.py",
    "ast_data": "FunctionDef name:_sample_uniform_direction arg:dim arg:size arg:random_state arguments arg arg arg Assign Call Assign Call Call Return return:yes"
  },
  {
    "library": "pandas",
    "name": "set_module",
    "source_code": "def set_module(module) -> Callable[[F], F]:\n\n    def decorator(func: F) -> F:\n        if module is not None:\n            func.__module__ = module\n        return func\n    return decorator",
    "docstring": "Private decorator for overriding __module__ on a function or class. Example usage:: @set_module(\"pandas\") def example(): pass assert example.__module__ == \"pandas\"",
    "type": "function",
    "file_path": "pandas\\pandas\\util\\_decorators.py",
    "ast_data": "FunctionDef name:set_module arg:module arguments arg FunctionDef name:decorator arg:func arguments arg If Compare Assign Return return:yes Return return:yes"
  },
  {
    "library": "cherrypy",
    "name": "base",
    "source_code": "def base(self):\n    if self.socket_file:\n        return self.socket_file\n    host = self.socket_host\n    if host in ('0.0.0.0', '::'):\n        import socket\n        host = socket.gethostname()\n    port = self.socket_port\n    if self.ssl_certificate:\n        scheme = 'https'\n        if port != 443:\n            host += ':%s' % port\n    else:\n        scheme = 'http'\n        if port != 80:\n            host += ':%s' % port\n    return '%s://%s' % (scheme, host)",
    "docstring": "Return the base for this server. e.i. scheme://host[:port] or sock file",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\_cpserver.py",
    "ast_data": "FunctionDef name:base arg:self arguments arg If Return return:yes Assign If Compare Assign Call Assign If Assign If Compare Assign If Compare Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "decode_raw_v1",
    "source_code": "@tf_export(v1=['decode_raw', 'io.decode_raw'])\n@dispatch.add_dispatch_support\n@deprecation.deprecated_args(None, 'bytes is deprecated, use input_bytes instead', 'bytes')\ndef decode_raw_v1(input_bytes=None, out_type=None, little_endian=True, name=None, bytes=None):\n    input_bytes = deprecation.deprecated_argument_lookup('input_bytes', input_bytes, 'bytes', bytes)\n    if out_type is None:\n        raise ValueError(\"decode_raw_v1() missing 1 positional argument: 'out_type'\")\n    return gen_parsing_ops.decode_raw(input_bytes, out_type, little_endian=little_endian, name=name)",
    "docstring": "Convert raw byte strings into tensors. Args: input_bytes: Each element of the input Tensor is converted to an array of bytes. out_type: of the output. Acceptable types are , , , , , , , , . little_endian: Whether the data is in little-endian format. Data will be converted into host byte order if necessary. name: A name for the operation (optional). bytes: Deprecated parameter. Use instead. Returns: A object storing the decoded bytes.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\parsing_ops.py",
    "ast_data": "FunctionDef name:decode_raw_v1 arg:input_bytes arg:out_type arg:little_endian arg:name arg:bytes arguments arg arg arg arg arg Assign Call If Compare Raise Call Return return:yes Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "current_blas_handle",
    "source_code": "def current_blas_handle():\n    _lazy_init()\n    return torch._C._cuda_getCurrentBlasHandle()",
    "docstring": "Return cublasHandle_t pointer to current cuBLAS handle",
    "type": "function",
    "file_path": "pytorch\\torch\\cuda\\__init__.py",
    "ast_data": "FunctionDef name:current_blas_handle arguments Call Return return:yes Call"
  },
  {
    "library": "kornia",
    "name": "forward",
    "source_code": "def forward(self, feat0: Tensor, feat1: Tensor, mask0: None | Tensor=None, mask1: None | Tensor=None) -> tuple[Tensor, Tensor]:\n    if self.d_model != feat0.size(2):\n        msg = 'the feature number of src and transformer must be equal'\n        raise ValueError(msg)\n    for layer, name in zip(self.layers, self.layer_names):\n        if name == 'self':\n            feat0 = layer(feat0, feat0, mask0, mask0)\n            feat1 = layer(feat1, feat1, mask1, mask1)\n        elif name == 'cross':\n            feat0 = layer(feat0, feat1, mask0, mask1)\n            feat1 = layer(feat1, feat0, mask1, mask0)\n        else:\n            raise KeyError\n    return (feat0, feat1)",
    "docstring": "Run forward. Args: feat0: [N, L, C] feat1: [N, S, C] mask0: [N, L] (optional) mask1: [N, S] (optional)",
    "type": "method",
    "file_path": "kornia\\kornia\\feature\\loftr\\loftr_module\\transformer.py",
    "ast_data": "FunctionDef name:forward arg:self arg:feat0 arg:feat1 arg:mask0 arg:mask1 arguments arg arg arg arg arg If Compare Call Assign Raise Call For Call If Compare Assign Call Assign Call If Compare Assign Call Assign Call Raise Return return:yes"
  },
  {
    "library": "django",
    "name": "OneToOneField",
    "source_code": "class OneToOneField(ForeignKey):\n    many_to_many = False\n    many_to_one = False\n    one_to_many = False\n    one_to_one = True\n    related_accessor_class = ReverseOneToOneDescriptor\n    forward_related_accessor_class = ForwardOneToOneDescriptor\n    rel_class = OneToOneRel\n    description = _('One-to-one relationship')\n\n    def __init__(self, to, on_delete, to_field=None, **kwargs):\n        kwargs['unique'] = True\n        super().__init__(to, on_delete, to_field=to_field, **kwargs)\n\n    def deconstruct(self):\n        name, path, args, kwargs = super().deconstruct()\n        if 'unique' in kwargs:\n            del kwargs['unique']\n        return (name, path, args, kwargs)\n\n    def formfield(self, **kwargs):\n        if self.remote_field.parent_link:\n            return None\n        return super().formfield(**kwargs)\n\n    def save_form_data(self, instance, data):\n        if isinstance(data, self.remote_field.model):\n            setattr(instance, self.name, data)\n        else:\n            setattr(instance, self.attname, data)\n            if data is None:\n                setattr(instance, self.name, data)\n\n    def _check_unique(self, **kwargs):\n        return []",
    "docstring": "A OneToOneField is essentially the same as a ForeignKey, with the exception that it always carries a \"unique\" constraint with it and the reverse relation always returns the object pointed to (since there will only ever be one), rather than returning a list.",
    "type": "class",
    "file_path": "django\\django\\db\\models\\fields\\related.py",
    "ast_data": "ClassDef name:OneToOneField Assign Assign Assign Assign Assign Assign Assign Assign Call FunctionDef name:__init__ arg:self arg:to arg:on_delete arg:to_field arguments arg arg arg arg arg Assign Call Call FunctionDef name:deconstruct arg:self arguments arg Assign Call Call If Compare Return return:yes FunctionDef name:formfield arg:self arguments arg arg If Return return:no Return return:yes Call Call FunctionDef name:save_form_data arg:self arg:instance arg:data arguments arg arg arg If Call Call Call If Compare Call FunctionDef name:_check_unique arg:self arguments arg arg Return return:no"
  },
  {
    "library": "pytorch",
    "name": "should_realize_on_reuse",
    "source_code": "def should_realize_on_reuse(self, users):\n    if users > 1 and isinstance(self.data, (Pointwise, Reduction)):\n        if is_cpu(self.data):\n            opcount = self.data.inner_fn_opcount()\n            heavy_ops = ['exp', 'sigmoid']\n            if any((x in opcount.used_ops for x in heavy_ops)):\n                return True\n        return self.num_reads() > config.realize_reads_threshold or self.has_large_inner_fn()\n    return False",
    "docstring": "A heuristic to decide if we should realize a tensor that is used multiple times.",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\ir.py",
    "ast_data": "FunctionDef name:should_realize_on_reuse arg:self arg:users arguments arg arg If BoolOp Compare Call If Call Assign Call Assign If Call Compare Return return:yes Return return:yes BoolOp Compare Call Call Return return:yes"
  },
  {
    "library": "scipy",
    "name": "_root_scalar_ridder_doc",
    "source_code": "def _root_scalar_ridder_doc():\n    pass",
    "docstring": "Options ------- args : tuple, optional Extra arguments passed to the objective function. bracket: A sequence of 2 floats, optional An interval bracketing a root. `` must have different signs at the two endpoints. xtol : float, optional Tolerance (absolute) for termination. rtol : float, optional Tolerance (relative) for termination. maxiter : int, optional Maximum number of iterations. options: dict, optional Specifies any method-specific options not covered above.",
    "type": "function",
    "file_path": "scipy\\scipy\\optimize\\_root_scalar.py",
    "ast_data": "FunctionDef name:_root_scalar_ridder_doc arguments"
  },
  {
    "library": "django",
    "name": "join_parent_model",
    "source_code": "def join_parent_model(self, opts, model, alias, seen):\n    if model in seen:\n        return seen[model]\n    chain = opts.get_base_chain(model)\n    if not chain:\n        return alias\n    curr_opts = opts\n    for int_model in chain:\n        if int_model in seen:\n            curr_opts = int_model._meta\n            alias = seen[int_model]\n            continue\n        if not curr_opts.parents[int_model]:\n            curr_opts = int_model._meta\n            continue\n        link_field = curr_opts.get_ancestor_link(int_model)\n        join_info = self.setup_joins([link_field.name], curr_opts, alias)\n        curr_opts = int_model._meta\n        alias = seen[int_model] = join_info.joins[-1]\n    return alias or seen[None]",
    "docstring": "Make sure the given 'model' is joined in the query. If 'model' isn't a parent of 'opts' or if it is None this method is a no-op. The 'alias' is the root alias for starting the join, 'seen' is a dict of model -> alias of existing joins. It must also contain a mapping of None -> some alias. This will be returned in the no-op case.",
    "type": "method",
    "file_path": "django\\django\\db\\models\\sql\\query.py",
    "ast_data": "FunctionDef name:join_parent_model arg:self arg:opts arg:model arg:alias arg:seen arguments arg arg arg arg arg If Compare Return return:yes Assign Call If Return return:yes Assign For If Compare Assign Assign If Assign Assign Call Assign Call Assign Assign Return return:yes BoolOp"
  },
  {
    "library": "tensorflow",
    "name": "_get_var_info",
    "source_code": "def _get_var_info(var, prev_tensor_name=None):\n    if checkpoint_utils._is_variable(var):\n        current_var_name = _infer_var_name([var])\n    elif isinstance(var, list) and all((checkpoint_utils._is_variable(v) for v in var)):\n        current_var_name = _infer_var_name(var)\n    elif isinstance(var, variables_lib.PartitionedVariable):\n        current_var_name = _infer_var_name([var])\n        var = var._get_variable_list()\n    else:\n        raise TypeError('var MUST be one of the following: a Variable, list of Variable or PartitionedVariable, but is {}'.format(type(var)))\n    if not prev_tensor_name:\n        prev_tensor_name = current_var_name\n    return (prev_tensor_name, var)",
    "docstring": "Helper method for standarizing Variable and naming. Args: var: Current graph's variable that needs to be warm-started (initialized). Can be either of the following: (i) (ii) (iii) list of : The list must contain slices of the same larger variable. (iv) prev_tensor_name: Name of the tensor to lookup in provided . If None, we lookup tensor with same name as given . Returns: A tuple of the Tensor name and var.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\training\\warm_starting_util.py",
    "ast_data": "FunctionDef name:_get_var_info arg:var arg:prev_tensor_name arguments arg arg If Call Assign Call If BoolOp Call Call Call Assign Call If Call Assign Call Assign Call Raise Call Call Call If Assign Return return:yes"
  },
  {
    "library": "scipy",
    "name": "fft_mode",
    "source_code": "@fft_mode.setter\ndef fft_mode(self, t: FFT_MODE_TYPE):\n    if t not in (fft_mode_types := get_args(FFT_MODE_TYPE)):\n        raise ValueError(f\"fft_mode='{t}' not in {fft_mode_types}!\")\n    if t in {'onesided', 'onesided2X'} and np.iscomplexobj(self.win):\n        raise ValueError(f\"One-sided spectra, i.e., fft_mode='{t}', \" + 'are not allowed for complex-valued windows!')\n    if t == 'onesided2X' and self.scaling is None:\n        raise ValueError(f\"For scaling is None, fft_mode='{t}' is invalid!Do scale_to('psd') or scale_to('magnitude')!\")\n    self._fft_mode = t",
    "docstring": "Set mode of FFT. Allowed values are 'twosided', 'centered', 'onesided', 'onesided2X'. See the property for more details.",
    "type": "method",
    "file_path": "scipy\\scipy\\signal\\_short_time_fft.py",
    "ast_data": "FunctionDef name:fft_mode arg:self arg:t arguments arg arg If Compare Call Raise Call If BoolOp Compare Call Raise Call If BoolOp Compare Compare Raise Call Assign"
  },
  {
    "library": "scipy",
    "name": "dense_output",
    "source_code": "def dense_output(self):\n    if self.t_old is None:\n        raise RuntimeError('Dense output is available after a successful step was made.')\n    if self.n == 0 or self.t == self.t_old:\n        return ConstantDenseOutput(self.t_old, self.t, self.y)\n    else:\n        return self._dense_output_impl()",
    "docstring": "Compute a local interpolant over the last successful step. Returns ------- sol : Local interpolant over the last successful step.",
    "type": "method",
    "file_path": "scipy\\scipy\\integrate\\_ivp\\base.py",
    "ast_data": "FunctionDef name:dense_output arg:self arguments arg If Compare Raise Call If BoolOp Compare Compare Return return:yes Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "mm_args",
    "source_code": "def mm_args(mat1, mat2, *others, layout=None, out_dtype=None, use_4x2_dim=False, mat2_transposed=False):\n    mat1, mat2 = realize_inputs(mat1, mat2)\n    *b1, m, k1 = mat1.get_size()\n    if mat2_transposed:\n        *b2, n, k2 = mat2.get_size()\n    else:\n        *b2, k2, n = mat2.get_size()\n    b = [V.graph.sizevars.guard_equals(a, b) for a, b in zip(b1, b2)]\n    if use_4x2_dim:\n        k2 = k2 * 2\n    k = V.graph.sizevars.guard_equals(k1, k2)\n    if layout is None:\n        from torch._inductor.ir import FixedLayout\n        if out_dtype is None:\n            out_dtype = mat1.get_dtype()\n        layout = FixedLayout(mat1.get_device(), out_dtype, [*b, m, n])\n    else:\n        assert out_dtype is None, 'out_dtype is ignored if layout is specified.'\n    from ..lowering import expand\n    others = [realize_inputs(expand(x, layout.size)) for x in others]\n    return [m, n, k, layout, mat1, mat2, *others]",
    "docstring": "Common arg processing for mm,bmm,addmm,etc",
    "type": "function",
    "file_path": "pytorch\\torch\\_inductor\\kernel\\mm_common.py",
    "ast_data": "FunctionDef name:mm_args arg:mat1 arg:mat2 arguments arg arg arg arg arg arg arg Assign Call Assign Call If Assign Call Assign Call Assign Call Call If Assign Assign Call If Compare If Compare Assign Call Assign Call Call Compare Assign Call Call Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "setup",
    "source_code": "def setup(self, *params):\n    if self.skip(params):\n        raise NotImplementedError\n    self.X, self.X_val, self.y, self.y_val = self.make_data(params)\n    est_path = get_estimator_path(self, Benchmark.save_dir, params, Benchmark.save_estimators)\n    with est_path.open(mode='rb') as f:\n        self.estimator = pickle.load(f)\n    self.make_scorers()",
    "docstring": "Generate dataset and load the fitted estimator",
    "type": "method",
    "file_path": "scikit-learn\\asv_benchmarks\\benchmarks\\common.py",
    "ast_data": "FunctionDef name:setup arg:self arguments arg arg If Call Raise Assign Call Assign Call With Call Assign Call Call"
  },
  {
    "library": "matplotlib",
    "name": "get_title",
    "source_code": "def get_title(self, loc='center'):\n    titles = {'left': self._left_title, 'center': self.title, 'right': self._right_title}\n    title = _api.check_getitem(titles, loc=loc.lower())\n    return title.get_text()",
    "docstring": "Get an Axes title. Get one of the three available Axes titles. The available titles are positioned above the Axes in the center, flush with the left edge, and flush with the right edge. Parameters ---------- loc : {'center', 'left', 'right'}, str, default: 'center' Which title to return. Returns ------- str The title text string.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\axes\\_axes.py",
    "ast_data": "FunctionDef name:get_title arg:self arg:loc arguments arg arg Assign Assign Call Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "get_eager_safe_handle_data",
    "source_code": "def get_eager_safe_handle_data(handle):\n    assert isinstance(handle, tensor_module.Tensor)\n    if isinstance(handle, ops.EagerTensor):\n        return handle._handle_data\n    else:\n        return get_resource_handle_data(handle)",
    "docstring": "Get the data handle from the Tensor .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\resource_variable_ops.py",
    "ast_data": "FunctionDef name:get_eager_safe_handle_data arg:handle arguments arg Call If Call Return return:yes Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_restore_slice",
    "source_code": "def _restore_slice(file_pattern, tensor_name, shape_and_slice, tensor_type, name='restore_slice', preferred_shard=-1):\n    base_type = dtypes.as_dtype(tensor_type).base_dtype\n    return gen_io_ops.restore_slice(file_pattern, tensor_name, shape_and_slice, base_type, preferred_shard, name=name)",
    "docstring": "Restore a tensor slice from a set of files with a given pattern. Example usage: RestoreSlice(\"/foo/bar-?????-of-?????\", \"w\", \"10 10 0,2:-\", DT_FLOAT) Args: file_pattern: the file pattern used to match a set of checkpoint files. tensor_name: the name of the tensor to restore. shape_and_slice: the shape-and-slice spec of the slice. tensor_type: the type of the tensor to restore. name: string. Optional name for the op. preferred_shard: Int. Optional shard to open first in the checkpoint file. Returns: A tensor of type \"tensor_type\".",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\io_ops.py",
    "ast_data": "FunctionDef name:_restore_slice arg:file_pattern arg:tensor_name arg:shape_and_slice arg:tensor_type arg:name arg:preferred_shard arguments arg arg arg arg arg arg Assign Call Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "get_feature_names_out",
    "source_code": "def get_feature_names_out(self, input_features=None):\n    check_is_fitted(self, 'n_features_in_')\n    _check_feature_names_in(self, input_features, generate_names=False)\n    class_name = self.__class__.__name__.lower()\n    return np.asarray([f'{class_name}_{name}' for name, est in self.estimators if est != 'drop'], dtype=object)",
    "docstring": "Get output feature names for transformation. Parameters ---------- input_features : array-like of str or None, default=None Not used, present here for API consistency by convention. Returns ------- feature_names_out : ndarray of str objects Transformed feature names.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\ensemble\\_voting.py",
    "ast_data": "FunctionDef name:get_feature_names_out arg:self arg:input_features arguments arg arg Call Call Assign Call Return return:yes Call Compare"
  },
  {
    "library": "tensorflow",
    "name": "fresnel_sin",
    "source_code": "@tf_export('math.special.fresnel_sin')\n@dispatch.register_unary_elementwise_api\n@dispatch.add_dispatch_support\ndef fresnel_sin(x, name=None):\n    with ops.name_scope(name, 'fresnel_sin', [x]):\n        return gen_special_math_ops.fresnel_sin(x)",
    "docstring": "Computes Fresnel's sine integral of element-wise. The Fresnel sine integral is defined as the integral of from to , with the domain of definition all real numbers. >>> tf.math.special.fresnel_sin([-1., -0.1, 0.1, 1.]).numpy() array([-0.43825912, -0.00052359, 0.00052359, 0.43825912], dtype=float32) This implementation is based off of the Cephes math library. Args: x: A or . Must be one of the following types: , . name: A name for the operation (optional). Returns: A or , respectively. Has the same type as . @compatibility(scipy) Equivalent to scipy.special.fresnel first output. @end_compatibility",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\special_math_ops.py",
    "ast_data": "FunctionDef name:fresnel_sin arg:x arg:name arguments arg arg With Call Return return:yes Call Call"
  },
  {
    "library": "numpy",
    "name": "identity",
    "source_code": "@finalize_array_function_like\n@set_module('numpy')\ndef identity(n, dtype=None, *, like=None):\n    if like is not None:\n        return _identity_with_like(like, n, dtype=dtype)\n    from numpy import eye\n    return eye(n, dtype=dtype, like=like)",
    "docstring": "Return the identity array. The identity array is a square array with ones on the main diagonal. Parameters ---------- n : int Number of rows (and columns) in x output. dtype : data-type, optional Data-type of the output. Defaults to `nn` array with its main diagonal set to one, and all other elements 0. Examples -------- >>> import numpy as np >>> np.identity(3) array([[1., 0., 0.], [0., 1., 0.], [0., 0., 1.]])",
    "type": "function",
    "file_path": "numpy\\numpy\\_core\\numeric.py",
    "ast_data": "FunctionDef name:identity arg:n arg:dtype arguments arg arg arg If Compare Return return:yes Call Return return:yes Call Call"
  },
  {
    "library": "pandas",
    "name": "_fill_non_empty_info",
    "source_code": "def _fill_non_empty_info(self) -> None:\n    self.add_object_type_line()\n    self.add_index_range_line()\n    self.add_columns_summary_line()\n    self.add_dtypes_line()\n    if self.display_memory_usage:\n        self.add_memory_usage_line()",
    "docstring": "Add lines to the info table, pertaining to non-empty dataframe.",
    "type": "method",
    "file_path": "pandas\\pandas\\io\\formats\\info.py",
    "ast_data": "FunctionDef name:_fill_non_empty_info arg:self arguments arg Call Call Call Call If Call"
  },
  {
    "library": "pytorch",
    "name": "_all_gather_keys",
    "source_code": "def _all_gather_keys(local_dict: dict[str, Any], group: Optional[dist.ProcessGroup]=None) -> set[str]:\n    keys = list(local_dict.keys())\n    gathered_keys: list[list[str]] = [None] * dist.get_world_size(group)\n    dist.all_gather_object(gathered_keys, keys, group=group)\n    return set(itertools.chain.from_iterable(gathered_keys))",
    "docstring": "Gathers all keys, and returns them sorted.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\checkpoint\\utils.py",
    "ast_data": "FunctionDef name:_all_gather_keys arg:local_dict arg:group arguments arg arg Assign Call Call Call Call Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_all_reduce_per_replica_values",
    "source_code": "def _all_reduce_per_replica_values(self, reduce_op, per_replica_values, options):\n    values_by_device = [[] for _ in self._devices]\n    num_devices = len(self._devices)\n    for per_replica in per_replica_values:\n        for i in range(num_devices):\n            values_by_device[i].append(per_replica.values[i])\n    if context.executing_eagerly():\n\n        def thread_fn(device_id):\n            with context.eager_mode():\n                return self._all_reduce(reduce_op, values_by_device[device_id], device_id, options)\n        with self._lock:\n            pool = multiprocessing.pool.ThreadPool(len(self._devices))\n            outputs_by_device = pool.map(thread_fn, list(range(num_devices)))\n            pool.close()\n    else:\n        outputs_by_device = []\n        with self._lock:\n            for i in range(num_devices):\n                outputs_by_device.append(self._all_reduce(reduce_op, values_by_device[i], i, options))\n    result = []\n    for values in zip(*outputs_by_device):\n        result.append(distribute_utils.regroup(values, wrap_class=value_lib.Mirrored))\n    return result",
    "docstring": "All reduce a list of per_replica_value.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\cross_device_ops.py",
    "ast_data": "FunctionDef name:_all_reduce_per_replica_values arg:self arg:reduce_op arg:per_replica_values arg:options arguments arg arg arg arg Assign Assign Call For For Call Call If Call FunctionDef name:thread_fn arg:device_id arguments arg With Call Return return:yes Call With Assign Call Call Assign Call Call Call Call Assign With For Call Call Call Assign For Call Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "get_registered_object",
    "source_code": "def get_registered_object(name, custom_objects=None, module_objects=None):\n    if name in _GLOBAL_CUSTOM_OBJECTS:\n        return _GLOBAL_CUSTOM_OBJECTS[name]\n    elif custom_objects and name in custom_objects:\n        return custom_objects[name]\n    elif module_objects and name in module_objects:\n        return module_objects[name]\n    return None",
    "docstring": "Returns the class associated with if it is registered with Keras. This function is part of the Keras serialization and deserialization framework. It maps strings to the objects associated with them for serialization/deserialization. Example: Args: name: The name to look up. custom_objects: A dictionary of custom objects to look the name up in. Generally, custom_objects is provided by the user. module_objects: A dictionary of custom objects to look the name up in. Generally, module_objects is provided by midlevel library implementers. Returns: An instantiable class associated with 'name', or None if no such class exists.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\utils\\generic_utils.py",
    "ast_data": "FunctionDef name:get_registered_object arg:name arg:custom_objects arg:module_objects arguments arg arg arg If Compare Return return:yes If BoolOp Compare Return return:yes If BoolOp Compare Return return:yes Return return:no"
  },
  {
    "library": "pytorch",
    "name": "template_dir_for_comments",
    "source_code": "def template_dir_for_comments(self) -> str:\n    return os.path.relpath(self.template_dir, os.path.dirname(__file__))",
    "docstring": "This needs to be deterministic. The template dir is an absolute path that varies across builds. So, just use the path relative to this file, which will point to the codegen source but will be stable.",
    "type": "method",
    "file_path": "pytorch\\torchgen\\utils.py",
    "ast_data": "FunctionDef name:template_dir_for_comments arg:self arguments arg Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "horizontal_partition",
    "source_code": "@staticmethod\ndef horizontal_partition(nodes: list[BaseSchedulerNode], triton_scheduling: SIMDScheduling, kernel_map: dict[BaseSchedulerNode, TritonKernel], node_info_map: dict[BaseSchedulerNode, tuple[Any, Any, Any, Any]], custom_algorithm: bool=False) -> list[list[BaseSchedulerNode]]:\n    if custom_algorithm:\n        raw_partitions = _custom_combo_kernel_horizontal_partition_algorithm(nodes, triton_scheduling, kernel_map, node_info_map)\n    else:\n        raw_partitions = [nodes]\n    'Generates a list of lists of node info tuples which consist of (fused_nodes, tiling, numel, rnumel)\\n        for each subkernel node where each sublist is guaranteed to not exceed CUDA limits for number of args\\n        (read/writes) and to have the same 2D or 1D blocking strategy.'\n    all_partitions = []\n    for raw_partition in raw_partitions:\n        all_partitions.extend(ComboKernel._base_horizontal_partition(raw_partition, triton_scheduling, node_info_map, custom_algorithm))\n    return all_partitions",
    "docstring": "Generates a list of lists of node info tuples which consist of (fused_nodes, tiling, numel, rnum) for each subkernel node where each sublist forms a ComboKernel. It horizontally partitions nodes into sublists in the following way: 1) call _custom_combo_kernel_horizontal_partition_algorithm() if custom_algorithm is True 2) then, call _base_horizontal_partition() to partition nodes into sublists, each sublist is guaranteed to not exceed CUDA limits for number of args (read/writes) and to have the same 2D or 1D blocking strategy.",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\codegen\\triton_combo_kernel.py",
    "ast_data": "FunctionDef name:horizontal_partition arg:nodes arg:triton_scheduling arg:kernel_map arg:node_info_map arg:custom_algorithm arguments arg arg arg arg arg If Assign Call Assign Assign For Call Call Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "_ArrayLikes",
    "source_code": "class _ArrayLikes(_Constraint):\n\n    def is_satisfied_by(self, val):\n        return _is_arraylike_not_scalar(val)\n\n    def __str__(self):\n        return 'an array-like'",
    "docstring": "Constraint representing array-likes",
    "type": "class",
    "file_path": "scikit-learn\\sklearn\\utils\\_param_validation.py",
    "ast_data": "ClassDef name:_ArrayLikes FunctionDef name:is_satisfied_by arg:self arg:val arguments arg arg Return return:yes Call FunctionDef name:__str__ arg:self arguments arg Return return:yes"
  },
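The constraint above delegates to a private sklearn helper. A minimal self-contained sketch of the same pattern, with a stand-in predicate assumed for `_is_arraylike_not_scalar` (the real one lives in `sklearn.utils.validation`):

```python
import numpy as np

# Assumed stand-in: array-like means it quacks like an array and is not 0-d.
def _is_arraylike_not_scalar(x):
    arraylike = (
        hasattr(x, "__len__") or hasattr(x, "shape") or hasattr(x, "__array__")
    )
    return arraylike and np.ndim(x) != 0

class _ArrayLikes:
    def is_satisfied_by(self, val):
        return _is_arraylike_not_scalar(val)

    def __str__(self):
        return "an array-like"

constraint = _ArrayLikes()
print(constraint.is_satisfied_by([1, 2, 3]))      # True
print(constraint.is_satisfied_by(np.float64(5)))  # False: a 0-d scalar
```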
  {
    "library": "tensorflow",
    "name": "prod",
    "source_code": "@dispatch.add_dispatch_support\n@doc_controls.do_not_generate_docs\ndef prod(x, axis=None, keepdims=False):\n    return math_ops.reduce_prod(x, axis, keepdims)",
    "docstring": "Multiplies the values in a tensor, alongside the specified axis. Args: x: A tensor or variable. axis: An integer, the axis to compute the product. keepdims: A boolean, whether to keep the dimensions or not. If is , the rank of the tensor is reduced by 1. If is , the reduced dimension is retained with length 1. Returns: A tensor with the product of elements of .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\backend.py",
    "ast_data": "FunctionDef name:prod arg:x arg:axis arg:keepdims arguments arg arg arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, export_dir):\n    self._export_dir = export_dir\n    self._saved_model = loader.parse_saved_model(export_dir)",
    "docstring": "Creates an MethodNameUpdater object. Args: export_dir: Directory containing the SavedModel files. Raises: IOError: If the saved model file does not exist, or cannot be successfully parsed.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\saved_model\\method_name_updater.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:export_dir arguments arg arg Assign Assign Call"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, atomic_fn: atomic_function.AtomicFunction, func_graph_deleter):\n    self._cached_function_pairs = {}\n    self._func_graph = atomic_fn.graph\n    self._inference_function = atomic_fn\n    self._attrs = atomic_fn.attributes\n    self._gradient_name = None\n    self._num_inference_outputs = len(self._func_graph.outputs)\n    self._func_graph_deleter = func_graph_deleter",
    "docstring": "Construct an inference function and initialize caches.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\polymorphic_function\\concrete_function.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:atomic_fn arg:func_graph_deleter arguments arg arg arg Assign Assign Assign Assign Assign Assign Call Assign"
  },
  {
    "library": "django",
    "name": "_tx_resource_for_name",
    "source_code": "def _tx_resource_for_name(name):\n    return 'django.' + _tx_resource_slug_for_name(name)",
    "docstring": "Return the Transifex resource name.",
    "type": "function",
    "file_path": "django\\scripts\\manage_translations.py",
    "ast_data": "FunctionDef name:_tx_resource_for_name arg:name arguments arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "_init_streams",
    "source_code": "@no_type_check\ndef _init_streams(state: _FSDPState) -> None:\n    assert state._is_root\n    assert state._device_handle.is_available()\n    uses_hybrid_sharding = any((fsdp_state.sharding_strategy in HYBRID_SHARDING_STRATEGIES for fsdp_state in state._all_fsdp_states))\n    high_priority = -1 if state.limit_all_gathers and uses_hybrid_sharding else 0\n    state._default_stream = state._device_handle.current_stream()\n    if state._fsdp_extension is not None:\n        state._fsdp_extension.compute_stream = state._default_stream\n    state._unshard_stream = state._device_handle.Stream(priority=high_priority)\n    state._post_backward_stream = state._device_handle.Stream(priority=high_priority)\n    state._pre_unshard_stream = state._device_handle.Stream(priority=high_priority)\n    state._all_reduce_stream = state._device_handle.Stream() if uses_hybrid_sharding else state._default_stream",
    "docstring": "Initializes CUDA streams for overlapping communication, computation, and data transfers. The streams should be shared across FSDP instances.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\fsdp\\_runtime_utils.py",
    "ast_data": "FunctionDef name:_init_streams arg:state arguments arg Call Assign Call Compare Assign BoolOp Assign Call If Compare Assign Assign Call Assign Call Assign Call Assign Call"
  },
  {
    "library": "pytorch",
    "name": "DeferredLineBase",
    "source_code": "class DeferredLineBase:\n\n    def __init__(self, line: str):\n        if not line.strip():\n            line = ''\n        self.line = line\n\n    def __call__(self) -> Union[str, None]:\n        raise NotImplementedError\n\n    def _new_line(self, line: str) -> Self:\n        raise NotImplementedError\n\n    def with_prefix(self, prefix: str) -> Self:\n        return self._new_line(f'{prefix}{self.line}')\n\n    def lstrip(self) -> Self:\n        return self._new_line(self.line.lstrip())\n\n    def __getitem__(self, index: Union[int, slice]) -> Self:\n        return self._new_line(self.line[index])\n\n    def __bool__(self) -> bool:\n        return bool(self.line)\n\n    def __len__(self) -> int:\n        return len(self.line)",
    "docstring": "A line that can be 'unwritten' at a later time",
    "type": "class",
    "file_path": "pytorch\\torch\\_inductor\\utils.py",
    "ast_data": "ClassDef name:DeferredLineBase FunctionDef name:__init__ arg:self arg:line arguments arg arg If Call Assign Assign FunctionDef name:__call__ arg:self arguments arg Raise FunctionDef name:_new_line arg:self arg:line arguments arg arg Raise FunctionDef name:with_prefix arg:self arg:prefix arguments arg arg Return return:yes Call FunctionDef name:lstrip arg:self arguments arg Return return:yes Call Call FunctionDef name:__getitem__ arg:self arg:index arguments arg arg Return return:yes Call FunctionDef name:__bool__ arg:self arguments arg Return return:yes Call FunctionDef name:__len__ arg:self arguments arg Return return:yes Call"
  },
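`DeferredLineBase` leaves `__call__` and `_new_line` abstract. A hedged sketch of a concrete subclass, with a hypothetical guard flag deciding at write-out time whether the line survives:

```python
from typing import Union

class DeferredLineBase:
    def __init__(self, line: str):
        if not line.strip():
            line = ""
        self.line = line

    def __call__(self) -> Union[str, None]:
        raise NotImplementedError

    def _new_line(self, line: str) -> "DeferredLineBase":
        raise NotImplementedError

class GuardedLine(DeferredLineBase):
    """Hypothetical subclass: emits its text only while a guard is live."""

    def __init__(self, line: str, guard: dict):
        super().__init__(line)
        self.guard = guard

    def _new_line(self, line: str) -> "GuardedLine":
        return GuardedLine(line, self.guard)

    def __call__(self) -> Union[str, None]:
        # 'Unwrite' the line if the guard was cleared in the meantime.
        return self.line if self.guard.get("live", True) else None

guard = {"live": True}
line = GuardedLine("x = alloc();", guard)
guard["live"] = False
print(line())  # None -> the line is dropped at write-out time
```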
  {
    "library": "pytorch",
    "name": "RegistrationHandle",
    "source_code": "class RegistrationHandle:\n\n    def __init__(self, on_destroy: Callable):\n        self._on_destroy = on_destroy\n\n    def destroy(self) -> None:\n        self._on_destroy()",
    "docstring": "Does something when someone calls .destroy() on it",
    "type": "class",
    "file_path": "pytorch\\torch\\_library\\utils.py",
    "ast_data": "ClassDef name:RegistrationHandle FunctionDef name:__init__ arg:self arg:on_destroy arguments arg arg Assign FunctionDef name:destroy arg:self arguments arg Call"
  },
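A minimal usage sketch for the handle pattern above; the `registry` dict and `my_op` key are illustrative only:

```python
from typing import Callable

class RegistrationHandle:
    def __init__(self, on_destroy: Callable):
        self._on_destroy = on_destroy

    def destroy(self) -> None:
        self._on_destroy()

# Tie a cleanup callback to the handle, then trigger it.
registry = {"my_op": lambda x: x + 1}
handle = RegistrationHandle(lambda: registry.pop("my_op"))
handle.destroy()
print(registry)  # {} -> the registration was removed
```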
  {
    "library": "cherrypy",
    "name": "ModPythonServer",
    "source_code": "class ModPythonServer(object):\n    template = '\\n# Apache2 server configuration file for running CherryPy with mod_python.\\n\\nDocumentRoot \"/\"\\nListen %(port)s\\nLoadModule python_module modules/mod_python.so\\n\\n<Location %(loc)s>\\n    SetHandler python-program\\n    PythonHandler %(handler)s\\n    PythonDebug On\\n%(opts)s\\n</Location>\\n'\n\n    def __init__(self, loc='/', port=80, opts=None, apache_path='apache', handler='cherrypy._cpmodpy::handler'):\n        self.loc = loc\n        self.port = port\n        self.opts = opts\n        self.apache_path = apache_path\n        self.handler = handler\n\n    def start(self):\n        opts = ''.join(['    PythonOption %s %s\\n' % (k, v) for k, v in self.opts])\n        conf_data = self.template % {'port': self.port, 'loc': self.loc, 'opts': opts, 'handler': self.handler}\n        mpconf = os.path.join(os.path.dirname(__file__), 'cpmodpy.conf')\n        with open(mpconf, 'wb') as f:\n            f.write(conf_data)\n        response = read_process(self.apache_path, '-k start -f %s' % mpconf)\n        self.ready = True\n        return response\n\n    def stop(self):\n        os.popen('apache -k stop')\n        self.ready = False",
    "docstring": "A server wrapper for ``.",
    "type": "class",
    "file_path": "cherrypy\\cherrypy\\_cpmodpy.py",
    "ast_data": "ClassDef name:ModPythonServer Assign FunctionDef name:__init__ arg:self arg:loc arg:port arg:opts arg:apache_path arg:handler arguments arg arg arg arg arg arg Assign Assign Assign Assign Assign FunctionDef name:start arg:self arguments arg Assign Call Assign Assign Call Call With Call Call Assign Call Assign Return return:yes FunctionDef name:stop arg:self arguments arg Call Assign"
  },
  {
    "library": "tensorflow",
    "name": "finalize",
    "source_code": "def finalize(self) -> None:\n    self._finalized = True",
    "docstring": "Finalizes this graph, making it read-only. After calling , no new operations can be added to . This method is used to ensure that no operations are added to a graph when it is shared between multiple threads, for example when using a .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\ops.py",
    "ast_data": "FunctionDef name:finalize arg:self arguments arg Assign"
  },
  {
    "library": "authlib",
    "name": "authenticate_token_endpoint_client",
    "source_code": "def authenticate_token_endpoint_client(self):\n    client = self.server.authenticate_client(self.request, self.TOKEN_ENDPOINT_AUTH_METHODS)\n    self.server.send_signal('after_authenticate_client', client=client, grant=self)\n    return client",
    "docstring": "Authenticate client with the given methods for token endpoint. For example, the client makes the following HTTP request using TLS: .. code-block:: http POST /token HTTP/1.1 Host: server.example.com Authorization: Basic czZCaGRSa3F0MzpnWDFmQmF0M2JW Content-Type: application/x-www-form-urlencoded grant_type=authorization_code&code=SplxlOBeZQQYbYS6WxSbIA &redirect_uri=https%3A%2F%2Fclient%2Eexample%2Ecom%2Fcb Default available methods are: \"none\", \"client_secret_basic\" and \"client_secret_post\". :return: client",
    "type": "method",
    "file_path": "authlib\\authlib\\oauth2\\rfc6749\\grants\\base.py",
    "ast_data": "FunctionDef name:authenticate_token_endpoint_client arg:self arguments arg Assign Call Call Return return:yes"
  },
  {
    "library": "pandas",
    "name": "get_chunks",
    "source_code": "def get_chunks(self, n_chunks: int | None=None) -> Iterable[PandasDataFrameXchg]:\n    if n_chunks and n_chunks > 1:\n        size = len(self._df)\n        step = size // n_chunks\n        if size % n_chunks != 0:\n            step += 1\n        for start in range(0, step * n_chunks, step):\n            yield PandasDataFrameXchg(self._df.iloc[start:start + step, :], allow_copy=self._allow_copy)\n    else:\n        yield self",
    "docstring": "Return an iterator yielding the chunks.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\interchange\\dataframe.py",
    "ast_data": "FunctionDef name:get_chunks arg:self arg:n_chunks arguments arg arg If BoolOp Compare Assign Call Assign If Compare For Call Call"
  },
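The chunking arithmetic in `get_chunks` rounds the step up when the row count is not divisible by `n_chunks`. A standalone sketch of just that arithmetic, with a hypothetical `chunk_bounds` helper:

```python
# Split `size` rows into `n_chunks` nearly-equal slices; the step is
# rounded up when there is a remainder, so the last chunk may be short.
def chunk_bounds(size: int, n_chunks: int):
    step = size // n_chunks
    if size % n_chunks != 0:
        step += 1
    for start in range(0, step * n_chunks, step):
        yield start, min(start + step, size)

print(list(chunk_bounds(10, 3)))  # [(0, 4), (4, 8), (8, 10)]
```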
  {
    "library": "tensorflow",
    "name": "_get_combined_properties",
    "source_code": "def _get_combined_properties(self, dev):\n    return (dev.job if dev.job is not None else self.job, dev.replica if dev.replica is not None else self.replica, dev.task if dev.task is not None else self.task, dev.device_type if dev.device_type is not None else self.device_type, dev.device_index if dev.device_index is not None else self.device_index)",
    "docstring": "Combine the current DeviceSpec with another DeviceSpec. The combination of DeviceSpecs is will give priority to dev. Args: dev: a Returns: A tuple of (job, replica, task, device_type, device_index) which represents the combination of self and dev.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\device_spec.py",
    "ast_data": "FunctionDef name:_get_combined_properties arg:self arg:dev arguments arg arg Return return:yes Compare Compare Compare Compare Compare"
  },
  {
    "library": "scipy",
    "name": "_major_index_fancy",
    "source_code": "def _major_index_fancy(self, idx):\n    idx_dtype = self._get_index_dtype((self.indptr, self.indices))\n    indices = np.asarray(idx, dtype=idx_dtype).ravel()\n    N = self._swap(self._shape_as_2d)[1]\n    M = len(indices)\n    new_shape = self._swap((M, N)) if self.ndim == 2 else (M,)\n    if M == 0:\n        return self.__class__(new_shape, dtype=self.dtype)\n    row_nnz = (self.indptr[indices + 1] - self.indptr[indices]).astype(idx_dtype)\n    res_indptr = np.zeros(M + 1, dtype=idx_dtype)\n    np.cumsum(row_nnz, out=res_indptr[1:])\n    nnz = res_indptr[-1]\n    res_indices = np.empty(nnz, dtype=idx_dtype)\n    res_data = np.empty(nnz, dtype=self.dtype)\n    csr_row_index(M, indices, self.indptr.astype(idx_dtype, copy=False), self.indices.astype(idx_dtype, copy=False), self.data, res_indices, res_data)\n    return self.__class__((res_data, res_indices, res_indptr), shape=new_shape, copy=False)",
    "docstring": "Index along the major axis where idx is an array of ints.",
    "type": "method",
    "file_path": "scipy\\scipy\\sparse\\_compressed.py",
    "ast_data": "FunctionDef name:_major_index_fancy arg:self arg:idx arguments arg arg Assign Call Assign Call Call Assign Call Assign Call Assign Compare Call If Compare Return return:yes Call Assign Call Assign Call Call Assign Assign Call Assign Call Call Call Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "MutationSentinel",
    "source_code": "class MutationSentinel(object):\n    _in_cached_state = False\n\n    def mark_as(self, value):\n        may_affect_upstream = value != self._in_cached_state\n        self._in_cached_state = value\n        return may_affect_upstream\n\n    @property\n    def in_cached_state(self):\n        return self._in_cached_state",
    "docstring": "Container for tracking whether a property is in a cached state.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\trackable\\layer_utils.py",
    "ast_data": "ClassDef name:MutationSentinel Assign FunctionDef name:mark_as arg:self arg:value arguments arg arg Assign Compare Assign Return return:yes FunctionDef name:in_cached_state arg:self arguments arg Return return:yes"
  },
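A short usage sketch of the sentinel above: `mark_as` returns True only when the call actually flips the cached state, so callers can skip redundant upstream invalidation:

```python
class MutationSentinel:
    _in_cached_state = False

    def mark_as(self, value):
        may_affect_upstream = value != self._in_cached_state
        self._in_cached_state = value
        return may_affect_upstream

s = MutationSentinel()
print(s.mark_as(True))   # True  -> state changed, invalidate upstream
print(s.mark_as(True))   # False -> no-op, caches stay valid
```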
  {
    "library": "django",
    "name": "binary_placeholder_sql",
    "source_code": "def binary_placeholder_sql(self, value):\n    return '%s'",
    "docstring": "Some backends require special syntax to insert binary content (MySQL for example uses '_binary %s').",
    "type": "method",
    "file_path": "django\\django\\db\\backends\\base\\operations.py",
    "ast_data": "FunctionDef name:binary_placeholder_sql arg:self arg:value arguments arg arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "run_benchmark_with_only_cpp_iterations",
    "source_code": "def run_benchmark_with_only_cpp_iterations(self, dataset):\n    dataset = dataset.skip(self.iters - 1)\n    iterator = dataset_ops.make_initializable_iterator(dataset)\n    next_element = iterator.get_next()\n    with session.Session() as sess:\n        deltas = []\n        for _ in range(self.num_reps):\n            sess.run(iterator.initializer)\n            deltas.append(timeit.timeit(lambda: sess.run(next_element.op), number=1))\n    self.report(deltas)",
    "docstring": "Benchmarks the dataset with the iterations performed in C++.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\data\\benchmarks\\meta_benchmark.py",
    "ast_data": "FunctionDef name:run_benchmark_with_only_cpp_iterations arg:self arg:dataset arguments arg arg Assign Call Assign Call Assign Call With Call Assign For Call Call Call Call arguments Call Call"
  },
  {
    "library": "django",
    "name": "__repr__",
    "source_code": "def __repr__(self):\n    path = '%s.%s' % (self.__class__.__module__, self.__class__.__qualname__)\n    name = getattr(self, 'name', None)\n    if name is not None:\n        return '<%s: %s>' % (path, name)\n    return '<%s>' % path",
    "docstring": "Display the module, class, and name of the field.",
    "type": "method",
    "file_path": "django\\django\\db\\models\\fields\\__init__.py",
    "ast_data": "FunctionDef name:__repr__ arg:self arguments arg Assign Assign Call If Compare Return return:yes Return return:yes"
  },
  {
    "library": "numpy",
    "name": "_get_dtype_of",
    "source_code": "def _get_dtype_of(obj):\n    if isinstance(obj, np.dtype):\n        return obj\n    elif hasattr(obj, 'dtype'):\n        return obj.dtype\n    else:\n        return np.asanyarray(obj).dtype",
    "docstring": "Convert the argument for *_fill_value into a dtype",
    "type": "function",
    "file_path": "numpy\\numpy\\ma\\core.py",
    "ast_data": "FunctionDef name:_get_dtype_of arg:obj arguments arg If Call Return return:yes If Call Return return:yes Return return:yes Call"
  },
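A sketch exercising the three dispatch branches of `_get_dtype_of` (body copied verbatim from the entry above, so it runs standalone):

```python
import numpy as np

def _get_dtype_of(obj):
    if isinstance(obj, np.dtype):
        return obj            # already a dtype
    elif hasattr(obj, "dtype"):
        return obj.dtype      # array-like carrying a dtype
    else:
        return np.asanyarray(obj).dtype  # coerce and inspect

print(_get_dtype_of(np.dtype("f4")))     # float32 (first branch)
print(_get_dtype_of(np.zeros(3, "i8")))  # int64   (second branch)
print(_get_dtype_of([1.0, 2.0]))         # float64 (third branch)
```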
  {
    "library": "pytorch",
    "name": "_common_pre_state_dict_hook",
    "source_code": "def _common_pre_state_dict_hook(module: nn.Module, fsdp_state: _FSDPState) -> None:\n    if fsdp_state._device_handle.is_available():\n        fsdp_state._device_handle.synchronize()\n    _lazy_init(fsdp_state, module)\n    if fsdp_state._is_root:\n        _reset_flat_param_grad_info_if_needed(fsdp_state._all_handles)",
    "docstring": "Performs the pre-state_dict tasks shared by all state_dict types.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\fsdp\\_state_dict_utils.py",
    "ast_data": "FunctionDef name:_common_pre_state_dict_hook arg:module arg:fsdp_state arguments arg arg If Call Call Call If Call"
  },
  {
    "library": "django",
    "name": "start_transaction_sql",
    "source_code": "def start_transaction_sql(self):\n    return 'BEGIN;'",
    "docstring": "Return the SQL statement required to start a transaction.",
    "type": "method",
    "file_path": "django\\django\\db\\backends\\base\\operations.py",
    "ast_data": "FunctionDef name:start_transaction_sql arg:self arguments arg Return return:yes"
  },
  {
    "library": "scipy",
    "name": "checkbreak_unc",
    "source_code": "def checkbreak_unc(maxfun, nf, f, ftarget, x):\n    info = INFO_DEFAULT\n    srname = 'CHECKbreak_UNC'\n    assert INFO_DEFAULT not in [NAN_INF_X, NAN_INF_F, FTARGET_ACHIEVED, MAXFUN_REACHED], f'NAN_INF_X, NAN_INF_F, FTARGET_ACHIEVED, and MAXFUN_REACHED differ from INFO_DFT {srname}'\n    assert not any(np.isnan(x)), f'X does not contain NaN {srname}'\n    assert not (any(np.isnan(f)) or any(np.isposinf(f))), f'F is not NaN/+Inf {srname}'\n    if any(np.isnan(x)) or any(np.isinf(x)):\n        info = NAN_INF_X\n    if any(np.isnan(f)) or any(np.isposinf(f)):\n        info = NAN_INF_F\n    if f <= ftarget:\n        info = FTARGET_ACHIEVED\n    if nf >= maxfun:\n        info = MAXFUN_REACHED\n    return info",
    "docstring": "This module checks whether to break out of the solver loop in the unconstrained case.",
    "type": "function",
    "file_path": "scipy\\scipy\\_lib\\pyprima\\pyprima\\src\\pyprima\\common\\checkbreak.py",
    "ast_data": "FunctionDef name:checkbreak_unc arg:maxfun arg:nf arg:f arg:ftarget arg:x arguments arg arg arg arg arg Assign Assign Compare Call Call BoolOp Call Call Call Call If BoolOp Call Call Call Call Assign If BoolOp Call Call Call Call Assign If Compare Assign If Compare Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "dense_shape",
    "source_code": "@property\ndef dense_shape(self):\n    return self._dense_shape",
    "docstring": "A 1-D containing the shape of the corresponding dense tensor.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\indexed_slices.py",
    "ast_data": "FunctionDef name:dense_shape arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "add_gradient_functions_to_graph",
    "source_code": "def add_gradient_functions_to_graph(self, g=None):\n    if not context.executing_eagerly() and (not g):\n        g = ops.get_default_graph()\n    g._add_function_recursive(self._delayed_rewrite_functions.forward())\n    forward_function, backward_function = self._delayed_rewrite_functions.forward_backward()\n    g._add_function_recursive(forward_function)\n    backward_function.add_to_graph(g)",
    "docstring": "Add forward/backward functions to graph or the current context.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\polymorphic_function\\concrete_function.py",
    "ast_data": "FunctionDef name:add_gradient_functions_to_graph arg:self arg:g arguments arg arg If BoolOp Call Assign Call Call Call Assign Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "clean_tensor_name",
    "source_code": "def clean_tensor_name(tensor_name: str) -> str:\n    tensor_name = tensor_name.replace(FSDP_PREFIX, '')\n    tensor_name = tensor_name.replace(_CHECKPOINT_PREFIX, '')\n    return tensor_name",
    "docstring": "Cleans the parameter or buffer name by removing any module wrapper prefixes.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\fsdp\\_common_utils.py",
    "ast_data": "FunctionDef name:clean_tensor_name arg:tensor_name arguments arg Assign Call Assign Call Return return:yes"
  },
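A runnable sketch of `clean_tensor_name`; the two prefix constants are assumptions based on FSDP's module-wrapping convention, and the real values live in `torch.distributed.fsdp`:

```python
# Assumed prefix values; the real FSDP_PREFIX and _CHECKPOINT_PREFIX
# constants are defined inside torch.distributed.fsdp.
FSDP_PREFIX = "_fsdp_wrapped_module."
_CHECKPOINT_PREFIX = "_checkpoint_wrapped_module."

def clean_tensor_name(tensor_name: str) -> str:
    tensor_name = tensor_name.replace(FSDP_PREFIX, "")
    tensor_name = tensor_name.replace(_CHECKPOINT_PREFIX, "")
    return tensor_name

name = "_fsdp_wrapped_module.layer1._checkpoint_wrapped_module.weight"
print(clean_tensor_name(name))  # 'layer1.weight'
```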
  {
    "library": "authlib",
    "name": "ConsentRequiredError",
    "source_code": "class ConsentRequiredError(OAuth2Error):\n    error = 'consent_required'",
    "docstring": "The Authorization Server requires End-User consent. This error MAY be returned when the prompt parameter value in the Authentication Request is none, but the Authentication Request cannot be completed without displaying a user interface for End-User consent.",
    "type": "class",
    "file_path": "authlib\\authlib\\oidc\\core\\errors.py",
    "ast_data": "ClassDef name:ConsentRequiredError Assign"
  },
  {
    "library": "tensorflow",
    "name": "_create_dummy_input",
    "source_code": "def _create_dummy_input(func_graph, template_tensor):\n    with func_graph.as_default():\n        return array_ops.placeholder(template_tensor.dtype, shape=template_tensor.shape)",
    "docstring": "Creates tensors in func_graph to represent template_tensors. Args: func_graph: FuncGraph. template_tensor: a tensor in the outer graph. Returns: A tensor in func_graph.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\cond_v2.py",
    "ast_data": "FunctionDef name:_create_dummy_input arg:func_graph arg:template_tensor arguments arg arg With Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "fetch_attr",
    "source_code": "@compatibility(is_backward_compatible=True)\ndef fetch_attr(self, target: str):\n    target_atoms = target.split('.')\n    attr_itr = self.module\n    for i, atom in enumerate(target_atoms):\n        if not hasattr(attr_itr, atom):\n            raise RuntimeError(f'Node referenced nonexistent target {'.'.join(target_atoms[:i + 1])}')\n        attr_itr = getattr(attr_itr, atom)\n    return attr_itr",
    "docstring": "Fetch an attribute from the ``. Args: target (str): The fully-qualified name of the attribute to fetch Return: Any: The value of the attribute.",
    "type": "method",
    "file_path": "pytorch\\torch\\fx\\interpreter.py",
    "ast_data": "FunctionDef name:fetch_attr arg:self arg:target arguments arg arg Assign Call Assign For Call If Call Raise Call Call Assign Call Return return:yes Call"
  },
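A standalone sketch of the dotted-path traversal `fetch_attr` performs, run here against a plain object tree instead of an fx `GraphModule`:

```python
# Illustrative object tree standing in for a module hierarchy.
class Leaf:
    weight = [[1.0, 0.0], [0.0, 1.0]]

class Block:
    linear = Leaf()

class Root:
    block = Block()

def fetch_attr(root, target: str):
    attr_itr = root
    atoms = target.split(".")
    for i, atom in enumerate(atoms):
        if not hasattr(attr_itr, atom):
            # Report exactly which prefix of the path failed to resolve.
            raise RuntimeError(
                f"Node referenced nonexistent target {'.'.join(atoms[:i + 1])}"
            )
        attr_itr = getattr(attr_itr, atom)
    return attr_itr

print(fetch_attr(Root(), "block.linear.weight"))  # [[1.0, 0.0], [0.0, 1.0]]
```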
  {
    "library": "pytorch",
    "name": "minimize",
    "source_code": "def minimize(self, start: Optional[str]=None, end: Optional[str]=None, skip_nodes: Optional[list]=None, find_last_node: Optional[bool]=None) -> NodeSet:\n    print(self.settings)\n    print(self.module.graph)\n    nodes = self._collect_nodes(start, end)\n    if self.settings.traverse_method == 'sequential':\n        return self._sequential_traverse(nodes)\n    if self.settings.traverse_method == 'binary':\n        return self._binary_traverse(nodes)\n    if self.settings.traverse_method == 'accumulate':\n        return self._accumulate_traverse(nodes)\n    if self.settings.traverse_method == 'skip':\n        if skip_nodes is None:\n            raise RuntimeError(\"'skip_nodes' can't be None when 'traverse_method' is 'skip'.\")\n        return self._skip_traverse(nodes, skip_nodes)\n    if self.settings.traverse_method == 'defined':\n        return self._defined_traverse(nodes)\n    if self.settings.traverse_method == 'block':\n        return self._block_traverse(nodes, find_last_node)\n    raise RuntimeError(f'Unknown traverse method {self.settings.traverse_method}!')",
    "docstring": "Minimizing the model from node with name to node with name base on self.settings. Find culprits that causes FxNetMinimizerRunFuncError or FxNetMinimizerResultMismatchError errors. Args: start: The name of the node where we want to start minimizing. If set to None, then we'll start with the first node of the model. end: The name of the node where we want to terminate minimizing. If set to None, we'll end with the last node of the model. skip_nodes: The names of nodes where we want to skip during minimizing. It'll create subgraphs without these skip nodes under the hood. Only applicable in mode \"skip\". find_last_node: True if only last_node of a culprits is needed in mode \"block\". False if only the first_node of a culprits is needed. Only applicable in mode \"block\". Returns: nodes: A list of nodes that causes FxNetMinimizerRunFuncError or FxNetMinimizerResultMismatchError errors during minimizing.",
    "type": "method",
    "file_path": "pytorch\\torch\\fx\\passes\\net_min_base.py",
    "ast_data": "FunctionDef name:minimize arg:self arg:start arg:end arg:skip_nodes arg:find_last_node arguments arg arg arg arg arg Call Call Assign Call If Compare Return return:yes Call If Compare Return return:yes Call If Compare Return return:yes Call If Compare If Compare Raise Call Return return:yes Call If Compare Return return:yes Call If Compare Return return:yes Call Raise Call"
  },
  {
    "library": "pytorch",
    "name": "prune_dense_static_sort",
    "source_code": "@classmethod\ndef prune_dense_static_sort(cls, original_tensor: torch.Tensor, algorithm='') -> 'SparseSemiStructuredTensor':\n    packed, meta, packed_t, meta_t, compressed_swizzled_bitmask = torch._sparse_semi_structured_tile(original_tensor, algorithm=algorithm, use_cutlass=False)\n    return cls(original_tensor.shape, packed=packed, meta=meta, packed_t=packed_t, meta_t=meta_t, compressed_swizzled_bitmask=compressed_swizzled_bitmask, requires_grad=False)",
    "docstring": "This function does the same thing as described in SparseSemiStructuredCUTLASS, but uses the cuSPASRELt metadata layout and sparse matmul. The only functional difference is that cuSPARSELt stores and together into a single tensor. [9 1 7 4] [9 0 7 0] [1 2 3 0] [0 2 0 0] [8 3 5 4] -> prune 4x4 tile -> [8 0 0 4] -> pack to cuSPARSELT semi-structured -> packed [1 2 6 2] [0 0 6 2] -> pack to transposed cuSPARSELt -> packed_t semi-structured representation -> compute swizzled bitmask -> compressed_swizzled_bitmask The equivalent PyTorch code to create the same three outputs from the dense tensor can be found below:",
    "type": "method",
    "file_path": "pytorch\\torch\\sparse\\semi_structured.py",
    "ast_data": "FunctionDef name:prune_dense_static_sort arg:cls arg:original_tensor arg:algorithm arguments arg arg arg Assign Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "ListAvailableOps",
    "source_code": "def ListAvailableOps(self):\n    return tf_cluster.TF_ListAvailableOps()",
    "docstring": "Returns a list of all available operations (sorted alphabetically).",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\grappler\\cluster.py",
    "ast_data": "FunctionDef name:ListAvailableOps arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "ScaleBase",
    "source_code": "class ScaleBase:\n\n    def __init__(self, axis):\n        pass\n\n    def get_transform(self):\n        raise NotImplementedError()\n\n    def set_default_locators_and_formatters(self, axis):\n        raise NotImplementedError()\n\n    def limit_range_for_scale(self, vmin, vmax, minpos):\n        return (vmin, vmax)",
    "docstring": "The base class for all scales. Scales are separable transformations, working on a single dimension. Subclasses should override :attr: The scale's name. :meth: A method returning a , which converts data coordinates to scaled coordinates. This transform should be invertible, so that e.g. mouse positions can be converted back to data coordinates. :meth: A method that sets default locators and formatters for an that uses this scale. :meth: An optional method that \"fixes\" the axis range to acceptable values, e.g. restricting log-scaled axes to positive values.",
    "type": "class",
    "file_path": "matplotlib\\lib\\matplotlib\\scale.py",
    "ast_data": "ClassDef name:ScaleBase FunctionDef name:__init__ arg:self arg:axis arguments arg arg FunctionDef name:get_transform arg:self arguments arg Raise Call FunctionDef name:set_default_locators_and_formatters arg:self arg:axis arguments arg arg Raise Call FunctionDef name:limit_range_for_scale arg:self arg:vmin arg:vmax arg:minpos arguments arg arg arg arg Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "coverage_error",
    "source_code": "@validate_params({'y_true': ['array-like'], 'y_score': ['array-like'], 'sample_weight': ['array-like', None]}, prefer_skip_nested_validation=True)\ndef coverage_error(y_true, y_score, *, sample_weight=None):\n    y_true = check_array(y_true, ensure_2d=True)\n    y_score = check_array(y_score, ensure_2d=True)\n    check_consistent_length(y_true, y_score, sample_weight)\n    y_type = type_of_target(y_true, input_name='y_true')\n    if y_type != 'multilabel-indicator':\n        raise ValueError('{0} format is not supported'.format(y_type))\n    if y_true.shape != y_score.shape:\n        raise ValueError('y_true and y_score have different shape')\n    y_score_mask = np.ma.masked_array(y_score, mask=np.logical_not(y_true))\n    y_min_relevant = y_score_mask.min(axis=1).reshape((-1, 1))\n    coverage = (y_score >= y_min_relevant).sum(axis=1)\n    coverage = coverage.filled(0)\n    return float(np.average(coverage, weights=sample_weight))",
    "docstring": "Coverage error measure. Compute how far we need to go through the ranked scores to cover all true labels. The best value is equal to the average number of labels in `User Guide decision_function` scores, values greater than or equal to zero should indicate the positive class. sample_weight : array-like of shape (n_samples,), default=None Sample weights. Returns ------- coverage_error : float The coverage error. References ---------- .. [1] Tsoumakas, G., Katakis, I., & Vlahavas, I. (2010). Mining multi-label data. In Data mining and knowledge discovery handbook (pp. 667-685). Springer US. Examples -------- >>> from sklearn.metrics import coverage_error >>> y_true = [[1, 0, 0], [0, 1, 1]] >>> y_score = [[1, 0, 0], [0, 1, 1]] >>> coverage_error(y_true, y_score) 1.5",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\metrics\\_ranking.py",
    "ast_data": "FunctionDef name:coverage_error arg:y_true arg:y_score arguments arg arg arg Assign Call Assign Call Call Assign Call If Compare Raise Call Call If Compare Raise Call Assign Call Call Assign Call Call Assign Call Compare Assign Call Return return:yes Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "OperatorNotAllowedInGraphError",
    "source_code": "@tf_export('errors.OperatorNotAllowedInGraphError', v1=[])\nclass OperatorNotAllowedInGraphError(TypeError):\n    pass",
    "docstring": "Raised when an unsupported operator is present in Graph execution. For example, using a as a Python inside a Graph will raise . Iterating over values inside a is also not supported in Graph execution. Example: >>> @tf.function ... def iterate_over(t): ... a,b,c = t ... return a >>> >>> iterate_over(tf.constant([1, 2, 3])) Traceback (most recent call last): ... OperatorNotAllowedInGraphError: ...",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\errors_impl.py",
    "ast_data": "ClassDef name:OperatorNotAllowedInGraphError Call"
  },
  {
    "library": "matplotlib",
    "name": "_ScaledRotation",
    "source_code": "class _ScaledRotation(Affine2DBase):\n\n    def __init__(self, theta, trans_shift):\n        super().__init__()\n        self._theta = theta\n        self._trans_shift = trans_shift\n        self._mtx = None\n\n    def get_matrix(self):\n        if self._invalid:\n            transformed_coords = self._trans_shift.transform([[self._theta, 0]])[0]\n            adjusted_theta = transformed_coords[0]\n            rotation = Affine2D().rotate(adjusted_theta)\n            self._mtx = rotation.get_matrix()\n        return self._mtx",
    "docstring": "A transformation that applies rotation by *theta*, after transform by *trans_shift*.",
    "type": "class",
    "file_path": "matplotlib\\lib\\matplotlib\\transforms.py",
    "ast_data": "ClassDef name:_ScaledRotation FunctionDef name:__init__ arg:self arg:theta arg:trans_shift arguments arg arg arg Call Call Assign Assign Assign FunctionDef name:get_matrix arg:self arguments arg If Assign Call Assign Assign Call Call Assign Call Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "check_mixin_order",
    "source_code": "@ignore_warnings\ndef check_mixin_order(name, estimator_orig):\n    dag = [(ClassifierMixin, BaseEstimator), (RegressorMixin, BaseEstimator), (ClusterMixin, BaseEstimator), (TransformerMixin, BaseEstimator), (BiclusterMixin, BaseEstimator), (OneToOneFeatureMixin, BaseEstimator), (ClassNamePrefixFeaturesOutMixin, BaseEstimator), (DensityMixin, BaseEstimator), (OutlierMixin, BaseEstimator), (MetaEstimatorMixin, BaseEstimator), (MultiOutputMixin, BaseEstimator)]\n    violations = []\n    mro = type(estimator_orig).mro()\n    for mixin_a, mixin_b in dag:\n        if mixin_a in mro and mixin_b in mro and (mro.index(mixin_a) > mro.index(mixin_b)):\n            violations.append((mixin_a, mixin_b))\n    violation_str = '\\n'.join((f'{mixin_a.__name__} comes before/left side of {mixin_b.__name__}' for mixin_a, mixin_b in violations))\n    assert not violations, f'{name} is inheriting from mixins in the wrong order. In general, in mixin inheritance, more specialized mixins must come before more general ones. This means, for instance, `BaseEstimator` should be on the right side of most other mixins. You need to change the order so that:\\n{violation_str}'",
    "docstring": "Check that mixins are inherited in the correct order.",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\utils\\estimator_checks.py",
    "ast_data": "FunctionDef name:check_mixin_order arg:name arg:estimator_orig arguments arg arg Assign Assign Assign Call Call For If BoolOp Compare Compare Compare Call Call Call Assign Call"
  },
  {
    "library": "scipy",
    "name": "confidence_interval",
    "source_code": "def confidence_interval(self, confidence_level=0.95):\n    alternative = self._alternative\n    p = self._p\n    x = np.sort(self._x)\n    n = len(x)\n    bd = stats.binom(n, p)\n    if confidence_level <= 0 or confidence_level >= 1:\n        message = '`confidence_level` must be a number between 0 and 1.'\n        raise ValueError(message)\n    low_index = np.nan\n    high_index = np.nan\n    if alternative == 'less':\n        p = 1 - confidence_level\n        low = -np.inf\n        high_index = int(bd.isf(p))\n        high = x[high_index] if high_index < n else np.nan\n    elif alternative == 'greater':\n        p = 1 - confidence_level\n        low_index = int(bd.ppf(p)) - 1\n        low = x[low_index] if low_index >= 0 else np.nan\n        high = np.inf\n    elif alternative == 'two-sided':\n        p = (1 - confidence_level) / 2\n        low_index = int(bd.ppf(p)) - 1\n        low = x[low_index] if low_index >= 0 else np.nan\n        high_index = int(bd.isf(p))\n        high = x[high_index] if high_index < n else np.nan\n    return ConfidenceInterval(low, high)",
    "docstring": "Compute the confidence interval of the quantile. Parameters ---------- confidence_level : float, default: 0.95 Confidence level for the computed confidence interval of the quantile. Default is 0.95. Returns ------- ci : `` that hold the lower and upper bounds of the confidence interval. Examples -------- >>> import numpy as np >>> import scipy.stats as stats >>> p = 0.75 # quantile of interest >>> q = 0 # hypothesized value of the quantile >>> x = np.exp(np.arange(0, 1.01, 0.01)) >>> res = stats.quantile_test(x, q=q, p=p, alternative='less') >>> lb, ub = res.confidence_interval() >>> lb, ub (-inf, 2.293318740264183) >>> res = stats.quantile_test(x, q=q, p=p, alternative='two-sided') >>> lb, ub = res.confidence_interval(0.9) >>> lb, ub (1.9542373206359396, 2.293318740264183)",
    "type": "method",
    "file_path": "scipy\\scipy\\stats\\_stats_py.py",
    "ast_data": "FunctionDef name:confidence_interval arg:self arg:confidence_level arguments arg arg Assign Assign Assign Call Assign Call Assign Call If BoolOp Compare Compare Assign Raise Call Assign Assign If Compare Assign Assign Assign Call Call Assign Compare If Compare Assign Assign Call Call Assign Compare Assign If Compare Assign Assign Call Call Assign Compare Assign Call Call Assign Compare Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "frozen",
    "source_code": "def frozen(self):\n    return self",
    "docstring": "Return a frozen copy of this transform node. The frozen copy will not be updated when its children change. Useful for storing a previously known state of a transform where `` might normally be used.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\transforms.py",
    "ast_data": "FunctionDef name:frozen arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "deserialize",
    "source_code": "def deserialize(config, custom_objects=None):\n    populate_deserializable_objects()\n    return generic_utils.deserialize_keras_object(config, module_objects=LOCAL.ALL_OBJECTS, custom_objects=custom_objects, printable_module_name='initializer')",
    "docstring": "Return an object from its config.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\initializers\\__init__.py",
    "ast_data": "FunctionDef name:deserialize arg:config arg:custom_objects arguments arg arg Call Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "set",
    "source_code": "def set(self, *, h_pad=None, w_pad=None, hspace=None, wspace=None, rect=None):\n    for td in self.set.__kwdefaults__:\n        if locals()[td] is not None:\n            self._params[td] = locals()[td]",
    "docstring": "Set the pads for constrained_layout. Parameters ---------- h_pad, w_pad : float Padding around the Axes elements in inches. Default to :rc: and :rc:. hspace, wspace : float Fraction of the figure to dedicate to space between the axes. These are evenly spread between the gaps between the Axes. A value of 0.2 for a three-column layout would have a space of 0.1 of the figure width between each column. If h/wspace < h/w_pad, then the pads are used instead. Default to :rc: and :rc:. rect : tuple of 4 floats Rectangle in figure coordinates to perform constrained layout in (left, bottom, width, height), each from 0-1.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\layout_engine.py",
    "ast_data": "FunctionDef name:set arg:self arguments arg arg arg arg arg arg For If Compare Call Assign Call"
  },
  {
    "library": "seaborn",
    "name": "blend_palette",
    "source_code": "def blend_palette(colors, n_colors=6, as_cmap=False, input='rgb'):\n    colors = [_color_to_rgb(color, input) for color in colors]\n    name = 'blend'\n    pal = mpl.colors.LinearSegmentedColormap.from_list(name, colors)\n    if not as_cmap:\n        rgb_array = pal(np.linspace(0, 1, int(n_colors)))[:, :3]\n        pal = _ColorPalette(map(tuple, rgb_array))\n    return pal",
    "docstring": "Make a palette that blends between a list of colors. Parameters ---------- colors : sequence of colors in various formats interpreted by hex code, html color name, or tuple in space. n_colors : int, optional Number of colors in the palette. as_cmap : bool, optional If True, return a :class:. Returns ------- palette list of RGB tuples or :class: Examples -------- .. include: ../docstrings/blend_palette.rst",
    "type": "function",
    "file_path": "seaborn\\seaborn\\palettes.py",
    "ast_data": "FunctionDef name:blend_palette arg:colors arg:n_colors arg:as_cmap arg:input arguments arg arg arg arg Assign Call Assign Assign Call If Assign Call Call Call Assign Call Call Return return:yes"
  },
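A brief usage sketch for `blend_palette` via the public seaborn entry point:

```python
import seaborn as sns

# Interpolate four colors between two hex endpoints.
pal = sns.blend_palette(["#2a9d8f", "#e76f51"], n_colors=4)
print(len(pal))  # 4 RGB tuples

# Or get a matplotlib colormap for continuous mapping.
cmap = sns.blend_palette(["#2a9d8f", "#e76f51"], as_cmap=True)
print(type(cmap).__name__)  # LinearSegmentedColormap
```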
  {
    "library": "matplotlib",
    "name": "get_linelength",
    "source_code": "def get_linelength(self):\n    return self._linelength",
    "docstring": "Return the length of the lines used to mark each event.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\collections.py",
    "ast_data": "FunctionDef name:get_linelength arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_RemoteDataset",
    "source_code": "class _RemoteDataset(dataset_ops.DatasetSource):\n\n    def __init__(self, graph_def, element_spec):\n        self._elem_spec = element_spec\n        variant_tensor = ged_ops.dataset_from_graph(graph_def)\n        super(_RemoteDataset, self).__init__(variant_tensor)\n\n    @property\n    def element_spec(self):\n        return self._elem_spec",
    "docstring": "Creates a dataset given a graph def.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\coordinator\\values.py",
    "ast_data": "ClassDef name:_RemoteDataset FunctionDef name:__init__ arg:self arg:graph_def arg:element_spec arguments arg arg arg Assign Assign Call Call Call FunctionDef name:element_spec arg:self arguments arg Return return:yes"
  },
  {
    "library": "scipy",
    "name": "DoubleInfiniteFunc",
    "source_code": "class DoubleInfiniteFunc:\n\n    def __init__(self, func):\n        self._func = func\n        self._tmin = sys.float_info.min ** 0.5\n\n    def get_t(self, x):\n        s = -1 if x < 0 else 1\n        return s / (abs(x) + 1)\n\n    def __call__(self, t):\n        if abs(t) < self._tmin:\n            return 0.0\n        else:\n            x = (1 - abs(t)) / t\n            f = self._func(x)\n            return f / t / t",
    "docstring": "Argument transform from (-oo, oo) to (-1, 1)",
    "type": "class",
    "file_path": "scipy\\scipy\\integrate\\_quad_vec.py",
    "ast_data": "ClassDef name:DoubleInfiniteFunc FunctionDef name:__init__ arg:self arg:func arguments arg arg Assign Assign FunctionDef name:get_t arg:self arg:x arguments arg arg Assign Compare Return return:yes Call FunctionDef name:__call__ arg:self arg:t arguments arg arg If Compare Call Return return:yes Assign Call Assign Call Return return:yes"
  },
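The transform above maps an integral over (-oo, oo) to one over (-1, 1) with a 1/t^2 Jacobian factor. A sketch checking it against the Gaussian integral, reusing the class body from the entry (the `points=[0]` hint just flags the kink at t=0 for the quadrature):

```python
import sys
import numpy as np
from scipy.integrate import quad

class DoubleInfiniteFunc:
    def __init__(self, func):
        self._func = func
        self._tmin = sys.float_info.min ** 0.5

    def __call__(self, t):
        if abs(t) < self._tmin:
            return 0.0
        x = (1 - abs(t)) / t          # maps (-1, 1) \ {0} onto (-oo, oo)
        return self._func(x) / t / t  # Jacobian factor 1/t**2

g = DoubleInfiniteFunc(lambda x: np.exp(-x * x))
val, err = quad(g, -1, 1, points=[0])
print(val, np.sqrt(np.pi))  # both approximately 1.7724538509
```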
  {
    "library": "django",
    "name": "clone",
    "source_code": "def clone(self):\n    return SpatialReference(capi.clone_srs(self.ptr), axis_order=self.axis_order)",
    "docstring": "Return a clone of this SpatialReference object.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\gdal\\srs.py",
    "ast_data": "FunctionDef name:clone arg:self arguments arg Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "update_equivalent_types_dict",
    "source_code": "def update_equivalent_types_dict(customized_equivalent_types=None):\n    if customized_equivalent_types is None:\n        raise ValueError('customized_equivalent_types should not be None')\n    global _EQUIVALENT_TYPES\n    global _EQUIVALENT_TYPES_DICT\n    _EQUIVALENT_TYPES = customized_equivalent_types\n    _EQUIVALENT_TYPES_DICT = _create_equivalent_types_dict()",
    "docstring": "Help function for user who wants to customize the _EQUIVALENT_TYPES and _EQUIVALENT_TYPES_DICT. When customized_equivalent_types passes in, re-generate _EQUIVALENT_TYPES and _EQUIVALENT_TYPES_DICT.",
    "type": "function",
    "file_path": "pytorch\\torch\\ao\\quantization\\pt2e\\graph_utils.py",
    "ast_data": "FunctionDef name:update_equivalent_types_dict arg:customized_equivalent_types arguments arg If Compare Raise Call Assign Assign Call"
  },
  {
    "library": "tensorflow",
    "name": "row_limits",
    "source_code": "def row_limits(self):\n    return self._row_splits[1:]",
    "docstring": "Returns the limit indices for rows in this row partition. These indices specify where the values for each row end. is equal to . Returns: A 1-D integer Tensor with shape . The returned tensor is nonnegative, and is sorted in ascending order. .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\row_partition.py",
    "ast_data": "FunctionDef name:row_limits arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "parse_node_or_tensor_name",
    "source_code": "def parse_node_or_tensor_name(name):\n    if ':' in name and (not name.endswith(':')):\n        node_name = name[:name.rfind(':')]\n        output_slot = int(name[name.rfind(':') + 1:])\n        return (node_name, output_slot)\n    else:\n        return (name, None)",
    "docstring": "Get the node name from a string that can be node or tensor name. Args: name: An input node name (e.g., \"node_a\") or tensor name (e.g., \"node_a:0\"), as a str. Returns: 1) The node name, as a str. If the input name is a tensor name, i.e., consists of a colon, the final colon and the following output slot will be stripped. 2) If the input name is a tensor name, the output slot, as an int. If the input name is not a tensor name, None.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\debug\\lib\\debug_graphs.py",
    "ast_data": "FunctionDef name:parse_node_or_tensor_name arg:name arguments arg If BoolOp Compare Call Assign Call Assign Call Call Return return:yes Return return:yes"
  },
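The parser above is pure string handling, so it can be exercised standalone; a minimal check of both input forms:

```python
# Standalone check of the two input forms handled above.
def parse_node_or_tensor_name(name):
    if ':' in name and not name.endswith(':'):
        node_name = name[:name.rfind(':')]
        output_slot = int(name[name.rfind(':') + 1:])
        return node_name, output_slot
    return name, None

assert parse_node_or_tensor_name('node_a:0') == ('node_a', 0)   # tensor name
assert parse_node_or_tensor_name('node_a') == ('node_a', None)  # node name
```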
  {
    "library": "pandas",
    "name": "__init__",
    "source_code": "@doc(storage_options=_shared_docs['storage_options'])\ndef __init__(self, filepath_or_buffer: FilePath | ReadBuffer[bytes], storage_options: StorageOptions | None=None, engine_kwargs: dict | None=None) -> None:\n    import_optional_dependency('pyxlsb')\n    super().__init__(filepath_or_buffer, storage_options=storage_options, engine_kwargs=engine_kwargs)",
    "docstring": "Reader using pyxlsb engine. Parameters ---------- filepath_or_buffer : str, path object, or Workbook Object to be parsed. {storage_options} engine_kwargs : dict, optional Arbitrary keyword arguments passed to excel engine.",
    "type": "method",
    "file_path": "pandas\\pandas\\io\\excel\\_pyxlsb.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:filepath_or_buffer arg:storage_options arg:engine_kwargs arguments arg arg arg arg Call Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "_unregister_replace_node_hook",
    "source_code": "def _unregister_replace_node_hook(self, f):\n    assert callable(f), 'create_node hook must be a callable.'\n    self._replace_hooks.remove(f)",
    "docstring": "Takes a callable which was previously registered to be called everytime when we replace a node. This function will unregister that callable so it is no longer invoked on node replacement.",
    "type": "method",
    "file_path": "pytorch\\torch\\fx\\graph_module.py",
    "ast_data": "FunctionDef name:_unregister_replace_node_hook arg:self arg:f arguments arg arg Call Call"
  },
  {
    "library": "scikit-learn",
    "name": "_check_input_size",
    "source_code": "def _check_input_size(n_components, n_features):\n    if n_components <= 0:\n        raise ValueError('n_components must be strictly positive, got %d' % n_components)\n    if n_features <= 0:\n        raise ValueError('n_features must be strictly positive, got %d' % n_features)",
    "docstring": "Factorize argument checking for random matrix generation.",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\random_projection.py",
    "ast_data": "FunctionDef name:_check_input_size arg:n_components arg:n_features arguments arg arg If Compare Raise Call If Compare Raise Call"
  },
  {
    "library": "scipy",
    "name": "isotonic_regression",
    "source_code": "def isotonic_regression(y: 'npt.ArrayLike', *, weights: 'npt.ArrayLike | None'=None, increasing: bool=True) -> OptimizeResult:\n    yarr = np.atleast_1d(y)\n    order = slice(None) if increasing else slice(None, None, -1)\n    x = np.array(yarr[order], order='C', dtype=np.float64, copy=True)\n    if weights is None:\n        wx = np.ones_like(yarr, dtype=np.float64)\n    else:\n        warr = np.atleast_1d(weights)\n        if not (yarr.ndim == warr.ndim == 1 and yarr.shape[0] == warr.shape[0]):\n            raise ValueError('Input arrays y and w must have one dimension of equal length.')\n        if np.any(warr <= 0):\n            raise ValueError('Weights w must be strictly positive.')\n        wx = np.array(warr[order], order='C', dtype=np.float64, copy=True)\n    n = x.shape[0]\n    r = np.full(shape=n + 1, fill_value=-1, dtype=np.intp)\n    x, wx, r, b = pava(x, wx, r)\n    r = r[:b + 1]\n    wx = wx[:b]\n    if not increasing:\n        x = x[::-1]\n        wx = wx[::-1]\n        r = r[-1] - r[::-1]\n    return OptimizeResult(x=x, weights=wx, blocks=r)",
    "docstring": "Nonparametric isotonic regression. A (not strictly) monotonically increasing array with the same length as is calculated by the pool adjacent violators algorithm (PAVA), see [1]_. See the Notes section for more details. Parameters ---------- y : (N,) array_like Response variable. weights : (N,) array_like or None Case weights. increasing : bool If True, fit monotonic increasing, i.e. isotonic, regression. If False, fit a monotonic decreasing, i.e. antitonic, regression. Default is True. Returns ------- res : OptimizeResult The optimization result represented as a `ywy_ix_ixx_i \\leq x_{i+1}xx_ix_{i+1}10.18637/jss.v102.c0110.1007/s10463-021-00808-0` takes about 200 microseconds.",
    "type": "function",
    "file_path": "scipy\\scipy\\optimize\\_isotonic.py",
    "ast_data": "FunctionDef name:isotonic_regression arg:y arguments arg arg arg Assign Call Assign Call Call Assign Call If Compare Assign Call Assign Call If BoolOp Compare Compare Raise Call If Call Compare Raise Call Assign Call Assign Assign Call Assign Call Assign Assign If Assign Assign Assign Return return:yes Call"
  },
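A short usage sketch (assuming SciPy >= 1.12, where scipy.optimize.isotonic_regression is public; the sample data is an assumption):

```python
# Usage sketch: the violating pair [5, 3] is pooled to its mean 4, and the
# following 4 satisfies the non-strict monotonicity constraint as-is.
from scipy.optimize import isotonic_regression

res = isotonic_regression([5.0, 3.0, 4.0, 7.0])
print(res.x)       # [4. 4. 4. 7.]
print(res.blocks)  # block boundary indices into x
```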
  {
    "library": "tensorflow",
    "name": "resize_images_v1",
    "source_code": "@dispatch.dispatch_for_api(image_ops.resize_images)\ndef resize_images_v1(images: ragged_tensor.RaggedTensor, size, method=image_ops.ResizeMethodV1.BILINEAR, align_corners=False, preserve_aspect_ratio=False, name=None):\n    with ops.name_scope(name, 'RaggedResizeImages', [images, size]):\n        return _resize_images(image_ops.resize_images, images, size, method=method, preserve_aspect_ratio=preserve_aspect_ratio, align_corners=align_corners)",
    "docstring": "RaggedTensor dispatcher for tf.image.resize (tf-v1).",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\ragged_image_ops.py",
    "ast_data": "FunctionDef name:resize_images_v1 arg:images arg:size arg:method arg:align_corners arg:preserve_aspect_ratio arg:name arguments arg arg arg arg arg arg With Call Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "CallOptions",
    "source_code": "@dataclasses.dataclass(frozen=True)\nclass CallOptions:\n    collective_manager_ids_used: List[int] = dataclasses.field(default_factory=list)\n    control_captures: List[Any] = dataclasses.field(default_factory=list)\n    is_stateful: bool = False",
    "docstring": "Specifies additional configuration for an AtomicFunction call.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\polymorphic_function\\atomic_function.py",
    "ast_data": "ClassDef name:CallOptions Call Call Call"
  },
  {
    "library": "scipy",
    "name": "_remainder_matrix_power_triu",
    "source_code": "def _remainder_matrix_power_triu(T, t):\n    m_to_theta = {1: 1.51e-05, 2: 0.00224, 3: 0.0188, 4: 0.0604, 5: 0.124, 6: 0.2, 7: 0.279}\n    n, n = T.shape\n    T0 = T\n    T0_diag = np.diag(T0)\n    if np.array_equal(T0, np.diag(T0_diag)):\n        U = np.diag(T0_diag ** t)\n    else:\n        R, s, m = _inverse_squaring_helper(T0, m_to_theta)\n        U = _fractional_power_pade(-R, t, m)\n        eivals = np.diag(T0)\n        has_principal_branch = all((x.real > 0 or x.imag != 0 for x in eivals))\n        for i in range(s, -1, -1):\n            if i < s:\n                U = U.dot(U)\n            elif has_principal_branch:\n                p = t * np.exp2(-i)\n                U[np.diag_indices(n)] = T0_diag ** p\n                for j in range(n - 1):\n                    l1 = T0[j, j]\n                    l2 = T0[j + 1, j + 1]\n                    t12 = T0[j, j + 1]\n                    f12 = _fractional_power_superdiag_entry(l1, l2, t12, p)\n                    U[j, j + 1] = f12\n    if not np.array_equal(U, np.triu(U)):\n        raise Exception('U is not upper triangular')\n    return U",
    "docstring": "Compute a fractional power of an upper triangular matrix. The fractional power is restricted to fractions -1 < t < 1. This uses algorithm (3.1) of [1]_. The Pade approximation itself uses algorithm (4.1) of [2]_. Parameters ---------- T : (N, N) array_like Upper triangular matrix whose fractional power to evaluate. t : float Fractional power between -1 and 1 exclusive. Returns ------- X : (N, N) array_like The fractional power of the matrix. References ---------- .. [1] Nicholas J. Higham and Lijing Lin (2013) \"An Improved Schur-Pade Algorithm for Fractional Powers of a Matrix and their Frechet Derivatives.\" .. [2] Nicholas J. Higham and Lijing lin (2011) \"A Schur-Pade Algorithm for Fractional Powers of a Matrix.\" SIAM Journal on Matrix Analysis and Applications, 32 (3). pp. 1056-1078. ISSN 0895-4798",
    "type": "function",
    "file_path": "scipy\\scipy\\linalg\\_matfuncs_inv_ssq.py",
    "ast_data": "FunctionDef name:_remainder_matrix_power_triu arg:T arg:t arguments arg arg Assign Assign Assign Assign Call If Call Call Assign Call Assign Call Assign Call Assign Call Assign Call BoolOp Compare Compare For Call If Compare Assign Call If Assign Call Assign Call For Call Assign Assign Assign Assign Call Assign If Call Call Raise Call Return return:yes"
  },
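The helper above is internal; the public entry point is scipy.linalg.fractional_matrix_power. A small round-trip check (the matrix is an illustrative assumption):

```python
# Round-trip check: the 0.5 power of A, squared, recovers A.
import numpy as np
from scipy.linalg import fractional_matrix_power

A = np.array([[4.0, 1.0],
              [0.0, 9.0]])          # upper triangular, positive eigenvalues
R = fractional_matrix_power(A, 0.5)
print(np.allclose(R @ R, A))        # True
```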
  {
    "library": "tensorflow",
    "name": "minimum",
    "source_code": "@property\ndef minimum(self):\n    return self._minimum",
    "docstring": "Returns a NumPy array specifying the minimum bounds (inclusive).",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\tensor.py",
    "ast_data": "FunctionDef name:minimum arg:self arguments arg Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "get_bbox",
    "source_code": "def get_bbox(self):\n    bbox = Bbox([[0, 0], [0, 0]])\n    bbox.update_from_data_xy(self.get_xydata())\n    return bbox",
    "docstring": "Get the bounding box of this line.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\lines.py",
    "ast_data": "FunctionDef name:get_bbox arg:self arguments arg Assign Call Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_check_trainable_weights_consistency",
    "source_code": "def _check_trainable_weights_consistency(self):\n    if not hasattr(self, '_collected_trainable_weights'):\n        return\n    if len(self.trainable_weights) != len(self._collected_trainable_weights):\n        logging.log_first_n(logging.WARN, 'Discrepancy between trainable weights and collected trainable weights, did you set `model.trainable` without calling `model.compile` after ?', 1)",
    "docstring": "Check trainable weights count consistency. This will raise a warning if and are inconsistent (i.e. have different number of parameters). Inconsistency will typically arise when one modifies without calling again.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\training_v1.py",
    "ast_data": "FunctionDef name:_check_trainable_weights_consistency arg:self arguments arg If Call Return return:no If Compare Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "get_setter",
    "source_code": "@tf_export('__internal__.saved_model.load.get_setter', v1=[])\ndef get_setter(proto):\n    _, type_registrations = _REVIVED_TYPE_REGISTRY.get(proto.identifier, (None, None))\n    if type_registrations is not None:\n        for type_registration in type_registrations:\n            if type_registration.should_load(proto):\n                return type_registration.setter\n    return None",
    "docstring": "Gets the registered setter function for the SavedUserObject proto. See VersionedTypeRegistration for info about the setter function. Args: proto: SavedUserObject proto Returns: setter function",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\saved_model\\revived_types.py",
    "ast_data": "FunctionDef name:get_setter arg:proto arguments arg Assign Call If Compare For If Call Return return:yes Return return:no Call"
  },
  {
    "library": "pytorch",
    "name": "synthetic_graph_input",
    "source_code": "def synthetic_graph_input(self, fn, args):\n    example_value = fn(*args)\n    varname = self.new_var()\n    cg = PyCodegen(self.root_tx)\n    cg.add_push_null(lambda: cg.load_import_from(fn.__module__, fn.__name__))\n    cg.foreach(map(variables.ConstantVariable.create, args))\n    cg.call_function(len(args), False)\n    cg.store(varname)\n    self.pregraph_bytecode.extend(cg.get_instructions())\n    source = SyntheticLocalSource(varname)\n    result = VariableTracker.build(self.root_tx, example_value, source)\n    result = result.realize()\n    TracingContext.get().guards_context.dynamo_guards.remove_guards_with_source(source)\n    return result",
    "docstring": "call fn(*args) before the graph runs and turn the result into a fake input.",
    "type": "method",
    "file_path": "pytorch\\torch\\_dynamo\\output_graph.py",
    "ast_data": "FunctionDef name:synthetic_graph_input arg:self arg:fn arg:args arguments arg arg arg Assign Call Assign Call Assign Call Call arguments Call Call Call Call Call Call Call Call Assign Call Assign Call Assign Call Call Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "_get_wrap_line_width",
    "source_code": "def _get_wrap_line_width(self):\n    x0, y0 = self.get_transform().transform(self.get_position())\n    figure_box = self.get_figure().get_window_extent()\n    alignment = self.get_horizontalalignment()\n    self.set_rotation_mode('anchor')\n    rotation = self.get_rotation()\n    left = self._get_dist_to_box(rotation, x0, y0, figure_box)\n    right = self._get_dist_to_box((180 + rotation) % 360, x0, y0, figure_box)\n    if alignment == 'left':\n        line_width = left\n    elif alignment == 'right':\n        line_width = right\n    else:\n        line_width = 2 * min(left, right)\n    return line_width",
    "docstring": "Return the maximum line width for wrapping text based on the current orientation.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\text.py",
    "ast_data": "FunctionDef name:_get_wrap_line_width arg:self arguments arg Assign Call Call Call Assign Call Call Assign Call Call Assign Call Assign Call Assign Call If Compare Assign If Compare Assign Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "recursive_create_dir_v2",
    "source_code": "@tf_export('io.gfile.makedirs')\ndef recursive_create_dir_v2(path):\n    _pywrap_file_io.RecursivelyCreateDir(compat.path_to_bytes(path))",
    "docstring": "Creates a directory and all parent/intermediate directories. It succeeds if path already exists and is writable. Args: path: string, name of the directory to be created Raises: errors.OpError: If the operation fails.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\lib\\io\\file_io.py",
    "ast_data": "FunctionDef name:recursive_create_dir_v2 arg:path arguments arg Call Call Call"
  },
  {
    "library": "django",
    "name": "clone",
    "source_code": "def clone(self):\n    return OGRGeometry(capi.clone_geom(self.ptr), self.srs)",
    "docstring": "Clone this OGR Geometry.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\gdal\\geometries.py",
    "ast_data": "FunctionDef name:clone arg:self arguments arg Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "_adjust_scalar_from_fx_to_onnx",
    "source_code": "def _adjust_scalar_from_fx_to_onnx(dynamo_value: Union[torch.Tensor, int, float, bool], value_info: 'onnx.ValueInfoProto') -> torch.Tensor:\n    if isinstance(dynamo_value, torch.Tensor) and len(value_info.type.tensor_type.shape.dim) == 0 and (dynamo_value.shape == (1,)):\n        return torch.squeeze(dynamo_value)\n    elif isinstance(dynamo_value, int):\n        return torch.tensor(dynamo_value, dtype=torch.int64)\n    elif isinstance(dynamo_value, float):\n        return torch.tensor(dynamo_value, dtype=torch.float32)\n    elif isinstance(dynamo_value, bool):\n        return torch.tensor(dynamo_value, dtype=torch.bool)\n    else:\n        assert isinstance(dynamo_value, torch.Tensor)\n        return dynamo_value.contiguous()",
    "docstring": "Helper function to wrap PyTorch variables as torch.Tensor",
    "type": "function",
    "file_path": "pytorch\\torch\\onnx\\_internal\\onnxruntime.py",
    "ast_data": "FunctionDef name:_adjust_scalar_from_fx_to_onnx arg:dynamo_value arg:value_info arguments arg arg If BoolOp Call Compare Call Compare Return return:yes Call If Call Return return:yes Call If Call Return return:yes Call If Call Return return:yes Call Call Return return:yes Call"
  },
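The wrapping rules above, minus the ONNX ValueInfoProto plumbing, amount to plain torch conversions; a minimal illustration (the sample values are assumptions):

```python
# The scalar conversions used above, shown directly.
import torch

print(torch.tensor(3, dtype=torch.int64).dtype)      # torch.int64
print(torch.tensor(0.5, dtype=torch.float32).dtype)  # torch.float32
print(torch.tensor(True, dtype=torch.bool).dtype)    # torch.bool
print(torch.squeeze(torch.zeros(1)).shape)           # torch.Size([]) from (1,)
```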
  {
    "library": "numpy",
    "name": "RankWarning",
    "source_code": "class RankWarning(RuntimeWarning):\n    pass",
    "docstring": "Matrix rank warning. Issued by polynomial functions when the design matrix is rank deficient.",
    "type": "class",
    "file_path": "numpy\\numpy\\exceptions.py",
    "ast_data": "ClassDef name:RankWarning"
  },
  {
    "library": "pytorch",
    "name": "OpsWrapper",
    "source_code": "class OpsWrapper(DefaultHandler):\n\n    def _default(self, name: str, args: tuple[Any, ...], kwargs: dict[str, Any]) -> Any:\n        new_args = [OpsWrapper._unwrap(a) for a in args]\n        new_kwargs = {k: OpsWrapper._unwrap(v) for k, v in kwargs.items()}\n        return OpsWrapper._wrap(getattr(_ops, name)(*new_args, **new_kwargs))\n\n    @staticmethod\n    def _unwrap(x):\n        if isinstance(x, (list, tuple)):\n            return tuple((OpsWrapper._unwrap(v) for v in x))\n        if isinstance(x, OpsValue):\n            return x.value\n        return x\n\n    @staticmethod\n    def _wrap(x):\n        if isinstance(x, (list, tuple)):\n            return tuple((OpsValue(v) for v in x))\n        return OpsValue(x)\n\n    @staticmethod\n    def indirect_indexing(index, size, check=True, wrap_neg=True):\n        index = OpsWrapper._unwrap(index)\n        return _ops.indirect_indexing(index, size, check, wrap_neg)",
    "docstring": "This wraps any returned IR values into an instance, so that we can overload the magic methods for writing mathematical expressions fluently.",
    "type": "class",
    "file_path": "pytorch\\torch\\_inductor\\virtualized.py",
    "ast_data": "ClassDef name:OpsWrapper FunctionDef name:_default arg:self arg:name arg:args arg:kwargs arguments arg arg arg arg Assign Call Assign Call Call Return return:yes Call Call Call FunctionDef name:_unwrap arg:x arguments arg If Call Return return:yes Call Call If Call Return return:yes Return return:yes FunctionDef name:_wrap arg:x arguments arg If Call Return return:yes Call Call Return return:yes Call FunctionDef name:indirect_indexing arg:index arg:size arg:check arg:wrap_neg arguments arg arg arg arg Assign Call Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "_set_view_from_bbox",
    "source_code": "def _set_view_from_bbox(self, bbox, direction='in', mode=None, twinx=False, twiny=False):\n    new_xbound, new_ybound = self._prepare_view_from_bbox(bbox, direction=direction, mode=mode, twinx=twinx, twiny=twiny)\n    if not twinx and mode != 'y':\n        self.set_xbound(new_xbound)\n        self.set_autoscalex_on(False)\n    if not twiny and mode != 'x':\n        self.set_ybound(new_ybound)\n        self.set_autoscaley_on(False)",
    "docstring": "Update view from a selection bbox. .. note:: Intended to be overridden by new projection types, but if not, the default implementation sets the view limits to the bbox directly. Parameters ---------- bbox : 4-tuple or 3 tuple * If bbox is a 4 tuple, it is the selected bounding box limits, in *display* coordinates. * If bbox is a 3 tuple, it is an (xp, yp, scl) triple, where (xp, yp) is the center of zooming and scl the scale factor to zoom by. direction : str The direction to apply the bounding box. * - The bounding box describes the view directly, i.e., it zooms in. * - The bounding box describes the size to make the existing view, i.e., it zooms out. mode : str or None The selection mode, whether to apply the bounding box in only the direction, direction or both (). twinx : bool Whether this axis is twinned in the *x*-direction. twiny : bool Whether this axis is twinned in the *y*-direction.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\axes\\_base.py",
    "ast_data": "FunctionDef name:_set_view_from_bbox arg:self arg:bbox arg:direction arg:mode arg:twinx arg:twiny arguments arg arg arg arg arg arg Assign Call If BoolOp Compare Call Call If BoolOp Compare Call Call"
  },
  {
    "library": "matplotlib",
    "name": "set_foreground",
    "source_code": "def set_foreground(self, fg, isRGBA=False):\n    if self._forced_alpha and isRGBA:\n        self._rgb = fg[:3] + (self._alpha,)\n    elif self._forced_alpha:\n        self._rgb = colors.to_rgba(fg, self._alpha)\n    elif isRGBA:\n        self._rgb = fg\n    else:\n        self._rgb = colors.to_rgba(fg)",
    "docstring": "Set the foreground color. Parameters ---------- fg : :mpltype: isRGBA : bool If *fg* is known to be an `` tuple, *isRGBA* can be set to True to improve performance.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\backend_bases.py",
    "ast_data": "FunctionDef name:set_foreground arg:self arg:fg arg:isRGBA arguments arg arg arg If BoolOp Assign If Assign Call If Assign Assign Call"
  },
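The non-RGBA paths delegate to matplotlib.colors.to_rgba; for reference (the color name is an assumption):

```python
# The conversion set_foreground relies on when isRGBA is False.
from matplotlib import colors

print(colors.to_rgba('red'))       # (1.0, 0.0, 0.0, 1.0)
print(colors.to_rgba('red', 0.5))  # (1.0, 0.0, 0.0, 0.5) -- forced-alpha path
```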
  {
    "library": "pytorch",
    "name": "wheel_unpack",
    "source_code": "@timed('Unpacking wheel file')\ndef wheel_unpack(self, wheel: Path | str, dest: Path | str, **popen_kwargs: Any) -> subprocess.CompletedProcess[str]:\n    wheel = Path(wheel).absolute()\n    dest = Path(dest).absolute()\n    assert wheel.is_file() and wheel.suffix.lower() == '.whl'\n    return self.wheel('unpack', '--dest', str(dest), str(wheel), **popen_kwargs)",
    "docstring": "Unpack a wheel into a directory.",
    "type": "method",
    "file_path": "pytorch\\tools\\nightly.py",
    "ast_data": "FunctionDef name:wheel_unpack arg:self arg:wheel arg:dest arguments arg arg arg arg Assign Call Call Assign Call Call BoolOp Call Compare Call Return return:yes Call Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "variables",
    "source_code": "@property\ndef variables(self) -> Dict[tpu_embedding_v2_utils.TableConfig, Dict[str, tf_variables.Variable]]:\n    self._maybe_build()\n    return self._variables",
    "docstring": "Returns a dict of variables, keyed by , then by slot name.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\tpu\\tpu_embedding_v3.py",
    "ast_data": "FunctionDef name:variables arg:self arguments arg Call Return return:yes"
  },
  {
    "library": "scipy",
    "name": "newton_point",
    "source_code": "def newton_point(self):\n    if self._newton_point is None:\n        g = self.jac\n        B = self.hess\n        cho_info = scipy.linalg.cho_factor(B)\n        self._newton_point = -scipy.linalg.cho_solve(cho_info, g)\n    return self._newton_point",
    "docstring": "The Newton point is a global minimum of the approximate function.",
    "type": "method",
    "file_path": "scipy\\scipy\\optimize\\_trustregion_dogleg.py",
    "ast_data": "FunctionDef name:newton_point arg:self arguments arg If Compare Assign Assign Assign Call Assign Call Return return:yes"
  },
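A standalone sketch of the same computation (the Hessian and gradient values are assumptions): solve B p = -g via Cholesky, which requires B to be symmetric positive definite.

```python
# Newton point: minimize g^T p + 1/2 p^T B p, i.e. solve B p = -g.
import numpy as np
import scipy.linalg

B = np.array([[2.0, 0.5],
              [0.5, 1.0]])    # example SPD Hessian
g = np.array([1.0, -1.0])     # example gradient

cho = scipy.linalg.cho_factor(B)
p = -scipy.linalg.cho_solve(cho, g)
print(np.allclose(B @ p, -g))  # True
```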
  {
    "library": "matplotlib",
    "name": "__init__",
    "source_code": "def __init__(self, legend, use_blit=False, update='loc'):\n    self.legend = legend\n    _api.check_in_list(['loc', 'bbox'], update=update)\n    self._update = update\n    super().__init__(legend, legend._legend_box, use_blit=use_blit)",
    "docstring": "Wrapper around a to support mouse dragging. Parameters ---------- legend : The instance to wrap. use_blit : bool, optional Use blitting for faster image composition. For details see :ref:. update : {'loc', 'bbox'}, optional If \"loc\", update the *loc* parameter of the legend upon finalizing. If \"bbox\", update the *bbox_to_anchor* parameter.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\legend.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:legend arg:use_blit arg:update arguments arg arg arg arg Assign Call Assign Call Call"
  },
  {
    "library": "pytorch",
    "name": "generate_partition_key",
    "source_code": "def generate_partition_key(repo: str, doc: dict[str, Any]) -> str:\n    workflow_id = doc['workflow_id']\n    job_id = doc['job_id']\n    test_name = doc['test_name']\n    filename = doc['filename']\n    hash_content = hashlib.md5(json.dumps(doc).encode('utf-8'), usedforsecurity=False).hexdigest()\n    return f'{repo}/{workflow_id}/{job_id}/{test_name}/{filename}/{hash_content}'",
    "docstring": "Generate an unique partition key for the document on DynamoDB",
    "type": "function",
    "file_path": "pytorch\\tools\\stats\\upload_dynamo_perf_stats.py",
    "ast_data": "FunctionDef name:generate_partition_key arg:repo arg:doc arguments arg arg Assign Assign Assign Assign Assign Call Call Call Call Return return:yes"
  },
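A dependency-free sketch of the same key scheme, with made-up document fields (the usedforsecurity flag, which needs Python 3.9+, is dropped here):

```python
# Same partition-key recipe with illustrative, made-up fields.
import hashlib
import json

doc = {"workflow_id": 123, "job_id": 456,
       "test_name": "test_add", "filename": "test_ops.py"}
h = hashlib.md5(json.dumps(doc).encode("utf-8")).hexdigest()
key = (f"pytorch/pytorch/{doc['workflow_id']}/{doc['job_id']}/"
       f"{doc['test_name']}/{doc['filename']}/{h}")
print(key)
```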
  {
    "library": "pytorch",
    "name": "fixed_config",
    "source_code": "def fixed_config(config, filename, triton_meta, inductor_meta):\n    config = {**config}\n    return cached_autotune(None, [triton.Config(config, **_pop_config_kwargs(config))], triton_meta=triton_meta, inductor_meta=inductor_meta, heuristic_type=HeuristicType.FIXED, filename=filename)",
    "docstring": "Used when the configuration is already decided at compile time",
    "type": "function",
    "file_path": "pytorch\\torch\\_inductor\\runtime\\triton_heuristics.py",
    "ast_data": "FunctionDef name:fixed_config arg:config arg:filename arg:triton_meta arg:inductor_meta arguments arg arg arg arg Assign Return return:yes Call Call Call"
  },
  {
    "library": "scipy",
    "name": "_dispatch",
    "source_code": "def _dispatch(func):\n    return generate_multimethod(func, _x_replacer, domain='numpy.scipy.fft')",
    "docstring": "Function annotation that creates a uarray multimethod from the function",
    "type": "function",
    "file_path": "scipy\\scipy\\fft\\_basic.py",
    "ast_data": "FunctionDef name:_dispatch arg:func arguments arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_verify_and_get_subgroup_size",
    "source_code": "def _verify_and_get_subgroup_size(self, group_assignment, num_shards):\n    if not group_assignment:\n        return None\n    if not (isinstance(group_assignment, list) and all((isinstance(i, list) for i in group_assignment))):\n        raise ValueError(f'Argument `group_assignment` must be a list of lists. Received: {group_assignment}')\n    replica_ids = set()\n    for g in group_assignment:\n        for i in g:\n            replica_ids.add(i)\n    if set(range(num_shards)) != replica_ids:\n        raise ValueError(f'Argument `group_assignment` must be a permutation of range({num_shards}). Received: {group_assignment}')\n    subgroup_size_list = [len(group) for group in group_assignment]\n    if all((subgroup_size_list[0] == size for size in subgroup_size_list)):\n        return subgroup_size_list[0]\n    else:\n        raise ValueError(f'The size of each subgroup in `group_assignment` must be equal. Received: {group_assignment}')",
    "docstring": "Verify group_assignment and get the subgroup size\". Args: group_assignment: list of group ids for applying the optimizer to subgroups. num_shards: The number of TPU shards. Returns: The size of one subgroup in group_assignment. Raises: ValueError: If group_assignment is invalid.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\tpu\\tpu_optimizer.py",
    "ast_data": "FunctionDef name:_verify_and_get_subgroup_size arg:self arg:group_assignment arg:num_shards arguments arg arg arg If Return return:no If BoolOp Call Call Call Raise Call Assign Call For For Call If Compare Call Call Raise Call Assign Call If Call Compare Return return:yes Raise Call"
  },
  {
    "library": "django",
    "name": "form_valid",
    "source_code": "def form_valid(self, form):\n    auth_login(self.request, form.get_user())\n    return HttpResponseRedirect(self.get_success_url())",
    "docstring": "Security check complete. Log the user in.",
    "type": "method",
    "file_path": "django\\django\\contrib\\auth\\views.py",
    "ast_data": "FunctionDef name:form_valid arg:self arg:form arguments arg arg Call Call Return return:yes Call Call"
  },
  {
    "library": "pandas",
    "name": "construct_array_type",
    "source_code": "@classmethod\ndef construct_array_type(cls) -> type_t[BooleanArray]:\n    return BooleanArray",
    "docstring": "Return the array type associated with this dtype. Returns ------- type",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\arrays\\boolean.py",
    "ast_data": "FunctionDef name:construct_array_type arg:cls arguments arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "type",
    "source_code": "@staticmethod\ndef type() -> str:\n    raise RuntimeError('CacheArtifact is an abstract class, please use a subclass')",
    "docstring": "Returns the type of the artifact. Must be unique across all CacheArtifact classes. CacheArtifactFactory.register will add property method to CacheInfo based on this (def {type}_artifacts) that returns all artifacts for specific cache.",
    "type": "method",
    "file_path": "pytorch\\torch\\compiler\\_cache.py",
    "ast_data": "FunctionDef name:type arguments Raise Call"
  },
  {
    "library": "tensorflow",
    "name": "name",
    "source_code": "@property\ndef name(self):\n    return self._queue.name",
    "docstring": "The string name of the underlying Queue.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\training\\queue_runner_impl.py",
    "ast_data": "FunctionDef name:name arg:self arguments arg Return return:yes"
  },
  {
    "library": "authlib",
    "name": "generate_authorization_code",
    "source_code": "def generate_authorization_code(self):\n    return generate_token(self.AUTHORIZATION_CODE_LENGTH)",
    "docstring": "\"The method to generate \"code\" value for authorization code data. Developers may rewrite this method, or customize the code length with:: class MyAuthorizationCodeGrant(AuthorizationCodeGrant): AUTHORIZATION_CODE_LENGTH = 32 # default is 48",
    "type": "method",
    "file_path": "authlib\\authlib\\oidc\\core\\grants\\hybrid.py",
    "ast_data": "FunctionDef name:generate_authorization_code arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "__call__",
    "source_code": "def __call__(self, x, *args):\n    self._compute_if_needed(x, *args)\n    return self._value",
    "docstring": "returns the function value",
    "type": "method",
    "file_path": "scipy\\scipy\\optimize\\_optimize.py",
    "ast_data": "FunctionDef name:__call__ arg:self arg:x arguments arg arg arg Call Return return:yes"
  },
  {
    "library": "django",
    "name": "as_sql",
    "source_code": "def as_sql(self):\n    sql, params = ([], [])\n    for annotation in self.query.annotation_select.values():\n        ann_sql, ann_params = self.compile(annotation)\n        ann_sql, ann_params = annotation.select_format(self, ann_sql, ann_params)\n        sql.append(ann_sql)\n        params.extend(ann_params)\n    self.col_count = len(self.query.annotation_select)\n    sql = ', '.join(sql)\n    params = tuple(params)\n    inner_query_sql, inner_query_params = self.query.inner_query.get_compiler(self.using, elide_empty=self.elide_empty).as_sql(with_col_aliases=True)\n    sql = 'SELECT %s FROM (%s) subquery' % (sql, inner_query_sql)\n    params += inner_query_params\n    return (sql, params)",
    "docstring": "Create the SQL for this query. Return the SQL string and list of parameters.",
    "type": "method",
    "file_path": "django\\django\\db\\models\\sql\\compiler.py",
    "ast_data": "FunctionDef name:as_sql arg:self arguments arg Assign For Call Assign Call Assign Call Call Call Assign Call Assign Call Assign Call Assign Call Call Assign Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "post_compile",
    "source_code": "def post_compile(wrappers: list[CompilerWrapper], compiled_fn: Callable, aot_config: AOTConfig, *, runtime_metadata: ViewAndMutationMeta) -> tuple[Callable, ViewAndMutationMeta]:\n    for wrapper in reversed(wrappers):\n        compiled_fn = wrapper.post_compile(compiled_fn, aot_config, runtime_metadata=runtime_metadata)\n    return (compiled_fn, runtime_metadata)",
    "docstring": "Runs a sequence of wrappers on the given function. Should be called after pre_compile()",
    "type": "function",
    "file_path": "pytorch\\torch\\_functorch\\_aot_autograd\\runtime_wrappers.py",
    "ast_data": "FunctionDef name:post_compile arg:wrappers arg:compiled_fn arg:aot_config arguments arg arg arg arg For Call Assign Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "tanhshrink",
    "source_code": "@elementwise_unary_scalar_wrapper\n@elementwise_type_promotion_wrapper(type_promoting_args=('a',), type_promotion_kind=utils.ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT)\ndef tanhshrink(a: TensorLikeType) -> TensorLikeType:\n    if not isinstance(a, TensorLike):\n        raise RuntimeError('Expected a tensor input for an elementwise unary operation!')\n    return a - torch.tanh(a)",
    "docstring": "Reference implementation of torch.nn.functional.tanhshrink",
    "type": "function",
    "file_path": "pytorch\\torch\\_refs\\nn\\functional\\__init__.py",
    "ast_data": "FunctionDef name:tanhshrink arg:a arguments arg If Call Raise Call Return return:yes Call Call"
  },
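A quick equivalence check against the eager op (a sketch; the random input is an assumption):

```python
# tanhshrink(x) == x - tanh(x), matching torch.nn.functional.tanhshrink.
import torch
import torch.nn.functional as F

x = torch.randn(5)
print(torch.allclose(F.tanhshrink(x), x - torch.tanh(x)))  # True
```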
  {
    "library": "scikit-learn",
    "name": "OpenMLError",
    "source_code": "class OpenMLError(ValueError):\n    pass",
    "docstring": "HTTP 412 is a specific OpenML error code, indicating a generic error",
    "type": "class",
    "file_path": "scikit-learn\\sklearn\\datasets\\_openml.py",
    "ast_data": "ClassDef name:OpenMLError"
  },
  {
    "library": "tensorflow",
    "name": "back_prop",
    "source_code": "@property\ndef back_prop(self):\n    if self.GetWhileContext():\n        return self.GetWhileContext().back_prop\n    return False",
    "docstring": "Forwards to the enclosing while context, if any.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\compiler\\xla\\xla.py",
    "ast_data": "FunctionDef name:back_prop arg:self arguments arg If Call Return return:yes Call Return return:yes"
  },
  {
    "library": "scipy",
    "name": "normaltest",
    "source_code": "@xp_capabilities(jax_jit=False, allow_dask_compute=True)\n@_axis_nan_policy_factory(NormaltestResult, n_samples=1, too_small=7)\ndef normaltest(a, axis=0, nan_policy='propagate'):\n    xp = array_namespace(a)\n    s, _ = skewtest(a, axis, _no_deco=True)\n    k, _ = kurtosistest(a, axis, _no_deco=True)\n    statistic = s * s + k * k\n    chi2 = _SimpleChi2(xp.asarray(2.0, dtype=statistic.dtype))\n    pvalue = _get_pvalue(statistic, chi2, alternative='greater', symmetric=False, xp=xp)\n    statistic = statistic[()] if statistic.ndim == 0 else statistic\n    pvalue = pvalue[()] if pvalue.ndim == 0 else pvalue\n    return NormaltestResult(statistic, pvalue)",
    "docstring": "Test whether a sample differs from a normal distribution. This function tests the null hypothesis that a sample comes from a normal distribution. It is based on D'Agostino and Pearson's [1]_, [2]_ test that combines skew and kurtosis to produce an omnibus test of normality. Parameters ---------- a : array_like The array containing the sample to be tested. Must contain at least eight observations. axis : int or None, optional Axis along which to compute test. Default is 0. If None, compute over the whole array . nan_policy : {'propagate', 'raise', 'omit'}, optional Defines how to handle when input contains nan. The following options are available (default is 'propagate'): * 'propagate': returns nan * 'raise': throws an error * 'omit': performs the calculations ignoring nan values Returns ------- statistic : float or array `skewtestkurtosistesthypothesis_normaltesthypothesis_normaltest`.",
    "type": "function",
    "file_path": "scipy\\scipy\\stats\\_stats_py.py",
    "ast_data": "FunctionDef name:normaltest arg:a arg:axis arg:nan_policy arguments arg arg arg Assign Call Assign Call Assign Call Assign Assign Call Call Assign Call Assign Compare Assign Compare Return return:yes Call Call Call"
  },
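Usage sketch (the sample and seed are assumptions): the statistic is s**2 + k**2 from the skewtest and kurtosistest z-scores, referred to a chi-squared law with 2 degrees of freedom.

```python
# For normal data, expect a small statistic and a large p-value.
import numpy as np
from scipy import stats

rng = np.random.default_rng(0)
x = rng.normal(size=500)
res = stats.normaltest(x)
print(res.statistic, res.pvalue)
```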
  {
    "library": "scipy",
    "name": "read_header",
    "source_code": "def read_header(ofile):\n    i = next(ofile)\n    while r_comment.match(i):\n        i = next(ofile)\n    relation = None\n    attributes = []\n    while not r_datameta.match(i):\n        m = r_headerline.match(i)\n        if m:\n            isattr = r_attribute.match(i)\n            if isattr:\n                attr, i = tokenize_attribute(ofile, i)\n                attributes.append(attr)\n            else:\n                isrel = r_relation.match(i)\n                if isrel:\n                    relation = isrel.group(1)\n                else:\n                    raise ValueError(f'Error parsing line {i}')\n                i = next(ofile)\n        else:\n            i = next(ofile)\n    return (relation, attributes)",
    "docstring": "Read the header of the iterable ofile.",
    "type": "function",
    "file_path": "scipy\\scipy\\io\\arff\\_arffread.py",
    "ast_data": "FunctionDef name:read_header arg:ofile arguments arg Assign Call While Call Assign Call Assign Assign While Call Assign Call If Assign Call If Assign Call Call Assign Call If Assign Call Raise Call Assign Call Assign Call Return return:yes"
  },
  {
    "library": "scipy",
    "name": "convolve1d",
    "source_code": "@_ni_docstrings.docfiller\ndef convolve1d(input, weights, axis=-1, output=None, mode='reflect', cval=0.0, origin=0):\n    weights = np.asarray(weights)\n    weights = weights[::-1]\n    origin = -origin\n    if not weights.shape[0] & 1:\n        origin -= 1\n    if weights.dtype.kind == 'c':\n        weights = weights.conj()\n    return correlate1d(input, weights, axis, output, mode, cval, origin)",
    "docstring": "Calculate a 1-D convolution along the given axis. The lines of the array along the given axis are convolved with the given weights. Parameters ---------- %(input)s weights : ndarray 1-D sequence of numbers. %(axis)s %(output)s %(mode_reflect)s %(cval)s %(origin)s Returns ------- convolve1d : ndarray Convolved array with same shape as input Examples -------- >>> from scipy.ndimage import convolve1d >>> convolve1d([2, 8, 0, 4, 1, 9, 9, 0], weights=[1, 3]) array([14, 24, 4, 13, 12, 36, 27, 0])",
    "type": "function",
    "file_path": "scipy\\scipy\\ndimage\\_filters.py",
    "ast_data": "FunctionDef name:convolve1d arg:input arg:weights arg:axis arg:output arg:mode arg:cval arg:origin arguments arg arg arg arg arg arg arg Assign Call Assign Assign If If Compare Assign Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "check_destinations",
    "source_code": "def check_destinations(destinations):\n    if isinstance(destinations, (resource_variable_ops.BaseResourceVariable, tensor_lib.Tensor)):\n        return bool(destinations.device)\n    return bool(destinations)",
    "docstring": "Checks whether is not empty. Args: destinations: a , variable, or string object. Returns: Boolean which is True if is not empty.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\cross_device_ops.py",
    "ast_data": "FunctionDef name:check_destinations arg:destinations arguments arg If Call Return return:yes Call Return return:yes Call"
  },
  {
    "library": "numpy",
    "name": "display",
    "source_code": "def display(self):\n    return self._display",
    "docstring": "Display the string to print for masked values.",
    "type": "method",
    "file_path": "numpy\\numpy\\ma\\core.py",
    "ast_data": "FunctionDef name:display arg:self arguments arg Return return:yes"
  },
  {
    "library": "authlib",
    "name": "JWSHeader",
    "source_code": "class JWSHeader(dict):\n\n    def __init__(self, protected, header):\n        obj = {}\n        if protected:\n            obj.update(protected)\n        if header:\n            obj.update(header)\n        super().__init__(obj)\n        self.protected = protected\n        self.header = header\n\n    @classmethod\n    def from_dict(cls, obj):\n        if isinstance(obj, cls):\n            return obj\n        return cls(obj.get('protected'), obj.get('header'))",
    "docstring": "Header object for JWS. It combine the protected header and unprotected header together. JWSHeader itself is a dict of the combined dict. e.g. >>> protected = {\"alg\": \"HS256\"} >>> header = {\"kid\": \"a\"} >>> jws_header = JWSHeader(protected, header) >>> print(jws_header) {'alg': 'HS256', 'kid': 'a'} >>> jws_header.protected == protected >>> jws_header.header == header :param protected: dict of protected header :param header: dict of unprotected header",
    "type": "class",
    "file_path": "authlib\\authlib\\jose\\rfc7515\\models.py",
    "ast_data": "ClassDef name:JWSHeader FunctionDef name:__init__ arg:self arg:protected arg:header arguments arg arg arg Assign If Call If Call Call Call Assign Assign FunctionDef name:from_dict arg:cls arg:obj arguments arg arg If Call Return return:yes Return return:yes Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "get_device_properties",
    "source_code": "def get_device_properties(device: Optional[_device_t]=None) -> _XpuDeviceProperties:\n    _lazy_init()\n    device = _get_device_index(device, optional=True)\n    return _get_device_properties(device)",
    "docstring": "Get the properties of a device. Args: device (torch.device or int or str): device for which to return the properties of the device. Returns: _XpuDeviceProperties: the properties of the device",
    "type": "function",
    "file_path": "pytorch\\torch\\xpu\\__init__.py",
    "ast_data": "FunctionDef name:get_device_properties arg:device arguments arg Call Assign Call Return return:yes Call"
  },
  {
    "library": "cherrypy",
    "name": "CaseInsensitiveDict",
    "source_code": "class CaseInsensitiveDict(jaraco.collections.KeyTransformingDict):\n\n    @staticmethod\n    def transform_key(key):\n        if key is None:\n            return 'None'\n        return key.title()",
    "docstring": "A case-insensitive dict subclass. Each key is changed on entry to title case.",
    "type": "class",
    "file_path": "cherrypy\\cherrypy\\lib\\httputil.py",
    "ast_data": "ClassDef name:CaseInsensitiveDict FunctionDef name:transform_key arg:key arguments arg If Compare Return return:yes Return return:yes Call"
  },
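A dependency-free sketch of the same idea without jaraco.collections (a minimal version; a complete one would also override __delitem__, __contains__, get, and friends):

```python
# Minimal title-casing dict mirroring CaseInsensitiveDict's behavior.
class TitleCaseDict(dict):
    @staticmethod
    def transform_key(key):
        return 'None' if key is None else key.title()

    def __setitem__(self, key, value):
        super().__setitem__(self.transform_key(key), value)

    def __getitem__(self, key):
        return super().__getitem__(self.transform_key(key))


h = TitleCaseDict()
h['content-type'] = 'text/html'
print(h['CONTENT-TYPE'])  # 'text/html' -- lookups normalize the same way
print(h)                  # {'Content-Type': 'text/html'}
```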
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, num_steps=None, last_step=None):\n    if num_steps is None and last_step is None:\n        raise ValueError('One of num_steps or last_step must be specified.')\n    if num_steps is not None and last_step is not None:\n        raise ValueError('Only one of num_steps or last_step can be specified.')\n    self._num_steps = num_steps\n    self._last_step = last_step",
    "docstring": "Initializes a . This hook requests stop after either a number of steps have been executed or a last step has been reached. Only one of the two options can be specified. if is specified, it indicates the number of steps to execute after is called. If instead is specified, it indicates the last step we want to execute, as passed to the call. Args: num_steps: Number of steps to execute. last_step: Step after which to stop. Raises: ValueError: If one of the arguments is invalid.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\training\\basic_session_run_hooks.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:num_steps arg:last_step arguments arg arg arg If BoolOp Compare Compare Raise Call If BoolOp Compare Compare Raise Call Assign Assign"
  },
  {
    "library": "kornia",
    "name": "BlobDoGSingle",
    "source_code": "class BlobDoGSingle(Module):\n\n    def __init__(self, sigma1: float=1.0, sigma2: float=1.6) -> None:\n        super().__init__()\n        self.sigma1 = sigma1\n        self.sigma2 = sigma2\n\n    def __repr__(self) -> str:\n        return f'{self.__class__.__name__}, sigma1={self.sigma1}, sigma2={self.sigma2})'\n\n    def forward(self, input: Tensor, sigmas: Optional[Tensor]=None) -> Tensor:\n        return dog_response_single(input, self.sigma1, self.sigma2)",
    "docstring": "Module that calculates Difference-of-Gaussians blobs. .. image:: _static/img/dog_response_single.png See :func: for details.",
    "type": "class",
    "file_path": "kornia\\kornia\\feature\\responses.py",
    "ast_data": "ClassDef name:BlobDoGSingle FunctionDef name:__init__ arg:self arg:sigma1 arg:sigma2 arguments arg arg arg Call Call Assign Assign FunctionDef name:__repr__ arg:self arguments arg Return return:yes FunctionDef name:forward arg:self arg:input arg:sigmas arguments arg arg arg Return return:yes Call"
  },
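Usage sketch (the input shape follows kornia's usual (B, C, H, W) convention, and the random image is an assumption):

```python
# Difference-of-Gaussians response on a random single-channel batch.
import torch
from kornia.feature import BlobDoGSingle

img = torch.rand(1, 1, 32, 32)
dog = BlobDoGSingle(sigma1=1.0, sigma2=1.6)
print(dog(img).shape)  # spatial size preserved
```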
  {
    "library": "pytorch",
    "name": "complex_float",
    "source_code": "def complex_float(self):\n    _warn_typed_storage_removal()\n    return self._to(torch.cfloat)",
    "docstring": "Casts this storage to complex float type.",
    "type": "method",
    "file_path": "pytorch\\torch\\storage.py",
    "ast_data": "FunctionDef name:complex_float arg:self arguments arg Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, target='', graph=None, config=None):\n    if not config:\n        gpu_options = config_pb2.GPUOptions(allow_growth=True)\n        config = config_pb2.ConfigProto(gpu_options=gpu_options)\n    config.graph_options.place_pruned_graph = True\n    super(InteractiveSession, self).__init__(target, graph, config)\n    with InteractiveSession._count_lock:\n        if InteractiveSession._active_session_count > 0:\n            logging.error('An interactive session is already active. This can cause out-of-memory errors or some other unexpected errors (due to the unpredictable timing of garbage collection) in some cases. You must explicitly call `InteractiveSession.close()` to release resources held by the other session(s). Please use `tf.Session()` if you intend to productionize.')\n        InteractiveSession._active_session_count += 1\n    self._explicitly_closed = False\n    self._default_session = self.as_default()\n    self._default_session.enforce_nesting = False\n    self._default_session.__enter__()\n    self._explicit_graph = graph\n    if self._explicit_graph is not None:\n        self._default_graph = graph.as_default()\n        self._default_graph.enforce_nesting = False\n        self._default_graph.__enter__()",
    "docstring": "Creates a new interactive TensorFlow session. If no argument is specified when constructing the session, the default graph will be launched in the session. If you are using more than one graph (created with ) in the same process, you will have to use different sessions for each graph, but each graph can be used in multiple sessions. In this case, it is often clearer to pass the graph to be launched explicitly to the session constructor. Args: target: (Optional.) The execution engine to connect to. Defaults to using an in-process engine. graph: (Optional.) The to be launched (described above). config: (Optional) proto used to configure the session.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\client\\session.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:target arg:graph arg:config arguments arg arg arg arg If Assign Call Assign Call Assign Call Call With If Compare Call Assign Assign Call Assign Call Assign If Compare Assign Call Assign Call"
  },
  {
    "library": "pytorch",
    "name": "flatten_parameters",
    "source_code": "def flatten_parameters(self) -> None:\n    if len(self._flat_weights) != len(self._flat_weights_names):\n        return\n    for w in self._flat_weights:\n        if not isinstance(w, Tensor):\n            return\n    first_fw = self._flat_weights[0]\n    dtype = first_fw.dtype\n    for fw in self._flat_weights:\n        if not isinstance(fw, Tensor) or not fw.dtype == dtype or (not fw.is_cuda) or (not torch.backends.cudnn.is_acceptable(fw)):\n            return\n    unique_data_ptrs = {p.data_ptr() for p in self._flat_weights}\n    if len(unique_data_ptrs) != len(self._flat_weights):\n        return\n    with torch.cuda.device_of(first_fw):\n        import torch.backends.cudnn.rnn as rnn\n        with torch.no_grad():\n            if torch._use_cudnn_rnn_flatten_weight():\n                num_weights = 4 if self.bias else 2\n                if self.proj_size > 0:\n                    num_weights += 1\n                torch._cudnn_rnn_flatten_weight(self._flat_weights, num_weights, self.input_size, rnn.get_cudnn_mode(self.mode), self.hidden_size, self.proj_size, self.num_layers, self.batch_first, bool(self.bidirectional))",
    "docstring": "Reset parameter data pointer so that they can use faster code paths. Right now, this works only if the module is on the GPU and cuDNN is enabled. Otherwise, it's a no-op.",
    "type": "method",
    "file_path": "pytorch\\torch\\nn\\modules\\rnn.py",
    "ast_data": "FunctionDef name:flatten_parameters arg:self arguments arg If Compare Call Call Return return:no For If Call Return return:no Assign Assign For If BoolOp Call Compare Call Return return:no Assign Call If Compare Call Call Return return:no With Call With Call If Call Assign If Compare Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "stride",
    "source_code": "def stride(self, name, index=None):\n    if name is None:\n        val = self.output_node.get_stride()\n    else:\n        assert isinstance(name, str)\n        val = self.named_input_nodes[name].get_stride()\n    if isinstance(index, int):\n        return texpr(self.rename_indexing(val[index]))\n    return ', '.join([texpr(self.rename_indexing(i)) for i in val])",
    "docstring": "Hook called from template code to get the stride of an arg. Will add needed args to pass it in if it is dynamic.",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\select_algorithm.py",
    "ast_data": "FunctionDef name:stride arg:self arg:name arg:index arguments arg arg arg If Compare Assign Call Call Assign Call If Call Return return:yes Call Call Return return:yes Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "can_fuse_vertical",
    "source_code": "def can_fuse_vertical(self, node1: BaseSchedulerNode, node2: BaseSchedulerNode) -> bool:\n    node1_buf_names = node1.get_buffer_names()\n    why = WhyNoFuse(node1, node2)\n    remaining_deps_by_name: dict[str, list[Dep]] = defaultdict(list)\n    for dep in node2.unmet_dependencies:\n        name = self.mutation_renames.get(dep.name, dep.name)\n        if isinstance(dep, WeakDep) and self.fusable_weak_dep(dep, node1, node2):\n            continue\n        remaining_deps_by_name[name].append(dep)\n    for cd in node1.read_writes.writes:\n        if not isinstance(cd, MemoryDep):\n            continue\n        remaining = remaining_deps_by_name.get(self.mutation_renames.get(cd.name, cd.name))\n        if remaining:\n            for rd in remaining:\n                if self.fusable_read_and_write(rd, cd):\n                    remaining.remove(rd)\n    remaining_deps = OrderedSet((dep.name for dep in itertools.chain.from_iterable(remaining_deps_by_name.values())))\n    if remaining_deps & node1_buf_names:\n        why('memory deps did not match')\n        return False\n    node1_op_names = node1.get_operation_names()\n    for name in remaining_deps:\n        op_name = self.name_to_buf[name].defining_op_name()\n        if node1_op_names & self.name_to_fused_node[op_name].ancestors:\n            why('intermediate nodes between node1 & node2')\n            return False\n    return True",
    "docstring": "Check if it is legal to fuse a consumer (node2) into a producer (node1). We can fuse them if all the reads of node2 either match corresponding writes in node1, or are written by nodes that can be scheduled before the fusion of node1 and node2.",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\scheduler.py",
    "ast_data": "FunctionDef name:can_fuse_vertical arg:self arg:node1 arg:node2 arguments arg arg arg Assign Call Assign Call Call For Assign Call If BoolOp Call Call Call For If Call Assign Call Call If For If Call Call Assign Call Call Call If Call Return return:yes Assign Call For Assign Call If Call Return return:yes Return return:yes"
  },
  {
    "library": "django",
    "name": "url_converter",
    "source_code": "def url_converter(self, name, hashed_files, template=None):\n    if template is None:\n        template = self.default_template\n\n    def converter(matchobj):\n        matches = matchobj.groupdict()\n        matched = matches['matched']\n        url = matches['url']\n        if re.match('^[a-z]+:', url) or url.startswith('//'):\n            return matched\n        if url.startswith('/') and (not url.startswith(settings.STATIC_URL)):\n            return matched\n        url_path, fragment = urldefrag(url)\n        if not url_path:\n            return matched\n        if url_path.startswith('/'):\n            assert url_path.startswith(settings.STATIC_URL)\n            target_name = url_path.removeprefix(settings.STATIC_URL)\n        else:\n            source_name = name if os.sep == '/' else name.replace(os.sep, '/')\n            target_name = posixpath.join(posixpath.dirname(source_name), url_path)\n        hashed_url = self._url(self._stored_name, unquote(target_name), force=True, hashed_files=hashed_files)\n        transformed_url = '/'.join(url_path.split('/')[:-1] + hashed_url.split('/')[-1:])\n        if fragment:\n            transformed_url += ('?#' if '?#' in url else '#') + fragment\n        matches['url'] = unquote(transformed_url)\n        return template % matches\n    return converter",
    "docstring": "Return the custom URL converter for the given file name.",
    "type": "method",
    "file_path": "django\\django\\contrib\\staticfiles\\storage.py",
    "ast_data": "FunctionDef name:url_converter arg:self arg:name arg:hashed_files arg:template arguments arg arg arg arg If Compare Assign FunctionDef name:converter arg:matchobj arguments arg Assign Call Assign Assign If BoolOp Call Call Return return:yes If BoolOp Call Call Return return:yes Assign Call If Return return:yes If Call Call Assign Call Assign Compare Call Assign Call Call Assign Call Call Assign Call Call Call If Compare Assign Call Return return:yes Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_mode",
    "source_code": "def _mode(self):\n    return math_ops.cast(self.probs > 0.5, self.dtype)",
    "docstring": "Returns if and otherwise.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\distributions\\bernoulli.py",
    "ast_data": "FunctionDef name:_mode arg:self arguments arg Return return:yes Call Compare"
  },
  {
    "library": "matplotlib",
    "name": "get_annotation_clip",
    "source_code": "def get_annotation_clip(self):\n    return self._annotation_clip",
    "docstring": "Return the annotation's clipping behavior. See for the meaning of return values.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\text.py",
    "ast_data": "FunctionDef name:get_annotation_clip arg:self arguments arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "debug_checks",
    "source_code": "def debug_checks(code):\n    dode = transform_code_object(code, lambda x, y: None, safe=True)\n    assert code.co_code == dode.co_code, debug_bytes(code.co_code, dode.co_code)\n    assert code.co_lnotab == dode.co_lnotab, debug_bytes(code.co_lnotab, dode.co_lnotab)",
    "docstring": "Make sure our assembler produces same bytes as we start with",
    "type": "function",
    "file_path": "pytorch\\torch\\_dynamo\\bytecode_transformation.py",
    "ast_data": "FunctionDef name:debug_checks arg:code arguments arg Assign Call arguments arg arg Compare Call Compare Call"
  },
  {
    "library": "tensorflow",
    "name": "_empty_dict_pylist_from_row_partitions",
    "source_code": "def _empty_dict_pylist_from_row_partitions(row_partitions, nrows):\n    if not row_partitions:\n        return [{} for _ in range(nrows)]\n    else:\n        values = _empty_dict_pylist_from_row_partitions(row_partitions[1:], row_partitions[0].row_splits()[-1])\n        splits = row_partitions[0].row_splits()\n        return [values[splits[i]:splits[i + 1]] for i in range(len(splits) - 1)]",
    "docstring": "Returns a python list of empty dicts from the given row partitions. Args: row_partitions: The row-partitions describing the ragged shape of the result. nrows: The number of rows in the outermost row-partition. (Or if , then the number of empty dicts to return.) Returns: A nested python list whose leaves (if any) are empty python dicts.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\structured\\structured_tensor.py",
    "ast_data": "FunctionDef name:_empty_dict_pylist_from_row_partitions arg:row_partitions arg:nrows arguments arg arg If Return return:yes Call Assign Call Call Assign Call Return return:yes Call Call"
  },
  {
    "library": "django",
    "name": "clear_select_fields",
    "source_code": "def clear_select_fields(self):\n    self.select = ()\n    self.values_select = ()\n    self.selected = None",
    "docstring": "Clear the list of fields to select (but not extra_select columns). Some queryset types completely replace any existing list of select columns.",
    "type": "method",
    "file_path": "django\\django\\db\\models\\sql\\query.py",
    "ast_data": "FunctionDef name:clear_select_fields arg:self arguments arg Assign Assign Assign"
  },
  {
    "library": "pytorch",
    "name": "_is_default",
    "source_code": "def _is_default(self, name: str) -> bool:\n    config_val = self._config[name]\n    not_set_env_default = config_val.env_value_default is _UNSET_SENTINEL or config_val.env_value_default == config_val.default\n    not_set_env_force = config_val.env_value_force is _UNSET_SENTINEL or config_val.env_value_force == config_val.default\n    unset = config_val.user_override is _UNSET_SENTINEL\n    if isinstance(config_val.default, (list, set, dict)):\n        unset = unset or config_val.user_override == config_val.default\n    return unset and not_set_env_default and not_set_env_force",
    "docstring": "Returns true if the config is at its default value. configs overriden by the env are not considered default.",
    "type": "method",
    "file_path": "pytorch\\torch\\utils\\_config_module.py",
    "ast_data": "FunctionDef name:_is_default arg:self arg:name arguments arg arg Assign Assign BoolOp Compare Compare Assign BoolOp Compare Compare Assign Compare If Call Assign BoolOp Compare Return return:yes BoolOp"
  },
  {
    "library": "tensorflow",
    "name": "SharedObjectConfig",
    "source_code": "class SharedObjectConfig(dict):\n\n    def __init__(self, base_config, object_id, **kwargs):\n        self.ref_count = 1\n        self.object_id = object_id\n        super(SharedObjectConfig, self).__init__(base_config, **kwargs)\n\n    def increment_ref_count(self):\n        if self.ref_count == 1:\n            self[SHARED_OBJECT_KEY] = self.object_id\n        self.ref_count += 1",
    "docstring": "A configuration container that keeps track of references. will automatically attach a shared object ID to any configs which are referenced more than once, allowing for proper shared object reconstruction at load time. In most cases, it would be more proper to subclass something like or rather than directly. Unfortunately, python's json encoder does not support s. This is important functionality to retain, since we are dealing with serialization. We should be safe to subclass here, since we aren't actually overriding any core methods, only augmenting with a new one for reference counting.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\utils\\generic_utils.py",
    "ast_data": "ClassDef name:SharedObjectConfig FunctionDef name:__init__ arg:self arg:base_config arg:object_id arguments arg arg arg arg Assign Assign Call Call FunctionDef name:increment_ref_count arg:self arguments arg If Compare Assign"
  },
  {
    "library": "matplotlib",
    "name": "_get_dash_pattern",
    "source_code": "def _get_dash_pattern(style):\n    if isinstance(style, str):\n        style = ls_mapper.get(style, style)\n    if style in ['solid', 'None']:\n        offset = 0\n        dashes = None\n    elif style in ['dashed', 'dashdot', 'dotted']:\n        offset = 0\n        dashes = tuple(mpl.rcParams[f'lines.{style}_pattern'])\n    elif isinstance(style, tuple):\n        offset, dashes = style\n        if offset is None:\n            raise ValueError(f'Unrecognized linestyle: {style!r}')\n    else:\n        raise ValueError(f'Unrecognized linestyle: {style!r}')\n    if dashes is not None:\n        dsum = sum(dashes)\n        if dsum:\n            offset %= dsum\n    return (offset, dashes)",
    "docstring": "Convert linestyle to dash pattern.",
    "type": "function",
    "file_path": "matplotlib\\lib\\matplotlib\\lines.py",
    "ast_data": "FunctionDef name:_get_dash_pattern arg:style arguments arg If Call Assign Call If Compare Assign Assign If Compare Assign Assign Call If Call Assign If Compare Raise Call Raise Call If Compare Assign Call If Return return:yes"
  },
  {
    "library": "django",
    "name": "_should_delete_form",
    "source_code": "def _should_delete_form(self, form):\n    return form.cleaned_data.get(DELETION_FIELD_NAME, False)",
    "docstring": "Return whether or not the form was marked for deletion.",
    "type": "method",
    "file_path": "django\\django\\forms\\formsets.py",
    "ast_data": "FunctionDef name:_should_delete_form arg:self arg:form arguments arg arg Return return:yes Call"
  },
  {
    "library": "django",
    "name": "XFrameOptionsMiddleware",
    "source_code": "class XFrameOptionsMiddleware(MiddlewareMixin):\n\n    def process_response(self, request, response):\n        if response.get('X-Frame-Options') is not None:\n            return response\n        if getattr(response, 'xframe_options_exempt', False):\n            return response\n        response.headers['X-Frame-Options'] = self.get_xframe_options_value(request, response)\n        return response\n\n    def get_xframe_options_value(self, request, response):\n        return getattr(settings, 'X_FRAME_OPTIONS', 'DENY').upper()",
    "docstring": "Set the X-Frame-Options HTTP header in HTTP responses. Do not set the header if it's already set or if the response contains a xframe_options_exempt value set to True. By default, set the X-Frame-Options header to 'DENY', meaning the response cannot be displayed in a frame, regardless of the site attempting to do so. To enable the response to be loaded on a frame within the same site, set X_FRAME_OPTIONS in your project's Django settings to 'SAMEORIGIN'.",
    "type": "class",
    "file_path": "django\\django\\middleware\\clickjacking.py",
    "ast_data": "ClassDef name:XFrameOptionsMiddleware FunctionDef name:process_response arg:self arg:request arg:response arguments arg arg arg If Compare Call Return return:yes If Call Return return:yes Assign Call Return return:yes FunctionDef name:get_xframe_options_value arg:self arg:request arg:response arguments arg arg arg Return return:yes Call Call"
  },
  {
    "library": "scipy",
    "name": "Ratkowsky01",
    "source_code": "class Ratkowsky01(Benchmark):\n\n    def __init__(self, dimensions=4):\n        Benchmark.__init__(self, dimensions)\n        self._bounds = list(zip([0.0, 1.0, 0.0, 0.1], [1000, 20.0, 3.0, 6.0]))\n        self.global_optimum = [[699.6415127, 5.2771253025, 0.75962938329, 1.2792483859]]\n        self.fglob = 8786.404908\n        self.a = asarray([16.08, 33.83, 65.8, 97.2, 191.55, 326.2, 386.87, 520.53, 590.03, 651.92, 724.93, 699.56, 689.96, 637.56, 717.41])\n        self.b = arange(1, 16.0)\n\n    def fun(self, x, *args):\n        self.nfev += 1\n        vec = x[0] / (1 + exp(x[1] - x[2] * self.b)) ** (1 / x[3])\n        return sum((self.a - vec) ** 2)",
    "docstring": "Ratkowsky objective function. .. [1]",
    "type": "class",
    "file_path": "scipy\\benchmarks\\benchmarks\\go_benchmark_functions\\go_funcs_R.py",
    "ast_data": "ClassDef name:Ratkowsky01 FunctionDef name:__init__ arg:self arg:dimensions arguments arg arg Call Assign Call Call Assign Assign Assign Call Assign Call FunctionDef name:fun arg:self arg:x arguments arg arg arg Assign Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "is_compatible_with",
    "source_code": "def is_compatible_with(self, spec_or_value):\n    if not isinstance(spec_or_value, (type(self), self.value_type)):\n        return False\n    compatible = self.shape.is_compatible_with(spec_or_value.shape) and self.dtype == spec_or_value.dtype and (self.trainable == spec_or_value.trainable)\n    if isinstance(spec_or_value, type(self)):\n        return compatible and self.alias_id == spec_or_value.alias_id\n    return compatible",
    "docstring": "Returns True if is compatible with this . is considered to be compatible with this if * is a or , * their shapes are compatible, * their dtypes are the same, * they are both trainable or not trainable. * they share the same alias_id if is a . Example: >>> v = tf.Variable([1., 2., 3.]) >>> spec = VariableSpec([None]) >>> spec.is_compatible_with(v) True >>> v = tf.Variable(1) >>> spec.is_compatible_with(v) False Args: spec_or_value: A VariableSpec or Variable to compare against. Returns: True if is compatible with this .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\resource_variable_ops.py",
    "ast_data": "FunctionDef name:is_compatible_with arg:self arg:spec_or_value arguments arg arg If Call Call Return return:yes Assign BoolOp Call Compare Compare If Call Call Return return:yes BoolOp Compare Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "build",
    "source_code": "@trackable.no_automatic_dependency_tracking\n@generic_utils.default\ndef build(self, input_shape):\n    if not hasattr(self.build, '_is_default'):\n        self._build_input_shape = input_shape\n    self.built = True",
    "docstring": "Creates the variables of the layer (optional, for subclass implementers). This is a method that implementers of subclasses of or can override if they need a state-creation step in-between layer instantiation and layer call. This is typically used to create the weights of subclasses. Args: input_shape: Instance of , or list of instances of if the layer expects a list of inputs (one instance per input).",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\base_layer.py",
    "ast_data": "FunctionDef name:build arg:self arg:input_shape arguments arg arg If Call Assign Assign"
  },
  {
    "library": "django",
    "name": "func_accepts_var_args",
    "source_code": "def func_accepts_var_args(func):\n    return any((p for p in _get_callable_parameters(func) if p.kind == p.VAR_POSITIONAL))",
    "docstring": "Return True if function 'func' accepts positional arguments *args.",
    "type": "function",
    "file_path": "django\\django\\utils\\inspect.py",
    "ast_data": "FunctionDef name:func_accepts_var_args arg:func arguments arg Return return:yes Call Call Compare"
  },
  {
    "library": "pytorch",
    "name": "get_first_attr",
    "source_code": "def get_first_attr(obj: Any, *attrs: str) -> Any:\n    for attr in attrs:\n        if hasattr(obj, attr):\n            return getattr(obj, attr)\n    raise AssertionError(f'{obj} does not has any of the attributes: {attrs}')",
    "docstring": "Return the first available attribute or throw an exception if none is present.",
    "type": "function",
    "file_path": "pytorch\\torch\\_inductor\\runtime\\runtime_utils.py",
    "ast_data": "FunctionDef name:get_first_attr arg:obj arguments arg arg For If Call Return return:yes Call Raise Call"
  },
  {
    "library": "kornia",
    "name": "io_name_conversion",
    "source_code": "def io_name_conversion(onnx_model: onnx.ModelProto, io_name_mapping: dict[str, str]) -> onnx.ModelProto:\n    for i in range(len(onnx_model.graph.input)):\n        in_name = onnx_model.graph.input[i].name\n        if in_name in io_name_mapping:\n            onnx_model.graph.input[i].name = io_name_mapping[in_name]\n    for i in range(len(onnx_model.graph.output)):\n        out_name = onnx_model.graph.output[i].name\n        if out_name in io_name_mapping:\n            onnx_model.graph.output[i].name = io_name_mapping[out_name]\n    for i in range(len(onnx_model.graph.node)):\n        for j in range(len(onnx_model.graph.node[i].input)):\n            if onnx_model.graph.node[i].input[j] in io_name_mapping:\n                onnx_model.graph.node[i].input[j] = io_name_mapping[in_name]\n    for j in range(len(onnx_model.graph.node[i].output)):\n        if onnx_model.graph.node[i].output[j] in io_name_mapping:\n            onnx_model.graph.node[i].output[j] = io_name_mapping[out_name]\n    return onnx_model",
    "docstring": "Convert the input and output names of an ONNX model to 'input' and 'output'. Args: onnx_model: The ONNX model to convert. io_name_mapping: A dictionary mapping the original input and output names to the new ones.",
    "type": "function",
    "file_path": "kornia\\kornia\\onnx\\utils.py",
    "ast_data": "FunctionDef name:io_name_conversion arg:onnx_model arg:io_name_mapping arguments arg arg For Call Call Assign If Compare Assign For Call Call Assign If Compare Assign For Call Call For Call Call If Compare Assign For Call Call If Compare Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_get_variables_path",
    "source_code": "def _get_variables_path(export_dir):\n    return os.path.join(compat.as_text(_get_variables_dir(export_dir)), compat.as_text(constants.VARIABLES_FILENAME))",
    "docstring": "Return the variables path, used as the prefix for checkpoint files.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\saving\\saved_model_experimental.py",
    "ast_data": "FunctionDef name:_get_variables_path arg:export_dir arguments arg Return return:yes Call Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "set_intersection",
    "source_code": "@tf_export('sets.intersection', v1=['sets.intersection', 'sets.set_intersection'])\n@dispatch.add_dispatch_support\ndef set_intersection(a, b, validate_indices=True):\n    a, b, _ = _convert_to_tensors_or_sparse_tensors(a, b)\n    return _set_operation(a, b, 'intersection', validate_indices)",
    "docstring": "Compute set intersection of elements in last dimension of and . All but the last dimension of and must match. Example: Args: a: or of the same type as . If sparse, indices must be sorted in row-major order. b: or of the same type as . If sparse, indices must be sorted in row-major order. validate_indices: Whether to validate the order and range of sparse indices in and . Returns: A whose shape is the same rank as and , and all but the last dimension the same. Elements along the last dimension contain the intersections.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\sets_impl.py",
    "ast_data": "FunctionDef name:set_intersection arg:a arg:b arg:validate_indices arguments arg arg arg Assign Call Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_to_tensor",
    "source_code": "def _to_tensor(dataset_id) -> tensor.Tensor:\n    if isinstance(dataset_id, tensor.Tensor):\n        return dataset_id\n    if isinstance(dataset_id, str) or isinstance(dataset_id, bytes):\n        return ops.convert_to_tensor(dataset_id, dtype=dtypes.string, name='dataset_id')\n    return ops.convert_to_tensor(dataset_id, dtype=dtypes.int64, name='dataset_id')",
    "docstring": "Converts to Tensor.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\data\\experimental\\ops\\data_service_ops.py",
    "ast_data": "FunctionDef name:_to_tensor arg:dataset_id arguments arg If Call Return return:yes If BoolOp Call Call Return return:yes Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "stitch_values",
    "source_code": "def stitch_values(values_and_indices_list):\n    length = 0\n    for values_and_indices in values_and_indices_list:\n        length += len(values_and_indices[0])\n    result = [None] * length\n    for values_and_indices in values_and_indices_list:\n        if values_and_indices and values_and_indices[0]:\n            for v, i in zip(*values_and_indices):\n                assert result[i] is None\n                result[i] = v\n    return result",
    "docstring": "Stitch values together according to their indices. Args: values_and_indices_list: a list of tuples of values and indices indicating the values and positions in the returned list. Returns: a stitched list of values.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\cross_device_utils.py",
    "ast_data": "FunctionDef name:stitch_values arg:values_and_indices_list arguments arg Assign For Call Assign For If BoolOp For Call Compare Assign Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_initialize_workers",
    "source_code": "@prof\ndef _initialize_workers(self, worker_group: WorkerGroup) -> None:\n    role = worker_group.spec.role\n    logger.info(\"[%s] Rendezvous'ing worker group\", role)\n    self._rendezvous(worker_group)\n    logger.info('[%s] Starting worker group', role)\n    worker_ids = self._start_workers(worker_group)\n    for local_rank, w_id in worker_ids.items():\n        worker = worker_group.workers[local_rank]\n        worker.id = w_id\n        record(self._construct_event('START', EventSource.WORKER, worker))\n    worker_group.state = WorkerState.HEALTHY",
    "docstring": "Start a fresh set of workers for the worker_group. Essentially, a rendezvous followed by a `` method",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\elastic\\agent\\server\\api.py",
    "ast_data": "FunctionDef name:_initialize_workers arg:self arg:worker_group arguments arg arg Assign Call Call Call Assign Call For Call Assign Assign Call Call Assign"
  },
  {
    "library": "pytorch",
    "name": "convert_conv2d_weight_memory_format",
    "source_code": "def convert_conv2d_weight_memory_format(module: _M, memory_format: torch.memory_format) -> _M:\n    if isinstance(module, (torch.nn.Conv2d, torch.nn.ConvTranspose2d)):\n        weight_data = module.weight.detach().clone(memory_format=memory_format)\n        module.weight.data = weight_data.resize_(weight_data.size(), memory_format=memory_format)\n    for child in module.children():\n        convert_conv2d_weight_memory_format(child, memory_format)\n    return module",
    "docstring": "Convert `` Example: >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_CUDA) >>> # xdoctest: +REQUIRES(env:CUBLAS_WORKSPACE_CONFIG) >>> input = torch.randint(1, 10, (2, 8, 4, 4), dtype=torch.float16, device=\"cuda\") >>> model = nn.Sequential( >>> nn.Conv2d(8, 4, 3)).cuda().half() >>> # This is identical to: >>> # nn.utils.convert_conv2d_weight_memory_format(model, torch.channels_last) >>> model = nn.utils.convert_conv2d_weight_memory_format(model, torch.channels_last) >>> out = model(input)",
    "type": "function",
    "file_path": "pytorch\\torch\\nn\\utils\\memory_format.py",
    "ast_data": "FunctionDef name:convert_conv2d_weight_memory_format arg:module arg:memory_format arguments arg arg If Call Assign Call Call Assign Call Call For Call Call Return return:yes"
  },
  {
    "library": "cherrypy",
    "name": "response_namespace",
    "source_code": "def response_namespace(k, v):\n    if k[:8] == 'headers.':\n        cherrypy.serving.response.headers[k.split('.', 1)[1]] = v\n    else:\n        setattr(cherrypy.serving.response, k, v)",
    "docstring": "Attach response attributes declared in config.",
    "type": "function",
    "file_path": "cherrypy\\cherrypy\\_cprequest.py",
    "ast_data": "FunctionDef name:response_namespace arg:k arg:v arguments arg arg If Compare Assign Call Call"
  },
  {
    "library": "pytorch",
    "name": "create_bw_fn",
    "source_code": "def create_bw_fn(fn: Callable, args: tuple[Any]) -> Callable:\n    from torch._functorch.aot_autograd import AOTConfig, create_joint\n    from torch._higher_order_ops.utils import prepare_fw_with_masks_all_requires_grad\n    dummy_aot_config = AOTConfig(fw_compiler=None, bw_compiler=None, partition_fn=None, decompositions={}, num_params_buffers=0, aot_id=0, keep_inference_input_mutations=False)\n    n_primals = len(args)\n    bw_fn = create_joint(prepare_fw_with_masks_all_requires_grad(fn), aot_config=dummy_aot_config)\n\n    def flat_fn(*args_and_grad_outs):\n        primals = args_and_grad_outs[:n_primals]\n        tangents = args_and_grad_outs[n_primals:]\n        grad_args = bw_fn(primals, tangents)[1]\n        assert len(args) == len(grad_args)\n        return [torch.zeros_like(arg) if isinstance(arg, torch.Tensor) and grad is None else grad for grad, arg in zip(grad_args, primals)]\n    return flat_fn",
    "docstring": "For a fn that accepts flat inputs and returns flat outputs: fw_out = fn(*args), this function returns: grad_args = bw_fn(*args_and_grad_output) with the following invariants: 1. args + fw_out has an 1-1 correspondence to args_and_grad_output 2. grad_args has an 1-1 corresponsence to args 3. for tensor arg whose requires_grad is False, its corresponding grad in grad_args will be a zero tensor with the same shape.",
    "type": "function",
    "file_path": "pytorch\\torch\\_higher_order_ops\\cond.py",
    "ast_data": "FunctionDef name:create_bw_fn arg:fn arg:args arguments arg arg Assign Call Assign Call Assign Call Call FunctionDef name:flat_fn arguments arg Assign Assign Assign Call Compare Call Call Return return:yes BoolOp Call Compare Call Call Return return:yes"
  },
  {
    "library": "django",
    "name": "GroupManager",
    "source_code": "class GroupManager(models.Manager):\n    use_in_migrations = True\n\n    def get_by_natural_key(self, name):\n        return self.get(name=name)\n\n    async def aget_by_natural_key(self, name):\n        return await self.aget(name=name)",
    "docstring": "The manager for the auth's Group model.",
    "type": "class",
    "file_path": "django\\django\\contrib\\auth\\models.py",
    "ast_data": "ClassDef name:GroupManager Assign FunctionDef name:get_by_natural_key arg:self arg:name arguments arg arg Return return:yes Call AsyncFunctionDef name:aget_by_natural_key arg:self arg:name arguments arg arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "_unwrap_shared_qspec",
    "source_code": "def _unwrap_shared_qspec(qspec: QuantizationSpecBase, edge_or_node_to_qspec: dict[EdgeOrNode, QuantizationSpecBase], shared_with_map: dict[EdgeOrNode, EdgeOrNode]) -> QuantizationSpecBase:\n    if isinstance(qspec, SharedQuantizationSpec):\n        sharing_with = qspec.edge_or_node\n        root = _find_root_edge_or_node(sharing_with, shared_with_map)\n        qspec = edge_or_node_to_qspec[root]\n        return _unwrap_shared_qspec(qspec, edge_or_node_to_qspec, shared_with_map)\n    return qspec",
    "docstring": "Unwraps qspec to get the final root qspec (non SharedQuantizationSpec) if qspec is SharedQuantizationSpec (1). tries to find the root edge or node for the node that the qspec points to (2). recursively find the root qspec based on the qspec for the root node",
    "type": "function",
    "file_path": "pytorch\\torch\\ao\\quantization\\pt2e\\prepare.py",
    "ast_data": "FunctionDef name:_unwrap_shared_qspec arg:qspec arg:edge_or_node_to_qspec arg:shared_with_map arguments arg arg arg If Call Assign Assign Call Assign Return return:yes Call Return return:yes"
  },
  {
    "library": "kornia",
    "name": "dtype",
    "source_code": "@property\ndef dtype(self) -> torch.dtype:\n    return self._data.dtype",
    "docstring": "Returns boxes dtype.",
    "type": "method",
    "file_path": "kornia\\kornia\\geometry\\boxes.py",
    "ast_data": "FunctionDef name:dtype arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "TfMethodTarget",
    "source_code": "class TfMethodTarget:\n    __slots__ = ('weakrefself_target__', 'weakrefself_func__')\n\n    def __init__(self, target, original_python_function):\n        self.weakrefself_target__ = target\n        self.weakrefself_func__ = weakref.ref(original_python_function)\n\n    @property\n    def target(self):\n        return self.weakrefself_target__()\n\n    @property\n    def target_class(self):\n        true_self = self.weakrefself_target__()\n        if tf_inspect.isclass(true_self):\n            return true_self\n        else:\n            return true_self.__class__\n\n    def call(self, args, kwargs):\n        wrapped_fn = self.weakrefself_func__()\n        return wrapped_fn(self.weakrefself_target__(), *args, **kwargs)",
    "docstring": "Binding target for methods replaced by function and defun.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\polymorphic_function\\tf_method_target.py",
    "ast_data": "ClassDef name:TfMethodTarget Assign FunctionDef name:__init__ arg:self arg:target arg:original_python_function arguments arg arg arg Assign Assign Call FunctionDef name:target arg:self arguments arg Return return:yes Call FunctionDef name:target_class arg:self arguments arg Assign Call If Call Return return:yes Return return:yes FunctionDef name:call arg:self arg:args arg:kwargs arguments arg arg arg Assign Call Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "reset_state",
    "source_code": "def reset_state(self):\n    if not self._built:\n        return\n    metrics = [self._loss_metric] + nest.flatten(self._per_output_metrics)\n    for metric_obj in metrics:\n        if metric_obj is not None:\n            metric_obj.reset_state()",
    "docstring": "Resets the state of loss metrics.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\compile_utils.py",
    "ast_data": "FunctionDef name:reset_state arg:self arguments arg If Return return:no Assign Call For If Compare Call"
  },
  {
    "library": "django",
    "name": "size",
    "source_code": "def size(self, name):\n    raise NotImplementedError('subclasses of Storage must provide a size() method')",
    "docstring": "Return the total size, in bytes, of the file specified by name.",
    "type": "method",
    "file_path": "django\\django\\core\\files\\storage\\base.py",
    "ast_data": "FunctionDef name:size arg:self arg:name arguments arg arg Raise Call"
  },
  {
    "library": "django",
    "name": "SpatiaLiteAdapter",
    "source_code": "class SpatiaLiteAdapter(WKTAdapter):\n\n    def __conform__(self, protocol):\n        if protocol is Database.PrepareProtocol:\n            return str(self)",
    "docstring": "SQLite adapter for geometry objects.",
    "type": "class",
    "file_path": "django\\django\\contrib\\gis\\db\\backends\\spatialite\\adapter.py",
    "ast_data": "ClassDef name:SpatiaLiteAdapter FunctionDef name:__conform__ arg:self arg:protocol arguments arg arg If Compare Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_assert_rank_condition",
    "source_code": "def _assert_rank_condition(x, rank, static_condition, dynamic_condition, data, summarize):\n    assert_type(rank, dtypes.int32)\n    rank_static = tensor_util.constant_value(rank)\n    if rank_static is not None:\n        if rank_static.ndim != 0:\n            raise ValueError('Rank must be a scalar.')\n        x_rank_static = x.get_shape().ndims\n        if x_rank_static is not None:\n            if not static_condition(x_rank_static, rank_static):\n                raise ValueError('Static rank condition failed', x_rank_static, rank_static)\n            return control_flow_ops.no_op(name='static_checks_determined_all_ok')\n    condition = dynamic_condition(array_ops.rank(x), rank)\n    if rank_static is None:\n        this_data = ['Rank must be a scalar. Received rank: ', rank]\n        rank_check = assert_rank(rank, 0, data=this_data)\n        condition = control_flow_ops.with_dependencies([rank_check], condition)\n    return control_flow_assert.Assert(condition, data, summarize=summarize)",
    "docstring": "Assert has a rank that satisfies a given condition. Args: x: Numeric . rank: Scalar . static_condition: A python function that takes and returns if the condition is satisfied, otherwise. dynamic_condition: An that takes [actual_rank, given_rank] and return if the condition is satisfied, otherwise. data: The tensors to print out if the condition is false. Defaults to error message and first few entries of . summarize: Print this many entries of each tensor. Returns: Op raising if fails dynamic_condition. Raises: ValueError: If static checks determine fails static_condition.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\check_ops.py",
    "ast_data": "FunctionDef name:_assert_rank_condition arg:x arg:rank arg:static_condition arg:dynamic_condition arg:data arg:summarize arguments arg arg arg arg arg arg Call Assign Call If Compare If Compare Raise Call Assign Call If Compare If Call Raise Call Return return:yes Call Assign Call Call If Compare Assign Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "value_counts",
    "source_code": "def value_counts(self, normalize: bool=False, sort: bool=True, ascending: bool=False, bins=None, dropna: bool=True) -> Series:\n    return algorithms.value_counts_internal(self, sort=sort, ascending=ascending, normalize=normalize, bins=bins, dropna=dropna)",
    "docstring": "Return a Series containing counts of unique values. The resulting object will be in descending order so that the first element is the most frequently-occurring element. Excludes NA values by default. Parameters ---------- normalize : bool, default False If True then the object returned will contain the relative frequencies of the unique values. sort : bool, default True Sort by frequencies when True. Preserve the order of the data when False. ascending : bool, default False Sort in ascending order. bins : int, optional Rather than count values, group them into half-open bins, a convenience for `normalizeTruedropnaFalse` doesn't have the same categories. >>> df = pd.DataFrame({\"a\": [1], \"b\": [\"2\"], \"c\": [3], \"d\": [3]}) >>> df = df.astype({\"a\": \"category\", \"c\": \"category\", \"d\": \"category\"}) >>> df a b c d 0 1 2 3 3 >>> df.dtypes a category b object c category d category dtype: object >>> df.dtypes.value_counts() category 2 category 1 object 1 Name: count, dtype: int64",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\base.py",
    "ast_data": "FunctionDef name:value_counts arg:self arg:normalize arg:sort arg:ascending arg:bins arg:dropna arguments arg arg arg arg arg arg Return return:yes Call"
  },
  {
    "library": "numpy",
    "name": "prod",
    "source_code": "def prod(self, axis=None, dtype=None, out=None):\n    return N.ndarray.prod(self, axis, dtype, out, keepdims=True)._collapse(axis)",
    "docstring": "Return the product of the array elements over the given axis. Refer to for full documentation. See Also -------- prod, ndarray.prod Notes ----- Same as , except, where that returns an , this returns a object instead. Examples -------- >>> x = np.matrix(np.arange(12).reshape((3,4))); x matrix([[ 0, 1, 2, 3], [ 4, 5, 6, 7], [ 8, 9, 10, 11]]) >>> x.prod() 0 >>> x.prod(0) matrix([[ 0, 45, 120, 231]]) >>> x.prod(1) matrix([[ 0], [ 840], [7920]])",
    "type": "method",
    "file_path": "numpy\\numpy\\matrixlib\\defmatrix.py",
    "ast_data": "FunctionDef name:prod arg:self arg:axis arg:dtype arg:out arguments arg arg arg arg Return return:yes Call Call"
  },
  {
    "library": "pandas",
    "name": "T",
    "source_code": "@property\ndef T(self) -> DataFrame:\n    return self.transpose()",
    "docstring": "The transpose of the DataFrame. Returns ------- DataFrame The transposed DataFrame. See Also -------- DataFrame.transpose : Transpose index and columns. Examples -------- >>> df = pd.DataFrame({\"col1\": [1, 2], \"col2\": [3, 4]}) >>> df col1 col2 0 1 3 1 2 4 >>> df.T 0 1 col1 1 2 col2 3 4",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\frame.py",
    "ast_data": "FunctionDef name:T arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "_get_take_nd_function",
    "source_code": "def _get_take_nd_function(ndim: int, arr_dtype: np.dtype, out_dtype: np.dtype, axis: AxisInt=0, mask_info=None):\n    func = None\n    if ndim <= 2:\n        func = _get_take_nd_function_cached(ndim, arr_dtype, out_dtype, axis)\n    if func is None:\n\n        def func(arr, indexer, out, fill_value=np.nan) -> None:\n            indexer = ensure_platform_int(indexer)\n            _take_nd_object(arr, indexer, out, axis=axis, fill_value=fill_value, mask_info=mask_info)\n    return func",
    "docstring": "Get the appropriate \"take\" implementation for the given dimension, axis and dtypes.",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\array_algos\\take.py",
    "ast_data": "FunctionDef name:_get_take_nd_function arg:ndim arg:arr_dtype arg:out_dtype arg:axis arg:mask_info arguments arg arg arg arg arg Assign If Compare Assign Call If Compare FunctionDef name:func arg:arr arg:indexer arg:out arg:fill_value arguments arg arg arg arg Assign Call Call Return return:yes"
  },
  {
    "library": "scipy",
    "name": "_normalize_sequence",
    "source_code": "def _normalize_sequence(input, rank):\n    is_str = isinstance(input, str)\n    if not is_str and np.iterable(input):\n        normalized = list(input)\n        if len(normalized) != rank:\n            err = 'sequence argument must have length equal to input rank'\n            raise RuntimeError(err)\n    else:\n        normalized = [input] * rank\n    return normalized",
    "docstring": "If input is a scalar, create a sequence of length equal to the rank by duplicating the input. If input is a sequence, check if its length is equal to the length of array.",
    "type": "function",
    "file_path": "scipy\\scipy\\ndimage\\_ni_support.py",
    "ast_data": "FunctionDef name:_normalize_sequence arg:input arg:rank arguments arg arg Assign Call If BoolOp Call Assign Call If Compare Call Assign Raise Call Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "sparse_top_k_categorical_accuracy",
    "source_code": "@dispatch.add_dispatch_support\ndef sparse_top_k_categorical_accuracy(y_true, y_pred, k=5):\n    y_pred_rank = tensor_conversion.convert_to_tensor_v2_with_dispatch(y_pred).shape.ndims\n    y_true_rank = tensor_conversion.convert_to_tensor_v2_with_dispatch(y_true).shape.ndims\n    if y_true_rank is not None and y_pred_rank is not None:\n        if y_pred_rank > 2:\n            y_pred = array_ops.reshape(y_pred, [-1, y_pred.shape[-1]])\n        if y_true_rank > 1:\n            y_true = array_ops.reshape(y_true, [-1])\n    return math_ops.cast(nn.in_top_k(y_pred, math_ops.cast(y_true, 'int32'), k), backend.floatx())",
    "docstring": "Computes how often integer targets are in the top predictions. Standalone usage: >>> y_true = [2, 1] >>> y_pred = [[0.1, 0.9, 0.8], [0.05, 0.95, 0]] >>> m = tf.keras.metrics.sparse_top_k_categorical_accuracy( ... y_true, y_pred, k=3) >>> assert m.shape == (2,) >>> m.numpy() array([1., 1.], dtype=float32) Args: y_true: tensor of true targets. y_pred: tensor of predicted targets. k: (Optional) Number of top elements to look at for computing accuracy. Defaults to 5. Returns: Sparse top K categorical accuracy value.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\metrics.py",
    "ast_data": "FunctionDef name:sparse_top_k_categorical_accuracy arg:y_true arg:y_pred arg:k arguments arg arg arg Assign Call Assign Call If BoolOp Compare Compare If Compare Assign Call If Compare Assign Call Return return:yes Call Call Call Call"
  },
  {
    "library": "cherrypy",
    "name": "set_response",
    "source_code": "def set_response(self):\n    response = cherrypy.serving.response\n    response.status = status = self.status\n    if status in (300, 301, 302, 303, 307, 308):\n        response.headers['Content-Type'] = 'text/html;charset=utf-8'\n        response.headers['Location'] = self.urls[0]\n        msg = {300: 'This resource can be found at ', 301: 'This resource has permanently moved to ', 302: 'This resource resides temporarily at ', 303: 'This resource can be found at ', 307: 'This resource has moved temporarily to ', 308: 'This resource has been moved to '}[status]\n        msg += '<a href=%s>%s</a>.'\n        msgs = [msg % (saxutils.quoteattr(u), html.escape(u, quote=False)) for u in self.urls]\n        response.body = ntob('<br />\\n'.join(msgs), 'utf-8')\n        response.headers.pop('Content-Length', None)\n    elif status == 304:\n        for key in ('Allow', 'Content-Encoding', 'Content-Language', 'Content-Length', 'Content-Location', 'Content-MD5', 'Content-Range', 'Content-Type', 'Expires', 'Last-Modified'):\n            if key in response.headers:\n                del response.headers[key]\n        response.body = None\n        response.headers.pop('Content-Length', None)\n    elif status == 305:\n        response.headers['Location'] = ntob(self.urls[0], 'utf-8')\n        response.body = None\n        response.headers.pop('Content-Length', None)\n    else:\n        raise ValueError('The %s status code is unknown.' % status)",
    "docstring": "Modify ``. Modifies status, headers, and body. CherryPy uses this internally, but you can also use it to create an HTTPRedirect object and set its output without *raising* the exception.",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\_cperror.py",
    "ast_data": "FunctionDef name:set_response arg:self arguments arg Assign Assign If Compare Assign Assign Assign Assign Call Call Assign Call Call Call If Compare For If Compare Assign Call If Compare Assign Call Assign Call Raise Call"
  },
  {
    "library": "scipy",
    "name": "_gen_harmonic",
    "source_code": "def _gen_harmonic(n, a):\n    n, a = np.broadcast_arrays(n, a)\n    return xpx.apply_where(a > 1, (n, a), _gen_harmonic_gt1, _gen_harmonic_leq1)",
    "docstring": "Generalized harmonic number",
    "type": "function",
    "file_path": "scipy\\scipy\\stats\\_discrete_distns.py",
    "ast_data": "FunctionDef name:_gen_harmonic arg:n arg:a arguments arg arg Assign Call Return return:yes Call Compare"
  },
  {
    "library": "matplotlib",
    "name": "set_clip_rectangle",
    "source_code": "def set_clip_rectangle(self, rectangle):\n    self._cliprect = rectangle",
    "docstring": "Set the clip rectangle to a or None.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\backend_bases.py",
    "ast_data": "FunctionDef name:set_clip_rectangle arg:self arg:rectangle arguments arg arg Assign"
  },
  {
    "library": "scikit-learn",
    "name": "_get_response_method",
    "source_code": "def _get_response_method(self):\n    if self.response_method == 'auto':\n        response_method = ['predict_proba', 'decision_function']\n    else:\n        response_method = self.response_method\n    return response_method",
    "docstring": "Define the response method.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\model_selection\\_classification_threshold.py",
    "ast_data": "FunctionDef name:_get_response_method arg:self arguments arg If Compare Assign Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "shutdown_tpu_system",
    "source_code": "def shutdown_tpu_system(cluster_resolver=None):\n    tpu_strategy_util.shutdown_tpu_system_impl(cluster_resolver, TPUClusterResolver)",
    "docstring": "Shuts down the TPU devices. This will clear all caches, even those that are maintained through sequential calls to tf.tpu.experimental.initialize_tpu_system, such as the compilation cache. Args: cluster_resolver: A tf.distribute.cluster_resolver.TPUClusterResolver, which provides information about the TPU cluster. Raises: RuntimeError: If no TPU devices found for eager execution or if run in a tf.function.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\cluster_resolver\\tpu\\tpu_cluster_resolver.py",
    "ast_data": "FunctionDef name:shutdown_tpu_system arg:cluster_resolver arguments arg Call"
  },
  {
    "library": "pandas",
    "name": "validate_groupby_func",
    "source_code": "def validate_groupby_func(name: str, args, kwargs, allowed=None) -> None:\n    if allowed is None:\n        allowed = []\n    kwargs = set(kwargs) - set(allowed)\n    if len(args) + len(kwargs) > 0:\n        raise UnsupportedFunctionCall(f'numpy operations are not valid with groupby. Use .groupby(...).{name}() instead')",
    "docstring": "'args' and 'kwargs' should be empty, except for allowed kwargs because all of their necessary parameters are explicitly listed in the function signature",
    "type": "function",
    "file_path": "pandas\\pandas\\compat\\numpy\\function.py",
    "ast_data": "FunctionDef name:validate_groupby_func arg:name arg:args arg:kwargs arg:allowed arguments arg arg arg arg If Compare Assign Assign Call Call If Compare Call Call Raise Call"
  },
  {
    "library": "pytorch",
    "name": "unfusable_node",
    "source_code": "def unfusable_node(self, node: BaseSchedulerNode) -> bool:\n    return isinstance(node, (ExternKernelSchedulerNode, NopKernelSchedulerNode)) and (not node.is_template()) and (not is_output_of_multi_outputs_template(node.node))",
    "docstring": "Is this node unfusable under any conditions.",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\scheduler.py",
    "ast_data": "FunctionDef name:unfusable_node arg:self arg:node arguments arg arg Return return:yes BoolOp Call Call Call"
  },
  {
    "library": "matplotlib",
    "name": "twiny",
    "source_code": "def twiny(ax: matplotlib.axes.Axes | None=None) -> _AxesBase:\n    if ax is None:\n        ax = gca()\n    ax1 = ax.twiny()\n    return ax1",
    "docstring": "Make and return a second Axes that shares the *y*-axis. The new Axes will overlay *ax* (or the current Axes if *ax* is *None*), and its ticks will be on the top. Examples -------- :doc:",
    "type": "function",
    "file_path": "matplotlib\\lib\\matplotlib\\pyplot.py",
    "ast_data": "FunctionDef name:twiny arg:ax arguments arg If Compare Assign Call Assign Call Return return:yes"
  },
  {
    "library": "pandas",
    "name": "quantile",
    "source_code": "def quantile(self, *, qs: Index, interpolation: QuantileInterpolation='linear') -> Self:\n    assert self.ndim >= 2\n    assert is_list_like(qs)\n    new_axes = list(self.axes)\n    new_axes[1] = Index(qs, dtype=np.float64)\n    blocks = [blk.quantile(qs=qs, interpolation=interpolation) for blk in self.blocks]\n    return type(self)(blocks, new_axes)",
    "docstring": "Iterate over blocks applying quantile reduction. This routine is intended for reduction type operations and will do inference on the generated blocks. Parameters ---------- interpolation : type of interpolation, default 'linear' qs : list of the quantiles to be computed Returns ------- BlockManager",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\internals\\managers.py",
    "ast_data": "FunctionDef name:quantile arg:self arguments arg arg arg Compare Call Assign Call Assign Call Assign Call Return return:yes Call Call"
  },
  {
    "library": "django",
    "name": "_alter_many_to_many",
    "source_code": "def _alter_many_to_many(self, model, old_field, new_field, strict):\n    if old_field.remote_field.through._meta.db_table != new_field.remote_field.through._meta.db_table:\n        self.alter_db_table(old_field.remote_field.through, old_field.remote_field.through._meta.db_table, new_field.remote_field.through._meta.db_table)\n    self.alter_field(new_field.remote_field.through, old_field.remote_field.through._meta.get_field(old_field.m2m_reverse_field_name()), new_field.remote_field.through._meta.get_field(new_field.m2m_reverse_field_name()))\n    self.alter_field(new_field.remote_field.through, old_field.remote_field.through._meta.get_field(old_field.m2m_field_name()), new_field.remote_field.through._meta.get_field(new_field.m2m_field_name()))",
    "docstring": "Alter M2Ms to repoint their to= endpoints.",
    "type": "method",
    "file_path": "django\\django\\db\\backends\\base\\schema.py",
    "ast_data": "FunctionDef name:_alter_many_to_many arg:self arg:model arg:old_field arg:new_field arg:strict arguments arg arg arg arg arg If Compare Call Call Call Call Call Call Call Call Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "__rdiv__",
    "source_code": "def __rdiv__(self, other):\n    raise TypeError(\"unsupported operand type(s) for /: '{}' and 'Dimension', please use // instead\".format(type(other).__name__))",
    "docstring": "Use via instead. This function exists only to have a better error message. Instead of: , this function will explicitly call for usage of instead. Args: other: Another . Raises: TypeError.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\tensor_shape.py",
    "ast_data": "FunctionDef name:__rdiv__ arg:self arg:other arguments arg arg Raise Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "TraceType",
    "source_code": "@tf_export('types.experimental.TraceType', v1=[])\nclass TraceType(metaclass=abc.ABCMeta):\n\n    @abc.abstractmethod\n    def is_subtype_of(self, other: 'TraceType') -> bool:\n        pass\n\n    @abc.abstractmethod\n    def most_specific_common_supertype(self, others: Sequence['TraceType']) -> Optional['TraceType']:\n        pass\n\n    @abc.abstractmethod\n    def placeholder_value(self, placeholder_context) -> Any:\n        pass\n\n    def to_tensors(self, value: Any) -> List[core.Tensor]:\n        del value\n        return []\n\n    def from_tensors(self, tensors: Iterator[core.Tensor]) -> Any:\n        del tensors\n        return self.placeholder_value(PlaceholderContext())\n\n    def flatten(self) -> List['TraceType']:\n        return []\n\n    def cast(self, value, cast_context) -> Any:\n        del cast_context\n        assert value == self.placeholder_value(PlaceholderContext()), f'Can not cast {value!r} to type {self!r}'\n        return value\n\n    @abc.abstractmethod\n    def __hash__(self) -> int:\n        pass\n\n    @abc.abstractmethod\n    def __eq__(self, other) -> bool:\n        pass",
    "docstring": "Represents the type of object(s) for tf.function tracing purposes. is an abstract class that other classes might inherit from to provide information regarding associated class(es) for the purposes of tf.function tracing. The typing logic provided through this mechanism will be used to make decisions regarding usage of cached concrete functions and retracing. For example, if we have the following tf.function and classes: tf.function does not know when to re-use an existing concrete function in regards to the class so naively it retraces for every new instance. However, we, as the designers of the class, know that each subclass has a fixed flavor and we can reuse an existing traced concrete function if it was the same subclass. Avoiding such unnecessary tracing of concrete functions can have significant performance benefits. Now if we try calling it again:",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\types\\trace.py",
    "ast_data": "ClassDef name:TraceType FunctionDef name:is_subtype_of arg:self arg:other arguments arg arg FunctionDef name:most_specific_common_supertype arg:self arg:others arguments arg arg FunctionDef name:placeholder_value arg:self arg:placeholder_context arguments arg arg FunctionDef name:to_tensors arg:self arg:value arguments arg arg Return return:no FunctionDef name:from_tensors arg:self arg:tensors arguments arg arg Return return:yes Call Call FunctionDef name:flatten arg:self arguments arg Return return:no FunctionDef name:cast arg:self arg:value arg:cast_context arguments arg arg arg Compare Call Call Return return:yes FunctionDef name:__hash__ arg:self arguments arg FunctionDef name:__eq__ arg:self arg:other arguments arg arg Call"
  },
  {
    "library": "scipy",
    "name": "InvalidVersion",
    "source_code": "class InvalidVersion(ValueError):\n    pass",
    "docstring": "An invalid version was found, users should refer to PEP 440.",
    "type": "class",
    "file_path": "scipy\\scipy\\_lib\\_pep440.py",
    "ast_data": "ClassDef name:InvalidVersion"
  },
  {
    "library": "numpy",
    "name": "PosixParser",
    "source_code": "class PosixParser:\n\n    @staticmethod\n    def join(argv):\n        return ' '.join((shlex.quote(arg) for arg in argv))\n\n    @staticmethod\n    def split(cmd):\n        return shlex.split(cmd, posix=True)",
    "docstring": "The parsing behavior used by on Posix.",
    "type": "class",
    "file_path": "numpy\\numpy\\distutils\\_shell_utils.py",
    "ast_data": "ClassDef name:PosixParser FunctionDef name:join arg:argv arguments arg Return return:yes Call Call FunctionDef name:split arg:cmd arguments arg Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "_mutate_many",
    "source_code": "def _mutate_many(self, candidates):\n    rng = self.random_number_generator\n    S = len(candidates)\n    if callable(self.strategy):\n        return self._mutate_custom(candidates)\n    trial = np.copy(self.population[candidates])\n    samples = np.array([self._select_samples(c, 5) for c in candidates])\n    if self.strategy in ['currenttobest1exp', 'currenttobest1bin']:\n        bprime = self.mutation_func(candidates, samples)\n    else:\n        bprime = self.mutation_func(samples)\n    fill_point = rng_integers(rng, self.parameter_count, size=S)\n    crossovers = rng.uniform(size=(S, self.parameter_count))\n    crossovers = crossovers < self.cross_over_probability\n    if self.strategy in self._binomial:\n        i = np.arange(S)\n        crossovers[i, fill_point[i]] = True\n        trial = np.where(crossovers, bprime, trial)\n        return trial\n    elif self.strategy in self._exponential:\n        crossovers[..., 0] = True\n        for j in range(S):\n            i = 0\n            init_fill = fill_point[j]\n            while i < self.parameter_count and crossovers[j, i]:\n                trial[j, init_fill] = bprime[j, init_fill]\n                init_fill = (init_fill + 1) % self.parameter_count\n                i += 1\n        return trial",
    "docstring": "Create trial vectors based on a mutation strategy.",
    "type": "method",
    "file_path": "scipy\\scipy\\optimize\\_differentialevolution.py",
    "ast_data": "FunctionDef name:_mutate_many arg:self arg:candidates arguments arg arg Assign Assign Call If Call Return return:yes Call Assign Call Assign Call Call If Compare Assign Call Assign Call Assign Call Assign Call Assign Compare If Compare Assign Call Assign Assign Call Return return:yes If Compare Assign For Call Assign Assign While BoolOp Compare Assign Assign Return return:yes"
  },
  {
    "library": "django",
    "name": "__init__",
    "source_code": "def __init__(self, subject='', body='', from_email=None, to=None, bcc=None, connection=None, attachments=None, headers=None, cc=None, reply_to=None):\n    if to:\n        if isinstance(to, str):\n            raise TypeError('\"to\" argument must be a list or tuple')\n        self.to = list(to)\n    else:\n        self.to = []\n    if cc:\n        if isinstance(cc, str):\n            raise TypeError('\"cc\" argument must be a list or tuple')\n        self.cc = list(cc)\n    else:\n        self.cc = []\n    if bcc:\n        if isinstance(bcc, str):\n            raise TypeError('\"bcc\" argument must be a list or tuple')\n        self.bcc = list(bcc)\n    else:\n        self.bcc = []\n    if reply_to:\n        if isinstance(reply_to, str):\n            raise TypeError('\"reply_to\" argument must be a list or tuple')\n        self.reply_to = list(reply_to)\n    else:\n        self.reply_to = []\n    self.from_email = from_email or settings.DEFAULT_FROM_EMAIL\n    self.subject = subject\n    self.body = body or ''\n    self.attachments = []\n    if attachments:\n        for attachment in attachments:\n            if isinstance(attachment, MIMEBase):\n                self.attach(attachment)\n            else:\n                self.attach(*attachment)\n    self.extra_headers = headers or {}\n    self.connection = connection",
    "docstring": "Initialize a single email message (which can be sent to multiple recipients).",
    "type": "method",
    "file_path": "django\\django\\core\\mail\\message.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:subject arg:body arg:from_email arg:to arg:bcc arg:connection arg:attachments arg:headers arg:cc arg:reply_to arguments arg arg arg arg arg arg arg arg arg arg arg If If Call Raise Call Assign Call Assign If If Call Raise Call Assign Call Assign If If Call Raise Call Assign Call Assign If If Call Raise Call Assign Call Assign Assign BoolOp Assign Assign BoolOp Assign If For If Call Call Call Assign BoolOp Assign"
  },
  {
    "library": "sphinx",
    "name": "add_uids",
    "source_code": "def add_uids(doctree: Node, condition: Callable[[Node], bool]) -> Iterator[Node]:\n    for node in doctree.findall(condition):\n        node.uid = uuid4().hex\n        yield node",
    "docstring": "Add a unique id to every node in the which matches the condition and yield the nodes. :param doctree: A :class: instance. :param condition: A callable which returns either `` for a given node.",
    "type": "function",
    "file_path": "sphinx\\sphinx\\versioning.py",
    "ast_data": "FunctionDef name:add_uids arg:doctree arg:condition arguments arg arg For Call Assign Call"
  },
  {
    "library": "pytorch",
    "name": "device_count",
    "source_code": "def device_count() -> int:\n    acc = current_accelerator()\n    if acc is None:\n        return 0\n    mod = torch.get_device_module(acc)\n    return mod.device_count()",
    "docstring": "Return the number of current :ref: available. Returns: int: the number of the current :ref: available. If there is no available accelerators, return 0. .. note:: This API delegates to the device-specific version of . On CUDA, this API will NOT posion fork if NVML discovery succeeds. Otherwise, it will. For more details, see :ref:.",
    "type": "function",
    "file_path": "pytorch\\torch\\accelerator\\__init__.py",
    "ast_data": "FunctionDef name:device_count arguments Assign Call If Compare Return return:yes Assign Call Return return:yes Call"
  },
  {
    "library": "sphinx",
    "name": "ExtensionMetadata",
    "source_code": "class ExtensionMetadata(typing.TypedDict, total=False):\n    version: str\n    \"The extension version (default: ``'unknown version'``).\"\n    env_version: int\n    'An integer that identifies the version of env data added by the extension.'\n    parallel_read_safe: bool\n    'Indicate whether parallel reading of source files is supported\\n    by the extension.\\n    '\n    parallel_write_safe: bool\n    'Indicate whether parallel writing of output files is supported\\n    by the extension (default: ``True``).\\n    '",
    "docstring": "The metadata returned by an extension's `ext-metadata`.",
    "type": "class",
    "file_path": "sphinx\\sphinx\\util\\typing.py",
    "ast_data": "ClassDef name:ExtensionMetadata"
  },
  {
    "library": "tensorflow",
    "name": "ones",
    "source_code": "@dispatch.dispatch_for_api(array_ops.ones)\ndef ones(shape: dynamic_ragged_shape.DynamicRaggedShape, dtype=dtypes.float32, name=None, layout=None) -> ragged_tensor.RaggedOrDense:\n    if layout is not None and (not layout.is_fully_replicated()):\n        raise ValueError(f'RaggedTensor only allows replicated layout. got {layout}')\n    flat_values = array_ops.ones(shape.inner_shape, dtype=dtype, name=name, layout=layout)\n    return shape._add_row_partitions(flat_values)",
    "docstring": "Returns ones shaped like x.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\ragged_array_ops.py",
    "ast_data": "FunctionDef name:ones arg:shape arg:dtype arg:name arg:layout arguments arg arg arg arg If BoolOp Compare Call Raise Call Assign Call Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "ensure_graph_is_valid",
    "source_code": "def ensure_graph_is_valid(graph_def: graph_pb2.GraphDef) -> None:\n    node_map = {}\n    for node in graph_def.node:\n        if node.name not in node_map:\n            node_map[node.name] = node\n        else:\n            raise ValueError('Duplicate node names detected for ', node.name)\n    for node in graph_def.node:\n        for input_name in node.input:\n            input_node_name = node_name_from_input(input_name)\n            if input_node_name not in node_map:\n                raise ValueError('Input for ', node.name, ' not found: ', input_name)",
    "docstring": "Makes sure that the graph is internally consistent. Checks basic properties of the graph def and raises an exception if there are input references to missing nodes, duplicated names, or other logic errors. Args: graph_def: Definition of a graph to be checked. Raises: ValueError: If the graph is incorrectly constructed.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\tools\\optimize_for_inference_lib.py",
    "ast_data": "FunctionDef name:ensure_graph_is_valid arg:graph_def arguments arg Assign For If Compare Assign Raise Call For For Assign Call If Compare Raise Call"
  },
  {
    "library": "kornia",
    "name": "transform_boxes_",
    "source_code": "def transform_boxes_(self, M: torch.Tensor) -> Boxes3D:\n    return self.transform_boxes(M, inplace=True)",
    "docstring": "Inplace version of :func:.",
    "type": "method",
    "file_path": "kornia\\kornia\\geometry\\boxes.py",
    "ast_data": "FunctionDef name:transform_boxes_ arg:self arg:M arguments arg arg Return return:yes Call"
  },
  {
    "library": "kornia",
    "name": "ProbabilityGenerator",
    "source_code": "class ProbabilityGenerator(RandomGeneratorBase):\n\n    def __init__(self, p: float=0.5) -> None:\n        super().__init__()\n        self.p = p\n\n    def __repr__(self) -> str:\n        repr = f'p={self.p}'\n        return repr\n\n    def make_samplers(self, device: torch.device, dtype: torch.dtype) -> None:\n        p = torch.tensor(float(self.p), device=device, dtype=dtype)\n        self.sampler = Bernoulli(p)\n\n    def forward(self, batch_shape: Tuple[int, ...], same_on_batch: bool=False) -> Dict[str, Tensor]:\n        batch_size = batch_shape[0]\n        probs_mask: Tensor = _adapted_sampling((batch_size,), self.sampler, same_on_batch).bool()\n        return {'probs': probs_mask}",
    "docstring": "Generate random probabilities for a batch of inputs. Args: p: probability to generate an 1-d binary mask. Default value is 0.5. Returns: A dict of parameters to be passed for transformation. - probs (Tensor): element-wise probabilities with a shape of (B,). Note: The generated random numbers are not reproducible across different devices and dtypes. By default, the parameters will be generated on CPU in float32. This can be changed by calling ``.",
    "type": "class",
    "file_path": "kornia\\kornia\\augmentation\\random_generator\\_2d\\probability.py",
    "ast_data": "ClassDef name:ProbabilityGenerator FunctionDef name:__init__ arg:self arg:p arguments arg arg Call Call Assign FunctionDef name:__repr__ arg:self arguments arg Assign Return return:yes FunctionDef name:make_samplers arg:self arg:device arg:dtype arguments arg arg arg Assign Call Call Assign Call FunctionDef name:forward arg:self arg:batch_shape arg:same_on_batch arguments arg arg arg Assign Call Call Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "infrequent_categories_",
    "source_code": "@property\ndef infrequent_categories_(self):\n    infrequent_indices = self._infrequent_indices\n    return [None if indices is None else category[indices] for category, indices in zip(self.categories_, infrequent_indices)]",
    "docstring": "Infrequent categories for each feature.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\preprocessing\\_encoders.py",
    "ast_data": "FunctionDef name:infrequent_categories_ arg:self arguments arg Assign Return return:yes Compare Call"
  },
  {
    "library": "django",
    "name": "clean_savepoints",
    "source_code": "@async_unsafe\ndef clean_savepoints(self):\n    self.savepoint_state = 0",
    "docstring": "Reset the counter used to generate unique savepoint ids in this thread.",
    "type": "method",
    "file_path": "django\\django\\db\\backends\\base\\base.py",
    "ast_data": "FunctionDef name:clean_savepoints arg:self arguments arg Assign"
  },
  {
    "library": "django",
    "name": "DjangoJSONEncoder",
    "source_code": "class DjangoJSONEncoder(json.JSONEncoder):\n\n    def default(self, o):\n        if isinstance(o, datetime.datetime):\n            r = o.isoformat()\n            if o.microsecond:\n                r = r[:23] + r[26:]\n            if r.endswith('+00:00'):\n                r = r.removesuffix('+00:00') + 'Z'\n            return r\n        elif isinstance(o, datetime.date):\n            return o.isoformat()\n        elif isinstance(o, datetime.time):\n            if is_aware(o):\n                raise ValueError(\"JSON can't represent timezone-aware times.\")\n            r = o.isoformat()\n            if o.microsecond:\n                r = r[:12]\n            return r\n        elif isinstance(o, datetime.timedelta):\n            return duration_iso_string(o)\n        elif isinstance(o, (decimal.Decimal, uuid.UUID, Promise)):\n            return str(o)\n        else:\n            return super().default(o)",
    "docstring": "JSONEncoder subclass that knows how to encode date/time, decimal types, and UUIDs.",
    "type": "class",
    "file_path": "django\\django\\core\\serializers\\json.py",
    "ast_data": "ClassDef name:DjangoJSONEncoder FunctionDef name:default arg:self arg:o arguments arg arg If Call Assign Call If Assign If Call Assign Call Return return:yes If Call Return return:yes Call If Call If Call Raise Call Assign Call If Assign Return return:yes If Call Return return:yes Call If Call Return return:yes Call Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "fail_and_restart_analysis",
    "source_code": "def fail_and_restart_analysis(self):\n    self.failed = True\n    if self.reason is not None:\n        restart_reason = self.reason.reason\n    else:\n        restart_reason = 'Unknown fail_and_restart_analysis'\n    raise exc.SpeculationRestartAnalysis(restart_reason=restart_reason)",
    "docstring": "Start tracing of the current frame over again, and don't take this branch.",
    "type": "method",
    "file_path": "pytorch\\torch\\_dynamo\\symbolic_convert.py",
    "ast_data": "FunctionDef name:fail_and_restart_analysis arg:self arguments arg Assign If Compare Assign Assign Raise Call"
  },
  {
    "library": "pytorch",
    "name": "_get_handle_to_prefetch",
    "source_code": "@no_type_check\ndef _get_handle_to_prefetch(state: _FSDPState, current_handle: FlatParamHandle) -> FlatParamHandle:\n    training_state = _get_training_state(current_handle)\n    valid_training_states = (HandleTrainingState.BACKWARD_PRE, HandleTrainingState.BACKWARD_POST, HandleTrainingState.FORWARD)\n    _p_assert(training_state in valid_training_states, f'Prefetching is only supported in {valid_training_states} but currently in {training_state}')\n    eod = state._exec_order_data\n    target_handle: Optional[FlatParamHandle] = None\n    if training_state == HandleTrainingState.BACKWARD_PRE and state.backward_prefetch == BackwardPrefetch.BACKWARD_PRE or (training_state == HandleTrainingState.BACKWARD_POST and state.backward_prefetch == BackwardPrefetch.BACKWARD_POST):\n        target_handle_candidate = eod.get_handle_to_backward_prefetch(current_handle)\n        if target_handle_candidate and target_handle_candidate._needs_pre_backward_unshard and (not target_handle_candidate._prefetched):\n            target_handle = target_handle_candidate\n        else:\n            target_handle = None\n    elif training_state == HandleTrainingState.FORWARD and state.forward_prefetch:\n        target_handle_candidate = eod.get_handle_to_forward_prefetch(current_handle)\n        if target_handle_candidate and target_handle_candidate._needs_pre_forward_unshard and (not target_handle_candidate._prefetched):\n            target_handle = target_handle_candidate\n        else:\n            target_handle = None\n    return target_handle",
    "docstring": "Returns a :class: of the handles keys to prefetch for the next module(s), where `` represents the current module. \"Prefetching\" refers to running the unshard logic early (without synchronization), and the \"next\" modules depend on the recorded execution order and the current training state.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\fsdp\\_runtime_utils.py",
    "ast_data": "FunctionDef name:_get_handle_to_prefetch arg:state arg:current_handle arguments arg arg Assign Call Assign Call Compare Assign If BoolOp BoolOp Compare Compare BoolOp Compare Compare Assign Call If BoolOp Assign Assign If BoolOp Compare Assign Call If BoolOp Assign Assign Return return:yes"
  },
  {
    "library": "django",
    "name": "savepoint_rollback",
    "source_code": "@async_unsafe\ndef savepoint_rollback(self, sid):\n    if not self._savepoint_allowed():\n        return\n    self.validate_thread_sharing()\n    self._savepoint_rollback(sid)\n    self.run_on_commit = [(sids, func, robust) for sids, func, robust in self.run_on_commit if sid not in sids]",
    "docstring": "Roll back to a savepoint. Do nothing if savepoints are not supported.",
    "type": "method",
    "file_path": "django\\django\\db\\backends\\base\\base.py",
    "ast_data": "FunctionDef name:savepoint_rollback arg:self arg:sid arguments arg arg If Call Return return:no Call Call Assign Compare"
  },
  {
    "library": "numpy",
    "name": "flatnonzero",
    "source_code": "@array_function_dispatch(_flatnonzero_dispatcher)\ndef flatnonzero(a):\n    return np.nonzero(np.ravel(a))[0]",
    "docstring": "Return indices that are non-zero in the flattened version of a. This is equivalent to `` that are non-zero. See Also -------- nonzero : Return the indices of the non-zero elements of the input array. ravel : Return a 1-D array containing the elements of the input array. Examples -------- >>> import numpy as np >>> x = np.arange(-2, 3) >>> x array([-2, -1, 0, 1, 2]) >>> np.flatnonzero(x) array([0, 1, 3, 4]) Use the indices of the non-zero elements as an index array to extract these elements: >>> x.ravel()[np.flatnonzero(x)] array([-2, -1, 1, 2])",
    "type": "function",
    "file_path": "numpy\\numpy\\_core\\numeric.py",
    "ast_data": "FunctionDef name:flatnonzero arg:a arguments arg Return return:yes Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "OutDtypeOperator",
    "source_code": "class OutDtypeOperator(HigherOrderOperator):\n\n    def __init__(self) -> None:\n        super().__init__('out_dtype')\n\n    def __call__(self, op, output_dtype, *args):\n        if not isinstance(op, torch._ops.OpOverload):\n            raise ValueError(\"out_dtype's first argument must be an OpOverload\")\n        if op._schema.is_mutable:\n            raise ValueError(\"out_dtype's first argument needs to be a functional operator\")\n        if not (len(op._schema.returns) == 1 and isinstance(op._schema.returns[0].type, torch.TensorType)):\n            raise ValueError(f\"out_dtype's can only apply to ops that return a single tensorInstead got {[r.type for r in op._schema.returns]}\")\n        if op not in ALLOWABLE_OPS:\n            raise ValueError(f'out_dtype only allows the following operators: {ALLOWABLE_OPS}.')\n        res = super().__call__(op, output_dtype, *args)\n        return res",
    "docstring": "The out_dtype operator takes an existing ATen functional operator, an argument, and arguments to the original operator, and executes the original operator and returns a Tensor with the precision. This operator does not mandate a compute precision so it allows the representation to not be opinionated about the exact implementation. The general implementation for all operators will be the following: 1. Promote inputs dtypes based on default PyTorch dtype promotion rules, using the dtypes of all input Tensors/Scalars and the arugument. 2. Execute the operator 3. Cast the output to",
    "type": "class",
    "file_path": "pytorch\\torch\\_higher_order_ops\\out_dtype.py",
    "ast_data": "ClassDef name:OutDtypeOperator FunctionDef name:__init__ arg:self arguments arg Call Call FunctionDef name:__call__ arg:self arg:op arg:output_dtype arguments arg arg arg arg If Call Raise Call If Raise Call If BoolOp Compare Call Call Raise Call If Compare Raise Call Assign Call Call Return return:yes"
  },
  {
    "library": "kornia",
    "name": "total_variation",
    "source_code": "def total_variation(img: Tensor, reduction: str='sum') -> Tensor:\n    if not isinstance(img, Tensor):\n        raise TypeError(f'Not a Tensor type. Got: {type(img)}')\n    KORNIA_CHECK_SHAPE(img, ['*', 'H', 'W'])\n    KORNIA_CHECK(reduction in ('mean', 'sum'), f\"Expected reduction to be one of 'mean'/'sum', but got '{reduction}'.\")\n    pixel_dif1 = img[..., 1:, :] - img[..., :-1, :]\n    pixel_dif2 = img[..., :, 1:] - img[..., :, :-1]\n    res1 = pixel_dif1.abs()\n    res2 = pixel_dif2.abs()\n    reduce_axes = (-2, -1)\n    if reduction == 'mean':\n        if img.is_floating_point():\n            res1 = res1.to(img).mean(dim=reduce_axes)\n            res2 = res2.to(img).mean(dim=reduce_axes)\n        else:\n            res1 = res1.float().mean(dim=reduce_axes)\n            res2 = res2.float().mean(dim=reduce_axes)\n    elif reduction == 'sum':\n        res1 = res1.sum(dim=reduce_axes)\n        res2 = res2.sum(dim=reduce_axes)\n    else:\n        raise NotImplementedError('Invalid reduction option.')\n    return res1 + res2",
    "docstring": "Compute Total Variation according to [1]. Args: img: the input image with shape :math:. reduction : Specifies the reduction to apply to the output: `(*,)here reduction='mean'` was added as an optional reduction method. Reference: [1]",
    "type": "function",
    "file_path": "kornia\\kornia\\losses\\total_variation.py",
    "ast_data": "FunctionDef name:total_variation arg:img arg:reduction arguments arg arg If Call Raise Call Call Call Call Compare Assign Assign Assign Call Assign Call Assign If Compare If Call Assign Call Call Assign Call Call Assign Call Call Assign Call Call If Compare Assign Call Assign Call Raise Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "separable_conv2d",
    "source_code": "@dispatch.add_dispatch_support\n@doc_controls.do_not_generate_docs\ndef separable_conv2d(x, depthwise_kernel, pointwise_kernel, strides=(1, 1), padding='valid', data_format=None, dilation_rate=(1, 1)):\n    if data_format is None:\n        data_format = image_data_format()\n    if data_format not in {'channels_first', 'channels_last'}:\n        raise ValueError('Unknown data_format: ' + str(data_format))\n    if len(strides) != 2:\n        raise ValueError('`strides` must be a tuple of 2 integers.')\n    x, tf_data_format = _preprocess_conv2d_input(x, data_format)\n    padding = _preprocess_padding(padding)\n    if not isinstance(strides, tuple):\n        strides = tuple(strides)\n    if tf_data_format == 'NHWC':\n        strides = (1,) + strides + (1,)\n    else:\n        strides = (1, 1) + strides\n    x = nn.separable_conv2d(x, depthwise_kernel, pointwise_kernel, strides=strides, padding=padding, rate=dilation_rate, data_format=tf_data_format)\n    if data_format == 'channels_first' and tf_data_format == 'NHWC':\n        x = array_ops.transpose(x, (0, 3, 1, 2))\n    return x",
    "docstring": "2D convolution with separable filters. Args: x: input tensor depthwise_kernel: convolution kernel for the depthwise convolution. pointwise_kernel: kernel for the 1x1 convolution. strides: strides tuple (length 2). padding: string, or . data_format: string, or . dilation_rate: tuple of integers, dilation rates for the separable convolution. Returns: Output tensor. Raises: ValueError: if is neither or . ValueError: if is not a tuple of 2 integers.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\backend.py",
    "ast_data": "FunctionDef name:separable_conv2d arg:x arg:depthwise_kernel arg:pointwise_kernel arg:strides arg:padding arg:data_format arg:dilation_rate arguments arg arg arg arg arg arg arg If Compare Assign Call If Compare Raise Call Call If Compare Call Raise Call Assign Call Assign Call If Call Assign Call If Compare Assign Assign Assign Call If BoolOp Compare Compare Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_kwargs_to_dict",
    "source_code": "def _kwargs_to_dict(self, node):\n    if node.keywords:\n        return gast.Call(gast.Name('dict', ctx=gast.Load(), annotation=None, type_comment=None), args=(), keywords=node.keywords)\n    else:\n        return parser.parse_expression('None')",
    "docstring": "Ties together all keyword and **kwarg arguments in a single dict.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\autograph\\converters\\call_trees.py",
    "ast_data": "FunctionDef name:_kwargs_to_dict arg:self arg:node arguments arg arg If Return return:yes Call Call Call Return return:yes Call"
  },
  {
    "library": "cherrypy",
    "name": "release_thread",
    "source_code": "def release_thread(self):\n    thread_ident = _thread.get_ident()\n    i = self.threads.pop(thread_ident, None)\n    if i is not None:\n        self.bus.publish('stop_thread', i)",
    "docstring": "Release the current thread and run 'stop_thread' listeners.",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\process\\plugins.py",
    "ast_data": "FunctionDef name:release_thread arg:self arguments arg Assign Call Assign Call If Compare Call"
  },
  {
    "library": "tensorflow",
    "name": "CommonEndpoints",
    "source_code": "class CommonEndpoints(SerializedAttributes.with_attributes('CommonEndpoints', checkpointable_objects=['variables', 'trainable_variables', 'regularization_losses'], functions=['__call__', 'call_and_return_all_conditional_losses', '_default_save_signature'])):\n    pass",
    "docstring": "Common endpoints shared by all models loadable by Keras. List of all attributes: variables: List of all variables in the model and its sublayers. trainable_variables: List of all trainable variables in the model and its sublayers. regularization_losses: List of all unconditional losses (losses not dependent on the inputs) in the model and its sublayers. __call__: Function that takes inputs and returns the outputs of the model call function. call_and_return_all_conditional_losses: Function that returns a tuple of (call function outputs, list of all losses that depend on the inputs). _default_save_signature: Traced model call function. This is only included if the top level exported object is a Keras model.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\saving\\saved_model\\serialized_attributes.py",
    "ast_data": "ClassDef name:CommonEndpoints Call"
  },
  {
    "library": "pytorch",
    "name": "add_to_set",
    "source_code": "def add_to_set(self, event_name: str, key: str, value: Any):\n    if event_name not in self.get_stack():\n        raise RuntimeError(f\"Event {repr(event_name)} not in {self.get_stack()}. Cannot add metadata to events that aren't in progress. Please make sure the event has started and hasn't ended.\")\n    event_data = self.get_event_data()\n    if event_name not in event_data:\n        event_data[event_name] = {}\n    if key not in event_data[event_name]:\n        event_data[event_name][key] = set()\n    event_data[event_name][key].add(value)",
    "docstring": "Add a value to a set within a event_name's metadata if it exists",
    "type": "method",
    "file_path": "pytorch\\torch\\_dynamo\\utils.py",
    "ast_data": "FunctionDef name:add_to_set arg:self arg:event_name arg:key arg:value arguments arg arg arg arg If Compare Call Raise Call Call Call Assign Call If Compare Assign If Compare Assign Call Call"
  },
  {
    "library": "tensorflow",
    "name": "to_proto",
    "source_code": "def to_proto(self, export_scope=None):\n    if export_scope is None or self._variable.name.startswith(export_scope):\n        var_def = variable_pb2.VariableDef()\n        var_def.variable_name = ops.strip_name_scope(self._variable.name, export_scope)\n        if self._initial_value is not None:\n            var_def.initial_value_name = ops.strip_name_scope(self._initial_value.name, export_scope)\n        var_def.trainable = self.trainable\n        var_def.synchronization = self.synchronization.value\n        var_def.aggregation = self.aggregation.value\n        var_def.initializer_name = ops.strip_name_scope(self.initializer.name, export_scope)\n        var_def.snapshot_name = ops.strip_name_scope(self._snapshot.name, export_scope)\n        if self._save_slice_info:\n            var_def.save_slice_info_def.MergeFrom(self._save_slice_info.to_proto(export_scope=export_scope))\n        return var_def\n    else:\n        return None",
    "docstring": "Converts a to a protocol buffer. Args: export_scope: Optional . Name scope to remove. Returns: A protocol buffer, or if the is not in the specified name scope.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\ref_variable.py",
    "ast_data": "FunctionDef name:to_proto arg:self arg:export_scope arguments arg arg If BoolOp Compare Call Assign Call Assign Call If Compare Assign Call Assign Assign Assign Assign Call Assign Call If Call Call Return return:yes Return return:no"
  },
  {
    "library": "django",
    "name": "check_geom",
    "source_code": "def check_geom(result, func, cargs):\n    if isinstance(result, int):\n        result = c_void_p(result)\n    if not result:\n        raise GDALException('Invalid geometry pointer returned from \"%s\".' % func.__name__)\n    return result",
    "docstring": "Check a function that returns a geometry.",
    "type": "function",
    "file_path": "django\\django\\contrib\\gis\\gdal\\prototypes\\errcheck.py",
    "ast_data": "FunctionDef name:check_geom arg:result arg:func arg:cargs arguments arg arg arg If Call Assign Call If Raise Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "_all_points_on_plane",
    "source_code": "def _all_points_on_plane(xs, ys, zs, atol=1e-08):\n    xs, ys, zs = (np.asarray(xs), np.asarray(ys), np.asarray(zs))\n    points = np.column_stack([xs, ys, zs])\n    points = points[~np.isnan(points).any(axis=1)]\n    points = np.unique(points, axis=0)\n    if len(points) <= 3:\n        return True\n    vs = (points - points[0])[1:]\n    vs = vs / np.linalg.norm(vs, axis=1)[:, np.newaxis]\n    vs = np.unique(vs, axis=0)\n    if len(vs) <= 2:\n        return True\n    cross_norms = np.linalg.norm(np.cross(vs[0], vs[1:]), axis=1)\n    zero_cross_norms = np.where(np.isclose(cross_norms, 0, atol=atol))[0] + 1\n    vs = np.delete(vs, zero_cross_norms, axis=0)\n    if len(vs) <= 2:\n        return True\n    n = np.cross(vs[0], vs[1])\n    n = n / np.linalg.norm(n)\n    dots = np.dot(n, vs.transpose())\n    return np.allclose(dots, 0, atol=atol)",
    "docstring": "Check if all points are on the same plane. Note that NaN values are ignored. Parameters ---------- xs, ys, zs : array-like The x, y, and z coordinates of the points. atol : float, default: 1e-8 The tolerance for the equality check.",
    "type": "function",
    "file_path": "matplotlib\\lib\\mpl_toolkits\\mplot3d\\art3d.py",
    "ast_data": "FunctionDef name:_all_points_on_plane arg:xs arg:ys arg:zs arg:atol arguments arg arg arg arg Assign Call Call Call Assign Call Assign Call Call Assign Call If Compare Call Return return:yes Assign Assign Call Assign Call If Compare Call Return return:yes Assign Call Call Assign Call Call Assign Call If Compare Call Return return:yes Assign Call Assign Call Assign Call Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "is_masked_tensor",
    "source_code": "def is_masked_tensor(obj: Any, /) -> TypeIs['MaskedTensor']:\n    return isinstance(obj, MaskedTensor)",
    "docstring": "Returns True if the input is a MaskedTensor, else False Args: a: any input Examples: >>> # xdoctest: +SKIP >>> from torch.masked import MaskedTensor >>> data = torch.arange(6).reshape(2,3) >>> mask = torch.tensor([[True, False, False], [True, True, False]]) >>> mt = MaskedTensor(data, mask) >>> is_masked_tensor(mt) True",
    "type": "function",
    "file_path": "pytorch\\torch\\masked\\maskedtensor\\core.py",
    "ast_data": "FunctionDef name:is_masked_tensor arguments arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "write_file",
    "source_code": "def write_file(filename: Optional[str]=None) -> bool:\n    if filename is None:\n        filename = get_filename()\n    return torch._C._cuda_tunableop_write_file(filename)",
    "docstring": "Write results to a CSV file. If :attr: is not given, `` is called.",
    "type": "function",
    "file_path": "pytorch\\torch\\cuda\\tunable.py",
    "ast_data": "FunctionDef name:write_file arg:filename arguments arg If Compare Assign Call Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "inplace_swap_row",
    "source_code": "def inplace_swap_row(X, m, n):\n    if sp.issparse(X) and X.format == 'csc':\n        inplace_swap_row_csc(X, m, n)\n    elif sp.issparse(X) and X.format == 'csr':\n        inplace_swap_row_csr(X, m, n)\n    else:\n        _raise_typeerror(X)",
    "docstring": "Swap two rows of a CSC/CSR matrix in-place. Parameters ---------- X : sparse matrix of shape (n_samples, n_features) Matrix whose two rows are to be swapped. It should be of CSR or CSC format. m : int Index of the row of X to be swapped. n : int Index of the row of X to be swapped. Examples -------- >>> from sklearn.utils import sparsefuncs >>> from scipy import sparse >>> import numpy as np >>> indptr = np.array([0, 2, 3, 3, 3]) >>> indices = np.array([0, 2, 2]) >>> data = np.array([8, 2, 5]) >>> csr = sparse.csr_matrix((data, indices, indptr)) >>> csr.todense() matrix([[8, 0, 2], [0, 0, 5], [0, 0, 0], [0, 0, 0]]) >>> sparsefuncs.inplace_swap_row(csr, 0, 1) >>> csr.todense() matrix([[0, 0, 5], [8, 0, 2], [0, 0, 0], [0, 0, 0]])",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\utils\\sparsefuncs.py",
    "ast_data": "FunctionDef name:inplace_swap_row arg:X arg:m arg:n arguments arg arg arg If BoolOp Call Compare Call If BoolOp Call Compare Call Call"
  },
  {
    "library": "tensorflow",
    "name": "add_ordinary_node",
    "source_code": "def add_ordinary_node(self, ast_node):\n    node = self._add_new_node(ast_node)\n    self.leaves = set((node,))\n    return node",
    "docstring": "Grows the graph by adding an ordinary CFG node. Ordinary nodes are followed by the next node, in lexical order, that is, they become the new leaf set. Args: ast_node: ast.AST Returns: Node",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\autograph\\pyct\\cfg.py",
    "ast_data": "FunctionDef name:add_ordinary_node arg:self arg:ast_node arguments arg arg Assign Call Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "parse_message",
    "source_code": "def parse_message(message):\n    error_message = []\n    func_tags = []\n    node_tags = []\n    pos = 0\n    for match in re.finditer(_INTERPOLATION_PATTERN, message):\n        parsed_tag = _ParseTag(match.group('type'), match.group('name'))\n        if parsed_tag.type == 'function_node':\n            error_message.append(match.group('sep'))\n            func_tags.append(parsed_tag)\n        else:\n            error_message.append(match.group())\n            node_tags.append(parsed_tag)\n        pos = match.end()\n    error_message.append(message[pos:])\n    return (''.join(error_message), func_tags, node_tags)",
    "docstring": "Extract function tags and node tags from a message. Tags are named tuples representing the string {{type name}}. For example, in \"123{{node Foo}}456{{function_node Bar}}789\", there are two tags: a node tag and a function tag. Args: message: An error message, possibly from an OpError. Returns: A tuple containing the original message with function nodes stripped, function tags, and node tags. For example, if message is \"123{{node Foo}}456{{function_node Bar}}789\" then this function returns (\"123{{node Foo}}456789\", [_ParseTag(\"function_node\", \"Bar\")], [_ParseTag(\"node\", \"Foo\")]).",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\error_interpolation.py",
    "ast_data": "FunctionDef name:parse_message arg:message arguments arg Assign Assign Assign Assign For Call Assign Call Call Call If Compare Call Call Call Call Call Call Assign Call Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "start_server",
    "source_code": "def start_server(cluster_resolver, protocol):\n    if not (cluster_resolver.task_type == 'worker' or cluster_resolver.task_type == 'ps'):\n        raise ValueError('Unexpected task_type to start a server: {}'.format(cluster_resolver.task_type))\n    server = server_lib.Server(cluster_resolver.cluster_spec().as_cluster_def(), job_name=cluster_resolver.task_type, task_index=cluster_resolver.task_id, protocol=protocol)\n    logging.info('TensorFlow server started for job %s, task %d.', cluster_resolver.task_type, cluster_resolver.task_id)\n    server.join()",
    "docstring": "Start a server and block the process from exiting.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\coordinator\\utils.py",
    "ast_data": "FunctionDef name:start_server arg:cluster_resolver arg:protocol arguments arg arg If BoolOp Compare Compare Raise Call Call Assign Call Call Call Call Call"
  },
  {
    "library": "django",
    "name": "__call__",
    "source_code": "def __call__(self, text, trim_url_limit=None, nofollow=False, autoescape=False):\n    safe_input = isinstance(text, SafeData)\n    words = self.word_split_re.split(str(text))\n    local_cache = {}\n    urlized_words = []\n    for word in words:\n        if (urlized_word := local_cache.get(word)) is None:\n            urlized_word = self.handle_word(word, safe_input=safe_input, trim_url_limit=trim_url_limit, nofollow=nofollow, autoescape=autoescape)\n            local_cache[word] = urlized_word\n        urlized_words.append(urlized_word)\n    return ''.join(urlized_words)",
    "docstring": "If trim_url_limit is not None, truncate the URLs in the link text longer than this limit to trim_url_limit - 1 characters and append an ellipsis. If nofollow is True, give the links a rel=\"nofollow\" attribute. If autoescape is True, autoescape the link text and URLs.",
    "type": "method",
    "file_path": "django\\django\\utils\\html.py",
    "ast_data": "FunctionDef name:__call__ arg:self arg:text arg:trim_url_limit arg:nofollow arg:autoescape arguments arg arg arg arg arg Assign Call Assign Call Call Assign Assign For If Compare Call Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "_pearsonr_bootstrap_ci",
    "source_code": "def _pearsonr_bootstrap_ci(confidence_level, method, x, y, alternative, axis):\n\n    def statistic(x, y, axis):\n        statistic, _ = pearsonr(x, y, axis=axis)\n        return statistic\n    res = bootstrap((x, y), statistic, confidence_level=confidence_level, axis=axis, paired=True, alternative=alternative, **method._asdict())\n    res.confidence_interval = np.clip(res.confidence_interval, -1, 1)\n    return ConfidenceInterval(*res.confidence_interval)",
    "docstring": "Compute the confidence interval for Pearson's R using the bootstrap.",
    "type": "function",
    "file_path": "scipy\\scipy\\stats\\_stats_py.py",
    "ast_data": "FunctionDef name:_pearsonr_bootstrap_ci arg:confidence_level arg:method arg:x arg:y arg:alternative arg:axis arguments arg arg arg arg arg arg FunctionDef name:statistic arg:x arg:y arg:axis arguments arg arg arg Assign Call Return return:yes Assign Call Call Assign Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "_delete_data",
    "source_code": "def _delete_data(self, name):\n    self.squash_mask(names=[name], leave_parametrized=False)\n    delattr(self._container, name)\n    self.state.pop(name)\n    self.data_groups.pop(name)",
    "docstring": "Detaches some data from the sparsifier. Args: name (str) Name of the data to be removed from the sparsifier Note: Currently private. Kind of used as a helper function when replacing data of the same name",
    "type": "method",
    "file_path": "pytorch\\torch\\ao\\pruning\\_experimental\\data_sparsifier\\base_data_sparsifier.py",
    "ast_data": "FunctionDef name:_delete_data arg:self arg:name arguments arg arg Call Call Call Call"
  },
  {
    "library": "scipy",
    "name": "read_full_array",
    "source_code": "def read_full_array(self, hdr):\n    if hdr.is_complex:\n        res = self.read_sub_array(hdr, copy=False)\n        res_j = self.read_sub_array(hdr, copy=False)\n        return res + res_j * 1j\n    return self.read_sub_array(hdr)",
    "docstring": "Full (rather than sparse) matrix getter Read matrix (array) can be real or complex Parameters ---------- hdr : `` is True, otherwise a real numeric array",
    "type": "method",
    "file_path": "scipy\\scipy\\io\\matlab\\_mio4.py",
    "ast_data": "FunctionDef name:read_full_array arg:self arg:hdr arguments arg arg If Assign Call Assign Call Return return:yes Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "Process",
    "source_code": "class Process(object):\n\n    def __init__(self, *args, **kwargs):\n        del args, kwargs\n        raise unittest.SkipTest('TODO(b/150264776): Windows is not supported in MultiProcessRunner.')",
    "docstring": "A process that skips test (until windows is supported).",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\multi_process_lib.py",
    "ast_data": "ClassDef name:Process FunctionDef name:__init__ arg:self arguments arg arg arg Raise Call"
  },
  {
    "library": "scrapy",
    "name": "iter_spider_classes",
    "source_code": "def iter_spider_classes(module: ModuleType) -> Iterable[type[Spider]]:\n    for obj in vars(module).values():\n        if inspect.isclass(obj) and issubclass(obj, Spider) and (obj.__module__ == module.__name__) and getattr(obj, 'name', None):\n            yield obj",
    "docstring": "Return an iterator over all spider classes defined in the given module that can be instantiated (i.e. which have name)",
    "type": "function",
    "file_path": "scrapy\\scrapy\\utils\\spider.py",
    "ast_data": "FunctionDef name:iter_spider_classes arg:module arguments arg For Call Call If BoolOp Call Call Compare Call"
  },
  {
    "library": "pytorch",
    "name": "impl_save_for_backward",
    "source_code": "def impl_save_for_backward(qualname, *, func=None):\n\n    def inner(func):\n        custom_op = _find_custom_op(qualname, also_check_torch_library=True)\n        custom_op.impl_save_for_backward(_stacklevel=3)(func)\n        return func\n    if func is None:\n        return inner\n    return inner(func)",
    "docstring": "Register a function that tells us what to save for backward. Please see :func: for more details.",
    "type": "function",
    "file_path": "pytorch\\torch\\_custom_ops.py",
    "ast_data": "FunctionDef name:impl_save_for_backward arg:qualname arguments arg arg FunctionDef name:inner arg:func arguments arg Assign Call Call Call Return return:yes If Compare Return return:yes Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "stage",
    "source_code": "def stage(self, state_dict: STATE_DICT_TYPE) -> STATE_DICT_TYPE:\n    raise NotImplementedError(f'{self.__class__.__name__} must implement stage method')",
    "docstring": "Returns a \"staged\" copy of . The expectation of the staged copy is that it is innoculated from any updates incurred after the stage call is complete.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\checkpoint\\staging.py",
    "ast_data": "FunctionDef name:stage arg:self arg:state_dict arguments arg arg Raise Call"
  },
  {
    "library": "pandas",
    "name": "_create_table_builder",
    "source_code": "def _create_table_builder(self) -> _DataFrameTableBuilder:\n    if self.verbose:\n        return _DataFrameTableBuilderVerbose(info=self.info, with_counts=self.show_counts)\n    elif self.verbose is False:\n        return _DataFrameTableBuilderNonVerbose(info=self.info)\n    elif self.exceeds_info_cols:\n        return _DataFrameTableBuilderNonVerbose(info=self.info)\n    else:\n        return _DataFrameTableBuilderVerbose(info=self.info, with_counts=self.show_counts)",
    "docstring": "Create instance of table builder based on verbosity and display settings.",
    "type": "method",
    "file_path": "pandas\\pandas\\io\\formats\\info.py",
    "ast_data": "FunctionDef name:_create_table_builder arg:self arguments arg If Return return:yes Call If Compare Return return:yes Call If Return return:yes Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "GlorotUniform",
    "source_code": "@tf_export(v1=['glorot_uniform_initializer', 'initializers.glorot_uniform'])\n@deprecation.deprecated_endpoints('glorot_uniform_initializer', 'initializers.glorot_uniform')\nclass GlorotUniform(VarianceScaling):\n\n    @deprecated_args(None, 'Call initializer instance with the dtype argument instead of passing it to the constructor', 'dtype')\n    def __init__(self, seed=None, dtype=dtypes.float32):\n        super(GlorotUniform, self).__init__(scale=1.0, mode='fan_avg', distribution='uniform', seed=seed)\n\n    def get_config(self):\n        return {'seed': self.seed, 'dtype': self.dtype.name}",
    "docstring": "The Glorot uniform initializer, also called Xavier uniform initializer. It draws samples from a uniform distribution within [-limit, limit] where is where is the number of input units in the weight tensor and is the number of output units in the weight tensor. Args: seed: A Python integer. Used to create random seeds. See for behavior. dtype: Default data type, used if no argument is provided when calling the initializer. Only floating point types are supported. References: [Glorot et al., 2010]( ([pdf](",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\init_ops.py",
    "ast_data": "ClassDef name:GlorotUniform FunctionDef name:__init__ arg:self arg:seed arg:dtype arguments arg arg arg Call Call Call FunctionDef name:get_config arg:self arguments arg Return return:yes Call Call"
  },
  {
    "library": "scikit-learn",
    "name": "_list_indexing",
    "source_code": "def _list_indexing(X, key, key_dtype):\n    if np.isscalar(key) or isinstance(key, slice):\n        return X[key]\n    if key_dtype == 'bool':\n        return list(compress(X, key))\n    return [X[idx] for idx in key]",
    "docstring": "Index a Python list.",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\utils\\_indexing.py",
    "ast_data": "FunctionDef name:_list_indexing arg:X arg:key arg:key_dtype arguments arg arg arg If BoolOp Call Call Return return:yes If Compare Return return:yes Call Call Return return:yes"
  },
  {
    "library": "django",
    "name": "LocalTimeNode",
    "source_code": "class LocalTimeNode(Node):\n\n    def __init__(self, nodelist, use_tz):\n        self.nodelist = nodelist\n        self.use_tz = use_tz\n\n    def render(self, context):\n        old_setting = context.use_tz\n        context.use_tz = self.use_tz\n        output = self.nodelist.render(context)\n        context.use_tz = old_setting\n        return output",
    "docstring": "Template node class used by ``.",
    "type": "class",
    "file_path": "django\\django\\templatetags\\tz.py",
    "ast_data": "ClassDef name:LocalTimeNode FunctionDef name:__init__ arg:self arg:nodelist arg:use_tz arguments arg arg arg Assign Assign FunctionDef name:render arg:self arg:context arguments arg arg Assign Assign Assign Call Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_tf_lazy_or",
    "source_code": "def _tf_lazy_or(cond, b):\n    return tf_cond.cond(cond, lambda: cond, b)",
    "docstring": "Lazy-eval equivalent of \"or\" for Tensors.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\autograph\\operators\\logical.py",
    "ast_data": "FunctionDef name:_tf_lazy_or arg:cond arg:b arguments arg arg Return return:yes Call arguments"
  },
  {
    "library": "tensorflow",
    "name": "update_server_def",
    "source_code": "def update_server_def(self, server_def, keep_alive_secs=_KEEP_ALIVE_SECS):\n    if not server_def:\n        raise ValueError('server_def is None.')\n    self._server_def = server_def\n    if self._context_handle:\n        server_def_str = server_def.SerializeToString()\n        pywrap_tfe.TFE_ContextUpdateServerDef(self._context_handle, keep_alive_secs, server_def_str)\n        self._initialize_logical_devices()\n    self._clear_caches()",
    "docstring": "Update a server_def on the context. Args: server_def: A tensorflow::ServerDef proto. Enables execution on remote devices. keep_alive_secs: Num. seconds after which the remote end will hang up. As long as the client is still alive, the server state for the context will be kept alive. If the client is killed (or there is some failure), the server will clean up its context keep_alive_secs after the final RPC it receives. Raises: ValueError: if server_def is None.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\context.py",
    "ast_data": "FunctionDef name:update_server_def arg:self arg:server_def arg:keep_alive_secs arguments arg arg arg If Raise Call Assign If Assign Call Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_make_handle_data",
    "source_code": "def _make_handle_data(tensor):\n    return _create_handle_data_proto(tensor.shape.as_proto(), tensor.dtype.as_datatype_enum)",
    "docstring": "Create handle data based on tensor shape and dtype.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\linalg\\sparse\\sparse_csr_matrix_ops.py",
    "ast_data": "FunctionDef name:_make_handle_data arg:tensor arguments arg Return return:yes Call Call"
  },
  {
    "library": "django",
    "name": "_generate_cache_key",
    "source_code": "def _generate_cache_key(request, method, headerlist, key_prefix):\n    ctx = md5(usedforsecurity=False)\n    for header in headerlist:\n        value = request.META.get(header)\n        if value is not None:\n            ctx.update(value.encode())\n    url = md5(request.build_absolute_uri().encode('ascii'), usedforsecurity=False)\n    cache_key = 'views.decorators.cache.cache_page.%s.%s.%s.%s' % (key_prefix, method, url.hexdigest(), ctx.hexdigest())\n    return _i18n_cache_key_suffix(request, cache_key)",
    "docstring": "Return a cache key from the headers given in the header list.",
    "type": "function",
    "file_path": "django\\django\\utils\\cache.py",
    "ast_data": "FunctionDef name:_generate_cache_key arg:request arg:method arg:headerlist arg:key_prefix arguments arg arg arg arg Assign Call For Assign Call If Compare Call Call Assign Call Call Call Assign Call Call Return return:yes Call"
  },
  {
    "library": "kornia",
    "name": "forward",
    "source_code": "def forward(self, batch_shape: tuple[int, ...], same_on_batch: bool=False) -> dict[str, Tensor]:\n    batch_size, channels, _, _ = batch_shape\n    _common_param_check(batch_size, same_on_batch)\n    _device, _dtype = (self.device, self.dtype)\n    batch_idx = torch.arange(batch_size, device=_device, dtype=torch.long).reshape(batch_size, 1)\n    channel_idx = torch.argsort(_adapted_rsampling((batch_size, channels), self.drop_sampler, same_on_batch), dim=1)[:, :self.num_drop_channels].to(torch.long)\n    return {'batch_idx': batch_idx, 'channel_idx': channel_idx}",
    "docstring": "Generate a mask for dropout channels.",
    "type": "method",
    "file_path": "kornia\\kornia\\augmentation\\random_generator\\_2d\\channel_dropout.py",
    "ast_data": "FunctionDef name:forward arg:self arg:batch_shape arg:same_on_batch arguments arg arg arg Assign Call Assign Assign Call Call Assign Call Call Call Return return:yes"
  },
  {
    "library": "scrapy",
    "name": "get_media_requests",
    "source_code": "@abstractmethod\ndef get_media_requests(self, item: Any, info: SpiderInfo) -> list[Request]:\n    raise NotImplementedError",
    "docstring": "Returns the media requests to download",
    "type": "method",
    "file_path": "scrapy\\scrapy\\pipelines\\media.py",
    "ast_data": "FunctionDef name:get_media_requests arg:self arg:item arg:info arguments arg arg arg Raise"
  },
  {
    "library": "django",
    "name": "__contains__",
    "source_code": "def __contains__(self, other):\n    return other in self.children",
    "docstring": "Return True if 'other' is a direct child of this instance.",
    "type": "method",
    "file_path": "django\\django\\utils\\tree.py",
    "ast_data": "FunctionDef name:__contains__ arg:self arg:other arguments arg arg Return return:yes Compare"
  },
  {
    "library": "tensorflow",
    "name": "trainable_variables",
    "source_code": "@property\ndef trainable_variables(self):\n    if self._variables_created:\n        return ops.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES, self.variable_scope_name)\n    else:\n        return []",
    "docstring": "Returns the list of trainable variables created by the Template.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\template.py",
    "ast_data": "FunctionDef name:trainable_variables arg:self arguments arg If Return return:yes Call Return return:no"
  },
  {
    "library": "tensorflow",
    "name": "Minimum",
    "source_code": "class Minimum(_Merge):\n\n    def _merge_function(self, inputs):\n        output = inputs[0]\n        for i in range(1, len(inputs)):\n            output = math_ops.minimum(output, inputs[i])\n        return output",
    "docstring": "Layer that computes the minimum (element-wise) a list of inputs. It takes as input a list of tensors, all of the same shape, and returns a single tensor (also of the same shape). >>> tf.keras.layers.Minimum()([np.arange(5).reshape(5, 1), ... np.arange(5, 10).reshape(5, 1)]) >>> x1 = tf.keras.layers.Dense(8)(np.arange(10).reshape(5, 2)) >>> x2 = tf.keras.layers.Dense(8)(np.arange(10, 20).reshape(5, 2)) >>> minned = tf.keras.layers.Minimum()([x1, x2]) >>> minned.shape TensorShape([5, 8])",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\layers\\merge.py",
    "ast_data": "ClassDef name:Minimum FunctionDef name:_merge_function arg:self arg:inputs arguments arg arg Assign For Call Call Assign Call Return return:yes"
  },
  {
    "library": "authlib",
    "name": "exists_nonce",
    "source_code": "def exists_nonce(self, nonce, request):\n    raise NotImplementedError()",
    "docstring": "Check if the given nonce is existing in your database. Developers MUST implement this method in subclass, e.g.:: def exists_nonce(self, nonce, request): exists = AuthorizationCode.query.filter_by( client_id=request.payload.client_id, nonce=nonce ).first() return bool(exists) :param nonce: A string of \"nonce\" parameter in request :param request: OAuth2Request instance :return: Boolean",
    "type": "method",
    "file_path": "authlib\\authlib\\oidc\\core\\grants\\code.py",
    "ast_data": "FunctionDef name:exists_nonce arg:self arg:nonce arg:request arguments arg arg arg Raise Call"
  },
  {
    "library": "tensorflow",
    "name": "flatten",
    "source_code": "def flatten(self) -> List['TraceType']:\n    return []",
    "docstring": "Returns a list of TensorSpecs corresponding to values.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\types\\trace.py",
    "ast_data": "FunctionDef name:flatten arg:self arguments arg Return return:no"
  },
  {
    "library": "matplotlib",
    "name": "shrunk",
    "source_code": "def shrunk(self, mx, my):\n    w, h = self.size\n    return Bbox([self._points[0], self._points[0] + [mx * w, my * h]])",
    "docstring": "Return a copy of the , shrunk by the factor *mx* in the *x* direction and the factor *my* in the *y* direction. The lower left corner of the box remains unchanged. Normally *mx* and *my* will be less than 1, but this is not enforced.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\transforms.py",
    "ast_data": "FunctionDef name:shrunk arg:self arg:mx arg:my arguments arg arg arg Assign Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "map_nodes_to_values",
    "source_code": "@compatibility(is_backward_compatible=True)\ndef map_nodes_to_values(self, args: Argument, n: Node) -> Argument:\n\n    def load_arg(n_arg: Node) -> Any:\n        if n_arg not in self.env:\n            raise RuntimeError(f'Node {n} referenced nonexistent value {n_arg}! Run Graph.lint() to diagnose such issues')\n        return self.env[n_arg]\n    return map_arg(args, load_arg)",
    "docstring": "Recursively descend through `` belongs. This is only used for error reporting.",
    "type": "method",
    "file_path": "pytorch\\torch\\fx\\interpreter.py",
    "ast_data": "FunctionDef name:map_nodes_to_values arg:self arg:args arg:n arguments arg arg arg FunctionDef name:load_arg arg:n_arg arguments arg If Compare Raise Call Return return:yes Return return:yes Call Call"
  },
  {
    "library": "pandas",
    "name": "is_monotonic_increasing",
    "source_code": "@property\ndef is_monotonic_increasing(self) -> bool:\n    from pandas import Index\n    return Index(self).is_monotonic_increasing",
    "docstring": "Return True if values in the object are monotonically increasing. Returns ------- bool See Also -------- Series.is_monotonic_decreasing : Return boolean if values in the object are monotonically decreasing. Examples -------- >>> s = pd.Series([1, 2, 2]) >>> s.is_monotonic_increasing True >>> s = pd.Series([3, 2, 1]) >>> s.is_monotonic_increasing False",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\base.py",
    "ast_data": "FunctionDef name:is_monotonic_increasing arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "stop",
    "source_code": "def stop(self, timeout=None):\n    self.stop_signal.set()\n    with self.queue.mutex:\n        self.queue.queue.clear()\n        self.queue.unfinished_tasks = 0\n        self.queue.not_full.notify()\n    self.run_thread.join(timeout)\n    _SHARED_SEQUENCES[self.uid] = None",
    "docstring": "Stops running threads and wait for them to exit, if necessary. Should be called by the same thread which called . Args: timeout: maximum time to wait on",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\utils\\data_utils.py",
    "ast_data": "FunctionDef name:stop arg:self arg:timeout arguments arg arg Call With Call Assign Call Call Assign"
  },
  {
    "library": "scikit-learn",
    "name": "check_random_state",
    "source_code": "def check_random_state(seed):\n    if seed is None or seed is np.random:\n        return np.random.mtrand._rand\n    if isinstance(seed, numbers.Integral):\n        return np.random.RandomState(seed)\n    if isinstance(seed, np.random.RandomState):\n        return seed\n    raise ValueError('%r cannot be used to seed a numpy.random.RandomState instance' % seed)",
    "docstring": "Turn seed into a np.random.RandomState instance. Parameters ---------- seed : None, int or instance of RandomState If seed is None, return the RandomState singleton used by np.random. If seed is an int, return a new RandomState instance seeded with seed. If seed is already a RandomState instance, return it. Otherwise raise ValueError. Returns ------- :class: The random state object based on parameter. Examples -------- >>> from sklearn.utils.validation import check_random_state >>> check_random_state(42) RandomState(MT19937) at 0x...",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\utils\\validation.py",
    "ast_data": "FunctionDef name:check_random_state arg:seed arguments arg If BoolOp Compare Compare Return return:yes If Call Return return:yes Call If Call Return return:yes Raise Call"
  },
  {
    "library": "tensorflow",
    "name": "PLeaf",
    "source_code": "class PLeaf(StructurePattern):\n\n    def __new__(cls):\n        if not hasattr(cls, 'instance'):\n            cls.instance = super().__new__(cls)\n        return cls.instance",
    "docstring": "Represents a singleton leaf StructurePattern.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\resource_variable_ops.py",
    "ast_data": "ClassDef name:PLeaf FunctionDef name:__new__ arg:cls arguments arg If Call Assign Call Call Return return:yes"
  },
  {
    "library": "django",
    "name": "serialize_headers",
    "source_code": "def serialize_headers(self):\n    return b'\\r\\n'.join([key.encode('ascii') + b': ' + value.encode('latin-1') for key, value in self.headers.items()])",
    "docstring": "HTTP headers as a bytestring.",
    "type": "method",
    "file_path": "django\\django\\http\\response.py",
    "ast_data": "FunctionDef name:serialize_headers arg:self arguments arg Return return:yes Call Call Call Call"
  },
  {
    "library": "scikit-learn",
    "name": "_check_mkl_vcomp",
    "source_code": "def _check_mkl_vcomp(self, X, n_samples):\n    if sp.issparse(X):\n        return\n    n_active_threads = int(np.ceil(n_samples / CHUNK_SIZE))\n    if n_active_threads < self._n_threads:\n        modules = _get_threadpool_controller().info()\n        has_vcomp = 'vcomp' in [module['prefix'] for module in modules]\n        has_mkl = ('mkl', 'intel') in [(module['internal_api'], module.get('threading_layer', None)) for module in modules]\n        if has_vcomp and has_mkl:\n            self._warn_mkl_vcomp(n_active_threads)",
    "docstring": "Check when vcomp and mkl are both present",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\cluster\\_kmeans.py",
    "ast_data": "FunctionDef name:_check_mkl_vcomp arg:self arg:X arg:n_samples arguments arg arg arg If Call Return return:no Assign Call Call If Compare Assign Call Call Assign Compare Assign Compare Call If BoolOp Call"
  },
  {
    "library": "pandas",
    "name": "from_spmatrix",
    "source_code": "@classmethod\ndef from_spmatrix(cls, data: spmatrix) -> Self:\n    length, ncol = data.shape\n    if ncol != 1:\n        raise ValueError(f\"'data' must have a single column, not '{ncol}'\")\n    data = data.tocsc()\n    data.sort_indices()\n    arr = data.data\n    idx = data.indices\n    zero = np.array(0, dtype=arr.dtype).item()\n    dtype = SparseDtype(arr.dtype, zero)\n    index = IntIndex(length, idx)\n    return cls._simple_new(arr, index, dtype)",
    "docstring": "Create a SparseArray from a scipy.sparse matrix. Parameters ---------- data : scipy.sparse.sp_matrix This should be a SciPy sparse matrix where the size of the second dimension is 1. In other words, a sparse matrix with a single column. Returns ------- SparseArray Examples -------- >>> import scipy.sparse >>> mat = scipy.sparse.coo_matrix((4, 1)) >>> pd.arrays.SparseArray.from_spmatrix(mat) [0.0, 0.0, 0.0, 0.0] Fill: 0.0 IntIndex Indices: array([], dtype=int32)",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\arrays\\sparse\\array.py",
    "ast_data": "FunctionDef name:from_spmatrix arg:cls arg:data arguments arg arg Assign If Compare Raise Call Assign Call Call Assign Assign Assign Call Call Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "Hardswish",
    "source_code": "class Hardswish(Module):\n    __constants__ = ['inplace']\n    inplace: bool\n\n    def __init__(self, inplace: bool=False) -> None:\n        super().__init__()\n        self.inplace = inplace\n\n    def forward(self, input: Tensor) -> Tensor:\n        return F.hardswish(input, self.inplace)",
    "docstring": "Applies the Hardswish function, element-wise. Method described in the paper: _. Hardswish is defined as: .. math:: \\text{Hardswish}(x) = \\begin{cases} 0 & \\text{if~} x \\le -3, \\\\ x & \\text{if~} x \\ge +3, \\\\ x \\cdot (x + 3) /6 & \\text{otherwise} \\end{cases} Args: inplace: can optionally do the operation in-place. Default: `(*)*(*)`, same shape as the input. .. image:: ../scripts/activation_images/Hardswish.png Examples:: >>> m = nn.Hardswish() >>> input = torch.randn(2) >>> output = m(input)",
    "type": "class",
    "file_path": "pytorch\\torch\\nn\\modules\\activation.py",
    "ast_data": "ClassDef name:Hardswish Assign FunctionDef name:__init__ arg:self arg:inplace arguments arg arg Call Call Assign FunctionDef name:forward arg:self arg:input arguments arg arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "has_strategy",
    "source_code": "@tf_export('distribute.has_strategy')\ndef has_strategy():\n    return get_strategy() is not _get_default_strategy()",
    "docstring": "Return if there is a current non-default . Returns: True if inside a .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\distribute_lib.py",
    "ast_data": "FunctionDef name:has_strategy arguments Return return:yes Compare Call Call Call"
  },
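A short behavior sketch for `has_strategy`, assuming TensorFlow 2.x is installed: the flag flips to True only inside a strategy scope.

```python
import tensorflow as tf

print(tf.distribute.has_strategy())      # False: only the default strategy is active

strategy = tf.distribute.MirroredStrategy()
with strategy.scope():
    print(tf.distribute.has_strategy())  # True: a non-default strategy is current
```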
  {
    "library": "pytorch",
    "name": "terminate",
    "source_code": "def terminate(self) -> None:\n    self._proc.terminate()",
    "docstring": "Hard interrupt. Immediately SIGTERM subprocess.",
    "type": "method",
    "file_path": "pytorch\\benchmarks\\instruction_counts\\execution\\work.py",
    "ast_data": "FunctionDef name:terminate arg:self arguments arg Call"
  },
  {
    "library": "tensorflow",
    "name": "_image_projective_transform_v3_grad",
    "source_code": "@ops.RegisterGradient('ImageProjectiveTransformV3')\ndef _image_projective_transform_v3_grad(op, grad):\n    images = op.inputs[0]\n    transforms = op.inputs[1]\n    interpolation = op.get_attr('interpolation')\n    fill_mode = op.get_attr('fill_mode')\n    image_or_images = ops.convert_to_tensor(images, name='images')\n    transform_or_transforms = ops.convert_to_tensor(transforms, name='transforms', dtype=dtypes.float32)\n    if image_or_images.dtype.base_dtype not in _IMAGE_DTYPES:\n        raise TypeError('Invalid dtype %s.' % image_or_images.dtype)\n    if len(transform_or_transforms.get_shape()) == 1:\n        transforms = transform_or_transforms[None]\n    elif len(transform_or_transforms.get_shape()) == 2:\n        transforms = transform_or_transforms\n    else:\n        raise TypeError('Transforms should have rank 1 or 2.')\n    transforms = flat_transforms_to_matrices(transforms=transforms)\n    inverse = linalg_ops.matrix_inverse(transforms)\n    transforms = matrices_to_flat_transforms(inverse)\n    output = gen_image_ops.image_projective_transform_v3(images=grad, transforms=transforms, output_shape=array_ops.shape(image_or_images)[1:3], interpolation=interpolation, fill_mode=fill_mode, fill_value=0.0)\n    return [output, None, None, None]",
    "docstring": "Computes the gradient for ImageProjectiveTransform.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\image_ops.py",
    "ast_data": "FunctionDef name:_image_projective_transform_v3_grad arg:op arg:grad arguments arg arg Assign Assign Assign Call Assign Call Assign Call Assign Call If Compare Raise Call If Compare Call Call Assign If Compare Call Call Assign Raise Call Assign Call Assign Call Assign Call Assign Call Call Return return:yes Call"
  },
  {
    "library": "numpy",
    "name": "_unary_method",
    "source_code": "def _unary_method(ufunc, name):\n\n    def func(self):\n        return ufunc(self)\n    func.__name__ = f'__{name}__'\n    return func",
    "docstring": "Implement a unary special method with a ufunc.",
    "type": "function",
    "file_path": "numpy\\numpy\\lib\\mixins.py",
    "ast_data": "FunctionDef name:_unary_method arg:ufunc arg:name arguments arg arg FunctionDef name:func arg:self arguments arg Return return:yes Call Assign Return return:yes"
  },
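A self-contained sketch of the `_unary_method` pattern above. The `Wrapped` class is hypothetical and applies the ufunc to a wrapped payload; numpy's mixin instead applies `ufunc(self)` and relies on `__array_ufunc__`.

```python
import numpy as np

def _unary_method(ufunc, name):
    """Generate a unary dunder (e.g. __neg__) that delegates to a ufunc."""
    def func(self):
        return ufunc(self.value)  # numpy's mixin applies ufunc(self) instead
    func.__name__ = f'__{name}__'
    return func

class Wrapped:  # hypothetical container, for illustration only
    def __init__(self, value):
        self.value = value
    __neg__ = _unary_method(np.negative, 'neg')
    __abs__ = _unary_method(np.absolute, 'abs')

w = Wrapped(np.array([1.0, -2.0]))
print(-w)       # [-1.  2.]
print(abs(w))   # [1. 2.]
```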
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, server_or_cluster_def, job_name=None, task_index=None, protocol=None, config=None, start=True):\n    self._server_def = _make_server_def(server_or_cluster_def, job_name, task_index, protocol, config)\n    self._server = c_api.TF_NewServer(self._server_def.SerializeToString())\n    if start:\n        self.start()",
    "docstring": "Creates a new server with the given definition. The , , and arguments are optional, and override any information provided in . Args: server_or_cluster_def: A or protocol buffer, or a object, describing the server to be created and/or the cluster of which it is a member. job_name: (Optional.) Specifies the name of the job of which the server is a member. Defaults to the value in , if specified. task_index: (Optional.) Specifies the task index of the server in its job. Defaults to the value in , if specified. Otherwise defaults to 0 if the server's job has only one task. protocol: (Optional.) Specifies the protocol to be used by the server. Acceptable values include . Defaults to the value in , if specified. Otherwise defaults to . config: (Options.) A that specifies default configuration options for all sessions that run on this server. start: (Optional.) Boolean, indicating whether to start the server after creating it. Defaults to . Raises: tf.errors.OpError: Or one of its subclasses if an error occurs while creating the TensorFlow server.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\training\\server_lib.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:server_or_cluster_def arg:job_name arg:task_index arg:protocol arg:config arg:start arguments arg arg arg arg arg arg arg Assign Call Assign Call Call If Call"
  },
  {
    "library": "pytorch",
    "name": "chromium",
    "source_code": "@staticmethod\ndef chromium(event_name: str, **metadata: object):\n    CompileEventLogger.add_data(event_name, CompileEventLogLevel.CHROMIUM, overwrite=False, **metadata)",
    "docstring": "Add to in chromium. Each key/value of metadata will appear in the chromium trace. should be the name of a timed event span passed to .",
    "type": "method",
    "file_path": "pytorch\\torch\\_dynamo\\utils.py",
    "ast_data": "FunctionDef name:chromium arg:event_name arguments arg arg Call"
  },
  {
    "library": "tensorflow",
    "name": "_add_row_partitions",
    "source_code": "def _add_row_partitions(self, flat_values, validate=False):\n    if self.row_partitions:\n        if validate:\n            flat_values = self._validate_flat_values(flat_values)\n        return ragged_tensor.RaggedTensor._from_nested_row_partitions(flat_values, self.row_partitions, validate=False)\n    else:\n        return flat_values",
    "docstring": "Add row partitions to flat_values, if necessary. If the shape is truly ragged, then this adds the row_partitions. The shape is dense, then this just returns flat_values. Args: flat_values: the flat_values of a ragged tensor with this shape, or a dense tensor with this shape. validate: validate the flat_values have the right first dimension. Returns: flat_values reshaped to have row_partitions.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\dynamic_ragged_shape.py",
    "ast_data": "FunctionDef name:_add_row_partitions arg:self arg:flat_values arg:validate arguments arg arg arg If If Assign Call Return return:yes Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_SanitizedArgSpec",
    "source_code": "def _SanitizedArgSpec(obj):\n    output_string = ''\n    unsanitized_arg_spec = tf_inspect.getargspec(obj)\n    for clean_attr in ('args', 'varargs', 'keywords'):\n        output_string += '%s=%s, ' % (clean_attr, getattr(unsanitized_arg_spec, clean_attr))\n    if unsanitized_arg_spec.defaults:\n        sanitized_defaults = []\n        for val in unsanitized_arg_spec.defaults:\n            str_val = str(val)\n            if ' at 0x' in str_val:\n                sanitized_defaults.append('%s instance>' % str_val.split(' at ')[0])\n            else:\n                sanitized_defaults.append(str_val)\n        output_string += 'defaults=%s, ' % sanitized_defaults\n    else:\n        output_string += 'defaults=None'\n    return output_string",
    "docstring": "Get an ArgSpec string that is free of addresses. We have callables as function arg defaults. This results in addresses in getargspec output. This function returns a sanitized string list of base classes. Args: obj: A python routine for us the create the sanitized arspec of. Returns: string, a string representation of the argspec.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\tools\\api\\lib\\python_object_to_proto_visitor.py",
    "ast_data": "FunctionDef name:_SanitizedArgSpec arg:obj arguments arg Assign Assign Call For Call If Assign For Assign Call If Compare Call Call Call Return return:yes"
  },
  {
    "library": "kornia",
    "name": "__init__",
    "source_code": "def __init__(self, nerf_model: NerfModel, image_size: tuple[int, int], device: torch.device | None, dtype: torch.dtype | None) -> None:\n    self._nerf_model = nerf_model\n    self._image_size = image_size\n    self._device = device\n    self._dtype = dtype\n    self._pixels_grid, self._ones = self._create_pixels_grid()",
    "docstring": "Construct NerfModelRenderer. Args: nerf_model: NeRF model. image_size: image size. device: device to run the model on. dtype: dtype to run the model on.",
    "type": "method",
    "file_path": "kornia\\kornia\\nerf\\nerf_model.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:nerf_model arg:image_size arg:device arg:dtype arguments arg arg arg arg arg Assign Assign Assign Assign Assign Call"
  },
  {
    "library": "seaborn",
    "name": "facet",
    "source_code": "def facet(self, col: VariableSpec=None, row: VariableSpec=None, order: OrderSpec | dict[str, OrderSpec]=None, wrap: int | None=None) -> Plot:\n    variables: dict[str, VariableSpec] = {}\n    if col is not None:\n        variables['col'] = col\n    if row is not None:\n        variables['row'] = row\n    structure = {}\n    if isinstance(order, dict):\n        for dim in ['col', 'row']:\n            dim_order = order.get(dim)\n            if dim_order is not None:\n                structure[dim] = list(dim_order)\n    elif order is not None:\n        if col is not None and row is not None:\n            err = ' '.join([\"When faceting on both col= and row=, passing `order` as a listis ambiguous. Use a dict with 'col' and/or 'row' keys instead.\"])\n            raise RuntimeError(err)\n        elif col is not None:\n            structure['col'] = list(order)\n        elif row is not None:\n            structure['row'] = list(order)\n    spec: FacetSpec = {'variables': variables, 'structure': structure, 'wrap': wrap}\n    new = self._clone()\n    new._facet_spec.update(spec)\n    return new",
    "docstring": "Produce subplots with conditional subsets of the data. Parameters ---------- col, row : data vectors or identifiers Variables used to define subsets along the columns and/or rows of the grid. Can be references to the global data source passed in the constructor. order : list of strings, or dict with dimensional keys Define the order of the faceting variables. wrap : int When using only or , wrap subplots across a two-dimensional grid with this many subplots on the faceting dimension. Examples -------- .. include:: ../docstrings/objects.Plot.facet.rst",
    "type": "method",
    "file_path": "seaborn\\seaborn\\_core\\plot.py",
    "ast_data": "FunctionDef name:facet arg:self arg:col arg:row arg:order arg:wrap arguments arg arg arg arg arg If Compare Assign If Compare Assign Assign If Call For Assign Call If Compare Assign Call If Compare If BoolOp Compare Compare Assign Call Raise Call If Compare Assign Call If Compare Assign Call Assign Call Call Return return:yes"
  },
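A hedged usage sketch for `Plot.facet` (seaborn's objects interface, available in seaborn >= 0.12); loading the `tips` dataset requires network access the first time.

```python
import seaborn as sns
import seaborn.objects as so

tips = sns.load_dataset("tips")

# Facet columns by 'time'; a list `order` is unambiguous here because only
# col= is used. With both col= and row=, pass a dict instead (see the code).
p = (
    so.Plot(tips, x="total_bill", y="tip")
    .add(so.Dots())
    .facet(col="time", order=["Lunch", "Dinner"])
)
p.save("facet_example.png")  # or p.show() in an interactive session
```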
  {
    "library": "pytorch",
    "name": "create_recv_tensor",
    "source_code": "def create_recv_tensor(placeholder, arg_node):\n    example_value = placeholder.meta['val']\n    if arg_node.op == 'placeholder':\n        return _RootArgPlaceholder(example_value)\n    while arg_node.target is operator.getitem:\n        arg_node = arg_node.args[0]\n    assert arg_node.op == 'call_module', f'Expecting call_module, got {arg_node.op}'\n    src_stage = self.get_stage_index_of_submod(arg_node.name)\n    logger.debug(\"%s Creating recv buffer for input '%s' : %s, %s\", self.log_prefix, placeholder.name, example_value.shape, example_value.dtype)\n    buffer = _make_tensor_from_meta(example_value, self.device)\n    if self.has_backward:\n        buffer.requires_grad_(True)\n    return _RecvInfo(arg_node.name, src_stage, buffer)",
    "docstring": "Create a receive buffer for a placeholder.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\pipelining\\stage.py",
    "ast_data": "FunctionDef name:create_recv_tensor arg:placeholder arg:arg_node arguments arg arg Assign If Compare Return return:yes Call While Compare Assign Compare Assign Call Call Assign Call If Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "instantiate",
    "source_code": "def instantiate(self, globals_, closure, defaults=None, kwdefaults=None):\n    if self._unbound_factory is None:\n        raise ValueError('call create first')\n    factory_code = self._unbound_factory.__code__\n    factory_freevars = factory_code.co_freevars\n    closure_map = dict(zip(self._freevars, closure))\n    factory_closure = tuple((closure_map[name] for name in factory_code.co_freevars))\n    if len(factory_closure) != len(closure):\n        raise ValueError('closure mismatch, requested {}, but source function had {}'.format(self._freevars, factory_freevars))\n    bound_factory = types.FunctionType(code=factory_code, globals=globals_, name=self._name, argdefs=(), closure=factory_closure)\n    new_fn = bound_factory(**self._extra_locals)\n    if defaults:\n        new_fn.__defaults__ = defaults\n    if kwdefaults:\n        new_fn.__kwdefaults__ = kwdefaults\n    return new_fn",
    "docstring": "Creates a new function instance.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\autograph\\pyct\\transpiler.py",
    "ast_data": "FunctionDef name:instantiate arg:self arg:globals_ arg:closure arg:defaults arg:kwdefaults arguments arg arg arg arg arg If Compare Raise Call Assign Assign Assign Call Call Assign Call If Compare Call Call Raise Call Call Assign Call Assign Call If Assign If Assign Return return:yes"
  },
  {
    "library": "pandas",
    "name": "nested_to_record",
    "source_code": "def nested_to_record(ds: dict | list[dict], prefix: str='', sep: str='.', level: int=0, max_level: int | None=None) -> dict[str, Any] | list[dict[str, Any]]:\n    singleton = False\n    if isinstance(ds, dict):\n        ds = [ds]\n        singleton = True\n    new_ds = []\n    for d in ds:\n        new_d = copy.deepcopy(d)\n        for k, v in d.items():\n            if not isinstance(k, str):\n                k = str(k)\n            if level == 0:\n                newkey = k\n            else:\n                newkey = prefix + sep + k\n            if not isinstance(v, dict) or (max_level is not None and level >= max_level):\n                if level != 0:\n                    v = new_d.pop(k)\n                    new_d[newkey] = v\n                continue\n            v = new_d.pop(k)\n            new_d.update(nested_to_record(v, newkey, sep, level + 1, max_level))\n        new_ds.append(new_d)\n    if singleton:\n        return new_ds[0]\n    return new_ds",
    "docstring": "A simplified json_normalize Converts a nested dict into a flat dict (\"record\"), unlike json_normalize, it does not attempt to extract a subset of the data. Parameters ---------- ds : dict or list of dicts prefix: the prefix, optional, default: \"\" sep : str, default '.' Nested records will generate names separated by sep, e.g., for sep='.', { 'foo' : { 'bar' : 0 } } -> foo.bar level: int, optional, default: 0 The number of levels in the json string. max_level: int, optional, default: None The max depth to normalize. Returns ------- d - dict or list of dicts, matching Examples -------- >>> nested_to_record( ... dict(flat1=1, dict1=dict(c=1, d=2), nested=dict(e=dict(c=1, d=2), d=2)) ... ) {'flat1': 1, 'dict1.c': 1, 'dict1.d': 2, 'nested.e.c': 1, 'nested.e.d': 2, 'nested.d': 2}",
    "type": "function",
    "file_path": "pandas\\pandas\\io\\json\\_normalize.py",
    "ast_data": "FunctionDef name:nested_to_record arg:ds arg:prefix arg:sep arg:level arg:max_level arguments arg arg arg arg arg Assign If Call Assign Assign Assign For Assign Call For Call If Call Assign Call If Compare Assign Assign If BoolOp Call BoolOp Compare Compare If Compare Assign Call Assign Assign Call Call Call Call If Return return:yes Return return:yes"
  },
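A usage sketch for `nested_to_record`; note it lives in a pandas-internal module, so the import path below may change between versions.

```python
from pandas.io.json._normalize import nested_to_record

record = nested_to_record(
    {"flat1": 1, "dict1": {"c": 1, "d": 2}, "nested": {"e": {"c": 1}, "d": 2}},
    sep=".",
)
print(record)
# {'flat1': 1, 'dict1.c': 1, 'dict1.d': 2, 'nested.e.c': 1, 'nested.d': 2}
```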
  {
    "library": "matplotlib",
    "name": "resize",
    "source_code": "def resize(self, w, h):\n    pass",
    "docstring": "For GUI backends, resize the window (in physical pixels).",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\backend_bases.py",
    "ast_data": "FunctionDef name:resize arg:self arg:w arg:h arguments arg arg arg"
  },
  {
    "library": "tensorflow",
    "name": "RequiresUniqueFunctionRetracing",
    "source_code": "def RequiresUniqueFunctionRetracing(self):\n    return False",
    "docstring": "Returns whether the tf.function should be retraced if the context changes.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\control_flow_ops.py",
    "ast_data": "FunctionDef name:RequiresUniqueFunctionRetracing arg:self arguments arg Return return:yes"
  },
  {
    "library": "pandas",
    "name": "register_writer",
    "source_code": "def register_writer(klass: ExcelWriter_t) -> None:\n    if not callable(klass):\n        raise ValueError('Can only register callables as engines')\n    engine_name = klass._engine\n    _writers[engine_name] = klass",
    "docstring": "Add engine to the excel writer registry.io.excel. You must use this method to integrate with ``. Parameters ---------- klass : ExcelWriter",
    "type": "function",
    "file_path": "pandas\\pandas\\io\\excel\\_util.py",
    "ast_data": "FunctionDef name:register_writer arg:klass arguments arg If Call Raise Call Assign Assign"
  },
  {
    "library": "scipy",
    "name": "gershgorin_bounds",
    "source_code": "def gershgorin_bounds(H):\n    H_diag = np.diag(H)\n    H_diag_abs = np.abs(H_diag)\n    H_row_sums = np.sum(np.abs(H), axis=1)\n    lb = np.min(H_diag + H_diag_abs - H_row_sums)\n    ub = np.max(H_diag - H_diag_abs + H_row_sums)\n    return (lb, ub)",
    "docstring": "Given a square matrix `` compute upper and lower bounds for its eigenvalues (Gregoshgorin Bounds). Defined ref. [1]. References ---------- .. [1] Conn, A. R., Gould, N. I., & Toint, P. L. Trust region methods. 2000. Siam. pp. 19.",
    "type": "function",
    "file_path": "scipy\\scipy\\optimize\\_trustregion_exact.py",
    "ast_data": "FunctionDef name:gershgorin_bounds arg:H arguments arg Assign Call Assign Call Assign Call Call Assign Call Assign Call Return return:yes"
  },
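Since `gershgorin_bounds` is private to `scipy.optimize`, here is a self-contained sketch of the same computation, checked against the true spectrum of a small symmetric matrix.

```python
import numpy as np

def gershgorin_bounds(H):
    """Lower/upper eigenvalue bounds from Gershgorin discs."""
    H_diag = np.diag(H)
    radii = np.sum(np.abs(H), axis=1) - np.abs(H_diag)  # off-diagonal row sums
    return np.min(H_diag - radii), np.max(H_diag + radii)

H = np.array([[4.0, 1.0, 0.0],
              [1.0, 3.0, 1.0],
              [0.0, 1.0, 2.0]])
lb, ub = gershgorin_bounds(H)
eigs = np.linalg.eigvalsh(H)
assert lb <= eigs.min() and eigs.max() <= ub
print(lb, ub)  # 1.0 5.0
```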
  {
    "library": "sphinx",
    "name": "_IgnoredRedirection",
    "source_code": "class _IgnoredRedirection(Exception):\n\n    def __init__(self, destination: str, status_code: int) -> None:\n        self.destination = destination\n        self.status_code = status_code",
    "docstring": "Sphinx-internal exception raised when an HTTP redirect is ignored",
    "type": "class",
    "file_path": "sphinx\\sphinx\\util\\requests.py",
    "ast_data": "ClassDef name:_IgnoredRedirection FunctionDef name:__init__ arg:self arg:destination arg:status_code arguments arg arg arg Assign Assign"
  },
  {
    "library": "tensorflow",
    "name": "_track_value",
    "source_code": "def _track_value(self, value, name):\n    value = sticky_attribute_assignment(trackable=self, value=value, name=name)\n    if isinstance(value, variables.Variable):\n        self._self_extra_variables.append(value)\n    if not isinstance(value, base.Trackable):\n        raise _UntrackableError(value)\n    if hasattr(value, '_use_resource_variables'):\n        value._use_resource_variables = True\n    value_attribute_sentinel = getattr(value, '_attribute_sentinel', None)\n    if value_attribute_sentinel:\n        value_attribute_sentinel.add_parent(self._attribute_sentinel)\n    return value",
    "docstring": "Add a dependency on .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\trackable\\data_structures.py",
    "ast_data": "FunctionDef name:_track_value arg:self arg:value arg:name arguments arg arg arg Assign Call If Call Call If Call Raise Call If Call Assign Assign Call If Call Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "predict_proba",
    "source_code": "def predict_proba(self, X):\n    return super().predict_proba(X)",
    "docstring": "Return posterior probabilities of classification. Parameters ---------- X : array-like of shape (n_samples, n_features) Array of samples/test vectors. Returns ------- C : ndarray of shape (n_samples, n_classes) Posterior probabilities of classification per class.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\discriminant_analysis.py",
    "ast_data": "FunctionDef name:predict_proba arg:self arg:X arguments arg arg Return return:yes Call Call"
  },
  {
    "library": "cherrypy",
    "name": "CPWSGIHTTPRequest",
    "source_code": "class CPWSGIHTTPRequest(cheroot.server.HTTPRequest):\n\n    def __init__(self, server, conn):\n        super(CPWSGIHTTPRequest, self).__init__(server, conn, proxy_mode=True)",
    "docstring": "Wrapper for cheroot.server.HTTPRequest. This is a layer, which preserves URI parsing mode like it which was before Cheroot v5.8.0.",
    "type": "class",
    "file_path": "cherrypy\\cherrypy\\_cpwsgi_server.py",
    "ast_data": "ClassDef name:CPWSGIHTTPRequest FunctionDef name:__init__ arg:self arg:server arg:conn arguments arg arg arg Call Call"
  },
  {
    "library": "pandas",
    "name": "_from_values_or_dtype",
    "source_code": "@classmethod\ndef _from_values_or_dtype(cls, values=None, categories=None, ordered: bool | None=None, dtype: Dtype | None=None) -> CategoricalDtype:\n    if dtype is not None:\n        if isinstance(dtype, str):\n            if dtype == 'category':\n                if ordered is None and cls.is_dtype(values):\n                    ordered = values.dtype.ordered\n                dtype = CategoricalDtype(categories, ordered)\n            else:\n                raise ValueError(f'Unknown dtype {dtype!r}')\n        elif categories is not None or ordered is not None:\n            raise ValueError('Cannot specify `categories` or `ordered` together with `dtype`.')\n        elif not isinstance(dtype, CategoricalDtype):\n            raise ValueError(f'Cannot not construct CategoricalDtype from {dtype}')\n    elif cls.is_dtype(values):\n        dtype = values.dtype._from_categorical_dtype(values.dtype, categories, ordered)\n    else:\n        dtype = CategoricalDtype(categories, ordered)\n    return cast(CategoricalDtype, dtype)",
    "docstring": "Construct dtype from the input parameters used in :class:. This constructor method specifically does not do the factorization step, if that is needed to find the categories. This constructor may therefore return `categoriesorderedcategoriesordereddtype`. The supplied dtype takes precedence over values' dtype: >>> pd.CategoricalDtype._from_values_or_dtype(c, dtype=dtype2) CategoricalDtype(categories=['x', 'y'], ordered=False, categories_dtype=object)",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\dtypes\\dtypes.py",
    "ast_data": "FunctionDef name:_from_values_or_dtype arg:cls arg:values arg:categories arg:ordered arg:dtype arguments arg arg arg arg arg If Compare If Call If Compare If BoolOp Compare Call Assign Assign Call Raise Call If BoolOp Compare Compare Raise Call If Call Raise Call If Call Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "_get_jacobian",
    "source_code": "@staticmethod\ndef _get_jacobian(tris_pts):\n    a = np.array(tris_pts[:, 1, :] - tris_pts[:, 0, :])\n    b = np.array(tris_pts[:, 2, :] - tris_pts[:, 0, :])\n    J = _to_matrix_vectorized([[a[:, 0], a[:, 1]], [b[:, 0], b[:, 1]]])\n    return J",
    "docstring": "Fast (vectorized) function to compute triangle jacobian matrix. Parameters ---------- tris_pts : array like of dim 3 (shape: (nx, 3, 2)) Coordinates of the containing triangles apexes. Returns ------- array of dim 3 (shape (nx, 2, 2)) Barycentric coordinates of the points inside the containing triangles. J[itri, :, :] is the jacobian matrix at apex 0 of the triangle itri, so that the following (matrix) relationship holds: [dz/dksi] = [J] x [dz/dx] with x: global coordinates ksi: element parametric coordinates in triangle first apex local basis.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\tri\\_triinterpolate.py",
    "ast_data": "FunctionDef name:_get_jacobian arg:tris_pts arguments arg Assign Call Assign Call Assign Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "cross_product",
    "source_code": "def cross_product(*inputs):\n    return list(itertools.product(*inputs))",
    "docstring": "Return a list of cartesian product of input iterables. For example, cross_product(A, B) returns ((x,y) for x in A for y in B).",
    "type": "function",
    "file_path": "pytorch\\benchmarks\\operator_benchmark\\benchmark_utils.py",
    "ast_data": "FunctionDef name:cross_product arguments arg Return return:yes Call Call"
  },
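The helper is a thin wrapper over `itertools.product`; a two-line demo of what it returns:

```python
import itertools

A, B = [1, 2], ["x", "y"]
print(list(itertools.product(A, B)))
# [(1, 'x'), (1, 'y'), (2, 'x'), (2, 'y')]
```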
  {
    "library": "pytorch",
    "name": "_args_kwargs_to_normalized_args_kwargs",
    "source_code": "def _args_kwargs_to_normalized_args_kwargs(sig: inspect.Signature, args: tuple[Any, ...], kwargs: dict[str, Any], normalize_to_only_use_kwargs: bool) -> Optional[ArgsKwargsPair]:\n    supported_parameter_types = {inspect.Parameter.POSITIONAL_OR_KEYWORD, inspect.Parameter.KEYWORD_ONLY}\n    if any((p.kind not in supported_parameter_types for p in sig.parameters.values())):\n        if list(sig.parameters.keys()) != ['input', 'from', 'to', 'generator']:\n            return None\n    bound_args = sig.bind(*args, **kwargs)\n    bound_args.apply_defaults()\n    new_kwargs: dict[str, Any] = {}\n    new_args: list[Any] = []\n    for i, param in enumerate(sig.parameters):\n        if not normalize_to_only_use_kwargs and i < len(args):\n            new_args.append(bound_args.arguments[param])\n        else:\n            new_kwargs[param] = bound_args.arguments[param]\n    return ArgsKwargsPair(tuple(new_args), new_kwargs)",
    "docstring": "Given a call target, args, and kwargs, return the arguments normalized into an ArgsKwargsPair, or None if the type signature is not supported by this normalization. Args: sig (inspect.Signature): Signature object for the target args (Tuple): Arguments that appear at the callsite for kwargs (Dict): Keyword arguments that appear at the callsite for normalize_to_only_use_kwargs (bool): Whether to normalize to only use kwargs. Returns: Optional[ArgsKwargsPair]: Normalized args and kwargs for , or if this target is not supported.",
    "type": "function",
    "file_path": "pytorch\\torch\\fx\\operator_schemas.py",
    "ast_data": "FunctionDef name:_args_kwargs_to_normalized_args_kwargs arg:sig arg:args arg:kwargs arg:normalize_to_only_use_kwargs arguments arg arg arg arg Assign If Call Compare Call If Compare Call Call Return return:no Assign Call Call For Call If BoolOp Compare Call Call Assign Return return:yes Call Call"
  },
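The core of the normalization is `inspect.Signature.bind` plus `apply_defaults`; a minimal standalone sketch (the `target` function below is hypothetical):

```python
import inspect

def target(a, b, c=3):
    return a + b + c

sig = inspect.signature(target)
bound = sig.bind(1, 2)        # args/kwargs exactly as they appeared at the callsite
bound.apply_defaults()        # fill in c=3
print(dict(bound.arguments))  # {'a': 1, 'b': 2, 'c': 3}

# The all-kwargs form calls the target identically.
assert target(**bound.arguments) == target(1, 2)
```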
  {
    "library": "django",
    "name": "_choice_has_empty_value",
    "source_code": "@staticmethod\ndef _choice_has_empty_value(choice):\n    value, _ = choice\n    return value is None or value == ''",
    "docstring": "Return True if the choice's value is empty string or None.",
    "type": "method",
    "file_path": "django\\django\\forms\\widgets.py",
    "ast_data": "FunctionDef name:_choice_has_empty_value arg:choice arguments arg Assign Return return:yes BoolOp Compare Compare"
  },
  {
    "library": "matplotlib",
    "name": "get_subplotspec_list",
    "source_code": "def get_subplotspec_list(axes_list, grid_spec=None):\n    subplotspec_list = []\n    for ax in axes_list:\n        axes_or_locator = ax.get_axes_locator()\n        if axes_or_locator is None:\n            axes_or_locator = ax\n        if hasattr(axes_or_locator, 'get_subplotspec'):\n            subplotspec = axes_or_locator.get_subplotspec()\n            if subplotspec is not None:\n                subplotspec = subplotspec.get_topmost_subplotspec()\n                gs = subplotspec.get_gridspec()\n                if grid_spec is not None:\n                    if gs != grid_spec:\n                        subplotspec = None\n                elif gs.locally_modified_subplot_params():\n                    subplotspec = None\n        else:\n            subplotspec = None\n        subplotspec_list.append(subplotspec)\n    return subplotspec_list",
    "docstring": "Return a list of subplotspec from the given list of Axes. For an instance of Axes that does not support subplotspec, None is inserted in the list. If grid_spec is given, None is inserted for those not from the given grid_spec.",
    "type": "function",
    "file_path": "matplotlib\\lib\\matplotlib\\_tight_layout.py",
    "ast_data": "FunctionDef name:get_subplotspec_list arg:axes_list arg:grid_spec arguments arg arg Assign For Assign Call If Compare Assign If Call Assign Call If Compare Assign Call Assign Call If Compare If Compare Assign If Call Assign Assign Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "ConvBnReLU3d",
    "source_code": "class ConvBnReLU3d(_FusedModule):\n\n    def __init__(self, conv, bn, relu):\n        assert type_before_parametrizations(conv) == Conv3d and type_before_parametrizations(bn) == BatchNorm3d and (type_before_parametrizations(relu) == ReLU), f'Incorrect types for input modules{type_before_parametrizations(conv)}{type_before_parametrizations(bn)}{type_before_parametrizations(relu)}'\n        super().__init__(conv, bn, relu)",
    "docstring": "This is a sequential container which calls the Conv 3d, Batch Norm 3d, and ReLU modules. During quantization this will be replaced with the corresponding fused module.",
    "type": "class",
    "file_path": "pytorch\\torch\\ao\\nn\\intrinsic\\modules\\fused.py",
    "ast_data": "ClassDef name:ConvBnReLU3d FunctionDef name:__init__ arg:self arg:conv arg:bn arg:relu arguments arg arg arg arg BoolOp Compare Call Compare Call Compare Call Call Call Call Call Call"
  },
  {
    "library": "scikit-learn",
    "name": "fit",
    "source_code": "@_fit_context(prefer_skip_nested_validation=True)\ndef fit(self, X, y=None):\n    X = validate_data(self, X, accept_sparse=('csr', 'csc'), accept_large_sparse=not _IS_32BIT)\n    if not sp.issparse(X):\n        X = sp.csr_matrix(X)\n    dtype = X.dtype if X.dtype in (np.float64, np.float32) else np.float64\n    if self.use_idf:\n        n_samples, _ = X.shape\n        df = _document_frequency(X)\n        df = df.astype(dtype, copy=False)\n        df += float(self.smooth_idf)\n        n_samples += int(self.smooth_idf)\n        self.idf_ = np.full_like(df, fill_value=n_samples, dtype=dtype)\n        self.idf_ /= df\n        np.log(self.idf_, out=self.idf_)\n        self.idf_ += 1.0\n    return self",
    "docstring": "Learn the idf vector (global term weights). Parameters ---------- X : sparse matrix of shape (n_samples, n_features) A matrix of term/token counts. y : None This parameter is not needed to compute tf-idf. Returns ------- self : object Fitted transformer.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\feature_extraction\\text.py",
    "ast_data": "FunctionDef name:fit arg:self arg:X arg:y arguments arg arg arg Assign Call If Call Assign Call Assign Compare If Assign Assign Call Assign Call Call Call Assign Call Call Return return:yes Call"
  },
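A small usage sketch for the `fit` above: fitting on a term-count matrix populates `idf_`, one weight per feature, with rarer terms receiving larger weights.

```python
import numpy as np
from sklearn.feature_extraction.text import TfidfTransformer

counts = np.array([[3, 0, 1],
                   [2, 0, 0],
                   [3, 0, 2]])
tfidf = TfidfTransformer(smooth_idf=True).fit(counts)
print(tfidf.idf_)  # the all-zero middle column gets the largest idf weight
```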
  {
    "library": "tensorflow",
    "name": "__enter__",
    "source_code": "def __enter__(self):\n    self._push_tape()\n    return self",
    "docstring": "Enters a context inside which operations are recorded on this tape.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\backprop.py",
    "ast_data": "FunctionDef name:__enter__ arg:self arguments arg Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "make_cell",
    "source_code": "def make_cell(val=None):\n    x = val\n\n    def f():\n        return x\n    assert f.__closure__ is not None and len(f.__closure__) == 1\n    return f.__closure__[0]",
    "docstring": "Some black magic to create a cell object that usually only exists in a closure",
    "type": "function",
    "file_path": "pytorch\\torch\\_dynamo\\utils.py",
    "ast_data": "FunctionDef name:make_cell arg:val arguments arg Assign FunctionDef name:f arguments Return return:yes BoolOp Compare Compare Call Return return:yes"
  },
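A pure-Python sketch of the same trick: defining a closure is the usual way to obtain a `CellType` instance, which `make_cell` then extracts.

```python
import types

def make_cell(val=None):
    x = val
    def f():
        return x          # closing over x forces CPython to create a cell
    return f.__closure__[0]

cell = make_cell(42)
print(type(cell) is types.CellType)  # True
print(cell.cell_contents)            # 42
```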
  {
    "library": "pytorch",
    "name": "match_extensions",
    "source_code": "def match_extensions(filename: str, extensions: Iterable) -> bool:\n    return any((filename.endswith(e) for e in extensions))",
    "docstring": "Helper method to see if filename ends with certain extension",
    "type": "function",
    "file_path": "pytorch\\torch\\utils\\hipify\\hipify_python.py",
    "ast_data": "FunctionDef name:match_extensions arg:filename arg:extensions arguments arg arg Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_guard_if_present",
    "source_code": "def _guard_if_present(self, block, var_name):\n    if not block:\n        return block\n    template = '\\n        if not var_name:\\n          block\\n      '\n    node = templates.replace(template, var_name=var_name, block=block)\n    return node",
    "docstring": "Prevents the block from executing if var_name is set.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\autograph\\converters\\break_statements.py",
    "ast_data": "FunctionDef name:_guard_if_present arg:self arg:block arg:var_name arguments arg arg arg If Return return:yes Assign Assign Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "get_orientation",
    "source_code": "def get_orientation(self):\n    return 'horizontal' if self.is_horizontal() else 'vertical'",
    "docstring": "Return the orientation of the event line ('horizontal' or 'vertical').",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\collections.py",
    "ast_data": "FunctionDef name:get_orientation arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "kornia",
    "name": "RgbToXyz",
    "source_code": "class RgbToXyz(Module):\n    ONNX_DEFAULT_INPUTSHAPE: ClassVar[list[int]] = [-1, 3, -1, -1]\n    ONNX_DEFAULT_OUTPUTSHAPE: ClassVar[list[int]] = [-1, 3, -1, -1]\n\n    def forward(self, image: Tensor) -> Tensor:\n        return rgb_to_xyz(image)",
    "docstring": "Convert an image from RGB to XYZ. The image data is assumed to be in the range of (0, 1). Returns: XYZ version of the image. Shape: - image: :math: - output: :math: Examples: >>> input = torch.rand(2, 3, 4, 5) >>> xyz = RgbToXyz() >>> output = xyz(input) # 2x3x4x5 Reference: [1]",
    "type": "class",
    "file_path": "kornia\\kornia\\color\\xyz.py",
    "ast_data": "ClassDef name:RgbToXyz FunctionDef name:forward arg:self arg:image arguments arg arg Return return:yes Call"
  },
  {
    "library": "authlib",
    "name": "deserialize_json",
    "source_code": "def deserialize_json(self, obj, key, decode=None):\n    obj = ensure_dict(obj, 'JWS')\n    payload_segment = obj.get('payload')\n    if payload_segment is None:\n        raise DecodeError('Missing \"payload\" value')\n    payload_segment = to_bytes(payload_segment)\n    payload = _extract_payload(payload_segment)\n    if decode:\n        payload = decode(payload)\n    if 'signatures' not in obj:\n        jws_header, valid = self._validate_json_jws(payload_segment, payload, obj, key)\n        rv = JWSObject(jws_header, payload, 'flat')\n        if valid:\n            return rv\n        raise BadSignatureError(rv)\n    headers = []\n    is_valid = True\n    for header_obj in obj['signatures']:\n        jws_header, valid = self._validate_json_jws(payload_segment, payload, header_obj, key)\n        headers.append(jws_header)\n        if not valid:\n            is_valid = False\n    rv = JWSObject(headers, payload, 'json')\n    if is_valid:\n        return rv\n    raise BadSignatureError(rv)",
    "docstring": "Exact JWS JSON Serialization, and validate with the given key. If key is not provided, it will return a dict without signature verification. Header will still be validated. Via _. :param obj: text of JWS JSON Serialization :param key: key used to verify the signature :param decode: a function to decode payload data :return: JWSObject :raise: BadSignatureError .. _:",
    "type": "method",
    "file_path": "authlib\\authlib\\jose\\rfc7515\\jws.py",
    "ast_data": "FunctionDef name:deserialize_json arg:self arg:obj arg:key arg:decode arguments arg arg arg arg Assign Call Assign Call If Compare Raise Call Assign Call Assign Call If Assign Call If Compare Assign Call Assign Call If Return return:yes Raise Call Assign Assign For Assign Call Call If Assign Assign Call If Return return:yes Raise Call"
  },
  {
    "library": "numpy",
    "name": "combine_paths",
    "source_code": "def combine_paths(self, *args):\n    return combine_paths(*args)",
    "docstring": "Return a list of existing paths composed by all combinations of items from the arguments.",
    "type": "method",
    "file_path": "numpy\\numpy\\distutils\\system_info.py",
    "ast_data": "FunctionDef name:combine_paths arg:self arguments arg arg Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "edit_margins",
    "source_code": "def edit_margins(self, todo, size):\n    for i in range(len(self.margin_vals[todo])):\n        self.edit_margin(todo, size, i)",
    "docstring": "Change the size of all the margin of all the cells in the layout grid. Parameters ---------- todo : string (one of 'left', 'right', 'bottom', 'top') margin to alter. size : float Size to set the margins. Fraction of figure size.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\_layoutgrid.py",
    "ast_data": "FunctionDef name:edit_margins arg:self arg:todo arg:size arguments arg arg arg For Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "_resize_fft_input",
    "source_code": "def _resize_fft_input(x: TensorLikeType, dims: tuple[int, ...], sizes: tuple[int, ...]) -> TensorLikeType:\n    assert len(dims) == len(sizes)\n    must_copy = False\n    x_sizes = x.shape\n    pad_amount = [0] * len(x_sizes) * 2\n    for i in range(len(dims)):\n        if sizes[i] == -1:\n            continue\n        if x_sizes[dims[i]] < sizes[i]:\n            must_copy = True\n            pad_idx = len(pad_amount) - 2 * dims[i] - 1\n            pad_amount[pad_idx] = sizes[i] - x_sizes[dims[i]]\n        if x_sizes[dims[i]] > sizes[i]:\n            x = x.narrow(dims[i], 0, sizes[i])\n    return torch.constant_pad_nd(x, pad_amount) if must_copy else x",
    "docstring": "Fixes the shape of x such that x.size(dims[i]) == sizes[i], either by zero-padding, or by slicing x starting from 0.",
    "type": "function",
    "file_path": "pytorch\\torch\\_refs\\fft.py",
    "ast_data": "FunctionDef name:_resize_fft_input arg:x arg:dims arg:sizes arguments arg arg arg Compare Call Call Assign Assign Assign Call For Call Call If Compare If Compare Assign Assign Call Assign If Compare Assign Call Return return:yes Call"
  },
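A sketch of the two resize cases the helper combines, using the same torch primitives: growing a dimension zero-pads at the end, shrinking slices from index 0.

```python
import torch

x = torch.arange(6.0).reshape(2, 3)

# Grow the last dim 3 -> 5: constant_pad_nd appends zeros.
padded = torch.constant_pad_nd(x, [0, 2])
print(padded.shape)  # torch.Size([2, 5])

# Shrink the last dim 3 -> 2: narrow keeps the front slice.
sliced = x.narrow(1, 0, 2)
print(sliced)        # tensor([[0., 1.], [3., 4.]])
```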
  {
    "library": "django",
    "name": "angular_name",
    "source_code": "@property\ndef angular_name(self):\n    units, name = capi.angular_units(self.ptr, byref(c_char_p()))\n    return name",
    "docstring": "Return the name of the angular units.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\gdal\\srs.py",
    "ast_data": "FunctionDef name:angular_name arg:self arguments arg Assign Call Call Call Return return:yes"
  },
  {
    "library": "django",
    "name": "use_required_attribute",
    "source_code": "def use_required_attribute(self, initial):\n    use_required_attribute = super().use_required_attribute(initial)\n    if self.allow_multiple_selected:\n        return use_required_attribute\n    first_choice = next(iter(self.choices), None)\n    return use_required_attribute and first_choice is not None and self._choice_has_empty_value(first_choice)",
    "docstring": "Don't render 'required' if the first has a value, as that's invalid HTML.",
    "type": "method",
    "file_path": "django\\django\\forms\\widgets.py",
    "ast_data": "FunctionDef name:use_required_attribute arg:self arg:initial arguments arg arg Assign Call Call If Return return:yes Assign Call Call Return return:yes BoolOp Compare Call"
  },
  {
    "library": "tensorflow",
    "name": "_check_output_shapes",
    "source_code": "def _check_output_shapes(self, incoming_output_shapes: List[TensorShape]):\n    nest.assert_same_structure(self._output_shapes, incoming_output_shapes)\n    for (path, _), old_output_shape, incoming_output_shape in zip(nest.flatten_with_joined_string_paths(self._feature_config), self._output_shapes, incoming_output_shapes):\n        if old_output_shape and incoming_output_shape:\n            if (len(incoming_output_shape) == 1 or len(incoming_output_shape) == 2) and len(old_output_shape) > len(incoming_output_shape):\n                continue\n            if len(old_output_shape) != len(incoming_output_shape) or not self._is_tensor_shape_match(old_output_shape, incoming_output_shape):\n                raise ValueError(f'Inconsistent shape founded for input feature {path}, Output shape is set to be {old_output_shape}, But got incoming output shape {incoming_output_shape}')",
    "docstring": "Check the incoming output shapes against the output shapes stored.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\tpu\\tpu_embedding_v2.py",
    "ast_data": "FunctionDef name:_check_output_shapes arg:self arg:incoming_output_shapes arguments arg arg Call For Call Call If BoolOp If BoolOp BoolOp Compare Call Compare Call Compare Call Call If BoolOp Compare Call Call Call Raise Call"
  },
  {
    "library": "authlib",
    "name": "register_signature_method",
    "source_code": "@classmethod\ndef register_signature_method(cls, name, verify):\n    cls.SIGNATURE_METHODS[name] = verify",
    "docstring": "Extend signature method verification. :param name: A string to represent signature method. :param verify: A function to verify signature. The `` as parameter:: def verify_custom_method(request): # verify this request, return True or False return True Server.register_signature_method(\"custom-name\", verify_custom_method)",
    "type": "method",
    "file_path": "authlib\\authlib\\oauth1\\rfc5849\\base_server.py",
    "ast_data": "FunctionDef name:register_signature_method arg:cls arg:name arg:verify arguments arg arg arg Assign"
  },
  {
    "library": "pandas",
    "name": "get_op_result_name",
    "source_code": "def get_op_result_name(left, right):\n    if isinstance(right, (ABCSeries, ABCIndex)):\n        name = _maybe_match_name(left, right)\n    else:\n        name = left.name\n    return name",
    "docstring": "Find the appropriate name to pin to an operation result. This result should always be either an Index or a Series. Parameters ---------- left : {Series, Index} right : object Returns ------- name : object Usually a string",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\ops\\common.py",
    "ast_data": "FunctionDef name:get_op_result_name arg:left arg:right arguments arg arg If Call Assign Call Assign Return return:yes"
  },
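The observable behavior in public pandas ops, as a quick sketch: matching names propagate to the result, mismatched names drop to None.

```python
import pandas as pd

a = pd.Series([1, 2], name="x")
b = pd.Series([3, 4], name="x")
c = pd.Series([5, 6], name="y")

print((a + b).name)  # 'x'  -- names match, so the name is pinned
print((a + c).name)  # None -- names differ
```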
  {
    "library": "matplotlib",
    "name": "get_path",
    "source_code": "def get_path(self):\n    codes = [Path.MOVETO]\n    codes.extend((Path.LINETO if edge in self._visible_edges else Path.MOVETO for edge in self._edges))\n    if Path.MOVETO not in codes[1:]:\n        codes[-1] = Path.CLOSEPOLY\n    return Path([[0.0, 0.0], [1.0, 0.0], [1.0, 1.0], [0.0, 1.0], [0.0, 0.0]], codes, readonly=True)",
    "docstring": "Return a for the .",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\table.py",
    "ast_data": "FunctionDef name:get_path arg:self arguments arg Assign Call Compare If Compare Assign Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "get_tensors_from_tensor_names",
    "source_code": "def get_tensors_from_tensor_names(graph, tensor_names):\n    tensor_name_to_tensor = {}\n    for op in graph.get_operations():\n        for tensor in op.values():\n            tensor_name_to_tensor[get_tensor_name(tensor)] = tensor\n    tensors = []\n    invalid_tensors = []\n    for name in tensor_names:\n        if not isinstance(name, str):\n            raise ValueError(\"Invalid type for a tensor name in the provided graph. Expected type for a tensor name is 'str', instead got type '{}' for tensor name '{}'\".format(type(name), name))\n        tensor = tensor_name_to_tensor.get(name)\n        if tensor is None:\n            invalid_tensors.append(name)\n        else:\n            tensors.append(tensor)\n    if invalid_tensors:\n        raise ValueError(\"Invalid tensors '{}' were found.\".format(','.join(invalid_tensors)))\n    return tensors",
    "docstring": "Gets the Tensors associated with the in the provided graph. Args: graph: TensorFlow Graph. tensor_names: List of strings that represent names of tensors in the graph. Returns: A list of Tensor objects in the same order the names are provided. Raises: ValueError: tensor_names contains an invalid tensor name.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\lite\\python\\util.py",
    "ast_data": "FunctionDef name:get_tensors_from_tensor_names arg:graph arg:tensor_names arguments arg arg Assign For Call For Call Assign Call Assign Assign For If Call Raise Call Call Call Assign Call If Compare Call Call If Raise Call Call Call Return return:yes"
  },
  {
    "library": "django",
    "name": "get_meta",
    "source_code": "def get_meta(self, table_name, constraints, column_to_field_name, is_view, is_partition, comment):\n    unique_together = []\n    has_unsupported_constraint = False\n    for params in constraints.values():\n        if params['unique']:\n            columns = params['columns']\n            if None in columns:\n                has_unsupported_constraint = True\n            columns = [x for x in columns if x is not None and x in column_to_field_name]\n            if len(columns) > 1 and (not params['primary_key']):\n                unique_together.append(str(tuple((column_to_field_name[c] for c in columns))))\n    if is_view:\n        managed_comment = \"  # Created from a view. Don't remove.\"\n    elif is_partition:\n        managed_comment = \"  # Created from a partition. Don't remove.\"\n    else:\n        managed_comment = ''\n    meta = ['']\n    if has_unsupported_constraint:\n        meta.append('    # A unique constraint could not be introspected.')\n    meta += ['    class Meta:', '        managed = False%s' % managed_comment, '        db_table = %r' % table_name]\n    if unique_together:\n        tup = '(' + ', '.join(unique_together) + ',)'\n        meta += ['        unique_together = %s' % tup]\n    if comment:\n        meta += [f'        db_table_comment = {comment!r}']\n    return meta",
    "docstring": "Return a sequence comprising the lines of code necessary to construct the inner Meta class for the model corresponding to the given database table name.",
    "type": "method",
    "file_path": "django\\django\\core\\management\\commands\\inspectdb.py",
    "ast_data": "FunctionDef name:get_meta arg:self arg:table_name arg:constraints arg:column_to_field_name arg:is_view arg:is_partition arg:comment arguments arg arg arg arg arg arg arg Assign Assign For Call If Assign If Compare Assign Assign BoolOp Compare Compare If BoolOp Compare Call Call Call Call If Assign If Assign Assign Assign If Call If Assign Call If Return return:yes"
  },
  {
    "library": "authlib",
    "name": "validate_scopes_supported",
    "source_code": "def validate_scopes_supported(self):\n    validate_array_value(self, 'scopes_supported')",
    "docstring": "RECOMMENDED. JSON array containing a list of the OAuth 2.0 [RFC6749] \"scope\" values that this authorization server supports. Servers MAY choose not to advertise some supported scope values even when this parameter is used.",
    "type": "method",
    "file_path": "authlib\\authlib\\oauth2\\rfc8414\\models.py",
    "ast_data": "FunctionDef name:validate_scopes_supported arg:self arguments arg Call"
  },
  {
    "library": "scipy",
    "name": "tobsr",
    "source_code": "def tobsr(self, blocksize=None, copy=False):\n    if blocksize not in [None, self.blocksize]:\n        return self.tocsr().tobsr(blocksize=blocksize)\n    if copy:\n        return self.copy()\n    else:\n        return self",
    "docstring": "Convert this array/matrix into Block Sparse Row Format. With copy=False, the data/indices may be shared between this array/matrix and the resultant bsr_array/bsr_matrix. If blocksize=(R, C) is provided, it will be used for determining block size of the bsr_array/bsr_matrix.",
    "type": "method",
    "file_path": "scipy\\scipy\\sparse\\_bsr.py",
    "ast_data": "FunctionDef name:tobsr arg:self arg:blocksize arg:copy arguments arg arg arg If Compare Return return:yes Call Call If Return return:yes Call Return return:yes"
  },
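A usage sketch for `tobsr` (assuming SciPy >= 1.8 for `csr_array`): converting CSR to BSR with an explicit block size.

```python
import numpy as np
from scipy.sparse import csr_array

A = csr_array(np.kron(np.eye(2), np.ones((2, 2))))  # 4x4 with two dense 2x2 blocks
B = A.tobsr(blocksize=(2, 2))
print(B.blocksize)  # (2, 2)
print(B.nnz)        # 8 stored values, grouped into two 2x2 blocks
```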
  {
    "library": "pandas",
    "name": "_has_no_reference",
    "source_code": "def _has_no_reference(self, i: int) -> bool:\n    blkno = self.blknos[i]\n    return self._has_no_reference_block(blkno)",
    "docstring": "Check for column if it has references. (whether it references another array or is itself being referenced) Returns True if the column has no references.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\internals\\managers.py",
    "ast_data": "FunctionDef name:_has_no_reference arg:self arg:i arguments arg arg Assign Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "forward_helper",
    "source_code": "def forward_helper(func, expanded_args, expanded_kwargs):\n    unexpanded_args, unexpanded_kwargs = _check_and_unexpand_args(func, expanded_args, expanded_kwargs)\n    return func(*unexpanded_args, **unexpanded_kwargs)",
    "docstring": "Compute the forward pass for a function that has expanded weight(s) passed to it. It will run the forward pass where all ExpandedWeights are their original weight. It runs checks on the given arguments and detaches the outputs. .. note:: First argument in :attr: must be the input with the batch dimension as the first element of the shape .. note:: :attr: must return a Tensor or tuple of Tensors Args: func: The function to be called expanded_args: Arguments to be passed to :attr:. Will include arguments that need to be unpacked because they are ExpandedWeights expanded_kwargs: Keyword arguments to be passed to :attr:. Similar to :attr:.",
    "type": "function",
    "file_path": "pytorch\\torch\\nn\\utils\\_expanded_weights\\expanded_weights_utils.py",
    "ast_data": "FunctionDef name:forward_helper arg:func arg:expanded_args arg:expanded_kwargs arguments arg arg arg Assign Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "make_composite_tensor",
    "source_code": "def make_composite_tensor(cls, module_name='tf.linalg'):\n    spec_name = '{}Spec'.format(cls.__name__)\n    spec_type = type(spec_name, (_LinearOperatorSpec,), {'value_type': cls})\n    type_spec_registry.register('{}.{}'.format(module_name, spec_name))(spec_type)\n    cls._type_spec = property(spec_type.from_operator)\n    return cls",
    "docstring": "Class decorator to convert s to .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\linalg\\linear_operator.py",
    "ast_data": "FunctionDef name:make_composite_tensor arg:cls arg:module_name arguments arg arg Assign Call Assign Call Call Call Call Assign Call Return return:yes"
  },
  {
    "library": "scipy",
    "name": "write",
    "source_code": "def write(self, target, a, comment='', field=None, precision=None, symmetry=None):\n    stream, close_it = self._open(target, 'wb')\n    try:\n        self._write(stream, a, comment, field, precision, symmetry)\n    finally:\n        if close_it:\n            stream.close()\n        else:\n            stream.flush()",
    "docstring": "Writes sparse or dense array to Matrix Market file-like . Parameters ---------- target : str or file-like Matrix Market filename (extension .mtx) or open file-like object. a : array like Sparse or dense 2-D array. comment : str, optional Comments to be prepended to the Matrix Market file. field : None or str, optional Either 'real', 'complex', 'pattern', or 'integer'. precision : None or int, optional Number of digits to display for real or complex values. symmetry : None or str, optional Either 'general', 'symmetric', 'skew-symmetric', or 'hermitian'. If symmetry is None the symmetry type of 'a' is determined by its values.",
    "type": "method",
    "file_path": "scipy\\scipy\\io\\_mmio.py",
    "ast_data": "FunctionDef name:write arg:self arg:target arg:a arg:comment arg:field arg:precision arg:symmetry arguments arg arg arg arg arg arg arg Assign Call Try Call If Call Call"
  },
  {
    "library": "kornia",
    "name": "__call__",
    "source_code": "def __call__(self, *inputs: Any, input_names_to_handle: Optional[List[Any]]=None, output_type: str='tensor', **kwargs: Any) -> Any:\n    if not self._disable_features:\n        decorated_forward = self.convert_input_output(input_names_to_handle=input_names_to_handle, output_type=output_type)(super().__call__)\n        _output_image = decorated_forward(*inputs, **kwargs)\n        if output_type == 'tensor':\n            self._output_image = self._detach_tensor_to_cpu(_output_image)\n        else:\n            self._output_image = _output_image\n    else:\n        _output_image = super().__call__(*inputs, **kwargs)\n    return _output_image",
    "docstring": "Overwrite the __call__ function to handle various inputs. Args: inputs: Inputs to operate on. input_names_to_handle: List of input names to convert, if None, handle all inputs. output_type: Desired output type ('tensor', 'numpy', or 'pil'). kwargs: Additional arguments. Returns: Callable: Decorated function with converted input and output types.",
    "type": "method",
    "file_path": "kornia\\kornia\\augmentation\\container\\image.py",
    "ast_data": "FunctionDef name:__call__ arg:self arguments arg arg arg arg arg If Assign Call Call Call Assign Call If Compare Assign Call Assign Assign Call Call Return return:yes"
  },
  {
    "library": "scrapy",
    "name": "get_settings_priority",
    "source_code": "def get_settings_priority(priority: int | str) -> int:\n    if isinstance(priority, str):\n        return SETTINGS_PRIORITIES[priority]\n    return priority",
    "docstring": "Small helper function that looks up a given string priority in the :attr: dictionary and returns its numerical value, or directly returns a given numerical priority.",
    "type": "function",
    "file_path": "scrapy\\scrapy\\settings\\__init__.py",
    "ast_data": "FunctionDef name:get_settings_priority arg:priority arguments arg If Call Return return:yes Return return:yes"
  },
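A usage sketch; the numeric values come from Scrapy's `SETTINGS_PRIORITIES` table and may differ across versions.

```python
from scrapy.settings import SETTINGS_PRIORITIES, get_settings_priority

print(SETTINGS_PRIORITIES["project"])    # 20 in current Scrapy releases
print(get_settings_priority("project"))  # 20 -- string looked up in the table
print(get_settings_priority(15))         # 15 -- numeric priority passes through
```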
  {
    "library": "pytorch",
    "name": "get_fake_args_kwargs",
    "source_code": "def get_fake_args_kwargs(x: torch.fx.Node) -> tuple[bool, tuple[Any], dict[str, Any]]:\n    args, kwargs = tree_map(get_fake, (x.args, x.kwargs))\n    if any((isinstance(a, torch.fx.Node) for a in pytree.arg_tree_leaves(*args, **kwargs))):\n        return (False, args, kwargs)\n    return (True, args, kwargs)",
    "docstring": "First value returns a boolean if any of the input nodes don't have a faketensor.",
    "type": "function",
    "file_path": "pytorch\\torch\\_inductor\\fx_utils.py",
    "ast_data": "FunctionDef name:get_fake_args_kwargs arg:x arguments arg Assign Call If Call Call Call Return return:yes Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_make_execution_function_without_cloning",
    "source_code": "def _make_execution_function_without_cloning(model, mode):\n    strategy = model._distribution_strategy\n    with strategy.scope():\n        per_replica_function = _make_replica_execution_function(model, mode)\n\n        def distributed_function(input_fn):\n            x, y, sample_weights = input_fn()\n            outputs = strategy.run(per_replica_function, args=(x, y, sample_weights))\n            all_outputs = unwrap_outputs(strategy, outputs, with_loss_tensor=mode != ModeKeys.PREDICT)\n            return all_outputs\n        if not model.run_eagerly:\n            distributed_function = def_function.function(distributed_function)\n\n            def execution_function(input_fn):\n                return [out.numpy() for out in distributed_function(input_fn)]\n        else:\n            execution_function = distributed_function\n        return execution_function",
    "docstring": "Creates a function to run one step of distributed model execution.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\distribute\\distributed_training_utils_v1.py",
    "ast_data": "FunctionDef name:_make_execution_function_without_cloning arg:model arg:mode arguments arg arg Assign With Call Assign Call FunctionDef name:distributed_function arg:input_fn arguments arg Assign Call Assign Call Assign Call Compare Return return:yes If Assign Call FunctionDef name:execution_function arg:input_fn arguments arg Return return:yes Call Call Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_NumericColumn",
    "source_code": "class _NumericColumn(_DenseColumn, collections.namedtuple('_NumericColumn', ['key', 'shape', 'default_value', 'dtype', 'normalizer_fn'])):\n\n    @property\n    def name(self):\n        return self.key\n\n    @property\n    def _parse_example_spec(self):\n        return {self.key: parsing_ops.FixedLenFeature(self.shape, self.dtype, self.default_value)}\n\n    def _transform_feature(self, inputs):\n        input_tensor = inputs.get(self.key)\n        if isinstance(input_tensor, sparse_tensor_lib.SparseTensor):\n            raise ValueError('The corresponding Tensor of numerical column must be a Tensor. SparseTensor is not supported. key: {}'.format(self.key))\n        if self.normalizer_fn is not None:\n            input_tensor = self.normalizer_fn(input_tensor)\n        return math_ops.cast(input_tensor, dtypes.float32)\n\n    @property\n    def _variable_shape(self):\n        return tensor_shape.TensorShape(self.shape)\n\n    def _get_dense_tensor(self, inputs, weight_collections=None, trainable=None):\n        del weight_collections\n        del trainable\n        return inputs.get(self)",
    "docstring": "see .",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\feature_column\\feature_column.py",
    "ast_data": "ClassDef name:_NumericColumn Call FunctionDef name:name arg:self arguments arg Return return:yes FunctionDef name:_parse_example_spec arg:self arguments arg Return return:yes Call FunctionDef name:_transform_feature arg:self arg:inputs arguments arg arg Assign Call If Call Raise Call Call If Compare Assign Call Return return:yes Call FunctionDef name:_variable_shape arg:self arguments arg Return return:yes Call FunctionDef name:_get_dense_tensor arg:self arg:inputs arg:weight_collections arg:trainable arguments arg arg arg arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "__getitem__",
    "source_code": "def __getitem__(self, key: int) -> Any:\n    return self._checkpoint_wrapped_module.__getitem__(key)",
    "docstring": "Forward indexing calls in case the module is a nn.Sequential.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\algorithms\\_checkpoint\\checkpoint_wrapper.py",
    "ast_data": "FunctionDef name:__getitem__ arg:self arg:key arguments arg arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_options_tensor_to_options",
    "source_code": "@classmethod\ndef _options_tensor_to_options(cls, serialized_options):\n    options = options_lib.Options()\n    if tensor_util.constant_value(serialized_options) is not None:\n        pb = dataset_options_pb2.Options.FromString(tensor_util.constant_value(serialized_options))\n        options._from_proto(pb)\n    return options",
    "docstring": "Converts options tensor to tf.data.Options object.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\data\\ops\\dataset_ops.py",
    "ast_data": "FunctionDef name:_options_tensor_to_options arg:cls arg:serialized_options arguments arg arg Assign Call If Compare Call Assign Call Call Call Return return:yes"
  },
  {
    "library": "sphinx",
    "name": "_describe_identifier",
    "source_code": "def _describe_identifier(self, signode: TextElement, identnode: TextElement, env: BuildEnvironment, symbol: Symbol) -> None:\n    raise NotImplementedError",
    "docstring": "Render the prefix into signode, and the last part into identnode.",
    "type": "method",
    "file_path": "sphinx\\sphinx\\domains\\cpp\\_ast.py",
    "ast_data": "FunctionDef name:_describe_identifier arg:self arg:signode arg:identnode arg:env arg:symbol arguments arg arg arg arg arg Raise"
  },
  {
    "library": "scikit-learn",
    "name": "MethodMapping",
    "source_code": "class MethodMapping:\n\n    def __init__(self):\n        self._routes = []\n\n    def __iter__(self):\n        return iter(self._routes)\n\n    def add(self, *, caller, callee):\n        if caller not in METHODS:\n            raise ValueError(f'Given caller:{caller} is not a valid method. Valid methods are: {METHODS}')\n        if callee not in METHODS:\n            raise ValueError(f'Given callee:{callee} is not a valid method. Valid methods are: {METHODS}')\n        self._routes.append(MethodPair(caller=caller, callee=callee))\n        return self\n\n    def _serialize(self):\n        result = list()\n        for route in self._routes:\n            result.append({'caller': route.caller, 'callee': route.callee})\n        return result\n\n    def __repr__(self):\n        return str(self._serialize())\n\n    def __str__(self):\n        return str(repr(self))",
    "docstring": "Stores the mapping between caller and callee methods for a router. This class is primarily used in a `` instances. .. versionadded:: 1.3",
    "type": "class",
    "file_path": "scikit-learn\\sklearn\\utils\\_metadata_requests.py",
    "ast_data": "ClassDef name:MethodMapping FunctionDef name:__init__ arg:self arguments arg Assign FunctionDef name:__iter__ arg:self arguments arg Return return:yes Call FunctionDef name:add arg:self arguments arg arg arg If Compare Raise Call If Compare Raise Call Call Call Return return:yes FunctionDef name:_serialize arg:self arguments arg Assign Call For Call Return return:yes FunctionDef name:__repr__ arg:self arguments arg Return return:yes Call Call FunctionDef name:__str__ arg:self arguments arg Return return:yes Call Call"
  },
  {
    "library": "sphinx",
    "name": "_config_status",
    "source_code": "@staticmethod\ndef _config_status(*, old_config: Config | None, new_config: Config, verbosity: int) -> tuple[int, str]:\n    if old_config is None:\n        return (CONFIG_NEW, '')\n    if old_config.extensions != new_config.extensions:\n        old_extensions = set(old_config.extensions)\n        new_extensions = set(new_config.extensions)\n        extensions = old_extensions ^ new_extensions\n        if len(extensions) == 1:\n            extension = extensions.pop()\n        else:\n            extension = f'{len(extensions)}'\n        return (CONFIG_EXTENSIONS_CHANGED, f' ({extension!r})')\n    if (changed_keys := _differing_config_keys(old_config, new_config)):\n        changed_num = len(changed_keys)\n        if changed_num == 1:\n            logger.info(__('The configuration has changed (1 option: %r)'), next(iter(changed_keys)))\n        elif changed_num <= 5 or verbosity >= 1:\n            logger.info(__('The configuration has changed (%d options: %s)'), changed_num, ', '.join(map(repr, sorted(changed_keys))))\n        else:\n            logger.info(__('The configuration has changed (%d options: %s, ...)'), changed_num, ', '.join(map(repr, sorted(changed_keys)[:5])))\n    for item in new_config.filter(frozenset({'env'})):\n        if old_config[item.name] != item.value:\n            return (CONFIG_CHANGED, f' ({item.name!r})')\n    return (CONFIG_OK, '')",
    "docstring": "Report the differences between two Config objects. Returns a triple of: 1. The new configuration 2. A status code indicating how the configuration has changed. 3. A status message indicating what has changed.",
    "type": "method",
    "file_path": "sphinx\\sphinx\\environment\\__init__.py",
    "ast_data": "FunctionDef name:_config_status arguments arg arg arg If Compare Return return:yes If Compare Assign Call Assign Call Assign If Compare Call Assign Call Assign Call Return return:yes If Call Assign Call If Compare Call Call Call Call If BoolOp Compare Compare Call Call Call Call Call Call Call Call Call Call For Call Call If Compare Return return:yes Return return:yes"
  },
  {
    "library": "scipy",
    "name": "_interpolated_poly",
    "source_code": "def _interpolated_poly(xvals, fvals, x):\n    xvals = np.asarray(xvals)\n    N = len(xvals)\n    Q = np.zeros([N, N])\n    D = np.zeros([N, N])\n    Q[:, 0] = fvals[:]\n    D[:, 0] = fvals[:]\n    for k in range(1, N):\n        alpha = D[k:, k - 1] - Q[k - 1:N - 1, k - 1]\n        diffik = xvals[0:N - k] - xvals[k:N]\n        Q[k:, k] = (xvals[k:] - x) / diffik * alpha\n        D[k:, k] = (xvals[:N - k] - x) / diffik * alpha\n    return np.sum(Q[-1, 1:]) + Q[-1, 0]",
    "docstring": "Compute p(x) for the polynomial passing through the specified locations. Use Neville's algorithm to compute p(x) where p is the minimal degree polynomial passing through the points xvals, fvals",
    "type": "function",
    "file_path": "scipy\\scipy\\optimize\\_zeros_py.py",
    "ast_data": "FunctionDef name:_interpolated_poly arg:xvals arg:fvals arg:x arguments arg arg arg Assign Call Assign Call Assign Call Assign Call Assign Assign For Call Assign Assign Assign Assign Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "buffer_to_ndarray",
    "source_code": "def buffer_to_ndarray(buffer: Buffer, dtype: tuple[DtypeKind, int, str, str], *, length: int, offset: int=0) -> np.ndarray:\n    kind, bit_width, _, _ = dtype\n    column_dtype = _NP_DTYPES.get(kind, {}).get(bit_width, None)\n    if column_dtype is None:\n        raise NotImplementedError(f'Conversion for {dtype} is not yet supported.')\n    ctypes_type = np.ctypeslib.as_ctypes_type(column_dtype)\n    if bit_width == 1:\n        assert length is not None, '`length` must be specified for a bit-mask buffer.'\n        pa = import_optional_dependency('pyarrow')\n        arr = pa.BooleanArray.from_buffers(pa.bool_(), length, [None, pa.foreign_buffer(buffer.ptr, length)], offset=offset)\n        return np.asarray(arr)\n    else:\n        data_pointer = ctypes.cast(buffer.ptr + offset * bit_width // 8, ctypes.POINTER(ctypes_type))\n        if length > 0:\n            return np.ctypeslib.as_array(data_pointer, shape=(length,))\n        return np.array([], dtype=ctypes_type)",
    "docstring": "Build a NumPy array from the passed buffer. Parameters ---------- buffer : Buffer Buffer to build a NumPy array from. dtype : tuple Data type of the buffer conforming protocol dtypes format. offset : int, default: 0 Number of elements to offset from the start of the buffer. length : int, optional If the buffer is a bit-mask, specifies a number of bits to read from the buffer. Has no effect otherwise. Returns ------- np.ndarray Notes ----- The returned array doesn't own the memory. The caller of this function is responsible for keeping the memory owner object alive as long as the returned NumPy array is being used.",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\interchange\\from_dataframe.py",
    "ast_data": "FunctionDef name:buffer_to_ndarray arg:buffer arg:dtype arguments arg arg arg arg Assign Assign Call Call If Compare Raise Call Assign Call If Compare Compare Assign Call Assign Call Call Call Return return:yes Call Assign Call Call If Compare Return return:yes Call Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "take_nd",
    "source_code": "def take_nd(arr: ArrayLike, indexer, axis: AxisInt=0, fill_value=lib.no_default, allow_fill: bool=True) -> ArrayLike:\n    if fill_value is lib.no_default:\n        fill_value = na_value_for_dtype(arr.dtype, compat=False)\n    elif lib.is_np_dtype(arr.dtype, 'mM'):\n        dtype, fill_value = maybe_promote(arr.dtype, fill_value)\n        if arr.dtype != dtype:\n            arr = arr.astype(dtype)\n    if not isinstance(arr, np.ndarray):\n        if not is_1d_only_ea_dtype(arr.dtype):\n            arr = cast('NDArrayBackedExtensionArray', arr)\n            return arr.take(indexer, fill_value=fill_value, allow_fill=allow_fill, axis=axis)\n        return arr.take(indexer, fill_value=fill_value, allow_fill=allow_fill)\n    arr = np.asarray(arr)\n    return _take_nd_ndarray(arr, indexer, axis, fill_value, allow_fill)",
    "docstring": "Specialized Cython take which sets NaN values in one pass This dispatches to `` defined on ExtensionArrays. Note: this function assumes that the indexer is a valid(ated) indexer with no out of bound indices. Parameters ---------- arr : np.ndarray or ExtensionArray Input array. indexer : ndarray 1-D array of indices to take, subarrays corresponding to -1 value indices are filed with fill_value axis : int, default 0 Axis to take from fill_value : any, default np.nan Fill value to replace -1 values with allow_fill : bool, default True If False, indexer is assumed to contain no -1 values so no filling will be done. This short-circuits computation of a mask. Result is undefined if allow_fill == False and -1 is present in indexer. Returns ------- subarray : np.ndarray or ExtensionArray May be the same type as the input, or cast to an ndarray.",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\array_algos\\take.py",
    "ast_data": "FunctionDef name:take_nd arg:arr arg:indexer arg:axis arg:fill_value arg:allow_fill arguments arg arg arg arg arg If Compare Assign Call If Call Assign Call If Compare Assign Call If Call If Call Assign Call Return return:yes Call Return return:yes Call Assign Call Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "TrapezoidMapTriFinder",
    "source_code": "class TrapezoidMapTriFinder(TriFinder):\n\n    def __init__(self, triangulation):\n        from matplotlib import _tri\n        super().__init__(triangulation)\n        self._cpp_trifinder = _tri.TrapezoidMapTriFinder(triangulation.get_cpp_triangulation())\n        self._initialize()\n\n    def __call__(self, x, y):\n        x = np.asarray(x, dtype=np.float64)\n        y = np.asarray(y, dtype=np.float64)\n        if x.shape != y.shape:\n            raise ValueError('x and y must be array-like with the same shape')\n        indices = self._cpp_trifinder.find_many(x.ravel(), y.ravel()).reshape(x.shape)\n        return indices\n\n    def _get_tree_stats(self):\n        return self._cpp_trifinder.get_tree_stats()\n\n    def _initialize(self):\n        self._cpp_trifinder.initialize()\n\n    def _print_tree(self):\n        self._cpp_trifinder.print_tree()",
    "docstring": "class implemented using the trapezoid map algorithm from the book \"Computational Geometry, Algorithms and Applications\", second edition, by M. de Berg, M. van Kreveld, M. Overmars and O. Schwarzkopf. The triangulation must be valid, i.e. it must not have duplicate points, triangles formed from colinear points, or overlapping triangles. The algorithm has some tolerance to triangles formed from colinear points, but this should not be relied upon.",
    "type": "class",
    "file_path": "matplotlib\\lib\\matplotlib\\tri\\_trifinder.py",
    "ast_data": "ClassDef name:TrapezoidMapTriFinder FunctionDef name:__init__ arg:self arg:triangulation arguments arg arg Call Call Assign Call Call Call FunctionDef name:__call__ arg:self arg:x arg:y arguments arg arg arg Assign Call Assign Call If Compare Raise Call Assign Call Call Call Call Return return:yes FunctionDef name:_get_tree_stats arg:self arguments arg Return return:yes Call FunctionDef name:_initialize arg:self arguments arg Call FunctionDef name:_print_tree arg:self arguments arg Call"
  },
  {
    "library": "tensorflow",
    "name": "ExitGradWhileContext",
    "source_code": "def ExitGradWhileContext(self, op, before):\n    grad_state = self.GetGradState(op, before)\n    if grad_state:\n        grad_state.grad_context.Exit()",
    "docstring": "Exit the WhileContext for gradient computation.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\control_flow_state.py",
    "ast_data": "FunctionDef name:ExitGradWhileContext arg:self arg:op arg:before arguments arg arg arg Assign Call If Call"
  },
  {
    "library": "scikit-learn",
    "name": "request_is_valid",
    "source_code": "def request_is_valid(item):\n    return item in VALID_REQUEST_VALUES",
    "docstring": "Check if an item is a valid request value (and not an alias). Parameters ---------- item : object The given item to be checked. Returns ------- result : bool Whether the given item is valid.",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\utils\\_metadata_requests.py",
    "ast_data": "FunctionDef name:request_is_valid arg:item arguments arg Return return:yes Compare"
  },
  {
    "library": "django",
    "name": "_init_translation_catalog",
    "source_code": "def _init_translation_catalog(self):\n    settingsfile = sys.modules[settings.__module__].__file__\n    localedir = os.path.join(os.path.dirname(settingsfile), 'locale')\n    translation = self._new_gnu_trans(localedir)\n    self.merge(translation)",
    "docstring": "Create a base catalog using global django translations.",
    "type": "method",
    "file_path": "django\\django\\utils\\translation\\trans_real.py",
    "ast_data": "FunctionDef name:_init_translation_catalog arg:self arguments arg Assign Assign Call Call Assign Call Call"
  },
  {
    "library": "matplotlib",
    "name": "null",
    "source_code": "@staticmethod\ndef null():\n    return Bbox([[np.inf, np.inf], [-np.inf, -np.inf]])",
    "docstring": "Create a new null from (inf, inf) to (-inf, -inf).",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\transforms.py",
    "ast_data": "FunctionDef name:null arguments Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "is_nested_list_like",
    "source_code": "def is_nested_list_like(obj: object) -> bool:\n    return is_list_like(obj) and hasattr(obj, '__len__') and (len(obj) > 0) and all((is_list_like(item) for item in obj))",
    "docstring": "Check if the object is list-like, and that all of its elements are also list-like. Parameters ---------- obj : The object to check Returns ------- is_list_like : bool Whether has list-like properties. Examples -------- >>> is_nested_list_like([[1, 2, 3]]) True >>> is_nested_list_like([{1, 2, 3}, {1, 2, 3}]) True >>> is_nested_list_like([\"foo\"]) False >>> is_nested_list_like([]) False >>> is_nested_list_like([[1, 2, 3], 1]) False Notes ----- This won't reliably detect whether a consumable iterator (e. g. a generator) is a nested-list-like without consuming the iterator. To avoid consuming it, we always return False if the outer container doesn't define . See Also -------- is_list_like",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\dtypes\\inference.py",
    "ast_data": "FunctionDef name:is_nested_list_like arg:obj arguments arg Return return:yes BoolOp Call Call Compare Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "_display_window",
    "source_code": "def _display_window(self, pf: PythonFileT, r: LintResult) -> Iterator[str]:\n    if r.char is None or not self.report_column_numbers:\n        yield f'{pf.path}:{r.line}: {r.name}'\n    else:\n        yield f'{pf.path}:{r.line}:{r.char + 1}: {r.name}'\n    begin = max((r.line or 0) - ErrorLines.BEFORE, 1)\n    end = min(begin + ErrorLines.WINDOW, 1 + len(pf.lines))\n    for lineno in range(begin, end):\n        source_line = pf.lines[lineno - 1].rstrip()\n        yield f'{lineno:5} | {source_line}'\n        if lineno == r.line:\n            spaces = 8 + (r.char or 0)\n            carets = len(source_line) if r.char is None else r.length or 1\n            yield (spaces * ' ' + carets * '^')",
    "docstring": "Display a window onto the code with an error",
    "type": "method",
    "file_path": "pytorch\\tools\\linter\\adapters\\_linter.py",
    "ast_data": "FunctionDef name:_display_window arg:self arg:pf arg:r arguments arg arg arg If BoolOp Compare Assign Call BoolOp Assign Call Call For Call Assign Call If Compare Assign BoolOp Assign Compare Call BoolOp"
  },
  {
    "library": "sphinx",
    "name": "_get_safe_url",
    "source_code": "def _get_safe_url(url: str) -> str:\n    parts = urlsplit(url)\n    if parts.username is None:\n        return url\n    else:\n        frags = list(parts)\n        if parts.port:\n            frags[1] = f'{parts.username}@{parts.hostname}:{parts.port}'\n        else:\n            frags[1] = f'{parts.username}@{parts.hostname}'\n        return urlunsplit(frags)",
    "docstring": "Gets version of *url* with basic auth passwords obscured. This function returns results suitable for printing and logging. E.g.: => :param url: a url :type url: ``",
    "type": "function",
    "file_path": "sphinx\\sphinx\\ext\\intersphinx\\_load.py",
    "ast_data": "FunctionDef name:_get_safe_url arg:url arguments arg Assign Call If Compare Return return:yes Assign Call If Assign Assign Return return:yes Call"
  },
  {
    "library": "numpy",
    "name": "polyvander",
    "source_code": "def polyvander(x, deg):\n    ideg = pu._as_int(deg, 'deg')\n    if ideg < 0:\n        raise ValueError('deg must be non-negative')\n    x = np.array(x, copy=None, ndmin=1) + 0.0\n    dims = (ideg + 1,) + x.shape\n    dtyp = x.dtype\n    v = np.empty(dims, dtype=dtyp)\n    v[0] = x * 0 + 1\n    if ideg > 0:\n        v[1] = x\n        for i in range(2, ideg + 1):\n            v[i] = v[i - 1] * x\n    return np.moveaxis(v, 0, -1)",
    "docstring": "Vandermonde matrix of given degree. Returns the Vandermonde matrix of degree and sample points . The Vandermonde matrix is defined by .. math:: V[..., i] = x^i, where ``0 >> from numpy.polynomial import polynomial as P >>> x, deg = [-1, 2, 3], 5 >>> P.polyvander(x=x, deg=deg) array([[ 1., -1., 1., -1., 1., -1.], [ 1., 2., 4., 8., 16., 32.], [ 1., 3., 9., 27., 81., 243.]])",
    "type": "function",
    "file_path": "numpy\\numpy\\polynomial\\polynomial.py",
    "ast_data": "FunctionDef name:polyvander arg:x arg:deg arguments arg arg Assign Call If Compare Raise Call Assign Call Assign Assign Assign Call Assign If Compare Assign For Call Assign Return return:yes Call"
  },
  {
    "library": "scrapy",
    "name": "load",
    "source_code": "def load(spider_name):\n    pass",
    "docstring": "Return the Spider class for the given spider name. If the spider name is not found, it must raise a KeyError.",
    "type": "method",
    "file_path": "scrapy\\scrapy\\interfaces.py",
    "ast_data": "FunctionDef name:load arg:spider_name arguments arg"
  },
  {
    "library": "tensorflow",
    "name": "convert_variables_to_constants_from_session_graph",
    "source_code": "def convert_variables_to_constants_from_session_graph(session, graph_def, output_node_names, variable_names_allowlist=None, variable_names_denylist=None):\n    graph_def, _ = _replace_variables_by_constants(converter_data=_SessionConverterData(session=session, graph_def=graph_def, output_node_names=output_node_names, variable_names_allowlist=variable_names_allowlist, variable_names_denylist=variable_names_denylist))\n    return graph_def",
    "docstring": "Replaces all the variables in a graph with constants of the same values. This function works similarly to convert_variables_to_constants_v2, but it retrieves the constant values from a Session instead of from a ConcreteFunction. This is useful when converting graphs generated from TensorFlow V1, where ConcreteFunctions are not available. This also differs from graph_util.convert_variables_to_constants in that it supports resource variables when V2 control flow constructions are present. Args: session: Active TensorFlow session containing the variables. graph_def: A GraphDef to convert. output_node_names: List of name strings for the result nodes of the graph. variable_names_allowlist: The set of variable names to convert (by default, all variables are converted). variable_names_denylist: The set of variable names to omit converting to constants. Returns: An optimized GraphDef.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\convert_to_constants.py",
    "ast_data": "FunctionDef name:convert_variables_to_constants_from_session_graph arg:session arg:graph_def arg:output_node_names arg:variable_names_allowlist arg:variable_names_denylist arguments arg arg arg arg arg Assign Call Call Return return:yes"
  },
  {
    "library": "django",
    "name": "clear_checkbox_name",
    "source_code": "def clear_checkbox_name(self, name):\n    return name + '-clear'",
    "docstring": "Given the name of the file input, return the name of the clear checkbox input.",
    "type": "method",
    "file_path": "django\\django\\forms\\widgets.py",
    "ast_data": "FunctionDef name:clear_checkbox_name arg:self arg:name arguments arg arg Return return:yes"
  },
  {
    "library": "authlib",
    "name": "validate_response_types_supported",
    "source_code": "def validate_response_types_supported(self):\n    response_types_supported = self.get('response_types_supported')\n    if not response_types_supported:\n        raise ValueError('\"response_types_supported\" is required')\n    if not isinstance(response_types_supported, list):\n        raise ValueError('\"response_types_supported\" MUST be JSON array')",
    "docstring": "REQUIRED. JSON array containing a list of the OAuth 2.0 \"response_type\" values that this authorization server supports. The array values used are the same as those used with the \"response_types\" parameter defined by \"OAuth 2.0 Dynamic Client Registration Protocol\" [RFC7591].",
    "type": "method",
    "file_path": "authlib\\authlib\\oauth2\\rfc8414\\models.py",
    "ast_data": "FunctionDef name:validate_response_types_supported arg:self arguments arg Assign Call If Raise Call If Call Raise Call"
  },
  {
    "library": "tensorflow",
    "name": "random_bernoulli",
    "source_code": "@dispatch.add_dispatch_support\n@doc_controls.do_not_generate_docs\ndef random_bernoulli(shape, p=0.0, dtype=None, seed=None):\n    if dtype is None:\n        dtype = floatx()\n    if seed is None:\n        seed = np.random.randint(10000000.0)\n    return array_ops.where_v2(random_ops.random_uniform(shape, dtype=dtype, seed=seed) <= p, array_ops.ones(shape, dtype=dtype), array_ops.zeros(shape, dtype=dtype))",
    "docstring": "Returns a tensor with random bernoulli distribution of values. Args: shape: A tuple of integers, the shape of tensor to create. p: A float, , probability of bernoulli distribution. dtype: String, dtype of returned tensor. seed: Integer, random seed. Returns: A tensor.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\backend.py",
    "ast_data": "FunctionDef name:random_bernoulli arg:shape arg:p arg:dtype arg:seed arguments arg arg arg arg If Compare Assign Call If Compare Assign Call Return return:yes Call Compare Call Call Call"
  },
  {
    "library": "pandas",
    "name": "dtype",
    "source_code": "@property\ndef dtype(self) -> ExtensionDtype:\n    raise AbstractMethodError(self)",
    "docstring": "An instance of ExtensionDtype. See Also -------- api.extensions.ExtensionDtype : Base class for extension dtypes. api.extensions.ExtensionArray : Base class for extension array types. api.extensions.ExtensionArray.dtype : The dtype of an ExtensionArray. Series.dtype : The dtype of a Series. DataFrame.dtype : The dtype of a DataFrame. Examples -------- >>> pd.array([1, 2, 3]).dtype Int64Dtype()",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\arrays\\base.py",
    "ast_data": "FunctionDef name:dtype arg:self arguments arg Raise Call"
  },
  {
    "library": "pytorch",
    "name": "nodes",
    "source_code": "@property\ndef nodes(self):\n    return self._node",
    "docstring": "Returns a dictionary of all nodes to their attributes.",
    "type": "method",
    "file_path": "pytorch\\torch\\package\\_digraph.py",
    "ast_data": "FunctionDef name:nodes arg:self arguments arg Return return:yes"
  },
  {
    "library": "pandas",
    "name": "_reduce",
    "source_code": "def _reduce(self, op, name: str, *, axis: Axis=0, skipna: bool=True, numeric_only: bool=False, filter_type=None, **kwds):\n    delegate = self._values\n    if axis is not None:\n        self._get_axis_number(axis)\n    if isinstance(delegate, ExtensionArray):\n        return delegate._reduce(name, skipna=skipna, **kwds)\n    else:\n        if numeric_only and self.dtype.kind not in 'iufcb':\n            kwd_name = 'numeric_only'\n            if name in ['any', 'all']:\n                kwd_name = 'bool_only'\n            raise TypeError(f'Series.{name} does not allow {kwd_name}={numeric_only} with non-numeric dtypes.')\n        return op(delegate, skipna=skipna, **kwds)",
    "docstring": "Perform a reduction operation. If we have an ndarray as a value, then simply perform the operation, otherwise delegate to the object.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\series.py",
    "ast_data": "FunctionDef name:_reduce arg:self arg:op arg:name arguments arg arg arg arg arg arg arg arg Assign If Compare Call If Call Return return:yes Call If BoolOp Compare Assign If Compare Assign Raise Call Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "set_title",
    "source_code": "def set_title(self, title, prop=None):\n    self._legend_title_box._text.set_text(title)\n    if title:\n        self._legend_title_box._text.set_visible(True)\n        self._legend_title_box.set_visible(True)\n    else:\n        self._legend_title_box._text.set_visible(False)\n        self._legend_title_box.set_visible(False)\n    if prop is not None:\n        self._legend_title_box._text.set_fontproperties(prop)\n    self.stale = True",
    "docstring": "Set legend title and title style. Parameters ---------- title : str The legend title. prop : or or The font properties of the legend title. If a , it is interpreted as a fontconfig pattern parsed by . If a , it is interpreted as the absolute path to a font file.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\legend.py",
    "ast_data": "FunctionDef name:set_title arg:self arg:title arg:prop arguments arg arg arg Call If Call Call Call Call If Compare Call Assign"
  },
  {
    "library": "tensorflow",
    "name": "distributed_mode",
    "source_code": "@property\ndef distributed_mode(self):\n    return bool(self._cluster_spec) and self._task_type != _TaskType.EVALUATOR",
    "docstring": "Whether it is distributed training or not.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\distribute_coordinator.py",
    "ast_data": "FunctionDef name:distributed_mode arg:self arguments arg Return return:yes BoolOp Call Compare"
  },
  {
    "library": "pytorch",
    "name": "forward",
    "source_code": "def forward(self, *args: Any) -> Any:\n    nested_tensors = _map_tensor_data(self._nested_input)\n    result = self.forward_extended(*nested_tensors)\n    del self._nested_input\n    self._nested_output = result\n    return tuple(_iter_tensors(result))",
    "docstring": "Shared forward utility.",
    "type": "method",
    "file_path": "pytorch\\torch\\autograd\\function.py",
    "ast_data": "FunctionDef name:forward arg:self arguments arg arg Assign Call Assign Call Assign Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "parse_from_yaml",
    "source_code": "def parse_from_yaml(ei: dict[str, object]) -> dict[ETKernelKey, BackendMetadata]:\n    e = ei.copy()\n    if (kernels := e.pop('kernels', None)) is None:\n        return {}\n    type_alias: dict[str, list[str]] = e.pop('type_alias', {})\n    dim_order_alias: dict[str, list[str]] = e.pop('dim_order_alias', {})\n    dim_order_alias.pop('__line__', None)\n    kernel_mapping: dict[ETKernelKey, BackendMetadata] = {}\n    for entry in kernels:\n        arg_meta = entry.get('arg_meta')\n        if arg_meta is not None:\n            arg_meta.pop('__line__')\n        kernel_name = entry.get('kernel_name')\n        namespace_helper = NamespaceHelper.from_namespaced_entity(kernel_name, max_level=3)\n        kernel_namespace = namespace_helper.get_cpp_namespace(default='at')\n        backend_metadata = BackendMetadata(kernel=namespace_helper.entity_name, structured=False, cpp_namespace=kernel_namespace + '::native')\n        kernel_keys = [ETKernelKey((), default=True)] if arg_meta is None else ETKernelKey.gen_from_yaml(arg_meta, type_alias, dim_order_alias)\n        for kernel_key in kernel_keys:\n            assert kernel_key not in kernel_mapping, 'Duplicate kernel key: ' + str(kernel_key) + ' ' + str(e)\n            kernel_mapping[kernel_key] = backend_metadata\n    return kernel_mapping",
    "docstring": "Given a loaded yaml representing kernel assignment information, extract the mapping from to (the latter representing the kernel instance) Args: ei: Dict keys {kernels, type_alias, dim_order_alias} See ETKernelKey for description of arguments",
    "type": "function",
    "file_path": "pytorch\\torchgen\\executorch\\parse.py",
    "ast_data": "FunctionDef name:parse_from_yaml arg:ei arguments arg Assign Call If Compare Call Return return:no Call Call Call For Assign Call If Compare Call Assign Call Assign Call Assign Call Assign Call Assign Compare Call Call For Compare Call Call Assign Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "reverse_dict",
    "source_code": "def reverse_dict(d):\n    result = OrderedDict()\n    for key in d:\n        for val in d[key]:\n            result[val] = result.get(val, ()) + (key,)\n    return result",
    "docstring": "Reverses direction of dependence dict >>> d = {\"a\": (1, 2), \"b\": (2, 3), \"c\": ()} >>> reverse_dict(d) # doctest: +SKIP {1: ('a',), 2: ('a', 'b'), 3: ('b',)} :note: dict order are not deterministic. As we iterate on the input dict, it make the output of this function depend on the dict order. So this function output order should be considered as undeterministic.",
    "type": "function",
    "file_path": "pytorch\\torch\\fx\\experimental\\unification\\multipledispatch\\utils.py",
    "ast_data": "FunctionDef name:reverse_dict arg:d arguments arg Assign Call For For Assign Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "iter_segments",
    "source_code": "def iter_segments(self, transform=None, remove_nans=True, clip=None, snap=False, stroke_width=1.0, simplify=None, curves=True, sketch=None):\n    if not len(self):\n        return\n    cleaned = self.cleaned(transform=transform, remove_nans=remove_nans, clip=clip, snap=snap, stroke_width=stroke_width, simplify=simplify, curves=curves, sketch=sketch)\n    NUM_VERTICES_FOR_CODE = self.NUM_VERTICES_FOR_CODE\n    STOP = self.STOP\n    vertices = iter(cleaned.vertices)\n    codes = iter(cleaned.codes)\n    for curr_vertices, code in zip(vertices, codes):\n        if code == STOP:\n            break\n        extra_vertices = NUM_VERTICES_FOR_CODE[code] - 1\n        if extra_vertices:\n            for i in range(extra_vertices):\n                next(codes)\n                curr_vertices = np.append(curr_vertices, next(vertices))\n        yield (curr_vertices, code)",
    "docstring": "Iterate over all curve segments in the path. Each iteration returns a pair `Path~matplotlib.transforms.Transformshould_simplifypath.simplifypath.simplify_threshold`. curves : bool, optional If True, curve segments will be returned as curve segments. If False, all curves will be converted to line segments. sketch : None or sequence, optional If not None, must be a 3-tuple of the form (scale, length, randomness), representing the sketch parameters.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\path.py",
    "ast_data": "FunctionDef name:iter_segments arg:self arg:transform arg:remove_nans arg:clip arg:snap arg:stroke_width arg:simplify arg:curves arg:sketch arguments arg arg arg arg arg arg arg arg arg If Call Return return:no Assign Call Assign Assign Assign Call Assign Call For Call If Compare Assign If For Call Call Assign Call Call"
  },
  {
    "library": "kornia",
    "name": "depth_from_plane_equation",
    "source_code": "def depth_from_plane_equation(plane_normals: Tensor, plane_offsets: Tensor, points_uv: Tensor, camera_matrix: Tensor, eps: float=1e-08) -> Tensor:\n    KORNIA_CHECK_SHAPE(plane_normals, ['B', '3'])\n    KORNIA_CHECK_SHAPE(plane_offsets, ['B', '1'])\n    KORNIA_CHECK_SHAPE(points_uv, ['B', 'N', '2'])\n    KORNIA_CHECK_SHAPE(camera_matrix, ['B', '3', '3'])\n    points_xy = normalize_points_with_intrinsics(points_uv, camera_matrix)\n    rays = convert_points_to_homogeneous(points_xy)\n    plane_normals_exp = plane_normals.unsqueeze(1)\n    denom = torch.sum(rays * plane_normals_exp, dim=-1)\n    denom_abs = torch.abs(denom)\n    zero_mask = denom_abs < eps\n    denom = torch.where(zero_mask, eps * torch.sign(denom), denom)\n    depth = plane_offsets / denom\n    return depth",
    "docstring": "Compute depth values from plane equations and pixel coordinates. Args: plane_normals (Tensor): Plane normal vectors of shape (B, 3). plane_offsets (Tensor): Plane offsets of shape (B, 1). points_uv (Tensor): Pixel coordinates of shape (B, N, 2). camera_matrix (Tensor): Camera intrinsic matrix of shape (B, 3, 3). eps: epsilon for numerical stability. Returns: Tensor: Computed depth values at the given pixels, shape (B, N).",
    "type": "function",
    "file_path": "kornia\\kornia\\geometry\\depth.py",
    "ast_data": "FunctionDef name:depth_from_plane_equation arg:plane_normals arg:plane_offsets arg:points_uv arg:camera_matrix arg:eps arguments arg arg arg arg arg Call Call Call Call Assign Call Assign Call Assign Call Assign Call Assign Call Assign Compare Assign Call Call Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_create_tpu_topology",
    "source_code": "def _create_tpu_topology(core_locations: List[_CoreLocation], num_tasks: int, num_devices_per_task: int) -> topology.Topology:\n    assert min([l.x for l in core_locations]) == 0\n    assert min([l.y for l in core_locations]) == 0\n    assert min([l.z for l in core_locations]) == 0\n    assert min([l.core for l in core_locations]) == 0\n    x_max = max([l.x for l in core_locations])\n    y_max = max([l.y for l in core_locations])\n    z_max = max([l.z for l in core_locations])\n    core_max = max([l.core for l in core_locations])\n    mesh_shape = [x_max + 1, y_max + 1, z_max + 1, core_max + 1]\n    device_coordinates = [[l.x, l.y, l.z, l.core] for l in core_locations]\n    device_coordinates = numpy_compat.np_asarray(device_coordinates).reshape(num_tasks, num_devices_per_task, 4)\n    return topology.Topology(mesh_shape=mesh_shape, device_coordinates=device_coordinates)",
    "docstring": "Returns a Topology object build from a _CoreLocation list. Args: core_locations: A list of _CoreLocation objects sorted first by TF task ID and then by per-task device ordinals. num_tasks: The number of TF tasks in the cluster. num_devices_per_task: The number of TPU devices local to each task.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\dtensor\\python\\tpu_util.py",
    "ast_data": "FunctionDef name:_create_tpu_topology arg:core_locations arg:num_tasks arg:num_devices_per_task arguments arg arg arg Compare Call Compare Call Compare Call Compare Call Assign Call Assign Call Assign Call Assign Call Assign Assign Assign Call Call Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "decode",
    "source_code": "def decode(self, encoding, errors: str='strict', dtype: str | DtypeObj | None=None):\n    if dtype is not None and (not is_string_dtype(dtype)):\n        raise ValueError(f'dtype must be string or object, got dtype={dtype!r}')\n    if dtype is None and get_option('future.infer_string'):\n        dtype = 'str'\n    if encoding in _cpython_optimized_decoders:\n        f = lambda x: x.decode(encoding, errors)\n    else:\n        decoder = codecs.getdecoder(encoding)\n        f = lambda x: decoder(x, errors)[0]\n    arr = self._data.array\n    result = arr._str_map(f)\n    return self._wrap_result(result, dtype=dtype)",
    "docstring": "Decode character string in the Series/Index using indicated encoding. Equivalent to :meth: in python2 and :meth: in python3. Parameters ---------- encoding : str Specifies the encoding to be used. errors : str, optional Specifies the error handling scheme. Possible values are those supported by :meth:. dtype : str or dtype, optional The dtype of the result. When not ``. .. versionadded:: 2.3.0 Returns ------- Series or Index A Series or Index with decoded strings. See Also -------- Series.str.encode : Encodes strings into bytes in a Series/Index. Examples -------- For Series: >>> ser = pd.Series([b\"cow\", b\"123\", b\"()\"]) >>> ser.str.decode(\"ascii\") 0 cow 1 123 2 () dtype: object",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\strings\\accessor.py",
    "ast_data": "FunctionDef name:decode arg:self arg:encoding arg:errors arg:dtype arguments arg arg arg arg If BoolOp Compare Call Raise Call If BoolOp Compare Call Assign If Compare Assign arguments arg Call Assign Call Assign arguments arg Call Assign Assign Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "get_source",
    "source_code": "def get_source(stacklevel: int) -> str:\n    frame = inspect.getframeinfo(sys._getframe(stacklevel))\n    source = f'{frame.filename}:{frame.lineno}'\n    return source",
    "docstring": "Get a string that represents the caller. Example: \"/path/to/foo.py:42\" Use stacklevel=1 to get the caller's source Use stacklevel=2 to get the caller's caller's source etc.",
    "type": "function",
    "file_path": "pytorch\\torch\\_library\\utils.py",
    "ast_data": "FunctionDef name:get_source arg:stacklevel arguments arg Assign Call Call Assign Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "__len__",
    "source_code": "def __len__(self):\n    if self._is_all_lists():\n        grid_size = len(ParameterGrid(self.param_distributions))\n        return min(self.n_iter, grid_size)\n    else:\n        return self.n_iter",
    "docstring": "Number of points that will be sampled.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\model_selection\\_search.py",
    "ast_data": "FunctionDef name:__len__ arg:self arguments arg If Call Assign Call Call Return return:yes Call Return return:yes"
  },
  {
    "library": "sphinx",
    "name": "add_js_file",
    "source_code": "def add_js_file(self, filename: str | None, priority: int=500, loading_method: str | None=None, **kwargs: Any) -> None:\n    if loading_method == 'async':\n        kwargs['async'] = 'async'\n    elif loading_method == 'defer':\n        kwargs['defer'] = 'defer'\n    filename = filename or ''\n    self.registry.add_js_file(filename, priority=priority, **kwargs)\n    with contextlib.suppress(AttributeError):\n        self.builder.add_js_file(filename, priority=priority, **kwargs)",
    "docstring": "Register a JavaScript file to include in the HTML output. :param filename: The name of a JavaScript file that the default HTML template will include. It must be relative to the HTML static path, or a full URI with scheme, or ` tag. If the special keyword argument ```html_js_fileshtml-page-context`. And it allows keyword arguments as attributes of script tag. .. versionchanged:: 3.5 Take priority argument. Allow to add a JavaScript file to the specific page. .. versionchanged:: 4.4 Take loading_method argument. Allow to change the loading method of the JavaScript file.",
    "type": "method",
    "file_path": "sphinx\\sphinx\\application.py",
    "ast_data": "FunctionDef name:add_js_file arg:self arg:filename arg:priority arg:loading_method arguments arg arg arg arg arg If Compare Assign If Compare Assign Assign BoolOp Call With Call Call"
  },
  {
    "library": "matplotlib",
    "name": "_proportional_y",
    "source_code": "def _proportional_y(self):\n    if isinstance(self.norm, colors.BoundaryNorm) or self.boundaries is not None:\n        y = self._boundaries - self._boundaries[self._inside][0]\n        y = y / (self._boundaries[self._inside][-1] - self._boundaries[self._inside][0])\n        if self.spacing == 'uniform':\n            yscaled = self._forward_boundaries(self._boundaries)\n        else:\n            yscaled = y\n    else:\n        y = self.norm(self._boundaries.copy())\n        y = np.ma.filled(y, np.nan)\n        yscaled = y\n    y = y[self._inside]\n    yscaled = yscaled[self._inside]\n    norm = colors.Normalize(y[0], y[-1])\n    y = np.ma.filled(norm(y), np.nan)\n    norm = colors.Normalize(yscaled[0], yscaled[-1])\n    yscaled = np.ma.filled(norm(yscaled), np.nan)\n    automin = yscaled[1] - yscaled[0]\n    automax = yscaled[-1] - yscaled[-2]\n    extendlength = [0, 0]\n    if self._extend_lower() or self._extend_upper():\n        extendlength = self._get_extension_lengths(self.extendfrac, automin, automax, default=0.05)\n    return (y, extendlength)",
    "docstring": "Return colorbar data coordinates for the boundaries of a proportional colorbar, plus extension lengths if required:",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\colorbar.py",
    "ast_data": "FunctionDef name:_proportional_y arg:self arguments arg If BoolOp Call Compare Assign Assign If Compare Assign Call Assign Assign Call Call Assign Call Assign Assign Assign Assign Call Assign Call Call Assign Call Assign Call Call Assign Assign Assign If BoolOp Call Call Assign Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "layout",
    "source_code": "def layout(string, font, *, kern_mode=Kerning.DEFAULT):\n    x = 0\n    prev_glyph_idx = None\n    char_to_font = font._get_fontmap(string)\n    base_font = font\n    for char in string:\n        font = char_to_font.get(char, base_font)\n        glyph_idx = font.get_char_index(ord(char))\n        kern = base_font.get_kerning(prev_glyph_idx, glyph_idx, kern_mode) / 64 if prev_glyph_idx is not None else 0.0\n        x += kern\n        glyph = font.load_glyph(glyph_idx, flags=LoadFlags.NO_HINTING)\n        yield LayoutItem(font, char, glyph_idx, x, kern)\n        x += glyph.linearHoriAdvance / 65536\n        prev_glyph_idx = glyph_idx",
    "docstring": "Render *string* with *font*. For each character in *string*, yield a LayoutItem instance. When such an instance is yielded, the font's glyph is set to the corresponding character. Parameters ---------- string : str The string to be rendered. font : FT2Font The font. kern_mode : Kerning A FreeType kerning mode. Yields ------ LayoutItem",
    "type": "function",
    "file_path": "matplotlib\\lib\\matplotlib\\_text_helpers.py",
    "ast_data": "FunctionDef name:layout arg:string arg:font arguments arg arg arg Assign Assign Assign Call Assign For Assign Call Assign Call Call Assign Compare Call Assign Call Call Assign"
  },
  {
    "library": "matplotlib",
    "name": "axvspan",
    "source_code": "@_docstring.interpd\ndef axvspan(self, xmin, xmax, ymin=0, ymax=1, **kwargs):\n    self._check_no_units([ymin, ymax], ['ymin', 'ymax'])\n    (xmin, xmax), = self._process_unit_info([('x', [xmin, xmax])], kwargs)\n    p = mpatches.Rectangle((xmin, ymin), xmax - xmin, ymax - ymin, **kwargs)\n    p.set_transform(self.get_xaxis_transform(which='grid'))\n    iy = self.dataLim.intervaly.copy()\n    my = self.dataLim.minposy\n    self.add_patch(p)\n    self.dataLim.intervaly = iy\n    self.dataLim.minposy = my\n    p.get_path()._interpolation_steps = mpl.axis.GRIDLINE_INTERPOLATION_STEPS\n    self._request_autoscale_view('x')\n    return p",
    "docstring": "Add a vertical span (rectangle) across the Axes. The rectangle spans from *xmin* to *xmax* horizontally, and, by default, the whole y-axis vertically. The y-span can be set using *ymin* (default: 0) and *ymax* (default: 1) which are in axis units; e.g. `~.Axes.set_ylim~matplotlib.patches.Rectangle~matplotlib.patches.Rectangle` properties %(Rectangle:kwdoc)s See Also -------- axhspan : Add a horizontal span across the Axes. Examples -------- Draw a vertical, green, translucent rectangle from x = 1.25 to x = 1.55 that spans the yrange of the Axes. >>> axvspan(1.25, 1.55, facecolor='g', alpha=0.5)",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\axes\\_axes.py",
    "ast_data": "FunctionDef name:axvspan arg:self arg:xmin arg:xmax arg:ymin arg:ymax arguments arg arg arg arg arg arg Call Assign Call Assign Call Call Call Assign Call Assign Call Assign Assign Assign Call Call Return return:yes"
  },
  {
    "library": "authlib",
    "name": "get_jwt_config",
    "source_code": "def get_jwt_config(self, grant):\n    raise NotImplementedError()",
    "docstring": "Get the JWT configuration for OpenIDCode extension. The JWT configuration will be used to generate ``. Developers MUST implement this method in subclass, e.g.:: def get_jwt_config(self, grant): return { \"key\": read_private_key_file(key_path), \"alg\": \"RS256\", \"iss\": \"issuer-identity\", \"exp\": 3600, } :param grant: AuthorizationCodeGrant instance :return: dict",
    "type": "method",
    "file_path": "authlib\\authlib\\oidc\\core\\grants\\code.py",
    "ast_data": "FunctionDef name:get_jwt_config arg:self arg:grant arguments arg arg Raise Call"
  },
  {
    "library": "scikit-learn",
    "name": "_search_estimator_has",
    "source_code": "def _search_estimator_has(attr):\n\n    def check(self):\n        _check_refit(self, attr)\n        if hasattr(self, 'best_estimator_'):\n            getattr(self.best_estimator_, attr)\n            return True\n        getattr(self.estimator, attr)\n        return True\n    return check",
    "docstring": "Check if we can delegate a method to the underlying estimator. Calling a prediction method will only be available if . In such case, we check first the fitted best estimator. If it is not fitted, we check the unfitted estimator. Checking the unfitted estimator allows to use on the instance even before calling .",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\model_selection\\_search.py",
    "ast_data": "FunctionDef name:_search_estimator_has arg:attr arguments arg FunctionDef name:check arg:self arguments arg Call If Call Call Return return:yes Call Return return:yes Return return:yes"
  },
  {
    "library": "scipy",
    "name": "getrow",
    "source_code": "def getrow(self, i):\n    M, N = self.shape\n    if i < 0:\n        i += M\n    if i < 0 or i >= M:\n        raise IndexError('row index out of bounds')\n    new = self._lil_container((1, N), dtype=self.dtype)\n    new.rows[0] = self.rows[i][:]\n    new.data[0] = self.data[i][:]\n    return new",
    "docstring": "Returns a copy of the 'i'th row.",
    "type": "method",
    "file_path": "scipy\\scipy\\sparse\\_lil.py",
    "ast_data": "FunctionDef name:getrow arg:self arg:i arguments arg arg Assign If Compare If BoolOp Compare Compare Raise Call Assign Call Assign Assign Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "placeholder",
    "source_code": "@compatibility(is_backward_compatible=True)\ndef placeholder(self, target: 'Target', args: tuple[Argument, ...], kwargs: dict[str, Any]) -> Proxy:\n    assert isinstance(target, str)\n    default_value = next(iter(args)) if args else inspect.Signature.empty\n    return Proxy(self.new_graph.placeholder(target, default_value=default_value), self.tracer)",
    "docstring": "Execute a `Node `__ for details on semantics args (Tuple): Tuple of positional args for this invocation kwargs (Dict): Dict of keyword arguments for this invocation",
    "type": "method",
    "file_path": "pytorch\\torch\\fx\\interpreter.py",
    "ast_data": "FunctionDef name:placeholder arg:self arg:target arg:args arg:kwargs arguments arg arg arg arg Call Assign Call Call Return return:yes Call Call Call"
  },
  {
    "library": "matplotlib",
    "name": "get_mutation_aspect",
    "source_code": "def get_mutation_aspect(self):\n    return self._mutation_aspect if self._mutation_aspect is not None else 1",
    "docstring": "Return the aspect ratio of the bbox mutation.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\patches.py",
    "ast_data": "FunctionDef name:get_mutation_aspect arg:self arguments arg Return return:yes Compare"
  },
  {
    "library": "matplotlib",
    "name": "get_style",
    "source_code": "def get_style(self):\n    return self._slant",
    "docstring": "Return the font style. Values are: 'normal', 'italic' or 'oblique'.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\font_manager.py",
    "ast_data": "FunctionDef name:get_style arg:self arguments arg Return return:yes"
  },
  {
    "library": "django",
    "name": "GEOSException",
    "source_code": "class GEOSException(Exception):\n    pass",
    "docstring": "The base GEOS exception, indicates a GEOS-related error.",
    "type": "class",
    "file_path": "django\\django\\contrib\\gis\\geos\\error.py",
    "ast_data": "ClassDef name:GEOSException"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, decay, num_updates=None, zero_debias=False, name='ExponentialMovingAverage'):\n    self._decay = decay\n    self._num_updates = num_updates\n    self._zero_debias = zero_debias\n    self._name = name\n    self._averages = {}",
    "docstring": "Creates a new ExponentialMovingAverage object. The method has to be called to create shadow variables. Follow-on calls to the method will update the moving averages in the shadow variables. (In TF 1.x graphs will return an update op to update the moving averages which must be explicitly run). The optional parameter allows one to tweak the decay rate dynamically. It is typical to pass the count of training steps, usually kept in a variable that is incremented at each step, in which case the decay rate is lower at the start of training. This makes moving averages move faster. If passed, the actual decay rate used is: Args: decay: A scalar float value, , or . The decay parameter. num_updates: Optional count of number of updates applied to variables. zero_debias: If , zero debias moving-averages that are initialized with tensors. (Note: moving averages may not be initialized with non-variable tensors when eager execution is enabled). name: String. Optional prefix name to use for the name of ops added in .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\training\\moving_averages.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:decay arg:num_updates arg:zero_debias arg:name arguments arg arg arg arg arg Assign Assign Assign Assign Assign"
  },
  {
    "library": "tensorflow",
    "name": "np_reshape",
    "source_code": "def np_reshape(a, /, shape=None, *, newshape=None, order='C', copy=None):\n    if shape is None:\n        shape = newshape\n    if np.lib.NumpyVersion(np.__version__) >= '2.1.0.rc0':\n        if shape is None and newshape is None:\n            return np.asarray(a, order=order, copy=copy)\n        return np.reshape(a, shape, order=order, copy=copy)\n    return np.reshape(a, shape, order=order)",
    "docstring": "Reshapes an array without changing its data. NumPy 2.1.0rc1 added shape and copy arguments to numpy.reshape. See Both newshape and shape keywords are supported, but newshape is going to be deprecated. Use instead. Besides, shape cannot be None now. See Previously, np.reshape with newshape=None returned a copy. To maintain this behavior, we now use asarray to create an ndarray. Args: a: Array_like. Array to be reshaped. shape: The new shape of the array. newshape: The new shape of the array (deprecated). order: {‘C’, ‘F’, ‘K’}. copy: bool. If True, then the array data is copied. If None, a copy will only be made if it’s required by order. For False it raises a ValueError if a copy cannot be avoided. Returns: This will be a new view object if possible; otherwise, it will be a copy.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\util\\numpy_compat.py",
    "ast_data": "FunctionDef name:np_reshape arg:shape arguments arg arg arg arg arg If Compare Assign If Compare Call If BoolOp Compare Compare Return return:yes Call Return return:yes Call Return return:yes Call"
  },
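A plain-NumPy sketch of the same version gate the shim applies; it assumes only that `numpy` is installed.

```python
import numpy as np

a = np.arange(6)
# NumPy >= 2.1 accepts copy= (the case the shim forwards); older
# versions reject it, which is exactly what the version gate guards.
if np.lib.NumpyVersion(np.__version__) >= "2.1.0rc0":
    b = np.reshape(a, (2, 3), copy=None)  # view if possible
else:
    b = np.reshape(a, (2, 3))
print(b.base is a)  # True: reshaping a contiguous array returns a view
```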
  {
    "library": "pandas",
    "name": "interpolate",
    "source_code": "@final\ndef interpolate(self, method: InterpolateOptions='linear', *, axis: Axis=0, limit: int | None=None, inplace: bool=False, limit_direction: Literal['forward', 'backward', 'both']='forward', limit_area=None, downcast=lib.no_default, **kwargs):\n    assert downcast is lib.no_default\n    result = self._upsample('asfreq')\n    obj = self._selected_obj\n    is_period_index = isinstance(obj.index, PeriodIndex)\n    if not is_period_index:\n        final_index = result.index\n        if isinstance(final_index, MultiIndex):\n            raise NotImplementedError('Direct interpolation of MultiIndex data frames is not supported. If you tried to resample and interpolate on a grouped data frame, please use:\\n`df.groupby(...).apply(lambda x: x.resample(...).interpolate(...))`\\ninstead, as resampling and interpolation has to be performed for each group independently.')\n        missing_data_points_index = obj.index.difference(final_index)\n        if len(missing_data_points_index) > 0:\n            result = concat([result, obj.loc[missing_data_points_index]]).sort_index()\n    result_interpolated = result.interpolate(method=method, axis=axis, limit=limit, inplace=inplace, limit_direction=limit_direction, limit_area=limit_area, downcast=downcast, **kwargs)\n    if is_period_index:\n        return result_interpolated\n    result_interpolated = result_interpolated.loc[final_index]\n    result_interpolated.index = final_index\n    return result_interpolated",
    "docstring": "Interpolate values between target timestamps according to different methods. The original index is first reindexed to target timestamps (see :meth:), then the interpolation of `DataFrame.interpolatescipy.interpolate.interp1dscipy.interpolate.UnivariateSplineorderslinearsplinesplineNotesscipy.interpolate.BPoly.from_derivativesSeriesNone`.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\resample.py",
    "ast_data": "FunctionDef name:interpolate arg:self arg:method arguments arg arg arg arg arg arg arg arg arg Compare Assign Call Assign Assign Call If Assign If Call Raise Call Assign Call If Compare Call Assign Call Call Assign Call If Return return:yes Assign Assign Return return:yes"
  },
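A small usage sketch: upsampling a two-point series to hourly frequency and letting `Resampler.interpolate` fill the inserted gaps.

```python
import pandas as pd

s = pd.Series(
    [0.0, 4.0],
    index=pd.to_datetime(["2023-01-01 00:00", "2023-01-01 02:00"]),
)
# Upsample to hourly points (asfreq), then fill the new gaps linearly.
print(s.resample("1h").interpolate("linear"))
# 00:00 -> 0.0, 01:00 -> 2.0, 02:00 -> 4.0
```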
  {
    "library": "matplotlib",
    "name": "get_legend_handler_map",
    "source_code": "def get_legend_handler_map(self):\n    default_handler_map = self.get_default_handler_map()\n    return {**default_handler_map, **self._custom_handler_map} if self._custom_handler_map else default_handler_map",
    "docstring": "Return this legend instance's handler map.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\legend.py",
    "ast_data": "FunctionDef name:get_legend_handler_map arg:self arguments arg Assign Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "ignore",
    "source_code": "def ignore(self, value):\n    self._ignore = value",
    "docstring": "Set whether the existing bounds of the box should be ignored by subsequent calls to :meth:. value : bool - When `update_from_data_xyBboxupdate_from_data_xyBbox`.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\transforms.py",
    "ast_data": "FunctionDef name:ignore arg:self arg:value arguments arg arg Assign"
  },
  {
    "library": "pytorch",
    "name": "set_up_storage_reader",
    "source_code": "@abc.abstractmethod\ndef set_up_storage_reader(self, metadata: Metadata, is_coordinator: bool) -> None:\n    pass",
    "docstring": "Initialize this instance. Args: metadata (Metadata): The metadata schema to use. is_coordinator (bool): Whether this instance is responsible for coordinating the checkpoint.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\checkpoint\\storage.py",
    "ast_data": "FunctionDef name:set_up_storage_reader arg:self arg:metadata arg:is_coordinator arguments arg arg arg"
  },
  {
    "library": "pytorch",
    "name": "CallModule",
    "source_code": "class CallModule(_TargetArgsExpr):\n    op = 'call_module'",
    "docstring": "Matches a call_module node in the FX graphs:",
    "type": "class",
    "file_path": "pytorch\\torch\\_inductor\\pattern_matcher.py",
    "ast_data": "ClassDef name:CallModule Assign"
  },
  {
    "library": "pytorch",
    "name": "empty",
    "source_code": "def empty(sharding_spec: ShardingSpec, *size, dtype=None, layout=torch.strided, requires_grad=False, pin_memory=False, memory_format=torch.contiguous_format, process_group=None, init_rrefs=False) -> ShardedTensor:\n    return ShardedTensor(sharding_spec, *size, dtype=dtype, layout=layout, requires_grad=requires_grad, pin_memory=pin_memory, memory_format=memory_format, process_group=process_group, init_rrefs=init_rrefs)",
    "docstring": "Returns a :class: filled with uninitialized data. Needs to be called on all ranks in an SPMD fashion. Args: sharding_spec (:class:): The specification describing how to shard the Tensor. size (int...): a sequence of integers defining the shape of the output tensor. Can be a variable number of arguments or a collection like a list or tuple. Keyword args: dtype (:class:, optional): the desired data type of returned tensor. Default: if `torch.set_default_dtypetorch.layouttorch.memory_formattorch.distributed.rpc.RRefShardedTensor` object on each rank",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\_shard\\sharded_tensor\\__init__.py",
    "ast_data": "FunctionDef name:empty arg:sharding_spec arguments arg arg arg arg arg arg arg arg arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "_im2col_col2im_indices_along_dim",
    "source_code": "def _im2col_col2im_indices_along_dim(input_d, kernel_d, dilation_d, padding_d, stride_d, device):\n    blocks_d = input_d + padding_d * 2 - dilation_d * (kernel_d - 1)\n    arange_kw = partial(torch.arange, dtype=torch.int64, device=device)\n    blocks_d_indices = arange_kw(0, blocks_d, stride_d).unsqueeze(0)\n    kernel_grid = arange_kw(0, kernel_d * dilation_d, dilation_d).unsqueeze(-1)\n    return blocks_d_indices + kernel_grid",
    "docstring": "Utility function to implement im2col and col2im",
    "type": "function",
    "file_path": "pytorch\\torch\\_decomp\\decompositions.py",
    "ast_data": "FunctionDef name:_im2col_col2im_indices_along_dim arg:input_d arg:kernel_d arg:dilation_d arg:padding_d arg:stride_d arg:device arguments arg arg arg arg arg arg Assign Assign Call Assign Call Call Assign Call Call Return return:yes"
  },
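The helper builds its index grid by broadcasting two aranges against each other. A standalone sketch that mirrors the arithmetic (the function name here is ours, not PyTorch's):

```python
from functools import partial
import torch

def im2col_indices(input_d, kernel_d, dilation_d, padding_d, stride_d):
    # Entry [i, j] is the input position read by kernel tap i at window j.
    blocks_d = input_d + 2 * padding_d - dilation_d * (kernel_d - 1)
    arange = partial(torch.arange, dtype=torch.int64)
    starts = arange(0, blocks_d, stride_d).unsqueeze(0)                   # (1, n_blocks)
    offsets = arange(0, kernel_d * dilation_d, dilation_d).unsqueeze(-1)  # (k, 1)
    return starts + offsets                                               # (k, n_blocks)

print(im2col_indices(input_d=5, kernel_d=3, dilation_d=1, padding_d=0, stride_d=1))
# tensor([[0, 1, 2],
#         [1, 2, 3],
#         [2, 3, 4]])
```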
  {
    "library": "matplotlib",
    "name": "Widget",
    "source_code": "class Widget:\n    drawon = True\n    eventson = True\n    _active = True\n\n    def set_active(self, active):\n        self._active = active\n\n    def get_active(self):\n        return self._active\n    active = property(get_active, set_active, doc='Is the widget active?')\n\n    def ignore(self, event):\n        return not self.active",
    "docstring": "Abstract base class for GUI neutral widgets.",
    "type": "class",
    "file_path": "matplotlib\\lib\\matplotlib\\widgets.py",
    "ast_data": "ClassDef name:Widget Assign Assign Assign FunctionDef name:set_active arg:self arg:active arguments arg arg Assign FunctionDef name:get_active arg:self arguments arg Return return:yes Assign Call FunctionDef name:ignore arg:self arg:event arguments arg arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "all_symbolics_schemas",
    "source_code": "def all_symbolics_schemas() -> dict[str, _TorchSchema]:\n    symbolics_schemas = {}\n    for name in registration.registry.all_functions():\n        func_group = registration.registry.get_function_group(name)\n        assert func_group is not None\n        symbolics_schema = _TorchSchema(name)\n        func = func_group.get(_constants.ONNX_MAX_OPSET)\n        if func is not None:\n            symbolics_schema.arguments = _symbolic_argument_count(func)\n            symbolics_schema.opsets = list(range(func_group.get_min_supported(), _constants.ONNX_MAX_OPSET + 1))\n        else:\n            func = func_group.get(7)\n            symbolics_schema.arguments = _symbolic_argument_count(func)\n            symbolics_schema.opsets = list(range(7, _constants.ONNX_BASE_OPSET))\n        symbolics_schemas[name] = symbolics_schema\n    return symbolics_schemas",
    "docstring": "Returns schemas for all onnx supported ops.",
    "type": "function",
    "file_path": "pytorch\\torch\\onnx\\_onnx_supported_ops.py",
    "ast_data": "FunctionDef name:all_symbolics_schemas arguments Assign For Call Assign Call Compare Assign Call Assign Call If Compare Assign Call Assign Call Call Call Assign Call Assign Call Assign Call Call Assign Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "print_bt",
    "source_code": "def print_bt(self, *, file=None, stacklevel=0):\n    stack = []\n    tx = self.__get_tx(stacklevel)\n    while tx is not None:\n        stack.append(tx.frame_summary())\n        tx = getattr(tx, 'parent', None)\n    print(''.join(traceback.StackSummary.from_list(reversed(stack)).format()), file=file)",
    "docstring": "Print the user code backtrace, starting at the beginning of the frame Dynamo started evaluating. Note that this MAY NOT go all the way to the torch.compile invocation, as we may have done a graph break and are compiling an intermediate frame as the starting point. If you think the other behavior would be better, file a bug at",
    "type": "method",
    "file_path": "pytorch\\torch\\_dynamo\\comptime.py",
    "ast_data": "FunctionDef name:print_bt arg:self arguments arg arg arg Assign Assign Call While Compare Call Call Assign Call Call Call Call Call Call"
  },
  {
    "library": "django",
    "name": "t",
    "source_code": "def t(self):\n    return calendar.monthrange(self.data.year, self.data.month)[1]",
    "docstring": "Number of days in the given month; i.e. '28' to '31'",
    "type": "method",
    "file_path": "django\\django\\utils\\dateformat.py",
    "ast_data": "FunctionDef name:t arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "shape",
    "source_code": "@final\n@property\ndef shape(self) -> Shape:\n    return (len(self),)",
    "docstring": "Return a tuple of the shape of the underlying data. See Also -------- Index.size: Return the number of elements in the underlying data. Index.ndim: Number of dimensions of the underlying data, by definition 1. Index.dtype: Return the dtype object of the underlying data. Index.values: Return an array representing the data in the Index. Examples -------- >>> idx = pd.Index([1, 2, 3]) >>> idx Index([1, 2, 3], dtype='int64') >>> idx.shape (3,)",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\indexes\\base.py",
    "ast_data": "FunctionDef name:shape arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "django",
    "name": "get_initial",
    "source_code": "def get_initial(self):\n    return self.initial.copy()",
    "docstring": "Return the initial data to use for forms on this view.",
    "type": "method",
    "file_path": "django\\django\\views\\generic\\edit.py",
    "ast_data": "FunctionDef name:get_initial arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "add_tensor_filter",
    "source_code": "def add_tensor_filter(self, filter_name, tensor_filter):\n    if self._session_wrapper:\n        self._session_wrapper.add_tensor_filter(filter_name, tensor_filter)\n    else:\n        self._pending_tensor_filters[filter_name] = tensor_filter",
    "docstring": "Add a tensor filter. See doc of for details. Override default behavior to accommodate the possibility of this method being called prior to the initialization of the underlying object. Args: filter_name: See doc of for details. tensor_filter: See doc of for details.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\debug\\wrappers\\hooks.py",
    "ast_data": "FunctionDef name:add_tensor_filter arg:self arg:filter_name arg:tensor_filter arguments arg arg arg If Call Assign"
  },
  {
    "library": "pytorch",
    "name": "popitem",
    "source_code": "def popitem(self) -> tuple[str, Any]:\n    k, _ = self._keys.popitem()\n    self._keys[k] = None\n    val = self[k]\n    del self[k]\n    return (k, val)",
    "docstring": "Remove and return the last inserted pair from the ParameterDict.",
    "type": "method",
    "file_path": "pytorch\\torch\\nn\\modules\\container.py",
    "ast_data": "FunctionDef name:popitem arg:self arguments arg Assign Call Assign Assign Return return:yes"
  },
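A short usage sketch: `popitem` on `torch.nn.ParameterDict` is LIFO, returning the most recently inserted pair.

```python
import torch
from torch import nn

params = nn.ParameterDict({
    "w1": nn.Parameter(torch.zeros(2)),
    "w2": nn.Parameter(torch.ones(2)),
})
key, value = params.popitem()  # LIFO: last inserted pair comes back
print(key, value.shape)        # w2 torch.Size([2])
print(list(params.keys()))     # ['w1']
```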
  {
    "library": "tensorflow",
    "name": "_set_device",
    "source_code": "def _set_device(self, device) -> None:\n    self._set_device_from_string(compat.as_str(_device_string(device)))",
    "docstring": "Set the device of this operation. Args: device: string or device.. The device to set.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\ops.py",
    "ast_data": "FunctionDef name:_set_device arg:self arg:device arguments arg arg Call Call Call"
  },
  {
    "library": "matplotlib",
    "name": "_update_grid",
    "source_code": "def _update_grid(self, bbox):\n    pass",
    "docstring": "Cache relevant computations when the axes limits have changed.",
    "type": "method",
    "file_path": "matplotlib\\lib\\mpl_toolkits\\axisartist\\axislines.py",
    "ast_data": "FunctionDef name:_update_grid arg:self arg:bbox arguments arg arg"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, shape=None, dtype=dtypes.float32):\n    self._shape = tensor_shape.as_shape(shape)\n    self._dtype = dtypes.as_dtype(dtype)",
    "docstring": "Constructs a type specification for a . Args: shape: The dense shape of the , or to allow any dense shape. dtype: of values in the .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\sparse_tensor.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:shape arg:dtype arguments arg arg arg Assign Call Assign Call"
  },
  {
    "library": "scikit-learn",
    "name": "fit_predict",
    "source_code": "@available_if(_final_estimator_has('fit_predict'))\n@_fit_context(prefer_skip_nested_validation=False)\ndef fit_predict(self, X, y=None, **params):\n    routed_params = self._check_method_params(method='fit_predict', props=params)\n    Xt = self._fit(X, y, routed_params)\n    params_last_step = routed_params[self.steps[-1][0]]\n    with _print_elapsed_time('Pipeline', self._log_message(len(self.steps) - 1)):\n        y_pred = self.steps[-1][1].fit_predict(Xt, y, **params_last_step.get('fit_predict', {}))\n    return y_pred",
    "docstring": "Transform the data, and apply with the final estimator. Call of each transformer in the pipeline. The transformed data are finally passed to the final estimator that calls method. Only valid if the final estimator implements . Parameters ---------- X : iterable Training data. Must fulfill input requirements of first step of the pipeline. y : iterable, default=None Training targets. Must fulfill label requirements for all steps of the pipeline. **params : dict of str -> object - If (default): Parameters to the `enable_metadata_routing=Trueenable_metadata_routing=TrueMetadata Routing User Guide fit_predict` on the final estimator.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\pipeline.py",
    "ast_data": "FunctionDef name:fit_predict arg:self arg:X arg:y arguments arg arg arg arg Assign Call Assign Call Assign With Call Call Call Assign Call Call Return return:yes Call Call Call"
  },
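A hedged end-to-end sketch with a clusterer as the final step, since `KMeans` implements `fit_predict`:

```python
import numpy as np
from sklearn.cluster import KMeans
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler

X = np.vstack([np.random.randn(20, 2), np.random.randn(20, 2) + 5])
pipe = Pipeline([
    ("scale", StandardScaler()),
    ("km", KMeans(n_clusters=2, n_init=10, random_state=0)),
])
labels = pipe.fit_predict(X)  # fit_transform the scaler, then KMeans.fit_predict
print(labels.shape)           # (40,)
```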
  {
    "library": "tensorflow",
    "name": "xw_plus_b_v1",
    "source_code": "def xw_plus_b_v1(x, weights, biases, name=None):\n    with ops.name_scope(name, 'xw_plus_b_v1', [x, weights, biases]) as name:\n        x = ops.convert_to_tensor(x, name='x')\n        weights = ops.convert_to_tensor(weights, name='weights')\n        biases = ops.convert_to_tensor(biases, name='biases')\n        mm = math_ops.matmul(x, weights)\n        return bias_add_v1(mm, biases, name=name)",
    "docstring": "Computes matmul(x, weights) + biases. This is a deprecated version of that will soon be removed. Args: x: a 2D tensor. Dimensions typically: batch, in_units weights: a 2D tensor. Dimensions typically: in_units, out_units biases: a 1D tensor. Dimensions: out_units name: A name for the operation (optional). If not specified \"xw_plus_b_v1\" is used. Returns: A 2-D Tensor computing matmul(x, weights) + biases. Dimensions typically: batch, out_units.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\nn_ops.py",
    "ast_data": "FunctionDef name:xw_plus_b_v1 arg:x arg:weights arg:biases arg:name arguments arg arg arg arg With Call Assign Call Assign Call Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "django",
    "name": "_trim_to_apps",
    "source_code": "def _trim_to_apps(self, changes, app_labels):\n    app_dependencies = {}\n    for app_label, migrations in changes.items():\n        for migration in migrations:\n            for dep_app_label, name in migration.dependencies:\n                app_dependencies.setdefault(app_label, set()).add(dep_app_label)\n    required_apps = set(app_labels)\n    old_required_apps = None\n    while old_required_apps != required_apps:\n        old_required_apps = set(required_apps)\n        required_apps.update(*[app_dependencies.get(app_label, ()) for app_label in required_apps])\n    for app_label in list(changes):\n        if app_label not in required_apps:\n            del changes[app_label]\n    return changes",
    "docstring": "Take changes from arrange_for_graph() and set of app labels, and return a modified set of changes which trims out as many migrations that are not in app_labels as possible. Note that some other migrations may still be present as they may be required dependencies.",
    "type": "method",
    "file_path": "django\\django\\db\\migrations\\autodetector.py",
    "ast_data": "FunctionDef name:_trim_to_apps arg:self arg:changes arg:app_labels arguments arg arg arg Assign For Call For For Call Call Call Assign Call Assign While Compare Assign Call Call Call For Call If Compare Return return:yes"
  },
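The heart of the method is a fixed-point loop: keep unioning in dependencies until the required-app set stops growing. A standalone sketch on a toy dependency map (app names invented):

```python
app_dependencies = {"blog": {"auth"}, "auth": {"contenttypes"}}
required_apps = {"blog"}

old = None
while old != required_apps:   # iterate until the set stops growing
    old = set(required_apps)
    required_apps.update(*[app_dependencies.get(a, ()) for a in required_apps])

print(sorted(required_apps))  # ['auth', 'blog', 'contenttypes']
```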
  {
    "library": "tensorflow",
    "name": "_get_dtype_from_nested_lists",
    "source_code": "def _get_dtype_from_nested_lists(list_or_tuple):\n    for elem in list_or_tuple:\n        if isinstance(elem, core.Tensor):\n            return elem.dtype.base_dtype\n        elif isinstance(elem, (list, tuple)):\n            maybe_dtype = _get_dtype_from_nested_lists(elem)\n            if maybe_dtype is not None:\n                return maybe_dtype\n    return None",
    "docstring": "Returns the dtype of any tensor-like object in , if found. Args: list_or_tuple: A list or tuple representing an object that can be converted to a . Returns: The dtype of any tensor-like object in , or if no such object exists.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\array_ops.py",
    "ast_data": "FunctionDef name:_get_dtype_from_nested_lists arg:list_or_tuple arguments arg For If Call Return return:yes If Call Assign Call If Compare Return return:yes Return return:no"
  },
  {
    "library": "pytorch",
    "name": "load",
    "source_code": "@_dcp_method_logger(log_exceptions=True)\n@_api_bc_check\ndef load(state_dict: dict[str, Any], *, checkpoint_id: Union[str, os.PathLike, None]=None, storage_reader: Optional[StorageReader]=None, planner: Optional[LoadPlanner]=None, process_group: Optional[dist.ProcessGroup]=None, no_dist: bool=False) -> None:\n    no_dist = no_dist or not dist.is_available() or (not dist.is_initialized())\n    if no_dist:\n        warnings.warn('torch.distributed is disabled, unavailable or uninitialized, assuming the intent is to load in a single process.')\n    with _profile():\n        storage_reader = cast(StorageReader, _storage_setup(storage_reader, checkpoint_id, reader=True))\n        keys = sorted(state_dict.keys())\n        statetful_sd = {}\n        for key in keys:\n            if key not in state_dict:\n                continue\n            elem = state_dict[key]\n            statetful_sd[key] = elem.state_dict() if isinstance(elem, Stateful) else elem\n        _load_state_dict(state_dict=statetful_sd, storage_reader=storage_reader, process_group=process_group, no_dist=no_dist, planner=planner)\n        for key in keys:\n            if key not in state_dict:\n                continue\n            elem = state_dict[key]\n            if isinstance(elem, Stateful):\n                elem.load_state_dict(statetful_sd[key])\n            else:\n                state_dict[key] = statetful_sd[key]",
    "docstring": "Load a checkpoint into a distributed state dict in SPMD style. Each rank must have the same keys in their `state_dictShardedTensorDTensortorch.load()load_state_dict`.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\checkpoint\\state_dict_loader.py",
    "ast_data": "FunctionDef name:load arg:state_dict arguments arg arg arg arg arg arg Assign BoolOp Call Call If Call With Call Assign Call Call Assign Call Call Assign For If Compare Assign Assign Call Call Call For If Compare Assign If Call Call Assign Call"
  },
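A hedged sketch of calling this API in a single process; 'checkpoint_dir' is a hypothetical path to a checkpoint previously written with `dcp.save()`.

```python
# Sketch only: "checkpoint_dir" is a hypothetical directory previously
# written with torch.distributed.checkpoint.save(); without it this fails.
import torch
import torch.distributed.checkpoint as dcp

model = torch.nn.Linear(4, 4)
state_dict = {"model": model.state_dict()}

# Loads in place; with torch.distributed uninitialized, no_dist is assumed.
dcp.load(state_dict, checkpoint_id="checkpoint_dir")
model.load_state_dict(state_dict["model"])
```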
  {
    "library": "pytorch",
    "name": "FixedQParamsObserver",
    "source_code": "class FixedQParamsObserver(ObserverBase):\n    scale: torch.Tensor\n    zero_point: torch.Tensor\n\n    def __init__(self, scale, zero_point, dtype=torch.quint8, qscheme=torch.per_tensor_affine, quant_min=0, quant_max=255, is_dynamic=False, **kwargs):\n        if is_dynamic:\n            raise NotImplementedError(\"FixedQParamsObserver doesn't support dynamic quantization\")\n        super().__init__(dtype=dtype, is_dynamic=is_dynamic, **kwargs)\n        self.quant_min = quant_min\n        self.quant_max = quant_max\n        self.register_buffer('scale', torch.tensor([scale], dtype=torch.float))\n        self.register_buffer('zero_point', torch.tensor([zero_point], dtype=torch.int))\n        self.dtype = dtype\n        self.qscheme = qscheme\n\n    def forward(self, X):\n        return X\n\n    @torch.jit.export\n    def calculate_qparams(self):\n        return (self.scale, self.zero_point)",
    "docstring": "Observer that simulates quantize and dequantize with fixed quantization parameters in training time. Only per tensor quantization is supported. Args: (float): fixed scale for the observer (int): fixed zero point for the observer , , ,",
    "type": "class",
    "file_path": "pytorch\\torch\\ao\\quantization\\observer.py",
    "ast_data": "ClassDef name:FixedQParamsObserver FunctionDef name:__init__ arg:self arg:scale arg:zero_point arg:dtype arg:qscheme arg:quant_min arg:quant_max arg:is_dynamic arguments arg arg arg arg arg arg arg arg arg If Raise Call Call Call Assign Assign Call Call Call Call Assign Assign FunctionDef name:forward arg:self arg:X arguments arg arg Return return:yes FunctionDef name:calculate_qparams arg:self arguments arg Return return:yes"
  },
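A quick sketch showing that the observer passes data through untouched and always reports its fixed qparams:

```python
import torch
from torch.ao.quantization.observer import FixedQParamsObserver

obs = FixedQParamsObserver(scale=0.1, zero_point=0)
x = torch.randn(4)
y = obs(x)                                   # forward is the identity
scale, zero_point = obs.calculate_qparams()  # always the fixed values
print(torch.equal(x, y), scale.item(), zero_point.item())  # True 0.1 0
```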
  {
    "library": "matplotlib",
    "name": "__init__",
    "source_code": "def __init__(self, x_transform, y_transform, **kwargs):\n    is_affine = x_transform.is_affine and y_transform.is_affine\n    is_separable = x_transform.is_separable and y_transform.is_separable\n    is_correct = is_affine and is_separable\n    if not is_correct:\n        raise ValueError('Both *x_transform* and *y_transform* must be 2D affine transforms')\n    Transform.__init__(self, **kwargs)\n    self._x = x_transform\n    self._y = y_transform\n    self.set_children(x_transform, y_transform)\n    Affine2DBase.__init__(self)\n    self._mtx = None",
    "docstring": "Create a new \"blended\" transform using *x_transform* to transform the *x*-axis and *y_transform* to transform the *y*-axis. Both *x_transform* and *y_transform* must be 2D affine transforms. You will generally not call this constructor directly but use the function instead, which can determine automatically which kind of blended transform to create.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\transforms.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:x_transform arg:y_transform arguments arg arg arg arg Assign BoolOp Assign BoolOp Assign BoolOp If Raise Call Call Assign Assign Call Call Assign"
  },
  {
    "library": "pytorch",
    "name": "_ParsedStackTrace",
    "source_code": "@dataclass(frozen=True)\nclass _ParsedStackTrace:\n    file: str\n    lineno: str\n    name: str\n    code: str\n\n    def get_summary_str(self):\n        return f'File: {self.file}:{self.lineno} in {self.name}, code: {self.code}'",
    "docstring": "Represents the top-most frame of a parsed stack trace",
    "type": "class",
    "file_path": "pytorch\\torch\\fx\\graph.py",
    "ast_data": "ClassDef name:_ParsedStackTrace FunctionDef name:get_summary_str arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "converged",
    "source_code": "def converged(self):\n    if np.any(np.isinf(self.population_energies)):\n        return False\n    return np.std(self.population_energies) <= self.atol + self.tol * np.abs(np.mean(self.population_energies))",
    "docstring": "Return True if the solver has converged.",
    "type": "method",
    "file_path": "scipy\\scipy\\optimize\\_differentialevolution.py",
    "ast_data": "FunctionDef name:converged arg:self arguments arg If Call Call Return return:yes Return return:yes Compare Call Call Call"
  },
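The criterion above is easiest to see through the public API; a small sketch (stopping tolerance chosen arbitrarily):

```python
import numpy as np
from scipy.optimize import differential_evolution, rosen

# The solver reports convergence once
#   std(population_energies) <= atol + tol * |mean(population_energies)|.
result = differential_evolution(rosen, bounds=[(-2, 2), (-2, 2)], tol=0.01, seed=1)
print(result.success, np.round(result.x, 3))  # True, near [1. 1.]
```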
  {
    "library": "scrapy",
    "name": "send_catch_log_deferred",
    "source_code": "@inlineCallbacks\ndef send_catch_log_deferred(signal: TypingAny=Any, sender: TypingAny=Anonymous, *arguments: TypingAny, **named: TypingAny) -> Generator[Deferred[TypingAny], TypingAny, list[tuple[TypingAny, TypingAny]]]:\n\n    def logerror(failure: Failure, recv: TypingAny) -> Failure:\n        if dont_log is None or not isinstance(failure.value, dont_log):\n            logger.error('Error caught on signal handler: %(receiver)s', {'receiver': recv}, exc_info=failure_to_exc_info(failure), extra={'spider': spider})\n        return failure\n    dont_log = named.pop('dont_log', None)\n    spider = named.get('spider')\n    dfds: list[Deferred[tuple[TypingAny, TypingAny]]] = []\n    for receiver in liveReceivers(getAllReceivers(sender, signal)):\n        d: Deferred[TypingAny] = maybeDeferred_coro(robustApply, receiver, *arguments, signal=signal, sender=sender, **named)\n        d.addErrback(logerror, receiver)\n        d2: Deferred[tuple[TypingAny, TypingAny]] = d.addBoth(lambda result: (receiver, result))\n        dfds.append(d2)\n    results = (yield DeferredList(dfds))\n    return [result[1] for result in results]",
    "docstring": "Like :func: but supports :ref:. Returns a deferred that gets fired once all signal handlers have finished.",
    "type": "function",
    "file_path": "scrapy\\scrapy\\utils\\signal.py",
    "ast_data": "FunctionDef name:send_catch_log_deferred arg:signal arg:sender arguments arg arg arg arg FunctionDef name:logerror arg:failure arg:recv arguments arg arg If BoolOp Compare Call Call Call Return return:yes Assign Call Assign Call For Call Call Call Call Call arguments arg Call Assign Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "validate_checkpoint_id",
    "source_code": "@classmethod\n@abc.abstractmethod\ndef validate_checkpoint_id(cls, checkpoint_id: Union[str, os.PathLike]) -> bool:\n    ...",
    "docstring": "Check if the given checkpoint_id is supported by the stroage. This allow us to enable automatic storage selection.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\checkpoint\\storage.py",
    "ast_data": "FunctionDef name:validate_checkpoint_id arg:cls arg:checkpoint_id arguments arg arg"
  },
  {
    "library": "pytorch",
    "name": "half",
    "source_code": "def half(self) -> Self:\n    return self._apply(lambda t: t.half() if t.is_floating_point() else t)",
    "docstring": "Casts all floating point parameters and buffers to `` datatype. .. note:: This method modifies the module in-place. Returns: Module: self",
    "type": "method",
    "file_path": "pytorch\\torch\\nn\\modules\\module.py",
    "ast_data": "FunctionDef name:half arg:self arguments arg Return return:yes Call arguments arg Call Call"
  },
  {
    "library": "pandas",
    "name": "_needs_reindex_multi",
    "source_code": "def _needs_reindex_multi(self, axes, method, level) -> bool:\n    return False",
    "docstring": "Check if we do need a multi reindex; this is for compat with higher dims.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\series.py",
    "ast_data": "FunctionDef name:_needs_reindex_multi arg:self arg:axes arg:method arg:level arguments arg arg arg arg Return return:yes"
  },
  {
    "library": "django",
    "name": "__or__",
    "source_code": "def __or__(self, other):\n    return self.union(other)",
    "docstring": "Return the union of the two geometries.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\gdal\\geometries.py",
    "ast_data": "FunctionDef name:__or__ arg:self arg:other arguments arg arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "bitcast",
    "source_code": "@dispatch.dispatch_for_api(array_ops.bitcast)\ndef bitcast(input: ragged_tensor.RaggedOrDense, type, name=None) -> ragged_tensor.RaggedOrDense:\n    type = dtypes.as_dtype(type)\n    with ops.name_scope(name, 'Bitcast', [input]):\n        input = ragged_tensor.convert_to_tensor_or_ragged_tensor(input, name='input')\n        if input.dtype.size < type.size and input.flat_values.shape.rank < 2:\n            raise ValueError(f'`input.flat_values` is required to have rank >= 2 when input.dtype.size < type.size. Actual rank: {input.flat_values.shape.rank}')\n        return input.with_flat_values(array_ops.bitcast(input.flat_values, type))",
    "docstring": "RaggedTensor dispatch override for tf.bitcast.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\ragged_array_ops.py",
    "ast_data": "FunctionDef name:bitcast arg:input arg:type arg:name arguments arg arg arg Assign Call With Call Assign Call If BoolOp Compare Compare Raise Call Return return:yes Call Call Call"
  },
  {
    "library": "pandas",
    "name": "_hasna",
    "source_code": "@property\ndef _hasna(self) -> bool:\n    return bool(self._isnan.any())",
    "docstring": "return if I have any nans; enables various perf speedups",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\arrays\\datetimelike.py",
    "ast_data": "FunctionDef name:_hasna arg:self arguments arg Return return:yes Call Call"
  },
  {
    "library": "scikit-learn",
    "name": "score_samples",
    "source_code": "@available_if(_check_novelty_score_samples)\ndef score_samples(self, X):\n    check_is_fitted(self)\n    X = check_array(X, accept_sparse='csr')\n    distances_X, neighbors_indices_X = self.kneighbors(X, n_neighbors=self.n_neighbors_)\n    if X.dtype == np.float32:\n        distances_X = distances_X.astype(X.dtype, copy=False)\n    X_lrd = self._local_reachability_density(distances_X, neighbors_indices_X)\n    lrd_ratios_array = self._lrd[neighbors_indices_X] / X_lrd[:, np.newaxis]\n    return -np.mean(lrd_ratios_array, axis=1)",
    "docstring": "Opposite of the Local Outlier Factor of X. It is the opposite as bigger is better, i.e. large values correspond to inliers. **Only available for novelty detection (when novelty is set to True).** The argument X is supposed to contain *new data*: if X contains a point from training, it considers the later in its own neighborhood. Also, the samples in X are not considered in the neighborhood of any point. Because of this, the scores obtained via `` attribute. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) The query sample or samples to compute the Local Outlier Factor w.r.t. the training samples. Returns ------- opposite_lof_scores : ndarray of shape (n_samples,) The opposite of the Local Outlier Factor of each input samples. The lower, the more abnormal.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\neighbors\\_lof.py",
    "ast_data": "FunctionDef name:score_samples arg:self arg:X arguments arg arg Call Assign Call Assign Call If Compare Assign Call Assign Call Assign Return return:yes Call Call"
  },
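A hedged usage sketch in novelty mode, which is the only mode where `score_samples` is available:

```python
import numpy as np
from sklearn.neighbors import LocalOutlierFactor

rng = np.random.RandomState(0)
X_train = rng.randn(100, 2)
lof = LocalOutlierFactor(n_neighbors=20, novelty=True).fit(X_train)

X_new = np.array([[0.0, 0.0], [8.0, 8.0]])  # one inlier, one far outlier
print(lof.score_samples(X_new))  # larger (less negative) means more normal
```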
  {
    "library": "kornia",
    "name": "adjust_contrast_with_mean_subtraction",
    "source_code": "def adjust_contrast_with_mean_subtraction(image: Tensor, factor: Union[float, Tensor]) -> Tensor:\n    KORNIA_CHECK_IS_TENSOR(image, 'Expected shape (*, H, W)')\n    KORNIA_CHECK(isinstance(factor, (float, Tensor)), 'Factor should be float or Tensor.')\n    if isinstance(factor, float):\n        factor = torch.as_tensor(factor, device=image.device, dtype=image.dtype)\n    elif isinstance(factor, Tensor):\n        factor = factor.to(image.device, image.dtype)\n    while len(factor.shape) != len(image.shape):\n        factor = factor[..., None]\n    if image.shape[-3] == 3:\n        img_mean = rgb_to_grayscale(image).mean((-2, -1), True)\n    else:\n        img_mean = image.mean()\n    img_adjust: Tensor = image * factor + img_mean * (1 - factor)\n    img_adjust = img_adjust.clamp(min=0.0, max=1.0)\n    return img_adjust",
    "docstring": "Adjust the contrast of an image tensor by subtracting the mean over channels. .. note:: this is just a convenience function to have compatibility with Pil. For exact definition of image contrast adjustment consider using :func:. Args: image: Image to be adjusted in the shape of :math:. factor: Contrast adjust factor per element in the batch. 0 generates a completely black image, 1 does not modify the input image while any other non-negative number modify the brightness by this factor. Return: Adjusted image in the shape of :math:. Example: >>> import torch >>> x = torch.ones(1, 1, 2, 2) >>> adjust_contrast_with_mean_subtraction(x, 0.5) tensor([[[[1., 1.], [1., 1.]]]]) >>> x = torch.ones(2, 5, 3, 3) >>> y = torch.tensor([0.65, 0.50]) >>> adjust_contrast_with_mean_subtraction(x, y).shape torch.Size([2, 5, 3, 3])",
    "type": "function",
    "file_path": "kornia\\kornia\\enhance\\adjust.py",
    "ast_data": "FunctionDef name:adjust_contrast_with_mean_subtraction arg:image arg:factor arguments arg arg Call Call Call If Call Assign Call If Call Assign Call While Compare Call Call Assign If Compare Assign Call Call Assign Call Assign Call Return return:yes"
  },
  {
    "library": "cherrypy",
    "name": "__init__",
    "source_code": "def __init__(self, rfile):\n    self.rfile = rfile\n    self.bytes_read = 0",
    "docstring": "Initialize a read byte counter.",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\lib\\cpstats.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:rfile arguments arg arg Assign Assign"
  },
  {
    "library": "pandas",
    "name": "set_inplace",
    "source_code": "def set_inplace(self, locs, values: ArrayLike, copy: bool=False) -> None:\n    if copy:\n        self.values = self.values.copy()\n    self.values[locs] = values",
    "docstring": "Modify block values in-place with new item value. If copy=True, first copy the underlying values in place before modifying (for Copy-on-Write). Notes ----- never creates a new array or new Block, whereas _may_ create a new array and always creates a new Block. Caller is responsible for checking values.dtype == self.dtype.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\internals\\blocks.py",
    "ast_data": "FunctionDef name:set_inplace arg:self arg:locs arg:values arg:copy arguments arg arg arg arg If Assign Call Assign"
  },
  {
    "library": "tensorflow",
    "name": "logsumexp",
    "source_code": "def logsumexp(x, axis=None, keepdims=False):\n    return math_ops.reduce_logsumexp(x, axis, keepdims)",
    "docstring": "Computes log(sum(exp(elements across dimensions of a tensor))). This function is more numerically stable than log(sum(exp(x))). It avoids overflows caused by taking the exp of large inputs and underflows caused by taking the log of small inputs. Args: x: A tensor or variable. axis: An integer, the axis to reduce over. keepdims: A boolean, whether to keep the dimensions or not. If is , the rank of the tensor is reduced by 1. If is , the reduced dimension is retained with length 1. Returns: The reduced tensor.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\backend.py",
    "ast_data": "FunctionDef name:logsumexp arg:x arg:axis arg:keepdims arguments arg arg arg Return return:yes Call"
  },
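The numerical-stability claim rests on the shift-by-max identity log(sum(exp(x))) = m + log(sum(exp(x - m))) with m = max(x); a plain-NumPy sketch:

```python
import numpy as np

x = np.array([1000.0, 1000.5])
naive = np.log(np.sum(np.exp(x)))           # overflows to inf

m = np.max(x)                               # shift-by-max trick
stable = m + np.log(np.sum(np.exp(x - m)))  # ~1000.974
print(naive, stable)
```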
  {
    "library": "uvicorn",
    "name": "shutdown",
    "source_code": "def shutdown(self) -> None:\n    if self.cycle is None or self.cycle.response_complete:\n        event = h11.ConnectionClosed()\n        self.conn.send(event)\n        self.transport.close()\n    else:\n        self.cycle.keep_alive = False",
    "docstring": "Called by the server to commence a graceful shutdown.",
    "type": "method",
    "file_path": "uvicorn\\uvicorn\\protocols\\http\\h11_impl.py",
    "ast_data": "FunctionDef name:shutdown arg:self arguments arg If BoolOp Compare Assign Call Call Call Assign"
  },
  {
    "library": "tensorflow",
    "name": "_validate_operator_dimensions",
    "source_code": "def _validate_operator_dimensions(self):\n    for i in range(1, len(self.operators)):\n        for j in range(i):\n            op = self.operators[i][j]\n            above_op = self.operators[i - 1][j]\n            right_op = self.operators[i][j + 1]\n            if op.domain_dimension is not None and above_op.domain_dimension is not None:\n                if op.domain_dimension != above_op.domain_dimension:\n                    raise ValueError(f'Argument `operators[{i}][{j}].domain_dimension` ({op.domain_dimension}) must be the same as `operators[{i - 1}][{j}].domain_dimension` ({above_op.domain_dimension}).')\n            if op.range_dimension is not None and right_op.range_dimension is not None:\n                if op.range_dimension != right_op.range_dimension:\n                    raise ValueError(f'Argument `operators[{i}][{j}].range_dimension` ({op.range_dimension}) must be the same as `operators[{i}][{j + 1}].range_dimension` ({right_op.range_dimension}).')",
    "docstring": "Check that have compatible dimensions.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\linalg\\linear_operator_block_lower_triangular.py",
    "ast_data": "FunctionDef name:_validate_operator_dimensions arg:self arguments arg For Call Call For Call Assign Assign Assign If BoolOp Compare Compare If Compare Raise Call If BoolOp Compare Compare If Compare Raise Call"
  },
  {
    "library": "scipy",
    "name": "siegelslopes",
    "source_code": "def siegelslopes(y, x=None, method='hierarchical'):\n    y = ma.asarray(y).ravel()\n    if x is None:\n        x = ma.arange(len(y), dtype=float)\n    else:\n        x = ma.asarray(x).ravel()\n        if len(x) != len(y):\n            raise ValueError(f'Incompatible lengths ! ({len(y)}<>{len(x)})')\n    m = ma.mask_or(ma.getmask(x), ma.getmask(y))\n    y._mask = x._mask = m\n    y = y.compressed()\n    x = x.compressed().astype(float)\n    return stats_siegelslopes(y, x, method=method)",
    "docstring": "Computes the Siegel estimator for a set of points (x, y). implements a method for robust linear regression using repeated medians to fit a line to the points (x, y). The method is robust to outliers with an asymptotic breakdown point of 50%. Parameters ---------- y : array_like Dependent variable. x : array_like or None, optional Independent variable. If None, use `siegelslopesscipy.stats.siegelslopes`.",
    "type": "function",
    "file_path": "scipy\\scipy\\stats\\_mstats_basic.py",
    "ast_data": "FunctionDef name:siegelslopes arg:y arg:x arg:method arguments arg arg arg Assign Call Call If Compare Assign Call Call Assign Call Call If Compare Call Call Raise Call Call Call Assign Call Call Call Assign Assign Call Assign Call Call Return return:yes Call"
  },
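A small usage sketch via `scipy.stats.mstats` (outlier value chosen arbitrarily):

```python
import numpy as np
from scipy.stats import mstats

x = np.arange(10, dtype=float)
y = 2.0 * x + 1.0
y[3] = 50.0                                   # inject one gross outlier
slope, intercept = mstats.siegelslopes(y, x)  # repeated medians shrug it off
print(round(slope, 2), round(intercept, 2))   # ~2.0 ~1.0
```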
  {
    "library": "scipy",
    "name": "_relative_degree",
    "source_code": "def _relative_degree(z, p):\n    degree = p.shape[0] - z.shape[0]\n    if degree < 0:\n        raise ValueError('Improper transfer function. Must have at least as many poles as zeros.')\n    else:\n        return degree",
    "docstring": "Return relative degree of transfer function from zeros and poles",
    "type": "function",
    "file_path": "scipy\\scipy\\signal\\_filter_design.py",
    "ast_data": "FunctionDef name:_relative_degree arg:z arg:p arguments arg arg Assign If Compare Raise Call Return return:yes"
  },
  {
    "library": "pandas",
    "name": "protocol_df_chunk_to_pandas",
    "source_code": "def protocol_df_chunk_to_pandas(df: DataFrameXchg) -> pd.DataFrame:\n    columns: dict[str, Any] = {}\n    buffers = []\n    for name in df.column_names():\n        if not isinstance(name, str):\n            raise ValueError(f'Column {name} is not a string')\n        if name in columns:\n            raise ValueError(f'Column {name} is not unique')\n        col = df.get_column_by_name(name)\n        dtype = col.dtype[0]\n        if dtype in (DtypeKind.INT, DtypeKind.UINT, DtypeKind.FLOAT, DtypeKind.BOOL):\n            columns[name], buf = primitive_column_to_ndarray(col)\n        elif dtype == DtypeKind.CATEGORICAL:\n            columns[name], buf = categorical_column_to_series(col)\n        elif dtype == DtypeKind.STRING:\n            columns[name], buf = string_column_to_ndarray(col)\n        elif dtype == DtypeKind.DATETIME:\n            columns[name], buf = datetime_column_to_ndarray(col)\n        else:\n            raise NotImplementedError(f'Data type {dtype} not handled yet')\n        buffers.append(buf)\n    pandas_df = pd.DataFrame(columns)\n    pandas_df.attrs['_INTERCHANGE_PROTOCOL_BUFFERS'] = buffers\n    return pandas_df",
    "docstring": "Convert interchange protocol chunk to ``. Parameters ---------- df : DataFrameXchg Returns ------- pd.DataFrame",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\interchange\\from_dataframe.py",
    "ast_data": "FunctionDef name:protocol_df_chunk_to_pandas arg:df arguments arg Assign For Call If Call Raise Call If Compare Raise Call Assign Call Assign If Compare Assign Call If Compare Assign Call If Compare Assign Call If Compare Assign Call Raise Call Call Assign Call Assign Return return:yes"
  },
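This conversion is reached through the public `pandas.api.interchange.from_dataframe` entry point; a hedged round-trip sketch:

```python
import pandas as pd
from pandas.api.interchange import from_dataframe

df = pd.DataFrame({"a": [1, 2, 3], "b": ["x", "y", "z"]})
xchg = df.__dataframe__()       # interchange-protocol view of the frame
rebuilt = from_dataframe(xchg)  # runs the chunk-by-chunk conversion above
print(rebuilt)
```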
  {
    "library": "pandas",
    "name": "levshape",
    "source_code": "@property\ndef levshape(self) -> Shape:\n    return tuple((len(x) for x in self.levels))",
    "docstring": "A tuple representing the length of each level in the MultiIndex. In a , each level can contain multiple unique values. The property provides a quick way to assess the size of each level by returning a tuple where each entry represents the number of unique values in that specific level. This is particularly useful in scenarios where you need to understand the structure and distribution of your index levels, such as when working with multidimensional data. See Also -------- MultiIndex.shape : Return a tuple of the shape of the MultiIndex. MultiIndex.levels : Returns the levels of the MultiIndex. Examples -------- >>> mi = pd.MultiIndex.from_arrays([[\"a\"], [\"b\"], [\"c\"]]) >>> mi MultiIndex([('a', 'b', 'c')], ) >>> mi.levshape (1, 1, 1)",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\indexes\\multi.py",
    "ast_data": "FunctionDef name:levshape arg:self arguments arg Return return:yes Call Call"
  },
  {
    "library": "scrapy",
    "name": "method_is_overridden",
    "source_code": "def method_is_overridden(subclass: type, base_class: type, method_name: str) -> bool:\n    base_method = getattr(base_class, method_name)\n    sub_method = getattr(subclass, method_name)\n    return base_method.__code__ is not sub_method.__code__",
    "docstring": "Return True if a method named ``. >>> class Base: ... def foo(self): ... pass >>> class Sub1(Base): ... pass >>> class Sub2(Base): ... def foo(self): ... pass >>> class Sub3(Sub1): ... def foo(self): ... pass >>> class Sub4(Sub2): ... pass >>> method_is_overridden(Sub1, Base, 'foo') False >>> method_is_overridden(Sub2, Base, 'foo') True >>> method_is_overridden(Sub3, Base, 'foo') True >>> method_is_overridden(Sub4, Base, 'foo') True",
    "type": "function",
    "file_path": "scrapy\\scrapy\\utils\\deprecate.py",
    "ast_data": "FunctionDef name:method_is_overridden arg:subclass arg:base_class arg:method_name arguments arg arg arg Assign Call Assign Call Return return:yes Compare"
  },
  {
    "library": "kornia",
    "name": "Normalize",
    "source_code": "class Normalize(Module):\n\n    def __init__(self, mean: Union[Tensor, Tuple[float], List[float], float], std: Union[Tensor, Tuple[float], List[float], float]) -> None:\n        super().__init__()\n        if isinstance(mean, (int, float)):\n            mean = torch.tensor([mean])\n        if isinstance(std, (int, float)):\n            std = torch.tensor([std])\n        if isinstance(mean, (tuple, list)):\n            mean = torch.tensor(mean)[None]\n        if isinstance(std, (tuple, list)):\n            std = torch.tensor(std)[None]\n        self.mean = mean\n        self.std = std\n\n    def forward(self, input: Tensor) -> Tensor:\n        return normalize(input, self.mean, self.std)\n\n    def __repr__(self) -> str:\n        repr = f'(mean={self.mean}, std={self.std})'\n        return self.__class__.__name__ + repr",
    "docstring": "Normalize a tensor image with mean and standard deviation. .. math:: \\text{input[channel] = (input[channel] - mean[channel]) / std[channel]} Where is :math: and :math: for channels, Args: mean: Mean for each channel. std: Standard deviations for each channel. Shape: - Input: Image tensor of size :math:. - Output: Normalised tensor with same size as input :math:. Examples: >>> x = torch.rand(1, 4, 3, 3) >>> out = Normalize(0.0, 255.)(x) >>> out.shape torch.Size([1, 4, 3, 3]) >>> x = torch.rand(1, 4, 3, 3) >>> mean = torch.zeros(4) >>> std = 255. * torch.ones(4) >>> out = Normalize(mean, std)(x) >>> out.shape torch.Size([1, 4, 3, 3])",
    "type": "class",
    "file_path": "kornia\\kornia\\enhance\\normalize.py",
    "ast_data": "ClassDef name:Normalize FunctionDef name:__init__ arg:self arg:mean arg:std arguments arg arg arg Call Call If Call Assign Call If Call Assign Call If Call Assign Call If Call Assign Call Assign Assign FunctionDef name:forward arg:self arg:input arguments arg arg Return return:yes Call FunctionDef name:__repr__ arg:self arguments arg Assign Return return:yes"
  },
  {
    "library": "scipy",
    "name": "coshm",
    "source_code": "@_apply_over_batch(('A', 2))\ndef coshm(A):\n    A = _asarray_square(A)\n    return _maybe_real(A, 0.5 * (expm(A) + expm(-A)))",
    "docstring": "Compute the hyperbolic matrix cosine. This routine uses expm to compute the matrix exponentials. Parameters ---------- A : (N, N) array_like Input array. Returns ------- coshm : (N, N) ndarray Hyperbolic matrix cosine of Examples -------- >>> import numpy as np >>> from scipy.linalg import tanhm, sinhm, coshm >>> a = np.array([[1.0, 3.0], [1.0, 4.0]]) >>> c = coshm(a) >>> c array([[ 11.24592233, 38.76236492], [ 12.92078831, 50.00828725]]) Verify tanhm(a) = sinhm(a).dot(inv(coshm(a))) >>> t = tanhm(a) >>> s = sinhm(a) >>> t - s.dot(np.linalg.inv(c)) array([[ 2.72004641e-15, 4.55191440e-15], [ 0.00000000e+00, -5.55111512e-16]])",
    "type": "function",
    "file_path": "scipy\\scipy\\linalg\\_matfuncs.py",
    "ast_data": "FunctionDef name:coshm arg:A arguments arg Assign Call Return return:yes Call Call Call Call"
  },
  {
    "library": "kornia",
    "name": "get_affine_matrix2d",
    "source_code": "def get_affine_matrix2d(translations: Tensor, center: Tensor, scale: Tensor, angle: Tensor, sx: Optional[Tensor]=None, sy: Optional[Tensor]=None) -> Tensor:\n    transform: Tensor = get_rotation_matrix2d(center, -angle, scale)\n    transform[..., 2] += translations\n    transform_h = convert_affinematrix_to_homography(transform)\n    if any((s is not None for s in [sx, sy])):\n        shear_mat = get_shear_matrix2d(center, sx, sy)\n        transform_h = transform_h @ shear_mat\n    return transform_h",
    "docstring": "Compose affine matrix from the components. Args: translations: tensor containing the translation vector with shape :math:. center: tensor containing the center vector with shape :math:. scale: tensor containing the scale factor with shape :math:. angle: tensor of angles in degrees :math:. sx: tensor containing the shear factor in the x-direction with shape :math:. sy: tensor containing the shear factor in the y-direction with shape :math:. Returns: the affine transformation matrix :math:. .. note:: This function is often used in conjunction with :func:, :func:.",
    "type": "function",
    "file_path": "kornia\\kornia\\geometry\\transform\\imgwarp.py",
    "ast_data": "FunctionDef name:get_affine_matrix2d arg:translations arg:center arg:scale arg:angle arg:sx arg:sy arguments arg arg arg arg arg arg Call Assign Call If Call Compare Assign Call Assign Return return:yes"
  },
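A minimal usage sketch, assuming the shapes given above (B = 1, shear left at its default of None):

```python
import torch
from kornia.geometry.transform import get_affine_matrix2d

translations = torch.tensor([[10.0, 5.0]])  # (B, 2)
center = torch.tensor([[16.0, 16.0]])       # (B, 2)
scale = torch.ones(1, 2)                    # (B, 2)
angle = torch.tensor([45.0])                # (B,) in degrees

M = get_affine_matrix2d(translations, center, scale, angle)
print(M.shape)  # torch.Size([1, 3, 3])
```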
  {
    "library": "pytorch",
    "name": "wrap_torch_function",
    "source_code": "def wrap_torch_function(dispatcher: Callable):\n\n    def inner(func):\n\n        @functools.wraps(func)\n        def wrapped(*args, **kwargs):\n            relevant_args = dispatcher(*args, **kwargs)\n            if has_torch_function(relevant_args):\n                return handle_torch_function(wrapped, relevant_args, *args, **kwargs)\n            return func(*args, **kwargs)\n        return wrapped\n    return inner",
    "docstring": "Wraps a given function with `` -related functionality. Parameters ---------- dispatcher: Callable A callable that returns an iterable of Tensor-likes passed into the function. Note ---- This decorator may reduce the performance of your code. Generally, it's enough to express your code as a series of functions that, themselves, support __torch_function__. If you find yourself in the rare situation where this is not the case, e.g. if you're wrapping a low-level library and you also need it to work for Tensor-likes, then this function is available. Examples -------- >>> def dispatcher(a): # Must have the same signature as func ... return (a,) >>> @torch.overrides.wrap_torch_function(dispatcher) >>> def func(a): # This will make func dispatchable by __torch_function__ ... return a + 0",
    "type": "function",
    "file_path": "pytorch\\torch\\overrides.py",
    "ast_data": "FunctionDef name:wrap_torch_function arg:dispatcher arguments arg FunctionDef name:inner arg:func arguments arg FunctionDef name:wrapped arguments arg arg Assign Call If Call Return return:yes Call Return return:yes Call Call Return return:yes Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "interpolate",
    "source_code": "def interpolate(input, size=None, scale_factor=None, mode='nearest', align_corners=None):\n    if not input.is_quantized:\n        raise ValueError(\"Input to 'quantized.interpolate' must be quantized!\")\n    return torch.nn.functional.interpolate(input, size, scale_factor, mode, align_corners)",
    "docstring": "Down/up samples the input to either the given :attr: or the given :attr: See :func: for implementation details. The input dimensions are interpreted in the form: . .. note:: The input quantization parameters propagate to the output. .. note:: Only 2D/3D input is supported for quantized inputs .. note:: Only the following modes are supported for the quantized inputs: - - Args: input (Tensor): the input tensor size (int or Tuple[int] or Tuple[int, int] or Tuple[int, int, int]): output spatial size. scale_factor (float or Tuple[float]): multiplier for spatial size. Has to match input size if it is a tuple. mode (str): algorithm used for upsampling: `scale_factormode`",
    "type": "function",
    "file_path": "pytorch\\torch\\ao\\nn\\quantized\\functional.py",
    "ast_data": "FunctionDef name:interpolate arg:input arg:size arg:scale_factor arg:mode arg:align_corners arguments arg arg arg arg arg If Raise Call Return return:yes Call"
  },
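A hedged sketch on a per-tensor-quantized input; note the output inherits the input's quantization parameters:

```python
import torch
import torch.ao.nn.quantized.functional as qF

x = torch.rand(1, 3, 4, 4)
qx = torch.quantize_per_tensor(x, scale=0.1, zero_point=0, dtype=torch.quint8)

qy = qF.interpolate(qx, scale_factor=2.0, mode="nearest")
print(qy.shape)       # torch.Size([1, 3, 8, 8])
print(qy.q_scale())   # 0.1: input qparams propagate to the output
```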
  {
    "library": "tensorflow",
    "name": "get_load_options",
    "source_code": "def get_load_options():\n    return _load_context.load_options()",
    "docstring": "Returns the load options under a load context.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\load_context.py",
    "ast_data": "FunctionDef name:get_load_options arguments Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "_pinv_1d",
    "source_code": "def _pinv_1d(v, eps=1e-05):\n    return np.array([0 if abs(x) <= eps else 1 / x for x in v], dtype=float)",
    "docstring": "A helper function for computing the pseudoinverse. Parameters ---------- v : iterable of numbers This may be thought of as a vector of eigenvalues or singular values. eps : float Values with magnitude no greater than eps are considered negligible. Returns ------- v_pinv : 1d float ndarray A vector of pseudo-inverted numbers.",
    "type": "function",
    "file_path": "scipy\\scipy\\stats\\_multivariate.py",
    "ast_data": "FunctionDef name:_pinv_1d arg:v arg:eps arguments arg arg Return return:yes Call Compare Call"
  },
  {
    "library": "matplotlib",
    "name": "get_data_ratio",
    "source_code": "def get_data_ratio(self):\n    txmin, txmax = self.xaxis.get_transform().transform(self.get_xbound())\n    tymin, tymax = self.yaxis.get_transform().transform(self.get_ybound())\n    xsize = max(abs(txmax - txmin), 1e-30)\n    ysize = max(abs(tymax - tymin), 1e-30)\n    return ysize / xsize",
    "docstring": "Return the aspect ratio of the scaled data. Notes ----- This method is intended to be overridden by new projection types.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\axes\\_base.py",
    "ast_data": "FunctionDef name:get_data_ratio arg:self arguments arg Assign Call Call Call Assign Call Call Call Assign Call Call Assign Call Call Return return:yes"
  },
  {
    "library": "django",
    "name": "RequestAborted",
    "source_code": "class RequestAborted(Exception):\n    pass",
    "docstring": "The request was closed before it was completed, or timed out.",
    "type": "class",
    "file_path": "django\\django\\core\\exceptions.py",
    "ast_data": "ClassDef name:RequestAborted"
  },
  {
    "library": "pytorch",
    "name": "MockCutlassHandler",
    "source_code": "class MockCutlassHandler(CutlassEVTOpsMixIn, WrapperHandler):\n    pass",
    "docstring": "Passthrough handler for cutlass ops, used for running epilogue nodes for memory planning",
    "type": "class",
    "file_path": "pytorch\\torch\\_inductor\\codegen\\cuda\\cutlass_python_evt.py",
    "ast_data": "ClassDef name:MockCutlassHandler"
  },
  {
    "library": "django",
    "name": "tuple",
    "source_code": "@property\ndef tuple(self):\n    if self.is_3d and self.is_measured:\n        return (self.x, self.y, self.z, self.m)\n    if self.is_3d:\n        return (self.x, self.y, self.z)\n    if self.is_measured:\n        return (self.x, self.y, self.m)\n    return (self.x, self.y)",
    "docstring": "Return the tuple of this point.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\gdal\\geometries.py",
    "ast_data": "FunctionDef name:tuple arg:self arguments arg If BoolOp Return return:yes If Return return:yes If Return return:yes Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "CloseableQueue",
    "source_code": "class CloseableQueue:\n\n    def __init__(self, maxsize=0):\n        self._maxsize = maxsize\n        self._queue = collections.deque()\n        self._closed = False\n        self._mutex = threading.Lock()\n        self._not_empty = threading.Condition(self._mutex)\n        self._not_full = threading.Condition(self._mutex)\n\n    def get(self):\n        with self._not_empty:\n            while not self._queue:\n                self._not_empty.wait()\n            item = self._queue.popleft()\n            self._not_full.notify()\n            return item\n\n    def put(self, item):\n        with self._not_full:\n            if self._closed:\n                raise QueueClosedError()\n            if self._maxsize > 0:\n                while len(self._queue) == self._maxsize:\n                    self._not_full.wait()\n                    if self._closed:\n                        raise QueueClosedError()\n            self._queue.append(item)\n            self._not_empty.notify()\n\n    def close(self):\n        with self._not_full:\n            self._closed = True\n            self._not_full.notify_all()",
    "docstring": "Stripped-down fork of the standard library Queue that is closeable.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\summary\\writer\\event_file_writer.py",
    "ast_data": "ClassDef name:CloseableQueue FunctionDef name:__init__ arg:self arg:maxsize arguments arg arg Assign Assign Call Assign Assign Call Assign Call Assign Call FunctionDef name:get arg:self arguments arg With While Call Assign Call Call Return return:yes FunctionDef name:put arg:self arg:item arguments arg arg With If Raise Call If Compare While Compare Call Call If Raise Call Call Call FunctionDef name:close arg:self arguments arg With Assign Call"
  },
  {
    "library": "pytorch",
    "name": "mutates_and_returns_first_arg",
    "source_code": "def mutates_and_returns_first_arg(op: OpOverload):\n    if op.namespace != 'aten':\n        return False\n    schema = op._schema\n    if not len(schema.returns) == 1:\n        return False\n    if schema.returns[0].alias_info is None:\n        return False\n    alias_set = schema.returns[0].alias_info.after_set\n    if len(alias_set) != 1:\n        return False\n    loc = next(iter(alias_set))\n    if len(schema.arguments) < 1:\n        return False\n    first_arg = schema.arguments[0]\n    if first_arg.alias_info is None:\n        return False\n    if not first_arg.alias_info.is_write:\n        return False\n    alias_set = first_arg.alias_info.after_set\n    if len(alias_set) != 1:\n        return False\n    if loc != next(iter(alias_set)):\n        return False\n    for arg in schema.arguments[1:]:\n        if arg.alias_info is not None:\n            return False\n    return True",
    "docstring": "Check if an op is an inplace aten op, i.e. it mutates and returns the first arg. TODO: torchgen/model.py's FunctionSchema.parse is the source of truth for this, but not all PyTorch builds have torchgen (due to the yaml dependency being weird). Figure this out. Example: add_(Tensor(a!) x, Tensor y) -> Tensor(a)",
    "type": "function",
    "file_path": "pytorch\\torch\\_library\\utils.py",
    "ast_data": "FunctionDef name:mutates_and_returns_first_arg arg:op arguments arg If Compare Return return:yes Assign If Compare Call Return return:yes If Compare Return return:yes Assign If Compare Call Return return:yes Assign Call Call If Compare Call Return return:yes Assign If Compare Return return:yes If Return return:yes Assign If Compare Call Return return:yes If Compare Call Call Return return:yes For If Compare Return return:yes Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "output_classes",
    "source_code": "@property\n@deprecation.deprecated(None, 'Use `tf.compat.v1.data.get_output_classes(iterator)`.')\ndef output_classes(self):\n    return nest.map_structure(lambda component_spec: component_spec._to_legacy_output_classes(), self._element_spec)",
    "docstring": "Returns the class of each component of an element of this iterator. The expected values are and . Returns: A (nested) structure of Python objects corresponding to each component of an element of this dataset.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\data\\ops\\iterator_ops.py",
    "ast_data": "FunctionDef name:output_classes arg:self arguments arg Return return:yes Call arguments arg Call Call"
  },
  {
    "library": "scipy",
    "name": "logpdf",
    "source_code": "def logpdf(self, X, mean=None, rowcov=1, colcov=1):\n    dims, mean, rowcov, colcov = self._process_parameters(mean, rowcov, colcov)\n    X = self._process_quantiles(X, dims)\n    rowpsd = _PSD(rowcov, allow_singular=False)\n    colpsd = _PSD(colcov, allow_singular=False)\n    out = self._logpdf(dims, X, mean, rowpsd.U, rowpsd.log_pdet, colpsd.U, colpsd.log_pdet)\n    return _squeeze_output(out)",
    "docstring": "Log of the matrix normal probability density function. Parameters ---------- X : array_like Quantiles, with the last two axes of denoting the components. %(_matnorm_doc_default_callparams)s Returns ------- logpdf : ndarray Log of the probability density function evaluated at Notes ----- %(_matnorm_doc_callparams_note)s",
    "type": "method",
    "file_path": "scipy\\scipy\\stats\\_multivariate.py",
    "ast_data": "FunctionDef name:logpdf arg:self arg:X arg:mean arg:rowcov arg:colcov arguments arg arg arg arg arg Assign Call Assign Call Assign Call Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "django",
    "name": "import_xml",
    "source_code": "def import_xml(self, xml):\n    capi.from_xml(self.ptr, xml)",
    "docstring": "Import the Spatial Reference from an XML string.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\gdal\\srs.py",
    "ast_data": "FunctionDef name:import_xml arg:self arg:xml arguments arg arg Call"
  },
  {
    "library": "pytorch",
    "name": "is_unbacked_symint",
    "source_code": "def is_unbacked_symint(self, symbol: sympy.Symbol) -> bool:\n    return symbol_is_type(symbol, SymT.UNBACKED_INT)",
    "docstring": "Check if a sympy symbol matches the naming convention for unbacked symbols",
    "type": "method",
    "file_path": "pytorch\\torch\\fx\\experimental\\symbolic_shapes.py",
    "ast_data": "FunctionDef name:is_unbacked_symint arg:self arg:symbol arguments arg arg Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "LogFormatterSciNotation",
    "source_code": "class LogFormatterSciNotation(LogFormatterMathtext):\n\n    def _non_decade_format(self, sign_string, base, fx, usetex):\n        b = float(base)\n        exponent = math.floor(fx)\n        coeff = b ** (fx - exponent)\n        if _is_close_to_int(coeff):\n            coeff = round(coeff)\n        return '$\\\\mathdefault{%s%g\\\\times%s^{%d}}$' % (sign_string, coeff, base, exponent)",
    "docstring": "Format values following scientific notation in a logarithmic axis.",
    "type": "class",
    "file_path": "matplotlib\\lib\\matplotlib\\ticker.py",
    "ast_data": "ClassDef name:LogFormatterSciNotation FunctionDef name:_non_decade_format arg:self arg:sign_string arg:base arg:fx arg:usetex arguments arg arg arg arg arg Assign Call Assign Call Assign If Call Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "get_collection",
    "source_code": "def get_collection(self, name):\n    scope = self._name + '/' if self._name else ''\n    return ops.get_collection(name, scope)",
    "docstring": "Get this scope's variables.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\variable_scope.py",
    "ast_data": "FunctionDef name:get_collection arg:self arg:name arguments arg arg Assign Return return:yes Call"
  },
  {
    "library": "sphinx",
    "name": "resolve_toctree",
    "source_code": "def resolve_toctree(self, docname: str, builder: Builder, toctree: addnodes.toctree, prune: bool=True, maxdepth: int=0, titles_only: bool=False, collapse: bool=False, includehidden: bool=False) -> Node | None:\n    return toctree_adapters._resolve_toctree(self, docname, builder, toctree, prune=prune, maxdepth=maxdepth, titles_only=titles_only, collapse=collapse, includehidden=includehidden, tags=builder.tags)",
    "docstring": "Resolve a *toctree* node into individual bullet lists with titles as items, returning None (if no containing titles are found) or a new node. If *prune* is True, the tree is pruned to *maxdepth*, or if that is 0, to the value of the *maxdepth* option on the *toctree* node. If *titles_only* is True, only toplevel document titles will be in the resulting tree. If *collapse* is True, all branches not containing docname will be collapsed.",
    "type": "method",
    "file_path": "sphinx\\sphinx\\environment\\__init__.py",
    "ast_data": "FunctionDef name:resolve_toctree arg:self arg:docname arg:builder arg:toctree arg:prune arg:maxdepth arg:titles_only arg:collapse arg:includehidden arguments arg arg arg arg arg arg arg arg arg Return return:yes Call"
  },
  {
    "library": "kornia",
    "name": "conv3x3",
    "source_code": "def conv3x3(in_planes: int, out_planes: int, stride: int=1) -> nn.Conv2d:\n    return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, padding=1, bias=False)",
    "docstring": "3x3 convolution with padding.",
    "type": "function",
    "file_path": "kornia\\kornia\\feature\\loftr\\backbone\\resnet_fpn.py",
    "ast_data": "FunctionDef name:conv3x3 arg:in_planes arg:out_planes arg:stride arguments arg arg arg Return return:yes Call"
  },
  {
    "library": "cherrypy",
    "name": "__call__",
    "source_code": "def __call__(self, *args, **kwargs):\n    return self.error(*args, **kwargs)",
    "docstring": "Record an error log entry.",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\_cplogging.py",
    "ast_data": "FunctionDef name:__call__ arg:self arguments arg arg arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "on_predict_batch_begin",
    "source_code": "def on_predict_batch_begin(self, batch, logs=None):\n    if self._should_call_predict_batch_hooks:\n        self._call_batch_hook(ModeKeys.PREDICT, 'begin', batch, logs=logs)",
    "docstring": "Calls the methods of its callbacks. Args: batch: Integer, index of batch within the current epoch. logs: Dict, contains the return value of , it typically returns a dict with a key 'outputs' containing the model's outputs.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\callbacks.py",
    "ast_data": "FunctionDef name:on_predict_batch_begin arg:self arg:batch arg:logs arguments arg arg arg If Call"
  },
  {
    "library": "tensorflow",
    "name": "UniquePtr",
    "source_code": "class UniquePtr(object):\n    __slots__ = ['_obj', 'deleter', 'name', 'type_name']\n\n    def __init__(self, name, obj, deleter):\n        self._obj = obj\n        self.name = name\n        self.deleter = deleter\n        self.type_name = str(type(obj))\n\n    @contextlib.contextmanager\n    def get(self):\n        if self._obj is None:\n            raise AlreadyGarbageCollectedError(self.name, self.type_name)\n        yield self._obj\n\n    def __del__(self):\n        obj = self._obj\n        if obj is not None:\n            self._obj = None\n            self.deleter(obj)",
    "docstring": "Wrapper around single-ownership C-API objects that handles deletion.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\c_api_util.py",
    "ast_data": "ClassDef name:UniquePtr Assign FunctionDef name:__init__ arg:self arg:name arg:obj arg:deleter arguments arg arg arg arg Assign Assign Assign Assign Call Call FunctionDef name:get arg:self arguments arg If Compare Raise Call FunctionDef name:__del__ arg:self arguments arg Assign If Compare Assign Call"
  },
  {
    "library": "tensorflow",
    "name": "trainable_variables",
    "source_code": "def trainable_variables(self):\n    return self.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES)",
    "docstring": "Get this scope's trainable variables.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\variable_scope.py",
    "ast_data": "FunctionDef name:trainable_variables arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "kornia",
    "name": "drop_add_residual_stochastic_depth_list",
    "source_code": "def drop_add_residual_stochastic_depth_list(x_list: List[Tensor], residual_func: Callable[[Tensor, Any], Tensor], sample_drop_ratio: float=0.0, scaling_vector=None) -> Tensor:\n    branges_scales = [get_branges_scales(x, sample_drop_ratio=sample_drop_ratio) for x in x_list]\n    branges = [s[0] for s in branges_scales]\n    residual_scale_factors = [s[1] for s in branges_scales]\n    attn_bias, x_cat = get_attn_bias_and_cat(x_list, branges)\n    residual_list = attn_bias.split(residual_func(x_cat, attn_bias=attn_bias))\n    outputs = []\n    for x, brange, residual, residual_scale_factor in zip(x_list, branges, residual_list, residual_scale_factors):\n        outputs.append(add_residual(x, brange, residual, residual_scale_factor, scaling_vector).view_as(x))\n    return outputs",
    "docstring": "Add residual connections to a list of tensors.",
    "type": "function",
    "file_path": "kornia\\kornia\\feature\\dedode\\transformer\\layers\\block.py",
    "ast_data": "FunctionDef name:drop_add_residual_stochastic_depth_list arg:x_list arg:residual_func arg:sample_drop_ratio arg:scaling_vector arguments arg arg arg arg Assign Call Assign Assign Assign Call Assign Call Call Assign For Call Call Call Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "score_variant",
    "source_code": "def score_variant(self, variant1, variant2):\n    if variant1 == variant2:\n        return 0.0\n    else:\n        return 1.0",
    "docstring": "Return a match score between *variant1* and *variant2*. An exact match returns 0.0, otherwise 1.0.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\font_manager.py",
    "ast_data": "FunctionDef name:score_variant arg:self arg:variant1 arg:variant2 arguments arg arg arg If Compare Return return:yes Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "__init__",
    "source_code": "def __init__(self, parent, subplotspec, *, facecolor=None, edgecolor=None, linewidth=0.0, frameon=None, **kwargs):\n    super().__init__(**kwargs)\n    if facecolor is None:\n        facecolor = 'none'\n    edgecolor = mpl._val_or_rc(edgecolor, 'figure.edgecolor')\n    frameon = mpl._val_or_rc(frameon, 'figure.frameon')\n    self._subplotspec = subplotspec\n    self._parent = parent\n    self._root_figure = parent._root_figure\n    self._axstack = parent._axstack\n    self.subplotpars = parent.subplotpars\n    self.dpi_scale_trans = parent.dpi_scale_trans\n    self._axobservers = parent._axobservers\n    self.transFigure = parent.transFigure\n    self.bbox_relative = Bbox.null()\n    self._redo_transform_rel_fig()\n    self.figbbox = self._parent.figbbox\n    self.bbox = TransformedBbox(self.bbox_relative, self._parent.transSubfigure)\n    self.transSubfigure = BboxTransformTo(self.bbox)\n    self.patch = Rectangle(xy=(0, 0), width=1, height=1, visible=frameon, facecolor=facecolor, edgecolor=edgecolor, linewidth=linewidth, in_layout=False, transform=self.transSubfigure)\n    self._set_artist_props(self.patch)\n    self.patch.set_antialiased(False)",
    "docstring": "Parameters ---------- parent : or Figure or subfigure that contains the SubFigure. SubFigures can be nested. subplotspec : Defines the region in a parent gridspec where the subfigure will be placed. facecolor : default: `figure.edgecolorfigure.frameon.SubFigure` properties, optional %(SubFigure:kwdoc)s",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\figure.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:parent arg:subplotspec arguments arg arg arg arg arg arg arg arg Call Call If Compare Assign Assign Call Assign Call Assign Assign Assign Assign Assign Assign Assign Assign Assign Call Call Assign Assign Call Assign Call Assign Call Call Call"
  },
  {
    "library": "scikit-learn",
    "name": "_aggregate_score_dicts",
    "source_code": "def _aggregate_score_dicts(scores):\n    return {key: np.asarray([score[key] for score in scores]) if isinstance(scores[0][key], numbers.Number) else [score[key] for score in scores] for key in scores[0]}",
    "docstring": "Aggregate the list of dict to dict of np ndarray The aggregated output of _aggregate_score_dicts will be a list of dict of form [{'prec': 0.1, 'acc':1.0}, {'prec': 0.1, 'acc':1.0}, ...] Convert it to a dict of array {'prec': np.array([0.1 ...]), ...} Parameters ---------- scores : list of dict List of dicts of the scores for all scorers. This is a flat list, assumed originally to be of row major order. Example ------- >>> scores = [{'a': 1, 'b':10}, {'a': 2, 'b':2}, {'a': 3, 'b':3}, ... {'a': 10, 'b': 10}] # doctest: +SKIP >>> _aggregate_score_dicts(scores) # doctest: +SKIP {'a': array([1, 2, 3, 10]), 'b': array([10, 2, 3, 10])}",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\model_selection\\_validation.py",
    "ast_data": "FunctionDef name:_aggregate_score_dicts arg:scores arguments arg Return return:yes Call Call"
  },
  {
    "library": "authlib",
    "name": "handle_response",
    "source_code": "def handle_response(self, status, body, headers):\n    raise NotImplementedError()",
    "docstring": "Return HTTP response. Framework MUST implement this function.",
    "type": "method",
    "file_path": "authlib\\authlib\\oauth2\\rfc6749\\authorization_server.py",
    "ast_data": "FunctionDef name:handle_response arg:self arg:status arg:body arg:headers arguments arg arg arg arg Raise Call"
  },
  {
    "library": "pytorch",
    "name": "PythonCode",
    "source_code": "@compatibility(is_backward_compatible=True)\n@dataclass\nclass PythonCode:\n    src: str\n    globals: dict[str, Any]\n    _lineno_map: Optional[dict[int, Optional[int]]]",
    "docstring": "Represents all the information necessary to exec or save a graph as Python code.",
    "type": "class",
    "file_path": "pytorch\\torch\\fx\\graph.py",
    "ast_data": "ClassDef name:PythonCode Call"
  },
  {
    "library": "tensorflow",
    "name": "_set_variable_or_list_initializer",
    "source_code": "def _set_variable_or_list_initializer(variable_or_list, ckpt_file, tensor_name):\n    if isinstance(variable_or_list, (list, tuple)):\n        slice_name = None\n        for v in variable_or_list:\n            slice_info = v._save_slice_info\n            if slice_name is None:\n                slice_name = slice_info.full_name\n            elif slice_name != slice_info.full_name:\n                raise ValueError('Slices must all be from the same tensor: %s != %s' % (slice_name, slice_info.full_name))\n            _set_checkpoint_initializer(v, ckpt_file, tensor_name, slice_info.spec)\n    else:\n        _set_checkpoint_initializer(variable_or_list, ckpt_file, tensor_name, '')",
    "docstring": "Overrides initialization op of given variable or list of variables. Calls for each variable in the given list of variables. Args: variable_or_list: object or a list of objects. ckpt_file: string, full path of the checkpoint. tensor_name: Name of the tensor to load from the checkpoint. Raises: ValueError: if all objects in are not partitions of the same large variable.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\training\\checkpoint_utils.py",
    "ast_data": "FunctionDef name:_set_variable_or_list_initializer arg:variable_or_list arg:ckpt_file arg:tensor_name arguments arg arg arg If Call Assign For Assign If Compare Assign If Compare Raise Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "init",
    "source_code": "def init(self):\n    if context.executing_eagerly() and self._closed:\n        raise RuntimeError(f'SummaryWriter {self!r} is already closed')\n    return self._init_op",
    "docstring": "See .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\summary_ops_v2.py",
    "ast_data": "FunctionDef name:init arg:self arguments arg If BoolOp Call Raise Call Return return:yes"
  },
  {
    "library": "pandas",
    "name": "format",
    "source_code": "def format(self, formatter: ExtFormatter | None=None, subset: Subset | None=None, na_rep: str | None=None, precision: int | None=None, decimal: str='.', thousands: str | None=None, escape: str | None=None, hyperlinks: str | None=None) -> StylerRenderer:\n    if all((formatter is None, subset is None, precision is None, decimal == '.', thousands is None, na_rep is None, escape is None, hyperlinks is None)):\n        self._display_funcs.clear()\n        return self\n    subset = slice(None) if subset is None else subset\n    subset = non_reducing_slice(subset)\n    data = self.data.loc[subset]\n    if not isinstance(formatter, dict):\n        formatter = dict.fromkeys(data.columns, formatter)\n    cis = self.columns.get_indexer_for(data.columns)\n    ris = self.index.get_indexer_for(data.index)\n    for ci in cis:\n        format_func = _maybe_wrap_formatter(formatter.get(self.columns[ci]), na_rep=na_rep, precision=precision, decimal=decimal, thousands=thousands, escape=escape, hyperlinks=hyperlinks)\n        for ri in ris:\n            self._display_funcs[ri, ci] = format_func\n    return self",
    "docstring": "Format the text display value of cells. Parameters ---------- formatter : str, callable, dict or None Object to define how values are displayed. See notes. subset : label, array-like, IndexSlice, optional A valid 2d input to , or, in the case of a 1d input or single key, to where the columns are prioritised, to limit ```ValueErrorStyler.formatStyler.to_excelnumber-formatnumber-format.formatto_excel` permissible formatting. Note that semi-colons are CSS protected characters but used as separators in Excel's format string. Replace semi-colons with the section separator character (ASCII-245) when defining the formatting here. >>> df = pd.DataFrame({\"A\": [1, 0, -1]}) >>> pseudo_css = \"number-format: 0§§-§@;\" >>> filename = \"formatted_file.xlsx\" >>> df.style.map(lambda v: pseudo_css).to_excel(filename) # doctest: +SKIP .. figure:: ../../_static/style/format_excel_css.png",
    "type": "method",
    "file_path": "pandas\\pandas\\io\\formats\\style_render.py",
    "ast_data": "FunctionDef name:format arg:self arg:formatter arg:subset arg:na_rep arg:precision arg:decimal arg:thousands arg:escape arg:hyperlinks arguments arg arg arg arg arg arg arg arg arg If Call Compare Compare Compare Compare Compare Compare Compare Compare Call Return return:yes Assign Compare Call Assign Call Assign If Call Assign Call Assign Call Assign Call For Assign Call Call For Assign Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "fx_nodes",
    "source_code": "def fx_nodes(self) -> Generator[torch.fx.Node, None, None]:\n    for node in self._nodes:\n        if isinstance(node, _ModuleNode):\n            yield from node.fx_nodes()\n        else:\n            assert isinstance(node, _LeafNode)\n            yield node.fx_node",
    "docstring": "Returns an iterator for the sequence of fx nodes this instance holds.",
    "type": "method",
    "file_path": "pytorch\\torch\\onnx\\_internal\\fx\\passes\\modularization.py",
    "ast_data": "FunctionDef name:fx_nodes arg:self arguments arg For If Call Call Call"
  },
  {
    "library": "kornia",
    "name": "Rescale",
    "source_code": "class Rescale(Module):\n\n    def __init__(self, factor: Union[float, Tuple[float, float]], interpolation: str='bilinear', align_corners: bool=True, antialias: bool=False) -> None:\n        super().__init__()\n        self.factor: Union[float, Tuple[float, float]] = factor\n        self.interpolation: str = interpolation\n        self.align_corners: Optional[bool] = align_corners\n        self.antialias: bool = antialias\n\n    def forward(self, input: Tensor) -> Tensor:\n        return rescale(input, self.factor, self.interpolation, align_corners=self.align_corners, antialias=self.antialias)",
    "docstring": "Rescale the input Tensor with the given factor. Args: factor: Desired scaling factor in each direction. If scalar, the value is used for both the x- and y-direction. interpolation: algorithm used for upsampling: ``. antialias: if True, then image will be filtered with Gaussian before downscaling. No effect for upscaling. Returns: The rescaled tensor with the shape according to the given factor. Example: >>> img = torch.rand(1, 3, 4, 4) >>> out = Rescale((2, 3))(img) >>> print(out.shape) torch.Size([1, 3, 8, 12])",
    "type": "class",
    "file_path": "kornia\\kornia\\geometry\\transform\\affwarp.py",
    "ast_data": "ClassDef name:Rescale FunctionDef name:__init__ arg:self arg:factor arg:interpolation arg:align_corners arg:antialias arguments arg arg arg arg arg Call Call FunctionDef name:forward arg:self arg:input arguments arg arg Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "_set_diag",
    "source_code": "def _set_diag(laplacian, value, norm_laplacian):\n    n_nodes = laplacian.shape[0]\n    if not sparse.issparse(laplacian):\n        if norm_laplacian:\n            laplacian.flat[::n_nodes + 1] = value\n    else:\n        laplacian = laplacian.tocoo()\n        if norm_laplacian:\n            diag_idx = laplacian.row == laplacian.col\n            laplacian.data[diag_idx] = value\n        n_diags = np.unique(laplacian.row - laplacian.col).size\n        if n_diags <= 7:\n            laplacian = laplacian.todia()\n        else:\n            laplacian = laplacian.tocsr()\n    return laplacian",
    "docstring": "Set the diagonal of the laplacian matrix and convert it to a sparse format well suited for eigenvalue decomposition. Parameters ---------- laplacian : {ndarray, sparse matrix} The graph laplacian. value : float The value of the diagonal. norm_laplacian : bool Whether the value of the diagonal should be changed or not. Returns ------- laplacian : {array, sparse matrix} An array of matrix in a form that is well suited to fast eigenvalue decomposition, depending on the band width of the matrix.",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\manifold\\_spectral_embedding.py",
    "ast_data": "FunctionDef name:_set_diag arg:laplacian arg:value arg:norm_laplacian arguments arg arg arg Assign If Call If Assign Assign Call If Assign Compare Assign Assign Call If Compare Assign Call Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_filter_top_k",
    "source_code": "def _filter_top_k(x, k):\n    _, top_k_idx = nn_ops.top_k(x, k, sorted=False)\n    top_k_mask = math_ops.reduce_sum(array_ops.one_hot(top_k_idx, array_ops.shape(x)[-1], axis=-1), axis=-2)\n    return x * top_k_mask + NEG_INF * (1 - top_k_mask)",
    "docstring": "Filters top-k values in the last dim of x and set the rest to NEG_INF. Used for computing top-k prediction values in dense labels (which has the same shape as predictions) for recall and precision top-k metrics. Args: x: tensor with any dimensions. k: the number of values to keep. Returns: tensor with same shape and dtype as x.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\utils\\metrics_utils.py",
    "ast_data": "FunctionDef name:_filter_top_k arg:x arg:k arguments arg arg Assign Call Assign Call Call Call Return return:yes"
  },
  {
    "library": "django",
    "name": "__or__",
    "source_code": "def __or__(self, other):\n    return self.union(other)",
    "docstring": "Return the union of this Geometry and the other.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\geos\\geometry.py",
    "ast_data": "FunctionDef name:__or__ arg:self arg:other arguments arg arg Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "BadNumericalValue",
    "source_code": "class BadNumericalValue(ArffException):\n    message = 'Invalid numerical value, at line %d.'",
    "docstring": "Error raised when and invalid numerical value is used in some data instance.",
    "type": "class",
    "file_path": "scikit-learn\\sklearn\\externals\\_arff.py",
    "ast_data": "ClassDef name:BadNumericalValue Assign"
  },
  {
    "library": "pytorch",
    "name": "convert_frame_assert",
    "source_code": "def convert_frame_assert(compiler_fn: CompilerFn, one_graph: bool=True, export: bool=False, export_constraints: Optional[typing.Never]=None) -> ConvertFrameAssert:\n    return ConvertFrameAssert(compiler_fn, one_graph, export, export_constraints)",
    "docstring": "Fully convert a frame into an FX graph",
    "type": "function",
    "file_path": "pytorch\\torch\\_dynamo\\convert_frame.py",
    "ast_data": "FunctionDef name:convert_frame_assert arg:compiler_fn arg:one_graph arg:export arg:export_constraints arguments arg arg arg arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "_uint_to_uniform_float",
    "source_code": "def _uint_to_uniform_float(x):\n    assert x.type() == hl.UInt(32) or x.type() == hl.Int(32)\n    x = hl.cast(hl.Int(32), x)\n    scale = hl.f64(4.6566127342e-10)\n    x = hl.select(x < 0, -x - 1, x)\n    return x * scale",
    "docstring": "Numerically stable function to convert a random uint into a random float uniformly sampled in [0, 1).",
    "type": "function",
    "file_path": "pytorch\\torch\\_inductor\\runtime\\halide_helpers.py",
    "ast_data": "FunctionDef name:_uint_to_uniform_float arg:x arguments arg BoolOp Compare Call Call Compare Call Call Assign Call Call Assign Call Assign Call Compare Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "set_powerlimits",
    "source_code": "def set_powerlimits(self, lims):\n    if len(lims) != 2:\n        raise ValueError(\"'lims' must be a sequence of length 2\")\n    self._powerlimits = lims",
    "docstring": "Set size thresholds for scientific notation. Parameters ---------- lims : (int, int) A tuple *(min_exp, max_exp)* containing the powers of 10 that determine the switchover threshold. For a number representable as :math: with :math:axes.formatter.limits1 \\times 10^{-3}, 9.9 \\times 10^{-3}, 0.01,9999, 1 \\times 10^4`. See Also -------- ScalarFormatter.set_scientific",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\ticker.py",
    "ast_data": "FunctionDef name:set_powerlimits arg:self arg:lims arguments arg arg If Compare Call Raise Call Assign"
  },
  {
    "library": "tensorflow",
    "name": "_AssertGrayscaleImage",
    "source_code": "def _AssertGrayscaleImage(image):\n    return control_flow_ops.with_dependencies(_CheckGrayscaleImage(image, require_static=False), image)",
    "docstring": "Assert that we are working with a properly shaped grayscale image. Performs the check statically if possible (i.e. if the shape is statically known). Otherwise adds a control dependency to an assert op that checks the dynamic shape. Args: image: >= 2-D Tensor of size [*, 1] Raises: ValueError: if image.shape is not a [>= 2] vector or if last dimension is not size 1. Returns: If the shape of could be verified statically, is returned unchanged, otherwise there will be a control dependency added that asserts the correct dynamic shape.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\image_ops_impl.py",
    "ast_data": "FunctionDef name:_AssertGrayscaleImage arg:image arguments arg Return return:yes Call Call"
  },
  {
    "library": "authlib",
    "name": "UnsupportedGrantTypeError",
    "source_code": "class UnsupportedGrantTypeError(OAuth2Error):\n    error = 'unsupported_grant_type'\n\n    def __init__(self, grant_type):\n        super().__init__()\n        self.grant_type = grant_type\n\n    def get_error_description(self):\n        return f'grant_type={self.grant_type} is not supported'",
    "docstring": "The authorization grant type is not supported by the authorization server.",
    "type": "class",
    "file_path": "authlib\\authlib\\oauth2\\rfc6749\\errors.py",
    "ast_data": "ClassDef name:UnsupportedGrantTypeError Assign FunctionDef name:__init__ arg:self arg:grant_type arguments arg arg Call Call Assign FunctionDef name:get_error_description arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "order_by_dependency",
    "source_code": "def order_by_dependency(dependency_map):\n    reverse_dependency_map = collections.defaultdict(set)\n    for x, deps in dependency_map.items():\n        for dep in deps:\n            reverse_dependency_map[dep].add(x)\n    unknown_keys = reverse_dependency_map.keys() - dependency_map.keys()\n    if unknown_keys:\n        raise ValueError(f'Found values in the dependency map which are not keys: {unknown_keys}')\n    reversed_dependency_arr = []\n    to_visit = [x for x in dependency_map if x not in reverse_dependency_map]\n    while to_visit:\n        x = to_visit.pop(0)\n        reversed_dependency_arr.append(x)\n        for dep in set(dependency_map[x]):\n            edges = reverse_dependency_map[dep]\n            edges.remove(x)\n            if not edges:\n                to_visit.append(dep)\n                reverse_dependency_map.pop(dep)\n    if reverse_dependency_map:\n        leftover_dependency_map = collections.defaultdict(list)\n        for dep, xs in reverse_dependency_map.items():\n            for x in xs:\n                leftover_dependency_map[x].append(dep)\n        raise CyclicDependencyError(leftover_dependency_map)\n    return reversed(reversed_dependency_arr)",
    "docstring": "Topologically sorts the keys of a map so that dependencies appear first. Uses Kahn's algorithm: Args: dependency_map: a dict mapping values to a list of dependencies (other keys in the map). All keys and dependencies must be hashable types. Returns: A sorted array of keys from dependency_map. Raises: CyclicDependencyError: if there is a cycle in the graph. ValueError: If there are values in the dependency map that are not keys in the map.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\trackable\\trackable_utils.py",
    "ast_data": "FunctionDef name:order_by_dependency arg:dependency_map arguments arg Assign Call For Call For Call Assign Call Call If Raise Call Assign Assign Compare While Assign Call Call For Call Assign Call If Call Call If Assign Call For Call For Call Raise Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "interpolate",
    "source_code": "def interpolate(self, message, node_names, graph_debug_info):\n    error_message = ['Graph execution error:', '']\n    traces = tf_stack.LoadTracesFromDebugInfo(graph_debug_info)\n    for node_name in node_names:\n        error_message.append(f'Detected at node {node_name} defined at (most recent call last):')\n        if node_name in traces:\n            stack_trace = traces[node_name]\n            for formatted_frame in traceback.format_list(stack_trace):\n                if not any((p in formatted_frame for p in self.DENY_LIST_PHRASES)):\n                    error_message.append(formatted_frame)\n        else:\n            error_message.append('<stack traces unavailable>')\n    error_message.append(message.strip())\n    return '\\n'.join(error_message)",
    "docstring": "Uses the GraphDebugInfo to generate an error message.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\polymorphic_function\\atomic_function.py",
    "ast_data": "FunctionDef name:interpolate arg:self arg:message arg:node_names arg:graph_debug_info arguments arg arg arg arg Assign Assign Call For Call If Compare Assign For Call If Call Compare Call Call Call Call Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "factorize_from_iterable",
    "source_code": "def factorize_from_iterable(values) -> tuple[np.ndarray, Index]:\n    from pandas import CategoricalIndex\n    if not is_list_like(values):\n        raise TypeError('Input must be list-like')\n    categories: Index\n    vdtype = getattr(values, 'dtype', None)\n    if isinstance(vdtype, CategoricalDtype):\n        values = extract_array(values)\n        cat_codes = np.arange(len(values.categories), dtype=values.codes.dtype)\n        cat = Categorical.from_codes(cat_codes, dtype=values.dtype, validate=False)\n        categories = CategoricalIndex(cat)\n        codes = values.codes\n    else:\n        cat = Categorical(values, ordered=False)\n        categories = cat.categories\n        codes = cat.codes\n    return (codes, categories)",
    "docstring": "Factorize an input into and . Preserves categorical dtype in . Parameters ---------- values : list-like Returns ------- codes : ndarray categories : Index If has a categorical dtype, then is a CategoricalIndex keeping the categories and order of .",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\arrays\\categorical.py",
    "ast_data": "FunctionDef name:factorize_from_iterable arg:values arguments arg If Call Raise Call Assign Call If Call Assign Call Assign Call Call Assign Call Assign Call Assign Assign Call Assign Assign Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_deregister_orig_params_ctx",
    "source_code": "@contextlib.contextmanager\ndef _deregister_orig_params_ctx(self):\n    _p_assert(self._use_orig_params, '`_deregister_orig_params_ctx()` should only be called when `_use_orig_params=True`')\n    for fsdp_module in traversal_utils._get_fsdp_states(self):\n        _deregister_orig_params(fsdp_module, fsdp_module)\n    try:\n        yield\n    finally:\n        for fsdp_module in traversal_utils._get_fsdp_states(self):\n            _register_orig_params(fsdp_module, fsdp_module)",
    "docstring": "Deregister the original parameters and expose the :class:. If a :class: is sharded, then this refreshes the sharded views before exiting. This method should only be called when using the original parameters.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\fsdp\\fully_sharded_data_parallel.py",
    "ast_data": "FunctionDef name:_deregister_orig_params_ctx arg:self arguments arg Call For Call Call Try For Call Call"
  },
  {
    "library": "tensorflow",
    "name": "assert_negative_v2",
    "source_code": "@tf_export('debugging.assert_negative', v1=[])\n@dispatch.add_dispatch_support\ndef assert_negative_v2(x, message=None, summarize=None, name=None):\n    return assert_negative(x=x, message=message, summarize=summarize, name=name)",
    "docstring": "Assert the condition holds element-wise. This Op checks that holds for every element of . If is empty, this is trivially satisfied. If is not negative everywhere, , as well as the first entries of are printed, and is raised. Args: x: Numeric . message: A string to prefix to the default message. summarize: Print this many entries of each tensor. name: A name for this operation (optional). Defaults to \"assert_negative\". Returns: Op raising unless is all negative. This can be used with inside of s to block followup computation until the check has executed. @compatibility(eager) returns None @end_compatibility Raises: InvalidArgumentError: if the check can be performed immediately and is False. The check can be performed immediately during eager execution or if is statically known.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\check_ops.py",
    "ast_data": "FunctionDef name:assert_negative_v2 arg:x arg:message arg:summarize arg:name arguments arg arg arg arg Return return:yes Call Call"
  },
  {
    "library": "django",
    "name": "add_never_cache_headers",
    "source_code": "def add_never_cache_headers(response):\n    patch_response_headers(response, cache_timeout=-1)\n    patch_cache_control(response, no_cache=True, no_store=True, must_revalidate=True, private=True)",
    "docstring": "Add headers to a response to indicate that a page should never be cached.",
    "type": "function",
    "file_path": "django\\django\\utils\\cache.py",
    "ast_data": "FunctionDef name:add_never_cache_headers arg:response arguments arg Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_get_dim",
    "source_code": "def _get_dim(tensor, i):\n    return tensor_shape.dimension_value(tensor.shape[i]) or array_ops.shape(tensor)[i]",
    "docstring": "Get value of tensor shape[i] preferring static value if available.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\ctc_ops.py",
    "ast_data": "FunctionDef name:_get_dim arg:tensor arg:i arguments arg arg Return return:yes BoolOp Call Call"
  },
  {
    "library": "tensorflow",
    "name": "is_unsupported",
    "source_code": "def is_unsupported(o):\n    if _is_known_loaded_type(o, 'wrapt', 'FunctionWrapper') or _is_known_loaded_type(o, 'wrapt', 'BoundFunctionWrapper'):\n        logging.warning('{} appears to be decorated by wrapt, which is not yet supported by AutoGraph. The function will run as-is. You may still apply AutoGraph before the wrapt decorator.'.format(o))\n        logging.log(2, 'Permanently allowed: %s: wrapt decorated', o)\n        return True\n    if _is_known_loaded_type(o, 'functools', '_lru_cache_wrapper'):\n        logging.log(2, 'Permanently allowed: %s: lru_cache', o)\n        return True\n    if inspect_utils.isconstructor(o):\n        logging.log(2, 'Permanently allowed: %s: constructor', o)\n        return True\n    if any((_is_of_known_loaded_module(o, m) for m in ('collections', 'pdb', 'copy', 'inspect', 're'))):\n        logging.log(2, 'Permanently allowed: %s: part of builtin module', o)\n        return True\n    if hasattr(o, '__module__') and hasattr(o.__module__, '_IS_TENSORFLOW_PLUGIN'):\n        logging.log(2, 'Permanently allowed: %s: TensorFlow plugin', o)\n        return True\n    return False",
    "docstring": "Checks whether an entity is supported by AutoGraph at all.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\autograph\\impl\\conversion.py",
    "ast_data": "FunctionDef name:is_unsupported arg:o arguments arg If BoolOp Call Call Call Call Call Return return:yes If Call Call Return return:yes If Call Call Return return:yes If Call Call Call Return return:yes If BoolOp Call Call Call Return return:yes Return return:yes"
  },
  {
    "library": "django",
    "name": "delete_existing",
    "source_code": "def delete_existing(self, obj, commit=True):\n    if commit:\n        obj.delete()",
    "docstring": "Deletes an existing model instance.",
    "type": "method",
    "file_path": "django\\django\\forms\\models.py",
    "ast_data": "FunctionDef name:delete_existing arg:self arg:obj arg:commit arguments arg arg arg If Call"
  },
  {
    "library": "numpy",
    "name": "get_flags_debug",
    "source_code": "def get_flags_debug(self):\n    return []",
    "docstring": "List of compiler flags to compile with debugging information.",
    "type": "method",
    "file_path": "numpy\\numpy\\distutils\\fcompiler\\__init__.py",
    "ast_data": "FunctionDef name:get_flags_debug arg:self arguments arg Return return:no"
  },
  {
    "library": "scikit-learn",
    "name": "_update_filter_sdas",
    "source_code": "def _update_filter_sdas(sdas, mib, xi_complement, reachability_plot):\n    if np.isinf(mib):\n        return []\n    res = [sda for sda in sdas if mib <= reachability_plot[sda['start']] * xi_complement]\n    for sda in res:\n        sda['mib'] = max(sda['mib'], mib)\n    return res",
    "docstring": "Update steep down areas (SDAs) using the new maximum in between (mib) value, and the given complement of xi, i.e. ``.",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\cluster\\_optics.py",
    "ast_data": "FunctionDef name:_update_filter_sdas arg:sdas arg:mib arg:xi_complement arg:reachability_plot arguments arg arg arg arg If Call Return return:no Assign Compare For Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "graph_v1",
    "source_code": "def graph_v1(param, step=None, name=None):\n    if not context.executing_eagerly() and (not isinstance(param, tensor_lib.Tensor)):\n        raise TypeError(f'graph() needs a argument `param` to be tf.Tensor (e.g. tf.placeholder) in graph mode, but received param={param} of type {type(param).__name__}.')\n    writer = _summary_state.writer\n    if writer is None:\n        return control_flow_ops.no_op()\n    with ops.device('cpu:0'):\n        if isinstance(param, (ops.Graph, graph_pb2.GraphDef)):\n            tensor = ops.convert_to_tensor(_serialize_graph(param), dtypes.string)\n        else:\n            tensor = array_ops.identity(param)\n        return gen_summary_ops.write_graph_summary(writer._resource, _choose_step(step), tensor, name=name)",
    "docstring": "Writes a TensorFlow graph to the summary interface. The graph summary is, strictly speaking, not a summary. Conditions like do not apply. Only a single graph can be associated with a particular run. If multiple graphs are written, then only the last one will be considered by TensorBoard. When not using eager execution mode, the user should consider passing the parameter to instead of calling this function. Otherwise special care needs to be taken when using the graph to record the graph. Args: param: A containing a serialized graph proto. When eager execution is enabled, this function will automatically coerce , , and string types. step: The global step variable. This doesn't have useful semantics for graph summaries, but is used anyway, due to the structure of event log files. This defaults to the global step. name: A name for the operation (optional). Returns: The created or a if summary writing has not been enabled for this context. Raises: TypeError: If isn't already a in graph mode.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\summary_ops_v2.py",
    "ast_data": "FunctionDef name:graph_v1 arg:param arg:step arg:name arguments arg arg arg If BoolOp Call Call Raise Call Call Assign If Compare Return return:yes Call With Call If Call Assign Call Call Assign Call Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "tril_matrix_to_vec",
    "source_code": "def tril_matrix_to_vec(mat: Tensor, diag: int=0) -> Tensor:\n    n = mat.shape[-1]\n    if not torch._C._get_tracing_state() and (diag < -n or diag >= n):\n        raise ValueError(f'diag ({diag}) provided is outside [{-n}, {n - 1}].')\n    arange = torch.arange(n, device=mat.device)\n    tril_mask = arange < arange.view(-1, 1) + (diag + 1)\n    vec = mat[..., tril_mask]\n    return vec",
    "docstring": "Convert a matrix or a batch of matrices into a (batched) vector which comprises of lower triangular elements from the matrix in row order.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributions\\utils.py",
    "ast_data": "FunctionDef name:tril_matrix_to_vec arg:mat arg:diag arguments arg arg Assign If BoolOp Call BoolOp Compare Compare Raise Call Assign Call Assign Compare Call Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "squeeze_batch_dims",
    "source_code": "def squeeze_batch_dims(inp, op, inner_rank):\n    with ops.name_scope_v2('squeeze_batch_dims'):\n        shape = inp.shape\n        inner_shape = shape[-inner_rank:]\n        if not inner_shape.is_fully_defined():\n            inner_shape = array_ops.shape(inp)[-inner_rank:]\n        batch_shape = shape[:-inner_rank]\n        if not batch_shape.is_fully_defined():\n            batch_shape = array_ops.shape(inp)[:-inner_rank]\n        if isinstance(inner_shape, tensor_shape.TensorShape):\n            inp_reshaped = array_ops.reshape(inp, [-1] + inner_shape.as_list())\n        else:\n            inp_reshaped = array_ops.reshape(inp, array_ops.concat(([-1], inner_shape), axis=-1))\n        out_reshaped = op(inp_reshaped)\n        out_inner_shape = out_reshaped.shape[-inner_rank:]\n        if not out_inner_shape.is_fully_defined():\n            out_inner_shape = array_ops.shape(out_reshaped)[-inner_rank:]\n        out = array_ops.reshape(out_reshaped, array_ops.concat((batch_shape, out_inner_shape), axis=-1))\n        out.set_shape(inp.shape[:-inner_rank] + out.shape[-inner_rank:])\n        return out",
    "docstring": "Returns . Where reshapes to shape and does the reverse reshape but on the output. Args: inp: A tensor with dims where is length . op: A callable that takes a single input tensor and returns a single. output tensor. inner_rank: A python integer. Returns: .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\utils\\conv_utils.py",
    "ast_data": "FunctionDef name:squeeze_batch_dims arg:inp arg:op arg:inner_rank arguments arg arg arg With Call Assign Assign If Call Assign Call Assign If Call Assign Call If Call Assign Call Call Assign Call Call Assign Call Assign If Call Assign Call Assign Call Call Call Return return:yes"
  },
  {
    "library": "scipy",
    "name": "Problem09",
    "source_code": "class Problem09(Benchmark):\n\n    def __init__(self, dimensions=1):\n        Benchmark.__init__(self, dimensions)\n        self._bounds = [(3.1, 20.4)]\n        self.global_optimum = 17.039\n        self.fglob = -1.90596\n\n    def fun(self, x, *args):\n        self.nfev += 1\n        x = x[0]\n        return sin(x) + sin(2.0 / 3.0 * x)",
    "docstring": "Univariate Problem09 objective function. This class defines the Univariate Problem09 global optimization problem. This is a multimodal minimization problem defined as follows: .. math:: f_{\\text{Problem09}}(x) = \\sin(x) + \\sin \\left(\\frac{2}{3} x \\right) Bound constraints: :math: .. figure:: figures/Problem09.png :alt: Univariate Problem09 function :align: center **Univariate Problem09 function** *Global optimum*: :math: for :math:",
    "type": "class",
    "file_path": "scipy\\benchmarks\\benchmarks\\go_benchmark_functions\\go_funcs_univariate.py",
    "ast_data": "ClassDef name:Problem09 FunctionDef name:__init__ arg:self arg:dimensions arguments arg arg Call Assign Assign Assign FunctionDef name:fun arg:self arg:x arguments arg arg arg Assign Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "to_code_v1",
    "source_code": "@tf_export(v1=['autograph.to_code'])\ndef to_code_v1(entity, recursive=True, arg_values=None, arg_types=None, indentation='  ', experimental_optional_features=None):\n    del arg_values\n    del arg_types\n    del indentation\n    return to_code(entity, recursive=recursive, experimental_optional_features=experimental_optional_features)",
    "docstring": "Returns the source code generated by AutoGraph, as a string. Example usage: >>> def f(x): ... if x >> tf.autograph.to_code(f) \"...def tf__f(x):...\" Also see: . Note: If a function has been decorated with , pass its underlying Python function, rather than the callable that Nonetf.autograph.experimental.Feature` value. Returns: The converted code as string.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\autograph\\impl\\api.py",
    "ast_data": "FunctionDef name:to_code_v1 arg:entity arg:recursive arg:arg_values arg:arg_types arg:indentation arg:experimental_optional_features arguments arg arg arg arg arg arg Return return:yes Call Call"
  },
  {
    "library": "authlib",
    "name": "create_oauth2_request",
    "source_code": "def create_oauth2_request(self, request) -> OAuth2Request:\n    raise NotImplementedError()",
    "docstring": "This method MUST be implemented in framework integrations. It is used to create an OAuth2Request instance. :param request: the \"request\" instance in framework :return: OAuth2Request instance",
    "type": "method",
    "file_path": "authlib\\authlib\\oauth2\\rfc6749\\authorization_server.py",
    "ast_data": "FunctionDef name:create_oauth2_request arg:self arg:request arguments arg arg Raise Call"
  },
  {
    "library": "scikit-learn",
    "name": "_check_feature_names",
    "source_code": "def _check_feature_names(X, feature_names=None):\n    if feature_names is None:\n        if hasattr(X, 'columns') and hasattr(X.columns, 'tolist'):\n            feature_names = X.columns.tolist()\n        else:\n            feature_names = [f'x{i}' for i in range(X.shape[1])]\n    elif hasattr(feature_names, 'tolist'):\n        feature_names = feature_names.tolist()\n    if len(set(feature_names)) != len(feature_names):\n        raise ValueError('feature_names should not contain duplicates.')\n    return feature_names",
    "docstring": "Check feature names. Parameters ---------- X : array-like of shape (n_samples, n_features) Input data. feature_names : None or array-like of shape (n_names,), dtype=str Feature names to check or . Returns ------- feature_names : list of str Feature names validated. If is , then a list of feature names is provided, i.e. the column names of a pandas dataframe or a generic list of feature names (e.g. ) for a NumPy array.",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\inspection\\_pd_utils.py",
    "ast_data": "FunctionDef name:_check_feature_names arg:X arg:feature_names arguments arg arg If Compare If BoolOp Call Call Assign Call Assign Call If Call Assign Call If Compare Call Call Call Raise Call Return return:yes"
  },
  {
    "library": "pandas",
    "name": "_write_map",
    "source_code": "def _write_map(self) -> None:\n    if not self._map:\n        self._map = {'stata_data': 0, 'map': self.handles.handle.tell(), 'variable_types': 0, 'varnames': 0, 'sortlist': 0, 'formats': 0, 'value_label_names': 0, 'variable_labels': 0, 'characteristics': 0, 'data': 0, 'strls': 0, 'value_labels': 0, 'stata_data_close': 0, 'end-of-file': 0}\n    self.handles.handle.seek(self._map['map'])\n    bio = BytesIO()\n    for val in self._map.values():\n        bio.write(struct.pack(self._byteorder + 'Q', val))\n    self._write_bytes(self._tag(bio.getvalue(), 'map'))",
    "docstring": "Called twice during file write. The first populates the values in the map with 0s. The second call writes the final map locations when all blocks have been written.",
    "type": "method",
    "file_path": "pandas\\pandas\\io\\stata.py",
    "ast_data": "FunctionDef name:_write_map arg:self arguments arg If Assign Call Call Assign Call For Call Call Call Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "benchmark_cpu",
    "source_code": "@time_and_count\ndef benchmark_cpu(self: Self, _callable: Callable[[], Any], warmup: int=20, rep: int=100) -> float:\n\n    def run_for(ms: int) -> list[float]:\n        timings = []\n        run_start_t = time.perf_counter()\n        while True:\n            start_t = time.perf_counter()\n            _callable()\n            end_t = time.perf_counter()\n            timings.append((end_t - start_t) * MILLISECONDS_PER_SECOND)\n            if (end_t - run_start_t) * MILLISECONDS_PER_SECOND > ms:\n                break\n        return timings\n    run_for(warmup)\n    return median(run_for(rep))",
    "docstring": "Benchmark the CPU callable, , and return the median runtime, in milliseconds. Arguments: - _callable: The CPU callable to benchmark. Keyword Arguments: - warmup: Optionally, the duration, in milliseconds, to run before benchmarking starts. - rep: Optionally, the duration, in milliseconds, to run during benchmarking. Returns: - The median runtime of , in milliseconds.",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\runtime\\benchmarking.py",
    "ast_data": "FunctionDef name:benchmark_cpu arg:self arg:_callable arg:warmup arg:rep arguments arg arg arg arg FunctionDef name:run_for arg:ms arguments arg Assign Assign Call While Assign Call Call Assign Call Call If Compare Return return:yes Call Return return:yes Call Call"
  },
  {
    "library": "numpy",
    "name": "setxor1d",
    "source_code": "def setxor1d(ar1, ar2, assume_unique=False):\n    if not assume_unique:\n        ar1 = unique(ar1)\n        ar2 = unique(ar2)\n    aux = ma.concatenate((ar1, ar2), axis=None)\n    if aux.size == 0:\n        return aux\n    aux.sort()\n    auxf = aux.filled()\n    flag = ma.concatenate(([True], auxf[1:] != auxf[:-1], [True]))\n    flag2 = flag[1:] == flag[:-1]\n    return aux[flag2]",
    "docstring": "Set exclusive-or of 1-D arrays with unique elements. The output is always a masked array. See for more details. See Also -------- numpy.setxor1d : Equivalent function for ndarrays. Examples -------- >>> import numpy as np >>> ar1 = np.ma.array([1, 2, 3, 2, 4]) >>> ar2 = np.ma.array([2, 3, 5, 7, 5]) >>> np.ma.setxor1d(ar1, ar2) masked_array(data=[1, 4, 5, 7], mask=False, fill_value=999999)",
    "type": "function",
    "file_path": "numpy\\numpy\\ma\\extras.py",
    "ast_data": "FunctionDef name:setxor1d arg:ar1 arg:ar2 arg:assume_unique arguments arg arg arg If Assign Call Assign Call Assign Call If Compare Return return:yes Call Assign Call Assign Call Compare Assign Compare Return return:yes"
  },
  {
    "library": "pygame",
    "name": "font_constructor",
    "source_code": "def font_constructor(fontpath, size, bold, italic):\n    font = Font(fontpath, size)\n    if bold:\n        font.set_bold(True)\n    if italic:\n        font.set_italic(True)\n    return font",
    "docstring": "pygame.font specific declarations :param fontpath: path to a font. :param size: size of a font. :param bold: bold style, True or False. :param italic: italic style, True or False. :return: A font.Font object.",
    "type": "function",
    "file_path": "pygame\\src_py\\sysfont.py",
    "ast_data": "FunctionDef name:font_constructor arg:fontpath arg:size arg:bold arg:italic arguments arg arg arg arg Assign Call If Call If Call Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "BaseLink",
    "source_code": "class BaseLink(ABC):\n    is_multiclass = False\n    interval_y_pred = Interval(-np.inf, np.inf, False, False)\n\n    @abstractmethod\n    def link(self, y_pred, out=None):\n        pass\n\n    @abstractmethod\n    def inverse(self, raw_prediction, out=None):\n        pass",
    "docstring": "Abstract base class for differentiable, invertible link functions. Convention: - link function g: raw_prediction = g(y_pred) - inverse link h: y_pred = h(raw_prediction) For (generalized) linear models, is the so called linear predictor, and is the predicted conditional (on X) expected value of the target . The methods are not implemented as staticmethods in case a link function needs parameters.",
    "type": "class",
    "file_path": "scikit-learn\\sklearn\\_loss\\link.py",
    "ast_data": "ClassDef name:BaseLink Assign Assign Call FunctionDef name:link arg:self arg:y_pred arg:out arguments arg arg arg FunctionDef name:inverse arg:self arg:raw_prediction arg:out arguments arg arg arg"
  },
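To make the g/h convention concrete, here is an illustrative standalone subclass in the spirit of scikit-learn's log link (the real implementations live in `sklearn/_loss/link.py`; this sketch does not reuse them):

```python
import numpy as np

class LogLink:
    """Illustrative link: raw_prediction = g(y_pred) = log(y_pred);
    inverse: y_pred = h(raw_prediction) = exp(raw_prediction)."""

    def link(self, y_pred, out=None):
        return np.log(y_pred, out=out)

    def inverse(self, raw_prediction, out=None):
        return np.exp(raw_prediction, out=out)

link = LogLink()
y_pred = np.array([0.5, 1.0, 2.0])
raw = link.link(y_pred)
assert np.allclose(link.inverse(raw), y_pred)  # h(g(y)) == y round-trips
```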
  {
    "library": "tensorflow",
    "name": "_recommend_command",
    "source_code": "def _recommend_command(command, description, indent=2, create_link=False):\n    indent_str = ' ' * indent\n    if create_link:\n        font_attr = [debugger_cli_common.MenuItem('', command), 'bold']\n    else:\n        font_attr = 'bold'\n    lines = [RL(indent_str) + RL(command, font_attr) + ':', indent_str + '  ' + description]\n    return debugger_cli_common.rich_text_lines_from_rich_line_list(lines)",
    "docstring": "Generate a RichTextLines object that describes a recommended command. Args: command: (str) The command to recommend. description: (str) A description of what the command does. indent: (int) How many spaces to indent in the beginning. create_link: (bool) Whether a command link is to be applied to the command string. Returns: (RichTextLines) Formatted text (with font attributes) for recommending the command.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\debug\\cli\\cli_shared.py",
    "ast_data": "FunctionDef name:_recommend_command arg:command arg:description arg:indent arg:create_link arguments arg arg arg arg Assign If Assign Call Assign Assign Call Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "replicate",
    "source_code": "def replicate(dataset, devices):\n    if not isinstance(dataset, data_types.DatasetV2):\n        raise TypeError(f'Invalid `dataset`. Expected a `tf.data.Dataset` object but got {type(dataset)}.')\n    dataset_device = dataset._variant_tensor.device\n    datasets = {}\n    if len(devices) == 1 and devices[0] == dataset_device:\n        datasets[devices[0]] = dataset\n        return datasets\n    with ops.colocate_with(dataset._variant_tensor):\n        dataset = dataset._apply_debug_options()\n        graph_def = dataset._as_serialized_graph(strip_device_assignment=True, external_state_policy=ExternalStatePolicy.WARN)\n    for device in devices:\n        ds = _RemoteDataset(graph_def, device, dataset.element_spec)\n        datasets[device] = ds\n    return datasets",
    "docstring": "A transformation that replicates onto a list of devices. Args: dataset: A object. devices: A list of devices to replicate the dataset on. Returns: A dictionary mapping device name to a dataset on that device.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\data\\experimental\\ops\\distribute.py",
    "ast_data": "FunctionDef name:replicate arg:dataset arg:devices arguments arg arg If Call Raise Call Call Assign Assign If BoolOp Compare Call Compare Assign Return return:yes With Call Assign Call Assign Call For Assign Call Assign Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "get_boxstyle",
    "source_code": "def get_boxstyle(self):\n    return self._bbox_transmuter",
    "docstring": "Return the boxstyle object.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\patches.py",
    "ast_data": "FunctionDef name:get_boxstyle arg:self arguments arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "issubtype",
    "source_code": "def issubtype(left, right, recursive=True):\n    left = TYPE2ABC.get(left, left)\n    right = TYPE2ABC.get(right, right)\n    if right is Any or left == right:\n        return True\n    if isinstance(right, _GenericAlias):\n        if getattr(right, '__origin__', None) is Generic:\n            return True\n    if right == type(None):\n        return False\n    constraints = _decompose_type(right)\n    if len(constraints) == 0 or Any in constraints:\n        return True\n    if left is Any:\n        return False\n    variants = _decompose_type(left)\n    if len(variants) == 0:\n        return False\n    return all((_issubtype_with_constraints(variant, constraints, recursive) for variant in variants))",
    "docstring": "Check if the left-side type is a subtype of the right-side type. If any of type is a composite type like and with bounds, it would be expanded into a list of types and check all of left-side types are subtypes of either one from right-side types.",
    "type": "function",
    "file_path": "pytorch\\torch\\utils\\data\\datapipes\\_typing.py",
    "ast_data": "FunctionDef name:issubtype arg:left arg:right arg:recursive arguments arg arg arg Assign Call Assign Call If BoolOp Compare Compare Return return:yes If Call If Compare Call Return return:yes If Compare Call Return return:yes Assign Call If BoolOp Compare Call Compare Return return:yes If Compare Return return:yes Assign Call If Compare Call Return return:yes Return return:yes Call Call"
  },
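A hedged usage sketch of the expansion behaviour described above; `issubtype` is internal to `torch.utils.data.datapipes._typing`, so the import path and the exact results are assumptions based on the docstring rather than a stable public API:

```python
from typing import Any, List, Union
from torch.utils.data.datapipes._typing import issubtype

print(issubtype(List[int], List))       # expected True: int refines the unbound List
print(issubtype(int, Union[int, str]))  # expected True: int matches one constraint
print(issubtype(Union[int, str], int))  # expected False: str is not a subtype of int
print(issubtype(List[int], Any))        # expected True: Any accepts everything
```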
  {
    "library": "django",
    "name": "get_deleted_objects",
    "source_code": "def get_deleted_objects(self, objs, request):\n    return get_deleted_objects(objs, request, self.admin_site)",
    "docstring": "Hook for customizing the delete process for the delete view and the \"delete selected\" action.",
    "type": "method",
    "file_path": "django\\django\\contrib\\admin\\options.py",
    "ast_data": "FunctionDef name:get_deleted_objects arg:self arg:objs arg:request arguments arg arg arg Return return:yes Call"
  },
  {
    "library": "kornia",
    "name": "transform",
    "source_code": "@classmethod\n@abstractmethod\ndef transform(cls, input: T, module: Module, param: ParamItem, extra_args: Optional[Dict[str, Any]]=None) -> T:\n    raise NotImplementedError",
    "docstring": "Apply a transformation with respect to the parameters. Args: input: the input tensor. module: any torch Module but only kornia augmentation modules will count to apply transformations. param: the corresponding parameters to the module. extra_args: Optional dictionary of extra arguments with specific options for different input types.",
    "type": "method",
    "file_path": "kornia\\kornia\\augmentation\\container\\ops.py",
    "ast_data": "FunctionDef name:transform arg:cls arg:input arg:module arg:param arg:extra_args arguments arg arg arg arg arg Raise"
  },
  {
    "library": "tensorflow",
    "name": "scatter_nd_sub",
    "source_code": "def scatter_nd_sub(self, indices, updates, name=None):\n    raise NotImplementedError",
    "docstring": "Applies sparse subtraction to individual values or slices in a Variable. Assuming the variable has rank and is a of rank . must be integer tensor, containing indices into self. It must be shape where . The innermost dimension of (with length ) corresponds to indices into elements (if ) or slices (if ) along the th dimension of self. is of rank with shape: For example, say we want to add 4 scattered elements to a rank-1 tensor to 8 elements. In Python, that update would look like this: After the update would look like this: [1, -9, 3, -6, -4, 6, 7, -4] See for more details about how to make updates to slices. Args: indices: The indices to be used in the operation. updates: The values to be used in the operation. name: the name of the operation. Returns: The updated variable.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\variables.py",
    "ast_data": "FunctionDef name:scatter_nd_sub arg:self arg:indices arg:updates arg:name arguments arg arg arg arg Raise"
  },
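The worked example in the docstring can be reproduced with the public `tf.Variable.scatter_nd_sub` method in eager mode (TensorFlow 2.x assumed):

```python
import tensorflow as tf

v = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8])
indices = tf.constant([[4], [3], [1], [7]])
updates = tf.constant([9, 10, 11, 12])

# Subtracts each update at the row named by the matching index:
# v[4] -= 9, v[3] -= 10, v[1] -= 11, v[7] -= 12.
v.scatter_nd_sub(indices, updates)
print(v.numpy())  # [ 1 -9  3 -6 -4  6  7 -4]
```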
  {
    "library": "django",
    "name": "WKTAdapter",
    "source_code": "class WKTAdapter:\n\n    def __init__(self, geom):\n        self.wkt = geom.wkt\n        self.srid = geom.srid\n\n    def __eq__(self, other):\n        return isinstance(other, WKTAdapter) and self.wkt == other.wkt and (self.srid == other.srid)\n\n    def __hash__(self):\n        return hash((self.wkt, self.srid))\n\n    def __str__(self):\n        return self.wkt\n\n    @classmethod\n    def _fix_polygon(cls, poly):\n        return poly",
    "docstring": "An adaptor for Geometries sent to the MySQL and Oracle database backends.",
    "type": "class",
    "file_path": "django\\django\\contrib\\gis\\db\\backends\\base\\adapter.py",
    "ast_data": "ClassDef name:WKTAdapter FunctionDef name:__init__ arg:self arg:geom arguments arg arg Assign Assign FunctionDef name:__eq__ arg:self arg:other arguments arg arg Return return:yes BoolOp Call Compare Compare FunctionDef name:__hash__ arg:self arguments arg Return return:yes Call FunctionDef name:__str__ arg:self arguments arg Return return:yes FunctionDef name:_fix_polygon arg:cls arg:poly arguments arg arg Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "copy",
    "source_code": "def copy(self):\n    return self.__copy__()",
    "docstring": "Return a copy of the multivarcolormap.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\colors.py",
    "ast_data": "FunctionDef name:copy arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "query",
    "source_code": "def query(self):\n    return super().query()",
    "docstring": "Check if all work currently captured by event has completed. Returns: A boolean indicating if all work currently captured by event has completed.",
    "type": "method",
    "file_path": "pytorch\\torch\\cuda\\streams.py",
    "ast_data": "FunctionDef name:query arg:self arguments arg Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "flip_up_down",
    "source_code": "@tf_export('image.flip_up_down')\n@dispatch.add_dispatch_support\ndef flip_up_down(image):\n    return _flip(image, 0, 'flip_up_down')",
    "docstring": "Flip an image vertically (upside down). Outputs the contents of flipped along the height dimension. See also . Usage Example: >>> x = [[[1.0, 2.0, 3.0], ... [4.0, 5.0, 6.0]], ... [[7.0, 8.0, 9.0], ... [10.0, 11.0, 12.0]]] >>> tf.image.flip_up_down(x) Args: image: 4-D Tensor of shape or 3-D Tensor of shape . Returns: A of the same type and shape as . Raises: ValueError: if the shape of not supported.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\image_ops_impl.py",
    "ast_data": "FunctionDef name:flip_up_down arg:image arguments arg Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "get",
    "source_code": "def get(self, name=None):\n    if name is None:\n        name = '%s_get' % self._name\n    fn = lambda: gen_data_flow_ops.unstage(dtypes=self._dtypes, shared_name=self._name, name=name, capacity=self._capacity, memory_limit=self._memory_limit)\n    return self.__internal_get(fn, name)",
    "docstring": "Gets one element from this staging area. If the staging area is empty when this operation executes, it will block until there is an element to dequeue. Note that unlike others ops that can block, like the queue Dequeue operations, this can stop other work from happening. To avoid this, the intended use is for this to be called only when there will be an element already available. One method for doing this in a training loop would be to run a call during a warmup session.run call, and then call both and in each subsequent step. The placement of the returned tensor will be determined by the current device scope when this function is called. Args: name: A name for the operation (optional). Returns: The tuple of tensors that was gotten.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\data_flow_ops.py",
    "ast_data": "FunctionDef name:get arg:self arg:name arguments arg arg If Compare Assign Assign arguments Call Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "_estimate_gaussian_covariances_full",
    "source_code": "def _estimate_gaussian_covariances_full(resp, X, nk, means, reg_covar):\n    n_components, n_features = means.shape\n    covariances = np.empty((n_components, n_features, n_features), dtype=X.dtype)\n    for k in range(n_components):\n        diff = X - means[k]\n        covariances[k] = np.dot(resp[:, k] * diff.T, diff) / nk[k]\n        covariances[k].flat[::n_features + 1] += reg_covar\n    return covariances",
    "docstring": "Estimate the full covariance matrices. Parameters ---------- resp : array-like of shape (n_samples, n_components) X : array-like of shape (n_samples, n_features) nk : array-like of shape (n_components,) means : array-like of shape (n_components, n_features) reg_covar : float Returns ------- covariances : array, shape (n_components, n_features, n_features) The covariance matrix of the current components.",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\mixture\\_gaussian_mixture.py",
    "ast_data": "FunctionDef name:_estimate_gaussian_covariances_full arg:resp arg:X arg:nk arg:means arg:reg_covar arguments arg arg arg arg arg Assign Assign Call For Call Assign Assign Call Return return:yes"
  },
  {
    "library": "authlib",
    "name": "list_to_scope",
    "source_code": "def list_to_scope(scope):\n    if isinstance(scope, (set, tuple, list)):\n        return ' '.join([to_unicode(s) for s in scope])\n    if scope is None:\n        return scope\n    return to_unicode(scope)",
    "docstring": "Convert a list of scopes to a space separated string.",
    "type": "function",
    "file_path": "authlib\\authlib\\oauth2\\rfc6749\\util.py",
    "ast_data": "FunctionDef name:list_to_scope arg:scope arguments arg If Call Return return:yes Call Call If Compare Return return:yes Return return:yes Call"
  },
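A quick demonstration of the three branches (sequence, None, plain string); `scope_to_list` in the same module performs the inverse conversion:

```python
from authlib.oauth2.rfc6749.util import list_to_scope

print(list_to_scope(["profile", "email"]))  # 'profile email'
print(list_to_scope(None))                  # None passes through unchanged
print(list_to_scope("openid"))              # 'openid'
```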
  {
    "library": "kornia",
    "name": "_kernel_distance",
    "source_code": "def _kernel_distance(squared_distances: torch.Tensor, eps: float=1e-08) -> torch.Tensor:\n    return 0.5 * squared_distances * squared_distances.add(eps).log()",
    "docstring": "Compute the TPS kernel distance function: :math:, where is the euclidean distance. Since :math: , this function takes the squared distance matrix and calculates :math: .",
    "type": "function",
    "file_path": "kornia\\kornia\\geometry\\transform\\thin_plate_spline.py",
    "ast_data": "FunctionDef name:_kernel_distance arg:squared_distances arg:eps arguments arg arg Return return:yes Call Call"
  },
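The identity the docstring relies on, r^2 log(r) = 0.5 r^2 log(r^2), can be checked numerically. A small PyTorch sketch (the `eps` term only guards log(0) for coincident points):

```python
import torch

def kernel_distance(squared_distances, eps=1e-8):
    # 0.5 * r^2 * log(r^2 + eps), numerically equal to r^2 * log(r) up to eps
    return 0.5 * squared_distances * squared_distances.add(eps).log()

r = torch.tensor([0.5, 1.0, 2.0, 3.0])
expected = r.pow(2) * r.log()        # r^2 log(r), computed directly
actual = kernel_distance(r.pow(2))   # the function takes squared distances
print(torch.allclose(actual, expected, atol=1e-6))  # True
```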
  {
    "library": "tensorflow",
    "name": "_check_args_and_maybe_make_decorator",
    "source_code": "def _check_args_and_maybe_make_decorator(script_op, script_op_name, func=None, inp=None, Tout=None, **kwargs):\n    if Tout is None:\n        raise TypeError(f\"Missing required argument: 'Tout'\\n  If using {script_op_name} as a decorator, set `Tout`\\n  **by name** above the function:\\n  `@{script_op_name}(Tout=tout)`\")\n    if func is None:\n        if inp is not None:\n            raise TypeError(f\"Don't set the `inp` argument when using {script_op_name} as a decorator (`func=None`).\")\n\n        def py_function_decorator(fun):\n\n            @functools.wraps(fun)\n            def py_function_wrapper(*args):\n                return script_op(fun, inp=args, Tout=Tout, **kwargs)\n            return py_function_wrapper\n        return py_function_decorator\n    if inp is None:\n        raise TypeError(f'Missing argument `inp`:\\n  You must set the `inp` argument (the list of arguments to the\\n  function), unless you use `{script_op_name}` as a decorator(`func=None`).')\n    return None",
    "docstring": "Checks the arguments and returns a decorator if func is None.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\script_ops.py",
    "ast_data": "FunctionDef name:_check_args_and_maybe_make_decorator arg:script_op arg:script_op_name arg:func arg:inp arg:Tout arguments arg arg arg arg arg arg If Compare Raise Call If Compare If Compare Raise Call FunctionDef name:py_function_decorator arg:fun arguments arg FunctionDef name:py_function_wrapper arguments arg Return return:yes Call Call Return return:yes Return return:yes If Compare Raise Call Return return:no"
  },
  {
    "library": "pandas",
    "name": "ChainedAssignmentError",
    "source_code": "class ChainedAssignmentError(Warning):\n    pass",
    "docstring": "Warning raised when trying to set using chained assignment. When the `the user guide`. See Also -------- options.mode.copy_on_write : Global setting for enabling or disabling Copy-on-Write behavior. Examples -------- >>> pd.options.mode.copy_on_write = True >>> df = pd.DataFrame({\"A\": [1, 1, 1, 2, 2]}, columns=[\"A\"]) >>> df[\"A\"][0:3] = 10 # doctest: +SKIP ... # ChainedAssignmentError: ... >>> pd.options.mode.copy_on_write = False",
    "type": "class",
    "file_path": "pandas\\pandas\\errors\\__init__.py",
    "ast_data": "ClassDef name:ChainedAssignmentError"
  },
  {
    "library": "tensorflow",
    "name": "graph",
    "source_code": "@property\ndef graph(self):\n    return self.handle.graph",
    "docstring": "The of this variable.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\resource_variable_ops.py",
    "ast_data": "FunctionDef name:graph arg:self arguments arg Return return:yes"
  },
  {
    "library": "sphinx",
    "name": "navigation_doc_metadata",
    "source_code": "def navigation_doc_metadata(self, navlist: list[NavPoint]) -> dict[str, Any]:\n    return {'lang': html.escape(self.config.epub_language), 'toc_locale': html.escape(self.guide_titles['toc']), 'navlist': navlist}",
    "docstring": "Create a dictionary with all metadata for the nav.xhtml file properly escaped.",
    "type": "method",
    "file_path": "sphinx\\sphinx\\builders\\epub3.py",
    "ast_data": "FunctionDef name:navigation_doc_metadata arg:self arg:navlist arguments arg arg Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "stop_recording",
    "source_code": "@tf_contextlib.contextmanager\ndef stop_recording(self):\n    if self._tape is None:\n        raise RuntimeError('Trying to stop recording a tape which is not recording.')\n    self._pop_tape()\n    try:\n        yield\n    finally:\n        self._push_tape()",
    "docstring": "Temporarily stops recording operations on this tape. Operations executed while this context manager is active will not be recorded on the tape. This is useful for reducing the memory used by tracing all computations. For example: >>> x = tf.constant(4.0) >>> with tf.GradientTape() as tape: ... with tape.stop_recording(): ... y = x ** 2 >>> dy_dx = tape.gradient(y, x) >>> print(dy_dx) None Yields: None Raises: RuntimeError: if the tape is not currently recording.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\backprop.py",
    "ast_data": "FunctionDef name:stop_recording arg:self arguments arg If Compare Raise Call Call Try Call"
  },
  {
    "library": "pytorch",
    "name": "broadcast_object",
    "source_code": "def broadcast_object(self, object: Optional[T]) -> T:\n    object_list = [object]\n    if self.use_dist:\n        dist.broadcast_object_list(object_list=object_list, group=self.group, src=self.coordinator_rank)\n    return cast(T, object_list[0])",
    "docstring": "Implement functionality similar to c10d::broadcast_object_list but without distributed enabled.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\checkpoint\\utils.py",
    "ast_data": "FunctionDef name:broadcast_object arg:self arg:object arguments arg arg Assign If Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "outer_graph",
    "source_code": "@property\ndef outer_graph(self):\n    current = self._weak_outer_graph()\n    if current is None:\n        return self._fallback_outer_graph\n    return current",
    "docstring": "The Graph this FuncGraph is nested in. Functions may capture Tensors from graphs they are nested in (transitive). Returns: A Graph object. Initially set to the current default graph when the FuncGraph was created. If the previous was deleted because the function that owns it was deleted, is reset to the outermost default graph active when the FuncGraph was created. This FuncGraph won't have captured anything from the new (and likely not from the previous setting, since that would have created a strong reference), but it is returned so that FuncGraphs always have a parent.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\func_graph.py",
    "ast_data": "FunctionDef name:outer_graph arg:self arguments arg Assign Call If Compare Return return:yes Return return:yes"
  },
  {
    "library": "seaborn",
    "name": "map_upper",
    "source_code": "def map_upper(self, func, **kwargs):\n    indices = zip(*np.triu_indices_from(self.axes, 1))\n    self._map_bivariate(func, indices, **kwargs)\n    return self",
    "docstring": "Plot with a bivariate function on the upper diagonal subplots. Parameters ---------- func : callable plotting function Must take x, y arrays as positional arguments and draw onto the \"currently active\" matplotlib Axes. Also needs to accept kwargs called ``.",
    "type": "method",
    "file_path": "seaborn\\seaborn\\axisgrid.py",
    "ast_data": "FunctionDef name:map_upper arg:self arg:func arguments arg arg arg Assign Call Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "get_observer_state_dict",
    "source_code": "def get_observer_state_dict(mod):\n    od = OrderedDict()\n    if isinstance(mod, torch.jit.RecursiveScriptModule):\n        for k, v in mod.state_dict().items():\n            if 'observer' in k:\n                od[k] = v\n    else:\n        for k, v in mod.state_dict().items():\n            if 'activation_post_process' in k:\n                od[k] = v\n    od._metadata = mod.state_dict()._metadata\n    return od",
    "docstring": "Returns the state dict corresponding to the observer stats. Traverse the model state_dict and extract out the stats.",
    "type": "function",
    "file_path": "pytorch\\torch\\ao\\quantization\\observer.py",
    "ast_data": "FunctionDef name:get_observer_state_dict arg:mod arguments arg Assign Call If Call For Call Call If Compare Assign For Call Call If Compare Assign Assign Call Return return:yes"
  },
  {
    "library": "pandas",
    "name": "__matmul__",
    "source_code": "def __matmul__(self, other):\n    return self.dot(other)",
    "docstring": "Matrix multiplication using binary operator.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\series.py",
    "ast_data": "FunctionDef name:__matmul__ arg:self arg:other arguments arg arg Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "_map_drop_idx_to_infrequent",
    "source_code": "def _map_drop_idx_to_infrequent(self, feature_idx, drop_idx):\n    if not self._infrequent_enabled:\n        return drop_idx\n    default_to_infrequent = self._default_to_infrequent_mappings[feature_idx]\n    if default_to_infrequent is None:\n        return drop_idx\n    infrequent_indices = self._infrequent_indices[feature_idx]\n    if infrequent_indices is not None and drop_idx in infrequent_indices:\n        categories = self.categories_[feature_idx]\n        raise ValueError(f'Unable to drop category {categories[drop_idx].item()!r} from feature {feature_idx} because it is infrequent')\n    return default_to_infrequent[drop_idx]",
    "docstring": "Convert into the index for infrequent categories. If there are no infrequent categories, then is returned. This method is called in when the parameter is an array-like.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\preprocessing\\_encoders.py",
    "ast_data": "FunctionDef name:_map_drop_idx_to_infrequent arg:self arg:feature_idx arg:drop_idx arguments arg arg arg If Return return:yes Assign If Compare Return return:yes Assign If BoolOp Compare Compare Assign Raise Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "align",
    "source_code": "class align(sympy.Function):\n    nargs = (1,)\n    is_integer = True\n\n    @classmethod\n    def eval(cls, value: sympy.Expr) -> Optional[sympy.Expr]:\n        if isinstance(value, (int, sympy.Integer)):\n            return _align(int(value))\n        if _is_aligned(value):\n            return value",
    "docstring": "Symbolically round up to the nearest multiple of ALIGN_BYTES",
    "type": "class",
    "file_path": "pytorch\\torch\\_inductor\\utils.py",
    "ast_data": "ClassDef name:align Assign Assign FunctionDef name:eval arg:cls arg:value arguments arg arg If Call Return return:yes Call Call If Call Return return:yes"
  },
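The `_align` helper this class evaluates to is not shown here, but rounding up to a power-of-two multiple is a standard bit trick. A self-contained sketch, with `ALIGN_BYTES = 16` assumed purely for illustration (the real constant lives in `torch/_inductor`):

```python
ALIGN_BYTES = 16  # assumed value; must be a power of two for the mask trick

def align_up(n: int) -> int:
    """Round n up to the nearest multiple of ALIGN_BYTES."""
    return (n + ALIGN_BYTES - 1) & ~(ALIGN_BYTES - 1)

assert align_up(0) == 0
assert align_up(1) == 16
assert align_up(16) == 16
assert align_up(17) == 32
```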
  {
    "library": "pytorch",
    "name": "enable_observer",
    "source_code": "def enable_observer(mod):\n    if isinstance(mod, FakeQuantizeBase) or _is_fake_quant_script_module(mod):\n        mod.enable_observer()",
    "docstring": "Enable observation for this module. Enable observation for this module, if applicable. Example usage:: # model is any PyTorch model model.apply(torch.ao.quantization.enable_observer)",
    "type": "function",
    "file_path": "pytorch\\torch\\ao\\quantization\\fake_quantize.py",
    "ast_data": "FunctionDef name:enable_observer arg:mod arguments arg If BoolOp Call Call Call"
  },
  {
    "library": "scipy",
    "name": "tiecorrect",
    "source_code": "def tiecorrect(rankvals):\n    arr = np.sort(rankvals)\n    idx = np.nonzero(np.r_[True, arr[1:] != arr[:-1], True])[0]\n    cnt = np.diff(idx).astype(np.float64)\n    size = np.float64(arr.size)\n    return 1.0 if size < 2 else 1.0 - (cnt ** 3 - cnt).sum() / (size ** 3 - size)",
    "docstring": "Tie correction factor for Mann-Whitney U and Kruskal-Wallis H tests. Parameters ---------- rankvals : array_like A 1-D sequence of ranks. Typically this will be the array returned by . Returns ------- factor : float Correction factor for U or H. See Also -------- rankdata : Assign ranks to the data mannwhitneyu : Mann-Whitney rank test kruskal : Kruskal-Wallis H test References ---------- .. [1] Siegel, S. (1956) Nonparametric Statistics for the Behavioral Sciences. New York: McGraw-Hill. Examples -------- >>> from scipy.stats import tiecorrect, rankdata >>> tiecorrect([1, 2.5, 2.5, 4]) 0.9 >>> ranks = rankdata([1, 3, 2, 4, 5, 7, 2, 8, 4]) >>> ranks array([ 1. , 4. , 2.5, 5.5, 7. , 8. , 2.5, 9. , 5.5]) >>> tiecorrect(ranks) 0.9833333333333333",
    "type": "function",
    "file_path": "scipy\\scipy\\stats\\_stats_py.py",
    "ast_data": "FunctionDef name:tiecorrect arg:rankvals arguments arg Assign Call Assign Call Compare Assign Call Call Assign Call Return return:yes Compare Call"
  },
  {
    "library": "scipy",
    "name": "_norm_factor",
    "source_code": "def _norm_factor(p, k):\n    p = asarray(p, dtype=complex)\n\n    def G(w):\n        return abs(k / prod(1j * w - p))\n\n    def cutoff(w):\n        return G(w) - 1 / np.sqrt(2)\n    return optimize.newton(cutoff, 1.5)",
    "docstring": "Numerically find frequency shift to apply to delay-normalized filter such that -3 dB point is at 1 rad/sec. is an array_like of polynomial poles is a float gain First 10 values are listed in \"Bessel Scale Factors\" table, \"Bessel Filters Polynomials, Poles and Circuit Elements 2003, C. Bond.\"",
    "type": "function",
    "file_path": "scipy\\scipy\\signal\\_filter_design.py",
    "ast_data": "FunctionDef name:_norm_factor arg:p arg:k arguments arg arg Assign Call FunctionDef name:G arg:w arguments arg Return return:yes Call Call FunctionDef name:cutoff arg:w arguments arg Return return:yes Call Call Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "list_fonts",
    "source_code": "def list_fonts(directory, extensions):\n    extensions = ['.' + ext for ext in extensions]\n    return [os.path.join(dirpath, filename) for dirpath, _, filenames in os.walk(directory) for filename in filenames if Path(filename).suffix.lower() in extensions]",
    "docstring": "Return a list of all fonts matching any of the extensions, found recursively under the directory.",
    "type": "function",
    "file_path": "matplotlib\\lib\\matplotlib\\font_manager.py",
    "ast_data": "FunctionDef name:list_fonts arg:directory arg:extensions arguments arg arg Assign Return return:yes Call Call Compare Call Call"
  },
  {
    "library": "tensorflow",
    "name": "num_bytes",
    "source_code": "@property\ndef num_bytes(self) -> int:\n    return self._num_bytes",
    "docstring": "Size of this tensor in bytes (long integer).",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\client\\timeline.py",
    "ast_data": "FunctionDef name:num_bytes arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_logical_not",
    "source_code": "def _logical_not(x):\n    x_ = _static_value(x)\n    if x_ is None:\n        return math_ops.logical_not(x)\n    return constant_op.constant(np.logical_not(x_))",
    "docstring": "Convenience function which attempts to statically apply .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\distributions\\transformed_distribution.py",
    "ast_data": "FunctionDef name:_logical_not arg:x arguments arg Assign Call If Compare Return return:yes Call Return return:yes Call Call"
  },
  {
    "library": "django",
    "name": "window_frame_rows_start_end",
    "source_code": "def window_frame_rows_start_end(self, start=None, end=None):\n    if isinstance(start, int) and isinstance(end, int) and (start > end):\n        raise ValueError('start cannot be greater than end.')\n    if start is not None and (not isinstance(start, int)):\n        raise ValueError(f\"start argument must be an integer, zero, or None, but got '{start}'.\")\n    if end is not None and (not isinstance(end, int)):\n        raise ValueError(f\"end argument must be an integer, zero, or None, but got '{end}'.\")\n    start_ = self.window_frame_value(start) or self.UNBOUNDED_PRECEDING\n    end_ = self.window_frame_value(end) or self.UNBOUNDED_FOLLOWING\n    return (start_, end_)",
    "docstring": "Return SQL for start and end points in an OVER clause window frame.",
    "type": "method",
    "file_path": "django\\django\\db\\backends\\base\\operations.py",
    "ast_data": "FunctionDef name:window_frame_rows_start_end arg:self arg:start arg:end arguments arg arg arg If BoolOp Call Call Compare Raise Call If BoolOp Compare Call Raise Call If BoolOp Compare Call Raise Call Assign BoolOp Call Assign BoolOp Call Return return:yes"
  },
  {
    "library": "sphinx",
    "name": "getslots",
    "source_code": "def getslots(obj: Any) -> dict[str, Any] | dict[str, None] | None:\n    if not isclass(obj):\n        raise TypeError\n    __slots__ = safe_getattr(obj, '__slots__', None)\n    if __slots__ is None:\n        return None\n    elif isinstance(__slots__, dict):\n        return __slots__\n    elif isinstance(__slots__, str):\n        return {__slots__: None}\n    elif isinstance(__slots__, list | tuple):\n        return dict.fromkeys(__slots__)\n    else:\n        raise ValueError",
    "docstring": "Safely get :term: as a dictionary if any. - This returns `TypeErrorValueError` is invalid.",
    "type": "function",
    "file_path": "sphinx\\sphinx\\util\\inspect.py",
    "ast_data": "FunctionDef name:getslots arg:obj arguments arg If Call Raise Assign Call If Compare Return return:no If Call Return return:yes If Call Return return:yes If Call Return return:yes Call Raise"
  },
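A usage sketch covering the documented return shapes (missing, string, sequence, and dict `__slots__`); it assumes sphinx is importable in the environment:

```python
from sphinx.util.inspect import getslots

class NoSlots: ...
class StrSlot: __slots__ = "x"
class ListSlots: __slots__ = ["x", "y"]
class DictSlots: __slots__ = {"x": "doc for x"}

print(getslots(NoSlots))    # None: no __slots__ defined
print(getslots(StrSlot))    # {'x': None}
print(getslots(ListSlots))  # {'x': None, 'y': None}
print(getslots(DictSlots))  # {'x': 'doc for x'}: dict passes through
```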
  {
    "library": "seaborn",
    "name": "_PlottingContext",
    "source_code": "class _PlottingContext(_RCAesthetics):\n    _keys = _context_keys\n    _set = staticmethod(set_context)",
    "docstring": "Light wrapper on a dict to set context temporarily.",
    "type": "class",
    "file_path": "seaborn\\seaborn\\rcmod.py",
    "ast_data": "ClassDef name:_PlottingContext Assign Assign Call"
  },
  {
    "library": "virtualenv",
    "name": "quote",
    "source_code": "@staticmethod\ndef quote(string):\n    string = string.replace(\"'\", \"''\")\n    return f\"'{string}'\"",
    "docstring": "This should satisfy PowerShell quoting rules [1], unless the quoted string is passed directly to Windows native commands [2]. [1]: [2]:",
    "type": "method",
    "file_path": "virtualenv\\src\\virtualenv\\activation\\powershell\\__init__.py",
    "ast_data": "FunctionDef name:quote arg:string arguments arg Assign Call Return return:yes"
  },
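The rule is simply: double every embedded single quote, then wrap the whole string in single quotes. A standalone re-implementation for checking the output:

```python
def ps_quote(string: str) -> str:
    # PowerShell single-quote escaping: '' inside a '...' literal means '.
    return "'{}'".format(string.replace("'", "''"))

print(ps_quote("C:\\venv"))     # 'C:\venv'
print(ps_quote("it's a path"))  # 'it''s a path'
```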
  {
    "library": "scipy",
    "name": "_cholesky",
    "source_code": "def _cholesky(a, lower=False, overwrite_a=False, clean=True, check_finite=True):\n    a1 = asarray_chkfinite(a) if check_finite else asarray(a)\n    a1 = atleast_2d(a1)\n    if a1.ndim != 2:\n        raise ValueError(f'Input array needs to be 2D but received a {a1.ndim}d-array.')\n    if a1.shape[0] != a1.shape[1]:\n        raise ValueError(f'Input array is expected to be square but has the shape: {a1.shape}.')\n    if a1.size == 0:\n        dt = cholesky(np.eye(1, dtype=a1.dtype)).dtype\n        return (empty_like(a1, dtype=dt), lower)\n    overwrite_a = overwrite_a or _datacopied(a1, a)\n    potrf, = get_lapack_funcs(('potrf',), (a1,))\n    c, info = potrf(a1, lower=lower, overwrite_a=overwrite_a, clean=clean)\n    if info > 0:\n        raise LinAlgError(f'{info}-th leading minor of the array is not positive definite')\n    if info < 0:\n        raise ValueError(f'LAPACK reported an illegal value in {-info}-th argument on entry to \"POTRF\".')\n    return (c, lower)",
    "docstring": "Common code for cholesky() and cho_factor().",
    "type": "function",
    "file_path": "scipy\\scipy\\linalg\\_decomp_cholesky.py",
    "ast_data": "FunctionDef name:_cholesky arg:a arg:lower arg:overwrite_a arg:clean arg:check_finite arguments arg arg arg arg arg Assign Call Call Assign Call If Compare Raise Call If Compare Raise Call If Compare Assign Call Call Return return:yes Call Assign BoolOp Call Assign Call Assign Call If Compare Raise Call If Compare Raise Call Return return:yes"
  },
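The public wrappers `scipy.linalg.cholesky` and `scipy.linalg.cho_factor` route through this helper; a quick sanity check of the `lower` flag and the positive-definiteness error path:

```python
import numpy as np
from scipy.linalg import cholesky, LinAlgError

a = np.array([[4.0, 2.0], [2.0, 3.0]])
lower = cholesky(a, lower=True)          # lower-triangular factor L
print(np.allclose(lower @ lower.T, a))   # True: L @ L.T reconstructs a

try:
    cholesky(np.array([[1.0, 2.0], [2.0, 1.0]]))  # not positive definite
except LinAlgError as exc:
    print("rejected:", exc)  # POTRF reports a non-positive leading minor
```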
  {
    "library": "tensorflow",
    "name": "ragged_hash",
    "source_code": "def ragged_hash(self):\n    g = getattr(self.row_splits, 'graph', None)\n    if tensor.Tensor._USE_EQUALITY and ops.executing_eagerly_outside_functions() and (g is None or g.building_function):\n        raise TypeError('RaggedTensor is unhashable.')\n    else:\n        return id(self)",
    "docstring": "The operation invoked by the operator.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\ragged_operators.py",
    "ast_data": "FunctionDef name:ragged_hash arg:self arguments arg Assign Call If BoolOp Call BoolOp Compare Raise Call Return return:yes Call"
  },
  {
    "library": "django",
    "name": "divisibleby",
    "source_code": "@register.filter(is_safe=False)\ndef divisibleby(value, arg):\n    return int(value) % int(arg) == 0",
    "docstring": "Return True if the value is divisible by the argument.",
    "type": "function",
    "file_path": "django\\django\\template\\defaultfilters.py",
    "ast_data": "FunctionDef name:divisibleby arg:value arg:arg arguments arg arg Return return:yes Compare Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "forward",
    "source_code": "def forward(self, x: torch.Tensor, output_size: Optional[list[int]]=None) -> torch.Tensor:\n    assert isinstance(self.padding, tuple)\n    output_padding = self._output_padding(input, output_size, self.stride, self.padding, self.kernel_size, self.dilation)\n    weight_quant_dequant = self.get_weight()\n    result = F.conv_transpose2d(x, weight_quant_dequant, self.bias, self.stride, self.padding, output_padding, self.groups, self.dilation)\n    return result",
    "docstring": "we have: w(float) -- quant - dequant x(float) ------------- F.convTranspose2d --- In the full model, we will see w(float) -- quant - *dequant x -- quant --- *dequant -- *F.convTranspose2d --- *quant - dequant and the backend should be able to fuse the ops with into a quantized conv2d",
    "type": "method",
    "file_path": "pytorch\\torch\\ao\\nn\\quantized\\reference\\modules\\conv.py",
    "ast_data": "FunctionDef name:forward arg:self arg:x arg:output_size arguments arg arg arg Call Assign Call Assign Call Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "scatter_update",
    "source_code": "def scatter_update(self, sparse_delta, use_locking=False, name=None):\n    if not isinstance(sparse_delta, indexed_slices.IndexedSlices):\n        raise TypeError('sparse_delta is not IndexedSlices: %s' % sparse_delta)\n    return gen_state_ops.scatter_update(self._variable, sparse_delta.indices, sparse_delta.values, use_locking=use_locking, name=name)",
    "docstring": "Assigns to this variable. Args: sparse_delta: to be assigned to this variable. use_locking: If , use locking during the operation. name: the name of the operation. Returns: A that will hold the new value of this variable after the scattered assignment has completed. Raises: TypeError: if is not an .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\ref_variable.py",
    "ast_data": "FunctionDef name:scatter_update arg:self arg:sparse_delta arg:use_locking arg:name arguments arg arg arg arg If Call Raise Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_unsorted_segment_N",
    "source_code": "def _unsorted_segment_N(data, segment_ids, num_segments):\n    num_segments = ops.convert_to_tensor(num_segments)\n    segment_ids_shape = array_ops.shape_internal(segment_ids)\n    ones_tensor = array_ops.ones(segment_ids_shape, dtype=data.dtype)\n    n = gen_math_ops.unsorted_segment_sum(ones_tensor, segment_ids, num_segments)\n    broadcastable_shape = array_ops.concat([num_segments[array_ops.newaxis], array_ops.ones([array_ops.rank(data) - array_ops.rank(segment_ids)], dtype=num_segments.dtype)], axis=0)\n    n = array_ops.reshape(n, broadcastable_shape)\n    return gen_math_ops.maximum(n, 1)",
    "docstring": "Helper function for unsorted_segment_mean/_sqrtN. Computes the number of segment entries with 0-entries set to 1 to allow division by N. Args: data: A with data that will be assembled in the output. segment_ids: An integer tensor whose shape is a prefix of . The values must be in the range . The values are always validated to be in range on CPU, never validated on TPU/GPU. num_segments: An integer scalar . The number of distinct segment IDs. Returns: A with the number of segment entries with 0-entries set to 1.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\math_ops.py",
    "ast_data": "FunctionDef name:_unsorted_segment_N arg:data arg:segment_ids arg:num_segments arguments arg arg arg Assign Call Assign Call Assign Call Assign Call Assign Call Call Call Call Assign Call Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "_str_accumulate",
    "source_code": "def _str_accumulate(self, name: str, *, skipna: bool=True, **kwargs) -> ArrowExtensionArray | ExtensionArray:\n    if name == 'cumprod':\n        msg = f\"operation '{name}' not supported for dtype '{self.dtype}'\"\n        raise TypeError(msg)\n    tail: pa.array | None = None\n    na_mask: pa.array | None = None\n    pa_array = self._pa_array\n    np_func = {'cumsum': np.cumsum, 'cummin': np.minimum.accumulate, 'cummax': np.maximum.accumulate}[name]\n    if self._hasna:\n        na_mask = pc.is_null(pa_array)\n        if pc.all(na_mask) == pa.scalar(True):\n            return type(self)(pa_array)\n        if skipna:\n            if name == 'cumsum':\n                pa_array = pc.fill_null(pa_array, '')\n            else:\n                pa_array = pc.fill_null_forward(pa_array)\n                pa_array = pc.fill_null_backward(pa_array)\n        else:\n            idx = pc.index(na_mask, True).as_py()\n            tail = pa.nulls(len(pa_array) - idx, type=pa_array.type)\n            pa_array = pa_array[:idx]\n    pa_result = pa.array(np_func(pa_array), type=pa_array.type)\n    if tail is not None:\n        pa_result = pa.concat_arrays([pa_result, tail])\n    elif na_mask is not None:\n        pa_result = pc.if_else(na_mask, None, pa_result)\n    result = type(self)(pa_result)\n    return result",
    "docstring": "Accumulate implementation for strings, see docstring for details. pyarrow.compute does not implement these methods for strings.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\arrays\\arrow\\array.py",
    "ast_data": "FunctionDef name:_str_accumulate arg:self arg:name arguments arg arg arg arg If Compare Assign Raise Call Assign Assign If Assign Call If Compare Call Call Return return:yes Call Call If If Compare Assign Call Assign Call Assign Call Assign Call Call Assign Call Call Assign Assign Call Call If Compare Assign Call If Compare Assign Call Assign Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_compute_gradient_list",
    "source_code": "def _compute_gradient_list(x, x_shape, y, y_shape, x_init_value=None, delta=0.001, init_targets=None, extra_feed_dict=None):\n    assert isinstance(x, list)\n    dx, dy = zip(*[_compute_dx_and_dy(xi, y, y_shape) for xi in x])\n    if init_targets is not None:\n        assert isinstance(init_targets, (list, tuple))\n        for init in init_targets:\n            init.run()\n    if x_init_value is None:\n        x_init_value = [None] * len(x)\n    ret = [_compute_gradient(xi, x_shapei, dxi, y, y_shape, dyi, x_init_valuei, delta, extra_feed_dict=extra_feed_dict) for xi, x_shapei, dxi, dyi, x_init_valuei in zip(x, x_shape, dx, dy, x_init_value)]\n    return ret",
    "docstring": "Compute gradients for a list of x values.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\gradient_checker.py",
    "ast_data": "FunctionDef name:_compute_gradient_list arg:x arg:x_shape arg:y arg:y_shape arg:x_init_value arg:delta arg:init_targets arg:extra_feed_dict arguments arg arg arg arg arg arg arg arg Call Assign Call Call If Compare Call For Call If Compare Assign Call Assign Call Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "_auto_set_column_width",
    "source_code": "def _auto_set_column_width(self, col, renderer):\n    cells = [cell for key, cell in self._cells.items() if key[1] == col]\n    max_width = max((cell.get_required_width(renderer) for cell in cells), default=0)\n    for cell in cells:\n        cell.set_width(max_width)",
    "docstring": "Automatically set width for column.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\table.py",
    "ast_data": "FunctionDef name:_auto_set_column_width arg:self arg:col arg:renderer arguments arg arg arg Assign Call Compare Assign Call Call For Call"
  },
  {
    "library": "pandas",
    "name": "_setitem_single_block",
    "source_code": "def _setitem_single_block(self, indexer, value, name: str) -> None:\n    from pandas import Series\n    if isinstance(value, ABCSeries) and name != 'iloc' or isinstance(value, dict):\n        value = self._align_series(indexer, Series(value))\n    info_axis = self.obj._info_axis_number\n    item_labels = self.obj._get_axis(info_axis)\n    if isinstance(indexer, tuple):\n        if self.ndim == len(indexer) == 2 and is_integer(indexer[1]) and com.is_null_slice(indexer[0]):\n            col = item_labels[indexer[info_axis]]\n            if len(item_labels.get_indexer_for([col])) == 1:\n                loc = item_labels.get_loc(col)\n                self._setitem_single_column(loc, value, indexer[0])\n                return\n        indexer = maybe_convert_ix(*indexer)\n    if isinstance(value, ABCDataFrame) and name != 'iloc':\n        value = self._align_frame(indexer, value)._values\n    self.obj._mgr = self.obj._mgr.setitem(indexer=indexer, value=value)",
    "docstring": "_setitem_with_indexer for the case when we have a single Block.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\indexing.py",
    "ast_data": "FunctionDef name:_setitem_single_block arg:self arg:indexer arg:value arg:name arguments arg arg arg arg If BoolOp BoolOp Call Compare Call Assign Call Call Assign Assign Call If Call If BoolOp Compare Call Call Call Assign If Compare Call Call Assign Call Call Return return:no Assign Call If BoolOp Call Compare Assign Call Assign Call"
  },
  {
    "library": "tensorflow",
    "name": "_get_executor_init",
    "source_code": "@abstractmethod\ndef _get_executor_init(self, workers):\n    raise NotImplementedError",
    "docstring": "Gets the Pool initializer for multiprocessing. Args: workers: Number of workers. Returns: Function, a Function to initialize the pool",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\utils\\data_utils.py",
    "ast_data": "FunctionDef name:_get_executor_init arg:self arg:workers arguments arg arg Raise"
  },
  {
    "library": "tensorflow",
    "name": "get_random_numeric_tensor",
    "source_code": "def get_random_numeric_tensor(self, dtype=None, min_size=_MIN_SIZE, max_size=_MAX_SIZE, min_val=_MIN_INT, max_val=_MAX_INT):\n    if max_size > 8:\n        raise tf.errors.InvalidArgumentError(None, None, 'Given size of {} will result in an OOM error'.format(max_size))\n    seed = self.get_int()\n    shape = self.get_int_list(min_length=min_size, max_length=max_size, min_int=min_size, max_int=max_size)\n    if dtype is None:\n        dtype = self.get_tf_dtype(allowed_set=_TF_RANDOM_DTYPES)\n    elif dtype not in _TF_RANDOM_DTYPES:\n        raise tf.errors.InvalidArgumentError(None, None, 'Given dtype {} is not accepted in get_random_numeric_tensor'.format(dtype))\n    return tf.random.uniform(shape=shape, minval=min_val, maxval=max_val, dtype=dtype, seed=seed)",
    "docstring": "Return a tensor of random shape and values. Generated tensors are capped at dimension sizes of 8, as 2^32 bytes of requested memory crashes the fuzzer (see b/34190148). Returns only type that tf.random.uniform can generate. If you need a different type, consider using tf.cast. Args: dtype: Type of tensor, must of one of the following types: float16, float32, float64, int32, or int64 min_size: Minimum size of returned tensor max_size: Maximum size of returned tensor min_val: Minimum value in returned tensor max_val: Maximum value in returned tensor Returns: Tensor of random shape filled with uniformly random numeric values.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\security\\fuzzing\\python_fuzzing.py",
    "ast_data": "FunctionDef name:get_random_numeric_tensor arg:self arg:dtype arg:min_size arg:max_size arg:min_val arg:max_val arguments arg arg arg arg arg arg If Compare Raise Call Call Assign Call Assign Call If Compare Assign Call If Compare Raise Call Call Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "std",
    "source_code": "def std(self, *args, **kwds):\n    kwds['moments'] = 'v'\n    res = sqrt(self.stats(*args, **kwds))\n    return res",
    "docstring": "Standard deviation of the distribution. Parameters ---------- arg1, arg2, arg3,... : array_like The shape parameter(s) for the distribution (see docstring of the instance object for more information) loc : array_like, optional location parameter (default=0) scale : array_like, optional scale parameter (default=1) Returns ------- std : float standard deviation of the distribution",
    "type": "method",
    "file_path": "scipy\\scipy\\stats\\_distn_infrastructure.py",
    "ast_data": "FunctionDef name:std arg:self arguments arg arg arg Assign Assign Call Call Return return:yes"
  },
  {
    "library": "scipy",
    "name": "var",
    "source_code": "def var(self, alpha):\n    alpha = _dirichlet_check_parameters(alpha)\n    alpha0 = np.sum(alpha)\n    out = alpha * (alpha0 - alpha) / (alpha0 * alpha0 * (alpha0 + 1))\n    return _squeeze_output(out)",
    "docstring": "Variance of the Dirichlet distribution. Parameters ---------- %(_dirichlet_doc_default_callparams)s Returns ------- v : ndarray or scalar Variance of the Dirichlet distribution.",
    "type": "method",
    "file_path": "scipy\\scipy\\stats\\_multivariate.py",
    "ast_data": "FunctionDef name:var arg:self arg:alpha arguments arg arg Assign Call Assign Call Assign Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "get_remote_cache",
    "source_code": "@staticmethod\n@functools.lru_cache(None)\ndef get_remote_cache() -> Optional[RemoteCache[JsonDataTy]]:\n    cache_id = 'autograd-experimental'\n    return create_cache(cache_id, config.is_fbcode(), 'FbRemoteAOTAutogradCache', 'RemoteAOTAutogradCache')",
    "docstring": "Attempts to load the remote cache, returns None on error.",
    "type": "method",
    "file_path": "pytorch\\torch\\_functorch\\_aot_autograd\\autograd_cache.py",
    "ast_data": "FunctionDef name:get_remote_cache arguments Assign Return return:yes Call Call Call"
  },
  {
    "library": "pandas",
    "name": "_validate_skipfooter",
    "source_code": "def _validate_skipfooter(kwds: dict[str, Any]) -> None:\n    if kwds.get('skipfooter'):\n        if kwds.get('iterator') or kwds.get('chunksize'):\n            raise ValueError(\"'skipfooter' not supported for iteration\")\n        if kwds.get('nrows'):\n            raise ValueError(\"'skipfooter' not supported with 'nrows'\")",
    "docstring": "Check whether skipfooter is compatible with other kwargs in TextFileReader. Parameters ---------- kwds : dict Keyword arguments passed to TextFileReader. Raises ------ ValueError If skipfooter is not compatible with other parameters.",
    "type": "function",
    "file_path": "pandas\\pandas\\io\\parsers\\readers.py",
    "ast_data": "FunctionDef name:_validate_skipfooter arg:kwds arguments arg If Call If BoolOp Call Call Raise Call If Call Raise Call"
  },
  {
    "library": "scipy",
    "name": "change_D",
    "source_code": "def change_D(D, order, factor):\n    R = compute_R(order, factor)\n    U = compute_R(order, 1)\n    RU = R.dot(U)\n    D[:order + 1] = np.dot(RU.T, D[:order + 1])",
    "docstring": "Change differences array in-place when step size is changed.",
    "type": "function",
    "file_path": "scipy\\scipy\\integrate\\_ivp\\bdf.py",
    "ast_data": "FunctionDef name:change_D arg:D arg:order arg:factor arguments arg arg arg Assign Call Assign Call Assign Call Assign Call"
  },
  {
    "library": "authlib",
    "name": "parse_authorization_code_response",
    "source_code": "def parse_authorization_code_response(uri, state=None):\n    query = urlparse.urlparse(uri).query\n    params = dict(urlparse.parse_qsl(query))\n    if 'code' not in params:\n        raise MissingCodeException()\n    params_state = params.get('state')\n    if state and params_state != state:\n        raise MismatchingStateException()\n    return params",
    "docstring": "Parse authorization grant response URI into a dict. If the resource owner grants the access request, the authorization server issues an authorization code and delivers it to the client by adding the following parameters to the query component of the redirection URI using the `` format: **code** REQUIRED. The authorization code generated by the authorization server. The authorization code MUST expire shortly after it is issued to mitigate the risk of leaks. A maximum authorization code lifetime of 10 minutes is RECOMMENDED. The client MUST NOT use the authorization code more than once. If an authorization code is used more than once, the authorization server MUST deny the request and SHOULD revoke (when possible) all tokens previously issued based on that authorization code. The authorization code is bound to the client identifier and redirection URI. **state** REQUIRED if the \"state\" parameter was present in the client authorization request. The exact value received from the client. :param uri: The full redirect URL back to the client. :param state: The state parameter from the authorization request. For example, the authorization server redirects the user-agent by sending the following HTTP response: .. code-block:: http HTTP/1.1 302 Found Location: &state=xyz",
    "type": "function",
    "file_path": "authlib\\authlib\\oauth2\\rfc6749\\parameters.py",
    "ast_data": "FunctionDef name:parse_authorization_code_response arg:uri arg:state arguments arg arg Assign Call Assign Call Call If Compare Raise Call Assign Call If BoolOp Compare Raise Call Return return:yes"
  },
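A usage sketch mirroring the RFC 6749 redirect example from the docstring (the code and state values are the illustrative ones from the spec):

```python
from authlib.oauth2.rfc6749.parameters import parse_authorization_code_response

uri = "https://client.example.com/cb?code=SplxlOBeZQQYbYS6WxSbIA&state=xyz"
params = parse_authorization_code_response(uri, state="xyz")
print(params)  # {'code': 'SplxlOBeZQQYbYS6WxSbIA', 'state': 'xyz'}

# A state mismatch raises MismatchingStateException; a redirect without
# a 'code' parameter raises MissingCodeException.
```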
  {
    "library": "tensorflow",
    "name": "_get_dense_tensor",
    "source_code": "@abc.abstractmethod\ndef _get_dense_tensor(self, inputs, weight_collections=None, trainable=None):\n    pass",
    "docstring": "Returns a . The output of this function will be used by model-builder-functions. For example the pseudo code of will be like: Args: inputs: A object to access inputs. weight_collections: List of graph collections to which Variables (if any will be created) are added. trainable: If also add variables to the graph collection (see ). Returns: of shape [batch_size] + .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\feature_column\\feature_column.py",
    "ast_data": "FunctionDef name:_get_dense_tensor arg:self arg:inputs arg:weight_collections arg:trainable arguments arg arg arg arg"
  },
  {
    "library": "pytorch",
    "name": "_get_grad_norm",
    "source_code": "def _get_grad_norm(params: Iterable[nn.Parameter], norm_type: float, zero: torch.Tensor, device: torch.device) -> torch.Tensor:\n    params_with_grad = [param for param in params if param.grad is not None]\n    if len(params_with_grad) == 0:\n        return zero\n    grads = [param.grad for param in params_with_grad]\n    grad_dtypes = {grad.dtype for grad in grads}\n    if len(grad_dtypes) != 1:\n        raise ValueError(f'Requires uniform dtype across all gradients but got {grad_dtypes}')\n    grad_norm = torch.linalg.vector_norm(torch.stack([torch.linalg.vector_norm(grad.detach(), norm_type, dtype=torch.float32) for grad in grads]), norm_type, dtype=torch.float32)\n    return grad_norm.to(device=device)",
    "docstring": "Return the gradient norm of parameters `` s, where the gradients are viewed as a single vector. The returned norm is in FP32 even if parameters/gradients are in a low precision. This is because the downstream use of this return value is a reduction across ranks.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\fsdp\\fully_sharded_data_parallel.py",
    "ast_data": "FunctionDef name:_get_grad_norm arg:params arg:norm_type arg:zero arg:device arguments arg arg arg arg Assign Compare If Compare Call Return return:yes Assign Assign If Compare Call Raise Call Assign Call Call Call Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "max_pooling2d",
    "source_code": "def max_pooling2d(inputs, pool_size, strides, padding='valid', data_format='channels_last', name=None):\n    warnings.warn('`tf.layers.max_pooling2d` is deprecated and will be removed in a future version. Please use `tf.keras.layers.MaxPooling2D` instead.')\n    layer = MaxPooling2D(pool_size=pool_size, strides=strides, padding=padding, data_format=data_format, name=name)\n    return layer.apply(inputs)",
    "docstring": "Max pooling layer for 2D inputs (e.g. images). Args: inputs: The tensor over which to pool. Must have rank 4. pool_size: An integer or tuple/list of 2 integers: (pool_height, pool_width) specifying the size of the pooling window. Can be a single integer to specify the same value for all spatial dimensions. strides: An integer or tuple/list of 2 integers, specifying the strides of the pooling operation. Can be a single integer to specify the same value for all spatial dimensions. padding: A string. The padding method, either 'valid' or 'same'. Case-insensitive. data_format: A string. The ordering of the dimensions in the inputs. (default) and are supported. corresponds to inputs with shape while corresponds to inputs with shape . name: A string, the name of the layer. Returns: Output tensor. Raises: ValueError: if eager execution is enabled.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\legacy_tf_layers\\pooling.py",
    "ast_data": "FunctionDef name:max_pooling2d arg:inputs arg:pool_size arg:strides arg:padding arg:data_format arg:name arguments arg arg arg arg arg arg Call Assign Call Return return:yes Call"
  },
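The deprecation warning above points at the Keras layer; a minimal migration sketch (the input shape is an illustrative assumption):

```python
import tensorflow as tf

inputs = tf.random.normal([1, 8, 8, 3])        # rank-4, channels_last
pool = tf.keras.layers.MaxPooling2D(pool_size=2, strides=2, padding="valid")
print(pool(inputs).shape)                      # (1, 4, 4, 3)
```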
  {
    "library": "pytorch",
    "name": "_assert_unsharded",
    "source_code": "def _assert_unsharded(stage_idx: int):\n    if stage_idx in unshard_ops:\n        unshard_ops[stage_idx].wait()\n        del unshard_ops[stage_idx]\n        unsharded_stages.add(stage_idx)\n    assert stage_idx in unsharded_stages, f'Attempted to compute on sharded stage_idx={stage_idx!r}'",
    "docstring": "If an unshard is active for , wait() it and mark unshared.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\pipelining\\schedules.py",
    "ast_data": "FunctionDef name:_assert_unsharded arg:stage_idx arguments arg If Compare Call Call Compare"
  },
  {
    "library": "tensorflow",
    "name": "unwrap_outputs",
    "source_code": "def unwrap_outputs(distribution_strategy, grouped_outputs, with_loss_tensor=False):\n    if not with_loss_tensor:\n        return flatten_per_replica_values(distribution_strategy, grouped_outputs)\n    if not isinstance(grouped_outputs, list):\n        grouped_outputs = [grouped_outputs]\n    loss = distribution_strategy.reduce(reduce_util.ReduceOp.SUM, grouped_outputs[0], axis=None)\n    all_outputs = flatten_per_replica_values(distribution_strategy, grouped_outputs[1:])\n    if backend.is_tpu_strategy(distribution_strategy) and ops.executing_eagerly_outside_functions():\n        all_outputs = all_outputs[::distribution_strategy.num_replicas_in_sync]\n    return [loss] + all_outputs",
    "docstring": "Unwrap the list of outputs contained in the PerReplica parameters. This function calls to parse each of the input parameters into a list of outputs on the different devices. If we set to be True, we also call on the list of losses on the different devices to give us one loss tensor. Args: distribution_strategy: DistributionStrategy used to distribute training and validation. grouped_outputs: PerReplica outputs returned from the train or test function that we ran on each device. with_loss_tensor: Boolean that indicates if we need to add the reduced loss tensor as one of the outputs. Returns: Values of each of the PerReplica outputs.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\distribute\\distributed_training_utils_v1.py",
    "ast_data": "FunctionDef name:unwrap_outputs arg:distribution_strategy arg:grouped_outputs arg:with_loss_tensor arguments arg arg arg If Return return:yes Call If Call Assign Assign Call Assign Call If BoolOp Call Call Assign Return return:yes"
  },
  {
    "library": "pandas",
    "name": "IntCastingNaNError",
    "source_code": "class IntCastingNaNError(ValueError):\n    pass",
    "docstring": "Exception raised when converting (``) an array with NaN to an integer type. This error occurs when attempting to cast a data structure containing non-finite values (such as NaN or infinity) to an integer data type. Integer types do not support non-finite values, so such conversions are explicitly disallowed to prevent silent data corruption or unexpected behavior. See Also -------- DataFrame.astype : Method to cast a pandas DataFrame object to a specified dtype. Series.astype : Method to cast a pandas Series object to a specified dtype. Examples -------- >>> pd.DataFrame(np.array([[1, np.nan], [2, 3]]), dtype=\"i8\") Traceback (most recent call last): IntCastingNaNError: Cannot convert non-finite values (NA or inf) to integer",
    "type": "class",
    "file_path": "pandas\\pandas\\errors\\__init__.py",
    "ast_data": "ClassDef name:IntCastingNaNError"
  },
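The documented failure mode can be reproduced and caught directly; this sketch just wraps the docstring's example in a try/except:

```python
import numpy as np
import pandas as pd
from pandas.errors import IntCastingNaNError

try:
    pd.DataFrame(np.array([[1, np.nan], [2, 3]]), dtype="i8")
except IntCastingNaNError as exc:
    print(exc)  # Cannot convert non-finite values (NA or inf) to integer
```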
  {
    "library": "tensorflow",
    "name": "scatter_sub",
    "source_code": "def scatter_sub(self, sparse_delta, use_locking=False, name=None):\n    if not isinstance(sparse_delta, indexed_slices.IndexedSlices):\n        raise TypeError('sparse_delta is not IndexedSlices: %s' % sparse_delta)\n    return gen_state_ops.scatter_sub(self._variable, sparse_delta.indices, sparse_delta.values, use_locking=use_locking, name=name)",
    "docstring": "Subtracts from this variable. Args: sparse_delta: to be subtracted from this variable. use_locking: If , use locking during the operation. name: the name of the operation. Returns: A that will hold the new value of this variable after the scattered subtraction has completed. Raises: TypeError: if is not an .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\ref_variable.py",
    "ast_data": "FunctionDef name:scatter_sub arg:self arg:sparse_delta arg:use_locking arg:name arguments arg arg arg arg If Call Raise Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "_save_for_lite_interpreter",
    "source_code": "def _save_for_lite_interpreter(self, *args, **kwargs):\n    return self._c._save_for_mobile(*args, **kwargs)",
    "docstring": "Add (or update) the bytecode session to the script model. _save_for_lite_interpreter(f) The updated model is used in lite interpreter for mobile applications. Args: f: a string containing a file name. _extra_files: Map from filename to contents which will be stored as part of 'f'.",
    "type": "method",
    "file_path": "pytorch\\torch\\jit\\_script.py",
    "ast_data": "FunctionDef name:_save_for_lite_interpreter arg:self arguments arg arg arg Return return:yes Call"
  },
  {
    "library": "sphinx",
    "name": "check_master_doc",
    "source_code": "def check_master_doc(app: Sphinx, env: BuildEnvironment, added: Set[str], changed: Set[str], removed: Set[str]) -> Iterable[str]:\n    docnames = app.project.docnames\n    if app.config.master_doc == 'index' and 'index' not in docnames and ('contents' in docnames):\n        logger.warning(__('Sphinx now uses \"index\" as the master document by default. To keep pre-2.0 behaviour, set \"master_doc = \\'contents\\'\".'))\n        app.config.master_doc = 'contents'\n    return changed",
    "docstring": "Sphinx 2.0 changed the default from 'contents' to 'index'.",
    "type": "function",
    "file_path": "sphinx\\sphinx\\config.py",
    "ast_data": "FunctionDef name:check_master_doc arg:app arg:env arg:added arg:changed arg:removed arguments arg arg arg arg arg Assign If BoolOp Compare Compare Compare Call Call Assign Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "contains",
    "source_code": "def contains(self, mouseevent):\n    if self._different_canvas(mouseevent):\n        return (False, {})\n    for c in self.get_children():\n        a, b = c.contains(mouseevent)\n        if a:\n            return (a, b)\n    return (False, {})",
    "docstring": "Delegate the mouse event contains-check to the children. As a container, the does not respond itself to mouseevents. Parameters ---------- mouseevent : Returns ------- contains : bool Whether any values are within the radius. details : dict An artist-specific dictionary of details of the event context, such as which points are contained in the pick radius. See the individual Artist subclasses for details. See Also -------- .Artist.contains",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\offsetbox.py",
    "ast_data": "FunctionDef name:contains arg:self arg:mouseevent arguments arg arg If Call Return return:yes For Call Assign Call If Return return:yes Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "BVar",
    "source_code": "class BVar:\n\n    def __init__(self, c):\n        self.c = c\n\n    def __repr__(self):\n        return f'BV({self.c})'\n\n    def __eq__(self, other):\n        if isinstance(other, BVar):\n            return self.c == other.c\n        else:\n            return False",
    "docstring": "Boolean variable",
    "type": "class",
    "file_path": "pytorch\\torch\\fx\\experimental\\migrate_gradual_types\\constraint.py",
    "ast_data": "ClassDef name:BVar FunctionDef name:__init__ arg:self arg:c arguments arg arg Assign FunctionDef name:__repr__ arg:self arguments arg Return return:yes FunctionDef name:__eq__ arg:self arg:other arguments arg arg If Call Return return:yes Compare Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_MeanGrad",
    "source_code": "@ops.RegisterGradient('Mean')\ndef _MeanGrad(op: ops.Operation, grad):\n    sum_grad = _SumGrad(op, grad)[0]\n    input_shape = op.inputs[0]._shape_tuple()\n    output_shape = op.outputs[0]._shape_tuple()\n    if input_shape is not None and output_shape is not None and (None not in input_shape) and (None not in output_shape):\n        input_size = np.prod(input_shape)\n        output_size = np.prod(output_shape)\n        factor = input_size // max(output_size, 1)\n        factor = constant_op.constant(factor, dtype=sum_grad.dtype)\n    else:\n        input_shape = array_ops.shape(op.inputs[0])\n        input_rank = array_ops.size(input_shape)\n        axes = math_ops.cast(op.inputs[1], input_rank.dtype)\n        axes = (axes + input_rank) % input_rank\n        factor = math_ops.reduce_prod(array_ops.gather(input_shape, axes))\n    return (math_ops.truediv(sum_grad, math_ops.cast(factor, sum_grad.dtype)), None)",
    "docstring": "Gradient for Mean.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\math_grad.py",
    "ast_data": "FunctionDef name:_MeanGrad arg:op arg:grad arguments arg arg Assign Call Assign Call Assign Call If BoolOp Compare Compare Compare Compare Assign Call Assign Call Assign Call Assign Call Assign Call Assign Call Assign Call Assign Assign Call Call Return return:yes Call Call Call"
  },
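The factor computed by `_MeanGrad` can be observed from the public API (a sketch, not the registered gradient itself): the gradient of a mean distributes `grad / factor` to every reduced element.

```python
import tensorflow as tf

x = tf.Variable([[1.0, 2.0], [3.0, 4.0]])
with tf.GradientTape() as tape:
    y = tf.reduce_mean(x)        # 4 elements reduced to 1, so factor = 4
print(tape.gradient(y, x))       # every entry is 0.25
```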
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, options=None):\n    if options is not None:\n        self._options = copy.deepcopy(options)\n    else:\n        self._options = {'max_depth': 100, 'min_bytes': 0, 'min_micros': 0, 'min_params': 0, 'min_float_ops': 0, 'min_occurrence': 0, 'order_by': 'name', 'account_type_regexes': ['.*'], 'start_name_regexes': ['.*'], 'trim_name_regexes': [], 'show_name_regexes': ['.*'], 'hide_name_regexes': [], 'account_displayed_op_only': False, 'select': ['micros'], 'step': -1, 'output': 'stdout'}",
    "docstring": "Constructor. Args: options: Optional initial option dict to start with.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\profiler\\option_builder.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:options arguments arg arg If Compare Assign Call Assign"
  },
  {
    "library": "pytorch",
    "name": "current_device",
    "source_code": "def current_device() -> int:\n    _lazy_init()\n    return torch._C._cuda_getDevice()",
    "docstring": "Return the index of a currently selected device.",
    "type": "function",
    "file_path": "pytorch\\torch\\cuda\\__init__.py",
    "ast_data": "FunctionDef name:current_device arguments Call Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "assert_all_finite",
    "source_code": "def assert_all_finite(X, *, allow_nan=False, estimator_name=None, input_name=''):\n    _assert_all_finite(X.data if sp.issparse(X) else X, allow_nan=allow_nan, estimator_name=estimator_name, input_name=input_name)",
    "docstring": "Throw a ValueError if X contains NaN or infinity. Parameters ---------- X : {ndarray, sparse matrix} The input data. allow_nan : bool, default=False If True, do not throw error when contains NaN. estimator_name : str, default=None The estimator name, used to construct the error message. input_name : str, default=\"\" The data name used to construct the error message. In particular if is \"X\" and the data has NaN values and allow_nan is False, the error message will link to the imputer documentation. Examples -------- >>> from sklearn.utils import assert_all_finite >>> import numpy as np >>> array = np.array([1, np.inf, np.nan, 4]) >>> try: ... assert_all_finite(array) ... print(\"Test passed: Array contains only finite values.\") ... except ValueError: ... print(\"Test failed: Array contains non-finite values.\") Test failed: Array contains non-finite values.",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\utils\\validation.py",
    "ast_data": "FunctionDef name:assert_all_finite arg:X arguments arg arg arg arg Call Call"
  },
  {
    "library": "tensorflow",
    "name": "current_loss_scale",
    "source_code": "@property\ndef current_loss_scale(self):\n    return self._current_loss_scale",
    "docstring": "Returns the current loss scale as a float32 .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\mixed_precision\\loss_scale_optimizer.py",
    "ast_data": "FunctionDef name:current_loss_scale arg:self arguments arg Return return:yes"
  },
  {
    "library": "django",
    "name": "save_existing",
    "source_code": "def save_existing(self, form, obj, commit=True):\n    return form.save(commit=commit)",
    "docstring": "Save and return an existing model instance for the given form.",
    "type": "method",
    "file_path": "django\\django\\forms\\models.py",
    "ast_data": "FunctionDef name:save_existing arg:self arg:form arg:obj arg:commit arguments arg arg arg arg Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "_root_scalar_bisect_doc",
    "source_code": "def _root_scalar_bisect_doc():\n    pass",
    "docstring": "Options ------- args : tuple, optional Extra arguments passed to the objective function. bracket: A sequence of 2 floats, optional An interval bracketing a root. `` must have different signs at the two endpoints. xtol : float, optional Tolerance (absolute) for termination. rtol : float, optional Tolerance (relative) for termination. maxiter : int, optional Maximum number of iterations. options: dict, optional Specifies any method-specific options not covered above.",
    "type": "function",
    "file_path": "scipy\\scipy\\optimize\\_root_scalar.py",
    "ast_data": "FunctionDef name:_root_scalar_bisect_doc arguments"
  },
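A usage sketch for these options through `scipy.optimize.root_scalar` (the quadratic objective is an arbitrary example):

```python
from scipy.optimize import root_scalar

# f(0) < 0 < f(2), so the bracket endpoints have different signs.
sol = root_scalar(lambda x: x**2 - 2, bracket=(0, 2), method="bisect", xtol=1e-10)
print(sol.root)  # ~1.4142135623
```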
  {
    "library": "sphinx",
    "name": "copy_image_files",
    "source_code": "def copy_image_files(self) -> None:\n    if self.images:\n        if self.config.epub_fix_images or self.config.epub_max_image_width:\n            if not PILLOW_AVAILABLE:\n                logger.warning(__('Pillow not found - copying image files'))\n                super().copy_image_files()\n            else:\n                self.copy_image_files_pil()\n        else:\n            super().copy_image_files()",
    "docstring": "Copy image files to destination directory. This overwritten method can use Pillow to convert image files.",
    "type": "method",
    "file_path": "sphinx\\sphinx\\builders\\_epub_base.py",
    "ast_data": "FunctionDef name:copy_image_files arg:self arguments arg If If BoolOp If Call Call Call Call Call Call Call"
  },
  {
    "library": "matplotlib",
    "name": "__init__",
    "source_code": "def __init__(self, s, loc, *, pad=0.4, borderpad=0.5, prop=None, **kwargs):\n    if prop is None:\n        prop = {}\n    badkwargs = {'va', 'verticalalignment'}\n    if badkwargs & set(prop):\n        raise ValueError('Mixing verticalalignment with AnchoredText is not supported.')\n    self.txt = TextArea(s, textprops=prop)\n    fp = self.txt._text.get_fontproperties()\n    super().__init__(loc, pad=pad, borderpad=borderpad, child=self.txt, prop=fp, **kwargs)",
    "docstring": "Parameters ---------- s : str Text. loc : str Location code. See . pad : float, default: 0.4 Padding around the text as fraction of the fontsize. borderpad : float, default: 0.5 Spacing between the offsetbox frame and the *bbox_to_anchor*. prop : dict, optional Dictionary of keyword parameters to be passed to the instance contained inside AnchoredText. **kwargs All other parameters are passed to .",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\offsetbox.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:s arg:loc arguments arg arg arg arg arg arg arg If Compare Assign Assign If Call Raise Call Assign Call Assign Call Call Call"
  },
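A short usage sketch for the constructor (the label text and location are arbitrary):

```python
import matplotlib.pyplot as plt
from matplotlib.offsetbox import AnchoredText

fig, ax = plt.subplots()
at = AnchoredText("label", loc="upper left", prop={"size": 10})
ax.add_artist(at)   # the box stays anchored regardless of data limits
```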
  {
    "library": "tensorflow",
    "name": "_type_spec_from_value",
    "source_code": "def _type_spec_from_value(value) -> TypeSpec:\n    if isinstance(value, core_types.Symbol):\n        return trace_type.from_value(value)\n    if isinstance(value, composite_tensor.CompositeTensor):\n        return value._type_spec\n    if isinstance(value, list) and value:\n        subspecs = [_type_spec_from_value(v) for v in value]\n        if isinstance(subspecs[0], BatchableTypeSpec):\n            merged_subspec = subspecs[0].most_specific_common_supertype(subspecs[1:])\n            if merged_subspec is not None:\n                return merged_subspec._batch(len(subspecs))\n    for entry in reversed(_TYPE_CONVERSION_FUNCTION_REGISTRY):\n        type_object, converter_fn, allow_subclass = entry\n        if type(value) is type_object or (allow_subclass and isinstance(value, type_object)):\n            return converter_fn(value)\n    return None",
    "docstring": "Returns a that represents the given .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\type_spec.py",
    "ast_data": "FunctionDef name:_type_spec_from_value arg:value arguments arg If Call Return return:yes Call If Call Return return:yes If BoolOp Call Assign Call If Call Assign Call If Compare Return return:yes Call Call For Call Assign If BoolOp Compare Call BoolOp Call Return return:yes Call Return return:no"
  },
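The public wrapper `tf.type_spec_from_value` exposes this resolution; a sketch of the dense and composite cases:

```python
import tensorflow as tf

print(tf.type_spec_from_value(tf.constant([1, 2, 3])))
# TensorSpec(shape=(3,), dtype=tf.int32, name=None)
print(tf.type_spec_from_value(tf.ragged.constant([[1], [2, 3]])))
# RaggedTensorSpec(...) -- a CompositeTensor reports its own _type_spec
```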
  {
    "library": "scikit-learn",
    "name": "fit",
    "source_code": "@_fit_context(prefer_skip_nested_validation=True)\ndef fit(self, y):\n    self._cached_dict = None\n    if self.classes is None:\n        classes = sorted(set(itertools.chain.from_iterable(y)))\n    elif len(set(self.classes)) < len(self.classes):\n        raise ValueError('The classes argument contains duplicate classes. Remove these duplicates before passing them to MultiLabelBinarizer.')\n    else:\n        classes = self.classes\n    dtype = int if all((isinstance(c, int) for c in classes)) else object\n    self.classes_ = np.empty(len(classes), dtype=dtype)\n    self.classes_[:] = classes\n    return self",
    "docstring": "Fit the label sets binarizer, storing :term:. Parameters ---------- y : iterable of iterables A set of labels (any orderable and hashable object) for each sample. If the parameter is set, will not be iterated. Returns ------- self : object Fitted estimator.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\preprocessing\\_label.py",
    "ast_data": "FunctionDef name:fit arg:self arg:y arguments arg arg Assign If Compare Assign Call Call Call If Compare Call Call Call Raise Call Assign Assign Call Call Assign Call Call Assign Return return:yes Call"
  },
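A usage sketch for `fit` (the label sets are arbitrary): `classes_` ends up as the sorted union of labels seen in `y`.

```python
from sklearn.preprocessing import MultiLabelBinarizer

mlb = MultiLabelBinarizer()
mlb.fit([{"sci-fi", "thriller"}, {"comedy"}])
print(mlb.classes_)                            # ['comedy' 'sci-fi' 'thriller']
print(mlb.transform([{"comedy", "sci-fi"}]))   # [[1 1 0]]
```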
  {
    "library": "tensorflow",
    "name": "next_sample",
    "source_code": "def next_sample(uid):\n    return next(_SHARED_SEQUENCES[uid])",
    "docstring": "Gets the next value from the generator . To allow multiple generators to be used at the same time, we use to get a specific one. A single generator would cause the validation to overwrite the training generator. Args: uid: int, generator identifier Returns: The next value of generator .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\utils\\data_utils.py",
    "ast_data": "FunctionDef name:next_sample arg:uid arguments arg Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "_shuffle",
    "source_code": "def _shuffle(y, groups, random_state):\n    if groups is None:\n        indices = random_state.permutation(len(y))\n    else:\n        indices = np.arange(len(groups))\n        for group in np.unique(groups):\n            this_mask = groups == group\n            indices[this_mask] = random_state.permutation(indices[this_mask])\n    return _safe_indexing(y, indices)",
    "docstring": "Return a shuffled copy of y eventually shuffle among same groups.",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\model_selection\\_validation.py",
    "ast_data": "FunctionDef name:_shuffle arg:y arg:groups arg:random_state arguments arg arg arg If Compare Assign Call Call Assign Call Call For Call Assign Compare Assign Call Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "_needs_reindex_multi",
    "source_code": "def _needs_reindex_multi(self, axes, method, level: Level | None) -> bool:\n    return common.count_not_none(*axes.values()) == self._AXIS_LEN and method is None and (level is None) and self._can_fast_transpose",
    "docstring": "Check if we do need a multi reindex.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\generic.py",
    "ast_data": "FunctionDef name:_needs_reindex_multi arg:self arg:axes arg:method arg:level arguments arg arg arg arg Return return:yes BoolOp Compare Call Call Compare Compare"
  },
  {
    "library": "scipy",
    "name": "make_hyp2f1_test_cases",
    "source_code": "def make_hyp2f1_test_cases(rows):\n    result = '[\\n'\n    result += '\\n'.join((_make_hyp2f1_test_case(a, b, c, z, rtol) for a, b, c, z, rtol in rows))\n    result += '\\n]'\n    return result",
    "docstring": "Generate string for a list of test cases for test_hyp2f1.py. Parameters ---------- rows : list List of lists of the form [a, b, c, z, rtol] where a, b, c, z are parameters and the argument for hyp2f1 and rtol is an expected relative error for the associated test case. Returns ------- str String for a list of test cases. The output string can be printed or saved to a file and then copied into an argument for within .",
    "type": "function",
    "file_path": "scipy\\scipy\\special\\_precompute\\hyp2f1_data.py",
    "ast_data": "FunctionDef name:make_hyp2f1_test_cases arg:rows arguments arg Assign Call Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "glyph_name_or_index",
    "source_code": "@property\ndef glyph_name_or_index(self):\n    entry = self._get_pdftexmap_entry()\n    return _parse_enc(entry.encoding)[self.glyph] if entry.encoding is not None else self.glyph",
    "docstring": "Either the glyph name or the native charmap glyph index. If :file: specifies an encoding for this glyph's font, that is a mapping of glyph indices to Adobe glyph names; use it to convert dvi indices to glyph names. Callers can then convert glyph names to glyph indices (with FT_Get_Name_Index/get_name_index), and load the glyph using FT_Load_Glyph/load_glyph. If :file: specifies no encoding, the indices directly map to the font's \"native\" charmap; glyphs should directly load using FT_Load_Char/load_char after selecting the native charmap.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\dviread.py",
    "ast_data": "FunctionDef name:glyph_name_or_index arg:self arguments arg Assign Call Return return:yes Compare Call"
  },
  {
    "library": "scikit-learn",
    "name": "fit",
    "source_code": "def fit(self, X, y=None):\n    self.fit_transform(X)\n    return self",
    "docstring": "Fit model on training data X. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) Training data. y : Ignored Not used, present here for API consistency by convention. Returns ------- self : object Returns the transformer object.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\decomposition\\_truncated_svd.py",
    "ast_data": "FunctionDef name:fit arg:self arg:X arg:y arguments arg arg arg Call Return return:yes"
  },
  {
    "library": "scipy",
    "name": "fcratio",
    "source_code": "def fcratio(conmat, fval):\n    if DEBUGGING:\n        assert np.size(fval) >= 1\n        assert np.size(conmat, 1) == np.size(fval)\n    cmin = np.min(-conmat, axis=1)\n    cmax = np.max(-conmat, axis=1)\n    fmin = min(fval)\n    fmax = max(fval)\n    if any(cmin < 0.5 * cmax) and fmin < fmax:\n        denom = np.min(np.maximum(cmax, 0) - cmin, where=cmin < 0.5 * cmax, initial=np.inf)\n        r = (fmax - fmin) / denom\n    else:\n        r = 0\n    if DEBUGGING:\n        assert r >= 0\n    return r",
    "docstring": "This function calculates the ratio between the \"typical change\" of F and that of CONSTR. See equations (12)-(13) in Section 3 of the COBYLA paper for the definition of the ratio.",
    "type": "function",
    "file_path": "scipy\\scipy\\_lib\\pyprima\\pyprima\\src\\pyprima\\cobyla\\cobylb.py",
    "ast_data": "FunctionDef name:fcratio arg:conmat arg:fval arguments arg arg If Compare Call Compare Call Call Assign Call Assign Call Assign Call Assign Call If BoolOp Call Compare Compare Assign Call Call Compare Assign Assign If Compare Return return:yes"
  },
  {
    "library": "django",
    "name": "last_arg_byref",
    "source_code": "def last_arg_byref(args):\n    return args[-1]._obj.value",
    "docstring": "Return the last C argument's value by reference.",
    "type": "function",
    "file_path": "django\\django\\contrib\\gis\\geos\\prototypes\\errcheck.py",
    "ast_data": "FunctionDef name:last_arg_byref arg:args arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "add_nested_compat_imports",
    "source_code": "def add_nested_compat_imports(module_builder, compat_api_versions, output_package):\n    imported_modules = module_builder.get_destination_modules()\n    for v in compat_api_versions:\n        for sv in compat_api_versions:\n            subcompat_module = _SUBCOMPAT_MODULE_TEMPLATE % (v, sv)\n            compat_module = _COMPAT_MODULE_TEMPLATE % sv\n            module_builder.copy_imports(compat_module, subcompat_module)\n            module_builder.copy_imports('%s.compat' % compat_module, '%s.compat' % subcompat_module)\n    compat_prefixes = tuple((_COMPAT_MODULE_TEMPLATE % v + '.' for v in compat_api_versions))\n    for imported_module in imported_modules:\n        if not imported_module.startswith(compat_prefixes):\n            continue\n        module_split = imported_module.split('.')\n        if len(module_split) > 3 and module_split[2] == 'compat':\n            src_module = '.'.join(module_split[:3])\n            src_name = module_split[3]\n            assert src_name != 'v1' and src_name != 'v2', imported_module\n        else:\n            src_module = '.'.join(module_split[:2])\n            src_name = module_split[2]\n            if src_name == 'compat':\n                continue\n        for compat_api_version in compat_api_versions:\n            module_builder.add_import(symbol=None, source_module_name='%s.%s' % (output_package, src_module), source_name=src_name, dest_module_name='compat.v%d.%s' % (compat_api_version, src_module), dest_name=src_name)",
    "docstring": "Adds compat.vN.compat.vK modules to module builder. To avoid circular imports, we want to add __init__.py files under compat.vN.compat.vK and under compat.vN.compat.vK.compat. For all other imports, we point to corresponding modules under compat.vK. Args: module_builder: instance. compat_api_versions: Supported compatibility versions. output_package: Base output python package where generated API will be added.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\tools\\api\\generator\\create_python_api.py",
    "ast_data": "FunctionDef name:add_nested_compat_imports arg:module_builder arg:compat_api_versions arg:output_package arguments arg arg arg Assign Call For For Assign Assign Call Call Assign Call For If Call Assign Call If BoolOp Compare Call Compare Assign Call Assign BoolOp Compare Compare Assign Call Assign If Compare For Call"
  },
  {
    "library": "tensorflow",
    "name": "default",
    "source_code": "def default(self, obj):\n    if isinstance(obj, tensor_shape.TensorShape):\n        items = obj.as_list() if obj.rank is not None else None\n        return {'class_name': 'TensorShape', 'items': items}\n    return get_json_type(obj)",
    "docstring": "Encodes objects for types that aren't handled by the default encoder.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\saving\\saved_model\\json_utils.py",
    "ast_data": "FunctionDef name:default arg:self arg:obj arguments arg arg If Call Assign Compare Call Return return:yes Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "gc_state",
    "source_code": "@contextmanager\ndef gc_state(state):\n    orig_state = gc.isenabled()\n    set_gc_state(state)\n    yield\n    set_gc_state(orig_state)",
    "docstring": "Context manager to set state of garbage collector to Parameters ---------- state : bool True for gc enabled, False for disabled Examples -------- >>> with gc_state(False): ... assert not gc.isenabled() >>> with gc_state(True): ... assert gc.isenabled()",
    "type": "function",
    "file_path": "scipy\\scipy\\_lib\\_gcutils.py",
    "ast_data": "FunctionDef name:gc_state arg:state arguments arg Assign Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "call_kernel",
    "source_code": "def call_kernel(self, name: str, node=None):\n    wrapper = V.graph.wrapper_code\n    call_args = [f'{n}' for n, arg in self.halide_argdefs() if arg.alias_of is None]\n    current_device = V.graph.get_current_device_or_throw()\n    if current_device.type == 'cuda':\n        stream_name = wrapper.write_get_raw_stream(current_device.index, V.graph.name)\n        call_args.append(stream_name)\n    wrapper.generate_kernel_call(name, call_args, device=current_device, triton=False)",
    "docstring": "Codegen a call to this kernel",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\codegen\\halide.py",
    "ast_data": "FunctionDef name:call_kernel arg:self arg:name arg:node arguments arg arg arg Assign Assign Call Compare Assign Call If Compare Assign Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "scale",
    "source_code": "@property\ndef scale(self):\n    return self._scale",
    "docstring": "Distribution parameter for standard deviation.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\distributions\\normal.py",
    "ast_data": "FunctionDef name:scale arg:self arguments arg Return return:yes"
  },
  {
    "library": "django",
    "name": "get_primary_key_column",
    "source_code": "def get_primary_key_column(self, cursor, table_name):\n    columns = self.get_primary_key_columns(cursor, table_name)\n    return columns[0] if columns else None",
    "docstring": "Return the name of the primary key column for the given table.",
    "type": "method",
    "file_path": "django\\django\\db\\backends\\base\\introspection.py",
    "ast_data": "FunctionDef name:get_primary_key_column arg:self arg:cursor arg:table_name arguments arg arg arg Assign Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "convert_xunits",
    "source_code": "def convert_xunits(self, x):\n    ax = getattr(self, 'axes', None)\n    if ax is None or ax.xaxis is None:\n        return x\n    return ax.xaxis.convert_units(x)",
    "docstring": "Convert *x* using the unit type of the xaxis. If the artist is not contained in an Axes or if the xaxis does not have units, *x* itself is returned.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\artist.py",
    "ast_data": "FunctionDef name:convert_xunits arg:self arg:x arguments arg arg Assign Call If BoolOp Compare Compare Return return:yes Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "_lru_cache",
    "source_code": "def _lru_cache(self, fn, maxsize=None):\n    fn_cache = functools.lru_cache(maxsize)(fn)\n    prior_len = len(self.replacements)\n\n    @functools.wraps(fn)\n    def wrapper(*args, **kwargs):\n        nonlocal prior_len\n        if prior_len != len(self.replacements):\n            prior_len = len(self.replacements)\n            fn_cache.cache_clear()\n        return fn_cache(*args, **kwargs)\n    return wrapper",
    "docstring": "Wrapper around functools.lru_cache that clears when replacements has been invalidated.",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\sizevars.py",
    "ast_data": "FunctionDef name:_lru_cache arg:self arg:fn arg:maxsize arguments arg arg arg Assign Call Call Assign Call FunctionDef name:wrapper arguments arg arg If Compare Call Assign Call Call Return return:yes Call Call Return return:yes"
  },
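The same pattern outside Inductor, as a generic sketch (the module-level `replacements` dict and `lookup` are hypothetical stand-ins for `self.replacements` and the wrapped function):

```python
import functools

replacements = {}  # external state whose growth invalidates the cache

def invalidating_lru_cache(fn, maxsize=None):
    fn_cache = functools.lru_cache(maxsize)(fn)
    prior_len = len(replacements)

    @functools.wraps(fn)
    def wrapper(*args, **kwargs):
        nonlocal prior_len
        if prior_len != len(replacements):   # state changed: drop stale entries
            prior_len = len(replacements)
            fn_cache.cache_clear()
        return fn_cache(*args, **kwargs)
    return wrapper

@invalidating_lru_cache
def lookup(key):
    return replacements.get(key, key)
```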
  {
    "library": "tensorflow",
    "name": "is_generic_list",
    "source_code": "def is_generic_list(tp):\n    return tp not in (list, typing.List) and getattr(tp, '__origin__', None) in (list, typing.List)",
    "docstring": "Returns true if is a parameterized typing.List value.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\util\\type_annotations.py",
    "ast_data": "FunctionDef name:is_generic_list arg:tp arguments arg Return return:yes BoolOp Compare Compare Call"
  },
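The stdlib offers the same check via `typing.get_origin`; a sketch of the equivalence:

```python
import typing

def is_generic_list(tp) -> bool:
    # Bare list/typing.List are excluded; only parameterized forms pass.
    return tp not in (list, typing.List) and typing.get_origin(tp) is list

print(is_generic_list(typing.List[int]))  # True
print(is_generic_list(list))              # False
```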
  {
    "library": "tensorflow",
    "name": "run_pass_pipeline",
    "source_code": "@tf_export('mlir.experimental.run_pass_pipeline')\ndef run_pass_pipeline(mlir_txt, pass_pipeline, show_debug_info=False):\n    return pywrap_mlir.experimental_run_pass_pipeline(mlir_txt, pass_pipeline, show_debug_info)",
    "docstring": "Runs a pipeline over input module. Args: mlir_txt: Textual representation of the MLIR module. pass_pipeline: Pass pipeline to run on module. show_debug_info: Whether to include locations in the emitted textual form. Returns: A textual representation of the MLIR module corresponding to the transformed module.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\compiler\\mlir\\mlir.py",
    "ast_data": "FunctionDef name:run_pass_pipeline arg:mlir_txt arg:pass_pipeline arg:show_debug_info arguments arg arg arg Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "_cleanup_registry_based_on_opset_version",
    "source_code": "def _cleanup_registry_based_on_opset_version(self) -> None:\n    cleaned_functions = {}\n    for target_or_name, decomps in self.functions.items():\n        decomps = [d for d in decomps if d.opset_introduced <= self.opset_version]\n        if decomps:\n            max_opset = max((d.opset_introduced for d in decomps))\n            cleaned_functions[target_or_name] = [d for d in decomps if d.opset_introduced == max_opset]\n    self.functions = cleaned_functions",
    "docstring": "Pick the implementation with the highest opset version valid until the current opset version.",
    "type": "method",
    "file_path": "pytorch\\torch\\onnx\\_internal\\exporter\\_registration.py",
    "ast_data": "FunctionDef name:_cleanup_registry_based_on_opset_version arg:self arguments arg Assign For Call Assign Compare If Assign Call Assign Compare Assign"
  },
  {
    "library": "kornia",
    "name": "__init__",
    "source_code": "def __init__(self, dst_from_src: Se2 | Se3, frame_src: str | None=None, frame_dst: str | None=None) -> None:\n    self._dst_from_src = dst_from_src\n    self._frame_src = frame_src or uuid.uuid4().hex\n    self._frame_dst = frame_dst or uuid.uuid4().hex",
    "docstring": "Construct NamedPose. Args: dst_from_src: Pose from source frame to destination frame. frame_src: Name of frame a. frame_dst: Name of frame b.",
    "type": "method",
    "file_path": "kornia\\kornia\\geometry\\pose.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:dst_from_src arg:frame_src arg:frame_dst arguments arg arg arg arg Assign Assign BoolOp Call Assign BoolOp Call"
  },
  {
    "library": "matplotlib",
    "name": "format_xdata",
    "source_code": "def format_xdata(self, x):\n    return (self.fmt_xdata if self.fmt_xdata is not None else self.xaxis.get_major_formatter().format_data_short)(x)",
    "docstring": "Return *x* formatted as an x-value. This function will use the attribute if it is not None, else will fall back on the xaxis major formatter.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\axes\\_base.py",
    "ast_data": "FunctionDef name:format_xdata arg:self arg:x arguments arg arg Return return:yes Call Compare Call"
  },
  {
    "library": "matplotlib",
    "name": "_get_char_id",
    "source_code": "def _get_char_id(self, font, ccode):\n    return urllib.parse.quote(f'{font.postscript_name}-{ccode:x}')",
    "docstring": "Return a unique id for the given font and character-code set.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\textpath.py",
    "ast_data": "FunctionDef name:_get_char_id arg:self arg:font arg:ccode arguments arg arg arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_composite_impl",
    "source_code": "def _composite_impl(matrix, rhs, l2_regularizer):\n    with ops.name_scope(name, 'matrix_solve_ls', [matrix, rhs, l2_regularizer]):\n        matrix_shape = matrix.get_shape()[-2:]\n        if matrix_shape.is_fully_defined():\n            if matrix_shape[-2] >= matrix_shape[-1]:\n                return _overdetermined(matrix, rhs, l2_regularizer)\n            else:\n                return _underdetermined(matrix, rhs, l2_regularizer)\n        else:\n            matrix_shape = array_ops.shape(matrix)[-2:]\n            return cond.cond(matrix_shape[-2] >= matrix_shape[-1], lambda: _overdetermined(matrix, rhs, l2_regularizer), lambda: _underdetermined(matrix, rhs, l2_regularizer))",
    "docstring": "Composite implementation of matrix_solve_ls that supports GPU.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\linalg_ops.py",
    "ast_data": "FunctionDef name:_composite_impl arg:matrix arg:rhs arg:l2_regularizer arguments arg arg arg With Call Assign Call If Call If Compare Return return:yes Call Return return:yes Call Assign Call Return return:yes Call Compare arguments Call arguments Call"
  },
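The public entry point for this logic is `tf.linalg.lstsq`; an overdetermined example (the data values are arbitrary):

```python
import tensorflow as tf

A = tf.constant([[1.0, 1.0], [1.0, 2.0], [1.0, 3.0]])  # 3x2: more rows than cols
b = tf.constant([[1.0], [2.0], [2.0]])
x = tf.linalg.lstsq(A, b, l2_regularizer=0.0)           # overdetermined branch
print(x)
```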
  {
    "library": "sphinx",
    "name": "Centered",
    "source_code": "class Centered(SphinxDirective):\n    has_content = False\n    required_arguments = 1\n    optional_arguments = 0\n    final_argument_whitespace = True\n    option_spec: ClassVar[OptionSpec] = {}\n\n    def run(self) -> list[Node]:\n        if not self.arguments:\n            return []\n        subnode: Element = addnodes.centered()\n        inodes, messages = self.parse_inline(self.arguments[0])\n        subnode.extend(inodes)\n        ret: list[Node] = [subnode]\n        ret += messages\n        return ret",
    "docstring": "Directive to create a centered line of bold text.",
    "type": "class",
    "file_path": "sphinx\\sphinx\\directives\\other.py",
    "ast_data": "ClassDef name:Centered Assign Assign Assign Assign FunctionDef name:run arg:self arguments arg If Return return:no Call Assign Call Call Return return:yes"
  },
  {
    "library": "django",
    "name": "absolute_path",
    "source_code": "def absolute_path(self, path):\n    if path.startswith(('http://', 'https://', '/')):\n        return path\n    return static(path)",
    "docstring": "Given a relative or absolute path to a static asset, return an absolute path. An absolute path will be returned unchanged while a relative path will be passed to django.templatetags.static.static().",
    "type": "method",
    "file_path": "django\\django\\forms\\widgets.py",
    "ast_data": "FunctionDef name:absolute_path arg:self arg:path arguments arg arg If Call Return return:yes Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "__init__",
    "source_code": "def __init__(self, nrows, ncols, figure=None, left=None, bottom=None, right=None, top=None, wspace=None, hspace=None, width_ratios=None, height_ratios=None):\n    self.left = left\n    self.bottom = bottom\n    self.right = right\n    self.top = top\n    self.wspace = wspace\n    self.hspace = hspace\n    self.figure = figure\n    super().__init__(nrows, ncols, width_ratios=width_ratios, height_ratios=height_ratios)",
    "docstring": "Parameters ---------- nrows, ncols : int The number of rows and columns of the grid. figure : , optional Only used for constrained layout to create a proper layoutgrid. left, right, top, bottom : float, optional Extent of the subplots as a fraction of figure width or height. Left cannot be larger than right, and bottom cannot be larger than top. If not given, the values will be inferred from a figure or rcParams at draw time. See also . wspace : float, optional The amount of width reserved for space between subplots, expressed as a fraction of the average axis width. If not given, the values will be inferred from a figure or rcParams when necessary. See also . hspace : float, optional The amount of height reserved for space between subplots, expressed as a fraction of the average axis height. If not given, the values will be inferred from a figure or rcParams when necessary. See also . width_ratios : array-like of length *ncols*, optional Defines the relative widths of the columns. Each column gets a relative width of ``. If not given, all rows will have the same height.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\gridspec.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:nrows arg:ncols arg:figure arg:left arg:bottom arg:right arg:top arg:wspace arg:hspace arg:width_ratios arg:height_ratios arguments arg arg arg arg arg arg arg arg arg arg arg arg Assign Assign Assign Assign Assign Assign Assign Call Call"
  },
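A usage sketch for the ratio parameters (the 2x2 layout is arbitrary):

```python
import matplotlib.pyplot as plt
from matplotlib.gridspec import GridSpec

fig = plt.figure()
gs = GridSpec(2, 2, figure=fig, width_ratios=[2, 1], height_ratios=[2, 1])
ax_main = fig.add_subplot(gs[0, 0])     # gets 2/3 of the width and height
ax_side = fig.add_subplot(gs[0, 1])
ax_bottom = fig.add_subplot(gs[1, :])   # spans both columns
```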
  {
    "library": "tensorflow",
    "name": "scatter_nd_max",
    "source_code": "def scatter_nd_max(self, indices, updates, name=None):\n    return gen_state_ops.scatter_nd_max(self._variable, indices, updates, use_locking=True, name=name)",
    "docstring": "Updates this variable with the max of and itself. is a with rank and is a of rank . must be integer tensor, containing indices into . It must be shape where . The innermost dimension of (with length ) corresponds to indices into elements (if ) or slices (if ) along the th dimension of . is of rank with shape: See for more details about how to make updates to slices. Args: indices: The indices to be used in the operation. updates: The values to be used in the operation. name: the name of the operation. Returns: A that will hold the new value of this variable after the scattered addition has completed.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\ref_variable.py",
    "ast_data": "FunctionDef name:scatter_nd_max arg:self arg:indices arg:updates arg:name arguments arg arg arg arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "as_signature_def",
    "source_code": "@abc.abstractmethod\ndef as_signature_def(self, receiver_tensors):\n    pass",
    "docstring": "Generate a SignatureDef proto for inclusion in a MetaGraphDef. The SignatureDef will specify outputs as described in this ExportOutput, and will use the provided receiver_tensors as inputs. Args: receiver_tensors: a , or a dict of string to , specifying input nodes that will be fed.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\saved_model\\model_utils\\export_output.py",
    "ast_data": "FunctionDef name:as_signature_def arg:self arg:receiver_tensors arguments arg arg"
  },
  {
    "library": "django",
    "name": "check_minus_one",
    "source_code": "def check_minus_one(result, func, cargs):\n    if result == -1:\n        raise GEOSException('Error encountered in GEOS C function \"%s\".' % func.__name__)\n    else:\n        return result",
    "docstring": "Error checking on routines that should not return -1.",
    "type": "function",
    "file_path": "django\\django\\contrib\\gis\\geos\\prototypes\\errcheck.py",
    "ast_data": "FunctionDef name:check_minus_one arg:result arg:func arg:cargs arguments arg arg arg If Compare Raise Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "degree",
    "source_code": "@property\ndef degree(self):\n    return self._N - 1",
    "docstring": "Degree of the polynomial. One less the number of control points.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\bezier.py",
    "ast_data": "FunctionDef name:degree arg:self arguments arg Return return:yes"
  },
  {
    "library": "scipy",
    "name": "Cigar",
    "source_code": "class Cigar(Benchmark):\n    change_dimensionality = True\n\n    def __init__(self, dimensions=2):\n        Benchmark.__init__(self, dimensions)\n        self._bounds = list(zip([-100.0] * self.N, [100.0] * self.N))\n        self.custom_bounds = [(-5, 5), (-5, 5)]\n        self.global_optimum = [[0 for _ in range(self.N)]]\n        self.fglob = 0.0\n\n    def fun(self, x, *args):\n        self.nfev += 1\n        return x[0] ** 2 + 1000000.0 * sum(x[1:] ** 2)",
    "docstring": "Cigar objective function. This class defines the Cigar [1]_ global optimization problem. This is a multimodal minimization problem defined as follows: .. math:: f_{\\text{Cigar}}(x) = x_1^2 + 10^6\\sum_{i=2}^{n} x_i^2 Here, :math: represents the number of dimensions and :math: for :math:. *Global optimum*: :math: for :math: for :math: .. [1] Gavana, A. Global Optimization Benchmarks and AMPGO retrieved 2015",
    "type": "class",
    "file_path": "scipy\\benchmarks\\benchmarks\\go_benchmark_functions\\go_funcs_C.py",
    "ast_data": "ClassDef name:Cigar Assign FunctionDef name:__init__ arg:self arg:dimensions arguments arg arg Call Assign Call Call Assign Assign Call Assign FunctionDef name:fun arg:self arg:x arguments arg arg arg Return return:yes Call"
  },
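A direct NumPy evaluation of the objective, checking the stated global optimum (a sketch independent of the benchmark harness):

```python
import numpy as np

def cigar(x):
    x = np.asarray(x, dtype=float)
    return x[0] ** 2 + 1e6 * np.sum(x[1:] ** 2)

print(cigar([0.0, 0.0, 0.0]))  # 0.0 at the global optimum
print(cigar([1.0, 0.1, 0.1]))  # 20001.0: off-axis components dominate
```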
  {
    "library": "pytorch",
    "name": "apply",
    "source_code": "def apply(self, model_outputs: Any, model: torch.nn.Module | Callable | torch_export.ExportedProgram | None=None) -> Sequence[Any]:\n    assert isinstance(model, torch_export.ExportedProgram), \"'model' must be torch_export.ExportedProgram\"\n    ordered_buffers = tuple((model.state_dict[name] if name in model.state_dict else model.constants[name] for name in model.graph_signature.buffers_to_mutate.values()))\n    updated_outputs = (*ordered_buffers, *model_outputs)\n    return updated_outputs",
    "docstring": "Flatten the model outputs and validate the output. Args: model_outputs: The model outputs to flatten. model: The PyTorch model. Returns: flattened_outputs: The flattened model outputs.",
    "type": "method",
    "file_path": "pytorch\\torch\\onnx\\_internal\\io_adapter.py",
    "ast_data": "FunctionDef name:apply arg:self arg:model_outputs arg:model arguments arg arg arg Call Assign Call Compare Call Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "var_creator",
    "source_code": "def var_creator(**kwargs):\n    collections = kwargs.pop('collections', None)\n    if collections is None:\n        collections = [ops.GraphKeys.GLOBAL_VARIABLES]\n    kwargs['collections'] = []\n    v = next_creator(**kwargs)\n    wrapped = ps_values.AggregatingVariable(self._container_strategy(), v, aggregation)\n    if not context.executing_eagerly():\n        g = ops.get_default_graph()\n        if kwargs.get('trainable', True):\n            collections.append(ops.GraphKeys.TRAINABLE_VARIABLES)\n            l = g.get_collection_ref(ops.GraphKeys.TRAINABLE_VARIABLES)\n            if v in l:\n                l.remove(v)\n        g.add_to_collections(collections, wrapped)\n    elif ops.GraphKeys.GLOBAL_STEP in collections:\n        ops.add_to_collections(ops.GraphKeys.GLOBAL_STEP, wrapped)\n    return wrapped",
    "docstring": "Create an AggregatingVariable and fix up collections.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\parameter_server_strategy.py",
    "ast_data": "FunctionDef name:var_creator arguments arg Assign Call If Compare Assign Assign Assign Call Assign Call Call If Call Assign Call If Call Call Assign Call If Compare Call Call If Compare Call Return return:yes"
  },
  {
    "library": "sphinx",
    "name": "BuildPhase",
    "source_code": "class BuildPhase(IntEnum):\n    INITIALIZATION = 1\n    READING = 2\n    CONSISTENCY_CHECK = 3\n    RESOLVING = 3\n    WRITING = 4",
    "docstring": "Build phase of Sphinx application.",
    "type": "class",
    "file_path": "sphinx\\sphinx\\util\\build_phase.py",
    "ast_data": "ClassDef name:BuildPhase Assign Assign Assign Assign Assign"
  },
  {
    "library": "matplotlib",
    "name": "comment",
    "source_code": "def comment(self, comment):\n    self.__flush()\n    self.__write(self.__indentation[:len(self.__tags)])\n    self.__write(f'<!-- {_escape_comment(comment)} -->\\n')",
    "docstring": "Add a comment to the output stream. Parameters ---------- comment : str Comment text.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\backends\\backend_svg.py",
    "ast_data": "FunctionDef name:comment arg:self arg:comment arguments arg arg Call Call Call Call Call"
  },
  {
    "library": "matplotlib",
    "name": "update_datalim",
    "source_code": "def update_datalim(self, xys, updatex=True, updatey=True):\n    xys = np.asarray(xys)\n    if not np.any(np.isfinite(xys)):\n        return\n    self.dataLim.update_from_data_xy(xys, self.ignore_existing_data_limits, updatex=updatex, updatey=updatey)\n    self.ignore_existing_data_limits = False",
    "docstring": "Extend the Bbox to include the given points. If no data is set currently, the Bbox will ignore its limits and set the bound to be the bounds of the xydata (*xys*). Otherwise, it will compute the bounds of the union of its current data and the data in *xys*. Parameters ---------- xys : 2D array-like The points to include in the data limits Bbox. This can be either a list of (x, y) tuples or a (N, 2) array. updatex, updatey : bool, default: True Whether to update the x/y limits.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\axes\\_base.py",
    "ast_data": "FunctionDef name:update_datalim arg:self arg:xys arg:updatex arg:updatey arguments arg arg arg arg Assign Call If Call Call Return return:no Call Assign"
  },
  {
    "library": "pandas",
    "name": "na_accum_func",
    "source_code": "def na_accum_func(values: ArrayLike, accum_func, *, skipna: bool) -> ArrayLike:\n    mask_a, mask_b = {np.cumprod: (1.0, np.nan), np.maximum.accumulate: (-np.inf, np.nan), np.cumsum: (0.0, np.nan), np.minimum.accumulate: (np.inf, np.nan)}[accum_func]\n    assert values.dtype.kind not in 'mM'\n    if skipna and (not issubclass(values.dtype.type, (np.integer, np.bool_))):\n        vals = values.copy()\n        mask = isna(vals)\n        vals[mask] = mask_a\n        result = accum_func(vals, axis=0)\n        result[mask] = mask_b\n    else:\n        result = accum_func(values, axis=0)\n    return result",
    "docstring": "Cumulative function with skipna support. Parameters ---------- values : np.ndarray or ExtensionArray accum_func : {np.cumprod, np.maximum.accumulate, np.cumsum, np.minimum.accumulate} skipna : bool Returns ------- np.ndarray or ExtensionArray",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\nanops.py",
    "ast_data": "FunctionDef name:na_accum_func arg:values arg:accum_func arguments arg arg arg Assign Compare If BoolOp Call Assign Call Assign Call Assign Assign Call Assign Assign Call Return return:yes"
  },
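The mask trick from `na_accum_func` in plain NumPy for the `np.cumsum` case (a sketch; 0.0 is that function's identity, matching `mask_a`):

```python
import numpy as np

values = np.array([1.0, np.nan, 2.0, 3.0])
mask = np.isnan(values)
vals = values.copy()
vals[mask] = 0.0              # neutralize NaNs with the identity element
result = np.cumsum(vals)
result[mask] = np.nan         # reinstate missing positions (mask_b)
print(result)                 # [ 1. nan  3.  6.]
```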
  {
    "library": "pandas",
    "name": "_get_indexer_strict",
    "source_code": "def _get_indexer_strict(self, key, axis_name: str_t) -> tuple[Index, np.ndarray]:\n    keyarr = key\n    if not isinstance(keyarr, Index):\n        keyarr = com.asarray_tuplesafe(keyarr)\n    if self._index_as_unique:\n        indexer = self.get_indexer_for(keyarr)\n        keyarr = self.reindex(keyarr)[0]\n    else:\n        keyarr, indexer, new_indexer = self._reindex_non_unique(keyarr)\n    self._raise_if_missing(keyarr, indexer, axis_name)\n    keyarr = self.take(indexer)\n    if isinstance(key, Index):\n        keyarr.name = key.name\n    if lib.is_np_dtype(keyarr.dtype, 'mM') or isinstance(keyarr.dtype, DatetimeTZDtype):\n        if isinstance(key, list) or (isinstance(key, type(self)) and key.freq is None):\n            keyarr = keyarr._with_freq(None)\n    return (keyarr, indexer)",
    "docstring": "Analogue to get_indexer that raises if any elements are missing.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\indexes\\base.py",
    "ast_data": "FunctionDef name:_get_indexer_strict arg:self arg:key arg:axis_name arguments arg arg arg Assign If Call Assign Call If Assign Call Assign Call Assign Call Call Assign Call If Call Assign If BoolOp Call Call If BoolOp Call BoolOp Call Call Compare Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "partial_tile",
    "source_code": "def partial_tile(tensor, tile_assignment, use_sharding_op=False, unspecified_dims=None):\n    return Sharding.partial_tile(tile_assignment).apply_to_tensor(tensor, use_sharding_op=use_sharding_op, unspecified_dims=unspecified_dims or [])",
    "docstring": "Returns a tensor that has tiled sharding. Args: tensor: A tf.Tensor to shard. tile_assignment: An np.ndarray describing the topology of the tiling and which device will compute which part of the topology. It must have one more dimension than tensor, and the last dimension represents partially replicated tiles. use_sharding_op: If true, adds a sharding op to set the sharding. unspecified_dims: An optional list of dimensions unspecified.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\compiler\\xla\\experimental\\xla_sharding.py",
    "ast_data": "FunctionDef name:partial_tile arg:tensor arg:tile_assignment arg:use_sharding_op arg:unspecified_dims arguments arg arg arg arg Return return:yes Call Call BoolOp"
  },
  {
    "library": "tensorflow",
    "name": "_FrequentTracingDetectorManager",
    "source_code": "class _FrequentTracingDetectorManager(object):\n    __slots__ = ['_detectors', '_lock']\n\n    def __init__(self):\n        self._detectors = weakref.WeakKeyDictionary()\n        self._lock = threading.Lock()\n\n    def _get_detector(self, key):\n        if key not in self._detectors:\n            self._detectors[key] = _FrequentTracingDetector()\n        return self._detectors[key]\n\n    def called_without_tracing(self, key):\n        with self._lock:\n            detector = self._get_detector(key)\n            detector.called_without_tracing()\n\n    def called_with_tracing(self, key, function_name, omit_warning):\n        with self._lock:\n            detector = self._get_detector(key)\n            detector.called_with_tracing(function_name, omit_warning)",
    "docstring": "Class for the management of all _FrequentTracingDetector objects.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\polymorphic_function\\polymorphic_function.py",
    "ast_data": "ClassDef name:_FrequentTracingDetectorManager Assign FunctionDef name:__init__ arg:self arguments arg Assign Call Assign Call FunctionDef name:_get_detector arg:self arg:key arguments arg arg If Compare Assign Call Return return:yes FunctionDef name:called_without_tracing arg:self arg:key arguments arg arg With Assign Call Call FunctionDef name:called_with_tracing arg:self arg:key arg:function_name arg:omit_warning arguments arg arg arg arg With Assign Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_run",
    "source_code": "def _run(self):\n    self._send_sequence()\n    with closing(self.executor_fn(_SHARED_SEQUENCES)) as executor:\n        while True:\n            if self.stop_signal.is_set():\n                return\n            self.queue.put(executor.apply_async(next_sample, (self.uid,)), block=True)",
    "docstring": "Submits request to the executor and queue the objects.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\utils\\data_utils.py",
    "ast_data": "FunctionDef name:_run arg:self arguments arg Call With Call Call While If Call Return return:no Call Call"
  },
  {
    "library": "tensorflow",
    "name": "partial_run",
    "source_code": "@deprecation.deprecated('2023-06-01', 'This function is deprecated and we do not expect adding newfunctionality to it. Please do not have your code dependingon this function.')\ndef partial_run(self, handle, fetches, feed_dict=None):\n    return self._run(handle, fetches, feed_dict, None, None)",
    "docstring": "Continues the execution with more feeds and fetches. NOTE: This function is deprecated and we do not expect adding new functionality to it. Please do not have your code depending on this function. This is EXPERIMENTAL and subject to change. To use partial execution, a user first calls and then a sequence of . specifies the list of feeds and fetches that will be used in the subsequent calls. The optional argument allows the caller to override the value of tensors in the graph. See run() for more information. Below is a simple example: Args: handle: A handle for a sequence of partial runs. fetches: A single graph element, a list of graph elements, or a dictionary whose values are graph elements or lists of graph elements (see documentation for ). feed_dict: A dictionary that maps graph elements to values (described above). Returns: Either a single value if is a single graph element, or a list of values if is a list, or a dictionary with the same keys as if that is a dictionary (see documentation for ). Raises: tf.errors.OpError: Or one of its subclasses on error.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\client\\session.py",
    "ast_data": "FunctionDef name:partial_run arg:self arg:handle arg:fetches arg:feed_dict arguments arg arg arg arg Return return:yes Call Call"
  },
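The docstring above refers to a simple example; here is a minimal sketch of the partial-run workflow under a TF1-style session (the placeholder names `a`, `b`, `c` and the constants fed are illustrative, not from the source):

```python
import tensorflow.compat.v1 as tf

tf.disable_eager_execution()

a = tf.placeholder(tf.float32, shape=[])
b = tf.placeholder(tf.float32, shape=[])
c = tf.placeholder(tf.float32, shape=[])
r1 = tf.add(a, b)
r2 = tf.multiply(r1, c)

with tf.Session() as sess:
    # Declare every feed and fetch of the sequence up front.
    h = sess.partial_run_setup([r1, r2], [a, b, c])
    res = sess.partial_run(h, r1, feed_dict={a: 1, b: 2})  # 3.0
    res = sess.partial_run(h, r2, feed_dict={c: res})      # 9.0
```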
  {
    "library": "pytorch",
    "name": "_Boolean",
    "source_code": "class _Boolean(Constraint):\n    is_discrete = True\n\n    def check(self, value):\n        return (value == 0) | (value == 1)",
    "docstring": "Constrain to the two values .",
    "type": "class",
    "file_path": "pytorch\\torch\\distributions\\constraints.py",
    "ast_data": "ClassDef name:_Boolean Assign FunctionDef name:check arg:self arg:value arguments arg arg Return return:yes Compare Compare"
  },
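For context, `_Boolean` is exposed through the `constraints.boolean` singleton; a minimal usage sketch:

```python
import torch
from torch.distributions import constraints

values = torch.tensor([0.0, 1.0, 2.0])
# check() is elementwise: True exactly where the value is 0 or 1.
print(constraints.boolean.check(values))  # tensor([ True,  True, False])
```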
  {
    "library": "matplotlib",
    "name": "set_mutation_scale",
    "source_code": "def set_mutation_scale(self, scale):\n    self._mutation_scale = scale\n    self.stale = True",
    "docstring": "Set the mutation scale. Parameters ---------- scale : float",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\patches.py",
    "ast_data": "FunctionDef name:set_mutation_scale arg:self arg:scale arguments arg arg Assign Assign"
  },
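A small sketch of how the mutation scale affects a patch (the arrow coordinates and the scale value are arbitrary):

```python
import matplotlib.pyplot as plt
from matplotlib.patches import FancyArrowPatch

fig, ax = plt.subplots()
arrow = FancyArrowPatch((0.1, 0.5), (0.9, 0.5), arrowstyle="->")
arrow.set_mutation_scale(30)  # scales the arrow head relative to its default size
ax.add_patch(arrow)
plt.show()
```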
  {
    "library": "cryptography",
    "name": "load_public",
    "source_code": "def load_public(self, data: memoryview) -> tuple[ed25519.Ed25519PublicKey, memoryview]:\n    (point,), data = self.get_public(data)\n    public_key = ed25519.Ed25519PublicKey.from_public_bytes(point.tobytes())\n    return (public_key, data)",
    "docstring": "Make Ed25519 public key from data.",
    "type": "method",
    "file_path": "cryptography\\src\\cryptography\\hazmat\\primitives\\serialization\\ssh.py",
    "ast_data": "FunctionDef name:load_public arg:self arg:data arguments arg arg Assign Call Assign Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "assert_same_rank",
    "source_code": "def assert_same_rank(self, other):\n    other = as_shape(other)\n    if self.rank is not None and other.rank is not None:\n        if self.rank != other.rank:\n            raise ValueError('Shapes %s and %s must have the same rank' % (self, other))",
    "docstring": "Raises an exception if and do not have compatible ranks. Args: other: Another . Raises: ValueError: If and do not represent shapes with the same rank.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\tensor_shape.py",
    "ast_data": "FunctionDef name:assert_same_rank arg:self arg:other arguments arg arg Assign Call If BoolOp Compare Compare If Compare Raise Call"
  },
  {
    "library": "scipy",
    "name": "__init__",
    "source_code": "def __init__(self, mean=None, cov=1, allow_singular=False, seed=None, maxpts=None, abseps=1e-05, releps=1e-05):\n    self._dist = multivariate_normal_gen(seed)\n    self.dim, self.mean, self.cov_object = self._dist._process_parameters(mean, cov, allow_singular)\n    self.allow_singular = allow_singular or self.cov_object._allow_singular\n    if not maxpts:\n        maxpts = 1000000 * self.dim\n    self.maxpts = maxpts\n    self.abseps = abseps\n    self.releps = releps",
    "docstring": "Create a frozen multivariate normal distribution. Parameters ---------- mean : array_like, default: `numpy.random.Generatornumpy.random.RandomStateseednp.randomnumpy.random.RandomStateseedseedseed`) abseps : float, optional Absolute error tolerance for the cumulative distribution function (default 1e-5) releps : float, optional Relative error tolerance for the cumulative distribution function (default 1e-5) Examples -------- When called with the default parameters, this will create a 1D random variable with mean 0 and covariance 1: >>> from scipy.stats import multivariate_normal >>> r = multivariate_normal() >>> r.mean array([ 0.]) >>> r.cov array([[1.]])",
    "type": "method",
    "file_path": "scipy\\scipy\\stats\\_multivariate.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:mean arg:cov arg:allow_singular arg:seed arg:maxpts arg:abseps arg:releps arguments arg arg arg arg arg arg arg arg Assign Call Assign Call Assign BoolOp If Assign Assign Assign Assign"
  },
  {
    "library": "numpy",
    "name": "_extendLine_pretty",
    "source_code": "def _extendLine_pretty(s, line, word, line_width, next_line_prefix, legacy):\n    words = word.splitlines()\n    if len(words) == 1 or legacy <= 113:\n        return _extendLine(s, line, word, line_width, next_line_prefix, legacy)\n    max_word_length = max((len(word) for word in words))\n    if len(line) + max_word_length > line_width and len(line) > len(next_line_prefix):\n        s += line.rstrip() + '\\n'\n        line = next_line_prefix + words[0]\n        indent = next_line_prefix\n    else:\n        indent = len(line) * ' '\n        line += words[0]\n    for word in words[1:]:\n        s += line.rstrip() + '\\n'\n        line = indent + word\n    suffix_length = max_word_length - len(words[-1])\n    line += suffix_length * ' '\n    return (s, line)",
    "docstring": "Extends line with nicely formatted (possibly multi-line) string ``.",
    "type": "function",
    "file_path": "numpy\\numpy\\_core\\arrayprint.py",
    "ast_data": "FunctionDef name:_extendLine_pretty arg:s arg:line arg:word arg:line_width arg:next_line_prefix arg:legacy arguments arg arg arg arg arg arg Assign Call If BoolOp Compare Call Compare Return return:yes Call Assign Call Call If BoolOp Compare Call Compare Call Call Call Assign Assign Assign Call For Call Assign Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "checkpoint_exists_internal",
    "source_code": "def checkpoint_exists_internal(checkpoint_prefix):\n    pathname = _prefix_to_checkpoint_path(checkpoint_prefix, saver_pb2.SaverDef.V2)\n    if file_io.get_matching_files(pathname):\n        return True\n    elif file_io.get_matching_files(checkpoint_prefix):\n        return True\n    else:\n        return False",
    "docstring": "Checks whether a V1 or V2 checkpoint exists with the specified prefix. This is an internal function to check if a checkpoint exists, since it takes into account the naming difference between V1 and V2 formats. Args: checkpoint_prefix: the prefix of a V1 or V2 checkpoint, with V2 taking priority. Typically the result of or that of , regardless of sharded/non-sharded or V1/V2. Returns: A bool, true if a checkpoint referred to by exists.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\checkpoint\\checkpoint_management.py",
    "ast_data": "FunctionDef name:checkpoint_exists_internal arg:checkpoint_prefix arguments arg Assign Call If Call Return return:yes If Call Return return:yes Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_set_last_step_outputs",
    "source_code": "def _set_last_step_outputs(ctx, last_step_tensor_outputs):\n    last_step_tensor_outputs_dict = nest.pack_sequence_as(ctx.last_step_outputs, last_step_tensor_outputs)\n    for name, reduce_op in ctx._last_step_outputs_reduce_ops.items():\n        output = last_step_tensor_outputs_dict[name]\n        if reduce_op is None:\n            last_step_tensor_outputs_dict[name] = values.PerReplica(output)\n        else:\n            last_step_tensor_outputs_dict[name] = output[0]\n    ctx._set_last_step_outputs(last_step_tensor_outputs_dict)",
    "docstring": "Sets the last step outputs on the given context.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\tpu_strategy.py",
    "ast_data": "FunctionDef name:_set_last_step_outputs arg:ctx arg:last_step_tensor_outputs arguments arg arg Assign Call For Call Assign If Compare Assign Call Assign Call"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "@deprecation.deprecated('2019-01-01', 'The TensorFlow Distributions library has moved to TensorFlow Probability (https://github.com/tensorflow/probability). You should update all references to use `tfp.distributions` instead of `tf.distributions`.', warn_once=True)\ndef __init__(self, concentration, rate, validate_args=False, allow_nan_stats=True, name='Gamma'):\n    parameters = dict(locals())\n    with ops.name_scope(name, values=[concentration, rate]) as name:\n        with ops.control_dependencies([check_ops.assert_positive(concentration), check_ops.assert_positive(rate)] if validate_args else []):\n            self._concentration = array_ops.identity(concentration, name='concentration')\n            self._rate = array_ops.identity(rate, name='rate')\n            check_ops.assert_same_float_dtype([self._concentration, self._rate])\n    super(Gamma, self).__init__(dtype=self._concentration.dtype, validate_args=validate_args, allow_nan_stats=allow_nan_stats, reparameterization_type=distribution.FULLY_REPARAMETERIZED, parameters=parameters, graph_parents=[self._concentration, self._rate], name=name)",
    "docstring": "Construct Gamma with and parameters. The parameters and must be shaped in a way that supports broadcasting (e.g. is a valid operation). Args: concentration: Floating point tensor, the concentration params of the distribution(s). Must contain only positive values. rate: Floating point tensor, the inverse scale params of the distribution(s). Must contain only positive values. validate_args: Python , default . When distribution parameters are checked for validity despite possibly degrading runtime performance. When invalid inputs may silently render incorrect outputs. allow_nan_stats: Python , default . When , statistics (e.g., mean, mode, variance) use the value \"\" to indicate the result is undefined. When , an exception is raised if one or more of the statistic's batch members are undefined. name: Python name prefixed to Ops created by this class. Raises: TypeError: if and are different dtypes.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\distributions\\gamma.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:concentration arg:rate arg:validate_args arg:allow_nan_stats arg:name arguments arg arg arg arg arg arg Assign Call Call With Call With Call Call Call Assign Call Assign Call Call Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "train",
    "source_code": "def train(self, mode=True):\n    self.training = mode\n    if not self.freeze_bn:\n        for module in self.children():\n            module.train(mode)\n    return self",
    "docstring": "Batchnorm's training behavior is using the self.training flag. Prevent changing it if BN is frozen. This makes sure that calling on a model with a frozen BN will behave properly.",
    "type": "method",
    "file_path": "pytorch\\torch\\ao\\nn\\intrinsic\\qat\\modules\\linear_fused.py",
    "ast_data": "FunctionDef name:train arg:self arg:mode arguments arg arg Assign If For Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_DictFetchMapper",
    "source_code": "class _DictFetchMapper(_FetchMapper):\n\n    def __init__(self, fetches):\n        self._fetch_type = type(fetches)\n        if isinstance(fetches, collections.defaultdict):\n            self._type_ctor = functools.partial(collections.defaultdict, fetches.default_factory)\n        else:\n            self._type_ctor = self._fetch_type\n        self._keys = fetches.keys()\n        self._mappers = [_FetchMapper.for_fetch(fetch) for fetch in fetches.values()]\n        self._unique_fetches, self._value_indices = _uniquify_fetches(self._mappers)\n\n    def unique_fetches(self):\n        return self._unique_fetches\n\n    def build_results(self, values):\n\n        def _generator():\n            for k, m, vi in zip(self._keys, self._mappers, self._value_indices):\n                yield (k, m.build_results([values[j] for j in vi]))\n        return self._type_ctor(_generator())",
    "docstring": "Fetch mapper for dicts.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\client\\session.py",
    "ast_data": "ClassDef name:_DictFetchMapper FunctionDef name:__init__ arg:self arg:fetches arguments arg arg Assign Call If Call Assign Call Assign Assign Call Assign Call Call Assign Call FunctionDef name:unique_fetches arg:self arguments arg Return return:yes FunctionDef name:build_results arg:self arg:values arguments arg arg FunctionDef name:_generator arguments For Call Call Return return:yes Call Call"
  },
  {
    "library": "scrapy",
    "name": "join",
    "source_code": "@inlineCallbacks\ndef join(self) -> Generator[Deferred[Any], Any, None]:\n    while self._active:\n        yield DeferredList(self._active)",
    "docstring": "join() Returns a deferred that is fired when all managed :attr: have completed their executions.",
    "type": "method",
    "file_path": "scrapy\\scrapy\\crawler.py",
    "ast_data": "FunctionDef name:join arg:self arguments arg While Call"
  },
  {
    "library": "pytorch",
    "name": "generate_gub",
    "source_code": "@register_transformation_rule(TGreatestUpperBound)\ndef generate_gub(constraint, counter):\n    c1 = Conj([Disj([BinConstraintT(constraint.rhs1, Dyn, op_eq), BinConstraintT(constraint.rhs2, Dyn, op_eq)]), BinConstraintT(constraint.res, Dyn, op_eq)])\n    [c2, c3, c4, c5], counter = gen_greatest_upper_bound(constraint, counter)\n    return (Disj([c1, c2, c3, c4, c5]), counter)",
    "docstring": "Transform greatest upper bound for tensors. Results in equality and Greatest Upper Bound on dimensions",
    "type": "function",
    "file_path": "pytorch\\torch\\fx\\experimental\\migrate_gradual_types\\constraint_transformation.py",
    "ast_data": "FunctionDef name:generate_gub arg:constraint arg:counter arguments arg arg Assign Call Call Call Call Call Assign Call Return return:yes Call Call"
  },
  {
    "library": "matplotlib",
    "name": "datalim_to_dt",
    "source_code": "def datalim_to_dt(self):\n    dmin, dmax = self.axis.get_data_interval()\n    if dmin > dmax:\n        dmin, dmax = (dmax, dmin)\n    return (num2date(dmin, self.tz), num2date(dmax, self.tz))",
    "docstring": "Convert axis data interval to datetime objects.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\dates.py",
    "ast_data": "FunctionDef name:datalim_to_dt arg:self arguments arg Assign Call If Compare Assign Return return:yes Call Call"
  },
  {
    "library": "pandas",
    "name": "kind",
    "source_code": "@property\ndef kind(self) -> str:\n    return self.subtype.kind",
    "docstring": "The sparse kind. Either 'integer', or 'block'.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\dtypes\\dtypes.py",
    "ast_data": "FunctionDef name:kind arg:self arguments arg Return return:yes"
  },
  {
    "library": "pandas",
    "name": "CombinedDatetimelikeProperties",
    "source_code": "class CombinedDatetimelikeProperties(DatetimeProperties, TimedeltaProperties, PeriodProperties):\n\n    def __new__(cls, data: Series):\n        if not isinstance(data, ABCSeries):\n            raise TypeError(f'cannot convert an object of type {type(data)} to a datetimelike index')\n        orig = data if isinstance(data.dtype, CategoricalDtype) else None\n        if orig is not None:\n            data = data._constructor(orig.array, name=orig.name, copy=False, dtype=orig._values.categories.dtype, index=orig.index)\n        if isinstance(data.dtype, ArrowDtype) and data.dtype.kind in 'Mm':\n            return ArrowTemporalProperties(data, orig)\n        if lib.is_np_dtype(data.dtype, 'M'):\n            return DatetimeProperties(data, orig)\n        elif isinstance(data.dtype, DatetimeTZDtype):\n            return DatetimeProperties(data, orig)\n        elif lib.is_np_dtype(data.dtype, 'm'):\n            return TimedeltaProperties(data, orig)\n        elif isinstance(data.dtype, PeriodDtype):\n            return PeriodProperties(data, orig)\n        raise AttributeError('Can only use .dt accessor with datetimelike values')",
    "docstring": "Accessor object for Series values' datetime-like, timedelta and period properties. See Also -------- DatetimeIndex : Index of datetime64 data. Examples -------- >>> dates = pd.Series( ... [\"2024-01-01\", \"2024-01-15\", \"2024-02-5\"], dtype=\"datetime64[ns]\" ... ) >>> dates.dt.day 0 1 1 15 2 5 dtype: int32 >>> dates.dt.month 0 1 1 1 2 2 dtype: int32 >>> dates = pd.Series( ... [\"2024-01-01\", \"2024-01-15\", \"2024-02-5\"], dtype=\"datetime64[ns, UTC]\" ... ) >>> dates.dt.day 0 1 1 15 2 5 dtype: int32 >>> dates.dt.month 0 1 1 1 2 2 dtype: int32",
    "type": "class",
    "file_path": "pandas\\pandas\\core\\indexes\\accessors.py",
    "ast_data": "ClassDef name:CombinedDatetimelikeProperties FunctionDef name:__new__ arg:cls arg:data arguments arg arg If Call Raise Call Call Assign Call If Compare Assign Call If BoolOp Call Compare Return return:yes Call If Call Return return:yes Call If Call Return return:yes Call If Call Return return:yes Call If Call Return return:yes Call Raise Call"
  },
  {
    "library": "matplotlib",
    "name": "ParserState",
    "source_code": "class ParserState:\n\n    def __init__(self, fontset: Fonts, font: str, font_class: str, fontsize: float, dpi: float):\n        self.fontset = fontset\n        self._font = font\n        self.font_class = font_class\n        self.fontsize = fontsize\n        self.dpi = dpi\n\n    def copy(self) -> ParserState:\n        return copy.copy(self)\n\n    @property\n    def font(self) -> str:\n        return self._font\n\n    @font.setter\n    def font(self, name: str) -> None:\n        if name in ('rm', 'it', 'bf', 'bfit'):\n            self.font_class = name\n        self._font = name\n\n    def get_current_underline_thickness(self) -> float:\n        return self.fontset.get_underline_thickness(self.font, self.fontsize, self.dpi)",
    "docstring": "Parser state. States are pushed and popped from a stack as necessary, and the \"current\" state is always at the top of the stack. Upon entering and leaving a group { } or math/non-math, the stack is pushed and popped accordingly.",
    "type": "class",
    "file_path": "matplotlib\\lib\\matplotlib\\_mathtext.py",
    "ast_data": "ClassDef name:ParserState FunctionDef name:__init__ arg:self arg:fontset arg:font arg:font_class arg:fontsize arg:dpi arguments arg arg arg arg arg arg Assign Assign Assign Assign Assign FunctionDef name:copy arg:self arguments arg Return return:yes Call FunctionDef name:font arg:self arguments arg Return return:yes FunctionDef name:font arg:self arg:name arguments arg arg If Compare Assign Assign FunctionDef name:get_current_underline_thickness arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "django",
    "name": "_earliest",
    "source_code": "def _earliest(self, *fields):\n    if fields:\n        order_by = fields\n    else:\n        order_by = getattr(self.model._meta, 'get_latest_by')\n        if order_by and (not isinstance(order_by, (tuple, list))):\n            order_by = (order_by,)\n    if order_by is None:\n        raise ValueError(\"earliest() and latest() require either fields as positional arguments or 'get_latest_by' in the model's Meta.\")\n    obj = self._chain()\n    obj.query.set_limits(high=1)\n    obj.query.clear_ordering(force=True)\n    obj.query.add_ordering(*order_by)\n    return obj.get()",
    "docstring": "Return the earliest object according to fields (if given) or by the model's Meta.get_latest_by.",
    "type": "method",
    "file_path": "django\\django\\db\\models\\query.py",
    "ast_data": "FunctionDef name:_earliest arg:self arguments arg arg If Assign Assign Call If BoolOp Call Assign If Compare Raise Call Assign Call Call Call Call Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "validation",
    "source_code": "def validation(self, parameter_values):\n    all_valid = True\n    dtypes = set()\n    for name, arr in parameter_values.items():\n        parameter = self.parameters[name]\n        arr, dtype, valid = parameter.validate(arr, parameter_values)\n        dtypes.add(dtype)\n        all_valid = all_valid & valid\n        parameter_values[name] = arr\n    dtype = arr.dtype if len(dtypes) == 1 else np.result_type(*list(dtypes))\n    return (all_valid, dtype)",
    "docstring": "Input validation / standardization of parameterization. Parameters ---------- parameter_values : dict The keyword arguments passed as parameter values to the distribution. Returns ------- all_valid : ndarray Logical array indicating the elements of the broadcasted arrays for which all parameter values are valid. dtype : dtype The common dtype of the parameter arrays. This will determine the dtype of the output of distribution methods.",
    "type": "method",
    "file_path": "scipy\\scipy\\stats\\_distribution_infrastructure.py",
    "ast_data": "FunctionDef name:validation arg:self arg:parameter_values arguments arg arg Assign Assign Call For Call Assign Assign Call Call Assign Assign Assign Compare Call Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "sticky_attribute_assignment",
    "source_code": "@tf_export('__internal__.tracking.sticky_attribute_assignment', v1=[])\ndef sticky_attribute_assignment(trackable, name, value):\n    if isinstance(value, NoDependency):\n        add_dependency = False\n    else:\n        add_dependency = True\n    value = wrap_or_unwrap(value)\n    if not add_dependency:\n        return value\n    if isinstance(value, base.Trackable):\n        trackable._track_trackable(value, name=name, overwrite=True)\n    return value",
    "docstring": "Adds dependencies, generally called from __setattr__. This behavior is shared between Trackable and Model. Respects NoDependency indicators, but otherwise makes trackable objects out of common data structures and tracks objects by their attribute names. Args: trackable: The object to add dependencies to (generally the one having an attribute assigned). name: The attribute name being assigned. value: The value being assigned. Not necessarily a trackable object. Returns: The value which should be stored in the attribute (unwrapped from a NoDependency object if necessary).",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\trackable\\data_structures.py",
    "ast_data": "FunctionDef name:sticky_attribute_assignment arg:trackable arg:name arg:value arguments arg arg arg If Call Assign Assign Assign Call If Return return:yes If Call Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "GlobalPooling1D",
    "source_code": "class GlobalPooling1D(Layer):\n\n    def __init__(self, data_format='channels_last', keepdims=False, **kwargs):\n        super(GlobalPooling1D, self).__init__(**kwargs)\n        self.input_spec = InputSpec(ndim=3)\n        self.data_format = conv_utils.normalize_data_format(data_format)\n        self.keepdims = keepdims\n\n    def compute_output_shape(self, input_shape):\n        input_shape = tensor_shape.TensorShape(input_shape).as_list()\n        if self.data_format == 'channels_first':\n            if self.keepdims:\n                return tensor_shape.TensorShape([input_shape[0], input_shape[1], 1])\n            else:\n                return tensor_shape.TensorShape([input_shape[0], input_shape[1]])\n        elif self.keepdims:\n            return tensor_shape.TensorShape([input_shape[0], 1, input_shape[2]])\n        else:\n            return tensor_shape.TensorShape([input_shape[0], input_shape[2]])\n\n    def call(self, inputs):\n        raise NotImplementedError\n\n    def get_config(self):\n        config = {'data_format': self.data_format, 'keepdims': self.keepdims}\n        base_config = super(GlobalPooling1D, self).get_config()\n        return dict(list(base_config.items()) + list(config.items()))",
    "docstring": "Abstract class for different global pooling 1D layers.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\layers\\pooling.py",
    "ast_data": "ClassDef name:GlobalPooling1D FunctionDef name:__init__ arg:self arg:data_format arg:keepdims arguments arg arg arg arg Call Call Assign Call Assign Call Assign FunctionDef name:compute_output_shape arg:self arg:input_shape arguments arg arg Assign Call Call If Compare If Return return:yes Call Return return:yes Call If Return return:yes Call Return return:yes Call FunctionDef name:call arg:self arg:inputs arguments arg arg Raise FunctionDef name:get_config arg:self arguments arg Assign Assign Call Call Return return:yes Call Call Call Call Call"
  },
  {
    "library": "matplotlib",
    "name": "set_fontconfig_pattern",
    "source_code": "def set_fontconfig_pattern(self, pattern):\n    for key, val in parse_fontconfig_pattern(pattern).items():\n        if type(val) is list:\n            getattr(self, 'set_' + key)(val[0])\n        else:\n            getattr(self, 'set_' + key)(val)",
    "docstring": "Set the properties by parsing a fontconfig_ *pattern*. This support does not depend on fontconfig; we are merely borrowing its pattern syntax for use here.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\font_manager.py",
    "ast_data": "FunctionDef name:set_fontconfig_pattern arg:self arg:pattern arguments arg arg For Call Call If Compare Call Call Call Call Call"
  },
  {
    "library": "django",
    "name": "order_by",
    "source_code": "def order_by(self, *field_names):\n    if self.query.is_sliced:\n        raise TypeError('Cannot reorder a query once a slice has been taken.')\n    obj = self._chain()\n    obj.query.clear_ordering(force=True, clear_default=False)\n    obj.query.add_ordering(*field_names)\n    return obj",
    "docstring": "Return a new QuerySet instance with the ordering changed.",
    "type": "method",
    "file_path": "django\\django\\db\\models\\query.py",
    "ast_data": "FunctionDef name:order_by arg:self arguments arg arg If Raise Call Assign Call Call Call Return return:yes"
  },
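A usage sketch with a hypothetical `Entry` model (the model and field names are illustrative):

```python
from django.db import models

class Entry(models.Model):
    headline = models.CharField(max_length=255)
    pub_date = models.DateTimeField()

# Newest first; ties broken alphabetically by headline.
# Each call returns a new QuerySet; the original is left untouched.
queryset = Entry.objects.order_by("-pub_date", "headline")
```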
  {
    "library": "pytorch",
    "name": "_is_avx512_bf16_supported",
    "source_code": "def _is_avx512_bf16_supported() -> bool:\n    return torch._C._cpu._is_avx512_bf16_supported()",
    "docstring": "Returns a bool indicating if CPU supports AVX512_BF16.",
    "type": "function",
    "file_path": "pytorch\\torch\\cpu\\__init__.py",
    "ast_data": "FunctionDef name:_is_avx512_bf16_supported arguments Return return:yes Call"
  },
  {
    "library": "cherrypy",
    "name": "accept",
    "source_code": "def accept(media=None, debug=False):\n    if not media:\n        return\n    if isinstance(media, text_or_bytes):\n        media = [media]\n    request = cherrypy.serving.request\n    ranges = request.headers.elements('Accept')\n    if not ranges:\n        if debug:\n            cherrypy.log('No Accept header elements', 'TOOLS.ACCEPT')\n        return media[0]\n    else:\n        for element in ranges:\n            if element.qvalue > 0:\n                if element.value == '*/*':\n                    if debug:\n                        cherrypy.log('Match due to */*', 'TOOLS.ACCEPT')\n                    return media[0]\n                elif element.value.endswith('/*'):\n                    mtype = element.value[:-1]\n                    for m in media:\n                        if m.startswith(mtype):\n                            if debug:\n                                cherrypy.log('Match due to %s' % element.value, 'TOOLS.ACCEPT')\n                            return m\n                elif element.value in media:\n                    if debug:\n                        cherrypy.log('Match due to %s' % element.value, 'TOOLS.ACCEPT')\n                    return element.value\n    ah = request.headers.get('Accept')\n    if ah is None:\n        msg = 'Your client did not send an Accept header.'\n    else:\n        msg = 'Your client sent this Accept header: %s.' % ah\n    msg += ' But this resource only emits these media types: %s.' % ', '.join(media)\n    raise cherrypy.HTTPError(406, msg)",
    "docstring": "Return the client's preferred media-type (from the given Content-Types). If 'media' is None (the default), no test will be performed. If 'media' is provided, it should be the Content-Type value (as a string) or values (as a list or tuple of strings) which the current resource can emit. The client's acceptable media ranges (as declared in the Accept request header) will be matched in order to these Content-Type values; the first such string is returned. That is, the return value will always be one of the strings provided in the 'media' arg (or None if 'media' is None). If no match is found, then HTTPError 406 (Not Acceptable) is raised. Note that most web browsers send */* as a (low-quality) acceptable media range, which should match any Content-Type. In addition, \"...if no Accept header field is present, then it is assumed that the client accepts all media types.\" Matching types are checked in order of client preference first, and then in the order of the given 'media' values. Note that this function does not honor accept-params (other than \"q\").",
    "type": "function",
    "file_path": "cherrypy\\cherrypy\\lib\\cptools.py",
    "ast_data": "FunctionDef name:accept arg:media arg:debug arguments arg arg If Return return:no If Call Assign Assign Assign Call If If Call Return return:yes For If Compare If Compare If Call Return return:yes If Call Assign For If Call If Call Return return:yes If Compare If Call Return return:yes Assign Call If Compare Assign Assign Call Raise Call"
  },
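A minimal sketch of content negotiation with this helper inside a handler (the page class and the returned bodies are illustrative):

```python
import cherrypy
from cherrypy.lib import cptools

class Root:
    @cherrypy.expose
    def data(self):
        # Returns the best match among the given types for the client's
        # Accept header, or raises HTTPError(406) if none is acceptable.
        best = cptools.accept(media=["application/json", "text/html"])
        if best == "application/json":
            return '{"ok": true}'
        return "<p>ok</p>"
```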
  {
    "library": "kornia",
    "name": "angle_to_rotation_matrix",
    "source_code": "def angle_to_rotation_matrix(angle: Tensor) -> Tensor:\n    ang_rad = deg2rad(angle)\n    cos_a: Tensor = cos(ang_rad)\n    sin_a: Tensor = sin(ang_rad)\n    return stack([cos_a, sin_a, -sin_a, cos_a], dim=-1).view(*angle.shape, 2, 2)",
    "docstring": "Create a rotation matrix out of angles in degrees. Args: angle: tensor of angles in degrees, any shape :math:. Returns: tensor of rotation matrices with shape :math:. Example: >>> input = torch.rand(1, 3) # Nx3 >>> output = angle_to_rotation_matrix(input) # Nx3x2x2",
    "type": "function",
    "file_path": "kornia\\kornia\\geometry\\conversions.py",
    "ast_data": "FunctionDef name:angle_to_rotation_matrix arg:angle arguments arg Assign Call Call Call Return return:yes Call Call"
  },
  {
    "library": "sphinx",
    "name": "split_explicit_title",
    "source_code": "def split_explicit_title(text: str) -> tuple[bool, str, str]:\n    match = explicit_title_re.match(text)\n    if match:\n        return (True, match.group(1), match.group(2))\n    return (False, text, text)",
    "docstring": "Split role content into title and target, if given.",
    "type": "function",
    "file_path": "sphinx\\sphinx\\util\\nodes.py",
    "ast_data": "FunctionDef name:split_explicit_title arg:text arguments arg Assign Call If Return return:yes Call Call Return return:yes"
  },
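A quick sketch of the two outcomes, assuming the usual `title <target>` role syntax:

```python
from sphinx.util.nodes import split_explicit_title

# Explicit title: the trailing "<...>" marks the target.
print(split_explicit_title("API docs <https://example.org/api>"))
# (True, 'API docs', 'https://example.org/api')

# No explicit title: the text doubles as both title and target.
print(split_explicit_title("plain-target"))
# (False, 'plain-target', 'plain-target')
```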
  {
    "library": "tensorflow",
    "name": "report_proto_path",
    "source_code": "def report_proto_path(self):\n    return self._report_proto_path",
    "docstring": "Getter for path where tensor_tracer.proto object should be written. Returns: A string path.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\tpu\\tensor_tracer.py",
    "ast_data": "FunctionDef name:report_proto_path arg:self arguments arg Return return:yes"
  },
  {
    "library": "django",
    "name": "driver",
    "source_code": "@cached_property\ndef driver(self):\n    ds_driver = capi.get_ds_driver(self._ptr)\n    return Driver(ds_driver)",
    "docstring": "Return the GDAL Driver used for this raster.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\gdal\\raster\\source.py",
    "ast_data": "FunctionDef name:driver arg:self arguments arg Assign Call Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "deprecate_method_override",
    "source_code": "def deprecate_method_override(method, obj, *, allow_empty=False, **kwargs):\n\n    def empty():\n        pass\n\n    def empty_with_docstring():\n        pass\n    name = method.__name__\n    bound_child = getattr(obj, name)\n    bound_base = method if isinstance(bound_child, type(empty)) and isinstance(obj, type) else method.__get__(obj)\n    if bound_child != bound_base and (not allow_empty or getattr(getattr(bound_child, '__code__', None), 'co_code', None) not in [empty.__code__.co_code, empty_with_docstring.__code__.co_code]):\n        warn_deprecated(**{'name': name, 'obj_type': 'method', **kwargs})\n        return bound_child\n    return None",
    "docstring": "Return `warn_deprecated` to generate the deprecation warning; must at least include the \"since\" key.",
    "type": "function",
    "file_path": "matplotlib\\lib\\matplotlib\\_api\\deprecation.py",
    "ast_data": "FunctionDef name:deprecate_method_override arg:method arg:obj arguments arg arg arg arg FunctionDef name:empty arguments FunctionDef name:empty_with_docstring arguments Assign Assign Call Assign BoolOp Call Call Call Call If BoolOp Compare BoolOp Compare Call Call Call Return return:yes Return return:no"
  },
  {
    "library": "scikit-learn",
    "name": "fast_logdet",
    "source_code": "def fast_logdet(A):\n    xp, _ = get_namespace(A)\n    sign, ld = xp.linalg.slogdet(A)\n    if not sign > 0:\n        return -xp.inf\n    return ld",
    "docstring": "Compute logarithm of determinant of a square matrix. The (natural) logarithm of the determinant of a square matrix is returned if det(A) is non-negative and well defined. If the determinant is zero or negative returns -Inf. Equivalent to : np.log(np.det(A)) but more robust. Parameters ---------- A : array_like of shape (n, n) The square matrix. Returns ------- logdet : float When det(A) is strictly positive, log(det(A)) is returned. When det(A) is non-positive or not defined, then -inf is returned. See Also -------- numpy.linalg.slogdet : Compute the sign and (natural) logarithm of the determinant of an array. Examples -------- >>> import numpy as np >>> from sklearn.utils.extmath import fast_logdet >>> a = np.array([[5, 1], [2, 8]]) >>> fast_logdet(a) np.float64(3.6375861597263857)",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\utils\\extmath.py",
    "ast_data": "FunctionDef name:fast_logdet arg:A arguments arg Assign Call Assign Call If Compare Return return:yes Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_assert_weights_created",
    "source_code": "def _assert_weights_created(self):\n    if self.dynamic:\n        return\n    if 'build' in self.__class__.__dict__ and self.__class__ != Model and (not self.built):\n        raise ValueError('Weights for model %s have not yet been created. Weights are created when the Model is first called on inputs or `build()` is called with an `input_shape`.' % self.name)",
    "docstring": "Asserts that all the weights for the model have been created. For a non-dynamic model, the weights must already be created after the layer has been called. For a dynamic model, the exact list of weights can never be known for certain since it may change at any time during execution. We run this check right before accessing weights or getting the Numpy value for the current weights. Otherwise, if the layer has never been called, the user would just get an empty list, which is misleading. Raises: ValueError: if the weights of the network has not yet been created.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\training.py",
    "ast_data": "FunctionDef name:_assert_weights_created arg:self arguments arg If Return return:no If BoolOp Compare Compare Raise Call"
  },
  {
    "library": "tensorflow",
    "name": "split",
    "source_code": "def split(self, count=1):\n\n    def _key_to_state(alg, key):\n        return [0] * (_get_state_size(alg) - 1) + [key]\n    alg = self.algorithm\n    if alg in (a.value for a in random_ops_util.Algorithm):\n        keys = self._make_int64_keys(shape=[count])\n        return [Generator(state=_key_to_state(alg, key), alg=alg) for key in array_ops_stack.unstack(keys, num=count)]\n    else:\n        raise ValueError(stateless_random_ops.unsupported_alg_error_msg(alg))",
    "docstring": "Returns a list of independent objects. Two generators are independent of each other in the sense that the random-number streams they generate don't have statistically detectable correlations. The new generators are also independent of the old one. The old generator's state will be changed (like other random-number generating methods), so two calls of will return different new generators. For example: The new generators will be put on the current device (possible different from the old generator's), for example: Args: count: the number of generators to return. Returns: A list (length ) of objects independent of each other. The new generators have the same RNG algorithm as the old one.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\stateful_random_ops.py",
    "ast_data": "FunctionDef name:split arg:self arg:count arguments arg arg FunctionDef name:_key_to_state arg:alg arg:key arguments arg arg Return return:yes Call Assign If Compare Assign Call Return return:yes Call Call Call Raise Call Call"
  },
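The docstring's stripped examples can be sketched roughly as follows (the seed and shapes are arbitrary):

```python
import tensorflow as tf

g = tf.random.Generator.from_seed(123)
# split() advances g's state and returns independent child generators.
children = g.split(count=3)
samples = [child.normal(shape=[2]) for child in children]
```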
  {
    "library": "pytorch",
    "name": "trim_significant_figures",
    "source_code": "def trim_significant_figures(self):\n    self._trim_significant_figures = True",
    "docstring": "Enables trimming of significant figures when building the formatted table.",
    "type": "method",
    "file_path": "pytorch\\torch\\utils\\benchmark\\utils\\compare.py",
    "ast_data": "FunctionDef name:trim_significant_figures arg:self arguments arg Assign"
  },
  {
    "library": "tensorflow",
    "name": "_get_resource_handle",
    "source_code": "def _get_resource_handle(self):\n    if not self._resource_handle:\n        self._resource_handle = resource_handle_pb2.ResourceHandleProto()\n        self._resource_handle.device = self._handle.split(';')[-1]\n        self._resource_handle.container = pywrap_tf_session.TENSOR_HANDLE_KEY\n        self._resource_handle.name = self._handle\n    return self._resource_handle",
    "docstring": "The ResourceHandle representation of this handle.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\session_ops.py",
    "ast_data": "FunctionDef name:_get_resource_handle arg:self arguments arg If Assign Call Assign Call Assign Assign Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "insert_activation_post_process",
    "source_code": "def insert_activation_post_process(m, special_act_post_process=None):\n    if needs_observation(m) and (not isinstance(m, DeQuantStub)):\n        m.add_module('activation_post_process', get_activation_post_process(m.qconfig, device, special_act_post_process))\n        _register_activation_post_process_hook(m, pre_hook=_activation_is_memoryless(m.qconfig))",
    "docstring": "Adds an activation post process module and register a pre or post hook that calls the module",
    "type": "function",
    "file_path": "pytorch\\torch\\ao\\quantization\\quantize.py",
    "ast_data": "FunctionDef name:insert_activation_post_process arg:m arg:special_act_post_process arguments arg arg If BoolOp Call Call Call Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "__call__",
    "source_code": "def __call__(self, shardable_tensors: Sequence[sharding_util.ShardableTensor]) -> Sequence[sharding_util.Shard]:\n    tensors_by_task = {}\n    for shardable_tensor in shardable_tensors:\n        tensor = shardable_tensor.tensor\n        checkpoint_key = shardable_tensor.checkpoint_key\n        slice_spec = shardable_tensor.slice_spec\n        tensors_by_task.setdefault(checkpoint_key, {})[slice_spec] = tensor\n    return [tensors_by_task]",
    "docstring": "Callback to split tensors into shards based on their device spec task. Args: shardable_tensors: A list of ShardableTensors. Returns: List of shard dicts containing tensors. [ {checkpoint key: {slice_spec: tensor} } ]",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\checkpoint\\sharding\\sharding_policies.py",
    "ast_data": "FunctionDef name:__call__ arg:self arg:shardable_tensors arguments arg arg Assign For Assign Assign Assign Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "lookup",
    "source_code": "def lookup(self, function_type: function_type_lib.FunctionType, context: Optional[FunctionContext]=None) -> Optional[Any]:\n    context = context or FunctionContext()\n    if context in self._dispatch_dict:\n        dispatch_type = self._dispatch_dict[context].dispatch(function_type)\n        if dispatch_type:\n            return self._primary[context, dispatch_type]\n    return None",
    "docstring": "Looks up a function based on the context and type.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\core\\function\\polymorphism\\function_cache.py",
    "ast_data": "FunctionDef name:lookup arg:self arg:function_type arg:context arguments arg arg arg Assign BoolOp Call If Compare Assign Call If Return return:yes Return return:no"
  },
  {
    "library": "tensorflow",
    "name": "get_local_ip",
    "source_code": "def get_local_ip(self):\n    return _request_compute_metadata('instance/network-interfaces/0/ip')",
    "docstring": "Return the local ip address of the Google Cloud VM the workload is running on.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\tpu\\client\\client.py",
    "ast_data": "FunctionDef name:get_local_ip arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "kornia",
    "name": "top_left",
    "source_code": "@property\ndef top_left(self) -> torch.Tensor:\n    return self._data[..., (0, 1)]",
    "docstring": "The [x y] position of the top-left coordinate of the bounding box.",
    "type": "method",
    "file_path": "kornia\\kornia\\contrib\\face_detection.py",
    "ast_data": "FunctionDef name:top_left arg:self arguments arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "named_tensors",
    "source_code": "def named_tensors(self, remove_duplicate: bool=True) -> Iterable[tuple[str, torch.Tensor]]:\n    yield from self.module.named_parameters(remove_duplicate=remove_duplicate)\n    yield from self.module.named_buffers(remove_duplicate=remove_duplicate)",
    "docstring": "Iterate over all the tensors in the module.",
    "type": "method",
    "file_path": "pytorch\\torch\\nn\\utils\\_named_member_accessor.py",
    "ast_data": "FunctionDef name:named_tensors arg:self arg:remove_duplicate arguments arg arg Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_add_new_centers",
    "source_code": "def _add_new_centers(self):\n    new_centers = self._choose_initial_centers()\n    if self._distance_metric == COSINE_DISTANCE:\n        new_centers = nn_impl.l2_normalize(new_centers, dim=1)\n    all_centers = cond.cond(math_ops.equal(self._num_selected, 0), lambda: new_centers, lambda: array_ops.concat([self._cluster_centers, new_centers], 0))\n    a = state_ops.assign(self._cluster_centers, all_centers, validate_shape=False)\n    if self._cluster_centers_updated is not self._cluster_centers:\n        a = state_ops.assign(self._cluster_centers_updated, a, validate_shape=False)\n    return self._num_clusters - array_ops.shape(a)[0]",
    "docstring": "Adds some centers and returns the number of centers remaining.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\clustering_ops.py",
    "ast_data": "FunctionDef name:_add_new_centers arg:self arguments arg Assign Call If Compare Assign Call Assign Call Call arguments arguments Call Assign Call If Compare Assign Call Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "_on_capture_lost",
    "source_code": "def _on_capture_lost(self, event):\n    self._set_capture(False)",
    "docstring": "Capture changed or lost",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\backends\\backend_wx.py",
    "ast_data": "FunctionDef name:_on_capture_lost arg:self arg:event arguments arg arg Call"
  },
  {
    "library": "tensorflow",
    "name": "ragged_cumsum",
    "source_code": "@dispatch.dispatch_for_api(math_ops.cumsum)\ndef ragged_cumsum(x: ragged_tensor.Ragged, axis: int=0, exclusive: bool=False, reverse: bool=False, name: typing.Optional[str]=None):\n    with ops.name_scope(name, 'RaggedCumSum', [x, axis, exclusive, reverse]):\n        axis = array_ops.get_positive_axis(axis, x.shape.rank, ndims_name='rank')\n        if axis == x.ragged_rank:\n            last_rp = x._nested_row_partitions[-1]\n            return x.with_flat_values(_cumsum_flat_values_at_ragged_rank(last_rp, x.flat_values, exclusive=exclusive, reverse=reverse))\n        elif axis > x.ragged_rank:\n            new_axis = axis - x.ragged_rank\n            cumsum_bound = functools.partial(math_ops.cumsum, axis=new_axis, exclusive=exclusive, reverse=reverse)\n            return ragged_functional_ops.map_flat_values(cumsum_bound, x)\n        else:\n            dense_version = x.to_tensor()\n            result = math_ops.cumsum(dense_version, axis, exclusive=exclusive, reverse=reverse, name=name)\n            return ragged_tensor.RaggedTensor.from_tensor(result, lengths=x.nested_row_lengths())",
    "docstring": "Calculate math_ops.cumsum for a RaggedTensor. Given a ragged tensor , the is a ragged tensor with the same shape. One can calculate the value of as follows: Args: x: the original ragged tensor to sum. axis: the axis along which to sum, can range -rank<=axis<rank. exclusive: is the sum exclusive or inclusive? If True, then result[0]=0. If False, then result[0]=x[0]. reverse: If True, sum from back to front. name: the name of the op. Returns: the cumulative sum.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\ragged_math_ops.py",
    "ast_data": "FunctionDef name:ragged_cumsum arg:x arg:axis arg:exclusive arg:reverse arg:name arguments arg arg arg arg arg With Call Assign Call If Compare Assign Return return:yes Call Call If Compare Assign Assign Call Return return:yes Call Assign Call Assign Call Return return:yes Call Call Call"
  },
  {
    "library": "scrapy",
    "name": "NoReferrerWhenDowngradePolicy",
    "source_code": "class NoReferrerWhenDowngradePolicy(ReferrerPolicy):\n    name: str = POLICY_NO_REFERRER_WHEN_DOWNGRADE\n\n    def referrer(self, response_url: str, request_url: str) -> str | None:\n        if not self.tls_protected(response_url) or self.tls_protected(request_url):\n            return self.stripped_referrer(response_url)\n        return None",
    "docstring": "The \"no-referrer-when-downgrade\" policy sends a full URL along with requests from a TLS-protected environment settings object to a potentially trustworthy URL, and requests from clients which are not TLS-protected to any origin. Requests from TLS-protected clients to non-potentially trustworthy URLs, on the other hand, will contain no referrer information. A Referer HTTP header will not be sent. This is a user agent's default behavior, if no policy is otherwise specified.",
    "type": "class",
    "file_path": "scrapy\\scrapy\\spidermiddlewares\\referer.py",
    "ast_data": "ClassDef name:NoReferrerWhenDowngradePolicy FunctionDef name:referrer arg:self arg:response_url arg:request_url arguments arg arg arg If BoolOp Call Call Return return:yes Call Return return:no"
  },
  {
    "library": "kornia",
    "name": "SmallestMaxSize",
    "source_code": "class SmallestMaxSize(Resize):\n\n    def __init__(self, max_size: int, resample: Union[str, int, Resample]=Resample.BILINEAR.name, align_corners: bool=True, p: float=1.0) -> None:\n        super().__init__(size=max_size, side='short', resample=resample, align_corners=align_corners, p=p)",
    "docstring": "Rescale an image so that minimum side is equal to max_size, keeping the aspect ratio of the initial image. Args: max_size: maximum size of the image after the transformation.",
    "type": "class",
    "file_path": "kornia\\kornia\\augmentation\\_2d\\geometric\\resize.py",
    "ast_data": "ClassDef name:SmallestMaxSize FunctionDef name:__init__ arg:self arg:max_size arg:resample arg:align_corners arg:p arguments arg arg arg arg arg Call Call"
  },
  {
    "library": "scikit-learn",
    "name": "fit",
    "source_code": "def fit(self, X, y, **params):\n    return super().fit(X, y, **params)",
    "docstring": "Fit MultiTaskElasticNet model with coordinate descent. Fit is on grid of alphas and best alpha estimated by cross-validation. Parameters ---------- X : ndarray of shape (n_samples, n_features) Training data. y : ndarray of shape (n_samples, n_targets) Training target variable. Will be cast to X's dtype if necessary. **params : dict, default=None Parameters to be passed to the CV splitter. .. versionadded:: 1.4 Only available if , which can be set by using `Metadata Routing User Guide ` for more details. Returns ------- self : object Returns MultiTaskElasticNet instance.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\linear_model\\_coordinate_descent.py",
    "ast_data": "FunctionDef name:fit arg:self arg:X arg:y arguments arg arg arg arg Return return:yes Call Call"
  },
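A minimal fit sketch with synthetic multi-target data (shapes and seed are arbitrary):

```python
import numpy as np
from sklearn.linear_model import MultiTaskElasticNetCV

rng = np.random.default_rng(0)
X = rng.normal(size=(50, 4))
Y = X @ rng.normal(size=(4, 2)) + 0.1 * rng.normal(size=(50, 2))

# alpha_ is selected by cross-validation over an automatic grid.
model = MultiTaskElasticNetCV(cv=3).fit(X, Y)
print(model.alpha_)
```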
  {
    "library": "numpy",
    "name": "_UFuncNoLoopError",
    "source_code": "@_display_as_base\nclass _UFuncNoLoopError(UFuncTypeError):\n\n    def __init__(self, ufunc, dtypes):\n        super().__init__(ufunc)\n        self.dtypes = tuple(dtypes)\n\n    def __str__(self):\n        return f'ufunc {self.ufunc.__name__!r} did not contain a loop with signature matching types {_unpack_tuple(self.dtypes[:self.ufunc.nin])!r} -> {_unpack_tuple(self.dtypes[self.ufunc.nin:])!r}'",
    "docstring": "Thrown when a ufunc loop cannot be found",
    "type": "class",
    "file_path": "numpy\\numpy\\_core\\_exceptions.py",
    "ast_data": "ClassDef name:_UFuncNoLoopError FunctionDef name:__init__ arg:self arg:ufunc arg:dtypes arguments arg arg arg Call Call Assign Call FunctionDef name:__str__ arg:self arguments arg Return return:yes Call Call"
  },
  {
    "library": "numpy",
    "name": "__init__",
    "source_code": "def __init__(self, baseurl, destpath=os.curdir):\n    DataSource.__init__(self, destpath=destpath)\n    self._baseurl = baseurl",
    "docstring": "Create a Repository with a shared url or directory of baseurl.",
    "type": "method",
    "file_path": "numpy\\numpy\\lib\\_datasource.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:baseurl arg:destpath arguments arg arg arg Call Assign"
  },
  {
    "library": "tensorflow",
    "name": "forward_sync",
    "source_code": "@property\ndef forward_sync(self):\n    if self._forward_sync is None:\n        with ops.control_dependencies(None):\n            self._forward_sync = control_flow_ops.control_trigger(name='f_sync')\n        self._forward_sync._set_control_flow_context(self._forward_context)\n        self._forward_index.op._add_control_input(self._forward_sync)\n    return self._forward_sync",
    "docstring": "A control trigger node for synchronization in the forward loop. One main use is to keep the push ops of a stack executed in the iteration order.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\control_flow_state.py",
    "ast_data": "FunctionDef name:forward_sync arg:self arguments arg If Compare With Call Assign Call Call Call Return return:yes"
  },
  {
    "library": "seaborn",
    "name": "PlotSpecError",
    "source_code": "class PlotSpecError(RuntimeError):\n\n    @classmethod\n    def _during(cls, step: str, var: str='') -> PlotSpecError:\n        message = []\n        if var:\n            message.append(f'{step} failed for the `{var}` variable.')\n        else:\n            message.append(f'{step} failed.')\n        message.append('See the traceback above for more information.')\n        return cls(' '.join(message))",
    "docstring": "Error class raised from seaborn.objects.Plot for compile-time failures. In the declarative Plot interface, exceptions may not be triggered immediately by bad user input (and validation at input time may not be possible). This class is used to signal that indirect dependency. It should be raised in an exception chain when compile-time operations fail with an error message providing useful context (e.g., scaling errors could specify the variable that failed.)",
    "type": "class",
    "file_path": "seaborn\\seaborn\\_core\\exceptions.py",
    "ast_data": "ClassDef name:PlotSpecError FunctionDef name:_during arg:cls arg:step arg:var arguments arg arg arg Assign If Call Call Call Return return:yes Call Call"
  },
  {
    "library": "pandas",
    "name": "__len__",
    "source_code": "def __len__(self) -> int:\n    return len(self._pa_array)",
    "docstring": "Length of this array. Returns ------- length : int",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\arrays\\arrow\\array.py",
    "ast_data": "FunctionDef name:__len__ arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "x_key",
    "source_code": "@property\ndef x_key(self):\n    return (object_identity.Reference(self.x),) + self._deep_tuple(tuple(sorted(self.kwargs.items())))",
    "docstring": "Returns key used for caching Y=g(X).",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\distributions\\bijector_impl.py",
    "ast_data": "FunctionDef name:x_key arg:self arguments arg Return return:yes Call Call Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "deny",
    "source_code": "def deny(self, include: 'GlobPattern', *, exclude: 'GlobPattern'=()):\n    self.patterns[GlobGroup(include, exclude=exclude)] = _PatternInfo(_ModuleProviderAction.DENY, allow_empty=True)",
    "docstring": "Blocklist modules who names match the given glob patterns from the list of modules the package can import. If a dependency on any matching packages is found, a :class: is raised. Args: include (Union[List[str], str]): A string e.g. `mock`. exclude (Union[List[str], str]): An optional pattern that excludes some patterns that match the include string.",
    "type": "method",
    "file_path": "pytorch\\torch\\package\\package_exporter.py",
    "ast_data": "FunctionDef name:deny arg:self arg:include arguments arg arg arg Assign Call Call"
  },
  {
    "library": "django",
    "name": "check_envelope",
    "source_code": "def check_envelope(result, func, cargs, offset=-1):\n    return ptr_byref(cargs, offset)",
    "docstring": "Check a function that returns an OGR Envelope by reference.",
    "type": "function",
    "file_path": "django\\django\\contrib\\gis\\gdal\\prototypes\\errcheck.py",
    "ast_data": "FunctionDef name:check_envelope arg:result arg:func arg:cargs arg:offset arguments arg arg arg arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "normalize_dims",
    "source_code": "def normalize_dims(dims: DimsType, ndim: int) -> DimsSequenceType:\n    if isinstance(dims, int):\n        dims = (normalize_dim(dims, ndim),)\n    elif isinstance(dims, list):\n        dims = [normalize_dim(dim, ndim) for dim in dims]\n    elif isinstance(dims, tuple):\n        dims = tuple([normalize_dim(dim, ndim) for dim in dims])\n    return dims",
    "docstring": "Normalize a dim or a sequence of dims, so that they are all positive.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\tensor\\_ops\\utils.py",
    "ast_data": "FunctionDef name:normalize_dims arg:dims arg:ndim arguments arg arg If Call Assign Call If Call Assign Call If Call Assign Call Call Return return:yes"
  },
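A standalone re-implementation of the single-dim normalization it relies on, for illustration only (the real `normalize_dim` helper also validates that the index is in range):

```python
def normalize_dim(dim: int, ndim: int) -> int:
    # Map a possibly negative dim index onto [0, ndim).
    return dim if dim >= 0 else dim + ndim

print(normalize_dim(-1, 4))                         # 3
print(tuple(normalize_dim(d, 4) for d in (0, -2)))  # (0, 2)
```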
  {
    "library": "numpy",
    "name": "_recursive_or",
    "source_code": "def _recursive_or(a, b):\n    for name in a.dtype.names:\n        af, bf = (a[name], b[name])\n        if af.dtype.names is not None:\n            _recursive_or(af, bf)\n        else:\n            af |= bf",
    "docstring": "do a|=b on each field of a, recursively",
    "type": "method",
    "file_path": "numpy\\numpy\\ma\\core.py",
    "ast_data": "FunctionDef name:_recursive_or arg:a arg:b arguments arg arg For Assign If Compare Call"
  },
  {
    "library": "numpy",
    "name": "uniform",
    "source_code": "@staticmethod\n@memoize\ndef uniform(size, dtype, rnd):\n    return np.ones(size, dtype=dtype)",
    "docstring": "Returns an array that has the same value everywhere.",
    "type": "method",
    "file_path": "numpy\\benchmarks\\benchmarks\\bench_function_base.py",
    "ast_data": "FunctionDef name:uniform arg:size arg:dtype arg:rnd arguments arg arg arg Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "_get_default_requests",
    "source_code": "@classmethod\ndef _get_default_requests(cls):\n    requests = MetadataRequest(owner=cls.__name__)\n    for method in SIMPLE_METHODS:\n        setattr(requests, method, cls._build_request_for_signature(router=requests, method=method))\n    substr = '__metadata_request__'\n    for base_class in reversed(inspect.getmro(cls)):\n        for attr, value in vars(base_class).items():\n            if substr not in attr:\n                continue\n            method = attr[attr.index(substr) + len(substr):]\n            for prop, alias in value.items():\n                getattr(requests, method).add_request(param=prop, alias=alias)\n    return requests",
    "docstring": "Collect default request values. This method combines the information present in `` class attributes, as well as determining request keys from method signatures.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\utils\\_metadata_requests.py",
    "ast_data": "FunctionDef name:_get_default_requests arg:cls arguments arg Assign Call For Call Call Assign For Call Call For Call Call If Compare Assign Call Call For Call Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "add_fallback_dispatch_list",
    "source_code": "def add_fallback_dispatch_list(target):\n    if hasattr(target, FALLBACK_DISPATCH_ATTR):\n        raise AssertionError('%s already has a dispatch list' % target)\n    setattr(target, FALLBACK_DISPATCH_ATTR, [])\n    return target",
    "docstring": "Decorator that adds a dispatch_list attribute to an op.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\util\\dispatch.py",
    "ast_data": "FunctionDef name:add_fallback_dispatch_list arg:target arguments arg If Call Raise Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "show",
    "source_code": "def show() -> str:\n    return torch._C._show_config()",
    "docstring": "Return a human-readable string with descriptions of the configuration of PyTorch.",
    "type": "function",
    "file_path": "pytorch\\torch\\__config__.py",
    "ast_data": "FunctionDef name:show arguments Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "validate_metadata",
    "source_code": "def validate_metadata(self, *, method, params):\n    param_names = self._get_param_names(method=method, return_alias=False, ignore_self_request=False)\n    if self._self_request:\n        self_params = self._self_request._get_param_names(method=method, return_alias=False)\n    else:\n        self_params = set()\n    extra_keys = set(params.keys()) - param_names - self_params\n    if extra_keys:\n        raise TypeError(f'{self.owner}.{method} got unexpected argument(s) {extra_keys}, which are not routed to any object.')",
    "docstring": "Validate given metadata for a method. This raises a `fit\"fit\"`. params : dict A dictionary of provided metadata.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\utils\\_metadata_requests.py",
    "ast_data": "FunctionDef name:validate_metadata arg:self arguments arg arg arg Assign Call If Assign Call Assign Call Assign Call Call If Raise Call"
  },
  {
    "library": "tensorflow",
    "name": "assert_non_singular",
    "source_code": "def assert_non_singular(self, name='assert_non_singular'):\n    with self._name_scope(name):\n        return self._assert_non_singular()",
    "docstring": "Returns an that asserts this operator is non singular. This operator is considered non-singular if Args: name: A string name to prepend to created ops. Returns: An , that, when run, will raise an if the operator is singular.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\linalg\\linear_operator.py",
    "ast_data": "FunctionDef name:assert_non_singular arg:self arg:name arguments arg arg With Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "self_adjoint_eigvals",
    "source_code": "@tf_export('linalg.eigvalsh', v1=['linalg.eigvalsh', 'self_adjoint_eigvals'])\n@dispatch.add_dispatch_support\n@deprecation.deprecated_endpoints('self_adjoint_eigvals')\ndef self_adjoint_eigvals(tensor, name=None):\n    e, _ = gen_linalg_ops.self_adjoint_eig_v2(tensor, compute_v=False, name=name)\n    return e",
    "docstring": "Computes the eigenvalues of one or more self-adjoint matrices. Note: If your program backpropagates through this function, you should replace it with a call to tf.linalg.eigh (possibly ignoring the second output) to avoid computing the eigen decomposition twice. This is because the eigenvectors are used to compute the gradient w.r.t. the eigenvalues. See _SelfAdjointEigV2Grad in linalg_grad.py. Args: tensor: of shape . name: string, optional name of the operation. Returns: e: Eigenvalues. Shape is . The vector contains the eigenvalues of .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\linalg_ops.py",
    "ast_data": "FunctionDef name:self_adjoint_eigvals arg:tensor arg:name arguments arg arg Assign Call Return return:yes Call Call"
  },
  {
    "library": "matplotlib",
    "name": "_release",
    "source_code": "def _release(self, event):\n    for zoom_id in self._ids_zoom:\n        self.figure.canvas.mpl_disconnect(zoom_id)\n    self._ids_zoom = []\n    if not self._xypress:\n        self._cancel_action()\n        return\n    done_ax = []\n    for cur_xypress in self._xypress:\n        x, y = (event.x, event.y)\n        lastx, lasty, a, _ind, view = cur_xypress\n        if abs(x - lastx) < 5 or abs(y - lasty) < 5:\n            self._cancel_action()\n            return\n        twinx = any((a.get_shared_x_axes().joined(a, a1) for a1 in done_ax))\n        twiny = any((a.get_shared_y_axes().joined(a, a1) for a1 in done_ax))\n        done_ax.append(a)\n        if self._button_pressed == 1:\n            direction = 'in'\n        elif self._button_pressed == 3:\n            direction = 'out'\n        else:\n            continue\n        a._set_view_from_bbox((lastx, lasty, x, y), direction, self._zoom_mode, twinx, twiny)\n    self._zoom_mode = None\n    self.toolmanager.get_tool(_views_positions).push_current()\n    self._cancel_action()",
    "docstring": "Callback for mouse button releases in zoom-to-rectangle mode.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\backend_tools.py",
    "ast_data": "FunctionDef name:_release arg:self arg:event arguments arg arg For Call Assign If Call Return return:no Assign For Assign Assign If BoolOp Compare Call Compare Call Call Return return:no Assign Call Call Call Assign Call Call Call Call If Compare Assign If Compare Assign Call Assign Call Call Call"
  },
  {
    "library": "scikit-learn",
    "name": "_raw_predict_init",
    "source_code": "def _raw_predict_init(self, X):\n    self._check_initialized()\n    X = self.estimators_[0, 0]._validate_X_predict(X, check_input=True)\n    if self.init_ == 'zero':\n        raw_predictions = np.zeros(shape=(X.shape[0], self.n_trees_per_iteration_), dtype=np.float64)\n    else:\n        raw_predictions = _init_raw_predictions(X, self.init_, self._loss, is_classifier(self))\n    return raw_predictions",
    "docstring": "Check input and compute raw predictions of the init estimator.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\ensemble\\_gb.py",
    "ast_data": "FunctionDef name:_raw_predict_init arg:self arg:X arguments arg arg Call Assign Call If Compare Assign Call Assign Call Call Return return:yes"
  },
  {
    "library": "pandas",
    "name": "style",
    "source_code": "@property\ndef style(self) -> Styler:\n    has_jinja2 = import_optional_dependency('jinja2', errors='ignore')\n    if not has_jinja2:\n        raise AttributeError(\"The '.style' accessor requires jinja2\")\n    from pandas.io.formats.style import Styler\n    return Styler(self)",
    "docstring": "Returns a Styler object. Contains methods for building a styled HTML representation of the DataFrame. See Also -------- io.formats.style.Styler : Helps style a DataFrame or Series according to the data with HTML and CSS. Examples -------- >>> df = pd.DataFrame({\"A\": [1, 2, 3]}) >>> df.style # doctest: +SKIP Please see _ for more examples.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\frame.py",
    "ast_data": "FunctionDef name:style arg:self arguments arg Assign Call If Raise Call Return return:yes Call"
  },
  {
    "library": "django",
    "name": "CommandError",
    "source_code": "class CommandError(Exception):\n\n    def __init__(self, *args, returncode=1, **kwargs):\n        self.returncode = returncode\n        super().__init__(*args, **kwargs)",
    "docstring": "Exception class indicating a problem while executing a management command. If this exception is raised during the execution of a management command, it will be caught and turned into a nicely-printed error message to the appropriate output stream (i.e., stderr); as a result, raising this exception (with a sensible description of the error) is the preferred way to indicate that something has gone wrong in the execution of a command.",
    "type": "class",
    "file_path": "django\\django\\core\\management\\base.py",
    "ast_data": "ClassDef name:CommandError FunctionDef name:__init__ arg:self arguments arg arg arg arg Assign Call Call"
  },
  {
    "library": "pandas",
    "name": "iterrows",
    "source_code": "def iterrows(self) -> Iterable[tuple[Hashable, Series]]:\n    columns = self.columns\n    klass = self._constructor_sliced\n    for k, v in zip(self.index, self.values):\n        s = klass(v, index=columns, name=k).__finalize__(self)\n        if self._mgr.is_single_block:\n            s._mgr.add_references(self._mgr)\n        yield (k, s)",
    "docstring": "Iterate over DataFrame rows as (index, Series) pairs. Yields ------ index : label or tuple of label The index of the row. A tuple for a . data : Series The data of the row as a Series. See Also -------- DataFrame.itertuples : Iterate over DataFrame rows as namedtuples of the values. DataFrame.items : Iterate over (column name, Series) pairs. Notes ----- 1. Because `itertuples`. 2. You should **never modify** something you are iterating over. This is not guaranteed to work in all cases. Depending on the data types, the iterator returns a copy and not a view, and writing to it will have no effect. Examples -------- >>> df = pd.DataFrame([[1, 1.5]], columns=[\"int\", \"float\"]) >>> row = next(df.iterrows())[1] >>> row int 1.0 float 1.5 Name: 0, dtype: float64 >>> print(row[\"int\"].dtype) float64 >>> print(df[\"int\"].dtype) int64",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\frame.py",
    "ast_data": "FunctionDef name:iterrows arg:self arguments arg Assign Assign For Call Assign Call Call If Call"
  },
  {
    "library": "pytorch",
    "name": "autotune_hints_to_configs",
    "source_code": "def autotune_hints_to_configs(hints: OrderedSet[AutotuneHint], size_hints, block_size: int, device_props: DeviceProperties) -> list[Config]:\n    xyz_options: tuple[tuple[int, Optional[int], Optional[int]], ...]\n    configs: list[Config] = []\n    for hint in hints:\n        if hint == AutotuneHint.ONE_ELEMENT_PER_THREAD:\n            if len(size_hints) == 1:\n                xyz_options = ((block_size // 4, None, None),)\n            elif len(size_hints) == 2:\n                xyz_options = ((block_size // 4, 1, None), (1, block_size // 4, None))\n            elif len(size_hints) == 3:\n                xyz_options = ((block_size // 4, 1, 1), (1, block_size // 4, 1), (1, 1, block_size // 4))\n            configs.extend((triton_config(size_hints, *xyz, num_elements_per_warp=device_props.warp_size if device_props.warp_size else 32) for xyz in xyz_options))\n    return configs",
    "docstring": "AutotuneHints can be attached to the metadata of triton kernels for providing suggestions about what to try for autotuning. One reason to do this is if there are some configs that are only useful in specific scenarios, in which case we can avoid wasting compile time on autotuning unless we know we are in one of those scenarios. Based on those hints, this function will generate a list of additional autotuning configs to try.",
    "type": "function",
    "file_path": "pytorch\\torch\\_inductor\\runtime\\triton_heuristics.py",
    "ast_data": "FunctionDef name:autotune_hints_to_configs arg:hints arg:size_hints arg:block_size arg:device_props arguments arg arg arg arg For If Compare If Compare Call Assign If Compare Call Assign If Compare Call Assign Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_compute_gradient_list",
    "source_code": "def _compute_gradient_list(f, xs, delta):\n    xs = [ops.convert_to_tensor(x) for x in xs]\n    xs_dtypes = [x.dtype for x in xs]\n    xs_shapes = [x.shape for x in xs]\n    f_temp = _prepare(f, xs_dtypes, xs_shapes)\n    y = f_temp(*xs)\n    return tuple(zip(*[_compute_gradient(f, y.shape, dtypes.as_dtype(y.dtype), xs, i, delta) for i in range(len(xs))]))",
    "docstring": "Compute gradients for a list of x values.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\gradient_checker_v2.py",
    "ast_data": "FunctionDef name:_compute_gradient_list arg:f arg:xs arg:delta arguments arg arg arg Assign Call Assign Assign Assign Call Assign Call Return return:yes Call Call Call Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "get",
    "source_code": "def get(self, key) -> bytes:\n    b64_key = self.prefix + self._encode(key)\n    kvs = self._try_wait_get([b64_key])\n    if kvs is None:\n        raise LookupError(f'Key {key} not found in EtcdStore')\n    return self._decode(kvs[b64_key])",
    "docstring": "Get a value by key, possibly doing a blocking wait. If key is not immediately present, will do a blocking wait for at most `` Raises: LookupError - If key still not published after timeout",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\elastic\\rendezvous\\etcd_store.py",
    "ast_data": "FunctionDef name:get arg:self arg:key arguments arg arg Assign Call Assign Call If Compare Raise Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, name) -> None:\n    if not isinstance(name, str):\n        raise ValueError('name for name_scope must be a string.')\n    self._name = name\n    self._exit_fns = []",
    "docstring": "Initialize the context manager. Args: name: The prefix to use on all names created within the name scope. Raises: ValueError: If name is not a string.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\ops.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:name arguments arg arg If Call Raise Call Assign Assign"
  },
  {
    "library": "pytorch",
    "name": "_check_liveness",
    "source_code": "@staticmethod\ndef _check_liveness(indices: list[PathOutputIndex], output_refs: list[list[Optional[StorageWeakRefWrapper]]]) -> bool:\n    for depth, output_index in indices:\n        w = output_refs[depth][output_index]\n        assert w is not None\n        if w() is not None:\n            return False\n    return True",
    "docstring": "Check that all of the indices specified are dead references",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\cudagraph_trees.py",
    "ast_data": "FunctionDef name:_check_liveness arg:indices arg:output_refs arguments arg arg For Assign Compare If Compare Call Return return:yes Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "emit_tid",
    "source_code": "def emit_tid(self, name, pid, tid):\n    event = {}\n    event['name'] = 'thread_name'\n    event['ph'] = 'M'\n    event['pid'] = pid\n    event['tid'] = tid\n    event['args'] = {'name': name}\n    self._metadata.append(event)",
    "docstring": "Adds a thread metadata event to the trace. Args: name: The thread name as a string. pid: Identifier of the process as an integer. tid: Identifier of the thread as an integer.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\client\\timeline.py",
    "ast_data": "FunctionDef name:emit_tid arg:self arg:name arg:pid arg:tid arguments arg arg arg arg Assign Assign Assign Assign Assign Assign Call"
  },
  {
    "library": "tensorflow",
    "name": "serialize_function",
    "source_code": "def serialize_function(function, concrete_functions):\n    proto = saved_object_graph_pb2.SavedFunction()\n    function_spec_proto = _serialize_function_spec(function.function_spec)\n    proto.function_spec.CopyFrom(function_spec_proto)\n    for concrete_function in concrete_functions:\n        proto.concrete_functions.append(concrete_function.name)\n    return proto",
    "docstring": "Build a SavedFunction proto.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\saved_model\\function_serialization.py",
    "ast_data": "FunctionDef name:serialize_function arg:function arg:concrete_functions arguments arg arg Assign Call Assign Call Call For Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_start",
    "source_code": "def _start(self):\n    if dill is None:\n        raise unittest.SkipTest('TODO(b/150264776): Resolve dependency issue in CI')\n    self._runner = MultiProcessRunner(fn=lambda: None, cluster_spec=self._cluster_spec, use_dill_for_args=False, share_gpu=self._share_gpu)\n    if self._initializer:\n        initializer = dill.dumps(self._initializer, dill.HIGHEST_PROTOCOL)\n    else:\n        initializer = None\n    for task_type, addresses in self._cluster_spec.items():\n        for task_id, _ in enumerate(addresses):\n            conn1, conn2 = multiprocessing.Pipe(duplex=True)\n            self._conn[task_type, task_id] = conn1\n            self._runner.start_single_process(task_type, task_id, fn=_pool_runner_worker, args=(task_type, task_id, initializer, conn2))",
    "docstring": "Starts the worker pool.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\multi_process_runner.py",
    "ast_data": "FunctionDef name:_start arg:self arguments arg If Compare Raise Call Assign Call arguments If Assign Call Assign For Call For Call Assign Call Assign Call"
  },
  {
    "library": "pytorch",
    "name": "insert",
    "source_code": "def insert(self, index: int, module: Module) -> None:\n    for i in range(len(self._modules), index, -1):\n        self._modules[str(i)] = self._modules[str(i - 1)]\n    self._modules[str(index)] = module",
    "docstring": "Insert a given module before a given index in the list. Args: index (int): index to insert. module (nn.Module): module to insert",
    "type": "method",
    "file_path": "pytorch\\torch\\nn\\modules\\container.py",
    "ast_data": "FunctionDef name:insert arg:self arg:index arg:module arguments arg arg arg For Call Call Assign Call Call Assign Call"
  },
  {
    "library": "pytorch",
    "name": "_get_preferred_device",
    "source_code": "def _get_preferred_device(self) -> torch.device:\n    backend = dist.get_backend(self._process_group)\n    if backend == dist.Backend.NCCL:\n        return torch.device(torch.cuda.current_device())\n    elif backend == dist.Backend.GLOO:\n        return torch.device('cpu')\n    else:\n        backend_config = dist.BackendConfig(backend)\n        for device, backend_str in backend_config.get_device_backend_map().items():\n            if backend_str == backend and device != 'cpu':\n                return torch.device(device, _get_device_module(device).current_device())\n    return torch.device('cpu')",
    "docstring": "Return the preferred device to be used when creating tensors for collectives. This method takes into account the associated process group",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\_shard\\sharded_tensor\\api.py",
    "ast_data": "FunctionDef name:_get_preferred_device arg:self arguments arg Assign Call If Compare Return return:yes Call Call If Compare Return return:yes Call Assign Call For Call Call If BoolOp Compare Compare Return return:yes Call Call Call Return return:yes Call"
  },
  {
    "library": "sphinx",
    "name": "get_documenter",
    "source_code": "def get_documenter(app: Sphinx, obj: Any, parent: Any) -> type[Documenter]:\n    return _get_documenter(obj, parent, registry=app.registry)",
    "docstring": "Get an autodoc.Documenter class suitable for documenting the given object. *obj* is the Python object to be documented, and *parent* is an another Python object (e.g. a module or a class) to which *obj* belongs to.",
    "type": "function",
    "file_path": "sphinx\\sphinx\\ext\\autosummary\\__init__.py",
    "ast_data": "FunctionDef name:get_documenter arg:app arg:obj arg:parent arguments arg arg arg Return return:yes Call"
  },
  {
    "library": "numpy",
    "name": "Hermite",
    "source_code": "class Hermite(ABCPolyBase):\n    _add = staticmethod(hermadd)\n    _sub = staticmethod(hermsub)\n    _mul = staticmethod(hermmul)\n    _div = staticmethod(hermdiv)\n    _pow = staticmethod(hermpow)\n    _val = staticmethod(hermval)\n    _int = staticmethod(hermint)\n    _der = staticmethod(hermder)\n    _fit = staticmethod(hermfit)\n    _line = staticmethod(hermline)\n    _roots = staticmethod(hermroots)\n    _fromroots = staticmethod(hermfromroots)\n    domain = np.array(hermdomain)\n    window = np.array(hermdomain)\n    basis_name = 'H'",
    "docstring": "An Hermite series class. The Hermite class provides the standard Python numerical methods '+', '-', '*', '//', '%', 'divmod', '**', and '()' as well as the attributes and methods listed below. Parameters ---------- coef : array_like Hermite coefficients in order of increasing degree, i.e, `domain` for its use. The default value is [-1., 1.]. symbol : str, optional Symbol used to represent the independent variable in string representations of the polynomial expression, e.g. for printing. The symbol must be a valid Python identifier. Default value is 'x'. .. versionadded:: 1.24",
    "type": "class",
    "file_path": "numpy\\numpy\\polynomial\\hermite.py",
    "ast_data": "ClassDef name:Hermite Assign Call Assign Call Assign Call Assign Call Assign Call Assign Call Assign Call Assign Call Assign Call Assign Call Assign Call Assign Call Assign Call Assign Call Assign"
  },
  {
    "library": "pytorch",
    "name": "from_dynamic_shapes_to_dynamic_axes",
    "source_code": "def from_dynamic_shapes_to_dynamic_axes(dynamic_shapes: dict[str, Any] | tuple[Any, ...] | list[Any], input_names: Sequence[str], exception: Exception) -> dict[str, Any] | None:\n    flat_dynamic_shapes, _ = _flatten_dynamic_shapes_to_axes(dynamic_shapes)\n    if len(input_names) < len(flat_dynamic_shapes):\n        raise ValueError(f'To construct dynamic_axes from dynamic_shapes, number of input names ({len(input_names)}) should be greater than or equal to the number of graph inputs(flat) ({len(flat_dynamic_shapes)})') from exception\n    dynamic_axes: dict[str, list[int]] = {}\n    for input_name, axes in zip(input_names, flat_dynamic_shapes):\n        if axes is None:\n            continue\n        converted_axes: list[int] = []\n        if isinstance(axes, dict):\n            for axis, dim in axes.items():\n                if dim is None:\n                    continue\n                converted_axes.append(axis)\n            dynamic_axes[input_name] = converted_axes\n        elif isinstance(axes, (list, tuple)):\n            for idx, dim in enumerate(axes):\n                if dim is None:\n                    continue\n                converted_axes.append(idx)\n            dynamic_axes[input_name] = converted_axes\n    return dynamic_axes",
    "docstring": "Converts dynamic_shapes into dynamic_axes by removing torch.export.Dim wrapping and converting to list or dict form based on whether dimension names are present. dynamic_shapes examples: (1) dynamic_shapes = {\"x\": {0: Dim(\"my_custom_axis_name_1\")}, \"y\": {1: Dim(\"my_custom_axis_name_2\")}} (2) dynamic_shapes = ({0: Dim(\"my_custom_axis_name_1\"}, {1: Dim(\"my_custom_axis_name_2\")}) these will be converted to dynamic_axes respectively: (1) dynamic_axes = {\"x\": [0], \"y\": [1]} (2) dynamic_axes = {\"x\": [0], \"y\": [1]} NOTE: If the model input is nested, so is the dynamic_shapes, we need to flatten the dynamic_shapes, and then assign the axes to the input names in the order they are provided. NOTE: input_names are used to assign the axes to the correct input names. If the input names are not provided, or less than the dynamic inputs/axes, it raises an error.",
    "type": "function",
    "file_path": "pytorch\\torch\\onnx\\_internal\\exporter\\_dynamic_shapes.py",
    "ast_data": "FunctionDef name:from_dynamic_shapes_to_dynamic_axes arg:dynamic_shapes arg:input_names arg:exception arguments arg arg arg Assign Call If Compare Call Call Raise Call Call Call For Call If Compare If Call For Call If Compare Call Assign If Call For Call If Compare Call Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_expand_sample_shape_to_vector",
    "source_code": "def _expand_sample_shape_to_vector(self, x, name):\n    x_static_val = tensor_util.constant_value(x)\n    if x_static_val is None:\n        prod = math_ops.reduce_prod(x)\n    else:\n        prod = np.prod(x_static_val, dtype=x.dtype.as_numpy_dtype())\n    ndims = x.get_shape().ndims\n    if ndims is None:\n        ndims = array_ops.rank(x)\n        expanded_shape = util.pick_vector(math_ops.equal(ndims, 0), np.array([1], dtype=np.int32), array_ops.shape(x))\n        x = array_ops.reshape(x, expanded_shape)\n    elif ndims == 0:\n        if x_static_val is not None:\n            x = ops.convert_to_tensor(np.array([x_static_val], dtype=x.dtype.as_numpy_dtype()), name=name)\n        else:\n            x = array_ops.reshape(x, [1])\n    elif ndims != 1:\n        raise ValueError('Input is neither scalar nor vector.')\n    return (x, prod)",
    "docstring": "Helper to which ensures input is 1D.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\distributions\\distribution.py",
    "ast_data": "FunctionDef name:_expand_sample_shape_to_vector arg:self arg:x arg:name arguments arg arg arg Assign Call If Compare Assign Call Assign Call Call Assign Call If Compare Assign Call Assign Call Call Call Call Assign Call If Compare If Compare Assign Call Call Call Assign Call If Compare Raise Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_lookup_dependency",
    "source_code": "def _lookup_dependency(self, name, cached_dependencies=None):\n    unconditional = super()._lookup_dependency(name, cached_dependencies)\n    if unconditional is not None:\n        return unconditional\n    graph = None if context.executing_eagerly() else ops.get_default_graph()\n    return self._get_non_slot_variable(name, graph=graph)",
    "docstring": "From Trackable. Find a non-slot variable in the current graph.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\training\\optimizer.py",
    "ast_data": "FunctionDef name:_lookup_dependency arg:self arg:name arg:cached_dependencies arguments arg arg arg Assign Call Call If Compare Return return:yes Assign Call Call Return return:yes Call"
  },
  {
    "library": "django",
    "name": "get_table_list",
    "source_code": "def get_table_list(self, cursor):\n    cursor.execute('\\n            SELECT\\n                table_name,\\n                table_type,\\n                table_comment\\n            FROM information_schema.tables\\n            WHERE table_schema = DATABASE()\\n            ')\n    return [TableInfo(row[0], {'BASE TABLE': 't', 'VIEW': 'v'}.get(row[1]), row[2]) for row in cursor.fetchall()]",
    "docstring": "Return a list of table and view names in the current database.",
    "type": "method",
    "file_path": "django\\django\\db\\backends\\mysql\\introspection.py",
    "ast_data": "FunctionDef name:get_table_list arg:self arg:cursor arguments arg arg Call Return return:yes Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_set_checkpoint_initializer",
    "source_code": "def _set_checkpoint_initializer(variable, ckpt_file, tensor_name, slice_spec, name='checkpoint_initializer'):\n    base_type = variable.dtype.base_dtype\n    with ops.device(variable.device), ops.device('/cpu:0'):\n        restore_op = io_ops.restore_v2(ckpt_file, [tensor_name], [slice_spec], [base_type], name=name)[0]\n        names_to_saveables = saveable_object_util.op_list_to_dict([variable])\n        saveable_objects = []\n        for name, op in names_to_saveables.items():\n            for s in saveable_object_util.saveable_objects_for_op(op, name):\n                saveable_objects.append(s)\n        assert len(saveable_objects) == 1\n    init_op = saveable_objects[0].restore([restore_op], restored_shapes=None)\n    variable._initializer_op = init_op\n    restore_op.set_shape(variable.shape)\n    variable._initial_value = restore_op",
    "docstring": "Overrides given variable's initialization op. Sets variable initializer to assign op that initializes variable from tensor's value in the checkpoint. Args: variable: object. ckpt_file: string, full path of the checkpoint. tensor_name: Name of the tensor to load from the checkpoint. slice_spec: Slice specification for loading partitioned tensors. name: Name of the operation.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\training\\checkpoint_utils.py",
    "ast_data": "FunctionDef name:_set_checkpoint_initializer arg:variable arg:ckpt_file arg:tensor_name arg:slice_spec arg:name arguments arg arg arg arg arg Assign With Call Call Assign Call Assign Call Assign For Call For Call Call Compare Call Assign Call Assign Call Assign"
  },
  {
    "library": "scipy",
    "name": "_complex_via_real_components",
    "source_code": "def _complex_via_real_components(func, input, weights, output, cval, **kwargs):\n    complex_input = input.dtype.kind == 'c'\n    complex_weights = weights.dtype.kind == 'c'\n    if complex_input and complex_weights:\n        func(input.real, weights.real, output=output.real, cval=np.real(cval), **kwargs)\n        output.real -= func(input.imag, weights.imag, output=None, cval=np.imag(cval), **kwargs)\n        func(input.real, weights.imag, output=output.imag, cval=np.real(cval), **kwargs)\n        output.imag += func(input.imag, weights.real, output=None, cval=np.imag(cval), **kwargs)\n    elif complex_input:\n        func(input.real, weights, output=output.real, cval=np.real(cval), **kwargs)\n        func(input.imag, weights, output=output.imag, cval=np.imag(cval), **kwargs)\n    else:\n        if np.iscomplexobj(cval):\n            raise ValueError('Cannot provide a complex-valued cval when the input is real.')\n        func(input, weights.real, output=output.real, cval=cval, **kwargs)\n        func(input, weights.imag, output=output.imag, cval=cval, **kwargs)\n    return output",
    "docstring": "Complex convolution via a linear combination of real convolutions.",
    "type": "function",
    "file_path": "scipy\\scipy\\ndimage\\_filters.py",
    "ast_data": "FunctionDef name:_complex_via_real_components arg:func arg:input arg:weights arg:output arg:cval arguments arg arg arg arg arg arg Assign Compare Assign Compare If BoolOp Call Call Call Call Call Call Call Call If Call Call Call Call If Call Raise Call Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_run_single_worker",
    "source_code": "def _run_single_worker(worker_fn, strategy, cluster_spec, task_type, task_id, session_config, rpc_layer='', worker_barrier=None, coord=None):\n    session_config = copy.deepcopy(session_config)\n    strategy = copy.deepcopy(strategy)\n    if task_type == _TaskType.EVALUATOR:\n        if strategy:\n            strategy.configure(session_config)\n    else:\n        assert strategy\n        strategy.configure(session_config, cluster_spec, task_type, task_id)\n    context = _WorkerContext(strategy, cluster_spec, task_type, task_id, session_config=session_config, rpc_layer=rpc_layer, worker_barrier=worker_barrier)\n    with context:\n        if coord:\n            with coord.stop_on_exception():\n                return worker_fn(strategy)\n        else:\n            return worker_fn(strategy)",
    "docstring": "Runs a single worker by calling under context.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\distribute_coordinator.py",
    "ast_data": "FunctionDef name:_run_single_worker arg:worker_fn arg:strategy arg:cluster_spec arg:task_type arg:task_id arg:session_config arg:rpc_layer arg:worker_barrier arg:coord arguments arg arg arg arg arg arg arg arg arg Assign Call Assign Call If Compare If Call Call Assign Call With If With Call Return return:yes Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "get_output_details",
    "source_code": "def get_output_details(self):\n    result = {}\n    for output_name, tensor_index in self._outputs:\n        result[output_name] = self._interpreter._get_tensor_details(tensor_index, self._subgraph_index)\n    return result",
    "docstring": "Gets output tensor details. Returns: A dictionary from input name to tensor details where each item is a dictionary with details about an output tensor. The dictionary contains the same fields as described for .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\lite\\python\\interpreter.py",
    "ast_data": "FunctionDef name:get_output_details arg:self arguments arg Assign For Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "loss_scale",
    "source_code": "@property\ndef loss_scale(self):\n    if isinstance(self._loss_scale, _DynamicLossScaleState):\n        return tensor_conversion.convert_to_tensor_v2_with_dispatch(self._loss_scale.current_loss_scale)\n    else:\n        return tensor_conversion.convert_to_tensor_v2_with_dispatch(self._loss_scale)",
    "docstring": "The current loss scale as a float32 scalar tensor.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\mixed_precision\\loss_scale_optimizer.py",
    "ast_data": "FunctionDef name:loss_scale arg:self arguments arg If Call Return return:yes Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "wait_stream",
    "source_code": "def wait_stream(self, stream) -> None:\n    self.wait_event(stream.record_event())",
    "docstring": "Synchronize with another stream. All future work submitted to this stream will wait until all kernels submitted to a given stream at the time of call complete. Args: stream (Stream): a stream to synchronize.",
    "type": "method",
    "file_path": "pytorch\\torch\\xpu\\streams.py",
    "ast_data": "FunctionDef name:wait_stream arg:self arg:stream arguments arg arg Call Call"
  },
  {
    "library": "django",
    "name": "__str__",
    "source_code": "def __str__(self):\n    return self.ewkt",
    "docstring": "EWKT is used for the string representation.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\geos\\geometry.py",
    "ast_data": "FunctionDef name:__str__ arg:self arguments arg Return return:yes"
  },
  {
    "library": "sphinx",
    "name": "desc_content",
    "source_code": "class desc_content(nodes.General, nodes.Element):\n    pass",
    "docstring": "Node for object description content. Must be the last child node in a :py:class: node.",
    "type": "class",
    "file_path": "sphinx\\sphinx\\addnodes.py",
    "ast_data": "ClassDef name:desc_content"
  },
  {
    "library": "scikit-learn",
    "name": "fit",
    "source_code": "@_fit_context(prefer_skip_nested_validation=True)\ndef fit(self, X, y=None):\n    X = validate_data(self, X, accept_sparse='csr')\n    rnd = check_random_state(self.random_state)\n    n_samples = X.shape[0]\n    if self.n_components > n_samples:\n        n_components = n_samples\n        warnings.warn('n_components > n_samples. This is not possible.\\nn_components was set to n_samples, which results in inefficient evaluation of the full kernel.')\n    else:\n        n_components = self.n_components\n    n_components = min(n_samples, n_components)\n    inds = rnd.permutation(n_samples)\n    basis_inds = inds[:n_components]\n    basis = X[basis_inds]\n    basis_kernel = pairwise_kernels(basis, metric=self.kernel, filter_params=True, n_jobs=self.n_jobs, **self._get_kernel_params())\n    U, S, V = svd(basis_kernel)\n    S = np.maximum(S, 1e-12)\n    self.normalization_ = np.dot(U / np.sqrt(S), V)\n    self.components_ = basis\n    self.component_indices_ = basis_inds\n    self._n_features_out = n_components\n    return self",
    "docstring": "Fit estimator to data. Samples a subset of training points, computes kernel on these and computes normalization matrix. Parameters ---------- X : array-like, shape (n_samples, n_features) Training data, where is the number of samples and is the number of features. y : array-like, shape (n_samples,) or (n_samples, n_outputs), default=None Target values (None for unsupervised transformations). Returns ------- self : object Returns the instance itself.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\kernel_approximation.py",
    "ast_data": "FunctionDef name:fit arg:self arg:X arg:y arguments arg arg arg Assign Call Assign Call Assign If Compare Assign Call Assign Assign Call Assign Call Assign Assign Assign Call Call Assign Call Assign Call Assign Call Call Assign Assign Assign Return return:yes Call"
  },
  {
    "library": "sphinx",
    "name": "AnchorCheckParser",
    "source_code": "class AnchorCheckParser(HTMLParser):\n\n    def __init__(self, search_anchor: str) -> None:\n        super().__init__()\n        self.search_anchor = search_anchor\n        self.found = False\n\n    def handle_starttag(self, tag: Any, attrs: Any) -> None:\n        for key, value in attrs:\n            if key in {'id', 'name'} and value == self.search_anchor:\n                self.found = True\n                break",
    "docstring": "Specialised HTML parser that looks for a specific anchor.",
    "type": "class",
    "file_path": "sphinx\\sphinx\\builders\\linkcheck.py",
    "ast_data": "ClassDef name:AnchorCheckParser FunctionDef name:__init__ arg:self arg:search_anchor arguments arg arg Call Call Assign Assign FunctionDef name:handle_starttag arg:self arg:tag arg:attrs arguments arg arg arg For If BoolOp Compare Compare Assign"
  },
  {
    "library": "matplotlib",
    "name": "edges",
    "source_code": "@property\ndef edges(self):\n    if self._edges is None:\n        self._edges = self.get_cpp_triangulation().get_edges()\n    return self._edges",
    "docstring": "Return integer array of shape (nedges, 2) containing all edges of non-masked triangles. Each row defines an edge by its start point index and end point index. Each edge appears only once, i.e. for an edge between points *i* and *j*, there will only be either *(i, j)* or *(j, i)*.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\tri\\_triangulation.py",
    "ast_data": "FunctionDef name:edges arg:self arguments arg If Compare Assign Call Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "__call__",
    "source_code": "def __call__(self, axis_artist, transform):\n    return self.new_line(axis_artist, transform)",
    "docstring": "Given the AxisArtist instance, and transform for the path (set_path method), return the Matplotlib artist for drawing the axis line.",
    "type": "method",
    "file_path": "matplotlib\\lib\\mpl_toolkits\\axisartist\\axisline_style.py",
    "ast_data": "FunctionDef name:__call__ arg:self arg:axis_artist arg:transform arguments arg arg arg Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "get_params",
    "source_code": "def get_params(self, deep=True):\n    return dict(kernels=self.kernels)",
    "docstring": "Get parameters of this kernel. Parameters ---------- deep : bool, default=True If True, will return the parameters for this estimator and contained subobjects that are estimators. Returns ------- params : dict Parameter names mapped to their values.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\gaussian_process\\kernels.py",
    "ast_data": "FunctionDef name:get_params arg:self arg:deep arguments arg arg Return return:yes Call"
  },
  {
    "library": "django",
    "name": "ExternalReferenceForbidden",
    "source_code": "class ExternalReferenceForbidden(DefusedXmlException):\n\n    def __init__(self, context, base, sysid, pubid):\n        super().__init__()\n        self.context = context\n        self.base = base\n        self.sysid = sysid\n        self.pubid = pubid\n\n    def __str__(self):\n        tpl = \"ExternalReferenceForbidden(system_id='{}', public_id={})\"\n        return tpl.format(self.sysid, self.pubid)",
    "docstring": "Resolving an external reference is forbidden.",
    "type": "class",
    "file_path": "django\\django\\core\\serializers\\xml_serializer.py",
    "ast_data": "ClassDef name:ExternalReferenceForbidden FunctionDef name:__init__ arg:self arg:context arg:base arg:sysid arg:pubid arguments arg arg arg arg arg Call Call Assign Assign Assign Assign FunctionDef name:__str__ arg:self arguments arg Assign Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "use_sticky_edges",
    "source_code": "@property\ndef use_sticky_edges(self):\n    return self._use_sticky_edges",
    "docstring": "When autoscaling, whether to obey all . Default is `autoscaleautoscale_view` is called.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\axes\\_base.py",
    "ast_data": "FunctionDef name:use_sticky_edges arg:self arguments arg Return return:yes"
  },
  {
    "library": "pandas",
    "name": "concat_vertical",
    "source_code": "@classmethod\ndef concat_vertical(cls, mgrs: list[Self], axes: list[Index]) -> Self:\n    raise NotImplementedError('This logic lives (for now) in internals.concat')",
    "docstring": "Concatenate uniformly-indexed BlockManagers vertically.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\internals\\managers.py",
    "ast_data": "FunctionDef name:concat_vertical arg:cls arg:mgrs arg:axes arguments arg arg arg Raise Call"
  },
  {
    "library": "matplotlib",
    "name": "waitforbuttonpress",
    "source_code": "def waitforbuttonpress(self, timeout=-1):\n    event = None\n\n    def handler(ev):\n        nonlocal event\n        event = ev\n        self.canvas.stop_event_loop()\n    _blocking_input.blocking_input_loop(self, ['button_press_event', 'key_press_event'], timeout, handler)\n    return None if event is None else event.name == 'key_press_event'",
    "docstring": "Blocking call to interact with the figure. Wait for user input and return True if a key was pressed, False if a mouse button was pressed and None if no input was given within *timeout* seconds. Negative values deactivate *timeout*.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\figure.py",
    "ast_data": "FunctionDef name:waitforbuttonpress arg:self arg:timeout arguments arg arg Assign FunctionDef name:handler arg:ev arguments arg Assign Call Call Return return:yes Compare Compare"
  },
  {
    "library": "pytorch",
    "name": "wrap_tensor",
    "source_code": "def wrap_tensor(self, tx: 'InstructionTranslator', tensor_value):\n    from ..decorators import mark_static_address\n    if tensor_value in self.tensor_to_source:\n        mark_static_address(tensor_value)\n        source = self.tensor_to_source[tensor_value]\n        self.static_tensor_names.add(tx.output.module_key_name(source.name()))\n    elif tensor_value in self.grad_to_source:\n        source = self.grad_to_source[tensor_value]\n    else:\n        mark_static_address(tensor_value)\n        global_name = tx.store_global_weakref_by_id(GLOBAL_KEY_PREFIX, tensor_value)\n        source = GlobalWeakRefSource(global_name)\n        self.static_tensor_names.add(tx.output.module_key_name(source.name()))\n    return VariableTracker.build(tx, tensor_value, source)",
    "docstring": "Wrap state tensor in a TensorVariable",
    "type": "method",
    "file_path": "pytorch\\torch\\_dynamo\\variables\\optimizer.py",
    "ast_data": "FunctionDef name:wrap_tensor arg:self arg:tx arg:tensor_value arguments arg arg arg If Compare Call Assign Call Call Call If Compare Assign Call Assign Call Assign Call Call Call Call Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "clean_backtick_quoted_toks",
    "source_code": "def clean_backtick_quoted_toks(tok: tuple[int, str]) -> tuple[int, str]:\n    toknum, tokval = tok\n    if toknum == BACKTICK_QUOTED_STRING:\n        return (tokenize.NAME, create_valid_python_identifier(tokval))\n    return (toknum, tokval)",
    "docstring": "Clean up a column name if surrounded by backticks. Backtick quoted string are indicated by a certain tokval value. If a string is a backtick quoted token it will processed by :func: so that the parser can find this string when the query is executed. In this case the tok will get the NAME tokval. Parameters ---------- tok : tuple of int, str ints correspond to the all caps constants in the tokenize module Returns ------- tok : Tuple[int, str] Either the input or token or the replacement values",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\computation\\parsing.py",
    "ast_data": "FunctionDef name:clean_backtick_quoted_toks arg:tok arguments arg Assign If Compare Return return:yes Call Return return:yes"
  },
  {
    "library": "pandas",
    "name": "UnsupportedFunctionCall",
    "source_code": "class UnsupportedFunctionCall(ValueError):\n    pass",
    "docstring": "Exception raised when attempting to call a unsupported numpy function. For example, ``. See Also -------- DataFrame.groupby : Group DataFrame using a mapper or by a Series of columns. Series.groupby : Group Series using a mapper or by a Series of columns. core.groupby.GroupBy.cumsum : Compute cumulative sum for each group. Examples -------- >>> df = pd.DataFrame( ... {\"A\": [0, 0, 1, 1], \"B\": [\"x\", \"x\", \"z\", \"y\"], \"C\": [1, 2, 3, 4]} ... ) >>> np.cumsum(df.groupby([\"A\"])) Traceback (most recent call last): UnsupportedFunctionCall: numpy operations are not valid with groupby. Use .groupby(...).cumsum() instead",
    "type": "class",
    "file_path": "pandas\\pandas\\errors\\__init__.py",
    "ast_data": "ClassDef name:UnsupportedFunctionCall"
  },
  {
    "library": "pytorch",
    "name": "onnx_compatible",
    "source_code": "def onnx_compatible(self) -> bool:\n    return self in _SCALAR_TYPE_TO_ONNX and self != JitScalarType.UNDEFINED and (self != JitScalarType.COMPLEX32)",
    "docstring": "Return whether this JitScalarType is compatible with ONNX.",
    "type": "method",
    "file_path": "pytorch\\torch\\onnx\\_type_utils.py",
    "ast_data": "FunctionDef name:onnx_compatible arg:self arguments arg Return return:yes BoolOp Compare Compare Compare"
  },
  {
    "library": "pytorch",
    "name": "math_sdp_enabled",
    "source_code": "def math_sdp_enabled():\n    return torch._C._get_math_sdp_enabled()",
    "docstring": ".. warning:: This flag is beta and subject to change. Returns whether math scaled dot product attention is enabled or not.",
    "type": "function",
    "file_path": "pytorch\\torch\\backends\\cuda\\__init__.py",
    "ast_data": "FunctionDef name:math_sdp_enabled arguments Return return:yes Call"
  },
  {
    "library": "seaborn",
    "name": "_handle_wrapping",
    "source_code": "def _handle_wrapping(self, facet_spec: FacetSpec, pair_spec: PairSpec) -> None:\n    self.wrap = wrap = facet_spec.get('wrap') or pair_spec.get('wrap')\n    if not wrap:\n        return\n    wrap_dim = 'row' if self.subplot_spec['nrows'] > 1 else 'col'\n    flow_dim = {'row': 'col', 'col': 'row'}[wrap_dim]\n    n_subplots = self.subplot_spec[f'n{wrap_dim}s']\n    flow = int(np.ceil(n_subplots / wrap))\n    if wrap < self.subplot_spec[f'n{wrap_dim}s']:\n        self.subplot_spec[f'n{wrap_dim}s'] = wrap\n    self.subplot_spec[f'n{flow_dim}s'] = flow\n    self.n_subplots = n_subplots\n    self.wrap_dim = wrap_dim",
    "docstring": "Update figure structure parameters based on facet/pair wrapping.",
    "type": "method",
    "file_path": "seaborn\\seaborn\\_core\\subplots.py",
    "ast_data": "FunctionDef name:_handle_wrapping arg:self arg:facet_spec arg:pair_spec arguments arg arg arg Assign BoolOp Call Call If Return return:no Assign Compare Assign Assign Assign Call Call If Compare Assign Assign Assign Assign"
  },
  {
    "library": "numpy",
    "name": "savez",
    "source_code": "@array_function_dispatch(_savez_dispatcher)\ndef savez(file, *args, allow_pickle=True, **kwds):\n    _savez(file, args, kwds, False, allow_pickle=allow_pickle)",
    "docstring": "Save several arrays into a single file in uncompressed `arr_0arr_1kwdsnumpy.lib.formatload~lib.npyio.NpzFilekwdssavezsavez` with \\**kwds, the arrays are saved with the keyword names. >>> outfile = TemporaryFile() >>> np.savez(outfile, x=x, y=y) >>> _ = outfile.seek(0) >>> npzfile = np.load(outfile) >>> sorted(npzfile.files) ['x', 'y'] >>> npzfile['x'] array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])",
    "type": "function",
    "file_path": "numpy\\numpy\\lib\\_npyio_impl.py",
    "ast_data": "FunctionDef name:savez arg:file arguments arg arg arg arg Call Call"
  },
  {
    "library": "tensorflow",
    "name": "device",
    "source_code": "@tf_contextlib.contextmanager\ndef device(self, device_name_or_function) -> Iterator[None]:\n    self._add_device_to_stack(device_name_or_function, offset=2)\n    old_top_of_stack = self._device_function_stack.peek_top_obj()\n    try:\n        yield\n    finally:\n        new_top_of_stack = self._device_function_stack.peek_top_obj()\n        if old_top_of_stack is not new_top_of_stack:\n            raise RuntimeError('Exiting device scope without proper scope nesting.')\n        self._device_function_stack.pop_obj()",
    "docstring": "Returns a context manager that specifies the default device to use. The argument may either be a device name string, a device function, or None: * If it is a device name string, all operations constructed in this context will be assigned to the device with that name, unless overridden by a nested context. * If it is a function, it will be treated as a function from Operation objects to device name strings, and invoked each time a new Operation is created. The Operation will be assigned to the device with the returned name. * If it is None, all invocations from the enclosing context will be ignored. For information about the valid syntax of device name strings, see the documentation in []( For example: **N.B.** The device scope may be overridden by op wrappers or other library code. For example, a variable assignment op must be colocated with the , and incompatible device scopes will be ignored. Args: device_name_or_function: The device name or function to use in the context. Yields: A context manager that specifies the default device to use for newly created ops. Raises: RuntimeError: If device scopes are not properly nested.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\ops.py",
    "ast_data": "FunctionDef name:device arg:self arg:device_name_or_function arguments arg arg Call Assign Call Try Assign Call If Compare Raise Call Call"
  },
  {
    "library": "scipy",
    "name": "__call__",
    "source_code": "def __call__(self, x):\n    return _Interpolator1D.__call__(self, x)",
    "docstring": "Evaluate the interpolating polynomial at the points x Parameters ---------- x : array_like Point or points at which to evaluate the interpolant. Returns ------- y : array_like Interpolated values. Shape is determined by replacing the interpolation axis in the original array with the shape of . Notes ----- Currently the code computes an outer product between and the weights, that is, it constructs an intermediate array of size ``, where N is the degree of the polynomial.",
    "type": "method",
    "file_path": "scipy\\scipy\\interpolate\\_polyint.py",
    "ast_data": "FunctionDef name:__call__ arg:self arg:x arguments arg arg Return return:yes Call"
  },
  {
    "library": "django",
    "name": "read",
    "source_code": "def read(self, wkt):\n    return GEOSGeometry(super().read(wkt))",
    "docstring": "Return a GEOSGeometry for the given WKT string.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\geos\\io.py",
    "ast_data": "FunctionDef name:read arg:self arg:wkt arguments arg arg Return return:yes Call Call Call"
  },
  {
    "library": "scikit-learn",
    "name": "inverse_transform",
    "source_code": "def inverse_transform(self, X):\n    check_is_fitted(self)\n    X = check_array(X, dtype=[np.float64, np.float32], accept_sparse=('csr', 'csc'))\n    if self.compute_inverse_components:\n        return X @ self.inverse_components_.T\n    inverse_components = self._compute_inverse_components()\n    return X @ inverse_components.T",
    "docstring": "Project data back to its original space. Returns an array X_original whose transform would be X. Note that even if X is sparse, X_original is dense: this may use a lot of RAM. If is False, the inverse of the components is computed during each call to which can be costly. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_components) Data to be transformed back. Returns ------- X_original : ndarray of shape (n_samples, n_features) Reconstructed data.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\random_projection.py",
    "ast_data": "FunctionDef name:inverse_transform arg:self arg:X arguments arg arg Call Assign Call If Return return:yes Assign Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_collapsed_shape",
    "source_code": "def _collapsed_shape(shape: ShapeType, start: int, end: int) -> tuple[int, ...]:\n    shape = (1,) if len(shape) == 0 else tuple(shape)\n    dim_length = 1\n    for s in shape[start:end + 1]:\n        dim_length = dim_length * s\n    return shape[0:start] + (dim_length,) + shape[end + 1:]",
    "docstring": "Returns the shape of a with dims in [start, end) merged into a single dimension.",
    "type": "function",
    "file_path": "pytorch\\torch\\_prims\\__init__.py",
    "ast_data": "FunctionDef name:_collapsed_shape arg:shape arg:start arg:end arguments arg arg arg Assign Compare Call Call Assign For Assign Return return:yes"
  },
  {
    "library": "sphinx",
    "name": "desc_sig_space",
    "source_code": "class desc_sig_space(desc_sig_element, _sig_element=True):\n    classes = ['w']\n\n    def __init__(self, rawsource: str='', text: str=' ', *children: Element, **attributes: Any) -> None:\n        super().__init__(rawsource, text, *children, **attributes)",
    "docstring": "Node for a space in a signature.",
    "type": "class",
    "file_path": "sphinx\\sphinx\\addnodes.py",
    "ast_data": "ClassDef name:desc_sig_space Assign FunctionDef name:__init__ arg:self arg:rawsource arg:text arguments arg arg arg arg arg Call Call"
  },
  {
    "library": "tensorflow",
    "name": "global_step",
    "source_code": "@property\ndef global_step(self):\n    return self._global_step",
    "docstring": "Return the global_step Tensor used by the supervisor. Returns: An integer Tensor for the global_step.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\training\\supervisor.py",
    "ast_data": "FunctionDef name:global_step arg:self arguments arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_group_tensors_by_device_and_dtype",
    "source_code": "@staticmethod\ndef _group_tensors_by_device_and_dtype(tensorlistlist: TensorListList, with_indices: bool=False) -> Union[dict[tuple[None, None], tuple[TensorListList, Indices]], dict[tuple[torch.device, torch.dtype], tuple[TensorListList, Indices]]]:\n    if torch.compiler.is_compiling():\n        return {(None, None): (tensorlistlist, list(range(len(tensorlistlist[0]))))}\n    else:\n        return _group_tensors_by_device_and_dtype(tensorlistlist, with_indices)",
    "docstring": "Group a list of lists of tensors by device and dtype. Skips this step if we are compiling since this will occur during inductor lowering.",
    "type": "method",
    "file_path": "pytorch\\torch\\optim\\optimizer.py",
    "ast_data": "FunctionDef name:_group_tensors_by_device_and_dtype arg:tensorlistlist arg:with_indices arguments arg arg If Call Return return:yes Call Call Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "_add_op_to_registry",
    "source_code": "def _add_op_to_registry(registry, op, fn):\n    overloads: list[Union[torch._ops.OperatorBase]] = []\n    if isinstance(op, HigherOrderOperator):\n        registry[op] = fn\n        return\n    elif isinstance(op, OpOverload):\n        overloads.append(op)\n    else:\n        assert isinstance(op, OpOverloadPacket)\n        for ol in op.overloads():\n            overloads.append(getattr(op, ol))\n    for op_overload in overloads:\n        if op_overload in registry:\n            raise RuntimeError(f'duplicate registrations for {op_overload}')\n        if torch._C._dispatch_has_kernel(op_overload.name()):\n            registry[op_overload] = fn",
    "docstring": "This is an internal API for adding an op to the decomposition table. If op is OpOverload, it will be added to the registry directly. If op is OpOverloadPacket, all the valid op_overloads in the packet will be added to the registry.",
    "type": "function",
    "file_path": "pytorch\\torch\\_decomp\\__init__.py",
    "ast_data": "FunctionDef name:_add_op_to_registry arg:registry arg:op arg:fn arguments arg arg arg If Call Assign Return return:no If Call Call Call For Call Call Call For If Compare Raise Call If Call Call Assign"
  },
  {
    "library": "tensorflow",
    "name": "_in_multi_worker_mode",
    "source_code": "def _in_multi_worker_mode(self):\n    raise NotImplementedError('must be implemented in descendants')",
    "docstring": "Whether this strategy indicates working in multi-worker settings. Multi-worker training refers to the setup where the training is distributed across multiple workers, as opposed to the case where only a local process performs the training. This function is used by higher-level APIs such as Keras' to infer for example whether or not a distribute coordinator should be run, and thus TensorFlow servers should be started for communication with other servers in the cluster, or whether or not saving/restoring checkpoints is relevant for preemption fault tolerance. Subclasses should override this to provide whether the strategy is currently in multi-worker setup. Experimental. Signature and implementation are subject to change.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\distribute_lib.py",
    "ast_data": "FunctionDef name:_in_multi_worker_mode arg:self arguments arg Raise Call"
  },
  {
    "library": "scikit-learn",
    "name": "check_estimators_unfitted",
    "source_code": "@ignore_warnings\ndef check_estimators_unfitted(name, estimator_orig):\n    err_msg = 'Estimator should raise a NotFittedError when calling `{method}` before fit. Either call `check_is_fitted(self)` at the beginning of `{method}` or set `tags.requires_fit=False` on estimator tags to disable this check.\\n- `check_is_fitted`: https://scikit-learn.org/dev/modules/generated/sklearn.utils.validation.check_is_fitted.html\\n- Estimator Tags: https://scikit-learn.org/dev/developers/develop.html#estimator-tags'\n    X, y = _regression_dataset()\n    estimator = clone(estimator_orig)\n    for method in ('decision_function', 'predict', 'predict_proba', 'predict_log_proba'):\n        if hasattr(estimator, method):\n            with raises(NotFittedError, err_msg=err_msg.format(method=method)):\n                getattr(estimator, method)(X)",
    "docstring": "Check that predict raises an exception in an unfitted estimator. Unfitted estimators should raise a NotFittedError.",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\utils\\estimator_checks.py",
    "ast_data": "FunctionDef name:check_estimators_unfitted arg:name arg:estimator_orig arguments arg arg Assign Assign Call Assign Call For If Call With Call Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "cpu",
    "source_code": "def cpu(self, memory_format=torch.preserve_format, process_group=None) -> ShardedTensor:\n    if memory_format != torch.preserve_format and memory_format != torch.contiguous_format:\n        raise RuntimeError('Only `torch.contiguous_format` or `torch.preserve_format` is supported!')\n    all_on_cpu = True\n    for meta in self.metadata().shards_metadata:\n        all_on_cpu &= meta.placement.device().type == 'cpu'\n    if all_on_cpu:\n        return self\n    list_shards: list[Shard] = []\n    for shard in self._local_shards:\n        cpu_tensor = shard.tensor.cpu(memory_format=memory_format)\n        metadata = copy.deepcopy(shard.metadata)\n        metadata.placement._device = torch.device('cpu')\n        list_shards.append(Shard(cpu_tensor, metadata))\n    st_meta = copy.deepcopy(self.metadata())\n    for meta in st_meta.shards_metadata:\n        if meta.placement.device().type != 'cpu':\n            meta.placement._device = torch.device('cpu')\n    pg = self._process_group if process_group is None else process_group\n    st_cpu = ShardedTensor._init_from_local_shards_and_global_metadata(list_shards, sharded_tensor_metadata=st_meta, process_group=pg, init_rrefs=self._init_rrefs)\n    return st_cpu",
    "docstring": "Returns a copy of this object in CPU memory. If this ShardedTensor is already on CPU memory, then no copy is performed and original object is returned. .. note:: When moving a ShardedTensor from GPU to CPU, the ShardedTensor might need to be managed by a different type of ProcessGroup(i.e. ProcessGroupGloo), it is the user's responsiblity to explicitly pass in a new process_group that is compatible with CPU.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\_shard\\sharded_tensor\\api.py",
    "ast_data": "FunctionDef name:cpu arg:self arg:memory_format arg:process_group arguments arg arg arg If BoolOp Compare Compare Raise Call Assign For Call Compare Call If Return return:yes For Assign Call Assign Call Assign Call Call Call Assign Call Call For If Compare Call Assign Call Assign Compare Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_create_grad_func",
    "source_code": "def _create_grad_func(ys, xs, grads, cond_graph, body_graph, name, while_op, maximum_iterations):\n    assert len(ys) == len(grads)\n    total_iters = while_op.outputs[0]\n    counter = constant_op.constant(0, dtype=total_iters.dtype, name='grad_counter')\n    body_graph_inputs = object_identity.ObjectIdentitySet(body_graph.inputs)\n    body_graph_outputs = object_identity.ObjectIdentitySet(body_graph.outputs)\n    args = [counter, maximum_iterations, total_iters] + list(grads)\n    grad_func_graph = func_graph_module.func_graph_from_py_func(name, lambda *args: _grad_fn(ys, xs, args, body_graph), args, {}, func_graph=_WhileBodyGradFuncGraph(name, cond_graph, body_graph, maximum_iterations, while_op, body_graph_inputs, body_graph_outputs))\n    for external_capture, internal_capture in grad_func_graph.captures:\n        if ops.tensor_id(internal_capture) in grad_func_graph.internal_capture_to_output:\n            new_output = grad_func_graph.internal_capture_to_output[ops.tensor_id(internal_capture)]\n        else:\n            raise ValueError(f'Tensor {str(internal_capture)} which captures {str(external_capture)} is in list of internal_captures but not in internal_capture_to_output.')\n        grad_func_graph.outputs.append(new_output)\n        grad_func_graph.structured_outputs.append(new_output)\n    return (grad_func_graph, args)",
    "docstring": "Builds and returns the gradient FuncGraph of and its args. The returned grad_func_graph must be called with the returned args + grad_func_graph.captures. Args: ys: A or list of tensors to be differentiated. xs: A or list of tensors to be used for differentiation. grads: The incoming grads for . cond_graph: FuncGraph for the forward cond function. body_graph: FuncGraph for the forward body function. name: Name of the returned gradient function. while_op: The forward While op. maximum_iterations: Tensor. The maximum number of iterations. Returns: 2-tuple of (grad_func_graph, args).",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\while_v2.py",
    "ast_data": "FunctionDef name:_create_grad_func arg:ys arg:xs arg:grads arg:cond_graph arg:body_graph arg:name arg:while_op arg:maximum_iterations arguments arg arg arg arg arg arg arg arg Compare Call Call Assign Assign Call Assign Call Assign Call Assign Call Assign Call arguments arg Call Call For If Compare Call Assign Call Raise Call Call Call Call Call Return return:yes"
  },
  {
    "library": "django",
    "name": "f",
    "source_code": "def f(self):\n    hour = self.data.hour % 12 or 12\n    minute = self.data.minute\n    return '%d:%02d' % (hour, minute) if minute else hour",
    "docstring": "Time, in 12-hour hours and minutes, with minutes left off if they're zero. Examples: '1', '1:30', '2:05', '2' Proprietary extension.",
    "type": "method",
    "file_path": "django\\django\\utils\\dateformat.py",
    "ast_data": "FunctionDef name:f arg:self arguments arg Assign BoolOp Assign Return return:yes"
  },
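The `hour % 12 or 12` idiom above is worth spelling out: `12 % 12 == 0` is falsy, so noon and midnight map to 12 rather than 0. A standalone sketch:

```python
for hour, minute in [(1, 0), (1, 30), (14, 5), (0, 0)]:
    h = hour % 12 or 12  # 0 and 12 both become 12
    print('%d:%02d' % (h, minute) if minute else h)
# 1, 1:30, 2:05, 12
```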
  {
    "library": "scipy",
    "name": "Leon",
    "source_code": "class Leon(Benchmark):\n\n    def __init__(self, dimensions=2):\n        Benchmark.__init__(self, dimensions)\n        self._bounds = list(zip([-1.2] * self.N, [1.2] * self.N))\n        self.global_optimum = [[1 for _ in range(self.N)]]\n        self.fglob = 0.0\n\n    def fun(self, x, *args):\n        self.nfev += 1\n        return 100.0 * (x[1] - x[0] ** 2.0) ** 2.0 + (1 - x[0]) ** 2.0",
    "docstring": "Leon objective function. This class defines the Leon [1]_ global optimization problem. This is a multimodal minimization problem defined as follows: .. math:: f_{\\text{Leon}}(\\mathbf{x}) = \\left(1 - x_{1}\\right)^{2} + 100 \\left(x_{2} - x_{1}^{2} \\right)^{2} with :math: for :math:. *Global optimum*: :math: for :math: .. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions For Global Optimization Problems Int. Journal of Mathematical Modelling and Numerical Optimisation, 2013, 4, 150-194.",
    "type": "class",
    "file_path": "scipy\\benchmarks\\benchmarks\\go_benchmark_functions\\go_funcs_L.py",
    "ast_data": "ClassDef name:Leon FunctionDef name:__init__ arg:self arg:dimensions arguments arg arg Call Assign Call Call Assign Call Assign FunctionDef name:fun arg:self arg:x arguments arg arg arg Return return:yes"
  },
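Outside the benchmark harness, the Leon function can be minimized directly; a sketch using scipy.optimize (the starting point is arbitrary):

```python
import numpy as np
from scipy.optimize import minimize

def leon(x):
    return 100.0 * (x[1] - x[0] ** 2.0) ** 2.0 + (1 - x[0]) ** 2.0

res = minimize(leon, x0=np.zeros(2), bounds=[(-1.2, 1.2)] * 2)
print(res.x, res.fun)  # expected near [1, 1] and 0.0
```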
  {
    "library": "tensorflow",
    "name": "get_process_id",
    "source_code": "def get_process_id(self, task_type, task_id):\n    with self._process_lock:\n        p = self._processes.get((task_type, task_id), None)\n    return p.pid if p else None",
    "docstring": "Returns the subprocess id given the task type and task id.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\multi_process_runner.py",
    "ast_data": "FunctionDef name:get_process_id arg:self arg:task_type arg:task_id arguments arg arg arg With Assign Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "wrap_numpy",
    "source_code": "def wrap_numpy(fn):\n    from torch._dynamo.external_utils import wrap_numpy as wrap\n    return wrap(fn)",
    "docstring": "Decorator that turns a function from `torch.compiletorch.compile`. Example:: >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_CUDA) >>> # Compile a NumPy function as a Tensor -> Tensor function >>> @torch.compile(fullgraph=True) >>> @torch.compiler.wrap_numpy >>> def fn(a: np.ndarray): >>> return np.sum(a * a) >>> # Execute the NumPy function using Tensors on CUDA and compute the gradients >>> x = torch.arange(6, dtype=torch.float32, device=\"cuda\", requires_grad=True) >>> out = fn(x) >>> out.backward() >>> print(x.grad) tensor([ 0., 2., 4., 6., 8., 10.], device='cuda:0')",
    "type": "function",
    "file_path": "pytorch\\torch\\compiler\\__init__.py",
    "ast_data": "FunctionDef name:wrap_numpy arg:fn arguments arg Return return:yes Call"
  },
  {
    "library": "kornia",
    "name": "Hue",
    "source_code": "class Hue(OperationBase):\n\n    def __init__(self, initial_magnitude: Optional[float]=0.0, initial_probability: float=0.5, magnitude_range: Tuple[float, float]=(-0.5, 0.5), temperature: float=0.1, symmetric_megnitude: bool=False) -> None:\n        super().__init__(K.RandomHue(magnitude_range, same_on_batch=False, p=initial_probability), initial_magnitude=[('hue_factor', initial_magnitude)], temperature=temperature, symmetric_megnitude=symmetric_megnitude)",
    "docstring": "Apply hue operation. Args: initial_probability: the initial probability. If None, the augmentation will be randomly applied according to he augmentation sampling range. initial_magnitude: the initial magnitude. magnitude_range: the sampling range for random sampling and clamping the optimized magnitude. temperature: temperature for RelaxedBernoulli distribution used during training. symmetric_megnitude: if to randomly assign the magnitude as negative or not.",
    "type": "class",
    "file_path": "kornia\\kornia\\augmentation\\auto\\operations\\ops.py",
    "ast_data": "ClassDef name:Hue FunctionDef name:__init__ arg:self arg:initial_magnitude arg:initial_probability arg:magnitude_range arg:temperature arg:symmetric_megnitude arguments arg arg arg arg arg arg Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "call",
    "source_code": "@abc.abstractmethod\ndef call(self, graph_module: GraphModule) -> Optional[PassResult]:\n    pass",
    "docstring": "The pass that is run through the given graph module. To implement a pass, it is required to implement this function. Args: graph_module: The graph module we will run a pass on",
    "type": "method",
    "file_path": "pytorch\\torch\\fx\\passes\\infra\\pass_base.py",
    "ast_data": "FunctionDef name:call arg:self arg:graph_module arguments arg arg"
  },
  {
    "library": "matplotlib",
    "name": "enable",
    "source_code": "def enable(self, event=None):\n    pass",
    "docstring": "Enable the toggle tool. calls this method when is False.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\backend_tools.py",
    "ast_data": "FunctionDef name:enable arg:self arg:event arguments arg arg"
  },
  {
    "library": "pygame",
    "name": "change_layer",
    "source_code": "def change_layer(self, sprite, new_layer):\n    LayeredUpdates.change_layer(self, sprite, new_layer)\n    if sprite.dirty == 0:\n        sprite.dirty = 1",
    "docstring": "change the layer of the sprite LayeredUpdates.change_layer(sprite, new_layer): return None The sprite must have been added to the renderer already. This is not checked.",
    "type": "method",
    "file_path": "pygame\\src_py\\sprite.py",
    "ast_data": "FunctionDef name:change_layer arg:self arg:sprite arg:new_layer arguments arg arg arg Call If Compare Assign"
  },
  {
    "library": "sphinx",
    "name": "_differing_config_keys",
    "source_code": "def _differing_config_keys(old: Config, new: Config) -> frozenset[str]:\n    old_vals = {c.name: c.value for c in old}\n    new_vals = {c.name: c.value for c in new}\n    not_in_both = old_vals.keys() ^ new_vals.keys()\n    different_values = {key for key in old_vals.keys() & new_vals.keys() if stable_str(old_vals[key]) != stable_str(new_vals[key])}\n    return frozenset(not_in_both | different_values)",
    "docstring": "Return a set of keys that differ between two config objects.",
    "type": "function",
    "file_path": "sphinx\\sphinx\\environment\\__init__.py",
    "ast_data": "FunctionDef name:_differing_config_keys arg:old arg:new arguments arg arg Assign Assign Assign Call Call Assign Call Call Compare Call Call Return return:yes Call"
  },
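The same key-diff logic on plain dicts, with `repr` standing in for Sphinx's `stable_str` helper (an assumption for illustration):

```python
def differing_keys(old: dict, new: dict) -> frozenset:
    not_in_both = old.keys() ^ new.keys()           # keys missing on one side
    different = {k for k in old.keys() & new.keys()
                 if repr(old[k]) != repr(new[k])}   # shared keys, changed values
    return frozenset(not_in_both | different)

print(differing_keys({'a': 1, 'b': 2}, {'b': 3, 'c': 4}))
# frozenset({'a', 'b', 'c'})
```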
  {
    "library": "matplotlib",
    "name": "host_axes",
    "source_code": "def host_axes(*args, axes_class=Axes, figure=None, **kwargs):\n    import matplotlib.pyplot as plt\n    host_axes_class = host_axes_class_factory(axes_class)\n    if figure is None:\n        figure = plt.gcf()\n    ax = host_axes_class(figure, *args, **kwargs)\n    figure.add_axes(ax)\n    return ax",
    "docstring": "Create axes that can act as a hosts to parasitic axes. Parameters ---------- figure : Figure to which the axes will be added. Defaults to the current figure . *args, **kwargs Will be passed on to the underlying object creation.",
    "type": "function",
    "file_path": "matplotlib\\lib\\mpl_toolkits\\axes_grid1\\parasite_axes.py",
    "ast_data": "FunctionDef name:host_axes arguments arg arg arg arg Assign Call If Compare Assign Call Assign Call Call Return return:yes"
  },
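A usage sketch via the companion `host_subplot` wrapper from the same toolkit, which builds host axes and a parasite twin:

```python
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import host_subplot

host = host_subplot(111)      # host axes in the current figure
par = host.twinx()            # parasite axes sharing the x-axis
host.plot([0, 1, 2], [0, 1, 2], label='host')
par.plot([0, 1, 2], [0, 3, 2], label='parasite')
host.legend()
plt.show()
```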
  {
    "library": "pytorch",
    "name": "prepare",
    "source_code": "def prepare(model, inplace=False, allow_list=None, observer_non_leaf_module_list=None, prepare_custom_config_dict=None):\n    torch._C._log_api_usage_once('quantization_api.quantize.prepare')\n    if prepare_custom_config_dict is None:\n        prepare_custom_config_dict = get_default_custom_config_dict()\n    custom_module_class_mapping = prepare_custom_config_dict.get('float_to_observed_custom_module_class', {})\n    if not inplace:\n        model = copy.deepcopy(model)\n    qconfig_propagation_list = allow_list\n    if allow_list is None:\n        qconfig_propagation_list = get_default_qconfig_propagation_list()\n    propagate_qconfig_(model, qconfig_dict=None)\n    if not any((hasattr(m, 'qconfig') and m.qconfig for m in model.modules())):\n        warnings.warn('None of the submodule got qconfig applied. Make sure you passed correct configuration through `qconfig_dict` or by assigning the `.qconfig` attribute directly on submodules')\n    _add_observer_(model, qconfig_propagation_list, observer_non_leaf_module_list, custom_module_class_mapping=custom_module_class_mapping)\n    return model",
    "docstring": "Prepares a copy of the model for quantization calibration or quantization-aware training. Quantization configuration should be assigned preemptively to individual submodules in attribute. The model will be attached with observer or fake quant modules, and qconfig will be propagated. Args: : input model to be modified in-place : carry out model transformations in-place, the original module is mutated : list of quantizable modules : list of non-leaf modules we want to add observer : customization configuration dictionary for prepare function .. code-block:: python # Example of prepare_custom_config_dict: prepare_custom_config_dict = { # user will manually define the corresponding observed # module class which has a from_float class method that converts # float custom module to observed custom module \"float_to_observed_custom_module_class\": { CustomModule: ObservedCustomModule } }",
    "type": "function",
    "file_path": "pytorch\\torch\\ao\\quantization\\quantize.py",
    "ast_data": "FunctionDef name:prepare arg:model arg:inplace arg:allow_list arg:observer_non_leaf_module_list arg:prepare_custom_config_dict arguments arg arg arg arg arg Call If Compare Assign Call Assign Call If Assign Call Assign If Compare Assign Call Call If Call BoolOp Call Call Call Call Return return:yes"
  },
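A sketch of the eager-mode flow around `prepare()`: assign a qconfig, attach observers, calibrate, then convert (the toy model and calibration data are assumptions):

```python
import torch
import torch.ao.quantization as tq

model = torch.nn.Sequential(torch.nn.Linear(4, 4), torch.nn.ReLU()).eval()
model.qconfig = tq.get_default_qconfig('fbgemm')  # assign before prepare()

prepared = tq.prepare(model)        # attaches observers, propagates qconfig
prepared(torch.randn(8, 4))         # calibration pass collects statistics
quantized = tq.convert(prepared)    # swap in quantized modules
```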
  {
    "library": "tensorflow",
    "name": "_write",
    "source_code": "def _write(self, index, value):\n    if isinstance(index, ops.EagerTensor):\n        index = index.numpy()\n    if index < 0:\n        raise errors_impl.OutOfRangeError(None, None, 'Writing to negative indices (index %d) is not allowed.' % index)\n    size = len(self._tensor_array)\n    if index >= size:\n        if not self._dynamic_size:\n            raise errors_impl.OutOfRangeError(None, None, 'Tried to write to index %d but array is not resizeable and size is: %d ' % (index, size))\n        self._tensor_array.extend((None for _ in range(index - size + 1)))\n    if not isinstance(value, ops.EagerTensor):\n        value = ops.convert_to_tensor(value, preferred_dtype=self._dtype, name='value')\n    if self._dtype != value.dtype:\n        raise errors_impl.InvalidArgumentError(None, None, 'TensorArray dtype is %s but Op is trying to write dtype %s ' % (self._dtype.name, value.dtype.name))\n    if not self._element_shape.is_compatible_with(value.shape):\n        raise ValueError('Incompatible shape for value (%s), expected (%s)' % (value.shape, self._element_shape))\n    if self._infer_shape:\n        self._element_shape = self._element_shape.merge_with(value.shape)\n    self._tensor_array[index] = value",
    "docstring": "Writes into index named by . Args: index: 0-D. int32 scalar with the index to write to. value: N-D. Tensor of type . The to write to . Raises: errors_impl.InvalidArgumentError: dtype does not match dtype. errors_impl.OutOfRangeError: is out of bounds. ValueError: shape of is not consistent with inferred shape.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\tensor_array_ops.py",
    "ast_data": "FunctionDef name:_write arg:self arg:index arg:value arguments arg arg arg If Call Assign Call If Compare Raise Call Assign Call If Compare If Raise Call Call Call If Call Assign Call If Compare Raise Call If Call Raise Call If Assign Call Assign"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, final_ops, final_ops_feed_dict=None):\n    self._final_ops = final_ops\n    self._final_ops_feed_dict = final_ops_feed_dict\n    self._final_ops_values = None",
    "docstring": "Initializes with ops to run at the end of the session. Args: final_ops: A single , a list of or a dictionary of names to . final_ops_feed_dict: A feed dictionary to use when running .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\training\\basic_session_run_hooks.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:final_ops arg:final_ops_feed_dict arguments arg arg arg Assign Assign Assign"
  },
  {
    "library": "tensorflow",
    "name": "_apply_fn",
    "source_code": "def _apply_fn(dataset):\n    return dataset.snapshot(path=path, compression=compression, reader_func=reader_func, shard_func=shard_func)",
    "docstring": "Actual dataset transformation.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\data\\experimental\\ops\\snapshot.py",
    "ast_data": "FunctionDef name:_apply_fn arg:dataset arguments arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "__str__",
    "source_code": "def __str__(self) -> str:\n    return 'MaskP'",
    "docstring": "human readable representation of the MaskPartial placement",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\tensor\\_ops\\_embedding_ops.py",
    "ast_data": "FunctionDef name:__str__ arg:self arguments arg Return return:yes"
  },
  {
    "library": "django",
    "name": "O",
    "source_code": "def O(self):\n    if self.timezone is None:\n        return ''\n    offset = self.timezone.utcoffset(self.data)\n    seconds = offset.days * 86400 + offset.seconds\n    sign = '-' if seconds < 0 else '+'\n    seconds = abs(seconds)\n    return '%s%02d%02d' % (sign, seconds // 3600, seconds // 60 % 60)",
    "docstring": "Difference to Greenwich time in hours; e.g. '+0200', '-0430'. If timezone information is not available, return an empty string.",
    "type": "method",
    "file_path": "django\\django\\utils\\dateformat.py",
    "ast_data": "FunctionDef name:O arg:self arguments arg If Compare Return return:yes Assign Call Assign Assign Compare Assign Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "extract_logger_info",
    "source_code": "def extract_logger_info(model_a: nn.Module, model_b: nn.Module, logger_cls: Callable, model_name_to_use_for_layer_names: str) -> NSResultsType:\n    torch._C._log_api_usage_once('quantization_api._numeric_suite_fx.extract_logger_info')\n    results: NSResultsType = {}\n    for model in (model_a, model_b):\n        _extract_logger_info_one_model(model, results, logger_cls)\n    maybe_add_missing_fqns(results)\n    results = rekey_logger_info_on_node_name_of_model(results, model_name_to_use_for_layer_names)\n    return results",
    "docstring": "Traverse all loggers in and , and extract the logged information. Args: model_a: model A model_b: model B logger_cls: class of Logger to use model_name_to_use_for_layer_names: string name of model to use for layer names in the output Return: NSResultsType, containing the logged comparisons",
    "type": "function",
    "file_path": "pytorch\\torch\\ao\\ns\\_numeric_suite_fx.py",
    "ast_data": "FunctionDef name:extract_logger_info arg:model_a arg:model_b arg:logger_cls arg:model_name_to_use_for_layer_names arguments arg arg arg arg Call For Call Call Assign Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "get_device_properties",
    "source_code": "def get_device_properties(device: Optional[_device_t]=None) -> _CudaDeviceProperties:\n    _lazy_init()\n    device = _get_device_index(device, optional=True)\n    if device < 0 or device >= device_count():\n        raise AssertionError('Invalid device id')\n    return _get_device_properties(device)",
    "docstring": "Get the properties of a device. Args: device (torch.device or int or str, optional): device for which to return the properties of the device. It uses the current device, given by :func:, if :attr: is `` (default). Returns: _CudaDeviceProperties: the properties of the device",
    "type": "function",
    "file_path": "pytorch\\torch\\cuda\\__init__.py",
    "ast_data": "FunctionDef name:get_device_properties arg:device arguments arg Call Assign Call If BoolOp Compare Compare Call Raise Call Return return:yes Call"
  },
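A minimal usage sketch; the attribute names are standard fields of the returned properties object:

```python
import torch

if torch.cuda.is_available():
    props = torch.cuda.get_device_properties(0)
    print(props.name, props.total_memory, props.multi_processor_count)
```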
  {
    "library": "tensorflow",
    "name": "get_registered_name",
    "source_code": "def get_registered_name(obj):\n    if obj in _GLOBAL_CUSTOM_NAMES:\n        return _GLOBAL_CUSTOM_NAMES[obj]\n    else:\n        return obj.__name__",
    "docstring": "Returns the name registered to an object within the Keras framework. This function is part of the Keras serialization and deserialization framework. It maps objects to the string names associated with those objects for serialization/deserialization. Args: obj: The object to look up. Returns: The name associated with the object, or the default Python name if the object is not registered.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\utils\\generic_utils.py",
    "ast_data": "FunctionDef name:get_registered_name arg:obj arguments arg If Compare Return return:yes Return return:yes"
  },
  {
    "library": "kornia",
    "name": "cart2pol",
    "source_code": "def cart2pol(x: Tensor, y: Tensor, eps: float=1e-08) -> tuple[Tensor, Tensor]:\n    if not isinstance(x, Tensor) & isinstance(y, Tensor):\n        raise TypeError(f'Input type is not a Tensor. Got {type(x)}, {type(y)}')\n    rho = torch.sqrt(x ** 2 + y ** 2 + eps)\n    phi = torch.atan2(y, x)\n    return (rho, phi)",
    "docstring": "Convert cartesian coordinates to polar coordinates. Args: x: Tensor of arbitrary shape. y: Tensor of same arbitrary shape. eps: To avoid division by zero. Returns: - rho: Tensor with same shape as input. - phi: Tensor with same shape as input. Example: >>> x = torch.rand(1, 3, 3) >>> y = torch.rand(1, 3, 3) >>> rho, phi = cart2pol(x, y)",
    "type": "function",
    "file_path": "kornia\\kornia\\geometry\\conversions.py",
    "ast_data": "FunctionDef name:cart2pol arg:x arg:y arg:eps arguments arg arg arg If Call Call Raise Call Call Call Assign Call Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "register_command_handler",
    "source_code": "def register_command_handler(self, prefix, handler, help_info, prefix_aliases=None):\n    self._command_handler_registry.register_command_handler(prefix, handler, help_info, prefix_aliases=prefix_aliases)\n    self._tab_completion_registry.extend_comp_items('', [prefix])\n    if prefix_aliases:\n        self._tab_completion_registry.extend_comp_items('', prefix_aliases)",
    "docstring": "A wrapper around CommandHandlerRegistry.register_command_handler(). In addition to calling the wrapped register_command_handler() method, this method also registers the top-level tab-completion context based on the command prefixes and their aliases. See the doc string of the wrapped method for more details on the args. Args: prefix: (str) command prefix. handler: (callable) command handler. help_info: (str) help information. prefix_aliases: (list of str) aliases of the command prefix.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\debug\\cli\\base_ui.py",
    "ast_data": "FunctionDef name:register_command_handler arg:self arg:prefix arg:handler arg:help_info arg:prefix_aliases arguments arg arg arg arg arg Call Call If Call"
  },
  {
    "library": "pytorch",
    "name": "mesh_broadcast",
    "source_code": "def mesh_broadcast(tensor: torch.Tensor, mesh: DeviceMesh, mesh_dim: int=0, async_op: bool=False, *, group_src: int=0) -> Optional[Work]:\n    if tensor.is_meta:\n        return None\n    dim_group = mesh.get_group(mesh_dim)\n    assert isinstance(dim_group, ProcessGroup)\n    return broadcast(tensor, group=dim_group, async_op=async_op, group_src=group_src)",
    "docstring": "broadcast the tensor to a device mesh dimension. We by default use the first rank of the mesh dimension as the source of truth, i.e for a 2d mesh [[0, 1], [2, 3]], if we broadcast on mesh_dim = 1, we will broadcast the tensor on rank 0 to rank 0/1, and tensor on rank 2 to rank 2/3. Args: tensor (torch.Tensor): tensor to broadcast. mesh_dim (int, optional): indicate which mesh dimension we want to scatter on, we by default choose the first rank on the mesh dimension as source of truth. Keyword args: group_src (int, optional): the group rank of the source data for the logical/global tensor, on the specific mesh dimension. By default, we use `Work` object",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\tensor\\_collective_utils.py",
    "ast_data": "FunctionDef name:mesh_broadcast arg:tensor arg:mesh arg:mesh_dim arg:async_op arguments arg arg arg arg arg If Return return:no Assign Call Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "get_attr_inference_rule",
    "source_code": "@register_inference_rule(getattr)\ndef get_attr_inference_rule(n: Node, symbols, constraints, counter):\n    assert isinstance(n.args[0], Node)\n    assert isinstance(n.args[1], str)\n    output, counter = gen_tvar(counter)\n    symbols[n] = output\n    input = symbols[n.args[0]]\n    attr = n.args[1]\n    if attr == 'device':\n        return ([BinConstraintT(input, output, op_eq)], counter)\n    else:\n        raise NotImplementedError('Not yet implemented')",
    "docstring": "If the attribute is \"device\" then the tensor shape is preserved",
    "type": "function",
    "file_path": "pytorch\\torch\\fx\\experimental\\migrate_gradual_types\\constraint_generator.py",
    "ast_data": "FunctionDef name:get_attr_inference_rule arg:n arg:symbols arg:constraints arg:counter arguments arg arg arg arg Call Call Assign Call Assign Assign Assign If Compare Return return:yes Call Raise Call Call"
  },
  {
    "library": "tensorflow",
    "name": "SessionRunHook",
    "source_code": "@tf_export(v1=['train.SessionRunHook'])\nclass SessionRunHook:\n\n    def begin(self):\n        pass\n\n    def after_create_session(self, session, coord):\n        pass\n\n    def before_run(self, run_context):\n        return None\n\n    def after_run(self, run_context, run_values):\n        pass\n\n    def end(self, session):\n        pass",
    "docstring": "Hook to extend calls to MonitoredSession.run().",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\training\\session_run_hook.py",
    "ast_data": "ClassDef name:SessionRunHook FunctionDef name:begin arg:self arguments arg FunctionDef name:after_create_session arg:self arg:session arg:coord arguments arg arg arg FunctionDef name:before_run arg:self arg:run_context arguments arg arg Return return:no FunctionDef name:after_run arg:self arg:run_context arg:run_values arguments arg arg arg FunctionDef name:end arg:self arg:session arguments arg arg Call"
  },
  {
    "library": "authlib",
    "name": "get_redirect_uri",
    "source_code": "def get_redirect_uri(self):\n    raise NotImplementedError()",
    "docstring": "A method to get temporary credential's ``:: def get_redirect_uri(self): return self.oauth_callback :return: A URL string",
    "type": "method",
    "file_path": "authlib\\authlib\\oauth1\\rfc5849\\models.py",
    "ast_data": "FunctionDef name:get_redirect_uri arg:self arguments arg Raise Call"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, original_args, session):\n    self._original_args = original_args\n    self._session = session\n    self._stop_requested = False",
    "docstring": "Initializes SessionRunContext.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\training\\session_run_hook.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:original_args arg:session arguments arg arg arg Assign Assign Assign"
  },
  {
    "library": "pandas",
    "name": "_is_comparable_dtype",
    "source_code": "def _is_comparable_dtype(self, dtype: DtypeObj) -> bool:\n    return lib.is_np_dtype(dtype, 'm')",
    "docstring": "Can we compare values of the given dtype to our own?",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\indexes\\timedeltas.py",
    "ast_data": "FunctionDef name:_is_comparable_dtype arg:self arg:dtype arguments arg arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "Bind",
    "source_code": "class Bind:\n\n    @classmethod\n    def decorator(cls, d):\n        return lambda f: Bind(f, d)\n\n    def __init__(self, f, d):\n        self._f = f\n        self._d = d\n\n    def __get__(self, instance, owner):\n        if instance is not None:\n            f = self._f.__get__(instance, owner)\n            return tf_decorator.make_decorator(f, Bind(f, self._d))\n        else:\n            return self\n\n    def __call__(self, *a, **k):\n        return self._d(self._f, a, k)",
    "docstring": "When called evaluates but supports binding . >>> @Bind.decorator ... def my_decorator(f, args, kwargs): ... print(\"my_decorator called with\", args, kwargs) ... return f(*args, **kwargs) >>> class Foo: ... @my_decorator ... def bar(self, a, b, c): ... return a * b * c >>> Foo.bar(None, 1, 2, c=3) my_decorator called with (None, 1, 2) {'c': 3} 6 >>> foo = Foo() >>> foo.bar(1, 2, c=3) my_decorator called with (1, 2) {'c': 3} 6",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\custom_gradient.py",
    "ast_data": "ClassDef name:Bind FunctionDef name:decorator arg:cls arg:d arguments arg arg Return return:yes arguments arg Call FunctionDef name:__init__ arg:self arg:f arg:d arguments arg arg arg Assign Assign FunctionDef name:__get__ arg:self arg:instance arg:owner arguments arg arg arg If Compare Assign Call Return return:yes Call Call Return return:yes FunctionDef name:__call__ arg:self arguments arg arg arg Return return:yes Call"
  },
  {
    "library": "kornia",
    "name": "q",
    "source_code": "@property\ndef q(self) -> Quaternion:\n    return self._q",
    "docstring": "Return the underlying data with shape :math:.",
    "type": "method",
    "file_path": "kornia\\kornia\\geometry\\liegroup\\so3.py",
    "ast_data": "FunctionDef name:q arg:self arguments arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_register_builtin_comm_hook",
    "source_code": "def _register_builtin_comm_hook(self, comm_hook_type):\n    assert self.logger is not None\n    self.logger._set_comm_hook_name(str(comm_hook_type))\n    dist._register_builtin_comm_hook(self.reducer, comm_hook_type)",
    "docstring": "Register a built-in communication hook that specifies how DDP aggregates gradients across multiple workers. The built-in hooks aim to provide efficient C++ implementations for certain hooks, which might not be as efficient if implemented in Python using a Python communication hook. Args: comm_hook_type (dist.BuiltinCommHookType): type of communication hook, such as ALLREDUCE, FP16_COMPRESS, etc. .. warning :: DDP communication hook can only be registered once and should be registered before calling backward. Example:: Below is an example of a FP16 compression where gradients are compressed into 16-bit floating-point numbers before allreduce, and then decompressed after allreduce. >>> # xdoctest: +SKIP('undefined name') >>> ddp._register_builtin_comm_hook(dist.BuiltinCommHookType.FP16_COMPRESS)",
    "type": "method",
    "file_path": "pytorch\\torch\\nn\\parallel\\distributed.py",
    "ast_data": "FunctionDef name:_register_builtin_comm_hook arg:self arg:comm_hook_type arguments arg arg Compare Call Call Call"
  },
  {
    "library": "matplotlib",
    "name": "set_rotate_label",
    "source_code": "def set_rotate_label(self, val):\n    self._rotate_label = val\n    self.stale = True",
    "docstring": "Whether to rotate the axis label: True, False or None. If set to None the label will be rotated if longer than 4 chars.",
    "type": "method",
    "file_path": "matplotlib\\lib\\mpl_toolkits\\mplot3d\\axis3d.py",
    "ast_data": "FunctionDef name:set_rotate_label arg:self arg:val arguments arg arg Assign Assign"
  },
  {
    "library": "tensorflow",
    "name": "create_file_writer",
    "source_code": "def create_file_writer(logdir, max_queue=None, flush_millis=None, filename_suffix=None, name=None):\n    if logdir is None:\n        return _NoopSummaryWriter()\n    logdir = str(logdir)\n    with ops.device('cpu:0'):\n        if max_queue is None:\n            max_queue = constant_op.constant(10)\n        if flush_millis is None:\n            flush_millis = constant_op.constant(2 * 60 * 1000)\n        if filename_suffix is None:\n            filename_suffix = constant_op.constant('.v2')\n        if name is None:\n            name = 'logdir:' + logdir\n        resource = gen_summary_ops.summary_writer(shared_name=name)\n        return _LegacyResourceSummaryWriter(resource=resource, init_op_fn=functools.partial(gen_summary_ops.create_summary_file_writer, logdir=logdir, max_queue=max_queue, flush_millis=flush_millis, filename_suffix=filename_suffix))",
    "docstring": "Creates a summary file writer in the current context under the given name. Args: logdir: a string, or None. If a string, creates a summary file writer which writes to the directory named by the string. If None, returns a mock object which acts like a summary writer but does nothing, useful to use as a context manager. max_queue: the largest number of summaries to keep in a queue; will flush once the queue gets bigger than this. Defaults to 10. flush_millis: the largest interval between flushes. Defaults to 120,000. filename_suffix: optional suffix for the event file name. Defaults to . name: Shared name for this SummaryWriter resource stored to default Graph. Defaults to the provided logdir prefixed with . Note: if a summary writer resource with this shared name already exists, the returned SummaryWriter wraps that resource and the other arguments have no effect. Returns: Either a summary writer or an empty object which can be used as a summary writer.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\summary_ops_v2.py",
    "ast_data": "FunctionDef name:create_file_writer arg:logdir arg:max_queue arg:flush_millis arg:filename_suffix arg:name arguments arg arg arg arg arg If Compare Return return:yes Call Assign Call With Call If Compare Assign Call If Compare Assign Call If Compare Assign Call If Compare Assign Assign Call Return return:yes Call Call"
  },
  {
    "library": "scipy",
    "name": "_MatrixM1PowerOperator",
    "source_code": "class _MatrixM1PowerOperator(LinearOperator):\n\n    def __init__(self, A, p):\n        if A.ndim != 2 or A.shape[0] != A.shape[1]:\n            raise ValueError('expected A to be like a square matrix')\n        if p < 0 or p != int(p):\n            raise ValueError('expected p to be a non-negative integer')\n        self._A = A\n        self._p = p\n        self.ndim = A.ndim\n        self.shape = A.shape\n\n    def _matvec(self, x):\n        for i in range(self._p):\n            x = self._A.dot(x) - x\n        return x\n\n    def _rmatvec(self, x):\n        for i in range(self._p):\n            x = x.dot(self._A) - x\n        return x\n\n    def _matmat(self, X):\n        for i in range(self._p):\n            X = self._A.dot(X) - X\n        return X\n\n    def _adjoint(self):\n        return _MatrixM1PowerOperator(self._A.T, self._p)",
    "docstring": "A representation of the linear operator (A - I)^p.",
    "type": "class",
    "file_path": "scipy\\scipy\\linalg\\_matfuncs_inv_ssq.py",
    "ast_data": "ClassDef name:_MatrixM1PowerOperator FunctionDef name:__init__ arg:self arg:A arg:p arguments arg arg arg If BoolOp Compare Compare Raise Call If BoolOp Compare Compare Call Raise Call Assign Assign Assign Assign FunctionDef name:_matvec arg:self arg:x arguments arg arg For Call Assign Call Return return:yes FunctionDef name:_rmatvec arg:self arg:x arguments arg arg For Call Assign Call Return return:yes FunctionDef name:_matmat arg:self arg:X arguments arg arg For Call Assign Call Return return:yes FunctionDef name:_adjoint arg:self arguments arg Return return:yes Call"
  },
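The matrix-free action can be checked against a dense computation; a NumPy sketch of the `x -> (A - I)^p x` loop used by `_matvec`:

```python
import numpy as np

rng = np.random.default_rng(0)
A = rng.standard_normal((5, 5))
x = rng.standard_normal(5)
p = 3

y = x.copy()
for _ in range(p):          # apply (A - I) p times without forming it
    y = A @ y - y

dense = np.linalg.matrix_power(A - np.eye(5), p) @ x
print(np.allclose(y, dense))  # True
```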
  {
    "library": "tensorflow",
    "name": "remove",
    "source_code": "def remove(self, keys, name=None):\n    if keys.dtype != self._key_dtype:\n        raise TypeError(f'Dtype of argument `keys` must be {self._key_dtype}, received: {keys.dtype}')\n    with ops.name_scope(name, '%s_lookup_table_remove' % self.name, (self.resource_handle, keys, self._default_value)):\n        op = gen_lookup_ops.lookup_table_remove_v2(self.resource_handle, keys)\n    return op",
    "docstring": "Removes and its associated values from the table. If a key is not present in the table, it is silently ignored. Args: keys: Keys to remove. Can be a tensor of any shape. Must match the table's key type. name: A name for the operation (optional). Returns: The created Operation. Raises: TypeError: when do not match the table data types.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\lookup_ops.py",
    "ast_data": "FunctionDef name:remove arg:self arg:keys arg:name arguments arg arg arg If Compare Raise Call With Call Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "compile_args_from_training_config",
    "source_code": "def compile_args_from_training_config(training_config, custom_objects=None):\n    if custom_objects is None:\n        custom_objects = {}\n    with generic_utils.CustomObjectScope(custom_objects):\n        optimizer_config = training_config['optimizer_config']\n        optimizer = optimizers.deserialize(optimizer_config)\n        loss = None\n        loss_config = training_config.get('loss', None)\n        if loss_config is not None:\n            loss = _deserialize_nested_config(losses.deserialize, loss_config)\n        metrics = None\n        metrics_config = training_config.get('metrics', None)\n        if metrics_config is not None:\n            metrics = _deserialize_nested_config(_deserialize_metric, metrics_config)\n        weighted_metrics = None\n        weighted_metrics_config = training_config.get('weighted_metrics', None)\n        if weighted_metrics_config is not None:\n            weighted_metrics = _deserialize_nested_config(_deserialize_metric, weighted_metrics_config)\n        sample_weight_mode = training_config['sample_weight_mode'] if hasattr(training_config, 'sample_weight_mode') else None\n        loss_weights = training_config['loss_weights']\n    return dict(optimizer=optimizer, loss=loss, metrics=metrics, weighted_metrics=weighted_metrics, loss_weights=loss_weights, sample_weight_mode=sample_weight_mode)",
    "docstring": "Return model.compile arguments from training config.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\saving\\saving_utils.py",
    "ast_data": "FunctionDef name:compile_args_from_training_config arg:training_config arg:custom_objects arguments arg arg If Compare Assign With Call Assign Assign Call Assign Assign Call If Compare Assign Call Assign Assign Call If Compare Assign Call Assign Assign Call If Compare Assign Call Assign Call Assign Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "read",
    "source_code": "def read(self, save_path, options=None):\n    if options and options.experimental_enable_async_checkpoint:\n        self._checkpoint_options = options\n    if self._checkpoint_options and self._checkpoint_options.experimental_enable_async_checkpoint:\n        if context.executing_eagerly():\n            return self._async_checkpointer().read(save_path, options)\n        else:\n            logging.warning('Saving async checkpoint in graph mode is currently not supported; switching to regular sync checkpoint instead.')\n    start_time = time.time()\n    if isinstance(save_path, os.PathLike):\n        save_path = os.fspath(save_path)\n    options = options or checkpoint_options.CheckpointOptions()\n    result = self._saver.restore(save_path=save_path, options=options)\n    metrics.AddCheckpointReadDuration(api_label=_CHECKPOINT_V2, microseconds=_get_duration_microseconds(start_time, time.time()))\n    return result",
    "docstring": "Reads a training checkpoint written with . Reads this and any objects it depends on. This method is just like but does not expect the variable in the checkpoint. It only restores the objects that the checkpoint already depends on. The method is primarily intended for use by higher level checkpoint management utilities that use instead of and have their own mechanisms to number and track checkpoints. Example usage: Args: save_path: The path to the checkpoint as returned by . options: Optional object. Returns: A load status object, which can be used to make assertions about the status of a checkpoint restoration. See for details.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\checkpoint\\checkpoint.py",
    "ast_data": "FunctionDef name:read arg:self arg:save_path arg:options arguments arg arg arg If BoolOp Assign If BoolOp If Call Return return:yes Call Call Call Assign Call If Call Assign Call Assign BoolOp Call Assign Call Call Call Call Return return:yes"
  },
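A sketch contrasting `read()` with `restore()`: `read()` pairs with the low-level `write()`, which keeps no save counter (the checkpoint path is illustrative):

```python
import tensorflow as tf

v = tf.Variable(1.0)
ckpt = tf.train.Checkpoint(v=v)
path = ckpt.write('/tmp/ckpt_demo')   # low-level write, no numbering

v.assign(5.0)
ckpt.read(path).assert_consumed()     # v is restored to 1.0
print(v.numpy())
```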
  {
    "library": "numpy",
    "name": "short_path",
    "source_code": "def short_path(path, cwd=None):\n    if not isinstance(path, str):\n        return path\n    if cwd is None:\n        cwd = os.getcwd()\n    abspath = os.path.abspath(path)\n    relpath = os.path.relpath(path, cwd)\n    if len(abspath) <= len(relpath):\n        return abspath\n    return relpath",
    "docstring": "Return relative or absolute path name, whichever is shortest. Parameters ---------- path : str or None cwd : str or None Returns ------- str Relative path or absolute path based on current working directory",
    "type": "function",
    "file_path": "numpy\\tools\\refguide_check.py",
    "ast_data": "FunctionDef name:short_path arg:path arg:cwd arguments arg arg If Call Return return:yes If Compare Assign Call Assign Call Assign Call If Compare Call Call Return return:yes Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "Granularity",
    "source_code": "@dataclass(frozen=True)\nclass Granularity:\n    pass",
    "docstring": "Base class for representing the granularity of quantization. This class serves as a parent for specific granularity types used in quantization operations, such as per-tensor or per-axis quantization.",
    "type": "class",
    "file_path": "pytorch\\torch\\ao\\quantization\\observer.py",
    "ast_data": "ClassDef name:Granularity Call"
  },
  {
    "library": "tensorflow",
    "name": "supports_masking",
    "source_code": "@property\ndef supports_masking(self):\n    return self._supports_masking",
    "docstring": "Whether this layer supports computing a mask using .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\base_layer.py",
    "ast_data": "FunctionDef name:supports_masking arg:self arguments arg Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "get_frameon",
    "source_code": "def get_frameon(self):\n    return self.patch.get_visible()",
    "docstring": "Return the figure's background patch visibility, i.e. whether the figure background will be drawn. Equivalent to ``.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\figure.py",
    "ast_data": "FunctionDef name:get_frameon arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "transform",
    "source_code": "def transform(self, X):\n    check_is_fitted(self)\n    X = validate_data(self, X, reset=False)\n    if self.pooling_func == np.mean and (not issparse(X)):\n        size = np.bincount(self.labels_)\n        n_samples = X.shape[0]\n        nX = np.array([np.bincount(self.labels_, X[i, :]) / size for i in range(n_samples)])\n    else:\n        nX = [self.pooling_func(X[:, self.labels_ == l], axis=1) for l in np.unique(self.labels_)]\n        nX = np.array(nX).T\n    return nX",
    "docstring": "Transform a new matrix using the built clustering. Parameters ---------- X : array-like of shape (n_samples, n_features) or (n_samples, n_samples) A M by N array of M observations in N dimensions or a length M array of M one-dimensional observations. Returns ------- Y : ndarray of shape (n_samples, n_clusters) or (n_clusters,) The pooled values for each feature cluster.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\cluster\\_feature_agglomeration.py",
    "ast_data": "FunctionDef name:transform arg:self arg:X arguments arg arg Call Assign Call If BoolOp Compare Call Assign Call Assign Assign Call Call Call Assign Call Compare Call Assign Call Return return:yes"
  },
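A usage sketch for the pooled transform above (the data shape and cluster count are arbitrary):

```python
import numpy as np
from sklearn.cluster import FeatureAgglomeration

X = np.random.RandomState(0).rand(10, 64)
agglo = FeatureAgglomeration(n_clusters=8)
X_reduced = agglo.fit_transform(X)  # (10, 8): each column pools one feature cluster
print(X_reduced.shape)
```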
  {
    "library": "matplotlib",
    "name": "set_params",
    "source_code": "def set_params(self, subs=None, numticks=None):\n    if numticks is not None:\n        self.numticks = numticks\n    if subs is not None:\n        self._subs = subs",
    "docstring": "Set parameters within this locator.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\ticker.py",
    "ast_data": "FunctionDef name:set_params arg:self arg:subs arg:numticks arguments arg arg arg If Compare Assign If Compare Assign"
  },
  {
    "library": "tensorflow",
    "name": "get_export_outputs",
    "source_code": "def get_export_outputs(export_outputs, predictions):\n    if export_outputs is None:\n        default_output = export_output_lib.PredictOutput(predictions)\n        export_outputs = {signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY: default_output}\n    if not isinstance(export_outputs, dict):\n        raise TypeError(f'`export_outputs` must be dict, received: {export_outputs}.')\n    for v in export_outputs.values():\n        if not isinstance(v, export_output_lib.ExportOutput):\n            raise TypeError(f'Values in `export_outputs` must be ExportOutput objects, received: {export_outputs}.')\n    _maybe_add_default_serving_output(export_outputs)\n    return export_outputs",
    "docstring": "Validate export_outputs or create default export_outputs. Args: export_outputs: Describes the output signatures to be exported to and used during serving. Should be a dict or None. predictions: Predictions or dict of . Returns: Valid export_outputs dict Raises: TypeError: if export_outputs is not a dict or its values are not ExportOutput instances.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\saved_model\\model_utils\\export_utils.py",
    "ast_data": "FunctionDef name:get_export_outputs arg:export_outputs arg:predictions arguments arg arg If Compare Assign Call Assign If Call Raise Call For Call If Call Raise Call Call Return return:yes"
  },
  {
    "library": "django",
    "name": "_get_locale_dirs",
    "source_code": "def _get_locale_dirs(resources, include_core=True):\n    contrib_dir = os.path.join(os.getcwd(), 'django', 'contrib')\n    dirs = []\n    for contrib_name in os.listdir(contrib_dir):\n        path = os.path.join(contrib_dir, contrib_name, 'locale')\n        if os.path.isdir(path):\n            dirs.append((contrib_name, path))\n            if contrib_name in HAVE_JS:\n                dirs.append(('%s-js' % contrib_name, path))\n    if include_core:\n        dirs.insert(0, ('core', os.path.join(os.getcwd(), 'django', 'conf', 'locale')))\n    if resources is not None:\n        res_names = [d[0] for d in dirs]\n        dirs = [ld for ld in dirs if ld[0] in resources]\n        if len(resources) > len(dirs):\n            print('You have specified some unknown resources. Available resource names are: %s' % (', '.join(res_names),))\n            exit(1)\n    return dirs",
    "docstring": "Return a tuple (contrib name, absolute path) for all locale directories, optionally including the django core catalog. If resources list is not None, filter directories matching resources content.",
    "type": "function",
    "file_path": "django\\scripts\\manage_translations.py",
    "ast_data": "FunctionDef name:_get_locale_dirs arg:resources arg:include_core arguments arg arg Assign Call Call Assign For Call Assign Call If Call Call If Compare Call If Call Call Call If Compare Assign Assign Compare If Compare Call Call Call Call Call Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "_wrap_method_output",
    "source_code": "def _wrap_method_output(f, method):\n\n    @wraps(f)\n    def wrapped(self, X, *args, **kwargs):\n        data_to_wrap = f(self, X, *args, **kwargs)\n        if isinstance(data_to_wrap, tuple):\n            return_tuple = (_wrap_data_with_container(method, data_to_wrap[0], X, self), *data_to_wrap[1:])\n            if hasattr(type(data_to_wrap), '_make'):\n                return type(data_to_wrap)._make(return_tuple)\n            return return_tuple\n        return _wrap_data_with_container(method, data_to_wrap, X, self)\n    return wrapped",
    "docstring": "Wrapper used by to automatically wrap methods.",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\utils\\_set_output.py",
    "ast_data": "FunctionDef name:_wrap_method_output arg:f arg:method arguments arg arg FunctionDef name:wrapped arg:self arg:X arguments arg arg arg arg Assign Call If Call Assign Call If Call Call Return return:yes Call Call Return return:yes Return return:yes Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_compute_causal_padding",
    "source_code": "def _compute_causal_padding(self, inputs):\n    left_pad = self.dilation_rate[0] * (self.kernel_size[0] - 1)\n    if getattr(inputs.shape, 'ndims', None) is None:\n        batch_rank = 1\n    else:\n        batch_rank = len(inputs.shape) - 2\n    if self.data_format == 'channels_last':\n        causal_padding = [[0, 0]] * batch_rank + [[left_pad, 0], [0, 0]]\n    else:\n        causal_padding = [[0, 0]] * batch_rank + [[0, 0], [left_pad, 0]]\n    return causal_padding",
    "docstring": "Calculates padding for 'causal' option for 1-d conv layers.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\layers\\convolutional.py",
    "ast_data": "FunctionDef name:_compute_causal_padding arg:self arg:inputs arguments arg arg Assign If Compare Call Assign Assign Call If Compare Assign Assign Return return:yes"
  },
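The padding arithmetic stands alone: the left pad is `dilation * (kernel_size - 1)`, applied only on the left side of the time axis. A pure-Python check:

```python
kernel_size, dilation, batch_rank = 3, 2, 1
left_pad = dilation * (kernel_size - 1)
# channels_last layout: [batch dims..., time, channels]
causal_padding = [[0, 0]] * batch_rank + [[left_pad, 0], [0, 0]]
print(causal_padding)  # [[0, 0], [4, 0], [0, 0]]
```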
  {
    "library": "pandas",
    "name": "_nbytes",
    "source_code": "def _nbytes(self, deep: bool=False) -> int:\n    objsize = 24\n    level_nbytes = sum((i.memory_usage(deep=deep) for i in self.levels))\n    label_nbytes = sum((i.nbytes for i in self.codes))\n    names_nbytes = sum((getsizeof(i, objsize) for i in self.names))\n    result = level_nbytes + label_nbytes + names_nbytes\n    if '_engine' in self._cache:\n        result += self._engine.sizeof(deep=deep)\n    return result",
    "docstring": "return the number of bytes in the underlying data deeply introspect the level data if deep=True include the engine hashtable *this is in internal routine*",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\indexes\\multi.py",
    "ast_data": "FunctionDef name:_nbytes arg:self arg:deep arguments arg arg Assign Assign Call Call Assign Call Assign Call Call Assign If Compare Call Return return:yes"
  },
  {
    "library": "scipy",
    "name": "Vincent",
    "source_code": "class Vincent(Benchmark):\n    change_dimensionality = True\n\n    def __init__(self, dimensions=2):\n        Benchmark.__init__(self, dimensions)\n        self._bounds = list(zip([0.25] * self.N, [10.0] * self.N))\n        self.global_optimum = [[7.70628098 for _ in range(self.N)]]\n        self.fglob = -float(self.N)\n\n    def fun(self, x, *args):\n        self.nfev += 1\n        return -sum(sin(10.0 * log(x)))",
    "docstring": "Vincent objective function. This class defines the Vincent [1]_ global optimization problem. This is a multimodal minimization problem defined as follows: .. math:: f_{\\text{Vincent}}(x) = - \\sum_{i=1}^{n} \\sin(10 \\log(x)) Here, :math: represents the number of dimensions and :math: for :math:. *Global optimum*: :math: for :math: for :math: .. [1] Gavana, A. Global Optimization Benchmarks and AMPGO retrieved 2015",
    "type": "class",
    "file_path": "scipy\\benchmarks\\benchmarks\\go_benchmark_functions\\go_funcs_V.py",
    "ast_data": "ClassDef name:Vincent Assign FunctionDef name:__init__ arg:self arg:dimensions arguments arg arg Call Assign Call Call Assign Call Assign Call FunctionDef name:fun arg:self arg:x arguments arg arg arg Return return:yes Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "_get_linear_configs",
    "source_code": "def _get_linear_configs() -> list[BackendPatternConfig]:\n    observation_type = ObservationType.OUTPUT_USE_DIFFERENT_OBSERVER_AS_INPUT\n    dtype_configs = [qnnpack_weighted_op_qint8_symmetric_dtype_config, executorch_weighted_op_int8_dtype_config, executorch_default_dynamic_quint8_dtype_config, executorch_default_dynamic_qint8_dtype_config, executorch_default_dynamic_float16_dtype_config]\n    linear_configs: list[BackendPatternConfig] = []\n    linear_configs.append(BackendPatternConfig(torch.nn.Linear).set_observation_type(observation_type).set_dtype_configs(dtype_configs).set_root_module(torch.nn.Linear).set_reference_quantized_module(nnqr.Linear).set_qat_module(nnqat.Linear))\n    linear_configs.append(BackendPatternConfig(nnqat.Linear).set_observation_type(observation_type).set_dtype_configs(dtype_configs).set_root_module(torch.nn.Linear).set_reference_quantized_module(nnqr.Linear))\n    linear_configs.append(BackendPatternConfig(torch.nn.functional.linear).set_observation_type(observation_type).set_dtype_configs(dtype_configs)._set_input_type_to_index({'weight': 1, 'bias': 2}))\n    return linear_configs",
    "docstring": "Return all configs related to linear modules and ops.",
    "type": "function",
    "file_path": "pytorch\\torch\\ao\\quantization\\backend_config\\executorch.py",
    "ast_data": "FunctionDef name:_get_linear_configs arguments Assign Assign Call Call Call Call Call Call Call Call Call Call Call Call Call Call Call Call Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "GenerateCostReport",
    "source_code": "def GenerateCostReport(metagraph, per_node_report=False, verbose=False, cluster=None):\n    if cluster is None:\n        cluster = gcluster.Cluster(disable_detailed_stats=False)\n    return tf_wrap.GenerateCostReport(metagraph.SerializeToString(), per_node_report, verbose, cluster.tf_cluster)",
    "docstring": "Analyze the cost of each TensorFlow op and node in the provided metagraph. Args: metagraph: A TensorFlow MetaGraphDef. per_node_report: by default the report contains stats aggregated on a per op type basis, setting per_node_report to True adds results for each individual node to the report. verbose: Prints out the entire operation proto instead of a summary table. cluster: Analyze the costs using the specified cluster, or the local machine if no cluster was specified. Returns: A string of cost report.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\grappler\\cost_analyzer.py",
    "ast_data": "FunctionDef name:GenerateCostReport arg:metagraph arg:per_node_report arg:verbose arg:cluster arguments arg arg arg arg If Compare Assign Call Return return:yes Call Call"
  },
  {
    "library": "sphinx",
    "name": "AfterCommentParser",
    "source_code": "class AfterCommentParser(TokenProcessor):\n\n    def __init__(self, lines: list[str]) -> None:\n        super().__init__(lines)\n        self.comment: str | None = None\n\n    def fetch_rvalue(self) -> list[Token]:\n        tokens = []\n        while (current := self.fetch_token()):\n            tokens.append(current)\n            if current == [OP, '(']:\n                tokens += self.fetch_until([OP, ')'])\n            elif current == [OP, '{']:\n                tokens += self.fetch_until([OP, '}'])\n            elif current == [OP, '[']:\n                tokens += self.fetch_until([OP, ']'])\n            elif current == INDENT:\n                tokens += self.fetch_until(DEDENT)\n            elif current == [OP, ';']:\n                break\n            elif current and current.kind not in {OP, NAME, NUMBER, STRING}:\n                break\n        return tokens\n\n    def parse(self) -> None:\n        while (tok := self.fetch_token()) and (not tok.match([OP, '='], NEWLINE, COMMENT)):\n            assert tok\n        assert tok is not None\n        if tok == [OP, '=']:\n            self.fetch_rvalue()\n            tok = self.current\n            assert tok is not None\n        if tok == COMMENT:\n            self.comment = tok.value",
    "docstring": "Python source code parser to pick up comments after assignments. This parser takes code which starts with an assignment statement, and returns the comment for the variable if one exists.",
    "type": "class",
    "file_path": "sphinx\\sphinx\\pycode\\parser.py",
    "ast_data": "ClassDef name:AfterCommentParser FunctionDef name:__init__ arg:self arg:lines arguments arg arg Call Call FunctionDef name:fetch_rvalue arg:self arguments arg Assign While Call Call If Compare Call If Compare Call If Compare Call If Compare Call If Compare If BoolOp Compare Return return:yes FunctionDef name:parse arg:self arguments arg While BoolOp Call Call Compare If Compare Call Assign Compare If Compare Assign"
  },
  {
    "library": "tensorflow",
    "name": "_get_variable_creator_initial_value",
    "source_code": "def _get_variable_creator_initial_value(self, replica_id, device, primary_var, **kwargs):\n    if replica_id == 0:\n        return kwargs['initial_value']\n    else:\n        assert primary_var is not None\n        assert device is not None\n        assert kwargs is not None\n\n        def initial_value_fn():\n            if context.executing_eagerly() or ops.inside_function():\n                init_value = primary_var.value()\n                return array_ops.identity(init_value)\n            else:\n                with ops.device(device):\n                    init_value = primary_var.initial_value\n                    return array_ops.identity(init_value)\n        return initial_value_fn",
    "docstring": "Return the initial value for variables on a replica.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\mirrored_strategy.py",
    "ast_data": "FunctionDef name:_get_variable_creator_initial_value arg:self arg:replica_id arg:device arg:primary_var arguments arg arg arg arg arg If Compare Return return:yes Compare Compare Compare FunctionDef name:initial_value_fn arguments If BoolOp Call Call Assign Call Return return:yes Call With Call Assign Return return:yes Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "get_topmost_subplotspec",
    "source_code": "def get_topmost_subplotspec(self):\n    gridspec = self.get_gridspec()\n    if hasattr(gridspec, 'get_topmost_subplotspec'):\n        return gridspec.get_topmost_subplotspec()\n    else:\n        return self",
    "docstring": "Return the topmost instance associated with the subplot.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\gridspec.py",
    "ast_data": "FunctionDef name:get_topmost_subplotspec arg:self arguments arg Assign Call If Call Return return:yes Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "manual_seed_all",
    "source_code": "def manual_seed_all(seed: int) -> None:\n    seed = int(seed)\n\n    def cb():\n        for i in range(device_count()):\n            default_generator = torch.xpu.default_generators[i]\n            default_generator.manual_seed(seed)\n    _lazy_call(cb, seed_all=True)",
    "docstring": "Set the seed for generating random numbers on all GPUs. It's safe to call this function if XPU is not available; in that case, it is silently ignored. Args: seed (int): The desired seed.",
    "type": "function",
    "file_path": "pytorch\\torch\\xpu\\random.py",
    "ast_data": "FunctionDef name:manual_seed_all arg:seed arguments arg Assign Call FunctionDef name:cb arguments For Call Call Assign Call Call"
  },
  {
    "library": "django",
    "name": "type",
    "source_code": "@property\ndef type(self):\n    return capi.get_field_type(self.ptr)",
    "docstring": "Return the OGR type of this Field.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\gdal\\field.py",
    "ast_data": "FunctionDef name:type arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "scrapy",
    "name": "peek",
    "source_code": "def peek(self) -> Request | None:\n    stats = self._downloader_interface.stats(self.pqueues)\n    if not stats:\n        return None\n    slot = min(stats)[1]\n    queue = self.pqueues[slot]\n    return queue.peek()",
    "docstring": "Returns the next object to be returned by :meth:, but without removing it from the queue. Raises :exc: if the underlying queue class does not implement a `` method, which is optional for queues.",
    "type": "method",
    "file_path": "scrapy\\scrapy\\pqueues.py",
    "ast_data": "FunctionDef name:peek arg:self arguments arg Assign Call If Return return:no Assign Call Assign Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "_define_gemm_instance",
    "source_code": "def _define_gemm_instance(self, op: GemmOperation, evt_name: Optional[str]=None) -> tuple[str, str]:\n    assert cutlass_utils.try_import_cutlass()\n    import cutlass_library.gemm_operation as cutlass_gemm_op\n    import cutlass_library.library as cutlass_lib\n    if op.gemm_kind == cutlass_lib.GemmKind.Sparse:\n        emitter = cutlass_gemm_op.EmitSparseGemmInstance()\n    else:\n        emitter = cutlass_gemm_op.EmitGemmInstance()\n    op_def = emitter.emit(op)\n    op_def = op_def.replace('cutlass::gemm::device::Gemm', 'cutlass::gemm::device::GemmUniversal')\n    if op.gemm_kind != cutlass_lib.GemmKind.Sparse:\n        op_def = op_def.replace('false,', '')\n    pattern = re.compile('\\\\s*using\\\\s(.*?)\\\\s=')\n    decl = op_def.split('\\n')[2]\n    match = pattern.match(decl)\n    if match is None:\n        raise RuntimeError('Invalid Gemm config: \\n' + op_def)\n    op_type = match.groups()[0]\n    return (op_def, op_type)",
    "docstring": "Defines and renders the Cutlass / CUDA C++ code for a given GEMM operation instance. This function uses the Cutlass library to generate key parts of the codegen process. General Matrix Multiply forms a core part of a number of scientific applications, so this efficient and adaptable implementation is crucial. Args: op (cutlass_library.gemm_op.GemmOperation): This is the core GEMM operation that we are defining and rendering. Returns: Tuple[str, str]: A tuple where the first part is a string that constitutes the defined GEMM operation in C++ code (render) and the second part is the string that specifies the operation type.",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\codegen\\cuda\\gemm_template.py",
    "ast_data": "FunctionDef name:_define_gemm_instance arg:self arg:op arg:evt_name arguments arg arg arg Call If Compare Assign Call Assign Call Assign Call Assign Call If Compare Assign Call Assign Call Assign Call Assign Call If Compare Raise Call Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_composite_tensor_fields",
    "source_code": "@property\ndef _composite_tensor_fields(self):\n    return ()",
    "docstring": "A tuple of parameter names to rebuild the . The tuple contains the names of kwargs to the 's constructor that the needs to rebuild the instance. \"is_non_singular\", \"is_self_adjoint\", \"is_positive_definite\", and \"is_square\" are common to all subclasses and may be omitted.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\linalg\\linear_operator.py",
    "ast_data": "FunctionDef name:_composite_tensor_fields arg:self arguments arg Return return:no"
  },
  {
    "library": "pytorch",
    "name": "ready_to_flush",
    "source_code": "def ready_to_flush(self) -> bool:\n    return False",
    "docstring": "Check whether the backend is requesting the scheduler to flush the generated kernel. If not supported, please return False.",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\scheduler.py",
    "ast_data": "FunctionDef name:ready_to_flush arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "string_length",
    "source_code": "@tf_export(v1=['strings.length'])\n@dispatch.register_unary_elementwise_api\n@dispatch.add_dispatch_support\ndef string_length(input, name=None, unit='BYTE'):\n    return gen_string_ops.string_length(input, unit=unit, name=name)",
    "docstring": "Computes the length of each string given in the input tensor. >>> strings = tf.constant(['Hello','TensorFlow', '🙂']) >>> tf.strings.length(strings).numpy() # default counts bytes array([ 5, 10, 4], dtype=int32) >>> tf.strings.length(strings, unit=\"UTF8_CHAR\").numpy() array([ 5, 10, 1], dtype=int32) Args: input: A of type . The strings for which to compute the length for each element. name: A name for the operation (optional). unit: An optional from: . Defaults to . The unit that is counted to compute string length. One of: (for the number of bytes in each string) or (for the number of UTF-8 encoded Unicode code points in each string). Results are undefined if and the strings do not contain structurally valid UTF-8. Returns: A of type , containing the length of the input string in the same element of the input tensor.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\string_ops.py",
    "ast_data": "FunctionDef name:string_length arg:input arg:name arg:unit arguments arg arg arg Return return:yes Call Call"
  },
  {
    "library": "matplotlib",
    "name": "_writePng",
    "source_code": "def _writePng(self, img):\n    buffer = BytesIO()\n    img.save(buffer, format='png')\n    buffer.seek(8)\n    png_data = b''\n    bit_depth = palette = None\n    while True:\n        length, type = struct.unpack(b'!L4s', buffer.read(8))\n        if type in [b'IHDR', b'PLTE', b'IDAT']:\n            data = buffer.read(length)\n            if len(data) != length:\n                raise RuntimeError('truncated data')\n            if type == b'IHDR':\n                bit_depth = int(data[8])\n            elif type == b'PLTE':\n                palette = data\n            elif type == b'IDAT':\n                png_data += data\n        elif type == b'IEND':\n            break\n        else:\n            buffer.seek(length, 1)\n        buffer.seek(4, 1)\n    return (png_data, bit_depth, palette)",
    "docstring": "Write the image *img* into the pdf file using png predictors with Flate compression.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\backends\\backend_pdf.py",
    "ast_data": "FunctionDef name:_writePng arg:self arg:img arguments arg arg Assign Call Call Call Assign Assign While Assign Call Call If Compare Assign Call If Compare Call Raise Call If Compare Assign Call If Compare Assign If Compare If Compare Call Call Return return:yes"
  },
  {
    "library": "sphinx",
    "name": "get_transforms",
    "source_code": "def get_transforms(self) -> list[type[Transform]]:\n    transforms = super().get_transforms()\n    transforms.remove(SmartQuotes)\n    return transforms",
    "docstring": "Sphinx's reST parser replaces a transform class for smart-quotes by its own refs: sphinx.io.SphinxStandaloneReader",
    "type": "method",
    "file_path": "sphinx\\sphinx\\parsers.py",
    "ast_data": "FunctionDef name:get_transforms arg:self arguments arg Assign Call Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "experimental_logical_device",
    "source_code": "def experimental_logical_device(self, logical_device_id):\n    return self.strategy.extended.experimental_logical_device(logical_device_id)",
    "docstring": "Places variables and ops on the specified logical device.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\tpu_strategy.py",
    "ast_data": "FunctionDef name:experimental_logical_device arg:self arg:logical_device_id arguments arg arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "smart_cond",
    "source_code": "def smart_cond(pred, true_fn=None, false_fn=None, name=None):\n    if isinstance(pred, variables.Variable):\n        return cond.cond(pred, true_fn=true_fn, false_fn=false_fn, name=name)\n    return smart_module.smart_cond(pred, true_fn=true_fn, false_fn=false_fn, name=name)",
    "docstring": "Return either if predicate is true else . If is a bool or has a constant value, we return either or , otherwise we use to dynamically route to both. Args: pred: A scalar determining whether to return the result of or . true_fn: The callable to be performed if pred is true. false_fn: The callable to be performed if pred is false. name: Optional name prefix when using . Returns: Tensors returned by the call to either or . Raises: TypeError: If or is not callable.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\layers\\utils.py",
    "ast_data": "FunctionDef name:smart_cond arg:pred arg:true_fn arg:false_fn arg:name arguments arg arg arg arg If Call Return return:yes Call Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "BiggsExp04",
    "source_code": "class BiggsExp04(Benchmark):\n\n    def __init__(self, dimensions=4):\n        Benchmark.__init__(self, dimensions)\n        self._bounds = list(zip([0.0] * 4, [20.0] * 4))\n        self.global_optimum = [[1.0, 10.0, 1.0, 5.0]]\n        self.fglob = 0\n\n    def fun(self, x, *args):\n        self.nfev += 1\n        t = arange(1, 11.0) * 0.1\n        y = exp(-t) - 5 * exp(-10 * t)\n        vec = (x[2] * exp(-t * x[0]) - x[3] * exp(-t * x[1]) - y) ** 2\n        return sum(vec)",
    "docstring": "BiggsExp04 objective function. The BiggsExp04 [1]_ global optimization problem is a multimodal minimization problem defined as follows .. math:: \\begin{matrix}\\ f_{\\text{BiggsExp04}}(x) = \\sum_{i=1}^{10} (x_3 e^{-t_i x_1} - x_4 e^{-t_i x_2} - y_i)^2\\\\ t_i = 0.1i\\\\ y_i = e^{-t_i} - 5 e^{-10 t_i}\\\\ \\end{matrix} with :math: for :math:. *Global optimum*: :math: for :math: .. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions For Global Optimization Problems Int. Journal of Mathematical Modelling and Numerical Optimisation, 2013, 4, 150-194.",
    "type": "class",
    "file_path": "scipy\\benchmarks\\benchmarks\\go_benchmark_functions\\go_funcs_B.py",
    "ast_data": "ClassDef name:BiggsExp04 FunctionDef name:__init__ arg:self arg:dimensions arguments arg arg Call Assign Call Call Assign Assign FunctionDef name:fun arg:self arg:x arguments arg arg arg Assign Call Assign Call Call Assign Call Call Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "SparseWarning",
    "source_code": "class SparseWarning(Warning):\n    pass",
    "docstring": "General warning for :mod:.",
    "type": "class",
    "file_path": "scipy\\scipy\\sparse\\_base.py",
    "ast_data": "ClassDef name:SparseWarning"
  },
  {
    "library": "numpy",
    "name": "find_duplicates",
    "source_code": "@array_function_dispatch(_find_duplicates_dispatcher)\ndef find_duplicates(a, key=None, ignoremask=True, return_index=False):\n    a = np.asanyarray(a).ravel()\n    fields = get_fieldstructure(a.dtype)\n    base = a\n    if key:\n        for f in fields[key]:\n            base = base[f]\n        base = base[key]\n    sortidx = base.argsort()\n    sortedbase = base[sortidx]\n    sorteddata = sortedbase.filled()\n    flag = sorteddata[:-1] == sorteddata[1:]\n    if ignoremask:\n        sortedmask = sortedbase.recordmask\n        flag[sortedmask[1:]] = False\n    flag = np.concatenate(([False], flag))\n    flag[:-1] = flag[:-1] + flag[1:]\n    duplicates = a[sortidx][flag]\n    if return_index:\n        return (duplicates, sortidx[flag])\n    else:\n        return duplicates",
    "docstring": "Find the duplicates in a structured array along a given key Parameters ---------- a : array-like Input array key : {string, None}, optional Name of the fields along which to check the duplicates. If None, the search is performed by records ignoremask : {True, False}, optional Whether masked data should be discarded or considered as duplicates. return_index : {False, True}, optional Whether to return the indices of the duplicated values. Examples -------- >>> import numpy as np >>> from numpy.lib import recfunctions as rfn >>> ndtype = [('a', int)] >>> a = np.ma.array([1, 1, 1, 2, 2, 3, 3], ... mask=[0, 0, 1, 0, 0, 0, 1]).view(ndtype) >>> rfn.find_duplicates(a, ignoremask=True, return_index=True) (masked_array(data=[(1,), (1,), (2,), (2,)], mask=[(False,), (False,), (False,), (False,)], fill_value=(999999,), dtype=[('a', '<i8')]), array([0, 1, 3, 4]))",
    "type": "function",
    "file_path": "numpy\\numpy\\lib\\recfunctions.py",
    "ast_data": "FunctionDef name:find_duplicates arg:a arg:key arg:ignoremask arg:return_index arguments arg arg arg arg Assign Call Call Assign Call Assign If For Assign Assign Assign Call Assign Assign Call Assign Compare If Assign Assign Assign Call Assign Assign If Return return:yes Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "_pvalue_dunnett",
    "source_code": "def _pvalue_dunnett(rho: np.ndarray, df: int, statistic: np.ndarray, alternative: Literal['two-sided', 'less', 'greater'], rng: SeedType=None) -> np.ndarray:\n    statistic = statistic.reshape(-1, 1)\n    mvt = stats.multivariate_t(shape=rho, df=df, seed=rng)\n    if alternative == 'two-sided':\n        statistic = abs(statistic)\n        pvalue = 1 - mvt.cdf(statistic, lower_limit=-statistic)\n    elif alternative == 'greater':\n        pvalue = 1 - mvt.cdf(statistic, lower_limit=-np.inf)\n    else:\n        pvalue = 1 - mvt.cdf(np.inf, lower_limit=statistic)\n    return np.atleast_1d(pvalue)",
    "docstring": "pvalue from the multivariate t-distribution. Critical values come from the multivariate student-t distribution.",
    "type": "function",
    "file_path": "scipy\\scipy\\stats\\_multicomp.py",
    "ast_data": "FunctionDef name:_pvalue_dunnett arg:rho arg:df arg:statistic arg:alternative arg:rng arguments arg arg arg arg arg Assign Call Assign Call If Compare Assign Call Assign Call If Compare Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "hard_sigmoid",
    "source_code": "@dispatch.add_dispatch_support\n@doc_controls.do_not_generate_docs\ndef hard_sigmoid(x):\n    point_two = _constant_to_tensor(0.2, x.dtype.base_dtype)\n    point_five = _constant_to_tensor(0.5, x.dtype.base_dtype)\n    x = math_ops.multiply(x, point_two)\n    x = math_ops.add(x, point_five)\n    x = clip_ops.clip_by_value(x, 0.0, 1.0)\n    return x",
    "docstring": "Segment-wise linear approximation of sigmoid. Faster than sigmoid. Returns if . In , returns . Args: x: A tensor or variable. Returns: A tensor.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\backend.py",
    "ast_data": "FunctionDef name:hard_sigmoid arg:x arguments arg Assign Call Assign Call Assign Call Assign Call Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "Function",
    "source_code": "class Function(object):\n\n    def __init__(self, func, name=None):\n        self._python_func = func\n        self.name = name or func.__name__\n\n    def __call__(self, *args, **kwargs):\n        flat_args = nest.flatten(args, expand_composites=True)\n        flat_kwargs = nest.flatten(kwargs, expand_composites=True)\n        all_args = flat_args + flat_kwargs\n        outer_ctx = context_lib.get_default()\n        ctx = NewTracingContext(self.name)\n        with context_lib.set_default(ctx):\n            inputs = [ctx.AddParameter(arg.DataType()) for arg in all_args]\n            structured_args = nest.pack_sequence_as(args, inputs[:len(flat_args)])\n            structured_kwargs = nest.pack_sequence_as(kwargs, inputs[len(flat_args):])\n            structured_outputs = self._python_func(*structured_args, **structured_kwargs)\n            py_outputs = nest.flatten(structured_outputs, expand_composites=True)\n            num_outputs = len(py_outputs)\n            finalized_f = ctx.Finalize(py_outputs)\n            outer_ctx.RegisterFunction(finalized_f)\n        call_op = outer_ctx.CreateOperation(self.name, '')\n        call_op.SetOpName(self.name)\n        for arg in all_args:\n            call_op.AddInput(arg)\n        call_op_outputs = call_op.Execute(num_outputs)\n        outer_ctx.RemoveFunction(self.name)\n        return nest.pack_sequence_as(structured_outputs, call_op_outputs)",
    "docstring": "Helper for tf.function.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\experimental\\def_function.py",
    "ast_data": "ClassDef name:Function FunctionDef name:__init__ arg:self arg:func arg:name arguments arg arg arg Assign Assign BoolOp FunctionDef name:__call__ arg:self arguments arg arg arg Assign Call Assign Call Assign Assign Call Assign Call With Call Assign Call Call Assign Call Call Assign Call Call Assign Call Assign Call Assign Call Assign Call Call Assign Call Call For Call Assign Call Call Return return:yes Call"
  },
  {
    "library": "django",
    "name": "user_can_authenticate",
    "source_code": "def user_can_authenticate(self, user):\n    return getattr(user, 'is_active', True)",
    "docstring": "Reject users with is_active=False. Custom user models that don't have that attribute are allowed.",
    "type": "method",
    "file_path": "django\\django\\contrib\\auth\\backends.py",
    "ast_data": "FunctionDef name:user_can_authenticate arg:self arg:user arguments arg arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "StringGauge",
    "source_code": "class StringGauge(Metric):\n    __slots__ = []\n\n    def __init__(self, name, description, *labels):\n        super(StringGauge, self).__init__('StringGauge', _string_gauge_methods, len(labels), name, description, *labels)\n\n    def get_cell(self, *labels):\n        return StringGaugeCell(super(StringGauge, self).get_cell(*labels))",
    "docstring": "A stateful class for updating a gauge-like string metric. This class encapsulates a set of string values (or a single value for a label-less metric). Each value is identified by a tuple of labels. The class allows the user to set each value.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\monitoring.py",
    "ast_data": "ClassDef name:StringGauge Assign FunctionDef name:__init__ arg:self arg:name arg:description arguments arg arg arg arg Call Call Call FunctionDef name:get_cell arg:self arguments arg arg Return return:yes Call Call Call"
  },
  {
    "library": "matplotlib",
    "name": "_initialize",
    "source_code": "def _initialize(self):\n    self._cpp_trifinder.initialize()",
    "docstring": "Initialize the underlying C++ object. Can be called multiple times if, for example, the triangulation is modified.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\tri\\_trifinder.py",
    "ast_data": "FunctionDef name:_initialize arg:self arguments arg Call"
  },
  {
    "library": "pytorch",
    "name": "Event",
    "source_code": "class Event:\n\n    def __init__(self, enable_timing: bool=False) -> None:\n        self.__eventId = torch._C._mps_acquireEvent(enable_timing)\n\n    def __del__(self) -> None:\n        if hasattr(torch._C, '_mps_releaseEvent') and self.__eventId > 0:\n            torch._C._mps_releaseEvent(self.__eventId)\n\n    def record(self) -> None:\n        torch._C._mps_recordEvent(self.__eventId)\n\n    def wait(self) -> None:\n        torch._C._mps_waitForEvent(self.__eventId)\n\n    def query(self) -> bool:\n        return torch._C._mps_queryEvent(self.__eventId)\n\n    def synchronize(self) -> None:\n        torch._C._mps_synchronizeEvent(self.__eventId)\n\n    def elapsed_time(self, end_event: 'Event') -> float:\n        return torch._C._mps_elapsedTimeOfEvents(self.__eventId, end_event.__eventId)",
    "docstring": "Wrapper around an MPS event. MPS events are synchronization markers that can be used to monitor the device's progress, to accurately measure timing, and to synchronize MPS streams. Args: enable_timing (bool, optional): indicates if the event should measure time (default: ``)",
    "type": "class",
    "file_path": "pytorch\\torch\\mps\\event.py",
    "ast_data": "ClassDef name:Event FunctionDef name:__init__ arg:self arg:enable_timing arguments arg arg Assign Call FunctionDef name:__del__ arg:self arguments arg If BoolOp Call Compare Call FunctionDef name:record arg:self arguments arg Call FunctionDef name:wait arg:self arguments arg Call FunctionDef name:query arg:self arguments arg Return return:yes Call FunctionDef name:synchronize arg:self arguments arg Call FunctionDef name:elapsed_time arg:self arg:end_event arguments arg arg Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "_init_axis",
    "source_code": "def _init_axis(self):\n    self.xaxis = axis3d.XAxis(self)\n    self.yaxis = axis3d.YAxis(self)\n    self.zaxis = axis3d.ZAxis(self)",
    "docstring": "Init 3D Axes; overrides creation of regular X/Y Axes.",
    "type": "method",
    "file_path": "matplotlib\\lib\\mpl_toolkits\\mplot3d\\axes3d.py",
    "ast_data": "FunctionDef name:_init_axis arg:self arguments arg Assign Call Assign Call Assign Call"
  },
  {
    "library": "scipy",
    "name": "Benchmark",
    "source_code": "class Benchmark:\n    pass",
    "docstring": "Base class with sensible options",
    "type": "class",
    "file_path": "scipy\\benchmarks\\benchmarks\\common.py",
    "ast_data": "ClassDef name:Benchmark"
  },
  {
    "library": "numpy",
    "name": "hermesub",
    "source_code": "def hermesub(c1, c2):\n    return pu._sub(c1, c2)",
    "docstring": "Subtract one Hermite series from another. Returns the difference of two Hermite series - . The sequences of coefficients are from lowest order term to highest, i.e., [1,2,3] represents the series ``. Parameters ---------- c1, c2 : array_like 1-D arrays of Hermite series coefficients ordered from low to high. Returns ------- out : ndarray Of Hermite series coefficients representing their difference. See Also -------- hermeadd, hermemulx, hermemul, hermediv, hermepow Notes ----- Unlike multiplication, division, etc., the difference of two Hermite series is a Hermite series (without having to \"reproject\" the result onto the basis set) so subtraction, just like that of \"standard\" polynomials, is simply \"component-wise.\" Examples -------- >>> from numpy.polynomial.hermite_e import hermesub >>> hermesub([1, 2, 3, 4], [1, 2, 3]) array([0., 0., 0., 4.])",
    "type": "function",
    "file_path": "numpy\\numpy\\polynomial\\hermite_e.py",
    "ast_data": "FunctionDef name:hermesub arg:c1 arg:c2 arguments arg arg Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "_on_enter",
    "source_code": "def _on_enter(self, event):\n    event.Skip()\n    LocationEvent('figure_enter_event', self, *self._mpl_coords(event), modifiers=self._mpl_modifiers(), guiEvent=event)._process()",
    "docstring": "Mouse has entered the window.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\backends\\backend_wx.py",
    "ast_data": "FunctionDef name:_on_enter arg:self arg:event arguments arg arg Call Call Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "convert",
    "source_code": "def convert(self, y):\n    if y is None:\n        return None\n    if isinstance(y, sparse_tensor.SparseTensor):\n        return self._convert_sparse(y)\n    assert isinstance(y, (tensor_lib.Tensor, ops.Operation)), y\n    output = self._convert_helper(y)\n    if isinstance(output, WrappedTensor):\n        assert isinstance(y, tensor_lib.Tensor)\n        return self._unwrap_or_tile(output)\n    else:\n        assert isinstance(y, ops.Operation)\n        assert not y.outputs\n        assert isinstance(output, ops.Operation)\n    return output",
    "docstring": "Returns the converted value corresponding to y. Args: y: A Tensor or a ops.Operation object. If latter, y should not have any outputs. Returns: If y does not need to be converted, it returns y as is. Else it returns the \"converted value\" corresponding to y.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\parallel_for\\pfor.py",
    "ast_data": "FunctionDef name:convert arg:self arg:y arguments arg arg If Compare Return return:no If Call Return return:yes Call Call Assign Call If Call Call Return return:yes Call Call Call Return return:yes"
  },
  {
    "library": "scrapy",
    "name": "CallLaterOnce",
    "source_code": "class CallLaterOnce(Generic[_T]):\n\n    def __init__(self, func: Callable[_P, _T], *a: _P.args, **kw: _P.kwargs):\n        self._func: Callable[_P, _T] = func\n        self._a: tuple[Any, ...] = a\n        self._kw: dict[str, Any] = kw\n        self._call: DelayedCall | None = None\n        self._deferreds: list[Deferred] = []\n\n    def schedule(self, delay: float=0) -> None:\n        from twisted.internet import reactor\n        if self._call is None:\n            self._call = reactor.callLater(delay, self)\n\n    def cancel(self) -> None:\n        if self._call:\n            self._call.cancel()\n\n    def __call__(self) -> _T:\n        from twisted.internet import reactor\n        self._call = None\n        result = self._func(*self._a, **self._kw)\n        for d in self._deferreds:\n            reactor.callLater(0, d.callback, None)\n        self._deferreds = []\n        return result\n\n    async def wait(self):\n        from scrapy.utils.defer import maybe_deferred_to_future\n        d = Deferred()\n        self._deferreds.append(d)\n        await maybe_deferred_to_future(d)",
    "docstring": "Schedule a function to be called in the next reactor loop, but only if it hasn't been already scheduled since the last time it ran.",
    "type": "class",
    "file_path": "scrapy\\scrapy\\utils\\reactor.py",
    "ast_data": "ClassDef name:CallLaterOnce FunctionDef name:__init__ arg:self arg:func arguments arg arg arg arg FunctionDef name:schedule arg:self arg:delay arguments arg arg If Compare Assign Call FunctionDef name:cancel arg:self arguments arg If Call FunctionDef name:__call__ arg:self arguments arg Assign Assign Call For Call Assign Return return:yes AsyncFunctionDef name:wait arg:self arguments arg Assign Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "devices",
    "source_code": "def devices(self):\n    return self._device_names",
    "docstring": "Get the list of device names. Returns: ( of ) names of the devices.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\debug\\lib\\debug_data.py",
    "ast_data": "FunctionDef name:devices arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "input",
    "source_code": "@property\ndef input(self):\n    return self._nested_inputs",
    "docstring": "Retrieves the input tensor(s) of a layer. Only applicable if the layer has exactly one input, i.e. if it is connected to one incoming layer. Returns: Input tensor or list of input tensors. Raises: RuntimeError: If called in Eager mode. AttributeError: If no inbound nodes are found.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\functional.py",
    "ast_data": "FunctionDef name:input arg:self arguments arg Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "decode",
    "source_code": "def decode(self, doc):\n    if self.input == 'filename':\n        with open(doc, 'rb') as fh:\n            doc = fh.read()\n    elif self.input == 'file':\n        doc = doc.read()\n    if isinstance(doc, bytes):\n        doc = doc.decode(self.encoding, self.decode_error)\n    if doc is np.nan:\n        raise ValueError('np.nan is an invalid document, expected byte or unicode string.')\n    return doc",
    "docstring": "Decode the input into a string of unicode symbols. The decoding strategy depends on the vectorizer parameters. Parameters ---------- doc : bytes or str The string to decode. Returns ------- doc: str A string of unicode symbols.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\feature_extraction\\text.py",
    "ast_data": "FunctionDef name:decode arg:self arg:doc arguments arg arg If Compare With Call Assign Call If Compare Assign Call If Call Assign Call If Compare Raise Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_check_perm",
    "source_code": "def _check_perm(self, perm):\n    if perm.shape.ndims is not None and perm.shape.ndims < 1:\n        raise ValueError(f'Argument `perm` must have at least 1 dimension. Received: {perm}.')\n    if not perm.dtype.is_integer:\n        raise TypeError(f'Argument `perm` must be integer dtype. Received: {perm}.')\n    static_perm = tensor_util.constant_value(perm)\n    if static_perm is not None:\n        sorted_perm = np.sort(static_perm, axis=-1)\n        if np.any(sorted_perm != np.arange(0, static_perm.shape[-1])):\n            raise ValueError(f'Argument `perm` must be a vector of unique integers from 0 to {static_perm.shape[-1] - 1}.')",
    "docstring": "Static check of perm.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\linalg\\linear_operator_permutation.py",
    "ast_data": "FunctionDef name:_check_perm arg:self arg:perm arguments arg arg If BoolOp Compare Compare Raise Call If Raise Call Assign Call If Compare Assign Call If Call Compare Call Raise Call"
  },
  {
    "library": "pytorch",
    "name": "reorganize_partitions",
    "source_code": "def reorganize_partitions(partitions: list[Partition]) -> None:\n    for i, partition in enumerate(partitions):\n        partition.partition_id = i\n    set_parents_and_children(partitions)\n    return",
    "docstring": "Given a list of partitions, reorganize partition id, its parents and its children for each partition",
    "type": "function",
    "file_path": "pytorch\\torch\\fx\\experimental\\accelerator_partitioner.py",
    "ast_data": "FunctionDef name:reorganize_partitions arg:partitions arguments arg For Call Assign Call Return return:no"
  },
  {
    "library": "tensorflow",
    "name": "softmax",
    "source_code": "@dispatch.add_dispatch_support\ndef softmax(x, axis=-1):\n    if x.shape.rank > 1:\n        if isinstance(axis, int):\n            output = nn.softmax(x, axis=axis)\n        else:\n            e = math_ops.exp(x - math_ops.reduce_max(x, axis=axis, keepdims=True))\n            s = math_ops.reduce_sum(e, axis=axis, keepdims=True)\n            output = e / s\n    else:\n        raise ValueError('Cannot apply softmax to a tensor that is 1D. Received input: %s' % (x,))\n    output._keras_logits = x\n    return output",
    "docstring": "Softmax converts a vector of values to a probability distribution. The elements of the output vector are in range (0, 1) and sum to 1. Each vector is handled independently. The argument sets which axis of the input the function is applied along. Softmax is often used as the activation for the last layer of a classification network because the result could be interpreted as a probability distribution. The softmax of each vector x is computed as . The input values in are the log-odds of the resulting probability. Args: x : Input tensor. axis: Integer, axis along which the softmax normalization is applied. Returns: Tensor, output of softmax transformation (all values are non-negative and sum to 1). Examples: **Example 1: standalone usage** >>> inputs = tf.random.normal(shape=(32, 10)) >>> outputs = tf.keras.activations.softmax(inputs) >>> tf.reduce_sum(outputs[0, :]) # Each sample in the batch now sums to 1 **Example 2: usage in a layer** >>> layer = tf.keras.layers.Dense(32, activation=tf.keras.activations.softmax)",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\activations.py",
    "ast_data": "FunctionDef name:softmax arg:x arg:axis arguments arg arg If Compare If Call Assign Call Assign Call Call Assign Call Assign Raise Call Assign Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "equality_inference_rule",
    "source_code": "@register_inference_rule(torch.nn.functional.gelu)\n@register_inference_rule(torch.nn.functional.dropout)\n@register_inference_rule(torch.nn.functional.softmax)\n@register_inference_rule('detach')\n@register_inference_rule('to')\n@register_inference_rule('int')\n@register_inference_rule('long')\n@register_inference_rule('contiguous')\n@register_inference_rule(torch.ones)\n@register_inference_rule(torch.zeros)\ndef equality_inference_rule(n: Node, symbols, constraints, counter):\n    output, counter = gen_tvar(counter)\n    symbols[n] = output\n    if isinstance(n.args[0], Node):\n        input = symbols[n.args[0]]\n        if isinstance(input, TVar):\n            return ([BinConstraintT(input, output, op_eq)], counter)\n        else:\n            for arg in n.args:\n                assert isinstance(symbols[arg], DVar)\n        my_size = [symbols[arg] for arg in n.args]\n        return ([BinConstraintT(output, TensorType(my_size), op_eq)], counter)\n    elif isinstance(n.args[0], tuple):\n        assert len(n.args[0]) <= 4\n        my_size = [symbols[arg] for arg in n.args[0]]\n        return ([BinConstraintT(output, TensorType(my_size), op_eq)], counter)\n    else:\n        raise NotImplementedError('Method not yet implemented')",
    "docstring": "We generate the constraint: input = output",
    "type": "function",
    "file_path": "pytorch\\torch\\fx\\experimental\\migrate_gradual_types\\constraint_generator.py",
    "ast_data": "FunctionDef name:equality_inference_rule arg:n arg:symbols arg:constraints arg:counter arguments arg arg arg arg Assign Call Assign If Call Assign If Call Return return:yes Call For Call Assign Return return:yes Call Call If Call Compare Call Assign Return return:yes Call Call Raise Call Call Call Call Call Call Call Call Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "stop",
    "source_code": "def stop():\n    check_error(cudart().cudaProfilerStop())",
    "docstring": "Stops cuda profiler data collection. .. warning:: Raises CudaError in case of it is unable to stop the profiler.",
    "type": "function",
    "file_path": "pytorch\\torch\\cuda\\profiler.py",
    "ast_data": "FunctionDef name:stop arguments Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "sharding_spec",
    "source_code": "def sharding_spec(self) -> shard_spec.ShardingSpec:\n    return self._sharding_spec",
    "docstring": "Returns the ShardingSpec for the tensor.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\_shard\\sharded_tensor\\api.py",
    "ast_data": "FunctionDef name:sharding_spec arg:self arguments arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "state_dict",
    "source_code": "@override\ndef state_dict(self) -> dict[str, Any]:\n    state_dict = {key: value for key, value in self.__dict__.items() if key not in ('optimizer', '_schedulers')}\n    state_dict['_schedulers'] = [None] * len(self._schedulers)\n    for idx, s in enumerate(self._schedulers):\n        state_dict['_schedulers'][idx] = s.state_dict()\n    return state_dict",
    "docstring": "Return the state of the scheduler as a :class:. It contains an entry for every variable in self.__dict__ which is not the optimizer. The wrapped scheduler states will also be saved.",
    "type": "method",
    "file_path": "pytorch\\torch\\optim\\lr_scheduler.py",
    "ast_data": "FunctionDef name:state_dict arg:self arguments arg Assign Call Compare Assign Call For Call Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_to_placeholder",
    "source_code": "def _to_placeholder(self):\n    if self._inferred_value is not None:\n        inferred_value = array_ops.shape(array_ops.placeholder(shape=self._inferred_value, dtype=dtypes.int32))\n        if self.type_spec.shape.rank == 0:\n            inferred_value = inferred_value[0]\n        return inferred_value\n\n    def component_to_placeholder(component):\n        return array_ops.placeholder(component.dtype, component.shape)\n    return nest.map_structure(component_to_placeholder, self.type_spec, expand_composites=True)",
    "docstring": "Convert this KerasTensor to a placeholder in a graph.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\keras_tensor.py",
    "ast_data": "FunctionDef name:_to_placeholder arg:self arguments arg If Compare Assign Call Call If Compare Assign Return return:yes FunctionDef name:component_to_placeholder arg:component arguments arg Return return:yes Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_embedding_lookup_for_sparse_tensor",
    "source_code": "def _embedding_lookup_for_sparse_tensor(inp: sparse_tensor.SparseTensor, weight: Optional[sparse_tensor.SparseTensor], table: tf_variables.Variable, feature: tpu_embedding_v2_utils.FeatureConfig) -> tensor.Tensor:\n    inp_rank = inp.shape.rank\n    if not feature.output_shape and feature.max_sequence_length > 0 and (inp_rank is None or inp_rank == 2):\n        batch_size = math_ops.cast(array_ops.shape(inp)[0], dtype=dtypes.int64)\n        sparse_shape = array_ops_stack.stack([batch_size, feature.max_sequence_length], axis=0)\n        truncated_inp = sparse_ops.sparse_slice(inp, start=[0, 0], size=sparse_shape)\n        dense_output_shape = array_ops_stack.stack([batch_size, feature.max_sequence_length, feature.table.dim], axis=0)\n        return array_ops.scatter_nd(truncated_inp.indices, array_ops.gather(table.read_value(), truncated_inp.values), dense_output_shape)\n    else:\n        if feature.max_sequence_length > 0:\n            logging.warning('max_sequence_length setting will be ignored because the rank of the input tensor is %d which is not 2.', inp_rank)\n        if not feature.validate_weights_and_indices and inp_rank is not None and (inp_rank <= 2):\n            return embedding_ops.embedding_lookup_sparse_v2(table, inp, sp_weights=weight, combiner=feature.table.combiner)\n        else:\n            return embedding_ops.safe_embedding_lookup_sparse_v2(table, inp, sparse_weights=weight, combiner=feature.table.combiner)",
    "docstring": "Embedding lookup for sparse tensor based on its feature config. Args: inp: a single SparseTensor input. weight: None or SparseTensor which has the same shape of the input. table: a table variable. feature: a feature config. Returns: Embedding lookup result.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\tpu\\tpu_embedding_for_serving.py",
    "ast_data": "FunctionDef name:_embedding_lookup_for_sparse_tensor arg:inp arg:weight arg:table arg:feature arguments arg arg arg arg Assign If BoolOp Compare BoolOp Compare Compare Assign Call Call Assign Call Assign Call Assign Call Return return:yes Call Call Call If Compare Call If BoolOp Compare Compare Return return:yes Call Return return:yes Call"
  },
  {
    "library": "kornia",
    "name": "show",
    "source_code": "def show(self, n_row: Optional[int]=None, backend: str='pil', display: bool=True) -> Optional[Any]:\n    if self._output_image is None:\n        raise ValueError('No pre-computed images found. Needs to execute first.')\n    if len(self._output_image.shape) == 3:\n        out_image = self._output_image\n    elif len(self._output_image.shape) == 4:\n        from kornia.utils.image import make_grid\n        if n_row is None:\n            n_row = math.ceil(self._output_image.shape[0] ** 0.5)\n        out_image = make_grid(self._output_image, n_row, padding=2)\n    else:\n        raise ValueError\n    if backend == 'pil' and display:\n        Image.fromarray((out_image.permute(1, 2, 0).squeeze().numpy() * 255).astype(np.uint8)).show()\n        return None\n    if backend == 'pil':\n        return Image.fromarray((out_image.permute(1, 2, 0).squeeze().numpy() * 255).astype(np.uint8))\n    raise ValueError(f'Unsupported backend `{backend}`.')",
    "docstring": "Return PIL images. Args: n_row: Number of images displayed in each row of the grid. backend: visualization backend. Only PIL is supported now. display: Whether or not to show the image.",
    "type": "method",
    "file_path": "kornia\\kornia\\core\\mixin\\image_module.py",
    "ast_data": "FunctionDef name:show arg:self arg:n_row arg:backend arg:display arguments arg arg arg arg If Compare Raise Call If Compare Call Assign If Compare Call If Compare Assign Call Assign Call Raise If BoolOp Compare Call Call Call Call Call Call Return return:no If Compare Return return:yes Call Call Call Call Call Raise Call"
  },
  {
    "library": "django",
    "name": "BadMigrationError",
    "source_code": "class BadMigrationError(Exception):\n    pass",
    "docstring": "There's a bad migration (unreadable/bad format/etc.).",
    "type": "class",
    "file_path": "django\\django\\db\\migrations\\exceptions.py",
    "ast_data": "ClassDef name:BadMigrationError"
  },
  {
    "library": "matplotlib",
    "name": "__call__",
    "source_code": "def __call__(self, X, alpha=None, bytes=False):\n    rgba, mask = self._get_rgba_and_mask(X, alpha=alpha, bytes=bytes)\n    if not np.iterable(X):\n        rgba = tuple(rgba)\n    return rgba",
    "docstring": "Parameters ---------- X : float or int or array-like The data value(s) to convert to RGBA. For floats, *X* should be in the interval `numpy.uint8`.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\colors.py",
    "ast_data": "FunctionDef name:__call__ arg:self arg:X arg:alpha arg:bytes arguments arg arg arg arg Assign Call If Call Assign Call Return return:yes"
  },
  {
    "library": "numpy",
    "name": "generate_def",
    "source_code": "def generate_def(dll, dfile):\n    dump = dump_table(dll)\n    for i in range(len(dump)):\n        if _START.match(dump[i].decode()):\n            break\n    else:\n        raise ValueError('Symbol table not found')\n    syms = []\n    for j in range(i + 1, len(dump)):\n        m = _TABLE.match(dump[j].decode())\n        if m:\n            syms.append((int(m.group(1).strip()), m.group(2)))\n        else:\n            break\n    if len(syms) == 0:\n        log.warn('No symbols found in %s' % dll)\n    with open(dfile, 'w') as d:\n        d.write('LIBRARY        %s\\n' % os.path.basename(dll))\n        d.write(';CODE          PRELOAD MOVEABLE DISCARDABLE\\n')\n        d.write(';DATA          PRELOAD SINGLE\\n')\n        d.write('\\nEXPORTS\\n')\n        for s in syms:\n            d.write('%s\\n' % s[1])",
    "docstring": "Given a dll file location, get all its exported symbols and dump them into the given def file. The .def file will be overwritten",
    "type": "function",
    "file_path": "numpy\\numpy\\distutils\\mingw32ccompiler.py",
    "ast_data": "FunctionDef name:generate_def arg:dll arg:dfile arguments arg arg Assign Call For Call Call If Call Call Raise Call Assign For Call Call Assign Call Call If Call Call Call Call Call If Compare Call Call With Call Call Call Call Call Call For Call"
  },
  {
    "library": "scikit-learn",
    "name": "fit",
    "source_code": "@_fit_context(prefer_skip_nested_validation=True)\ndef fit(self, X, y=None):\n    X = validate_data(self, X, accept_sparse=['csr', 'csc'], dtype=[np.float64, np.float32])\n    n_samples, n_features = X.shape\n    if self.n_components == 'auto':\n        self.n_components_ = johnson_lindenstrauss_min_dim(n_samples=n_samples, eps=self.eps)\n        if self.n_components_ <= 0:\n            raise ValueError('eps=%f and n_samples=%d lead to a target dimension of %d which is invalid' % (self.eps, n_samples, self.n_components_))\n        elif self.n_components_ > n_features:\n            raise ValueError('eps=%f and n_samples=%d lead to a target dimension of %d which is larger than the original space with n_features=%d' % (self.eps, n_samples, self.n_components_, n_features))\n    else:\n        if self.n_components > n_features:\n            warnings.warn('The number of components is higher than the number of features: n_features < n_components (%s < %s).The dimensionality of the problem will not be reduced.' % (n_features, self.n_components), DataDimensionalityWarning)\n        self.n_components_ = self.n_components\n    self.components_ = self._make_random_matrix(self.n_components_, n_features).astype(X.dtype, copy=False)\n    if self.compute_inverse_components:\n        self.inverse_components_ = self._compute_inverse_components()\n    self._n_features_out = self.n_components\n    return self",
    "docstring": "Generate a sparse random projection matrix. Parameters ---------- X : {ndarray, sparse matrix} of shape (n_samples, n_features) Training set: only the shape is used to find optimal random matrix dimensions based on the theory referenced in the afore mentioned papers. y : Ignored Not used, present here for API consistency by convention. Returns ------- self : object BaseRandomProjection class instance.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\random_projection.py",
    "ast_data": "FunctionDef name:fit arg:self arg:X arg:y arguments arg arg arg Assign Call Assign If Compare Assign Call If Compare Raise Call If Compare Raise Call If Compare Call Assign Assign Call Call If Assign Call Assign Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "any",
    "source_code": "def any(self, *, skipna: bool=True, axis: AxisInt | None=0, **kwargs) -> np.bool_ | NAType:\n    nv.validate_any((), kwargs)\n    values = self._data.copy()\n    np.putmask(values, self._mask, self.dtype._falsey_value)\n    result = values.any()\n    if skipna:\n        return result\n    elif result or len(self) == 0 or (not self._mask.any()):\n        return result\n    else:\n        return self.dtype.na_value",
    "docstring": "Return whether any element is truthy. Returns False unless there is at least one element that is truthy. By default, NAs are skipped. If `Kleene logic skipnaskipnapandas.NA` is True or False influences the result): >>> pd.array([True, False, pd.NA]).any(skipna=False) np.True_ >>> pd.array([1, 0, pd.NA]).any(skipna=False) np.True_ >>> pd.array([False, False, pd.NA]).any(skipna=False) >>> pd.array([0, 0, pd.NA]).any(skipna=False)",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\arrays\\masked.py",
    "ast_data": "FunctionDef name:any arg:self arguments arg arg arg arg Call Assign Call Call Assign Call If Return return:yes If BoolOp Compare Call Call Return return:yes Return return:yes"
  },
  {
    "library": "django",
    "name": "w",
    "source_code": "def w(self):\n    return (self.data.weekday() + 1) % 7",
    "docstring": "Day of the week, numeric, i.e. '0' (Sunday) to '6' (Saturday)",
    "type": "method",
    "file_path": "django\\django\\utils\\dateformat.py",
    "ast_data": "FunctionDef name:w arg:self arguments arg Return return:yes Call"
  },
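  A sketch of the weekday remapping the formatter above performs: Python's date.weekday() is Monday=0..Sunday=6, while the 'w' format character needs Sunday=0..Saturday=6, hence (weekday + 1) % 7.

  ```python
  from datetime import date

  d = date(2024, 1, 7)          # a Sunday
  print(d.weekday())            # 6 (Python convention: Monday=0)
  print((d.weekday() + 1) % 7)  # 0 (the 'w' convention: Sunday=0)
  ```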
  {
    "library": "cryptography",
    "name": "load_public",
    "source_code": "def load_public(self, data: memoryview) -> tuple[ec.EllipticCurvePublicKey, memoryview]:\n    public_key, data = _lookup_kformat(_ECDSA_NISTP256).load_public(data)\n    _, data = load_application(data)\n    return (public_key, data)",
    "docstring": "Make ECDSA public key from data.",
    "type": "method",
    "file_path": "cryptography\\src\\cryptography\\hazmat\\primitives\\serialization\\ssh.py",
    "ast_data": "FunctionDef name:load_public arg:self arg:data arguments arg arg Assign Call Call Assign Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "tricontour",
    "source_code": "@_docstring.Substitution(func='tricontour', type='lines')\n@_docstring.interpd\ndef tricontour(ax, *args, **kwargs):\n    kwargs['filled'] = False\n    return TriContourSet(ax, *args, **kwargs)",
    "docstring": "%(_tricontour_doc)s linewidths : float or array-like, default: :rc: The line width of the contour lines. If a number, all levels will be plotted with this linewidth. If a sequence, the levels in ascending order will be plotted with the linewidths in the order specified. If None, this falls back to :rc:. linestyles : {*None*, 'solid', 'dashed', 'dashdot', 'dotted'}, optional If *linestyles* is *None*, the default is 'solid' unless the lines are monochrome. In that case, negative contours will take their linestyle from :rc: setting. *linestyles* can also be an iterable of the above strings specifying a set of linestyles to be used. If this iterable is shorter than the number of contour levels it will be repeated as necessary.",
    "type": "function",
    "file_path": "matplotlib\\lib\\matplotlib\\tri\\_tricontour.py",
    "ast_data": "FunctionDef name:tricontour arg:ax arguments arg arg arg Assign Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "virtualize_jumps",
    "source_code": "def virtualize_jumps(instructions) -> None:\n    jump_targets = {inst.offset: inst for inst in instructions}\n    for inst in instructions:\n        if inst.opcode in dis.hasjabs or inst.opcode in dis.hasjrel:\n            inst.target = _get_instruction_by_offset(jump_targets, inst.argval)",
    "docstring": "Replace jump targets with pointers to make editing easier",
    "type": "function",
    "file_path": "pytorch\\torch\\_dynamo\\bytecode_transformation.py",
    "ast_data": "FunctionDef name:virtualize_jumps arg:instructions arguments arg Assign For If BoolOp Compare Compare Assign Call"
  },
  {
    "library": "tensorflow",
    "name": "cast",
    "source_code": "@dispatch.add_dispatch_support\n@doc_controls.do_not_generate_docs\ndef cast(x, dtype):\n    return math_ops.cast(x, dtype)",
    "docstring": "Casts a tensor to a different dtype and returns it. You can cast a Keras variable but it still returns a Keras tensor. Args: x: Keras tensor (or variable). dtype: String, either (, , or ). Returns: Keras tensor with dtype . Examples: Cast a float32 variable to a float64 tensor >>> input = tf.keras.backend.ones(shape=(1,3)) >>> print(input) >>> cast_input = tf.keras.backend.cast(input, dtype='float64') >>> print(cast_input) tf.Tensor([[1. 1. 1.]], shape=(1, 3), dtype=float64)",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\backend.py",
    "ast_data": "FunctionDef name:cast arg:x arg:dtype arguments arg arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_make_dataset_iterator",
    "source_code": "def _make_dataset_iterator(self, dataset):\n    input_workers = input_lib.InputWorkers(tuple(self._device_input_worker_devices.items()))\n    return input_lib_v1.DatasetIterator(dataset, input_workers, self._container_strategy(), num_replicas_in_sync=self._num_replicas_in_sync)",
    "docstring": "Make iterators for each of the TPU hosts.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\tpu_strategy.py",
    "ast_data": "FunctionDef name:_make_dataset_iterator arg:self arg:dataset arguments arg arg Assign Call Call Call Return return:yes Call Call"
  },
  {
    "library": "pandas",
    "name": "_grouped_hist",
    "source_code": "def _grouped_hist(data: Series | DataFrame, column=None, by=None, ax=None, bins: int=50, figsize: tuple[float, float] | None=None, layout=None, sharex: bool=False, sharey: bool=False, rot: float=90, grid: bool=True, xlabelsize: int | None=None, xrot=None, ylabelsize: int | None=None, yrot=None, legend: bool=False, **kwargs):\n    if legend:\n        assert 'label' not in kwargs\n        if data.ndim == 1:\n            kwargs['label'] = data.name\n        elif column is None:\n            kwargs['label'] = data.columns\n        else:\n            kwargs['label'] = column\n\n    def plot_group(group, ax) -> None:\n        ax.hist(group.dropna().values, bins=bins, **kwargs)\n        if legend:\n            ax.legend()\n    if xrot is None:\n        xrot = rot\n    fig, axes = _grouped_plot(plot_group, data, column=column, by=by, sharex=sharex, sharey=sharey, ax=ax, figsize=figsize, layout=layout, rot=rot)\n    set_ticks_props(axes, xlabelsize=xlabelsize, xrot=xrot, ylabelsize=ylabelsize, yrot=yrot)\n    maybe_adjust_figure(fig, bottom=0.15, top=0.9, left=0.1, right=0.9, hspace=0.5, wspace=0.3)\n    return axes",
    "docstring": "Grouped histogram Parameters ---------- data : Series/DataFrame column : object, optional by : object, optional ax : axes, optional bins : int, default 50 figsize : tuple, optional layout : optional sharex : bool, default False sharey : bool, default False rot : float, default 90 grid : bool, default True legend: : bool, default False kwargs : dict, keyword arguments passed to matplotlib.Axes.hist Returns ------- collection of Matplotlib Axes",
    "type": "function",
    "file_path": "pandas\\pandas\\plotting\\_matplotlib\\hist.py",
    "ast_data": "FunctionDef name:_grouped_hist arg:data arg:column arg:by arg:ax arg:bins arg:figsize arg:layout arg:sharex arg:sharey arg:rot arg:grid arg:xlabelsize arg:xrot arg:ylabelsize arg:yrot arg:legend arguments arg arg arg arg arg arg arg arg arg arg arg arg arg arg arg arg arg If Compare If Compare Assign If Compare Assign Assign FunctionDef name:plot_group arg:group arg:ax arguments arg arg Call Call If Call If Compare Assign Assign Call Call Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "warn_external",
    "source_code": "def warn_external(message, category=None):\n    kwargs = {}\n    if sys.version_info[:2] >= (3, 12):\n        basedir = pathlib.Path(__file__).parents[2]\n        kwargs['skip_file_prefixes'] = (str(basedir / 'matplotlib'), str(basedir / 'mpl_toolkits'))\n    else:\n        frame = sys._getframe()\n        for stacklevel in itertools.count(1):\n            if frame is None:\n                kwargs['stacklevel'] = stacklevel\n                break\n            if not re.match('\\\\A(matplotlib|mpl_toolkits)(\\\\Z|\\\\.(?!tests\\\\.))', frame.f_globals.get('__name__', '')):\n                kwargs['stacklevel'] = stacklevel\n                break\n            frame = frame.f_back\n        del frame\n    warnings.warn(message, category, **kwargs)",
    "docstring": "wrapper that sets *stacklevel* to \"outside Matplotlib\". The original emitter of the warning can be obtained by patching this function back to , i.e. ``, etc.).",
    "type": "function",
    "file_path": "matplotlib\\lib\\matplotlib\\_api\\__init__.py",
    "ast_data": "FunctionDef name:warn_external arg:message arg:category arguments arg arg Assign If Compare Assign Call Assign Call Call Assign Call For Call If Compare Assign If Call Call Assign Assign Call"
  },
  {
    "library": "matplotlib",
    "name": "__init__",
    "source_code": "def __init__(self, *args, pad=0, **kwargs):\n    axes_class = kwargs.pop('axes_class', self._defaultAxesClass)\n    self.RGB = ax = axes_class(*args, **kwargs)\n    ax.get_figure().add_axes(ax)\n    self.R, self.G, self.B = make_rgb_axes(ax, pad=pad, axes_class=axes_class, **kwargs)\n    for ax1 in [self.RGB, self.R, self.G, self.B]:\n        if isinstance(ax1.axis, MethodType):\n            ad = Axes.AxisDict(self)\n            ad.update(bottom=SimpleAxisArtist(ax1.xaxis, 1, ax1.spines['bottom']), top=SimpleAxisArtist(ax1.xaxis, 2, ax1.spines['top']), left=SimpleAxisArtist(ax1.yaxis, 1, ax1.spines['left']), right=SimpleAxisArtist(ax1.yaxis, 2, ax1.spines['right']))\n        else:\n            ad = ax1.axis\n        ad[:].line.set_color('w')\n        ad[:].major_ticks.set_markeredgecolor('w')",
    "docstring": "Parameters ---------- pad : float, default: 0 Fraction of the Axes height to put as padding. axes_class : Axes class to use. If not provided, `` is used. *args Forwarded to *axes_class* init for the RGB Axes **kwargs Forwarded to *axes_class* init for the RGB, R, G, and B Axes",
    "type": "method",
    "file_path": "matplotlib\\lib\\mpl_toolkits\\axes_grid1\\axes_rgb.py",
    "ast_data": "FunctionDef name:__init__ arg:self arguments arg arg arg arg Assign Call Assign Call Call Call Assign Call For If Call Assign Call Call Call Call Call Call Assign Call Call"
  },
  {
    "library": "scikit-learn",
    "name": "_n_parameters",
    "source_code": "def _n_parameters(self):\n    _, n_features = self.means_.shape\n    if self.covariance_type == 'full':\n        cov_params = self.n_components * n_features * (n_features + 1) / 2.0\n    elif self.covariance_type == 'diag':\n        cov_params = self.n_components * n_features\n    elif self.covariance_type == 'tied':\n        cov_params = n_features * (n_features + 1) / 2.0\n    elif self.covariance_type == 'spherical':\n        cov_params = self.n_components\n    mean_params = n_features * self.n_components\n    return int(cov_params + mean_params + self.n_components - 1)",
    "docstring": "Return the number of free parameters in the model.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\mixture\\_gaussian_mixture.py",
    "ast_data": "FunctionDef name:_n_parameters arg:self arguments arg Assign If Compare Assign If Compare Assign If Compare Assign If Compare Assign Assign Return return:yes Call"
  },
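  A small check of the parameter count above, calling the private helper directly for illustration (assumes scikit-learn). For covariance_type='full', the count is k*d*(d+1)/2 covariance terms plus k*d means plus (k-1) free mixture weights.

  ```python
  import numpy as np
  from sklearn.mixture import GaussianMixture

  X = np.random.RandomState(0).rand(200, 3)
  gm = GaussianMixture(n_components=2, covariance_type="full", random_state=0).fit(X)

  k, d = 2, 3
  expected = k * d * (d + 1) // 2 + k * d + (k - 1)  # 12 + 6 + 1 = 19
  print(gm._n_parameters(), expected)  # both 19
  ```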
  {
    "library": "django",
    "name": "get_rollback",
    "source_code": "def get_rollback(using=None):\n    return get_connection(using).get_rollback()",
    "docstring": "Get the \"needs rollback\" flag -- for *advanced use* only.",
    "type": "function",
    "file_path": "django\\django\\db\\transaction.py",
    "ast_data": "FunctionDef name:get_rollback arg:using arguments arg Return return:yes Call Call"
  },
  {
    "library": "scipy",
    "name": "_apply_conv_mode",
    "source_code": "def _apply_conv_mode(ret, s1, s2, mode, axes, xp):\n    if mode == 'full':\n        return xp_copy(ret, xp=xp)\n    elif mode == 'same':\n        return xp_copy(_centered(ret, s1), xp=xp)\n    elif mode == 'valid':\n        shape_valid = [ret.shape[a] if a not in axes else s1[a] - s2[a] + 1 for a in range(ret.ndim)]\n        return xp_copy(_centered(ret, shape_valid), xp=xp)\n    else:\n        raise ValueError(\"acceptable mode flags are 'valid', 'same', or 'full'\")",
    "docstring": "Calculate the convolution result shape based on the argument. Returns the result sliced to the correct size for the given mode. Parameters ---------- ret : array The result array, with the appropriate shape for the 'full' mode. s1 : list of int The shape of the first input. s2 : list of int The shape of the second input. mode : str {'full', 'valid', 'same'} A string indicating the size of the output. See the documentation for more information. axes : list of ints Axes over which to compute the convolution. Returns ------- ret : array A copy of , sliced to the correct size for the given .",
    "type": "function",
    "file_path": "scipy\\scipy\\signal\\_signaltools.py",
    "ast_data": "FunctionDef name:_apply_conv_mode arg:ret arg:s1 arg:s2 arg:mode arg:axes arg:xp arguments arg arg arg arg arg arg If Compare Return return:yes Call If Compare Return return:yes Call Call If Compare Assign Compare Call Return return:yes Call Call Raise Call"
  },
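  An illustration of the three mode slices handled above, via the public API that routes its result through this helper (a sketch, assuming scipy is installed):

  ```python
  import numpy as np
  from scipy.signal import fftconvolve

  s1, s2 = np.ones(5), np.ones(3)
  print(fftconvolve(s1, s2, mode="full").shape)   # (7,)  = 5 + 3 - 1
  print(fftconvolve(s1, s2, mode="same").shape)   # (5,)  centered to s1's shape
  print(fftconvolve(s1, s2, mode="valid").shape)  # (3,)  = 5 - 3 + 1
  ```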
  {
    "library": "authlib",
    "name": "authorize_redirect",
    "source_code": "async def authorize_redirect(self, request, redirect_uri=None, **kwargs):\n    if redirect_uri and isinstance(redirect_uri, URL):\n        redirect_uri = str(redirect_uri)\n    rv = await self.create_authorization_url(redirect_uri, **kwargs)\n    await self.save_authorize_data(request, redirect_uri=redirect_uri, **rv)\n    return RedirectResponse(rv['url'], status_code=302)",
    "docstring": "Create a HTTP Redirect for Authorization Endpoint. :param request: HTTP request instance from Starlette view. :param redirect_uri: Callback or redirect URI for authorization. :param kwargs: Extra parameters to include. :return: A HTTP redirect response.",
    "type": "method",
    "file_path": "authlib\\authlib\\integrations\\starlette_client\\apps.py",
    "ast_data": "AsyncFunctionDef name:authorize_redirect arg:self arg:request arg:redirect_uri arguments arg arg arg arg If BoolOp Call Assign Call Assign Call Call Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "fast_forward",
    "source_code": "def fast_forward(self, n: IntNumber) -> 'QMCEngine':\n    self.random(n=n)\n    return self",
    "docstring": "Fast-forward the sequence by positions. Parameters ---------- n : int Number of points to skip in the sequence. Returns ------- engine : QMCEngine Engine reset to its base state.",
    "type": "method",
    "file_path": "scipy\\scipy\\stats\\_qmc.py",
    "ast_data": "FunctionDef name:fast_forward arg:self arg:n arguments arg arg Call Return return:yes"
  },
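  A hedged sketch of the semantics above: fast_forward(n) draws and discards n points, so skipping a prefix is equivalent to sampling it and throwing it away (deterministic for an unscrambled Sobol sequence).

  ```python
  from scipy.stats import qmc

  a = qmc.Sobol(d=2, scramble=False)
  b = qmc.Sobol(d=2, scramble=False)

  tail = a.random(8)[4:]               # sample 8 points, keep the last 4
  skip = b.fast_forward(4).random(4)   # skip 4 points, then sample 4
  print((tail == skip).all())          # True
  ```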
  {
    "library": "pandas",
    "name": "__arrow_array__",
    "source_code": "def __arrow_array__(self, type=None):\n    import pyarrow as pa\n    if type is None:\n        type = pa.string()\n    values = self._ndarray.copy()\n    values[self.isna()] = None\n    return pa.array(values, type=type, from_pandas=True)",
    "docstring": "Convert myself into a pyarrow Array.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\arrays\\string_.py",
    "ast_data": "FunctionDef name:__arrow_array__ arg:self arg:type arguments arg arg If Compare Assign Call Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "get_model_name",
    "source_code": "def get_model_name(filename):\n    _, tail = os.path.split(filename)\n    modelname = tail[:tail.find('_chrome_trace')]\n    return modelname",
    "docstring": "Get model name from a file in format {model_name}_chrome_trace_*.json",
    "type": "function",
    "file_path": "pytorch\\functorch\\benchmarks\\chrome_trace_parser.py",
    "ast_data": "FunctionDef name:get_model_name arg:filename arguments arg Assign Call Assign Call Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "predict",
    "source_code": "def predict(self, X):\n    check_is_fitted(self)\n    if not hasattr(self.estimators_[0], 'predict'):\n        raise ValueError('The base estimator should implement a predict method')\n    y = Parallel(n_jobs=self.n_jobs)((delayed(e.predict)(X) for e in self.estimators_))\n    return np.asarray(y).T",
    "docstring": "Predict multi-output variable using model for each target variable. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) The input data. Returns ------- y : {array-like, sparse matrix} of shape (n_samples, n_outputs) Multi-output targets predicted across multiple predictors. Note: Separate models are generated for each predictor.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\multioutput.py",
    "ast_data": "FunctionDef name:predict arg:self arg:X arguments arg arg Call If Call Raise Call Assign Call Call Call Call Return return:yes Call"
  },
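  A usage sketch of the per-target parallel predict above (assumes scikit-learn): one estimator is fitted per output column, and predict stacks their outputs.

  ```python
  import numpy as np
  from sklearn.linear_model import LinearRegression
  from sklearn.multioutput import MultiOutputRegressor

  X = np.arange(20, dtype=float).reshape(10, 2)
  Y = np.column_stack([X.sum(axis=1), X.prod(axis=1)])  # two targets

  model = MultiOutputRegressor(LinearRegression()).fit(X, Y)
  print(model.predict(X[:2]).shape)  # (2, 2): one column per fitted estimator
  ```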
  {
    "library": "django",
    "name": "geo_db_type",
    "source_code": "def geo_db_type(self, f):\n    if f.geom_type == 'RASTER':\n        return 'raster'\n    if f.dim == 3:\n        geom_type = f.geom_type + 'Z'\n    else:\n        geom_type = f.geom_type\n    if f.geography:\n        if f.srid != 4326:\n            raise NotSupportedError('PostGIS only supports geography columns with an SRID of 4326.')\n        return 'geography(%s,%d)' % (geom_type, f.srid)\n    else:\n        return 'geometry(%s,%d)' % (geom_type, f.srid)",
    "docstring": "Return the database field type for the given spatial field.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\db\\backends\\postgis\\operations.py",
    "ast_data": "FunctionDef name:geo_db_type arg:self arg:f arguments arg arg If Compare Return return:yes If Compare Assign Assign If If Compare Raise Call Return return:yes Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "default_dtypes",
    "source_code": "def default_dtypes(self, *, device=None):\n    return {'real floating': dtype(float64), 'complex floating': dtype(complex128), 'integral': dtype(intp), 'indexing': dtype(intp)}",
    "docstring": "The default data types used for new CuPy arrays. For CuPy, this always returns the following dictionary: - **\"real floating\"**: `` Parameters ---------- device : str, optional The device to get the default data types for. Returns ------- dtypes : dict A dictionary describing the default data types used for new CuPy arrays. See Also -------- __array_namespace_info__.capabilities, __array_namespace_info__.default_device, __array_namespace_info__.dtypes, __array_namespace_info__.devices Examples -------- >>> info = xp.__array_namespace_info__() >>> info.default_dtypes() {'real floating': cupy.float64, 'complex floating': cupy.complex128, 'integral': cupy.int64, 'indexing': cupy.int64}",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\externals\\array_api_compat\\cupy\\_info.py",
    "ast_data": "FunctionDef name:default_dtypes arg:self arguments arg arg Return return:yes Call Call Call Call"
  },
  {
    "library": "scipy",
    "name": "download_all",
    "source_code": "def download_all(path=None):\n    if pooch is None:\n        raise ImportError(\"Missing optional dependency 'pooch' required for scipy.datasets module. Please use pip or conda to install 'pooch'.\")\n    if path is None:\n        path = pooch.os_cache('scipy-data')\n    downloader = pooch.HTTPDownloader(headers={'User-Agent': 'SciPy'})\n    for dataset_name, dataset_hash in _registry.registry.items():\n        pooch.retrieve(url=_registry.registry_urls[dataset_name], known_hash=dataset_hash, fname=dataset_name, path=path, downloader=downloader)",
    "docstring": "Utility method to download all the dataset files for module. Parameters ---------- path : str, optional Directory path to download all the dataset files. If None, default to the system cache_dir detected by pooch. Examples -------- Download the datasets to the default cache location: >>> from scipy import datasets >>> datasets.download_all() Download the datasets to the current directory: >>> datasets.download_all(\".\")",
    "type": "function",
    "file_path": "scipy\\scipy\\datasets\\_download_all.py",
    "ast_data": "FunctionDef name:download_all arg:path arguments arg If Compare Raise Call If Compare Assign Call Assign Call For Call Call"
  },
  {
    "library": "scipy",
    "name": "_recalc",
    "source_code": "def _recalc(self, n, m):\n    if n != self.n or m != self.m:\n        self.n, self.m = (n, m)\n        astart, a1, _ = gscale(n, m)\n        self.astart = astart\n        self.freqs = a1.astype(np.float64)\n        self.total = self.freqs.sum()",
    "docstring": "When necessary, recalculate exact distribution.",
    "type": "method",
    "file_path": "scipy\\scipy\\stats\\_morestats.py",
    "ast_data": "FunctionDef name:_recalc arg:self arg:n arg:m arguments arg arg arg If BoolOp Compare Compare Assign Assign Call Assign Assign Call Assign Call"
  },
  {
    "library": "tensorflow",
    "name": "_sum_rows",
    "source_code": "def _sum_rows(x):\n    cols = array_ops.shape(x)[1]\n    ones_shape = array_ops_stack.stack([cols, 1])\n    ones = array_ops.ones(ones_shape, x.dtype)\n    return array_ops.reshape(math_ops.matmul(x, ones), [-1])",
    "docstring": "Returns a vector summing up each row of the matrix x.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\nn_impl.py",
    "ast_data": "FunctionDef name:_sum_rows arg:x arguments arg Assign Call Assign Call Assign Call Return return:yes Call Call"
  },
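  The matmul-with-ones trick above is equivalent to a row-wise reduce_sum; a minimal check (assumes TensorFlow is installed):

  ```python
  import tensorflow as tf

  x = tf.constant([[1., 2., 3.], [4., 5., 6.]])
  ones = tf.ones([tf.shape(x)[1], 1], x.dtype)   # column vector of ones
  by_matmul = tf.reshape(tf.matmul(x, ones), [-1])
  by_reduce = tf.reduce_sum(x, axis=1)
  print(by_matmul.numpy(), by_reduce.numpy())    # both [ 6. 15.]
  ```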
  {
    "library": "pandas",
    "name": "TimedeltaIndexResamplerGroupby",
    "source_code": "class TimedeltaIndexResamplerGroupby(_GroupByMixin, TimedeltaIndexResampler):\n\n    @property\n    def _resampler_cls(self):\n        return TimedeltaIndexResampler",
    "docstring": "Provides a resample of a groupby implementation.",
    "type": "class",
    "file_path": "pandas\\pandas\\core\\resample.py",
    "ast_data": "ClassDef name:TimedeltaIndexResamplerGroupby FunctionDef name:_resampler_cls arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_get_or_create_variables_dir",
    "source_code": "def _get_or_create_variables_dir(export_dir):\n    variables_dir = _get_variables_dir(export_dir)\n    file_io.recursive_create_dir(variables_dir)\n    return variables_dir",
    "docstring": "Return variables sub-directory, or create one if it doesn't exist.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\saving\\saved_model_experimental.py",
    "ast_data": "FunctionDef name:_get_or_create_variables_dir arg:export_dir arguments arg Assign Call Call Return return:yes"
  },
  {
    "library": "django",
    "name": "table_name_col",
    "source_code": "@classmethod\ndef table_name_col(cls):\n    return 'f_table_name'",
    "docstring": "Return the name of the metadata column used to store the feature table name.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\db\\backends\\spatialite\\models.py",
    "ast_data": "FunctionDef name:table_name_col arg:cls arguments arg Return return:yes"
  },
  {
    "library": "scipy",
    "name": "Threads",
    "source_code": "class Threads:\n\n    def __init__(self):\n        self.workers = []\n\n    def alloc(self, target):\n        for worker in range(len(self.workers)):\n            if self.workers[worker] >= target.end:\n                self.workers[worker] = target.start\n                return worker\n        self.workers.append(target.start)\n        return len(self.workers) - 1",
    "docstring": "Tries to reconstruct the parallelism from a .ninja_log",
    "type": "class",
    "file_path": "scipy\\tools\\ninjatracing.py",
    "ast_data": "ClassDef name:Threads FunctionDef name:__init__ arg:self arguments arg Assign FunctionDef name:alloc arg:self arg:target arguments arg arg For Call Call If Compare Assign Return return:yes Call Return return:yes Call"
  },
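  A sketch of the greedy lane assignment in Threads.alloc above, assuming the class is in scope as defined in the entry. The helper appears to expect targets in descending time order (as when walking a .ninja_log from its latest entries); a worker is reused when its last-seen start time is at or after the next target's end. Job is a hypothetical stand-in for the log entries it actually consumes.

  ```python
  from collections import namedtuple

  Job = namedtuple("Job", ["start", "end"])

  threads = Threads()
  print(threads.alloc(Job(start=6, end=9)))  # 0: first worker created
  print(threads.alloc(Job(start=1, end=4)))  # 0: ends before worker 0's last start (6)
  print(threads.alloc(Job(start=0, end=5)))  # 1: overlaps [1, 4], needs a new lane
  ```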
  {
    "library": "scrapy",
    "name": "closest_scrapy_cfg",
    "source_code": "def closest_scrapy_cfg(path: str | os.PathLike='.', prevpath: str | os.PathLike | None=None) -> str:\n    if prevpath is not None and str(path) == str(prevpath):\n        return ''\n    path = Path(path).resolve()\n    cfgfile = path / 'scrapy.cfg'\n    if cfgfile.exists():\n        return str(cfgfile)\n    return closest_scrapy_cfg(path.parent, path)",
    "docstring": "Return the path to the closest scrapy.cfg file by traversing the current directory and its parents",
    "type": "function",
    "file_path": "scrapy\\scrapy\\utils\\conf.py",
    "ast_data": "FunctionDef name:closest_scrapy_cfg arg:path arg:prevpath arguments arg arg If BoolOp Compare Compare Call Call Return return:yes Assign Call Call Assign If Call Return return:yes Call Return return:yes Call"
  },
  {
    "library": "django",
    "name": "end_serialization",
    "source_code": "def end_serialization(self):\n    self.indent(0)\n    self.xml.endElement('django-objects')\n    self.xml.endDocument()",
    "docstring": "End serialization -- end the document.",
    "type": "method",
    "file_path": "django\\django\\core\\serializers\\xml_serializer.py",
    "ast_data": "FunctionDef name:end_serialization arg:self arguments arg Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "SumLikeReductionTypePromotionRule",
    "source_code": "class SumLikeReductionTypePromotionRule(ReductionTypePromotionRule):\n\n    def preview_type_promotion(self, args: tuple, kwargs: dict) -> TypePromotionSnapshot:\n        assert len(args) >= 1, f'Reduction op torch.ops.{self.namespace}.{self.op_name} expects at least one argument'\n        arg = args[0]\n        assert isinstance(arg, torch.Tensor), f'type(arg)={type(arg)!r} is not torch.Tensor'\n        dtype: torch.dtype | None = kwargs.get('dtype', None)\n        if dtype is None:\n            if _prims_common.is_boolean_dtype(arg.dtype) or _prims_common.is_integer_dtype(arg.dtype):\n                dtype = torch.int64\n            else:\n                dtype = arg.dtype\n        return super().preview_type_promotion(args, {'dtype': dtype})",
    "docstring": "Reference type promotion rule from torch.ops.aten.sum. This is a special case where computation dtype is always torch.int64 for integral arg, unless overridden by kwarg.",
    "type": "class",
    "file_path": "pytorch\\torch\\onnx\\_internal\\fx\\passes\\type_promotion.py",
    "ast_data": "ClassDef name:SumLikeReductionTypePromotionRule FunctionDef name:preview_type_promotion arg:self arg:args arg:kwargs arguments arg arg arg Compare Call Assign Call Call Call If Compare If BoolOp Call Call Assign Assign Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "list_function_names",
    "source_code": "def list_function_names(self):\n    self.ensure_initialized()\n    return set(pywrap_tfe.TFE_ContextListFunctionNames(self._handle))",
    "docstring": "Get a list of names of registered functions. Returns: A set of names of all registered functions for the context.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\context.py",
    "ast_data": "FunctionDef name:list_function_names arg:self arguments arg Call Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "_device_range_start",
    "source_code": "def _device_range_start(msg: str, stream: int=0) -> object:\n    return _nvtx.deviceRangeStart(msg, stream)",
    "docstring": "Marks the start of a range with string message. It returns an opaque heap-allocated handle for this range to pass to the corresponding call to device_range_end(). A key difference between this and range_start is that the range_start marks the range right away, while _device_range_start marks the start of the range as soon as all the tasks on the CUDA stream are completed. Returns: An opaque heap-allocated handle that should be passed to _device_range_end(). Args: msg (str): ASCII message to associate with the range. stream (int): CUDA stream id.",
    "type": "function",
    "file_path": "pytorch\\torch\\cuda\\nvtx.py",
    "ast_data": "FunctionDef name:_device_range_start arg:msg arg:stream arguments arg arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "mfccs_from_log_mel_spectrograms",
    "source_code": "@tf_export('signal.mfccs_from_log_mel_spectrograms')\n@dispatch.add_dispatch_support\ndef mfccs_from_log_mel_spectrograms(log_mel_spectrograms, name=None):\n    with ops.name_scope(name, 'mfccs_from_log_mel_spectrograms', [log_mel_spectrograms]):\n        log_mel_spectrograms = ops.convert_to_tensor(log_mel_spectrograms)\n        if log_mel_spectrograms.shape.ndims and log_mel_spectrograms.shape.dims[-1].value is not None:\n            num_mel_bins = log_mel_spectrograms.shape.dims[-1].value\n            if num_mel_bins == 0:\n                raise ValueError('num_mel_bins must be positive. Got: %s' % log_mel_spectrograms)\n        else:\n            num_mel_bins = array_ops.shape(log_mel_spectrograms)[-1]\n        dct2 = dct_ops.dct(log_mel_spectrograms, type=2)\n        return dct2 * math_ops.rsqrt(math_ops.cast(num_mel_bins, dct2.dtype) * 2.0)",
    "docstring": "Computes [MFCCs][mfcc] of . Implemented with GPU-compatible ops and supports gradients. [Mel-Frequency Cepstral Coefficient (MFCC)][mfcc] calculation consists of taking the DCT-II of a log-magnitude mel-scale spectrogram. [HTK][htk]'s MFCCs use a particular scaling of the DCT-II which is almost orthogonal normalization. We follow this convention. All MFCCs are returned and it is up to the caller to select a subset of the MFCCs based on their application. For example, it is typical to only use the first few for speech recognition, as this results in an approximately pitch-invariant representation of the signal. For example: Args: log_mel_spectrograms: A / of log-magnitude mel-scale spectrograms. name: An optional name for the operation. Returns: A / of the MFCCs of . Raises: ValueError: If is not positive. [mfcc]: [htk]:",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\signal\\mfcc_ops.py",
    "ast_data": "FunctionDef name:mfccs_from_log_mel_spectrograms arg:log_mel_spectrograms arg:name arguments arg arg With Call Assign Call If BoolOp Compare Assign If Compare Raise Call Assign Call Assign Call Return return:yes Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_get_saveable_from_factory",
    "source_code": "def _get_saveable_from_factory(saveable_factories, serialized_tensor, created_compat_names):\n    matched_factory = None\n    expected_factory_name = serialized_tensor.name\n    factory_input_name = serialized_tensor.checkpoint_key\n    if expected_factory_name in saveable_factories:\n        matched_factory = saveable_factories[expected_factory_name]\n    if matched_factory is None:\n        for factory_name, factory in saveable_factories.items():\n            if expected_factory_name.startswith(factory_name):\n                if matched_factory is not None:\n                    raise ValueError('Forward compatibility load error: Unable to load checkpoint saved in future version of TensorFlow. Please update your version of TensorFlow to the version in which the checkpoint was saved.')\n                matched_factory = factory\n                factory_input_name = _extract_saveable_name(serialized_tensor.checkpoint_key) + factory_name\n                created_compat_names.add(factory_name)\n    if callable(matched_factory):\n        return matched_factory(name=factory_input_name)\n    return matched_factory",
    "docstring": "Returns the saveable generated from the factory method.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\checkpoint\\restore.py",
    "ast_data": "FunctionDef name:_get_saveable_from_factory arg:saveable_factories arg:serialized_tensor arg:created_compat_names arguments arg arg arg Assign Assign Assign If Compare Assign If Compare For Call If Call If Compare Raise Call Assign Assign Call Call If Call Return return:yes Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "get_cpu_isa_version",
    "source_code": "def get_cpu_isa_version():\n    key = 'cpu_isa'\n    out, err = run_shell_cmd(cmds_all[PLATFORM.lower()][key])\n    if err and FLAGS.debug:\n        print('Error in detecting supported ISA:\\n %s' % str(err))\n    ret_val = out\n    required_isa = ['avx', 'avx2', 'avx512f', 'sse4', 'sse4_1']\n    found = []\n    missing = []\n    for isa in required_isa:\n        for sys_isa in ret_val.split(b' '):\n            if isa == sys_isa:\n                if isa not in found:\n                    found.append(isa)\n    missing = list(set(required_isa) - set(found))\n    return (found, missing)",
    "docstring": "Retrieves all Instruction Set Architecture(ISA) available. Required ISA(s): 'avx', 'avx2', 'avx512f', 'sse4', 'sse4_1' Returns: Tuple (list of available ISA, list of missing ISA)",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\tools\\tensorflow_builder\\config_detector\\config_detector.py",
    "ast_data": "FunctionDef name:get_cpu_isa_version arguments Assign Assign Call Call If BoolOp Call Call Assign Assign Assign Assign For For Call If Compare If Compare Call Assign Call Call Call Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "_available_if_base_estimator_has",
    "source_code": "def _available_if_base_estimator_has(attr):\n\n    def _check(self):\n        return hasattr(self._get_estimator(), attr) or all((hasattr(est, attr) for est in self.estimators_))\n    return available_if(_check)",
    "docstring": "Return a function to check if or has . Helper for Chain implementations.",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\multioutput.py",
    "ast_data": "FunctionDef name:_available_if_base_estimator_has arg:attr arguments arg FunctionDef name:_check arg:self arguments arg Return return:yes BoolOp Call Call Call Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "enumerate_epochs",
    "source_code": "def enumerate_epochs(self):\n    with self._truncate_execution_to_epoch():\n        data_iterator = iter(self._dataset)\n        for epoch in range(self._initial_epoch, self._epochs):\n            if self._insufficient_data:\n                break\n            if self._adapter.should_recreate_iterator():\n                data_iterator = iter(self._dataset)\n            yield (epoch, data_iterator)\n            self._adapter.on_epoch_end()",
    "docstring": "Yields .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\data_adapter.py",
    "ast_data": "FunctionDef name:enumerate_epochs arg:self arguments arg With Call Assign Call For Call If If Call Assign Call Call"
  },
  {
    "library": "pytorch",
    "name": "cutlass_layout",
    "source_code": "@staticmethod\ndef cutlass_layout(torch_layout: ir.Layout) -> 'Optional[cutlass_lib.LayoutType]':\n    assert cutlass_utils.try_import_cutlass()\n    import cutlass_library.library as cutlass_lib\n    if V.graph.sizevars.statically_known_equals(torch_layout.stride[-1], 1):\n        return cutlass_lib.LayoutType.RowMajor\n    elif V.graph.sizevars.statically_known_equals(torch_layout.stride[-2], 1):\n        return cutlass_lib.LayoutType.ColumnMajor\n    else:\n        return None",
    "docstring": "Converts an ir.Layout instance into the corresponding cutlass_library.LayoutType enum value (RowMajor, ColumnMajor, or None if no matching value is found ). Args: torch_layout (ir.Layout): The layout that needs to be looked up. Returns: cutlass_lib.LayoutType: The converted layout corresponding to the or None if no matching value is found.",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\codegen\\cuda\\gemm_template.py",
    "ast_data": "FunctionDef name:cutlass_layout arg:torch_layout arguments arg Call If Call Return return:yes If Call Return return:yes Return return:no"
  },
  {
    "library": "tensorflow",
    "name": "is_chief",
    "source_code": "@property\ndef is_chief(self):\n    return self._is_chief",
    "docstring": "Return True if this is a chief supervisor. Returns: A bool.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\training\\supervisor.py",
    "ast_data": "FunctionDef name:is_chief arg:self arguments arg Return return:yes"
  },
  {
    "library": "django",
    "name": "get_script_prefix",
    "source_code": "def get_script_prefix(scope):\n    if settings.FORCE_SCRIPT_NAME:\n        return settings.FORCE_SCRIPT_NAME\n    return scope.get('root_path', '') or ''",
    "docstring": "Return the script prefix to use from either the scope or a setting.",
    "type": "function",
    "file_path": "django\\django\\core\\handlers\\asgi.py",
    "ast_data": "FunctionDef name:get_script_prefix arg:scope arguments arg If Return return:yes Return return:yes BoolOp Call"
  },
  {
    "library": "matplotlib",
    "name": "_viewlim_mask",
    "source_code": "def _viewlim_mask(xs, ys, zs, axes):\n    mask = np.logical_or.reduce((xs < axes.xy_viewLim.xmin, xs > axes.xy_viewLim.xmax, ys < axes.xy_viewLim.ymin, ys > axes.xy_viewLim.ymax, zs < axes.zz_viewLim.xmin, zs > axes.zz_viewLim.xmax))\n    return mask",
    "docstring": "Return the mask of the points outside the axes view limits. Parameters ---------- xs, ys, zs : array-like The points to mask. axes : Axes3D The axes to use for the view limits. Returns ------- mask : np.array The mask of the points as a bool array.",
    "type": "function",
    "file_path": "matplotlib\\lib\\mpl_toolkits\\mplot3d\\art3d.py",
    "ast_data": "FunctionDef name:_viewlim_mask arg:xs arg:ys arg:zs arg:axes arguments arg arg arg arg Assign Call Compare Compare Compare Compare Compare Compare Return return:yes"
  },
  {
    "library": "seaborn",
    "name": "__exit__",
    "source_code": "def __exit__(self, *args):\n    from .rcmod import set_palette\n    set_palette(self._orig_palette)",
    "docstring": "Close the context.",
    "type": "method",
    "file_path": "seaborn\\seaborn\\palettes.py",
    "ast_data": "FunctionDef name:__exit__ arg:self arguments arg arg Call"
  },
  {
    "library": "pytorch",
    "name": "_create_pre_forward_hook",
    "source_code": "def _create_pre_forward_hook(self, name: str) -> Callable:\n\n    def _pre_forward_hook(module: nn.Module, inputs: Any) -> None:\n        self._cur_module_name = f'{name}.forward'\n        if hasattr(module, '_memory_tracker_is_root') and module._memory_tracker_is_root:\n            self._add_marker('fw_start')\n    return _pre_forward_hook",
    "docstring": "Prefix operator name with current module and 'forward', and insert 'fw_start' marker at forward pass start.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\_tools\\memory_tracker.py",
    "ast_data": "FunctionDef name:_create_pre_forward_hook arg:self arg:name arguments arg arg FunctionDef name:_pre_forward_hook arg:module arg:inputs arguments arg arg Assign If BoolOp Call Call Return return:yes"
  },
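  A hedged sketch of the same mechanism via PyTorch's public hook API: a pre-forward hook fires before Module.forward runs and can record the module's name, as the tracker above does.

  ```python
  import torch
  from torch import nn

  seen = []

  def pre_hook(module, inputs):
      # Called before module.forward; mirrors the naming scheme above.
      seen.append(f"{module.__class__.__name__}.forward")

  layer = nn.Linear(4, 2)
  layer.register_forward_pre_hook(pre_hook)
  layer(torch.randn(1, 4))
  print(seen)  # ['Linear.forward']
  ```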
  {
    "library": "tensorflow",
    "name": "enable_numpy_style_slicing",
    "source_code": "def enable_numpy_style_slicing() -> None:\n    global _numpy_style_slicing\n    _numpy_style_slicing = True",
    "docstring": "If called, follows NumPy's rules for slicing Tensors. Used for enabling NumPy behavior on slicing for TF NumPy.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\ops.py",
    "ast_data": "FunctionDef name:enable_numpy_style_slicing arguments Assign"
  },
  {
    "library": "pygame",
    "name": "pixels_red",
    "source_code": "def pixels_red(surface):\n    return numpy.array(surface.get_view('R'), copy=False)",
    "docstring": "pygame.surfarray.pixels_red(Surface): return array Reference pixel red into a 2d array. Create a new 2D array that directly references the red values in a Surface. Any changes to the array will affect the pixels in the Surface. This is a fast operation since no data is copied. This can only work on 24-bit or 32-bit Surfaces. The Surface this array references will remain locked for the lifetime of the array.",
    "type": "function",
    "file_path": "pygame\\src_py\\surfarray.py",
    "ast_data": "FunctionDef name:pixels_red arg:surface arguments arg Return return:yes Call Call"
  },
  {
    "library": "scikit-learn",
    "name": "predict",
    "source_code": "def predict(self, X):\n    scores = self._decision_function(X)\n    return self.classes_.take(scores.argmax(axis=1))",
    "docstring": "Perform classification on an array of vectors . Returns the class label for each sample. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) Input vectors, where is the number of samples and is the number of features. Returns ------- y_pred : ndarray of shape (n_samples,) Class label for each sample.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\discriminant_analysis.py",
    "ast_data": "FunctionDef name:predict arg:self arg:X arguments arg arg Assign Call Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "aggregate_gradients_using_nccl",
    "source_code": "def aggregate_gradients_using_nccl(replica_grads):\n    agg_all_g_and_v = []\n    for single_g_and_v in zip(*replica_grads):\n        single_grads = [g for g, _ in single_g_and_v]\n        agg_grads = nccl_ops.all_sum(single_grads)\n        agg_all_g_and_v.append([(g, v) for g, (_, v) in zip(agg_grads, single_g_and_v)])\n    agg_all_g_and_v = list(zip(*agg_all_g_and_v))\n    return agg_all_g_and_v",
    "docstring": "Aggregate gradients using nccl allreduce.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\cross_device_utils.py",
    "ast_data": "FunctionDef name:aggregate_gradients_using_nccl arg:replica_grads arguments arg Assign For Call Assign Assign Call Call Call Assign Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "statically_known_gt",
    "source_code": "def statically_known_gt(self, left: Expr, right: Union[Expr, int]) -> bool:\n    expr = left > right\n    return self.is_expr_static_and_true(expr)",
    "docstring": "Returns a bool indicating if it is sound to optimize as if left is greater than right.",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\sizevars.py",
    "ast_data": "FunctionDef name:statically_known_gt arg:self arg:left arg:right arguments arg arg arg Assign Compare Return return:yes Call"
  },
  {
    "library": "kornia",
    "name": "_transform_boxes",
    "source_code": "def _transform_boxes(boxes: torch.Tensor, M: torch.Tensor) -> torch.Tensor:\n    M = M if M.is_floating_point() else M.float()\n    boxes_per_batch, n_points_per_box, coordinates_dimension = boxes.shape[-3:]\n    if boxes_per_batch == 0:\n        return boxes\n    points = boxes.view(-1, n_points_per_box * boxes_per_batch, coordinates_dimension)\n    M = M if M.ndim == 3 else M.unsqueeze(0)\n    if points.shape[0] != M.shape[0]:\n        raise ValueError(f'Batch size mismatch. Got {points.shape[0]} for boxes and {M.shape[0]} for the transformation matrix.')\n    transformed_boxes: torch.Tensor = transform_points(M, points)\n    transformed_boxes = transformed_boxes.view_as(boxes)\n    return transformed_boxes",
    "docstring": "Transform 3D and 2D in kornia format by applying the transformation matrix M. Boxes and the transformation matrix could be batched or not. Args: boxes: 2D quadrilaterals or 3D hexahedrons in kornia format. M: the transformation matrix of shape :math: or :math: for 2D and :math: or :math: for 3D hexahedron.",
    "type": "function",
    "file_path": "kornia\\kornia\\geometry\\boxes.py",
    "ast_data": "FunctionDef name:_transform_boxes arg:boxes arg:M arguments arg arg Assign Call Call Assign If Compare Return return:yes Assign Call Assign Compare Call If Compare Raise Call Call Assign Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "reload_cubin_from_raw",
    "source_code": "def reload_cubin_from_raw(self, filepath: str) -> str:\n    if self.cubin_path is None:\n        assert self.cubin_raw is not None\n        os.makedirs(os.path.dirname(filepath), exist_ok=True)\n        with open(filepath, 'wb') as f:\n            f.write(self.cubin_raw)\n            self.cubin_path = filepath\n    return self.cubin_path",
    "docstring": "If the cubin file triton generated gets deleted under us, we can reload it from the raw cubin file.",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\runtime\\static_cuda_launcher.py",
    "ast_data": "FunctionDef name:reload_cubin_from_raw arg:self arg:filepath arguments arg arg If Compare Compare Call Call With Call Call Assign Return return:yes"
  },
  {
    "library": "scipy",
    "name": "stop_criteria",
    "source_code": "def stop_criteria(self, state, z, last_iteration_failed, optimality, constr_violation, trust_radius, penalty, cg_info):\n    x = self.get_variables(z)\n    if self.global_stop_criteria(state, x, last_iteration_failed, trust_radius, penalty, cg_info, self.barrier_parameter, self.tolerance):\n        self.terminate = True\n        return True\n    else:\n        g_cond = optimality < self.tolerance and constr_violation < self.tolerance\n        x_cond = trust_radius < self.xtol\n        return g_cond or x_cond",
    "docstring": "Stop criteria to the barrier problem. The criteria here proposed is similar to formula (2.3) from [1]_, p.879.",
    "type": "method",
    "file_path": "scipy\\scipy\\optimize\\_trustregion_constr\\tr_interior_point.py",
    "ast_data": "FunctionDef name:stop_criteria arg:self arg:state arg:z arg:last_iteration_failed arg:optimality arg:constr_violation arg:trust_radius arg:penalty arg:cg_info arguments arg arg arg arg arg arg arg arg arg Assign Call If Call Assign Return return:yes Assign BoolOp Compare Compare Assign Compare Return return:yes BoolOp"
  },
  {
    "library": "tensorflow",
    "name": "write_dirpath",
    "source_code": "def write_dirpath(dirpath, strategy):\n    if strategy is None:\n        strategy = distribute_lib.get_strategy()\n    if strategy is None:\n        return dirpath\n    if not strategy.extended._in_multi_worker_mode():\n        return dirpath\n    if strategy.extended.should_checkpoint:\n        return dirpath\n    return _get_temp_dir(dirpath, strategy)",
    "docstring": "Returns the writing dir that should be used to save file distributedly. would be created if it doesn't exist. Args: dirpath: Original dirpath that would be used without distribution. strategy: The tf.distribute strategy object currently used. Returns: The writing dir path that should be used to save with distribution.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\distribute\\distributed_file_utils.py",
    "ast_data": "FunctionDef name:write_dirpath arg:dirpath arg:strategy arguments arg arg If Compare Assign Call If Compare Return return:yes If Call Return return:yes If Return return:yes Return return:yes Call"
  },
  {
    "library": "django",
    "name": "set_rollback",
    "source_code": "def set_rollback(self, rollback):\n    if not self.in_atomic_block:\n        raise TransactionManagementError(\"The rollback flag doesn't work outside of an 'atomic' block.\")\n    self.needs_rollback = rollback",
    "docstring": "Set or unset the \"needs rollback\" flag -- for *advanced use* only.",
    "type": "method",
    "file_path": "django\\django\\db\\backends\\base\\base.py",
    "ast_data": "FunctionDef name:set_rollback arg:self arg:rollback arguments arg arg If Raise Call Assign"
  },
  {
    "library": "tensorflow",
    "name": "TensorProtocol",
    "source_code": "@runtime_checkable\nclass TensorProtocol(Protocol):\n\n    def __tf_tensor__(self, dtype=None, name=None):\n        pass",
    "docstring": "Protocol type for objects that can be converted to Tensor.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\types\\core.py",
    "ast_data": "ClassDef name:TensorProtocol FunctionDef name:__tf_tensor__ arg:self arg:dtype arg:name arguments arg arg arg"
  },
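  A hedged sketch of the protocol above: any object implementing __tf_tensor__ becomes convertible via tf.convert_to_tensor (and, since the protocol is runtime_checkable, passes isinstance checks). Box is a purely illustrative type.

  ```python
  import tensorflow as tf

  class Box:
      def __init__(self, value):
          self.value = value

      def __tf_tensor__(self, dtype=None, name=None):
          # name is accepted per the protocol signature but ignored here.
          return tf.constant(self.value, dtype=dtype)

  t = tf.convert_to_tensor(Box([1.0, 2.0]))
  print(t.shape, t.dtype)  # (2,) float32
  ```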
  {
    "library": "tensorflow",
    "name": "merge",
    "source_code": "@tf_export(v1=['summary.merge'])\ndef merge(inputs, collections=None, name=None):\n    if _context.executing_eagerly():\n        raise RuntimeError('Merging tf.summary.* ops is not compatible with eager execution. Use tf.contrib.summary instead.')\n    if _distribute_summary_op_util.skip_summary():\n        return _constant_op.constant('')\n    name = _summary_op_util.clean_tag(name)\n    with _ops.name_scope(name, 'Merge', inputs):\n        val = _gen_logging_ops.merge_summary(inputs=inputs, name=name)\n        _summary_op_util.collect(val, collections, [])\n    return val",
    "docstring": "Merges summaries. This op creates a []( protocol buffer that contains the union of all the values in the input summaries. When the Op is run, it reports an error if multiple values in the summaries to merge use the same tag. Args: inputs: A list of objects containing serialized protocol buffers. collections: Optional list of graph collections keys. The new summary op is added to these collections. Defaults to . name: A name for the operation (optional). Returns: A scalar of type . The serialized protocol buffer resulting from the merging. Raises: RuntimeError: If called with eager mode enabled. @compatibility(TF2) This API is not compatible with eager execution or . To migrate to TF2, this API can be omitted entirely, because in TF2 individual summary ops, like , write directly to the default summary writer if one is active. Thus, it's not necessary to merge summaries or to manually add the resulting merged summary output to the writer. See the usage example shown below. For a comprehensive migration guide, please follow [Migrating tf.summary usage to TF 2.0]( #### TF1 & TF2 Usage Example TF1: TF2: @end_compatibility",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\summary\\summary.py",
    "ast_data": "FunctionDef name:merge arg:inputs arg:collections arg:name arguments arg arg arg If Call Raise Call If Call Return return:yes Call Assign Call With Call Assign Call Call Return return:yes Call"
  },
  {
    "library": "cryptography",
    "name": "digest_size",
    "source_code": "@property\n@abc.abstractmethod\ndef digest_size(self) -> int:\n    pass",
    "docstring": "The size of the resulting digest in bytes.",
    "type": "method",
    "file_path": "cryptography\\src\\cryptography\\hazmat\\primitives\\hashes.py",
    "ast_data": "FunctionDef name:digest_size arg:self arguments arg"
  },
  {
    "library": "cryptography",
    "name": "sign",
    "source_code": "@abc.abstractmethod\ndef sign(self, data: Buffer) -> bytes:\n    pass",
    "docstring": "Signs the data.",
    "type": "method",
    "file_path": "cryptography\\src\\cryptography\\hazmat\\primitives\\asymmetric\\ed25519.py",
    "ast_data": "FunctionDef name:sign arg:self arg:data arguments arg arg"
  },
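  Concrete usage of this abstract interface via the library's documented Ed25519 API: sign bytes with a private key, verify with the matching public key.

  ```python
  from cryptography.hazmat.primitives.asymmetric.ed25519 import Ed25519PrivateKey

  private_key = Ed25519PrivateKey.generate()
  signature = private_key.sign(b"my authenticated message")

  # verify() raises cryptography.exceptions.InvalidSignature on mismatch.
  private_key.public_key().verify(signature, b"my authenticated message")
  ```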
  {
    "library": "pandas",
    "name": "_set_encoding",
    "source_code": "def _set_encoding(self) -> None:\n    if self._format_version < 118:\n        self._encoding = 'latin-1'\n    else:\n        self._encoding = 'utf-8'",
    "docstring": "Set string encoding which depends on file version",
    "type": "method",
    "file_path": "pandas\\pandas\\io\\stata.py",
    "ast_data": "FunctionDef name:_set_encoding arg:self arguments arg If Compare Assign Assign"
  },
  {
    "library": "matplotlib",
    "name": "_parse_to_version_info",
    "source_code": "def _parse_to_version_info(version_str):\n    v = parse_version(version_str)\n    if v.pre is None and v.post is None and (v.dev is None):\n        return _VersionInfo(v.major, v.minor, v.micro, 'final', 0)\n    elif v.dev is not None:\n        return _VersionInfo(v.major, v.minor, v.micro, 'alpha', v.dev)\n    elif v.pre is not None:\n        releaselevel = {'a': 'alpha', 'b': 'beta', 'rc': 'candidate'}.get(v.pre[0], 'alpha')\n        return _VersionInfo(v.major, v.minor, v.micro, releaselevel, v.pre[1])\n    else:\n        return _VersionInfo(v.major, v.minor, v.micro + 1, 'alpha', v.post)",
    "docstring": "Parse a version string to a namedtuple analogous to sys.version_info. See:",
    "type": "function",
    "file_path": "matplotlib\\lib\\matplotlib\\__init__.py",
    "ast_data": "FunctionDef name:_parse_to_version_info arg:version_str arguments arg Assign Call If BoolOp Compare Compare Compare Return return:yes Call If Compare Return return:yes Call If Compare Assign Call Return return:yes Call Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "get_underline_thickness",
    "source_code": "def get_underline_thickness(self, font: str, fontsize: float, dpi: float) -> float:\n    raise NotImplementedError()",
    "docstring": "Get the line thickness that matches the given font. Used as a base unit for drawing lines such as in a fraction or radical.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\_mathtext.py",
    "ast_data": "FunctionDef name:get_underline_thickness arg:self arg:font arg:fontsize arg:dpi arguments arg arg arg arg Raise Call"
  },
  {
    "library": "scikit-learn",
    "name": "predict_proba",
    "source_code": "@available_if(_check_proba)\ndef predict_proba(self, X):\n    X = self._validate_for_predict(X)\n    if self.probA_.size == 0 or self.probB_.size == 0:\n        raise NotFittedError('predict_proba is not available when fitted with probability=False')\n    pred_proba = self._sparse_predict_proba if self._sparse else self._dense_predict_proba\n    return pred_proba(X)",
    "docstring": "Compute probabilities of possible outcomes for samples in X. The model needs to have probability information computed at training time: fit with attribute set to True. Parameters ---------- X : array-like of shape (n_samples, n_features) For kernel=\"precomputed\", the expected shape of X is (n_samples_test, n_samples_train). Returns ------- T : ndarray of shape (n_samples, n_classes) Returns the probability of the sample for each class in the model. The columns correspond to the classes in sorted order, as they appear in the attribute :term:. Notes ----- The probability model is created using cross validation, so the results can be slightly different than those obtained by predict. Also, it will produce meaningless results on very small datasets.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\svm\\_base.py",
    "ast_data": "FunctionDef name:predict_proba arg:self arg:X arguments arg arg Assign Call If BoolOp Compare Compare Raise Call Assign Return return:yes Call Call"
  },
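  A usage sketch for the probability path above (assumes scikit-learn): probability=True at fit time enables the cross-validated probability calibration that predict_proba relies on.

  ```python
  from sklearn.datasets import load_iris
  from sklearn.svm import SVC

  X, y = load_iris(return_X_y=True)
  clf = SVC(probability=True, random_state=0).fit(X, y)

  proba = clf.predict_proba(X[:2])
  print(proba.shape, proba.sum(axis=1))  # (2, 3); rows sum to 1 (up to float error)
  ```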
  {
    "library": "pytorch",
    "name": "Where",
    "source_code": "class Where(sympy.Function):\n    nargs: tuple[int, ...] = (3,)\n    precedence: int = 35\n\n    def _eval_is_integer(self) -> Optional[bool]:\n        return True if self.args[1].is_integer and self.args[2].is_integer else None\n\n    def _eval_is_nonnegative(self) -> Optional[bool]:\n        return True if self.args[1].is_nonnegative and self.args[2].is_nonnegative else None\n\n    def _eval_is_positive(self) -> Optional[bool]:\n        return True if self.args[1].is_positive and self.args[2].is_positive else None\n\n    @classmethod\n    def eval(cls, c: sympy.Basic, p: sympy.Basic, q: sympy.Basic) -> Optional[sympy.Basic]:\n        if c == sympy.true:\n            return p\n        elif c == sympy.false:\n            return q\n        return None",
    "docstring": "Good ol' ternary operator",
    "type": "class",
    "file_path": "pytorch\\torch\\utils\\_sympy\\functions.py",
    "ast_data": "ClassDef name:Where FunctionDef name:_eval_is_integer arg:self arguments arg Return return:yes BoolOp FunctionDef name:_eval_is_nonnegative arg:self arguments arg Return return:yes BoolOp FunctionDef name:_eval_is_positive arg:self arguments arg Return return:yes BoolOp FunctionDef name:eval arg:cls arg:c arg:p arg:q arguments arg arg arg arg If Compare Return return:yes If Compare Return return:yes Return return:no"
  },
  {
    "library": "pandas",
    "name": "_empty",
    "source_code": "@classmethod\ndef _empty(cls, shape: Shape, dtype: ExtensionDtype) -> Self:\n    arr = cls._from_sequence([], dtype=dtype)\n    backing = np.empty(shape, dtype=arr._ndarray.dtype)\n    return arr._from_backing_data(backing)",
    "docstring": "Analogous to np.empty(shape, dtype=dtype) Parameters ---------- shape : tuple[int] dtype : ExtensionDtype",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\arrays\\_mixins.py",
    "ast_data": "FunctionDef name:_empty arg:cls arg:shape arg:dtype arguments arg arg arg Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "authlib",
    "name": "get_server_metadata",
    "source_code": "def get_server_metadata(self):\n    raise NotImplementedError()",
    "docstring": "Return server metadata which includes supported grant types, response types and etc.",
    "type": "method",
    "file_path": "authlib\\authlib\\oauth2\\rfc7592\\endpoint.py",
    "ast_data": "FunctionDef name:get_server_metadata arg:self arguments arg Raise Call"
  },
  {
    "library": "scipy",
    "name": "__class_getitem__",
    "source_code": "@classmethod\ndef __class_getitem__(cls, arg, /):\n    from types import GenericAlias\n    return GenericAlias(cls, arg)",
    "docstring": "Return a parametrized wrapper around the type. .. versionadded:: 1.16.0 Returns ------- alias : types.GenericAlias A parametrized type. Examples -------- >>> import numpy as np >>> from scipy.sparse import coo_matrix >>> coo_matrix[np.int8] scipy.sparse._coo.coo_matrix[numpy.int8]",
    "type": "method",
    "file_path": "scipy\\scipy\\sparse\\_matrix.py",
    "ast_data": "FunctionDef name:__class_getitem__ arguments arg arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "wrapped_convert",
    "source_code": "def wrapped_convert(model_flags_str, converter_flags_str, input_data_str, debug_info_str):\n    return _pywrap_converter_api.Convert(model_flags_str, converter_flags_str, input_data_str, False, debug_info_str, py_function_lib.PyFunctionLibrary())",
    "docstring": "Wraps Convert with lazy loader.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\compiler\\mlir\\lite\\python\\wrap_converter.py",
    "ast_data": "FunctionDef name:wrapped_convert arg:model_flags_str arg:converter_flags_str arg:input_data_str arg:debug_info_str arguments arg arg arg arg Return return:yes Call Call"
  },
  {
    "library": "matplotlib",
    "name": "get_height",
    "source_code": "def get_height(self):\n    return self._height",
    "docstring": "Return the height of the rectangle.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\patches.py",
    "ast_data": "FunctionDef name:get_height arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "check_dtype",
    "source_code": "def check_dtype(arg, dtype):\n    if arg.dtype.base_dtype != dtype:\n        raise TypeError(f'Expected argument to have dtype {dtype}. Found: {arg.dtype} in tensor {arg}.')",
    "docstring": "Check that arg.dtype == self.dtype.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\linalg\\linear_operator_util.py",
    "ast_data": "FunctionDef name:check_dtype arg:arg arg:dtype arguments arg arg If Compare Raise Call"
  },
  {
    "library": "tensorflow",
    "name": "write_metrics_json",
    "source_code": "def write_metrics_json(out: TextIO, metrics: list[list[str]]):\n    data = {}\n    for name, value, unit in metrics:\n        data[name] = {'value': value, 'unit': unit}\n    json.dump(data, out, sort_keys=True)\n    out.write('\\n')",
    "docstring": "Formats metrics in JSON.",
    "type": "function",
    "file_path": "tensorflow\\third_party\\xla\\xla\\backends\\gpu\\codegen\\tools\\ncu_rep_lib.py",
    "ast_data": "FunctionDef name:write_metrics_json arg:out arg:metrics arguments arg arg Assign For Assign Call Call"
  },
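Since this helper lives in XLA's tooling tree and is not pip-importable, here is a self-contained restatement of its behavior driven through an in-memory buffer; the metric names are made up for illustration.

```python
import io
import json

def write_metrics_json(out, metrics):
    # Same output shape as the record above: {name: {"value": ..., "unit": ...}}
    data = {name: {"value": value, "unit": unit} for name, value, unit in metrics}
    json.dump(data, out, sort_keys=True)
    out.write("\n")

buf = io.StringIO()
write_metrics_json(buf, [("duration", "12.5", "ms"), ("occupancy", "0.81", "%")])
print(buf.getvalue())
# {"duration": {"unit": "ms", "value": "12.5"}, "occupancy": {"unit": "%", "value": "0.81"}}
```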
  {
    "library": "pytorch",
    "name": "mean",
    "source_code": "@property\ndef mean(self) -> Tensor:\n    raise NotImplementedError",
    "docstring": "Returns the mean of the distribution.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributions\\distribution.py",
    "ast_data": "FunctionDef name:mean arg:self arguments arg Raise"
  },
  {
    "library": "pytorch",
    "name": "put",
    "source_code": "@classmethod\ndef put(cls, kernel_hash: str, device: int) -> None:\n    if (entries := cls._entries) is not None:\n        entries.append(TritonBundleEntry(kernel_hash, device, triton_cache_dir(device)))",
    "docstring": "Lazily observes that we have seen a Triton kernel compilation. Remembers it for when collect is later called.",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\triton_bundler.py",
    "ast_data": "FunctionDef name:put arg:cls arg:kernel_hash arg:device arguments arg arg arg If Compare Call Call Call"
  },
  {
    "library": "scikit-learn",
    "name": "BadAttributeFormat",
    "source_code": "class BadAttributeFormat(ArffException):\n    message = 'Bad @ATTRIBUTE format, at line %d.'",
    "docstring": "Error raised when some attribute declaration is in an invalid format.",
    "type": "class",
    "file_path": "scikit-learn\\sklearn\\externals\\_arff.py",
    "ast_data": "ClassDef name:BadAttributeFormat Assign"
  },
  {
    "library": "scipy",
    "name": "average",
    "source_code": "@lazy_cython\ndef average(y):\n    return linkage(y, method='average', metric='euclidean')",
    "docstring": "Perform average/UPGMA linkage on a condensed distance matrix. Parameters ---------- y : ndarray The upper triangular of the distance matrix. The result of `linkagescipy.cluster.hierarchy.linkagescipy.cluster.hierarchy.fclusterscipy.cluster.hierarchy.dendrogram` can be used to generate a plot of the dendrogram.",
    "type": "function",
    "file_path": "scipy\\scipy\\cluster\\hierarchy.py",
    "ast_data": "FunctionDef name:average arg:y arguments arg Return return:yes Call"
  },
  {
    "library": "authlib",
    "name": "create_s256_code_challenge",
    "source_code": "def create_s256_code_challenge(code_verifier):\n    data = hashlib.sha256(to_bytes(code_verifier, 'ascii')).digest()\n    return to_unicode(urlsafe_b64encode(data))",
    "docstring": "Create S256 code_challenge with the given code_verifier.",
    "type": "function",
    "file_path": "authlib\\authlib\\oauth2\\rfc7636\\challenge.py",
    "ast_data": "FunctionDef name:create_s256_code_challenge arg:code_verifier arguments arg Assign Call Call Call Return return:yes Call Call"
  },
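A standalone sketch of the S256 transform from RFC 7636 using only the standard library. authlib's `to_bytes`/`to_unicode`/`urlsafe_b64encode` helpers strip the `=` padding, which `rstrip` reproduces here; the verifier/challenge pair is the RFC's Appendix B test vector.

```python
import base64
import hashlib

def create_s256_code_challenge(code_verifier: str) -> str:
    # SHA-256 of the ASCII verifier, base64url-encoded without '=' padding
    digest = hashlib.sha256(code_verifier.encode("ascii")).digest()
    return base64.urlsafe_b64encode(digest).rstrip(b"=").decode("ascii")

# RFC 7636 Appendix B test vector
verifier = "dBjftJeZ4CVP-mB92K27uhbUJU1p1r_wW1gFWFOEjXk"
assert create_s256_code_challenge(verifier) == "E9Melhoa2OwvFrEMTJguCHaoeK1t8URWbuGJSstw-cM"
```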
  {
    "library": "authlib",
    "name": "validate_requested_scope",
    "source_code": "def validate_requested_scope(self):\n    scope = self.request.payload.scope\n    return self.server.validate_requested_scope(scope)",
    "docstring": "Validate if requested scope is supported by Authorization Server.",
    "type": "method",
    "file_path": "authlib\\authlib\\oauth2\\rfc6749\\grants\\base.py",
    "ast_data": "FunctionDef name:validate_requested_scope arg:self arguments arg Assign Return return:yes Call"
  },
  {
    "library": "cherrypy",
    "name": "index",
    "source_code": "@cherrypy.expose\ndef index(self):\n    return '\\n            <p>\"In Python, how do you create a string of random\\n            characters?\" -- \"Read a Perl file!\"</p>\\n            <p>[<a href=\"../\">Return</a>]</p>'",
    "docstring": "Produce HTTP response body of joke page app index URI.",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\tutorial\\tut04_complex_site.py",
    "ast_data": "FunctionDef name:index arg:self arguments arg Return return:yes"
  },
  {
    "library": "scipy",
    "name": "antiderivative",
    "source_code": "def antiderivative(self, nu=1):\n    if nu <= 0:\n        return self.derivative(-nu)\n    if nu > 1:\n        bp = self\n        for k in range(nu):\n            bp = bp.antiderivative()\n        return bp\n    c, x = (self.c, self.x)\n    k = c.shape[0]\n    c2 = np.zeros((k + 1,) + c.shape[1:], dtype=c.dtype)\n    c2[1:, ...] = np.cumsum(c, axis=0) / k\n    delta = x[1:] - x[:-1]\n    c2 *= delta[(None, slice(None)) + (None,) * (c.ndim - 2)]\n    c2[:, 1:] += np.cumsum(c2[k, :], axis=0)[:-1]\n    if self.extrapolate == 'periodic':\n        extrapolate = False\n    else:\n        extrapolate = self.extrapolate\n    return self.construct_fast(c2, x, extrapolate, axis=self.axis)",
    "docstring": "Construct a new piecewise polynomial representing the antiderivative. Parameters ---------- nu : int, optional Order of antiderivative to evaluate. Default is 1, i.e., compute the first integral. If negative, the derivative is returned. Returns ------- bp : BPoly Piecewise polynomial of order k + nu representing the antiderivative of this polynomial. Notes ----- If antiderivative is computed and ``, it will be set to False for the returned instance. This is done because the antiderivative is no longer periodic and its correct evaluation outside of the initially given x interval is difficult.",
    "type": "method",
    "file_path": "scipy\\scipy\\interpolate\\_interpolate.py",
    "ast_data": "FunctionDef name:antiderivative arg:self arg:nu arguments arg arg If Compare Return return:yes Call If Compare Assign For Call Assign Call Return return:yes Assign Assign Assign Call Assign Call Assign Call Call If Compare Assign Assign Return return:yes Call"
  },
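A usage sketch through the public scipy.interpolate.BPoly class, which exposes this method; the polynomial is chosen so the antiderivative is easy to check by hand.

```python
import numpy as np
from scipy.interpolate import BPoly

# f(x) = 2x on [0, 1] in Bernstein form: degree-1 coefficients [0, 2]
bp = BPoly([[0.0], [2.0]], [0.0, 1.0])

ibp = bp.antiderivative()          # F(x) = x**2 with F(0) = 0
print(ibp(0.5))                    # 0.25
print(bp.antiderivative(2)(1.0))   # double antiderivative x**3/3 at 1 -> ~0.3333
print(ibp.derivative()(0.5))       # negative nu is a derivative: back to f, 1.0
```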
  {
    "library": "tensorflow",
    "name": "_assert_at_most_n_true",
    "source_code": "def _assert_at_most_n_true(predicates, n, msg):\n    preds_c = array_ops_stack.stack(predicates, name='preds_c')\n    num_true_conditions = math_ops.reduce_sum(math_ops.cast(preds_c, dtypes.int32), name='num_true_conds')\n    condition = math_ops.less_equal(num_true_conditions, constant_op.constant(n, name='n_true_conds'))\n    preds_names = ', '.join((getattr(p, 'name', '?') for p in predicates))\n    error_msg = ['%s: more than %d conditions (%s) evaluated as True:' % (msg, n, preds_names), preds_c]\n    return control_flow_assert.Assert(condition, data=error_msg, summarize=len(predicates))",
    "docstring": "Returns an Assert op that checks that at most n predicates are True. Args: predicates: list of bool scalar tensors. n: maximum number of true predicates allowed. msg: Error message.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\control_flow_case.py",
    "ast_data": "FunctionDef name:_assert_at_most_n_true arg:predicates arg:n arg:msg arguments arg arg arg Assign Call Assign Call Call Assign Call Call Assign Call Call Assign Return return:yes Call Call"
  },
  {
    "library": "virtualenv",
    "name": "__init__",
    "source_code": "def __init__(self, options, interpreter) -> None:\n    self.interpreter = interpreter\n    self._debug = None\n    self.dest = Path(options.dest)\n    self.clear = options.clear\n    self.no_vcs_ignore = options.no_vcs_ignore\n    self.pyenv_cfg = PyEnvCfg.from_folder(self.dest)\n    self.app_data = options.app_data\n    self.env = options.env",
    "docstring": "Construct a new virtual environment creator. :param options: the CLI option as parsed from :meth: :param interpreter: the interpreter to create virtual environment from",
    "type": "method",
    "file_path": "virtualenv\\src\\virtualenv\\create\\creator.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:options arg:interpreter arguments arg arg arg Assign Assign Assign Call Assign Assign Assign Call Assign Assign"
  },
  {
    "library": "pandas",
    "name": "to_flat_index",
    "source_code": "def to_flat_index(self) -> Self:\n    return self",
    "docstring": "Identity method. This is implemented for compatibility with subclass implementations when chaining. Returns ------- pd.Index Caller. See Also -------- MultiIndex.to_flat_index : Subclass implementation.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\indexes\\base.py",
    "ast_data": "FunctionDef name:to_flat_index arg:self arguments arg Return return:yes"
  },
  {
    "library": "kornia",
    "name": "undistort",
    "source_code": "def undistort(self, params: Tensor, points: Vector2) -> Vector2:\n    fx, fy, cx, cy = (params[..., 0], params[..., 1], params[..., 2], params[..., 3])\n    x = (points.x - cx) / fx\n    y = (points.y - cy) / fy\n    return Vector2.from_coords(x, y)",
    "docstring": "Undistort one or more Vector2 points using the affine transform. Args: params: Tensor representing the affine transform parameters. points: Vector2 representing the points to undistort. Returns: Vector2 representing the undistorted points. Example: >>> params = Tensor([1., 2., 3., 4.]) >>> points = Vector2.from_coords(1., 2.) >>> AffineTransform().undistort(params, points) x: -2.0 y: -1.0",
    "type": "method",
    "file_path": "kornia\\kornia\\sensors\\camera\\distortion_model.py",
    "ast_data": "FunctionDef name:undistort arg:self arg:params arg:points arguments arg arg arg Assign Assign Assign Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "DeQuantize",
    "source_code": "class DeQuantize(torch.nn.Module):\n\n    def forward(self, Xq):\n        return Xq.dequantize()\n\n    @staticmethod\n    def from_float(mod, use_precomputed_fake_quant=False):\n        return DeQuantize()",
    "docstring": "Dequantizes an incoming tensor Examples:: >>> input = torch.tensor([[1., -1.], [1., -1.]]) >>> scale, zero_point, dtype = 1.0, 2, torch.qint8 >>> qm = Quantize(scale, zero_point, dtype) >>> # xdoctest: +SKIP >>> quantized_input = qm(input) >>> dqm = DeQuantize() >>> dequantized = dqm(quantized_input) >>> print(dequantized) tensor([[ 1., -1.], [ 1., -1.]], dtype=torch.float32)",
    "type": "class",
    "file_path": "pytorch\\torch\\ao\\nn\\quantized\\modules\\__init__.py",
    "ast_data": "ClassDef name:DeQuantize FunctionDef name:forward arg:self arg:Xq arguments arg arg Return return:yes Call FunctionDef name:from_float arg:mod arg:use_precomputed_fake_quant arguments arg arg Return return:yes Call"
  },
  {
    "library": "virtualenv",
    "name": "exe_stem",
    "source_code": "@classmethod\ndef exe_stem(cls):\n    raise NotImplementedError",
    "docstring": "Executable name without suffix - there seems to be no standard way to get this without creating it.",
    "type": "method",
    "file_path": "virtualenv\\src\\virtualenv\\create\\describe.py",
    "ast_data": "FunctionDef name:exe_stem arg:cls arguments arg Raise"
  },
  {
    "library": "tensorflow",
    "name": "get_variable_scope_store",
    "source_code": "def get_variable_scope_store():\n    scope_store = ops.get_collection(_VARSCOPESTORE_KEY)\n    if not scope_store:\n        scope_store = _VariableScopeStore()\n        ops.add_to_collection(_VARSCOPESTORE_KEY, scope_store)\n    else:\n        scope_store = scope_store[0]\n    return scope_store",
    "docstring": "Returns the variable scope store for current thread.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\variable_scope.py",
    "ast_data": "FunctionDef name:get_variable_scope_store arguments Assign Call If Assign Call Call Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_assert_splits_match",
    "source_code": "def _assert_splits_match(nested_splits_lists):\n    error_msg = 'Inputs must have identical ragged splits'\n    for splits_list in nested_splits_lists:\n        if len(splits_list) != len(nested_splits_lists[0]):\n            raise ValueError(error_msg)\n    return [check_ops.assert_equal(s1, s2, message=error_msg) for splits_list in nested_splits_lists[1:] for s1, s2 in zip(nested_splits_lists[0], splits_list)]",
    "docstring": "Checks that the given splits lists are identical. Performs static tests to ensure that the given splits lists are identical, and returns a list of control dependency op tensors that check that they are fully identical. Args: nested_splits_lists: A list of nested_splits_lists, where each split_list is a list of tensors from a , ordered from outermost ragged dimension to innermost ragged dimension. Returns: A list of control dependency op tensors. Raises: ValueError: If the splits are not identical.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\utils\\metrics_utils.py",
    "ast_data": "FunctionDef name:_assert_splits_match arg:nested_splits_lists arguments arg Assign For If Compare Call Call Raise Call Return return:yes Call Call"
  },
  {
    "library": "matplotlib",
    "name": "AsteriskPolygonCollection",
    "source_code": "class AsteriskPolygonCollection(RegularPolyCollection):\n    _path_generator = mpath.Path.unit_regular_asterisk",
    "docstring": "Draw a collection of regular asterisks with *numsides* points.",
    "type": "class",
    "file_path": "matplotlib\\lib\\matplotlib\\collections.py",
    "ast_data": "ClassDef name:AsteriskPolygonCollection Assign"
  },
  {
    "library": "pytorch",
    "name": "structseq",
    "source_code": "class structseq(tuple[_T_co, ...]):\n    __slots__: ClassVar[tuple[()]] = ()\n    n_fields: Final[int]\n    n_sequence_fields: Final[int]\n    n_unnamed_fields: Final[int]\n\n    def __init_subclass__(cls) -> NoReturn:\n        raise TypeError(\"type 'structseq' is not an acceptable base type\")\n\n    def __new__(cls: type[Self], sequence: Iterable[_T_co], dict: dict[str, Any]=...) -> Self:\n        raise NotImplementedError",
    "docstring": "A generic type stub for CPython's `` type.",
    "type": "class",
    "file_path": "pytorch\\torch\\utils\\_pytree.py",
    "ast_data": "ClassDef name:structseq FunctionDef name:__init_subclass__ arg:cls arguments arg Raise Call FunctionDef name:__new__ arg:cls arg:sequence arg:dict arguments arg arg arg Raise"
  },
  {
    "library": "scikit-learn",
    "name": "is_python_scalar",
    "source_code": "def is_python_scalar(x: object) -> TypeIs[complex]:\n    return isinstance(x, int | float | complex) and (not is_numpy_array(x))",
    "docstring": "Return True if is a Python scalar, False otherwise.",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\externals\\array_api_extra\\_lib\\_utils\\_helpers.py",
    "ast_data": "FunctionDef name:is_python_scalar arg:x arguments arg Return return:yes BoolOp Call Call"
  },
  {
    "library": "pytorch",
    "name": "_is_cuda_graph_recorded_tensor",
    "source_code": "def _is_cuda_graph_recorded_tensor(self, t: torch.Tensor) -> bool:\n    for output_refs in self.path_weakrefs:\n        for storage_weak_ref in output_refs:\n            if storage_weak_ref is None:\n                continue\n            data_ptr = storage_weak_ref.data_ptr()\n            if t.untyped_storage().data_ptr() == data_ptr:\n                return True\n    return False",
    "docstring": "Is this tensor an output of a node in this path",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\cudagraph_trees.py",
    "ast_data": "FunctionDef name:_is_cuda_graph_recorded_tensor arg:self arg:t arguments arg arg For For If Compare Assign Call If Compare Call Call Return return:yes Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "are_deterministic_algorithms_enabled",
    "source_code": "def are_deterministic_algorithms_enabled() -> builtins.bool:\n    return _C._get_deterministic_algorithms()",
    "docstring": "Returns True if the global deterministic flag is turned on. Refer to :func: documentation for more details.",
    "type": "function",
    "file_path": "pytorch\\torch\\__init__.py",
    "ast_data": "FunctionDef name:are_deterministic_algorithms_enabled arguments Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "_conv_bn_add_extra_inputs_getter_right",
    "source_code": "def _conv_bn_add_extra_inputs_getter_right(pattern):\n    _, extra_input, _bn_conv = pattern\n    return [extra_input]",
    "docstring": "get inputs pattern for extra inputs, inputs for root node are assumed to be copied over from root node to the fused node",
    "type": "function",
    "file_path": "pytorch\\torch\\ao\\quantization\\backend_config\\onednn.py",
    "ast_data": "FunctionDef name:_conv_bn_add_extra_inputs_getter_right arg:pattern arguments arg Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "most_specific_common_supertype",
    "source_code": "def most_specific_common_supertype(self, others: Sequence[trace.TraceType]) -> Optional['NamedTuple']:\n    if not all((isinstance(other, NamedTuple) and self.type_name == other.type_name and (self.attribute_names == other.attribute_names) for other in others)):\n        return None\n    supertyped_attributes = self.attributes.most_specific_common_supertype([other.attributes for other in others])\n    if supertyped_attributes is None:\n        return None\n    return NamedTuple(self.type_name, self.attribute_names, supertyped_attributes.components, self._placeholder_type)",
    "docstring": "See base class.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\core\\function\\trace_type\\default_types.py",
    "ast_data": "FunctionDef name:most_specific_common_supertype arg:self arg:others arguments arg arg If Call BoolOp Call Compare Compare Return return:no Assign Call If Compare Return return:no Return return:yes Call"
  },
  {
    "library": "kornia",
    "name": "convert_affinematrix_to_homography3d",
    "source_code": "def convert_affinematrix_to_homography3d(A: Tensor) -> Tensor:\n    if not isinstance(A, Tensor):\n        raise TypeError(f'Input type is not a Tensor. Got {type(A)}')\n    if not (len(A.shape) == 3 and A.shape[-2:] == (3, 4)):\n        raise ValueError(f'Input matrix must be a Bx3x4 tensor. Got {A.shape}')\n    return _convert_affinematrix_to_homography_impl(A)",
    "docstring": "Convert batch of 3d affine matrices. Args: A: the affine matrix with shape :math:. Returns: the homography matrix with shape of :math:. Examples: >>> A = tensor([[[1., 0., 0., 0.], ... [0., 1., 0., 0.], ... [0., 0., 1., 0.]]]) >>> convert_affinematrix_to_homography3d(A) tensor([[[1., 0., 0., 0.], [0., 1., 0., 0.], [0., 0., 1., 0.], [0., 0., 0., 1.]]])",
    "type": "function",
    "file_path": "kornia\\kornia\\geometry\\conversions.py",
    "ast_data": "FunctionDef name:convert_affinematrix_to_homography3d arg:A arguments arg If Call Raise Call Call If BoolOp Compare Call Compare Raise Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "decreasing_map",
    "source_code": "@staticmethod\ndef decreasing_map(x: Union[AllIn, AllVR], fn: AllFn) -> AllVR:\n    x = ValueRanges.wrap(x)\n    return ValueRanges(fn(x.upper), fn(x.lower))",
    "docstring": "Decreasing: x f(x) >= f(y).",
    "type": "method",
    "file_path": "pytorch\\torch\\utils\\_sympy\\value_ranges.py",
    "ast_data": "FunctionDef name:decreasing_map arg:x arg:fn arguments arg arg Assign Call Return return:yes Call Call Call"
  },
  {
    "library": "scikit-learn",
    "name": "inverse_transform",
    "source_code": "def inverse_transform(self, y):\n    check_is_fitted(self)\n    xp, _ = get_namespace(y)\n    y = column_or_1d(y, warn=True)\n    if _num_samples(y) == 0:\n        return xp.asarray([])\n    diff = xpx.setdiff1d(y, xp.arange(self.classes_.shape[0], device=device(y)), xp=xp)\n    if diff.shape[0]:\n        raise ValueError('y contains previously unseen labels: %s' % str(diff))\n    y = xp.asarray(y)\n    return xp.take(self.classes_, y, axis=0)",
    "docstring": "Transform labels back to original encoding. Parameters ---------- y : array-like of shape (n_samples,) Target values. Returns ------- y_original : ndarray of shape (n_samples,) Original encoding.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\preprocessing\\_label.py",
    "ast_data": "FunctionDef name:inverse_transform arg:self arg:y arguments arg arg Call Assign Call Assign Call If Compare Call Return return:yes Call Assign Call Call Call If Raise Call Call Assign Call Return return:yes Call"
  },
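A round-trip sketch via the public LabelEncoder API, showing the sorted-class encoding this method inverts and the unseen-label check it performs.

```python
from sklearn.preprocessing import LabelEncoder

le = LabelEncoder()
codes = le.fit_transform(["paris", "tokyo", "paris", "amsterdam"])
print(le.classes_)                  # ['amsterdam' 'paris' 'tokyo'] (sorted)
print(codes)                        # [1 2 1 0]
print(le.inverse_transform(codes))  # ['paris' 'tokyo' 'paris' 'amsterdam']
# le.inverse_transform([3])         # would raise ValueError: unseen label
```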
  {
    "library": "tensorflow",
    "name": "_check_call_args",
    "source_code": "def _check_call_args(self, method_name):\n    fullargspec = self._call_full_argspec\n    if fullargspec.defaults:\n        positional_args = fullargspec.args[:-len(fullargspec.defaults)]\n    else:\n        positional_args = fullargspec.args\n    if 'training' in positional_args:\n        positional_args.remove('training')\n    if len(positional_args) > 2:\n        extra_args = positional_args[2:]\n        raise ValueError('Models passed to `' + method_name + '` can only have `training` and the first argument in `call` as positional arguments, found: ' + str(extra_args) + '.')",
    "docstring": "Check that has only one positional arg.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\training.py",
    "ast_data": "FunctionDef name:_check_call_args arg:self arg:method_name arguments arg arg Assign If Assign Call Assign If Compare Call If Compare Call Assign Raise Call Call"
  },
  {
    "library": "pytorch",
    "name": "example",
    "source_code": "@staticmethod\ndef example(t: type[T]) -> Optional[T]:\n    return TypeExemplars.TYPE_EXEMPLARS.get(t.__name__, None)",
    "docstring": "Return an example of a class.",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\fuzzer.py",
    "ast_data": "FunctionDef name:example arg:t arguments arg Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "make_tex",
    "source_code": "@classmethod\ndef make_tex(cls, tex, fontsize):\n    texfile = cls.get_basefile(tex, fontsize) + '.tex'\n    Path(texfile).write_text(cls._get_tex_source(tex, fontsize), encoding='utf-8')\n    return texfile",
    "docstring": "Generate a tex file to render the tex string at a specific font size. Return the file name.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\texmanager.py",
    "ast_data": "FunctionDef name:make_tex arg:cls arg:tex arg:fontsize arguments arg arg arg Assign Call Call Call Call Return return:yes"
  },
  {
    "library": "django",
    "name": "_check_prepopulated_fields",
    "source_code": "def _check_prepopulated_fields(self, obj):\n    if not isinstance(obj.prepopulated_fields, dict):\n        return must_be('a dictionary', option='prepopulated_fields', obj=obj, id='admin.E026')\n    else:\n        return list(chain.from_iterable((self._check_prepopulated_fields_key(obj, field_name, 'prepopulated_fields') + self._check_prepopulated_fields_value(obj, val, 'prepopulated_fields[\"%s\"]' % field_name) for field_name, val in obj.prepopulated_fields.items())))",
    "docstring": "Check that is a dictionary containing allowed field types.",
    "type": "method",
    "file_path": "django\\django\\contrib\\admin\\checks.py",
    "ast_data": "FunctionDef name:_check_prepopulated_fields arg:self arg:obj arguments arg arg If Call Return return:yes Call Return return:yes Call Call Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "scatter",
    "source_code": "def scatter(inputs, target_gpus, dim=0):\n\n    def scatter_map(obj):\n        if isinstance(obj, torch.Tensor):\n            return Scatter.apply(target_gpus, None, dim, obj)\n        if _is_namedtuple(obj):\n            return [type(obj)(*args) for args in zip(*map(scatter_map, obj))]\n        if isinstance(obj, tuple) and len(obj) > 0:\n            return list(zip(*map(scatter_map, obj)))\n        if isinstance(obj, list) and len(obj) > 0:\n            return [list(i) for i in zip(*map(scatter_map, obj))]\n        if isinstance(obj, dict) and len(obj) > 0:\n            return [type(obj)(i) for i in zip(*map(scatter_map, obj.items()))]\n        return [obj for _ in target_gpus]\n    try:\n        res = scatter_map(inputs)\n    finally:\n        scatter_map = None\n    return res",
    "docstring": "Slice tensors into approximately equal chunks and distributes them across given GPUs. Duplicates references to objects that are not tensors.",
    "type": "function",
    "file_path": "pytorch\\torch\\nn\\parallel\\scatter_gather.py",
    "ast_data": "FunctionDef name:scatter arg:inputs arg:target_gpus arg:dim arguments arg arg arg FunctionDef name:scatter_map arg:obj arguments arg If Call Return return:yes Call If Call Return return:yes Call Call Call Call If BoolOp Call Compare Call Return return:yes Call Call Call If BoolOp Call Compare Call Return return:yes Call Call Call If BoolOp Call Compare Call Return return:yes Call Call Call Call Call Return return:yes Try Assign Call Assign Return return:yes"
  },
  {
    "library": "numpy",
    "name": "_MaskedPrintOption",
    "source_code": "class _MaskedPrintOption:\n\n    def __init__(self, display):\n        self._display = display\n        self._enabled = True\n\n    def display(self):\n        return self._display\n\n    def set_display(self, s):\n        self._display = s\n\n    def enabled(self):\n        return self._enabled\n\n    def enable(self, shrink=1):\n        self._enabled = shrink\n\n    def __str__(self):\n        return str(self._display)\n    __repr__ = __str__",
    "docstring": "Handle the string used to represent missing data in a masked array.",
    "type": "class",
    "file_path": "numpy\\numpy\\ma\\core.py",
    "ast_data": "ClassDef name:_MaskedPrintOption FunctionDef name:__init__ arg:self arg:display arguments arg arg Assign Assign FunctionDef name:display arg:self arguments arg Return return:yes FunctionDef name:set_display arg:self arg:s arguments arg arg Assign FunctionDef name:enabled arg:self arguments arg Return return:yes FunctionDef name:enable arg:self arg:shrink arguments arg arg Assign FunctionDef name:__str__ arg:self arguments arg Return return:yes Call Assign"
  },
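numpy exposes an instance of this class as np.ma.masked_print_option; a short sketch of how changing the display string changes how masked entries render.

```python
import numpy as np

a = np.ma.array([1, 2, 3], mask=[False, True, False])
print(a)  # [1 -- 3]

np.ma.masked_print_option.set_display("N/A")
print(a)  # [1 N/A 3]

np.ma.masked_print_option.set_display("--")  # restore the default display
```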
  {
    "library": "tensorflow",
    "name": "_find_children_hints_in_while_loop",
    "source_code": "def _find_children_hints_in_while_loop(function_def, nodes_mapping):\n    new_nodes = []\n    for node in function_def.node_def:\n        for i, _ in enumerate(node.input):\n            if node.input[i] in nodes_mapping:\n                node.input[i] = nodes_mapping[node.input[i]]\n        new_nodes.append(_copy.deepcopy(node))\n    name_to_seq_num = _extract_topology_sequence_mapping(function_def.node_def)\n    children_hints = _find_all_hints_in_nodes(new_nodes)\n    children_hints_q = []\n    for hint in children_hints.values():\n        _, output_names = hint.flattened_inputs_and_outputs()\n        seq = name_to_seq_num[output_names[0]]\n        for output_name in output_names:\n            seq = min(seq, name_to_seq_num[output_name])\n        children_hints_q.append((seq, hint))\n    children_hints_q.sort(key=lambda tup: tup[0])\n    ordered_children_hints = [x[1] for x in children_hints_q]\n    return (ordered_children_hints, new_nodes)",
    "docstring": "Find children hints and all nodes inside the while loop. Args: function_def: Function def of the while loop. nodes_mapping: While loop input_arg : real node name. Returns: Ordered children hints and all re-mapped nodes inside the while loop.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\lite\\python\\op_hint.py",
    "ast_data": "FunctionDef name:_find_children_hints_in_while_loop arg:function_def arg:nodes_mapping arguments arg arg Assign For For Call If Compare Assign Call Call Assign Call Assign Call Assign For Call Assign Call Assign For Assign Call Call Call arguments arg Assign Return return:yes"
  },
  {
    "library": "scipy",
    "name": "argrelextrema",
    "source_code": "def argrelextrema(data, comparator, axis=0, order=1, mode='clip'):\n    results = _boolrelextrema(data, comparator, axis, order, mode)\n    return np.nonzero(results)",
    "docstring": "Calculate the relative extrema of . Parameters ---------- data : ndarray Array in which to find the relative extrema. comparator : callable Function to use to compare two data points. Should take two arrays as arguments. axis : int, optional Axis over which to select from . Default is 0. order : int, optional How many points on each side to use for the comparison to consider `numpy.takekdatadata` is 1-D. See Also -------- argrelmin, argrelmax Notes ----- .. versionadded:: 0.11.0 Examples -------- >>> import numpy as np >>> from scipy.signal import argrelextrema >>> x = np.array([2, 1, 2, 3, 2, 0, 1, 0]) >>> argrelextrema(x, np.greater) (array([3, 6]),) >>> y = np.array([[1, 2, 1, 2], ... [2, 2, 0, 0], ... [5, 3, 4, 4]]) ... >>> argrelextrema(y, np.less, axis=1) (array([0, 2]), array([2, 1]))",
    "type": "function",
    "file_path": "scipy\\scipy\\signal\\_peak_finding.py",
    "ast_data": "FunctionDef name:argrelextrema arg:data arg:comparator arg:axis arg:order arg:mode arguments arg arg arg arg arg Assign Call Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "nansem",
    "source_code": "@disallow('M8', 'm8')\ndef nansem(values: np.ndarray, *, axis: AxisInt | None=None, skipna: bool=True, ddof: int=1, mask: npt.NDArray[np.bool_] | None=None) -> float:\n    nanvar(values, axis=axis, skipna=skipna, ddof=ddof, mask=mask)\n    mask = _maybe_get_mask(values, skipna, mask)\n    if values.dtype.kind != 'f':\n        values = values.astype('f8')\n    if not skipna and mask is not None and mask.any():\n        return np.nan\n    count, _ = _get_counts_nanvar(values.shape, mask, axis, ddof, values.dtype)\n    var = nanvar(values, axis=axis, skipna=skipna, ddof=ddof, mask=mask)\n    return np.sqrt(var) / np.sqrt(count)",
    "docstring": "Compute the standard error in the mean along given axis while ignoring NaNs Parameters ---------- values : ndarray axis : int, optional skipna : bool, default True ddof : int, default 1 Delta Degrees of Freedom. The divisor used in calculations is N - ddof, where N represents the number of elements. mask : ndarray[bool], optional nan-mask if known Returns ------- result : float64 Unless input is a float array, in which case use the same precision as the input array. Examples -------- >>> from pandas.core import nanops >>> s = pd.Series([1, np.nan, 2, 3]) >>> nanops.nansem(s.values) np.float64(0.5773502691896258)",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\nanops.py",
    "ast_data": "FunctionDef name:nansem arg:values arguments arg arg arg arg arg Call Assign Call If Compare Assign Call If BoolOp Compare Call Return return:yes Assign Call Assign Call Return return:yes Call Call Call"
  },
  {
    "library": "pandas",
    "name": "dedup_names",
    "source_code": "def dedup_names(names: Sequence[Hashable], is_potential_multiindex: bool) -> Sequence[Hashable]:\n    names = list(names)\n    counts: DefaultDict[Hashable, int] = defaultdict(int)\n    for i, col in enumerate(names):\n        cur_count = counts[col]\n        while cur_count > 0:\n            counts[col] = cur_count + 1\n            if is_potential_multiindex:\n                assert isinstance(col, tuple)\n                col = col[:-1] + (f'{col[-1]}.{cur_count}',)\n            else:\n                col = f'{col}.{cur_count}'\n            cur_count = counts[col]\n        names[i] = col\n        counts[col] = cur_count + 1\n    return names",
    "docstring": "Rename column names if duplicates exist. Currently the renaming is done by appending a period and an autonumeric, but a custom pattern may be supported in the future. Examples -------- >>> dedup_names([\"x\", \"y\", \"x\", \"x\"], is_potential_multiindex=False) ['x', 'y', 'x.1', 'x.2']",
    "type": "function",
    "file_path": "pandas\\pandas\\io\\common.py",
    "ast_data": "FunctionDef name:dedup_names arg:names arg:is_potential_multiindex arguments arg arg Assign Call Call For Call Assign While Compare Assign If Call Assign Assign Assign Assign Assign Return return:yes"
  },
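A hedged usage sketch; dedup_names is an internal pandas helper, so this import path may shift between versions. The second call exercises the MultiIndex branch, where only the last tuple level gets the suffix.

```python
from pandas.io.common import dedup_names  # internal helper; path may change

print(dedup_names(["x", "y", "x", "x"], is_potential_multiindex=False))
# ['x', 'y', 'x.1', 'x.2']

print(dedup_names([("a", "x"), ("a", "x")], is_potential_multiindex=True))
# [('a', 'x'), ('a', 'x.1')]
```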
  {
    "library": "tensorflow",
    "name": "get_saveable",
    "source_code": "def get_saveable(self, var, primary_var, name):\n    return values_util.get_on_read_saveable(var, primary_var, name)",
    "docstring": "Create a saveable object for the given variable.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\values.py",
    "ast_data": "FunctionDef name:get_saveable arg:self arg:var arg:primary_var arg:name arguments arg arg arg arg Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "delayed",
    "source_code": "def delayed(function):\n\n    @functools.wraps(function)\n    def delayed_function(*args, **kwargs):\n        return (_FuncWrapper(function), args, kwargs)\n    return delayed_function",
    "docstring": "Decorator used to capture the arguments of a function. This alternative to is meant to be used in conjunction with . The latter captures the scikit- learn configuration by calling in the current thread, prior to dispatching the first task. The captured configuration is then propagated and enabled for the duration of the execution of the delayed function in the joblib workers. .. versionchanged:: 1.3 was moved from to in scikit-learn 1.3. Parameters ---------- function : callable The function to be delayed. Returns ------- output: tuple Tuple containing the delayed function, the positional arguments, and the keyword arguments.",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\utils\\parallel.py",
    "ast_data": "FunctionDef name:delayed arg:function arguments arg FunctionDef name:delayed_function arguments arg arg Return return:yes Call Call Return return:yes"
  },
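A usage sketch pairing this decorator with sklearn.utils.parallel.Parallel, as the docstring prescribes; the worker function is a made-up example.

```python
from sklearn.utils.parallel import Parallel, delayed

def square(x):  # illustrative worker function
    return x * x

# Parallel propagates the active scikit-learn config to the joblib workers.
results = Parallel(n_jobs=2)(delayed(square)(i) for i in range(5))
print(results)  # [0, 1, 4, 9, 16]
```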
  {
    "library": "pytorch",
    "name": "factory_kwargs",
    "source_code": "def factory_kwargs(kwargs):\n    if kwargs is None:\n        return {}\n    simple_keys = {'device', 'dtype', 'memory_format'}\n    expected_keys = simple_keys | {'factory_kwargs'}\n    if not kwargs.keys() <= expected_keys:\n        raise TypeError(f'unexpected kwargs {kwargs.keys() - expected_keys}')\n    r = dict(kwargs.get('factory_kwargs', {}))\n    for k in simple_keys:\n        if k in kwargs:\n            if k in r:\n                raise TypeError(f'{k} specified twice, in **kwargs and in factory_kwargs')\n            r[k] = kwargs[k]\n    return r",
    "docstring": "Return a canonicalized dict of factory kwargs. Given kwargs, returns a canonicalized dict of factory kwargs that can be directly passed to factory functions like torch.empty, or errors if unrecognized kwargs are present. This function makes it simple to write code like this:: class MyModule(nn.Module): def __init__(self, **kwargs): factory_kwargs = torch.nn.factory_kwargs(kwargs) self.weight = Parameter(torch.empty(10, **factory_kwargs)) Why should you use this function instead of just passing along directly? 1. This function does error validation, so if there are unexpected kwargs we will immediately report an error, instead of deferring it to the factory call 2. This function supports a special argument, which can be used to explicitly specify a kwarg to be used for factory functions, in the event one of the factory kwargs conflicts with an already existing argument in the signature (e.g. in the signature ``)",
    "type": "function",
    "file_path": "pytorch\\torch\\nn\\__init__.py",
    "ast_data": "FunctionDef name:factory_kwargs arg:kwargs arguments arg If Compare Return return:no Assign Assign If Compare Call Raise Call Call Assign Call Call For If Compare If Compare Raise Call Assign Return return:yes"
  },
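A short sketch of the canonicalization rules above, through torch.nn.factory_kwargs.

```python
import torch
from torch.nn import factory_kwargs

# Simple keys (device, dtype, memory_format) pass through
print(factory_kwargs({"device": "cpu", "dtype": torch.float64}))
# -> {'device': 'cpu', 'dtype': torch.float64} (key order may vary)

# The nested "factory_kwargs" key is merged in
print(factory_kwargs({"factory_kwargs": {"device": "cpu"}}))
# -> {'device': 'cpu'}

# Specifying the same key both ways raises TypeError:
# factory_kwargs({"dtype": torch.float32, "factory_kwargs": {"dtype": torch.float64}})
```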
  {
    "library": "pytorch",
    "name": "LazyBatchNorm1d",
    "source_code": "class LazyBatchNorm1d(_LazyNormBase, _BatchNorm):\n    cls_to_become = BatchNorm1d\n\n    def _check_input_dim(self, input):\n        if input.dim() != 2 and input.dim() != 3:\n            raise ValueError(f'expected 2D or 3D input (got {input.dim()}D input)')",
    "docstring": "A :class: module with lazy initialization. Lazy initialization based on the `BatchNorm1dweightbiasrunning_meanrunning_vartorch.nn.modules.lazy.LazyModuleMixinrunning_meanrunning_var`",
    "type": "class",
    "file_path": "pytorch\\torch\\nn\\modules\\batchnorm.py",
    "ast_data": "ClassDef name:LazyBatchNorm1d Assign FunctionDef name:_check_input_dim arg:self arg:input arguments arg arg If BoolOp Compare Call Compare Call Raise Call Call"
  },
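A usage sketch via the public torch.nn.LazyBatchNorm1d: num_features is inferred on the first forward pass, after which the module becomes a plain BatchNorm1d (the cls_to_become swap in the source above).

```python
import torch

bn = torch.nn.LazyBatchNorm1d()   # no num_features required up front
x = torch.randn(4, 3, 10)         # (N, C, L); C = 3 will be inferred
bn(x)

print(type(bn).__name__)          # BatchNorm1d -- cls_to_become was applied
print(bn.weight.shape)            # torch.Size([3])
print(bn.running_mean.shape)      # torch.Size([3])
```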
  {
    "library": "django",
    "name": "name",
    "source_code": "@property\ndef name(self):\n    return self.srs.name",
    "docstring": "Return the projection name.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\db\\backends\\base\\models.py",
    "ast_data": "FunctionDef name:name arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_subscribe",
    "source_code": "def _subscribe(tensor, side_effects, control_cache):\n    if not tensor.dtype.is_numpy_compatible:\n        logging.debug('Tensor {} has an un-supported {} type and cannot be subscribed.'.format(tensor.name, tensor.dtype))\n        return tensor\n    if _is_subscribed_identity(tensor):\n        return _subscribe_extend(tensor, side_effects)\n    name_scope = tensor.op.name + '/subscription/Identity'\n    consumers = tensor.consumers()\n    matching_ops = [op for op in consumers if op.name.startswith(name_scope)]\n    assert len(matching_ops) <= 1, 'Op {} must only have one subscription op connected to it'.format(tensor.op.name)\n    if len(matching_ops) == 1:\n        candidate_tensor = matching_ops[0].outputs[0]\n        if _is_subscribed_identity(candidate_tensor):\n            return _subscribe_extend(candidate_tensor, side_effects)\n    return _subscribe_new(tensor, side_effects, control_cache)",
    "docstring": "Helper method that subscribes a single tensor to a list of side_effects. This method will check if the given tensor has already been subscribed or if it's a tensor returned by a previous call to and, if so, will reuse the existing identity op, appending the given side effects to the list of existing ones. Args: tensor: The to be subscribed. side_effects: List of side_effect functions, see subscribe for details. control_cache: helper to get control_outputs faster. Returns: The modified replacement to the passed in tensor which triggers the side effects or the given tensor, if it was already been subscribed.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\subscribe.py",
    "ast_data": "FunctionDef name:_subscribe arg:tensor arg:side_effects arg:control_cache arguments arg arg arg If Call Call Return return:yes If Call Return return:yes Call Assign Assign Call Assign Call Compare Call Call If Compare Call Assign If Call Return return:yes Call Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "_AdjointLinearOperator",
    "source_code": "class _AdjointLinearOperator(LinearOperator):\n\n    def __init__(self, A):\n        shape = (A.shape[1], A.shape[0])\n        super().__init__(dtype=A.dtype, shape=shape)\n        self.A = A\n        self.args = (A,)\n\n    def _matvec(self, x):\n        return self.A._rmatvec(x)\n\n    def _rmatvec(self, x):\n        return self.A._matvec(x)\n\n    def _matmat(self, x):\n        return self.A._rmatmat(x)\n\n    def _rmatmat(self, x):\n        return self.A._matmat(x)",
    "docstring": "Adjoint of arbitrary Linear Operator",
    "type": "class",
    "file_path": "scipy\\scipy\\sparse\\linalg\\_interface.py",
    "ast_data": "ClassDef name:_AdjointLinearOperator FunctionDef name:__init__ arg:self arg:A arguments arg arg Assign Call Call Assign Assign FunctionDef name:_matvec arg:self arg:x arguments arg arg Return return:yes Call FunctionDef name:_rmatvec arg:self arg:x arguments arg arg Return return:yes Call FunctionDef name:_matmat arg:self arg:x arguments arg arg Return return:yes Call FunctionDef name:_rmatmat arg:self arg:x arguments arg arg Return return:yes Call"
  },
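A sketch of where this class surfaces in the public API: taking .H on a LinearOperator built from matvec/rmatvec callables yields an adjoint operator that swaps the two roles.

```python
import numpy as np
from scipy.sparse.linalg import LinearOperator

M = np.array([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]])
A = LinearOperator(
    shape=M.shape,
    matvec=lambda v: M @ v,     # forward map: (3, 2) acting on (2,)
    rmatvec=lambda v: M.T @ v,  # adjoint map: (2, 3) acting on (3,)
)

v = np.ones(3)
print(A.H.matvec(v))             # [ 9. 12.], i.e. M.T @ v
print(A.H.H.matvec(np.ones(2)))  # [ 3.  7. 11.], double adjoint is M @ v
```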
  {
    "library": "authlib",
    "name": "validate_subject_type",
    "source_code": "def validate_subject_type(self):\n    self._validate_claim_value('subject_type')",
    "docstring": "subject_type requested for responses to this Client. The subject_types_supported discovery parameter contains a list of the supported subject_type values for the OP. Valid types include pairwise and public.",
    "type": "method",
    "file_path": "authlib\\authlib\\oidc\\registration\\claims.py",
    "ast_data": "FunctionDef name:validate_subject_type arg:self arguments arg Call"
  },
  {
    "library": "kornia",
    "name": "get_cuda_or_mps_device_if_available",
    "source_code": "def get_cuda_or_mps_device_if_available() -> torch.device:\n    if sys.platform == 'darwin' and platform.machine() == 'arm64':\n        return get_mps_device_if_available()\n    else:\n        return get_cuda_device_if_available()",
    "docstring": "Check OS and platform and run get_cuda_device_if_available or get_mps_device_if_available. Returns: torch.device",
    "type": "function",
    "file_path": "kornia\\kornia\\utils\\helpers.py",
    "ast_data": "FunctionDef name:get_cuda_or_mps_device_if_available arguments If BoolOp Compare Compare Call Return return:yes Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "DAG",
    "source_code": "class DAG:\n\n    def __init__(self) -> None:\n        self.nodes: list[DAGNode] = []\n\n    def create_node(self, submodule_node: Node, input_nodes: list[Node], output_nodes: list[Node], logical_devices: list[int], size_bytes: int) -> None:\n        node = DAGNode(submodule_node, input_nodes, output_nodes, logical_devices, size_bytes)\n        self.nodes.append(node)",
    "docstring": "DAG class contains all the DAG nodes",
    "type": "class",
    "file_path": "pytorch\\torch\\fx\\experimental\\accelerator_partitioner.py",
    "ast_data": "ClassDef name:DAG FunctionDef name:__init__ arg:self arguments arg FunctionDef name:create_node arg:self arg:submodule_node arg:input_nodes arg:output_nodes arg:logical_devices arg:size_bytes arguments arg arg arg arg arg arg Assign Call Call"
  },
  {
    "library": "tensorflow",
    "name": "NoopLoadingScope",
    "source_code": "class NoopLoadingScope(object):\n\n    def get(self, unused_object_id):\n        return None\n\n    def set(self, object_id, obj):\n        pass",
    "docstring": "The default shared object loading scope. It does nothing. Created to simplify serialization code that doesn't care about shared objects (e.g. when serializing a single object).",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\utils\\generic_utils.py",
    "ast_data": "ClassDef name:NoopLoadingScope FunctionDef name:get arg:self arg:unused_object_id arguments arg arg Return return:no FunctionDef name:set arg:self arg:object_id arg:obj arguments arg arg arg"
  },
  {
    "library": "tensorflow",
    "name": "get_help",
    "source_code": "def get_help(self, cmd_prefix=None):\n    if not cmd_prefix:\n        help_info = RichTextLines([])\n        if self._help_intro:\n            help_info.extend(self._help_intro)\n        sorted_prefixes = sorted(self._handlers)\n        for cmd_prefix in sorted_prefixes:\n            lines = self._get_help_for_command_prefix(cmd_prefix)\n            lines.append('')\n            lines.append('')\n            help_info.extend(RichTextLines(lines))\n        return help_info\n    else:\n        return RichTextLines(self._get_help_for_command_prefix(cmd_prefix))",
    "docstring": "Compile help information into a RichTextLines object. Args: cmd_prefix: Optional command prefix. As the prefix itself or one of its aliases. Returns: A RichTextLines object containing the help information. If cmd_prefix is None, the return value will be the full command-line help. Otherwise, it will be the help information for the specified command.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\debug\\cli\\debugger_cli_common.py",
    "ast_data": "FunctionDef name:get_help arg:self arg:cmd_prefix arguments arg arg If Assign Call If Call Assign Call For Assign Call Call Call Call Call Return return:yes Return return:yes Call Call"
  },
  {
    "library": "django",
    "name": "has_usable_password",
    "source_code": "def has_usable_password(self):\n    return is_password_usable(self.password)",
    "docstring": "Return False if set_unusable_password() has been called for this user.",
    "type": "method",
    "file_path": "django\\django\\contrib\\auth\\base_user.py",
    "ast_data": "FunctionDef name:has_usable_password arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "django",
    "name": "MigrationOptimizer",
    "source_code": "class MigrationOptimizer:\n\n    def optimize(self, operations, app_label):\n        if app_label is None:\n            raise TypeError('app_label must be a str.')\n        self._iterations = 0\n        while True:\n            result = self.optimize_inner(operations, app_label)\n            self._iterations += 1\n            if result == operations:\n                return result\n            operations = result\n\n    def optimize_inner(self, operations, app_label):\n        new_operations = []\n        for i, operation in enumerate(operations):\n            right = True\n            for j, other in enumerate(operations[i + 1:]):\n                result = operation.reduce(other, app_label)\n                if isinstance(result, list):\n                    in_between = operations[i + 1:i + j + 1]\n                    if right:\n                        new_operations.extend(in_between)\n                        new_operations.extend(result)\n                    elif all((op.reduce(other, app_label) is True for op in in_between)):\n                        new_operations.extend(result)\n                        new_operations.extend(in_between)\n                    else:\n                        new_operations.append(operation)\n                        break\n                    new_operations.extend(operations[i + j + 2:])\n                    return new_operations\n                elif not result:\n                    right = False\n            else:\n                new_operations.append(operation)\n        return new_operations",
    "docstring": "Power the optimization process, where you provide a list of Operations and you are returned a list of equal or shorter length - operations are merged into one if possible. For example, a CreateModel and an AddField can be optimized into a new CreateModel, and CreateModel and DeleteModel can be optimized into nothing.",
    "type": "class",
    "file_path": "django\\django\\db\\migrations\\optimizer.py",
    "ast_data": "ClassDef name:MigrationOptimizer FunctionDef name:optimize arg:self arg:operations arg:app_label arguments arg arg arg If Compare Raise Call Assign While Assign Call If Compare Return return:yes Assign FunctionDef name:optimize_inner arg:self arg:operations arg:app_label arguments arg arg arg Assign For Call Assign For Call Assign Call If Call Assign If Call Call If Call Compare Call Call Call Call Call Return return:yes If Assign Call Return return:yes"
  },
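A hedged sketch of the CreateModel + AddField merge the docstring mentions. This assumes Django is installed; the operations are plain in-memory objects, so no database is touched, though an unconfigured settings module may still need the `settings.configure()` guard shown here in some environments.

```python
from django.conf import settings

if not settings.configured:
    settings.configure()  # operations are in-memory; minimal settings suffice

from django.db import migrations, models
from django.db.migrations.optimizer import MigrationOptimizer

ops = [
    migrations.CreateModel(
        "Pony", fields=[("id", models.AutoField(primary_key=True))]
    ),
    migrations.AddField("Pony", "name", models.CharField(max_length=100)),
]
optimized = MigrationOptimizer().optimize(ops, app_label="myapp")
print(len(optimized))        # 1 -- a single CreateModel carrying both fields
print(optimized[0].fields)   # [('id', ...), ('name', ...)]
```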
  {
    "library": "tensorflow",
    "name": "apis_with_type_based_dispatch",
    "source_code": "def apis_with_type_based_dispatch():\n    return sorted(_TYPE_BASED_DISPATCH_SIGNATURES, key=lambda api: f'{api.__module__}.{api.__name__}')",
    "docstring": "Returns a list of TensorFlow APIs that support type-based dispatch.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\util\\dispatch.py",
    "ast_data": "FunctionDef name:apis_with_type_based_dispatch arguments Return return:yes Call arguments arg"
  },
  {
    "library": "scipy",
    "name": "tsem",
    "source_code": "def tsem(a, limits=None, inclusive=(True, True), axis=0, ddof=1):\n    a = ma.asarray(a).ravel()\n    if limits is None:\n        n = float(a.count())\n        return a.std(axis=axis, ddof=ddof) / ma.sqrt(n)\n    am = trima(a.ravel(), limits, inclusive)\n    sd = np.sqrt(am.var(axis=axis, ddof=ddof))\n    return sd / np.sqrt(am.count())",
    "docstring": "Compute the trimmed standard error of the mean. This function finds the standard error of the mean for given values, ignoring values outside the given . Parameters ---------- a : array_like array of values limits : None or (lower limit, upper limit), optional Values in the input array less than the lower limit or greater than the upper limit will be ignored. When limits is None, then all values are used. Either of the limit values in the tuple can also be None representing a half-open interval. The default value is None. inclusive : (bool, bool), optional A tuple consisting of the (lower flag, upper flag). These flags determine whether values exactly equal to the lower or upper limits are included. The default value is (True, True). axis : int or None, optional Axis along which to operate. If None, compute over the whole array. Default is zero. ddof : int, optional Delta degrees of freedom. Default is 1. Returns ------- tsem : float Notes ----- For more details on , see .",
    "type": "function",
    "file_path": "scipy\\scipy\\stats\\_mstats_basic.py",
    "ast_data": "FunctionDef name:tsem arg:a arg:limits arg:inclusive arg:axis arg:ddof arguments arg arg arg arg arg Assign Call Call If Compare Assign Call Call Return return:yes Call Call Assign Call Call Assign Call Call Return return:yes Call Call"
  },
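A usage sketch through the public masked-stats namespace; values outside limits are simply ignored.

```python
import numpy as np
from scipy.stats import mstats

x = np.arange(20.0)
print(mstats.tsem(x))                  # SEM over all 20 values
print(mstats.tsem(x, limits=(3, 17)))  # SEM over values within [3, 17] only
```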
  {
    "library": "matplotlib",
    "name": "get_axisbelow",
    "source_code": "def get_axisbelow(self):\n    return self._axisbelow",
    "docstring": "Get whether axis ticks and gridlines are above or below most artists. Returns ------- bool or 'line' See Also -------- set_axisbelow",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\axes\\_base.py",
    "ast_data": "FunctionDef name:get_axisbelow arg:self arguments arg Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "toolmanager_connect",
    "source_code": "def toolmanager_connect(self, s, func):\n    return self._callbacks.connect(s, func)",
    "docstring": "Connect event with string *s* to *func*. Parameters ---------- s : str The name of the event. The following events are recognized: - 'tool_message_event' - 'tool_removed_event' - 'tool_added_event' For every tool added a new event is created - 'tool_trigger_TOOLNAME', where TOOLNAME is the id of the tool. func : callable Callback function for the toolmanager event with signature:: def func(event: ToolEvent) -> Any Returns ------- cid The callback id for the connection. This can be used in .",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\backend_managers.py",
    "ast_data": "FunctionDef name:toolmanager_connect arg:self arg:s arg:func arguments arg arg arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "graph",
    "source_code": "@property\ndef graph(self) -> ops.Graph:\n    return self._variable.graph",
    "docstring": "The of this variable.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\ref_variable.py",
    "ast_data": "FunctionDef name:graph arg:self arguments arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "irecv",
    "source_code": "def irecv(tensor: torch.Tensor, src: Optional[int]=None, group: Optional[ProcessGroup]=None, tag: int=0, group_src: Optional[int]=None) -> Optional[Work]:\n    _check_single_tensor(tensor, 'tensor')\n    if _rank_not_in_group(group):\n        _warn_not_in_group('irecv')\n        return None\n    if tensor.is_complex():\n        tensor = torch.view_as_real(tensor)\n    group = _group_or_default_group(group)\n    if src is None and group_src is None:\n        return group.recv_anysource([tensor], tag)\n    else:\n        group_src = _canonicalize_group_rank(group, src, group_src)\n        return group.recv([tensor], group_src, tag)",
    "docstring": "Receives a tensor asynchronously. .. warning:: ``. Returns: A distributed request object. None, if not part of the group",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\distributed_c10d.py",
    "ast_data": "FunctionDef name:irecv arg:tensor arg:src arg:group arg:tag arg:group_src arguments arg arg arg arg arg Call If Call Call Return return:no If Call Assign Call Assign Call If BoolOp Compare Compare Return return:yes Call Assign Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "DisableReduction",
    "source_code": "class DisableReduction(NodeScheduleMarker):\n    pass",
    "docstring": "Marker to invoke . This closes a reduction loop and allows for pointwise ops to occur on the output of a reduction.",
    "type": "class",
    "file_path": "pytorch\\torch\\_inductor\\codegen\\simd_kernel_features.py",
    "ast_data": "ClassDef name:DisableReduction"
  },
  {
    "library": "pytorch",
    "name": "create_multilayer_existing_ranges",
    "source_code": "@classmethod\ndef create_multilayer_existing_ranges(cls, device: torch.device, dst_dtype: torch.dtype, src_dtype: torch.dtype, inner_fn: Callable[..., Any], original_ranges: Sequence[Expr], original_reduction_ranges: Sequence[Expr], new_ranges: list[Integer], new_reduction_ranges: list[Integer], reduction_type: ReductionType, reduction_hint: ReductionHint) -> TensorBox:\n    wrapper_fn = cls._multilayer_wrap_loader_existing_ranges(inner_fn, original_ranges, original_reduction_ranges, new_ranges, new_reduction_ranges)\n    return cls.create_multilayer_helper(device, dst_dtype, src_dtype, wrapper_fn, original_ranges, original_reduction_ranges, [*original_ranges, *new_ranges], new_reduction_ranges, reduction_type, -1, reduction_hint)",
    "docstring": "Break a large reduction up into multiple smaller reductions recursively",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\ir.py",
    "ast_data": "FunctionDef name:create_multilayer_existing_ranges arg:cls arg:device arg:dst_dtype arg:src_dtype arg:inner_fn arg:original_ranges arg:original_reduction_ranges arg:new_ranges arg:new_reduction_ranges arg:reduction_type arg:reduction_hint arguments arg arg arg arg arg arg arg arg arg arg arg Assign Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "end_statement",
    "source_code": "def end_statement(self, stmt):\n    self.active_stmts.remove(stmt)",
    "docstring": "Marks the end of a statement. Args: stmt: Hashable, a key by which the statement can be identified in the CFG's stmt_prev and stmt_next attributes; must match a key previously passed to begin_statement.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\autograph\\pyct\\cfg.py",
    "ast_data": "FunctionDef name:end_statement arg:self arg:stmt arguments arg arg Call"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, dtype, shape=None, shared_name=None, name='conditional_accumulator', reduction_type='MEAN'):\n    accumulator_ref = gen_data_flow_ops.resource_conditional_accumulator(dtype=dtype, shape=shape, shared_name=shared_name, name=name, reduction_type=reduction_type)\n    if context.executing_eagerly():\n        self._resource_deleter = resource_variable_ops.EagerResourceDeleter(handle=accumulator_ref, handle_device=context.context().device_name)\n    super(ConditionalAccumulator, self).__init__(dtype, shape, accumulator_ref)",
    "docstring": "Creates a new ConditionalAccumulator. Args: dtype: Datatype of the accumulated gradients. shape: Shape of the accumulated gradients. shared_name: Optional. If non-empty, this accumulator will be shared under the given name across multiple sessions. name: Optional name for the accumulator. reduction_type: Reduction type to use when taking the gradient.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\data_flow_ops.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:dtype arg:shape arg:shared_name arg:name arg:reduction_type arguments arg arg arg arg arg arg Assign Call If Call Assign Call Call Call Call"
  },
  {
    "library": "seaborn",
    "name": "__iter__",
    "source_code": "def __iter__(self) -> Generator[dict, None, None]:\n    yield from self._subplot_list",
    "docstring": "Yield each subplot dictionary with Axes object and metadata.",
    "type": "method",
    "file_path": "seaborn\\seaborn\\_core\\subplots.py",
    "ast_data": "FunctionDef name:__iter__ arg:self arguments arg"
  },
  {
    "library": "pandas",
    "name": "_cmp_method",
    "source_code": "def _cmp_method(self, other, op):\n    if self.is_(other):\n        if op in {operator.eq, operator.le, operator.ge}:\n            arr = np.ones(len(self), dtype=bool)\n            if self._can_hold_na and (not isinstance(self, ABCMultiIndex)):\n                arr[self.isna()] = False\n            return arr\n        elif op is operator.ne:\n            arr = np.zeros(len(self), dtype=bool)\n            if self._can_hold_na and (not isinstance(self, ABCMultiIndex)):\n                arr[self.isna()] = True\n            return arr\n    if isinstance(other, (np.ndarray, Index, ABCSeries, ExtensionArray)) and len(self) != len(other):\n        raise ValueError('Lengths must match to compare')\n    if not isinstance(other, ABCMultiIndex):\n        other = extract_array(other, extract_numpy=True)\n    else:\n        other = np.asarray(other)\n    if is_object_dtype(self.dtype) and isinstance(other, ExtensionArray):\n        result = op(self._values, other)\n    elif isinstance(self._values, ExtensionArray):\n        result = op(self._values, other)\n    elif is_object_dtype(self.dtype) and (not isinstance(self, ABCMultiIndex)):\n        result = ops.comp_method_OBJECT_ARRAY(op, self._values, other)\n    else:\n        result = ops.comparison_op(self._values, other, op)\n    return result",
    "docstring": "Wrapper used to dispatch comparison operations.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\indexes\\base.py",
    "ast_data": "FunctionDef name:_cmp_method arg:self arg:other arg:op arguments arg arg arg If Call If Compare Assign Call Call If BoolOp Call Assign Call Return return:yes If Compare Assign Call Call If BoolOp Call Assign Call Return return:yes If BoolOp Call Compare Call Call Raise Call If Call Assign Call Assign Call If BoolOp Call Call Assign Call If Call Assign Call If BoolOp Call Call Assign Call Assign Call Return return:yes"
  },
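The identity fast path in _cmp_method above is observable from public pandas: comparing an Index with itself short-circuits to an all-True (for ==) or all-False (for !=) mask, with NaN positions flipped to respect NaN != NaN.

import numpy as np
import pandas as pd

idx = pd.Index([1.0, np.nan, 3.0])
print(idx == idx)  # [ True False  True] -- the NaN slot is forced to False
print(idx != idx)  # [False  True False]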
  {
    "library": "pytorch",
    "name": "lower_to_fbgemm",
    "source_code": "def lower_to_fbgemm(model: GraphModule, qconfig_map: dict[str, QConfigAny], node_name_to_scope: dict[str, tuple[str, type]], keep_original_weights: bool=False) -> GraphModule:\n    return _lower_to_native_backend(model, qconfig_map, node_name_to_scope, keep_original_weights)",
    "docstring": "Lower a quantized reference model (with reference quantized operator patterns) to fbgemm",
    "type": "function",
    "file_path": "pytorch\\torch\\ao\\quantization\\fx\\lower_to_fbgemm.py",
    "ast_data": "FunctionDef name:lower_to_fbgemm arg:model arg:qconfig_map arg:node_name_to_scope arg:keep_original_weights arguments arg arg arg arg Return return:yes Call"
  },
  {
    "library": "numpy",
    "name": "inner",
    "source_code": "def inner(a, b):\n    fa = filled(a, 0)\n    fb = filled(b, 0)\n    if fa.ndim == 0:\n        fa.shape = (1,)\n    if fb.ndim == 0:\n        fb.shape = (1,)\n    return np.inner(fa, fb).view(MaskedArray)",
    "docstring": "Returns the inner product of a and b for arrays of floating point types. Like the generic NumPy equivalent the product sum is over the last dimension of a and b. The first argument is not conjugated.",
    "type": "function",
    "file_path": "numpy\\numpy\\ma\\core.py",
    "ast_data": "FunctionDef name:inner arg:a arg:b arguments arg arg Assign Call Assign Call If Compare Assign If Compare Assign Return return:yes Call Call"
  },
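Because inner above fills masked entries with 0 before calling np.inner, masked values simply drop out of the product sum:

import numpy as np
import numpy.ma as ma

a = ma.array([1.0, 2.0, 3.0], mask=[False, True, False])
b = ma.array([4.0, 5.0, 6.0])
print(ma.inner(a, b))  # 1*4 + 0*5 + 3*6 = 22.0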
  {
    "library": "django",
    "name": "appendlist",
    "source_code": "def appendlist(self, key, value):\n    self.setlistdefault(key).append(value)",
    "docstring": "Append an item to the internal list associated with key.",
    "type": "method",
    "file_path": "django\\django\\utils\\datastructures.py",
    "ast_data": "FunctionDef name:appendlist arg:self arg:key arg:value arguments arg arg arg Call Call"
  },
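appendlist above accumulates values under one key instead of overwriting them; a short MultiValueDict session (key and values are arbitrary examples):

from django.utils.datastructures import MultiValueDict

d = MultiValueDict()
d.appendlist('tags', 'red')
d.appendlist('tags', 'blue')
print(d['tags'])          # 'blue' -- item access returns the last value
print(d.getlist('tags'))  # ['red', 'blue']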
  {
    "library": "kornia",
    "name": "sample_points_2d",
    "source_code": "def sample_points_2d(self, heights: Tensor, widths: Tensor, sampling_step: int=1) -> Dict[int, RaySampler.Points2D]:\n    heights = heights.int()\n    widths = widths.int()\n    points2d_as_flat_tensors: Dict[int, RaySampler.Points2D_FlatTensors] = {}\n    for camera_id, (height, width) in enumerate(zip(heights.tolist(), widths.tolist())):\n        n = height * width\n        y_grid, x_grid = torch_meshgrid([torch.arange(0, height, sampling_step, device=self._device, dtype=self._dtype), torch.arange(0, width, sampling_step, device=self._device, dtype=self._dtype)], indexing='ij')\n        RaySampler._add_points2d_as_flat_tensors_to_num_ray_dict(n, x_grid, y_grid, camera_id, points2d_as_flat_tensors)\n    return RaySampler._build_num_ray_dict_of_points2d(points2d_as_flat_tensors)",
    "docstring": "Uniformly sample pixel points in 2d for all scene camera pixels. Args: heights: tensor that holds scene camera image heights (can vary between cameras): math: . widths: tensor that holds scene camera image widths (can vary between cameras): math: . sampling_step: defines uniform strides between rows and columns: int. Returns: dictionary of Points2D objects that holds information on pixel 2d coordinates of each ray and the camera id it was casted by: Dict[int, Points2D]",
    "type": "method",
    "file_path": "kornia\\kornia\\nerf\\samplers.py",
    "ast_data": "FunctionDef name:sample_points_2d arg:self arg:heights arg:widths arg:sampling_step arguments arg arg arg arg Assign Call Assign Call For Call Call Call Call Assign Assign Call Call Call Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "RelaxedUnspecConstraint",
    "source_code": "@dataclass(frozen=True)\nclass RelaxedUnspecConstraint(Constraint):\n\n    def render(self, source: Source) -> str:\n        return f'RelaxedUnspecConstraint({source.name()})'",
    "docstring": "For clients: no explicit constraint; constraint is whatever is implicitly inferred by guards from tracing. For backends: there must exist at least TWO possible values for the size at this dimension which satisfy the guards for this dimension. In other words, this constraint helps us distinguish between \"we don't care if this dimension specializes or not\" versus \"this dimension must be unspecialized.\" However, this constraint doesn't say very much about what specialization is permitted; for example, if we guard on a size being even, this would still be acceptable under an unspec constraint. This makes RelaxedUnspecConstraint useful for eager mode, where your backend compiler may add constraints to otherwise dynamic dimensions; we can't assert that there are NO guards as this is brittle because compilers should be able to add extra constraints. If you want to assert that there are no guards, use StrictMinMaxConstraint with an unbounded ValueRanges.",
    "type": "class",
    "file_path": "pytorch\\torch\\fx\\experimental\\symbolic_shapes.py",
    "ast_data": "ClassDef name:RelaxedUnspecConstraint FunctionDef name:render arg:self arg:source arguments arg arg Return return:yes Call Call"
  },
  {
    "library": "matplotlib",
    "name": "get_urls",
    "source_code": "def get_urls(self):\n    return self._urls",
    "docstring": "Return a list of URLs, one for each element of the collection. The list contains *None* for elements without a URL. See :doc: for an example.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\collections.py",
    "ast_data": "FunctionDef name:get_urls arg:self arguments arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "merge_loops",
    "source_code": "def merge_loops(self) -> LoopBody:\n    old_body = self\n    old_sizes = self.sizes\n    old_iter_vars, old_reduce_vars = old_body.vars\n    old_iter_sizes, old_reduce_sizes = old_sizes\n    index_exprs = [*old_body.indexing_exprs.values()]\n    iter_sizes, iter_reindex, _ = V.graph.sizevars._simplify_loops(old_iter_vars, old_iter_sizes, index_prevent_reordering(index_exprs, old_iter_vars, old_iter_sizes))\n    reduce_sizes, reduce_reindex, _ = V.graph.sizevars._simplify_loops(old_reduce_vars, old_reduce_sizes, index_prevent_reordering(index_exprs, old_reduce_vars, old_reduce_sizes))\n    (iter_vars, reduce_vars), var_ranges = dependencies.index_vars_no_squeeze(iter_sizes, reduce_sizes, prefix='t')\n    new_body = LoopBody(old_body, [iter_reindex(iter_vars), reduce_reindex(reduce_vars)], var_ranges, iter_vars, reduce_vars)\n    (iter_vars2, reduce_vars2), var_ranges2 = dependencies.index_vars_no_squeeze(iter_sizes, reduce_sizes, prefix='p')\n    new_body2 = LoopBody(new_body, (iter_vars2, reduce_vars2), var_ranges2, iter_vars2, reduce_vars2)\n    return new_body2",
    "docstring": "Merge both iteration and reduction loops and return a new LoopBody.",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\loop_body.py",
    "ast_data": "FunctionDef name:merge_loops arg:self arguments arg Assign Assign Assign Assign Assign Call Assign Call Call Assign Call Call Assign Call Assign Call Call Call Assign Call Assign Call Return return:yes"
  },
  {
    "library": "kornia",
    "name": "unproject_meshgrid",
    "source_code": "def unproject_meshgrid(height: int, width: int, camera_matrix: Tensor, normalize_points: bool=False, device: Optional[torch.device]=None, dtype: Optional[torch.dtype]=None) -> Tensor:\n    KORNIA_CHECK_SHAPE(camera_matrix, ['*', '3', '3'])\n    points_uv: Tensor = create_meshgrid(height, width, normalized_coordinates=False, device=device, dtype=dtype).squeeze()\n    camera_matrix_tmp: Tensor = camera_matrix[:, None, None]\n    points_xy = normalize_points_with_intrinsics(points_uv, camera_matrix_tmp)\n    points_xyz = convert_points_to_homogeneous(points_xy)\n    if normalize_points:\n        points_xyz = kornia_ops.normalize(points_xyz, dim=-1, p=2)\n    return points_xyz",
    "docstring": "Compute a 3d point per pixel given its depth value and the camera intrinsics. .. tip:: This function should be used in conjunction with :py:func: to cache the meshgrid computation when warping multiple frames with the same camera intrinsics. Args: height: height of image. width: width of image. camera_matrix: tensor containing the camera intrinsics with shape :math:. normalize_points: whether to normalize the pointcloud. This must be set to when the depth is represented as the Euclidean ray length from the camera position. device: device to place the result on. dtype: dtype of the result. Return: tensor with a 3d point per pixel of the same resolution as the input :math:.",
    "type": "function",
    "file_path": "kornia\\kornia\\geometry\\depth.py",
    "ast_data": "FunctionDef name:unproject_meshgrid arg:height arg:width arg:camera_matrix arg:normalize_points arg:device arg:dtype arguments arg arg arg arg arg arg Call Call Call Assign Call Assign Call If Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, x, name):\n    self.x = x\n    self.name = name",
    "docstring": "Construct DivideDelegateWithName. Args: x: Tensor to use as left operand in operator overloads name: The name that is preferred for the op created.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\math_ops.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:x arg:name arguments arg arg arg Assign Assign"
  },
  {
    "library": "tensorflow",
    "name": "DatasetIterator",
    "source_code": "class DatasetIterator(DistributedIteratorV1):\n\n    def __init__(self, dataset, input_workers, strategy, num_replicas_in_sync=None, input_context=None):\n        dist_dataset = DistributedDatasetV1(dataset, input_workers, strategy, num_replicas_in_sync=num_replicas_in_sync, input_context=input_context)\n        worker_iterators = _create_iterators_per_worker(dist_dataset._cloned_datasets, input_workers)\n        super(DatasetIterator, self).__init__(input_workers, worker_iterators, strategy, dist_dataset.cardinality, dist_dataset._enable_get_next_as_optional)\n        self._element_spec = dist_dataset.element_spec",
    "docstring": "Iterator created from input dataset.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\v1\\input_lib.py",
    "ast_data": "ClassDef name:DatasetIterator FunctionDef name:__init__ arg:self arg:dataset arg:input_workers arg:strategy arg:num_replicas_in_sync arg:input_context arguments arg arg arg arg arg arg Assign Call Assign Call Call Call Assign"
  },
  {
    "library": "matplotlib",
    "name": "write_png",
    "source_code": "def write_png(self, fname):\n    im = self.to_rgba(self._A[::-1] if self.origin == 'lower' else self._A, bytes=True, norm=True)\n    PIL.Image.fromarray(im).save(fname, format='png')",
    "docstring": "Write the image to png file *fname*.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\image.py",
    "ast_data": "FunctionDef name:write_png arg:self arg:fname arguments arg arg Assign Call Compare Call Call"
  },
  {
    "library": "pytorch",
    "name": "fake_mode",
    "source_code": "@property\ndef fake_mode(self):\n    return _fake_mode._get_handler()",
    "docstring": "The graph currently being generated",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\virtualized.py",
    "ast_data": "FunctionDef name:fake_mode arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "set_aspect",
    "source_code": "def set_aspect(self, aspect, adjustable=None, anchor=None, share=False):\n    if cbook._str_equal(aspect, 'equal'):\n        aspect = 1\n    if not cbook._str_equal(aspect, 'auto'):\n        aspect = float(aspect)\n        if aspect <= 0 or not np.isfinite(aspect):\n            raise ValueError('aspect must be finite and positive ')\n    if share:\n        axes = {sibling for name in self._axis_names for sibling in self._shared_axes[name].get_siblings(self)}\n    else:\n        axes = [self]\n    for ax in axes:\n        ax._aspect = aspect\n    if adjustable is None:\n        adjustable = self._adjustable\n    self.set_adjustable(adjustable, share=share)\n    if anchor is not None:\n        self.set_anchor(anchor, share=share)\n    self.stale = True",
    "docstring": "Set the aspect ratio of the Axes scaling, i.e. y/x-scale. Parameters ---------- aspect : {'auto', 'equal'} or float Possible values: - 'auto': fill the position rectangle with data. - 'equal': same as `.set_adjustable~.Axes.set_anchor`, apply the settings to all shared Axes. See Also -------- matplotlib.axes.Axes.set_adjustable Set how the Axes adjusts to achieve the required aspect ratio. matplotlib.axes.Axes.set_anchor Set the position in case of extra space.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\axes\\_base.py",
    "ast_data": "FunctionDef name:set_aspect arg:self arg:aspect arg:adjustable arg:anchor arg:share arguments arg arg arg arg arg If Call Assign If Call Assign Call If BoolOp Compare Call Raise Call If Assign Call Assign For Assign If Compare Assign Call If Compare Call Assign"
  },
  {
    "library": "numpy",
    "name": "get_include",
    "source_code": "def get_include():\n    return os.path.join(os.path.dirname(__file__), 'src')",
    "docstring": "Return the directory that contains the `numpy.distutils` See Also -------- numpy.get_include : function that returns the numpy include directory",
    "type": "function",
    "file_path": "numpy\\numpy\\f2py\\__init__.py",
    "ast_data": "FunctionDef name:get_include arguments Return return:yes Call Call"
  },
  {
    "library": "django",
    "name": "has_results",
    "source_code": "def has_results(self):\n    return bool(self.execute_sql(SINGLE))",
    "docstring": "Backends (e.g. NoSQL) can override this in order to use optimized versions of \"query has any results.\"",
    "type": "method",
    "file_path": "django\\django\\db\\models\\sql\\compiler.py",
    "ast_data": "FunctionDef name:has_results arg:self arguments arg Return return:yes Call Call"
  },
  {
    "library": "pandas",
    "name": "_init_dict",
    "source_code": "def _init_dict(self, data: Mapping, index: Index | None=None, dtype: DtypeObj | None=None):\n    if data:\n        keys = maybe_sequence_to_range(tuple(data.keys()))\n        values = list(data.values())\n    elif index is not None:\n        if len(index) or dtype is not None:\n            values = na_value_for_dtype(pandas_dtype(dtype), compat=False)\n        else:\n            values = []\n        keys = index\n    else:\n        keys, values = (default_index(0), [])\n    s = Series(values, index=keys, dtype=dtype)\n    if data and index is not None:\n        s = s.reindex(index)\n    return (s._mgr, s.index)",
    "docstring": "Derive the \"_mgr\" and \"index\" attributes of a new Series from a dictionary input. Parameters ---------- data : dict or dict-like Data used to populate the new Series. index : Index or None, default None Index for the new Series: if None, use dict keys. dtype : np.dtype, ExtensionDtype, or None, default None The dtype for the new Series: if None, infer from data. Returns ------- _data : BlockManager for the new Series index : index for the new Series",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\series.py",
    "ast_data": "FunctionDef name:_init_dict arg:self arg:data arg:index arg:dtype arguments arg arg arg arg If Assign Call Call Call Assign Call Call If Compare If BoolOp Call Compare Assign Call Call Assign Assign Assign Call Assign Call If BoolOp Compare Assign Call Return return:yes"
  },
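The public behavior backed by _init_dict above: dict keys become the index, and passing an explicit index triggers the final reindex step, introducing NaN for keys absent from the data.

import pandas as pd

s = pd.Series({'a': 1, 'b': 2}, index=['b', 'c'])
print(s)
# b    2.0
# c    NaN
# dtype: float64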
  {
    "library": "pytorch",
    "name": "set_reshard_after_backward",
    "source_code": "def set_reshard_after_backward(self, reshard_after_backward: bool, *, recurse: bool=True) -> None:\n    self_module = cast(nn.Module, self)\n    modules = list(self_module.modules()) if recurse else [self_module]\n    for module in modules:\n        if isinstance(module, FSDPModule):\n            state = module._get_fsdp_state()\n            if (fsdp_param_group := state._fsdp_param_group):\n                fsdp_param_group.reshard_after_backward = reshard_after_backward",
    "docstring": "Sets if the module should reshard parameters after backward. This can be used during gradient accumulation to trade off higher memory for reduced communication since the unsharded parameters do not need to be re-all-gathered before the next forward. Args: reshard_after_backward (bool): Whether to reshard parameters after backward. recurse (bool): Whether to set for all FSDP submodules or just the passed-in module.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\fsdp\\_fully_shard\\_fully_shard.py",
    "ast_data": "FunctionDef name:set_reshard_after_backward arg:self arg:reshard_after_backward arguments arg arg arg Assign Call Assign Call Call For If Call Assign Call If Assign"
  },
  {
    "library": "tensorflow",
    "name": "do_decode",
    "source_code": "def do_decode(self, value, decode_fn):\n    del decode_fn\n    tensor_proto = value.numpy_value\n    numpy = tensor_util.MakeNdarray(tensor_proto)\n    return numpy",
    "docstring": "Returns the encoded by the proto .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\constant_op.py",
    "ast_data": "FunctionDef name:do_decode arg:self arg:value arg:decode_fn arguments arg arg arg Assign Assign Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "set_ticklabel_direction",
    "source_code": "def set_ticklabel_direction(self, tick_direction):\n    self._ticklabel_add_angle = _api.check_getitem({'+': 0, '-': 180}, tick_direction=tick_direction)",
    "docstring": "Adjust the direction of the tick labels. Note that the *tick_direction*\\s '+' and '-' are relative to the direction of the increasing coordinate. Parameters ---------- tick_direction : {\"+\", \"-\"}",
    "type": "method",
    "file_path": "matplotlib\\lib\\mpl_toolkits\\axisartist\\axis_artist.py",
    "ast_data": "FunctionDef name:set_ticklabel_direction arg:self arg:tick_direction arguments arg arg Assign Call"
  },
  {
    "library": "kornia",
    "name": "inverse_transformation",
    "source_code": "def inverse_transformation(trans_12: Tensor) -> Tensor:\n    KORNIA_CHECK_IS_TENSOR(trans_12)\n    if not (trans_12.dim() in (2, 3) and trans_12.shape[-2:] == (4, 4)):\n        raise ValueError(f'Input size must be a Nx4x4 or 4x4. Got {trans_12.shape}')\n    rmat_12 = trans_12[..., :3, 0:3]\n    tvec_12 = trans_12[..., :3, 3:4]\n    rmat_21 = torch.transpose(rmat_12, -1, -2)\n    tvec_21 = torch.matmul(-rmat_21, tvec_12)\n    trans_21 = zeros_like(trans_12)\n    trans_21[..., :3, 0:3] += rmat_21\n    trans_21[..., :3, -1:] += tvec_21\n    trans_21[..., -1, -1:] += 1.0\n    return trans_21",
    "docstring": "Invert a 4x4 homogeneous transformation. :math: The inverse transformation is computed as follows: .. math:: T_2^{1} = (T_1^{2})^{-1} = \\begin{bmatrix} R_1^T & -R_1^T t_1 \\\\ \\mathbf{0} & 1\\end{bmatrix} Args: trans_12: transformation tensor of shape :math: or :math:. Returns: tensor with inverted transformations with shape :math: or :math:. Example: >>> trans_12 = torch.rand(1, 4, 4) # Nx4x4 >>> trans_21 = inverse_transformation(trans_12) # Nx4x4",
    "type": "function",
    "file_path": "kornia\\kornia\\geometry\\linalg.py",
    "ast_data": "FunctionDef name:inverse_transformation arg:trans_12 arguments arg Call If BoolOp Compare Call Compare Raise Call Assign Assign Assign Call Assign Call Assign Call Return return:yes"
  },
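A quick sanity check of the closed-form inverse above, assuming kornia is installed: composing a batch of transforms with their inverses recovers the identity.

import torch
from kornia.geometry.linalg import inverse_transformation

trans_12 = torch.eye(4).repeat(2, 1, 1)             # 2 x 4 x 4, identity rotation
trans_12[:, :3, 3] = torch.tensor([1.0, 2.0, 3.0])  # add a translation
trans_21 = inverse_transformation(trans_12)
print(torch.allclose(trans_12 @ trans_21, torch.eye(4).expand(2, 4, 4)))  # True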
  {
    "library": "pandas",
    "name": "parse_datetime_format_str",
    "source_code": "def parse_datetime_format_str(format_str, data) -> pd.Series | np.ndarray:\n    timestamp_meta = re.match('ts([smun]):(.*)', format_str)\n    if timestamp_meta:\n        unit, tz = (timestamp_meta.group(1), timestamp_meta.group(2))\n        if unit != 's':\n            unit += 's'\n        data = data.astype(f'datetime64[{unit}]')\n        if tz != '':\n            data = pd.Series(data).dt.tz_localize('UTC').dt.tz_convert(tz)\n        return data\n    date_meta = re.match('td([Dm])', format_str)\n    if date_meta:\n        unit = date_meta.group(1)\n        if unit == 'D':\n            data = (data.astype(np.uint64) * (24 * 60 * 60)).astype('datetime64[s]')\n        elif unit == 'm':\n            data = data.astype('datetime64[ms]')\n        else:\n            raise NotImplementedError(f'Date unit is not supported: {unit}')\n        return data\n    raise NotImplementedError(f'DateTime kind is not supported: {format_str}')",
    "docstring": "Parse datetime to interpret the .",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\interchange\\from_dataframe.py",
    "ast_data": "FunctionDef name:parse_datetime_format_str arg:format_str arg:data arguments arg arg Assign Call If Assign Call Call If Compare Assign Call If Compare Assign Call Call Call Return return:yes Assign Call If Assign Call If Compare Assign Call Call If Compare Assign Call Raise Call Return return:yes Raise Call"
  },
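A standalone illustration of the 'ts<unit>:<tz>' grammar the parser above matches (unit one of s/m/u/n, optional timezone after the colon); this mirrors the unit-expansion logic without calling the private pandas helper, and the sample format string is an assumption.

import re
import numpy as np

fmt = 'tsm:UTC'  # milliseconds since the epoch, UTC
unit, tz = re.match(r'ts([smun]):(.*)', fmt).groups()
if unit != 's':
    unit += 's'  # 'm' -> 'ms', 'u' -> 'us', 'n' -> 'ns'
data = np.array([0, 1000], dtype=np.int64).astype(f'datetime64[{unit}]')
print(data)  # ['1970-01-01T00:00:00.000' '1970-01-01T00:00:01.000']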
  {
    "library": "tensorflow",
    "name": "scatter_div",
    "source_code": "def scatter_div(self, sparse_delta, use_locking=False, name=None):\n    raise NotImplementedError",
    "docstring": "Divide this variable by . Args: sparse_delta: to divide this variable by. use_locking: If , use locking during the operation. name: the name of the operation. Returns: The updated variable. Raises: TypeError: if is not an .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\variables.py",
    "ast_data": "FunctionDef name:scatter_div arg:self arg:sparse_delta arg:use_locking arg:name arguments arg arg arg arg Raise"
  },
  {
    "library": "tensorflow",
    "name": "_get_full_name",
    "source_code": "def _get_full_name(self, node):\n    curr = node\n    items = []\n    while not isinstance(curr, ast.Name):\n        if not isinstance(curr, ast.Attribute):\n            return None\n        items.append(curr.attr)\n        curr = curr.value\n    items.append(curr.id)\n    return '.'.join(reversed(items))",
    "docstring": "Traverse an Attribute node to generate a full name, e.g., \"tf.foo.bar\". This is the inverse of . Args: node: A Node of type Attribute. Returns: a '.'-delimited full-name or None if node was not Attribute or Name. i.e. returns None, while would return \"a.b.c\".",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\tools\\compatibility\\ast_edits.py",
    "ast_data": "FunctionDef name:_get_full_name arg:self arg:node arguments arg arg Assign Assign While Call If Call Return return:no Call Assign Call Return return:yes Call Call"
  },
  {
    "library": "scipy",
    "name": "regularized_lsq_with_qr",
    "source_code": "def regularized_lsq_with_qr(m, n, R, QTb, perm, diag, copy_R=True):\n    if copy_R:\n        R = R.copy()\n    v = QTb.copy()\n    givens_elimination(R, v, diag[perm])\n    abs_diag_R = np.abs(np.diag(R))\n    threshold = EPS * max(m, n) * np.max(abs_diag_R)\n    nns, = np.nonzero(abs_diag_R > threshold)\n    R = R[np.ix_(nns, nns)]\n    v = v[nns]\n    x = np.zeros(n)\n    x[perm[nns]] = solve_triangular(R, v)\n    return x",
    "docstring": "Solve regularized least squares using information from QR-decomposition. The initial problem is to solve the following system in a least-squares sense:: A x = b D x = 0 where D is diagonal matrix. The method is based on QR decomposition of the form A P = Q R, where P is a column permutation matrix, Q is an orthogonal matrix and R is an upper triangular matrix. Parameters ---------- m, n : int Initial shape of A. R : ndarray, shape (n, n) Upper triangular matrix from QR decomposition of A. QTb : ndarray, shape (n,) First n components of Q^T b. perm : ndarray, shape (n,) Array defining column permutation of A, such that ith column of P is perm[i]-th column of identity matrix. diag : ndarray, shape (n,) Array containing diagonal elements of D. Returns ------- x : ndarray, shape (n,) Found least-squares solution.",
    "type": "function",
    "file_path": "scipy\\scipy\\optimize\\_lsq\\trf_linear.py",
    "ast_data": "FunctionDef name:regularized_lsq_with_qr arg:m arg:n arg:R arg:QTb arg:perm arg:diag arg:copy_R arguments arg arg arg arg arg arg arg If Assign Call Assign Call Call Assign Call Call Assign Call Call Assign Call Compare Assign Call Assign Assign Call Assign Call Return return:yes"
  },
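The problem regularized_lsq_with_qr solves, written as one stacked dense least-squares system ||[A; D] x - [b; 0]|| -> min; a reference computation with np.linalg.lstsq under assumed random data, not the Givens/QR path used above.

import numpy as np

rng = np.random.default_rng(0)
A = rng.standard_normal((6, 3))
b = rng.standard_normal(6)
D = np.diag([0.1, 0.1, 0.1])               # diagonal regularization matrix
lhs = np.vstack([A, D])                     # stack A on top of D
rhs = np.concatenate([b, np.zeros(3)])      # D x = 0 target
x, *_ = np.linalg.lstsq(lhs, rhs, rcond=None)
print(x)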
  {
    "library": "pytorch",
    "name": "compute_idle_time",
    "source_code": "def compute_idle_time(self):\n    idle = False\n    idle_start = 0\n    idle_intervals: list[Interval] = []\n    if self.queue_depth_list and self.events:\n        idle_intervals += [Interval(self.events[0].start_time_ns, self.queue_depth_list[0].start), Interval(self.queue_depth_list[-1].end, self.events[-1].end_time_ns)]\n    for data_point in self.queue_depth_list:\n        if data_point.queue_depth == 0 and (not idle):\n            idle_start = data_point.end\n            idle = True\n        if data_point.queue_depth > 0 and idle:\n            idle_intervals.append(Interval(idle_start, data_point.start))\n            idle = False\n    event_list = [e.event for e in self.metrics.keys()]\n    for event in event_list:\n        self.metrics[EventKey(event)].idle_time_ns = EventKey(event).intervals_overlap(idle_intervals)",
    "docstring": "Computes idle time of the profile.",
    "type": "method",
    "file_path": "pytorch\\torch\\profiler\\_utils.py",
    "ast_data": "FunctionDef name:compute_idle_time arg:self arguments arg Assign Assign If BoolOp Call Call For If BoolOp Compare Assign Assign If BoolOp Compare Call Call Assign Assign Call For Assign Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "_reset_flat_param_grad_info_if_needed",
    "source_code": "def _reset_flat_param_grad_info_if_needed(handles: list[FlatParamHandle]):\n    if not isinstance(handles, list):\n        handles = [handles]\n    for handle in handles:\n        if handle._use_orig_params:\n            handle._reset_flat_param_grad_info_if_needed()",
    "docstring": "Clears the original parameters' gradients if needed. This method's CPU overhead is minimal, so we may call it throughout FSDP methods, which serve as callsites to free the gradient memory earlier.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\fsdp\\_runtime_utils.py",
    "ast_data": "FunctionDef name:_reset_flat_param_grad_info_if_needed arg:handles arguments arg If Call Assign For If Call"
  },
  {
    "library": "django",
    "name": "SuspiciousFileOperation",
    "source_code": "class SuspiciousFileOperation(SuspiciousOperation):\n    pass",
    "docstring": "A Suspicious filesystem operation was attempted",
    "type": "class",
    "file_path": "django\\django\\core\\exceptions.py",
    "ast_data": "ClassDef name:SuspiciousFileOperation"
  },
  {
    "library": "pytorch",
    "name": "ReplaceViewOpsWithViewCopyOpsPass",
    "source_code": "class ReplaceViewOpsWithViewCopyOpsPass(_ExportPassBaseDeprecatedDoNotUse):\n\n    def call_operator(self, op, args, kwargs, meta):\n        if op in _NON_FUNCTIONAL_OPS_TO_FUNCTIONAL_OPS:\n            return super().call_operator(_NON_FUNCTIONAL_OPS_TO_FUNCTIONAL_OPS[op], args, kwargs, meta)\n        if isinstance(op, HigherOrderOperator):\n            return super().call_operator(op, args, kwargs, meta)\n        if (view_copy_op := get_view_copy_of_view_op(op._schema)):\n            return super().call_operator(view_copy_op, args, kwargs, meta)\n        return super().call_operator(op, args, kwargs, meta)",
    "docstring": "Our backend expects pure functional operators. For efficiency purposes, we keep view ops around while functionalizing the exported program. This pass replaces view ops with view copy ops for backends that need AOT memory planning.",
    "type": "class",
    "file_path": "pytorch\\torch\\_export\\passes\\replace_view_ops_with_view_copy_ops_pass.py",
    "ast_data": "ClassDef name:ReplaceViewOpsWithViewCopyOpsPass FunctionDef name:call_operator arg:self arg:op arg:args arg:kwargs arg:meta arguments arg arg arg arg arg If Compare Return return:yes Call Call If Call Return return:yes Call Call If Call Return return:yes Call Call Return return:yes Call Call"
  },
  {
    "library": "sphinx",
    "name": "is_vector_graphics",
    "source_code": "def is_vector_graphics(self, filename: str) -> bool:\n    ext = os.path.splitext(filename)[-1]\n    return ext in VECTOR_GRAPHICS_EXTENSIONS",
    "docstring": "Does the filename extension indicate a vector graphic format?",
    "type": "method",
    "file_path": "sphinx\\sphinx\\builders\\_epub_base.py",
    "ast_data": "FunctionDef name:is_vector_graphics arg:self arg:filename arguments arg arg Assign Call Return return:yes Compare"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, cell):\n    self._cell = cell",
    "docstring": "Creates a new StringGaugeCell. Args: cell: A c pointer of TFE_MonitoringStringGaugeCell.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\monitoring.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:cell arguments arg arg Assign"
  },
  {
    "library": "pandas",
    "name": "name",
    "source_code": "@property\ndef name(self) -> str:\n    return f'{self.pyarrow_dtype!s}[{self.storage}]'",
    "docstring": "A string identifying the data type.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\dtypes\\dtypes.py",
    "ast_data": "FunctionDef name:name arg:self arguments arg Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "_count",
    "source_code": "def _count(self, X, Y):\n    self.feature_count_ += safe_sparse_dot(Y.T, X)\n    self.class_count_ += Y.sum(axis=0)",
    "docstring": "Count and smooth feature occurrences.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\naive_bayes.py",
    "ast_data": "FunctionDef name:_count arg:self arg:X arg:Y arguments arg arg arg Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_get_default_initializer",
    "source_code": "def _get_default_initializer(self, name, shape=None, dtype=dtypes.float32):\n    del shape\n    if dtype.is_floating:\n        initializer = init_ops.glorot_uniform_initializer()\n        initializing_from_value = False\n    elif dtype.is_integer or dtype.is_unsigned or dtype.is_bool or (dtype == dtypes.string):\n        initializer = init_ops.zeros_initializer()\n        initializing_from_value = False\n    else:\n        raise ValueError('An initializer for variable %s of %s is required' % (name, dtype.base_dtype))\n    return (initializer, initializing_from_value)",
    "docstring": "Provide a default initializer and a corresponding value. Args: name: see get_variable. shape: see get_variable. dtype: see get_variable. Returns: initializer and initializing_from_value. See get_variable above. Raises: ValueError: When giving unsupported dtype.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\variable_scope.py",
    "ast_data": "FunctionDef name:_get_default_initializer arg:self arg:name arg:shape arg:dtype arguments arg arg arg arg If Assign Call Assign If BoolOp Compare Assign Call Assign Raise Call Return return:yes"
  },
  {
    "library": "cryptography",
    "name": "public_key",
    "source_code": "@abc.abstractmethod\ndef public_key(self) -> Ed25519PublicKey:\n    pass",
    "docstring": "The Ed25519PublicKey derived from the private key.",
    "type": "method",
    "file_path": "cryptography\\src\\cryptography\\hazmat\\primitives\\asymmetric\\ed25519.py",
    "ast_data": "FunctionDef name:public_key arg:self arguments arg"
  },
  {
    "library": "scikit-learn",
    "name": "_get_oob_predictions",
    "source_code": "@staticmethod\ndef _get_oob_predictions(tree, X):\n    y_pred = tree.predict(X, check_input=False)\n    if y_pred.ndim == 1:\n        y_pred = y_pred[:, np.newaxis, np.newaxis]\n    else:\n        y_pred = y_pred[:, np.newaxis, :]\n    return y_pred",
    "docstring": "Compute the OOB predictions for an individual tree. Parameters ---------- tree : DecisionTreeRegressor object A single decision tree regressor. X : ndarray of shape (n_samples, n_features) The OOB samples. Returns ------- y_pred : ndarray of shape (n_samples, 1, n_outputs) The OOB associated predictions.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\ensemble\\_forest.py",
    "ast_data": "FunctionDef name:_get_oob_predictions arg:tree arg:X arguments arg arg Assign Call If Compare Assign Assign Return return:yes"
  },
  {
    "library": "authlib",
    "name": "validate_response_types",
    "source_code": "def validate_response_types(self):\n    self._validate_claim_value('response_types')",
    "docstring": "Array of the OAuth 2.0 response type strings that the client can use at the authorization endpoint.",
    "type": "method",
    "file_path": "authlib\\authlib\\oauth2\\rfc7591\\claims.py",
    "ast_data": "FunctionDef name:validate_response_types arg:self arguments arg Call"
  },
  {
    "library": "tensorflow",
    "name": "add_to_tensor",
    "source_code": "def add_to_tensor(self, mat, name='add_to_tensor'):\n    return self._possibly_broadcast_batch_shape(mat)",
    "docstring": "Add matrix represented by this operator to . Equiv to . Args: mat: with same and shape broadcastable to . name: A name to give this . Returns: A with broadcast shape and same as .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\linalg\\linear_operator_zeros.py",
    "ast_data": "FunctionDef name:add_to_tensor arg:self arg:mat arg:name arguments arg arg arg Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "has_canonical_format",
    "source_code": "@property\ndef has_canonical_format(self) -> bool:\n    if not getattr(self, '_has_sorted_indices', True):\n        self._has_canonical_format = False\n    elif not hasattr(self, '_has_canonical_format'):\n        M = len(self.indptr) - 1\n        self.has_canonical_format = bool(csr_has_canonical_format(M, self.indptr, self.indices))\n    return self._has_canonical_format",
    "docstring": "Whether the array/matrix has sorted indices and no duplicates Returns - True: if the above applies - False: otherwise has_canonical_format implies has_sorted_indices, so if the latter flag is False, so will the former be; if the former is found True, the latter flag is also set.",
    "type": "method",
    "file_path": "scipy\\scipy\\sparse\\_compressed.py",
    "ast_data": "FunctionDef name:has_canonical_format arg:self arguments arg If Call Assign If Call Assign Call Assign Call Call Return return:yes"
  },
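Duplicate entries make the canonical-format flag above report False until they are merged; a small CSR construction with two entries sharing a column:

import numpy as np
from scipy.sparse import csr_array

data, indices, indptr = np.array([1.0, 2.0]), np.array([0, 0]), np.array([0, 2])
a = csr_array((data, indices, indptr), shape=(1, 3))  # two entries in column 0
print(a.has_canonical_format)  # False
a.sum_duplicates()
print(a.has_canonical_format)  # True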
  {
    "library": "scipy",
    "name": "fresnelc_zeros",
    "source_code": "def fresnelc_zeros(nt):\n    if floor(nt) != nt or nt <= 0 or (not isscalar(nt)):\n        raise ValueError('Argument must be positive scalar integer.')\n    return _specfun.fcszo(1, nt)",
    "docstring": "Compute nt complex zeros of cosine Fresnel integral C(z). Parameters ---------- nt : int Number of zeros to compute Returns ------- fresnelc_zeros: ndarray Zeros of the cosine Fresnel integral References ---------- .. [1] Zhang, Shanjie and Jin, Jianming. \"Computation of Special Functions\", John Wiley and Sons, 1996.",
    "type": "function",
    "file_path": "scipy\\scipy\\special\\_basic.py",
    "ast_data": "FunctionDef name:fresnelc_zeros arg:nt arguments arg If BoolOp Compare Call Compare Call Raise Call Return return:yes Call"
  },
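Basic usage of fresnelc_zeros, checking the returned points against the Fresnel integral itself (scipy.special.fresnel accepts complex arguments):

import numpy as np
from scipy.special import fresnel, fresnelc_zeros

z = fresnelc_zeros(3)
_, c = fresnel(z)         # C(z) should vanish at the computed zeros
print(np.max(np.abs(c)))  # close to 0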
  {
    "library": "scipy",
    "name": "derivative",
    "source_code": "def derivative(self, nu=1):\n    if nu < 0:\n        return self.antiderivative(-nu)\n    if nu > 1:\n        bp = self\n        for k in range(nu):\n            bp = bp.derivative()\n        return bp\n    if nu == 0:\n        c2 = self.c.copy()\n    else:\n        rest = (None,) * (self.c.ndim - 2)\n        k = self.c.shape[0] - 1\n        dx = np.diff(self.x)[(None, slice(None)) + rest]\n        c2 = k * np.diff(self.c, axis=0) / dx\n    if c2.shape[0] == 0:\n        c2 = np.zeros((1,) + c2.shape[1:], dtype=c2.dtype)\n    return self.construct_fast(c2, self.x, self.extrapolate, self.axis)",
    "docstring": "Construct a new piecewise polynomial representing the derivative. Parameters ---------- nu : int, optional Order of derivative to evaluate. Default is 1, i.e., compute the first derivative. If negative, the antiderivative is returned. Returns ------- bp : BPoly Piecewise polynomial of order k - nu representing the derivative of this polynomial.",
    "type": "method",
    "file_path": "scipy\\scipy\\interpolate\\_interpolate.py",
    "ast_data": "FunctionDef name:derivative arg:self arg:nu arguments arg arg If Compare Return return:yes Call If Compare Assign For Call Assign Call Return return:yes If Compare Assign Call Assign Assign Assign Call Call Assign Call If Compare Assign Call Return return:yes Call"
  },
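Differentiating a Bernstein-basis piecewise polynomial with the method above: the degree-2 Bernstein coefficients [0, 0, 1] on [0, 1] represent x**2, whose derivative 2*x evaluates to 1 at x = 0.5.

import numpy as np
from scipy.interpolate import BPoly

bp = BPoly(np.array([[0.0], [0.0], [1.0]]), [0.0, 1.0])  # x**2 on [0, 1]
dbp = bp.derivative()
print(dbp(0.5))  # 1.0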
  {
    "library": "pytorch",
    "name": "get_bfs_level_partition",
    "source_code": "def get_bfs_level_partition(partitions: list[Partition]) -> None:\n    current_level: set[Partition] = set()\n    visited: set[Partition] = set()\n    for partition in partitions:\n        if len(partition.parents) == 0:\n            current_level.add(partition)\n    next_level: set[Partition] = set()\n    level = 0\n    while current_level:\n        partition = current_level.pop()\n        partition.bfs_level = level\n        visited.add(partition)\n        children = partition.children\n        for child in children:\n            if child not in next_level:\n                next_level.add(child)\n        if not current_level:\n            current_level = next_level.copy()\n            next_level = set()\n            level += 1\n    return",
    "docstring": "Given a list of partitions, mark the bfs level for each partition",
    "type": "function",
    "file_path": "pytorch\\torch\\fx\\experimental\\accelerator_partitioner.py",
    "ast_data": "FunctionDef name:get_bfs_level_partition arg:partitions arguments arg Call Call For If Compare Call Call Call Assign While Assign Call Assign Call Assign For If Compare Call If Assign Call Assign Call Return return:no"
  },
  {
    "library": "tensorflow",
    "name": "create_variable_resource",
    "source_code": "def create_variable_resource(self, function, args=None, kwargs=None):\n    closure = PerWorkerVariableClosure(function, self._cluster.resource_cancellation_mgr, args=args, kwargs=kwargs)\n    return self._register_and_schedule_resource_closure(closure)",
    "docstring": "Create a per-worker variable.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\coordinator\\cluster_coordinator.py",
    "ast_data": "FunctionDef name:create_variable_resource arg:self arg:function arg:args arg:kwargs arguments arg arg arg arg Assign Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_SegmentMinOrMaxGrad",
    "source_code": "def _SegmentMinOrMaxGrad(op: ops.Operation, grad):\n    zeros = array_ops.zeros_like(op.inputs[0], dtype=op.inputs[0].dtype)\n    gathered_outputs = array_ops.gather(op.outputs[0], op.inputs[1])\n    is_selected = math_ops.equal(op.inputs[0], gathered_outputs)\n    num_selected = math_ops.segment_sum(math_ops.cast(is_selected, grad.dtype), op.inputs[1])\n    weighted_grads = math_ops.divide(grad, num_selected)\n    gathered_grads = array_ops.gather(weighted_grads, op.inputs[1])\n    return (array_ops.where_v2(is_selected, gathered_grads, zeros), None)",
    "docstring": "Gradient for SegmentMin and SegmentMax.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\math_grad.py",
    "ast_data": "FunctionDef name:_SegmentMinOrMaxGrad arg:op arg:grad arguments arg arg Assign Call Assign Call Assign Call Assign Call Call Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "seaborn",
    "name": "Area",
    "source_code": "@document_properties\n@dataclass\nclass Area(AreaBase, Mark):\n    color: MappableColor = Mappable('C0')\n    alpha: MappableFloat = Mappable(0.2)\n    fill: MappableBool = Mappable(True)\n    edgecolor: MappableColor = Mappable(depend='color')\n    edgealpha: MappableFloat = Mappable(1)\n    edgewidth: MappableFloat = Mappable(rc='patch.linewidth')\n    edgestyle: MappableStyle = Mappable('-')\n    baseline: MappableFloat = Mappable(0, grouping=False)\n\n    def _standardize_coordinate_parameters(self, data, orient):\n        dv = {'x': 'y', 'y': 'x'}[orient]\n        return data.rename(columns={'baseline': f'{dv}min', dv: f'{dv}max'})\n\n    def _postprocess_artist(self, artist, ax, orient):\n        artist.set_linewidth(artist.get_linewidth() * 2)\n        linestyle = artist.get_linestyle()\n        if linestyle[1]:\n            linestyle = (linestyle[0], tuple((x / 2 for x in linestyle[1])))\n        artist.set_linestyle(linestyle)\n        artist.set_clip_path(artist.get_path(), artist.get_transform() + ax.transData)\n        if self.artist_kws.get('clip_on', True):\n            artist.set_clip_box(ax.bbox)\n        val_idx = ['y', 'x'].index(orient)\n        artist.sticky_edges[val_idx][:] = (0, np.inf)",
    "docstring": "A fill mark drawn from a baseline to data values. See also -------- Band : A fill mark representing an interval between values. Examples -------- .. include:: ../docstrings/objects.Area.rst",
    "type": "class",
    "file_path": "seaborn\\seaborn\\_marks\\area.py",
    "ast_data": "ClassDef name:Area Call Call Call Call Call Call Call Call FunctionDef name:_standardize_coordinate_parameters arg:self arg:data arg:orient arguments arg arg arg Assign Return return:yes Call FunctionDef name:_postprocess_artist arg:self arg:artist arg:ax arg:orient arguments arg arg arg arg Call Call Assign Call If Assign Call Call Call Call Call If Call Call Assign Call Assign"
  },
  {
    "library": "tensorflow",
    "name": "map_indices_in_shard",
    "source_code": "def map_indices_in_shard(num_sparse_cores: int, offset_in_shard: int, shard_rotation: int, row_indices: tensor.Tensor) -> tuple[tensor.Tensor, tensor.Tensor]:\n    shard_index = (row_indices % num_sparse_cores + shard_rotation) % num_sparse_cores\n    position_in_shard = offset_in_shard + row_indices // num_sparse_cores\n    return (shard_index, position_in_shard)",
    "docstring": "Maps a row of a given table to its sparse core shard and position. Maps a given a row index of a logical table and its layout in sparse core, returns the index of the shard where the row is placed and its relative position within that sparse core shard. Args: num_sparse_cores: The number of sparsecores, this determines the number of shards present. offset_in_shard: Offset within a shard where the queried table starts. shard_rotation: The rotation of this table's shards. row_indices: row indices of the embedding table being looked up. Returns: A Tuple representing shard_index and position of the row in that shard.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\tpu\\tpu_embedding_v3_utils.py",
    "ast_data": "FunctionDef name:map_indices_in_shard arg:num_sparse_cores arg:offset_in_shard arg:shard_rotation arg:row_indices arguments arg arg arg arg Assign Assign Return return:yes"
  },
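The shard-mapping arithmetic above with plain integers: 4 SparseCores, a shard rotation of 1, and a table starting at offset 8 within each shard (all values chosen for illustration).

num_sparse_cores, offset_in_shard, shard_rotation = 4, 8, 1
for row in [0, 1, 5, 7]:
    shard = (row % num_sparse_cores + shard_rotation) % num_sparse_cores
    position = offset_in_shard + row // num_sparse_cores
    print(row, '->', (shard, position))
# 0 -> (1, 8)   1 -> (2, 8)   5 -> (2, 9)   7 -> (0, 9)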
  {
    "library": "tensorflow",
    "name": "meshgrid",
    "source_code": "@tf_export('meshgrid')\n@dispatch.add_dispatch_support\ndef meshgrid(*args, **kwargs):\n    indexing = kwargs.pop('indexing', 'xy')\n    name = kwargs.pop('name', 'meshgrid')\n    if kwargs:\n        key = list(kwargs.keys())[0]\n        raise TypeError(\"'{}' is an invalid keyword argument for this function\".format(key))\n    if indexing not in ('xy', 'ij'):\n        raise ValueError(f\"Argument `indexing` parameter must be either 'xy' or 'ij', got '{indexing}'\")\n    with ops.name_scope(name, 'meshgrid', args) as name:\n        ndim = len(args)\n        s0 = (1,) * ndim\n        if not ndim:\n            return []\n        output = []\n        for i, x in enumerate(args):\n            output.append(reshape(array_ops_stack.stack(x), s0[:i] + (-1,) + s0[i + 1:]))\n        shapes = [size(x) for x in args]\n        output_dtype = ops.convert_to_tensor(args[0]).dtype.base_dtype\n        if indexing == 'xy' and ndim > 1:\n            output[0] = reshape(output[0], (1, -1) + (1,) * (ndim - 2))\n            output[1] = reshape(output[1], (-1, 1) + (1,) * (ndim - 2))\n            shapes[0], shapes[1] = (shapes[1], shapes[0])\n        mult_fact = ones(shapes, output_dtype)\n        return [x * mult_fact for x in output]",
    "docstring": "Broadcasts parameters for evaluation on an N-D grid. Given N one-dimensional coordinate arrays , returns a list of N-D coordinate arrays for evaluating expressions on an N-D grid. Notes: supports cartesian ('xy') and matrix ('ij') indexing conventions. When the argument is set to 'xy' (the default), the broadcasting instructions for the first two dimensions are swapped. Examples: Calling with the tensors Args: *args: s with rank 1. **kwargs: - indexing: Either 'xy' or 'ij' (optional, default: 'xy'). - name: A name for the operation (optional). Returns: outputs: A list of N s with rank N. Raises: TypeError: When no keyword arguments (kwargs) are passed. ValueError: When indexing keyword argument is not one of or .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\array_ops.py",
    "ast_data": "FunctionDef name:meshgrid arguments arg arg Assign Call Assign Call If Assign Call Call Raise Call Call If Compare Raise Call With Call Assign Call Assign If Return return:no Assign For Call Call Call Call Assign Call Assign Call If BoolOp Compare Compare Assign Call Assign Call Assign Assign Call Return return:yes Call"
  },
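The indexing conventions described above, shown on small inputs; only the first two output dimensions differ between 'xy' and 'ij'.

import tensorflow as tf

x = tf.constant([1, 2, 3])
y = tf.constant([4, 5])
X, Y = tf.meshgrid(x, y)                   # default 'xy' indexing
Xi, Yi = tf.meshgrid(x, y, indexing='ij')  # matrix indexing
print(X.shape, Xi.shape)                   # (2, 3) (3, 2)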
  {
    "library": "cherrypy",
    "name": "index",
    "source_code": "@cherrypy.expose\ndef index(self):\n    count = cherrypy.session.get('count', 0) + 1\n    cherrypy.session['count'] = count\n    return \"\\n            During your current session, you've viewed this\\n            page %s times! Your life is a patio of fun!\\n        \" % count",
    "docstring": "Produce HTTP response body of hit counter app index URI.",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\tutorial\\tut07_sessions.py",
    "ast_data": "FunctionDef name:index arg:self arguments arg Assign Call Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_make_storage",
    "source_code": "def _make_storage(self, wrapped_list):\n    return wrapped_list",
    "docstring": "Use the user's original list for storage.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\trackable\\data_structures.py",
    "ast_data": "FunctionDef name:_make_storage arg:self arg:wrapped_list arguments arg arg Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "get_antialiased",
    "source_code": "def get_antialiased(self):\n    return self._antialiaseds",
    "docstring": "Get the antialiasing state for rendering. Returns ------- array of bools",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\collections.py",
    "ast_data": "FunctionDef name:get_antialiased arg:self arguments arg Return return:yes"
  },
  {
    "library": "cherrypy",
    "name": "downgrade_wsgi_ux_to_1x",
    "source_code": "def downgrade_wsgi_ux_to_1x(environ):\n    env1x = {}\n    url_encoding = environ[ntou('wsgi.url_encoding')]\n    for k, v in environ.copy().items():\n        if k in [ntou('PATH_INFO'), ntou('SCRIPT_NAME'), ntou('QUERY_STRING')]:\n            v = v.encode(url_encoding)\n        elif isinstance(v, str):\n            v = v.encode('ISO-8859-1')\n        env1x[k.encode('ISO-8859-1')] = v\n    return env1x",
    "docstring": "Return new environ dict for WSGI 1.x from provided WSGI u.x environ.",
    "type": "function",
    "file_path": "cherrypy\\cherrypy\\_cpwsgi.py",
    "ast_data": "FunctionDef name:downgrade_wsgi_ux_to_1x arg:environ arguments arg Assign Assign Call For Call Call If Compare Call Call Call Assign Call If Call Assign Call Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_profiled_init",
    "source_code": "def _profiled_init(self, target='', graph=None, config=None):\n    self._profiler_init_internal(target, graph, config)",
    "docstring": "Overwrites the session.__init__.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\profiler\\profile_context.py",
    "ast_data": "FunctionDef name:_profiled_init arg:self arg:target arg:graph arg:config arguments arg arg arg arg Call"
  },
  {
    "library": "cherrypy",
    "name": "www_authenticate",
    "source_code": "def www_authenticate(realm, key, algorithm='MD5', nonce=None, qop=qop_auth, stale=False, accept_charset=DEFAULT_CHARSET[:]):\n    if qop not in valid_qops:\n        raise ValueError(\"Unsupported value for qop: '%s'\" % qop)\n    if algorithm not in valid_algorithms:\n        raise ValueError(\"Unsupported value for algorithm: '%s'\" % algorithm)\n    HEADER_PATTERN = 'Digest realm=\"%s\", nonce=\"%s\", algorithm=\"%s\", qop=\"%s\"%s%s'\n    if nonce is None:\n        nonce = synthesize_nonce(realm, key)\n    stale_param = ', stale=\"true\"' if stale else ''\n    charset_declaration = _get_charset_declaration(accept_charset)\n    return HEADER_PATTERN % (realm, nonce, algorithm, qop, stale_param, charset_declaration)",
    "docstring": "Construct a WWW-Authenticate header for Digest authentication.",
    "type": "function",
    "file_path": "cherrypy\\cherrypy\\lib\\auth_digest.py",
    "ast_data": "FunctionDef name:www_authenticate arg:realm arg:key arg:algorithm arg:nonce arg:qop arg:stale arg:accept_charset arguments arg arg arg arg arg arg arg If Compare Raise Call If Compare Raise Call Assign If Compare Assign Call Assign Assign Call Return return:yes"
  },
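Constructing a Digest challenge with the function above; 'Protected Area' and 'server-secret' are placeholder values for the realm and the per-server key used to synthesize the nonce.

from cherrypy.lib.auth_digest import www_authenticate

header = www_authenticate('Protected Area', key='server-secret')
print(header)
# Digest realm="Protected Area", nonce="...", algorithm="MD5", qop="auth", ...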
  {
    "library": "pandas",
    "name": "copy",
    "source_code": "def copy(self) -> Self:\n    raise AbstractMethodError(self)",
    "docstring": "Return a copy of the array. This method creates a copy of the where modifying the data in the copy will not affect the original array. This is useful when you want to manipulate data without altering the original dataset. Returns ------- ExtensionArray A new object that is a copy of the current instance. See Also -------- DataFrame.copy : Return a copy of the DataFrame. Series.copy : Return a copy of the Series. Examples -------- >>> arr = pd.array([1, 2, 3]) >>> arr2 = arr.copy() >>> arr[0] = 2 >>> arr2 [1, 2, 3] Length: 3, dtype: Int64",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\arrays\\base.py",
    "ast_data": "FunctionDef name:copy arg:self arguments arg Raise Call"
  },
  {
    "library": "kornia",
    "name": "save",
    "source_code": "def save(self, name: Optional[str]=None, n_row: Optional[int]=None) -> None:\n    if name is None:\n        name = f'Kornia-{datetime.datetime.now(tz=datetime.timezone.utc).strftime('%Y%m%d%H%M%S')!s}.jpg'\n    if len(self._output_image.shape) == 3:\n        out_image = self._output_image\n    if len(self._output_image.shape) == 4:\n        if n_row is None:\n            n_row = math.ceil(self._output_image.shape[0] ** 0.5)\n        out_image = kornia.utils.image.make_grid(self._output_image, n_row, padding=2)\n    kornia.io.write_image(name, out_image.mul(255.0).byte())",
    "docstring": "Save the output image(s) to a directory. Args: name: Directory to save the images. n_row: Number of images displayed in each row of the grid.",
    "type": "method",
    "file_path": "kornia\\kornia\\core\\module.py",
    "ast_data": "FunctionDef name:save arg:self arg:name arg:n_row arguments arg arg arg If Compare Assign Call Call If Compare Call Assign If Compare Call If Compare Assign Call Assign Call Call Call Call"
  },
  {
    "library": "scikit-learn",
    "name": "_unique",
    "source_code": "def _unique(values, *, return_inverse=False, return_counts=False):\n    if values.dtype == object:\n        return _unique_python(values, return_inverse=return_inverse, return_counts=return_counts)\n    return _unique_np(values, return_inverse=return_inverse, return_counts=return_counts)",
    "docstring": "Helper function to find unique values with support for python objects. Uses pure python method for object dtype, and numpy method for all other dtypes. Parameters ---------- values : ndarray Values to check for unknowns. return_inverse : bool, default=False If True, also return the indices of the unique values. return_counts : bool, default=False If True, also return the number of times each unique item appears in values. Returns ------- unique : ndarray The sorted unique values. unique_inverse : ndarray The indices to reconstruct the original array from the unique array. Only provided if is True. unique_counts : ndarray The number of times each of the unique values comes up in the original array. Only provided if is True.",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\utils\\_encode.py",
    "ast_data": "FunctionDef name:_unique arg:values arguments arg arg arg If Compare Return return:yes Call Return return:yes Call"
  },
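`_unique` delegates to `np.unique` for non-object dtypes; this standalone sketch shows what the returned inverse and counts mean:

```python
# Sketch of the numpy fast path that _unique wraps for non-object dtypes.
import numpy as np

values = np.array([2, 1, 2, 3])
uniques, inverse, counts = np.unique(values, return_inverse=True, return_counts=True)
assert (uniques[inverse] == values).all()  # inverse reconstructs the input
assert counts.tolist() == [1, 2, 1]        # 1 appears once, 2 twice, 3 once
```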
  {
    "library": "matplotlib",
    "name": "_check_and_log_subprocess",
    "source_code": "def _check_and_log_subprocess(command, logger, **kwargs):\n    logger.debug('%s', _pformat_subprocess(command))\n    proc = subprocess.run(command, capture_output=True, **kwargs)\n    if proc.returncode:\n        stdout = proc.stdout\n        if isinstance(stdout, bytes):\n            stdout = stdout.decode()\n        stderr = proc.stderr\n        if isinstance(stderr, bytes):\n            stderr = stderr.decode()\n        raise RuntimeError(f'The command\\n    {_pformat_subprocess(command)}\\nfailed and generated the following output:\\n{stdout}\\nand the following error:\\n{stderr}')\n    if proc.stdout:\n        logger.debug('stdout:\\n%s', proc.stdout)\n    if proc.stderr:\n        logger.debug('stderr:\\n%s', proc.stderr)\n    return proc.stdout",
    "docstring": "Run *command*, returning its stdout output if it succeeds. If it fails (exits with nonzero return code), raise an exception whose text includes the failed command and captured stdout and stderr output. Regardless of the return code, the command is logged at DEBUG level on *logger*. In case of success, the output is likewise logged.",
    "type": "function",
    "file_path": "matplotlib\\lib\\matplotlib\\cbook.py",
    "ast_data": "FunctionDef name:_check_and_log_subprocess arg:command arg:logger arguments arg arg arg Call Call Assign Call If Assign If Call Assign Call Assign If Call Assign Call Raise Call Call If Call If Call Return return:yes"
  },
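The same check-log-raise pattern can be reproduced standalone; this is a sketch of the idea, not matplotlib's private helper:

```python
# Standalone sketch: log the command, capture both streams, raise with them
# on a nonzero return code, and return stdout on success.
import logging
import subprocess

logger = logging.getLogger(__name__)

def check_and_log(command, **kwargs):
    logger.debug('%s', command)
    proc = subprocess.run(command, capture_output=True, text=True, **kwargs)
    if proc.returncode:
        raise RuntimeError(
            f'The command {command!r} failed:\n'
            f'stdout:\n{proc.stdout}\nstderr:\n{proc.stderr}')
    return proc.stdout
```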
  {
    "library": "cryptography",
    "name": "recover_data_from_signature",
    "source_code": "@abc.abstractmethod\ndef recover_data_from_signature(self, signature: bytes, padding: AsymmetricPadding, algorithm: hashes.HashAlgorithm | None) -> bytes:\n    pass",
    "docstring": "Recovers the original data from the signature.",
    "type": "method",
    "file_path": "cryptography\\src\\cryptography\\hazmat\\primitives\\asymmetric\\rsa.py",
    "ast_data": "FunctionDef name:recover_data_from_signature arg:self arg:signature arg:padding arg:algorithm arguments arg arg arg arg"
  },
  {
    "library": "scipy",
    "name": "_process_parameters",
    "source_code": "def _process_parameters(self, **params):\n    return params",
    "docstring": "Process and cache distribution parameters for reuse. This is intended to be overridden by subclasses. It allows distribution authors to pre-process parameters for re-use. For instance, when a user parameterizes a LogUniform distribution with and , it makes sense to calculate and because these values will be used in almost all distribution methods. The dictionary returned by this method is passed to all private methods that calculate functions of the distribution.",
    "type": "method",
    "file_path": "scipy\\scipy\\stats\\_distribution_infrastructure.py",
    "ast_data": "FunctionDef name:_process_parameters arg:self arguments arg arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "is_nccl_available",
    "source_code": "def is_nccl_available() -> bool:\n    return _NCCL_AVAILABLE",
    "docstring": "Check if the NCCL backend is available.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\distributed_c10d.py",
    "ast_data": "FunctionDef name:is_nccl_available arguments Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_run_calibration",
    "source_code": "def _run_calibration(saved_model_path: str, signature_keys: Sequence[str], tags: Collection[str], force_graph_mode_calibration: bool, representative_dataset_file_map: Mapping[str, quantization_options_pb2.RepresentativeDatasetFile]) -> bool:\n    repr_dataset_map = rd.TfRecordRepresentativeDatasetLoader(representative_dataset_file_map).load()\n    _run_graph_for_calibration(saved_model_path, signature_keys, tags, repr_dataset_map, force_graph_mode_calibration)\n    return True",
    "docstring": "Runs calibration and adds calibration statistics to exported model. Args: saved_model_path: Path to the SavedModel to run calibration. signature_keys: List of signature keys corresponding to SignatureDefs to run calibration on. tags: A set of tags that identify the MetaGraphDef. force_graph_mode_calibration: If True, runs the calibration in graph mode. representative_dataset_file_map: Signature key -> mapping for running the calibration step. Each dataset file stores the representative dataset for the function matching the signature key. Returns: upon successfully running calibration.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\compiler\\mlir\\quantization\\tensorflow\\python\\py_function_lib.py",
    "ast_data": "FunctionDef name:_run_calibration arg:saved_model_path arg:signature_keys arg:tags arg:force_graph_mode_calibration arg:representative_dataset_file_map arguments arg arg arg arg arg Assign Call Call Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "set_urls",
    "source_code": "def set_urls(self, urls):\n    self._urls = urls if urls is not None else [None]\n    self.stale = True",
    "docstring": "Parameters ---------- urls : list of str or None Notes ----- URLs are currently only implemented by the SVG backend. They are ignored by all other backends.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\collections.py",
    "ast_data": "FunctionDef name:set_urls arg:self arg:urls arguments arg arg Assign Compare Assign"
  },
  {
    "library": "pandas",
    "name": "_validate_can_reindex",
    "source_code": "@final\ndef _validate_can_reindex(self, indexer: np.ndarray) -> None:\n    if not self._index_as_unique and len(indexer):\n        raise ValueError('cannot reindex on an axis with duplicate labels')",
    "docstring": "Check if we are allowing reindexing with this particular indexer. Parameters ---------- indexer : an integer ndarray Raises ------ ValueError if its a duplicate axis",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\indexes\\base.py",
    "ast_data": "FunctionDef name:_validate_can_reindex arg:self arg:indexer arguments arg arg If BoolOp Call Raise Call"
  },
  {
    "library": "django",
    "name": "get",
    "source_code": "def get(self, field):\n    field_name = getattr(field, 'name', field)\n    return self[field_name].value",
    "docstring": "Return the value of the field, instead of an instance of the Field object. May take a string of the field name or a Field object as parameters.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\gdal\\feature.py",
    "ast_data": "FunctionDef name:get arg:self arg:field arguments arg arg Assign Call Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "_get_estimator",
    "source_code": "@abstractmethod\ndef _get_estimator(self):\n    pass",
    "docstring": "Resolve which estimator to return.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\ensemble\\_bagging.py",
    "ast_data": "FunctionDef name:_get_estimator arg:self arguments arg"
  },
  {
    "library": "tensorflow",
    "name": "_handle_wrap_partial_func",
    "source_code": "def _handle_wrap_partial_func(obj):\n    modified = True\n    while modified:\n        modified = False\n        while hasattr(obj, '__wrapped__'):\n            obj = obj.__wrapped__\n            modified = True\n        if isinstance(obj, functools.partial) or isinstance(obj, functools.partialmethod):\n            obj = obj.func\n            modified = True\n    return obj",
    "docstring": "Processes wrapped function and partial functions recursively.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\core\\function\\capture\\free_vars_detect.py",
    "ast_data": "FunctionDef name:_handle_wrap_partial_func arg:obj arguments arg Assign While Assign While Call Assign Assign If BoolOp Call Call Assign Assign Return return:yes"
  },
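The unwrapping loop above can be exercised with plain functools objects; a self-contained sketch:

```python
# Demonstrates the idea: peel __wrapped__ chains (set by functools.wraps) and
# functools.partial wrappers until the underlying callable is reached.
import functools

def base(x, y):
    return x + y

wrapped = functools.wraps(base)(lambda *a, **kw: base(*a, **kw))
obj = functools.partial(wrapped, 1)

modified = True
while modified:
    modified = False
    while hasattr(obj, '__wrapped__'):
        obj, modified = obj.__wrapped__, True
    if isinstance(obj, (functools.partial, functools.partialmethod)):
        obj, modified = obj.func, True

assert obj is base
```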
  {
    "library": "sphinx",
    "name": "compact_paragraph",
    "source_code": "class compact_paragraph(nodes.paragraph):\n    pass",
    "docstring": "Node for a compact paragraph (which never makes a node).",
    "type": "class",
    "file_path": "sphinx\\sphinx\\addnodes.py",
    "ast_data": "ClassDef name:compact_paragraph"
  },
  {
    "library": "django",
    "name": "ThreadedWSGIServer",
    "source_code": "class ThreadedWSGIServer(socketserver.ThreadingMixIn, WSGIServer):\n    daemon_threads = True\n\n    def __init__(self, *args, connections_override=None, **kwargs):\n        super().__init__(*args, **kwargs)\n        self.connections_override = connections_override\n\n    def process_request_thread(self, request, client_address):\n        if self.connections_override:\n            for alias, conn in self.connections_override.items():\n                connections[alias] = conn\n        super().process_request_thread(request, client_address)\n\n    def _close_connections(self):\n        connections.close_all()\n\n    def close_request(self, request):\n        self._close_connections()\n        super().close_request(request)",
    "docstring": "A threaded version of the WSGIServer",
    "type": "class",
    "file_path": "django\\django\\core\\servers\\basehttp.py",
    "ast_data": "ClassDef name:ThreadedWSGIServer Assign FunctionDef name:__init__ arg:self arguments arg arg arg arg Call Call Assign FunctionDef name:process_request_thread arg:self arg:request arg:client_address arguments arg arg arg If For Call Assign Call Call FunctionDef name:_close_connections arg:self arguments arg Call FunctionDef name:close_request arg:self arg:request arguments arg arg Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_double_factorial",
    "source_code": "def _double_factorial(n):\n    return np.prod(np.arange(n, 1, -2))",
    "docstring": "The double factorial function for small Python integer .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\distributions\\special_math.py",
    "ast_data": "FunctionDef name:_double_factorial arg:n arguments arg Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "impl_save_for_backward",
    "source_code": "def impl_save_for_backward(self, _stacklevel=2):\n\n    def inner(f):\n        self._check_can_register_backward()\n        self._check_doesnt_have_library_autograd_impl()\n        if not self._registered_autograd_kernel_indirection:\n            self._register_autograd_kernel_indirection()\n        self._register_impl('save_for_backward', f, stacklevel=_stacklevel)\n        if self._has_impl('backward'):\n            self._register_autograd_kernel()\n    return inner",
    "docstring": "Register a function that tells us what to save for backward. Please see impl_backward for more details.",
    "type": "method",
    "file_path": "pytorch\\torch\\_custom_op\\impl.py",
    "ast_data": "FunctionDef name:impl_save_for_backward arg:self arg:_stacklevel arguments arg arg FunctionDef name:inner arg:f arguments arg Call Call If Call Call If Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "batch_norm_py",
    "source_code": "def batch_norm_py(tensor, mean, variance, beta, gamma, scale):\n    return nn_impl.batch_normalization(tensor, mean, variance, beta, gamma if scale else None, 0.001)",
    "docstring": "Python implementation of batch normalization.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\batch_norm_benchmark.py",
    "ast_data": "FunctionDef name:batch_norm_py arg:tensor arg:mean arg:variance arg:beta arg:gamma arg:scale arguments arg arg arg arg arg arg Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "CircleCollection",
    "source_code": "class CircleCollection(_CollectionWithSizes):\n    _factor = np.pi ** (-1 / 2)\n\n    def __init__(self, sizes, **kwargs):\n        super().__init__(**kwargs)\n        self.set_sizes(sizes)\n        self.set_transform(transforms.IdentityTransform())\n        self._paths = [mpath.Path.unit_circle()]",
    "docstring": "A collection of circles, drawn using splines.",
    "type": "class",
    "file_path": "matplotlib\\lib\\matplotlib\\collections.py",
    "ast_data": "ClassDef name:CircleCollection Assign FunctionDef name:__init__ arg:self arg:sizes arguments arg arg arg Call Call Call Call Call Assign Call"
  },
  {
    "library": "matplotlib",
    "name": "set_figure",
    "source_code": "def set_figure(self, fig):\n    if self._parent_figure is fig:\n        return\n    if self._parent_figure is not None:\n        raise RuntimeError('Can not put single artist in more than one figure')\n    self._parent_figure = fig\n    if self._parent_figure and self._parent_figure is not self:\n        self.pchanged()\n    self.stale = True",
    "docstring": "Set the or instance the artist belongs to. Parameters ---------- fig : or",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\artist.py",
    "ast_data": "FunctionDef name:set_figure arg:self arg:fig arguments arg arg If Compare Return return:no If Compare Raise Call Assign If BoolOp Compare Call Assign"
  },
  {
    "library": "pytorch",
    "name": "_get_buffers_and_dtypes_for_computation",
    "source_code": "@no_type_check\ndef _get_buffers_and_dtypes_for_computation(state: _FSDPState, root_module: nn.Module) -> tuple[list[torch.Tensor], list[Optional[torch.dtype]]]:\n    _p_assert(state._is_root, 'Expects the root to cast buffers')\n    buffers: list[torch.Tensor] = []\n    buffer_dtypes: list[Optional[torch.dtype]] = []\n    visited_buffers: set[torch.Tensor] = set()\n    fsdp_states, fsdp_modules = traversal_utils._get_fsdp_states_with_modules(root_module)\n    for fsdp_state, fsdp_module in zip(reversed(fsdp_states), reversed(fsdp_modules)):\n        for buffer_name, buffer in fsdp_module.named_buffers():\n            if buffer in visited_buffers:\n                continue\n            visited_buffers.add(buffer)\n            if clean_tensor_name(buffer_name) in fsdp_state._ignored_buffer_names:\n                continue\n            buffers.append(buffer)\n            buffer_dtypes.append(fsdp_state.mixed_precision.buffer_dtype)\n    assert len(buffers) == len(buffer_dtypes), f'{len(buffers)} {len(buffer_dtypes)}'\n    return (buffers, buffer_dtypes)",
    "docstring": "Returns all buffers in the module tree rooted at `` if buffer mixed precision is not enabled or the buffer low precision dtype otherwise.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\fsdp\\_runtime_utils.py",
    "ast_data": "FunctionDef name:_get_buffers_and_dtypes_for_computation arg:state arg:root_module arguments arg arg Call Call Assign Call For Call Call Call For Call If Compare Call If Compare Call Call Call Compare Call Call Call Call Return return:yes"
  },
  {
    "library": "django",
    "name": "FieldIsAForeignKeyColumnName",
    "source_code": "class FieldIsAForeignKeyColumnName(Exception):\n    pass",
    "docstring": "A field is a foreign key attname, i.e. _id.",
    "type": "class",
    "file_path": "django\\django\\contrib\\admin\\utils.py",
    "ast_data": "ClassDef name:FieldIsAForeignKeyColumnName"
  },
  {
    "library": "pygame",
    "name": "spritecollide",
    "source_code": "def spritecollide(sprite, group, dokill, collided=None):\n    default_sprite_collide_func = sprite.rect.colliderect\n    if dokill:\n        crashed = []\n        append = crashed.append\n        for group_sprite in group.sprites():\n            if collided is not None:\n                if collided(sprite, group_sprite):\n                    group_sprite.kill()\n                    append(group_sprite)\n            elif default_sprite_collide_func(group_sprite.rect):\n                group_sprite.kill()\n                append(group_sprite)\n        return crashed\n    if collided is not None:\n        return [group_sprite for group_sprite in group if collided(sprite, group_sprite)]\n    return [group_sprite for group_sprite in group if default_sprite_collide_func(group_sprite.rect)]",
    "docstring": "find Sprites in a Group that intersect another Sprite pygame.sprite.spritecollide(sprite, group, dokill, collided=None): return Sprite_list Return a list containing all Sprites in a Group that intersect with another Sprite. Intersection is determined by comparing the Sprite.rect attribute of each Sprite. The dokill argument is a bool. If set to True, all Sprites that collide will be removed from the Group. The collided argument is a callback function used to calculate if two sprites are colliding. it should take two sprites as values, and return a bool value indicating if they are colliding. If collided is not passed, all sprites must have a \"rect\" value, which is a rectangle of the sprite area, which will be used to calculate the collision.",
    "type": "function",
    "file_path": "pygame\\src_py\\sprite.py",
    "ast_data": "FunctionDef name:spritecollide arg:sprite arg:group arg:dokill arg:collided arguments arg arg arg arg Assign If Assign Assign For Call If Compare If Call Call Call If Call Call Call Return return:yes If Compare Return return:yes Call Return return:yes Call"
  },
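A small self-contained example of `spritecollide` with `dokill=True`; the `Block` class is an assumption for illustration:

```python
# Hypothetical example: the player overlaps one of two blocks; dokill=True
# removes the hit block from the group.
import pygame

class Block(pygame.sprite.Sprite):
    def __init__(self, x, y):
        super().__init__()
        self.image = pygame.Surface((10, 10))
        self.rect = self.image.get_rect(topleft=(x, y))

player = Block(0, 0)
group = pygame.sprite.Group(Block(5, 5), Block(100, 100))
hits = pygame.sprite.spritecollide(player, group, dokill=True)
assert len(hits) == 1 and len(group) == 1
```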
  {
    "library": "scrapy",
    "name": "download_request",
    "source_code": "def download_request(self, request: Request, spider: Spider) -> Deferred[Response]:\n    factory = self.HTTPClientFactory(request)\n    self._connect(factory)\n    return factory.deferred",
    "docstring": "Return a deferred for the HTTP download",
    "type": "method",
    "file_path": "scrapy\\scrapy\\core\\downloader\\handlers\\http10.py",
    "ast_data": "FunctionDef name:download_request arg:self arg:request arg:spider arguments arg arg arg Assign Call Call Return return:yes"
  },
  {
    "library": "seaborn",
    "name": "norm_from_scale",
    "source_code": "def norm_from_scale(scale, norm):\n    if isinstance(norm, mpl.colors.Normalize):\n        return norm\n    if scale is None:\n        return None\n    if norm is None:\n        vmin = vmax = None\n    else:\n        vmin, vmax = norm\n\n    class ScaledNorm(mpl.colors.Normalize):\n\n        def __call__(self, value, clip=None):\n            value, is_scalar = self.process_value(value)\n            self.autoscale_None(value)\n            if self.vmin > self.vmax:\n                raise ValueError('vmin must be less or equal to vmax')\n            if self.vmin == self.vmax:\n                return np.full_like(value, 0)\n            if clip is None:\n                clip = self.clip\n            if clip:\n                value = np.clip(value, self.vmin, self.vmax)\n            t_value = self.transform(value).reshape(np.shape(value))\n            t_vmin, t_vmax = self.transform([self.vmin, self.vmax])\n            if not np.isfinite([t_vmin, t_vmax]).all():\n                raise ValueError('Invalid vmin or vmax')\n            t_value -= t_vmin\n            t_value /= t_vmax - t_vmin\n            t_value = np.ma.masked_invalid(t_value, copy=False)\n            return t_value[0] if is_scalar else t_value\n    new_norm = ScaledNorm(vmin, vmax)\n    new_norm.transform = scale.get_transform().transform\n    return new_norm",
    "docstring": "Produce a Normalize object given a Scale and min/max domain limits.",
    "type": "function",
    "file_path": "seaborn\\seaborn\\_compat.py",
    "ast_data": "FunctionDef name:norm_from_scale arg:scale arg:norm arguments arg arg If Call Return return:yes If Compare Return return:no If Compare Assign Assign ClassDef name:ScaledNorm FunctionDef name:__call__ arg:self arg:value arg:clip arguments arg arg arg Assign Call Call If Compare Raise Call If Compare Return return:yes Call If Compare Assign If Assign Call Assign Call Call Call Assign Call If Call Call Raise Call Assign Call Return return:yes Assign Call Assign Call Return return:yes"
  },
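A hedged usage sketch (`norm_from_scale` lives in seaborn's private `_compat` module, so the import path may change):

```python
# Build a Normalize that maps data to [0, 1] in log space; 10 lands halfway
# between vmin=1 and vmax=100 on a log axis.
import matplotlib.scale as mscale
from seaborn._compat import norm_from_scale

scale = mscale.LogScale(axis=None)
norm = norm_from_scale(scale, (1, 100))
print(float(norm(10)))  # ~0.5
```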
  {
    "library": "tensorflow",
    "name": "_infer_steps",
    "source_code": "def _infer_steps(self, steps, dataset):\n    if steps == -1:\n        self._log_indefinite_training_warning()\n        return None\n    if steps is not None:\n        return steps\n    adapter_steps = self._adapter.get_size()\n    if adapter_steps is not None:\n        return adapter_steps\n    size = cardinality.cardinality(dataset)\n    if size == cardinality.INFINITE and steps is None:\n        raise ValueError('When passing an infinitely repeating dataset, please specify a `steps_per_epoch` value so that epoch level callbacks continue to work. The value can be arbitrary, or a number that you think correctly defines the size of an epoch. Epoch-level callbacks will then be called at this interval.')\n    if size >= 0:\n        return size.numpy().item()\n    return None",
    "docstring": "Infers steps_per_epoch needed to loop through a dataset.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\data_adapter.py",
    "ast_data": "FunctionDef name:_infer_steps arg:self arg:steps arg:dataset arguments arg arg arg If Compare Call Return return:no If Compare Return return:yes Assign Call If Compare Return return:yes Assign Call If BoolOp Compare Compare Raise Call If Compare Return return:yes Call Call Return return:no"
  },
  {
    "library": "pytorch",
    "name": "sigmoid",
    "source_code": "def sigmoid(input):\n    return input.sigmoid()",
    "docstring": "sigmoid(input) -> Tensor Applies the element-wise function :math: See :class: for more details.",
    "type": "function",
    "file_path": "pytorch\\torch\\nn\\functional.py",
    "ast_data": "FunctionDef name:sigmoid arg:input arguments arg Return return:yes Call"
  },
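For reference, a quick check of the logistic values (the functional form simply calls `Tensor.sigmoid`):

```python
# sigmoid(x) = 1 / (1 + exp(-x)); 0 maps to 0.5.
import torch

x = torch.tensor([-1.0, 0.0, 1.0])
print(torch.nn.functional.sigmoid(x))  # tensor([0.2689, 0.5000, 0.7311])
```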
  {
    "library": "numpy",
    "name": "_unpack_tuple",
    "source_code": "def _unpack_tuple(x):\n    if len(x) == 1:\n        return x[0]\n    else:\n        return x",
    "docstring": "Unpacks one-element tuples for use as return values",
    "type": "function",
    "file_path": "numpy\\numpy\\lib\\_arraysetops_impl.py",
    "ast_data": "FunctionDef name:_unpack_tuple arg:x arguments arg If Compare Call Return return:yes Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "checkpoint_key",
    "source_code": "def checkpoint_key(object_path, local_name):\n    key_suffix = escape_local_name(local_name)\n    if local_name == SERIALIZE_TO_TENSORS_NAME:\n        key_suffix = ''\n    return f'{object_path}/{OBJECT_ATTRIBUTES_NAME}/{key_suffix}'",
    "docstring": "Returns the checkpoint key for a local attribute of an object.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\trackable\\trackable_utils.py",
    "ast_data": "FunctionDef name:checkpoint_key arg:object_path arg:local_name arguments arg arg Assign Call If Compare Assign Return return:yes"
  },
  {
    "library": "kornia",
    "name": "vgg11_bn",
    "source_code": "def vgg11_bn(*, weights: Optional[Any]=None, **kwargs: Any) -> VGG:\n    return _vgg('A', True, weights, **kwargs)",
    "docstring": "VGG-11-BN from __. Args: weights (:class:, optional): The pretrained weights to use. See :class: below for more details, and possible values. By default, no pre-trained weights are used. progress (bool, optional): If True, displays a progress bar of the download to stderr. Default is True. **kwargs: parameters passed to the `source code `_ for more details about this class. .. autoclass:: torchvision.models.VGG11_BN_Weights :members:",
    "type": "function",
    "file_path": "kornia\\kornia\\feature\\dedode\\vgg.py",
    "ast_data": "FunctionDef name:vgg11_bn arguments arg arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "run_metadata",
    "source_code": "def run_metadata(name, data, step=None):\n    summary_metadata = summary_pb2.SummaryMetadata()\n    summary_metadata.plugin_data.plugin_name = 'graph_run_metadata'\n    summary_metadata.plugin_data.content = b'1'\n    with summary_scope(name, 'graph_run_metadata_summary', [data, step]) as (tag, _):\n        with ops.device('cpu:0'):\n            tensor = constant_op.constant(data.SerializeToString(), dtype=dtypes.string)\n        return write(tag=tag, tensor=tensor, step=step, metadata=summary_metadata)",
    "docstring": "Writes entire RunMetadata summary. A RunMetadata can contain DeviceStats, partition graphs, and function graphs. Please refer to the proto for definition of each field. Args: name: A name for this summary. The summary tag used for TensorBoard will be this name prefixed by any active name scopes. data: A RunMetadata proto to write. step: Explicit -castable monotonic step value for this summary. If omitted, this defaults to , which must not be None. Returns: True on success, or false if no summary was written because no default summary writer was available. Raises: ValueError: if a default writer exists, but no step was provided and is None.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\summary_ops_v2.py",
    "ast_data": "FunctionDef name:run_metadata arg:name arg:data arg:step arguments arg arg arg Assign Call Assign Assign With Call With Call Assign Call Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "__instancecheck__",
    "source_code": "def __instancecheck__(cls, instance) -> bool:\n    if type(instance) is variables.LazyVariableTracker and cls not in (VariableTracker, variables.LazyVariableTracker):\n        instance = instance.realize()\n    return type.__instancecheck__(cls, instance)",
    "docstring": "Make isinstance work with LazyVariableTracker",
    "type": "method",
    "file_path": "pytorch\\torch\\_dynamo\\variables\\base.py",
    "ast_data": "FunctionDef name:__instancecheck__ arg:cls arg:instance arguments arg arg If BoolOp Compare Call Compare Assign Call Return return:yes Call"
  },
  {
    "library": "django",
    "name": "get_connection",
    "source_code": "def get_connection(backend=None, fail_silently=False, **kwds):\n    klass = import_string(backend or settings.EMAIL_BACKEND)\n    return klass(fail_silently=fail_silently, **kwds)",
    "docstring": "Load an email backend and return an instance of it. If backend is None (default), use settings.EMAIL_BACKEND. Both fail_silently and other keyword arguments are used in the constructor of the backend.",
    "type": "function",
    "file_path": "django\\django\\core\\mail\\__init__.py",
    "ast_data": "FunctionDef name:get_connection arg:backend arg:fail_silently arguments arg arg arg Assign Call BoolOp Return return:yes Call"
  },
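A minimal sketch of `get_connection` in use, assuming no settings module is configured yet (the console backend just prints the message to stdout):

```python
# Configure in-process settings, open an explicit backend instance, and send
# one message through it.
from django.conf import settings

settings.configure(EMAIL_BACKEND='django.core.mail.backends.console.EmailBackend')

from django.core.mail import get_connection, send_mail

connection = get_connection(fail_silently=False)  # loads settings.EMAIL_BACKEND
send_mail('Subject', 'Body', 'from@example.com', ['to@example.com'],
          connection=connection)
```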
  {
    "library": "pytorch",
    "name": "pool",
    "source_code": "def pool(self):\n    return super().pool()",
    "docstring": "Return an opaque token representing the id of this graph's memory pool. This id can optionally be passed to another graph's ``, which hints the other graph may share the same memory pool.",
    "type": "method",
    "file_path": "pytorch\\torch\\cuda\\graphs.py",
    "ast_data": "FunctionDef name:pool arg:self arguments arg Return return:yes Call Call"
  },
  {
    "library": "pandas",
    "name": "treat_as_nested",
    "source_code": "def treat_as_nested(data) -> bool:\n    return len(data) > 0 and is_list_like(data[0]) and (getattr(data[0], 'ndim', 1) == 1) and (not (isinstance(data, ExtensionArray) and data.ndim == 2))",
    "docstring": "Check if we should use nested_data_to_arrays.",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\internals\\construction.py",
    "ast_data": "FunctionDef name:treat_as_nested arg:data arguments arg Return return:yes BoolOp Compare Call Call Compare Call BoolOp Call Compare"
  },
  {
    "library": "django",
    "name": "GEOSFuncFactory",
    "source_code": "class GEOSFuncFactory:\n    argtypes = None\n    restype = None\n    errcheck = None\n\n    def __init__(self, func_name, *, restype=None, errcheck=None, argtypes=None):\n        self.func_name = func_name\n        if restype is not None:\n            self.restype = restype\n        if errcheck is not None:\n            self.errcheck = errcheck\n        if argtypes is not None:\n            self.argtypes = argtypes\n\n    def __call__(self, *args):\n        return self.func(*args)\n\n    @cached_property\n    def func(self):\n        from django.contrib.gis.geos.prototypes.threadsafe import GEOSFunc\n        func = GEOSFunc(self.func_name)\n        func.argtypes = self.argtypes or []\n        func.restype = self.restype\n        if self.errcheck:\n            func.errcheck = self.errcheck\n        return func",
    "docstring": "Lazy loading of GEOS functions.",
    "type": "class",
    "file_path": "django\\django\\contrib\\gis\\geos\\libgeos.py",
    "ast_data": "ClassDef name:GEOSFuncFactory Assign Assign Assign FunctionDef name:__init__ arg:self arg:func_name arguments arg arg arg arg arg Assign If Compare Assign If Compare Assign If Compare Assign FunctionDef name:__call__ arg:self arguments arg arg Return return:yes Call FunctionDef name:func arg:self arguments arg Assign Call Assign BoolOp Assign If Assign Return return:yes"
  },
  {
    "library": "numpy",
    "name": "copy",
    "source_code": "def copy(self):\n    copied = self._data.copy().view(type(self))\n    copied._mask = self._mask.copy()\n    return copied",
    "docstring": "Returns a copy of the masked record.",
    "type": "method",
    "file_path": "numpy\\numpy\\ma\\mrecords.py",
    "ast_data": "FunctionDef name:copy arg:self arguments arg Assign Call Call Call Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_ensure_unique_tensor_objects",
    "source_code": "def _ensure_unique_tensor_objects(parameter_positions, args):\n    s = set()\n    for i, t in enumerate(args):\n        if i in parameter_positions:\n            tid = ops.tensor_id(t)\n            if tid in s:\n                args[i] = gen_array_ops.identity(args[i])\n            else:\n                s.add(tid)\n    return args",
    "docstring": "Make each of the parameter_positions in args a unique tensor_lib.Tensor object. Ensure that each parameter is treated independently. For example: def f(x, y): return x * y g = gradients_function(f) one = tf.constant(1.) g(one, one) should return [1., 1.] (even though the two arguments are the same Tensor object). Args: parameter_positions: List of indices into args defining the arguments to differentiate against. args: A list of arguments to the function to be differentiated. Returns: args, possibly edited in-place.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\backprop.py",
    "ast_data": "FunctionDef name:_ensure_unique_tensor_objects arg:parameter_positions arg:args arguments arg arg Assign Call For Call If Compare Assign Call If Compare Assign Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "unravel_index",
    "source_code": "def unravel_index(self):\n    idx_ranges = [range(self.dim_size(dim_name)) for dim_name in self.dim_names]\n    mesh_pos = itertools.product(*idx_ranges)\n    mapping = {}\n    for device_id, device_pos in enumerate(mesh_pos):\n        device_loc = {}\n        for dim_name, dim_index in zip(self.dim_names, device_pos):\n            device_loc[dim_name] = dim_index\n        mapping[device_id] = device_loc\n    return mapping",
    "docstring": "Returns a dictionary from device ID to {dim_name: dim_index}. For example, for a 3x2 mesh, return this:",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\dtensor\\python\\layout.py",
    "ast_data": "FunctionDef name:unravel_index arg:self arguments arg Assign Call Call Assign Call Assign For Call Assign For Call Assign Assign Return return:yes"
  },
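The row-major unraveling above is independent of DTensor; a pure-Python sketch with assumed dimension names 'x' and 'y':

```python
# Device ID -> {dim_name: dim_index} for a 3x2 mesh, matching the
# itertools.product enumeration order used above.
import itertools

dim_names, dim_sizes = ('x', 'y'), (3, 2)
mapping = {
    device_id: dict(zip(dim_names, pos))
    for device_id, pos in enumerate(
        itertools.product(*(range(s) for s in dim_sizes)))
}
assert mapping[1] == {'x': 0, 'y': 1}
assert mapping[5] == {'x': 2, 'y': 1}
```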
  {
    "library": "pytorch",
    "name": "create_python_bindings_sharded",
    "source_code": "def create_python_bindings_sharded(fm: FileManager, pairs: Sequence[PythonSignatureNativeFunctionPair], pred: Callable[[NativeFunction], bool], module: str | None, filename: str, *, method: bool, num_shards: int, symint: bool=True) -> None:\n    grouped = group_filter_overloads(pairs, pred)\n\n    def key_func(kv: tuple[BaseOperatorName, list[PythonSignatureNativeFunctionPair]]) -> str:\n        return kv[0].base\n\n    def env_func(kv: tuple[BaseOperatorName, list[PythonSignatureNativeFunctionPair]]) -> dict[str, list[str]]:\n        name, fn_pairs = kv\n        return {'ops_headers': [f'#include <ATen/ops/{name.base}.h>'], 'py_forwards': list(forward_decls(name, fn_pairs, method=method)), 'py_methods': [method_impl(name, module, fn_pairs, method=method, symint=symint)], 'py_method_defs': [method_def(name, module, fn_pairs, method=method)]}\n    fm.write_sharded(filename, grouped.items(), base_env={'generated_comment': '@' + f'generated from {fm.template_dir_for_comments()}/{filename}'}, key_fn=key_func, env_callable=env_func, num_shards=num_shards, sharded_keys={'ops_headers', 'py_forwards', 'py_methods', 'py_method_defs'})",
    "docstring": "Generates Python bindings to ATen functions",
    "type": "function",
    "file_path": "pytorch\\tools\\autograd\\gen_python_functions.py",
    "ast_data": "FunctionDef name:create_python_bindings_sharded arg:fm arg:pairs arg:pred arg:module arg:filename arguments arg arg arg arg arg arg arg arg Assign Call FunctionDef name:key_func arg:kv arguments arg Return return:yes FunctionDef name:env_func arg:kv arguments arg Assign Return return:yes Call Call Call Call Call Call Call"
  },
  {
    "library": "matplotlib",
    "name": "_get_tool_pos",
    "source_code": "def _get_tool_pos(self, tool):\n    pos, = (pos for pos in range(self.ToolsCount) if self.GetToolByPos(pos) == tool)\n    return pos",
    "docstring": "Find the position (index) of a wx.ToolBarToolBase in a ToolBar. `` is not useful because wx assigns the same Id to all Separators and StretchableSpaces.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\backends\\backend_wx.py",
    "ast_data": "FunctionDef name:_get_tool_pos arg:self arg:tool arguments arg arg Assign Call Compare Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "enable_numpy_behavior",
    "source_code": "@tf_export.tf_export('experimental.numpy.experimental_enable_numpy_behavior', v1=[])\ndef enable_numpy_behavior(prefer_float32=False, dtype_conversion_mode='legacy'):\n    if dtype_conversion_mode == 'safe' or dtype_conversion_mode == 'all':\n        tf_logging.warning('UserWarning: enabling the new type promotion must happen at the beginning of the program. Please ensure no TF APIs have been used yet.')\n    ops.set_dtype_conversion_mode(dtype_conversion_mode)\n    ops.enable_numpy_style_slicing()\n    np_math_ops.enable_numpy_methods_on_tensor()\n    np_dtypes.set_prefer_float32(prefer_float32)",
    "docstring": "Enable NumPy behavior on Tensors. Enabling NumPy behavior has three effects: * It adds to some common NumPy methods such as , and . * It changes dtype promotion in operators to be compatible with NumPy. For example, used to throw a \"dtype incompatible\" error, but after this it will return a float64 tensor (obeying NumPy's promotion rules). * It enhances 's indexing capability to be on par with [NumPy's]( Args: prefer_float32: Controls whether dtype inference will use float32 for Python floats, or float64 (the default and the NumPy-compatible behavior). dtype_conversion_mode: a string that specifies promotion mode. This string corresponds to a PromoMode Enum and can be 'off', 'legacy', 'safe', or 'all'. 'safe' or 'all' mode enables the auto dtype conversion semantics.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\numpy_ops\\np_config.py",
    "ast_data": "FunctionDef name:enable_numpy_behavior arg:prefer_float32 arg:dtype_conversion_mode arguments arg arg If BoolOp Compare Compare Call Call Call Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "experimental_assign_to_logical_device",
    "source_code": "def experimental_assign_to_logical_device(self, tensor, logical_device_id):\n    if self.extended._use_spmd_for_xla_partitioning:\n        raise ValueError('Cannot assign a tensor to a logical device in SPMD mode. To disable SPMD, Please construct the TPUStrategy with `experimental_spmd_xla_partitioning=False`')\n    num_logical_devices_per_replica = self.extended._tpu_devices.shape[1]\n    if logical_device_id < 0 or logical_device_id >= num_logical_devices_per_replica:\n        raise ValueError('`logical_core_id` to assign must be lower then total number of logical devices per replica. Received logical device id {} but there are only total of {} logical devices in replica.'.format(logical_device_id, num_logical_devices_per_replica))\n    return xla_sharding.assign_device(tensor, logical_device_id, use_sharding_op=True)",
    "docstring": "Adds annotation that will be assigned to a logical device. This adds an annotation to specifying that operations on will be invoked on logical core device id . When model parallelism is used, the default behavior is that all ops are placed on zero-th logical device. Args: tensor: Input tensor to annotate. logical_device_id: Id of the logical core to which the tensor will be assigned. Raises: ValueError: The logical device id presented is not consistent with total number of partitions specified by the device assignment or the TPUStrategy is constructed with . Returns: Annotated tensor with identical value as .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\tpu_strategy.py",
    "ast_data": "FunctionDef name:experimental_assign_to_logical_device arg:self arg:tensor arg:logical_device_id arguments arg arg arg If Raise Call Assign If BoolOp Compare Compare Raise Call Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "watched_variables",
    "source_code": "def watched_variables(self):\n    return pywrap_tfe.TFE_Py_VariableWatcherWatchedVariables(self._variable_watcher)",
    "docstring": "Returns a tuple of variables accessed under this scope.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\record.py",
    "ast_data": "FunctionDef name:watched_variables arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "authlib",
    "name": "validate_at_hash",
    "source_code": "def validate_at_hash(self):\n    access_token = self.params.get('access_token')\n    if access_token and 'at_hash' not in self:\n        raise MissingClaimError('at_hash')\n    super().validate_at_hash()",
    "docstring": "If the ID Token is issued from the Authorization Endpoint with an access_token value, which is the case for the response_type value id_token token, this is REQUIRED; it MAY NOT be used when no Access Token is issued, which is the case for the response_type value id_token.",
    "type": "method",
    "file_path": "authlib\\authlib\\oidc\\core\\claims.py",
    "ast_data": "FunctionDef name:validate_at_hash arg:self arguments arg Assign Call If BoolOp Compare Raise Call Call Call"
  },
  {
    "library": "pandas",
    "name": "_record_count",
    "source_code": "def _record_count(self) -> int:\n    self.filepath_or_buffer.seek(0, 2)\n    total_records_length = self.filepath_or_buffer.tell() - self.record_start\n    if total_records_length % 80 != 0:\n        warnings.warn('xport file may be corrupted.', stacklevel=find_stack_level())\n    if self.record_length > 80:\n        self.filepath_or_buffer.seek(self.record_start)\n        return total_records_length // self.record_length\n    self.filepath_or_buffer.seek(-80, 2)\n    last_card_bytes = self.filepath_or_buffer.read(80)\n    last_card = np.frombuffer(last_card_bytes, dtype=np.uint64)\n    ix = np.flatnonzero(last_card == 2314885530818453536)\n    if len(ix) == 0:\n        tail_pad = 0\n    else:\n        tail_pad = 8 * len(ix)\n    self.filepath_or_buffer.seek(self.record_start)\n    return (total_records_length - tail_pad) // self.record_length",
    "docstring": "Get number of records in file. This is maybe suboptimal because we have to seek to the end of the file. Side effect: returns file position to record_start.",
    "type": "method",
    "file_path": "pandas\\pandas\\io\\sas\\sas_xport.py",
    "ast_data": "FunctionDef name:_record_count arg:self arguments arg Call Assign Call If Compare Call Call If Compare Call Return return:yes Call Assign Call Assign Call Assign Call Compare If Compare Call Assign Assign Call Call Return return:yes"
  },
  {
    "library": "kornia",
    "name": "KRt_from_projection",
    "source_code": "def KRt_from_projection(P: Tensor, eps: float=1e-06) -> Tuple[Tensor, Tensor, Tensor]:\n    KORNIA_CHECK_SHAPE(P, ['*', '3', '4'])\n    submat_3x3 = P[:, 0:3, 0:3]\n    last_column = P[:, 0:3, 3].unsqueeze(-1)\n    reverse = torch.tensor([[0, 0, 1], [0, 1, 0], [1, 0, 0]], device=P.device, dtype=P.dtype).unsqueeze(0)\n    submat_3x3 = torch.matmul(reverse, submat_3x3).permute(0, 2, 1)\n    ortho_mat, upper_mat = linalg_qr(submat_3x3)\n    ortho_mat = torch.matmul(reverse, ortho_mat.permute(0, 2, 1))\n    upper_mat = torch.matmul(reverse, torch.matmul(upper_mat.permute(0, 2, 1), reverse))\n    diagonals = torch.diagonal(upper_mat, dim1=-2, dim2=-1) + eps\n    signs = torch.sign(diagonals)\n    signs_mat = torch.diag_embed(signs)\n    K = torch.matmul(upper_mat, signs_mat)\n    R = torch.matmul(signs_mat, ortho_mat)\n    t = torch.matmul(torch.inverse(K), last_column)\n    return (K, R, t)",
    "docstring": "Decompose the Projection matrix into Camera-Matrix, Rotation Matrix and Translation vector. Args: P: the projection matrix with shape :math:. eps: epsilon for numerical stability. Returns: - The Camera matrix with shape :math:. - The Rotation matrix with shape :math:. - The Translation vector with shape :math:.",
    "type": "function",
    "file_path": "kornia\\kornia\\geometry\\epipolar\\projection.py",
    "ast_data": "FunctionDef name:KRt_from_projection arg:P arg:eps arguments arg arg Call Assign Assign Call Assign Call Call Assign Call Call Assign Call Assign Call Call Assign Call Call Call Assign Call Assign Call Assign Call Assign Call Assign Call Assign Call Call Return return:yes"
  },
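A hedged round-trip check (assuming the function is exported from kornia.geometry.epipolar): recomposing K [R | t] should reproduce P up to numerical tolerance:

```python
# Decompose a random projection matrix and recompose it.
import torch
from kornia.geometry.epipolar import KRt_from_projection

P = torch.rand(1, 3, 4)
K, R, t = KRt_from_projection(P)
P_hat = K @ torch.cat([R, t], dim=-1)
print(torch.allclose(P_hat, P, atol=1e-4))  # expected: True
```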
  {
    "library": "scipy",
    "name": "reset_state",
    "source_code": "@contextlib.contextmanager\ndef reset_state():\n    with set_state(get_state()):\n        yield",
    "docstring": "Returns a context manager that resets all state once exited. See Also -------- set_state Context manager that sets the backend state. get_state Gets a state to be set by this context manager.",
    "type": "function",
    "file_path": "scipy\\scipy\\_lib\\_uarray\\_backend.py",
    "ast_data": "FunctionDef name:reset_state arguments With Call Call"
  },
  {
    "library": "kornia",
    "name": "bgr_to_grayscale",
    "source_code": "def bgr_to_grayscale(image: Tensor) -> Tensor:\n    KORNIA_CHECK_IS_TENSOR(image)\n    if len(image.shape) < 3 or image.shape[-3] != 3:\n        raise ValueError(f'Input size must have a shape of (*, 3, H, W). Got {image.shape}')\n    image_rgb: Tensor = bgr_to_rgb(image)\n    return rgb_to_grayscale(image_rgb)",
    "docstring": "Convert a BGR image to grayscale. The image data is assumed to be in the range of (0, 1). First flips to RGB, then converts. Args: image: BGR image to be converted to grayscale with shape :math:. Returns: grayscale version of the image with shape :math:. Example: >>> input = torch.rand(2, 3, 4, 5) >>> gray = bgr_to_grayscale(input) # 2x1x4x5",
    "type": "function",
    "file_path": "kornia\\kornia\\color\\gray.py",
    "ast_data": "FunctionDef name:bgr_to_grayscale arg:image arguments arg Call If BoolOp Compare Call Compare Raise Call Call Return return:yes Call"
  },
  {
    "library": "django",
    "name": "__init__",
    "source_code": "def __init__(self, **kwargs):\n    for key, value in kwargs.items():\n        setattr(self, key, value)",
    "docstring": "Constructor. Called in the URLconf; can contain helpful extra keyword arguments, and other things.",
    "type": "method",
    "file_path": "django\\django\\views\\generic\\base.py",
    "ast_data": "FunctionDef name:__init__ arg:self arguments arg arg For Call Call"
  },
  {
    "library": "pandas",
    "name": "from_fields",
    "source_code": "@classmethod\ndef from_fields(cls, *, year=None, quarter=None, month=None, day=None, hour=None, minute=None, second=None, freq=None) -> Self:\n    fields = {'year': year, 'quarter': quarter, 'month': month, 'day': day, 'hour': hour, 'minute': minute, 'second': second}\n    fields = {key: value for key, value in fields.items() if value is not None}\n    arr = PeriodArray._from_fields(fields=fields, freq=freq)\n    return cls._simple_new(arr)",
    "docstring": "Construct a PeriodIndex from fields (year, month, day, etc.). Parameters ---------- year : int, array, or Series, default None Year for the PeriodIndex. quarter : int, array, or Series, default None Quarter for the PeriodIndex. month : int, array, or Series, default None Month for the PeriodIndex. day : int, array, or Series, default None Day for the PeriodIndex. hour : int, array, or Series, default None Hour for the PeriodIndex. minute : int, array, or Series, default None Minute for the PeriodIndex. second : int, array, or Series, default None Second for the PeriodIndex. freq : str or period object, optional One of pandas period strings or corresponding objects. Returns ------- PeriodIndex See Also -------- PeriodIndex.from_ordinals : Construct a PeriodIndex from ordinals. PeriodIndex.to_timestamp : Cast to DatetimeArray/Index. Examples -------- >>> idx = pd.PeriodIndex.from_fields(year=[2000, 2002], quarter=[1, 3]) >>> idx PeriodIndex(['2000Q1', '2002Q3'], dtype='period[Q-DEC]')",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\indexes\\period.py",
    "ast_data": "FunctionDef name:from_fields arg:cls arguments arg arg arg arg arg arg arg arg arg Assign Assign Call Compare Assign Call Return return:yes Call"
  },
  {
    "library": "kornia",
    "name": "vgg11",
    "source_code": "def vgg11(*, weights: Optional[Any]=None, **kwargs: Any) -> VGG:\n    return _vgg('A', False, weights, **kwargs)",
    "docstring": "VGG-11 from __. Args: weights (:class:, optional): The pretrained weights to use. See :class: below for more details, and possible values. By default, no pre-trained weights are used. progress (bool, optional): If True, displays a progress bar of the download to stderr. Default is True. **kwargs: parameters passed to the `source code `_ for more details about this class. .. autoclass:: torchvision.models.VGG11_Weights :members:",
    "type": "function",
    "file_path": "kornia\\kornia\\feature\\dedode\\vgg.py",
    "ast_data": "FunctionDef name:vgg11 arguments arg arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "concat",
    "source_code": "@dispatch.dispatch_for_api(array_ops.concat)\ndef concat(values: typing.List[ragged_tensor.RaggedOrDense], axis, name=None):\n    if not isinstance(values, (list, tuple)):\n        values = [values]\n    with ops.name_scope(name, 'RaggedConcat', values):\n        return _ragged_stack_concat_helper(values, axis, stack_values=False)",
    "docstring": "Concatenates potentially ragged tensors along one dimension. Given a list of tensors with the same rank (), returns a rank- such that is the concatenation of . Args: values: A list of potentially ragged tensors. May not be empty. All must have the same rank and the same dtype; but unlike , they can have arbitrary shapes. axis: A python integer, indicating the dimension along which to concatenate. (Note: Unlike , the parameter must be statically known.) Negative values are supported only if the rank of at least one value is statically known. name: A name prefix for the returned tensor (optional). Returns: A with rank . . Raises: ValueError: If is empty, if is out of bounds or if the input tensors have different ranks. #### Example: >>> t1 = tf.ragged.constant([[1, 2], [3, 4, 5]]) >>> t2 = tf.ragged.constant([[6], [7, 8, 9]]) >>> tf.concat([t1, t2], axis=0) >>> tf.concat([t1, t2], axis=1)",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\ragged_concat_ops.py",
    "ast_data": "FunctionDef name:concat arg:values arg:axis arg:name arguments arg arg arg If Call Assign With Call Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "apply",
    "source_code": "def apply(self, model_args: Sequence[Any], model_kwargs: Mapping[str, Any], model: torch.nn.Module | Callable | torch_export.ExportedProgram | None=None) -> tuple[Sequence[Any], Mapping[str, Any]]:\n    return ((*model_args, *self.inputs), model_kwargs)",
    "docstring": "Append model's parameters and buffers into its input. Args: model_args: The model args. model_kwargs: The model kwargs. model: The PyTorch model. Returns: A tuple of the model args + appended inputs and kwargs.",
    "type": "method",
    "file_path": "pytorch\\torch\\onnx\\_internal\\io_adapter.py",
    "ast_data": "FunctionDef name:apply arg:self arg:model_args arg:model_kwargs arg:model arguments arg arg arg arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "GetContainingXLAContext",
    "source_code": "def GetContainingXLAContext(ctxt):\n    while ctxt:\n        if ctxt.IsXLAContext():\n            return ctxt\n        ctxt = ctxt.outer_context\n    return None",
    "docstring": "Returns the first ancestor XLAContext of . Returns if is a XLAContext, or None if is not in a while loop. Args: ctxt: ControlFlowContext Returns: if is a XLAContext, the most nested XLAContext containing , or None if is not in a while loop.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\control_flow_util.py",
    "ast_data": "FunctionDef name:GetContainingXLAContext arg:ctxt arguments arg While If Call Return return:yes Assign Return return:no"
  },
  {
    "library": "tensorflow",
    "name": "latest_checkpoint",
    "source_code": "@property\ndef latest_checkpoint(self):\n    return self._latest_checkpoint",
    "docstring": "The prefix of the most recent checkpoint in . Equivalent to where is the constructor argument to . Suitable for passing to to resume training. Returns: The checkpoint prefix. If there are no checkpoints, returns .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\checkpoint\\checkpoint_management.py",
    "ast_data": "FunctionDef name:latest_checkpoint arg:self arguments arg Return return:yes"
  },
  {
    "library": "scrapy",
    "name": "get_config",
    "source_code": "def get_config(use_closest: bool=True) -> ConfigParser:\n    sources = get_sources(use_closest)\n    cfg = ConfigParser()\n    cfg.read(sources)\n    return cfg",
    "docstring": "Get Scrapy config file as a ConfigParser",
    "type": "function",
    "file_path": "scrapy\\scrapy\\utils\\conf.py",
    "ast_data": "FunctionDef name:get_config arg:use_closest arguments arg Assign Call Assign Call Call Return return:yes"
  },
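Usage sketch for `get_config`: read the merged scrapy.cfg configuration (section names depend on the project):

```python
# Load Scrapy's configuration; closest-scrapy.cfg discovery is on by default.
from scrapy.utils.conf import get_config

cfg = get_config()
if cfg.has_section('settings'):
    print(dict(cfg['settings']))
```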
  {
    "library": "pandas",
    "name": "queryables",
    "source_code": "def queryables(self) -> dict[str, Any]:\n    axis_names = {0: 'index', 1: 'columns'}\n    d1 = [(a.cname, a) for a in self.index_axes]\n    d2 = [(axis_names[axis], None) for axis, values in self.non_index_axes]\n    d3 = [(v.cname, v) for v in self.values_axes if v.name in set(self.data_columns)]\n    return dict(d1 + d2 + d3)",
    "docstring": "return a dict of the kinds allowable columns for this object",
    "type": "method",
    "file_path": "pandas\\pandas\\io\\pytables.py",
    "ast_data": "FunctionDef name:queryables arg:self arguments arg Assign Assign Assign Assign Compare Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "collections",
    "source_code": "@property\ndef collections(self) -> list[str]:\n    return list(self._collections)",
    "docstring": "Returns the names of the collections known to this graph.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\ops.py",
    "ast_data": "FunctionDef name:collections arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "jobs",
    "source_code": "@tf_export('experimental.dtensor.jobs', v1=[])\ndef jobs() -> List[str]:\n    d_jobs = os.environ.get(_DT_JOBS)\n    if d_jobs is None:\n        return []\n    d_jobs_list = d_jobs.split(',')\n    if any([name.startswith('/bns/') for name in d_jobs_list]):\n        if d_jobs_list != sorted(d_jobs_list, key=_bns_task_id):\n            raise ValueError(f'Unexpected DTENSOR_JOBS content {d_jobs}. Sort entries in DTENSOR_JOBS because cluster construction relies on the order.')\n    return d_jobs_list",
    "docstring": "Returns a list of job names of all clients in this DTensor cluster.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\dtensor\\python\\config.py",
    "ast_data": "FunctionDef name:jobs arguments Assign Call If Compare Return return:no Assign Call If Call Call If Compare Call Raise Call Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "reversed",
    "source_code": "def reversed(self, axis_0=True, axis_1=True):\n    r_0 = -1 if axis_0 else 1\n    r_1 = -1 if axis_1 else 1\n    return self.resampled((r_0, r_1))",
    "docstring": "Reverses both or one of the axis.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\colors.py",
    "ast_data": "FunctionDef name:reversed arg:self arg:axis_0 arg:axis_1 arguments arg arg arg Assign Assign Return return:yes Call"
  },
  {
    "library": "pygame",
    "name": "draw_lines",
    "source_code": "def draw_lines(surf, color, closed, points, width=1):\n    return _multi_lines(surf, color, closed, points, width, aaline=False)",
    "docstring": "draw several lines connected through the points.",
    "type": "function",
    "file_path": "pygame\\src_py\\draw_py.py",
    "ast_data": "FunctionDef name:draw_lines arg:surf arg:color arg:closed arg:points arg:width arguments arg arg arg arg arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "register_keras_tensor_specialization",
    "source_code": "def register_keras_tensor_specialization(cls, keras_tensor_subclass):\n    keras_tensor_classes.insert(-1, (cls, keras_tensor_subclass))",
    "docstring": "Register a specialized KerasTensor subclass for a Tensor type.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\keras_tensor.py",
    "ast_data": "FunctionDef name:register_keras_tensor_specialization arg:cls arg:keras_tensor_subclass arguments arg arg Call"
  },
  {
    "library": "cherrypy",
    "name": "__get__",
    "source_code": "def __get__(self, obj, objclass=None):\n    if obj is None:\n        return self\n    else:\n        return obj._body",
    "docstring": "Return a response body through the descriptor protocol.",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\_cprequest.py",
    "ast_data": "FunctionDef name:__get__ arg:self arg:obj arg:objclass arguments arg arg arg If Compare Return return:yes Return return:yes"
  },
  {
    "library": "pandas",
    "name": "unbox_series",
    "source_code": "@unbox(SeriesType)\ndef unbox_series(typ, obj, c):\n    index_obj = c.pyapi.object_getattr_string(obj, 'index')\n    values_obj = c.pyapi.object_getattr_string(obj, 'values')\n    name_obj = c.pyapi.object_getattr_string(obj, 'name')\n    series = cgutils.create_struct_proxy(typ)(c.context, c.builder)\n    series.index = c.unbox(typ.index, index_obj).value\n    series.values = c.unbox(typ.values, values_obj).value\n    series.name = c.unbox(typ.namety, name_obj).value\n    c.pyapi.decref(index_obj)\n    c.pyapi.decref(values_obj)\n    c.pyapi.decref(name_obj)\n    return NativeValue(series._getvalue())",
    "docstring": "Convert a Series object to a native structure.",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\_numba\\extensions.py",
    "ast_data": "FunctionDef name:unbox_series arg:typ arg:obj arg:c arguments arg arg arg Assign Call Assign Call Assign Call Assign Call Call Assign Call Assign Call Assign Call Call Call Call Return return:yes Call Call Call"
  },
  {
    "library": "pandas",
    "name": "data_label",
    "source_code": "@property\ndef data_label(self) -> str:\n    self._ensure_open()\n    return self._data_label",
    "docstring": "Return data label of Stata file. The data label is a descriptive string associated with the dataset stored in the Stata file. This property provides access to that label, if one is present. See Also -------- io.stata.StataReader.variable_labels : Return a dict associating each variable name with corresponding label. DataFrame.to_stata : Export DataFrame object to Stata dta format. Examples -------- >>> df = pd.DataFrame([(1,)], columns=[\"variable\"]) >>> time_stamp = pd.Timestamp(2000, 2, 29, 14, 21) >>> data_label = \"This is a data file.\" >>> path = \"/My_path/filename.dta\" >>> df.to_stata( ... path, ... time_stamp=time_stamp, # doctest: +SKIP ... data_label=data_label, # doctest: +SKIP ... version=None, ... ) # doctest: +SKIP >>> with pd.io.stata.StataReader(path) as reader: # doctest: +SKIP ... print(reader.data_label) # doctest: +SKIP This is a data file.",
    "type": "method",
    "file_path": "pandas\\pandas\\io\\stata.py",
    "ast_data": "FunctionDef name:data_label arg:self arguments arg Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "_AxisArtistHelperBase",
    "source_code": "class _AxisArtistHelperBase:\n\n    def __init__(self, nth_coord):\n        self.nth_coord = nth_coord\n\n    def update_lim(self, axes):\n        pass\n\n    def get_nth_coord(self):\n        return self.nth_coord\n\n    def _to_xy(self, values, const):\n        if self.nth_coord == 0:\n            return np.stack(np.broadcast_arrays(values, const), axis=-1)\n        elif self.nth_coord == 1:\n            return np.stack(np.broadcast_arrays(const, values), axis=-1)\n        else:\n            raise ValueError('Unexpected nth_coord')",
    "docstring": "Base class for axis helper. Subclasses should define the methods listed below. The *axes* argument will be the `` attribute of the caller artist. :: # Construct the spine. def get_line_transform(self, axes): return transform def get_line(self, axes): return path # Construct the label. def get_axislabel_transform(self, axes): return transform def get_axislabel_pos_angle(self, axes): return (x, y), angle # Construct the ticks. def get_tick_transform(self, axes): return transform def get_tick_iterators(self, axes): # A pair of iterables (one for major ticks, one for minor ticks) # that yield (tick_position, tick_angle, tick_label). return iter_major, iter_minor",
    "type": "class",
    "file_path": "matplotlib\\lib\\mpl_toolkits\\axisartist\\axislines.py",
    "ast_data": "ClassDef name:_AxisArtistHelperBase FunctionDef name:__init__ arg:self arg:nth_coord arguments arg arg Assign FunctionDef name:update_lim arg:self arg:axes arguments arg arg FunctionDef name:get_nth_coord arg:self arguments arg Return return:yes FunctionDef name:_to_xy arg:self arg:values arg:const arguments arg arg arg If Compare Return return:yes Call Call If Compare Return return:yes Call Call Raise Call"
  },
  {
    "library": "pytorch",
    "name": "_apply_norm",
    "source_code": "def _apply_norm(x: TensorLikeType, norm: NormType, signal_numel: int, forward: bool) -> TensorLikeType:\n    torch._check(norm in _NORM_VALUES, lambda: f'Invalid normalization mode: {norm}')\n    if norm == 'ortho':\n        return x * (1 / math.sqrt(signal_numel))\n    normalize = not forward and (norm is None or norm == 'backward') or (forward and norm == 'forward')\n    return x * (1 / signal_numel) if normalize else x",
    "docstring": "Apply normalization to the un-normalized FFT result",
    "type": "function",
    "file_path": "pytorch\\torch\\_refs\\fft.py",
    "ast_data": "FunctionDef name:_apply_norm arg:x arg:norm arg:signal_numel arg:forward arguments arg arg arg arg Call Compare arguments If Compare Return return:yes Call Assign BoolOp BoolOp BoolOp Compare Compare BoolOp Compare Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "__init__",
    "source_code": "def __init__(self, numsides, *, rotation=0, sizes=(1,), **kwargs):\n    super().__init__(**kwargs)\n    self.set_sizes(sizes)\n    self._numsides = numsides\n    self._paths = [self._path_generator(numsides)]\n    self._rotation = rotation\n    self.set_transform(transforms.IdentityTransform())",
    "docstring": "Parameters ---------- numsides : int The number of sides of the polygon. rotation : float The rotation of the polygon in radians. sizes : tuple of float The area of the circle circumscribing the polygon in points^2. **kwargs Forwarded to . Examples -------- See :doc: for a complete example:: offsets = np.random.rand(20, 2) facecolors = [cm.jet(x) for x in np.random.rand(20)] collection = RegularPolyCollection( numsides=5, # a pentagon rotation=0, sizes=(50,), facecolors=facecolors, edgecolors=(\"black\",), linewidths=(1,), offsets=offsets, offset_transform=ax.transData, )",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\collections.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:numsides arguments arg arg arg arg arg Call Call Call Assign Assign Call Assign Call Call"
  },
  {
    "library": "pandas",
    "name": "_slice",
    "source_code": "def _slice(self, slobj: slice, axis: AxisInt=0) -> Self:\n    assert isinstance(slobj, slice), type(slobj)\n    axis = self._get_block_manager_axis(axis)\n    new_mgr = self._mgr.get_slice(slobj, axis=axis)\n    result = self._constructor_from_mgr(new_mgr, axes=new_mgr.axes)\n    result = result.__finalize__(self)\n    return result",
    "docstring": "Construct a slice of this container. Slicing with this method is *always* positional.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\generic.py",
    "ast_data": "FunctionDef name:_slice arg:self arg:slobj arg:axis arguments arg arg arg Call Call Assign Call Assign Call Assign Call Assign Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "get_text_path",
    "source_code": "def get_text_path(self, prop, s, ismath=False):\n    if ismath == 'TeX':\n        glyph_info, glyph_map, rects = self.get_glyphs_tex(prop, s)\n    elif not ismath:\n        font = self._get_font(prop)\n        glyph_info, glyph_map, rects = self.get_glyphs_with_font(font, s)\n    else:\n        glyph_info, glyph_map, rects = self.get_glyphs_mathtext(prop, s)\n    verts, codes = ([], [])\n    for glyph_id, xposition, yposition, scale in glyph_info:\n        verts1, codes1 = glyph_map[glyph_id]\n        verts.extend(verts1 * scale + [xposition, yposition])\n        codes.extend(codes1)\n    for verts1, codes1 in rects:\n        verts.extend(verts1)\n        codes.extend(codes1)\n    if not verts:\n        verts = np.empty((0, 2))\n    return (verts, codes)",
    "docstring": "Convert text *s* to path (a tuple of vertices and codes for matplotlib.path.Path). Parameters ---------- prop : The font properties for the text. s : str The text to be converted. ismath : {False, True, \"TeX\"} If True, use mathtext parser. If \"TeX\", use tex for rendering. Returns ------- verts : list A list of arrays containing the (x, y) coordinates of the vertices. codes : list A list of path codes. Examples -------- Create a list of vertices and codes from a text, and create a from those:: from matplotlib.path import Path from matplotlib.text import TextToPath from matplotlib.font_manager import FontProperties fp = FontProperties(family=\"Comic Neue\", style=\"italic\") verts, codes = TextToPath().get_text_path(fp, \"ABC\") path = Path(verts, codes, closed=False) Also see for a more direct way to create a path from a text.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\textpath.py",
    "ast_data": "FunctionDef name:get_text_path arg:self arg:prop arg:s arg:ismath arguments arg arg arg arg If Compare Assign Call If Assign Call Assign Call Assign Call Assign For Assign Call Call For Call Call If Assign Call Return return:yes"
  },
  {
    "library": "scipy",
    "name": "_optimize_result_for_equal_bounds",
    "source_code": "def _optimize_result_for_equal_bounds(fun, bounds, method, args=(), constraints=()):\n    success = True\n    message = 'All independent variables were fixed by bounds.'\n    x0 = bounds.lb\n    if constraints:\n        message = 'All independent variables were fixed by bounds at values that satisfy the constraints.'\n        constraints = standardize_constraints(constraints, x0, 'new')\n    maxcv = 0\n    for c in constraints:\n        pc = PreparedConstraint(c, x0)\n        violation = pc.violation(x0)\n        if np.sum(violation):\n            maxcv = max(maxcv, np.max(violation))\n            success = False\n            message = f'All independent variables were fixed by bounds, but the independent variables do not satisfy the constraints exactly. (Maximum violation: {maxcv}).'\n    return OptimizeResult(x=x0, fun=fun(x0, *args), success=success, message=message, nfev=1, njev=0, nhev=0)",
    "docstring": "Provides a default OptimizeResult for when a bounded minimization method has (lb == ub).all(). Parameters ---------- fun: callable bounds: Bounds method: str constraints: Constraint",
    "type": "function",
    "file_path": "scipy\\scipy\\optimize\\_minimize.py",
    "ast_data": "FunctionDef name:_optimize_result_for_equal_bounds arg:fun arg:bounds arg:method arg:args arg:constraints arguments arg arg arg arg arg Assign Assign Assign If Assign Assign Call Assign For Assign Call Assign Call If Call Assign Call Call Assign Assign Return return:yes Call Call"
  },
  {
    "library": "pandas",
    "name": "numpy_dtype",
    "source_code": "@cache_readonly\ndef numpy_dtype(self) -> np.dtype:\n    return np.dtype(self.type)",
    "docstring": "Return an instance of our numpy dtype",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\dtypes\\dtypes.py",
    "ast_data": "FunctionDef name:numpy_dtype arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "scrapy",
    "name": "crawlers",
    "source_code": "@property\ndef crawlers(self) -> set[Crawler]:\n    return self._crawlers",
    "docstring": "Set of :class: started by :meth: and managed by this class.",
    "type": "method",
    "file_path": "scrapy\\scrapy\\crawler.py",
    "ast_data": "FunctionDef name:crawlers arg:self arguments arg Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "set_axis_direction",
    "source_code": "def set_axis_direction(self, label_direction):\n    self.set_default_alignment(label_direction)\n    self.set_default_angle(label_direction)\n    self._axis_direction = label_direction",
    "docstring": "Adjust the text angle and text alignment of ticklabels according to the Matplotlib convention. The *label_direction* must be one of [left, right, bottom, top]. ===================== ========== ========= ========== ========== Property left bottom right top ===================== ========== ========= ========== ========== ticklabel angle 90 0 -90 180 ticklabel va center baseline center baseline ticklabel ha right center right center ===================== ========== ========= ========== ========== Note that the text angles are actually relative to (90 + angle of the direction to the ticklabel), which gives 0 for bottom axis. Parameters ---------- label_direction : {\"left\", \"bottom\", \"right\", \"top\"}",
    "type": "method",
    "file_path": "matplotlib\\lib\\mpl_toolkits\\axisartist\\axis_artist.py",
    "ast_data": "FunctionDef name:set_axis_direction arg:self arg:label_direction arguments arg arg Call Call Assign"
  },
  {
    "library": "pytorch",
    "name": "createResolutionCallbackForClassMethods",
    "source_code": "def createResolutionCallbackForClassMethods(cls):\n    fns = [getattr(cls, name) for name in cls.__dict__ if inspect.isroutine(getattr(cls, name))]\n    fns = [fn for fn in fns if not inspect.isbuiltin(fn) and hasattr(fn, '__globals__')]\n    captures = {}\n    for fn in fns:\n        captures.update(get_closure(fn))\n        captures.update(get_type_hint_captures(fn))\n\n    def lookup_in_class(key):\n        if key in captures:\n            return captures[key]\n        else:\n            return getattr(builtins, key, None)\n    return lookup_in_class",
    "docstring": "This looks at all the methods defined in a class and pulls their closed-over variables into a dictionary and uses that to resolve variables.",
    "type": "function",
    "file_path": "pytorch\\torch\\_jit_internal.py",
    "ast_data": "FunctionDef name:createResolutionCallbackForClassMethods arg:cls arguments arg Assign Call Call Call Assign BoolOp Call Call Assign For Call Call Call Call FunctionDef name:lookup_in_class arg:key arguments arg If Compare Return return:yes Return return:yes Call Return return:yes"
  },
  {
    "library": "numpy",
    "name": "index",
    "source_code": "def index(self, sub, start=0, end=None):\n    return index(self, sub, start, end)",
    "docstring": "Like , but raises :exc: when the substring is not found. See Also -------- char.index",
    "type": "method",
    "file_path": "numpy\\numpy\\_core\\defchararray.py",
    "ast_data": "FunctionDef name:index arg:self arg:sub arg:start arg:end arguments arg arg arg arg Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "_User2DTransform",
    "source_code": "class _User2DTransform(Transform):\n    input_dims = output_dims = 2\n\n    def __init__(self, forward, backward):\n        super().__init__()\n        self._forward = forward\n        self._backward = backward\n\n    def transform_non_affine(self, values):\n        return np.transpose(self._forward(*np.transpose(values)))\n\n    def inverted(self):\n        return type(self)(self._backward, self._forward)",
    "docstring": "A transform defined by two user-set functions.",
    "type": "class",
    "file_path": "matplotlib\\lib\\mpl_toolkits\\axisartist\\grid_finder.py",
    "ast_data": "ClassDef name:_User2DTransform Assign FunctionDef name:__init__ arg:self arg:forward arg:backward arguments arg arg arg Call Call Assign Assign FunctionDef name:transform_non_affine arg:self arg:values arguments arg arg Return return:yes Call Call Call FunctionDef name:inverted arg:self arguments arg Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "get_dir",
    "source_code": "def get_dir() -> str:\n    if os.getenv('TORCH_HUB'):\n        warnings.warn('TORCH_HUB is deprecated, please use env TORCH_HOME instead')\n    if _hub_dir is not None:\n        return _hub_dir\n    return os.path.join(_get_torch_home(), 'hub')",
    "docstring": "Get the Torch Hub cache directory used for storing downloaded models & weights. If :func: is not called, default path is `` if the environment variable is not set.",
    "type": "function",
    "file_path": "pytorch\\torch\\hub.py",
    "ast_data": "FunctionDef name:get_dir arguments If Call Call If Compare Return return:yes Return return:yes Call Call"
  },
  {
    "library": "scipy",
    "name": "stopping_criteria",
    "source_code": "def stopping_criteria(self):\n    if self.maxiter is not None:\n        self.finite_iterations()\n    if self.iters is not None:\n        self.finite_iterations()\n    if self.maxfev is not None:\n        self.finite_fev()\n    if self.maxev is not None:\n        self.finite_ev()\n    if self.maxtime is not None:\n        self.finite_time()\n    if self.f_min_true is not None:\n        self.finite_precision()\n    if self.minhgrd is not None:\n        self.finite_homology_growth()\n    return self.stop_global",
    "docstring": "Various stopping criteria ran every iteration Returns ------- stop : bool",
    "type": "method",
    "file_path": "scipy\\scipy\\optimize\\_shgo.py",
    "ast_data": "FunctionDef name:stopping_criteria arg:self arguments arg If Compare Call If Compare Call If Compare Call If Compare Call If Compare Call If Compare Call If Compare Call Return return:yes"
  },
  {
    "library": "cherrypy",
    "name": "update_cookie",
    "source_code": "def update_cookie(id):\n    cherrypy.serving.response.cookie[name] = id",
    "docstring": "Update the cookie every time the session id changes.",
    "type": "function",
    "file_path": "cherrypy\\cherrypy\\lib\\sessions.py",
    "ast_data": "FunctionDef name:update_cookie arg:id arguments arg Assign"
  },
  {
    "library": "tensorflow",
    "name": "set_int_list_attr",
    "source_code": "def set_int_list_attr(op, attr_name, ints) -> None:\n    ints_list = attr_value_pb2.AttrValue.ListValue(i=ints)\n    op._set_attr(attr_name, attr_value_pb2.AttrValue(list=ints_list))",
    "docstring": "TF internal method used to set a list(int) attribute in the node_def.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\ops.py",
    "ast_data": "FunctionDef name:set_int_list_attr arg:op arg:attr_name arg:ints arguments arg arg arg Assign Call Call Call"
  },
  {
    "library": "matplotlib",
    "name": "is_horizontal",
    "source_code": "def is_horizontal(self):\n    return self._is_horizontal",
    "docstring": "True if the eventcollection is horizontal, False if vertical.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\collections.py",
    "ast_data": "FunctionDef name:is_horizontal arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "constant_value",
    "source_code": "def constant_value(pred):\n    if isinstance(pred, tensor.Tensor):\n        return tensor_util.constant_value(pred)\n    if pred in {0, 1}:\n        return bool(pred)\n    if isinstance(pred, bool):\n        return pred\n    if isinstance(pred, variables.Variable):\n        return None\n    raise TypeError('`pred` must be a Tensor, or a Python bool, or 1 or 0. Found instead: %s' % type(pred))",
    "docstring": "Return the bool value for , or None if had a dynamic value. Args: pred: A scalar, either a Python bool or a TensorFlow boolean variable or tensor, or the Python integer 1 or 0. Returns: True or False if has a constant boolean value, None otherwise. Raises: TypeError: If is not a Variable, Tensor or bool, or Python integer 1 or 0.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\utils\\control_flow_util.py",
    "ast_data": "FunctionDef name:constant_value arg:pred arguments arg If Call Return return:yes Call If Compare Return return:yes Call If Call Return return:yes If Call Return return:no Raise Call Call"
  },
  {
    "library": "scikit-learn",
    "name": "SVCBenchmark",
    "source_code": "class SVCBenchmark(Predictor, Estimator, Benchmark):\n    param_names = ['kernel']\n    params = (['linear', 'poly', 'rbf', 'sigmoid'],)\n\n    def setup_cache(self):\n        super().setup_cache()\n\n    def make_data(self, params):\n        return _synth_classification_dataset()\n\n    def make_estimator(self, params):\n        kernel, = params\n        estimator = SVC(max_iter=100, tol=1e-16, kernel=kernel, random_state=0, gamma='scale')\n        return estimator\n\n    def make_scorers(self):\n        make_gen_classif_scorers(self)",
    "docstring": "Benchmarks for SVC.",
    "type": "class",
    "file_path": "scikit-learn\\asv_benchmarks\\benchmarks\\svm.py",
    "ast_data": "ClassDef name:SVCBenchmark Assign Assign FunctionDef name:setup_cache arg:self arguments arg Call Call FunctionDef name:make_data arg:self arg:params arguments arg arg Return return:yes Call FunctionDef name:make_estimator arg:self arg:params arguments arg arg Assign Assign Call Return return:yes FunctionDef name:make_scorers arg:self arguments arg Call"
  },
  {
    "library": "scikit-learn",
    "name": "decision_function",
    "source_code": "def decision_function(self, X):\n    check_is_fitted(self)\n    negative_mahal_dist = self.score_samples(X)\n    return negative_mahal_dist - self.offset_",
    "docstring": "Compute the decision function of the given observations. Parameters ---------- X : array-like of shape (n_samples, n_features) The data matrix. Returns ------- decision : ndarray of shape (n_samples,) Decision function of the samples. It is equal to the shifted Mahalanobis distances. The threshold for being an outlier is 0, which ensures a compatibility with other outlier detection algorithms.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\covariance\\_elliptic_envelope.py",
    "ast_data": "FunctionDef name:decision_function arg:self arg:X arguments arg arg Call Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "LogicalDevice",
    "source_code": "@tf_export('config.LogicalDevice')\nclass LogicalDevice(collections.namedtuple('LogicalDevice', ['name', 'device_type'])):\n    pass",
    "docstring": "Abstraction for a logical device initialized by the runtime. A corresponds to an initialized logical device on a or a remote device visible to the cluster. Tensors and operations can be placed on a specific logical device by calling with a specified . Fields: name: The fully qualified name of the device. Can be used for Op or function placement. device_type: String declaring the type of device such as \"CPU\" or \"GPU\".",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\context.py",
    "ast_data": "ClassDef name:LogicalDevice Call Call"
  },
  {
    "library": "pandas",
    "name": "_validate_fill_value",
    "source_code": "def _validate_fill_value(self, value):\n    return self._data._validate_setitem_value(value)",
    "docstring": "Convert value to be insertable to underlying array.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\indexes\\extension.py",
    "ast_data": "FunctionDef name:_validate_fill_value arg:self arg:value arguments arg arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "forbid_in_graph",
    "source_code": "def forbid_in_graph(fn):\n    if isinstance(fn, (list, tuple)):\n        return [forbid_in_graph(x) for x in fn]\n    assert callable(fn), 'forbid_in_graph applies only to callables'\n    fn._dynamo_forbidden = True\n    return fn",
    "docstring": "Customize which functions TorchDynamo will assert are not present while tracing. If you want a graph break on this function instead, use disallow_in_graph. TODO(voz): We now have allow_in_graph, disallow_in_graph, forbid_in_graph - some more robust documentation would not be amiss.",
    "type": "function",
    "file_path": "pytorch\\torch\\_dynamo\\decorators.py",
    "ast_data": "FunctionDef name:forbid_in_graph arg:fn arguments arg If Call Return return:yes Call Call Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "var_scope",
    "source_code": "@property\n@deprecated('2017-02-21', 'The .var_scope property is deprecated. Please change your code to use the .variable_scope property')\ndef var_scope(self):\n    return self._variable_scope",
    "docstring": "Returns the variable scope object created by this Template.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\template.py",
    "ast_data": "FunctionDef name:var_scope arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "django",
    "name": "distance",
    "source_code": "def distance(self, other):\n    if not isinstance(other, GEOSGeometry):\n        raise TypeError('distance() works only on other GEOS Geometries.')\n    return capi.geos_distance(self.ptr, other.ptr, byref(c_double()))",
    "docstring": "Return the distance between the closest points on this Geometry and the other. Units will be in those of the coordinate system of the Geometry.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\geos\\geometry.py",
    "ast_data": "FunctionDef name:distance arg:self arg:other arguments arg arg If Call Raise Call Return return:yes Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "validate",
    "source_code": "def validate(self, options: VerificationOptions):\n    onnx_session = _onnx_backend_session(io.BytesIO(self.proto), options.backend)\n    run_outputs = onnx_session.run(None, self.inputs)\n    if hasattr(onnx_session, 'get_outputs'):\n        output_names = [o.name for o in onnx_session.get_outputs()]\n    elif hasattr(onnx_session, 'output_names'):\n        output_names = onnx_session.output_names\n    else:\n        raise ValueError(f'Unknown onnx session type: {type(onnx_session)}')\n    expected_outs = [self.outputs[name] for name in output_names]\n    _compare_onnx_pytorch_outputs_in_np(run_outputs, expected_outs, options)",
    "docstring": "Run the ONNX test case with options.backend, and compare with the expected outputs. Args: options: Options for validation. Raise: AssertionError: if outputs from options.backend and expected outputs are not equal up to specified precision.",
    "type": "method",
    "file_path": "pytorch\\torch\\onnx\\verification.py",
    "ast_data": "FunctionDef name:validate arg:self arg:options arguments arg arg Assign Call Call Assign Call If Call Assign Call If Call Assign Raise Call Call Assign Call"
  },
  {
    "library": "kornia",
    "name": "equalize",
    "source_code": "@perform_keep_shape_image\ndef equalize(input: Tensor) -> Tensor:\n    res = []\n    for image in input:\n        scaled_image = torch.stack([_scale_channel(image[i, :, :]) for i in range(len(image))])\n        res.append(scaled_image)\n    return torch.stack(res)",
    "docstring": "Apply equalize on the input tensor. .. image:: _static/img/equalize.png Implements Equalize function from PIL using PyTorch ops based on uint8 format: Args: input: image tensor to equalize with shape :math:. Returns: Equalized image tensor with shape :math:. Example: >>> x = torch.rand(1, 2, 3, 3) >>> equalize(x).shape torch.Size([1, 2, 3, 3])",
    "type": "function",
    "file_path": "kornia\\kornia\\enhance\\adjust.py",
    "ast_data": "FunctionDef name:equalize arg:input arguments arg Assign For Assign Call Call Call Call Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "wait_stream",
    "source_code": "def wait_stream(self, stream) -> None:\n    self.wait_event(stream.record_event())",
    "docstring": "Synchronize with another stream. All future work submitted to this stream will wait until all kernels submitted to a given stream at the time of call complete. Args: stream (Stream): a stream to synchronize. .. note:: This function returns without waiting for currently enqueued kernels in :attr:: only future operations are affected.",
    "type": "method",
    "file_path": "pytorch\\torch\\cuda\\streams.py",
    "ast_data": "FunctionDef name:wait_stream arg:self arg:stream arguments arg arg Call Call"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, output, output_name, loss_fn, loss_weight=None, training_target=None, output_loss_metric=None, sample_weight=None, sample_weight_mode=None):\n    self._output = output\n    self._output_name = output_name\n    self._loss_fn = loss_fn\n    self._loss_weight = loss_weight\n    self._training_target = training_target\n    self._output_loss_metric = output_loss_metric\n    self._sample_weight = sample_weight\n    self._sample_weight_mode = sample_weight_mode",
    "docstring": "Initialize the _TrainingEndpoint. Note that the output and output_name should be stable as long as the model structure doesn't change. The training_target suppose to be mutable since the information is provided via Args: output: the output tensor of the model. output_name: the unique name of the output tensor. loss_fn: the loss function for the output tensor. loss_weight: float, the weights for the loss. training_target: the _TrainingTarget for the model. output_loss_metric: the metric object for the loss function. sample_weight: the weights for how a sample is weighted during metric and loss calculation. Could be None. sample_weight_mode: string, 'temporal', 'samplewise' or None. The mode for how the sample_weight is populated.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\training_v1.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:output arg:output_name arg:loss_fn arg:loss_weight arg:training_target arg:output_loss_metric arg:sample_weight arg:sample_weight_mode arguments arg arg arg arg arg arg arg arg arg Assign Assign Assign Assign Assign Assign Assign Assign"
  },
  {
    "library": "django",
    "name": "cycle",
    "source_code": "@register.tag\ndef cycle(parser, token):\n    args = token.split_contents()\n    if len(args) < 2:\n        raise TemplateSyntaxError(\"'cycle' tag requires at least two arguments\")\n    if len(args) == 2:\n        name = args[1]\n        if not hasattr(parser, '_named_cycle_nodes'):\n            raise TemplateSyntaxError(\"No named cycles in template. '%s' is not defined\" % name)\n        if name not in parser._named_cycle_nodes:\n            raise TemplateSyntaxError(\"Named cycle '%s' does not exist\" % name)\n        return parser._named_cycle_nodes[name]\n    as_form = False\n    if len(args) > 4:\n        if args[-3] == 'as':\n            if args[-1] != 'silent':\n                raise TemplateSyntaxError(\"Only 'silent' flag is allowed after cycle's name, not '%s'.\" % args[-1])\n            as_form = True\n            silent = True\n            args = args[:-1]\n        elif args[-2] == 'as':\n            as_form = True\n            silent = False\n    if as_form:\n        name = args[-1]\n        values = [parser.compile_filter(arg) for arg in args[1:-2]]\n        node = CycleNode(values, name, silent=silent)\n        if not hasattr(parser, '_named_cycle_nodes'):\n            parser._named_cycle_nodes = {}\n        parser._named_cycle_nodes[name] = node\n    else:\n        values = [parser.compile_filter(arg) for arg in args[1:]]\n        node = CycleNode(values)\n    parser._last_cycle_node = node\n    return node",
    "docstring": "Cycle among the given strings each time this tag is encountered. Within a loop, cycles among the given strings each time through the loop:: {% for o in some_list %} ... {% endfor %} Outside of a loop, give the values a unique name the first time you call it, then use that name each successive time through:: ... ... ... You can use any number of values, separated by spaces. Commas can also be used to separate values; if a comma is used, the cycle values are interpreted as literal strings. The optional flag \"silent\" can be used to prevent the cycle declaration from returning any value:: {% for o in some_list %} {% cycle 'row1' 'row2' as rowcolors silent %} {% include \"subtemplate.html \" %} {% endfor %}",
    "type": "function",
    "file_path": "django\\django\\template\\defaulttags.py",
    "ast_data": "FunctionDef name:cycle arg:parser arg:token arguments arg arg Assign Call If Compare Call Raise Call If Compare Call Assign If Call Raise Call If Compare Raise Call Return return:yes Assign If Compare Call If Compare If Compare Raise Call Assign Assign Assign If Compare Assign Assign If Assign Assign Call Assign Call If Call Assign Assign Assign Call Assign Call Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "convert",
    "source_code": "def convert(self):\n    return super(TFLiteConverter, self).convert()",
    "docstring": "Converts a TensorFlow GraphDef based on instance variables. Returns: The converted data in serialized format, either a TFLite Flatbuffer or a Graphviz graph depending on value in . Raises: ValueError: Input shape is not specified. None value for dimension in input_tensor.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\lite\\python\\lite.py",
    "ast_data": "FunctionDef name:convert arg:self arguments arg Return return:yes Call Call"
  },
  {
    "library": "kornia",
    "name": "XyzToRgb",
    "source_code": "class XyzToRgb(Module):\n    ONNX_DEFAULT_INPUTSHAPE: ClassVar[list[int]] = [-1, 3, -1, -1]\n    ONNX_DEFAULT_OUTPUTSHAPE: ClassVar[list[int]] = [-1, 3, -1, -1]\n\n    def forward(self, image: Tensor) -> Tensor:\n        return xyz_to_rgb(image)",
    "docstring": "Converts an image from XYZ to RGB. Returns: RGB version of the image. Shape: - image: :math: - output: :math: Examples: >>> input = torch.rand(2, 3, 4, 5) >>> rgb = XyzToRgb() >>> output = rgb(input) # 2x3x4x5 Reference: [1]",
    "type": "class",
    "file_path": "kornia\\kornia\\color\\xyz.py",
    "ast_data": "ClassDef name:XyzToRgb FunctionDef name:forward arg:self arg:image arguments arg arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "build_chunks",
    "source_code": "def build_chunks(self) -> int:\n    size_diff = 0\n    if _GREEDY_SPLIT(self.proto_size) and (not _ABOVE_MAX_SIZE(self.proto_size)):\n        size_diff += LargeMessageSplitter(self._proto, self.proto_size, parent_splitter=self, fields_in_parent=[]).build_chunks()\n    if _ABOVE_MAX_SIZE(self.proto_size):\n        size_diff += RepeatedMessageSplitter(self._proto, 'node_def', [ConstantNodeDefSplitter, LargeMessageSplitter], parent_splitter=self, fields_in_parent=[]).build_chunks()\n    return size_diff",
    "docstring": "Splits the proto, and returns the size of the chunks created.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\tools\\proto_splitter\\split_graph_def.py",
    "ast_data": "FunctionDef name:build_chunks arg:self arguments arg Assign If BoolOp Call Call Call Call If Call Call Call Return return:yes"
  },
  {
    "library": "kornia",
    "name": "to_pil",
    "source_code": "def to_pil(self, x: Any) -> 'Image.Image':\n    if isinstance(x, (Tensor,)):\n        x = x.cpu().detach() * 255\n        if x.dim() == 3:\n            x = x.permute(1, 2, 0)\n            return Image.fromarray(x.byte().numpy())\n        elif x.dim() == 4:\n            x = x.permute(0, 2, 3, 1)\n            return [Image.fromarray(_x.byte().numpy()) for _x in x]\n        else:\n            raise NotImplementedError\n    if isinstance(x, (np.ndarray,)):\n        raise NotImplementedError\n    if isinstance(x, (Image.Image,)):\n        return x\n    raise TypeError('Input type not supported')",
    "docstring": "Convert input to PIL image. Args: x: The input to convert. Returns: Image.Image: The converted PIL image.",
    "type": "method",
    "file_path": "kornia\\kornia\\core\\mixin\\image_module.py",
    "ast_data": "FunctionDef name:to_pil arg:self arg:x arguments arg arg If Call Assign Call Call If Compare Call Assign Call Return return:yes Call Call Call If Compare Call Assign Call Return return:yes Call Call Call Raise If Call Raise If Call Return return:yes Raise Call"
  },
  {
    "library": "tensorflow",
    "name": "_replace_per_replica_spec",
    "source_code": "def _replace_per_replica_spec(spec, i):\n    if isinstance(spec, values.PerReplicaSpec):\n        return spec._value_specs[i]\n    else:\n        return spec",
    "docstring": "If is a , then return its th value_spec.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\input_lib.py",
    "ast_data": "FunctionDef name:_replace_per_replica_spec arg:spec arg:i arguments arg arg If Call Return return:yes Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "diff",
    "source_code": "def diff(self, other):\n    r = set(self.nn_modules.keys()).difference(set(other.nn_modules.keys()))\n    if len(r) == 0:\n        return None\n    return r",
    "docstring": "Produces a delta against another ModuleContextCheckpointState. Returns None if no delta is found, otherwise, return a set() of mismatched module key names.",
    "type": "method",
    "file_path": "pytorch\\torch\\_guards.py",
    "ast_data": "FunctionDef name:diff arg:self arg:other arguments arg arg Assign Call Call Call Call Call If Compare Call Return return:no Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "scatter_nd_min",
    "source_code": "def scatter_nd_min(self, indices, updates, name=None):\n    return self._lazy_read(gen_state_ops.resource_scatter_nd_min(self.handle, indices, ops.convert_to_tensor(updates, self.dtype), name=name))",
    "docstring": "Updates this variable with the min of and itself. is a with rank and is a of rank . must be integer tensor, containing indices into . It must be shape where . The innermost dimension of (with length ) corresponds to indices into elements (if ) or slices (if ) along the th dimension of . is of rank with shape: See for more details about how to make updates to slices. Args: indices: The indices to be used in the operation. updates: The values to be used in the operation. name: the name of the operation. Returns: The updated variable.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\resource_variable_ops.py",
    "ast_data": "FunctionDef name:scatter_nd_min arg:self arg:indices arg:updates arg:name arguments arg arg arg arg Return return:yes Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "@deprecation.deprecated('2019-01-01', 'The TensorFlow Distributions library has moved to TensorFlow Probability (https://github.com/tensorflow/probability). You should update all references to use `tfp.distributions` instead of `tf.distributions`.', warn_once=True)\ndef __init__(self, loc, scale, validate_args=False, allow_nan_stats=True, name='Normal'):\n    parameters = dict(locals())\n    with ops.name_scope(name, values=[loc, scale]) as name:\n        with ops.control_dependencies([check_ops.assert_positive(scale)] if validate_args else []):\n            self._loc = array_ops.identity(loc, name='loc')\n            self._scale = array_ops.identity(scale, name='scale')\n            check_ops.assert_same_float_dtype([self._loc, self._scale])\n    super(Normal, self).__init__(dtype=self._scale.dtype, reparameterization_type=distribution.FULLY_REPARAMETERIZED, validate_args=validate_args, allow_nan_stats=allow_nan_stats, parameters=parameters, graph_parents=[self._loc, self._scale], name=name)",
    "docstring": "Construct Normal distributions with mean and stddev and . The parameters and must be shaped in a way that supports broadcasting (e.g. is a valid operation). Args: loc: Floating point tensor; the means of the distribution(s). scale: Floating point tensor; the stddevs of the distribution(s). Must contain only positive values. validate_args: Python , default . When distribution parameters are checked for validity despite possibly degrading runtime performance. When invalid inputs may silently render incorrect outputs. allow_nan_stats: Python , default . When , statistics (e.g., mean, mode, variance) use the value \"\" to indicate the result is undefined. When , an exception is raised if one or more of the statistic's batch members are undefined. name: Python name prefixed to Ops created by this class. Raises: TypeError: if and have different .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\distributions\\normal.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:loc arg:scale arg:validate_args arg:allow_nan_stats arg:name arguments arg arg arg arg arg arg Assign Call Call With Call With Call Call Assign Call Assign Call Call Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "__str__",
    "source_code": "def __str__(self) -> str:\n    return 'R'",
    "docstring": "human readable representation of the Replicate placement",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\tensor\\placement_types.py",
    "ast_data": "FunctionDef name:__str__ arg:self arguments arg Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "_set_text_position",
    "source_code": "def _set_text_position(self, renderer):\n    bbox = self.get_window_extent(renderer)\n    y = bbox.y0 + bbox.height / 2\n    loc = self._text.get_horizontalalignment()\n    if loc == 'center':\n        x = bbox.x0 + bbox.width / 2\n    elif loc == 'left':\n        x = bbox.x0 + bbox.width * self.PAD\n    else:\n        x = bbox.x0 + bbox.width * (1 - self.PAD)\n    self._text.set_position((x, y))",
    "docstring": "Set text up so it is drawn in the right place.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\table.py",
    "ast_data": "FunctionDef name:_set_text_position arg:self arg:renderer arguments arg arg Assign Call Assign Assign Call If Compare Assign If Compare Assign Assign Call"
  },
  {
    "library": "tensorflow",
    "name": "_get_executor_init",
    "source_code": "def _get_executor_init(self, workers):\n\n    def pool_fn(seqs):\n        pool = get_pool_class(True)(workers, initializer=init_pool_generator, initargs=(seqs, self.random_seed, get_worker_id_queue()))\n        _DATA_POOLS.add(pool)\n        return pool\n    return pool_fn",
    "docstring": "Gets the Pool initializer for multiprocessing. Args: workers: Number of works. Returns: A Function to initialize the pool",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\utils\\data_utils.py",
    "ast_data": "FunctionDef name:_get_executor_init arg:self arg:workers arguments arg arg FunctionDef name:pool_fn arg:seqs arguments arg Assign Call Call Call Call Return return:yes Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "TracingContext",
    "source_code": "class TracingContext(metaclass=abc.ABCMeta):\n    pass",
    "docstring": "Contains information scoped to the tracing of multiple objects. is a container class for flags and variables that have any kind of influence on the tracing behaviour of the class implementing the __tf_tracing_type__. This context will be shared across all __tf_tracing_type__ calls while constructing the TraceType for a particular set of objects.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\types\\trace.py",
    "ast_data": "ClassDef name:TracingContext"
  },
  {
    "library": "pygame",
    "name": "blit_array",
    "source_code": "def blit_array(surface, array):\n    if isinstance(array, numpy_ndarray) and array.dtype in numpy_floats:\n        array = array.round(0).astype(numpy_uint32)\n    return array_to_surface(surface, array)",
    "docstring": "pygame.surfarray.blit_array(Surface, array): return None Blit directly from a array values. Directly copy values from an array into a Surface. This is faster than converting the array into a Surface and blitting. The array must be the same dimensions as the Surface and will completely replace all pixel values. Only integer, ascii character and record arrays are accepted. This function will temporarily lock the Surface as the new values are copied.",
    "type": "function",
    "file_path": "pygame\\src_py\\surfarray.py",
    "ast_data": "FunctionDef name:blit_array arg:surface arg:array arguments arg arg If BoolOp Call Compare Assign Call Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "OnnxBackend",
    "source_code": "class OnnxBackend(enum.Enum):\n    REFERENCE = 'ONNXReferenceEvaluator'\n    ONNX_RUNTIME_CPU = 'CPUExecutionProvider'\n    ONNX_RUNTIME_CUDA = 'CUDAExecutionProvider'",
    "docstring": "Enum class for ONNX backend used for export verification. .. deprecated:: 2.7 Consider using `` to test the ONNX model.",
    "type": "class",
    "file_path": "pytorch\\torch\\onnx\\verification.py",
    "ast_data": "ClassDef name:OnnxBackend Assign Assign Assign"
  },
  {
    "library": "scipy",
    "name": "init_options",
    "source_code": "def init_options(self, options):\n    self.minimizer_kwargs['options'].update(options)\n    for opt in ['jac', 'hess', 'hessp']:\n        if opt in self.minimizer_kwargs['options']:\n            self.minimizer_kwargs[opt] = self.minimizer_kwargs['options'].pop(opt)\n    self.minimize_every_iter = options.get('minimize_every_iter', True)\n    self.maxiter = options.get('maxiter', None)\n    self.maxfev = options.get('maxfev', None)\n    self.maxev = options.get('maxev', None)\n    self.init = time.time()\n    self.maxtime = options.get('maxtime', None)\n    if 'f_min' in options:\n        self.f_min_true = options['f_min']\n        self.f_tol = options.get('f_tol', 0.0001)\n    else:\n        self.f_min_true = None\n    self.minhgrd = options.get('minhgrd', None)\n    self.symmetry = options.get('symmetry', False)\n    if self.symmetry:\n        self.symmetry = [0] * len(self.bounds)\n    else:\n        self.symmetry = None\n    self.local_iter = options.get('local_iter', False)\n    self.infty_cons_sampl = options.get('infty_constraints', True)\n    self.disp = options.get('disp', False)",
    "docstring": "Initiates the options. Can also be useful to change parameters after class initiation. Parameters ---------- options : dict Returns ------- None",
    "type": "method",
    "file_path": "scipy\\scipy\\optimize\\_shgo.py",
    "ast_data": "FunctionDef name:init_options arg:self arg:options arguments arg arg Call For If Compare Assign Call Assign Call Assign Call Assign Call Assign Call Assign Call Assign Call If Compare Assign Assign Call Assign Assign Call Assign Call If Assign Call Assign Assign Call Assign Call Assign Call"
  },
  {
    "library": "tensorflow",
    "name": "master",
    "source_code": "def master(self, task_type=None, task_id=None, rpc_layer=None):\n    task_type = task_type if task_type is not None else self.task_type\n    task_id = task_id if task_id is not None else self.task_id\n    if task_type is not None and task_id is not None:\n        return format_master_url(self.cluster_spec().task_address(task_type, task_id), rpc_layer or self.rpc_layer)\n    return ''",
    "docstring": "Returns the master string for connecting to a TensorFlow master. Args: task_type: (Optional) Overrides the default auto-selected task type. task_id: (Optional) Overrides the default auto-selected task index. rpc_layer: (Optional) Overrides the default RPC protocol TensorFlow uses to communicate across nodes. Returns: A connection string for connecting to a TensorFlow master.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\cluster_resolver\\slurm_cluster_resolver.py",
    "ast_data": "FunctionDef name:master arg:self arg:task_type arg:task_id arg:rpc_layer arguments arg arg arg arg Assign Compare Assign Compare If BoolOp Compare Compare Return return:yes Call Call Call BoolOp Return return:yes"
  },
  {
    "library": "pandas",
    "name": "_round_frac",
    "source_code": "def _round_frac(x, precision: int):\n    if not np.isfinite(x) or x == 0:\n        return x\n    else:\n        frac, whole = np.modf(x)\n        if whole == 0:\n            digits = -int(np.floor(np.log10(abs(frac)))) - 1 + precision\n        else:\n            digits = precision\n        return np.around(x, digits)",
    "docstring": "Round the fractional part of the given number",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\reshape\\tile.py",
    "ast_data": "FunctionDef name:_round_frac arg:x arg:precision arguments arg arg If BoolOp Call Compare Return return:yes Assign Call If Compare Assign Call Call Call Call Assign Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "SimpleOperatorEntry",
    "source_code": "class SimpleOperatorEntry:\n\n    def __init__(self, qualname: str):\n        self.qualname: str = qualname\n        self.fake_impl: FakeImplHolder = FakeImplHolder(qualname)\n        self.torch_dispatch_rules: GenericTorchDispatchRuleHolder = GenericTorchDispatchRuleHolder(qualname)\n\n    @property\n    def abstract_impl(self):\n        return self.fake_impl",
    "docstring": "This is 1:1 to an operator overload. The fields of SimpleOperatorEntry are Holders where kernels can be registered to.",
    "type": "class",
    "file_path": "pytorch\\torch\\_library\\simple_registry.py",
    "ast_data": "ClassDef name:SimpleOperatorEntry FunctionDef name:__init__ arg:self arg:qualname arguments arg arg Call Call FunctionDef name:abstract_impl arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "get_run_short_description",
    "source_code": "def get_run_short_description(run_call_count, fetches, feed_dict, is_callable_runner=False):\n    if is_callable_runner:\n        return 'runner from make_callable()'\n    description = 'run #%d: ' % run_call_count\n    if isinstance(fetches, (tensor_lib.Tensor, ops.Operation, variables.Variable)):\n        description += '1 fetch (%s); ' % common.get_graph_element_name(fetches)\n    else:\n        num_fetches = len(common.get_flattened_names(fetches))\n        if num_fetches > 1:\n            description += '%d fetches; ' % num_fetches\n        else:\n            description += '%d fetch; ' % num_fetches\n    if not feed_dict:\n        description += '0 feeds'\n    elif len(feed_dict) == 1:\n        for key in feed_dict:\n            description += '1 feed (%s)' % (key if isinstance(key, str) or not hasattr(key, 'name') else key.name)\n    else:\n        description += '%d feeds' % len(feed_dict)\n    return description",
    "docstring": "Get a short description of the run() call. Args: run_call_count: (int) Run call counter. fetches: Fetches of the call. See doc of for more details. feed_dict: Feeds to the call. See doc of for more details. is_callable_runner: (bool) whether a runner returned by Session.make_callable is being run. Returns: (str) A short description of the run() call, including information about the fetche(s) and feed(s).",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\debug\\cli\\cli_shared.py",
    "ast_data": "FunctionDef name:get_run_short_description arg:run_call_count arg:fetches arg:feed_dict arg:is_callable_runner arguments arg arg arg arg If Return return:yes Assign If Call Call Assign Call Call If Compare If If Compare Call For BoolOp Call Call Call Return return:yes"
  },
  {
    "library": "pandas",
    "name": "astype_array",
    "source_code": "def astype_array(values: ArrayLike, dtype: DtypeObj, copy: bool=False) -> ArrayLike:\n    if values.dtype == dtype:\n        if copy:\n            return values.copy()\n        return values\n    if not isinstance(values, np.ndarray):\n        values = values.astype(dtype, copy=copy)\n    else:\n        values = _astype_nansafe(values, dtype, copy=copy)\n    if isinstance(dtype, np.dtype) and issubclass(values.dtype.type, str):\n        values = np.array(values, dtype=object)\n    return values",
    "docstring": "Cast array (ndarray or ExtensionArray) to the new dtype. Parameters ---------- values : ndarray or ExtensionArray dtype : dtype object copy : bool, default False copy if indicated Returns ------- ndarray or ExtensionArray",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\dtypes\\astype.py",
    "ast_data": "FunctionDef name:astype_array arg:values arg:dtype arg:copy arguments arg arg arg If Compare If Return return:yes Call Return return:yes If Call Assign Call Assign Call If BoolOp Call Call Assign Call Return return:yes"
  },
  {
    "library": "scipy",
    "name": "Quintic",
    "source_code": "class Quintic(Benchmark):\n    change_dimensionality = True\n\n    def __init__(self, dimensions=2):\n        Benchmark.__init__(self, dimensions)\n        self._bounds = list(zip([-10.0] * self.N, [10.0] * self.N))\n        self.custom_bounds = [(-2, 2), (-2, 2)]\n        self.global_optimum = [[-1.0 for _ in range(self.N)]]\n        self.fglob = 0\n\n    def fun(self, x, *args):\n        self.nfev += 1\n        return sum(abs(x ** 5 - 3 * x ** 4 + 4 * x ** 3 + 2 * x ** 2 - 10 * x - 4))",
    "docstring": "Quintic objective function. This class defines the Quintic [1]_ global optimization problem. This is a multimodal minimization problem defined as follows: .. math:: f_{\\text{Quintic}}(x) = \\sum_{i=1}^{n} \\left|{x_{i}^{5} - 3 x_{i}^{4} + 4 x_{i}^{3} + 2 x_{i}^{2} - 10 x_{i} -4}\\right| Here, :math: represents the number of dimensions and :math: for :math:. *Global optimum*: :math: for :math: for :math: .. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions For Global Optimization Problems Int. Journal of Mathematical Modelling and Numerical Optimisation, 2013, 4, 150-194.",
    "type": "class",
    "file_path": "scipy\\benchmarks\\benchmarks\\go_benchmark_functions\\go_funcs_Q.py",
    "ast_data": "ClassDef name:Quintic Assign FunctionDef name:__init__ arg:self arg:dimensions arguments arg arg Call Assign Call Call Assign Assign Call Assign FunctionDef name:fun arg:self arg:x arguments arg arg arg Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "invoke",
    "source_code": "def invoke(self):\n    self._ensure_safe()\n    self._interpreter.Invoke()",
    "docstring": "Invoke the interpreter. Be sure to set the input sizes, allocate tensors and fill values before calling this. Also, note that this function releases the GIL so heavy computation can be done in the background while the Python interpreter continues. No other function on this object should be called while the invoke() call has not finished. Raises: ValueError: When the underlying interpreter fails raise ValueError.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\lite\\python\\interpreter.py",
    "ast_data": "FunctionDef name:invoke arg:self arguments arg Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_create_example_string",
    "source_code": "def _create_example_string(example_dict):\n    example = example_pb2.Example()\n    for feature_name, feature_list in example_dict.items():\n        if not isinstance(feature_list, list):\n            raise ValueError('feature value must be a list, but %s: \"%s\" is %s' % (feature_name, feature_list, type(feature_list)))\n        if isinstance(feature_list[0], float):\n            example.features.feature[feature_name].float_list.value.extend(feature_list)\n        elif isinstance(feature_list[0], str):\n            example.features.feature[feature_name].bytes_list.value.extend([f.encode('utf8') for f in feature_list])\n        elif isinstance(feature_list[0], bytes):\n            example.features.feature[feature_name].bytes_list.value.extend(feature_list)\n        elif isinstance(feature_list[0], int):\n            example.features.feature[feature_name].int64_list.value.extend(feature_list)\n        else:\n            raise ValueError('Type %s for value %s is not supported for tf.train.Feature.' % (type(feature_list[0]), feature_list[0]))\n    return example.SerializeToString()",
    "docstring": "Create a serialized tf.example from feature dictionary.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\tools\\saved_model_cli.py",
    "ast_data": "FunctionDef name:_create_example_string arg:example_dict arguments arg Assign Call For Call If Call Raise Call Call If Call Call If Call Call Call If Call Call If Call Call Raise Call Call Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "get_offset",
    "source_code": "def get_offset(self):\n    return self._offset",
    "docstring": "Return offset of the container.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\offsetbox.py",
    "ast_data": "FunctionDef name:get_offset arg:self arguments arg Return return:yes"
  },
  {
    "library": "django",
    "name": "xframe_options_exempt",
    "source_code": "def xframe_options_exempt(view_func):\n    if iscoroutinefunction(view_func):\n\n        async def _view_wrapper(*args, **kwargs):\n            response = await view_func(*args, **kwargs)\n            response.xframe_options_exempt = True\n            return response\n    else:\n\n        def _view_wrapper(*args, **kwargs):\n            response = view_func(*args, **kwargs)\n            response.xframe_options_exempt = True\n            return response\n    return wraps(view_func)(_view_wrapper)",
    "docstring": "Modify a view function by setting a response variable that instructs XFrameOptionsMiddleware to NOT set the X-Frame-Options HTTP header. Usage: @xframe_options_exempt def some_view(request): ...",
    "type": "function",
    "file_path": "django\\django\\views\\decorators\\clickjacking.py",
    "ast_data": "FunctionDef name:xframe_options_exempt arg:view_func arguments arg If Call AsyncFunctionDef name:_view_wrapper arguments arg arg Assign Call Assign Return return:yes FunctionDef name:_view_wrapper arguments arg arg Assign Call Assign Return return:yes Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "normalize_source_lines",
    "source_code": "def normalize_source_lines(sourcelines: list[str]) -> list[str]:\n\n    def remove_prefix(text, prefix):\n        return text[text.startswith(prefix) and len(prefix):]\n    idx = None\n    for i, l in enumerate(sourcelines):\n        if l.lstrip().startswith('def'):\n            idx = i\n            break\n    if idx is None:\n        return sourcelines\n    fn_def = sourcelines[idx]\n    whitespace = fn_def.split('def')[0]\n    aligned_prefix = [whitespace + remove_prefix(s, whitespace) for s in sourcelines[:idx]]\n    aligned_suffix = [whitespace + remove_prefix(s, whitespace) for s in sourcelines[idx + 1:]]\n    aligned_prefix.append(fn_def)\n    return aligned_prefix + aligned_suffix",
    "docstring": "This helper function accepts a list of source lines. It finds the indentation level of the function definition (), then it indents all lines in the function body to a point at or greater than that level. This allows for comments and continued string literals that are at a lower indentation than the rest of the code. Args: sourcelines: function source code, separated into lines by the ' ' character Returns: A list of source lines that have been correctly aligned",
    "type": "function",
    "file_path": "pytorch\\torch\\_sources.py",
    "ast_data": "FunctionDef name:normalize_source_lines arg:sourcelines arguments arg FunctionDef name:remove_prefix arg:text arg:prefix arguments arg arg Return return:yes BoolOp Call Call Assign For Call If Call Call Assign If Compare Return return:yes Assign Assign Call Assign Call Assign Call Call Return return:yes"
  },
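A small demonstration of the realignment on a function whose leading comment sits at a lower indentation; importing from `torch._sources` is an assumption, since it is a private module:

from torch._sources import normalize_source_lines  # private import

lines = [
    "# comment at column 0",
    "    def f(x):",
    "        return x + 1",
]
print(normalize_source_lines(lines))
# ['    # comment at column 0', '    def f(x):', '        return x + 1']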
  {
    "library": "tensorflow",
    "name": "_queue_children_for_restoration",
    "source_code": "def _queue_children_for_restoration(checkpoint_position, visit_queue):\n    trackable = checkpoint_position.trackable\n    trackable_children = trackable._trackable_children()\n    adapter = _maybe_get_adapter(checkpoint_position, trackable)\n    for child in checkpoint_position.object_proto.children:\n        correspondence = checkpoint_position.checkpoint.object_by_proto_id.get(child.node_id, None)\n        if correspondence is not None:\n            continue\n        child_position = checkpoint_position.create_child_position(child.node_id)\n        local_object = trackable._lookup_dependency(child.local_name, trackable_children)\n        child_proto = child_position.object_proto\n        if local_object is None:\n            if child_proto.HasField('has_checkpoint_values'):\n                has_value = child_proto.has_checkpoint_values.value\n            else:\n                has_value = bool(child_proto.children or child_proto.attributes or child_proto.slot_variables or child_proto.HasField('registered_saver'))\n            if has_value:\n                local_trackable_name = child.local_name\n                if adapter:\n                    local_trackable_name, reshard_callback = adapter.maybe_reshard(child.local_name)\n                    if reshard_callback:\n                        child_position.update_resharding_callback(reshard_callback)\n                trackable._deferred_dependencies.setdefault(local_trackable_name, []).append(child_position)\n        elif child_position.bind_object(trackable=local_object):\n            visit_queue.append((child_position, local_object))",
    "docstring": "Queues the restoration of trackable's children or defers them.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\checkpoint\\restore.py",
    "ast_data": "FunctionDef name:_queue_children_for_restoration arg:checkpoint_position arg:visit_queue arguments arg arg Assign Assign Call Assign Call For Assign Call If Compare Assign Call Assign Call Assign If Compare If Call Assign Assign Call BoolOp Call If Assign If Assign Call If Call Call Call If Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_structured_tensor_from_dense_tensor",
    "source_code": "def _structured_tensor_from_dense_tensor(t):\n    if t.shape.is_fully_defined():\n        return StructuredTensor.from_fields({}, shape=t.shape)\n    elif t.shape.rank is None:\n        raise ValueError(\"Can't build StructuredTensor w/ unknown rank\")\n    elif t.shape.rank == 1:\n        return StructuredTensor.from_fields({}, shape=t.shape, nrows=array_ops.shape(t)[0])\n    else:\n        rt = ragged_tensor.RaggedTensor.from_tensor(t)\n        return _structured_tensor_from_row_partitions(t.shape, rt._nested_row_partitions)",
    "docstring": "Create a structured tensor with the shape of a dense tensor.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\structured\\structured_array_ops.py",
    "ast_data": "FunctionDef name:_structured_tensor_from_dense_tensor arg:t arguments arg If Call Return return:yes Call If Compare Raise Call If Compare Return return:yes Call Call Assign Call Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "tolil",
    "source_code": "def tolil(self, copy=False):\n    return self.tocsr(copy=False).tolil(copy=copy)",
    "docstring": "Convert this array/matrix to List of Lists format. With copy=False, the data/indices may be shared between this array/matrix and the resultant lil_array/matrix.",
    "type": "method",
    "file_path": "scipy\\scipy\\sparse\\_base.py",
    "ast_data": "FunctionDef name:tolil arg:self arg:copy arguments arg arg Return return:yes Call Call"
  },
  {
    "library": "matplotlib",
    "name": "kern",
    "source_code": "def kern(self) -> None:\n    new_children = []\n    num_children = len(self.children)\n    if num_children:\n        for i in range(num_children):\n            elem = self.children[i]\n            if i < num_children - 1:\n                next = self.children[i + 1]\n            else:\n                next = None\n            new_children.append(elem)\n            kerning_distance = elem.get_kerning(next)\n            if kerning_distance != 0.0:\n                kern = Kern(kerning_distance)\n                new_children.append(kern)\n        self.children = new_children",
    "docstring": "Insert nodes between nodes to set kerning. The nodes themselves determine the amount of kerning they need (in ), and this function just creates the correct linked list.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\_mathtext.py",
    "ast_data": "FunctionDef name:kern arg:self arguments arg Assign Assign Call If For Call Assign If Compare Assign Assign Call Assign Call If Compare Assign Call Call Assign"
  },
  {
    "library": "tensorflow",
    "name": "record",
    "source_code": "def record(self, flat_outputs, inference_args, input_tangents):\n    backward_function, to_record = self._backward(flat_outputs)\n    record.record_operation(self._inference_function.cached_definition.signature.name, to_record, inference_args + input_tangents, backward_function)",
    "docstring": "Record the function call operation. _DelayedRewriteGradientFunctions supports only first-order backprop tape gradients (and then only when graph building). It does not work with higher-order tape gradients or forward autodiff, but does work with higher-order symbolic gradients (tf.gradients). Args: flat_outputs: The result of running . inference_args: A flat list of Tensors with inference inputs to the operation. input_tangents: A flat list of Tensors with input tangents consumed by the operation.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\polymorphic_function\\concrete_function.py",
    "ast_data": "FunctionDef name:record arg:self arg:flat_outputs arg:inference_args arg:input_tangents arguments arg arg arg arg Assign Call Call"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, interpreter=None, signature_key=None):\n    if not interpreter:\n        raise ValueError('None interpreter provided.')\n    if not signature_key:\n        raise ValueError('None signature_key provided.')\n    self._interpreter = interpreter\n    self._interpreter_wrapper = interpreter._interpreter\n    self._signature_key = signature_key\n    signature_defs = interpreter._get_full_signature_list()\n    if signature_key not in signature_defs:\n        raise ValueError(f'Invalid signature_key provided: \"{signature_key}\".')\n    self._signature_def = signature_defs[signature_key]\n    self._outputs = self._signature_def['outputs'].items()\n    self._inputs = self._signature_def['inputs']\n    self._subgraph_index = self._interpreter_wrapper.GetSubgraphIndexFromSignature(self._signature_key)",
    "docstring": "Constructor. Args: interpreter: Interpreter object that is already initialized with the requested model. signature_key: SignatureDef key to be used.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\lite\\python\\interpreter.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:interpreter arg:signature_key arguments arg arg arg If Raise Call If Raise Call Assign Assign Assign Assign Call If Compare Raise Call Assign Assign Call Assign Assign Call"
  },
  {
    "library": "scipy",
    "name": "newer",
    "source_code": "def newer(source, target):\n    if not os.path.exists(source):\n        raise ValueError(f\"file '{os.path.abspath(source)}' does not exist\")\n    if not os.path.exists(target):\n        return 1\n    mtime1 = os.stat(source)[ST_MTIME]\n    mtime2 = os.stat(target)[ST_MTIME]\n    return mtime1 > mtime2",
    "docstring": "Return true if 'source' exists and is more recently modified than 'target', or if 'source' exists and 'target' doesn't. Return false if both exist and 'target' is the same age or younger than 'source'.",
    "type": "function",
    "file_path": "scipy\\scipy\\special\\utils\\makenpz.py",
    "ast_data": "FunctionDef name:newer arg:source arg:target arguments arg arg If Call Raise Call Call If Call Return return:yes Assign Call Assign Call Return return:yes Compare"
  },
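A quick demonstration of the staleness semantics, assuming `newer` from the record above is in scope (makenpz.py is a scipy build helper, not an importable public module):

import os
import tempfile

d = tempfile.mkdtemp()
src = os.path.join(d, 'a.txt')
dst = os.path.join(d, 'b.npz')
open(src, 'w').close()
print(newer(src, dst))   # 1: a missing target is always out of date
open(dst, 'w').close()
print(newer(src, dst))   # False: target is not older than source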
  {
    "library": "tensorflow",
    "name": "set_windows_build_flags",
    "source_code": "def set_windows_build_flags(environ_cp):\n    write_to_bazelrc('build --copt=/d2ReducedOptimizeHugeFunctions --host_copt=/d2ReducedOptimizeHugeFunctions')\n    if get_var(environ_cp, 'TF_OVERRIDE_EIGEN_STRONG_INLINE', 'Eigen strong inline', True, 'Would you like to override eigen strong inline for some C++ compilation to reduce the compilation time?', 'Eigen strong inline overridden.', 'Not overriding eigen strong inline, some compilations could take more than 20 mins.'):\n        write_to_bazelrc('build --define=override_eigen_strong_inline=true')",
    "docstring": "Set Windows specific build options.",
    "type": "function",
    "file_path": "tensorflow\\configure.py",
    "ast_data": "FunctionDef name:set_windows_build_flags arg:environ_cp arguments arg Call If Call Call"
  },
  {
    "library": "scipy",
    "name": "_expm_multiply_interval_core_1",
    "source_code": "def _expm_multiply_interval_core_1(A, X, h, mu, m_star, s, q, tol):\n    d = q // s\n    input_shape = X.shape[1:]\n    K_shape = (m_star + 1,) + input_shape\n    K = np.empty(K_shape, dtype=X.dtype)\n    for i in range(s):\n        Z = X[i * d]\n        K[0] = Z\n        high_p = 0\n        for k in range(1, d + 1):\n            F = K[0]\n            c1 = _exact_inf_norm(F)\n            for p in range(1, m_star + 1):\n                if p > high_p:\n                    K[p] = h * A.dot(K[p - 1]) / float(p)\n                coeff = float(pow(k, p))\n                F = F + coeff * K[p]\n                inf_norm_K_p_1 = _exact_inf_norm(K[p])\n                c2 = coeff * inf_norm_K_p_1\n                if c1 + c2 <= tol * _exact_inf_norm(F):\n                    break\n                c1 = c2\n            X[k + i * d] = np.exp(k * h * mu) * F\n    return (X, 1)",
    "docstring": "A helper function, for the case q > s and q % s == 0.",
    "type": "function",
    "file_path": "scipy\\scipy\\sparse\\linalg\\_expm_multiply.py",
    "ast_data": "FunctionDef name:_expm_multiply_interval_core_1 arg:A arg:X arg:h arg:mu arg:m_star arg:s arg:q arg:tol arguments arg arg arg arg arg arg arg arg Assign Assign Assign Assign Call For Call Assign Assign Assign For Call Assign Assign Call For Call If Compare Assign Call Call Assign Call Call Assign Assign Call Assign If Compare Call Assign Assign Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "set_transform",
    "source_code": "def set_transform(self, t):\n    self._transform = t\n    self._transformSet = True\n    self.pchanged()\n    self.stale = True",
    "docstring": "Set the artist transform. Parameters ---------- t :",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\artist.py",
    "ast_data": "FunctionDef name:set_transform arg:self arg:t arguments arg arg Assign Assign Call Assign"
  },
  {
    "library": "numpy",
    "name": "openhook",
    "source_code": "def openhook(filename, mode):\n    if charset_normalizer is not None:\n        encoding = charset_normalizer.from_path(filename).best().encoding\n    else:\n        nbytes = min(32, os.path.getsize(filename))\n        with open(filename, 'rb') as fhandle:\n            raw = fhandle.read(nbytes)\n            if raw.startswith(codecs.BOM_UTF8):\n                encoding = 'UTF-8-SIG'\n            elif raw.startswith((codecs.BOM_UTF32_LE, codecs.BOM_UTF32_BE)):\n                encoding = 'UTF-32'\n            elif raw.startswith((codecs.BOM_LE, codecs.BOM_BE)):\n                encoding = 'UTF-16'\n            else:\n                encoding = 'ascii'\n    return open(filename, mode, encoding=encoding)",
    "docstring": "Ensures that filename is opened with correct encoding parameter. This function uses charset_normalizer package, when available, for determining the encoding of the file to be opened. When charset_normalizer is not available, the function detects only UTF encodings, otherwise, ASCII encoding is used as fallback.",
    "type": "function",
    "file_path": "numpy\\numpy\\f2py\\crackfortran.py",
    "ast_data": "FunctionDef name:openhook arg:filename arg:mode arguments arg arg If Compare Assign Call Call Assign Call Call With Call Assign Call If Call Assign If Call Assign If Call Assign Assign Return return:yes Call"
  },
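A standalone sketch of the BOM-sniffing fallback branch. Note the UTF-32 check must come before UTF-16, because the UTF-32 LE BOM begins with the same two bytes as the UTF-16 LE BOM:

import codecs
import os

def detect_encoding(filename):
    # Mirrors openhook's fallback when charset_normalizer is unavailable.
    nbytes = min(32, os.path.getsize(filename))
    with open(filename, 'rb') as fh:
        raw = fh.read(nbytes)
    if raw.startswith(codecs.BOM_UTF8):
        return 'UTF-8-SIG'
    if raw.startswith((codecs.BOM_UTF32_LE, codecs.BOM_UTF32_BE)):
        return 'UTF-32'   # checked first: BOM_UTF32_LE starts with BOM_LE bytes
    if raw.startswith((codecs.BOM_LE, codecs.BOM_BE)):
        return 'UTF-16'
    return 'ascii'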
  {
    "library": "scipy",
    "name": "wilcoxon",
    "source_code": "@_rename_parameter('mode', 'method')\n@_axis_nan_policy_factory(wilcoxon_result_object, paired=True, n_samples=lambda kwds: 2 if kwds.get('y', None) is not None else 1, result_to_tuple=wilcoxon_result_unpacker, n_outputs=wilcoxon_outputs)\ndef wilcoxon(x, y=None, zero_method='wilcox', correction=False, alternative='two-sided', method='auto', *, axis=0):\n    if method == 'approx':\n        method = 'asymptotic'\n    return _wilcoxon._wilcoxon_nd(x, y, zero_method, correction, alternative, method, axis)",
    "docstring": "Calculate the Wilcoxon signed-rank test. The Wilcoxon signed-rank test tests the null hypothesis that two related paired samples come from the same distribution. In particular, it tests whether the distribution of the differences `ywilcoxonxyxyPermutationMethodalternativealternativemethodstatisticzstatisticPermutationMethod10.1080/01621459.1959.1050152610.2307/300196810.1080/01621459.1967.10500917wilcoxonwilcoxon` and adjusting it as necessary to ensure that theoretically identically values are not numerically distinct. For example: >>> d2 = np.around(x - y, decimals=3) >>> wilcoxon(d2, alternative='greater') WilcoxonResult(statistic=6.0, pvalue=0.5)",
    "type": "function",
    "file_path": "scipy\\scipy\\stats\\_morestats.py",
    "ast_data": "FunctionDef name:wilcoxon arg:x arg:y arg:zero_method arg:correction arg:alternative arg:method arguments arg arg arg arg arg arg arg If Compare Assign Return return:yes Call Call Call arguments arg Compare Call"
  },
  {
    "library": "django",
    "name": "set_annotation_mask",
    "source_code": "def set_annotation_mask(self, names):\n    if names is None:\n        self.annotation_select_mask = None\n    else:\n        self.annotation_select_mask = set(names)\n        if self.selected:\n            self.selected = {key: value for key, value in self.selected.items() if not isinstance(value, str) or value in self.annotation_select_mask}\n            for name in names:\n                self.selected[name] = name\n    self._annotation_select_cache = None",
    "docstring": "Set the mask of annotations that will be returned by the SELECT.",
    "type": "method",
    "file_path": "django\\django\\db\\models\\sql\\query.py",
    "ast_data": "FunctionDef name:set_annotation_mask arg:self arg:names arguments arg arg If Compare Assign Assign Call If Assign Call BoolOp Call Compare For Assign Assign"
  },
  {
    "library": "pytorch",
    "name": "train",
    "source_code": "def train(self, mode: bool=True) -> Self:\n    if not isinstance(mode, bool):\n        raise ValueError('training mode is expected to be boolean')\n    self.training = mode\n    for module in self.children():\n        module.train(mode)\n    return self",
    "docstring": "Set the module in training mode. This has an effect only on certain modules. See the documentation of particular modules for details of their behaviors in training/evaluation mode, i.e., whether they are affected, e.g. :class:, :class:, etc. Args: mode (bool): whether to set training mode (``. Returns: Module: self",
    "type": "method",
    "file_path": "pytorch\\torch\\nn\\modules\\module.py",
    "ast_data": "FunctionDef name:train arg:self arg:mode arguments arg arg If Call Raise Call Assign For Call Call Return return:yes"
  },
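Standard usage: `train()` recurses into children, so a single call on the root flips the whole model, and `eval()` is equivalent to `train(False)`:

import torch.nn as nn

model = nn.Sequential(nn.Linear(4, 4), nn.Dropout(p=0.5))
model.train()                 # dropout active during forward passes
assert model[1].training
model.eval()                  # same as model.train(False)
assert not model[1].training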
  {
    "library": "scrapy",
    "name": "help",
    "source_code": "def help(self) -> str:\n    return self.long_desc()",
    "docstring": "An extensive help for the command. It will be shown when using the \"help\" command. It can contain newlines since no post-formatting will be applied to its contents.",
    "type": "method",
    "file_path": "scrapy\\scrapy\\commands\\__init__.py",
    "ast_data": "FunctionDef name:help arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "_is_avx2_supported",
    "source_code": "def _is_avx2_supported() -> bool:\n    return torch._C._cpu._is_avx2_supported()",
    "docstring": "Returns a bool indicating if CPU supports AVX2.",
    "type": "function",
    "file_path": "pytorch\\torch\\cpu\\__init__.py",
    "ast_data": "FunctionDef name:_is_avx2_supported arguments Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "__init__",
    "source_code": "def __init__(self, column: pd.Series, allow_copy: bool=True) -> None:\n    if isinstance(column, pd.DataFrame):\n        raise TypeError(f'Expected a Series, got a DataFrame. This likely happened because you called __dataframe__ on a DataFrame which, after converting column names to string, resulted in duplicated names: {column.columns}. Please rename these columns before using the interchange protocol.')\n    if not isinstance(column, pd.Series):\n        raise NotImplementedError(f'Columns of type {type(column)} not handled yet')\n    self._col = column\n    self._allow_copy = allow_copy",
    "docstring": "Note: doesn't deal with extension arrays yet, just assume a regular Series/ndarray for now.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\interchange\\column.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:column arg:allow_copy arguments arg arg arg If Call Raise Call If Call Raise Call Call Assign Assign"
  },
  {
    "library": "scikit-learn",
    "name": "_staged_raw_predict",
    "source_code": "def _staged_raw_predict(self, X):\n    check_is_fitted(self)\n    X = self._preprocess_X(X, reset=False)\n    if X.shape[1] != self._n_features:\n        raise ValueError('X has {} features but this estimator was trained with {} features.'.format(X.shape[1], self._n_features))\n    n_samples = X.shape[0]\n    raw_predictions = np.zeros(shape=(n_samples, self.n_trees_per_iteration_), dtype=self._baseline_prediction.dtype, order='F')\n    raw_predictions += self._baseline_prediction\n    n_threads = _openmp_effective_n_threads()\n    for iteration in range(len(self._predictors)):\n        self._predict_iterations(X, self._predictors[iteration:iteration + 1], raw_predictions, is_binned=False, n_threads=n_threads)\n        yield raw_predictions.copy()",
    "docstring": "Compute raw predictions of `classes_`.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\ensemble\\_hist_gradient_boosting\\gradient_boosting.py",
    "ast_data": "FunctionDef name:_staged_raw_predict arg:self arg:X arguments arg arg Call Assign Call If Compare Raise Call Call Assign Assign Call Assign Call For Call Call Call Call"
  },
  {
    "library": "django",
    "name": "DeletionMixin",
    "source_code": "class DeletionMixin:\n    success_url = None\n\n    def delete(self, request, *args, **kwargs):\n        self.object = self.get_object()\n        success_url = self.get_success_url()\n        self.object.delete()\n        return HttpResponseRedirect(success_url)\n\n    def post(self, request, *args, **kwargs):\n        return self.delete(request, *args, **kwargs)\n\n    def get_success_url(self):\n        if self.success_url:\n            return self.success_url.format(**self.object.__dict__)\n        else:\n            raise ImproperlyConfigured('No URL to redirect to. Provide a success_url.')",
    "docstring": "Provide the ability to delete objects.",
    "type": "class",
    "file_path": "django\\django\\views\\generic\\edit.py",
    "ast_data": "ClassDef name:DeletionMixin Assign FunctionDef name:delete arg:self arg:request arguments arg arg arg arg Assign Call Assign Call Call Return return:yes Call FunctionDef name:post arg:self arg:request arguments arg arg arg arg Return return:yes Call FunctionDef name:get_success_url arg:self arguments arg If Return return:yes Call Raise Call"
  },
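A typical concrete use: `DeleteView` mixes in `DeletionMixin`, so a POST deletes the object and redirects to `success_url` (the app and model names here are hypothetical):

from django.views.generic.edit import DeleteView
from myapp.models import Article  # hypothetical app and model

class ArticleDeleteView(DeleteView):
    model = Article
    # success_url may interpolate object fields via str.format, per get_success_url above.
    success_url = '/articles/'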
  {
    "library": "tensorflow",
    "name": "Modality",
    "source_code": "class Modality(enum.Enum):\n    CORE = 'CORE'\n    DATA = 'DATA'",
    "docstring": "Modality/semantic used for treating nested structures. - Modality.CORE follows tensorflow_core/tf.nest semantics. The following collection types are recognized by as nested structures: * (except and ). This includes , , and . * (with sortable keys). This includes and . * (with sortable keys). * [ classes]( Any other values are considered **atoms**. Not all collection types are considered nested structures. For example, the following types are considered atoms: * ; is an atom, while is a nested structure. * [ classes]( * * - Modality.DATA follows tf.data's nest semantics. This modality makes two changes: 1. It removes support for lists as a level of nesting in nested structures. 2. It adds support for as an atomic element. The motivation for this change is twofold: 1. It seems more natural for lists to be treated (e.g. in Dataset constructors) as tensors, rather than lists of (lists of...) tensors. 2. This is needed because is implemented as a that would normally be flattened and we want to be able to create sparse tensor from `SparseTensorValue's similarly to creating tensors from numpy arrays.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\util\\nest_util.py",
    "ast_data": "ClassDef name:Modality Assign Assign"
  },
  {
    "library": "tensorflow",
    "name": "_fix_linecache_record",
    "source_code": "def _fix_linecache_record(obj):\n    if hasattr(obj, '__module__'):\n        obj_file = inspect.getfile(obj)\n        obj_module = obj.__module__\n        loaded_modules = tuple(sys.modules.values())\n        for m in loaded_modules:\n            if hasattr(m, '__file__') and m.__file__ == obj_file:\n                if obj_module is not m:\n                    linecache.updatecache(obj_file, m.__dict__)",
    "docstring": "Fixes potential corruption of linecache in the presence of functools.wraps. functools.wraps modifies the target object's __module__ field, which seems to confuse linecache in special instances, for example when the source is loaded from a .par file (see This function simply triggers a call to linecache.updatecache when a mismatch was detected between the object's __module__ property and the object's source file. Args: obj: Any",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\autograph\\pyct\\inspect_utils.py",
    "ast_data": "FunctionDef name:_fix_linecache_record arg:obj arguments arg If Call Assign Call Assign Assign Call Call For If BoolOp Call Compare If Compare Call"
  },
  {
    "library": "sphinx",
    "name": "fix_ids",
    "source_code": "def fix_ids(self, tree: nodes.document) -> None:\n\n    def update_node_id(node: Element) -> None:\n        new_ids: list[str] = []\n        for node_id in node['ids']:\n            new_id = self.fix_fragment('', node_id)\n            if new_id not in new_ids:\n                new_ids.append(new_id)\n        node['ids'] = new_ids\n    for reference in tree.findall(nodes.reference):\n        if 'refuri' in reference:\n            m = self.refuri_re.match(reference['refuri'])\n            if m:\n                reference['refuri'] = self.fix_fragment(m.group(1), m.group(2))\n        if 'refid' in reference:\n            reference['refid'] = self.fix_fragment('', reference['refid'])\n    for target in tree.findall(nodes.target):\n        update_node_id(target)\n        next_node: Node = target.next_node(ascend=True)\n        if isinstance(next_node, nodes.Element):\n            update_node_id(next_node)\n    for desc_signature in tree.findall(addnodes.desc_signature):\n        update_node_id(desc_signature)",
    "docstring": "Replace colons with hyphens in href and id attributes. Some readers crash because they interpret the part as a transport protocol specification.",
    "type": "method",
    "file_path": "sphinx\\sphinx\\builders\\_epub_base.py",
    "ast_data": "FunctionDef name:fix_ids arg:self arg:tree arguments arg arg FunctionDef name:update_node_id arg:node arguments arg For Assign Call If Compare Call Assign For Call If Compare Assign Call If Assign Call Call Call If Compare Assign Call For Call Call Call If Call Call For Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_check_num_inputs_outputs",
    "source_code": "def _check_num_inputs_outputs(cond_graph, body_graph, num_flattened_loop_vars):\n    assert len(cond_graph.inputs) == num_flattened_loop_vars, 'cond_graph takes %d inputs; Expected: %d' % (len(cond_graph.inputs), num_flattened_loop_vars)\n    assert len(cond_graph.outputs) == 1, 'cond_graph has %d outputs; Expected: 1' % len(cond_graph.outputs)\n    assert len(body_graph.inputs) == num_flattened_loop_vars, 'body_graph takes %d inputs; Expected: %d' % (len(body_graph.inputs), num_flattened_loop_vars)\n    assert len(body_graph.outputs) == num_flattened_loop_vars, 'body_graph has %d outputs; Expected: %d' % (len(body_graph.outputs), num_flattened_loop_vars)",
    "docstring": "Checks the number of inputs/outputs of and .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\while_v2.py",
    "ast_data": "FunctionDef name:_check_num_inputs_outputs arg:cond_graph arg:body_graph arg:num_flattened_loop_vars arguments arg arg arg Compare Call Call Compare Call Call Compare Call Call Compare Call Call"
  },
  {
    "library": "tensorflow",
    "name": "get_logdir",
    "source_code": "def get_logdir(self):\n    return self.event_writer.get_logdir()",
    "docstring": "Returns the directory where event file will be written.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\summary\\writer\\writer.py",
    "ast_data": "FunctionDef name:get_logdir arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "_make_unique_kwarg_list",
    "source_code": "def _make_unique_kwarg_list(seq: Sequence[tuple[Any, Any]]) -> Sequence[tuple[Any, Any]]:\n    return [(pair[0], f'{pair[1]}_{seq[:i].count(pair)}') if seq.count(pair) > 1 else pair for i, pair in enumerate(seq)]",
    "docstring": "Uniquify aggfunc name of the pairs in the order list Examples: -------- >>> kwarg_list = [(\"a\", \"\"), (\"a\", \"\"), (\"b\", \"\")] >>> _make_unique_kwarg_list(kwarg_list) [('a', '_0'), ('a', '_1'), ('b', '')]",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\apply.py",
    "ast_data": "FunctionDef name:_make_unique_kwarg_list arg:seq arguments arg Return return:yes Compare Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_get_recursive",
    "source_code": "def _get_recursive(self, key):\n    value = self.get(key)\n    if value is not None:\n        return value\n    if isinstance(key, func_graph.FuncGraph):\n        return self._get_recursive(self._get_parent_graph(key))\n    return None",
    "docstring": "Gets the value at key or the closest parent graph.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\backend.py",
    "ast_data": "FunctionDef name:_get_recursive arg:self arg:key arguments arg arg Assign Call If Compare Return return:yes If Call Return return:yes Call Call Return return:no"
  },
  {
    "library": "pytorch",
    "name": "_i_will_not_complain_if_bc_breaks_VariableTracker",
    "source_code": "def _i_will_not_complain_if_bc_breaks_VariableTracker(self):\n    return self.__variable",
    "docstring": "Returns the internal data structure VariableTracker that Dynamo uses to represent variables at compile time. There are no BC guarantees on this API and WE RESERVE THE RIGHT TO BREAK YOUR CODE if you rely on it.",
    "type": "method",
    "file_path": "pytorch\\torch\\_dynamo\\comptime.py",
    "ast_data": "FunctionDef name:_i_will_not_complain_if_bc_breaks_VariableTracker arg:self arguments arg Return return:yes"
  },
  {
    "library": "numpy",
    "name": "ComplexFloatingFormat",
    "source_code": "class ComplexFloatingFormat:\n\n    def __init__(self, x, precision, floatmode, suppress_small, sign=False, *, legacy=None):\n        if isinstance(sign, bool):\n            sign = '+' if sign else '-'\n        floatmode_real = floatmode_imag = floatmode\n        if legacy <= 113:\n            floatmode_real = 'maxprec_equal'\n            floatmode_imag = 'maxprec'\n        self.real_format = FloatingFormat(x.real, precision, floatmode_real, suppress_small, sign=sign, legacy=legacy)\n        self.imag_format = FloatingFormat(x.imag, precision, floatmode_imag, suppress_small, sign='+', legacy=legacy)\n\n    def __call__(self, x):\n        r = self.real_format(x.real)\n        i = self.imag_format(x.imag)\n        sp = len(i.rstrip())\n        i = i[:sp] + 'j' + i[sp:]\n        return r + i",
    "docstring": "Formatter for subtypes of np.complexfloating",
    "type": "class",
    "file_path": "numpy\\numpy\\_core\\arrayprint.py",
    "ast_data": "ClassDef name:ComplexFloatingFormat FunctionDef name:__init__ arg:self arg:x arg:precision arg:floatmode arg:suppress_small arg:sign arguments arg arg arg arg arg arg arg If Call Assign Assign If Compare Assign Assign Assign Call Assign Call FunctionDef name:__call__ arg:self arg:x arguments arg arg Assign Call Assign Call Assign Call Call Assign Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_load_actions",
    "source_code": "def _load_actions(self, actions: dict[int, list[Optional[_Action]]], format: str='compute_only'):\n    super()._validate_and_set_stage_mapping(actions)\n    self.pipeline_order_with_comms: dict[int, list[_Action]] = {}\n    if format == 'compute_comms':\n        for rank in actions:\n            self.pipeline_order_with_comms[rank] = []\n            for action in actions[rank]:\n                assert action is not None\n                self.pipeline_order_with_comms[rank].append(action)\n    elif format == 'compute_only':\n        for rank in actions:\n            self.pipeline_order_with_comms[rank] = _add_unshard_reshard(actions[rank])\n        self.pipeline_order_with_comms = _add_send_recv(self.pipeline_order_with_comms, stage_to_rank=lambda s: self.stage_index_to_group_rank[s], num_stages=self._num_stages)\n    else:\n        raise NotImplementedError(f'format={format!r} is not implemented')",
    "docstring": "Given an in-memory representation for a simple compute-only schedule, lower it to a complex schedule including communication actions. Stores the schedule in self, and must be called before running step_mo()",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\pipelining\\schedules.py",
    "ast_data": "FunctionDef name:_load_actions arg:self arg:actions arg:format arguments arg arg arg Call Call If Compare For Assign For Compare Call If Compare For Assign Call Assign Call arguments arg Raise Call"
  },
  {
    "library": "pytorch",
    "name": "device_count",
    "source_code": "def device_count() -> int:\n    return 1",
    "docstring": "Returns number of CPU devices (not cores). Always 1. N.B. This function only exists to facilitate device-agnostic code",
    "type": "function",
    "file_path": "pytorch\\torch\\cpu\\__init__.py",
    "ast_data": "FunctionDef name:device_count arguments Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "interface_script",
    "source_code": "def interface_script(mod_interface, nn_module):\n    if isinstance(nn_module, torch.jit.ScriptModule):\n        return nn_module\n    check_module_initialized(nn_module)\n\n    def infer_interface_methods_to_compile(nn_module):\n        stubs = [make_stub_from_method(nn_module, method) for method in mod_interface.getMethodNames()]\n        return stubs\n    return create_script_module(nn_module, infer_interface_methods_to_compile)",
    "docstring": "Make a ScriptModule from an nn.Module, using the interface methods rule for determining which methods to compile. Args: mod_interface: the interface type that the module have nn_module: The original Python nn.Module that we are creating a ScriptModule for.",
    "type": "function",
    "file_path": "pytorch\\torch\\jit\\_recursive.py",
    "ast_data": "FunctionDef name:interface_script arg:mod_interface arg:nn_module arguments arg arg If Call Return return:yes Call FunctionDef name:infer_interface_methods_to_compile arg:nn_module arguments arg Assign Call Call Return return:yes Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "_auto_draw_if_interactive",
    "source_code": "def _auto_draw_if_interactive(fig, val):\n    if val and matplotlib.is_interactive() and (not fig.canvas.is_saving()) and (not fig.canvas._is_idle_drawing):\n        with fig.canvas._idle_draw_cntx():\n            fig.canvas.draw_idle()",
    "docstring": "An internal helper function for making sure that auto-redrawing works as intended in the plain python repl. Parameters ---------- fig : Figure A figure object which is assumed to be associated with a canvas",
    "type": "function",
    "file_path": "matplotlib\\lib\\matplotlib\\pyplot.py",
    "ast_data": "FunctionDef name:_auto_draw_if_interactive arg:fig arg:val arguments arg arg If BoolOp Call Call With Call Call"
  },
  {
    "library": "authlib",
    "name": "authorize_redirect",
    "source_code": "def authorize_redirect(self, request, redirect_uri=None, **kwargs):\n    rv = self.create_authorization_url(redirect_uri, **kwargs)\n    self.save_authorize_data(request, redirect_uri=redirect_uri, **rv)\n    return HttpResponseRedirect(rv['url'])",
    "docstring": "Create a HTTP Redirect for Authorization Endpoint. :param request: HTTP request instance from Django view. :param redirect_uri: Callback or redirect URI for authorization. :param kwargs: Extra parameters to include. :return: A HTTP redirect response.",
    "type": "method",
    "file_path": "authlib\\authlib\\integrations\\django_client\\apps.py",
    "ast_data": "FunctionDef name:authorize_redirect arg:self arg:request arg:redirect_uri arguments arg arg arg arg Assign Call Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "UnBatcherIterDataPipe",
    "source_code": "@functional_datapipe('unbatch')\nclass UnBatcherIterDataPipe(IterDataPipe):\n\n    def __init__(self, datapipe: IterDataPipe, unbatch_level: int=1):\n        self.datapipe = datapipe\n        self.unbatch_level = unbatch_level\n\n    def __iter__(self):\n        for element in self.datapipe:\n            yield from self._dive(element, unbatch_level=self.unbatch_level)\n\n    def _dive(self, element, unbatch_level):\n        if unbatch_level < -1:\n            raise ValueError('unbatch_level must be -1 or >= 0')\n        if unbatch_level == -1:\n            if isinstance(element, (list, DataChunk)):\n                for item in element:\n                    yield from self._dive(item, unbatch_level=-1)\n            else:\n                yield element\n        elif unbatch_level == 0:\n            yield element\n        elif isinstance(element, (list, DataChunk)):\n            for item in element:\n                yield from self._dive(item, unbatch_level=unbatch_level - 1)\n        else:\n            raise IndexError(f'unbatch_level {self.unbatch_level} exceeds the depth of the DataPipe')",
    "docstring": "Undos batching of data (functional name: `` will flatten the entire DataPipe. Example: >>> # xdoctest: +SKIP >>> from torchdata.datapipes.iter import IterableWrapper >>> source_dp = IterableWrapper([[[0, 1], [2]], [[3, 4], [5]], [[6]]]) >>> dp1 = source_dp.unbatch() >>> list(dp1) [[0, 1], [2], [3, 4], [5], [6]] >>> dp2 = source_dp.unbatch(unbatch_level=2) >>> list(dp2) [0, 1, 2, 3, 4, 5, 6]",
    "type": "class",
    "file_path": "pytorch\\torch\\utils\\data\\datapipes\\iter\\grouping.py",
    "ast_data": "ClassDef name:UnBatcherIterDataPipe FunctionDef name:__init__ arg:self arg:datapipe arg:unbatch_level arguments arg arg arg Assign Assign FunctionDef name:__iter__ arg:self arguments arg For Call FunctionDef name:_dive arg:self arg:element arg:unbatch_level arguments arg arg arg If Compare Raise Call If Compare If Call For Call If Compare If Call For Call Raise Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_infer_device_name",
    "source_code": "def _infer_device_name(graph_def):\n    device_name = None\n    for node in graph_def.node:\n        if node.device:\n            device_name = node.device\n            break\n    if device_name is None:\n        logging.warn('Failed to infer device name from partition GraphDef: none of the nodes of the GraphDef has a non-empty device name.')\n    return device_name",
    "docstring": "Infer device name from a partition GraphDef.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\debug\\lib\\debug_graphs.py",
    "ast_data": "FunctionDef name:_infer_device_name arg:graph_def arguments arg Assign For If Assign If Compare Call Return return:yes"
  },
  {
    "library": "pandas",
    "name": "_from_join_target",
    "source_code": "def _from_join_target(self, result: np.ndarray) -> ArrayLike:\n    if isinstance(self.values, BaseMaskedArray):\n        return type(self.values)(result, np.zeros(result.shape, dtype=np.bool_))\n    elif isinstance(self.values, (ArrowExtensionArray, StringArray)):\n        return type(self.values)._from_sequence(result, dtype=self.dtype)\n    return result",
    "docstring": "Cast the ndarray returned from one of the libjoin.foo_indexer functions back to type(self._data).",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\indexes\\base.py",
    "ast_data": "FunctionDef name:_from_join_target arg:self arg:result arguments arg arg If Call Return return:yes Call Call Call If Call Return return:yes Call Call Return return:yes"
  },
  {
    "library": "django",
    "name": "_check_min_num",
    "source_code": "def _check_min_num(self, obj):\n    if obj.min_num is None:\n        return []\n    elif not isinstance(obj.min_num, int):\n        return must_be('an integer', option='min_num', obj=obj, id='admin.E205')\n    else:\n        return []",
    "docstring": "Check that min_num is an integer.",
    "type": "method",
    "file_path": "django\\django\\contrib\\admin\\checks.py",
    "ast_data": "FunctionDef name:_check_min_num arg:self arg:obj arguments arg arg If Compare Return return:no If Call Return return:yes Call Return return:no"
  },
  {
    "library": "django",
    "name": "is_installed",
    "source_code": "def is_installed(self, app_name):\n    self.check_apps_ready()\n    return any((ac.name == app_name for ac in self.app_configs.values()))",
    "docstring": "Check whether an application with this name exists in the registry. app_name is the full name of the app e.g. 'django.contrib.admin'.",
    "type": "method",
    "file_path": "django\\django\\apps\\registry.py",
    "ast_data": "FunctionDef name:is_installed arg:self arg:app_name arguments arg arg Call Return return:yes Call Compare Call"
  },
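Usage via the global registry, safe once Django has been set up:

from django.apps import apps

if apps.is_installed('django.contrib.admin'):
    print('admin is available')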
  {
    "library": "sphinx",
    "name": "tabular_col_spec",
    "source_code": "class tabular_col_spec(nodes.Element):\n    pass",
    "docstring": "Node for specifying tabular columns, used for LaTeX output.",
    "type": "class",
    "file_path": "sphinx\\sphinx\\addnodes.py",
    "ast_data": "ClassDef name:tabular_col_spec"
  },
  {
    "library": "django",
    "name": "proj_version",
    "source_code": "def proj_version(self):\n    return self._get_spatialite_func('proj4_version()')",
    "docstring": "Return the version of the PROJ library used by SpatiaLite.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\db\\backends\\spatialite\\operations.py",
    "ast_data": "FunctionDef name:proj_version arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "django",
    "name": "description",
    "source_code": "@property\ndef description(self):\n    return force_str(capi.get_band_description(self._ptr))",
    "docstring": "Return the description string of the band.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\gdal\\raster\\band.py",
    "ast_data": "FunctionDef name:description arg:self arguments arg Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "copy_assets_to_destination_dir",
    "source_code": "def copy_assets_to_destination_dir(asset_filename_map, destination_dir, saved_files=None):\n    if saved_files is None:\n        saved_files = set()\n    assets_destination_dir = path_helpers.get_or_create_assets_dir(destination_dir)\n    for asset_basename, asset_source_filepath in asset_filename_map.items():\n        asset_destination_filepath = file_io.join(compat.as_bytes(assets_destination_dir), compat.as_bytes(asset_basename))\n        if file_io.file_exists(asset_source_filepath) and asset_source_filepath != asset_destination_filepath and (asset_destination_filepath not in saved_files):\n            file_io.copy(asset_source_filepath, asset_destination_filepath, overwrite=True)\n            saved_files.add(asset_destination_filepath)\n    tf_logging.info('Assets written to: %s', compat.as_text(assets_destination_dir))",
    "docstring": "Copy all assets from source path to destination path. Args: asset_filename_map: a dict of filenames used for saving the asset in the SavedModel to full paths from which the filenames were derived. destination_dir: the destination directory that assets are stored in. saved_files: a set of destination filepaths that have already been copied and will be skipped",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\saved_model\\builder_impl.py",
    "ast_data": "FunctionDef name:copy_assets_to_destination_dir arg:asset_filename_map arg:destination_dir arg:saved_files arguments arg arg arg If Compare Assign Call Assign Call For Call Assign Call Call Call If BoolOp Call Compare Compare Call Call Call Call"
  },
  {
    "library": "pandas",
    "name": "_minmax",
    "source_code": "def _minmax(func: Callable, values: np.ndarray, mask: npt.NDArray[np.bool_], *, skipna: bool=True, axis: AxisInt | None=None):\n    if not skipna:\n        if mask.any() or not values.size:\n            return libmissing.NA\n        else:\n            return func(values, axis=axis)\n    else:\n        subset = values[~mask]\n        if subset.size:\n            return func(subset, axis=axis)\n        else:\n            return libmissing.NA",
    "docstring": "Reduction for 1D masked array. Parameters ---------- func : np.min or np.max values : np.ndarray Numpy array with the values (can be of any dtype that support the operation). mask : np.ndarray[bool] Boolean numpy array (True values indicate missing values). skipna : bool, default True Whether to skip NA. axis : int, optional, default None",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\array_algos\\masked_reductions.py",
    "ast_data": "FunctionDef name:_minmax arg:func arg:values arg:mask arguments arg arg arg arg arg If If BoolOp Call Return return:yes Return return:yes Call Assign If Return return:yes Call Return return:yes"
  },
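A short demonstration of the masked min/max semantics; `_minmax` is private to pandas internals, so this import path is an assumption based on the file_path above:

import numpy as np
from pandas.core.array_algos.masked_reductions import _minmax  # private import

values = np.array([1.0, 5.0, 3.0])
mask = np.array([False, True, False])               # 5.0 is treated as missing
print(_minmax(np.max, values, mask))                # 3.0: masked value skipped
print(_minmax(np.max, values, mask, skipna=False))  # <NA>: any masked value yields NA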
  {
    "library": "pytorch",
    "name": "check_export_model_diff",
    "source_code": "def check_export_model_diff(model: torch.nn.Module | torch.jit.ScriptModule, test_input_groups: Sequence[tuple[tuple[Any, ...], Mapping[str, Any]]], export_options: _experimental.ExportOptions | None=None) -> str:\n    export_options = _experimental.ExportOptions() if export_options is None else export_options\n    jit_diff_report = _check_graph_diff(model, test_input_groups, export_options, _traced_graph_from_model)\n    if jit_diff_report:\n        return jit_diff_report\n    return _check_graph_diff(model, test_input_groups, export_options, _onnx_graph_from_model)",
    "docstring": "Verify exported model discrepancy between different groups of inputs. A graph is exported for each group of inputs. The exported graphs are then compared to each other, and discrepancies of first pair of nodes are reported. This function first checks the jit graph. If no discrepancies were found, it then checks the onnx graph. Unless otherwise specified, the jit/ONNX graph is expected to be the same, regardless of the inputs used for exporting. A discrepancy implies the graph exported is not accurate when run on other groups of inputs, which will typically results in runtime errors or mismatching output. Args: model (torch.nn.Module or torch.jit.ScriptModule): The model to be exported. test_input_groups (Sequence[Tuple[Tuple[Any, ...], Mapping[str, Any]]]): A sequence of input groups to be used to export the model. Each input group is a pair of (args, kwargs). export_options (_experimental.ExportOptions, optional): An _experimental.ExportOptions object that controls the export behavior. Returns: str: A string containing the diff of the exported models.",
    "type": "function",
    "file_path": "pytorch\\torch\\onnx\\verification.py",
    "ast_data": "FunctionDef name:check_export_model_diff arg:model arg:test_input_groups arg:export_options arguments arg arg arg Assign Compare Call Assign Call If Return return:yes Return return:yes Call"
  },
  {
    "library": "scrapy",
    "name": "parse_cachecontrol",
    "source_code": "def parse_cachecontrol(header: bytes) -> dict[bytes, bytes | None]:\n    directives = {}\n    for directive in header.split(b','):\n        key, sep, val = directive.strip().partition(b'=')\n        if key:\n            directives[key.lower()] = val if sep else None\n    return directives",
    "docstring": "Parse Cache-Control header >>> parse_cachecontrol(b'public, max-age=3600') == {b'public': None, ... b'max-age': b'3600'} True >>> parse_cachecontrol(b'') == {} True",
    "type": "function",
    "file_path": "scrapy\\scrapy\\extensions\\httpcache.py",
    "ast_data": "FunctionDef name:parse_cachecontrol arg:header arguments arg Assign For Call Assign Call Call If Assign Call Return return:yes"
  },
  {
    "library": "kornia",
    "name": "build_kwargs_from_config",
    "source_code": "def build_kwargs_from_config(config: dict[str, Any], target_func: Any) -> dict[str, Any]:\n    valid_keys = list(signature(target_func).parameters)\n    kwargs = {}\n    for key, value in config.items():\n        if key in valid_keys:\n            kwargs[key] = value\n    return kwargs",
    "docstring": "Return kwargs from config object.",
    "type": "function",
    "file_path": "kornia\\kornia\\contrib\\models\\efficient_vit\\utils\\network.py",
    "ast_data": "FunctionDef name:build_kwargs_from_config arg:config arg:target_func arguments arg arg Assign Call Call Assign For Call If Compare Assign Return return:yes"
  },
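A self-contained demo of the signature-filtering idea this helper implements; the target function and config keys here are made up for illustration:

from inspect import signature

def make_block(in_channels, out_channels, stride=1):
    return (in_channels, out_channels, stride)

config = {'in_channels': 16, 'out_channels': 32, 'stride': 2, 'unused_flag': True}
valid = set(signature(make_block).parameters)
kwargs = {k: v for k, v in config.items() if k in valid}
print(make_block(**kwargs))  # (16, 32, 2); 'unused_flag' was dropped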
  {
    "library": "pytorch",
    "name": "_get_nodes",
    "source_code": "def _get_nodes(nodes: list[Node]) -> tuple[Node, Node, Optional[Node]]:\n    conv_node, bn_node, getitem_node = (None, None, None)\n    for n in nodes:\n        if n.op != 'call_function':\n            continue\n        if _is_conv_or_conv_transpose_node(n):\n            assert conv_node is None\n            conv_node = n\n        if _is_bn_node(n):\n            assert bn_node is None\n            bn_node = n\n        if n.target == operator.getitem:\n            assert getitem_node is None\n            getitem_node = n\n    assert conv_node is not None\n    assert bn_node is not None\n    return (conv_node, bn_node, getitem_node)",
    "docstring": "Return a 3-tuple of (conv_node, bn_node, getitem_node). This asserts that the match contains exactly one of each node.",
    "type": "function",
    "file_path": "pytorch\\torch\\ao\\quantization\\pt2e\\qat_utils.py",
    "ast_data": "FunctionDef name:_get_nodes arg:nodes arguments arg Assign For If Compare If Call Compare Assign If Call Compare Assign If Compare Compare Assign Compare Compare Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "field_names",
    "source_code": "def field_names(self):\n    return tuple(self._fields.keys())",
    "docstring": "Returns the string field names for this .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\structured\\structured_tensor.py",
    "ast_data": "FunctionDef name:field_names arg:self arguments arg Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_BenchmarkRegistrar",
    "source_code": "class _BenchmarkRegistrar(type):\n\n    def __new__(mcs, clsname, base, attrs):\n        newclass = type.__new__(mcs, clsname, base, attrs)\n        if not newclass.is_abstract():\n            GLOBAL_BENCHMARK_REGISTRY.add(newclass)\n        return newclass",
    "docstring": "The Benchmark class registrar. Used by abstract Benchmark class.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\platform\\benchmark.py",
    "ast_data": "ClassDef name:_BenchmarkRegistrar FunctionDef name:__new__ arg:mcs arg:clsname arg:base arg:attrs arguments arg arg arg arg Assign Call If Call Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "get_axisline_style",
    "source_code": "def get_axisline_style(self):\n    return self._axisline_style",
    "docstring": "Return the current axisline style.",
    "type": "method",
    "file_path": "matplotlib\\lib\\mpl_toolkits\\axisartist\\axis_artist.py",
    "ast_data": "FunctionDef name:get_axisline_style arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "merge",
    "source_code": "def merge(self, x=None, y=None, ildj_map=None, kwargs=None, mapping=None):\n    if mapping is None:\n        mapping = _Mapping(x=x, y=y, ildj_map=ildj_map, kwargs=kwargs)\n    elif any((arg is not None for arg in [x, y, ildj_map, kwargs])):\n        raise ValueError('Cannot simultaneously specify mapping and individual arguments.')\n    return _Mapping(x=self._merge(self.x, mapping.x), y=self._merge(self.y, mapping.y), ildj_map=self._merge_dicts(self.ildj_map, mapping.ildj_map), kwargs=self._merge(self.kwargs, mapping.kwargs))",
    "docstring": "Returns new _Mapping with args merged with self. Args: x: . Forward. y: . Inverse. ildj_map: . This is a mapping from event_ndims to a representing the inverse log det jacobian. kwargs: Python dictionary. Extra args supplied to forward/inverse/etc functions. mapping: Instance of _Mapping to merge. Can only be specified if no other arg is specified. Returns: mapping: New instance of which has inputs merged with self. Raises: ValueError: if mapping and any other arg is not .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\distributions\\bijector_impl.py",
    "ast_data": "FunctionDef name:merge arg:self arg:x arg:y arg:ildj_map arg:kwargs arg:mapping arguments arg arg arg arg arg arg If Compare Assign Call If Call Compare Raise Call Return return:yes Call Call Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "key",
    "source_code": "@property\ndef key(self):\n    alg = self.algorithm\n    if alg in (a.value for a in random_ops_util.Algorithm):\n        return self._state_var[-1]\n    else:\n        raise ValueError(stateless_random_ops.unsupported_alg_error_msg(alg))",
    "docstring": "The 'key' part of the state of a counter-based RNG. For a counter-base RNG algorithm such as Philox and ThreeFry (as described in paper 'Parallel Random Numbers: As Easy as 1, 2, 3' [ the RNG state consists of two parts: counter and key. The output is generated via the formula: output=hash(key, counter), i.e. a hashing of the counter parametrized by the key. Two RNGs with two different keys can be thought as generating two independent random-number streams (a stream is formed by increasing the counter). Returns: A scalar which is the 'key' part of the state, if the RNG algorithm is counter-based; otherwise it raises a ValueError.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\stateful_random_ops.py",
    "ast_data": "FunctionDef name:key arg:self arguments arg Assign If Compare Return return:yes Raise Call Call"
  },
  {
    "library": "kornia",
    "name": "restore_from_patches",
    "source_code": "def restore_from_patches(self, patches: Tensor, grid_size: Tuple[int, int]=(4, 4), pad: Optional[Tuple[int, int, int, int]]=None) -> Tensor:\n    if grid_size is None:\n        grid_size = self.grid_size\n    patches_tensor = patches.view(-1, grid_size[0], grid_size[1], *patches.shape[-3:])\n    restored_tensor = concatenate(torch.chunk(patches_tensor, grid_size[0], 1), -2).squeeze(1)\n    restored_tensor = concatenate(torch.chunk(restored_tensor, grid_size[1], 1), -1).squeeze(1)\n    if pad is not None:\n        restored_tensor = fpad(restored_tensor, [-i for i in pad])\n    return restored_tensor",
    "docstring": "Restore input from patches. Example: >>> import kornia.augmentation as K >>> pas = PatchSequential(K.ColorJiggle(0.1, 0.1, 0.1, 0.1, p=1.0), patchwise_apply=False) >>> out = pas.extract_patches(torch.arange(16).view(1, 1, 4, 4), grid_size=(2, 2)) >>> pas.restore_from_patches(out, grid_size=(2, 2)) tensor([[[[ 0, 1, 2, 3], [ 4, 5, 6, 7], [ 8, 9, 10, 11], [12, 13, 14, 15]]]])",
    "type": "method",
    "file_path": "kornia\\kornia\\augmentation\\container\\patch.py",
    "ast_data": "FunctionDef name:restore_from_patches arg:self arg:patches arg:grid_size arg:pad arguments arg arg arg arg If Compare Assign Assign Call Assign Call Call Call Assign Call Call Call If Compare Assign Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_fix_offset",
    "source_code": "def _fix_offset(str: str, offset: int) -> int:\n    as_utf8 = str.encode('utf-8')\n    return len(as_utf8[:offset].decode('utf-8', errors='replace'))",
    "docstring": "Convert byte offset of into character offset. Byte offset is used for 3.11+ instruction column data. Takes things like unicode characters into consideration. Unchanged from CPython implementation.",
    "type": "function",
    "file_path": "pytorch\\torch\\_dynamo\\utils.py",
    "ast_data": "FunctionDef name:_fix_offset arg:str arg:offset arguments arg arg Assign Call Return return:yes Call Call"
  },
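A self-contained sketch of the same byte-to-character offset conversion, showing why the distinction matters for multi-byte UTF-8 characters (the helper name here just mirrors the function above):

```python
def fix_offset(s: str, byte_offset: int) -> int:
    as_utf8 = s.encode("utf-8")
    # 'replace' guards against a byte offset that lands mid-codepoint.
    return len(as_utf8[:byte_offset].decode("utf-8", errors="replace"))

s = "h\u00e9llo"             # 'é' occupies 2 bytes in UTF-8
print(fix_offset(s, 3))      # 2: byte offset 3 is character offset 2
print(fix_offset(s, 1))      # 1: ASCII prefix, the offsets agree
```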
  {
    "library": "tensorflow",
    "name": "_DawsnGrad",
    "source_code": "@ops.RegisterGradient('Dawsn')\ndef _DawsnGrad(op: ops.Operation, grad):\n    x = op.inputs[0]\n    y = op.outputs[0]\n    with ops.control_dependencies([grad]):\n        return grad * (1.0 - 2 * x * y)",
    "docstring": "Compute gradient of dawsn(x) with respect to its argument.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\math_grad.py",
    "ast_data": "FunctionDef name:_DawsnGrad arg:op arg:grad arguments arg arg Assign Assign With Call Return return:yes Call"
  },
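A sketch checking the closed form D'(x) = 1 - 2x*D(x) against autodiff; assumes `tf.math.special.dawsn` is available (TF >= 2.4):

```python
import tensorflow as tf

x = tf.constant(0.5)
with tf.GradientTape() as tape:
    tape.watch(x)
    y = tf.math.special.dawsn(x)

# Both lines should print the same value.
print(tape.gradient(y, x))    # gradient via the registered rule
print(1.0 - 2.0 * x * y)      # closed form: D'(x) = 1 - 2*x*D(x)
```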
  {
    "library": "pytorch",
    "name": "next_rendezvous",
    "source_code": "@abstractmethod\ndef next_rendezvous(self) -> RendezvousInfo:\n    pass",
    "docstring": "Main entry-point into the rendezvous barrier. Blocks until the rendezvous is complete and the current process is included in the formed worker group, or a timeout occurs, or the rendezvous was marked closed. Returns: Instance of :py:class:. Raises: RendezvousClosedError: The rendezvous is closed. RendezvousConnectionError: The connection to the rendezvous backend has failed. RendezvousStateError: The rendezvous state is corrupt. RendezvousTimeoutError: The rendezvous did not complete on time.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\elastic\\rendezvous\\api.py",
    "ast_data": "FunctionDef name:next_rendezvous arg:self arguments arg"
  },
  {
    "library": "kornia",
    "name": "save",
    "source_code": "def save(self, images: Union[Tensor, List[Tensor]], edge_maps: Optional[Union[Tensor, List[Tensor]]]=None, directory: Optional[str]=None, output_type: str='torch') -> None:\n    outputs = self.visualize(images, edge_maps, output_type)\n    self._save_outputs(images, directory, suffix='_src')\n    self._save_outputs(outputs, directory, suffix='_sr')",
    "docstring": "Save the super resolution results. Args: images: input tensor. edge_maps: detected edges. output_type: type of the output. directory: where to save outputs. output_type: backend used to generate outputs. Returns: output tensor.",
    "type": "method",
    "file_path": "kornia\\kornia\\models\\super_resolution\\base.py",
    "ast_data": "FunctionDef name:save arg:self arg:images arg:edge_maps arg:directory arg:output_type arguments arg arg arg arg arg Assign Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_zeros_diag",
    "source_code": "def _zeros_diag(self):\n    if self.shape.is_fully_defined():\n        d_shape = self.batch_shape.concatenate([self._min_matrix_dim()])\n    else:\n        d_shape = array_ops.concat([self.batch_shape_tensor(), [self._min_matrix_dim_tensor()]], axis=0)\n    return array_ops.zeros(shape=d_shape, dtype=self.dtype)",
    "docstring": "Returns the diagonal of this operator as all zeros.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\linalg\\linear_operator_zeros.py",
    "ast_data": "FunctionDef name:_zeros_diag arg:self arguments arg If Call Assign Call Call Assign Call Call Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, serialized=None, mesh_shape=None, device_coordinates=None):\n    self._serialized = serialized\n    if serialized:\n        self._parse_topology(serialized)\n    else:\n        self._mesh_shape = numpy_compat.np_asarray(mesh_shape, dtype=np.int32)\n        self._device_coordinates = numpy_compat.np_asarray(device_coordinates, dtype=np.int32)\n        if len(self._mesh_shape) != 4 or any(self._mesh_shape < 1):\n            raise ValueError(f'`mesh_shape` must be a sequence of 4 positive entries; got `mesh_shape={self._mesh_shape}`')\n        if len(self._device_coordinates.shape) != 3 or self._device_coordinates.shape[2] != len(self._mesh_shape):\n            raise ValueError('`device_coordinates` must be a rank 3 int32 array with minor dimension equal to the `mesh_shape` rankgot device_coordinates.shape={} len(device_coordinates.shape)={} device_coordinates.shape[2]={} mesh_shape={}, len(mesh_shape)={}'.format(self._device_coordinates.shape, len(self._device_coordinates.shape), self._device_coordinates.shape[2], self._mesh_shape, len(self._mesh_shape)))\n    self._topology_tasks, self._topology_devices = self._invert_topology()\n    self._missing_devices = np.argwhere(self._topology_tasks < 0)",
    "docstring": "Builds a Topology object. If is not , the topology is parsed from and the other arguments are ignored. Otherwise, the topology is computed from and . Args: serialized: A serialized , or . If not , the serialized proto is parsed to discover the topology. mesh_shape: A sequence of 4 positive integers, or . If not , the shape of the TPU topology, in number of cores. Ignored if is not . device_coordinates: A rank 3 numpy array that describes the mapping from TensorFlow TPU devices to TPU fabric coordinates, or . If specified, array is a rank 3 int32 array with shape . is the number of tasks in the TPU cluster, is the number of TPU devices per task, and is the number of axes in the TPU cluster topology. Each entry gives the -th coordinate in the topology of a task/device pair. TPU topologies are 4-dimensional, with dimensions . This arg is ignored if NoneserializedserializedNonemesh_shapeserializedNonedevice_coordinates` is not a rank 3 numpy int32 array that describes a valid coordinate mapping.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\tpu\\topology.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:serialized arg:mesh_shape arg:device_coordinates arguments arg arg arg arg Assign If Call Assign Call Assign Call If BoolOp Compare Call Call Compare Raise Call If BoolOp Compare Call Compare Call Raise Call Call Call Call Assign Call Assign Call Compare"
  },
  {
    "library": "scipy",
    "name": "_nolan_round_x_near_zeta",
    "source_code": "def _nolan_round_x_near_zeta(x0, alpha, zeta, x_tol_near_zeta):\n    if np.abs(x0 - zeta) < x_tol_near_zeta * alpha ** (1 / alpha):\n        x0 = zeta\n    return x0",
    "docstring": "Round x close to zeta for Nolan's method in [NO].",
    "type": "function",
    "file_path": "scipy\\scipy\\stats\\_levy_stable\\__init__.py",
    "ast_data": "FunctionDef name:_nolan_round_x_near_zeta arg:x0 arg:alpha arg:zeta arg:x_tol_near_zeta arguments arg arg arg arg If Compare Call Assign Return return:yes"
  },
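A plain NumPy sketch of the snapping rule: x0 is replaced by zeta whenever it lies within x_tol_near_zeta * alpha**(1/alpha) of zeta (the values here are illustrative):

```python
import numpy as np

alpha, zeta, x_tol_near_zeta = 1.5, 0.0, 0.005
threshold = x_tol_near_zeta * alpha ** (1 / alpha)   # ~0.00655 here

x0 = 1e-3
if np.abs(x0 - zeta) < threshold:
    x0 = zeta
print(x0)   # 0.0 -- snapped to zeta because |x0 - zeta| < threshold
```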
  {
    "library": "numpy",
    "name": "polysub",
    "source_code": "@array_function_dispatch(_binary_op_dispatcher)\ndef polysub(a1, a2):\n    truepoly = isinstance(a1, poly1d) or isinstance(a2, poly1d)\n    a1 = atleast_1d(a1)\n    a2 = atleast_1d(a2)\n    diff = len(a2) - len(a1)\n    if diff == 0:\n        val = a1 - a2\n    elif diff > 0:\n        zr = NX.zeros(diff, a1.dtype)\n        val = NX.concatenate((zr, a1)) - a2\n    else:\n        zr = NX.zeros(abs(diff), a2.dtype)\n        val = a1 - NX.concatenate((zr, a2))\n    if truepoly:\n        val = poly1d(val)\n    return val",
    "docstring": "Difference (subtraction) of two polynomials. .. note:: This forms part of the old polynomial API. Since version 1.4, the new polynomial API defined in is preferred. A summary of the differences can be found in the :doc:. Given two polynomials and , returns `a1a2poly1dpoly1d` object of the difference polynomial's coefficients. See Also -------- polyval, polydiv, polymul, polyadd Examples -------- .. math:: (2 x^2 + 10 x - 2) - (3 x^2 + 10 x -4) = (-x^2 + 2) >>> import numpy as np >>> np.polysub([2, 10, -2], [3, 10, -4]) array([-1, 0, 2])",
    "type": "function",
    "file_path": "numpy\\numpy\\lib\\_polynomial_impl.py",
    "ast_data": "FunctionDef name:polysub arg:a1 arg:a2 arguments arg arg Assign BoolOp Call Call Assign Call Assign Call Assign Call Call If Compare Assign If Compare Assign Call Assign Call Assign Call Call Assign Call If Assign Call Return return:yes Call"
  },
  {
    "library": "numpy",
    "name": "expandtabs",
    "source_code": "def expandtabs(self, tabsize=8):\n    return asarray(expandtabs(self, tabsize))",
    "docstring": "Return a copy of each string element where all tab characters are replaced by one or more spaces. See Also -------- char.expandtabs",
    "type": "method",
    "file_path": "numpy\\numpy\\_core\\defchararray.py",
    "ast_data": "FunctionDef name:expandtabs arg:self arg:tabsize arguments arg arg Return return:yes Call Call"
  },
  {
    "library": "scikit-learn",
    "name": "subtract",
    "source_code": "def subtract(self, y: Array | complex, /, copy: bool | None=None, xp: ModuleType | None=None) -> Array:\n    return self._op(_AtOp.SUBTRACT, operator.isub, operator.sub, y, copy=copy, xp=xp)",
    "docstring": "Apply `` and return the updated array.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\externals\\array_api_extra\\_lib\\_at.py",
    "ast_data": "FunctionDef name:subtract arg:copy arg:xp arguments arg arg arg arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "MinMaxNorm",
    "source_code": "class MinMaxNorm(Constraint):\n\n    def __init__(self, min_value=0.0, max_value=1.0, rate=1.0, axis=0):\n        self.min_value = min_value\n        self.max_value = max_value\n        self.rate = rate\n        self.axis = axis\n\n    @doc_controls.do_not_generate_docs\n    def __call__(self, w):\n        norms = backend.sqrt(math_ops.reduce_sum(math_ops.square(w), axis=self.axis, keepdims=True))\n        desired = self.rate * backend.clip(norms, self.min_value, self.max_value) + (1 - self.rate) * norms\n        return w * (desired / (backend.epsilon() + norms))\n\n    @doc_controls.do_not_generate_docs\n    def get_config(self):\n        return {'min_value': self.min_value, 'max_value': self.max_value, 'rate': self.rate, 'axis': self.axis}",
    "docstring": "MinMaxNorm weight constraint. Constrains the weights incident to each hidden unit to have the norm between a lower bound and an upper bound. Also available via the shortcut function . Args: min_value: the minimum norm for the incoming weights. max_value: the maximum norm for the incoming weights. rate: rate for enforcing the constraint: weights will be rescaled to yield . Effectively, this means that rate=1.0 stands for strict enforcement of the constraint, while rate<1.0 means that weights will be rescaled at each step to slowly move towards a value inside the desired interval. axis: integer, axis along which to calculate weight norms. For instance, in a layer the weight matrix has shape , set to to constrain each weight vector of length . In a layer with , the weight tensor has shape , set to to constrain the weights of each filter tensor of size .",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\constraints.py",
    "ast_data": "ClassDef name:MinMaxNorm FunctionDef name:__init__ arg:self arg:min_value arg:max_value arg:rate arg:axis arguments arg arg arg arg arg Assign Assign Assign Assign FunctionDef name:__call__ arg:self arg:w arguments arg arg Assign Call Call Call Assign Call Return return:yes Call FunctionDef name:get_config arg:self arguments arg Return return:yes"
  },
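A NumPy re-derivation of the constraint (not the Keras code path) to make the rescaling rule concrete; with rate=1.0 every column norm is clipped into [min_value, max_value]:

```python
import numpy as np

rng = np.random.default_rng(0)
w = rng.normal(size=(3, 4)) * 5.0          # deliberately large norms
min_value, max_value, rate, eps = 0.0, 1.0, 1.0, 1e-7

norms = np.sqrt(np.sum(np.square(w), axis=0, keepdims=True))
desired = rate * np.clip(norms, min_value, max_value) + (1 - rate) * norms
w_constrained = w * desired / (eps + norms)

print(np.linalg.norm(w_constrained, axis=0))   # every column norm now <= 1.0
```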
  {
    "library": "pytorch",
    "name": "add_inference_rule",
    "source_code": "@register_inference_rule(torch.add)\n@register_inference_rule(operator.add)\ndef add_inference_rule(n: Node):\n    assert isinstance(n.args[0], Node)\n    assert isinstance(n.args[1], Node)\n    t1 = n.args[0].type\n    t2 = n.args[1].type\n    if t1 == int and isinstance(t2, TensorType):\n        n.type = t2\n        return n.type\n    elif t2 == int and isinstance(t1, TensorType):\n        n.type = t1\n        return n.type\n    new_t1, new_t2 = broadcast_types(t1, t2)\n    if new_t1 != t1 or new_t2 != t2:\n        n.meta['broadcast'] = True\n        n.meta[str(n.args[0])] = new_t1\n        n.meta[str(n.args[1])] = new_t2\n    else:\n        n.meta['broadcast'] = False\n    new_t1 = t1 if not n.meta['broadcast'] else new_t1\n    new_t2 = t2 if not n.meta['broadcast'] else new_t2\n    if is_consistent(new_t1, new_t2):\n        if is_more_precise(new_t1, new_t2):\n            n.type = new_t2\n        else:\n            n.type = new_t1\n        return n.type\n    else:\n        raise TypeError(f'Cannot add arguments {n.args[0]} ({n.args[0].type}) and {n.args[1]} ({n.args[1].type}) in node {n}. Types should match ')",
    "docstring": "Apply the addition inference rule. This includes: - scalar addition - broadcasting semantics Note that we always return the least precise type between the operands (after applying broadcasting) to be the final type of the operation Note that we do not modify the operand types themselves after applying broadcasting to them. We only use them to calculate the final type",
    "type": "function",
    "file_path": "pytorch\\torch\\fx\\experimental\\graph_gradual_typechecker.py",
    "ast_data": "FunctionDef name:add_inference_rule arg:n arguments arg Call Call Assign Assign If BoolOp Compare Call Assign Return return:yes If BoolOp Compare Call Assign Return return:yes Assign Call If BoolOp Compare Compare Assign Assign Call Assign Call Assign Assign Assign If Call If Call Assign Assign Return return:yes Raise Call Call Call"
  },
  {
    "library": "kornia",
    "name": "tx",
    "source_code": "@property\ndef tx(self) -> Tensor:\n    return self.extrinsics[..., 0, -1]",
    "docstring": "Return the x-coordinate of the translation vector. Returns: tensor of shape :math:.",
    "type": "method",
    "file_path": "kornia\\kornia\\geometry\\camera\\pinhole.py",
    "ast_data": "FunctionDef name:tx arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "dataset_is_infinite",
    "source_code": "def dataset_is_infinite(dataset):\n    if ops.executing_eagerly_outside_functions():\n        return math_ops.equal(cardinality.cardinality(dataset), cardinality.INFINITE)\n    else:\n        dataset_size = K.get_session().run(cardinality.cardinality(dataset))\n        return dataset_size == cardinality.INFINITE",
    "docstring": "True if the passed dataset is infinite.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\utils\\tf_utils.py",
    "ast_data": "FunctionDef name:dataset_is_infinite arg:dataset arguments arg If Call Return return:yes Call Call Assign Call Call Call Return return:yes Compare"
  },
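An eager-mode sketch of the same cardinality test using the public `tf.data.experimental` API:

```python
import tensorflow as tf

finite = tf.data.Dataset.range(4)
infinite = tf.data.Dataset.range(4).repeat()   # no count -> repeats forever

inf = tf.data.experimental.INFINITE_CARDINALITY
print(tf.data.experimental.cardinality(finite) == inf)    # False
print(tf.data.experimental.cardinality(infinite) == inf)  # True
```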
  {
    "library": "tensorflow",
    "name": "_should_act_as_resource_variable",
    "source_code": "def _should_act_as_resource_variable(self):\n    pass",
    "docstring": "Pass resource_variable_ops.is_resource_variable check.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\values.py",
    "ast_data": "FunctionDef name:_should_act_as_resource_variable arg:self arguments arg"
  },
  {
    "library": "tensorflow",
    "name": "with_dependencies",
    "source_code": "@abc.abstractmethod\ndef with_dependencies(self, checks):\n    pass",
    "docstring": "Add dependencies to a _LayerBroadcaster. Args: checks: a list of ops that need to be run before any tensors from the Broadcaster are used. Returns: a copy of this _LayerBroadcaster with dependencies added.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\dynamic_ragged_shape.py",
    "ast_data": "FunctionDef name:with_dependencies arg:self arg:checks arguments arg arg"
  },
  {
    "library": "pytorch",
    "name": "_reap_worker",
    "source_code": "@abc.abstractmethod\ndef _reap_worker(self, worker_id: Any) -> bool:\n    pass",
    "docstring": "Reaps the given worker. Returns True if the worker has been successfully reaped, False otherwise. If any uncaught exception is thrown from this method, the worker is considered reaped and all associated timers will be removed.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\elastic\\timer\\api.py",
    "ast_data": "FunctionDef name:_reap_worker arg:self arg:worker_id arguments arg arg"
  },
  {
    "library": "matplotlib",
    "name": "stale",
    "source_code": "@property\ndef stale(self):\n    return self._stale",
    "docstring": "Whether the artist is 'stale' and needs to be re-drawn for the output to match the internal state of the artist.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\artist.py",
    "ast_data": "FunctionDef name:stale arg:self arguments arg Return return:yes"
  },
  {
    "library": "django",
    "name": "Literal",
    "source_code": "class Literal(TokenBase):\n    id = 'literal'\n    lbp = 0\n\n    def __init__(self, value):\n        self.value = value\n\n    def display(self):\n        return repr(self.value)\n\n    def nud(self, parser):\n        return self\n\n    def eval(self, context):\n        return self.value\n\n    def __repr__(self):\n        return '(%s %r)' % (self.id, self.value)",
    "docstring": "A basic self-resolvable object similar to a Django template variable.",
    "type": "class",
    "file_path": "django\\django\\template\\smartif.py",
    "ast_data": "ClassDef name:Literal Assign Assign FunctionDef name:__init__ arg:self arg:value arguments arg arg Assign FunctionDef name:display arg:self arguments arg Return return:yes Call FunctionDef name:nud arg:self arg:parser arguments arg arg Return return:yes FunctionDef name:eval arg:self arg:context arguments arg arg Return return:yes FunctionDef name:__repr__ arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "add_weight",
    "source_code": "@doc_controls.for_subclass_implementers\ndef add_weight(self, name, shape=(), aggregation=variables_module.VariableAggregation.SUM, synchronization=variables_module.VariableSynchronization.ON_READ, initializer=None, dtype=None):\n    if distribute_lib.has_strategy():\n        strategy = distribute_lib.get_strategy()\n    else:\n        strategy = None\n    if backend.is_tpu_strategy(strategy):\n        synchronization = variables_module.VariableSynchronization.ON_WRITE\n    with ops.init_scope():\n        return super(Metric, self).add_weight(name=name, shape=shape, dtype=self._dtype if dtype is None else dtype, trainable=False, initializer=initializer, collections=[], synchronization=synchronization, aggregation=aggregation)",
    "docstring": "Adds state variable. Only for use by subclasses.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\metrics.py",
    "ast_data": "FunctionDef name:add_weight arg:self arg:name arg:shape arg:aggregation arg:synchronization arg:initializer arg:dtype arguments arg arg arg arg arg arg arg If Call Assign Call Assign If Call Assign With Call Return return:yes Call Call Compare"
  },
  {
    "library": "pandas",
    "name": "_execute_insert",
    "source_code": "def _execute_insert(self, conn, keys: list[str], data_iter) -> int:\n    data = [dict(zip(keys, row)) for row in data_iter]\n    result = self.pd_sql.execute(self.table.insert(), data)\n    return result.rowcount",
    "docstring": "Execute SQL statement inserting data Parameters ---------- conn : sqlalchemy.engine.Engine or sqlalchemy.engine.Connection keys : list of str Column names data_iter : generator of list Each item contains a list of values to be inserted",
    "type": "method",
    "file_path": "pandas\\pandas\\io\\sql.py",
    "ast_data": "FunctionDef name:_execute_insert arg:self arg:conn arg:keys arg:data_iter arguments arg arg arg arg Assign Call Call Assign Call Call Return return:yes"
  },
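A plain-Python sketch of the row packing this method performs before handing rows to SQLAlchemy: each row from `data_iter` is zipped with the column names to form keyword parameters:

```python
keys = ["id", "name"]
data_iter = [(1, "ada"), (2, "grace")]

data = [dict(zip(keys, row)) for row in data_iter]
print(data)   # [{'id': 1, 'name': 'ada'}, {'id': 2, 'name': 'grace'}]
```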
  {
    "library": "scipy",
    "name": "residual",
    "source_code": "def residual(self, x):\n    return (x - self.lb, self.ub - x)",
    "docstring": "Calculate the residual (slack) between the input and the bounds For a bound constraint of the form:: lb <= x <= ub the lower and upper residuals between and the bounds are values `` is out of bounds. Parameters ---------- x: array_like Vector of independent variables Returns ------- sl, sb : array-like The lower and upper residuals",
    "type": "method",
    "file_path": "scipy\\scipy\\optimize\\_constraints.py",
    "ast_data": "FunctionDef name:residual arg:self arg:x arguments arg arg Return return:yes"
  },
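A usage sketch via the public `scipy.optimize.Bounds` class, which exposes this method; negative entries in either residual flag an out-of-bounds component:

```python
import numpy as np
from scipy.optimize import Bounds

b = Bounds(lb=np.array([0.0, 0.0]), ub=np.array([1.0, 1.0]))
sl, su = b.residual(np.array([0.3, 1.2]))
print(sl)   # [0.3 1.2]   -- x - lb
print(su)   # [ 0.7 -0.2] -- ub - x; the negative entry means x[1] > ub[1]
```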
  {
    "library": "seaborn",
    "name": "set_xlabels",
    "source_code": "def set_xlabels(self, label=None, clear_inner=True, **kwargs):\n    if label is None:\n        label = self._x_var\n    for ax in self._bottom_axes:\n        ax.set_xlabel(label, **kwargs)\n    if clear_inner:\n        for ax in self._not_bottom_axes:\n            ax.set_xlabel('')\n    return self",
    "docstring": "Label the x axis on the bottom row of the grid.",
    "type": "method",
    "file_path": "seaborn\\seaborn\\axisgrid.py",
    "ast_data": "FunctionDef name:set_xlabels arg:self arg:label arg:clear_inner arguments arg arg arg arg If Compare Assign For Call If For Call Return return:yes"
  },
  {
    "library": "kornia",
    "name": "_pe_encoding",
    "source_code": "def _pe_encoding(self, coords: Tensor) -> Tensor:\n    coords = 2 * coords - 1\n    coords = coords @ self.positional_encoding_gaussian_matrix\n    coords = 2 * pi * coords\n    return concatenate([sin(coords), cos(coords)], dim=-1)",
    "docstring": "Positionally encode points that are normalized to [0,1].",
    "type": "method",
    "file_path": "kornia\\kornia\\contrib\\models\\sam\\architecture\\prompt_encoder.py",
    "ast_data": "FunctionDef name:_pe_encoding arg:self arg:coords arguments arg arg Assign Assign Assign Return return:yes Call Call Call"
  },
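A standalone torch sketch of the random-Fourier positional encoding above; `gaussian_matrix` here stands in for the module's registered buffer:

```python
import math
import torch

num_pos_feats = 64
gaussian_matrix = torch.randn(2, num_pos_feats)   # (coord_dim, feats)

coords = torch.rand(5, 2)             # points normalized to [0, 1]
coords = 2 * coords - 1               # shift to [-1, 1]
coords = coords @ gaussian_matrix     # project through random frequencies
coords = 2 * math.pi * coords
pe = torch.cat([coords.sin(), coords.cos()], dim=-1)
print(pe.shape)                       # torch.Size([5, 128])
```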
  {
    "library": "pytorch",
    "name": "_init_shard_metadata",
    "source_code": "def _init_shard_metadata(self, numel_padded: int, unsharded_start_idx: int, unsharded_end_idx: int) -> None:\n    flat_param = self.flat_param\n    flat_param._sharded_size = flat_param.size()\n    sharded_flat_param_numel = flat_param.numel()\n    _p_assert(unsharded_start_idx >= 0 and unsharded_start_idx <= unsharded_end_idx, f'unsharded_start_idx: {unsharded_start_idx} unsharded_end_idx: {unsharded_end_idx}')\n    _p_assert(numel_padded <= sharded_flat_param_numel, f'numel_padded: {numel_padded} sharded_flat_param_numel: {sharded_flat_param_numel}')\n    shard_param_infos = self._get_shard_metadata(unsharded_start_idx, unsharded_end_idx)\n    assert len(shard_param_infos) == flat_param._num_params, f'Expects length {flat_param._num_params} but got {len(shard_param_infos)}'\n    flat_param._shard_param_infos = shard_param_infos\n    flat_param._shard_numel_padded = numel_padded",
    "docstring": "Initialize shard-related metadata for this rank's shard of the flat parameter. This includes `` 's data is the sharded flat parameter.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\fsdp\\_flat_param.py",
    "ast_data": "FunctionDef name:_init_shard_metadata arg:self arg:numel_padded arg:unsharded_start_idx arg:unsharded_end_idx arguments arg arg arg arg Assign Assign Call Assign Call Call BoolOp Compare Compare Call Compare Assign Call Compare Call Call Assign Assign"
  },
  {
    "library": "pytorch",
    "name": "_set_inputs",
    "source_code": "def _set_inputs(self) -> None:\n    depends_on_gradient = self._any_version_depends_on_gradient()\n    produces_gradient: set[TensorAndID] = set()\n    for node in reversed(self._data_flow_graph.flow_nodes):\n        tensors = {(key, version) for key, (_, version) in node.inputs.items()}\n        tensors |= node.outputs.items()\n        if any((self._categories.get(*i) in (Category.GRADIENT, Category.PARAMETER) or i in produces_gradient for i in tensors)):\n            produces_gradient |= tensors\n    input_candidates = produces_gradient.copy()\n    for node in self._data_flow_graph.flow_nodes:\n        if RecordScope.BACKWARD_FUNCTION in get_scopes(node._event):\n            input_candidates -= set(node.outputs.items())\n    for key, version in input_candidates:\n        if key.id not in depends_on_gradient:\n            self._categories.setdefault_by_version(key, version, Category.INPUT)",
    "docstring": "Mark inputs based on which Tensors are updated using gradients. The process for differentiating between inputs and activations is more involved. Most Tensors in a training loop depend on at least one gradient: parameters depend on them through updates, and activations and optimizer state depend on them transitively through parameters. Critically, we do not need to know which Tensors are parameters to apply this method; we can simply walk the data flow graph to build the set of all values which depend on a gradient and then obtain the set of inputs from the conjugate set. There is, however, one hiccup. The first time we see a parameter is generally on the forward pass of the first step. We know from inspection of the data flow graph that v1 of that Tensor depends on a gradient (provided we profile an optimizer step), but not v0. To address this problem we weaken the definition of \"depends on a gradient\" to \"any version of this Tensor depends on a gradient\", which in turn strengthens the criteria for the input set enough to filter the activations in the forward pass of the first step.",
    "type": "method",
    "file_path": "pytorch\\torch\\profiler\\_memory_profiler.py",
    "ast_data": "FunctionDef name:_set_inputs arg:self arguments arg Assign Call Call For Call Assign Call Call If Call BoolOp Compare Call Compare Assign Call For If Compare Call Call Call For If Compare Call"
  },
  {
    "library": "pytorch",
    "name": "patch_method",
    "source_code": "def patch_method(self, cls: type, name: str, new_fn: Callable, deduplicate: bool=True):\n    new_fn.__fx_already_patched = deduplicate\n    orig_fn = getattr(cls, name)\n    if getattr(orig_fn, '__fx_already_patched', False):\n        return\n    self.patches_made.append(_PatchedFnSetAttr(cls, name, orig_fn, new_fn))\n    self.patches_made[-1].patch()",
    "docstring": "Replace object_or_dict.name with new_fn until we exit the context manager.",
    "type": "method",
    "file_path": "pytorch\\torch\\fx\\_symbolic_trace.py",
    "ast_data": "FunctionDef name:patch_method arg:self arg:cls arg:name arg:new_fn arg:deduplicate arguments arg arg arg arg arg Assign Assign Call If Call Return return:no Call Call Call"
  },
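The underlying patch/restore pattern, reduced to plain Python (the fx version additionally records each patch so a context manager can undo it, and uses `__fx_already_patched` to deduplicate):

```python
class C:
    def hello(self) -> str:
        return "original"

orig = C.hello                     # remember the original for restore
C.hello = lambda self: "patched"   # setattr-style patch, as in patch_method
print(C().hello())                 # patched
C.hello = orig                     # what reverting the patch would do
print(C().hello())                 # original
```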
  {
    "library": "pytorch",
    "name": "_RendezvousContext",
    "source_code": "class _RendezvousContext:\n    node: _NodeDesc\n    state: _RendezvousState\n    settings: RendezvousSettings\n\n    def __init__(self, node: _NodeDesc, state: _RendezvousState, settings: RendezvousSettings) -> None:\n        self.node = node\n        self.state = state\n        self.settings = settings",
    "docstring": "Holds the context of the rendezvous. Attributes: node: The node descriptor associated with the current rendezvous handler instance. state: The current state of the rendezvous. settings: The rendezvous settings.",
    "type": "class",
    "file_path": "pytorch\\torch\\distributed\\elastic\\rendezvous\\dynamic_rendezvous.py",
    "ast_data": "ClassDef name:_RendezvousContext FunctionDef name:__init__ arg:self arg:node arg:state arg:settings arguments arg arg arg arg Assign Assign Assign"
  },
  {
    "library": "pandas",
    "name": "ids",
    "source_code": "@property\ndef ids(self) -> Index:\n    return self.data.columns",
    "docstring": "Column names. Returns ------- ids : Index DataFrame's column names.",
    "type": "method",
    "file_path": "pandas\\pandas\\io\\formats\\info.py",
    "ast_data": "FunctionDef name:ids arg:self arguments arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_to",
    "source_code": "def _to(self, device, non_blocking=False):\n    if self.device == device:\n        return self\n    if device.type == 'cpu':\n        pin_memory = non_blocking and self.device.type in ('cuda', torch._C._get_privateuse1_backend_name())\n        untyped_storage = torch.empty(self.nbytes(), dtype=torch.uint8, device=device, pin_memory=pin_memory).untyped_storage()\n        untyped_storage.copy_(self, non_blocking)\n        return untyped_storage\n    device_module = getattr(torch, device.type, None)\n    assert device_module is not None, f'{device.type.upper()} device module is not loaded'\n    with device_module.device(device):\n        if self.is_sparse and hasattr(device_module, 'sparse'):\n            new_type = getattr(device_module.sparse, self.__class__.__name__)\n            indices = getattr(torch.Tensor._indices(self), device.type)(device, non_blocking)\n            values = getattr(torch.Tensor._values(self), device.type)(device, non_blocking)\n            return new_type(indices, values, self.size())\n        else:\n            assert not self.is_sparse, f'sparse storage is not supported for {device.type.upper()} tensors'\n            untyped_storage = torch.UntypedStorage(self.size(), device=device)\n            untyped_storage.copy_(self, non_blocking)\n            return untyped_storage",
    "docstring": "Returns a copy of this object in device memory. If this object is already on the correct device, then no copy is performed and the original object is returned. Args: device (int): The destination device. non_blocking (bool): If `` and the source is in pinned memory, the copy will be asynchronous with respect to the host. Otherwise, the argument has no effect.",
    "type": "function",
    "file_path": "pytorch\\torch\\_utils.py",
    "ast_data": "FunctionDef name:_to arg:self arg:device arg:non_blocking arguments arg arg arg If Compare Return return:yes If Compare Assign BoolOp Compare Call Assign Call Call Call Call Return return:yes Assign Call Compare Call With Call If BoolOp Call Assign Call Assign Call Call Call Assign Call Call Call Return return:yes Call Call Call Assign Call Call Call Return return:yes"
  },
  {
    "library": "pandas",
    "name": "_ints_to_td64ns",
    "source_code": "def _ints_to_td64ns(data, unit: str='ns') -> tuple[np.ndarray, bool]:\n    copy_made = False\n    unit = unit if unit is not None else 'ns'\n    if data.dtype != np.int64:\n        data = data.astype(np.int64)\n        copy_made = True\n    if unit != 'ns':\n        dtype_str = f'timedelta64[{unit}]'\n        data = data.view(dtype_str)\n        data = astype_overflowsafe(data, dtype=TD64NS_DTYPE)\n        copy_made = True\n    else:\n        data = data.view('timedelta64[ns]')\n    return (data, copy_made)",
    "docstring": "Convert an ndarray with integer-dtype to timedelta64[ns] dtype, treating the integers as multiples of the given timedelta unit. Parameters ---------- data : numpy.ndarray with integer-dtype unit : str, default \"ns\" The timedelta unit to treat integers as multiples of. Returns ------- numpy.ndarray : timedelta64[ns] array converted from data bool : whether a copy was made",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\arrays\\timedeltas.py",
    "ast_data": "FunctionDef name:_ints_to_td64ns arg:data arg:unit arguments arg arg Assign Assign Compare If Compare Assign Call Assign If Compare Assign Assign Call Assign Call Assign Assign Call Return return:yes"
  },
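A NumPy sketch of the conversion path (using a plain `astype` where pandas uses its overflow-safe helper): the int64 values are first viewed as the requested unit, then converted to nanoseconds:

```python
import numpy as np

data = np.array([1, 2, 3], dtype=np.int64)

td = data.view("timedelta64[s]")        # reinterpret ints as seconds
td_ns = td.astype("timedelta64[ns]")    # pandas uses astype_overflowsafe here
print(td_ns)    # [1000000000 2000000000 3000000000] nanoseconds
```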
  {
    "library": "tensorflow",
    "name": "_print_tensor_info",
    "source_code": "def _print_tensor_info(tensor_info, indent=0):\n    indent_str = '  ' * indent\n\n    def in_print(s):\n        print(indent_str + s)\n    in_print('    dtype: ' + {value: key for key, value in types_pb2.DataType.items()}[tensor_info.dtype])\n    if tensor_info.tensor_shape.unknown_rank:\n        shape = 'unknown_rank'\n    else:\n        dims = [str(dim.size) for dim in tensor_info.tensor_shape.dim]\n        shape = ', '.join(dims)\n        shape = '(' + shape + ')'\n    in_print('    shape: ' + shape)\n    in_print('    name: ' + tensor_info.name)",
    "docstring": "Prints details of the given tensor_info. Args: tensor_info: TensorInfo object to be printed. indent: How far (in increments of 2 spaces) to indent each line output",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\tools\\saved_model_cli.py",
    "ast_data": "FunctionDef name:_print_tensor_info arg:tensor_info arg:indent arguments arg arg Assign FunctionDef name:in_print arg:s arguments arg Call Call Call If Assign Assign Call Assign Call Assign Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_needs_no_arguments",
    "source_code": "def _needs_no_arguments(python_callable):\n    num_arguments = len(tf_inspect.getargspec(python_callable).args)\n    if not tf_inspect.isfunction(python_callable) and (not isinstance(python_callable, functools.partial)):\n        num_arguments -= 1\n    return num_arguments == len(tf_inspect.getargspec(python_callable).defaults or [])",
    "docstring": "Returns true if the callable needs no arguments to call.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\variable_scope.py",
    "ast_data": "FunctionDef name:_needs_no_arguments arg:python_callable arguments arg Assign Call Call If BoolOp Call Call Return return:yes Compare Call BoolOp Call"
  },
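An equivalent check written with the modern `inspect.signature` API (the function above uses the legacy getargspec shim): a callable needs no arguments when every parameter has a default or is *args/**kwargs:

```python
import inspect

def f(a=1, b=2):
    return a + b

def needs_no_arguments(fn) -> bool:
    return all(
        p.default is not inspect.Parameter.empty
        or p.kind in (inspect.Parameter.VAR_POSITIONAL,
                      inspect.Parameter.VAR_KEYWORD)
        for p in inspect.signature(fn).parameters.values()
    )

print(needs_no_arguments(f))            # True
print(needs_no_arguments(lambda x: x))  # False
```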
  {
    "library": "pytorch",
    "name": "add_compilation_metrics_to_chromium",
    "source_code": "def add_compilation_metrics_to_chromium(c: CompilationMetrics) -> None:\n    event_logger = get_chromium_event_logger()\n    event_name = event_logger.get_outermost_event()\n    if not event_name:\n        return\n    event_logger.add_event_data(event_name=event_name, frame_key=c.frame_key, co_name=c.co_name, co_filename=c.co_filename, co_firstlineno=c.co_firstlineno, cache_size=c.cache_size, accumulated_cache_size=c.accumulated_cache_size, guard_count=c.guard_count, shape_env_guard_count=c.shape_env_guard_count, graph_op_count=c.graph_op_count, graph_node_count=c.graph_node_count, graph_input_count=c.graph_input_count, fail_type=c.fail_type, fail_reason=c.fail_reason, fail_user_frame_filename=c.fail_user_frame_filename, fail_user_frame_lineno=c.fail_user_frame_lineno, non_compliant_ops=list(c.non_compliant_ops) if c.non_compliant_ops is not None else None, compliant_custom_ops=list(c.compliant_custom_ops) if c.compliant_custom_ops is not None else None, restart_reasons=list(c.restart_reasons) if c.restart_reasons is not None else None, dynamo_time_before_restart_s=c.dynamo_time_before_restart_s, has_guarded_code=c.has_guarded_code, dynamo_config=c.dynamo_config)",
    "docstring": "These are the common fields in CompilationMetrics that existed before metrics_context, and aren't set by MetricsContext.set(). We add the subset of them that make sense in /toplevel events in PT2 Compile Events directly. If you're tempted to add to this list, consider using CompileEventLogger.compilation_metric() instead, which will automatically also add it to tlparse and PT2 Compile Events. TODO: Get rid of this function and replace it with CompileEventLogger directly instead.",
    "type": "function",
    "file_path": "pytorch\\torch\\_dynamo\\utils.py",
    "ast_data": "FunctionDef name:add_compilation_metrics_to_chromium arg:c arguments arg Assign Call Assign Call If Return return:no Call Compare Call Compare Call Compare Call"
  },
  {
    "library": "tensorflow",
    "name": "stop",
    "source_code": "def stop(self):\n    self._should_worker_thread_run = False",
    "docstring": "Ensure the worker thread is closed.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\coordinator\\cluster_coordinator.py",
    "ast_data": "FunctionDef name:stop arg:self arguments arg Assign"
  },
  {
    "library": "scrapy",
    "name": "__init__",
    "source_code": "def __init__(uri, *, feed_options=None):\n    pass",
    "docstring": "Initialize the storage with the parameters given in the URI and the feed-specific options (see :setting:)",
    "type": "method",
    "file_path": "scrapy\\scrapy\\extensions\\feedexport.py",
    "ast_data": "FunctionDef name:__init__ arg:uri arguments arg arg"
  },
  {
    "library": "pytorch",
    "name": "build",
    "source_code": "@staticmethod\ndef build(tx: 'InstructionTranslatorBase', value: Any, source: Optional[Source]=None) -> Any:\n    if source is None:\n        return builder.SourcelessBuilder.create(tx, value)\n    else:\n        return variables.LazyVariableTracker.create(value, source)",
    "docstring": "Create a new VariableTracker from a value and optional Source",
    "type": "method",
    "file_path": "pytorch\\torch\\_dynamo\\variables\\base.py",
    "ast_data": "FunctionDef name:build arg:tx arg:value arg:source arguments arg arg arg If Compare Return return:yes Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "assign_sub",
    "source_code": "def assign_sub(self, var, value, use_locking=False, name=None, read_value=True):\n    with distribute_lib.enter_or_assert_strategy(var.distribute_strategy):\n        if distribute_lib.in_cross_replica_context() and (not values_util.in_replica_update_context()):\n            values_util.mark_as_unsaveable()\n            return values_util.on_read_assign_sub_cross_replica(var, value, read_value=read_value)\n        else:\n            return values_util.on_write_assign_sub(var, value, use_locking=use_locking, name=name, read_value=read_value)",
    "docstring": "Subtracts a value from this variable.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\values.py",
    "ast_data": "FunctionDef name:assign_sub arg:self arg:var arg:value arg:use_locking arg:name arg:read_value arguments arg arg arg arg arg arg With Call If BoolOp Call Call Call Return return:yes Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "experimental_distribute_dataset",
    "source_code": "def experimental_distribute_dataset(self, dataset, options=None):\n    return super(OneDeviceStrategy, self).experimental_distribute_dataset(dataset, options)",
    "docstring": "Distributes a tf.data.Dataset instance provided via dataset. In this case, there is only one device, so this is only a thin wrapper around the input dataset. It will, however, prefetch the input data to the specified device. The returned distributed dataset can be iterated over similar to how regular datasets can. NOTE: Currently, the user cannot add any more transformations to a distributed dataset. Example: Args: dataset: to be prefetched to device. options: used to control options on how this dataset is distributed. Returns: A \"distributed \" that the caller can iterate over.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\one_device_strategy.py",
    "ast_data": "FunctionDef name:experimental_distribute_dataset arg:self arg:dataset arg:options arguments arg arg arg Return return:yes Call Call"
  },
  {
    "library": "scipy",
    "name": "companion",
    "source_code": "def companion(a):\n    a = np.atleast_1d(a)\n    n = a.shape[-1]\n    if n < 2:\n        raise ValueError('The length of `a` along the last axis must be at least 2.')\n    if np.any(a[..., 0] == 0):\n        raise ValueError('The first coefficient(s) of `a` (i.e. elements of `a[..., 0]`) must not be zero.')\n    first_row = -a[..., 1:] / (1.0 * a[..., 0:1])\n    c = np.zeros(a.shape[:-1] + (n - 1, n - 1), dtype=first_row.dtype)\n    c[..., 0, :] = first_row\n    c[..., np.arange(1, n - 1), np.arange(0, n - 2)] = 1\n    return c",
    "docstring": "Create a companion matrix. Create the companion matrix [1]_ associated with the polynomial whose coefficients are given in . Parameters ---------- a : (..., N) array_like 1-D array of polynomial coefficients. The length of must be at least two, and `c`a.shape[-1] >> from scipy.linalg import companion >>> companion([1, -10, 31, -30]) array([[ 10., -31., 30.], [ 1., 0., 0.], [ 0., 1., 0.]])",
    "type": "function",
    "file_path": "scipy\\scipy\\linalg\\_special_matrices.py",
    "ast_data": "FunctionDef name:companion arg:a arguments arg Assign Call Assign If Compare Raise Call If Call Compare Raise Call Assign Assign Call Assign Assign Call Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "get_rotation_mode",
    "source_code": "def get_rotation_mode(self):\n    return self._rotation_mode",
    "docstring": "Return the text rotation mode.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\text.py",
    "ast_data": "FunctionDef name:get_rotation_mode arg:self arguments arg Return return:yes"
  },
  {
    "library": "sphinx",
    "name": "global_toctree_for_doc",
    "source_code": "def global_toctree_for_doc(env: BuildEnvironment, docname: str, builder: Builder, collapse: bool=False, includehidden: bool=True, maxdepth: int=0, titles_only: bool=False) -> Element | None:\n    resolved = (_resolve_toctree(env, docname, builder, toctree_node, prune=True, maxdepth=int(maxdepth), titles_only=titles_only, collapse=collapse, includehidden=includehidden, tags=builder.tags) for toctree_node in env.master_doctree.findall(addnodes.toctree))\n    toctrees = [toctree for toctree in resolved if toctree is not None]\n    if not toctrees:\n        return None\n    result = toctrees[0]\n    for toctree in toctrees[1:]:\n        result.extend(toctree.children)\n    return result",
    "docstring": "Get the global ToC tree at a given document. This gives the global ToC, with all ancestors and their siblings.",
    "type": "function",
    "file_path": "sphinx\\sphinx\\environment\\adapters\\toctree.py",
    "ast_data": "FunctionDef name:global_toctree_for_doc arg:env arg:docname arg:builder arg:collapse arg:includehidden arg:maxdepth arg:titles_only arguments arg arg arg arg arg arg arg Assign Call Call Call Assign Compare If Return return:no Assign For Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_BoolCodec",
    "source_code": "class _BoolCodec:\n\n    def can_encode(self, pyobj):\n        return isinstance(pyobj, bool)\n\n    def do_encode(self, bool_value, encode_fn):\n        del encode_fn\n        value = struct_pb2.StructuredValue()\n        value.bool_value = bool_value\n        return value\n\n    def can_decode(self, value):\n        return value.HasField('bool_value')\n\n    def do_decode(self, value, decode_fn):\n        del decode_fn\n        return value.bool_value",
    "docstring": "Codec for booleans.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\saved_model\\nested_structure_coder.py",
    "ast_data": "ClassDef name:_BoolCodec FunctionDef name:can_encode arg:self arg:pyobj arguments arg arg Return return:yes Call FunctionDef name:do_encode arg:self arg:bool_value arg:encode_fn arguments arg arg arg Assign Call Assign Return return:yes FunctionDef name:can_decode arg:self arg:value arguments arg arg Return return:yes Call FunctionDef name:do_decode arg:self arg:value arg:decode_fn arguments arg arg arg Return return:yes"
  },
  {
    "library": "pandas",
    "name": "is_platform_little_endian",
    "source_code": "def is_platform_little_endian() -> bool:\n    return sys.byteorder == 'little'",
    "docstring": "Checking if the running platform is little endian. Returns ------- bool True if the running platform is little endian.",
    "type": "function",
    "file_path": "pandas\\pandas\\compat\\__init__.py",
    "ast_data": "FunctionDef name:is_platform_little_endian arguments Return return:yes Compare"
  },
  {
    "library": "django",
    "name": "_maindb_connection",
    "source_code": "@cached_property\ndef _maindb_connection(self):\n    settings_dict = settings.DATABASES[self.connection.alias]\n    user = settings_dict.get('SAVED_USER') or settings_dict['USER']\n    password = settings_dict.get('SAVED_PASSWORD') or settings_dict['PASSWORD']\n    settings_dict = {**settings_dict, 'USER': user, 'PASSWORD': password}\n    DatabaseWrapper = type(self.connection)\n    return DatabaseWrapper(settings_dict, alias=self.connection.alias)",
    "docstring": "This is analogous to other backends' property, which allows access to an \"administrative\" connection which can be used to manage the test databases. For Oracle, the only connection that can be used for that purpose is the main (non-test) connection.",
    "type": "method",
    "file_path": "django\\django\\db\\backends\\oracle\\creation.py",
    "ast_data": "FunctionDef name:_maindb_connection arg:self arguments arg Assign Assign BoolOp Call Assign BoolOp Call Assign Assign Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_restore_updates",
    "source_code": "def _restore_updates(self):\n    data_dict = {}\n    for name, var in self.state_variables.items():\n        data_dict[name] = var.numpy()\n    return data_dict",
    "docstring": "Recreates a dict of updates from the layer's weights.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\base_preprocessing_layer.py",
    "ast_data": "FunctionDef name:_restore_updates arg:self arguments arg Assign For Call Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "variable_shape",
    "source_code": "@property\ndef variable_shape(self):\n    if isinstance(self.categorical_column, fc_types.FeatureColumn):\n        return tensor_shape.TensorShape([1, self.categorical_column.num_buckets])\n    else:\n        return tensor_shape.TensorShape([1, self.categorical_column._num_buckets])",
    "docstring": "Returns a representing the shape of the dense .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\feature_column\\feature_column_v2.py",
    "ast_data": "FunctionDef name:variable_shape arg:self arguments arg If Call Return return:yes Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "SymbolicGridFn",
    "source_code": "class SymbolicGridFn:\n\n    def __init__(self, fn: Callable[..., tuple[Any, Any, Any]]):\n        self.fn = fn\n        self.kwargs_int = {}\n        self.kwargs_sym = {}\n        params = inspect.signature(fn).parameters\n        for name, fn_sym, fn_int in [('cdiv', CeilDiv, ceildiv), ('min', sympy.Min, min), ('max', sympy.Max, max)]:\n            if name in params:\n                self.kwargs_int[name] = fn_int\n                self.kwargs_sym[name] = fn_sym\n\n    def __call__(self, *args, **kwargs) -> tuple[int, int, int]:\n        return self.fn(*args, **kwargs, **self.kwargs_int)\n\n    def sympy_call(self, *args, **kwargs):\n        return self.fn(*args, **kwargs, **self.kwargs_sym)",
    "docstring": "Wrapper around a grid function that allows either int or sympy inputs. @SymbolicGridFn def grid(x, meta, *, cdiv): return cdiv(x, meta[\"BLOCK_X\"])",
    "type": "class",
    "file_path": "pytorch\\torch\\_inductor\\select_algorithm.py",
    "ast_data": "ClassDef name:SymbolicGridFn FunctionDef name:__init__ arg:self arg:fn arguments arg arg Assign Assign Assign Assign Call For If Compare Assign Assign FunctionDef name:__call__ arg:self arguments arg arg arg Return return:yes Call FunctionDef name:sympy_call arg:self arguments arg arg arg Return return:yes Call"
  },
  {
    "library": "authlib",
    "name": "fetch_access_token",
    "source_code": "async def fetch_access_token(self, request_token=None, **kwargs):\n    async with self._get_oauth_client() as client:\n        if request_token is None:\n            raise MissingRequestTokenError()\n        token = {}\n        token.update(request_token)\n        token.update(kwargs)\n        client.token = token\n        params = self.access_token_params or {}\n        token = await client.fetch_access_token(self.access_token_url, **params)\n    return token",
    "docstring": "Fetch access token in one step. :param request_token: A previous request token for OAuth 1. :param kwargs: Extra parameters to fetch access token. :return: A token dict.",
    "type": "method",
    "file_path": "authlib\\authlib\\integrations\\base_client\\async_app.py",
    "ast_data": "AsyncFunctionDef name:fetch_access_token arg:self arg:request_token arguments arg arg arg Call If Compare Raise Call Assign Call Call Assign Assign BoolOp Assign Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "transpose_inference_rule",
    "source_code": "@register_inference_rule(torch.transpose)\ndef transpose_inference_rule(n: Node):\n    if n.target == torch.transpose:\n        assert isinstance(n.args[0], Node)\n        t = n.args[0].type\n        assert isinstance(n.args[1], int)\n        assert isinstance(n.args[2], int)\n        dim1, dim2 = (n.args[1], n.args[2])\n        if t == Dyn:\n            n.type = Dyn\n            return n.type\n        elif isinstance(t, TensorType):\n            if 0 <= dim1 < len(t.__args__) and 0 <= dim2 < len(t.__args__):\n                new_type = list(t.__args__)\n                new_type[dim1], new_type[dim2] = (new_type[dim2], new_type[dim1])\n                final = TensorType(new_type)\n                n.type = get_greatest_upper_bound(n.type, final)\n                return n.type\n            else:\n                raise TypeError(f'Cannot transpose {dim1} and {dim2} in type {t} for node {n}')\n        else:\n            raise TypeError(f'Cannot transpose {dim1} and {dim2} in type {t} for node {n}')",
    "docstring": "We check that dimensions for the transpose operations are within range of the tensor type of the node",
    "type": "function",
    "file_path": "pytorch\\torch\\fx\\experimental\\graph_gradual_typechecker.py",
    "ast_data": "FunctionDef name:transpose_inference_rule arg:n arguments arg If Compare Call Assign Call Call Assign If Compare Assign Return return:yes If Call If BoolOp Compare Call Compare Call Assign Call Assign Assign Call Assign Call Return return:yes Raise Call Raise Call Call"
  },
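The core of the rule is an index swap on the type's dimension tuple; a stand-in sketch with a plain tuple in place of `TensorType`:

```python
t = (2, 3, 4)          # stand-in for TensorType((2, 3, 4)).__args__
dim1, dim2 = 0, 2

assert 0 <= dim1 < len(t) and 0 <= dim2 < len(t)   # the range check above
new_type = list(t)
new_type[dim1], new_type[dim2] = new_type[dim2], new_type[dim1]
print(tuple(new_type))  # (4, 3, 2)
```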
  {
    "library": "pytorch",
    "name": "GroupMember",
    "source_code": "class GroupMember(metaclass=_WorldMeta):\n    NON_GROUP_MEMBER = -100",
    "docstring": "Group member class.",
    "type": "class",
    "file_path": "pytorch\\torch\\distributed\\distributed_c10d.py",
    "ast_data": "ClassDef name:GroupMember Assign"
  },
  {
    "library": "pytorch",
    "name": "map_aggregate",
    "source_code": "@compatibility(is_backward_compatible=True)\ndef map_aggregate(a: ArgumentT, fn: Callable[[Argument], Argument]) -> ArgumentT:\n    return _fx_map_aggregate(a, fn)",
    "docstring": "Apply fn recursively to each object appearing in arg. arg may be a list, tuple, slice, or dict with string keys: the return value will have the same type and structure.",
    "type": "function",
    "file_path": "pytorch\\torch\\fx\\node.py",
    "ast_data": "FunctionDef name:map_aggregate arg:a arg:fn arguments arg arg Return return:yes Call Call"
  },
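A usage sketch: `fn` is applied at the leaves while containers keep their structure (fx returns its own immutable list/dict wrappers, which print like the builtins):

```python
from torch.fx.node import map_aggregate

out = map_aggregate((1, [2, 3], {"k": 4}), lambda x: x * 10)
print(out)   # (10, [20, 30], {'k': 40})
```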
  {
    "library": "tensorflow",
    "name": "QuantizationConfig",
    "source_code": "@tf_export('tpu.experimental.embedding.QuantizationConfig')\nclass QuantizationConfig:\n\n    def __init__(self, num_buckets: int, lower: float, upper: float):\n        if num_buckets < 2:\n            raise ValueError(f'num_buckets is {num_buckets}, must be at least 2 for simulated quantization.')\n        self.num_buckets = num_buckets\n        self.lower = lower\n        self.upper = upper\n\n    def _set_optimization_parameters(self, parameters: optimization_parameters_pb2.OptimizationParameters):\n        parameters.simulated_quantization.enabled = True\n        parameters.simulated_quantization.num_buckets = self.num_buckets\n        parameters.simulated_quantization.clipping_limits.lower.value = self.lower\n        parameters.simulated_quantization.clipping_limits.upper.value = self.upper\n\n    def __repr__(self):\n        return 'QuantizationConfig(num_buckets={num_buckets!r}, lower={lower!r}, upper={upper!r})'.format(num_buckets=self.num_buckets, lower=self.lower, upper=self.upper)",
    "docstring": "Settings for simulated quantization of the tpu embedding table. When simulated quantization is enabled, the results of the embedding lookup are clipped and quantized according to the settings here before the combiner is applied. For example, to quantize the following is done: See tensorflow/core/protobuf/tpu/optimization_parameters.proto for more details. NOTE: This does not change the storage type of the embedding table, that will continue to be float32 as will the saved variable in the checkpoint. You will have to manually quantize the variable (typically with the same algorithm and settings as above) manually.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\tpu\\tpu_embedding_v2_utils.py",
    "ast_data": "ClassDef name:QuantizationConfig FunctionDef name:__init__ arg:self arg:num_buckets arg:lower arg:upper arguments arg arg arg arg If Compare Raise Call Assign Assign Assign FunctionDef name:_set_optimization_parameters arg:self arg:parameters arguments arg arg Assign Assign Assign Assign FunctionDef name:__repr__ arg:self arguments arg Return return:yes Call Call"
  },
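A usage sketch through the exported path shown in the decorator; the values are illustrative:

```python
import tensorflow as tf

quant = tf.tpu.experimental.embedding.QuantizationConfig(
    num_buckets=256,   # must be >= 2
    lower=-1.0,        # clipping limits applied before bucketing
    upper=1.0,
)
print(quant)  # QuantizationConfig(num_buckets=256, lower=-1.0, upper=1.0)
```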
  {
    "library": "scipy",
    "name": "tilbert",
    "source_code": "def tilbert(x, h, period=None, _cache=_cache):\n    if isinstance(_cache, threading.local):\n        if not hasattr(_cache, 'tilbert_cache'):\n            _cache.tilbert_cache = {}\n        _cache = _cache.tilbert_cache\n    tmp = asarray(x)\n    if iscomplexobj(tmp):\n        return tilbert(tmp.real, h, period, _cache) + 1j * tilbert(tmp.imag, h, period, _cache)\n    if period is not None:\n        h = h * 2 * pi / period\n    n = len(x)\n    omega = _cache.get((n, h))\n    if omega is None:\n        if len(_cache) > 20:\n            while _cache:\n                _cache.popitem()\n\n        def kernel(k, h=h):\n            if k:\n                return 1.0 / tanh(h * k)\n            return 0\n        omega = convolve.init_convolution_kernel(n, kernel, d=1)\n        _cache[n, h] = omega\n    overwrite_x = _datacopied(tmp, x)\n    return convolve.convolve(tmp, omega, swap_real_imag=1, overwrite_x=overwrite_x)",
    "docstring": "Return h-Tilbert transform of a periodic sequence x. If x_j and y_j are Fourier coefficients of periodic functions x and y, respectively, then:: y_j = sqrt(-1)*coth(j*h*2*pi/period) * x_j y_0 = 0 Parameters ---------- x : array_like The input array to transform. h : float Defines the parameter of the Tilbert transform. period : float, optional The assumed period of the sequence. Default period is `` is taken zero.",
    "type": "function",
    "file_path": "scipy\\scipy\\fftpack\\_pseudo_diffs.py",
    "ast_data": "FunctionDef name:tilbert arg:x arg:h arg:period arg:_cache arguments arg arg arg arg If Call If Call Assign Assign Assign Call If Call Return return:yes Call Call If Compare Assign Assign Call Assign Call If Compare If Compare Call While Call FunctionDef name:kernel arg:k arg:h arguments arg arg If Return return:yes Call Return return:yes Assign Call Assign Assign Call Return return:yes Call"
  },
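A usage sketch with the public `scipy.fftpack.tilbert`; for a single sine mode with the default period, the transform reduces to scaling the cosine by coth(h):

```python
import numpy as np
from scipy.fftpack import tilbert

n, h = 64, 1.0
t = np.linspace(0, 2 * np.pi, n, endpoint=False)
x = np.sin(t)                  # single Fourier mode, zero mean

y = tilbert(x, h)
# For x = sin(t), the formula above gives y = cos(t) / tanh(h).
print(np.allclose(y, np.cos(t) / np.tanh(h)))   # True
```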
  {
    "library": "tensorflow",
    "name": "_serialize_to_proto",
    "source_code": "def _serialize_to_proto(self, object_proto=None, **kwargs):\n    del object_proto, kwargs\n    return None",
    "docstring": "Returns a proto of any type to be saved into the SavedModel. Trackable classes decorated with should overwrite this method to save metadata for this object to the SavedModel. The proto returned by this function will be passed to in the form of a proto. This data is only saved and used by the Python API. Existing C++ loading APIs such as will not read this field at all. Args: object_proto: A proto that may be filled by this function. Only the core serializable types (Variable, Function, Constant, Asset) should modify this argument. **kwargs: Future keyword arguments passed to the object during saving. Returns: A proto that serializes this class's type.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\trackable\\base.py",
    "ast_data": "FunctionDef name:_serialize_to_proto arg:self arg:object_proto arguments arg arg arg Return return:no"
  },
  {
    "library": "numpy",
    "name": "real",
    "source_code": "@property\ndef real(self):\n    result = self._data.real.view(type(self))\n    result.__setmask__(self._mask)\n    return result",
    "docstring": "The real part of the masked array. This property is a view on the real part of this . See Also -------- imag Examples -------- >>> import numpy as np >>> x = np.ma.array([1+1.j, -2j, 3.45+1.6j], mask=[False, True, False]) >>> x.real masked_array(data=[1.0, --, 3.45], mask=[False, True, False], fill_value=1e+20)",
    "type": "method",
    "file_path": "numpy\\numpy\\ma\\core.py",
    "ast_data": "FunctionDef name:real arg:self arguments arg Assign Call Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "get_flatbuffer_module_info",
    "source_code": "def get_flatbuffer_module_info(path_or_file):\n    if isinstance(path_or_file, (str, os.PathLike)):\n        with open(path_or_file, 'rb') as f:\n            all_bytes = f.read()\n    else:\n        all_bytes = path_or_file.read()\n    return torch._C._get_module_info_from_flatbuffer(all_bytes)",
    "docstring": "Get some information regarding a model file in flatbuffer format. Args: path_or_file: Either str, Path or file like object (BytesIO OK). If it's str or Path, we will read the file referenced by that path as Bytes. Returns: A dict with metadata on what that file contains, currently looks like this: { 'bytecode_version': 4, # int 'operator_version': 4, # int 'function_names': { '__torch__.___torch_mangle_0.Foo.forward'}, # set 'type_names': set(), # set 'opname_to_num_args': {'aten::linear': 3} # Dict[str, int] }",
    "type": "function",
    "file_path": "pytorch\\torch\\jit\\_serialization.py",
    "ast_data": "FunctionDef name:get_flatbuffer_module_info arg:path_or_file arguments arg If Call With Call Assign Call Assign Call Return return:yes Call"
  },
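A hedged usage sketch for ``get_flatbuffer_module_info``; "model_ff.ptl" is a hypothetical path to a module previously saved in flatbuffer format (e.g. via ``_save_for_lite_interpreter`` with ``_use_flatbuffer=True``), and a PyTorch build with flatbuffer support is assumed.

    from torch.jit._serialization import get_flatbuffer_module_info

    # Accepts a str, os.PathLike, or file-like object (BytesIO is fine).
    info = get_flatbuffer_module_info("model_ff.ptl")  # hypothetical file
    print(info["bytecode_version"], sorted(info["function_names"]))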
  {
    "library": "pytorch",
    "name": "fallthrough_kernel",
    "source_code": "def fallthrough_kernel():\n    raise NotImplementedError('fallthrough_kernel() should never be called.')",
    "docstring": "A dummy function to pass to `` in order to register a fallthrough.",
    "type": "function",
    "file_path": "pytorch\\torch\\library.py",
    "ast_data": "FunctionDef name:fallthrough_kernel arguments Raise Call"
  },
  {
    "library": "tensorflow",
    "name": "__enter__",
    "source_code": "def __enter__(self):\n    return self",
    "docstring": "Make usable with \"with\" statement.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\summary\\writer\\writer.py",
    "ast_data": "FunctionDef name:__enter__ arg:self arguments arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "evaluate_conditional_with_constraints",
    "source_code": "def evaluate_conditional_with_constraints(tracer_root, graph, node, counter=0, user_constraints=None):\n    transformed_positive, transformed_negative = transform_all_constraints_trace_time(tracer_root, graph, node, counter)\n    s = z3.Solver()\n    s.add(transformed_positive)\n    if user_constraints is not None:\n        s.add(user_constraints)\n    condition = s.check()\n    s = z3.Solver()\n    s.add(transformed_negative)\n    if user_constraints is not None:\n        s.add(user_constraints)\n    negation = s.check()\n    return (condition, negation)",
    "docstring": "Given an IR and a node representing a conditional, evaluate the conditional and its negation Args: tracer_root: Tracer root for module instances node: The node to be evaluated Returns: the results of evaluating the condition and the negation with the rest of the constraints",
    "type": "function",
    "file_path": "pytorch\\torch\\fx\\experimental\\migrate_gradual_types\\transform_to_z3.py",
    "ast_data": "FunctionDef name:evaluate_conditional_with_constraints arg:tracer_root arg:graph arg:node arg:counter arg:user_constraints arguments arg arg arg arg arg Assign Call Assign Call Call If Compare Call Assign Call Assign Call Call If Compare Call Assign Call Return return:yes"
  },
  {
    "library": "scipy",
    "name": "_get_support",
    "source_code": "def _get_support(self, *args, **kwargs):\n    return (self.a, self.b)",
    "docstring": "Return the support of the (unscaled, unshifted) distribution. *Must* be overridden by distributions which have support dependent upon the shape parameters of the distribution. Any such override *must not* set or change any of the class members, as these members are shared amongst all instances of the distribution. Parameters ---------- arg1, arg2, ... : array_like The shape parameter(s) for the distribution (see docstring of the instance object for more information). Returns ------- a, b : numeric (float, or int or +/-np.inf) end-points of the distribution's support for the specified shape parameters.",
    "type": "method",
    "file_path": "scipy\\scipy\\stats\\_distn_infrastructure.py",
    "ast_data": "FunctionDef name:_get_support arg:self arguments arg arg arg Return return:yes"
  },
  {
    "library": "numpy",
    "name": "isdigit",
    "source_code": "def isdigit(self):\n    return isdigit(self)",
    "docstring": "Returns true for each element if all characters in the string are digits and there is at least one character, false otherwise. See Also -------- char.isdigit",
    "type": "method",
    "file_path": "numpy\\numpy\\_core\\defchararray.py",
    "ast_data": "FunctionDef name:isdigit arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "_split",
    "source_code": "def _split(self, data):\n    idx = data.index(b'eexec')\n    idx += len(b'eexec')\n    while data[idx] in b' \\t\\r\\n':\n        idx += 1\n    len1 = idx\n    idx = data.rindex(b'cleartomark') - 1\n    zeros = 512\n    while zeros and data[idx] in b'0' or data[idx] in b'\\r\\n':\n        if data[idx] in b'0':\n            zeros -= 1\n        idx -= 1\n    if zeros:\n        _log.info('Insufficiently many zeros in Type 1 font')\n    idx1 = len1 + (idx - len1 + 2 & ~1)\n    binary = binascii.unhexlify(data[len1:idx1])\n    return (data[:len1], binary, data[idx + 1:])",
    "docstring": "Split the Type 1 font into its three main parts. The three parts are: (1) the cleartext part, which ends in a eexec operator; (2) the encrypted part; (3) the fixed part, which contains 512 ASCII zeros possibly divided on various lines, a cleartomark operator, and possibly something else.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\_type1font.py",
    "ast_data": "FunctionDef name:_split arg:self arg:data arguments arg arg Assign Call Call While Compare Assign Assign Call Assign While BoolOp BoolOp Compare Compare If Compare If Call Assign Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "num_replicas",
    "source_code": "@property\ndef num_replicas(self) -> int:\n    return self._num_replicas",
    "docstring": "The number of replicas of the computation.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\tpu\\device_assignment.py",
    "ast_data": "FunctionDef name:num_replicas arg:self arguments arg Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "gcf",
    "source_code": "def gcf() -> Figure:\n    manager = _pylab_helpers.Gcf.get_active()\n    if manager is not None:\n        return manager.canvas.figure\n    else:\n        return figure()",
    "docstring": "Get the current figure. If there is currently no figure on the pyplot figure stack, a new one is created using . (To test whether there is currently a figure on the pyplot figure stack, check whether is empty.)",
    "type": "function",
    "file_path": "matplotlib\\lib\\matplotlib\\pyplot.py",
    "ast_data": "FunctionDef name:gcf arguments Assign Call If Compare Return return:yes Return return:yes Call"
  },
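A minimal usage sketch: ``gcf`` returns the active figure, creating one on demand when the pyplot stack is empty.

    import matplotlib.pyplot as plt

    fig = plt.figure()       # push a figure onto the pyplot stack
    print(plt.gcf() is fig)  # True: gcf() returns the active figure
    plt.close(fig)
    print(plt.gcf() is fig)  # False: a new figure was created on demand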
  {
    "library": "tensorflow",
    "name": "experimental_run_functions_eagerly",
    "source_code": "@deprecation.deprecated(None, 'Use `tf.config.run_functions_eagerly` instead of the experimental version.')\n@tf_export('config.experimental_run_functions_eagerly')\ndef experimental_run_functions_eagerly(run_eagerly):\n    return run_functions_eagerly(run_eagerly)",
    "docstring": "Enables / disables eager execution of s. Calling will make all invocations of run eagerly instead of running as a traced graph function. See for an example. Note: This flag has no effect on functions passed into tf.data transformations as arguments. tf.data functions are never executed eagerly and are always executed as a compiled Tensorflow Graph. Args: run_eagerly: Boolean. Whether to run functions eagerly. Returns: None",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\polymorphic_function\\eager_function_run.py",
    "ast_data": "FunctionDef name:experimental_run_functions_eagerly arg:run_eagerly arguments arg Return return:yes Call Call Call"
  },
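A hedged usage sketch using the non-deprecated replacement named by the decorator above, ``tf.config.run_functions_eagerly``:

    import tensorflow as tf

    tf.config.run_functions_eagerly(True)   # tf.function bodies now run eagerly

    @tf.function
    def f(x):
        return x + 1

    f(tf.constant(1))                       # runs eagerly; easy to step through
    tf.config.run_functions_eagerly(False)  # restore graph tracing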
  {
    "library": "numpy",
    "name": "_DomainGreaterEqual",
    "source_code": "class _DomainGreaterEqual:\n\n    def __init__(self, critical_value):\n        self.critical_value = critical_value\n\n    def __call__(self, x):\n        with np.errstate(invalid='ignore'):\n            return umath.less(x, self.critical_value)",
    "docstring": "DomainGreaterEqual(v)(x) is True where x < v.",
    "type": "class",
    "file_path": "numpy\\numpy\\ma\\core.py",
    "ast_data": "ClassDef name:_DomainGreaterEqual FunctionDef name:__init__ arg:self arg:critical_value arguments arg arg Assign FunctionDef name:__call__ arg:self arg:x arguments arg arg With Call Return return:yes Call"
  },
  {
    "library": "kornia",
    "name": "image_list_to_tensor",
    "source_code": "def image_list_to_tensor(images: List[Any]) -> Tensor:\n    if not images:\n        raise ValueError('Input list of numpy images is empty')\n    if len(images[0].shape) != 3:\n        raise ValueError('Input images must be three dimensional arrays')\n    list_of_tensors: List[Tensor] = []\n    for image in images:\n        list_of_tensors.append(image_to_tensor(image))\n    tensor: Tensor = torch.stack(list_of_tensors)\n    return tensor",
    "docstring": "Convert a list of numpy images to a PyTorch 4d tensor image. Args: images: list of images, each of the form :math:. Image shapes must be consistent Returns: tensor of the form :math:. Example: >>> imgs = [np.ones((4, 4, 1)), np.zeros((4, 4, 1))] >>> image_list_to_tensor(imgs).shape torch.Size([2, 1, 4, 4])",
    "type": "function",
    "file_path": "kornia\\kornia\\utils\\image.py",
    "ast_data": "FunctionDef name:image_list_to_tensor arg:images arguments arg If Raise Call If Compare Call Raise Call For Call Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, node_def, op, message, *args):\n    super(UnimplementedError, self).__init__(node_def, op, message, UNIMPLEMENTED, *args)",
    "docstring": "Creates an .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\errors_impl.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:node_def arg:op arg:message arguments arg arg arg arg arg Call Call"
  },
  {
    "library": "pytorch",
    "name": "_execute_dependency_graph",
    "source_code": "def _execute_dependency_graph(self):\n    self._validate_dependency_graph()\n    extern_modules = []\n    for module_name, attrs in self.dependency_graph.nodes.items():\n        action = attrs['action']\n        if action == _ModuleProviderAction.EXTERN:\n            for hook in self._extern_hooks.values():\n                hook(self, module_name)\n            extern_modules.append(module_name)\n        elif action == _ModuleProviderAction.MOCK:\n            for hook in self._mock_hooks.values():\n                hook(self, module_name)\n            self._write_mock_file()\n            is_package = hasattr(self._import_module(module_name), '__path__')\n            self._write_source_string(module_name, _MOCK_IMPL, is_package)\n        elif action == _ModuleProviderAction.INTERN:\n            for hook in self._intern_hooks.values():\n                hook(self, module_name)\n            if 'provided' not in attrs:\n                raise AssertionError(f'Module was marked `intern` but not provided: {module_name}')\n            if attrs.get('is_pickle') is True:\n                continue\n            is_package = attrs['is_package']\n            source = attrs['source']\n            self._write_source_string(module_name, source, is_package)\n        elif action == _ModuleProviderAction.REPACKAGED_MOCK_MODULE:\n            self._write_mock_file()\n        elif action == _ModuleProviderAction.SKIP:\n            continue\n        else:\n            raise AssertionError(f'Invalid action: {module_name}, {action}. Please report a bug to PyTorch.')\n    extern_file_contents = '\\n'.join(extern_modules) + '\\n'\n    self._write('.data/extern_modules', extern_file_contents)",
    "docstring": "Takes a finalized dependency graph describing how to package all modules and executes it, writing to the ZIP archive.",
    "type": "method",
    "file_path": "pytorch\\torch\\package\\package_exporter.py",
    "ast_data": "FunctionDef name:_execute_dependency_graph arg:self arguments arg Call Assign For Call Assign If Compare For Call Call Call If Compare For Call Call Call Assign Call Call Call If Compare For Call Call If Compare Raise Call If Compare Call Assign Assign Call If Compare Call If Compare Raise Call Assign Call Call"
  },
  {
    "library": "authlib",
    "name": "rebuild_auth",
    "source_code": "def rebuild_auth(self, prepared_request, response):\n    if 'Authorization' in prepared_request.headers:\n        prepared_request.headers.pop('Authorization', True)\n        prepared_request.prepare_auth(self.auth)",
    "docstring": "When being redirected we should always strip Authorization header, since nonce may not be reused as per OAuth spec.",
    "type": "method",
    "file_path": "authlib\\authlib\\integrations\\requests_client\\oauth1_session.py",
    "ast_data": "FunctionDef name:rebuild_auth arg:self arg:prepared_request arg:response arguments arg arg arg If Compare Call Call"
  },
  {
    "library": "tensorflow",
    "name": "fetch",
    "source_code": "def fetch(self):\n    raise NotImplementedError('Must be implemented in subclasses.')",
    "docstring": "Wait for the result of and return the numpy result. This makes the value concrete by copying the remote value to local. Returns: The numpy array structure of the actual output of the associated with this , previously returned by a call. This can be a single value, or a structure of values, depending on the output of the . Raises: tf.errors.CancelledError: If the function that produces this is aborted or cancelled due to failure.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\coordinator\\remote_value.py",
    "ast_data": "FunctionDef name:fetch arg:self arguments arg Raise Call"
  },
  {
    "library": "tensorflow",
    "name": "benchmarks_main",
    "source_code": "def benchmarks_main(true_main, argv=None):\n    if argv is None:\n        argv = sys.argv\n    found_arg = [arg for arg in argv if arg.startswith('--benchmark_filter=') or arg.startswith('-benchmark_filter=')]\n    if found_arg:\n        argv.remove(found_arg[0])\n        regex = found_arg[0].split('=')[1]\n        app.run(lambda _: _run_benchmarks(regex), argv=argv)\n    else:\n        true_main()",
    "docstring": "Run benchmarks as declared in argv. Args: true_main: True main function to run if benchmarks are not requested. argv: the command line arguments (if None, uses sys.argv).",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\platform\\benchmark.py",
    "ast_data": "FunctionDef name:benchmarks_main arg:true_main arg:argv arguments arg arg If Compare Assign Assign BoolOp Call Call If Call Assign Call Call arguments arg Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_run_function_for_calibration_graph_mode",
    "source_code": "def _run_function_for_calibration_graph_mode(sess: session.Session, signature_def: meta_graph_pb2.SignatureDef, representative_dataset: rd.RepresentativeDataset) -> None:\n    output_tensor_names = [output_tensor_info.name for output_tensor_info in signature_def.outputs.values()]\n    sample_validator = _create_sample_validator(expected_input_keys=signature_def.inputs.keys())\n    for sample in map(sample_validator, _log_sample_num_for_calibration(representative_dataset)):\n        feed_dict = rd.create_feed_dict_from_input_data(sample, signature_def)\n        sess.run(output_tensor_names, feed_dict=feed_dict)",
    "docstring": "Runs the representative dataset through a function for calibration. NOTE: This is intended to be run in graph mode (TF1). The function is identified by the SignatureDef. Args: sess: The Session object to run the function in. signature_def: A SignatureDef that identifies a function by specifying the inputs and outputs. representative_dataset: The representative dataset to run through the function.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\compiler\\mlir\\quantization\\tensorflow\\python\\py_function_lib.py",
    "ast_data": "FunctionDef name:_run_function_for_calibration_graph_mode arg:sess arg:signature_def arg:representative_dataset arguments arg arg arg Assign Call Assign Call Call For Call Call Assign Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_export_to_saved_model_graph",
    "source_code": "def _export_to_saved_model_graph(self, object_map, tensor_map, options, **kwargs):\n    resource_list = self._v._export_to_saved_model_graph(object_map, tensor_map, options, **kwargs)\n    object_map[self] = object_map[self._v]\n    return resource_list",
    "docstring": "For implementing .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\ps_values.py",
    "ast_data": "FunctionDef name:_export_to_saved_model_graph arg:self arg:object_map arg:tensor_map arg:options arguments arg arg arg arg arg Assign Call Assign Return return:yes"
  },
  {
    "library": "django",
    "name": "check_errcode",
    "source_code": "def check_errcode(result, func, cargs, cpl=False):\n    check_err(result, cpl=cpl)",
    "docstring": "Check the error code returned (c_int).",
    "type": "function",
    "file_path": "django\\django\\contrib\\gis\\gdal\\prototypes\\errcheck.py",
    "ast_data": "FunctionDef name:check_errcode arg:result arg:func arg:cargs arg:cpl arguments arg arg arg arg Call"
  },
  {
    "library": "scipy",
    "name": "logpdf",
    "source_code": "def logpdf(self, x, alpha):\n    alpha = _dirichlet_check_parameters(alpha)\n    x = _dirichlet_check_input(alpha, x)\n    out = self._logpdf(x, alpha)\n    return _squeeze_output(out)",
    "docstring": "Log of the Dirichlet probability density function. Parameters ---------- x : array_like Quantiles, with the last axis of denoting the components. %(_dirichlet_doc_default_callparams)s Returns ------- pdf : ndarray or scalar Log of the probability density function evaluated at .",
    "type": "method",
    "file_path": "scipy\\scipy\\stats\\_multivariate.py",
    "ast_data": "FunctionDef name:logpdf arg:self arg:x arg:alpha arguments arg arg arg Assign Call Assign Call Assign Call Return return:yes Call"
  },
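A hedged usage sketch for ``scipy.stats.dirichlet.logpdf``; the quantiles must lie on the simplex (non-negative, summing to 1).

    import numpy as np
    from scipy.stats import dirichlet

    alpha = np.array([1.0, 2.0, 3.0])
    x = np.array([0.2, 0.3, 0.5])       # a point on the 2-simplex
    print(dirichlet.logpdf(x, alpha))   # log density at a single point
    print(np.log(dirichlet.pdf(x, alpha)))  # agrees with log(pdf)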
  {
    "library": "django",
    "name": "to_pgraster",
    "source_code": "def to_pgraster(rast):\n    rasterheader = (1, 0, len(rast.bands), rast.scale.x, rast.scale.y, rast.origin.x, rast.origin.y, rast.skew.x, rast.skew.y, rast.srs.srid, rast.width, rast.height)\n    result = pack(POSTGIS_HEADER_STRUCTURE, rasterheader)\n    for band in rast.bands:\n        structure = 'B' + GDAL_TO_STRUCT[band.datatype()]\n        pixeltype = GDAL_TO_POSTGIS[band.datatype()]\n        if band.nodata_value is not None:\n            pixeltype |= BANDTYPE_FLAG_HASNODATA\n        bandheader = pack(structure, (pixeltype, band.nodata_value or 0))\n        result += bandheader + band.data(as_memoryview=True)\n    return result",
    "docstring": "Convert a GDALRaster into PostGIS Raster format.",
    "type": "function",
    "file_path": "django\\django\\contrib\\gis\\db\\backends\\postgis\\pgraster.py",
    "ast_data": "FunctionDef name:to_pgraster arg:rast arguments arg Assign Call Assign Call For Assign Call Assign Call If Compare Assign Call BoolOp Call Return return:yes"
  },
  {
    "library": "sphinx",
    "name": "_key_func_1",
    "source_code": "def _key_func_1(entry: tuple[str, _IndexEntry]) -> tuple[tuple[int, str], str]:\n    key, (_targets, _sub_items, category_key) = entry\n    if category_key:\n        key = category_key\n    lc_key = unicodedata.normalize('NFD', key.lower())\n    lc_key = lc_key.removeprefix('\\u200f')\n    if not lc_key[0:1].isalpha() and (not lc_key.startswith('_')):\n        group = 0\n    else:\n        group = 1\n    return ((group, lc_key), entry[0])",
    "docstring": "Sort the index entries",
    "type": "function",
    "file_path": "sphinx\\sphinx\\environment\\adapters\\indexentries.py",
    "ast_data": "FunctionDef name:_key_func_1 arg:entry arguments arg Assign If Assign Assign Call Call Assign Call If BoolOp Call Call Assign Assign Return return:yes"
  },
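A hedged sketch of the grouping behavior implemented by ``_key_func_1`` above: keys that are neither alphabetic nor underscore-prefixed sort into group 0, ahead of everything else. The miniature ``key`` helper below is a simplification for illustration, not Sphinx's actual code path.

    entries = ["zeta", "_private", "Alpha", "+plus"]

    def key(name):
        # Mimic the (group, lowercased key) ordering used above.
        lc = name.lower()
        group = 1 if (lc[:1].isalpha() or lc.startswith("_")) else 0
        return (group, lc)

    print(sorted(entries, key=key))
    # -> ['+plus', '_private', 'Alpha', 'zeta']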
  {
    "library": "pytorch",
    "name": "_track_dim_from_dims",
    "source_code": "def _track_dim_from_dims(val: Union[None, int, _DimHint, Dim]) -> Union[None, int, str]:\n    if val is None or isinstance(val, int):\n        return val\n    if isinstance(val, _DimHint):\n        return val.__class__.__name__ + '.' + val.type.name\n    assert isinstance(val, Dim)\n    root = val.root if isinstance(val, _DerivedDim) else val\n    if root.__name__ not in dims:\n        dims[root.__name__] = {'min': root.min, 'max': root.max, 'derived': set()}\n    if isinstance(val, _DerivedDim):\n        dims[root.__name__]['derived'].add(val.__name__)\n    return val.__name__",
    "docstring": "Tracks dims, ranges, derived dims from the standardized dynamic_shapes spec.",
    "type": "function",
    "file_path": "pytorch\\torch\\_export\\serde\\dynamic_shapes.py",
    "ast_data": "FunctionDef name:_track_dim_from_dims arg:val arguments arg If BoolOp Compare Call Return return:yes If Call Return return:yes Call Assign Call If Compare Assign Call If Call Call Return return:yes"
  },
  {
    "library": "scipy",
    "name": "time_rotvec_conversion",
    "source_code": "def time_rotvec_conversion(self, num_rotations):\n    Rotation.from_rotvec(self.rotations.as_rotvec())",
    "docstring": "Time converting rotation from and to rotation vectors",
    "type": "method",
    "file_path": "scipy\\benchmarks\\benchmarks\\spatial.py",
    "ast_data": "FunctionDef name:time_rotvec_conversion arg:self arg:num_rotations arguments arg arg Call Call"
  },
  {
    "library": "django",
    "name": "_prepare",
    "source_code": "def _prepare(self):\n    self.message = str(self.message)\n    self.extra_tags = str(self.extra_tags) if self.extra_tags is not None else None",
    "docstring": "Prepare the message for serialization by forcing the `` to str in case they are lazy translations.",
    "type": "method",
    "file_path": "django\\django\\contrib\\messages\\storage\\base.py",
    "ast_data": "FunctionDef name:_prepare arg:self arguments arg Assign Call Assign Compare Call"
  },
  {
    "library": "pytorch",
    "name": "broadcast_dim",
    "source_code": "def broadcast_dim(tensor_input1, tensor_input2, res1, res2, index, padding=False):\n    if tensor_input1[index] is None:\n        assert padding\n    if not padding:\n        return Conj([BinConstraintD(tensor_input1[index], 1, op_eq), BinConstraintD(res1[index], res2[index], op_eq), BinConstraintD(res2[index], tensor_input2[index], op_eq)])\n    else:\n        return Conj([BinConstraintD(res1[index], res2[index], op_eq), BinConstraintD(res2[index], tensor_input2[index], op_eq)])",
    "docstring": "Apply broadcasting to the 'index' dimension of tensor_input1. Args: tensor_input1: should represent [d1, ..., d_index, ...] where d_index = 1 tensor_input2: represents the second input res1: broadcasted result 1 res2: broadcasted result 2 index: the index to broadcast padding: If padding was used, then tensor_input1[index] does not exist Returns:",
    "type": "function",
    "file_path": "pytorch\\torch\\fx\\experimental\\migrate_gradual_types\\constraint_transformation.py",
    "ast_data": "FunctionDef name:broadcast_dim arg:tensor_input1 arg:tensor_input2 arg:res1 arg:res2 arg:index arg:padding arguments arg arg arg arg arg arg If Compare If Return return:yes Call Call Call Call Return return:yes Call Call Call"
  },
  {
    "library": "scikit-learn",
    "name": "__call__",
    "source_code": "def __call__(self, X, Y=None, eval_gradient=False):\n    pairwise_kernels_kwargs = self.pairwise_kernels_kwargs\n    if self.pairwise_kernels_kwargs is None:\n        pairwise_kernels_kwargs = {}\n    X = np.atleast_2d(X)\n    K = pairwise_kernels(X, Y, metric=self.metric, gamma=self.gamma, filter_params=True, **pairwise_kernels_kwargs)\n    if eval_gradient:\n        if self.hyperparameter_gamma.fixed:\n            return (K, np.empty((X.shape[0], X.shape[0], 0)))\n        else:\n\n            def f(gamma):\n                return pairwise_kernels(X, Y, metric=self.metric, gamma=np.exp(gamma), filter_params=True, **pairwise_kernels_kwargs)\n            return (K, _approx_fprime(self.theta, f, 1e-10))\n    else:\n        return K",
    "docstring": "Return the kernel k(X, Y) and optionally its gradient. Parameters ---------- X : ndarray of shape (n_samples_X, n_features) Left argument of the returned kernel k(X, Y) Y : ndarray of shape (n_samples_Y, n_features), default=None Right argument of the returned kernel k(X, Y). If None, k(X, X) if evaluated instead. eval_gradient : bool, default=False Determines whether the gradient with respect to the log of the kernel hyperparameter is computed. Only supported when Y is None. Returns ------- K : ndarray of shape (n_samples_X, n_samples_Y) Kernel k(X, Y) K_gradient : ndarray of shape (n_samples_X, n_samples_X, n_dims), optional The gradient of the kernel k(X, X) with respect to the log of the hyperparameter of the kernel. Only returned when is True.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\gaussian_process\\kernels.py",
    "ast_data": "FunctionDef name:__call__ arg:self arg:X arg:Y arg:eval_gradient arguments arg arg arg arg Assign If Compare Assign Assign Call Assign Call If If Return return:yes Call FunctionDef name:f arg:gamma arguments arg Return return:yes Call Call Return return:yes Call Return return:yes"
  },
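A hedged usage sketch for scikit-learn's ``PairwiseKernel``, whose ``__call__`` is shown above: evaluate k(X, X) together with its gradient with respect to log(gamma).

    import numpy as np
    from sklearn.gaussian_process.kernels import PairwiseKernel

    X = np.array([[0.0], [1.0], [2.0]])
    kernel = PairwiseKernel(gamma=1.0, metric="rbf")
    K, K_gradient = kernel(X, eval_gradient=True)
    print(K.shape, K_gradient.shape)  # (3, 3) (3, 3, 1)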
  {
    "library": "matplotlib",
    "name": "flatten",
    "source_code": "def flatten(seq, scalarp=is_scalar_or_string):\n    for item in seq:\n        if scalarp(item) or item is None:\n            yield item\n        else:\n            yield from flatten(item, scalarp)",
    "docstring": "Return a generator of flattened nested containers. For example: >>> from matplotlib.cbook import flatten >>> l = (('John', ['Hunter']), (1, 23), [[([42, (5, 23)], )]]) >>> print(list(flatten(l))) ['John', 'Hunter', 1, 23, 42, 5, 23] By: Composite of Holger Krekel and Luther Blissett From: and Recipe 1.12 in cookbook",
    "type": "function",
    "file_path": "matplotlib\\lib\\matplotlib\\cbook.py",
    "ast_data": "FunctionDef name:flatten arg:seq arg:scalarp arguments arg arg For If BoolOp Call Compare Call"
  },
  {
    "library": "matplotlib",
    "name": "LArrow",
    "source_code": "@_register_style(_style_list)\nclass LArrow:\n\n    def __init__(self, pad=0.3):\n        self.pad = pad\n\n    def __call__(self, x0, y0, width, height, mutation_size):\n        pad = mutation_size * self.pad\n        width, height = (width + 2 * pad, height + 2 * pad)\n        x0, y0 = (x0 - pad, y0 - pad)\n        x1, y1 = (x0 + width, y0 + height)\n        dx = (y1 - y0) / 2\n        dxx = dx / 2\n        x0 = x0 + pad / 1.4\n        return Path._create_closed([(x0 + dxx, y0), (x1, y0), (x1, y1), (x0 + dxx, y1), (x0 + dxx, y1 + dxx), (x0 - dx, y0 + dx), (x0 + dxx, y0 - dxx), (x0 + dxx, y0)])",
    "docstring": "A box in the shape of a left-pointing arrow.",
    "type": "class",
    "file_path": "matplotlib\\lib\\matplotlib\\patches.py",
    "ast_data": "ClassDef name:LArrow FunctionDef name:__init__ arg:self arg:pad arguments arg arg Assign FunctionDef name:__call__ arg:self arg:x0 arg:y0 arg:width arg:height arg:mutation_size arguments arg arg arg arg arg arg Assign Assign Assign Assign Assign Assign Assign Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "device_function",
    "source_code": "def device_function(self, op):\n    if not self._merge_devices and op.device:\n        return op.device\n    current_device = pydev.DeviceSpec.from_string(op.device or '')\n    node_def = op if isinstance(op, node_def_pb2.NodeDef) else op.node_def\n    if self._ps_tasks and self._ps_device and (node_def.op in self._ps_ops):\n        ps_device = pydev.DeviceSpec.from_string(self._ps_device)\n        current_job, ps_job = (current_device.job, ps_device.job)\n        if ps_job and (not current_job or current_job == ps_job):\n            ps_device = ps_device.replace(task=self._ps_strategy(op))\n        ps_device = ps_device.make_merged_spec(current_device)\n        return ps_device.to_string()\n    worker_device = pydev.DeviceSpec.from_string(self._worker_device or '')\n    worker_device = worker_device.make_merged_spec(current_device)\n    return worker_device.to_string()",
    "docstring": "Choose a device for . Args: op: an . Returns: The device to use for the .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\training\\device_setter.py",
    "ast_data": "FunctionDef name:device_function arg:self arg:op arguments arg arg If BoolOp Return return:yes Assign Call BoolOp Assign Call If BoolOp Compare Assign Call Assign If BoolOp BoolOp Compare Assign Call Call Assign Call Return return:yes Call Assign Call BoolOp Assign Call Return return:yes Call"
  },
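A hedged TF1-style usage sketch: ``tf.compat.v1.train.replica_device_setter`` constructs a device function like the one above, placing variables on parameter-server tasks and other ops on the worker.

    import tensorflow as tf

    setter = tf.compat.v1.train.replica_device_setter(
        ps_tasks=2, worker_device="/job:worker/task:0")
    with tf.compat.v1.Graph().as_default(), tf.compat.v1.device(setter):
        v = tf.compat.v1.get_variable("v", shape=[10])  # placed on a ps task
        y = v * 2                                       # placed on the worker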
  {
    "library": "pandas",
    "name": "_constructor_expanddim",
    "source_code": "@property\ndef _constructor_expanddim(self) -> Callable[..., DataFrame]:\n    from pandas.core.frame import DataFrame\n    return DataFrame",
    "docstring": "Used when a manipulation result has one higher dimension as the original, such as Series.to_frame()",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\series.py",
    "ast_data": "FunctionDef name:_constructor_expanddim arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_make_reduce_func",
    "source_code": "def _make_reduce_func(self, reduce_func, input_dataset):\n    nested_dataset = dataset_ops.DatasetSpec(input_dataset.element_spec)\n    input_structure = (tensor_spec.TensorSpec([], dtypes.int64), nested_dataset)\n    self._reduce_func = structured_function.StructuredFunctionWrapper(reduce_func, self._transformation_name(), input_structure=input_structure)\n    if not isinstance(self._reduce_func.output_structure, dataset_ops.DatasetSpec):\n        raise TypeError(f'Invalid `reduce_func`. `reduce_func` must return a single `tf.data.Dataset` object but its return type is {self._reduce_func.output_structure}.')\n    self._element_spec = self._reduce_func.output_structure._element_spec",
    "docstring": "Make wrapping defun for reduce_func.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\data\\ops\\group_by_window_op.py",
    "ast_data": "FunctionDef name:_make_reduce_func arg:self arg:reduce_func arg:input_dataset arguments arg arg arg Assign Call Assign Call Assign Call Call If Call Raise Call Assign"
  },
  {
    "library": "tensorflow",
    "name": "dtensor_shutdown_tpu_system",
    "source_code": "def dtensor_shutdown_tpu_system():\n    from . import accelerator_util\n    accelerator_util.shutdown_accelerator_system()",
    "docstring": "Deprecated way to shutodwn the TPU system.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\dtensor\\python\\tpu_util.py",
    "ast_data": "FunctionDef name:dtensor_shutdown_tpu_system arguments Call"
  },
  {
    "library": "sphinx",
    "name": "patmatch",
    "source_code": "def patmatch(name: str, pat: str) -> re.Match[str] | None:\n    if pat not in _pat_cache:\n        _pat_cache[pat] = re.compile(_translate_pattern(pat))\n    return _pat_cache[pat].match(name)",
    "docstring": "Return if name matches the regular expression (pattern) ```. Adapted from fnmatch module.",
    "type": "function",
    "file_path": "sphinx\\sphinx\\util\\matching.py",
    "ast_data": "FunctionDef name:patmatch arg:name arg:pat arguments arg arg If Compare Assign Call Call Return return:yes Call"
  },
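A hedged usage sketch for ``patmatch``; the pattern is compiled once and memoized in ``_pat_cache``, so repeated calls with the same pattern reuse the compiled regex.

    from sphinx.util.matching import patmatch

    print(bool(patmatch("docs/index.rst", "docs/*.rst")))     # True
    print(bool(patmatch("docs/img/logo.png", "docs/*.rst")))  # False: '*' stops at '/'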
  {
    "library": "kornia",
    "name": "BrownConradyModel",
    "source_code": "class BrownConradyModel(CameraModelBase):\n\n    def __init__(self, image_size: ImageSize, params: Tensor) -> None:\n        if params.shape[-1] != 12 or len(params.shape) > 2:\n            raise ValueError('params must be of shape (B, 12) for BROWN_CONRADY Camera')\n        super().__init__(BrownConradyTransform(), Z1Projection(), image_size, params)",
    "docstring": "Brown Conrady Camera Model.",
    "type": "class",
    "file_path": "kornia\\kornia\\sensors\\camera\\camera_model.py",
    "ast_data": "ClassDef name:BrownConradyModel FunctionDef name:__init__ arg:self arg:image_size arg:params arguments arg arg arg If BoolOp Compare Compare Call Raise Call Call Call Call Call"
  },
  {
    "library": "scikit-learn",
    "name": "_rfe_single_fit",
    "source_code": "def _rfe_single_fit(rfe, estimator, X, y, train, test, scorer, routed_params):\n    X_train, y_train = _safe_split(estimator, X, y, train)\n    X_test, y_test = _safe_split(estimator, X, y, test, train)\n    fit_params = _check_method_params(X, params=routed_params.estimator.fit, indices=train)\n    score_params = _check_method_params(X=X, params=routed_params.scorer.score, indices=test)\n    rfe._fit(X_train, y_train, lambda estimator, features: _score(estimator, X_test[:, features], y_test, scorer, score_params=score_params), **fit_params)\n    return (rfe.step_scores_, rfe.step_support_, rfe.step_ranking_, rfe.step_n_features_)",
    "docstring": "Return the score and n_features per step for a fit across one fold.",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\feature_selection\\_rfe.py",
    "ast_data": "FunctionDef name:_rfe_single_fit arg:rfe arg:estimator arg:X arg:y arg:train arg:test arg:scorer arg:routed_params arguments arg arg arg arg arg arg arg arg Assign Call Assign Call Assign Call Assign Call Call arguments arg arg Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, name, description, *labels):\n    super(Counter, self).__init__('Counter', _counter_methods, len(labels), name, description, *labels)",
    "docstring": "Creates a new Counter. Args: name: name of the new metric. description: description of the new metric. *labels: The label list of the new metric.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\monitoring.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:name arg:description arguments arg arg arg arg Call Call Call"
  },
  {
    "library": "sphinx",
    "name": "_template_basename",
    "source_code": "def _template_basename(filename: Path) -> Path | None:\n    basename = filename.name.lower()\n    if basename.endswith('_t'):\n        return filename.with_name(filename.name[:-2])\n    elif basename.endswith('.jinja'):\n        return filename.with_name(filename.name[:-6])\n    return None",
    "docstring": "Given an input filename: If the input looks like a template, then return the filename output should be written to. Otherwise, return no result (None).",
    "type": "function",
    "file_path": "sphinx\\sphinx\\util\\fileutil.py",
    "ast_data": "FunctionDef name:_template_basename arg:filename arguments arg Assign Call If Call Return return:yes Call If Call Return return:yes Call Return return:no"
  },
  {
    "library": "tensorflow",
    "name": "run_server",
    "source_code": "def run_server(self, blocking=True):\n    self._server_lock.acquire()\n    try:\n        if self._stop_requested:\n            raise ValueError('Server has already stopped')\n        if self._server_started:\n            raise ValueError('Server has already started running')\n        no_max_message_sizes = [('grpc.max_receive_message_length', -1), ('grpc.max_send_message_length', -1)]\n        self.server = grpc.server(futures.ThreadPoolExecutor(max_workers=10), options=no_max_message_sizes)\n        debug_service_pb2_grpc.add_EventListenerServicer_to_server(self, self.server)\n        self.server.add_insecure_port('[::]:%d' % self._server_port)\n        self.server.start()\n        self._server_started = True\n    finally:\n        self._server_lock.release()\n    if blocking:\n        while not self._stop_requested:\n            time.sleep(1.0)",
    "docstring": "Start running the server. Args: blocking: If , block until is invoked. Raises: ValueError: If server stop has already been requested, or if the server has already started running.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\debug\\lib\\grpc_debug_server.py",
    "ast_data": "FunctionDef name:run_server arg:self arg:blocking arguments arg arg Call Try If Raise Call If Raise Call Assign Assign Call Call Call Call Call Assign Call If While Call"
  },
  {
    "library": "scipy",
    "name": "Katsuura",
    "source_code": "class Katsuura(Benchmark):\n    change_dimensionality = True\n\n    def __init__(self, dimensions=2):\n        Benchmark.__init__(self, dimensions)\n        self._bounds = list(zip([0.0] * self.N, [100.0] * self.N))\n        self.global_optimum = [[0.0 for _ in range(self.N)]]\n        self.custom_bounds = [(0, 1), (0, 1)]\n        self.fglob = 1.0\n\n    def fun(self, x, *args):\n        self.nfev += 1\n        d = 32\n        k = atleast_2d(arange(1, d + 1)).T\n        i = arange(0.0, self.N * 1.0)\n        inner = round(2 ** k * x) * 2.0 ** (-k)\n        return prod(sum(inner, axis=0) * (i + 1) + 1)",
    "docstring": "Katsuura objective function. This class defines the Katsuura [1]_ global optimization problem. This is a multimodal minimization problem defined as follows: .. math:: f_{\\text{Katsuura}}(x) = \\prod_{i=0}^{n-1} \\left [ 1 + (i+1) \\sum_{k=1}^{d} \\lfloor (2^k x_i) \\rfloor 2^{-k} \\right ] Where, in this exercise, :math:. Here, :math: represents the number of dimensions and :math: for :math:. *Global optimum*: :math: for :math: for :math:. .. [1] Adorio, E. MVF - \"Multivariate Test Functions Library in C for Unconstrained Global Optimization\", 2005 .. [2] Gavana, A. Global Optimization Benchmarks and AMPGO retrieved 2015 TODO: Adorio has wrong global minimum. Adorio uses round, Gavana docstring uses floor, but Gavana code uses round. We'll use round...",
    "type": "class",
    "file_path": "scipy\\benchmarks\\benchmarks\\go_benchmark_functions\\go_funcs_K.py",
    "ast_data": "ClassDef name:Katsuura Assign FunctionDef name:__init__ arg:self arg:dimensions arguments arg arg Call Assign Call Call Assign Call Assign Assign FunctionDef name:fun arg:self arg:x arguments arg arg arg Assign Assign Call Call Assign Call Assign Call Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_coordinate_where",
    "source_code": "def _coordinate_where(condition):\n    if not isinstance(condition, ragged_tensor.RaggedTensor):\n        return array_ops.where(condition)\n    selected_coords = _coordinate_where(condition.values)\n    condition = condition.with_row_splits_dtype(selected_coords.dtype)\n    first_index = selected_coords[:, 0]\n    selected_rows = array_ops.gather(condition.value_rowids(), first_index)\n    selected_row_starts = array_ops.gather(condition.row_splits, selected_rows)\n    selected_cols = first_index - selected_row_starts\n    return array_ops.concat([array_ops.expand_dims(selected_rows, 1), array_ops.expand_dims(selected_cols, 1), selected_coords[:, 1:]], axis=1)",
    "docstring": "Ragged version of tf.where(condition).",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\ragged_where_op.py",
    "ast_data": "FunctionDef name:_coordinate_where arg:condition arguments arg If Call Return return:yes Call Assign Call Assign Call Assign Assign Call Call Assign Call Assign Return return:yes Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "OpRegGen",
    "source_code": "class OpRegGen(transpiler.GenericTranspiler):\n\n    def transform_ast(self, node, ctx):\n        gen = OpRegGenImpl(ctx)\n        gen.visit(node)\n        return gen.code_buffer",
    "docstring": "Transforms Python objects into TFR MLIR source code.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\compiler\\mlir\\tfr\\python\\op_reg_gen.py",
    "ast_data": "ClassDef name:OpRegGen FunctionDef name:transform_ast arg:self arg:node arg:ctx arguments arg arg arg Assign Call Call Return return:yes"
  },
  {
    "library": "django",
    "name": "_check_pattern_startswith_slash",
    "source_code": "def _check_pattern_startswith_slash(self):\n    if not settings.APPEND_SLASH:\n        return []\n    if self._regex.startswith(('/', '^/', '^\\\\/')) and (not self._regex.endswith('/')):\n        warning = Warning(\"Your URL pattern {} has a route beginning with a '/'. Remove this slash as it is unnecessary. If this pattern is targeted in an include(), ensure the include() pattern has a trailing '/'.\".format(self.describe()), id='urls.W002')\n        return [warning]\n    else:\n        return []",
    "docstring": "Check that the pattern does not begin with a forward slash.",
    "type": "method",
    "file_path": "django\\django\\urls\\resolvers.py",
    "ast_data": "FunctionDef name:_check_pattern_startswith_slash arg:self arguments arg If Return return:no If BoolOp Call Call Assign Call Call Call Return return:yes Return return:no"
  },
  {
    "library": "pandas",
    "name": "to_flat_index",
    "source_code": "def to_flat_index(self) -> Index:\n    return Index(self._values, tupleize_cols=False)",
    "docstring": "Convert a MultiIndex to an Index of Tuples containing the level values. Returns ------- pd.Index Index with the MultiIndex data represented in Tuples. See Also -------- MultiIndex.from_tuples : Convert flat index back to MultiIndex. Notes ----- This method will simply return the caller if called by anything other than a MultiIndex. Examples -------- >>> index = pd.MultiIndex.from_product( ... [[\"foo\", \"bar\"], [\"baz\", \"qux\"]], names=[\"a\", \"b\"] ... ) >>> index.to_flat_index() Index([('foo', 'baz'), ('foo', 'qux'), ('bar', 'baz'), ('bar', 'qux')], dtype='object')",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\indexes\\multi.py",
    "ast_data": "FunctionDef name:to_flat_index arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "_ch_helper",
    "source_code": "def _ch_helper(gamma, s, r, h, p0, p1, x):\n    xg = x ** gamma\n    a = h * xg * (1 - xg) / 2\n    phi = 2 * np.pi * (s / 3 + r * x)\n    return xg + a * (p0 * np.cos(phi) + p1 * np.sin(phi))",
    "docstring": "Helper function for generating picklable cubehelix colormaps.",
    "type": "function",
    "file_path": "matplotlib\\lib\\matplotlib\\_cm.py",
    "ast_data": "FunctionDef name:_ch_helper arg:gamma arg:s arg:r arg:h arg:p0 arg:p1 arg:x arguments arg arg arg arg arg arg arg Assign Assign Assign Return return:yes Call Call"
  },
  {
    "library": "scikit-learn",
    "name": "densify",
    "source_code": "def densify(self):\n    msg = 'Estimator, %(name)s, must be fitted before densifying.'\n    check_is_fitted(self, msg=msg)\n    if sp.issparse(self.coef_):\n        self.coef_ = self.coef_.toarray()\n    return self",
    "docstring": "Convert coefficient matrix to dense array format. Converts the `` and is required for fitting, so calling this method is only required on models that have previously been sparsified; otherwise, it is a no-op. Returns ------- self Fitted estimator.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\linear_model\\_base.py",
    "ast_data": "FunctionDef name:densify arg:self arguments arg Assign Call If Call Assign Call Return return:yes"
  },
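A hedged round-trip sketch: ``sparsify()`` stores ``coef_`` as a scipy sparse matrix and ``densify()`` converts it back, as the docstring above describes.

    import numpy as np
    from sklearn.linear_model import SGDClassifier

    clf = SGDClassifier().fit(np.array([[0.0, 1.0], [1.0, 0.0]]), [0, 1])
    clf.sparsify()
    print(type(clf.coef_))  # a scipy sparse matrix
    clf.densify()
    print(type(clf.coef_))  # numpy.ndarray again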
  {
    "library": "cherrypy",
    "name": "_apply",
    "source_code": "def _apply(self, config):\n    which_env = config.get('environment')\n    if which_env:\n        env = self.environments[which_env]\n        for k in env:\n            if k not in config:\n                config[k] = env[k]\n    dict.update(self, config)\n    self.namespaces(config)",
    "docstring": "Update self from a dict.",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\lib\\reprconf.py",
    "ast_data": "FunctionDef name:_apply arg:self arg:config arguments arg arg Assign Call If Assign For If Compare Assign Call Call"
  },
  {
    "library": "matplotlib",
    "name": "_topmost_artist",
    "source_code": "def _topmost_artist(artists, _cached_max=functools.partial(max, key=operator.attrgetter('zorder'))):\n    return _cached_max(reversed(artists))",
    "docstring": "Get the topmost artist of a list. In case of a tie, return the *last* of the tied artists, as it will be drawn on top of the others. returns the first maximum in case of ties, so we need to iterate over the list in reverse order.",
    "type": "function",
    "file_path": "matplotlib\\lib\\matplotlib\\cbook.py",
    "ast_data": "FunctionDef name:_topmost_artist arg:artists arg:_cached_max arguments arg arg Call Call Return return:yes Call Call"
  },
  {
    "library": "kornia",
    "name": "ClassificationHead",
    "source_code": "class ClassificationHead(nn.Module):\n\n    def __init__(self, embed_size: int=768, num_classes: int=10) -> None:\n        super().__init__()\n        self.norm = nn.LayerNorm(embed_size)\n        self.linear = nn.Linear(embed_size, num_classes)\n\n    def forward(self, x: torch.Tensor) -> torch.Tensor:\n        out = x.mean(-2)\n        return self.linear(self.norm(out))",
    "docstring": "Module to be used as a classification head. Args: embed_size: the logits tensor coming from the networks. num_classes: an integer representing the numbers of classes to classify. Example: >>> feat = torch.rand(1, 256, 256) >>> head = ClassificationHead(256, 10) >>> head(feat).shape torch.Size([1, 10])",
    "type": "class",
    "file_path": "kornia\\kornia\\contrib\\classification.py",
    "ast_data": "ClassDef name:ClassificationHead FunctionDef name:__init__ arg:self arg:embed_size arg:num_classes arguments arg arg arg Call Call Assign Call Assign Call FunctionDef name:forward arg:self arg:x arguments arg arg Assign Call Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "deprecated_arg_values",
    "source_code": "def deprecated_arg_values(date, instructions, warn_once=True, **deprecated_kwargs):\n    _validate_deprecation_args(date, instructions)\n    if not deprecated_kwargs:\n        raise ValueError('Specify which argument values are deprecated.')\n\n    def deprecated_wrapper(func):\n        decorator_utils.validate_callable(func, 'deprecated_arg_values')\n\n        @functools.wraps(func)\n        def new_func(*args, **kwargs):\n            if _PRINT_DEPRECATION_WARNINGS:\n                named_args = tf_inspect.getcallargs(func, *args, **kwargs)\n                for arg_name, arg_value in deprecated_kwargs.items():\n                    if arg_name in named_args and _safe_eq(named_args[arg_name], arg_value):\n                        if (func, arg_name) not in _PRINTED_WARNING:\n                            if warn_once:\n                                _PRINTED_WARNING[func, arg_name] = True\n                            _log_deprecation('From %s: calling %s (from %s) with %s=%s is deprecated and will be removed %s.\\nInstructions for updating:\\n%s', _call_location(), decorator_utils.get_qualified_name(func), func.__module__, arg_name, arg_value, 'in a future version' if date is None else 'after %s' % date, instructions)\n            return func(*args, **kwargs)\n        doc = _add_deprecated_arg_value_notice_to_docstring(func.__doc__, date, instructions, deprecated_kwargs)\n        return tf_decorator.make_decorator(func, new_func, 'deprecated', doc)\n    return deprecated_wrapper",
    "docstring": "Decorator for marking specific function argument values as deprecated. This decorator logs a deprecation warning whenever the decorated function is called with the deprecated argument values. It has the following format: Calling (from ) with = is deprecated and will be removed after . Instructions for updating: If is None, 'after ' is replaced with 'in a future version'. will include the class name if it is a method. It also edits the docstring of the function: ' (deprecated arguments)' is appended to the first line of the docstring and a deprecation notice is prepended to the rest of the docstring. Args: date: String or None. The date the function is scheduled to be removed. Must be ISO 8601 (YYYY-MM-DD), or None instructions: String. Instructions on how to update code using the deprecated function. warn_once: If , warn only the first time this function is called with deprecated argument values. Otherwise, every call (with a deprecated argument value) will log a warning. **deprecated_kwargs: The deprecated argument values. Returns: Decorated function or method. Raises: ValueError: If date is not None or in ISO 8601 format, or instructions are empty.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\util\\deprecation.py",
    "ast_data": "FunctionDef name:deprecated_arg_values arg:date arg:instructions arg:warn_once arguments arg arg arg arg Call If Raise Call FunctionDef name:deprecated_wrapper arg:func arguments arg Call FunctionDef name:new_func arguments arg arg If Assign Call For Call If BoolOp Compare Call If Compare If Assign Call Call Call Compare Return return:yes Call Call Assign Call Return return:yes Call Return return:yes"
  },
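A hedged usage sketch of the decorator above; the function ``load`` and its ``normalize`` argument are hypothetical.

    @deprecated_arg_values(
        "2024-01-01",
        "Pass normalize=False and scale the inputs yourself.",
        normalize=True)
    def load(data, normalize=False):  # hypothetical function
        return data

    load([1, 2, 3], normalize=True)  # logs the deprecation warning (once)
    load([1, 2, 3])                  # no warning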
  {
    "library": "scipy",
    "name": "ShowDirs",
    "source_code": "@cli.cls_cmd('show_PYTHONPATH')\nclass ShowDirs(Python):\n    ctx = CONTEXT\n    pythonpath = Python.pythonpath\n    extra_argv = Python.extra_argv\n\n    @classmethod\n    def run(cls, pythonpath, extra_argv, **kwargs):\n        cls._setup(pythonpath, **kwargs)\n        py_path = os.environ.get('PYTHONPATH', '')\n        click.echo(f'PYTHONPATH={py_path}')",
    "docstring": ":information: Show value of the PYTHONPATH environment variable used in this script. PYTHONPATH sets the default search path for module files for the interpreter. Here, it includes the path to the local SciPy build (typically ). Use the global option to skip the building step, e.g.:",
    "type": "class",
    "file_path": "scipy\\dev.py",
    "ast_data": "ClassDef name:ShowDirs Assign Assign Assign FunctionDef name:run arg:cls arg:pythonpath arg:extra_argv arguments arg arg arg arg Call Assign Call Call Call"
  },
  {
    "library": "matplotlib",
    "name": "PickEvent",
    "source_code": "class PickEvent(Event):\n\n    def __init__(self, name, canvas, mouseevent, artist, guiEvent=None, **kwargs):\n        if guiEvent is None:\n            guiEvent = mouseevent.guiEvent\n        super().__init__(name, canvas, guiEvent)\n        self.mouseevent = mouseevent\n        self.artist = artist\n        self.__dict__.update(kwargs)",
    "docstring": "A pick event. This event is fired when the user picks a location on the canvas sufficiently close to an artist that has been made pickable with . A PickEvent has a number of special attributes in addition to those defined by the parent class. Attributes ---------- mouseevent : The mouse event that generated the pick. artist : The picked artist. Note that artists are not pickable by default (see ). other Additional attributes may be present depending on the type of the picked object; e.g., a pick may define different extra attributes than a pick. Examples -------- Bind a function `` to pick events, that prints the coordinates of the picked data point:: ax.plot(np.rand(100), 'o', picker=5) # 5 points tolerance def on_pick(event): line = event.artist xdata, ydata = line.get_data() ind = event.ind print(f'on pick line: {xdata[ind]:.3f}, {ydata[ind]:.3f}') cid = fig.canvas.mpl_connect('pick_event', on_pick)",
    "type": "class",
    "file_path": "matplotlib\\lib\\matplotlib\\backend_bases.py",
    "ast_data": "ClassDef name:PickEvent FunctionDef name:__init__ arg:self arg:name arg:canvas arg:mouseevent arg:artist arg:guiEvent arguments arg arg arg arg arg arg arg If Compare Assign Call Call Assign Assign Call"
  },
  {
    "library": "tensorflow",
    "name": "identity",
    "source_code": "def identity(self):\n    return self._implementation.identity()",
    "docstring": "Returns a TensorArray with the same content and properties. Returns: A new TensorArray object with flow that ensures the control dependencies from the contexts will become control dependencies for writes, reads, etc. Use this object for all subsequent operations.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\tensor_array_ops.py",
    "ast_data": "FunctionDef name:identity arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self):\n    self._weights = {}",
    "docstring": "Initializes the loss scale class.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\training\\experimental\\loss_scale.py",
    "ast_data": "FunctionDef name:__init__ arg:self arguments arg Assign"
  },
  {
    "library": "pytorch",
    "name": "_DimHintType",
    "source_code": "class _DimHintType(Enum):\n    AUTO = auto()\n    STATIC = auto()\n    DYNAMIC = auto()",
    "docstring": "Enum for dynamic shape hints. - AUTO means automatic inference of shape (static or dynamic). - STATIC means static shape (always specialized). - DYNAMIC means dynamic, will error out if specialized.",
    "type": "class",
    "file_path": "pytorch\\torch\\export\\dynamic_shapes.py",
    "ast_data": "ClassDef name:_DimHintType Assign Call Assign Call Assign Call"
  },
  {
    "library": "tensorflow",
    "name": "find",
    "source_code": "def find(self, predicate, first_n=0, device_name=None, exclude_node_names=None):\n    if exclude_node_names:\n        exclude_node_names = re.compile(exclude_node_names)\n    matched_data = []\n    for device in self._dump_tensor_data if device_name is None else (self._dump_tensor_data[device_name],):\n        for datum in self._dump_tensor_data[device]:\n            if exclude_node_names and exclude_node_names.match(datum.node_name):\n                continue\n            if predicate(datum, datum.get_tensor()):\n                matched_data.append(datum)\n                if first_n > 0 and len(matched_data) >= first_n:\n                    return matched_data\n    return matched_data",
    "docstring": "Find dumped tensor data by a certain predicate. Args: predicate: A callable that takes two input arguments: where is an instance of , which carries the metadata, such as the 's node name, output slot timestamp, debug op name, etc.; and is the dumped tensor value as a . first_n: () return only the first n instances (in time order) for which the predicate returns True. To return all the instances, let first_n be <= 0. device_name: optional device name. exclude_node_names: Optional regular expression to exclude nodes with names matching the regular expression. Returns: A list of all objects in this object for which predicate returns True, sorted in ascending order of the timestamp.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\debug\\lib\\debug_data.py",
    "ast_data": "FunctionDef name:find arg:self arg:predicate arg:first_n arg:device_name arg:exclude_node_names arguments arg arg arg arg arg If Assign Call Assign For Compare For If BoolOp Call If Call Call Call If BoolOp Compare Compare Call Return return:yes Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "FunctionDefSplitter",
    "source_code": "class FunctionDefSplitter(SplitBasedOnSize):\n\n    def build_chunks(self) -> int:\n        size_diff = 0\n        if _GREEDY_SPLIT(self.proto_size) and (not _ABOVE_MAX_SIZE(self.proto_size)):\n            size_diff += LargeMessageSplitter(self._proto, self.proto_size, parent_splitter=self, fields_in_parent=[]).build_chunks()\n        if _ABOVE_MAX_SIZE(self.proto_size):\n            size_diff += RepeatedMessageSplitter(self._proto, 'node_def', [ConstantNodeDefSplitter, LargeMessageSplitter], parent_splitter=self, fields_in_parent=[]).build_chunks()\n        return size_diff",
    "docstring": "Splits the FunctionDef message type.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\tools\\proto_splitter\\split_graph_def.py",
    "ast_data": "ClassDef name:FunctionDefSplitter FunctionDef name:build_chunks arg:self arguments arg Assign If BoolOp Call Call Call Call If Call Call Call Return return:yes"
  },
  {
    "library": "numpy",
    "name": "ones",
    "source_code": "@finalize_array_function_like\n@set_module('numpy')\ndef ones(shape, dtype=None, order='C', *, device=None, like=None):\n    if like is not None:\n        return _ones_with_like(like, shape, dtype=dtype, order=order, device=device)\n    a = empty(shape, dtype, order, device=device)\n    multiarray.copyto(a, 1, casting='unsafe')\n    return a",
    "docstring": "Return a new array of given shape and type, filled with ones. Parameters ---------- shape : int or sequence of ints Shape of the new array, e.g., `numpy.int8numpy.float64` if passed. .. versionadded:: 2.0.0 ${ARRAY_FUNCTION_LIKE} .. versionadded:: 1.20.0 Returns ------- out : ndarray Array of ones with the given shape, dtype, and order. See Also -------- ones_like : Return an array of ones with shape and type of input. empty : Return a new uninitialized array. zeros : Return a new array setting values to zero. full : Return a new array of given shape filled with value. Examples -------- >>> import numpy as np >>> np.ones(5) array([1., 1., 1., 1., 1.]) >>> np.ones((5,), dtype=int) array([1, 1, 1, 1, 1]) >>> np.ones((2, 1)) array([[1.], [1.]]) >>> s = (2,2) >>> np.ones(s) array([[1., 1.], [1., 1.]])",
    "type": "function",
    "file_path": "numpy\\numpy\\_core\\numeric.py",
    "ast_data": "FunctionDef name:ones arg:shape arg:dtype arg:order arguments arg arg arg arg arg If Compare Return return:yes Call Assign Call Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "SessionRunContext",
    "source_code": "@tf_export(v1=['train.SessionRunContext'])\nclass SessionRunContext:\n\n    def __init__(self, original_args, session):\n        self._original_args = original_args\n        self._session = session\n        self._stop_requested = False\n\n    @property\n    def original_args(self):\n        return self._original_args\n\n    @property\n    def session(self):\n        return self._session\n\n    @property\n    def stop_requested(self):\n        return self._stop_requested\n\n    def request_stop(self):\n        self._stop_requested = True",
    "docstring": "Provides information about the call being made. Provides information about original request to function. SessionRunHook objects can stop the loop by calling of . In the future we may use this object to add more information about run without changing the Hook API.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\training\\session_run_hook.py",
    "ast_data": "ClassDef name:SessionRunContext FunctionDef name:__init__ arg:self arg:original_args arg:session arguments arg arg arg Assign Assign Assign FunctionDef name:original_args arg:self arguments arg Return return:yes FunctionDef name:session arg:self arguments arg Return return:yes FunctionDef name:stop_requested arg:self arguments arg Return return:yes FunctionDef name:request_stop arg:self arguments arg Assign Call"
  },
  {
    "library": "scipy",
    "name": "run",
    "source_code": "def run(self, f, jac, y0, t0, t1, f_params, jac_params):\n    raise NotImplementedError('all integrators must define run(f, jac, t0, t1, y0, f_params, jac_params)')",
    "docstring": "Integrate from t=t0 to t=t1 using y0 as an initial condition. Return 2-tuple (y1,t1) where y1 is the result and t=t1 defines the stoppage coordinate of the result.",
    "type": "method",
    "file_path": "scipy\\scipy\\integrate\\_ode.py",
    "ast_data": "FunctionDef name:run arg:self arg:f arg:jac arg:y0 arg:t0 arg:t1 arg:f_params arg:jac_params arguments arg arg arg arg arg arg arg arg Raise Call"
  },
  {
    "library": "tensorflow",
    "name": "with_pprof_output",
    "source_code": "def with_pprof_output(self, pprof_file):\n    self._options['output'] = 'pprof:outfile=%s' % pprof_file\n    return self",
    "docstring": "Generate a pprof profile gzip file. To use the pprof file: pprof -png --nodecount=100 --sample_index=1 Args: pprof_file: filename for output, usually suffixed with .pb.gz. Returns: self.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\profiler\\option_builder.py",
    "ast_data": "FunctionDef name:with_pprof_output arg:self arg:pprof_file arguments arg arg Assign Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_from_values",
    "source_code": "@staticmethod\ndef _from_values(data, mask):\n\n    class Constructor(torch.autograd.Function):\n\n        @staticmethod\n        def forward(ctx, data, mask):\n            return MaskedTensor(data, mask)\n\n        @staticmethod\n        def backward(ctx, grad_output):\n            return (grad_output, None)\n    result = Constructor.apply(data, mask)\n    return result",
    "docstring": "Differentiable constructor for MaskedTensor",
    "type": "method",
    "file_path": "pytorch\\torch\\masked\\maskedtensor\\core.py",
    "ast_data": "FunctionDef name:_from_values arg:data arg:mask arguments arg arg ClassDef name:Constructor FunctionDef name:forward arg:ctx arg:data arg:mask arguments arg arg arg Return return:yes Call FunctionDef name:backward arg:ctx arg:grad_output arguments arg arg Return return:yes Assign Call Return return:yes"
  },
  {
    "library": "authlib",
    "name": "init_app",
    "source_code": "def init_app(self, app, cache=None, fetch_token=None, update_token=None):\n    self.app = app\n    if cache is not None:\n        self.cache = cache\n    if fetch_token:\n        self.fetch_token = fetch_token\n    if update_token:\n        self.update_token = update_token\n    app.extensions = getattr(app, 'extensions', {})\n    app.extensions['authlib.integrations.flask_client'] = self",
    "docstring": "Initialize lazy for Flask app. This is usually used for Flask application factory pattern.",
    "type": "method",
    "file_path": "authlib\\authlib\\integrations\\flask_client\\__init__.py",
    "ast_data": "FunctionDef name:init_app arg:self arg:app arg:cache arg:fetch_token arg:update_token arguments arg arg arg arg arg Assign If Compare Assign If Assign If Assign Assign Call Assign"
  },
  {
    "library": "tensorflow",
    "name": "_EagerTensorCache",
    "source_code": "class _EagerTensorCache(object):\n    __slots__ = ['_data', '_max_items', '_max_tensor_size']\n\n    def __init__(self, max_items=256, max_tensor_size=10000):\n        self._data = collections.OrderedDict()\n        self._max_items = max_items\n        self._max_tensor_size = max_tensor_size\n\n    def put(self, key, value):\n        if value._num_elements() > self._max_tensor_size:\n            return\n        self._data[key] = value\n        if len(self._data) > self._max_items:\n            self._data.popitem(last=False)\n\n    def get(self, key):\n        return self._data.get(key, None)\n\n    def flush(self):\n        self._data.clear()",
    "docstring": "Simple cache which evicts items based on length in a FIFO manner.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\context.py",
    "ast_data": "ClassDef name:_EagerTensorCache Assign FunctionDef name:__init__ arg:self arg:max_items arg:max_tensor_size arguments arg arg arg Assign Call Assign Assign FunctionDef name:put arg:self arg:key arg:value arguments arg arg arg If Compare Call Return return:no Assign If Compare Call Call FunctionDef name:get arg:self arg:key arguments arg arg Return return:yes Call FunctionDef name:flush arg:self arguments arg Call"
  },
  {
    "library": "pytorch",
    "name": "fuse_known_modules",
    "source_code": "def fuse_known_modules(mod_list, is_qat, additional_fuser_method_mapping=None):\n    types = tuple((type_before_parametrizations(m) for m in mod_list))\n    fuser_method = get_fuser_method(types, additional_fuser_method_mapping)\n    if fuser_method is None:\n        raise NotImplementedError(f'Cannot fuse modules: {types}')\n    new_mod: list[Optional[nn.Module]] = [None] * len(mod_list)\n    fused = fuser_method(is_qat, *mod_list)\n    for pre_hook_fn in mod_list[0]._forward_pre_hooks.values():\n        fused.register_forward_pre_hook(pre_hook_fn)\n    mod_list[0]._forward_pre_hooks.clear()\n    for hook_fn in mod_list[-1]._forward_hooks.values():\n        fused.register_forward_hook(hook_fn)\n    mod_list[-1]._forward_hooks.clear()\n    new_mod[0] = fused\n    for i in range(1, len(mod_list)):\n        identity = nn.Identity()\n        identity.training = mod_list[0].training\n        new_mod[i] = identity\n    return new_mod",
    "docstring": "Return a list of known fuse modules. Returns a list of modules that fuses the operations specified in the input module list. Fuses only the following sequence of modules: conv, bn conv, bn, relu conv, relu linear, bn linear, relu For these sequences, the first element in the output module list performs the fused operation. The rest of the elements are set to nn.Identity()",
    "type": "function",
    "file_path": "pytorch\\torch\\ao\\quantization\\fuse_modules.py",
    "ast_data": "FunctionDef name:fuse_known_modules arg:mod_list arg:is_qat arg:additional_fuser_method_mapping arguments arg arg arg Assign Call Call Assign Call If Compare Raise Call Call Assign Call For Call Call Call For Call Call Call Assign For Call Call Assign Call Assign Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_update_indexed_slices_param",
    "source_code": "def _update_indexed_slices_param(graph, loop_vars, init_slices, input_slices, output_slices, old_output_slices):\n    structured_idx = _get_tensor_index_in_iterable(graph.structured_outputs, old_output_slices)\n    flat_idx = _get_tensor_index_in_iterable(graph.outputs, func_graph.flatten(old_output_slices)[0])\n    graph.structured_outputs[structured_idx] = output_slices\n    graph.outputs = func_graph.flatten(graph.structured_outputs)\n    graph.inputs = graph.inputs[:flat_idx] + _flatten(input_slices) + graph.inputs[flat_idx + 1:]\n    return loop_vars[:flat_idx] + _flatten(init_slices) + loop_vars[flat_idx + 1:]",
    "docstring": "Updates graph with new IndexedSlices input/output. Updates graph's metadata to output the gradient computation defined by init_slices, input_slices, and output_slices, instead of outputting old_output_slices. Also returns a new version of loop_vars with init_slices replacing the old input. Args: graph: _WhileBodyGradFuncGraph. loop_vars: the inputs to graph. init_slices: the new IndexedSlices to use as input to graph. input_slices: the new IndexedSlices in graph that should be fed by init_slices. output_slices: the new IndexedSlices in graph that should be the corresponding output to input_slices. old_output_slices: the IndexedSlices in graph that are currently being output. Returns: New loop_vars to pass to graph.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\while_v2_indexed_slices_rewriter.py",
    "ast_data": "FunctionDef name:_update_indexed_slices_param arg:graph arg:loop_vars arg:init_slices arg:input_slices arg:output_slices arg:old_output_slices arguments arg arg arg arg arg arg Assign Call Assign Call Call Assign Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "split_aot_inductor_output_path",
    "source_code": "@functools.lru_cache(None)\ndef split_aot_inductor_output_path(path: str) -> tuple[str, str]:\n    if path.endswith('.so'):\n        return os.path.split(path)\n    elif path.endswith('.pt2'):\n        return os.path.split(path)\n    else:\n        return (path, '')",
    "docstring": "Returns the path where the AOT Inductor compiled kernels are stored.",
    "type": "function",
    "file_path": "pytorch\\torch\\_inductor\\codecache.py",
    "ast_data": "FunctionDef name:split_aot_inductor_output_path arg:path arguments arg If Call Return return:yes Call If Call Return return:yes Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "value",
    "source_code": "def value(self):\n    return pywrap_tfe.TFE_MonitoringBoolGaugeCellValue(self._cell)",
    "docstring": "Retrieves the current value.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\monitoring.py",
    "ast_data": "FunctionDef name:value arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "sphinx",
    "name": "parse_stop_word",
    "source_code": "def parse_stop_word(source: str) -> frozenset[str]:\n    stop_words: set[str] = set()\n    for line in source.splitlines():\n        stop_words.update(line.partition('|')[0].split())\n    return frozenset(stop_words)",
    "docstring": "Collect the stopwords from a snowball style word list: .. code:: text list of space separated stop words | optional comment",
    "type": "function",
    "file_path": "sphinx\\utils\\generate_snowball.py",
    "ast_data": "FunctionDef name:parse_stop_word arg:source arguments arg Call For Call Call Call Call Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "get_object",
    "source_code": "@classmethod\ndef get_object(cls, obj, transposed: bool):\n    if transposed:\n        obj = obj.T\n    return obj",
    "docstring": "these are written transposed",
    "type": "method",
    "file_path": "pandas\\pandas\\io\\pytables.py",
    "ast_data": "FunctionDef name:get_object arg:cls arg:obj arg:transposed arguments arg arg arg If Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_freeze_keras_model",
    "source_code": "@convert_phase(Component.PREPARE_TF_MODEL, SubComponent.FREEZE_KERAS_MODEL)\ndef _freeze_keras_model(self):\n    input_signature = None\n    if not isinstance(self._keras_model.call, _def_function.Function):\n        input_signature = _model_input_signature(self._keras_model, keep_original_batch_size=True)\n    func = _trace_model_call(self._keras_model, input_signature)\n    concrete_func = func.get_concrete_function()\n    self._funcs = [concrete_func]\n    frozen_func, graph_def = _convert_to_constants.convert_variables_to_constants_v2_as_graph(self._funcs[0], lower_control_flow=False)\n    input_tensors = [tensor for tensor in frozen_func.inputs if tensor.dtype != _dtypes.resource]\n    output_tensors = frozen_func.outputs\n    return (graph_def, input_tensors, output_tensors, frozen_func)",
    "docstring": "Freeze Keras model to frozen graph. Returns: graph_def: The frozen GraphDef. input_tensors: List of input tensors. output_tensors: List of output tensors. frozen_func: The frozen ConcreteFunction.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\lite\\python\\lite.py",
    "ast_data": "FunctionDef name:_freeze_keras_model arg:self arguments arg Assign If Call Assign Call Assign Call Assign Call Assign Assign Call Assign Compare Assign Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "_convert_empty_str_key",
    "source_code": "def _convert_empty_str_key(self) -> None:\n    if self.namespaces and '' in self.namespaces.keys():\n        self.namespaces[None] = self.namespaces.pop('', 'default')",
    "docstring": "Replace zero-length string in . This method will replace '' with None to align to requirement that empty string prefixes are not allowed.",
    "type": "method",
    "file_path": "pandas\\pandas\\io\\formats\\xml.py",
    "ast_data": "FunctionDef name:_convert_empty_str_key arg:self arguments arg If BoolOp Compare Call Assign Call"
  },
  {
    "library": "tensorflow",
    "name": "_SnapshotChunkDataset",
    "source_code": "class _SnapshotChunkDataset(dataset_ops.DatasetSource):\n\n    def __init__(self, chunk_file: str, element_spec: Any, compression: str):\n        self._chunk_file = chunk_file\n        self._element_spec = element_spec\n        variant_tensor = ged_ops.snapshot_chunk_dataset(chunk_file, compression=compression, **self._flat_structure)\n        super().__init__(variant_tensor)\n\n    @property\n    def element_spec(self) -> Any:\n        return self._element_spec",
    "docstring": "A dataset for one chunk file from a tf.data distributed snapshot.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\data\\ops\\load_op.py",
    "ast_data": "ClassDef name:_SnapshotChunkDataset FunctionDef name:__init__ arg:self arg:chunk_file arg:element_spec arg:compression arguments arg arg arg arg Assign Assign Assign Call Call Call FunctionDef name:element_spec arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_get_validated_sharding_policy",
    "source_code": "def _get_validated_sharding_policy(processing_mode) -> ShardingPolicy:\n    if isinstance(processing_mode, ShardingPolicy):\n        return processing_mode\n    if processing_mode == _PARALLEL_EPOCHS:\n        return ShardingPolicy.OFF\n    if processing_mode == _DISTRIBUTED_EPOCH:\n        return ShardingPolicy.DYNAMIC\n    raise ValueError(f'tf.data service processing mode should be a `tf.data.experimental.service.ShardingPolicy`, `\"parallel_epochs\"`, or `\"distributed_epoch\"`. Got {processing_mode!r}.')",
    "docstring": "Validates and converts it to ShardingPolicy.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\data\\experimental\\ops\\data_service_ops.py",
    "ast_data": "FunctionDef name:_get_validated_sharding_policy arg:processing_mode arguments arg If Call Return return:yes If Compare Return return:yes If Compare Return return:yes Raise Call"
  },
  {
    "library": "authlib",
    "name": "create_token_response",
    "source_code": "@hooked\ndef create_token_response(self):\n    user = self.request.user\n    scope = self.request.payload.scope\n    token = self.generate_token(user=user, scope=scope)\n    log.debug('Issue token %r to %r', token, self.client)\n    self.save_token(token)\n    return (200, token, self.TOKEN_RESPONSE_HEADER)",
    "docstring": "If the access token request is valid and authorized, the authorization server issues an access token and optional refresh token as described in Section 5.1. If the request failed client authentication or is invalid, the authorization server returns an error response as described in Section 5.2. An example successful response: .. code-block:: http HTTP/1.1 200 OK Content-Type: application/json Cache-Control: no-store Pragma: no-cache { \"access_token\":\"2YotnFZFEjr1zCsicMWpAA\", \"token_type\":\"example\", \"expires_in\":3600, \"refresh_token\":\"tGzv3JOkF0XG5Qx2TlKWIA\", \"example_parameter\":\"example_value\" } :returns: (status_code, body, headers)",
    "type": "method",
    "file_path": "authlib\\authlib\\oauth2\\rfc6749\\grants\\resource_owner_password_credentials.py",
    "ast_data": "FunctionDef name:create_token_response arg:self arguments arg Assign Assign Assign Call Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "get_default_mesh",
    "source_code": "@tf_export('experimental.dtensor.get_default_mesh', v1=[])\ndef get_default_mesh() -> Optional[layout_lib.Mesh]:\n    if _dtensor_singleton is None:\n        return None\n    else:\n        return _dtensor_singleton._current_default_mesh",
    "docstring": "Return the default mesh under the current dtensor device context. In the case that dtensor device system is not initialized, this function will return None. Returns: The current default mesh for the dtensor device context.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\dtensor\\python\\api.py",
    "ast_data": "FunctionDef name:get_default_mesh arguments If Compare Return return:no Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "fit_transform",
    "source_code": "@_fit_context(prefer_skip_nested_validation=True)\ndef fit_transform(self, X, y=None, sample_weight=None):\n    rnd = check_random_state(self.random_state)\n    y = rnd.uniform(size=_num_samples(X))\n    super().fit(X, y, sample_weight=sample_weight)\n    self.one_hot_encoder_ = OneHotEncoder(sparse_output=self.sparse_output)\n    output = self.one_hot_encoder_.fit_transform(self.apply(X))\n    self._n_features_out = output.shape[1]\n    return output",
    "docstring": "Fit estimator and transform dataset. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) Input data used to build forests. Use `` for maximum efficiency. y : Ignored Not used, present for API consistency by convention. sample_weight : array-like of shape (n_samples,), default=None Sample weights. If None, then samples are equally weighted. Splits that would create child nodes with net zero or negative weight are ignored while searching for a split in each node. In the case of classification, splits are also ignored if they would result in any single class carrying a negative weight in either child node. Returns ------- X_transformed : sparse matrix of shape (n_samples, n_out) Transformed dataset.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\ensemble\\_forest.py",
    "ast_data": "FunctionDef name:fit_transform arg:self arg:X arg:y arg:sample_weight arguments arg arg arg arg Assign Call Assign Call Call Call Call Assign Call Assign Call Call Assign Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "NullFrequencyError",
    "source_code": "class NullFrequencyError(ValueError):\n    pass",
    "docstring": "Exception raised when a ``. See Also -------- Index.shift : Shift values of Index. Series.shift : Shift values of Series. Examples -------- >>> df = pd.DatetimeIndex([\"2011-01-01 10:00\", \"2011-01-01\"], freq=None) >>> df.shift(2) Traceback (most recent call last): NullFrequencyError: Cannot shift with no freq",
    "type": "class",
    "file_path": "pandas\\pandas\\errors\\__init__.py",
    "ast_data": "ClassDef name:NullFrequencyError"
  },
  {
    "library": "matplotlib",
    "name": "reversed",
    "source_code": "def reversed(self, name=None):\n    if name is None:\n        name = self.name + '_r'\n    data_r = {key: functools.partial(self._reverser, data) if callable(data) else [(1.0 - x, y1, y0) for x, y0, y1 in reversed(data)] for key, data in self._segmentdata.items()}\n    new_cmap = LinearSegmentedColormap(name, data_r, self.N, self._gamma)\n    new_cmap._rgba_over = self._rgba_under\n    new_cmap._rgba_under = self._rgba_over\n    new_cmap._rgba_bad = self._rgba_bad\n    return new_cmap",
    "docstring": "Return a reversed instance of the Colormap. Parameters ---------- name : str, optional The name for the reversed colormap. If None, the name is set to ``. Returns ------- LinearSegmentedColormap The reversed colormap.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\colors.py",
    "ast_data": "FunctionDef name:reversed arg:self arg:name arguments arg arg If Compare Assign Assign Call Call Call Call Assign Call Assign Assign Assign Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "serialize",
    "source_code": "@classmethod\ndef serialize(cls, operation: 'GemmOperation', indent: int=2):\n    assert operation.__class__.__qualname__ == 'GemmOperation', 'Only GemmOperation objects are supported via the main API'\n    ret = json.dumps(cls._gemm_operation_to_json(operation), indent=indent)\n    return ret",
    "docstring": "Serialize a GEMM operation to JSON string. Args: operation: GemmOperation object indent: JSON indentation spaces Returns: str: JSON representation of the operation",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\codegen\\cuda\\serialization.py",
    "ast_data": "FunctionDef name:serialize arg:cls arg:operation arg:indent arguments arg arg arg Compare Assign Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "update_state",
    "source_code": "def update_state(self, y_true, y_pred, sample_weight=None):\n    y_true = math_ops.cast(y_true, self._dtype)\n    y_pred = math_ops.cast(y_pred, self._dtype)\n    [y_pred, y_true], sample_weight = metrics_utils.ragged_assert_compatible_and_get_flat_values([y_pred, y_true], sample_weight)\n    y_pred, y_true = losses_utils.squeeze_or_expand_dimensions(y_pred, y_true)\n    y_pred, self.normalizer = losses_utils.remove_squeezable_dimensions(y_pred, self.normalizer)\n    y_pred.shape.assert_is_compatible_with(y_true.shape)\n    relative_errors = math_ops.div_no_nan(math_ops.abs(y_true - y_pred), self.normalizer)\n    return super(MeanRelativeError, self).update_state(relative_errors, sample_weight=sample_weight)",
    "docstring": "Accumulates metric statistics. Args: y_true: The ground truth values. y_pred: The predicted values. sample_weight: Optional weighting of each example. Defaults to 1. Can be a whose rank is either 0, or the same rank as , and must be broadcastable to . Returns: Update op.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\metrics.py",
    "ast_data": "FunctionDef name:update_state arg:self arg:y_true arg:y_pred arg:sample_weight arguments arg arg arg arg Assign Call Assign Call Assign Call Assign Call Assign Call Call Assign Call Call Return return:yes Call Call"
  },
  {
    "library": "matplotlib",
    "name": "get_width_from_char_name",
    "source_code": "def get_width_from_char_name(self, name):\n    return self._metrics_by_name[name].width",
    "docstring": "Get the width of the character from a type1 character name.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\_afm.py",
    "ast_data": "FunctionDef name:get_width_from_char_name arg:self arg:name arguments arg arg Return return:yes"
  },
  {
    "library": "authlib",
    "name": "introspect_token",
    "source_code": "def introspect_token(self, token_string):\n    raise NotImplementedError()",
    "docstring": "Request introspection token endpoint with the given token string, authorization server will return token information in JSON format. Developers MUST implement this method before using it:: def introspect_token(self, token_string): # for example, introspection token endpoint has limited # internal IPs to access, so there is no need to add # authentication. url = \" resp = requests.post(url, data={\"token\": token_string}) resp.raise_for_status() return resp.json()",
    "type": "method",
    "file_path": "authlib\\authlib\\oauth2\\rfc7662\\token_validator.py",
    "ast_data": "FunctionDef name:introspect_token arg:self arg:token_string arguments arg arg Raise Call"
  },
  {
    "library": "kornia",
    "name": "StatsTracker",
    "source_code": "class StatsTracker:\n\n    def __init__(self) -> None:\n        self._stats: Dict[str, AverageMeter] = {}\n\n    @property\n    def stats(self) -> Dict[str, AverageMeter]:\n        return self._stats\n\n    def update(self, key: str, val: float, batch_size: int) -> None:\n        if key not in self._stats:\n            self._stats[key] = AverageMeter()\n        self._stats[key].update(val, batch_size)\n\n    def update_from_dict(self, dic: Dict[str, float], batch_size: int) -> None:\n        for k, v in dic.items():\n            self.update(k, v, batch_size)\n\n    def __repr__(self) -> str:\n        return ' '.join([f'{k.upper()}: {v.val:.2f} {v.val:.2f} ' for k, v in self._stats.items()])\n\n    def as_dict(self) -> Dict[str, AverageMeter]:\n        return self._stats",
    "docstring": "Stats tracker for computing metrics on the fly.",
    "type": "class",
    "file_path": "kornia\\kornia\\x\\utils.py",
    "ast_data": "ClassDef name:StatsTracker FunctionDef name:__init__ arg:self arguments arg FunctionDef name:stats arg:self arguments arg Return return:yes FunctionDef name:update arg:self arg:key arg:val arg:batch_size arguments arg arg arg arg If Compare Assign Call Call FunctionDef name:update_from_dict arg:self arg:dic arg:batch_size arguments arg arg arg For Call Call FunctionDef name:__repr__ arg:self arguments arg Return return:yes Call Call Call FunctionDef name:as_dict arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "map_to_output_names",
    "source_code": "def map_to_output_names(y_pred, output_names, struct):\n    single_output = not nest.is_nested(y_pred)\n    outputs_are_flat_list = not single_output and isinstance(y_pred, (list, tuple)) and (not any((nest.is_nested(y_p) for y_p in y_pred)))\n    if (single_output or outputs_are_flat_list) and isinstance(struct, dict):\n        output_names = output_names or create_pseudo_output_names(y_pred)\n        struct = copy.copy(struct)\n        new_struct = [struct.pop(name, None) for name in output_names]\n        if struct:\n            raise ValueError('Found unexpected keys that do not correspond to any Model output: {}. Expected: {}'.format(struct.keys(), output_names))\n        if len(new_struct) == 1:\n            return new_struct[0]\n        return new_struct\n    else:\n        return struct",
    "docstring": "Maps a dict to a list using as keys. This is a convenience feature only. When a 's outputs are a list, you can specify per-output losses and metrics as a dict, where the keys are the output names. If you specify per-output losses and metrics via the same structure as the 's outputs (recommended), no mapping is performed. For the Functional API, the output names are the names of the last layer of each output. For the Subclass API, the output names are determined by (For example: for a list of outputs). This mapping preserves backwards compatibility for and . Args: y_pred: Sample outputs of the Model, to determine if this convenience feature should be applied ( is returned unmodified if isn't a flat list). output_names: List. The names of the outputs of the Model. struct: The structure to map. Returns: mapped to a list in same order as .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\compile_utils.py",
    "ast_data": "FunctionDef name:map_to_output_names arg:y_pred arg:output_names arg:struct arguments arg arg arg Assign Call Assign BoolOp Call Call Call If BoolOp BoolOp Call Assign BoolOp Call Assign Call Assign Call If Raise Call Call Call If Compare Call Return return:yes Return return:yes Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "_fast_from_codes_and_verts",
    "source_code": "@classmethod\ndef _fast_from_codes_and_verts(cls, verts, codes, internals_from=None):\n    pth = cls.__new__(cls)\n    pth._vertices = _to_unmasked_float_array(verts)\n    pth._codes = codes\n    pth._readonly = False\n    if internals_from is not None:\n        pth._should_simplify = internals_from._should_simplify\n        pth._simplify_threshold = internals_from._simplify_threshold\n        pth._interpolation_steps = internals_from._interpolation_steps\n    else:\n        pth._should_simplify = True\n        pth._simplify_threshold = mpl.rcParams['path.simplify_threshold']\n        pth._interpolation_steps = 1\n    return pth",
    "docstring": "Create a Path instance without the expense of calling the constructor. Parameters ---------- verts : array-like codes : array internals_from : Path or None If not None, another from which the attributes `` by this constructor.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\path.py",
    "ast_data": "FunctionDef name:_fast_from_codes_and_verts arg:cls arg:verts arg:codes arg:internals_from arguments arg arg arg arg Assign Call Assign Call Assign Assign If Compare Assign Assign Assign Assign Assign Assign Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "get_valid_name",
    "source_code": "def get_valid_name(name):\n    return name.replace('.', '_')",
    "docstring": "Replaces '.' with '_' as names with '.' are invalid in data sparsifier",
    "type": "function",
    "file_path": "pytorch\\torch\\ao\\pruning\\_experimental\\data_sparsifier\\benchmarks\\dlrm_utils.py",
    "ast_data": "FunctionDef name:get_valid_name arg:name arguments arg Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "joined",
    "source_code": "def joined(self, a, b):\n    return self._grouper.joined(a, b)",
    "docstring": "Return whether *a* and *b* are members of the same set.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\cbook.py",
    "ast_data": "FunctionDef name:joined arg:self arg:a arg:b arguments arg arg arg Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "run_shgo",
    "source_code": "def run_shgo(self):\n    self.function.nfev = 0\n    t0 = time.time()\n    res = shgo(self.fun, self.bounds)\n    t1 = time.time()\n    res.success = self.function.success(res.x)\n    res.nfev = self.function.nfev\n    self.add_result(res, t1 - t0, 'SHGO')",
    "docstring": "Do an optimization run for shgo",
    "type": "method",
    "file_path": "scipy\\benchmarks\\benchmarks\\optimize.py",
    "ast_data": "FunctionDef name:run_shgo arg:self arguments arg Assign Assign Call Assign Call Assign Call Assign Call Assign Call"
  },
  {
    "library": "tensorflow",
    "name": "assign",
    "source_code": "def assign(self, value, use_locking=None, name=None, read_value=True):\n    with _handle_graph(self.handle):\n        value_tensor = ops.convert_to_tensor(value, dtype=self.dtype)\n        if not self._shape.is_compatible_with(value_tensor.shape):\n            if self.name is None:\n                tensor_name = ''\n            else:\n                tensor_name = ' ' + str(self.name)\n            raise ValueError(f\"Cannot assign value to variable '{tensor_name}': Shape mismatch.The variable shape {self._shape}, and the assigned value shape {value_tensor.shape} are incompatible.\")\n        kwargs = {}\n        if forward_compat.forward_compatible(2022, 3, 23):\n            validate_shape = self._validate_shape and self._shape.is_fully_defined()\n            kwargs['validate_shape'] = validate_shape\n        assign_op = gen_resource_variable_ops.assign_variable_op(self.handle, value_tensor, name=name, **kwargs)\n        if read_value:\n            return self._lazy_read(assign_op)\n    return assign_op",
    "docstring": "Assigns a new value to this variable. Args: value: A . The new value for this variable. use_locking: If , use locking during the assignment. name: The name to use for the assignment. read_value: A . Whether to read and return the new value of the variable or not. Returns: If is , this method will return the new value of the variable after the assignment has completed. Otherwise, when in graph mode it will return the that does the assignment, and when in eager mode it will return .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\resource_variable_ops.py",
    "ast_data": "FunctionDef name:assign arg:self arg:value arg:use_locking arg:name arg:read_value arguments arg arg arg arg arg With Call Assign Call If Call If Compare Assign Assign Call Raise Call Assign If Call Assign BoolOp Call Assign Assign Call If Return return:yes Call Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "_mean_tweedie_deviance",
    "source_code": "def _mean_tweedie_deviance(y_true, y_pred, sample_weight, power):\n    xp, _, device_ = get_namespace_and_device(y_true, y_pred)\n    p = power\n    if p < 0:\n        dev = 2 * (xp.pow(xp.where(y_true > 0, y_true, 0.0), 2 - p) / ((1 - p) * (2 - p)) - y_true * xp.pow(y_pred, 1 - p) / (1 - p) + xp.pow(y_pred, 2 - p) / (2 - p))\n    elif p == 0:\n        dev = (y_true - y_pred) ** 2\n    elif p == 1:\n        dev = 2 * (xlogy(y_true, y_true / y_pred) - y_true + y_pred)\n    elif p == 2:\n        dev = 2 * (xp.log(y_pred / y_true) + y_true / y_pred - 1)\n    else:\n        dev = 2 * (xp.pow(y_true, 2 - p) / ((1 - p) * (2 - p)) - y_true * xp.pow(y_pred, 1 - p) / (1 - p) + xp.pow(y_pred, 2 - p) / (2 - p))\n    return float(_average(dev, weights=sample_weight))",
    "docstring": "Mean Tweedie deviance regression loss.",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\metrics\\_regression.py",
    "ast_data": "FunctionDef name:_mean_tweedie_deviance arg:y_true arg:y_pred arg:sample_weight arg:power arguments arg arg arg arg Assign Call Assign If Compare Assign Call Call Compare Call Call If Compare Assign If Compare Assign Call If Compare Assign Call Assign Call Call Call Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_create_per_replica",
    "source_code": "def _create_per_replica(value_list, strategy):\n    always_wrap = _always_wrap(strategy)\n    per_replicas = distribute_utils.regroup(value_list, always_wrap=always_wrap)\n    return per_replicas",
    "docstring": "Creates a PerReplica. For strategies other than OneDeviceStrategy, it creates a PerReplica whose type spec is set to the element spec of the dataset. This helps avoid retracing for partial batches. Retracing is problematic for multi client when different client retraces different time, since retracing changes the collective keys in the tf.function, and causes mismatches among clients. For single client strategies, this simply calls distribute_utils.regroup(). Args: value_list: a list of values, one for each replica. strategy: the . Returns: a structure of PerReplica.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\input_lib.py",
    "ast_data": "FunctionDef name:_create_per_replica arg:value_list arg:strategy arguments arg arg Assign Call Assign Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "set",
    "source_code": "def set(self, child):\n    if hasattr(self, '_child'):\n        self.invalidate()\n        new_dims = (child.input_dims, child.output_dims)\n        old_dims = (self._child.input_dims, self._child.output_dims)\n        if new_dims != old_dims:\n            raise ValueError(f'The input and output dims of the new child {new_dims} do not match those of current child {old_dims}')\n        self._child._parents.pop(id(self), None)\n    self._child = child\n    self.set_children(child)\n    self.transform = child.transform\n    self.transform_affine = child.transform_affine\n    self.transform_non_affine = child.transform_non_affine\n    self.transform_path = child.transform_path\n    self.transform_path_affine = child.transform_path_affine\n    self.transform_path_non_affine = child.transform_path_non_affine\n    self.get_affine = child.get_affine\n    self.inverted = child.inverted\n    self.get_matrix = child.get_matrix\n    self._invalid = 0\n    self.invalidate()\n    self._invalid = 0",
    "docstring": "Replace the current child of this transform with another one. The new child must have the same number of input and output dimensions as the current child.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\transforms.py",
    "ast_data": "FunctionDef name:set arg:self arg:child arguments arg arg If Call Call Assign Assign If Compare Raise Call Call Call Assign Call Assign Assign Assign Assign Assign Assign Assign Assign Assign Assign Call Assign"
  },
  {
    "library": "tensorflow",
    "name": "_is_valid_classification_signature",
    "source_code": "def _is_valid_classification_signature(signature_def):\n    if signature_def.method_name != signature_constants.CLASSIFY_METHOD_NAME:\n        return False\n    if set(signature_def.inputs.keys()) != set([signature_constants.CLASSIFY_INPUTS]):\n        return False\n    if signature_def.inputs[signature_constants.CLASSIFY_INPUTS].dtype != types_pb2.DT_STRING:\n        return False\n    allowed_outputs = set([signature_constants.CLASSIFY_OUTPUT_CLASSES, signature_constants.CLASSIFY_OUTPUT_SCORES])\n    if not signature_def.outputs.keys():\n        return False\n    if set(signature_def.outputs.keys()) - allowed_outputs:\n        return False\n    if signature_constants.CLASSIFY_OUTPUT_CLASSES in signature_def.outputs and signature_def.outputs[signature_constants.CLASSIFY_OUTPUT_CLASSES].dtype != types_pb2.DT_STRING:\n        return False\n    if signature_constants.CLASSIFY_OUTPUT_SCORES in signature_def.outputs and signature_def.outputs[signature_constants.CLASSIFY_OUTPUT_SCORES].dtype != types_pb2.DT_FLOAT:\n        return False\n    return True",
    "docstring": "Determine whether the argument is a servable 'classify' SignatureDef.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\saved_model\\signature_def_utils_impl.py",
    "ast_data": "FunctionDef name:_is_valid_classification_signature arg:signature_def arguments arg If Compare Return return:yes If Compare Call Call Call Return return:yes If Compare Return return:yes Assign Call If Call Return return:yes If Call Call Return return:yes If BoolOp Compare Compare Return return:yes If BoolOp Compare Compare Return return:yes Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "set_locs",
    "source_code": "def set_locs(self, locs):\n    self.locs = locs",
    "docstring": "Set the locations of the ticks. This method is called before computing the tick labels because some formatters need to know all tick locations to do so.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\ticker.py",
    "ast_data": "FunctionDef name:set_locs arg:self arg:locs arguments arg arg Assign"
  },
  {
    "library": "pytorch",
    "name": "CacheSizeRelevantForFrame",
    "source_code": "@dataclass\nclass CacheSizeRelevantForFrame:\n    num_cache_entries: int = 0\n    num_cache_entries_with_same_id_matched_objs: int = 0\n\n    def will_compilation_exceed(self, limit: int) -> bool:\n        return self.will_compilation_exceed_accumulated_limit() or self.will_compilation_exceed_specific_limit(limit)\n\n    def will_compilation_exceed_accumulated_limit(self) -> bool:\n        return self.num_cache_entries >= config.accumulated_recompile_limit\n\n    def will_compilation_exceed_specific_limit(self, limit: int) -> bool:\n        return self.num_cache_entries_with_same_id_matched_objs >= limit",
    "docstring": "We track the number of cache entries that have same id_match objects as the given frame. TODO(janimesh) - Consider adding a map from tuple_of_match_ids to count - - this could be useful for debugging as well.",
    "type": "class",
    "file_path": "pytorch\\torch\\_dynamo\\cache_size.py",
    "ast_data": "ClassDef name:CacheSizeRelevantForFrame FunctionDef name:will_compilation_exceed arg:self arg:limit arguments arg arg Return return:yes BoolOp Call Call FunctionDef name:will_compilation_exceed_accumulated_limit arg:self arguments arg Return return:yes Compare FunctionDef name:will_compilation_exceed_specific_limit arg:self arg:limit arguments arg arg Return return:yes Compare"
  },
  {
    "library": "matplotlib",
    "name": "juggle_axes",
    "source_code": "def juggle_axes(xs, ys, zs, zdir):\n    if zdir == 'x':\n        return (zs, xs, ys)\n    elif zdir == 'y':\n        return (xs, zs, ys)\n    elif zdir[0] == '-':\n        return rotate_axes(xs, ys, zs, zdir)\n    else:\n        return (xs, ys, zs)",
    "docstring": "Reorder coordinates so that 2D *xs*, *ys* can be plotted in the plane orthogonal to *zdir*. *zdir* is normally 'x', 'y' or 'z'. However, if *zdir* starts with a '-' it is interpreted as a compensation for .",
    "type": "function",
    "file_path": "matplotlib\\lib\\mpl_toolkits\\mplot3d\\art3d.py",
    "ast_data": "FunctionDef name:juggle_axes arg:xs arg:ys arg:zs arg:zdir arguments arg arg arg arg If Compare Return return:yes If Compare Return return:yes If Compare Return return:yes Call Return return:yes"
  },
  {
    "library": "django",
    "name": "_if_unmodified_since_passes",
    "source_code": "def _if_unmodified_since_passes(last_modified, if_unmodified_since):\n    return last_modified and last_modified <= if_unmodified_since",
    "docstring": "Test the If-Unmodified-Since comparison as defined in RFC 9110 Section 13.1.4.",
    "type": "function",
    "file_path": "django\\django\\utils\\cache.py",
    "ast_data": "FunctionDef name:_if_unmodified_since_passes arg:last_modified arg:if_unmodified_since arguments arg arg Return return:yes BoolOp Compare"
  },
  {
    "library": "matplotlib",
    "name": "InvertedPolarTransform",
    "source_code": "class InvertedPolarTransform(mtransforms.Transform):\n    input_dims = output_dims = 2\n\n    def __init__(self, axis=None, use_rmin=True, *, apply_theta_transforms=True):\n        super().__init__()\n        self._axis = axis\n        self._use_rmin = use_rmin\n        self._apply_theta_transforms = apply_theta_transforms\n        if apply_theta_transforms:\n            _apply_theta_transforms_warn()\n    __str__ = mtransforms._make_str_method('_axis', use_rmin='_use_rmin', apply_theta_transforms='_apply_theta_transforms')\n\n    def transform_non_affine(self, values):\n        x, y = values.T\n        r = np.hypot(x, y)\n        theta = (np.arctan2(y, x) + 2 * np.pi) % (2 * np.pi)\n        if self._apply_theta_transforms and self._axis is not None:\n            theta -= self._axis.get_theta_offset()\n            theta *= self._axis.get_theta_direction()\n            theta %= 2 * np.pi\n        if self._use_rmin and self._axis is not None:\n            r += self._axis.get_rorigin()\n            r *= self._axis.get_rsign()\n        return np.column_stack([theta, r])\n\n    def inverted(self):\n        return PolarAxes.PolarTransform(self._axis, self._use_rmin, apply_theta_transforms=self._apply_theta_transforms)",
    "docstring": "The inverse of the polar transform, mapping Cartesian coordinate space *x* and *y* back to *theta* and *r*.",
    "type": "class",
    "file_path": "matplotlib\\lib\\matplotlib\\projections\\polar.py",
    "ast_data": "ClassDef name:InvertedPolarTransform Assign FunctionDef name:__init__ arg:self arg:axis arg:use_rmin arguments arg arg arg arg Call Call Assign Assign Assign If Call Assign Call FunctionDef name:transform_non_affine arg:self arg:values arguments arg arg Assign Assign Call Assign Call If BoolOp Compare Call Call If BoolOp Compare Call Call Return return:yes Call FunctionDef name:inverted arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "_sticky_export",
    "source_code": "@typing.no_type_check\ndef _sticky_export(forward_func, dynamic_shapes_callback=None):\n    model = forward_func.__self__\n    original_forward = forward_func.__func__\n\n    @functools.wraps(forward_func)\n    def wrapper(*args, **kwargs):\n        model.forward = types.MethodType(original_forward, model)\n        dynamic_shapes_spec = None\n        if dynamic_shapes_callback:\n            dynamic_shapes_spec = dynamic_shapes_callback(*args, **kwargs)\n        try:\n            exported = torch.export.export(model, args, kwargs, dynamic_shapes=dynamic_shapes_spec).module()\n            wrapper._exported_artifact = exported\n        finally:\n            model.forward = wrapper\n        return exported(*args, **kwargs)\n    return wrapper",
    "docstring": "Lazily export the model on first forward call. Usage: model.forward = _sticky_export(model.forward, dynamic_shapes_callback=callback)",
    "type": "function",
    "file_path": "pytorch\\torch\\export\\experimental\\__init__.py",
    "ast_data": "FunctionDef name:_sticky_export arg:forward_func arg:dynamic_shapes_callback arguments arg arg Assign Assign FunctionDef name:wrapper arguments arg arg Assign Call Assign If Assign Call Try Assign Call Call Assign Assign Return return:yes Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "register_tab_comp_context",
    "source_code": "def register_tab_comp_context(self, *args, **kwargs):\n    self._tab_completion_registry.register_tab_comp_context(*args, **kwargs)",
    "docstring": "Wrapper around TabCompletionRegistry.register_tab_comp_context().",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\debug\\cli\\base_ui.py",
    "ast_data": "FunctionDef name:register_tab_comp_context arg:self arguments arg arg arg Call"
  },
  {
    "library": "pytorch",
    "name": "pass_result_wrapper",
    "source_code": "@compatibility(is_backward_compatible=False)\ndef pass_result_wrapper(fn: Callable) -> Callable:\n    if fn is None:\n        return None\n\n    @wraps(fn)\n    def wrapped_fn(gm):\n        res = fn(gm)\n        if res is None:\n            return PassResult(gm, True)\n        if isinstance(res, PassResult):\n            return res\n        elif isinstance(res, nn.Module):\n            return PassResult(res, True)\n    if not inspect.isfunction(fn):\n        wrapped_fn.__name__ = type(fn).__name__\n    return wrapped_fn",
    "docstring": "Wrapper for passes which currently do not return a PassResult. This wrapper makes them return a PassResult containing the modified object and True for the \"modified\" flag. Args: fn (Callable[Module, Any]) Returns: wrapped_fn (Callable[Module, PassResult])",
    "type": "function",
    "file_path": "pytorch\\torch\\fx\\passes\\infra\\pass_manager.py",
    "ast_data": "FunctionDef name:pass_result_wrapper arg:fn arguments arg If Compare Return return:no FunctionDef name:wrapped_fn arg:gm arguments arg Assign Call If Compare Return return:yes Call If Call Return return:yes If Call Return return:yes Call Call If Call Assign Call Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "_levels_to_axis",
    "source_code": "def _levels_to_axis(ss, levels: tuple[int] | list[int], valid_ilocs: npt.NDArray[np.intp], sort_labels: bool=False) -> tuple[npt.NDArray[np.intp], list[IndexLabel]]:\n    if sort_labels and len(levels) == 1:\n        ax_coords = ss.index.codes[levels[0]][valid_ilocs]\n        ax_labels = ss.index.levels[levels[0]]\n    else:\n        levels_values = lib.fast_zip([ss.index.get_level_values(lvl).to_numpy() for lvl in levels])\n        codes, ax_labels = factorize(levels_values, sort=sort_labels)\n        ax_coords = codes[valid_ilocs]\n    ax_labels = ax_labels.tolist()\n    return (ax_coords, ax_labels)",
    "docstring": "For a MultiIndexed sparse Series , return and , where are the coordinates along one of the two axes of the destination sparse matrix, and are the labels from ' Index which correspond to these coordinates. Parameters ---------- ss : Series levels : tuple/list valid_ilocs : numpy.ndarray Array of integer positions of valid values for the sparse matrix in ss. sort_labels : bool, default False Sort the axis labels before forming the sparse matrix. When refers to a single level, set to True for a faster execution. Returns ------- ax_coords : numpy.ndarray (axis coordinates) ax_labels : list (axis labels)",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\arrays\\sparse\\scipy_sparse.py",
    "ast_data": "FunctionDef name:_levels_to_axis arg:ss arg:levels arg:valid_ilocs arg:sort_labels arguments arg arg arg arg If BoolOp Compare Call Assign Assign Assign Call Call Call Assign Call Assign Assign Call Return return:yes"
  },
  {
    "library": "django",
    "name": "_cull",
    "source_code": "def _cull(self):\n    filelist = self._list_cache_files()\n    num_entries = len(filelist)\n    if num_entries < self._max_entries:\n        return\n    if self._cull_frequency == 0:\n        return self.clear()\n    filelist = random.sample(filelist, int(num_entries / self._cull_frequency))\n    for fname in filelist:\n        self._delete(fname)",
    "docstring": "Remove random cache entries if max_entries is reached at a ratio of num_entries / cull_frequency. A value of 0 for CULL_FREQUENCY means that the entire cache will be purged.",
    "type": "method",
    "file_path": "django\\django\\core\\cache\\backends\\filebased.py",
    "ast_data": "FunctionDef name:_cull arg:self arguments arg Assign Call Assign Call If Compare Return return:no If Compare Return return:yes Call Assign Call Call For Call"
  },
  {
    "library": "matplotlib",
    "name": "limit_range_for_scale",
    "source_code": "def limit_range_for_scale(self, vmin, vmax):\n    return self._scale.limit_range_for_scale(vmin, vmax, self.get_minpos())",
    "docstring": "Return the range *vmin*, *vmax*, restricted to the domain supported by the current scale.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\axis.py",
    "ast_data": "FunctionDef name:limit_range_for_scale arg:self arg:vmin arg:vmax arguments arg arg arg Return return:yes Call Call"
  },
  {
    "library": "pandas",
    "name": "_check_expression",
    "source_code": "def _check_expression(expr) -> None:\n    if not expr:\n        raise ValueError('expr cannot be an empty string')",
    "docstring": "Make sure an expression is not an empty string Parameters ---------- expr : object An object that can be converted to a string Raises ------ ValueError * If expr is an empty string",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\computation\\eval.py",
    "ast_data": "FunctionDef name:_check_expression arg:expr arguments arg If Raise Call"
  },
  {
    "library": "pytorch",
    "name": "load_state",
    "source_code": "def load_state(self, filename: str='fuzzer_state.pkl') -> None:\n    with open(filename, 'rb') as f:\n        state = pickle.load(f)\n        self.results = state['results']\n        self.detailed_results = state.get('detailed_results', {})",
    "docstring": "Load fuzzer state from a file",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\fuzzer.py",
    "ast_data": "FunctionDef name:load_state arg:self arg:filename arguments arg arg With Call Assign Call Assign Assign Call"
  },
  {
    "library": "pytorch",
    "name": "_extract_input_info",
    "source_code": "def _extract_input_info(self, model: GraphModule) -> dict[str, dict]:\n    input_info: dict[str, dict] = {}\n    for fqn, module in model.named_modules():\n        if self._is_supported(module):\n            pre_obs = getattr(module, self.DEFAULT_PRE_OBSERVER_NAME)\n            input_info[fqn] = {self.ACTIVATION_PREFIX + self.PER_CHANNEL_MAX_KEY: pre_obs.max_val, self.ACTIVATION_PREFIX + self.PER_CHANNEL_MIN_KEY: pre_obs.min_val, self.ACTIVATION_PREFIX + self.GLOBAL_MAX_KEY: max(pre_obs.max_val), self.ACTIVATION_PREFIX + self.GLOBAL_MIN_KEY: min(pre_obs.min_val)}\n    return input_info",
    "docstring": "Takes in a calibrated GraphModule and then finds the relevant observers. It then extracts the input information for each observer returns it Args model (GraphModule): The prepared and calibrated GraphModule with inserted ModelReportObservers Returns a dict mapping relevant module fqns (str) to a dict with keys: \"input_activation_per_channel_max\" : maps to the per_channel max values \"input_activation_per_channel_min\" : maps to the per_channel min values \"input_activation_global_max\" : maps to the global max recorded \"input_activation_global_min\" : maps to the global min recorded",
    "type": "method",
    "file_path": "pytorch\\torch\\ao\\quantization\\fx\\_model_report\\detector.py",
    "ast_data": "FunctionDef name:_extract_input_info arg:self arg:model arguments arg arg For Call If Call Assign Call Assign Call Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "convert",
    "source_code": "@staticmethod\ndef convert(value, unit, axis):\n    if isinstance(value, Decimal):\n        return float(value)\n    elif isinstance(value, ma.MaskedArray):\n        return ma.asarray(value, dtype=float)\n    else:\n        return np.asarray(value, dtype=float)",
    "docstring": "Convert Decimals to floats. The *unit* and *axis* arguments are not used. Parameters ---------- value : decimal.Decimal or iterable Decimal or list of Decimal need to be converted",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\units.py",
    "ast_data": "FunctionDef name:convert arg:value arg:unit arg:axis arguments arg arg arg If Call Return return:yes Call If Call Return return:yes Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "create_symbolic_sizes_strides_storage_offset",
    "source_code": "def create_symbolic_sizes_strides_storage_offset(self, ex: torch.Tensor, source: Source, *, symbolic_context: Optional[SymbolicContext]=None) -> tuple[tuple[IntLikeType, ...], tuple[IntLikeType, ...], IntLikeType]:\n    ex_size = tuple((self._maybe_specialize_sym_int_with_hint(sz) for sz in ex.size()))\n    ex_stride = tuple((self._maybe_specialize_sym_int_with_hint(sd) for sd in ex.stride()))\n    ex_storage_offset = self._maybe_specialize_sym_int_with_hint(ex.storage_offset())\n    return self._create_symbolic_sizes_strides_storage_offset(ex_size, ex_stride, ex_storage_offset, [_is_dim_dynamic(ex, i) for i in range(ex.dim())], source, symbolic_context=symbolic_context)",
    "docstring": "Returns a list of symbolic sizes and strides for the given tensor. We try our best to express stride in terms of the sizes, so as to not introduce new symbolic variables.",
    "type": "method",
    "file_path": "pytorch\\torch\\fx\\experimental\\symbolic_shapes.py",
    "ast_data": "FunctionDef name:create_symbolic_sizes_strides_storage_offset arg:self arg:ex arg:source arguments arg arg arg arg Assign Call Call Call Assign Call Call Call Assign Call Call Return return:yes Call Call Call Call"
  },
  {
    "library": "scikit-learn",
    "name": "BadObject",
    "source_code": "class BadObject(ArffException):\n\n    def __init__(self, msg='Invalid object.'):\n        self.msg = msg\n\n    def __str__(self):\n        return '%s' % self.msg",
    "docstring": "Error raised when the object representing the ARFF file has something wrong.",
    "type": "class",
    "file_path": "scikit-learn\\sklearn\\externals\\_arff.py",
    "ast_data": "ClassDef name:BadObject FunctionDef name:__init__ arg:self arg:msg arguments arg arg Assign FunctionDef name:__str__ arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "capture_by_value",
    "source_code": "def capture_by_value(self, graph: Any, tensor: core.Tensor, name: Optional[str]=None) -> core.Tensor:\n    if isinstance(tensor, core.Value):\n        if name is None:\n            name = str(pywrap_tfe.TFE_Py_UID())\n        if tensor.dtype in dtypes.TF_VALUE_DTYPES and functools.reduce(lambda a, b: a * b, tensor.shape, 1) <= _EAGER_CONST_THRESHOLD:\n            graph_const = self.by_val_internal.get(id(tensor))\n            if graph_const is None:\n                graph_const = tensor._capture_as_const(name)\n                if graph_const is None:\n                    graph_const = self._create_placeholder_helper(graph, tensor, name)\n                self.add_or_replace(key=id(tensor), external=tensor, internal=graph_const, is_by_ref=False)\n                graph.inputs.append(graph_const)\n            graph_const._record_tape(tensor)\n            return graph_const\n        return self._create_placeholder_helper(graph, tensor, name)\n    if tensor.graph is not graph:\n        graph._validate_in_scope(tensor)\n        if name is None:\n            assert tensor.op is not None, (tensor.__class__, dir(tensor), tensor.__class__.__name__)\n            name = tensor.op.name\n        return graph._capture_helper(tensor, name)\n    return tensor",
    "docstring": "Captures if it's external to this graph. If is from a different graph, returns a placeholder for it. and the placeholder will appear in self.captures, and the placeholder will appear in self.inputs. Multiple calls to this method with the same argument will return the same placeholder. If is from this graph, returns . Args: graph: The FuncGraph that captures this tensor. tensor: Tensor. May be from this FuncGraph or a different graph. name: Optional name if a placeholder is created. Returns: Tensor from this FuncGraph. Raises: InaccessibleTensorError: if any tensors are accessed in a manner that bypasses the mechanisms required for the data dependencies to be correctly wired.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\core\\function\\capture\\capture_container.py",
    "ast_data": "FunctionDef name:capture_by_value arg:self arg:graph arg:tensor arg:name arguments arg arg arg arg If Call If Compare Assign Call Call If BoolOp Compare Compare Call arguments arg arg Assign Call Call If Compare Assign Call If Compare Assign Call Call Call Call Call Return return:yes Return return:yes Call If Compare Call If Compare Compare Call Assign Return return:yes Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_record_variable_scope_and_name",
    "source_code": "def _record_variable_scope_and_name(embedding_var_name, embedding_var_name_in_fc, is_shared_embedding=False, bypass_scope_validation=False):\n    g = ops.get_default_graph()\n    collection = g.get_collection_ref(_TPU_FC_TO_SCOPE)\n    if not collection:\n        collection.append({})\n    var_def_dict = collection[0]\n    captured_scope = variable_scope.get_variable_scope()\n    captured_scope_name = captured_scope.name\n    if embedding_var_name in var_def_dict:\n        if var_def_dict[embedding_var_name][0] != captured_scope_name and (not is_shared_embedding) and (not bypass_scope_validation):\n            raise ValueError('For embedding var name {}, the variable scope name is different, got {}; expected {}'.format(embedding_var_name, captured_scope_name, var_def_dict[embedding_var_name][0]))\n        if var_def_dict[embedding_var_name][1] != embedding_var_name_in_fc:\n            raise ValueError('For embedding var name {}, the embedding name is different, got {}; expected {}'.format(embedding_var_name, embedding_var_name_in_fc, var_def_dict[embedding_var_name][1]))\n    else:\n        var_def_dict[embedding_var_name] = (captured_scope_name, embedding_var_name_in_fc)",
    "docstring": "Add embedding variable name and scope to collection.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\tpu\\feature_column.py",
    "ast_data": "FunctionDef name:_record_variable_scope_and_name arg:embedding_var_name arg:embedding_var_name_in_fc arg:is_shared_embedding arg:bypass_scope_validation arguments arg arg arg arg Assign Call Assign Call If Call Assign Assign Call Assign If Compare If BoolOp Compare Raise Call Call If Compare Raise Call Call Assign"
  },
  {
    "library": "pygame",
    "name": "match_font",
    "source_code": "def match_font(name, bold=False, italic=False):\n    initsysfonts()\n    fontname = None\n    if isinstance(name, (str, bytes)):\n        name = name.split(b',' if isinstance(name, bytes) else ',')\n    for single_name in name:\n        if isinstance(single_name, bytes):\n            single_name = single_name.decode()\n        single_name = _simplename(single_name)\n        styles = Sysfonts.get(single_name)\n        if not styles:\n            styles = Sysalias.get(single_name)\n        if styles:\n            while not fontname:\n                fontname = styles.get((bold, italic))\n                if italic:\n                    italic = 0\n                elif bold:\n                    bold = 0\n                elif not fontname:\n                    fontname = list(styles.values())[0]\n        if fontname:\n            break\n    return fontname",
    "docstring": "pygame.font.match_font(name, bold=0, italic=0) -> name find the filename for the named system font This performs the same font search as the SysFont() function, only it returns the path to the TTF file that would be loaded. The font name can also be an iterable of font names or a string/bytes of comma-separated font names to try. If no match is found, None is returned.",
    "type": "function",
    "file_path": "pygame\\src_py\\sysfont.py",
    "ast_data": "FunctionDef name:match_font arg:name arg:bold arg:italic arguments arg arg arg Call Assign If Call Assign Call Call For If Call Assign Call Assign Call Assign Call If Assign Call If While Assign Call If Assign If Assign If Assign Call Call If Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "_IdentityRegressor",
    "source_code": "class _IdentityRegressor(RegressorMixin, BaseEstimator):\n\n    def decision_function(self, y_predict):\n        return y_predict\n\n    def predict(self, y_predict):\n        return y_predict",
    "docstring": "Fake regressor which will directly output the prediction.",
    "type": "class",
    "file_path": "scikit-learn\\sklearn\\linear_model\\_ridge.py",
    "ast_data": "ClassDef name:_IdentityRegressor FunctionDef name:decision_function arg:self arg:y_predict arguments arg arg Return return:yes FunctionDef name:predict arg:self arg:y_predict arguments arg arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "broadcast_to",
    "source_code": "def broadcast_to(rt_input, shape: DynamicRaggedShape):\n    if not isinstance(shape, DynamicRaggedShape):\n        raise TypeError('shape must be a DynamicRaggedShape')\n    rt_input = ragged_tensor.convert_to_tensor_or_ragged_tensor(rt_input)\n    origin_shape = None\n    if ragged_tensor.is_ragged(rt_input):\n        if shape.num_row_partitions != 0:\n            if rt_input.row_splits.dtype != shape.dtype:\n                raise ValueError('Cannot coerce row_splits.dtype')\n        else:\n            shape = shape.with_dtype(rt_input.row_splits.dtype)\n        origin_shape = DynamicRaggedShape.from_tensor(rt_input)\n    elif shape.num_row_partitions != 0:\n        origin_shape = DynamicRaggedShape.from_tensor(rt_input, dtype=shape.dtype)\n    else:\n        origin_shape = DynamicRaggedShape.from_tensor(rt_input, dtype=dtypes.int64)\n        shape = shape.with_dtype(dtype=dtypes.int64)\n    broadcaster = _get_broadcaster(origin_shape, shape)\n    return broadcaster.broadcast(rt_input)",
    "docstring": "Broadcasts a potentially ragged tensor to a ragged shape. Tiles as necessary to match the given shape. Behavior is undefined if is not broadcast-compatible with . Args: rt_input: The potentially ragged tensor to broadcast. shape: A Returns: A potentially ragged tensor whose values are taken from , and whose shape matches .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\dynamic_ragged_shape.py",
    "ast_data": "FunctionDef name:broadcast_to arg:rt_input arg:shape arguments arg arg If Call Raise Call Assign Call Assign If Call If Compare If Compare Raise Call Assign Call Assign Call If Compare Assign Call Assign Call Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "connected",
    "source_code": "def connected(self, x, y):\n    return self._indices[self[x]] == self._indices[self[y]]",
    "docstring": "Test whether and are in the same subset. Parameters ---------- x, y : hashable object Elements to test. Returns ------- result : bool True if and are in the same set, False otherwise.",
    "type": "method",
    "file_path": "scipy\\scipy\\_lib\\_disjoint_set.py",
    "ast_data": "FunctionDef name:connected arg:self arg:x arg:y arguments arg arg arg Return return:yes Compare"
  },
  {
    "library": "pytorch",
    "name": "reset_parameters",
    "source_code": "def reset_parameters(self) -> None:\n    if self.elementwise_affine:\n        init.ones_(self.weight)",
    "docstring": "Resets parameters based on their initialization used in __init__.",
    "type": "method",
    "file_path": "pytorch\\torch\\nn\\modules\\normalization.py",
    "ast_data": "FunctionDef name:reset_parameters arg:self arguments arg If Call"
  },
  {
    "library": "pytorch",
    "name": "apply",
    "source_code": "def apply(self, model_args: Sequence[Any], model_kwargs: Mapping[str, Any], model: torch.nn.Module | Callable | torch_export.ExportedProgram | None=None) -> tuple[Sequence[Any], Mapping[str, Any]]:\n    flattened_args, spec = pytree.tree_flatten((model_args, model_kwargs))\n    if self._spec is None:\n        self._spec = spec\n    else:\n        _assert_identical_pytree_spec(self._spec, spec, error_message='Model inputs incompatible with the format that was exported. ')\n    return (flattened_args, {})",
    "docstring": "Flatten the model args and kwargs and validate the output. Args: model_args: The model args. model_kwargs: The model kwargs. model: The PyTorch model. Returns: A tuple of the flattened model args and kwargs. The kwargs is empty, because they are flattened and merged into the args. Raises: ValueError: If the output produced from the current is not identical to the output produced from the first that was passed to this method.",
    "type": "method",
    "file_path": "pytorch\\torch\\onnx\\_internal\\io_adapter.py",
    "ast_data": "FunctionDef name:apply arg:self arg:model_args arg:model_kwargs arg:model arguments arg arg arg arg Assign Call If Compare Assign Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "infer_dense_strides",
    "source_code": "def infer_dense_strides(size: Sequence[int], orig_strides: Sequence[int]):\n    fill_order = get_fill_order(orig_strides, V.graph.sizevars.shape_env)\n    return construct_strides(size, fill_order)",
    "docstring": "This is a mirror of the same function in aten/src/ATen/ExpandUtils.cpp Args: size: The size of the output tensor orig_strides: The strides of the input tensor Returns: List[int]: Dense non-overlapping strides that preserve the input tensor's layout permutation. The returned strides follow the same stride propagation rules as TensorIterator. This matches The behavior of empty_like()",
    "type": "function",
    "file_path": "pytorch\\torch\\_inductor\\kernel\\flex_attention.py",
    "ast_data": "FunctionDef name:infer_dense_strides arg:size arg:orig_strides arguments arg arg Assign Call Return return:yes Call"
  },
  {
    "library": "numpy",
    "name": "__getitem__",
    "source_code": "def __getitem__(self, index):\n    if not isinstance(index, tuple):\n        index = (index,)\n    fixed = []\n    length, dims = (len(index), self.ndim)\n    for slice_ in index:\n        if slice_ is Ellipsis:\n            fixed.extend([slice(None)] * (dims - length + 1))\n            length = len(fixed)\n        elif isinstance(slice_, int):\n            fixed.append(slice(slice_, slice_ + 1, 1))\n        else:\n            fixed.append(slice_)\n    index = tuple(fixed)\n    if len(index) < dims:\n        index += (slice(None),) * (dims - len(index))\n    out = self.__class__(self.var, self.buf_size)\n    for i, (start, stop, step, slice_) in enumerate(zip(self.start, self.stop, self.step, index)):\n        out.start[i] = start + (slice_.start or 0)\n        out.step[i] = step * (slice_.step or 1)\n        out.stop[i] = start + (slice_.stop or stop - start)\n        out.stop[i] = min(stop, out.stop[i])\n    return out",
    "docstring": "Return a new arrayterator.",
    "type": "method",
    "file_path": "numpy\\numpy\\lib\\_arrayterator_impl.py",
    "ast_data": "FunctionDef name:__getitem__ arg:self arg:index arguments arg arg If Call Assign Assign Assign Call For If Compare Call Call Assign Call If Call Call Call Call Assign Call If Compare Call Call Call Assign Call For Call Call Assign BoolOp Assign BoolOp Assign BoolOp Assign Call Return return:yes"
  },
  {
    "library": "django",
    "name": "resolve",
    "source_code": "def resolve(self, context):\n    if self.lookups is not None:\n        value = self._resolve_lookup(context)\n    else:\n        value = self.literal\n    if self.translate:\n        is_safe = isinstance(value, SafeData)\n        msgid = value.replace('%', '%%')\n        msgid = mark_safe(msgid) if is_safe else msgid\n        if self.message_context:\n            return pgettext_lazy(self.message_context, msgid)\n        else:\n            return gettext_lazy(msgid)\n    return value",
    "docstring": "Resolve this variable against a given context.",
    "type": "method",
    "file_path": "django\\django\\template\\base.py",
    "ast_data": "FunctionDef name:resolve arg:self arg:context arguments arg arg If Compare Assign Call Assign If Assign Call Assign Call Assign Call If Return return:yes Call Return return:yes Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "PerToken",
    "source_code": "class PerToken(Granularity):\n    pass",
    "docstring": "Represents per-token granularity in quantization. This granularity type calculates a different set of quantization parameters for each token, which is represented as the last dimension of the tensor. For example, if the input tensor has shape [2, 3, 4], then there are 6 tokens with 4 elements each, and we will calculate 6 sets of quantization parameters, one for each token. If the input tensor has only two dimensions, e.g. [8, 16], then this is equivalent to , which yields 8 sets of quantization parameters.",
    "type": "class",
    "file_path": "pytorch\\torch\\ao\\quantization\\observer.py",
    "ast_data": "ClassDef name:PerToken"
  },
  {
    "library": "pandas",
    "name": "_map_values",
    "source_code": "@final\ndef _map_values(self, mapper, na_action=None):\n    arr = self._values\n    if isinstance(arr, ExtensionArray):\n        return arr.map(mapper, na_action=na_action)\n    return algorithms.map_array(arr, mapper, na_action=na_action)",
    "docstring": "An internal function that maps values using the input correspondence (which can be a dict, Series, or function). Parameters ---------- mapper : function, dict, or Series The input correspondence object na_action : {None, 'ignore'} If 'ignore', propagate NA values, without passing them to the mapping function Returns ------- Union[Index, MultiIndex], inferred The output of the mapping function applied to the index. If the function returns a tuple with more than one element a MultiIndex will be returned.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\base.py",
    "ast_data": "FunctionDef name:_map_values arg:self arg:mapper arg:na_action arguments arg arg arg Assign If Call Return return:yes Call Return return:yes Call"
  },
  {
    "library": "cryptography",
    "name": "finalize_with_tag",
    "source_code": "@abc.abstractmethod\ndef finalize_with_tag(self, tag: bytes) -> bytes:\n    pass",
    "docstring": "Returns the results of processing the final block as bytes and allows delayed passing of the authentication tag.",
    "type": "method",
    "file_path": "cryptography\\src\\cryptography\\hazmat\\primitives\\ciphers\\base.py",
    "ast_data": "FunctionDef name:finalize_with_tag arg:self arg:tag arguments arg arg"
  },
  {
    "library": "pandas",
    "name": "_query_iterator",
    "source_code": "def _query_iterator(self, result, exit_stack: ExitStack, chunksize: int | None, columns, coerce_float: bool=True, parse_dates=None, dtype_backend: DtypeBackend | Literal['numpy']='numpy') -> Generator[DataFrame]:\n    has_read_data = False\n    with exit_stack:\n        while True:\n            data = result.fetchmany(chunksize)\n            if not data:\n                if not has_read_data:\n                    yield DataFrame.from_records([], columns=columns, coerce_float=coerce_float)\n                break\n            has_read_data = True\n            self.frame = _convert_arrays_to_dataframe(data, columns, coerce_float, dtype_backend)\n            self._harmonize_columns(parse_dates=parse_dates, dtype_backend=dtype_backend)\n            if self.index is not None:\n                self.frame.set_index(self.index, inplace=True)\n            yield self.frame",
    "docstring": "Return generator through chunked result set.",
    "type": "method",
    "file_path": "pandas\\pandas\\io\\sql.py",
    "ast_data": "FunctionDef name:_query_iterator arg:self arg:result arg:exit_stack arg:chunksize arg:columns arg:coerce_float arg:parse_dates arg:dtype_backend arguments arg arg arg arg arg arg arg arg Assign With While Assign Call If If Call Assign Assign Call Call If Compare Call"
  },
  {
    "library": "pytorch",
    "name": "tuning_is_enabled",
    "source_code": "def tuning_is_enabled() -> bool:\n    return torch._C._cuda_tunableop_tuning_is_enabled()",
    "docstring": "Returns whether TunableOp implementations can be tuned.",
    "type": "function",
    "file_path": "pytorch\\torch\\cuda\\tunable.py",
    "ast_data": "FunctionDef name:tuning_is_enabled arguments Return return:yes Call"
  },
  {
    "library": "django",
    "name": "get_traceback_text",
    "source_code": "def get_traceback_text(self):\n    with self.text_template_path.open(encoding='utf-8') as fh:\n        t = DEBUG_ENGINE.from_string(fh.read())\n    c = Context(self.get_traceback_data(), autoescape=False, use_l10n=False)\n    return t.render(c)",
    "docstring": "Return plain text version of debug 500 HTTP error page.",
    "type": "method",
    "file_path": "django\\django\\views\\debug.py",
    "ast_data": "FunctionDef name:get_traceback_text arg:self arguments arg With Call Assign Call Call Assign Call Call Return return:yes Call"
  },
  {
    "library": "numpy",
    "name": "setuptools_run",
    "source_code": "def setuptools_run(self):\n    from distutils.command.install import install as distutils_install\n    if self.old_and_unmanageable or self.single_version_externally_managed:\n        return distutils_install.run(self)\n    caller = sys._getframe(3)\n    caller_module = caller.f_globals.get('__name__', '')\n    caller_name = caller.f_code.co_name\n    if caller_module != 'distutils.dist' or caller_name != 'run_commands':\n        distutils_install.run(self)\n    else:\n        self.do_egg_install()",
    "docstring": "The setuptools version of the .run() method. We must pull in the entire code so we can override the level used in the _getframe() call since we wrap this call by one more level.",
    "type": "method",
    "file_path": "numpy\\numpy\\distutils\\command\\install.py",
    "ast_data": "FunctionDef name:setuptools_run arg:self arguments arg If BoolOp Return return:yes Call Assign Call Assign Call Assign If BoolOp Compare Compare Call Call"
  },
  {
    "library": "matplotlib",
    "name": "find_all",
    "source_code": "def find_all(self, pattern):\n    pattern_re = re.compile(pattern)\n    return RcParams(((key, value) for key, value in self.items() if pattern_re.search(key)))",
    "docstring": "Return the subset of this RcParams dictionary whose keys match, using :func:, the given ``. .. note:: Changes to the returned dictionary are *not* propagated to the parent RcParams dictionary.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\__init__.py",
    "ast_data": "FunctionDef name:find_all arg:self arg:pattern arguments arg arg Assign Call Return return:yes Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "next",
    "source_code": "@property\ndef next(self) -> 'Node':\n    return self._next",
    "docstring": "Returns the next `` in the linked list of Nodes.",
    "type": "method",
    "file_path": "pytorch\\torch\\fx\\node.py",
    "ast_data": "FunctionDef name:next arg:self arguments arg Return return:yes"
  },
  {
    "library": "cryptography",
    "name": "group_order",
    "source_code": "@property\n@abc.abstractmethod\ndef group_order(self) -> int:\n    pass",
    "docstring": "The order of the curve's group.",
    "type": "method",
    "file_path": "cryptography\\src\\cryptography\\hazmat\\primitives\\asymmetric\\ec.py",
    "ast_data": "FunctionDef name:group_order arg:self arguments arg"
  },
  {
    "library": "matplotlib",
    "name": "_get_position_xy",
    "source_code": "def _get_position_xy(self, renderer):\n    return self._get_xy(renderer, self.xy, self.xycoords)",
    "docstring": "Return the pixel position of the annotated point.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\text.py",
    "ast_data": "FunctionDef name:_get_position_xy arg:self arg:renderer arguments arg arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "get_task_info",
    "source_code": "def get_task_info(self):\n    return (self.task_type, self.task_id)",
    "docstring": "Returns job name and task_id for the process which calls this. This returns the job name and task index for the process which calls this function according to its rank and cluster specification. The job name and task index are set after a cluster is constructed by cluster_spec otherwise defaults to None. Returns: A string specifying job name the process belongs to and an integer specifying the task index the process belongs to in that job.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\cluster_resolver\\slurm_cluster_resolver.py",
    "ast_data": "FunctionDef name:get_task_info arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "report_proto_path",
    "source_code": "def report_proto_path(self, trace_dir, summary_tag_name):\n    filename = _TT_REPORT_PROTO + '.' + summary_tag_name.replace('/', '_')\n    return os.path.join(trace_dir, filename)",
    "docstring": "Returns the path where report proto should be written. Args: trace_dir: String denoting the trace directory. summary_tag_name: Name of the unique tag that relates to the report. Returns: A string denoting the path to the report proto.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\tpu\\tensor_tracer_report.py",
    "ast_data": "FunctionDef name:report_proto_path arg:self arg:trace_dir arg:summary_tag_name arguments arg arg arg Assign Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, saved_model_dir, saved_model_tags, saved_model_exported_names, experimental_debug_info_func=None):\n    super(TFLiteSavedModelConverter, self).__init__(experimental_debug_info_func)\n    self.saved_model_dir = saved_model_dir\n    self._saved_model_tags = saved_model_tags\n    self._saved_model_exported_names = saved_model_exported_names\n    if len(self._saved_model_exported_names) != 1:\n        raise ValueError('Only supports a single signature key.')\n    signature_key = self._saved_model_exported_names[0]\n    result = _freeze_saved_model(self.saved_model_dir, None, None, None, self._saved_model_tags, signature_key)\n    self._graph_def = result[0]\n    self._input_tensors = result[1]\n    self._output_tensors = result[2]\n    self._parse_saved_model_args()",
    "docstring": "Constructor for TFLiteConverter. Args: saved_model_dir: Directory of the SavedModel. saved_model_tags: Set of tags identifying the MetaGraphDef within the SavedModel to analyze. All tags in the tag set must be present. (default {tf.saved_model.SERVING}). saved_model_exported_names: Names to be exported when the saved model import path is on. experimental_debug_info_func: An experimental function to retrieve the graph debug info for a set of nodes from the . Raises: ValueError: Invalid arguments.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\lite\\python\\lite.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:saved_model_dir arg:saved_model_tags arg:saved_model_exported_names arg:experimental_debug_info_func arguments arg arg arg arg arg Call Call Assign Assign Assign If Compare Call Raise Call Assign Assign Call Assign Assign Assign Call"
  },
  {
    "library": "tensorflow",
    "name": "emit_counter",
    "source_code": "def emit_counter(self, category: str, name: str, pid: int, timestamp: int, counter: str, value: int) -> None:\n    event = self._create_event('C', category, name, pid, 0, timestamp)\n    event['args'] = {counter: value}\n    self._events.append(event)",
    "docstring": "Emits a record for a single counter. Args: category: The event category as a string. name: The event name as a string. pid: Identifier of the process generating this event as an integer. timestamp: The timestamp of this event as a long integer. counter: Name of the counter as a string. value: Value of the counter as an integer.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\client\\timeline.py",
    "ast_data": "FunctionDef name:emit_counter arg:self arg:category arg:name arg:pid arg:timestamp arg:counter arg:value arguments arg arg arg arg arg arg arg Assign Call Assign Call"
  },
  {
    "library": "kornia",
    "name": "scale_laf",
    "source_code": "def scale_laf(laf: Tensor, scale_coef: Union[float, Tensor]) -> Tensor:\n    if not isinstance(scale_coef, (float, Tensor)):\n        raise TypeError(f'scale_coef should be float or Tensor. Got {type(scale_coef)}')\n    KORNIA_CHECK_LAF(laf)\n    centerless_laf = laf[:, :, :2, :2]\n    return concatenate([scale_coef * centerless_laf, laf[:, :, :, 2:]], dim=3)",
    "docstring": "Multiplies region part of LAF ([:, :, :2, :2]) by a scale_coefficient. So the center, shape and orientation of the local feature stays the same, but the region area changes. Args: laf: :math: scale_coef: broadcastable tensor or float. Returns: LAF :math: Example: >>> input = torch.ones(1, 5, 2, 3) # BxNx2x3 >>> scale = 0.5 >>> output = scale_laf(input, scale) # BxNx2x3",
    "type": "function",
    "file_path": "kornia\\kornia\\feature\\laf.py",
    "ast_data": "FunctionDef name:scale_laf arg:laf arg:scale_coef arguments arg arg If Call Raise Call Call Call Assign Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "__init__",
    "source_code": "def __init__(self, scale_transform, limits):\n    super().__init__()\n    self._scale_transform = scale_transform\n    self._limits = limits\n    self.set_children(scale_transform, limits)\n    self._mtx = None",
    "docstring": "Parameters ---------- scale_transform : Scaling transform for the data. This is used to remove any scaling from the radial view limits. limits : View limits of the data. The only part of its bounds that is used is the y limits (for the radius limits).",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\projections\\polar.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:scale_transform arg:limits arguments arg arg arg Call Call Assign Assign Call Assign"
  },
  {
    "library": "pytorch",
    "name": "_rewrite_spec_if_needed",
    "source_code": "def _rewrite_spec_if_needed(spec: shard_spec.ShardingSpec, tensor: torch.Tensor, rank: int) -> shard_spec.ShardingSpec:\n    if not isinstance(spec, ChunkShardingSpec):\n        return spec\n    rewrite = False\n    for p in spec.placements:\n        p = cast(_remote_device, p)\n        if p.rank() == rank and p.device() != tensor.device:\n            rewrite = True\n            break\n    if rewrite:\n        spec = copy.deepcopy(spec)\n        for i, placement in enumerate(spec.placements):\n            placement = cast(_remote_device, placement)\n            if placement.rank() == rank and placement.device() != tensor.device:\n                spec.placements[i] = _remote_device(f'rank:{rank}/{tensor.device}')\n    return spec",
    "docstring": "Rewrite ``. FSDP.sharded_optim_state_dict sneakly ships optimizer state to CPU so if the original ShardingSpec produces CUDA metadata, ST construction bombs.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\tensor\\parallel\\fsdp.py",
    "ast_data": "FunctionDef name:_rewrite_spec_if_needed arg:spec arg:tensor arg:rank arguments arg arg arg If Call Return return:yes Assign For Assign Call If BoolOp Compare Call Compare Call Assign If Assign Call For Call Assign Call If BoolOp Compare Call Compare Call Assign Call Return return:yes"
  },
  {
    "library": "scipy",
    "name": "m_num_mid",
    "source_code": "@property\ndef m_num_mid(self) -> int:\n    return self.m_num // 2",
    "docstring": "Center index of window . For odd , `m_numwinm_num`. hop: ime increment in signal samples for sliding window. win: Window function as real- or complex-valued 1d array. ShortTimeFFT: Class this property belongs to.",
    "type": "method",
    "file_path": "scipy\\scipy\\signal\\_short_time_fft.py",
    "ast_data": "FunctionDef name:m_num_mid arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "on_batch_end",
    "source_code": "@doc_controls.for_subclass_implementers\n@generic_utils.default\ndef on_batch_end(self, batch, logs=None):\n    pass",
    "docstring": "A backwards compatibility alias for .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\callbacks.py",
    "ast_data": "FunctionDef name:on_batch_end arg:self arg:batch arg:logs arguments arg arg arg"
  },
  {
    "library": "scipy",
    "name": "convergence",
    "source_code": "@property\ndef convergence(self):\n    if np.any(np.isinf(self.population_energies)):\n        return np.inf\n    return np.std(self.population_energies) / (np.abs(np.mean(self.population_energies)) + _MACHEPS)",
    "docstring": "The standard deviation of the population energies divided by their mean.",
    "type": "method",
    "file_path": "scipy\\scipy\\optimize\\_differentialevolution.py",
    "ast_data": "FunctionDef name:convergence arg:self arguments arg If Call Call Return return:yes Return return:yes Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "range_start",
    "source_code": "def range_start(msg) -> int:\n    return _nvtx.rangeStartA(msg)",
    "docstring": "Mark the start of a range with string message. It returns an unique handle for this range to pass to the corresponding call to rangeEnd(). A key difference between this and range_push/range_pop is that the range_start/range_end version supports range across threads (start on one thread and end on another thread). Returns: A range handle (uint64_t) that can be passed to range_end(). Args: msg (str): ASCII message to associate with the range.",
    "type": "function",
    "file_path": "pytorch\\torch\\cuda\\nvtx.py",
    "ast_data": "FunctionDef name:range_start arg:msg arguments arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "mark_compile_region",
    "source_code": "def mark_compile_region(fn=None):\n\n    def wrap(func):\n\n        def inner(*args, **kwargs):\n            return invoke_subgraph_placeholder(func, *args, **kwargs)\n        return inner\n    if fn:\n        return wrap(fn)\n    else:\n        return wrap",
    "docstring": "This wrapper instructs torch.compile to compile the wrapped region once and reuse the compiled artifact, instead of the usual way of aggressively inlining the function. Under the hood, it tells TorchDynamo to use InvokeSubgraph HOP for the region. For PyTorch eager, this is a no-op.",
    "type": "function",
    "file_path": "pytorch\\torch\\_higher_order_ops\\invoke_subgraph.py",
    "ast_data": "FunctionDef name:mark_compile_region arg:fn arguments arg FunctionDef name:wrap arg:func arguments arg FunctionDef name:inner arguments arg arg Return return:yes Call Return return:yes If Return return:yes Call Return return:yes"
  },
  {
    "library": "django",
    "name": "spatial_aggregate_name",
    "source_code": "def spatial_aggregate_name(self, agg_name):\n    agg_name = 'unionagg' if agg_name.lower() == 'union' else agg_name.lower()\n    return getattr(self, agg_name)",
    "docstring": "Return the spatial aggregate SQL name.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\db\\backends\\oracle\\operations.py",
    "ast_data": "FunctionDef name:spatial_aggregate_name arg:self arg:agg_name arguments arg arg Assign Compare Call Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "_ITraceObserver",
    "source_code": "class _ITraceObserver(ABC):\n\n    @abstractmethod\n    def start(self):\n        pass\n\n    @abstractmethod\n    def stop(self):\n        pass\n\n    @abstractmethod\n    def cleanup(self):\n        pass",
    "docstring": "Abstract interface for a Trace observer. This satisfies 3 methods: start, stop and cleanup",
    "type": "class",
    "file_path": "pytorch\\torch\\profiler\\profiler.py",
    "ast_data": "ClassDef name:_ITraceObserver FunctionDef name:start arg:self arguments arg FunctionDef name:stop arg:self arguments arg FunctionDef name:cleanup arg:self arguments arg"
  },
  {
    "library": "scikit-learn",
    "name": "inverse_transform",
    "source_code": "def inverse_transform(self, X):\n    self._check_vocabulary()\n    X = check_array(X, accept_sparse='csr')\n    n_samples = X.shape[0]\n    terms = np.array(list(self.vocabulary_.keys()))\n    indices = np.array(list(self.vocabulary_.values()))\n    inverse_vocabulary = terms[np.argsort(indices)]\n    if sp.issparse(X):\n        return [inverse_vocabulary[X[i, :].nonzero()[1]].ravel() for i in range(n_samples)]\n    else:\n        return [inverse_vocabulary[np.flatnonzero(X[i, :])].ravel() for i in range(n_samples)]",
    "docstring": "Return terms per document with nonzero entries in X. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) Document-term matrix. Returns ------- X_original : list of arrays of shape (n_samples,) List of arrays of terms.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\feature_extraction\\text.py",
    "ast_data": "FunctionDef name:inverse_transform arg:self arg:X arguments arg arg Call Assign Call Assign Assign Call Call Call Assign Call Call Call Assign Call If Call Return return:yes Call Call Call Return return:yes Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "batch_scatter_update",
    "source_code": "def batch_scatter_update(self, sparse_delta, use_locking=False, name=None):\n    return state_ops.batch_scatter_update(self, sparse_delta.indices, sparse_delta.values, use_locking=use_locking, name=name)",
    "docstring": "Assigns to this variable batch-wise. Analogous to . This assumes that this variable and the sparse_delta IndexedSlices have a series of leading dimensions that are the same for all of them, and the updates are performed on the last dimension of indices. In other words, the dimensions should be the following: where And the operation performed can be expressed as: When sparse_delta.indices is a 1D tensor, this operation is equivalent to . To avoid this operation one can looping over the first of the variable and using on the subtensors that result of slicing the first dimension. This is a valid option for , but less efficient than this implementation. Args: sparse_delta: to be assigned to this variable. use_locking: If , use locking during the operation. name: the name of the operation. Returns: A that will hold the new value of this variable after the scattered assignment has completed. Raises: TypeError: if is not an .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\ref_variable.py",
    "ast_data": "FunctionDef name:batch_scatter_update arg:self arg:sparse_delta arg:use_locking arg:name arguments arg arg arg arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "parse_nodes_dict",
    "source_code": "def parse_nodes_dict(nodes):\n    d = {}\n    if nodes:\n        for node in nodes:\n            key, val = parse_entry(node)\n            if key is not None:\n                d[key] = val\n    return d",
    "docstring": "Parse a series of key-value pairs and return a dictionary",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\tools\\optimize_for_inference_lib.py",
    "ast_data": "FunctionDef name:parse_nodes_dict arg:nodes arguments arg Assign If For Assign Call If Compare Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_assign_if_finite",
    "source_code": "def _assign_if_finite(var, value):\n    return cond.cond(math_ops.is_finite(value), lambda: _op_in_graph_mode(var.assign(value)), control_flow_ops.no_op)",
    "docstring": "Assigns a value to a variable if the value is finite.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\training\\experimental\\loss_scale.py",
    "ast_data": "FunctionDef name:_assign_if_finite arg:var arg:value arguments arg arg Return return:yes Call Call arguments Call Call"
  },
  {
    "library": "seaborn",
    "name": "__repr__",
    "source_code": "def __repr__(self):\n    if self._val is not None:\n        s = f'<{repr(self._val)}>'\n    elif self._depend is not None:\n        s = f'<depend:{self._depend}>'\n    elif self._rc is not None:\n        s = f'<rc:{self._rc}>'\n    elif self._auto:\n        s = '<auto>'\n    else:\n        s = '<undefined>'\n    return s",
    "docstring": "Nice formatting for when object appears in Mark init signature.",
    "type": "method",
    "file_path": "seaborn\\seaborn\\_marks\\base.py",
    "ast_data": "FunctionDef name:__repr__ arg:self arguments arg If Compare Assign Call If Compare Assign If Compare Assign If Assign Assign Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "restart_ordering",
    "source_code": "@deprecated('`restart_ordering` is deprecated, if you would like to eagerly order the dispatchers, you should call the `reorder()` method on each dispatcher.', category=FutureWarning)\ndef restart_ordering(on_ambiguity=ambiguity_warn):\n    pass",
    "docstring": "Deprecated interface to temporarily resume ordering.",
    "type": "function",
    "file_path": "pytorch\\torch\\fx\\experimental\\unification\\multipledispatch\\dispatcher.py",
    "ast_data": "FunctionDef name:restart_ordering arg:on_ambiguity arguments arg Call"
  },
  {
    "library": "numpy",
    "name": "chebline",
    "source_code": "def chebline(off, scl):\n    if scl != 0:\n        return np.array([off, scl])\n    else:\n        return np.array([off])",
    "docstring": "Chebyshev series whose graph is a straight line. Parameters ---------- off, scl : scalars The specified line is given by ``. See Also -------- numpy.polynomial.polynomial.polyline numpy.polynomial.legendre.legline numpy.polynomial.laguerre.lagline numpy.polynomial.hermite.hermline numpy.polynomial.hermite_e.hermeline Examples -------- >>> import numpy.polynomial.chebyshev as C >>> C.chebline(3,2) array([3, 2]) >>> C.chebval(-3, C.chebline(3,2)) # should be -3 -3.0",
    "type": "function",
    "file_path": "numpy\\numpy\\polynomial\\chebyshev.py",
    "ast_data": "FunctionDef name:chebline arg:off arg:scl arguments arg arg If Compare Return return:yes Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "check_inner_shape",
    "source_code": "def check_inner_shape(item, shape):\n    is_nested = isinstance(item, (list, tuple)) or np.ndim(item) != 0\n    if is_nested != bool(shape):\n        raise ValueError('inner values have inconsistent shape')\n    if is_nested:\n        if shape[0] != len(item):\n            raise ValueError('inner values have inconsistent shape')\n        for child in item:\n            check_inner_shape(child, shape[1:])",
    "docstring": "Checks that has a consistent shape matching .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\ragged_factory_ops.py",
    "ast_data": "FunctionDef name:check_inner_shape arg:item arg:shape arguments arg arg Assign BoolOp Call Compare Call If Compare Call Raise Call If If Compare Call Raise Call For Call"
  },
  {
    "library": "tensorflow",
    "name": "sync",
    "source_code": "def sync(self):\n    if self._checkpoint:\n        self._checkpoint.sync()",
    "docstring": "Wait for any outstanding save or restore operations.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\checkpoint\\checkpoint_management.py",
    "ast_data": "FunctionDef name:sync arg:self arguments arg If Call"
  },
  {
    "library": "tensorflow",
    "name": "_cast_unsupported_dtypes",
    "source_code": "def _cast_unsupported_dtypes(tensor):\n    if tensor.dtype.__eq__(dtypes.int64):\n        return math_ops.cast(tensor, dtypes.int32)\n    if tensor.dtype.__eq__(dtypes.bfloat16) or tensor.dtype.__eq__(dtypes.float16):\n        return math_ops.cast(tensor, dtypes.float32)\n    return tensor",
    "docstring": "Casts tensor to a supported type.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\tpu\\tensor_tracer.py",
    "ast_data": "FunctionDef name:_cast_unsupported_dtypes arg:tensor arguments arg If Call Return return:yes Call If BoolOp Call Call Return return:yes Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, reduction=losses_utils.ReductionV2.AUTO, name='mean_absolute_percentage_error'):\n    super().__init__(mean_absolute_percentage_error, name=name, reduction=reduction)",
    "docstring": "Initializes instance. Args: reduction: Type of to apply to loss. Default value is . indicates that the reduction option will be determined by the usage context. For almost all cases this defaults to . When used with , outside of built-in training loops such as and , using or will raise an error. Please see this custom training [tutorial]( for more details. name: Optional name for the instance. Defaults to 'mean_absolute_percentage_error'.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\losses.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:reduction arg:name arguments arg arg arg Call Call"
  },
  {
    "library": "django",
    "name": "__getstate__",
    "source_code": "def __getstate__(self):\n    state = self.__dict__.copy()\n    state['_state'] = copy.copy(state['_state'])\n    state['_state'].fields_cache = state['_state'].fields_cache.copy()\n    _memoryview_attrs = []\n    for attr, value in state.items():\n        if isinstance(value, memoryview):\n            _memoryview_attrs.append((attr, bytes(value)))\n    if _memoryview_attrs:\n        state['_memoryview_attrs'] = _memoryview_attrs\n        for attr, value in _memoryview_attrs:\n            state.pop(attr)\n    return state",
    "docstring": "Hook to allow choosing the attributes to pickle.",
    "type": "method",
    "file_path": "django\\django\\db\\models\\base.py",
    "ast_data": "FunctionDef name:__getstate__ arg:self arguments arg Assign Call Assign Call Assign Call Assign For Call If Call Call Call If Assign For Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "clone",
    "source_code": "def clone(self) -> 'set_multithreading_enabled':\n    return self.__class__(self.mode)",
    "docstring": "Create a copy of this class",
    "type": "method",
    "file_path": "pytorch\\torch\\autograd\\grad_mode.py",
    "ast_data": "FunctionDef name:clone arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "rectangle",
    "source_code": "@property\ndef rectangle(self):\n    return self._rectangle",
    "docstring": ": the indicator frame.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\inset.py",
    "ast_data": "FunctionDef name:rectangle arg:self arguments arg Return return:yes"
  },
  {
    "library": "django",
    "name": "Jinja2",
    "source_code": "class Jinja2(EngineMixin, BaseRenderer):\n\n    @cached_property\n    def backend(self):\n        from django.template.backends.jinja2 import Jinja2\n        return Jinja2",
    "docstring": "Load Jinja2 templates from the built-in widget templates in django/forms/jinja2 and from apps' 'jinja2' directory.",
    "type": "class",
    "file_path": "django\\django\\forms\\renderers.py",
    "ast_data": "ClassDef name:Jinja2 FunctionDef name:backend arg:self arguments arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "flex_decoding_grid",
    "source_code": "@SymbolicGridFn\ndef flex_decoding_grid(batch_size, kv_heads, gqa_group_size, n_keys, d_model, meta):\n    return (batch_size * kv_heads, meta['SPLIT_KV'], 1)",
    "docstring": "How is this kernel parallelized? We create a grid of (batch_size * kv_heads, SPLIT_KV, 1) Each block is responsible for iterating over blocks of keys and values calculating the local output for their tile of keys and values over all full length of query. groups of SPLIT_KV blocks then combine their output to produce the final result.",
    "type": "function",
    "file_path": "pytorch\\torch\\_inductor\\kernel\\flex_decoding.py",
    "ast_data": "FunctionDef name:flex_decoding_grid arg:batch_size arg:kv_heads arg:gqa_group_size arg:n_keys arg:d_model arg:meta arguments arg arg arg arg arg arg Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "get_supxlabel",
    "source_code": "def get_supxlabel(self):\n    text_obj = self._supxlabel\n    return '' if text_obj is None else text_obj.get_text()",
    "docstring": "Return the supxlabel as string or an empty string if not set.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\figure.py",
    "ast_data": "FunctionDef name:get_supxlabel arg:self arguments arg Assign Return return:yes Compare Call"
  },
  {
    "library": "kornia",
    "name": "js_div_loss_2d",
    "source_code": "def js_div_loss_2d(pred: Tensor, target: Tensor, reduction: str='mean') -> Tensor:\n    return _reduce_loss(_js_div_2d(target, pred), reduction)",
    "docstring": "Calculate the Jensen-Shannon divergence loss between heatmaps. Args: pred: the input tensor with shape :math:. target: the target tensor with shape :math:. reduction: Specifies the reduction to apply to the output: ``: the output will be summed. Examples: >>> pred = torch.full((1, 1, 2, 4), 0.125) >>> loss = js_div_loss_2d(pred, pred) >>> loss.item() 0.0",
    "type": "function",
    "file_path": "kornia\\kornia\\losses\\divergence.py",
    "ast_data": "FunctionDef name:js_div_loss_2d arg:pred arg:target arg:reduction arguments arg arg arg Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "get_decompositions",
    "source_code": "def get_decompositions(aten_ops: Sequence[Union[torch._ops.OperatorBase, OpOverloadPacket]], type: str='post_autograd') -> dict[torch._ops.OperatorBase, Callable]:\n    assert type in {'post_autograd', 'pre_autograd', 'meta'}\n    registry = global_decomposition_table[type]\n    packets_to_overloads = defaultdict(list)\n    for opo in registry:\n        if isinstance(opo, (OpOverload, OpOverloadPacket)):\n            packets_to_overloads[opo.overloadpacket].append(opo)\n    decompositions: dict[torch._ops.OperatorBase, Callable] = {}\n    for op in aten_ops:\n        if isinstance(op, OpOverloadPacket) and op in packets_to_overloads:\n            for op_overload in packets_to_overloads[op]:\n                decompositions[op_overload] = registry[op_overload]\n        elif isinstance(op, torch._ops.OperatorBase) and op in registry:\n            decompositions[op] = registry[op]\n    return decompositions",
    "docstring": "Retrieve a dictionary of decompositions corresponding to the list of operator overloads and overload packets passed as input. Overload packets will include all decomposed overloads in the packet. If there is no decomposition for a requested operator, it is silently ignored. This API is experimental; we are almost certainly going to give an alternate, more recommended formulation, where a user provides the set of operators they know how to implement, and we provide decompositions for everything not in this set.",
    "type": "function",
    "file_path": "pytorch\\torch\\_decomp\\__init__.py",
    "ast_data": "FunctionDef name:get_decompositions arg:aten_ops arg:type arguments arg arg Compare Assign Assign Call For If Call Call For If BoolOp Call Compare For Assign If BoolOp Call Compare Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "get_value",
    "source_code": "def get_value(self):\n    self._check_status()\n    if self._output_specs is None or isinstance(self._output_specs, none_tensor.NoneTensorSpec):\n        flat_output_dtypes = []\n        return_none = True\n    else:\n        return_none = False\n        flat_output_dtypes = [s.dtype for s in nest.flatten(self._output_specs)]\n    result = gen_rpc_ops.rpc_get_value(self._status_or, Tout=flat_output_dtypes)\n    if return_none:\n        return None\n    else:\n        return nest.pack_sequence_as(self._output_specs, result)",
    "docstring": "Returns the returned response value from RPC Call when RPC is successful. The returned value is tensors in the output_specs format as returned from the RPC call This call will block for RPC result.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\experimental\\rpc\\rpc_ops.py",
    "ast_data": "FunctionDef name:get_value arg:self arguments arg Call If BoolOp Compare Call Assign Assign Assign Assign Call Assign Call If Return return:no Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "_get_shared_axes",
    "source_code": "def _get_shared_axes(self):\n    return self.axes._shared_axes[self._get_axis_name()].get_siblings(self.axes)",
    "docstring": "Return Grouper of shared Axes for current axis.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\axis.py",
    "ast_data": "FunctionDef name:_get_shared_axes arg:self arguments arg Return return:yes Call Call"
  },
  {
    "library": "numpy",
    "name": "flush",
    "source_code": "def flush(self):\n    if self.base is not None and hasattr(self.base, 'flush'):\n        self.base.flush()",
    "docstring": "Write any changes in the array to the file on disk. For further information, see . Parameters ---------- None See Also -------- memmap",
    "type": "method",
    "file_path": "numpy\\numpy\\_core\\memmap.py",
    "ast_data": "FunctionDef name:flush arg:self arguments arg If BoolOp Compare Call Call"
  },
  {
    "library": "scipy",
    "name": "generate_decl_wrapper",
    "source_code": "def generate_decl_wrapper(name, return_type, argnames, argtypes, accelerate):\n    if name in WRAPPED_FUNCS:\n        return ''\n    if accelerate and name in USE_OLD_ACCELERATE:\n        return ''\n    c_return_type = C_TYPES[return_type]\n    c_argtypes = [C_TYPES[t] for t in argtypes]\n    param_list = ', '.join((f'{t} *{n}' for t, n in zip(c_argtypes, argnames)))\n    argnames = ', '.join(argnames)\n    blas_macro, blas_name = get_blas_macro_and_name(name, accelerate)\n    return f'\\n{c_return_type} {blas_macro}({blas_name})({param_list});\\n{c_return_type} F_FUNC({name},{name.upper()})({param_list}){{\\n    return {blas_macro}({blas_name})({argnames});\\n}}\\n'",
    "docstring": "Create wrapper function declaration. Wrapper has symbol and wraps the BLAS/LAPACK function (by default: ).",
    "type": "function",
    "file_path": "scipy\\scipy\\_build_utils\\_generate_blas_wrapper.py",
    "ast_data": "FunctionDef name:generate_decl_wrapper arg:name arg:return_type arg:argnames arg:argtypes arg:accelerate arguments arg arg arg arg arg If Compare Return return:yes If BoolOp Compare Return return:yes Assign Assign Assign Call Call Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "unparse",
    "source_code": "def unparse(node, indentation=None, include_encoding_marker=True):\n    del indentation\n    if not isinstance(node, (list, tuple)):\n        node = (node,)\n    codes = []\n    if include_encoding_marker:\n        codes.append('# coding=utf-8')\n    for n in node:\n        if isinstance(n, gast.AST):\n            ast_n = gast.gast_to_ast(n)\n        else:\n            ast_n = n\n        if astunparse is ast:\n            ast.fix_missing_locations(ast_n)\n        codes.append(astunparse.unparse(ast_n).strip())\n    return '\\n'.join(codes)",
    "docstring": "Returns the source code of given AST. Args: node: The code to compile, as an AST object. indentation: Unused, deprecated. The returning code will always be indented at 4 spaces. include_encoding_marker: Bool, whether to include a comment on the first line to explicitly specify UTF-8 encoding. Returns: code: The source code generated from the AST object source_mapping: A mapping between the user and AutoGraph generated code.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\autograph\\pyct\\parser.py",
    "ast_data": "FunctionDef name:unparse arg:node arg:indentation arg:include_encoding_marker arguments arg arg arg If Call Assign Assign If Call For If Call Assign Call Assign If Compare Call Call Call Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "is_checkpoint_initial_value",
    "source_code": "def is_checkpoint_initial_value(initial_value: Any) -> bool:\n    return isinstance(initial_value, base.CheckpointInitialValue) or isinstance(initial_value, base.CheckpointInitialValueCallable) or (isinstance(initial_value, functools.partial) and isinstance(initial_value.func, base.CheckpointInitialValueCallable))",
    "docstring": "Whether the initial value is from checkpoint.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\tpu\\tpu_embedding_v3.py",
    "ast_data": "FunctionDef name:is_checkpoint_initial_value arg:initial_value arguments arg Return return:yes BoolOp Call Call BoolOp Call Call"
  },
  {
    "library": "scikit-learn",
    "name": "LinearRegressionBenchmark",
    "source_code": "class LinearRegressionBenchmark(Predictor, Estimator, Benchmark):\n    param_names = ['representation']\n    params = (['dense', 'sparse'],)\n\n    def setup_cache(self):\n        super().setup_cache()\n\n    def make_data(self, params):\n        representation, = params\n        if representation == 'dense':\n            data = _synth_regression_dataset(n_samples=1000000, n_features=100)\n        else:\n            data = _synth_regression_sparse_dataset(n_samples=10000, n_features=100000, density=0.01)\n        return data\n\n    def make_estimator(self, params):\n        estimator = LinearRegression()\n        return estimator\n\n    def make_scorers(self):\n        make_gen_reg_scorers(self)",
    "docstring": "Benchmarks for Linear Regression.",
    "type": "class",
    "file_path": "scikit-learn\\asv_benchmarks\\benchmarks\\linear_model.py",
    "ast_data": "ClassDef name:LinearRegressionBenchmark Assign Assign FunctionDef name:setup_cache arg:self arguments arg Call Call FunctionDef name:make_data arg:self arg:params arguments arg arg Assign If Compare Assign Call Assign Call Return return:yes FunctionDef name:make_estimator arg:self arg:params arguments arg arg Assign Call Return return:yes FunctionDef name:make_scorers arg:self arguments arg Call"
  },
  {
    "library": "scikit-learn",
    "name": "_map_infrequent_categories",
    "source_code": "def _map_infrequent_categories(self, X_int, X_mask, ignore_category_indices):\n    if not self._infrequent_enabled:\n        return\n    ignore_category_indices = ignore_category_indices or {}\n    for col_idx in range(X_int.shape[1]):\n        infrequent_idx = self._infrequent_indices[col_idx]\n        if infrequent_idx is None:\n            continue\n        X_int[~X_mask[:, col_idx], col_idx] = infrequent_idx[0]\n        if self.handle_unknown == 'infrequent_if_exist':\n            X_mask[:, col_idx] = True\n    for i, mapping in enumerate(self._default_to_infrequent_mappings):\n        if mapping is None:\n            continue\n        if i in ignore_category_indices:\n            rows_to_update = X_int[:, i] != ignore_category_indices[i]\n        else:\n            rows_to_update = slice(None)\n        X_int[rows_to_update, i] = np.take(mapping, X_int[rows_to_update, i])",
    "docstring": "Map infrequent categories to integer representing the infrequent category. This modifies X_int in-place. Values that were invalid based on are mapped to the infrequent category if there was an infrequent category for that feature. Parameters ---------- X_int: ndarray of shape (n_samples, n_features) Integer encoded categories. X_mask: ndarray of shape (n_samples, n_features) Bool mask for valid values in . ignore_category_indices : dict Dictionary mapping from feature_idx to category index to ignore. Ignored indexes will not be grouped and the original ordinal encoding will remain.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\preprocessing\\_encoders.py",
    "ast_data": "FunctionDef name:_map_infrequent_categories arg:self arg:X_int arg:X_mask arg:ignore_category_indices arguments arg arg arg arg If Return return:no Assign BoolOp For Call Assign If Compare Assign If Compare Assign For Call If Compare If Compare Assign Compare Assign Call Assign Call"
  },
  {
    "library": "pytorch",
    "name": "create_cherry_pick_branch",
    "source_code": "def create_cherry_pick_branch(github_actor: str, repo: GitRepo, pr: GitHubPR, commit_sha: str, onto_branch: str) -> str:\n    repo.checkout(branch=onto_branch)\n    repo._run_git('submodule', 'update', '--init', '--recursive')\n    github_actor = re.sub('[^0-9a-zA-Z]+', '_', github_actor)\n    cherry_pick_branch = f'cherry-pick-{pr.pr_num}-by-{github_actor}'\n    repo.create_branch_and_checkout(branch=cherry_pick_branch)\n    repo._run_git('cherry-pick', '-x', commit_sha)\n    repo.push(branch=cherry_pick_branch, dry_run=False)\n    return cherry_pick_branch",
    "docstring": "Create a local branch and cherry pick the commit. Return the name of the local cherry picking branch.",
    "type": "function",
    "file_path": "pytorch\\.github\\scripts\\cherry_pick.py",
    "ast_data": "FunctionDef name:create_cherry_pick_branch arg:github_actor arg:repo arg:pr arg:commit_sha arg:onto_branch arguments arg arg arg arg arg Call Call Assign Call Assign Call Call Call Return return:yes"
  },
  {
    "library": "scipy",
    "name": "xp_copy",
    "source_code": "def xp_copy(x: Array, *, xp: ModuleType | None=None) -> Array:\n    if xp is None:\n        xp = array_namespace(x)\n    return _asarray(x, copy=True, xp=xp)",
    "docstring": "Copies an array. Parameters ---------- x : array xp : array_namespace Returns ------- copy : array Copied array Notes ----- This copy function does not offer all the semantics of , i.e. the and keywords are not used.",
    "type": "function",
    "file_path": "scipy\\scipy\\_lib\\_array_api.py",
    "ast_data": "FunctionDef name:xp_copy arg:x arguments arg arg If Compare Assign Call Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "den",
    "source_code": "@property\ndef den(self):\n    return self._den",
    "docstring": "Denominator of the system.",
    "type": "method",
    "file_path": "scipy\\scipy\\signal\\_ltisys.py",
    "ast_data": "FunctionDef name:den arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_TensorListGatherGrad",
    "source_code": "@ops.RegisterGradient('TensorListGather')\ndef _TensorListGatherGrad(op: ops.Operation, dtensor):\n    input_list, indices, _ = op.inputs\n    element_shape = gen_list_ops.tensor_list_element_shape(input_list, shape_type=dtypes.int32)\n    num_elements = gen_list_ops.tensor_list_length(input_list)\n    dlist = tensor_list_reserve(element_shape, num_elements, dtensor.dtype)\n    dlist = tensor_list_scatter(tensor=dtensor, indices=indices, input_handle=dlist)\n    return (dlist, None, None)",
    "docstring": "Gradient function for TensorListGather.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\list_ops.py",
    "ast_data": "FunctionDef name:_TensorListGatherGrad arg:op arg:dtensor arguments arg arg Assign Assign Call Assign Call Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "generate",
    "source_code": "def generate(self, input_nodes: list[Buffer], layout: Layout, **kwargs: Any) -> SubgraphChoiceCaller:\n    return SubgraphChoiceCaller(name=self.name, input_nodes=input_nodes, layout=layout, description='', make_fx_graph=self.make_fx_graph)",
    "docstring": "Generate a SubgraphChoiceCaller instance for autotuning. Args: input_nodes: List of input nodes to the subgraph layout: Memory layout information for the output example_inputs: Example tensor inputs used to trace and benchmark the subgraph **kwargs: Additional keyword arguments Returns: SubgraphChoiceCaller: A callable object that can be used for autotuning",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\codegen\\subgraph.py",
    "ast_data": "FunctionDef name:generate arg:self arg:input_nodes arg:layout arguments arg arg arg arg Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "_replace_columnwise",
    "source_code": "def _replace_columnwise(self, mapping: dict[Hashable, tuple[Any, Any]], inplace: bool, regex) -> Self | None:\n    res = self if inplace else self.copy(deep=False)\n    ax = self.columns\n    for i, ax_value in enumerate(ax):\n        if ax_value in mapping:\n            ser = self.iloc[:, i]\n            target, value = mapping[ax_value]\n            newobj = ser.replace(target, value, regex=regex)\n            res._iset_item(i, newobj, inplace=inplace)\n    if inplace:\n        return None\n    return res.__finalize__(self)",
    "docstring": "Dispatch to Series.replace column-wise. Parameters ---------- mapping : dict of the form {col: (target, value)} inplace : bool regex : bool or same types as in DataFrame.replace Returns ------- DataFrame or None",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\frame.py",
    "ast_data": "FunctionDef name:_replace_columnwise arg:self arg:mapping arg:inplace arg:regex arguments arg arg arg arg Assign Call Assign For Call If Compare Assign Assign Assign Call Call If Return return:no Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "_is_supported",
    "source_code": "def _is_supported(self, module: nn.Module, insert: bool=False) -> bool:\n    is_supported_type = any((type(module) is x for x in self.SUPPORTED_MODULES))\n    if insert:\n        return is_supported_type\n    else:\n        has_obs = hasattr(module, self.DEFAULT_PRE_OBSERVER_NAME)\n        return is_supported_type and has_obs",
    "docstring": "Returns whether the given module is supported for observers Args module: The module to check and ensure is supported insert: True if this is check for observer insertion, false if for report gen Returns True if the module is supported by observer, False otherwise",
    "type": "method",
    "file_path": "pytorch\\torch\\ao\\quantization\\fx\\_model_report\\detector.py",
    "ast_data": "FunctionDef name:_is_supported arg:self arg:module arg:insert arguments arg arg arg Assign Call Compare Call If Return return:yes Assign Call Return return:yes BoolOp"
  },
  {
    "library": "pytorch",
    "name": "DivElementwiseTypePromotionRule",
    "source_code": "class DivElementwiseTypePromotionRule(ElementwiseTypePromotionRule):\n\n    def __init__(self):\n        super().__init__('aten', 'div', promote_args_positions=(0, 1), promote_kwargs_names=(), promotion_kind=_prims_common.ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT)\n\n    def preview_type_promotion(self, args: tuple, kwargs: dict) -> TypePromotionSnapshot:\n        rounding_mode = kwargs.get('rounding_mode', None)\n        if rounding_mode is None:\n            self.promotion_kind = _prims_common.ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT\n            return super().preview_type_promotion(args, kwargs)\n        if rounding_mode == 'trunc':\n            self.promotion_kind = _prims_common.ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT\n            return super().preview_type_promotion(args, kwargs)\n        if rounding_mode == 'floor':\n            self.promotion_kind = _prims_common.ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT\n            return super().preview_type_promotion(args, kwargs)\n        raise ValueError(f'Unknown rounding_mode: {rounding_mode}')",
    "docstring": "Reference type promotion rule from torch._refs.div. Rule depends on the value of the argument.",
    "type": "class",
    "file_path": "pytorch\\torch\\onnx\\_internal\\fx\\passes\\type_promotion.py",
    "ast_data": "ClassDef name:DivElementwiseTypePromotionRule FunctionDef name:__init__ arg:self arguments arg Call Call FunctionDef name:preview_type_promotion arg:self arg:args arg:kwargs arguments arg arg arg Assign Call If Compare Assign Return return:yes Call Call If Compare Assign Return return:yes Call Call If Compare Assign Return return:yes Call Call Raise Call"
  },
  {
    "library": "pytorch",
    "name": "all_prims",
    "source_code": "@functools.lru_cache(None)\ndef all_prims():\n    return {torch._prims.__dict__.get(s) for s in torch._prims.__all__}",
    "docstring": "Set of all prim functions, e.g., torch._prims.add in all_prims()",
    "type": "function",
    "file_path": "pytorch\\torch\\_prims\\context.py",
    "ast_data": "FunctionDef name:all_prims arguments Return return:yes Call Call"
  },
  {
    "library": "pandas",
    "name": "_from_selection",
    "source_code": "@final\n@property\ndef _from_selection(self) -> bool:\n    return self._timegrouper is not None and (self._timegrouper.key is not None or self._timegrouper.level is not None)",
    "docstring": "Is the resampling from a DataFrame column or MultiIndex level.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\resample.py",
    "ast_data": "FunctionDef name:_from_selection arg:self arguments arg Return return:yes BoolOp Compare BoolOp Compare Compare"
  },
  {
    "library": "tensorflow",
    "name": "value",
    "source_code": "def value(self):\n    return self._snapshot",
    "docstring": "Returns the last snapshot of this variable. You usually do not need to call this method as all ops that need the value of the variable call it automatically through a call. Returns a which holds the value of the variable. You can not assign a new value to this tensor as it is not a reference to the variable. To avoid copies, if the consumer of the returned value is on the same device as the variable, this actually returns the live value of the variable, not a copy. Updates to the variable are seen by the consumer. If the consumer is on a different device it will get a copy of the variable. Returns: A containing the value of the variable.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\ref_variable.py",
    "ast_data": "FunctionDef name:value arg:self arguments arg Return return:yes"
  },
  {
    "library": "django",
    "name": "hexewkb",
    "source_code": "@property\ndef hexewkb(self):\n    return ewkb_w(dim=3 if self.hasz else 2).write_hex(self)",
    "docstring": "Return the EWKB of this Geometry in hexadecimal form. This is an extension of the WKB specification that includes SRID value that are a part of this geometry.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\geos\\geometry.py",
    "ast_data": "FunctionDef name:hexewkb arg:self arguments arg Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "PassResult",
    "source_code": "@compatibility(is_backward_compatible=False)\nclass PassResult(namedtuple('PassResult', ['graph_module', 'modified'])):\n    __slots__ = ()\n\n    def __new__(cls, graph_module, modified):\n        return super().__new__(cls, graph_module, modified)",
    "docstring": "Result of a pass: graph_module: The modified graph module modified: A flag for if the pass has modified the graph module",
    "type": "class",
    "file_path": "pytorch\\torch\\fx\\passes\\infra\\pass_base.py",
    "ast_data": "ClassDef name:PassResult Call Assign FunctionDef name:__new__ arg:cls arg:graph_module arg:modified arguments arg arg arg Return return:yes Call Call Call"
  },
  {
    "library": "numpy",
    "name": "__reduce__",
    "source_code": "def __reduce__(self):\n    return (_mareconstruct, (self.__class__, self._baseclass, (0,), 'b'), self.__getstate__())",
    "docstring": "Return a 3-tuple for pickling a MaskedArray.",
    "type": "method",
    "file_path": "numpy\\numpy\\ma\\core.py",
    "ast_data": "FunctionDef name:__reduce__ arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "kornia",
    "name": "AugmentationBase3D",
    "source_code": "class AugmentationBase3D(_AugmentationBase):\n\n    def validate_tensor(self, input: Tensor) -> None:\n        _validate_input_dtype(input, accepted_dtypes=[float16, float32, float64])\n        if len(input.shape) != 5:\n            raise RuntimeError(f'Expect (B, C, D, H, W). Got {input.shape}.')\n\n    def transform_tensor(self, input: Tensor, *, shape: Optional[Tensor]=None, match_channel: bool=True) -> Tensor:\n        _validate_input_dtype(input, accepted_dtypes=[float16, float32, float64])\n        if shape is None:\n            return _transform_input3d(input)\n        else:\n            return _transform_input3d_by_shape(input, reference_shape=shape, match_channel=match_channel)\n\n    def identity_matrix(self, input: Tensor) -> Tensor:\n        return kornia.eye_like(4, input)",
    "docstring": "AugmentationBase3D base class for customized augmentation implementations. Args: p: probability for applying an augmentation. This param controls the augmentation probabilities element-wise for a batch. p_batch: probability for applying an augmentation to a batch. This param controls the augmentation probabilities batch-wise. same_on_batch: apply the same transformation across the batch.",
    "type": "class",
    "file_path": "kornia\\kornia\\augmentation\\_3d\\base.py",
    "ast_data": "ClassDef name:AugmentationBase3D FunctionDef name:validate_tensor arg:self arg:input arguments arg arg Call If Compare Call Raise Call FunctionDef name:transform_tensor arg:self arg:input arguments arg arg arg arg Call If Compare Return return:yes Call Return return:yes Call FunctionDef name:identity_matrix arg:self arg:input arguments arg arg Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "is_saving",
    "source_code": "def is_saving(self):\n    return self._is_saving",
    "docstring": "Return whether the renderer is in the process of saving to a file, rather than rendering for an on-screen buffer.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\backend_bases.py",
    "ast_data": "FunctionDef name:is_saving arg:self arguments arg Return return:yes"
  },
  {
    "library": "scipy",
    "name": "_has_struct",
    "source_code": "def _has_struct(elem):\n    return isinstance(elem, np.ndarray) and elem.size > 0 and (elem.ndim > 0) and isinstance(elem[0], mat_struct)",
    "docstring": "Determine if elem is an array and if first array item is a struct.",
    "type": "function",
    "file_path": "scipy\\scipy\\io\\matlab\\_mio5.py",
    "ast_data": "FunctionDef name:_has_struct arg:elem arguments arg Return return:yes BoolOp Call Compare Compare Call"
  },
  {
    "library": "tensorflow",
    "name": "shape",
    "source_code": "@tf_export(v1=['shape'])\n@dispatch.add_dispatch_support\ndef shape(input, name=None, out_type=None):\n    if out_type is None:\n        if flags.config().tf_shape_default_int64.value():\n            out_type = dtypes.int64\n        else:\n            out_type = dtypes.int32\n    return shape_internal(input, name, optimize=True, out_type=out_type)",
    "docstring": "Returns the shape of a tensor. This operation returns a 1-D integer tensor representing the shape of . For example: Args: input: A or . name: A name for the operation (optional). out_type: (Optional) The specified output type of the operation ( or ). Defaults to . Returns: A of type .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\array_ops.py",
    "ast_data": "FunctionDef name:shape arg:input arg:name arg:out_type arguments arg arg arg If Compare If Call Call Assign Assign Return return:yes Call Call"
  },
  {
    "library": "django",
    "name": "DeleteView",
    "source_code": "class DeleteView(SingleObjectTemplateResponseMixin, BaseDeleteView):\n    template_name_suffix = '_confirm_delete'",
    "docstring": "View for deleting an object retrieved with self.get_object(), with a response rendered by a template.",
    "type": "class",
    "file_path": "django\\django\\views\\generic\\edit.py",
    "ast_data": "ClassDef name:DeleteView Assign"
  },
  {
    "library": "pandas",
    "name": "SeriesDescriber",
    "source_code": "class SeriesDescriber(NDFrameDescriberAbstract):\n    obj: Series\n\n    def describe(self, percentiles: Sequence[float] | np.ndarray) -> Series:\n        describe_func = select_describe_func(self.obj)\n        return describe_func(self.obj, percentiles)",
    "docstring": "Class responsible for creating series description.",
    "type": "class",
    "file_path": "pandas\\pandas\\core\\methods\\describe.py",
    "ast_data": "ClassDef name:SeriesDescriber FunctionDef name:describe arg:self arg:percentiles arguments arg arg Assign Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "_take_tensors",
    "source_code": "def _take_tensors(tensors, size_limit):\n    buf_dict: defaultdict[str, list] = defaultdict(lambda: [[], 0])\n    for tensor in tensors:\n        t = tensor.type()\n        if tensor.is_sparse:\n            indices = torch.Tensor._indices(tensor)\n            values = torch.Tensor._values(tensor)\n            size = indices.numel() * indices.element_size() + values.numel() * values.element_size()\n        else:\n            size = tensor.numel() * tensor.element_size()\n        buf_and_size = buf_dict[t]\n        if buf_and_size[1] + size > size_limit and buf_and_size[1] > 0:\n            yield buf_and_size[0]\n            buf_and_size = buf_dict[t] = [[], 0]\n        buf_and_size[0].append(tensor)\n        buf_and_size[1] += size\n    for buf, _ in buf_dict.values():\n        if len(buf) > 0:\n            yield buf",
    "docstring": "Group tensors into chunks. This generator yields a chunk at each time, each containing tensors of same type up to certain byte limit in total size. Args: tensors (Sequence): A sequence of tensors to be separated into chunks. size_limit (int): The limit of each chunk in bytes. Yields: Blocks of tensors of same type and within size_limit. The yielded tensors are only ordered as the original sequence within its types.",
    "type": "function",
    "file_path": "pytorch\\torch\\_utils.py",
    "ast_data": "FunctionDef name:_take_tensors arg:tensors arg:size_limit arguments arg arg Call arguments For Assign Call If Assign Call Assign Call Assign Call Call Call Call Assign Call Call Assign If BoolOp Compare Compare Assign Call For Call If Compare Call"
  },
  {
    "library": "tensorflow",
    "name": "transform",
    "source_code": "def transform(self, obj, user_context):\n    if inspect.isfunction(obj) or inspect.ismethod(obj):\n        return self.transform_function(obj, user_context)\n    raise NotImplementedError('Non-function: {}'.format(type(obj)))",
    "docstring": "Transforms a Python object. Users typically call this method. Args: obj: A Python object, function, type, etc. user_context: An opaque object (may be None) that is forwarded to transform_ast, through the ctx.user attribute. Returns: The result of calling transform_function. Raises: NotImplementedError: if the type of obj is not handled.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\autograph\\pyct\\transpiler.py",
    "ast_data": "FunctionDef name:transform arg:self arg:obj arg:user_context arguments arg arg arg If BoolOp Call Call Return return:yes Call Raise Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "final_reduction_define",
    "source_code": "def final_reduction_define(buffer, result_var: str, value: str, result_type: Optional[str]) -> None:\n    value = final_reduction(buffer, value, result_type)\n    buffer.splice(f'{result_var} = {value}')",
    "docstring": "Generate a reduction and assign it to an existing variable.",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\codegen\\triton.py",
    "ast_data": "FunctionDef name:final_reduction_define arg:buffer arg:result_var arg:value arg:result_type arguments arg arg arg arg Assign Call Call"
  },
  {
    "library": "pytorch",
    "name": "_get_post_order_named_modules",
    "source_code": "def _get_post_order_named_modules(root_module: nn.Module) -> list[tuple[str, nn.Module]]:\n    visited_modules = {root_module}\n    stack = [('', root_module)]\n    reverse_post_order_named_modules: list[tuple[str, nn.Module]] = []\n    while stack:\n        module_name, module = stack.pop()\n        reverse_post_order_named_modules.append((module_name, module))\n        for child_module_name, child_module in module.named_children():\n            if child_module is None:\n                continue\n            if child_module not in visited_modules:\n                visited_modules.add(child_module)\n                if module_name != '':\n                    child_module_name = module_name + '.' + child_module_name\n                stack.append((child_module_name, child_module))\n    post_order_named_modules = list(reversed(reverse_post_order_named_modules))\n    return post_order_named_modules",
    "docstring": "This returns the named modules following a post-order traversal, which is a valid reverse topological sort. We achieve this using the reverse of a stack-based DFS order instead of reversing `` order is [S3, SS2, SS1, S2, S1, M].",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\fsdp\\_wrap_utils.py",
    "ast_data": "FunctionDef name:_get_post_order_named_modules arg:root_module arguments arg Assign Assign While Assign Call Call For Call If Compare If Compare Call If Compare Assign Call Assign Call Call Return return:yes"
  },
  {
    "library": "numpy",
    "name": "_hist_bin_scott",
    "source_code": "def _hist_bin_scott(x, range):\n    del range\n    return (24.0 * np.pi ** 0.5 / x.size) ** (1.0 / 3.0) * np.std(x)",
    "docstring": "Scott histogram bin estimator. The binwidth is proportional to the standard deviation of the data and inversely proportional to the cube root of data size (asymptotically optimal). Parameters ---------- x : array_like Input data that is to be histogrammed, trimmed to range. May not be empty. Returns ------- h : An estimate of the optimal bin width for the given data.",
    "type": "function",
    "file_path": "numpy\\numpy\\lib\\_histograms_impl.py",
    "ast_data": "FunctionDef name:_hist_bin_scott arg:x arg:range arguments arg arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "_device_capability",
    "source_code": "def _device_capability(group: Optional[ProcessGroup]=None) -> list[str]:\n    group = group or _get_default_group()\n    return [device.type for device in group._device_types]",
    "docstring": "Return the device type(s) supported by ``.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\distributed_c10d.py",
    "ast_data": "FunctionDef name:_device_capability arg:group arguments arg Assign BoolOp Call Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "score",
    "source_code": "def score(self, X, y, **score_params):\n    _raise_for_params(score_params, self, 'score')\n    scoring = self._get_scorer()\n    if _routing_enabled():\n        routed_params = process_routing(self, 'score', **score_params)\n    else:\n        routed_params = Bunch()\n        routed_params.scorer = Bunch(score={})\n    return scoring(self, X, y, **routed_params.scorer.score)",
    "docstring": "Score using the option on the given test data and labels. Parameters ---------- X : array-like of shape (n_samples, n_features) Test samples. y : array-like of shape (n_samples,) True labels for X. **score_params : dict Parameters to pass to the method of the underlying scorer. .. versionadded:: 1.6 Only available if , which can be set by using `Metadata Routing User Guide scoring`.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\feature_selection\\_rfe.py",
    "ast_data": "FunctionDef name:score arg:self arg:X arg:y arguments arg arg arg arg Call Assign Call If Call Assign Call Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_dedup_weights",
    "source_code": "def _dedup_weights(self, weights):\n    output, seen_ids = ([], set())\n    for w in weights:\n        if id(w) not in seen_ids:\n            output.append(w)\n            seen_ids.add(id(w))\n    return output",
    "docstring": "Dedupe weights while maintaining order as much as possible.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\base_layer_v1.py",
    "ast_data": "FunctionDef name:_dedup_weights arg:self arg:weights arguments arg arg Assign Call For If Compare Call Call Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "value_dtype",
    "source_code": "@property\ndef value_dtype(self):\n    return self._value_dtype",
    "docstring": "The expected table value dtype.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\lookup_ops.py",
    "ast_data": "FunctionDef name:value_dtype arg:self arguments arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "install_callbacks",
    "source_code": "@contextmanager\ndef install_callbacks(self) -> Generator[None, Any, Any]:\n    try:\n        with self.__pending_callbacks_counter_lock:\n            if self.__pending_callbacks_counter == 0:\n                self.run_start_callbacks()\n            self.__pending_callbacks_counter += 1\n        yield\n    finally:\n        with self.__pending_callbacks_counter_lock:\n            assert self.__pending_callbacks_counter > 0, 'Pending callbacks counter cannot become negative.'\n            if self.__pending_callbacks_counter == 1:\n                self.run_end_callbacks()\n            self.__pending_callbacks_counter -= 1",
    "docstring": "Context manager to install the callbacks and run them when the context is exited.",
    "type": "method",
    "file_path": "pytorch\\torch\\_dynamo\\callback.py",
    "ast_data": "FunctionDef name:install_callbacks arg:self arguments arg Try With If Compare Call With Compare If Compare Call"
  },
  {
    "library": "tensorflow",
    "name": "op",
    "source_code": "@property\ndef op(self) -> ops.Operation:\n    return self._parent_op",
    "docstring": "The op for this variable.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\resource_variable_ops.py",
    "ast_data": "FunctionDef name:op arg:self arguments arg Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "__init__",
    "source_code": "def __init__(self, numticks=None, presets=None):\n    self.numticks = numticks\n    if presets is None:\n        self.presets = {}\n    else:\n        self.presets = presets",
    "docstring": "Parameters ---------- numticks : int or None, default None Number of ticks. If None, *numticks* = 11. presets : dict or None, default: None Dictionary mapping ``.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\ticker.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:numticks arg:presets arguments arg arg arg Assign If Compare Assign Assign"
  },
  {
    "library": "pandas",
    "name": "decons_obs_group_ids",
    "source_code": "def decons_obs_group_ids(comp_ids: npt.NDArray[np.intp], obs_ids: npt.NDArray[np.intp], shape: Shape, labels: Sequence[npt.NDArray[np.signedinteger]], xnull: bool) -> list[npt.NDArray[np.intp]]:\n    if not xnull:\n        lift = np.fromiter(((a == -1).any() for a in labels), dtype=np.intp)\n        arr_shape = np.asarray(shape, dtype=np.intp) + lift\n        shape = tuple(arr_shape)\n    if not is_int64_overflow_possible(shape):\n        out = _decons_group_index(obs_ids, shape)\n        return out if xnull or not lift.any() else [x - y for x, y in zip(out, lift)]\n    indexer = unique_label_indices(comp_ids)\n    return [lab[indexer].astype(np.intp, subok=False, copy=True) for lab in labels]",
    "docstring": "Reconstruct labels from observed group ids. Parameters ---------- comp_ids : np.ndarray[np.intp] obs_ids: np.ndarray[np.intp] shape : tuple[int] labels : Sequence[np.ndarray[np.signedinteger]] xnull : bool If nulls are excluded; i.e. -1 labels are passed through.",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\sorting.py",
    "ast_data": "FunctionDef name:decons_obs_group_ids arg:comp_ids arg:obs_ids arg:shape arg:labels arg:xnull arguments arg arg arg arg arg If Assign Call Call Compare Assign Call Assign Call If Call Assign Call Return return:yes BoolOp Call Call Assign Call Return return:yes Call"
  },
  {
    "library": "scrapy",
    "name": "configure_logging",
    "source_code": "def configure_logging(settings: Settings | dict[_SettingsKeyT, Any] | None=None, install_root_handler: bool=True) -> None:\n    if not sys.warnoptions:\n        logging.captureWarnings(True)\n    observer = twisted_log.PythonLoggingObserver('twisted')\n    observer.start()\n    dictConfig(DEFAULT_LOGGING)\n    if isinstance(settings, dict) or settings is None:\n        settings = Settings(settings)\n    if settings.getbool('LOG_STDOUT'):\n        sys.stdout = StreamLogger(logging.getLogger('stdout'))\n    if install_root_handler:\n        install_scrapy_root_handler(settings)",
    "docstring": "Initialize logging defaults for Scrapy. :param settings: settings used to create and configure a handler for the root logger (default: None). :type settings: dict, :class: object or `topics-logging-settings` is empty or None, defaults are used.",
    "type": "function",
    "file_path": "scrapy\\scrapy\\utils\\log.py",
    "ast_data": "FunctionDef name:configure_logging arg:settings arg:install_root_handler arguments arg arg If Call Assign Call Call Call If BoolOp Call Compare Assign Call If Call Assign Call Call If Call"
  },
  {
    "library": "pytorch",
    "name": "main_hook",
    "source_code": "def main_hook(self):\n    self.zero.step()",
    "docstring": "Perform an optimizer step. This step updates the joined process's shard of the parameters and broadcasts those parameters.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\optim\\zero_redundancy_optimizer.py",
    "ast_data": "FunctionDef name:main_hook arg:self arguments arg Call"
  },
  {
    "library": "tensorflow",
    "name": "_zero_state_tensors",
    "source_code": "def _zero_state_tensors(state_size, batch_size, dtype):\n\n    def get_state_shape(s):\n        c = _concat(batch_size, s)\n        size = array_ops.zeros(c, dtype=dtype)\n        if not context.executing_eagerly():\n            c_static = _concat(batch_size, s, static=True)\n            size.set_shape(c_static)\n        return size\n    return nest.map_structure(get_state_shape, state_size)",
    "docstring": "Create tensors of zeros based on state_size, batch_size, and dtype.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\layers\\legacy_rnn\\rnn_cell_impl.py",
    "ast_data": "FunctionDef name:_zero_state_tensors arg:state_size arg:batch_size arg:dtype arguments arg arg arg FunctionDef name:get_state_shape arg:s arguments arg Assign Call Assign Call If Call Assign Call Call Return return:yes Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "_ellipdeg",
    "source_code": "def _ellipdeg(n, m1):\n    K1 = special.ellipk(m1)\n    K1p = special.ellipkm1(m1)\n    q1 = np.exp(-np.pi * K1p / K1)\n    q = q1 ** (1 / n)\n    mnum = np.arange(_ELLIPDEG_MMAX + 1)\n    mden = np.arange(1, _ELLIPDEG_MMAX + 2)\n    num = np.sum(q ** (mnum * (mnum + 1)))\n    den = 1 + 2 * np.sum(q ** mden ** 2)\n    return 16 * q * (num / den) ** 4",
    "docstring": "Solve degree equation using nomes Given n, m1, solve n * K(m) / K'(m) = K1(m1) / K1'(m1) for m See [1], Eq. (49) References ---------- .. [1] Orfanidis, \"Lecture Notes on Elliptic Filter Design\",",
    "type": "function",
    "file_path": "scipy\\scipy\\signal\\_filter_design.py",
    "ast_data": "FunctionDef name:_ellipdeg arg:n arg:m1 arguments arg arg Assign Call Assign Call Assign Call Assign Assign Call Assign Call Assign Call Assign Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "release",
    "source_code": "@property\ndef release(self) -> Tuple[int, ...]:\n    return self._version.release",
    "docstring": "The components of the \"release\" segment of the version. >>> Version(\"1.2.3\").release (1, 2, 3) >>> Version(\"2.0.0\").release (2, 0, 0) >>> Version(\"1!2.0.0.post0\").release (2, 0, 0) Includes trailing zeroes but not the epoch or any pre-release / development / post-release suffixes.",
    "type": "method",
    "file_path": "pytorch\\torch\\_vendor\\packaging\\version.py",
    "ast_data": "FunctionDef name:release arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "sparse_dense_cwise_add",
    "source_code": "def sparse_dense_cwise_add(sp_t, dense_t):\n    result = gen_sparse_ops.sparse_dense_cwise_add(sp_t.indices, sp_t.values, sp_t.dense_shape, dense_t)\n    return sparse_tensor.SparseTensor(sp_t.indices, result, sp_t.dense_shape)",
    "docstring": "Adds up a SparseTensor and a dense Tensor, using these special rules: (1) Broadcasts the dense side to have the same shape as the sparse side, if eligible; (2) Then, only the dense values pointed to by the indices of the SparseTensor participate in the cwise addition. By the rules, the result is a logical SparseTensor with exactly the same indices and shape, but possibly with different non-zero values. The output of this Op is the resultant non-zero values. Args: sp_t: the SparseTensor operand. dense_t: the dense Tensor operand; must have the same dtype and a broadcast-compatible shape as . Returns: output: the SparseTensor output.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\sparse_ops.py",
    "ast_data": "FunctionDef name:sparse_dense_cwise_add arg:sp_t arg:dense_t arguments arg arg Assign Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_get_time_or_placeholder",
    "source_code": "def _get_time_or_placeholder(value) -> int:\n    if value == 0:\n        return 1\n    if value is None:\n        return 0\n    return value",
    "docstring": "Modifies time-based config values to account for special behaviors.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\data\\experimental\\service\\server_lib.py",
    "ast_data": "FunctionDef name:_get_time_or_placeholder arg:value arguments arg If Compare Return return:yes If Compare Return return:yes Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "exporter_context",
    "source_code": "@deprecated('The feature will be removed. Please remove usage of this function and implement equivalent logic if needed', category=None)\n@contextlib.contextmanager\ndef exporter_context(model, mode: _C_onnx.TrainingMode, verbose: bool):\n    with select_model_mode_for_export(model, mode) as mode_ctx, disable_apex_o2_state_dict_hook(model) as apex_ctx, setup_onnx_logging(verbose) as log_ctx:\n        yield (mode_ctx, apex_ctx, log_ctx)",
    "docstring": "A context manager to temporarily set the training mode of ``, disable the Apex O2 hook, and set the ONNX logging verbosity. .. deprecated:: 2.7 Please set training mode before exporting the model.",
    "type": "function",
    "file_path": "pytorch\\torch\\onnx\\utils.py",
    "ast_data": "FunctionDef name:exporter_context arg:model arg:mode arg:verbose arguments arg arg arg With Call Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "prepare_qat",
    "source_code": "def prepare_qat(model, mapping=None, inplace=False):\n    torch._C._log_api_usage_once('quantization_api.quantize.prepare_qat')\n    assert model.training, 'prepare_qat only works on models in training mode'\n    if mapping is None:\n        mapping = get_default_qat_module_mappings()\n    if not inplace:\n        model = copy.deepcopy(model)\n    propagate_qconfig_(model, qconfig_dict=None)\n    convert(model, mapping=mapping, inplace=True, remove_qconfig=False)\n    prepare(model, observer_non_leaf_module_list=set(mapping.values()), inplace=True)\n    return model",
    "docstring": "Prepares a copy of the model for quantization calibration or quantization-aware training and converts it to quantized version. Quantization configuration should be assigned preemptively to individual submodules in attribute. Args: model: input model to be modified in-place mapping: dictionary that maps float modules to quantized modules to be replaced. inplace: carry out model transformations in-place, the original module is mutated",
    "type": "function",
    "file_path": "pytorch\\torch\\ao\\quantization\\quantize.py",
    "ast_data": "FunctionDef name:prepare_qat arg:model arg:mapping arg:inplace arguments arg arg arg Call If Compare Assign Call If Assign Call Call Call Call Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "init_state",
    "source_code": "def init_state(self, node):\n    raise NotImplementedError('Subclasses must implement this.')",
    "docstring": "State initialization function. Optional to overload. An in/out state slot will be created for each node in the graph. Subclasses must overload this to control what that is initialized to. Args: node: Node",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\autograph\\pyct\\cfg.py",
    "ast_data": "FunctionDef name:init_state arg:self arg:node arguments arg arg Raise Call"
  },
  {
    "library": "pytorch",
    "name": "get_tensor_id",
    "source_code": "def get_tensor_id(tensor):\n    return torch._C._lazy._get_tensor_id(tensor)",
    "docstring": "Return a unique id of the lazy tensor maintained by LTC",
    "type": "function",
    "file_path": "pytorch\\torch\\_lazy\\__init__.py",
    "ast_data": "FunctionDef name:get_tensor_id arg:tensor arguments arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "is_dynamo_compiling",
    "source_code": "def is_dynamo_compiling() -> bool:\n    return False",
    "docstring": "Indicates whether a graph is traced via TorchDynamo. It's stricter than is_compiling() flag, as it would only be set to True when TorchDynamo is used. Example:: >>> def forward(self, x): >>> if not torch.compiler.is_dynamo_compiling(): >>> pass # ...logic that is not needed in a TorchDynamo-traced graph... >>> >>> # ...rest of the function...",
    "type": "function",
    "file_path": "pytorch\\torch\\compiler\\__init__.py",
    "ast_data": "FunctionDef name:is_dynamo_compiling arguments Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_flatten_tensor_size",
    "source_code": "def _flatten_tensor_size(size) -> torch.Size:\n    if len(size) == 1 and isinstance(size[0], collections.abc.Sequence):\n        dims = list(*size)\n    else:\n        dims = list(size)\n    for dim in dims:\n        if not isinstance(dim, int):\n            raise TypeError(f'size has to be a sequence of ints, found: {dims}')\n    return torch.Size(dims)",
    "docstring": "Checks if tensor size is valid, then flatten/return a torch.Size object.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\_shard\\sharded_tensor\\utils.py",
    "ast_data": "FunctionDef name:_flatten_tensor_size arg:size arguments arg If BoolOp Compare Call Call Assign Call Assign Call For If Call Raise Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "prepend",
    "source_code": "def prepend(self, line, font_attr_segs=None):\n    other = RichTextLines(line)\n    if font_attr_segs:\n        other.font_attr_segs[0] = font_attr_segs\n    self._extend_before(other)",
    "docstring": "Prepend (i.e., add to the front) a single line of text. Args: line: (str) The text to be added to the front. font_attr_segs: (list of tuples) Font attribute segments of the appended line.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\debug\\cli\\debugger_cli_common.py",
    "ast_data": "FunctionDef name:prepend arg:self arg:line arg:font_attr_segs arguments arg arg arg Assign Call If Assign Call"
  },
  {
    "library": "scipy",
    "name": "rvs",
    "source_code": "def rvs(self, dim, size=1, random_state=None):\n    random_state = self._get_random_state(random_state)\n    q = ortho_group.rvs(dim, size, random_state)\n    dets = np.linalg.det(q)\n    if dim:\n        q[..., 0, :] /= dets[..., np.newaxis]\n    return q",
    "docstring": "Draw random samples from SO(N). Parameters ---------- dim : integer Dimension of rotation space (N). size : integer, optional Number of samples to draw (default 1). Returns ------- rvs : ndarray or scalar Random size N-dimensional matrices, dimension (size, dim, dim)",
    "type": "method",
    "file_path": "scipy\\scipy\\stats\\_multivariate.py",
    "ast_data": "FunctionDef name:rvs arg:self arg:dim arg:size arg:random_state arguments arg arg arg arg Assign Call Assign Call Assign Call If Return return:yes"
  },
  {
    "library": "numpy",
    "name": "check_compiler_gcc",
    "source_code": "def check_compiler_gcc(self):\n    return check_compiler_gcc(self)",
    "docstring": "Return True if the C compiler is gcc",
    "type": "method",
    "file_path": "numpy\\numpy\\distutils\\command\\config.py",
    "ast_data": "FunctionDef name:check_compiler_gcc arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "Container",
    "source_code": "class Container(object):\n\n    def __init__(self, output_names=None):\n        self._output_names = output_names\n\n    def build(self, y_pred):\n        if self._output_names is None:\n            self._output_names = create_pseudo_output_names(y_pred)\n\n    def _conform_to_outputs(self, outputs, struct):\n        struct = map_to_output_names(outputs, self._output_names, struct)\n        struct = map_missing_dict_keys(outputs, struct)\n        if not nest.is_nested(struct) and nest.is_nested(outputs):\n            struct = nest.map_structure(lambda _: struct, outputs)\n        return struct\n\n    def _maybe_broadcast_to_outputs(self, outputs, objects):\n        if not self._should_broadcast(objects):\n            return objects\n        should_copy_objects = len(nest.flatten(outputs)) > 1\n\n        def _broadcast_fn():\n            if should_copy_objects:\n                return nest.map_structure(self._copy_object, objects)\n            return objects\n        return nest.map_structure(lambda _: _broadcast_fn(), outputs)\n\n    def _should_broadcast(self, objects):\n        raise NotImplementedError\n\n    def _copy_object(self, obj):\n        raise NotImplementedError",
    "docstring": "Base Container class.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\compile_utils.py",
    "ast_data": "ClassDef name:Container FunctionDef name:__init__ arg:self arg:output_names arguments arg arg Assign FunctionDef name:build arg:self arg:y_pred arguments arg arg If Compare Assign Call FunctionDef name:_conform_to_outputs arg:self arg:outputs arg:struct arguments arg arg arg Assign Call Assign Call If BoolOp Call Call Assign Call arguments arg Return return:yes FunctionDef name:_maybe_broadcast_to_outputs arg:self arg:outputs arg:objects arguments arg arg arg If Call Return return:yes Assign Compare Call Call FunctionDef name:_broadcast_fn arguments If Return return:yes Call Return return:yes Return return:yes Call arguments arg Call FunctionDef name:_should_broadcast arg:self arg:objects arguments arg arg Raise FunctionDef name:_copy_object arg:self arg:obj arguments arg arg Raise"
  },
  {
    "library": "cherrypy",
    "name": "assert_native",
    "source_code": "def assert_native(n):\n    if not isinstance(n, str):\n        raise TypeError('n must be a native str (got %s)' % type(n).__name__)",
    "docstring": "Ensure that input is a native :class:.",
    "type": "function",
    "file_path": "cherrypy\\cherrypy\\_cpcompat.py",
    "ast_data": "FunctionDef name:assert_native arg:n arguments arg If Call Raise Call Call"
  },
  {
    "library": "pytorch",
    "name": "validate_constraints",
    "source_code": "def validate_constraints(self):\n    if self._validated:\n        return\n    for constraint in self.constraints:\n        _validate_pass_schedule_constraint(constraint, self.passes)\n    self._validated = True",
    "docstring": "Validates that current pass schedule defined by is valid according to all constraints in",
    "type": "method",
    "file_path": "pytorch\\torch\\fx\\passes\\infra\\pass_manager.py",
    "ast_data": "FunctionDef name:validate_constraints arg:self arguments arg If Return return:no For Call Assign"
  },
  {
    "library": "pytorch",
    "name": "TupleStrategy",
    "source_code": "class TupleStrategy(StrategyType):\n\n    def __init__(self, childs: Sequence[StrategyType]) -> None:\n        super().__init__()\n        self.childs: Sequence[StrategyType] = childs\n\n    def child_mesh(self, index: int) -> DeviceMesh:\n        op_strategy = self.childs[index]\n        assert isinstance(op_strategy, OpStrategy)\n        return op_strategy.mesh\n\n    def __str__(self) -> str:\n        child_strategies_str = ', '.join([f'{str(strat)}' for idx, strat in enumerate(self.childs)])\n        return f'TupleStrategy({child_strategies_str})'",
    "docstring": "TupleStrategy represents the output strategy of this op is a tuple of strategy, i.e. If the output of this op is a tuple of tensors or list of tensors with possibly different placement strategies, we should return a TupleStrategy that contains a tuple of OpStrategy, where each child represents the sharding strategy of \"each element\" of the tuple/list of tensors the op returns. NOTE: if the output of the op is a List[Tensor] and they share the same placement strategy, then we should return a single OpStrategy instead of a TupleStrategy",
    "type": "class",
    "file_path": "pytorch\\torch\\distributed\\tensor\\_op_schema.py",
    "ast_data": "ClassDef name:TupleStrategy FunctionDef name:__init__ arg:self arg:childs arguments arg arg Call Call FunctionDef name:child_mesh arg:self arg:index arguments arg arg Assign Call Return return:yes FunctionDef name:__str__ arg:self arguments arg Assign Call Call Call Return return:yes"
  },
  {
    "library": "cherrypy",
    "name": "subscribe",
    "source_code": "def subscribe(self):\n    if hasattr(self.bus, 'signal_handler'):\n        self.bus.signal_handler.subscribe()\n    if hasattr(self.bus, 'console_control_handler'):\n        self.bus.console_control_handler.subscribe()",
    "docstring": "Add the handlers based on the platform.",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\__init__.py",
    "ast_data": "FunctionDef name:subscribe arg:self arguments arg If Call Call If Call Call"
  },
  {
    "library": "django",
    "name": "disjoint",
    "source_code": "def disjoint(self, other):\n    return capi.geos_disjoint(self.ptr, other.ptr)",
    "docstring": "Return true if the DE-9IM intersection matrix for the two Geometries is FF*FF****.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\geos\\geometry.py",
    "ast_data": "FunctionDef name:disjoint arg:self arg:other arguments arg arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "with_values",
    "source_code": "def with_values(self, new_values):\n    return SparseTensor(self._indices, new_values, self._dense_shape)",
    "docstring": "Returns a copy of with replaced by . This method produces a new that has the same nonzero and same , but updated values. Args: new_values: The values of the new . Needs to have the same shape as the current . May have a different type than the current . Returns: A with identical indices and shape but updated values. Example usage: >>> st = tf.sparse.from_dense([[1, 0, 2, 0], [3, 0, 0, 4]]) >>> tf.sparse.to_dense(st.with_values([10, 20, 30, 40])) # 4 nonzero values",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\sparse_tensor.py",
    "ast_data": "FunctionDef name:with_values arg:self arg:new_values arguments arg arg Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "_check_info",
    "source_code": "def _check_info(info, driver, positive='did not converge (LAPACK info=%d)'):\n    if info < 0:\n        raise ValueError(f'illegal value in argument {-info} of internal {driver}')\n    if info > 0 and positive:\n        raise LinAlgError(('%s ' + positive) % (driver, info))",
    "docstring": "Check info return value.",
    "type": "function",
    "file_path": "scipy\\scipy\\linalg\\_decomp.py",
    "ast_data": "FunctionDef name:_check_info arg:info arg:driver arg:positive arguments arg arg arg If Compare Raise Call If BoolOp Compare Raise Call"
  },
  {
    "library": "tensorflow",
    "name": "watched_variables",
    "source_code": "def watched_variables(self):\n    if self._tape is not None:\n        self._watched_variables = self._tape.watched_variables()\n    return self._watched_variables",
    "docstring": "Returns variables watched by this tape in order of construction.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\backprop.py",
    "ast_data": "FunctionDef name:watched_variables arg:self arguments arg If Compare Assign Call Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "_wrap_data_with_container",
    "source_code": "def _wrap_data_with_container(method, data_to_wrap, original_input, estimator):\n    output_config = _get_output_config(method, estimator)\n    if output_config['dense'] == 'default' or not _auto_wrap_is_configured(estimator):\n        return data_to_wrap\n    dense_config = output_config['dense']\n    if issparse(data_to_wrap):\n        raise ValueError(f\"The transformer outputs a scipy sparse matrix. Try to set the transformer output to a dense array or disable {dense_config.capitalize()} output with set_output(transform='default').\")\n    adapter = ADAPTERS_MANAGER.adapters[dense_config]\n    return adapter.create_container(data_to_wrap, original_input, columns=estimator.get_feature_names_out)",
    "docstring": "Wrap output with container based on an estimator's or global config. Parameters ---------- method : {\"transform\"} Estimator's method to get container output for. data_to_wrap : {ndarray, dataframe} Data to wrap with container. original_input : {ndarray, dataframe} Original input of function. estimator : estimator instance Estimator with to get the output configuration from. Returns ------- output : {ndarray, dataframe} If the output config is \"default\" or the estimator is not configured for wrapping return unchanged. If the output config is \"pandas\", return as a pandas DataFrame.",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\utils\\_set_output.py",
    "ast_data": "FunctionDef name:_wrap_data_with_container arg:method arg:data_to_wrap arg:original_input arg:estimator arguments arg arg arg arg Assign Call If BoolOp Compare Call Return return:yes Assign If Call Raise Call Call Assign Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "height",
    "source_code": "@property\ndef height(self):\n    points = self.get_points()\n    return points[1, 1] - points[0, 1]",
    "docstring": "The (signed) height of the bounding box.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\transforms.py",
    "ast_data": "FunctionDef name:height arg:self arguments arg Assign Call Return return:yes"
  },
  {
    "library": "cherrypy",
    "name": "_HandleSignalsPlugin",
    "source_code": "class _HandleSignalsPlugin(object):\n\n    def __init__(self, bus):\n        self.bus = bus\n\n    def subscribe(self):\n        if hasattr(self.bus, 'signal_handler'):\n            self.bus.signal_handler.subscribe()\n        if hasattr(self.bus, 'console_control_handler'):\n            self.bus.console_control_handler.subscribe()",
    "docstring": "Handle signals from other processes. Based on the configured platform handlers above.",
    "type": "class",
    "file_path": "cherrypy\\cherrypy\\__init__.py",
    "ast_data": "ClassDef name:_HandleSignalsPlugin FunctionDef name:__init__ arg:self arg:bus arguments arg arg Assign FunctionDef name:subscribe arg:self arguments arg If Call Call If Call Call"
  },
  {
    "library": "numpy",
    "name": "isspace",
    "source_code": "def isspace(self):\n    return isspace(self)",
    "docstring": "Returns true for each element if there are only whitespace characters in the string and there is at least one character, false otherwise. See Also -------- char.isspace",
    "type": "method",
    "file_path": "numpy\\numpy\\_core\\defchararray.py",
    "ast_data": "FunctionDef name:isspace arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "list",
    "source_code": "def list(github, force_reload=False, skip_validation=False, trust_repo=None, verbose=True):\n    repo_dir = _get_cache_or_reload(github, force_reload, trust_repo, 'list', verbose=verbose, skip_validation=skip_validation)\n    with _add_to_sys_path(repo_dir):\n        hubconf_path = os.path.join(repo_dir, MODULE_HUBCONF)\n        hub_module = _import_module(MODULE_HUBCONF, hubconf_path)\n    entrypoints = [f for f in dir(hub_module) if callable(getattr(hub_module, f)) and (not f.startswith('_'))]\n    return entrypoints",
    "docstring": "List all callable entrypoints available in the repo specified by ``. Returns: list: The available callables entrypoint Example: >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_HUB) >>> entrypoints = torch.hub.list(\"pytorch/vision\", force_reload=True)",
    "type": "function",
    "file_path": "pytorch\\torch\\hub.py",
    "ast_data": "FunctionDef name:list arg:github arg:force_reload arg:skip_validation arg:trust_repo arg:verbose arguments arg arg arg arg arg Assign Call With Call Assign Call Assign Call Assign Call BoolOp Call Call Call Return return:yes"
  },
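A hedged sketch of the entrypoint filter `torch.hub.list` applies to the imported `hubconf` module: keep public callables only. The `math` module stands in for `hubconf` here, and `public_callables` is an illustrative helper, not a torch API.

```python
import math
import types

def public_callables(module: types.ModuleType) -> list:
    # Same predicate as in list() above: callable and not underscore-prefixed.
    return [f for f in dir(module)
            if callable(getattr(module, f)) and not f.startswith("_")]

print(public_callables(math)[:3])  # e.g. ['acos', 'acosh', 'asin']
```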
  {
    "library": "matplotlib",
    "name": "set_zmargin",
    "source_code": "def set_zmargin(self, m):\n    if m <= -0.5:\n        raise ValueError('margin must be greater than -0.5')\n    self._zmargin = m\n    self._request_autoscale_view('z')\n    self.stale = True",
    "docstring": "Set padding of Z data limits prior to autoscaling. *m* times the data interval will be added to each end of that interval before it is used in autoscaling. If *m* is negative, this will clip the data range instead of expanding it. For example, if your data is in the range [0, 2], a margin of 0.1 will result in a range [-0.2, 2.2]; a margin of -0.1 will result in a range of [0.2, 1.8]. Parameters ---------- m : float greater than -0.5",
    "type": "method",
    "file_path": "matplotlib\\lib\\mpl_toolkits\\mplot3d\\axes3d.py",
    "ast_data": "FunctionDef name:set_zmargin arg:self arg:m arguments arg arg If Compare Raise Call Assign Call Assign"
  },
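The docstring's margin arithmetic, reproduced as a standalone sketch (`pad_interval` is an illustrative helper, not matplotlib API): *m* times the data interval is added to each end before autoscaling.

```python
def pad_interval(lo, hi, m):
    # Mirrors set_zmargin's validity check on the margin value.
    if m <= -0.5:
        raise ValueError("margin must be greater than -0.5")
    span = hi - lo
    return lo - m * span, hi + m * span

print(pad_interval(0.0, 2.0, 0.1))   # (-0.2, 2.2)
print(pad_interval(0.0, 2.0, -0.1))  # (0.2, 1.8)
```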
  {
    "library": "scipy",
    "name": "info",
    "source_code": "@classmethod\ndef info(self, source):\n    stream, close_it = self._open(source)\n    try:\n        line = stream.readline()\n        mmid, matrix, format, field, symmetry = (asstr(part.strip()) for part in line.split())\n        if not mmid.startswith('%%MatrixMarket'):\n            raise ValueError('source is not in Matrix Market format')\n        if not matrix.lower() == 'matrix':\n            raise ValueError('Problem reading file header: ' + line)\n        if format.lower() == 'array':\n            format = self.FORMAT_ARRAY\n        elif format.lower() == 'coordinate':\n            format = self.FORMAT_COORDINATE\n        while line:\n            if line.lstrip() and line.lstrip()[0] in ['%', 37]:\n                line = stream.readline()\n            else:\n                break\n        while not line.strip():\n            line = stream.readline()\n        split_line = line.split()\n        if format == self.FORMAT_ARRAY:\n            if not len(split_line) == 2:\n                raise ValueError('Header line not of length 2: ' + line.decode('ascii'))\n            rows, cols = map(int, split_line)\n            entries = rows * cols\n        else:\n            if not len(split_line) == 3:\n                raise ValueError('Header line not of length 3: ' + line.decode('ascii'))\n            rows, cols, entries = map(int, split_line)\n        return (rows, cols, entries, format, field.lower(), symmetry.lower())\n    finally:\n        if close_it:\n            stream.close()",
    "docstring": "Return size, storage parameters from Matrix Market file-like 'source'. Parameters ---------- source : str or file-like Matrix Market filename (extension .mtx) or open file-like object Returns ------- rows : int Number of matrix rows. cols : int Number of matrix columns. entries : int Number of non-zero entries of a sparse matrix or rows*cols for a dense matrix. format : str Either 'coordinate' or 'array'. field : str Either 'real', 'complex', 'pattern', or 'integer'. symmetry : str Either 'general', 'symmetric', 'skew-symmetric', or 'hermitian'.",
    "type": "method",
    "file_path": "scipy\\scipy\\io\\_mmio.py",
    "ast_data": "FunctionDef name:info arg:self arg:source arguments arg arg Assign Call Try Assign Call Assign Call Call Call If Call Raise Call If Compare Call Raise Call If Compare Call Assign If Compare Call Assign While If BoolOp Call Compare Call Assign Call While Call Assign Call Assign Call If Compare If Compare Call Raise Call Call Assign Call Assign If Compare Call Raise Call Call Assign Call Return return:yes Call Call If Call"
  },
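The public wrapper around this header parser is `scipy.io.mminfo`, which accepts a filename or an open file-like object; an in-memory buffer is enough to exercise it.

```python
import io
from scipy.io import mminfo

header = b"""%%MatrixMarket matrix coordinate real general
3 3 2
1 1 1.0
2 2 2.0
"""
print(mminfo(io.BytesIO(header)))
# (3, 3, 2, 'coordinate', 'real', 'general')
```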
  {
    "library": "tensorflow",
    "name": "_as_record_writer_options",
    "source_code": "def _as_record_writer_options(self):\n    options = _pywrap_record_io.RecordWriterOptions(compat.as_bytes(self.get_compression_type_string(self.compression_type)))\n    if self.flush_mode is not None:\n        options.zlib_options.flush_mode = self.flush_mode\n    if self.input_buffer_size is not None:\n        options.zlib_options.input_buffer_size = self.input_buffer_size\n    if self.output_buffer_size is not None:\n        options.zlib_options.output_buffer_size = self.output_buffer_size\n    if self.window_bits is not None:\n        options.zlib_options.window_bits = self.window_bits\n    if self.compression_level is not None:\n        options.zlib_options.compression_level = self.compression_level\n    if self.compression_method is not None:\n        options.zlib_options.compression_method = self.compression_method\n    if self.mem_level is not None:\n        options.zlib_options.mem_level = self.mem_level\n    if self.compression_strategy is not None:\n        options.zlib_options.compression_strategy = self.compression_strategy\n    return options",
    "docstring": "Convert to RecordWriterOptions for use with PyRecordWriter.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\lib\\io\\tf_record.py",
    "ast_data": "FunctionDef name:_as_record_writer_options arg:self arguments arg Assign Call Call Call If Compare Assign If Compare Assign If Compare Assign If Compare Assign If Compare Assign If Compare Assign If Compare Assign If Compare Assign Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_DispatchCacheKey",
    "source_code": "@dataclass_slots\n@dataclass\nclass _DispatchCacheKey:\n    key: tuple[object, ...]\n    hashvalue: int\n\n    def __init__(self, tup: tuple[object, ...]) -> None:\n        self.key = tup\n        self.hashvalue = hash(tup)\n\n    def __eq__(self, other: object) -> bool:\n        return isinstance(other, _DispatchCacheKey) and self.key == other.key\n\n    def __hash__(self) -> int:\n        return self.hashvalue\n\n    def strip_shape_env(self) -> None:\n        for v in self.key:\n            if isinstance(v, _PySymInputStub):\n                v.strip_shape_env()",
    "docstring": "Key for the FakeTensor dispatch cache.",
    "type": "class",
    "file_path": "pytorch\\torch\\_subclasses\\fake_tensor.py",
    "ast_data": "ClassDef name:_DispatchCacheKey FunctionDef name:__init__ arg:self arg:tup arguments arg arg Assign Assign Call FunctionDef name:__eq__ arg:self arg:other arguments arg arg Return return:yes BoolOp Call Compare FunctionDef name:__hash__ arg:self arguments arg Return return:yes FunctionDef name:strip_shape_env arg:self arguments arg For If Call Call"
  },
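A sketch of the precomputed-hash pattern `_DispatchCacheKey` uses: hashing the tuple once at construction keeps repeated dict lookups cheap. `CacheKey` below is illustrative, not the PyTorch class.

```python
class CacheKey:
    __slots__ = ("key", "hashvalue")

    def __init__(self, tup):
        self.key = tup
        self.hashvalue = hash(tup)  # hash once, reuse on every lookup

    def __eq__(self, other):
        return isinstance(other, CacheKey) and self.key == other.key

    def __hash__(self):
        return self.hashvalue

d = {CacheKey((1, 2)): "hit"}
print(d[CacheKey((1, 2))])  # hit
```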
  {
    "library": "django",
    "name": "get_permission_denied_message",
    "source_code": "def get_permission_denied_message(self):\n    return self.permission_denied_message",
    "docstring": "Override this method to override the permission_denied_message attribute.",
    "type": "method",
    "file_path": "django\\django\\contrib\\auth\\mixins.py",
    "ast_data": "FunctionDef name:get_permission_denied_message arg:self arguments arg Return return:yes"
  },
  {
    "library": "numpy",
    "name": "broadcast_to",
    "source_code": "@array_function_dispatch(_broadcast_to_dispatcher, module='numpy')\ndef broadcast_to(array, shape, subok=False):\n    return _broadcast_to(array, shape, subok=subok, readonly=True)",
    "docstring": "Broadcast an array to a new shape. Parameters ---------- array : array_like The array to broadcast. shape : tuple or int The shape of the desired array. A single integer ``. subok : bool, optional If True, then sub-classes will be passed-through, otherwise the returned array will be forced to be a base-class array (default). Returns ------- broadcast : array A readonly view on the original array with the given shape. It is typically not contiguous. Furthermore, more than one element of a broadcasted array may refer to a single memory location. Raises ------ ValueError If the array is not compatible with the new shape according to NumPy's broadcasting rules. See Also -------- broadcast broadcast_arrays broadcast_shapes Examples -------- >>> import numpy as np >>> x = np.array([1, 2, 3]) >>> np.broadcast_to(x, (3, 3)) array([[1, 2, 3], [1, 2, 3], [1, 2, 3]])",
    "type": "function",
    "file_path": "numpy\\numpy\\lib\\_stride_tricks_impl.py",
    "ast_data": "FunctionDef name:broadcast_to arg:array arg:shape arg:subok arguments arg arg arg Return return:yes Call Call"
  },
  {
    "library": "scikit-learn",
    "name": "__getitem__",
    "source_code": "@available_if(_estimator_has('__getitem__'))\ndef __getitem__(self, *args, **kwargs):\n    return self.estimator.__getitem__(*args, **kwargs)",
    "docstring": "__getitem__ is defined in :class: and :class:.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\frozen\\_frozen.py",
    "ast_data": "FunctionDef name:__getitem__ arg:self arguments arg arg arg Return return:yes Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "cluster_spec",
    "source_code": "def cluster_spec(self):\n    tf_config = _load_tf_config()\n    if 'cluster' not in tf_config:\n        return ClusterSpec({})\n    return ClusterSpec(tf_config['cluster'])",
    "docstring": "Returns a ClusterSpec based on the TF_CONFIG environment variable. Returns: A ClusterSpec with information from the TF_CONFIG environment variable.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\cluster_resolver\\tfconfig_cluster_resolver.py",
    "ast_data": "FunctionDef name:cluster_spec arg:self arguments arg Assign Call If Compare Return return:yes Call Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "_put_str",
    "source_code": "def _put_str(s: str | Dtype, space: int) -> str:\n    return str(s)[:space].ljust(space)",
    "docstring": "Make string of specified length, padding to the right if necessary. Parameters ---------- s : Union[str, Dtype] String to be formatted. space : int Length to force string to be of. Returns ------- str String coerced to given length. Examples -------- >>> pd.io.formats.info._put_str(\"panda\", 6) 'panda ' >>> pd.io.formats.info._put_str(\"panda\", 4) 'pand'",
    "type": "function",
    "file_path": "pandas\\pandas\\io\\formats\\info.py",
    "ast_data": "FunctionDef name:_put_str arg:s arg:space arguments arg arg Return return:yes Call Call"
  },
  {
    "library": "pandas",
    "name": "_maybe_disallow_fill",
    "source_code": "@final\ndef _maybe_disallow_fill(self, allow_fill: bool, fill_value, indices) -> bool:\n    if allow_fill and fill_value is not None:\n        if self._can_hold_na:\n            if (indices < -1).any():\n                raise ValueError('When allow_fill=True and fill_value is not None, all indices must be >= -1')\n        else:\n            cls_name = type(self).__name__\n            raise ValueError(f'Unable to fill values because {cls_name} cannot contain NA')\n    else:\n        allow_fill = False\n    return allow_fill",
    "docstring": "We only use pandas-style take when allow_fill is True _and_ fill_value is not None.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\indexes\\base.py",
    "ast_data": "FunctionDef name:_maybe_disallow_fill arg:self arg:allow_fill arg:fill_value arg:indices arguments arg arg arg arg If BoolOp Compare If If Call Compare Raise Call Assign Call Raise Call Assign Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "remove_override",
    "source_code": "def remove_override(self, key: _K) -> None:\n    self._overrides.pop(key, None)\n    self._merged.pop(key, None)\n    if key in self._base:\n        self._merged[key] = self._base[key]",
    "docstring": "Un-overrides a key-value pair.",
    "type": "method",
    "file_path": "pytorch\\torch\\onnx\\_internal\\registration.py",
    "ast_data": "FunctionDef name:remove_override arg:self arg:key arguments arg arg Call Call If Compare Assign"
  },
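A hedged sketch of the bookkeeping `remove_override` maintains: `_merged` mirrors `_base` plus `_overrides`, and removing an override restores the base value when one exists. `OverrideMap` is illustrative, not the PyTorch registry class.

```python
class OverrideMap:
    def __init__(self, base):
        self._base = dict(base)
        self._overrides = {}
        self._merged = dict(base)

    def override(self, key, value):
        self._overrides[key] = value
        self._merged[key] = value

    def remove_override(self, key):
        # Same steps as the method above: drop the override, then
        # restore the base entry into the merged view if present.
        self._overrides.pop(key, None)
        self._merged.pop(key, None)
        if key in self._base:
            self._merged[key] = self._base[key]

m = OverrideMap({"relu": "base_impl"})
m.override("relu", "custom_impl")
m.remove_override("relu")
print(m._merged["relu"])  # base_impl
```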
  {
    "library": "scipy",
    "name": "_get_more_basis_columns",
    "source_code": "def _get_more_basis_columns(A, basis):\n    m, n = A.shape\n    a = np.arange(m + n)\n    bl = np.zeros(len(a), dtype=bool)\n    bl[basis] = 1\n    options = a[~bl]\n    options = options[options < n]\n    B = np.zeros((m, m))\n    B[:, 0:len(basis)] = A[:, basis]\n    if basis.size > 0 and np.linalg.matrix_rank(B[:, :len(basis)]) < len(basis):\n        raise Exception('Basis has dependent columns')\n    rank = 0\n    for i in range(n):\n        new_basis = np.random.permutation(options)[:m - len(basis)]\n        B[:, len(basis):] = A[:, new_basis]\n        rank = np.linalg.matrix_rank(B)\n        if rank == m:\n            break\n    return np.concatenate((basis, new_basis))",
    "docstring": "Called when the auxiliary problem terminates with artificial columns in the basis, which must be removed and replaced with non-artificial columns. Finds additional columns that do not make the matrix singular.",
    "type": "function",
    "file_path": "scipy\\scipy\\optimize\\_linprog_rs.py",
    "ast_data": "FunctionDef name:_get_more_basis_columns arg:A arg:basis arguments arg arg Assign Assign Call Assign Call Call Assign Assign Assign Compare Assign Call Assign Call If BoolOp Compare Compare Call Call Call Raise Call Assign For Call Assign Call Call Assign Call Assign Call If Compare Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "deferred_external_captures",
    "source_code": "@property\ndef deferred_external_captures(self):\n    return list(self._function_captures.by_ref_external.values())",
    "docstring": "Ordered nest of tensors whose placeholders will be fed at call time.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\func_graph.py",
    "ast_data": "FunctionDef name:deferred_external_captures arg:self arguments arg Return return:yes Call Call"
  },
  {
    "library": "sphinx",
    "name": "copy_stemmer_js",
    "source_code": "def copy_stemmer_js(self) -> None:\n    if self.indexer is not None:\n        if hasattr(self.indexer, 'get_js_stemmer_rawcodes'):\n            for jsfile in self.indexer.get_js_stemmer_rawcodes():\n                js_path = Path(jsfile)\n                copyfile(js_path, self._static_dir / js_path.name, force=True)\n        elif (js_stemmer_rawcode := self.indexer.get_js_stemmer_rawcode()):\n            copyfile(js_stemmer_rawcode, self._static_dir / '_stemmer.js', force=True)",
    "docstring": "Copy a JavaScript file for stemmer.",
    "type": "method",
    "file_path": "sphinx\\sphinx\\builders\\html\\__init__.py",
    "ast_data": "FunctionDef name:copy_stemmer_js arg:self arguments arg If Compare If Call For Call Assign Call Call If Call Call"
  },
  {
    "library": "matplotlib",
    "name": "set_useLocale",
    "source_code": "def set_useLocale(self, val):\n    self._useLocale = mpl._val_or_rc(val, 'axes.formatter.use_locale')",
    "docstring": "Set whether to use locale settings for decimal sign and positive sign. Parameters ---------- val : bool or None *None* resets to :rc:.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\ticker.py",
    "ast_data": "FunctionDef name:set_useLocale arg:self arg:val arguments arg arg Assign Call"
  },
  {
    "library": "kornia",
    "name": "project",
    "source_code": "def project(self, points: Vector3) -> Vector2:\n    xy = points.data[..., :2]\n    z = points.z\n    uv = (xy.T @ z.diag().inverse()).T if len(z.shape) else xy.T * 1 / z\n    return Vector2(uv)",
    "docstring": "Project one or more Vector3 from the camera frame into the canonical z=1 plane through perspective division. Args: points: Vector3 representing the points to project. Returns: Vector2 representing the projected points. Example: >>> points = Vector3.from_coords(1., 2., 3.) >>> Z1Projection().project(points) x: 0.3333333432674408 y: 0.6666666865348816",
    "type": "method",
    "file_path": "kornia\\kornia\\sensors\\camera\\projection_model.py",
    "ast_data": "FunctionDef name:project arg:self arg:points arguments arg arg Assign Assign Assign Call Call Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "get_node_io_shapes",
    "source_code": "def get_node_io_shapes(node, key):\n    out_shape = []\n    for shape in node.attr[key].list.shape:\n        out_shape.append([dim.size for dim in shape.dim])\n    return out_shape",
    "docstring": "Returns the input/output shapes of a GraphDef Node.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\compiler\\tensorrt\\utils.py",
    "ast_data": "FunctionDef name:get_node_io_shapes arg:node arg:key arguments arg arg Assign For Call Return return:yes"
  },
  {
    "library": "kornia",
    "name": "hat",
    "source_code": "@staticmethod\ndef hat(v: Vector3 | Tensor) -> Tensor:\n    if isinstance(v, Tensor):\n        a, b, c = (v[..., 0], v[..., 1], v[..., 2])\n    else:\n        a, b, c = (v.x, v.y, v.z)\n    z = zeros_like(a)\n    row0 = stack((z, -c, b), -1)\n    row1 = stack((c, z, -a), -1)\n    row2 = stack((-b, a, z), -1)\n    return stack((row0, row1, row2), -2)",
    "docstring": "Convert elements from vector space to lie algebra. Returns matrix of shape :math:. Args: v: Vector3 or tensor of shape :math:. Example: >>> v = torch.ones((1,3)) >>> m = So3.hat(v) >>> m tensor([[[ 0., -1., 1.], [ 1., 0., -1.], [-1., 1., 0.]]])",
    "type": "method",
    "file_path": "kornia\\kornia\\geometry\\liegroup\\so3.py",
    "ast_data": "FunctionDef name:hat arg:v arguments arg If Call Assign Assign Assign Call Assign Call Assign Call Assign Call Return return:yes Call"
  },
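A NumPy check of the hat map above: the skew-symmetric matrix of `v` implements the cross product, so `hat(v) @ w == v × w`.

```python
import numpy as np

def hat(v):
    # Same row layout as So3.hat: (0, -c, b), (c, 0, -a), (-b, a, 0).
    a, b, c = v
    return np.array([[0.0, -c, b],
                     [c, 0.0, -a],
                     [-b, a, 0.0]])

v, w = np.array([1.0, 2.0, 3.0]), np.array([4.0, 5.0, 6.0])
print(np.allclose(hat(v) @ w, np.cross(v, w)))  # True
```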
  {
    "library": "tensorflow",
    "name": "tridiag",
    "source_code": "def tridiag(below=None, diag=None, above=None, name=None):\n\n    def _pad(x):\n        shape = array_ops.concat([array_ops.shape(x)[:-1], [1]], axis=0)\n        z = array_ops.zeros(shape, dtype=x.dtype)\n        return array_ops.concat([z, x, z], axis=-1)\n\n    def _add(*x):\n        s = None\n        for y in x:\n            if y is None:\n                continue\n            elif s is None:\n                s = y\n            else:\n                s += y\n        if s is None:\n            raise ValueError('Must specify at least one of `below`, `diag`, `above`.')\n        return s\n    with ops.name_scope(name, 'tridiag', [below, diag, above]):\n        if below is not None:\n            below = ops.convert_to_tensor(below, name='below')\n            below = array_ops.matrix_diag(_pad(below))[..., :-1, 1:]\n        if diag is not None:\n            diag = ops.convert_to_tensor(diag, name='diag')\n            diag = array_ops.matrix_diag(diag)\n        if above is not None:\n            above = ops.convert_to_tensor(above, name='above')\n            above = array_ops.matrix_diag(_pad(above))[..., 1:, :-1]\n        return _add(below, diag, above)",
    "docstring": "Creates a matrix with values set above, below, and on the diagonal. Example: Warning: This Op is intended for convenience, not efficiency. Args: below: of shape corresponding to the below diagonal part. is logically equivalent to . diag: of shape corresponding to the diagonal part. is logically equivalent to . above: of shape corresponding to the above diagonal part. is logically equivalent to . name: Python . The name to give this op. Returns: tridiag: with values set above, below and on the diagonal. Raises: ValueError: if all inputs are .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\distributions\\util.py",
    "ast_data": "FunctionDef name:tridiag arg:below arg:diag arg:above arg:name arguments arg arg arg arg FunctionDef name:_pad arg:x arguments arg Assign Call Call Assign Call Return return:yes Call FunctionDef name:_add arguments arg Assign For If Compare If Compare Assign If Compare Raise Call Return return:yes With Call If Compare Assign Call Assign Call Call If Compare Assign Call Assign Call If Compare Assign Call Assign Call Call Return return:yes Call"
  },
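A NumPy sketch of the (unbatched) matrix `tridiag` assembles from its three diagonals; the TF implementation builds the same result with batched `matrix_diag` calls and padding.

```python
import numpy as np

below, diag, above = [1.0, 2.0, 3.0], [4.0, 5.0, 6.0, 7.0], [8.0, 9.0, 10.0]
# Place each band on its offset diagonal and sum.
m = np.diag(below, k=-1) + np.diag(diag) + np.diag(above, k=1)
print(m)
```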
  {
    "library": "matplotlib",
    "name": "_update_set_signature_and_docstring",
    "source_code": "@classmethod\ndef _update_set_signature_and_docstring(cls):\n    cls.set.__signature__ = Signature([Parameter('self', Parameter.POSITIONAL_OR_KEYWORD), *[Parameter(prop, Parameter.KEYWORD_ONLY, default=_api.UNSET) for prop in ArtistInspector(cls).get_setters() if prop not in Artist._PROPERTIES_EXCLUDED_FROM_SET]])\n    cls.set._autogenerated_signature = True\n    cls.set.__doc__ = 'Set multiple properties at once.\\n\\nSupported properties are\\n\\n' + kwdoc(cls)",
    "docstring": "Update the signature of the set function to list all properties as keyword arguments. Property aliases are not listed in the signature for brevity, but are still accepted as keyword arguments.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\artist.py",
    "ast_data": "FunctionDef name:_update_set_signature_and_docstring arg:cls arguments arg Assign Call Call Call Call Call Compare Assign Assign Call"
  },
  {
    "library": "sphinx",
    "name": "discover",
    "source_code": "def discover(self, exclude_paths: Iterable[str]=(), include_paths: Iterable[str]=('**',)) -> set[str]:\n    self.docnames.clear()\n    self._path_to_docname.clear()\n    self._docname_to_path.clear()\n    for filename in get_matching_files(self.srcdir, include_paths, [*exclude_paths, *EXCLUDE_PATHS]):\n        if (docname := self.path2doc(filename)):\n            if docname in self.docnames:\n                files = [str(f.relative_to(self.srcdir)) for f in self.srcdir.glob(f'{docname}.*')]\n                logger.warning(__('multiple files found for the document \"%s\": %s\\nUse %r for the build.'), docname, ', '.join(files), self.doc2path(docname, absolute=True), once=True)\n            elif os.access(self.srcdir / filename, os.R_OK):\n                self.docnames.add(docname)\n                path = Path(filename)\n                self._path_to_docname[path] = docname\n                self._docname_to_path[docname] = path\n            else:\n                logger.warning(__('Ignored unreadable document %r.'), filename, location=docname)\n    return self.docnames",
    "docstring": "Find all document files in the source directory and put them in :attr:.",
    "type": "method",
    "file_path": "sphinx\\sphinx\\project.py",
    "ast_data": "FunctionDef name:discover arg:self arg:exclude_paths arg:include_paths arguments arg arg arg Call Call Call For Call If Call If Compare Assign Call Call Call Call Call Call Call If Call Call Assign Call Assign Assign Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "rsample",
    "source_code": "def rsample(self, sample_shape: _size=torch.Size()) -> Tensor:\n    raise NotImplementedError",
    "docstring": "Generates a sample_shape shaped reparameterized sample or sample_shape shaped batch of reparameterized samples if the distribution parameters are batched.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributions\\distribution.py",
    "ast_data": "FunctionDef name:rsample arg:self arg:sample_shape arguments arg arg Call Raise"
  },
  {
    "library": "tensorflow",
    "name": "random_contrast",
    "source_code": "@tf_export('image.random_contrast')\n@dispatch.add_dispatch_support\ndef random_contrast(image, lower, upper, seed=None):\n    if upper <= lower:\n        raise ValueError('upper must be > lower.')\n    if lower < 0:\n        raise ValueError('lower must be non-negative.')\n    contrast_factor = random_ops.random_uniform([], lower, upper, seed=seed)\n    return adjust_contrast(image, contrast_factor)",
    "docstring": "Adjust the contrast of an image or images by a random factor. Equivalent to but uses a randomly picked in the interval . For producing deterministic results given a value, use . Unlike using the param with ops, ops guarantee the same results given the same seed independent of how many times the function is called, and independent of global seed settings (e.g. tf.random.set_seed). Args: image: An image tensor with 3 or more dimensions. lower: float. Lower bound for the random contrast factor. upper: float. Upper bound for the random contrast factor. seed: A Python integer. Used to create a random seed. See for behavior. Usage Example: >>> x = [[[1.0, 2.0, 3.0], ... [4.0, 5.0, 6.0]], ... [[7.0, 8.0, 9.0], ... [10.0, 11.0, 12.0]]] >>> tf.image.random_contrast(x, 0.2, 0.5) Returns: The contrast-adjusted image(s). Raises: ValueError: if or if .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\image_ops_impl.py",
    "ast_data": "FunctionDef name:random_contrast arg:image arg:lower arg:upper arg:seed arguments arg arg arg arg If Compare Raise Call If Compare Raise Call Assign Call Return return:yes Call Call"
  },
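The adjustment `random_contrast` applies with its drawn factor: per the tf.image docs, each component x of a channel becomes `(x - mean) * contrast_factor + mean`, with the mean taken over that channel. A simplified NumPy rendition, with columns standing in for channels:

```python
import numpy as np

x = np.array([[1.0, 2.0, 3.0],
              [4.0, 5.0, 6.0]])        # rows: pixels, cols: channels
mean = x.mean(axis=0, keepdims=True)   # per-channel mean
print((x - mean) * 0.5 + mean)         # contrast_factor = 0.5
```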
  {
    "library": "scikit-learn",
    "name": "norm",
    "source_code": "def norm(x):\n    return sqrt(squared_norm(x))",
    "docstring": "Dot product-based Euclidean norm implementation. See: Parameters ---------- x : array-like Vector for which to compute the norm.",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\decomposition\\_nmf.py",
    "ast_data": "FunctionDef name:norm arg:x arguments arg Return return:yes Call Call"
  },
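The equivalent NumPy computation: the dot-product-based Euclidean norm is `sqrt(x . x)`.

```python
import numpy as np

x = np.array([3.0, 4.0])
print(np.sqrt(np.dot(x, x)))  # 5.0
```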
  {
    "library": "tensorflow",
    "name": "_comparison",
    "source_code": "def _comparison(tf_fun, x1, x2, cast_bool_to_int=False):\n    dtype = np_utils.result_type(x1, x2)\n    x1 = np_array_ops.array(x1, dtype=dtype)\n    x2 = np_array_ops.array(x2, dtype=dtype)\n    if cast_bool_to_int and x1.dtype == dtypes.bool:\n        x1 = math_ops.cast(x1, dtypes.int32)\n        x2 = math_ops.cast(x2, dtypes.int32)\n    return tf_fun(x1, x2)",
    "docstring": "Helper function for comparision.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\numpy_ops\\np_math_ops.py",
    "ast_data": "FunctionDef name:_comparison arg:tf_fun arg:x1 arg:x2 arg:cast_bool_to_int arguments arg arg arg arg Assign Call Assign Call Assign Call If BoolOp Compare Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_deserialize",
    "source_code": "@classmethod\ndef _deserialize(cls, serialization):\n    return cls(*serialization)",
    "docstring": "Reconstructs a TypeSpec from a value returned by . Args: serialization: A value returned by _serialize. In some contexts, s in may not have the identical type that was returned by (but its type will still be a type with the same type name and field names). For example, the code that loads a SavedModel does not have access to the original type, so it dynamically creates a new type with the same type name and field names as the original one. If necessary, you can check for these duck-typed types, and restore them to the original type. (E.g., this would be necessary if you rely on type checks such as for this 's member variables). Returns: A of type .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\type_spec.py",
    "ast_data": "FunctionDef name:_deserialize arg:cls arg:serialization arguments arg arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "conditional_reset",
    "source_code": "@functools.wraps(reset_func)\ndef conditional_reset(*args, **kwargs):\n    datapipe = args[0]\n    if datapipe._snapshot_state in (_SnapshotState.Iterating, _SnapshotState.NotStarted):\n        datapipe._number_of_samples_yielded = 0\n        datapipe._fast_forward_iterator = None\n        reset_func(*args, **kwargs)\n    datapipe._snapshot_state = _SnapshotState.Iterating",
    "docstring": "Only execute DataPipe's method if is or . This allows recently restored DataPipe to preserve its restored state during the initial call.",
    "type": "method",
    "file_path": "pytorch\\torch\\utils\\data\\datapipes\\_typing.py",
    "ast_data": "FunctionDef name:conditional_reset arguments arg arg Assign If Compare Assign Assign Call Assign Call"
  },
  {
    "library": "scipy",
    "name": "_process_parameters",
    "source_code": "def _process_parameters(self, dim):\n    if dim is None or not np.isscalar(dim) or dim < 0 or (dim != int(dim)):\n        raise ValueError('Dimension of rotation must be specified,\\n                                and must be a scalar nonnegative integer.')\n    return dim",
    "docstring": "Dimension N must be specified; it cannot be inferred.",
    "type": "method",
    "file_path": "scipy\\scipy\\stats\\_multivariate.py",
    "ast_data": "FunctionDef name:_process_parameters arg:self arg:dim arguments arg arg If BoolOp Compare Call Compare Compare Call Raise Call Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "fit_transform",
    "source_code": "@_fit_context(prefer_skip_nested_validation=True)\ndef fit_transform(self, X, y=None, W=None, H=None):\n    X = validate_data(self, X, accept_sparse=('csr', 'csc'), dtype=[np.float64, np.float32])\n    with config_context(assume_finite=True):\n        W, H, n_iter, n_steps = self._fit_transform(X, W=W, H=H)\n    self.reconstruction_err_ = _beta_divergence(X, W, H, self._beta_loss, square_root=True)\n    self.n_components_ = H.shape[0]\n    self.components_ = H\n    self.n_iter_ = n_iter\n    self.n_steps_ = n_steps\n    return W",
    "docstring": "Learn a NMF model for the data X and returns the transformed data. This is more efficient than calling fit followed by transform. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) Data matrix to be decomposed. y : Ignored Not used, present here for API consistency by convention. W : array-like of shape (n_samples, n_components), default=None If , it is used as initial guess for the solution. If , uses the initialisation method specified in . H : array-like of shape (n_components, n_features), default=None If , it is used as initial guess for the solution. If , uses the initialisation method specified in . Returns ------- W : ndarray of shape (n_samples, n_components) Transformed data.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\decomposition\\_nmf.py",
    "ast_data": "FunctionDef name:fit_transform arg:self arg:X arg:y arg:W arg:H arguments arg arg arg arg arg Assign Call With Call Assign Call Assign Call Assign Assign Assign Assign Return return:yes Call"
  },
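A minimal usage sketch of `fit_transform` on a small nonnegative matrix; the hyperparameter values here are illustrative, not recommendations.

```python
import numpy as np
from sklearn.decomposition import NMF

X = np.abs(np.random.RandomState(0).rand(6, 4))
model = NMF(n_components=2, init="random", random_state=0, max_iter=500)
W = model.fit_transform(X)                # X is approximated by W @ H
print(W.shape, model.components_.shape)   # (6, 2) (2, 4)
```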
  {
    "library": "tensorflow",
    "name": "_UnsortedSegmentProdGrad",
    "source_code": "@ops.RegisterGradient('UnsortedSegmentProd')\ndef _UnsortedSegmentProdGrad(op: ops.Operation, grad):\n    is_zero = math_ops.equal(op.inputs[0], 0)\n    num_zeros = gen_math_ops.unsorted_segment_sum(math_ops.cast(is_zero, dtype=dtypes.int32), op.inputs[1], op.inputs[2])\n    grad = array_ops.where_v2(math_ops.greater(num_zeros, 1), array_ops.zeros_like(grad), grad)\n    non_zero_data = array_ops.where_v2(is_zero, array_ops.ones_like(op.inputs[0]), op.inputs[0])\n    non_zero_prod = gen_math_ops.unsorted_segment_prod(non_zero_data, op.inputs[1], op.inputs[2])\n    zero_clipped_indices = math_ops.maximum(op.inputs[1], array_ops.zeros_like(op.inputs[1]))\n    gathered_prod = array_ops.gather(op.outputs[0], zero_clipped_indices)\n    gathered_non_zero_prod = array_ops.gather(non_zero_prod, zero_clipped_indices)\n    prod_divided_by_el = gathered_prod / op.inputs[0]\n    partial_derivative = array_ops.where_v2(is_zero, gathered_non_zero_prod, prod_divided_by_el)\n    gathered_grad = _GatherDropNegatives(grad, op.inputs[1], zero_clipped_indices)[0]\n    return (gathered_grad * partial_derivative, None, None)",
    "docstring": "Gradient for UnsortedSegmentProd. The gradient can be expressed for each segment by dividing the segment's product by each element of the segment input tensor, but this approach can't deal with zeros in the input. Unlike reduce_prod we can't use cumsum here as individual segments may have a different number of elements. Therefore we consider three cases: 1) A segment input contains no zeros and we can safely divide by the input tensor. 2) A segment contains exactly one zero. Then the gradient of each input of the segment is zero except for the 0-input, there the gradient is the product of the remaining segment entries. 3) A segment contains at least two zeros. The gradient is zero for all segment inputs.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\math_grad.py",
    "ast_data": "FunctionDef name:_UnsortedSegmentProdGrad arg:op arg:grad arguments arg arg Assign Call Assign Call Call Assign Call Call Call Assign Call Call Assign Call Assign Call Call Assign Call Assign Call Assign Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "pygame",
    "name": "write",
    "source_code": "def write(self, data):\n    _check_init()\n    self._check_open()\n    self._output.Write(data)",
    "docstring": "writes a list of midi data to the Output Output.write(data) writes series of MIDI information in the form of a list: write([[[status ],timestamp], [[status ],timestamp],...]) fields are optional example: choose program change 1 at time 20000 and send note 65 with velocity 100 500 ms later. write([[[0xc0,0,0],20000],[[0x90,60,100],20500]]) notes: 1. timestamps will be ignored if latency = 0. 2. To get a note to play immediately, send MIDI info with timestamp read from function Time. 3. understanding optional data fields: write([[[0xc0,0,0],20000]]) is equivalent to write([[[0xc0],20000]]) Can send up to 1024 elements in your data list, otherwise an IndexError exception is raised.",
    "type": "method",
    "file_path": "pygame\\src_py\\midi.py",
    "ast_data": "FunctionDef name:write arg:self arg:data arguments arg arg Call Call Call"
  },
  {
    "library": "django",
    "name": "force_group_by",
    "source_code": "def force_group_by(self):\n    return []",
    "docstring": "Return a GROUP BY clause to use with a HAVING clause when no grouping is specified.",
    "type": "method",
    "file_path": "django\\django\\db\\backends\\base\\operations.py",
    "ast_data": "FunctionDef name:force_group_by arg:self arguments arg Return return:no"
  },
  {
    "library": "pytorch",
    "name": "finish_plan",
    "source_code": "@abc.abstractmethod\ndef finish_plan(self, central_plan: LoadPlan) -> LoadPlan:\n    pass",
    "docstring": "Accept the plan from coordinator and return final LoadPlan.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\checkpoint\\planner.py",
    "ast_data": "FunctionDef name:finish_plan arg:self arg:central_plan arguments arg arg"
  },
  {
    "library": "pytorch",
    "name": "Dropout",
    "source_code": "class Dropout(_DropoutNd):\n\n    def forward(self, input: Tensor) -> Tensor:\n        return F.dropout(input, self.p, self.training, self.inplace)",
    "docstring": "During training, randomly zeroes some of the elements of the input tensor with probability :attr:. The zeroed elements are chosen independently for each forward call and are sampled from a Bernoulli distribution. Each channel will be zeroed out independently on every forward call. This has proven to be an effective technique for regularization and preventing the co-adaptation of neurons as described in the paper _ . Furthermore, the outputs are scaled by a factor of :math: during training. This means that during evaluation the module simply computes an identity function. Args: p: probability of an element to be zeroed. Default: 0.5 inplace: If set to `(*)(*)`. Output is of the same shape as input Examples:: >>> m = nn.Dropout(p=0.2) >>> input = torch.randn(20, 16) >>> output = m(input) .. _Improving neural networks by preventing co-adaptation of feature detectors:",
    "type": "class",
    "file_path": "pytorch\\torch\\nn\\modules\\dropout.py",
    "ast_data": "ClassDef name:Dropout FunctionDef name:forward arg:self arg:input arguments arg arg Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "get_n_leaves",
    "source_code": "def get_n_leaves(self):\n    check_is_fitted(self)\n    return self.tree_.n_leaves",
    "docstring": "Return the number of leaves of the decision tree. Returns ------- self.tree_.n_leaves : int Number of leaves.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\tree\\_classes.py",
    "ast_data": "FunctionDef name:get_n_leaves arg:self arguments arg Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_process_op_fetches",
    "source_code": "def _process_op_fetches(self, op_fetches):\n    if op_fetches is None:\n        return []\n    if not isinstance(op_fetches, (list, tuple)):\n        op_fetches = [op_fetches]\n    fetches = []\n    for fetch in op_fetches:\n        if isinstance(fetch, ops.Operation):\n            fetches.append(fetch)\n        elif isinstance(fetch, tensor_lib.Tensor):\n            fetches.append(fetch.op)\n        else:\n            logging.warning('Ignoring the given op_fetch:%s, which is not an op.' % fetch)\n    return fetches",
    "docstring": "Check that op_fetches have valid ops.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\tpu\\tensor_tracer.py",
    "ast_data": "FunctionDef name:_process_op_fetches arg:self arg:op_fetches arguments arg arg If Compare Return return:no If Call Assign Assign For If Call Call If Call Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_create_or_restore_slot_variable",
    "source_code": "def _create_or_restore_slot_variable(self, slot_variable_position, slot_name, variable):\n    variable_key = _var_key(variable)\n    slot_dict = self._slots.get(variable_key, {})\n    slot_variable = slot_dict.get(slot_name, None)\n    if slot_variable is None and context.executing_eagerly() and slot_variable_position.is_simple_variable() and (not ops.get_default_graph()._variable_creator_stack or self._distribution_strategy):\n        initializer = trackable.CheckpointInitialValueCallable(checkpoint_position=slot_variable_position)\n        slot_variable = self.add_slot(var=variable, initializer=initializer, slot_name=slot_name, shape=slot_variable_position.value_shape())\n    if slot_variable is not None:\n        slot_variable_position.restore(slot_variable)\n    else:\n        self._deferred_slot_restorations.setdefault(slot_name, {}).setdefault(variable_key, []).append(slot_variable_position)",
    "docstring": "Restore a slot variable's value, possibly creating it. Called when a variable which has an associated slot variable is created or restored. When executing eagerly, we create the slot variable with a restoring initializer. No new variables are created when graph building. Instead, _restore_slot_variable catches these after normal creation and adds restore ops to the graph. This method is nonetheless important when graph building for the case when a slot variable has already been created but has just been added to a dependency graph (causing us to realize that the slot variable needs to be restored). Args: slot_variable_position: A object indicating the slot variable object to be restored. slot_name: The name of this 's slot to restore into. variable: The variable object this slot is being created for.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\optimizer_v2\\optimizer_v2.py",
    "ast_data": "FunctionDef name:_create_or_restore_slot_variable arg:self arg:slot_variable_position arg:slot_name arg:variable arguments arg arg arg arg Assign Call Assign Call Assign Call If BoolOp Compare Call Call BoolOp Call Assign Call Assign Call Call If Compare Call Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "size",
    "source_code": "def size(self, name=None):\n    with ops.name_scope(name, '%s_Size' % self.name, [self.resource_handle]):\n        return gen_lookup_ops.lookup_table_size_v2(self.resource_handle)",
    "docstring": "Compute the number of elements in this table. Args: name: A name for the operation (optional). Returns: A scalar tensor containing the number of elements in this table.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\lookup_ops.py",
    "ast_data": "FunctionDef name:size arg:self arg:name arguments arg arg With Call Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "_from_datetime64",
    "source_code": "@classmethod\ndef _from_datetime64(cls, data, freq, tz=None) -> Self:\n    if isinstance(freq, BaseOffset):\n        freq = PeriodDtype(freq)._freqstr\n    data, freq = dt64arr_to_periodarr(data, freq, tz)\n    dtype = PeriodDtype(freq)\n    return cls(data, dtype=dtype)",
    "docstring": "Construct a PeriodArray from a datetime64 array Parameters ---------- data : ndarray[datetime64[ns], datetime64[ns, tz]] freq : str or Tick tz : tzinfo, optional Returns ------- PeriodArray[freq]",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\arrays\\period.py",
    "ast_data": "FunctionDef name:_from_datetime64 arg:cls arg:data arg:freq arg:tz arguments arg arg arg arg If Call Assign Call Assign Call Assign Call Return return:yes Call"
  },
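The public-API counterpart of the conversion above: datetime64 values become periods at a given frequency.

```python
import pandas as pd

dti = pd.to_datetime(["2024-01-15", "2024-02-15"])
print(dti.to_period("M"))
# PeriodIndex(['2024-01', '2024-02'], dtype='period[M]')
```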
  {
    "library": "scikit-learn",
    "name": "indexable",
    "source_code": "def indexable(*iterables):\n    result = [_make_indexable(X) for X in iterables]\n    check_consistent_length(*result)\n    return result",
    "docstring": "Make arrays indexable for cross-validation. Checks consistent length, passes through None, and ensures that everything can be indexed by converting sparse matrices to csr and converting non-iterable objects to arrays. Parameters ---------- *iterables : {lists, dataframes, ndarrays, sparse matrices} List of objects to ensure sliceability. Returns ------- result : list of {ndarray, sparse matrix, dataframe} or None Returns a list containing indexable arrays (i.e. NumPy array, sparse matrix, or dataframe) or . Examples -------- >>> from sklearn.utils import indexable >>> from scipy.sparse import csr_matrix >>> import numpy as np >>> iterables = [ ... [1, 2, 3], np.array([2, 3, 4]), None, csr_matrix([[5], [6], [7]]) ... ] >>> indexable(*iterables) [[1, 2, 3], array([2, 3, 4]), None, ]",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\utils\\validation.py",
    "ast_data": "FunctionDef name:indexable arguments arg Assign Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_CacheKeyState",
    "source_code": "@dataclass_slots\n@dataclass\nclass _CacheKeyState:\n    sym_node_lookup: dict[int, int]\n    known_symbols: set[sympy.Symbol]\n    shape_env: Optional[ShapeEnv]\n\n    def __init__(self, shape_env: Optional[ShapeEnv]=None) -> None:\n        self.sym_node_lookup = {}\n        self.known_symbols = set()\n        self.shape_env = shape_env\n\n    def cache_on_shape_env(self) -> bool:\n        return bool(self.sym_node_lookup)\n\n    def convert_sym_int(self, result: list[object], arg: SymInt) -> None:\n        node_id = id(arg.node)\n        if node_id in self.sym_node_lookup:\n            result.append(_InputBackref(self.sym_node_lookup[node_id]))\n        else:\n            self.sym_node_lookup[node_id] = len(result)\n            self.known_symbols.update(arg.node.expr.free_symbols)\n            if self.shape_env is None:\n                self.shape_env = arg.node.shape_env\n            result.append(_PySymInputStub(arg))\n\n    def convert_output(self, arg: _MetadataIntLike) -> _MetadataIntLike:\n        if isinstance(arg, SymInt):\n            return _SymIntOutputStub(arg, self.sym_node_lookup.get(id(arg.node), None))\n        else:\n            return arg",
    "docstring": "State used while building our cache key.",
    "type": "class",
    "file_path": "pytorch\\torch\\_subclasses\\_fake_tensor_utils.py",
    "ast_data": "ClassDef name:_CacheKeyState FunctionDef name:__init__ arg:self arg:shape_env arguments arg arg Assign Assign Call Assign FunctionDef name:cache_on_shape_env arg:self arguments arg Return return:yes Call FunctionDef name:convert_sym_int arg:self arg:result arg:arg arguments arg arg arg Assign Call If Compare Call Call Assign Call Call If Compare Assign Call Call FunctionDef name:convert_output arg:self arg:arg arguments arg arg If Call Return return:yes Call Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_trackable_children",
    "source_code": "def _trackable_children(self, save_type=trackable.SaveType.CHECKPOINT, **kwargs):\n    if context.executing_eagerly():\n        graph_key = None\n    else:\n        graph = ops.get_default_graph()\n        graph_key = graph._graph_key\n    weights = {}\n    for (name, g), v in sorted(self._weights.items(), key=lambda i: i[0][0]):\n        if g == graph_key:\n            weights[name] = v\n    weights.update(super(LossScale, self)._trackable_children(save_type, **kwargs))\n    return weights",
    "docstring": "From Trackable. Gather graph-specific weights to save.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\training\\experimental\\loss_scale.py",
    "ast_data": "FunctionDef name:_trackable_children arg:self arg:save_type arguments arg arg arg If Call Assign Assign Call Assign Assign For Call Call arguments arg If Compare Assign Call Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "FlushNonExecutionFiles",
    "source_code": "def FlushNonExecutionFiles(self):\n    _pywrap_debug_events_writer.FlushNonExecutionFiles(self._dump_root)",
    "docstring": "Flush the non-execution debug event files.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\debug\\lib\\debug_events_writer.py",
    "ast_data": "FunctionDef name:FlushNonExecutionFiles arg:self arguments arg Call"
  },
  {
    "library": "tensorflow",
    "name": "_maybe_store_sparse",
    "source_code": "def _maybe_store_sparse(t, map_op_name, keep_input):\n    return utils.smart_cond(keep_input, lambda: _store_sparse(t, shared_name=map_op_name), lambda: constant_op.constant(-1, dtypes.int64))",
    "docstring": "Conditionally store a single sparse Tensor.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\training\\input.py",
    "ast_data": "FunctionDef name:_maybe_store_sparse arg:t arg:map_op_name arg:keep_input arguments arg arg arg Return return:yes Call arguments Call arguments Call"
  },
  {
    "library": "pytorch",
    "name": "wait_for_peers",
    "source_code": "def wait_for_peers(self, expected_version):\n    active_version, state = self.get_rdzv_state()\n    while True:\n        if state['status'] == 'frozen' and state['version'] == expected_version:\n            return active_version\n        elif state['status'] == 'joinable' and state['version'] == expected_version:\n            active_version, state = self.try_wait_for_state_change(etcd_index=active_version.etcd_index + 1)\n        else:\n            raise EtcdRendezvousRetryableFailure('Rendezvous state transition no longer possible. Must re-enter.')",
    "docstring": "Helper method for the join phase.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\elastic\\rendezvous\\etcd_rendezvous.py",
    "ast_data": "FunctionDef name:wait_for_peers arg:self arg:expected_version arguments arg arg Assign Call While If BoolOp Compare Compare Return return:yes If BoolOp Compare Compare Assign Call Raise Call"
  },
  {
    "library": "kornia",
    "name": "HlsToRgb",
    "source_code": "class HlsToRgb(Module):\n    ONNX_DEFAULT_INPUTSHAPE: ClassVar[list[int]] = [-1, 3, -1, -1]\n    ONNX_DEFAULT_OUTPUTSHAPE: ClassVar[list[int]] = [-1, 3, -1, -1]\n\n    def forward(self, image: Tensor) -> Tensor:\n        return hls_to_rgb(image)",
    "docstring": "Convert an image from HLS to RGB. The image data is assumed to be in the range of (0, 1). Returns: RGB version of the image. Shape: - input: :math: - output: :math: Reference: Examples: >>> input = torch.rand(2, 3, 4, 5) >>> rgb = HlsToRgb() >>> output = rgb(input) # 2x3x4x5",
    "type": "class",
    "file_path": "kornia\\kornia\\color\\hls.py",
    "ast_data": "ClassDef name:HlsToRgb FunctionDef name:forward arg:self arg:image arguments arg arg Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "_interpolate_single_key",
    "source_code": "def _interpolate_single_key(self, return_key, tri_index, x, y):\n    raise NotImplementedError('TriInterpolator subclasses' + 'should implement _interpolate_single_key!')",
    "docstring": "Interpolate at points belonging to the triangulation (inside an unmasked triangles). Parameters ---------- return_key : {'z', 'dzdx', 'dzdy'} The requested values (z or its derivatives). tri_index : 1D int array Valid triangle index (cannot be -1). x, y : 1D arrays, same shape as Valid locations where interpolation is requested. Returns ------- 1-d array Returned array of the same size as *tri_index*",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\tri\\_triinterpolate.py",
    "ast_data": "FunctionDef name:_interpolate_single_key arg:self arg:return_key arg:tri_index arg:x arg:y arguments arg arg arg arg arg Raise Call"
  },
  {
    "library": "pandas",
    "name": "write_metadata",
    "source_code": "def write_metadata(self, key: str, values: np.ndarray) -> None:\n    self.parent.put(self._get_metadata_path(key), Series(values, copy=False), format='table', encoding=self.encoding, errors=self.errors, nan_rep=self.nan_rep)",
    "docstring": "Write out a metadata array to the key as a fixed-format Series. Parameters ---------- key : str values : ndarray",
    "type": "method",
    "file_path": "pandas\\pandas\\io\\pytables.py",
    "ast_data": "FunctionDef name:write_metadata arg:self arg:key arg:values arguments arg arg arg Call Call Call"
  },
  {
    "library": "matplotlib",
    "name": "set_alpha",
    "source_code": "def set_alpha(self, alpha):\n    self.alpha = None if isinstance(alpha, np.ndarray) else alpha",
    "docstring": "Set the transparency between 0 (transparent) and 1 (opaque). If an array is provided, *alpha* will be set to None to use the transparency values associated with the colormap.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\colorbar.py",
    "ast_data": "FunctionDef name:set_alpha arg:self arg:alpha arguments arg arg Assign Call"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, node_def, op, message, *args):\n    super(DataLossError, self).__init__(node_def, op, message, DATA_LOSS, *args)",
    "docstring": "Creates a .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\errors_impl.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:node_def arg:op arg:message arguments arg arg arg arg arg Call Call"
  },
  {
    "library": "numpy",
    "name": "_nanquantile_ureduce_func",
    "source_code": "def _nanquantile_ureduce_func(a: np.array, q: np.array, weights: np.array, axis: int | None=None, out=None, overwrite_input: bool=False, method='linear'):\n    if axis is None or a.ndim == 1:\n        part = a.ravel()\n        wgt = None if weights is None else weights.ravel()\n        result = _nanquantile_1d(part, q, overwrite_input, method, weights=wgt)\n    elif weights is None:\n        result = np.apply_along_axis(_nanquantile_1d, axis, a, q, overwrite_input, method, weights)\n        if q.ndim != 0:\n            from_ax = [axis + i for i in range(q.ndim)]\n            result = np.moveaxis(result, from_ax, list(range(q.ndim)))\n    else:\n        a = np.moveaxis(a, axis, -1)\n        if weights is not None:\n            weights = np.moveaxis(weights, axis, -1)\n        if out is not None:\n            result = out\n        else:\n            result = np.empty_like(a, shape=q.shape + a.shape[:-1])\n        for ii in np.ndindex(a.shape[:-1]):\n            result[(...,) + ii] = _nanquantile_1d(a[ii], q, weights=weights[ii], overwrite_input=overwrite_input, method=method)\n        return result\n    if out is not None:\n        out[...] = result\n    return result",
    "docstring": "Private function that doesn't support extended axis or keepdims. These methods are extended to this function using _ureduce See nanpercentile for parameter usage",
    "type": "function",
    "file_path": "numpy\\numpy\\lib\\_nanfunctions_impl.py",
    "ast_data": "FunctionDef name:_nanquantile_ureduce_func arg:a arg:q arg:weights arg:axis arg:out arg:overwrite_input arg:method arguments arg arg arg arg arg arg arg If BoolOp Compare Compare Assign Call Assign Compare Call Assign Call If Compare Assign Call If Compare Assign Call Assign Call Call Call Assign Call If Compare Assign Call If Compare Assign Assign Call For Call Assign Call Return return:yes If Compare Assign Return return:yes"
  },
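The public-API counterpart of this helper: `np.nanquantile` ignores NaNs along the reduction axis.

```python
import numpy as np

a = np.array([[1.0, np.nan, 3.0],
              [4.0, 5.0, np.nan]])
# Row 0 reduces over [1, 3], row 1 over [4, 5].
print(np.nanquantile(a, 0.5, axis=1))  # [2.  4.5]
```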
  {
    "library": "tensorflow",
    "name": "check_valid",
    "source_code": "def check_valid(spec):\n    DeviceSpec.from_string(spec)",
    "docstring": "Check that a device spec is valid. Args: spec: a string. Raises: An exception if the spec is invalid.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\device.py",
    "ast_data": "FunctionDef name:check_valid arg:spec arguments arg Call"
  },
  {
    "library": "authlib",
    "name": "acquire_token",
    "source_code": "def acquire_token(self, request, scopes=None, **kwargs):\n    req = DjangoJsonRequest(request)\n    kwargs['scopes'] = scopes\n    for claim in kwargs:\n        if isinstance(kwargs[claim], str):\n            kwargs[claim] = [kwargs[claim]]\n    token = self.validate_request(request=req, **kwargs)\n    token_authenticated.send(sender=self.__class__, token=token)\n    return token",
    "docstring": "A method to acquire current valid token with the given scope. :param request: Django HTTP request instance :param scopes: a list of scope values :return: token object",
    "type": "method",
    "file_path": "authlib\\authlib\\integrations\\django_oauth2\\resource_protector.py",
    "ast_data": "FunctionDef name:acquire_token arg:self arg:request arg:scopes arguments arg arg arg arg Assign Call Assign For If Call Assign Assign Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "dual_level",
    "source_code": "class dual_level(_DecoratorContextManager):\n\n    def __enter__(self):\n        return enter_dual_level()\n\n    def __exit__(self, exc_type: Any, exc_value: Any, traceback: Any) -> None:\n        exit_dual_level()",
    "docstring": "Context-manager for forward AD, where all forward AD computation must occur within the `torch.func.jvpforward-mode AD tutorial `__ for detailed steps on how to use this API.",
    "type": "class",
    "file_path": "pytorch\\torch\\autograd\\forward_ad.py",
    "ast_data": "ClassDef name:dual_level FunctionDef name:__enter__ arg:self arguments arg Return return:yes Call FunctionDef name:__exit__ arg:self arg:exc_type arg:exc_value arg:traceback arguments arg arg arg arg Call"
  },
  {
    "library": "tensorflow",
    "name": "structured_input_signature",
    "source_code": "@property\ndef structured_input_signature(self):\n    return self._func_graph.structured_input_signature",
    "docstring": "Returns structured signature for this concrete function. Returns: A tuple , where: * is a tuple that specifies the expected type or value each for positional argument. * is a dictionary that specifies the expected type or value for each keyword-only argument. The type or value for each argument is specified using one of the following: * A , indicating that a Tensor or other TensorFlow-native value is expected. * A Python value, such as an integer, indicating that an equal value is expected. * A nested structure of s and Python values, indicating that a corresponding nested structure is expected.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\polymorphic_function\\concrete_function.py",
    "ast_data": "FunctionDef name:structured_input_signature arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "flush",
    "source_code": "def flush(self):\n    self._warn_if_event_writer_is_closed()\n    self.event_writer.flush()",
    "docstring": "Flushes the event file to disk. Call this method to make sure that all pending events have been written to disk.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\summary\\writer\\writer.py",
    "ast_data": "FunctionDef name:flush arg:self arguments arg Call Call"
  },
  {
    "library": "django",
    "name": "add_immediate_loading",
    "source_code": "def add_immediate_loading(self, field_names):\n    existing, defer = self.deferred_loading\n    field_names = set(field_names)\n    if 'pk' in field_names:\n        field_names.remove('pk')\n        field_names.add(self.get_meta().pk.name)\n    if defer:\n        self.deferred_loading = (field_names.difference(existing), False)\n    else:\n        self.deferred_loading = (frozenset(field_names), False)",
    "docstring": "Add the given list of model field names to the set of fields to retrieve when the SQL is executed (\"immediate loading\" fields). The field names replace any existing immediate loading field names. If there are field names already specified for deferred loading, remove those names from the new field_names before storing the new names for immediate loading. (That is, immediate loading overrides any existing immediate values, but respects existing deferrals.)",
    "type": "method",
    "file_path": "django\\django\\db\\models\\sql\\query.py",
    "ast_data": "FunctionDef name:add_immediate_loading arg:self arg:field_names arguments arg arg Assign Assign Call If Compare Call Call Call If Assign Call Assign Call"
  },
  {
    "library": "tensorflow",
    "name": "batch_shape",
    "source_code": "@property\ndef batch_shape(self):\n    return self.shape[:-2]",
    "docstring": "of batch dimensions of this . If this operator acts like the batch matrix with , then this returns , equivalent to Returns: , statically determined, may be undefined.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\linalg\\linear_operator.py",
    "ast_data": "FunctionDef name:batch_shape arg:self arguments arg Return return:yes"
  },
  {
    "library": "cryptography",
    "name": "encode_public",
    "source_code": "def encode_public(self, public_key: dsa.DSAPublicKey, f_pub: _FragList) -> None:\n    public_numbers = public_key.public_numbers()\n    parameter_numbers = public_numbers.parameter_numbers\n    self._validate(public_numbers)\n    f_pub.put_mpint(parameter_numbers.p)\n    f_pub.put_mpint(parameter_numbers.q)\n    f_pub.put_mpint(parameter_numbers.g)\n    f_pub.put_mpint(public_numbers.y)",
    "docstring": "Write DSA public key",
    "type": "method",
    "file_path": "cryptography\\src\\cryptography\\hazmat\\primitives\\serialization\\ssh.py",
    "ast_data": "FunctionDef name:encode_public arg:self arg:public_key arg:f_pub arguments arg arg arg Assign Call Assign Call Call Call Call Call"
  },
  {
    "library": "scikit-learn",
    "name": "EfficiencyWarning",
    "source_code": "class EfficiencyWarning(UserWarning):\n    pass",
    "docstring": "Warning used to notify the user of inefficient computation. This warning notifies the user that the efficiency may not be optimal due to some reason which may be included as a part of the warning message. This may be subclassed into a more specific Warning class. .. versionadded:: 0.18",
    "type": "class",
    "file_path": "scikit-learn\\sklearn\\exceptions.py",
    "ast_data": "ClassDef name:EfficiencyWarning"
  },
  {
    "library": "matplotlib",
    "name": "TransformedPatchPath",
    "source_code": "class TransformedPatchPath(TransformedPath):\n\n    def __init__(self, patch):\n        super().__init__(patch.get_path(), patch.get_transform())\n        self._patch = patch\n\n    def _revalidate(self):\n        patch_path = self._patch.get_path()\n        if patch_path != self._path:\n            self._path = patch_path\n            self._transformed_path = None\n        super()._revalidate()",
    "docstring": "A caches a non-affine transformed copy of the . This cached copy is automatically updated when the non-affine part of the transform or the patch changes.",
    "type": "class",
    "file_path": "matplotlib\\lib\\matplotlib\\transforms.py",
    "ast_data": "ClassDef name:TransformedPatchPath FunctionDef name:__init__ arg:self arg:patch arguments arg arg Call Call Call Call Assign FunctionDef name:_revalidate arg:self arguments arg Assign Call If Compare Assign Assign Call Call"
  },
  {
    "library": "tensorflow",
    "name": "concat",
    "source_code": "def concat(tensors, axis=0):\n    if isinstance(tensors[0], sparse_tensor.SparseTensor):\n        return sparse_ops.sparse_concat_v2(axis=axis, sp_inputs=tensors)\n    elif _is_scalar(tensors[0]):\n        return array_ops_stack.stack(tensors, axis=axis)\n    else:\n        return array_ops.concat(tensors, axis=axis)",
    "docstring": "Concats s along .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\training.py",
    "ast_data": "FunctionDef name:concat arg:tensors arg:axis arguments arg arg If Call Return return:yes Call If Call Return return:yes Call Return return:yes Call"
  },
  {
    "library": "numpy",
    "name": "config_cc",
    "source_code": "class config_cc(Command):\n    description = 'specify C/C++ compiler information'\n    user_options = [('compiler=', None, 'specify C/C++ compiler type')]\n\n    def initialize_options(self):\n        self.compiler = None\n\n    def finalize_options(self):\n        log.info('unifying config_cc, config, build_clib, build_ext, build commands --compiler options')\n        build_clib = self.get_finalized_command('build_clib')\n        build_ext = self.get_finalized_command('build_ext')\n        config = self.get_finalized_command('config')\n        build = self.get_finalized_command('build')\n        cmd_list = [self, config, build_clib, build_ext, build]\n        for a in ['compiler']:\n            l = []\n            for c in cmd_list:\n                v = getattr(c, a)\n                if v is not None:\n                    if not isinstance(v, str):\n                        v = v.compiler_type\n                    if v not in l:\n                        l.append(v)\n            if not l:\n                v1 = None\n            else:\n                v1 = l[0]\n            if len(l) > 1:\n                log.warn('  commands have different --%s options: %s, using first in list as default' % (a, l))\n            if v1:\n                for c in cmd_list:\n                    if getattr(c, a) is None:\n                        setattr(c, a, v1)\n        return\n\n    def run(self):\n        return",
    "docstring": "Distutils command to hold user specified options to C/C++ compilers.",
    "type": "class",
    "file_path": "numpy\\numpy\\distutils\\command\\config_compiler.py",
    "ast_data": "ClassDef name:config_cc Assign Assign FunctionDef name:initialize_options arg:self arguments arg Assign FunctionDef name:finalize_options arg:self arguments arg Call Assign Call Assign Call Assign Call Assign Call Assign For Assign For Assign Call If Compare If Call Assign If Compare Call If Assign Assign If Compare Call Call If For If Compare Call Call Return return:no FunctionDef name:run arg:self arguments arg Return return:no"
  },
  {
    "library": "pandas",
    "name": "col_count",
    "source_code": "@property\ndef col_count(self) -> int:\n    return self.info.col_count",
    "docstring": "Number of columns to be summarized.",
    "type": "method",
    "file_path": "pandas\\pandas\\io\\formats\\info.py",
    "ast_data": "FunctionDef name:col_count arg:self arguments arg Return return:yes"
  },
  {
    "library": "pandas",
    "name": "allows_duplicate_labels",
    "source_code": "@property\ndef allows_duplicate_labels(self) -> bool:\n    return self._allows_duplicate_labels",
    "docstring": "Whether this object allows duplicate labels. Setting `duplicates` for more. See Also -------- DataFrame.attrs : Set global metadata on this object. DataFrame.set_flags : Set global flags on this object. Examples -------- >>> df = pd.DataFrame({\"A\": [1, 2]}, index=[\"a\", \"a\"]) >>> df.flags.allows_duplicate_labels True >>> df.flags.allows_duplicate_labels = False Traceback (most recent call last): ... pandas.errors.DuplicateLabelError: Index has duplicates. positions label a [0, 1]",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\flags.py",
    "ast_data": "FunctionDef name:allows_duplicate_labels arg:self arguments arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "random_structured",
    "source_code": "def random_structured(module, name, amount, dim):\n    RandomStructured.apply(module, name, amount, dim)\n    return module",
    "docstring": "Prune tensor by removing random channels along the specified dimension. Prunes tensor corresponding to parameter called ``, it represents the absolute number of parameters to prune. dim (int): index of the dim along which we define channels to prune. Returns: module (nn.Module): modified (i.e. pruned) version of the input module Examples: >>> # xdoctest: +SKIP >>> m = prune.random_structured( ... nn.Linear(5, 3), 'weight', amount=3, dim=1 ... ) >>> columns_pruned = int(sum(torch.sum(m.weight, dim=0) == 0)) >>> print(columns_pruned) 3",
    "type": "function",
    "file_path": "pytorch\\torch\\nn\\utils\\prune.py",
    "ast_data": "FunctionDef name:random_structured arg:module arg:name arg:amount arg:dim arguments arg arg arg arg Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_infer_parameters",
    "source_code": "def _infer_parameters(self: _LazyProtocol, module, args, kwargs=None):\n    kwargs = kwargs if kwargs else {}\n    module.initialize_parameters(*args, **kwargs)\n    if module.has_uninitialized_params():\n        raise RuntimeError(f'module {self._get_name()} has not been fully initialized')\n    module._initialize_hook.remove()\n    module._load_hook.remove()\n    delattr(module, '_initialize_hook')\n    delattr(module, '_load_hook')\n    if module.cls_to_become is not None:\n        module.__class__ = module.cls_to_become",
    "docstring": "Infers the size and initializes the parameters according to the provided input batch. Given a module that contains parameters that were declared inferrable using :class:, runs a forward pass in the complete module using the provided input to initialize all the parameters as needed. The module is set into evaluation mode before running the forward pass in order to avoid saving statistics or calculating gradients",
    "type": "method",
    "file_path": "pytorch\\torch\\nn\\modules\\lazy.py",
    "ast_data": "FunctionDef name:_infer_parameters arg:self arg:module arg:args arg:kwargs arguments arg arg arg arg Assign Call If Call Raise Call Call Call Call Call Call If Compare Assign"
  },
  {
    "library": "pytorch",
    "name": "fp16_compress_hook",
    "source_code": "def fp16_compress_hook(process_group: dist.ProcessGroup, bucket: dist.GradBucket) -> torch.futures.Future[torch.Tensor]:\n    return _compress_hook(torch.float16, process_group, bucket)",
    "docstring": "Compress by casting ``). Example:: >>> # xdoctest: +SKIP >>> ddp_model.register_comm_hook(process_group, fp16_compress_hook)",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\algorithms\\ddp_comm_hooks\\default_hooks.py",
    "ast_data": "FunctionDef name:fp16_compress_hook arg:process_group arg:bucket arguments arg arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "parse_args",
    "source_code": "def parse_args(self):\n    arg_parser = argparse.ArgumentParser(description='Parse the arguments for the TensorFlow build environment  setter')\n    arg_parser.add_argument('--disable-mkl', dest='disable_mkl', help='Turn off MKL. By default the compiler flag --config=mkl is enabled.', action='store_true')\n    arg_parser.add_argument('--disable-v2', dest='disable_v2', help='Build TensorFlow v1 rather than v2. By default the  compiler flag --config=v2 is enabled.', action='store_true')\n    arg_parser.add_argument('--enable-bfloat16', dest='enable_bfloat16', help='Enable bfloat16 build. By default it is  disabled if no parameter is passed.', action='store_true')\n    arg_parser.add_argument('--enable-dnnl1', dest='enable_dnnl1', help='Enable dnnl1 build. By default it is  disabled if no parameter is passed.', action='store_true')\n    arg_parser.add_argument('-s', '--secure-build', dest='secure_build', help='Enable secure build flags.', action='store_true')\n    arg_parser.add_argument('-p', '--platform', choices=self.PLATFORMS_.keys(), help='The target platform.', dest='target_platform', default=self.default_platform_)\n    arg_parser.add_argument('-f', '--bazelrc-file', dest='bazelrc_file', help='The full path to the bazelrc file into which the build command will be written. The path will be relative to the container  environment.', required=True)\n    self.args = arg_parser.parse_args()",
    "docstring": "Set up argument parser, and parse CLI args.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\tools\\ci_build\\linux\\mkl\\set-build-env.py",
    "ast_data": "FunctionDef name:parse_args arg:self arguments arg Assign Call Call Call Call Call Call Call Call Call Assign Call"
  },
  {
    "library": "pytorch",
    "name": "tighten",
    "source_code": "def tighten(self, other) -> ValueRanges:\n    return self & other",
    "docstring": "Given two ValueRanges, returns their intersection",
    "type": "method",
    "file_path": "pytorch\\torch\\utils\\_sympy\\value_ranges.py",
    "ast_data": "FunctionDef name:tighten arg:self arg:other arguments arg arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "deserialize_sparse_tensors",
    "source_code": "def deserialize_sparse_tensors(tensors, types, shapes, classes):\n    ret = nest.pack_sequence_as(types, [sparse_ops.deserialize_sparse(tensor, dtype=ty, rank=shape.ndims) if c is sparse_tensor.SparseTensor else tensor for tensor, ty, shape, c in zip(nest.flatten(tensors), nest.flatten(types), nest.flatten(shapes), nest.flatten(classes))])\n    return ret",
    "docstring": "Deserializes sparse tensors. Args: tensors: a structure of tensors to deserialize. types: a structure that holds information about types of shapes: a structure that holds information about shapes of classes: a structure of objects that identify the dataset item classes Returns: with any serialized sparse tensors replaced by their deserialized version.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\data\\util\\sparse.py",
    "ast_data": "FunctionDef name:deserialize_sparse_tensors arg:tensors arg:types arg:shapes arg:classes arguments arg arg arg arg Assign Call Compare Call Call Call Call Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_get_sharding_strategy",
    "source_code": "def _get_sharding_strategy(handle):\n    return handle._sharding_strategy if handle else None",
    "docstring": "Returns the sharding strategy of the handle.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\fsdp\\_common_utils.py",
    "ast_data": "FunctionDef name:_get_sharding_strategy arg:handle arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "sparse_batch",
    "source_code": "def sparse_batch(self, batch_size, row_shape, name=None) -> 'DatasetV2':\n    from tensorflow.python.data.ops import sparse_batch_op\n    return sparse_batch_op._sparse_batch(self, batch_size, row_shape, name)",
    "docstring": "Combines consecutive elements into s. Like , this transformation combines multiple consecutive elements of the dataset, which might have different shapes, into a single element. The resulting element has three components (, , and ), which comprise a that represents the same data. The represents the dense shape of each row in the resulting , to which the effective batch size is prepended. For example: Args: batch_size: A scalar , representing the number of consecutive elements of this dataset to combine in a single batch. row_shape: A or vector tensor-like object representing the equivalent dense shape of a row in the resulting . Each element of this dataset must have the same rank as , and must have size less than or equal to in each dimension. name: (Optional.) A string indicating a name for the operation. Returns: A new with the transformation applied as described above.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\data\\ops\\dataset_ops.py",
    "ast_data": "FunctionDef name:sparse_batch arg:self arg:batch_size arg:row_shape arg:name arguments arg arg arg arg Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "all_newer",
    "source_code": "def all_newer(dst_files, src_files):\n    return all((os.path.exists(dst) and newer(dst, src) for dst in dst_files for src in src_files))",
    "docstring": "True only if all dst_files exist and are newer than all src_files.",
    "type": "function",
    "file_path": "scipy\\scipy\\_build_utils\\_wrappers_common.py",
    "ast_data": "FunctionDef name:all_newer arg:dst_files arg:src_files arguments arg arg Return return:yes Call BoolOp Call Call"
  },
  {
    "library": "tensorflow",
    "name": "main",
    "source_code": "def main(_):\n    code.interact()\n    return 0",
    "docstring": "Run an interactive console.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\tools\\pip_package\\simple_console.py",
    "ast_data": "FunctionDef name:main arg:_ arguments arg Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "compiled_fx_graph_hash",
    "source_code": "def compiled_fx_graph_hash(gm: torch.fx.GraphModule, example_inputs: Sequence[InputType], fx_kwargs: _CompileFxKwargs, inputs_to_check: Sequence[int]) -> tuple[str, list[str]]:\n    details = FxGraphHashDetails(gm, example_inputs, fx_kwargs, inputs_to_check)\n    has_user_defined_triton_kernels = len(details.user_defined_triton_source) != 0\n    pickler = FxGraphCachePickler(gm, has_user_defined_triton_kernels)\n    key = 'f' + pickler.get_hash(details)\n    debug_lines = pickler.debug_lines(details)\n    debug_str = '\\n'.join(debug_lines)\n    log.debug(f'FX graph cache hash details for key {key}:\\n{debug_str}')\n    return (key, debug_lines)",
    "docstring": "Generate a unique hash of the FX graph for caching.",
    "type": "function",
    "file_path": "pytorch\\torch\\_inductor\\codecache.py",
    "ast_data": "FunctionDef name:compiled_fx_graph_hash arg:gm arg:example_inputs arg:fx_kwargs arg:inputs_to_check arguments arg arg arg arg Assign Call Assign Compare Call Assign Call Assign Call Assign Call Assign Call Call Return return:yes"
  },
  {
    "library": "django",
    "name": "parse_accept_lang_header",
    "source_code": "def parse_accept_lang_header(lang_string):\n    if len(lang_string) <= LANGUAGE_CODE_MAX_LENGTH:\n        return _parse_accept_lang_header(lang_string)\n    if (index := lang_string.rfind(',', 0, LANGUAGE_CODE_MAX_LENGTH)) > 0:\n        return _parse_accept_lang_header(lang_string[:index])\n    return ()",
    "docstring": "Parse the value of the Accept-Language header up to a maximum length. The value of the header is truncated to a maximum length to avoid potential denial of service and memory exhaustion attacks. Excessive memory could be used if the raw value is very large as it would be cached due to the use of functools.lru_cache() to avoid repetitive parsing of common header values.",
    "type": "function",
    "file_path": "django\\django\\utils\\translation\\trans_real.py",
    "ast_data": "FunctionDef name:parse_accept_lang_header arg:lang_string arguments arg If Compare Call Return return:yes Call If Compare Call Return return:yes Call Return return:no"
  },
  {
    "library": "scipy",
    "name": "ConstantDenseOutput",
    "source_code": "class ConstantDenseOutput(DenseOutput):\n\n    def __init__(self, t_old, t, value):\n        super().__init__(t_old, t)\n        self.value = value\n\n    def _call_impl(self, t):\n        if t.ndim == 0:\n            return self.value\n        else:\n            ret = np.empty((self.value.shape[0], t.shape[0]))\n            ret[:] = self.value[:, None]\n            return ret",
    "docstring": "Constant value interpolator. This class used for degenerate integration cases: equal integration limits or a system with 0 equations.",
    "type": "class",
    "file_path": "scipy\\scipy\\integrate\\_ivp\\base.py",
    "ast_data": "ClassDef name:ConstantDenseOutput FunctionDef name:__init__ arg:self arg:t_old arg:t arg:value arguments arg arg arg arg Call Call Assign FunctionDef name:_call_impl arg:self arg:t arguments arg arg If Compare Return return:yes Assign Call Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "set_default",
    "source_code": "@contextlib.contextmanager\ndef set_default(ctx):\n    try:\n        _default_ctx_stack.push(ctx)\n        yield\n    finally:\n        _default_ctx_stack.pop()",
    "docstring": "Returns a contextmanager with as the default execution context.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\experimental\\context_stack.py",
    "ast_data": "FunctionDef name:set_default arg:ctx arguments arg Try Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_MatMulGradAgainstSecondOnly",
    "source_code": "def _MatMulGradAgainstSecondOnly(op: ops.Operation, grad):\n    t_a = op.get_attr('transpose_a')\n    t_b = op.get_attr('transpose_b')\n    a = math_ops.conj(op.inputs[0])\n    if not t_a and (not t_b):\n        grad_b = gen_math_ops.mat_mul(a, grad, transpose_a=True, grad_b=True)\n    elif not t_a and t_b:\n        grad_b = gen_math_ops.mat_mul(grad, a, transpose_a=True, grad_b=True)\n    elif t_a and (not t_b):\n        grad_b = gen_math_ops.mat_mul(a, grad, grad_b=True)\n    elif t_a and t_b:\n        grad_b = gen_math_ops.mat_mul(grad, a, transpose_a=True, transpose_b=True, grad_b=True)\n    return (None, grad_b)",
    "docstring": "Gradient for MatMul, only for the second input.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\math_grad.py",
    "ast_data": "FunctionDef name:_MatMulGradAgainstSecondOnly arg:op arg:grad arguments arg arg Assign Call Assign Call Assign Call If BoolOp Assign Call If BoolOp Assign Call If BoolOp Assign Call If BoolOp Assign Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_reference_quantized_add_relu",
    "source_code": "def _reference_quantized_add_relu(x_i8, x_scale, x_zero_point, y_i8, y_scale, y_zero_point, out_scale, out_zero_point, quant_min, quant_max):\n    x_i32 = x_i8.to(torch.int32)\n    y_i32 = y_i8.to(torch.int32)\n    x_i32 = out_dtype(torch.ops.aten.mul.Tensor, torch.int32, x_i32 - x_zero_point, x_scale / out_scale)\n    y_i32 = out_dtype(torch.ops.aten.mul.Tensor, torch.int32, y_i32 - y_zero_point, y_scale / out_scale)\n    out_i32 = x_i32 + y_i32 + out_zero_point\n    out_i8 = torch.ops.aten.clamp(out_i32, out_zero_point, quant_max).to(torch.int8)\n    return out_i8",
    "docstring": "See comments for for more information on how to derive the formula for out_i8 based on x_i8 and y_i8",
    "type": "function",
    "file_path": "pytorch\\torch\\ao\\quantization\\pt2e\\representation\\rewrite.py",
    "ast_data": "FunctionDef name:_reference_quantized_add_relu arg:x_i8 arg:x_scale arg:x_zero_point arg:y_i8 arg:y_scale arg:y_zero_point arg:out_scale arg:out_zero_point arg:quant_min arg:quant_max arguments arg arg arg arg arg arg arg arg arg arg Assign Call Assign Call Assign Call Assign Call Assign Assign Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "apply",
    "source_code": "@doc_controls.do_not_doc_inheritable\ndef apply(self, inputs, *args, **kwargs):\n    warnings.warn('`layer.apply` is deprecated and will be removed in a future version. Please use `layer.__call__` method instead.')\n    return self.__call__(inputs, *args, **kwargs)",
    "docstring": "Deprecated, do NOT use! This is an alias of . Args: inputs: Input tensor(s). *args: additional positional arguments to be passed to . **kwargs: additional keyword arguments to be passed to . Returns: Output tensor(s).",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\base_layer_v1.py",
    "ast_data": "FunctionDef name:apply arg:self arg:inputs arguments arg arg arg arg Call Return return:yes Call"
  },
  {
    "library": "sphinx",
    "name": "TabularColumns",
    "source_code": "class TabularColumns(SphinxDirective):\n    has_content = False\n    required_arguments = 1\n    optional_arguments = 0\n    final_argument_whitespace = True\n    option_spec: ClassVar[OptionSpec] = {}\n\n    def run(self) -> list[Node]:\n        node = addnodes.tabular_col_spec()\n        node['spec'] = self.arguments[0]\n        self.set_source_info(node)\n        return [node]",
    "docstring": "Directive to give an explicit tabulary column definition to LaTeX.",
    "type": "class",
    "file_path": "sphinx\\sphinx\\directives\\other.py",
    "ast_data": "ClassDef name:TabularColumns Assign Assign Assign Assign FunctionDef name:run arg:self arguments arg Assign Call Assign Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_default_broadcast_coalesced",
    "source_code": "def _default_broadcast_coalesced(self, bufs=None, bucket_size=None, authoritative_rank=0):\n    if bufs is None:\n        bufs = self.modules_buffers\n    if bucket_size is None:\n        bucket_size = self.broadcast_bucket_size\n    self._distributed_broadcast_coalesced(bufs, bucket_size, authoritative_rank)",
    "docstring": "Broadcasts buffers from rank 0 to rest of workers. If bufs, bucket_size are None, default values self.modules_buffers and self.broadcast_bucket_size are used instead.",
    "type": "method",
    "file_path": "pytorch\\torch\\nn\\parallel\\distributed.py",
    "ast_data": "FunctionDef name:_default_broadcast_coalesced arg:self arg:bufs arg:bucket_size arg:authoritative_rank arguments arg arg arg arg If Compare Assign If Compare Assign Call"
  },
  {
    "library": "tensorflow",
    "name": "reveal_undocumented",
    "source_code": "def reveal_undocumented(symbol_name, target_module=None):\n    if symbol_name not in _HIDDEN_ATTRIBUTES:\n        raise LookupError('Symbol %s is not a hidden symbol' % symbol_name)\n    symbol_basename = symbol_name.split('.')[-1]\n    original_module, attr_value = _HIDDEN_ATTRIBUTES[symbol_name]\n    if not target_module:\n        target_module = original_module\n    setattr(target_module, symbol_basename, attr_value)",
    "docstring": "Reveals a symbol that was previously removed by . This should be used by tensorflow internal tests only. It explicitly defeats the encapsulation afforded by . It throws an exception when the symbol was not hidden in the first place. Args: symbol_name: a string representing the full absolute path of the symbol. target_module: if specified, the module in which to restore the symbol.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\util\\all_util.py",
    "ast_data": "FunctionDef name:reveal_undocumented arg:symbol_name arg:target_module arguments arg arg If Compare Raise Call Assign Call Assign If Assign Call"
  },
  {
    "library": "tensorflow",
    "name": "__call__",
    "source_code": "def __call__(self, shape, dtype=None, **kwargs):\n    del kwargs\n    return constant_op.constant(self.value, dtype=_get_dtype(dtype), shape=shape)",
    "docstring": "Returns a tensor object initialized to . Args: shape: Shape of the tensor. dtype: Optional dtype of the tensor. If not specified, is used, which default to unless you configured it otherwise (via ). **kwargs: Additional keyword arguments.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\initializers\\initializers_v2.py",
    "ast_data": "FunctionDef name:__call__ arg:self arg:shape arg:dtype arguments arg arg arg arg Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "experimental_from_proto",
    "source_code": "@classmethod\ndef experimental_from_proto(cls, proto: types_pb2.SerializedDType) -> 'DType':\n    return DType(proto.datatype)",
    "docstring": "Returns a Dtype instance based on the serialized proto.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\dtypes.py",
    "ast_data": "FunctionDef name:experimental_from_proto arg:cls arg:proto arguments arg arg Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "EllipseSelector",
    "source_code": "@_docstring.Substitution(_RECTANGLESELECTOR_PARAMETERS_DOCSTRING.replace('__ARTIST_NAME__', 'ellipse'))\nclass EllipseSelector(RectangleSelector):\n\n    def _init_shape(self, **props):\n        return Ellipse((0, 0), 0, 1, visible=False, **props)\n\n    def _draw_shape(self, extents):\n        x0, x1, y0, y1 = extents\n        xmin, xmax = sorted([x0, x1])\n        ymin, ymax = sorted([y0, y1])\n        center = [x0 + (x1 - x0) / 2.0, y0 + (y1 - y0) / 2.0]\n        a = (xmax - xmin) / 2.0\n        b = (ymax - ymin) / 2.0\n        self._selection_artist.center = center\n        self._selection_artist.width = 2 * a\n        self._selection_artist.height = 2 * b\n        self._selection_artist.angle = self.rotation\n\n    @property\n    def _rect_bbox(self):\n        x, y = self._selection_artist.center\n        width = self._selection_artist.width\n        height = self._selection_artist.height\n        return (x - width / 2.0, y - height / 2.0, width, height)",
    "docstring": "Select an elliptical region of an Axes. For the cursor to remain responsive you must keep a reference to it. Press and release events triggered at the same coordinates outside the selection will clear the selector, except when `/gallery/widgets/rectangle_selector`",
    "type": "class",
    "file_path": "matplotlib\\lib\\matplotlib\\widgets.py",
    "ast_data": "ClassDef name:EllipseSelector FunctionDef name:_init_shape arg:self arguments arg arg Return return:yes Call FunctionDef name:_draw_shape arg:self arg:extents arguments arg arg Assign Assign Call Assign Call Assign Assign Assign Assign Assign Assign Assign FunctionDef name:_rect_bbox arg:self arguments arg Assign Assign Assign Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "TimerClient",
    "source_code": "class TimerClient(abc.ABC):\n\n    @abc.abstractmethod\n    def acquire(self, scope_id: str, expiration_time: float) -> None:\n        pass\n\n    @abc.abstractmethod\n    def release(self, scope_id: str):\n        pass",
    "docstring": "Client library to acquire and release countdown timers by communicating with the TimerServer.",
    "type": "class",
    "file_path": "pytorch\\torch\\distributed\\elastic\\timer\\api.py",
    "ast_data": "ClassDef name:TimerClient FunctionDef name:acquire arg:self arg:scope_id arg:expiration_time arguments arg arg arg FunctionDef name:release arg:self arg:scope_id arguments arg arg"
  },
  {
    "library": "scipy",
    "name": "hstack",
    "source_code": "def hstack(blocks, format=None, dtype=None):\n    blocks = np.asarray(blocks, dtype='object')\n    if any((isinstance(b, sparray) for b in blocks.flat)):\n        return _block([blocks], format, dtype)\n    else:\n        return _block([blocks], format, dtype, return_spmatrix=True)",
    "docstring": "Stack sparse matrices horizontally (column wise) Parameters ---------- blocks sequence of sparse matrices with compatible shapes format : str sparse format of the result (e.g., \"csr\") by default an appropriate sparse matrix format is returned. This choice is subject to change. dtype : dtype, optional The data-type of the output matrix. If not given, the dtype is determined from that of . Returns ------- new_array : sparse matrix or array If any block in blocks is a sparse array, return a sparse array. Otherwise return a sparse matrix. If you want a sparse array built from blocks that are not sparse arrays, use ``. See Also -------- vstack : stack sparse matrices vertically (row wise) Examples -------- >>> from scipy.sparse import coo_matrix, hstack >>> A = coo_matrix([[1, 2], [3, 4]]) >>> B = coo_matrix([[5], [6]]) >>> hstack([A,B]).toarray() array([[1, 2, 5], [3, 4, 6]])",
    "type": "function",
    "file_path": "scipy\\scipy\\sparse\\_construct.py",
    "ast_data": "FunctionDef name:hstack arg:blocks arg:format arg:dtype arguments arg arg arg Assign Call If Call Call Return return:yes Call Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "transform_path",
    "source_code": "def transform_path(self, path):\n    return self.transform_path_affine(self.transform_path_non_affine(path))",
    "docstring": "Apply the transform to *path*, returning a new . In some cases, this transform may insert curves into the path that began as line segments.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\transforms.py",
    "ast_data": "FunctionDef name:transform_path arg:self arg:path arguments arg arg Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "from_config",
    "source_code": "@classmethod\ndef from_config(cls, config, custom_objects=None):\n    if 'lr' in config:\n        config['learning_rate'] = config.pop('lr')\n    if 'learning_rate' in config:\n        if isinstance(config['learning_rate'], dict):\n            config['learning_rate'] = learning_rate_schedule.deserialize(config['learning_rate'], custom_objects=custom_objects)\n    return cls(**config)",
    "docstring": "Creates an optimizer from its config. This method is the reverse of , capable of instantiating the same optimizer from the config dictionary. Args: config: A Python dictionary, typically the output of get_config. custom_objects: A Python dictionary mapping names to additional Python objects used to create this optimizer, such as a function used for a hyperparameter. Returns: An optimizer instance.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\optimizer_v2\\optimizer_v2.py",
    "ast_data": "FunctionDef name:from_config arg:cls arg:config arg:custom_objects arguments arg arg arg If Compare Assign Call If Compare If Call Assign Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "__init__",
    "source_code": "def __init__(self, start, end, flattened, dims_to_flatten):\n    assert isinstance(dims_to_flatten, list)\n    assert isinstance(flattened, TVar)\n    assert isinstance(start, int)\n    assert isinstance(end, int)\n    self.start = start\n    self.end = end\n    self.dims_to_flatten = dims_to_flatten\n    self.flattened = flattened",
    "docstring": ":param start: start index :param end: end index :param flattened: variable to store the product :param dims_to_flatten: the type which we will flatten",
    "type": "method",
    "file_path": "pytorch\\torch\\fx\\experimental\\migrate_gradual_types\\constraint.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:start arg:end arg:flattened arg:dims_to_flatten arguments arg arg arg arg arg Call Call Call Call Assign Assign Assign Assign"
  },
  {
    "library": "pytorch",
    "name": "is_from_package",
    "source_code": "def is_from_package(obj: Any) -> bool:\n    if type(obj) == ModuleType:\n        return is_mangled(obj.__name__)\n    else:\n        return is_mangled(type(obj).__module__)",
    "docstring": "Return whether an object was loaded from a package. Note: packaged objects from externed modules will return ``.",
    "type": "function",
    "file_path": "pytorch\\torch\\package\\analyze\\is_from_package.py",
    "ast_data": "FunctionDef name:is_from_package arg:obj arguments arg If Compare Call Return return:yes Call Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "derive_from_graph",
    "source_code": "def derive_from_graph(func_graph):\n    input_signature = (tuple((trace_type.from_value(i) for i in func_graph.inputs)), {})\n    output_signature = tuple((trace_type.from_value(o) for o in func_graph.outputs))\n    return function_type_lib.from_structured_signature(input_signature, output_signature, func_graph.function_captures.capture_types)",
    "docstring": "Derives a FunctionType from FuncGraph.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\polymorphic_function\\function_type_utils.py",
    "ast_data": "FunctionDef name:derive_from_graph arg:func_graph arguments arg Assign Call Call Assign Call Call Return return:yes Call"
  },
  {
    "library": "django",
    "name": "rjust",
    "source_code": "@register.filter(is_safe=True)\n@stringfilter\ndef rjust(value, arg):\n    return value.rjust(int(arg))",
    "docstring": "Right-align the value in a field of a given width.",
    "type": "function",
    "file_path": "django\\django\\template\\defaultfilters.py",
    "ast_data": "FunctionDef name:rjust arg:value arg:arg arguments arg arg Return return:yes Call Call Call"
  },
  {
    "library": "matplotlib",
    "name": "get_drawstyle",
    "source_code": "def get_drawstyle(self):\n    return self._drawstyle",
    "docstring": "Return the drawstyle. See also .",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\lines.py",
    "ast_data": "FunctionDef name:get_drawstyle arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "DFSGraphTracer",
    "source_code": "class DFSGraphTracer(object):\n\n    def __init__(self, input_lists, skip_node_names=None, destination_node_name=None):\n        self._input_lists = input_lists\n        self._skip_node_names = skip_node_names\n        self._inputs = []\n        self._visited_nodes = []\n        self._depth_count = 0\n        self._depth_list = []\n        self._destination_node_name = destination_node_name\n\n    def trace(self, graph_element_name):\n        self._depth_count += 1\n        node_name = get_node_name(graph_element_name)\n        if node_name == self._destination_node_name:\n            raise GraphTracingReachedDestination()\n        if node_name in self._skip_node_names:\n            return\n        if node_name in self._visited_nodes:\n            return\n        self._visited_nodes.append(node_name)\n        for input_list in self._input_lists:\n            if node_name not in input_list:\n                continue\n            for inp in input_list[node_name]:\n                if get_node_name(inp) in self._visited_nodes:\n                    continue\n                self._inputs.append(inp)\n                self._depth_list.append(self._depth_count)\n                self.trace(inp)\n        self._depth_count -= 1\n\n    def inputs(self):\n        return self._inputs\n\n    def depth_list(self):\n        return self._depth_list",
    "docstring": "Graph input tracer using depth-first search.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\debug\\lib\\debug_graphs.py",
    "ast_data": "ClassDef name:DFSGraphTracer FunctionDef name:__init__ arg:self arg:input_lists arg:skip_node_names arg:destination_node_name arguments arg arg arg arg Assign Assign Assign Assign Assign Assign Assign FunctionDef name:trace arg:self arg:graph_element_name arguments arg arg Assign Call If Compare Raise Call If Compare Return return:no If Compare Return return:no Call For If Compare For If Compare Call Call Call Call FunctionDef name:inputs arg:self arguments arg Return return:yes FunctionDef name:depth_list arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "build",
    "source_code": "def build(self):\n    if self._built:\n        return\n    self._variables = self._create_variables_and_slots()\n    self._track_restore_info_for_cpu()\n    self._built = True",
    "docstring": "Create variables and slots variables for TPU embeddings.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\tpu\\tpu_embedding_v3.py",
    "ast_data": "FunctionDef name:build arg:self arguments arg If Return return:no Assign Call Call Assign"
  },
  {
    "library": "tensorflow",
    "name": "_var_scope_name",
    "source_code": "@property\ndef _var_scope_name(self):\n    return self.name",
    "docstring": "Returns string. Used for variable_scope. Defaults to self.name.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\feature_column\\feature_column.py",
    "ast_data": "FunctionDef name:_var_scope_name arg:self arguments arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_add_ephemeral_timeout_for_all_pgs",
    "source_code": "def _add_ephemeral_timeout_for_all_pgs(timeout: timedelta) -> None:\n    for pg in _world.pg_map.keys():\n        devices = pg._device_types\n        if torch.device('cuda') in devices:\n            backend = pg._get_backend(torch.device('cuda'))\n            if is_nccl_available() and isinstance(backend, ProcessGroupNCCL):\n                backend._add_ephemeral_timeout(timeout)",
    "docstring": "This API adds an ephemeral timeout extension for all PGs locally on one rank. The timeout gets reset when the first collective issued after API called finished. NOTE: We only support to set timeout for cuda backends for now. NOTE: While this feature provides flexibility in specific scenarios, it introduces statefulness to timeout setting. Therefore, it is advisable to use this API sparingly and consider alternative approaches, such as directly setting the timeout or utilizing a barrier collective (one can set any timeout to the barrier), whenever feasible. Args: timeout (timedelta): The delta of timeout to extend. Returns: None.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\distributed_c10d.py",
    "ast_data": "FunctionDef name:_add_ephemeral_timeout_for_all_pgs arg:timeout arguments arg For Call Assign If Compare Call Assign Call Call If BoolOp Call Call Call"
  },
  {
    "library": "pandas",
    "name": "StorageExtensionDtype",
    "source_code": "class StorageExtensionDtype(ExtensionDtype):\n    name: str\n    _metadata = ('storage',)\n\n    def __init__(self, storage: str | None=None) -> None:\n        self.storage = storage\n\n    def __repr__(self) -> str:\n        return f'{self.name}[{self.storage}]'\n\n    def __str__(self) -> str:\n        return self.name\n\n    def __eq__(self, other: object) -> bool:\n        if isinstance(other, str) and other == self.name:\n            return True\n        return super().__eq__(other)\n\n    def __hash__(self) -> int:\n        return super().__hash__()\n\n    @property\n    def na_value(self) -> libmissing.NAType:\n        return libmissing.NA",
    "docstring": "ExtensionDtype that may be backed by more than one implementation.",
    "type": "class",
    "file_path": "pandas\\pandas\\core\\dtypes\\base.py",
    "ast_data": "ClassDef name:StorageExtensionDtype Assign FunctionDef name:__init__ arg:self arg:storage arguments arg arg Assign FunctionDef name:__repr__ arg:self arguments arg Return return:yes FunctionDef name:__str__ arg:self arguments arg Return return:yes FunctionDef name:__eq__ arg:self arg:other arguments arg arg If BoolOp Call Compare Return return:yes Return return:yes Call Call FunctionDef name:__hash__ arg:self arguments arg Return return:yes Call Call FunctionDef name:na_value arg:self arguments arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_PosDimTensorInfo",
    "source_code": "class _PosDimTensorInfo(NamedTuple):\n    shape: torch.Size\n    dtype: torch.dtype",
    "docstring": "Metadata for positive-dimension tensors used internally for :meth:. Attributes: shape (torch.Size): Sharded tensor shape (which is equal to the unsharded tensor shape if the tensor is optimizer state for a non-FSDP parameter and is hence not sharded). dtype (torch.dtype): Data type of the tensor.",
    "type": "class",
    "file_path": "pytorch\\torch\\distributed\\fsdp\\_optim_utils.py",
    "ast_data": "ClassDef name:_PosDimTensorInfo"
  },
  {
    "library": "matplotlib",
    "name": "contains_points",
    "source_code": "def contains_points(self, points, radius=None):\n    radius = self._process_radius(radius)\n    return self.get_path().contains_points(points, self.get_transform(), radius)",
    "docstring": "Return whether the given points are inside the patch. Parameters ---------- points : (N, 2) array The points to check, in target coordinates of `.Patch.get_transform.Path.contains_pointNone.Artist.get_picker.Patch.contains_point`.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\patches.py",
    "ast_data": "FunctionDef name:contains_points arg:self arg:points arg:radius arguments arg arg arg Assign Call Return return:yes Call Call Call"
  },
  {
    "library": "scipy",
    "name": "tanhm",
    "source_code": "@_apply_over_batch(('A', 2))\ndef tanhm(A):\n    A = _asarray_square(A)\n    return _maybe_real(A, solve(coshm(A), sinhm(A)))",
    "docstring": "Compute the hyperbolic matrix tangent. This routine uses expm to compute the matrix exponentials. Parameters ---------- A : (N, N) array_like Input array Returns ------- tanhm : (N, N) ndarray Hyperbolic matrix tangent of Examples -------- >>> import numpy as np >>> from scipy.linalg import tanhm, sinhm, coshm >>> a = np.array([[1.0, 3.0], [1.0, 4.0]]) >>> t = tanhm(a) >>> t array([[ 0.3428582 , 0.51987926], [ 0.17329309, 0.86273746]]) Verify tanhm(a) = sinhm(a).dot(inv(coshm(a))) >>> s = sinhm(a) >>> c = coshm(a) >>> t - s.dot(np.linalg.inv(c)) array([[ 2.72004641e-15, 4.55191440e-15], [ 0.00000000e+00, -5.55111512e-16]])",
    "type": "function",
    "file_path": "scipy\\scipy\\linalg\\_matfuncs.py",
    "ast_data": "FunctionDef name:tanhm arg:A arguments arg Assign Call Return return:yes Call Call Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "_find_onnxscript_op",
    "source_code": "def _find_onnxscript_op(graph_proto, included_node_func: set[str], custom_opsets: Mapping[str, int], onnx_function_list: list):\n    for node in graph_proto.node:\n        node_kind = node.domain + '::' + node.op_type\n        for attr in node.attribute:\n            if attr.g is not None:\n                _find_onnxscript_op(attr.g, included_node_func, custom_opsets, onnx_function_list)\n        onnx_function_group = registration.registry.get_function_group(node_kind)\n        if node.domain and (not jit_utils.is_aten(node.domain)) and (not jit_utils.is_prim(node.domain)) and (not jit_utils.is_onnx(node.domain)) and (onnx_function_group is not None) and (node_kind not in included_node_func):\n            specified_version = custom_opsets.get(node.domain, 1)\n            onnx_fn = onnx_function_group.get(specified_version)\n            if onnx_fn is not None:\n                if hasattr(onnx_fn, 'to_function_proto'):\n                    onnx_function_proto = onnx_fn.to_function_proto()\n                    onnx_function_list.append(onnx_function_proto)\n                    included_node_func.add(node_kind)\n                continue\n            raise errors.UnsupportedOperatorError(node_kind, specified_version, onnx_function_group.get_min_supported() if onnx_function_group else None)\n    return (onnx_function_list, included_node_func)",
    "docstring": "Recursively iterate ModelProto to find ONNXFunction op as it may contain control flow Op.",
    "type": "function",
    "file_path": "pytorch\\torch\\onnx\\_internal\\onnx_proto_utils.py",
    "ast_data": "FunctionDef name:_find_onnxscript_op arg:graph_proto arg:included_node_func arg:custom_opsets arg:onnx_function_list arguments arg arg arg arg For Assign For If Compare Call Assign Call If BoolOp Call Call Call Compare Compare Assign Call Assign Call If Compare If Call Assign Call Call Call Raise Call Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "do_3d_projection",
    "source_code": "def do_3d_projection(self):\n    vs_list = [vs for vs, _ in self._3dverts_codes]\n    if self._axlim_clip:\n        vs_list = [np.ma.array(vs, mask=np.broadcast_to(_viewlim_mask(*vs.T, self.axes), vs.shape)) for vs in vs_list]\n    xyzs_list = [proj3d.proj_transform(*vs.T, self.axes.M) for vs in vs_list]\n    self._paths = [mpath.Path(np.ma.column_stack([xs, ys]), cs) for (xs, ys, _), (_, cs) in zip(xyzs_list, self._3dverts_codes)]\n    zs = np.concatenate([zs for _, _, zs in xyzs_list])\n    return zs.min() if len(zs) else 1000000000.0",
    "docstring": "Project the points according to renderer matrix.",
    "type": "method",
    "file_path": "matplotlib\\lib\\mpl_toolkits\\mplot3d\\art3d.py",
    "ast_data": "FunctionDef name:do_3d_projection arg:self arguments arg Assign If Assign Call Call Call Assign Call Assign Call Call Call Assign Call Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "TPUEmbeddingVariable",
    "source_code": "class TPUEmbeddingVariable(sharded_variable.ShardedVariableMixin):\n\n    @property\n    def _in_graph_mode(self):\n        return self.variables[0]._in_graph_mode",
    "docstring": "A ShardedVariable class for TPU.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\tpu\\tpu_embedding_v2.py",
    "ast_data": "ClassDef name:TPUEmbeddingVariable FunctionDef name:_in_graph_mode arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_add_trackable",
    "source_code": "@doc_controls.for_subclass_implementers\ndef _add_trackable(self, trackable_object, trainable):\n    if isinstance(trackable_object, base_layer_utils.TrackableWeightHandler):\n        handler = trackable_object\n    else:\n        handler = base_layer_utils.TrackableWeightHandler(trackable_object)\n    if trainable:\n        self._trainable_weights.append(handler)\n    else:\n        self._non_trainable_weights.append(handler)\n    return handler",
    "docstring": "Adds a Trackable object to this layer's state. Args: trackable_object: The tf.tracking.Trackable object to add. trainable: Boolean, whether the variable should be part of the layer's \"trainable_variables\" (e.g. variables, biases) or \"non_trainable_variables\" (e.g. BatchNorm mean and variance). Returns: The TrackableWeightHandler used to track this object.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\base_layer_v1.py",
    "ast_data": "FunctionDef name:_add_trackable arg:self arg:trackable_object arg:trainable arguments arg arg arg If Call Assign Assign Call If Call Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "get_celld",
    "source_code": "def get_celld(self):\n    return self._cells",
    "docstring": "Return a dict of cells in the table mapping *(row, column)* to \\s. Notes ----- You can also directly index into the Table object to access individual cells:: cell = table[row, col]",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\table.py",
    "ast_data": "FunctionDef name:get_celld arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "object_graph_key_mapping",
    "source_code": "def object_graph_key_mapping(checkpoint_path):\n    reader = py_checkpoint_reader.NewCheckpointReader(checkpoint_path)\n    object_graph_string = reader.get_tensor(trackable.OBJECT_GRAPH_PROTO_KEY)\n    object_graph_proto = trackable_object_graph_pb2.TrackableObjectGraph()\n    object_graph_proto.ParseFromString(object_graph_string)\n    names_to_keys = {}\n    for node in object_graph_proto.nodes:\n        for attribute in node.attributes:\n            names_to_keys[attribute.full_name] = attribute.checkpoint_key\n    return names_to_keys",
    "docstring": "Return name to key mappings from the checkpoint. Args: checkpoint_path: string, path to object-based checkpoint Returns: Dictionary mapping tensor names to checkpoint keys.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\training\\saver.py",
    "ast_data": "FunctionDef name:object_graph_key_mapping arg:checkpoint_path arguments arg Assign Call Assign Call Assign Call Call Assign For For Assign Return return:yes"
  },
  {
    "library": "scipy",
    "name": "_moment_standardized",
    "source_code": "def _moment_standardized(self, order=1, *, method=None):\n    methods = self._moment_methods if method is None else {method}\n    return self._moment_standardized_dispatch(order, methods=methods, **self._parameters)",
    "docstring": "Standardized distribution moment.",
    "type": "method",
    "file_path": "scipy\\scipy\\stats\\_distribution_infrastructure.py",
    "ast_data": "FunctionDef name:_moment_standardized arg:self arg:order arguments arg arg arg Assign Compare Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "_kernel_matrix",
    "source_code": "def _kernel_matrix(x, kernel):\n    out = np.empty((x.shape[0], x.shape[0]), dtype=float)\n    kernel_func = NAME_TO_FUNC[kernel]\n    kernel_matrix(x, kernel_func, out)\n    return out",
    "docstring": "Return RBFs, with centers at , evaluated at .",
    "type": "function",
    "file_path": "scipy\\scipy\\interpolate\\_rbfinterp_pythran.py",
    "ast_data": "FunctionDef name:_kernel_matrix arg:x arg:kernel arguments arg arg Assign Call Assign Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_ProfilerStats",
    "source_code": "@dataclass\nclass _ProfilerStats:\n    profiling_window_duration_sec: float = 0\n    number_of_events: int = 0\n    profiler_prepare_call_duration_us: int = 0\n    profiler_enable_call_duration_us: int = 0\n    profiler_disable_call_duration_us: int = 0\n    parse_kineto_call_duration_us: int = 0\n    function_events_build_tree_call_duration_us: int = 0",
    "docstring": "Profiler timing and stats used by developers to catch issues/regressions",
    "type": "class",
    "file_path": "pytorch\\torch\\autograd\\profiler.py",
    "ast_data": "ClassDef name:_ProfilerStats"
  },
  {
    "library": "scikit-learn",
    "name": "_route_params",
    "source_code": "def _route_params(self, *, params, method, parent, caller):\n    return getattr(self, method)._route_params(params=params, parent=parent, caller=caller)",
    "docstring": "Prepare the given parameters to be passed to the method. The output of this method can be used directly as the input to the corresponding method as extra keyword arguments to pass metadata. Parameters ---------- params : dict A dictionary of provided metadata. method : str The name of the method for which the parameters are requested and routed. parent : object Parent class object, that routes the metadata. caller : str Method from the parent class object, where the metadata is routed from. Returns ------- params : Bunch A :class: of {prop: value} which can be given to the corresponding method.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\utils\\_metadata_requests.py",
    "ast_data": "FunctionDef name:_route_params arg:self arguments arg arg arg arg arg Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "initialize_resources",
    "source_code": "@tf_should_use.should_use_result\ndef initialize_resources(resource_list, name='init'):\n    if resource_list:\n        return control_flow_ops.group(*[r.create for r in resource_list], name=name)\n    return control_flow_ops.no_op(name=name)",
    "docstring": "Initializes the resources in the given list. Args: resource_list: list of resources to initialize. name: name of the initialization op. Returns: op responsible for initializing all resources.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\resources.py",
    "ast_data": "FunctionDef name:initialize_resources arg:resource_list arg:name arguments arg arg If Return return:yes Call Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "_yeo_johnson_inverse_transform",
    "source_code": "def _yeo_johnson_inverse_transform(self, x, lmbda):\n    x_inv = np.zeros_like(x)\n    pos = x >= 0\n    if abs(lmbda) < np.spacing(1.0):\n        x_inv[pos] = np.exp(x[pos]) - 1\n    else:\n        x_inv[pos] = np.power(x[pos] * lmbda + 1, 1 / lmbda) - 1\n    if abs(lmbda - 2) > np.spacing(1.0):\n        x_inv[~pos] = 1 - np.power(-(2 - lmbda) * x[~pos] + 1, 1 / (2 - lmbda))\n    else:\n        x_inv[~pos] = 1 - np.exp(-x[~pos])\n    return x_inv",
    "docstring": "Return inverse-transformed input x following Yeo-Johnson inverse transform with parameter lambda.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\preprocessing\\_data.py",
    "ast_data": "FunctionDef name:_yeo_johnson_inverse_transform arg:self arg:x arg:lmbda arguments arg arg arg Assign Call Assign Compare If Compare Call Call Assign Call Assign Call If Compare Call Call Assign Call Assign Call Return return:yes"
  },
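The branch structure above mirrors the four cases of the Yeo-Johnson inverse (positive/negative input crossed with lambda near 0 or near 2). A quick round-trip check through the public `PowerTransformer` API, which uses this inverse internally:

```python
import numpy as np
from sklearn.preprocessing import PowerTransformer

rng = np.random.default_rng(0)
X = rng.normal(size=(100, 1)) ** 2 - 0.5   # skewed data with both signs

pt = PowerTransformer(method="yeo-johnson")
Xt = pt.fit_transform(X)

# inverse_transform exercises the branch logic above and should recover X.
X_back = pt.inverse_transform(Xt)
print(np.allclose(X, X_back))  # True (up to floating-point error)
```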
  {
    "library": "pytorch",
    "name": "cpu",
    "source_code": "def cpu(self) -> Self:\n    return self._apply(lambda t: t.cpu())",
    "docstring": "Move all model parameters and buffers to the CPU. .. note:: This method modifies the module in-place. Returns: Module: self",
    "type": "method",
    "file_path": "pytorch\\torch\\nn\\modules\\module.py",
    "ast_data": "FunctionDef name:cpu arg:self arguments arg Return return:yes Call arguments arg Call"
  },
  {
    "library": "scipy",
    "name": "get_result_no_mp",
    "source_code": "def get_result_no_mp(a, b, c, z, group):\n    expected, observed = (complex('nan'), hyp2f1(a, b, c, z))\n    relative_error, absolute_error = (float('nan'), float('nan'))\n    return (a, b, c, z, abs(z), get_region(z), group, expected, observed, relative_error, absolute_error)",
    "docstring": "Get results for given parameter and value combination.",
    "type": "function",
    "file_path": "scipy\\scipy\\special\\_precompute\\hyp2f1_data.py",
    "ast_data": "FunctionDef name:get_result_no_mp arg:a arg:b arg:c arg:z arg:group arguments arg arg arg arg arg Assign Call Call Assign Call Call Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "as_dict",
    "source_code": "@tf_export('experimental.extension_type.as_dict')\ndef as_dict(value):\n    return {field.name: getattr(value, field.name) for field in value._tf_extension_type_fields()}",
    "docstring": "Extracts the attributes of and their values to a dict format. Unlike , this function is not recursive and in case of nested objects, only the top level object is converted to a dict. Args: value: An object. Returns: A dict that contains the attributes of and their values.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\extension_type.py",
    "ast_data": "FunctionDef name:as_dict arg:value arguments arg Return return:yes Call Call Call"
  },
  {
    "library": "authlib",
    "name": "validate_tos_uri",
    "source_code": "def validate_tos_uri(self):\n    self._validate_uri('tos_uri')",
    "docstring": "URL string that points to a human-readable terms of service document for the client that describes a contractual relationship between the end-user and the client that the end-user accepts when authorizing the client. The authorization server SHOULD display this URL to the end-user if it is provided. The value of this field MUST point to a valid web page. The value of this field MAY be internationalized, as described in Section 2.2.",
    "type": "method",
    "file_path": "authlib\\authlib\\oauth2\\rfc7591\\claims.py",
    "ast_data": "FunctionDef name:validate_tos_uri arg:self arguments arg Call"
  },
  {
    "library": "django",
    "name": "alter_index_together",
    "source_code": "def alter_index_together(self, model, old_index_together, new_index_together):\n    olds = {tuple(fields) for fields in old_index_together}\n    news = {tuple(fields) for fields in new_index_together}\n    for fields in olds.difference(news):\n        self._delete_composed_index(model, fields, {'index': True, 'unique': False}, self.sql_delete_index)\n    for field_names in news.difference(olds):\n        fields = [model._meta.get_field(field) for field in field_names]\n        self.execute(self._create_index_sql(model, fields=fields, suffix='_idx'))",
    "docstring": "Deal with a model changing its index_together. The input index_togethers must be doubly-nested, not the single-nested [\"foo\", \"bar\"] format.",
    "type": "method",
    "file_path": "django\\django\\db\\backends\\base\\schema.py",
    "ast_data": "FunctionDef name:alter_index_together arg:self arg:model arg:old_index_together arg:new_index_together arguments arg arg arg arg Assign Call Assign Call For Call Call For Call Assign Call Call Call"
  },
  {
    "library": "scipy",
    "name": "roots_sh_jacobi",
    "source_code": "def roots_sh_jacobi(n, p1, q1, mu=False):\n    if p1 - q1 <= -1 or q1 <= 0:\n        message = '(p - q) must be greater than -1, and q must be greater than 0.'\n        raise ValueError(message)\n    x, w, m = roots_jacobi(n, p1 - q1, q1 - 1, True)\n    x = (x + 1) / 2\n    scale = 2.0 ** p1\n    w /= scale\n    m /= scale\n    if mu:\n        return (x, w, m)\n    else:\n        return (x, w)",
    "docstring": "Gauss-Jacobi (shifted) quadrature. Compute the sample points and weights for Gauss-Jacobi (shifted) quadrature. The sample points are the roots of the nth degree shifted Jacobi polynomial, :math:. These sample points and weights correctly integrate polynomials of degree :math: or less over the interval :math: with weight function :math:. See 22.2.2 in [AS]_ for details. Parameters ---------- n : int quadrature order p1 : float (p1 - q1) must be > -1 q1 : float q1 must be > 0 mu : bool, optional If True, return the sum of the weights, optional. Returns ------- x : ndarray Sample points w : ndarray Weights mu : float Sum of the weights See Also -------- scipy.integrate.fixed_quad References ---------- .. [AS] Milton Abramowitz and Irene A. Stegun, eds. Handbook of Mathematical Functions with Formulas, Graphs, and Mathematical Tables. New York: Dover, 1972.",
    "type": "function",
    "file_path": "scipy\\scipy\\special\\_orthogonal.py",
    "ast_data": "FunctionDef name:roots_sh_jacobi arg:n arg:p1 arg:q1 arg:mu arguments arg arg arg arg If BoolOp Compare Compare Assign Raise Call Assign Call Assign Assign If Return return:yes Return return:yes"
  },
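A small sanity check of the rule: with p1 == q1 == 1 the weight function reduces to 1, so an n-point rule should integrate polynomials of degree up to 2n - 1 exactly over [0, 1]:

```python
import numpy as np
from scipy.special import roots_sh_jacobi

# p1 = q1 = 1 makes w(x) = (1 - x)^0 * x^0 = 1 on [0, 1].
x, w = roots_sh_jacobi(3, 1, 1)

# Degree 4 <= 2*3 - 1, so the rule is exact: integral of x^4 over [0, 1] is 1/5.
approx = np.sum(w * x**4)
print(approx, 1 / 5)  # both ~0.2
```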
  {
    "library": "authlib",
    "name": "JWESharedHeader",
    "source_code": "class JWESharedHeader(dict):\n\n    def __init__(self, protected, unprotected):\n        obj = {}\n        if protected:\n            obj.update(protected)\n        if unprotected:\n            obj.update(unprotected)\n        super().__init__(obj)\n        self.protected = protected if protected else {}\n        self.unprotected = unprotected if unprotected else {}\n\n    def update_protected(self, addition):\n        self.update(addition)\n        self.protected.update(addition)\n\n    @classmethod\n    def from_dict(cls, obj):\n        if isinstance(obj, cls):\n            return obj\n        return cls(obj.get('protected'), obj.get('unprotected'))",
    "docstring": "Shared header object for JWE. Combines protected header and shared unprotected header together.",
    "type": "class",
    "file_path": "authlib\\authlib\\jose\\rfc7516\\models.py",
    "ast_data": "ClassDef name:JWESharedHeader FunctionDef name:__init__ arg:self arg:protected arg:unprotected arguments arg arg arg Assign If Call If Call Call Call Assign Assign FunctionDef name:update_protected arg:self arg:addition arguments arg arg Call Call FunctionDef name:from_dict arg:cls arg:obj arguments arg arg If Call Return return:yes Return return:yes Call Call Call"
  },
  {
    "library": "numpy",
    "name": "get",
    "source_code": "@staticmethod\ndef get(place):\n    return 'DOC_' + place.upper().replace('.', '_')",
    "docstring": "Returns the C #definition name of docstring according to ufunc place. C #definitions are generated by generate_umath_doc.py in a separate C header.",
    "type": "method",
    "file_path": "numpy\\numpy\\_core\\code_generators\\generate_umath.py",
    "ast_data": "FunctionDef name:get arg:place arguments arg Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "NodeStateTracker",
    "source_code": "class NodeStateTracker(object):\n\n    def __init__(self, ctx):\n        self._lineno = 0\n        self._col_offset = 0\n        self.ctx = ctx\n        self.state = _State()\n\n    def debug_print(self, node):\n        if __debug__:\n            print(pretty_printer.fmt(node))\n        return node\n\n    def debug_print_src(self, node):\n        if __debug__:\n            print(parser.unparse(node))\n        return node\n\n    def visit_block(self, nodes, before_visit=None, after_visit=None):\n        if nodes is None:\n            return None\n        results = []\n        node_destination = results\n        for node in nodes:\n            if before_visit:\n                before_visit()\n            replacement = self.visit(node)\n            if after_visit and replacement:\n                replacement, new_destination = after_visit(replacement)\n            else:\n                new_destination = None\n            if replacement:\n                if isinstance(replacement, (list, tuple)):\n                    node_destination.extend(replacement)\n                else:\n                    node_destination.append(replacement)\n            if new_destination is not None:\n                node_destination = new_destination\n        return results",
    "docstring": "Base class for general-purpose Python code transformation. This abstract class provides helpful functions, like state tracking within the scope of arbitrary node, helpers for processing code blocks, debugging, mapping of transformed code to original code, and others. Scope-local state tracking: to keep state across nodes, at the level of (possibly nested) scopes, use enter/exit_local_scope and set/get_local. You must call enter/exit_local_scope manually, but the transformer detects when they are not properly paired. The transformer allows keeping state across calls that is local to arbitrary nodes and their descendants, using the self.state attribute. Multiple independent scopes are allowed and automatically constructed. For example, to keep track of the node that encloses any node, one can write: Alternatively, the / calls can be managed by a statement:",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\autograph\\pyct\\transformer.py",
    "ast_data": "ClassDef name:NodeStateTracker FunctionDef name:__init__ arg:self arg:ctx arguments arg arg Assign Assign Assign Assign Call FunctionDef name:debug_print arg:self arg:node arguments arg arg If Call Call Return return:yes FunctionDef name:debug_print_src arg:self arg:node arguments arg arg If Call Call Return return:yes FunctionDef name:visit_block arg:self arg:nodes arg:before_visit arg:after_visit arguments arg arg arg arg If Compare Return return:no Assign Assign For If Call Assign Call If BoolOp Assign Call Assign If If Call Call Call If Compare Assign Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "plan",
    "source_code": "def plan(self, lines: list[Any]) -> list[Any]:\n    lines = [*lines]\n    self.drop_removed_buffers(lines)\n    self.convert_to_pool_lines(lines)\n    self.compute_live_ranges(lines)\n    self.allocate_groups()\n    self.mark_first_last_usage(lines)\n    return lines",
    "docstring": "Call all the memory planning passes in sequence",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\codegen\\memory_planning.py",
    "ast_data": "FunctionDef name:plan arg:self arg:lines arguments arg arg Assign Call Call Call Call Call Return return:yes"
  },
  {
    "library": "pandas",
    "name": "check_bool_indexer",
    "source_code": "def check_bool_indexer(index: Index, key) -> np.ndarray:\n    result = key\n    if isinstance(key, ABCSeries) and (not key.index.equals(index)):\n        indexer = result.index.get_indexer_for(index)\n        if -1 in indexer:\n            raise IndexingError('Unalignable boolean Series provided as indexer (index of the boolean Series and of the indexed object do not match).')\n        result = result.take(indexer)\n        if not isinstance(result.dtype, ExtensionDtype):\n            return result.astype(bool)._values\n    if is_object_dtype(key):\n        result = np.asarray(result, dtype=bool)\n    elif not is_array_like(result):\n        result = pd_array(result, dtype=bool)\n    return check_array_indexer(index, result)",
    "docstring": "Check if key is a valid boolean indexer for an object with such index and perform reindexing or conversion if needed. This function assumes that is_bool_indexer(key) == True. Parameters ---------- index : Index Index of the object on which the indexing is done. key : list-like Boolean indexer to check. Returns ------- np.array Resulting key. Raises ------ IndexError If the key does not have the same length as index. IndexingError If the index of the key is unalignable to index.",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\indexing.py",
    "ast_data": "FunctionDef name:check_bool_indexer arg:index arg:key arguments arg arg Assign If BoolOp Call Call Assign Call If Compare Raise Call Assign Call If Call Return return:yes Call If Call Assign Call If Call Assign Call Return return:yes Call"
  },
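The alignment and error paths above are observable through ordinary boolean Series indexing; a short sketch, assuming a recent pandas where `IndexingError` is exported from `pandas.errors`:

```python
import pandas as pd

s = pd.Series([10, 20, 30], index=["a", "b", "c"])

# A boolean Series indexer is aligned on its index before selecting rows.
mask = pd.Series([True, False, True], index=["c", "a", "b"])
print(s[mask])  # selects "b" and "c": after alignment the mask is a=False, b=True, c=True

# An indexer whose index cannot be aligned raises IndexingError.
bad = pd.Series([True, True], index=["x", "y"])
try:
    s[bad]
except pd.errors.IndexingError as e:
    print("IndexingError:", e)
```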
  {
    "library": "django",
    "name": "ImmutableList",
    "source_code": "class ImmutableList(tuple):\n\n    def __new__(cls, *args, warning='ImmutableList object is immutable.', **kwargs):\n        self = tuple.__new__(cls, *args, **kwargs)\n        self.warning = warning\n        return self\n\n    def complain(self, *args, **kwargs):\n        raise AttributeError(self.warning)\n    __delitem__ = complain\n    __delslice__ = complain\n    __iadd__ = complain\n    __imul__ = complain\n    __setitem__ = complain\n    __setslice__ = complain\n    append = complain\n    extend = complain\n    insert = complain\n    pop = complain\n    remove = complain\n    sort = complain\n    reverse = complain",
    "docstring": "A tuple-like object that raises useful errors when it is asked to mutate. Example:: >>> a = ImmutableList(range(5), warning=\"You cannot mutate this.\") >>> a[3] = '4' Traceback (most recent call last): ... AttributeError: You cannot mutate this.",
    "type": "class",
    "file_path": "django\\django\\utils\\datastructures.py",
    "ast_data": "ClassDef name:ImmutableList FunctionDef name:__new__ arg:cls arguments arg arg arg arg Assign Call Assign Return return:yes FunctionDef name:complain arg:self arguments arg arg arg Raise Call Assign Assign Assign Assign Assign Assign Assign Assign Assign Assign Assign Assign Assign"
  },
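A usage sketch of the pattern, assuming Django is installed (every mutating method is rebound to `complain`, so only the error message is configurable):

```python
from django.utils.datastructures import ImmutableList

a = ImmutableList(range(5), warning="You cannot mutate this.")
print(a[3])       # 3 -- reads work like a normal tuple
try:
    a.append(99)  # any mutating method raises AttributeError
except AttributeError as e:
    print(e)      # "You cannot mutate this."
```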
  {
    "library": "pytorch",
    "name": "AOTConfig",
    "source_code": "@dataclass\nclass AOTConfig:\n    fw_compiler: Callable\n    bw_compiler: Callable\n    partition_fn: Callable\n    decompositions: dict[OpOverload, Callable]\n    num_params_buffers: int\n    aot_id: int\n    keep_inference_input_mutations: bool\n    is_export: bool = False\n    no_tangents: bool = False\n    dynamic_shapes: bool = False\n    aot_autograd_arg_pos_to_source: Optional[list[Source]] = None\n    static_input_indices: Optional[list[int]] = None\n    inference_compiler: Optional[Callable] = None\n    enable_log: bool = True\n    pre_dispatch: bool = False\n    cache_info: Optional[AOTAutogradCacheInfo] = None\n    ignore_shape_env: bool = False\n\n    def __post_init__(self):\n        if self.pre_dispatch:\n            assert self.is_export, 'Can only have pre_dispatch IR for export.'",
    "docstring": "Configuration for AOTDispatcher",
    "type": "class",
    "file_path": "pytorch\\torch\\_functorch\\_aot_autograd\\schemas.py",
    "ast_data": "ClassDef name:AOTConfig FunctionDef name:__post_init__ arg:self arguments arg If"
  },
  {
    "library": "matplotlib",
    "name": "minorticks_off",
    "source_code": "def minorticks_off(self):\n    self.set_minor_locator(mticker.NullLocator())",
    "docstring": "Remove minor ticks from the Axis.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\axis.py",
    "ast_data": "FunctionDef name:minorticks_off arg:self arguments arg Call Call"
  },
  {
    "library": "pytorch",
    "name": "metrics_report",
    "source_code": "def metrics_report():\n    return torch._C._lazy._metrics_report()",
    "docstring": "Return the combined (lazy core and backend) metric report",
    "type": "function",
    "file_path": "pytorch\\torch\\_lazy\\metrics.py",
    "ast_data": "FunctionDef name:metrics_report arguments Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "set_module_name",
    "source_code": "def set_module_name(self, module_name: str, quantization_config: Optional[QuantizationConfig]):\n    assert quantization_config is not None, ' quantization_config == None is not supported yet'\n    self.module_name_config[module_name] = quantization_config\n    return self",
    "docstring": "Set quantization_config for a submodule with name: , for example: quantizer.set_module_name(\"blocks.sub\"), it will quantize all supported operator/operator patterns in the submodule with this module name with the given",
    "type": "method",
    "file_path": "pytorch\\torch\\ao\\quantization\\quantizer\\xnnpack_quantizer.py",
    "ast_data": "FunctionDef name:set_module_name arg:self arg:module_name arg:quantization_config arguments arg arg arg Compare Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_SparseSegmentMeanWithNumSegmentsGrad",
    "source_code": "@ops.RegisterGradient('SparseSegmentMeanWithNumSegments')\ndef _SparseSegmentMeanWithNumSegmentsGrad(op: ops.Operation, grad):\n    if _GetOpAttrOrNone(op, 'sparse_gradient'):\n        return (_SparseSegmentReduceGradV2(op, grad, 'mean'), None, None, None)\n    dim0 = array_ops.shape(op.inputs[0])[0]\n    return (math_ops.sparse_segment_mean_grad(grad, op.inputs[1], op.inputs[2], dim0), None, None, None)",
    "docstring": "Gradient for SparseSegmentMeanWithNumSegments.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\math_grad.py",
    "ast_data": "FunctionDef name:_SparseSegmentMeanWithNumSegmentsGrad arg:op arg:grad arguments arg arg If Call Return return:yes Call Assign Call Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "denoise",
    "source_code": "def denoise(self) -> 'FunctionCounts':\n    return self.filter(lambda fn: 'dictobject.c:lookdict_unicode' not in fn)",
    "docstring": "Remove known noisy instructions. Several instructions in the CPython interpreter are rather noisy. These instructions involve unicode to dictionary lookups which Python uses to map variable names. FunctionCounts is generally a content agnostic container, however this is sufficiently important for obtaining reliable results to warrant an exception.",
    "type": "method",
    "file_path": "pytorch\\torch\\utils\\benchmark\\utils\\valgrind_wrapper\\timer_interface.py",
    "ast_data": "FunctionDef name:denoise arg:self arguments arg Return return:yes Call arguments arg Compare"
  },
  {
    "library": "django",
    "name": "check_setting_languages_bidi",
    "source_code": "@register(Tags.translation)\ndef check_setting_languages_bidi(app_configs, **kwargs):\n    return [Error(E003.msg.format(tag), id=E003.id) for tag in settings.LANGUAGES_BIDI if not isinstance(tag, str) or not language_code_re.match(tag)]",
    "docstring": "Error if LANGUAGES_BIDI setting is invalid.",
    "type": "function",
    "file_path": "django\\django\\core\\checks\\translation.py",
    "ast_data": "FunctionDef name:check_setting_languages_bidi arg:app_configs arguments arg arg Return return:yes Call Call BoolOp Call Call Call"
  },
  {
    "library": "numpy",
    "name": "recordmask",
    "source_code": "@property\ndef recordmask(self):\n    _mask = self._mask.view(ndarray)\n    if _mask.dtype.names is None:\n        return _mask\n    return np.all(flatten_structured_array(_mask), axis=-1)",
    "docstring": "Get or set the mask of the array if it has no named fields. For structured arrays, returns a ndarray of booleans where entries are `` otherwise: >>> x = np.ma.array([(1, 1), (2, 2), (3, 3), (4, 4), (5, 5)], ... mask=[(0, 0), (1, 0), (1, 1), (0, 1), (0, 0)], ... dtype=[('a', int), ('b', int)]) >>> x.recordmask array([False, False, True, False, False])",
    "type": "method",
    "file_path": "numpy\\numpy\\ma\\core.py",
    "ast_data": "FunctionDef name:recordmask arg:self arguments arg Assign Call If Compare Return return:yes Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_SignatureMap",
    "source_code": "class _SignatureMap(collections_abc.Mapping, base.Trackable):\n\n    def __init__(self):\n        self._signatures = {}\n\n    def _add_signature(self, name, concrete_function):\n        self._signatures[name] = concrete_function\n\n    def __getitem__(self, key):\n        return self._signatures[key]\n\n    def __iter__(self):\n        return iter(self._signatures)\n\n    def __len__(self):\n        return len(self._signatures)\n\n    def __repr__(self):\n        return '_SignatureMap({})'.format(self._signatures)\n\n    def _trackable_children(self, save_type=base.SaveType.CHECKPOINT, **kwargs):\n        if save_type != base.SaveType.SAVEDMODEL:\n            return {}\n        return {key: value for key, value in self.items() if isinstance(value, (def_function.Function, defun.ConcreteFunction))}",
    "docstring": "A collection of SavedModel signatures.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\saved_model\\signature_serialization.py",
    "ast_data": "ClassDef name:_SignatureMap FunctionDef name:__init__ arg:self arguments arg Assign FunctionDef name:_add_signature arg:self arg:name arg:concrete_function arguments arg arg arg Assign FunctionDef name:__getitem__ arg:self arg:key arguments arg arg Return return:yes FunctionDef name:__iter__ arg:self arguments arg Return return:yes Call FunctionDef name:__len__ arg:self arguments arg Return return:yes Call FunctionDef name:__repr__ arg:self arguments arg Return return:yes Call FunctionDef name:_trackable_children arg:self arg:save_type arguments arg arg arg If Compare Return return:no Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "TritonSymbols",
    "source_code": "class TritonSymbols:\n    reduction_types = OrderedSet([SymT.R0_INDEX, SymT.R1_INDEX])\n    block_types = OrderedSet([SymT.XBLOCK, SymT.YBLOCK, SymT.ZBLOCK, *reduction_types])\n    block_offsets = {symt: sympy.Symbol(f'{prefix_str[symt]}offset', integer=True, nonnegative=True) for symt in block_types}\n    block_sizes = {symt: sympy.Symbol(f'{prefix_str[symt].upper()}BLOCK', integer=True, positive=True) for symt in block_types}\n\n    @classmethod\n    def get_block_size(cls, tree: IterationRanges) -> sympy.Symbol:\n        return cls.block_sizes[tree.symt]\n\n    @classmethod\n    def get_block_offset(cls, tree: IterationRanges) -> sympy.Symbol:\n        return cls.block_offsets[tree.symt]",
    "docstring": "Stores sympy.Symbol instances and constants associated with triton codegen.",
    "type": "class",
    "file_path": "pytorch\\torch\\_inductor\\codegen\\triton.py",
    "ast_data": "ClassDef name:TritonSymbols Assign Call Assign Call Assign Call Assign Call Call FunctionDef name:get_block_size arg:cls arg:tree arguments arg arg Return return:yes FunctionDef name:get_block_offset arg:cls arg:tree arguments arg arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "record_post_forward",
    "source_code": "def record_post_forward(self, handle: Optional[FlatParamHandle]) -> None:\n    if not handle:\n        return\n    if handle._post_forward_index:\n        self.handles_post_forward_order.append(handle)\n        return\n    index = len(self.handles_post_forward_order)\n    handle._post_forward_index = index\n    self.handles_post_forward_order.append(handle)",
    "docstring": "Records `record_pre_forwardnext_iter`.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\fsdp\\_exec_order_utils.py",
    "ast_data": "FunctionDef name:record_post_forward arg:self arg:handle arguments arg arg If Return return:no If Call Return return:no Assign Call Assign Call"
  },
  {
    "library": "tensorflow",
    "name": "from_non_deterministic_state",
    "source_code": "@classmethod\ndef from_non_deterministic_state(cls, alg=None):\n    if config.is_op_determinism_enabled():\n        raise RuntimeError('\"from_non_deterministic_state\" cannot be called when determinism is enabled.')\n    if alg is None:\n        alg = DEFAULT_ALGORITHM\n    alg = random_ops_util.convert_alg_to_int(alg)\n    state = non_deterministic_ints(shape=[_get_state_size(alg)], dtype=SEED_TYPE)\n    return cls(state=state, alg=alg)",
    "docstring": "Creates a generator by non-deterministically initializing its state. The source of the non-determinism will be platform- and time-dependent. Args: alg: (optional) the RNG algorithm. If None, it will be auto-selected. See for its possible values. Returns: The new generator.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\stateful_random_ops.py",
    "ast_data": "FunctionDef name:from_non_deterministic_state arg:cls arg:alg arguments arg arg If Call Raise Call If Compare Assign Assign Call Assign Call Call Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "staged_decision_function",
    "source_code": "def staged_decision_function(self, X):\n    for staged_decision in self._staged_raw_predict(X):\n        if staged_decision.shape[1] == 1:\n            staged_decision = staged_decision.ravel()\n        yield staged_decision",
    "docstring": "Compute decision function of `classes_`.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\ensemble\\_hist_gradient_boosting\\gradient_boosting.py",
    "ast_data": "FunctionDef name:staged_decision_function arg:self arg:X arguments arg arg For Call If Compare Assign Call"
  },
  {
    "library": "tensorflow",
    "name": "get_collection_ref",
    "source_code": "def get_collection_ref(self, name) -> list[Any]:\n    with self._lock:\n        coll_list = self._collections.get(name, None)\n        if coll_list is None:\n            coll_list = []\n            self._collections[name] = coll_list\n        return coll_list",
    "docstring": "Returns a list of values in the collection with the given . If the collection exists, this returns the list itself, which can be modified in place to change the collection. If the collection does not exist, it is created as an empty list and the list is returned. This is different from which always returns a copy of the collection list if it exists and never creates an empty collection. Args: name: The key for the collection. For example, the class contains many standard names for collections. Returns: The list of values in the collection with the given , or an empty list if no value has been added to that collection.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\ops.py",
    "ast_data": "FunctionDef name:get_collection_ref arg:self arg:name arguments arg arg With Assign Call If Compare Assign Assign Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "_generate_sfnts",
    "source_code": "def _generate_sfnts(fontdata, font, breakpoints):\n    s = '/sfnts['\n    pos = 0\n    while pos < len(fontdata):\n        i = bisect.bisect_left(breakpoints, pos + 65534)\n        newpos = breakpoints[i - 1]\n        if newpos <= pos:\n            newpos = breakpoints[-1]\n        s += f'<{fontdata[pos:newpos].hex()}00>'\n        pos = newpos\n    s += ']def'\n    return '\\n'.join((s[i:i + 100] for i in range(0, len(s), 100)))",
    "docstring": "Transform font data into PostScript sfnts format. Helper function for _font_to_ps_type42. Parameters ---------- fontdata : bytes The raw data of the font font : fontTools.ttLib.ttFont.TTFont The fontTools font object breakpoints : list Sorted offsets of possible breakpoints Returns ------- str The sfnts array for the font definition, consisting of hex-encoded strings in PostScript format",
    "type": "function",
    "file_path": "matplotlib\\lib\\matplotlib\\backends\\backend_ps.py",
    "ast_data": "FunctionDef name:_generate_sfnts arg:fontdata arg:font arg:breakpoints arguments arg arg arg Assign Assign While Compare Call Assign Call Assign If Compare Assign Call Assign Return return:yes Call Call Call"
  },
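The core of the function above is the chunking loop: PostScript strings are capped at 65534 bytes, so the data is cut at the largest allowed breakpoint below that limit. A standalone sketch of just that decision, with hypothetical sample data:

```python
import bisect

def split_at_breakpoints(data, breakpoints, limit=65534):
    """Yield (start, end) chunks of data, each at most `limit` bytes,
    cutting only at allowed breakpoints (as _generate_sfnts does)."""
    pos = 0
    while pos < len(data):
        i = bisect.bisect_left(breakpoints, pos + limit)
        newpos = breakpoints[i - 1]   # last breakpoint within the limit
        if newpos <= pos:             # no usable breakpoint: take the final one
            newpos = breakpoints[-1]
        yield pos, newpos
        pos = newpos

data = bytes(100_000)
breakpoints = [0, 30_000, 60_000, 100_000]
print(list(split_at_breakpoints(data, breakpoints)))
# [(0, 60000), (60000, 100000)] -- each chunk fits in a PostScript string
```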
  {
    "library": "pytorch",
    "name": "LocalTimerClient",
    "source_code": "class LocalTimerClient(TimerClient):\n\n    def __init__(self, mp_queue):\n        super().__init__()\n        self._mp_queue = mp_queue\n\n    def acquire(self, scope_id, expiration_time):\n        pid = os.getpid()\n        acquire_request = TimerRequest(pid, scope_id, expiration_time)\n        self._mp_queue.put(acquire_request)\n\n    def release(self, scope_id):\n        pid = os.getpid()\n        release_request = TimerRequest(pid, scope_id, -1)\n        self._mp_queue.put(release_request)",
    "docstring": "Client side of `` is running on and uses pid to uniquely identify a worker. This is particularly useful in situations where one spawns a subprocess (trainer) per GPU on a host with multiple GPU devices.",
    "type": "class",
    "file_path": "pytorch\\torch\\distributed\\elastic\\timer\\local_timer.py",
    "ast_data": "ClassDef name:LocalTimerClient FunctionDef name:__init__ arg:self arg:mp_queue arguments arg arg Call Call Assign FunctionDef name:acquire arg:self arg:scope_id arg:expiration_time arguments arg arg arg Assign Call Assign Call Call FunctionDef name:release arg:self arg:scope_id arguments arg arg Assign Call Assign Call Call"
  },
  {
    "library": "pandas",
    "name": "_get_custom_index_name",
    "source_code": "def _get_custom_index_name(self):\n    return self.xlabel",
    "docstring": "Specify whether xlabel/ylabel should be used to override index name",
    "type": "method",
    "file_path": "pandas\\pandas\\plotting\\_matplotlib\\core.py",
    "ast_data": "FunctionDef name:_get_custom_index_name arg:self arguments arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "create_script_list",
    "source_code": "def create_script_list(obj, type_hint=None):\n    return torch._C.ScriptList(obj)",
    "docstring": "Create a `` and can be passed between Python and TorchScript with reference semantics and zero copy overhead.",
    "type": "function",
    "file_path": "pytorch\\torch\\jit\\_script.py",
    "ast_data": "FunctionDef name:create_script_list arg:obj arg:type_hint arguments arg arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_create_iterators_per_worker",
    "source_code": "def _create_iterators_per_worker(worker_datasets, input_workers, options=None):\n    assert isinstance(input_workers, input_lib.InputWorkers)\n    assert len(worker_datasets) == len(input_workers.worker_devices)\n    iterators = []\n    for i, worker in enumerate(input_workers.worker_devices):\n        with ops.device(worker):\n            worker_devices = input_workers.compute_devices_for_worker(i)\n            iterator = _SingleWorkerDatasetIterator(worker_datasets[i], worker, worker_devices, options)\n            iterators.append(iterator)\n    return iterators",
    "docstring": "Create a multidevice iterator on each of the workers.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\v1\\input_lib.py",
    "ast_data": "FunctionDef name:_create_iterators_per_worker arg:worker_datasets arg:input_workers arg:options arguments arg arg arg Call Compare Call Call Assign For Call With Call Assign Call Assign Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "module_name",
    "source_code": "def module_name(self):\n    if self.user_given_name:\n        return self.user_given_name\n    return self.__class__.__name__",
    "docstring": "this is used to label the operator being benchmarked",
    "type": "method",
    "file_path": "pytorch\\benchmarks\\operator_benchmark\\benchmark_pytorch.py",
    "ast_data": "FunctionDef name:module_name arg:self arguments arg If Return return:yes Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_forward_log_det_jacobian",
    "source_code": "def _forward_log_det_jacobian(self, x):\n    raise NotImplementedError('forward_log_det_jacobian not implemented.')",
    "docstring": "Subclass implementation of public function. In particular, this method differs from the public function, in that it does not take . Thus, this implements the minimal Jacobian determinant calculation (i.e. over ). Args: x: . The input to the \"forward_log_det_jacobian\" evaluation. Returns: forward_log_det_jacobian: , if this bijector is injective. If not injective, returns the k-tuple containing jacobians for the unique points such that .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\distributions\\bijector_impl.py",
    "ast_data": "FunctionDef name:_forward_log_det_jacobian arg:self arg:x arguments arg arg Raise Call"
  },
  {
    "library": "matplotlib",
    "name": "_find_tails",
    "source_code": "def _find_tails(self, mag, rounding=True, half=5, full=10, flag=50):\n    if rounding:\n        mag = half * np.around(mag / half)\n    n_flags, mag = divmod(mag, flag)\n    n_barb, mag = divmod(mag, full)\n    half_flag = mag >= half\n    empty_flag = ~(half_flag | (n_flags > 0) | (n_barb > 0))\n    return (n_flags.astype(int), n_barb.astype(int), half_flag, empty_flag)",
    "docstring": "Find how many of each of the tail pieces is necessary. Parameters ---------- mag : Vector magnitudes; must be non-negative (and an actual ndarray). rounding : bool, default: True Whether to round or to truncate to the nearest half-barb. half, full, flag : float, defaults: 5, 10, 50 Increments for a half-barb, a barb, and a flag. Returns ------- n_flags, n_barbs : int array For each entry in *mag*, the number of flags and barbs. half_flag : bool array For each entry in *mag*, whether a half-barb is needed. empty_flag : bool array For each entry in *mag*, whether nothing is drawn.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\quiver.py",
    "ast_data": "FunctionDef name:_find_tails arg:self arg:mag arg:rounding arg:half arg:full arg:flag arguments arg arg arg arg arg arg If Assign Call Assign Call Assign Call Assign Compare Assign Compare Compare Return return:yes Call Call"
  },
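The decomposition is two chained divmods after rounding to the nearest half-barb increment; tracing it by hand for a few magnitudes:

```python
import numpy as np

mag = np.array([0.0, 7.0, 10.0, 65.0])
half, full, flag = 5, 10, 50

mag = half * np.around(mag / half)       # round to nearest half-barb: [0, 5, 10, 65]
n_flags, mag = np.divmod(mag, flag)      # [0, 0, 0, 1], remainder [0, 5, 10, 15]
n_barb, mag = np.divmod(mag, full)       # [0, 0, 1, 1], remainder [0, 5, 0, 5]
half_flag = mag >= half                  # [False, True, False, True]
empty_flag = ~(half_flag | (n_flags > 0) | (n_barb > 0))  # [True, False, False, False]
print(n_flags, n_barb, half_flag, empty_flag)
# 65 decomposes into one flag (50), one full barb (10), and one half barb (5)
```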
  {
    "library": "pytorch",
    "name": "RMSNorm",
    "source_code": "class RMSNorm(Module):\n    __constants__ = ['normalized_shape', 'eps', 'elementwise_affine']\n    normalized_shape: tuple[int, ...]\n    eps: Optional[float]\n    elementwise_affine: bool\n\n    def __init__(self, normalized_shape: _shape_t, eps: Optional[float]=None, elementwise_affine: bool=True, device=None, dtype=None) -> None:\n        factory_kwargs = {'device': device, 'dtype': dtype}\n        super().__init__()\n        if isinstance(normalized_shape, numbers.Integral):\n            normalized_shape = (normalized_shape,)\n        self.normalized_shape = tuple(normalized_shape)\n        self.eps = eps\n        self.elementwise_affine = elementwise_affine\n        if self.elementwise_affine:\n            self.weight = Parameter(torch.empty(self.normalized_shape, **factory_kwargs))\n        else:\n            self.register_parameter('weight', None)\n        self.reset_parameters()\n\n    def reset_parameters(self) -> None:\n        if self.elementwise_affine:\n            init.ones_(self.weight)\n\n    def forward(self, x: torch.Tensor) -> torch.Tensor:\n        return F.rms_norm(x, self.normalized_shape, self.weight, self.eps)\n\n    def extra_repr(self) -> str:\n        return '{normalized_shape}, eps={eps}, elementwise_affine={elementwise_affine}'.format(**self.__dict__)",
    "docstring": "Applies Root Mean Square Layer Normalization over a mini-batch of inputs. This layer implements the operation as described in the paper __ .. math:: y_i = \\frac{x_i}{\\mathrm{RMS}(x)} * \\gamma_i, \\quad \\text{where} \\quad \\text{RMS}(x) = \\sqrt{\\epsilon + \\frac{1}{n} \\sum_{i=1}^{n} x_i^2} The RMS is taken over the last `normalized_shapenormalized_shape(N, *)(N, *)` (same shape as input) Examples:: >>> rms_norm = nn.RMSNorm([2, 3]) >>> input = torch.randn(2, 2, 3) >>> rms_norm(input)",
    "type": "class",
    "file_path": "pytorch\\torch\\nn\\modules\\normalization.py",
    "ast_data": "ClassDef name:RMSNorm Assign FunctionDef name:__init__ arg:self arg:normalized_shape arg:eps arg:elementwise_affine arg:device arg:dtype arguments arg arg arg arg arg arg Assign Call Call If Call Assign Assign Call Assign Assign If Assign Call Call Call Call FunctionDef name:reset_parameters arg:self arguments arg If Call FunctionDef name:forward arg:self arg:x arguments arg arg Return return:yes Call FunctionDef name:extra_repr arg:self arguments arg Return return:yes Call"
  },
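The docstring formula can be checked directly against the module, assuming a PyTorch version that ships `nn.RMSNorm` (2.4+):

```python
import torch
import torch.nn as nn

x = torch.randn(2, 4, 3)
m = nn.RMSNorm([3], eps=1e-6)  # normalize over the last dimension

# Manual computation of the docstring formula:
# y = x / sqrt(eps + mean(x^2, over normalized dims)) * weight
rms = torch.sqrt(1e-6 + x.pow(2).mean(dim=-1, keepdim=True))
manual = x / rms * m.weight

print(torch.allclose(m(x), manual, atol=1e-6))  # True
```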
  {
    "library": "kornia",
    "name": "__mul__",
    "source_code": "def __mul__(self, right: Se2 | Tensor) -> Se2 | Tensor:\n    so2 = self.so2\n    t = self.t\n    if isinstance(right, Se2):\n        KORNIA_CHECK_TYPE(right, Se2)\n        return self._mul_se2(right)\n    elif isinstance(right, (Vector2, Tensor)):\n        return so2 * right + t\n    else:\n        raise TypeError(f'Unsupported type: {type(right)}')",
    "docstring": "Compose two Se2 transformations. Args: right: the other Se2 transformation. Return: The resulting Se2 transformation.",
    "type": "method",
    "file_path": "kornia\\kornia\\geometry\\liegroup\\se2.py",
    "ast_data": "FunctionDef name:__mul__ arg:self arg:right arguments arg arg Assign Assign If Call Call Return return:yes Call If Call Return return:yes Raise Call Call"
  },
  {
    "library": "kornia",
    "name": "TFeat",
    "source_code": "class TFeat(nn.Module):\n    patch_size = 32\n\n    def __init__(self, pretrained: bool=False) -> None:\n        super().__init__()\n        self.features = nn.Sequential(nn.InstanceNorm2d(1, affine=False), nn.Conv2d(1, 32, kernel_size=7), nn.Tanh(), nn.MaxPool2d(kernel_size=2, stride=2), nn.Conv2d(32, 64, kernel_size=6), nn.Tanh())\n        self.descr = nn.Sequential(nn.Linear(64 * 8 * 8, 128), nn.Tanh())\n        if pretrained:\n            pretrained_dict = torch.hub.load_state_dict_from_url(urls['liberty'], map_location=torch.device('cpu'))\n            self.load_state_dict(pretrained_dict, strict=True)\n        self.eval()\n\n    def forward(self, input: torch.Tensor) -> torch.Tensor:\n        KORNIA_CHECK_SHAPE(input, ['B', '1', '32', '32'])\n        x = self.features(input)\n        x = x.view(x.size(0), -1)\n        x = self.descr(x)\n        return x",
    "docstring": "Module, which computes TFeat descriptors of given grayscale patches of 32x32. This is based on the original code from paper \"Learning local feature descriptors with triplets and shallow convolutional neural networks\". See :cite: for more details Args: pretrained: Download and set pretrained weights to the model. Returns: torch.Tensor: TFeat descriptor of the patches. Shape: - Input: :math: - Output: :math: Examples: >>> input = torch.rand(16, 1, 32, 32) >>> tfeat = TFeat() >>> descs = tfeat(input) # 16x128",
    "type": "class",
    "file_path": "kornia\\kornia\\feature\\tfeat.py",
    "ast_data": "ClassDef name:TFeat Assign FunctionDef name:__init__ arg:self arg:pretrained arguments arg arg Call Call Assign Call Call Call Call Call Call Call Assign Call Call Call If Assign Call Call Call Call FunctionDef name:forward arg:self arg:input arguments arg arg Call Assign Call Assign Call Call Assign Call Return return:yes"
  },
  {
    "library": "scipy",
    "name": "_set_domain_adj",
    "source_code": "def _set_domain_adj(self):\n    loc = self.loc\n    scale = self.scale\n    lb = self._domain[0] * scale + loc\n    ub = self._domain[1] * scale + loc\n    self._domain_adj = (lb, ub)",
    "docstring": "Adjust the domain based on loc and scale.",
    "type": "method",
    "file_path": "scipy\\scipy\\stats\\_sampling.py",
    "ast_data": "FunctionDef name:_set_domain_adj arg:self arguments arg Assign Assign Assign Assign Assign"
  },
  {
    "library": "pytorch",
    "name": "apply",
    "source_code": "def apply(self, fn: Callable[['Module'], None]) -> Self:\n    for module in self.children():\n        module.apply(fn)\n    fn(self)\n    return self",
    "docstring": "Apply `nn-init-docModule` -> None): function to be applied to each submodule Returns: Module: self Example:: >>> @torch.no_grad() >>> def init_weights(m): >>> print(m) >>> if type(m) == nn.Linear: >>> m.weight.fill_(1.0) >>> print(m.weight) >>> net = nn.Sequential(nn.Linear(2, 2), nn.Linear(2, 2)) >>> net.apply(init_weights) Linear(in_features=2, out_features=2, bias=True) Parameter containing: tensor([[1., 1.], [1., 1.]], requires_grad=True) Linear(in_features=2, out_features=2, bias=True) Parameter containing: tensor([[1., 1.], [1., 1.]], requires_grad=True) Sequential( (0): Linear(in_features=2, out_features=2, bias=True) (1): Linear(in_features=2, out_features=2, bias=True) )",
    "type": "method",
    "file_path": "pytorch\\torch\\nn\\modules\\module.py",
    "ast_data": "FunctionDef name:apply arg:self arg:fn arguments arg arg For Call Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "GlorotUniform",
    "source_code": "class GlorotUniform(VarianceScaling):\n\n    def __init__(self, seed=None):\n        super(GlorotUniform, self).__init__(scale=1.0, mode='fan_avg', distribution='uniform', seed=seed)\n\n    def get_config(self):\n        return {'seed': self.seed}",
    "docstring": "The Glorot uniform initializer, also called Xavier uniform initializer. Also available via the shortcut function . Draws samples from a uniform distribution within , where ( is the number of input units in the weight tensor and is the number of output units). Examples: >>> # Standalone usage: >>> initializer = tf.keras.initializers.GlorotUniform() >>> values = initializer(shape=(2, 2)) >>> # Usage in a Keras layer: >>> initializer = tf.keras.initializers.GlorotUniform() >>> layer = tf.keras.layers.Dense(3, kernel_initializer=initializer) Args: seed: A Python integer. An initializer created with a given seed will always produce the same random tensor for a given shape and dtype. References: [Glorot et al., 2010]( ([pdf](",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\initializers\\initializers_v2.py",
    "ast_data": "ClassDef name:GlorotUniform FunctionDef name:__init__ arg:self arg:seed arguments arg arg Call Call FunctionDef name:get_config arg:self arguments arg Return return:yes"
  },
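The `[-limit, limit]` bound is easy to verify empirically, assuming TensorFlow 2.x in eager mode:

```python
import numpy as np
import tensorflow as tf

fan_in, fan_out = 100, 50
init = tf.keras.initializers.GlorotUniform(seed=0)
values = init(shape=(fan_in, fan_out)).numpy()

# limit = sqrt(6 / (fan_in + fan_out)) bounds every drawn sample.
limit = np.sqrt(6.0 / (fan_in + fan_out))
print(values.min() >= -limit, values.max() <= limit)  # True True
```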
  {
    "library": "django",
    "name": "activate",
    "source_code": "def activate(timezone):\n    if isinstance(timezone, tzinfo):\n        _active.value = timezone\n    elif isinstance(timezone, str):\n        _active.value = zoneinfo.ZoneInfo(timezone)\n    else:\n        raise ValueError('Invalid timezone: %r' % timezone)",
    "docstring": "Set the time zone for the current thread. The `` argument must be an instance of a tzinfo subclass or a time zone name.",
    "type": "function",
    "file_path": "django\\django\\utils\\timezone.py",
    "ast_data": "FunctionDef name:activate arg:timezone arguments arg If Call Assign If Call Assign Call Raise Call"
  },
  {
    "library": "tensorflow",
    "name": "ShardableTensor",
    "source_code": "@tf_export.tf_export('train.experimental.ShardableTensor')\n@dataclasses.dataclass(frozen=True)\nclass ShardableTensor:\n    _tensor_save_spec: saveable_object.SaveSpec\n    tensor: tensor_lib.Tensor\n    dtype: dtypes.DType\n    device: device_lib.DeviceSpec\n    name: str\n    shape: tensor_shape.TensorShape\n    slice_spec: variables.Variable.SaveSliceInfo\n    checkpoint_key: str\n    trackable: base.Trackable\n\n    def __hash__(self) -> int:\n        return hash((self.name, self.dtype, str(self.device), self.checkpoint_key))\n\n    def __repr__(self) -> str:\n        return f'\\n{self.__class__.__name__}:\\n  _tensor_save_spec={self._tensor_save_spec!r}\\n  tensor={self.tensor!r}\\n  dtype={self.dtype!r}\\n  device={self.device!r}\\n  name={self.name!r}\\n  shape={self.shape!r}\\n  slice_spec={self.slice_spec!r}\\n  checkpoint_key={self.checkpoint_key!r}\\n  trackable={self.trackable!r}'",
    "docstring": "Tensor wrapper containing data necessary for sharding. The tensor representation used as inputs to pre-made and custom s, which can be specified using the option in .",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\checkpoint\\sharding\\sharding_util.py",
    "ast_data": "ClassDef name:ShardableTensor FunctionDef name:__hash__ arg:self arguments arg Return return:yes Call Call FunctionDef name:__repr__ arg:self arguments arg Return return:yes Call Call"
  },
  {
    "library": "django",
    "name": "InvalidAlgorithm",
    "source_code": "class InvalidAlgorithm(ValueError):\n    pass",
    "docstring": "Algorithm is not supported by hashlib.",
    "type": "class",
    "file_path": "django\\django\\utils\\crypto.py",
    "ast_data": "ClassDef name:InvalidAlgorithm"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, reader_ref, supports_serialize=False):\n    if context.executing_eagerly():\n        raise RuntimeError('Readers are not supported when eager execution is enabled. Instead, please use tf.data to get data into your model.')\n    self._reader_ref = reader_ref\n    self._supports_serialize = supports_serialize",
    "docstring": "Creates a new ReaderBase. Args: reader_ref: The operation that implements the reader. supports_serialize: True if the reader implementation can serialize its state. Raises: RuntimeError: If eager execution is enabled.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\io_ops.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:reader_ref arg:supports_serialize arguments arg arg arg If Call Raise Call Assign Assign"
  },
  {
    "library": "pytorch",
    "name": "ProfilerContextVariable",
    "source_code": "class ProfilerContextVariable(ContextWrappingVariable):\n\n    def __init__(self, **kwargs) -> None:\n        super().__init__(target_values=None, **kwargs)\n\n    def enter(self, tx):\n        return self\n\n    def exit(self, tx: 'InstructionTranslator', *args):\n        return variables.ConstantVariable.create(None)\n\n    def module_name(self):\n        return 'contextlib'\n\n    def fn_name(self):\n        return 'nullcontext'\n\n    def reconstruct(self, cg):\n        unimplemented_v2(gb_type='torch.profiler object escaped from compiled region', context=str(self), explanation=\"Dynamo doesn't support compiling a region that returns a torch.profiler context manager.\", hints=[*graph_break_hints.SUPPORTABLE])",
    "docstring": "This class represents a set of torch profiler context objects, where Dynamo ignores all the side-effects in the __init__, __enter__ and __exit__ methods by treating the object mostly as a , except for edge cases like the method which returns the object itself rather than , per implementation of the torch objects.",
    "type": "class",
    "file_path": "pytorch\\torch\\_dynamo\\variables\\ctx_manager.py",
    "ast_data": "ClassDef name:ProfilerContextVariable FunctionDef name:__init__ arg:self arguments arg arg Call Call FunctionDef name:enter arg:self arg:tx arguments arg arg Return return:yes FunctionDef name:exit arg:self arg:tx arguments arg arg arg Return return:yes Call FunctionDef name:module_name arg:self arguments arg Return return:yes FunctionDef name:fn_name arg:self arguments arg Return return:yes FunctionDef name:reconstruct arg:self arg:cg arguments arg arg Call Call"
  },
  {
    "library": "pytorch",
    "name": "reshard",
    "source_code": "def reshard(self, free_unsharded_flat_param: bool):\n    self._use_sharded_flat_param()\n    if free_unsharded_flat_param:\n        self._free_unsharded_flat_param()",
    "docstring": "Run the reshard logic. This includes freeing the unsharded flat parameter if `` attribute which resides on CPU.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\fsdp\\_flat_param.py",
    "ast_data": "FunctionDef name:reshard arg:self arg:free_unsharded_flat_param arguments arg arg Call If Call"
  },
  {
    "library": "pytorch",
    "name": "_detect_fake_mode_from_gm",
    "source_code": "def _detect_fake_mode_from_gm(gm: torch.fx.GraphModule) -> torch._subclasses.fake_tensor.FakeTensorMode:\n    fake_inps: list[torch.Tensor] = []\n    fake_vals: list[torch.Tensor] = []\n    for node in gm.graph.nodes:\n        if node.op == 'placeholder' and 'val' in node.meta:\n            fake_val = node.meta['val']\n            if fake_val is not None and isinstance(fake_val, torch.Tensor):\n                fake_inps.append(fake_val)\n        elif len(fake_inps) == 0 and ('example_value' in node.meta or 'val' in node.meta):\n            fake_val = None\n            if 'example_value' in node.meta:\n                fake_val = node.meta['example_value']\n            elif 'val' in node.meta:\n                fake_val = node.meta['val']\n            if fake_val is not None and isinstance(fake_val, torch.Tensor):\n                fake_vals.append(fake_val)\n    return detect_fake_mode(fake_inps + fake_vals)",
    "docstring": "For a given graph module, we look at the \"val\" of placeholder nodes to find the fake inputs. Additionally, if gm doesn't have placeholders, we further look at the \"example_value\" or \"val\" of other nodes. If no fake mode is found, we return None for fake_mode.",
    "type": "function",
    "file_path": "pytorch\\torch\\_export\\utils.py",
    "ast_data": "FunctionDef name:_detect_fake_mode_from_gm arg:gm arguments arg For If BoolOp Compare Compare Assign If BoolOp Compare Call Call If BoolOp Compare Call BoolOp Compare Compare Assign If Compare Assign If Compare Assign If BoolOp Compare Call Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_make_execution_function",
    "source_code": "def _make_execution_function(model, mode):\n    if is_distributing_by_cloning(model):\n        return _make_execution_function_with_cloning(model, mode)\n    distributed_function = get_distributed_function(model, mode)\n    if distributed_function:\n        return distributed_function\n    distribution_function = _make_execution_function_without_cloning(model, mode)\n    set_distributed_function(model, mode, distribution_function)\n    return distribution_function",
    "docstring": "Makes or reuses function to run one step of distributed model execution.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\distribute\\distributed_training_utils_v1.py",
    "ast_data": "FunctionDef name:_make_execution_function arg:model arg:mode arguments arg arg If Call Return return:yes Call Assign Call If Return return:yes Assign Call Call Return return:yes"
  },
  {
    "library": "pandas",
    "name": "_pad_or_backfill",
    "source_code": "def _pad_or_backfill(self, *, method: FillnaOptions, limit: int | None=None, limit_area: Literal['inside', 'outside'] | None=None, copy: bool=True) -> Self:\n    if copy:\n        out_data = self._ndarray.copy()\n    else:\n        out_data = self._ndarray\n    meth = missing.clean_fill_method(method)\n    missing.pad_or_backfill_inplace(out_data.T, method=meth, axis=0, limit=limit, limit_area=limit_area)\n    if not copy:\n        return self\n    return type(self)._simple_new(out_data, dtype=self.dtype)",
    "docstring": "ffill or bfill along axis=0.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\arrays\\numpy_.py",
    "ast_data": "FunctionDef name:_pad_or_backfill arg:self arguments arg arg arg arg arg If Assign Call Assign Assign Call Call If Return return:yes Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "StateDictType",
    "source_code": "class StateDictType(Enum):\n    FULL_STATE_DICT = auto()\n    LOCAL_STATE_DICT = auto()\n    SHARDED_STATE_DICT = auto()",
    "docstring": "This enum indicates that which type of `: this pair of APIs return and load the non-sharded, unflattened parameters. The semantics is the same as using DDP. 2. `state_dict_type` can be used by all other parallel schemes (resharding may be required).",
    "type": "class",
    "file_path": "pytorch\\torch\\distributed\\fsdp\\api.py",
    "ast_data": "ClassDef name:StateDictType Assign Call Assign Call Assign Call"
  },
  {
    "library": "scikit-learn",
    "name": "generate_invalid_param_val",
    "source_code": "def generate_invalid_param_val(constraint):\n    if isinstance(constraint, StrOptions):\n        return f'not {' or '.join(constraint.options)}'\n    if isinstance(constraint, MissingValues):\n        return np.array([1, 2, 3])\n    if isinstance(constraint, _VerboseHelper):\n        return -1\n    if isinstance(constraint, HasMethods):\n        return type('HasNotMethods', (), {})()\n    if isinstance(constraint, _IterablesNotString):\n        return 'a string'\n    if isinstance(constraint, _CVObjects):\n        return 'not a cv object'\n    if isinstance(constraint, Interval) and constraint.type is Integral:\n        if constraint.left is not None:\n            return constraint.left - 1\n        if constraint.right is not None:\n            return constraint.right + 1\n        raise NotImplementedError\n    if isinstance(constraint, Interval) and constraint.type in (Real, RealNotInt):\n        if constraint.left is not None:\n            return constraint.left - 1e-06\n        if constraint.right is not None:\n            return constraint.right + 1e-06\n        if constraint.closed in ('right', 'neither'):\n            return -np.inf\n        if constraint.closed in ('left', 'neither'):\n            return np.inf\n        return np.nan\n    raise NotImplementedError",
    "docstring": "Return a value that does not satisfy the constraint. Raises a NotImplementedError if there exists no invalid value for this constraint. This is only useful for testing purpose. Parameters ---------- constraint : _Constraint instance The constraint to generate a value for. Returns ------- val : object A value that does not satisfy the constraint.",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\utils\\_param_validation.py",
    "ast_data": "FunctionDef name:generate_invalid_param_val arg:constraint arguments arg If Call Return return:yes Call If Call Return return:yes Call If Call Return return:yes If Call Return return:yes Call Call If Call Return return:yes If Call Return return:yes If BoolOp Call Compare If Compare Return return:yes If Compare Return return:yes Raise If BoolOp Call Compare If Compare Return return:yes If Compare Return return:yes If Compare Return return:yes If Compare Return return:yes Return return:yes Raise"
  },
  {
    "library": "tensorflow",
    "name": "assert_zero_imag_part",
    "source_code": "def assert_zero_imag_part(x, message=None, name='assert_zero_imag_part'):\n    with ops.name_scope(name, values=[x]):\n        x = tensor_conversion.convert_to_tensor_v2_with_dispatch(x, name='x')\n        dtype = x.dtype.base_dtype\n        if dtype.is_floating:\n            return control_flow_ops.no_op()\n        zero = tensor_conversion.convert_to_tensor_v2_with_dispatch(0, dtype=dtype.real_dtype)\n        return check_ops.assert_equal(zero, math_ops.imag(x), message=message)",
    "docstring": "Returns that asserts Tensor has no non-zero imaginary parts. Args: x: Numeric , real, integer, or complex. message: A string message to prepend to failure message. name: A name to give this . Returns: An that asserts has no entries with modulus zero.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\linalg\\linear_operator_util.py",
    "ast_data": "FunctionDef name:assert_zero_imag_part arg:x arg:message arg:name arguments arg arg arg With Call Assign Call Assign If Return return:yes Call Assign Call Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "graph_mode",
    "source_code": "def graph_mode():\n    return context()._mode(GRAPH_MODE)",
    "docstring": "Context-manager to disable eager execution for the current thread.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\context.py",
    "ast_data": "FunctionDef name:graph_mode arguments Return return:yes Call Call"
  },
  {
    "library": "matplotlib",
    "name": "_load_bitmap",
    "source_code": "def _load_bitmap(filename):\n    return wx.Bitmap(str(cbook._get_data_path('images', filename)))",
    "docstring": "Load a wx.Bitmap from a file in the \"images\" directory of the Matplotlib data.",
    "type": "function",
    "file_path": "matplotlib\\lib\\matplotlib\\backends\\backend_wx.py",
    "ast_data": "FunctionDef name:_load_bitmap arg:filename arguments arg Return return:yes Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "is_lowp_fp_source_no_promote",
    "source_code": "def is_lowp_fp_source_no_promote(node: torch.fx.Node, dt: torch.dtype):\n    return is_lowp_fp_source(node, dt) and all((is_lowp_fp_sink(user, dt) for user in node.users))",
    "docstring": "Check if the node is a lowp fp sources which are all directly fed to ops that accepts lowp fp input thus no need to promote to float",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\codegen\\cpp.py",
    "ast_data": "FunctionDef name:is_lowp_fp_source_no_promote arg:node arg:dt arguments arg arg Return return:yes BoolOp Call Call Call"
  },
  {
    "library": "kornia",
    "name": "ssim3d_loss",
    "source_code": "def ssim3d_loss(img1: Tensor, img2: Tensor, window_size: int, max_val: float=1.0, eps: float=1e-12, reduction: str='mean', padding: str='same') -> Tensor:\n    ssim_map: Tensor = metrics.ssim3d(img1, img2, window_size, max_val, eps, padding)\n    loss = 1.0 - ssim_map\n    if reduction == 'mean':\n        loss = loss.mean()\n    elif reduction == 'sum':\n        loss = loss.sum()\n    elif reduction == 'none':\n        pass\n    else:\n        raise NotImplementedError('Invalid reduction option.')\n    return loss",
    "docstring": "Compute a loss based on the SSIM measurement. The loss, or the Structural dissimilarity (DSSIM) is described as: .. math:: \\text{loss}(x, y) = \\frac{1 - \\text{SSIM}(x, y)}{2} See :meth: for details about SSIM. Args: img1: the first input image with shape :math:. img2: the second input image with shape :math:. window_size: the size of the gaussian kernel to smooth the images. max_val: the dynamic range of the images. eps: Small value for numerically stability when dividing. reduction : Specifies the reduction to apply to the output: ``. Whether to only use the \"valid\" convolution area to compute SSIM to match the MATLAB implementation of original SSIM paper. Returns: The loss based on the ssim index. Examples: >>> input1 = torch.rand(1, 4, 5, 5, 5) >>> input2 = torch.rand(1, 4, 5, 5, 5) >>> loss = ssim3d_loss(input1, input2, 5)",
    "type": "function",
    "file_path": "kornia\\kornia\\losses\\ssim3d.py",
    "ast_data": "FunctionDef name:ssim3d_loss arg:img1 arg:img2 arg:window_size arg:max_val arg:eps arg:reduction arg:padding arguments arg arg arg arg arg arg arg Call Assign If Compare Assign Call If Compare Assign Call If Compare Raise Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "_is_valid_shortcut",
    "source_code": "def _is_valid_shortcut(self, key):\n    return 'cmd+' not in key and (not key.startswith('MouseButton.'))",
    "docstring": "Check for a valid shortcut to be displayed. - GTK will never send 'cmd+' (see ). - The shortcut window only shows keyboard shortcuts, not mouse buttons.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\backends\\backend_gtk3.py",
    "ast_data": "FunctionDef name:_is_valid_shortcut arg:self arg:key arguments arg arg Return return:yes BoolOp Compare Call"
  },
  {
    "library": "pytorch",
    "name": "_observer_forward_pre_hook",
    "source_code": "def _observer_forward_pre_hook(self, input):\n    return self.activation_post_process(input[0])",
    "docstring": "Forward pre hook that calls observer on the output",
    "type": "function",
    "file_path": "pytorch\\torch\\ao\\quantization\\quantize.py",
    "ast_data": "FunctionDef name:_observer_forward_pre_hook arg:self arg:input arguments arg arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_slot_names",
    "source_code": "@abc.abstractmethod\ndef _slot_names(self) -> List[Text]:\n    raise NotImplementedError",
    "docstring": "Returns the name of all the slot variables. This does not include the 'parameters' variable and these names must match the names of the slots variables as used in the corresponding ops.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\tpu\\tpu_embedding_v2_utils.py",
    "ast_data": "FunctionDef name:_slot_names arg:self arguments arg Raise"
  },
  {
    "library": "tensorflow",
    "name": "_bucketized_column",
    "source_code": "def _bucketized_column(source_column, boundaries):\n    if not isinstance(source_column, _NumericColumn):\n        raise ValueError('source_column must be a column generated with numeric_column(). Given: {}'.format(source_column))\n    if len(source_column.shape) > 1:\n        raise ValueError('source_column must be one-dimensional column. Given: {}'.format(source_column))\n    if not boundaries or not (isinstance(boundaries, list) or isinstance(boundaries, tuple)):\n        raise ValueError('boundaries must be a sorted list.')\n    for i in range(len(boundaries) - 1):\n        if boundaries[i] >= boundaries[i + 1]:\n            raise ValueError('boundaries must be a sorted list.')\n    return _BucketizedColumn(source_column, tuple(boundaries))",
    "docstring": "Represents discretized dense input. Buckets include the left boundary, and exclude the right boundary. Namely, generates buckets , , , and . For example, if the inputs are then the output will be Example: A can also be crossed with another categorical column using : Args: source_column: A one-dimensional dense column which is generated with . boundaries: A sorted list or tuple of floats specifying the boundaries. Returns: A . Raises: ValueError: If is not a numeric column, or if it is not one-dimensional. ValueError: If is not a sorted list or tuple.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\feature_column\\feature_column.py",
    "ast_data": "FunctionDef name:_bucketized_column arg:source_column arg:boundaries arguments arg arg If Call Raise Call Call If Compare Call Raise Call Call If BoolOp BoolOp Call Call Raise Call For Call Call If Compare Raise Call Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_log_epoch_metrics",
    "source_code": "def _log_epoch_metrics(self, epoch, logs):\n    if not logs:\n        return\n    train_logs = {k: v for k, v in logs.items() if not k.startswith('val_')}\n    val_logs = {k: v for k, v in logs.items() if k.startswith('val_')}\n    train_logs = self._collect_learning_rate(train_logs)\n    if self.write_steps_per_second:\n        train_logs['steps_per_second'] = self._compute_steps_per_second()\n    with summary_ops_v2.record_if(True):\n        if train_logs:\n            with self._train_writer.as_default():\n                for name, value in train_logs.items():\n                    summary_ops_v2.scalar('epoch_' + name, value, step=epoch)\n        if val_logs:\n            with self._val_writer.as_default():\n                for name, value in val_logs.items():\n                    name = name[4:]\n                    summary_ops_v2.scalar('epoch_' + name, value, step=epoch)",
    "docstring": "Writes epoch metrics out as scalar summaries. Args: epoch: Int. The global step to use for TensorBoard. logs: Dict. Keys are scalar summary names, values are scalars.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\callbacks.py",
    "ast_data": "FunctionDef name:_log_epoch_metrics arg:self arg:epoch arg:logs arguments arg arg arg If Return return:no Assign Call Call Assign Call Call Assign Call If Assign Call With Call If With Call For Call Call If With Call For Call Assign Call"
  },
  {
    "library": "scipy",
    "name": "step",
    "source_code": "def step(self, x0=None, t=None, n=None):\n    return dstep(self, x0=x0, t=t, n=n)",
    "docstring": "Return the step response of the discrete-time system. See for details.",
    "type": "method",
    "file_path": "scipy\\scipy\\signal\\_ltisys.py",
    "ast_data": "FunctionDef name:step arg:self arg:x0 arg:t arg:n arguments arg arg arg arg Return return:yes Call"
  },
  {
    "library": "kornia",
    "name": "get_hanning_kernel2d",
    "source_code": "def get_hanning_kernel2d(kernel_size: tuple[int, int] | int, device: Optional[Device]=None, dtype: Optional[Dtype]=None) -> Tensor:\n    kernel_size = _unpack_2d_ks(kernel_size)\n    _check_kernel_size(kernel_size, 2, allow_even=True)\n    ky = get_hanning_kernel1d(kernel_size[0], device, dtype)[None].T\n    kx = get_hanning_kernel1d(kernel_size[1], device, dtype)[None]\n    kernel2d = ky @ kx\n    return kernel2d",
    "docstring": "Return 2d Hanning kernel, used in signal processing and KCF tracker. Args: kernel_size: The size of the kernel for the filter. It should be positive. device: tensor device desired to create the kernel dtype: tensor dtype desired to create the kernel Returns: 2D tensor with Hanning filter coefficients. Shape: math: .. math:: w(n) = 0.5 - 0.5cos\\\\left(\\\\frac{2\\\\pi{n}}{M-1}\\\\right)",
    "type": "function",
    "file_path": "kornia\\kornia\\filters\\kernels.py",
    "ast_data": "FunctionDef name:get_hanning_kernel2d arg:kernel_size arg:device arg:dtype arguments arg arg arg Assign Call Call Assign Call Assign Call Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "clear_stop",
    "source_code": "def clear_stop(self):\n    with self._lock:\n        self._joined = False\n        self._exc_info_to_raise = None\n        if self._stop_event.is_set():\n            self._stop_event.clear()",
    "docstring": "Clears the stop flag. After this is called, calls to will return .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\training\\coordinator.py",
    "ast_data": "FunctionDef name:clear_stop arg:self arguments arg With Assign Assign If Call Call"
  },
  {
    "library": "matplotlib",
    "name": "twiny",
    "source_code": "def twiny(self, axes_class=None, **kwargs):\n    if axes_class:\n        kwargs['axes_class'] = axes_class\n    ax2 = self._make_twin_axes(sharey=self, **kwargs)\n    ax2.xaxis.tick_top()\n    ax2.xaxis.set_label_position('top')\n    ax2.set_autoscaley_on(self.get_autoscaley_on())\n    self.xaxis.tick_bottom()\n    ax2.yaxis.set_visible(False)\n    ax2.patch.set_visible(False)\n    ax2.yaxis.units = self.yaxis.units\n    return ax2",
    "docstring": "Create a twin Axes sharing the yaxis. Create a new Axes with an invisible y-axis and an independent x-axis positioned opposite to the original one (i.e. at top). The y-axis autoscale setting will be inherited from the original Axes. To ensure that the tick marks of both x-axes align, see . Parameters ---------- axes_class : subclass type of , optional The subclass that is instantiated. This parameter is incompatible with *projection* and *polar*. See :ref: for examples. By default, is used. .. versionadded:: 3.11 kwargs : dict The keyword arguments passed to or . .. versionadded:: 3.11 Returns ------- Axes The newly created Axes instance Notes ----- For those who are 'picking' artists while using twiny, pick events are only called for the artists in the top-most Axes.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\axes\\_base.py",
    "ast_data": "FunctionDef name:twiny arg:self arg:axes_class arguments arg arg arg If Assign Assign Call Call Call Call Call Call Call Call Assign Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "reweight_covariance",
    "source_code": "def reweight_covariance(self, data):\n    n_samples, n_features = data.shape\n    mask = self.dist_ < chi2(n_features).isf(0.025)\n    if self.assume_centered:\n        location_reweighted = np.zeros(n_features)\n    else:\n        location_reweighted = data[mask].mean(0)\n    covariance_reweighted = self._nonrobust_covariance(data[mask], assume_centered=self.assume_centered)\n    support_reweighted = np.zeros(n_samples, dtype=bool)\n    support_reweighted[mask] = True\n    self._set_covariance(covariance_reweighted)\n    self.location_ = location_reweighted\n    self.support_ = support_reweighted\n    X_centered = data - self.location_\n    self.dist_ = np.sum(np.dot(X_centered, self.get_precision()) * X_centered, 1)\n    return (location_reweighted, covariance_reweighted, support_reweighted)",
    "docstring": "Re-weight raw Minimum Covariance Determinant estimates. Re-weight observations using Rousseeuw's method (equivalent to deleting outlying observations from the data set before computing location and covariance estimates) described in [RVDriessen]_. Parameters ---------- data : array-like of shape (n_samples, n_features) The data matrix, with p features and n samples. The data set must be the one which was used to compute the raw estimates. Returns ------- location_reweighted : ndarray of shape (n_features,) Re-weighted robust location estimate. covariance_reweighted : ndarray of shape (n_features, n_features) Re-weighted robust covariance estimate. support_reweighted : ndarray of shape (n_samples,), dtype=bool A mask of the observations that have been used to compute the re-weighted robust location and covariance estimates. References ---------- .. [RVDriessen] A Fast Algorithm for the Minimum Covariance Determinant Estimator, 1999, American Statistical Association and the American Society for Quality, TECHNOMETRICS",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\covariance\\_robust_covariance.py",
    "ast_data": "FunctionDef name:reweight_covariance arg:self arg:data arguments arg arg Assign Assign Compare Call Call If Assign Call Assign Call Assign Call Assign Call Assign Call Assign Assign Assign Assign Call Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "ConvertNumpyArrayToLiteral",
    "source_code": "def ConvertNumpyArrayToLiteral(value):\n    if isinstance(value, tuple):\n        literal = xla_data_pb2.LiteralProto()\n        literal.shape.CopyFrom(xla_shape.CreateShapeFromNumpy(value).message)\n        for component in value:\n            component_literal = literal.tuple_literals.add()\n            component_literal.CopyFrom(ConvertNumpyArrayToLiteral(component))\n        return literal\n    else:\n        return _ConvertNumpyArrayToLiteral(value)",
    "docstring": "Converts a Numpy array or a nested tuple thereof to an XLA literal.",
    "type": "function",
    "file_path": "tensorflow\\third_party\\xla\\xla\\python_api\\xla_literal.py",
    "ast_data": "FunctionDef name:ConvertNumpyArrayToLiteral arg:value arguments arg If Call Assign Call Call Call For Assign Call Call Call Return return:yes Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "transpose_inference_rule",
    "source_code": "@register_inference_rule('transpose')\ndef transpose_inference_rule(n: Node, symbols, constraints, counter):\n    assert isinstance(n.args[0], Node)\n    assert isinstance(n.args[1], int)\n    assert isinstance(n.args[2], int)\n    output, counter = gen_tvar(counter)\n    symbols[n] = output\n    from_arg = symbols[n.args[0]]\n    assert isinstance(from_arg, TVar)\n    is_dyn = Conj([BinConstraintT(from_arg, Dyn, op_eq), BinConstraintT(output, Dyn, op_eq)])\n    c3 = Disj([Transpose(i + 1, from_arg, n.args[1], n.args[2], output) for i in range(MAX_TENSOR_RANK)])\n    return ([Disj([is_dyn, c3])], counter)",
    "docstring": "Can be considered as a sequence of two index selects, so we generate constraints accordingly",
    "type": "function",
    "file_path": "pytorch\\torch\\fx\\experimental\\migrate_gradual_types\\constraint_generator.py",
    "ast_data": "FunctionDef name:transpose_inference_rule arg:n arg:symbols arg:constraints arg:counter arguments arg arg arg arg Call Call Call Assign Call Assign Assign Call Assign Call Call Call Assign Call Call Call Return return:yes Call Call"
  },
  {
    "library": "scipy",
    "name": "CrossLegTable",
    "source_code": "class CrossLegTable(Benchmark):\n\n    def __init__(self, dimensions=2):\n        Benchmark.__init__(self, dimensions)\n        self._bounds = list(zip([-10.0] * self.N, [10.0] * self.N))\n        self.global_optimum = [[0.0, 0.0]]\n        self.fglob = -1.0\n\n    def fun(self, x, *args):\n        self.nfev += 1\n        u = 100 - sqrt(x[0] ** 2 + x[1] ** 2) / pi\n        v = sin(x[0]) * sin(x[1])\n        return -(abs(v * exp(abs(u))) + 1) ** (-0.1)",
    "docstring": "Cross-Leg-Table objective function. This class defines the Cross-Leg-Table [1]_ global optimization problem. This is a multimodal minimization problem defined as follows: .. math:: f_{\\text{CrossLegTable}}(x) = - \\frac{1}{\\left(\\left|{e^{\\left|{100 - \\frac{\\sqrt{x_{1}^{2} + x_{2}^{2}}}{\\pi}}\\right|} \\sin\\left(x_{1}\\right) \\sin\\left(x_{2}\\right)}\\right| + 1\\right)^{0.1}} with :math: for :math:. *Global optimum*: :math:. The global minimum is found on the planes :math: and :math: ..[1] Mishra, S. Global Optimization by Differential Evolution and Particle Swarm Methods: Evaluation on Some Benchmark Functions Munich University, 2006",
    "type": "class",
    "file_path": "scipy\\benchmarks\\benchmarks\\go_benchmark_functions\\go_funcs_C.py",
    "ast_data": "ClassDef name:CrossLegTable FunctionDef name:__init__ arg:self arg:dimensions arguments arg arg Call Assign Call Call Assign Assign FunctionDef name:fun arg:self arg:x arguments arg arg arg Assign Call Assign Call Call Return return:yes Call Call Call"
  },
  {
    "library": "kornia",
    "name": "HorizontalFlip",
    "source_code": "class HorizontalFlip(OperationBase):\n\n    def __init__(self, initial_probability: float=0.5, temperature: float=0.1) -> None:\n        super().__init__(K.RandomHorizontalFlip(same_on_batch=False, p=initial_probability), initial_magnitude=None, temperature=temperature, symmetric_megnitude=False)",
    "docstring": "Apply horizontal flip operation. Args: initial_probability: the initial probability. If None, the augmentation will be randomly applied according to he augmentation sampling range. temperature: temperature for RelaxedBernoulli distribution used during training.",
    "type": "class",
    "file_path": "kornia\\kornia\\augmentation\\auto\\operations\\ops.py",
    "ast_data": "ClassDef name:HorizontalFlip FunctionDef name:__init__ arg:self arg:initial_probability arg:temperature arguments arg arg arg Call Call Call"
  },
  {
    "library": "pandas",
    "name": "_stack_multi_column_index",
    "source_code": "def _stack_multi_column_index(columns: MultiIndex) -> MultiIndex | Index:\n    if len(columns.levels) <= 2:\n        return columns.levels[0]._rename(name=columns.names[0])\n    levs = ([lev[c] if c >= 0 else None for c in codes] for lev, codes in zip(columns.levels[:-1], columns.codes[:-1]))\n    tuples = zip(*levs)\n    unique_tuples = (key for key, _ in itertools.groupby(tuples))\n    new_levs = zip(*unique_tuples)\n    return MultiIndex.from_arrays([Index(new_lev, dtype=lev.dtype) if None not in new_lev else new_lev for new_lev, lev in zip(new_levs, columns.levels)], names=columns.names[:-1])",
    "docstring": "Creates a MultiIndex from the first N-1 levels of this MultiIndex.",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\reshape\\reshape.py",
    "ast_data": "FunctionDef name:_stack_multi_column_index arg:columns arguments arg If Compare Call Return return:yes Call Assign Compare Call Assign Call Assign Call Assign Call Return return:yes Call Compare Call Call"
  },
  {
    "library": "seaborn",
    "name": "_get_scale_transforms",
    "source_code": "def _get_scale_transforms(self, axis):\n    if self.ax is None:\n        axis_list = [getattr(ax, f'{axis}axis') for ax in self.facets.axes.flat]\n        scales = {axis.get_scale() for axis in axis_list}\n        if len(scales) > 1:\n            err = 'Cannot determine transform with mixed scales on faceted axes.'\n            raise RuntimeError(err)\n        transform_obj = axis_list[0].get_transform()\n    else:\n        transform_obj = getattr(self.ax, f'{axis}axis').get_transform()\n    return (transform_obj.transform, transform_obj.inverted().transform)",
    "docstring": "Return a function implementing the scale transform (or its inverse).",
    "type": "method",
    "file_path": "seaborn\\seaborn\\_base.py",
    "ast_data": "FunctionDef name:_get_scale_transforms arg:self arg:axis arguments arg arg If Compare Assign Call Assign Call If Compare Call Assign Raise Call Assign Call Assign Call Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "matches_any",
    "source_code": "def matches_any(patterns: List[Pattern[str]], line: str) -> bool:\n    stripped_line = line.strip()\n    for pattern in patterns:\n        if pattern.match(stripped_line):\n            return True\n    return False",
    "docstring": "Checks if the line matches any of the given patterns. Args: patterns: A list of compiled regular expression patterns. line: The line to check for matches. Returns: True if the line matches any of the patterns, False otherwise.",
    "type": "function",
    "file_path": "tensorflow\\third_party\\xla\\third_party\\py\\rules_pywrap\\def_file_filter_tool.py",
    "ast_data": "FunctionDef name:matches_any arg:patterns arg:line arguments arg arg Assign Call For If Call Return return:yes Return return:yes"
  },
  {
    "library": "django",
    "name": "get_level_tags",
    "source_code": "def get_level_tags():\n    return {**constants.DEFAULT_TAGS, **getattr(settings, 'MESSAGE_TAGS', {})}",
    "docstring": "Return the message level tags.",
    "type": "function",
    "file_path": "django\\django\\contrib\\messages\\utils.py",
    "ast_data": "FunctionDef name:get_level_tags arguments Return return:yes Call"
  },
  {
    "library": "numpy",
    "name": "combine_paths",
    "source_code": "def combine_paths(*args, **kws):\n    r = []\n    for a in args:\n        if not a:\n            continue\n        if is_string(a):\n            a = [a]\n        r.append(a)\n    args = r\n    if not args:\n        return []\n    if len(args) == 1:\n        result = reduce(lambda a, b: a + b, map(glob, args[0]), [])\n    elif len(args) == 2:\n        result = []\n        for a0 in args[0]:\n            for a1 in args[1]:\n                result.extend(glob(os.path.join(a0, a1)))\n    else:\n        result = combine_paths(*combine_paths(args[0], args[1]) + args[2:])\n    log.debug('(paths: %s)', ','.join(result))\n    return result",
    "docstring": "Return a list of existing paths composed by all combinations of items from arguments.",
    "type": "function",
    "file_path": "numpy\\numpy\\distutils\\system_info.py",
    "ast_data": "FunctionDef name:combine_paths arguments arg arg Assign For If If Call Assign Call Assign If Return return:no If Compare Call Assign Call arguments arg arg Call If Compare Call Assign For For Call Call Call Assign Call Call Call Call Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "AdamOptimizer",
    "source_code": "class AdamOptimizer(BaseOptimizer):\n\n    def __init__(self, params, learning_rate_init=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-08):\n        super().__init__(learning_rate_init)\n        self.beta_1 = beta_1\n        self.beta_2 = beta_2\n        self.epsilon = epsilon\n        self.t = 0\n        self.ms = [np.zeros_like(param) for param in params]\n        self.vs = [np.zeros_like(param) for param in params]\n\n    def _get_updates(self, grads):\n        self.t += 1\n        self.ms = [self.beta_1 * m + (1 - self.beta_1) * grad for m, grad in zip(self.ms, grads)]\n        self.vs = [self.beta_2 * v + (1 - self.beta_2) * grad ** 2 for v, grad in zip(self.vs, grads)]\n        self.learning_rate = self.learning_rate_init * np.sqrt(1 - self.beta_2 ** self.t) / (1 - self.beta_1 ** self.t)\n        updates = [-self.learning_rate * m / (np.sqrt(v) + self.epsilon) for m, v in zip(self.ms, self.vs)]\n        return updates",
    "docstring": "Stochastic gradient descent optimizer with Adam Note: All default values are from the original Adam paper Parameters ---------- params : list, length = len(coefs_) + len(intercepts_) The concatenated list containing coefs_ and intercepts_ in MLP model. Used for initializing velocities and updating params learning_rate_init : float, default=0.001 The initial learning rate used. It controls the step-size in updating the weights beta_1 : float, default=0.9 Exponential decay rate for estimates of first moment vector, should be in [0, 1) beta_2 : float, default=0.999 Exponential decay rate for estimates of second moment vector, should be in [0, 1) epsilon : float, default=1e-8 Value for numerical stability Attributes ---------- learning_rate : float The current learning rate t : int Timestep ms : list, length = len(params) First moment vectors vs : list, length = len(params) Second moment vectors References ---------- :arxiv:`Kingma, Diederik, and Jimmy Ba (2014) \"Adam: A method for stochastic optimization.\"",
    "type": "class",
    "file_path": "scikit-learn\\sklearn\\neural_network\\_stochastic_optimizers.py",
    "ast_data": "ClassDef name:AdamOptimizer FunctionDef name:__init__ arg:self arg:params arg:learning_rate_init arg:beta_1 arg:beta_2 arg:epsilon arguments arg arg arg arg arg arg Call Call Assign Assign Assign Assign Assign Call Assign Call FunctionDef name:_get_updates arg:self arg:grads arguments arg arg Assign Call Assign Call Assign Call Assign Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_transpose_batch_time",
    "source_code": "def _transpose_batch_time(x):\n    x_static_shape = x.get_shape()\n    if x_static_shape.rank is not None and x_static_shape.rank < 2:\n        return x\n    x_rank = array_ops.rank(x)\n    x_t = array_ops.transpose(x, array_ops.concat(([1, 0], math_ops.range(2, x_rank)), axis=0))\n    x_t.set_shape(tensor_shape.TensorShape([x_static_shape.dims[1].value, x_static_shape.dims[0].value]).concatenate(x_static_shape[2:]))\n    return x_t",
    "docstring": "Transposes the batch and time dimensions of a Tensor. If the input tensor has rank < 2 it returns the original tensor. Retains as much of the static shape information as possible. Args: x: A Tensor. Returns: x transposed along the first two dimensions.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\rnn.py",
    "ast_data": "FunctionDef name:_transpose_batch_time arg:x arguments arg Assign Call If BoolOp Compare Compare Return return:yes Assign Call Assign Call Call Call Call Call Call Return return:yes"
  },
  {
    "library": "numpy",
    "name": "_block",
    "source_code": "def _block(arrays, max_depth, result_ndim, depth=0):\n    if depth < max_depth:\n        arrs = [_block(arr, max_depth, result_ndim, depth + 1) for arr in arrays]\n        return _concatenate(arrs, axis=-(max_depth - depth))\n    else:\n        return _atleast_nd(arrays, result_ndim)",
    "docstring": "Internal implementation of block based on repeated concatenation. is the argument passed to block. is the depth of nested lists within and is the greatest of the dimensions of the arrays in and the depth of the lists in (see block docstring for details).",
    "type": "function",
    "file_path": "numpy\\numpy\\_core\\shape_base.py",
    "ast_data": "FunctionDef name:_block arg:arrays arg:max_depth arg:result_ndim arg:depth arguments arg arg arg arg If Compare Assign Call Return return:yes Call Return return:yes Call"
  },
  {
    "library": "kornia",
    "name": "forward",
    "source_code": "def forward(self, img: Tensor, mask: Optional[Tensor]=None) -> Tuple[Tensor, Tensor]:\n    responses, lafs = self.detect(img, self.num_features, mask)\n    lafs = self.aff(lafs, img)\n    lafs = self.ori(lafs, img)\n    return (lafs, responses)",
    "docstring": "Three stage local feature detection. First the location and scale of interest points are determined by detect function. Then affine shape and orientation. Args: img: image to extract features with shape [BxCxHxW] mask: a mask with weights where to apply the response function. The shape must be the same as the input image. Returns: lafs: shape [BxNx2x3]. Detected local affine frames. responses: shape [BxNx1]. Response function values for corresponding lafs",
    "type": "method",
    "file_path": "kornia\\kornia\\feature\\scale_space_detector.py",
    "ast_data": "FunctionDef name:forward arg:self arg:img arg:mask arguments arg arg arg Assign Call Assign Call Assign Call Return return:yes"
  },
  {
    "library": "django",
    "name": "total_error_count",
    "source_code": "def total_error_count(self):\n    return len(self.non_form_errors()) + sum((len(form_errors) for form_errors in self.errors))",
    "docstring": "Return the number of errors across all forms in the formset.",
    "type": "method",
    "file_path": "django\\django\\forms\\formsets.py",
    "ast_data": "FunctionDef name:total_error_count arg:self arguments arg Return return:yes Call Call Call Call"
  },
  {
    "library": "django",
    "name": "get_fieldsets",
    "source_code": "def get_fieldsets(self, request, obj=None):\n    if self.fieldsets:\n        return self.fieldsets\n    return [(None, {'fields': self.get_fields(request, obj)})]",
    "docstring": "Hook for specifying fieldsets.",
    "type": "method",
    "file_path": "django\\django\\contrib\\admin\\options.py",
    "ast_data": "FunctionDef name:get_fieldsets arg:self arg:request arg:obj arguments arg arg arg If Return return:yes Return return:yes Call"
  },
  {
    "library": "django",
    "name": "start_serialization",
    "source_code": "def start_serialization(self):\n    raise NotImplementedError('subclasses of Serializer must provide a start_serialization() method')",
    "docstring": "Called when serializing of the queryset starts.",
    "type": "method",
    "file_path": "django\\django\\core\\serializers\\base.py",
    "ast_data": "FunctionDef name:start_serialization arg:self arguments arg Raise Call"
  },
  {
    "library": "tensorflow",
    "name": "QuantizationDebugOptions",
    "source_code": "@tf_export.tf_export('lite.experimental.QuantizationDebugOptions')\nclass QuantizationDebugOptions:\n\n    def __init__(self, layer_debug_metrics: Optional[Mapping[str, Callable[[np.ndarray], float]]]=None, model_debug_metrics: Optional[Mapping[str, Callable[[Sequence[np.ndarray], Sequence[np.ndarray]], float]]]=None, layer_direct_compare_metrics: Optional[Mapping[str, Callable[[Sequence[np.ndarray], Sequence[np.ndarray], float, int], float]]]=None, denylisted_ops: Optional[List[str]]=None, denylisted_nodes: Optional[List[str]]=None, fully_quantize: bool=False) -> None:\n        self.layer_debug_metrics = layer_debug_metrics\n        self.model_debug_metrics = model_debug_metrics\n        self.layer_direct_compare_metrics = layer_direct_compare_metrics\n        keys = []\n        for metrics in [layer_debug_metrics, model_debug_metrics, layer_direct_compare_metrics]:\n            if metrics is not None:\n                keys.extend(metrics.keys())\n        if len(keys) != len(set(keys)):\n            raise ValueError('Provided metrics have duplicate keys.')\n        self.denylisted_ops = denylisted_ops\n        self.denylisted_nodes = denylisted_nodes\n        self.fully_quantize = fully_quantize",
    "docstring": "Debug options to set up a given QuantizationDebugger.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\lite\\tools\\optimize\\debugging\\python\\debugger.py",
    "ast_data": "ClassDef name:QuantizationDebugOptions FunctionDef name:__init__ arg:self arg:layer_debug_metrics arg:model_debug_metrics arg:layer_direct_compare_metrics arg:denylisted_ops arg:denylisted_nodes arg:fully_quantize arguments arg arg arg arg arg arg arg Assign Assign Assign Assign For If Compare Call Call If Compare Call Call Call Raise Call Assign Assign Assign Call"
  },
  {
    "library": "tensorflow",
    "name": "_index_matrix",
    "source_code": "def _index_matrix(layout: layout_lib.Layout, elem_spec: tensor_spec.TensorSpec) -> tensor.Tensor:\n    matrix = []\n    for dim in layout.mesh.dim_names:\n        row = [0]\n        for layout_idx, spec in enumerate(layout.sharding_specs[1:]):\n            if spec == layout_lib.UNSHARDED or spec != dim:\n                row.append(0)\n            else:\n                row.append(elem_spec.shape[layout_idx] // layout.mesh.dim_size(dim))\n        matrix.append(row)\n    return constant_op.constant(matrix, dtype=dtypes.int32)",
    "docstring": "Computes a utility matrix to derive device-based slice offsets. This function builds a matrix of shape for each dataset element. This matrix can be used to slice the DTensor components returned by the iterator according to the local device that component is to be placed on. This can be done by multiplying the device offsets of shape with this index matrix to get a shape tensor containing the slice offsets. Note: the index on the batch dim is always 0 since sharding on the batch dimension is handled by either tf.data.Dataset's shard transformation (in the single-client case) or tf.data service's distribute function (in the multi-client case). If there is no sharding on the batch dimension (or any other dimension), the slice index remains 0. Args: layout: the layout of the dataset element. elem_spec: the spec of the dataset element. Returns: The index matrix as a tensor.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\dtensor\\python\\input_util.py",
    "ast_data": "FunctionDef name:_index_matrix arg:layout arg:elem_spec arguments arg arg Assign For Assign For Call If BoolOp Compare Compare Call Call Call Call Return return:yes Call"
  },
  {
    "library": "django",
    "name": "_strip_once",
    "source_code": "def _strip_once(value):\n    s = MLStripper()\n    s.feed(value)\n    s.close()\n    return s.get_data()",
    "docstring": "Internal tag stripping utility used by strip_tags.",
    "type": "function",
    "file_path": "django\\django\\utils\\html.py",
    "ast_data": "FunctionDef name:_strip_once arg:value arguments arg Assign Call Call Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "ready_size",
    "source_code": "def ready_size(self, name=None):\n    if name is None:\n        name = '%s_BarrierReadySize' % self._name\n    return gen_data_flow_ops.barrier_ready_size(self._barrier_ref, name=name)",
    "docstring": "Compute the number of complete elements in the given barrier. Args: name: A name for the operation (optional). Returns: A single-element tensor containing the number of complete elements in the given barrier.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\data_flow_ops.py",
    "ast_data": "FunctionDef name:ready_size arg:self arg:name arguments arg arg If Compare Assign Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "_constructor",
    "source_code": "@property\ndef _constructor(self) -> type[Self]:\n    return type(self)",
    "docstring": "Class constructor (for this class it's just ).",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\base.py",
    "ast_data": "FunctionDef name:_constructor arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "kornia",
    "name": "crop_by_transform_mat",
    "source_code": "def crop_by_transform_mat(input_tensor: Tensor, transform: Tensor, out_size: Tuple[int, int], mode: str='bilinear', padding_mode: str='zeros', align_corners: bool=True) -> Tensor:\n    dst_trans_src = as_tensor(transform.expand(input_tensor.shape[0], -1, -1), device=input_tensor.device, dtype=input_tensor.dtype)\n    patches: Tensor = warp_affine(input_tensor, dst_trans_src[:, :2, :], out_size, mode=mode, padding_mode=padding_mode, align_corners=align_corners)\n    return patches",
    "docstring": "Perform crop transform on 2D images (4D tensor) given a perspective transformation matrix. Args: input_tensor: the 2D image tensor with shape (B, C, H, W). transform: a perspective transformation matrix with shape (B, 3, 3). out_size: size of the output image (height, width). mode: interpolation mode to calculate output values ``. align_corners: mode for grid_generation. Returns: the output tensor with patches.",
    "type": "function",
    "file_path": "kornia\\kornia\\geometry\\transform\\crop2d.py",
    "ast_data": "FunctionDef name:crop_by_transform_mat arg:input_tensor arg:transform arg:out_size arg:mode arg:padding_mode arg:align_corners arguments arg arg arg arg arg arg Assign Call Call Call Return return:yes"
  },
  {
    "library": "numpy",
    "name": "_discovered_machar",
    "source_code": "def _discovered_machar(ftype):\n    params = _MACHAR_PARAMS[ftype]\n    return MachAr(lambda v: array([v], ftype), lambda v: _fr0(v.astype(params['itype']))[0], lambda v: array(_fr0(v)[0], ftype), lambda v: params['fmt'] % array(_fr0(v)[0], ftype), params['title'])",
    "docstring": "Create MachAr instance with found information on float types TODO: MachAr should be retired completely ideally. We currently only ever use it system with broken longdouble (valgrind, WSL).",
    "type": "function",
    "file_path": "numpy\\numpy\\_core\\getlimits.py",
    "ast_data": "FunctionDef name:_discovered_machar arg:ftype arguments arg Assign Return return:yes Call arguments arg Call arguments arg Call Call arguments arg Call Call arguments arg Call Call"
  },
  {
    "library": "scipy",
    "name": "_woodbury_algorithm",
    "source_code": "def _woodbury_algorithm(A, ur, ll, b, k):\n    k_mod = k - k % 2\n    bs = int((k - 1) / 2) + (k + 1) % 2\n    n = A.shape[1] + 1\n    U = np.zeros((n - 1, k_mod))\n    VT = np.zeros((k_mod, n - 1))\n    U[:bs, :bs] = ur\n    VT[np.arange(bs), np.arange(bs) - bs] = 1\n    U[-bs:, -bs:] = ll\n    VT[np.arange(bs) - bs, np.arange(bs)] = 1\n    Z = solve_banded((bs, bs), A, U)\n    H = solve(np.identity(k_mod) + VT @ Z, np.identity(k_mod))\n    y = solve_banded((bs, bs), A, b)\n    c = y - Z @ (H @ (VT @ y))\n    return c",
    "docstring": "Solve a cyclic banded linear system with upper right and lower blocks of size ``, otherwise corner block elements will intersect with diagonals. Examples -------- Consider the case of n = 8, k = 5 (size of blocks - 2 x 2). The matrix of a system: U: V: x x x * * a b a b 0 0 0 0 1 0 x x x x * * c 0 c 0 0 0 0 0 1 x x x x x * * 0 0 0 0 0 0 0 0 * x x x x x * 0 0 0 0 0 0 0 0 * * x x x x x 0 0 0 0 0 0 0 0 d * * x x x x 0 0 d 0 1 0 0 0 e f * * x x x 0 0 e f 0 1 0 0 References ---------- .. [1] William H. Press, Saul A. Teukolsky, William T. Vetterling and Brian P. Flannery, Numerical Recipes, 2007, Section 2.7.3",
    "type": "function",
    "file_path": "scipy\\scipy\\interpolate\\_bsplines.py",
    "ast_data": "FunctionDef name:_woodbury_algorithm arg:A arg:ur arg:ll arg:b arg:k arguments arg arg arg arg arg Assign Assign Call Assign Assign Call Assign Call Assign Assign Call Call Assign Assign Call Call Assign Call Assign Call Call Call Assign Call Assign Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_flatten_zero_dim_tensor_optim_state",
    "source_code": "def _flatten_zero_dim_tensor_optim_state(state_name: str, zero_dim_tensors: list[torch.Tensor], unflat_param_names: list[str]) -> torch.Tensor:\n    non_none_tensors = [t for t in zero_dim_tensors if t is not None]\n    values_set = {t.item() if t is not None else None for t in zero_dim_tensors}\n    dtypes = {t.dtype if t is not None else None for t in zero_dim_tensors}\n    if len(non_none_tensors) != len(zero_dim_tensors) or len(values_set) != 1 or len(dtypes) != 1:\n        raise ValueError(f'All unflattened parameters comprising a single flat parameter must have scalar state with the same value and dtype but got values {values_set} and dtypes {dtypes} for state {state_name} and unflattened parameter names {unflat_param_names}')\n    value = next(iter(values_set))\n    dtype = next(iter(dtypes))\n    return torch.tensor(value, dtype=dtype, device=torch.device('cpu'))",
    "docstring": "Flattens the zero-dimension tensor optimizer state given by the values `list`.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\fsdp\\_optim_utils.py",
    "ast_data": "FunctionDef name:_flatten_zero_dim_tensor_optim_state arg:state_name arg:zero_dim_tensors arg:unflat_param_names arguments arg arg arg Assign Compare Assign Compare Call Assign Compare If BoolOp Compare Call Call Compare Call Compare Call Raise Call Assign Call Call Assign Call Call Return return:yes Call Call"
  },
  {
    "library": "cherrypy",
    "name": "decode_TEXT_maybe",
    "source_code": "def decode_TEXT_maybe(value):\n    return decode_TEXT(value) if '=?' in value else value",
    "docstring": "Decode the text but only if '=?' appears in it.",
    "type": "function",
    "file_path": "cherrypy\\cherrypy\\lib\\httputil.py",
    "ast_data": "FunctionDef name:decode_TEXT_maybe arg:value arguments arg Return return:yes Compare Call"
  },
  {
    "library": "tensorflow",
    "name": "_num_buckets",
    "source_code": "@property\ndef _num_buckets(self):\n    return self.hash_bucket_size",
    "docstring": "Returns number of buckets in this sparse feature.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\feature_column\\feature_column.py",
    "ast_data": "FunctionDef name:_num_buckets arg:self arguments arg Return return:yes"
  },
  {
    "library": "kornia",
    "name": "forward",
    "source_code": "def forward(self, input_tensor: Tensor) -> Tensor:\n    return apply_colormap(input_tensor, self.colormap)",
    "docstring": "Apply the colormap to the input tensor. Args: input_tensor: The input tensor representing the grayscale image. .. note:: The input tensor must be integer values in the range of [0-255] or float values in the range of [0-1]. Returns: The output tensor representing the image with the applied colormap.",
    "type": "method",
    "file_path": "kornia\\kornia\\color\\colormap.py",
    "ast_data": "FunctionDef name:forward arg:self arg:input_tensor arguments arg arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "_observer_forward_hook",
    "source_code": "def _observer_forward_hook(self, input, output):\n    return self.activation_post_process(output)",
    "docstring": "Forward hook that calls observer on the output",
    "type": "function",
    "file_path": "pytorch\\torch\\ao\\quantization\\quantize.py",
    "ast_data": "FunctionDef name:_observer_forward_hook arg:self arg:input arg:output arguments arg arg arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_get_argspec_for_partial",
    "source_code": "def _get_argspec_for_partial(obj):\n    n_prune_args = len(obj.args)\n    partial_keywords = obj.keywords or {}\n    args, varargs, keywords, defaults = getargspec(obj.func)\n    args = args[n_prune_args:]\n    no_default = object()\n    all_defaults = [no_default] * len(args)\n    if defaults:\n        all_defaults[-len(defaults):] = defaults\n    for kw, default in iter(partial_keywords.items()):\n        if kw in args:\n            idx = args.index(kw)\n            all_defaults[idx] = default\n        elif not keywords:\n            raise ValueError(f'{obj} does not have a **kwargs parameter, but contains an unknown partial keyword {kw}.')\n    first_default = next((idx for idx, x in enumerate(all_defaults) if x is not no_default), None)\n    if first_default is None:\n        return ArgSpec(args, varargs, keywords, None)\n    invalid_default_values = [args[i] for i, j in enumerate(all_defaults) if j is no_default and i > first_default]\n    if invalid_default_values:\n        raise ValueError(f'{obj} has some keyword-only arguments, which are not supported: {invalid_default_values}.')\n    return ArgSpec(args, varargs, keywords, tuple(all_defaults[first_default:]))",
    "docstring": "Implements for objects. Args: obj: The object Returns: An Raises: ValueError: When callable's signature can not be expressed with ArgSpec.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\util\\tf_inspect.py",
    "ast_data": "FunctionDef name:_get_argspec_for_partial arg:obj arguments arg Assign Call Assign BoolOp Assign Call Assign Assign Call Assign Call If Assign Call For Call Call If Compare Assign Call Assign If Raise Call Assign Call Call Compare If Compare Return return:yes Call Assign Call BoolOp Compare Compare If Raise Call Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "fill_empty_rows",
    "source_code": "def fill_empty_rows(ragged_input, default_value, name=None):\n    with ops.name_scope(name, 'RaggedFillEmptyRows', [ragged_input]):\n        if not isinstance(ragged_input, ragged_tensor.RaggedTensor):\n            raise TypeError(f'ragged_input must be RaggedTensor,             got {type(ragged_input)}')\n        default_value = ops.convert_to_tensor(default_value, dtype=ragged_input.dtype)\n        output_value_rowids, output_values, empty_row_indicator, unused_reverse_index_map = gen_ragged_array_ops.ragged_fill_empty_rows(value_rowids=ragged_input.value_rowids(), values=ragged_input.values, nrows=ragged_input.nrows(), default_value=default_value)\n        return (ragged_tensor.RaggedTensor.from_value_rowids(values=output_values, value_rowids=output_value_rowids, validate=False), empty_row_indicator)",
    "docstring": "Fills empty rows in the input with rank 2 with a default value. This op adds entries with the specified for any row in the input that does not already have a value. The op also returns an indicator vector such that empty_row_indicator[i] = True iff row i was an empty row. Args: ragged_input: A with rank 2. default_value: The value to fill for empty rows, with the same type as name: A name prefix for the returned tensors (optional) Returns: ragged_ordered_output: A with all empty rows filled in with . empty_row_indicator: A bool vector indicating whether each input row was empty. Raises: TypeError: If is not a .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\ragged_array_ops.py",
    "ast_data": "FunctionDef name:fill_empty_rows arg:ragged_input arg:default_value arg:name arguments arg arg arg With Call If Call Raise Call Call Assign Call Assign Call Call Call Return return:yes Call"
  },
  {
    "library": "sphinx",
    "name": "get_numfig_title",
    "source_code": "def get_numfig_title(self, node: Node) -> str | None:\n    if self.is_enumerable_node(node):\n        elem = cast('Element', node)\n        _, title_getter = self.enumerable_nodes.get(elem.__class__, (None, None))\n        if title_getter:\n            return title_getter(elem)\n        else:\n            for subnode in elem:\n                if isinstance(subnode, nodes.caption | nodes.title):\n                    return clean_astext(subnode)\n    return None",
    "docstring": "Get the title of enumerable nodes to refer them using its title",
    "type": "method",
    "file_path": "sphinx\\sphinx\\domains\\std\\__init__.py",
    "ast_data": "FunctionDef name:get_numfig_title arg:self arg:node arguments arg arg If Call Assign Call Assign Call If Return return:yes Call For If Call Return return:yes Call Return return:no"
  },
  {
    "library": "matplotlib",
    "name": "get_font",
    "source_code": "def get_font(font_filepaths, hinting_factor=None):\n    if isinstance(font_filepaths, (str, Path, bytes)):\n        paths = (_cached_realpath(font_filepaths),)\n    else:\n        paths = tuple((_cached_realpath(fname) for fname in font_filepaths))\n    hinting_factor = mpl._val_or_rc(hinting_factor, 'text.hinting_factor')\n    return _get_font(paths, hinting_factor, _kerning_factor=mpl.rcParams['text.kerning_factor'], thread_id=threading.get_ident(), enable_last_resort=mpl.rcParams['font.enable_last_resort'])",
    "docstring": "Get an object given a list of file paths. Parameters ---------- font_filepaths : Iterable[str, Path, bytes], str, Path, bytes Relative or absolute paths to the font files to be used. If a single string, bytes, or , then it will be treated as a list with that entry only. If more than one filepath is passed, then the returned FT2Font object will fall back through the fonts, in the order given, to find a needed glyph. Returns -------",
    "type": "function",
    "file_path": "matplotlib\\lib\\matplotlib\\font_manager.py",
    "ast_data": "FunctionDef name:get_font arg:font_filepaths arg:hinting_factor arguments arg arg If Call Assign Call Assign Call Call Assign Call Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "__call__",
    "source_code": "def __call__(self, *args, **kwargs):\n\n    def replica_local_fn(*args, **kwargs):\n        if any((isinstance(arg, keras_tensor.KerasTensor) for arg in nest.flatten((args, kwargs)))):\n            update_op = None\n        else:\n            update_op = self.update_state(*args, **kwargs)\n        update_ops = []\n        if update_op is not None:\n            update_ops.append(update_op)\n        with ops.control_dependencies(update_ops):\n            result_t = self.result()\n            result_t._metric_obj = self\n            return result_t\n    from tensorflow.python.keras.distribute import distributed_training_utils\n    return distributed_training_utils.call_replica_local_fn(replica_local_fn, *args, **kwargs)",
    "docstring": "Accumulates statistics and then computes metric result value. Args: *args: **kwargs: A mini-batch of inputs to the Metric, passed on to . Returns: The metric value tensor.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\metrics.py",
    "ast_data": "FunctionDef name:__call__ arg:self arguments arg arg arg FunctionDef name:replica_local_fn arguments arg arg If Call Call Call Assign Assign Call Assign If Compare Call With Call Assign Call Assign Return return:yes Return return:yes Call"
  },
  {
    "library": "django",
    "name": "_do_update",
    "source_code": "def _do_update(self, base_qs, using, pk_val, values, update_fields, forced_update):\n    filtered = base_qs.filter(pk=pk_val)\n    if not values:\n        return update_fields is not None or filtered.exists()\n    if self._meta.select_on_save and (not forced_update):\n        return filtered.exists() and (filtered._update(values) > 0 or filtered.exists())\n    return filtered._update(values) > 0",
    "docstring": "Try to update the model. Return True if the model was updated (if an update query was done and a matching row was found in the DB).",
    "type": "method",
    "file_path": "django\\django\\db\\models\\base.py",
    "ast_data": "FunctionDef name:_do_update arg:self arg:base_qs arg:using arg:pk_val arg:values arg:update_fields arg:forced_update arguments arg arg arg arg arg arg arg Assign Call If Return return:yes BoolOp Compare Call If BoolOp Return return:yes BoolOp Call BoolOp Compare Call Call Return return:yes Compare Call"
  },
  {
    "library": "cryptography",
    "name": "revocation_date",
    "source_code": "@property\n@abc.abstractmethod\ndef revocation_date(self) -> datetime.datetime:\n    pass",
    "docstring": "Returns the date of when this certificate was revoked.",
    "type": "method",
    "file_path": "cryptography\\src\\cryptography\\x509\\base.py",
    "ast_data": "FunctionDef name:revocation_date arg:self arguments arg"
  },
  {
    "library": "numpy",
    "name": "imag",
    "source_code": "@property\ndef imag(self):\n    result = self._data.imag.view(type(self))\n    result.__setmask__(self._mask)\n    return result",
    "docstring": "The imaginary part of the masked array. This property is a view on the imaginary part of this . See Also -------- real Examples -------- >>> import numpy as np >>> x = np.ma.array([1+1.j, -2j, 3.45+1.6j], mask=[False, True, False]) >>> x.imag masked_array(data=[1.0, --, 1.6], mask=[False, True, False], fill_value=1e+20)",
    "type": "method",
    "file_path": "numpy\\numpy\\ma\\core.py",
    "ast_data": "FunctionDef name:imag arg:self arguments arg Assign Call Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "zero_grad",
    "source_code": "@torch._disable_dynamo\ndef zero_grad(self, set_to_none: bool=True) -> None:\n    foreach = self.defaults.get('foreach', False) or self.defaults.get('fused', False)\n    if not hasattr(self, '_zero_grad_profile_name'):\n        self._patch_step_function()\n    per_device_and_dtype_grads: Optional[defaultdict[torch.device, defaultdict[torch.dtype, list[torch.Tensor]]]]\n    if foreach:\n        per_device_and_dtype_grads = defaultdict(lambda: defaultdict(list))\n    else:\n        per_device_and_dtype_grads = None\n    with torch.autograd.profiler.record_function(self._zero_grad_profile_name):\n        for group in self.param_groups:\n            for p in group['params']:\n                if p.grad is not None:\n                    if set_to_none:\n                        p.grad = None\n                    else:\n                        if p.grad.grad_fn is not None:\n                            p.grad.detach_()\n                        else:\n                            p.grad.requires_grad_(False)\n                        if not foreach or p.grad.is_sparse:\n                            p.grad.zero_()\n                        else:\n                            assert per_device_and_dtype_grads is not None\n                            per_device_and_dtype_grads[p.grad.device][p.grad.dtype].append(p.grad)\n        if foreach:\n            assert per_device_and_dtype_grads is not None\n            for per_dtype_grads in per_device_and_dtype_grads.values():\n                for grads in per_dtype_grads.values():\n                    torch._foreach_zero_(grads)",
    "docstring": "Reset the gradients of all optimized :class: s. Args: set_to_none (bool): instead of setting to zero, set the grads to None. This will in general have lower memory footprint, and can modestly improve performance. However, it changes certain behaviors. For example: 1. When the user tries to access a gradient and perform manual ops on it, a None attribute or a Tensor full of 0s will behave differently. 2. If the user requests `` optimizers have a different behavior if the gradient is 0 or None (in one case it does the step with a gradient of 0 and in the other it skips the step altogether).",
    "type": "method",
    "file_path": "pytorch\\torch\\optim\\optimizer.py",
    "ast_data": "FunctionDef name:zero_grad arg:self arg:set_to_none arguments arg arg Assign BoolOp Call Call If Call Call If Assign Call arguments Call Assign With Call For For If Compare If Assign If Compare Call Call If BoolOp Call Compare Call If Compare For Call For Call Call"
  },
  {
    "library": "django",
    "name": "to_python",
    "source_code": "def to_python(self, value):\n    if value in self.empty_values:\n        return None\n    if isinstance(value, datetime.datetime):\n        return value.date()\n    if isinstance(value, datetime.date):\n        return value\n    return super().to_python(value)",
    "docstring": "Validate that the input can be converted to a date. Return a Python datetime.date object.",
    "type": "method",
    "file_path": "django\\django\\forms\\fields.py",
    "ast_data": "FunctionDef name:to_python arg:self arg:value arguments arg arg If Compare Return return:no If Call Return return:yes Call If Call Return return:yes Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, sess, session_root, watch_fn=None, thread_name_filter=None, pass_through_operrors=None):\n    framework.NonInteractiveDebugWrapperSession.__init__(self, sess, watch_fn=watch_fn, thread_name_filter=thread_name_filter, pass_through_operrors=pass_through_operrors)\n    session_root = os.path.expanduser(session_root)\n    if gfile.Exists(session_root):\n        if not gfile.IsDirectory(session_root):\n            raise ValueError('session_root path points to a file: %s' % session_root)\n        elif gfile.ListDirectory(session_root):\n            raise ValueError('session_root path points to a non-empty directory: %s' % session_root)\n    else:\n        gfile.MakeDirs(session_root)\n    self._session_root = session_root\n    self._run_counter = 0\n    self._run_counter_lock = threading.Lock()",
    "docstring": "Constructor of DumpingDebugWrapperSession. Args: sess: The TensorFlow object being wrapped. session_root: () Path to the session root directory. Must be a directory that does not exist or an empty directory. If the directory does not exist, it will be created by the debugger core during debug calls. As the calls occur, subdirectories will be added to . The subdirectories' names has the following pattern: run__ E.g., run_1480734393835964_ad4c953a85444900ae79fc1b652fb324 watch_fn: () A Callable that can be used to define per-run debug ops and watched tensors. See the doc of for details. thread_name_filter: Regular-expression white list for threads on which the wrapper session will be active. See doc of for more details. pass_through_operrors: If true, all captured OpErrors will be propagated. By default this captures all OpErrors. Raises: ValueError: If is an existing and non-empty directory or if is a file.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\debug\\wrappers\\dumping_wrapper.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:sess arg:session_root arg:watch_fn arg:thread_name_filter arg:pass_through_operrors arguments arg arg arg arg arg arg Call Assign Call If Call If Call Raise Call If Call Raise Call Call Assign Assign Assign Call"
  },
  {
    "library": "scikit-learn",
    "name": "size",
    "source_code": "def size(x: HasShape[Collection[SupportsIndex | None]]) -> int | None:\n    if None in x.shape:\n        return None\n    out = math.prod(cast('Collection[SupportsIndex]', x.shape))\n    return None if math.isnan(out) else out",
    "docstring": "Return the total number of elements of x. This is equivalent to according to the __. This helper is included because PyTorch defines in an :external+torch:meth:. It also fixes dask.array's behaviour which returns nan for unknown sizes, whereas the standard requires None.",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\externals\\array_api_compat\\common\\_helpers.py",
    "ast_data": "FunctionDef name:size arg:x arguments arg If Compare Return return:no Assign Call Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "__tf_tensor__",
    "source_code": "def __tf_tensor__(self, dtype=None, name=None):\n    pass",
    "docstring": "Converts this object to a Tensor. Args: dtype: data type for the returned Tensor name: a name for the operations which create the Tensor Returns: A Tensor.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\types\\core.py",
    "ast_data": "FunctionDef name:__tf_tensor__ arg:self arg:dtype arg:name arguments arg arg arg"
  },
  {
    "library": "pytorch",
    "name": "reset_modules",
    "source_code": "def reset_modules(nodes: list[fx.Node], modules: dict[str, nn.Module], old_modules: dict[nn.Module, nn.Module]):\n    for node in nodes:\n        if node.op == 'call_module':\n            assert isinstance(node.target, str)\n            cur_module = modules[node.target]\n            if cur_module in old_modules:\n                replace_node_module(node, modules, old_modules[cur_module])",
    "docstring": "Maps each module that's been changed with back to its original.",
    "type": "function",
    "file_path": "pytorch\\torch\\fx\\experimental\\optimization.py",
    "ast_data": "FunctionDef name:reset_modules arg:nodes arg:modules arg:old_modules arguments arg arg arg For If Compare Call Assign If Compare Call"
  },
  {
    "library": "pytorch",
    "name": "has_uninitialized_params",
    "source_code": "def has_uninitialized_params(self: _LazyProtocol):\n    params = self._parameters.values()\n    buffers = self._buffers.values()\n    for param in itertools.chain(params, buffers):\n        if is_lazy(param):\n            return True\n    return False",
    "docstring": "Check if a module has parameters that are not initialized.",
    "type": "method",
    "file_path": "pytorch\\torch\\nn\\modules\\lazy.py",
    "ast_data": "FunctionDef name:has_uninitialized_params arg:self arguments arg Assign Call Assign Call For Call If Call Return return:yes Return return:yes"
  },
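A short sketch of how this check surfaces through lazy modules, assuming a stock `torch` install; `LazyLinear` exposes `has_uninitialized_params` via `LazyModuleMixin`:

```python
import torch

layer = torch.nn.LazyLinear(out_features=3)
print(layer.has_uninitialized_params())  # True: weight/bias are still placeholders

layer(torch.randn(2, 5))                 # first forward infers in_features=5
print(layer.has_uninitialized_params())  # False: parameters are materialized
```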
  {
    "library": "tensorflow",
    "name": "_row_partitions_for_ragged_tensor",
    "source_code": "def _row_partitions_for_ragged_tensor(value, rank, dtype):\n    assert rank > 1\n    value_row_partitions = value._nested_row_partitions[:rank - 1]\n    if len(value_row_partitions) < rank - 1:\n        value_row_partitions += _row_partitions_for_tensor(value.flat_values, rank - len(value_row_partitions), dtype)\n    assert len(value_row_partitions) == rank - 1\n    return value_row_partitions",
    "docstring": "Returns the row partitions for a tf.RaggedTensor.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\structured\\structured_tensor.py",
    "ast_data": "FunctionDef name:_row_partitions_for_ragged_tensor arg:value arg:rank arg:dtype arguments arg arg arg Compare Assign If Compare Call Call Call Compare Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "RegisterGradient",
    "source_code": "@tf_export('RegisterGradient')\nclass RegisterGradient(object):\n    __slots__ = ['_op_type']\n\n    def __init__(self, op_type):\n        if not isinstance(op_type, str):\n            raise TypeError('op_type must be a string')\n        self._op_type = op_type\n\n    def __call__(self, f: _T) -> _T:\n        gradient_registry.register(f, self._op_type)\n        return f",
    "docstring": "A decorator for registering the gradient function for an op type. This decorator is only used when defining a new op type. For an op with inputs and outputs, the gradient function is a function that takes the original and objects (representing the gradients with respect to each output of the op), and returns objects (representing the partial gradients with respect to each input of the op). For example, assuming that operations of type take two inputs and , and return a single output , the following gradient function would be registered: The decorator argument is the string type of an operation. This corresponds to the field for the proto that defines the operation.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\ops.py",
    "ast_data": "ClassDef name:RegisterGradient Assign FunctionDef name:__init__ arg:self arg:op_type arguments arg arg If Call Raise Call Assign FunctionDef name:__call__ arg:self arg:f arguments arg arg Call Return return:yes Call"
  },
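The registration pattern from the docstring, sketched with a placeholder op type name ("MySub" is hypothetical, chosen to avoid clashing with TensorFlow's built-in "Sub" registration):

```python
import tensorflow as tf

@tf.RegisterGradient("MySub")  # placeholder op type name
def _my_sub_grad(unused_op, grad):
    # For z = x - y: dz/dx = 1, dz/dy = -1
    return grad, tf.negative(grad)
```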
  {
    "library": "tensorflow",
    "name": "mark_checked",
    "source_code": "def mark_checked(tensors):\n\n    def _mark_checked(tensor):\n        tensor._keras_history_checked = True\n    nest.map_structure(_mark_checked, tensors)",
    "docstring": "Marks that these Tensors should not be tracked. This prevents Layers from attempting to create TensorFlowOpLayers for these Tensors. Args: tensors: An arbitrary structure of Tensors.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\base_layer_utils.py",
    "ast_data": "FunctionDef name:mark_checked arg:tensors arguments arg FunctionDef name:_mark_checked arg:tensor arguments arg Assign Call"
  },
  {
    "library": "scipy",
    "name": "basis_element",
    "source_code": "@classmethod\ndef basis_element(cls, t, extrapolate=True):\n    k = len(t) - 2\n    t = _as_float_array(t)\n    t = np.r_[(t[0] - 1,) * k, t, (t[-1] + 1,) * k]\n    c = np.zeros_like(t)\n    c[k] = 1.0\n    return cls.construct_fast(t, c, k, extrapolate)",
    "docstring": "Return a B-spline basis element `tktt`, and compare to its explicit form: >>> t = [0, 1, 1, 2] >>> b = BSpline.basis_element(t) >>> def f(x): ... return np.where(x >> import matplotlib.pyplot as plt >>> fig, ax = plt.subplots() >>> x = np.linspace(0, 2, 51) >>> ax.plot(x, b(x), 'g', lw=3) >>> ax.plot(x, f(x), 'r', lw=8, alpha=0.4) >>> ax.grid(True) >>> plt.show()",
    "type": "method",
    "file_path": "scipy\\scipy\\interpolate\\_bsplines.py",
    "ast_data": "FunctionDef name:basis_element arg:cls arg:t arg:extrapolate arguments arg arg arg Assign Call Assign Call Assign Assign Call Assign Return return:yes Call"
  },
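A runnable version of the docstring's comparison, assuming stock `scipy`/`numpy` installs:

```python
import numpy as np
from scipy.interpolate import BSpline

t = [0, 1, 1, 2]
b = BSpline.basis_element(t)        # degree k = len(t) - 2 = 2

x = np.linspace(0, 2, 51)
explicit = np.where(x < 1, x * x, (2.0 - x) ** 2)
print(np.allclose(b(x), explicit))  # True
```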
  {
    "library": "tensorflow",
    "name": "TFRecordCompressionType",
    "source_code": "@tf_export(v1=['io.TFRecordCompressionType', 'python_io.TFRecordCompressionType'])\n@deprecation.deprecated_endpoints('io.TFRecordCompressionType', 'python_io.TFRecordCompressionType')\nclass TFRecordCompressionType(object):\n    NONE = 0\n    ZLIB = 1\n    GZIP = 2",
    "docstring": "The type of compression for the record.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\lib\\io\\tf_record.py",
    "ast_data": "ClassDef name:TFRecordCompressionType Assign Assign Assign Call Call"
  },
  {
    "library": "tensorflow",
    "name": "load_attributes_from_hdf5_group",
    "source_code": "def load_attributes_from_hdf5_group(group, name):\n    if name in group.attrs:\n        data = [n.decode('utf8') if hasattr(n, 'decode') else n for n in group.attrs[name]]\n    else:\n        data = []\n        chunk_id = 0\n        while '%s%d' % (name, chunk_id) in group.attrs:\n            data.extend([n.decode('utf8') if hasattr(n, 'decode') else n for n in group.attrs['%s%d' % (name, chunk_id)]])\n            chunk_id += 1\n    return data",
    "docstring": "Loads attributes of the specified name from the HDF5 group. This method deals with an inherent problem of HDF5 file which is not able to store data larger than HDF5_OBJECT_HEADER_LIMIT bytes. Args: group: A pointer to a HDF5 group. name: A name of the attributes to load. Returns: data: Attributes data.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\saving\\hdf5_format.py",
    "ast_data": "FunctionDef name:load_attributes_from_hdf5_group arg:group arg:name arguments arg arg If Compare Assign Call Call Assign Assign While Compare Call Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "RecordingObserver",
    "source_code": "class RecordingObserver(ObserverBase):\n    __annotations__ = {'tensor_val': list[Optional[torch.Tensor]]}\n\n    def __init__(self, dtype=torch.quint8):\n        super().__init__(dtype=dtype, is_dynamic=False)\n        self.tensor_val = []\n\n    def forward(self, x):\n        self.tensor_val.append(x.clone())\n        return x\n\n    @torch.jit.export\n    def calculate_qparams(self):\n        raise Exception('calculate_qparams should not be called for RecordingObserver')\n\n    @torch.jit.export\n    def get_tensor_value(self):\n        return self.tensor_val",
    "docstring": "The module is mainly for debug and records the tensor values during runtime. Args: dtype: Quantized data type qscheme: Quantization scheme to be used reduce_range: Reduces the range of the quantized data type by 1 bit",
    "type": "class",
    "file_path": "pytorch\\torch\\ao\\quantization\\observer.py",
    "ast_data": "ClassDef name:RecordingObserver Assign FunctionDef name:__init__ arg:self arg:dtype arguments arg arg Call Call Assign FunctionDef name:forward arg:self arg:x arguments arg arg Call Call Return return:yes FunctionDef name:calculate_qparams arg:self arguments arg Raise Call FunctionDef name:get_tensor_value arg:self arguments arg Return return:yes"
  },
  {
    "library": "django",
    "name": "as_sql",
    "source_code": "def as_sql(self):\n    if self.single_alias and (self.connection.features.delete_can_self_reference_subquery or not self.contains_self_reference_subquery):\n        return self._as_sql(self.query)\n    innerq = self.query.clone()\n    innerq.__class__ = Query\n    innerq.clear_select_clause()\n    pk = self.query.model._meta.pk\n    innerq.select = [pk.get_col(self.query.get_initial_alias())]\n    outerq = Query(self.query.model)\n    if not self.connection.features.update_can_self_select:\n        sql, params = innerq.get_compiler(connection=self.connection).as_sql()\n        innerq = RawSQL('SELECT * FROM (%s) subquery' % sql, params)\n    outerq.add_filter('pk__in', innerq)\n    return self._as_sql(outerq)",
    "docstring": "Create the SQL for this query. Return the SQL string and list of parameters.",
    "type": "method",
    "file_path": "django\\django\\db\\models\\sql\\compiler.py",
    "ast_data": "FunctionDef name:as_sql arg:self arguments arg If BoolOp BoolOp Return return:yes Call Assign Call Assign Call Assign Assign Call Call Assign Call If Assign Call Call Assign Call Call Return return:yes Call"
  },
  {
    "library": "cherrypy",
    "name": "__setitem__",
    "source_code": "def __setitem__(self, key, value):\n    existing = self.get(key)\n    dict.__setitem__(self, key, value)\n    if isinstance(existing, threading.Event):\n        existing.result = value\n        existing.set()",
    "docstring": "Set the cached value for the given key.",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\lib\\caching.py",
    "ast_data": "FunctionDef name:__setitem__ arg:self arg:key arg:value arguments arg arg arg Assign Call Call If Call Assign Call"
  },
  {
    "library": "django",
    "name": "only_relation_agnostic_fields",
    "source_code": "def only_relation_agnostic_fields(self, fields):\n    fields_def = []\n    for name, field in sorted(fields.items()):\n        deconstruction = self.deep_deconstruct(field)\n        if field.remote_field and field.remote_field.model:\n            deconstruction[2].pop('to', None)\n        fields_def.append(deconstruction)\n    return fields_def",
    "docstring": "Return a definition of the fields that ignores field names and what related fields actually relate to. Used for detecting renames (as the related fields change during renames).",
    "type": "method",
    "file_path": "django\\django\\db\\migrations\\autodetector.py",
    "ast_data": "FunctionDef name:only_relation_agnostic_fields arg:self arg:fields arguments arg arg Assign For Call Call Assign Call If BoolOp Call Call Return return:yes"
  },
  {
    "library": "scipy",
    "name": "sort_result",
    "source_code": "def sort_result(self):\n    results = self.LMC.sort_cache_result()\n    self.res.xl = results['xl']\n    self.res.funl = results['funl']\n    self.res.x = results['x']\n    self.res.fun = results['fun']\n    self.res.nfev = self.fn + self.res.nlfev\n    return self.res",
    "docstring": "Sort results and build the global return object",
    "type": "method",
    "file_path": "scipy\\scipy\\optimize\\_shgo.py",
    "ast_data": "FunctionDef name:sort_result arg:self arguments arg Assign Call Assign Assign Assign Assign Assign Return return:yes"
  },
  {
    "library": "numpy",
    "name": "dtype_from_ctypes_type",
    "source_code": "def dtype_from_ctypes_type(t):\n    import _ctypes\n    if issubclass(t, _ctypes.Array):\n        return _from_ctypes_array(t)\n    elif issubclass(t, _ctypes._Pointer):\n        raise TypeError('ctypes pointers have no dtype equivalent')\n    elif issubclass(t, _ctypes.Structure):\n        return _from_ctypes_structure(t)\n    elif issubclass(t, _ctypes.Union):\n        return _from_ctypes_union(t)\n    elif isinstance(getattr(t, '_type_', None), str):\n        return _from_ctypes_scalar(t)\n    else:\n        raise NotImplementedError(f'Unknown ctypes type {t.__name__}')",
    "docstring": "Construct a dtype object from a ctypes type",
    "type": "function",
    "file_path": "numpy\\numpy\\_core\\_dtype_ctypes.py",
    "ast_data": "FunctionDef name:dtype_from_ctypes_type arg:t arguments arg If Call Return return:yes Call If Call Raise Call If Call Return return:yes Call If Call Return return:yes Call If Call Call Return return:yes Call Raise Call"
  },
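This helper is what `np.dtype(...)` dispatches to for ctypes inputs, so it can be exercised through the public constructor; a small sketch (field reprs shown for a little-endian machine):

```python
import ctypes
import numpy as np

class Point(ctypes.Structure):
    _fields_ = [("x", ctypes.c_int32), ("y", ctypes.c_float)]

print(np.dtype(ctypes.c_uint8))  # dtype('uint8')
print(np.dtype(Point))           # dtype([('x', '<i4'), ('y', '<f4')])
# np.dtype(ctypes.POINTER(ctypes.c_int)) raises TypeError: no dtype equivalent
```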
  {
    "library": "django",
    "name": "_get_single_external",
    "source_code": "def _get_single_external(self, index):\n    return GEOSGeometry(capi.geom_clone(self._get_single_internal(index)), srid=self.srid)",
    "docstring": "Return the Geometry from this Collection at the given index (0-based).",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\geos\\collections.py",
    "ast_data": "FunctionDef name:_get_single_external arg:self arg:index arguments arg arg Return return:yes Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "report",
    "source_code": "@trace.trace_wrapper\ndef report(self):\n    self._python_memory_checker.report()",
    "docstring": "Generates a html graph file showing allocations over snapshots. It create a temporary directory and put all the output files there. If this is running under Google internal testing infra, it will use the directory provided the infra instead.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\memory_checker.py",
    "ast_data": "FunctionDef name:report arg:self arguments arg Call"
  },
  {
    "library": "tensorflow",
    "name": "_library_paths",
    "source_code": "def _library_paths():\n    return ['', 'lib64', 'lib', 'lib/*-linux-gnu', 'lib/x64', 'extras/CUPTI/*', 'local/cuda/lib64', 'local/cuda/extras/CUPTI/lib64']",
    "docstring": "Returns hard-coded set of relative paths to look for library files.",
    "type": "function",
    "file_path": "tensorflow\\third_party\\xla\\third_party\\gpus\\find_cuda_config.py",
    "ast_data": "FunctionDef name:_library_paths arguments Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "get_extra_size_of",
    "source_code": "def get_extra_size_of(node: Node, nodes: set[Node]) -> int:\n    input_nodes: dict[Node, None] = {}\n    map_arg(node.args, input_nodes.setdefault)\n    map_arg(node.kwargs, input_nodes.setdefault)\n    total_size_of_input_nodes = 0\n    for n in input_nodes:\n        if n not in nodes:\n            size_bytes = getattr(n, 'size_bytes', None)\n            if size_bytes:\n                total_size_of_input_nodes += size_bytes.output_size\n            else:\n                raise RuntimeError('node has no size_bytes attr')\n    size_bytes = getattr(node, 'size_bytes', None)\n    if size_bytes:\n        total_size_of_input_nodes += size_bytes.total_size\n    else:\n        raise RuntimeError('node has no size_bytes attr')\n    return total_size_of_input_nodes",
    "docstring": "Given a node and a set of nodes, this function return the extra size that needed if this node is included in this set.",
    "type": "function",
    "file_path": "pytorch\\torch\\fx\\experimental\\partitioner_utils.py",
    "ast_data": "FunctionDef name:get_extra_size_of arg:node arg:nodes arguments arg arg Call Call Assign For If Compare Assign Call If Raise Call Assign Call If Raise Call Return return:yes"
  },
  {
    "library": "scipy",
    "name": "kolmogn",
    "source_code": "def kolmogn(n, x, cdf=True):\n    it = np.nditer([n, x, cdf, None], flags=['zerosize_ok'], op_dtypes=[None, np.float64, np.bool_, np.float64])\n    for _n, _x, _cdf, z in it:\n        if np.isnan(_n):\n            z[...] = _n\n            continue\n        if int(_n) != _n:\n            raise ValueError(f'n is not integral: {_n}')\n        z[...] = _kolmogn(int(_n), _x, cdf=_cdf)\n    result = it.operands[-1]\n    return result",
    "docstring": "Computes the CDF for the two-sided Kolmogorov-Smirnov distribution. The two-sided Kolmogorov-Smirnov distribution has as its CDF Pr(D_n <= x), for a sample of size n drawn from a distribution with CDF F(t), where :math:, and :math: is the Empirical Cumulative Distribution Function of the sample. Parameters ---------- n : integer, array_like the number of samples x : float, array_like The K-S statistic, float between 0 and 1 cdf : bool, optional whether to compute the CDF(default=true) or the SF. Returns ------- cdf : ndarray CDF (or SF it cdf is False) at the specified locations. The return value has shape the result of numpy broadcasting n and x.",
    "type": "function",
    "file_path": "scipy\\scipy\\stats\\_ksstats.py",
    "ast_data": "FunctionDef name:kolmogn arg:n arg:x arg:cdf arguments arg arg arg Assign Call For If Call Assign If Compare Call Raise Call Assign Call Call Assign Return return:yes"
  },
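`kolmogn` is private, but the same distribution is reachable through `scipy.stats.kstwo` (which, to the best of my knowledge, is backed by this module); a quick consistency check:

```python
from scipy.stats import kstwo

n, x = 10, 0.3
print(kstwo.cdf(x, n))                   # Pr(D_n <= x) for sample size n
print(kstwo.cdf(x, n) + kstwo.sf(x, n))  # ~1.0: CDF and SF are complementary
```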
  {
    "library": "django",
    "name": "__eq__",
    "source_code": "def __eq__(self, other):\n    if isinstance(other, OGRGeomType):\n        return self.num == other.num\n    elif isinstance(other, str):\n        return self.name.lower() == other.lower()\n    elif isinstance(other, int):\n        return self.num == other\n    else:\n        return False",
    "docstring": "Do an equivalence test on the OGR type with the given other OGRGeomType, the short-hand string, or the integer.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\gdal\\geomtype.py",
    "ast_data": "FunctionDef name:__eq__ arg:self arg:other arguments arg arg If Call Return return:yes Compare If Call Return return:yes Compare Call Call If Call Return return:yes Compare Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "adaptive_max_pool3d_with_indices",
    "source_code": "def adaptive_max_pool3d_with_indices(input: Tensor, output_size: BroadcastingList3[int], return_indices: bool=False) -> tuple[Tensor, Tensor]:\n    if has_torch_function_unary(input):\n        return handle_torch_function(adaptive_max_pool3d_with_indices, (input,), input, output_size, return_indices=return_indices)\n    output_size = _list_with_default(output_size, input.size())\n    return torch._C._nn.adaptive_max_pool3d(input, output_size)",
    "docstring": "adaptive_max_pool3d(input, output_size, return_indices=False) Applies a 3D adaptive max pooling over an input signal composed of several input planes. See :class: for details and output shape. Args: output_size: the target output size (single integer or triple-integer tuple) return_indices: whether to return pooling indices. Default: ``",
    "type": "function",
    "file_path": "pytorch\\torch\\nn\\functional.py",
    "ast_data": "FunctionDef name:adaptive_max_pool3d_with_indices arg:input arg:output_size arg:return_indices arguments arg arg arg If Call Return return:yes Call Assign Call Call Return return:yes Call"
  },
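A public-API usage sketch, assuming only `torch`:

```python
import torch
import torch.nn.functional as F

x = torch.randn(1, 3, 16, 20, 24)  # (N, C, D, H, W)
out = F.adaptive_max_pool3d(x, output_size=(4, 4, 4))
print(out.shape)                   # torch.Size([1, 3, 4, 4, 4])

out, idx = F.adaptive_max_pool3d(x, (4, 4, 4), return_indices=True)
print(idx.shape)                   # locations of each max, same shape as out
```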
  {
    "library": "scipy",
    "name": "estimate_fun_jac",
    "source_code": "def estimate_fun_jac(fun, x, y, p, f0=None):\n    n, m = y.shape\n    if f0 is None:\n        f0 = fun(x, y, p)\n    dtype = y.dtype\n    df_dy = np.empty((n, n, m), dtype=dtype)\n    h = EPS ** 0.5 * (1 + np.abs(y))\n    for i in range(n):\n        y_new = y.copy()\n        y_new[i] += h[i]\n        hi = y_new[i] - y[i]\n        f_new = fun(x, y_new, p)\n        df_dy[:, i, :] = (f_new - f0) / hi\n    k = p.shape[0]\n    if k == 0:\n        df_dp = None\n    else:\n        df_dp = np.empty((n, k, m), dtype=dtype)\n        h = EPS ** 0.5 * (1 + np.abs(p))\n        for i in range(k):\n            p_new = p.copy()\n            p_new[i] += h[i]\n            hi = p_new[i] - p[i]\n            f_new = fun(x, y, p_new)\n            df_dp[:, i, :] = (f_new - f0) / hi\n    return (df_dy, df_dp)",
    "docstring": "Estimate derivatives of an ODE system rhs with forward differences. Returns ------- df_dy : ndarray, shape (n, n, m) Derivatives with respect to y. An element (i, j, q) corresponds to d f_i(x_q, y_q) / d (y_q)_j. df_dp : ndarray with shape (n, k, m) or None Derivatives with respect to p. An element (i, j, q) corresponds to d f_i(x_q, y_q, p) / d p_j. If is empty, None is returned.",
    "type": "function",
    "file_path": "scipy\\scipy\\integrate\\_bvp.py",
    "ast_data": "FunctionDef name:estimate_fun_jac arg:fun arg:x arg:y arg:p arg:f0 arguments arg arg arg arg arg Assign If Compare Assign Call Assign Assign Call Assign Call For Call Assign Call Assign Assign Call Assign Assign If Compare Assign Assign Call Assign Call For Call Assign Call Assign Assign Call Assign Return return:yes"
  },
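The helper above builds df/dy one column at a time with forward differences; the standalone sketch below applies the same stepping rule (`EPS**0.5 * (1 + |y|)`) to a simple vector function. All names here are illustrative, not scipy internals:

```python
import numpy as np

EPS = np.finfo(float).eps

def fd_jacobian(f, y):
    f0 = f(y)
    h = EPS ** 0.5 * (1 + np.abs(y))  # same step rule as estimate_fun_jac
    J = np.empty((f0.size, y.size))
    for i in range(y.size):
        y_new = y.copy()
        y_new[i] += h[i]
        J[:, i] = (f(y_new) - f0) / (y_new[i] - y[i])
    return J

f = lambda y: np.array([y[0] ** 2, y[0] * y[1]])
print(fd_jacobian(f, np.array([2.0, 3.0])))  # approx [[4, 0], [3, 2]]
```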
  {
    "library": "pytorch",
    "name": "get_numerical_jacobian",
    "source_code": "@deprecated(\"`get_numerical_jacobian` was part of PyTorch's private API and not meant to be exposed. We are deprecating it and it will be removed in a future version of PyTorch. If you have a specific use for this or feature request for this to be a stable API, please file us an issue at https://github.com/pytorch/pytorch/issues/new\", category=FutureWarning)\ndef get_numerical_jacobian(fn, inputs, target=None, eps=0.001, grad_out=1.0):\n    if grad_out != 1.0:\n        raise ValueError('Expected grad_out to be 1.0. get_numerical_jacobian no longer supports values of grad_out != 1.0.')\n\n    def fn_pack_inps(*inps):\n        return fn(inps)\n    jacobians = _get_numerical_jacobian(fn_pack_inps, inputs, None, target, eps)\n    return tuple((jacobian_for_each_output[0] for jacobian_for_each_output in jacobians))",
    "docstring": "Compute the numerical Jacobian for a given fn and its inputs. This is a Deprecated API. Args: fn: the function to compute the Jacobian for (must take inputs as a tuple) inputs: input to target: the Tensors wrt whom Jacobians are calculated (default=) eps: the magnitude of the perturbation during finite differencing (default=) grad_out: defaults to 1.0. Returns: A list of Jacobians of (restricted to its first output) with respect to each input or target, if provided. Note that may not even be part of to , so please be **very careful** in this to not clone .",
    "type": "function",
    "file_path": "pytorch\\torch\\autograd\\gradcheck.py",
    "ast_data": "FunctionDef name:get_numerical_jacobian arg:fn arg:inputs arg:target arg:eps arg:grad_out arguments arg arg arg arg arg If Compare Raise Call FunctionDef name:fn_pack_inps arguments arg Return return:yes Call Assign Call Return return:yes Call Call"
  },
  {
    "library": "matplotlib",
    "name": "toggle_toolitem",
    "source_code": "def toggle_toolitem(self, name, toggled):\n    raise NotImplementedError",
    "docstring": "A hook to toggle a toolitem without firing an event. This hook must be implemented in each backend and contains the backend-specific code to silently toggle a toolbar element. .. warning:: This is part of the backend implementation and should not be called by end-users. They should instead call or (which are equivalent). Parameters ---------- name : str Id of the tool to toggle. toggled : bool Whether to set this tool as toggled or not.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\backend_bases.py",
    "ast_data": "FunctionDef name:toggle_toolitem arg:self arg:name arg:toggled arguments arg arg arg Raise"
  },
  {
    "library": "pytorch",
    "name": "infer_size",
    "source_code": "def infer_size(shape: ShapeType, numel: int) -> tuple[int, ...]:\n    from torch.fx.experimental.symbolic_shapes import guard_or_false\n    dim = None\n    newsize = 1\n    for i, d in enumerate(shape):\n        if guard_or_false(d == -1):\n            torch._check(dim is None, lambda: 'only one dimension can be inferred')\n            dim = i\n        else:\n            torch._check(d >= 0, lambda: f'invalid shape dimension {d}. If this was symbolic, it was assumed to not be -1.If this was meant to be inferred, please explicitly pass in -1.')\n            newsize *= d\n    if dim is None:\n        torch._check(numel == newsize, lambda: f\"shape '{list(shape)}' is invalid for input of size {numel}\")\n    else:\n        torch._check(newsize != 0, lambda: f'cannot reshape tensor of 0 elements into shape {list(shape)} because the unspecified dimension size -1 can be any value and is ambiguous' if guard_or_false(numel == 0) else f\"shape '{list(shape)}' is invalid for input of size {numel}\")\n        torch._check(numel % newsize == 0, lambda: f\"shape '{list(shape)}' is invalid for input of size {numel}\")\n        shape = list(shape)\n        shape[dim] = numel // newsize\n        torch._check_is_size(shape[dim])\n    return tuple(shape)",
    "docstring": "Infers the size of a dim with size -1, if it exists. Also checks that new shape is compatible with the number of elements.",
    "type": "function",
    "file_path": "pytorch\\torch\\_prims_common\\__init__.py",
    "ast_data": "FunctionDef name:infer_size arg:shape arg:numel arguments arg arg Assign Assign For Call If Call Compare Call Compare arguments Assign Call Compare arguments If Compare Call Compare arguments Call Call Compare arguments Call Compare Call Call Call Compare arguments Call Assign Call Assign Call Return return:yes Call"
  },
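The same -1 inference is what `reshape`/`view` perform; a public-API sketch assuming only `torch`:

```python
import torch

x = torch.arange(24)
print(x.reshape(-1, 4).shape)  # torch.Size([6, 4]): -1 inferred as 24 // 4
print(x.view(2, -1, 3).shape)  # torch.Size([2, 4, 3])
# x.reshape(-1, 5) would raise: size 24 is not divisible by 5
```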
  {
    "library": "matplotlib",
    "name": "rc_context",
    "source_code": "@contextlib.contextmanager\ndef rc_context(rc=None, fname=None):\n    orig = dict(rcParams.copy())\n    del orig['backend']\n    try:\n        if fname:\n            rc_file(fname)\n        if rc:\n            rcParams.update(rc)\n        yield\n    finally:\n        rcParams._update_raw(orig)",
    "docstring": "Return a context manager for temporarily changing rcParams. The :rc: will not be reset by the context manager. rcParams changed both through the context manager invocation and in the body of the context will be reset on context exit. Parameters ---------- rc : dict The rcParams to temporarily set. fname : str or path-like A file with Matplotlib rc settings. If both *fname* and *rc* are given, settings from *rc* take precedence. See Also -------- :ref: Examples -------- Passing explicit values via a dict:: with mpl.rc_context({'interactive': False}): fig, ax = plt.subplots() ax.plot(range(3), range(3)) fig.savefig('example.png') plt.close(fig) Loading settings from a file:: with mpl.rc_context(fname='print.rc'): plt.plot(x, y) # uses 'print.rc' Setting in the context body:: with mpl.rc_context(): # will be reset mpl.rcParams['lines.linewidth'] = 5 plt.plot(x, y)",
    "type": "function",
    "file_path": "matplotlib\\lib\\matplotlib\\__init__.py",
    "ast_data": "FunctionDef name:rc_context arg:rc arg:fname arguments arg arg Assign Call Call Try If Call If Call Call"
  },
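A minimal usage sketch, assuming only `matplotlib`; values set inside the context are reverted on exit:

```python
import matplotlib as mpl

print(mpl.rcParams["lines.linewidth"])      # default, typically 1.5
with mpl.rc_context({"lines.linewidth": 5}):
    print(mpl.rcParams["lines.linewidth"])  # 5.0 inside the context
print(mpl.rcParams["lines.linewidth"])      # restored after exit
```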
  {
    "library": "pytorch",
    "name": "add_safe_globals",
    "source_code": "def add_safe_globals(safe_globals: list[Union[Callable, tuple[Callable, str]]]) -> None:\n    _weights_only_unpickler._add_safe_globals(safe_globals)",
    "docstring": "Marks the given globals as safe for `torch.load(f.name, weights_only=True)` will fail with # Unsupported global: GLOBAL __main__.MyTensor was not an allowed global by default. # Check the code and make sure MyTensor is safe to be used when loaded from an arbitrary checkpoint. ... torch.serialization.add_safe_globals([MyTensor]) ... torch.load(f.name, weights_only=True) # MyTensor([[-0.5024, -1.8152, -0.5455], # [-0.8234, 2.0500, -0.3657]])",
    "type": "function",
    "file_path": "pytorch\\torch\\serialization.py",
    "ast_data": "FunctionDef name:add_safe_globals arg:safe_globals arguments arg Call"
  },
  {
    "library": "pytorch",
    "name": "store_non_graphable_args",
    "source_code": "def store_non_graphable_args(self, combined_args: dict[str, Any]) -> tuple[dict, int]:\n\n    def is_graphable(val: Any) -> bool:\n        return isinstance(val, (fx.node.base_types, fx.Node))\n    non_graphable_args = {k: v for k, v in combined_args.items() if not is_graphable(v)}\n    graphable_args = {k: v for k, v in combined_args.items() if is_graphable(v)}\n    constant_args_idx = kernel_side_table.add_constant_args(non_graphable_args)\n    return (graphable_args, constant_args_idx)",
    "docstring": "Some args cannot be stored in the FX graph. Put them in the side table.",
    "type": "method",
    "file_path": "pytorch\\torch\\_higher_order_ops\\triton_kernel_wrap.py",
    "ast_data": "FunctionDef name:store_non_graphable_args arg:self arg:combined_args arguments arg arg FunctionDef name:is_graphable arg:val arguments arg Return return:yes Call Assign Call Call Assign Call Call Assign Call Return return:yes"
  },
  {
    "library": "pandas",
    "name": "reindex_indexer",
    "source_code": "def reindex_indexer(self, new_axis: Index, indexer: npt.NDArray[np.intp] | None, axis: AxisInt, fill_value=None, allow_dups: bool=False, only_slice: bool=False, *, use_na_proxy: bool=False) -> Self:\n    if indexer is None:\n        if new_axis is self.axes[axis]:\n            return self\n        result = self.copy(deep=False)\n        result.axes = list(self.axes)\n        result.axes[axis] = new_axis\n        return result\n    assert isinstance(indexer, np.ndarray)\n    if not allow_dups:\n        self.axes[axis]._validate_can_reindex(indexer)\n    if axis >= self.ndim:\n        raise IndexError('Requested axis not found in manager')\n    if axis == 0:\n        new_blocks = list(self._slice_take_blocks_ax0(indexer, fill_value=fill_value, only_slice=only_slice, use_na_proxy=use_na_proxy))\n    else:\n        new_blocks = [blk.take_nd(indexer, axis=1, fill_value=fill_value if fill_value is not None else blk.fill_value) for blk in self.blocks]\n    new_axes = list(self.axes)\n    new_axes[axis] = new_axis\n    new_mgr = type(self).from_blocks(new_blocks, new_axes)\n    if axis == 1:\n        new_mgr._blknos = self.blknos.copy()\n        new_mgr._blklocs = self.blklocs.copy()\n    return new_mgr",
    "docstring": "Parameters ---------- new_axis : Index indexer : ndarray[intp] or None axis : int fill_value : object, default None allow_dups : bool, default False only_slice : bool, default False Whether to take views, not copies, along columns. use_na_proxy : bool, default False Whether to use a np.void ndarray for newly introduced columns. pandas-indexer with -1's only.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\internals\\managers.py",
    "ast_data": "FunctionDef name:reindex_indexer arg:self arg:new_axis arg:indexer arg:axis arg:fill_value arg:allow_dups arg:only_slice arguments arg arg arg arg arg arg arg arg If Compare If Compare Return return:yes Assign Call Assign Call Assign Return return:yes Call If Call If Compare Raise Call If Compare Assign Call Call Assign Call Compare Assign Call Assign Assign Call Call If Compare Assign Call Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_tf_data_yield_flat_up_to",
    "source_code": "def _tf_data_yield_flat_up_to(shallow_tree, input_tree):\n    if _tf_data_is_nested(shallow_tree):\n        for shallow_branch, input_branch in zip(_tf_data_yield_value(shallow_tree), _tf_data_yield_value(input_tree)):\n            for input_leaf in _tf_data_yield_flat_up_to(shallow_branch, input_branch):\n                yield input_leaf\n    else:\n        yield input_tree",
    "docstring": "Yields elements partially flattened up to .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\util\\nest_util.py",
    "ast_data": "FunctionDef name:_tf_data_yield_flat_up_to arg:shallow_tree arg:input_tree arguments arg arg If Call For Call Call Call For Call"
  },
  {
    "library": "pytorch",
    "name": "current_device",
    "source_code": "def current_device() -> int:\n    return torch._C._accelerator_hooks_get_current_device()",
    "docstring": "Return the index of a currently selected device.",
    "type": "function",
    "file_path": "pytorch\\torch\\mtia\\__init__.py",
    "ast_data": "FunctionDef name:current_device arguments Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "dl_open_guard",
    "source_code": "@contextlib.contextmanager\ndef dl_open_guard():\n    if not _SET_GLOBAL_FLAGS:\n        yield\n        return\n    old_flags = sys.getdlopenflags()\n    sys.setdlopenflags(old_flags | ctypes.RTLD_GLOBAL)\n    try:\n        yield\n    finally:\n        sys.setdlopenflags(old_flags)",
    "docstring": "Context manager to set the RTLD_GLOBAL dynamic linker flag while we open a shared library to load custom operators.",
    "type": "function",
    "file_path": "pytorch\\torch\\_ops.py",
    "ast_data": "FunctionDef name:dl_open_guard arguments If Return return:no Assign Call Call Try Call"
  },
  {
    "library": "pytorch",
    "name": "_create_table",
    "source_code": "def _create_table(shards: list[tuple[tuple[int, int], tuple[int, int], int]], device_kind: str=''):\n    from tabulate import tabulate\n    row_ranges = sorted({block[0] for block in shards})\n    col_ranges = sorted({block[1] for block in shards})\n    matrix = [['' for _ in col_ranges] for _ in row_ranges]\n    for block in shards:\n        row_index = row_ranges.index(block[0])\n        col_index = col_ranges.index(block[1])\n        if matrix[row_index][col_index] == '':\n            matrix[row_index][col_index] = device_kind + ':' + str(block[2])\n        else:\n            matrix[row_index][col_index] += ',' + str(block[2])\n    row_headers = [f'Row {r[0]}-{r[1]}' for r in row_ranges]\n    col_headers = [f'Col {c[0]}-{c[1]}' for c in col_ranges]\n    return tabulate(matrix, headers=col_headers, showindex=row_headers)",
    "docstring": "Creates a tabulate table given row and column ranges with device name",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\tensor\\debug\\_visualize_sharding.py",
    "ast_data": "FunctionDef name:_create_table arg:shards arg:device_kind arguments arg arg Assign Call Assign Call Assign For Assign Call Assign Call If Compare Assign Call Call Assign Assign Return return:yes Call"
  },
  {
    "library": "kornia",
    "name": "get_hanning_kernel1d",
    "source_code": "def get_hanning_kernel1d(kernel_size: int, device: Optional[Device]=None, dtype: Optional[Dtype]=None) -> Tensor:\n    _check_kernel_size(kernel_size, 2, allow_even=True)\n    x = torch.arange(kernel_size, device=device, dtype=dtype)\n    x = 0.5 - 0.5 * cos(2.0 * math.pi * x / float(kernel_size - 1))\n    return x",
    "docstring": "Return Hanning (also known as Hann) kernel, used in signal processing and KCF tracker. .. math:: w(n) = 0.5 - 0.5cos\\\\left(\\\\frac{2\\\\pi{n}}{M-1}\\\\right) \\\\qquad 0 \\\\leq n \\\\leq M-1 See further in numpy docs Args: kernel_size: The size the of the kernel. It should be positive. device: tensor device desired to create the kernel dtype: tensor dtype desired to create the kernel Returns: 1D tensor with Hanning filter coefficients. Shape math: .. math:: w(n) = 0.5 - 0.5cos\\\\left(\\\\frac{2\\\\pi{n}}{M-1}\\\\right) Examples: >>> get_hanning_kernel1d(4) tensor([0.0000, 0.7500, 0.7500, 0.0000])",
    "type": "function",
    "file_path": "kornia\\kornia\\filters\\kernels.py",
    "ast_data": "FunctionDef name:get_hanning_kernel1d arg:kernel_size arg:device arg:dtype arguments arg arg arg Call Assign Call Assign Call Call Return return:yes"
  },
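A quick cross-check of the kernel against numpy's reference Hann window, assuming `kornia` is installed (import path taken from the entry's file_path):

```python
import numpy as np
from kornia.filters.kernels import get_hanning_kernel1d

k = get_hanning_kernel1d(4)
print(k)                                      # tensor([0.0000, 0.7500, 0.7500, 0.0000])
print(np.allclose(k.numpy(), np.hanning(4)))  # True: same Hann window
```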
  {
    "library": "django",
    "name": "__rmul__",
    "source_code": "def __rmul__(self, n):\n    return self.__class__(list(self) * n)",
    "docstring": "multiply",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\geos\\mutable_list.py",
    "ast_data": "FunctionDef name:__rmul__ arg:self arg:n arguments arg arg Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "build_nccl_all_reduce",
    "source_code": "def build_nccl_all_reduce(input_tensors, red_op, un_op=None):\n    if red_op == math_ops.add:\n        output_tensors = nccl_ops.all_sum(input_tensors)\n    else:\n        raise ValueError('red_op not supported by NCCL all-reduce: ', red_op)\n    if un_op:\n        un_op_wrapped = []\n        for t in output_tensors:\n            with ops.colocate_with(t):\n                un_op_wrapped.append(un_op(t))\n        output_tensors = un_op_wrapped\n    return output_tensors",
    "docstring": "Build a subgraph that does one full all-reduce, using NCCL. Args: input_tensors: list of of same-shape and type values to be reduced. red_op: binary elementwise reduction operator. Must be one of {tf.add} un_op: optional unary elementwise Op to apply to fully-reduce values. Returns: list of of reduced values. Raises: ValueError: red_op not supported.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\v1\\all_reduce.py",
    "ast_data": "FunctionDef name:build_nccl_all_reduce arg:input_tensors arg:red_op arg:un_op arguments arg arg arg If Compare Assign Call Raise Call If Assign For With Call Call Call Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "report_benchmark",
    "source_code": "def report_benchmark(self, iters=None, cpu_time=None, wall_time=None, throughput=None, extras=None, name=None, metrics=None):\n    name = self._get_name(overwrite_name=name)\n    _global_report_benchmark(name=name, iters=iters, cpu_time=cpu_time, wall_time=wall_time, throughput=throughput, extras=extras, metrics=metrics)",
    "docstring": "Report a benchmark. Args: iters: (optional) How many iterations were run cpu_time: (optional) Median or mean cpu time in seconds. wall_time: (optional) Median or mean wall time in seconds. throughput: (optional) Throughput (in MB/s) extras: (optional) Dict mapping string keys to additional benchmark info. Values may be either floats or values that are convertible to strings. name: (optional) Override the BenchmarkEntry name with . Otherwise it is inferred from the top-level method name. metrics: (optional) A list of dict, where each dict has the keys below name (required), string, metric name value (required), double, metric value min_value (optional), double, minimum acceptable metric value max_value (optional), double, maximum acceptable metric value",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\platform\\benchmark.py",
    "ast_data": "FunctionDef name:report_benchmark arg:self arg:iters arg:cpu_time arg:wall_time arg:throughput arg:extras arg:name arg:metrics arguments arg arg arg arg arg arg arg arg Assign Call Call"
  },
  {
    "library": "matplotlib",
    "name": "set_cursor",
    "source_code": "def set_cursor(self, cursor):\n    pass",
    "docstring": "Set the current cursor. This may have no effect if the backend does not display anything. If required by the backend, this method should trigger an update in the backend event loop after the cursor is set, as this method may be called e.g. before a long-running task during which the GUI is not updated. Parameters ---------- cursor : The cursor to display over the canvas. Note: some backends may change the cursor for the entire window.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\backend_bases.py",
    "ast_data": "FunctionDef name:set_cursor arg:self arg:cursor arguments arg arg"
  },
  {
    "library": "scipy",
    "name": "cplx02_f",
    "source_code": "def cplx02_f(z, a):\n    return np.exp(z) - a",
    "docstring": "e**z - a: Use to find the log of a",
    "type": "function",
    "file_path": "scipy\\scipy\\optimize\\_tstutils.py",
    "ast_data": "FunctionDef name:cplx02_f arg:z arg:a arguments arg arg Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "rewrite_warning",
    "source_code": "@contextlib.contextmanager\ndef rewrite_warning(target_message: str, target_category: type[Warning], new_message: str, new_category: type[Warning] | None=None) -> Generator[None]:\n    if new_category is None:\n        new_category = target_category\n    with warnings.catch_warnings(record=True) as record:\n        yield\n    if len(record) > 0:\n        match = re.compile(target_message)\n        for warning in record:\n            if warning.category is target_category and re.search(match, str(warning.message)):\n                category = new_category\n                message: Warning | str = new_message\n            else:\n                category, message = (warning.category, warning.message)\n            warnings.warn_explicit(message=message, category=category, filename=warning.filename, lineno=warning.lineno)",
    "docstring": "Rewrite the message of a warning. Parameters ---------- target_message : str Warning message to match. target_category : Warning Warning type to match. new_message : str New warning message to emit. new_category : Warning or None, default None New warning type to emit. When None, will be the same as target_category.",
    "type": "function",
    "file_path": "pandas\\pandas\\util\\_exceptions.py",
    "ast_data": "FunctionDef name:rewrite_warning arg:target_message arg:target_category arg:new_message arg:new_category arguments arg arg arg arg If Compare Assign With Call If Compare Call Assign Call For If BoolOp Compare Call Call Assign Assign Call"
  },
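A usage sketch for this private pandas helper; a warning matching the target is re-emitted with the new message and category:

```python
import warnings
from pandas.util._exceptions import rewrite_warning

with rewrite_warning(
    target_message="old message",
    target_category=FutureWarning,
    new_message="new message",
    new_category=DeprecationWarning,
):
    warnings.warn("old message", FutureWarning)  # surfaces as DeprecationWarning
```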
  {
    "library": "numpy",
    "name": "_is_packed",
    "source_code": "def _is_packed(dtype):\n    align = dtype.isalignedstruct\n    max_alignment = 1\n    total_offset = 0\n    for name in dtype.names:\n        fld_dtype, fld_offset, title = _unpack_field(*dtype.fields[name])\n        if align:\n            total_offset = _aligned_offset(total_offset, fld_dtype.alignment)\n            max_alignment = max(max_alignment, fld_dtype.alignment)\n        if fld_offset != total_offset:\n            return False\n        total_offset += fld_dtype.itemsize\n    if align:\n        total_offset = _aligned_offset(total_offset, max_alignment)\n    return total_offset == dtype.itemsize",
    "docstring": "Checks whether the structured data type in 'dtype' has a simple layout, where all the fields are in order, and follow each other with no alignment padding. When this returns true, the dtype can be reconstructed from a list of the field names and dtypes with no additional dtype parameters. Duplicates the C function.",
    "type": "function",
    "file_path": "numpy\\numpy\\_core\\_dtype.py",
    "ast_data": "FunctionDef name:_is_packed arg:dtype arguments arg Assign Assign Assign For Assign Call If Assign Call Assign Call If Compare Return return:yes If Assign Call Return return:yes Compare"
  },
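Packed vs. padded layouts can be seen with public numpy dtypes; the helper above checks that fields sit at their expected offsets with no extra, non-canonical padding:

```python
import numpy as np

packed = np.dtype([("a", "u1"), ("b", "<f8")])               # fields back-to-back
aligned = np.dtype([("a", "u1"), ("b", "<f8")], align=True)  # 'b' aligned to 8
print(packed.itemsize, aligned.itemsize)  # 9 16
```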
  {
    "library": "scipy",
    "name": "_minimize_trust_krylov",
    "source_code": "def _minimize_trust_krylov(fun, x0, args=(), jac=None, hess=None, hessp=None, inexact=True, **trust_region_options):\n    if jac is None:\n        raise ValueError('Jacobian is required for trust region ', 'exact minimization.')\n    if hess is None and hessp is None:\n        raise ValueError('Either the Hessian or the Hessian-vector product is required for Krylov trust-region minimization')\n    if inexact:\n        return _minimize_trust_region(fun, x0, args=args, jac=jac, hess=hess, hessp=hessp, subproblem=get_trlib_quadratic_subproblem(tol_rel_i=-2.0, tol_rel_b=-3.0, disp=trust_region_options.get('disp', False)), **trust_region_options)\n    else:\n        return _minimize_trust_region(fun, x0, args=args, jac=jac, hess=hess, hessp=hessp, subproblem=get_trlib_quadratic_subproblem(tol_rel_i=1e-08, tol_rel_b=1e-06, disp=trust_region_options.get('disp', False)), **trust_region_options)",
    "docstring": "Minimization of a scalar function of one or more variables using a nearly exact trust-region algorithm that only requires matrix vector products with the hessian matrix. .. versionadded:: 1.0.0 Options ------- inexact : bool, optional Accuracy to solve subproblems. If True requires less nonlinear iterations, but more vector products.",
    "type": "function",
    "file_path": "scipy\\scipy\\optimize\\_trustregion_krylov.py",
    "ast_data": "FunctionDef name:_minimize_trust_krylov arg:fun arg:x0 arg:args arg:jac arg:hess arg:hessp arg:inexact arguments arg arg arg arg arg arg arg arg If Compare Raise Call If BoolOp Compare Compare Raise Call If Return return:yes Call Call Call Return return:yes Call Call Call"
  },
  {
    "library": "matplotlib",
    "name": "flush",
    "source_code": "def flush(self):\n    pass",
    "docstring": "Flush the output stream.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\backends\\backend_svg.py",
    "ast_data": "FunctionDef name:flush arg:self arguments arg"
  },
  {
    "library": "scrapy",
    "name": "_get_handler",
    "source_code": "def _get_handler(settings: Settings) -> logging.Handler:\n    filename = settings.get('LOG_FILE')\n    handler: logging.Handler\n    if filename:\n        mode = 'a' if settings.getbool('LOG_FILE_APPEND') else 'w'\n        encoding = settings.get('LOG_ENCODING')\n        handler = logging.FileHandler(filename, mode=mode, encoding=encoding)\n    elif settings.getbool('LOG_ENABLED'):\n        handler = logging.StreamHandler()\n    else:\n        handler = logging.NullHandler()\n    formatter = logging.Formatter(fmt=settings.get('LOG_FORMAT'), datefmt=settings.get('LOG_DATEFORMAT'))\n    handler.setFormatter(formatter)\n    handler.setLevel(settings.get('LOG_LEVEL'))\n    if settings.getbool('LOG_SHORT_NAMES'):\n        handler.addFilter(TopLevelFormatter(['scrapy']))\n    return handler",
    "docstring": "Return a log handler object according to settings",
    "type": "function",
    "file_path": "scrapy\\scrapy\\utils\\log.py",
    "ast_data": "FunctionDef name:_get_handler arg:settings arguments arg Assign Call If Assign Call Assign Call Assign Call If Call Assign Call Assign Call Assign Call Call Call Call Call Call If Call Call Call Return return:yes"
  },
  {
    "library": "kornia",
    "name": "_pairwise_euclidean_distance",
    "source_code": "def _pairwise_euclidean_distance(self, data1: Tensor, data2: Tensor) -> Tensor:\n    A = data1[:, None, ...]\n    B = data2[None, ...]\n    distance = euclidean_distance(A, B)\n    return distance",
    "docstring": "Compute pairwise squared distance between 2 sets of vectors. Args: data1: 2D tensor of shape N, D data2: 2D tensor of shape C, D Returns: 2D tensor of shape N, C",
    "type": "method",
    "file_path": "kornia\\kornia\\contrib\\kmeans.py",
    "ast_data": "FunctionDef name:_pairwise_euclidean_distance arg:self arg:data1 arg:data2 arguments arg arg arg Assign Assign Assign Call Return return:yes"
  },
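A standalone broadcasting sketch of the same pairwise pattern, cross-checked against `torch.cdist` (torch only; names here are illustrative, not kornia internals):

```python
import torch

data1 = torch.randn(5, 3)  # N x D
data2 = torch.randn(7, 3)  # C x D

diff = data1[:, None, :] - data2[None, :, :]  # N x C x D via broadcasting
dist = diff.pow(2).sum(-1).sqrt()             # N x C Euclidean distances
print(torch.allclose(dist, torch.cdist(data1, data2), atol=1e-6))  # True
```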
  {
    "library": "tensorflow",
    "name": "_class_and_config_for_serialized_keras_object",
    "source_code": "def _class_and_config_for_serialized_keras_object(config, module_objects=None, custom_objects=None, printable_module_name='object'):\n    if not isinstance(config, dict) or 'class_name' not in config or 'config' not in config:\n        raise ValueError('Improper config format: ' + str(config))\n    class_name = config['class_name']\n    cls = _get_registered_object(class_name, custom_objects=custom_objects, module_objects=module_objects)\n    if cls is None:\n        raise ValueError('Unknown ' + printable_module_name + ': ' + class_name)\n    cls_config = config['config']\n    deserialized_objects = {}\n    for key, item in cls_config.items():\n        if isinstance(item, dict) and '__passive_serialization__' in item:\n            deserialized_objects[key] = _deserialize_keras_object(item, module_objects=module_objects, custom_objects=custom_objects, printable_module_name='config_item')\n        elif isinstance(item, six.string_types) and tf_inspect.isfunction(_get_registered_object(item, custom_objects)):\n            deserialized_objects[key] = _get_registered_object(item, custom_objects)\n    for key, item in deserialized_objects.items():\n        cls_config[key] = deserialized_objects[key]\n    return (cls, cls_config)",
    "docstring": "Returns the class name and config for a serialized keras object.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\feature_column\\serialization.py",
    "ast_data": "FunctionDef name:_class_and_config_for_serialized_keras_object arg:config arg:module_objects arg:custom_objects arg:printable_module_name arguments arg arg arg arg If BoolOp Call Compare Compare Raise Call Call Assign Assign Call If Compare Raise Call Assign Assign For Call If BoolOp Call Compare Assign Call If BoolOp Call Call Call Assign Call For Call Assign Return return:yes"
  },
  {
    "library": "scipy",
    "name": "_integrate_pdf",
    "source_code": "@lambda func: np.vectorize(func, otypes=[np.float64])\n@staticmethod\ndef _integrate_pdf(x0, x1, p, a, b):\n    user_data = np.array([p, a, b], float).ctypes.data_as(ctypes.c_void_p)\n    llc = LowLevelCallable.from_cython(_stats, '_genhyperbolic_pdf', user_data)\n    d = np.sqrt((a + b) * (a - b))\n    mean = b / d * sc.kv(p + 1, d) / sc.kv(p, d)\n    epsrel = 1e-10\n    epsabs = 0\n    if x0 < mean < x1:\n        intgrl = integrate.quad(llc, x0, mean, epsrel=epsrel, epsabs=epsabs)[0] + integrate.quad(llc, mean, x1, epsrel=epsrel, epsabs=epsabs)[0]\n    else:\n        intgrl = integrate.quad(llc, x0, x1, epsrel=epsrel, epsabs=epsabs)[0]\n    if np.isnan(intgrl):\n        msg = 'Infinite values encountered in scipy.special.kve. Values replaced by NaN to avoid incorrect results.'\n        warnings.warn(msg, RuntimeWarning, stacklevel=3)\n    return max(0.0, min(1.0, intgrl))",
    "docstring": "Integrate the pdf of the genhyberbolic distribution from x0 to x1. This is a private function used by _cdf() and _sf() only; either x0 will be -inf or x1 will be inf.",
    "type": "method",
    "file_path": "scipy\\scipy\\stats\\_continuous_distns.py",
    "ast_data": "FunctionDef name:_integrate_pdf arg:x0 arg:x1 arg:p arg:a arg:b arguments arg arg arg arg arg Assign Call Call Assign Call Assign Call Assign Call Call Assign Assign If Compare Assign Call Call Assign Call If Call Assign Call Return return:yes Call Call arguments arg Call"
  },
  {
    "library": "matplotlib",
    "name": "_non_decade_format",
    "source_code": "def _non_decade_format(self, sign_string, base, fx, usetex):\n    b = float(base)\n    exponent = math.floor(fx)\n    coeff = b ** (fx - exponent)\n    if _is_close_to_int(coeff):\n        coeff = round(coeff)\n    return '$\\\\mathdefault{%s%g\\\\times%s^{%d}}$' % (sign_string, coeff, base, exponent)",
    "docstring": "Return string for non-decade locations.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\ticker.py",
    "ast_data": "FunctionDef name:_non_decade_format arg:self arg:sign_string arg:base arg:fx arg:usetex arguments arg arg arg arg arg Assign Call Assign Call Assign If Call Assign Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "__init__",
    "source_code": "def __init__(self, shorthand_name=None):\n    self._parents = {}\n    self._invalid = self._INVALID_FULL\n    self._shorthand_name = shorthand_name or ''",
    "docstring": "Parameters ---------- shorthand_name : str A string representing the \"name\" of the transform. The name carries no significance other than to improve the readability of `` when DEBUG=True.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\transforms.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:shorthand_name arguments arg arg Assign Assign Assign BoolOp"
  },
  {
    "library": "scipy",
    "name": "_postsolve",
    "source_code": "def _postsolve(x, postsolve_args, complete=False):\n    c, A_ub, b_ub, A_eq, b_eq, bounds, x0, integrality = postsolve_args[0]\n    revstack, C, b_scale = postsolve_args[1:]\n    x = _unscale(x, C, b_scale)\n    n_x = bounds.shape[0]\n    if not complete and bounds is not None:\n        n_unbounded = 0\n        for i, bi in enumerate(bounds):\n            lbi = bi[0]\n            ubi = bi[1]\n            if lbi == -np.inf and ubi == np.inf:\n                n_unbounded += 1\n                x[i] = x[i] - x[n_x + n_unbounded - 1]\n            elif lbi == -np.inf:\n                x[i] = ubi - x[i]\n            else:\n                x[i] += lbi\n    x = x[:n_x]\n    for rev in reversed(revstack):\n        x = rev(x)\n    fun = x.dot(c)\n    with np.errstate(invalid='ignore'):\n        slack = b_ub - A_ub.dot(x)\n        con = b_eq - A_eq.dot(x)\n    return (x, fun, slack, con)",
    "docstring": "Given solution x to presolved, standard form linear program x, add fixed variables back into the problem and undo the variable substitutions to get solution to original linear program. Also, calculate the objective function value, slack in original upper bound constraints, and residuals in original equality constraints. Parameters ---------- x : 1-D array Solution vector to the standard-form problem. postsolve_args : tuple Data needed by _postsolve to convert the solution to the standard-form problem into the solution to the original problem, including: lp : A consisting of the following fields: c : 1D array The coefficients of the linear objective function to be minimized. A_ub : 2D array, optional The inequality constraint matrix. Each row of `x0`",
    "type": "function",
    "file_path": "scipy\\scipy\\optimize\\_linprog_util.py",
    "ast_data": "FunctionDef name:_postsolve arg:x arg:postsolve_args arg:complete arguments arg arg arg Assign Assign Assign Call Assign If BoolOp Compare Assign For Call Assign Assign If BoolOp Compare Compare Assign If Compare Assign Assign For Call Assign Call Assign Call With Call Assign Call Assign Call Return return:yes"
  },
  {
    "library": "pandas",
    "name": "__iter__",
    "source_code": "def __iter__(self) -> Iterator:\n    if not isinstance(self._values, np.ndarray):\n        return iter(self._values)\n    else:\n        return map(self._values.item, range(self._values.size))",
    "docstring": "Return an iterator of the values. These are each a scalar type, which is a Python scalar (for str, int, float) or a pandas scalar (for Timestamp/Timedelta/Interval/Period) Returns ------- iterator An iterator yielding scalar values from the Series. See Also -------- Series.items : Lazily iterate over (index, value) tuples. Examples -------- >>> s = pd.Series([1, 2, 3]) >>> for x in s: ... print(x) 1 2 3",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\base.py",
    "ast_data": "FunctionDef name:__iter__ arg:self arguments arg If Call Return return:yes Call Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "TensorArrayTraceType",
    "source_code": "class TensorArrayTraceType(trace.TraceType):\n\n    def __init__(self, value):\n        self._value = value\n\n    def is_subtype_of(self, other):\n        return self == other\n\n    def most_specific_common_supertype(self, types):\n        return self if all((self == other for other in types)) else None\n\n    def placeholder_value(self, placeholder_context):\n        return self._value\n\n    def flatten(self):\n        return [tensor_lib.TensorSpec([], dtypes.variant)]\n\n    def from_tensors(self, tensors):\n        return next(tensors)\n\n    def __eq__(self, other):\n        if not isinstance(other, trace.TraceType):\n            return NotImplemented\n        if not isinstance(other, TensorArrayTraceType):\n            return False\n        return self._value is other._value\n\n    def __hash__(self):\n        return id(self._value)\n\n    def __repr__(self):\n        return f'{self.__class__.__name__}(value={self._value!r})'",
    "docstring": "Represents TraceType of TensorArray.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\tensor_array_ops.py",
    "ast_data": "ClassDef name:TensorArrayTraceType FunctionDef name:__init__ arg:self arg:value arguments arg arg Assign FunctionDef name:is_subtype_of arg:self arg:other arguments arg arg Return return:yes Compare FunctionDef name:most_specific_common_supertype arg:self arg:types arguments arg arg Return return:yes Call Compare FunctionDef name:placeholder_value arg:self arg:placeholder_context arguments arg arg Return return:yes FunctionDef name:flatten arg:self arguments arg Return return:yes Call FunctionDef name:from_tensors arg:self arg:tensors arguments arg arg Return return:yes Call FunctionDef name:__eq__ arg:self arg:other arguments arg arg If Call Return return:yes If Call Return return:yes Return return:yes Compare FunctionDef name:__hash__ arg:self arguments arg Return return:yes Call FunctionDef name:__repr__ arg:self arguments arg Return return:yes"
  },
  {
    "library": "django",
    "name": "__set__",
    "source_code": "def __set__(self, instance, value):\n    if value is None:\n        rel_obj = self.related.get_cached_value(instance, default=None)\n        if rel_obj is not None:\n            self.related.delete_cached_value(instance)\n            setattr(rel_obj, self.related.field.name, None)\n    elif not isinstance(value, self.related.related_model):\n        raise ValueError('Cannot assign \"%r\": \"%s.%s\" must be a \"%s\" instance.' % (value, instance._meta.object_name, self.related.accessor_name, self.related.related_model._meta.object_name))\n    else:\n        if instance._state.db is None:\n            instance._state.db = router.db_for_write(instance.__class__, instance=value)\n        if value._state.db is None:\n            value._state.db = router.db_for_write(value.__class__, instance=instance)\n        if not router.allow_relation(value, instance):\n            raise ValueError('Cannot assign \"%r\": the current database router prevents this relation.' % value)\n        related_pk = tuple((getattr(instance, field.attname) for field in self.related.field.foreign_related_fields))\n        for index, field in enumerate(self.related.field.local_related_fields):\n            setattr(value, field.attname, related_pk[index])\n        self.related.set_cached_value(instance, value)\n        self.related.field.set_cached_value(value, instance)",
    "docstring": "Set the related instance through the reverse relation. With the example above, when setting ``.",
    "type": "method",
    "file_path": "django\\django\\db\\models\\fields\\related_descriptors.py",
    "ast_data": "FunctionDef name:__set__ arg:self arg:instance arg:value arguments arg arg arg If Compare Assign Call If Compare Call Call If Call Raise Call If Compare Assign Call If Compare Assign Call If Call Raise Call Assign Call Call For Call Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "get_print_tensor_argparser",
    "source_code": "def get_print_tensor_argparser(description):\n    ap = argparse.ArgumentParser(description=description, usage=argparse.SUPPRESS)\n    ap.add_argument('tensor_name', type=str, help='Name of the tensor, followed by any slicing indices, e.g., hidden1/Wx_plus_b/MatMul:0, hidden1/Wx_plus_b/MatMul:0[1, :]')\n    ap.add_argument('-n', '--number', dest='number', type=int, default=-1, help='0-based dump number for the specified tensor. Required for tensor with multiple dumps.')\n    ap.add_argument('-r', '--ranges', dest='ranges', type=str, default='', help='Numerical ranges to highlight tensor elements in. Examples: -r 0,1e-8, -r [-0.1,0.1], -r \"[[-inf, -0.1], [0.1, inf]]\"')\n    ap.add_argument('-a', '--all', dest='print_all', action='store_true', help='Print the tensor in its entirety, i.e., do not use ellipses.')\n    ap.add_argument('-s', '--numeric_summary', action='store_true', help='Include summary for non-empty tensors of numeric (int*, float*, complex*) and Boolean types.')\n    ap.add_argument('-w', '--write_path', type=str, default='', help='Path of the numpy file to write the tensor data to, using numpy.save().')\n    return ap",
    "docstring": "Get an ArgumentParser for a command that prints tensor values. Examples of such commands include print_tensor and print_feed. Args: description: Description of the ArgumentParser. Returns: An instance of argparse.ArgumentParser.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\debug\\cli\\command_parser.py",
    "ast_data": "FunctionDef name:get_print_tensor_argparser arg:description arguments arg Assign Call Call Call Call Call Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "memory_limit",
    "source_code": "@property\ndef memory_limit(self):\n    return self._memory_limit",
    "docstring": "The maximum number of bytes of this staging area.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\data_flow_ops.py",
    "ast_data": "FunctionDef name:memory_limit arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "__is_compatible",
    "source_code": "@staticmethod\ndef __is_compatible(a, b):\n    if isinstance(a, TypeSpec):\n        return a.is_compatible_with(b)\n    if not TypeSpec.__same_types(a, b):\n        return False\n    if isinstance(a, (list, tuple)):\n        return len(a) == len(b) and all((TypeSpec.__is_compatible(x, y) for x, y in zip(a, b)))\n    if isinstance(a, dict):\n        return len(a) == len(b) and sorted(a.keys()) == sorted(b.keys()) and all((TypeSpec.__is_compatible(a[k], b[k]) for k in a.keys()))\n    if isinstance(a, (tensor_shape.TensorShape, dtypes.DType)):\n        return a.is_compatible_with(b)\n    return a == b",
    "docstring": "Returns true if the given type serializations compatible.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\type_spec.py",
    "ast_data": "FunctionDef name:__is_compatible arg:a arg:b arguments arg arg If Call Return return:yes Call If Call Return return:yes If Call Return return:yes BoolOp Compare Call Call Call Call Call If Call Return return:yes BoolOp Compare Call Call Compare Call Call Call Call Call Call Call If Call Return return:yes Call Return return:yes Compare"
  },
  {
    "library": "pytorch",
    "name": "get_swap_module_params_on_conversion",
    "source_code": "def get_swap_module_params_on_conversion() -> bool:\n    return _swap_module_params_on_conversion",
    "docstring": "Returns whether to use :func: instead of setting .data to change the existing parameters in-place when converting an `~torch.__future__.set_swap_module_params_on_conversion` for more information.",
    "type": "function",
    "file_path": "pytorch\\torch\\__future__.py",
    "ast_data": "FunctionDef name:get_swap_module_params_on_conversion arguments Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "fill_object_graph_proto",
    "source_code": "def fill_object_graph_proto(self, proto: saved_object_graph_pb2.SavedObjectGraph):\n    for node_id, node in enumerate(self.nodes):\n        assert self.node_ids[node] == node_id\n        object_proto = proto.nodes.add()\n        object_proto.slot_variables.extend(self._slot_variables.get(node, ()))\n        if isinstance(node, _CapturedTensor):\n            continue\n        for child in self.augmented_graph_view.list_children(node):\n            child_proto = object_proto.children.add()\n            child_proto.node_id = self.node_ids[child.ref]\n            child_proto.local_name = child.name\n        for name, ref in self.augmented_graph_view.list_dependencies(node):\n            child_proto = object_proto.dependencies.add()\n            child_proto.node_id = self.node_ids[ref]\n            child_proto.local_name = name\n        if node in self._saveable_objects_map:\n            assert node not in self._obj_to_registered_saver, \"Objects can't have both SaveableObjects and a registered saver\"\n            for local_name, (save_fn, restore_fn) in self._saveable_objects_map[node].items():\n                saveable_object_proto = object_proto.saveable_objects[local_name]\n                saveable_object_proto.save_function = self.node_ids[save_fn]\n                saveable_object_proto.restore_function = self.node_ids[restore_fn]\n        elif node in self._obj_to_registered_saver:\n            object_proto.registered_saver = self._obj_to_registered_saver[node]",
    "docstring": "Populate the nodes, children and slot_variables of a SavedObjectGraph.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\saved_model\\save.py",
    "ast_data": "FunctionDef name:fill_object_graph_proto arg:self arg:proto arguments arg arg For Call Compare Assign Call Call Call If Call For Call Assign Call Assign Assign For Call Assign Call Assign Assign If Compare Compare For Call Assign Assign Assign If Compare Assign"
  },
  {
    "library": "matplotlib",
    "name": "get_rgba",
    "source_code": "@classmethod\ndef get_rgba(cls, tex, fontsize=None, dpi=None, rgb=(0, 0, 0)):\n    alpha = cls.get_grey(tex, fontsize, dpi)\n    rgba = np.empty((*alpha.shape, 4))\n    rgba[..., :3] = mpl.colors.to_rgb(rgb)\n    rgba[..., -1] = alpha\n    return rgba",
    "docstring": "Return latex's rendering of the tex string as an RGBA array. Examples -------- >>> texmanager = TexManager() >>> s = r\"\\TeX\\ is $\\displaystyle\\sum_n\\frac{-e^{i\\pi}}{2^n}$!\" >>> Z = texmanager.get_rgba(s, fontsize=12, dpi=80, rgb=(1, 0, 0))",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\texmanager.py",
    "ast_data": "FunctionDef name:get_rgba arg:cls arg:tex arg:fontsize arg:dpi arg:rgb arguments arg arg arg arg arg Assign Call Assign Call Assign Call Assign Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "_transform",
    "source_code": "def _transform(self, X, dictionary):\n    X = validate_data(self, X, reset=False)\n    if hasattr(self, 'alpha') and self.transform_alpha is None:\n        transform_alpha = self.alpha\n    else:\n        transform_alpha = self.transform_alpha\n    code = sparse_encode(X, dictionary, algorithm=self.transform_algorithm, n_nonzero_coefs=self.transform_n_nonzero_coefs, alpha=transform_alpha, max_iter=self.transform_max_iter, n_jobs=self.n_jobs, positive=self.positive_code)\n    if self.split_sign:\n        n_samples, n_features = code.shape\n        split_code = np.empty((n_samples, 2 * n_features))\n        split_code[:, :n_features] = np.maximum(code, 0)\n        split_code[:, n_features:] = -np.minimum(code, 0)\n        code = split_code\n    return code",
    "docstring": "Private method allowing to accommodate both DictionaryLearning and SparseCoder.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\decomposition\\_dict_learning.py",
    "ast_data": "FunctionDef name:_transform arg:self arg:X arg:dictionary arguments arg arg arg Assign Call If BoolOp Call Compare Assign Assign Assign Call If Assign Assign Call Assign Call Assign Call Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_ExtractOutputStructure",
    "source_code": "def _ExtractOutputStructure(op_type_name, op_def, attr_protos, output_structure):\n    for arg in op_def.output_arg:\n        if arg.number_attr:\n            n = _AttrValue(attr_protos, arg.number_attr, op_type_name).i\n            output_structure.append(n)\n        elif arg.type_attr:\n            t = _AttrValue(attr_protos, arg.type_attr, op_type_name)\n            output_structure.append(None)\n        elif arg.type_list_attr:\n            t = _AttrValue(attr_protos, arg.type_list_attr, op_type_name)\n            output_structure.append(len(t.list.type))\n        else:\n            output_structure.append(None)",
    "docstring": "Extracts . For use in _apply_op_helper.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\op_def_library.py",
    "ast_data": "FunctionDef name:_ExtractOutputStructure arg:op_type_name arg:op_def arg:attr_protos arg:output_structure arguments arg arg arg arg For If Assign Call Call If Assign Call Call If Assign Call Call Call Call"
  },
  {
    "library": "pandas",
    "name": "write",
    "source_code": "@doc(storage_options=_shared_docs['storage_options'])\ndef write(self, writer: FilePath | WriteExcelBuffer | ExcelWriter, sheet_name: str='Sheet1', startrow: int=0, startcol: int=0, freeze_panes: tuple[int, int] | None=None, engine: str | None=None, storage_options: StorageOptions | None=None, engine_kwargs: dict | None=None) -> None:\n    from pandas.io.excel import ExcelWriter\n    num_rows, num_cols = self.df.shape\n    if num_rows > self.max_rows or num_cols > self.max_cols:\n        raise ValueError(f'This sheet is too large! Your sheet size is: {num_rows}, {num_cols} Max sheet size is: {self.max_rows}, {self.max_cols}')\n    if engine_kwargs is None:\n        engine_kwargs = {}\n    formatted_cells = self.get_formatted_cells()\n    if isinstance(writer, ExcelWriter):\n        need_save = False\n    else:\n        writer = ExcelWriter(writer, engine=engine, storage_options=storage_options, engine_kwargs=engine_kwargs)\n        need_save = True\n    try:\n        writer._write_cells(formatted_cells, sheet_name, startrow=startrow, startcol=startcol, freeze_panes=freeze_panes)\n    finally:\n        if need_save:\n            writer.close()",
    "docstring": "writer : path-like, file-like, or ExcelWriter object File path or existing ExcelWriter sheet_name : str, default 'Sheet1' Name of sheet which will contain DataFrame startrow : upper left cell row to dump data frame startcol : upper left cell column to dump data frame freeze_panes : tuple of integer (length 2), default None Specifies the one-based bottommost row and rightmost column that is to be frozen engine : string, default None write engine to use if writer is a path - you can also set this via the options ``. {storage_options} engine_kwargs: dict, optional Arbitrary keyword arguments passed to excel engine.",
    "type": "method",
    "file_path": "pandas\\pandas\\io\\formats\\excel.py",
    "ast_data": "FunctionDef name:write arg:self arg:writer arg:sheet_name arg:startrow arg:startcol arg:freeze_panes arg:engine arg:storage_options arg:engine_kwargs arguments arg arg arg arg arg arg arg arg arg Assign If BoolOp Compare Compare Raise Call If Compare Assign Assign Call If Call Assign Assign Call Assign Try Call If Call Call"
  },
  {
    "library": "pandas",
    "name": "__init__",
    "source_code": "def __init__(self, index_array: np.ndarray | None=None, window_size: int | BaseIndexer=0, groupby_indices: dict | None=None, window_indexer: type[BaseIndexer]=BaseIndexer, indexer_kwargs: dict | None=None, **kwargs) -> None:\n    self.groupby_indices = groupby_indices or {}\n    self.window_indexer = window_indexer\n    self.indexer_kwargs = indexer_kwargs.copy() if indexer_kwargs else {}\n    super().__init__(index_array=index_array, window_size=self.indexer_kwargs.pop('window_size', window_size), **kwargs)",
    "docstring": "Parameters ---------- index_array : np.ndarray or None np.ndarray of the index of the original object that we are performing a chained groupby operation over. This index has been pre-sorted relative to the groups window_size : int or BaseIndexer window size during the windowing operation groupby_indices : dict or None dict of {group label: [positional index of rows belonging to the group]} window_indexer : BaseIndexer BaseIndexer class determining the start and end bounds of each group indexer_kwargs : dict or None Custom kwargs to be passed to window_indexer **kwargs : keyword arguments that will be available when get_window_bounds is called",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\indexers\\objects.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:index_array arg:window_size arg:groupby_indices arg:window_indexer arg:indexer_kwargs arguments arg arg arg arg arg arg arg Assign BoolOp Assign Assign Call Call Call Call"
  },
  {
    "library": "scrapy",
    "name": "call_spider_async",
    "source_code": "async def call_spider_async(self, result: Response | Failure, request: Request) -> Iterable[Any] | AsyncIterator[Any]:\n    await maybe_deferred_to_future(_defer_sleep())\n    assert self.crawler.spider\n    if isinstance(result, Response):\n        if getattr(result, 'request', None) is None:\n            result.request = request\n        assert result.request\n        callback = result.request.callback or self.crawler.spider._parse\n        warn_on_generator_with_return_value(self.crawler.spider, callback)\n        output = callback(result, **result.request.cb_kwargs)\n    else:\n        result.request = request\n        if not request.errback:\n            result.raiseException()\n        warn_on_generator_with_return_value(self.crawler.spider, request.errback)\n        output = request.errback(result)\n        if isinstance(output, Failure):\n            output.raiseException()\n    return await maybe_deferred_to_future(maybeDeferred(iterate_spider_output, output))",
    "docstring": "Call the request callback or errback with the response or failure.",
    "type": "method",
    "file_path": "scrapy\\scrapy\\core\\scraper.py",
    "ast_data": "AsyncFunctionDef name:call_spider_async arg:self arg:result arg:request arguments arg arg arg Call Call If Call If Compare Call Assign Assign BoolOp Call Assign Call Assign If Call Call Assign Call If Call Call Return return:yes Call Call"
  },
  {
    "library": "scipy",
    "name": "bisect",
    "source_code": "def bisect(f, a, b, args=(), xtol=_xtol, rtol=_rtol, maxiter=_iter, full_output=False, disp=True):\n    if not isinstance(args, tuple):\n        args = (args,)\n    maxiter = operator.index(maxiter)\n    if xtol <= 0:\n        raise ValueError(f'xtol too small ({xtol:g} <= 0)')\n    if rtol < _rtol:\n        raise ValueError(f'rtol too small ({rtol:g} < {_rtol:g})')\n    f = _wrap_nan_raise(f)\n    r = _zeros._bisect(f, a, b, xtol, rtol, maxiter, args, full_output, disp)\n    return results_c(full_output, r, 'bisect')",
    "docstring": "Find root of a function within an interval using bisection. Basic bisection routine to find a root of the function between the arguments and . and cannot have the same signs. Slow but sure. Parameters ---------- f : function Python function returning a number. must be continuous, and f(a) and f(b) must have opposite signs. a : scalar One end of the bracketing interval [a,b]. b : scalar The other end of the bracketing interval [a,b]. xtol : number, optional The computed root `maxiterfffull_outputfull_outputRootResultsRootResultsfabRootResults`abs(x - x0) >> def f(x): ... return (x**2 - 1) >>> from scipy import optimize >>> root = optimize.bisect(f, 0, 2) >>> root 1.0 >>> root = optimize.bisect(f, -2, 0) >>> root -1.0 See Also -------- brentq, brenth, bisect, newton fixed_point : scalar fixed-point finder fsolve : n-dimensional root-finding elementwise.find_root : efficient elementwise 1-D root-finder",
    "type": "function",
    "file_path": "scipy\\scipy\\optimize\\_zeros_py.py",
    "ast_data": "FunctionDef name:bisect arg:f arg:a arg:b arg:args arg:xtol arg:rtol arg:maxiter arg:full_output arg:disp arguments arg arg arg arg arg arg arg arg arg If Call Assign Assign Call If Compare Raise Call If Compare Raise Call Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "kornia",
    "name": "SegmentationResults",
    "source_code": "@dataclass\nclass SegmentationResults:\n    logits: Tensor\n    scores: Tensor\n    mask_threshold: float = 0.0\n\n    @property\n    def binary_masks(self) -> Tensor:\n        if self._original_res_logits is not None:\n            x = self._original_res_logits\n        else:\n            x = self.logits\n        return x > self.mask_threshold\n\n    def original_res_logits(self, input_size: tuple[int, int], original_size: tuple[int, int], image_size_encoder: Optional[tuple[int, int]]) -> Tensor:\n        x = self.logits\n        if isinstance(image_size_encoder, tuple):\n            x = resize(x, size=image_size_encoder, interpolation='bilinear', align_corners=False, antialias=False)\n        x = x[..., :input_size[0], :input_size[1]]\n        x = resize(x, size=original_size, interpolation='bilinear', align_corners=False, antialias=False)\n        self._original_res_logits = x\n        return self._original_res_logits\n\n    def squeeze(self, dim: int=0) -> SegmentationResults:\n        self.logits = self.logits.squeeze(dim)\n        self.scores = self.scores.squeeze(dim)\n        if isinstance(self._original_res_logits, Tensor):\n            self._original_res_logits = self._original_res_logits.squeeze(dim)\n        return self",
    "docstring": "Encapsulate the results obtained by a Segmentation model. Args: logits: Results logits with shape :math:, where :math: refers to the number of predicted masks scores: The scores from the logits. Shape :math: mask_threshold: The threshold value to generate the from the",
    "type": "class",
    "file_path": "kornia\\kornia\\contrib\\models\\structures.py",
    "ast_data": "ClassDef name:SegmentationResults FunctionDef name:binary_masks arg:self arguments arg If Compare Assign Assign Return return:yes Compare FunctionDef name:original_res_logits arg:self arg:input_size arg:original_size arg:image_size_encoder arguments arg arg arg arg Assign If Call Assign Call Assign Assign Call Assign Return return:yes FunctionDef name:squeeze arg:self arg:dim arguments arg arg Assign Call Assign Call If Call Assign Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "rotate_axes",
    "source_code": "def rotate_axes(xs, ys, zs, zdir):\n    if zdir in ('x', '-y'):\n        return (ys, zs, xs)\n    elif zdir in ('-x', 'y'):\n        return (zs, xs, ys)\n    else:\n        return (xs, ys, zs)",
    "docstring": "Reorder coordinates so that the axes are rotated with *zdir* along the original z axis. Prepending the axis with a '-' does the inverse transform, so *zdir* can be 'x', '-x', 'y', '-y', 'z' or '-z'.",
    "type": "function",
    "file_path": "matplotlib\\lib\\mpl_toolkits\\mplot3d\\art3d.py",
    "ast_data": "FunctionDef name:rotate_axes arg:xs arg:ys arg:zs arg:zdir arguments arg arg arg arg If Compare Return return:yes If Compare Return return:yes Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "info_dict",
    "source_code": "def info_dict(self) -> dict[str, Union[PrimitiveInfoType, list[PrimitiveInfoType]]]:\n    return {'backend': 'extern', 'kernel_call_name': self.choice.call_name()}",
    "docstring": "Information returned here is logged to the autotune log file when that is enabled.",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\select_algorithm.py",
    "ast_data": "FunctionDef name:info_dict arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "kornia",
    "name": "distance",
    "source_code": "def distance(self, point: Tensor) -> Tensor:\n    return self.squared_distance(point).sqrt()",
    "docstring": "Return the distance of a point to its projections onto the line. Args: point: the point to calculate the distance into the line.",
    "type": "method",
    "file_path": "kornia\\kornia\\geometry\\line.py",
    "ast_data": "FunctionDef name:distance arg:self arg:point arguments arg arg Return return:yes Call Call"
  },
  {
    "library": "sphinx",
    "name": "build_navigation_doc",
    "source_code": "def build_navigation_doc(self) -> None:\n    logger.info(__('writing nav.xhtml file...'))\n    if self.config.epub_tocscope == 'default':\n        doctree = self.env.get_and_resolve_doctree(self.config.root_doc, self, prune_toctrees=False, includehidden=False)\n        refnodes = self.get_refnodes(doctree, [])\n        self.toc_add_files(refnodes)\n    else:\n        refnodes = self.refnodes\n    navlist = self.build_navlist(refnodes)\n    copy_asset_file(self.template_dir / 'nav.xhtml.jinja', self.outdir, context=self.navigation_doc_metadata(navlist), force=True)\n    if 'nav.xhtml' not in self.files:\n        self.files.append('nav.xhtml')",
    "docstring": "Write the metainfo file nav.xhtml.",
    "type": "method",
    "file_path": "sphinx\\sphinx\\builders\\epub3.py",
    "ast_data": "FunctionDef name:build_navigation_doc arg:self arguments arg Call Call If Compare Assign Call Assign Call Call Assign Assign Call Call Call If Compare Call"
  },
  {
    "library": "tensorflow",
    "name": "_assign_dependencies",
    "source_code": "@contextlib.contextmanager\ndef _assign_dependencies(self):\n    if self._cached_value is not None:\n        with ops.control_dependencies([self._cached_value]):\n            yield\n    else:\n        yield",
    "docstring": "Makes assignments depend on the cached value, if any. This prevents undefined behavior with reads not ordered wrt writes. Yields: None.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\resource_variable_ops.py",
    "ast_data": "FunctionDef name:_assign_dependencies arg:self arguments arg If Compare With Call"
  },
  {
    "library": "pytorch",
    "name": "ELU",
    "source_code": "class ELU(Module):\n    __constants__ = ['alpha', 'inplace']\n    alpha: float\n    inplace: bool\n\n    def __init__(self, alpha: float=1.0, inplace: bool=False) -> None:\n        super().__init__()\n        self.alpha = alpha\n        self.inplace = inplace\n\n    def forward(self, input: Tensor) -> Tensor:\n        return F.elu(input, self.alpha, self.inplace)\n\n    def extra_repr(self) -> str:\n        inplace_str = ', inplace=True' if self.inplace else ''\n        return f'alpha={self.alpha}{inplace_str}'",
    "docstring": "Applies the Exponential Linear Unit (ELU) function, element-wise. Method described in the paper: __. ELU is defined as: .. math:: \\text{ELU}(x) = \\begin{cases} x, & \\text{ if } x > 0\\\\ \\alpha * (\\exp(x) - 1), & \\text{ if } x \\leq 0 \\end{cases} Args: alpha: the :math: value for the ELU formulation. Default: 1.0 inplace: can optionally do the operation in-place. Default: `(*)*(*)`, same shape as the input. .. image:: ../scripts/activation_images/ELU.png Examples:: >>> m = nn.ELU() >>> input = torch.randn(2) >>> output = m(input)",
    "type": "class",
    "file_path": "pytorch\\torch\\nn\\modules\\activation.py",
    "ast_data": "ClassDef name:ELU Assign FunctionDef name:__init__ arg:self arg:alpha arg:inplace arguments arg arg arg Call Call Assign Assign FunctionDef name:forward arg:self arg:input arguments arg arg Return return:yes Call FunctionDef name:extra_repr arg:self arguments arg Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "write_build_info",
    "source_code": "def write_build_info(filename, key_value_list):\n    build_info = {}\n    if cuda_config:\n        build_info.update(cuda_config.config)\n    if tensorrt_config:\n        build_info.update(tensorrt_config.config)\n    for arg in key_value_list:\n        key, value = arg.split('=')\n        if value.lower() == 'true':\n            build_info[key] = True\n        elif value.lower() == 'false':\n            build_info[key] = False\n        else:\n            build_info[key] = value.format(**build_info)\n    sorted_build_info_pairs = sorted(build_info.items())\n    contents = '\\n# Copyright 2020 The TensorFlow Authors. All Rights Reserved.\\n#\\n# Licensed under the Apache License, Version 2.0 (the \"License\");\\n# you may not use this file except in compliance with the License.\\n# You may obtain a copy of the License at\\n#\\n#     http://www.apache.org/licenses/LICENSE-2.0\\n#\\n# Unless required by applicable law or agreed to in writing, software\\n# distributed under the License is distributed on an \"AS IS\" BASIS,\\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\\n# See the License for the specific language governing permissions and\\n# limitations under the License.\\n# ==============================================================================\\n\"\"\"Auto-generated module providing information about the build.\"\"\"\\nimport collections\\n\\nbuild_info = collections.OrderedDict(%s)\\n' % sorted_build_info_pairs\n    open(filename, 'w').write(contents)",
    "docstring": "Writes a Python that describes the build. Args: filename: filename to write to. key_value_list: A list of \"key=value\" strings that will be added to the module's \"build_info\" dictionary as additional entries.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\tools\\build_info\\gen_build_info.py",
    "ast_data": "FunctionDef name:write_build_info arg:filename arg:key_value_list arguments arg arg Assign If Call If Call For Assign Call If Compare Call Assign If Compare Call Assign Assign Call Assign Call Call Assign Call Call"
  },
  {
    "library": "pytorch",
    "name": "finalize_indexing",
    "source_code": "def finalize_indexing(self, indices: Sequence[sympy.Expr]) -> None:\n    pass",
    "docstring": "Hook called right before codegen with every index that will be used in the fused kernel.",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\codegen\\simd.py",
    "ast_data": "FunctionDef name:finalize_indexing arg:self arg:indices arguments arg arg"
  },
  {
    "library": "pytorch",
    "name": "force_lazy_device",
    "source_code": "def force_lazy_device(model: fx.GraphModule):\n\n    def tolazydevice(dev):\n        if isinstance(dev, torch.device):\n            return torch.device('lazy', index=dev.index)\n        return dev\n\n    def hasDeviceArg(args, kwargs):\n        return any((isinstance(arg, torch.device) for arg in itertools.chain(args, kwargs.values())))\n    for nd in model.graph.nodes:\n        nd.args = tuple((tolazydevice(arg) for arg in nd.args))\n        nd.kwargs = {k: tolazydevice(v) for k, v in nd.kwargs.items()}\n        if nd.target in tensor_factory_functions and (not hasDeviceArg(nd.args, nd.kwargs)):\n            kwargs = dict(nd.kwargs)\n            kwargs['device'] = torch.device('lazy')\n            nd.kwargs = kwargs\n    model.recompile()",
    "docstring": "Factory methods in a Fx graph may create tensors for a specific eager devices. If we take no actions, those eager tensors will be mixed with lazy tensors and cause crash. This method overwrite those eager device to lazy device.",
    "type": "function",
    "file_path": "pytorch\\torch\\_lazy\\extract_compiled_graph.py",
    "ast_data": "FunctionDef name:force_lazy_device arg:model arguments arg FunctionDef name:tolazydevice arg:dev arguments arg If Call Return return:yes Call Return return:yes FunctionDef name:hasDeviceArg arg:args arg:kwargs arguments arg arg Return return:yes Call Call Call Call For Assign Call Call Assign Call Call If BoolOp Compare Call Assign Call Assign Call Assign Call"
  },
  {
    "library": "tensorflow",
    "name": "_get_checkpoint_size",
    "source_code": "def _get_checkpoint_size(prefix):\n    size = 0\n    files = glob.glob('{}*'.format(prefix))\n    for file in files:\n        size += metrics.CalculateFileSize(file)\n    return size",
    "docstring": "Calculates filesize of checkpoint based on prefix.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\training\\saver.py",
    "ast_data": "FunctionDef name:_get_checkpoint_size arg:prefix arguments arg Assign Assign Call Call For Call Return return:yes"
  },
  {
    "library": "cherrypy",
    "name": "Root",
    "source_code": "@cherrypy.config(**{'tools.log_tracebacks.on': True})\nclass Root:\n\n    @cherrypy.expose\n    def index(self):\n        return \"<html>\\n<body>Try some <a href='%s?a=7'>other</a> path,\\nor a <a href='%s?n=14'>default</a> path.<br />\\nOr, just look at the pretty picture:<br />\\n<img src='%s' />\\n</body></html>\" % (url('other'), url('else'), url('files/made_with_cherrypy_small.png'))\n\n    @cherrypy.expose\n    def default(self, *args, **kwargs):\n        return 'args: %s kwargs: %s' % (args, kwargs)\n\n    @cherrypy.expose\n    def other(self, a=2, b='bananas', c=None):\n        cherrypy.response.headers['Content-Type'] = 'text/plain'\n        if c is None:\n            return 'Have %d %s.' % (int(a), b)\n        else:\n            return 'Have %d %s, %s.' % (int(a), b, c)\n    files = tools.staticdir.handler(section='/files', dir=os.path.join(local_dir, 'static'), match='\\\\.(css|gif|html?|ico|jpe?g|js|png|swf|xml)$')",
    "docstring": "Declaration of the CherryPy app URI structure.",
    "type": "class",
    "file_path": "cherrypy\\cherrypy\\scaffold\\__init__.py",
    "ast_data": "ClassDef name:Root FunctionDef name:index arg:self arguments arg Return return:yes Call Call Call FunctionDef name:default arg:self arguments arg arg arg Return return:yes FunctionDef name:other arg:self arg:a arg:b arg:c arguments arg arg arg arg Assign If Compare Return return:yes Call Return return:yes Call Assign Call Call Call"
  },
  {
    "library": "django",
    "name": "check_constraints",
    "source_code": "def check_constraints(self, table_names=None):\n    with self.cursor() as cursor:\n        cursor.execute('SET CONSTRAINTS ALL IMMEDIATE')\n        cursor.execute('SET CONSTRAINTS ALL DEFERRED')",
    "docstring": "Check constraints by setting them to immediate. Return them to deferred afterward.",
    "type": "method",
    "file_path": "django\\django\\db\\backends\\postgresql\\base.py",
    "ast_data": "FunctionDef name:check_constraints arg:self arg:table_names arguments arg arg With Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_replace_row_partitions",
    "source_code": "def _replace_row_partitions(value, new_partitions):\n    if isinstance(value, tensor.Tensor) or not new_partitions:\n        return value\n    elif isinstance(value, ragged_tensor.RaggedTensor):\n        return ragged_tensor.RaggedTensor._from_row_partition(values=_replace_row_partitions(value.values, new_partitions[1:]), row_partition=new_partitions[0])\n    else:\n        assert isinstance(value, StructuredTensor)\n        new_fields = dict(((k, _replace_row_partitions(v, new_partitions)) for k, v in value._fields.items()))\n        return StructuredTensor._old_init(fields=new_fields, shape=value.shape, nrows=value.nrows(), row_partitions=tuple(new_partitions) + tuple(value.row_partitions[len(new_partitions):]))",
    "docstring": "Updates to use as its (outer) row partitions. This is used to ensure that all fields in a use identical objects for the shared dimensions. In particular, first merges all of the row partitions from any fields, and then replaces the outer row partitions of all fields with the merged row partitions (using this function). Args: value: A , , or . new_partitions: A list of row-partitions that should be used by . Must be equivalent to 's current row partitions. Returns: A value that is equivalent to , where outer row partitions have been replaced by .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\structured\\structured_tensor.py",
    "ast_data": "FunctionDef name:_replace_row_partitions arg:value arg:new_partitions arguments arg arg If BoolOp Call Return return:yes If Call Return return:yes Call Call Call Assign Call Call Call Return return:yes Call Call Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "torch_save_to_dcp",
    "source_code": "def torch_save_to_dcp(torch_save_path: Union[str, os.PathLike], dcp_checkpoint_dir: Union[str, os.PathLike]):\n    state_dict = torch.load(torch_save_path, weights_only=False)\n    _save_state_dict(state_dict, storage_writer=FileSystemWriter(dcp_checkpoint_dir), no_dist=True)",
    "docstring": "Given the location of a torch save file, converts it into a DCP checkpoint. Args: torch_save_path: Filename of the Torch save file. dcp_checkpoint_dir: Directory to store the DCP checkpoint. .. warning:: To avoid OOM, it's recommended to only run this function on a single rank.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\checkpoint\\format_utils.py",
    "ast_data": "FunctionDef name:torch_save_to_dcp arg:torch_save_path arg:dcp_checkpoint_dir arguments arg arg Assign Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_compute_size_of_strided_dim",
    "source_code": "def _compute_size_of_strided_dim(shrink, spec, size):\n    unknown = None\n    use_full_range = None\n    if shrink:\n        return 1\n    if size is unknown or size.value is unknown:\n        return unknown\n    size = size.value\n    stride = spec.step\n    if stride is not unknown:\n        if stride == 0:\n            return unknown\n        stride = spec.step\n        valid_range = [0, size] if stride > 0 else [-1, size - 1]\n\n        def canonical(x, c):\n            if x is use_full_range:\n                return valid_range[c] if stride > 0 else valid_range[c + 1 & 1]\n            else:\n                x_fwd = size + x if x < 0 else x\n                return max(valid_range[0], min(valid_range[1], x_fwd))\n        begin = canonical(spec.start, 0)\n        end = canonical(spec.stop, 1)\n        interval_length = end - begin\n        if interval_length == 0 or (interval_length < 0) != (stride < 0):\n            return 0\n        else:\n            remainder = 1 if interval_length % stride != 0 else 0\n            return interval_length // stride + remainder\n    else:\n        return unknown",
    "docstring": "Computes the size of a single strided slice dimension.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\array_ops.py",
    "ast_data": "FunctionDef name:_compute_size_of_strided_dim arg:shrink arg:spec arg:size arguments arg arg arg Assign Assign If Return return:yes If BoolOp Compare Compare Return return:yes Assign Assign If Compare If Compare Return return:yes Assign Assign Compare FunctionDef name:canonical arg:x arg:c arguments arg arg If Compare Return return:yes Compare Assign Compare Return return:yes Call Call Assign Call Assign Call Assign If BoolOp Compare Compare Compare Compare Return return:yes Assign Compare Return return:yes Return return:yes"
  },
  {
    "library": "scipy",
    "name": "_funm_multiply_krylov_lanczos",
    "source_code": "def _funm_multiply_krylov_lanczos(A, b, bnorm, V, H, m):\n    dotprod = np.vdot if np.iscomplexobj(b) else np.dot\n    norm_tol = np.finfo(b.dtype.char).eps ** 2\n    V[:, 0] = b / bnorm\n    for k in range(0, m):\n        if k > 0:\n            V[:, k + 1] = A.dot(V[:, k]) - H[k, k - 1] * V[:, k - 1]\n        else:\n            V[:, k + 1] = A.dot(V[:, k])\n        H[k, k] = dotprod(V[:, k + 1], V[:, k])\n        V[:, k + 1] = V[:, k + 1] - H[k, k] * V[:, k]\n        H[k + 1, k] = norm(V[:, k + 1])\n        if H[k + 1, k] < norm_tol:\n            return (True, k)\n        V[:, k + 1] = V[:, k + 1] / H[k + 1, k]\n        if k < m - 1:\n            H[k, k + 1] = H[k + 1, k]\n    return (False, m)",
    "docstring": "The Lanczos iteration for constructing the basis V and the projection H = V * A V for the Krylov subspace Km(A, b) of order m. A must be Hermitian. Parameters ---------- A : transposable linear operator The operator whose matrix function is of interest. b : ndarray The vector b to multiply the f(A) with. V : ndarray The n x (m + 1) matrix whose columns determines the basis for Krylov subspace Km(A, b). H : ndarray A (m + 1) x m upper Hessenberg matrix representing the projection of A onto Km(A, b). m : int The order of the Krylov subspace. Returns ------- breakdown : bool Indicate if the Arnoldi broke down or not iter : int Returns the last valid iteration.",
    "type": "function",
    "file_path": "scipy\\scipy\\sparse\\linalg\\_funm_multiply_krylov.py",
    "ast_data": "FunctionDef name:_funm_multiply_krylov_lanczos arg:A arg:b arg:bnorm arg:V arg:H arg:m arguments arg arg arg arg arg arg Assign Call Assign Call Assign For Call If Compare Assign Call Assign Call Assign Call Assign Assign Call If Compare Return return:yes Assign If Compare Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "summary_computed",
    "source_code": "def summary_computed(self, sess, summary, global_step=None):\n    if not self._summary_writer:\n        raise RuntimeError('Writing a summary requires a summary writer.')\n    if global_step is None and self.global_step is not None:\n        global_step = training_util.global_step(sess, self.global_step)\n    self._summary_writer.add_summary(summary, global_step)",
    "docstring": "Indicate that a summary was computed. Args: sess: A object. summary: A Summary proto, or a string holding a serialized summary proto. global_step: Int. global step this summary is associated with. If , it will try to fetch the current step. Raises: TypeError: if 'summary' is not a Summary proto or a string. RuntimeError: if the Supervisor was created without a .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\training\\supervisor.py",
    "ast_data": "FunctionDef name:summary_computed arg:self arg:sess arg:summary arg:global_step arguments arg arg arg arg If Raise Call If BoolOp Compare Compare Assign Call Call"
  },
  {
    "library": "matplotlib",
    "name": "set_linelength",
    "source_code": "def set_linelength(self, linelength):\n    if linelength == self.get_linelength():\n        return\n    lineoffset = self.get_lineoffset()\n    segments = self.get_segments()\n    pos = 1 if self.is_horizontal() else 0\n    for segment in segments:\n        segment[0, pos] = lineoffset + linelength / 2.0\n        segment[1, pos] = lineoffset - linelength / 2.0\n    self.set_segments(segments)\n    self._linelength = linelength",
    "docstring": "Set the length of the lines used to mark each event.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\collections.py",
    "ast_data": "FunctionDef name:set_linelength arg:self arg:linelength arguments arg arg If Compare Call Return return:no Assign Call Assign Call Assign Call For Assign Assign Call Assign"
  },
  {
    "library": "django",
    "name": "select_format",
    "source_code": "def select_format(self, compiler, sql, params):\n    if hasattr(self.output_field, 'select_format'):\n        return self.output_field.select_format(compiler, sql, params)\n    return (sql, params)",
    "docstring": "Custom format for select clauses. For example, EXISTS expressions need to be wrapped in CASE WHEN on Oracle.",
    "type": "method",
    "file_path": "django\\django\\db\\models\\expressions.py",
    "ast_data": "FunctionDef name:select_format arg:self arg:compiler arg:sql arg:params arguments arg arg arg arg If Call Return return:yes Call Return return:yes"
  },
  {
    "library": "cryptography",
    "name": "update_into",
    "source_code": "@abc.abstractmethod\ndef update_into(self, data: Buffer, buf: Buffer) -> int:\n    pass",
    "docstring": "Processes the provided bytes and writes the resulting data into the provided buffer. Returns the number of bytes written.",
    "type": "method",
    "file_path": "cryptography\\src\\cryptography\\hazmat\\primitives\\ciphers\\base.py",
    "ast_data": "FunctionDef name:update_into arg:self arg:data arg:buf arguments arg arg arg"
  },
  {
    "library": "pandas",
    "name": "argsort",
    "source_code": "def argsort(self, axis: Axis=0, kind: SortKind='quicksort', order: None=None, stable: None=None) -> Series:\n    if axis != -1:\n        self._get_axis_number(axis)\n    result = self.array.argsort(kind=kind)\n    res = self._constructor(result, index=self.index, name=self.name, dtype=np.intp, copy=False)\n    return res.__finalize__(self, method='argsort')",
    "docstring": "Return the integer indices that would sort the Series values. Override ndarray.argsort. Argsorts the value, omitting NA/null values, and places the result in the same locations as the non-NA values. Parameters ---------- axis : {0 or 'index'} Unused. Parameter needed for compatibility with DataFrame. kind : {'mergesort', 'quicksort', 'heapsort', 'stable'}, default 'quicksort' Choice of sorting algorithm. See :func: for more information. 'mergesort' and 'stable' are the only stable algorithms. order : None Has no effect but is accepted for compatibility with numpy. stable : None Has no effect but is accepted for compatibility with numpy. Returns ------- Series[np.intp] Positions of values within the sort order with -1 indicating nan values. See Also -------- numpy.ndarray.argsort : Returns the indices that would sort this array. Examples -------- >>> s = pd.Series([3, 2, 1]) >>> s.argsort() 0 2 1 1 2 0 dtype: int64",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\series.py",
    "ast_data": "FunctionDef name:argsort arg:self arg:axis arg:kind arg:order arg:stable arguments arg arg arg arg arg If Compare Call Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "_ci_lower",
    "source_code": "def _ci_lower(table, alpha):\n    if _sample_odds_ratio(table) == 0:\n        return 0\n    x, M, n, N = _hypergeom_params_from_table(table)\n    nc = _solve(lambda nc: nchypergeom_fisher.sf(x - 1, M, n, N, nc) - alpha)\n    return nc",
    "docstring": "Compute the lower end of the confidence interval.",
    "type": "function",
    "file_path": "scipy\\scipy\\stats\\_odds_ratio.py",
    "ast_data": "FunctionDef name:_ci_lower arg:table arg:alpha arguments arg arg If Compare Call Return return:yes Assign Call Assign Call arguments arg Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "wait",
    "source_code": "def wait(self, timeout=None):\n    with self._put_wait_lock, self._queue_lock:\n        logging.info('Waiting for all global closures to be finished.')\n        while not self._error and (not self._queue.empty() or self._inflight_closure_count > 0):\n            if not self._stop_waiting_condition.wait(timeout=timeout):\n                return False\n        self._raise_if_error()\n        return True",
    "docstring": "Wait for all closures to be finished before returning. If was called before or during , the error from the first invocation of will be raised. Args: timeout: A float specifying a timeout for the wait in seconds. Returns: True unless the given timeout expired, in which case it returns False.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\coordinator\\cluster_coordinator.py",
    "ast_data": "FunctionDef name:wait arg:self arg:timeout arguments arg arg With Call While BoolOp BoolOp Call Compare If Call Return return:yes Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_tf_range",
    "source_code": "def _tf_range(start_or_stop, stop, step):\n    if step is not UNSPECIFIED:\n        return math_ops.range(start_or_stop, stop, step)\n    if stop is not UNSPECIFIED:\n        stop = math_ops.maximum(start_or_stop, stop)\n        return math_ops.range(start_or_stop, stop)\n    start_or_stop = math_ops.maximum(start_or_stop, 0)\n    return math_ops.range(start_or_stop)",
    "docstring": "Overload of range_ that generates a TF range tensor.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\autograph\\operators\\py_builtins.py",
    "ast_data": "FunctionDef name:_tf_range arg:start_or_stop arg:stop arg:step arguments arg arg arg If Compare Return return:yes Call If Compare Assign Call Return return:yes Call Assign Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "write",
    "source_code": "def write(self, file_prefix, options=None):\n    if isinstance(file_prefix, os.PathLike):\n        file_prefix = os.fspath(file_prefix)\n    return self._write(file_prefix, options)",
    "docstring": "Writes a training checkpoint. The checkpoint includes variables created by this object and any trackable objects it depends on at the time is called. does not number checkpoints, increment , or update the metadata used by . It is primarily intended for use by higher level checkpoint management utilities. provides a very basic implementation of these features. Checkpoints written with must be read with . Example usage: Args: file_prefix: A prefix to use for the checkpoint filenames (/path/to/directory/and_a_prefix). options: Optional object. Returns: The full path to the checkpoint (i.e. ).",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\checkpoint\\checkpoint.py",
    "ast_data": "FunctionDef name:write arg:self arg:file_prefix arg:options arguments arg arg arg If Call Assign Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "full",
    "source_code": "def full(sharding_spec: ShardingSpec, size, fill_value, *, dtype=None, layout=torch.strided, requires_grad=False, pin_memory=False, memory_format=torch.contiguous_format, process_group=None, init_rrefs=False) -> ShardedTensor:\n    sharded_tensor = ShardedTensor(sharding_spec, *size, dtype=dtype, layout=layout, requires_grad=requires_grad, pin_memory=pin_memory, memory_format=memory_format, process_group=process_group, init_rrefs=init_rrefs)\n    torch.nn.init.constant_(sharded_tensor, fill_value)\n    return sharded_tensor",
    "docstring": "Creates a :class: filled with fill_value. The tensor's dtype is inferred from fill_value. If dtype is specified, it will override the inferred type from fill_value. Needs to be called on all ranks in an SPMD fashion. Args: sharding_spec (:class:): The specification describing how to shard the Tensor. size (int...): a list, tuple, or of integers defining the shape of the output tensor. fill_value (Scalar) - the value to fill the output tensor with. Keyword args: dtype (:class:, optional): the desired data type of returned tensor. Default: if `torch.set_default_dtypetorch.layouttorch.distributed.rpc.RRefShardedTensor` object on each rank",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\_shard\\sharded_tensor\\__init__.py",
    "ast_data": "FunctionDef name:full arg:sharding_spec arg:size arg:fill_value arguments arg arg arg arg arg arg arg arg arg arg Assign Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "base_version",
    "source_code": "@property\ndef base_version(self) -> str:\n    parts = []\n    if self.epoch != 0:\n        parts.append(f'{self.epoch}!')\n    parts.append('.'.join((str(x) for x in self.release)))\n    return ''.join(parts)",
    "docstring": "The \"base version\" of the version. >>> Version(\"1.2.3\").base_version '1.2.3' >>> Version(\"1.2.3+abc\").base_version '1.2.3' >>> Version(\"1!1.2.3+abc.dev1\").base_version '1!1.2.3' The \"base version\" is the public version of the project without any pre or post release markers.",
    "type": "method",
    "file_path": "pytorch\\torch\\_vendor\\packaging\\version.py",
    "ast_data": "FunctionDef name:base_version arg:self arguments arg Assign If Compare Call Call Call Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "get_ema_avg_fn",
    "source_code": "def get_ema_avg_fn(decay=0.999):\n    if decay < 0.0 or decay > 1.0:\n        raise ValueError(f'Invalid decay value {decay} provided. Please provide a value in [0,1] range.')\n\n    @torch.no_grad()\n    def ema_update(ema_param: Tensor, current_param: Tensor, num_averaged):\n        return decay * ema_param + (1 - decay) * current_param\n    return ema_update",
    "docstring": "Get the function applying exponential moving average (EMA) across a single param.",
    "type": "function",
    "file_path": "pytorch\\torch\\optim\\swa_utils.py",
    "ast_data": "FunctionDef name:get_ema_avg_fn arg:decay arguments arg If BoolOp Compare Compare Raise Call FunctionDef name:ema_update arg:ema_param arg:current_param arg:num_averaged arguments arg arg arg Return return:yes Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "node_op_type",
    "source_code": "def node_op_type(self, node_name, device_name=None):\n    if not self._debug_graphs:\n        raise LookupError('Node op types are not loaded from partition graphs yet.')\n    device_name = self._infer_device_name(device_name, node_name)\n    return self._debug_graphs[device_name].node_op_types[node_name]",
    "docstring": "Get the op type of given node. Args: node_name: () name of the node. device_name: () name of the device. If there is only one device or if node_name exists on only one device, this argument is optional. Returns: () op type of the node. Raises: LookupError: If node op types have not been loaded from partition graphs yet.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\debug\\lib\\debug_data.py",
    "ast_data": "FunctionDef name:node_op_type arg:self arg:node_name arg:device_name arguments arg arg arg If Raise Call Assign Call Return return:yes"
  },
  {
    "library": "seaborn",
    "name": "_check_statsmodels",
    "source_code": "def _check_statsmodels(self):\n    options = ('logistic', 'robust', 'lowess')\n    err = '`{}=True` requires statsmodels, an optional dependency, to be installed.'\n    for option in options:\n        if getattr(self, option) and (not _has_statsmodels):\n            raise RuntimeError(err.format(option))",
    "docstring": "Check whether statsmodels is installed if any boolean options require it.",
    "type": "method",
    "file_path": "seaborn\\seaborn\\regression.py",
    "ast_data": "FunctionDef name:_check_statsmodels arg:self arguments arg Assign Assign For If BoolOp Call Raise Call Call"
  },
  {
    "library": "scrapy",
    "name": "StopDownload",
    "source_code": "class StopDownload(Exception):\n\n    def __init__(self, *, fail: bool=True):\n        super().__init__()\n        self.fail = fail",
    "docstring": "Stop the download of the body for a given response. The 'fail' boolean parameter indicates whether or not the resulting partial response should be handled by the request errback. Note that 'fail' is a keyword-only argument.",
    "type": "class",
    "file_path": "scrapy\\scrapy\\exceptions.py",
    "ast_data": "ClassDef name:StopDownload FunctionDef name:__init__ arg:self arguments arg arg Call Call Assign"
  },
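A sketch of how `StopDownload` is typically raised: from a `headers_received` signal handler, per the docstring's description of `fail`. The spider name and URL are placeholders.

```python
import scrapy
from scrapy.exceptions import StopDownload

class HeadersOnlySpider(scrapy.Spider):
    name = "headers_only"  # placeholder
    start_urls = ["https://example.org"]

    @classmethod
    def from_crawler(cls, crawler, *args, **kwargs):
        spider = super().from_crawler(crawler, *args, **kwargs)
        crawler.signals.connect(
            spider.on_headers, signal=scrapy.signals.headers_received
        )
        return spider

    def on_headers(self, headers, body_length, request, spider):
        # fail=False routes the (empty) partial response to the normal
        # callback instead of the request errback.
        raise StopDownload(fail=False)

    def parse(self, response):
        self.logger.info("stopped after headers, got %d bytes", len(response.body))
```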
  {
    "library": "tensorflow",
    "name": "_dict_to_tensor",
    "source_code": "def _dict_to_tensor(self, x, k1, k2):\n    return array_ops_stack.stack([array_ops_stack.stack([x[i, j] for j in range(k2)]) for i in range(k1)])",
    "docstring": "Convert a dictionary to a tensor. Args: x: A k1 * k2 dictionary. k1: First dimension of x. k2: Second dimension of x. Returns: A k1 * k2 tensor.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\init_ops.py",
    "ast_data": "FunctionDef name:_dict_to_tensor arg:self arg:x arg:k1 arg:k2 arguments arg arg arg arg Return return:yes Call Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "to",
    "source_code": "def to(self, *args: Any, **kwargs: Any) -> Self:\n    data = self.data.to(*args, **kwargs)\n    if data is self.data:\n        return self\n    else:\n        kwargs = dict(filter(lambda t: t[0] != 'device' and t[0] != 'dtype', kwargs.items()))\n        sorted_indices = bind(self.sorted_indices, lambda t: t.to(data.device, **kwargs))\n        unsorted_indices = bind(self.unsorted_indices, lambda t: t.to(data.device, **kwargs))\n        return type(self)(data, self.batch_sizes, sorted_indices, unsorted_indices)",
    "docstring": "Perform dtype and/or device conversion on . It has similar signature as :meth:, except optional arguments like and should be passed as kwargs, not args, or they will not apply to the index tensors. .. note:: If the `torch.dtypetorch.device` is returned. Otherwise, returns a copy with the desired configuration.",
    "type": "method",
    "file_path": "pytorch\\torch\\nn\\utils\\rnn.py",
    "ast_data": "FunctionDef name:to arg:self arguments arg arg arg Assign Call If Compare Return return:yes Assign Call Call arguments arg BoolOp Compare Compare Call Assign Call arguments arg Call Assign Call arguments arg Call Return return:yes Call Call"
  },
  {
    "library": "numpy",
    "name": "polysub",
    "source_code": "def polysub(c1, c2):\n    return pu._sub(c1, c2)",
    "docstring": "Subtract one polynomial from another. Returns the difference of two polynomials - . The arguments are sequences of coefficients from lowest order term to highest, i.e., [1,2,3] represents the polynomial ``. Parameters ---------- c1, c2 : array_like 1-D arrays of polynomial coefficients ordered from low to high. Returns ------- out : ndarray Of coefficients representing their difference. See Also -------- polyadd, polymulx, polymul, polydiv, polypow Examples -------- >>> from numpy.polynomial import polynomial as P >>> c1 = (1, 2, 3) >>> c2 = (3, 2, 1) >>> P.polysub(c1,c2) array([-2., 0., 2.]) >>> P.polysub(c2, c1) # -P.polysub(c1,c2) array([ 2., 0., -2.])",
    "type": "function",
    "file_path": "numpy\\numpy\\polynomial\\polynomial.py",
    "ast_data": "FunctionDef name:polysub arg:c1 arg:c2 arguments arg arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "create_op_support",
    "source_code": "@compatibility(is_backward_compatible=False)\ndef create_op_support(is_node_supported: IsNodeSupported) -> OperatorSupportBase:\n\n    class FunctionalOperatorSupport(OperatorSupportBase):\n\n        def is_node_supported(self, submodules: t.Mapping[str, torch.nn.Module], node: torch.fx.Node) -> bool:\n            return is_node_supported(submodules, node)\n    return FunctionalOperatorSupport()",
    "docstring": "Wraps a function into an instance has the same call signature as",
    "type": "function",
    "file_path": "pytorch\\torch\\fx\\passes\\operator_support.py",
    "ast_data": "FunctionDef name:create_op_support arg:is_node_supported arguments arg ClassDef name:FunctionalOperatorSupport FunctionDef name:is_node_supported arg:self arg:submodules arg:node arguments arg arg arg Return return:yes Call Return return:yes Call Call"
  },
  {
    "library": "scikit-learn",
    "name": "kneighbors_graph",
    "source_code": "def kneighbors_graph(self, X=None, n_neighbors=None, mode='connectivity'):\n    check_is_fitted(self)\n    if n_neighbors is None:\n        n_neighbors = self.n_neighbors\n    if mode == 'connectivity':\n        A_ind = self.kneighbors(X, n_neighbors, return_distance=False)\n        n_queries = A_ind.shape[0]\n        A_data = np.ones(n_queries * n_neighbors)\n    elif mode == 'distance':\n        A_data, A_ind = self.kneighbors(X, n_neighbors, return_distance=True)\n        A_data = np.ravel(A_data)\n    else:\n        raise ValueError(f'Unsupported mode, must be one of \"connectivity\", or \"distance\" but got \"{mode}\" instead')\n    n_queries = A_ind.shape[0]\n    n_samples_fit = self.n_samples_fit_\n    n_nonzero = n_queries * n_neighbors\n    A_indptr = np.arange(0, n_nonzero + 1, n_neighbors)\n    kneighbors_graph = csr_matrix((A_data, A_ind.ravel(), A_indptr), shape=(n_queries, n_samples_fit))\n    return kneighbors_graph",
    "docstring": "Compute the (weighted) graph of k-Neighbors for points in X. Parameters ---------- X : {array-like, sparse matrix} of shape (n_queries, n_features), or (n_queries, n_indexed) if metric == 'precomputed', default=None The query point or points. If not provided, neighbors of each indexed point are returned. In this case, the query point is not considered its own neighbor. For `n_samples_fitA[i, j]ij`. The matrix is of CSR format. See Also -------- NearestNeighbors.radius_neighbors_graph : Compute the (weighted) graph of Neighbors for points in X. Examples -------- >>> X = [[0], [3], [1]] >>> from sklearn.neighbors import NearestNeighbors >>> neigh = NearestNeighbors(n_neighbors=2) >>> neigh.fit(X) NearestNeighbors(n_neighbors=2) >>> A = neigh.kneighbors_graph(X) >>> A.toarray() array([[1., 0., 1.], [0., 1., 1.], [1., 0., 1.]])",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\neighbors\\_base.py",
    "ast_data": "FunctionDef name:kneighbors_graph arg:self arg:X arg:n_neighbors arg:mode arguments arg arg arg arg Call If Compare Assign If Compare Assign Call Assign Assign Call If Compare Assign Call Assign Call Raise Call Assign Assign Assign Assign Call Assign Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "xxd_output_to_object",
    "source_code": "def xxd_output_to_object(input_cc_file):\n    model_bytes = xxd_output_to_bytes(input_cc_file)\n    return convert_bytearray_to_object(model_bytes)",
    "docstring": "Converts xxd output C++ source file to object. Args: input_cc_file: Full path name to th C++ source file dumped by xxd Raises: RuntimeError: If input_cc_file path is invalid. IOError: If input_cc_file cannot be opened. Returns: A python object corresponding to the input tflite file.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\lite\\tools\\flatbuffer_utils.py",
    "ast_data": "FunctionDef name:xxd_output_to_object arg:input_cc_file arguments arg Assign Call Return return:yes Call"
  },
  {
    "library": "django",
    "name": "_check_managers",
    "source_code": "@classmethod\ndef _check_managers(cls, **kwargs):\n    errors = []\n    for manager in cls._meta.managers:\n        errors.extend(manager.check(**kwargs))\n    return errors",
    "docstring": "Perform all manager checks.",
    "type": "method",
    "file_path": "django\\django\\db\\models\\base.py",
    "ast_data": "FunctionDef name:_check_managers arg:cls arguments arg arg Assign For Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "broadcast_weights",
    "source_code": "@tf_export('__internal__.ops.broadcast_weights', v1=[])\ndef broadcast_weights(weights, values):\n    with ops.name_scope(None, 'broadcast_weights', (weights, values)) as scope:\n        values = ops.convert_to_tensor(values, name='values')\n        weights = ops.convert_to_tensor(weights, dtype=values.dtype.base_dtype, name='weights')\n        weights_shape = weights.get_shape()\n        values_shape = values.get_shape()\n        if weights_shape.is_fully_defined() and values_shape.is_fully_defined() and weights_shape.is_compatible_with(values_shape):\n            return weights\n        if control_flow_ops.get_enclosing_xla_context() is not None:\n            return math_ops.multiply(weights, array_ops.ones_like(values), name=scope)\n        with ops.control_dependencies((assert_broadcastable(weights, values),)):\n            return math_ops.multiply(weights, array_ops.ones_like(values), name=scope)",
    "docstring": "Broadcast to the same shape as . This returns a version of following the same broadcast rules as , but limited to the weights shapes allowed by . When computing a weighted average, use this function to broadcast before summing them; e.g., . Args: weights: whose shape is broadcastable to according to the rules of . values: of any shape. Returns: broadcast to shape according to the rules of .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\weights_broadcast_ops.py",
    "ast_data": "FunctionDef name:broadcast_weights arg:weights arg:values arguments arg arg With Call Assign Call Assign Call Assign Call Assign Call If BoolOp Call Call Call Return return:yes If Compare Call Return return:yes Call Call With Call Call Return return:yes Call Call Call"
  },
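A small sketch of the broadcast semantics described above, assuming the `tf.__internal__.ops.broadcast_weights` export that the entry's decorator names is available in your TensorFlow build; the shapes are arbitrary.

```python
import tensorflow as tf

values = tf.ones([3, 4])
weights = tf.constant([[0.5], [1.0], [2.0]])  # (3, 1) broadcasts to (3, 4)

# Materialize weights at values' shape so a weighted mean divides by the
# effective total weight rather than by the element count.
w = tf.__internal__.ops.broadcast_weights(weights, values)
weighted_mean = tf.reduce_sum(w * values) / tf.reduce_sum(w)
```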
  {
    "library": "matplotlib",
    "name": "__init__",
    "source_code": "def __init__(self, *args, zs=(), zdir='z', axlim_clip=False, **kwargs):\n    super().__init__(*args, **kwargs)\n    self.set_3d_properties(zs, zdir, axlim_clip)",
    "docstring": "Parameters ---------- verts : zs : float The location along the *zdir* axis in 3D space to position the patch. zdir : {'x', 'y', 'z'} Plane to plot patch orthogonal to. Default: 'z'. See for a description of the values. axlim_clip : bool, default: False Whether to hide patches with a vertex outside the axes view limits. .. versionadded:: 3.10",
    "type": "method",
    "file_path": "matplotlib\\lib\\mpl_toolkits\\mplot3d\\art3d.py",
    "ast_data": "FunctionDef name:__init__ arg:self arguments arg arg arg arg arg arg Call Call Call"
  },
  {
    "library": "django",
    "name": "smart_split",
    "source_code": "def smart_split(text):\n    for bit in smart_split_re.finditer(str(text)):\n        yield bit[0]",
    "docstring": "Generator that splits a string by spaces, leaving quoted phrases together. Supports both single and double quotes, and supports escaping quotes with backslashes. In the output, strings will keep their initial and trailing quote marks and escaped quotes will remain escaped (the results can then be further processed with unescape_string_literal()). >>> list(smart_split(r'This is \"a person\\'s\" test.')) ['This', 'is', '\"a person\\\\\\'s\"', 'test.'] >>> list(smart_split(r\"Another 'person\\'s' test.\")) ['Another', \"'person\\\\'s'\", 'test.'] >>> list(smart_split(r'A \"\\\"funky\\\" style\" test.')) ['A', '\"\\\\\"funky\\\\\" style\"', 'test.']",
    "type": "function",
    "file_path": "django\\django\\utils\\text.py",
    "ast_data": "FunctionDef name:smart_split arg:text arguments arg For Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_get_trainable_state",
    "source_code": "def _get_trainable_state(self):\n    layers = self._flatten_layers(include_self=False, recursive=False)\n    trainable_state = {self: self.trainable}\n    for l in layers:\n        trainable_state.update(l._get_trainable_state())\n    return trainable_state",
    "docstring": "Get the state of each sublayer. Returns: A dict mapping all sublayers to their value.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\base_layer_v1.py",
    "ast_data": "FunctionDef name:_get_trainable_state arg:self arguments arg Assign Call Assign For Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "define_stack_allocated_buffer",
    "source_code": "def define_stack_allocated_buffer(self, name, sizes: list[Any], dtype=torch.float) -> str:\n    sizes = parse_expr_with_index_symbols(sizes)\n    buf = ir.Buffer(name=name, layout=ir.FixedLayout(torch.device('cpu'), dtype, sizes))\n    self.local_buffers[name] = buf\n    ctype = f'{DTYPE_TO_CPP[dtype]}'\n    numel = f'{cexpr_index(buf.get_numel())}'\n    return f'alignas(64) {ctype} _{name}[{numel}]; {ctype}* {name} = _{name};'",
    "docstring": "Define stack-allocated buffer",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\codegen\\cpp_template_kernel.py",
    "ast_data": "FunctionDef name:define_stack_allocated_buffer arg:self arg:name arg:sizes arg:dtype arguments arg arg arg arg Assign Call Assign Call Call Call Assign Assign Assign Call Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "legend_artist",
    "source_code": "def legend_artist(self, legend, orig_handle, fontsize, handlebox):\n    xdescent, ydescent, width, height = self.adjust_drawing_area(legend, orig_handle, handlebox.xdescent, handlebox.ydescent, handlebox.width, handlebox.height, fontsize)\n    artists = self.create_artists(legend, orig_handle, xdescent, ydescent, width, height, fontsize, handlebox.get_transform())\n    for a in artists:\n        handlebox.add_artist(a)\n    return artists[0]",
    "docstring": "Return the artist that this HandlerBase generates for the given original artist/handle. Parameters ---------- legend : The legend for which these legend artists are being created. orig_handle : :class: or similar The object for which these legend artists are being created. fontsize : int The fontsize in pixels. The artists being created should be scaled according to the given fontsize. handlebox : The box which has been created to hold this legend entry's artists. Artists created in the method must be added to this handlebox inside this method.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\legend_handler.py",
    "ast_data": "FunctionDef name:legend_artist arg:self arg:legend arg:orig_handle arg:fontsize arg:handlebox arguments arg arg arg arg arg Assign Call Assign Call Call For Call Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "_allclose_dense_sparse",
    "source_code": "def _allclose_dense_sparse(x, y, rtol=1e-07, atol=1e-09):\n    if sp.issparse(x) and sp.issparse(y):\n        x = x.tocsr()\n        y = y.tocsr()\n        x.sum_duplicates()\n        y.sum_duplicates()\n        return np.array_equal(x.indices, y.indices) and np.array_equal(x.indptr, y.indptr) and np.allclose(x.data, y.data, rtol=rtol, atol=atol)\n    elif not sp.issparse(x) and (not sp.issparse(y)):\n        return np.allclose(x, y, rtol=rtol, atol=atol)\n    raise ValueError('Can only compare two sparse matrices, not a sparse matrix and an array')",
    "docstring": "Check allclose for sparse and dense data. Both x and y need to be either sparse or dense, they can't be mixed. Parameters ---------- x : {array-like, sparse matrix} First array to compare. y : {array-like, sparse matrix} Second array to compare. rtol : float, default=1e-7 Relative tolerance; see numpy.allclose. atol : float, default=1e-9 absolute tolerance; see numpy.allclose. Note that the default here is more tolerant than the default for numpy.testing.assert_allclose, where atol=0.",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\utils\\validation.py",
    "ast_data": "FunctionDef name:_allclose_dense_sparse arg:x arg:y arg:rtol arg:atol arguments arg arg arg arg If BoolOp Call Call Assign Call Assign Call Call Call Return return:yes BoolOp Call Call Call If BoolOp Call Call Return return:yes Call Raise Call"
  },
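A sketch of the comparison semantics above. `_allclose_dense_sparse` is a private helper, so the import path below follows the entry's file_path and may change between scikit-learn versions.

```python
import numpy as np
from scipy import sparse
from sklearn.utils.validation import _allclose_dense_sparse  # private helper

a = sparse.csr_matrix(np.eye(3))
b = a.copy()
b.data += 1e-10  # perturbation within the default atol=1e-9

print(_allclose_dense_sparse(a, b))                  # True: same pattern, close data
print(_allclose_dense_sparse(np.eye(3), np.eye(3)))  # True for two dense arrays
# Passing one sparse and one dense input raises ValueError.
```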
  {
    "library": "pytorch",
    "name": "get_observers_of_interest",
    "source_code": "def get_observers_of_interest(self) -> dict[str, set[str]]:\n    return self._detector_name_to_observer_fqns.copy()",
    "docstring": "Returns a copy of the observers of interest for viewing",
    "type": "method",
    "file_path": "pytorch\\torch\\ao\\quantization\\fx\\_model_report\\model_report.py",
    "ast_data": "FunctionDef name:get_observers_of_interest arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "offset",
    "source_code": "@property\ndef offset(self) -> int:\n    return 0",
    "docstring": "Offset of first element. Always zero.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\interchange\\column.py",
    "ast_data": "FunctionDef name:offset arg:self arguments arg Return return:yes"
  },
  {
    "library": "scipy",
    "name": "__call__",
    "source_code": "def __call__(self, times, order=0):\n    if order not in [0, 1, 2]:\n        raise ValueError('`order` must be 0, 1 or 2.')\n    times = np.asarray(times, dtype=float)\n    if times.ndim > 1:\n        raise ValueError('`times` must be at most 1-dimensional.')\n    singe_time = times.ndim == 0\n    times = np.atleast_1d(times)\n    rotvecs = self.interpolator(times)\n    if order == 0:\n        index = np.searchsorted(self.times, times, side='right')\n        index -= 1\n        index[index < 0] = 0\n        n_segments = len(self.times) - 1\n        index[index > n_segments - 1] = n_segments - 1\n        result = self.rotations[index] * Rotation.from_rotvec(rotvecs)\n    elif order == 1:\n        rotvecs_dot = self.interpolator(times, 1)\n        result = _compute_angular_rate(rotvecs, rotvecs_dot)\n    elif order == 2:\n        rotvecs_dot = self.interpolator(times, 1)\n        rotvecs_dot_dot = self.interpolator(times, 2)\n        result = _compute_angular_acceleration(rotvecs, rotvecs_dot, rotvecs_dot_dot)\n    else:\n        assert False\n    if singe_time:\n        result = result[0]\n    return result",
    "docstring": "Compute interpolated values. Parameters ---------- times : float or array_like Times of interest. order : {0, 1, 2}, optional Order of differentiation: * 0 (default) : return Rotation * 1 : return the angular rate in rad/sec * 2 : return the angular acceleration in rad/sec/sec Returns ------- Interpolated Rotation, angular rate or acceleration.",
    "type": "method",
    "file_path": "scipy\\scipy\\spatial\\transform\\_rotation_spline.py",
    "ast_data": "FunctionDef name:__call__ arg:self arg:times arg:order arguments arg arg arg If Compare Raise Call Assign Call If Compare Raise Call Assign Compare Assign Call Assign Call If Compare Assign Call Assign Compare Assign Call Assign Compare Assign Call If Compare Assign Call Assign Call If Compare Assign Call Assign Call Assign Call If Assign Return return:yes"
  },
  {
    "library": "pandas",
    "name": "count",
    "source_code": "def count(self) -> int:\n    return notna(self._values).sum().astype('int64')",
    "docstring": "Return number of non-NA/null observations in the Series. Returns ------- int Number of non-null values in the Series. See Also -------- DataFrame.count : Count non-NA cells for each column or row. Examples -------- >>> s = pd.Series([0.0, 1.0, np.nan]) >>> s.count() 2",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\series.py",
    "ast_data": "FunctionDef name:count arg:self arguments arg Return return:yes Call Call Call"
  },
  {
    "library": "matplotlib",
    "name": "isowner",
    "source_code": "def isowner(self, o):\n    return self._owner is o",
    "docstring": "Return whether *o* owns this lock.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\widgets.py",
    "ast_data": "FunctionDef name:isowner arg:self arg:o arguments arg arg Return return:yes Compare"
  },
  {
    "library": "pytorch",
    "name": "build_subgraph_buffer",
    "source_code": "def build_subgraph_buffer(args: list[TensorBox], subgraph: Subgraph):\n    cnt = 0\n    env = {}\n    for node in subgraph.graph_module.graph.nodes:\n        if node.op == 'placeholder':\n            env[node] = args[cnt]\n            cnt += 1\n        elif node.op == 'call_function':\n            args, kwargs = tree_map(lambda x: env[x] if x in env else x, (node.args, node.kwargs))\n            env[node] = lowerings[node.target](*args, **kwargs)\n        elif node.op == 'output':\n\n            def convert_output_node_to_buffer(output):\n                if output is None:\n                    return None\n                output_node = output\n                output_buffer = env[output_node]\n                assert isinstance(output_buffer, TensorBox), (\"The output node for B2B-GEMM's subgraph must be a TensorBox, but got: \", type(output_buffer))\n                assert isinstance(output_buffer.data, StorageBox), (\"The output node for B2B-GEMM's subgraph must be a StorageBox, but got: \", type(output_buffer))\n                subgraph_buffer = ComputedBuffer(name=None, layout=FlexibleLayout(device=output_buffer.data.get_device(), dtype=output_buffer.data.get_dtype(), size=output_buffer.data.get_size()), data=output_buffer.data.data)\n                return subgraph_buffer\n            return tree_map(convert_output_node_to_buffer, node.args[0])\n    raise ValueError('B2B-GEMM was passed a subgraph with no output node!')",
    "docstring": "This function is adapted from ../kernel/flex_attention.py. The goal is to take in the required args and produce the subgraph buffer The subgraph buffer is a ComputedBuffer that will be inlined into the triton template Args: args: The args that are passed into the subgraph subgraph: The Subgraph ir for which to produce the output node",
    "type": "function",
    "file_path": "pytorch\\torch\\_inductor\\fx_passes\\b2b_gemm.py",
    "ast_data": "FunctionDef name:build_subgraph_buffer arg:args arg:subgraph arguments arg arg Assign Assign For If Compare Assign If Compare Assign Call arguments arg Compare Assign Call If Compare FunctionDef name:convert_output_node_to_buffer arg:output arguments arg If Compare Return return:no Assign Assign Call Call Call Call Assign Call Call Call Call Call Return return:yes Return return:yes Call Raise Call"
  },
  {
    "library": "tensorflow",
    "name": "ResidualWrapper",
    "source_code": "@deprecated(None, 'Please use tf.keras.layers.RNN instead.')\n@tf_export('nn.RNNCellResidualWrapper', v1=[])\nclass ResidualWrapper(rnn_cell_wrapper_impl.ResidualWrapperBase, _RNNCellWrapperV2):\n\n    def __init__(self, *args, **kwargs):\n        super(ResidualWrapper, self).__init__(*args, **kwargs)\n    __init__.__doc__ = rnn_cell_wrapper_impl.ResidualWrapperBase.__init__.__doc__",
    "docstring": "RNNCell wrapper that ensures cell inputs are added to the outputs.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\layers\\rnn_cell_wrapper_v2.py",
    "ast_data": "ClassDef name:ResidualWrapper FunctionDef name:__init__ arg:self arguments arg arg arg Call Call Assign Call Call"
  },
  {
    "library": "matplotlib",
    "name": "add_toolitem",
    "source_code": "def add_toolitem(self, name, group, position, image, description, toggle):\n    raise NotImplementedError",
    "docstring": "A hook to add a toolitem to the container. This hook must be implemented in each backend and contains the backend-specific code to add an element to the toolbar. .. warning:: This is part of the backend implementation and should not be called by end-users. They should instead call . The callback associated with the button click event must be *exactly* `NoneTrueFalse` : The button is a normal button (returns to unpressed state after release).",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\backend_bases.py",
    "ast_data": "FunctionDef name:add_toolitem arg:self arg:name arg:group arg:position arg:image arg:description arg:toggle arguments arg arg arg arg arg arg arg Raise"
  },
  {
    "library": "django",
    "name": "Choice",
    "source_code": "class Choice(list):\n    pass",
    "docstring": "Represent multiple possibilities at this point in a pattern string.",
    "type": "class",
    "file_path": "django\\django\\utils\\regex_helper.py",
    "ast_data": "ClassDef name:Choice"
  },
  {
    "library": "matplotlib",
    "name": "get_dialog",
    "source_code": "def get_dialog(self):\n    dialog = self.parent()\n    while not isinstance(dialog, QtWidgets.QDialog):\n        dialog = dialog.parent()\n    return dialog",
    "docstring": "Return FormDialog instance",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\backends\\qt_editor\\_formlayout.py",
    "ast_data": "FunctionDef name:get_dialog arg:self arguments arg Assign Call While Call Assign Call Return return:yes"
  },
  {
    "library": "pandas",
    "name": "min",
    "source_code": "def min(self, axis=None, skipna: bool=True, *args, **kwargs) -> int | float:\n    nv.validate_minmax_axis(axis)\n    nv.validate_min(args, kwargs)\n    return self._minmax('min')",
    "docstring": "The minimum value of the RangeIndex",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\indexes\\range.py",
    "ast_data": "FunctionDef name:min arg:self arg:axis arg:skipna arguments arg arg arg arg arg Call Call Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "singular_leading_submatrix",
    "source_code": "def singular_leading_submatrix(A, U, k):\n    delta = np.sum(U[:k - 1, k - 1] ** 2) - A[k - 1, k - 1]\n    n = len(A)\n    v = np.zeros(n)\n    v[k - 1] = 1\n    if k != 1:\n        v[:k - 1] = solve_triangular(U[:k - 1, :k - 1], -U[:k - 1, k - 1])\n    return (delta, v)",
    "docstring": "Compute term that makes the leading `A` is added to its element (k, k).",
    "type": "function",
    "file_path": "scipy\\scipy\\optimize\\_trustregion_exact.py",
    "ast_data": "FunctionDef name:singular_leading_submatrix arg:A arg:U arg:k arguments arg arg arg Assign Call Assign Call Assign Call Assign If Compare Assign Call Return return:yes"
  },
  {
    "library": "pandas",
    "name": "Accessor",
    "source_code": "class Accessor:\n\n    def __init__(self, name: str, accessor) -> None:\n        self._name = name\n        self._accessor = accessor\n\n    def __get__(self, obj, cls):\n        if obj is None:\n            return self._accessor\n        return self._accessor(obj)",
    "docstring": "Custom property-like object. A descriptor for accessors. Parameters ---------- name : str Namespace that will be accessed under, e.g. ``.",
    "type": "class",
    "file_path": "pandas\\pandas\\core\\accessor.py",
    "ast_data": "ClassDef name:Accessor FunctionDef name:__init__ arg:self arg:name arg:accessor arguments arg arg arg Assign Assign FunctionDef name:__get__ arg:self arg:obj arg:cls arguments arg arg arg If Compare Return return:yes Return return:yes Call"
  },
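A toy illustration of the descriptor protocol above. `Dataset` and `StatsAccessor` are hypothetical names, and the import path follows the entry's file_path (private API; older pandas versions named this class `CachedAccessor`).

```python
from pandas.core.accessor import Accessor  # private, per the entry's file_path

class StatsAccessor:
    def __init__(self, obj):
        self._obj = obj

    def total(self):
        return sum(self._obj.values)

class Dataset:
    stats = Accessor("stats", StatsAccessor)  # descriptor wires up the namespace

    def __init__(self, values):
        self.values = values

ds = Dataset([1, 2, 3])
print(ds.stats.total())  # 6: instance access calls StatsAccessor(ds)
print(Dataset.stats)     # class access returns the accessor class itself
```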
  {
    "library": "pytorch",
    "name": "_get_pattern_output_dtype",
    "source_code": "def _get_pattern_output_dtype(match: Match):\n    pattern_output_nodes = match.output_nodes()\n    assert len(pattern_output_nodes) == 1\n    output_node = pattern_output_nodes[0]\n    assert isinstance(output_node, torch.fx.Node)\n    output_dtype = output_node.meta['val'].dtype\n    assert output_dtype in [torch.int8, torch.uint8, torch.float32, torch.bfloat16]\n    return output_dtype",
    "docstring": "Get the pattern's output dtype from node's meta Assume only 1 output node in this matched pattern.",
    "type": "function",
    "file_path": "pytorch\\torch\\_inductor\\fx_passes\\quantization.py",
    "ast_data": "FunctionDef name:_get_pattern_output_dtype arg:match arguments arg Assign Call Compare Call Assign Call Assign Compare Return return:yes"
  },
  {
    "library": "django",
    "name": "ModelSignal",
    "source_code": "class ModelSignal(Signal):\n\n    def _lazy_method(self, method, apps, receiver, sender, **kwargs):\n        from django.db.models.options import Options\n        partial_method = partial(method, receiver, **kwargs)\n        if isinstance(sender, str):\n            apps = apps or Options.default_apps\n            apps.lazy_model_operation(partial_method, make_model_tuple(sender))\n        else:\n            return partial_method(sender)\n\n    def connect(self, receiver, sender=None, weak=True, dispatch_uid=None, apps=None):\n        self._lazy_method(super().connect, apps, receiver, sender, weak=weak, dispatch_uid=dispatch_uid)\n\n    def disconnect(self, receiver=None, sender=None, dispatch_uid=None, apps=None):\n        return self._lazy_method(super().disconnect, apps, receiver, sender, dispatch_uid=dispatch_uid)",
    "docstring": "Signal subclass that allows the sender to be lazily specified as a string of the form.",
    "type": "class",
    "file_path": "django\\django\\db\\models\\signals.py",
    "ast_data": "ClassDef name:ModelSignal FunctionDef name:_lazy_method arg:self arg:method arg:apps arg:receiver arg:sender arguments arg arg arg arg arg arg Assign Call If Call Assign BoolOp Call Call Return return:yes Call FunctionDef name:connect arg:self arg:receiver arg:sender arg:weak arg:dispatch_uid arg:apps arguments arg arg arg arg arg arg Call Call FunctionDef name:disconnect arg:self arg:receiver arg:sender arg:dispatch_uid arg:apps arguments arg arg arg arg arg Return return:yes Call Call"
  },
  {
    "library": "scikit-learn",
    "name": "_process_mst",
    "source_code": "def _process_mst(min_spanning_tree):\n    row_order = np.argsort(min_spanning_tree['distance'])\n    min_spanning_tree = min_spanning_tree[row_order]\n    return make_single_linkage(min_spanning_tree)",
    "docstring": "Builds a single-linkage tree (SLT) from the provided minimum spanning tree (MST). The MST is first sorted then processed by a custom Cython routine. Parameters ---------- min_spanning_tree : ndarray of shape (n_samples - 1,), dtype=MST_edge_dtype The MST representation of the mutual-reachability graph. The MST is represented as a collection of edges. Returns ------- single_linkage : ndarray of shape (n_samples - 1,), dtype=HIERARCHY_dtype The single-linkage tree tree (dendrogram) built from the MST.",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\cluster\\_hdbscan\\hdbscan.py",
    "ast_data": "FunctionDef name:_process_mst arg:min_spanning_tree arguments arg Assign Call Assign Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "state_dict",
    "source_code": "@torch._disable_dynamo\ndef state_dict(self) -> StateDict:\n    for pre_hook in self._optimizer_state_dict_pre_hooks.values():\n        pre_hook(self)\n    param_mappings: dict[int, int] = {}\n    start_index = 0\n\n    def pack_group(group: dict[str, Any]) -> dict[str, Any]:\n        nonlocal start_index\n        packed = {k: v for k, v in group.items() if k != 'params'}\n        param_mappings.update({id(p): i for i, p in enumerate(group['params'], start_index) if id(p) not in param_mappings})\n        packed['params'] = [param_mappings[id(p)] for p in group['params']]\n        start_index += len(packed['params'])\n        return packed\n    param_groups = [pack_group(g) for g in self.param_groups]\n    packed_state = {param_mappings[id(k)] if isinstance(k, torch.Tensor) else k: v for k, v in self.state.items()}\n    state_dict = {'state': packed_state, 'param_groups': param_groups}\n    for post_hook in self._optimizer_state_dict_post_hooks.values():\n        hook_result = post_hook(self, state_dict)\n        if hook_result is not None:\n            state_dict = hook_result\n    return state_dict",
    "docstring": "Return the state of the optimizer as a :class:. It contains two entries: * `` s) in order to match state WITHOUT additional verification. A returned state dict might look something like: .. code-block:: text { 'state': { 0: {'momentum_buffer': tensor(...), ...}, 1: {'momentum_buffer': tensor(...), ...}, 2: {'momentum_buffer': tensor(...), ...}, 3: {'momentum_buffer': tensor(...), ...} }, 'param_groups': [ { 'lr': 0.01, 'weight_decay': 0, ... 'params': [0] 'param_names' ['param0'] (optional) }, { 'lr': 0.001, 'weight_decay': 0.5, ... 'params': [1, 2, 3] 'param_names': ['param1', 'layer.weight', 'layer.bias'] (optional) } ] }",
    "type": "method",
    "file_path": "pytorch\\torch\\optim\\optimizer.py",
    "ast_data": "FunctionDef name:state_dict arg:self arguments arg For Call Call Assign FunctionDef name:pack_group arg:group arguments arg Assign Call Compare Call Call Call Compare Call Assign Call Call Return return:yes Assign Call Assign Call Call Call Assign For Call Assign Call If Compare Assign Return return:yes"
  },
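A short save/restore sketch for the method above; the model shape and file name are arbitrary.

```python
import torch
from torch import nn

model = nn.Linear(3, 1)
opt = torch.optim.SGD(model.parameters(), lr=0.01, momentum=0.9)
model(torch.randn(2, 3)).sum().backward()
opt.step()  # populates per-parameter state (momentum buffers)

sd = opt.state_dict()
print(sorted(sd))         # ['param_groups', 'state']
torch.save(sd, "opt.pt")  # checkpoint the optimizer state

opt2 = torch.optim.SGD(model.parameters(), lr=0.01, momentum=0.9)
opt2.load_state_dict(torch.load("opt.pt"))  # resume with identical state
```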
  {
    "library": "tensorflow",
    "name": "_add_flops",
    "source_code": "@ops.RegisterStatistics('Add', 'flops')\n@ops.RegisterStatistics('AddV2', 'flops')\ndef _add_flops(graph, node):\n    return _binary_per_element_op_flops(graph, node)",
    "docstring": "Compute flops for Add operation.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\profiler\\internal\\flops_registry.py",
    "ast_data": "FunctionDef name:_add_flops arg:graph arg:node arguments arg arg Return return:yes Call Call Call"
  },
  {
    "library": "authlib",
    "name": "encrypt",
    "source_code": "def encrypt(self, msg, aad, iv, key):\n    self.check_iv(iv)\n    cipher = Cipher(AES(key), GCM(iv), backend=default_backend())\n    enc = cipher.encryptor()\n    enc.authenticate_additional_data(aad)\n    ciphertext = enc.update(msg) + enc.finalize()\n    return (ciphertext, enc.tag)",
    "docstring": "Key Encryption with AES GCM. :param msg: text to be encrypt in bytes :param aad: additional authenticated data in bytes :param iv: initialization vector in bytes :param key: encrypted key in bytes :return: (ciphertext, iv, tag)",
    "type": "method",
    "file_path": "authlib\\authlib\\jose\\rfc7518\\jwe_encs.py",
    "ast_data": "FunctionDef name:encrypt arg:self arg:msg arg:aad arg:iv arg:key arguments arg arg arg arg arg Call Assign Call Call Call Call Assign Call Call Assign Call Call Return return:yes"
  },
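A self-contained AES-GCM roundtrip mirroring the method body, using the same `cryptography` primitives it calls; the key, nonce, and payload are throwaway values.

```python
import os
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives.ciphers import Cipher
from cryptography.hazmat.primitives.ciphers.algorithms import AES
from cryptography.hazmat.primitives.ciphers.modes import GCM

key = os.urandom(32)  # AES-256 key
iv = os.urandom(12)   # 96-bit nonce, the recommended GCM size
aad = b"header"       # authenticated but not encrypted

enc = Cipher(AES(key), GCM(iv), backend=default_backend()).encryptor()
enc.authenticate_additional_data(aad)
ciphertext = enc.update(b"secret") + enc.finalize()

# Decryption verifies the tag; tampering with ct, aad, or tag raises InvalidTag.
dec = Cipher(AES(key), GCM(iv, enc.tag), backend=default_backend()).decryptor()
dec.authenticate_additional_data(aad)
assert dec.update(ciphertext) + dec.finalize() == b"secret"
```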
  {
    "library": "pytorch",
    "name": "statically_true",
    "source_code": "def statically_true(self, e):\n    var_to_range = (*self.var_to_range, *((k, ValueRanges(0, upper_bound(v) - 1)) for k, v in self.indirect_var_ranges.items()))\n    return evaluate_expr(self.shape_env, e, self.axioms, var_to_range)",
    "docstring": "Given some iter_ranges, return a function that given an expression, returns whether it is true or false using value ranges, guard knowledge and runtime_asserts. FIXME I think this may not be entirely right, as we may not be able to use all runtime_asserts If this is an issue, just use guards in . The proper way of handling this would be to have a global shape_env that adds runtime_asserts as they happen in the code. Then, it shuld be used in SimplifyIndexing to perform wrap_expr and in CSEProxy.check_bounds to elide upper / lower bounds also for indirect_indexing",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\index_propagation.py",
    "ast_data": "FunctionDef name:statically_true arg:self arg:e arguments arg arg Assign Call Call Call Return return:yes Call"
  },
  {
    "library": "sphinx",
    "name": "add_message_catalog",
    "source_code": "def add_message_catalog(self, catalog: str, locale_dir: str | os.PathLike[str]) -> None:\n    locale.init([locale_dir], self.config.language, catalog)\n    locale.init_console(locale_dir, catalog)",
    "docstring": "Register a message catalog. :param catalog: The name of the catalog :param locale_dir: The base path of the message catalog For more details, see :func:. .. versionadded:: 1.8",
    "type": "method",
    "file_path": "sphinx\\sphinx\\application.py",
    "ast_data": "FunctionDef name:add_message_catalog arg:self arg:catalog arg:locale_dir arguments arg arg arg Call Call"
  },
  {
    "library": "pytorch",
    "name": "convert_to_sympy_symbols",
    "source_code": "def convert_to_sympy_symbols(self, typ):\n    if isinstance(typ, Var):\n        return sympy.symbols(str(typ))\n    elif isinstance(typ, TensorType):\n        new_args = [self.convert_to_sympy_symbols(a) for a in typ.__args__]\n        return TensorType(tuple(new_args))\n    elif isinstance(typ, list):\n        return [self.convert_to_sympy_symbols(t) for t in typ]\n    elif isinstance(typ, tuple):\n        return (self.convert_to_sympy_symbols(t) for t in typ)\n    else:\n        return typ",
    "docstring": "Replace all unknown types with fresh type variables.",
    "type": "method",
    "file_path": "pytorch\\torch\\fx\\experimental\\graph_gradual_typechecker.py",
    "ast_data": "FunctionDef name:convert_to_sympy_symbols arg:self arg:typ arguments arg arg If Call Return return:yes Call Call If Call Assign Call Return return:yes Call Call If Call Return return:yes Call If Call Return return:yes Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "apply_mask",
    "source_code": "def apply_mask(y_p, sw, mask):\n    if mask is not None:\n        mask = math_ops.cast(mask, y_p.dtype)\n        if sw is not None:\n            mask, _, sw = losses_utils.squeeze_or_expand_dimensions(mask, sample_weight=sw)\n            sw *= mask\n        else:\n            sw = mask\n    return sw",
    "docstring": "Applies any mask on predictions to sample weights.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\compile_utils.py",
    "ast_data": "FunctionDef name:apply_mask arg:y_p arg:sw arg:mask arguments arg arg arg If Compare Assign Call If Compare Assign Call Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_create_object_from_type_and_dict",
    "source_code": "def _create_object_from_type_and_dict(cls, obj_dict):\n    value = object.__new__(cls)\n    value.__dict__.update(obj_dict)\n    return value",
    "docstring": "Creates an object, bypassing the constructor. Creates an object of type , whose is updated to contain . Args: cls: The type of the new object. obj_dict: A that should be used to initialize the new object's . Returns: An object of type .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\extension_type.py",
    "ast_data": "FunctionDef name:_create_object_from_type_and_dict arg:cls arg:obj_dict arguments arg arg Assign Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "register_function_func",
    "source_code": "def register_function_func(ops):\n\n    def wrapper(func):\n        for op in ops:\n            _MASKEDTENSOR_FUNCTION_TABLE[op] = partial(func, op)\n    return wrapper",
    "docstring": "Used for registering a new __torch_function__ function to MaskedTensor Called via _MASKEDTENSOR_FUNCTION_TABLE The code to register a new function looks like: @register_function_func(list_of_ops) def foo(func, *args, **kwargs):",
    "type": "function",
    "file_path": "pytorch\\torch\\masked\\maskedtensor\\_ops_refs.py",
    "ast_data": "FunctionDef name:register_function_func arg:ops arguments arg FunctionDef name:wrapper arg:func arguments arg For Assign Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "validate_qmin_qmax",
    "source_code": "def validate_qmin_qmax(quant_min: int, quant_max: int) -> None:\n    assert quant_min <= 0 <= quant_max, 'Used-specified quantization range must include 0.'\n    assert quant_min < quant_max, 'qmin must be strictly less than qmax for user-specified quantization range.'",
    "docstring": "Validates that the user-specified quantization range is properly initialized and within the given bound supported by the observer dtype. To accommodate lower-bit quantization with respect to the existing torch.qint8 and torch.quint8 datatypes, the user can choose to use dynamic quantization range by passing in a tuple of initial qmin and qmax values. One use case is these customized qmin and qmax values are used to calculate static estimates of the scale and zero point for aggressive lower-bit fake quantization. These estimates are compared against parameters learned through backpropagation. The related literatures for scale and zero point via backpropagation are as follows: Learned Step Size Quantization: Trained Quantization Thresholds:",
    "type": "function",
    "file_path": "pytorch\\torch\\ao\\quantization\\utils.py",
    "ast_data": "FunctionDef name:validate_qmin_qmax arg:quant_min arg:quant_max arguments arg arg Compare Compare"
  },
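A quick demonstration of the two asserts; the import path follows the entry's file_path.

```python
from torch.ao.quantization.utils import validate_qmin_qmax

validate_qmin_qmax(-128, 127)  # ok: int8-style range containing 0
validate_qmin_qmax(0, 15)      # ok: 4-bit unsigned range

try:
    validate_qmin_qmax(1, 127)  # rejected: the range must include 0
except AssertionError as exc:
    print(exc)
```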
  {
    "library": "tensorflow",
    "name": "convert",
    "source_code": "def convert(self, graph_def, input_tensors, output_tensors):\n    self._validate_inputs(graph_def, input_tensors)\n    converter_kwargs = self._get_base_converter_args()\n    converter_kwargs.update(self._quant_mode.converter_flags())\n    if not self.experimental_new_converter:\n        logging.warning('Please consider switching to the new converter by setting experimental_new_converter=True. The old converter is deprecated.')\n    else:\n        logging.info('Using new converter: If you encounter a problem please file a bug. You can opt-out by setting experimental_new_converter=False')\n    result = _convert_graphdef(input_data=graph_def, input_tensors=input_tensors, output_tensors=output_tensors, **converter_kwargs)\n    return self._optimize_tflite_model(result, self._quant_mode, _build_conversion_flags(**converter_kwargs).debug_options, quant_io=self.experimental_new_quantizer)",
    "docstring": "Converts a TensorFlow GraphDef based on instance variables. Args: graph_def: Frozen TensorFlow GraphDef. input_tensors: List of input tensors. output_tensors: List of output tensors. Returns: The converted data in serialized format. Raises: ValueError: No concrete function is specified. Multiple concrete functions are specified. Input shape is not specified. Invalid quantization parameters.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\lite\\python\\lite.py",
    "ast_data": "FunctionDef name:convert arg:self arg:graph_def arg:input_tensors arg:output_tensors arguments arg arg arg arg Call Assign Call Call Call If Call Call Assign Call Return return:yes Call Call"
  },
  {
    "library": "pandas",
    "name": "_values",
    "source_code": "@property\ndef _values(self):\n    return self._mgr.internal_values()",
    "docstring": "Return the internal repr of this data (defined by Block.interval_values). This are the values as stored in the Block (ndarray or ExtensionArray depending on the Block class), with datetime64[ns] and timedelta64[ns] wrapped in ExtensionArrays to match Index._values behavior. Differs from the public `` ensures to always return an ExtensionArray. Overview: dtype | values | _values | array | ----------- | ------------- | ------------- | --------------------- | Numeric | ndarray | ndarray | NumpyExtensionArray | Category | Categorical | Categorical | Categorical | dt64[ns] | ndarray[M8ns] | DatetimeArray | DatetimeArray | dt64[ns tz] | ndarray[M8ns] | DatetimeArray | DatetimeArray | td64[ns] | ndarray[m8ns] | TimedeltaArray| TimedeltaArray | Period | ndarray[obj] | PeriodArray | PeriodArray | Nullable | EA | EA | EA |",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\series.py",
    "ast_data": "FunctionDef name:_values arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "ReprHTMLMixin",
    "source_code": "class ReprHTMLMixin:\n\n    @property\n    def _repr_html_(self):\n        if get_config()['display'] != 'diagram':\n            raise AttributeError(\"_repr_html_ is only defined when the 'display' configuration option is set to 'diagram'\")\n        return self._repr_html_inner\n\n    def _repr_html_inner(self):\n        return self._html_repr()\n\n    def _repr_mimebundle_(self, **kwargs):\n        output = {'text/plain': repr(self)}\n        if get_config()['display'] == 'diagram':\n            output['text/html'] = self._html_repr()\n        return output",
    "docstring": "Mixin to handle consistently the HTML representation. When inheriting from this class, you need to define an attribute which is a callable that returns the HTML representation to be shown.",
    "type": "class",
    "file_path": "scikit-learn\\sklearn\\utils\\_repr_html\\base.py",
    "ast_data": "ClassDef name:ReprHTMLMixin FunctionDef name:_repr_html_ arg:self arguments arg If Compare Call Raise Call Return return:yes FunctionDef name:_repr_html_inner arg:self arguments arg Return return:yes Call FunctionDef name:_repr_mimebundle_ arg:self arguments arg arg Assign Call If Compare Call Assign Call Return return:yes"
  },
  {
    "library": "numpy",
    "name": "time_randint_fast",
    "source_code": "def time_randint_fast(self):\n    np.random.randint(0, 2 ** 30, size=10 ** 5)",
    "docstring": "Compare to uint32 below",
    "type": "method",
    "file_path": "numpy\\benchmarks\\benchmarks\\bench_random.py",
    "ast_data": "FunctionDef name:time_randint_fast arg:self arguments arg Call"
  },
  {
    "library": "sphinx",
    "name": "_visit_sig_parameter_list",
    "source_code": "def _visit_sig_parameter_list(self, node: Element, parameter_group: type[Element], sig_open_paren: str, sig_close_paren: str) -> None:\n    self.add_text(sig_open_paren)\n    self.is_first_param = True\n    self.optional_param_level = 0\n    self.params_left_at_level = 0\n    self.param_group_index = 0\n    self.list_is_required_param = [isinstance(c, parameter_group) for c in node.children]\n    self.required_params_left = sum(self.list_is_required_param)\n    self.param_separator = ', '\n    self.multi_line_parameter_list = node.get('multi_line_parameter_list', False)\n    self.trailing_comma = node.get('multi_line_trailing_comma', False)\n    if self.multi_line_parameter_list:\n        self.param_separator = self.param_separator.rstrip()\n    self.context.append(sig_close_paren)",
    "docstring": "Visit a signature parameters or type parameters list. The *parameter_group* value is the type of a child node acting as a required parameter or as a set of contiguous optional parameters.",
    "type": "method",
    "file_path": "sphinx\\sphinx\\writers\\text.py",
    "ast_data": "FunctionDef name:_visit_sig_parameter_list arg:self arg:node arg:parameter_group arg:sig_open_paren arg:sig_close_paren arguments arg arg arg arg arg Call Assign Assign Assign Assign Assign Call Assign Call Assign Assign Call Assign Call If Assign Call Call"
  },
  {
    "library": "django",
    "name": "get_cache_key",
    "source_code": "def get_cache_key(request, key_prefix=None, method='GET', cache=None):\n    if key_prefix is None:\n        key_prefix = settings.CACHE_MIDDLEWARE_KEY_PREFIX\n    cache_key = _generate_cache_header_key(key_prefix, request)\n    if cache is None:\n        cache = caches[settings.CACHE_MIDDLEWARE_ALIAS]\n    headerlist = cache.get(cache_key)\n    if headerlist is not None:\n        return _generate_cache_key(request, method, headerlist, key_prefix)\n    else:\n        return None",
    "docstring": "Return a cache key based on the request URL and query. It can be used in the request phase because it pulls the list of headers to take into account from the global URL registry and uses those to build a cache key to check against. If there isn't a headerlist stored, return None, indicating that the page needs to be rebuilt.",
    "type": "function",
    "file_path": "django\\django\\utils\\cache.py",
    "ast_data": "FunctionDef name:get_cache_key arg:request arg:key_prefix arg:method arg:cache arguments arg arg arg arg If Compare Assign Assign Call If Compare Assign Assign Call If Compare Return return:yes Call Return return:no"
  },
  {
    "library": "tensorflow",
    "name": "KerasModeKeys",
    "source_code": "class KerasModeKeys(object):\n    TRAIN = 'train'\n    TEST = 'test'\n    PREDICT = 'predict'",
    "docstring": "Standard names for model modes. The following standard keys are defined: * : training/fitting mode. * : testing/evaluation mode. * : prediction/inference mode.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\saved_model\\model_utils\\mode_keys.py",
    "ast_data": "ClassDef name:KerasModeKeys Assign Assign Assign"
  },
  {
    "library": "seaborn",
    "name": "PlotConfig",
    "source_code": "class PlotConfig:\n\n    def __init__(self):\n        self._theme = ThemeConfig()\n        self._display = {'format': 'png', 'scaling': 0.85, 'hidpi': True}\n\n    @property\n    def theme(self) -> dict[str, Any]:\n        return self._theme\n\n    @property\n    def display(self) -> DisplayConfig:\n        return self._display",
    "docstring": "Configuration for default behavior / appearance of class: instances.",
    "type": "class",
    "file_path": "seaborn\\seaborn\\_core\\plot.py",
    "ast_data": "ClassDef name:PlotConfig FunctionDef name:__init__ arg:self arguments arg Assign Call Assign FunctionDef name:theme arg:self arguments arg Return return:yes FunctionDef name:display arg:self arguments arg Return return:yes"
  },
  {
    "library": "sphinx",
    "name": "connect",
    "source_code": "def connect(self, name: str, callback: Callable[..., Any], priority: int) -> int:\n    if name not in self.events:\n        raise ExtensionError(__('Unknown event name: %s') % name)\n    listener_id = self.next_listener_id\n    self.next_listener_id += 1\n    self.listeners[name].append(EventListener(listener_id, callback, priority))\n    return listener_id",
    "docstring": "Connect a handler to specific event.",
    "type": "method",
    "file_path": "sphinx\\sphinx\\events.py",
    "ast_data": "FunctionDef name:connect arg:self arg:name arg:callback arg:priority arguments arg arg arg arg If Compare Raise Call Call Assign Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "ndim",
    "source_code": "@doc_controls.do_not_generate_docs\ndef ndim(x):\n    return x.shape.rank",
    "docstring": "Returns the number of axes in a tensor, as an integer. Args: x: Tensor or variable. Returns: Integer (scalar), number of axes. Examples: >>> input = tf.keras.backend.placeholder(shape=(2, 4, 5)) >>> val = np.array([[1, 2], [3, 4]]) >>> kvar = tf.keras.backend.variable(value=val) >>> tf.keras.backend.ndim(input) 3 >>> tf.keras.backend.ndim(kvar) 2",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\backend.py",
    "ast_data": "FunctionDef name:ndim arg:x arguments arg Return return:yes"
  },
  {
    "library": "pandas",
    "name": "_kind",
    "source_code": "@property\n@abstractmethod\ndef _kind(self) -> str:\n    raise NotImplementedError",
    "docstring": "Specify kind str. Must be overridden in child class",
    "type": "method",
    "file_path": "pandas\\pandas\\plotting\\_matplotlib\\core.py",
    "ast_data": "FunctionDef name:_kind arg:self arguments arg Raise"
  },
  {
    "library": "tensorflow",
    "name": "_make_dataset_iterator",
    "source_code": "def _make_dataset_iterator(self, dataset):\n    return input_lib_v1.DatasetIterator(dataset, self._input_workers, self._container_strategy())",
    "docstring": "Make iterator from dataset without splitting the batch.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\one_device_strategy.py",
    "ast_data": "FunctionDef name:_make_dataset_iterator arg:self arg:dataset arguments arg arg Return return:yes Call Call"
  },
  {
    "library": "scipy",
    "name": "ifftshift",
    "source_code": "def ifftshift(x, axes=None):\n    xp = array_namespace(x)\n    if hasattr(xp, 'fft'):\n        return xp.fft.ifftshift(x, axes=axes)\n    x = np.asarray(x)\n    y = np.fft.ifftshift(x, axes=axes)\n    return xp.asarray(y)",
    "docstring": "The inverse of . Although identical for even-length , the functions differ by one sample for odd-length . Parameters ---------- x : array_like Input array. axes : int or shape tuple, optional Axes over which to calculate. Defaults to None, which shifts all axes. Returns ------- y : ndarray The shifted array. See Also -------- fftshift : Shift zero-frequency component to the center of the spectrum. Examples -------- >>> import numpy as np >>> freqs = np.fft.fftfreq(9, d=1./9).reshape(3, 3) >>> freqs array([[ 0., 1., 2.], [ 3., 4., -4.], [-3., -2., -1.]]) >>> np.fft.ifftshift(np.fft.fftshift(freqs)) array([[ 0., 1., 2.], [ 3., 4., -4.], [-3., -2., -1.]])",
    "type": "function",
    "file_path": "scipy\\scipy\\fft\\_helper.py",
    "ast_data": "FunctionDef name:ifftshift arg:x arg:axes arguments arg arg Assign Call If Call Return return:yes Call Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "django",
    "name": "quote_etag",
    "source_code": "def quote_etag(etag_str):\n    if ETAG_MATCH.match(etag_str):\n        return etag_str\n    else:\n        return '\"%s\"' % etag_str",
    "docstring": "If the provided string is already a quoted ETag, return it. Otherwise, wrap the string in quotes, making it a strong ETag.",
    "type": "function",
    "file_path": "django\\django\\utils\\http.py",
    "ast_data": "FunctionDef name:quote_etag arg:etag_str arguments arg If Call Return return:yes Return return:yes"
  },
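The three cases `quote_etag` distinguishes, as a quick sketch:

```python
from django.utils.http import quote_etag

print(quote_etag("abc"))      # '"abc"'   - wrapped, making a strong ETag
print(quote_etag('"abc"'))    # '"abc"'   - already quoted, returned as-is
print(quote_etag('W/"abc"'))  # 'W/"abc"' - weak ETags also match ETAG_MATCH
```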
  {
    "library": "pandas",
    "name": "_gen_rows_with_counts",
    "source_code": "@abstractmethod\ndef _gen_rows_with_counts(self) -> Iterator[Sequence[str]]:\n    pass",
    "docstring": "Iterator with string representation of body data with counts.",
    "type": "method",
    "file_path": "pandas\\pandas\\io\\formats\\info.py",
    "ast_data": "FunctionDef name:_gen_rows_with_counts arg:self arguments arg"
  },
  {
    "library": "pytorch",
    "name": "register_step_pre_hook",
    "source_code": "def register_step_pre_hook(self, hook: OptimizerPreHook) -> RemovableHandle:\n    handle = hooks.RemovableHandle(self._optimizer_step_pre_hooks)\n    self._optimizer_step_pre_hooks[handle.id] = hook\n    return handle",
    "docstring": "Register an optimizer step pre hook which will be called before optimizer step. It should have the following signature:: hook(optimizer, args, kwargs) -> None or modified args and kwargs The `torch.utils.hooks.RemovableHandle`",
    "type": "method",
    "file_path": "pytorch\\torch\\optim\\optimizer.py",
    "ast_data": "FunctionDef name:register_step_pre_hook arg:self arg:hook arguments arg arg Assign Call Assign Return return:yes"
  },
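A minimal sketch of registering and removing a step pre-hook; the logging hook is illustrative.

```python
import torch
from torch import nn

model = nn.Linear(2, 2)
opt = torch.optim.SGD(model.parameters(), lr=0.1)

def log_step(optimizer, args, kwargs):
    print("about to step, lr =", optimizer.param_groups[0]["lr"])
    return None  # or return (new_args, new_kwargs) to rewrite them

handle = opt.register_step_pre_hook(log_step)
model(torch.randn(1, 2)).sum().backward()
opt.step()       # the hook fires before the actual step
handle.remove()  # detach via the returned RemovableHandle
```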
  {
    "library": "kornia",
    "name": "rgb255_to_normals",
    "source_code": "def rgb255_to_normals(image: Tensor) -> Tensor:\n    KORNIA_CHECK_IS_COLOR(image)\n    normals = normalize(image / 255.0 * 2.0 - 1.0, dim=-3, p=2.0)\n    return normals",
    "docstring": "Convert an image from RGB [0, 255] to surface normals for visualization purposes. Args: image: RGB Image to be converted to surface normals of shape :math:. Returns: surface normals version of the image with shape of shape :math:. Example: >>> input = torch.rand(2, 3, 4, 5) >>> output = rgb255_to_normals(input) # 2x3x4x5",
    "type": "function",
    "file_path": "kornia\\kornia\\color\\rgb.py",
    "ast_data": "FunctionDef name:rgb255_to_normals arg:image arguments arg Call Assign Call Return return:yes"
  },
  {
    "library": "numpy",
    "name": "outer",
    "source_code": "def outer(a, b):\n    fa = filled(a, 0).ravel()\n    fb = filled(b, 0).ravel()\n    d = np.outer(fa, fb)\n    ma = getmask(a)\n    mb = getmask(b)\n    if ma is nomask and mb is nomask:\n        return masked_array(d)\n    ma = getmaskarray(a)\n    mb = getmaskarray(b)\n    m = make_mask(1 - np.outer(1 - ma, 1 - mb), copy=False)\n    return masked_array(d, mask=m)",
    "docstring": "maskedarray version of the numpy function.",
    "type": "function",
    "file_path": "numpy\\numpy\\ma\\core.py",
    "ast_data": "FunctionDef name:outer arg:a arg:b arguments arg arg Assign Call Call Assign Call Call Assign Call Assign Call Assign Call If BoolOp Compare Compare Return return:yes Call Assign Call Assign Call Assign Call Call Return return:yes Call"
  },
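A small example of the masked `outer` above via its public alias `np.ma.outer`: a masked entry in either input masks the corresponding row or column of the product.

```python
import numpy as np

a = np.ma.array([1, 2, 3], mask=[False, True, False])
b = np.ma.array([4, 5], mask=[False, False])
result = np.ma.outer(a, b)
print(result)
# [[4 5]
#  [-- --]     row for the masked element of `a` is fully masked
#  [12 15]]
```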
  {
    "library": "virtualenv",
    "name": "run",
    "source_code": "@abstractmethod\ndef run(self, creator):\n    raise NotImplementedError",
    "docstring": "Perform the seed operation. :param creator: the creator (based of :class:) we used to create this virtual environment",
    "type": "method",
    "file_path": "virtualenv\\src\\virtualenv\\seed\\seeder.py",
    "ast_data": "FunctionDef name:run arg:self arg:creator arguments arg arg Raise"
  },
  {
    "library": "scikit-learn",
    "name": "_get_threadlocal_config",
    "source_code": "def _get_threadlocal_config():\n    if not hasattr(_threadlocal, 'global_config'):\n        _threadlocal.global_config = _global_config.copy()\n    return _threadlocal.global_config",
    "docstring": "Get a threadlocal **mutable** configuration. If the configuration does not exist, copy the default global configuration.",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\_config.py",
    "ast_data": "FunctionDef name:_get_threadlocal_config arguments If Call Assign Call Return return:yes"
  },
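The thread-local config above backs scikit-learn's public `get_config`/`set_config`; a brief sketch:

```python
import sklearn

sklearn.set_config(assume_finite=True)        # mutates this thread's config copy
print(sklearn.get_config()["assume_finite"])  # True
# Other threads still see the default global configuration until they mutate their own copy.
```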
  {
    "library": "cherrypy",
    "name": "__delitem__",
    "source_code": "def __delitem__(self, key):\n    if not self.loaded:\n        self.load()\n    del self._data[key]",
    "docstring": "Delete object stored in the session.",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\lib\\sessions.py",
    "ast_data": "FunctionDef name:__delitem__ arg:self arg:key arguments arg arg If Call"
  },
  {
    "library": "pytorch",
    "name": "timer",
    "source_code": "@contextlib.contextmanager\ndef timer(logger: logging.Logger, prefix: str) -> Iterator[None]:\n    start_time = time.perf_counter()\n    yield\n    logger.info('%s took %.3f [s]', prefix, time.perf_counter() - start_time)",
    "docstring": "Timed context manager",
    "type": "function",
    "file_path": "pytorch\\tools\\nightly.py",
    "ast_data": "FunctionDef name:timer arg:logger arg:prefix arguments arg arg Assign Call Call Call"
  },
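The `timer` helper above is internal to PyTorch's nightly tooling; a self-contained sketch of the same pattern:

```python
import contextlib
import logging
import time
from collections.abc import Iterator

@contextlib.contextmanager
def timer(logger: logging.Logger, prefix: str) -> Iterator[None]:
    start_time = time.perf_counter()
    yield
    logger.info("%s took %.3f [s]", prefix, time.perf_counter() - start_time)

logging.basicConfig(level=logging.INFO)
with timer(logging.getLogger(__name__), "sleep"):
    time.sleep(0.2)  # logs roughly: "sleep took 0.200 [s]"
```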
  {
    "library": "pandas",
    "name": "_convert_to_protection",
    "source_code": "@classmethod\ndef _convert_to_protection(cls, protection_dict):\n    from openpyxl.styles import Protection\n    return Protection(**protection_dict)",
    "docstring": "Convert `` to an openpyxl v2 Protection object. Parameters ---------- protection_dict : dict A dict with zero or more of the following keys. 'locked' 'hidden' Returns -------",
    "type": "method",
    "file_path": "pandas\\pandas\\io\\excel\\_openpyxl.py",
    "ast_data": "FunctionDef name:_convert_to_protection arg:cls arg:protection_dict arguments arg arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_copy_single_tensor",
    "source_code": "def _copy_single_tensor(tensor):\n    device = tensor.device\n    if isinstance(tensor, saveable_object_lib.SaveSpec):\n        with ops.device(device):\n            tensor = tensor.tensor\n    if tensor is not None:\n        with ops.device(saveable_object_util.set_cpu0(device)):\n            tensor = array_ops.identity(tensor)\n    return tensor",
    "docstring": "Copies a single Tensor / SaveSpec onto the CPU device.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\checkpoint\\checkpoint.py",
    "ast_data": "FunctionDef name:_copy_single_tensor arg:tensor arguments arg Assign If Call With Call Assign If Compare With Call Call Assign Call Return return:yes"
  },
  {
    "library": "scipy",
    "name": "pbdn_seq",
    "source_code": "def pbdn_seq(n, z):\n    if not (isscalar(n) and isscalar(z)):\n        raise ValueError('arguments must be scalars.')\n    if floor(n) != n:\n        raise ValueError('n must be an integer.')\n    if abs(n) <= 1:\n        n1 = 1\n    else:\n        n1 = n\n    cpb, cpd = _specfun.cpbdn(n1, z)\n    return (cpb[:n1 + 1], cpd[:n1 + 1])",
    "docstring": "Parabolic cylinder functions Dn(z) and derivatives. Parameters ---------- n : int Order of the parabolic cylinder function z : complex Value at which to evaluate the function and derivatives Returns ------- dv : ndarray Values of D_i(z), for i=0, ..., i=n. dp : ndarray Derivatives D_i'(z), for i=0, ..., i=n. References ---------- .. [1] Zhang, Shanjie and Jin, Jianming. \"Computation of Special Functions\", John Wiley and Sons, 1996, chapter 13.",
    "type": "function",
    "file_path": "scipy\\scipy\\special\\_basic.py",
    "ast_data": "FunctionDef name:pbdn_seq arg:n arg:z arguments arg arg If BoolOp Call Call Raise Call If Compare Call Raise Call If Compare Call Assign Assign Assign Call Return return:yes"
  },
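A quick usage sketch for `scipy.special.pbdn_seq`:

```python
from scipy.special import pbdn_seq

dv, dp = pbdn_seq(3, 1.0 + 0.5j)
# dv[i] holds D_i(z) and dp[i] holds D_i'(z) for i = 0..3
print(dv.shape, dp.shape)  # (4,) (4,)
```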
  {
    "library": "matplotlib",
    "name": "get_alpha",
    "source_code": "def get_alpha(self):\n    return self._alpha",
    "docstring": "Return the alpha value used for blending - not supported on all backends.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\artist.py",
    "ast_data": "FunctionDef name:get_alpha arg:self arguments arg Return return:yes"
  },
  {
    "library": "django",
    "name": "BaseTzLoader",
    "source_code": "class BaseTzLoader(TimestamptzLoader):\n    timezone = None\n\n    def load(self, data):\n        res = super().load(data)\n        return res.replace(tzinfo=self.timezone)",
    "docstring": "Load a PostgreSQL timestamptz using the a specific timezone. The timezone can be None too, in which case it will be chopped.",
    "type": "class",
    "file_path": "django\\django\\db\\backends\\postgresql\\psycopg_any.py",
    "ast_data": "ClassDef name:BaseTzLoader Assign FunctionDef name:load arg:self arg:data arguments arg arg Assign Call Call Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "split",
    "source_code": "def split(self, X, y=None, groups=None):\n    return super().split(X, y, groups)",
    "docstring": "Generate indices to split data into training and test set. Parameters ---------- X : array-like of shape (n_samples, n_features) Training data, where is the number of samples and is the number of features. y : array-like of shape (n_samples,), default=None The target variable for supervised learning problems. groups : array-like of shape (n_samples,) Group labels for the samples used while splitting the dataset into train/test set. Yields ------ train : ndarray The training set indices for that split. test : ndarray The testing set indices for that split. Notes ----- Randomized CV splitters may return different results for each call of split. You can make the results identical by setting to an integer.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\model_selection\\_split.py",
    "ast_data": "FunctionDef name:split arg:self arg:X arg:y arg:groups arguments arg arg arg arg Return return:yes Call Call"
  },
  {
    "library": "scipy",
    "name": "construct_fast",
    "source_code": "@classmethod\ndef construct_fast(cls, c, x, extrapolate=None):\n    self = object.__new__(cls)\n    self.c = c\n    self.x = x\n    if extrapolate is None:\n        extrapolate = True\n    self.extrapolate = extrapolate\n    return self",
    "docstring": "Construct the piecewise polynomial without making checks. Takes the same parameters as the constructor. Input arguments `` array must have dtype float.",
    "type": "method",
    "file_path": "scipy\\scipy\\interpolate\\_interpolate.py",
    "ast_data": "FunctionDef name:construct_fast arg:cls arg:c arg:x arg:extrapolate arguments arg arg arg arg Assign Call Assign Assign If Compare Assign Assign Return return:yes"
  },
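A sketch of `construct_fast` via `scipy.interpolate.PPoly`, which exposes this classmethod; coefficients are per interval, highest degree first.

```python
import numpy as np
from scipy.interpolate import PPoly

x = np.array([0.0, 1.0, 2.0])       # breakpoints (dtype float, as required)
c = np.array([[1.0, -1.0],          # degree-1 coefficients, one column per interval
              [0.0, 2.0]])          # constant terms, one column per interval
pp = PPoly.construct_fast(c, x)     # no shape/type validation is performed
print(pp(0.5))                      # 1.0*(0.5-0) + 0.0 = 0.5
```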
  {
    "library": "pytorch",
    "name": "prune_choices_postscreen",
    "source_code": "@staticmethod\ndef prune_choices_postscreen(choices: list[ChoiceCaller], candidate_timings: dict[ChoiceCaller, float]) -> list[ChoiceCaller]:\n    from .codegen.cuda.cuda_kernel import CUDATemplateCaller\n    if len(candidate_timings) < 10:\n        return []\n    log.debug('Before pruning using prescreening timings, %d choices', len(choices))\n    sorted_candidates = sorted(candidate_timings.keys(), key=lambda choice: candidate_timings[choice])\n    num_to_keep = max(int(math.sqrt(len(choices)) / 4), 8)\n    candidates_to_prune = OrderedSet((candidate.hash_key() for candidate in sorted_candidates[num_to_keep:]))\n    for candidate in sorted_candidates[:num_to_keep]:\n        if candidate_timings[candidate] == float('inf'):\n            candidates_to_prune.add(candidate.hash_key())\n        elif isinstance(candidate, CUDATemplateCaller):\n            candidate.bmreq.ensure_dll_loaded()\n    choices = [choice for choice in choices if choice.hash_key() not in candidates_to_prune]\n    log.debug('After pruning using prescreening timings, %d choices', len(choices))\n    return choices",
    "docstring": "Prune the choices after prescreening.",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\select_algorithm.py",
    "ast_data": "FunctionDef name:prune_choices_postscreen arg:choices arg:candidate_timings arguments arg arg If Compare Call Return return:no Call Call Assign Call Call arguments arg Assign Call Call Call Call Assign Call Call For If Compare Call Call Call If Call Call Assign Compare Call Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_set_noise_parameters",
    "source_code": "def _set_noise_parameters(self, noise_level):\n    include_DTensor_ops = False\n    include_module_data = False\n    include_ops = False\n    include_trivial_ops = False\n    if noise_level > 0:\n        include_DTensor_ops = True\n        include_module_data = True\n    if noise_level > 1:\n        include_ops = True\n    if noise_level > 2:\n        include_trivial_ops = True\n    return (include_DTensor_ops, include_module_data, include_ops, include_trivial_ops)",
    "docstring": "sets variables controlling what information displays based on noise level",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\tensor\\debug\\_comm_mode.py",
    "ast_data": "FunctionDef name:_set_noise_parameters arg:self arg:noise_level arguments arg arg Assign Assign Assign Assign If Compare Assign Assign If Compare Assign If Compare Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "FunctionContext",
    "source_code": "class FunctionContext(NamedTuple):\n    context: Any = None\n    scope_type: Any = None",
    "docstring": "Contains information regarding tf.function execution context.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\core\\function\\polymorphism\\function_cache.py",
    "ast_data": "ClassDef name:FunctionContext"
  },
  {
    "library": "pandas",
    "name": "_should_compare",
    "source_code": "@final\ndef _should_compare(self, other: Index) -> bool:\n    if other.inferred_type == 'boolean' and is_any_real_numeric_dtype(self.dtype) or (self.inferred_type == 'boolean' and is_any_real_numeric_dtype(other.dtype)):\n        return False\n    dtype = _unpack_nested_dtype(other)\n    return self._is_comparable_dtype(dtype) or is_object_dtype(dtype) or is_string_dtype(dtype)",
    "docstring": "Check if can ever have non-False entries.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\indexes\\base.py",
    "ast_data": "FunctionDef name:_should_compare arg:self arg:other arguments arg arg If BoolOp BoolOp Compare Call BoolOp Compare Call Return return:yes Assign Call Return return:yes BoolOp Call Call Call"
  },
  {
    "library": "pandas",
    "name": "pop",
    "source_code": "def pop(self, item: Hashable) -> Any:\n    return super().pop(item=item)",
    "docstring": "Return item and drops from series. Raise KeyError if not found. Parameters ---------- item : label Index of the element that needs to be removed. Returns ------- scalar Value that is popped from series. See Also -------- Series.drop: Drop specified values from Series. Series.drop_duplicates: Return Series with duplicate values removed. Examples -------- >>> ser = pd.Series([1, 2, 3]) >>> ser.pop(0) 1 >>> ser 1 2 2 3 dtype: int64",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\series.py",
    "ast_data": "FunctionDef name:pop arg:self arg:item arguments arg arg Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "saver",
    "source_code": "@property\ndef saver(self):\n    return self._saver",
    "docstring": "Return the Saver used by the supervisor. Returns: A Saver object.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\training\\supervisor.py",
    "ast_data": "FunctionDef name:saver arg:self arguments arg Return return:yes"
  },
  {
    "library": "authlib",
    "name": "validate_userinfo_encryption_enc_values_supported",
    "source_code": "def validate_userinfo_encryption_enc_values_supported(self):\n    validate_array_value(self, 'userinfo_encryption_enc_values_supported')",
    "docstring": "OPTIONAL. JSON array containing a list of the JWE encryption algorithms (enc values) [JWA] supported by the UserInfo Endpoint to encode the Claims in a JWT.",
    "type": "method",
    "file_path": "authlib\\authlib\\oidc\\discovery\\models.py",
    "ast_data": "FunctionDef name:validate_userinfo_encryption_enc_values_supported arg:self arguments arg Call"
  },
  {
    "library": "matplotlib",
    "name": "_value_in_bounds",
    "source_code": "def _value_in_bounds(self, val):\n    val = self._stepped_value(val)\n    if val <= self.valmin:\n        if not self.closedmin:\n            return\n        val = self.valmin\n    elif val >= self.valmax:\n        if not self.closedmax:\n            return\n        val = self.valmax\n    if self.slidermin is not None and val <= self.slidermin.val:\n        if not self.closedmin:\n            return\n        val = self.slidermin.val\n    if self.slidermax is not None and val >= self.slidermax.val:\n        if not self.closedmax:\n            return\n        val = self.slidermax.val\n    return val",
    "docstring": "Makes sure *val* is with given bounds.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\widgets.py",
    "ast_data": "FunctionDef name:_value_in_bounds arg:self arg:val arguments arg arg Assign Call If Compare If Return return:no Assign If Compare If Return return:no Assign If BoolOp Compare Compare If Return return:no Assign If BoolOp Compare Compare If Return return:no Assign Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "set_annotation_clip",
    "source_code": "def set_annotation_clip(self, b):\n    self._annotation_clip = b\n    self.stale = True",
    "docstring": "Set the annotation's clipping behavior. Parameters ---------- b : bool or None - True: The annotation will be clipped when ``.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\patches.py",
    "ast_data": "FunctionDef name:set_annotation_clip arg:self arg:b arguments arg arg Assign Assign"
  },
  {
    "library": "tensorflow",
    "name": "Splitter",
    "source_code": "class Splitter(abc.ABC):\n\n    @property\n    @abc.abstractmethod\n    def version_def(self) -> versions_pb2.VersionDef:\n        pass\n\n    @abc.abstractmethod\n    def split(self) -> tuple[Sequence[Union[message.Message, bytes]], chunk_pb2.ChunkedMessage]:\n        pass\n\n    @abc.abstractmethod\n    def write(self, file_prefix: str) -> str:\n        pass",
    "docstring": "An abstract class for splitting and writing protos that are > 2GB. See the README on how to use or subclass this class.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\tools\\proto_splitter\\split.py",
    "ast_data": "ClassDef name:Splitter FunctionDef name:version_def arg:self arguments arg FunctionDef name:split arg:self arguments arg FunctionDef name:write arg:self arg:file_prefix arguments arg arg"
  },
  {
    "library": "pandas",
    "name": "bisect_left",
    "source_code": "@numba.njit(nogil=True, parallel=False)\ndef bisect_left(a: list[Any], x: Any, lo: int=0, hi: int=-1) -> int:\n    if hi == -1:\n        hi = len(a)\n    while lo < hi:\n        mid = (lo + hi) // 2\n        if a[mid] < x:\n            lo = mid + 1\n        else:\n            hi = mid\n    return lo",
    "docstring": "Same as not in numba yet!",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\_numba\\kernels\\min_max_.py",
    "ast_data": "FunctionDef name:bisect_left arg:a arg:x arg:lo arg:hi arguments arg arg arg arg If Compare Assign Call While Compare Assign If Compare Assign Assign Return return:yes Call"
  },
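The kernel above mirrors the standard-library semantics; a reference check against `bisect.bisect_left`:

```python
import bisect

a = [1, 2, 4, 4, 7]
# Leftmost insertion point that keeps `a` sorted; equal elements go to the left.
assert bisect.bisect_left(a, 4) == 2
# The numba kernel above reproduces this with an explicit lo/hi search window.
```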
  {
    "library": "numpy",
    "name": "chebval3d",
    "source_code": "def chebval3d(x, y, z, c):\n    return pu._valnd(chebval, c, x, y, z)",
    "docstring": "Evaluate a 3-D Chebyshev series at points (x, y, z). This function returns the values: .. math:: p(x,y,z) = \\sum_{i,j,k} c_{i,j,k} * T_i(x) * T_j(y) * T_k(z) The parameters , , and are converted to arrays only if they are tuples or a lists, otherwise they are treated as a scalars and they must have the same shape after conversion. In either case, either , , and or their elements must support multiplication and addition both with themselves and with the elements of . If has fewer than 3 dimensions, ones are implicitly appended to its shape to make it 3-D. The shape of the result will be c.shape[3:] + x.shape. Parameters ---------- x, y, z : array_like, compatible object The three dimensional series is evaluated at the points `xyzxyzcxyz`. See Also -------- chebval, chebval2d, chebgrid2d, chebgrid3d",
    "type": "function",
    "file_path": "numpy\\numpy\\polynomial\\chebyshev.py",
    "ast_data": "FunctionDef name:chebval3d arg:x arg:y arg:z arg:c arguments arg arg arg arg Return return:yes Call"
  },
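A short example of `chebval3d` with an all-ones coefficient cube, where the value factors into one 1-D sum per axis:

```python
import numpy as np
from numpy.polynomial import chebyshev as C

c = np.ones((2, 2, 2))  # c[i, j, k] multiplies T_i(x) * T_j(y) * T_k(z)
val = C.chebval3d(0.5, 0.5, 0.5, c)
# (T_0(0.5) + T_1(0.5))**3 = (1 + 0.5)**3 = 3.375
print(val)
```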
  {
    "library": "matplotlib",
    "name": "__init__",
    "source_code": "def __init__(self, axis, functions):\n    forward, inverse = functions\n    transform = FuncTransform(forward, inverse)\n    self._transform = transform",
    "docstring": "Parameters ---------- axis : The axis for the scale. functions : (callable, callable) two-tuple of the forward and inverse functions for the scale. The forward function must be monotonic. Both functions must have the signature:: def forward(values: array-like) -> array-like",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\scale.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:axis arg:functions arguments arg arg arg Assign Assign Call Assign"
  },
  {
    "library": "pytorch",
    "name": "enable_symm_mem_for_group",
    "source_code": "def enable_symm_mem_for_group(group_name: str) -> None:\n    if group_name in _group_name_to_store:\n        return\n    group = c10d._resolve_process_group(group_name)\n    global_ranks = sorted(c10d._world.pg_group_ranks[group].keys())\n    global_ranks_str = '_'.join(map(str, global_ranks))\n    store = c10d.PrefixStore(f'symmetric_memory-{global_ranks_str}', c10d._get_process_group_store(group))\n    _group_name_to_store[group_name] = store\n    _SymmetricMemory.set_group_info(group_name, group.rank(), group.size(), store)",
    "docstring": "Enables symmetric memory for a process group. Args: group_name (str): the name of the process group.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\_symmetric_memory\\__init__.py",
    "ast_data": "FunctionDef name:enable_symm_mem_for_group arg:group_name arguments arg If Compare Return return:no Assign Call Assign Call Call Assign Call Call Assign Call Call Assign Call Call Call"
  },
  {
    "library": "kornia",
    "name": "brightness",
    "source_code": "def brightness(probability: float, magnitude: int) -> OperationBase:\n    magnitudes = linspace(0.1, 1.9, 11)\n    return Brightness(None, probability, magnitude_range=(magnitudes[magnitude].item(), magnitudes[magnitude + 1].item()))",
    "docstring": "Return brightness op.",
    "type": "function",
    "file_path": "kornia\\kornia\\augmentation\\auto\\autoaugment\\ops.py",
    "ast_data": "FunctionDef name:brightness arg:probability arg:magnitude arguments arg arg Assign Call Return return:yes Call Call Call"
  },
  {
    "library": "scipy",
    "name": "estimate_rank",
    "source_code": "def estimate_rank(A, eps, rng=None):\n    from scipy.sparse.linalg import LinearOperator\n    rng = np.random.default_rng(rng)\n    real = _is_real(A)\n    if isinstance(A, np.ndarray):\n        A = _C_contiguous_copy(A)\n        if real:\n            rank, _ = _backend.idd_estrank(A, eps, rng=rng)\n        else:\n            rank, _ = _backend.idz_estrank(A, eps, rng=rng)\n        if rank == 0:\n            rank = min(A.shape)\n        return rank\n    elif isinstance(A, LinearOperator):\n        if real:\n            return _backend.idd_findrank(A, eps, rng=rng)[0]\n        else:\n            return _backend.idz_findrank(A, eps, rng=rng)[0]\n    else:\n        raise _TYPE_ERROR",
    "docstring": "Estimate matrix rank to a specified relative precision using randomized methods. The matrix can be given as either a :class: or a :class:, with different algorithms used for each case. If is of type :class:, then the output rank is typically about 8 higher than the actual numerical rank. .. This function automatically detects the form of the input parameters and passes them to the appropriate backend. For details, see :func:, :func:, :func:, and :func:. Parameters ---------- A : :class: or :class: Matrix whose rank is to be estimated, given as either a :class: or a :class: with the method (to apply the matrix adjoint). eps : float Relative error for numerical rank definition. rng : , optional Pseudorandom number generator state. When is None, a new is created using entropy from the operating system. Types other than are passed to to instantiate a `rand`, the argument is ignored. Returns ------- int Estimated matrix rank.",
    "type": "function",
    "file_path": "scipy\\scipy\\linalg\\interpolative.py",
    "ast_data": "FunctionDef name:estimate_rank arg:A arg:eps arg:rng arguments arg arg arg Assign Call Assign Call If Call Assign Call If Assign Call Assign Call If Compare Assign Call Return return:yes If Call If Return return:yes Call Return return:yes Call Raise"
  },
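A usage sketch for `estimate_rank` on a low-rank ndarray; per the docstring, the ndarray path may overestimate the numerical rank by roughly 8.

```python
import numpy as np
from scipy.linalg import interpolative as sli

rng = np.random.default_rng(0)
A = rng.standard_normal((100, 20)) @ rng.standard_normal((20, 100))  # true rank 20
est = sli.estimate_rank(A, eps=1e-8, rng=rng)
print(est)  # close to 20 (possibly a few higher for ndarray input)
```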
  {
    "library": "tensorflow",
    "name": "_update_sample_weight_mode",
    "source_code": "def _update_sample_weight_mode(model, mode, inputs):\n    if mode == ModeKeys.PREDICT:\n        return\n    sample_weights = None\n    if not callable(inputs):\n        sample_weights = inputs[len(model._feed_inputs) + len(model._feed_targets):]\n        has_learning_phase_pl = mode == ModeKeys.TRAIN and (not isinstance(backend.symbolic_learning_phase(), int))\n        if has_learning_phase_pl:\n            sample_weights = sample_weights[:-1]\n        model._update_sample_weight_modes(sample_weights=sample_weights)\n    if model._distribution_strategy:\n        distributed_training_utils_v1._update_sample_weight_modes(model, mode, sample_weights)",
    "docstring": "Updates the sample_weight_mode of a given model.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\training_arrays_v1.py",
    "ast_data": "FunctionDef name:_update_sample_weight_mode arg:model arg:mode arg:inputs arguments arg arg arg If Compare Return return:no Assign If Call Assign Call Call Assign BoolOp Compare Call Call If Assign Call If Call"
  },
  {
    "library": "scikit-learn",
    "name": "fit",
    "source_code": "@_fit_context(prefer_skip_nested_validation=False)\ndef fit(self, X, y=None):\n    self._fit(X)\n    self._n_features_out = self.n_samples_fit_\n    return self",
    "docstring": "Fit the k-nearest neighbors transformer from the training dataset. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) or (n_samples, n_samples) if metric='precomputed' Training data. y : Ignored Not used, present for API consistency by convention. Returns ------- self : KNeighborsTransformer The fitted k-nearest neighbors transformer.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\neighbors\\_graph.py",
    "ast_data": "FunctionDef name:fit arg:self arg:X arg:y arguments arg arg arg Call Assign Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, tensor, slice_spec, name, dtype=None, device=None):\n    self._tensor = tensor\n    self.slice_spec = slice_spec\n    self.name = name\n    if callable(self._tensor):\n        if dtype is None or device is None:\n            raise AssertionError('When passing a callable `tensor` to a SaveSpec, an explicit dtype and device must be provided.')\n        self.dtype = dtype\n        self.device = device\n    else:\n        self.dtype = tensor.dtype\n        if device is not None:\n            self.device = device\n        else:\n            self.device = tensor.device",
    "docstring": "Creates a object. Args: tensor: the tensor to save or callable that produces a tensor to save. If the value is , the is ignored. slice_spec: the slice to be saved. See . name: the name to save the tensor under. dtype: The data type of the Tensor. Required if is callable. Used for error checking in the restore op. device: The device generating and consuming this tensor. Required if is callable. Used to group objects to save by device.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\training\\saving\\saveable_object.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:tensor arg:slice_spec arg:name arg:dtype arg:device arguments arg arg arg arg arg arg Assign Assign Assign If Call If BoolOp Compare Compare Raise Call Assign Assign Assign If Compare Assign Assign"
  },
  {
    "library": "pytorch",
    "name": "MeanShadowLogger",
    "source_code": "class MeanShadowLogger(ns.Logger):\n\n    def __init__(self):\n        super().__init__()\n        self.stats['float'] = None\n        self.stats['quantized'] = None\n        self.count = 0\n        self.float_sum = None\n        self.quant_sum = None\n\n    def forward(self, x, y):\n        if x.is_quantized:\n            x = x.dequantize()\n        self.count += 1\n        if self.stats['quantized'] is None:\n            self.stats['quantized'] = x\n            self.quant_sum = x\n        else:\n            self.quant_sum += x\n            self.stats['quantized'] = self.quant_sum / self.count\n        if self.stats['float'] is None:\n            self.stats['float'] = y\n            self.float_sum = y\n        else:\n            self.float_sum += y\n            self.stats['float'] = self.float_sum / self.count\n\n    def clear(self):\n        self.stats['float'] = None\n        self.stats['quantized'] = None\n        self.count = 0\n        self.float_sum = None\n        self.quant_sum = None",
    "docstring": "Mean Logger for a Shadow module. A logger for a Shadow module whose purpose is to record the rolling mean of the data passed to the floating point and quantized models",
    "type": "class",
    "file_path": "pytorch\\torch\\ao\\quantization\\_correct_bias.py",
    "ast_data": "ClassDef name:MeanShadowLogger FunctionDef name:__init__ arg:self arguments arg Call Call Assign Assign Assign Assign Assign FunctionDef name:forward arg:self arg:x arg:y arguments arg arg arg If Assign Call If Compare Assign Assign Assign If Compare Assign Assign Assign FunctionDef name:clear arg:self arguments arg Assign Assign Assign Assign Assign"
  },
  {
    "library": "tensorflow",
    "name": "_get_wrapper",
    "source_code": "def _get_wrapper(x, tf_should_use_helper):\n    type_x = type(x)\n    memoized = _WRAPPERS.get(type_x, None)\n    if memoized:\n        return memoized(x, tf_should_use_helper)\n    tx = copy.deepcopy(ShouldUseWrapper)\n    bases = getattr(tx, '__orig_bases__', tx.__bases__)\n\n    def set_body(ns):\n        ns.update(tx.__dict__)\n        return ns\n    copy_tx = types.new_class(tx.__name__, bases, exec_body=set_body)\n    copy_tx.__init__ = _new__init__\n    copy_tx.__getattribute__ = _new__getattribute__\n    for op in OVERLOADABLE_OPERATORS:\n        if hasattr(type_x, op):\n            setattr(copy_tx, op, getattr(type_x, op))\n    copy_tx.mark_used = _new_mark_used\n    copy_tx.__setattr__ = _new__setattr__\n    _WRAPPERS[type_x] = copy_tx\n    return copy_tx(x, tf_should_use_helper)",
    "docstring": "Create a wrapper for object x, whose class subclasses type(x). The wrapper will emit a warning if it is deleted without any of its properties being accessed or methods being called. Args: x: The instance to wrap. tf_should_use_helper: The object that tracks usage. Returns: An object wrapping , of type .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\util\\tf_should_use.py",
    "ast_data": "FunctionDef name:_get_wrapper arg:x arg:tf_should_use_helper arguments arg arg Assign Call Assign Call If Return return:yes Call Assign Call Assign Call FunctionDef name:set_body arg:ns arguments arg Call Return return:yes Assign Call Assign Assign For If Call Call Call Assign Assign Assign Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "_insert_observer_around_module",
    "source_code": "def _insert_observer_around_module(self, obs_fqn: str, target_node: torch.fx.node.Node, obs_to_insert: ObserverBase, observer_args: tuple, insert_post: bool):\n    if insert_post:\n        target_node = target_node.next\n    with self._model.graph.inserting_before(target_node):\n        self._model.add_submodule(obs_fqn, obs_to_insert)\n        self._model.graph.create_node(op='call_module', target=obs_fqn, args=observer_args)\n    self._model.recompile()",
    "docstring": "Helper function that inserts the observer into both the graph structure and the module of the model Args node_fqn (str): The fully qualified name of the observer we want to insert target_node (torch.fx.node.Node): The node in model we are inserting observers around obs_to_insert (ObserverBase): The observer we are inserting around target_node observer_args (Tuple): The arguments we want to pass into the observer insert_post (bool): whether this is meant to be a post observer for this node",
    "type": "method",
    "file_path": "pytorch\\torch\\ao\\quantization\\fx\\_model_report\\model_report.py",
    "ast_data": "FunctionDef name:_insert_observer_around_module arg:self arg:obs_fqn arg:target_node arg:obs_to_insert arg:observer_args arg:insert_post arguments arg arg arg arg arg arg If Assign With Call Call Call Call"
  },
  {
    "library": "numpy",
    "name": "polymulx",
    "source_code": "def polymulx(c):\n    [c] = pu.as_series([c])\n    if len(c) == 1 and c[0] == 0:\n        return c\n    prd = np.empty(len(c) + 1, dtype=c.dtype)\n    prd[0] = c[0] * 0\n    prd[1:] = c\n    return prd",
    "docstring": "Multiply a polynomial by x. Multiply the polynomial by x, where x is the independent variable. Parameters ---------- c : array_like 1-D array of polynomial coefficients ordered from low to high. Returns ------- out : ndarray Array representing the result of the multiplication. See Also -------- polyadd, polysub, polymul, polydiv, polypow Examples -------- >>> from numpy.polynomial import polynomial as P >>> c = (1, 2, 3) >>> P.polymulx(c) array([0., 1., 2., 3.])",
    "type": "function",
    "file_path": "numpy\\numpy\\polynomial\\polynomial.py",
    "ast_data": "FunctionDef name:polymulx arg:c arguments arg Assign Call If BoolOp Compare Call Compare Return return:yes Assign Call Call Assign Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "device",
    "source_code": "@property\ndef device(self):\n    return self._variable.device",
    "docstring": "The device of this variable.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\ref_variable.py",
    "ast_data": "FunctionDef name:device arg:self arguments arg Return return:yes"
  },
  {
    "library": "numpy",
    "name": "compressed",
    "source_code": "def compressed(self):\n    data = ndarray.ravel(self._data)\n    if self._mask is not nomask:\n        data = data.compress(np.logical_not(ndarray.ravel(self._mask)))\n    return data",
    "docstring": "Return all the non-masked data as a 1-D array. Returns ------- data : ndarray A new holding the non-masked data is returned. Notes ----- The result is **not** a MaskedArray! Examples -------- >>> import numpy as np >>> x = np.ma.array(np.arange(5), mask=[0]*2 + [1]*3) >>> x.compressed() array([0, 1]) >>> type(x.compressed()) N-D arrays are compressed to 1-D. >>> arr = [[1, 2], [3, 4]] >>> mask = [[1, 0], [0, 1]] >>> x = np.ma.array(arr, mask=mask) >>> x.compressed() array([2, 3])",
    "type": "method",
    "file_path": "numpy\\numpy\\ma\\core.py",
    "ast_data": "FunctionDef name:compressed arg:self arguments arg Assign Call If Compare Assign Call Call Call Return return:yes"
  },
  {
    "library": "seaborn",
    "name": "heatmap",
    "source_code": "def heatmap(data, *, vmin=None, vmax=None, cmap=None, center=None, robust=False, annot=None, fmt='.2g', annot_kws=None, linewidths=0, linecolor='white', cbar=True, cbar_kws=None, cbar_ax=None, square=False, xticklabels='auto', yticklabels='auto', mask=None, ax=None, **kwargs):\n    plotter = _HeatMapper(data, vmin, vmax, cmap, center, robust, annot, fmt, annot_kws, cbar, cbar_kws, xticklabels, yticklabels, mask)\n    kwargs['linewidths'] = linewidths\n    kwargs['edgecolor'] = linecolor\n    if ax is None:\n        ax = plt.gca()\n    if square:\n        ax.set_aspect('equal')\n    plotter.plot(ax, cbar_ax, kwargs)\n    return ax",
    "docstring": "Plot rectangular data as a color-encoded matrix. This is an Axes-level function and will draw the heatmap into the currently-active Axes if none is provided to the `matplotlib.axes.Axes.textmatplotlib.figure.Figure.colorbarmatplotlib.axes.Axes.pcolormesh`. Returns ------- ax : matplotlib Axes Axes object with the heatmap. See Also -------- clustermap : Plot a matrix using hierarchical clustering to arrange the rows and columns. Examples -------- .. include:: ../docstrings/heatmap.rst",
    "type": "function",
    "file_path": "seaborn\\seaborn\\matrix.py",
    "ast_data": "FunctionDef name:heatmap arg:data arguments arg arg arg arg arg arg arg arg arg arg arg arg arg arg arg arg arg arg arg arg Assign Call Assign Assign If Compare Assign Call If Call Call Return return:yes"
  },
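A minimal `seaborn.heatmap` call exercising the annotation and colorbar paths described above:

```python
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns

data = np.random.rand(4, 6)
ax = sns.heatmap(data, annot=True, fmt=".1f", linewidths=0.5, cmap="viridis")
ax.set_title("random values")
plt.show()
```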
  {
    "library": "pytorch",
    "name": "finalize",
    "source_code": "def finalize(self):\n    for i, pool in enumerate(itertools.chain.from_iterable(self.device_to_pools.values())):\n        pool.finalize(f'pool{i}')",
    "docstring": "Called at the end of allocation process",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\codegen\\memory_planning.py",
    "ast_data": "FunctionDef name:finalize arg:self arguments arg For Call Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "ProcessGroupVariable",
    "source_code": "class ProcessGroupVariable(DistributedVariable):\n\n    def as_python_constant(self):\n        return self.value\n\n    def call_method(self, tx, name, args: 'list[VariableTracker]', kwargs: 'dict[str, VariableTracker]') -> 'VariableTracker':\n        if name == 'rank':\n            return variables.ConstantVariable.create(self.value.rank())\n        if name == 'size':\n            return variables.ConstantVariable.create(self.value.size())\n        if name == '_get_backend_name':\n            return variables.ConstantVariable.create(self.value._get_backend_name())\n        return super().call_method(tx, name, args, kwargs)\n\n    def var_getattr(self, tx: 'InstructionTranslator', name):\n        if name == 'group_name':\n            return variables.ConstantVariable.create(self.value.group_name)\n        if name in ['rank', 'size']:\n            return variables.LambdaVariable(lambda *args, **kwargs: self.call_method(tx, name, args, kwargs))\n        return super().var_getattr(tx, name)\n\n    @staticmethod\n    def is_process_group(value):\n        if not DistributedVariable.is_available():\n            return False\n        from torch._C._distributed_c10d import ProcessGroup\n        from torch.testing._internal.distributed.fake_pg import FakeProcessGroup\n        return istype(value, (ProcessGroup, FakeProcessGroup))",
    "docstring": "We don't want a ProcessGroup object to end up in our output graph. But it's common for dynamo to intercept a PG that is then used to get info like rank() or world_size(), as well as passed to utility functions in distributed_c10d which desugar it into plain types like a ranklist and tag. For convenience and proper guarding, we construct a variable type. TODO: make it possible to use ProcessGroupVariable as input to simple functions like _expand_group without dynamo complaining about making a proxy for it. It is not a tensor-like type, and we don't want a proxy- but dynamo assumes torch library functions are dealing with tensor-like types and would have proxies for their args. TODO: should we make this inherit VT instead of UDOV? Do we want any of the default behaviors or just graph-break whenever one of our special cases is not hit?",
    "type": "class",
    "file_path": "pytorch\\torch\\_dynamo\\variables\\distributed.py",
    "ast_data": "ClassDef name:ProcessGroupVariable FunctionDef name:as_python_constant arg:self arguments arg Return return:yes FunctionDef name:call_method arg:self arg:tx arg:name arg:args arg:kwargs arguments arg arg arg arg arg If Compare Return return:yes Call Call If Compare Return return:yes Call Call If Compare Return return:yes Call Call Return return:yes Call Call FunctionDef name:var_getattr arg:self arg:tx arg:name arguments arg arg arg If Compare Return return:yes Call If Compare Return return:yes Call arguments arg arg Call Return return:yes Call Call FunctionDef name:is_process_group arg:value arguments arg If Call Return return:yes Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "infer_inputs_from_restored_call_function",
    "source_code": "def infer_inputs_from_restored_call_function(fn):\n\n    def common_spec(x, y):\n        common_shape = get_common_shape(x.shape, y.shape)\n        if isinstance(x, sparse_tensor.SparseTensorSpec):\n            return sparse_tensor.SparseTensorSpec(common_shape, x.dtype)\n        elif isinstance(x, ragged_tensor.RaggedTensorSpec):\n            return ragged_tensor.RaggedTensorSpec(common_shape, x.dtype)\n        return tensor_spec.TensorSpec(common_shape, x.dtype, x.name)\n    spec = fn.concrete_functions[0].structured_input_signature[0][0]\n    for concrete in fn.concrete_functions[1:]:\n        spec2 = concrete.structured_input_signature[0][0]\n        spec = nest.map_structure(common_spec, spec, spec2)\n    return spec",
    "docstring": "Returns TensorSpec of inputs from a restored call function. Args: fn: Restored layer call function. It is assumed that has at least one concrete function and that the inputs are in the first argument. Returns: TensorSpec of call function inputs.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\saving\\saved_model\\load.py",
    "ast_data": "FunctionDef name:infer_inputs_from_restored_call_function arg:fn arguments arg FunctionDef name:common_spec arg:x arg:y arguments arg arg Assign Call If Call Return return:yes Call If Call Return return:yes Call Return return:yes Call Assign For Assign Assign Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "_add_contourf_set",
    "source_code": "def _add_contourf_set(self, cset, zdir='z', offset=None, axlim_clip=False):\n    zdir = '-' + zdir\n    midpoints = cset.levels[:-1] + np.diff(cset.levels) / 2\n    if cset._extend_min:\n        min_level = cset.levels[0] - np.diff(cset.levels[:2]) / 2\n        midpoints = np.insert(midpoints, 0, min_level)\n    if cset._extend_max:\n        max_level = cset.levels[-1] + np.diff(cset.levels[-2:]) / 2\n        midpoints = np.append(midpoints, max_level)\n    art3d.collection_2d_to_3d(cset, zs=offset if offset is not None else midpoints, zdir=zdir, axlim_clip=axlim_clip)\n    return midpoints",
    "docstring": "Returns ------- levels : Levels at which the filled contours are added.",
    "type": "method",
    "file_path": "matplotlib\\lib\\mpl_toolkits\\mplot3d\\axes3d.py",
    "ast_data": "FunctionDef name:_add_contourf_set arg:self arg:cset arg:zdir arg:offset arg:axlim_clip arguments arg arg arg arg arg Assign Assign Call If Assign Call Assign Call If Assign Call Assign Call Call Compare Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "remove_redundant_views",
    "source_code": "def remove_redundant_views(gm: torch.fx.GraphModule):\n    with torch.utils._python_dispatch._disable_current_modes():\n        views: dict[torch.fx.Node, dict[torch.dtype, torch.fx.Node]] = {}\n        graph = gm.graph\n        for node in graph.find_nodes(op='call_function', target=torch.ops.aten.view.dtype):\n            src = node.args[0]\n            to_type = node.args[1]\n            existing_views = views.get(src)\n            is_needed = True\n            if existing_views:\n                alias = existing_views.get(to_type)\n                if alias:\n                    is_needed = False\n                    node.replace_all_uses_with(alias)\n                    alias.meta.update(node.meta)\n                    graph.erase_node(node)\n            else:\n                from_type = src.meta['val'].dtype\n                existing_views = {from_type: src}\n                views[src] = existing_views\n            if is_needed:\n                existing_views.setdefault(to_type, node)\n                views[node] = existing_views\n        while True:\n            unused_views = [alias for alias in views if not alias.users]\n            if len(unused_views) == 0:\n                break\n            for unused in unused_views:\n                views.pop(unused)\n                graph.erase_node(unused)",
    "docstring": "Removes redundant views by reusing existing ones.",
    "type": "function",
    "file_path": "pytorch\\torch\\_inductor\\fx_passes\\joint_graph.py",
    "ast_data": "FunctionDef name:remove_redundant_views arg:gm arguments arg With Call Assign For Call Assign Assign Assign Call Assign If Assign Call If Assign Call Call Call Assign Assign Assign If Call Assign While Assign If Compare Call For Call Call"
  },
  {
    "library": "tensorflow",
    "name": "Flatten",
    "source_code": "class Flatten(keras_layers.Flatten, base.Layer):\n    pass",
    "docstring": "Flattens an input tensor while preserving the batch axis (axis 0). Args: data_format: A string, one of (default) or . The ordering of the dimensions in the inputs. corresponds to inputs with shape while corresponds to inputs with shape . Examples:",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\legacy_tf_layers\\core.py",
    "ast_data": "ClassDef name:Flatten"
  },
  {
    "library": "scikit-learn",
    "name": "_score_without_scorer",
    "source_code": "def _score_without_scorer(self, squared_errors):\n    if self.alpha_per_target:\n        _score = -squared_errors.mean(axis=0)\n    else:\n        _score = -squared_errors.mean()\n    return _score",
    "docstring": "Performs scoring using squared errors when the scorer is None.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\linear_model\\_ridge.py",
    "ast_data": "FunctionDef name:_score_without_scorer arg:self arg:squared_errors arguments arg arg If Assign Call Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "SendTracebacks",
    "source_code": "def SendTracebacks(self, request, context):\n    return debug_service_pb2.EventReply()",
    "docstring": "Base implementation of the handling of SendTracebacks calls. The base implementation does nothing with the incoming request. Override in an implementation of the server if necessary. Args: request: A proto, containing information about the type (e.g., graph vs. eager execution) and source-code traceback of the call and (any) associated s. context: Server context. Returns: A proto.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\debug\\lib\\grpc_debug_server.py",
    "ast_data": "FunctionDef name:SendTracebacks arg:self arg:request arg:context arguments arg arg arg Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "MatWriteWarning",
    "source_code": "class MatWriteWarning(UserWarning):\n    pass",
    "docstring": "Warning class for write issues.",
    "type": "class",
    "file_path": "scipy\\scipy\\io\\matlab\\_miobase.py",
    "ast_data": "ClassDef name:MatWriteWarning"
  },
  {
    "library": "pytorch",
    "name": "ExponentialLR",
    "source_code": "class ExponentialLR(LRScheduler):\n\n    def __init__(self, optimizer: Optimizer, gamma: float, last_epoch: int=-1) -> None:\n        self.gamma = gamma\n        super().__init__(optimizer, last_epoch)\n\n    @override\n    def get_lr(self) -> list[float]:\n        _warn_get_lr_called_within_step(self)\n        if self._is_initial:\n            return [group['lr'] for group in self.optimizer.param_groups]\n        return [group['lr'] * self.gamma for group in self.optimizer.param_groups]\n\n    def _get_closed_form_lr(self):\n        return [base_lr * self.gamma ** self.last_epoch for base_lr in self.base_lrs]",
    "docstring": "Decays the learning rate of each parameter group by gamma every epoch. When last_epoch=-1, sets initial lr as lr. Args: optimizer (Optimizer): Wrapped optimizer. gamma (float): Multiplicative factor of learning rate decay. last_epoch (int): The index of last epoch. Default: -1. Example: >>> # xdoctest: +SKIP >>> scheduler = ExponentialLR(optimizer, gamma=0.95) >>> for epoch in range(100): >>> train(...) >>> validate(...) >>> scheduler.step() .. image:: ../scripts/lr_scheduler_images/ExponentialLR.png",
    "type": "class",
    "file_path": "pytorch\\torch\\optim\\lr_scheduler.py",
    "ast_data": "ClassDef name:ExponentialLR FunctionDef name:__init__ arg:self arg:optimizer arg:gamma arg:last_epoch arguments arg arg arg arg Assign Call Call FunctionDef name:get_lr arg:self arguments arg Call If Return return:yes Return return:yes FunctionDef name:_get_closed_form_lr arg:self arguments arg Return return:yes"
  },
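A short decay check for `ExponentialLR`; the forward/backward pass is omitted for brevity.

```python
import torch

model = torch.nn.Linear(4, 2)
opt = torch.optim.SGD(model.parameters(), lr=0.5)
sched = torch.optim.lr_scheduler.ExponentialLR(opt, gamma=0.9)

for _ in range(3):
    opt.step()    # normally preceded by a forward/backward pass
    sched.step()

print(opt.param_groups[0]["lr"])  # 0.5 * 0.9**3 == 0.3645 (up to float rounding)
```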
  {
    "library": "pytorch",
    "name": "_device_to_params_per_rank",
    "source_code": "@property\ndef _device_to_params_per_rank(self) -> dict[torch.device, list[list[torch.Tensor]]]:\n    assert self.parameters_as_bucket_view, '`_device_to_params_per_rank` should only be used if `parameters_as_bucket_view=True`'\n    if len(self._device_to_params_per_rank_cache) == 0:\n        for rank, param_groups in enumerate(self._partition_parameters()):\n            for param_group in param_groups:\n                for param in param_group['params']:\n                    device = param.device\n                    if device not in self._device_to_params_per_rank_cache:\n                        self._device_to_params_per_rank_cache[device] = [[] for _ in range(self.world_size)]\n                    self._device_to_params_per_rank_cache[device][rank].append(param)\n    return self._device_to_params_per_rank_cache",
    "docstring": "Return device parameters assigned per rank. :class: mapping each device to a :class: of the per-rank parameter lists filtered to only include the parameters stored on that device. Each per-rank parameter list gives the parameters assigned to that rank to update. This is used for constructing the parameter buckets if ``, ... ...",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\optim\\zero_redundancy_optimizer.py",
    "ast_data": "FunctionDef name:_device_to_params_per_rank arg:self arguments arg If Compare Call For Call Call For For Assign If Compare Assign Call Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "_supports_transparency",
    "source_code": "def _supports_transparency(self):\n    return False",
    "docstring": "Whether this writer supports transparency. Writers may consult output file type and codec to determine this at runtime.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\animation.py",
    "ast_data": "FunctionDef name:_supports_transparency arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "decorated",
    "source_code": "@parameterized.named_parameters(*params)\n@functools.wraps(f)\ndef decorated(self, run_mode, *args, **kwargs):\n    if run_mode == 'v1_session':\n        _v1_session_test(f, self, config, *args, **kwargs)\n    elif run_mode == 'v2_eager':\n        _v2_eager_test(f, self, *args, **kwargs)\n    elif run_mode == 'v2_function':\n        _v2_function_test(f, self, *args, **kwargs)\n    else:\n        return ValueError('Unknown run mode %s' % run_mode)",
    "docstring": "A run of a single test case w/ specified run mode.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\keras_parameterized.py",
    "ast_data": "FunctionDef name:decorated arg:self arg:run_mode arguments arg arg arg arg If Compare Call If Compare Call If Compare Call Return return:yes Call Call Call"
  },
  {
    "library": "scipy",
    "name": "fit",
    "source_code": "@_call_super_mom\ndef fit(self, data, *args, **kwds):\n    if len(args) > 0:\n        raise TypeError('Too many arguments.')\n    floc = kwds.pop('floc', None)\n    fscale = kwds.pop('fscale', None)\n    _remove_optimizer_parameters(kwds)\n    if floc is not None and fscale is not None:\n        raise ValueError('All parameters fixed. There is nothing to optimize.')\n    data = np.asarray(data)\n    if not np.isfinite(data).all():\n        raise ValueError('The data contains non-finite values.')\n    if fscale is None:\n        if floc is None:\n            loc = data.min()\n            scale = np.ptp(data)\n        else:\n            loc = floc\n            scale = data.max() - loc\n            if data.min() < loc:\n                raise FitDataError('uniform', lower=loc, upper=loc + scale)\n    else:\n        ptp = np.ptp(data)\n        if ptp > fscale:\n            raise FitUniformFixedScaleDataError(ptp=ptp, fscale=fscale)\n        loc = data.min() - 0.5 * (fscale - ptp)\n        scale = fscale\n    return (float(loc), float(scale))",
    "docstring": "Maximum likelihood estimate for the location and scale parameters. uses only the following parameters. Because exact formulas are used, the parameters related to optimization that are available in the method of other distributions are ignored here. The only positional argument accepted is . Parameters ---------- data : array_like Data to use in calculating the maximum likelihood estimate. floc : float, optional Hold the location parameter fixed to the specified value. fscale : float, optional Hold the scale parameter fixed to the specified value. Returns ------- loc, scale : float Maximum likelihood estimates for the location and scale. Notes ----- An error is raised if is given and any values in are less than , or if is given and is less than `flocfscalexfscalefit`.",
    "type": "method",
    "file_path": "scipy\\scipy\\stats\\_continuous_distns.py",
    "ast_data": "FunctionDef name:fit arg:self arg:data arguments arg arg arg arg If Compare Call Raise Call Assign Call Assign Call Call If BoolOp Compare Compare Raise Call Assign Call If Call Call Raise Call If Compare If Compare Assign Call Assign Call Assign Assign Call If Compare Call Raise Call Assign Call If Compare Raise Call Assign Call Assign Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "subguards_allowed",
    "source_code": "def subguards_allowed(self):\n    return self.guard_source() != GuardSource.SYNTHETIC_LOCAL",
    "docstring": "True if you can guard on attributes of this",
    "type": "method",
    "file_path": "pytorch\\torch\\_guards.py",
    "ast_data": "FunctionDef name:subguards_allowed arg:self arguments arg Return return:yes Compare Call"
  },
  {
    "library": "pytorch",
    "name": "get_stream_from_external",
    "source_code": "def get_stream_from_external(data_ptr: int, device: Optional[_device_t]=None) -> Stream:\n    _lazy_init()\n    streamdata = torch._C._cuda_getStreamFromExternal(data_ptr, _get_device_index(device, optional=True))\n    return Stream(stream_id=streamdata[0], device_index=streamdata[1], device_type=streamdata[2])",
    "docstring": "Return a :class: from an externally allocated CUDA stream. This function is used to wrap streams allocated in other libraries in order to facilitate data exchange and multi-library interactions. .. note:: This function doesn't manage the stream life-cycle, it is the user responsibility to keep the referenced stream alive while this returned stream is being used. Args: data_ptr(int): Integer representation of the value that is allocated externally. device(torch.device or int, optional): the device where the stream was originally allocated. If device is specified incorrectly, subsequent launches using this stream may fail.",
    "type": "function",
    "file_path": "pytorch\\torch\\cuda\\__init__.py",
    "ast_data": "FunctionDef name:get_stream_from_external arg:data_ptr arg:device arguments arg arg Call Assign Call Call Return return:yes Call"
  },
  {
    "library": "django",
    "name": "_multi_decorate",
    "source_code": "def _multi_decorate(decorators, method):\n    if hasattr(decorators, '__iter__'):\n        decorators = decorators[::-1]\n    else:\n        decorators = [decorators]\n\n    def _wrapper(self, *args, **kwargs):\n        bound_method = wraps(method)(partial(method.__get__(self, type(self))))\n        for dec in decorators:\n            bound_method = dec(bound_method)\n        return bound_method(*args, **kwargs)\n    for dec in decorators:\n        _update_method_wrapper(_wrapper, dec)\n    update_wrapper(_wrapper, method)\n    if iscoroutinefunction(method):\n        markcoroutinefunction(_wrapper)\n    return _wrapper",
    "docstring": "Decorate with one or more function decorators. can be a single decorator or an iterable of decorators.",
    "type": "function",
    "file_path": "django\\django\\utils\\decorators.py",
    "ast_data": "FunctionDef name:_multi_decorate arg:decorators arg:method arguments arg arg If Call Assign Assign FunctionDef name:_wrapper arg:self arguments arg arg arg Assign Call Call Call Call Call For Assign Call Return return:yes Call For Call Call If Call Call Return return:yes"
  },
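`_multi_decorate` is private, but its behavior is visible through Django's public `method_decorator`; a sketch with a hypothetical `uppercase_result` decorator:

```python
from functools import wraps
from django.utils.decorators import method_decorator

def uppercase_result(func):  # hypothetical decorator for illustration
    @wraps(func)
    def wrapper(*args, **kwargs):
        return func(*args, **kwargs).upper()
    return wrapper

class Greeter:
    @method_decorator(uppercase_result)
    def greet(self, name):
        return f"hello {name}"

assert Greeter().greet("ana") == "HELLO ANA"
```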
  {
    "library": "tensorflow",
    "name": "reduce",
    "source_code": "def reduce(self, reduce_op, value, axis):\n    return super(OneDeviceStrategy, self).reduce(reduce_op, value, axis)",
    "docstring": "Reduce across replicas. In , there is only one replica, so if axis=None, value is simply returned. If axis is specified as something other than None, such as axis=0, value is reduced along that axis and returned. Example: Args: reduce_op: A value specifying how values should be combined. value: A \"per replica\" value, e.g. returned by to be combined into a single tensor. axis: Specifies the dimension to reduce along within each replica's tensor. Should typically be set to the batch dimension, or to only reduce across replicas (e.g. if the tensor has no batch dimension). Returns: A .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\one_device_strategy.py",
    "ast_data": "FunctionDef name:reduce arg:self arg:reduce_op arg:value arg:axis arguments arg arg arg arg Return return:yes Call Call"
  },
  {
    "library": "matplotlib",
    "name": "containsy",
    "source_code": "def containsy(self, y):\n    y0, y1 = self.intervaly\n    return y0 <= y <= y1 or y0 >= y >= y1",
    "docstring": "Return whether *y* is in the closed (:attr:, :attr:) interval.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\transforms.py",
    "ast_data": "FunctionDef name:containsy arg:self arg:y arguments arg arg Assign Return return:yes BoolOp Compare Compare"
  },
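A quick check of `containsy` on a `Bbox`, whose `intervaly` supplies the closed bounds:

```python
from matplotlib.transforms import Bbox

bbox = Bbox.from_extents(0, 0, 1, 2)  # x0, y0, x1, y1
assert bbox.containsy(1.5)            # inside the closed [0, 2] interval
assert not bbox.containsy(2.5)        # outside
```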
  {
    "library": "tensorflow",
    "name": "SparseCoreLayoutsTrackable",
    "source_code": "class SparseCoreLayoutsTrackable(trackable_base.Trackable):\n\n    def __init__(self, proto_str_tensor: tensor.Tensor):\n        self.value = proto_str_tensor\n\n    def _serialize_to_tensors(self) -> Dict[str, tensor.Tensor]:\n        return {trackable_base.VARIABLE_VALUE_KEY: self.value}\n\n    def _restore_from_tensors(self, restored_tensors: Dict[str, tensor.Tensor]) -> None:\n        gen_control_flow_ops.no_op()",
    "docstring": "Trackable for sparsecore layouts used in training.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\tpu\\tpu_embedding_v3_utils.py",
    "ast_data": "ClassDef name:SparseCoreLayoutsTrackable FunctionDef name:__init__ arg:self arg:proto_str_tensor arguments arg arg Assign FunctionDef name:_serialize_to_tensors arg:self arguments arg Return return:yes FunctionDef name:_restore_from_tensors arg:self arg:restored_tensors arguments arg arg Call"
  },
  {
    "library": "tensorflow",
    "name": "SamplerCell",
    "source_code": "class SamplerCell(object):\n    __slots__ = ['_cell']\n\n    def __init__(self, cell):\n        self._cell = cell\n\n    def add(self, value):\n        pywrap_tfe.TFE_MonitoringSamplerCellAdd(self._cell, value)\n\n    def value(self):\n        with c_api_util.tf_buffer() as buffer_:\n            pywrap_tfe.TFE_MonitoringSamplerCellValue(self._cell, buffer_)\n            proto_data = pywrap_tf_session.TF_GetBuffer(buffer_)\n        histogram_proto = summary_pb2.HistogramProto()\n        histogram_proto.ParseFromString(compat.as_bytes(proto_data))\n        return histogram_proto",
    "docstring": "SamplerCell stores each value of a Sampler.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\monitoring.py",
    "ast_data": "ClassDef name:SamplerCell Assign FunctionDef name:__init__ arg:self arg:cell arguments arg arg Assign FunctionDef name:add arg:self arg:value arguments arg arg Call FunctionDef name:value arg:self arguments arg With Call Call Assign Call Assign Call Call Call Return return:yes"
  },
  {
    "library": "numpy",
    "name": "arcsin",
    "source_code": "@set_module('numpy.lib.scimath')\n@array_function_dispatch(_unary_dispatcher)\ndef arcsin(x):\n    x = _fix_real_abs_gt_1(x)\n    return nx.arcsin(x)",
    "docstring": "Compute the inverse sine of x. Return the \"principal value\" (for a description of this, see ) of the inverse sine of . For real such that `abs(x) >> import numpy as np >>> np.set_printoptions(precision=4) >>> np.emath.arcsin(0) 0.0 >>> np.emath.arcsin([0,1]) array([0. , 1.5708])",
    "type": "function",
    "file_path": "numpy\\numpy\\lib\\_scimath_impl.py",
    "ast_data": "FunctionDef name:arcsin arg:x arguments arg Assign Call Return return:yes Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "meta_graph_filename",
    "source_code": "def meta_graph_filename(checkpoint_filename, meta_graph_suffix='meta'):\n    basename = re.sub('-[\\\\d\\\\?]+-of-\\\\d+$', '', checkpoint_filename)\n    suffixed_filename = '.'.join([basename, meta_graph_suffix])\n    return suffixed_filename",
    "docstring": "Returns the meta graph filename. Args: checkpoint_filename: Name of the checkpoint file. meta_graph_suffix: Suffix for file. Defaults to 'meta'. Returns: MetaGraph file name.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\checkpoint\\checkpoint_management.py",
    "ast_data": "FunctionDef name:meta_graph_filename arg:checkpoint_filename arg:meta_graph_suffix arguments arg arg Assign Call Assign Call Return return:yes"
  },
  {
    "library": "seaborn",
    "name": "__call__",
    "source_code": "def __call__(self, data, var):\n    vals = data[var]\n    if callable(self.estimator):\n        estimate = self.estimator(vals)\n    else:\n        estimate = vals.agg(self.estimator)\n    if self.error_method is None:\n        err_min = err_max = np.nan\n    elif len(data) <= 1:\n        err_min = err_max = np.nan\n    elif callable(self.error_method):\n        err_min, err_max = self.error_method(vals)\n    elif self.error_method == 'sd':\n        half_interval = vals.std() * self.error_level\n        err_min, err_max = (estimate - half_interval, estimate + half_interval)\n    elif self.error_method == 'se':\n        half_interval = vals.sem() * self.error_level\n        err_min, err_max = (estimate - half_interval, estimate + half_interval)\n    elif self.error_method == 'pi':\n        err_min, err_max = _percentile_interval(vals, self.error_level)\n    elif self.error_method == 'ci':\n        units = data.get('units', None)\n        boots = bootstrap(vals, units=units, func=self.estimator, **self.boot_kws)\n        err_min, err_max = _percentile_interval(boots, self.error_level)\n    return pd.Series({var: estimate, f'{var}min': err_min, f'{var}max': err_max})",
    "docstring": "Aggregate over column of with estimate and error interval.",
    "type": "method",
    "file_path": "seaborn\\seaborn\\_statistics.py",
    "ast_data": "FunctionDef name:__call__ arg:self arg:data arg:var arguments arg arg arg Assign If Call Assign Call Assign Call If Compare Assign If Compare Call Assign If Call Assign Call If Compare Assign Call Assign If Compare Assign Call Assign If Compare Assign Call If Compare Assign Call Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "seaborn",
    "name": "_check_list_length",
    "source_code": "def _check_list_length(self, levels: list, values: list) -> list:\n    message = ''\n    if len(levels) > len(values):\n        message = ' '.join([f'\\nThe {self.variable} list has fewer values ({len(values)})', f'than needed ({len(levels)}) and will cycle, which may', 'produce an uninterpretable plot.'])\n        values = [x for _, x in zip(levels, itertools.cycle(values))]\n    elif len(values) > len(levels):\n        message = ' '.join([f'The {self.variable} list has more values ({len(values)})', f'than needed ({len(levels)}), which may not be intended.'])\n        values = values[:len(levels)]\n    if message:\n        warnings.warn(message, UserWarning)\n    return values",
    "docstring": "Input check when values are provided as a list.",
    "type": "method",
    "file_path": "seaborn\\seaborn\\_core\\properties.py",
    "ast_data": "FunctionDef name:_check_list_length arg:self arg:levels arg:values arguments arg arg arg Assign If Compare Call Call Assign Call Call Call Assign Call Call If Compare Call Call Assign Call Call Call Assign Call If Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "build_stage",
    "source_code": "def build_stage(self, stage_index: int, device: torch.device, group: Optional[ProcessGroup]=None) -> _PipelineStage:\n    stage_module = self.get_stage_module(stage_index)\n    if isinstance(stage_module, torch.fx.GraphModule):\n        _modify_graph_op_device(stage_module, device)\n    else:\n        logger.warning(f'Expected a `torch.fx.GraphModule` but got {type(stage_module)}')\n    pipe_info = self.info()\n    return _PipelineStage(stage_module, stage_index, pipe_info, device, group)",
    "docstring": "Create a given a stage index and distributed group. The can run with s.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\pipelining\\_IR.py",
    "ast_data": "FunctionDef name:build_stage arg:self arg:stage_index arg:device arg:group arguments arg arg arg arg Assign Call If Call Call Call Call Assign Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_require_cross_replica_or_default_context_extended",
    "source_code": "def _require_cross_replica_or_default_context_extended(extended, error_message=None):\n    context = _get_per_thread_mode()\n    cross_replica = context.cross_replica_context\n    if cross_replica is not None and cross_replica.extended is extended:\n        return\n    if context is _get_default_replica_mode():\n        return\n    strategy = extended._container_strategy()\n    if context.strategy is not strategy:\n        _wrong_strategy_scope(strategy, context)\n    assert cross_replica is None\n    if not error_message:\n        error_message = 'Method requires being in cross-replica context, use get_replica_context().merge_call()'\n    raise RuntimeError(error_message)",
    "docstring": "Verify in cross-replica context.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\distribute_lib.py",
    "ast_data": "FunctionDef name:_require_cross_replica_or_default_context_extended arg:extended arg:error_message arguments arg arg Assign Call Assign If BoolOp Compare Compare Return return:no If Compare Call Return return:no Assign Call If Compare Call Compare If Assign Raise Call"
  },
  {
    "library": "tensorflow",
    "name": "_save_cached_when_graph_building",
    "source_code": "def _save_cached_when_graph_building(self, file_prefix, object_graph_tensor, options):\n    serialized_tensors, feed_additions, registered_savers, graph_proto = self._gather_serialized_tensors(object_graph_tensor)\n    if self._last_save_object_graph != graph_proto or context.executing_eagerly() or ops.inside_function():\n        saver = functional_saver.MultiDeviceSaver(serialized_tensors, registered_savers)\n        save_op = saver.save(file_prefix, options=options)\n        with ops.device('/cpu:0'):\n            with ops.control_dependencies([save_op]):\n                self._cached_save_operation = array_ops.identity(file_prefix)\n        self._last_save_object_graph = graph_proto\n    return (self._cached_save_operation, feed_additions)",
    "docstring": "Create or retrieve save ops. Args: file_prefix: The prefix for saved checkpoint files. object_graph_tensor: A to which the current object graph will be fed. options: object. Returns: A two-element tuple with a filename tensor and a feed_dict of tensors to feed when running it (if graph building). The feed dict contains the current object graph and any Python state to be saved in the checkpoint. When executing eagerly only the first argument is meaningful.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\checkpoint\\checkpoint.py",
    "ast_data": "FunctionDef name:_save_cached_when_graph_building arg:self arg:file_prefix arg:object_graph_tensor arg:options arguments arg arg arg arg Assign Call If BoolOp Compare Call Call Assign Call Assign Call With Call With Call Assign Call Assign Return return:yes"
  },
  {
    "library": "pandas",
    "name": "_update_ctx_header",
    "source_code": "def _update_ctx_header(self, attrs: DataFrame, axis: AxisInt) -> None:\n    for j in attrs.columns:\n        ser = attrs[j]\n        for i, c in ser.items():\n            if not c or pd.isna(c):\n                continue\n            css_list = maybe_convert_css_to_tuples(c)\n            if axis == 0:\n                self.ctx_index[i, j].extend(css_list)\n            else:\n                self.ctx_columns[j, i].extend(css_list)",
    "docstring": "Update the state of the `` for header cells. Collects a mapping of {index_label: [('', ''), ..]}. Parameters ---------- attrs : Series Should contain strings of ': ;: ', and an integer index. Whitespace shouldn't matter and the final trailing ';' shouldn't matter. axis : int Identifies whether the ctx object being updated is the index or columns",
    "type": "method",
    "file_path": "pandas\\pandas\\io\\formats\\style.py",
    "ast_data": "FunctionDef name:_update_ctx_header arg:self arg:attrs arg:axis arguments arg arg arg For Assign For Call If BoolOp Call Assign Call If Compare Call Call"
  },
  {
    "library": "pytorch",
    "name": "__getstate__",
    "source_code": "def __getstate__(self):\n    logger.warning('NOTE: Process group is not serializable and excluded from a saved state.')\n    return {slot: getattr(self, slot) for slot in self.__slots__ if slot != 'process_group'}",
    "docstring": "Return a `` is not serializable and excluded from a returned state.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\algorithms\\ddp_comm_hooks\\powerSGD_hook.py",
    "ast_data": "FunctionDef name:__getstate__ arg:self arguments arg Call Return return:yes Call Compare"
  },
  {
    "library": "tensorflow",
    "name": "variables",
    "source_code": "def variables(self):\n    current_graph = ops.get_default_graph()\n\n    def _from_current_graph(variable):\n        if variable._in_graph_mode:\n            return variable.op.graph is current_graph\n        else:\n            return variable._graph_key == current_graph._graph_key\n    optimizer_variables = [v for v in self._non_slot_variables() if _from_current_graph(v)]\n    for _, variable_dict in self._slots.items():\n        for _, slot_for_variable in variable_dict.items():\n            if _from_current_graph(slot_for_variable):\n                optimizer_variables.append(slot_for_variable)\n    return sorted(optimizer_variables, key=lambda v: v.name)",
    "docstring": "A list of variables which encode the current state of . Includes slot variables and additional global variables created by the optimizer in the current default graph. Returns: A list of variables.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\training\\optimizer.py",
    "ast_data": "FunctionDef name:variables arg:self arguments arg Assign Call FunctionDef name:_from_current_graph arg:variable arguments arg If Return return:yes Compare Return return:yes Compare Assign Call Call For Call For Call If Call Call Return return:yes Call arguments arg"
  },
  {
    "library": "scikit-learn",
    "name": "_threadpool_controller_decorator",
    "source_code": "def _threadpool_controller_decorator(limits=1, user_api='blas'):\n\n    def decorator(func):\n\n        @functools.wraps(func)\n        def wrapper(*args, **kwargs):\n            controller = _get_threadpool_controller()\n            with controller.limit(limits=limits, user_api=user_api):\n                return func(*args, **kwargs)\n        return wrapper\n    return decorator",
    "docstring": "Decorator to limit the number of threads used at the function level. It should be preferred over because this one only loads the shared libraries when the function is called while the latter loads them at import time.",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\utils\\parallel.py",
    "ast_data": "FunctionDef name:_threadpool_controller_decorator arg:limits arg:user_api arguments arg arg FunctionDef name:decorator arg:func arguments arg FunctionDef name:wrapper arguments arg arg Assign Call With Call Return return:yes Call Call Return return:yes Return return:yes"
  },
  {
    "library": "pygame",
    "name": "get_arraytype",
    "source_code": "def get_arraytype():\n    warnings.warn(DeprecationWarning('only numpy arrays are now supported, this function will be removed in a future version of the module'))\n    return 'numpy'",
    "docstring": "pygame.surfarray.get_arraytype(): return str DEPRECATED - only numpy arrays are now supported.",
    "type": "function",
    "file_path": "pygame\\src_py\\surfarray.py",
    "ast_data": "FunctionDef name:get_arraytype arguments Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_watch_step_to_save_key",
    "source_code": "def _watch_step_to_save_key(self):\n    step_value = context.context().get_config_key_value(_INITIAL_RUN_COUNT_KEY)\n    if step_value != _STOP_WATCHING_CLUSTER_VALUE:\n        self._step_to_checkpoint = step_value\n        self._received_checkpoint_step.set()\n        ack_key = f'{_ACKNOWLEDGE_KEY}_{_INITIAL_RUN_COUNT_KEY}_{self._id_in_cluster}'\n        context.context().set_config_key_value(ack_key, '1')\n        logging.info('PreemptionCheckpointHandler: %s set, preemption awareness acknowledged', ack_key)\n        if self._grace_period > 0:\n            final_step_value = context.context().get_config_key_value(_FINAL_RUN_COUNT_KEY)\n            if final_step_value != _STOP_WATCHING_CLUSTER_VALUE:\n                ack_key = f'{_ACKNOWLEDGE_KEY}_{_FINAL_RUN_COUNT_KEY}_{self._id_in_cluster}'\n                context.context().set_config_key_value(ack_key, '1')\n                logging.info('PreemptionCheckpointHandler: %s acknowledged, final checkpoint timing received.', ack_key)\n                self._received_checkpoint_step.set()\n                self._step_to_checkpoint = final_step_value",
    "docstring": "Watch out for step-to-save config key and acknowledge. All workers, including the one to be preempted, execute this function to get step-to-save.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\failure_handling\\failure_handling.py",
    "ast_data": "FunctionDef name:_watch_step_to_save_key arg:self arguments arg Assign Call Call If Compare Assign Call Assign Call Call Call If Compare Assign Call Call If Compare Assign Call Call Call Call Assign"
  },
  {
    "library": "django",
    "name": "debug",
    "source_code": "def debug(request):\n    context_extras = {}\n    if settings.DEBUG and request.META.get('REMOTE_ADDR') in settings.INTERNAL_IPS:\n        context_extras['debug'] = True\n        from django.db import connections\n        context_extras['sql_queries'] = lazy(lambda: list(itertools.chain.from_iterable((connections[x].queries for x in connections))), list)\n    return context_extras",
    "docstring": "Return context variables helpful for debugging.",
    "type": "function",
    "file_path": "django\\django\\template\\context_processors.py",
    "ast_data": "FunctionDef name:debug arg:request arguments arg Assign If BoolOp Compare Call Assign Assign Call arguments Call Call Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "fit_transform",
    "source_code": "def fit_transform(self, X, y=None):\n    return self.fit(X).transform(X)",
    "docstring": "Fit to data, then transform it. Fits transformer to X and y with optional parameters fit_params and returns a transformed version of X. Parameters ---------- X : array-like of shape (n_samples, n_features) Training set. y : Ignored Not used, present for API consistency by convention. Returns ------- Xt : sparse matrix of shape (n_samples, n_samples) Xt[i, j] is assigned the weight of edge that connects i to j. Only the neighbors have an explicit value. The diagonal is always explicit. The matrix is of CSR format.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\neighbors\\_graph.py",
    "ast_data": "FunctionDef name:fit_transform arg:self arg:X arg:y arguments arg arg arg Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "MatchingFilesBenchmark",
    "source_code": "class MatchingFilesBenchmark(benchmark_base.DatasetBenchmarkBase):\n\n    def benchmark_nested_directories(self):\n        tmp_dir = tempfile.mkdtemp()\n        width = 500\n        depth = 10\n        for i in range(width):\n            for j in range(depth):\n                new_base = os.path.join(tmp_dir, str(i), *[str(dir_name) for dir_name in range(j)])\n                os.makedirs(new_base)\n                child_files = ['a.py', 'b.pyc'] if j < depth - 1 else ['c.txt', 'd.log']\n                for f in child_files:\n                    filename = os.path.join(new_base, f)\n                    open(filename, 'w').close()\n        patterns = [os.path.join(tmp_dir, os.path.join(*['**' for _ in range(depth)]), suffix) for suffix in ['*.txt', '*.log']]\n        num_elements = width * 2\n        dataset = matching_files.MatchingFilesDataset(patterns)\n        self.run_and_report_benchmark(dataset=dataset, iters=3, num_elements=num_elements, extras={'model_name': 'matching_files.benchmark.1', 'parameters': '%d.%d' % (width, depth)}, name='nested_directory(%d*%d)' % (width, depth))\n        shutil.rmtree(tmp_dir, ignore_errors=True)",
    "docstring": "Benchmark for the experimental .",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\data\\experimental\\benchmarks\\matching_files_benchmark.py",
    "ast_data": "ClassDef name:MatchingFilesBenchmark FunctionDef name:benchmark_nested_directories arg:self arguments arg Assign Call Assign Assign For Call For Call Assign Call Call Call Call Call Assign Compare For Assign Call Call Call Assign Call Call Call Assign Assign Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "parse_example_spec",
    "source_code": "@property\ndef parse_example_spec(self):\n    return self.categorical_column.parse_example_spec",
    "docstring": "See base class.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\feature_column\\feature_column_v2.py",
    "ast_data": "FunctionDef name:parse_example_spec arg:self arguments arg Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "__init__",
    "source_code": "def __init__(self, xy, width, height, *, edgecolor='k', facecolor='w', fill=True, text='', loc='right', fontproperties=None, visible_edges='closed'):\n    super().__init__(xy, width=width, height=height, fill=fill, edgecolor=edgecolor, facecolor=facecolor)\n    self.set_clip_on(False)\n    self.visible_edges = visible_edges\n    self._loc = loc\n    self._text = Text(x=xy[0], y=xy[1], clip_on=False, text=text, fontproperties=fontproperties, horizontalalignment=loc, verticalalignment='center')",
    "docstring": "Parameters ---------- xy : 2-tuple The position of the bottom left corner of the cell. width : float The cell width. height : float The cell height. edgecolor : :mpltype:, default: 'k' The color of the cell border. facecolor : :mpltype:, default: 'w' The cell facecolor. fill : bool, default: True Whether the cell background is filled. text : str, optional The cell text. loc : {'right', 'center', 'left'} The alignment of the text within the cell. fontproperties : dict, optional A dict defining the font properties of the text. Supported keys and values are the keyword arguments accepted by . visible_edges : {'closed', 'open', 'horizontal', 'vertical'} or substring of 'BRTL' The cell edges to be drawn with a line: a substring of 'BRTL' (bottom, right, top, left), or one of 'open' (no edges drawn), 'closed' (all edges drawn), 'horizontal' (bottom and top), 'vertical' (right and left).",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\table.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:xy arg:width arg:height arguments arg arg arg arg arg arg arg arg arg arg arg Call Call Call Assign Assign Assign Call"
  },
  {
    "library": "django",
    "name": "_merge_known_related_objects",
    "source_code": "def _merge_known_related_objects(self, other):\n    for field, objects in other._known_related_objects.items():\n        self._known_related_objects.setdefault(field, {}).update(objects)",
    "docstring": "Keep track of all known related objects from either QuerySet instance.",
    "type": "method",
    "file_path": "django\\django\\db\\models\\query.py",
    "ast_data": "FunctionDef name:_merge_known_related_objects arg:self arg:other arguments arg arg For Call Call Call"
  },
  {
    "library": "scikit-learn",
    "name": "_update_feature_log_prob",
    "source_code": "def _update_feature_log_prob(self, alpha):\n    comp_count = self.feature_all_ + alpha - self.feature_count_\n    logged = np.log(comp_count / comp_count.sum(axis=1, keepdims=True))\n    if self.norm:\n        summed = logged.sum(axis=1, keepdims=True)\n        feature_log_prob = logged / summed\n    else:\n        feature_log_prob = -logged\n    self.feature_log_prob_ = feature_log_prob",
    "docstring": "Apply smoothing to raw counts and compute the weights.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\naive_bayes.py",
    "ast_data": "FunctionDef name:_update_feature_log_prob arg:self arg:alpha arguments arg arg Assign Assign Call Call If Assign Call Assign Assign Assign"
  },
  {
    "library": "pytorch",
    "name": "get",
    "source_code": "def get(self, opset: OpsetVersion) -> Optional[Callable]:\n    version = _dispatch_opset_version(opset, self._functions)\n    if version is None:\n        return None\n    return self._functions[version]",
    "docstring": "Find the most recent version of the function.",
    "type": "method",
    "file_path": "pytorch\\torch\\onnx\\_internal\\registration.py",
    "ast_data": "FunctionDef name:get arg:self arg:opset arguments arg arg Assign Call If Compare Return return:no Return return:yes"
  },
  {
    "library": "kornia",
    "name": "_KeyNetConvBlock",
    "source_code": "def _KeyNetConvBlock(in_channels: int=8, out_channels: int=8, kernel_size: int=5, stride: int=1, padding: int=2, dilation: int=1) -> nn.Sequential:\n    return nn.Sequential(nn.Conv2d(in_channels, out_channels, kernel_size, stride, padding, dilation), nn.BatchNorm2d(out_channels), nn.ReLU(inplace=True))",
    "docstring": "Create KeyNet Conv Block. Default learnable convolutional block for KeyNet.",
    "type": "function",
    "file_path": "kornia\\kornia\\feature\\keynet.py",
    "ast_data": "FunctionDef name:_KeyNetConvBlock arg:in_channels arg:out_channels arg:kernel_size arg:stride arg:padding arg:dilation arguments arg arg arg arg arg arg Return return:yes Call Call Call Call"
  },
  {
    "library": "scikit-learn",
    "name": "predict",
    "source_code": "def predict(self, X, **params):\n    _raise_for_params(params, self, 'predict')\n    check_is_fitted(self)\n    X = validate_data(self, X, accept_sparse=['csr', 'csc'], dtype=None, ensure_all_finite=False, reset=False)\n    if _routing_enabled():\n        routed_params = process_routing(self, 'predict', **params)\n    else:\n        routed_params = Bunch()\n        routed_params.estimator = Bunch(predict=Bunch())\n    n_jobs, _, starts = _partition_estimators(self.n_estimators, self.n_jobs)\n    all_y_hat = Parallel(n_jobs=n_jobs, verbose=self.verbose)((delayed(_parallel_predict_regression)(self.estimators_[starts[i]:starts[i + 1]], self.estimators_features_[starts[i]:starts[i + 1]], X, params=routed_params.estimator.predict) for i in range(n_jobs)))\n    y_hat = sum(all_y_hat) / self.n_estimators\n    return y_hat",
    "docstring": "Predict regression target for X. The predicted regression target of an input sample is computed as the mean predicted regression targets of the estimators in the ensemble. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) The training input samples. Sparse matrices are accepted only if they are supported by the base estimator. **params : dict Parameters routed to the method of the sub-estimators via the metadata routing API. .. versionadded:: 1.7 Only available if is set. See :ref: for more details. Returns ------- y : ndarray of shape (n_samples,) The predicted values.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\ensemble\\_bagging.py",
    "ast_data": "FunctionDef name:predict arg:self arg:X arguments arg arg arg Call Call Assign Call If Call Assign Call Assign Call Assign Call Call Assign Call Assign Call Call Call Call Call Assign Call Return return:yes"
  },
  {
    "library": "kornia",
    "name": "shear_y",
    "source_code": "def shear_y(probability: float, magnitude: int) -> OperationBase:\n    magnitudes = linspace(-0.3, 0.3, 11) * 180.0\n    return ShearY(None, probability, magnitude_range=(magnitudes[magnitude].item(), magnitudes[magnitude + 1].item()), symmetric_megnitude=False)",
    "docstring": "Return ShearY op.",
    "type": "function",
    "file_path": "kornia\\kornia\\augmentation\\auto\\autoaugment\\ops.py",
    "ast_data": "FunctionDef name:shear_y arg:probability arg:magnitude arguments arg arg Assign Call Return return:yes Call Call Call"
  },
  {
    "library": "scipy",
    "name": "initialize",
    "source_code": "def initialize(self, n, approx_type):\n    raise NotImplementedError('The method ``initialize(n, approx_type)`` is not implemented.')",
    "docstring": "Initialize internal matrix. Allocate internal memory for storing and updating the Hessian or its inverse. Parameters ---------- n : int Problem dimension. approx_type : {'hess', 'inv_hess'} Selects either the Hessian or the inverse Hessian. When set to 'hess' the Hessian will be stored and updated. When set to 'inv_hess' its inverse will be used instead.",
    "type": "method",
    "file_path": "scipy\\scipy\\optimize\\_hessian_update_strategy.py",
    "ast_data": "FunctionDef name:initialize arg:self arg:n arg:approx_type arguments arg arg arg Raise Call"
  },
  {
    "library": "numpy",
    "name": "reduce",
    "source_code": "def reduce(self, target, axis=0, dtype=None):\n    tclass = get_masked_subclass(target)\n    m = getmask(target)\n    t = filled(target, self.filly)\n    if t.shape == ():\n        t = t.reshape(1)\n        if m is not nomask:\n            m = make_mask(m, copy=True)\n            m.shape = (1,)\n    if m is nomask:\n        tr = self.f.reduce(t, axis)\n        mr = nomask\n    else:\n        tr = self.f.reduce(t, axis, dtype=dtype)\n        mr = umath.logical_and.reduce(m, axis)\n    if not tr.shape:\n        if mr:\n            return masked\n        else:\n            return tr\n    masked_tr = tr.view(tclass)\n    masked_tr._mask = mr\n    return masked_tr",
    "docstring": "Reduce along the given .",
    "type": "method",
    "file_path": "numpy\\numpy\\ma\\core.py",
    "ast_data": "FunctionDef name:reduce arg:self arg:target arg:axis arg:dtype arguments arg arg arg arg Assign Call Assign Call Assign Call If Compare Assign Call If Compare Assign Call Assign If Compare Assign Call Assign Assign Call Assign Call If If Return return:yes Return return:yes Assign Call Assign Return return:yes"
  },
  {
    "library": "django",
    "name": "in_bulk",
    "source_code": "def in_bulk(self, id_list=None, *, field_name='pk'):\n    if self.query.is_sliced:\n        raise TypeError(\"Cannot use 'limit' or 'offset' with in_bulk().\")\n    if not issubclass(self._iterable_class, ModelIterable):\n        raise TypeError('in_bulk() cannot be used with values() or values_list().')\n    opts = self.model._meta\n    unique_fields = [constraint.fields[0] for constraint in opts.total_unique_constraints if len(constraint.fields) == 1]\n    if field_name != 'pk' and (not opts.get_field(field_name).unique) and (field_name not in unique_fields) and (self.query.distinct_fields != (field_name,)):\n        raise ValueError(\"in_bulk()'s field_name must be a unique field but %r isn't.\" % field_name)\n    if id_list is not None:\n        if not id_list:\n            return {}\n        filter_key = '{}__in'.format(field_name)\n        batch_size = connections[self.db].features.max_query_params\n        id_list = tuple(id_list)\n        if batch_size and batch_size < len(id_list):\n            qs = ()\n            for offset in range(0, len(id_list), batch_size):\n                batch = id_list[offset:offset + batch_size]\n                qs += tuple(self.filter(**{filter_key: batch}))\n        else:\n            qs = self.filter(**{filter_key: id_list})\n    else:\n        qs = self._chain()\n    return {getattr(obj, field_name): obj for obj in qs}",
    "docstring": "Return a dictionary mapping each of the given IDs to the object with that ID. If isn't provided, evaluate the entire QuerySet.",
    "type": "method",
    "file_path": "django\\django\\db\\models\\query.py",
    "ast_data": "FunctionDef name:in_bulk arg:self arg:id_list arguments arg arg arg If Raise Call If Call Raise Call Assign Assign Compare Call If BoolOp Compare Call Compare Compare Raise Call If Compare If Return return:no Assign Call Assign Assign Call If BoolOp Compare Call Assign For Call Call Assign Call Call Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "numpy",
    "name": "argsort",
    "source_code": "def argsort(self, axis=-1, kind=None, order=None):\n    return self.__array__().argsort(axis, kind, order)",
    "docstring": "Return the indices that sort the array lexicographically. For full documentation see , for which this method is in fact merely a \"thin wrapper.\" Examples -------- >>> c = np.array(['a1b c', '1b ca', 'b ca1', 'Ca1b'], 'S5') >>> c = c.view(np.char.chararray); c chararray(['a1b c', '1b ca', 'b ca1', 'Ca1b'], dtype='|S5') >>> c[c.argsort()] chararray(['1b ca', 'Ca1b', 'a1b c', 'b ca1'], dtype='|S5')",
    "type": "method",
    "file_path": "numpy\\numpy\\_core\\defchararray.py",
    "ast_data": "FunctionDef name:argsort arg:self arg:axis arg:kind arg:order arguments arg arg arg arg Return return:yes Call Call"
  },
  {
    "library": "pandas",
    "name": "_get_i8_values_and_mask",
    "source_code": "@final\ndef _get_i8_values_and_mask(self, other) -> tuple[int | npt.NDArray[np.int64], None | npt.NDArray[np.bool_]]:\n    if isinstance(other, Period):\n        i8values = other.ordinal\n        mask = None\n    elif isinstance(other, (Timestamp, Timedelta)):\n        i8values = other._value\n        mask = None\n    else:\n        mask = other._isnan\n        i8values = other.asi8\n    return (i8values, mask)",
    "docstring": "Get the int64 values and b_mask to pass to add_overflowsafe.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\arrays\\datetimelike.py",
    "ast_data": "FunctionDef name:_get_i8_values_and_mask arg:self arg:other arguments arg arg If Call Assign Assign If Call Assign Assign Assign Assign Return return:yes"
  },
  {
    "library": "django",
    "name": "_key_to_file",
    "source_code": "def _key_to_file(self, key, version=None):\n    key = self.make_and_validate_key(key, version=version)\n    return os.path.join(self._dir, ''.join([md5(key.encode(), usedforsecurity=False).hexdigest(), self.cache_suffix]))",
    "docstring": "Convert a key into a cache file path. Basically this is the root cache path joined with the md5sum of the key and a suffix.",
    "type": "method",
    "file_path": "django\\django\\core\\cache\\backends\\filebased.py",
    "ast_data": "FunctionDef name:_key_to_file arg:self arg:key arg:version arguments arg arg arg Assign Call Return return:yes Call Call Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "load_source",
    "source_code": "def load_source(source, delete_on_exit):\n    with tempfile.NamedTemporaryFile(mode='w', suffix='.py', prefix='__autograph_generated_file', delete=False, encoding='utf-8') as f:\n        module_name = os.path.basename(f.name[:-3])\n        file_name = f.name\n        f.write(source)\n    if delete_on_exit:\n        atexit.register(lambda: _remove_file(file_name))\n    spec = importlib.util.spec_from_file_location(module_name, file_name)\n    module = importlib.util.module_from_spec(spec)\n    spec.loader.exec_module(module)\n    sys.modules[module_name] = module\n    return (module, file_name)",
    "docstring": "Loads the given source code as a Python module.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\autograph\\pyct\\loader.py",
    "ast_data": "FunctionDef name:load_source arg:source arg:delete_on_exit arguments arg arg With Call Assign Call Assign Call If Call arguments Call Assign Call Assign Call Call Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "set_root_name",
    "source_code": "def set_root_name(self, root_name):\n    self._root_name = root_name",
    "docstring": "Override the default root name of 'tf'.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\tools\\common\\public_api.py",
    "ast_data": "FunctionDef name:set_root_name arg:self arg:root_name arguments arg arg Assign"
  },
  {
    "library": "tensorflow",
    "name": "mean_squared_error",
    "source_code": "@dispatch.add_dispatch_support\ndef mean_squared_error(y_true, y_pred):\n    y_pred = tensor_conversion.convert_to_tensor_v2_with_dispatch(y_pred)\n    y_true = math_ops.cast(y_true, y_pred.dtype)\n    return backend.mean(math_ops.squared_difference(y_pred, y_true), axis=-1)",
    "docstring": "Computes the mean squared error between labels and predictions. After computing the squared distance between the inputs, the mean value over the last dimension is returned. Standalone usage: >>> y_true = np.random.randint(0, 2, size=(2, 3)) >>> y_pred = np.random.random(size=(2, 3)) >>> loss = tf.keras.losses.mean_squared_error(y_true, y_pred) >>> assert loss.shape == (2,) >>> assert np.array_equal( ... loss.numpy(), np.mean(np.square(y_true - y_pred), axis=-1)) Args: y_true: Ground truth values. shape = . y_pred: The predicted values. shape = . Returns: Mean squared error values. shape = .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\losses.py",
    "ast_data": "FunctionDef name:mean_squared_error arg:y_true arg:y_pred arguments arg arg Assign Call Assign Call Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "done",
    "source_code": "def done(self):\n    return self._cluster.done()",
    "docstring": "Returns whether all the scheduled functions have finished execution. If any previously scheduled function raises an error, will fail by raising any one of those errors. When returns True or raises, it guarantees that there is no function that is still being executed. Returns: Whether all the scheduled functions have finished execution. Raises: Exception: one of the exceptions caught by the coordinator by any previously scheduled function since the last time an error was thrown or since the beginning of the program.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\coordinator\\cluster_coordinator.py",
    "ast_data": "FunctionDef name:done arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "django",
    "name": "copy",
    "source_code": "def copy(self, alias=None):\n    settings_dict = copy.deepcopy(self.settings_dict)\n    if alias is None:\n        alias = self.alias\n    return type(self)(settings_dict, alias)",
    "docstring": "Return a copy of this connection. For tests that require two connections to the same database.",
    "type": "method",
    "file_path": "django\\django\\db\\backends\\base\\base.py",
    "ast_data": "FunctionDef name:copy arg:self arg:alias arguments arg arg Assign Call If Compare Assign Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, table: TableConfig, max_sequence_length: int=0, validate_weights_and_indices: bool=True, output_shape: Optional[Union[List[int], TensorShape]]=None, name: Optional[Text]=None):\n    if not isinstance(table, TableConfig):\n        raise ValueError(f'Argument `table` has invalid type {type(table)}. Expected `tf.tpu.experimental.embedding.TableConfig`.')\n    if not isinstance(max_sequence_length, int) or max_sequence_length < 0:\n        raise ValueError(f'Argument `max_sequence_length` must be an int and must be >= 0. Received: {max_sequence_length}')\n    self.table = table\n    self.max_sequence_length = max_sequence_length\n    self.name = name\n    self.output_shape = TensorShape(output_shape)\n    if not isinstance(validate_weights_and_indices, bool):\n        raise ValueError(f'Argument `validate_weights_and_indices` must be a boolean. Received: {validate_weights_and_indices}')\n    self.validate_weights_and_indices = validate_weights_and_indices",
    "docstring": "Feature configuration. Args: table: An instance of , describing the table in which this feature should be looked up. max_sequence_length: If positive, the feature is a sequence feature with the corresponding maximum sequence length. If the sequence is longer than this, it will be truncated. If 0, the feature is not a sequence feature. validate_weights_and_indices: If true, uses safe_embedding_lookup during serving which ensures there are no empty rows and all weights and ids are positive at the expense of extra compute cost. output_shape: Optional argument to config the output shape of the feature activation. If provided, the feature feeding to the has to match the shape (for ragged tensor, the input shape and output shape can mismatch). If not provided, the shape can be either provided to the or auto detected at the runtime. name: An optional string used to name the table. Must be defined if running on SparseCore. Returns: . Raises: ValueError: if is not an instance of . ValueError: if not an integer or is negative.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\tpu\\tpu_embedding_v2_utils.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:table arg:max_sequence_length arg:validate_weights_and_indices arg:output_shape arg:name arguments arg arg arg arg arg arg If Call Raise Call Call If BoolOp Call Compare Raise Call Assign Assign Assign Assign Call If Call Raise Call Assign"
  },
  {
    "library": "tensorflow",
    "name": "trace_wrapper",
    "source_code": "def trace_wrapper(trace_name, **trace_kwargs):\n    if callable(trace_name):\n        func = trace_name\n        name = getattr(func, '__qualname__', None)\n        if not name:\n            name = getattr(func, '__name__', 'unknown function')\n        return trace_wrapper(name)(func)\n\n    def inner_wrapper(func):\n\n        @functools.wraps(func)\n        def wrapped(*args, **kwargs):\n            if enabled:\n                with Trace(trace_name, **trace_kwargs):\n                    return func(*args, **kwargs)\n            return func(*args, **kwargs)\n        return wrapped\n    return inner_wrapper",
    "docstring": "Decorator alternative to . It's faster. Args: trace_name: The name of the trace event, or a callable to be traced, in which case the name is inferred from qualname or name of the callable. **trace_kwargs: Keyword arguments added to the trace event. Both the key and value are of types that can be converted to strings, which will be interpreted by the profiler according to the traceme name. Returns: A decorator that can wrap a function and apply scope if needed, or a decorated function if used as a decorator directly. Example usage: or",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\profiler\\trace.py",
    "ast_data": "FunctionDef name:trace_wrapper arg:trace_name arguments arg arg If Call Assign Assign Call If Assign Call Return return:yes Call Call FunctionDef name:inner_wrapper arg:func arguments arg FunctionDef name:wrapped arguments arg arg If With Call Return return:yes Call Return return:yes Call Call Return return:yes Return return:yes"
  },
  {
    "library": "scipy",
    "name": "to_discrete",
    "source_code": "def to_discrete(self, dt, method='zoh', alpha=None):\n    return ZerosPolesGain(*cont2discrete((self.zeros, self.poles, self.gain), dt, method=method, alpha=alpha)[:-1], dt=dt)",
    "docstring": "Returns the discretized system. Parameters: See for details. Returns ------- sys: instance of and",
    "type": "method",
    "file_path": "scipy\\scipy\\signal\\_ltisys.py",
    "ast_data": "FunctionDef name:to_discrete arg:self arg:dt arg:method arg:alpha arguments arg arg arg arg Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "all_to_all_single",
    "source_code": "def all_to_all_single(self: torch.Tensor, output_split_sizes: Optional[list[int]], input_split_sizes: Optional[list[int]], group: RANK_TYPES, tag: str='') -> torch.Tensor:\n    if output_split_sizes is not None:\n        assert all((isinstance(size, (int, torch.SymInt)) for size in output_split_sizes)), output_split_sizes\n    if input_split_sizes is not None:\n        assert all((isinstance(size, (int, torch.SymInt)) for size in input_split_sizes)), input_split_sizes\n    group_name = _resolve_group_name(group, tag)\n    group_size = c10d._get_group_size_by_name(group_name)\n    if output_split_sizes is None or input_split_sizes is None:\n        assert output_split_sizes is None and input_split_sizes is None, 'output_split_sizes and input_split_sizes must either be specified together or both set to None'\n        output_split_sizes = [self.shape[0] // group_size] * group_size\n        input_split_sizes = output_split_sizes\n    tensor = torch.ops._c10d_functional.all_to_all_single(self, output_split_sizes, input_split_sizes, group_name)\n    return _maybe_wrap_tensor(tensor)",
    "docstring": "Each process splits input tensor and then scatters the split list to all processes in a group. Then concatenate the received tensors from all the processes in the group and return single output tensor. Group can be one of: List[int]: ranks participating in the collective. List[List[int]]: 2D mesh of ranks taking part of this collective in MPMD. ProcessGroup: Will perform a collective using the ranks and tag of the PG. DeviceMesh: Do a SPMD collective over all ranks of the mesh (DeviceMesh, int): Do a MPMD collective over one dimension of the DeviceMesh :: N.B. If you pass a PG or a 1D list to perform a MPMD collective, the compiler won't be able to recover that information and perform collective algebraic optimization. Use other forms of input for that.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\_functional_collectives.py",
    "ast_data": "FunctionDef name:all_to_all_single arg:self arg:output_split_sizes arg:input_split_sizes arg:group arg:tag arguments arg arg arg arg arg If Compare Call Call If Compare Call Call Assign Call Assign Call If BoolOp Compare Compare BoolOp Compare Compare Assign Assign Assign Call Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "view_limits",
    "source_code": "def view_limits(self, dmin, dmax):\n    if mpl.rcParams['axes.autolimit_mode'] == 'round_numbers':\n        vmin = self._edge.le(dmin - self._offset) * self._edge.step + self._offset\n        vmax = self._edge.ge(dmax - self._offset) * self._edge.step + self._offset\n        if vmin == vmax:\n            vmin -= 1\n            vmax += 1\n    else:\n        vmin = dmin\n        vmax = dmax\n    return mtransforms.nonsingular(vmin, vmax)",
    "docstring": "Set the view limits to the nearest tick values that contain the data.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\ticker.py",
    "ast_data": "FunctionDef name:view_limits arg:self arg:dmin arg:dmax arguments arg arg arg If Compare Assign Call Assign Call If Compare Assign Assign Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "dropout",
    "source_code": "@dispatch.add_dispatch_support\n@doc_controls.do_not_generate_docs\ndef dropout(x, level, noise_shape=None, seed=None):\n    if seed is None:\n        seed = np.random.randint(10000000.0)\n    return nn.dropout_v2(x, rate=level, noise_shape=noise_shape, seed=seed)",
    "docstring": "Sets entries in to zero at random, while scaling the entire tensor. Args: x: tensor level: fraction of the entries in the tensor that will be set to 0. noise_shape: shape for randomly generated keep/drop flags, must be broadcastable to the shape of seed: random seed to ensure determinism. Returns: A tensor.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\backend.py",
    "ast_data": "FunctionDef name:dropout arg:x arg:level arg:noise_shape arg:seed arguments arg arg arg arg If Compare Assign Call Return return:yes Call"
  },
  {
    "library": "django",
    "name": "convex_hull",
    "source_code": "@property\ndef convex_hull(self):\n    return self._geomgen(capi.geom_convex_hull)",
    "docstring": "Return the smallest convex Polygon that contains all the points in this Geometry.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\gdal\\geometries.py",
    "ast_data": "FunctionDef name:convex_hull arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, *args, **kwargs):\n    super().__init__()\n    self._storage = self._make_storage(*args, **kwargs)\n    for index, element in enumerate(self._storage):\n        self._storage[index] = self._track_value(element, name=self._name_element(index))",
    "docstring": "Construct a new sequence. Arguments are passed to .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\trackable\\data_structures.py",
    "ast_data": "FunctionDef name:__init__ arg:self arguments arg arg arg Call Call Assign Call For Call Assign Call Call"
  },
  {
    "library": "pytorch",
    "name": "Extension",
    "source_code": "class Extension(abc.ABC):\n\n    @staticmethod\n    @abc.abstractmethod\n    def registry_name() -> str:\n        pass\n\n    @staticmethod\n    @abc.abstractmethod\n    def from_descriptor(version: str) -> 'Extension':\n        pass\n\n    @abc.abstractmethod\n    def get_descriptor(self) -> str:\n        pass",
    "docstring": "Extensions provide modular additions to functionality within distributed checkpointing, which affect the layout or format of the written artifacts. Extensions may be built into pytorch, or provided externally. When writing, the caller provides a list of extension instances of the appropriate type. Each extension can output a descriptor which is used to reconstitute the extension at read-time.",
    "type": "class",
    "file_path": "pytorch\\torch\\distributed\\checkpoint\\_extension.py",
    "ast_data": "ClassDef name:Extension FunctionDef name:registry_name arguments FunctionDef name:from_descriptor arg:version arguments arg FunctionDef name:get_descriptor arg:self arguments arg"
  },
  {
    "library": "pytorch",
    "name": "apply_activation_checkpointing",
    "source_code": "def apply_activation_checkpointing(model, checkpoint_wrapper_fn=checkpoint_wrapper, check_fn=lambda _: True, auto_wrap_policy: Optional[Callable[[nn.Module, bool, int], bool]]=None):\n    from torch.distributed.fsdp._wrap_utils import _construct_wrap_fn, _post_order_apply\n    from torch.distributed.fsdp.wrap import _Policy, _recursive_wrap, lambda_auto_wrap_policy\n    policy = auto_wrap_policy if auto_wrap_policy is not None else partial(lambda_auto_wrap_policy, lambda_fn=check_fn)\n    if not callable(policy):\n        if not isinstance(policy, _Policy):\n            raise ValueError(f'Expected {policy} to be callable or be a pre-defined wrap policy')\n        target_module_to_kwargs = policy._run_policy(model, ignored_modules=set(), root_kwargs={})\n        wrap_fn = _construct_wrap_fn(model, target_module_to_kwargs, checkpoint_wrapper_fn)\n        _post_order_apply(model, wrap_fn)\n        return\n    _recursive_wrap(module=model, auto_wrap_policy=policy, wrapper_cls=checkpoint_wrapper_fn, ignored_modules=set(), ignored_params=set(), only_wrap_children=True)",
    "docstring": "Apply :func: to modules within based on a user-defined configuration. For each module within , the is used to decide whether should be wrapped with :func: or not. Note:: This function modifies in place and replaces appropriate layers with their checkpoint-wrapped modules. Note:: This function will not wrap the overall root module. If this is needed, please directly use :func: or :func:. Usage:: model = nn.Sequential( nn.Linear(10, 10), nn.Linear(10, 10), nn.Linear(10, 10) ) check_fn = lambda l: isinstance(l, nn.Linear) # checkpoint activations apply_activation_checkpointing(model, checkpoint_wrapper_fn=checkpoint_wrapper, check_fn=check_fn) # Or offload activations to CPU apply_activation_checkpointing(model, checkpoint_wrapper_fn=offload_wrapper, check_fn=check_fn) Args: model (nn.Module): The model whose submodules should be wrapped with activation checkpointing. checkpoint_wrapper_fn (Optional[Callable[nn.Module]]) A `model` is modified inplace)",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\algorithms\\_checkpoint\\checkpoint_wrapper.py",
    "ast_data": "FunctionDef name:apply_activation_checkpointing arg:model arg:checkpoint_wrapper_fn arg:check_fn arg:auto_wrap_policy arguments arg arg arg arg arguments arg Assign Compare Call If Call If Call Raise Call Assign Call Call Assign Call Call Return return:no Call Call Call"
  },
  {
    "library": "matplotlib",
    "name": "draw_text",
    "source_code": "def draw_text(self, gc, x, y, s, prop, angle, ismath=False, mtext=None):\n    self._draw_text_as_path(gc, x, y, s, prop, angle, ismath)",
    "docstring": "Draw a text instance. Parameters ---------- gc : The graphics context. x : float The x location of the text in display coords. y : float The y location of the text baseline in display coords. s : str The text string. prop : The font properties. angle : float The rotation angle in degrees anti-clockwise. ismath : bool or \"TeX\" If True, use mathtext parser. mtext : The original text object to be rendered. Notes ----- **Notes for backend implementers:** also supports passing \"TeX\" to the *ismath* parameter to use TeX rendering, but this is not required for actual rendering backends, and indeed many builtin backends do not support this. Rather, TeX rendering is provided by .",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\backend_bases.py",
    "ast_data": "FunctionDef name:draw_text arg:self arg:gc arg:x arg:y arg:s arg:prop arg:angle arg:ismath arg:mtext arguments arg arg arg arg arg arg arg arg arg Call"
  },
  {
    "library": "sphinx",
    "name": "install_packages_for_ja",
    "source_code": "def install_packages_for_ja(app: Sphinx) -> None:\n    if app.config.language == 'ja' and app.config.latex_engine in {'platex', 'uplatex'}:\n        app.add_latex_package('pxjahyper', after_hyperref=True)",
    "docstring": "Install packages for Japanese.",
    "type": "function",
    "file_path": "sphinx\\sphinx\\builders\\latex\\__init__.py",
    "ast_data": "FunctionDef name:install_packages_for_ja arg:app arguments arg If BoolOp Compare Compare Call"
  },
  {
    "library": "tensorflow",
    "name": "shard_dimensions",
    "source_code": "@property\ndef shard_dimensions(self):\n    return [policy.shard_dimension for policy in self._sharding_policies]",
    "docstring": "Gets the shard dimension of each tuple element. Returns: A list of length number_of_tuple_elements, where each list entry is the shard dimension of that tuple element or None if the shard dimension has not been set.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\tpu\\tpu_feed.py",
    "ast_data": "FunctionDef name:shard_dimensions arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_assert_same_graph",
    "source_code": "def _assert_same_graph(original_item, item):\n    original_graph = getattr(original_item, 'graph', None)\n    graph = getattr(item, 'graph', None)\n    if original_graph and graph and (original_graph is not graph):\n        raise ValueError('%s must be from the same graph as %s (graphs are %s and %s).' % (item, original_item, graph, original_graph))",
    "docstring": "Fail if the 2 items are from different graphs. Args: original_item: Original item to check against. item: Item to check. Raises: ValueError: if graphs do not match.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\backend.py",
    "ast_data": "FunctionDef name:_assert_same_graph arg:original_item arg:item arguments arg arg Assign Call Assign Call If BoolOp Compare Raise Call"
  },
  {
    "library": "tensorflow",
    "name": "_function_scope_options",
    "source_code": "def _function_scope_options(self, fn_scope):\n    if fn_scope.level == 2:\n        return self.ctx.user.options\n    return self.ctx.user.options.call_options()",
    "docstring": "Returns the options with which to create function scopes.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\autograph\\converters\\functions.py",
    "ast_data": "FunctionDef name:_function_scope_options arg:self arg:fn_scope arguments arg arg If Compare Return return:yes Return return:yes Call"
  },
  {
    "library": "django",
    "name": "write",
    "source_code": "def write(self, geom):\n    geom = self._handle_empty_point(geom)\n    wkb = wkb_writer_write(self.ptr, geom.ptr, byref(c_size_t()))\n    return memoryview(wkb)",
    "docstring": "Return the WKB representation of the given geometry.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\geos\\prototypes\\io.py",
    "ast_data": "FunctionDef name:write arg:self arg:geom arguments arg arg Assign Call Assign Call Call Call Return return:yes Call"
  },
  {
    "library": "pygame",
    "name": "draw_aalines",
    "source_code": "def draw_aalines(surf, color, closed, points, blend=True):\n    return _multi_lines(surf, color, closed, points, blend=blend, aaline=True)",
    "docstring": "draw several anti-aliased lines connected through the points.",
    "type": "function",
    "file_path": "pygame\\src_py\\draw_py.py",
    "ast_data": "FunctionDef name:draw_aalines arg:surf arg:color arg:closed arg:points arg:blend arguments arg arg arg arg arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "call",
    "source_code": "def call(self, inputs, state):\n    cur_state_pos = 0\n    cur_inp = inputs\n    new_states = []\n    for i, cell in enumerate(self._cells):\n        with vs.variable_scope('cell_%d' % i):\n            if self._state_is_tuple:\n                if not nest.is_nested(state):\n                    raise ValueError('Expected state to be a tuple of length %d, but received: %s' % (len(self.state_size), state))\n                cur_state = state[i]\n            else:\n                cur_state = array_ops.slice(state, [0, cur_state_pos], [-1, cell.state_size])\n                cur_state_pos += cell.state_size\n            cur_inp, new_state = cell(cur_inp, cur_state)\n            new_states.append(new_state)\n    new_states = tuple(new_states) if self._state_is_tuple else array_ops.concat(new_states, 1)\n    return (cur_inp, new_states)",
    "docstring": "Run this multi-layer cell on inputs, starting from state.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\layers\\legacy_rnn\\rnn_cell_impl.py",
    "ast_data": "FunctionDef name:call arg:self arg:inputs arg:state arguments arg arg arg Assign Assign Assign For Call With Call If If Call Raise Call Call Assign Assign Call Assign Call Call Assign Call Call Return return:yes"
  },
  {
    "library": "scipy",
    "name": "roots_sh_chebyt",
    "source_code": "def roots_sh_chebyt(n, mu=False):\n    xw = roots_chebyt(n, mu)\n    return ((xw[0] + 1) / 2,) + xw[1:]",
    "docstring": "Gauss-Chebyshev (first kind, shifted) quadrature. Compute the sample points and weights for Gauss-Chebyshev quadrature. The sample points are the roots of the nth degree shifted Chebyshev polynomial of the first kind, :math:. These sample points and weights correctly integrate polynomials of degree :math: or less over the interval :math: with weight function :math:. See 22.2.8 in [AS]_ for more details. Parameters ---------- n : int quadrature order mu : bool, optional If True, return the sum of the weights, optional. Returns ------- x : ndarray Sample points w : ndarray Weights mu : float Sum of the weights See Also -------- scipy.integrate.fixed_quad References ---------- .. [AS] Milton Abramowitz and Irene A. Stegun, eds. Handbook of Mathematical Functions with Formulas, Graphs, and Mathematical Tables. New York: Dover, 1972.",
    "type": "function",
    "file_path": "scipy\\scipy\\special\\_orthogonal.py",
    "ast_data": "FunctionDef name:roots_sh_chebyt arg:n arg:mu arguments arg arg Assign Call Return return:yes"
  },
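Usage sketch (not part of the dataset): with n = 5 nodes, the rule integrates t**2 against the shifted Chebyshev weight 1/sqrt(t*(1-t)) on [0, 1] exactly; the closed-form value is 3*pi/8.

```python
import numpy as np
from scipy.special import roots_sh_chebyt

x, w = roots_sh_chebyt(5)
print(np.dot(w, x**2))  # ~1.1781, matches the exact value below
print(3 * np.pi / 8)    # 1.1780972450961724
```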
  {
    "library": "pandas",
    "name": "_get_empty_indexer",
    "source_code": "def _get_empty_indexer() -> tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]]:\n    return (np.array([], dtype=np.intp), np.array([], dtype=np.intp))",
    "docstring": "Return empty join indexers.",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\reshape\\merge.py",
    "ast_data": "FunctionDef name:_get_empty_indexer arguments Return return:yes Call Call"
  },
  {
    "library": "scipy",
    "name": "smoke_docs",
    "source_code": "@spin.util.extend_command(test, doc='')\ndef smoke_docs(*, parent_callback, pytest_args, **kwargs):\n    if not importlib.util.find_spec('scipy_doctest'):\n        raise ModuleNotFoundError('Please install scipy-doctest')\n    tests = kwargs['tests']\n    if kwargs['submodule']:\n        tests = PROJECT_MODULE + '.' + kwargs['submodule']\n    if not pytest_args and (not tests):\n        pytest_args = ('scipy',)\n    doctest_args = ('--doctest-modules', '--doctest-collect=api')\n    if not tests:\n        doctest_args += ('--doctest-collect=api',)\n    pytest_args = pytest_args + doctest_args\n    parent_callback(**{'pytest_args': pytest_args, **kwargs})",
    "docstring": "🔧 Run doctests of objects in the public API. PYTEST_ARGS are passed through directly to pytest, e.g.: spin smoke-docs -- --pdb To run tests on a directory: \b spin smoke-docs scipy/linalg To report the durations of the N slowest doctests: spin smoke-docs -- --durations=N To run doctests that match a given pattern: \b spin smoke-docs -- -k \"slogdet\" spin smoke-docs scipy/linalg -- -k \"det and not slogdet\" \b Note: ----- \b - This command only runs doctests and skips everything under tests/ - This command only doctests public objects: those which are accessible from the top-level file.",
    "type": "function",
    "file_path": "scipy\\.spin\\cmds.py",
    "ast_data": "FunctionDef name:smoke_docs arguments arg arg arg If Call Raise Call Assign If Assign If BoolOp Assign Assign If Assign Call Call"
  },
  {
    "library": "pytorch",
    "name": "ambiguities",
    "source_code": "def ambiguities(signatures):\n    signatures = list(map(tuple, signatures))\n    return {(a, b) for a in signatures for b in signatures if hash(a) < hash(b) and ambiguous(a, b) and (not any((supercedes(c, a) and supercedes(c, b) for c in signatures)))}",
    "docstring": "All signature pairs such that A is ambiguous with B",
    "type": "function",
    "file_path": "pytorch\\torch\\fx\\experimental\\unification\\multipledispatch\\conflict.py",
    "ast_data": "FunctionDef name:ambiguities arg:signatures arguments arg Assign Call Call Return return:yes BoolOp Compare Call Call Call Call BoolOp Call Call"
  },
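A hedged sketch of what `ambiguities` flags: two signatures where neither uniformly supercedes the other, because each is narrower in a different position.

```python
from torch.fx.experimental.unification.multipledispatch.conflict import ambiguities

sigs = [(int, object), (object, int)]
# One mutually-ambiguous pair: (int, object) is narrower in position 0,
# (object, int) is narrower in position 1, so neither supercedes the other.
print(ambiguities(sigs))
```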
  {
    "library": "pytorch",
    "name": "get_approximate_basis",
    "source_code": "def get_approximate_basis(A: Tensor, q: int, niter: Optional[int]=2, M: Optional[Tensor]=None) -> Tensor:\n    niter = 2 if niter is None else niter\n    dtype = _utils.get_floating_dtype(A) if not A.is_complex() else A.dtype\n    matmul = _utils.matmul\n    R = torch.randn(A.shape[-1], q, dtype=dtype, device=A.device)\n    X = matmul(A, R)\n    if M is not None:\n        X = X - matmul(M, R)\n    Q = torch.linalg.qr(X).Q\n    for _ in range(niter):\n        X = matmul(A.mH, Q)\n        if M is not None:\n            X = X - matmul(M.mH, Q)\n        Q = torch.linalg.qr(X).Q\n        X = matmul(A, Q)\n        if M is not None:\n            X = X - matmul(M, Q)\n        Q = torch.linalg.qr(X).Q\n    return Q",
    "docstring": "Return tensor :math: with :math: orthonormal columns such that :math: approximates :math:. If :math: is specified, then :math: is such that :math: approximates :math:. without instantiating any tensors of the size of :math: or :math:. .. note:: The implementation is based on the Algorithm 4.4 from Halko et al., 2009. .. note:: For an adequate approximation of a k-rank matrix :math:, where k is not known in advance but could be estimated, the number of :math: columns, q, can be choosen according to the following criteria: in general, :math:_).",
    "type": "function",
    "file_path": "pytorch\\torch\\_lowrank.py",
    "ast_data": "FunctionDef name:get_approximate_basis arg:A arg:q arg:niter arg:M arguments arg arg arg arg Assign Compare Assign Call Call Assign Assign Call Assign Call If Compare Assign Call Assign Call For Call Assign Call If Compare Assign Call Assign Call Assign Call If Compare Assign Call Assign Call Return return:yes"
  },
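A minimal usage sketch. Note that torch._lowrank is a private module, so the import path is an assumption that may change between releases; for a matrix whose true rank equals q, the returned Q should reconstruct A almost exactly.

```python
import torch
from torch._lowrank import get_approximate_basis  # private module, assumed path

torch.manual_seed(0)
A = torch.randn(100, 5) @ torch.randn(5, 80)   # rank <= 5 by construction
Q = get_approximate_basis(A, q=5, niter=2)     # (100, 5), orthonormal columns
rel_err = torch.linalg.norm(A - Q @ (Q.mH @ A)) / torch.linalg.norm(A)
print(rel_err)  # near zero because q matches the true rank
```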
  {
    "library": "pytorch",
    "name": "_get_input_idx_for_binary_node",
    "source_code": "def _get_input_idx_for_binary_node(self, conv_gemm_node: torch.fx.Node, binary_node: torch.fx.Node):\n    conv_gemm_node_idx = None\n    extra_input_node_idx = None\n    if binary_node.args[0].op == 'call_function' and binary_node.args[0] == conv_gemm_node:\n        conv_gemm_node_idx = 0\n        extra_input_node_idx = 1\n    elif binary_node.args[1].op == 'call_function' and binary_node.args[1] == conv_gemm_node:\n        conv_gemm_node_idx = 1\n        extra_input_node_idx = 0\n    extra_input_node = binary_node.args[extra_input_node_idx]\n    assert isinstance(extra_input_node, Node)\n    return (conv_gemm_node_idx, extra_input_node_idx)",
    "docstring": "Helper function to check conv_gemm and extra input node index for binary node fused with conv_gemm.",
    "type": "method",
    "file_path": "pytorch\\torch\\ao\\quantization\\quantizer\\x86_inductor_quantizer.py",
    "ast_data": "FunctionDef name:_get_input_idx_for_binary_node arg:self arg:conv_gemm_node arg:binary_node arguments arg arg arg Assign Assign If BoolOp Compare Compare Assign Assign If BoolOp Compare Compare Assign Assign Assign Call Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "theta",
    "source_code": "@theta.setter\ndef theta(self, theta):\n    k1_dims = self.k1.n_dims\n    self.k1.theta = theta[:k1_dims]\n    self.k2.theta = theta[k1_dims:]",
    "docstring": "Sets the (flattened, log-transformed) non-fixed hyperparameters. Parameters ---------- theta : ndarray of shape (n_dims,) The non-fixed, log-transformed hyperparameters of the kernel",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\gaussian_process\\kernels.py",
    "ast_data": "FunctionDef name:theta arg:self arg:theta arguments arg arg Assign Assign Assign"
  },
  {
    "library": "tensorflow",
    "name": "_is_op_stateful",
    "source_code": "def _is_op_stateful(op):\n    if op.type == 'GlobalIterId':\n        return False\n    if op.type == 'UpdateFdoWithGlobalMinibatchStatistics':\n        return False\n    if op.type == 'CollectiveGatherV2' and op.get_attr('is_stateless'):\n        return False\n    if op.type == 'CollectiveAllToAllV2' and op.get_attr('is_stateless'):\n        return False\n    return op._is_stateful",
    "docstring": "Check whether an op is stateful. This helper function handles two special cases to make the stateful analysis consistent with the mlir side effect analysis. 1. GlobalIterIdOp should be stateless. 2. CollectiveGatherV2 with attribute is_stateless to be True should be stateless. Args: op: Operation Returns: Boolean indicates whether the operation is stateless or not.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\cond_v2.py",
    "ast_data": "FunctionDef name:_is_op_stateful arg:op arguments arg If Compare Return return:yes If Compare Return return:yes If BoolOp Compare Call Return return:yes If BoolOp Compare Call Return return:yes Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "__init__",
    "source_code": "def __init__(self, paths, sizes=None, **kwargs):\n    super().__init__(**kwargs)\n    self.set_paths(paths)\n    self.set_sizes(sizes)\n    self.stale = True",
    "docstring": "Parameters ---------- paths : list of The paths that will make up the . sizes : array-like The factor by which to scale each drawn . One unit squared in the Path's data space is scaled to be `.Collection`.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\collections.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:paths arg:sizes arguments arg arg arg arg Call Call Call Call Assign"
  },
  {
    "library": "scikit-learn",
    "name": "score",
    "source_code": "def score(self, X, y, sample_weight=None):\n    from .metrics import r2_score\n    y_pred = self.predict(X)\n    return r2_score(y, y_pred, sample_weight=sample_weight)",
    "docstring": "Return :ref: on test data. The coefficient of determination, :math:, is defined as :math:, where :math: is the residual sum of squares `vyR^2XR^2yR^2~sklearn.metrics.r2_score~sklearn.multioutput.MultiOutputRegressor`).",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\base.py",
    "ast_data": "FunctionDef name:score arg:self arg:X arg:y arg:sample_weight arguments arg arg arg arg Assign Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_contrib_layers_l1_regularizer_transformer",
    "source_code": "def _contrib_layers_l1_regularizer_transformer(parent, node, full_name, name, logs):\n    scope_keyword = None\n    for keyword in node.keywords:\n        if keyword.arg == 'scale':\n            logs.append((ast_edits.INFO, node.lineno, node.col_offset, 'Renaming scale arg of regularizer\\n'))\n            keyword.arg = 'l'\n        if keyword.arg == 'scope':\n            scope_keyword = keyword\n    if scope_keyword:\n        logs.append((ast_edits.INFO, node.lineno, node.col_offset, 'Dropping scope arg from tf.contrib.layers.l1_regularizer, because it is unsupported in tf.keras.regularizers.l1\\n'))\n        node.keywords.remove(scope_keyword)\n    if len(node.args) > 1:\n        node.args = node.args[:1]\n        logs.append((ast_edits.INFO, node.lineno, node.col_offset, 'Dropping scope arg from tf.contrib.layers.l1_regularizer, because it is unsupported in tf.keras.regularizers.l1\\n'))\n    lineno = node.func.value.lineno\n    col_offset = node.func.value.col_offset\n    node.func.value = ast_edits.full_name_node('tf.keras.regularizers')\n    node.func.value.lineno = lineno\n    node.func.value.col_offset = col_offset\n    node.func.attr = 'l1'\n    return node",
    "docstring": "Replace slim l1 regularizer with Keras one. This entails renaming the 'scale' arg to 'l' and dropping any provided scope arg.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\tools\\compatibility\\tf_upgrade_v2.py",
    "ast_data": "FunctionDef name:_contrib_layers_l1_regularizer_transformer arg:parent arg:node arg:full_name arg:name arg:logs arguments arg arg arg arg arg Assign For If Compare Call Assign If Compare Assign If Call Call If Compare Call Assign Call Assign Assign Assign Call Assign Assign Assign Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "bn2d_inference_rule",
    "source_code": "@register_inference_rule(BatchNorm2d)\ndef bn2d_inference_rule(n: Node, module_instance):\n    assert isinstance(n.args[0], Node)\n    n.args[0].type = expand_to_tensor_dim(n.args[0].type, 4)\n    arg_type = n.args[0].type\n    n.type = expand_to_tensor_dim(n.type, 4)\n    if is_consistent(arg_type.__args__[1], module_instance.num_features) and is_consistent(n.type.__args__[1], module_instance.num_features) and is_consistent(arg_type, n.type):\n        n.type = get_greatest_upper_bound(arg_type, n.type)\n        return n.type\n    else:\n        raise TypeError(f'Cannot apply {module_instance} with input type {arg_type} and existing type {n.type} on {n}')",
    "docstring": "Given a BatchNorm2D instance and a node check the following conditions: - the input type can be expanded to a size 4 tensor: t = (x_1, x_2, x_3, x_4) - the current node type can be expanded to a size 4 tensor: t' = (x_1', x_2', x_3', x_4') - t is consistent with t' - x_2 is consistent with the module's num_features - x_2' is consistent with the module's num_features output type: the more precise type of t and t'",
    "type": "function",
    "file_path": "pytorch\\torch\\fx\\experimental\\graph_gradual_typechecker.py",
    "ast_data": "FunctionDef name:bn2d_inference_rule arg:n arg:module_instance arguments arg arg Call Assign Call Assign Assign Call If BoolOp Call Call Call Assign Call Return return:yes Raise Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_tf_core_yield_flat_up_to",
    "source_code": "def _tf_core_yield_flat_up_to(shallow_tree, input_tree, is_nested_fn, path=()):\n    if not is_nested_fn(shallow_tree):\n        yield (path, input_tree)\n    else:\n        input_tree = dict(_tf_core_yield_sorted_items(input_tree))\n        for shallow_key, shallow_subtree in _tf_core_yield_sorted_items(shallow_tree):\n            subpath = path + (shallow_key,)\n            input_subtree = input_tree[shallow_key]\n            for leaf_path, leaf_value in _tf_core_yield_flat_up_to(shallow_subtree, input_subtree, is_nested_fn, path=subpath):\n                yield (leaf_path, leaf_value)",
    "docstring": "Yields (path, value) pairs of input_tree flattened up to shallow_tree. Args: shallow_tree: Nested structure. Traverse no further than its leaf nodes. input_tree: Nested structure. Return the paths and values from this tree. Must have the same upper structure as shallow_tree. is_nested_fn: Function used to test if a value should be treated as a nested structure. path: Tuple. Optional argument, only used when recursing. The path from the root of the original shallow_tree, down to the root of the shallow_tree arg of this recursive call. Yields: Pairs of (path, value), where path the tuple path of a leaf node in shallow_tree, and value is the value of the corresponding node in input_tree.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\util\\nest_util.py",
    "ast_data": "FunctionDef name:_tf_core_yield_flat_up_to arg:shallow_tree arg:input_tree arg:is_nested_fn arg:path arguments arg arg arg arg If Call Assign Call Call For Call Assign Assign For Call"
  },
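A self-contained sketch of the same "flatten up to" traversal using plain dicts; this is an illustration, not the TensorFlow helper itself (which handles arbitrary nested structures via is_nested_fn).

```python
def yield_flat_up_to(shallow_tree, input_tree, path=()):
    # Stop descending once shallow_tree bottoms out; keep whatever
    # subtree input_tree still has at that point.
    if not isinstance(shallow_tree, dict):
        yield path, input_tree
    else:
        for key in sorted(shallow_tree):  # mirrors the sorted-items walk
            yield from yield_flat_up_to(shallow_tree[key], input_tree[key],
                                        path + (key,))

shallow = {'a': 0, 'b': 0}
deep = {'a': {'x': 1}, 'b': [2, 3]}
print(list(yield_flat_up_to(shallow, deep)))
# [(('a',), {'x': 1}), (('b',), [2, 3])]
```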
  {
    "library": "matplotlib",
    "name": "setp",
    "source_code": "def setp(obj, *args, file=None, **kwargs):\n    if isinstance(obj, Artist):\n        objs = [obj]\n    else:\n        objs = list(cbook.flatten(obj))\n    if not objs:\n        return\n    insp = ArtistInspector(objs[0])\n    if not kwargs and len(args) < 2:\n        if args:\n            print(insp.pprint_setters(prop=args[0]), file=file)\n        else:\n            print('\\n'.join(insp.pprint_setters()), file=file)\n        return\n    if len(args) % 2:\n        raise ValueError('The set args must be string, value pairs')\n    funcvals = dict(zip(args[::2], args[1::2]))\n    ret = [o.update(funcvals) for o in objs] + [o.set(**kwargs) for o in objs]\n    return list(cbook.flatten(ret))",
    "docstring": "Set one or more properties on an , or list allowed values. Parameters ---------- obj : or list of The artist(s) whose properties are being set or queried. When setting properties, all artists are affected; when querying the allowed values, only the first instance in the sequence is queried. For example, two lines can be made thicker and red with a single call: >>> x = arange(0, 1, 0.01) >>> lines = plot(x, sin(2*pi*x), x, sin(4*pi*x)) >>> setp(lines, linewidth=2, color='r') file : file-like, default: Where writes its output when asked to list allowed values. >>> with open('output.log') as file: ... setp(line, file=file) The default, `sys.stdoutsetp` also supports MATLAB style string/value pairs. For example, the following are equivalent: >>> setp(lines, 'linewidth', 2, 'color', 'r') # MATLAB style >>> setp(lines, linewidth=2, color='r') # Python style See Also -------- getp",
    "type": "function",
    "file_path": "matplotlib\\lib\\matplotlib\\artist.py",
    "ast_data": "FunctionDef name:setp arg:obj arguments arg arg arg arg If Call Assign Assign Call Call If Return return:no Assign Call If BoolOp Compare Call If Call Call Call Call Call Return return:no If Call Raise Call Assign Call Call Assign Call Call Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "get_rng_state",
    "source_code": "def get_rng_state(device: Union[int, str, torch.device]='xpu') -> Tensor:\n    _lazy_init()\n    if isinstance(device, str):\n        device = torch.device(device)\n    elif isinstance(device, int):\n        device = torch.device('xpu', device)\n    idx = device.index\n    if idx is None:\n        idx = current_device()\n    default_generator = torch.xpu.default_generators[idx]\n    return default_generator.get_state()",
    "docstring": "Return the random number generator state of the specified GPU as a ByteTensor. Args: device (torch.device or int, optional): The device to return the RNG state of. Default: ``, the current XPU device). .. warning:: This function eagerly initializes XPU.",
    "type": "function",
    "file_path": "pytorch\\torch\\xpu\\random.py",
    "ast_data": "FunctionDef name:get_rng_state arg:device arguments arg Call If Call Assign Call If Call Assign Call Assign If Compare Assign Call Assign Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "finalize",
    "source_code": "def finalize(self, pool, offset) -> AllocationTreeNode:\n    return self",
    "docstring": "Called after all allocations have been made",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\codegen\\memory_planning.py",
    "ast_data": "FunctionDef name:finalize arg:self arg:pool arg:offset arguments arg arg arg Return return:yes"
  },
  {
    "library": "django",
    "name": "Group",
    "source_code": "class Group(list):\n    pass",
    "docstring": "Represent a capturing group in the pattern string.",
    "type": "class",
    "file_path": "django\\django\\utils\\regex_helper.py",
    "ast_data": "ClassDef name:Group"
  },
  {
    "library": "pandas",
    "name": "array",
    "source_code": "@property\ndef array(self):\n    raise ValueError(\"MultiIndex has no single backing array. Use 'MultiIndex.to_numpy()' to get a NumPy array of tuples.\")",
    "docstring": "Raises a ValueError for because there's no single array backing a MultiIndex. Raises ------ ValueError",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\indexes\\multi.py",
    "ast_data": "FunctionDef name:array arg:self arguments arg Raise Call"
  },
  {
    "library": "pytorch",
    "name": "_recursive_script_module_reducer",
    "source_code": "def _recursive_script_module_reducer(recursive_script_module):\n    if hasattr(recursive_script_module._c, 'module_rref'):\n        raise RuntimeError('Passing a script RemoteModule over RPC is not supported. Please create a RemoteModule in the sender, send the `module_rref` to the receiver, and create a new instance on the receiver end by passing this `module_rref`.')\n    f = io.BytesIO()\n    torch.jit.save(recursive_script_module, f)\n    return (_recursive_script_module_receiver, (f.getvalue(),))",
    "docstring": "Serialize a RecursiveScriptModule that does not contain a script RemoteModule, and raises an error otherwise.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\nn\\api\\remote_module.py",
    "ast_data": "FunctionDef name:_recursive_script_module_reducer arg:recursive_script_module arguments arg If Call Raise Call Assign Call Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_ResizeBicubicGrad",
    "source_code": "@ops.RegisterGradient('ResizeBicubic')\ndef _ResizeBicubicGrad(op: ops.Operation, grad):\n    allowed_types = [dtypes.float32, dtypes.float64]\n    grad0 = None\n    if op.inputs[0].dtype in allowed_types:\n        grad0 = gen_image_ops.resize_bicubic_grad(grad, op.inputs[0], align_corners=op.get_attr('align_corners'), half_pixel_centers=op.get_attr('half_pixel_centers'))\n    return [grad0, None]",
    "docstring": "The derivatives for bicubic resizing. Args: op: The ResizeBicubic op. grad: The tensor representing the gradient w.r.t. the output. Returns: The gradients w.r.t. the input.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\image_grad.py",
    "ast_data": "FunctionDef name:_ResizeBicubicGrad arg:op arg:grad arguments arg arg Assign Assign If Compare Assign Call Call Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "CancellationManagerContext",
    "source_code": "class CancellationManagerContext:\n\n    def __init__(self, cancellation_manager):\n        self._cancellation_manager = cancellation_manager\n\n    def __enter__(self):\n        global _active_context\n        _active_context = self._cancellation_manager\n\n    def __exit__(self, exc_type, exc_value, exc_tb):\n        global _active_context\n        _active_context = None",
    "docstring": "A Python context for wrapping a cancellable ConcreteFunction.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\cancellation.py",
    "ast_data": "ClassDef name:CancellationManagerContext FunctionDef name:__init__ arg:self arg:cancellation_manager arguments arg arg Assign FunctionDef name:__enter__ arg:self arguments arg Assign FunctionDef name:__exit__ arg:self arg:exc_type arg:exc_value arg:exc_tb arguments arg arg arg arg Assign"
  },
  {
    "library": "matplotlib",
    "name": "ToolFullScreen",
    "source_code": "class ToolFullScreen(ToolBase):\n    description = 'Toggle fullscreen mode'\n    default_keymap = property(lambda self: mpl.rcParams['keymap.fullscreen'])\n\n    def trigger(self, sender, event, data=None):\n        self.figure.canvas.manager.full_screen_toggle()",
    "docstring": "Tool to toggle full screen.",
    "type": "class",
    "file_path": "matplotlib\\lib\\matplotlib\\backend_tools.py",
    "ast_data": "ClassDef name:ToolFullScreen Assign Assign Call arguments arg FunctionDef name:trigger arg:self arg:sender arg:event arg:data arguments arg arg arg arg Call"
  },
  {
    "library": "scipy",
    "name": "FortranFormattingError",
    "source_code": "class FortranFormattingError(TypeError, OSError):\n    pass",
    "docstring": "Indicates that the file ended mid-record. Descends from TypeError for backward compatibility.",
    "type": "class",
    "file_path": "scipy\\scipy\\io\\_fortran.py",
    "ast_data": "ClassDef name:FortranFormattingError"
  },
  {
    "library": "tensorflow",
    "name": "_run_in_graph_client",
    "source_code": "def _run_in_graph_client(worker_fn, strategy, eval_fn, eval_strategy, cluster_spec, session_config, rpc_layer):\n    coord = coordinator.Coordinator()\n    eval_thread = None\n    if _TaskType.EVALUATOR in cluster_spec.jobs:\n        eval_thread = threading.Thread(target=_run_single_worker, args=(eval_fn, eval_strategy, cluster_spec, _TaskType.EVALUATOR, 0, session_config), kwargs={'rpc_layer': rpc_layer, 'coord': coord})\n        eval_thread.start()\n    worker_result = _run_single_worker(worker_fn, strategy, cluster_spec, None, None, session_config, rpc_layer=rpc_layer, coord=coord)\n    if eval_thread:\n        coord.join([eval_thread])\n    return worker_result",
    "docstring": "Runs a standalone client for in-graph replication.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\distribute_coordinator.py",
    "ast_data": "FunctionDef name:_run_in_graph_client arg:worker_fn arg:strategy arg:eval_fn arg:eval_strategy arg:cluster_spec arg:session_config arg:rpc_layer arguments arg arg arg arg arg arg arg Assign Call Assign If Compare Assign Call Call Assign Call If Call Return return:yes"
  },
  {
    "library": "kornia",
    "name": "piecewise_arange",
    "source_code": "def piecewise_arange(piecewise_idxer: Tensor) -> Tensor:\n    dv = piecewise_idxer.device\n    uni: Tensor\n    uni, counts = torch.unique_consecutive(piecewise_idxer, return_counts=True)\n    maxcnt = int(torch.max(counts).item())\n    numuni = uni.shape[0]\n    tmp = torch.zeros(size=(numuni, maxcnt), device=dv).bool()\n    ranges = torch.arange(maxcnt, device=dv).unsqueeze(0).expand(numuni, -1)\n    tmp[ranges < counts.unsqueeze(-1)] = True\n    return ranges[tmp]",
    "docstring": "Count repeated indices. Example: [0, 0, 0, 3, 3, 3, 3, 1, 1, 2] -> [0, 1, 2, 0, 1, 2, 3, 0, 1, 0]",
    "type": "function",
    "file_path": "kornia\\kornia\\feature\\adalam\\utils.py",
    "ast_data": "FunctionDef name:piecewise_arange arg:piecewise_idxer arguments arg Assign Assign Call Assign Call Call Call Assign Assign Call Call Assign Call Call Call Assign Compare Call Return return:yes"
  },
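A quick check against the example in the docstring (the import path is inferred from the file_path above):

```python
import torch
from kornia.feature.adalam.utils import piecewise_arange

idx = torch.tensor([0, 0, 0, 3, 3, 3, 3, 1, 1, 2])
print(piecewise_arange(idx))  # tensor([0, 1, 2, 0, 1, 2, 3, 0, 1, 0])
```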
  {
    "library": "pytorch",
    "name": "new_parameter_placeholder",
    "source_code": "def new_parameter_placeholder(size: tuple[int, ...], dtype: torch.dtype, device: torch.device, requires_grad: bool) -> torch.nn.Parameter:\n    result = torch.nn.Parameter(torch.empty(size, dtype=dtype, device=device), requires_grad=requires_grad)\n    result.untyped_storage().resize_(0)\n    return result",
    "docstring": "Create a placeholder to be passed to the above functions",
    "type": "function",
    "file_path": "pytorch\\torch\\_dynamo\\create_parameter_op.py",
    "ast_data": "FunctionDef name:new_parameter_placeholder arg:size arg:dtype arg:device arg:requires_grad arguments arg arg arg arg Assign Call Call Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "from_dtype",
    "source_code": "@classmethod\ndef from_dtype(cls, dtype: torch.dtype | None) -> JitScalarType:\n    if dtype not in _DTYPE_TO_SCALAR_TYPE:\n        raise errors.OnnxExporterError(f'Unknown dtype: {dtype}')\n    return _DTYPE_TO_SCALAR_TYPE[dtype]",
    "docstring": "Convert a torch dtype to JitScalarType. Note: DO NOT USE this API when comes from a calls. A \"RuntimeError: INTERNAL ASSERT FAILED at \"../aten/src/ATen/core/jit_type_base.h\" can be raised in several scenarios where shape info is not present. Instead use API which is safer. Args: dtype: A torch.dtype to create a JitScalarType from Returns: JitScalarType Raises: OnnxExporterError: if dtype is not a valid torch.dtype or if it is None.",
    "type": "method",
    "file_path": "pytorch\\torch\\onnx\\_type_utils.py",
    "ast_data": "FunctionDef name:from_dtype arg:cls arg:dtype arguments arg arg If Compare Raise Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "record_untuned_enable",
    "source_code": "def record_untuned_enable(val: bool=True) -> None:\n    torch._C._cuda_record_untuned_enable(val)",
    "docstring": "Enable recording untuned of TunableOp perations for offline tuning. When enabled, if a tuned entry isn't found, write it to the untuned file.",
    "type": "function",
    "file_path": "pytorch\\torch\\cuda\\tunable.py",
    "ast_data": "FunctionDef name:record_untuned_enable arg:val arguments arg Call"
  },
  {
    "library": "pytorch",
    "name": "add_metadata",
    "source_code": "def add_metadata(self, key: str, value: str):\n    wrapped_value = '\"' + value.replace('\"', '\\\\\"') + '\"'\n    torch.autograd._add_metadata_json(key, wrapped_value)",
    "docstring": "Adds a user defined metadata with a string key and a string value into the trace file",
    "type": "method",
    "file_path": "pytorch\\torch\\profiler\\profiler.py",
    "ast_data": "FunctionDef name:add_metadata arg:self arg:key arg:value arguments arg arg arg Assign Call Call"
  },
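A standalone sketch of the quoting rule used above: wrap the value in double quotes and escape any embedded quotes so the stored metadata remains a valid JSON string.

```python
def wrap(value: str) -> str:
    # Same transformation add_metadata applies before handing the value
    # to torch.autograd._add_metadata_json.
    return '"' + value.replace('"', '\\"') + '"'

print(wrap('say "hi"'))  # prints "say \"hi\""
```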
  {
    "library": "pytorch",
    "name": "collect_all",
    "source_code": "def collect_all(futures: list[Future]) -> Future[list[Future]]:\n    return cast(Future[list[Future]], torch._C._collect_all(cast(list[torch._C.Future], futures)))",
    "docstring": "Collects the provided :class: objects into a single combined :class: that is completed when all of the sub-futures are completed. Args: futures (list): a list of :class: objects. Returns: Returns a :class: object to a list of the passed in Futures. Example:: >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_FUTURES) >>> fut0 = torch.futures.Future() >>> fut1 = torch.futures.Future() >>> fut = torch.futures.collect_all([fut0, fut1]) >>> fut0.set_result(0) >>> fut1.set_result(1) >>> fut_list = fut.wait() >>> print(f\"fut0 result = {fut_list[0].wait()}\") fut0 result = 0 >>> print(f\"fut1 result = {fut_list[1].wait()}\") fut1 result = 1",
    "type": "function",
    "file_path": "pytorch\\torch\\futures\\__init__.py",
    "ast_data": "FunctionDef name:collect_all arg:futures arguments arg Return return:yes Call Call Call"
  },
  {
    "library": "scrapy",
    "name": "__init__",
    "source_code": "def __init__(self, stream_id: int, request: Request, protocol: H2ClientProtocol, download_maxsize: int=0, download_warnsize: int=0) -> None:\n    self.stream_id: int = stream_id\n    self._request: Request = request\n    self._protocol: H2ClientProtocol = protocol\n    self._download_maxsize = self._request.meta.get('download_maxsize', download_maxsize)\n    self._download_warnsize = self._request.meta.get('download_warnsize', download_warnsize)\n    self.metadata: dict[str, Any] = {'request_content_length': 0 if self._request.body is None else len(self._request.body), 'request_sent': False, 'reached_warnsize': False, 'remaining_content_length': 0 if self._request.body is None else len(self._request.body), 'stream_closed_local': False, 'stream_closed_server': False}\n    self._response: dict[str, Any] = {'body': BytesIO(), 'flow_controlled_size': 0, 'headers': Headers({})}\n\n    def _cancel(_: Any) -> None:\n        if self.metadata['request_sent']:\n            self.reset_stream(StreamCloseReason.CANCELLED)\n        else:\n            self.close(StreamCloseReason.CANCELLED)\n    self._deferred_response: Deferred[Response] = Deferred(_cancel)",
    "docstring": "Arguments: stream_id -- Unique identifier for the stream within a single HTTP/2 connection request -- The HTTP request associated to the stream protocol -- Parent H2ClientProtocol instance",
    "type": "method",
    "file_path": "scrapy\\scrapy\\core\\http2\\stream.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:stream_id arg:request arg:protocol arg:download_maxsize arg:download_warnsize arguments arg arg arg arg arg arg Assign Call Assign Call Compare Call Compare Call Call Call FunctionDef name:_cancel arg:_ arguments arg If Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "UnboundInstanceCache",
    "source_code": "class UnboundInstanceCache(_TransformedFnCache):\n\n    def _get_key(self, entity):\n        if inspect.ismethod(entity):\n            return entity.__func__\n        return entity",
    "docstring": "A function cache based on unbound function objects. Using the function for the cache key allows efficient handling of object methods. Unlike the _CodeObjectCache, this discriminates between different functions even if they have the same code. This is needed for decorators that may masquerade as another function.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\autograph\\pyct\\cache.py",
    "ast_data": "ClassDef name:UnboundInstanceCache FunctionDef name:_get_key arg:self arg:entity arguments arg arg If Call Return return:yes Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "is_in_keras_graph",
    "source_code": "def is_in_keras_graph():\n    return call_context().in_keras_graph",
    "docstring": "Returns if currently executing inside of a Keras graph.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\base_layer_utils.py",
    "ast_data": "FunctionDef name:is_in_keras_graph arguments Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "_fixed_default_rng",
    "source_code": "@contextmanager\ndef _fixed_default_rng(seed=1638083107694713882823079058616272161):\n    orig_fun = np.random.default_rng\n    np.random.default_rng = lambda seed=seed: orig_fun(seed)\n    try:\n        yield\n    finally:\n        np.random.default_rng = orig_fun",
    "docstring": "Context with a fixed np.random.default_rng seed.",
    "type": "function",
    "file_path": "scipy\\scipy\\_lib\\_util.py",
    "ast_data": "FunctionDef name:_fixed_default_rng arg:seed arguments arg Assign Assign arguments arg Call Try Assign"
  },
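A hedged usage sketch (scipy._lib._util is private, so the import path is an assumption): inside the context, calling np.random.default_rng() with no argument always uses the fixed seed, so fresh generators produce identical streams.

```python
import numpy as np
from scipy._lib._util import _fixed_default_rng  # private helper, assumed path

with _fixed_default_rng():
    a = np.random.default_rng().random(3)  # no argument -> the fixed seed
    b = np.random.default_rng().random(3)
print(np.array_equal(a, b))  # True
```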
  {
    "library": "authlib",
    "name": "import_key",
    "source_code": "@classmethod\ndef import_key(cls, raw, options=None):\n    if isinstance(raw, cls):\n        if options is not None:\n            raw.options.update(options)\n        return raw\n    if isinstance(raw, dict):\n        cls.check_required_fields(raw)\n        key = cls(options=options)\n        key._dict_data = raw\n    else:\n        raw_key = to_bytes(raw)\n        if raw_key.startswith(POSSIBLE_UNSAFE_KEYS):\n            raise ValueError('This key may not be safe to import')\n        key = cls(raw_key=raw_key, options=options)\n    return key",
    "docstring": "Import a key from bytes, string, or dict data.",
    "type": "method",
    "file_path": "authlib\\authlib\\jose\\rfc7518\\oct_key.py",
    "ast_data": "FunctionDef name:import_key arg:cls arg:raw arg:options arguments arg arg arg If Call If Compare Call Return return:yes If Call Call Assign Call Assign Assign Call If Call Raise Call Assign Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_get_observer_dict",
    "source_code": "def _get_observer_dict(mod, target_dict, prefix=''):\n\n    def get_prefix(prefix):\n        return prefix if prefix == '' else prefix + '.'\n    if hasattr(mod, 'activation_post_process'):\n        target_dict[get_prefix(prefix) + 'activation_post_process'] = mod.activation_post_process\n    for name, child in mod.named_children():\n        module_prefix = get_prefix(prefix) + name if prefix else name\n        _get_observer_dict(child, target_dict, module_prefix)",
    "docstring": "Traverse the modules and save all observers into dict. This is mainly used for quantization accuracy debug Args: mod: the top module we want to save all observers prefix: the prefix for the current module target_dict: the dictionary used to save all the observers",
    "type": "function",
    "file_path": "pytorch\\torch\\ao\\quantization\\quantize.py",
    "ast_data": "FunctionDef name:_get_observer_dict arg:mod arg:target_dict arg:prefix arguments arg arg arg FunctionDef name:get_prefix arg:prefix arguments arg Return return:yes Compare If Call Assign Call For Call Assign Call Call"
  },
  {
    "library": "tensorflow",
    "name": "error",
    "source_code": "def error(msg):\n    return debugger_cli_common.rich_text_lines_from_rich_line_list([RL('ERROR: ' + msg, COLOR_RED)])",
    "docstring": "Generate a RichTextLines output for error. Args: msg: (str) The error message. Returns: (debugger_cli_common.RichTextLines) A representation of the error message for screen output.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\debug\\cli\\cli_shared.py",
    "ast_data": "FunctionDef name:error arg:msg arguments arg Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "vmap",
    "source_code": "@exposed_in('torch.func')\ndef vmap(func: Callable, in_dims: in_dims_t=0, out_dims: out_dims_t=0, randomness: str='error', *, chunk_size=None) -> Callable:\n    from torch._dynamo import is_compiling\n    _check_randomness_arg(randomness)\n    if not (chunk_size is None or chunk_size > 0):\n        raise ValueError(f'vmap: chunk_size should be None or greater than 0. (got {chunk_size})')\n\n    def wrapped(*args, **kwargs):\n        return vmap_impl(func, in_dims, out_dims, randomness, chunk_size, *args, **kwargs)\n    if not is_compiling():\n        wrapped = functools.wraps(func)(wrapped)\n    return wrapped",
    "docstring": "vmap is the vectorizing map; `torch.vmaptorch.func.vmapchunk_sizechunk_size=1vmapvmapvmapvmapvmapvmapvmap` >>> f = lambda x: x ** 2 >>> x = torch.randn(2, 5) >>> batched_pow = torch.vmap(f, out_dims=1) >>> batched_pow(x) # [5, 2] For any function that uses kwargs, the returned function will not batch the kwargs but will accept kwargs >>> x = torch.randn([2, 5]) >>> def fn(x, scale=4.): >>> return x * scale >>> >>> batched_pow = torch.vmap(fn) >>> assert torch.allclose(batched_pow(x), x * 4) >>> batched_pow(x, scale=x) # scale is not batched, output has shape [2, 2, 5] .. note:: vmap does not provide general autobatching or handle variable-length sequences out of the box.",
    "type": "function",
    "file_path": "pytorch\\torch\\_functorch\\apis.py",
    "ast_data": "FunctionDef name:vmap arg:func arg:in_dims arg:out_dims arg:randomness arguments arg arg arg arg arg Call If BoolOp Compare Compare Raise Call FunctionDef name:wrapped arguments arg arg Return return:yes Call If Call Assign Call Call Return return:yes Call"
  },
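A minimal sketch of the basic contract: vmap lifts a per-example function to operate over the leading dimension of its inputs.

```python
import torch

def dot(a, b):
    return (a * b).sum()

xs, ys = torch.randn(8, 3), torch.randn(8, 3)
out = torch.vmap(dot)(xs, ys)                     # batched over dim 0
print(out.shape)                                  # torch.Size([8])
print(torch.allclose(out, (xs * ys).sum(dim=1)))  # True
```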
  {
    "library": "pandas",
    "name": "_build_option_description",
    "source_code": "def _build_option_description(k: str) -> str:\n    o = _get_registered_option(k)\n    d = _get_deprecated_option(k)\n    s = f'{k} '\n    if o.doc:\n        s += '\\n'.join(o.doc.strip().split('\\n'))\n    else:\n        s += 'No description available.'\n    if o:\n        with warnings.catch_warnings():\n            warnings.simplefilter('ignore', FutureWarning)\n            warnings.simplefilter('ignore', DeprecationWarning)\n            s += f'\\n    [default: {o.defval}] [currently: {get_option(k)}]'\n    if d:\n        rkey = d.rkey or ''\n        s += '\\n    (Deprecated'\n        s += f', use `{rkey}` instead.'\n        s += ')'\n    return s",
    "docstring": "Builds a formatted description of a registered option and prints it",
    "type": "function",
    "file_path": "pandas\\pandas\\_config\\config.py",
    "ast_data": "FunctionDef name:_build_option_description arg:k arguments arg Assign Call Assign Call Assign If Call Call Call If With Call Call Call Call If Assign BoolOp Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "sctype_from_string",
    "source_code": "def sctype_from_string(s):\n    if s in _names:\n        return _names[s]\n    if s in _name_aliases.keys():\n        return _name_aliases[s]\n    if s in _typecodes:\n        return _typecodes[s]\n    if s in _aliases:\n        return _aliases[s]\n    if s in _python_types:\n        return _python_types[s]\n    raise TypeError(f'data type {s!r} not understood')",
    "docstring": "Normalize a string value: a type 'name' or a typecode or a width alias.",
    "type": "function",
    "file_path": "pytorch\\torch\\_numpy\\_dtypes.py",
    "ast_data": "FunctionDef name:sctype_from_string arg:s arguments arg If Compare Return return:yes If Compare Call Return return:yes If Compare Return return:yes If Compare Return return:yes If Compare Return return:yes Raise Call"
  },
  {
    "library": "pytorch",
    "name": "is_rel_with_deb_info",
    "source_code": "def is_rel_with_deb_info(self) -> bool:\n    return self.build_type_string == 'RelWithDebInfo'",
    "docstring": "Checks RelWithDebInfo build.",
    "type": "method",
    "file_path": "pytorch\\tools\\setup_helpers\\env.py",
    "ast_data": "FunctionDef name:is_rel_with_deb_info arg:self arguments arg Return return:yes Compare"
  },
  {
    "library": "django",
    "name": "close",
    "source_code": "def close(self):\n    self._producer = []",
    "docstring": "Used to invalidate/disable this lazy stream. Replace the producer with an empty list. Any leftover bytes that have already been read will still be reported upon read() and/or next().",
    "type": "method",
    "file_path": "django\\django\\http\\multipartparser.py",
    "ast_data": "FunctionDef name:close arg:self arguments arg Assign"
  },
  {
    "library": "matplotlib",
    "name": "__init__",
    "source_code": "def __init__(self, marker_pad=0.3, numpoints=None, bottom=None, yoffsets=None, **kwargs):\n    super().__init__(marker_pad=marker_pad, numpoints=numpoints, yoffsets=yoffsets, **kwargs)\n    self._bottom = bottom",
    "docstring": "Parameters ---------- marker_pad : float, default: 0.3 Padding between points in legend entry. numpoints : int, optional Number of points to show in legend entry. bottom : float, optional yoffsets : array of floats, optional Length *numpoints* list of y offsets for each point in legend entry. **kwargs Keyword arguments forwarded to .",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\legend_handler.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:marker_pad arg:numpoints arg:bottom arg:yoffsets arguments arg arg arg arg arg arg Call Call Assign"
  },
  {
    "library": "django",
    "name": "references_model",
    "source_code": "def references_model(self, name, app_label):\n    return True",
    "docstring": "Return True if there is a chance this operation references the given model name (as a string), with an app label for accuracy. Used for optimization. If in doubt, return True; returning a false positive will merely make the optimizer a little less efficient, while returning a false negative may result in an unusable optimized migration.",
    "type": "method",
    "file_path": "django\\django\\db\\migrations\\operations\\base.py",
    "ast_data": "FunctionDef name:references_model arg:self arg:name arg:app_label arguments arg arg arg Return return:yes"
  },
  {
    "library": "django",
    "name": "create",
    "source_code": "def create(self):\n    self.modified = True",
    "docstring": "To create a new key, set the modified flag so that the cookie is set on the client for the current request.",
    "type": "method",
    "file_path": "django\\django\\contrib\\sessions\\backends\\signed_cookies.py",
    "ast_data": "FunctionDef name:create arg:self arguments arg Assign"
  },
  {
    "library": "scikit-learn",
    "name": "get_precision",
    "source_code": "def get_precision(self):\n    check_is_fitted(self)\n    n_features = self.components_.shape[1]\n    if self.n_components == 0:\n        return np.diag(1.0 / self.noise_variance_)\n    if self.n_components == n_features:\n        return linalg.inv(self.get_covariance())\n    components_ = self.components_\n    precision = np.dot(components_ / self.noise_variance_, components_.T)\n    precision.flat[::len(precision) + 1] += 1.0\n    precision = np.dot(components_.T, np.dot(linalg.inv(precision), components_))\n    precision /= self.noise_variance_[:, np.newaxis]\n    precision /= -self.noise_variance_[np.newaxis, :]\n    precision.flat[::len(precision) + 1] += 1.0 / self.noise_variance_\n    return precision",
    "docstring": "Compute data precision matrix with the FactorAnalysis model. Returns ------- precision : ndarray of shape (n_features, n_features) Estimated precision of data.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\decomposition\\_factor_analysis.py",
    "ast_data": "FunctionDef name:get_precision arg:self arguments arg Call Assign If Compare Return return:yes Call If Compare Return return:yes Call Call Assign Assign Call Call Assign Call Call Call Call Return return:yes"
  },
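A hedged numerical check of the shortcut above: the matrix-inversion-lemma route should agree with directly inverting the model covariance.

```python
import numpy as np
from sklearn.decomposition import FactorAnalysis

rng = np.random.default_rng(0)
X = rng.normal(size=(200, 6))
fa = FactorAnalysis(n_components=2, random_state=0).fit(X)
# Woodbury-style precision vs. a direct inverse of the covariance.
print(np.allclose(fa.get_precision(), np.linalg.inv(fa.get_covariance())))  # True
```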
  {
    "library": "pytorch",
    "name": "configure",
    "source_code": "def configure(timer_client: TimerClient):\n    global _timer_client\n    _timer_client = timer_client\n    logger.info('Timer client configured to: %s', type(_timer_client).__name__)",
    "docstring": "Configures a timer client. Must be called before using ``.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\elastic\\timer\\api.py",
    "ast_data": "FunctionDef name:configure arg:timer_client arguments arg Assign Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_callable_template_with_options_and_metadata",
    "source_code": "def _callable_template_with_options_and_metadata(fetch_list, target_list, fetch_handler, options=None, run_metadata=None):\n    options_ptr = tf_session.TF_NewBufferFromString(compat.as_bytes(options.SerializeToString())) if options else None\n    run_metadata_ptr = tf_session.TF_NewBuffer() if run_metadata else None\n    try:\n        results = self._call_tf_sessionrun(options_ptr, {}, fetch_list, target_list, run_metadata_ptr)\n        if fetch_handler:\n            results = fetch_handler.build_results(self, results)\n        else:\n            results = results[0] if results else None\n        if run_metadata:\n            proto_data = tf_session.TF_GetBuffer(run_metadata_ptr)\n            run_metadata.ParseFromString(compat.as_bytes(proto_data))\n    finally:\n        if run_metadata_ptr:\n            tf_session.TF_DeleteBuffer(run_metadata_ptr)\n        if options:\n            tf_session.TF_DeleteBuffer(options_ptr)\n    return results",
    "docstring": "Template callable that accepts RunOptions and RunMetadata.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\client\\session.py",
    "ast_data": "FunctionDef name:_callable_template_with_options_and_metadata arg:fetch_list arg:target_list arg:fetch_handler arg:options arg:run_metadata arguments arg arg arg arg arg Assign Call Call Call Assign Call Try Assign Call If Assign Call Assign If Assign Call Call Call If Call If Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_shared_name",
    "source_code": "@property\ndef _shared_name(self):\n    return self.name[:self.name.index(':')]",
    "docstring": "The shared name of the variable. Unlike name(), shared_name doesn't have \":0\" suffix. It is user-specified name with name scope prefix. Returns: variable name.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\variables.py",
    "ast_data": "FunctionDef name:_shared_name arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "OpTypes",
    "source_code": "@dataclass\nclass OpTypes:\n    fusible_ops: OrderedSet[Callable]\n    compute_intensive_ops: OrderedSet[Callable]\n    random_ops: OrderedSet[Callable]\n    view_ops: OrderedSet[Callable]\n    recomputable_ops: OrderedSet[Callable]\n\n    def is_fusible(self, node: fx.Node):\n        return get_aten_target(node) in self.fusible_ops\n\n    def is_compute_intensive(self, node: fx.Node):\n        return get_aten_target(node) in self.compute_intensive_ops\n\n    def is_random(self, node: fx.Node):\n        return get_aten_target(node) in self.random_ops\n\n    def is_view(self, node: fx.Node):\n        return get_aten_target(node) in self.view_ops\n\n    def is_recomputable(self, node: fx.Node):\n        return get_aten_target(node) in self.recomputable_ops",
    "docstring": "Class for keeping track of different operator categories",
    "type": "class",
    "file_path": "pytorch\\torch\\_functorch\\partitioners.py",
    "ast_data": "ClassDef name:OpTypes FunctionDef name:is_fusible arg:self arg:node arguments arg arg Return return:yes Compare Call FunctionDef name:is_compute_intensive arg:self arg:node arguments arg arg Return return:yes Compare Call FunctionDef name:is_random arg:self arg:node arguments arg arg Return return:yes Compare Call FunctionDef name:is_view arg:self arg:node arguments arg arg Return return:yes Compare Call FunctionDef name:is_recomputable arg:self arg:node arguments arg arg Return return:yes Compare Call"
  },
  {
    "library": "pytorch",
    "name": "worker_main",
    "source_code": "@record\n@contextmanager\ndef worker_main() -> Generator[None, None, None]:\n    with ExitStack() as stack:\n        socket_path = os.environ.get(TORCH_WORKER_SERVER_SOCKET)\n        if socket_path is not None:\n            stack.enter_context(_worker_server(socket_path))\n        yield",
    "docstring": "This is a context manager that wraps your main entry function. This combines the existing ``. Example :: @worker_main() def main(): pass if __name__ == \"__main__\": main()",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\elastic\\control_plane.py",
    "ast_data": "FunctionDef name:worker_main arguments With Call Assign Call If Compare Call Call"
  },
  {
    "library": "kornia",
    "name": "score",
    "source_code": "@property\ndef score(self) -> torch.Tensor:\n    return self._data[..., 14]",
    "docstring": "The detection score.",
    "type": "method",
    "file_path": "kornia\\kornia\\contrib\\face_detection.py",
    "ast_data": "FunctionDef name:score arg:self arguments arg Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "_mark_if_deprecated",
    "source_code": "def _mark_if_deprecated(self, option):\n    option_str = f'{option!r}'\n    if option in self.deprecated:\n        option_str = f'{option_str} (deprecated)'\n    return option_str",
    "docstring": "Add a deprecated mark to an option if needed.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\utils\\_param_validation.py",
    "ast_data": "FunctionDef name:_mark_if_deprecated arg:self arg:option arguments arg arg Assign If Compare Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_record_and_ignore_transient_timeouts",
    "source_code": "def _record_and_ignore_transient_timeouts(self, e):\n    if self._transient_timeouts_threshold <= 0:\n        return False\n    if not isinstance(e, errors.DeadlineExceededError):\n        return False\n    with self._transient_timeouts_lock:\n        self._transient_timeouts_count += 1\n        if self._transient_timeouts_count >= self._transient_timeouts_threshold:\n            return False\n    return True",
    "docstring": "Records observed timeout error and return if it should be ignored.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\coordinator\\cluster_coordinator.py",
    "ast_data": "FunctionDef name:_record_and_ignore_transient_timeouts arg:self arg:e arguments arg arg If Compare Return return:yes If Call Return return:yes With If Compare Return return:yes Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "supported_float_dtypes",
    "source_code": "def supported_float_dtypes(xp):\n    if hasattr(xp, 'float16'):\n        return (xp.float64, xp.float32, xp.float16)\n    else:\n        return (xp.float64, xp.float32)",
    "docstring": "Supported floating point types for the namespace. Note: float16 is not officially part of the Array API spec at the time of writing but scikit-learn estimators and functions can choose to accept it when xp.float16 is defined.",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\utils\\_array_api.py",
    "ast_data": "FunctionDef name:supported_float_dtypes arg:xp arguments arg If Call Return return:yes Return return:yes"
  },
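A quick sketch (sklearn.utils._array_api is private, so the import path is an assumption); plain NumPy defines float16, so all three dtypes come back:

```python
import numpy as np
from sklearn.utils._array_api import supported_float_dtypes  # private helper

print(supported_float_dtypes(np))  # (numpy.float64, numpy.float32, numpy.float16)
```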
  {
    "library": "scikit-learn",
    "name": "get_precision",
    "source_code": "def get_precision(self):\n    if self.store_precision:\n        precision = self.precision_\n    else:\n        precision = linalg.pinvh(self.covariance_, check_finite=False)\n    return precision",
    "docstring": "Getter for the precision matrix. Returns ------- precision_ : array-like of shape (n_features, n_features) The precision matrix associated to the current covariance object.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\covariance\\_empirical_covariance.py",
    "ast_data": "FunctionDef name:get_precision arg:self arguments arg If Assign Assign Call Return return:yes"
  },
  {
    "library": "cherrypy",
    "name": "modules",
    "source_code": "def modules(modulePath):\n    __import__(modulePath)\n    return sys.modules[modulePath]",
    "docstring": "Load a module and retrieve a reference to that module.",
    "type": "function",
    "file_path": "cherrypy\\cherrypy\\lib\\reprconf.py",
    "ast_data": "FunctionDef name:modules arg:modulePath arguments arg Call Return return:yes"
  },
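A hedged usage sketch (assumes cherrypy is installed): the helper imports by dotted path and returns the module object recorded in sys.modules.

```python
from cherrypy.lib.reprconf import modules

json_mod = modules('json')        # import by dotted path, return the module
print(json_mod.dumps({'ok': 1}))  # {"ok": 1}
```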
  {
    "library": "django",
    "name": "resolve_request",
    "source_code": "def resolve_request(self, request):\n    if hasattr(request, 'urlconf'):\n        urlconf = request.urlconf\n        set_urlconf(urlconf)\n        resolver = get_resolver(urlconf)\n    else:\n        resolver = get_resolver()\n    resolver_match = resolver.resolve(request.path_info)\n    request.resolver_match = resolver_match\n    return resolver_match",
    "docstring": "Retrieve/set the urlconf for the request. Return the view resolved, with its args and kwargs.",
    "type": "method",
    "file_path": "django\\django\\core\\handlers\\base.py",
    "ast_data": "FunctionDef name:resolve_request arg:self arg:request arguments arg arg If Call Assign Call Assign Call Assign Call Assign Call Assign Return return:yes"
  },
  {
    "library": "sphinx",
    "name": "desc_sig_element",
    "source_code": "class desc_sig_element(nodes.inline, _desc_classes_injector):\n    classes: list[str] = []\n\n    def __init__(self, rawsource: str='', text: str='', *children: Element, **attributes: Any) -> None:\n        super().__init__(rawsource, text, *children, **attributes)\n        self['classes'].extend(self.classes)\n\n    def __init_subclass__(cls, *, _sig_element: bool=False, **kwargs: Any) -> None:\n        super().__init_subclass__(**kwargs)\n        if _sig_element:\n            SIG_ELEMENTS.add(cls)",
    "docstring": "Common parent class of nodes for inline text of a signature.",
    "type": "class",
    "file_path": "sphinx\\sphinx\\addnodes.py",
    "ast_data": "ClassDef name:desc_sig_element FunctionDef name:__init__ arg:self arg:rawsource arg:text arguments arg arg arg arg arg Call Call Call FunctionDef name:__init_subclass__ arg:cls arguments arg arg arg Call Call If Call"
  },
  {
    "library": "scipy",
    "name": "_nonmonotone_line_search_cheng",
    "source_code": "def _nonmonotone_line_search_cheng(f, x_k, d, f_k, C, Q, eta, gamma=0.0001, tau_min=0.1, tau_max=0.5, nu=0.85):\n    alpha_p = 1\n    alpha_m = 1\n    alpha = 1\n    while True:\n        xp = x_k + alpha_p * d\n        fp, Fp = f(xp)\n        if fp <= C + eta - gamma * alpha_p ** 2 * f_k:\n            alpha = alpha_p\n            break\n        alpha_tp = alpha_p ** 2 * f_k / (fp + (2 * alpha_p - 1) * f_k)\n        xp = x_k - alpha_m * d\n        fp, Fp = f(xp)\n        if fp <= C + eta - gamma * alpha_m ** 2 * f_k:\n            alpha = -alpha_m\n            break\n        alpha_tm = alpha_m ** 2 * f_k / (fp + (2 * alpha_m - 1) * f_k)\n        alpha_p = np.clip(alpha_tp, tau_min * alpha_p, tau_max * alpha_p)\n        alpha_m = np.clip(alpha_tm, tau_min * alpha_m, tau_max * alpha_m)\n    Q_next = nu * Q + 1\n    C = (nu * Q * (C + eta) + fp) / Q_next\n    Q = Q_next\n    return (alpha, xp, fp, Fp, C, Q)",
    "docstring": "Nonmonotone line search from [1] Parameters ---------- f : callable Function returning a tuple `` the residual. x_k : ndarray Initial position. d : ndarray Search direction. f_k : float Initial merit function value. C, Q : float Control parameters. On the first iteration, give values Q=1.0, C=f_k eta : float Allowed merit function increase, see [1]_ nu, gamma, tau_min, tau_max : float, optional Search parameters, see [1]_ Returns ------- alpha : float Step length xp : ndarray Next position fp : float Merit function value at next position Fp : ndarray Residual at next position C : float New value for the control parameter C Q : float New value for the control parameter Q References ---------- .. [1] W. Cheng & D.-H. Li, ''A derivative-free nonmonotone line search and its application to the spectral residual method'', IMA J. Numer. Anal. 29, 814 (2009).",
    "type": "function",
    "file_path": "scipy\\scipy\\optimize\\_linesearch.py",
    "ast_data": "FunctionDef name:_nonmonotone_line_search_cheng arg:f arg:x_k arg:d arg:f_k arg:C arg:Q arg:eta arg:gamma arg:tau_min arg:tau_max arg:nu arguments arg arg arg arg arg arg arg arg arg arg arg Assign Assign Assign While Assign Assign Call If Compare Assign Assign Assign Assign Call If Compare Assign Assign Assign Call Assign Call Assign Assign Assign Return return:yes"
  },
  {
    "library": "scipy",
    "name": "clear_cache",
    "source_code": "def clear_cache(datasets=None):\n    _clear_cache(datasets)",
    "docstring": "Cleans the scipy datasets cache directory. If a scipy.datasets method or a list/tuple of the same is provided, then clear_cache removes all the data files associated to the passed dataset method callable(s). By default, it removes all the cached data files. Parameters ---------- datasets : callable or list/tuple of callable or None Examples -------- >>> from scipy import datasets >>> ascent_array = datasets.ascent() >>> ascent_array.shape (512, 512) >>> datasets.clear_cache([datasets.ascent]) Cleaning the file ascent.dat for dataset ascent",
    "type": "function",
    "file_path": "scipy\\scipy\\datasets\\_utils.py",
    "ast_data": "FunctionDef name:clear_cache arg:datasets arguments arg Call"
  },
  {
    "library": "pytorch",
    "name": "CELU",
    "source_code": "class CELU(Module):\n    __constants__ = ['alpha', 'inplace']\n    alpha: float\n    inplace: bool\n\n    def __init__(self, alpha: float=1.0, inplace: bool=False) -> None:\n        super().__init__()\n        self.alpha = alpha\n        self.inplace = inplace\n\n    def forward(self, input: Tensor) -> Tensor:\n        return F.celu(input, self.alpha, self.inplace)\n\n    def extra_repr(self) -> str:\n        inplace_str = ', inplace=True' if self.inplace else ''\n        return f'alpha={self.alpha}{inplace_str}'",
    "docstring": "Applies the CELU function element-wise. .. math:: \\text{CELU}(x) = \\max(0,x) + \\min(0, \\alpha * (\\exp(x/\\alpha) - 1)) More details can be found in the paper _ . Args: alpha: the :math: value for the CELU formulation. Default: 1.0 inplace: can optionally do the operation in-place. Default: `(*)*(*)Continuously Differentiable Exponential Linear Units`:",
    "type": "class",
    "file_path": "pytorch\\torch\\nn\\modules\\activation.py",
    "ast_data": "ClassDef name:CELU Assign FunctionDef name:__init__ arg:self arg:alpha arg:inplace arguments arg arg arg Call Call Assign Assign FunctionDef name:forward arg:self arg:input arguments arg arg Return return:yes Call FunctionDef name:extra_repr arg:self arguments arg Assign Return return:yes"
  },
  {
    "library": "pandas",
    "name": "_get_listlike_indexer",
    "source_code": "def _get_listlike_indexer(self, key, axis: AxisInt):\n    ax = self.obj._get_axis(axis)\n    axis_name = self.obj._get_axis_name(axis)\n    keyarr, indexer = ax._get_indexer_strict(key, axis_name)\n    return (keyarr, indexer)",
    "docstring": "Transform a list-like of keys into a new index and an indexer. Parameters ---------- key : list-like Targeted labels. axis: int Dimension on which the indexing is being made. Raises ------ KeyError If at least one key was requested but none was found. Returns ------- keyarr: Index New index (coinciding with 'key' if the axis is unique). values : array-like Indexer for the return object, -1 denotes keys not found.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\indexing.py",
    "ast_data": "FunctionDef name:_get_listlike_indexer arg:self arg:key arg:axis arguments arg arg arg Assign Call Assign Call Assign Call Return return:yes"
  },
  {
    "library": "pygame",
    "name": "set_bold",
    "source_code": "def set_bold(self, value):\n    self.wide = bool(value)",
    "docstring": "set_bold(bool) -> None enable fake rendering of bold text",
    "type": "method",
    "file_path": "pygame\\src_py\\ftfont.py",
    "ast_data": "FunctionDef name:set_bold arg:self arg:value arguments arg arg Assign Call"
  },
  {
    "library": "tensorflow",
    "name": "AveragePooling2D",
    "source_code": "class AveragePooling2D(keras_layers.AveragePooling2D, base.Layer):\n\n    def __init__(self, pool_size, strides, padding='valid', data_format='channels_last', name=None, **kwargs):\n        if strides is None:\n            raise ValueError('Argument `strides` must not be None.')\n        super(AveragePooling2D, self).__init__(pool_size=pool_size, strides=strides, padding=padding, data_format=data_format, name=name, **kwargs)",
    "docstring": "Average pooling layer for 2D inputs (e.g. images). Args: pool_size: An integer or tuple/list of 2 integers: (pool_height, pool_width) specifying the size of the pooling window. Can be a single integer to specify the same value for all spatial dimensions. strides: An integer or tuple/list of 2 integers, specifying the strides of the pooling operation. Can be a single integer to specify the same value for all spatial dimensions. padding: A string. The padding method, either 'valid' or 'same'. Case-insensitive. data_format: A string. The ordering of the dimensions in the inputs. (default) and are supported. corresponds to inputs with shape while corresponds to inputs with shape . name: A string, the name of the layer.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\legacy_tf_layers\\pooling.py",
    "ast_data": "ClassDef name:AveragePooling2D FunctionDef name:__init__ arg:self arg:pool_size arg:strides arg:padding arg:data_format arg:name arguments arg arg arg arg arg arg arg If Compare Raise Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_ControlOutputCache",
    "source_code": "class _ControlOutputCache(object):\n    __slots__ = ['cache']\n\n    def __init__(self):\n        self.cache = {}\n\n    def calc_control_outputs(self, graph):\n        control_outputs = {}\n        for op in graph.get_operations():\n            for control_input in op.control_inputs:\n                if control_input not in control_outputs:\n                    control_outputs[control_input] = set()\n                control_outputs[control_input].add(op)\n        return control_outputs\n\n    def get_control_outputs(self, op):\n        if op.graph not in self.cache:\n            control_outputs = self.calc_control_outputs(op.graph)\n            self.cache[op.graph] = control_outputs\n        else:\n            control_outputs = self.cache[op.graph]\n        return control_outputs.get(op, [])",
    "docstring": "Helper class to manage calculating and caching control_outputs in graph.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\subscribe.py",
    "ast_data": "ClassDef name:_ControlOutputCache Assign FunctionDef name:__init__ arg:self arguments arg Assign FunctionDef name:calc_control_outputs arg:self arg:graph arguments arg arg Assign For Call For If Compare Assign Call Call Return return:yes FunctionDef name:get_control_outputs arg:self arg:op arguments arg arg If Compare Assign Call Assign Assign Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "from_config",
    "source_code": "@classmethod\ndef from_config(cls, config, custom_objects=None, columns_by_name=None):\n    _check_config_keys(config, cls._fields)\n    kwargs = _standardize_and_copy_config(config)\n    kwargs['dtype'] = dtypes.as_dtype(config['dtype'])\n    return cls(**kwargs)",
    "docstring": "See 'FeatureColumn` base class.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\feature_column\\feature_column_v2.py",
    "ast_data": "FunctionDef name:from_config arg:cls arg:config arg:custom_objects arg:columns_by_name arguments arg arg arg arg Call Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "get_xla_sharding",
    "source_code": "def get_xla_sharding(var: BaseResourceVariable) -> Any:\n    return var._get_xla_sharding()",
    "docstring": "Returns the XLA sharding associated with the variable.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\resource_variable_ops.py",
    "ast_data": "FunctionDef name:get_xla_sharding arg:var arguments arg Return return:yes Call"
  },
  {
    "library": "numpy",
    "name": "download_wheels",
    "source_code": "def download_wheels(version, wheelhouse, test=False):\n    http = urllib3.PoolManager(cert_reqs='CERT_REQUIRED')\n    wheel_names = get_wheel_names(version)\n    for i, wheel_name in enumerate(wheel_names):\n        wheel_url = f'{FILES_URL}/{version}/download/{wheel_name}'\n        wheel_path = os.path.join(wheelhouse, wheel_name)\n        with open(wheel_path, 'wb') as f:\n            with http.request('GET', wheel_url, preload_content=False) as r:\n                info = r.info()\n                length = int(info.get('Content-Length', '0'))\n                if length == 0:\n                    length = 'unknown size'\n                else:\n                    length = f'{length / 1024 / 1024:.2f}MB'\n                print(f'{i + 1:<4}{wheel_name} {length}')\n                if not test:\n                    shutil.copyfileobj(r, f)\n    print(f'\\nTotal files downloaded: {len(wheel_names)}')",
    "docstring": "Download release wheels. The release wheels for the given NumPy version are downloaded into the given directory. Parameters ---------- version : str The release version. For instance, \"1.18.3\". wheelhouse : str Directory in which to download the wheels.",
    "type": "function",
    "file_path": "numpy\\tools\\download-wheels.py",
    "ast_data": "FunctionDef name:download_wheels arg:version arg:wheelhouse arg:test arguments arg arg arg Assign Call Assign Call For Call Assign Assign Call With Call With Call Assign Call Assign Call Call If Compare Assign Assign Call If Call Call Call"
  },
  {
    "library": "numpy",
    "name": "ifftshift",
    "source_code": "@array_function_dispatch(_fftshift_dispatcher, module='numpy.fft')\ndef ifftshift(x, axes=None):\n    x = asarray(x)\n    if axes is None:\n        axes = tuple(range(x.ndim))\n        shift = [-(dim // 2) for dim in x.shape]\n    elif isinstance(axes, integer_types):\n        shift = -(x.shape[axes] // 2)\n    else:\n        shift = [-(x.shape[ax] // 2) for ax in axes]\n    return roll(x, shift, axes)",
    "docstring": "The inverse of . Although identical for even-length , the functions differ by one sample for odd-length . Parameters ---------- x : array_like Input array. axes : int or shape tuple, optional Axes over which to calculate. Defaults to None, which shifts all axes. Returns ------- y : ndarray The shifted array. See Also -------- fftshift : Shift zero-frequency component to the center of the spectrum. Examples -------- >>> import numpy as np >>> freqs = np.fft.fftfreq(9, d=1./9).reshape(3, 3) >>> freqs array([[ 0., 1., 2.], [ 3., 4., -4.], [-3., -2., -1.]]) >>> np.fft.ifftshift(np.fft.fftshift(freqs)) array([[ 0., 1., 2.], [ 3., 4., -4.], [-3., -2., -1.]])",
    "type": "function",
    "file_path": "numpy\\numpy\\fft\\_helper.py",
    "ast_data": "FunctionDef name:ifftshift arg:x arg:axes arguments arg arg Assign Call If Compare Assign Call Call Assign If Call Assign Assign Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "update_if_not_finite_grads",
    "source_code": "def update_if_not_finite_grads():\n    new_loss_scale = math_ops.maximum(self._current_loss_scale / self._multiplier, 1)\n    return control_flow_ops.group(self._num_good_steps.assign(0), self._current_loss_scale.assign(new_loss_scale))",
    "docstring": "Update assuming the gradients are nonfinite.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\training\\experimental\\loss_scale.py",
    "ast_data": "FunctionDef name:update_if_not_finite_grads arguments Assign Call Return return:yes Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "grad_and_value",
    "source_code": "@exposed_in('torch.func')\ndef grad_and_value(func: Callable, argnums: argnums_t=0, has_aux: bool=False) -> Callable:\n    from torch._dynamo import is_compiling\n    from torch._functorch import eager_transforms\n\n    def wrapper(*args, **kwargs):\n        return eager_transforms.grad_and_value_impl(func, argnums, has_aux, args, kwargs)\n    if not is_compiling():\n        wrapper = functools.wraps(func)(wrapper)\n    return wrapper",
    "docstring": "Returns a function to compute a tuple of the gradient and primal, or forward, computation. Args: func (Callable): A Python function that takes one or more arguments. Must return a single-element Tensor. If specified `grad` for examples",
    "type": "function",
    "file_path": "pytorch\\torch\\_functorch\\apis.py",
    "ast_data": "FunctionDef name:grad_and_value arg:func arg:argnums arg:has_aux arguments arg arg arg FunctionDef name:wrapper arguments arg arg Return return:yes Call If Call Assign Call Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "is_initialized",
    "source_code": "def is_initialized() -> bool:\n    return GroupMember.WORLD is not None",
    "docstring": "Check if the default process group has been initialized.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\distributed_c10d.py",
    "ast_data": "FunctionDef name:is_initialized arguments Return return:yes Compare"
  },
  {
    "library": "pytorch",
    "name": "histogram",
    "source_code": "def histogram(name, values, bins, max_bins=None):\n    values = make_np(values)\n    hist = make_histogram(values.astype(float), bins, max_bins)\n    return Summary(value=[Summary.Value(tag=name, histo=hist)])",
    "docstring": "Output a protocol buffer with a histogram. The generated []( has one summary value containing a histogram for . This op reports an error if any value is not finite. Args: name: A name for the generated node. Will also serve as a series name in TensorBoard. values: A real numeric . Any shape. Values to use to build the histogram. Returns: A scalar of type . The serialized protocol buffer.",
    "type": "function",
    "file_path": "pytorch\\torch\\utils\\tensorboard\\summary.py",
    "ast_data": "FunctionDef name:histogram arg:name arg:values arg:bins arg:max_bins arguments arg arg arg arg Assign Call Assign Call Call Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "dtype",
    "source_code": "@property\ndef dtype(self):\n    return self._dtype_policy.variable_dtype",
    "docstring": "The dtype of the layer weights. This is equivalent to . Unless mixed precision is used, this is the same as , the dtype of the layer's computations.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\base_layer.py",
    "ast_data": "FunctionDef name:dtype arg:self arguments arg Return return:yes"
  },
  {
    "library": "pandas",
    "name": "coerce_indexer_dtype",
    "source_code": "def coerce_indexer_dtype(indexer, categories) -> np.ndarray:\n    length = len(categories)\n    if length < _int8_max:\n        return ensure_int8(indexer)\n    elif length < _int16_max:\n        return ensure_int16(indexer)\n    elif length < _int32_max:\n        return ensure_int32(indexer)\n    return ensure_int64(indexer)",
    "docstring": "coerce the indexer input array to the smallest dtype possible",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\dtypes\\cast.py",
    "ast_data": "FunctionDef name:coerce_indexer_dtype arg:indexer arg:categories arguments arg arg Assign Call If Compare Return return:yes Call If Compare Return return:yes Call If Compare Return return:yes Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "_prepare_input_for_export",
    "source_code": "def _prepare_input_for_export(args, kwargs):\n    args, kwargs = _prepare_input_for_pytorch(args, kwargs)\n    if not kwargs and len(args) > 0 and isinstance(args[-1], dict):\n        onnx_inputs = args + ({},)\n    elif kwargs:\n        onnx_inputs = args + (kwargs,)\n    else:\n        onnx_inputs = args\n    return onnx_inputs",
    "docstring": "Prepare input for ONNX model export. Any future changes/formatting to the input before dispatching to the :func: api should be made in this function. Args: args: positional arguments for PyTorch model forward method. kwargs: keyword arguments for PyTorch model forward method. Returns: onnx_inputs: positional arguments for ONNX model export, as in :func:.",
    "type": "function",
    "file_path": "pytorch\\torch\\onnx\\verification.py",
    "ast_data": "FunctionDef name:_prepare_input_for_export arg:args arg:kwargs arguments arg arg Assign Call If BoolOp Compare Call Call Assign If Assign Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_deserialize_keras_tensors",
    "source_code": "def _deserialize_keras_tensors(kwargs, layer_map):\n\n    def _deserialize_keras_tensor(t):\n        if isinstance(t, tf_utils.ListWrapper):\n            t = t.as_list()\n            layer_name = t[0]\n            node_index = t[1]\n            tensor_index = t[2]\n            layer = layer_map[layer_name]\n            new_node_index = get_node_index(layer, node_index)\n            if new_node_index is None:\n                raise IndexError\n            node = layer._inbound_nodes[new_node_index]\n            return nest.flatten(node.outputs)[tensor_index]\n        return t\n    kwargs = tf_utils.convert_inner_node_data(kwargs, wrap=True)\n    return nest.map_structure(_deserialize_keras_tensor, kwargs)",
    "docstring": "Deserializes Keras Tensors passed to ..",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\functional.py",
    "ast_data": "FunctionDef name:_deserialize_keras_tensors arg:kwargs arg:layer_map arguments arg arg FunctionDef name:_deserialize_keras_tensor arg:t arguments arg If Call Assign Call Assign Assign Assign Assign Assign Call If Compare Raise Assign Return return:yes Call Return return:yes Assign Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "_monotonize_cdf",
    "source_code": "def _monotonize_cdf(self, value):\n    sign = 1\n    for transform in self.transforms:\n        sign = sign * transform.sign\n    if isinstance(sign, int) and sign == 1:\n        return value\n    return sign * (value - 0.5) + 0.5",
    "docstring": "This conditionally flips `cdf` is monotone increasing.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributions\\transformed_distribution.py",
    "ast_data": "FunctionDef name:_monotonize_cdf arg:self arg:value arguments arg arg Assign For Assign If BoolOp Call Compare Return return:yes Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "set_antialiased",
    "source_code": "def set_antialiased(self, b):\n    self._antialiased = int(bool(b))",
    "docstring": "Set whether object should be drawn with antialiased rendering.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\backend_bases.py",
    "ast_data": "FunctionDef name:set_antialiased arg:self arg:b arguments arg arg Assign Call Call"
  },
  {
    "library": "pytorch",
    "name": "SymbolicRegistry",
    "source_code": "class SymbolicRegistry:\n\n    def __init__(self) -> None:\n        self._registry: dict[str, _SymbolicFunctionGroup] = {}\n\n    def register(self, name: str, opset: OpsetVersion, func: Callable, custom: bool=False) -> None:\n        if '::' not in name:\n            raise ValueError(f\"The name must be in the form of 'domain::op', not '{name}'\")\n        symbolic_functions = self._registry.setdefault(name, _SymbolicFunctionGroup(name))\n        if custom:\n            symbolic_functions.add_custom(func, opset)\n        else:\n            symbolic_functions.add(func, opset)\n\n    def unregister(self, name: str, opset: OpsetVersion) -> None:\n        if name not in self._registry:\n            return\n        self._registry[name].remove_custom(opset)\n\n    def get_function_group(self, name: str) -> Optional[_SymbolicFunctionGroup]:\n        return self._registry.get(name)\n\n    def is_registered_op(self, name: str, version: int) -> bool:\n        functions = self.get_function_group(name)\n        if functions is None:\n            return False\n        return functions.get(version) is not None\n\n    def all_functions(self) -> set[str]:\n        return set(self._registry)",
    "docstring": "Registry for symbolic functions. The registry maintains a mapping from qualified names to symbolic functions. It is used to register new symbolic functions and to dispatch calls to the appropriate function.",
    "type": "class",
    "file_path": "pytorch\\torch\\onnx\\_internal\\registration.py",
    "ast_data": "ClassDef name:SymbolicRegistry FunctionDef name:__init__ arg:self arguments arg FunctionDef name:register arg:self arg:name arg:opset arg:func arg:custom arguments arg arg arg arg arg If Compare Raise Call Assign Call Call If Call Call FunctionDef name:unregister arg:self arg:name arg:opset arguments arg arg arg If Compare Return return:no Call FunctionDef name:get_function_group arg:self arg:name arguments arg arg Return return:yes Call FunctionDef name:is_registered_op arg:self arg:name arg:version arguments arg arg arg Assign Call If Compare Return return:yes Return return:yes Compare Call FunctionDef name:all_functions arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "sequential_split",
    "source_code": "def sequential_split(gm: torch.fx.GraphModule, node_call_back: Callable[[torch.fx.Node], Union[torch.fx.Node, bool]]) -> torch.fx.GraphModule:\n    from torch.fx.passes.split_module import split_module\n    split_map = {}\n    split_id = 0\n    for node in gm.graph.nodes:\n        if node_call_back(node):\n            split_id += 1\n        split_map[node] = split_id\n    new_gm = split_module(gm, gm, lambda node: split_map[node], keep_original_order=True, keep_original_node_name=True)\n    new_gm.graph._codegen = gm.graph._codegen\n    new_gm.recompile()\n    return new_gm",
    "docstring": "sequential_split creates a new graph module that splits the input graph module into multiple submodules based on the node_call_back. It doesn't mutate the input graph module. The node_call_back should return True if the node is a delimiter. Delimiter will be the first node in the next submodule.",
    "type": "function",
    "file_path": "pytorch\\torch\\_export\\utils.py",
    "ast_data": "FunctionDef name:sequential_split arg:gm arg:node_call_back arguments arg arg Assign Assign For If Call Assign Assign Call arguments arg Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "global_seed",
    "source_code": "def global_seed():\n    return context()._seed",
    "docstring": "Returns the eager mode seed.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\context.py",
    "ast_data": "FunctionDef name:global_seed arguments Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "diagsvd",
    "source_code": "@_apply_over_batch(('s', 1))\ndef diagsvd(s, M, N):\n    part = diag(s)\n    typ = part.dtype.char\n    MorN = len(s)\n    if MorN == M:\n        return np.hstack((part, zeros((M, N - M), dtype=typ)))\n    elif MorN == N:\n        return r_[part, zeros((M - N, N), dtype=typ)]\n    else:\n        raise ValueError('Length of s must be M or N.')",
    "docstring": "Construct the sigma matrix in SVD from singular values and size M, N. Parameters ---------- s : (M,) or (N,) array_like Singular values M : int Size of the matrix whose singular values are . N : int Size of the matrix whose singular values are . Returns ------- S : (M, N) ndarray The S-matrix in the singular value decomposition See Also -------- svd : Singular value decomposition of a matrix svdvals : Compute singular values of a matrix. Examples -------- >>> import numpy as np >>> from scipy.linalg import diagsvd >>> vals = np.array([1, 2, 3]) # The array representing the computed svd >>> diagsvd(vals, 3, 4) array([[1, 0, 0, 0], [0, 2, 0, 0], [0, 0, 3, 0]]) >>> diagsvd(vals, 4, 3) array([[1, 0, 0], [0, 2, 0], [0, 0, 3], [0, 0, 0]])",
    "type": "function",
    "file_path": "scipy\\scipy\\linalg\\_decomp_svd.py",
    "ast_data": "FunctionDef name:diagsvd arg:s arg:M arg:N arguments arg arg arg Assign Call Assign Assign Call If Compare Return return:yes Call Call If Compare Return return:yes Call Raise Call Call"
  },
  {
    "library": "pandas",
    "name": "_can_hold_na",
    "source_code": "@property\ndef _can_hold_na(self) -> bool:\n    return True",
    "docstring": "Can arrays of this dtype hold NA values?",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\dtypes\\base.py",
    "ast_data": "FunctionDef name:_can_hold_na arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "AnonymousExtensionTypeSpec",
    "source_code": "@type_spec_registry.register('tf.AnonymousExtensionType.Spec')\nclass AnonymousExtensionTypeSpec(ExtensionTypeSpec):\n\n    def __init__(self, **fields):\n        for name in fields:\n            if extension_type_field.ExtensionTypeField.is_reserved_name(name) or (name.startswith('__') and name.endswith('__')):\n                raise ValueError(f'Reserved field name {name} was encountered when trying to instantiate an AnonymousExtensionTypeSpec.')\n        fields = [(k, _convert_anonymous_fields(v, for_spec=True)) for k, v in fields.items()]\n        self.__dict__.update(fields)\n        super().__init__()\n    value_type = AnonymousExtensionType\n\n    def _serialize(self):\n        return tuple(((name, _change_nested_mappings_to(value, dict)) for name, value in self.__dict__.items() if not extension_type_field.ExtensionTypeField.is_reserved_name(name)))\n\n    def __setattr__(self, name, value):\n        if name in type_spec.CACHED_FIXED_PROPERTIES:\n            super().__setattr__(name, value)\n        else:\n            raise AttributeError(f'Cannot set attribute `{name}`. AnonymousExtensionTypeSpec instances are immutable.')\n\n    def __delattr__(self, name):\n        raise AttributeError(f'Cannot delete attribute `{name}`. AnonymousExtensionTypeSpec instances are immutable.')",
    "docstring": "TypeSpec for AnonymousExtensionType.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\extension_type.py",
    "ast_data": "ClassDef name:AnonymousExtensionTypeSpec FunctionDef name:__init__ arg:self arguments arg arg For If BoolOp Call BoolOp Call Call Raise Call Assign Call Call Call Call Call Assign FunctionDef name:_serialize arg:self arguments arg Return return:yes Call Call Call Call FunctionDef name:__setattr__ arg:self arg:name arg:value arguments arg arg arg If Compare Call Call Raise Call FunctionDef name:__delattr__ arg:self arg:name arguments arg arg Raise Call Call"
  },
  {
    "library": "pytorch",
    "name": "atleast_1d",
    "source_code": "def atleast_1d(*tensors):\n    if has_torch_function(tensors):\n        return handle_torch_function(atleast_1d, tensors, *tensors)\n    if len(tensors) == 1:\n        tensors = tensors[0]\n    return _VF.atleast_1d(tensors)",
    "docstring": "Returns a 1-dimensional view of each input tensor with zero dimensions. Input tensors with one or more dimensions are returned as-is. Args: input (Tensor or list of Tensors) Returns: output (Tensor or tuple of Tensors) Example:: >>> x = torch.arange(2) >>> x tensor([0, 1]) >>> torch.atleast_1d(x) tensor([0, 1]) >>> x = torch.tensor(1.) >>> x tensor(1.) >>> torch.atleast_1d(x) tensor([1.]) >>> x = torch.tensor(0.5) >>> y = torch.tensor(1.) >>> torch.atleast_1d((x, y)) (tensor([0.5000]), tensor([1.]))",
    "type": "function",
    "file_path": "pytorch\\torch\\functional.py",
    "ast_data": "FunctionDef name:atleast_1d arguments arg If Call Return return:yes Call If Compare Call Assign Return return:yes Call"
  },
  {
    "library": "numpy",
    "name": "feature_extra_checks",
    "source_code": "@_Cache.me\ndef feature_extra_checks(self, name):\n    assert isinstance(name, str)\n    d = self.feature_supported[name]\n    extra_checks = d.get('extra_checks', [])\n    if not extra_checks:\n        return []\n    self.dist_log(\"Testing extra checks for feature '%s'\" % name, extra_checks)\n    flags = self.feature_flags(name)\n    available = []\n    not_available = []\n    for chk in extra_checks:\n        test_path = os.path.join(self.conf_check_path, 'extra_%s.c' % chk.lower())\n        if not os.path.exists(test_path):\n            self.dist_fatal('extra check file does not exist', test_path)\n        is_supported = self.dist_test(test_path, flags + self.cc_flags['werror'])\n        if is_supported:\n            available.append(chk)\n        else:\n            not_available.append(chk)\n    if not_available:\n        self.dist_log('testing failed for checks', not_available, stderr=True)\n    return available",
    "docstring": "Return a list of supported extra checks after testing them against the compiler. Parameters ---------- names : str CPU feature name in uppercase.",
    "type": "method",
    "file_path": "numpy\\numpy\\distutils\\ccompiler_opt.py",
    "ast_data": "FunctionDef name:feature_extra_checks arg:self arg:name arguments arg arg Call Assign Assign Call If Return return:no Call Assign Call Assign Assign For Assign Call Call If Call Call Assign Call If Call Call If Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "is_enabled",
    "source_code": "@staticmethod\ndef is_enabled():\n    return torch._C._check_sparse_tensor_invariants()",
    "docstring": "Return True if the sparse tensor invariants checking is enabled. .. note:: Use :func: or :func: to manage the state of the sparse tensor invariants checks.",
    "type": "method",
    "file_path": "pytorch\\torch\\sparse\\__init__.py",
    "ast_data": "FunctionDef name:is_enabled arguments Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, strategy):\n    if not getattr(self, '_has_initialized', False):\n        if not hasattr(strategy, '_is_parameter_server_strategy_v2'):\n            raise ValueError('Only `tf.distribute.experimental.ParameterServerStrategy` is supported to work with `tf.distribute.experimental.coordinator.ClusterCoordinator` currently.')\n        self._strategy = strategy\n        self.strategy.extended._used_with_coordinator = True\n        self._cluster = Cluster(strategy)\n        self._has_initialized = True",
    "docstring": "Initialization of a instance. Args: strategy: a supported object. Currently, only is supported. Raises: ValueError: if the strategy being used is not supported.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\coordinator\\cluster_coordinator.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:strategy arguments arg arg If Call If Call Raise Call Assign Assign Assign Call Assign"
  },
  {
    "library": "scipy",
    "name": "expect",
    "source_code": "def expect(self, func=None, args=(), loc=0, lb=None, ub=None, conditional=False, maxcount=1000, tolerance=1e-10, chunksize=32):\n    args, _, _ = self._parse_args(*args)\n    if func is None:\n\n        def fun(x):\n            return (x + loc) * self._pmf(x, *args)\n    else:\n\n        def fun(x):\n            return func(x + loc) * self._pmf(x, *args)\n    _a, _b = self._get_support(*args)\n    if lb is None:\n        lb = _a\n    else:\n        lb = lb - loc\n    if ub is None:\n        ub = _b\n    else:\n        ub = ub - loc\n    if conditional:\n        invfac = self.sf(lb - 1, *args) - self.sf(ub, *args)\n    else:\n        invfac = 1.0\n    if isinstance(self, rv_sample):\n        res = self._expect(fun, lb, ub)\n        return res / invfac\n    x0 = self._ppf(0.5, *args)\n    res = _expect(fun, lb, ub, x0, self.inc, maxcount, tolerance, chunksize)\n    return res / invfac",
    "docstring": "Calculate expected value of a function with respect to the distribution for discrete distribution by numerical summation. Parameters ---------- func : callable, optional Function for which the expectation value is calculated. Takes only one argument. The default is the identity mapping f(k) = k. args : tuple, optional Shape parameters of the distribution. loc : float, optional Location parameter. Default is 0. lb, ub : int, optional Lower and upper bound for the summation, default is set to the support of the distribution, inclusive (`funcfuncmaxcountchunksize` may improve the result, but may also make zipf very slow. The function is not vectorized.",
    "type": "method",
    "file_path": "scipy\\scipy\\stats\\_distn_infrastructure.py",
    "ast_data": "FunctionDef name:expect arg:self arg:func arg:args arg:loc arg:lb arg:ub arg:conditional arg:maxcount arg:tolerance arg:chunksize arguments arg arg arg arg arg arg arg arg arg arg Assign Call If Compare FunctionDef name:fun arg:x arguments arg Return return:yes Call FunctionDef name:fun arg:x arguments arg Return return:yes Call Call Assign Call If Compare Assign Assign If Compare Assign Assign If Assign Call Call Assign If Call Assign Call Return return:yes Assign Call Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "ones_rank_cache",
    "source_code": "def ones_rank_cache(self):\n    return _tensor_caches_map[self._id].ones_rank_cache",
    "docstring": "Per-device cache for scalars.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\context.py",
    "ast_data": "FunctionDef name:ones_rank_cache arg:self arguments arg Return return:yes"
  },
  {
    "library": "kornia",
    "name": "equalize",
    "source_code": "def equalize(min_mag: float, max_mag: float) -> OperationBase:\n    return Equalize(1.0)",
    "docstring": "Return equalize op.",
    "type": "function",
    "file_path": "kornia\\kornia\\augmentation\\auto\\rand_augment\\ops.py",
    "ast_data": "FunctionDef name:equalize arg:min_mag arg:max_mag arguments arg arg Return return:yes Call"
  },
  {
    "library": "authlib",
    "name": "delete",
    "source_code": "def delete(self, url, **kwargs):\n    return self.request('DELETE', url, **kwargs)",
    "docstring": "Invoke DELETE http request. If `` configured, shortcut is available:: client.delete(\"posts/123\")",
    "type": "method",
    "file_path": "authlib\\authlib\\integrations\\base_client\\sync_app.py",
    "ast_data": "FunctionDef name:delete arg:self arg:url arguments arg arg arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "Reduction",
    "source_code": "class Reduction(Enum):\n    SUM = 'sum'\n    SUM_OVER_BATCH_SIZE = 'sum_over_batch_size'\n    WEIGHTED_MEAN = 'weighted_mean'",
    "docstring": "Types of metrics reduction. Contains the following values: * : Scalar sum of weighted values. * : Scalar sum of weighted values divided by number of elements. * : Scalar sum of weighted values divided by sum of weights.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\utils\\metrics_utils.py",
    "ast_data": "ClassDef name:Reduction Assign Assign Assign"
  },
  {
    "library": "pandas",
    "name": "_check_data_length",
    "source_code": "@final\ndef _check_data_length(self, columns: Sequence[Hashable], data: Sequence[ArrayLike]) -> None:\n    if not self.index_col and len(columns) != len(data) and columns:\n        empty_str = is_object_dtype(data[-1]) and data[-1] == ''\n        empty_str_or_na = empty_str | isna(data[-1])\n        if len(columns) == len(data) - 1 and np.all(empty_str_or_na):\n            return\n        warnings.warn('Length of header or names does not match length of data. This leads to a loss of data with index_col=False.', ParserWarning, stacklevel=find_stack_level())",
    "docstring": "Checks if length of data is equal to length of column names. One set of trailing commas is allowed. self.index_col not False results in a ParserError previously when lengths do not match. Parameters ---------- columns: list of column names data: list of array-likes containing the data column-wise.",
    "type": "method",
    "file_path": "pandas\\pandas\\io\\parsers\\base_parser.py",
    "ast_data": "FunctionDef name:_check_data_length arg:self arg:columns arg:data arguments arg arg arg If BoolOp Compare Call Call Assign BoolOp Call Compare Assign Call If BoolOp Compare Call Call Call Return return:no Call Call"
  },
  {
    "library": "django",
    "name": "AdminConfig",
    "source_code": "class AdminConfig(SimpleAdminConfig):\n    default = True\n\n    def ready(self):\n        super().ready()\n        self.module.autodiscover()",
    "docstring": "The default AppConfig for admin which does autodiscovery.",
    "type": "class",
    "file_path": "django\\django\\contrib\\admin\\apps.py",
    "ast_data": "ClassDef name:AdminConfig Assign FunctionDef name:ready arg:self arguments arg Call Call Call"
  },
  {
    "library": "pandas",
    "name": "_write_strls",
    "source_code": "def _write_strls(self) -> None:\n    pass",
    "docstring": "No-op, future compatibility",
    "type": "method",
    "file_path": "pandas\\pandas\\io\\stata.py",
    "ast_data": "FunctionDef name:_write_strls arg:self arguments arg"
  },
  {
    "library": "tensorflow",
    "name": "set_item",
    "source_code": "def set_item(target, i, x):\n    if isinstance(target, tensor_array_ops.TensorArray):\n        return _tf_tensorarray_set_item(target, i, x)\n    elif tensor_util.is_tf_type(target):\n        if target.dtype == dtypes.variant:\n            return _tf_tensor_list_set_item(target, i, x)\n        else:\n            return _tf_tensor_set_item(target, i, x)\n    else:\n        return _py_set_item(target, i, x)",
    "docstring": "The slice write operator (i.e. __setitem__). Note: it is unspecified whether target will be mutated or not. In general, if target is mutable (like Python lists), it will be mutated. Args: target: An entity that supports setitem semantics. i: Index to modify. x: The new element value. Returns: Same as target, after the update was performed. Raises: ValueError: if target is not of a supported type.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\autograph\\operators\\slices.py",
    "ast_data": "FunctionDef name:set_item arg:target arg:i arg:x arguments arg arg arg If Call Return return:yes Call If Call If Compare Return return:yes Call Return return:yes Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "to_dtype",
    "source_code": "def to_dtype(self, x: T, dtype: torch.dtype, src_dtype: Optional[torch.dtype]=None, use_compute_types: bool=True) -> T:\n    raise NotImplementedError",
    "docstring": "Convert x to dtype. src_dtype can be optionally set to specify what the original dtype of x was, which can improve code generation (used by torch to(dtype=dtype)).",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\ops_handler.py",
    "ast_data": "FunctionDef name:to_dtype arg:self arg:x arg:dtype arg:src_dtype arg:use_compute_types arguments arg arg arg arg arg Raise"
  },
  {
    "library": "matplotlib",
    "name": "_serialize_type42",
    "source_code": "def _serialize_type42(font, subset, fontdata):\n    version, breakpoints = _version_and_breakpoints(font.get('loca'), fontdata)\n    post = font['post']\n    name = font['name']\n    chars = _generate_charstrings(subset)\n    sfnts = _generate_sfnts(fontdata, subset, breakpoints)\n    return textwrap.dedent(f'\\n        %%!PS-TrueTypeFont-{version[0]}.{version[1]}-{font['head'].fontRevision:.7f}\\n        10 dict begin\\n        /FontType 42 def\\n        /FontMatrix [1 0 0 1 0 0] def\\n        /FontName /{name.getDebugName(6)} def\\n        /FontInfo 7 dict dup begin\\n        /FullName ({name.getDebugName(4)}) def\\n        /FamilyName ({name.getDebugName(1)}) def\\n        /Version ({name.getDebugName(5)}) def\\n        /ItalicAngle {post.italicAngle} def\\n        /isFixedPitch {('true' if post.isFixedPitch else 'false')} def\\n        /UnderlinePosition {post.underlinePosition} def\\n        /UnderlineThickness {post.underlineThickness} def\\n        end readonly def\\n        /Encoding StandardEncoding def\\n        /FontBBox [{_nums_to_str(*_bounds(font))}] def\\n        /PaintType 0 def\\n        /CIDMap 0 def\\n        {chars}\\n        {sfnts}\\n        FontName currentdict end definefont pop\\n        ')",
    "docstring": "Output a PostScript Type-42 format representation of font Parameters ---------- font : fontTools.ttLib.ttFont.TTFont The original font object subset : fontTools.ttLib.ttFont.TTFont The subset font object fontdata : bytes The raw font data in TTF format Returns ------- str The Type-42 formatted font",
    "type": "function",
    "file_path": "matplotlib\\lib\\matplotlib\\backends\\backend_ps.py",
    "ast_data": "FunctionDef name:_serialize_type42 arg:font arg:subset arg:fontdata arguments arg arg arg Assign Call Call Assign Assign Assign Call Assign Call Return return:yes Call Call Call Call Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "read",
    "source_code": "def read(self, save_path, options=None):\n    return self.restore(save_path, options)",
    "docstring": "Restore the checkpointed variables. This method has exactly the same logic as restore(). This method is implemented only to fulfill the duty of subclassing tf.train.Checkpoint. Args: save_path: The full name of the checkpoint file to be restored. options: CheckpointOption instance. Returns: A load status object, which can be used to make assertions about the status of a checkpoint restoration. See tf.train.Checkpoint.restore() for more details.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\checkpoint\\async_checkpoint_helper.py",
    "ast_data": "FunctionDef name:read arg:self arg:save_path arg:options arguments arg arg arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "define",
    "source_code": "def define(self, schema, alias_analysis='', *, tags=()):\n    if torch._running_with_deploy():\n        _library.utils.warn_deploy()\n        return\n    if alias_analysis not in ['', 'FROM_SCHEMA', 'CONSERVATIVE']:\n        raise RuntimeError(f'Invalid alias_analysis type {alias_analysis}')\n    assert self.m is not None\n    if isinstance(tags, torch.Tag):\n        tags = (tags,)\n    name = schema.split('(')[0]\n    packet_name = name.split('.')[0] if '.' in name else name\n    has_preexisting_packet = hasattr(torch.ops, self.ns) and hasattr(getattr(torch.ops, self.ns), packet_name)\n    result = self.m.define(schema, alias_analysis, tuple(tags))\n    name = schema.split('(')[0]\n    qualname = self.ns + '::' + name\n    if has_preexisting_packet:\n        ns = getattr(torch.ops, self.ns)\n        packet = getattr(ns, packet_name)\n        torch._ops._refresh_packet(packet)\n    self._op_defs.add(qualname)\n    _defs.add(qualname)\n    return result",
    "docstring": "Defines a new operator and its semantics in the ns namespace. Args: schema: function schema to define a new operator. alias_analysis (optional): Indicates if the aliasing properties of the operator arguments can be inferred from the schema (default behavior) or not (\"CONSERVATIVE\"). tags (Tag | Sequence[Tag]): one or more torch.Tag to apply to this operator. Tagging an operator changes the operator's behavior under various PyTorch subsystems; please read the docs for the torch.Tag carefully before applying it. Returns: name of the operator as inferred from the schema. Example:: >>> my_lib = Library(\"mylib\", \"DEF\") >>> my_lib.define(\"sum(Tensor self) -> Tensor\")",
    "type": "method",
    "file_path": "pytorch\\torch\\library.py",
    "ast_data": "FunctionDef name:define arg:self arg:schema arg:alias_analysis arguments arg arg arg arg If Call Call Return return:no If Compare Raise Call Compare If Call Assign Assign Call Assign Compare Call Assign BoolOp Call Call Call Assign Call Call Assign Call Assign If Assign Call Assign Call Call Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "get_free_port",
    "source_code": "def get_free_port():\n    sock = get_socket_with_port()\n    with closing(sock):\n        return sock.getsockname()[1]",
    "docstring": "Returns an unused port on localhost. This function finds an unused port on localhost by opening to socket to bind to a port and then closing it. Returns: int: an unused port on localhost Example: >>> # xdoctest: +SKIP(\"Nondeterministic\") >>> get_free_port() 63976 .. note:: The port returned by :func: is not reserved and may be taken by another process after this function returns.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\elastic\\utils\\distributed.py",
    "ast_data": "FunctionDef name:get_free_port arguments Assign Call With Call Return return:yes Call"
  },
  {
    "library": "django",
    "name": "int_to_base36",
    "source_code": "def int_to_base36(i):\n    char_set = '0123456789abcdefghijklmnopqrstuvwxyz'\n    if i < 0:\n        raise ValueError('Negative base36 conversion input.')\n    if i < 36:\n        return char_set[i]\n    b36 = ''\n    while i != 0:\n        i, n = divmod(i, 36)\n        b36 = char_set[n] + b36\n    return b36",
    "docstring": "Convert an integer to a base36 string.",
    "type": "function",
    "file_path": "django\\django\\utils\\http.py",
    "ast_data": "FunctionDef name:int_to_base36 arg:i arguments arg Assign If Compare Raise Call If Compare Return return:yes Assign While Compare Assign Call Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_to_numpy_type",
    "source_code": "def _to_numpy_type(dtype):\n    if isinstance(dtype, dtypes.DType):\n        return dtype.as_numpy_dtype\n    return np.dtype(dtype)",
    "docstring": "Converts a native python or TF DType to numpy type. Args: dtype: Could be a python type, a numpy type or a TF DType. Returns: A NumPy .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\numpy_ops\\np_utils.py",
    "ast_data": "FunctionDef name:_to_numpy_type arg:dtype arguments arg If Call Return return:yes Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "conv2d_inference_rule",
    "source_code": "@register_inference_rule(Conv2d)\ndef conv2d_inference_rule(n: Node, module_instance):\n    assert isinstance(n.args[0], Node)\n    n.args[0].type = expand_to_tensor_dim(n.args[0].type, 4)\n    arg_type = n.args[0].type\n    curr_node_type = expand_to_tensor_dim(n.type, 4)\n    if is_consistent(arg_type.__args__[1], module_instance.in_channels):\n        w_in = arg_type.__args__[3]\n        h_in = arg_type.__args__[2]\n        h_out = calculate_out_dimension(h_in, module_instance, 0)\n        w_out = calculate_out_dimension(w_in, module_instance, 1)\n        new_type = TensorType((arg_type.__args__[0], module_instance.out_channels, h_out, w_out))\n        gub = get_greatest_upper_bound(new_type, curr_node_type)\n        n.type = gub\n        return n.type\n    else:\n        raise TypeError(f'Cannot apply {module_instance} with input type {arg_type} and existing type {n.type} on {n}')",
    "docstring": "Given a Conv2D instance and a node check the following conditions: - the input type can be expanded to a size 4 tensor: t = (x_1, x_2, H, W) - the current node type can be expanded to a size 4 tensor: t' = (x_1', x_2', x_3', x_4') - x_2 is consistent with the module's in_channels - let o = (x_1, out_channels, H_out, W_out) then the output is the greatest upper bound of o and the existing node type t'.",
    "type": "function",
    "file_path": "pytorch\\torch\\fx\\experimental\\graph_gradual_typechecker.py",
    "ast_data": "FunctionDef name:conv2d_inference_rule arg:n arg:module_instance arguments arg arg Call Assign Call Assign Assign Call If Call Assign Assign Assign Call Assign Call Assign Call Assign Call Assign Return return:yes Raise Call Call"
  },
  {
    "library": "numpy",
    "name": "PushbackIterator",
    "source_code": "class PushbackIterator:\n\n    def __init__(self, iterable):\n        object.__init__(self)\n        self.iterable = iter(iterable)\n        self.buffer = []\n\n    def __iter__(self):\n        return self\n\n    def __next__(self):\n        if self.buffer:\n            return self.buffer.pop()\n        else:\n            return next(self.iterable)\n\n    def pushback(self, item):\n        self.buffer.append(item)\n    next = __next__",
    "docstring": "PushbackIterator(iterable) Return an iterator for which items can be pushed back into. Call the .pushback(item) method to have item returned as the next value of next().",
    "type": "class",
    "file_path": "numpy\\numpy\\linalg\\lapack_lite\\fortran.py",
    "ast_data": "ClassDef name:PushbackIterator FunctionDef name:__init__ arg:self arg:iterable arguments arg arg Call Assign Call Assign FunctionDef name:__iter__ arg:self arguments arg Return return:yes FunctionDef name:__next__ arg:self arguments arg If Return return:yes Call Return return:yes Call FunctionDef name:pushback arg:self arg:item arguments arg arg Call Assign"
  },
  {
    "library": "scikit-learn",
    "name": "_AtOp",
    "source_code": "class _AtOp(Enum):\n    SET = 'set'\n    ADD = 'add'\n    SUBTRACT = 'subtract'\n    MULTIPLY = 'multiply'\n    DIVIDE = 'divide'\n    POWER = 'power'\n    MIN = 'min'\n    MAX = 'max'\n\n    def __str__(self) -> str:\n        return self.value",
    "docstring": "Operations for use in .",
    "type": "class",
    "file_path": "scikit-learn\\sklearn\\externals\\array_api_extra\\_lib\\_at.py",
    "ast_data": "ClassDef name:_AtOp Assign Assign Assign Assign Assign Assign Assign Assign FunctionDef name:__str__ arg:self arguments arg Return return:yes"
  },
  {
    "library": "scrapy",
    "name": "adapt_response",
    "source_code": "def adapt_response(self, response: Response) -> Response:\n    return response",
    "docstring": "This method has the same purpose as the one in XMLFeedSpider",
    "type": "method",
    "file_path": "scrapy\\scrapy\\spiders\\feed.py",
    "ast_data": "FunctionDef name:adapt_response arg:self arg:response arguments arg arg Return return:yes"
  },
  {
    "library": "pandas",
    "name": "head",
    "source_code": "@final\n@Substitution(name='groupby')\n@Substitution(see_also=_common_see_also)\ndef head(self, n: int=5) -> NDFrameT:\n    mask = self._make_mask_from_positional_indexer(slice(None, n))\n    return self._mask_selected_obj(mask)",
    "docstring": "Return first n rows of each group. Similar to `` flag is ignored). Parameters ---------- n : int If positive: number of entries to include from start of each group. If negative: number of entries to exclude from end of each group. Returns ------- Series or DataFrame Subset of original Series or DataFrame as determined by n. %(see_also)s Examples -------- >>> df = pd.DataFrame([[1, 2], [1, 4], [5, 6]], columns=[\"A\", \"B\"]) >>> df.groupby(\"A\").head(1) A B 0 1 2 2 5 6 >>> df.groupby(\"A\").head(-1) A B 0 1 2",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\groupby\\groupby.py",
    "ast_data": "FunctionDef name:head arg:self arg:n arguments arg arg Assign Call Call Return return:yes Call Call Call"
  },
  {
    "library": "sphinx",
    "name": "process_doc",
    "source_code": "def process_doc(self, app: Sphinx, doctree: nodes.document) -> None:\n    for node in doctree.findall(addnodes.download_reference):\n        targetname = node['reftarget']\n        if '://' in targetname:\n            node['refuri'] = targetname\n        else:\n            rel_filename, filename = app.env.relfn2path(targetname, app.env.docname)\n            app.env.note_dependency(rel_filename)\n            if not os.access(filename, os.R_OK):\n                logger.warning(__('download file not readable: %s'), filename, location=node, type='download', subtype='not_readable')\n                continue\n            node['filename'] = app.env.dlfiles.add_file(app.env.docname, rel_filename).as_posix()",
    "docstring": "Process downloadable file paths.",
    "type": "method",
    "file_path": "sphinx\\sphinx\\environment\\collectors\\asset.py",
    "ast_data": "FunctionDef name:process_doc arg:self arg:app arg:doctree arguments arg arg arg For Call Assign If Compare Assign Assign Call Call If Call Call Call Assign Call Call"
  },
  {
    "library": "django",
    "name": "allows_auto_pk_0",
    "source_code": "@cached_property\ndef allows_auto_pk_0(self):\n    return 'NO_AUTO_VALUE_ON_ZERO' in self.connection.sql_mode",
    "docstring": "Autoincrement primary key can be set to 0 if it doesn't generate new autoincrement values.",
    "type": "method",
    "file_path": "django\\django\\db\\backends\\mysql\\features.py",
    "ast_data": "FunctionDef name:allows_auto_pk_0 arg:self arguments arg Return return:yes Compare"
  },
  {
    "library": "pytorch",
    "name": "get_grid_search_values",
    "source_code": "def get_grid_search_values(self):\n    return {'max_depth': [5, 6, 7], 'min_samples_leaf': [1, 5, 10, 0.01, 0.05, 0.02], 'criterion': ['gini', 'entropy']}",
    "docstring": "Standard values for grid search. Can be overriden.",
    "type": "method",
    "file_path": "pytorch\\torchgen\\_autoheuristic\\train_decision.py",
    "ast_data": "FunctionDef name:get_grid_search_values arg:self arguments arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "fuse_conv_bn",
    "source_code": "def fuse_conv_bn(is_qat, conv, bn):\n    assert conv.training == bn.training, 'Conv and BN both must be in the same mode (train or eval).'\n    fused_module_class_map = {nn.Conv1d: nni.ConvBn1d, nn.Conv2d: nni.ConvBn2d, nn.Conv3d: nni.ConvBn3d}\n    if is_qat:\n        assert bn.num_features == conv.out_channels, 'Output channel of Conv2d must match num_features of BatchNorm2d'\n        assert bn.affine, 'Only support fusing BatchNorm2d with affine set to True'\n        assert bn.track_running_stats, 'Only support fusing BatchNorm2d with tracking_running_stats set to True'\n        fused_module_class = fused_module_class_map.get(type(conv), None)\n        if fused_module_class is not None:\n            return fused_module_class(conv, bn)\n        else:\n            raise NotImplementedError(f'Cannot fuse train modules: {(conv, bn)}')\n    else:\n        return nn.utils.fuse_conv_bn_eval(conv, bn)",
    "docstring": "Return the fused the conv and bn modules. Given the conv and bn modules, fuses them and returns the fused module Args: is_qat: a flag for whether we are using quantization aware training fusion or post training quantization fusion conv: Module instance of type conv2d/conv3d bn: Spatial BN instance that needs to be fused with the conv Examples:: >>> m1 = nn.Conv2d(10, 20, 3) >>> b1 = nn.BatchNorm2d(20) >>> # xdoctest: +SKIP >>> m2 = fuse_conv_bn(m1, b1)",
    "type": "function",
    "file_path": "pytorch\\torch\\ao\\quantization\\fuser_method_mappings.py",
    "ast_data": "FunctionDef name:fuse_conv_bn arg:is_qat arg:conv arg:bn arguments arg arg arg Compare Assign If Compare Assign Call Call If Compare Return return:yes Call Raise Call Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "_threshold_for_binary_predict",
    "source_code": "def _threshold_for_binary_predict(estimator):\n    if hasattr(estimator, 'decision_function') and is_classifier(estimator):\n        return 0.0\n    else:\n        return 0.5",
    "docstring": "Threshold for predictions from binary estimator.",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\multiclass.py",
    "ast_data": "FunctionDef name:_threshold_for_binary_predict arg:estimator arguments arg If BoolOp Call Call Return return:yes Return return:yes"
  },
  {
    "library": "kornia",
    "name": "apply_cached_rotary_emb",
    "source_code": "def apply_cached_rotary_emb(freqs: Tensor, t: Tensor) -> Tensor:\n    return t * freqs[0] + rotate_half(t) * freqs[1]",
    "docstring": "Apply rotary embedding.",
    "type": "function",
    "file_path": "kornia\\kornia\\feature\\lightglue.py",
    "ast_data": "FunctionDef name:apply_cached_rotary_emb arg:freqs arg:t arguments arg arg Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "get_arrays_tol",
    "source_code": "def get_arrays_tol(*arrays):\n    if len(arrays) == 0:\n        raise ValueError('At least one array must be provided.')\n    size = max((array.size for array in arrays))\n    weight = max((np.max(np.abs(array[np.isfinite(array)]), initial=1.0) for array in arrays))\n    return 10.0 * EPS * max(size, 1.0) * weight",
    "docstring": "Get a relative tolerance for a set of arrays. Borrowed from COBYQA Parameters ---------- *arrays: tuple Set of to get the tolerance for. Returns ------- float Relative tolerance for the set of arrays. Raises ------ ValueError If no array is provided.",
    "type": "function",
    "file_path": "scipy\\scipy\\_lib\\pyprima\\pyprima\\src\\pyprima\\common\\linalg.py",
    "ast_data": "FunctionDef name:get_arrays_tol arguments arg If Compare Call Raise Call Assign Call Assign Call Call Call Call Return return:yes Call"
  },
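A standalone sketch of the tolerance formula above; `EPS` is assumed to be machine epsilon (the module constant is not shown in this entry):

```python
import numpy as np

EPS = np.finfo(float).eps  # assumption: matches the module-level EPS

def get_arrays_tol_sketch(*arrays):
    # Tolerance scales with the largest array size and the largest
    # finite magnitude across all arrays (each floored at 1.0).
    size = max(a.size for a in arrays)
    weight = max(np.max(np.abs(a[np.isfinite(a)]), initial=1.0) for a in arrays)
    return 10.0 * EPS * max(size, 1.0) * weight

get_arrays_tol_sketch(np.array([1.0, 2.0]), np.array([1e6, np.inf]))
```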
  {
    "library": "matplotlib",
    "name": "arrow",
    "source_code": "@_docstring.interpd\ndef arrow(self, x, y, dx, dy, **kwargs):\n    x = self.convert_xunits(x)\n    y = self.convert_yunits(y)\n    dx = self.convert_xunits(dx)\n    dy = self.convert_yunits(dy)\n    a = mpatches.FancyArrow(x, y, dx, dy, **kwargs)\n    self.add_patch(a)\n    self._request_autoscale_view()\n    return a",
    "docstring": "[*Discouraged*] Add an arrow to the Axes. This draws an arrow from `~.Axes.annotate.FancyArrow.FancyArrow` object.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\axes\\_axes.py",
    "ast_data": "FunctionDef name:arrow arg:self arg:x arg:y arg:dx arg:dy arguments arg arg arg arg arg arg Assign Call Assign Call Assign Call Assign Call Assign Call Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "triton_config_to_hashable",
    "source_code": "def triton_config_to_hashable(cfg: Config) -> Hashable:\n    items = sorted(cfg.kwargs.items())\n    items.append(('num_warps', cfg.num_warps))\n    items.append(('num_stages', cfg.num_stages))\n    return tuple(items)",
    "docstring": "Convert triton config to a tuple that can uniquely identify it. We can use the return value as a dictionary key.",
    "type": "function",
    "file_path": "pytorch\\torch\\_inductor\\runtime\\runtime_utils.py",
    "ast_data": "FunctionDef name:triton_config_to_hashable arg:cfg arguments arg Assign Call Call Call Call Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "_tolerance",
    "source_code": "def _tolerance(X, tol):\n    if tol == 0:\n        return 0\n    if sp.issparse(X):\n        variances = mean_variance_axis(X, axis=0)[1]\n    else:\n        variances = np.var(X, axis=0)\n    return np.mean(variances) * tol",
    "docstring": "Return a tolerance which is dependent on the dataset.",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\cluster\\_kmeans.py",
    "ast_data": "FunctionDef name:_tolerance arg:X arg:tol arguments arg arg If Compare Return return:yes If Call Assign Call Assign Call Return return:yes Call"
  },
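For dense input the helper turns KMeans' relative `tol` into an absolute threshold by scaling with the mean per-feature variance; a minimal sketch:

```python
import numpy as np

X = np.random.default_rng(0).random((100, 4))
tol = 1e-4
# Absolute convergence threshold used internally for dense X:
abs_tol = np.mean(np.var(X, axis=0)) * tol
```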
  {
    "library": "pandas",
    "name": "_insert_update_mgr_locs",
    "source_code": "def _insert_update_mgr_locs(self, loc) -> None:\n    blknos = np.bincount(self.blknos[loc:]).nonzero()[0]\n    for blkno in blknos:\n        blk = self.blocks[blkno]\n        blk._mgr_locs = blk._mgr_locs.increment_above(loc)",
    "docstring": "When inserting a new Block at location 'loc', we increment all of the mgr_locs of blocks above that by one.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\internals\\managers.py",
    "ast_data": "FunctionDef name:_insert_update_mgr_locs arg:self arg:loc arguments arg arg Assign Call Call For Assign Assign Call"
  },
  {
    "library": "tensorflow",
    "name": "reset_from_seed",
    "source_code": "def reset_from_seed(self, seed):\n    state = create_rng_state(seed, self.algorithm)\n    self._state_var.assign(state)",
    "docstring": "Resets the generator by a new seed. See for the meaning of \"seed\". Args: seed: the new seed.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\stateful_random_ops.py",
    "ast_data": "FunctionDef name:reset_from_seed arg:self arg:seed arguments arg arg Assign Call Call"
  },
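A short usage sketch with the public `tf.random.Generator` API (assuming a recent TensorFlow): resetting to the same seed reproduces the stream.

```python
import tensorflow as tf

g = tf.random.Generator.from_seed(1234)
a = g.normal([3])
g.reset_from_seed(1234)  # rebuild the RNG state from the same seed
b = g.normal([3])
# a == b elementwise, since the generator state was reset
```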
  {
    "library": "pytorch",
    "name": "_cuda_lib_options",
    "source_code": "def _cuda_lib_options() -> list[str]:\n    _set_gpu_runtime_env()\n    from torch.utils import cpp_extension\n    lpaths = cpp_extension.library_paths(device_type='cuda')\n    if use_re_build():\n        lpaths += [build_paths.sdk_lib, os.path.join(build_paths.sdk_lib, 'stubs')]\n    extra_ldflags: list[str] = []\n    if is_linux():\n        _transform_cuda_paths(lpaths)\n        for path in lpaths:\n            if 'torch/lib' in path:\n                continue\n            extra_ldflags.extend([f'-L{path}', '-Xlinker', f'-rpath={path}'])\n        extra_ldflags.append('-lcuda')\n        extra_ldflags.append('-lcudart')\n    else:\n        raise NotImplementedError('Unsupported env, failed to find cuda libs! Currently only Linux is supported.')\n    return extra_ldflags",
    "docstring": "Util function for CUTLASS backend to find the correct CUDA libraries.",
    "type": "function",
    "file_path": "pytorch\\torch\\_inductor\\codecache.py",
    "ast_data": "FunctionDef name:_cuda_lib_options arguments Call Assign Call If Call Call If Call Call For If Compare Call Call Call Raise Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "run_restore_ops",
    "source_code": "def run_restore_ops(self, session=None):\n    if context.executing_eagerly():\n        return\n    if session is None:\n        session = get_session()\n    session.run(self._checkpoint.restore_ops, feed_dict=self._feed_dict)",
    "docstring": "Run operations to restore objects in the dependency graph.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\checkpoint\\checkpoint.py",
    "ast_data": "FunctionDef name:run_restore_ops arg:self arg:session arguments arg arg If Call Return return:no If Compare Assign Call Call"
  },
  {
    "library": "cherrypy",
    "name": "clear",
    "source_code": "def clear(self):\n    self.store = {}\n    self.expirations = {}\n    self.tot_puts = 0\n    self.tot_gets = 0\n    self.tot_hist = 0\n    self.tot_expires = 0\n    self.tot_non_modified = 0\n    self.cursize = 0",
    "docstring": "Reset the cache to its initial, empty state.",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\lib\\caching.py",
    "ast_data": "FunctionDef name:clear arg:self arguments arg Assign Assign Assign Assign Assign Assign Assign Assign"
  },
  {
    "library": "tensorflow",
    "name": "mean_reduce_helper",
    "source_code": "def mean_reduce_helper(v, axes=axis):\n    numer = math_ops.reduce_sum(v, axis=axes)\n\n    def dimension(axis):\n        if v.shape.rank is not None:\n            if axis < 0:\n                if axis + v.shape.rank < 0:\n                    raise ValueError('`axis` = %r out of range for `value` with rank %d' % (axis, v.shape.rank))\n                axis += v.shape.rank\n            elif axis >= v.shape.rank:\n                raise ValueError('`axis` = %r out of range for `value` with rank %d' % (axis, v.shape.rank))\n            dim = tensor_shape.dimension_value(v.shape[axis])\n            if dim is not None:\n                return array_ops.identity(constant_op.constant(dim, dtype=dtypes.int64))\n        elif axis < 0:\n            axis = axis + array_ops.rank(v)\n        return array_ops.identity(array_ops.shape_v2(v, out_type=dtypes.int64)[axis])\n    if isinstance(axis, six.integer_types):\n        denom = dimension(axis)\n    elif isinstance(axis, (tuple, list)):\n        denom = math_ops.reduce_prod([dimension(a) for a in axes])\n    else:\n        raise TypeError('Expected `axis` to be an integer, tuple or list not: %r' % axis)\n    return (numer, denom)",
    "docstring": "Computes the numerator and denominator on each replica.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\distribute_lib.py",
    "ast_data": "FunctionDef name:mean_reduce_helper arg:v arg:axes arguments arg arg Assign Call FunctionDef name:dimension arg:axis arguments arg If Compare If Compare If Compare Raise Call If Compare Raise Call Assign Call If Compare Return return:yes Call Call If Compare Assign Call Return return:yes Call Call If Call Assign Call If Call Assign Call Call Raise Call Return return:yes"
  },
  {
    "library": "numpy",
    "name": "_weights_are_valid",
    "source_code": "def _weights_are_valid(weights, a, axis):\n    wgt = np.asanyarray(weights)\n    if a.shape != wgt.shape:\n        if axis is None:\n            raise TypeError('Axis must be specified when shapes of a and weights differ.')\n        if wgt.shape != tuple((a.shape[ax] for ax in axis)):\n            raise ValueError('Shape of weights must be consistent with shape of a along specified axis.')\n        wgt = wgt.transpose(np.argsort(axis))\n        wgt = wgt.reshape(tuple((s if ax in axis else 1 for ax, s in enumerate(a.shape))))\n    return wgt",
    "docstring": "Validate weights array. We assume, weights is not None.",
    "type": "function",
    "file_path": "numpy\\numpy\\lib\\_function_base_impl.py",
    "ast_data": "FunctionDef name:_weights_are_valid arg:weights arg:a arg:axis arguments arg arg arg Assign Call If Compare If Compare Raise Call If Compare Call Raise Call Assign Call Call Assign Call Call Compare Call Return return:yes"
  },
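These validation rules surface through `np.average`: 1-D weights along the reduced axis are accepted and broadcast, while a shape mismatch without `axis` raises. A quick illustration:

```python
import numpy as np

a = np.arange(6.0).reshape(2, 3)
w = np.array([1.0, 2.0, 3.0])     # matches a.shape along axis=1
np.average(a, axis=1, weights=w)  # weights are transposed/reshaped internally

try:
    np.average(a, weights=w)      # no axis: shapes must match exactly
except TypeError as exc:
    print(exc)
```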
  {
    "library": "tensorflow",
    "name": "random_shuffle",
    "source_code": "@tf_export('random.shuffle', v1=['random.shuffle', 'random_shuffle'])\n@dispatch.add_dispatch_support\n@deprecation.deprecated_endpoints('random_shuffle')\ndef random_shuffle(value, seed=None, name=None):\n    seed1, seed2 = random_seed.get_seed(seed)\n    return gen_random_ops.random_shuffle(value, seed=seed1, seed2=seed2, name=name)",
    "docstring": "Randomly shuffles a tensor along its first dimension. The tensor is shuffled along dimension 0, such that each is mapped to one and only one . For example, a mapping that might occur for a 3x2 tensor is: Args: value: A Tensor to be shuffled. seed: A Python integer. Used to create a random seed for the distribution. See for behavior. name: A name for the operation (optional). Returns: A tensor of same shape and type as , shuffled along its first dimension.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\random_ops.py",
    "ast_data": "FunctionDef name:random_shuffle arg:value arg:seed arg:name arguments arg arg arg Assign Call Return return:yes Call Call Call"
  },
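Minimal usage sketch; rows are permuted along dimension 0 while each row keeps its contents:

```python
import tensorflow as tf

x = tf.constant([[1, 2], [3, 4], [5, 6]])
shuffled = tf.random.shuffle(x, seed=42)  # op-level seed for reproducibility
```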
  {
    "library": "pandas",
    "name": "dtype",
    "source_code": "@property\ndef dtype(self) -> CategoricalDtype:\n    return self._dtype",
    "docstring": "The :class: for this instance. See Also -------- astype : Cast argument to a specified dtype. CategoricalDtype : Type for categorical data. Examples -------- >>> cat = pd.Categorical([\"a\", \"b\"], ordered=True) >>> cat ['a', 'b'] Categories (2, object): ['a' >> cat.dtype CategoricalDtype(categories=['a', 'b'], ordered=True, categories_dtype=object)",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\arrays\\categorical.py",
    "ast_data": "FunctionDef name:dtype arg:self arguments arg Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "back",
    "source_code": "def back(self):\n    self.views[self.figure].back()\n    self.positions[self.figure].back()",
    "docstring": "Back one step in the stack of views and positions.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\backend_tools.py",
    "ast_data": "FunctionDef name:back arg:self arguments arg Call Call"
  },
  {
    "library": "pandas",
    "name": "_parse_thead_tbody_tfoot",
    "source_code": "def _parse_thead_tbody_tfoot(self, table_html):\n    header_rows = self._parse_thead_tr(table_html)\n    body_rows = self._parse_tbody_tr(table_html)\n    footer_rows = self._parse_tfoot_tr(table_html)\n\n    def row_is_all_th(row):\n        return all((self._equals_tag(t, 'th') for t in self._parse_td(row)))\n    if not header_rows:\n        while body_rows and row_is_all_th(body_rows[0]):\n            header_rows.append(body_rows.pop(0))\n    header, rem = self._expand_colspan_rowspan(header_rows, section='header')\n    body, rem = self._expand_colspan_rowspan(body_rows, section='body', remainder=rem, overflow=len(footer_rows) > 0)\n    footer, _ = self._expand_colspan_rowspan(footer_rows, section='footer', remainder=rem, overflow=False)\n    return (header, body, footer)",
    "docstring": "Given a table, return parsed header, body, and foot. Parameters ---------- table_html : node-like Returns ------- tuple of (header, body, footer), each a list of list-of-text rows. Notes ----- Header and body are lists-of-lists. Top level list is a list of rows. Each row is a list of str text. Logic: Use , , elements to identify header, body, and footer, otherwise: - Put all rows into body - Move rows from top of body to header only if all elements inside row are - Move rows from bottom of body to footer only if all elements inside row are",
    "type": "method",
    "file_path": "pandas\\pandas\\io\\html.py",
    "ast_data": "FunctionDef name:_parse_thead_tbody_tfoot arg:self arg:table_html arguments arg arg Assign Call Assign Call Assign Call FunctionDef name:row_is_all_th arg:row arguments arg Return return:yes Call Call Call If While BoolOp Call Call Call Assign Call Assign Call Compare Call Assign Call Return return:yes"
  },
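The header-promotion rule (all-`<th>` rows at the top of the body become the header) is observable through the public `pd.read_html`; this sketch assumes an HTML parser such as lxml is installed:

```python
import pandas as pd
from io import StringIO

html = """<table>
  <tr><th>a</th><th>b</th></tr>   <!-- all-<th> row: promoted to header -->
  <tr><td>1</td><td>2</td></tr>
</table>"""
df = pd.read_html(StringIO(html))[0]
df.columns.tolist()  # ['a', 'b']
```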
  {
    "library": "tensorflow",
    "name": "adjust_brightness",
    "source_code": "@tf_export('image.adjust_brightness')\n@dispatch.register_unary_elementwise_api\n@dispatch.add_dispatch_support\ndef adjust_brightness(image, delta):\n    with ops.name_scope(None, 'adjust_brightness', [image, delta]) as name:\n        image = ops.convert_to_tensor(image, name='image')\n        orig_dtype = image.dtype\n        if orig_dtype in [dtypes.float16, dtypes.float32]:\n            flt_image = image\n        else:\n            flt_image = convert_image_dtype(image, dtypes.float32)\n        adjusted = math_ops.add(flt_image, math_ops.cast(delta, flt_image.dtype), name=name)\n        return convert_image_dtype(adjusted, orig_dtype, saturate=True)",
    "docstring": "Adjust the brightness of RGB or Grayscale images. This is a convenience method that converts RGB images to float representation, adjusts their brightness, and then converts them back to the original data type. If several adjustments are chained, it is advisable to minimize the number of redundant conversions. The value is added to all components of the tensor . is converted to and scaled appropriately if it is in fixed-point representation, and is converted to the same data type. For regular images, should be in the range , as it is added to the image in floating point representation, where pixel values are in the range. Usage Example: >>> x = [[[1.0, 2.0, 3.0], ... [4.0, 5.0, 6.0]], ... [[7.0, 8.0, 9.0], ... [10.0, 11.0, 12.0]]] >>> tf.image.adjust_brightness(x, delta=0.1) Args: image: RGB image or images to adjust. delta: A scalar. Amount to add to the pixel values. Returns: A brightness-adjusted tensor of the same shape and type as .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\image_ops_impl.py",
    "ast_data": "FunctionDef name:adjust_brightness arg:image arg:delta arguments arg arg With Call Assign Call Assign If Compare Assign Assign Call Assign Call Call Return return:yes Call Call"
  },
  {
    "library": "scipy",
    "name": "_ebrahimi_entropy",
    "source_code": "def _ebrahimi_entropy(X, m, *, xp):\n    n = X.shape[-1]\n    X = _pad_along_last_axis(X, m, xp=xp)\n    differences = X[..., 2 * m:] - X[..., :-2 * m]\n    i = xp.arange(1, n + 1, dtype=X.dtype)\n    ci = xp.where(i <= m, 1 + (i - 1) / m, 2.0)\n    cond = i >= n - m + 1\n    ci = xpx.at(ci, cond).set(1 + (n - i[cond]) / m)\n    logs = xp.log(n * differences / (ci * m))\n    return xp.mean(logs, axis=-1)",
    "docstring": "Compute the Ebrahimi estimator as described in [6].",
    "type": "function",
    "file_path": "scipy\\scipy\\stats\\_entropy.py",
    "ast_data": "FunctionDef name:_ebrahimi_entropy arg:X arg:m arguments arg arg arg Assign Assign Call Assign Assign Call Assign Call Compare Assign Compare Assign Call Call Assign Call Return return:yes Call"
  },
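This estimator is reachable through the public `scipy.stats.differential_entropy` with `method='ebrahimi'`:

```python
import numpy as np
from scipy.stats import differential_entropy

x = np.random.default_rng(0).standard_normal(1000)
h = differential_entropy(x, method='ebrahimi')
# For a standard normal the true value is 0.5 * log(2 * pi * e) ~ 1.4189
```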
  {
    "library": "scipy",
    "name": "_get_support",
    "source_code": "def _get_support(self, *args):\n    return (self.a, self.b)",
    "docstring": "Return the support of the (unscaled, unshifted) distribution. Parameters ---------- arg1, arg2, ... : array_like The shape parameter(s) for the distribution (see docstring of the instance object for more information). Returns ------- a, b : numeric (float, or int or +/-np.inf) end-points of the distribution's support.",
    "type": "method",
    "file_path": "scipy\\scipy\\stats\\_distn_infrastructure.py",
    "ast_data": "FunctionDef name:_get_support arg:self arguments arg arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_output_mask",
    "source_code": "def _output_mask(op, input: Tensor, *args, **kwargs) -> Tensor:\n    if callable(op):\n        is_reduction = op.__name__ in {'sum', 'prod', 'amax', 'amin', 'argmax', 'argmin', 'mean', 'median', 'norm', 'var', 'std', 'logsumexp'}\n        is_normalization = op.__name__ in {'softmax', 'log_softmax', 'softmin', 'normalize', 'cumsum', 'cumprod'}\n        if is_reduction:\n            if op.__name__ == 'norm':\n                if args:\n                    args = args[1:]\n            dim = args[0] if args else kwargs.get('dim')\n            outmask = _input_mask(input, *args, **kwargs)\n            keepdim = kwargs.get('keepdim', False)\n            dim_ = _canonical_dim(dim, input.ndim)\n            return _any(outmask, dim_, bool(keepdim))\n        elif is_normalization:\n            return _input_mask(input, *args, **kwargs)\n        else:\n            raise ValueError(f'_output_mask expected masked operation (got callable {op.__module__}.{op.__name__})')\n    else:\n        raise ValueError(f'_output_mask expected masked operation (got {type(op).__name__} object)')",
    "docstring": "Return output mask of masked operation applied to given arguments.",
    "type": "function",
    "file_path": "pytorch\\torch\\masked\\_ops.py",
    "ast_data": "FunctionDef name:_output_mask arg:op arg:input arguments arg arg arg arg If Call Assign Compare Assign Compare If If Compare If Assign Assign Call Assign Call Assign Call Assign Call Return return:yes Call Call If Return return:yes Call Raise Call Raise Call Call"
  },
  {
    "library": "cherrypy",
    "name": "data",
    "source_code": "@cherrypy.expose\ndef data(self):\n    s = extrapolate_statistics(logging.statistics)\n    cherrypy.response.headers['Content-Type'] = 'application/json'\n    return json.dumps(s, sort_keys=True, indent=4).encode('utf-8')",
    "docstring": "Render statistics as JSON.",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\lib\\cpstats.py",
    "ast_data": "FunctionDef name:data arg:self arguments arg Assign Call Assign Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "_decompose_scatter_functional",
    "source_code": "def _decompose_scatter_functional(graph: torch.fx.Graph, node: torch.fx.Node) -> torch.fx.Node:\n    assert node.target is _generalized_scatter\n    return _decompose_scatter_functional_helper(graph, *node.args)",
    "docstring": "Decompose _generalized_scatter to a sequence of view_scatter operations e.g. _generalized_scatter(inp, src, [(aten.slice, 0, 0, 10), (aten.slice, 1, 10, -10)]) will become view = aten.slice(inp, 0, 0, 10) view_updated = aten.slice_scatter(view, src, 1, 10, -10) inp_updated = aten.slice_scatter(inp, view_updated, 0, 0, 10)",
    "type": "function",
    "file_path": "pytorch\\torch\\_inductor\\fx_passes\\reinplace.py",
    "ast_data": "FunctionDef name:_decompose_scatter_functional arg:graph arg:node arguments arg arg Compare Return return:yes Call"
  },
  {
    "library": "django",
    "name": "_raw_delete",
    "source_code": "def _raw_delete(self, using):\n    query = self.query.clone()\n    query.__class__ = sql.DeleteQuery\n    return query.get_compiler(using).execute_sql(ROW_COUNT)",
    "docstring": "Delete objects found from the given queryset in single direct SQL query. No signals are sent and there is no protection for cascades.",
    "type": "method",
    "file_path": "django\\django\\db\\models\\query.py",
    "ast_data": "FunctionDef name:_raw_delete arg:self arg:using arguments arg arg Assign Call Assign Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, caption, content, enabled=True):\n    self._caption = caption\n    self._content = content\n    self._enabled = enabled",
    "docstring": "Menu constructor. TODO(cais): Nested menu is currently not supported. Support it. Args: caption: (str) caption of the menu item. content: Content of the menu item. For a menu item that triggers a command, for example, content is the command string. enabled: (bool) whether this menu item is enabled.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\debug\\cli\\debugger_cli_common.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:caption arg:content arg:enabled arguments arg arg arg arg Assign Assign Assign"
  },
  {
    "library": "scipy",
    "name": "trstlp",
    "source_code": "def trstlp(A, b, delta, g):\n    num_constraints = A.shape[1]\n    num_vars = A.shape[0]\n    if DEBUGGING:\n        assert num_vars >= 1\n        assert num_constraints >= 0\n        assert np.size(g) == num_vars\n        assert np.size(b) == num_constraints\n        assert delta > 0\n    vmultc = np.zeros(num_constraints + 1)\n    iact = np.zeros(num_constraints + 1, dtype=int)\n    nact = 0\n    d = np.zeros(num_vars)\n    z = np.zeros((num_vars, num_vars))\n    A_aug = np.hstack([A, g.reshape((num_vars, 1))])\n    b_aug = np.hstack([b, 0])\n    for i in range(num_constraints + 1):\n        if (maxval := max(abs(A_aug[:, i]))) > 1000000000000.0:\n            modscal = max(2 * REALMIN, 1 / maxval)\n            A_aug[:, i] *= modscal\n            b_aug[i] *= modscal\n    iact[:num_constraints], nact, d, vmultc[:num_constraints], z = trstlp_sub(iact[:num_constraints], nact, 1, A_aug[:, :num_constraints], b_aug[:num_constraints], delta, d, vmultc[:num_constraints], z)\n    iact, nact, d, vmultc, z = trstlp_sub(iact, nact, 2, A_aug, b_aug, delta, d, vmultc, z)\n    if DEBUGGING:\n        assert all(np.isfinite(d))\n        assert np.linalg.norm(d) <= 2 * delta\n    return d",
    "docstring": "This function calculated an n-component vector d by the following two stages. In the first stage, d is set to the shortest vector that minimizes the greatest violation of the constraints A.T @ D = B. In other words, the A and B in our implementation are the negative of those in Powell's implementation. 1. The algorithm was NOT documented in the COBYLA paper. A note should be written to introduce it! 2. As a major part of the algorithm (see trstlp_sub), the code maintains and updates the QR factorization of A[iact[:nact]], i.e. the gradients of all the active (linear) constraints. The matrix Z is indeed Q, and the vector zdota is the diagonal of R. The factorization is updated by Givens rotations when an index is added in or removed from iact. 3. There are probably better algorithms available for the trust-region linear programming problem.",
    "type": "function",
    "file_path": "scipy\\scipy\\_lib\\pyprima\\pyprima\\src\\pyprima\\cobyla\\trustregion.py",
    "ast_data": "FunctionDef name:trstlp arg:A arg:b arg:delta arg:g arguments arg arg arg arg Assign Assign If Compare Compare Compare Call Compare Call Compare Assign Call Assign Call Assign Assign Call Assign Call Assign Call Call Assign Call For Call If Compare Call Call Assign Call Assign Call Assign Call If Call Call Compare Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "Empty",
    "source_code": "@dataclasses.dataclass\nclass Empty(AllocationTreeNode):\n    size_hint: int\n\n    def get_live_ranges(self):\n        return LiveRanges([])\n\n    def get_size_hint(self):\n        return self.size_hint\n\n    def get_symbolic_size(self):\n        return 0\n\n    def is_empty(self):\n        return True",
    "docstring": "Placeholder to represent empty space in the allocation pool. Only exists to get the size_hint correct in parent nodes.",
    "type": "class",
    "file_path": "pytorch\\torch\\_inductor\\codegen\\memory_planning.py",
    "ast_data": "ClassDef name:Empty FunctionDef name:get_live_ranges arg:self arguments arg Return return:yes Call FunctionDef name:get_size_hint arg:self arguments arg Return return:yes FunctionDef name:get_symbolic_size arg:self arguments arg Return return:yes FunctionDef name:is_empty arg:self arguments arg Return return:yes"
  },
  {
    "library": "scrapy",
    "name": "BaseScheduler",
    "source_code": "class BaseScheduler(metaclass=BaseSchedulerMeta):\n\n    @classmethod\n    def from_crawler(cls, crawler: Crawler) -> Self:\n        return cls()\n\n    def open(self, spider: Spider) -> Deferred[None] | None:\n        pass\n\n    def close(self, reason: str) -> Deferred[None] | None:\n        pass\n\n    @abstractmethod\n    def has_pending_requests(self) -> bool:\n        raise NotImplementedError\n\n    @abstractmethod\n    def enqueue_request(self, request: Request) -> bool:\n        raise NotImplementedError\n\n    @abstractmethod\n    def next_request(self) -> Request | None:\n        raise NotImplementedError",
    "docstring": "The scheduler component is responsible for storing requests received from the engine, and feeding them back upon request (also to the engine). The original sources of said requests are: * Spider: `request-order`. The methods defined in this class constitute the minimal interface that the Scrapy engine will interact with.",
    "type": "class",
    "file_path": "scrapy\\scrapy\\core\\scheduler.py",
    "ast_data": "ClassDef name:BaseScheduler FunctionDef name:from_crawler arg:cls arg:crawler arguments arg arg Return return:yes Call FunctionDef name:open arg:self arg:spider arguments arg arg FunctionDef name:close arg:self arg:reason arguments arg arg FunctionDef name:has_pending_requests arg:self arguments arg Raise FunctionDef name:enqueue_request arg:self arg:request arguments arg arg Raise FunctionDef name:next_request arg:self arguments arg Raise"
  },
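A hypothetical minimal subclass may clarify the interface; `FifoScheduler` is illustrative, not part of Scrapy:

```python
from collections import deque

from scrapy.core.scheduler import BaseScheduler

class FifoScheduler(BaseScheduler):
    """Illustrative FIFO scheduler implementing the three abstract methods."""

    def __init__(self):
        self._queue = deque()

    def has_pending_requests(self):
        return bool(self._queue)

    def enqueue_request(self, request):
        self._queue.append(request)
        return True  # request accepted

    def next_request(self):
        return self._queue.popleft() if self._queue else None
```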
  {
    "library": "django",
    "name": "get_extra_restriction",
    "source_code": "def get_extra_restriction(self, alias, related_alias):\n    return None",
    "docstring": "Return a pair condition used for joining and subquery pushdown. The condition is something that responds to as_sql(compiler, connection) method. Note that currently referring both the 'alias' and 'related_alias' will not work in some conditions, like subquery pushdown. A parallel method is get_extra_descriptor_filter() which is used in instance.fieldname related object fetching.",
    "type": "method",
    "file_path": "django\\django\\db\\models\\fields\\related.py",
    "ast_data": "FunctionDef name:get_extra_restriction arg:self arg:alias arg:related_alias arguments arg arg arg Return return:no"
  },
  {
    "library": "tensorflow",
    "name": "_sparse_false_positive_at_k",
    "source_code": "def _sparse_false_positive_at_k(labels, predictions_idx, class_id=None, weights=None):\n    with ops.name_scope(None, 'false_positives', (predictions_idx, labels, weights)):\n        labels, predictions_idx = _maybe_select_class_id(labels, predictions_idx, class_id)\n        fp = sets.set_size(sets.set_difference(predictions_idx, labels, aminusb=True))\n        fp = math_ops.cast(fp, dtypes.float64)\n        if weights is not None:\n            with ops.control_dependencies((weights_broadcast_ops.assert_broadcastable(weights, fp),)):\n                weights = math_ops.cast(weights, dtypes.float64)\n                fp = math_ops.multiply(fp, weights)\n        return fp",
    "docstring": "Calculates false positives for precision@k. If is specified, calculate binary true positives for only. If is not specified, calculate metrics for predicted vs label classes, where is the 2nd dimension of . Args: labels: or with shape [D1, ... DN, num_labels], where N >= 1 and num_labels is the number of target classes for the associated prediction. Commonly, N=1 and has shape [batch_size, num_labels]. [D1, ... DN] must match . predictions_idx: 1-D or higher with last dimension , top predicted classes. For rank , the first dimensions must match . class_id: Class for which we want binary metrics. weights: whose rank is either 0, or n-1, where n is the rank of . If the latter, it must be broadcastable to (i.e., all dimensions must be either , or the same as the corresponding dimension). Returns: A [D1, ... DN] of false positive counts.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\metrics_impl.py",
    "ast_data": "FunctionDef name:_sparse_false_positive_at_k arg:labels arg:predictions_idx arg:class_id arg:weights arguments arg arg arg arg With Call Assign Call Assign Call Call Assign Call If Compare With Call Call Assign Call Assign Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "get_bbox_to_anchor",
    "source_code": "def get_bbox_to_anchor(self):\n    if self._bbox_to_anchor is None:\n        return self.parent.bbox\n    else:\n        return self._bbox_to_anchor",
    "docstring": "Return the bbox that the legend will be anchored to.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\legend.py",
    "ast_data": "FunctionDef name:get_bbox_to_anchor arg:self arguments arg If Compare Return return:yes Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "_binary_roc_auc_score",
    "source_code": "def _binary_roc_auc_score(y_true, y_score, sample_weight=None, max_fpr=None):\n    if len(np.unique(y_true)) != 2:\n        warnings.warn('Only one class is present in y_true. ROC AUC score is not defined in that case.', UndefinedMetricWarning)\n        return np.nan\n    fpr, tpr, _ = roc_curve(y_true, y_score, sample_weight=sample_weight)\n    if max_fpr is None or max_fpr == 1:\n        return auc(fpr, tpr)\n    if max_fpr <= 0 or max_fpr > 1:\n        raise ValueError('Expected max_fpr in range (0, 1], got: %r' % max_fpr)\n    stop = np.searchsorted(fpr, max_fpr, 'right')\n    x_interp = [fpr[stop - 1], fpr[stop]]\n    y_interp = [tpr[stop - 1], tpr[stop]]\n    tpr = np.append(tpr[:stop], np.interp(max_fpr, x_interp, y_interp))\n    fpr = np.append(fpr[:stop], max_fpr)\n    partial_auc = auc(fpr, tpr)\n    min_area = 0.5 * max_fpr ** 2\n    max_area = max_fpr\n    return 0.5 * (1 + (partial_auc - min_area) / (max_area - min_area))",
    "docstring": "Binary roc auc score.",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\metrics\\_ranking.py",
    "ast_data": "FunctionDef name:_binary_roc_auc_score arg:y_true arg:y_score arg:sample_weight arg:max_fpr arguments arg arg arg arg If Compare Call Call Call Return return:yes Assign Call If BoolOp Compare Compare Return return:yes Call If BoolOp Compare Compare Raise Call Assign Call Assign Assign Assign Call Call Assign Call Assign Call Assign Assign Return return:yes"
  },
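The McClish-standardized partial AUC computed in the tail of this helper is exposed through the public `roc_auc_score` via `max_fpr`:

```python
from sklearn.metrics import roc_auc_score

y_true = [0, 0, 1, 1]
y_score = [0.1, 0.4, 0.35, 0.8]
roc_auc_score(y_true, y_score)               # full AUC: 0.75
roc_auc_score(y_true, y_score, max_fpr=0.5)  # partial AUC over FPR in (0, 0.5], standardized
```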
  {
    "library": "pytorch",
    "name": "joint_fwd_bwd",
    "source_code": "@torch.enable_grad()\ndef joint_fwd_bwd(fn: Callable[..., Any], args: Sequence[Any]) -> torch.fx.GraphModule:\n    gm: Optional[torch.fx.GraphModule] = None\n\n    def record_joint_graph(joint_graph: torch.fx.GraphModule, inputs: Sequence[Any], **kwargs: Any) -> tuple[torch.fx.GraphModule, torch.fx.GraphModule]:\n        nonlocal gm\n        assert not gm\n        gm = clone_graph(joint_graph)\n        return default_partition(joint_graph, inputs, **kwargs)\n    with torch._guards.tracing(None):\n        aot_function(fn, lambda g, i: make_boxed_func(g), partition_fn=record_joint_graph, decompositions=select_decomp_table(), keep_inference_input_mutations=True, enable_log=False)(*args)\n    assert gm\n    from .fx_passes.post_grad import remove_noop_ops\n    remove_noop_ops(gm.graph)\n    from .fx_passes.joint_graph import pointless_view\n    matcher_pass = PatternMatcherPass()\n    pattern = CallFunction(torch.ops.aten.view.default, KeywordArg('arg'), KeywordArg('size'))\n    GraphPatternEntry(pattern=pattern, handler=pointless_view, extra_check=_return_true).register(matcher_pass.patterns)\n    matcher_pass.apply(gm.graph)\n    gm.graph._codegen = torch.fx.graph.CodeGen()\n    gm.graph.eliminate_dead_code()\n    gm.recompile()\n    return gm",
    "docstring": "Build a normalized training graph, for use with fx_to_pattern",
    "type": "function",
    "file_path": "pytorch\\torch\\_inductor\\pattern_matcher.py",
    "ast_data": "FunctionDef name:joint_fwd_bwd arg:fn arg:args arguments arg arg FunctionDef name:record_joint_graph arg:joint_graph arg:inputs arguments arg arg arg Assign Call Return return:yes Call With Call Call Call arguments arg arg Call Call Call Assign Call Assign Call Call Call Call Call Call Assign Call Call Call Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "set_frame_props",
    "source_code": "def set_frame_props(self, props):\n    _api.check_isinstance(dict, props=props)\n    if 's' in props:\n        props['sizes'] = np.broadcast_to(props.pop('s'), len(self.labels))\n    self._frames.update(props)",
    "docstring": "Set properties of the check button frames. .. versionadded:: 3.7 Parameters ---------- props : dict Dictionary of properties to be used for the check button frames.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\widgets.py",
    "ast_data": "FunctionDef name:set_frame_props arg:self arg:props arguments arg arg Call If Compare Assign Call Call Call Call"
  },
  {
    "library": "numpy",
    "name": "hermgrid2d",
    "source_code": "def hermgrid2d(x, y, c):\n    return pu._gridnd(hermval, c, x, y)",
    "docstring": "Evaluate a 2-D Hermite series on the Cartesian product of x and y. This function returns the values: .. math:: p(a,b) = \\sum_{i,j} c_{i,j} * H_i(a) * H_j(b) where the points `axbyxyxyxyccxyxycxy`. See Also -------- hermval, hermval2d, hermval3d, hermgrid3d Examples -------- >>> from numpy.polynomial.hermite import hermgrid2d >>> x = [1, 2, 3] >>> y = [4, 5] >>> c = [[1, 2, 3], [4, 5, 6]] >>> hermgrid2d(x, y, c) array([[1035., 1599.], [1867., 2883.], [2699., 4167.]])",
    "type": "function",
    "file_path": "numpy\\numpy\\polynomial\\hermite.py",
    "ast_data": "FunctionDef name:hermgrid2d arg:x arg:y arg:c arguments arg arg arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "process_grouped_nodes",
    "source_code": "def process_grouped_nodes(self) -> None:\n    new_nodes: list[BaseSchedulerNode] = []\n    for node in self.nodes:\n        new_nodes.extend(node.unpack() if isinstance(node, GroupedSchedulerNode) else [node])\n    self.nodes = new_nodes",
    "docstring": "Unpack GroupedSchedulerNode into regular nodes.",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\scheduler.py",
    "ast_data": "FunctionDef name:process_grouped_nodes arg:self arguments arg For Call Call Call Assign"
  },
  {
    "library": "tensorflow",
    "name": "_create_host_array",
    "source_code": "def _create_host_array(self, shape, host_id):\n    num_global_devices = np.prod(shape)\n    global_device_ids = np.arange(num_global_devices).reshape(shape)\n    local_device_list = [tf_device.DeviceSpec(job=config.full_job_name(), device_type='CPU', device_index=0)]\n    num_local_devices = len(local_device_list)\n    local_device_ids = [x + host_id * num_local_devices for x in range(num_local_devices)]\n    return (global_device_ids, local_device_ids, local_device_list)",
    "docstring": "Returns ID and device lists that can be used to create a host mesh.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\dtensor\\python\\dtensor_device.py",
    "ast_data": "FunctionDef name:_create_host_array arg:self arg:shape arg:host_id arguments arg arg arg Assign Call Assign Call Call Assign Call Call Assign Call Assign Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "state_dict",
    "source_code": "def state_dict(self) -> dict[str, Any]:\n    if self._enabled:\n        return {'scale': self.get_scale(), 'growth_factor': self._growth_factor, 'backoff_factor': self._backoff_factor, 'growth_interval': self._growth_interval, '_growth_tracker': self._get_growth_tracker()}\n    return {}",
    "docstring": "Return the state of the scaler as a :class:. It contains five entries: * `state_dictupdate`.",
    "type": "method",
    "file_path": "pytorch\\torch\\amp\\grad_scaler.py",
    "ast_data": "FunctionDef name:state_dict arg:self arguments arg If Return return:yes Call Call Return return:no"
  },
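Typical checkpointing pattern, assuming a recent PyTorch where the scaler lives at `torch.amp.GradScaler` (older releases spell it `torch.cuda.amp.GradScaler`):

```python
import torch

scaler = torch.amp.GradScaler('cuda', enabled=torch.cuda.is_available())
# ... per-step: scaler.scale(loss).backward(); scaler.step(opt); scaler.update()
ckpt = {'scaler': scaler.state_dict()}  # call after update(), per the note above

restored = torch.amp.GradScaler('cuda', enabled=torch.cuda.is_available())
restored.load_state_dict(ckpt['scaler'])
```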
  {
    "library": "matplotlib",
    "name": "set",
    "source_code": "def set(self, **kwargs):\n    self._construct.update(kwargs)\n    self._update_rrule(**self._construct)",
    "docstring": "Set parameters for an existing wrapper.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\dates.py",
    "ast_data": "FunctionDef name:set arg:self arguments arg arg Call Call"
  },
  {
    "library": "scipy",
    "name": "_munp",
    "source_code": "def _munp(self, n, a, b):\n\n    def nth_moment(n_k, a_k, b_k):\n        num = (a_k + b_k) ** (0.5 * n_k)\n        denom = 2 ** n_k * sc.beta(a_k, b_k)\n        indices = np.arange(n_k + 1)\n        sgn = np.where(indices % 2 > 0, -1, 1)\n        d = sc.beta(a_k + 0.5 * n_k - indices, b_k - 0.5 * n_k + indices)\n        sum_terms = sc.comb(n_k, indices) * sgn * d\n        return num / denom * sum_terms.sum()\n    nth_moment_valid = (a > 0.5 * n) & (b > 0.5 * n) & (n >= 0)\n    return xpx.apply_where(nth_moment_valid, (n, a, b), np.vectorize(nth_moment, otypes=[np.float64]), fill_value=np.nan)",
    "docstring": "Returns the n-th moment(s) where all the following hold: - n >= 0 - a > n / 2 - b > n / 2 The result is np.nan in all other cases.",
    "type": "method",
    "file_path": "scipy\\scipy\\stats\\_continuous_distns.py",
    "ast_data": "FunctionDef name:_munp arg:self arg:n arg:a arg:b arguments arg arg arg arg FunctionDef name:nth_moment arg:n_k arg:a_k arg:b_k arguments arg arg arg Assign Assign Call Assign Call Assign Call Compare Assign Call Assign Call Return return:yes Call Assign Compare Compare Compare Return return:yes Call Call"
  },
  {
    "library": "scikit-learn",
    "name": "fit",
    "source_code": "@_fit_context(prefer_skip_nested_validation=True)\ndef fit(self, X, y=None):\n    X = validate_data(self, X)\n    if self.assume_centered:\n        self.location_ = np.zeros(X.shape[1])\n    else:\n        self.location_ = X.mean(0)\n    covariance = empirical_covariance(X, assume_centered=self.assume_centered)\n    covariance = shrunk_covariance(covariance, self.shrinkage)\n    self._set_covariance(covariance)\n    return self",
    "docstring": "Fit the shrunk covariance model to X. Parameters ---------- X : array-like of shape (n_samples, n_features) Training data, where is the number of samples and is the number of features. y : Ignored Not used, present for API consistency by convention. Returns ------- self : object Returns the instance itself.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\covariance\\_shrunk_covariance.py",
    "ast_data": "FunctionDef name:fit arg:self arg:X arg:y arguments arg arg arg Assign Call If Assign Call Assign Call Assign Call Assign Call Call Return return:yes Call"
  },
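Minimal end-to-end usage of the public estimator:

```python
import numpy as np
from sklearn.covariance import ShrunkCovariance

X = np.random.RandomState(0).randn(200, 5)
cov = ShrunkCovariance(shrinkage=0.1).fit(X)
cov.covariance_.shape  # (5, 5): empirical covariance shrunk toward a scaled identity
```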
  {
    "library": "cherrypy",
    "name": "average_uriset_time",
    "source_code": "def average_uriset_time(s):\n    return s['Count'] and s['Sum'] / s['Count'] or 0",
    "docstring": "Compute average request processing time within a URI set.",
    "type": "function",
    "file_path": "cherrypy\\cherrypy\\lib\\cpstats.py",
    "ast_data": "FunctionDef name:average_uriset_time arg:s arguments arg Return return:yes BoolOp BoolOp"
  },
  {
    "library": "pytorch",
    "name": "load_pybinding_async",
    "source_code": "@classmethod\ndef load_pybinding_async(cls, argtypes: Sequence[str], main_code: str, device_type: str='cpu', num_outputs: int=-1, submit_fn: Any=None, extra_flags: Sequence[str]=(), kernel_code: Optional[str]=None) -> Any:\n    parseargs = ', '.join((f'parse_arg<{argtype.replace('const ', '')}>(args, {n})' for n, argtype in enumerate(argtypes)))\n    suffix = cls.suffix_template.format(arg_len=len(argtypes), call_entry_func=cls.call_entry_function.format(parseargs), entry_func=cls.entry_function, extra_parse_arg=cls.extra_parse_arg.format(array_len=num_outputs))\n    get_result = cls.load_async(main_code + suffix, device_type, submit_fn=submit_fn, extra_flags=extra_flags, optimized_code=kernel_code)\n    result = None\n\n    def future() -> Any:\n        nonlocal result\n        if result is None:\n            result = get_result()\n            assert isinstance(result, ModuleType)\n        return getattr(result, cls.entry_function)\n    return future",
    "docstring": "Wrap a C++ function in fast Python bindings. Args: argtypes: The types of args to ENTRY_FUNCTION(), e.g. [\"float*\", \"long\"] main_code: C++ source code containing ENTRY_FUNCTION(). Will be built at -O3 if kernel_code is None (to maximize performance in any kernels that are present), or -O1 otherwise (to minimize compile time). kernel_code: If present, C++ source code that will be built at -O3 and linked to main_code. Returns: A python version of ENTRY_FUNCTION()",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\codecache.py",
    "ast_data": "FunctionDef name:load_pybinding_async arg:cls arg:argtypes arg:main_code arg:device_type arg:num_outputs arg:submit_fn arg:extra_flags arg:kernel_code arguments arg arg arg arg arg arg arg arg Assign Call Call Call Assign Call Call Call Call Assign Call Assign FunctionDef name:future arguments If Compare Assign Call Call Return return:yes Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_init_default_fully_shard_mesh",
    "source_code": "def _init_default_fully_shard_mesh() -> DeviceMesh:\n    if not dist.distributed_c10d.is_initialized():\n        dist.distributed_c10d.init_process_group()\n    default_pg = dist.distributed_c10d._get_default_group()\n    device = torch._C._get_accelerator()\n    mesh = init_device_mesh(device.type, mesh_shape=(default_pg.size(),))\n    return mesh",
    "docstring": "Default to global CUDA mesh if possible else global CPU mesh.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\fsdp\\_fully_shard\\_fsdp_init.py",
    "ast_data": "FunctionDef name:_init_default_fully_shard_mesh arguments If Call Call Assign Call Assign Call Assign Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "get_arg_info_from_tensor_fqn",
    "source_code": "def get_arg_info_from_tensor_fqn(model: nn.Module, tensor_fqn: str) -> dict[str, Any]:\n    tensor_name = tensor_fqn.split('.')[-1]\n    module_fqn = tensor_fqn[:-len(tensor_name) - ('.' in tensor_fqn)]\n    module = fqn_to_module(model, module_fqn)\n    return {'module_fqn': module_fqn, 'module': module, 'tensor_name': tensor_name, 'tensor_fqn': tensor_fqn}",
    "docstring": "Uses tensor_fqn to obtain a dict containing module_fqn, module and tensor_name",
    "type": "function",
    "file_path": "pytorch\\torch\\ao\\pruning\\sparsifier\\utils.py",
    "ast_data": "FunctionDef name:get_arg_info_from_tensor_fqn arg:model arg:tensor_fqn arguments arg arg Assign Call Assign Call Compare Assign Call Return return:yes"
  },
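A standalone sketch of the fqn split, with `nn.Module.get_submodule` standing in for the module's `fqn_to_module` helper (an assumption for illustration):

```python
import torch.nn as nn

model = nn.Sequential(nn.Linear(4, 4))
tensor_fqn = '0.weight'

tensor_name = tensor_fqn.split('.')[-1]                            # 'weight'
# Strip the tensor name, plus the trailing '.' when one is present:
module_fqn = tensor_fqn[:-len(tensor_name) - ('.' in tensor_fqn)]  # '0'
module = model.get_submodule(module_fqn)                           # the nn.Linear
```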
  {
    "library": "django",
    "name": "z",
    "source_code": "def z(self):\n    return self.data.timetuple().tm_yday",
    "docstring": "Day of the year, i.e. 1 to 366.",
    "type": "method",
    "file_path": "django\\django\\utils\\dateformat.py",
    "ast_data": "FunctionDef name:z arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "from_float",
    "source_code": "@staticmethod\ndef from_float(cls, mod, use_precomputed_fake_quant=False):\n    assert type(mod) == cls._FLOAT_MODULE, 'qat.' + cls.__name__ + '.from_float only works for ' + cls._FLOAT_MODULE.__name__\n    assert hasattr(mod, 'qconfig'), 'Input float module must have qconfig defined'\n    assert mod.qconfig, 'Input float module must have a valid qconfig'\n    if issubclass(type(mod), _FusedModule):\n        mod = mod[0]\n    qconfig = mod.qconfig\n    qat_conv = cls(mod.in_channels, mod.out_channels, mod.kernel_size, stride=mod.stride, padding=mod.padding, dilation=mod.dilation, groups=mod.groups, bias=mod.bias is not None, padding_mode=mod.padding_mode, qconfig=qconfig)\n    qat_conv.weight = mod.weight\n    qat_conv.bias = mod.bias\n    return qat_conv",
    "docstring": "Create a qat module from a float module Args: : a float module, either produced by torch.ao.quantization utilities or directly from user",
    "type": "method",
    "file_path": "pytorch\\torch\\ao\\nn\\qat\\modules\\conv.py",
    "ast_data": "FunctionDef name:from_float arg:cls arg:mod arg:use_precomputed_fake_quant arguments arg arg arg Compare Call Call If Call Call Assign Assign Assign Call Compare Assign Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "get_static_value",
    "source_code": "def get_static_value(x):\n    if isinstance(x, core.Tensor) and (x.dtype.is_floating or x.dtype.is_complex):\n        return None\n    return tensor_util.constant_value(x)",
    "docstring": "A version of tf.get_static_value that returns None on float dtypes. It returns None on float dtypes in order to avoid breaking gradients. Args: x: a tensor. Returns: Same as , except that it returns None when has a float dtype.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\numpy_ops\\np_utils.py",
    "ast_data": "FunctionDef name:get_static_value arg:x arguments arg If BoolOp Call BoolOp Return return:no Return return:yes Call"
  },
  {
    "library": "django",
    "name": "do_extends",
    "source_code": "@register.tag('extends')\ndef do_extends(parser, token):\n    bits = token.split_contents()\n    if len(bits) != 2:\n        raise TemplateSyntaxError(\"'%s' takes one argument\" % bits[0])\n    bits[1] = construct_relative_path(parser.origin.template_name, bits[1])\n    parent_name = parser.compile_filter(bits[1])\n    nodelist = parser.parse()\n    if nodelist.get_nodes_by_type(ExtendsNode):\n        raise TemplateSyntaxError(\"'%s' cannot appear more than once in the same template\" % bits[0])\n    return ExtendsNode(nodelist, parent_name)",
    "docstring": "Signal that this template extends a parent template. This tag may be used in two ways: `` as either the name of the parent template to extend (if it evaluates to a string) or as the parent template itself (if it evaluates to a Template object).",
    "type": "function",
    "file_path": "django\\django\\template\\loader_tags.py",
    "ast_data": "FunctionDef name:do_extends arg:parser arg:token arguments arg arg Assign Call If Compare Call Raise Call Assign Call Assign Call Assign Call If Call Raise Call Return return:yes Call Call"
  },
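Both forms are easiest to see with a standalone `Engine` and an in-memory loader; this sketch assumes the standalone engine needs no configured settings (template names here are illustrative):

```python
from django.template import Context, Engine

engine = Engine(loaders=[('django.template.loaders.locmem.Loader', {
    'base.html': '<h1>{% block title %}default{% endblock %}</h1>',
    'child.html': '{% extends "base.html" %}{% block title %}child{% endblock %}',
})])
engine.get_template('child.html').render(Context())  # '<h1>child</h1>'
```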
  {
    "library": "pytorch",
    "name": "_maybe_write",
    "source_code": "def _maybe_write(filename, new_content):\n    if os.path.exists(filename):\n        with open(filename) as f:\n            content = f.read()\n        if content == new_content:\n            return\n    with open(filename, 'w') as source_file:\n        source_file.write(new_content)",
    "docstring": "Equivalent to writing the content into the file but will not touch the file if it already had the right content (to avoid triggering recompile).",
    "type": "function",
    "file_path": "pytorch\\torch\\utils\\cpp_extension.py",
    "ast_data": "FunctionDef name:_maybe_write arg:filename arg:new_content arguments arg arg If Call With Call Assign Call If Compare Return return:no With Call Call"
  },
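A hypothetical re-creation of the write-if-changed behavior, showing that a second identical write leaves the mtime (and hence any rebuild trigger) untouched:

```python
import os
import tempfile

def maybe_write(filename, new_content):
    if os.path.exists(filename):
        with open(filename) as f:
            if f.read() == new_content:
                return  # identical content: do not touch the file
    with open(filename, 'w') as f:
        f.write(new_content)

path = os.path.join(tempfile.mkdtemp(), 'ext.cpp')
maybe_write(path, 'int x = 1;')
mtime = os.path.getmtime(path)
maybe_write(path, 'int x = 1;')  # no-op, so no recompile is triggered
assert os.path.getmtime(path) == mtime
```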
  {
    "library": "django",
    "name": "render",
    "source_code": "def render(self, context):\n    template = self.template.resolve(context)\n    if not callable(getattr(template, 'render', None)):\n        template_name = template or ()\n        if isinstance(template_name, str):\n            template_name = (construct_relative_path(self.origin.template_name, template_name),)\n        else:\n            template_name = tuple(template_name)\n        cache = context.render_context.dicts[0].setdefault(self, {})\n        template = cache.get(template_name)\n        if template is None:\n            template = context.template.engine.select_template(template_name)\n            cache[template_name] = template\n    elif hasattr(template, 'template'):\n        template = template.template\n    values = {name: var.resolve(context) for name, var in self.extra_context.items()}\n    if self.isolated_context:\n        return template.render(context.new(values))\n    with context.push(**values):\n        return template.render(context)",
    "docstring": "Render the specified template and context. Cache the template object in render_context to avoid reparsing and loading when used in a for loop.",
    "type": "method",
    "file_path": "django\\django\\template\\loader_tags.py",
    "ast_data": "FunctionDef name:render arg:self arg:context arguments arg arg Assign Call If Call Call Assign BoolOp If Call Assign Call Assign Call Assign Call Assign Call If Compare Assign Call Assign If Call Assign Assign Call Call If Return return:yes Call Call With Call Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "have_units",
    "source_code": "def have_units(self):\n    ax = self.axes\n    return ax and any((axis.have_units() for axis in ax._axis_map.values()))",
    "docstring": "Return whether units are set on any axis.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\artist.py",
    "ast_data": "FunctionDef name:have_units arg:self arguments arg Assign Return return:yes BoolOp Call Call Call"
  },
  {
    "library": "django",
    "name": "schema_editor",
    "source_code": "def schema_editor(self, *args, **kwargs):\n    if self.SchemaEditorClass is None:\n        raise NotImplementedError('The SchemaEditorClass attribute of this database wrapper is still None')\n    return self.SchemaEditorClass(self, *args, **kwargs)",
    "docstring": "Return a new instance of this backend's SchemaEditor.",
    "type": "method",
    "file_path": "django\\django\\db\\backends\\base\\base.py",
    "ast_data": "FunctionDef name:schema_editor arg:self arguments arg arg arg If Compare Raise Call Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "get_rlabel_position",
    "source_code": "def get_rlabel_position(self):\n    return np.rad2deg(self._r_label_position.get_matrix()[0, 2])",
    "docstring": "Returns ------- float The theta position of the radius labels in degrees.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\projections\\polar.py",
    "ast_data": "FunctionDef name:get_rlabel_position arg:self arguments arg Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "get_sharing_strategy",
    "source_code": "def get_sharing_strategy():\n    return _sharing_strategy",
    "docstring": "Return the current strategy for sharing CPU tensors.",
    "type": "function",
    "file_path": "pytorch\\torch\\multiprocessing\\__init__.py",
    "ast_data": "FunctionDef name:get_sharing_strategy arguments Return return:yes"
  },
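The getter pairs with `set_sharing_strategy`; both are public on `torch.multiprocessing`:

```python
import torch.multiprocessing as mp

mp.get_sharing_strategy()          # e.g. 'file_descriptor' on Linux
mp.get_all_sharing_strategies()    # strategies supported on this platform
mp.set_sharing_strategy('file_system')
```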
  {
    "library": "tensorflow",
    "name": "enqueue_many",
    "source_code": "def enqueue_many(self, vals, name=None):\n    with ops.name_scope(name, '%s_EnqueueMany' % self._name, self._scope_vals(vals)) as scope:\n        vals = self._check_enqueue_dtypes(vals)\n        batch_dim = tensor_shape.dimension_value(vals[0].get_shape().with_rank_at_least(1)[0])\n        batch_dim = tensor_shape.Dimension(batch_dim)\n        for val, shape in zip(vals, self._shapes):\n            val_batch_dim = tensor_shape.dimension_value(val.get_shape().with_rank_at_least(1)[0])\n            val_batch_dim = tensor_shape.Dimension(val_batch_dim)\n            batch_dim = batch_dim.merge_with(val_batch_dim)\n            val.get_shape()[1:].assert_is_compatible_with(shape)\n        return gen_data_flow_ops.queue_enqueue_many_v2(self._queue_ref, vals, name=scope)",
    "docstring": "Enqueues zero or more elements to this queue. This operation slices each component tensor along the 0th dimension to make multiple queue elements. All of the tensors in must have the same size in the 0th dimension. If the queue is full when this operation executes, it will block until all of the elements have been enqueued. At runtime, this operation may raise an error if the queue is before or during its execution. If the queue is closed before this operation runs, will be raised. If this operation is blocked, and either (i) the queue is closed by a close operation with , or (ii) the session is , will be raised. >>> q = tf.queue.FIFOQueue(capacity=10, dtypes=tf.int32) >>> q.enqueue_many(tf.constant([1, 2, 3, 4, 5], dtype=tf.int32)) >>> q.size() Args: vals: A tensor, a list or tuple of tensors, or a dictionary from which the queue elements are taken. name: A name for the operation (optional). Returns: The operation that enqueues a batch of tuples of tensors to the queue.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\data_flow_ops.py",
    "ast_data": "FunctionDef name:enqueue_many arg:self arg:vals arg:name arguments arg arg arg With Call Call Assign Call Assign Call Call Call Assign Call For Call Assign Call Call Call Assign Call Assign Call Call Call Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "construct_array_type",
    "source_code": "@classmethod\ndef construct_array_type(cls) -> type[FloatingArray]:\n    return FloatingArray",
    "docstring": "Return the array type associated with this dtype. Returns ------- type",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\arrays\\floating.py",
    "ast_data": "FunctionDef name:construct_array_type arg:cls arguments arg Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "get_feature_names_out",
    "source_code": "def get_feature_names_out(self, input_features=None):\n    check_is_fitted(self)\n    input_features = _check_feature_names_in(self, input_features)\n    cats = [self._compute_transformed_categories(i) for i, _ in enumerate(self.categories_)]\n    name_combiner = self._check_get_feature_name_combiner()\n    feature_names = []\n    for i in range(len(cats)):\n        names = [name_combiner(input_features[i], t) for t in cats[i]]\n        feature_names.extend(names)\n    return np.array(feature_names, dtype=object)",
    "docstring": "Get output feature names for transformation. Parameters ---------- input_features : array-like of str or None, default=None Input features. - If is , then is used as feature names in. If is not defined, then the following input feature names are generated: . - If is an array-like, then must match if is defined. Returns ------- feature_names_out : ndarray of str objects Transformed feature names.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\preprocessing\\_encoders.py",
    "ast_data": "FunctionDef name:get_feature_names_out arg:self arg:input_features arguments arg arg Call Assign Call Assign Call Call Assign Call Assign For Call Call Assign Call Call Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "_update_offset_text_position",
    "source_code": "def _update_offset_text_position(self, bboxes, bboxes2):\n    x, y = self.offsetText.get_position()\n    if not hasattr(self, '_tick_position'):\n        self._tick_position = 'bottom'\n    if self._tick_position == 'bottom':\n        if not len(bboxes):\n            bottom = self.axes.bbox.ymin\n        else:\n            bbox = mtransforms.Bbox.union(bboxes)\n            bottom = bbox.y0\n        y = bottom - self.OFFSETTEXTPAD * self.get_figure(root=True).dpi / 72\n    else:\n        if not len(bboxes2):\n            top = self.axes.bbox.ymax\n        else:\n            bbox = mtransforms.Bbox.union(bboxes2)\n            top = bbox.y1\n        y = top + self.OFFSETTEXTPAD * self.get_figure(root=True).dpi / 72\n    self.offsetText.set_position((x, y))",
    "docstring": "Update the offset_text position based on the sequence of bounding boxes of all the ticklabels",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\axis.py",
    "ast_data": "FunctionDef name:_update_offset_text_position arg:self arg:bboxes arg:bboxes2 arguments arg arg arg Assign Call If Call Assign If Compare If Call Assign Assign Call Assign Assign Call If Call Assign Assign Call Assign Assign Call Call"
  },
  {
    "library": "django",
    "name": "geo_db_type",
    "source_code": "def geo_db_type(self, f):\n    return 'MDSYS.SDO_GEOMETRY'",
    "docstring": "Return the geometry database type for Oracle. Unlike other spatial backends, no stored procedure is necessary and it's the same for all geometry types.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\db\\backends\\oracle\\operations.py",
    "ast_data": "FunctionDef name:geo_db_type arg:self arg:f arguments arg arg Return return:yes"
  },
  {
    "library": "django",
    "name": "chararray_output",
    "source_code": "def chararray_output(func, argtypes, errcheck=True):\n    func.argtypes = argtypes\n    func.restype = POINTER(c_char_p)\n    if errcheck:\n        func.errcheck = check_pointer\n    return func",
    "docstring": "For functions that return a c_char_p array.",
    "type": "function",
    "file_path": "django\\django\\contrib\\gis\\gdal\\prototypes\\generation.py",
    "ast_data": "FunctionDef name:chararray_output arg:func arg:argtypes arg:errcheck arguments arg arg arg Assign Assign Call If Assign Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_assert",
    "source_code": "def _assert(condition, message):\n    if type(condition) is not torch.Tensor and overrides.has_torch_function((condition,)):\n        return overrides.handle_torch_function(_assert, (condition,), condition, message)\n    assert condition, message",
    "docstring": "A wrapper around Python's assert which is symbolically traceable.",
    "type": "function",
    "file_path": "pytorch\\torch\\__init__.py",
    "ast_data": "FunctionDef name:_assert arg:condition arg:message arguments arg arg If BoolOp Compare Call Call Return return:yes Call"
  },
  {
    "library": "kornia",
    "name": "needleman_wunsch",
    "source_code": "def needleman_wunsch(self, scores: Tensor) -> Tensor:\n    KORNIA_CHECK_SHAPE(scores, ['B', 'N', 'M'])\n    b, n, m = scores.shape\n    gap = 0.1\n    nw_scores = scores - gap\n    dev = scores.device\n    nw_grid = torch.zeros(b, n + 1, m + 1, dtype=torch.float, device=dev)\n    for i in range(n):\n        for j in range(m):\n            nw_grid[:, i + 1, j + 1] = torch.maximum(torch.maximum(nw_grid[:, i + 1, j], nw_grid[:, i, j + 1]), nw_grid[:, i, j] + nw_scores[:, i, j])\n    return nw_grid[:, -1, -1]",
    "docstring": "Batched implementation of the Needleman-Wunsch algorithm. The cost of the InDel operation is set to 0 by subtracting the gap penalty to the scores. Args: scores: a (B, N, M) Tensor containing the pairwise scores of the elements to match.",
    "type": "method",
    "file_path": "kornia\\kornia\\feature\\sold2\\sold2.py",
    "ast_data": "FunctionDef name:needleman_wunsch arg:self arg:scores arguments arg arg Call Assign Assign Assign Assign Assign Call For Call For Call Assign Call Call Return return:yes"
  },
  {
    "library": "cherrypy",
    "name": "expired",
    "source_code": "def expired(self):\n    return False",
    "docstring": "Communicate that the object hasn't expired.",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\lib\\locking.py",
    "ast_data": "FunctionDef name:expired arg:self arguments arg Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "neighbors",
    "source_code": "@property\ndef neighbors(self):\n    if self._neighbors is None:\n        self._neighbors = self.get_cpp_triangulation().get_neighbors()\n    return self._neighbors",
    "docstring": "Return integer array of shape (ntri, 3) containing neighbor triangles. For each triangle, the indices of the three triangles that share the same edges, or -1 if there is no such neighboring triangle. ``.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\tri\\_triangulation.py",
    "ast_data": "FunctionDef name:neighbors arg:self arguments arg If Compare Assign Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "extended_timestamp",
    "source_code": "@property\ndef extended_timestamp(self):\n    return self._extended_timestamp",
    "docstring": "Extended timestamp, possibly with an index suffix. The index suffix, e.g., \"-1\", is for disambiguating multiple dumps of the same tensor with the same timestamp, which can occur if the dumping events are spaced by shorter than the temporal resolution of the timestamps. Returns: () The extended timestamp.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\debug\\lib\\debug_data.py",
    "ast_data": "FunctionDef name:extended_timestamp arg:self arguments arg Return return:yes"
  },
  {
    "library": "numpy",
    "name": "polygrid2d",
    "source_code": "@array_function_dispatch(_polygrid2d_dispatcher)\ndef polygrid2d(x, y, c):\n    return pu._gridnd(polyval, c, x, y)",
    "docstring": "Evaluate a 2-D polynomial on the Cartesian product of x and y. This function returns the values: .. math:: p(a,b) = \\sum_{i,j} c_{i,j} * a^i * b^j where the points `axbyxyxyxyccxyxycxy`. See Also -------- polyval, polyval2d, polyval3d, polygrid3d Examples -------- >>> from numpy.polynomial import polynomial as P >>> c = ((1, 2, 3), (4, 5, 6)) >>> P.polygrid2d([0, 1], [0, 1], c) array([[ 1., 6.], [ 5., 21.]])",
    "type": "function",
    "file_path": "numpy\\numpy\\polynomial\\polynomial.py",
    "ast_data": "FunctionDef name:polygrid2d arg:x arg:y arg:c arguments arg arg arg Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "copy",
    "source_code": "def copy(self) -> 'TraceableStack[T]':\n    return TraceableStack(self._stack)",
    "docstring": "Return a copy of self referencing the same objects but in a new list. This method is implemented to support thread-local stacks. Returns: TraceableStack with a new list that holds existing objects.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\traceable_stack.py",
    "ast_data": "FunctionDef name:copy arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_handle_deferred_layer_dependencies",
    "source_code": "def _handle_deferred_layer_dependencies(self, layers):\n    layer_checkpoint_dependencies = self._layer_checkpoint_dependencies\n    layer_to_name = {v: k for k, v in layer_checkpoint_dependencies.items()}\n    for layer in layers:\n        if layer in layer_to_name:\n            self._handle_deferred_dependencies(name=layer_to_name[layer], trackable=layer)",
    "docstring": "Handles layer checkpoint dependencies that are added after init.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\functional.py",
    "ast_data": "FunctionDef name:_handle_deferred_layer_dependencies arg:self arg:layers arguments arg arg Assign Assign Call For If Compare Call"
  },
  {
    "library": "pytorch",
    "name": "update_stack_example_value",
    "source_code": "def update_stack_example_value(node, metadata, dim=0, op=torch.stack):\n    if node is not None and hasattr(node, 'meta'):\n        if op == torch.stack:\n            example_value = torch.stack(metadata, dim=dim)\n        elif op == torch.unbind:\n            example_value = torch.unbind(metadata, dim=dim)\n        else:\n            return\n        node.meta['example_value'] = example_value",
    "docstring": "Update the example value of the node in the graph to enable followup split cat opt.",
    "type": "function",
    "file_path": "pytorch\\torch\\_inductor\\fx_passes\\group_batch_fusion.py",
    "ast_data": "FunctionDef name:update_stack_example_value arg:node arg:metadata arg:dim arg:op arguments arg arg arg arg If BoolOp Compare Call If Compare Assign Call If Compare Assign Call Return return:no Assign"
  },
  {
    "library": "scikit-learn",
    "name": "inplace_swap_row_csr",
    "source_code": "def inplace_swap_row_csr(X, m, n):\n    for t in [m, n]:\n        if isinstance(t, np.ndarray):\n            raise TypeError('m and n should be valid integers')\n    if m < 0:\n        m += X.shape[0]\n    if n < 0:\n        n += X.shape[0]\n    if m > n:\n        m, n = (n, m)\n    indptr = X.indptr\n    m_start = indptr[m]\n    m_stop = indptr[m + 1]\n    n_start = indptr[n]\n    n_stop = indptr[n + 1]\n    nz_m = m_stop - m_start\n    nz_n = n_stop - n_start\n    if nz_m != nz_n:\n        X.indptr[m + 2:n] += nz_n - nz_m\n        X.indptr[m + 1] = m_start + nz_n\n        X.indptr[n] = n_stop - nz_m\n    X.indices = np.concatenate([X.indices[:m_start], X.indices[n_start:n_stop], X.indices[m_stop:n_start], X.indices[m_start:m_stop], X.indices[n_stop:]])\n    X.data = np.concatenate([X.data[:m_start], X.data[n_start:n_stop], X.data[m_stop:n_start], X.data[m_start:m_stop], X.data[n_stop:]])",
    "docstring": "Swap two rows of a CSR matrix in-place. Parameters ---------- X : sparse matrix of shape (n_samples, n_features) Matrix whose two rows are to be swapped. It should be of CSR format. m : int Index of the row of X to be swapped. n : int Index of the row of X to be swapped.",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\utils\\sparsefuncs.py",
    "ast_data": "FunctionDef name:inplace_swap_row_csr arg:X arg:m arg:n arguments arg arg arg For If Call Raise Call If Compare If Compare If Compare Assign Assign Assign Assign Assign Assign Assign Assign If Compare Assign Assign Assign Call Assign Call"
  },
  {
    "library": "pytorch",
    "name": "recv",
    "source_code": "@_exception_logger\ndef recv(tensor: torch.Tensor, src: Optional[int]=None, group: Optional[ProcessGroup]=None, tag: int=0, group_src: Optional[int]=None) -> int:\n    work = irecv(tensor, src=src, group=group, tag=tag, group_src=group_src)\n    if work is None:\n        return -1\n    work.wait()\n    if src is None:\n        if group_src is None:\n            group_src = work._source_rank()\n        group = _group_or_default_group(group)\n        _check_not_self_rank(group, group_src, 'source')\n        src = get_global_rank(group, group_src)\n    return src",
    "docstring": "Receives a tensor synchronously. .. warning:: ``. Returns: Sender rank -1, if not part of the group",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\distributed_c10d.py",
    "ast_data": "FunctionDef name:recv arg:tensor arg:src arg:group arg:tag arg:group_src arguments arg arg arg arg arg Assign Call If Compare Return return:yes Call If Compare If Compare Assign Call Assign Call Call Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "TransformGraph",
    "source_code": "def TransformGraph(input_graph_def, inputs, outputs, transforms):\n    input_graph_def_string = input_graph_def.SerializeToString()\n    inputs_string = compat.as_bytes(','.join(inputs))\n    outputs_string = compat.as_bytes(','.join(outputs))\n    transforms_string = compat.as_bytes(' '.join(transforms))\n    output_graph_def_string = TransformGraphWithStringInputs(input_graph_def_string, inputs_string, outputs_string, transforms_string)\n    output_graph_def = graph_pb2.GraphDef()\n    output_graph_def.ParseFromString(output_graph_def_string)\n    return output_graph_def",
    "docstring": "Python wrapper for the Graph Transform Tool. Gives access to all graph transforms available through the command line tool. See documentation at for full details of the options available. Args: input_graph_def: GraphDef object containing a model to be transformed. inputs: List of node names for the model inputs. outputs: List of node names for the model outputs. transforms: List of strings containing transform names and parameters. Returns: New GraphDef with transforms applied.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\tools\\graph_transforms\\__init__.py",
    "ast_data": "FunctionDef name:TransformGraph arg:input_graph_def arg:inputs arg:outputs arg:transforms arguments arg arg arg arg Assign Call Assign Call Call Assign Call Call Assign Call Call Assign Call Assign Call Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "get_solid_capstyle",
    "source_code": "def get_solid_capstyle(self):\n    return self._solidcapstyle.name",
    "docstring": "Return the for solid lines. See also .",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\lines.py",
    "ast_data": "FunctionDef name:get_solid_capstyle arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "bucketized_column",
    "source_code": "@doc_controls.header(_FEATURE_COLUMN_DEPRECATION_WARNING)\n@tf_export('feature_column.bucketized_column')\n@deprecation.deprecated(None, _FEATURE_COLUMN_DEPRECATION_RUNTIME_WARNING)\ndef bucketized_column(source_column, boundaries):\n    if not isinstance(source_column, (NumericColumn, fc_old._NumericColumn)):\n        raise ValueError('source_column must be a column generated with numeric_column(). Given: {}'.format(source_column))\n    if len(source_column.shape) > 1:\n        raise ValueError('source_column must be one-dimensional column. Given: {}'.format(source_column))\n    if not boundaries:\n        raise ValueError('boundaries must not be empty.')\n    if not (isinstance(boundaries, list) or isinstance(boundaries, tuple)):\n        raise ValueError('boundaries must be a sorted list.')\n    for i in range(len(boundaries) - 1):\n        if boundaries[i] >= boundaries[i + 1]:\n            raise ValueError('boundaries must be a sorted list.')\n    return BucketizedColumn(source_column, tuple(boundaries))",
    "docstring": "Represents discretized dense input bucketed by . Buckets include the left boundary, and exclude the right boundary. Namely, generates buckets , , , and . For example, if the inputs are then the output will be Example: A can also be crossed with another categorical column using : Args: source_column: A one-dimensional dense column which is generated with . boundaries: A sorted list or tuple of floats specifying the boundaries. Returns: A . Raises: ValueError: If is not a numeric column, or if it is not one-dimensional. ValueError: If is not a sorted list or tuple.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\feature_column\\feature_column_v2.py",
    "ast_data": "FunctionDef name:bucketized_column arg:source_column arg:boundaries arguments arg arg If Call Raise Call Call If Compare Call Raise Call Call If Raise Call If BoolOp Call Call Raise Call For Call Call If Compare Raise Call Return return:yes Call Call Call Call Call"
  },
  {
    "library": "pandas",
    "name": "is_1d_only_ea_dtype",
    "source_code": "def is_1d_only_ea_dtype(dtype: DtypeObj | None) -> bool:\n    return isinstance(dtype, ExtensionDtype) and (not dtype._supports_2d)",
    "docstring": "Analogue to is_extension_array_dtype but excluding DatetimeTZDtype.",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\dtypes\\common.py",
    "ast_data": "FunctionDef name:is_1d_only_ea_dtype arg:dtype arguments arg Return return:yes BoolOp Call"
  },
  {
    "library": "authlib",
    "name": "save_token",
    "source_code": "def save_token(self, token, request):\n    client = request.client\n    if request.user:\n        user_id = request.user.pk\n    else:\n        user_id = client.user_id\n    item = self.token_model(client_id=client.client_id, user_id=user_id, **token)\n    item.save()\n    return item",
    "docstring": "Default method for ``. Developers MAY rewrite this function to meet their own needs.",
    "type": "method",
    "file_path": "authlib\\authlib\\integrations\\django_oauth2\\authorization_server.py",
    "ast_data": "FunctionDef name:save_token arg:self arg:token arg:request arguments arg arg arg Assign If Assign Assign Assign Call Call Return return:yes"
  },
  {
    "library": "django",
    "name": "exclude",
    "source_code": "def exclude(self, *args, **kwargs):\n    self._not_support_combined_queries('exclude')\n    return self._filter_or_exclude(True, args, kwargs)",
    "docstring": "Return a new QuerySet instance with NOT (args) ANDed to the existing set.",
    "type": "method",
    "file_path": "django\\django\\db\\models\\query.py",
    "ast_data": "FunctionDef name:exclude arg:self arguments arg arg arg Call Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "evaluate",
    "source_code": "def evaluate(calcfc, x, m_nlcon, amat, bvec):\n    m_lcon = len(bvec) if bvec is not None else 0\n    if DEBUGGING:\n        assert not any(np.isnan(x))\n    constr = np.zeros(m_lcon + m_nlcon)\n    if amat is not None:\n        constr[:m_lcon] = matprod(x, amat.T) - bvec\n    if any(np.isnan(x)):\n        f = primasum(x)\n        constr = np.ones(m_nlcon) * f\n    else:\n        f, constr[m_lcon:] = calcfc(moderatex(x))\n        f = moderatef(f)\n        constr[m_lcon:] = moderatec(constr[m_lcon:])\n    if DEBUGGING:\n        assert not (np.isnan(f) or np.isposinf(f))\n        assert not any(np.isnan(constr) | np.isposinf(constr))\n    return (f, constr)",
    "docstring": "This function evaluates CALCFC at X, returning the objective function value and the constraint value. Nan/Inf are handled by a moderated extreme barrier.",
    "type": "function",
    "file_path": "scipy\\scipy\\_lib\\pyprima\\pyprima\\src\\pyprima\\common\\evaluate.py",
    "ast_data": "FunctionDef name:evaluate arg:calcfc arg:x arg:m_nlcon arg:amat arg:bvec arguments arg arg arg arg arg Assign Compare Call If Call Call Assign Call If Compare Assign Call If Call Call Assign Call Assign Call Assign Call Call Assign Call Assign Call If BoolOp Call Call Call Call Call Return return:yes"
  },
  {
    "library": "kornia",
    "name": "make_upright",
    "source_code": "def make_upright(laf: Tensor, eps: float=1e-09) -> Tensor:\n    KORNIA_CHECK_LAF(laf)\n    det = get_laf_scale(laf)\n    scale = det\n    b2a2 = torch.sqrt(laf[..., 0:1, 1:2] ** 2 + laf[..., 0:1, 0:1] ** 2) + eps\n    laf1_ell = concatenate([(b2a2 / det).contiguous(), torch.zeros_like(det)], dim=3)\n    laf2_ell = concatenate([(laf[..., 1:2, 1:2] * laf[..., 0:1, 1:2] + laf[..., 1:2, 0:1] * laf[..., 0:1, 0:1]) / (b2a2 * det), (det / b2a2).contiguous()], dim=3)\n    laf_unit_scale = concatenate([concatenate([laf1_ell, laf2_ell], dim=2), laf[..., :, 2:3]], dim=3)\n    return scale_laf(laf_unit_scale, scale)",
    "docstring": "Rectify the affine matrix, so that it becomes upright. Args: laf: :math: eps: for safe division. Returns: laf: :math: Example: >>> input = torch.ones(1, 5, 2, 3) # BxNx2x3 >>> output = make_upright(input) # BxNx2x3",
    "type": "function",
    "file_path": "kornia\\kornia\\feature\\laf.py",
    "ast_data": "FunctionDef name:make_upright arg:laf arg:eps arguments arg arg Call Assign Call Assign Assign Call Assign Call Call Call Assign Call Call Assign Call Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "_construct_node",
    "source_code": "def _construct_node(signature: _schemas.OpSignature, named_inputs: Mapping[str, ir.Value | None], named_attrs: Mapping[str, ValidAttributeType], opset: onnxscript.values.Opset, num_outputs: int) -> ir.Node:\n    inputs: list[ir.Value | None] = []\n    for value in named_inputs.values():\n        if isinstance(value, Sequence):\n            inputs.extend(value)\n        else:\n            inputs.append(value)\n    for input in reversed(inputs):\n        if input is not None:\n            break\n        inputs.pop()\n    attributes = [attr for attr in ir_convenience.convert_attributes(named_attrs) if attr.value is not None]\n    outputs = [_tensors.SymbolicTensor(opset) for _ in range(num_outputs)]\n    return ir.Node(signature.domain, signature.name, inputs=inputs, attributes=attributes, outputs=outputs, version=signature.opset_version)",
    "docstring": "Construct the node with the inputs and attributes. Variadic inputs are flattened. Args: signature: The OpSignature for the node. named_inputs: The mapping of parameter names to their arguments. When we do not have the schema of an operator, we do not know the names of the inputs, in which case the names can be anything because they are not used in this function. The data structure is passed in for consistency with the other functions. named_attrs: The mapping of attribute names to their values. num_outputs: The number of outputs for the node.",
    "type": "function",
    "file_path": "pytorch\\torch\\onnx\\_internal\\exporter\\_building.py",
    "ast_data": "FunctionDef name:_construct_node arg:signature arg:named_inputs arg:named_attrs arg:opset arg:num_outputs arguments arg arg arg arg arg For Call If Call Call Call For Call If Compare Call Assign Call Compare Assign Call Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "highlight_warnings",
    "source_code": "def highlight_warnings(self):\n    self._highlight_warnings = True",
    "docstring": "Enables warning highlighting when building formatted table.",
    "type": "method",
    "file_path": "pytorch\\torch\\utils\\benchmark\\utils\\compare.py",
    "ast_data": "FunctionDef name:highlight_warnings arg:self arguments arg Assign"
  },
  {
    "library": "pytorch",
    "name": "parse_node_kind",
    "source_code": "def parse_node_kind(kind: str) -> tuple[str, str]:\n    if '::' not in kind:\n        raise ValueError(f\"Node kind: {kind} is invalid. '::' is not in node kind.\")\n    domain, opname = kind.split('::', 1)\n    if '::' in opname:\n        raise ValueError(f\"Node kind: {kind} is invalid. '::' should only apear once.\")\n    return (domain, opname)",
    "docstring": "Parse node kind into domain and Op name.",
    "type": "function",
    "file_path": "pytorch\\torch\\onnx\\_internal\\jit_utils.py",
    "ast_data": "FunctionDef name:parse_node_kind arg:kind arguments arg If Compare Raise Call Assign Call If Compare Raise Call Return return:yes"
  },
  {
    "library": "django",
    "name": "TemplatesSetting",
    "source_code": "class TemplatesSetting(BaseRenderer):\n\n    def get_template(self, template_name):\n        return get_template(template_name)",
    "docstring": "Load templates using template.loader.get_template() which is configured based on settings.TEMPLATES.",
    "type": "class",
    "file_path": "django\\django\\forms\\renderers.py",
    "ast_data": "ClassDef name:TemplatesSetting FunctionDef name:get_template arg:self arg:template_name arguments arg arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_XLog1pyGrad",
    "source_code": "@ops.RegisterGradient('Xlog1py')\ndef _XLog1pyGrad(op: ops.Operation, grad):\n    x = op.inputs[0]\n    y = op.inputs[1]\n    sx = array_ops.shape(x)\n    sy = array_ops.shape(y)\n    rx, ry = gen_array_ops.broadcast_gradient_args(sx, sy)\n    with ops.control_dependencies([grad]):\n        not_zero_x = math_ops.cast(math_ops.not_equal(x, math_ops.cast(0.0, dtype=x.dtype)), dtype=x.dtype)\n        partial_x = gen_math_ops.xlog1py(not_zero_x, y)\n        partial_y = gen_math_ops.xdivy(x, y + 1.0)\n        return (array_ops.reshape(math_ops.reduce_sum(partial_x * grad, rx), sx), array_ops.reshape(math_ops.reduce_sum(partial_y * grad, ry), sy))",
    "docstring": "Returns gradient of xlog1py(x, y) with respect to x and y.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\math_grad.py",
    "ast_data": "FunctionDef name:_XLog1pyGrad arg:op arg:grad arguments arg arg Assign Assign Assign Call Assign Call Assign Call With Call Assign Call Call Call Assign Call Assign Call Return return:yes Call Call Call Call Call"
  },
  {
    "library": "scipy",
    "name": "tobanded",
    "source_code": "def tobanded(self):\n    d0 = np.r_[5, 6 * np.ones(self.n - 2, dtype=self.dtype), 5]\n    d1 = -4 * np.ones(self.n, dtype=self.dtype)\n    d2 = np.ones(self.n, dtype=self.dtype)\n    return np.array([d2, d1, d0]).astype(self.dtype)",
    "docstring": "Construct the Sakurai matrix as a banded array.",
    "type": "method",
    "file_path": "scipy\\scipy\\sparse\\linalg\\_special_sparse_arrays.py",
    "ast_data": "FunctionDef name:tobanded arg:self arguments arg Assign Call Assign Call Assign Call Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_neg",
    "source_code": "@deprecation.deprecated('2016-12-30', '`tf.neg(x)` is deprecated, please use `tf.negative(x)` or `-x`')\ndef _neg(x, name=None):\n    return negative(x, name)",
    "docstring": "Computes numerical negative value element-wise. I.e., \\(y = -x\\). Args: x: A or . Must be one of the following types: , , , , , , . name: A name for the operation (optional). Returns: A or , respectively. Has the same type as .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\math_ops.py",
    "ast_data": "FunctionDef name:_neg arg:x arg:name arguments arg arg Return return:yes Call Call"
  },
  {
    "library": "matplotlib",
    "name": "ConfigureSubplotsBase",
    "source_code": "class ConfigureSubplotsBase(ToolBase):\n    description = 'Configure subplots'\n    image = 'mpl-data/images/subplots'",
    "docstring": "Base tool for the configuration of subplots.",
    "type": "class",
    "file_path": "matplotlib\\lib\\matplotlib\\backend_tools.py",
    "ast_data": "ClassDef name:ConfigureSubplotsBase Assign Assign"
  },
  {
    "library": "pytorch",
    "name": "group_fn",
    "source_code": "def group_fn(self, sizes: Sequence[Sequence[sympy.Expr]]) -> tuple[tuple[sympy.Expr, ...], ...]:\n    raise NotImplementedError",
    "docstring": "Process the iteration sizes in case a transformation needs to be applied.",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\scheduler.py",
    "ast_data": "FunctionDef name:group_fn arg:self arg:sizes arguments arg arg Raise"
  },
  {
    "library": "tensorflow",
    "name": "image_gradients",
    "source_code": "@tf_export('image.image_gradients')\n@dispatch.add_dispatch_support\ndef image_gradients(image):\n    if image.get_shape().ndims != 4:\n        raise ValueError('image_gradients expects a 4D tensor [batch_size, h, w, d], not {}.'.format(image.get_shape()))\n    image_shape = array_ops.shape(image)\n    batch_size, height, width, depth = array_ops_stack.unstack(image_shape)\n    dy = image[:, 1:, :, :] - image[:, :-1, :, :]\n    dx = image[:, :, 1:, :] - image[:, :, :-1, :]\n    shape = array_ops_stack.stack([batch_size, 1, width, depth])\n    dy = array_ops.concat([dy, array_ops.zeros(shape, image.dtype)], 1)\n    dy = array_ops.reshape(dy, image_shape)\n    shape = array_ops_stack.stack([batch_size, height, 1, depth])\n    dx = array_ops.concat([dx, array_ops.zeros(shape, image.dtype)], 2)\n    dx = array_ops.reshape(dx, image_shape)\n    return (dy, dx)",
    "docstring": "Returns image gradients (dy, dx) for each color channel. Both output tensors have the same shape as the input: [batch_size, h, w, d]. The gradient values are organized so that [I(x+1, y) - I(x, y)] is in location (x, y). That means that dy will always have zeros in the last row, and dx will always have zeros in the last column. Usage Example: Args: image: Tensor with shape [batch_size, h, w, d]. Returns: Pair of tensors (dy, dx) holding the vertical and horizontal image gradients (1-step finite difference). Raises: ValueError: If is not a 4D tensor.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\image_ops_impl.py",
    "ast_data": "FunctionDef name:image_gradients arg:image arguments arg If Compare Call Raise Call Call Call Assign Call Assign Call Assign Assign Assign Call Assign Call Call Assign Call Assign Call Assign Call Call Assign Call Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "gui_repaint",
    "source_code": "def gui_repaint(self, drawDC=None):\n    _log.debug('%s - gui_repaint()', type(self))\n    if not (self and self.IsShownOnScreen()):\n        return\n    if not drawDC:\n        drawDC = wx.ClientDC(self)\n    bmp = self.bitmap.ConvertToImage().ConvertToBitmap() if wx.Platform == '__WXMSW__' and isinstance(self.figure.canvas.get_renderer(), RendererWx) else self.bitmap\n    drawDC.DrawBitmap(bmp, 0, 0)\n    if self._rubberband_rect is not None:\n        x0, y0, x1, y1 = map(round, self._rubberband_rect)\n        rect = [(x0, y0, x1, y0), (x1, y0, x1, y1), (x0, y0, x0, y1), (x0, y1, x1, y1)]\n        drawDC.DrawLineList(rect, self._rubberband_pen_white)\n        drawDC.DrawLineList(rect, self._rubberband_pen_black)",
    "docstring": "Update the displayed image on the GUI canvas, using the supplied wx.PaintDC device context.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\backends\\backend_wx.py",
    "ast_data": "FunctionDef name:gui_repaint arg:self arg:drawDC arguments arg arg Call Call If BoolOp Call Return return:no If Assign Call Assign BoolOp Compare Call Call Call Call Call If Compare Assign Call Assign Call Call"
  },
  {
    "library": "kornia",
    "name": "print",
    "source_code": "def print(self, max_width: int=256) -> None:\n    print(image_to_string(self.data, max_width))",
    "docstring": "Print the image tensor to the console. Args: max_width: the maximum width of the image to print. .. code-block:: python img = Image.from_file(\"panda.png\") img.print() .. image::",
    "type": "method",
    "file_path": "kornia\\kornia\\image\\image.py",
    "ast_data": "FunctionDef name:print arg:self arg:max_width arguments arg arg Call Call"
  },
  {
    "library": "tensorflow",
    "name": "initial_value",
    "source_code": "@property\ndef initial_value(self):\n    raise NotImplementedError",
    "docstring": "Returns the Tensor used as the initial value for the variable. Note that this is different from which runs the op that initializes the variable before returning its value. This method returns the tensor that is used by the op that initializes the variable. Returns: A .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\variables.py",
    "ast_data": "FunctionDef name:initial_value arg:self arguments arg Raise"
  },
  {
    "library": "pandas",
    "name": "time_frame_period_formatting_iso8601_strftime_offset",
    "source_code": "def time_frame_period_formatting_iso8601_strftime_offset(self, nobs, freq):\n    self.data['p'].dt.strftime(date_format='%Y-%m-%dT%H:%M:%S%z')",
    "docstring": "Not optimized yet as %z is not supported by",
    "type": "method",
    "file_path": "pandas\\asv_bench\\benchmarks\\strftime.py",
    "ast_data": "FunctionDef name:time_frame_period_formatting_iso8601_strftime_offset arg:self arg:nobs arg:freq arguments arg arg arg Call"
  },
  {
    "library": "django",
    "name": "tokenize",
    "source_code": "def tokenize(self):\n    in_tag = False\n    lineno = 1\n    result = []\n    for token_string in tag_re.split(self.template_string):\n        if token_string:\n            result.append(self.create_token(token_string, None, lineno, in_tag))\n            lineno += token_string.count('\\n')\n        in_tag = not in_tag\n    return result",
    "docstring": "Return a list of tokens from a given template_string.",
    "type": "method",
    "file_path": "django\\django\\template\\base.py",
    "ast_data": "FunctionDef name:tokenize arg:self arguments arg Assign Assign Assign For Call If Call Call Call Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_valid_dtypes",
    "source_code": "def _valid_dtypes(self):\n    return _DEFAULT_VALID_DTYPES",
    "docstring": "Valid types for loss, variables and gradients. Subclasses should override to allow other float types. Returns: Valid types for loss, variables and gradients.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\optimizer_v2\\optimizer_v2.py",
    "ast_data": "FunctionDef name:_valid_dtypes arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "run_and_report_benchmark",
    "source_code": "def run_and_report_benchmark(self, dataset, num_elements, name, iters=5, extras=None, warmup=True, apply_default_optimizations=False, session_config=None):\n    wall_time = self.run_benchmark(dataset=dataset, num_elements=num_elements, iters=iters, warmup=warmup, apply_default_optimizations=apply_default_optimizations, session_config=session_config)\n    if extras is None:\n        extras = {}\n    if context.executing_eagerly():\n        name = '{}.eager'.format(name)\n        extras['implementation'] = 'eager'\n    else:\n        name = '{}.graph'.format(name)\n        extras['implementation'] = 'graph'\n    extras['num_elements'] = num_elements\n    self.report_benchmark(wall_time=wall_time, iters=iters, name=name, extras=extras)\n    return wall_time",
    "docstring": "Benchmarks the dataset and reports the stats. Runs the dataset times. In each iteration, the benchmark measures the time it takes to go through elements of the dataset. This is followed by logging/printing the benchmark stats. Args: dataset: Dataset to benchmark. num_elements: Number of dataset elements to iterate through each benchmark iteration. name: Name of the benchmark. iters: Number of times to repeat the timing. extras: A dict which maps string keys to additional benchmark info. warmup: If true, warms up the session caches by running an untimed run. apply_default_optimizations: Determines whether default optimizations should be applied. session_config: A ConfigProto protocol buffer with configuration options for the session. Applicable only for benchmarking in graph mode. Returns: A float, representing the per-element wall time of the dataset in seconds. This is the median time (with respect to ) it takes for the dataset to go through elements, divided by",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\data\\benchmarks\\benchmark_base.py",
    "ast_data": "FunctionDef name:run_and_report_benchmark arg:self arg:dataset arg:num_elements arg:name arg:iters arg:extras arg:warmup arg:apply_default_optimizations arg:session_config arguments arg arg arg arg arg arg arg arg arg Assign Call If Compare Assign If Call Assign Call Assign Assign Call Assign Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "maximum_iterations",
    "source_code": "@property\ndef maximum_iterations(self):\n    return self._maximum_iterations",
    "docstring": "The maximum number of iterations that will be executed.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\control_flow_ops.py",
    "ast_data": "FunctionDef name:maximum_iterations arg:self arguments arg Return return:yes"
  },
  {
    "library": "django",
    "name": "format_value",
    "source_code": "def format_value(self, value):\n    if self.is_initial(value):\n        return value",
    "docstring": "Return the file object if it has a defined url attribute.",
    "type": "method",
    "file_path": "django\\django\\forms\\widgets.py",
    "ast_data": "FunctionDef name:format_value arg:self arg:value arguments arg arg If Call Return return:yes"
  },
  {
    "library": "pandas",
    "name": "isin",
    "source_code": "def isin(self, values) -> Series:\n    result = algorithms.isin(self._values, values)\n    return self._constructor(result, index=self.index, copy=False).__finalize__(self, method='isin')",
    "docstring": "Whether elements in Series are contained in . Return a boolean Series showing whether each element in the Series matches an element in the passed sequence of exactly. Parameters ---------- values : set or list-like The sequence of values to test. Passing in a single string will raise a `values` will raise an error. Use a list of one element instead: >>> s.isin([\"llama\"]) 0 True 1 False 2 True 3 False 4 True 5 False Name: animal, dtype: bool Strings and integers are distinct and are therefore not comparable: >>> pd.Series([1]).isin([\"1\"]) 0 False dtype: bool >>> pd.Series([1.1]).isin([\"1.1\"]) 0 False dtype: bool",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\series.py",
    "ast_data": "FunctionDef name:isin arg:self arg:values arguments arg arg Assign Call Return return:yes Call Call"
  },
  {
    "library": "pandas",
    "name": "_range_in_self",
    "source_code": "def _range_in_self(self, other: range) -> bool:\n    if not other:\n        return True\n    if not self._range:\n        return False\n    if len(other) > 1 and other.step % self._range.step:\n        return False\n    return other.start in self._range and other[-1] in self._range",
    "docstring": "Check if other range is contained in self",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\indexes\\range.py",
    "ast_data": "FunctionDef name:_range_in_self arg:self arg:other arguments arg arg If Return return:yes If Return return:yes If BoolOp Compare Call Return return:yes Return return:yes BoolOp Compare Compare"
  },
  {
    "library": "django",
    "name": "y",
    "source_code": "@property\ndef y(self):\n    return self._listarr(capi.gety)",
    "docstring": "Return the Y coordinates in a list.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\gdal\\geometries.py",
    "ast_data": "FunctionDef name:y arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "Problem11",
    "source_code": "class Problem11(Benchmark):\n\n    def __init__(self, dimensions=1):\n        Benchmark.__init__(self, dimensions)\n        self._bounds = [(-pi / 2, 2 * pi)]\n        self.global_optimum = 2.09439\n        self.fglob = -1.5\n\n    def fun(self, x, *args):\n        self.nfev += 1\n        x = x[0]\n        return 2 * cos(x) + cos(2 * x)",
    "docstring": "Univariate Problem11 objective function. This class defines the Univariate Problem11 global optimization problem. This is a multimodal minimization problem defined as follows: .. math:: f_{\\text{Problem11}}(x) = 2\\cos(x) + \\cos(2x) Bound constraints: :math: .. figure:: figures/Problem11.png :alt: Univariate Problem11 function :align: center **Univariate Problem11 function** *Global optimum*: :math: for :math:",
    "type": "class",
    "file_path": "scipy\\benchmarks\\benchmarks\\go_benchmark_functions\\go_funcs_univariate.py",
    "ast_data": "ClassDef name:Problem11 FunctionDef name:__init__ arg:self arg:dimensions arguments arg arg Call Assign Assign Assign FunctionDef name:fun arg:self arg:x arguments arg arg arg Assign Return return:yes Call Call"
  },
  {
    "library": "pandas",
    "name": "MultiIndexUInt64Engine",
    "source_code": "class MultiIndexUInt64Engine(libindex.BaseMultiIndexCodesEngine, libindex.UInt64Engine):\n    _base = libindex.UInt64Engine\n    _codes_dtype = 'uint64'",
    "docstring": "Manages a MultiIndex by mapping label combinations to positive integers. The number of possible label combinations must not overflow the 64 bits integers.",
    "type": "class",
    "file_path": "pandas\\pandas\\core\\indexes\\multi.py",
    "ast_data": "ClassDef name:MultiIndexUInt64Engine Assign Assign"
  },
  {
    "library": "tensorflow",
    "name": "_set_mutable",
    "source_code": "def _set_mutable(self, mutable):\n    object.__setattr__(self, '_mutable', mutable)",
    "docstring": "Change the mutability value to on this options and children.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\data\\ops\\options.py",
    "ast_data": "FunctionDef name:_set_mutable arg:self arg:mutable arguments arg arg Call"
  },
  {
    "library": "tensorflow",
    "name": "experimental_as_proto",
    "source_code": "def experimental_as_proto(self) -> struct_pb2.BoundedTensorSpecProto:\n    return struct_pb2.BoundedTensorSpecProto(shape=self.shape.experimental_as_proto(), dtype=self.dtype.experimental_as_proto().datatype, minimum=tensor_util.make_tensor_proto(self._minimum), maximum=tensor_util.make_tensor_proto(self._maximum), name=self.name)",
    "docstring": "Returns a proto representation of the BoundedTensorSpec instance.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\tensor.py",
    "ast_data": "FunctionDef name:experimental_as_proto arg:self arguments arg Return return:yes Call Call Call Call Call"
  },
  {
    "library": "django",
    "name": "send",
    "source_code": "def send(self, sender, **named):\n    if not self.receivers or self.sender_receivers_cache.get(sender) is NO_RECEIVERS:\n        return []\n    responses = []\n    sync_receivers, async_receivers = self._live_receivers(sender)\n    for receiver in sync_receivers:\n        response = receiver(signal=self, sender=sender, **named)\n        responses.append((receiver, response))\n    if async_receivers:\n\n        async def asend():\n            async_responses = await asyncio.gather(*(receiver(signal=self, sender=sender, **named) for receiver in async_receivers))\n            return zip(async_receivers, async_responses)\n        responses.extend(async_to_sync(asend)())\n    return responses",
    "docstring": "Send signal from sender to all connected receivers. If any receiver raises an error, the error propagates back through send, terminating the dispatch loop. So it's possible that all receivers won't be called if an error is raised. If any receivers are asynchronous, they are called after all the synchronous receivers via a single call to async_to_sync(). They are also executed concurrently with asyncio.gather(). Arguments: sender The sender of the signal. Either a specific object or None. named Named arguments which will be passed to receivers. Return a list of tuple pairs [(receiver, response), ... ].",
    "type": "method",
    "file_path": "django\\django\\dispatch\\dispatcher.py",
    "ast_data": "FunctionDef name:send arg:self arg:sender arguments arg arg arg If BoolOp Compare Call Return return:no Assign Assign Call For Assign Call Call If AsyncFunctionDef name:asend arguments Assign Call Call Return return:yes Call Call Call Call Return return:yes"
  },
  {
    "library": "pandas",
    "name": "drop_duplicates",
    "source_code": "def drop_duplicates(self, *, keep: DropKeep='first') -> Self:\n    if self.is_unique:\n        return self._view()\n    return super().drop_duplicates(keep=keep)",
    "docstring": "Return Index with duplicate values removed. Parameters ---------- keep : {'first', 'last', `keep` discards all sets of duplicated entries. >>> idx.drop_duplicates(keep=False) Index(['cow', 'beetle', 'hippo'], dtype='object')",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\indexes\\base.py",
    "ast_data": "FunctionDef name:drop_duplicates arg:self arguments arg arg If Return return:yes Call Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_time_to_exit",
    "source_code": "def _time_to_exit(self):\n    return self._grace_period <= 0 or self._final_checkpoint_countdown",
    "docstring": "Return whether to exit: exit if no grace period or grace period ends.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\failure_handling\\failure_handling.py",
    "ast_data": "FunctionDef name:_time_to_exit arg:self arguments arg Return return:yes BoolOp Compare"
  },
  {
    "library": "pytorch",
    "name": "_check_is_size",
    "source_code": "def _check_is_size(i, message=None, *, max=None):\n    _check(i >= 0, message)\n    from torch.fx.experimental.symbolic_shapes import _advise_is_size\n    _advise_is_size(i)\n    if max is not None:\n        _check(i <= max, message)\n        from torch.fx.experimental.symbolic_shapes import _advise_is_bounded\n        _advise_is_bounded(i, max)",
    "docstring": "Checks that a given integer is a valid size (i.e., is non-negative). You should use this over `` tests, we assume that a constant max bound is treated equivalently to all other values. Symbolic max bounds are not yet supported. NB: Do NOT use this in contexts where a -1 size would be valid (indicating to infer the size from context, or if you should wrap-around or truncate). Only use this if the only valid value is an honest to goodness size.",
    "type": "function",
    "file_path": "pytorch\\torch\\__init__.py",
    "ast_data": "FunctionDef name:_check_is_size arg:i arg:message arguments arg arg arg Call Compare Call If Compare Call Compare Call"
  },
  {
    "library": "matplotlib",
    "name": "contourf",
    "source_code": "@_preprocess_data()\ndef contourf(self, X, Y, Z, *args, zdir='z', offset=None, axlim_clip=False, **kwargs):\n    had_data = self.has_data()\n    jX, jY, jZ = art3d.rotate_axes(X, Y, Z, zdir)\n    cset = super().contourf(jX, jY, jZ, *args, **kwargs)\n    levels = self._add_contourf_set(cset, zdir, offset, axlim_clip)\n    self._auto_scale_contourf(X, Y, Z, zdir, levels, had_data)\n    return cset",
    "docstring": "Create a 3D filled contour plot. Parameters ---------- X, Y, Z : array-like Input data. See for supported data shapes. zdir : {'x', 'y', 'z'}, default: 'z' The direction to use. offset : float, optional If specified, plot a projection of the contour lines at this position in a plane normal to *zdir*. axlim_clip : bool, default: False Whether to hide lines with a vertex outside the axes view limits. .. versionadded:: 3.10 data : indexable object, optional DATA_PARAMETER_PLACEHOLDER *args, **kwargs Other arguments are forwarded to . Returns ------- matplotlib.contour.QuadContourSet",
    "type": "method",
    "file_path": "matplotlib\\lib\\mpl_toolkits\\mplot3d\\axes3d.py",
    "ast_data": "FunctionDef name:contourf arg:self arg:X arg:Y arg:Z arguments arg arg arg arg arg arg arg arg arg Assign Call Assign Call Assign Call Call Assign Call Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "_handle_getitem_node",
    "source_code": "def _handle_getitem_node(node: torch.fx.Node, node_name_to_values: dict[str, ir.Value | Sequence[ir.Value]]) -> ir.Value:\n    assert len(node.all_input_nodes) == 1\n    source = node.all_input_nodes[0]\n    source_outputs = node_name_to_values[source.name]\n    assert isinstance(source_outputs, Sequence), f'Expected {source.name} to output sequence, got {node_name_to_values[source.name]}'\n    index = typing.cast(int, node.args[1])\n    value = source_outputs[index]\n    node_name_to_values[node.name] = value\n    value.name = node.name\n    return value",
    "docstring": "Handle a getitem node. Add the input value it is getting to the mapping, then return the value. There are two cases for this node: 1. The output is a Sequence (traced), we can simply get the value from the sequence 2. The output is produced by a SplitToSequence node, we need to get the value from the sequence value This function only handles the first case",
    "type": "function",
    "file_path": "pytorch\\torch\\onnx\\_internal\\exporter\\_core.py",
    "ast_data": "FunctionDef name:_handle_getitem_node arg:node arg:node_name_to_values arguments arg arg Compare Call Assign Assign Call Assign Call Assign Assign Assign Return return:yes"
  },
  {
    "library": "pandas",
    "name": "_to_ijv",
    "source_code": "def _to_ijv(ss, row_levels: tuple[int] | list[int]=(0,), column_levels: tuple[int] | list[int]=(1,), sort_labels: bool=False) -> tuple[np.ndarray, npt.NDArray[np.intp], npt.NDArray[np.intp], list[IndexLabel], list[IndexLabel]]:\n    _check_is_partition([row_levels, column_levels], range(ss.index.nlevels))\n    sp_vals = ss.array.sp_values\n    na_mask = notna(sp_vals)\n    values = sp_vals[na_mask]\n    valid_ilocs = ss.array.sp_index.indices[na_mask]\n    i_coords, i_labels = _levels_to_axis(ss, row_levels, valid_ilocs, sort_labels=sort_labels)\n    j_coords, j_labels = _levels_to_axis(ss, column_levels, valid_ilocs, sort_labels=sort_labels)\n    return (values, i_coords, j_coords, i_labels, j_labels)",
    "docstring": "For an arbitrary MultiIndexed sparse Series return (v, i, j, ilabels, jlabels) where (v, (i, j)) is suitable for passing to scipy.sparse.coo constructor, and ilabels and jlabels are the row and column labels respectively. Parameters ---------- ss : Series row_levels : tuple/list column_levels : tuple/list sort_labels : bool, default False Sort the row and column labels before forming the sparse matrix. When and/or refer to a single level, set to for a faster execution. Returns ------- values : numpy.ndarray Valid values to populate a sparse matrix, extracted from ss. i_coords : numpy.ndarray (row coordinates of the values) j_coords : numpy.ndarray (column coordinates of the values) i_labels : list (row labels) j_labels : list (column labels)",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\arrays\\sparse\\scipy_sparse.py",
    "ast_data": "FunctionDef name:_to_ijv arg:ss arg:row_levels arg:column_levels arg:sort_labels arguments arg arg arg arg Call Call Assign Assign Call Assign Assign Assign Call Assign Call Return return:yes"
  },
  {
    "library": "pandas",
    "name": "unique_with_mask",
    "source_code": "def unique_with_mask(values, mask: npt.NDArray[np.bool_] | None=None):\n    values = _ensure_arraylike(values, func_name='unique')\n    if isinstance(values.dtype, ExtensionDtype):\n        return values.unique()\n    if isinstance(values, ABCIndex):\n        return values.unique()\n    original = values\n    hashtable, values = _get_hashtable_algo(values)\n    table = hashtable(len(values))\n    if mask is None:\n        uniques = table.unique(values)\n        uniques = _reconstruct_data(uniques, original.dtype, original)\n        return uniques\n    else:\n        uniques, mask = table.unique(values, mask=mask)\n        uniques = _reconstruct_data(uniques, original.dtype, original)\n        assert mask is not None\n        return (uniques, mask.astype('bool'))",
    "docstring": "See algorithms.unique for docs. Takes a mask for masked arrays.",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\algorithms.py",
    "ast_data": "FunctionDef name:unique_with_mask arg:values arg:mask arguments arg arg Assign Call If Call Return return:yes Call If Call Return return:yes Call Assign Assign Call Assign Call Call If Compare Assign Call Assign Call Return return:yes Assign Call Assign Call Compare Return return:yes Call"
  },
  {
    "library": "django",
    "name": "collect_sql",
    "source_code": "def collect_sql(self, plan):\n    statements = []\n    state = None\n    for migration, backwards in plan:\n        with self.connection.schema_editor(collect_sql=True, atomic=migration.atomic) as schema_editor:\n            if state is None:\n                state = self.project_state((migration.app_label, migration.name), at_end=False)\n            if not backwards:\n                state = migration.apply(state, schema_editor, collect_sql=True)\n            else:\n                state = migration.unapply(state, schema_editor, collect_sql=True)\n        statements.extend(schema_editor.collected_sql)\n    return statements",
    "docstring": "Take a migration plan and return a list of collected SQL statements that represent the best-efforts version of that plan.",
    "type": "method",
    "file_path": "django\\django\\db\\migrations\\loader.py",
    "ast_data": "FunctionDef name:collect_sql arg:self arg:plan arguments arg arg Assign Assign For With Call If Compare Assign Call If Assign Call Assign Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "call_kernel",
    "source_code": "def call_kernel(self, name: str, node: Any=None) -> None:\n    wrapper = V.graph.wrapper_code\n    for v in self.args.sizevars.keys():\n        wrapper.ensure_size_computed(v)\n    args = [*self.args.output_buffers.keys(), *self.args.input_buffers.keys()]\n    args = [arg for arg in args if arg not in self.removed_buffers]\n    args += [str(v) for v in self.args.sizevars.keys()]\n    if len(self.active_range_trees()) > 0:\n        threads = [self.pexpr(sympy.Min(v.numel, self.max_threadgroup_size) if v.is_reduction else v.numel) for v in self.active_range_trees()]\n        if V.graph.cpp_wrapper:\n            args += [f'{', '.join(threads)}']\n        else:\n            args += [f'threads=[{', '.join(threads)}]']\n    elif V.graph.cpp_wrapper:\n        raise RuntimeError('We should always have threads?')\n    if self.inside_reduction:\n        threads = [self.pexpr(sympy.Min(v.numel, self.max_threadgroup_size)) if v.is_reduction else '1' for v in self.active_range_trees()]\n        if V.graph.cpp_wrapper:\n            args += [f'{{{', '.join(threads)}}}']\n        else:\n            args += [f'group_size=[{', '.join(threads)}]']\n    elif V.graph.cpp_wrapper:\n        args += [None]\n    wrapper.generate_kernel_call(name, args, device=torch.device('cpu'), triton=False)",
    "docstring": "Codegen a call to this kernel",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\codegen\\mps.py",
    "ast_data": "FunctionDef name:call_kernel arg:self arg:name arg:node arguments arg arg arg Assign For Call Call Assign Call Call Assign Compare Call Call If Compare Call Call Assign Call Call Call If Call Call If Raise Call If Assign Call Call Call If Call Call If Call Call"
  },
  {
    "library": "matplotlib",
    "name": "_tick_update_position",
    "source_code": "def _tick_update_position(tick, tickxs, tickys, labelpos):\n    tick.label1.set_position(labelpos)\n    tick.label2.set_position(labelpos)\n    tick.tick1line.set_visible(True)\n    tick.tick2line.set_visible(False)\n    tick.tick1line.set_linestyle('-')\n    tick.tick1line.set_marker('')\n    tick.tick1line.set_data(tickxs, tickys)\n    tick.gridline.set_data([0], [0])",
    "docstring": "Update tick line and label position and style.",
    "type": "function",
    "file_path": "matplotlib\\lib\\mpl_toolkits\\mplot3d\\axis3d.py",
    "ast_data": "FunctionDef name:_tick_update_position arg:tick arg:tickxs arg:tickys arg:labelpos arguments arg arg arg arg Call Call Call Call Call Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "_moments",
    "source_code": "def _moments(a, b, n):\n    arg1 = 1 + n / a\n    log_value = torch.lgamma(arg1) + torch.lgamma(b) - torch.lgamma(arg1 + b)\n    return b * torch.exp(log_value)",
    "docstring": "Computes nth moment of Kumaraswamy using using torch.lgamma",
    "type": "function",
    "file_path": "pytorch\\torch\\distributions\\kumaraswamy.py",
    "ast_data": "FunctionDef name:_moments arg:a arg:b arg:n arguments arg arg arg Assign Assign Call Call Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "average_pooling1d",
    "source_code": "def average_pooling1d(inputs, pool_size, strides, padding='valid', data_format='channels_last', name=None):\n    warnings.warn('`tf.layers.average_pooling1d` is deprecated and will be removed in a future version. Please use `tf.keras.layers.AveragePooling1D` instead.')\n    layer = AveragePooling1D(pool_size=pool_size, strides=strides, padding=padding, data_format=data_format, name=name)\n    return layer.apply(inputs)",
    "docstring": "Average Pooling layer for 1D inputs. Args: inputs: The tensor over which to pool. Must have rank 3. pool_size: An integer or tuple/list of a single integer, representing the size of the pooling window. strides: An integer or tuple/list of a single integer, specifying the strides of the pooling operation. padding: A string. The padding method, either 'valid' or 'same'. Case-insensitive. data_format: A string, one of (default) or . The ordering of the dimensions in the inputs. corresponds to inputs with shape while corresponds to inputs with shape . name: A string, the name of the layer. Returns: The output tensor, of rank 3. Raises: ValueError: if eager execution is enabled.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\legacy_tf_layers\\pooling.py",
    "ast_data": "FunctionDef name:average_pooling1d arg:inputs arg:pool_size arg:strides arg:padding arg:data_format arg:name arguments arg arg arg arg arg arg Call Assign Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "cleanup",
    "source_code": "def cleanup(self) -> None:\n    for s in self.var_to_stack.values():\n        s.cleanup()\n    for ras in self.deferred_runtime_asserts.values():\n        for ra in ras:\n            ra.stack.cleanup()",
    "docstring": "Break reference cycles. This destroys the stacks. If you really want to keep them, we just need some way to break references on code objects.",
    "type": "method",
    "file_path": "pytorch\\torch\\fx\\experimental\\symbolic_shapes.py",
    "ast_data": "FunctionDef name:cleanup arg:self arguments arg For Call Call For Call For Call"
  },
  {
    "library": "django",
    "name": "check_for_language",
    "source_code": "@functools.lru_cache(maxsize=1000)\ndef check_for_language(lang_code):\n    if lang_code is None or not language_code_re.search(lang_code):\n        return False\n    return any((gettext_module.find('django', path, [to_locale(lang_code)]) is not None for path in all_locale_paths()))",
    "docstring": "Check whether there is a global language file for the given language code. This is used to decide whether a user-provided language is available. lru_cache should have a maxsize to prevent from memory exhaustion attacks, as the provided language codes are taken from the HTTP request. See also .",
    "type": "function",
    "file_path": "django\\django\\utils\\translation\\trans_real.py",
    "ast_data": "FunctionDef name:check_for_language arg:lang_code arguments arg If BoolOp Compare Call Return return:yes Return return:yes Call Compare Call Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "insert_or_assign",
    "source_code": "def insert_or_assign(self, keys, values, name=None):\n    with ops.name_scope(name, '%s_lookup_table_insert' % self.name, [self.resource_handle, keys, values]):\n        keys = ops.convert_to_tensor(keys, dtype=self._key_dtype, name='keys')\n        values = ops.convert_to_tensor(values, dtype=self._value_dtype, name='values')\n        with ops.colocate_with(self.resource_handle):\n            op = gen_lookup_ops.lookup_table_insert_v2(self.resource_handle, keys, values)\n        return op",
    "docstring": "Associates with . Args: keys: Keys to insert. Can be a tensor of any shape. Must match the table's key type. values: Values to be associated with keys. Must be a tensor of the same shape as and match the table's value type. name: A name for the operation (optional). Returns: The created Operation. Raises: TypeError: when or doesn't match the table data types.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\lookup_ops.py",
    "ast_data": "FunctionDef name:insert_or_assign arg:self arg:keys arg:values arg:name arguments arg arg arg arg With Call Assign Call Assign Call With Call Assign Call Return return:yes"
  },
  {
    "library": "sphinx",
    "name": "unwrap_all",
    "source_code": "def unwrap_all(obj: Any, *, stop: Callable[[Any], bool] | None=None) -> Any:\n    if callable(stop):\n        while not stop(obj):\n            if ispartial(obj):\n                obj = obj.func\n            elif inspect.isroutine(obj) and hasattr(obj, '__wrapped__'):\n                obj = obj.__wrapped__\n            elif isclassmethod(obj) or isstaticmethod(obj):\n                obj = obj.__func__\n            else:\n                return obj\n        return obj\n    while True:\n        if ispartial(obj):\n            obj = obj.func\n        elif inspect.isroutine(obj) and hasattr(obj, '__wrapped__'):\n            obj = obj.__wrapped__\n        elif isclassmethod(obj) or isstaticmethod(obj):\n            obj = obj.__func__\n        else:\n            return obj",
    "docstring": "Get an original object from wrapped object. Unlike :func:, this unwraps partial functions, wrapped functions, class methods and static methods. When specified, *stop* is a predicate indicating whether an object should be unwrapped or not.",
    "type": "function",
    "file_path": "sphinx\\sphinx\\util\\inspect.py",
    "ast_data": "FunctionDef name:unwrap_all arg:obj arguments arg arg If Call While Call If Call Assign If BoolOp Call Call Assign If BoolOp Call Call Assign Return return:yes Return return:yes While If Call Assign If BoolOp Call Call Assign If BoolOp Call Call Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "make_parse_example_spec_v2",
    "source_code": "@doc_controls.header(_FEATURE_COLUMN_DEPRECATION_WARNING)\n@tf_export('feature_column.make_parse_example_spec', v1=[])\n@deprecation.deprecated(None, _FEATURE_COLUMN_DEPRECATION_RUNTIME_WARNING)\ndef make_parse_example_spec_v2(feature_columns):\n    result = {}\n    for column in feature_columns:\n        if not isinstance(column, fc_types.FeatureColumn):\n            raise ValueError('All feature_columns must be FeatureColumn instances. Given: {}'.format(column))\n        config = column.parse_example_spec\n        for key, value in six.iteritems(config):\n            if key in result and value != result[key]:\n                raise ValueError('feature_columns contain different parse_spec for key {}. Given {} and {}'.format(key, value, result[key]))\n        result.update(config)\n    return result",
    "docstring": "Creates parsing spec dictionary from input feature_columns. The returned dictionary can be used as arg 'features' in . Typical usage example: For the above example, make_parse_example_spec would return the dict: Args: feature_columns: An iterable containing all feature columns. All items should be instances of classes derived from . Returns: A dict mapping each feature key to a or value. Raises: ValueError: If any of the given is not a instance.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\feature_column\\feature_column_v2.py",
    "ast_data": "FunctionDef name:make_parse_example_spec_v2 arg:feature_columns arguments arg Assign For If Call Raise Call Call Assign For Call If BoolOp Compare Compare Raise Call Call Call Return return:yes Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "switch_to_thread_local",
    "source_code": "def switch_to_thread_local(self) -> None:\n    if not self._stack_state_is_thread_local:\n        self._stack_state_is_thread_local = True",
    "docstring": "Make device, colocation and dependencies stacks thread-local. Device, colocation and dependencies stacks are not thread-local be default. If multiple threads access them, then the state is shared. This means that one thread may affect the behavior of another thread. After this method is called, the stacks become thread-local. If multiple threads access them, then the state is not shared. Each thread uses its own value; a thread doesn't affect other threads by mutating such a stack. The initial value for every thread's stack is set to the current value of the stack when was first called.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\ops.py",
    "ast_data": "FunctionDef name:switch_to_thread_local arg:self arguments arg If Assign"
  },
  {
    "library": "pandas",
    "name": "Appender",
    "source_code": "class Appender:\n    addendum: str | None\n\n    def __init__(self, addendum: str | None, join: str='', indents: int=0) -> None:\n        if indents > 0:\n            self.addendum = indent(addendum, indents=indents)\n        else:\n            self.addendum = addendum\n        self.join = join\n\n    def __call__(self, func: T) -> T:\n        func.__doc__ = func.__doc__ if func.__doc__ else ''\n        self.addendum = self.addendum if self.addendum else ''\n        docitems = [func.__doc__, self.addendum]\n        func.__doc__ = dedent(self.join.join(docitems))\n        return func",
    "docstring": "A function decorator that will append an addendum to the docstring of the target function. This decorator should be robust even if func.__doc__ is None (for example, if -OO was passed to the interpreter). Usage: construct a docstring.Appender with a string to be joined to the original docstring. An optional 'join' parameter may be supplied which will be used to join the docstring and addendum. e.g. add_copyright = Appender(\"Copyright (c) 2009\", join=' ') @add_copyright def my_dog(has='fleas'): \"This docstring will have a copyright below\" pass",
    "type": "class",
    "file_path": "pandas\\pandas\\util\\_decorators.py",
    "ast_data": "ClassDef name:Appender FunctionDef name:__init__ arg:self arg:addendum arg:join arg:indents arguments arg arg arg arg If Compare Assign Call Assign Assign FunctionDef name:__call__ arg:self arg:func arguments arg arg Assign Assign Assign Assign Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "transform_constraint",
    "source_code": "def transform_constraint(constraint: Constraint, counter: int):\n    if type(constraint) in _TRANSFORMATION_RULES:\n        return _TRANSFORMATION_RULES[type(constraint)](constraint, counter)\n    else:\n        return (constraint, counter)",
    "docstring": "Transforms a constraint into a simpler constraint. Ex: precision and consistency are transformed to equality Args: constraint: constraint to be transformed counter: for variable tracking Returns: Constraint",
    "type": "function",
    "file_path": "pytorch\\torch\\fx\\experimental\\migrate_gradual_types\\constraint_transformation.py",
    "ast_data": "FunctionDef name:transform_constraint arg:constraint arg:counter arguments arg arg If Compare Call Return return:yes Call Call Return return:yes"
  },
  {
    "library": "scipy",
    "name": "alloc",
    "source_code": "def alloc(self, target):\n    for worker in range(len(self.workers)):\n        if self.workers[worker] >= target.end:\n            self.workers[worker] = target.start\n            return worker\n    self.workers.append(target.start)\n    return len(self.workers) - 1",
    "docstring": "Places target in an available thread, or adds a new thread.",
    "type": "method",
    "file_path": "scipy\\tools\\ninjatracing.py",
    "ast_data": "FunctionDef name:alloc arg:self arg:target arguments arg arg For Call Call If Compare Assign Return return:yes Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "scatter_nd_add",
    "source_code": "def scatter_nd_add(self, indices, updates, name=None):\n    return gen_state_ops.scatter_nd_add(self._variable, indices, updates, use_locking=True, name=name)",
    "docstring": "Applies sparse addition to individual values or slices in a Variable. is a with rank and is a of rank . must be integer tensor, containing indices into . It must be shape where . The innermost dimension of (with length ) corresponds to indices into elements (if ) or slices (if ) along the th dimension of . is of rank with shape: For example, say we want to add 4 scattered elements to a rank-1 tensor to 8 elements. In Python, that update would look like this: The resulting update to ref would look like this: [1, 13, 3, 14, 14, 6, 7, 20] See for more details about how to make updates to slices. Args: indices: The indices to be used in the operation. updates: The values to be used in the operation. name: the name of the operation. Returns: A that will hold the new value of this variable after the scattered addition has completed.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\ref_variable.py",
    "ast_data": "FunctionDef name:scatter_nd_add arg:self arg:indices arg:updates arg:name arguments arg arg arg arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_log_deprecation",
    "source_code": "def _log_deprecation(msg, *args, **kwargs):\n    if strict_mode.STRICT_MODE:\n        logging.error(msg, *args, **kwargs)\n        raise RuntimeError('This behavior has been deprecated, which raises an error in strict mode.')\n    else:\n        logging.warning(msg, *args, **kwargs)",
    "docstring": "Raises errors for deprecated methods if in strict mode, warns otherwise.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\util\\deprecation.py",
    "ast_data": "FunctionDef name:_log_deprecation arg:msg arguments arg arg arg If Call Raise Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_get_opt_einsum_contract_path",
    "source_code": "def _get_opt_einsum_contract_path(equation, shaped_inputs_tuple, optimize):\n    _, contractions = opt_einsum.contract_path(equation, *shaped_inputs_tuple, optimize=optimize, einsum_call=True, use_blas=True)\n    indices_and_equations = tuple([(expr[0], expr[2]) for expr in contractions])\n    return indices_and_equations",
    "docstring": "Returns the (memoized) result of opt_einsum.contract_path.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\special_math_ops.py",
    "ast_data": "FunctionDef name:_get_opt_einsum_contract_path arg:equation arg:shaped_inputs_tuple arg:optimize arguments arg arg arg Assign Call Assign Call Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "predict",
    "source_code": "def predict(self, X):\n    X = validate_data(self, X, dtype=DTYPE, order='C', accept_sparse='csr', reset=False)\n    return self._raw_predict(X).ravel()",
    "docstring": "Predict regression target for X. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) The input samples. Internally, it will be converted to ``. Returns ------- y : ndarray of shape (n_samples,) The predicted values.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\ensemble\\_gb.py",
    "ast_data": "FunctionDef name:predict arg:self arg:X arguments arg arg Assign Call Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "_TensorParallelTransformPass",
    "source_code": "class _TensorParallelTransformPass(PassBase):\n\n    def __init__(self, rank: int, world_size: int, device_type: str, state_dict: dict[str, torch.Tensor], graph_signature: ExportGraphSignature, parallel_strategies: dict[str, ParallelStyle]) -> None:\n        super().__init__()\n        self.rank = rank\n        self.mesh = DeviceMesh(device_type, torch.arange(world_size))\n        self.state_dict: dict[str, torch.Tensor] = state_dict\n        self.graph_signature = graph_signature\n        self.parallel_strategies = parallel_strategies\n\n    def call(self, graph_module) -> PassResult:\n        gm = copy.deepcopy(graph_module)\n        parameter_placements = _generate_parameter_and_buffer_placements(list(self.state_dict.keys()), self.parallel_strategies)\n        placement_strategies = _mark_sharding(gm, self.graph_signature, self.mesh, parameter_placements)\n        _partitioner(gm)\n        _shard_state_dict(self.state_dict, placement_strategies, self.graph_signature, self.mesh)\n        return PassResult(gm, True)",
    "docstring": "This pass is responsible for transforming a single-device graph into a tensor parallel graph. It will mark the placement strategy of each node in the graph, partition the graph into distributed graph, then shard the parameters/buffers accordingly.",
    "type": "class",
    "file_path": "pytorch\\torch\\distributed\\tensor\\experimental\\_tp_transform.py",
    "ast_data": "ClassDef name:_TensorParallelTransformPass FunctionDef name:__init__ arg:self arg:rank arg:world_size arg:device_type arg:state_dict arg:graph_signature arg:parallel_strategies arguments arg arg arg arg arg arg arg Call Call Assign Assign Call Call Assign Assign FunctionDef name:call arg:self arg:graph_module arguments arg arg Assign Call Assign Call Call Call Assign Call Call Call Return return:yes Call"
  },
  {
    "library": "numpy",
    "name": "find",
    "source_code": "@set_module('numpy.strings')\ndef find(a, sub, start=0, end=None):\n    end = end if end is not None else MAX\n    return _find_ufunc(a, sub, start, end)",
    "docstring": "For each element, return the lowest index in the string where substring `np.bytes_np.str_` dtype The substring to search for. start, end : array_like, with any integer dtype The range to look in, interpreted as in slice notation. Returns ------- y : ndarray Output array of ints See Also -------- str.find Examples -------- >>> import numpy as np >>> a = np.array([\"NumPy is a Python library\"]) >>> np.strings.find(a, \"Python\") array([11])",
    "type": "function",
    "file_path": "numpy\\numpy\\_core\\strings.py",
    "ast_data": "FunctionDef name:find arg:a arg:sub arg:start arg:end arguments arg arg arg arg Assign Compare Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_check_penalty_number",
    "source_code": "def _check_penalty_number(x):\n    if not isinstance(x, (float, int)):\n        raise ValueError('Value: {} is not a valid regularization penalty number, expected an int or float value'.format(x))\n    if math.isinf(x) or math.isnan(x):\n        raise ValueError('Value: {} is not a valid regularization penalty number, a positive/negative infinity or NaN is not a property value'.format(x))",
    "docstring": "check penalty number availability, raise ValueError if failed.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\regularizers.py",
    "ast_data": "FunctionDef name:_check_penalty_number arg:x arguments arg If Call Raise Call Call If BoolOp Call Call Raise Call Call"
  },
  {
    "library": "scikit-learn",
    "name": "_lstsq",
    "source_code": "def _lstsq(X, y, indices, fit_intercept):\n    fit_intercept = int(fit_intercept)\n    n_features = X.shape[1] + fit_intercept\n    n_subsamples = indices.shape[1]\n    weights = np.empty((indices.shape[0], n_features))\n    X_subpopulation = np.ones((n_subsamples, n_features))\n    y_subpopulation = np.zeros(max(n_subsamples, n_features))\n    lstsq, = get_lapack_funcs(('gelss',), (X_subpopulation, y_subpopulation))\n    for index, subset in enumerate(indices):\n        X_subpopulation[:, fit_intercept:] = X[subset, :]\n        y_subpopulation[:n_subsamples] = y[subset]\n        weights[index] = lstsq(X_subpopulation, y_subpopulation)[1][:n_features]\n    return weights",
    "docstring": "Least Squares Estimator for TheilSenRegressor class. This function calculates the least squares method on a subset of rows of X and y defined by the indices array. Optionally, an intercept column is added if intercept is set to true. Parameters ---------- X : array-like of shape (n_samples, n_features) Design matrix, where is the number of samples and is the number of features. y : ndarray of shape (n_samples,) Target vector, where is the number of samples. indices : ndarray of shape (n_subpopulation, n_subsamples) Indices of all subsamples with respect to the chosen subpopulation. fit_intercept : bool Fit intercept or not. Returns ------- weights : ndarray of shape (n_subpopulation, n_features + intercept) Solution matrix of n_subpopulation solved least square problems.",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\linear_model\\_theil_sen.py",
    "ast_data": "FunctionDef name:_lstsq arg:X arg:y arg:indices arg:fit_intercept arguments arg arg arg arg Assign Call Assign Assign Assign Call Assign Call Assign Call Call Assign Call For Call Assign Assign Assign Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_canonicalize_group_rank",
    "source_code": "def _canonicalize_group_rank(group: ProcessGroup, global_rank: Optional[int]=None, group_rank: Optional[int]=None, return_global: bool=False) -> int:\n    if group_rank is not None:\n        if global_rank is not None:\n            raise ValueError(\"Can't specify both group_rank and global_rank\")\n        if return_global:\n            return get_global_rank(group, group_rank)\n    else:\n        if global_rank is None:\n            raise ValueError('Must specify global_rank or group_rank')\n        if return_global:\n            return global_rank\n        group_rank = get_group_rank(group, global_rank)\n    return group_rank",
    "docstring": "Helper method to take _either_ a global rank or a group rank and produce a group rank. If 'return_global' is true, produce a global rank instead of a group rank.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\distributed_c10d.py",
    "ast_data": "FunctionDef name:_canonicalize_group_rank arg:group arg:global_rank arg:group_rank arg:return_global arguments arg arg arg arg If Compare If Compare Raise Call If Return return:yes Call If Compare Raise Call If Return return:yes Assign Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_gather_all_orig_param_state",
    "source_code": "def _gather_all_orig_param_state(fsdp_param_info: FSDPParamInfo, input_states: dict[str, Any], shard_state: bool, to_save: bool, cpu_offload: bool) -> dict[str, Any]:\n    fsdp_state = fsdp_param_info.state\n    if fsdp_state.world_size == 1 or fsdp_state.sharding_strategy == ShardingStrategy.NO_SHARD:\n        return input_states if to_save else {}\n    with SimpleProfiler.profile(SimpleProfiler.Type.RESHARDING):\n        with SimpleProfiler.profile(SimpleProfiler.Type.ALLGATHER_OBJ):\n            gathered_state_info = _allgather_state_info(fsdp_state, input_states)\n        output_states = _allgather_orig_param_states(fsdp_param_info, gathered_state_info, input_states, shard_state, to_save, cpu_offload)\n    if to_save:\n        for key, idx in fsdp_param_info.param_indices.items():\n            if key in output_states:\n                continue\n            if not fsdp_param_info.param_requires_grad[idx]:\n                continue\n            raise RuntimeError(f'{key} is not in the output state. The FSDPParamInfo has the param keys {sorted(fsdp_param_info.param_indices.keys())} while the output_states has the param keys {sorted(output_states.keys())}.')\n        return output_states\n    else:\n        return {}",
    "docstring": "Given a optimizer state dict, `` must be managed by FSDP.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\fsdp\\_optim_utils.py",
    "ast_data": "FunctionDef name:_gather_all_orig_param_state arg:fsdp_param_info arg:input_states arg:shard_state arg:to_save arg:cpu_offload arguments arg arg arg arg arg Assign If BoolOp Compare Compare Return return:yes With Call With Call Assign Call Assign Call If For Call If Compare If Raise Call Call Call Call Call Return return:yes Return return:no"
  },
  {
    "library": "pytorch",
    "name": "pwlf_sac_tradeoff_curve",
    "source_code": "def pwlf_sac_tradeoff_curve(self, n_segments: int=2, save_tradeoff_graphs: bool=False) -> None:\n    for mod_fqn, sac_stats in self.sac_mod_stats.items():\n        self.sac_mod_tradeoff_stats[mod_fqn] = self._get_sac_tradeoff_pwlf_stats(sac_stats=sac_stats, greedy_order_meta=self.sac_mod_greedy_order_meta[mod_fqn], n_segments=n_segments, save_tradeoff_graph=save_tradeoff_graphs, filename=mod_fqn)",
    "docstring": "Fits a piecewise linear function with the specified sumber of segments to the SAC trade-off curve of discarded memory vs recomputation time. Args: n_segments (int, optional): The number of segments to be used for fitting the piecewise linear function to the trade-off curve. Defaults to 2. save_tradeoff_graphs (bool, optional): Whether to save the trade-off graphs to file. Defaults to False. If save_tradeoff_graphs is True, the trade-off graphs are saved to file using the module FQN as the filename.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\_tools\\sac_estimator.py",
    "ast_data": "FunctionDef name:pwlf_sac_tradeoff_curve arg:self arg:n_segments arg:save_tradeoff_graphs arguments arg arg arg For Call Assign Call"
  },
  {
    "library": "tensorflow",
    "name": "_serialize_to_tensors",
    "source_code": "def _serialize_to_tensors(self):\n    return {trackable.VARIABLE_VALUE_KEY: self}",
    "docstring": "Implements Trackable._serialize_to_tensors.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\ref_variable.py",
    "ast_data": "FunctionDef name:_serialize_to_tensors arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_list_all_concrete_functions_for_serialization",
    "source_code": "def _list_all_concrete_functions_for_serialization(self):\n    seen_signatures = []\n    if self.input_signature is not None:\n        seen_signatures.append((self.input_signature, {}))\n    else:\n        concrete_functions = self._list_all_concrete_functions()\n        for concrete_function in concrete_functions:\n            signature = concrete_function.structured_input_signature\n            flattened = nest.flatten(signature)\n            if any((isinstance(arg, func_graph_module.UnknownArgument) for arg in flattened)):\n                logging.info('Unsupported signature for serialization: %s.', signature)\n                continue\n            equal_to_signature = functools.partial(function_type_utils.is_same_structure, signature, check_values=True)\n            if not any((equal_to_signature(s) for s in seen_signatures)):\n                seen_signatures.append(signature)\n    concrete_functions = []\n    for args, kwargs in seen_signatures:\n        concrete_functions.append(self.get_concrete_function(*args, **kwargs))\n    return concrete_functions",
    "docstring": "Returns all concrete functions for serialization. Returns: A list of instances of .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\polymorphic_function\\polymorphic_function.py",
    "ast_data": "FunctionDef name:_list_all_concrete_functions_for_serialization arg:self arguments arg Assign If Compare Call Assign Call For Assign Assign Call If Call Call Call Assign Call If Call Call Call Assign For Call Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "set_fill",
    "source_code": "def set_fill(self, b):\n    self._fill = bool(b)\n    self._set_facecolor(self._original_facecolor)\n    self._set_edgecolor(self._original_edgecolor)\n    self._set_hatchcolor(self._original_hatchcolor)\n    self.stale = True",
    "docstring": "Set whether to fill the patch. Parameters ---------- b : bool",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\patches.py",
    "ast_data": "FunctionDef name:set_fill arg:self arg:b arguments arg arg Assign Call Call Call Call Assign"
  },
  {
    "library": "scipy",
    "name": "orthogonal_procrustes",
    "source_code": "@_apply_over_batch(('A', 2), ('B', 2))\ndef orthogonal_procrustes(A, B, check_finite=True):\n    if check_finite:\n        A = np.asarray_chkfinite(A)\n        B = np.asarray_chkfinite(B)\n    else:\n        A = np.asanyarray(A)\n        B = np.asanyarray(B)\n    if A.ndim != 2:\n        raise ValueError(f'expected ndim to be 2, but observed {A.ndim}')\n    if A.shape != B.shape:\n        raise ValueError(f'the shapes of A and B differ ({A.shape} vs {B.shape})')\n    u, w, vt = svd((B.T @ np.conjugate(A)).T)\n    R = u @ vt\n    scale = w.sum()\n    return (R, scale)",
    "docstring": "Compute the matrix solution of the orthogonal (or unitary) Procrustes problem. Given matrices and of the same shape, find an orthogonal (or unitary in the case of complex input) matrix that most closely maps to using the algorithm given in [1]_. Parameters ---------- A : (M, N) array_like Matrix to be mapped. B : (M, N) array_like Target matrix. check_finite : bool, optional Whether to check that the input matrices contain only finite numbers. Disabling may give a performance gain, but may result in problems (crashes, non-termination) if the inputs do contain infinities or NaNs. Returns ------- R : (N, N) ndarray The matrix solution of the orthogonal Procrustes problem. Minimizes the Frobenius norm of `10.1007/BF02289451orthogonal_procrustes`. >>> R, _ = orthogonal_procrustes(A, B) >>> np.allclose(R, Q) True",
    "type": "function",
    "file_path": "scipy\\scipy\\linalg\\_procrustes.py",
    "ast_data": "FunctionDef name:orthogonal_procrustes arg:A arg:B arg:check_finite arguments arg arg arg If Assign Call Assign Call Assign Call Assign Call If Compare Raise Call If Compare Raise Call Assign Call Call Assign Assign Call Return return:yes Call"
  },
  {
    "library": "django",
    "name": "validate_no_atomic_block",
    "source_code": "def validate_no_atomic_block(self):\n    if self.in_atomic_block:\n        raise TransactionManagementError(\"This is forbidden when an 'atomic' block is active.\")",
    "docstring": "Raise an error if an atomic block is active.",
    "type": "method",
    "file_path": "django\\django\\db\\backends\\base\\base.py",
    "ast_data": "FunctionDef name:validate_no_atomic_block arg:self arguments arg If Raise Call"
  },
  {
    "library": "tensorflow",
    "name": "is_initialized",
    "source_code": "def is_initialized(self, name=None):\n    if values_util.is_saving_non_distributed():\n        return self._primary.is_initialized()\n    if self._use_packed_variable():\n        return self._packed_var.is_initialized()\n    result = self._primary.is_initialized()\n    for v in self._values[1:-1]:\n        result = math_ops.logical_and(result, v.is_initialized())\n    result = math_ops.logical_and(result, self._values[-1].is_initialized(), name=name)\n    return result",
    "docstring": "Identifies if all the component variables are initialized. Args: name: Name of the final op. Returns: The op that evaluates to True or False depending on if all the component variables are initialized.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\values.py",
    "ast_data": "FunctionDef name:is_initialized arg:self arg:name arguments arg arg If Call Return return:yes Call If Call Return return:yes Call Assign Call For Assign Call Call Assign Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "get_subexpr_involving_symbol",
    "source_code": "@classmethod\ndef get_subexpr_involving_symbol(cls, expr: Expr, symbol: Symbol) -> Expr:\n    expr = cls._preprocess(expr)\n    return sympy.S.Zero + sum((term for term in sympy.Add.make_args(expr) if symbol in term.free_symbols))",
    "docstring": "Given a sympy expression, return the subexpression comprised only of terms involving the specified symbol. For example, if is , and is , this returns .",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\codegen\\block_analysis.py",
    "ast_data": "FunctionDef name:get_subexpr_involving_symbol arg:cls arg:expr arg:symbol arguments arg arg arg Assign Call Return return:yes Call Call Compare"
  },
  {
    "library": "django",
    "name": "__eq__",
    "source_code": "def __eq__(self, other):\n    return bool(capi.feature_equal(self.ptr, other._ptr))",
    "docstring": "Do equivalence testing on the features.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\gdal\\feature.py",
    "ast_data": "FunctionDef name:__eq__ arg:self arg:other arguments arg arg Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "ForkerIterDataPipe",
    "source_code": "@functional_datapipe('fork')\nclass ForkerIterDataPipe(IterDataPipe):\n\n    def __new__(cls, datapipe: IterDataPipe, num_instances: int, buffer_size: int=1000, copy: Optional[Literal['shallow', 'deep']]=None):\n        if num_instances < 1:\n            raise ValueError(f'Expected `num_instances` larger than 0, but {num_instances} is found')\n        if num_instances == 1:\n            return datapipe\n        container = _ForkerIterDataPipe(datapipe, num_instances, buffer_size, copy)\n        return [_ChildDataPipe(container, i) for i in range(num_instances)]",
    "docstring": "Creates multiple instances of the same Iterable DataPipe (functional name: ``. Note: All branches of the forked pipeline return the identical object unless the copy parameter is supplied. If the object is mutable or contains mutable objects, changing them in one branch will affect all others. Example: >>> # xdoctest: +REQUIRES(module:torchdata) >>> from torchdata.datapipes.iter import IterableWrapper >>> source_dp = IterableWrapper(range(5)) >>> dp1, dp2 = source_dp.fork(num_instances=2) >>> list(dp1) [0, 1, 2, 3, 4] >>> list(dp2) [0, 1, 2, 3, 4]",
    "type": "class",
    "file_path": "pytorch\\torch\\utils\\data\\datapipes\\iter\\combining.py",
    "ast_data": "ClassDef name:ForkerIterDataPipe FunctionDef name:__new__ arg:cls arg:datapipe arg:num_instances arg:buffer_size arg:copy arguments arg arg arg arg arg If Compare Raise Call If Compare Return return:yes Assign Call Return return:yes Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "weakmethod",
    "source_code": "def weakmethod(method):\n    cls = method.im_class\n    func = method.im_func\n    instance_ref = weakref.ref(method.im_self)\n\n    @functools.wraps(method)\n    def inner(*args, **kwargs):\n        return func.__get__(instance_ref(), cls)(*args, **kwargs)\n    del method\n    return inner",
    "docstring": "Creates a weak reference to the bound method.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\utils\\metrics_utils.py",
    "ast_data": "FunctionDef name:weakmethod arg:method arguments arg Assign Assign Assign Call FunctionDef name:inner arguments arg arg Return return:yes Call Call Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "read",
    "source_code": "def read(self, queue, name=None):\n    if isinstance(queue, tensor_lib.Tensor):\n        queue_ref = queue\n    else:\n        queue_ref = queue.queue_ref\n    if self._reader_ref.dtype == dtypes.resource:\n        return gen_io_ops.reader_read_v2(self._reader_ref, queue_ref, name=name)\n    else:\n        old_queue_op = gen_data_flow_ops.fake_queue(queue_ref)\n        return gen_io_ops.reader_read(self._reader_ref, old_queue_op, name=name)",
    "docstring": "Returns the next record (key, value) pair produced by a reader. Will dequeue a work unit from queue if necessary (e.g. when the Reader needs to start reading from a new file since it has finished with the previous file). Args: queue: A Queue or a mutable string Tensor representing a handle to a Queue, with string work items. name: A name for the operation (optional). Returns: A tuple of Tensors (key, value). key: A string scalar Tensor. value: A string scalar Tensor.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\io_ops.py",
    "ast_data": "FunctionDef name:read arg:self arg:queue arg:name arguments arg arg arg If Call Assign Assign If Compare Return return:yes Call Assign Call Return return:yes Call"
  },
  {
    "library": "kornia",
    "name": "transform_keypoints",
    "source_code": "def transform_keypoints(self, M: Tensor, inplace: bool=False) -> 'Keypoints3D':\n    raise NotImplementedError",
    "docstring": "Apply a transformation matrix to the 2D keypoints. Args: M: The transformation matrix to be applied, shape of :math: or :math:. inplace: do transform in-place and return self. Returns: The transformed keypoints.",
    "type": "method",
    "file_path": "kornia\\kornia\\geometry\\keypoints.py",
    "ast_data": "FunctionDef name:transform_keypoints arg:self arg:M arg:inplace arguments arg arg arg Raise"
  },
  {
    "library": "pytorch",
    "name": "events",
    "source_code": "def events(self):\n    assert self.profiler\n    return self.profiler.function_events",
    "docstring": "Returns the list of unaggregated profiler events, to be used in the trace callback or after the profiling is finished",
    "type": "method",
    "file_path": "pytorch\\torch\\profiler\\profiler.py",
    "ast_data": "FunctionDef name:events arg:self arguments arg Return return:yes"
  },
  {
    "library": "scipy",
    "name": "_fft_func",
    "source_code": "def _fft_func(self, x: np.ndarray) -> np.ndarray:\n    if self.phase_shift is not None:\n        if x.shape[-1] < self.mfft:\n            z_shape = list(x.shape)\n            z_shape[-1] = self.mfft - x.shape[-1]\n            x = np.hstack((x, np.zeros(z_shape, dtype=x.dtype)))\n        p_s = (self.phase_shift + self.m_num_mid) % self.m_num\n        x = np.roll(x, -p_s, axis=-1)\n    if self.fft_mode == 'twosided':\n        return fft_lib.fft(x, n=self.mfft, axis=-1)\n    if self.fft_mode == 'centered':\n        return fft_lib.fftshift(fft_lib.fft(x, self.mfft, axis=-1), axes=-1)\n    if self.fft_mode == 'onesided':\n        return fft_lib.rfft(x, n=self.mfft, axis=-1)\n    if self.fft_mode == 'onesided2X':\n        X = fft_lib.rfft(x, n=self.mfft, axis=-1)\n        fac = np.sqrt(2) if self.scaling == 'psd' else 2\n        X[..., 1:-1 if self.mfft % 2 == 0 else None] *= fac\n        return X\n    fft_modes = get_args(FFT_MODE_TYPE)\n    raise RuntimeError(f'self.fft_mode={self.fft_mode!r} not in {fft_modes}!')",
    "docstring": "FFT based on the , , and attributes. For multidimensional arrays the transformation is carried out on the last axis.",
    "type": "method",
    "file_path": "scipy\\scipy\\signal\\_short_time_fft.py",
    "ast_data": "FunctionDef name:_fft_func arg:self arg:x arguments arg arg If Compare If Compare Assign Call Assign Assign Call Call Assign Assign Call If Compare Return return:yes Call If Compare Return return:yes Call Call If Compare Return return:yes Call If Compare Assign Call Assign Compare Call Compare Return return:yes Assign Call Raise Call"
  },
  {
    "library": "matplotlib",
    "name": "__init__",
    "source_code": "def __init__(self, units_mapping):\n    self._units = units_mapping",
    "docstring": "Parameters ---------- units_mapping : dict Mapping of category names (str) to indices (int).",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\category.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:units_mapping arguments arg arg Assign"
  },
  {
    "library": "tensorflow",
    "name": "is_subtype_of",
    "source_code": "def is_subtype_of(self, other: trace.TraceType) -> bool:\n    if not isinstance(other, TensorShape):\n        return False\n    if other.rank is None:\n        return True\n    if self.rank != other.rank:\n        return False\n    return all((o is None or s == o for s, o in zip(self._dims, other._dims)))",
    "docstring": "Returns True iff is subtype of . Shape A is a subtype of shape B if shape B can successfully represent it: * A of any rank is a subtype of . * TensorShapes of equal ranks are covariant, i.e. is a subtype of iff An is a subtype of Bn. An is subtype of Bn iff An == Bn or Bn is None. * TensorShapes of different defined ranks have no subtyping relation. The subtyping relation is reflexive and transitive, but not symmetric. Some examples: * is a subtype of , and is also a subtype of but and are not subtypes of each other. * All two-dimensional shapes are subtypes of , such as . There is no subtype relationship with, for example, or . * is also a subtype of and . It is not a subtype of, for example, , , or . * is a subtype of itself, and also , , and . It has no subtype relation with, for example, or . Args: other: Another . Returns: True iff is subtype of .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\tensor_shape.py",
    "ast_data": "FunctionDef name:is_subtype_of arg:self arg:other arguments arg arg If Call Return return:yes If Compare Return return:yes If Compare Return return:yes Return return:yes Call BoolOp Compare Compare Call"
  },
  {
    "library": "tensorflow",
    "name": "_synset_to_word",
    "source_code": "def _synset_to_word(filepath):\n    mat = scipy.io.loadmat(filepath)\n    entries = mat['synsets']\n    fields = ['synset_id', 'WNID', 'words', 'gloss', 'num_children', 'children', 'wordnet_height', 'num_train_images']\n    synset_index = fields.index('synset_id')\n    words_index = fields.index('words')\n    synset_to_word = {}\n    for entry in entries:\n        entry = entry[0]\n        synset_id = int(entry[synset_index][0])\n        first_word = entry[words_index][0].split(',')[0]\n        synset_to_word[synset_id] = first_word\n    return synset_to_word",
    "docstring": "Returns synset to word dictionary by reading sysnset arrays.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\lite\\tools\\evaluation\\tasks\\imagenet_image_classification\\generate_validation_labels.py",
    "ast_data": "FunctionDef name:_synset_to_word arg:filepath arguments arg Assign Call Assign Assign Assign Call Assign Call Assign For Assign Assign Call Assign Call Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "from_row_limits",
    "source_code": "@classmethod\n@dispatch.add_dispatch_support\ndef from_row_limits(cls, values, row_limits, name=None, validate=True):\n    if not isinstance(validate, bool):\n        raise TypeError(f'Argument `validate` must have type bool. Received {validate}.')\n    with ops.name_scope(name, 'RaggedFromRowLimits', [values, row_limits]):\n        values = _convert_to_ragged_tensor_values(values)\n        row_partition = RowPartition.from_row_limits(row_limits=row_limits, validate=validate, dtype_hint=_get_optional_partition_dtype(values))\n        return cls._from_row_partition(values, row_partition, validate=validate)",
    "docstring": "Creates a with rows partitioned by . Equivalent to: . Args: values: A potentially ragged tensor with shape . row_limits: A 1-D integer tensor with shape . Must be sorted in ascending order. If , then must be . name: A name prefix for the RaggedTensor (optional). validate: If true, then use assertions to check that the arguments form a valid . Note: these assertions incur a runtime cost, since they must be checked for each tensor value. Returns: A . . . #### Example: >>> print(tf.RaggedTensor.from_row_limits( ... values=[3, 1, 4, 1, 5, 9, 2, 6], ... row_limits=[4, 4, 7, 8, 8]))",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\ragged_tensor.py",
    "ast_data": "FunctionDef name:from_row_limits arg:cls arg:values arg:row_limits arg:name arg:validate arguments arg arg arg arg arg If Call Raise Call With Call Assign Call Assign Call Call Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "equals",
    "source_code": "def equals(self, other: object) -> bool:\n    if type(self) != type(other):\n        return False\n    other = cast(ExtensionArray, other)\n    if self.dtype != other.dtype:\n        return False\n    elif len(self) != len(other):\n        return False\n    else:\n        equal_values = self == other\n        if isinstance(equal_values, ExtensionArray):\n            equal_values = equal_values.fillna(False)\n        equal_na = self.isna() & other.isna()\n        return bool((equal_values | equal_na).all())",
    "docstring": "Return if another array is equivalent to this array. Equivalent means that both arrays have the same shape and dtype, and all values compare equal. Missing values in the same location are considered equal (in contrast with normal equality). Parameters ---------- other : ExtensionArray Array to compare to this Array. Returns ------- boolean Whether the arrays are equivalent. See Also -------- numpy.array_equal : Equivalent method for numpy array. Series.equals : Equivalent method for Series. DataFrame.equals : Equivalent method for DataFrame. Examples -------- >>> arr1 = pd.array([1, 2, np.nan]) >>> arr2 = pd.array([1, 2, np.nan]) >>> arr1.equals(arr2) True >>> arr1 = pd.array([1, 3, np.nan]) >>> arr2 = pd.array([1, 2, np.nan]) >>> arr1.equals(arr2) False",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\arrays\\base.py",
    "ast_data": "FunctionDef name:equals arg:self arg:other arguments arg arg If Compare Call Call Return return:yes Assign Call If Compare Return return:yes If Compare Call Call Return return:yes Assign Compare If Call Assign Call Assign Call Call Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "print_difference",
    "source_code": "def print_difference(mode, t1, t2):\n    difference = (t2 - t1) / t1 * 100.0\n    print('=== %s: %.1f%% ===' % (mode, difference))",
    "docstring": "Print the difference in timing between two runs.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\batch_norm_benchmark.py",
    "ast_data": "FunctionDef name:print_difference arg:mode arg:t1 arg:t2 arguments arg arg arg Assign Call"
  },
  {
    "library": "django",
    "name": "_get_current_month",
    "source_code": "def _get_current_month(self, date):\n    return date.replace(day=1)",
    "docstring": "Return the start date of the previous interval.",
    "type": "method",
    "file_path": "django\\django\\views\\generic\\dates.py",
    "ast_data": "FunctionDef name:_get_current_month arg:self arg:date arguments arg arg Return return:yes Call"
  },
  {
    "library": "numpy",
    "name": "FortranRoutine",
    "source_code": "class FortranRoutine:\n    type = 'generic'\n\n    def __init__(self, name=None, filename=None):\n        self.filename = filename\n        if name is None:\n            root, ext = os.path.splitext(filename)\n            name = root\n        self.name = name\n        self._dependencies = None\n\n    def dependencies(self):\n        if self._dependencies is None:\n            deps = fortran.getDependencies(self.filename)\n            self._dependencies = [d.lower() for d in deps]\n        return self._dependencies\n\n    def __repr__(self):\n        return f'FortranRoutine({self.name!r}, filename={self.filename!r})'",
    "docstring": "Wrapper for a Fortran routine in a file.",
    "type": "class",
    "file_path": "numpy\\numpy\\linalg\\lapack_lite\\make_lite.py",
    "ast_data": "ClassDef name:FortranRoutine Assign FunctionDef name:__init__ arg:self arg:name arg:filename arguments arg arg arg Assign If Compare Assign Call Assign Assign Assign FunctionDef name:dependencies arg:self arguments arg If Compare Assign Call Assign Call Return return:yes FunctionDef name:__repr__ arg:self arguments arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "save",
    "source_code": "def save(self, f, **kwargs):\n    return self._c.save(str(f), **kwargs)",
    "docstring": "Save with a file-like object. save(f, _extra_files={}) See :func: which accepts a file-like object. This function, torch.save(), converts the object to a string, treating it as a path. DO NOT confuse these two functions when it comes to the 'f' parameter functionality.",
    "type": "method",
    "file_path": "pytorch\\torch\\jit\\_script.py",
    "ast_data": "FunctionDef name:save arg:self arg:f arguments arg arg arg Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "register_module_module_registration_hook",
    "source_code": "def register_module_module_registration_hook(hook: Callable[..., None]) -> RemovableHandle:\n    handle = RemovableHandle(_global_module_registration_hooks)\n    _global_module_registration_hooks[handle.id] = hook\n    return handle",
    "docstring": "Register a module registration hook common to all modules. .. warning :: This adds global state to the module The hook will be called every time :func: is invoked. It should have the following signature:: hook(module, name, submodule) -> None or new submodule The hook can modify the input or return a single modified value in the hook. Returns: :class:: a handle that can be used to remove the added hook by calling ``",
    "type": "function",
    "file_path": "pytorch\\torch\\nn\\modules\\module.py",
    "ast_data": "FunctionDef name:register_module_module_registration_hook arg:hook arguments arg Assign Call Assign Return return:yes"
  },
  {
    "library": "django",
    "name": "ready",
    "source_code": "def ready(self):\n    pass",
    "docstring": "Override this method in subclasses to run code when Django starts.",
    "type": "method",
    "file_path": "django\\django\\apps\\config.py",
    "ast_data": "FunctionDef name:ready arg:self arguments arg"
  },
  {
    "library": "scikit-learn",
    "name": "_estimate_wishart_full",
    "source_code": "def _estimate_wishart_full(self, nk, xk, sk):\n    _, n_features = xk.shape\n    self.degrees_of_freedom_ = self.degrees_of_freedom_prior_ + nk\n    self.covariances_ = np.empty((self.n_components, n_features, n_features))\n    for k in range(self.n_components):\n        diff = xk[k] - self.mean_prior_\n        self.covariances_[k] = self.covariance_prior_ + nk[k] * sk[k] + nk[k] * self.mean_precision_prior_ / self.mean_precision_[k] * np.outer(diff, diff)\n    self.covariances_ /= self.degrees_of_freedom_[:, np.newaxis, np.newaxis]",
    "docstring": "Estimate the full Wishart distribution parameters. Parameters ---------- X : array-like of shape (n_samples, n_features) nk : array-like of shape (n_components,) xk : array-like of shape (n_components, n_features) sk : array-like of shape (n_components, n_features, n_features)",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\mixture\\_bayesian_mixture.py",
    "ast_data": "FunctionDef name:_estimate_wishart_full arg:self arg:nk arg:xk arg:sk arguments arg arg arg arg Assign Assign Assign Call For Call Assign Assign Call"
  },
  {
    "library": "django",
    "name": "_add_local_translations",
    "source_code": "def _add_local_translations(self):\n    for localedir in reversed(settings.LOCALE_PATHS):\n        translation = self._new_gnu_trans(localedir)\n        self.merge(translation)",
    "docstring": "Merge translations defined in LOCALE_PATHS.",
    "type": "method",
    "file_path": "django\\django\\utils\\translation\\trans_real.py",
    "ast_data": "FunctionDef name:_add_local_translations arg:self arguments arg For Call Assign Call Call"
  },
  {
    "library": "pandas",
    "name": "_format_data",
    "source_code": "@final\ndef _format_data(self, name=None) -> str_t:\n    is_justify = True\n    if self.inferred_type == 'string':\n        is_justify = False\n    elif isinstance(self.dtype, CategoricalDtype):\n        self = cast('CategoricalIndex', self)\n        if is_object_dtype(self.categories.dtype):\n            is_justify = False\n    elif isinstance(self, ABCRangeIndex):\n        return ''\n    return format_object_summary(self, self._formatter_func, is_justify=is_justify, name=name, line_break_each_value=self._is_multi)",
    "docstring": "Return the formatted data as a unicode string.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\indexes\\base.py",
    "ast_data": "FunctionDef name:_format_data arg:self arg:name arguments arg arg Assign If Compare Assign If Call Assign Call If Call Assign If Call Return return:yes Return return:yes Call"
  },
  {
    "library": "authlib",
    "name": "decrypt",
    "source_code": "def decrypt(self, ciphertext, aad, iv, tag, key):\n    self.check_iv(iv)\n    chacha = Cryptodome_ChaCha20_Poly1305.new(key=key, nonce=iv)\n    chacha.update(aad)\n    return chacha.decrypt_and_verify(ciphertext, tag)",
    "docstring": "Content Decryption with AEAD_XCHACHA20_POLY1305. :param ciphertext: ciphertext in bytes :param aad: additional authenticated data in bytes :param iv: initialization vector in bytes :param tag: authentication tag in bytes :param key: encrypted key in bytes :return: message",
    "type": "method",
    "file_path": "authlib\\authlib\\jose\\drafts\\_jwe_enc_cryptodome.py",
    "ast_data": "FunctionDef name:decrypt arg:self arg:ciphertext arg:aad arg:iv arg:tag arg:key arguments arg arg arg arg arg arg Call Assign Call Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "value_container",
    "source_code": "def value_container(val):\n    container = None\n    if not isinstance(val, values_lib.DistributedVariable):\n        if hasattr(val, '_distributed_container'):\n            container = val._distributed_container()\n        elif isinstance(val, composite_tensor.CompositeTensor) and hasattr(val, 'handle') and hasattr(val.handle, '_distributed_container'):\n            container = val.handle._distributed_container()\n    return container if container is not None else val",
    "docstring": "Returns the container that this per-replica belongs to. Args: val: A value returned by or a variable created in . Returns: A container that belongs to. If value does not belong to any container (including the case of container having been destroyed), returns the value itself.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\distribute_utils.py",
    "ast_data": "FunctionDef name:value_container arg:val arguments arg Assign If Call If Call Assign Call If BoolOp Call Call Call Assign Call Return return:yes Compare"
  },
  {
    "library": "tensorflow",
    "name": "stateless_categorical",
    "source_code": "@tf_export('random.stateless_categorical')\n@dispatch.add_dispatch_support\ndef stateless_categorical(logits, num_samples, seed, dtype=dtypes.int64, name=None):\n    with ops.name_scope(name, 'stateless_categorical', [logits, seed]):\n        return stateless_multinomial_categorical_impl(logits, num_samples, dtype, seed)",
    "docstring": "Draws deterministic pseudorandom samples from a categorical distribution. This is a stateless version of : if run twice with the same seeds and shapes, it will produce the same pseudorandom numbers. The output is consistent across multiple runs on the same hardware (and between CPU and GPU), but may change between versions of TensorFlow or on non-CPU/GPU hardware. Example: Args: logits: 2-D Tensor with shape . Each slice represents the unnormalized log-probabilities for all classes. num_samples: 0-D. Number of independent samples to draw for each row slice. seed: A shape [2] Tensor, the seed to the random number generator. Must have dtype or . (When using XLA, only is allowed.) dtype: The integer type of the output: or . Defaults to . name: Optional name for the operation. Returns: The drawn samples of shape .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\stateless_random_ops.py",
    "ast_data": "FunctionDef name:stateless_categorical arg:logits arg:num_samples arg:seed arg:dtype arg:name arguments arg arg arg arg arg With Call Return return:yes Call Call"
  },
  {
    "library": "matplotlib",
    "name": "set_label_coords",
    "source_code": "def set_label_coords(self, x, y, transform=None):\n    self._autolabelpos = False\n    if transform is None:\n        transform = self.axes.transAxes\n    self.label.set_transform(transform)\n    self.label.set_position((x, y))\n    self.stale = True",
    "docstring": "Set the coordinates of the label. By default, the x coordinate of the y label and the y coordinate of the x label are determined by the tick label bounding boxes, but this can lead to poor alignment of multiple labels if there are multiple Axes. You can also specify the coordinate system of the label with the transform. If None, the default coordinate system will be the axes coordinate system: (0, 0) is bottom left, (0.5, 0.5) is center, etc.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\axis.py",
    "ast_data": "FunctionDef name:set_label_coords arg:self arg:x arg:y arg:transform arguments arg arg arg arg Assign If Compare Assign Call Call Assign"
  },
  {
    "library": "tensorflow",
    "name": "name",
    "source_code": "@property\ndef name(self):\n    return self._name",
    "docstring": "The name of this variable.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\ref_variable.py",
    "ast_data": "FunctionDef name:name arg:self arguments arg Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "get_verticalalignment",
    "source_code": "def get_verticalalignment(self):\n    return self._verticalalignment",
    "docstring": "Return the vertical alignment as a string. Will be one of 'top', 'center', 'bottom', 'baseline' or 'center_baseline'.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\text.py",
    "ast_data": "FunctionDef name:get_verticalalignment arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_check_shapes",
    "source_code": "def _check_shapes(self):\n    uv_shape = array_ops.broadcast_static_shape(self.u.shape, self.v.shape)\n    batch_shape = array_ops.broadcast_static_shape(self.base_operator.batch_shape, uv_shape[:-2])\n    tensor_shape.Dimension(self.base_operator.domain_dimension).assert_is_compatible_with(uv_shape[-2])\n    if self._diag_update is not None:\n        tensor_shape.dimension_at_index(uv_shape, -1).assert_is_compatible_with(self._diag_update.shape[-1])\n        array_ops.broadcast_static_shape(batch_shape, self._diag_update.shape[:-1])",
    "docstring": "Static check that shapes are compatible.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\linalg\\linear_operator_low_rank_update.py",
    "ast_data": "FunctionDef name:_check_shapes arg:self arguments arg Assign Call Assign Call Call Call If Compare Call Call Call"
  },
  {
    "library": "seaborn",
    "name": "Band",
    "source_code": "@document_properties\n@dataclass\nclass Band(AreaBase, Mark):\n    color: MappableColor = Mappable('C0')\n    alpha: MappableFloat = Mappable(0.2)\n    fill: MappableBool = Mappable(True)\n    edgecolor: MappableColor = Mappable(depend='color')\n    edgealpha: MappableFloat = Mappable(1)\n    edgewidth: MappableFloat = Mappable(0)\n    edgestyle: MappableFloat = Mappable('-')\n\n    def _standardize_coordinate_parameters(self, data, orient):\n        other = {'x': 'y', 'y': 'x'}[orient]\n        if not set(data.columns) & {f'{other}min', f'{other}max'}:\n            agg = {f'{other}min': (other, 'min'), f'{other}max': (other, 'max')}\n            data = data.groupby(orient).agg(**agg).reset_index()\n        return data",
    "docstring": "A fill mark representing an interval between values. See also -------- Area : A fill mark drawn from a baseline to data values. Examples -------- .. include:: ../docstrings/objects.Band.rst",
    "type": "class",
    "file_path": "seaborn\\seaborn\\_marks\\area.py",
    "ast_data": "ClassDef name:Band Call Call Call Call Call Call Call FunctionDef name:_standardize_coordinate_parameters arg:self arg:data arg:orient arguments arg arg arg Assign If Call Assign Assign Call Call Call Return return:yes"
  },
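A brief sketch of drawing a `Band` via the seaborn objects interface, assuming seaborn >= 0.12; the DataFrame and column names are hypothetical:

```python
import pandas as pd
import seaborn.objects as so

# Hypothetical interval data: lower/upper bounds per x position.
df = pd.DataFrame({
    "x": [1, 2, 3, 4],
    "lo": [0.8, 1.1, 0.9, 1.3],
    "hi": [1.4, 1.9, 1.6, 2.0],
})
so.Plot(df, x="x", ymin="lo", ymax="hi").add(so.Band()).show()
```

If `ymin`/`ymax` are not mapped, `_standardize_coordinate_parameters` aggregates the min/max of the value axis per position, as in the source above.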
  {
    "library": "kornia",
    "name": "forward",
    "source_code": "def forward(self, desc0: Tensor, desc1: Tensor) -> Tuple[Tensor, Tensor]:\n    dtype = self.token[0].weight.dtype\n    orig_dtype = desc0.dtype\n    return (self.token(desc0.detach().to(dtype)).squeeze(-1).to(orig_dtype), self.token(desc1.detach().to(dtype)).squeeze(-1).to(orig_dtype))",
    "docstring": "Get confidence tokens.",
    "type": "method",
    "file_path": "kornia\\kornia\\feature\\lightglue.py",
    "ast_data": "FunctionDef name:forward arg:self arg:desc0 arg:desc1 arguments arg arg arg Assign Assign Return return:yes Call Call Call Call Call Call Call Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_ParseDocstringArgSpec",
    "source_code": "def _ParseDocstringArgSpec(doc):\n    match = re.search('^\\\\w+\\\\(.*\\\\)', doc)\n    args_spec = _GenerateArgsSpec(doc)\n    if not match or args_spec is None:\n        raise ValueError(f'Failed to parse argspec from docstring: {doc}')\n    output_string = f'args=[{args_spec}], varargs=None, keywords=None, defaults=None'\n    return output_string",
    "docstring": "Get an ArgSpec string from a method docstring. This method is used to generate argspec for C extension functions that follow pybind11 DocString format function signature. For example: Args: doc: A python string which starts with function signature. Returns: string: a argspec string representation if successful. If not, return None. Raises: ValueError: Raised when failed to parse the input docstring.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\tools\\api\\lib\\python_object_to_proto_visitor.py",
    "ast_data": "FunctionDef name:_ParseDocstringArgSpec arg:doc arguments arg Assign Call Assign Call If BoolOp Compare Raise Call Assign Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "update_user_library",
    "source_code": "def update_user_library(library):\n    for stylelib_path in map(os.path.expanduser, USER_LIBRARY_PATHS):\n        styles = read_style_directory(stylelib_path)\n        update_nested_dict(library, styles)\n    return library",
    "docstring": "Update style library with user-defined rc files.",
    "type": "function",
    "file_path": "matplotlib\\lib\\matplotlib\\style\\core.py",
    "ast_data": "FunctionDef name:update_user_library arg:library arguments arg For Call Assign Call Call Return return:yes"
  },
  {
    "library": "authlib",
    "name": "authorize_access_token",
    "source_code": "def authorize_access_token(self, **kwargs):\n    if request.method == 'GET':\n        error = request.args.get('error')\n        if error:\n            description = request.args.get('error_description')\n            raise OAuthError(error=error, description=description)\n        params = {'code': request.args.get('code'), 'state': request.args.get('state')}\n    else:\n        params = {'code': request.form.get('code'), 'state': request.form.get('state')}\n    state_data = self.framework.get_state_data(session, params.get('state'))\n    self.framework.clear_state_data(session, params.get('state'))\n    params = self._format_state_params(state_data, params)\n    claims_options = kwargs.pop('claims_options', None)\n    claims_cls = kwargs.pop('claims_cls', None)\n    leeway = kwargs.pop('leeway', 120)\n    token = self.fetch_access_token(**params, **kwargs)\n    self.token = token\n    if 'id_token' in token and 'nonce' in state_data:\n        userinfo = self.parse_id_token(token, nonce=state_data['nonce'], claims_options=claims_options, claims_cls=claims_cls, leeway=leeway)\n        token['userinfo'] = userinfo\n    return token",
    "docstring": "Fetch access token in one step. :return: A token dict.",
    "type": "method",
    "file_path": "authlib\\authlib\\integrations\\flask_client\\apps.py",
    "ast_data": "FunctionDef name:authorize_access_token arg:self arguments arg arg If Compare Assign Call If Assign Call Raise Call Assign Call Call Assign Call Call Assign Call Call Call Call Assign Call Assign Call Assign Call Assign Call Assign Call Assign If BoolOp Compare Compare Assign Call Assign Return return:yes"
  },
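A sketch of the Flask flow in which `authorize_access_token` is called, assuming a registered Authlib client; the provider URLs, client name, and credentials are placeholders:

```python
from flask import Flask, url_for
from authlib.integrations.flask_client import OAuth

app = Flask(__name__)
app.secret_key = "dev"  # session storage for the OAuth state
oauth = OAuth(app)
oauth.register(
    "demo",
    client_id="...",
    client_secret="...",
    access_token_url="https://provider.example/oauth/token",
    authorize_url="https://provider.example/oauth/authorize",
)

@app.route("/login")
def login():
    return oauth.demo.authorize_redirect(url_for("callback", _external=True))

@app.route("/callback")
def callback():
    return oauth.demo.authorize_access_token()  # one-step token fetch
```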
  {
    "library": "tensorflow",
    "name": "_gather_saveables_for_checkpoint",
    "source_code": "def _gather_saveables_for_checkpoint(self):\n\n    def _saveable_factory(name=self._common_name):\n        return _MirroredSaveable(self, self._primary, name)\n    return {trackable.VARIABLE_VALUE_KEY: _saveable_factory}",
    "docstring": "Overrides Trackable method. This allows both name-based and object-based save and restore of MirroredVariables. Returns: A dictionary mapping attribute names to factories.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\values.py",
    "ast_data": "FunctionDef name:_gather_saveables_for_checkpoint arg:self arguments arg FunctionDef name:_saveable_factory arg:name arguments arg Return return:yes Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "adaptive_autorange",
    "source_code": "def adaptive_autorange(self, threshold: float=0.1, *, min_run_time: float=0.01, max_run_time: float=10.0, callback: Optional[Callable[[int, float], NoReturn]]=None) -> common.Measurement:\n    number = self._estimate_block_size(min_run_time=0.05)\n\n    def time_hook() -> float:\n        return self._timeit(number)\n\n    def stop_hook(times: list[float]) -> bool:\n        if len(times) > 3:\n            return common.Measurement(number_per_run=number, raw_times=times, task_spec=self._task_spec).meets_confidence(threshold=threshold)\n        return False\n    times = self._threaded_measurement_loop(number, time_hook, stop_hook, min_run_time, max_run_time, callback=callback)\n    return common.Measurement(number_per_run=number, raw_times=times, task_spec=self._task_spec)",
    "docstring": "Similar to but also checks for variablility in measurements and repeats until iqr/median is smaller than or is reached. At a high level, adaptive_autorange executes the following pseudo-code:: times = [] while times.sum 3 and times.sum > min_run_time small_iqr=times.iqr/times.mean<threshold if enough_data and small_iqr: break Args: threshold: value of iqr/median threshold for stopping min_run_time: total runtime needed before checking max_run_time: total runtime for all measurements regardless of Returns: A object that contains measured runtimes and repetition counts, and can be used to compute statistics. (mean, median, etc.)",
    "type": "method",
    "file_path": "pytorch\\torch\\utils\\benchmark\\utils\\timer.py",
    "ast_data": "FunctionDef name:adaptive_autorange arg:self arg:threshold arguments arg arg arg arg arg Assign Call FunctionDef name:time_hook arguments Return return:yes Call FunctionDef name:stop_hook arg:times arguments arg If Compare Call Return return:yes Call Call Return return:yes Assign Call Return return:yes Call"
  },
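A small usage sketch for `adaptive_autorange` on a `torch.utils.benchmark.Timer`, assuming a recent PyTorch; the statement being timed is arbitrary:

```python
from torch.utils.benchmark import Timer

t = Timer(
    stmt="torch.mm(a, b)",
    setup="a = torch.rand(128, 128); b = torch.rand(128, 128)",
)
# Repeats measurement until iqr/median < 0.1 or max_run_time elapses.
m = t.adaptive_autorange(threshold=0.1)
print(m.median, m.number_per_run)
```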
  {
    "library": "scipy",
    "name": "Price01",
    "source_code": "class Price01(Benchmark):\n\n    def __init__(self, dimensions=2):\n        Benchmark.__init__(self, dimensions)\n        self._bounds = list(zip([-500.0] * self.N, [500.0] * self.N))\n        self.custom_bounds = ([-10.0, 10.0], [-10.0, 10.0])\n        self.global_optimum = [[5.0, 5.0]]\n        self.fglob = 0.0\n\n    def fun(self, x, *args):\n        self.nfev += 1\n        return (abs(x[0]) - 5.0) ** 2.0 + (abs(x[1]) - 5.0) ** 2.0",
    "docstring": "Price 1 objective function. This class defines the Price 1 [1]_ global optimization problem. This is a multimodal minimization problem defined as follows: .. math:: f_{\\text{Price01}}(x) = (\\lvert x_1 \\rvert - 5)^2 + (\\lvert x_2 \\rvert - 5)^2 with :math: for :math:. *Global optimum*: :math: for :math: or :math: or :math: or :math:. .. [1] Price, W. A controlled random search procedure for global optimisation Computer Journal, 1977, 20, 367-370",
    "type": "class",
    "file_path": "scipy\\benchmarks\\benchmarks\\go_benchmark_functions\\go_funcs_P.py",
    "ast_data": "ClassDef name:Price01 FunctionDef name:__init__ arg:self arg:dimensions arguments arg arg Call Assign Call Call Assign Assign Assign FunctionDef name:fun arg:self arg:x arguments arg arg arg Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "with_step",
    "source_code": "def with_step(self, step):\n    self._options['step'] = step\n    return self",
    "docstring": "Which profile step to use for profiling. The 'step' here refers to the step defined by API. Args: step: When multiple steps of profiles are available, select which step's profile to use. If -1, use average of all available steps. Returns: self",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\profiler\\option_builder.py",
    "ast_data": "FunctionDef name:with_step arg:self arg:step arguments arg arg Assign Return return:yes"
  },
  {
    "library": "kornia",
    "name": "rotation_matrix",
    "source_code": "@property\ndef rotation_matrix(self) -> Tensor:\n    return self.extrinsics[..., :3, :3]",
    "docstring": "Return the 3x3 rotation matrix from the extrinsics. Returns: tensor of shape :math:.",
    "type": "method",
    "file_path": "kornia\\kornia\\geometry\\camera\\pinhole.py",
    "ast_data": "FunctionDef name:rotation_matrix arg:self arguments arg Return return:yes"
  },
  {
    "library": "sphinx",
    "name": "publish_msgstr",
    "source_code": "def publish_msgstr(app: Sphinx, source: str, source_path: str, source_line: int, config: Config, settings: Any) -> nodes.Element:\n    try:\n        rst_prolog = config.rst_prolog\n        config.rst_prolog = None\n        from sphinx.io import SphinxI18nReader\n        reader = SphinxI18nReader()\n        reader.setup(app)\n        filetype = get_filetype(config.source_suffix, source_path)\n        parser = app.registry.create_source_parser(app, filetype)\n        doc = reader.read(source=StringInput(source=source, source_path=f'{source_path}:{source_line}:<translated>'), parser=parser, settings=settings)\n        with contextlib.suppress(IndexError):\n            return doc[0]\n        return doc\n    finally:\n        config.rst_prolog = rst_prolog",
    "docstring": "Publish msgstr (single line) into docutils document :param sphinx.application.Sphinx app: sphinx application :param str source: source text :param str source_path: source path for warning indication :param source_line: source line for warning indication :param sphinx.config.Config config: sphinx config :param docutils.frontend.Values settings: docutils settings :return: document :rtype: docutils.nodes.document",
    "type": "function",
    "file_path": "sphinx\\sphinx\\transforms\\i18n.py",
    "ast_data": "FunctionDef name:publish_msgstr arg:app arg:source arg:source_path arg:source_line arg:config arg:settings arguments arg arg arg arg arg arg Try Assign Assign Assign Call Call Assign Call Assign Call Assign Call Call With Call Return return:yes Return return:yes Assign"
  },
  {
    "library": "pytorch",
    "name": "_create_wrappers_for_dispatch",
    "source_code": "def _create_wrappers_for_dispatch(needs_autograd: bool) -> list[CompilerWrapper]:\n    return [AOTDedupeWrapper(), AOTSyntheticBaseWrapper(trace_joint=needs_autograd)]",
    "docstring": "Wrappers that run on every dispatch function",
    "type": "function",
    "file_path": "pytorch\\torch\\_functorch\\_aot_autograd\\jit_compile_runtime_wrappers.py",
    "ast_data": "FunctionDef name:_create_wrappers_for_dispatch arg:needs_autograd arguments arg Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "gen_fake_kwargs",
    "source_code": "def gen_fake_kwargs(self) -> KwargsType:\n    return tree_map_only(DTensorSpec, _rebuild_tensor_from_dtensor_meta, self.kwargs_schema, is_leaf=lambda x: isinstance(x, DTensorSpec))",
    "docstring": "gen_fake_kwargs: generate fake kwargs for the operator, this is mainly used by sharding propagation rules to generate fake kwargs for the operator to run the local tensor operator and get the output spec.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\tensor\\_op_schema.py",
    "ast_data": "FunctionDef name:gen_fake_kwargs arg:self arguments arg Return return:yes Call arguments arg Call"
  },
  {
    "library": "matplotlib",
    "name": "get_size",
    "source_code": "def get_size(self, renderer):\n    raise NotImplementedError('Subclasses must implement')",
    "docstring": "Return two-float tuple with relative and absolute sizes.",
    "type": "method",
    "file_path": "matplotlib\\lib\\mpl_toolkits\\axes_grid1\\axes_size.py",
    "ast_data": "FunctionDef name:get_size arg:self arg:renderer arguments arg arg Raise Call"
  },
  {
    "library": "tensorflow",
    "name": "_gather_saveable_objects",
    "source_code": "def _gather_saveable_objects(self):\n    objects = util.list_objects(self._object_graph_view)\n    saveable_objects = []\n    for trackable in objects:\n        trackable._maybe_initialize_trackable()\n        if trackable._update_uid < self._checkpoint.restore_uid:\n            trackable._update_uid = self._checkpoint.restore_uid\n        else:\n            continue\n        saveable_objects.extend(self._checkpoint.globally_named_object_attributes(trackable))\n    return saveable_objects",
    "docstring": "Walk the object graph, using global names for SaveableObjects.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\checkpoint\\checkpoint.py",
    "ast_data": "FunctionDef name:_gather_saveable_objects arg:self arguments arg Assign Call Assign For Call If Compare Assign Call Call Return return:yes"
  },
  {
    "library": "numpy",
    "name": "get_flags_arch",
    "source_code": "def get_flags_arch(self):\n    return []",
    "docstring": "List of architecture dependent compiler flags.",
    "type": "method",
    "file_path": "numpy\\numpy\\distutils\\fcompiler\\__init__.py",
    "ast_data": "FunctionDef name:get_flags_arch arg:self arguments arg Return return:no"
  },
  {
    "library": "sphinx",
    "name": "_last_modified_time",
    "source_code": "def _last_modified_time(source: str | os.PathLike[str], /) -> int:\n    st = source.stat() if isinstance(source, os.DirEntry) else os.stat(source)\n    return -(st.st_mtime_ns // -1000)",
    "docstring": "Return the last modified time of ``. The time is returned as integer microseconds. The lowest common denominator of modern file-systems seems to be microsecond-level precision. We prefer to err on the side of re-rendering a file, so we round up to the nearest microsecond.",
    "type": "function",
    "file_path": "sphinx\\sphinx\\util\\osutil.py",
    "ast_data": "FunctionDef name:_last_modified_time arguments arg Assign Call Call Call Return return:yes"
  },
  {
    "library": "django",
    "name": "process_exception_by_middleware",
    "source_code": "def process_exception_by_middleware(self, exception, request):\n    for middleware_method in self._exception_middleware:\n        response = middleware_method(request, exception)\n        if response:\n            return response\n    return None",
    "docstring": "Pass the exception to the exception middleware. If no middleware return a response for this exception, return None.",
    "type": "method",
    "file_path": "django\\django\\core\\handlers\\base.py",
    "ast_data": "FunctionDef name:process_exception_by_middleware arg:self arg:exception arg:request arguments arg arg arg For Assign Call If Return return:yes Return return:no"
  },
  {
    "library": "tensorflow",
    "name": "_compute_carry_and_output",
    "source_code": "def _compute_carry_and_output(self, x, h_tm1, c_tm1):\n    x_i, x_f, x_c, x_o = x\n    h_tm1_i, h_tm1_f, h_tm1_c, h_tm1_o = h_tm1\n    i = self.recurrent_activation(x_i + backend.dot(h_tm1_i, self.recurrent_kernel[:, :self.units]))\n    f = self.recurrent_activation(x_f + backend.dot(h_tm1_f, self.recurrent_kernel[:, self.units:self.units * 2]))\n    c = f * c_tm1 + i * self.activation(x_c + backend.dot(h_tm1_c, self.recurrent_kernel[:, self.units * 2:self.units * 3]))\n    o = self.recurrent_activation(x_o + backend.dot(h_tm1_o, self.recurrent_kernel[:, self.units * 3:]))\n    return (c, o)",
    "docstring": "Computes carry and output using split kernels.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\layers\\recurrent.py",
    "ast_data": "FunctionDef name:_compute_carry_and_output arg:self arg:x arg:h_tm1 arg:c_tm1 arguments arg arg arg arg Assign Assign Assign Call Call Assign Call Call Assign Call Call Assign Call Call Return return:yes"
  },
  {
    "library": "pandas",
    "name": "temp_setattr",
    "source_code": "@contextlib.contextmanager\ndef temp_setattr(obj, attr: str, value, condition: bool=True) -> Generator[None]:\n    if condition:\n        old_value = getattr(obj, attr)\n        setattr(obj, attr, value)\n    try:\n        yield obj\n    finally:\n        if condition:\n            setattr(obj, attr, old_value)",
    "docstring": "Temporarily set attribute on an object. Parameters ---------- obj : object Object whose attribute will be modified. attr : str Attribute to modify. value : Any Value to temporarily set attribute to. condition : bool, default True Whether to set the attribute. Provided in order to not have to conditionally use this context manager. Yields ------ object : obj with modified attribute.",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\common.py",
    "ast_data": "FunctionDef name:temp_setattr arg:obj arg:attr arg:value arg:condition arguments arg arg arg arg If Assign Call Call Try If Call"
  },
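A minimal sketch of `temp_setattr` in isolation; note it is a private pandas helper (`pandas.core.common`), so the import is for illustration only:

```python
from types import SimpleNamespace
from pandas.core.common import temp_setattr  # private helper

obj = SimpleNamespace(flag=False)
with temp_setattr(obj, "flag", True):
    assert obj.flag is True   # temporarily overridden
assert obj.flag is False      # restored on exit, even if an error occurred
```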
  {
    "library": "matplotlib",
    "name": "_fix_pts_xy_order",
    "source_code": "def _fix_pts_xy_order(self, pts):\n    return pts[:, ::-1] if self.t_direction == 'y' else pts",
    "docstring": "Fix pts calculation results with . In the workflow, it is assumed that is 'x'. If this is not true, we need to exchange the coordinates.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\collections.py",
    "ast_data": "FunctionDef name:_fix_pts_xy_order arg:self arg:pts arguments arg arg Return return:yes Compare"
  },
  {
    "library": "tensorflow",
    "name": "_show_full_tensor",
    "source_code": "def _show_full_tensor(tensor):\n    return _print_tensor(tensor_name, -1, tensor, tensor)",
    "docstring": "Trace function for printing the entire tensor.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\tpu\\tensor_tracer.py",
    "ast_data": "FunctionDef name:_show_full_tensor arg:tensor arguments arg Return return:yes Call"
  },
  {
    "library": "django",
    "name": "reraise",
    "source_code": "def reraise(exc, backend):\n    new = copy_exception(exc, backend)\n    raise new from exc",
    "docstring": "Reraise TemplateDoesNotExist while maintaining template debug information.",
    "type": "function",
    "file_path": "django\\django\\template\\backends\\django.py",
    "ast_data": "FunctionDef name:reraise arg:exc arg:backend arguments arg arg Assign Call Raise"
  },
  {
    "library": "pytorch",
    "name": "init",
    "source_code": "def init():\n    _lazy_init()",
    "docstring": "Initialize PyTorch's CUDA state. You may need to call this explicitly if you are interacting with PyTorch via its C API, as Python bindings for CUDA functionality will not be available until this initialization takes place. Ordinary users should not need this, as all of PyTorch's CUDA methods automatically initialize CUDA state on-demand. Does nothing if the CUDA state is already initialized.",
    "type": "function",
    "file_path": "pytorch\\torch\\cuda\\__init__.py",
    "ast_data": "FunctionDef name:init arguments Call"
  },
  {
    "library": "cherrypy",
    "name": "check_localhost",
    "source_code": "def check_localhost(self):\n    for k, v in cherrypy.config.items():\n        if k == 'server.socket_host' and v == 'localhost':\n            warnings.warn(\"The use of 'localhost' as a socket host can cause problems on newer systems, since 'localhost' can map to either an IPv4 or an IPv6 address. You should use '127.0.0.1' or '[::1]' instead.\")",
    "docstring": "Warn if any socket_host is 'localhost'. See #711.",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\_cpchecker.py",
    "ast_data": "FunctionDef name:check_localhost arg:self arguments arg For Call If BoolOp Compare Compare Call"
  },
  {
    "library": "tensorflow",
    "name": "input_pipeline_id",
    "source_code": "@property\ndef input_pipeline_id(self):\n    return self._input_pipeline_id",
    "docstring": "Returns the input pipeline ID.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\distribute_lib.py",
    "ast_data": "FunctionDef name:input_pipeline_id arg:self arguments arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "is_int8_woq_gemm_small_m_dim",
    "source_code": "def is_int8_woq_gemm_small_m_dim(self, X: ir.ReinterpretView, W: ir.ReinterpretView, N, K, micro_gemm):\n    return isinstance(micro_gemm, CppMicroGemmFP32Vec) and is_int8_woq_gemm_small_m_dim_corner_case(micro_gemm, X.get_size()[0], N, K) and (X.get_dtype() is torch.bfloat16) and (W.get_dtype() is torch.int8)",
    "docstring": "Use SMALL_M_GEMM_TEMPLATE",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\codegen\\cpp_gemm_template.py",
    "ast_data": "FunctionDef name:is_int8_woq_gemm_small_m_dim arg:self arg:X arg:W arg:N arg:K arg:micro_gemm arguments arg arg arg arg arg arg Return return:yes BoolOp Call Call Call Compare Call Compare Call"
  },
  {
    "library": "django",
    "name": "set_script_prefix",
    "source_code": "def set_script_prefix(prefix):\n    if not prefix.endswith('/'):\n        prefix += '/'\n    _prefixes.value = prefix",
    "docstring": "Set the script prefix for the current thread.",
    "type": "function",
    "file_path": "django\\django\\urls\\base.py",
    "ast_data": "FunctionDef name:set_script_prefix arg:prefix arguments arg If Call Assign"
  },
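A short sketch of `set_script_prefix` outside the request cycle (e.g., in a management command), assuming a URL pattern named "index" exists; both names are illustrative:

```python
from django.urls import reverse, set_script_prefix

# Set the prefix manually so reverse() produces fully prefixed URLs.
set_script_prefix("/myapp")   # the trailing slash is appended automatically
url = reverse("index")        # -> "/myapp/..." for the hypothetical "index" route
```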
  {
    "library": "pytorch",
    "name": "InvokeSubgraphHopGraphs",
    "source_code": "@dataclasses.dataclass\nclass InvokeSubgraphHopGraphs:\n    partitioning_done: bool = False\n    old_num_fw_outputs: Optional[int] = None\n    old_num_fw_inputs: Optional[int] = None\n    new_fw_hop_gm: Optional[torch.fx.GraphModule] = None\n    new_bw_hop_gm: Optional[torch.fx.GraphModule] = None\n    new_num_sym_nodes: Optional[int] = None\n    new_num_saved_nodes: Optional[int] = None",
    "docstring": "A data structure to hold all the information needed to partition the and joint graph and the restitch the and into the bigger .",
    "type": "class",
    "file_path": "pytorch\\torch\\_functorch\\_aot_autograd\\jit_compile_runtime_wrappers.py",
    "ast_data": "ClassDef name:InvokeSubgraphHopGraphs"
  },
  {
    "library": "kornia",
    "name": "_modify_lw",
    "source_code": "def _modify_lw(self) -> None:\n    pass",
    "docstring": "No modification required.",
    "type": "method",
    "file_path": "kornia\\kornia\\feature\\mkd.py",
    "ast_data": "FunctionDef name:_modify_lw arg:self arguments arg"
  },
  {
    "library": "pytorch",
    "name": "_insert_dequant_stub",
    "source_code": "def _insert_dequant_stub(node: Node, model: torch.nn.Module, named_modules: dict[str, torch.nn.Module], graph: Graph) -> Node:\n    prefix = 'dequant_stub_'\n    get_new_dequant_stub_name = get_new_attr_name_with_prefix(prefix)\n    dequant_stub_name = get_new_dequant_stub_name(model)\n    dequant_stub = DeQuantStub()\n    setattr(model, dequant_stub_name, dequant_stub)\n    named_modules[dequant_stub_name] = dequant_stub\n    with graph.inserting_after(node):\n        return graph.call_module(dequant_stub_name, (node,))",
    "docstring": "Attach a to the model and create a node that calls this on the output of , similar to how observers are inserted.",
    "type": "function",
    "file_path": "pytorch\\torch\\ao\\quantization\\fx\\utils.py",
    "ast_data": "FunctionDef name:_insert_dequant_stub arg:node arg:model arg:named_modules arg:graph arguments arg arg arg arg Assign Assign Call Assign Call Assign Call Call Assign With Call Return return:yes Call"
  },
  {
    "library": "numpy",
    "name": "_get_outer_edges",
    "source_code": "def _get_outer_edges(a, range):\n    if range is not None:\n        first_edge, last_edge = range\n        if first_edge > last_edge:\n            raise ValueError('max must be larger than min in range parameter.')\n        if not (np.isfinite(first_edge) and np.isfinite(last_edge)):\n            raise ValueError(f'supplied range of [{first_edge}, {last_edge}] is not finite')\n    elif a.size == 0:\n        first_edge, last_edge = (0, 1)\n    else:\n        first_edge, last_edge = (a.min(), a.max())\n        if not (np.isfinite(first_edge) and np.isfinite(last_edge)):\n            raise ValueError(f'autodetected range of [{first_edge}, {last_edge}] is not finite')\n    if first_edge == last_edge:\n        first_edge = first_edge - 0.5\n        last_edge = last_edge + 0.5\n    return (first_edge, last_edge)",
    "docstring": "Determine the outer bin edges to use, from either the data or the range argument",
    "type": "function",
    "file_path": "numpy\\numpy\\lib\\_histograms_impl.py",
    "ast_data": "FunctionDef name:_get_outer_edges arg:a arg:range arguments arg arg If Compare Assign If Compare Raise Call If BoolOp Call Call Raise Call If Compare Assign Assign Call Call If BoolOp Call Call Raise Call If Compare Assign Assign Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "draw",
    "source_code": "def draw(self, renderer):\n    self._set_lims()\n    self._set_scale()\n    super().draw(renderer)",
    "docstring": "Draw the secondary Axes. Consults the parent Axes for its limits and converts them using the converter specified by (or *functions* parameter when Axes initialized.)",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\axes\\_secondary_axes.py",
    "ast_data": "FunctionDef name:draw arg:self arg:renderer arguments arg arg Call Call Call Call"
  },
  {
    "library": "matplotlib",
    "name": "install_repl_displayhook",
    "source_code": "def install_repl_displayhook() -> None:\n    global _REPL_DISPLAYHOOK\n    if _REPL_DISPLAYHOOK is _ReplDisplayHook.IPYTHON:\n        return\n    mod_ipython = sys.modules.get('IPython')\n    if not mod_ipython:\n        _REPL_DISPLAYHOOK = _ReplDisplayHook.PLAIN\n        return\n    ip = mod_ipython.get_ipython()\n    if not ip:\n        _REPL_DISPLAYHOOK = _ReplDisplayHook.PLAIN\n        return\n    ip.events.register('post_execute', _draw_all_if_interactive)\n    _REPL_DISPLAYHOOK = _ReplDisplayHook.IPYTHON\n    if mod_ipython.version_info[:2] < (8, 24):\n        from IPython.core.pylabtools import backend2gui\n        ipython_gui_name = backend2gui.get(get_backend())\n    else:\n        _, ipython_gui_name = backend_registry.resolve_backend(get_backend())\n    if ipython_gui_name:\n        ip.enable_gui(ipython_gui_name)",
    "docstring": "Connect to the display hook of the current shell. The display hook gets called when the read-evaluate-print-loop (REPL) of the shell has finished the execution of a command. We use this callback to be able to automatically update a figure in interactive mode. This works both with IPython and with vanilla python shells.",
    "type": "function",
    "file_path": "matplotlib\\lib\\matplotlib\\pyplot.py",
    "ast_data": "FunctionDef name:install_repl_displayhook arguments If Compare Return return:no Assign Call If Assign Return return:no Assign Call If Assign Return return:no Call Assign If Compare Assign Call Call Assign Call Call If Call"
  },
  {
    "library": "tensorflow",
    "name": "cross_entropy",
    "source_code": "@deprecation.deprecated('2019-01-01', 'The TensorFlow Distributions library has moved to TensorFlow Probability (https://github.com/tensorflow/probability). You should update all references to use `tfp.distributions` instead of `tf.distributions`.', warn_once=True)\ndef cross_entropy(ref, other, allow_nan_stats=True, name=None):\n    with ops.name_scope(name, 'cross_entropy'):\n        return ref.entropy() + kl_divergence(ref, other, allow_nan_stats=allow_nan_stats)",
    "docstring": "Computes the (Shannon) cross entropy. Denote two distributions by () and (). Assuming are absolutely continuous with respect to one another and permit densities and , (Shanon) cross entropy is defined as: where denotes the support of the random variable . Args: ref: instance. other: instance. allow_nan_stats: Python , default . When , statistics (e.g., mean, mode, variance) use the value \"\" to indicate the result is undefined. When , an exception is raised if one or more of the statistic's batch members are undefined. name: Python prepended to names of ops created by this function. Returns: cross_entropy: with shape representing different calculations of (Shanon) cross entropy.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\distributions\\kullback_leibler.py",
    "ast_data": "FunctionDef name:cross_entropy arg:ref arg:other arg:allow_nan_stats arg:name arguments arg arg arg arg With Call Return return:yes Call Call Call"
  },
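Per the deprecation notice above, the same computation now lives in TensorFlow Probability; a sketch assuming `tensorflow_probability` is installed, with arbitrary example distributions:

```python
import tensorflow_probability as tfp

tfd = tfp.distributions
p = tfd.Normal(loc=0.0, scale=1.0)
q = tfd.Normal(loc=1.0, scale=1.5)
ce = p.cross_entropy(q)  # H[P, Q] = H[P] + KL[P || Q]
```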
  {
    "library": "pytorch",
    "name": "run_once",
    "source_code": "def run_once(self, *args, **kwargs):\n    return self._benchmark.run_once(*args, **kwargs)",
    "docstring": "Given input id (input_idx) run benchmark once and return prediction. This is useful for testing that benchmark actually runs the module you want it to run. input_idx here is an index into inputs array populated by calling add_input() method.",
    "type": "method",
    "file_path": "pytorch\\torch\\utils\\throughput_benchmark.py",
    "ast_data": "FunctionDef name:run_once arg:self arguments arg arg arg Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "unstack",
    "source_code": "def unstack(self, unstacker, fill_value) -> BlockManager:\n    new_columns = unstacker.get_new_columns(self.items)\n    new_index = unstacker.new_index\n    allow_fill = not unstacker.mask_all\n    if allow_fill:\n        new_mask2D = (~unstacker.mask).reshape(*unstacker.full_shape)\n        needs_masking = new_mask2D.any(axis=0)\n    else:\n        needs_masking = np.zeros(unstacker.full_shape[1], dtype=bool)\n    new_blocks: list[Block] = []\n    columns_mask: list[np.ndarray] = []\n    if len(self.items) == 0:\n        factor = 1\n    else:\n        fac = len(new_columns) / len(self.items)\n        assert fac == int(fac)\n        factor = int(fac)\n    for blk in self.blocks:\n        mgr_locs = blk.mgr_locs\n        new_placement = mgr_locs.tile_for_unstack(factor)\n        blocks, mask = blk._unstack(unstacker, fill_value, new_placement=new_placement, needs_masking=needs_masking)\n        new_blocks.extend(blocks)\n        columns_mask.extend(mask)\n        assert mask.sum() == sum((len(nb._mgr_locs) for nb in blocks))\n    new_columns = new_columns[columns_mask]\n    bm = BlockManager(new_blocks, [new_columns, new_index], verify_integrity=False)\n    return bm",
    "docstring": "Return a BlockManager with all blocks unstacked. Parameters ---------- unstacker : reshape._Unstacker fill_value : Any fill_value for newly introduced missing values. Returns ------- unstacked : BlockManager",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\internals\\managers.py",
    "ast_data": "FunctionDef name:unstack arg:self arg:unstacker arg:fill_value arguments arg arg arg Assign Call Assign Assign If Assign Call Assign Call Assign Call If Compare Call Assign Assign Call Call Compare Call Assign Call For Assign Assign Call Assign Call Call Call Compare Call Call Call Assign Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "get_extra_args",
    "source_code": "def get_extra_args():\n    g = ops.get_default_graph()\n    if isinstance(g, _FuncGraph):\n        return g.extra_args\n    else:\n        return []",
    "docstring": "Returns the corresponding function arguments for the captured inputs. Returns: If the default graph is being used to define a function, the returned list of place holders are those used inside the function body corresponding those returned by get_extra_inputs(). Otherwise, returns an empty list.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\function.py",
    "ast_data": "FunctionDef name:get_extra_args arguments Assign Call If Call Return return:yes Return return:no"
  },
  {
    "library": "tensorflow",
    "name": "split",
    "source_code": "@tf_should_use.should_use_result\ndef split(self, value, lengths, name=None):\n    with ops.name_scope(name, 'TensorArraySplit', [self._flow, value, lengths]):\n        value = ops.convert_to_tensor(value, preferred_dtype=self._dtype, name='value')\n        _check_dtypes(value, self._dtype)\n        lengths_64 = math_ops.cast(lengths, dtypes.int64)\n        if not context.executing_eagerly():\n            clengths = tensor_util.constant_value(lengths_64)\n            if value.shape.dims is not None and clengths is not None:\n                if clengths.shape and clengths.max() == clengths.min():\n                    self._check_element_shape(tensor_shape.TensorShape([clengths[0]]).concatenate(value.shape[1:]))\n        flow_out = list_ops.tensor_list_split(tensor=value, lengths=lengths_64, element_shape=self.element_shape, name=name)\n        return build_ta_with_new_flow(self, flow_out)",
    "docstring": "See TensorArray.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\tensor_array_ops.py",
    "ast_data": "FunctionDef name:split arg:self arg:value arg:lengths arg:name arguments arg arg arg arg With Call Assign Call Call Assign Call If Call Assign Call If BoolOp Compare Compare If BoolOp Compare Call Call Call Call Call Assign Call Return return:yes Call"
  },
  {
    "library": "seaborn",
    "name": "scatter_data",
    "source_code": "@property\ndef scatter_data(self):\n    x_j = self.x_jitter\n    if x_j is None:\n        x = self.x\n    else:\n        x = self.x + np.random.uniform(-x_j, x_j, len(self.x))\n    y_j = self.y_jitter\n    if y_j is None:\n        y = self.y\n    else:\n        y = self.y + np.random.uniform(-y_j, y_j, len(self.y))\n    return (x, y)",
    "docstring": "Data where each observation is a point.",
    "type": "method",
    "file_path": "seaborn\\seaborn\\regression.py",
    "ast_data": "FunctionDef name:scatter_data arg:self arguments arg Assign If Compare Assign Assign Call Call Assign If Compare Assign Assign Call Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "copy",
    "source_code": "def copy(self):\n    return self.__copy__()",
    "docstring": "Return a copy of the colormap.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\colors.py",
    "ast_data": "FunctionDef name:copy arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "pygame",
    "name": "spritecollideany",
    "source_code": "def spritecollideany(sprite, group, collided=None):\n    default_sprite_collide_func = sprite.rect.colliderect\n    if collided is not None:\n        for group_sprite in group:\n            if collided(sprite, group_sprite):\n                return group_sprite\n    else:\n        for group_sprite in group:\n            if default_sprite_collide_func(group_sprite.rect):\n                return group_sprite\n    return None",
    "docstring": "finds any sprites in a group that collide with the given sprite pygame.sprite.spritecollideany(sprite, group): return sprite Given a sprite and a group of sprites, this will return any single sprite that collides with the given sprite. If there are no collisions, then this returns None. If you don't need all the features of the spritecollide function, this function will be a bit quicker. Collided is a callback function used to calculate if two sprites are colliding. It should take two sprites as values and return a bool value indicating if they are colliding. If collided is not passed, then all sprites must have a \"rect\" value, which is a rectangle of the sprite area, which will be used to calculate the collision.",
    "type": "function",
    "file_path": "pygame\\src_py\\sprite.py",
    "ast_data": "FunctionDef name:spritecollideany arg:sprite arg:group arg:collided arguments arg arg arg Assign If Compare For If Call Return return:yes For If Call Return return:yes Return return:no"
  },
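A compact usage sketch for `spritecollideany` with the default rect-based collision; the sprites and rects are illustrative:

```python
import pygame

player = pygame.sprite.Sprite()
player.rect = pygame.Rect(0, 0, 10, 10)

enemy = pygame.sprite.Sprite()
enemy.rect = pygame.Rect(5, 5, 10, 10)
enemies = pygame.sprite.Group(enemy)

hit = pygame.sprite.spritecollideany(player, enemies)
if hit is not None:
    print("collided with", hit)
```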
  {
    "library": "kornia",
    "name": "__init__",
    "source_code": "def __init__(self, embed_dim: int, image_embedding_size: tuple[int, int], input_image_size: tuple[int, int], mask_in_chans: int, activation: type[Module]=nn.GELU) -> None:\n    super().__init__()\n    self.embed_dim = embed_dim\n    self.input_image_size = input_image_size\n    self.image_embedding_size = image_embedding_size\n    self.pe_layer = PositionEmbeddingRandom(embed_dim // 2)\n    self.num_point_embeddings: int = 4\n    point_embeddings = [nn.Embedding(1, embed_dim) for i in range(self.num_point_embeddings)]\n    self.point_embeddings = nn.ModuleList(point_embeddings)\n    self.not_a_point_embed = nn.Embedding(1, embed_dim)\n    self.mask_input_size = (4 * image_embedding_size[0], 4 * image_embedding_size[1])\n    self.mask_downscaling = nn.Sequential(nn.Conv2d(1, mask_in_chans // 4, kernel_size=2, stride=2), LayerNorm2d(mask_in_chans // 4), activation(), nn.Conv2d(mask_in_chans // 4, mask_in_chans, kernel_size=2, stride=2), LayerNorm2d(mask_in_chans), activation(), nn.Conv2d(mask_in_chans, embed_dim, kernel_size=1))\n    self.no_mask_embed = nn.Embedding(1, embed_dim)",
    "docstring": "Encode prompts for input to SAM's mask decoder. Args: embed_dim: The prompts' embedding dimension image_embedding_size: The spatial size of the image embedding, as (H, W). input_image_size: The padded size of the image as input to the image encoder, as (H, W). mask_in_chans: The number of hidden channels used for encoding input masks. activation: The activation to use when encoding input masks.",
    "type": "method",
    "file_path": "kornia\\kornia\\contrib\\models\\sam\\architecture\\prompt_encoder.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:embed_dim arg:image_embedding_size arg:input_image_size arg:mask_in_chans arg:activation arguments arg arg arg arg arg arg Call Call Assign Assign Assign Assign Call Assign Call Call Assign Call Assign Call Assign Assign Call Call Call Call Call Call Call Call Assign Call"
  },
  {
    "library": "django",
    "name": "get_rollback",
    "source_code": "def get_rollback(self):\n    if not self.in_atomic_block:\n        raise TransactionManagementError(\"The rollback flag doesn't work outside of an 'atomic' block.\")\n    return self.needs_rollback",
    "docstring": "Get the \"needs rollback\" flag -- for *advanced use* only.",
    "type": "method",
    "file_path": "django\\django\\db\\backends\\base\\base.py",
    "ast_data": "FunctionDef name:get_rollback arg:self arguments arg If Raise Call Return return:yes"
  },
  {
    "library": "pandas",
    "name": "_bool_arith_check",
    "source_code": "def _bool_arith_check(op, a: np.ndarray, b) -> None:\n    if op in _BOOL_OP_NOT_ALLOWED:\n        if a.dtype.kind == 'b' and (is_bool_dtype(b) or lib.is_bool(b)):\n            op_name = op.__name__.strip('_').lstrip('r')\n            raise NotImplementedError(f\"operator '{op_name}' not implemented for bool dtypes\")",
    "docstring": "In contrast to numpy, pandas raises an error for certain operations with booleans.",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\ops\\array_ops.py",
    "ast_data": "FunctionDef name:_bool_arith_check arg:op arg:a arg:b arguments arg arg arg If Compare If BoolOp Compare BoolOp Call Call Assign Call Call Raise Call"
  },
  {
    "library": "kornia",
    "name": "SuperpointDecoder",
    "source_code": "class SuperpointDecoder(Module):\n\n    def __init__(self, input_feat_dim: int=128, grid_size: int=8) -> None:\n        super().__init__()\n        self.relu = nn.ReLU(inplace=True)\n        self.convPa = nn.Conv2d(input_feat_dim, 256, kernel_size=3, stride=2, padding=1)\n        self.convPb = nn.Conv2d(256, 65, kernel_size=1, stride=1, padding=0)\n        self.grid_size = grid_size\n\n    def forward(self, input_features: Tensor) -> Tensor:\n        feat = self.relu(self.convPa(input_features))\n        semi = self.convPb(feat)\n        junc_prob = softmax(semi, dim=1)\n        junc_pred = pixel_shuffle(junc_prob[:, :-1, :, :], self.grid_size)[:, 0]\n        return junc_pred",
    "docstring": "Junction decoder based on the SuperPoint architecture. Args: input_feat_dim: channel size of the input features. Returns: the junction heatmap, with shape (B, H, W).",
    "type": "class",
    "file_path": "kornia\\kornia\\feature\\sold2\\backbones.py",
    "ast_data": "ClassDef name:SuperpointDecoder FunctionDef name:__init__ arg:self arg:input_feat_dim arg:grid_size arguments arg arg arg Call Call Assign Call Assign Call Assign Call Assign FunctionDef name:forward arg:self arg:input_features arguments arg arg Assign Call Call Assign Call Assign Call Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_finish_log_prob_for_one_fiber",
    "source_code": "def _finish_log_prob_for_one_fiber(self, y, x, ildj, event_ndims):\n    x = self._maybe_rotate_dims(x, rotate_right=True)\n    log_prob = self.distribution.log_prob(x)\n    if self._is_maybe_event_override:\n        log_prob = math_ops.reduce_sum(log_prob, self._reduce_event_indices)\n    log_prob += math_ops.cast(ildj, log_prob.dtype)\n    if self._is_maybe_event_override and isinstance(event_ndims, int):\n        log_prob.set_shape(array_ops.broadcast_static_shape(y.get_shape().with_rank_at_least(1)[:-event_ndims], self.batch_shape))\n    return log_prob",
    "docstring": "Finish computation of log_prob on one element of the inverse image.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\distributions\\transformed_distribution.py",
    "ast_data": "FunctionDef name:_finish_log_prob_for_one_fiber arg:self arg:y arg:x arg:ildj arg:event_ndims arguments arg arg arg arg arg Assign Call Assign Call If Assign Call Call If BoolOp Call Call Call Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_from_c_op",
    "source_code": "@classmethod\ndef _from_c_op(cls: type[OperationType], c_op, g) -> OperationType:\n    self = Operation(c_op, SymbolicTensor)\n    self._init(g)\n    return self",
    "docstring": "Create an Operation from a TF_Operation. For internal use only: This is useful for creating Operation for ops indirectly created by C API methods, e.g. the ops created by TF_ImportGraphDef. Args: c_op: a TF_Operation. g: A Graph. Returns: an Operation object.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\ops.py",
    "ast_data": "FunctionDef name:_from_c_op arg:cls arg:c_op arg:g arguments arg arg arg Assign Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "annotate",
    "source_code": "def annotate(self, model: torch.fx.GraphModule) -> torch.fx.GraphModule:\n    for module_name, quantization_config in self.module_name_qconfig.items():\n        self._annotate_with_config(model, quantization_config, _create_module_name_filter(module_name))\n    for operator_type, quantization_config in self.operator_type_qconfig.items():\n        self._annotate_with_config(model, quantization_config, _create_operator_type_filter(operator_type))\n    if self.global_config:\n        self._annotate_with_config(model, self.global_config, _global_config_filter)\n    self._annotate_output_for_int8_in_int8_out_pattern_entry(model)\n    return model",
    "docstring": "Annotate the given model with quantization configurations. Annotation contracts: 1. Annotate each node according to the user's qconfig in the following order: , , and . 2. Avoid re-annotating nodes already annotated in prior stages. For example, if has been annotated by , it won't be annotated again during the processing of the 'operator_type_qconfig' or 'global_config'. 3. For config is , the node will be annotated with . For each pair of (module_name_or_operator_type_or_global, qconfig), a filter function is created. This filter function checks if the node is marked by current stage and not annotated by the previous stage.",
    "type": "method",
    "file_path": "pytorch\\torch\\ao\\quantization\\quantizer\\x86_inductor_quantizer.py",
    "ast_data": "FunctionDef name:annotate arg:self arg:model arguments arg arg For Call Call Call For Call Call Call If Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "with_min_parameters",
    "source_code": "def with_min_parameters(self, min_params):\n    self._options['min_params'] = min_params\n    return self",
    "docstring": "Only show profiler nodes holding no less than 'min_params' parameters. 'Parameters' normally refers the weights of in TensorFlow variables. It reflects the 'capacity' of models. Args: min_params: Only show profiler nodes holding number parameters no less than this. Returns: self",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\profiler\\option_builder.py",
    "ast_data": "FunctionDef name:with_min_parameters arg:self arg:min_params arguments arg arg Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "limit_epochs",
    "source_code": "@tf_export(v1=['train.limit_epochs'])\n@deprecation.deprecated(None, 'Queue-based input pipelines have been replaced by `tf.data`. Use `tf.data.Dataset.from_tensors(tensor).repeat(num_epochs)`.')\ndef limit_epochs(tensor, num_epochs=None, name=None):\n    if num_epochs is None:\n        return tensor\n    if num_epochs <= 0:\n        raise ValueError('num_epochs must be > 0 not %d.' % num_epochs)\n    with ops.name_scope(name, 'limit_epochs', [tensor]) as name:\n        zero64 = constant_op.constant(0, dtype=dtypes.int64)\n        epochs = variable_v1.VariableV1(zero64, name='epochs', trainable=False, collections=[ops.GraphKeys.LOCAL_VARIABLES])\n        counter = epochs.count_up_to(num_epochs)\n        with ops.control_dependencies([counter]):\n            return array_ops.identity(tensor, name=name)",
    "docstring": "Returns tensor times and then raises an error. Note: creates local counter . Use to initialize local variables. Args: tensor: Any . num_epochs: A positive integer (optional). If specified, limits the number of steps the output tensor may be evaluated. name: A name for the operations (optional). Returns: tensor or . Raises: ValueError: if is invalid.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\training\\input.py",
    "ast_data": "FunctionDef name:limit_epochs arg:tensor arg:num_epochs arg:name arguments arg arg arg If Compare Return return:yes If Compare Raise Call With Call Assign Call Assign Call Assign Call With Call Return return:yes Call Call Call"
  },
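The deprecation message above names the replacement directly; a sketch of the equivalent `tf.data` pipeline:

```python
import tensorflow as tf

tensor = tf.constant([1, 2, 3])
num_epochs = 4
dataset = tf.data.Dataset.from_tensors(tensor).repeat(num_epochs)
for t in dataset:  # yields the tensor exactly num_epochs times
    print(t.numpy())
```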
  {
    "library": "scikit-learn",
    "name": "_check_sample_weight",
    "source_code": "def _check_sample_weight(sample_weight, X, *, dtype=None, ensure_non_negative=False, copy=False):\n    n_samples = _num_samples(X)\n    if dtype is not None and dtype not in [np.float32, np.float64]:\n        dtype = np.float64\n    if sample_weight is None:\n        sample_weight = np.ones(n_samples, dtype=dtype)\n    elif isinstance(sample_weight, numbers.Number):\n        sample_weight = np.full(n_samples, sample_weight, dtype=dtype)\n    else:\n        if dtype is None:\n            dtype = [np.float64, np.float32]\n        sample_weight = check_array(sample_weight, accept_sparse=False, ensure_2d=False, dtype=dtype, order='C', copy=copy, input_name='sample_weight')\n        if sample_weight.ndim != 1:\n            raise ValueError('Sample weights must be 1D array or scalar')\n        if sample_weight.shape != (n_samples,):\n            raise ValueError('sample_weight.shape == {}, expected {}!'.format(sample_weight.shape, (n_samples,)))\n    if ensure_non_negative:\n        check_non_negative(sample_weight, '`sample_weight`')\n    return sample_weight",
    "docstring": "Validate sample weights. Note that passing sample_weight=None will output an array of ones. Therefore, in some cases, you may want to protect the call with: if sample_weight is not None: sample_weight = _check_sample_weight(...) Parameters ---------- sample_weight : {ndarray, Number or None}, shape (n_samples,) Input sample weights. X : {ndarray, list, sparse matrix} Input data. dtype : dtype, default=None dtype of the validated . If None, and is an array: - If is one of , then the dtype is preserved. - Else the output has NumPy's default dtype: . If is not , then output will be . ensure_non_negative : bool, default=False, Whether or not the weights are expected to be non-negative. .. versionadded:: 1.0 copy : bool, default=False If True, a copy of sample_weight will be created. Returns ------- sample_weight : ndarray of shape (n_samples,) Validated sample weight. It is guaranteed to be \"C\" contiguous.",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\utils\\validation.py",
    "ast_data": "FunctionDef name:_check_sample_weight arg:sample_weight arg:X arguments arg arg arg arg arg Assign Call If BoolOp Compare Compare Assign If Compare Assign Call If Call Assign Call If Compare Assign Assign Call If Compare Raise Call If Compare Raise Call Call If Call Return return:yes"
  },
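A quick sketch of the three input forms `_check_sample_weight` accepts; it is a private scikit-learn helper, so the import is for illustration only:

```python
import numpy as np
from sklearn.utils.validation import _check_sample_weight  # private helper

X = np.zeros((4, 2))
print(_check_sample_weight(None, X))          # array of ones, shape (4,)
print(_check_sample_weight(0.5, X))           # array filled with 0.5
print(_check_sample_weight([1, 2, 3, 4], X))  # validated 1-D float array
```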
  {
    "library": "django",
    "name": "m",
    "source_code": "@property\ndef m(self):\n    if self.is_measured:\n        return self._listarr(capi.getm)",
    "docstring": "Return the M coordinates in a list.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\gdal\\geometries.py",
    "ast_data": "FunctionDef name:m arg:self arguments arg If Return return:yes Call"
  },
  {
    "library": "authlib",
    "name": "AuthorizationPendingError",
    "source_code": "class AuthorizationPendingError(OAuth2Error):\n    error = 'authorization_pending'",
    "docstring": "The authorization request is still pending as the end user hasn't yet completed the user-interaction steps (Section 3.3).",
    "type": "class",
    "file_path": "authlib\\authlib\\oauth2\\rfc8628\\errors.py",
    "ast_data": "ClassDef name:AuthorizationPendingError Assign"
  },
  {
    "library": "pytorch",
    "name": "pre_compile",
    "source_code": "def pre_compile(wrappers: list[CompilerWrapper], flat_fn: Callable, flat_args: list[Any], aot_config: AOTConfig, *, fw_metadata: ViewAndMutationMeta) -> tuple[Callable, list[Tensor], ViewAndMutationMeta]:\n    for wrapper in wrappers:\n        flat_fn, flat_args, fw_metadata = wrapper.pre_compile(flat_fn, flat_args, aot_config, fw_metadata=fw_metadata)\n    return (flat_fn, flat_args, fw_metadata)",
    "docstring": "Runs a sequence of wrappers on the given function and arguments. Mutates wrappers in place.",
    "type": "function",
    "file_path": "pytorch\\torch\\_functorch\\_aot_autograd\\runtime_wrappers.py",
    "ast_data": "FunctionDef name:pre_compile arg:wrappers arg:flat_fn arg:flat_args arg:aot_config arguments arg arg arg arg arg For Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "visit_Attribute",
    "source_code": "def visit_Attribute(self, node):\n    if anno.hasanno(node, anno.Basic.QN):\n        qn = anno.getanno(node, anno.Basic.QN)\n        if isinstance(node.ctx, gast.Load):\n            self.reads.add(qn)\n    node = self.generic_visit(node)\n    return node",
    "docstring": "Visits attribute nodes in the AST.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\gradient_input_output_exclusions.py",
    "ast_data": "FunctionDef name:visit_Attribute arg:self arg:node arguments arg arg If Call Assign Call If Call Call Assign Call Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "check_classifiers_one_label_sample_weights",
    "source_code": "@ignore_warnings(category=FutureWarning)\ndef check_classifiers_one_label_sample_weights(name, classifier_orig):\n    error_fit = f\"{name} failed when fitted on one label after sample_weight trimming. Error message is not explicit, it should have 'class'.\"\n    error_predict = f'{name} prediction results should only output the remaining class.'\n    rnd = np.random.RandomState(0)\n    X_train = rnd.uniform(size=(10, 10))\n    X_test = rnd.uniform(size=(10, 10))\n    y = np.arange(10) % 2\n    sample_weight = y.copy()\n    classifier = clone(classifier_orig)\n    if has_fit_parameter(classifier, 'sample_weight'):\n        match = ['\\\\bclass(es)?\\\\b', error_predict]\n        err_type, err_msg = ((AssertionError, ValueError), error_fit)\n    else:\n        match = '\\\\bsample_weight\\\\b'\n        err_type, err_msg = ((TypeError, ValueError), None)\n    with raises(err_type, match=match, may_pass=True, err_msg=err_msg) as cm:\n        classifier.fit(X_train, y, sample_weight=sample_weight)\n        if cm.raised_and_matched:\n            return\n        assert_array_equal(classifier.predict(X_test), np.ones(10), err_msg=error_predict)",
    "docstring": "Check that classifiers accepting sample_weight fit or throws a ValueError with an explicit message if the problem is reduced to one class.",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\utils\\estimator_checks.py",
    "ast_data": "FunctionDef name:check_classifiers_one_label_sample_weights arg:name arg:classifier_orig arguments arg arg Assign Assign Assign Call Assign Call Assign Call Assign Call Assign Call Assign Call If Call Assign Assign Assign Assign With Call Call If Return return:no Call Call Call Call"
  },
  {
    "library": "pandas",
    "name": "take_block_columns",
    "source_code": "@final\ndef take_block_columns(self, indices: npt.NDArray[np.intp]) -> Self:\n    new_mgr_locs = self._mgr_locs[indices]\n    new_values = self._slice(indices)\n    return type(self)(new_values, new_mgr_locs, self.ndim, refs=None)",
    "docstring": "Perform __getitem__-like, return result as block. Only supports slices that preserve dimensionality.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\internals\\blocks.py",
    "ast_data": "FunctionDef name:take_block_columns arg:self arg:indices arguments arg arg Assign Assign Call Return return:yes Call Call"
  },
  {
    "library": "numpy",
    "name": "resolveAllDependencies",
    "source_code": "def resolveAllDependencies(self):\n    done_this = set()\n    last_todo = set()\n    while True:\n        todo = set(self.allRoutineNames()) - done_this\n        if todo == last_todo:\n            break\n        for rn in todo:\n            r = self.getRoutine(rn)\n            deps = r.dependencies()\n            for d in deps:\n                self.addRoutine(d)\n            done_this.add(rn)\n        last_todo = todo\n    return todo",
    "docstring": "Try to add routines to the library to satisfy all the dependencies for each routine in the library. Returns a set of routine names that have the dependencies unresolved.",
    "type": "method",
    "file_path": "numpy\\numpy\\linalg\\lapack_lite\\make_lite.py",
    "ast_data": "FunctionDef name:resolveAllDependencies arg:self arguments arg Assign Call Assign Call While Assign Call Call If Compare For Assign Call Assign Call For Call Call Assign Return return:yes"
  },
  {
    "library": "numpy",
    "name": "InstallableLib",
    "source_code": "class InstallableLib:\n\n    def __init__(self, name, build_info, target_dir):\n        self.name = name\n        self.build_info = build_info\n        self.target_dir = target_dir",
    "docstring": "Container to hold information on an installable library. Parameters ---------- name : str Name of the installed library. build_info : dict Dictionary holding build information. target_dir : str Absolute path specifying where to install the library. See Also -------- Configuration.add_installed_library Notes ----- The three parameters are stored as attributes with the same names.",
    "type": "class",
    "file_path": "numpy\\numpy\\distutils\\misc_util.py",
    "ast_data": "ClassDef name:InstallableLib FunctionDef name:__init__ arg:self arg:name arg:build_info arg:target_dir arguments arg arg arg arg Assign Assign Assign"
  },
  {
    "library": "django",
    "name": "set_many",
    "source_code": "def set_many(self, data, timeout=DEFAULT_TIMEOUT, version=None):\n    for key, value in data.items():\n        self.set(key, value, timeout=timeout, version=version)\n    return []",
    "docstring": "Set a bunch of values in the cache at once from a dict of key/value pairs. For certain backends (memcached), this is much more efficient than calling set() multiple times. If timeout is given, use that timeout for the key; otherwise use the default cache timeout. On backends that support it, return a list of keys that failed insertion, or an empty list if all keys were inserted successfully.",
    "type": "method",
    "file_path": "django\\django\\core\\cache\\backends\\base.py",
    "ast_data": "FunctionDef name:set_many arg:self arg:data arg:timeout arg:version arguments arg arg arg arg For Call Call Return return:no"
  },
  {
    "library": "tensorflow",
    "name": "_get_internal_key",
    "source_code": "def _get_internal_key(self, key):\n    if is_train(key):\n        return KerasModeKeys.TRAIN\n    if is_eval(key):\n        return KerasModeKeys.TEST\n    if is_predict(key):\n        return KerasModeKeys.PREDICT\n    raise ValueError('Invalid mode key: {}.'.format(key))",
    "docstring": "Return keys used for the internal dictionary.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\saving\\utils_v1\\mode_keys.py",
    "ast_data": "FunctionDef name:_get_internal_key arg:self arg:key arguments arg arg If Call Return return:yes If Call Return return:yes If Call Return return:yes Raise Call Call"
  },
  {
    "library": "cherrypy",
    "name": "translate_headers",
    "source_code": "def translate_headers(self, environ):\n    for cgiName in environ:\n        if cgiName in self.headerNames:\n            yield (self.headerNames[cgiName], environ[cgiName])\n        elif cgiName[:5] == 'HTTP_':\n            translatedHeader = cgiName[5:].replace('_', '-')\n            yield (translatedHeader, environ[cgiName])",
    "docstring": "Translate CGI-environ header names to HTTP header names.",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\_cpwsgi.py",
    "ast_data": "FunctionDef name:translate_headers arg:self arg:environ arguments arg arg For If Compare If Compare Assign Call"
  },
  {
    "library": "pandas",
    "name": "ptr",
    "source_code": "@property\ndef ptr(self) -> int:\n    return self._buffer.address",
    "docstring": "Pointer to start of the buffer as an integer.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\interchange\\buffer.py",
    "ast_data": "FunctionDef name:ptr arg:self arguments arg Return return:yes"
  },
  {
    "library": "django",
    "name": "AppConfigStub",
    "source_code": "class AppConfigStub(AppConfig):\n\n    def __init__(self, label):\n        self.apps = None\n        self.models = {}\n        self.label = label\n        self.name = label\n\n    def import_models(self):\n        self.models = self.apps.all_models[self.label]",
    "docstring": "Stub of an AppConfig. Only provides a label and a dict of models.",
    "type": "class",
    "file_path": "django\\django\\db\\migrations\\state.py",
    "ast_data": "ClassDef name:AppConfigStub FunctionDef name:__init__ arg:self arg:label arguments arg arg Assign Assign Assign Assign FunctionDef name:import_models arg:self arguments arg Assign"
  },
  {
    "library": "tensorflow",
    "name": "Zeros",
    "source_code": "@tf_export('zeros_initializer', v1=[])\nclass Zeros(Initializer):\n\n    def __call__(self, shape, dtype=dtypes.float32, **kwargs):\n        self._validate_kwargs(kwargs)\n        dtype = dtypes.as_dtype(dtype)\n        if not dtype.is_numpy_compatible or dtype == dtypes.string:\n            raise ValueError(f'Argument `dtype` expected to be numeric or boolean. Received {dtype}.')\n        if _PARTITION_SHAPE in kwargs:\n            shape = kwargs[_PARTITION_SHAPE]\n        return array_ops.zeros(shape, dtype)",
    "docstring": "Initializer that generates tensors initialized to 0. Initializers allow you to pre-specify an initialization strategy, encoded in the Initializer object, without knowing the shape and dtype of the variable being initialized. Examples: >>> def make_variables(k, initializer): ... return (tf.Variable(initializer(shape=[k], dtype=tf.float32)), ... tf.Variable(initializer(shape=[k, k], dtype=tf.float32))) >>> v1, v2 = make_variables(3, tf.zeros_initializer()) >>> v1 >>> v2 >>> make_variables(4, tf.random_uniform_initializer(minval=-1., maxval=1.)) (, <tf.Variable...shape=(4, 4) ...",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\init_ops_v2.py",
    "ast_data": "ClassDef name:Zeros FunctionDef name:__call__ arg:self arg:shape arg:dtype arguments arg arg arg arg Call Assign Call If BoolOp Compare Raise Call If Compare Assign Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "decide_global_ordering_of_comms",
    "source_code": "def decide_global_ordering_of_comms(nodes: list[BaseSchedulerNode], name_to_buf, name_to_fused_node) -> list[BaseSchedulerNode]:\n    if not torch.distributed.is_available():\n        return nodes\n    comm_nodes = [n for n in nodes if contains_collective(n)]\n    for i in range(1, len(comm_nodes)):\n        mutating_buf = next(iter(comm_nodes[i].get_buffer_names()))\n        for buf in comm_nodes[i - 1].get_buffer_names():\n            comm_nodes[i].add_fake_dep(WeakDep(buf, mutating_buf=mutating_buf))\n    return nodes",
    "docstring": "Decide global ordering of comms, by just enforcing the ordering that's in the input graph (might not be the same ordering as the eager mode program). TODO: Come up with a better approach",
    "type": "function",
    "file_path": "pytorch\\torch\\_inductor\\comms.py",
    "ast_data": "FunctionDef name:decide_global_ordering_of_comms arg:nodes arg:name_to_buf arg:name_to_fused_node arguments arg arg arg If Call Return return:yes Assign Call For Call Call Assign Call Call Call For Call Call Call Return return:yes"
  },
  {
    "library": "scipy",
    "name": "Shubert03",
    "source_code": "class Shubert03(Benchmark):\n    change_dimensionality = True\n\n    def __init__(self, dimensions=2):\n        Benchmark.__init__(self, dimensions)\n        self._bounds = list(zip([-10.0] * self.N, [10.0] * self.N))\n        self.global_optimum = [[5.791794, 5.791794]]\n        self.fglob = -24.062499\n\n    def fun(self, x, *args):\n        self.nfev += 1\n        j = atleast_2d(arange(1, 6)).T\n        y = -j * sin((j + 1) * x + j)\n        return sum(sum(y))",
    "docstring": "Shubert 3 objective function. This class defines the Shubert 3 [1]_ global optimization problem. This is a multimodal minimization problem defined as follows: .. math:: f_{\\text{Shubert03}}(x) = \\sum_{i=1}^n \\sum_{j=1}^5 -j \\sin((j+1)x_i + j) Here, :math: represents the number of dimensions and :math: for :math:. *Global optimum*: :math: for :math: (and many others). .. [1] Gavana, A. Global Optimization Benchmarks and AMPGO retrieved 2015 TODO: Jamil#134 has wrong global minimum value, and is missing a minus sign before the whole thing.",
    "type": "class",
    "file_path": "scipy\\benchmarks\\benchmarks\\go_benchmark_functions\\go_funcs_S.py",
    "ast_data": "ClassDef name:Shubert03 Assign FunctionDef name:__init__ arg:self arg:dimensions arguments arg arg Call Assign Call Call Assign Assign FunctionDef name:fun arg:self arg:x arguments arg arg arg Assign Call Call Assign Call Return return:yes Call Call"
  },
  {
    "library": "cherrypy",
    "name": "__iter__",
    "source_code": "def __iter__(self):\n    return self",
    "docstring": "Make a file reader iterator.",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\lib\\cpstats.py",
    "ast_data": "FunctionDef name:__iter__ arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_tf_tensorarray_append",
    "source_code": "def _tf_tensorarray_append(list_, x):\n    return list_.write(list_.size(), x)",
    "docstring": "Overload of list_append that stages a TensorArray write.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\autograph\\operators\\data_structures.py",
    "ast_data": "FunctionDef name:_tf_tensorarray_append arg:list_ arg:x arguments arg arg Return return:yes Call Call"
  },
  {
    "library": "scipy",
    "name": "_get_atol_rtol",
    "source_code": "def _get_atol_rtol(name, b_norm, atol=0.0, rtol=1e-05):\n    if atol == 'legacy' or atol is None or atol < 0:\n        msg = f\"'scipy.sparse.linalg.{name}' called with invalid `atol`={atol}; if set, `atol` must be a real, non-negative number.\"\n        raise ValueError(msg)\n    atol = max(float(atol), float(rtol) * float(b_norm))\n    return (atol, rtol)",
    "docstring": "A helper function to handle tolerance normalization",
    "type": "function",
    "file_path": "scipy\\scipy\\sparse\\linalg\\_isolve\\iterative.py",
    "ast_data": "FunctionDef name:_get_atol_rtol arg:name arg:b_norm arg:atol arg:rtol arguments arg arg arg arg If BoolOp Compare Compare Compare Assign Raise Call Assign Call Call Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_assert_same_keys",
    "source_code": "def _assert_same_keys(state_dict: dict[str, Any], process_group: Optional[dist.ProcessGroup]=None) -> None:\n    if dist.get_world_size(process_group) == 1:\n        return\n    all_keys = _all_gather_keys(state_dict, process_group)\n    my_keys = set(state_dict.keys())\n    diff = all_keys - my_keys\n    if len(diff) > 0:\n        raise AssertionError(f'Key(s) present in other ranks but not this one, difference: {diff}')",
    "docstring": "Asserts that all ranks have the same keys in their state dict. This is a collective call which requires all ranks in `` to join. It will also induce cross-rank communication and block CPU.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\checkpoint\\utils.py",
    "ast_data": "FunctionDef name:_assert_same_keys arg:state_dict arg:process_group arguments arg arg If Compare Call Return return:no Assign Call Assign Call Call Assign If Compare Call Raise Call"
  },
  {
    "library": "scipy",
    "name": "rosen_der",
    "source_code": "@xp_capabilities(skip_backends=[('jax.numpy', \"JAX doesn't allow item assignment.\")])\ndef rosen_der(x):\n    xp = array_namespace(x)\n    x = xp_promote(x, force_floating=True, xp=xp)\n    xm = x[1:-1]\n    xm_m1 = x[:-2]\n    xm_p1 = x[2:]\n    der = xp.zeros_like(x)\n    der[1:-1] = 200 * (xm - xm_m1 ** 2) - 400 * (xm_p1 - xm ** 2) * xm - 2 * (1 - xm)\n    der[0] = -400 * x[0] * (x[1] - x[0] ** 2) - 2 * (1 - x[0])\n    der[-1] = 200 * (x[-1] - x[-2] ** 2)\n    return der",
    "docstring": "The derivative (i.e. gradient) of the Rosenbrock function. Parameters ---------- x : array_like 1-D array of points at which the derivative is to be computed. Returns ------- rosen_der : (N,) ndarray The gradient of the Rosenbrock function at . See Also -------- rosen, rosen_hess, rosen_hess_prod Examples -------- >>> import numpy as np >>> from scipy.optimize import rosen_der >>> X = 0.1 * np.arange(9) >>> rosen_der(X) array([ -2. , 10.6, 15.6, 13.4, 6.4, -3. , -12.4, -19.4, 62. ])",
    "type": "function",
    "file_path": "scipy\\scipy\\optimize\\_optimize.py",
    "ast_data": "FunctionDef name:rosen_der arg:x arguments arg Assign Call Assign Call Assign Assign Assign Assign Call Assign Assign Assign Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "box_sphere_intersections",
    "source_code": "def box_sphere_intersections(z, d, lb, ub, trust_radius, entire_line=False, extra_info=False):\n    ta_b, tb_b, intersect_b = box_intersections(z, d, lb, ub, entire_line)\n    ta_s, tb_s, intersect_s = sphere_intersections(z, d, trust_radius, entire_line)\n    ta = np.maximum(ta_b, ta_s)\n    tb = np.minimum(tb_b, tb_s)\n    if intersect_b and intersect_s and (ta <= tb):\n        intersect = True\n    else:\n        intersect = False\n    if extra_info:\n        sphere_info = {'ta': ta_s, 'tb': tb_s, 'intersect': intersect_s}\n        box_info = {'ta': ta_b, 'tb': tb_b, 'intersect': intersect_b}\n        return (ta, tb, intersect, sphere_info, box_info)\n    else:\n        return (ta, tb, intersect)",
    "docstring": "Find the intersection between segment (or line) and box/sphere constraints. Find the intersection between the segment (or line) defined by the parametric equation `` for which the line intercepts the box. And a boolean value indicating whether the box is intersected by the line.",
    "type": "function",
    "file_path": "scipy\\scipy\\optimize\\_trustregion_constr\\qp_subproblem.py",
    "ast_data": "FunctionDef name:box_sphere_intersections arg:z arg:d arg:lb arg:ub arg:trust_radius arg:entire_line arg:extra_info arguments arg arg arg arg arg arg arg Assign Call Assign Call Assign Call Assign Call If BoolOp Compare Assign Assign If Assign Assign Return return:yes Return return:yes"
  },
  {
    "library": "scrapy",
    "name": "request_from_dict",
    "source_code": "def request_from_dict(d: dict[str, Any], *, spider: Spider | None=None) -> Request:\n    request_cls: type[Request] = load_object(d['_class']) if '_class' in d else Request\n    kwargs = {key: value for key, value in d.items() if key in request_cls.attributes}\n    if d.get('callback') and spider:\n        kwargs['callback'] = _get_method(spider, d['callback'])\n    if d.get('errback') and spider:\n        kwargs['errback'] = _get_method(spider, d['errback'])\n    return request_cls(**kwargs)",
    "docstring": "Create a :class: object from a dict. If a spider is given, it will try to resolve the callbacks looking at the spider for methods with the same name.",
    "type": "function",
    "file_path": "scrapy\\scrapy\\utils\\request.py",
    "ast_data": "FunctionDef name:request_from_dict arg:d arguments arg arg Compare Call Assign Call Compare If BoolOp Call Assign Call If BoolOp Call Assign Call Return return:yes Call"
  },
  {
    "library": "kornia",
    "name": "_inverse_sigmoid",
    "source_code": "def _inverse_sigmoid(x: torch.Tensor, eps: float=1e-05) -> torch.Tensor:\n    out = x.clip(min=0.0, max=1.0)\n    return torch.log(out.clip(min=eps) / (1.0 - out).clip(min=eps))",
    "docstring": "Inverse sigmoid function. Args: x: input tensor eps: epsilon value for numerical stability Returns: output tensor",
    "type": "function",
    "file_path": "kornia\\kornia\\contrib\\models\\rt_detr\\architecture\\rtdetr_head.py",
    "ast_data": "FunctionDef name:_inverse_sigmoid arg:x arg:eps arguments arg arg Assign Call Return return:yes Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "cast_losses_to_common_dtype",
    "source_code": "def cast_losses_to_common_dtype(losses):\n    highest_float = None\n    for loss in losses:\n        if loss.dtype.is_floating:\n            if highest_float is None or loss.dtype.size > highest_float.size:\n                highest_float = loss.dtype\n            elif {loss.dtype, highest_float} == {'bfloat16', 'float16'}:\n                highest_float = 'float32'\n        if loss.dtype.is_complex:\n            return losses\n    if highest_float:\n        losses = [math_ops.cast(loss, highest_float) for loss in losses]\n    return losses",
    "docstring": "Cast a list of losses to a common dtype. If any loss is floating-point, they will all be casted to the most-precise floating-point loss. Otherwise the losses are not casted. We also skip casting losses if there are any complex losses. Args: losses: A list of losses. Returns: , but they have been casted to a common dtype.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\utils\\losses_utils.py",
    "ast_data": "FunctionDef name:cast_losses_to_common_dtype arg:losses arguments arg Assign For If If BoolOp Compare Compare Assign If Compare Assign If Return return:yes If Assign Call Return return:yes"
  },
  {
    "library": "django",
    "name": "linebreaks_filter",
    "source_code": "@register.filter('linebreaks', is_safe=True, needs_autoescape=True)\n@stringfilter\ndef linebreaks_filter(value, autoescape=True):\n    autoescape = autoescape and (not isinstance(value, SafeData))\n    return mark_safe(linebreaks(value, autoescape))",
    "docstring": "Replace line breaks in plain text with appropriate HTML; a single newline becomes an HTML line break (`).",
    "type": "function",
    "file_path": "django\\django\\template\\defaultfilters.py",
    "ast_data": "FunctionDef name:linebreaks_filter arg:value arg:autoescape arguments arg arg Assign BoolOp Call Return return:yes Call Call Call"
  },
  {
    "library": "django",
    "name": "__getstate__",
    "source_code": "def __getstate__(self):\n    obj_dict = self.__dict__.copy()\n    if not self._is_rendered:\n        raise ContentNotRenderedError('The response content must be rendered before it can be pickled.')\n    for attr in self.rendering_attrs:\n        if attr in obj_dict:\n            del obj_dict[attr]\n    return obj_dict",
    "docstring": "Raise an exception if trying to pickle an unrendered response. Pickle only rendered data, not the data used to construct the response.",
    "type": "method",
    "file_path": "django\\django\\template\\response.py",
    "ast_data": "FunctionDef name:__getstate__ arg:self arguments arg Assign Call If Raise Call For If Compare Return return:yes"
  },
  {
    "library": "cherrypy",
    "name": "screen",
    "source_code": "@property\ndef screen(self):\n    h = self._get_builtin_handler\n    has_h = h(self.error_log, 'screen') or h(self.access_log, 'screen')\n    return bool(has_h)",
    "docstring": "Turn stderr/stdout logging on or off. If you set this to True, it'll add the appropriate StreamHandler for you. If you set it to False, it will remove the handler.",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\_cplogging.py",
    "ast_data": "FunctionDef name:screen arg:self arguments arg Assign Assign BoolOp Call Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "_split_inputs",
    "source_code": "def _split_inputs(self, args: tuple[Any, ...], kwargs: Optional[dict[str, Any]]=None):\n    if args or kwargs:\n        args_split, kwargs_split = split_args_kwargs_into_chunks(args, kwargs, self._n_microbatches, self._args_chunk_spec, self._kwargs_chunk_spec)\n        return (args_split, kwargs_split)\n    else:\n        return ([()] * self._n_microbatches, [{}] * self._n_microbatches)",
    "docstring": "Splits a full-batch input into chunks (i.e. microbatches) and returns the chunks",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\pipelining\\schedules.py",
    "ast_data": "FunctionDef name:_split_inputs arg:self arg:args arg:kwargs arguments arg arg arg If BoolOp Assign Call Return return:yes Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "add_to_collections",
    "source_code": "@tf_export(v1=['add_to_collections'])\ndef add_to_collections(names, value) -> None:\n    get_default_graph().add_to_collections(names, value)",
    "docstring": "Wrapper for using the default graph. See for more details. Args: names: The key for the collections. The class contains many standard names for collections. value: The value to add to the collections. @compatibility(eager) Collections are only supported in eager when variables are created inside an EagerVariableStore (e.g. as part of a layer or template). @end_compatibility",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\ops.py",
    "ast_data": "FunctionDef name:add_to_collections arg:names arg:value arguments arg arg Call Call Call"
  },
  {
    "library": "pandas",
    "name": "diff",
    "source_code": "@doc(klass='Series', extra_params='', other_klass='DataFrame', examples=dedent('\\n        Difference with previous row\\n\\n        >>> s = pd.Series([1, 1, 2, 3, 5, 8])\\n        >>> s.diff()\\n        0    NaN\\n        1    0.0\\n        2    1.0\\n        3    1.0\\n        4    2.0\\n        5    3.0\\n        dtype: float64\\n\\n        Difference with 3rd previous row\\n\\n        >>> s.diff(periods=3)\\n        0    NaN\\n        1    NaN\\n        2    NaN\\n        3    2.0\\n        4    4.0\\n        5    6.0\\n        dtype: float64\\n\\n        Difference with following row\\n\\n        >>> s.diff(periods=-1)\\n        0    0.0\\n        1   -1.0\\n        2   -1.0\\n        3   -2.0\\n        4   -3.0\\n        5    NaN\\n        dtype: float64\\n\\n        Overflow in input dtype\\n\\n        >>> s = pd.Series([1, 0], dtype=np.uint8)\\n        >>> s.diff()\\n        0      NaN\\n        1    255.0\\n        dtype: float64'))\ndef diff(self, periods: int=1) -> Series:\n    if not lib.is_integer(periods):\n        if not (is_float(periods) and periods.is_integer()):\n            raise ValueError('periods must be an integer')\n    result = algorithms.diff(self._values, periods)\n    return self._constructor(result, index=self.index, copy=False).__finalize__(self, method='diff')",
    "docstring": "First discrete difference of element. Calculates the difference of a {klass} element compared with another element in the {klass} (default is element in previous row). Parameters ---------- periods : int, default 1 Periods to shift for calculating difference, accepts negative values. {extra_params} Returns ------- {klass} First differences of the Series. See Also -------- {klass}.pct_change: Percent change over given number of periods. {klass}.shift: Shift index by desired number of periods with an optional time freq. {other_klass}.diff: First discrete difference of object. Notes ----- For boolean dtypes, this uses :meth: rather than :meth:. The result is calculated according to current dtype in {klass}, however dtype of the result is always float64. Examples -------- {examples}",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\series.py",
    "ast_data": "FunctionDef name:diff arg:self arg:periods arguments arg arg If Call If BoolOp Call Call Raise Call Assign Call Return return:yes Call Call Call Call"
  },
  {
    "library": "matplotlib",
    "name": "_get_hatch",
    "source_code": "def _get_hatch(self, gc, rgbFace):\n    if rgbFace is not None:\n        rgbFace = tuple(rgbFace)\n    edge = gc.get_hatch_color()\n    if edge is not None:\n        edge = tuple(edge)\n    lw = gc.get_hatch_linewidth()\n    dictkey = (gc.get_hatch(), rgbFace, edge, lw)\n    oid = self._hatchd.get(dictkey)\n    if oid is None:\n        oid = self._make_id('h', dictkey)\n        self._hatchd[dictkey] = ((gc.get_hatch_path(), rgbFace, edge, lw), oid)\n    else:\n        _, oid = oid\n    return oid",
    "docstring": "Create a new hatch pattern",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\backends\\backend_svg.py",
    "ast_data": "FunctionDef name:_get_hatch arg:self arg:gc arg:rgbFace arguments arg arg arg If Compare Assign Call Assign Call If Compare Assign Call Assign Call Assign Call Assign Call If Compare Assign Call Assign Call Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "with_row_splits_dtype",
    "source_code": "def with_row_splits_dtype(self, dtype):\n    dtype = dtypes.as_dtype(dtype)\n    if dtype not in (dtypes.int32, dtypes.int64):\n        raise ValueError(f'Argument `row_splits` dtype must be int32 or int64. Received {dtype}.')\n    if self._row_partition.dtype == dtype:\n        return self\n    current_values = self._values\n    if isinstance(current_values, RaggedTensor):\n        return RaggedTensor(values=current_values.with_row_splits_dtype(dtype), row_partition=self._row_partition.with_dtype(dtype), internal=True)\n    else:\n        return RaggedTensor(values=current_values, row_partition=self._row_partition.with_dtype(dtype), internal=True)",
    "docstring": "Returns a copy of this RaggedTensor with the given dtype. For RaggedTensors with multiple ragged dimensions, the for all nested objects are cast to the given dtype. Args: dtype: The dtype for . One of or . Returns: A copy of this RaggedTensor, with the cast to the given type.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\ragged_tensor.py",
    "ast_data": "FunctionDef name:with_row_splits_dtype arg:self arg:dtype arguments arg arg Assign Call If Compare Raise Call If Compare Return return:yes Assign If Call Return return:yes Call Call Call Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_restore_checkpoint_and_maybe_run_saved_model_initializers",
    "source_code": "def _restore_checkpoint_and_maybe_run_saved_model_initializers(sess: session.Session, saver: saver_lib.Saver, path: str):\n    saved_model_init_ops = ops.get_collection('saved_model_initializers')\n    if saved_model_init_ops:\n        sess.run(saved_model_init_ops)\n    saver.restore(sess, path)",
    "docstring": "Restores checkpoint values and SavedModel initializers if found.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\training\\session_manager.py",
    "ast_data": "FunctionDef name:_restore_checkpoint_and_maybe_run_saved_model_initializers arg:sess arg:saver arg:path arguments arg arg arg Assign Call If Call Call"
  },
  {
    "library": "pytorch",
    "name": "filename",
    "source_code": "@property\ndef filename(self) -> _Optional[str]:\n    return self._untyped_storage.filename",
    "docstring": "Returns the file name associated with this storage if the storage was memory mapped from a file. or `` if the storage was not created by memory mapping a file.",
    "type": "method",
    "file_path": "pytorch\\torch\\storage.py",
    "ast_data": "FunctionDef name:filename arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_reset_context",
    "source_code": "def _reset_context():\n    global _context\n    global _device_parsing_cache\n    gc.collect()\n    pywrap_tfe.TFE_ClearScalarCache()\n    with _context_lock:\n        if _context is not None:\n            _context._clear_caches()\n            _context = None\n    _create_context()\n    _device_parsing_cache = {}",
    "docstring": "Clears and re-initializes the singleton context. Should only be used for testing.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\context.py",
    "ast_data": "FunctionDef name:_reset_context arguments Call Call With If Compare Call Assign Call Assign"
  },
  {
    "library": "scikit-learn",
    "name": "_get_finite_row_indices",
    "source_code": "def _get_finite_row_indices(matrix):\n    if issparse(matrix):\n        row_indices = np.array([i for i, row in enumerate(matrix.tolil().data) if np.all(np.isfinite(row))])\n    else:\n        row_indices, = np.isfinite(matrix.sum(axis=1)).nonzero()\n    return row_indices",
    "docstring": "Returns the indices of the purely finite rows of a sparse matrix or dense ndarray",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\cluster\\_hdbscan\\hdbscan.py",
    "ast_data": "FunctionDef name:_get_finite_row_indices arg:matrix arguments arg If Call Assign Call Call Call Call Call Assign Call Call Call Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "to_device",
    "source_code": "def to_device(x: Array, device: Device, /, *, stream: int | Any | None=None) -> Array:\n    if is_numpy_array(x):\n        if stream is not None:\n            raise ValueError('The stream argument to to_device() is not supported')\n        if device == 'cpu':\n            return x\n        raise ValueError(f'Unsupported device {device!r}')\n    elif is_cupy_array(x):\n        return _cupy_to_device(x, device, stream=stream)\n    elif is_torch_array(x):\n        return _torch_to_device(x, device, stream=stream)\n    elif is_dask_array(x):\n        if stream is not None:\n            raise ValueError('The stream argument to to_device() is not supported')\n        if device == 'cpu':\n            return x\n        raise ValueError(f'Unsupported device {device!r}')\n    elif is_jax_array(x):\n        if not hasattr(x, '__array_namespace__'):\n            import jax.experimental.array_api\n            if not hasattr(x, 'to_device'):\n                return x\n        return x.to_device(device, stream=stream)\n    elif is_pydata_sparse_array(x) and device == _device(x):\n        return x\n    return x.to_device(device, stream=stream)",
    "docstring": "Copy the array from the device on which it currently resides to the specified `x.to_device(device, stream=stream)standard to_deviceDevice Support Device Stream x.to(device) ` argument is not supported in PyTorch). See Also -------- device : Hardware device the array data resides on.",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\externals\\array_api_compat\\common\\_helpers.py",
    "ast_data": "FunctionDef name:to_device arguments arg arg arg If Call If Compare Raise Call If Compare Return return:yes Raise Call If Call Return return:yes Call If Call Return return:yes Call If Call If Compare Raise Call If Compare Return return:yes Raise Call If Call If Call If Call Return return:yes Return return:yes Call If BoolOp Call Compare Call Return return:yes Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "expect",
    "source_code": "def expect(self, func=None, args=(), loc=0, scale=1, lb=None, ub=None, conditional=False, **kwds):\n    lockwds = {'loc': loc, 'scale': scale}\n    self._argcheck(*args)\n    _a, _b = self._get_support(*args)\n    if func is None:\n\n        def fun(x, *args):\n            return x * self.pdf(x, *args, **lockwds)\n    else:\n\n        def fun(x, *args):\n            return func(x) * self.pdf(x, *args, **lockwds)\n    if lb is None:\n        lb = loc + _a * scale\n    if ub is None:\n        ub = loc + _b * scale\n    cdf_bounds = self.cdf([lb, ub], *args, **lockwds)\n    invfac = cdf_bounds[1] - cdf_bounds[0]\n    kwds['args'] = args\n    alpha = 0.05\n    inner_bounds = np.array([alpha, 1 - alpha])\n    cdf_inner_bounds = cdf_bounds[0] + invfac * inner_bounds\n    c, d = loc + self._ppf(cdf_inner_bounds, *args) * scale\n    lbc = integrate.quad(fun, lb, c, **kwds)[0]\n    cd = integrate.quad(fun, c, d, **kwds)[0]\n    dub = integrate.quad(fun, d, ub, **kwds)[0]\n    vals = lbc + cd + dub\n    if conditional:\n        vals /= invfac\n    return np.array(vals)[()]",
    "docstring": "Calculate expected value of a function with respect to the distribution by numerical integration. The expected value of a function `scipy.integrate.quadscipy.integrate.quadscipy.integrate.quadscipy.integrate.quad` . >>> import numpy as np >>> from scipy.stats import vonmises >>> res = vonmises(loc=2, kappa=1).expect(lambda x: np.exp(1j*x), ... complex_func=True) >>> res (-0.18576377217422957+0.40590124735052263j) >>> np.angle(res) # location of the (circular) distribution 2.0",
    "type": "method",
    "file_path": "scipy\\scipy\\stats\\_distn_infrastructure.py",
    "ast_data": "FunctionDef name:expect arg:self arg:func arg:args arg:loc arg:scale arg:lb arg:ub arg:conditional arguments arg arg arg arg arg arg arg arg arg Assign Call Assign Call If Compare FunctionDef name:fun arg:x arguments arg arg Return return:yes Call FunctionDef name:fun arg:x arguments arg arg Return return:yes Call Call If Compare Assign If Compare Assign Assign Call Assign Assign Assign Assign Call Assign Assign Call Assign Call Assign Call Assign Call Assign If Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "FileTimerRequest",
    "source_code": "class FileTimerRequest(TimerRequest):\n    __slots__ = ['version', 'worker_pid', 'scope_id', 'expiration_time', 'signal']\n\n    def __init__(self, worker_pid: int, scope_id: str, expiration_time: float, signal: int=0) -> None:\n        self.version = 1\n        self.worker_pid = worker_pid\n        self.scope_id = scope_id\n        self.expiration_time = expiration_time\n        self.signal = signal\n\n    def __eq__(self, other) -> bool:\n        if isinstance(other, FileTimerRequest):\n            return self.version == other.version and self.worker_pid == other.worker_pid and (self.scope_id == other.scope_id) and (self.expiration_time == other.expiration_time) and (self.signal == other.signal)\n        return False\n\n    def to_json(self) -> str:\n        return json.dumps({'version': self.version, 'pid': self.worker_pid, 'scope_id': self.scope_id, 'expiration_time': self.expiration_time, 'signal': self.signal})",
    "docstring": "Data object representing a countdown timer acquisition and release that is used between the `` is the signal to reap the worker process from the server process.",
    "type": "class",
    "file_path": "pytorch\\torch\\distributed\\elastic\\timer\\file_based_local_timer.py",
    "ast_data": "ClassDef name:FileTimerRequest Assign FunctionDef name:__init__ arg:self arg:worker_pid arg:scope_id arg:expiration_time arg:signal arguments arg arg arg arg arg Assign Assign Assign Assign Assign FunctionDef name:__eq__ arg:self arg:other arguments arg arg If Call Return return:yes BoolOp Compare Compare Compare Compare Compare Return return:yes FunctionDef name:to_json arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "cryptography",
    "name": "ExtendableOutputFunction",
    "source_code": "class ExtendableOutputFunction(metaclass=abc.ABCMeta):\n    pass",
    "docstring": "An interface for extendable output functions.",
    "type": "class",
    "file_path": "cryptography\\src\\cryptography\\hazmat\\primitives\\hashes.py",
    "ast_data": "ClassDef name:ExtendableOutputFunction"
  },
  {
    "library": "kornia",
    "name": "warp_frame_depth",
    "source_code": "def warp_frame_depth(image_src: Tensor, depth_dst: Tensor, src_trans_dst: Tensor, camera_matrix: Tensor, normalize_points: bool=False) -> Tensor:\n    KORNIA_CHECK_SHAPE(image_src, ['B', 'D', 'H', 'W'])\n    KORNIA_CHECK_SHAPE(depth_dst, ['B', '1', 'H', 'W'])\n    KORNIA_CHECK_SHAPE(src_trans_dst, ['B', '4', '4'])\n    KORNIA_CHECK_SHAPE(camera_matrix, ['B', '3', '3'])\n    points_3d_dst: Tensor = depth_to_3d(depth_dst, camera_matrix, normalize_points)\n    points_3d_dst = points_3d_dst.permute(0, 2, 3, 1)\n    points_3d_src = transform_points(src_trans_dst[:, None], points_3d_dst)\n    camera_matrix_tmp: Tensor = camera_matrix[:, None, None]\n    points_2d_src: Tensor = project_points(points_3d_src, camera_matrix_tmp)\n    height, width = depth_dst.shape[-2:]\n    points_2d_src_norm: Tensor = normalize_pixel_coordinates(points_2d_src, height, width)\n    return kornia_ops.map_coordinates(image_src, points_2d_src_norm, align_corners=True)",
    "docstring": "Warp a tensor from a source to destination frame by the depth in the destination. Compute 3d points from the depth, transform them using given transformation, then project the point cloud to an image plane. Args: image_src: image tensor in the source frame with shape :math:. depth_dst: depth tensor in the destination frame with shape :math:. src_trans_dst: transformation matrix from destination to source with shape :math:. camera_matrix: tensor containing the camera intrinsics with shape :math:. normalize_points: whether to normalize the pointcloud. This must be set to `(B,3,H,W)`.",
    "type": "function",
    "file_path": "kornia\\kornia\\geometry\\depth.py",
    "ast_data": "FunctionDef name:warp_frame_depth arg:image_src arg:depth_dst arg:src_trans_dst arg:camera_matrix arg:normalize_points arguments arg arg arg arg arg Call Call Call Call Call Assign Call Assign Call Call Assign Call Return return:yes Call"
  },
  {
    "library": "seaborn",
    "name": "_finalize",
    "source_code": "def _finalize(self, p: Plot, axis: Axis) -> None:\n    pass",
    "docstring": "Perform scale-specific axis tweaks after adding artists.",
    "type": "method",
    "file_path": "seaborn\\seaborn\\_core\\scales.py",
    "ast_data": "FunctionDef name:_finalize arg:self arg:p arg:axis arguments arg arg arg"
  },
  {
    "library": "tensorflow",
    "name": "_fallback_apply_state",
    "source_code": "def _fallback_apply_state(self, var_device, var_dtype):\n    apply_state = {(var_device, var_dtype): {}}\n    self._prepare_local(var_device, var_dtype, apply_state)\n    return apply_state[var_device, var_dtype]",
    "docstring": "Compatibility for subclasses that don't pass apply_state through.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\optimizer_v2\\optimizer_v2.py",
    "ast_data": "FunctionDef name:_fallback_apply_state arg:self arg:var_device arg:var_dtype arguments arg arg arg Assign Call Return return:yes"
  },
  {
    "library": "pandas",
    "name": "add_ops",
    "source_code": "def add_ops(op_classes):\n\n    def f(cls):\n        for op_attr_name, op_class in op_classes.items():\n            ops = getattr(cls, f'{op_attr_name}_ops')\n            ops_map = getattr(cls, f'{op_attr_name}_op_nodes_map')\n            for op in ops:\n                op_node = ops_map[op]\n                if op_node is not None:\n                    made_op = _op_maker(op_class, op)\n                    setattr(cls, f'visit_{op_node}', made_op)\n        return cls\n    return f",
    "docstring": "Decorator to add default implementation of ops.",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\computation\\expr.py",
    "ast_data": "FunctionDef name:add_ops arg:op_classes arguments arg FunctionDef name:f arg:cls arguments arg For Call Assign Call Assign Call For Assign If Compare Assign Call Call Return return:yes Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "tril",
    "source_code": "@property\ndef tril(self):\n    return self._tril",
    "docstring": "The lower triangular matrix defining this operator.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\linalg\\linear_operator_lower_triangular.py",
    "ast_data": "FunctionDef name:tril arg:self arguments arg Return return:yes"
  },
  {
    "library": "scipy",
    "name": "sum",
    "source_code": "def sum(self, axis=None, dtype=None, out=None):\n    axis = validateaxis(axis, ndim=self.ndim)\n    res_dtype = get_sum_dtype(self.dtype)\n    if axis is None:\n        if self.nnz == 0:\n            return np.sum(self._ascontainer([0]), dtype=dtype or res_dtype, out=out)\n        return np.sum(self._ascontainer(_todata(self)), dtype=dtype, out=out)\n    elif isspmatrix(self):\n        new_shape = (1, self.shape[1]) if axis == (0,) else (self.shape[0], 1)\n    else:\n        new_shape = tuple((self.shape[i] for i in range(self.ndim) if i not in axis))\n    if out is None:\n        out = self._ascontainer(np.zeros(new_shape, dtype=dtype or res_dtype))\n    elif out.shape != new_shape:\n        raise ValueError('out dimensions do not match shape')\n    if self.ndim > 2:\n        return self._sum_nd(axis, res_dtype, out)\n    if axis == (0,):\n        ones = self._ascontainer(np.ones((1, self.shape[0]), dtype=res_dtype))\n        out[...] = (ones @ self).reshape(new_shape)\n    else:\n        ones = self._ascontainer(np.ones((self.shape[1], 1), dtype=res_dtype))\n        out[...] = (self @ ones).reshape(new_shape)\n    return out",
    "docstring": "Sum the array/matrix elements over a given axis. Parameters ---------- axis : {-2, -1, 0, 1, None} optional Axis along which the sum is computed. The default is to compute the sum of all the array/matrix elements, returning a scalar (i.e., = ). dtype : dtype, optional The type of the returned array/matrix and of the accumulator in which the elements are summed. The dtype of is used by default unless has an integer dtype of less precision than the default platform integer. In that case, if is signed then the platform integer is used while if is unsigned then an unsigned integer of the same precision as the platform integer is used. .. versionadded:: 0.18.0 out : np.matrix, optional Alternative output matrix in which to place the result. It must have the same shape as the expected output, but the type of the output values will be cast if necessary. .. versionadded:: 0.18.0 Returns ------- sum_along_axis : np.matrix A matrix with the same shape as , with the specified axis removed. See Also -------- numpy.matrix.sum : NumPy's implementation of 'sum' for matrices",
    "type": "method",
    "file_path": "scipy\\scipy\\sparse\\_base.py",
    "ast_data": "FunctionDef name:sum arg:self arg:axis arg:dtype arg:out arguments arg arg arg arg Assign Call Assign Call If Compare If Compare Return return:yes Call Call BoolOp Return return:yes Call Call Call If Call Assign Compare Assign Call Call Compare If Compare Assign Call Call BoolOp If Compare Raise Call If Compare Return return:yes Call If Compare Assign Call Call Assign Call Assign Call Call Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "is_custom_device",
    "source_code": "def is_custom_device(self, device_name):\n    self.ensure_initialized()\n    return pywrap_tfe.TFE_Py_IsCustomDevice(self._handle, device_name)",
    "docstring": "Calls TFE_IsCustomDevice. See the non-member function.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\context.py",
    "ast_data": "FunctionDef name:is_custom_device arg:self arg:device_name arguments arg arg Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "_update_qconfig_for_fusion",
    "source_code": "def _update_qconfig_for_fusion(model: GraphModule, qconfig_mapping: QConfigMapping):\n    object_type_dict = qconfig_mapping.object_type_qconfigs\n    if len(object_type_dict) == 0:\n        return qconfig_mapping\n    modules = dict(model.named_modules())\n    for node in model.graph.nodes:\n        if node.op == 'call_module' and node.target in modules:\n            maybe_fused_module = modules[str(node.target)]\n            if not isinstance(maybe_fused_module, _FusedModule):\n                continue\n            ops = list(maybe_fused_module._modules.values())\n            fused_qconfig = object_type_dict.get(type(ops[0]), None)\n            for op in ops[1:]:\n                if not qconfig_equals(object_type_dict.get(type(op), None), fused_qconfig):\n                    raise LookupError('During fusion, we need to specify the same ' + f'qconfigs for all module types in {type(maybe_fused_module)} ' + f'offending type: {type(op)}')\n            if fused_qconfig is not None:\n                object_type_dict[type(maybe_fused_module)] = fused_qconfig",
    "docstring": "Update the QConfigMapping to account for fused modules such as LinearReLU. This assumes the QConfigMapping's attributes have already been converted to OrderedDicts.",
    "type": "function",
    "file_path": "pytorch\\torch\\ao\\quantization\\fx\\qconfig_mapping_utils.py",
    "ast_data": "FunctionDef name:_update_qconfig_for_fusion arg:model arg:qconfig_mapping arguments arg arg Assign If Compare Call Return return:yes Assign Call Call For If BoolOp Compare Compare Assign Call If Call Assign Call Call Assign Call Call For If Call Call Call Raise Call Call Call If Compare Assign Call"
  },
  {
    "library": "scipy",
    "name": "__str__",
    "source_code": "def __str__(self):\n    return self.name + ',' + self.type_name",
    "docstring": "Parse a value of this type.",
    "type": "method",
    "file_path": "scipy\\scipy\\io\\arff\\_arffread.py",
    "ast_data": "FunctionDef name:__str__ arg:self arguments arg Return return:yes"
  },
  {
    "library": "django",
    "name": "_check_list_display",
    "source_code": "def _check_list_display(self, obj):\n    if not isinstance(obj.list_display, (list, tuple)):\n        return must_be('a list or tuple', option='list_display', obj=obj, id='admin.E107')\n    else:\n        return list(chain.from_iterable((self._check_list_display_item(obj, item, 'list_display[%d]' % index) for index, item in enumerate(obj.list_display))))",
    "docstring": "Check that list_display only contains fields or usable attributes.",
    "type": "method",
    "file_path": "django\\django\\contrib\\admin\\checks.py",
    "ast_data": "FunctionDef name:_check_list_display arg:self arg:obj arguments arg arg If Call Return return:yes Call Return return:yes Call Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "grad",
    "source_code": "def grad(dy):\n    with ops.control_dependencies([dy]):\n        sigmoid_features = math_ops.sigmoid(beta * features)\n    activation_grad = sigmoid_features * (1.0 + beta * features * (1.0 - sigmoid_features))\n    beta_grad = math_ops.reduce_sum(dy * math_ops.square(features) * sigmoid_features * (1.0 - sigmoid_features))\n    return (dy * activation_grad, beta_grad)",
    "docstring": "Gradient for the Swish activation function.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\nn_impl.py",
    "ast_data": "FunctionDef name:grad arg:dy arguments arg With Call Assign Call Assign Assign Call Call Return return:yes"
  },
  {
    "library": "sphinx",
    "name": "clear_doc",
    "source_code": "def clear_doc(self, docname: str) -> None:\n    if docname in self.all_docs:\n        self.all_docs.pop(docname, None)\n        self.included.pop(docname, None)\n        self.reread_always.discard(docname)\n    self.domains._clear_doc(docname)",
    "docstring": "Remove all traces of a source file in the inventory.",
    "type": "method",
    "file_path": "sphinx\\sphinx\\environment\\__init__.py",
    "ast_data": "FunctionDef name:clear_doc arg:self arg:docname arguments arg arg If Compare Call Call Call Call"
  },
  {
    "library": "matplotlib",
    "name": "set_hatch_linewidth",
    "source_code": "def set_hatch_linewidth(self, lw):\n    self._hatch_linewidth = lw",
    "docstring": "Set the hatch linewidth.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\patches.py",
    "ast_data": "FunctionDef name:set_hatch_linewidth arg:self arg:lw arguments arg arg Assign"
  },
  {
    "library": "tensorflow",
    "name": "squeeze_batch_dims",
    "source_code": "def squeeze_batch_dims(inp, op, inner_rank, name=None):\n    with ops.name_scope(name, 'squeeze_batch_dims', [inp]):\n        inp = ops.convert_to_tensor(inp, name='input')\n        shape = inp.shape\n        inner_shape = shape[-inner_rank:]\n        if not inner_shape.is_fully_defined():\n            inner_shape = array_ops.shape(inp)[-inner_rank:]\n        batch_shape = shape[:-inner_rank]\n        if not batch_shape.is_fully_defined():\n            batch_shape = array_ops.shape(inp)[:-inner_rank]\n        if isinstance(inner_shape, tensor_shape.TensorShape):\n            inp_reshaped = array_ops.reshape(inp, [-1] + inner_shape.as_list())\n        else:\n            inp_reshaped = array_ops.reshape(inp, array_ops.concat(([-1], inner_shape), axis=-1))\n        out_reshaped = op(inp_reshaped)\n        out_inner_shape = out_reshaped.shape[-inner_rank:]\n        if not out_inner_shape.is_fully_defined():\n            out_inner_shape = array_ops.shape(out_reshaped)[-inner_rank:]\n        out = array_ops.reshape(out_reshaped, array_ops.concat((batch_shape, out_inner_shape), axis=-1))\n        out.set_shape(inp.shape[:-inner_rank] + out.shape[-inner_rank:])\n        return out",
    "docstring": "Returns . Where reshapes to shape and does the reverse reshape but on the output. Args: inp: A tensor with dims where is length . op: A callable that takes a single input tensor and returns a single. output tensor. inner_rank: A python integer. name: A string. Returns: .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\nn_ops.py",
    "ast_data": "FunctionDef name:squeeze_batch_dims arg:inp arg:op arg:inner_rank arg:name arguments arg arg arg arg With Call Assign Call Assign Assign If Call Assign Call Assign If Call Assign Call If Call Assign Call Call Assign Call Call Assign Call Assign If Call Assign Call Assign Call Call Call Return return:yes"
  },
  {
    "library": "django",
    "name": "form_valid",
    "source_code": "def form_valid(self, form):\n    self.object = form.save()\n    return super().form_valid(form)",
    "docstring": "If the form is valid, save the associated model.",
    "type": "method",
    "file_path": "django\\django\\views\\generic\\edit.py",
    "ast_data": "FunctionDef name:form_valid arg:self arg:form arguments arg arg Assign Call Return return:yes Call Call"
  },
  {
    "library": "scipy",
    "name": "_get_fixed_fit_value",
    "source_code": "def _get_fixed_fit_value(kwds, names):\n    vals = [(name, kwds.pop(name)) for name in names if name in kwds]\n    if len(vals) > 1:\n        repeated = [name for name, val in vals]\n        raise ValueError('fit method got multiple keyword arguments to specify the same fixed parameter: ' + ', '.join(repeated))\n    return vals[0][1] if vals else None",
    "docstring": "Given names such as `kwdskwdskwds` are removed.",
    "type": "function",
    "file_path": "scipy\\scipy\\stats\\_distn_infrastructure.py",
    "ast_data": "FunctionDef name:_get_fixed_fit_value arg:kwds arg:names arguments arg arg Assign Call Compare If Compare Call Assign Raise Call Call Return return:yes"
  },
  {
    "library": "virtualenv",
    "name": "AppData",
    "source_code": "class AppData(ABC):\n\n    @abstractmethod\n    def close(self):\n        pass\n\n    @abstractmethod\n    def reset(self):\n        pass\n\n    @abstractmethod\n    def py_info(self, path):\n        raise NotImplementedError\n\n    @abstractmethod\n    def py_info_clear(self):\n        raise NotImplementedError\n\n    @property\n    def can_update(self):\n        raise NotImplementedError\n\n    @abstractmethod\n    def embed_update_log(self, distribution, for_py_version):\n        raise NotImplementedError\n\n    @property\n    def house(self):\n        raise NotImplementedError\n\n    @property\n    def transient(self):\n        raise NotImplementedError\n\n    @abstractmethod\n    def wheel_image(self, for_py_version, name):\n        raise NotImplementedError\n\n    @contextmanager\n    def ensure_extracted(self, path, to_folder=None):\n        if IS_ZIPAPP:\n            with self.extract(path, to_folder) as result:\n                yield result\n        else:\n            yield path\n\n    @abstractmethod\n    @contextmanager\n    def extract(self, path, to_folder):\n        raise NotImplementedError\n\n    @abstractmethod\n    @contextmanager\n    def locked(self, path):\n        raise NotImplementedError",
    "docstring": "Abstract storage interface for the virtualenv application.",
    "type": "class",
    "file_path": "virtualenv\\src\\virtualenv\\app_data\\base.py",
    "ast_data": "ClassDef name:AppData FunctionDef name:close arg:self arguments arg FunctionDef name:reset arg:self arguments arg FunctionDef name:py_info arg:self arg:path arguments arg arg Raise FunctionDef name:py_info_clear arg:self arguments arg Raise FunctionDef name:can_update arg:self arguments arg Raise FunctionDef name:embed_update_log arg:self arg:distribution arg:for_py_version arguments arg arg arg Raise FunctionDef name:house arg:self arguments arg Raise FunctionDef name:transient arg:self arguments arg Raise FunctionDef name:wheel_image arg:self arg:for_py_version arg:name arguments arg arg arg Raise FunctionDef name:ensure_extracted arg:self arg:path arg:to_folder arguments arg arg arg If With Call FunctionDef name:extract arg:self arg:path arg:to_folder arguments arg arg arg Raise FunctionDef name:locked arg:self arg:path arguments arg arg Raise"
  },
  {
    "library": "tensorflow",
    "name": "merge_all_summaries",
    "source_code": "@deprecated('2016-11-30', 'Please switch to tf.summary.merge_all.')\ndef merge_all_summaries(key=ops.GraphKeys.SUMMARIES):\n    summary_ops = ops.get_collection(key)\n    if not summary_ops:\n        return None\n    else:\n        return merge_summary(summary_ops)",
    "docstring": "Merges all summaries collected in the default graph. This op is deprecated. Please switch to tf.compat.v1.summary.merge_all, which has identical behavior. Args: key: used to collect the summaries. Defaults to . Returns: If no summaries were collected, returns None. Otherwise returns a scalar of type containing the serialized protocol buffer resulting from the merging.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\logging_ops.py",
    "ast_data": "FunctionDef name:merge_all_summaries arg:key arguments arg Assign Call If Return return:no Return return:yes Call Call"
  },
  {
    "library": "authlib",
    "name": "get",
    "source_code": "def get(self, url, **kwargs):\n    return self.request('GET', url, **kwargs)",
    "docstring": "Invoke GET http request. If `` configured, shortcut is available:: client.get(\"users/lepture\")",
    "type": "method",
    "file_path": "authlib\\authlib\\integrations\\base_client\\sync_app.py",
    "ast_data": "FunctionDef name:get arg:self arg:url arguments arg arg arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "add_real_datasets",
    "source_code": "def add_real_datasets(self, datasets, other_datasets, cat_feature2cats, ranking=False):\n    if other_datasets:\n        for name, path in other_datasets:\n            df_other, choices, _, _, _ = self.get_df(path, cat_feature2cats=cat_feature2cats, apply_filters=False, add_near_best=ranking)\n            datasets[name] = df_other",
    "docstring": "Adds datasets specified by the user to the datasets dictionary.",
    "type": "method",
    "file_path": "pytorch\\torchgen\\_autoheuristic\\train_decision.py",
    "ast_data": "FunctionDef name:add_real_datasets arg:self arg:datasets arg:other_datasets arg:cat_feature2cats arg:ranking arguments arg arg arg arg arg If For Assign Call Assign"
  },
  {
    "library": "seaborn",
    "name": "pair",
    "source_code": "def pair(self, x: VariableSpecList=None, y: VariableSpecList=None, wrap: int | None=None, cross: bool=True) -> Plot:\n    pair_spec: PairSpec = {}\n    axes = {'x': [] if x is None else x, 'y': [] if y is None else y}\n    for axis, arg in axes.items():\n        if isinstance(arg, (str, int)):\n            err = f'You must pass a sequence of variable keys to `{axis}`'\n            raise TypeError(err)\n    pair_spec['variables'] = {}\n    pair_spec['structure'] = {}\n    for axis in 'xy':\n        keys = []\n        for i, col in enumerate(axes[axis]):\n            key = f'{axis}{i}'\n            keys.append(key)\n            pair_spec['variables'][key] = col\n        if keys:\n            pair_spec['structure'][axis] = keys\n    if not cross and len(axes['x']) != len(axes['y']):\n        err = 'Lengths of the `x` and `y` lists must match with cross=False'\n        raise ValueError(err)\n    pair_spec['cross'] = cross\n    pair_spec['wrap'] = wrap\n    new = self._clone()\n    new._pair_spec.update(pair_spec)\n    return new",
    "docstring": "Produce subplots by pairing multiple and/or variables. Parameters ---------- x, y : sequence(s) of data vectors or identifiers Variables that will define the grid of subplots. wrap : int When using only or , \"wrap\" subplots across a two-dimensional grid with this many columns (when using ) or rows (when using ). cross : bool When False, zip the and lists such that the first subplot gets the first pair, the second gets the second pair, etc. Otherwise, create a two-dimensional grid from the cartesian product of the lists. Examples -------- .. include:: ../docstrings/objects.Plot.pair.rst",
    "type": "method",
    "file_path": "seaborn\\seaborn\\_core\\plot.py",
    "ast_data": "FunctionDef name:pair arg:self arg:x arg:y arg:wrap arg:cross arguments arg arg arg arg arg Assign Compare Compare For Call If Call Assign Raise Call Assign Assign For Assign For Call Assign Call Assign If Assign If BoolOp Compare Call Call Assign Raise Call Assign Assign Assign Call Call Return return:yes"
  },
  {
    "library": "django",
    "name": "ptr_byref",
    "source_code": "def ptr_byref(args, offset=-1):\n    return args[offset]._obj",
    "docstring": "Return the pointer argument passed in by-reference.",
    "type": "function",
    "file_path": "django\\django\\contrib\\gis\\gdal\\prototypes\\errcheck.py",
    "ast_data": "FunctionDef name:ptr_byref arg:args arg:offset arguments arg arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "filter_hunks_by_path",
    "source_code": "def filter_hunks_by_path(hunks: Iterable[diff_parser.Hunk], *, path_regexes: list[str], path_regex_exclusions: list[str]) -> list[diff_parser.Hunk]:\n    if not path_regexes:\n        path_regexes = ['.*']\n    path_regexes = [re.compile(regex) for regex in path_regexes]\n\n    def should_include(path: str) -> bool:\n        return any((regex.search(path) for regex in path_regexes))\n    path_regex_exclusions = [re.compile(regex) for regex in path_regex_exclusions]\n\n    def should_exclude(path: str) -> bool:\n        return any((regex.search(path) for regex in path_regex_exclusions))\n    return [hunk for hunk in hunks if should_include(hunk.file) and (not should_exclude(hunk.file))]",
    "docstring": "Filters files according to path_regexes. If a file matches both a path_regex and a path_regex_exclusion, then it will be filtered out. Arguments: hunks: A sequence of Hunk objects representing the hunks of the diff in the change. path_regexes: A list of regexes. Paths matching these will pass through the filter. By default, every path is matched. path_regex_exclusions: A list of regexes. Paths that match both a path_regex and a path_regex_exclusion won't pass through the filter. Returns: A list of FileDiffs whose paths match a path_regex and don't match any path_regex_exclusions.",
    "type": "function",
    "file_path": "tensorflow\\third_party\\xla\\build_tools\\lint\\check_contents.py",
    "ast_data": "FunctionDef name:filter_hunks_by_path arg:hunks arguments arg arg arg If Assign Assign Call FunctionDef name:should_include arg:path arguments arg Return return:yes Call Call Assign Call FunctionDef name:should_exclude arg:path arguments arg Return return:yes Call Call Return return:yes BoolOp Call Call"
  },
  {
    "library": "django",
    "name": "get_modified_time",
    "source_code": "def get_modified_time(self, name):\n    raise NotImplementedError('subclasses of Storage must provide a get_modified_time() method')",
    "docstring": "Return the last modified time (as a datetime) of the file specified by name. The datetime will be timezone-aware if USE_TZ=True.",
    "type": "method",
    "file_path": "django\\django\\core\\files\\storage\\base.py",
    "ast_data": "FunctionDef name:get_modified_time arg:self arg:name arguments arg arg Raise Call"
  },
  {
    "library": "pytorch",
    "name": "RendezvousHandler",
    "source_code": "class RendezvousHandler(ABC):\n\n    @abstractmethod\n    def get_backend(self) -> str:\n        pass\n\n    @property\n    def use_agent_store(self) -> bool:\n        return False\n\n    @abstractmethod\n    def next_rendezvous(self) -> RendezvousInfo:\n        pass\n\n    @abstractmethod\n    def is_closed(self) -> bool:\n        pass\n\n    @abstractmethod\n    def set_closed(self):\n        pass\n\n    @abstractmethod\n    def num_nodes_waiting(self) -> int:\n        pass\n\n    @abstractmethod\n    def get_run_id(self) -> str:\n        pass\n\n    @abstractmethod\n    def shutdown(self) -> bool:\n        pass",
    "docstring": "Main rendezvous interface. Note: Distributed Torch users normally **do not** need to implement their own ``. An implementation based on C10d Store is already provided, and is recommended for most users.",
    "type": "class",
    "file_path": "pytorch\\torch\\distributed\\elastic\\rendezvous\\api.py",
    "ast_data": "ClassDef name:RendezvousHandler FunctionDef name:get_backend arg:self arguments arg FunctionDef name:use_agent_store arg:self arguments arg Return return:yes FunctionDef name:next_rendezvous arg:self arguments arg FunctionDef name:is_closed arg:self arguments arg FunctionDef name:set_closed arg:self arguments arg FunctionDef name:num_nodes_waiting arg:self arguments arg FunctionDef name:get_run_id arg:self arguments arg FunctionDef name:shutdown arg:self arguments arg"
  },
  {
    "library": "pytorch",
    "name": "MultiKernelState",
    "source_code": "class MultiKernelState:\n\n    def __init__(self):\n        self.subkernel_to_kernel_name = {}\n        self.kernel_defs = IndentedBuffer()\n\n    def define_kernel(self, kernels):\n        kernel_names = tuple((k.kernel_name for k in kernels))\n        if kernel_names in self.subkernel_to_kernel_name:\n            return self.subkernel_to_kernel_name[kernel_names]\n        multi_kernel_name = f'multi_kernel_{len(self.subkernel_to_kernel_name)}'\n        self.subkernel_to_kernel_name[kernel_names] = multi_kernel_name\n        if V.graph.cpp_wrapper and (not config.triton.autotune_at_compile_time):\n            return multi_kernel_name\n        buf = self.kernel_defs\n        buf.writeline('')\n        buf.writeline(f'{multi_kernel_name} = async_compile.multi_kernel({multi_kernel_name!r}, [')\n        with buf.indent():\n            for name in kernel_names:\n                buf.writeline(f'{name},')\n        buf.writeline('])')\n        if config.triton.autotune_at_compile_time:\n            V.graph.wrapper_code.src_to_kernel['\\n'.join(kernel_names)] = multi_kernel_name\n        return multi_kernel_name",
    "docstring": "Maintain state of multi-kernel compilation so we don't define duplicated multi-kernel for the same set of sub-kernels. V.graph.wrapper_code has a reference to MultiKernelState instance.",
    "type": "class",
    "file_path": "pytorch\\torch\\_inductor\\codegen\\multi_kernel.py",
    "ast_data": "ClassDef name:MultiKernelState FunctionDef name:__init__ arg:self arguments arg Assign Assign Call FunctionDef name:define_kernel arg:self arg:kernels arguments arg arg Assign Call If Compare Return return:yes Assign Call Assign If BoolOp Return return:yes Assign Call Call With Call For Call Call If Assign Call Return return:yes"
  },
  {
    "library": "django",
    "name": "prefix_validation_error",
    "source_code": "def prefix_validation_error(error, prefix, code, params):\n    if error.error_list == [error]:\n        error_params = error.params or {}\n        return ValidationError(message=format_lazy('{} {}', SimpleLazyObject(lambda: prefix % params), SimpleLazyObject(lambda: error.message % error_params)), code=code, params={**error_params, **params})\n    return ValidationError([prefix_validation_error(e, prefix, code, params) for e in error.error_list])",
    "docstring": "Prefix a validation error message while maintaining the existing validation data structure.",
    "type": "function",
    "file_path": "django\\django\\contrib\\postgres\\utils.py",
    "ast_data": "FunctionDef name:prefix_validation_error arg:error arg:prefix arg:code arg:params arguments arg arg arg arg If Compare Assign BoolOp Return return:yes Call Call Call arguments Call arguments Return return:yes Call Call"
  },
  {
    "library": "kornia",
    "name": "_create_octave_mask",
    "source_code": "def _create_octave_mask(mask: Tensor, octave_shape: List[int]) -> Tensor:\n    mask_shape = octave_shape[-2:]\n    mask_octave = F.interpolate(mask, mask_shape, mode='bilinear', align_corners=False)\n    return mask_octave.unsqueeze(1)",
    "docstring": "Downsample a mask based on the given octave shape.",
    "type": "function",
    "file_path": "kornia\\kornia\\feature\\scale_space_detector.py",
    "ast_data": "FunctionDef name:_create_octave_mask arg:mask arg:octave_shape arguments arg arg Assign Assign Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "install_gcc_via_conda",
    "source_code": "def install_gcc_via_conda() -> str:\n    prefix = os.path.join(cache_dir(), 'gcc')\n    cxx_path = os.path.join(prefix, 'bin', 'g++')\n    if not os.path.exists(cxx_path):\n        log.info('Downloading GCC via conda')\n        conda = os.environ.get('CONDA_EXE', 'conda')\n        if conda is None:\n            conda = shutil.which('conda')\n        if conda is not None:\n            subprocess.check_call([conda, 'create', f'--prefix={prefix}', '--channel=conda-forge', '--quiet', '-y', 'python=3.8', 'gxx'], stdout=subprocess.PIPE)\n    return cxx_path",
    "docstring": "On older systems, this is a quick way to get a modern compiler",
    "type": "function",
    "file_path": "pytorch\\torch\\_inductor\\cpp_builder.py",
    "ast_data": "FunctionDef name:install_gcc_via_conda arguments Assign Call Call Assign Call If Call Call Assign Call If Compare Assign Call If Compare Call Return return:yes"
  },
  {
    "library": "scipy",
    "name": "authors",
    "source_code": "@click.command()\n@click.argument('revision_args', nargs=2)\n@click.pass_context\ndef authors(ctx_obj, revision_args):\n    if revision_args:\n        sys.argv = revision_args\n        start_revision = sys.argv[0]\n        end_revision = sys.argv[1]\n    cmd = ['python', 'tools/authors.py', f'{start_revision}..{end_revision}']\n    click.secho(' '.join(cmd), bold=True, fg='bright_blue')\n    util.run(cmd)",
    "docstring": "Generate list of authors who contributed within revision interval. Example: spin authors v1.7.0 v1.8.0",
    "type": "function",
    "file_path": "scipy\\.spin\\cmds.py",
    "ast_data": "FunctionDef name:authors arg:ctx_obj arg:revision_args arguments arg arg If Assign Assign Assign Assign Call Call Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "contains_op",
    "source_code": "def contains_op(self, op_name: str) -> bool:\n    return bool(self.op_counts().get(op_name))",
    "docstring": "True if V.ops.{op_name} is used in node_schedule",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\codegen\\simd_kernel_features.py",
    "ast_data": "FunctionDef name:contains_op arg:self arg:op_name arguments arg arg Return return:yes Call Call Call"
  },
  {
    "library": "scikit-learn",
    "name": "NoSampleWeightWrapper",
    "source_code": "class NoSampleWeightWrapper(BaseEstimator):\n\n    def __init__(self, est=None):\n        self.est = est\n\n    def fit(self, X, y):\n        return self.est.fit(X, y)\n\n    def predict(self, X):\n        return self.est.predict(X)\n\n    def predict_proba(self, X):\n        return self.est.predict_proba(X)\n\n    def __sklearn_tags__(self):\n        tags = super().__sklearn_tags__()\n        tags._skip_test = True\n        return tags",
    "docstring": "Wrap estimator which will not expose . Parameters ---------- est : estimator, default=None The estimator to wrap.",
    "type": "class",
    "file_path": "scikit-learn\\sklearn\\utils\\_mocking.py",
    "ast_data": "ClassDef name:NoSampleWeightWrapper FunctionDef name:__init__ arg:self arg:est arguments arg arg Assign FunctionDef name:fit arg:self arg:X arg:y arguments arg arg arg Return return:yes Call FunctionDef name:predict arg:self arg:X arguments arg arg Return return:yes Call FunctionDef name:predict_proba arg:self arg:X arguments arg arg Return return:yes Call FunctionDef name:__sklearn_tags__ arg:self arguments arg Assign Call Call Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_backward",
    "source_code": "def _backward(self, outputs):\n\n    def _backward_function(*args):\n        call_op = outputs[0].op\n        return self._rewrite_forward_and_call_backward(call_op, *args)\n    return (_backward_function, outputs)",
    "docstring": "Fetch a backward function for from the forward function.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\polymorphic_function\\concrete_function.py",
    "ast_data": "FunctionDef name:_backward arg:self arg:outputs arguments arg arg FunctionDef name:_backward_function arguments arg Assign Return return:yes Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "register_fake",
    "source_code": "def register_fake(hop, fn=None):\n    assert hop not in registered_hop_fake_fns\n\n    def register(func):\n        from torch._subclasses.fake_tensor import FakeTensorMode\n\n        @hop.py_impl(FakeTensorMode)\n        def _(mode, *args, **kwargs):\n            return mode.__torch_dispatch__(hop, [], args, kwargs)\n        registered_hop_fake_fns[hop] = func\n        return func\n    if fn is None:\n        return register\n    return register(fn)",
    "docstring": "Register a fake function for a HOP. This is conceptually equivalent of the register_fake utility for the custom ops. The registered function is called inside the fake_tensor _dispatch_impl.",
    "type": "function",
    "file_path": "pytorch\\torch\\_higher_order_ops\\utils.py",
    "ast_data": "FunctionDef name:register_fake arg:hop arg:fn arguments arg arg Compare FunctionDef name:register arg:func arguments arg FunctionDef name:_ arg:mode arguments arg arg arg Return return:yes Call Call Assign Return return:yes If Compare Return return:yes Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_is_interesting_op",
    "source_code": "def _is_interesting_op(self, op):\n    return op_priority(op.type) <= self._parameters.trace_level",
    "docstring": "Returns True if the given op is not an interesting one to be traced.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\tpu\\tensor_tracer.py",
    "ast_data": "FunctionDef name:_is_interesting_op arg:self arg:op arguments arg arg Return return:yes Compare Call"
  },
  {
    "library": "tensorflow",
    "name": "shape_tensor",
    "source_code": "def shape_tensor(shape):\n    dtype = None\n    if isinstance(shape, (tuple, list)):\n        if not shape:\n            dtype = dtypes.int32\n        else:\n            shape = tuple(map(tensor_shape.dimension_value, shape))\n    return ops.convert_to_tensor(shape, dtype=dtype, name='shape')",
    "docstring": "Convert to an int32 or int64 tensor, defaulting to int32 if empty.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\shape_util.py",
    "ast_data": "FunctionDef name:shape_tensor arg:shape arguments arg Assign If Call If Assign Assign Call Call Return return:yes Call"
  },
  {
    "library": "kornia",
    "name": "spatial_expectation2d",
    "source_code": "def spatial_expectation2d(input: Tensor, normalized_coordinates: bool=True) -> Tensor:\n    _validate_batched_image_tensor_input(input)\n    batch_size, channels, height, width = input.shape\n    grid = create_meshgrid(height, width, normalized_coordinates, input.device)\n    grid = grid.to(input.dtype)\n    pos_x = grid[..., 0].reshape(-1)\n    pos_y = grid[..., 1].reshape(-1)\n    input_flat = input.view(batch_size, channels, -1)\n    expected_y = torch.sum(pos_y * input_flat, -1, keepdim=True)\n    expected_x = torch.sum(pos_x * input_flat, -1, keepdim=True)\n    output = concatenate([expected_x, expected_y], -1)\n    return output.view(batch_size, channels, 2)",
    "docstring": "Compute the expectation of coordinate values using spatial probabilities. The input heatmap is assumed to represent a valid spatial probability distribution, which can be achieved using :func:. Args: input: the input tensor representing dense spatial probabilities with shape :math:. normalized_coordinates: whether to return the coordinates normalized in the range of :math:. Otherwise, it will return the coordinates in the range of the input shape. Returns: expected value of the 2D coordinates with shape :math:. Output order of the coordinates is (x, y). Examples: >>> heatmaps = torch.tensor([[[ ... [0., 0., 0.], ... [0., 0., 0.], ... [0., 1., 0.]]]]) >>> spatial_expectation2d(heatmaps, False) tensor([[[1., 2.]]])",
    "type": "function",
    "file_path": "kornia\\kornia\\geometry\\subpix\\dsnt.py",
    "ast_data": "FunctionDef name:spatial_expectation2d arg:input arg:normalized_coordinates arguments arg arg Call Assign Assign Call Assign Call Assign Call Assign Call Assign Call Assign Call Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "get_usetex",
    "source_code": "def get_usetex(self):\n    return self._usetex",
    "docstring": "Return whether this object uses TeX for rendering.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\text.py",
    "ast_data": "FunctionDef name:get_usetex arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "name",
    "source_code": "@property\ndef name(self):\n    return self._name",
    "docstring": "Name of the layer (string), set in the constructor.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\base_layer.py",
    "ast_data": "FunctionDef name:name arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "visit_Call",
    "source_code": "def visit_Call(self, node):\n    assert self._stack[-1] is node\n    full_name = self._get_full_name(node.func)\n    if full_name:\n        name = full_name.split('.')[-1]\n    elif isinstance(node.func, ast.Name):\n        name = node.func.id\n    elif isinstance(node.func, ast.Attribute):\n        name = node.func.attr\n    else:\n        name = None\n    self._maybe_add_call_warning(node, full_name, name)\n    self._maybe_add_arg_names(node, full_name)\n    self._maybe_modify_args(node, full_name, name)\n    transformers = self._get_applicable_entries('function_transformers', full_name, name)\n    parent = self._stack[-2]\n    if transformers:\n        if uses_star_args_or_kwargs_in_call(node):\n            self.add_log(WARNING, node.lineno, node.col_offset, '(Manual check required) upgrading %s may require modifying call arguments, but it was passed variable-length *args or **kwargs. The upgrade script cannot handle these automatically.' % (full_name or name))\n    for transformer in transformers:\n        logs = []\n        new_node = transformer(parent, node, full_name, name, logs)\n        self.add_logs(logs)\n        if new_node and new_node is not node:\n            pasta.ast_utils.replace_child(parent, node, new_node)\n            node = new_node\n            self._stack[-1] = node\n    self.generic_visit(node)",
    "docstring": "Handle visiting a call node in the AST. Args: node: Current Node",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\tools\\compatibility\\ast_edits.py",
    "ast_data": "FunctionDef name:visit_Call arg:self arg:node arguments arg arg Compare Assign Call If Assign Call If Call Assign If Call Assign Assign Call Call Call Assign Call Assign If If Call Call BoolOp For Assign Assign Call Call If BoolOp Compare Call Assign Assign Call"
  },
  {
    "library": "pytorch",
    "name": "add",
    "source_code": "@_onnx_symbolic('aten::add')\ndef add(g: jit_utils.GraphContext, self, other, alpha=None):\n    if symbolic_helper._is_value(self) and symbolic_helper._is_tensor_list(self):\n        return symbolic_helper._onnx_opset_unsupported_detailed('Add', 9, 11, 'Add between list of tensors not supported', self)\n    if alpha and symbolic_helper._scalar(symbolic_helper._maybe_get_scalar(alpha)) != 1:\n        other = g.op('Mul', other, alpha)\n    return g.op('Add', self, other)",
    "docstring": "This function takes the add function and returns the corresponding ONNX operator. This function is not meant to be called directly by the user. Args: g (GraphContext): The graph context. self (Tensor): The first operand. other (Tensor): The second operand. alpha (float, optional): The scaling factor for the second operand. Defaults to None. Returns: ONNX operator.",
    "type": "function",
    "file_path": "pytorch\\torch\\onnx\\symbolic_opset9.py",
    "ast_data": "FunctionDef name:add arg:g arg:self arg:other arg:alpha arguments arg arg arg arg If BoolOp Call Call Return return:yes Call If BoolOp Compare Call Call Assign Call Return return:yes Call Call"
  },
  {
    "library": "scikit-learn",
    "name": "get_metadata_routing",
    "source_code": "def get_metadata_routing(self):\n    return self._get_metadata_request()",
    "docstring": "Get metadata routing of this object. Please check :ref: on how the routing mechanism works. Returns ------- routing : MetadataRequest A :class: encapsulating routing information.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\utils\\_metadata_requests.py",
    "ast_data": "FunctionDef name:get_metadata_routing arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "__init__",
    "source_code": "@docfiller\ndef __init__(self, mat_stream, byte_order=None, mat_dtype=False, squeeze_me=False, chars_as_strings=True, matlab_compatible=False, struct_as_record=True, verify_compressed_data_integrity=True, uint16_codec=None, simplify_cells=False):\n    super().__init__(mat_stream, byte_order, mat_dtype, squeeze_me, chars_as_strings, matlab_compatible, struct_as_record, verify_compressed_data_integrity, simplify_cells)\n    if not uint16_codec:\n        uint16_codec = sys.getdefaultencoding()\n    self.uint16_codec = uint16_codec\n    self._file_reader = None\n    self._matrix_reader = None",
    "docstring": "Initializer for matlab 5 file format reader %(matstream_arg)s %(load_args)s %(struct_arg)s uint16_codec : {None, string} Set codec to use for uint16 char arrays (e.g., 'utf-8'). Use system default codec if None",
    "type": "method",
    "file_path": "scipy\\scipy\\io\\matlab\\_mio5.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:mat_stream arg:byte_order arg:mat_dtype arg:squeeze_me arg:chars_as_strings arg:matlab_compatible arg:struct_as_record arg:verify_compressed_data_integrity arg:uint16_codec arg:simplify_cells arguments arg arg arg arg arg arg arg arg arg arg arg Call Call If Assign Call Assign Assign Assign"
  },
  {
    "library": "cherrypy",
    "name": "stop",
    "source_code": "def stop(self):\n    for thread_ident, i in self.threads.items():\n        self.bus.publish('stop_thread', i)\n    self.threads.clear()",
    "docstring": "Release all threads and run all 'stop_thread' listeners.",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\process\\plugins.py",
    "ast_data": "FunctionDef name:stop arg:self arguments arg For Call Call Call"
  },
  {
    "library": "numpy",
    "name": "_min_int",
    "source_code": "def _min_int(low, high):\n    if high <= i1.max and low >= i1.min:\n        return int8\n    if high <= i2.max and low >= i2.min:\n        return int16\n    if high <= i4.max and low >= i4.min:\n        return int32\n    return int64",
    "docstring": "get small int that fits the range",
    "type": "function",
    "file_path": "numpy\\numpy\\lib\\_twodim_base_impl.py",
    "ast_data": "FunctionDef name:_min_int arg:low arg:high arguments arg arg If BoolOp Compare Compare Return return:yes If BoolOp Compare Compare Return return:yes If BoolOp Compare Compare Return return:yes Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "VCentered",
    "source_code": "class VCentered(Vlist):\n\n    def __init__(self, elements: list[Node]):\n        super().__init__([Glue('ss'), *elements, Glue('ss')])",
    "docstring": "A convenience class to create a whose contents are centered within its enclosing box.",
    "type": "class",
    "file_path": "matplotlib\\lib\\matplotlib\\_mathtext.py",
    "ast_data": "ClassDef name:VCentered FunctionDef name:__init__ arg:self arg:elements arguments arg arg Call Call Call Call"
  },
  {
    "library": "scikit-learn",
    "name": "_DataListMixin",
    "source_code": "class _DataListMixin:\n\n    def decode_rows(self, stream, conversors):\n        return list(super().decode_rows(stream, conversors))",
    "docstring": "Mixin to return a list from decode_rows instead of a generator",
    "type": "class",
    "file_path": "scikit-learn\\sklearn\\externals\\_arff.py",
    "ast_data": "ClassDef name:_DataListMixin FunctionDef name:decode_rows arg:self arg:stream arg:conversors arguments arg arg arg Return return:yes Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "coord",
    "source_code": "@property\ndef coord(self):\n    return self._coord",
    "docstring": "Return the Coordinator used by the Supervisor. The Coordinator can be useful if you want to run multiple threads during your training. Returns: A Coordinator object.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\training\\supervisor.py",
    "ast_data": "FunctionDef name:coord arg:self arguments arg Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "convert",
    "source_code": "@staticmethod\ndef convert(value, unit, axis):\n    return date2num(value)",
    "docstring": "If *value* is not already a number or sequence of numbers, convert it with . The *unit* and *axis* arguments are not used.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\dates.py",
    "ast_data": "FunctionDef name:convert arg:value arg:unit arg:axis arguments arg arg arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_transform_loss",
    "source_code": "def _transform_loss(self, loss):\n    return loss",
    "docstring": "Called in to transform loss before computing gradients.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\optimizer_v2\\optimizer_v2.py",
    "ast_data": "FunctionDef name:_transform_loss arg:self arg:loss arguments arg arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "assign_device",
    "source_code": "def assign_device(tensor, device, assign_tuple_sharding=False, use_sharding_op=False):\n    return Sharding.assign_device(device).apply_to_tensor(tensor, assign_tuple_sharding=assign_tuple_sharding, use_sharding_op=use_sharding_op)",
    "docstring": "Returns a tensor that has AssignDevice sharding attribute.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\compiler\\xla\\experimental\\xla_sharding.py",
    "ast_data": "FunctionDef name:assign_device arg:tensor arg:device arg:assign_tuple_sharding arg:use_sharding_op arguments arg arg arg arg Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "track_new_user_defined_object",
    "source_code": "def track_new_user_defined_object(self, base_cls_vt, cls_vt, init_args):\n    cls_source = cls_vt.source\n    user_cls = cls_vt.value\n    variable_cls = self.get_variable_cls(user_cls)\n    obj = self.get_example_value(base_cls_vt, cls_vt, init_args)\n    variable = variable_cls(obj, cls_source=cls_vt.source, base_cls_vt=base_cls_vt, init_args=init_args, mutation_type=AttributeMutationNew(cls_source))\n    self.id_to_variable[id(obj)] = variable\n    self.keepalive.append(obj)\n    return variable",
    "docstring": "Creates a UserDefinedObjectVariable (or its subclass) variable tracker and mark it for attribute mutation tracking. Also records the variable trackers to call __new__ method on reconstruction. Roughly, the reconstruction looks like this base_cls_vt.__new__(user_cls, *init_args)",
    "type": "method",
    "file_path": "pytorch\\torch\\_dynamo\\side_effects.py",
    "ast_data": "FunctionDef name:track_new_user_defined_object arg:self arg:base_cls_vt arg:cls_vt arg:init_args arguments arg arg arg arg Assign Assign Assign Call Assign Call Assign Call Call Assign Call Call Return return:yes"
  },
  {
    "library": "django",
    "name": "wordcount",
    "source_code": "@register.filter(is_safe=False)\n@stringfilter\ndef wordcount(value):\n    return len(value.split())",
    "docstring": "Return the number of words.",
    "type": "function",
    "file_path": "django\\django\\template\\defaultfilters.py",
    "ast_data": "FunctionDef name:wordcount arg:value arguments arg Return return:yes Call Call Call"
  },
  {
    "library": "scikit-learn",
    "name": "_build_graph",
    "source_code": "def _build_graph(self):\n    if self.kernel == 'knn':\n        self.nn_fit = None\n    affinity_matrix = self._get_kernel(self.X_)\n    normalizer = affinity_matrix.sum(axis=0)\n    if sparse.issparse(affinity_matrix):\n        affinity_matrix.data /= np.diag(np.array(normalizer))\n    else:\n        affinity_matrix /= normalizer[:, np.newaxis]\n    return affinity_matrix",
    "docstring": "Matrix representing a fully connected graph between each sample This basic implementation creates a non-stochastic affinity matrix, so class distributions will exceed 1 (normalization may be desired).",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\semi_supervised\\_label_propagation.py",
    "ast_data": "FunctionDef name:_build_graph arg:self arguments arg If Compare Assign Assign Call Assign Call If Call Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, executor_type=None, config_proto=None):\n    self.config_proto_serialized = config_proto\n    self.executor_type = executor_type",
    "docstring": "Constructor. Args: executor_type: (optional) name of the executor to be used to execute the eager function. If None or an empty string, the default Tensorflow executor will be used. config_proto: (optional) a proto or a serialized string of that proto. The config used by Grappler when optimizing the function graph. Each concrete function is optimized the first time is called. Changing config_proto after the first call has no effect. If config_proto is None, an empty RewriterConfig will be used.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\context.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:executor_type arg:config_proto arguments arg arg arg Assign Assign"
  },
  {
    "library": "pytorch",
    "name": "scan",
    "source_code": "def scan(self, dtypes: tuple[torch.dtype, ...], combine_fn: Callable[[tuple[T, ...], tuple[T, ...]], tuple[T, ...]], values: tuple[T, ...]) -> tuple[T, ...]:\n    raise NotImplementedError",
    "docstring": "Perform an associative scan on 'value'.",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\ops_handler.py",
    "ast_data": "FunctionDef name:scan arg:self arg:dtypes arg:combine_fn arg:values arguments arg arg arg arg Raise"
  },
  {
    "library": "pandas",
    "name": "is_scalar_indexer",
    "source_code": "def is_scalar_indexer(indexer, ndim: int) -> bool:\n    if ndim == 1 and is_integer(indexer):\n        return True\n    if isinstance(indexer, tuple) and len(indexer) == ndim:\n        return all((is_integer(x) for x in indexer))\n    return False",
    "docstring": "Return True if we are all scalar indexers. Parameters ---------- indexer : object ndim : int Number of dimensions in the object being indexed. Returns ------- bool",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\indexers\\utils.py",
    "ast_data": "FunctionDef name:is_scalar_indexer arg:indexer arg:ndim arguments arg arg If BoolOp Compare Call Return return:yes If BoolOp Call Compare Call Return return:yes Call Call Return return:yes"
  },
  {
    "library": "pandas",
    "name": "OptionError",
    "source_code": "class OptionError(AttributeError, KeyError):\n    pass",
    "docstring": "Exception raised for pandas.options. Backwards compatible with KeyError checks. See Also -------- options : Access and modify global pandas settings. Examples -------- >>> pd.options.context Traceback (most recent call last): OptionError: No such option",
    "type": "class",
    "file_path": "pandas\\pandas\\_config\\config.py",
    "ast_data": "ClassDef name:OptionError"
  },
  {
    "library": "scipy",
    "name": "shell",
    "source_code": "@click.option('--pythonpath', '-p', metavar='PYTHONPATH', default=None, help='Paths to prepend to PYTHONPATH')\n@spin.util.extend_command(spin.cmds.meson.shell)\ndef shell(*, parent_callback, pythonpath, **kwargs):\n    _set_pythonpath(pythonpath)\n    parent_callback(**kwargs)",
    "docstring": "💻 Launch shell with PYTHONPATH set SHELL_ARGS are passed through directly to the shell, e.g.: spin shell -- -c 'echo $PYTHONPATH' Ensure that your shell init file (e.g., ~/.zshrc) does not override the PYTHONPATH.",
    "type": "function",
    "file_path": "scipy\\.spin\\cmds.py",
    "ast_data": "FunctionDef name:shell arguments arg arg arg Call Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, target='', graph=None, config=None):\n    super(Session, self).__init__(target, graph, config=config)\n    self._default_graph_context_manager = None\n    self._default_session_context_manager = None",
    "docstring": "Creates a new TensorFlow session. If no argument is specified when constructing the session, the default graph will be launched in the session. If you are using more than one graph (created with ) in the same process, you will have to use different sessions for each graph, but each graph can be used in multiple sessions. In this case, it is often clearer to pass the graph to be launched explicitly to the session constructor. Args: target: (Optional.) The execution engine to connect to. Defaults to using an in-process engine. See [Distributed TensorFlow]( for more examples. graph: (Optional.) The to be launched (described above). config: (Optional.) A []( protocol buffer with configuration options for the session.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\client\\session.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:target arg:graph arg:config arguments arg arg arg arg Call Call Assign Assign"
  },
  {
    "library": "tensorflow",
    "name": "floordiv",
    "source_code": "@tf_export('math.floordiv', v1=['math.floordiv', 'floordiv'])\n@dispatch.register_binary_elementwise_api\n@dispatch.add_dispatch_support\n@deprecation.deprecated_endpoints('floordiv')\ndef floordiv(x, y, name=None):\n    with ops.name_scope(name, 'floordiv', [x, y]) as name:\n        return gen_math_ops.floor_div(x, y, name=name)",
    "docstring": "Divides elementwise, rounding toward the most negative integer. Mathematically, this is equivalent to floor(x / y). For example: floor(8.4 / 4.0) = floor(2.1) = 2.0 floor(-8.4 / 4.0) = floor(-2.1) = -3.0 This is equivalent to the '//' operator in Python 3.0 and above. Note: and must have the same type, and the result will have the same type as well. Args: x: numerator of real numeric type. y: denominator of real numeric type. name: A name for the operation (optional). Returns: rounded toward -infinity. Raises: TypeError: If the inputs are complex.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\math_ops.py",
    "ast_data": "FunctionDef name:floordiv arg:x arg:y arg:name arguments arg arg arg With Call Return return:yes Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "deregister_context",
    "source_code": "def deregister_context(self, context_words):\n    for context_word in context_words:\n        if context_word not in self._comp_dict:\n            raise KeyError('Cannot deregister unregistered context word \"%s\"' % context_word)\n    for context_word in context_words:\n        del self._comp_dict[context_word]",
    "docstring": "Deregister a list of context words. Args: context_words: A list of context words to deregister, as a list of str. Raises: KeyError: if there are word(s) in context_words that do not correspond to any registered contexts.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\debug\\cli\\debugger_cli_common.py",
    "ast_data": "FunctionDef name:deregister_context arg:self arg:context_words arguments arg arg For If Compare Raise Call For"
  },
  {
    "library": "tensorflow",
    "name": "_calculate_t0",
    "source_code": "def _calculate_t0(self):\n    t0s = [t0 for t0 in self._t0s.values() if t0 is not None]\n    self._t0 = min(t0s) if t0s else None",
    "docstring": "Calculate the first timestamp across all devices.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\debug\\lib\\debug_data.py",
    "ast_data": "FunctionDef name:_calculate_t0 arg:self arguments arg Assign Call Compare Assign Call"
  },
  {
    "library": "pytorch",
    "name": "module_display_name",
    "source_code": "@property\ndef module_display_name(self) -> str:\n    name = self.module_name\n    name = name.removeprefix('L__self___')\n    return name",
    "docstring": "The display name of the module. E.g. .",
    "type": "method",
    "file_path": "pytorch\\torch\\onnx\\_internal\\fx\\passes\\modularization.py",
    "ast_data": "FunctionDef name:module_display_name arg:self arguments arg Assign Assign Call Return return:yes"
  },
  {
    "library": "scipy",
    "name": "roots_chebyc",
    "source_code": "def roots_chebyc(n, mu=False):\n    x, w, m = roots_chebyt(n, True)\n    x *= 2\n    w *= 2\n    m *= 2\n    if mu:\n        return (x, w, m)\n    else:\n        return (x, w)",
    "docstring": "Gauss-Chebyshev (first kind) quadrature. Compute the sample points and weights for Gauss-Chebyshev quadrature. The sample points are the roots of the nth degree Chebyshev polynomial of the first kind, :math:. These sample points and weights correctly integrate polynomials of degree :math: or less over the interval :math: with weight function :math:. See 22.2.6 in [AS]_ for more details. Parameters ---------- n : int quadrature order mu : bool, optional If True, return the sum of the weights, optional. Returns ------- x : ndarray Sample points w : ndarray Weights mu : float Sum of the weights See Also -------- scipy.integrate.fixed_quad References ---------- .. [AS] Milton Abramowitz and Irene A. Stegun, eds. Handbook of Mathematical Functions with Formulas, Graphs, and Mathematical Tables. New York: Dover, 1972.",
    "type": "function",
    "file_path": "scipy\\scipy\\special\\_orthogonal.py",
    "ast_data": "FunctionDef name:roots_chebyc arg:n arg:mu arguments arg arg Assign Call If Return return:yes Return return:yes"
  },
  {
    "library": "scipy",
    "name": "sf",
    "source_code": "def sf(self, k):\n    kc = np.asarray(self.n1 * self.n2 - k)\n    i = k < kc\n    if np.any(i):\n        kc[i] = k[i]\n        cdfs = np.asarray(self.cdf(kc))\n        cdfs[i] = 1.0 - cdfs[i] + self.pmf(kc[i])\n    else:\n        cdfs = np.asarray(self.cdf(kc))\n    return cdfs[()]",
    "docstring": "Survival function",
    "type": "method",
    "file_path": "scipy\\scipy\\stats\\_mannwhitneyu.py",
    "ast_data": "FunctionDef name:sf arg:self arg:k arguments arg arg Assign Call Assign Compare If Call Assign Assign Call Call Assign Call Assign Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "is_backend_available",
    "source_code": "def is_backend_available(backend: str) -> bool:\n    available_func = getattr(torch.distributed, f'is_{backend.lower()}_available', None)\n    if available_func:\n        return available_func()\n    return backend.lower() in Backend.backend_list",
    "docstring": "Check backend availability. Checks if the given backend is available and supports the built-in backends or third-party backends through function ``. Args: backend (str): Backend name. Returns: bool: Returns true if the backend is available otherwise false.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\distributed_c10d.py",
    "ast_data": "FunctionDef name:is_backend_available arg:backend arguments arg Assign Call Call If Return return:yes Call Return return:yes Compare Call"
  },
  {
    "library": "seaborn",
    "name": "hls_palette",
    "source_code": "def hls_palette(n_colors=6, h=0.01, l=0.6, s=0.65, as_cmap=False):\n    if as_cmap:\n        n_colors = 256\n    hues = np.linspace(0, 1, int(n_colors) + 1)[:-1]\n    hues += h\n    hues %= 1\n    hues -= hues.astype(int)\n    palette = [colorsys.hls_to_rgb(h_i, l, s) for h_i in hues]\n    if as_cmap:\n        return mpl.colors.ListedColormap(palette, 'hls')\n    else:\n        return _ColorPalette(palette)",
    "docstring": "Return hues with constant lightness and saturation in the HLS system. The hues are evenly sampled along a circular path. The resulting palette will be appropriate for categorical or cyclical data. The , , and values should be between 0 and 1. .. note:: While the separation of the resulting colors will be mathematically constant, the HLS system does not construct a perceptually-uniform space, so their apparent intensity will vary. Parameters ---------- n_colors : int Number of colors in the palette. h : float The value of the first hue. l : float The lightness value. s : float The saturation intensity. as_cmap : bool If True, return a matplotlib colormap object. Returns ------- palette list of RGB tuples or :class: See Also -------- husl_palette : Make a palette using evenly spaced hues in the HUSL system. Examples -------- .. include:: ../docstrings/hls_palette.rst",
    "type": "function",
    "file_path": "seaborn\\seaborn\\palettes.py",
    "ast_data": "FunctionDef name:hls_palette arg:n_colors arg:h arg:l arg:s arg:as_cmap arguments arg arg arg arg arg If Assign Assign Call Call Call Assign Call If Return return:yes Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "wait_for_unshard",
    "source_code": "def wait_for_unshard(self):\n    if not self._all_gather_result:\n        return\n    async_op = self._all_gather_result.all_gather_work is not None\n    if self._training_state == TrainingState.FORWARD:\n        if (prev_all_gather_state := self.comm_ctx.all_gather_state):\n            self._wait_all_gather_streams_on_event(prev_all_gather_state.event)\n            self.comm_ctx.all_gather_state = None\n    with record_function(self._with_fqn('FSDP::all_gather_copy_out')):\n        foreach_all_gather_copy_out(self._all_gather_result, self.fsdp_params, self._all_gather_process_group)\n    for fsdp_param in self.fsdp_params:\n        fsdp_param.init_unsharded_param()\n    self._to_unsharded()\n    all_gather_copy_out_event = self.device_handle.Event()\n    all_gather_copy_out_event.record()\n    if not async_op and self._training_state == TrainingState.FORWARD:\n        self.comm_ctx.all_gather_state = AllGatherState(self._all_gather_result, all_gather_copy_out_event)\n    else:\n        self._wait_all_gather_streams_on_event(all_gather_copy_out_event)\n    self._all_gather_result = None",
    "docstring": "1. In forward with implict prefetching, to overlap the current copy-out with the next all-gather, we save a reference to the current all-gather result to free after the next copy-out. 2. Otherwise (explicit prefetching or in backward), we free the all-gather result immediately after the current copy-out since we can already overlap the current copy-out with the previous reduce-scatter.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\fsdp\\_fully_shard\\_fsdp_param_group.py",
    "ast_data": "FunctionDef name:wait_for_unshard arg:self arguments arg If Return return:no Assign Compare If Compare If Call Assign With Call Call Call For Call Call Assign Call Call If BoolOp Compare Assign Call Call Assign"
  },
  {
    "library": "cherrypy",
    "name": "check_static_paths",
    "source_code": "def check_static_paths(self):\n    request = cherrypy.request\n    for sn, app in cherrypy.tree.apps.items():\n        if not isinstance(app, cherrypy.Application):\n            continue\n        request.app = app\n        for section in app.config:\n            request.get_resource(section + '/dummy.html')\n            conf = request.config.get\n            if conf('tools.staticdir.on', False):\n                msg = ''\n                root = conf('tools.staticdir.root')\n                dir = conf('tools.staticdir.dir')\n                if dir is None:\n                    msg = 'tools.staticdir.dir is not set.'\n                else:\n                    fulldir = ''\n                    if os.path.isabs(dir):\n                        fulldir = dir\n                        if root:\n                            msg = 'dir is an absolute path, even though a root is provided.'\n                            testdir = os.path.join(root, dir[1:])\n                            if os.path.exists(testdir):\n                                msg += '\\nIf you meant to serve the filesystem folder at %r, remove the leading slash from dir.' % (testdir,)\n                    elif not root:\n                        msg = 'dir is a relative path and no root provided.'\n                    else:\n                        fulldir = os.path.join(root, dir)\n                        if not os.path.isabs(fulldir):\n                            msg = '%r is not an absolute path.' % (fulldir,)\n                    if fulldir and (not os.path.exists(fulldir)):\n                        if msg:\n                            msg += '\\n'\n                        msg += '%r (root + dir) is not an existing filesystem path.' % fulldir\n                if msg:\n                    warnings.warn('%s\\nsection: [%s]\\nroot: %r\\ndir: %r' % (msg, section, root, dir))",
    "docstring": "Check Application config for incorrect static paths.",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\_cpchecker.py",
    "ast_data": "FunctionDef name:check_static_paths arg:self arguments arg Assign For Call If Call Assign For Call Assign If Call Assign Assign Call Assign Call If Compare Assign Assign If Call Assign If Assign Assign Call If Call If Assign Assign Call If Call Assign If BoolOp Call If If Call"
  },
  {
    "library": "tensorflow",
    "name": "tensor_rank",
    "source_code": "@property\ndef tensor_rank(self, name='tensor_rank'):\n    with self._name_scope(name):\n        return self.shape.ndims",
    "docstring": "Rank (in the sense of tensors) of matrix corresponding to this operator. If this operator acts like the batch matrix with , then this returns . Args: name: A name for this . Returns: Python integer, or None if the tensor rank is undefined.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\linalg\\linear_operator.py",
    "ast_data": "FunctionDef name:tensor_rank arg:self arg:name arguments arg arg With Call Return return:yes"
  },
  {
    "library": "django",
    "name": "specificity",
    "source_code": "@property\ndef specificity(self):\n    if self.main_type == '*':\n        return 0\n    elif self.sub_type == '*':\n        return 1\n    elif self.quality == 1:\n        return 2\n    return 3",
    "docstring": "Return a value from 0-3 for how specific the media type is.",
    "type": "method",
    "file_path": "django\\django\\http\\request.py",
    "ast_data": "FunctionDef name:specificity arg:self arguments arg If Compare Return return:yes If Compare Return return:yes If Compare Return return:yes Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "predict_proba",
    "source_code": "def predict_proba(self, X):\n    return np.exp(self.predict_log_proba(X))",
    "docstring": "Return probability estimates for the test vector X. Parameters ---------- X : array-like of shape (n_samples, n_features) The input samples. Returns ------- C : array-like of shape (n_samples, n_classes) Returns the probability of the samples for each class in the model. The columns correspond to the classes in sorted order, as they appear in the attribute :term:.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\naive_bayes.py",
    "ast_data": "FunctionDef name:predict_proba arg:self arg:X arguments arg arg Return return:yes Call Call"
  },
  {
    "library": "numpy",
    "name": "AtlasNotFoundError",
    "source_code": "class AtlasNotFoundError(NotFoundError):\n    pass",
    "docstring": "Atlas ( libraries not found. Directories to search for the libraries can be specified in the numpy/distutils/site.cfg file (section [atlas]) or by setting the ATLAS environment variable.",
    "type": "class",
    "file_path": "numpy\\numpy\\distutils\\system_info.py",
    "ast_data": "ClassDef name:AtlasNotFoundError"
  },
  {
    "library": "pandas",
    "name": "_set_noconvert_dtype_columns",
    "source_code": "@final\ndef _set_noconvert_dtype_columns(self, col_indices: list[int], names: Sequence[Hashable]) -> set[int]:\n    usecols: list[int] | list[str] | None\n    noconvert_columns = set()\n    if self.usecols_dtype == 'integer':\n        usecols = sorted(self.usecols)\n    elif callable(self.usecols) or self.usecols_dtype not in ('empty', None):\n        usecols = col_indices\n    else:\n        usecols = None\n\n    def _set(x) -> int:\n        if usecols is not None and is_integer(x):\n            x = usecols[x]\n        if not is_integer(x):\n            x = col_indices[names.index(x)]\n        return x\n    if isinstance(self.parse_dates, list):\n        validate_parse_dates_presence(self.parse_dates, names)\n        for val in self.parse_dates:\n            noconvert_columns.add(_set(val))\n    elif self.parse_dates:\n        if isinstance(self.index_col, list):\n            for k in self.index_col:\n                noconvert_columns.add(_set(k))\n        elif self.index_col is not None:\n            noconvert_columns.add(_set(self.index_col))\n    return noconvert_columns",
    "docstring": "Set the columns that should not undergo dtype conversions. Currently, any column that is involved with date parsing will not undergo such conversions. If usecols is specified, the positions of the columns not to cast is relative to the usecols not to all columns. Parameters ---------- col_indices: The indices specifying order and positions of the columns names: The column names which order is corresponding with the order of col_indices Returns ------- A set of integers containing the positions of the columns not to convert.",
    "type": "method",
    "file_path": "pandas\\pandas\\io\\parsers\\base_parser.py",
    "ast_data": "FunctionDef name:_set_noconvert_dtype_columns arg:self arg:col_indices arg:names arguments arg arg arg Assign Call If Compare Assign Call If BoolOp Call Compare Assign Assign FunctionDef name:_set arg:x arguments arg If BoolOp Compare Call Assign If Call Assign Call Return return:yes If Call Call For Call Call If If Call For Call Call If Compare Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "call_preflattened",
    "source_code": "def call_preflattened(self, args: Sequence[core.Tensor]) -> Any:\n    flat_outputs = self.call_flat(*args)\n    return self.function_type.pack_output(flat_outputs)",
    "docstring": "Calls with flattened tensor inputs and returns the structured output.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\polymorphic_function\\atomic_function.py",
    "ast_data": "FunctionDef name:call_preflattened arg:self arg:args arguments arg arg Assign Call Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "get_metadata_routing",
    "source_code": "def get_metadata_routing(self):\n    router = MetadataRouter(owner=self.__class__.__name__).add(estimator=self.estimator, method_mapping=MethodMapping().add(callee='fit', caller='fit'))\n    return router",
    "docstring": "Get metadata routing of this object. Please check :ref: on how the routing mechanism works. Returns ------- routing : MetadataRouter A :class: encapsulating routing information.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\model_selection\\_classification_threshold.py",
    "ast_data": "FunctionDef name:get_metadata_routing arg:self arguments arg Assign Call Call Call Call Return return:yes"
  },
  {
    "library": "scipy",
    "name": "dot",
    "source_code": "def dot(self, p):\n    raise NotImplementedError('The method ``dot(p)`` is not implemented.')",
    "docstring": "Compute the product of the internal matrix with the given vector. Parameters ---------- p : array_like 1-D array representing a vector. Returns ------- Hp : array 1-D represents the result of multiplying the approximation matrix by vector p.",
    "type": "method",
    "file_path": "scipy\\scipy\\optimize\\_hessian_update_strategy.py",
    "ast_data": "FunctionDef name:dot arg:self arg:p arguments arg arg Raise Call"
  },
  {
    "library": "pytorch",
    "name": "register_dataclass",
    "source_code": "def register_dataclass(cls: type[Any], *, serialized_type_name: Optional[str]=None) -> None:\n    pytree.register_dataclass(cls, serialized_type_name=serialized_type_name)",
    "docstring": "Registers a dataclass as a valid input/output type for :func:. Args: cls: the dataclass type to register serialized_type_name: The serialized name for the dataclass. This is required if you want to serialize the pytree TreeSpec containing this dataclass. Example:: import torch from dataclasses import dataclass @dataclass class InputDataClass: feature: torch.Tensor bias: int @dataclass class OutputDataClass: res: torch.Tensor torch.export.register_dataclass(InputDataClass) torch.export.register_dataclass(OutputDataClass) class Mod(torch.nn.Module): def forward(self, x: InputDataClass) -> OutputDataClass: res = x.feature + x.bias return OutputDataClass(res=res) ep = torch.export.export(Mod(), (InputDataClass(torch.ones(2, 2), 1), )) print(ep)",
    "type": "function",
    "file_path": "pytorch\\torch\\export\\__init__.py",
    "ast_data": "FunctionDef name:register_dataclass arg:cls arguments arg arg Call"
  },
  {
    "library": "matplotlib",
    "name": "__init__",
    "source_code": "def __init__(self, unit='', places=None, sep=' ', *, usetex=None, useMathText=None, useOffset=False):\n    self.unit = unit\n    self.places = places\n    self.sep = sep\n    super().__init__(useOffset=useOffset, useMathText=useMathText, useLocale=False, usetex=usetex)",
    "docstring": "Parameters ---------- unit : str, default: \"\" Unit symbol to use, suitable for use with single-letter representations of powers of 1000. For example, 'Hz' or 'm'. places : int, default: None Precision with which to display the number, specified in digits after the decimal point (there will be between one and three digits before the decimal point). If it is None, the formatting falls back to the floating point format '%g', which displays up to 6 *significant* digits, i.e. the equivalent value for *places* varies between 0 and 5 (inclusive). sep : str, default: \" \" Separator used between the value and the prefix/unit. For example, one get '3.14 mV' if `text.usetexaxes.formatter.use_mathtext10^{3*N}ScalarFormatter.set_useOffset`. .. versionadded:: 3.10",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\ticker.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:unit arg:places arg:sep arguments arg arg arg arg arg arg arg Assign Assign Assign Call Call"
  },
  {
    "library": "pytorch",
    "name": "codegen_subgraph",
    "source_code": "def codegen_subgraph(self, parent_graph: GraphLowering) -> None:\n    with dynamo_timed('GraphLowering.codegen_subgraph', log_pt2_compile_event=True):\n        self.wrapper_code = parent_graph.wrapper_code\n        self.device_ops = parent_graph.device_ops\n        self.cpp_wrapper = parent_graph.cpp_wrapper\n        self._update_scheduler()\n        self.scheduler.codegen()",
    "docstring": "This is a more compact version of the above where we codegen this graph as a subgraph of some parent graph. The parent graph is passed as an argument: the intention is to inline codegening of the subgraph in the parent graph's wrapper code (including the generated kerenls). The wrapper code is not finalized (via call), as this will be done in the parent graph's .",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\graph.py",
    "ast_data": "FunctionDef name:codegen_subgraph arg:self arg:parent_graph arguments arg arg With Call Assign Assign Assign Call Call"
  },
  {
    "library": "pytorch",
    "name": "run_and_parse_first_match",
    "source_code": "def run_and_parse_first_match(run_lambda, command, regex):\n    rc, out, _ = run_lambda(command)\n    if rc != 0:\n        return None\n    match = re.search(regex, out)\n    if match is None:\n        return None\n    return match.group(1)",
    "docstring": "Run command using run_lambda, returns the first regex match if it exists.",
    "type": "function",
    "file_path": "pytorch\\torch\\utils\\collect_env.py",
    "ast_data": "FunctionDef name:run_and_parse_first_match arg:run_lambda arg:command arg:regex arguments arg arg arg Assign Call If Compare Return return:no Assign Call If Compare Return return:no Return return:yes Call"
  },
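A minimal usage sketch for run_and_parse_first_match above. The (rc, out, err) return convention comes from the entry's source_code; the command, the regex, and the run helper are illustrative assumptions, not part of the original collect_env module.

import re
import subprocess

def run_and_parse_first_match(run_lambda, command, regex):
    # Same logic as the collect_env helper above.
    rc, out, _ = run_lambda(command)
    if rc != 0:
        return None
    match = re.search(regex, out)
    return match.group(1) if match else None

def run(command):
    # Hypothetical run_lambda returning (returncode, stdout, stderr).
    proc = subprocess.run(command, shell=True, capture_output=True, text=True)
    return proc.returncode, proc.stdout, proc.stderr

print(run_and_parse_first_match(run, 'python --version', r'Python (\S+)'))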
  {
    "library": "tensorflow",
    "name": "is_reserved_name",
    "source_code": "@staticmethod\ndef is_reserved_name(name):\n    return name in RESERVED_FIELD_NAMES or name.lower().startswith('_tf_extension_type')",
    "docstring": "Returns true if is a reserved name.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\extension_type_field.py",
    "ast_data": "FunctionDef name:is_reserved_name arg:name arguments arg Return return:yes BoolOp Compare Call Call"
  },
  {
    "library": "django",
    "name": "clean_savepoints",
    "source_code": "def clean_savepoints(using=None):\n    get_connection(using).clean_savepoints()",
    "docstring": "Reset the counter used to generate unique savepoint ids in this thread.",
    "type": "function",
    "file_path": "django\\django\\db\\transaction.py",
    "ast_data": "FunctionDef name:clean_savepoints arg:using arguments arg Call Call"
  },
  {
    "library": "kornia",
    "name": "closing",
    "source_code": "def closing(tensor: torch.Tensor, kernel: torch.Tensor, structuring_element: Optional[torch.Tensor]=None, origin: Optional[List[int]]=None, border_type: str='geodesic', border_value: float=0.0, max_val: float=10000.0, engine: str='unfold') -> torch.Tensor:\n    if not isinstance(tensor, torch.Tensor):\n        raise TypeError(f'Input type is not a torch.Tensor. Got {type(tensor)}')\n    if len(tensor.shape) != 4:\n        raise ValueError(f'Input size must have 4 dimensions. Got {tensor.dim()}')\n    if not isinstance(kernel, torch.Tensor):\n        raise TypeError(f'Kernel type is not a torch.Tensor. Got {type(kernel)}')\n    if len(kernel.shape) != 2:\n        raise ValueError(f'Kernel size must have 2 dimensions. Got {kernel.dim()}')\n    return erosion(dilation(tensor, kernel=kernel, structuring_element=structuring_element, origin=origin, border_type=border_type, border_value=border_value, max_val=max_val, engine=engine), kernel=kernel, structuring_element=structuring_element, origin=origin, border_type=border_type, border_value=border_value, max_val=max_val, engine=engine)",
    "docstring": "Return the closed image, (that means, erosion after a dilation) applying the same kernel in each channel. .. image:: _static/img/closing.png The kernel must have 2 dimensions. Args: tensor: Image with shape :math:. kernel: Positions of non-infinite elements of a flat structuring element. Non-zero values give the set of neighbors of the center over which the operation is applied. Its shape is :math:. For full structural elements use torch.ones_like(structural_element). structuring_element: Structuring element used for the grayscale dilation. It may be a non-flat structuring element. origin: Origin of the structuring element. Default is None and uses the center of the structuring element as origin (rounding towards zero). border_type: It determines how the image borders are handled, where `(B, C, H, W)here `__. Example: >>> tensor = torch.rand(1, 3, 5, 5) >>> kernel = torch.ones(3, 3) >>> closed_img = closing(tensor, kernel)",
    "type": "function",
    "file_path": "kornia\\kornia\\morphology\\morphology.py",
    "ast_data": "FunctionDef name:closing arg:tensor arg:kernel arg:structuring_element arg:origin arg:border_type arg:border_value arg:max_val arg:engine arguments arg arg arg arg arg arg arg arg If Call Raise Call Call If Compare Call Raise Call Call If Call Raise Call Call If Compare Call Raise Call Call Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "_maybe_record_pointwise_barrier",
    "source_code": "def _maybe_record_pointwise_barrier(func: object, proxy_mode: ProxyTorchDispatchMode) -> None:\n    if proxy_mode.decomp_layers or not proxy_mode.emulate_precision_casts:\n        return\n    if not isinstance(func, torch._ops.OpOverload) or torch.Tag.pointwise not in func.tags:\n        return\n    last_node = next(iter(reversed(proxy_mode.tracer.graph.nodes)))\n    t = last_node.meta.get('val')\n    if not isinstance(t, torch.Tensor) or t.dtype not in (torch.bfloat16, torch.float16):\n        return\n    last_node.meta['low_precision_pointwise_barrier'] = True",
    "docstring": "Records pointwise operators in user program (non decomposed) that were output in fp16/bf16",
    "type": "function",
    "file_path": "pytorch\\torch\\fx\\experimental\\proxy_tensor.py",
    "ast_data": "FunctionDef name:_maybe_record_pointwise_barrier arg:func arg:proxy_mode arguments arg arg If BoolOp Return return:no If BoolOp Call Compare Return return:no Assign Call Call Call Assign Call If BoolOp Call Compare Return return:no Assign"
  },
  {
    "library": "scikit-learn",
    "name": "fallback_lbfgs_solve",
    "source_code": "def fallback_lbfgs_solve(self, X, y, sample_weight):\n    max_iter = self.max_iter - self.iteration\n    opt_res = scipy.optimize.minimize(self.linear_loss.loss_gradient, self.coef, method='L-BFGS-B', jac=True, options={'maxiter': max_iter, 'maxls': 50, 'iprint': self.verbose - 1, 'gtol': self.tol, 'ftol': 64 * np.finfo(np.float64).eps}, args=(X, y, sample_weight, self.l2_reg_strength, self.n_threads))\n    self.iteration += _check_optimize_result('lbfgs', opt_res, max_iter=max_iter)\n    self.coef = opt_res.x\n    self.converged = opt_res.status == 0",
    "docstring": "Fallback solver in case of emergency. If a solver detects convergence problems, it may fall back to this methods in the hope to exit with success instead of raising an error. Sets: - self.coef - self.converged",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\linear_model\\_glm\\_newton_solver.py",
    "ast_data": "FunctionDef name:fallback_lbfgs_solve arg:self arg:X arg:y arg:sample_weight arguments arg arg arg arg Assign Assign Call Call Call Assign Assign Compare"
  },
  {
    "library": "scipy",
    "name": "aps01_f",
    "source_code": "def aps01_f(x):\n    return np.sin(x) - x / 2",
    "docstring": "Straightforward sum of trigonometric function and polynomial",
    "type": "function",
    "file_path": "scipy\\scipy\\optimize\\_tstutils.py",
    "ast_data": "FunctionDef name:aps01_f arg:x arguments arg Return return:yes Call"
  },
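aps01_f above is a test function for scalar root finders; as a quick check, its nonzero root (where sin(x) = x/2) can be bracketed and found with scipy's brentq. The bracket [1, 3] is chosen by inspection here and is an assumption, not part of the original test suite.

import numpy as np
from scipy.optimize import brentq

def aps01_f(x):
    return np.sin(x) - x / 2

root = brentq(aps01_f, 1.0, 3.0)
print(root)  # ~1.8955, where sin(x) crosses x/2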
  {
    "library": "tensorflow",
    "name": "_GetFileAndLine",
    "source_code": "def _GetFileAndLine():\n    code, f = _get_caller()\n    if not code:\n        return ('<unknown>', 0)\n    return (code.co_filename, f.f_lineno)",
    "docstring": "Returns (filename, linenumber) for the stack frame.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\platform\\tf_logging.py",
    "ast_data": "FunctionDef name:_GetFileAndLine arguments Assign Call If Return return:yes Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "register_fsdp_forward_method",
    "source_code": "def register_fsdp_forward_method(module: nn.Module, method_name: str) -> None:\n    if not isinstance(module, FSDPModule):\n        return\n    if not hasattr(module, method_name):\n        raise ValueError(f'{type(module)} does not have a method {method_name}')\n    orig_method = getattr(module, method_name)\n\n    @functools.wraps(orig_method)\n    def wrapped_method(self, *args, **kwargs):\n        fsdp_state = self._get_fsdp_state()\n        args, kwargs = fsdp_state._pre_forward(self, args, kwargs)\n        out = orig_method(*args, **kwargs)\n        return fsdp_state._post_forward(self, args, out)\n    setattr(module, method_name, wrapped_method.__get__(module, type(module)))",
    "docstring": "Registers a method on `nn.Module.forwardFSDPModule`, then this is a no-op. Args: module (nn.Module): Module to register the forward method on. method_name (str): Name of the forward method.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\fsdp\\_fully_shard\\_fully_shard.py",
    "ast_data": "FunctionDef name:register_fsdp_forward_method arg:module arg:method_name arguments arg arg If Call Return return:no If Call Raise Call Call Assign Call FunctionDef name:wrapped_method arg:self arguments arg arg arg Assign Call Assign Call Assign Call Return return:yes Call Call Call Call Call"
  },
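The patching idiom in register_fsdp_forward_method (wrap a bound method, then rebind it with __get__) works for any object; a minimal sketch outside FSDP, with the hook placeholders being hypothetical stand-ins for the FSDP pre/post-forward calls:

import functools

class Greeter:
    def hello(self, name):
        return f'hello {name}'

obj = Greeter()
orig_method = obj.hello  # bound method, like getattr(module, method_name)

@functools.wraps(orig_method)
def wrapped_method(self, *args, **kwargs):
    # A pre-hook would run here (cf. fsdp_state._pre_forward).
    out = orig_method(*args, **kwargs)
    # A post-hook would run here (cf. fsdp_state._post_forward).
    return out.upper()

# Rebind the wrapper as a method on this one instance.
setattr(obj, 'hello', wrapped_method.__get__(obj, type(obj)))
print(obj.hello('world'))  # HELLO WORLD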
  {
    "library": "tensorflow",
    "name": "all_reduce_sum_gradients",
    "source_code": "def all_reduce_sum_gradients(grads_and_vars):\n    grads_and_vars = list(grads_and_vars)\n    filtered_grads_and_vars = filter_empty_gradients(grads_and_vars)\n    if filtered_grads_and_vars:\n        if strategy_supports_no_merge_call():\n            grads = [pair[0] for pair in filtered_grads_and_vars]\n            reduced = distribute_lib.get_strategy().extended._replica_ctx_all_reduce(ds_reduce_util.ReduceOp.SUM, grads)\n        else:\n            reduced = distribute_lib.get_replica_context().merge_call(_all_reduce_sum_fn, args=(filtered_grads_and_vars,))\n    else:\n        reduced = []\n    reduced_with_nones = []\n    reduced_pos = 0\n    for g, v in grads_and_vars:\n        if g is None:\n            reduced_with_nones.append((None, v))\n        else:\n            reduced_with_nones.append((reduced[reduced_pos], v))\n            reduced_pos += 1\n    assert reduced_pos == len(reduced), 'Failed to add all gradients'\n    return reduced_with_nones",
    "docstring": "Returns all-reduced gradients aggregated via summation. Args: grads_and_vars: List of (gradient, variable) pairs. Returns: List of (gradient, variable) pairs where gradients have been all-reduced.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\optimizer_v2\\utils.py",
    "ast_data": "FunctionDef name:all_reduce_sum_gradients arg:grads_and_vars arguments arg Assign Call Assign Call If If Call Assign Assign Call Call Assign Call Call Assign Assign Assign For If Compare Call Call Compare Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "create_partition",
    "source_code": "def create_partition(self) -> Partition:\n    partition_id = len(self.partitions)\n    partition = Partition(partition_id)\n    self.partitions.append(partition)\n    return partition",
    "docstring": "Create a partition and append it to self.partitions.",
    "type": "method",
    "file_path": "pytorch\\torch\\fx\\experimental\\accelerator_partitioner.py",
    "ast_data": "FunctionDef name:create_partition arg:self arguments arg Assign Call Assign Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_annealing_linear",
    "source_code": "@staticmethod\ndef _annealing_linear(start, end, pct):\n    return (end - start) * pct + start",
    "docstring": "Linearly anneal from to as pct goes from 0.0 to 1.0.",
    "type": "method",
    "file_path": "pytorch\\torch\\optim\\lr_scheduler.py",
    "ast_data": "FunctionDef name:_annealing_linear arg:start arg:end arg:pct arguments arg arg arg Return return:yes"
  },
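A worked check of the linear annealing formula above: at pct=0 it returns start, at pct=1 it returns end, and halfway it returns the midpoint. The standalone function below simply mirrors the static method for illustration.

def annealing_linear(start, end, pct):
    # (end - start) * pct + start, as in the lr_scheduler entry above.
    return (end - start) * pct + start

assert annealing_linear(0.1, 0.01, 0.0) == 0.1
assert annealing_linear(0.1, 0.01, 1.0) == 0.01
assert abs(annealing_linear(0.1, 0.01, 0.5) - 0.055) < 1e-12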
  {
    "library": "sphinx",
    "name": "_get_domain_role",
    "source_code": "def _get_domain_role(self, name: str) -> tuple[str | None, str | None]:\n    names = name.split(':')\n    if len(names) == 1:\n        return (None, names[0])\n    elif len(names) == 2:\n        return (names[0], names[1])\n    else:\n        return (None, None)",
    "docstring": "Convert the *name* string into a domain and a role name. - If *name* contains no ``.",
    "type": "method",
    "file_path": "sphinx\\sphinx\\ext\\intersphinx\\_resolve.py",
    "ast_data": "FunctionDef name:_get_domain_role arg:self arg:name arguments arg arg Assign Call If Compare Call Return return:yes If Compare Call Return return:yes Return return:no"
  },
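The split logic above yields three cases depending on how many ':' separators appear in *name*; a standalone copy (the helper name is illustrative) with the cases noted inline:

def get_domain_role(name):
    # Standalone copy of the sphinx helper above, for illustration.
    names = name.split(':')
    if len(names) == 1:
        return (None, names[0])      # 'ref'     -> (None, 'ref')
    elif len(names) == 2:
        return (names[0], names[1])  # 'py:func' -> ('py', 'func')
    return (None, None)              # 'a:b:c'   -> (None, None)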
  {
    "library": "pandas",
    "name": "close",
    "source_code": "def close(self) -> None:\n    if self.handles is not None:\n        self.handles.close()",
    "docstring": "If we opened a stream earlier, in _get_data_from_filepath, we should close it. If an open stream or file was passed, we leave it open.",
    "type": "method",
    "file_path": "pandas\\pandas\\io\\json\\_json.py",
    "ast_data": "FunctionDef name:close arg:self arguments arg If Compare Call"
  },
  {
    "library": "scikit-learn",
    "name": "barycenter_weights",
    "source_code": "def barycenter_weights(X, Y, indices, reg=0.001):\n    X = check_array(X, dtype=FLOAT_DTYPES)\n    Y = check_array(Y, dtype=FLOAT_DTYPES)\n    indices = check_array(indices, dtype=int)\n    n_samples, n_neighbors = indices.shape\n    assert X.shape[0] == n_samples\n    B = np.empty((n_samples, n_neighbors), dtype=X.dtype)\n    v = np.ones(n_neighbors, dtype=X.dtype)\n    for i, ind in enumerate(indices):\n        A = Y[ind]\n        C = A - X[i]\n        G = np.dot(C, C.T)\n        trace = np.trace(G)\n        if trace > 0:\n            R = reg * trace\n        else:\n            R = reg\n        G.flat[::n_neighbors + 1] += R\n        w = solve(G, v, assume_a='pos')\n        B[i, :] = w / np.sum(w)\n    return B",
    "docstring": "Compute barycenter weights of X from Y along the first axis We estimate the weights to assign to each point in Y[indices] to recover the point X[i]. The barycenter weights sum to 1. Parameters ---------- X : array-like, shape (n_samples, n_dim) Y : array-like, shape (n_samples, n_dim) indices : array-like, shape (n_samples, n_dim) Indices of the points in Y used to compute the barycenter reg : float, default=1e-3 Amount of regularization to add for the problem to be well-posed in the case of n_neighbors > n_dim Returns ------- B : array-like, shape (n_samples, n_neighbors) Notes ----- See developers note for more information.",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\manifold\\_locally_linear.py",
    "ast_data": "FunctionDef name:barycenter_weights arg:X arg:Y arg:indices arg:reg arguments arg arg arg arg Assign Call Assign Call Assign Call Assign Compare Assign Call Assign Call For Call Assign Assign Assign Call Assign Call If Compare Assign Assign Assign Call Assign Call Return return:yes"
  },
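A small end-to-end sketch of how barycenter_weights is typically fed: neighbor indices come from a k-NN query, and the returned weights sum to 1 per row as the docstring states. Note the import is from a private sklearn module and may change between versions; the data here is synthetic.

import numpy as np
from sklearn.neighbors import NearestNeighbors
from sklearn.manifold._locally_linear import barycenter_weights  # private helper

rng = np.random.default_rng(0)
X = rng.normal(size=(20, 3))
# kneighbors on the training data returns each point as its own first
# neighbor, so drop column 0 to keep only true neighbors.
ind = NearestNeighbors(n_neighbors=6).fit(X).kneighbors(X, return_distance=False)[:, 1:]
W = barycenter_weights(X, X, ind)
assert np.allclose(W.sum(axis=1), 1.0)  # barycenter weights sum to 1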
  {
    "library": "matplotlib",
    "name": "_check_xy",
    "source_code": "def _check_xy(self, renderer=None):\n    if renderer is None:\n        renderer = self.get_figure(root=True)._get_renderer()\n    b = self.get_annotation_clip()\n    if b or (b is None and self.xycoords == 'data'):\n        xy_pixel = self._get_position_xy(renderer)\n        return self.axes.contains_point(xy_pixel)\n    return True",
    "docstring": "Check whether the annotation at *xy_pixel* should be drawn.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\text.py",
    "ast_data": "FunctionDef name:_check_xy arg:self arg:renderer arguments arg arg If Compare Assign Call Call Assign Call If BoolOp BoolOp Compare Compare Assign Call Return return:yes Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_clip_grads_with_norm_",
    "source_code": "@_no_grad\ndef _clip_grads_with_norm_(parameters: _tensor_or_tensors, max_norm: float, total_norm: torch.Tensor, foreach: Optional[bool]=None) -> None:\n    if isinstance(parameters, torch.Tensor):\n        parameters = [parameters]\n    grads = [p.grad for p in parameters if p.grad is not None]\n    max_norm = float(max_norm)\n    if len(grads) == 0:\n        return\n    grouped_grads: dict[tuple[torch.device, torch.dtype], tuple[list[list[Tensor]], list[int]]] = _group_tensors_by_device_and_dtype([grads])\n    clip_coef = max_norm / (total_norm + 1e-06)\n    clip_coef_clamped = torch.clamp(clip_coef, max=1.0)\n    for (device, _), ([device_grads], _) in grouped_grads.items():\n        if foreach is None and _has_foreach_support(device_grads, device) or (foreach and _device_has_foreach_support(device)):\n            torch._foreach_mul_(device_grads, clip_coef_clamped.to(device))\n        elif foreach:\n            raise RuntimeError(f\"foreach=True was passed, but can't use the foreach API on {device.type} tensors\")\n        else:\n            clip_coef_clamped_device = clip_coef_clamped.to(device)\n            for g in device_grads:\n                g.mul_(clip_coef_clamped_device)",
    "docstring": "Scale the gradients of an iterable of parameters given a pre-calculated total norm and desired max norm. The gradients will be scaled by the following calculation .. math:: grad = grad * \\frac{max\\_norm}{total\\_norm + 1e-6} Gradients are modified in-place. This function is equivalent to :func: with a pre-calculated total norm. Args: parameters (Iterable[Tensor] or Tensor): an iterable of Tensors or a single Tensor that will have gradients normalized max_norm (float): max norm of the gradients total_norm (Tensor): total norm of the gradients to use for clipping foreach (bool): use the faster foreach-based implementation. If `` Returns: None",
    "type": "function",
    "file_path": "pytorch\\torch\\nn\\utils\\clip_grad.py",
    "ast_data": "FunctionDef name:_clip_grads_with_norm_ arg:parameters arg:max_norm arg:total_norm arg:foreach arguments arg arg arg arg If Call Assign Assign Compare Assign Call If Compare Call Return return:no Call Assign Assign Call For Call If BoolOp BoolOp Compare Call BoolOp Call Call Call If Raise Call Assign Call For Call"
  },
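The clipping in _clip_grads_with_norm_ reduces to a single scale factor, clamped at 1 so gradients are only ever shrunk; a tiny standalone check with synthetic numbers:

import torch

max_norm, total_norm = 1.0, torch.tensor(4.0)
clip_coef = torch.clamp(max_norm / (total_norm + 1e-6), max=1.0)  # ~0.25
g = torch.full((3,), 2.0)  # stand-in gradient with norm 2*sqrt(3)
g.mul_(clip_coef)          # in-place scaling, as in the entry above
assert torch.allclose(g.norm(), torch.tensor(2.0 * 3 ** 0.5 / 4.0), atol=1e-4)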
  {
    "library": "matplotlib",
    "name": "_update_label_position",
    "source_code": "def _update_label_position(self, renderer):\n    raise NotImplementedError('Derived must override')",
    "docstring": "Update the label position based on the bounding box enclosing all the ticklabels and axis spine.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\axis.py",
    "ast_data": "FunctionDef name:_update_label_position arg:self arg:renderer arguments arg arg Raise Call"
  },
  {
    "library": "tensorflow",
    "name": "_checkpoint_exists",
    "source_code": "def _checkpoint_exists(self, filepath):\n    if filepath.endswith('.h5'):\n        return file_io.file_exists_v2(filepath)\n    tf_saved_model_exists = file_io.file_exists_v2(filepath)\n    tf_weights_only_checkpoint_exists = file_io.file_exists_v2(filepath + '.index')\n    return tf_saved_model_exists or tf_weights_only_checkpoint_exists",
    "docstring": "Returns whether the checkpoint refers to exists.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\callbacks.py",
    "ast_data": "FunctionDef name:_checkpoint_exists arg:self arg:filepath arguments arg arg If Call Return return:yes Call Assign Call Assign Call Return return:yes BoolOp"
  },
  {
    "library": "scipy",
    "name": "downcast_intp_index",
    "source_code": "def downcast_intp_index(arr):\n    if arr.dtype.itemsize > np.dtype(np.intp).itemsize:\n        if arr.size == 0:\n            return arr.astype(np.intp)\n        maxval = arr.max()\n        minval = arr.min()\n        if maxval > np.iinfo(np.intp).max or minval < np.iinfo(np.intp).min:\n            raise ValueError('Cannot deal with arrays with indices larger than the machine maximum address size (e.g. 64-bit indices on 32-bit machine).')\n        return arr.astype(np.intp)\n    return arr",
    "docstring": "Down-cast index array to np.intp dtype if it is of a larger dtype. Raise an error if the array contains a value that is too large for intp.",
    "type": "function",
    "file_path": "scipy\\scipy\\sparse\\_sputils.py",
    "ast_data": "FunctionDef name:downcast_intp_index arg:arr arguments arg If Compare Call If Compare Return return:yes Call Assign Call Assign Call If BoolOp Compare Call Compare Call Raise Call Return return:yes Call Return return:yes"
  },
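The effect of downcast_intp_index depends on the platform word size: on a 64-bit build an int64 index array is already np.intp-sized and passes through unchanged, while a wider dtype is range-checked and cast. A standalone re-implementation for illustration (the scipy original lives in a private module):

import numpy as np

def downcast_intp_index(arr):
    # Cast to np.intp when the dtype is wider, guarding against overflow.
    if arr.dtype.itemsize > np.dtype(np.intp).itemsize:
        if arr.size == 0:
            return arr.astype(np.intp)
        if arr.max() > np.iinfo(np.intp).max or arr.min() < np.iinfo(np.intp).min:
            raise ValueError('indices exceed the machine address size')
        return arr.astype(np.intp)
    return arr

idx = np.array([0, 5, 7], dtype=np.int64)
print(downcast_intp_index(idx).dtype)  # int64 on 64-bit builds (no-op), intp otherwise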
  {
    "library": "pytorch",
    "name": "constant",
    "source_code": "def constant(self, value: Union[bool, float, int], dtype: torch.dtype) -> T:\n    raise NotImplementedError",
    "docstring": "Produces a scalar constant of type dtype.",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\ops_handler.py",
    "ast_data": "FunctionDef name:constant arg:self arg:value arg:dtype arguments arg arg arg Raise"
  },
  {
    "library": "pytorch",
    "name": "local_tensor",
    "source_code": "def local_tensor(self) -> torch.Tensor:\n    if len(self.local_shards()) != 1:\n        raise NotImplementedError('Only single local shard is supported.')\n    return self.local_shards()[0].tensor",
    "docstring": "Return local tensor for a sharded_tensor. For now we only support single local shard. Returns: A :class: of the local shard.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\_shard\\sharded_tensor\\api.py",
    "ast_data": "FunctionDef name:local_tensor arg:self arguments arg If Compare Call Call Raise Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "LogsDest",
    "source_code": "@dataclass\nclass LogsDest:\n    stdouts: dict[int, str] = field(default_factory=dict)\n    stderrs: dict[int, str] = field(default_factory=dict)\n    tee_stdouts: dict[int, str] = field(default_factory=dict)\n    tee_stderrs: dict[int, str] = field(default_factory=dict)\n    error_files: dict[int, str] = field(default_factory=dict)",
    "docstring": "For each log type, holds mapping of local rank ids to file paths.",
    "type": "class",
    "file_path": "pytorch\\torch\\distributed\\elastic\\multiprocessing\\api.py",
    "ast_data": "ClassDef name:LogsDest Call Call Call Call Call"
  },
  {
    "library": "pygame",
    "name": "layer",
    "source_code": "@property\ndef layer(self):\n    return self._layer",
    "docstring": "Layer property can only be set before the sprite is added to a group, after that it is read only and a sprite's layer in a group should be set via the group's change_layer() method. Overwrites dynamic property from sprite class for speed.",
    "type": "method",
    "file_path": "pygame\\src_py\\sprite.py",
    "ast_data": "FunctionDef name:layer arg:self arguments arg Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "set_ticks",
    "source_code": "def set_ticks(self, ticks, *, labels=None, minor=False, **kwargs):\n    if np.iterable(ticks):\n        self.long_axis.set_ticks(ticks, labels=labels, minor=minor, **kwargs)\n        self._locator = self.long_axis.get_major_locator()\n    else:\n        self._locator = ticks\n        self.long_axis.set_major_locator(self._locator)\n    self.stale = True",
    "docstring": "Set tick locations. Parameters ---------- ticks : 1D array-like List of tick locations. labels : list of str, optional List of tick labels. If not set, the labels show the data value. minor : bool, default: False If `.Text~.Axes.tick_params`.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\colorbar.py",
    "ast_data": "FunctionDef name:set_ticks arg:self arg:ticks arguments arg arg arg arg arg If Call Call Assign Call Assign Call Assign"
  },
  {
    "library": "tensorflow",
    "name": "zeros_like_v2",
    "source_code": "@tf_export('zeros_like', v1=[])\n@dispatch.register_unary_elementwise_api\n@dispatch.add_dispatch_support\ndef zeros_like_v2(input, dtype=None, name=None, layout=None):\n    return zeros_like_impl(input, dtype, name, optimize=True, layout=layout)",
    "docstring": "Creates a tensor with all elements set to zero. See also . Given a single tensor or array-like object (), this operation returns a tensor of the same type and shape as with all elements set to zero. Optionally, you can use to specify a new type for the returned tensor. Note that the layout of the input tensor is not preserved if the op is used inside tf.function. To obtain a tensor with the same layout as the input, chain the returned value to a . Examples: >>> tensor = tf.constant([[1, 2, 3], [4, 5, 6]]) >>> tf.zeros_like(tensor) >>> tf.zeros_like(tensor, dtype=tf.float32) >>> tf.zeros_like([[1, 2, 3], [4, 5, 6]]) Args: input: A or array-like object. dtype: A type for the returned . Must be , , , , , , , , , , , or (optional). name: A name for the operation (optional). layout: Optional, . If provided, the result is a [DTensor]( with the provided layout. Returns: A with all elements set to zero.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\array_ops.py",
    "ast_data": "FunctionDef name:zeros_like_v2 arg:input arg:dtype arg:name arg:layout arguments arg arg arg arg Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "kaiser_bessel_derived_window",
    "source_code": "@tf_export('signal.kaiser_bessel_derived_window')\n@dispatch.add_dispatch_support\ndef kaiser_bessel_derived_window(window_length, beta=12.0, dtype=dtypes.float32, name=None):\n    with ops.name_scope(name, 'kaiser_bessel_derived_window'):\n        window_length = _check_params(window_length, dtype)\n        halflen = window_length // 2\n        kaiserw = kaiser_window(halflen + 1, beta, dtype=dtype)\n        kaiserw_csum = math_ops.cumsum(kaiserw)\n        halfw = math_ops.sqrt(kaiserw_csum[:-1] / kaiserw_csum[-1])\n        window = array_ops.concat((halfw, halfw[::-1]), axis=0)\n    return window",
    "docstring": "Generate a [Kaiser Bessel derived window][kbd]. Args: window_length: A scalar indicating the window length to generate. beta: Beta parameter for Kaiser window. dtype: The data type to produce. Must be a floating point type. name: An optional name for the operation. Returns: A of shape of type . [kbd]:",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\signal\\window_ops.py",
    "ast_data": "FunctionDef name:kaiser_bessel_derived_window arg:window_length arg:beta arg:dtype arg:name arguments arg arg arg arg With Call Assign Call Assign Assign Call Assign Call Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "filename",
    "source_code": "@property\ndef filename(self) -> _Optional[str]:\n    return self._get_filename()",
    "docstring": "Returns the file name associated with this storage. The file name will be a string if the storage is on CPU and was created via :meth: with `` otherwise.",
    "type": "method",
    "file_path": "pytorch\\torch\\storage.py",
    "ast_data": "FunctionDef name:filename arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "set_data",
    "source_code": "def set_data(self, A):\n    if isinstance(A, PIL.Image.Image):\n        A = pil_to_array(A)\n    self._A = self._normalize_image_array(A)\n    self._imcache = None\n    self.stale = True",
    "docstring": "Set the image array. Note that this function does *not* update the normalization used. Parameters ---------- A : array-like or",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\image.py",
    "ast_data": "FunctionDef name:set_data arg:self arg:A arguments arg arg If Call Assign Call Assign Call Assign Assign"
  },
  {
    "library": "pytorch",
    "name": "_is_fake_quant_script_module",
    "source_code": "def _is_fake_quant_script_module(mod):\n    if isinstance(mod, torch.jit.RecursiveScriptModule):\n        suffix = mod._c.qualified_name.split('.', 1)[1]\n        name = re.sub('\\\\.___torch_mangle_\\\\d+', '', suffix)\n        return name == 'torch.ao.quantization.fake_quantize.FakeQuantize' or name == 'torch.ao.quantization.fake_quantize.FusedMovingAvgObsFakeQuantize'\n    return False",
    "docstring": "Return true if given mod is an instance of FakeQuantize script module.",
    "type": "function",
    "file_path": "pytorch\\torch\\ao\\quantization\\fake_quantize.py",
    "ast_data": "FunctionDef name:_is_fake_quant_script_module arg:mod arguments arg If Call Assign Call Assign Call Return return:yes BoolOp Compare Compare Return return:yes"
  },
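The regex in _is_fake_quant_script_module strips TorchScript's name-mangling segment before comparing qualified names; a minimal demonstration on a hypothetical mangled name:

import re

suffix = 'torch.ao.quantization.fake_quantize.___torch_mangle_3.FakeQuantize'
name = re.sub(r'\.___torch_mangle_\d+', '', suffix)
print(name)  # torch.ao.quantization.fake_quantize.FakeQuantize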
  {
    "library": "pytorch",
    "name": "is_grad_dtype",
    "source_code": "def is_grad_dtype(dtype: torch.dtype) -> bool:\n    return dtype.is_floating_point or is_complex_dtype(dtype)",
    "docstring": "Checks if the dtype can require a gradient.",
    "type": "function",
    "file_path": "pytorch\\torch\\_prims_common\\__init__.py",
    "ast_data": "FunctionDef name:is_grad_dtype arg:dtype arguments arg Return return:yes BoolOp Call"
  },
  {
    "library": "tensorflow",
    "name": "assert_same_float_dtype",
    "source_code": "@tf_export('debugging.assert_same_float_dtype', v1=['debugging.assert_same_float_dtype', 'assert_same_float_dtype'])\n@dispatch.add_dispatch_support\n@deprecation.deprecated_endpoints('assert_same_float_dtype')\ndef assert_same_float_dtype(tensors=None, dtype=None):\n    if tensors:\n        dtype = _assert_same_base_type(tensors, dtype)\n    if not dtype:\n        dtype = dtypes.float32\n    elif not dtype.is_floating:\n        raise ValueError('Expected floating point type, got %s.' % dtype)\n    return dtype",
    "docstring": "Validate and return float type based on and . For ops such as matrix multiplication, inputs and weights must be of the same float type. This function validates that all are the same type, validates that type is (if supplied), and returns the type. Type must be a floating point type. If neither nor is supplied, the function will return . Args: tensors: Tensors of input values. Can include elements, which will be ignored. dtype: Expected type. Returns: Validated type. Raises: ValueError: if neither nor is supplied, or result is not float, or the common type of the inputs is not a floating point type.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\check_ops.py",
    "ast_data": "FunctionDef name:assert_same_float_dtype arg:tensors arg:dtype arguments arg arg If Assign Call If Assign If Raise Call Return return:yes Call Call"
  },
  {
    "library": "sphinx",
    "name": "html_visit_inheritance_diagram",
    "source_code": "def html_visit_inheritance_diagram(self: HTML5Translator, node: inheritance_diagram) -> None:\n    graph = node['graph']\n    graph_hash = get_graph_hash(node)\n    name = 'inheritance%s' % graph_hash\n    graphviz_output_format = self.config.graphviz_output_format.upper()\n    current_filename = os.path.basename(self.builder.current_docname + self.builder.out_suffix)\n    urls = {}\n    pending_xrefs = cast('Iterable[addnodes.pending_xref]', node)\n    for child in pending_xrefs:\n        if child.get('refuri') is not None:\n            if not child.get('internal', True):\n                refname = child['refuri'].rsplit('#', 1)[-1]\n            else:\n                refname = child['reftitle']\n            urls[refname] = child.get('refuri')\n        elif child.get('refid') is not None:\n            if graphviz_output_format == 'SVG':\n                urls[child['reftitle']] = current_filename + '#' + child.get('refid')\n            else:\n                urls[child['reftitle']] = '#' + child.get('refid')\n    dotcode = graph._generate_dot(name, urls, config=self.config)\n    render_dot_html(self, node, dotcode, {}, 'inheritance', 'inheritance', alt='Inheritance diagram of ' + node['content'])\n    raise nodes.SkipNode",
    "docstring": "Output the graph for HTML. This will insert a PNG with clickable image map.",
    "type": "function",
    "file_path": "sphinx\\sphinx\\ext\\inheritance_diagram.py",
    "ast_data": "FunctionDef name:html_visit_inheritance_diagram arg:self arg:node arguments arg arg Assign Assign Call Assign Assign Call Assign Call Assign Assign Call For If Compare Call If Call Assign Call Assign Assign Call If Compare Call If Compare Assign Call Assign Call Assign Call Call Raise"
  },
  {
    "library": "tensorflow",
    "name": "_infer_state_dtype",
    "source_code": "def _infer_state_dtype(explicit_dtype, state):\n    if explicit_dtype is not None:\n        return explicit_dtype\n    elif nest.is_nested(state):\n        inferred_dtypes = [element.dtype for element in nest.flatten(state)]\n        if not inferred_dtypes:\n            raise ValueError(f'Unable to infer dtype from argument state={state}.')\n        all_same = all((x == inferred_dtypes[0] for x in inferred_dtypes))\n        if not all_same:\n            raise ValueError(f'Argument state={state} has tensors of different inferred dtypes. Unable to infer a single representative dtype. Dtypes received: {inferred_dtypes}')\n        return inferred_dtypes[0]\n    else:\n        return state.dtype",
    "docstring": "Infer the dtype of an RNN state. Args: explicit_dtype: explicitly declared dtype or None. state: RNN's hidden state. Must be a Tensor or a nested iterable containing Tensors. Returns: dtype: inferred dtype of hidden state. Raises: ValueError: if has heterogeneous dtypes or is empty.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\rnn.py",
    "ast_data": "FunctionDef name:_infer_state_dtype arg:explicit_dtype arg:state arguments arg arg If Compare Return return:yes If Call Assign Call If Raise Call Assign Call Compare If Raise Call Return return:yes Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_BesselY1Grad",
    "source_code": "@ops.RegisterGradient('BesselY1')\ndef _BesselY1Grad(op: ops.Operation, grad):\n    x = op.inputs[0]\n    y = op.outputs[0]\n    with ops.control_dependencies([grad]):\n        partial_x = special_math_ops.bessel_y0(x) - math_ops.div(y, x)\n        return grad * partial_x",
    "docstring": "Compute gradient of bessel_y1(x) with respect to its argument.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\math_grad.py",
    "ast_data": "FunctionDef name:_BesselY1Grad arg:op arg:grad arguments arg arg Assign Assign With Call Assign Call Call Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "set_axis_off",
    "source_code": "def set_axis_off(self):\n    self.axison = False\n    self.stale = True",
    "docstring": "Hide all visual components of the x- and y-axis. This sets a flag to suppress drawing of all axis decorations, i.e. axis labels, axis spines, and the axis tick component (tick markers, tick labels, and grid lines). Individual visibility settings of these components are ignored as long as is in effect.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\axes\\_base.py",
    "ast_data": "FunctionDef name:set_axis_off arg:self arguments arg Assign Assign"
  },
  {
    "library": "tensorflow",
    "name": "_AddOpInternal",
    "source_code": "def _AddOpInternal(self, op: ops.Operation):\n    if op.type in ['PartitionedCall', 'StatefulPartitionedCall']:\n        op._add_control_input(self.GetControlPivot().op)\n    if not op.inputs:\n        control_inputs, external_inputs = self._RemoveExternalControlEdges(op)\n        if not control_inputs:\n            op._add_control_input(self.GetControlPivot().op)\n        for x in op.outputs:\n            self._values.add(x.name)\n    else:\n        for index in range(len(op.inputs)):\n            x = op.inputs[index]\n            real_x = self.AddValue(x)\n            if real_x != x:\n                op._update_input(index, real_x)\n        _, external_inputs = self._RemoveExternalControlEdges(op)\n        self._MaybeAddControlDependency(op)\n        for x in op.outputs:\n            self._values.add(x.name)\n    if external_inputs:\n        with ops.control_dependencies(None):\n            self.Enter()\n            external_inputs = [array_ops.identity(x.outputs[0]).op for x in external_inputs if x.outputs]\n            self.Exit()\n        op._add_control_inputs(external_inputs)\n    if self._outer_context or not util.IsLoopExit(op):\n        op.graph.prevent_fetching(op)\n        for x in op.outputs:\n            op.graph.prevent_feeding(x)\n    if self._outer_context:\n        self._outer_context.AddInnerOp(op)",
    "docstring": "Add to the current context. We move any external control dependencies of the op to the loop pivot, to ensure they get executed.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\control_flow_ops.py",
    "ast_data": "FunctionDef name:_AddOpInternal arg:self arg:op arguments arg arg If Compare Call Call If Assign Call If Call Call For Call For Call Call Assign Assign Call If Compare Call Assign Call Call For Call If With Call Call Assign Call Call Call If BoolOp Call Call For Call If Call"
  },
  {
    "library": "django",
    "name": "bulk_related_objects",
    "source_code": "def bulk_related_objects(self, objs, using=DEFAULT_DB_ALIAS):\n    return self.remote_field.model._base_manager.db_manager(using).filter(**{'%s__pk' % self.content_type_field_name: ContentType.objects.db_manager(using).get_for_model(self.model, for_concrete_model=self.for_concrete_model).pk, '%s__in' % self.object_id_field_name: [obj.pk for obj in objs]})",
    "docstring": "Return all objects related to ``.",
    "type": "method",
    "file_path": "django\\django\\contrib\\contenttypes\\fields.py",
    "ast_data": "FunctionDef name:bulk_related_objects arg:self arg:objs arg:using arguments arg arg arg Return return:yes Call Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "BoolGauge",
    "source_code": "@tf_export('__internal__.monitoring.BoolGauge', v1=[])\nclass BoolGauge(Metric):\n    __slots__ = []\n\n    def __init__(self, name, description, *labels):\n        super(BoolGauge, self).__init__('BoolGauge', _bool_gauge_methods, len(labels), name, description, *labels)\n\n    def get_cell(self, *labels):\n        return BoolGaugeCell(super(BoolGauge, self).get_cell(*labels))",
    "docstring": "A stateful class for updating a gauge-like bool metric. This class encapsulates a set of boolean values (or a single value for a label-less metric). Each value is identified by a tuple of labels. The class allows the user to set each value.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\monitoring.py",
    "ast_data": "ClassDef name:BoolGauge Assign FunctionDef name:__init__ arg:self arg:name arg:description arguments arg arg arg arg Call Call Call FunctionDef name:get_cell arg:self arguments arg arg Return return:yes Call Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "inplace_update",
    "source_code": "@deprecation.deprecated(None, 'Prefer tf.tensor_scatter_nd_update, which offers the same functionality with well-defined read-write semantics.')\ndef inplace_update(x, i, v):\n    return alias_inplace_update(gen_array_ops.deep_copy(x), i, v)",
    "docstring": "Applies an inplace update on input x at index i with value v. Note that this function is not actually inplace - it allocates a copy of x. The utility is not avoiding memory copies but rather specifying a sparse update. If i is None, x and v must be the same shape. Computes y = x; y = v; If i is a scalar, x has a rank 1 higher than v's. Computes y = x; y[i, :] = v; Otherwise, x and v must have the same rank. Computes y = x; y[i, :] = v; Args: x: A Tensor. i: None, a scalar or a vector. v: A Tensor. Returns: Returns y, which is guaranteed not to be an alias of x.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\inplace_ops.py",
    "ast_data": "FunctionDef name:inplace_update arg:x arg:i arg:v arguments arg arg arg Return return:yes Call Call Call"
  },
  {
    "library": "pandas",
    "name": "_with_infer",
    "source_code": "@classmethod\ndef _with_infer(cls, *args, **kwargs):\n    result = cls(*args, **kwargs)\n    if result.dtype == _dtype_obj and (not result._is_multi):\n        values = lib.maybe_convert_objects(result._values)\n        if values.dtype.kind in 'iufb':\n            return Index(values, name=result.name)\n    return result",
    "docstring": "Constructor that uses the 1.0.x behavior inferring numeric dtypes for ndarray[object] inputs.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\indexes\\base.py",
    "ast_data": "FunctionDef name:_with_infer arg:cls arguments arg arg arg Assign Call If BoolOp Compare Assign Call If Compare Return return:yes Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "write_raw_pb",
    "source_code": "@tf_export('summary.experimental.write_raw_pb', v1=[])\ndef write_raw_pb(tensor, step=None, name=None):\n    with ops.name_scope(name, 'write_raw_pb') as scope:\n        if _summary_state.writer is None:\n            return constant_op.constant(False)\n        if step is None:\n            step = get_step()\n            if step is None:\n                raise ValueError('No step set. Please specify one either through the `step` argument or through tf.summary.experimental.set_step()')\n\n        def record():\n            with ops.device('cpu:0'):\n                raw_summary_op = gen_summary_ops.write_raw_proto_summary(_summary_state.writer._resource, step, array_ops.identity(tensor), name=scope)\n                with ops.control_dependencies([raw_summary_op]):\n                    return constant_op.constant(True)\n        with ops.device('cpu:0'):\n            op = smart_cond.smart_cond(should_record_summaries(), record, _nothing, name='summary_cond')\n            if not context.executing_eagerly():\n                ops.add_to_collection(ops.GraphKeys._SUMMARY_COLLECTION, op)\n            return op",
    "docstring": "Writes a summary using raw protocol buffers. Experimental: this exists to support the usage of V1-style manual summary writing (via the construction of a protocol buffer) with the V2 summary writing API. Args: tensor: the string Tensor holding one or more serialized protobufs step: Explicit -castable monotonic step value for this summary. If omitted, this defaults to , which must not be None. name: Optional string name for this op. Returns: True on success, or false if no summary was written because no default summary writer was available. Raises: ValueError: if a default writer exists, but no step was provided and is None.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\summary_ops_v2.py",
    "ast_data": "FunctionDef name:write_raw_pb arg:tensor arg:step arg:name arguments arg arg arg With Call If Compare Return return:yes Call If Compare Assign Call If Compare Raise Call FunctionDef name:record arguments With Call Assign Call Call With Call Return return:yes Call With Call Assign Call Call If Call Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "SummaryWriter",
    "source_code": "@tf_export('summary.SummaryWriter', v1=[])\nclass SummaryWriter(metaclass=abc.ABCMeta):\n\n    def set_as_default(self, step=None):\n        self.as_default(step).__enter__()\n\n    def as_default(self, step=None):\n        return _SummaryContextManager(self, step)\n\n    def init(self):\n        raise NotImplementedError()\n\n    def flush(self):\n        raise NotImplementedError()\n\n    def close(self):\n        raise NotImplementedError()",
    "docstring": "Interface representing a stateful summary writer object.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\summary_ops_v2.py",
    "ast_data": "ClassDef name:SummaryWriter FunctionDef name:set_as_default arg:self arg:step arguments arg arg Call Call FunctionDef name:as_default arg:self arg:step arguments arg arg Return return:yes Call FunctionDef name:init arg:self arguments arg Raise Call FunctionDef name:flush arg:self arguments arg Raise Call FunctionDef name:close arg:self arguments arg Raise Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_lu_solve_assertions",
    "source_code": "def _lu_solve_assertions(lower_upper, perm, rhs, validate_args):\n    assertions = lu_reconstruct_assertions(lower_upper, perm, validate_args)\n    message = 'Input `rhs` must have at least 2 dimensions.'\n    if rhs.shape.ndims is not None:\n        if rhs.shape.ndims < 2:\n            raise ValueError(message)\n    elif validate_args:\n        assertions.append(check_ops.assert_rank_at_least(rhs, rank=2, message=message))\n    message = '`lower_upper.shape[-1]` must equal `rhs.shape[-1]`.'\n    if lower_upper.shape[-1] is not None and rhs.shape[-2] is not None:\n        if lower_upper.shape[-1] != rhs.shape[-2]:\n            raise ValueError(message)\n    elif validate_args:\n        assertions.append(check_ops.assert_equal(array_ops.shape(lower_upper)[-1], array_ops.shape(rhs)[-2], message=message))\n    return assertions",
    "docstring": "Returns list of assertions related to assumptions.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\linalg\\linalg_impl.py",
    "ast_data": "FunctionDef name:_lu_solve_assertions arg:lower_upper arg:perm arg:rhs arg:validate_args arguments arg arg arg arg Assign Call Assign If Compare If Compare Raise Call If Call Call Assign If BoolOp Compare Compare If Compare Raise Call If Call Call Call Call Return return:yes"
  },
  {
    "library": "django",
    "name": "tuple",
    "source_code": "@property\ndef tuple(self):\n    return (self.min_x, self.min_y, self.max_x, self.max_y)",
    "docstring": "Return a tuple representing the envelope.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\gdal\\envelope.py",
    "ast_data": "FunctionDef name:tuple arg:self arguments arg Return return:yes"
  },
  {
    "library": "scrapy",
    "name": "get_processed_item",
    "source_code": "def get_processed_item(self, item: Any, response: Response | None) -> Any:\n    return item",
    "docstring": "Return a processed item from the spider output. This method is called with a single item from the start seeds or the spider output. It should return the same or a different item, or `~scrapy.http.Response`",
    "type": "method",
    "file_path": "scrapy\\scrapy\\spidermiddlewares\\base.py",
    "ast_data": "FunctionDef name:get_processed_item arg:self arg:item arg:response arguments arg arg arg Return return:yes"
  },
  {
    "library": "numpy",
    "name": "anom",
    "source_code": "def anom(self, axis=None, dtype=None):\n    m = self.mean(axis, dtype)\n    if not axis:\n        return self - m\n    else:\n        return self - expand_dims(m, axis)",
    "docstring": "Compute the anomalies (deviations from the arithmetic mean) along the given axis. Returns an array of anomalies, with the same shape as the input and where the arithmetic mean is computed along the given axis. Parameters ---------- axis : int, optional Axis over which the anomalies are taken. The default is to use the mean of the flattened array as reference. dtype : dtype, optional Type to use in computing the variance. For arrays of integer type the default is float32; for arrays of float types it is the same as the array type. See Also -------- mean : Compute the mean of the array. Examples -------- >>> import numpy as np >>> a = np.ma.array([1,2,3]) >>> a.anom() masked_array(data=[-1., 0., 1.], mask=False, fill_value=1e+20)",
    "type": "method",
    "file_path": "numpy\\numpy\\ma\\core.py",
    "ast_data": "FunctionDef name:anom arg:self arg:axis arg:dtype arguments arg arg arg Assign Call If Return return:yes Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "SharedCache",
    "source_code": "class SharedCache(dict):\n\n    def __init__(self) -> None:\n        self.limit = 128\n        self._after_fork()\n        register_after_fork(self, SharedCache._after_fork)\n\n    def _after_fork(self):\n        self.lock = threading.Lock()\n\n    def get(self, key):\n        with self.lock:\n            return dict.get(self, key)\n\n    def __setitem__(self, key, storage_ref):\n        with self.lock:\n            dict.__setitem__(self, key, storage_ref)\n            if len(self) > self.limit:\n                self.free_dead_references()\n\n    def free_dead_references(self):\n        live = 0\n        for key, storage_ref in list(self.items()):\n            if storage_ref.expired():\n                del self[key]\n            else:\n                live += 1\n        self.limit = max(128, live * 2)",
    "docstring": "Dictionary from multiprocessing handles to StorageWeakRef.",
    "type": "class",
    "file_path": "pytorch\\torch\\multiprocessing\\reductions.py",
    "ast_data": "ClassDef name:SharedCache FunctionDef name:__init__ arg:self arguments arg Assign Call Call FunctionDef name:_after_fork arg:self arguments arg Assign Call FunctionDef name:get arg:self arg:key arguments arg arg With Return return:yes Call FunctionDef name:__setitem__ arg:self arg:key arg:storage_ref arguments arg arg arg With Call If Compare Call Call FunctionDef name:free_dead_references arg:self arguments arg Assign For Call Call If Call Assign Call"
  },
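SharedCache's eviction policy is worth noting: the dead-reference sweep only runs once the dict outgrows `limit`, and `limit` is then reset to max(128, 2 * live), which keeps sweeps amortized as the cache grows. A condensed, self-contained analogue using weakref (the class and names here are illustrative, not the torch implementation):

import threading
import weakref

class WeakValueCache(dict):
    def __init__(self):
        super().__init__()
        self.limit = 128
        self.lock = threading.Lock()

    def __setitem__(self, key, value):
        with self.lock:
            super().__setitem__(key, weakref.ref(value))
            if len(self) > self.limit:
                live = 0
                for k, ref in list(self.items()):
                    if ref() is None:
                        del self[k]  # drop dead references
                    else:
                        live += 1
                self.limit = max(128, live * 2)  # same growth rule as SharedCache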
  {
    "library": "pytorch",
    "name": "adaptive_avg_pool2d",
    "source_code": "def adaptive_avg_pool2d(input: Tensor, output_size: BroadcastingList2[int]) -> Tensor:\n    if not input.is_quantized:\n        raise ValueError(\"Input to 'quantized.functional.adaptive_avg_pool2d' must be quantized!\")\n    return torch.nn.functional.adaptive_avg_pool2d(input, output_size)",
    "docstring": "Applies a 2D adaptive average pooling over a quantized input signal composed of several quantized input planes. .. note:: The input quantization parameters propagate to the output. See :class: for details and output shape. Args: output_size: the target output size (single integer or double-integer tuple)",
    "type": "function",
    "file_path": "pytorch\\torch\\ao\\nn\\quantized\\functional.py",
    "ast_data": "FunctionDef name:adaptive_avg_pool2d arg:input arg:output_size arguments arg arg If Raise Call Return return:yes Call"
  },
  {
    "library": "kornia",
    "name": "KORNIA_CHECK_IS_IMAGE",
    "source_code": "def KORNIA_CHECK_IS_IMAGE(x: Tensor, msg: Optional[str]=None, raises: bool=True, bits: int=8) -> bool:\n    if not raises and (not KORNIA_CHECK_IS_COLOR_OR_GRAY(x, msg, raises)):\n        return False\n    min_val, max_val = (x.min(), x.max())\n    if x.dtype in [float16, float32, float64]:\n        if min_val < 0.0 or max_val > 1.0:\n            return _handle_invalid_range(msg, raises, min_val, max_val)\n    else:\n        max_int_value = 2 ** bits - 1\n        if min_val < 0 or max_val > max_int_value:\n            return _handle_invalid_range(msg, raises, min_val, max_val)\n    return True",
    "docstring": "Check whether an image tensor is ranged properly [0, 1] for float or [0, 2 ** bits] for int. Args: x: image tensor to evaluate. msg: message to show in the exception. raises: bool indicating whether an exception should be raised upon failure. bits: the image bits. The default checks if given integer input image is an 8-bit image (0-255) or not. Raises: TypeException: if all the input tensor has not 1) a shape :math:, 2) [0, 1] for float or [0, 255] for int, 3) and raises is True. Example: >>> img = torch.rand(2, 3, 4, 4) >>> KORNIA_CHECK_IS_IMAGE(img, \"It is not an image\") True",
    "type": "function",
    "file_path": "kornia\\kornia\\core\\check.py",
    "ast_data": "FunctionDef name:KORNIA_CHECK_IS_IMAGE arg:x arg:msg arg:raises arg:bits arguments arg arg arg arg If BoolOp Call Return return:yes Assign Call Call If Compare If BoolOp Compare Compare Return return:yes Call Assign If BoolOp Compare Compare Return return:yes Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "gds_register_buffer",
    "source_code": "def gds_register_buffer(s: Storage) -> None:\n    torch._C._gds_register_buffer(s)",
    "docstring": "Registers a storage on a CUDA device as a cufile buffer. Example:: >>> # xdoctest: +SKIP(\"gds filesystem requirements\") >>> src = torch.randn(1024, device=\"cuda\") >>> s = src.untyped_storage() >>> gds_register_buffer(s) Args: s (Storage): Buffer to register.",
    "type": "function",
    "file_path": "pytorch\\torch\\cuda\\gds.py",
    "ast_data": "FunctionDef name:gds_register_buffer arg:s arguments arg Call"
  },
  {
    "library": "pandas",
    "name": "_is_strictly_monotonic_decreasing",
    "source_code": "@final\n@property\ndef _is_strictly_monotonic_decreasing(self) -> bool:\n    return self.is_unique and self.is_monotonic_decreasing",
    "docstring": "Return if the index is strictly monotonic decreasing (only decreasing) values. Examples -------- >>> Index([3, 2, 1])._is_strictly_monotonic_decreasing True >>> Index([3, 2, 2])._is_strictly_monotonic_decreasing False >>> Index([3, 1, 2])._is_strictly_monotonic_decreasing False",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\indexes\\base.py",
    "ast_data": "FunctionDef name:_is_strictly_monotonic_decreasing arg:self arguments arg Return return:yes BoolOp"
  },
  {
    "library": "kornia",
    "name": "apply_transform",
    "source_code": "def apply_transform(self, input: Tensor, params: Dict[str, Tensor], flags: Dict[str, Any], transform: Optional[Tensor]=None) -> Tensor:\n    return input.add(params['gradient'].to(input)).clamp(0, 1)",
    "docstring": "Apply random gaussian gradient illumination to the input image.",
    "type": "method",
    "file_path": "kornia\\kornia\\augmentation\\_2d\\intensity\\linear_illumination.py",
    "ast_data": "FunctionDef name:apply_transform arg:self arg:input arg:params arg:flags arg:transform arguments arg arg arg arg arg Return return:yes Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_dropout",
    "source_code": "def _dropout(self, values, salt_prefix, recurrent_noise, keep_prob, shallow_filtered_substructure=None):\n    if shallow_filtered_substructure is None:\n        shallow_filtered_substructure = values\n    if not self._variational_recurrent:\n\n        def dropout(i, do_dropout, v):\n            if not isinstance(do_dropout, bool) or do_dropout:\n                return nn_ops.dropout_v2(v, rate=1.0 - keep_prob, seed=self._gen_seed(salt_prefix, i))\n            else:\n                return v\n        return _enumerated_map_structure_up_to(shallow_filtered_substructure, dropout, *[shallow_filtered_substructure, values])\n    else:\n\n        def dropout(i, do_dropout, v, n):\n            if not isinstance(do_dropout, bool) or do_dropout:\n                return self._variational_recurrent_dropout_value(i, v, n, keep_prob)\n            else:\n                return v\n        return _enumerated_map_structure_up_to(shallow_filtered_substructure, dropout, *[shallow_filtered_substructure, values, recurrent_noise])",
    "docstring": "Decides whether to perform standard dropout or recurrent dropout.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\layers\\legacy_rnn\\rnn_cell_wrapper_impl.py",
    "ast_data": "FunctionDef name:_dropout arg:self arg:values arg:salt_prefix arg:recurrent_noise arg:keep_prob arg:shallow_filtered_substructure arguments arg arg arg arg arg arg If Compare Assign If FunctionDef name:dropout arg:i arg:do_dropout arg:v arguments arg arg arg If BoolOp Call Return return:yes Call Call Return return:yes Return return:yes Call FunctionDef name:dropout arg:i arg:do_dropout arg:v arg:n arguments arg arg arg arg If BoolOp Call Return return:yes Call Return return:yes Return return:yes Call"
  },
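The distinction this wrapper makes is that standard dropout draws a fresh mask at every call, while variational recurrent dropout samples one mask and reuses it across time steps. A minimal sketch of that contrast using public TF ops (not the wrapper's internal API; the mask construction is an illustrative assumption):

```python
import tensorflow as tf

x = tf.ones([4, 8])
keep_prob = 0.5

# Standard dropout: a new random mask on every call.
step1 = tf.nn.dropout(x, rate=1.0 - keep_prob)
step2 = tf.nn.dropout(x, rate=1.0 - keep_prob)  # different mask than step1

# Variational recurrent dropout: sample the mask once, rescale, and reuse
# the same mask for every time step.
mask = tf.floor(keep_prob + tf.random.uniform(tf.shape(x)))
steps = [x * mask / keep_prob for _ in range(3)]  # identical mask each step
```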
  {
    "library": "pytorch",
    "name": "_separate_input_attributes_from_arguments",
    "source_code": "def _separate_input_attributes_from_arguments(self, param_schemas: Sequence[onnxscript.values.ParamSchema], args: Sequence[fx_type_utils.TensorLike | str | int | float | bool | list | complex | None], kwargs: dict[str, fx_type_utils.Argument], fill_defaults: bool=True) -> tuple[list[Any], dict[str, Any]]:\n    import onnx\n    onnx_inputs: list[Any] = []\n    onnx_attributes: dict[str, Any] = {}\n    copy_kwargs = kwargs.copy()\n    for i, param in enumerate(param_schemas):\n        if param.is_variadic_input:\n            onnx_inputs.extend(args[i:])\n            args = []\n            continue\n        if i < len(args):\n            if param.is_input:\n                onnx_inputs.append(args[i])\n            else:\n                onnx_attributes[param.name] = args[i]\n        elif param.name in copy_kwargs:\n            if param.is_input:\n                onnx_inputs.append(copy_kwargs[param.name])\n                copy_kwargs.pop(param.name)\n            else:\n                onnx_attributes[param.name] = copy_kwargs[param.name]\n        elif param.is_attribute and self.attributes[param.name].default_value.type != onnx.AttributeProto.UNDEFINED:\n            if fill_defaults:\n                onnx_attributes[param.name] = param.default\n        elif param.is_input:\n            if fill_defaults:\n                onnx_inputs.append(None)\n    for k, v in copy_kwargs.items():\n        if k not in onnx_attributes and v is not None:\n            onnx_attributes[k] = v\n    return (onnx_inputs, onnx_attributes)",
    "docstring": "Separate Python args and kwargs into ONNX inputs and attributes. Extra_kwargs are ignored if their values are None. For example, if the OpSchema has an attribute \"rounding_mode\" and the caller provides \"rounding_mode=None\", the attribute \"rounding_mode\" will not be included in the returned attributes when the OnnxFunction signature doesn't have \"rounding_mode\" as an attribute. Args: param_schemas: The parameter schemas of an Op or a OnnxFunction. args: The Python positional arguments supplied by the caller. kwargs: The Python keyword arguments supplied by the caller. fill_defaults: Whether to fill the default values for attributes. Returns: A tuple of two elements: - A list of ONNX inputs. - An dictionary of ONNX attribute names and values. Raises: TypeError: When allow_extra_kwargs is False and there are unknown kwargs. TypeError: When a required input is not provided.",
    "type": "method",
    "file_path": "pytorch\\torch\\onnx\\_internal\\fx\\onnxfunction_dispatcher.py",
    "ast_data": "FunctionDef name:_separate_input_attributes_from_arguments arg:self arg:param_schemas arg:args arg:kwargs arg:fill_defaults arguments arg arg arg arg arg Assign Call For Call If Call Assign If Compare Call If Call Assign If Compare If Call Call Assign If BoolOp Compare If Assign If If Call For Call If BoolOp Compare Compare Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "freeze",
    "source_code": "def freeze(self):\n    self._frozen = True\n    if self._tuple_types is None:\n        raise ValueError(\"Can't freeze an InfeedQueue without setting all tuple types.\")\n    if self._tuple_shapes is None:\n        raise ValueError(\"Can't freeze an InfeedQueue without setting all tuple shapes.\")\n    for shape in self._tuple_shapes:\n        if shape.dims is None:\n            raise ValueError(\"Can't freeze an InfeedQueue without setting all tuple shapes.\")\n    for policy in self._sharding_policies:\n        policy.freeze()\n    self._validate()",
    "docstring": "Freezes the InfeedQueue so it can no longer be modified. The configuration is implicitly frozen before any host-side or device-side Ops are generated. The configuration cannot be frozen until the types and shapes of the tuple elements have been set. Raises: ValueError: if the types or shapes of the tuple elements have not been set.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\tpu\\tpu_feed.py",
    "ast_data": "FunctionDef name:freeze arg:self arguments arg Assign If Compare Raise Call If Compare Raise Call For If Compare Raise Call For Call Call"
  },
  {
    "library": "tensorflow",
    "name": "get_entries",
    "source_code": "def get_entries(attr_name):\n    assert attr_name in ['inputs', 'outputs']\n    entries = {}\n    for op_type in ops._gradient_registry.list():\n        if op_type in _EXCLUDED_OPS:\n            continue\n        num_values = _get_num_inputs_outputs(op_type)[0 if attr_name == 'inputs' else 1]\n        gradient_fn = ops._gradient_registry.lookup(op_type)\n        if gradient_fn is None:\n            if num_values != -1:\n                entries[op_type] = '{\"%s\"},' % op_type\n            continue\n        used_tensors = _live_tensors(gradient_fn, attr_name=attr_name)\n        if used_tensors is _ALL:\n            continue\n        elif not used_tensors:\n            entries[op_type] = '{\"%s\"},' % op_type\n        else:\n            all_tensors = set(range(num_values))\n            unused_tensors = all_tensors - used_tensors\n            if unused_tensors:\n                unused_tensor_list = sorted(list(unused_tensors))\n                entries[op_type] = '{\"%s\", %d, {%s}},' % (op_type, len(unused_tensor_list), ', '.join((str(i) for i in unused_tensor_list)))\n    return entries",
    "docstring": "Returns the dict of entries. Each entry is of the form {op_name, {true|false, indices}} true: All values are unused. false: are the only unused indices. Note: ops for which all values are used are not printed. Args: attr_name: inputs or outputs. Returns: A dict from op_type to formatted entry in the dict.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\gradient_input_output_exclusions.py",
    "ast_data": "FunctionDef name:get_entries arg:attr_name arguments arg Compare Assign For Call If Compare Assign Call Compare Assign Call If Compare If Compare Assign Assign Call If Compare If Assign Assign Call Call Assign If Assign Call Call Assign Call Call Call Return return:yes"
  },
  {
    "library": "pandas",
    "name": "_valid_locales",
    "source_code": "def _valid_locales(locales: list[str] | str, normalize: bool) -> list[str]:\n    return [loc for loc in (locale.normalize(loc.strip()) if normalize else loc.strip() for loc in locales) if can_set_locale(loc)]",
    "docstring": "Return a list of normalized locales that do not throw an `` on each locale. Returns ------- valid_locales : list A list of valid locales.",
    "type": "function",
    "file_path": "pandas\\pandas\\_config\\localization.py",
    "ast_data": "FunctionDef name:_valid_locales arg:locales arg:normalize arguments arg arg Return return:yes Call Call Call Call"
  },
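The comprehension above filters on `can_set_locale`. A rough sketch of such a check against the standard `locale` module; `can_set_locale_sketch` is a hypothetical stand-in, not pandas' actual helper:

```python
import locale

def can_set_locale_sketch(lc: str) -> bool:
    # Try to set the locale, restoring the previous setting afterwards.
    current = locale.setlocale(locale.LC_ALL)
    try:
        locale.setlocale(locale.LC_ALL, lc)
        return True
    except (ValueError, locale.Error):
        return False
    finally:
        locale.setlocale(locale.LC_ALL, current)

print(can_set_locale_sketch(""))  # "" asks for the user's default locale
```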
  {
    "library": "tensorflow",
    "name": "set_clang_compiler_path",
    "source_code": "def set_clang_compiler_path(environ_cp):\n    default_clang_path = '/usr/lib/llvm-18/bin/clang'\n    if not os.path.exists(default_clang_path):\n        default_clang_path = '/usr/lib/llvm-17/bin/clang'\n        if not os.path.exists(default_clang_path):\n            default_clang_path = '/usr/lib/llvm-16/bin/clang'\n        if not os.path.exists(default_clang_path):\n            default_clang_path = shutil.which('clang') or ''\n    clang_compiler_path = prompt_loop_or_load_from_env(environ_cp, var_name='CLANG_COMPILER_PATH', var_default=default_clang_path, ask_for_var='Please specify the path to clang executable.', check_success=os.path.exists, resolve_symlinks=True, error_msg='Invalid clang path. %s cannot be found. Note that TensorFlow now requires clang to compile. You may override this behavior by setting TF_NEED_CLANG=0')\n    write_action_env_to_bazelrc('CLANG_COMPILER_PATH', clang_compiler_path)\n    write_to_bazelrc('build --repo_env=CC=%s' % clang_compiler_path)\n    write_to_bazelrc('build --repo_env=BAZEL_COMPILER=%s' % clang_compiler_path)\n    return clang_compiler_path",
    "docstring": "Set CLANG_COMPILER_PATH and environment variables. Loop over user prompts for clang path until receiving a valid response. Default is used if no input is given. Set CLANG_COMPILER_PATH and write environment variables CC and BAZEL_COMPILER to .bazelrc. Args: environ_cp: (Dict) copy of the os.environ. Returns: string value for clang_compiler_path.",
    "type": "function",
    "file_path": "tensorflow\\configure.py",
    "ast_data": "FunctionDef name:set_clang_compiler_path arg:environ_cp arguments arg Assign If Call Assign If Call Assign If Call Assign BoolOp Call Assign Call Call Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "profile",
    "source_code": "def profile(self):\n    profiles = {}\n    data_generator_func = self._get_profile_data_generator()\n    for device_index, device_stats in enumerate(self._run_metadata.step_stats.dev_stats):\n        pprof_proto = self._get_pprof_proto(data_generator_func(device_stats))\n        if not pprof_proto.sample:\n            print('Not enough data to create profile for device %s. Did you pass RunMetadata to session.run call?' % device_stats.device)\n            continue\n        device_count = len(self._run_metadata.step_stats.dev_stats)\n        device_description = 'Device %d of %d: %s' % (device_index + 1, device_count, device_stats.device)\n        device_description_str_index = self._string_table.next_index()\n        pprof_proto.string_table.append(device_description)\n        pprof_proto.comment.append(device_description_str_index)\n        profiles[device_stats.device] = pprof_proto\n    return profiles",
    "docstring": "Generates pprof profiles. Returns: Dictionary mapping from device name to proto in format.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\profiler\\pprof_profiler.py",
    "ast_data": "FunctionDef name:profile arg:self arguments arg Assign Assign Call For Call Assign Call Call If Call Assign Call Assign Assign Call Call Call Assign Return return:yes"
  },
  {
    "library": "scipy",
    "name": "sokalsneath",
    "source_code": "def sokalsneath(u, v, w=None):\n    u = _validate_vector(u)\n    v = _validate_vector(v)\n    if u.dtype == v.dtype == bool and w is None:\n        ntt = (u & v).sum()\n    elif w is None:\n        ntt = (u * v).sum()\n    else:\n        w = _validate_weights(w)\n        ntt = (u * v * w).sum()\n    nft, ntf = _nbool_correspond_ft_tf(u, v, w=w)\n    denom = np.array(ntt + 2.0 * (ntf + nft))\n    if not denom.any():\n        raise ValueError('Sokal-Sneath dissimilarity is not defined for vectors that are entirely false.')\n    return float(2.0 * (ntf + nft)) / denom",
    "docstring": "Compute the Sokal-Sneath dissimilarity between two boolean 1-D arrays. The Sokal-Sneath dissimilarity between and , .. math:: \\frac{R} {c_{TT} + R} where :math: is the number of occurrences of :math: and :math: for :math:`k >> from scipy.spatial import distance >>> distance.sokalsneath([1, 0, 0], [0, 1, 0]) 1.0 >>> distance.sokalsneath([1, 0, 0], [1, 1, 0]) 0.66666666666666663 >>> distance.sokalsneath([1, 0, 0], [2, 1, 0]) 0.0 >>> distance.sokalsneath([1, 0, 0], [3, 1, 0]) -2.0",
    "type": "function",
    "file_path": "scipy\\scipy\\spatial\\distance.py",
    "ast_data": "FunctionDef name:sokalsneath arg:u arg:v arg:w arguments arg arg arg Assign Call Assign Call If BoolOp Compare Compare Assign Call If Compare Assign Call Assign Call Assign Call Assign Call Assign Call If Call Raise Call Return return:yes Call"
  },
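A quick numeric check of the formula R / (c_TT + R) with R = 2(c_TF + c_FT) against the implementation above, using arbitrarily chosen vectors:

```python
import numpy as np
from scipy.spatial import distance

u = np.array([1, 0, 1, 1], dtype=bool)
v = np.array([1, 1, 0, 1], dtype=bool)

ntt = int((u & v).sum())    # both True
ntf = int((u & ~v).sum())   # True in u, False in v
nft = int((~u & v).sum())   # False in u, True in v
R = 2.0 * (ntf + nft)

assert np.isclose(R / (ntt + R), distance.sokalsneath(u, v))
```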
  {
    "library": "tensorflow",
    "name": "read_model",
    "source_code": "def read_model(input_tflite_file):\n    if not gfile.Exists(input_tflite_file):\n        raise RuntimeError('Input file not found at %r\\n' % input_tflite_file)\n    with gfile.GFile(input_tflite_file, 'rb') as input_file_handle:\n        model_bytearray = bytearray(input_file_handle.read())\n    return read_model_from_bytearray(model_bytearray)",
    "docstring": "Reads a tflite model as a python object. Args: input_tflite_file: Full path name to the input tflite file Raises: RuntimeError: If input_tflite_file path is invalid. IOError: If input_tflite_file cannot be opened. Returns: A python object corresponding to the input tflite file.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\lite\\tools\\flatbuffer_utils.py",
    "ast_data": "FunctionDef name:read_model arg:input_tflite_file arguments arg If Call Raise Call With Call Assign Call Call Return return:yes Call"
  },
  {
    "library": "django",
    "name": "TextChoices",
    "source_code": "class TextChoices(Choices, StrEnum):\n\n    @staticmethod\n    def _generate_next_value_(name, start, count, last_values):\n        return name",
    "docstring": "Class for creating enumerated string choices.",
    "type": "class",
    "file_path": "django\\django\\db\\models\\enums.py",
    "ast_data": "ClassDef name:TextChoices FunctionDef name:_generate_next_value_ arg:name arg:start arg:count arg:last_values arguments arg arg arg arg Return return:yes"
  },
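Because `_generate_next_value_` returns the member name, `enum.auto()` members of a `TextChoices` subclass get their own name as value. A small usage sketch, assuming a Django installation is importable:

```python
from enum import auto

from django.db.models import TextChoices

class Season(TextChoices):
    # auto() resolves through _generate_next_value_, so the value
    # equals the member name itself.
    WINTER = auto()
    SUMMER = "SU", "Summer time"  # explicit (value, label) also works

print(Season.WINTER.value)  # "WINTER"
print(Season.choices)       # [('WINTER', 'Winter'), ('SU', 'Summer time')]
```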
  {
    "library": "pytorch",
    "name": "add_submodule",
    "source_code": "def add_submodule(self, block, prefix):\n    if prefix[-1].isnumeric() and prefix not in self.submodules:\n        name = prefix\n    else:\n        name = f'{prefix}{len(self.submodules)}'\n    self.submodules[name] = block\n    return name",
    "docstring": "Not actually for nn.Modules, but subblocks in generated code are mapped to FX call_module opcodes",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\loop_body.py",
    "ast_data": "FunctionDef name:add_submodule arg:self arg:block arg:prefix arguments arg arg arg If BoolOp Call Compare Assign Assign Call Assign Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "get_backend_context",
    "source_code": "def get_backend_context(backend: str):\n    backends = {'fav2': nullcontext(), 'cudnn': sdpa_kernel(SDPBackend.CUDNN_ATTENTION), 'math': sdpa_kernel(SDPBackend.MATH), 'efficient': sdpa_kernel(SDPBackend.EFFICIENT_ATTENTION), 'fav3': nullcontext(), 'fakv': nullcontext(), 'og-eager': nullcontext()}\n    if backend not in backends:\n        raise ValueError(f'Unknown backend: {backend}. Valid options are: {', '.join(backends.keys())}')\n    return backends[backend]",
    "docstring": "Returns a context manager for the specified backend. Args: backend (str): The name of the backend to use. Valid options are 'fav2', 'cudnn', 'math', 'efficient', 'fav3', 'fakv', 'og-eager'. Returns: A context manager for the specified backend. Raises: ValueError: If an invalid backend is specified.",
    "type": "function",
    "file_path": "pytorch\\benchmarks\\transformer\\score_mod.py",
    "ast_data": "FunctionDef name:get_backend_context arg:backend arguments arg Assign Call Call Call Call Call Call Call If Compare Raise Call Call Call Return return:yes"
  },
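The `sdpa_kernel` contexts in the table above come from `torch.nn.attention`. A hedged usage sketch equivalent to entering the 'math' entry (assuming torch >= 2.3, where `sdpa_kernel` is public):

```python
import torch
import torch.nn.functional as F
from torch.nn.attention import SDPBackend, sdpa_kernel

q = k = v = torch.randn(1, 4, 128, 16)  # (batch, heads, seq, head_dim)
# Restrict scaled_dot_product_attention to the math backend inside the block.
with sdpa_kernel(SDPBackend.MATH):
    out = F.scaled_dot_product_attention(q, k, v)
```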
  {
    "library": "pytorch",
    "name": "flush",
    "source_code": "def flush(self) -> None:\n    raise NotImplementedError",
    "docstring": "Flush the generated kernel and python wrapper code to the source code file.",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\scheduler.py",
    "ast_data": "FunctionDef name:flush arg:self arguments arg Raise"
  },
  {
    "library": "authlib",
    "name": "RequestURINotSupportedError",
    "source_code": "class RequestURINotSupportedError(OAuth2Error):\n    error = 'request_uri_not_supported'",
    "docstring": "The OP does not support use of the request_uri parameter.",
    "type": "class",
    "file_path": "authlib\\authlib\\oidc\\core\\errors.py",
    "ast_data": "ClassDef name:RequestURINotSupportedError Assign"
  },
  {
    "library": "tensorflow",
    "name": "enable_v2_behavior",
    "source_code": "@tf_export(v1=['enable_v2_behavior'])\ndef enable_v2_behavior():\n    _v2_behavior_usage_gauge.get_cell('enable').set(True)\n    tf2.enable()\n    ops.enable_eager_execution()\n    tensor_shape.enable_v2_tensorshape()\n    resource_variables_toggle.enable_resource_variables()\n    tensor.enable_tensor_equality()\n    control_flow_v2_toggles.enable_control_flow_v2()\n    for v2_enabler_name in _DATA_V2_CALLBACKS.list():\n        v2_enabler = _DATA_V2_CALLBACKS.lookup(v2_enabler_name)\n        v2_enabler()",
    "docstring": "Enables TensorFlow 2.x behaviors. This function can be called at the beginning of the program (before , or other structures have been created, and before devices have been initialized. It switches all global behaviors that are different between TensorFlow 1.x and 2.x to behave as intended for 2.x. This function is called in the main TensorFlow file, user should not need to call it, except during complex migrations. @compatibility(TF2) This function is not necessary if you are using TF2. V2 behavior is enabled by default. @end_compatibility",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\compat\\v2_compat.py",
    "ast_data": "FunctionDef name:enable_v2_behavior arguments Call Call Call Call Call Call Call Call For Call Assign Call Call Call"
  },
  {
    "library": "django",
    "name": "capfirst",
    "source_code": "@keep_lazy_text\ndef capfirst(x):\n    if not x:\n        return x\n    if not isinstance(x, str):\n        x = str(x)\n    return x[0].upper() + x[1:]",
    "docstring": "Capitalize the first letter of a string.",
    "type": "function",
    "file_path": "django\\django\\utils\\text.py",
    "ast_data": "FunctionDef name:capfirst arg:x arguments arg If Return return:yes If Call Assign Call Return return:yes Call"
  },
  {
    "library": "numpy",
    "name": "_get_scalar_type_map",
    "source_code": "def _get_scalar_type_map():\n    ct = ctypes\n    simple_types = [ct.c_byte, ct.c_short, ct.c_int, ct.c_long, ct.c_longlong, ct.c_ubyte, ct.c_ushort, ct.c_uint, ct.c_ulong, ct.c_ulonglong, ct.c_float, ct.c_double, ct.c_bool]\n    return {np.dtype(ctype): ctype for ctype in simple_types}",
    "docstring": "Return a dictionary mapping native endian scalar dtype to ctypes types",
    "type": "function",
    "file_path": "numpy\\numpy\\ctypeslib\\_ctypeslib.py",
    "ast_data": "FunctionDef name:_get_scalar_type_map arguments Assign Assign Return return:yes Call"
  },
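For reference, `np.dtype()` accepts ctypes scalar types directly, which is what the dictionary comprehension above is built from:

```python
import ctypes
import numpy as np

# Each ctypes scalar maps to the native-endian NumPy dtype of the same width.
print(np.dtype(ctypes.c_double))  # float64
print(np.dtype(ctypes.c_bool))    # bool
```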
  {
    "library": "tensorflow",
    "name": "_set_metric_attributes",
    "source_code": "def _set_metric_attributes(self):\n    updated_per_output_metrics = []\n    updated_per_output_weighted_metrics = []\n    for i, endpoint in enumerate(self._training_endpoints):\n        if endpoint.should_skip_target():\n            updated_per_output_metrics.append(self._per_output_metrics[i])\n            updated_per_output_weighted_metrics.append(self._per_output_weighted_metrics[i])\n            continue\n        updated_per_output_metrics.append(self._set_per_output_metric_attributes(self._per_output_metrics[i], i))\n        updated_per_output_weighted_metrics.append(self._set_per_output_metric_attributes(self._per_output_weighted_metrics[i], i))\n    if len(self._training_endpoints) > 1:\n        for endpoint in self._training_endpoints:\n            if not endpoint.should_skip_target():\n                endpoint.output_loss_metric = metrics_module.Mean(name=endpoint.loss_name())\n    self._per_output_metrics = updated_per_output_metrics\n    self._per_output_weighted_metrics = updated_per_output_weighted_metrics",
    "docstring": "Sets the metric attributes on the model for all the model outputs.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\training_v1.py",
    "ast_data": "FunctionDef name:_set_metric_attributes arg:self arguments arg Assign Assign For Call If Call Call Call Call Call Call Call If Compare Call For If Call Assign Call Call Assign Assign"
  },
  {
    "library": "pandas",
    "name": "convert_values",
    "source_code": "def convert_values(self) -> None:\n\n    def stringify(value):\n        encoder: Callable\n        if self.encoding is not None:\n            encoder = partial(pprint_thing_encoded, encoding=self.encoding)\n        else:\n            encoder = pprint_thing\n        return encoder(value)\n    lhs, rhs = (self.lhs, self.rhs)\n    if is_term(lhs) and lhs.is_datetime and is_term(rhs) and rhs.is_scalar:\n        v = rhs.value\n        if isinstance(v, (int, float)):\n            v = stringify(v)\n        v = Timestamp(ensure_decoded(v))\n        if v.tz is not None:\n            v = v.tz_convert('UTC')\n        self.rhs.update(v)\n    if is_term(rhs) and rhs.is_datetime and is_term(lhs) and lhs.is_scalar:\n        v = lhs.value\n        if isinstance(v, (int, float)):\n            v = stringify(v)\n        v = Timestamp(ensure_decoded(v))\n        if v.tz is not None:\n            v = v.tz_convert('UTC')\n        self.lhs.update(v)",
    "docstring": "Convert datetimes to a comparable value in an expression.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\computation\\ops.py",
    "ast_data": "FunctionDef name:convert_values arg:self arguments arg FunctionDef name:stringify arg:value arguments arg If Compare Assign Call Assign Return return:yes Call Assign If BoolOp Call Call Assign If Call Assign Call Assign Call Call If Compare Assign Call Call If BoolOp Call Call Assign If Call Assign Call Assign Call Call If Compare Assign Call Call"
  },
  {
    "library": "matplotlib",
    "name": "get_text_width_height_descent",
    "source_code": "def get_text_width_height_descent(self, s, prop, ismath):\n    fontsize = prop.get_size_in_points()\n    if ismath == 'TeX':\n        return self.get_texmanager().get_text_width_height_descent(s, fontsize, renderer=self)\n    dpi = self.points_to_pixels(72)\n    if ismath:\n        dims = self._text2path.mathtext_parser.parse(s, dpi, prop)\n        return dims[0:3]\n    flags = self._text2path._get_hinting_flag()\n    font = self._text2path._get_font(prop)\n    font.set_size(fontsize, dpi)\n    font.set_text(s, 0.0, flags=flags)\n    w, h = font.get_width_height()\n    d = font.get_descent()\n    w /= 64.0\n    h /= 64.0\n    d /= 64.0\n    return (w, h, d)",
    "docstring": "Get the width, height, and descent (offset from the bottom to the baseline), in display coords, of the string *s* with *prop*. Whitespace at the start and the end of *s* is included in the reported width.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\backend_bases.py",
    "ast_data": "FunctionDef name:get_text_width_height_descent arg:self arg:s arg:prop arg:ismath arguments arg arg arg arg Assign Call If Compare Return return:yes Call Call Assign Call If Assign Call Return return:yes Assign Call Assign Call Call Call Assign Call Assign Call Return return:yes"
  },
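A small usage sketch against the Agg backend, whose canvas exposes `get_renderer()`; the metrics come back in display (pixel) units as the docstring says:

```python
import matplotlib
matplotlib.use("Agg")  # raster backend whose renderer we can grab directly
import matplotlib.pyplot as plt
from matplotlib.font_manager import FontProperties

fig = plt.figure()
renderer = fig.canvas.get_renderer()
w, h, d = renderer.get_text_width_height_descent(
    "Hello", FontProperties(size=12), ismath=False
)
print(w, h, d)  # width, height, descent in display units
```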
  {
    "library": "numpy",
    "name": "_promote_fields",
    "source_code": "def _promote_fields(dt1, dt2):\n    if (dt1.names is None or dt2.names is None) or dt1.names != dt2.names:\n        raise DTypePromotionError(f'field names `{dt1.names}` and `{dt2.names}` mismatch.')\n    identical = dt1 is dt2\n    new_fields = []\n    for name in dt1.names:\n        field1 = dt1.fields[name]\n        field2 = dt2.fields[name]\n        new_descr = promote_types(field1[0], field2[0])\n        identical = identical and new_descr is field1[0]\n        if field1[2:] != field2[2:]:\n            raise DTypePromotionError(f\"field titles of field '{name}' mismatch\")\n        if len(field1) == 2:\n            new_fields.append((name, new_descr))\n        else:\n            new_fields.append(((field1[2], name), new_descr))\n    res = dtype(new_fields, align=dt1.isalignedstruct or dt2.isalignedstruct)\n    if identical and res.itemsize == dt1.itemsize:\n        for name in dt1.names:\n            if dt1.fields[name][1] != res.fields[name][1]:\n                return res\n        return dt1\n    return res",
    "docstring": "Perform type promotion for two structured dtypes. Parameters ---------- dt1 : structured dtype First dtype. dt2 : structured dtype Second dtype. Returns ------- out : dtype The promoted dtype Notes ----- If one of the inputs is aligned, the result will be. The titles of both descriptors must match (point to the same field).",
    "type": "function",
    "file_path": "numpy\\numpy\\_core\\_internal.py",
    "ast_data": "FunctionDef name:_promote_fields arg:dt1 arg:dt2 arguments arg arg If BoolOp BoolOp Compare Compare Compare Raise Call Assign Compare Assign For Assign Assign Assign Call Assign BoolOp Compare If Compare Raise Call If Compare Call Call Call Assign Call BoolOp If BoolOp Compare For If Compare Return return:yes Return return:yes Return return:yes"
  },
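A sketch of this behavior through the public `np.promote_types`, which handles structured dtypes in recent NumPy (the exact repr is platform-dependent):

```python
import numpy as np

# Field names must match; each field is promoted independently.
a = np.dtype([("x", np.int32), ("y", np.float32)])
b = np.dtype([("x", np.int64), ("y", np.float32)])
print(np.promote_types(a, b))  # e.g. dtype([('x', '<i8'), ('y', '<f4')])

# Mismatched field names raise DTypePromotionError (a TypeError subclass).
try:
    np.promote_types(a, np.dtype([("z", np.int32)]))
except TypeError as exc:
    print(exc)
```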
  {
    "library": "django",
    "name": "get_connection_params",
    "source_code": "def get_connection_params(self):\n    raise NotImplementedError('subclasses of BaseDatabaseWrapper may require a get_connection_params() method')",
    "docstring": "Return a dict of parameters suitable for get_new_connection.",
    "type": "method",
    "file_path": "django\\django\\db\\backends\\base\\base.py",
    "ast_data": "FunctionDef name:get_connection_params arg:self arguments arg Raise Call"
  },
  {
    "library": "tensorflow",
    "name": "_backward_name",
    "source_code": "def _backward_name(n):\n    return '%s%s_%s' % (_BACKWARD_PREFIX, n, ops.uid())",
    "docstring": "The name of a generated backward defun named n.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\polymorphic_function\\concrete_function.py",
    "ast_data": "FunctionDef name:_backward_name arg:n arguments arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "get_min_max_value",
    "source_code": "def get_min_max_value(self) -> tuple[float, float]:\n    average_min_max_statistics = self._statistics.average_min_max_statistics\n    num_samples = average_min_max_statistics.num_samples\n    if num_samples == 0:\n        raise ValueError(f'num_samples must not be 0 when calibration method is AverageMinMax: {self._calib_opts}')\n    min_value, max_value = (average_min_max_statistics.min_sum / num_samples, average_min_max_statistics.max_sum / num_samples)\n    return (min_value, max_value)",
    "docstring": "Calculates the average of min and max values. Returns: (min_value, max_value): Min and max calculated using AverageMinMax Raises: ValueError: num_samples is 0.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\compiler\\mlir\\quantization\\tensorflow\\calibrator\\calibration_algorithm.py",
    "ast_data": "FunctionDef name:get_min_max_value arg:self arguments arg Assign Assign If Compare Raise Call Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_get_file_names",
    "source_code": "def _get_file_names(file_pattern, shuffle):\n    if isinstance(file_pattern, list):\n        if not file_pattern:\n            raise ValueError('Argument `file_pattern` should not be empty.')\n        file_names = []\n        for entry in file_pattern:\n            file_names.extend(gfile.Glob(entry))\n    else:\n        file_names = list(gfile.Glob(file_pattern))\n    if not file_names:\n        raise ValueError(f'No files match `file_pattern` {file_pattern}.')\n    if not shuffle:\n        file_names = sorted(file_names)\n    return file_names",
    "docstring": "Parse list of file names from pattern, optionally shuffled. Args: file_pattern: File glob pattern, or list of glob patterns. shuffle: Whether to shuffle the order of file names. Returns: List of file names matching . Raises: ValueError: If is empty, or pattern matches no files.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\data\\experimental\\ops\\readers.py",
    "ast_data": "FunctionDef name:_get_file_names arg:file_pattern arg:shuffle arguments arg arg If Call If Raise Call Assign For Call Call Assign Call Call If Raise Call If Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "ObjectIdentityWeakKeyDictionary",
    "source_code": "class ObjectIdentityWeakKeyDictionary(ObjectIdentityDictionary):\n    __slots__ = ['__weakref__']\n\n    def _wrap_key(self, key):\n        return _WeakObjectIdentityWrapper(key)\n\n    def __len__(self):\n        return len(list(self._storage))\n\n    def __iter__(self):\n        keys = self._storage.keys()\n        for key in keys:\n            unwrapped = key.unwrapped\n            if unwrapped is None:\n                del self[key]\n            else:\n                yield unwrapped",
    "docstring": "Like weakref.WeakKeyDictionary, but compares objects with \"is\".",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\util\\object_identity.py",
    "ast_data": "ClassDef name:ObjectIdentityWeakKeyDictionary Assign FunctionDef name:_wrap_key arg:self arg:key arguments arg arg Return return:yes Call FunctionDef name:__len__ arg:self arguments arg Return return:yes Call Call FunctionDef name:__iter__ arg:self arguments arg Assign Call For Assign If Compare"
  },
  {
    "library": "scikit-learn",
    "name": "_resize_state",
    "source_code": "def _resize_state(self):\n    total_n_estimators = self.n_estimators\n    if total_n_estimators < self.estimators_.shape[0]:\n        raise ValueError('resize with smaller n_estimators %d < %d' % (total_n_estimators, self.estimators_[0]))\n    self.estimators_ = np.resize(self.estimators_, (total_n_estimators, self.n_trees_per_iteration_))\n    self.train_score_ = np.resize(self.train_score_, total_n_estimators)\n    if self.subsample < 1 or hasattr(self, 'oob_improvement_'):\n        if hasattr(self, 'oob_improvement_'):\n            self.oob_improvement_ = np.resize(self.oob_improvement_, total_n_estimators)\n            self.oob_scores_ = np.resize(self.oob_scores_, total_n_estimators)\n            self.oob_score_ = np.nan\n        else:\n            self.oob_improvement_ = np.zeros((total_n_estimators,), dtype=np.float64)\n            self.oob_scores_ = np.zeros((total_n_estimators,), dtype=np.float64)\n            self.oob_score_ = np.nan",
    "docstring": "Add additional `` entries to all attributes.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\ensemble\\_gb.py",
    "ast_data": "FunctionDef name:_resize_state arg:self arguments arg Assign If Compare Raise Call Assign Call Assign Call If BoolOp Compare Call If Call Assign Call Assign Call Assign Assign Call Assign Call Assign"
  },
  {
    "library": "pygame",
    "name": "get_init",
    "source_code": "def get_init():\n    return _module_init()",
    "docstring": "returns True if the midi module is currently initialized pygame.midi.get_init(): return bool Returns True if the pygame.midi module is currently initialized. New in pygame 1.9.5.",
    "type": "function",
    "file_path": "pygame\\src_py\\midi.py",
    "ast_data": "FunctionDef name:get_init arguments Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "MatlabFunction",
    "source_code": "class MatlabFunction(np.ndarray):\n\n    def __new__(cls, input_array):\n        obj = np.asarray(input_array).view(cls)\n        return obj",
    "docstring": "Subclass for a MATLAB function. This is a simple subclass of :class: meant to be used by :func: and should not be directly instantiated.",
    "type": "class",
    "file_path": "scipy\\scipy\\io\\matlab\\_mio5_params.py",
    "ast_data": "ClassDef name:MatlabFunction FunctionDef name:__new__ arg:cls arg:input_array arguments arg arg Assign Call Call Return return:yes"
  },
  {
    "library": "scipy",
    "name": "nonzero",
    "source_code": "def nonzero(self):\n    A = self.tocoo()\n    nz_mask = A.data != 0\n    return tuple((idx[nz_mask] for idx in A.coords))",
    "docstring": "Nonzero indices of the array/matrix. Returns a tuple of arrays (row,col) containing the indices of the non-zero elements of the array. Examples -------- >>> from scipy.sparse import csr_array >>> A = csr_array([[1, 2, 0], [0, 0, 3], [4, 0, 5]]) >>> A.nonzero() (array([0, 0, 1, 2, 2], dtype=int32), array([0, 1, 2, 0, 2], dtype=int32))",
    "type": "method",
    "file_path": "scipy\\scipy\\sparse\\_base.py",
    "ast_data": "FunctionDef name:nonzero arg:self arguments arg Assign Call Assign Compare Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "to",
    "source_code": "def to(self, device: Union[torch.device, str]) -> 'BlockMask':\n    mapped_attributes = tree_map_only(torch.Tensor, lambda x: x.to(device), self.as_tuple(flatten=False))\n    return BlockMask(*mapped_attributes)",
    "docstring": "Moves the BlockMask to the specified device. Args: device (torch.device or str): The target device to move the BlockMask to. Can be a torch.device object or a string (e.g., 'cpu', 'cuda:0'). Returns: BlockMask: A new BlockMask instance with all tensor components moved to the specified device. Note: This method does not modify the original BlockMask in-place. Instead, it returns a new BlockMask instance where invidual tensor attributes may or may not be moved to the specified device, depending on their current device placement.",
    "type": "method",
    "file_path": "pytorch\\torch\\nn\\attention\\flex_attention.py",
    "ast_data": "FunctionDef name:to arg:self arg:device arguments arg arg Assign Call arguments arg Call Call Return return:yes Call"
  },
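A hedged usage sketch with `create_block_mask` from `torch.nn.attention.flex_attention` (available in recent PyTorch; `B=None`/`H=None` broadcast the mask over batch and heads):

```python
import torch
from torch.nn.attention.flex_attention import create_block_mask

def causal(b, h, q_idx, kv_idx):
    # Keep only keys at or before the query position.
    return q_idx >= kv_idx

bm = create_block_mask(causal, B=None, H=None, Q_LEN=256, KV_LEN=256,
                       device="cpu")
if torch.cuda.is_available():
    bm_cuda = bm.to("cuda")  # new BlockMask; bm itself is left unchanged
```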
  {
    "library": "tensorflow",
    "name": "scatter_div",
    "source_code": "def scatter_div(self, sparse_delta, use_locking=False, name=None):\n    if not isinstance(sparse_delta, indexed_slices.IndexedSlices):\n        raise TypeError('sparse_delta is not IndexedSlices: %s' % sparse_delta)\n    return gen_state_ops.scatter_div(self._variable, sparse_delta.indices, sparse_delta.values, use_locking=use_locking, name=name)",
    "docstring": "Divide this variable by . Args: sparse_delta: to divide this variable by. use_locking: If , use locking during the operation. name: the name of the operation. Returns: A that will hold the new value of this variable after the scattered division has completed. Raises: TypeError: if is not an .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\ref_variable.py",
    "ast_data": "FunctionDef name:scatter_div arg:self arg:sparse_delta arg:use_locking arg:name arguments arg arg arg arg If Call Raise Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "get_lr",
    "source_code": "@override\ndef get_lr(self) -> list[float]:\n    _warn_get_lr_called_within_step(self)\n    lrs = []\n    step_num = self.last_epoch\n    if step_num > self.total_steps:\n        raise ValueError(f'Tried to step {step_num} times. The specified number of total steps is {self.total_steps}')\n    for group in self.optimizer.param_groups:\n        start_step = 0.0\n        for i, phase in enumerate(self._schedule_phases):\n            end_step = phase['end_step']\n            if step_num <= end_step or i == len(self._schedule_phases) - 1:\n                pct = (step_num - start_step) / (end_step - start_step)\n                computed_lr = self._anneal_func(group[phase['start_lr']], group[phase['end_lr']], pct)\n                if self.cycle_momentum:\n                    computed_momentum = self._anneal_func(group[phase['start_momentum']], group[phase['end_momentum']], pct)\n                break\n            start_step = phase['end_step']\n        lrs.append(computed_lr)\n        if self.cycle_momentum:\n            if self.use_beta1:\n                group['betas'] = (computed_momentum, *group['betas'][1:])\n            else:\n                group['momentum'] = computed_momentum\n    return lrs",
    "docstring": "Compute the learning rate of each parameter group.",
    "type": "method",
    "file_path": "pytorch\\torch\\optim\\lr_scheduler.py",
    "ast_data": "FunctionDef name:get_lr arg:self arguments arg Call Assign Assign If Compare Raise Call For Assign For Call Assign If BoolOp Compare Compare Call Assign Assign Call If Assign Call Assign Call If If Assign Assign Return return:yes"
  },
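A minimal `OneCycleLR` run that exercises this `get_lr` through `sched.step()`; note the step-count guard at the top of the method:

```python
import torch

model = torch.nn.Linear(8, 2)
opt = torch.optim.SGD(model.parameters(), lr=0.1, momentum=0.9)
sched = torch.optim.lr_scheduler.OneCycleLR(opt, max_lr=0.1, total_steps=5)

for _ in range(5):
    opt.zero_grad()
    model(torch.randn(4, 8)).sum().backward()
    opt.step()
    sched.step()  # a 6th call would raise ValueError, per the check above
```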
  {
    "library": "pytorch",
    "name": "add_global_metric",
    "source_code": "def add_global_metric(metric_name: str, metric_value: Any) -> None:\n    global_metrics[metric_name] = metric_value",
    "docstring": "Adds stats that should be emitted with every metric by the current process. If the emit_metrics method specifies a metric with the same name, it will overwrite this value.",
    "type": "function",
    "file_path": "pytorch\\tools\\stats\\upload_metrics.py",
    "ast_data": "FunctionDef name:add_global_metric arg:metric_name arg:metric_value arguments arg arg Assign"
  },
  {
    "library": "tensorflow",
    "name": "EventListenerBaseStreamHandler",
    "source_code": "class EventListenerBaseStreamHandler:\n\n    def __init__(self):\n        pass\n\n    def on_core_metadata_event(self, event):\n        raise NotImplementedError('on_core_metadata_event() is not implemented in the base servicer class')\n\n    def on_graph_def(self, graph_def, device_name, wall_time):\n        raise NotImplementedError('on_graph_def() is not implemented in the base servicer class')\n\n    def on_value_event(self, event):\n        raise NotImplementedError('on_value_event() is not implemented in the base servicer class')",
    "docstring": "Per-stream handler of EventListener gRPC streams.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\debug\\lib\\grpc_debug_server.py",
    "ast_data": "ClassDef name:EventListenerBaseStreamHandler FunctionDef name:__init__ arg:self arguments arg FunctionDef name:on_core_metadata_event arg:self arg:event arguments arg arg Raise Call FunctionDef name:on_graph_def arg:self arg:graph_def arg:device_name arg:wall_time arguments arg arg arg arg Raise Call FunctionDef name:on_value_event arg:self arg:event arguments arg arg Raise Call"
  },
  {
    "library": "scikit-learn",
    "name": "device",
    "source_code": "def device(*array_list, remove_none=True, remove_types=(str,)):\n    array_list = _remove_non_arrays(*array_list, remove_none=remove_none, remove_types=remove_types)\n    if not array_list:\n        return None\n    device_ = _single_array_device(array_list[0])\n    for array in array_list[1:]:\n        device_other = _single_array_device(array)\n        if device_ != device_other:\n            raise ValueError(f'Input arrays use different devices: {device_}, {device_other}')\n    return device_",
    "docstring": "Hardware device where the array data resides on. If the hardware device is not the same for all arrays, an error is raised. Parameters ---------- *array_list : arrays List of array instances from NumPy or an array API compatible library. remove_none : bool, default=True Whether to ignore None objects passed in array_list. remove_types : tuple or list, default=(str,) Types to ignore in array_list. Returns ------- out : device object (see the \"Device Support\" section of the array API spec).",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\utils\\_array_api.py",
    "ast_data": "FunctionDef name:device arguments arg arg arg Assign Call If Return return:no Assign Call For Assign Call If Compare Raise Call Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "compute_partial_dependence",
    "source_code": "def compute_partial_dependence(self, grid, target_features, out):\n    _compute_partial_dependence(self.nodes, grid, target_features, out)",
    "docstring": "Fast partial dependence computation. Parameters ---------- grid : ndarray, shape (n_samples, n_target_features) The grid points on which the partial dependence should be evaluated. target_features : ndarray, shape (n_target_features) The set of target features for which the partial dependence should be evaluated. out : ndarray, shape (n_samples) The value of the partial dependence function on each grid point.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\ensemble\\_hist_gradient_boosting\\predictor.py",
    "ast_data": "FunctionDef name:compute_partial_dependence arg:self arg:grid arg:target_features arg:out arguments arg arg arg arg Call"
  },
  {
    "library": "matplotlib",
    "name": "get_kerning",
    "source_code": "def get_kerning(self, next: Node | None) -> float:\n    advance = self._metrics.advance - self.width\n    kern = 0.0\n    if isinstance(next, Char):\n        kern = self.fontset.get_kern(self.font, self.font_class, self.c, self.fontsize, next.font, next.font_class, next.c, next.fontsize, self.dpi)\n    return advance + kern",
    "docstring": "Return the amount of kerning between this and the given character. This method is called when characters are strung together into to create nodes.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\_mathtext.py",
    "ast_data": "FunctionDef name:get_kerning arg:self arg:next arguments arg arg Assign Assign If Call Assign Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "__init__",
    "source_code": "def __init__(self, marker_pad=0.3, numpoints=None, **kwargs):\n    super().__init__(**kwargs)\n    self._numpoints = numpoints\n    self._marker_pad = marker_pad",
    "docstring": "Parameters ---------- marker_pad : float Padding between points in legend entry. numpoints : int Number of points to show in legend entry. **kwargs Keyword arguments forwarded to .",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\legend_handler.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:marker_pad arg:numpoints arguments arg arg arg arg Call Call Assign Assign"
  },
  {
    "library": "tensorflow",
    "name": "cdf",
    "source_code": "def cdf(self, value, name='cdf'):\n    return self._call_cdf(value, name)",
    "docstring": "Cumulative distribution function. Given random variable , the cumulative distribution function is: Args: value: or . name: Python prepended to names of ops created by this function. Returns: cdf: a of shape with values of type .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\distributions\\distribution.py",
    "ast_data": "FunctionDef name:cdf arg:self arg:value arg:name arguments arg arg arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "complete_wheel",
    "source_code": "def complete_wheel(folder: str) -> str:\n    wheel_name = list_dir(f'/{folder}/dist')[0]\n    if 'pytorch' in folder and (not enable_cuda):\n        print('Repairing Wheel with AuditWheel')\n        check_call(['auditwheel', 'repair', f'dist/{wheel_name}'], cwd=folder)\n        repaired_wheel_name = list_dir(f'/{folder}/wheelhouse')[0]\n        print(f'Moving {repaired_wheel_name} wheel to /{folder}/dist')\n        os.rename(f'/{folder}/wheelhouse/{repaired_wheel_name}', f'/{folder}/dist/{repaired_wheel_name}')\n    else:\n        repaired_wheel_name = wheel_name.replace('linux_aarch64', 'manylinux_2_28_aarch64')\n        print(f'Renaming {wheel_name} wheel to {repaired_wheel_name}')\n        os.rename(f'/{folder}/dist/{wheel_name}', f'/{folder}/dist/{repaired_wheel_name}')\n    print(f'Copying {repaired_wheel_name} to artifacts')\n    shutil.copy2(f'/{folder}/dist/{repaired_wheel_name}', f'/artifacts/{repaired_wheel_name}')\n    return repaired_wheel_name",
    "docstring": "Complete wheel build and put in artifact location",
    "type": "function",
    "file_path": "pytorch\\.ci\\aarch64_linux\\aarch64_wheel_ci_build.py",
    "ast_data": "FunctionDef name:complete_wheel arg:folder arguments arg Assign Call If BoolOp Compare Call Call Assign Call Call Call Assign Call Call Call Call Call Return return:yes"
  },
  {
    "library": "pandas",
    "name": "__arrow_c_stream__",
    "source_code": "def __arrow_c_stream__(self, requested_schema=None):\n    pa = import_optional_dependency('pyarrow', min_version='16.0.0')\n    type = pa.DataType._import_from_c_capsule(requested_schema) if requested_schema is not None else None\n    ca = pa.array(self, type=type)\n    if not isinstance(ca, pa.ChunkedArray):\n        ca = pa.chunked_array([ca])\n    return ca.__arrow_c_stream__()",
    "docstring": "Export the pandas Series as an Arrow C stream PyCapsule. This relies on pyarrow to convert the pandas Series to the Arrow format (and follows the default behavior of `` in its handling of the index, i.e. to ignore it). This conversion is not necessarily zero-copy. Parameters ---------- requested_schema : PyCapsule, default None The schema to which the dataframe should be casted, passed as a PyCapsule containing a C ArrowSchema representation of the requested schema. Returns ------- PyCapsule",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\series.py",
    "ast_data": "FunctionDef name:__arrow_c_stream__ arg:self arg:requested_schema arguments arg arg Assign Call Assign Compare Call Assign Call If Call Assign Call Return return:yes Call"
  },
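A usage sketch of the stream export, assuming a pyarrow >= 16 build whose `pa.chunked_array()` accepts objects exporting `__arrow_c_stream__`:

```python
import pandas as pd
import pyarrow as pa  # pyarrow >= 16 understands the PyCapsule stream protocol

s = pd.Series([1, 2, 3], name="a")
# pa.chunked_array() consumes the stream exported by Series.__arrow_c_stream__.
ca = pa.chunked_array(s)
print(ca.type)  # int64
```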
  {
    "library": "scikit-learn",
    "name": "score_samples",
    "source_code": "def score_samples(self, X):\n    check_is_fitted(self)\n    xp, _ = get_namespace(X)\n    X = validate_data(self, X, dtype=[xp.float64, xp.float32], reset=False)\n    Xr = X - self.mean_\n    n_features = X.shape[1]\n    precision = self.get_precision()\n    log_like = -0.5 * xp.sum(Xr * (Xr @ precision), axis=1)\n    log_like -= 0.5 * (n_features * log(2.0 * np.pi) - fast_logdet(precision))\n    return log_like",
    "docstring": "Return the log-likelihood of each sample. See. \"Pattern Recognition and Machine Learning\" by C. Bishop, 12.2.1 p. 574 or Parameters ---------- X : array-like of shape (n_samples, n_features) The data. Returns ------- ll : ndarray of shape (n_samples,) Log-likelihood of each sample under the current model.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\decomposition\\_pca.py",
    "ast_data": "FunctionDef name:score_samples arg:self arg:X arguments arg arg Call Assign Call Assign Call Assign Assign Assign Call Assign Call Call Call Return return:yes"
  },
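A short usage sketch with the public PCA API; `score()` is the mean of `score_samples()`:

```python
import numpy as np
from sklearn.decomposition import PCA

rng = np.random.RandomState(0)
X = rng.randn(100, 5)

pca = PCA(n_components=2).fit(X)
ll = pca.score_samples(X)      # per-sample log-likelihood, shape (100,)
print(ll.shape, pca.score(X))  # score() averages score_samples()
```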
  {
    "library": "scipy",
    "name": "__init__",
    "source_code": "def __init__(self, dtype, shape):\n    if dtype is not None:\n        dtype = np.dtype(dtype)\n    shape = tuple(shape)\n    if not isshape(shape):\n        raise ValueError(f'invalid shape {shape!r} (must be 2-d)')\n    self.dtype = dtype\n    self.shape = shape",
    "docstring": "Initialize this LinearOperator. To be called by subclasses. `` should be convertible to a length-2 tuple.",
    "type": "method",
    "file_path": "scipy\\scipy\\sparse\\linalg\\_interface.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:dtype arg:shape arguments arg arg arg If Compare Assign Call Assign Call If Call Raise Call Assign Assign"
  },
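The public functional form passes `shape` and a `matvec` callable straight through to this initializer; a minimal sketch:

```python
import numpy as np
from scipy.sparse.linalg import LinearOperator

# shape must be a 2-tuple; dtype is optional and may be None.
A = LinearOperator((3, 3), matvec=lambda x: 2.0 * x, dtype=np.float64)
print(A.matvec(np.ones(3)))  # [2. 2. 2.]
```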
  {
    "library": "tensorflow",
    "name": "_rename_if_arg_found_and_add_loss_reduction_transformer",
    "source_code": "def _rename_if_arg_found_and_add_loss_reduction_transformer(parent, node, full_name, name, logs, arg_names=None, arg_ok_predicate=None, remove_if_ok=False, message=None):\n    for arg_name in arg_names:\n        rename_node = _rename_if_arg_found_transformer(parent, node, full_name, name, logs, arg_name, arg_ok_predicate, remove_if_ok, message)\n        node = rename_node if rename_node else node\n    return node",
    "docstring": "Combination of _rename_if_arg_found and _add_loss_reduction transformers. Args: parent: Parent of node. node: ast.Call node to maybe modify. full_name: full name of function to modify name: name of function to modify logs: list of logs to append to arg_names: list of names of the argument to look for arg_ok_predicate: predicate callable with the ast of the argument value, returns whether the argument value is allowed. remove_if_ok: remove the argument if present and ok as determined by arg_ok_predicate. message: message to print if a non-ok arg is found (and hence, the function is renamed to its compat.v1 version). Returns: node, if it was modified, else None.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\tools\\compatibility\\tf_upgrade_v2.py",
    "ast_data": "FunctionDef name:_rename_if_arg_found_and_add_loss_reduction_transformer arg:parent arg:node arg:full_name arg:name arg:logs arg:arg_names arg:arg_ok_predicate arg:remove_if_ok arg:message arguments arg arg arg arg arg arg arg arg arg For Assign Call Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "request_watch",
    "source_code": "def request_watch(self, node_name, output_slot, debug_op, breakpoint=False):\n    self._debug_ops_state_change_queue.put(_state_change(debug_service_pb2.EventReply.DebugOpStateChange.READ_WRITE if breakpoint else debug_service_pb2.EventReply.DebugOpStateChange.READ_ONLY, node_name, output_slot, debug_op))",
    "docstring": "Request enabling a debug tensor watchpoint or breakpoint. This will let the server send a EventReply to the client side (i.e., the debugged TensorFlow runtime process) to request adding a watch key (i.e., ::) to the list of enabled watch keys. The list applies only to debug ops with the attribute gated_grpc=True. To disable the watch, use . Args: node_name: () name of the node that the to-be-watched tensor belongs to, e.g., \"hidden/Weights\". output_slot: () output slot index of the tensor to watch. debug_op: () name of the debug op to enable. This should not include any attribute substrings. breakpoint: () Iff , the debug op will block and wait until it receives an response from the server. The proto may carry a TensorProto that modifies the value of the debug op's output tensor.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\debug\\lib\\grpc_debug_server.py",
    "ast_data": "FunctionDef name:request_watch arg:self arg:node_name arg:output_slot arg:debug_op arg:breakpoint arguments arg arg arg arg arg Call Call"
  },
  {
    "library": "django",
    "name": "get_port",
    "source_code": "def get_port(self):\n    if settings.USE_X_FORWARDED_PORT and 'HTTP_X_FORWARDED_PORT' in self.META:\n        port = self.META['HTTP_X_FORWARDED_PORT']\n    else:\n        port = self.META['SERVER_PORT']\n    return str(port)",
    "docstring": "Return the port number for the request as a string.",
    "type": "method",
    "file_path": "django\\django\\http\\request.py",
    "ast_data": "FunctionDef name:get_port arg:self arguments arg If BoolOp Compare Assign Assign Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "objects_to_serialize",
    "source_code": "@property\ndef objects_to_serialize(self):\n    objects = {key: value for key, value in self.checkpointable_objects.items() if key in CommonEndpoints.all_checkpointable_objects}\n    objects[constants.KERAS_ATTR] = self._keras_trackable\n    return objects",
    "docstring": "Returns objects to attach to the root object during serialization.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\saving\\saved_model\\serialized_attributes.py",
    "ast_data": "FunctionDef name:objects_to_serialize arg:self arguments arg Assign Call Compare Assign Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "get_xaxis_text2_transform",
    "source_code": "def get_xaxis_text2_transform(self, pad_points):\n    labels_align = mpl.rcParams['xtick.alignment']\n    return (self.get_xaxis_transform(which='tick2') + mtransforms.ScaledTranslation(0, pad_points / 72, self.get_figure(root=False).dpi_scale_trans), 'bottom', labels_align)",
    "docstring": "Returns ------- transform : Transform The transform used for drawing secondary x-axis labels, which will add *pad_points* of padding (in points) between the axis and the label. The x-direction is in data coordinates and the y-direction is in axis coordinates valign : {'center', 'top', 'bottom', 'baseline', 'center_baseline'} The text vertical alignment. halign : {'center', 'left', 'right'} The text horizontal alignment. Notes ----- This transformation is primarily used by the class, and is meant to be overridden by new kinds of projections that may need to place axis elements in different locations.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\axes\\_base.py",
    "ast_data": "FunctionDef name:get_xaxis_text2_transform arg:self arg:pad_points arguments arg arg Assign Return return:yes Call Call Call"
  },
  {
    "library": "django",
    "name": "querystring",
    "source_code": "@register.simple_tag(name='querystring', takes_context=True)\ndef querystring(context, *args, **kwargs):\n    if not args:\n        args = [context.request.GET]\n    params = QueryDict(mutable=True)\n    for d in [*args, kwargs]:\n        if not isinstance(d, Mapping):\n            raise TemplateSyntaxError('querystring requires mappings for positional arguments (got %r instead).' % d)\n        for key, value in d.items():\n            if not isinstance(key, str):\n                raise TemplateSyntaxError('querystring requires strings for mapping keys (got %r instead).' % key)\n            if value is None:\n                params.pop(key, None)\n            elif isinstance(value, Iterable) and (not isinstance(value, str)):\n                params.setlist(key, value)\n            else:\n                params[key] = value\n    query_string = params.urlencode() if params else ''\n    return f'?{query_string}'",
    "docstring": "Build a query string using and arguments. This tag constructs a new query string by adding, removing, or modifying parameters from the given positional and keyword arguments. Positional arguments must be mappings (such as or ), and is used as the starting point if is empty. Keyword arguments are treated as an extra, final mapping. These mappings are processed sequentially, with later arguments taking precedence. A query string prefixed with is returned. Raise TemplateSyntaxError if a positional argument is not a mapping or if keys are not strings. For example:: {# Set a parameter on top of #} {% querystring foo=3 %} {# Remove a key from #} {% querystring foo=None %} {# Use with pagination #} {% querystring page=page_obj.next_page_number %} {# Use a custom `` #} {% querystring my_query_dict foo=3 %} {# Use multiple positional and keyword arguments #} {% querystring my_query_dict my_dict foo=3 bar=None %}",
    "type": "function",
    "file_path": "django\\django\\template\\defaulttags.py",
    "ast_data": "FunctionDef name:querystring arg:context arguments arg arg arg If Assign Assign Call For If Call Raise Call For Call If Call Raise Call If Compare Call If BoolOp Call Call Call Assign Assign Call Return return:yes Call"
  },
  {
    "library": "numpy",
    "name": "chebval",
    "source_code": "def chebval(x, c, tensor=True):\n    c = np.array(c, ndmin=1, copy=True)\n    if c.dtype.char in '?bBhHiIlLqQpP':\n        c = c.astype(np.double)\n    if isinstance(x, (tuple, list)):\n        x = np.asarray(x)\n    if isinstance(x, np.ndarray) and tensor:\n        c = c.reshape(c.shape + (1,) * x.ndim)\n    if len(c) == 1:\n        c0 = c[0]\n        c1 = 0\n    elif len(c) == 2:\n        c0 = c[0]\n        c1 = c[1]\n    else:\n        x2 = 2 * x\n        c0 = c[-2]\n        c1 = c[-1]\n        for i in range(3, len(c) + 1):\n            tmp = c0\n            c0 = c[-i] - c1\n            c1 = tmp + c1 * x2\n    return c0 + c1 * x",
    "docstring": "Evaluate a Chebyshev series at points x. If is of length , this function returns the value: .. math:: p(x) = c_0 * T_0(x) + c_1 * T_1(x) + ... + c_n * T_n(x) The parameter is converted to an array only if it is a tuple or a list, otherwise it is treated as a scalar. In either case, either or its elements must support multiplication and addition both with themselves and with the elements of . If is a 1-D array, then `xctensortensortensorxxcccxcxxcc` is multidimensional. The default value is True. Returns ------- values : ndarray, algebra_like The shape of the return value is described above. See Also -------- chebval2d, chebgrid2d, chebval3d, chebgrid3d Notes ----- The evaluation uses Clenshaw recursion, aka synthetic division.",
    "type": "function",
    "file_path": "numpy\\numpy\\polynomial\\chebyshev.py",
    "ast_data": "FunctionDef name:chebval arg:x arg:c arg:tensor arguments arg arg arg Assign Call If Compare Assign Call If Call Assign Call If BoolOp Call Assign Call If Compare Call Assign Assign If Compare Call Assign Assign Assign Assign Assign For Call Call Assign Assign Assign Return return:yes"
  },
  {
    "library": "kornia",
    "name": "Normalize",
    "source_code": "class Normalize(IntensityAugmentationBase2D):\n\n    def __init__(self, mean: Tensor | tuple[float, ...] | list[float] | float, std: Tensor | tuple[float, ...] | list[float] | float, p: float=1.0, keepdim: bool=False) -> None:\n        super().__init__(p=p, same_on_batch=True, keepdim=keepdim)\n        if isinstance(mean, (int, float)):\n            mean = torch.tensor([mean])\n        if isinstance(std, (int, float)):\n            std = torch.tensor([std])\n        if isinstance(mean, (tuple, list)):\n            mean = torch.tensor(mean)\n        if isinstance(std, (tuple, list)):\n            std = torch.tensor(std)\n        self.flags = {'mean': mean, 'std': std}\n\n    def apply_transform(self, input: Tensor, params: dict[str, Tensor], flags: dict[str, Any], transform: Optional[Tensor]=None) -> Tensor:\n        return normalize(input, flags['mean'], flags['std'])",
    "docstring": "Normalize tensor images with mean and standard deviation. .. math:: \\text{input[channel] = (input[channel] - mean[channel]) / std[channel]} Where is :math: and :math: for channels, Args: mean: Mean for each channel. std: Standard deviations for each channel. p: probability of applying the transformation. keepdim: whether to keep the output shape the same as input (True) or broadcast it to the batch form (False). Return: Normalised tensor with same size as input :math:. .. note:: This function internally uses :func:. Examples: >>> norm = Normalize(mean=torch.zeros(4), std=torch.ones(4)) >>> x = torch.rand(1, 4, 3, 3) >>> out = norm(x) >>> out.shape torch.Size([1, 4, 3, 3])",
    "type": "class",
    "file_path": "kornia\\kornia\\augmentation\\_2d\\intensity\\normalize.py",
    "ast_data": "ClassDef name:Normalize FunctionDef name:__init__ arg:self arg:mean arg:std arg:p arg:keepdim arguments arg arg arg arg arg Call Call If Call Assign Call If Call Assign Call If Call Assign Call If Call Assign Call Assign FunctionDef name:apply_transform arg:self arg:input arg:params arg:flags arg:transform arguments arg arg arg arg arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_SparseMatMul",
    "source_code": "def _SparseMatMul(t1, t2, out_dtype, transpose_a=False, transpose_b=False):\n    assert t1.ref() in is_sparse and t2.ref() in is_sparse\n    t1_sparse = is_sparse[t1.ref()]\n    t2_sparse = is_sparse[t2.ref()]\n    if transpose_b:\n        t2 = array_ops.transpose(t2)\n        transpose_b = False\n    prod = math_ops.matmul(t1, t2, transpose_a=transpose_a, transpose_b=transpose_b, a_is_sparse=t1_sparse, b_is_sparse=t2_sparse)\n    if prod.dtype != out_dtype:\n        prod = math_ops.cast(prod, out_dtype)\n    return prod",
    "docstring": "Helper function to create SparseMatMul op.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\math_grad.py",
    "ast_data": "FunctionDef name:_SparseMatMul arg:t1 arg:t2 arg:out_dtype arg:transpose_a arg:transpose_b arguments arg arg arg arg arg BoolOp Compare Call Compare Call Assign Call Assign Call If Assign Call Assign Assign Call If Compare Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "rank",
    "source_code": "@property\ndef rank(self):\n    return self._ragged_shape.rank",
    "docstring": "The rank of this StructuredTensor. Guaranteed not to be .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\structured\\structured_tensor.py",
    "ast_data": "FunctionDef name:rank arg:self arguments arg Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "_FixedAxisArtistHelperBase",
    "source_code": "class _FixedAxisArtistHelperBase(_AxisArtistHelperBase):\n\n    def __init__(self, loc):\n        super().__init__(_api.check_getitem({'bottom': 0, 'top': 0, 'left': 1, 'right': 1}, loc=loc))\n        self._loc = loc\n        self._pos = {'bottom': 0, 'top': 1, 'left': 0, 'right': 1}[loc]\n        self._path = Path(self._to_xy((0, 1), const=self._pos))\n\n    def get_line(self, axes):\n        return self._path\n\n    def get_line_transform(self, axes):\n        return axes.transAxes\n\n    def get_axislabel_transform(self, axes):\n        return axes.transAxes\n\n    def get_axislabel_pos_angle(self, axes):\n        return dict(left=((0.0, 0.5), 90), right=((1.0, 0.5), 90), bottom=((0.5, 0.0), 0), top=((0.5, 1.0), 0))[self._loc]\n\n    def get_tick_transform(self, axes):\n        return [axes.get_xaxis_transform(), axes.get_yaxis_transform()][self.nth_coord]",
    "docstring": "Helper class for a fixed (in the axes coordinate) axis.",
    "type": "class",
    "file_path": "matplotlib\\lib\\mpl_toolkits\\axisartist\\axislines.py",
    "ast_data": "ClassDef name:_FixedAxisArtistHelperBase FunctionDef name:__init__ arg:self arg:loc arguments arg arg Call Call Call Assign Assign Assign Call Call FunctionDef name:get_line arg:self arg:axes arguments arg arg Return return:yes FunctionDef name:get_line_transform arg:self arg:axes arguments arg arg Return return:yes FunctionDef name:get_axislabel_transform arg:self arg:axes arguments arg arg Return return:yes FunctionDef name:get_axislabel_pos_angle arg:self arg:axes arguments arg arg Return return:yes Call FunctionDef name:get_tick_transform arg:self arg:axes arguments arg arg Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "_ort_session_initializer",
    "source_code": "def _ort_session_initializer(model: str | bytes) -> ort.InferenceSession:\n    import onnxruntime as ort\n    session_options = ort.SessionOptions()\n    session_options.log_severity_level = 3\n    possible_providers = ('CUDAExecutionProvider', 'CPUExecutionProvider')\n    available_providers = set(ort.get_available_providers())\n    providers = [provider for provider in possible_providers if provider in available_providers]\n    return ort.InferenceSession(model, providers=providers, sess_options=session_options)",
    "docstring": "Initialize an ONNX Runtime inference session with the specified model.",
    "type": "function",
    "file_path": "pytorch\\torch\\onnx\\_internal\\exporter\\_onnx_program.py",
    "ast_data": "FunctionDef name:_ort_session_initializer arg:model arguments arg Assign Call Assign Assign Assign Call Call Assign Compare Return return:yes Call"
  },
  {
    "library": "numpy",
    "name": "mean",
    "source_code": "def mean(self, axis=None, dtype=None, out=None, keepdims=np._NoValue):\n    kwargs = {} if keepdims is np._NoValue else {'keepdims': keepdims}\n    if self._mask is nomask:\n        result = super().mean(axis=axis, dtype=dtype, **kwargs)[()]\n    else:\n        is_float16_result = False\n        if dtype is None:\n            if issubclass(self.dtype.type, (ntypes.integer, ntypes.bool)):\n                dtype = mu.dtype('f8')\n            elif issubclass(self.dtype.type, ntypes.float16):\n                dtype = mu.dtype('f4')\n                is_float16_result = True\n        dsum = self.sum(axis=axis, dtype=dtype, **kwargs)\n        cnt = self.count(axis=axis, **kwargs)\n        if cnt.shape == () and cnt == 0:\n            result = masked\n        elif is_float16_result:\n            result = self.dtype.type(dsum * 1.0 / cnt)\n        else:\n            result = dsum * 1.0 / cnt\n    if out is not None:\n        out.flat = result\n        if isinstance(out, MaskedArray):\n            outmask = getmask(out)\n            if outmask is nomask:\n                outmask = out._mask = make_mask_none(out.shape)\n            outmask.flat = getmask(result)\n        return out\n    return result",
    "docstring": "Returns the average of the array elements along given axis. Masked entries are ignored, and result elements which are not finite will be masked. Refer to for full documentation. See Also -------- numpy.ndarray.mean : corresponding function for ndarrays numpy.mean : Equivalent function numpy.ma.average : Weighted average. Examples -------- >>> import numpy as np >>> a = np.ma.array([1,2,3], mask=[False, False, True]) >>> a masked_array(data=[1, 2, --], mask=[False, False, True], fill_value=999999) >>> a.mean() 1.5",
    "type": "method",
    "file_path": "numpy\\numpy\\ma\\core.py",
    "ast_data": "FunctionDef name:mean arg:self arg:axis arg:dtype arg:out arg:keepdims arguments arg arg arg arg arg Assign Compare If Compare Assign Call Call Assign If Compare If Call Assign Call If Call Assign Call Assign Assign Call Assign Call If BoolOp Compare Compare Assign If Assign Call Assign If Compare Assign If Call Assign Call If Compare Assign Call Assign Call Return return:yes Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "switch_map",
    "source_code": "@property\ndef switch_map(self):\n    return self._switch_map",
    "docstring": "The map that records all the Switch ops for the while loop.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\control_flow_state.py",
    "ast_data": "FunctionDef name:switch_map arg:self arguments arg Return return:yes"
  },
  {
    "library": "django",
    "name": "cycle_key",
    "source_code": "def cycle_key(self):\n    self.save()",
    "docstring": "Keep the same data but with a new key. Call save() and it will automatically save a cookie with a new key at the end of the request.",
    "type": "method",
    "file_path": "django\\django\\contrib\\sessions\\backends\\signed_cookies.py",
    "ast_data": "FunctionDef name:cycle_key arg:self arguments arg Call"
  },
  {
    "library": "tensorflow",
    "name": "_forward_event_shape",
    "source_code": "def _forward_event_shape(self, input_shape):\n    return input_shape",
    "docstring": "Subclass implementation for public function.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\distributions\\bijector_impl.py",
    "ast_data": "FunctionDef name:_forward_event_shape arg:self arg:input_shape arguments arg arg Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "_set_oob_score_and_attributes",
    "source_code": "@abstractmethod\ndef _set_oob_score_and_attributes(self, X, y, scoring_function=None):\n    pass",
    "docstring": "Compute and set the OOB score and attributes. Parameters ---------- X : array-like of shape (n_samples, n_features) The data matrix. y : ndarray of shape (n_samples, n_outputs) The target matrix. scoring_function : callable, default=None Scoring function for OOB score. Default depends on whether this is a regression (R2 score) or classification problem (accuracy score).",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\ensemble\\_forest.py",
    "ast_data": "FunctionDef name:_set_oob_score_and_attributes arg:self arg:X arg:y arg:scoring_function arguments arg arg arg arg"
  },
  {
    "library": "tensorflow",
    "name": "_set_type_list_attr",
    "source_code": "def _set_type_list_attr(self, attr_name, data_types) -> None:\n    if not data_types:\n        return\n    if isinstance(data_types[0], dtypes.DType):\n        data_types = [dt.as_datatype_enum for dt in data_types]\n    types_list = attr_value_pb2.AttrValue.ListValue(type=data_types)\n    self._set_attr(attr_name, attr_value_pb2.AttrValue(list=types_list))",
    "docstring": "Private method used to set a list(type) attribute in the node_def.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\ops.py",
    "ast_data": "FunctionDef name:_set_type_list_attr arg:self arg:attr_name arg:data_types arguments arg arg arg If Return return:no If Call Assign Assign Call Call Call"
  },
  {
    "library": "kornia",
    "name": "forward",
    "source_code": "def forward(self) -> Tensor:\n    return torch.unsqueeze(self.model / self.model[2, 2], dim=0)",
    "docstring": "Single-batch homography\". Returns: Homography matrix with shape :math:.",
    "type": "method",
    "file_path": "kornia\\kornia\\geometry\\transform\\image_registrator.py",
    "ast_data": "FunctionDef name:forward arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "get_ticklines",
    "source_code": "def get_ticklines(self, minor=False):\n    if minor:\n        return self.get_minorticklines()\n    return self.get_majorticklines()",
    "docstring": "Return this Axis' tick lines as a list of \\s.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\axis.py",
    "ast_data": "FunctionDef name:get_ticklines arg:self arg:minor arguments arg arg If Return return:yes Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "@deprecation.deprecated(None, 'Use tf.keras.mixed_precision.LossScaleOptimizer instead. LossScaleOptimizer now has all the functionality of DynamicLossScale')\ndef __init__(self, initial_loss_scale=2 ** 15, increment_period=2000, multiplier=2.0):\n    super(DynamicLossScale, self).__init__()\n    self._initial_loss_scale = float(initial_loss_scale)\n    self._increment_period = int(increment_period)\n    self._multiplier = float(multiplier)\n    self._current_loss_scale = self._add_weight(name='current_loss_scale', dtype=dtypes.float32, initial_value=self._initial_loss_scale)\n    self._num_good_steps = self._add_weight(name='good_steps', dtype=dtypes.int64, initial_value=0)",
    "docstring": "Creates the dynamic loss scale. Args: initial_loss_scale: A Python float. The loss scale to use at the beginning. It's better to start this at a very high number, because a loss scale that is too high gets lowered far more quickly than a loss scale that is too low gets raised. The default is 2 ** 15, which is approximately half the maximum float16 value. increment_period: Increases loss scale every consecutive steps that finite gradients are encountered. If a nonfinite gradient is encountered, the count is reset back to zero. multiplier: The multiplier to use when increasing or decreasing the loss scale.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\training\\experimental\\loss_scale.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:initial_loss_scale arg:increment_period arg:multiplier arguments arg arg arg arg Call Call Assign Call Assign Call Assign Call Assign Call Assign Call Call"
  },
  {
    "library": "django",
    "name": "country_code",
    "source_code": "def country_code(self, query):\n    return self.country(query)['country_code']",
    "docstring": "Return the country code for the given IP Address or FQDN.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\geoip2.py",
    "ast_data": "FunctionDef name:country_code arg:self arg:query arguments arg arg Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "tokenize_string",
    "source_code": "def tokenize_string(source: str) -> Iterator[tuple[int, str]]:\n    source = ''.join((create_valid_python_identifier(substring[1:-1]) if is_backtick_quoted else substring for is_backtick_quoted, substring in _split_by_backtick(source)))\n    line_reader = StringIO(source).readline\n    token_generator = tokenize.generate_tokens(line_reader)\n    for toknum, tokval, _, _, _ in token_generator:\n        yield (toknum, tokval)",
    "docstring": "Tokenize a Python source code string. Parameters ---------- source : str The Python source code string. Returns ------- tok_generator : Iterator[Tuple[int, str]] An iterator yielding all tokens with only toknum and tokval (Tuple[ing, str]).",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\computation\\parsing.py",
    "ast_data": "FunctionDef name:tokenize_string arg:source arguments arg Assign Call Call Call Assign Call Assign Call For"
  },
  {
    "library": "matplotlib",
    "name": "get_cmap",
    "source_code": "def get_cmap(self):\n    return self._colorizer.cmap",
    "docstring": "Return the instance.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\colorizer.py",
    "ast_data": "FunctionDef name:get_cmap arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "experimental_from_proto",
    "source_code": "@classmethod\n@abc.abstractmethod\ndef experimental_from_proto(cls, proto: message.Message) -> 'Serializable':\n    raise NotImplementedError",
    "docstring": "Returns an instance based on a proto.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\core\\function\\trace_type\\serialization.py",
    "ast_data": "FunctionDef name:experimental_from_proto arg:cls arg:proto arguments arg arg Raise"
  },
  {
    "library": "tensorflow",
    "name": "_change_nested_mappings_to",
    "source_code": "def _change_nested_mappings_to(value, new_type):\n    if isinstance(value, (dict, immutable_dict.ImmutableDict)):\n        return new_type([(k, _change_nested_mappings_to(v, new_type)) for k, v in value.items()])\n    elif isinstance(value, tuple):\n        return tuple((_change_nested_mappings_to(elt, new_type) for elt in value))\n    else:\n        return value",
    "docstring": "Recursively replace mappings with .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\extension_type.py",
    "ast_data": "FunctionDef name:_change_nested_mappings_to arg:value arg:new_type arguments arg arg If Call Return return:yes Call Call Call If Call Return return:yes Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "QnResolver",
    "source_code": "class QnResolver(gast.NodeTransformer):\n\n    def visit_Name(self, node):\n        node = self.generic_visit(node)\n        anno.setanno(node, anno.Basic.QN, QN(node.id))\n        return node\n\n    def visit_Attribute(self, node):\n        node = self.generic_visit(node)\n        if anno.hasanno(node.value, anno.Basic.QN):\n            anno.setanno(node, anno.Basic.QN, QN(anno.getanno(node.value, anno.Basic.QN), attr=node.attr))\n        return node\n\n    def visit_Subscript(self, node):\n        node = self.generic_visit(node)\n        s = node.slice\n        if isinstance(s, (gast.Tuple, gast.Slice)):\n            return node\n        if isinstance(s, gast.Constant) and s.value != Ellipsis:\n            subscript = QN(Literal(s.value))\n        elif anno.hasanno(s, anno.Basic.QN):\n            subscript = anno.getanno(s, anno.Basic.QN)\n        else:\n            return node\n        if anno.hasanno(node.value, anno.Basic.QN):\n            anno.setanno(node, anno.Basic.QN, QN(anno.getanno(node.value, anno.Basic.QN), subscript=subscript))\n        return node",
    "docstring": "Annotates nodes with QN information. Note: Not using NodeAnnos to avoid circular dependencies.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\autograph\\pyct\\qual_names.py",
    "ast_data": "ClassDef name:QnResolver FunctionDef name:visit_Name arg:self arg:node arguments arg arg Assign Call Call Call Return return:yes FunctionDef name:visit_Attribute arg:self arg:node arguments arg arg Assign Call If Call Call Call Call Return return:yes FunctionDef name:visit_Subscript arg:self arg:node arguments arg arg Assign Call Assign If Call Return return:yes If BoolOp Call Compare Assign Call Call If Call Assign Call Return return:yes If Call Call Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_resolve_prefix",
    "source_code": "def _resolve_prefix(self, token):\n    if token in self._handlers:\n        return token\n    elif token in self._alias_to_prefix:\n        return self._alias_to_prefix[token]\n    else:\n        return None",
    "docstring": "Resolve command prefix from the prefix itself or its alias. Args: token: a str to be resolved. Returns: If resolvable, the resolved command prefix. If not resolvable, None.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\debug\\cli\\debugger_cli_common.py",
    "ast_data": "FunctionDef name:_resolve_prefix arg:self arg:token arguments arg arg If Compare Return return:yes If Compare Return return:yes Return return:no"
  },
  {
    "library": "pandas",
    "name": "count_not_none",
    "source_code": "def count_not_none(*args) -> int:\n    return sum((x is not None for x in args))",
    "docstring": "Returns the count of arguments that are not None.",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\common.py",
    "ast_data": "FunctionDef name:count_not_none arguments arg Return return:yes Call Compare"
  },
  {
    "library": "numpy",
    "name": "issubsctype",
    "source_code": "@set_module('numpy')\ndef issubsctype(arg1, arg2):\n    return issubclass(obj2sctype(arg1), obj2sctype(arg2))",
    "docstring": "Determine if the first argument is a subclass of the second argument. Parameters ---------- arg1, arg2 : dtype or dtype specifier Data-types. Returns ------- out : bool The result. See Also -------- issctype, issubdtype, obj2sctype Examples -------- >>> from numpy._core import issubsctype >>> issubsctype('S8', str) False >>> issubsctype(np.array([1]), int) True >>> issubsctype(np.array([1]), float) False",
    "type": "function",
    "file_path": "numpy\\numpy\\_core\\numerictypes.py",
    "ast_data": "FunctionDef name:issubsctype arg:arg1 arg:arg2 arguments arg arg Return return:yes Call Call Call Call"
  },
  {
    "library": "kornia",
    "name": "sharpness",
    "source_code": "def sharpness(min_mag: float, max_mag: float) -> OperationBase:\n    return Sharpness(None, 1.0, magnitude_range=(min_mag, max_mag))",
    "docstring": "Return sharpness op.",
    "type": "function",
    "file_path": "kornia\\kornia\\augmentation\\auto\\rand_augment\\ops.py",
    "ast_data": "FunctionDef name:sharpness arg:min_mag arg:max_mag arguments arg arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "automatic_control_dependencies",
    "source_code": "def automatic_control_dependencies(f):\n\n    def wrapper(*args, **kwargs):\n        with AutomaticControlDependencies() as a:\n            result = f(*args, **kwargs)\n            result_flat = [a.mark_as_return(t) for t in nest.flatten(result)]\n            return nest.pack_sequence_as(result, result_flat)\n    return tf_decorator.make_decorator(f, wrapper)",
    "docstring": "Wraps f to automatically insert control dependencies. The inserted dependencies ensure that: 1. All stateful ops in f run when the result of f runs 2. Updates to the same resources happen in order. Args: f: the function to be wrapped. Returns: The wrapped function.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\auto_control_deps.py",
    "ast_data": "FunctionDef name:automatic_control_dependencies arg:f arguments arg FunctionDef name:wrapper arguments arg arg With Call Assign Call Assign Call Call Return return:yes Call Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "_read_arraydesc",
    "source_code": "def _read_arraydesc(f):\n    arraydesc = {'arrstart': _read_long(f)}\n    if arraydesc['arrstart'] == 8:\n        _skip_bytes(f, 4)\n        arraydesc['nbytes'] = _read_long(f)\n        arraydesc['nelements'] = _read_long(f)\n        arraydesc['ndims'] = _read_long(f)\n        _skip_bytes(f, 8)\n        arraydesc['nmax'] = _read_long(f)\n        arraydesc['dims'] = [_read_long(f) for _ in range(arraydesc['nmax'])]\n    elif arraydesc['arrstart'] == 18:\n        warnings.warn('Using experimental 64-bit array read', stacklevel=3)\n        _skip_bytes(f, 8)\n        arraydesc['nbytes'] = _read_uint64(f)\n        arraydesc['nelements'] = _read_uint64(f)\n        arraydesc['ndims'] = _read_long(f)\n        _skip_bytes(f, 8)\n        arraydesc['nmax'] = 8\n        arraydesc['dims'] = []\n        for d in range(arraydesc['nmax']):\n            v = _read_long(f)\n            if v != 0:\n                raise Exception('Expected a zero in ARRAY_DESC')\n            arraydesc['dims'].append(_read_long(f))\n    else:\n        raise Exception(f'Unknown ARRSTART: {arraydesc['arrstart']}')\n    return arraydesc",
    "docstring": "Function to read in an array descriptor",
    "type": "function",
    "file_path": "scipy\\scipy\\io\\_idl.py",
    "ast_data": "FunctionDef name:_read_arraydesc arg:f arguments arg Assign Call If Compare Call Assign Call Assign Call Assign Call Call Assign Call Assign Call Call If Compare Call Call Assign Call Assign Call Assign Call Call Assign Assign For Call Assign Call If Compare Raise Call Call Call Raise Call Return return:yes"
  },
  {
    "library": "pandas",
    "name": "book",
    "source_code": "@property\ndef book(self):\n    return self._reader.book",
    "docstring": "Gets the Excel workbook. Workbook is the top-level container for all document information. Returns ------- Excel Workbook The workbook object of the type defined by the engine being used. See Also -------- read_excel : Read an Excel file into a pandas DataFrame. Examples -------- >>> file = pd.ExcelFile(\"myfile.xlsx\") # doctest: +SKIP >>> file.book # doctest: +SKIP >>> file.book.path # doctest: +SKIP '/xl/workbook.xml' >>> file.book.active # doctest: +SKIP >>> file.book.sheetnames # doctest: +SKIP ['Sheet1', 'Sheet2']",
    "type": "method",
    "file_path": "pandas\\pandas\\io\\excel\\_base.py",
    "ast_data": "FunctionDef name:book arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "random_normal_variable",
    "source_code": "@doc_controls.do_not_generate_docs\ndef random_normal_variable(shape, mean, scale, dtype=None, name=None, seed=None):\n    if dtype is None:\n        dtype = floatx()\n    tf_dtype = dtypes_module.as_dtype(dtype)\n    if seed is None:\n        seed = np.random.randint(1000000000.0)\n    value = init_ops.random_normal_initializer(mean, scale, dtype=tf_dtype, seed=seed)(shape)\n    return variable(value, dtype=dtype, name=name)",
    "docstring": "Instantiates a variable with values drawn from a normal distribution. Args: shape: Tuple of integers, shape of returned Keras variable. mean: Float, mean of the normal distribution. scale: Float, standard deviation of the normal distribution. dtype: String, dtype of returned Keras variable. name: String, name of returned Keras variable. seed: Integer, random seed. Returns: A Keras variable, filled with drawn samples. Example: >>> kvar = tf.keras.backend.random_normal_variable(shape=(2,3), ... mean=0.0, scale=1.0) >>> kvar",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\backend.py",
    "ast_data": "FunctionDef name:random_normal_variable arg:shape arg:mean arg:scale arg:dtype arg:name arg:seed arguments arg arg arg arg arg arg If Compare Assign Call Assign Call If Compare Assign Call Assign Call Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "_adjust_attributes_of_max_pool",
    "source_code": "def _adjust_attributes_of_max_pool(expand_size: int, kernel_size: Sequence[int] | int, stride: Sequence[int] | int, padding: Sequence[int] | int, dilation: Sequence[int] | int) -> tuple[Sequence[int], Sequence[int], Sequence[int], Sequence[int]]:\n    if isinstance(dilation, int):\n        dilation = [dilation] * expand_size\n    if isinstance(kernel_size, int):\n        kernel_shape = [kernel_size] * expand_size\n    else:\n        kernel_shape = kernel_size\n    if isinstance(padding, int):\n        pads = [padding] * expand_size * 2\n    elif len(padding) == 1:\n        pads = padding * expand_size * 2\n    elif len(padding) == 2:\n        pads = padding * 2\n    elif len(padding) == 3:\n        pads = padding * 2\n    else:\n        pads = padding\n    if isinstance(stride, int):\n        strides = [stride] * expand_size\n    elif not stride:\n        strides = kernel_shape\n    else:\n        strides = stride\n    return (kernel_shape, strides, pads, dilation)",
    "docstring": "Adjust attributes of avg_pool to match ONNX specification.",
    "type": "function",
    "file_path": "pytorch\\torch\\onnx\\symbolic_opset10.py",
    "ast_data": "FunctionDef name:_adjust_attributes_of_max_pool arg:expand_size arg:kernel_size arg:stride arg:padding arg:dilation arguments arg arg arg arg arg If Call Assign If Call Assign Assign If Call Assign If Compare Call Assign If Compare Call Assign If Compare Call Assign Assign If Call Assign If Assign Assign Return return:yes"
  },
  {
    "library": "django",
    "name": "get_safe_cookies",
    "source_code": "def get_safe_cookies(self, request):\n    if not hasattr(request, 'COOKIES'):\n        return {}\n    return {k: self.cleanse_setting(k, v) for k, v in request.COOKIES.items()}",
    "docstring": "Return a dictionary of request.COOKIES with sensitive values redacted.",
    "type": "method",
    "file_path": "django\\django\\views\\debug.py",
    "ast_data": "FunctionDef name:get_safe_cookies arg:self arg:request arguments arg arg If Call Return return:no Return return:yes Call Call"
  },
  {
    "library": "pandas",
    "name": "delegate_names",
    "source_code": "def delegate_names(delegate, accessors: list[str], typ: str, overwrite: bool=False, accessor_mapping: Callable[[str], str]=lambda x: x, raise_on_missing: bool=True):\n\n    def add_delegate_accessors(cls):\n        cls._add_delegate_accessors(delegate, accessors, typ, overwrite=overwrite, accessor_mapping=accessor_mapping, raise_on_missing=raise_on_missing)\n        return cls\n    return add_delegate_accessors",
    "docstring": "Add delegated names to a class using a class decorator. This provides an alternative usage to directly calling below a class definition. Parameters ---------- delegate : object The class to get methods/properties & doc-strings. accessors : Sequence[str] List of accessor to add. typ : {'property', 'method'} overwrite : bool, default False Overwrite the method/property in the target class if it exists. accessor_mapping: Callable, default lambda x: x Callable to map the delegate's function to the cls' function. raise_on_missing: bool, default True Raise if an accessor does not exist on delegate. False skips the missing accessor. Returns ------- callable A class decorator. Examples -------- @delegate_names(Categorical, [\"categories\", \"ordered\"], \"property\") class CategoricalAccessor(PandasDelegate): [...]",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\accessor.py",
    "ast_data": "FunctionDef name:delegate_names arg:delegate arg:accessors arg:typ arg:overwrite arg:accessor_mapping arg:raise_on_missing arguments arg arg arg arg arg arg arguments arg FunctionDef name:add_delegate_accessors arg:cls arguments arg Call Return return:yes Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "start",
    "source_code": "def start(self) -> None:\n    if threading.current_thread() is threading.main_thread():\n        signal.signal(signal.SIGTERM, _terminate_process_handler)\n        signal.signal(signal.SIGINT, _terminate_process_handler)\n        if not IS_WINDOWS:\n            signal.signal(signal.SIGHUP, _terminate_process_handler)\n            signal.signal(signal.SIGQUIT, _terminate_process_handler)\n    else:\n        logger.warning('Failed to register signal handlers since torchelastic is running on a child thread. This could lead to orphaned worker processes if the torchrun is terminated.')\n    self._start()\n    self._stdout_tail.start()\n    self._stderr_tail.start()",
    "docstring": "Start processes using parameters defined in the constructor.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\elastic\\multiprocessing\\api.py",
    "ast_data": "FunctionDef name:start arg:self arguments arg If Compare Call Call Call Call If Call Call Call Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_matrix_conv",
    "source_code": "def _matrix_conv(self, m1, m2):\n    n = m1[0, 0].shape.as_list()[0]\n    if n != m2[0, 0].shape.as_list()[0]:\n        raise ValueError(f'The entries in matrices m1 and m2 must have the same dimensions. Received m1[0, 0].shape={m1[0, 0].shape} and m2[0, 0].shape={m2[0, 0].shape}.')\n    k = int(np.sqrt(len(m1)))\n    l = int(np.sqrt(len(m2)))\n    result = {}\n    size = k + l - 1\n    for i in range(size):\n        for j in range(size):\n            result[i, j] = array_ops.zeros([n, n], self.dtype)\n            for index1 in range(min(k, i + 1)):\n                for index2 in range(min(k, j + 1)):\n                    if i - index1 < l and j - index2 < l:\n                        result[i, j] += math_ops.matmul(m1[index1, index2], m2[i - index1, j - index2])\n    return result",
    "docstring": "Matrix convolution. Args: m1: A k x k dictionary, each element is a n x n matrix. m2: A l x l dictionary, each element is a n x n matrix. Returns: (k + l - 1) * (k + l - 1) dictionary each element is a n x n matrix. Raises: ValueError: if the entries of m1 and m2 are of different dimensions.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\init_ops.py",
    "ast_data": "FunctionDef name:_matrix_conv arg:self arg:m1 arg:m2 arguments arg arg arg Assign Call If Compare Call Raise Call Assign Call Call Call Assign Call Call Call Assign Assign For Call For Call Assign Call For Call Call For Call Call If BoolOp Compare Compare Call Return return:yes"
  },
  {
    "library": "authlib",
    "name": "get_audiences",
    "source_code": "def get_audiences(self, request):\n    client = request.client\n    return [client.get_client_id()]",
    "docstring": "Parse value for id_token, default value is client id. Developers MAY rewrite this method to provide a customized audience value.",
    "type": "method",
    "file_path": "authlib\\authlib\\oidc\\core\\grants\\code.py",
    "ast_data": "FunctionDef name:get_audiences arg:self arg:request arguments arg arg Assign Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_tf_py_func_print",
    "source_code": "def _tf_py_func_print(*objects, **kwargs):\n    override_kwargs = {k: v for k, v in kwargs.items() if v is not py_builtins.UNSPECIFIED}\n    if 'flush' not in override_kwargs:\n        override_kwargs['flush'] = True\n\n    def print_wrapper(*vals, **kwargs):\n        vals = tuple((v.numpy() if tensor_util.is_tf_type(v) else v for v in vals))\n        vals = tuple((v.decode('utf-8') if isinstance(v, bytes) else v for v in vals))\n        print(*vals, **kwargs)\n    return wrap_py_func(print_wrapper, objects, override_kwargs)",
    "docstring": "Overload of print_ as a py_func implementation.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\autograph_ops.py",
    "ast_data": "FunctionDef name:_tf_py_func_print arguments arg arg Assign Call Compare If Compare Assign FunctionDef name:print_wrapper arguments arg arg Assign Call Call Call Assign Call Call Call Call Return return:yes Call"
  },
  {
    "library": "django",
    "name": "is_valid",
    "source_code": "def is_valid(self):\n    return self.is_bound and (not self.errors)",
    "docstring": "Return True if the form has no errors, or False otherwise.",
    "type": "method",
    "file_path": "django\\django\\forms\\forms.py",
    "ast_data": "FunctionDef name:is_valid arg:self arguments arg Return return:yes BoolOp"
  },
  {
    "library": "django",
    "name": "_listarr",
    "source_code": "def _listarr(self, func):\n    lst = [func(i) for i in range(len(self))]\n    if numpy:\n        return numpy.array(lst)\n    else:\n        return lst",
    "docstring": "Return a sequence (list) corresponding with the given function. Return a numpy array if possible.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\geos\\linestring.py",
    "ast_data": "FunctionDef name:_listarr arg:self arg:func arguments arg arg Assign Call Call Call If Return return:yes Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "resource_handle_call_time_value",
    "source_code": "def resource_handle_call_time_value(self):\n\n    def closure():\n        dispatch_context = coordinator_context.get_current_dispatch_context()\n        if dispatch_context:\n            remote_value = self._distributed_table._values[dispatch_context.worker_index]\n            ret = dispatch_context.maybe_get_remote_value(remote_value)\n            return ret\n        else:\n            return self._coordinator_instance.resource_handle\n    return (closure, tensor.TensorSpec([], dtype=dtypes.resource))",
    "docstring": "Returns a closure to run for a resource handle at call time and its spec. This function is called in self.resource_handle to create a placeholder which returns a resource handle on some worker or on the coordinator.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\ps_values.py",
    "ast_data": "FunctionDef name:resource_handle_call_time_value arg:self arguments arg FunctionDef name:closure arguments Assign Call If Assign Assign Call Return return:yes Return return:yes Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "maybe_use_numba",
    "source_code": "def maybe_use_numba(engine: str | None) -> bool:\n    return engine == 'numba' or (engine is None and GLOBAL_USE_NUMBA)",
    "docstring": "Signal whether to use numba routines.",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\util\\numba_.py",
    "ast_data": "FunctionDef name:maybe_use_numba arg:engine arguments arg Return return:yes BoolOp Compare BoolOp Compare"
  },
  {
    "library": "seaborn",
    "name": "refline",
    "source_code": "def refline(self, *, x=None, y=None, joint=True, marginal=True, color='.5', linestyle='--', **line_kws):\n    line_kws['color'] = color\n    line_kws['linestyle'] = linestyle\n    if x is not None:\n        if joint:\n            self.ax_joint.axvline(x, **line_kws)\n        if marginal:\n            self.ax_marg_x.axvline(x, **line_kws)\n    if y is not None:\n        if joint:\n            self.ax_joint.axhline(y, **line_kws)\n        if marginal:\n            self.ax_marg_y.axhline(y, **line_kws)\n    return self",
    "docstring": "Add a reference line(s) to joint and/or marginal axes. Parameters ---------- x, y : numeric Value(s) to draw the line(s) at. joint, marginal : bools Whether to add the reference line(s) to the joint/marginal axes. color : :mod: Specifies the color of the reference line(s). linestyle : str Specifies the style of the reference line(s). line_kws : key, value mappings Other keyword arguments are passed to :meth: when `matplotlib.axes.Axes.axhlineJointGrid` for easy method chaining.",
    "type": "method",
    "file_path": "seaborn\\seaborn\\axisgrid.py",
    "ast_data": "FunctionDef name:refline arg:self arguments arg arg arg arg arg arg arg arg Assign Assign If Compare If Call If Call If Compare If Call If Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "normalize_kwargs",
    "source_code": "def normalize_kwargs(kw, alias_mapping=None):\n    from matplotlib.artist import Artist\n    if kw is None:\n        return {}\n    if alias_mapping is None:\n        alias_mapping = {}\n    elif isinstance(alias_mapping, type) and issubclass(alias_mapping, Artist) or isinstance(alias_mapping, Artist):\n        alias_mapping = getattr(alias_mapping, '_alias_map', {})\n    to_canonical = {alias: canonical for canonical, alias_list in alias_mapping.items() for alias in alias_list}\n    canonical_to_seen = {}\n    ret = {}\n    for k, v in kw.items():\n        canonical = to_canonical.get(k, k)\n        if canonical in canonical_to_seen:\n            raise TypeError(f'Got both {canonical_to_seen[canonical]!r} and {k!r}, which are aliases of one another')\n        canonical_to_seen[canonical] = k\n        ret[canonical] = v\n    return ret",
    "docstring": "Helper function to normalize kwarg inputs. Parameters ---------- kw : dict or None A dict of keyword arguments. None is explicitly supported and treated as an empty dict, to support functions with an optional parameter of the form ``. alias_mapping : dict or Artist subclass or Artist instance, optional A mapping between a canonical name to a list of aliases, in order of precedence from lowest to highest. If the canonical value is not in the list it is assumed to have the highest priority. If an Artist subclass or instance is passed, use its properties alias mapping. Raises ------ TypeError To match what Python raises if invalid arguments/keyword arguments are passed to a callable.",
    "type": "function",
    "file_path": "matplotlib\\lib\\matplotlib\\cbook.py",
    "ast_data": "FunctionDef name:normalize_kwargs arg:kw arg:alias_mapping arguments arg arg If Compare Return return:no If Compare Assign If BoolOp BoolOp Call Call Call Assign Call Assign Call Assign Assign For Call Assign Call If Compare Raise Call Assign Assign Return return:yes"
  },
  {
    "library": "pandas",
    "name": "window",
    "source_code": "@contextlib.contextmanager\ndef window():\n    hwnd = safeCreateWindowExA(0, b'STATIC', None, 0, 0, 0, 0, 0, None, None, None, None)\n    try:\n        yield hwnd\n    finally:\n        safeDestroyWindow(hwnd)",
    "docstring": "Context that provides a valid Windows hwnd.",
    "type": "function",
    "file_path": "pandas\\pandas\\io\\clipboard\\__init__.py",
    "ast_data": "FunctionDef name:window arguments Assign Call Try Call"
  },
  {
    "library": "pytorch",
    "name": "decline_if_input_dtype",
    "source_code": "@classmethod\ndef decline_if_input_dtype(cls, dtype: torch.dtype) -> OperatorSupportBase:\n\n    def _decline_if_input_dtype(submodules: t.Mapping[str, torch.nn.Module], node: torch.fx.Node) -> bool:\n        for arg in node.all_input_nodes:\n            arg_dtype = _get_arg_dtype(arg)\n            if arg_dtype == dtype:\n                return False\n        return True\n    return create_op_support(_decline_if_input_dtype)",
    "docstring": "Report a node as non-supported, if any of its arguments is of dtype",
    "type": "method",
    "file_path": "pytorch\\torch\\fx\\passes\\operator_support.py",
    "ast_data": "FunctionDef name:decline_if_input_dtype arg:cls arg:dtype arguments arg arg FunctionDef name:_decline_if_input_dtype arg:submodules arg:node arguments arg arg For Assign Call If Compare Return return:yes Return return:yes Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_QrGradSquareAndDeepMatrices",
    "source_code": "def _QrGradSquareAndDeepMatrices(q, r, dq, dr):\n    qdq = math_ops.matmul(q, dq, adjoint_a=True)\n    qdq_ = qdq - _linalg.adjoint(qdq)\n    rdr = math_ops.matmul(r, dr, adjoint_b=True)\n    rdr_ = rdr - _linalg.adjoint(rdr)\n    tril = array_ops.matrix_band_part(qdq_ + rdr_, -1, 0)\n    grad_a = math_ops.matmul(q, dr + _TriangularSolve(tril, r))\n    grad_b = _TriangularSolve(dq - math_ops.matmul(q, qdq), r)\n    ret = grad_a + grad_b\n    if q.dtype.is_complex:\n        m = rdr - _linalg.adjoint(qdq)\n        eyem = _linalg.set_diag(array_ops.zeros_like(m), _linalg.diag_part(m))\n        correction = eyem - math_ops.cast(math_ops.real(eyem), q.dtype)\n        ret = ret + _TriangularSolve(math_ops.matmul(q, _linalg.adjoint(correction)), r)\n    return ret",
    "docstring": "Gradient for matrix orders num_rows >= num_cols and full_matrices is false.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\linalg_grad.py",
    "ast_data": "FunctionDef name:_QrGradSquareAndDeepMatrices arg:q arg:r arg:dq arg:dr arguments arg arg arg arg Assign Call Assign Call Assign Call Assign Call Assign Call Assign Call Call Assign Call Call Assign If Assign Call Assign Call Call Call Assign Call Call Assign Call Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "metrics",
    "source_code": "@property\ndef metrics(self):\n    collected_metrics = []\n    for layer in self._flatten_layers():\n        with layer._metrics_lock:\n            collected_metrics.extend(layer._metrics)\n    return collected_metrics",
    "docstring": "List of metrics added using the API. Example: >>> input = tf.keras.layers.Input(shape=(3,)) >>> d = tf.keras.layers.Dense(2) >>> output = d(input) >>> d.add_metric(tf.reduce_max(output), name='max') >>> d.add_metric(tf.reduce_min(output), name='min') >>> [m.name for m in d.metrics] ['max', 'min'] Returns: A list of objects.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\base_layer.py",
    "ast_data": "FunctionDef name:metrics arg:self arguments arg Assign For Call With Call Return return:yes"
  },
  {
    "library": "kornia",
    "name": "rot_y",
    "source_code": "@classmethod\ndef rot_y(cls, y: Tensor) -> So3:\n    zs = zeros_like(y)\n    return cls.exp(stack((zs, y, zs), -1))",
    "docstring": "Construct a z-axis rotation. Args: y: the y-axis rotation angle.",
    "type": "method",
    "file_path": "kornia\\kornia\\geometry\\liegroup\\so3.py",
    "ast_data": "FunctionDef name:rot_y arg:cls arg:y arguments arg arg Assign Call Return return:yes Call Call"
  },
  {
    "library": "seaborn",
    "name": "__init__",
    "source_code": "def __init__(self, val: Any=None, depend: str | None=None, rc: str | None=None, auto: bool=False, grouping: bool=True):\n    if depend is not None:\n        assert depend in PROPERTIES\n    if rc is not None:\n        assert rc in mpl.rcParams\n    self._val = val\n    self._rc = rc\n    self._depend = depend\n    self._auto = auto\n    self._grouping = grouping",
    "docstring": "Property that can be mapped from data or set directly, with flexible defaults. Parameters ---------- val : Any Use this value as the default. depend : str Use the value of this feature as the default. rc : str Use the value of this rcParam as the default. auto : bool The default value will depend on other parameters at compile time. grouping : bool If True, use the mapped variable to define groups.",
    "type": "method",
    "file_path": "seaborn\\seaborn\\_marks\\base.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:val arg:depend arg:rc arg:auto arg:grouping arguments arg arg arg arg arg arg If Compare Compare If Compare Compare Assign Assign Assign Assign Assign"
  },
  {
    "library": "pytorch",
    "name": "relu6",
    "source_code": "def relu6(input: Tensor, inplace: bool=False) -> Tensor:\n    if has_torch_function_unary(input):\n        return handle_torch_function(relu6, (input,), input, inplace=inplace)\n    if inplace:\n        result = torch._C._nn.relu6_(input)\n    else:\n        result = torch._C._nn.relu6(input)\n    return result",
    "docstring": "relu6(input, inplace=False) -> Tensor Applies the element-wise function :math:. See :class: for more details.",
    "type": "function",
    "file_path": "pytorch\\torch\\nn\\functional.py",
    "ast_data": "FunctionDef name:relu6 arg:input arg:inplace arguments arg arg If Call Return return:yes Call If Assign Call Assign Call Return return:yes"
  },
  {
    "library": "cherrypy",
    "name": "unrepr",
    "source_code": "def unrepr(s):\n    if not s:\n        return s\n    b = _Builder()\n    obj = b.astnode(s)\n    return b.build(obj)",
    "docstring": "Return a Python object compiled from a string.",
    "type": "function",
    "file_path": "cherrypy\\cherrypy\\lib\\reprconf.py",
    "ast_data": "FunctionDef name:unrepr arg:s arguments arg If Return return:yes Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "_get_loc_single_level_index",
    "source_code": "def _get_loc_single_level_index(self, level_index: Index, key: Hashable) -> int:\n    if is_scalar(key) and isna(key):\n        return -1\n    else:\n        return level_index.get_loc(key)",
    "docstring": "If key is NA value, location of index unify as -1. Parameters ---------- level_index: Index key : label Returns ------- loc : int If key is NA value, loc is -1 Else, location of key in index. See Also -------- Index.get_loc : The get_loc method for (single-level) index.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\indexes\\multi.py",
    "ast_data": "FunctionDef name:_get_loc_single_level_index arg:self arg:level_index arg:key arguments arg arg arg If BoolOp Call Call Return return:yes Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "ReparameterizationType",
    "source_code": "@tf_export(v1=['distributions.ReparameterizationType'])\nclass ReparameterizationType:\n\n    @deprecation.deprecated('2019-01-01', 'The TensorFlow Distributions library has moved to TensorFlow Probability (https://github.com/tensorflow/probability). You should update all references to use `tfp.distributions` instead of `tf.distributions`.', warn_once=True)\n    def __init__(self, rep_type):\n        self._rep_type = rep_type\n\n    def __repr__(self):\n        return '<Reparameterization Type: %s>' % self._rep_type\n\n    def __eq__(self, other):\n        return self is other",
    "docstring": "Instances of this class represent how sampling is reparameterized. Two static instances exist in the distributions library, signifying one of two possible properties for samples from a distribution: : Samples from the distribution are fully reparameterized, and straight-through gradients are supported. : Samples from the distribution are not fully reparameterized, and straight-through gradients are either partially unsupported or are not supported at all. In this case, for purposes of e.g. RL or variational inference, it is generally safest to wrap the sample results in a call and use policy gradients / surrogate loss instead.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\distributions\\distribution.py",
    "ast_data": "ClassDef name:ReparameterizationType FunctionDef name:__init__ arg:self arg:rep_type arguments arg arg Assign Call FunctionDef name:__repr__ arg:self arguments arg Return return:yes FunctionDef name:__eq__ arg:self arg:other arguments arg arg Return return:yes Compare Call"
  },
  {
    "library": "kornia",
    "name": "get_boxes_shape",
    "source_code": "def get_boxes_shape(self) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]:\n    boxes_xyzwhd = self.to_tensor(mode='xyzwhd')\n    widths, heights, depths = (boxes_xyzwhd[..., 3], boxes_xyzwhd[..., 4], boxes_xyzwhd[..., 5])\n    return (depths, heights, widths)",
    "docstring": "Compute boxes heights and widths. Returns: - Boxes depths, shape of :math: or :math:. - Boxes heights, shape of :math: or :math:. - Boxes widths, shape of :math: or :math:. Example: >>> boxes_xyzxyz = torch.tensor([[ 0, 1, 2, 10, 21, 32], [3, 4, 5, 43, 54, 65]]) >>> boxes3d = Boxes3D.from_tensor(boxes_xyzxyz) >>> boxes3d.get_boxes_shape() (tensor([30., 60.]), tensor([20., 50.]), tensor([10., 40.]))",
    "type": "method",
    "file_path": "kornia\\kornia\\geometry\\boxes.py",
    "ast_data": "FunctionDef name:get_boxes_shape arg:self arguments arg Assign Call Assign Return return:yes"
  },
  {
    "library": "scipy",
    "name": "set_global_backend",
    "source_code": "def set_global_backend(backend, coerce=False, only=False, try_last=False):\n    backend = _backend_from_arg(backend)\n    ua.set_global_backend(backend, coerce=coerce, only=only, try_last=try_last)",
    "docstring": "Sets the global fft backend This utility method replaces the default backend for permanent use. It will be tried in the list of backends automatically, unless the `set_backend`. Notes ----- This will overwrite the previously set global backend, which, by default, is the SciPy implementation. Examples -------- We can set the global fft backend: >>> from scipy.fft import fft, set_global_backend >>> set_global_backend(\"scipy\") # Sets global backend (default is \"scipy\"). >>> fft([1]) # Calls the global backend array([1.+0.j])",
    "type": "function",
    "file_path": "scipy\\scipy\\fft\\_backend.py",
    "ast_data": "FunctionDef name:set_global_backend arg:backend arg:coerce arg:only arg:try_last arguments arg arg arg arg Assign Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_Barrier",
    "source_code": "class _Barrier(object):\n\n    def __init__(self, num_participants):\n        self._num_participants = num_participants\n        self._counter = 0\n        self._flag = False\n        self._local_sense = threading.local()\n        self._lock = threading.Lock()\n        self._condition = threading.Condition()\n\n    def wait(self):\n        self._local_sense.value = not self._flag\n        with self._lock:\n            self._counter += 1\n            if self._counter == self._num_participants:\n                self._counter = 0\n                self._flag = self._local_sense.value\n        with self._condition:\n            while self._flag != self._local_sense.value:\n                self._condition.wait()\n            self._condition.notify_all()",
    "docstring": "A reusable barrier class for worker synchronization.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\distribute_coordinator.py",
    "ast_data": "ClassDef name:_Barrier FunctionDef name:__init__ arg:self arg:num_participants arguments arg arg Assign Assign Assign Assign Call Assign Call Assign Call FunctionDef name:wait arg:self arguments arg Assign With If Compare Assign Assign With While Compare Call Call"
  },
  {
    "library": "django",
    "name": "get_success_url",
    "source_code": "def get_success_url(self):\n    if not self.success_url:\n        raise ImproperlyConfigured('No URL to redirect to. Provide a success_url.')\n    return str(self.success_url)",
    "docstring": "Return the URL to redirect to after processing a valid form.",
    "type": "method",
    "file_path": "django\\django\\views\\generic\\edit.py",
    "ast_data": "FunctionDef name:get_success_url arg:self arguments arg If Raise Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "wait_for_healthy",
    "source_code": "def wait_for_healthy(self, timeout_s=1200, interval=30):\n    timeout = time.time() + timeout_s\n    while self.health() != 'HEALTHY':\n        logging.warning('Waiting for TPU \"%s\" with state \"%s\" and health \"%s\" to become healthy', self.name(), self.state(), self.health())\n        if time.time() + interval > timeout:\n            raise RuntimeError('Timed out waiting for TPU \"%s\" to become healthy' % self.name())\n        time.sleep(interval)\n    logging.warning('TPU \"%s\" is healthy.', self.name())",
    "docstring": "Wait for TPU to become healthy or raise error if timeout reached. Args: timeout_s (int): The timeout in seconds for waiting TPU to become healthy. interval (int): The interval in seconds to poll the TPU for health. Raises: RuntimeError: If the TPU doesn't become healthy by the timeout.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\tpu\\client\\client.py",
    "ast_data": "FunctionDef name:wait_for_healthy arg:self arg:timeout_s arg:interval arguments arg arg arg Assign Call While Compare Call Call Call Call Call If Compare Call Raise Call Call Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_save_tensor_value_to_tmp_cache",
    "source_code": "def _save_tensor_value_to_tmp_cache(self, cache_idx, updates, graph):\n    updates = self._merge_tensor_signatures(updates)\n    updates = array_ops.reshape(updates, [self._num_signature_dimensions()])\n    if graph not in self._temp_cache_var:\n        raise RuntimeError('graph is not in self._temp_cache_var')\n    if cache_idx >= len(self._temp_cache_var[graph]):\n        raise RuntimeError('cache_idx (%d) is out of range (%d)' % (cache_idx, len(self._temp_cache_var[graph])))\n    self._temp_cache_var[graph][cache_idx] = updates",
    "docstring": "Returns an op that will save the given updates to an entry in the cache. Args: cache_idx: The cache index of the tensor within the cache. updates: A dictionary of the signature updates from signature name to a tensor of dimension [1]. graph: A TensorFlow graph. Raises: RuntimeError: (1) graph is not already in self._temp_cache_var, or (2) cache_idx is out of range.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\tpu\\tensor_tracer.py",
    "ast_data": "FunctionDef name:_save_tensor_value_to_tmp_cache arg:self arg:cache_idx arg:updates arg:graph arguments arg arg arg arg Assign Call Assign Call Call If Compare Raise Call If Compare Call Raise Call Call Assign"
  },
  {
    "library": "tensorflow",
    "name": "tpu_hardware_feature",
    "source_code": "@property\ndef tpu_hardware_feature(self):\n    return tpu_hardware_feature.HardwareFeature(self._tpu_cluster_resolver.tpu_hardware_feature)",
    "docstring": "Return the class.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\tpu_strategy.py",
    "ast_data": "FunctionDef name:tpu_hardware_feature arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "django",
    "name": "__len__",
    "source_code": "def __len__(self):\n    return self.point_count",
    "docstring": "Return the number of points in the LineString.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\gdal\\geometries.py",
    "ast_data": "FunctionDef name:__len__ arg:self arguments arg Return return:yes"
  },
  {
    "library": "scipy",
    "name": "_fhtq",
    "source_code": "def _fhtq(a, u, inverse=False, *, xp=None):\n    if xp is None:\n        xp = np\n    n = a.shape[-1]\n    A = rfft(a, axis=-1)\n    if not inverse:\n        A *= u\n    else:\n        A /= xp.conj(u)\n    A = irfft(A, n, axis=-1)\n    A = xp.flip(A, axis=-1)\n    return A",
    "docstring": "Compute the biased fast Hankel transform. This is the basic FFTLog routine.",
    "type": "function",
    "file_path": "scipy\\scipy\\fft\\_fftlog_backend.py",
    "ast_data": "FunctionDef name:_fhtq arg:a arg:u arg:inverse arguments arg arg arg arg If Compare Assign Assign Assign Call If Call Assign Call Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "parse_example_spec",
    "source_code": "@property\ndef parse_example_spec(self):\n    return {self.key: parsing_ops.VarLenFeature(self.dtype)}",
    "docstring": "See base class.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\feature_column\\feature_column_v2.py",
    "ast_data": "FunctionDef name:parse_example_spec arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "kornia",
    "name": "SIFTFeatureScaleSpace",
    "source_code": "class SIFTFeatureScaleSpace(LocalFeature):\n\n    def __init__(self, num_features: int=8000, upright: bool=False, rootsift: bool=True, device: Optional[Device]=None) -> None:\n        if device is None:\n            device = torch.device('cpu')\n        patch_size: int = 41\n        detector = ScaleSpaceDetector(num_features, resp_module=BlobDoG(), nms_module=ConvQuadInterp3d(10), scale_pyr_module=ScalePyramid(3, 1.6, 32, double_image=True), ori_module=PassLAF() if upright else LAFOrienter(19), scale_space_response=True, minima_are_also_good=True, mr_size=6.0).to(device)\n        descriptor = LAFDescriptor(SIFTDescriptor(patch_size=patch_size, rootsift=rootsift), patch_size=patch_size, grayscale_descriptor=True).to(device)\n        super().__init__(detector, descriptor)",
    "docstring": "Convenience module, which implements DoG detector + (Root)SIFT descriptor. Using with blur pyramid. Still not as good as OpenCV/VLFeat because of but we are working on it",
    "type": "class",
    "file_path": "kornia\\kornia\\feature\\integrated.py",
    "ast_data": "ClassDef name:SIFTFeatureScaleSpace FunctionDef name:__init__ arg:self arg:num_features arg:upright arg:rootsift arg:device arguments arg arg arg arg arg If Compare Assign Call Assign Call Call Call Call Call Call Call Assign Call Call Call Call Call"
  },
  {
    "library": "django",
    "name": "get_actions",
    "source_code": "def get_actions(self, request):\n    if self.actions is None or IS_POPUP_VAR in request.GET:\n        return {}\n    actions = self._filter_actions_by_permissions(request, self._get_base_actions())\n    return {name: (func, name, desc) for func, name, desc in actions}",
    "docstring": "Return a dictionary mapping the names of all actions for this ModelAdmin to a tuple of (callable, name, description) for each action.",
    "type": "method",
    "file_path": "django\\django\\contrib\\admin\\options.py",
    "ast_data": "FunctionDef name:get_actions arg:self arg:request arguments arg arg If BoolOp Compare Compare Return return:no Assign Call Call Return return:yes"
  },
  {
    "library": "django",
    "name": "confirm_login_allowed",
    "source_code": "def confirm_login_allowed(self, user):\n    if not user.is_active:\n        raise ValidationError(self.error_messages['inactive'], code='inactive')",
    "docstring": "Controls whether the given User may log in. This is a policy setting, independent of end-user authentication. This default behavior is to allow login by active users, and reject login by inactive users. If the given user cannot log in, this method should raise a ``. If the given user may log in, this method should return None.",
    "type": "method",
    "file_path": "django\\django\\contrib\\auth\\forms.py",
    "ast_data": "FunctionDef name:confirm_login_allowed arg:self arg:user arguments arg arg If Raise Call"
  },
  {
    "library": "pytorch",
    "name": "clone",
    "source_code": "def clone(self, **kwargs):\n    args = dict(self.__dict__)\n    args.update(kwargs)\n    return self.__class__(**args)",
    "docstring": "Shallow copy with some (optional) changes",
    "type": "method",
    "file_path": "pytorch\\torch\\_dynamo\\variables\\base.py",
    "ast_data": "FunctionDef name:clone arg:self arguments arg arg Assign Call Call Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "_split_region_at_points",
    "source_code": "def _split_region_at_points(a, b, points, xp):\n    regions = [(a, b)]\n    for point in points:\n        if xp.any(xp.isinf(point)):\n            continue\n        new_subregions = []\n        for a_k, b_k in regions:\n            if _is_strictly_in_region(a_k, b_k, point, xp):\n                subregions = _split_subregion(a_k, b_k, xp, point)\n                for left, right in subregions:\n                    if xp.any(left == right):\n                        continue\n                    else:\n                        new_subregions.append((left, right))\n                new_subregions.extend(subregions)\n            else:\n                new_subregions.append((a_k, b_k))\n        regions = new_subregions\n    return regions",
    "docstring": "Given the integration limits and describing a rectangular region and a list of , find the list of `points` lie strictly inside any of the subregions.",
    "type": "function",
    "file_path": "scipy\\scipy\\integrate\\_cubature.py",
    "ast_data": "FunctionDef name:_split_region_at_points arg:a arg:b arg:points arg:xp arguments arg arg arg arg Assign For If Call Call Assign For If Call Assign Call For If Call Compare Call Call Call Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "embedding_table_shards",
    "source_code": "@property\ndef embedding_table_shards(self) -> Dict[tpu_embedding_v2_utils.TableConfig, List[tf_variables.Variable]]:\n    self._maybe_build()\n    ordered_devices = []\n    for devices in self._strategy.extended._tpu_devices:\n        ordered_devices.extend(devices)\n    table_shards = {name: [(device, var.read_from_device(device)) for device in ordered_devices] for name, var in self.embedding_tables.items()}\n    return table_shards",
    "docstring": "Returns a dict of embedding tables, keyed by .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\tpu\\tpu_embedding_v3.py",
    "ast_data": "FunctionDef name:embedding_table_shards arg:self arguments arg Call Assign For Call Assign Call Call Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "bounds",
    "source_code": "@property\ndef bounds(self):\n    if self.k1.bounds.size == 0:\n        return self.k2.bounds\n    if self.k2.bounds.size == 0:\n        return self.k1.bounds\n    return np.vstack((self.k1.bounds, self.k2.bounds))",
    "docstring": "Returns the log-transformed bounds on the theta. Returns ------- bounds : ndarray of shape (n_dims, 2) The log-transformed bounds on the kernel's hyperparameters theta",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\gaussian_process\\kernels.py",
    "ast_data": "FunctionDef name:bounds arg:self arguments arg If Compare Return return:yes If Compare Return return:yes Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "generate_file_c",
    "source_code": "def generate_file_c(sigs, lib_name, accelerate):\n    if lib_name == 'BLAS':\n        preamble = [C_PREAMBLE]\n    elif lib_name == 'LAPACK':\n        preamble = [C_PREAMBLE, LAPACK_DECLS]\n    else:\n        raise RuntimeError(f'Unrecognized lib_name: {lib_name}.')\n    preamble = ['/*\\n', *COMMENT_TEXT, '*/\\n'] + preamble + [CPP_GUARD_BEGIN]\n    decls = [generate_decl_c(**sig, accelerate=accelerate) for sig in sigs]\n    content = preamble + decls + [CPP_GUARD_END]\n    return ''.join(content)",
    "docstring": "Generate content for C header file for Cython to import.",
    "type": "function",
    "file_path": "scipy\\scipy\\linalg\\_generate_pyx.py",
    "ast_data": "FunctionDef name:generate_file_c arg:sigs arg:lib_name arg:accelerate arguments arg arg arg If Compare Assign If Compare Assign Raise Call Assign Assign Call Assign Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "NullHandler",
    "source_code": "class NullHandler:\n    pass",
    "docstring": "Sentinel indicating that a global variable is unset ala None. Typically, attempting to access the global variable before it's set is an error, but with NullHandler it won't fail until you try to access an attribute on it.",
    "type": "class",
    "file_path": "pytorch\\torch\\_inductor\\virtualized.py",
    "ast_data": "ClassDef name:NullHandler"
  },
  {
    "library": "pytorch",
    "name": "_create_post_forward_hook",
    "source_code": "def _create_post_forward_hook(self, name: str) -> Callable:\n\n    def _post_forward_hook(module: nn.Module, inputs: Sequence[torch.Tensor], outputs: Sequence[torch.Tensor]) -> None:\n        if hasattr(module, '_memory_tracker_is_root') and module._memory_tracker_is_root:\n            self._add_marker('fw_bw_boundary')\n    return _post_forward_hook",
    "docstring": "Insert the marker 'fw_bw_boundary' at the boundary of forward and backward pass.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\_tools\\memory_tracker.py",
    "ast_data": "FunctionDef name:_create_post_forward_hook arg:self arg:name arguments arg arg FunctionDef name:_post_forward_hook arg:module arg:inputs arg:outputs arguments arg arg arg If BoolOp Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "AddBackpropAccumulatedValue",
    "source_code": "def AddBackpropAccumulatedValue(self, history_value, value, dead_branch=False):\n    history_ctxt = history_value.op._get_control_flow_context()\n    cond_ctxt = None\n    value_ctxt = value.op._get_control_flow_context()\n    while value_ctxt and value_ctxt != history_ctxt:\n        if isinstance(value_ctxt, control_flow_ops.CondContext):\n            cond_ctxt = value_ctxt\n            break\n        value_ctxt = value_ctxt.outer_context\n    with ops.control_dependencies(None):\n        self.grad_context.Enter()\n        if cond_ctxt:\n            grad_state = self\n            pred = None\n            while pred is None and grad_state:\n                pred = grad_state.history_map.get(cond_ctxt.pred.name)\n                grad_state = grad_state.outer_grad_state\n            if pred is None:\n                pred = cond_ctxt.pred\n            branch = 1 - cond_ctxt.branch if dead_branch else cond_ctxt.branch\n            history_value = control_flow_ops._SwitchRefOrTensor(history_value, pred)[branch]\n        pop = gen_data_flow_ops.stack_pop_v2(history_value, value.dtype.base_dtype)\n        pop.set_shape(value.get_shape())\n        self.grad_context.Exit()\n    parallel_iterations = self.grad_context.parallel_iterations\n    if parallel_iterations > 1:\n        self.grad_sync._add_control_input(pop.op)\n    return pop",
    "docstring": "Add the getter for an accumulated value in the grad context. This is added to the backprop loop. Called in the grad context to get the value of an accumulated value. The stack pop op must be guarded by the pred of the controlling cond. Args: history_value: The history (a stack) of a value. value: The value that is pushed onto the stack. dead_branch: True iff the tensor is on a dead branch of a cond. Returns: The current value (the top of the stack).",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\control_flow_state.py",
    "ast_data": "FunctionDef name:AddBackpropAccumulatedValue arg:self arg:history_value arg:value arg:dead_branch arguments arg arg arg arg Assign Call Assign Assign Call While BoolOp Compare If Call Assign Assign With Call Call If Assign Assign While BoolOp Compare Assign Call Assign If Compare Assign Assign Assign Call Assign Call Call Call Call Assign If Compare Call Return return:yes"
  },
  {
    "library": "kornia",
    "name": "log",
    "source_code": "def log(self) -> Tensor:\n    theta = self.so2.log()\n    half_theta = 0.5 * theta\n    denom = self.so2.z.real - 1\n    a = where(denom != 0, -(half_theta * self.so2.z.imag) / denom, tensor(0.0, device=theta.device, dtype=theta.dtype))\n    row0 = stack((a, half_theta), -1)\n    row1 = stack((-half_theta, a), -1)\n    V_inv = stack((row0, row1), -2)\n    upsilon = V_inv @ self.t.data[..., None]\n    return stack((upsilon[..., 0, 0], upsilon[..., 1, 0], theta), -1)",
    "docstring": "Convert elements of lie group to elements of lie algebra. Example: >>> v = torch.ones((1, 3)) >>> s = Se2.exp(v).log() >>> s tensor([[1.0000, 1.0000, 1.0000]], grad_fn=)",
    "type": "method",
    "file_path": "kornia\\kornia\\geometry\\liegroup\\se2.py",
    "ast_data": "FunctionDef name:log arg:self arguments arg Assign Call Assign Assign Assign Call Compare Call Assign Call Assign Call Assign Call Assign Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "_root_leastsq",
    "source_code": "def _root_leastsq(fun, x0, args=(), jac=None, col_deriv=0, xtol=1.49012e-08, ftol=1.49012e-08, gtol=0.0, maxiter=0, eps=0.0, factor=100, diag=None, **unknown_options):\n    nfev = 0\n\n    def _wrapped_fun(*fargs):\n        nonlocal nfev\n        nfev += 1\n        return fun(*fargs)\n    _check_unknown_options(unknown_options)\n    x, cov_x, info, msg, ier = leastsq(_wrapped_fun, x0, args=args, Dfun=jac, full_output=True, col_deriv=col_deriv, xtol=xtol, ftol=ftol, gtol=gtol, maxfev=maxiter, epsfcn=eps, factor=factor, diag=diag)\n    sol = OptimizeResult(x=x, message=msg, status=ier, success=ier in (1, 2, 3, 4), cov_x=cov_x, fun=info.pop('fvec'), method='lm')\n    sol.update(info)\n    sol.nfev = nfev\n    return sol",
    "docstring": "Solve for least squares with Levenberg-Marquardt Options ------- col_deriv : bool non-zero to specify that the Jacobian function computes derivatives down the columns (faster, because there is no transpose operation). ftol : float Relative error desired in the sum of squares. xtol : float Relative error desired in the approximate solution. gtol : float Orthogonality desired between the function vector and the columns of the Jacobian. maxiter : int The maximum number of calls to the function. If zero, then 100*(N+1) is the maximum where N is the number of elements in x0. eps : float A suitable step length for the forward-difference approximation of the Jacobian (for Dfun=None). If is less than the machine precision, it is assumed that the relative errors in the functions are of the order of the machine precision. factor : float A parameter determining the initial step bound (``. diag : sequence N positive entries that serve as a scale factors for the variables.",
    "type": "function",
    "file_path": "scipy\\scipy\\optimize\\_root.py",
    "ast_data": "FunctionDef name:_root_leastsq arg:fun arg:x0 arg:args arg:jac arg:col_deriv arg:xtol arg:ftol arg:gtol arg:maxiter arg:eps arg:factor arg:diag arguments arg arg arg arg arg arg arg arg arg arg arg arg arg Assign FunctionDef name:_wrapped_fun arguments arg Return return:yes Call Call Assign Call Assign Call Compare Call Call Assign Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "get_path",
    "source_code": "def get_path(self):\n    boxstyle = self.get_boxstyle()\n    m_aspect = self.get_mutation_aspect()\n    path = boxstyle(self._x, self._y / m_aspect, self._width, self._height / m_aspect, self.get_mutation_scale())\n    return Path(path.vertices * [1, m_aspect], path.codes)",
    "docstring": "Return the mutated path of the rectangle.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\patches.py",
    "ast_data": "FunctionDef name:get_path arg:self arguments arg Assign Call Assign Call Assign Call Call Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "_sub_nat",
    "source_code": "@final\ndef _sub_nat(self) -> np.ndarray:\n    result = np.empty(self.shape, dtype=np.int64)\n    result.fill(iNaT)\n    if self.dtype.kind in 'mM':\n        self = cast('DatetimeArray| TimedeltaArray', self)\n        return result.view(f'timedelta64[{self.unit}]')\n    else:\n        return result.view('timedelta64[ns]')",
    "docstring": "Subtract pd.NaT from self",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\arrays\\datetimelike.py",
    "ast_data": "FunctionDef name:_sub_nat arg:self arguments arg Assign Call Call If Compare Assign Call Return return:yes Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "join_process_group",
    "source_code": "@property\ndef join_process_group(self) -> Any:\n    return self.process_group",
    "docstring": "Return process group.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\optim\\zero_redundancy_optimizer.py",
    "ast_data": "FunctionDef name:join_process_group arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, init_args, init_func, next_func, finalize_func, output_signature, name=None):\n    self._init_args = init_args\n    self._init_structure = structure.type_spec_from_value(init_args)\n    self._init_func = structured_function.StructuredFunctionWrapper(init_func, self._transformation_name(), input_structure=self._init_structure)\n    self._next_func = structured_function.StructuredFunctionWrapper(next_func, self._transformation_name(), input_structure=self._init_func.output_structure)\n    self._finalize_func = structured_function.StructuredFunctionWrapper(finalize_func, self._transformation_name(), input_structure=self._init_func.output_structure)\n    self._output_signature = output_signature\n    self._name = name\n    variant_tensor = gen_dataset_ops.generator_dataset(structure.to_tensor_list(self._init_structure, self._init_args) + self._init_func.function.captured_inputs, self._next_func.function.captured_inputs, self._finalize_func.function.captured_inputs, init_func=self._init_func.function, next_func=self._next_func.function, finalize_func=self._finalize_func.function, **self._common_args)\n    super().__init__(variant_tensor)",
    "docstring": "Constructs a . Args: init_args: A (nested) structure representing the arguments to . init_func: A TensorFlow function that will be called on each time a C++ iterator over this dataset is constructed. Returns a (nested) structure representing the \"state\" of the dataset. next_func: A TensorFlow function that will be called on the result of to produce each element, and that raises to terminate iteration. finalize_func: A TensorFlow function that will be called on the result of immediately before a C++ iterator over this dataset is destroyed. The return value is ignored. output_signature: A (nested) structure of objects describing the output of . name: Optional. A name for the tf.data transformation.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\data\\ops\\from_generator_op.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:init_args arg:init_func arg:next_func arg:finalize_func arg:output_signature arg:name arguments arg arg arg arg arg arg arg Assign Assign Call Assign Call Call Assign Call Call Assign Call Call Assign Assign Assign Call Call Call Call"
  },
  {
    "library": "sphinx",
    "name": "TeeStripANSI",
    "source_code": "class TeeStripANSI:\n\n    def __init__(self, stream_term: SupportsWrite, stream_file: SupportsWrite) -> None:\n        self.stream_term = stream_term\n        self.stream_file = stream_file\n\n    def write(self, text: str, /) -> None:\n        self.stream_term.write(text)\n        self.stream_file.write(strip_escape_sequences(text))\n\n    def flush(self) -> None:\n        if hasattr(self.stream_term, 'flush'):\n            self.stream_term.flush()\n        if hasattr(self.stream_file, 'flush'):\n            self.stream_file.flush()",
    "docstring": "File-like object writing to two streams.",
    "type": "class",
    "file_path": "sphinx\\sphinx\\util\\_io.py",
    "ast_data": "ClassDef name:TeeStripANSI FunctionDef name:__init__ arg:self arg:stream_term arg:stream_file arguments arg arg arg Assign Assign FunctionDef name:write arguments arg arg Call Call Call FunctionDef name:flush arg:self arguments arg If Call Call If Call Call"
  },
  {
    "library": "matplotlib",
    "name": "__call__",
    "source_code": "def __call__(self):\n    vmin, vmax = self.axis.get_view_interval()\n    return self.tick_values(vmin, vmax)",
    "docstring": "Return the locations of the ticks.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\ticker.py",
    "ast_data": "FunctionDef name:__call__ arg:self arguments arg Assign Call Return return:yes Call"
  },
  {
    "library": "authlib",
    "name": "filter",
    "source_code": "def filter(self, scope: str):\n    scope = scope_to_list(scope)\n    filtered_claims = [claim for scope_part in scope for claim in self.SCOPES_CLAIMS_MAPPING.get(scope_part, [])]\n    filtered_items = {key: val for key, val in self.items() if key in filtered_claims}\n    return UserInfo(filtered_items)",
    "docstring": "Return a new UserInfo object containing only the claims matching the scope passed in parameter.",
    "type": "method",
    "file_path": "authlib\\authlib\\oidc\\core\\claims.py",
    "ast_data": "FunctionDef name:filter arg:self arg:scope arguments arg arg Assign Call Assign Call Assign Call Compare Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_create_saver_from_imported_meta_graph",
    "source_code": "def _create_saver_from_imported_meta_graph(meta_graph_def, import_scope, imported_vars):\n    if meta_graph_def.HasField('saver_def'):\n        scope = import_scope\n        var_names = list(imported_vars.keys())\n        if var_names:\n            sample_key = var_names[0]\n            sample_var = imported_vars[sample_key]\n            scope = sample_var.name[:-len(sample_key)]\n        return Saver(saver_def=meta_graph_def.saver_def, name=scope)\n    elif variables._all_saveable_objects(scope=import_scope):\n        return Saver()\n    else:\n        logging.info('Saver not created because there are no variables in the graph to restore')\n        return None",
    "docstring": "Return a saver for restoring variable values to an imported MetaGraph.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\training\\saver.py",
    "ast_data": "FunctionDef name:_create_saver_from_imported_meta_graph arg:meta_graph_def arg:import_scope arg:imported_vars arguments arg arg arg If Call Assign Assign Call Call If Assign Assign Assign Call Return return:yes Call If Call Return return:yes Call Call Return return:no"
  },
  {
    "library": "matplotlib",
    "name": "set_alignment",
    "source_code": "def set_alignment(self, alignment):\n    _api.check_in_list(['center', 'left', 'right'], alignment=alignment)\n    self._alignment = alignment\n    self._legend_box.align = alignment",
    "docstring": "Set the alignment of the legend title and the box of entries. The entries are aligned as a single block, so that markers always lined up. Parameters ---------- alignment : {'center', 'left', 'right'}.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\legend.py",
    "ast_data": "FunctionDef name:set_alignment arg:self arg:alignment arguments arg arg Call Assign Assign"
  },
  {
    "library": "scikit-learn",
    "name": "_prf_divide",
    "source_code": "def _prf_divide(numerator, denominator, metric, modifier, average, warn_for, zero_division='warn'):\n    xp, _ = get_namespace(numerator, denominator)\n    dtype_float = _find_matching_floating_dtype(numerator, denominator, xp=xp)\n    mask = denominator == 0\n    denominator = xp.asarray(denominator, copy=True, dtype=dtype_float)\n    denominator[mask] = 1\n    result = xp.asarray(numerator, dtype=dtype_float) / denominator\n    if not xp.any(mask):\n        return result\n    zero_division_value = _check_zero_division(zero_division)\n    result[mask] = zero_division_value\n    if zero_division != 'warn' or metric not in warn_for:\n        return result\n    if metric in warn_for:\n        _warn_prf(average, modifier, f'{metric.capitalize()} is', result.shape[0])\n    return result",
    "docstring": "Performs division and handles divide-by-zero. On zero-division, sets the corresponding result elements equal to 0, 1 or np.nan (according to `` raises a warning. The metric, modifier and average arguments are used only for determining an appropriate warning.",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\metrics\\_classification.py",
    "ast_data": "FunctionDef name:_prf_divide arg:numerator arg:denominator arg:metric arg:modifier arg:average arg:warn_for arg:zero_division arguments arg arg arg arg arg arg arg Assign Call Assign Call Assign Compare Assign Call Assign Assign Call If Call Return return:yes Assign Call Assign If BoolOp Compare Compare Return return:yes If Compare Call Call Return return:yes"
  },
  {
    "library": "django",
    "name": "getOrdinate",
    "source_code": "def getOrdinate(self, dimension, index):\n    self._checkindex(index)\n    self._checkdim(dimension)\n    return capi.cs_getordinate(self.ptr, index, dimension, byref(c_double()))",
    "docstring": "Return the value for the given dimension and index.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\geos\\coordseq.py",
    "ast_data": "FunctionDef name:getOrdinate arg:self arg:dimension arg:index arguments arg arg arg Call Call Return return:yes Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "has_fake_kernel",
    "source_code": "def has_fake_kernel(op: torch._ops.OpOverload) -> bool:\n    if can_generate_trivial_fake_impl(op):\n        return True\n    name = op._name\n    if torch._C._dispatch_has_kernel_for_dispatch_key(name, 'CompositeImplicitAutograd'):\n        return True\n    opdef = torch._library.custom_ops._maybe_get_opdef(name)\n    if opdef is None:\n        if torch._C._dispatch_has_kernel_for_dispatch_key(name, 'CompositeExplicitAutograd'):\n            return True\n        entry = torch._library.simple_registry.singleton.find(name)\n        if entry.fake_impl.kernel is not None:\n            return True\n        if torch._C._dispatch_has_kernel_for_dispatch_key(name, 'Meta'):\n            return True\n    elif opdef._abstract_fn is not None:\n        return True\n    return False",
    "docstring": "If an operator (that stays alive until FakeTensorMode) has a Fake kernel. Don't use this if the operator decomposes before FakeTensorMode.",
    "type": "function",
    "file_path": "pytorch\\torch\\_library\\utils.py",
    "ast_data": "FunctionDef name:has_fake_kernel arg:op arguments arg If Call Return return:yes Assign If Call Return return:yes Assign Call If Compare If Call Return return:yes Assign Call If Compare Return return:yes If Call Return return:yes If Compare Return return:yes Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "contains_branch_seperately",
    "source_code": "def contains_branch_seperately(self, other_transform):\n    if self.output_dims != 2:\n        raise ValueError('contains_branch_seperately only supports transforms with 2 output dimensions')\n    return (self.contains_branch(other_transform),) * 2",
    "docstring": "Return whether the given branch is a sub-tree of this transform on each separate dimension. A common use for this method is to identify if a transform is a blended transform containing an Axes' data transform. e.g.:: x_isdata, y_isdata = trans.contains_branch_seperately(ax.transData)",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\transforms.py",
    "ast_data": "FunctionDef name:contains_branch_seperately arg:self arg:other_transform arguments arg arg If Compare Raise Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "is_nullable",
    "source_code": "def is_nullable(self) -> bool:\n    return False",
    "docstring": "Assume a custom class is not nullable.",
    "type": "method",
    "file_path": "pytorch\\torchgen\\model.py",
    "ast_data": "FunctionDef name:is_nullable arg:self arguments arg Return return:yes"
  },
  {
    "library": "django",
    "name": "create_model_instance",
    "source_code": "def create_model_instance(self, data):\n    return self.model(session_key=self._get_or_create_session_key(), session_data=self.encode(data), expire_date=self.get_expiry_date())",
    "docstring": "Return a new instance of the session model object, which represents the current session state. Intended to be used for saving the session data to the database.",
    "type": "method",
    "file_path": "django\\django\\contrib\\sessions\\backends\\db.py",
    "ast_data": "FunctionDef name:create_model_instance arg:self arg:data arguments arg arg Return return:yes Call Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "load_bytes",
    "source_code": "@abc.abstractmethod\ndef load_bytes(self, read_item: ReadItem, value: io.BytesIO) -> None:\n    pass",
    "docstring": "Load the item described by `` are defined by the SavePlanner used to produce the checkpoint being loaded.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\checkpoint\\planner.py",
    "ast_data": "FunctionDef name:load_bytes arg:self arg:read_item arg:value arguments arg arg arg"
  },
  {
    "library": "pytorch",
    "name": "fqn_to_module",
    "source_code": "def fqn_to_module(model: Optional[nn.Module], path: str) -> Optional[nn.Module]:\n    if path != '':\n        for name in path.split('.'):\n            model = getattr(model, name, None)\n    return model",
    "docstring": "Given an fqn, returns the corresponding module or tensor or None if the fqn given by doesn't correspond to anything. Similar to model.get_submodule(path) but works for tensors.",
    "type": "function",
    "file_path": "pytorch\\torch\\ao\\pruning\\sparsifier\\utils.py",
    "ast_data": "FunctionDef name:fqn_to_module arg:model arg:path arguments arg arg If Compare For Call Assign Call Return return:yes"
  },
  {
    "library": "cherrypy",
    "name": "regenerate",
    "source_code": "def regenerate(self):\n    self.regenerated = True\n    self._regenerate()",
    "docstring": "Replace the current session (with a new id).",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\lib\\sessions.py",
    "ast_data": "FunctionDef name:regenerate arg:self arguments arg Assign Call"
  },
  {
    "library": "pytorch",
    "name": "get_test_only_legacy_native_backend_config_dict",
    "source_code": "def get_test_only_legacy_native_backend_config_dict():\n    return get_test_only_legacy_native_backend_config().to_dict()",
    "docstring": "Return the for PyTorch Native backend (fbgemm/qnnpack) with various additional fp16 ops in dictionary form.",
    "type": "function",
    "file_path": "pytorch\\torch\\ao\\quantization\\backend_config\\native.py",
    "ast_data": "FunctionDef name:get_test_only_legacy_native_backend_config_dict arguments Return return:yes Call Call"
  },
  {
    "library": "scipy",
    "name": "skip_backend",
    "source_code": "def skip_backend(backend):\n    backend = _backend_from_arg(backend)\n    return ua.skip_backend(backend)",
    "docstring": "Context manager to skip a backend within a fixed scope. Within the context of a `` containing the name of a known backend {'scipy'} or an object that implements the uarray protocol. Examples -------- >>> import scipy.fft as fft >>> fft.fft([1]) # Calls default SciPy backend array([1.+0.j]) >>> with fft.skip_backend('scipy'): # We explicitly skip the SciPy backend ... fft.fft([1]) # leaving no implementation available Traceback (most recent call last): ... BackendNotImplementedError: No selected backends had an implementation ...",
    "type": "function",
    "file_path": "scipy\\scipy\\fft\\_backend.py",
    "ast_data": "FunctionDef name:skip_backend arg:backend arguments arg Assign Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "SelectiveCheckpointContext",
    "source_code": "class SelectiveCheckpointContext:\n\n    def __init__(self, *, is_recompute):\n        self.is_recompute = is_recompute",
    "docstring": "Context passed to policy function during selective checkpointing. This class is used to pass relevant metadata to the policy function during selective checkpointing. The metadata includes whether the current invocation of the policy function is during recomputation or not. Example: >>> # xdoctest: +SKIP(stub) >>> >>> def policy_fn(ctx, op, *args, **kwargs): >>> print(ctx.is_recompute) >>> >>> context_fn = functools.partial(create_selective_checkpoint_contexts, policy_fn) >>> >>> out = torch.utils.checkpoint.checkpoint( >>> fn, x, y, >>> use_reentrant=False, >>> context_fn=context_fn, >>> )",
    "type": "class",
    "file_path": "pytorch\\torch\\utils\\checkpoint.py",
    "ast_data": "ClassDef name:SelectiveCheckpointContext FunctionDef name:__init__ arg:self arguments arg arg Assign"
  },
  {
    "library": "pytorch",
    "name": "_distribution_func",
    "source_code": "def _distribution_func(self, key, weights):\n    if key in self.saved_cum_distribution:\n        return self.saved_cum_distribution[key]\n    total = sum(weights)\n    result = []\n    cumsum = 0\n    for w in weights:\n        cumsum += w\n        result.append(cumsum / total)\n    self.saved_cum_distribution[key] = result\n    return result",
    "docstring": "this is a cumulative distribution function used for random sampling inputs",
    "type": "method",
    "file_path": "pytorch\\benchmarks\\operator_benchmark\\benchmark_utils.py",
    "ast_data": "FunctionDef name:_distribution_func arg:self arg:key arg:weights arguments arg arg arg If Compare Return return:yes Assign Call Assign Assign For Call Assign Return return:yes"
  },
  {
    "library": "seaborn",
    "name": "Move",
    "source_code": "@dataclass\nclass Move:\n    group_by_orient: ClassVar[bool] = True\n\n    def __call__(self, data: DataFrame, groupby: GroupBy, orient: str, scales: dict[str, Scale]) -> DataFrame:\n        raise NotImplementedError",
    "docstring": "Base class for objects that apply simple positional transforms.",
    "type": "class",
    "file_path": "seaborn\\seaborn\\_core\\moves.py",
    "ast_data": "ClassDef name:Move FunctionDef name:__call__ arg:self arg:data arg:groupby arg:orient arg:scales arguments arg arg arg arg arg Raise"
  },
  {
    "library": "cherrypy",
    "name": "graceful",
    "source_code": "def graceful(self):\n    self.log('Bus graceful')\n    self.publish('graceful')",
    "docstring": "Advise all services to reload.",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\process\\wspbus.py",
    "ast_data": "FunctionDef name:graceful arg:self arguments arg Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_IgammacGrad",
    "source_code": "@ops.RegisterGradient('Igammac')\ndef _IgammacGrad(op: ops.Operation, grad):\n    igamma_grad_a, igamma_grad_x = _IgammaGrad(op, grad)\n    return (-igamma_grad_a, -igamma_grad_x)",
    "docstring": "Returns gradient of igammac(a, x) = 1 - igamma(a, x) w.r.t. a and x.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\math_grad.py",
    "ast_data": "FunctionDef name:_IgammacGrad arg:op arg:grad arguments arg arg Assign Call Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "_logpdf",
    "source_code": "def _logpdf(self, x, beta, m):\n    N = 1.0 / (m / beta / (m - 1) * np.exp(-beta ** 2 / 2.0) + _norm_pdf_C * _norm_cdf(beta))\n\n    def rhs(x, beta, m):\n        return -x ** 2 / 2\n\n    def lhs(x, beta, m):\n        return m * np.log(m / beta) - beta ** 2 / 2 - m * np.log(m / beta - beta - x)\n    return np.log(N) + xpx.apply_where(x > -beta, (x, beta, m), rhs, lhs)",
    "docstring": "Return the log of the PDF of the crystalball function.",
    "type": "method",
    "file_path": "scipy\\scipy\\stats\\_continuous_distns.py",
    "ast_data": "FunctionDef name:_logpdf arg:self arg:x arg:beta arg:m arguments arg arg arg arg Assign Call Call FunctionDef name:rhs arg:x arg:beta arg:m arguments arg arg arg Return return:yes FunctionDef name:lhs arg:x arg:beta arg:m arguments arg arg arg Return return:yes Call Call Return return:yes Call Call Compare"
  },
  {
    "library": "numpy",
    "name": "_update_from",
    "source_code": "def _update_from(self, obj):\n    if isinstance(obj, ndarray):\n        _baseclass = type(obj)\n    else:\n        _baseclass = ndarray\n    _optinfo = {}\n    _optinfo.update(getattr(obj, '_optinfo', {}))\n    _optinfo.update(getattr(obj, '_basedict', {}))\n    if not isinstance(obj, MaskedArray):\n        _optinfo.update(getattr(obj, '__dict__', {}))\n    _dict = {'_fill_value': getattr(obj, '_fill_value', None), '_hardmask': getattr(obj, '_hardmask', False), '_sharedmask': getattr(obj, '_sharedmask', False), '_isfield': getattr(obj, '_isfield', False), '_baseclass': getattr(obj, '_baseclass', _baseclass), '_optinfo': _optinfo, '_basedict': _optinfo}\n    self.__dict__.update(_dict)\n    self.__dict__.update(_optinfo)",
    "docstring": "Copies some attributes of obj to self.",
    "type": "method",
    "file_path": "numpy\\numpy\\ma\\core.py",
    "ast_data": "FunctionDef name:_update_from arg:self arg:obj arguments arg arg If Call Assign Call Assign Assign Call Call Call Call If Call Call Call Assign Call Call Call Call Call Call Call"
  },
  {
    "library": "sphinx",
    "name": "is_singledispatch_function",
    "source_code": "def is_singledispatch_function(obj: Any) -> bool:\n    return inspect.isfunction(obj) and hasattr(obj, 'dispatch') and hasattr(obj, 'register') and (obj.dispatch.__module__ == 'functools')",
    "docstring": "Check if the object is a :func: function.",
    "type": "function",
    "file_path": "sphinx\\sphinx\\util\\inspect.py",
    "ast_data": "FunctionDef name:is_singledispatch_function arg:obj arguments arg Return return:yes BoolOp Call Call Call Compare"
  },
  {
    "library": "tensorflow",
    "name": "_calc_conv_flops",
    "source_code": "@ops.RegisterStatistics('Conv2D', 'flops')\ndef _calc_conv_flops(graph, node):\n    input_shape = graph_util.tensor_shape_from_node_def_name(graph, node.input[0])\n    input_shape.assert_is_fully_defined()\n    filter_shape = graph_util.tensor_shape_from_node_def_name(graph, node.input[1])\n    filter_shape.assert_is_fully_defined()\n    output_shape = graph_util.tensor_shape_from_node_def_name(graph, node.name)\n    output_shape.assert_is_fully_defined()\n    filter_height = int(filter_shape[0])\n    filter_width = int(filter_shape[1])\n    filter_in_depth = int(filter_shape[2])\n    output_count = np.prod(output_shape.as_list(), dtype=np.int64)\n    return ops.OpStats('flops', output_count * filter_in_depth * filter_height * filter_width * 2)",
    "docstring": "Calculates the compute resources needed for Conv2D.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\nn_ops.py",
    "ast_data": "FunctionDef name:_calc_conv_flops arg:graph arg:node arguments arg arg Assign Call Call Assign Call Call Assign Call Call Assign Call Assign Call Assign Call Assign Call Call Return return:yes Call Call"
  },
  {
    "library": "scipy",
    "name": "_solve_discrete_lyapunov_direct",
    "source_code": "def _solve_discrete_lyapunov_direct(a, q):\n    lhs = np.kron(a, a.conj())\n    lhs = np.eye(lhs.shape[0]) - lhs\n    x = solve(lhs, q.flatten())\n    return np.reshape(x, q.shape)",
    "docstring": "Solves the discrete Lyapunov equation directly. This function is called by the function with . It is not supposed to be called directly.",
    "type": "function",
    "file_path": "scipy\\scipy\\linalg\\_solvers.py",
    "ast_data": "FunctionDef name:_solve_discrete_lyapunov_direct arg:a arg:q arguments arg arg Assign Call Call Assign Call Assign Call Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "begin_compile",
    "source_code": "@classmethod\ndef begin_compile(cls) -> None:\n    if not TritonBundler.is_enabled():\n        return\n    log.debug('TritonBundler.begin_compile is called')\n    assert cls._entries is None\n    cls._entries = []\n    cls._static_autotuners = []",
    "docstring": "Initializes the TritonBundler. The current TritonBundler bundle is finalized by TritonBundler.collect.",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\triton_bundler.py",
    "ast_data": "FunctionDef name:begin_compile arg:cls arguments arg If Call Return return:no Call Compare Assign Assign"
  },
  {
    "library": "tensorflow",
    "name": "finalize_fn",
    "source_code": "def finalize_fn(iterator_id_t):\n\n    def finalize_py_func(iterator_id):\n        generator_state.iterator_completed(iterator_id)\n        return np.array(0, dtype=np.int64)\n    return script_ops.numpy_function(finalize_py_func, [iterator_id_t], dtypes.int64)",
    "docstring": "Releases host-side state for the iterator with ID .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\data\\ops\\from_generator_op.py",
    "ast_data": "FunctionDef name:finalize_fn arg:iterator_id_t arguments arg FunctionDef name:finalize_py_func arg:iterator_id arguments arg Call Return return:yes Call Return return:yes Call"
  },
  {
    "library": "django",
    "name": "_constraint_names",
    "source_code": "def _constraint_names(self, model, column_names=None, unique=None, primary_key=None, index=None, foreign_key=None, check=None, type_=None, exclude=None):\n    if column_names is not None:\n        column_names = [self.connection.introspection.identifier_converter(truncate_name(name, self.connection.ops.max_name_length())) if self.connection.features.truncates_names else self.connection.introspection.identifier_converter(name) for name in column_names]\n    with self.connection.cursor() as cursor:\n        constraints = self.connection.introspection.get_constraints(cursor, model._meta.db_table)\n    result = []\n    for name, infodict in constraints.items():\n        if column_names is None or column_names == infodict['columns']:\n            if unique is not None and infodict['unique'] != unique:\n                continue\n            if primary_key is not None and infodict['primary_key'] != primary_key:\n                continue\n            if index is not None and infodict['index'] != index:\n                continue\n            if check is not None and infodict['check'] != check:\n                continue\n            if foreign_key is not None and (not infodict['foreign_key']):\n                continue\n            if type_ is not None and infodict['type'] != type_:\n                continue\n            if not exclude or name not in exclude:\n                result.append(name)\n    return result",
    "docstring": "Return all constraint names matching the columns and conditions.",
    "type": "method",
    "file_path": "django\\django\\db\\backends\\base\\schema.py",
    "ast_data": "FunctionDef name:_constraint_names arg:self arg:model arg:column_names arg:unique arg:primary_key arg:index arg:foreign_key arg:check arg:type_ arg:exclude arguments arg arg arg arg arg arg arg arg arg arg If Compare Assign Call Call Call Call With Call Assign Call Assign For Call If BoolOp Compare Compare If BoolOp Compare Compare If BoolOp Compare Compare If BoolOp Compare Compare If BoolOp Compare Compare If BoolOp Compare If BoolOp Compare Compare If BoolOp Compare Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "FlattenOutputWithTreeSpecValidationOutputStep",
    "source_code": "class FlattenOutputWithTreeSpecValidationOutputStep(OutputAdaptStep):\n    _spec: pytree.TreeSpec | None = None\n\n    def apply(self, model_outputs: Any, model: torch.nn.Module | Callable | torch_export.ExportedProgram | None=None) -> Sequence[Any]:\n        flattened_outputs, spec = pytree.tree_flatten(model_outputs)\n        if self._spec is None:\n            self._spec = spec\n        else:\n            _assert_identical_pytree_spec(self._spec, spec, error_message='Model outputs incompatible with the format that was exported. ')\n        return flattened_outputs",
    "docstring": "Same as `TreeSpecSpecTreeadaptSpecTreeadapt` calls.",
    "type": "class",
    "file_path": "pytorch\\torch\\onnx\\_internal\\io_adapter.py",
    "ast_data": "ClassDef name:FlattenOutputWithTreeSpecValidationOutputStep FunctionDef name:apply arg:self arg:model_outputs arg:model arguments arg arg arg Assign Call If Compare Assign Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "_set_device_pixel_ratio",
    "source_code": "def _set_device_pixel_ratio(self, ratio):\n    if self._device_pixel_ratio == ratio:\n        return False\n    dpi = ratio * self.figure._original_dpi\n    self.figure._set_dpi(dpi, forward=False)\n    self._device_pixel_ratio = ratio\n    return True",
    "docstring": "Set the ratio of physical to logical pixels used for the canvas. Subclasses that support High DPI screens can set this property to indicate that said ratio is different. The canvas itself will be created at the physical size, while the client side will use the logical size. Thus the DPI of the Figure will change to be scaled by this ratio. Implementations that support High DPI screens should use physical pixels for events so that transforms back to Axes space are correct. By default, this is 1, meaning physical and logical pixels are the same size. Parameters ---------- ratio : float The ratio of logical to physical pixels used for the canvas. Returns ------- bool Whether the ratio has changed. Backends may interpret this as a signal to resize the window, repaint the canvas, or change any other relevant properties.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\backend_bases.py",
    "ast_data": "FunctionDef name:_set_device_pixel_ratio arg:self arg:ratio arguments arg arg If Compare Return return:yes Assign Call Assign Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "get_visible_children",
    "source_code": "def get_visible_children(self):\n    return [c for c in self._children if c.get_visible()]",
    "docstring": "Return a list of the visible child \\s.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\offsetbox.py",
    "ast_data": "FunctionDef name:get_visible_children arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "django",
    "name": "class_or_instance_method",
    "source_code": "class class_or_instance_method:\n\n    def __init__(self, class_method, instance_method):\n        self.class_method = class_method\n        self.instance_method = instance_method\n\n    def __get__(self, instance, owner):\n        if instance is None:\n            return functools.partial(self.class_method, owner)\n        return functools.partial(self.instance_method, instance)",
    "docstring": "Hook used in RegisterLookupMixin to return partial functions depending on the caller type (instance or class of models.Field).",
    "type": "class",
    "file_path": "django\\django\\db\\models\\query_utils.py",
    "ast_data": "ClassDef name:class_or_instance_method FunctionDef name:__init__ arg:self arg:class_method arg:instance_method arguments arg arg arg Assign Assign FunctionDef name:__get__ arg:self arg:instance arg:owner arguments arg arg arg If Compare Return return:yes Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_py_if_stmt",
    "source_code": "def _py_if_stmt(cond, body, orelse):\n    return body() if cond else orelse()",
    "docstring": "Overload of if_stmt that executes a Python if statement.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\autograph\\operators\\control_flow.py",
    "ast_data": "FunctionDef name:_py_if_stmt arg:cond arg:body arg:orelse arguments arg arg arg Return return:yes Call Call"
  },
  {
    "library": "kornia",
    "name": "bottom_right",
    "source_code": "@property\ndef bottom_right(self) -> torch.Tensor:\n    return self._data[..., (2, 3)]",
    "docstring": "The [x y] position of the bottom-right coordinate of the bounding box.",
    "type": "method",
    "file_path": "kornia\\kornia\\contrib\\face_detection.py",
    "ast_data": "FunctionDef name:bottom_right arg:self arguments arg Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "get_edgecolor",
    "source_code": "def get_edgecolor(self):\n    return self.patch.get_edgecolor()",
    "docstring": "Get the edge color of the Figure rectangle.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\figure.py",
    "ast_data": "FunctionDef name:get_edgecolor arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "django",
    "name": "SQLiteDecimalToFloatMixin",
    "source_code": "class SQLiteDecimalToFloatMixin:\n\n    def as_sqlite(self, compiler, connection, **extra_context):\n        copy = self.copy()\n        copy.set_source_expressions([Value(float(expr.value)) if hasattr(expr, 'value') and isinstance(expr.value, Decimal) else expr for expr in copy.get_source_expressions()])\n        return copy.as_sql(compiler, connection, **extra_context)",
    "docstring": "By default, Decimal values are converted to str by the SQLite backend, which is not acceptable by the GIS functions expecting numeric values.",
    "type": "class",
    "file_path": "django\\django\\contrib\\gis\\db\\models\\functions.py",
    "ast_data": "ClassDef name:SQLiteDecimalToFloatMixin FunctionDef name:as_sqlite arg:self arg:compiler arg:connection arguments arg arg arg arg Assign Call Call BoolOp Call Call Call Call Call Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "PeriodIndexResamplerGroupby",
    "source_code": "class PeriodIndexResamplerGroupby(_GroupByMixin, PeriodIndexResampler):\n\n    @property\n    def _resampler_cls(self):\n        return PeriodIndexResampler",
    "docstring": "Provides a resample of a groupby implementation.",
    "type": "class",
    "file_path": "pandas\\pandas\\core\\resample.py",
    "ast_data": "ClassDef name:PeriodIndexResamplerGroupby FunctionDef name:_resampler_cls arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "from_callable",
    "source_code": "@classmethod\ndef from_callable(cls, obj: Callable[..., Any], *, follow_wrapped: bool=True) -> 'FunctionType':\n    signature = super().from_callable(obj, follow_wrapped=follow_wrapped)\n    parameters = [Parameter(p.name, p.kind, p.default is not p.empty, None) for p in signature.parameters.values()]\n    return FunctionType(parameters)",
    "docstring": "Generate FunctionType from a python Callable.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\core\\function\\polymorphism\\function_type.py",
    "ast_data": "FunctionDef name:from_callable arg:cls arg:obj arguments arg arg arg Assign Call Call Assign Call Compare Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "add_capture",
    "source_code": "def add_capture(self, tensor, placeholder):\n    self._function_captures.add_or_replace(key=id(tensor), external=tensor, internal=placeholder, is_by_ref=False)\n    self.inputs.append(placeholder)",
    "docstring": "Capture a specific tensor and utilize the provided placeholder. Args: tensor: Tensor to captures. placeholder: Provided placeholder for the tensor.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\func_graph.py",
    "ast_data": "FunctionDef name:add_capture arg:self arg:tensor arg:placeholder arguments arg arg arg Call Call Call"
  },
  {
    "library": "django",
    "name": "make_style",
    "source_code": "def make_style(opts=(), **kwargs):\n    return lambda text: colorize(text, opts, **kwargs)",
    "docstring": "Return a function with default parameters for colorize() Example: bold_red = make_style(opts=('bold',), fg='red') print(bold_red('hello')) KEYWORD = make_style(fg='yellow') COMMENT = make_style(fg='blue', opts=('bold',))",
    "type": "function",
    "file_path": "django\\django\\utils\\termcolors.py",
    "ast_data": "FunctionDef name:make_style arg:opts arguments arg arg Return return:yes arguments arg Call"
  },
  {
    "library": "pytorch",
    "name": "is_safe_torch_function",
    "source_code": "def is_safe_torch_function(target):\n    function_name = f'{target.__module__}.{target.__name__}'\n    if function_name == 'torch.autograd.function.FunctionCtx':\n        return torch._functorch.config.autograd_cache_allow_custom_autograd_functions\n    return function_name in torch_non_c_binding_in_graph_functions or function_name in SAFE_TORCH_FUNCTIONS or function_name in torch._inductor.config.unsafe_marked_cacheable_functions",
    "docstring": "Allowlisted torch functions",
    "type": "function",
    "file_path": "pytorch\\torch\\_functorch\\_aot_autograd\\autograd_cache.py",
    "ast_data": "FunctionDef name:is_safe_torch_function arg:target arguments arg Assign If Compare Return return:yes Return return:yes BoolOp Compare Compare Compare"
  },
  {
    "library": "matplotlib",
    "name": "width",
    "source_code": "@property\ndef width(self):\n    points = self.get_points()\n    return points[1, 0] - points[0, 0]",
    "docstring": "The (signed) width of the bounding box.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\transforms.py",
    "ast_data": "FunctionDef name:width arg:self arguments arg Assign Call Return return:yes"
  },
  {
    "library": "django",
    "name": "get_geom_placeholder",
    "source_code": "def get_geom_placeholder(self, f, value, compiler):\n\n    def transform_value(value, field):\n        return value is not None and value.srid != field.srid\n    if hasattr(value, 'as_sql'):\n        return '%s(%%s, %s)' % (self.spatial_function_name('Transform'), f.srid) if transform_value(value.output_field, f) else '%s'\n    if transform_value(value, f):\n        return '%s(%s(%%s,%s), %s)' % (self.spatial_function_name('Transform'), self.from_text, value.srid, f.srid)\n    elif self.connection.features.has_spatialrefsys_table:\n        return '%s(%%s,%s)' % (self.from_text, f.srid)\n    else:\n        return '%s(%%s)' % self.from_text",
    "docstring": "Return the placeholder for the given geometry field with the given value. Depending on the spatial backend, the placeholder may contain a stored procedure call to the transformation function of the spatial backend.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\db\\backends\\base\\operations.py",
    "ast_data": "FunctionDef name:get_geom_placeholder arg:self arg:f arg:value arg:compiler arguments arg arg arg arg FunctionDef name:transform_value arg:value arg:field arguments arg arg Return return:yes BoolOp Compare Compare If Call Return return:yes Call Call If Call Return return:yes Call If Return return:yes Return return:yes"
  },
  {
    "library": "django",
    "name": "update_catalogs",
    "source_code": "def update_catalogs(resources=None, languages=None, verbosity=0):\n    settings.configure()\n    django.setup()\n    if resources is not None:\n        print('`update_catalogs` will always process all resources.')\n    contrib_dirs = _get_locale_dirs(None, include_core=False)\n    os.chdir(os.path.join(os.getcwd(), 'django'))\n    print('Updating en catalogs for Django and contrib apps...')\n    call_command('makemessages', locale=['en'], verbosity=verbosity)\n    print('Updating en JS catalogs for Django and contrib apps...')\n    call_command('makemessages', locale=['en'], domain='djangojs', verbosity=verbosity)\n    _check_diff('core', os.path.join(os.getcwd(), 'conf', 'locale'))\n    for name, dir_ in contrib_dirs:\n        _check_diff(name, dir_)",
    "docstring": "Update the en/LC_MESSAGES/django.po (main and contrib) files with new/updated translatable strings.",
    "type": "function",
    "file_path": "django\\scripts\\manage_translations.py",
    "ast_data": "FunctionDef name:update_catalogs arg:resources arg:languages arg:verbosity arguments arg arg arg Call Call If Compare Call Assign Call Call Call Call Call Call Call Call Call Call Call For Call"
  },
  {
    "library": "tensorflow",
    "name": "_layers",
    "source_code": "@property\ndef _layers(self):\n    collected = []\n    for obj in self._values:\n        if isinstance(obj, TrackableDataStructure) or layer_utils.is_layer(obj) or layer_utils.has_weights(obj):\n            collected.append(obj)\n    return collected",
    "docstring": "All Layers and Layer containers, including empty containers.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\trackable\\data_structures.py",
    "ast_data": "FunctionDef name:_layers arg:self arguments arg Assign For If BoolOp Call Call Call Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "set_label_text",
    "source_code": "def set_label_text(self, label, fontdict=None, **kwargs):\n    self.isDefault_label = False\n    self.label.set_text(label)\n    if fontdict is not None:\n        self.label.update(fontdict)\n    self.label.update(kwargs)\n    self.stale = True\n    return self.label",
    "docstring": "Set the text value of the axis label. Parameters ---------- label : str Text string. fontdict : dict Text properties. .. admonition:: Discouraged The use of *fontdict* is discouraged. Parameters should be passed as individual keyword arguments or using dictionary-unpacking ``. **kwargs Merged into fontdict.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\axis.py",
    "ast_data": "FunctionDef name:set_label_text arg:self arg:label arg:fontdict arguments arg arg arg arg Assign Call If Compare Call Call Assign Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "process_inputs",
    "source_code": "def process_inputs(self, *args: Any) -> Any:\n    return args",
    "docstring": "Transforms the inputs so that the graph can take them as arguments, as non-default codegen may result in the inputs to the function being different from the inputs to the graph. If the graph was directly runnable, this invariant should hold true",
    "type": "method",
    "file_path": "pytorch\\torch\\fx\\graph.py",
    "ast_data": "FunctionDef name:process_inputs arg:self arguments arg arg Return return:yes"
  },
  {
    "library": "seaborn",
    "name": "apply",
    "source_code": "def apply(self, func, *args, **kwargs):\n    func(self, *args, **kwargs)\n    return self",
    "docstring": "Pass the grid to a user-supplied function and return self. The must accept an object of this type for its first positional argument. Additional arguments are passed through. The return value of is ignored; this method returns self. See the method if you want the return value. Added in v0.12.0.",
    "type": "method",
    "file_path": "seaborn\\seaborn\\axisgrid.py",
    "ast_data": "FunctionDef name:apply arg:self arg:func arguments arg arg arg arg Call Return return:yes"
  },
  {
    "library": "cherrypy",
    "name": "read_into_file",
    "source_code": "def read_into_file(self, fp_out=None):\n    if fp_out is None:\n        fp_out = self.make_file()\n    self.read_lines_to_boundary(fp_out=fp_out)\n    return fp_out",
    "docstring": "Read the request body into fp_out (or make_file() if None). Return fp_out.",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\_cpreqbody.py",
    "ast_data": "FunctionDef name:read_into_file arg:self arg:fp_out arguments arg arg If Compare Assign Call Call Return return:yes"
  },
  {
    "library": "kornia",
    "name": "forward",
    "source_code": "def forward(self, pred: Tensor, target: Tensor) -> Tensor:\n    if not (pred.shape[2:] == target.shape[2:] and pred.size(0) == target.size(0) and (target.size(1) == 1)):\n        raise ValueError(f'Prediction and target need to be of same size, and target should not be one-hot.Got {pred.shape} and {target.shape}.')\n    if pred.size(1) < target.max().item():\n        raise ValueError('Invalid target value.')\n    out = stack([self.perform_erosion(pred[:, i:i + 1], where(target == i, tensor(1, device=target.device, dtype=target.dtype), tensor(0, device=target.device, dtype=target.dtype))) for i in range(pred.size(1))])\n    if self.reduction == 'mean':\n        out = out.mean()\n    elif self.reduction == 'sum':\n        out = out.sum()\n    elif self.reduction == 'none':\n        pass\n    else:\n        raise NotImplementedError(f'reduction `{self.reduction}` has not been implemented yet.')\n    return out",
    "docstring": "Compute Hausdorff loss. Args: pred: predicted tensor with a shape of :math: or :math:. Each channel is as binary as: 1 -> fg, 0 -> bg. target: target tensor with a shape of :math: or :math:. Returns: Estimated Hausdorff Loss.",
    "type": "method",
    "file_path": "kornia\\kornia\\losses\\hausdorff.py",
    "ast_data": "FunctionDef name:forward arg:self arg:pred arg:target arguments arg arg arg If BoolOp Compare Compare Call Call Compare Call Raise Call If Compare Call Call Call Raise Call Assign Call Call Call Compare Call Call Call Call If Compare Assign Call If Compare Assign Call If Compare Raise Call Return return:yes"
  },
  {
    "library": "pandas",
    "name": "fillna",
    "source_code": "def fillna(self, value, limit: int | None=None, copy: bool=True) -> Self:\n    if copy is False:\n        raise NotImplementedError\n    if limit is not None:\n        raise ValueError('limit must be None')\n    value_left, value_right = self._validate_scalar(value)\n    left = self.left.fillna(value=value_left)\n    right = self.right.fillna(value=value_right)\n    return self._shallow_copy(left, right)",
    "docstring": "Fill NA/NaN values using the specified method. Parameters ---------- value : scalar, dict, Series If a scalar value is passed it is used to fill all missing values. Alternatively, a Series or dict can be used to fill in different values for each index. The value should not be a list. The value(s) passed should be either Interval objects or NA/NaN. limit : int, default None (Not implemented yet for IntervalArray) The maximum number of entries where NA values will be filled. copy : bool, default True Whether to make a copy of the data before filling. If False, then the original should be modified and no new memory should be allocated. For ExtensionArray subclasses that cannot do this, it is at the author's discretion whether to ignore \"copy=False\" or to raise. Returns ------- filled : IntervalArray with NA/NaN filled",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\arrays\\interval.py",
    "ast_data": "FunctionDef name:fillna arg:self arg:value arg:limit arg:copy arguments arg arg arg arg If Compare Raise If Compare Raise Call Assign Call Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "ss2zpk",
    "source_code": "def ss2zpk(A, B, C, D, input=0):\n    return tf2zpk(*ss2tf(A, B, C, D, input=input))",
    "docstring": "State-space representation to zero-pole-gain representation. A, B, C, D defines a linear state-space system with inputs, outputs, and state variables. Parameters ---------- A : array_like State (or system) matrix of shape `` input : int, optional For multiple-input systems, the index of the input to use. Returns ------- z, p : sequence Zeros and poles. k : float System gain.",
    "type": "function",
    "file_path": "scipy\\scipy\\signal\\_lti_conversion.py",
    "ast_data": "FunctionDef name:ss2zpk arg:A arg:B arg:C arg:D arg:input arguments arg arg arg arg arg Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "_make_orthogonal",
    "source_code": "def _make_orthogonal(A):\n    X, tau = torch.geqrf(A)\n    Q = torch.linalg.householder_product(X, tau)\n    Q *= X.diagonal(dim1=-2, dim2=-1).sgn().unsqueeze(-2)\n    return Q",
    "docstring": "Assume that A is a tall matrix. Compute the Q factor s.t. A = QR (A may be complex) and diag(R) is real and non-negative.",
    "type": "function",
    "file_path": "pytorch\\torch\\nn\\utils\\parametrizations.py",
    "ast_data": "FunctionDef name:_make_orthogonal arg:A arguments arg Assign Call Assign Call Call Call Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "get_xlabel",
    "source_code": "def get_xlabel(self):\n    label = self.xaxis.label\n    return label.get_text()",
    "docstring": "Get the xlabel text string.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\axes\\_base.py",
    "ast_data": "FunctionDef name:get_xlabel arg:self arguments arg Assign Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "_na_for_min_count",
    "source_code": "def _na_for_min_count(values: np.ndarray, axis: AxisInt | None) -> Scalar | np.ndarray:\n    if values.dtype.kind in 'iufcb':\n        values = values.astype('float64')\n    fill_value = na_value_for_dtype(values.dtype)\n    if values.ndim == 1:\n        return fill_value\n    elif axis is None:\n        return fill_value\n    else:\n        result_shape = values.shape[:axis] + values.shape[axis + 1:]\n        return np.full(result_shape, fill_value, dtype=values.dtype)",
    "docstring": "Return the missing value for . Parameters ---------- values : ndarray axis : int or None axis for the reduction, required if values.ndim > 1. Returns ------- result : scalar or ndarray For 1-D values, returns a scalar of the correct missing type. For 2-D values, returns a 1-D array where each element is missing.",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\nanops.py",
    "ast_data": "FunctionDef name:_na_for_min_count arg:values arg:axis arguments arg arg If Compare Assign Call Assign Call If Compare Return return:yes If Compare Return return:yes Assign Return return:yes Call"
  },
  {
    "library": "authlib",
    "name": "validate_token_endpoint_auth_signing_alg",
    "source_code": "def validate_token_endpoint_auth_signing_alg(self):\n    if self.get('token_endpoint_auth_signing_alg') == 'none':\n        raise InvalidClaimError('token_endpoint_auth_signing_alg')\n    self._validate_claim_value('token_endpoint_auth_signing_alg')",
    "docstring": "JWS [JWS] alg algorithm [JWA] that MUST be used for signing the JWT [JWT] used to authenticate the Client at the Token Endpoint for the private_key_jwt and client_secret_jwt authentication methods. All Token Requests using these authentication methods from this Client MUST be rejected, if the JWT is not signed with this algorithm. Servers SHOULD support RS256. The value none MUST NOT be used. The default, if omitted, is that any algorithm supported by the OP and the RP MAY be used.",
    "type": "method",
    "file_path": "authlib\\authlib\\oidc\\registration\\claims.py",
    "ast_data": "FunctionDef name:validate_token_endpoint_auth_signing_alg arg:self arguments arg If Compare Call Raise Call Call"
  },
  {
    "library": "django",
    "name": "make_state",
    "source_code": "def make_state(self, nodes=None, at_end=True, real_apps=None):\n    if nodes is None:\n        nodes = list(self.leaf_nodes())\n    if not nodes:\n        return ProjectState()\n    if not isinstance(nodes[0], tuple):\n        nodes = [nodes]\n    plan = self._generate_plan(nodes, at_end)\n    project_state = ProjectState(real_apps=real_apps)\n    for node in plan:\n        project_state = self.nodes[node].mutate_state(project_state, preserve=False)\n    return project_state",
    "docstring": "Given a migration node or nodes, return a complete ProjectState for it. If at_end is False, return the state before the migration has run. If nodes is not provided, return the overall most current project state.",
    "type": "method",
    "file_path": "django\\django\\db\\migrations\\graph.py",
    "ast_data": "FunctionDef name:make_state arg:self arg:nodes arg:at_end arg:real_apps arguments arg arg arg arg If Compare Assign Call Call If Return return:yes Call If Call Assign Assign Call Assign Call For Assign Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "PHBase",
    "source_code": "@compatibility(is_backward_compatible=False)\nclass PHBase:\n\n    def __repr__(self):\n        return 'PH'",
    "docstring": "Object representing an input placeholder to",
    "type": "class",
    "file_path": "pytorch\\torch\\fx\\_symbolic_trace.py",
    "ast_data": "ClassDef name:PHBase FunctionDef name:__repr__ arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "is_sparse",
    "source_code": "def is_sparse(x):\n    return isinstance(x, (SparseTensor, SparseTensorValue))",
    "docstring": "Check whether is sparse. Check whether an object is a or . Args: x: A python object to check. Returns: iff is a or .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\sparse_tensor.py",
    "ast_data": "FunctionDef name:is_sparse arg:x arguments arg Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "RGI_Cubic",
    "source_code": "class RGI_Cubic(Benchmark):\n    param_names = ['ndim', 'n_samples', 'method']\n    params = [[2], [10, 40, 100, 200, 400], ['cubic', 'cubic_legacy']]\n\n    def setup(self, ndim, n_samples, method):\n        rng = np.random.default_rng(314159)\n        self.points = [np.sort(rng.random(size=n_samples)) for _ in range(ndim)]\n        self.values = rng.random(size=[n_samples] * ndim)\n        bounds = [(p.min(), p.max()) for p in self.points]\n        xi = [rng.uniform(low, high, size=n_samples) for low, high in bounds]\n        self.xi = np.array(xi).T\n        self.interp = interpolate.RegularGridInterpolator(self.points, self.values, method=method)\n\n    def time_rgi_setup_interpolator(self, ndim, n_samples, method):\n        self.interp = interpolate.RegularGridInterpolator(self.points, self.values, method=method)\n\n    def time_rgi(self, ndim, n_samples, method):\n        self.interp(self.xi)",
    "docstring": "Benchmark RegularGridInterpolator with method=\"cubic\".",
    "type": "class",
    "file_path": "scipy\\benchmarks\\benchmarks\\interpolate.py",
    "ast_data": "ClassDef name:RGI_Cubic Assign Assign FunctionDef name:setup arg:self arg:ndim arg:n_samples arg:method arguments arg arg arg arg Assign Call Assign Call Call Call Assign Call Assign Call Call Assign Call Assign Call Assign Call FunctionDef name:time_rgi_setup_interpolator arg:self arg:ndim arg:n_samples arg:method arguments arg arg arg arg Assign Call FunctionDef name:time_rgi arg:self arg:ndim arg:n_samples arg:method arguments arg arg arg arg Call"
  },
  {
    "library": "tensorflow",
    "name": "num_buckets",
    "source_code": "@property\ndef num_buckets(self):\n    return self.number_buckets",
    "docstring": "Returns number of buckets in this sparse feature.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\feature_column\\feature_column_v2.py",
    "ast_data": "FunctionDef name:num_buckets arg:self arguments arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "MAPPING_KEYS_CHECK",
    "source_code": "def MAPPING_KEYS_CHECK(self, guard):\n    ref = self.arg_ref(guard)\n    value = self.get(guard.name)\n    code = []\n    code.append(f'list({ref}.keys()) == {list(value.keys())}')\n    self._set_guard_export_info(guard, code)\n    self.get_guard_manager(guard).add_mapping_keys_guard(value, code)",
    "docstring": "Guard on the key order of types.MappingProxyType object",
    "type": "method",
    "file_path": "pytorch\\torch\\_dynamo\\guards.py",
    "ast_data": "FunctionDef name:MAPPING_KEYS_CHECK arg:self arg:guard arguments arg arg Assign Call Assign Call Assign Call Call Call Call Call Call"
  },
  {
    "library": "scrapy",
    "name": "reset_stream",
    "source_code": "def reset_stream(self, reason: StreamCloseReason=StreamCloseReason.RESET) -> None:\n    if self.metadata['stream_closed_local']:\n        raise StreamClosedError(self.stream_id)\n    self._response['body'].truncate(0)\n    self.metadata['stream_closed_local'] = True\n    self._protocol.conn.reset_stream(self.stream_id, ErrorCodes.REFUSED_STREAM)\n    self.close(reason)",
    "docstring": "Close this stream by sending a RST_FRAME to the remote peer",
    "type": "method",
    "file_path": "scrapy\\scrapy\\core\\http2\\stream.py",
    "ast_data": "FunctionDef name:reset_stream arg:self arg:reason arguments arg arg If Raise Call Call Assign Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_LeftShift",
    "source_code": "def _LeftShift(x):\n    rank = array_ops.rank(x)\n    zeros = array_ops.zeros((rank - 2, 2), dtype=dtypes.int32)\n    pad = array_ops.concat([zeros, array_ops.constant([[0, 1], [0, 0]])], axis=0)\n    return array_ops.pad(x[..., 1:, :], pad)",
    "docstring": "Shifts next-to-last dimension to the left, adding zero on the right.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\linalg_grad.py",
    "ast_data": "FunctionDef name:_LeftShift arg:x arguments arg Assign Call Assign Call Assign Call Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "RegisterPForWithArgs",
    "source_code": "class RegisterPForWithArgs(RegisterPFor):\n\n    def __init__(self, op_type, *args, **kw_args):\n        super(RegisterPForWithArgs, self).__init__(op_type)\n        self._args = args\n        self._kw_args = kw_args\n\n    def __call__(self, converter):\n\n        def _f(pfor_input: _PforInput):\n            return converter(pfor_input, self.op_type, *self._args, **self._kw_args)\n        super(RegisterPForWithArgs, self).__call__(_f)\n        return converter",
    "docstring": "Utility to register converters for pfor. Usage: @RegisteRPFor(foo_op_type, foo=value, ....) def _foo_converter(pfor_input, foo=None, ....): ... See RegisterPFor for details on the conversion function. allows binding extra arguments to the conversion function at registration time.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\parallel_for\\pfor.py",
    "ast_data": "ClassDef name:RegisterPForWithArgs FunctionDef name:__init__ arg:self arg:op_type arguments arg arg arg arg Call Call Assign Assign FunctionDef name:__call__ arg:self arg:converter arguments arg arg FunctionDef name:_f arg:pfor_input arguments arg Return return:yes Call Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "get_group",
    "source_code": "def get_group(self, mesh_dim: Optional[Union[int, str]]=None) -> ProcessGroup:\n    if not hasattr(self, '_dim_group_names'):\n        raise RuntimeError('DeviceMesh process groups not initialized!')\n    if self.mesh.ndim > 1 and mesh_dim is None:\n        raise RuntimeError(f'Found the DeviceMesh have {self.mesh.ndim} dimensions', 'Optional kwarg `mesh_dim` needs to be specified when device_mesh.ndim > 1.', 'If you want to get the list of all the ProcessGroups in the DeviceMesh,please use `get_all_groups()` instead.')\n    if self.mesh.ndim == 1 and mesh_dim is None:\n        return not_none(_resolve_process_group(self._dim_group_names[0]))\n    root_mesh = _mesh_resources.get_root_mesh(self)\n    root_to_flatten_mapping = _mesh_resources.root_to_flatten_mapping.get(root_mesh, None)\n    if root_to_flatten_mapping and mesh_dim in root_to_flatten_mapping.keys():\n        dim_group_name = root_to_flatten_mapping[mesh_dim]._dim_group_names[0]\n        return not_none(_resolve_process_group(dim_group_name))\n    else:\n        mesh_dim = _mesh_resources.get_mesh_dim_by_name(self, mesh_dim) if isinstance(mesh_dim, str) else mesh_dim\n        assert isinstance(mesh_dim, int)\n        return not_none(_resolve_process_group(self._dim_group_names[mesh_dim]))",
    "docstring": "Returns the single ProcessGroup specified by mesh_dim, or, if mesh_dim is not specified and the DeviceMesh is 1-dimensional, returns the only ProcessGroup in the mesh. Args: mesh_dim (str/int, optional): it can be the name of the mesh dimension or the index of the mesh dimension. Default is None. Returns: A :class: object.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\device_mesh.py",
    "ast_data": "FunctionDef name:get_group arg:self arg:mesh_dim arguments arg arg If Call Raise Call If BoolOp Compare Compare Raise Call If BoolOp Compare Compare Return return:yes Call Call Assign Call Assign Call If BoolOp Compare Call Assign Return return:yes Call Call Assign Call Call Call Return return:yes Call Call"
  },
  {
    "library": "authlib",
    "name": "validate_authorization_endpoint",
    "source_code": "def validate_authorization_endpoint(self):\n    url = self.get('authorization_endpoint')\n    if url:\n        if not is_secure_transport(url):\n            raise ValueError('\"authorization_endpoint\" MUST use \"https\" scheme')\n        return\n    grant_types_supported = set(self.grant_types_supported)\n    authorization_grant_types = {'authorization_code', 'implicit'}\n    if grant_types_supported & authorization_grant_types:\n        raise ValueError('\"authorization_endpoint\" is required')",
    "docstring": "URL of the authorization server's authorization endpoint [RFC6749]. This is REQUIRED unless no grant types are supported that use the authorization endpoint.",
    "type": "method",
    "file_path": "authlib\\authlib\\oauth2\\rfc8414\\models.py",
    "ast_data": "FunctionDef name:validate_authorization_endpoint arg:self arguments arg Assign Call If If Call Raise Call Return return:no Assign Call Assign If Raise Call"
  },
  {
    "library": "pytorch",
    "name": "iters_per_second",
    "source_code": "@property\ndef iters_per_second(self):\n    return self.num_iters / self.total_time_seconds",
    "docstring": "Return total number of iterations per second across all calling threads.",
    "type": "method",
    "file_path": "pytorch\\torch\\utils\\throughput_benchmark.py",
    "ast_data": "FunctionDef name:iters_per_second arg:self arguments arg Return return:yes"
  },
  {
    "library": "pandas",
    "name": "factorize_array",
    "source_code": "def factorize_array(values: np.ndarray, use_na_sentinel: bool=True, size_hint: int | None=None, na_value: object=None, mask: npt.NDArray[np.bool_] | None=None) -> tuple[npt.NDArray[np.intp], np.ndarray]:\n    original = values\n    if values.dtype.kind in 'mM':\n        na_value = iNaT\n    hash_klass, values = _get_hashtable_algo(values)\n    table = hash_klass(size_hint or len(values))\n    uniques, codes = table.factorize(values, na_sentinel=-1, na_value=na_value, mask=mask, ignore_na=use_na_sentinel)\n    uniques = _reconstruct_data(uniques, original.dtype, original)\n    codes = ensure_platform_int(codes)\n    return (codes, uniques)",
    "docstring": "Factorize a numpy array to codes and uniques. This doesn't do any coercion of types or unboxing before factorization. Parameters ---------- values : ndarray use_na_sentinel : bool, default True If True, the sentinel -1 will be used for NaN values. If False, NaN values will be encoded as non-negative integers and will not drop the NaN from the uniques of the values. size_hint : int, optional Passed through to the hashtable's 'get_labels' method na_value : object, optional A value in to consider missing. Note: only use this parameter when you know that you don't have any values pandas would consider missing in the array (NaN for float data, iNaT for datetimes, etc.). mask : ndarray[bool], optional If not None, the mask is used as indicator for missing values (True = missing, False = valid) instead of or condition \"val != val\". Returns ------- codes : ndarray[np.intp] uniques : ndarray",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\algorithms.py",
    "ast_data": "FunctionDef name:factorize_array arg:values arg:use_na_sentinel arg:size_hint arg:na_value arg:mask arguments arg arg arg arg arg Assign If Compare Assign Assign Call Assign Call BoolOp Call Assign Call Assign Call Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "model_from_config",
    "source_code": "def model_from_config(config, custom_objects=None):\n    if isinstance(config, list):\n        raise TypeError('`model_from_config` expects a dictionary, not a list. Maybe you meant to use `Sequential.from_config(config)`?')\n    from tensorflow.python.keras.layers import deserialize\n    return deserialize(config, custom_objects=custom_objects)",
    "docstring": "Instantiates a Keras model from its config. Usage: Args: config: Configuration dictionary. custom_objects: Optional dictionary mapping names (strings) to custom classes or functions to be considered during deserialization. Returns: A Keras model instance (uncompiled). Raises: TypeError: if is not a dictionary.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\saving\\model_config.py",
    "ast_data": "FunctionDef name:model_from_config arg:config arg:custom_objects arguments arg arg If Call Raise Call Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "_cleanup_fontproperties_init",
    "source_code": "def _cleanup_fontproperties_init(init_method):\n\n    @functools.wraps(init_method)\n    def wrapper(self, *args, **kwargs):\n        if len(args) > 1 or (len(args) == 1 and kwargs):\n            _api.warn_deprecated('3.10', message='Passing individual properties to FontProperties() positionally was deprecated in Matplotlib %(since)s and will be removed in %(removal)s. Please pass all properties via keyword arguments.')\n        if len(args) == 1 and (not kwargs) and (not cbook.is_scalar_or_string(args[0])):\n            _api.warn_deprecated('3.10', message='Passing family as positional argument to FontProperties() was deprecated in Matplotlib %(since)s and will be removed in %(removal)s. Please pass family names as keywordargument.')\n        return init_method(self, *args, **kwargs)\n    return wrapper",
    "docstring": "A decorator to limit the call signature to single a positional argument or alternatively only keyword arguments. We still accept but deprecate all other call signatures. When the deprecation expires we can switch the signature to:: __init__(self, pattern=None, /, *, family=None, style=None, ...) plus a runtime check that pattern is not used alongside with the keyword arguments. This results eventually in the two possible call signatures:: FontProperties(pattern) FontProperties(family=..., size=..., ...)",
    "type": "function",
    "file_path": "matplotlib\\lib\\matplotlib\\font_manager.py",
    "ast_data": "FunctionDef name:_cleanup_fontproperties_init arg:init_method arguments arg FunctionDef name:wrapper arg:self arguments arg arg arg If BoolOp Compare Call BoolOp Compare Call Call If BoolOp Compare Call Call Call Return return:yes Call Call Return return:yes"
  },
  {
    "library": "scipy",
    "name": "logpdf",
    "source_code": "def logpdf(self, x):\n    return self._dist._logpdf(x, self.dim, self.mu, self.kappa)",
    "docstring": "Parameters ---------- x : array_like Points at which to evaluate the log of the probability density function. The last axis of must correspond to unit vectors of the same dimensionality as the distribution. Returns ------- logpdf : ndarray or scalar Log of probability density function evaluated at .",
    "type": "method",
    "file_path": "scipy\\scipy\\stats\\_multivariate.py",
    "ast_data": "FunctionDef name:logpdf arg:self arg:x arguments arg arg Return return:yes Call"
  },
  {
    "library": "sphinx",
    "name": "PropagateDescDomain",
    "source_code": "class PropagateDescDomain(SphinxPostTransform):\n    default_priority = 200\n\n    def run(self, **kwargs: Any) -> None:\n        for node in self.document.findall(addnodes.desc_signature):\n            if node.parent.get('domain'):\n                node['classes'].append(node.parent['domain'])",
    "docstring": "Add the domain name of the parent node as a class in each desc_signature node.",
    "type": "class",
    "file_path": "sphinx\\sphinx\\transforms\\post_transforms\\__init__.py",
    "ast_data": "ClassDef name:PropagateDescDomain Assign FunctionDef name:run arg:self arguments arg arg For Call If Call Call"
  },
  {
    "library": "django",
    "name": "ask_auto_now_add_addition",
    "source_code": "def ask_auto_now_add_addition(self, field_name, model_name):\n    if not self.dry_run:\n        choice = self._choice_input(f\"It is impossible to add the field '{field_name}' with 'auto_now_add=True' to {model_name} without providing a default. This is because the database needs something to populate existing rows.\\n\", ['Provide a one-off default now which will be set on all existing rows', 'Quit and manually define a default value in models.py.'])\n        if choice == 2:\n            sys.exit(3)\n        else:\n            return self._ask_default(default='timezone.now')\n    return None",
    "docstring": "Adding an auto_now_add field to a model.",
    "type": "method",
    "file_path": "django\\django\\db\\migrations\\questioner.py",
    "ast_data": "FunctionDef name:ask_auto_now_add_addition arg:self arg:field_name arg:model_name arguments arg arg arg If Assign Call If Compare Call Return return:yes Call Return return:no"
  },
  {
    "library": "matplotlib",
    "name": "MyStyle",
    "source_code": "class MyStyle:\n\n    def __init__(self, pad=0.3):\n        self.pad = pad\n        super().__init__()\n\n    def __call__(self, x0, y0, width, height, mutation_size):\n        pad = mutation_size * self.pad\n        width = width + 2 * pad\n        height = height + 2 * pad\n        x0, y0 = (x0 - pad, y0 - pad)\n        x1, y1 = (x0 + width, y0 + height)\n        return Path([(x0, y0), (x1, y0), (x1, y1), (x0, y1), (x0 - pad, (y0 + y1) / 2), (x0, y0), (x0, y0)], closed=True)",
    "docstring": "A simple box.",
    "type": "class",
    "file_path": "matplotlib\\galleries\\users_explain\\text\\annotations.py",
    "ast_data": "ClassDef name:MyStyle FunctionDef name:__init__ arg:self arg:pad arguments arg arg Assign Call Call FunctionDef name:__call__ arg:self arg:x0 arg:y0 arg:width arg:height arg:mutation_size arguments arg arg arg arg arg arg Assign Assign Assign Assign Assign Return return:yes Call"
  },
  {
    "library": "authlib",
    "name": "get_op_key",
    "source_code": "def get_op_key(self, operation):\n    self.check_key_op(operation)\n    if operation in self.PUBLIC_KEY_OPS:\n        return self.get_public_key()\n    return self.get_private_key()",
    "docstring": "Get the raw key for the given key_op. This method will also check if the given key_op is supported by this key. :param operation: key operation value, such as \"sign\", \"encrypt\". :return: raw key",
    "type": "method",
    "file_path": "authlib\\authlib\\jose\\rfc7517\\asymmetric_key.py",
    "ast_data": "FunctionDef name:get_op_key arg:self arg:operation arguments arg arg Call If Compare Return return:yes Call Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "inplace_logistic",
    "source_code": "def inplace_logistic(X):\n    logistic_sigmoid(X, out=X)",
    "docstring": "Compute the logistic function inplace. Parameters ---------- X : {array-like, sparse matrix}, shape (n_samples, n_features) The input data.",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\neural_network\\_base.py",
    "ast_data": "FunctionDef name:inplace_logistic arg:X arguments arg Call"
  },
  {
    "library": "pytorch",
    "name": "tuning_enable",
    "source_code": "def tuning_enable(val: bool=True) -> None:\n    torch._C._cuda_tunableop_tuning_enable(val)",
    "docstring": "Enable tuning of TunableOp implementations. When enabled, if a tuned entry isn't found, run the tuning step and record the entry.",
    "type": "function",
    "file_path": "pytorch\\torch\\cuda\\tunable.py",
    "ast_data": "FunctionDef name:tuning_enable arg:val arguments arg Call"
  },
  {
    "library": "matplotlib",
    "name": "get_geometry",
    "source_code": "def get_geometry(self):\n    rows, cols = self.get_gridspec().get_geometry()\n    return (rows, cols, self.num1, self.num2)",
    "docstring": "Return the subplot geometry as tuple `GridSpec`).",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\gridspec.py",
    "ast_data": "FunctionDef name:get_geometry arg:self arguments arg Assign Call Call Return return:yes"
  },
  {
    "library": "sphinx",
    "name": "add_latex_package",
    "source_code": "def add_latex_package(self, packagename: str, options: str | None=None, after_hyperref: bool=False) -> None:\n    self.registry.add_latex_package(packagename, options, after_hyperref)",
    "docstring": "Register a package to include in the LaTeX source code. Add *packagename* to the list of packages that LaTeX source code will include. If you provide *options*, it will be taken to the declaration. If you set *after_hyperref* truthy, the package will be loaded after `` package. .. code-block:: python app.add_latex_package('mypackage') # => \\usepackage{mypackage} app.add_latex_package('mypackage', 'foo,bar') # => \\usepackage[foo,bar]{mypackage} .. versionadded:: 1.3 .. versionadded:: 3.1 *after_hyperref* option.",
    "type": "method",
    "file_path": "sphinx\\sphinx\\application.py",
    "ast_data": "FunctionDef name:add_latex_package arg:self arg:packagename arg:options arg:after_hyperref arguments arg arg arg arg Call"
  },
  {
    "library": "tensorflow",
    "name": "_step",
    "source_code": "def _step(time, output_ta_t, prev_output, *states):\n    current_input = tuple((ta.read(time) for ta in input_ta))\n    current_input = nest.pack_sequence_as(inputs, current_input)\n    mask_t = masking_fn(time)\n    output, new_states = step_function(current_input, tuple(states) + tuple(constants))\n    flat_output = nest.flatten(output)\n    flat_mask_output = flat_zero_output if zero_output_for_mask else nest.flatten(prev_output)\n    flat_new_output = compute_masked_output(mask_t, flat_output, flat_mask_output)\n    flat_state = nest.flatten(states)\n    flat_new_state = nest.flatten(new_states)\n    for state, new_state in zip(flat_state, flat_new_state):\n        if isinstance(new_state, tensor_lib.Tensor):\n            new_state.set_shape(state.shape)\n    flat_final_state = compute_masked_output(mask_t, flat_new_state, flat_state)\n    new_states = nest.pack_sequence_as(new_states, flat_final_state)\n    output_ta_t = tuple((ta.write(time, out) for ta, out in zip(output_ta_t, flat_new_output)))\n    return (time + 1, output_ta_t, tuple(flat_new_output)) + tuple(new_states)",
    "docstring": "RNN step function. Args: time: Current timestep value. output_ta_t: TensorArray. prev_output: tuple of outputs from time - 1. *states: List of states. Returns: Tuple:",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\backend.py",
    "ast_data": "FunctionDef name:_step arg:time arg:output_ta_t arg:prev_output arguments arg arg arg arg Assign Call Call Assign Call Assign Call Assign Call Call Call Assign Call Assign Call Assign Call Assign Call Assign Call For Call If Call Call Assign Call Assign Call Assign Call Call Call Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_sub_flops",
    "source_code": "@ops.RegisterStatistics('Sub', 'flops')\ndef _sub_flops(graph, node):\n    return _binary_per_element_op_flops(graph, node)",
    "docstring": "Compute flops for Sub operation.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\profiler\\internal\\flops_registry.py",
    "ast_data": "FunctionDef name:_sub_flops arg:graph arg:node arguments arg arg Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "_compute_nparams_toprune",
    "source_code": "def _compute_nparams_toprune(amount, tensor_size):\n    if isinstance(amount, numbers.Integral):\n        return amount\n    else:\n        return round(amount * tensor_size)",
    "docstring": "Convert the pruning amount from a percentage to absolute value. Since amount can be expressed either in absolute value or as a percentage of the number of units/channels in a tensor, this utility function converts the percentage to absolute value to standardize the handling of pruning. Args: amount (int or float): quantity of parameters to prune. If float, should be between 0.0 and 1.0 and represent the fraction of parameters to prune. If int, it represents the absolute number of parameters to prune. tensor_size (int): absolute number of parameters in the tensor to prune. Returns: int: the number of units to prune in the tensor",
    "type": "function",
    "file_path": "pytorch\\torch\\nn\\utils\\prune.py",
    "ast_data": "FunctionDef name:_compute_nparams_toprune arg:amount arg:tensor_size arguments arg arg If Call Return return:yes Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "_initial_values",
    "source_code": "def _initial_values(self, tr_radius):\n    lambda_ub = max(0, self.jac_mag / tr_radius + min(-self.hess_gershgorin_lb, self.hess_fro, self.hess_inf))\n    lambda_lb = max(0, -min(self.hess.diagonal()), self.jac_mag / tr_radius - min(self.hess_gershgorin_ub, self.hess_fro, self.hess_inf))\n    if tr_radius < self.previous_tr_radius:\n        lambda_lb = max(self.lambda_lb, lambda_lb)\n    if lambda_lb == 0:\n        lambda_initial = 0\n    else:\n        lambda_initial = max(np.sqrt(lambda_lb * lambda_ub), lambda_lb + self.UPDATE_COEFF * (lambda_ub - lambda_lb))\n    return (lambda_initial, lambda_lb, lambda_ub)",
    "docstring": "Given a trust radius, return a good initial guess for the damping factor, the lower bound and the upper bound. The values were chosen accordingly to the guidelines on section 7.3.8 (p. 192) from [1]_.",
    "type": "method",
    "file_path": "scipy\\scipy\\optimize\\_trustregion_exact.py",
    "ast_data": "FunctionDef name:_initial_values arg:self arg:tr_radius arguments arg arg Assign Call Call Assign Call Call Call Call If Compare Assign Call If Compare Assign Assign Call Call Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "_permutation_test_score",
    "source_code": "def _permutation_test_score(estimator, X, y, cv, scorer, split_params, fit_params, score_params):\n    fit_params = fit_params if fit_params is not None else {}\n    score_params = score_params if score_params is not None else {}\n    avg_score = []\n    for train, test in cv.split(X, y, **split_params):\n        X_train, y_train = _safe_split(estimator, X, y, train)\n        X_test, y_test = _safe_split(estimator, X, y, test, train)\n        fit_params_train = _check_method_params(X, params=fit_params, indices=train)\n        score_params_test = _check_method_params(X, params=score_params, indices=test)\n        estimator.fit(X_train, y_train, **fit_params_train)\n        avg_score.append(scorer(estimator, X_test, y_test, **score_params_test))\n    return np.mean(avg_score)",
    "docstring": "Auxiliary function for permutation_test_score",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\model_selection\\_validation.py",
    "ast_data": "FunctionDef name:_permutation_test_score arg:estimator arg:X arg:y arg:cv arg:scorer arg:split_params arg:fit_params arg:score_params arguments arg arg arg arg arg arg arg arg Assign Compare Assign Compare Assign For Call Assign Call Assign Call Assign Call Assign Call Call Call Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "DelayGraphBreakVariable",
    "source_code": "class DelayGraphBreakVariable(UnknownVariable):\n\n    def __init__(self, msg=None, **kwargs):\n        super().__init__(**kwargs)\n        self.msg = msg\n\n    def call_function(self, tx: 'InstructionTranslator', args: 'list[VariableTracker]', kwargs: 'dict[str, VariableTracker]') -> 'VariableTracker':\n        unimplemented_v2(gb_type='Unsupported function call (delayed)', context=f'source: {self.source}', explanation=f'Dynamo determined that a graph break should occur when calling `{self.source.name()}`. Reason: {self.msg}', hints=[])",
    "docstring": "Used to insert a dummy variable in the stack to do the graph break at CALL_FUNCTION.",
    "type": "class",
    "file_path": "pytorch\\torch\\_dynamo\\variables\\misc.py",
    "ast_data": "ClassDef name:DelayGraphBreakVariable FunctionDef name:__init__ arg:self arg:msg arguments arg arg arg Call Call Assign FunctionDef name:call_function arg:self arg:tx arg:args arg:kwargs arguments arg arg arg arg Call Call"
  },
  {
    "library": "pandas",
    "name": "_maybe_repeat",
    "source_code": "def _maybe_repeat(arr: ArrayLike, index: Index | None) -> ArrayLike:\n    if index is not None:\n        if 1 == len(arr) != len(index):\n            arr = arr.repeat(len(index))\n    return arr",
    "docstring": "If we have a length-1 array and an index describing how long we expect the result to be, repeat the array.",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\construction.py",
    "ast_data": "FunctionDef name:_maybe_repeat arg:arr arg:index arguments arg arg If Compare If Compare Call Call Assign Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "DefaultDeviceType",
    "source_code": "class DefaultDeviceType:\n    _default_device_type = 'cuda'\n\n    @staticmethod\n    def set_device_type(device: str='cuda'):\n        DefaultDeviceType._default_device_type = device\n\n    @staticmethod\n    def get_device_type() -> str:\n        return DefaultDeviceType._default_device_type",
    "docstring": "A class that manages the default device type for checkpointing. If no non-CPU tensors are present, the default device type will be used. The default value is 'cuda'. The device type is used in the checkpointing process when determining which device states to save and restore for recomputation.",
    "type": "class",
    "file_path": "pytorch\\torch\\utils\\checkpoint.py",
    "ast_data": "ClassDef name:DefaultDeviceType Assign FunctionDef name:set_device_type arg:device arguments arg Assign FunctionDef name:get_device_type arguments Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "get_config",
    "source_code": "def get_config(self):\n    return self._get_config()",
    "docstring": "Returns the config of the feature column. A FeatureColumn config is a Python dictionary (serializable) containing the configuration of a FeatureColumn. The same FeatureColumn can be reinstantiated later from this configuration. The config of a feature column does not include information about feature columns depending on it nor the FeatureColumn class name. Example with (de)serialization practices followed in this file: Returns: A serializable Dict that can be used to deserialize the object with from_config.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\feature_column\\feature_column_v2_types.py",
    "ast_data": "FunctionDef name:get_config arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "fit",
    "source_code": "@_fit_context(prefer_skip_nested_validation=True)\ndef fit(self, X, y=None):\n    self._fit(X, y)\n    return self",
    "docstring": "Fit the transformer on . Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) Input data, where is the number of samples and is the number of features. y : Ignored Not used, present for API consistency by convention. Returns ------- self : object Fitted estimator.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\impute\\_base.py",
    "ast_data": "FunctionDef name:fit arg:self arg:X arg:y arguments arg arg arg Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_StatelessRandomGammaV3Grad",
    "source_code": "@ops.RegisterGradient('StatelessRandomGammaV3')\ndef _StatelessRandomGammaV3Grad(op: ops.Operation, grad):\n    shape = op.inputs[0]\n    alpha = op.inputs[4]\n    sample = op.outputs[0]\n    with ops.control_dependencies([grad]):\n        return (None, None, None, None, _StatelessGammaGradAlpha(shape, alpha, sample, grad))",
    "docstring": "Returns the gradient of a Gamma sample w.r.t. alpha. The gradient is computed using implicit differentiation (Figurnov et al., 2018). Args: op: A operation. We assume that the inputs to the operation are , , , , and tensors, and the output is the tensor. grad: The incoming gradient of the same shape as . Returns: A with derivatives . References: Implicit Reparameterization Gradients: [Figurnov et al., 2018] ( ([pdf] (",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\random_grad.py",
    "ast_data": "FunctionDef name:_StatelessRandomGammaV3Grad arg:op arg:grad arguments arg arg Assign Assign Assign With Call Return return:yes Call Call"
  },
  {
    "library": "pandas",
    "name": "_format_strings",
    "source_code": "def _format_strings(self) -> list[str]:\n    ido = self.values._is_dates_only\n    values = self.values.astype(object)\n    formatter = self.formatter or get_format_datetime64(ido, date_format=self.date_format)\n    fmt_values = [formatter(x) for x in values]\n    return fmt_values",
    "docstring": "we by definition have a TZ",
    "type": "method",
    "file_path": "pandas\\pandas\\io\\formats\\format.py",
    "ast_data": "FunctionDef name:_format_strings arg:self arguments arg Assign Assign Call Assign BoolOp Call Assign Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "ShapeEnvSettings",
    "source_code": "@dataclass(frozen=True)\nclass ShapeEnvSettings:\n    allow_scalar_outputs: bool\n    allow_dynamic_output_shape_ops: bool\n    assume_static_by_default: bool\n    specialize_zero_one: bool\n    duck_shape: bool\n    prefer_deferred_runtime_asserts_over_guards: bool\n    allow_complex_guards_as_runtime_asserts: bool\n    trace_asserts: bool",
    "docstring": "Encapsulates all shape env settings that could potentially affect FakeTensor dispatch. Used when creating dispatch cache keys.",
    "type": "class",
    "file_path": "pytorch\\torch\\fx\\experimental\\symbolic_shapes.py",
    "ast_data": "ClassDef name:ShapeEnvSettings Call"
  },
  {
    "library": "pytorch",
    "name": "run_shape_prop",
    "source_code": "def run_shape_prop(self) -> None:\n    ShapeProp(self.module).propagate(*self.sample_input)",
    "docstring": "Helper function to run shape propagation on module. Can be overridden by subclasses for custom shape propagation logic.",
    "type": "method",
    "file_path": "pytorch\\torch\\fx\\passes\\net_min_base.py",
    "ast_data": "FunctionDef name:run_shape_prop arg:self arguments arg Call Call"
  },
  {
    "library": "pytorch",
    "name": "_get_capturable_supported_devices",
    "source_code": "def _get_capturable_supported_devices(supports_xla: bool=True) -> list[str]:\n    capturable_supported_devices = ['cuda', 'xpu', 'hpu']\n    if not torch.jit.is_scripting():\n        capturable_supported_devices.append(torch._C._get_privateuse1_backend_name())\n    if supports_xla:\n        capturable_supported_devices.append('xla')\n    return capturable_supported_devices",
    "docstring": "Return the device type list that supports capturable optimizer.",
    "type": "function",
    "file_path": "pytorch\\torch\\optim\\optimizer.py",
    "ast_data": "FunctionDef name:_get_capturable_supported_devices arg:supports_xla arguments arg Assign If Call Call Call If Call Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "_init_arpack_v0",
    "source_code": "def _init_arpack_v0(size, random_state):\n    random_state = check_random_state(random_state)\n    v0 = random_state.uniform(-1, 1, size)\n    return v0",
    "docstring": "Initialize the starting vector for iteration in ARPACK functions. Initialize a ndarray with values sampled from the uniform distribution on [-1, 1]. This initialization model has been chosen to be consistent with the ARPACK one as another initialization can lead to convergence issues. Parameters ---------- size : int The size of the eigenvalue vector to be initialized. random_state : int, RandomState instance or None, default=None The seed of the pseudo random number generator used to generate a uniform distribution. If int, random_state is the seed used by the random number generator; If RandomState instance, random_state is the random number generator; If None, the random number generator is the RandomState instance used by . Returns ------- v0 : ndarray of shape (size,) The initialized vector.",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\utils\\_arpack.py",
    "ast_data": "FunctionDef name:_init_arpack_v0 arg:size arg:random_state arguments arg arg Assign Call Assign Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_free_unsharded_flat_param",
    "source_code": "def _free_unsharded_flat_param(self):\n    self._check_sharded_strategy()\n    unsharded_flat_param = self._get_padded_unsharded_flat_param()\n    self._check_on_compute_device(unsharded_flat_param)\n    _no_dispatch_record_stream(unsharded_flat_param, self._device_handle.current_stream())\n    _free_storage(unsharded_flat_param)",
    "docstring": "Free the padded unsharded flat parameter. We allow this function to be called even when storage is not allocated The tensor to free depends on the calling context since the unshard may have forced full precision, in which case a different tensor is used.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\fsdp\\_flat_param.py",
    "ast_data": "FunctionDef name:_free_unsharded_flat_param arg:self arguments arg Call Assign Call Call Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "_standardize_shapes",
    "source_code": "def _standardize_shapes(path, tensor, shape):\n    if not isinstance(tensor, torch.Tensor):\n        return None\n    if shape is None:\n        return [Dim.STATIC] * len(tensor.shape)\n    out = []\n    if isinstance(shape, dict):\n        for i, s in enumerate(tensor.shape):\n            out.append(s if shape.get(i) is None else shape.get(i))\n    else:\n        assert isinstance(shape, (tuple, list))\n        for i, s in enumerate(tensor.shape):\n            out.append(s if shape[i] is None else shape[i])\n    return out",
    "docstring": "Helps standardize the dynamic_shapes tree structure we serialize, returning lists for each tensor shape, handling tensor-level Nones.",
    "type": "function",
    "file_path": "pytorch\\torch\\_export\\serde\\dynamic_shapes.py",
    "ast_data": "FunctionDef name:_standardize_shapes arg:path arg:tensor arg:shape arguments arg arg arg If Call Return return:no If Compare Return return:yes Call Assign If Call For Call Call Compare Call Call Call For Call Call Compare Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "__call__",
    "source_code": "def __call__(self):\n    vmin, vmax = self.axis.get_view_interval()\n    return self.tick_values(vmin, vmax)",
    "docstring": "Return the locations of the ticks.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\ticker.py",
    "ast_data": "FunctionDef name:__call__ arg:self arguments arg Assign Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_get_var_list",
    "source_code": "def _get_var_list(model):\n    var_list, _, _ = graph_view.ObjectGraphView(model).serialize_object_graph()\n    return var_list",
    "docstring": "Returns list of all checkpointed saveable objects in the model.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\saving\\saved_model_experimental.py",
    "ast_data": "FunctionDef name:_get_var_list arg:model arguments arg Assign Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "transpose",
    "source_code": "@tf_export(v1=['transpose'])\n@dispatch.add_dispatch_support\ndef transpose(a, perm=None, name='transpose', conjugate=False):\n    with ops.name_scope(name, 'transpose', [a]) as name:\n        if not tensor_util.is_tf_type(a):\n            a = ops.convert_to_tensor(a, name='a')\n        if conjugate and a.dtype.is_complex:\n            transpose_fn = gen_array_ops.conjugate_transpose\n        else:\n            transpose_fn = gen_array_ops.transpose\n        if perm is not None:\n            return transpose_fn(a, perm, name=name)\n        rank = a.shape.rank\n        if rank is None:\n            perm = gen_math_ops._range(gen_array_ops.rank(a) - 1, -1, -1)\n        else:\n            perm = np.arange(rank - 1, -1, -1, dtype=np.int32)\n        return transpose_fn(a, perm, name=name)",
    "docstring": "Transposes . Permutes the dimensions according to . The returned tensor's dimension i will correspond to the input dimension . If is not given, it is set to (n-1...0), where n is the rank of the input tensor. Hence, by default, this operation performs a regular matrix transpose on 2-D input Tensors. If conjugate is True and is either or then the values of are conjugated and transposed. @compatibility(numpy) In transposes are memory-efficient constant time operations as they simply return a new view of the same data with adjusted . TensorFlow does not support strides, so returns a new tensor with the items permuted. @end_compatibility For example: Args: a: A . perm: A permutation of the dimensions of . name: A name for the operation (optional). conjugate: Optional bool. Setting it to is mathematically equivalent to tf.math.conj(tf.transpose(input)). Returns: A transposed .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\array_ops.py",
    "ast_data": "FunctionDef name:transpose arg:a arg:perm arg:name arg:conjugate arguments arg arg arg arg With Call If Call Assign Call If BoolOp Assign Assign If Compare Return return:yes Call Assign If Compare Assign Call Call Assign Call Return return:yes Call Call"
  },
  {
    "library": "numpy",
    "name": "round",
    "source_code": "def round(self, decimals=0, out=None):\n    result = self._data.round(decimals=decimals, out=out).view(type(self))\n    if result.ndim > 0:\n        result._mask = self._mask\n        result._update_from(self)\n    elif self._mask:\n        result = masked\n    if out is None:\n        return result\n    if isinstance(out, MaskedArray):\n        out.__setmask__(self._mask)\n    return out",
    "docstring": "Return each element rounded to the given number of decimals. Refer to for full documentation. See Also -------- numpy.ndarray.round : corresponding function for ndarrays numpy.around : equivalent function Examples -------- >>> import numpy as np >>> import numpy.ma as ma >>> x = ma.array([1.35, 2.5, 1.5, 1.75, 2.25, 2.75], ... mask=[0, 0, 0, 1, 0, 0]) >>> ma.round(x) masked_array(data=[1.0, 2.0, 2.0, --, 2.0, 3.0], mask=[False, False, False, True, False, False], fill_value=1e+20)",
    "type": "method",
    "file_path": "numpy\\numpy\\ma\\core.py",
    "ast_data": "FunctionDef name:round arg:self arg:decimals arg:out arguments arg arg arg Assign Call Call Call If Compare Assign Call If Assign If Compare Return return:yes If Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "domain_dimension_tensor",
    "source_code": "def domain_dimension_tensor(self, name='domain_dimension_tensor'):\n    with self._name_scope(name):\n        return self._domain_dimension_tensor()",
    "docstring": "Dimension (in the sense of vector spaces) of the domain of this operator. Determined at runtime. If this operator acts like the batch matrix with , then this returns . Args: name: A name for this . Returns:",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\linalg\\linear_operator.py",
    "ast_data": "FunctionDef name:domain_dimension_tensor arg:self arg:name arguments arg arg With Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "_insert_logger_after_node",
    "source_code": "def _insert_logger_after_node(node: Node, gm: GraphModule, logger_cls: Callable, logger_node_name_suffix: str, ref_node_name: str, model_name: str, ref_name: str, ref_node_target_type: str, results_type: str, index_within_arg: int, index_of_arg: int, fqn: Optional[str]) -> Node:\n    logger_node_name = get_new_attr_name_with_prefix(node.name + logger_node_name_suffix)(gm)\n    target_type = get_target_type_str(node, gm)\n    logger_obj = logger_cls(ref_node_name, node.name, model_name, ref_name, target_type, ref_node_target_type, results_type, index_within_arg, index_of_arg, fqn)\n    setattr(gm, logger_node_name, logger_obj)\n    logger_node = node.graph.create_node('call_module', logger_node_name, (node,), {})\n    return logger_node",
    "docstring": "Given a starting graph of prev_node -> node -> next_node This function creates a new logger_cls obj and adds it after node, resulting in prev_node -> node -> logger_obj -> next_node",
    "type": "function",
    "file_path": "pytorch\\torch\\ao\\ns\\fx\\graph_passes.py",
    "ast_data": "FunctionDef name:_insert_logger_after_node arg:node arg:gm arg:logger_cls arg:logger_node_name_suffix arg:ref_node_name arg:model_name arg:ref_name arg:ref_node_target_type arg:results_type arg:index_within_arg arg:index_of_arg arg:fqn arguments arg arg arg arg arg arg arg arg arg arg arg arg Assign Call Call Assign Call Assign Call Call Assign Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "__init__",
    "source_code": "def __init__(self, tensor_size, index, res, input_var):\n    assert isinstance(res, DVar)\n    self.res = res\n    self.tensor_size = tensor_size\n    self.index = index\n    self.input_var = input_var",
    "docstring": "Constraint for getting item given a tensor size :param tensor_size: actual number :param index: actual number representing the index :param res: dimension variable to carry the item we get :param input_var: a tensor variable from which we will get item",
    "type": "method",
    "file_path": "pytorch\\torch\\fx\\experimental\\migrate_gradual_types\\constraint.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:tensor_size arg:index arg:res arg:input_var arguments arg arg arg arg arg Call Assign Assign Assign Assign"
  },
  {
    "library": "tensorflow",
    "name": "close",
    "source_code": "def close(self):\n    raise NotImplementedError()",
    "docstring": "Flushes and closes the summary writer.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\summary_ops_v2.py",
    "ast_data": "FunctionDef name:close arg:self arguments arg Raise Call"
  },
  {
    "library": "tensorflow",
    "name": "tensor_not_equals",
    "source_code": "@tf_export('__operators__.ne', v1=[])\n@dispatch.add_dispatch_support\ndef tensor_not_equals(self, other):\n    if other is None:\n        return True\n    if tensor_lib.Tensor._USE_EQUALITY and ops.executing_eagerly_outside_functions():\n        self, other = override_binary_operator.maybe_promote_tensors(self, other)\n        return gen_math_ops.not_equal(self, other, incompatible_shape_error=False)\n    else:\n        return self is not other",
    "docstring": "The operation invoked by the operator. Compares two tensors element-wise for inequality if they are broadcast-compatible; or returns True if they are not broadcast-compatible. (Note that this behavior differs from , which raises an exception if the two tensors are not broadcast-compatible.) Purpose in the API: This method is exposed in TensorFlow's API so that library developers can register dispatching for to allow it to handle custom composite tensors & other custom objects. The API symbol is not intended to be called by users directly and does appear in TensorFlow's generated documentation. Args: self: The left-hand side of the operator. other: The right-hand side of the operator. Returns: The result of the elementwise operation, or if the arguments are not broadcast-compatible.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\math_ops.py",
    "ast_data": "FunctionDef name:tensor_not_equals arg:self arg:other arguments arg arg If Compare Return return:yes If BoolOp Call Assign Call Return return:yes Call Return return:yes Compare Call"
  },
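  {
    "library": "tensorflow",
    "name": "tensor_not_equals",
    "type": "usage_example",
    "note": "Editorial usage sketch, not library source: element-wise != on broadcast-compatible tensors, and the plain-bool result when comparing against None; assumes TF2 eager mode.",
    "example_code": "import tensorflow as tf\n\na = tf.constant([1, 2, 3])\nb = tf.constant([1, 0, 3])\na != b     # tf.Tensor([False  True False])\na != None  # plain Python True: None short-circuits before dispatch"
  },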
  {
    "library": "matplotlib",
    "name": "get_horizontalalignment",
    "source_code": "def get_horizontalalignment(self):\n    return self._horizontalalignment",
    "docstring": "Return the horizontal alignment as a string. Will be one of 'left', 'center' or 'right'.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\text.py",
    "ast_data": "FunctionDef name:get_horizontalalignment arg:self arguments arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "amax",
    "source_code": "@_apply_docstring_templates\ndef amax(input: Union[Tensor, MaskedTensor], dim: DimOrDims=None, *, keepdim: Optional[bool]=False, dtype: Optional[DType]=None, mask: Optional[Tensor]=None) -> Tensor:\n    if dtype is None:\n        dtype = input.dtype\n    mask_input = _combine_input_and_mask(amax, input, mask)\n    dim_ = _canonical_dim(dim, mask_input.ndim)\n    if mask_input.layout == torch.strided:\n        return torch.amax(mask_input, dim_, bool(keepdim)).to(dtype=dtype)\n    elif mask_input.layout == torch.sparse_coo:\n        if mask is None:\n            raise ValueError('masked amax expects explicit mask for sparse_coo tensor input')\n        return _sparse_coo_scatter_reduction_helper(torch.amax, mask_input, dim_, bool(keepdim), dtype)\n    elif mask_input.layout == torch.sparse_csr:\n        if mask is None:\n            raise ValueError('masked amax expects explicit mask for sparse_csr tensor input')\n        return _sparse_csr_segment_reduction_helper(torch.amax, mask_input, dim_, bool(keepdim), dtype)\n    else:\n        raise ValueError(f'masked amax expects strided, sparse_coo or sparse_csr tensor (got {mask_input.layout} tensor)')",
    "docstring": "{reduction_signature} {reduction_descr} {reduction_identity_dtype} {reduction_args} {reduction_example}",
    "type": "function",
    "file_path": "pytorch\\torch\\masked\\_ops.py",
    "ast_data": "FunctionDef name:amax arg:input arg:dim arguments arg arg arg arg arg If Compare Assign Assign Call Assign Call If Compare Return return:yes Call Call Call If Compare If Compare Raise Call Return return:yes Call Call If Compare If Compare Raise Call Return return:yes Call Call Raise Call"
  },
  {
    "library": "tensorflow",
    "name": "get_weights",
    "source_code": "def get_weights(self):\n    with self.distribute_strategy.scope():\n        return super(Model, self).get_weights()",
    "docstring": "Retrieves the weights of the model. Returns: A flat list of Numpy arrays.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\training.py",
    "ast_data": "FunctionDef name:get_weights arg:self arguments arg With Call Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "unwrap",
    "source_code": "def unwrap(self) -> 'VariableTracker':\n    return self",
    "docstring": "Used by LazyVariableTracker to return the real VariableTracker if it already exists",
    "type": "method",
    "file_path": "pytorch\\torch\\_dynamo\\variables\\base.py",
    "ast_data": "FunctionDef name:unwrap arg:self arguments arg Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "set_useOffset",
    "source_code": "def set_useOffset(self, val):\n    if isinstance(val, bool):\n        self.offset = 0\n        self._useOffset = val\n    else:\n        self._useOffset = False\n        self.offset = val",
    "docstring": "Set whether to use offset notation. When formatting a set numbers whose value is large compared to their range, the formatter can separate an additive constant. This can shorten the formatted numbers so that they are less likely to overlap when drawn on an axis. Parameters ---------- val : bool or float - If False, do not use offset notation. - If True (=automatic mode), use offset notation if it can make the residual numbers significantly shorter. The exact behavior is controlled by :rc:. - If a number, force an offset of the given value. Examples -------- With active offset notation, the values ``, which is written to the edge of the axis.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\ticker.py",
    "ast_data": "FunctionDef name:set_useOffset arg:self arg:val arguments arg arg If Call Assign Assign Assign Assign"
  },
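  {
    "library": "matplotlib",
    "name": "set_useOffset",
    "type": "usage_example",
    "note": "Editorial usage sketch, not library source: switching a ScalarFormatter between automatic and fixed offsets; the plotted values are illustrative.",
    "example_code": "import matplotlib.pyplot as plt\nfrom matplotlib.ticker import ScalarFormatter\n\nfig, ax = plt.subplots()\nax.plot([100_000, 100_002, 100_004], [0, 1, 2])\n\nfmt = ScalarFormatter()\nfmt.set_useOffset(True)     # automatic: ticks may render as 0, 2, 4 plus +1e5\nax.xaxis.set_major_formatter(fmt)\n\nfmt.set_useOffset(100_000)  # or force a fixed offset value"
  },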
  {
    "library": "tensorflow",
    "name": "_optimize_graph",
    "source_code": "def _optimize_graph(meta_graph_def, signature_def):\n    new_meta_graph_def = copy.deepcopy(meta_graph_def)\n    fetch_collection = meta_graph_pb2.CollectionDef()\n    for tensor_info in list(signature_def.inputs.values()) + list(signature_def.outputs.values()):\n        fetch_collection.node_list.value.append(tensor_info.name)\n    new_meta_graph_def.collection_def['train_op'].CopyFrom(fetch_collection)\n    new_meta_graph_def.ClearField('saver_def')\n    config = config_pb2.ConfigProto()\n    rewrite_options = config.graph_options.rewrite_options\n    rewrite_options.min_graph_nodes = -1\n    return tf_optimizer.OptimizeGraph(config, new_meta_graph_def)",
    "docstring": "Optimize using grappler. Returns a .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\tools\\saved_model_aot_compile.py",
    "ast_data": "FunctionDef name:_optimize_graph arg:meta_graph_def arg:signature_def arguments arg arg Assign Call Assign Call For Call Call Call Call Call Call Call Assign Call Assign Assign Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "update",
    "source_code": "def update(self, new_scale: Optional[Union[float, torch.Tensor]]=None) -> None:\n    if not self._enabled:\n        return\n    _scale, _growth_tracker = self._check_scale_growth_tracker('update')\n    if new_scale is not None:\n        assert self._scale is not None\n        if isinstance(new_scale, float):\n            self._scale.fill_(new_scale)\n        else:\n            reason = 'new_scale should be a float or a 1-element torch.cuda.FloatTensor or                     torch.FloatTensor with requires_grad=False.'\n            assert new_scale.device.type == self._device, reason\n            assert new_scale.numel() == 1, reason\n            assert new_scale.requires_grad is False, reason\n            self._scale.copy_(new_scale)\n    else:\n        found_infs = [found_inf.to(device=_scale.device, non_blocking=True) for state in self._per_optimizer_states.values() for found_inf in state['found_inf_per_device'].values()]\n        assert len(found_infs) > 0, 'No inf checks were recorded prior to update.'\n        found_inf_combined = found_infs[0]\n        if len(found_infs) > 1:\n            for i in range(1, len(found_infs)):\n                found_inf_combined += found_infs[i]\n        torch._amp_update_scale_(_scale, _growth_tracker, found_inf_combined, self._growth_factor, self._backoff_factor, self._growth_interval)\n    self._per_optimizer_states = defaultdict(_refresh_per_optimizer_state)",
    "docstring": "Update the scale factor. If any optimizer steps were skipped the scale is multiplied by `torch.Tensorupdate` has been invoked for all optimizers used this iteration. .. warning:: For performance reasons, we do not check the scale factor value to avoid synchronizations, so the scale factor is not guaranteed to be above 1. If the scale falls below 1 and/or you are seeing NaNs in your gradients or loss, something is likely wrong. For example, bf16-pretrained models are often incompatible with AMP/fp16 due to differing dynamic ranges.",
    "type": "method",
    "file_path": "pytorch\\torch\\amp\\grad_scaler.py",
    "ast_data": "FunctionDef name:update arg:self arg:new_scale arguments arg arg If Return return:no Assign Call If Compare Compare If Call Call Assign Compare Compare Call Compare Call Assign Call Call Call Compare Call Assign If Compare Call For Call Call Call Assign Call"
  },
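  {
    "library": "pytorch",
    "name": "update",
    "type": "usage_example",
    "note": "Editorial sketch of the canonical AMP loop, not library source: `model`, `optimizer`, `loader` and `loss_fn` are assumed to exist and a CUDA device is assumed; older releases spell the scaler torch.cuda.amp.GradScaler().",
    "example_code": "import torch\n\nscaler = torch.amp.GradScaler('cuda')\nfor inputs, targets in loader:\n    optimizer.zero_grad()\n    with torch.autocast('cuda'):\n        loss = loss_fn(model(inputs), targets)\n    scaler.scale(loss).backward()\n    scaler.step(optimizer)  # step() for every optimizer first ...\n    scaler.update()         # ... then update() once, at the end of the iteration"
  },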
  {
    "library": "django",
    "name": "SQLiteCursorWrapper",
    "source_code": "class SQLiteCursorWrapper(Database.Cursor):\n\n    def execute(self, query, params=None):\n        if params is None:\n            return super().execute(query)\n        param_names = list(params) if isinstance(params, Mapping) else None\n        query = self.convert_query(query, param_names=param_names)\n        return super().execute(query, params)\n\n    def executemany(self, query, param_list):\n        peekable, param_list = tee(iter(param_list))\n        if (params := next(peekable, None)) and isinstance(params, Mapping):\n            param_names = list(params)\n        else:\n            param_names = None\n        query = self.convert_query(query, param_names=param_names)\n        return super().executemany(query, param_list)\n\n    def convert_query(self, query, *, param_names=None):\n        if param_names is None:\n            return FORMAT_QMARK_REGEX.sub('?', query).replace('%%', '%')\n        else:\n            return query % {name: f':{name}' for name in param_names}",
    "docstring": "Django uses the \"format\" and \"pyformat\" styles, but Python's sqlite3 module supports neither of these styles. This wrapper performs the following conversions: - \"format\" style to \"qmark\" style - \"pyformat\" style to \"named\" style In both cases, if you want to use a literal \"%s\", you'll need to use \"%%s\".",
    "type": "class",
    "file_path": "django\\django\\db\\backends\\sqlite3\\base.py",
    "ast_data": "ClassDef name:SQLiteCursorWrapper FunctionDef name:execute arg:self arg:query arg:params arguments arg arg arg If Compare Return return:yes Call Call Assign Call Call Assign Call Return return:yes Call Call FunctionDef name:executemany arg:self arg:query arg:param_list arguments arg arg arg Assign Call Call If BoolOp Call Call Assign Call Assign Assign Call Return return:yes Call Call FunctionDef name:convert_query arg:self arg:query arguments arg arg arg If Compare Return return:yes Call Call Return return:yes"
  },
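  {
    "library": "django",
    "name": "SQLiteCursorWrapper",
    "type": "usage_example",
    "note": "Editorial sketch, not Django source: a standalone approximation of the 'format' -> 'qmark' rewrite described above; the real FORMAT_QMARK_REGEX lives in django.db.backends.sqlite3.base and may differ in detail.",
    "example_code": "import re\n\n# Negative lookbehind so the literal %%s escape is left alone.\nFORMAT_QMARK_REGEX = re.compile(r'(?<!%)%s')\n\ndef convert_query(query):\n    return FORMAT_QMARK_REGEX.sub('?', query).replace('%%', '%')\n\nconvert_query(\"SELECT * FROM t WHERE a = %s AND b LIKE '100%%'\")\n# -> \"SELECT * FROM t WHERE a = ? AND b LIKE '100%'\""
  },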
  {
    "library": "matplotlib",
    "name": "FuncScaleLog",
    "source_code": "class FuncScaleLog(LogScale):\n    name = 'functionlog'\n\n    def __init__(self, axis, functions, base=10):\n        forward, inverse = functions\n        self.subs = None\n        self._transform = FuncTransform(forward, inverse) + LogTransform(base)\n\n    @property\n    def base(self):\n        return self._transform._b.base\n\n    def get_transform(self):\n        return self._transform",
    "docstring": "Provide an arbitrary scale with user-supplied function for the axis and then put on a logarithmic axes.",
    "type": "class",
    "file_path": "matplotlib\\lib\\matplotlib\\scale.py",
    "ast_data": "ClassDef name:FuncScaleLog Assign FunctionDef name:__init__ arg:self arg:axis arg:functions arg:base arguments arg arg arg arg Assign Assign Assign Call Call FunctionDef name:base arg:self arguments arg Return return:yes FunctionDef name:get_transform arg:self arguments arg Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "single_source_shortest_path_length",
    "source_code": "@validate_params({'graph': ['array-like', 'sparse matrix'], 'source': [Interval(Integral, 0, None, closed='left')], 'cutoff': [Interval(Integral, 0, None, closed='left'), None]}, prefer_skip_nested_validation=True)\ndef single_source_shortest_path_length(graph, source, *, cutoff=None):\n    if sparse.issparse(graph):\n        graph = graph.tolil()\n    else:\n        graph = sparse.lil_matrix(graph)\n    seen = {}\n    level = 0\n    next_level = [source]\n    while next_level:\n        this_level = next_level\n        next_level = set()\n        for v in this_level:\n            if v not in seen:\n                seen[v] = level\n                next_level.update(graph.rows[v])\n        if cutoff is not None and cutoff <= level:\n            break\n        level += 1\n    return seen",
    "docstring": "Return the length of the shortest path from source to all reachable nodes. Parameters ---------- graph : {array-like, sparse matrix} of shape (n_nodes, n_nodes) Adjacency matrix of the graph. Sparse matrix of format LIL is preferred. source : int Start node for path. cutoff : int, default=None Depth to stop the search - only paths of length >> from sklearn.utils.graph import single_source_shortest_path_length >>> import numpy as np >>> graph = np.array([[ 0, 1, 0, 0], ... [ 1, 0, 1, 0], ... [ 0, 1, 0, 0], ... [ 0, 0, 0, 0]]) >>> single_source_shortest_path_length(graph, 0) {0: 0, 1: 1, 2: 2} >>> graph = np.ones((6, 6)) >>> sorted(single_source_shortest_path_length(graph, 2).items()) [(0, 1), (1, 1), (2, 0), (3, 1), (4, 1), (5, 1)]",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\utils\\graph.py",
    "ast_data": "FunctionDef name:single_source_shortest_path_length arg:graph arg:source arguments arg arg arg If Call Assign Call Assign Call Assign Assign Assign While Assign Assign Call For If Compare Assign Call If BoolOp Compare Compare Return return:yes Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "broadcast_tensors",
    "source_code": "def broadcast_tensors(*tensors):\n    if has_torch_function(tensors):\n        return handle_torch_function(broadcast_tensors, tensors, *tensors)\n    return _VF.broadcast_tensors(tensors)",
    "docstring": "broadcast_tensors(*tensors) -> List of Tensors Broadcasts the given tensors according to :ref:. Args: *tensors: any number of tensors of the same type .. warning:: More than one element of a broadcasted tensor may refer to a single memory location. As a result, in-place operations (especially ones that are vectorized) may result in incorrect behavior. If you need to write to the tensors, please clone them first. Example:: >>> x = torch.arange(3).view(1, 3) >>> y = torch.arange(2).view(2, 1) >>> a, b = torch.broadcast_tensors(x, y) >>> a.size() torch.Size([2, 3]) >>> a tensor([[0, 1, 2], [0, 1, 2]])",
    "type": "function",
    "file_path": "pytorch\\torch\\functional.py",
    "ast_data": "FunctionDef name:broadcast_tensors arguments arg If Call Return return:yes Call Return return:yes Call"
  },
  {
    "library": "django",
    "name": "force_escape",
    "source_code": "@register.filter(is_safe=True)\n@stringfilter\ndef force_escape(value):\n    return escape(value)",
    "docstring": "Escape a string's HTML. Return a new string containing the escaped characters (as opposed to \"escape\", which marks the content for later possible escaping).",
    "type": "function",
    "file_path": "django\\django\\template\\defaultfilters.py",
    "ast_data": "FunctionDef name:force_escape arg:value arguments arg Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "BufferIndex",
    "source_code": "class BufferIndex:\n\n    def __init__(self, idx, size, hash_value):\n        self.idx = idx\n        self.size = size\n        self.hash_value = hash_value",
    "docstring": "A class to store index, size, hash of the buffers in TFLite model.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\lite\\python\\convert.py",
    "ast_data": "ClassDef name:BufferIndex FunctionDef name:__init__ arg:self arg:idx arg:size arg:hash_value arguments arg arg arg arg Assign Assign Assign"
  },
  {
    "library": "tensorflow",
    "name": "get",
    "source_code": "def get(self):\n    with self._not_empty:\n        while not self._queue:\n            self._not_empty.wait()\n        item = self._queue.popleft()\n        self._not_full.notify()\n        return item",
    "docstring": "Remove and return an item from the queue. If the queue is empty, blocks until an item is available. Returns: an item from the queue",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\summary\\writer\\event_file_writer.py",
    "ast_data": "FunctionDef name:get arg:self arguments arg With While Call Assign Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "register_parameter",
    "source_code": "def register_parameter(self, name: str, param: Optional[Parameter]) -> None:\n    if '_parameters' not in self.__dict__:\n        raise AttributeError('cannot assign parameter before Module.__init__() call')\n    elif not isinstance(name, str):\n        raise TypeError(f'parameter name should be a string. Got {torch.typename(name)}')\n    elif '.' in name:\n        raise KeyError('parameter name can\\'t contain \".\"')\n    elif name == '':\n        raise KeyError('parameter name can\\'t be empty string \"\"')\n    elif hasattr(self, name) and name not in self._parameters:\n        raise KeyError(f\"attribute '{name}' already exists\")\n    if param is None:\n        self._parameters[name] = None\n    elif not isinstance(param, Parameter):\n        raise TypeError(f\"cannot assign '{torch.typename(param)}' object to parameter '{name}' (torch.nn.Parameter or None required)\")\n    elif param.grad_fn:\n        raise ValueError(f\"Cannot assign non-leaf Tensor to parameter '{name}'. Model parameters must be created explicitly. To express '{name}' as a function of another Tensor, compute the value in the forward() method.\")\n    else:\n        for hook in _global_parameter_registration_hooks.values():\n            output = hook(self, name, param)\n            if output is not None:\n                param = output\n        self._parameters[name] = param",
    "docstring": "Add a parameter to the module. The parameter can be accessed as an attribute using given name. Args: name (str): name of the parameter. The parameter can be accessed from this module using the given name param (Parameter or None): parameter to be added to the module. If `cudastate_dict`.",
    "type": "method",
    "file_path": "pytorch\\torch\\nn\\modules\\module.py",
    "ast_data": "FunctionDef name:register_parameter arg:self arg:name arg:param arguments arg arg arg If Compare Raise Call If Call Raise Call Call If Compare Raise Call If Compare Raise Call If BoolOp Call Compare Raise Call If Compare Assign If Call Raise Call Call If Raise Call For Call Assign Call If Compare Assign Assign"
  },
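  {
    "library": "pytorch",
    "name": "register_parameter",
    "type": "usage_example",
    "note": "Editorial usage sketch, not library source: registering a learnable scalar and a None placeholder on a custom Module; the Scale class is hypothetical.",
    "example_code": "import torch\nfrom torch import nn\n\nclass Scale(nn.Module):\n    def __init__(self):\n        super().__init__()\n        # A learnable scalar, visible to optimizers and state_dict().\n        self.register_parameter('alpha', nn.Parameter(torch.ones(1)))\n        # None is accepted as a placeholder and skipped by optimizers.\n        self.register_parameter('beta', None)\n\n    def forward(self, x):\n        return self.alpha * x\n\nm = Scale()\nprint([name for name, _ in m.named_parameters()])  # ['alpha']"
  },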
  {
    "library": "tensorflow",
    "name": "get_extra_vars",
    "source_code": "def get_extra_vars():\n    g = ops.get_default_graph()\n    if isinstance(g, _FuncGraph):\n        return g.extra_vars\n    else:\n        return []",
    "docstring": "Returns the captured variables by the function. Returns: If the default graph is being used to define a function, the returned list of variables are those created inside the function body so far. Otherwise, returns an empty list.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\function.py",
    "ast_data": "FunctionDef name:get_extra_vars arguments Assign Call If Call Return return:yes Return return:no"
  },
  {
    "library": "scikit-learn",
    "name": "inplace_relu_derivative",
    "source_code": "def inplace_relu_derivative(Z, delta):\n    delta[Z == 0] = 0",
    "docstring": "Apply the derivative of the relu function. It exploits the fact that the derivative is a simple function of the output value from rectified linear units activation function. Parameters ---------- Z : {array-like, sparse matrix}, shape (n_samples, n_features) The data which was output from the rectified linear units activation function during the forward pass. delta : {array-like}, shape (n_samples, n_features) The backpropagated error signal to be modified inplace.",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\neural_network\\_base.py",
    "ast_data": "FunctionDef name:inplace_relu_derivative arg:Z arg:delta arguments arg arg Assign Compare"
  },
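  {
    "library": "scikit-learn",
    "name": "inplace_relu_derivative",
    "type": "usage_example",
    "note": "Editorial sketch, not library source: the in-place masking the helper performs, replayed on a tiny array.",
    "example_code": "import numpy as np\n\n# Z holds relu outputs from the forward pass; delta is modified in place.\nZ = np.array([[0.0, 1.5], [2.0, 0.0]])\ndelta = np.ones_like(Z)\ndelta[Z == 0] = 0\n# delta is now [[0., 1.], [1., 0.]]: zero gradient where the unit was inactive"
  },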
  {
    "library": "tensorflow",
    "name": "bessel_k0e",
    "source_code": "@tf_export('math.special.bessel_k0e')\n@dispatch.register_unary_elementwise_api\n@dispatch.add_dispatch_support\ndef bessel_k0e(x, name=None):\n    with ops.name_scope(name, 'bessel_k0e', [x]):\n        return gen_special_math_ops.bessel_k0e(x)",
    "docstring": "Computes the Bessel k0e function of element-wise. Modified Bessel function of order 0. >>> tf.math.special.bessel_k0e([0.5, 1., 2., 4.]).numpy() array([1.52410939, 1.14446308, 0.84156822, 0.60929767], dtype=float32) Args: x: A or . Must be one of the following types: , , . name: A name for the operation (optional). Returns: A or , respectively. Has the same type as . @compatibility(scipy) Equivalent to scipy.special.k0e @end_compatibility",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\special_math_ops.py",
    "ast_data": "FunctionDef name:bessel_k0e arg:x arg:name arguments arg arg With Call Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "allclose",
    "source_code": "def allclose(a: TensorLikeType, b: TensorLikeType, rtol: float=1e-05, atol: float=1e-08, equal_nan: bool=False) -> bool:\n    _check_close_args(name='torch.allclose', a=a, b=b, rtol=rtol, atol=atol)\n    return bool(torch.all(torch.isclose(a, b, rtol=rtol, atol=atol, equal_nan=equal_nan)).item())",
    "docstring": "Reference implementation of torch.allclose",
    "type": "function",
    "file_path": "pytorch\\torch\\_refs\\__init__.py",
    "ast_data": "FunctionDef name:allclose arg:a arg:b arg:rtol arg:atol arg:equal_nan arguments arg arg arg arg arg Call Return return:yes Call Call Call Call"
  },
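  {
    "library": "pytorch",
    "name": "allclose",
    "type": "usage_example",
    "note": "Editorial usage sketch, not library source: the tolerance formula |a - b| <= atol + rtol * |b| applied to a near-equal pair.",
    "example_code": "import torch\n\na = torch.tensor([1.0, 2.0])\nb = torch.tensor([1.0, 2.0 + 1e-6])\ntorch.allclose(a, b)                  # True under the default tolerances\ntorch.allclose(a, b, rtol=0, atol=0)  # False once the tolerances are removed"
  },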
  {
    "library": "tensorflow",
    "name": "execution_mode",
    "source_code": "@execution_mode.setter\ndef execution_mode(self, mode):\n    if mode not in (None, SYNC, ASYNC):\n        raise ValueError('Execution mode should be None/SYNC/ASYNC. Got %s' % mode)\n    if mode is None:\n        mode = SYNC\n    enable_async = mode == ASYNC\n    if self.is_async() != enable_async:\n        if self._context_handle is not None:\n            self.executor.wait()\n            executor_new = executor.new_executor(enable_async)\n            self._thread_local_data.executor = executor_new\n            pywrap_tfe.TFE_ContextSetExecutorForThread(self._context_handle, executor_new.handle())\n        else:\n            self._default_is_async = enable_async",
    "docstring": "Sets execution mode for current thread.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\context.py",
    "ast_data": "FunctionDef name:execution_mode arg:self arg:mode arguments arg arg If Compare Raise Call If Compare Assign Assign Compare If Compare Call If Compare Call Assign Call Assign Call Call Assign"
  },
  {
    "library": "matplotlib",
    "name": "_TransformedBboxWithCallback",
    "source_code": "class _TransformedBboxWithCallback(TransformedBbox):\n\n    def __init__(self, *args, callback, **kwargs):\n        super().__init__(*args, **kwargs)\n        self._callback = callback\n\n    def get_points(self):\n        self._callback()\n        return super().get_points()",
    "docstring": "Variant of which calls *callback* before returning points. Used by to unstale the parent axes' viewlim as needed.",
    "type": "class",
    "file_path": "matplotlib\\lib\\mpl_toolkits\\axes_grid1\\inset_locator.py",
    "ast_data": "ClassDef name:_TransformedBboxWithCallback FunctionDef name:__init__ arg:self arguments arg arg arg arg Call Call Assign FunctionDef name:get_points arg:self arguments arg Call Return return:yes Call Call"
  },
  {
    "library": "scikit-learn",
    "name": "restrict",
    "source_code": "def restrict(self, support, indices=False):\n    check_is_fitted(self, 'feature_names_')\n    if not indices:\n        support = np.where(support)[0]\n    names = self.feature_names_\n    new_vocab = {}\n    for i in support:\n        new_vocab[names[i]] = len(new_vocab)\n    self.vocabulary_ = new_vocab\n    self.feature_names_ = [f for f, i in sorted(new_vocab.items(), key=itemgetter(1))]\n    return self",
    "docstring": "Restrict the features to those in support using feature selection. This function modifies the estimator in-place. Parameters ---------- support : array-like Boolean mask or list of indices (as returned by the get_support member of feature selectors). indices : bool, default=False Whether support is a list of indices. Returns ------- self : object DictVectorizer class instance. Examples -------- >>> from sklearn.feature_extraction import DictVectorizer >>> from sklearn.feature_selection import SelectKBest, chi2 >>> v = DictVectorizer() >>> D = [{'foo': 1, 'bar': 2}, {'foo': 3, 'baz': 1}] >>> X = v.fit_transform(D) >>> support = SelectKBest(chi2, k=2).fit(X, [0, 1]) >>> v.get_feature_names_out() array(['bar', 'baz', 'foo'], ...) >>> v.restrict(support.get_support()) DictVectorizer() >>> v.get_feature_names_out() array(['bar', 'foo'], ...)",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\feature_extraction\\_dict_vectorizer.py",
    "ast_data": "FunctionDef name:restrict arg:self arg:support arg:indices arguments arg arg arg Call If Assign Call Assign Assign For Assign Call Assign Assign Call Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "flush",
    "source_code": "def flush(self):\n    self._session.run(self._flush_op)",
    "docstring": "Flushes the event file to disk. Call this method to make sure that all pending events have been written to disk.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\summary\\writer\\event_file_writer_v2.py",
    "ast_data": "FunctionDef name:flush arg:self arguments arg Call"
  },
  {
    "library": "tensorflow",
    "name": "_ref",
    "source_code": "def _ref(self):\n    return self._variable",
    "docstring": "Returns a reference to this variable. You usually do not need to call this method as all ops that need a reference to the variable call it automatically. Returns is a which holds a reference to the variable. You can assign a new value to the variable by passing the tensor to an assign op. See if you want to get the value of the variable. Returns: A that is a reference to the variable.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\ref_variable.py",
    "ast_data": "FunctionDef name:_ref arg:self arguments arg Return return:yes"
  },
  {
    "library": "pandas",
    "name": "_make_plot_keywords",
    "source_code": "def _make_plot_keywords(self, kwds: dict[str, Any], y: np.ndarray) -> None:\n    kwds['bottom'] = self.bottom\n    kwds['bins'] = self.bins",
    "docstring": "merge BoxPlot/KdePlot properties to passed kwds",
    "type": "method",
    "file_path": "pandas\\pandas\\plotting\\_matplotlib\\hist.py",
    "ast_data": "FunctionDef name:_make_plot_keywords arg:self arg:kwds arg:y arguments arg arg arg Assign Assign"
  },
  {
    "library": "numpy",
    "name": "get_pkg_info",
    "source_code": "def get_pkg_info(pkgname, dirs=None):\n    from numpy.distutils.npy_pkg_config import read_config\n    if dirs:\n        dirs.append(get_npy_pkg_dir())\n    else:\n        dirs = [get_npy_pkg_dir()]\n    return read_config(pkgname, dirs)",
    "docstring": "Return library info for the given package. Parameters ---------- pkgname : str Name of the package (should match the name of the .ini file, without the extension, e.g. foo for the file foo.ini). dirs : sequence, optional If given, should be a sequence of additional directories where to look for npy-pkg-config files. Those directories are searched prior to the NumPy directory. Returns ------- pkginfo : class instance The instance containing the build information. Raises ------ PkgNotFound If the package is not found. See Also -------- Configuration.add_npy_pkg_config, Configuration.add_installed_library, get_info",
    "type": "function",
    "file_path": "numpy\\numpy\\distutils\\misc_util.py",
    "ast_data": "FunctionDef name:get_pkg_info arg:pkgname arg:dirs arguments arg arg If Call Call Assign Call Return return:yes Call"
  },
  {
    "library": "kornia",
    "name": "identity",
    "source_code": "@classmethod\ndef identity(cls, batch_size: Optional[int]=None, device: Optional[Device]=None, dtype: Dtype=None) -> 'Quaternion':\n    data = tensor([1.0, 0.0, 0.0, 0.0], device=device, dtype=dtype)\n    if batch_size is not None:\n        data = data.repeat(batch_size, 1)\n    return cls(data)",
    "docstring": "Create a quaternion representing an identity rotation. Args: batch_size: the batch size of the underlying data. device: device to place the result on. dtype: dtype of the result. Example: >>> q = Quaternion.identity() >>> q.data Parameter containing: tensor([1., 0., 0., 0.], requires_grad=True)",
    "type": "method",
    "file_path": "kornia\\kornia\\geometry\\quaternion.py",
    "ast_data": "FunctionDef name:identity arg:cls arg:batch_size arg:device arg:dtype arguments arg arg arg arg Assign Call If Compare Assign Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_capture_by_ref",
    "source_code": "def _capture_by_ref(self, graph: Any, lam: Callable[[], Any], key: Hashable=None) -> Any:\n    if key is not None and key in self._by_ref_internal:\n        return self._by_ref_internal[key]\n    if key is None:\n        key = len(self._by_ref_internal)\n        while key in self._by_ref_internal:\n            key += 1\n    value_nested = lam()\n    capture_trace_type = trace_type.from_value(value_nested)\n    ctx = trace_type.InternalPlaceholderContext(graph)\n    internal = capture_trace_type.placeholder_value(ctx)\n\n    def lam_fn():\n        value = lam()\n        return capture_trace_type.to_tensors(value)\n    self._by_ref_external[key] = lam_fn\n    self._by_ref_internal[key] = internal\n    self._by_ref_tracetype[key] = capture_trace_type\n    return self._by_ref_internal[key]",
    "docstring": "Used during tracing process to create/retrive by-ref captures. Args: graph: The FuncGraph that captures this tensor. lam: A callable that takes no arguments and returns tensor captures. key: A hashable identifier. Returns: Tensor from this FuncGraph.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\core\\function\\capture\\capture_container.py",
    "ast_data": "FunctionDef name:_capture_by_ref arg:self arg:graph arg:lam arg:key arguments arg arg arg arg If BoolOp Compare Compare Return return:yes If Compare Assign Call While Compare Assign Call Assign Call Assign Call Assign Call FunctionDef name:lam_fn arguments Assign Call Return return:yes Call Assign Assign Assign Return return:yes"
  },
  {
    "library": "cherrypy",
    "name": "ThreadManager",
    "source_code": "class ThreadManager(SimplePlugin):\n    threads = None\n    'A map of {thread ident: index number} pairs.'\n\n    def __init__(self, bus):\n        self.threads = {}\n        SimplePlugin.__init__(self, bus)\n        self.bus.listeners.setdefault('acquire_thread', set())\n        self.bus.listeners.setdefault('start_thread', set())\n        self.bus.listeners.setdefault('release_thread', set())\n        self.bus.listeners.setdefault('stop_thread', set())\n\n    def acquire_thread(self):\n        thread_ident = _thread.get_ident()\n        if thread_ident not in self.threads:\n            i = len(self.threads) + 1\n            self.threads[thread_ident] = i\n            self.bus.publish('start_thread', i)\n\n    def release_thread(self):\n        thread_ident = _thread.get_ident()\n        i = self.threads.pop(thread_ident, None)\n        if i is not None:\n            self.bus.publish('stop_thread', i)\n\n    def stop(self):\n        for thread_ident, i in self.threads.items():\n            self.bus.publish('stop_thread', i)\n        self.threads.clear()\n    graceful = stop",
    "docstring": "Manager for HTTP request threads. If you have control over thread creation and destruction, publish to the 'acquire_thread' and 'release_thread' channels (for each thread). This will register/unregister the current thread and publish to 'start_thread' and 'stop_thread' listeners in the bus as needed. If threads are created and destroyed by code you do not control (e.g., Apache), then, at the beginning of every HTTP request, publish to 'acquire_thread' only. You should not publish to 'release_thread' in this case, since you do not know whether the thread will be re-used or not. The bus will call 'stop_thread' listeners for you when it stops.",
    "type": "class",
    "file_path": "cherrypy\\cherrypy\\process\\plugins.py",
    "ast_data": "ClassDef name:ThreadManager Assign FunctionDef name:__init__ arg:self arg:bus arguments arg arg Assign Call Call Call Call Call Call Call Call Call FunctionDef name:acquire_thread arg:self arguments arg Assign Call If Compare Assign Call Assign Call FunctionDef name:release_thread arg:self arguments arg Assign Call Assign Call If Compare Call FunctionDef name:stop arg:self arguments arg For Call Call Call Assign"
  },
  {
    "library": "tensorflow",
    "name": "RemoteVariable",
    "source_code": "class RemoteVariable(RemoteValueImpl):\n\n    def get(self):\n        self._wait_and_maybe_error()\n        return self._copy_to_local()",
    "docstring": "A RemoteValue that represents a mutable per-worker variable.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\coordinator\\values.py",
    "ast_data": "ClassDef name:RemoteVariable FunctionDef name:get arg:self arguments arg Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_dicts_to_zeros",
    "source_code": "def _dicts_to_zeros(pyval):\n    if isinstance(pyval, dict):\n        return 0\n    return [_dicts_to_zeros(x) for x in pyval]",
    "docstring": "Replaces dictionaries zeros in a pylist.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\structured\\structured_tensor.py",
    "ast_data": "FunctionDef name:_dicts_to_zeros arg:pyval arguments arg If Call Return return:yes Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "@deprecation.deprecated(None, 'Queue-based input pipelines have been replaced by `tf.data`. Use `tf.data.TextLineDataset`.')\ndef __init__(self, skip_header_lines=None, name=None):\n    rr = gen_io_ops.text_line_reader_v2(skip_header_lines=skip_header_lines, name=name)\n    super(TextLineReader, self).__init__(rr)",
    "docstring": "Create a TextLineReader. Args: skip_header_lines: An optional int. Defaults to 0. Number of lines to skip from the beginning of every file. name: A name for the operation (optional).",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\io_ops.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:skip_header_lines arg:name arguments arg arg arg Assign Call Call Call Call"
  },
  {
    "library": "scrapy",
    "name": "arg_to_iter",
    "source_code": "def arg_to_iter(arg: Any) -> Iterable[Any]:\n    if arg is None:\n        return []\n    if not isinstance(arg, _ITERABLE_SINGLE_VALUES) and hasattr(arg, '__iter__'):\n        return cast(Iterable[Any], arg)\n    return [arg]",
    "docstring": "Convert an argument to an iterable. The argument can be a None, single value, or an iterable. Exception: if arg is a dict, [arg] will be returned",
    "type": "function",
    "file_path": "scrapy\\scrapy\\utils\\misc.py",
    "ast_data": "FunctionDef name:arg_to_iter arg:arg arguments arg If Compare Return return:no If BoolOp Call Call Return return:yes Call Return return:yes"
  },
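  {
    "library": "scrapy",
    "name": "arg_to_iter",
    "type": "usage_example",
    "note": "Editorial usage sketch, not library source: the cases from the docstring; strings and dicts are treated as single values.",
    "example_code": "from scrapy.utils.misc import arg_to_iter\n\nlist(arg_to_iter(None))      # []\nlist(arg_to_iter('abc'))     # ['abc']\nlist(arg_to_iter({'a': 1}))  # [{'a': 1}]\nlist(arg_to_iter([1, 2]))    # [1, 2]"
  },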
  {
    "library": "tensorflow",
    "name": "UnaryUnchangedStructureDataset",
    "source_code": "class UnaryUnchangedStructureDataset(UnaryDataset):\n\n    def __init__(self, input_dataset: DatasetV2, variant_tensor):\n        self._input_dataset = input_dataset\n        super(UnaryUnchangedStructureDataset, self).__init__(input_dataset, variant_tensor)\n\n    @property\n    def element_spec(self):\n        return self._input_dataset.element_spec",
    "docstring": "Represents a unary dataset with the same input and output structure.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\data\\ops\\dataset_ops.py",
    "ast_data": "ClassDef name:UnaryUnchangedStructureDataset FunctionDef name:__init__ arg:self arg:input_dataset arg:variant_tensor arguments arg arg arg Assign Call Call FunctionDef name:element_spec arg:self arguments arg Return return:yes"
  },
  {
    "library": "django",
    "name": "__len__",
    "source_code": "def __len__(self):\n    return self.geom_count",
    "docstring": "Return the number of geometries in this Geometry Collection.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\gdal\\geometries.py",
    "ast_data": "FunctionDef name:__len__ arg:self arguments arg Return return:yes"
  },
  {
    "library": "cryptography",
    "name": "exchange",
    "source_code": "@abc.abstractmethod\ndef exchange(self, peer_public_key: X25519PublicKey) -> bytes:\n    pass",
    "docstring": "Performs a key exchange operation using the provided peer's public key.",
    "type": "method",
    "file_path": "cryptography\\src\\cryptography\\hazmat\\primitives\\asymmetric\\x25519.py",
    "ast_data": "FunctionDef name:exchange arg:self arg:peer_public_key arguments arg arg"
  },
  {
    "library": "django",
    "name": "lookup_str",
    "source_code": "@cached_property\ndef lookup_str(self):\n    callback = self.callback\n    if isinstance(callback, functools.partial):\n        callback = callback.func\n    if hasattr(callback, 'view_class'):\n        callback = callback.view_class\n    elif not hasattr(callback, '__name__'):\n        return callback.__module__ + '.' + callback.__class__.__name__\n    return callback.__module__ + '.' + callback.__qualname__",
    "docstring": "A string that identifies the view (e.g. 'path.to.view_function' or 'path.to.ClassBasedView').",
    "type": "method",
    "file_path": "django\\django\\urls\\resolvers.py",
    "ast_data": "FunctionDef name:lookup_str arg:self arguments arg Assign If Call Assign If Call Assign If Call Return return:yes Return return:yes"
  },
  {
    "library": "cryptography",
    "name": "name",
    "source_code": "@property\n@abc.abstractmethod\ndef name(self) -> str:\n    pass",
    "docstring": "A string naming this algorithm (e.g. \"sha256\", \"md5\").",
    "type": "method",
    "file_path": "cryptography\\src\\cryptography\\hazmat\\primitives\\hashes.py",
    "ast_data": "FunctionDef name:name arg:self arguments arg"
  },
  {
    "library": "scipy",
    "name": "_fast_spmatrix_to_csc",
    "source_code": "def _fast_spmatrix_to_csc(A, hermitian=False):\n    if A.format == 'csr' and hermitian and (not np.issubdtype(A.dtype, np.complexfloating)):\n        return A.T\n    elif is_pydata_spmatrix(A):\n        return A\n    else:\n        return A.tocsc()",
    "docstring": "Convert sparse matrix to CSC (by transposing, if possible)",
    "type": "function",
    "file_path": "scipy\\scipy\\sparse\\linalg\\_eigen\\arpack\\arpack.py",
    "ast_data": "FunctionDef name:_fast_spmatrix_to_csc arg:A arg:hermitian arguments arg arg If BoolOp Compare Call Return return:yes If Call Return return:yes Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_broadcast_dynamic_shape_extended_complete",
    "source_code": "def _broadcast_dynamic_shape_extended_complete(a: DynamicRaggedShape, b: DynamicRaggedShape, b_rps: Sequence[RowPartition], c_suffix: Sequence[RowPartition], ac: Sequence[_LayerBroadcaster], bc_suffix: Sequence[_LayerBroadcaster]) -> Tuple[DynamicRaggedShape, _Broadcaster, _Broadcaster]:\n    c_prefix = b_rps[:-len(c_suffix)]\n    bc_prefix_length = b.rank - len(bc_suffix)\n    bc_prefix = [_LayerBroadcaster.get_identity_broadcaster(b._num_slices_in_dimension(i)) for i in range(bc_prefix_length)]\n    c_num_row_partitions = _get_broadcast_num_row_partitions(a, b)\n    c_raw = DynamicRaggedShape.from_row_partitions(c_prefix + tuple(c_suffix))\n    c = c_raw._with_num_row_partitions(c_num_row_partitions)\n    return (c, _Broadcaster(a, c, ac), _Broadcaster(b, c, bc_prefix + bc_suffix))",
    "docstring": "Helper for broadcast_dynamic_shape_extended.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\dynamic_ragged_shape.py",
    "ast_data": "FunctionDef name:_broadcast_dynamic_shape_extended_complete arg:a arg:b arg:b_rps arg:c_suffix arg:ac arg:bc_suffix arguments arg arg arg arg arg arg Assign Call Assign Call Assign Call Call Call Assign Call Assign Call Call Assign Call Return return:yes Call Call"
  },
  {
    "library": "matplotlib",
    "name": "set_patch_arc",
    "source_code": "def set_patch_arc(self, center, radius, theta1, theta2):\n    self._patch_type = 'arc'\n    self._center = center\n    self._width = radius * 2\n    self._height = radius * 2\n    self._theta1 = theta1\n    self._theta2 = theta2\n    self._path = mpath.Path.arc(theta1, theta2)\n    self.set_transform(self.axes.transAxes)\n    self.stale = True",
    "docstring": "Set the spine to be arc-like.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\spines.py",
    "ast_data": "FunctionDef name:set_patch_arc arg:self arg:center arg:radius arg:theta1 arg:theta2 arguments arg arg arg arg arg Assign Assign Assign Assign Assign Assign Assign Call Call Assign"
  },
  {
    "library": "django",
    "name": "convert_extent",
    "source_code": "def convert_extent(self, box):\n    if box is None:\n        return None\n    ll, ur = box[4:-1].split(',')\n    xmin, ymin = map(float, ll.split())\n    xmax, ymax = map(float, ur.split())\n    return (xmin, ymin, xmax, ymax)",
    "docstring": "Return a 4-tuple extent for the aggregate by converting the bounding box text returned by PostGIS ( argument), for example: \"BOX(-90.0 30.0, -85.0 40.0)\".",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\db\\backends\\postgis\\operations.py",
    "ast_data": "FunctionDef name:convert_extent arg:self arg:box arguments arg arg If Compare Return return:no Assign Call Assign Call Call Assign Call Call Return return:yes"
  },
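  {
    "library": "django",
    "name": "convert_extent",
    "type": "usage_example",
    "note": "Editorial sketch, not Django source: the BOX-text parsing from the method above, replayed standalone on a sample PostGIS bounding box.",
    "example_code": "box = 'BOX(-90.0 30.0,-85.0 40.0)'\nll, ur = box[4:-1].split(',')\nxmin, ymin = map(float, ll.split())\nxmax, ymax = map(float, ur.split())\nprint((xmin, ymin, xmax, ymax))  # (-90.0, 30.0, -85.0, 40.0)"
  },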
  {
    "library": "kornia",
    "name": "batch_2x2_Q",
    "source_code": "def batch_2x2_Q(m: Tensor) -> Tensor:\n    return batch_2x2_inv(batch_2x2_invQ(m), check_dets=True)",
    "docstring": "Returns Q of batch of 2x2 matrices.",
    "type": "function",
    "file_path": "kornia\\kornia\\feature\\adalam\\utils.py",
    "ast_data": "FunctionDef name:batch_2x2_Q arg:m arguments arg Return return:yes Call Call"
  },
  {
    "library": "authlib",
    "name": "validate_id_token_signed_response_alg",
    "source_code": "def validate_id_token_signed_response_alg(self):\n    if self.get('id_token_signed_response_alg') == 'none' and 'id_token' in self.get('response_type', ''):\n        raise InvalidClaimError('id_token_signed_response_alg')\n    self.setdefault('id_token_signed_response_alg', 'RS256')\n    self._validate_claim_value('id_token_signed_response_alg')",
    "docstring": "JWS alg algorithm [JWA] REQUIRED for signing the ID Token issued to this Client. The value none MUST NOT be used as the ID Token alg value unless the Client uses only Response Types that return no ID Token from the Authorization Endpoint (such as when only using the Authorization Code Flow). The default, if omitted, is RS256. The public key for validating the signature is provided by retrieving the JWK Set referenced by the jwks_uri element from OpenID Connect Discovery 1.0 [OpenID.Discovery].",
    "type": "method",
    "file_path": "authlib\\authlib\\oidc\\registration\\claims.py",
    "ast_data": "FunctionDef name:validate_id_token_signed_response_alg arg:self arguments arg If BoolOp Compare Call Compare Call Raise Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_convert_from_saved_model",
    "source_code": "def _convert_from_saved_model(self, graph_def):\n    self._save_conversion_params_metric(graph_def)\n    quant_mode = QuantizationMode(self.optimizations, self.target_spec, self.representative_dataset, graph_def, self._experimental_disable_per_channel, self.experimental_new_dynamic_range_quantizer, self._experimental_low_bit_qat, self._experimental_full_integer_quantization_bias_type, self._experimental_variable_quantization, self._experimental_strict_qdq)\n    self._validate_inference_input_output_types(quant_mode)\n    converter_kwargs = {'enable_tflite_resource_variables': self.experimental_enable_resource_variables}\n    converter_kwargs.update(self._get_base_converter_args())\n    converter_kwargs.update(quant_mode.converter_flags())\n    result = _convert_saved_model(**converter_kwargs)\n    return self._optimize_tflite_model(result, quant_mode, _build_conversion_flags(**converter_kwargs).debug_options, quant_io=self.experimental_new_quantizer)",
    "docstring": "Helper method that converts saved model. Args: graph_def: GraphDef object for the model, used only for stats. Returns: The converted TFLite model.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\lite\\python\\lite.py",
    "ast_data": "FunctionDef name:_convert_from_saved_model arg:self arg:graph_def arguments arg arg Call Assign Call Call Assign Call Call Call Call Assign Call Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "add",
    "source_code": "def add(a, b):\n    return _maybe_static(a) + _maybe_static(b)",
    "docstring": "A version of tf.add that eagerly evaluates if possible.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\numpy_ops\\np_utils.py",
    "ast_data": "FunctionDef name:add arg:a arg:b arguments arg arg Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "_get_device_from_device_id",
    "source_code": "def _get_device_from_device_id(device_id: Optional[Union[int, torch.device]], rank: int, device_handle: _FSDPDeviceHandle) -> Optional[torch.device]:\n    if device_id is None:\n        return None\n    device = device_id if isinstance(device_id, torch.device) else torch.device(device_id)\n    if device.type != 'cpu' and device.index is None:\n        warnings.warn(f'FSDP got the argument `device_id` {device_id} on rank {rank}, which does not have an explicit index. FSDP will use the current device {device_handle.current_device()}. If this is incorrect, please explicitly call `torch.{device.type}.set_device()` before FSDP initialization or pass in the explicit device index as the `device_id` argument.')\n        device = torch.device(device_handle.current_device())\n    return device",
    "docstring": "Return a ``.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\fsdp\\_init_utils.py",
    "ast_data": "FunctionDef name:_get_device_from_device_id arg:device_id arg:rank arg:device_handle arguments arg arg arg If Compare Return return:no Assign Call Call If BoolOp Compare Compare Call Call Assign Call Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "__init__",
    "source_code": "def __init__(self, bymonth=None, bymonthday=1, interval=1, tz=None):\n    if bymonth is None:\n        bymonth = range(1, 13)\n    rule = rrulewrapper(MONTHLY, bymonth=bymonth, bymonthday=bymonthday, interval=interval, **self.hms0d)\n    super().__init__(rule, tz=tz)",
    "docstring": "Parameters ---------- bymonth : int or list of int, default: all months Ticks will be placed on every month in *bymonth*. Default is `~datetime.tzinfotimezonedateutil.tz`.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\dates.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:bymonth arg:bymonthday arg:interval arg:tz arguments arg arg arg arg arg If Compare Assign Call Assign Call Call Call"
  },
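  {
    "library": "matplotlib",
    "name": "MonthLocator",
    "type": "usage_example",
    "note": "Editorial usage sketch, not library source: quarterly month ticks on a date axis; the plotted data is illustrative.",
    "example_code": "import datetime\n\nimport matplotlib.dates as mdates\nimport matplotlib.pyplot as plt\n\nfig, ax = plt.subplots()\ndays = [datetime.date(2024, m, 15) for m in range(1, 13)]\nax.plot(days, range(12))\n# Tick the first day of January, April, July and October.\nax.xaxis.set_major_locator(mdates.MonthLocator(bymonth=(1, 4, 7, 10)))\nax.xaxis.set_major_formatter(mdates.DateFormatter('%b %Y'))"
  },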
  {
    "library": "pandas",
    "name": "_convert_to_stop",
    "source_code": "@classmethod\ndef _convert_to_stop(cls, stop_seq):\n    return map(cls._convert_to_color, stop_seq)",
    "docstring": "Convert ``. Returns ------- stop : list of openpyxl.styles.Color",
    "type": "method",
    "file_path": "pandas\\pandas\\io\\excel\\_openpyxl.py",
    "ast_data": "FunctionDef name:_convert_to_stop arg:cls arg:stop_seq arguments arg arg Return return:yes Call"
  },
  {
    "library": "kornia",
    "name": "sample_lengths",
    "source_code": "def sample_lengths(num_rays: int, num_ray_points: int, device: Device, dtype: torch.dtype, irregular: bool=False) -> Tensor:\n    if num_ray_points <= 1:\n        raise ValueError('Number of ray points must be greater than 1')\n    if not irregular:\n        zero_to_one = torch.linspace(0.0, 1.0, num_ray_points, device=device, dtype=dtype)\n        lengths = zero_to_one.repeat(num_rays, 1)\n    else:\n        zero_to_one = torch.linspace(0.0, 1.0, num_ray_points + 1, device=device, dtype=dtype)\n        lengths = torch.rand(num_rays, num_ray_points, device=device) / num_ray_points + zero_to_one[:-1]\n    return lengths",
    "docstring": "Sample points along the length of rays.",
    "type": "function",
    "file_path": "kornia\\kornia\\nerf\\samplers.py",
    "ast_data": "FunctionDef name:sample_lengths arg:num_rays arg:num_ray_points arg:device arg:dtype arg:irregular arguments arg arg arg arg arg If Compare Raise Call If Assign Call Assign Call Assign Call Assign Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "nop",
    "source_code": "@make_boxed_compiler\ndef nop(fx_g: fx.GraphModule, _) -> Callable:\n    return fx_g",
    "docstring": "Returns the :attr: Fx graph module as it is. This is a no-op compiler and can be used to check accuracy. .. warning:: This API is experimental and likely to change.",
    "type": "function",
    "file_path": "pytorch\\torch\\_functorch\\compilers.py",
    "ast_data": "FunctionDef name:nop arg:fx_g arg:_ arguments arg arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "cutlass_key",
    "source_code": "@torch_key_cache\ndef cutlass_key() -> bytes:\n    if config.is_fbcode():\n        with importlib.resources.path('cutlass', 'src_hash.txt') as resource_path:\n            with open(resource_path) as resource_file:\n                return resource_file.read().encode()\n    combined_hash = hashlib.sha256()\n    build_code_hash([config.cuda.cutlass_dir], '', combined_hash)\n    return combined_hash.digest()",
    "docstring": "Compute a key representing the state of the CUTLASS library. Note: OSS and fbcode will have different keys.",
    "type": "function",
    "file_path": "pytorch\\torch\\_inductor\\codecache.py",
    "ast_data": "FunctionDef name:cutlass_key arguments If Call With Call With Call Return return:yes Call Call Assign Call Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "unbatch",
    "source_code": "def unbatch(self, spec):\n\n    def unbatch_field(f):\n        if isinstance(f, type_spec.BatchableTypeSpec):\n            return f.__batch_encoder__.unbatch(f)\n        elif isinstance(f, tensor_shape.TensorShape):\n            return f[1:]\n        else:\n            return f\n    fields = tuple(spec.__dict__.items())\n    unbatched_fields = nest.map_structure(unbatch_field, fields)\n    return _create_object_from_type_and_dict(type(spec), unbatched_fields)",
    "docstring": "Returns the TypeSpec for a single unbatched element in . The default definition returns a that is equal to , except that the outermost axis is removed from every nested , and field. Subclasses may override this default definition, when necessary. Args: spec: The for a batch of values. Returns: A for an individual value.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\extension_type.py",
    "ast_data": "FunctionDef name:unbatch arg:self arg:spec arguments arg arg FunctionDef name:unbatch_field arg:f arguments arg If Call Return return:yes Call If Call Return return:yes Return return:yes Assign Call Call Assign Call Return return:yes Call Call"
  },
  {
    "library": "pandas",
    "name": "to_typst",
    "source_code": "@Substitution(buf=buffering_args, encoding=encoding_args)\ndef to_typst(self, buf: FilePath | WriteBuffer[str] | None=None, *, encoding: str | None=None, sparse_index: bool | None=None, sparse_columns: bool | None=None, max_rows: int | None=None, max_columns: int | None=None) -> str | None:\n    obj = self._copy(deepcopy=True)\n    if sparse_index is None:\n        sparse_index = get_option('styler.sparse.index')\n    if sparse_columns is None:\n        sparse_columns = get_option('styler.sparse.columns')\n    text = obj._render_typst(sparse_columns=sparse_columns, sparse_index=sparse_index, max_rows=max_rows, max_cols=max_columns)\n    return save_to_buffer(text, buf=buf, encoding=encoding if buf is not None else None)",
    "docstring": "Write Styler to a file, buffer or string in Typst format. .. versionadded:: 3.0.0 Parameters ---------- %(buf)s %(encoding)s sparse_index : bool, optional Whether to sparsify the display of a hierarchical index. Setting to False will display each explicit level element in a hierarchical key for each row. Defaults to `bufNone`. See Also -------- DataFrame.to_typst : Write a DataFrame to a file, buffer or string in Typst format. Examples -------- >>> df = pd.DataFrame({\"A\": [1, 2], \"B\": [3, 4]}) >>> df.style.to_typst() # doctest: +SKIP .. code-block:: typst #table( columns: 3, [], [A], [B], [0], [1], [3], [1], [2], [4], )",
    "type": "method",
    "file_path": "pandas\\pandas\\io\\formats\\style.py",
    "ast_data": "FunctionDef name:to_typst arg:self arg:buf arguments arg arg arg arg arg arg arg Assign Call If Compare Assign Call If Compare Assign Call Assign Call Return return:yes Call Compare Call"
  },
  {
    "library": "scikit-learn",
    "name": "_check_weights",
    "source_code": "def _check_weights(weights, n_components):\n    weights = check_array(weights, dtype=[np.float64, np.float32], ensure_2d=False)\n    _check_shape(weights, (n_components,), 'weights')\n    if any(np.less(weights, 0.0)) or any(np.greater(weights, 1.0)):\n        raise ValueError(\"The parameter 'weights' should be in the range [0, 1], but got max value %.5f, min value %.5f\" % (np.min(weights), np.max(weights)))\n    atol = 1e-06 if weights.dtype == np.float32 else 1e-08\n    if not np.allclose(np.abs(1.0 - np.sum(weights)), 0.0, atol=atol):\n        raise ValueError(\"The parameter 'weights' should be normalized, but got sum(weights) = %.5f\" % np.sum(weights))\n    return weights",
    "docstring": "Check the user provided 'weights'. Parameters ---------- weights : array-like of shape (n_components,) The proportions of components of each mixture. n_components : int Number of components. Returns ------- weights : array, shape (n_components,)",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\mixture\\_gaussian_mixture.py",
    "ast_data": "FunctionDef name:_check_weights arg:weights arg:n_components arguments arg arg Assign Call Call If BoolOp Call Call Call Call Raise Call Call Call Assign Compare If Call Call Call Raise Call Call Return return:yes"
  },
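As a quick standalone illustration of the two constraints `_check_weights` enforces (range and normalization), using the float64 tolerance from the code above:

```python
import numpy as np

weights = np.array([0.2, 0.3, 0.5])
in_range = not (np.any(np.less(weights, 0.0)) or np.any(np.greater(weights, 1.0)))
normalized = np.allclose(np.abs(1.0 - np.sum(weights)), 0.0, atol=1e-08)
assert in_range and normalized   # a valid mixture-weight vector
```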
  {
    "library": "tensorflow",
    "name": "call_flat",
    "source_code": "def call_flat(self, *args: core.Tensor) -> Sequence[core.Tensor]:\n    expected_len = len(self.cached_definition.signature.input_arg)\n    if len(args) != expected_len:\n        raise ValueError(f'Signature specifies {expected_len} arguments, got: {len(args)}. Expected inputs: {self.cached_definition.signature.input_arg}. Received inputs: {args}. Function Type: {self.function_type!r}')\n    with InterpolateRuntimeError(self):\n        with ops.control_dependencies(self._call_options.control_captures):\n            with record.stop_recording():\n                if self._bound_context.executing_eagerly():\n                    outputs = self._bound_context.call_function(self.name, list(args), len(self.function_type.flat_outputs))\n                else:\n                    outputs = make_call_op_in_graph(self, list(args), self._bound_context.function_call_options.as_attrs())\n    for i, output_type in enumerate(self.function_type.flat_outputs):\n        handle_data = output_type.dtype._handle_data\n        if handle_data:\n            handle_data_util.set_handle_data(outputs[i], handle_data.shape_inference)\n    if not self._bound_context.executing_eagerly():\n        for i, output_type in enumerate(self.function_type.flat_outputs):\n            outputs[i].set_shape(output_type.shape)\n    return outputs",
    "docstring": "Calls with flat tensor inputs and returns flat tensor outputs. Args: *args: arguments to call this function with. Returns: The outputs of the function call. Raises: ValueError: if the number of arguments is incorrect. FunctionAlreadyGarbageCollectedError: if the function is no longer available to be called because it has been garbage collected.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\polymorphic_function\\atomic_function.py",
    "ast_data": "FunctionDef name:call_flat arg:self arguments arg arg Assign Call If Compare Call Raise Call Call With Call With Call With Call If Call Assign Call Call Call Assign Call Call Call For Call Assign If Call If Call For Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "environment",
    "source_code": "@property\ndef environment(self):\n    return ''",
    "docstring": "Returns the current environment which TensorFlow is running in.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\cluster_resolver\\tpu\\tpu_cluster_resolver.py",
    "ast_data": "FunctionDef name:environment arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "get_dense_tensor",
    "source_code": "def get_dense_tensor(self, transformation_cache, state_manager):\n    return transformation_cache.get(self, state_manager)",
    "docstring": "Returns dense representing numeric feature. Args: transformation_cache: A object to access features. state_manager: A to create / access resources such as lookup tables. Returns: Dense created within .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\feature_column\\feature_column_v2.py",
    "ast_data": "FunctionDef name:get_dense_tensor arg:self arg:transformation_cache arg:state_manager arguments arg arg arg Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "_maybe_convert_i8",
    "source_code": "def _maybe_convert_i8(self, key):\n    if is_list_like(key):\n        key = ensure_index(key)\n        key = maybe_upcast_numeric_to_64bit(key)\n    if not self._needs_i8_conversion(key):\n        return key\n    scalar = is_scalar(key)\n    key_dtype = getattr(key, 'dtype', None)\n    if isinstance(key_dtype, IntervalDtype) or isinstance(key, Interval):\n        left = self._maybe_convert_i8(key.left)\n        right = self._maybe_convert_i8(key.right)\n        constructor = Interval if scalar else IntervalIndex.from_arrays\n        return constructor(left, right, closed=self.closed)\n    if scalar:\n        key_dtype, key_i8 = infer_dtype_from_scalar(key)\n        if isinstance(key, Period):\n            key_i8 = key.ordinal\n        elif isinstance(key_i8, Timestamp):\n            key_i8 = key_i8._value\n        elif isinstance(key_i8, (np.datetime64, np.timedelta64)):\n            key_i8 = key_i8.view('i8')\n    else:\n        key_dtype, key_i8 = (key.dtype, Index(key.asi8))\n        if key.hasnans:\n            key_i8 = key_i8.where(~key._isnan)\n    subtype = self.dtype.subtype\n    if subtype != key_dtype:\n        raise ValueError(f'Cannot index an IntervalIndex of subtype {subtype} with values of dtype {key_dtype}')\n    return key_i8",
    "docstring": "Maybe convert a given key to its equivalent i8 value(s). Used as a preprocessing step prior to IntervalTree queries (self._engine), which expects numeric data. Parameters ---------- key : scalar or list-like The key that should maybe be converted to i8. Returns ------- scalar or list-like The original key if no conversion occurred, int if converted scalar, Index with an int64 dtype if converted list-like.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\indexes\\interval.py",
    "ast_data": "FunctionDef name:_maybe_convert_i8 arg:self arg:key arguments arg arg If Call Assign Call Assign Call If Call Return return:yes Assign Call Assign Call If BoolOp Call Call Assign Call Assign Call Assign Return return:yes Call If Assign Call If Call Assign If Call Assign If Call Assign Call Assign Call If Assign Call Assign If Compare Raise Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "LazyConv3d",
    "source_code": "class LazyConv3d(_LazyConvXdMixin, Conv3d):\n    cls_to_become = Conv3d\n\n    def __init__(self, out_channels: int, kernel_size: _size_3_t, stride: _size_3_t=1, padding: _size_3_t=0, dilation: _size_3_t=1, groups: int=1, bias: bool=True, padding_mode: str='zeros', device=None, dtype=None) -> None:\n        factory_kwargs = {'device': device, 'dtype': dtype}\n        super().__init__(0, 0, kernel_size, stride, padding, dilation, groups, False, padding_mode, **factory_kwargs)\n        self.weight = UninitializedParameter(**factory_kwargs)\n        self.out_channels = out_channels\n        if bias:\n            self.bias = UninitializedParameter(**factory_kwargs)\n\n    def _get_num_spatial_dims(self) -> int:\n        return 3",
    "docstring": "A :class: module with lazy initialization of the `Conv3dweightbiastorch.nn.modules.lazy.LazyModuleMixintorch.nn.Conv3dtorch.nn.modules.lazy.LazyModuleMixin`",
    "type": "class",
    "file_path": "pytorch\\torch\\nn\\modules\\conv.py",
    "ast_data": "ClassDef name:LazyConv3d Assign FunctionDef name:__init__ arg:self arg:out_channels arg:kernel_size arg:stride arg:padding arg:dilation arg:groups arg:bias arg:padding_mode arg:device arg:dtype arguments arg arg arg arg arg arg arg arg arg arg arg Assign Call Call Assign Call Assign If Assign Call FunctionDef name:_get_num_spatial_dims arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "set_custom_getter",
    "source_code": "def set_custom_getter(self, custom_getter):\n    self._custom_getter = custom_getter",
    "docstring": "Set custom getter for this scope.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\variable_scope.py",
    "ast_data": "FunctionDef name:set_custom_getter arg:self arg:custom_getter arguments arg arg Assign"
  },
  {
    "library": "tensorflow",
    "name": "get_config",
    "source_code": "def get_config(self):\n    config = dict(zip(self._fields, self))\n    config['dtype'] = self.dtype.name\n    return config",
    "docstring": "See 'FeatureColumn` base class.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\feature_column\\sequence_feature_column.py",
    "ast_data": "FunctionDef name:get_config arg:self arguments arg Assign Call Call Assign Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "set_wrap",
    "source_code": "def set_wrap(self, wrap):\n    self._wrap = wrap",
    "docstring": "Set whether the text can be wrapped. Wrapping makes sure the text is confined to the (sub)figure box. It does not take into account any other artists. Parameters ---------- wrap : bool Notes ----- Wrapping does not work together with `` in IPython/Jupyter). The 'tight' setting rescales the canvas to accommodate all content and happens before wrapping.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\text.py",
    "ast_data": "FunctionDef name:set_wrap arg:self arg:wrap arguments arg arg Assign"
  },
  {
    "library": "django",
    "name": "encode",
    "source_code": "def encode(self, session_dict):\n    return signing.dumps(session_dict, salt=self.key_salt, serializer=self.serializer, compress=True)",
    "docstring": "Return the given session dictionary serialized and encoded as a string.",
    "type": "method",
    "file_path": "django\\django\\contrib\\sessions\\backends\\base.py",
    "ast_data": "FunctionDef name:encode arg:self arg:session_dict arguments arg arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_axis_gather",
    "source_code": "def _axis_gather(params, indices, axis):\n    if axis > 1:\n        if not isinstance(params, ragged_tensor.RaggedTensor):\n            params = ragged_tensor.RaggedTensor.from_tensor(params, ragged_rank=1, row_splits_dtype=indices.row_splits.dtype)\n        return params.with_values(_gather(params.values, indices, axis - 1, 0))\n    if indices.shape.rank is None:\n        raise ValueError('rank(indices) must be known statically')\n    assert axis == 1\n    flat_params = _flatten_dims_0_and_1(params)\n    adjustments = _row_starts(params, indices.dtype)\n    adjustments = _increase_rank_to(adjustments, indices.shape.ndims + 1)\n    adjusted_indices = indices + adjustments\n    return _gather(flat_params, adjusted_indices, axis - 1, 0)",
    "docstring": "Helper that implements ragged gather when axis>0 and batch_dims==0. Args: params: The tensor from which to gather values. indices: The indices of values to gather. axis: The axis in to gather from. Returns: A potentially ragged tensor.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\ragged_gather_ops.py",
    "ast_data": "FunctionDef name:_axis_gather arg:params arg:indices arg:axis arguments arg arg arg If Compare If Call Assign Call Return return:yes Call Call If Compare Raise Call Compare Assign Call Assign Call Assign Call Assign Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "is_user_opted_in",
    "source_code": "def is_user_opted_in(user: str, user_optins: UserOptins, experiment_name: str) -> bool:\n    return experiment_name in user_optins.get(user, [])",
    "docstring": "Check if a user is opted into an experiment",
    "type": "function",
    "file_path": "pytorch\\.github\\scripts\\runner_determinator.py",
    "ast_data": "FunctionDef name:is_user_opted_in arg:user arg:user_optins arg:experiment_name arguments arg arg arg Return return:yes Compare Call"
  },
  {
    "library": "django",
    "name": "valid_value",
    "source_code": "def valid_value(self, value):\n    text_value = str(value)\n    for k, v in self.choices:\n        if isinstance(v, (list, tuple)):\n            for k2, v2 in v:\n                if value == k2 or text_value == str(k2):\n                    return True\n        elif value == k or text_value == str(k):\n            return True\n    return False",
    "docstring": "Check to see if the provided value is a valid choice.",
    "type": "method",
    "file_path": "django\\django\\forms\\fields.py",
    "ast_data": "FunctionDef name:valid_value arg:self arg:value arguments arg arg Assign Call For If Call For If BoolOp Compare Compare Call Return return:yes If BoolOp Compare Compare Call Return return:yes Return return:yes"
  },
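A standalone sketch of the same lookup over grouped choices (the nested-tuple case corresponds to an HTML optgroup); the choice data here is hypothetical:

```python
choices = [
    ("audio", [("vinyl", "Vinyl"), ("cd", "CD")]),  # optgroup: (group label, sub-choices)
    ("unknown", "Unknown"),                          # flat choice: (value, label)
]

def valid_value(choices, value):
    text = str(value)
    for k, v in choices:
        if isinstance(v, (list, tuple)):             # grouped choices
            if any(value == k2 or text == str(k2) for k2, _ in v):
                return True
        elif value == k or text == str(k):
            return True
    return False

assert valid_value(choices, "cd")
assert not valid_value(choices, "tape")
```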
  {
    "library": "tensorflow",
    "name": "_reconstruct_sequence_inputs",
    "source_code": "def _reconstruct_sequence_inputs(op_def, inputs, attrs) -> list[Union[tensor_lib.Tensor, list[tensor_lib.Tensor]]]:\n    grouped_inputs = []\n    i = 0\n    for input_arg in op_def.input_arg:\n        if input_arg.number_attr:\n            input_len = attrs[input_arg.number_attr].i\n            is_sequence = True\n        elif input_arg.type_list_attr:\n            input_len = len(attrs[input_arg.type_list_attr].list.type)\n            is_sequence = True\n        else:\n            input_len = 1\n            is_sequence = False\n        if is_sequence:\n            grouped_inputs.append(inputs[i:i + input_len])\n        else:\n            grouped_inputs.append(inputs[i])\n        i += input_len\n    assert i == len(inputs)\n    return grouped_inputs",
    "docstring": "Regroups a flat list of input tensors into scalar and sequence inputs. Args: op_def: The (for knowing the input types) inputs: a list of input s to the op. attrs: mapping from attr name to (these define how long each sequence is) Returns: A list of s (corresponding to scalar inputs) and lists of s (corresponding to sequence inputs).",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\ops.py",
    "ast_data": "FunctionDef name:_reconstruct_sequence_inputs arg:op_def arg:inputs arg:attrs arguments arg arg arg Assign Assign For If Assign Assign If Assign Call Assign Assign Assign If Call Call Compare Call Return return:yes"
  },
  {
    "library": "scrapy",
    "name": "request_fingerprint",
    "source_code": "def request_fingerprint(self, request: Request) -> str:\n    return self.fingerprinter.fingerprint(request).hex()",
    "docstring": "Returns a string that uniquely identifies the specified request.",
    "type": "method",
    "file_path": "scrapy\\scrapy\\dupefilters.py",
    "ast_data": "FunctionDef name:request_fingerprint arg:self arg:request arguments arg arg Return return:yes Call Call"
  },
  {
    "library": "kornia",
    "name": "efficientvit_backbone_b3",
    "source_code": "def efficientvit_backbone_b3(**kwargs: dict[str, Any]) -> EfficientViTBackbone:\n    backbone = EfficientViTBackbone(width_list=[32, 64, 128, 256, 512], depth_list=[1, 4, 6, 6, 9], dim=32, **build_kwargs_from_config(kwargs, EfficientViTBackbone))\n    return backbone",
    "docstring": "Create EfficientViT B3.",
    "type": "function",
    "file_path": "kornia\\kornia\\contrib\\models\\efficient_vit\\backbone.py",
    "ast_data": "FunctionDef name:efficientvit_backbone_b3 arguments arg Assign Call Call Return return:yes"
  },
  {
    "library": "pandas",
    "name": "objects_to_datetime64",
    "source_code": "def objects_to_datetime64(data: np.ndarray, dayfirst, yearfirst, utc: bool=False, errors: DateTimeErrorChoices='raise', allow_object: bool=False, out_unit: str | None=None) -> tuple[np.ndarray, tzinfo | None]:\n    assert errors in ['raise', 'coerce']\n    data = np.asarray(data, dtype=np.object_)\n    result, tz_parsed = tslib.array_to_datetime(data, errors=errors, utc=utc, dayfirst=dayfirst, yearfirst=yearfirst, creso=abbrev_to_npy_unit(out_unit))\n    if tz_parsed is not None:\n        return (result, tz_parsed)\n    elif result.dtype.kind == 'M':\n        return (result, tz_parsed)\n    elif result.dtype == object:\n        if allow_object:\n            return (result, tz_parsed)\n        raise TypeError('DatetimeIndex has mixed timezones')\n    else:\n        raise TypeError(result)",
    "docstring": "Convert data to array of timestamps. Parameters ---------- data : np.ndarray[object] dayfirst : bool yearfirst : bool utc : bool, default False Whether to convert/localize timestamps to UTC. errors : {'raise', 'coerce'} allow_object : bool Whether to return an object-dtype ndarray instead of raising if the data contains more than one timezone. out_unit : str or None, default None None indicates we should do resolution inference. Returns ------- result : ndarray np.datetime64[out_unit] if returned values represent wall times or UTC timestamps. object if mixed timezones inferred_tz : tzinfo or None If not None, then the datetime64 values in denote UTC timestamps. Raises ------ ValueError : if data cannot be converted to datetimes TypeError : When a type cannot be converted to datetime",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\arrays\\datetimes.py",
    "ast_data": "FunctionDef name:objects_to_datetime64 arg:data arg:dayfirst arg:yearfirst arg:utc arg:errors arg:allow_object arg:out_unit arguments arg arg arg arg arg arg arg Compare Assign Call Assign Call Call If Compare Return return:yes If Compare Return return:yes If Compare If Return return:yes Raise Call Raise Call"
  },
  {
    "library": "pytorch",
    "name": "_post_forward",
    "source_code": "@no_type_check\ndef _post_forward(state: _FSDPState, handle: Optional[FlatParamHandle], reshard_fn: Callable, module: nn.Module, input: Any, output: Any) -> Any:\n    with torch.profiler.record_function('FullyShardedDataParallel._post_forward'):\n        if handle and handle._training_state == HandleTrainingState.BACKWARD_PRE:\n            return output\n        state._exec_order_data.record_post_forward(handle)\n        if reshard_fn is not None:\n            reshard_fn(state, handle)\n        output = _register_pre_backward_hooks(state, module, output, handle)\n        state.training_state = TrainingState.IDLE\n        if handle:\n            handle._training_state = HandleTrainingState.IDLE\n        return output",
    "docstring": "Runs the post-forward logic. This includes an opportunity to reshard currently unsharded parameters such as those used in the current forward and registering pre-backward hooks on the forward outputs. Args: handles (List[FlatParamHandle]): Handles giving the parameters used in the current forward. reshard_fn (Optional[Callable]): A callable to reshard any currently unsharded parameters (e.g. from the current forward) or `` 's data points to the sharded flat parameter.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\fsdp\\_runtime_utils.py",
    "ast_data": "FunctionDef name:_post_forward arg:state arg:handle arg:reshard_fn arg:module arg:input arg:output arguments arg arg arg arg arg arg With Call If BoolOp Compare Return return:yes Call If Compare Call Assign Call Assign If Assign Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "nodes_count",
    "source_code": "def nodes_count(nodes: list[torch.fx.Node], node_call_back) -> int:\n    return len(nodes_filter(nodes, node_call_back))",
    "docstring": "Returns the number of nodes that match the node_call_back.",
    "type": "function",
    "file_path": "pytorch\\torch\\_export\\utils.py",
    "ast_data": "FunctionDef name:nodes_count arg:nodes arg:node_call_back arguments arg arg Return return:yes Call Call"
  },
  {
    "library": "django",
    "name": "get_form_class",
    "source_code": "def get_form_class(self):\n    if self.fields is not None and self.form_class:\n        raise ImproperlyConfigured(\"Specifying both 'fields' and 'form_class' is not permitted.\")\n    if self.form_class:\n        return self.form_class\n    else:\n        if self.model is not None:\n            model = self.model\n        elif getattr(self, 'object', None) is not None:\n            model = self.object.__class__\n        else:\n            model = self.get_queryset().model\n        if self.fields is None:\n            raise ImproperlyConfigured(\"Using ModelFormMixin (base class of %s) without the 'fields' attribute is prohibited.\" % self.__class__.__name__)\n        return model_forms.modelform_factory(model, fields=self.fields)",
    "docstring": "Return the form class to use in this view.",
    "type": "method",
    "file_path": "django\\django\\views\\generic\\edit.py",
    "ast_data": "FunctionDef name:get_form_class arg:self arguments arg If BoolOp Compare Raise Call If Return return:yes If Compare Assign If Compare Call Assign Assign Call If Compare Raise Call Return return:yes Call"
  },
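A hedged usage sketch of the two mutually exclusive configurations this method accepts; `Author` and `AuthorForm` are assumed stand-ins, and setting both `fields` and `form_class` raises `ImproperlyConfigured`:

```python
from django.views.generic.edit import CreateView

from myapp.models import Author        # hypothetical model
from myapp.forms import AuthorForm     # hypothetical ModelForm

class AuthorCreateFromFields(CreateView):
    model = Author
    fields = ["name"]                  # form class built via modelform_factory

class AuthorCreateFromForm(CreateView):
    model = Author
    form_class = AuthorForm            # explicit form; do not also set `fields`
```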
  {
    "library": "matplotlib",
    "name": "axes",
    "source_code": "@property\ndef axes(self):\n    return self._axes",
    "docstring": "The instance the artist resides in, or *None*.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\artist.py",
    "ast_data": "FunctionDef name:axes arg:self arguments arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_insert_obs_or_fq",
    "source_code": "def _insert_obs_or_fq(node: Node, obs_or_fq: ObserverOrFakeQuantize, model: torch.nn.Module, named_modules: dict[str, torch.nn.Module], graph: Graph) -> Node:\n    model_device = assert_and_get_unique_device(model)\n    if model_device:\n        obs_or_fq.to(model_device)\n    if is_equalization_observer(obs_or_fq):\n        prefix = node.name + '_equalization_process_'\n    else:\n        prefix = 'activation_post_process_'\n    get_new_obs_or_fq_name = get_new_attr_name_with_prefix(prefix)\n    obs_or_fq_name = get_new_obs_or_fq_name(model)\n    setattr(model, obs_or_fq_name, obs_or_fq)\n    named_modules[obs_or_fq_name] = obs_or_fq\n    with graph.inserting_after(node):\n        new_obs = graph.create_node('call_module', obs_or_fq_name, (node,), {})\n    return new_obs",
    "docstring": "Attaches to , and creates a node which calls on the output of . obs_or_fq: an instance of Observer or FakeQuantize module",
    "type": "function",
    "file_path": "pytorch\\torch\\ao\\quantization\\fx\\prepare.py",
    "ast_data": "FunctionDef name:_insert_obs_or_fq arg:node arg:obs_or_fq arg:model arg:named_modules arg:graph arguments arg arg arg arg arg Assign Call If Call If Call Assign Assign Assign Call Assign Call Call Assign With Call Assign Call Return return:yes"
  },
  {
    "library": "numpy",
    "name": "CCompiler_object_filenames",
    "source_code": "def CCompiler_object_filenames(self, source_filenames, strip_dir=0, output_dir=''):\n    if output_dir is None:\n        output_dir = ''\n    obj_names = []\n    for src_name in source_filenames:\n        base, ext = os.path.splitext(os.path.normpath(src_name))\n        base = os.path.splitdrive(base)[1]\n        base = base[os.path.isabs(base):]\n        if base.startswith('..'):\n            i = base.rfind('..') + 2\n            d = base[:i]\n            d = os.path.basename(os.path.abspath(d))\n            base = d + base[i:]\n        if ext not in self.src_extensions:\n            raise UnknownFileError(\"unknown file type '%s' (from '%s')\" % (ext, src_name))\n        if strip_dir:\n            base = os.path.basename(base)\n        obj_name = os.path.join(output_dir, base + self.obj_extension)\n        obj_names.append(obj_name)\n    return obj_names",
    "docstring": "Return the name of the object files for the given source files. Parameters ---------- source_filenames : list of str The list of paths to source files. Paths can be either relative or absolute, this is handled transparently. strip_dir : bool, optional Whether to strip the directory from the returned paths. If True, the file name prepended by is returned. Default is False. output_dir : str, optional If given, this path is prepended to the returned paths to the object files. Returns ------- obj_names : list of str The list of paths to the object files corresponding to the source files in .",
    "type": "function",
    "file_path": "numpy\\numpy\\distutils\\ccompiler.py",
    "ast_data": "FunctionDef name:CCompiler_object_filenames arg:self arg:source_filenames arg:strip_dir arg:output_dir arguments arg arg arg arg If Compare Assign Assign For Assign Call Call Assign Call Assign Call If Call Assign Call Assign Assign Call Call Assign If Compare Raise Call If Assign Call Assign Call Call Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "fit",
    "source_code": "@_fit_context(prefer_skip_nested_validation=True)\ndef fit(self, X, y=None):\n    random_state = check_random_state(self.random_state)\n    X = validate_data(self, X)\n    self.mean_ = X.mean(axis=0)\n    X = X - self.mean_\n    if self.n_components is None:\n        n_components = X.shape[1]\n    else:\n        n_components = self.n_components\n    return self._fit(X, n_components, random_state)",
    "docstring": "Fit the model from data in X. Parameters ---------- X : array-like of shape (n_samples, n_features) Training vector, where is the number of samples and is the number of features. y : Ignored Not used, present here for API consistency by convention. Returns ------- self : object Returns the instance itself.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\decomposition\\_sparse_pca.py",
    "ast_data": "FunctionDef name:fit arg:self arg:X arg:y arguments arg arg arg Assign Call Assign Call Assign Call Assign If Compare Assign Assign Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "task_type",
    "source_code": "@task_type.setter\ndef task_type(self, task_type):\n    self._task_type = task_type",
    "docstring": "Setter of property. See property doc.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\cluster_resolver\\cluster_resolver.py",
    "ast_data": "FunctionDef name:task_type arg:self arg:task_type arguments arg arg Assign"
  },
  {
    "library": "scipy",
    "name": "set_integrator",
    "source_code": "def set_integrator(self, name, **integrator_params):\n    integrator = find_integrator(name)\n    if integrator is None:\n        message = f'No integrator name match with {name!r} or is not available.'\n        warnings.warn(message, stacklevel=2)\n    else:\n        self._integrator = integrator(**integrator_params)\n        if not len(self._y):\n            self.t = 0.0\n            self._y = array([0.0], self._integrator.scalar)\n        self._integrator.reset(len(self._y), self.jac is not None)\n    return self",
    "docstring": "Set integrator by name. Parameters ---------- name : str Name of the integrator. **integrator_params Additional parameters for the integrator.",
    "type": "method",
    "file_path": "scipy\\scipy\\integrate\\_ode.py",
    "ast_data": "FunctionDef name:set_integrator arg:self arg:name arguments arg arg arg Assign Call If Compare Assign Call Assign Call If Call Assign Assign Call Call Call Compare Return return:yes"
  },
  {
    "library": "kornia",
    "name": "trans",
    "source_code": "@classmethod\ndef trans(cls, x: Tensor, y: Tensor, z: Tensor) -> Se3:\n    KORNIA_CHECK(x.shape == y.shape)\n    KORNIA_CHECK(y.shape == z.shape)\n    KORNIA_CHECK_SAME_DEVICES([x, y, z])\n    batch_size = x.shape[0] if len(x.shape) > 0 else None\n    rotation = So3.identity(batch_size, x.device, x.dtype)\n    return cls(rotation, stack((x, y, z), -1))",
    "docstring": "Construct a translation only Se3 instance. Args: x: the x-axis translation. y: the y-axis translation. z: the z-axis translation.",
    "type": "method",
    "file_path": "kornia\\kornia\\geometry\\liegroup\\se3.py",
    "ast_data": "FunctionDef name:trans arg:cls arg:x arg:y arg:z arguments arg arg arg arg Call Compare Call Compare Call Assign Compare Call Assign Call Return return:yes Call Call"
  },
  {
    "library": "matplotlib",
    "name": "close",
    "source_code": "def close(self):\n    if self._file is not None:\n        self._file.finalize()\n        self._file.close()\n        self._file = None",
    "docstring": "Finalize this object, making the underlying file a complete PDF file.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\backends\\backend_pdf.py",
    "ast_data": "FunctionDef name:close arg:self arguments arg If Compare Call Call Assign"
  },
  {
    "library": "matplotlib",
    "name": "set_max",
    "source_code": "def set_max(self, max):\n    self.set_val((self.val[0], max))",
    "docstring": "Set the lower value of the slider to *max*. Parameters ---------- max : float",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\widgets.py",
    "ast_data": "FunctionDef name:set_max arg:self arg:max arguments arg arg Call"
  },
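A minimal sketch, assuming this is `RangeSlider.set_max` (the `self.val` tuple keeps its lower element and only the upper bound is replaced):

```python
import matplotlib.pyplot as plt
from matplotlib.widgets import RangeSlider

fig, ax = plt.subplots()
slider = RangeSlider(ax, "range", valmin=0.0, valmax=10.0, valinit=(2.0, 5.0))
slider.set_max(8.0)       # lower stays at 2.0, upper becomes 8.0
print(slider.val)
```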
  {
    "library": "pytorch",
    "name": "model_is_exported",
    "source_code": "def model_is_exported(m: torch.nn.Module) -> bool:\n    return isinstance(m, torch.fx.GraphModule) and any(('val' in n.meta for n in m.graph.nodes))",
    "docstring": "Return True if the was exported, False otherwise (e.g. if the model was FX symbolically traced or not traced at all).",
    "type": "function",
    "file_path": "pytorch\\torch\\ao\\quantization\\pt2e\\export_utils.py",
    "ast_data": "FunctionDef name:model_is_exported arg:m arguments arg Return return:yes BoolOp Call Call Compare"
  },
  {
    "library": "pytorch",
    "name": "get_hash",
    "source_code": "def get_hash(self) -> bytes:\n    if self._is_dirty or self._hash_digest is None:\n        dict_to_hash = self._get_dict(ignored_keys=list(self._compile_ignored_keys))\n        string_to_hash = repr(sorted(dict_to_hash.items()))\n        self._hash_digest = hashlib.md5(string_to_hash.encode('utf-8'), usedforsecurity=False).digest()\n        self._is_dirty = False\n    return self._hash_digest",
    "docstring": "Hashes the configs that are not compile_ignored",
    "type": "method",
    "file_path": "pytorch\\torch\\utils\\_config_module.py",
    "ast_data": "FunctionDef name:get_hash arg:self arguments arg If BoolOp Compare Assign Call Call Assign Call Call Call Assign Call Call Call Assign Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "call_module",
    "source_code": "@compatibility(is_backward_compatible=True)\ndef call_module(self, m: torch.nn.Module, forward: Callable[..., Any], args: tuple[Any, ...], kwargs: dict[str, Any]) -> Any:\n    module_qualified_name = self.path_of_module(m)\n    with ScopeContextManager(self.scope, Scope(module_qualified_name, type(m))) as _scope:\n        num_calls = self.num_calls.get(module_qualified_name, 0)\n        module_key = f'{_scope.module_path}@{num_calls}' if num_calls > 0 else _scope.module_path\n        self.module_stack[module_key] = (module_qualified_name, _scope.module_type)\n        self.num_calls[module_qualified_name] = num_calls + 1\n        if not self.is_leaf_module(m, module_qualified_name):\n            ret_val = forward(*args, **kwargs)\n        else:\n            ret_val = self.create_proxy('call_module', module_qualified_name, args, kwargs)\n        key, _ = self.module_stack.popitem(last=True)\n        assert key == module_key, f' Unexpected key {key}'\n    return ret_val",
    "docstring": "Method that specifies the behavior of this `` invocation.",
    "type": "method",
    "file_path": "pytorch\\torch\\fx\\_symbolic_trace.py",
    "ast_data": "FunctionDef name:call_module arg:self arg:m arg:forward arg:args arg:kwargs arguments arg arg arg arg arg Assign Call With Call Call Call Assign Call Assign Compare Assign Assign If Call Assign Call Assign Call Assign Call Compare Return return:yes Call"
  },
  {
    "library": "django",
    "name": "get_main_version",
    "source_code": "def get_main_version(version=None):\n    version = get_complete_version(version)\n    parts = 2 if version[2] == 0 else 3\n    return '.'.join((str(x) for x in version[:parts]))",
    "docstring": "Return main version (X.Y[.Z]) from VERSION.",
    "type": "function",
    "file_path": "django\\django\\utils\\version.py",
    "ast_data": "FunctionDef name:get_main_version arg:version arguments arg Assign Call Assign Compare Return return:yes Call Call"
  },
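The X.Y versus X.Y.Z rule is easiest to see standalone; the tuples below follow Django's `(major, minor, micro, stage, serial)` VERSION shape:

```python
def main_version(version):
    parts = 2 if version[2] == 0 else 3                 # drop micro when it is 0
    return ".".join(str(x) for x in version[:parts])

assert main_version((5, 0, 0, "final", 0)) == "5.0"
assert main_version((4, 2, 1, "final", 0)) == "4.2.1"
```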
  {
    "library": "scipy",
    "name": "_fft_helper",
    "source_code": "def _fft_helper(x, win, detrend_func, nperseg, noverlap, nfft, sides):\n    if nperseg == 1 and noverlap == 0:\n        result = x[..., np.newaxis]\n    else:\n        step = nperseg - noverlap\n        result = np.lib.stride_tricks.sliding_window_view(x, window_shape=nperseg, axis=-1, writeable=True)\n        result = result[..., 0::step, :]\n    result = detrend_func(result)\n    result = win * result\n    if sides == 'twosided':\n        func = sp_fft.fft\n    else:\n        result = result.real\n        func = sp_fft.rfft\n    result = func(result, n=nfft)\n    return result",
    "docstring": "Calculate windowed FFT, for internal use by . .. legacy:: function This function is solely used by the legacy function, which is located also in this file. This is a helper function that does the main FFT calculation for . All input validation is performed there, and the data axis is assumed to be the last axis of x. It is not designed to be called externally. The windows are not averaged over; the result from each window is returned. Returns ------- result : ndarray Array of FFT data Notes ----- Adapted from matplotlib.mlab .. versionadded:: 0.16.0",
    "type": "function",
    "file_path": "scipy\\scipy\\signal\\_spectral_py.py",
    "ast_data": "FunctionDef name:_fft_helper arg:x arg:win arg:detrend_func arg:nperseg arg:noverlap arg:nfft arg:sides arguments arg arg arg arg arg arg arg If BoolOp Compare Compare Assign Assign Assign Call Assign Assign Call Assign If Compare Assign Assign Assign Assign Call Return return:yes"
  },
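The segmentation step above (overlapping windows, then a strided subselect) can be reproduced standalone with numpy:

```python
import numpy as np

x = np.arange(10.0)
nperseg, noverlap = 4, 2
step = nperseg - noverlap
windows = np.lib.stride_tricks.sliding_window_view(x, nperseg)[0::step, :]
print(windows)   # rows: [0..3], [2..5], [4..7], [6..9]
```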
  {
    "library": "pytorch",
    "name": "FxNetMinimizerResultMismatchError",
    "source_code": "@compatibility(is_backward_compatible=False)\nclass FxNetMinimizerResultMismatchError(Exception):\n    pass",
    "docstring": "Raised if comparing function thinks the results are mismatching.",
    "type": "class",
    "file_path": "pytorch\\torch\\fx\\passes\\net_min_base.py",
    "ast_data": "ClassDef name:FxNetMinimizerResultMismatchError Call"
  },
  {
    "library": "pytorch",
    "name": "summarize",
    "source_code": "def summarize(self) -> str:\n    sections = [self.title, self.description or '', self.setup_str()]\n    return '\\n'.join([f'{i}\\n' if '\\n' in i else i for i in sections if i])",
    "docstring": "Build TaskSpec portion of repr string for other containers.",
    "type": "method",
    "file_path": "pytorch\\torch\\utils\\benchmark\\utils\\common.py",
    "ast_data": "FunctionDef name:summarize arg:self arguments arg Assign BoolOp Call Return return:yes Call Compare"
  },
  {
    "library": "tensorflow",
    "name": "_scratch_graph",
    "source_code": "@tf_contextlib.contextmanager\ndef _scratch_graph(graph=None):\n    global _CURRENT_SCRATCH_GRAPH\n    scratch_graph = getattr(_CURRENT_SCRATCH_GRAPH, 'graph', None)\n    if scratch_graph is not None and graph is not None and (scratch_graph is not graph):\n        raise ValueError('Multiple scratch graphs specified.')\n    if scratch_graph:\n        yield scratch_graph\n        return\n    graph = graph or func_graph.FuncGraph('keras_scratch_graph')\n    try:\n        _CURRENT_SCRATCH_GRAPH.graph = graph\n        yield graph\n    finally:\n        _CURRENT_SCRATCH_GRAPH.graph = None",
    "docstring": "Retrieve a shared and temporary func graph. The eager execution path lifts a subgraph from the keras global graph into a scratch graph in order to create a function. DistributionStrategies, in turn, constructs multiple functions as well as a final combined function. In order for that logic to work correctly, all of the functions need to be created on the same scratch FuncGraph. Args: graph: A graph to be used as the current scratch graph. If not set then a scratch graph will either be retrieved or created: Yields: The current scratch graph.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\backend.py",
    "ast_data": "FunctionDef name:_scratch_graph arg:graph arguments arg Assign Call If BoolOp Compare Compare Compare Raise Call If Return return:no Assign BoolOp Call Try Assign Assign"
  },
  {
    "library": "tensorflow",
    "name": "fn_with_cond",
    "source_code": "def fn_with_cond(inner_args, inner_kwds):\n    condition = True\n    for v, _ in initializers:\n        condition = math_ops.logical_and(condition, resource_variable_ops.var_is_initialized_op(v.handle))\n    return cond.cond(condition, lambda: tracing_compilation.call_function(inner_args, inner_kwds, self._no_variable_creation_config), lambda: self._concrete_variable_creation_fn(*inner_args, **inner_kwds))",
    "docstring": "Conditionally runs initialization if it's needed.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\polymorphic_function\\polymorphic_function.py",
    "ast_data": "FunctionDef name:fn_with_cond arg:inner_args arg:inner_kwds arguments arg arg Assign For Assign Call Call Return return:yes Call arguments Call arguments Call"
  },
  {
    "library": "tensorflow",
    "name": "scalar_mul",
    "source_code": "@tf_export(v1=['math.scalar_mul', 'scalar_mul'])\n@dispatch.register_binary_elementwise_api\n@dispatch.add_dispatch_support\ndef scalar_mul(scalar, x, name=None):\n    base_dtype = dtypes.as_dtype(x.dtype).base_dtype\n    scalar = ops.convert_to_tensor(scalar, dtype=base_dtype, name='scalar')\n    shape = scalar.get_shape()\n    if shape.ndims == 0:\n        if isinstance(x, indexed_slices.IndexedSlices):\n            return indexed_slices.IndexedSlices(gen_math_ops.mul(scalar, x.values, name), x.indices, x.dense_shape)\n        else:\n            return gen_math_ops.mul(scalar, x, name)\n    else:\n        raise ValueError(f'The input scalar must be a 0-D value. Received shape {shape}.')",
    "docstring": "Multiplies a scalar times a or object. This is a special case of , where the first value must be a . Unlike the general form of , this is operation is guaranteed to be efficient for . >>> x = tf.reshape(tf.range(30, dtype=tf.float32), [10, 3]) >>> with tf.GradientTape() as g: ... g.watch(x) ... y = tf.gather(x, [1, 2]) # IndexedSlices ... z = tf.math.scalar_mul(10.0, y) Args: scalar: A 0-D scalar . Must have known shape. x: A or to be scaled. name: A name for the operation (optional). Returns: of the same type ( or ) as . Raises: ValueError: if scalar is not a 0-D .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\math_ops.py",
    "ast_data": "FunctionDef name:scalar_mul arg:scalar arg:x arg:name arguments arg arg arg Assign Call Assign Call Assign Call If Compare If Call Return return:yes Call Call Return return:yes Call Raise Call Call"
  },
  {
    "library": "pytorch",
    "name": "canonicalize_bool_expr",
    "source_code": "def canonicalize_bool_expr(expr: _T) -> _T:\n    if not isinstance(expr, (sympy.Rel, sympy.And, sympy.Or, sympy.Not, sympy.Eq, sympy.Ne)):\n        return expr\n    if isinstance(expr, (sympy.And, sympy.Or, sympy.Not)):\n        expr = sympy.logic.boolalg.to_cnf(expr)\n    return _canonicalize_bool_expr_impl(expr)",
    "docstring": "Canonicalize a boolean expression by transforming it into a lt / le inequality and moving all the non-constant terms to the rhs. We canonicalize And / Ors / Not via cnf and then canonicalize their subexpr recursively nb. sympy.Rel.canonical is not good enough Args: expr (sympy.Expr): Expression to canonicalize",
    "type": "function",
    "file_path": "pytorch\\torch\\fx\\experimental\\symbolic_shapes.py",
    "ast_data": "FunctionDef name:canonicalize_bool_expr arg:expr arguments arg If Call Return return:yes If Call Assign Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_get_legacy_path",
    "source_code": "def _get_legacy_path(env_name, default=[]):\n    if env_name in os.environ:\n        match = re.match('^(/[^/ ]*)+/lib/\\\\w+-linux-gnu/?$', os.environ[env_name])\n        if match:\n            return [match.group(1)]\n    return _list_from_env(env_name, default)",
    "docstring": "Returns a path specified by a legacy environment variable. CUDNN_INSTALL_PATH, NCCL_INSTALL_PATH, TENSORRT_INSTALL_PATH set to '/usr/lib/x86_64-linux-gnu' would previously find both library and header paths. Detect those and return '/usr', otherwise forward to _list_from_env().",
    "type": "function",
    "file_path": "tensorflow\\third_party\\xla\\third_party\\gpus\\find_cuda_config.py",
    "ast_data": "FunctionDef name:_get_legacy_path arg:env_name arg:default arguments arg arg If Compare Assign Call If Return return:yes Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "LazyLoader",
    "source_code": "class LazyLoader(python_types.ModuleType):\n\n    def __init__(self, local_name, parent_module_globals, name):\n        self._local_name = local_name\n        self._parent_module_globals = parent_module_globals\n        super(LazyLoader, self).__init__(name)\n\n    def _load(self):\n        module = importlib.import_module(self.__name__)\n        self._parent_module_globals[self._local_name] = module\n        self.__dict__.update(module.__dict__)\n        return module\n\n    def __getattr__(self, item):\n        module = self._load()\n        return getattr(module, item)",
    "docstring": "Lazily import a module, mainly to avoid pulling in large dependencies.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\utils\\generic_utils.py",
    "ast_data": "ClassDef name:LazyLoader FunctionDef name:__init__ arg:self arg:local_name arg:parent_module_globals arg:name arguments arg arg arg arg Assign Assign Call Call FunctionDef name:_load arg:self arguments arg Assign Call Assign Call Return return:yes FunctionDef name:__getattr__ arg:self arg:item arguments arg arg Assign Call Return return:yes Call"
  },
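Hedged usage of the pattern above: no import cost is paid until the first attribute access, at which point the real module also replaces the loader in the parent namespace.

```python
# Assumes the LazyLoader class defined above is in scope.
np = LazyLoader("np", globals(), "numpy")  # nothing imported yet
print(np.pi)                               # first access imports numpy and swaps it in
```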
  {
    "library": "django",
    "name": "__iter__",
    "source_code": "def __iter__(self):\n    for i in range(len(self)):\n        yield self[i]",
    "docstring": "Iterate over coordinates of this Point.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\geos\\point.py",
    "ast_data": "FunctionDef name:__iter__ arg:self arguments arg For Call Call"
  },
  {
    "library": "numpy",
    "name": "_parse_local_version",
    "source_code": "def _parse_local_version(local):\n    if local is not None:\n        return tuple((part.lower() if not part.isdigit() else int(part) for part in _local_version_seperators.split(local)))",
    "docstring": "Takes a string like abc.1.twelve and turns it into (\"abc\", 1, \"twelve\").",
    "type": "function",
    "file_path": "numpy\\numpy\\_utils\\_pep440.py",
    "ast_data": "FunctionDef name:_parse_local_version arg:local arguments arg If Compare Return return:yes Call Call Call Call Call"
  },
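A standalone re-implementation for illustration; PEP 440 local-version segments split on '.', '_' or '-', with numeric parts compared as integers:

```python
import re

_sep = re.compile(r"[._-]")

def parse_local_version(local):
    if local is not None:
        return tuple(
            part.lower() if not part.isdigit() else int(part)
            for part in _sep.split(local)
        )

assert parse_local_version("abc.1.twelve") == ("abc", 1, "twelve")
```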
  {
    "library": "scikit-learn",
    "name": "_estimate_noise_variance",
    "source_code": "def _estimate_noise_variance(self, X, y, positive):\n    if X.shape[0] <= X.shape[1] + self.fit_intercept:\n        raise ValueError(f'You are using {self.__class__.__name__} in the case where the number of samples is smaller than the number of features. In this setting, getting a good estimate for the variance of the noise is not possible. Provide an estimate of the noise variance in the constructor.')\n    ols_model = LinearRegression(positive=positive, fit_intercept=False)\n    y_pred = ols_model.fit(X, y).predict(X)\n    return np.sum((y - y_pred) ** 2) / (X.shape[0] - X.shape[1] - self.fit_intercept)",
    "docstring": "Compute an estimate of the variance with an OLS model. Parameters ---------- X : ndarray of shape (n_samples, n_features) Data to be fitted by the OLS model. We expect the data to be centered. y : ndarray of shape (n_samples,) Associated target. positive : bool, default=False Restrict coefficients to be >= 0. This should be inline with the parameter from . Returns ------- noise_variance : float An estimator of the noise variance of an OLS model.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\linear_model\\_least_angle.py",
    "ast_data": "FunctionDef name:_estimate_noise_variance arg:self arg:X arg:y arg:positive arguments arg arg arg arg If Compare Raise Call Assign Call Assign Call Call Return return:yes Call"
  },
  {
    "library": "numpy",
    "name": "rec_join",
    "source_code": "@array_function_dispatch(_rec_join_dispatcher)\ndef rec_join(key, r1, r2, jointype='inner', r1postfix='1', r2postfix='2', defaults=None):\n    kwargs = {'jointype': jointype, 'r1postfix': r1postfix, 'r2postfix': r2postfix, 'defaults': defaults, 'usemask': False, 'asrecarray': True}\n    return join_by(key, r1, r2, **kwargs)",
    "docstring": "Join arrays and on keys. Alternative to join_by, that always returns a np.recarray. See Also -------- join_by : equivalent function",
    "type": "function",
    "file_path": "numpy\\numpy\\lib\\recfunctions.py",
    "ast_data": "FunctionDef name:rec_join arg:key arg:r1 arg:r2 arg:jointype arg:r1postfix arg:r2postfix arg:defaults arguments arg arg arg arg arg arg arg Assign Return return:yes Call Call"
  },
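A small usage sketch through the public `numpy.lib.recfunctions` module:

```python
import numpy as np
from numpy.lib import recfunctions as rfn

r1 = np.array([(1, 10.0), (2, 20.0)], dtype=[("key", int), ("a", float)])
r2 = np.array([(1, 100.0), (2, 200.0)], dtype=[("key", int), ("b", float)])
joined = rfn.rec_join("key", r1, r2)   # always a np.recarray, inner join by default
print(joined.a, joined.b)              # attribute access works on recarrays
```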
  {
    "library": "sphinx",
    "name": "ensure_tempdir",
    "source_code": "def ensure_tempdir(builder: Builder) -> Path:\n    if not hasattr(builder, '_imgmath_tempdir'):\n        builder._imgmath_tempdir = Path(tempfile.mkdtemp())\n    return builder._imgmath_tempdir",
    "docstring": "Create temporary directory. use only one tempdir per build -- the use of a directory is cleaner than using temporary files, since we can clean up everything at once just removing the whole directory (see cleanup_tempdir)",
    "type": "function",
    "file_path": "sphinx\\sphinx\\ext\\imgmath.py",
    "ast_data": "FunctionDef name:ensure_tempdir arg:builder arguments arg If Call Assign Call Call Return return:yes"
  },
  {
    "library": "numpy",
    "name": "filter_sources",
    "source_code": "def filter_sources(sources):\n    c_sources = []\n    cxx_sources = []\n    f_sources = []\n    fmodule_sources = []\n    for source in sources:\n        if fortran_ext_match(source):\n            modules = _get_f90_modules(source)\n            if modules:\n                fmodule_sources.append(source)\n            else:\n                f_sources.append(source)\n        elif cxx_ext_match(source):\n            cxx_sources.append(source)\n        else:\n            c_sources.append(source)\n    return (c_sources, cxx_sources, f_sources, fmodule_sources)",
    "docstring": "Return four lists of filenames containing C, C++, Fortran, and Fortran 90 module sources, respectively.",
    "type": "function",
    "file_path": "numpy\\numpy\\distutils\\misc_util.py",
    "ast_data": "FunctionDef name:filter_sources arg:sources arguments arg Assign Assign Assign Assign For If Call Assign Call If Call Call If Call Call Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "drag_zoom",
    "source_code": "def drag_zoom(self, event):\n    if event.buttons != {self._zoom_info.button}:\n        self._cleanup_post_zoom()\n        return\n    start_xy = self._zoom_info.start_xy\n    ax = self._zoom_info.axes[0]\n    (x1, y1), (x2, y2) = np.clip([start_xy, [event.x, event.y]], ax.bbox.min, ax.bbox.max)\n    key = event.key\n    if self._zoom_info.cbar == 'horizontal':\n        key = 'x'\n    elif self._zoom_info.cbar == 'vertical':\n        key = 'y'\n    if key == 'x':\n        y1, y2 = ax.bbox.intervaly\n    elif key == 'y':\n        x1, x2 = ax.bbox.intervalx\n    self.draw_rubberband(event, x1, y1, x2, y2)",
    "docstring": "Callback for dragging in zoom mode.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\backend_bases.py",
    "ast_data": "FunctionDef name:drag_zoom arg:self arg:event arguments arg arg If Compare Call Return return:no Assign Assign Assign Call Assign If Compare Assign If Compare Assign If Compare Assign If Compare Assign Call"
  },
  {
    "library": "pytorch",
    "name": "_create_multi_output_jit_fn",
    "source_code": "def _create_multi_output_jit_fn(code_string: str, num_outputs: int, **kwargs) -> Callable:\n    return _JittedFunction(code_string, return_by_ref=True, num_outputs=num_outputs, **kwargs)",
    "docstring": "Create a jiterator-generated cuda kernel for an elementwise op that supports returning one or more outputs. Args: code_string (str): CUDA code string to be compiled by jiterator. The entry functor must return value by reference. num_outputs(int): number of outputs return by the kernel kwargs (Dict, optional): Keyword arguments for generated function Example:: code_string = \"template void my_kernel(T x, T y, T alpha, T& out) { out = -x + alpha * y; }\" jitted_fn = create_jit_fn(code_string, alpha=1.0) a = torch.rand(3, device='cuda') b = torch.rand(3, device='cuda') # invoke jitted function like a regular python function result = jitted_fn(a, b, alpha=3.14) .. warning:: This API is in beta and may change in future releases. .. warning:: This API only supports up to 8 inputs and 8 outputs",
    "type": "function",
    "file_path": "pytorch\\torch\\cuda\\jiterator.py",
    "ast_data": "FunctionDef name:_create_multi_output_jit_fn arg:code_string arg:num_outputs arguments arg arg arg Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "set_bbox",
    "source_code": "def set_bbox(self, rectprops):\n    if rectprops is not None:\n        props = rectprops.copy()\n        boxstyle = props.pop('boxstyle', None)\n        pad = props.pop('pad', None)\n        if boxstyle is None:\n            boxstyle = 'square'\n            if pad is None:\n                pad = 4\n            pad /= self.get_size()\n        elif pad is None:\n            pad = 0.3\n        if isinstance(boxstyle, str) and 'pad' not in boxstyle:\n            boxstyle += ',pad=%0.2f' % pad\n        self._bbox_patch = FancyBboxPatch((0, 0), 1, 1, boxstyle=boxstyle, transform=IdentityTransform(), **props)\n    else:\n        self._bbox_patch = None\n    self._update_clip_properties()",
    "docstring": "Draw a box behind/around the text. This can be used to set a background and/or a frame around the text. It's realized through a behind the text (see also ). The bbox patch is None by default and only created when needed. Parameters ---------- rectprops : dict with properties for or None The default boxstyle is 'square'. The mutation scale of the is set to the fontsize. Pass `` to remove the bbox patch completely. Examples -------- :: t.set_bbox(dict(facecolor='red', alpha=0.5))",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\text.py",
    "ast_data": "FunctionDef name:set_bbox arg:self arg:rectprops arguments arg arg If Compare Assign Call Assign Call Assign Call If Compare Assign If Compare Assign Call If Compare Assign If BoolOp Call Compare Assign Call Call Assign Call"
  },
  {
    "library": "pytorch",
    "name": "cudagraph_mark_step_begin",
    "source_code": "def cudagraph_mark_step_begin():\n    from torch._inductor import cudagraph_trees\n    cudagraph_trees.mark_step_begin()",
    "docstring": "Indicates that a new iteration of inference or training is about to begin. CUDA Graphs will free tensors of a prior iteration. A new iteration is started on each invocation of torch.compile, so long as there is not a pending backward that has not been called. If that heuristic is wrong, such as in the following example, manually mark it with this api. .. code-block:: python @torch.compile(mode=\"reduce-overhead\") def rand_foo(): return torch.rand([4], device=\"cuda\") for _ in range(5): torch.compiler.cudagraph_mark_step_begin() rand_foo() + rand_foo() For more details, see __",
    "type": "function",
    "file_path": "pytorch\\torch\\compiler\\__init__.py",
    "ast_data": "FunctionDef name:cudagraph_mark_step_begin arguments Call"
  },
  {
    "library": "pandas",
    "name": "_construct_result",
    "source_code": "def _construct_result(self, result, other) -> DataFrame:\n    out = self._constructor(result, copy=False).__finalize__(self)\n    out.columns = self.columns\n    out.index = self.index\n    out = out.__finalize__(other)\n    return out",
    "docstring": "Wrap the result of an arithmetic, comparison, or logical operation. Parameters ---------- result : DataFrame Returns ------- DataFrame",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\frame.py",
    "ast_data": "FunctionDef name:_construct_result arg:self arg:result arg:other arguments arg arg arg Assign Call Call Assign Assign Assign Call Return return:yes"
  },
  {
    "library": "numpy",
    "name": "isupper",
    "source_code": "def isupper(self):\n    return isupper(self)",
    "docstring": "Returns true for each element if all cased characters in the string are uppercase and there is at least one character, false otherwise. See Also -------- char.isupper",
    "type": "method",
    "file_path": "numpy\\numpy\\_core\\defchararray.py",
    "ast_data": "FunctionDef name:isupper arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_fix_stop_index",
    "source_code": "def _fix_stop_index(index, rank):\n    if index is None:\n        if rank is None:\n            raise ValueError('Rank must be known to use __getitem__ without a stop.')\n        index = rank\n    if index < 0:\n        if rank is None:\n            raise ValueError('Rank must be known to use __getitem__ on a negative index.')\n        index = rank + index\n    if index < 0:\n        index = 0\n    if rank is not None:\n        index = min(rank, index)\n    return index",
    "docstring": "Slice indexes are always silently truncated.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\dynamic_ragged_shape.py",
    "ast_data": "FunctionDef name:_fix_stop_index arg:index arg:rank arguments arg arg If Compare If Compare Raise Call Assign If Compare If Compare Raise Call Assign If Compare Assign If Compare Assign Call Return return:yes"
  },
  {
    "library": "scipy",
    "name": "Ackley03",
    "source_code": "class Ackley03(Benchmark):\n\n    def __init__(self, dimensions=2):\n        Benchmark.__init__(self, dimensions)\n        self._bounds = list(zip([-32.0] * self.N, [32.0] * self.N))\n        self.global_optimum = [[-0.68255758, -0.36070859]]\n        self.fglob = -195.6290282592388\n\n    def fun(self, x, *args):\n        self.nfev += 1\n        a = -200 * exp(-0.02 * sqrt(x[0] ** 2 + x[1] ** 2))\n        a += 5 * exp(cos(3 * x[0]) + sin(3 * x[1]))\n        return a",
    "docstring": "Ackley03 [1]_ objective function. The Ackley03 global optimization problem is a multimodal minimization problem defined as follows: .. math:: f_{\\text{Ackley03}}(x) = -200 e^{-0.02 \\sqrt{x_1^2 + x_2^2}} + 5e^{\\cos(3x_1) + \\sin(3x_2)} with :math: for :math:. *Global optimum*: :math: for :math: .. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions For Global Optimization Problems Int. Journal of Mathematical Modelling and Numerical Optimisation, 2013, 4, 150-194. TODO: I think the minus sign is missing in front of the first term in eqn3 in [1]_. This changes the global minimum",
    "type": "class",
    "file_path": "scipy\\benchmarks\\benchmarks\\go_benchmark_functions\\go_funcs_A.py",
    "ast_data": "ClassDef name:Ackley03 FunctionDef name:__init__ arg:self arg:dimensions arguments arg arg Call Assign Call Call Assign Assign FunctionDef name:fun arg:self arg:x arguments arg arg arg Assign Call Call Call Call Call Return return:yes"
  },
  {
    "library": "numpy",
    "name": "title",
    "source_code": "def title(self):\n    return asarray(title(self))",
    "docstring": "For each element in , return a titlecased version of the string: words start with uppercase characters, all remaining cased characters are lowercase. See Also -------- char.title",
    "type": "method",
    "file_path": "numpy\\numpy\\_core\\defchararray.py",
    "ast_data": "FunctionDef name:title arg:self arguments arg Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_tfmw_add_deprecation_warning",
    "source_code": "def _tfmw_add_deprecation_warning(self, name, attr):\n    if self._tfmw_warning_count < _PER_MODULE_WARNING_LIMIT and name not in self._tfmw_deprecated_checked:\n        self._tfmw_deprecated_checked.add(name)\n        if self._tfmw_module_name:\n            full_name = 'tf.%s.%s' % (self._tfmw_module_name, name)\n        else:\n            full_name = 'tf.%s' % name\n        rename = get_rename_v2(full_name)\n        if rename and (not has_deprecation_decorator(attr)):\n            call_location = _call_location()\n            if not call_location.startswith('<'):\n                logging.warning('From %s: The name %s is deprecated. Please use %s instead.\\n', _call_location(), full_name, rename)\n                self._tfmw_warning_count += 1\n                return True\n    return False",
    "docstring": "Print deprecation warning for attr with given name if necessary.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\util\\module_wrapper.py",
    "ast_data": "FunctionDef name:_tfmw_add_deprecation_warning arg:self arg:name arg:attr arguments arg arg arg If BoolOp Compare Compare Call If Assign Assign Assign Call If BoolOp Call Assign Call If Call Call Call Return return:yes Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "DispatchError",
    "source_code": "class DispatchError(ConversionError):\n    pass",
    "docstring": "Error during ONNX Function dispatching.",
    "type": "class",
    "file_path": "pytorch\\torch\\onnx\\_internal\\exporter\\_errors.py",
    "ast_data": "ClassDef name:DispatchError"
  },
  {
    "library": "scipy",
    "name": "_cbc_lattice",
    "source_code": "def _cbc_lattice(n_dim, n_qmc_samples):\n    primes = primes_from_2_to(n_qmc_samples + 1)\n    n_qmc_samples = primes[-1]\n    bt = np.ones(n_dim)\n    gm = np.hstack([1.0, 0.8 ** np.arange(n_dim - 1)])\n    q = 1\n    w = 0\n    z = np.arange(1, n_dim + 1)\n    m = (n_qmc_samples - 1) // 2\n    g = _primitive_root(n_qmc_samples)\n    perm = np.ones(m, dtype=int)\n    for j in range(m - 1):\n        perm[j + 1] = g * perm[j] % n_qmc_samples\n    perm = np.minimum(n_qmc_samples - perm, perm)\n    pn = perm / n_qmc_samples\n    c = pn * pn - pn + 1.0 / 6\n    fc = fft(c)\n    for s in range(1, n_dim):\n        reordered = np.hstack([c[:w + 1][::-1], c[w + 1:m][::-1]])\n        q = q * (bt[s - 1] + gm[s - 1] * reordered)\n        w = ifft(fc * fft(q)).real.argmin()\n        z[s] = perm[w]\n    q = z / n_qmc_samples\n    return (q, n_qmc_samples)",
    "docstring": "Compute a QMC lattice generator using a Fast CBC construction. Parameters ---------- n_dim : int > 0 The number of dimensions for the lattice. n_qmc_samples : int > 0 The desired number of QMC samples. This will be rounded down to the nearest prime to enable the CBC construction. Returns ------- q : float array : shape=(n_dim,) The lattice generator vector. All values are in the open interval ``. actual_n_qmc_samples : int The prime number of QMC samples that must be used with this lattice, no more, no less. References ---------- .. [1] Nuyens, D. and Cools, R. \"Fast Component-by-Component Construction, a Reprise for Different Kernels\", In H. Niederreiter and D. Talay, editors, Monte-Carlo and Quasi-Monte Carlo Methods 2004, Springer-Verlag, 2006, 371-385.",
    "type": "function",
    "file_path": "scipy\\scipy\\stats\\_qmvnt.py",
    "ast_data": "FunctionDef name:_cbc_lattice arg:n_dim arg:n_qmc_samples arguments arg arg Assign Call Assign Assign Call Assign Call Call Assign Assign Assign Call Assign Assign Call Assign Call For Call Assign Assign Call Assign Assign Assign Call For Call Assign Call Assign Assign Call Call Call Assign Assign Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "fit",
    "source_code": "@_fit_context(prefer_skip_nested_validation=True)\ndef fit(self, X, y=None):\n    self._fit_transform(X)\n    return self",
    "docstring": "Compute the embedding vectors for data X. Parameters ---------- X : array-like of shape (n_samples, n_features) Training set. y : Ignored Not used, present here for API consistency by convention. Returns ------- self : object Fitted class instance.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\manifold\\_locally_linear.py",
    "ast_data": "FunctionDef name:fit arg:self arg:X arg:y arguments arg arg arg Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "resize_images",
    "source_code": "@dispatch.add_dispatch_support\n@doc_controls.do_not_generate_docs\ndef resize_images(x, height_factor, width_factor, data_format, interpolation='nearest'):\n    if data_format == 'channels_first':\n        rows, cols = (2, 3)\n    elif data_format == 'channels_last':\n        rows, cols = (1, 2)\n    else:\n        raise ValueError('Invalid `data_format` argument: %s' % (data_format,))\n    new_shape = x.shape[rows:cols + 1]\n    if new_shape.is_fully_defined():\n        new_shape = constant_op.constant(new_shape.as_list(), dtype='int32')\n    else:\n        new_shape = array_ops.shape_v2(x)[rows:cols + 1]\n    new_shape *= constant_op.constant(np.array([height_factor, width_factor], dtype='int32'))\n    if data_format == 'channels_first':\n        x = permute_dimensions(x, [0, 2, 3, 1])\n    if interpolation == 'nearest':\n        x = image_ops.resize_images_v2(x, new_shape, method=image_ops.ResizeMethod.NEAREST_NEIGHBOR)\n    elif interpolation == 'bilinear':\n        x = image_ops.resize_images_v2(x, new_shape, method=image_ops.ResizeMethod.BILINEAR)\n    else:\n        raise ValueError('interpolation should be one of \"nearest\" or \"bilinear\".')\n    if data_format == 'channels_first':\n        x = permute_dimensions(x, [0, 3, 1, 2])\n    return x",
    "docstring": "Resizes the images contained in a 4D tensor. Args: x: Tensor or variable to resize. height_factor: Positive integer. width_factor: Positive integer. data_format: One of , . interpolation: A string, one of or . Returns: A tensor. Raises: ValueError: in case of incorrect value for or .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\backend.py",
    "ast_data": "FunctionDef name:resize_images arg:x arg:height_factor arg:width_factor arg:data_format arg:interpolation arguments arg arg arg arg arg If Compare Assign If Compare Assign Raise Call Assign If Call Assign Call Call Assign Call Call Call If Compare Assign Call If Compare Assign Call If Compare Assign Call Raise Call If Compare Assign Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "get_state_dict",
    "source_code": "def get_state_dict(model: nn.Module, optimizers: Union[torch.optim.Optimizer, Iterable[torch.optim.Optimizer]], *, submodules: Optional[set[nn.Module]]=None, options: Optional[StateDictOptions]=None) -> tuple[dict[str, ValueType], OptimizerStateType]:\n    with _gc_context():\n        optimizers = (optimizers,) if isinstance(optimizers, torch.optim.Optimizer) else tuple(optimizers)\n        info = _verify_options(model, optimizers, optim_only=False, submodules=submodules, options=options)\n        model_state_dict = _get_model_state_dict(model, info)\n        optim_state_dict = _get_optim_state_dict(model, optimizers, info)\n        _verify_state_dict(model_state_dict, optim_state_dict, info)\n        return (model_state_dict, optim_state_dict)",
    "docstring": "Return the model state_dict and optimizers state_dict. `StateDictOptions` that contain model state_dict and optimizer state_dict. :rtype: typing.Tuple[typing.Dict[str, ValueType], OptimizerStateType]",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\checkpoint\\state_dict.py",
    "ast_data": "FunctionDef name:get_state_dict arg:model arg:optimizers arguments arg arg arg arg With Call Assign Call Call Assign Call Assign Call Assign Call Call Return return:yes"
  },
  {
    "library": "numpy",
    "name": "hermgrid3d",
    "source_code": "def hermgrid3d(x, y, z, c):\n    return pu._gridnd(hermval, c, x, y, z)",
    "docstring": "Evaluate a 3-D Hermite series on the Cartesian product of x, y, and z. This function returns the values: .. math:: p(a,b,c) = \\sum_{i,j,k} c_{i,j,k} * H_i(a) * H_j(b) * H_k(c) where the points `axbyczxyzxyzxyzccxyzxyzcxy`. See Also -------- hermval, hermval2d, hermgrid2d, hermval3d Examples -------- >>> from numpy.polynomial.hermite import hermgrid3d >>> x = [1, 2] >>> y = [4, 5] >>> z = [6, 7] >>> c = [[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]] >>> hermgrid3d(x, y, z, c) array([[[ 40077., 54117.], [ 49293., 66561.]], [[ 72375., 97719.], [ 88975., 120131.]]])",
    "type": "function",
    "file_path": "numpy\\numpy\\polynomial\\hermite.py",
    "ast_data": "FunctionDef name:hermgrid3d arg:x arg:y arg:z arg:c arguments arg arg arg arg Return return:yes Call"
  },
  {
    "library": "django",
    "name": "add_update_values",
    "source_code": "def add_update_values(self, values):\n    values_seq = []\n    for name, val in values.items():\n        field = self.get_meta().get_field(name)\n        direct = not (field.auto_created and (not field.concrete)) or not field.concrete\n        model = field.model._meta.concrete_model\n        if field.name == 'pk' and model._meta.is_composite_pk:\n            raise FieldError('Composite primary key fields must be updated individually.')\n        if not direct or (field.is_relation and field.many_to_many):\n            raise FieldError('Cannot update model field %r (only non-relations and foreign keys permitted).' % field)\n        if model is not self.get_meta().concrete_model:\n            self.add_related_update(model, field, val)\n            continue\n        values_seq.append((field, model, val))\n    return self.add_update_fields(values_seq)",
    "docstring": "Convert a dictionary of field name to value mappings into an update query. This is the entry point for the public update() method on querysets.",
    "type": "method",
    "file_path": "django\\django\\db\\models\\sql\\subqueries.py",
    "ast_data": "FunctionDef name:add_update_values arg:self arg:values arguments arg arg Assign For Call Assign Call Call Assign BoolOp BoolOp Assign If BoolOp Compare Raise Call If BoolOp BoolOp Raise Call If Compare Call Call Call Return return:yes Call"
  },
  {
    "library": "sphinx",
    "name": "fallback",
    "source_code": "def fallback(self, node_type: Any) -> None:\n    for node in self.document.findall(node_type):\n        newnode = nodes.inline()\n        newnode.update_all_atts(node)\n        newnode.extend(node)\n        newnode.setdefault('_sig_node_type', node.tagname)\n        node.replace_self(newnode)",
    "docstring": "Translate nodes of type *node_type* to docutils inline nodes. The original node type name is stored as a string in a private `` attribute if the latter did not exist.",
    "type": "method",
    "file_path": "sphinx\\sphinx\\transforms\\post_transforms\\__init__.py",
    "ast_data": "FunctionDef name:fallback arg:self arg:node_type arguments arg arg For Call Assign Call Call Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "is_available",
    "source_code": "def is_available():\n    return _itt.is_available()",
    "docstring": "Check if ITT feature is available or not",
    "type": "function",
    "file_path": "pytorch\\torch\\profiler\\itt.py",
    "ast_data": "FunctionDef name:is_available arguments Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "version",
    "source_code": "def version():\n    if not _init():\n        return None\n    return __cudnn_version",
    "docstring": "Return the version of cuDNN.",
    "type": "function",
    "file_path": "pytorch\\torch\\backends\\cudnn\\__init__.py",
    "ast_data": "FunctionDef name:version arguments If Call Return return:no Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "Predictor",
    "source_code": "class Predictor(ABC):\n    if Benchmark.bench_predict:\n\n        def time_predict(self, *args):\n            self.estimator.predict(self.X)\n\n        def peakmem_predict(self, *args):\n            self.estimator.predict(self.X)\n        if Benchmark.base_commit is not None:\n\n            def track_same_prediction(self, *args):\n                est_path = get_estimator_path(self, Benchmark.base_commit, args, True)\n                with est_path.open(mode='rb') as f:\n                    estimator_base = pickle.load(f)\n                y_val_pred_base = estimator_base.predict(self.X_val)\n                y_val_pred = self.estimator.predict(self.X_val)\n                return np.allclose(y_val_pred_base, y_val_pred)\n\n    @property\n    @abstractmethod\n    def params(self):\n        pass",
    "docstring": "Abstract base class for benchmarks of estimators implementing predict",
    "type": "class",
    "file_path": "scikit-learn\\asv_benchmarks\\benchmarks\\common.py",
    "ast_data": "ClassDef name:Predictor If FunctionDef name:time_predict arg:self arguments arg arg Call FunctionDef name:peakmem_predict arg:self arguments arg arg Call If Compare FunctionDef name:track_same_prediction arg:self arguments arg arg Assign Call With Call Assign Call Assign Call Assign Call Return return:yes Call FunctionDef name:params arg:self arguments arg"
  },
  {
    "library": "tensorflow",
    "name": "on_run_start",
    "source_code": "@abc.abstractmethod\ndef on_run_start(self, request):\n    pass",
    "docstring": "Callback invoked on run() calls to the debug-wrapper session. This is a blocking callback. The invocation happens after the wrapper's run() call is entered, after an increment of run call counter. Args: request: () callback request object carrying information about the run call such as the fetches, feed dict, run options, run metadata, and how many calls to this wrapper session have occurred. Returns: An instance of , carrying information to debug URLs used to watch the tensors.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\debug\\wrappers\\framework.py",
    "ast_data": "FunctionDef name:on_run_start arg:self arg:request arguments arg arg"
  },
  {
    "library": "pytorch",
    "name": "remove_identity",
    "source_code": "def remove_identity(gm: torch.fx.GraphModule) -> torch.fx.GraphModule:\n\n    class IdentityRemover(torch.fx.Transformer):\n\n        def call_module(self, target, args, kwargs):\n            if isinstance(self.submodules[target], nn.Identity):\n                assert len(args) == 1\n                return args[0]\n            else:\n                return super().call_module(target, args, kwargs)\n    return IdentityRemover(gm).transform()",
    "docstring": "Removes all identity layers from the module.",
    "type": "function",
    "file_path": "pytorch\\torch\\_inductor\\fx_passes\\pre_grad.py",
    "ast_data": "FunctionDef name:remove_identity arg:gm arguments arg ClassDef name:IdentityRemover FunctionDef name:call_module arg:self arg:target arg:args arg:kwargs arguments arg arg arg arg If Call Compare Call Return return:yes Return return:yes Call Call Return return:yes Call Call"
  },
  {
    "library": "scikit-learn",
    "name": "diag",
    "source_code": "@abstractmethod\ndef diag(self, X):\n    pass",
    "docstring": "Returns the diagonal of the kernel k(X, X). The result of this method is identical to np.diag(self(X)); however, it can be evaluated more efficiently since only the diagonal is evaluated. Parameters ---------- X : array-like of shape (n_samples,) Left argument of the returned kernel k(X, Y) Returns ------- K_diag : ndarray of shape (n_samples_X,) Diagonal of kernel k(X, X)",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\gaussian_process\\kernels.py",
    "ast_data": "FunctionDef name:diag arg:self arg:X arguments arg arg"
  },
  {
    "library": "sphinx",
    "name": "format_default",
    "source_code": "def format_default(self, default: str) -> tuple[nodes.field, list[system_message]]:\n    parsed, msgs = self.parse_inline(default, lineno=self.lineno)\n    field = nodes.field('', nodes.field_name('', _('Default')), nodes.field_body('', *parsed))\n    return (field, msgs)",
    "docstring": "Formats the `` option.",
    "type": "method",
    "file_path": "sphinx\\sphinx\\domains\\std\\__init__.py",
    "ast_data": "FunctionDef name:format_default arg:self arg:default arguments arg arg Assign Call Assign Call Call Call Call Return return:yes"
  },
  {
    "library": "django",
    "name": "Choices",
    "source_code": "class Choices(enum.Enum, metaclass=ChoicesType):\n    do_not_call_in_templates = enum.nonmember(True)\n\n    @enum_property\n    def label(self):\n        return self._label_\n\n    def __repr__(self):\n        return f'{self.__class__.__qualname__}.{self._name_}'",
    "docstring": "Class for creating enumerated choices.",
    "type": "class",
    "file_path": "django\\django\\db\\models\\enums.py",
    "ast_data": "ClassDef name:Choices Assign Call FunctionDef name:label arg:self arguments arg Return return:yes FunctionDef name:__repr__ arg:self arguments arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "DGreatestUpperBound",
    "source_code": "class DGreatestUpperBound(Constraint):\n\n    def __init__(self, res, rhs1, rhs2):\n        assert is_dim(res)\n        assert is_dim(rhs1)\n        assert is_dim(rhs2)\n        self.res = res\n        self.rhs1 = rhs1\n        self.rhs2 = rhs2\n\n    def __repr__(self):\n        return f'{self.res} = {self.rhs1}⊔{self.rhs2}'\n\n    def __eq__(self, other):\n        if isinstance(other, DGreatestUpperBound):\n            return self.res == other.res and self.rhs1 == other.rhs1 and (self.rhs2 == other.rhs2)\n        else:\n            return False",
    "docstring": "Greatest Upper bound for dimensions",
    "type": "class",
    "file_path": "pytorch\\torch\\fx\\experimental\\migrate_gradual_types\\constraint.py",
    "ast_data": "ClassDef name:DGreatestUpperBound FunctionDef name:__init__ arg:self arg:res arg:rhs1 arg:rhs2 arguments arg arg arg arg Call Call Call Assign Assign Assign FunctionDef name:__repr__ arg:self arguments arg Return return:yes FunctionDef name:__eq__ arg:self arg:other arguments arg arg If Call Return return:yes BoolOp Compare Compare Compare Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "includes",
    "source_code": "def includes(self, x):\n    if self.low_inclusive:\n        low = np.greater_equal(x, self.low)\n    else:\n        low = np.greater(x, self.low)\n    if not np.all(low):\n        return False\n    if self.high_inclusive:\n        high = np.less_equal(x, self.high)\n    else:\n        high = np.less(x, self.high)\n    return bool(np.all(high))",
    "docstring": "Test whether all values of x are in interval range. Parameters ---------- x : ndarray Array whose elements are tested to be in interval range. Returns ------- result : bool",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\_loss\\link.py",
    "ast_data": "FunctionDef name:includes arg:self arg:x arguments arg arg If Assign Call Assign Call If Call Return return:yes If Assign Call Assign Call Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_run_inline_graph_optimization",
    "source_code": "def _run_inline_graph_optimization(func, lower_control_flow, aggressive_inlining):\n    graph_def = func.graph.as_graph_def()\n    if not lower_control_flow:\n        graph_def = disable_lower_using_switch_merge(graph_def)\n    for function in graph_def.library.function:\n        if 'api_implements' in function.attr:\n            del function.attr['api_implements']\n    meta_graph = export_meta_graph(graph_def=graph_def, graph=func.graph)\n    for name in ['variables', 'model_variables', 'trainable_variables', 'local_variables']:\n        raw_list = []\n        for raw in meta_graph.collection_def['variables'].bytes_list.value:\n            variable = variable_pb2.VariableDef()\n            variable.ParseFromString(raw)\n            variable.ClearField('initializer_name')\n            raw_list.append(variable.SerializeToString())\n        meta_graph.collection_def[name].bytes_list.value[:] = raw_list\n    fetch_collection = meta_graph_pb2.CollectionDef()\n    for array in func.inputs + func.outputs:\n        fetch_collection.node_list.value.append(array.name)\n    meta_graph.collection_def['train_op'].CopyFrom(fetch_collection)\n    config = config_pb2.ConfigProto()\n    rewrite_options = config.graph_options.rewrite_options\n    rewrite_options.min_graph_nodes = -1\n    rewrite_options.optimizers.append('function')\n    if aggressive_inlining:\n        rewrite_options.function_optimization = rewriter_config_pb2.RewriterConfig.AGGRESSIVE\n    return tf_optimizer.OptimizeGraph(config, meta_graph)",
    "docstring": "Apply function inline optimization to the graph. Returns the GraphDef after Grappler's function inlining optimization is applied. This optimization does not work on models with control flow. Args: func: ConcreteFunction. lower_control_flow: Boolean indicating whether or not to lower control flow ops such as If and While. (default True) aggressive_inlining: Boolean indicating whether or not to do aggressive function inlining (might be unsafe if function has stateful ops not properly connected to control outputs). Returns: GraphDef",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\convert_to_constants.py",
    "ast_data": "FunctionDef name:_run_inline_graph_optimization arg:func arg:lower_control_flow arg:aggressive_inlining arguments arg arg arg Assign Call If Assign Call For If Compare Assign Call For Assign For Assign Call Call Call Call Call Assign Assign Call For Call Call Assign Call Assign Assign Call If Assign Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "session",
    "source_code": "@property\ndef session(self):\n    return self._session",
    "docstring": "A TensorFlow session object which will execute the .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\training\\session_run_hook.py",
    "ast_data": "FunctionDef name:session arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_handle_inside_pfor",
    "source_code": "def _handle_inside_pfor(pfor_input: _PforInput, handle):\n    while handle.op.type in ('Enter', 'Identity'):\n        handle = handle.op.inputs[0]\n    if handle.op.type not in ['TensorArrayV3', 'TensorArrayGradV3', 'TensorArrayGradWithShape']:\n        raise ValueError(f'Unable to find source for handle {handle}.')\n    else:\n        return pfor_input.pfor.op_is_inside_loop(handle.op)",
    "docstring": "Returns True if handle was created inside the pfor loop.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\parallel_for\\pfor.py",
    "ast_data": "FunctionDef name:_handle_inside_pfor arg:pfor_input arg:handle arguments arg arg While Compare Assign If Compare Raise Call Return return:yes Call"
  },
  {
    "library": "kornia",
    "name": "invert_affine_transform",
    "source_code": "def invert_affine_transform(matrix: Tensor) -> Tensor:\n    if not isinstance(matrix, Tensor):\n        raise TypeError(f'Input matrix type is not a Tensor. Got {type(matrix)}')\n    if not (len(matrix.shape) == 3 and matrix.shape[-2:] == (2, 3)):\n        raise ValueError(f'Input matrix must be a Bx2x3 tensor. Got {matrix.shape}')\n    matrix_tmp: Tensor = convert_affinematrix_to_homography(matrix)\n    matrix_inv: Tensor = _torch_inverse_cast(matrix_tmp)\n    return matrix_inv[..., :2, :3]",
    "docstring": "Invert an affine transformation. The function computes an inverse affine transformation represented by 2x3 matrix: .. math:: \\begin{bmatrix} a_{11} & a_{12} & b_{1} \\\\ a_{21} & a_{22} & b_{2} \\\\ \\end{bmatrix} The result is also a 2x3 matrix of the same type as M. Args: matrix: original affine transform. The tensor must be in the shape of :math:. Return: the reverse affine transform with shape :math:. .. note:: This function is often used in conjunction with :func:.",
    "type": "function",
    "file_path": "kornia\\kornia\\geometry\\transform\\imgwarp.py",
    "ast_data": "FunctionDef name:invert_affine_transform arg:matrix arguments arg If Call Raise Call Call If BoolOp Compare Call Compare Raise Call Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "seed",
    "source_code": "def seed() -> int:\n    seed = default_generator.seed()\n    import torch.cuda\n    if not torch.cuda._is_in_bad_fork():\n        torch.cuda.manual_seed_all(seed)\n    import torch.mps\n    if not torch.mps._is_in_bad_fork():\n        torch.mps.manual_seed(seed)\n    import torch.xpu\n    if not torch.xpu._is_in_bad_fork():\n        torch.xpu.manual_seed_all(seed)\n    _seed_custom_device(seed)\n    return seed",
    "docstring": "Sets the seed for generating random numbers to a non-deterministic random number on all devices. Returns a 64 bit number used to seed the RNG.",
    "type": "function",
    "file_path": "pytorch\\torch\\random.py",
    "ast_data": "FunctionDef name:seed arguments Assign Call If Call Call If Call Call If Call Call Call Return return:yes"
  },
  {
    "library": "kornia",
    "name": "height",
    "source_code": "@property\ndef height(self) -> torch.Tensor:\n    return self.ymax - self.ymin",
    "docstring": "The bounding box height.",
    "type": "method",
    "file_path": "kornia\\kornia\\contrib\\face_detection.py",
    "ast_data": "FunctionDef name:height arg:self arguments arg Return return:yes"
  },
  {
    "library": "pandas",
    "name": "_box_pa_scalar",
    "source_code": "@classmethod\ndef _box_pa_scalar(cls, value, pa_type: pa.DataType | None=None) -> pa.Scalar:\n    if isinstance(value, pa.Scalar):\n        pa_scalar = value\n    elif isna(value):\n        pa_scalar = pa.scalar(None, type=pa_type)\n    else:\n        if isinstance(value, Timedelta):\n            if pa_type is None:\n                pa_type = pa.duration(value.unit)\n            elif value.unit != pa_type.unit:\n                value = value.as_unit(pa_type.unit)\n            value = value._value\n        elif isinstance(value, Timestamp):\n            if pa_type is None:\n                pa_type = pa.timestamp(value.unit, tz=value.tz)\n            elif value.unit != pa_type.unit:\n                value = value.as_unit(pa_type.unit)\n            value = value._value\n        pa_scalar = pa.scalar(value, type=pa_type, from_pandas=True)\n    if pa_type is not None and pa_scalar.type != pa_type:\n        pa_scalar = pa_scalar.cast(pa_type)\n    return pa_scalar",
    "docstring": "Box value into a pyarrow Scalar. Parameters ---------- value : any pa_type : pa.DataType | None Returns ------- pa.Scalar",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\arrays\\arrow\\array.py",
    "ast_data": "FunctionDef name:_box_pa_scalar arg:cls arg:value arg:pa_type arguments arg arg arg If Call Assign If Call Assign Call If Call If Compare Assign Call If Compare Assign Call Assign If Call If Compare Assign Call If Compare Assign Call Assign Assign Call If BoolOp Compare Compare Assign Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "get_stretch",
    "source_code": "def get_stretch(self):\n    return self._stretch",
    "docstring": "Return the font stretch or width. Options are: 'ultra-condensed', 'extra-condensed', 'condensed', 'semi-condensed', 'normal', 'semi-expanded', 'expanded', 'extra-expanded', 'ultra-expanded'.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\font_manager.py",
    "ast_data": "FunctionDef name:get_stretch arg:self arguments arg Return return:yes"
  },
  {
    "library": "scipy",
    "name": "XinSheYang02",
    "source_code": "class XinSheYang02(Benchmark):\n    change_dimensionality = True\n\n    def __init__(self, dimensions=2):\n        Benchmark.__init__(self, dimensions)\n        self._bounds = list(zip([-2 * pi] * self.N, [2 * pi] * self.N))\n        self.global_optimum = [[0 for _ in range(self.N)]]\n        self.fglob = 0.0\n\n    def fun(self, x, *args):\n        self.nfev += 1\n        return sum(abs(x)) * exp(-sum(sin(x ** 2.0)))",
    "docstring": "Xin-She Yang 2 objective function. This class defines the Xin-She Yang 2 [1]_ global optimization problem. This is a multimodal minimization problem defined as follows: .. math:: f_{\\text{XinSheYang02}}(\\x) = \\frac{\\sum_{i=1}^{n} \\lvert{x_{i}}\\rvert} {e^{\\sum_{i=1}^{n} \\sin\\left(x_{i}^{2.0} \\right)}} Here, :math: represents the number of dimensions and :math: for :math:. *Global optimum*: :math: for :math: for :math: .. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions For Global Optimization Problems Int. Journal of Mathematical Modelling and Numerical Optimisation, 2013, 4, 150-194.",
    "type": "class",
    "file_path": "scipy\\benchmarks\\benchmarks\\go_benchmark_functions\\go_funcs_X.py",
    "ast_data": "ClassDef name:XinSheYang02 Assign FunctionDef name:__init__ arg:self arg:dimensions arguments arg arg Call Assign Call Call Assign Call Assign FunctionDef name:fun arg:self arg:x arguments arg arg arg Return return:yes Call Call Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_prefix_output_keys",
    "source_code": "def _prefix_output_keys(self, output_dict, output_name):\n    new_outputs = {}\n    for key, val in output_dict.items():\n        key = self._prefix_key(key, output_name)\n        new_outputs[key] = val\n    return new_outputs",
    "docstring": "Prepend output_name to the output_dict keys if it doesn't exist. This produces predictable prefixes for the pre-determined outputs of SupervisedOutput. Args: output_dict: dict of string to Tensor, assumed valid. output_name: prefix string to prepend to existing keys. Returns: dict with updated keys and existing values.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\saving\\utils_v1\\export_output.py",
    "ast_data": "FunctionDef name:_prefix_output_keys arg:self arg:output_dict arg:output_name arguments arg arg arg Assign For Call Assign Call Assign Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_generate_default_output_sharding",
    "source_code": "def _generate_default_output_sharding(node: Node, mesh: DeviceMesh, op_schema: OpSchema) -> OutputSharding:\n\n    def update_arg_spec(arg_spec: DTensorSpec) -> DTensorSpec:\n        return DTensorSpec(mesh=arg_spec.mesh, placements=(Replicate(),), tensor_meta=arg_spec.tensor_meta)\n    new_op_schema = OpSchema(op=op_schema.op, args_schema=pytree.tree_map_only(DTensorSpec, update_arg_spec, op_schema.args_schema), kwargs_schema=op_schema.kwargs_schema)\n\n    def create_output_spec(tensor: FakeTensor) -> DTensorSpec:\n        return DTensorSpec(mesh=mesh, placements=(Replicate(),), tensor_meta=TensorMeta(shape=tensor.shape, stride=tensor.stride(), dtype=tensor.dtype))\n    return OutputSharding(output_spec=pytree.tree_map_only(FakeTensor, create_output_spec, node.meta['val']), redistribute_schema=new_op_schema, needs_redistribute=True)",
    "docstring": "Util function to create a default output sharding that suggests Replicate placement for both args and outputs.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\tensor\\experimental\\_tp_transform.py",
    "ast_data": "FunctionDef name:_generate_default_output_sharding arg:node arg:mesh arg:op_schema arguments arg arg arg FunctionDef name:update_arg_spec arg:arg_spec arguments arg Return return:yes Call Call Assign Call Call FunctionDef name:create_output_spec arg:tensor arguments arg Return return:yes Call Call Call Call Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "register_op_strategy",
    "source_code": "def register_op_strategy(self, op_overload: OpOverload, strategy_func: Callable[[OpSchema], StrategyType], schema_info: Optional[RuntimeSchemaInfo]=None):\n    self.op_strategy_funcs[op_overload] = strategy_func\n    if schema_info is not None:\n        self.op_to_schema_info[op_overload] = schema_info",
    "docstring": "Register a sharding strategy generator for an operator.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\tensor\\_sharding_prop.py",
    "ast_data": "FunctionDef name:register_op_strategy arg:self arg:op_overload arg:strategy_func arg:schema_info arguments arg arg arg arg Assign If Compare Assign"
  },
  {
    "library": "kornia",
    "name": "__init__",
    "source_code": "def __init__(self, d_model: int, max_shape: Tuple[int, int]=(256, 256), temp_bug_fix: bool=True) -> None:\n    super().__init__()\n    self.d_model = d_model\n    self.temp_bug_fix = temp_bug_fix\n    pe = self._create_position_encoding(max_shape)\n    self.register_buffer('pe', pe, persistent=False)",
    "docstring": "Construct sinusoidal positional encoding. Args: d_model: Dimensions of model input. max_shape (tuple): for 1/8 featmap, the max length of 256 corresponds to 2048 pixels temp_bug_fix (bool): As noted in this [issue]( the original implementation of LoFTR includes a bug in the pos-enc impl, which has little impact on the final performance. For now, we keep both impls for backward compatibility. We will remove the buggy impl after re-training all variants of our released models.",
    "type": "method",
    "file_path": "kornia\\kornia\\feature\\loftr\\utils\\position_encoding.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:d_model arg:max_shape arg:temp_bug_fix arguments arg arg arg arg Call Call Assign Assign Assign Call Call"
  },
  {
    "library": "django",
    "name": "ComboField",
    "source_code": "class ComboField(Field):\n\n    def __init__(self, fields, **kwargs):\n        super().__init__(**kwargs)\n        for f in fields:\n            f.required = False\n        self.fields = fields\n\n    def clean(self, value):\n        super().clean(value)\n        for field in self.fields:\n            value = field.clean(value)\n        return value",
    "docstring": "A Field whose clean() method calls multiple Field clean() methods.",
    "type": "class",
    "file_path": "django\\django\\forms\\fields.py",
    "ast_data": "ClassDef name:ComboField FunctionDef name:__init__ arg:self arg:fields arguments arg arg arg Call Call For Assign Assign FunctionDef name:clean arg:self arg:value arguments arg arg Call Call For Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_ListSnapshotChunksDataset",
    "source_code": "class _ListSnapshotChunksDataset(dataset_ops.DatasetSource):\n\n    def __init__(self, snapshot_path: str):\n        self._snapshot_path = snapshot_path\n        variant_tensor = ged_ops.list_snapshot_chunks_dataset(snapshot_path, **self._flat_structure)\n        super().__init__(variant_tensor)\n\n    @property\n    def element_spec(self) -> tensor_spec.TensorSpec:\n        return tensor_spec.TensorSpec([], dtypes.string)",
    "docstring": "A dataset for listing snapshot chunk files. It supports listing partially written snapshots. When a snapshot is being written, it returns the currently available chunk files.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\data\\ops\\load_op.py",
    "ast_data": "ClassDef name:_ListSnapshotChunksDataset FunctionDef name:__init__ arg:self arg:snapshot_path arguments arg arg Assign Assign Call Call Call FunctionDef name:element_spec arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "cat",
    "source_code": "@_onnx_symbolic('aten::cat')\n@symbolic_helper.parse_args('v', 'i')\ndef cat(g: jit_utils.GraphContext, tensor_list, dim):\n    tensors = symbolic_helper._unpack_list(tensor_list)\n    nonempty_tensors = []\n    for t in tensors:\n        if symbolic_helper._is_constant(t) and (not symbolic_helper._get_tensor_dim_size(t, 0)):\n            continue\n        nonempty_tensors.append(t)\n    assert len(nonempty_tensors) > 0\n    assert all((symbolic_helper._get_tensor_rank(nonempty_tensors[0]) is None or symbolic_helper._get_tensor_rank(t) is None or symbolic_helper._get_tensor_rank(t) == symbolic_helper._get_tensor_rank(nonempty_tensors[0]) for t in nonempty_tensors))\n    tensor_list.node().removeAllInputs()\n    for t in nonempty_tensors:\n        tensor_list.node().addInput(t)\n    tensors = symbolic_helper._unpack_list(tensor_list)\n    return g.op('Concat', *tensors, axis_i=dim)",
    "docstring": "Implement concatenation of pytorch tensors in ONNX along the specified dimension. Parameters: g (jit_utils.GraphContext): Graph context. tensor_list (List[torch.Tensor]): List of tensors to concatenate. dim (int): Dimension along which to concatenate the tensors. Returns: ONNX graph node representing the concatenated tensor.",
    "type": "function",
    "file_path": "pytorch\\torch\\onnx\\symbolic_opset9.py",
    "ast_data": "FunctionDef name:cat arg:g arg:tensor_list arg:dim arguments arg arg arg Assign Call Assign For If BoolOp Call Call Call Compare Call Call BoolOp Compare Call Compare Call Compare Call Call Call Call For Call Call Assign Call Return return:yes Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "AllocFromPoolLine",
    "source_code": "@dataclasses.dataclass\nclass AllocFromPoolLine(PoolMemoryPlanningLine):\n    is_first_pool_usage: bool = False\n\n    def codegen(self, code: IndentedBuffer):\n        allocation = self.group.allocation\n        assert allocation and allocation.pool\n        pool = allocation.pool\n        name = self.node.get_name()\n        if self.is_first_pool_usage:\n            pool.codegen_create(self.wrapper, code)\n        pool.names_to_del.extend(self.group.names)\n        alloc_from_pool = allocation.codegen_alloc_from_pool(self.wrapper)\n        if alloc_from_pool in pool.creation_cache:\n            code.writeline(self.wrapper.make_tensor_alias(name, pool.creation_cache[alloc_from_pool], 'alloc'))\n        else:\n            pool.creation_cache[alloc_from_pool] = name\n            code.writeline(f'{self.wrapper.declare}{name} = {alloc_from_pool}{self.wrapper.ending}')",
    "docstring": "Similar to AllocationLine, but takes memory from a pool",
    "type": "class",
    "file_path": "pytorch\\torch\\_inductor\\codegen\\memory_planning.py",
    "ast_data": "ClassDef name:AllocFromPoolLine FunctionDef name:codegen arg:self arg:code arguments arg arg Assign BoolOp Assign Assign Call If Call Call Assign Call If Compare Call Call Assign Call"
  },
  {
    "library": "scikit-learn",
    "name": "fit_transform",
    "source_code": "@_fit_context(prefer_skip_nested_validation=False)\ndef fit_transform(self, X, y=None):\n    self._fit_transform(X)\n    return self.embedding_",
    "docstring": "Fit the model from data in X and transform X. Parameters ---------- X : {array-like, sparse matrix, BallTree, KDTree} Training vector, where is the number of samples and is the number of features. y : Ignored Not used, present for API consistency by convention. Returns ------- X_new : array-like, shape (n_samples, n_components) X transformed in the new space.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\manifold\\_isomap.py",
    "ast_data": "FunctionDef name:fit_transform arg:self arg:X arg:y arguments arg arg arg Call Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "get_metadata_routing",
    "source_code": "def get_metadata_routing(self):\n    router = MetadataRouter(owner=self.__class__.__name__).add(estimator=self.estimator, method_mapping=MethodMapping().add(caller='fit', callee='fit').add(caller='predict', callee='predict').add(caller='score', callee='score'))\n    return router",
    "docstring": "Get metadata routing of this object. Please check :ref: on how the routing mechanism works. .. versionadded:: 1.6 Returns ------- routing : MetadataRouter A :class: encapsulating routing information.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\feature_selection\\_rfe.py",
    "ast_data": "FunctionDef name:get_metadata_routing arg:self arguments arg Assign Call Call Call Call Call Call Return return:yes"
  },
  {
    "library": "numpy",
    "name": "get_names_flat",
    "source_code": "def get_names_flat(adtype):\n    listnames = []\n    names = adtype.names\n    for name in names:\n        listnames.append(name)\n        current = adtype[name]\n        if current.names is not None:\n            listnames.extend(get_names_flat(current))\n    return tuple(listnames)",
    "docstring": "Returns the field names of the input datatype as a tuple. Input datatype must have fields otherwise error is raised. Nested structure are flattened beforehand. Parameters ---------- adtype : dtype Input datatype Examples -------- >>> import numpy as np >>> from numpy.lib import recfunctions as rfn >>> rfn.get_names_flat(np.empty((1,), dtype=[('A', int)]).dtype) is None False >>> rfn.get_names_flat(np.empty((1,), dtype=[('A',int), ('B', str)]).dtype) ('A', 'B') >>> adtype = np.dtype([('a', int), ('b', [('ba', int), ('bb', int)])]) >>> rfn.get_names_flat(adtype) ('a', 'b', 'ba', 'bb')",
    "type": "function",
    "file_path": "numpy\\numpy\\lib\\recfunctions.py",
    "ast_data": "FunctionDef name:get_names_flat arg:adtype arguments arg Assign Assign For Call Assign If Compare Call Call Return return:yes Call"
  },
  {
    "library": "scrapy",
    "name": "enqueue_request",
    "source_code": "def enqueue_request(self, request: Request) -> bool:\n    if not request.dont_filter and self.df.request_seen(request):\n        self.df.log(request, self.spider)\n        return False\n    dqok = self._dqpush(request)\n    assert self.stats is not None\n    if dqok:\n        self.stats.inc_value('scheduler/enqueued/disk', spider=self.spider)\n    else:\n        self._mqpush(request)\n        self.stats.inc_value('scheduler/enqueued/memory', spider=self.spider)\n    self.stats.inc_value('scheduler/enqueued', spider=self.spider)\n    return True",
    "docstring": "Unless the received request is filtered out by the Dupefilter, attempt to push it into the disk queue, falling back to pushing it into the memory queue. Increment the appropriate stats, such as: `` otherwise.",
    "type": "method",
    "file_path": "scrapy\\scrapy\\core\\scheduler.py",
    "ast_data": "FunctionDef name:enqueue_request arg:self arg:request arguments arg arg If BoolOp Call Call Return return:yes Assign Call Compare If Call Call Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "__init__",
    "source_code": "def __init__(self, *, verbose: bool=False, dump: bool=False, artifacts_dir: str | os.PathLike='.', timestamp: str | None=None):\n    self._verbose_print = _verbose_printer(verbose)\n    self._dump = dump\n    self._artifacts_dir = pathlib.Path(artifacts_dir)\n    self._timestamp = timestamp or datetime.datetime.now().strftime('%Y-%m-%d_%H-%M-%S-%f')\n    self._exception: Exception | None = None",
    "docstring": "Initialize the strategy. Args: verbose: Whether to print verbose messages. dump: Whether to dump the intermediate artifacts to a file.",
    "type": "method",
    "file_path": "pytorch\\torch\\onnx\\_internal\\exporter\\_capture_strategies.py",
    "ast_data": "FunctionDef name:__init__ arg:self arguments arg arg arg arg arg Assign Call Assign Assign Call Assign BoolOp Call Call"
  },
  {
    "library": "pytorch",
    "name": "mish",
    "source_code": "@register_decomposition(aten.mish)\n@_inplace_wrapper\n@out_wrapper()\n@elementwise_type_promotion_wrapper(type_promoting_args=('a',), type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT)\ndef mish(a: TensorLikeType, inplace: bool=False) -> TensorLikeType:\n    if inplace:\n        raise NotImplementedError\n    return a * torch.tanh(torch.nn.functional.softplus(a))",
    "docstring": "Reference implementation of torch.nn.functional.mish",
    "type": "function",
    "file_path": "pytorch\\torch\\_refs\\nn\\functional\\__init__.py",
    "ast_data": "FunctionDef name:mish arg:a arg:inplace arguments arg arg If Raise Return return:yes Call Call Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "ScopedTFFunction",
    "source_code": "class ScopedTFFunction(UniquePtr):\n\n    def __init__(self, func, name):\n        super(ScopedTFFunction, self).__init__(name=name, obj=func, deleter=c_api.TF_DeleteFunction)",
    "docstring": "Wrapper around TF_Function that handles deletion.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\c_api_util.py",
    "ast_data": "ClassDef name:ScopedTFFunction FunctionDef name:__init__ arg:self arg:func arg:name arguments arg arg arg Call Call"
  },
  {
    "library": "pytorch",
    "name": "add_constraint",
    "source_code": "def add_constraint(self, constraint: Callable):\n    self.constraints.append(constraint)\n    self._validated = False",
    "docstring": "Adds a constraint into the current list of constraints.",
    "type": "method",
    "file_path": "pytorch\\torch\\fx\\passes\\infra\\pass_manager.py",
    "ast_data": "FunctionDef name:add_constraint arg:self arg:constraint arguments arg arg Call Assign"
  },
  {
    "library": "tensorflow",
    "name": "RepeatedTimer",
    "source_code": "class RepeatedTimer(object):\n\n    def __init__(self, interval, function, *args):\n        self._timer = None\n        self.interval = interval\n        self.function = function\n        self.args = args\n        self.start_time = time.time()\n        self.is_running = False\n        self.start()\n\n    def _get_duration_sec(self):\n        return int(time.time() - self.start_time)\n\n    def _run(self):\n        self.is_running = False\n        self.start()\n        self.function(*self.args)\n\n    def start(self):\n        if not self.is_running:\n            self._timer = threading.Timer(self.interval, self._run)\n            self._timer.start()\n            self.is_running = True\n\n    def stop(self):\n        duration = self._get_duration_sec()\n        self._timer.cancel()\n        self.is_running = False\n        return duration",
    "docstring": "Threaded Repeated Timer from",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\coordinator\\utils.py",
    "ast_data": "ClassDef name:RepeatedTimer FunctionDef name:__init__ arg:self arg:interval arg:function arguments arg arg arg arg Assign Assign Assign Assign Assign Call Assign Call FunctionDef name:_get_duration_sec arg:self arguments arg Return return:yes Call Call FunctionDef name:_run arg:self arguments arg Assign Call Call FunctionDef name:start arg:self arguments arg If Assign Call Call Assign FunctionDef name:stop arg:self arguments arg Assign Call Call Assign Return return:yes"
  },
  {
    "library": "pandas",
    "name": "validate_func_kwargs",
    "source_code": "def validate_func_kwargs(kwargs: dict) -> tuple[list[str], list[str | Callable[..., Any]]]:\n    tuple_given_message = 'func is expected but received {} in **kwargs.'\n    columns = list(kwargs)\n    func = []\n    for col_func in kwargs.values():\n        if not (isinstance(col_func, str) or callable(col_func)):\n            raise TypeError(tuple_given_message.format(type(col_func).__name__))\n        func.append(col_func)\n    if not columns:\n        no_arg_message = \"Must provide 'func' or named aggregation **kwargs.\"\n        raise TypeError(no_arg_message)\n    return (columns, func)",
    "docstring": "Validates types of user-provided \"named aggregation\" kwargs. is raised if aggfunc is not or callable. Parameters ---------- kwargs : dict Returns ------- columns : List[str] List of user-provided keys. func : List[Union[str, callable[...,Any]]] List of user-provided aggfuncs Examples -------- >>> validate_func_kwargs({\"one\": \"min\", \"two\": \"max\"}) (['one', 'two'], ['min', 'max'])",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\apply.py",
    "ast_data": "FunctionDef name:validate_func_kwargs arg:kwargs arguments arg Assign Assign Call Assign For Call If BoolOp Call Call Raise Call Call Call Call If Assign Raise Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "upsample_nearest",
    "source_code": "def upsample_nearest(input, size=None, scale_factor=None):\n    warnings.warn('nn.quantized.functional.upsample_nearest is deprecated. Use nn.quantized.functional.interpolate instead.')\n    return interpolate(input, size, scale_factor, mode='nearest')",
    "docstring": "Upsamples the input, using nearest neighbours' pixel values. .. warning:: This function is deprecated in favor of :func:. This is equivalent with ``. .. note:: The input quantization parameters propagate to the output. .. note:: Only 2D inputs are supported Args: input (Tensor): quantized input size (int or Tuple[int, int] or Tuple[int, int, int]): output spatial size. scale_factor (int): multiplier for spatial size. Has to be an integer.",
    "type": "function",
    "file_path": "pytorch\\torch\\ao\\nn\\quantized\\functional.py",
    "ast_data": "FunctionDef name:upsample_nearest arg:input arg:size arg:scale_factor arguments arg arg arg Call Return return:yes Call"
  },
  {
    "library": "numpy",
    "name": "real_if_close",
    "source_code": "@array_function_dispatch(_real_if_close_dispatcher)\ndef real_if_close(a, tol=100):\n    a = asanyarray(a)\n    type_ = a.dtype.type\n    if not issubclass(type_, _nx.complexfloating):\n        return a\n    if tol > 1:\n        f = getlimits.finfo(type_)\n        tol = f.eps * tol\n    if _nx.all(_nx.absolute(a.imag) < tol):\n        a = a.real\n    return a",
    "docstring": "If input is complex with all imaginary parts close to zero, return real parts. \"Close to zero\" is defined as * (machine epsilon of the type for ). Parameters ---------- a : array_like Input array. tol : float Tolerance in machine epsilons for the complex part of the elements in the array. If the tolerance is >> import numpy as np >>> np.finfo(float).eps 2.2204460492503131e-16 # may vary >>> np.real_if_close([2.1 + 4e-14j, 5.2 + 3e-15j], tol=1000) array([2.1, 5.2]) >>> np.real_if_close([2.1 + 4e-13j, 5.2 + 3e-15j], tol=1000) array([2.1+4.e-13j, 5.2 + 3e-15j])",
    "type": "function",
    "file_path": "numpy\\numpy\\lib\\_type_check_impl.py",
    "ast_data": "FunctionDef name:real_if_close arg:a arg:tol arguments arg arg Assign Call Assign If Call Return return:yes If Compare Assign Call Assign If Call Compare Call Assign Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "fit",
    "source_code": "def fit(self, X, y=None):\n    return self",
    "docstring": "Do nothing and return the estimator unchanged. This method is just there to implement the usual API and hence work in pipelines. Parameters ---------- X : Ignored Not used, present for API consistency by convention. y : Ignored Not used, present for API consistency by convention. Returns ------- self : object Returns the instance itself.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\decomposition\\_dict_learning.py",
    "ast_data": "FunctionDef name:fit arg:self arg:X arg:y arguments arg arg arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_asset_path_from_tensor",
    "source_code": "def _asset_path_from_tensor(path_tensor):\n    if not isinstance(path_tensor, tensor.Tensor):\n        raise TypeError(f'Asset path tensor {path_tensor} must be a Tensor.')\n    if path_tensor.op.type != 'Const':\n        raise TypeError(f'Asset path tensor {path_tensor} must be of type constant.Has type {path_tensor.op.type} instead.')\n    if path_tensor.dtype != dtypes.string:\n        raise TypeError(f'Asset path tensor {path_tensor}` must be of dtype string.Has type {path_tensor.dtype} instead.')\n    str_values = path_tensor.op.get_attr('value').string_val\n    if len(str_values) != 1:\n        raise TypeError(f'Asset path tensor {path_tensor} must be a scalar.')\n    return str_values[0]",
    "docstring": "Returns the filepath value stored in constant . Args: path_tensor: Tensor of a file-path. Returns: The string value i.e. path of the tensor, if valid. Raises: TypeError if tensor does not match expected op type, dtype or value.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\saved_model\\builder_impl.py",
    "ast_data": "FunctionDef name:_asset_path_from_tensor arg:path_tensor arguments arg If Call Raise Call If Compare Raise Call If Compare Raise Call Assign Call If Compare Call Raise Call Return return:yes"
  },
  {
    "library": "kornia",
    "name": "_load_base",
    "source_code": "def _load_base(self) -> list[RGBColor]:\n    return {'autumn': cm_data.get_autumn_base, 'bone': cm_data.get_bone_base, 'jet': cm_data.get_jet_base, 'winter': cm_data.get_winter_base, 'rainbow': cm_data.get_rainbow_base, 'ocean': cm_data.get_ocean_base, 'summer': cm_data.get_summer_base, 'spring': cm_data.get_spring_base, 'cool': cm_data.get_cool_base, 'hsv': cm_data.get_hsv_base, 'brg': cm_data.get_bgr_base, 'pink': cm_data.get_pink_base, 'hot': cm_data.get_hot_base, 'plasma': cm_data.get_plasma_base, 'viridis': cm_data.get_viridis_base, 'cividis': cm_data.get_cividis_base, 'twilight': cm_data.get_twilight_base, 'turbo': cm_data.get_turbo_base, 'seismic': cm_data.get_seismic_base}[self.name]()",
    "docstring": "Load the base colormap corresponding to the enumeration member. Returns: The base colormap.",
    "type": "method",
    "file_path": "kornia\\kornia\\color\\colormap.py",
    "ast_data": "FunctionDef name:_load_base arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "cherrypy",
    "name": "__init__",
    "source_code": "def __init__(self, fp, length, maxbytes, bufsize=DEFAULT_BUFFER_SIZE, has_trailers=False):\n    self.fp = fp\n    self.length = length\n    self.maxbytes = maxbytes\n    self.buffer = b''\n    self.bufsize = bufsize\n    self.bytes_read = 0\n    self.done = False\n    self.has_trailers = has_trailers",
    "docstring": "Initialize buffered file handle reader.",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\_cpreqbody.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:fp arg:length arg:maxbytes arg:bufsize arg:has_trailers arguments arg arg arg arg arg arg Assign Assign Assign Assign Assign Assign Assign Assign"
  },
  {
    "library": "scipy",
    "name": "redrat",
    "source_code": "def redrat(ared, pred, rshrink):\n    if DEBUGGING:\n        assert rshrink >= 0\n    if np.isnan(ared):\n        ratio = -REALMAX\n    elif np.isnan(pred) or pred <= 0:\n        if ared > 0:\n            ratio = rshrink / 2\n        else:\n            ratio = -REALMAX\n    elif np.isposinf(pred) and np.isposinf(ared):\n        ratio = 1\n    elif np.isposinf(pred) and np.isneginf(ared):\n        ratio = -REALMAX\n    else:\n        ratio = ared / pred\n    if DEBUGGING:\n        assert not np.isnan(ratio)\n    return ratio",
    "docstring": "This function evaluates the reduction ratio of a trust-region step, handling inf/nan properly.",
    "type": "function",
    "file_path": "scipy\\scipy\\_lib\\pyprima\\pyprima\\src\\pyprima\\common\\ratio.py",
    "ast_data": "FunctionDef name:redrat arg:ared arg:pred arg:rshrink arguments arg arg arg If Compare If Call Assign If BoolOp Call Compare If Compare Assign Assign If BoolOp Call Call Assign If BoolOp Call Call Assign Assign If Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "set_math_fontfamily",
    "source_code": "def set_math_fontfamily(self, fontfamily):\n    self._fontproperties.set_math_fontfamily(fontfamily)",
    "docstring": "Set the font family for math text rendered by Matplotlib. This does only affect Matplotlib's own math renderer. It has no effect when rendering with TeX (`default matplotlibrc file `. See Also -------- get_math_fontfamily",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\text.py",
    "ast_data": "FunctionDef name:set_math_fontfamily arg:self arg:fontfamily arguments arg arg Call"
  },
  {
    "library": "tensorflow",
    "name": "get_extra_inputs",
    "source_code": "def get_extra_inputs():\n    g = ops.get_default_graph()\n    if isinstance(g, _FuncGraph):\n        return g.extra_inputs\n    else:\n        return []",
    "docstring": "Returns the captured input tensors by the function. Returns: If the default graph is being used to define a function, the returned list of tensors are those accessed inside the function body but defined outside the function body so far. Otherwise, returns an empty list.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\function.py",
    "ast_data": "FunctionDef name:get_extra_inputs arguments Assign Call If Call Return return:yes Return return:no"
  },
  {
    "library": "matplotlib",
    "name": "pickable",
    "source_code": "def pickable(self):\n    return self.get_figure(root=False) is not None and self._picker is not None",
    "docstring": "Return whether the artist is pickable. See Also -------- .Artist.set_picker, .Artist.get_picker, .Artist.pick",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\artist.py",
    "ast_data": "FunctionDef name:pickable arg:self arguments arg Return return:yes BoolOp Compare Call Compare"
  },
  {
    "library": "tensorflow",
    "name": "matvec",
    "source_code": "def matvec(self, x, adjoint=False, name='matvec'):\n    with self._name_scope(name):\n        block_dimensions = self._block_range_dimensions() if adjoint else self._block_domain_dimensions()\n        if linear_operator_util.arg_is_blockwise(block_dimensions, x, -1):\n            for i, block in enumerate(x):\n                if not isinstance(block, linear_operator.LinearOperator):\n                    block = tensor_conversion.convert_to_tensor_v2_with_dispatch(block)\n                    self._check_input_dtype(block)\n                    block_dimensions[i].assert_is_compatible_with(block.shape[-1])\n                    x[i] = block\n            x_mat = [block[..., array_ops.newaxis] for block in x]\n            y_mat = self.matmul(x_mat, adjoint=adjoint)\n            return [array_ops.squeeze(y, axis=-1) for y in y_mat]\n        x = tensor_conversion.convert_to_tensor_v2_with_dispatch(x, name='x')\n        self._check_input_dtype(x)\n        op_dimension = self.range_dimension if adjoint else self.domain_dimension\n        op_dimension.assert_is_compatible_with(x.shape[-1])\n        x_mat = x[..., array_ops.newaxis]\n        y_mat = self.matmul(x_mat, adjoint=adjoint)\n        return array_ops.squeeze(y_mat, axis=-1)",
    "docstring": "Transform [batch] vector with left multiplication: . Args: x: with compatible shape and same as , or an iterable of s. s are treated a [batch] vectors, meaning for every set of leading dimensions, the last dimension defines a vector. See class docstring for definition of compatibility. adjoint: Python . If , left multiply by the adjoint: . name: A name for this . Returns: A with shape and same as .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\linalg\\linear_operator_block_lower_triangular.py",
    "ast_data": "FunctionDef name:matvec arg:self arg:x arg:adjoint arg:name arguments arg arg arg arg With Call Assign Call Call If Call For Call If Call Assign Call Call Call Assign Assign Assign Call Return return:yes Call Assign Call Call Assign Call Assign Assign Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "sparse_slice",
    "source_code": "@tf_export('sparse.slice', v1=['sparse.slice', 'sparse_slice'])\n@deprecation.deprecated_endpoints('sparse_slice')\ndef sparse_slice(sp_input, start, size, name=None):\n    sp_input = _convert_to_sparse_tensor(sp_input)\n    start = ops.convert_to_tensor(start, dtypes.int64)\n    size = ops.convert_to_tensor(size, dtypes.int64)\n    with ops.name_scope(name, 'SparseSlice', [sp_input]) as name:\n        output_indices, output_values, output_shape = gen_sparse_ops.sparse_slice(sp_input.indices, sp_input.values, sp_input.dense_shape, start, size, name=name)\n        return sparse_tensor.SparseTensor(output_indices, output_values, output_shape)",
    "docstring": "Slice a based on the and . For example, if the input is input_tensor = shape = [2, 7] [ a d e ] [b c ] Graphically the output tensors are: sparse.slice([0, 0], [2, 4]) = shape = [2, 4] [ a ] [b c ] sparse.slice([0, 4], [2, 3]) = shape = [2, 3] [ d e ] [ ] Args: sp_input: The to split. start: 1-D. tensor represents the start of the slice. size: 1-D. tensor represents the size of the slice. name: A name for the operation (optional). Returns: A objects resulting from splicing. Raises: TypeError: If is not a .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\sparse_ops.py",
    "ast_data": "FunctionDef name:sparse_slice arg:sp_input arg:start arg:size arg:name arguments arg arg arg arg Assign Call Assign Call Assign Call With Call Assign Call Return return:yes Call Call Call"
  },
  {
    "library": "matplotlib",
    "name": "home",
    "source_code": "def home(self):\n    self.views[self.figure].home()\n    self.positions[self.figure].home()",
    "docstring": "Recall the first view and position from the stack.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\backend_tools.py",
    "ast_data": "FunctionDef name:home arg:self arguments arg Call Call"
  },
  {
    "library": "tensorflow",
    "name": "check_table_dtypes",
    "source_code": "def check_table_dtypes(table, key_dtype, value_dtype):\n    if key_dtype.base_dtype != table.key_dtype:\n        raise TypeError(f'Invalid key dtype for table, expected {table.key_dtype} but got {key_dtype}.')\n    if value_dtype.base_dtype != table.value_dtype:\n        raise TypeError(f'Invalid value dtype for table, expected {table.value_dtype} but got {value_dtype}.')",
    "docstring": "Check that the given key_dtype and value_dtype matches the table dtypes. Args: table: The table to check types against to. key_dtype: The key data type to check. value_dtype: The value data type to check. Raises: TypeError: when 'key_dtype' or 'value_dtype' doesn't match the table data types.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\lookup_ops.py",
    "ast_data": "FunctionDef name:check_table_dtypes arg:table arg:key_dtype arg:value_dtype arguments arg arg arg If Compare Raise Call If Compare Raise Call"
  },
  {
    "library": "tensorflow",
    "name": "WhileCondFuncGraph",
    "source_code": "class WhileCondFuncGraph(ControlFlowFuncGraph):\n    pass",
    "docstring": "FuncGraph for the condition of tf.while_loop(). This is used to distinguish while conditions from other functions.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\control_flow_v2_func_graphs.py",
    "ast_data": "ClassDef name:WhileCondFuncGraph"
  },
  {
    "library": "kornia",
    "name": "linear_rgb_to_rgb",
    "source_code": "def linear_rgb_to_rgb(image: Tensor) -> Tensor:\n    if not isinstance(image, Tensor):\n        raise TypeError(f'Input type is not a Tensor. Got {type(image)}')\n    if len(image.shape) < 3 or image.shape[-3] != 3:\n        raise ValueError(f'Input size must have a shape of (*, 3, H, W).Got {image.shape}')\n    threshold = 0.0031308\n    rgb: Tensor = torch.where(image > threshold, 1.055 * torch.pow(image.clamp(min=threshold), 1 / 2.4) - 0.055, 12.92 * image)\n    return rgb",
    "docstring": "Convert a linear RGB image to sRGB. Used in colorspace conversions. Args: image: linear RGB Image to be converted to sRGB of shape :math:. Returns: sRGB version of the image with shape of shape :math:. Example: >>> input = torch.rand(2, 3, 4, 5) >>> output = linear_rgb_to_rgb(input) # 2x3x4x5",
    "type": "function",
    "file_path": "kornia\\kornia\\color\\rgb.py",
    "ast_data": "FunctionDef name:linear_rgb_to_rgb arg:image arguments arg If Call Raise Call Call If BoolOp Compare Call Compare Raise Call Assign Call Compare Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, dtype, shape, accumulator_ref):\n    self._dtype = dtype\n    if shape is not None:\n        self._shape = tensor_shape.TensorShape(shape)\n    else:\n        self._shape = tensor_shape.unknown_shape()\n    self._accumulator_ref = accumulator_ref\n    if context.executing_eagerly():\n        self._name = context.context().scope_name\n    else:\n        self._name = self._accumulator_ref.op.name.split('/')[-1]",
    "docstring": "Creates a new ConditionalAccumulator. Args: dtype: Datatype of the accumulated gradients. shape: Shape of the accumulated gradients. accumulator_ref: A handle to the conditional accumulator, created by sub- classes",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\data_flow_ops.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:dtype arg:shape arg:accumulator_ref arguments arg arg arg arg Assign If Compare Assign Call Assign Call Assign If Call Assign Call Assign Call"
  },
  {
    "library": "tensorflow",
    "name": "get_asset_tensors",
    "source_code": "def get_asset_tensors(export_dir, meta_graph_def_to_load, import_scope=None):\n    collection_def = meta_graph_def_to_load.collection_def\n    asset_tensor_dict = {}\n    asset_protos = []\n    if meta_graph_def_to_load.asset_file_def:\n        asset_protos = meta_graph_def_to_load.asset_file_def\n    elif constants.ASSETS_KEY in collection_def:\n        assets_any_proto = collection_def[constants.ASSETS_KEY].any_list.value\n        for asset_any_proto in assets_any_proto:\n            asset_proto = meta_graph_pb2.AssetFileDef()\n            asset_any_proto.Unpack(asset_proto)\n            asset_protos.append(asset_proto)\n    assets_directory = file_io.join(compat.as_bytes(export_dir), compat.as_bytes(constants.ASSETS_DIRECTORY))\n    for asset_proto in asset_protos:\n        tensor_name = asset_proto.tensor_info.name\n        if import_scope:\n            tensor_name = '%s/%s' % (import_scope, tensor_name)\n        asset_tensor_dict[tensor_name] = file_io.join(compat.as_bytes(assets_directory), compat.as_bytes(asset_proto.filename))\n    return asset_tensor_dict",
    "docstring": "Gets the asset tensors, if defined in the meta graph def to load. Args: export_dir: Directory where the SavedModel is located. meta_graph_def_to_load: The meta graph def from the SavedModel to be loaded. import_scope: Optional -- if specified, prepend this followed by '/' to all returned asset tensor names. Returns: A dictionary of asset tensors, keyed by the name of the asset tensor. The value in the map corresponds to the absolute path of the asset file.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\saved_model\\loader_impl.py",
    "ast_data": "FunctionDef name:get_asset_tensors arg:export_dir arg:meta_graph_def_to_load arg:import_scope arguments arg arg arg Assign Assign Assign If Assign If Compare Assign For Assign Call Call Call Assign Call Call Call For Assign If Assign Assign Call Call Call Return return:yes"
  },
  {
    "library": "django",
    "name": "select_format",
    "source_code": "def select_format(self, compiler, sql, params):\n    return (sql, params)",
    "docstring": "Custom format for select clauses. For example, GIS columns need to be selected as AsText(table.col) on MySQL as the table.col data can't be used by Django.",
    "type": "method",
    "file_path": "django\\django\\db\\models\\fields\\__init__.py",
    "ast_data": "FunctionDef name:select_format arg:self arg:compiler arg:sql arg:params arguments arg arg arg arg Return return:yes"
  },
  {
    "library": "django",
    "name": "body_contains",
    "source_code": "def body_contains(self, text):\n    if text not in self.body:\n        return False\n    for content, mimetype in self.alternatives:\n        if mimetype.startswith('text/') and text not in content:\n            return False\n    return True",
    "docstring": "Checks that `` occurs in the email body and in all attached MIME type text/* alternatives.",
    "type": "method",
    "file_path": "django\\django\\core\\mail\\message.py",
    "ast_data": "FunctionDef name:body_contains arg:self arg:text arguments arg arg If Compare Return return:yes For If BoolOp Call Compare Return return:yes Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "initialize_all_variables",
    "source_code": "@tf_export(v1=['initialize_all_variables'])\n@tf_should_use.should_use_result\n@deprecated('2017-03-02', 'Use `tf.global_variables_initializer` instead.')\ndef initialize_all_variables():\n    return global_variables_initializer()",
    "docstring": "See .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\variables.py",
    "ast_data": "FunctionDef name:initialize_all_variables arguments Return return:yes Call Call Call"
  },
  {
    "library": "scipy",
    "name": "derivative",
    "source_code": "def derivative(self, nu=1):\n    if nu < 0:\n        return self.antiderivative(-nu)\n    if nu == 0:\n        c2 = self.c.copy()\n    else:\n        c2 = self.c[:-nu, :].copy()\n    if c2.shape[0] == 0:\n        c2 = np.zeros((1,) + c2.shape[1:], dtype=c2.dtype)\n    factor = spec.poch(np.arange(c2.shape[0], 0, -1), nu)\n    c2 *= factor[(slice(None),) + (None,) * (c2.ndim - 1)]\n    return self.construct_fast(c2, self.x, self.extrapolate, self.axis)",
    "docstring": "Construct a new piecewise polynomial representing the derivative. Parameters ---------- nu : int, optional Order of derivative to evaluate. Default is 1, i.e., compute the first derivative. If negative, the antiderivative is returned. Returns ------- pp : PPoly Piecewise polynomial of order k2 = k - n representing the derivative of this polynomial. Notes ----- Derivatives are evaluated piecewise for each polynomial segment, even if the polynomial is not differentiable at the breakpoints. The polynomial intervals are considered half-open, ``.",
    "type": "method",
    "file_path": "scipy\\scipy\\interpolate\\_interpolate.py",
    "ast_data": "FunctionDef name:derivative arg:self arg:nu arguments arg arg If Compare Return return:yes Call If Compare Assign Call Assign Call If Compare Assign Call Assign Call Call Call Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "_parse_latex_table_wrapping",
    "source_code": "def _parse_latex_table_wrapping(table_styles: CSSStyles, caption: str | None) -> bool:\n    IGNORED_WRAPPERS = ['toprule', 'midrule', 'bottomrule', 'column_format']\n    return table_styles is not None and any((d['selector'] not in IGNORED_WRAPPERS for d in table_styles)) or caption is not None",
    "docstring": "Indicate whether LaTeX {tabular} should be wrapped with a {table} environment. Parses the and detects any selectors which must be included outside of {tabular}, i.e. indicating that wrapping must occur, and therefore return True, or if a caption exists and requires similar.",
    "type": "function",
    "file_path": "pandas\\pandas\\io\\formats\\style_render.py",
    "ast_data": "FunctionDef name:_parse_latex_table_wrapping arg:table_styles arg:caption arguments arg arg Assign Return return:yes BoolOp BoolOp Compare Call Compare Compare"
  },
  {
    "library": "matplotlib",
    "name": "shrunk_to_aspect",
    "source_code": "def shrunk_to_aspect(self, box_aspect, container=None, fig_aspect=1.0):\n    if box_aspect <= 0 or fig_aspect <= 0:\n        raise ValueError(\"'box_aspect' and 'fig_aspect' must be positive\")\n    if container is None:\n        container = self\n    w, h = container.size\n    H = w * box_aspect / fig_aspect\n    if H <= h:\n        W = w\n    else:\n        W = h * fig_aspect / box_aspect\n        H = h\n    return Bbox([self._points[0], self._points[0] + (W, H)])",
    "docstring": "Return a copy of the , shrunk so that it is as large as it can be while having the desired aspect ratio, *box_aspect*. If the box coordinates are relative (i.e. fractions of a larger box such as a figure) then the physical aspect ratio of that figure is specified with *fig_aspect*, so that *box_aspect* can also be given as a ratio of the absolute dimensions, not the relative dimensions.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\transforms.py",
    "ast_data": "FunctionDef name:shrunk_to_aspect arg:self arg:box_aspect arg:container arg:fig_aspect arguments arg arg arg arg If BoolOp Compare Compare Raise Call If Compare Assign Assign Assign If Compare Assign Assign Assign Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "Literal",
    "source_code": "class Literal(collections.namedtuple('Literal', ['value'])):\n\n    def __str__(self):\n        if isinstance(self.value, str):\n            return \"'{}'\".format(self.value)\n        return str(self.value)\n\n    def __repr__(self):\n        return str(self)",
    "docstring": "Represents a Python numeric literal.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\autograph\\pyct\\qual_names.py",
    "ast_data": "ClassDef name:Literal Call FunctionDef name:__str__ arg:self arguments arg If Call Return return:yes Call Return return:yes Call FunctionDef name:__repr__ arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "ExactWeakKeyDictionary",
    "source_code": "class ExactWeakKeyDictionary:\n\n    def __init__(self):\n        self.values = {}\n        self.refs = {}\n\n    def __getitem__(self, key):\n        return self.values[id(key)]\n\n    def get(self, key, default=None):\n        return self.values.get(id(key), default)\n\n    def __contains__(self, key):\n        return id(key) in self.values\n\n    def __setitem__(self, key, value):\n        idx = id(key)\n        if idx not in self.refs:\n            self.refs[idx] = weakref.ref(key, lambda ref: self._remove_id(idx))\n        self.values[idx] = value\n\n    def _remove_id(self, idx):\n        if idx in self.values:\n            del self.values[idx]\n        if idx in self.refs:\n            del self.refs[idx]\n\n    def clear(self):\n        self.refs.clear()\n        self.values.clear()",
    "docstring": "Similar to weakref.WeakKeyDictionary, but use / rather than to compare equality",
    "type": "class",
    "file_path": "pytorch\\torch\\_dynamo\\utils.py",
    "ast_data": "ClassDef name:ExactWeakKeyDictionary FunctionDef name:__init__ arg:self arguments arg Assign Assign FunctionDef name:__getitem__ arg:self arg:key arguments arg arg Return return:yes Call FunctionDef name:get arg:self arg:key arg:default arguments arg arg arg Return return:yes Call Call FunctionDef name:__contains__ arg:self arg:key arguments arg arg Return return:yes Compare Call FunctionDef name:__setitem__ arg:self arg:key arg:value arguments arg arg arg Assign Call If Compare Assign Call arguments arg Call Assign FunctionDef name:_remove_id arg:self arg:idx arguments arg arg If Compare If Compare FunctionDef name:clear arg:self arguments arg Call Call"
  },
  {
    "library": "scikit-learn",
    "name": "BadDataFormat",
    "source_code": "class BadDataFormat(ArffException):\n\n    def __init__(self, value):\n        super().__init__()\n        self.message = 'Bad @DATA instance format in line %d: ' + '%s' % value",
    "docstring": "Error raised when some data instance is in an invalid format.",
    "type": "class",
    "file_path": "scikit-learn\\sklearn\\externals\\_arff.py",
    "ast_data": "ClassDef name:BadDataFormat FunctionDef name:__init__ arg:self arg:value arguments arg arg Call Call Assign"
  },
  {
    "library": "scrapy",
    "name": "urljoin",
    "source_code": "def urljoin(self, url: str) -> str:\n    return urljoin(get_base_url(self), url)",
    "docstring": "Join this Response's url with a possible relative url to form an absolute interpretation of the latter.",
    "type": "method",
    "file_path": "scrapy\\scrapy\\http\\response\\text.py",
    "ast_data": "FunctionDef name:urljoin arg:self arg:url arguments arg arg Return return:yes Call Call"
  },
  {
    "library": "django",
    "name": "GZipMiddleware",
    "source_code": "class GZipMiddleware(MiddlewareMixin):\n    max_random_bytes = 100\n\n    def process_response(self, request, response):\n        if not response.streaming and len(response.content) < 200:\n            return response\n        if response.has_header('Content-Encoding'):\n            return response\n        patch_vary_headers(response, ('Accept-Encoding',))\n        ae = request.META.get('HTTP_ACCEPT_ENCODING', '')\n        if not re_accepts_gzip.search(ae):\n            return response\n        if response.streaming:\n            if response.is_async:\n                orignal_iterator = response.streaming_content\n\n                async def gzip_wrapper():\n                    async for chunk in orignal_iterator:\n                        yield compress_string(chunk, max_random_bytes=self.max_random_bytes)\n                response.streaming_content = gzip_wrapper()\n            else:\n                response.streaming_content = compress_sequence(response.streaming_content, max_random_bytes=self.max_random_bytes)\n            del response.headers['Content-Length']\n        else:\n            compressed_content = compress_string(response.content, max_random_bytes=self.max_random_bytes)\n            if len(compressed_content) >= len(response.content):\n                return response\n            response.content = compressed_content\n            response.headers['Content-Length'] = str(len(response.content))\n        etag = response.get('ETag')\n        if etag and etag.startswith('\"'):\n            response.headers['ETag'] = 'W/' + etag\n        response.headers['Content-Encoding'] = 'gzip'\n        return response",
    "docstring": "Compress content if the browser allows gzip compression. Set the Vary header accordingly, so that caches will base their storage on the Accept-Encoding header.",
    "type": "class",
    "file_path": "django\\django\\middleware\\gzip.py",
    "ast_data": "ClassDef name:GZipMiddleware Assign FunctionDef name:process_response arg:self arg:request arg:response arguments arg arg arg If BoolOp Compare Call Return return:yes If Call Return return:yes Call Assign Call If Call Return return:yes If If Assign AsyncFunctionDef name:gzip_wrapper arguments Call Assign Call Assign Call Assign Call If Compare Call Call Return return:yes Assign Assign Call Call Assign Call If BoolOp Call Assign Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "enable_auto_cast_variables",
    "source_code": "class enable_auto_cast_variables(object):\n    __slots__ = ['_dtype', '_prev_dtype']\n\n    def __init__(self, dtype):\n        if dtype and (not dtype.is_floating):\n            dtype = None\n        self._dtype = dtype\n\n    def __enter__(self):\n        self._prev_dtype = getattr(_autocast_dtype, 'dtype', None)\n        _autocast_dtype.dtype = self._dtype\n\n    def __exit__(self, type_arg, value_arg, traceback_arg):\n        _autocast_dtype.dtype = self._prev_dtype",
    "docstring": "Context manager which enables the autocasting of s. Under this context manager, s will be cast to if is floating-point. Otherwise, s will not be cast.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\mixed_precision\\autocast_variable.py",
    "ast_data": "ClassDef name:enable_auto_cast_variables Assign FunctionDef name:__init__ arg:self arg:dtype arguments arg arg If BoolOp Assign Assign FunctionDef name:__enter__ arg:self arguments arg Assign Call Assign FunctionDef name:__exit__ arg:self arg:type_arg arg:value_arg arg:traceback_arg arguments arg arg arg arg Assign"
  },
  {
    "library": "tensorflow",
    "name": "_compute_cosine_distance",
    "source_code": "@classmethod\ndef _compute_cosine_distance(cls, inputs, clusters, inputs_normalized=True):\n    output = []\n    if not inputs_normalized:\n        with ops.colocate_with(clusters, ignore_existing=True):\n            clusters = nn_impl.l2_normalize(clusters, axis=1)\n    for inp in inputs:\n        with ops.colocate_with(inp, ignore_existing=True):\n            if not inputs_normalized:\n                inp = nn_impl.l2_normalize(inp, axis=1)\n            output.append(1 - math_ops.matmul(inp, clusters, transpose_b=True))\n    return output",
    "docstring": "Computes cosine distance between each input and each cluster center. Args: inputs: list of input Tensor. clusters: cluster Tensor inputs_normalized: if True, it assumes that inp and clusters are normalized and computes the dot product which is equivalent to the cosine distance. Else it L2 normalizes the inputs first. Returns: list of Tensors, where each element corresponds to each element in inp. The value is the distance of each row to all the cluster centers.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\clustering_ops.py",
    "ast_data": "FunctionDef name:_compute_cosine_distance arg:cls arg:inputs arg:clusters arg:inputs_normalized arguments arg arg arg arg Assign If With Call Assign Call For With Call If Assign Call Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "transform_all_constraints",
    "source_code": "def transform_all_constraints(traced, counter=0):\n    dimension_dict = {}\n    generator = ConstraintGenerator(traced)\n    new_constraints, counter = generator.generate_constraints(counter)\n    new_constraints, counter = iterate_till_fixed_point(new_constraints, counter)\n    transformed, counter = transform_to_z3(new_constraints, counter, dimension_dict)\n    return transformed",
    "docstring": "Given a trace, generates constraints and transforms them to z3 format",
    "type": "function",
    "file_path": "pytorch\\torch\\fx\\experimental\\migrate_gradual_types\\transform_to_z3.py",
    "ast_data": "FunctionDef name:transform_all_constraints arg:traced arg:counter arguments arg arg Assign Assign Call Assign Call Assign Call Assign Call Return return:yes"
  },
  {
    "library": "django",
    "name": "_load_all_namespaces",
    "source_code": "def _load_all_namespaces(resolver, parents=()):\n    url_patterns = getattr(resolver, 'url_patterns', [])\n    namespaces = [':'.join([*parents, url.namespace]) for url in url_patterns if getattr(url, 'namespace', None) is not None]\n    for pattern in url_patterns:\n        namespace = getattr(pattern, 'namespace', None)\n        current = parents\n        if namespace is not None:\n            current += (namespace,)\n        namespaces.extend(_load_all_namespaces(pattern, current))\n    return namespaces",
    "docstring": "Recursively load all namespaces from URL patterns.",
    "type": "function",
    "file_path": "django\\django\\core\\checks\\urls.py",
    "ast_data": "FunctionDef name:_load_all_namespaces arg:resolver arg:parents arguments arg arg Assign Call Assign Call Compare Call For Assign Call Assign If Compare Call Call Return return:yes"
  },
  {
    "library": "scipy",
    "name": "marginal",
    "source_code": "def marginal(self, dimensions):\n    dims = np.atleast_1d(dimensions)\n    if not np.issubdtype(dims.dtype, np.integer):\n        msg = 'Elements of `dimensions` must be integers - the indices of the marginal variables being retained.'\n        raise ValueError(msg)\n    n = len(self.dataset)\n    original_dims = dims.copy()\n    dims[dims < 0] = n + dims[dims < 0]\n    if len(np.unique(dims)) != len(dims):\n        msg = 'All elements of `dimensions` must be unique.'\n        raise ValueError(msg)\n    i_invalid = (dims < 0) | (dims >= n)\n    if np.any(i_invalid):\n        msg = f'Dimensions {original_dims[i_invalid]} are invalid for a distribution in {n} dimensions.'\n        raise ValueError(msg)\n    dataset = self.dataset[dims]\n    weights = self.weights\n    return gaussian_kde(dataset, bw_method=self.covariance_factor(), weights=weights)",
    "docstring": "Return a marginal KDE distribution Parameters ---------- dimensions : int or 1-d array_like The dimensions of the multivariate distribution corresponding with the marginal variables, that is, the indices of the dimensions that are being retained. The other dimensions are marginalized out. Returns ------- marginal_kde : gaussian_kde An object representing the marginal distribution. Notes ----- .. versionadded:: 1.10.0",
    "type": "method",
    "file_path": "scipy\\scipy\\stats\\_kde.py",
    "ast_data": "FunctionDef name:marginal arg:self arg:dimensions arguments arg arg Assign Call If Call Assign Raise Call Assign Call Assign Call Assign Compare Compare If Compare Call Call Call Assign Raise Call Assign Compare Compare If Call Assign Raise Call Assign Assign Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_old_init",
    "source_code": "@classmethod\ndef _old_init(cls, fields, shape, nrows, row_partitions, internal=False):\n    assert isinstance(fields, dict), fields\n    assert isinstance(shape, tensor_shape.TensorShape), shape\n    assert nrows is None or isinstance(nrows, tensor.Tensor), nrows\n    assert row_partitions is None or isinstance(row_partitions, tuple), row_partitions\n    return StructuredTensor(fields=fields, ragged_shape=_dynamic_ragged_shape_init(fields, shape, nrows, row_partitions))",
    "docstring": "Private constructor -- use factory methods to create StructuredTensors. This constructor builds a from the given attributes, performing minimal validation. Args: fields: A dictionary mapping from string to , , or . (This dict is not copied, so the caller must ensure that it does not get mutated via leaked references.) shape: with statically known rank. nrows: scalar integer , or if . row_partitions: tuple of s, with length . internal: ignored argument. Returns: a StructuredTensor.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\structured\\structured_tensor.py",
    "ast_data": "FunctionDef name:_old_init arg:cls arg:fields arg:shape arg:nrows arg:row_partitions arg:internal arguments arg arg arg arg arg arg Call Call BoolOp Compare Call BoolOp Compare Call Return return:yes Call Call"
  },
  {
    "library": "matplotlib",
    "name": "set_depthshade",
    "source_code": "def set_depthshade(self, depthshade, depthshade_minalpha=None):\n    if depthshade_minalpha is None:\n        depthshade_minalpha = rcParams['axes3d.depthshade_minalpha']\n    self._depthshade = depthshade\n    self._depthshade_minalpha = depthshade_minalpha\n    self.stale = True",
    "docstring": "Set whether depth shading is performed on collection members. Parameters ---------- depthshade : bool Whether to shade the patches in order to give the appearance of depth. depthshade_minalpha : float, default: None Sets the minimum alpha value used by depth-shading. If None, use the value from rcParams['axes3d.depthshade_minalpha']. .. versionadded:: 3.11",
    "type": "method",
    "file_path": "matplotlib\\lib\\mpl_toolkits\\mplot3d\\art3d.py",
    "ast_data": "FunctionDef name:set_depthshade arg:self arg:depthshade arg:depthshade_minalpha arguments arg arg arg If Compare Assign Assign Assign Assign"
  },
  {
    "library": "pytorch",
    "name": "layer_norm",
    "source_code": "def layer_norm(input: Tensor, normalized_shape: ShapeType, weight: Optional[Tensor]=None, bias: Optional[Tensor]=None, eps: float=1e-05) -> Tensor:\n    return torch.native_layer_norm(input, normalized_shape, weight, bias, eps)[0]",
    "docstring": "Reference implementation of :func:.",
    "type": "function",
    "file_path": "pytorch\\torch\\_refs\\nn\\functional\\__init__.py",
    "ast_data": "FunctionDef name:layer_norm arg:input arg:normalized_shape arg:weight arg:bias arg:eps arguments arg arg arg arg arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_get_duration_microseconds",
    "source_code": "def _get_duration_microseconds(start_time_seconds, end_time_seconds) -> int:\n    return max(int((end_time_seconds - start_time_seconds) * 1000000), 0)",
    "docstring": "Returns the duration between start and end time in microseconds.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\tpu\\async_checkpoint.py",
    "ast_data": "FunctionDef name:_get_duration_microseconds arg:start_time_seconds arg:end_time_seconds arguments arg arg Return return:yes Call Call"
  },
  {
    "library": "sphinx",
    "name": "pending_warnings",
    "source_code": "@contextmanager\ndef pending_warnings() -> Iterator[logging.Handler]:\n    logger = logging.getLogger(NAMESPACE)\n    memhandler = MemoryHandler()\n    memhandler.setLevel(logging.WARNING)\n    try:\n        handlers = []\n        for handler in logger.handlers[:]:\n            if isinstance(handler, WarningStreamHandler):\n                logger.removeHandler(handler)\n                handlers.append(handler)\n        logger.addHandler(memhandler)\n        yield memhandler\n    finally:\n        logger.removeHandler(memhandler)\n        for handler in handlers:\n            logger.addHandler(handler)\n        memhandler.flushTo(logger)",
    "docstring": "Context manager to postpone logging warnings temporarily. Similar to :func:.",
    "type": "function",
    "file_path": "sphinx\\sphinx\\util\\logging.py",
    "ast_data": "FunctionDef name:pending_warnings arguments Assign Call Assign Call Call Try Assign For If Call Call Call Call Call For Call Call"
  },
  {
    "library": "pytorch",
    "name": "convert",
    "source_code": "def convert(self, module: nn.Module, mapping: Optional[dict[type[nn.Module], type[nn.Module]]]=None, inplace: bool=False, parameterization: type[nn.Module]=FakeSparsity):\n    if mapping is None:\n        raise NotImplementedError('Need to auto generate mapping ')\n    if not inplace:\n        module = copy.deepcopy(module)\n    reassign = {}\n    for name, mod in module.named_children():\n        if module_contains_param(mod, parameterization) and type_before_parametrizations(mod) in mapping:\n            reassign[name] = swap_module(mod, mapping)\n        else:\n            reassign[name] = self.convert(mod, mapping=mapping, inplace=True, parameterization=parameterization)\n    for key, value in reassign.items():\n        module._modules[key] = value\n    return module",
    "docstring": "Converts submodules in input module to a different module according to by calling method on the target module class Args: module: input module mapping: a dictionary that maps from source module type to target module type, can be overwritten to allow swapping user defined Modules inplace: carry out model transformations in-place, the original module is mutated",
    "type": "method",
    "file_path": "pytorch\\torch\\ao\\pruning\\sparsifier\\base_sparsifier.py",
    "ast_data": "FunctionDef name:convert arg:self arg:module arg:mapping arg:inplace arg:parameterization arguments arg arg arg arg arg If Compare Raise Call If Assign Call Assign For Call If BoolOp Call Compare Call Assign Call Assign Call For Call Assign Return return:yes"
  },
  {
    "library": "cherrypy",
    "name": "start",
    "source_code": "def start(self):\n    from flup.server.cgi import WSGIServer\n    self.cgiserver = WSGIServer(*self.args, **self.kwargs)\n    self.ready = True\n    self.cgiserver.run()",
    "docstring": "Start the CGI server.",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\process\\servers.py",
    "ast_data": "FunctionDef name:start arg:self arguments arg Assign Call Assign Call"
  },
  {
    "library": "pytorch",
    "name": "GradScaler",
    "source_code": "class GradScaler(torch.amp.GradScaler):\n\n    @deprecated(\"`torch.cuda.amp.GradScaler(args...)` is deprecated. Please use `torch.amp.GradScaler('cuda', args...)` instead.\", category=FutureWarning)\n    def __init__(self, init_scale: float=2.0 ** 16, growth_factor: float=2.0, backoff_factor: float=0.5, growth_interval: int=2000, enabled: bool=True) -> None:\n        super().__init__('cuda', init_scale=init_scale, growth_factor=growth_factor, backoff_factor=backoff_factor, growth_interval=growth_interval, enabled=enabled)",
    "docstring": "See :class:. `` instead.",
    "type": "class",
    "file_path": "pytorch\\torch\\cuda\\amp\\grad_scaler.py",
    "ast_data": "ClassDef name:GradScaler FunctionDef name:__init__ arg:self arg:init_scale arg:growth_factor arg:backoff_factor arg:growth_interval arg:enabled arguments arg arg arg arg arg arg Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "feed_output_shape",
    "source_code": "@property\ndef feed_output_shape(self):\n    if not self.has_feedable_training_target():\n        return None\n    if isinstance(self.loss_fn, losses.LossFunctionWrapper) and self.loss_fn.fn == losses.sparse_categorical_crossentropy or isinstance(self.loss_fn, losses.SparseCategoricalCrossentropy):\n        if backend.image_data_format() == 'channels_first':\n            return (self.shape[0], 1) + self.shape[2:]\n        else:\n            return self.shape[:-1] + (1,)\n    elif not isinstance(self.loss_fn, losses.Loss) or (isinstance(self.loss_fn, losses.LossFunctionWrapper) and getattr(losses, self.loss_fn.fn.__name__, None) is None):\n        return None\n    else:\n        return self.shape",
    "docstring": "The output shape for the feedable target.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\training_v1.py",
    "ast_data": "FunctionDef name:feed_output_shape arg:self arguments arg If Call Return return:no If BoolOp BoolOp Call Compare Call If Compare Call Return return:yes Return return:yes If BoolOp Call BoolOp Call Compare Call Return return:no Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "finish",
    "source_code": "def finish(self):\n    self.ax.axis([self.extent[0] - self.margin, self.extent[1] + self.margin, self.extent[2] - self.margin, self.extent[3] + self.margin])\n    self.ax.set_aspect('equal', adjustable='datalim')\n    return self.diagrams",
    "docstring": "Adjust the Axes and return a list of information about the Sankey subdiagram(s). Returns a list of subdiagrams with the following fields: ======== ============================================================= Field Description ======== ============================================================= *patch* Sankey outline (a ). *flows* Flow values (positive for input, negative for output). *angles* List of angles of the arrows [deg/90]. For example, if the diagram has not been rotated, an input to the top side has an angle of 3 (DOWN), and an output from the top side has an angle of 1 (UP). If a flow has been skipped (because its magnitude is less than *tolerance*), then its angle will be *None*. *tips* (N, 2)-array of the (x, y) positions of the tips (or \"dips\") of the flow paths. If the magnitude of a flow is less the *tolerance* of this instance, the flow is skipped and its tip will be at the center of the diagram. *text* instance for the diagram label. *texts* List of instances for the flow labels. ======== ============================================================= See Also -------- Sankey.add",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\sankey.py",
    "ast_data": "FunctionDef name:finish arg:self arguments arg Call Call Return return:yes"
  },
  {
    "library": "pygame",
    "name": "__init__",
    "source_code": "def __init__(self, *sprites, **kwargs):\n    LayeredUpdates.__init__(self, *sprites, **kwargs)\n    self._clip = None\n    self._use_update = False\n    self._time_threshold = 1000.0 / 80.0\n    self._bgd = None\n    for key, val in kwargs.items():\n        if key in ['_use_update', '_time_threshold', '_default_layer'] and hasattr(self, key):\n            setattr(self, key, val)",
    "docstring": "initialize group. pygame.sprite.LayeredDirty(*sprites, **kwargs): return LayeredDirty You can specify some additional attributes through kwargs: _use_update: True/False (default is False) _default_layer: default layer where the sprites without a layer are added _time_threshold: threshold time for switching between dirty rect mode and fullscreen mode; defaults to updating at 80 frames per second, which is equal to 1000.0 / 80.0",
    "type": "method",
    "file_path": "pygame\\src_py\\sprite.py",
    "ast_data": "FunctionDef name:__init__ arg:self arguments arg arg arg Call Assign Assign Assign Assign For Call If BoolOp Compare Call Call"
  },
  {
    "library": "pytorch",
    "name": "expand_inference_rule",
    "source_code": "@register_inference_rule('expand')\ndef expand_inference_rule(n: Node, symbols, constraints, counter):\n    assert isinstance(n.args[0], Node)\n    expand, counter = gen_tvar(counter)\n    symbols[n] = expand\n    e1 = symbols[n.args[0]]\n    e2, counter = gen_tvar(counter)\n    e2_nat_constraints = []\n    for arg in n.args[1:]:\n        assert isinstance(arg, (Node, int))\n        if isinstance(arg, Node):\n            assert isinstance(symbols[arg], DVar)\n            e2_nat_constraints.append(BinConstraintD(0, symbols[arg], op_leq))\n    e2_constraint = BinConstraintT(e2, TensorType([arg if isinstance(arg, int) else symbols[arg] for arg in n.args[1:]]), op_eq)\n    constraints, counter = gen_broadcasting_constraints(e1, e2, symbols, counter, expand)\n    dims, counter = gen_tensor_dims(len(n.args[1:]), counter)\n    nat_constraints = gen_nat_constraints(dims)\n    c = [BinConstraintT(expand, TensorType(dims), op_eq), *nat_constraints, e2_constraint, *e2_nat_constraints]\n    constraints += c\n    return (constraints, counter)",
    "docstring": "We generate the exact constraints as we do for tensor additions but we constraint the rank of this expression to be equal to len(n.args[1:]) so that only those cases get considered for the output",
    "type": "function",
    "file_path": "pytorch\\torch\\fx\\experimental\\migrate_gradual_types\\constraint_generator.py",
    "ast_data": "FunctionDef name:expand_inference_rule arg:n arg:symbols arg:constraints arg:counter arguments arg arg arg arg Call Assign Call Assign Assign Assign Call Assign For Call If Call Call Call Call Assign Call Call Call Assign Call Assign Call Call Assign Call Assign Call Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "or_",
    "source_code": "def or_(a, b):\n    a_val = a()\n    if tensor_util.is_tf_type(a_val):\n        return _tf_lazy_or(a_val, b)\n    return _py_lazy_or(a_val, b)",
    "docstring": "Functional form of \"or\". Uses lazy evaluation semantics.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\autograph\\operators\\logical.py",
    "ast_data": "FunctionDef name:or_ arg:a arg:b arguments arg arg Assign Call If Call Return return:yes Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "build_graph",
    "source_code": "def build_graph(device, dtype, data_format, input_shape, filter_shape, strides, padding, num_iters, warmup_iters):\n    with ops.device('/%s:0' % device):\n        inp = variable_v1.VariableV1(random_ops.truncated_normal(input_shape, dtype=dtype))\n        filt = variable_v1.VariableV1(random_ops.truncated_normal(filter_shape, dtype=dtype))\n        outputs = []\n        conv2d_op = nn_ops.conv2d(inp, filt, strides, padding, data_format=data_format)\n        outputs.append(conv2d_op)\n        for _ in range(1, num_iters):\n            with ops.control_dependencies([conv2d_op]):\n                conv2d_op = nn_ops.conv2d(inp, filt, strides, padding, data_format=data_format)\n                outputs.append(conv2d_op)\n        warmup_groups = []\n        warmup_conv2d_op = nn_ops.conv2d(inp, filt, strides, padding, data_format=data_format)\n        warmup_groups.append(warmup_conv2d_op)\n        for _ in range(1, warmup_iters):\n            with ops.control_dependencies([warmup_conv2d_op]):\n                warmup_conv2d_op = nn_ops.conv2d(inp, filt, strides, padding, data_format=data_format)\n                warmup_groups.append(warmup_conv2d_op)\n        return (control_flow_ops.group(*warmup_groups), control_flow_ops.group(*outputs))",
    "docstring": "builds a graph containing a sequence of conv2d operations. Args: device: String, the device to run on. dtype: Data type for the convolution. data_format: A string from: \"NHWC\" or \"NCHW\". Data format for input and output data. input_shape: Shape of the input tensor. filter_shape: Shape of the filter tensor. strides: A list of ints. 1-D of length 4. The stride of sliding window for each dimension of input. padding: A string from: \"SAME\", \"VALID\". The type of padding algorithm to use. num_iters: number of iterations to run conv2d. warmup_iters: number of iterations for warmup runs. Returns: An array of tensors to run()",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\conv2d_benchmark.py",
    "ast_data": "FunctionDef name:build_graph arg:device arg:dtype arg:data_format arg:input_shape arg:filter_shape arg:strides arg:padding arg:num_iters arg:warmup_iters arguments arg arg arg arg arg arg arg arg arg With Call Assign Call Call Assign Call Call Assign Assign Call Call For Call With Call Assign Call Call Assign Assign Call Call For Call With Call Assign Call Call Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "set_device",
    "source_code": "def set_device(device: _device_t) -> None:\n    device = _get_device_index(device)\n    if device >= 0:\n        torch._C._accelerator_hooks_set_current_device(device)",
    "docstring": "Set the current device. Args: device (torch.device or int): selected device. This function is a no-op if this argument is negative.",
    "type": "function",
    "file_path": "pytorch\\torch\\mtia\\__init__.py",
    "ast_data": "FunctionDef name:set_device arg:device arguments arg Assign Call If Compare Call"
  },
  {
    "library": "tensorflow",
    "name": "assert_has_rank",
    "source_code": "def assert_has_rank(self, rank):\n    if self.rank not in (None, rank):\n        raise ValueError('Shape %s must have rank %d' % (self, rank))",
    "docstring": "Raises an exception if is not compatible with the given . Args: rank: An integer. Raises: ValueError: If does not represent a shape with the given .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\tensor_shape.py",
    "ast_data": "FunctionDef name:assert_has_rank arg:self arg:rank arguments arg arg If Compare Raise Call"
  },
  {
    "library": "tensorflow",
    "name": "reset",
    "source_code": "def reset(self, name=None):\n    if self._reader_ref.dtype == dtypes.resource:\n        return gen_io_ops.reader_reset_v2(self._reader_ref, name=name)\n    else:\n        return gen_io_ops.reader_reset(self._reader_ref, name=name)",
    "docstring": "Restore a reader to its initial clean state. Args: name: A name for the operation (optional). Returns: The created Operation.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\io_ops.py",
    "ast_data": "FunctionDef name:reset arg:self arg:name arguments arg arg If Compare Return return:yes Call Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "_reverse_and_conj",
    "source_code": "def _reverse_and_conj(x, xp):\n    if not is_torch(xp):\n        reverse = (slice(None, None, -1),) * x.ndim\n        x_rev = x[reverse]\n    else:\n        x_rev = xp.flip(x)\n    if xp.isdtype(x.dtype, 'complex floating'):\n        return xp.conj(x_rev)\n    else:\n        return x_rev",
    "docstring": "Reverse array in all dimensions and perform the complex conjugate",
    "type": "function",
    "file_path": "scipy\\scipy\\signal\\_signaltools.py",
    "ast_data": "FunctionDef name:_reverse_and_conj arg:x arg:xp arguments arg arg If Call Assign Call Assign Assign Call If Call Return return:yes Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_children_str_at_line",
    "source_code": "def _children_str_at_line(self, line: int) -> str:\n    if self.upper_printer is None and self.lower_printer is None:\n        return ''\n    upper_total_rows = self.upper_printer._total_rows() if self.upper_printer else 1\n    lower_total_rows = self.lower_printer._total_rows() if self.lower_printer else 1\n    if 0 <= line < upper_total_rows:\n        return self.upper_printer._str_at_line(line) if self.upper_printer else '...'\n    elif upper_total_rows < line < upper_total_rows + lower_total_rows + 1:\n        return self.lower_printer._str_at_line(line - upper_total_rows - 1) if self.lower_printer else '...'\n    return ''",
    "docstring": "Get the string representation of the children at the given line. Recursively calls on children nodes.",
    "type": "method",
    "file_path": "pytorch\\torch\\onnx\\verification.py",
    "ast_data": "FunctionDef name:_children_str_at_line arg:self arg:line arguments arg arg If BoolOp Compare Compare Return return:yes Assign Call Assign Call If Compare Return return:yes Call If Compare Return return:yes Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "SendEvents",
    "source_code": "def SendEvents(self, request_iterator, context):\n    core_metadata_count = 0\n    graph_def_chunks = {}\n    tensor_chunks = {}\n    stream_handler = None\n    for event in request_iterator:\n        if not stream_handler:\n            stream_handler = self._stream_handler_class()\n        if event.summary and event.summary.value:\n            maybe_tensor_event = self._process_tensor_event_in_chunks(event, tensor_chunks)\n            if maybe_tensor_event:\n                event_reply = stream_handler.on_value_event(maybe_tensor_event)\n                if event_reply is not None:\n                    yield self._process_debug_op_state_changes(event_reply)\n        elif event.graph_def:\n            maybe_graph_def, maybe_device_name, maybe_wall_time = self._process_encoded_graph_def_in_chunks(event, graph_def_chunks)\n            if maybe_graph_def:\n                reply = stream_handler.on_graph_def(maybe_graph_def, maybe_device_name, maybe_wall_time)\n                yield self._process_debug_op_state_changes(reply)\n        elif event.log_message.message:\n            core_metadata_count += 1\n            if core_metadata_count > 1:\n                raise ValueError('Expected one core metadata event; received multiple')\n            reply = stream_handler.on_core_metadata_event(event)\n            yield self._process_debug_op_state_changes(reply)",
    "docstring": "Implementation of the SendEvents service method. This method receives streams of Event protos from the client, and processes them in ways specified in the on_event() callback. The stream is bi-directional, but currently only the client-to-server stream (i.e., the stream from the debug ops to the server) is used. Args: request_iterator: The incoming stream of Event protos. context: Server context. Raises: ValueError: If there are more than one core metadata events. Yields: An empty stream of responses.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\debug\\lib\\grpc_debug_server.py",
    "ast_data": "FunctionDef name:SendEvents arg:self arg:request_iterator arg:context arguments arg arg arg Assign Assign Assign Assign For If Assign Call If BoolOp Assign Call If Assign Call If Compare Call If Assign Call If Assign Call Call If If Compare Raise Call Assign Call Call"
  },
  {
    "library": "tensorflow",
    "name": "convert_dense_weights_data_format",
    "source_code": "def convert_dense_weights_data_format(dense, previous_feature_map_shape, target_data_format='channels_first'):\n    assert target_data_format in {'channels_last', 'channels_first'}\n    kernel, bias = dense.get_weights()\n    for i in range(kernel.shape[1]):\n        if target_data_format == 'channels_first':\n            c, h, w = previous_feature_map_shape\n            original_fm_shape = (h, w, c)\n            ki = kernel[:, i].reshape(original_fm_shape)\n            ki = np.transpose(ki, (2, 0, 1))\n        else:\n            h, w, c = previous_feature_map_shape\n            original_fm_shape = (c, h, w)\n            ki = kernel[:, i].reshape(original_fm_shape)\n            ki = np.transpose(ki, (1, 2, 0))\n        kernel[:, i] = np.reshape(ki, (np.prod(previous_feature_map_shape),))\n    dense.set_weights([kernel, bias])",
    "docstring": "Utility useful when changing a convnet's . When porting the weights of a convnet from one data format to the other, if the convnet includes a layer (applied to the last convolutional feature map) followed by a layer, the weights of that layer should be updated to reflect the new dimension ordering. Args: dense: The target layer. previous_feature_map_shape: A shape tuple of 3 integers, e.g. . The shape of the convolutional feature map right before the layer that came before the target layer. target_data_format: One of \"channels_last\", \"channels_first\". Set it \"channels_last\" if converting a \"channels_first\" model to \"channels_last\", or reciprocally.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\utils\\layer_utils.py",
    "ast_data": "FunctionDef name:convert_dense_weights_data_format arg:dense arg:previous_feature_map_shape arg:target_data_format arguments arg arg arg Compare Assign Call For Call If Compare Assign Assign Assign Call Assign Call Assign Assign Assign Call Assign Call Assign Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "get_losses_for",
    "source_code": "def get_losses_for(self, inputs):\n    if inputs is None:\n        return [l for l in self.losses if l._unconditional_loss]\n    losses = [l for l in self.losses if not l._unconditional_loss]\n    inputs = nest.flatten(inputs)\n    reachable = tf_utils.get_reachable_from_inputs(inputs, losses)\n    return [l for l in losses if l in reachable]",
    "docstring": "Retrieves losses relevant to a specific set of inputs. Args: inputs: Input tensor or list/tuple of input tensors. Returns: List of loss tensors of the layer that depend on .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\base_layer_v1.py",
    "ast_data": "FunctionDef name:get_losses_for arg:self arg:inputs arguments arg arg If Compare Return return:yes Assign Assign Call Assign Call Return return:yes Compare"
  },
  {
    "library": "django",
    "name": "ValuesIterable",
    "source_code": "class ValuesIterable(BaseIterable):\n\n    def __iter__(self):\n        queryset = self.queryset\n        query = queryset.query\n        compiler = query.get_compiler(queryset.db)\n        if query.selected:\n            names = list(query.selected)\n        else:\n            names = [*query.extra_select, *query.values_select, *query.annotation_select]\n        indexes = range(len(names))\n        for row in compiler.results_iter(chunked_fetch=self.chunked_fetch, chunk_size=self.chunk_size):\n            yield {names[i]: row[i] for i in indexes}",
    "docstring": "Iterable returned by QuerySet.values() that yields a dict for each row.",
    "type": "class",
    "file_path": "django\\django\\db\\models\\query.py",
    "ast_data": "ClassDef name:ValuesIterable FunctionDef name:__iter__ arg:self arguments arg Assign Assign Assign Call If Assign Call Assign Assign Call Call For Call"
  },
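This iterable backs `QuerySet.values()`. A hedged usage sketch, assuming a hypothetical `Author` model with `name` and `age` fields in a configured Django project:

```python
from myapp.models import Author  # hypothetical app and model

# values() swaps the queryset's iterable class to ValuesIterable,
# so iteration yields one plain dict per row instead of model instances.
for row in Author.objects.values("name", "age"):
    print(row["name"], row["age"])  # e.g. "Ada" 36
```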
  {
    "library": "scipy",
    "name": "Giunta",
    "source_code": "class Giunta(Benchmark):\n\n    def __init__(self, dimensions=2):\n        Benchmark.__init__(self, dimensions)\n        self._bounds = list(zip([-1.0] * self.N, [1.0] * self.N))\n        self.global_optimum = [[0.4673200277395354, 0.4673200169591304]]\n        self.fglob = 0.06447042053690566\n\n    def fun(self, x, *args):\n        self.nfev += 1\n        arg = 16 * x / 15.0 - 1\n        return 0.6 + sum(sin(arg) + sin(arg) ** 2 + sin(4 * arg) / 50.0)",
    "docstring": "Giunta objective function. This class defines the Giunta [1]_ global optimization problem. This is a multimodal minimization problem defined as follows: .. math:: f_{\\text{Giunta}}({x}) = 0.6 + \\sum_{i=1}^{n} \\left[\\sin^{2}\\left(1 - \\frac{16}{15} x_i\\right) - \\frac{1}{50} \\sin\\left(4 - \\frac{64}{15} x_i\\right) - \\sin\\left(1 - \\frac{16}{15} x_i\\right)\\right] with :math: for :math:. *Global optimum*: :math: for :math: .. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions For Global Optimization Problems Int. Journal of Mathematical Modelling and Numerical Optimisation, 2013, 4, 150-194. TODO Jamil has the wrong fglob. I think there is a lower value.",
    "type": "class",
    "file_path": "scipy\\benchmarks\\benchmarks\\go_benchmark_functions\\go_funcs_G.py",
    "ast_data": "ClassDef name:Giunta FunctionDef name:__init__ arg:self arg:dimensions arguments arg arg Call Assign Call Call Assign Assign FunctionDef name:fun arg:self arg:x arguments arg arg arg Assign Return return:yes Call Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "on_test_end",
    "source_code": "def on_test_end(self, logs=None):\n    logs = self._process_logs(logs)\n    for callback in self.callbacks:\n        callback.on_test_end(logs)",
    "docstring": "Calls the methods of its callbacks. Args: logs: Dict. Currently no data is passed to this argument for this method but that may change in the future.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\callbacks.py",
    "ast_data": "FunctionDef name:on_test_end arg:self arg:logs arguments arg arg Assign Call For Call"
  },
  {
    "library": "matplotlib",
    "name": "get_masked_triangles",
    "source_code": "def get_masked_triangles(self):\n    if self.mask is not None:\n        return self.triangles[~self.mask]\n    else:\n        return self.triangles",
    "docstring": "Return an array of triangles taking the mask into account.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\tri\\_triangulation.py",
    "ast_data": "FunctionDef name:get_masked_triangles arg:self arguments arg If Compare Return return:yes Return return:yes"
  },
  {
    "library": "scipy",
    "name": "time_spherical_polygon_area_calculation",
    "source_code": "def time_spherical_polygon_area_calculation(self, num_points, ndim):\n    self.sv.calculate_areas()",
    "docstring": "Time the area calculation in the Spherical Voronoi code.",
    "type": "method",
    "file_path": "scipy\\benchmarks\\benchmarks\\spatial.py",
    "ast_data": "FunctionDef name:time_spherical_polygon_area_calculation arg:self arg:num_points arg:ndim arguments arg arg arg Call"
  },
  {
    "library": "pygame",
    "name": "remove_internal",
    "source_code": "def remove_internal(self, group):\n    self.__g.remove(group)",
    "docstring": "For removing this sprite from a group internally. :param group: The group we are removing from.",
    "type": "method",
    "file_path": "pygame\\src_py\\sprite.py",
    "ast_data": "FunctionDef name:remove_internal arg:self arg:group arguments arg arg Call"
  },
  {
    "library": "kornia",
    "name": "K",
    "source_code": "def K(self) -> Tensor:\n    return self.matrix()",
    "docstring": "Return the camera matrix.",
    "type": "method",
    "file_path": "kornia\\kornia\\sensors\\camera\\camera_model.py",
    "ast_data": "FunctionDef name:K arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_TRTEngineResource",
    "source_code": "class _TRTEngineResource(resource.TrackableResource):\n\n    def __init__(self, resource_name, filename, maximum_cached_engines, device='GPU'):\n        super(_TRTEngineResource, self).__init__(device=device)\n        self._resource_name = resource_name\n        self._filename = self._track_trackable(asset.Asset(filename), '_serialized_trt_resource_filename')\n        self._maximum_cached_engines = maximum_cached_engines\n\n    def _create_resource(self):\n        return _get_resource_handle(self._resource_name, self._resource_device)\n\n    def _initialize(self):\n        gen_trt_ops.initialize_trt_resource(self.resource_handle, self._filename, max_cached_engines_count=self._maximum_cached_engines)\n\n    def _destroy_resource(self):\n        handle = _get_resource_handle(self._resource_name, self._resource_device)\n        with ops.device(self._resource_device):\n            gen_resource_variable_ops.destroy_resource_op(handle, ignore_lookup_error=True)",
    "docstring": "Class to track the serialized engines resource.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\compiler\\tensorrt\\trt_convert.py",
    "ast_data": "ClassDef name:_TRTEngineResource FunctionDef name:__init__ arg:self arg:resource_name arg:filename arg:maximum_cached_engines arg:device arguments arg arg arg arg arg Call Call Assign Assign Call Call Assign FunctionDef name:_create_resource arg:self arguments arg Return return:yes Call FunctionDef name:_initialize arg:self arguments arg Call FunctionDef name:_destroy_resource arg:self arguments arg Assign Call With Call Call"
  },
  {
    "library": "numpy",
    "name": "_merge",
    "source_code": "def _merge(old, new):\n    if not old:\n        return new\n    if new in old:\n        return old\n    return ';'.join([old, new])",
    "docstring": "Concatenate two environment paths avoiding repeats. Here is the environment string before the base class initialize function is called and is the string after the call. The new string will be a fixed string if it is not obtained from the current environment, or the same as the old string if obtained from the same environment. The aim here is not to append the new string if it is already contained in the old string so as to limit the growth of the environment string. Parameters ---------- old : string Previous environment string. new : string New environment string. Returns ------- ret : string Updated environment string.",
    "type": "function",
    "file_path": "numpy\\numpy\\distutils\\msvc9compiler.py",
    "ast_data": "FunctionDef name:_merge arg:old arg:new arguments arg arg If Return return:yes If Compare Return return:yes Return return:yes Call"
  },
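Since `_merge` is pure Python, its three branches can be exercised directly; a small demonstration (the paths are illustrative only):

```python
def _merge(old, new):
    # Same logic as the numpy helper above.
    if not old:
        return new
    if new in old:
        return old
    return ';'.join([old, new])

print(_merge('', r'C:\tools'))           # empty old: new taken as-is
print(_merge(r'C:\tools', r'C:\tools'))  # already contained: unchanged
print(_merge(r'C:\tools', r'C:\other'))  # otherwise appended with ';'
```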
  {
    "library": "tensorflow",
    "name": "no_automatic_dependency_tracking",
    "source_code": "@tf_export('__internal__.tracking.no_automatic_dependency_tracking', v1=[])\ndef no_automatic_dependency_tracking(method):\n\n    def _method_wrapper(self, *args, **kwargs):\n        previous_value = getattr(self, '_self_setattr_tracking', True)\n        self._self_setattr_tracking = False\n        try:\n            result = method(self, *args, **kwargs)\n        finally:\n            self._self_setattr_tracking = previous_value\n        return result\n    return tf_decorator.make_decorator(target=method, decorator_func=_method_wrapper)",
    "docstring": "Disables automatic dependency tracking on attribute assignment. Use to decorate any method of a Trackable object. Attribute assignment in that method will not add dependencies (also respected in Model). Harmless if used in a class which does not do automatic dependency tracking (which means it's safe to use in base classes which may have subclasses which also inherit from Trackable). Args: method: The method to decorate. Returns: A decorated method which sets and un-sets automatic dependency tracking for the object the method is called on (not thread safe).",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\trackable\\base.py",
    "ast_data": "FunctionDef name:no_automatic_dependency_tracking arg:method arguments arg FunctionDef name:_method_wrapper arg:self arguments arg arg arg Assign Call Assign Try Assign Call Assign Return return:yes Return return:yes Call Call"
  },
  {
    "library": "pandas",
    "name": "to_tuples",
    "source_code": "def to_tuples(self, na_tuple: bool=True) -> np.ndarray:\n    tuples = com.asarray_tuplesafe(zip(self._left, self._right))\n    if not na_tuple:\n        tuples = np.where(~self.isna(), tuples, np.nan)\n    return tuples",
    "docstring": "Return an ndarray (if self is IntervalArray) or Index (if self is IntervalIndex) of tuples of the form (left, right). Parameters ---------- na_tuple : bool, default True If `selfselfpandas.IntervalArraypandas.IntervalIndex`: >>> idx = pd.interval_range(start=0, end=2) >>> idx IntervalIndex([(0, 1], (1, 2]], dtype='interval[int64, right]') >>> idx.to_tuples() Index([(0, 1), (1, 2)], dtype='object')",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\arrays\\interval.py",
    "ast_data": "FunctionDef name:to_tuples arg:self arg:na_tuple arguments arg arg Assign Call Call If Assign Call Call Return return:yes"
  },
  {
    "library": "scipy",
    "name": "_calc_t_stat",
    "source_code": "def _calc_t_stat(a, b, equal_var, axis=-1):\n    na = a.shape[axis]\n    nb = b.shape[axis]\n    avg_a = np.mean(a, axis=axis)\n    avg_b = np.mean(b, axis=axis)\n    var_a = _var(a, axis=axis, ddof=1)\n    var_b = _var(b, axis=axis, ddof=1)\n    if not equal_var:\n        _, denom = _unequal_var_ttest_denom(var_a, na, var_b, nb)\n    else:\n        _, denom = _equal_var_ttest_denom(var_a, na, var_b, nb)\n    return (avg_a - avg_b) / denom",
    "docstring": "Calculate the t statistic along the given dimension.",
    "type": "function",
    "file_path": "scipy\\scipy\\stats\\_stats_py.py",
    "ast_data": "FunctionDef name:_calc_t_stat arg:a arg:b arg:equal_var arg:axis arguments arg arg arg arg Assign Assign Assign Call Assign Call Assign Call Assign Call If Assign Call Assign Call Return return:yes"
  },
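The same statistic is exposed publicly via `scipy.stats.ttest_ind`; a short sketch of the Welch variant (`equal_var=False`) on synthetic data:

```python
import numpy as np
from scipy import stats

rng = np.random.default_rng(0)
a = rng.normal(0.0, 1.0, size=30)
b = rng.normal(0.5, 2.0, size=40)

# equal_var=False selects the unequal-variance (Welch) denominator.
res = stats.ttest_ind(a, b, equal_var=False)
print(res.statistic, res.pvalue)
```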
  {
    "library": "tensorflow",
    "name": "_MaybeCaptured",
    "source_code": "def _MaybeCaptured(t):\n    if not isinstance(t, ops.EagerTensor) and _IsFunction(t.op.graph) and (t.op.type == 'Placeholder'):\n        for input_t, placeholder_t in _Captures(t.op.graph):\n            if t is placeholder_t:\n                return _MaybeCaptured(input_t)\n    return t",
    "docstring": "If t is a captured value placeholder, returns the original captured value. Args: t: Tensor Returns: A tensor, potentially from a different Graph/FuncGraph.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\gradients_util.py",
    "ast_data": "FunctionDef name:_MaybeCaptured arg:t arguments arg If BoolOp Call Call Compare For Call If Compare Return return:yes Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "disable_resource_variables",
    "source_code": "@deprecation.deprecated(None, 'non-resource variables are not supported in the long term')\n@tf_export(v1=['disable_resource_variables'])\ndef disable_resource_variables() -> None:\n    global _DEFAULT_USE_RESOURCE\n    _DEFAULT_USE_RESOURCE = False\n    logging.vlog(1, 'Disabling resource variables')\n    _api_usage_gauge.get_cell().set(False)",
    "docstring": "Opts out of resource variables. If your code needs tf.disable_resource_variables() to be called to work properly please file a bug.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\resource_variables_toggle.py",
    "ast_data": "FunctionDef name:disable_resource_variables arguments Assign Call Call Call Call Call"
  },
  {
    "library": "scikit-learn",
    "name": "remap_single_linkage_tree",
    "source_code": "def remap_single_linkage_tree(tree, internal_to_raw, non_finite):\n    finite_count = len(internal_to_raw)\n    outlier_count = len(non_finite)\n    for i, _ in enumerate(tree):\n        left = tree[i]['left_node']\n        right = tree[i]['right_node']\n        if left < finite_count:\n            tree[i]['left_node'] = internal_to_raw[left]\n        else:\n            tree[i]['left_node'] = left + outlier_count\n        if right < finite_count:\n            tree[i]['right_node'] = internal_to_raw[right]\n        else:\n            tree[i]['right_node'] = right + outlier_count\n    outlier_tree = np.zeros(len(non_finite), dtype=HIERARCHY_dtype)\n    last_cluster_id = max(tree[tree.shape[0] - 1]['left_node'], tree[tree.shape[0] - 1]['right_node'])\n    last_cluster_size = tree[tree.shape[0] - 1]['cluster_size']\n    for i, outlier in enumerate(non_finite):\n        outlier_tree[i] = (outlier, last_cluster_id + 1, np.inf, last_cluster_size + 1)\n        last_cluster_id += 1\n        last_cluster_size += 1\n    tree = np.concatenate([tree, outlier_tree])\n    return tree",
    "docstring": "Takes an internal single_linkage_tree structure and adds back in a set of points that were initially detected as non-finite and returns that new tree. These points will all be merged into the final node at np.inf distance and considered noise points. Parameters ---------- tree : ndarray of shape (n_samples - 1,), dtype=HIERARCHY_dtype The single-linkage tree tree (dendrogram) built from the MST. internal_to_raw: dict A mapping from internal integer index to the raw integer index non_finite : ndarray Boolean array of which entries in the raw data are non-finite",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\cluster\\_hdbscan\\hdbscan.py",
    "ast_data": "FunctionDef name:remap_single_linkage_tree arg:tree arg:internal_to_raw arg:non_finite arguments arg arg arg Assign Call Assign Call For Call Assign Assign If Compare Assign Assign If Compare Assign Assign Assign Call Call Assign Call Assign For Call Assign Assign Call Return return:yes"
  },
  {
    "library": "scipy",
    "name": "SineEnvelope",
    "source_code": "class SineEnvelope(Benchmark):\n    change_dimensionality = True\n\n    def __init__(self, dimensions=2):\n        Benchmark.__init__(self, dimensions)\n        self._bounds = list(zip([-100.0] * self.N, [100.0] * self.N))\n        self.custom_bounds = [(-20, 20), (-20, 20)]\n        self.global_optimum = [[0 for _ in range(self.N)]]\n        self.fglob = 0.0\n\n    def fun(self, x, *args):\n        self.nfev += 1\n        X0 = x[:-1]\n        X1 = x[1:]\n        X02X12 = X0 ** 2 + X1 ** 2\n        return sum((sin(sqrt(X02X12)) ** 2 - 0.5) / (1 + 0.001 * X02X12) ** 2 + 0.5)",
    "docstring": "SineEnvelope objective function. This class defines the SineEnvelope [1]_ global optimization problem. This is a multimodal minimization problem defined as follows: .. math:: f_{\\text{SineEnvelope}}(x) = -\\sum_{i=1}^{n-1}\\left[\\frac{\\sin^2( \\sqrt{x_{i+1}^2+x_{i}^2}-0.5)} {(0.001(x_{i+1}^2+x_{i}^2)+1)^2} + 0.5\\right] Here, :math: represents the number of dimensions and :math: for :math:. *Global optimum*: :math: for :math: for :math: .. [1] Gavana, A. Global Optimization Benchmarks and AMPGO retrieved 2015 TODO: Jamil #136",
    "type": "class",
    "file_path": "scipy\\benchmarks\\benchmarks\\go_benchmark_functions\\go_funcs_S.py",
    "ast_data": "ClassDef name:SineEnvelope Assign FunctionDef name:__init__ arg:self arg:dimensions arguments arg arg Call Assign Call Call Assign Assign Call Assign FunctionDef name:fun arg:self arg:x arguments arg arg arg Assign Assign Assign Return return:yes Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "SubsetRandomSampler",
    "source_code": "class SubsetRandomSampler(Sampler[int]):\n    indices: Sequence[int]\n\n    def __init__(self, indices: Sequence[int], generator=None) -> None:\n        self.indices = indices\n        self.generator = generator\n\n    def __iter__(self) -> Iterator[int]:\n        for i in torch.randperm(len(self.indices), generator=self.generator).tolist():\n            yield self.indices[i]\n\n    def __len__(self) -> int:\n        return len(self.indices)",
    "docstring": "Samples elements randomly from a given list of indices, without replacement. Args: indices (sequence): a sequence of indices generator (Generator): Generator used in sampling.",
    "type": "class",
    "file_path": "pytorch\\torch\\utils\\data\\sampler.py",
    "ast_data": "ClassDef name:SubsetRandomSampler FunctionDef name:__init__ arg:self arg:indices arg:generator arguments arg arg arg Assign Assign FunctionDef name:__iter__ arg:self arguments arg For Call Call Call FunctionDef name:__len__ arg:self arguments arg Return return:yes Call"
  },
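A usage sketch: restricting a `DataLoader` to a fixed index subset, e.g. for a train/validation split (the tensor dataset here is a stand-in):

```python
import torch
from torch.utils.data import DataLoader, SubsetRandomSampler, TensorDataset

dataset = TensorDataset(torch.arange(10).float())
train_idx = [0, 1, 2, 3, 4, 5, 6, 7]  # hold out indices 8 and 9

loader = DataLoader(dataset, batch_size=4,
                    sampler=SubsetRandomSampler(train_idx))
for (batch,) in loader:
    print(batch)  # values drawn only from indices 0..7, in random order
```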
  {
    "library": "pytorch",
    "name": "apply",
    "source_code": "def apply(self, model_args: Sequence[Any], model_kwargs: Mapping[str, Any], model: torch.nn.Module | Callable | torch_export.ExportedProgram | None=None) -> tuple[Sequence[Any], Mapping[str, Any]]:\n    return (tuple((torch.view_as_real(arg.resolve_conj()) if isinstance(arg, torch.Tensor) and arg.is_complex() else arg for arg in model_args)), model_kwargs)",
    "docstring": "Convert complex tensors to float tensors. Args: model_args: The model args. model_kwargs: The model kwargs. model: The PyTorch model. Returns: A tuple of the model args and kwargs.",
    "type": "method",
    "file_path": "pytorch\\torch\\onnx\\_internal\\io_adapter.py",
    "ast_data": "FunctionDef name:apply arg:self arg:model_args arg:model_kwargs arg:model arguments arg arg arg arg Return return:yes Call BoolOp Call Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "add_to_tensor",
    "source_code": "def add_to_tensor(self, x, name='add_to_tensor'):\n    with self._name_scope(name):\n        x = tensor_conversion.convert_to_tensor_v2_with_dispatch(x, name='x')\n        self._check_input_dtype(x)\n        return self._add_to_tensor(x)",
    "docstring": "Add matrix represented by this operator to . Equivalent to . Args: x: with same and shape broadcastable to . name: A name to give this . Returns: A with broadcast shape and same as .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\linalg\\linear_operator.py",
    "ast_data": "FunctionDef name:add_to_tensor arg:self arg:x arg:name arguments arg arg arg With Call Assign Call Call Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "matshow",
    "source_code": "def matshow(self, Z, **kwargs):\n    Z = np.asanyarray(Z)\n    kw = {'origin': 'upper', 'interpolation': 'nearest', 'aspect': 'equal', **kwargs}\n    im = self.imshow(Z, **kw)\n    self.title.set_y(1.05)\n    self.xaxis.tick_top()\n    self.xaxis.set_ticks_position('both')\n    self.xaxis.set_major_locator(mticker.MaxNLocator(nbins=9, steps=[1, 2, 5, 10], integer=True))\n    self.yaxis.set_major_locator(mticker.MaxNLocator(nbins=9, steps=[1, 2, 5, 10], integer=True))\n    return im",
    "docstring": "Plot the values of a 2D matrix or array as color-coded image. The matrix will be shown the way it would be printed, with the first row at the top. Row and column numbering is zero-based. Parameters ---------- Z : (M, N) array-like The matrix to be displayed. Returns ------- Other Parameters ---------------- **kwargs : arguments See Also -------- imshow : More general function to plot data on a 2D regular raster. Notes ----- This is just a convenience function wrapping to set useful defaults for displaying a matrix. In particular: - Set ``. - Ticks are placed to the left and above. - Ticks are formatted to show integer indices.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\axes\\_axes.py",
    "ast_data": "FunctionDef name:matshow arg:self arg:Z arguments arg arg arg Assign Call Assign Assign Call Call Call Call Call Call Call Call Return return:yes"
  },
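A usage sketch of the defaults described above, on a small illustrative matrix:

```python
import numpy as np
import matplotlib.pyplot as plt

fig, ax = plt.subplots()
# matshow sets origin='upper', interpolation='nearest', aspect='equal'
# and moves the x ticks to the top, as in the implementation above.
im = ax.matshow(np.arange(9).reshape(3, 3))
fig.colorbar(im)
plt.show()
```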
  {
    "library": "matplotlib",
    "name": "_zalpha",
    "source_code": "def _zalpha(colors, zs, min_alpha=0.3, _data_scale=None):\n    if len(colors) == 0 or len(zs) == 0:\n        return np.zeros((0, 4))\n    min_alpha = np.clip(min_alpha, 0, 1)\n    if _data_scale is None or _data_scale == 0:\n        sats = np.ones_like(zs)\n    else:\n        sats = np.clip(1 - (zs - np.min(zs)) / _data_scale, min_alpha, 1)\n    rgba = np.broadcast_to(mcolors.to_rgba_array(colors), (len(zs), 4))\n    return np.column_stack([rgba[:, :3], rgba[:, 3] * sats])",
    "docstring": "Modify the alpha values of the color list according to z-depth.",
    "type": "function",
    "file_path": "matplotlib\\lib\\mpl_toolkits\\mplot3d\\art3d.py",
    "ast_data": "FunctionDef name:_zalpha arg:colors arg:zs arg:min_alpha arg:_data_scale arguments arg arg arg arg If BoolOp Compare Call Compare Call Return return:yes Call Assign Call If BoolOp Compare Compare Assign Call Assign Call Call Assign Call Call Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "repeat",
    "source_code": "@dispatch.add_dispatch_support\n@doc_controls.do_not_generate_docs\ndef repeat(x, n):\n    assert ndim(x) == 2\n    x = array_ops.expand_dims(x, 1)\n    pattern = array_ops_stack.stack([1, n, 1])\n    return array_ops.tile(x, pattern)",
    "docstring": "Repeats a 2D tensor. if has shape (samples, dim) and is , the output will have shape . Args: x: Tensor or variable. n: Python integer, number of times to repeat. Returns: A tensor. Example: >>> b = tf.constant([[1, 2], [3, 4]]) >>> b >>> tf.keras.backend.repeat(b, n=2)",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\backend.py",
    "ast_data": "FunctionDef name:repeat arg:x arg:n arguments arg arg Compare Call Assign Call Assign Call Return return:yes Call"
  },
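The expand-dims-then-tile pattern has a direct NumPy analogue; a minimal sketch assuming NumPy only:

```python
import numpy as np

x = np.array([[1, 2], [3, 4]])      # shape (samples, dim) = (2, 2)
n = 2
# Insert a middle axis, then tile it n times: (samples, n, dim).
out = np.tile(x[:, None, :], (1, n, 1))
print(out.shape)                    # (2, 2, 2)
```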
  {
    "library": "scrapy",
    "name": "process_results",
    "source_code": "def process_results(self, response: Response, results: Iterable[Any]) -> Iterable[Any]:\n    return results",
    "docstring": "This overridable method is called for each result (item or request) returned by the spider, and it's intended to perform any last time processing required before returning the results to the framework core, for example setting the item GUIDs. It receives a list of results and the response which originated that results. It must return a list of results (items or requests).",
    "type": "method",
    "file_path": "scrapy\\scrapy\\spiders\\feed.py",
    "ast_data": "FunctionDef name:process_results arg:self arg:response arg:results arguments arg arg arg Return return:yes"
  },
  {
    "library": "pandas",
    "name": "_gotitem",
    "source_code": "def _gotitem(self, key: IndexLabel, ndim: int, subset: DataFrame | Series | None=None) -> DataFrame | Series:\n    if subset is None:\n        subset = self\n    elif subset.ndim == 1:\n        return subset\n    return subset[key]",
    "docstring": "Sub-classes to define. Return a sliced object. Parameters ---------- key : string / list of selections ndim : {1, 2} requested ndim of result subset : object, default None subset to act on",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\frame.py",
    "ast_data": "FunctionDef name:_gotitem arg:self arg:key arg:ndim arg:subset arguments arg arg arg arg If Compare Assign If Compare Return return:yes Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "RadiusNeighbors",
    "source_code": "class RadiusNeighbors(BaseDistancesReductionDispatcher):\n\n    @classmethod\n    def compute(cls, X, Y, radius, metric='euclidean', chunk_size=None, metric_kwargs=None, strategy=None, return_distance=False, sort_results=False):\n        if X.dtype == Y.dtype == np.float64:\n            return RadiusNeighbors64.compute(X=X, Y=Y, radius=radius, metric=metric, chunk_size=chunk_size, metric_kwargs=metric_kwargs, strategy=strategy, sort_results=sort_results, return_distance=return_distance)\n        if X.dtype == Y.dtype == np.float32:\n            return RadiusNeighbors32.compute(X=X, Y=Y, radius=radius, metric=metric, chunk_size=chunk_size, metric_kwargs=metric_kwargs, strategy=strategy, sort_results=sort_results, return_distance=return_distance)\n        raise ValueError(f'Only float64 or float32 datasets pairs are supported at this time, got: X.dtype={X.dtype} and Y.dtype={Y.dtype}.')",
    "docstring": "Compute radius-based neighbors for two sets of vectors. For each row-vector X[i] of the queries X, find all the indices j of row-vectors in Y such that: dist(X[i], Y[j]) <= radius The distance function depends on the values of the and parameters. This class is not meant to be instantiated, one should only use its :meth: classmethod which handles allocation and deallocation consistently.",
    "type": "class",
    "file_path": "scikit-learn\\sklearn\\metrics\\_pairwise_distances_reduction\\_dispatcher.py",
    "ast_data": "ClassDef name:RadiusNeighbors FunctionDef name:compute arg:cls arg:X arg:Y arg:radius arg:metric arg:chunk_size arg:metric_kwargs arg:strategy arg:return_distance arg:sort_results arguments arg arg arg arg arg arg arg arg arg arg If Compare Return return:yes Call If Compare Return return:yes Call Raise Call"
  },
  {
    "library": "tensorflow",
    "name": "Gradient",
    "source_code": "def Gradient(inputs, f, name=None):\n    tlist = [_.type for _ in f.definition.signature.input_arg]\n    return symbolic_gradient(input=inputs, Tout=tlist, f=f, name=name)",
    "docstring": "Computes the gradient function for function f via backpropagation. Args: inputs: A list of tensors of size N + M. f: The function we want to compute the gradient for. The function 'f' must be a numerical function which takes N inputs and produces M outputs. Its gradient function 'g', which is a function taking N + M inputs and produces N outputs. I.e. if we have (y1, y2, ..., yM) = f(x1, x2, ..., xN), then, g is (dL/dx1, dL/dx2, ..., dL/dxN) = g(x1, x2, ..., xN, dL/dy1, dL/dy2, ..., dL/dyM), where L is a scalar-value function of (x1, x2, ..., xN) (e.g., the loss function). dL/dxi is the partial derivative of L with respect to xi. name: A name for the operation (optional). Returns: A list of tensors of size N.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\functional_ops.py",
    "ast_data": "FunctionDef name:Gradient arg:inputs arg:f arg:name arguments arg arg arg Assign Return return:yes Call"
  },
  {
    "library": "scrapy",
    "name": "object_ref",
    "source_code": "class object_ref:\n    __slots__ = ()\n\n    def __new__(cls, *args: Any, **kwargs: Any) -> Self:\n        obj = object.__new__(cls)\n        live_refs[cls][obj] = time()\n        return obj",
    "docstring": "Inherit from this class to a keep a record of live instances",
    "type": "class",
    "file_path": "scrapy\\scrapy\\utils\\trackref.py",
    "ast_data": "ClassDef name:object_ref Assign FunctionDef name:__new__ arg:cls arguments arg arg arg Assign Call Assign Call Return return:yes"
  },
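A hedged sketch of how the record is consulted, using `scrapy.utils.trackref.live_refs` and a hypothetical subclass:

```python
from scrapy.utils.trackref import live_refs, object_ref

class TrackedThing(object_ref):
    pass

t = TrackedThing()
# live_refs maps each class to a weak dict of instance -> creation time.
print(len(live_refs[TrackedThing]))  # 1 while `t` is alive
```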
  {
    "library": "pytorch",
    "name": "transform_transpose",
    "source_code": "@register_transformation_rule(Transpose)\ndef transform_transpose(constraint, counter):\n    dims, counter = gen_tensor_dims(constraint.tensor_size, counter)\n    is_valid_index1 = valid_index(constraint.index1, dims)\n    is_valid_index2 = valid_index(constraint.index2, dims)\n    new_dims = copy.deepcopy(dims)\n    nat_constraints = gen_nat_constraints(dims)\n    if is_valid_index1 == T() and is_valid_index2 == T():\n        new_dims[constraint.index1] = dims[constraint.index2]\n        new_dims[constraint.index2] = dims[constraint.index1]\n    transformed_constraint = Conj([BinConstraintT(constraint.input_var, TensorType(dims), op_eq), *nat_constraints, is_valid_index1, is_valid_index2, BinConstraintT(constraint.output, TensorType(new_dims), op_eq)])\n    return (transformed_constraint, counter)",
    "docstring": "Similar to a sequence of two index-selects",
    "type": "function",
    "file_path": "pytorch\\torch\\fx\\experimental\\migrate_gradual_types\\constraint_transformation.py",
    "ast_data": "FunctionDef name:transform_transpose arg:constraint arg:counter arguments arg arg Assign Call Assign Call Assign Call Assign Call Assign Call If BoolOp Compare Call Compare Call Assign Assign Assign Call Call Call Call Call Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "_IdentityClassifier",
    "source_code": "class _IdentityClassifier(LinearClassifierMixin, BaseEstimator):\n\n    def __init__(self, classes):\n        self.classes_ = classes\n\n    def decision_function(self, y_predict):\n        return y_predict",
    "docstring": "Fake classifier which will directly output the prediction. We inherit from LinearClassifierMixin to get the proper shape for the output .",
    "type": "class",
    "file_path": "scikit-learn\\sklearn\\linear_model\\_ridge.py",
    "ast_data": "ClassDef name:_IdentityClassifier FunctionDef name:__init__ arg:self arg:classes arguments arg arg Assign FunctionDef name:decision_function arg:self arg:y_predict arguments arg arg Return return:yes"
  },
  {
    "library": "django",
    "name": "CaseInsensitiveMixin",
    "source_code": "class CaseInsensitiveMixin:\n\n    def process_lhs(self, compiler, connection):\n        lhs, lhs_params = super().process_lhs(compiler, connection)\n        if connection.vendor == 'mysql':\n            return ('LOWER(%s)' % lhs, lhs_params)\n        return (lhs, lhs_params)\n\n    def process_rhs(self, compiler, connection):\n        rhs, rhs_params = super().process_rhs(compiler, connection)\n        if connection.vendor == 'mysql':\n            return ('LOWER(%s)' % rhs, rhs_params)\n        return (rhs, rhs_params)",
    "docstring": "Mixin to allow case-insensitive comparison of JSON values on MySQL. MySQL handles strings used in JSON context using the utf8mb4_bin collation. Because utf8mb4_bin is a binary collation, comparison of JSON values is case-sensitive.",
    "type": "class",
    "file_path": "django\\django\\db\\models\\fields\\json.py",
    "ast_data": "ClassDef name:CaseInsensitiveMixin FunctionDef name:process_lhs arg:self arg:compiler arg:connection arguments arg arg arg Assign Call Call If Compare Return return:yes Return return:yes FunctionDef name:process_rhs arg:self arg:compiler arg:connection arguments arg arg arg Assign Call Call If Compare Return return:yes Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "unpack_captures",
    "source_code": "def unpack_captures(self, captures) -> List[core.Tensor]:\n    flat = []\n    for v, t in zip(captures, self.captures.values()):\n        flat.extend(t.to_tensors(v))\n    if len(flat) != len(self.flat_captures):\n        raise TypeError(f'Flattening captures {captures} with type {self!r} produced {len(flat)} tensors instead of {len(self.flat_captures)}')\n    return flat",
    "docstring": "Unpacks captures to flat tensors.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\core\\function\\polymorphism\\function_type.py",
    "ast_data": "FunctionDef name:unpack_captures arg:self arg:captures arguments arg arg Assign For Call Call Call Call If Compare Call Call Raise Call Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "eval",
    "source_code": "def eval(self) -> Self:\n    return self.train(False)",
    "docstring": "Set the module in evaluation mode. This has an effect only on certain modules. See the documentation of particular modules for details of their behaviors in training/evaluation mode, i.e. whether they are affected, e.g. :class:, :class:, etc. This is equivalent with :meth:. See :ref: for a comparison between and several similar mechanisms that may be confused with it. Returns: Module: self",
    "type": "method",
    "file_path": "pytorch\\torch\\nn\\modules\\module.py",
    "ast_data": "FunctionDef name:eval arg:self arguments arg Return return:yes Call"
  },
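A usage sketch of the train/eval toggle on a module containing a Dropout layer:

```python
import torch.nn as nn

model = nn.Sequential(nn.Linear(4, 4), nn.Dropout(p=0.5))
model.eval()             # equivalent to model.train(False)
print(model.training)    # False: dropout now acts as identity
model.train()
print(model.training)    # True
```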
  {
    "library": "django",
    "name": "endswith_lf",
    "source_code": "def endswith_lf(line):\n    return line.endswith('\\n' if isinstance(line, str) else b'\\n')",
    "docstring": "Return True if line (a text or bytestring) ends with ' '.",
    "type": "function",
    "file_path": "django\\django\\core\\files\\base.py",
    "ast_data": "FunctionDef name:endswith_lf arg:line arguments arg Return return:yes Call Call"
  },
  {
    "library": "scipy",
    "name": "_pearsonr_fisher_ci",
    "source_code": "def _pearsonr_fisher_ci(r, n, confidence_level, alternative):\n    xp = array_namespace(r)\n    ones = xp.ones_like(r)\n    n = xp.asarray(n, dtype=r.dtype)\n    confidence_level = xp.asarray(confidence_level, dtype=r.dtype)\n    with np.errstate(divide='ignore', invalid='ignore'):\n        zr = xp.atanh(r)\n        se = xp.sqrt(1 / (n - 3))\n    if alternative == 'two-sided':\n        h = special.ndtri(0.5 + confidence_level / 2)\n        zlo = zr - h * se\n        zhi = zr + h * se\n        rlo = xp.tanh(zlo)\n        rhi = xp.tanh(zhi)\n    elif alternative == 'less':\n        h = special.ndtri(confidence_level)\n        zhi = zr + h * se\n        rhi = xp.tanh(zhi)\n        rlo = -ones\n    else:\n        h = special.ndtri(confidence_level)\n        zlo = zr - h * se\n        rlo = xp.tanh(zlo)\n        rhi = ones\n    mask = n <= 3\n    rlo = xpx.at(rlo)[mask].set(-1)\n    rhi = xpx.at(rhi)[mask].set(1)\n    rlo = rlo[()] if rlo.ndim == 0 else rlo\n    rhi = rhi[()] if rhi.ndim == 0 else rhi\n    return ConfidenceInterval(low=rlo, high=rhi)",
    "docstring": "Compute the confidence interval for Pearson's R. Fisher's transformation is used to compute the confidence interval (",
    "type": "function",
    "file_path": "scipy\\scipy\\stats\\_stats_py.py",
    "ast_data": "FunctionDef name:_pearsonr_fisher_ci arg:r arg:n arg:confidence_level arg:alternative arguments arg arg arg arg Assign Call Assign Call Assign Call Assign Call With Call Assign Call Assign Call If Compare Assign Call Assign Assign Assign Call Assign Call If Compare Assign Call Assign Assign Call Assign Assign Call Assign Assign Call Assign Assign Compare Assign Call Call Assign Call Call Assign Compare Assign Compare Return return:yes Call"
  },
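The two-sided branch above reduces to the classic Fisher-z interval; a plain NumPy/SciPy sketch with illustrative values for r, n, and the confidence level:

```python
import numpy as np
from scipy import special

r, n, confidence_level = 0.7, 50, 0.95
zr = np.arctanh(r)                              # Fisher transformation
se = np.sqrt(1 / (n - 3))
h = special.ndtri(0.5 + confidence_level / 2)   # two-sided critical value
lo, hi = np.tanh(zr - h * se), np.tanh(zr + h * se)
print(lo, hi)
```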
  {
    "library": "tensorflow",
    "name": "account_displayed_op_only",
    "source_code": "def account_displayed_op_only(self, is_true):\n    self._options['account_displayed_op_only'] = is_true\n    return self",
    "docstring": "Whether only account the statistics of displayed profiler nodes. Args: is_true: If true, only account statistics of nodes eventually displayed by the outputs. Otherwise, a node's statistics are accounted by its parents as long as it's types match 'account_type_regexes', even if it is hidden from the output, say, by hide_name_regexes. Returns: self",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\profiler\\option_builder.py",
    "ast_data": "FunctionDef name:account_displayed_op_only arg:self arg:is_true arguments arg arg Assign Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_pre_forward",
    "source_code": "@no_type_check\ndef _pre_forward(state: _FSDPState, handle: Optional[FlatParamHandle], unshard_fn: Callable, module: nn.Module, args: tuple[Any, ...], kwargs: dict[str, Any]) -> tuple[tuple[Any, ...], dict[str, Any]]:\n    with torch.profiler.record_function('FullyShardedDataParallel._pre_forward'):\n        if handle and handle._training_state == HandleTrainingState.BACKWARD_PRE:\n            return (args, kwargs)\n        state.training_state = TrainingState.FORWARD_BACKWARD\n        state._exec_order_data.record_pre_forward(handle, module.training)\n        if handle:\n            handle._training_state = HandleTrainingState.FORWARD\n        if unshard_fn is not None:\n            unshard_fn(state, handle)\n        _register_post_backward_hook(state, handle)\n        if handle and handle._offload_params and (handle.flat_param._cpu_grad is None):\n            handle.flat_param._cpu_grad = torch.zeros_like(handle.flat_param._local_shard, device=torch.device('cpu')).pin_memory()\n        should_cast_forward_inputs = state._handle and (not state._handle._force_full_precision)\n        if should_cast_forward_inputs and state.mixed_precision.cast_forward_inputs:\n            input_dtype: Optional[torch.dtype] = state.mixed_precision.param_dtype\n            args, kwargs = _cast_forward_inputs(input_dtype, *args, **kwargs)\n        _register_post_backward_reshard_only_hook(state, handle, args, kwargs)\n        return (args, kwargs)",
    "docstring": "Runs the pre-forward logic. This includes an opportunity to unshard currently sharded parameters such as those for the current forward and registering post-backward hooks for these current parameters. This function also converts forward ``.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\fsdp\\_runtime_utils.py",
    "ast_data": "FunctionDef name:_pre_forward arg:state arg:handle arg:unshard_fn arg:module arg:args arg:kwargs arguments arg arg arg arg arg arg With Call If BoolOp Compare Return return:yes Assign Call If Assign If Compare Call Call If BoolOp Compare Assign Call Call Call Assign BoolOp If BoolOp Assign Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "identity",
    "source_code": "def identity(self, x: T) -> T:\n    raise NotImplementedError",
    "docstring": "Returns x as is. This is used to trigger CSE.",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\ops_handler.py",
    "ast_data": "FunctionDef name:identity arg:self arg:x arguments arg arg Raise"
  },
  {
    "library": "pandas",
    "name": "_ensure_arraylike",
    "source_code": "def _ensure_arraylike(values, func_name: str) -> ArrayLike:\n    if not isinstance(values, (ABCIndex, ABCSeries, ABCExtensionArray, np.ndarray, ABCNumpyExtensionArray)):\n        if func_name != 'isin-targets':\n            raise TypeError(f'{func_name} requires a Series, Index, ExtensionArray, np.ndarray or NumpyExtensionArray got {type(values).__name__}.')\n        inferred = lib.infer_dtype(values, skipna=False)\n        if inferred in ['mixed', 'string', 'mixed-integer']:\n            if isinstance(values, tuple):\n                values = list(values)\n            values = construct_1d_object_array_from_listlike(values)\n        else:\n            values = np.asarray(values)\n    return values",
    "docstring": "ensure that we are arraylike if not already",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\algorithms.py",
    "ast_data": "FunctionDef name:_ensure_arraylike arg:values arg:func_name arguments arg arg If Call If Compare Raise Call Call Assign Call If Compare If Call Assign Call Assign Call Assign Call Return return:yes"
  },
  {
    "library": "django",
    "name": "clean",
    "source_code": "def clean(self):\n    pass",
    "docstring": "Hook for doing any extra formset-wide cleaning after Form.clean() has been called on every form. Any ValidationError raised by this method will not be associated with a particular form; it will be accessible via formset.non_form_errors()",
    "type": "method",
    "file_path": "django\\django\\forms\\formsets.py",
    "ast_data": "FunctionDef name:clean arg:self arguments arg"
  },
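A hedged sketch of overriding this hook for formset-wide validation; `UniqueTitleFormSet` and `ArticleForm` are hypothetical:

```python
from django import forms
from django.forms import BaseFormSet

class ArticleForm(forms.Form):
    title = forms.CharField()

class UniqueTitleFormSet(BaseFormSet):
    def clean(self):
        # Errors raised here are not tied to any single form; they are
        # reported through formset.non_form_errors().
        super().clean()
        titles = [f.cleaned_data.get("title")
                  for f in self.forms if f.cleaned_data]
        if len(titles) != len(set(titles)):
            raise forms.ValidationError("Articles must have distinct titles.")
```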
  {
    "library": "pytorch",
    "name": "local_shards",
    "source_code": "def local_shards(self) -> list[torch.Tensor]:\n    return self._local_shards",
    "docstring": "Returns a list of :class:`torch.Tensor' corresponding to the local shards for this rank. Returns an empty list if the current rank does not host any shards for this Tensor.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\tensor\\_shards_wrapper.py",
    "ast_data": "FunctionDef name:local_shards arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, function_type, default_values, is_pure=False, name=None, jit_compile=None):\n    self._function_type = function_type\n    self._default_values = default_values\n    self._fullargspec = to_fullargspec(function_type, default_values)\n    self._is_pure = is_pure\n    self._jit_compile = jit_compile\n    self._name = name or 'f'\n    self._input_signature = to_input_signature(function_type)",
    "docstring": "Constructs a FunctionSpec describing a python function. Args: function_type: A FunctionType describing the python function signature. default_values: Dictionary mapping parameter names to default values. is_pure: if True all input arguments (including variables and constants) will be converted to tensors and no variable changes allowed. name: Name of the function jit_compile: see .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\polymorphic_function\\function_type_utils.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:function_type arg:default_values arg:is_pure arg:name arg:jit_compile arguments arg arg arg arg arg arg Assign Assign Assign Call Assign Assign Assign BoolOp Assign Call"
  },
  {
    "library": "matplotlib",
    "name": "show",
    "source_code": "def show(*args, **kwargs) -> None:\n    _warn_if_gui_out_of_main_thread()\n    return _get_backend_mod().show(*args, **kwargs)",
    "docstring": "Display all open figures. Parameters ---------- block : bool, optional Whether to wait for all figures to be closed before returning. If block and run the GUI main loop until all figure windows are closed. If ensure that all figure windows are displayed and return immediately. In this case, you are responsible for ensuring that the event loop is running to have responsive figures. Defaults to True in non-interactive mode and to False in interactive mode (see ). See Also -------- ion : Enable interactive mode, which shows / updates the figure after every plotting command, so that calling `.pyplot.savefig.pyplot.show.pyplot.savefig.Figure.savefig` at the end of every cell by default. Thus, you usually don't have to call it explicitly there.",
    "type": "function",
    "file_path": "matplotlib\\lib\\matplotlib\\pyplot.py",
    "ast_data": "FunctionDef name:show arguments arg arg Call Return return:yes Call Call"
  },
  {
    "library": "matplotlib",
    "name": "get_backend",
    "source_code": "def get_backend(*, auto_select=True):\n    if auto_select:\n        return rcParams['backend']\n    else:\n        backend = rcParams._get('backend')\n        if backend is rcsetup._auto_backend_sentinel:\n            return None\n        else:\n            return backend",
    "docstring": "Return the name of the current backend. Parameters ---------- auto_select : bool, default: True Whether to trigger backend resolution if no backend has been selected so far. If True, this ensures that a valid backend is returned. If False, this returns None if no backend has been selected so far. .. versionadded:: 3.10 .. admonition:: Provisional The *auto_select* flag is provisional. It may be changed or removed without prior warning. See Also -------- matplotlib.use",
    "type": "function",
    "file_path": "matplotlib\\lib\\matplotlib\\__init__.py",
    "ast_data": "FunctionDef name:get_backend arguments arg If Return return:yes Assign Call If Compare Return return:no Return return:yes"
  },
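A usage sketch covering both modes of the signature above (the `auto_select` keyword is the provisional flag documented there):

```python
import matplotlib

print(matplotlib.get_backend())                   # resolves if needed, e.g. 'agg'
print(matplotlib.get_backend(auto_select=False))  # None if nothing selected yet
```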
  {
    "library": "tensorflow",
    "name": "_row_partitions_for_uniform_shape",
    "source_code": "def _row_partitions_for_uniform_shape(shape, rank):\n    shape_cumprod = math_ops.cumprod(shape[:rank])\n    return tuple([RowPartition.from_uniform_row_length(uniform_row_length=shape[i + 1], nvals=shape_cumprod[i + 1], nrows=shape_cumprod[i]) for i in range(rank - 1)])",
    "docstring": "Returns row partitions for the given shape Tensor. Args: shape: A vector describing a uniform shape. rank: The number of dimensions to generate row partitions for Returns: A list of (rank-1) s with uniform row length.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\structured\\structured_tensor.py",
    "ast_data": "FunctionDef name:_row_partitions_for_uniform_shape arg:shape arg:rank arguments arg arg Assign Call Return return:yes Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "ambiguous",
    "source_code": "def ambiguous(a, b):\n    return consistent(a, b) and (not (supercedes(a, b) or supercedes(b, a)))",
    "docstring": "A is consistent with B but neither is strictly more specific",
    "type": "function",
    "file_path": "pytorch\\torch\\fx\\experimental\\unification\\multipledispatch\\conflict.py",
    "ast_data": "FunctionDef name:ambiguous arg:a arg:b arguments arg arg Return return:yes BoolOp Call BoolOp Call Call"
  },
  {
    "library": "pytorch",
    "name": "first_two_eq",
    "source_code": "@register_refinement_rule(torch.nn.AdaptiveAvgPool2d)\n@register_refinement_rule(torch.nn.MaxPool2d)\ndef first_two_eq(n: Node):\n    res = []\n    assert isinstance(n.args[0], Node)\n    arg_type = n.args[0].type\n    if isinstance(arg_type, TensorType) and isinstance(n.type, TensorType):\n        args1 = arg_type.__args__\n        args2 = n.type.__args__\n        res = [Equality(args1[0], args2[0]), Equality(args1[1], args2[1])]\n    return res",
    "docstring": "For operations where the first two dimensions of the input and output shape are equal",
    "type": "function",
    "file_path": "pytorch\\torch\\fx\\experimental\\graph_gradual_typechecker.py",
    "ast_data": "FunctionDef name:first_two_eq arg:n arguments arg Assign Call Assign If BoolOp Call Call Assign Assign Assign Call Call Return return:yes Call Call"
  },
  {
    "library": "django",
    "name": "__init__",
    "source_code": "def __init__(self, use_caching=False):\n    self.receivers = []\n    self.lock = threading.Lock()\n    self.use_caching = use_caching\n    self.sender_receivers_cache = weakref.WeakKeyDictionary() if use_caching else {}\n    self._dead_receivers = False",
    "docstring": "Create a new signal.",
    "type": "method",
    "file_path": "django\\django\\dispatch\\dispatcher.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:use_caching arguments arg arg Assign Assign Call Assign Assign Call Assign"
  },
  {
    "library": "pytorch",
    "name": "_get_ignored_params",
    "source_code": "def _get_ignored_params(root_module: torch.nn.Module, ignored_modules: set[torch.nn.Module], ignored_parameters: Optional[Iterable[torch.nn.Parameter]]=None) -> set[torch.nn.Parameter]:\n    all_ignored_params: set[torch.nn.Parameter] = set()\n    params_in_ignored_modules = {p for m in ignored_modules for p in m.parameters() if not _is_fsdp_flattened(p)}\n    all_ignored_params.update(params_in_ignored_modules)\n    if ignored_parameters is not None:\n        params_in_ignored_parameters = {p for p in ignored_parameters if not _is_fsdp_flattened(p)}\n        all_ignored_params.update(params_in_ignored_parameters)\n    for submodule in root_module.modules():\n        optional_fsdp_state = _get_module_fsdp_state(submodule)\n        if optional_fsdp_state is not None:\n            assert hasattr(optional_fsdp_state, '_ignored_params')\n            all_ignored_params.update(optional_fsdp_state._ignored_params)\n    return all_ignored_params",
    "docstring": "Return the parameters of the modules in `FlatParameter` s are excluded from the result.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\fsdp\\_init_utils.py",
    "ast_data": "FunctionDef name:_get_ignored_params arg:root_module arg:ignored_modules arg:ignored_parameters arguments arg arg arg Call Assign Call Call Call If Compare Assign Call Call For Call Assign Call If Compare Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "instantiate",
    "source_code": "def instantiate(self, input_types):\n    key = _type_list_to_str(input_types)\n    defined = self._overload.get(key)\n    if not defined:\n        name = self._func_name\n        if name is not None:\n            name = '_'.join([name, key])\n        defined = _DefinedFunction(self._func, self._argnames, input_types, name, None, self._python_grad_func, out_names=self._out_names, **self._extra_kwargs)\n        _ = defined.name\n        if self._grad_func:\n            output_types = [dtypes.DType(_.type) for _ in defined._signature.output_arg]\n            defined._grad_func = self._grad_func.instantiate(input_types + output_types)\n        self._overload[key] = defined\n    return defined",
    "docstring": "Instantiate this function given input argument types. Args: input_types: A list of data types for the inputs. Returns: _DefinedFunction for the given input types.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\function.py",
    "ast_data": "FunctionDef name:instantiate arg:self arg:input_types arguments arg arg Assign Call Assign Call If Assign If Compare Assign Call Assign Call Assign If Assign Call Assign Call Assign Return return:yes"
  },
  {
    "library": "authlib",
    "name": "InteractionRequiredError",
    "source_code": "class InteractionRequiredError(OAuth2Error):\n    error = 'interaction_required'",
    "docstring": "The Authorization Server requires End-User interaction of some form to proceed. This error MAY be returned when the prompt parameter value in the Authentication Request is none, but the Authentication Request cannot be completed without displaying a user interface for End-User interaction.",
    "type": "class",
    "file_path": "authlib\\authlib\\oidc\\core\\errors.py",
    "ast_data": "ClassDef name:InteractionRequiredError Assign"
  },
  {
    "library": "pandas",
    "name": "asfreq",
    "source_code": "@final\ndef asfreq(self, fill_value=None):\n    return self._upsample('asfreq', fill_value=fill_value)",
    "docstring": "Return the values at the new freq, essentially a reindex. Parameters ---------- fill_value : scalar, optional Value to use for missing values, applied during upsampling (note this does not fill NaNs that already were present). Returns ------- DataFrame or Series Values at the specified freq. See Also -------- Series.asfreq: Convert TimeSeries to specified frequency. DataFrame.asfreq: Convert TimeSeries to specified frequency. Examples -------- >>> ser = pd.Series( ... [1, 2, 3, 4], ... index=pd.DatetimeIndex( ... [\"2023-01-01\", \"2023-01-31\", \"2023-02-01\", \"2023-02-28\"] ... ), ... ) >>> ser 2023-01-01 1 2023-01-31 2 2023-02-01 3 2023-02-28 4 dtype: int64 >>> ser.resample(\"MS\").asfreq() 2023-01-01 1 2023-02-01 3 Freq: MS, dtype: int64",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\resample.py",
    "ast_data": "FunctionDef name:asfreq arg:self arg:fill_value arguments arg arg Return return:yes Call"
  },
  {
    "library": "cherrypy",
    "name": "check_config_namespaces",
    "source_code": "def check_config_namespaces(self):\n    for sn, app in cherrypy.tree.apps.items():\n        if not isinstance(app, cherrypy.Application):\n            continue\n        self._known_ns(app)",
    "docstring": "Process config and warn on each unknown config namespace.",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\_cpchecker.py",
    "ast_data": "FunctionDef name:check_config_namespaces arg:self arguments arg For Call If Call Call"
  },
  {
    "library": "matplotlib",
    "name": "new_figure_manager",
    "source_code": "@classmethod\ndef new_figure_manager(cls, num, *args, **kwargs):\n    from matplotlib.figure import Figure\n    fig_cls = kwargs.pop('FigureClass', Figure)\n    fig = fig_cls(*args, **kwargs)\n    return cls.new_figure_manager_given_figure(num, fig)",
    "docstring": "Create a new figure manager instance.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\backend_bases.py",
    "ast_data": "FunctionDef name:new_figure_manager arg:cls arg:num arguments arg arg arg arg Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "_dense_fit",
    "source_code": "def _dense_fit(self, X, random_state):\n    if self.ignore_implicit_zeros:\n        warnings.warn(\"'ignore_implicit_zeros' takes effect only with sparse matrix. This parameter has no effect.\")\n    n_samples, n_features = X.shape\n    references = self.references_ * 100\n    if self.subsample is not None and self.subsample < n_samples:\n        X = resample(X, replace=False, n_samples=self.subsample, random_state=random_state)\n    self.quantiles_ = np.nanpercentile(X, references, axis=0)\n    self.quantiles_ = np.maximum.accumulate(self.quantiles_)",
    "docstring": "Compute percentiles for dense matrices. Parameters ---------- X : ndarray of shape (n_samples, n_features) The data used to scale along the features axis.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\preprocessing\\_data.py",
    "ast_data": "FunctionDef name:_dense_fit arg:self arg:X arg:random_state arguments arg arg arg If Call Assign Assign If BoolOp Compare Compare Assign Call Assign Call Assign Call"
  },
  {
    "library": "pandas",
    "name": "any_none",
    "source_code": "def any_none(*args) -> bool:\n    return any((arg is None for arg in args))",
    "docstring": "Returns a boolean indicating if any argument is None.",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\common.py",
    "ast_data": "FunctionDef name:any_none arguments arg Return return:yes Call Compare"
  },
  {
    "library": "matplotlib",
    "name": "__init__",
    "source_code": "def __init__(self, fig, pos, horizontal, vertical, aspect=None, anchor='C'):\n    self._fig = fig\n    self._pos = pos\n    self._horizontal = horizontal\n    self._vertical = vertical\n    self._anchor = anchor\n    self.set_anchor(anchor)\n    self._aspect = aspect\n    self._xrefindex = 0\n    self._yrefindex = 0\n    self._locator = None",
    "docstring": "Parameters ---------- fig : Figure pos : tuple of 4 floats Position of the rectangle that will be divided. horizontal : list of :mod: Sizes for horizontal division. vertical : list of :mod: Sizes for vertical division. aspect : bool, optional Whether overall rectangular area is reduced so that the relative part of the horizontal and vertical scales have the same scale. anchor : (float, float) or {'C', 'SW', 'S', 'SE', 'E', 'NE', 'N', 'NW', 'W'}, default: 'C' Placement of the reduced rectangle, when *aspect* is True.",
    "type": "method",
    "file_path": "matplotlib\\lib\\mpl_toolkits\\axes_grid1\\axes_divider.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:fig arg:pos arg:horizontal arg:vertical arg:aspect arg:anchor arguments arg arg arg arg arg arg arg Assign Assign Assign Assign Assign Call Assign Assign Assign Assign"
  },
  {
    "library": "scipy",
    "name": "DropWave",
    "source_code": "class DropWave(Benchmark):\n\n    def __init__(self, dimensions=2):\n        Benchmark.__init__(self, dimensions)\n        self._bounds = list(zip([-5.12] * self.N, [5.12] * self.N))\n        self.global_optimum = [[0 for _ in range(self.N)]]\n        self.fglob = -1.0\n\n    def fun(self, x, *args):\n        self.nfev += 1\n        norm_x = sum(x ** 2)\n        return -(1 + cos(12 * sqrt(norm_x))) / (0.5 * norm_x + 2)",
    "docstring": "DropWave objective function. This class defines the DropWave [1]_ global optimization problem. This is a multimodal minimization problem defined as follows: .. math:: f_{\\text{DropWave}}(x) = - \\frac{1 + \\cos\\left(12 \\sqrt{\\sum_{i=1}^{n} x_i^{2}}\\right)}{2 + 0.5 \\sum_{i=1}^{n} x_i^{2}} with :math: for :math:. *Global optimum*: :math: for :math: .. [1] Gavana, A. Global Optimization Benchmarks and AMPGO retrieved 2015",
    "type": "class",
    "file_path": "scipy\\benchmarks\\benchmarks\\go_benchmark_functions\\go_funcs_D.py",
    "ast_data": "ClassDef name:DropWave FunctionDef name:__init__ arg:self arg:dimensions arguments arg arg Call Assign Call Call Assign Call Assign FunctionDef name:fun arg:self arg:x arguments arg arg arg Assign Call Return return:yes Call Call"
  },
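A plain NumPy sketch of the DropWave formula above (independent of the benchmark class), confirming the global optimum value of -1 at the origin:

```python
import numpy as np

def drop_wave(x):
    # f(x) = -(1 + cos(12 * sqrt(sum(x_i^2)))) / (0.5 * sum(x_i^2) + 2)
    norm_x = np.sum(np.asarray(x, dtype=float) ** 2)
    return -(1 + np.cos(12 * np.sqrt(norm_x))) / (0.5 * norm_x + 2)

print(drop_wave([0.0, 0.0]))  # -1.0 at the global optimum
```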
  {
    "library": "scipy",
    "name": "__repr__",
    "source_code": "def __repr__(self):\n    return self.__class__.__name__ + repr_fmt % self._asdict()",
    "docstring": "Return a nicely formatted representation string",
    "type": "function",
    "file_path": "scipy\\scipy\\_lib\\_bunch.py",
    "ast_data": "FunctionDef name:__repr__ arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "_maybe_get_mask",
    "source_code": "def _maybe_get_mask(values: np.ndarray, skipna: bool, mask: npt.NDArray[np.bool_] | None) -> npt.NDArray[np.bool_] | None:\n    if mask is None:\n        if values.dtype.kind in 'biu':\n            return None\n        if skipna or values.dtype.kind in 'mM':\n            mask = isna(values)\n    return mask",
    "docstring": "Compute a mask if and only if necessary. This function will compute a mask iff it is necessary. Otherwise, return the provided mask (potentially None) when a mask does not need to be computed. A mask is never necessary if the values array is of boolean or integer dtypes, as these are incapable of storing NaNs. If passing a NaN-capable dtype that is interpretable as either boolean or integer data (eg, timedelta64), a mask must be provided. If the skipna parameter is False, a new mask will not be computed. The mask is computed using isna() by default. Setting invert=True selects notna() as the masking function. Parameters ---------- values : ndarray input array to potentially compute mask for skipna : bool boolean for whether NaNs should be skipped mask : Optional[ndarray] nan-mask if known Returns ------- Optional[np.ndarray[bool]]",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\nanops.py",
    "ast_data": "FunctionDef name:_maybe_get_mask arg:values arg:skipna arg:mask arguments arg arg arg If Compare If Compare Return return:no If BoolOp Compare Assign Call Return return:yes"
  },
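The helper `_maybe_get_mask` is private to pandas; the sketch below illustrates its two main branches using only the public `pandas.isna` and NumPy dtype kinds:

```python
import numpy as np
import pandas as pd

# Boolean/integer arrays cannot hold NaN, so no mask is ever needed.
ints = np.array([1, 2, 3])
print(ints.dtype.kind in "biu")  # True -> the helper returns None

# For NaN-capable dtypes with skipna=True, the mask falls back to isna(values).
floats = np.array([1.0, np.nan, 3.0])
print(pd.isna(floats))  # [False  True False]
```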
  {
    "library": "tensorflow",
    "name": "_SpenceGrad",
    "source_code": "@ops.RegisterGradient('Spence')\ndef _SpenceGrad(op: ops.Operation, grad):\n    x = op.inputs[0]\n    with ops.control_dependencies([grad]):\n        partial_x = math_ops.log(x) / (1 - x)\n        partial_x = array_ops.where(math_ops.equal(x, 1.0), -array_ops.ones_like(x), partial_x)\n        return grad * partial_x",
    "docstring": "Compute gradient of spence(x) with respect to its argument.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\math_grad.py",
    "ast_data": "FunctionDef name:_SpenceGrad arg:op arg:grad arguments arg arg Assign With Call Assign Call Assign Call Call Call Return return:yes Call"
  },
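A quick numerical check of the closed-form partial derivative `log(x) / (1 - x)` used above, assuming SciPy's `scipy.special.spence` follows the same convention as TensorFlow's `Spence` op:

```python
import numpy as np
from scipy.special import spence

x, eps = 0.5, 1e-6
numeric = (spence(x + eps) - spence(x - eps)) / (2 * eps)  # central difference
analytic = np.log(x) / (1 - x)
print(numeric, analytic)  # both approximately -1.3863
```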
  {
    "library": "tensorflow",
    "name": "trace",
    "source_code": "@tf_export('linalg.trace', v1=['linalg.trace', 'trace'])\n@dispatch.add_dispatch_support\n@deprecation.deprecated_endpoints('trace')\ndef trace(x, name=None):\n    with ops.name_scope(name, 'Trace', [x]) as name:\n        x = ops.convert_to_tensor(x, name='x')\n        return reduce_sum(array_ops.matrix_diag_part(x), [-1], name=name)",
    "docstring": "Compute the trace of a tensor . returns the sum along the main diagonal of each inner-most matrix in x. If x is of rank with shape , then output is a tensor of rank with dimensions where For example: Args: x: tensor. name: A name for the operation (optional). Returns: The trace of input tensor.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\math_ops.py",
    "ast_data": "FunctionDef name:trace arg:x arg:name arguments arg arg With Call Assign Call Return return:yes Call Call Call Call"
  },
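A short usage sketch of the public endpoint `tf.linalg.trace`, assuming TensorFlow is installed; the trace of each inner-most matrix is the sum of its main diagonal:

```python
import tensorflow as tf

x = tf.constant([[1.0, 2.0],
                 [3.0, 4.0]])
print(tf.linalg.trace(x))  # 5.0, i.e. 1 + 4
```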
  {
    "library": "pandas",
    "name": "count",
    "source_code": "@forbid_nonstring_types(['bytes'])\ndef count(self, pat, flags: int=0):\n    result = self._data.array._str_count(pat, flags)\n    return self._wrap_result(result, returns_string=False)",
    "docstring": "Count occurrences of pattern in each string of the Series/Index. This function is used to count the number of times a particular regex pattern is repeated in each of the string elements of the :class:. Parameters ---------- pat : str Valid regular expression. flags : int, default 0, meaning no flags Flags for the module. For a complete list, _. Returns ------- Series or Index Same type as the calling object containing the integer counts. See Also -------- re : Standard library module for regular expressions. str.count : Standard library version, without regular expression support. Notes ----- Some characters need to be escaped when passing in . eg. `` to find the literal dollar sign. >>> s = pd.Series([\"$\", \"B\", \"Aab$\", \"$$ca\", \"C$B$\", \"cat\"]) >>> s.str.count(\"\\\\$\") 0 1 1 0 2 1 3 2 4 2 5 0 dtype: int64 This is also available on Index >>> pd.Index([\"A\", \"A\", \"Aaba\", \"cat\"]).str.count(\"a\") Index([0, 0, 2, 1], dtype='int64')",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\strings\\accessor.py",
    "ast_data": "FunctionDef name:count arg:self arg:pat arg:flags arguments arg arg arg Assign Call Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "transform_kernels",
    "source_code": "def transform_kernels(kernels, func, n_gates):\n    return np.hstack([func(k) for k in np.hsplit(kernels, n_gates)])",
    "docstring": "Transforms kernel for each gate separately using given function. Args: kernels: Stacked array of kernels for individual gates. func: Function applied to kernel of each gate. n_gates: Number of gates (4 for LSTM, 3 for GRU). Returns: Stacked array of transformed kernels.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\saving\\hdf5_format.py",
    "ast_data": "FunctionDef name:transform_kernels arg:kernels arg:func arg:n_gates arguments arg arg arg Return return:yes Call Call Call"
  },
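A self-contained NumPy sketch of the same split-transform-stack pattern; the kernel shape and the per-gate transpose are illustrative assumptions (square per-gate blocks keep the shapes compatible):

```python
import numpy as np

def transform_kernels(kernels, func, n_gates):
    # Split columns into n_gates blocks, transform each, re-stack horizontally.
    return np.hstack([func(k) for k in np.hsplit(kernels, n_gates)])

kernels = np.arange(12).reshape(2, 6)  # stacked kernels for 3 gates (e.g. GRU)
out = transform_kernels(kernels, lambda k: k.T, n_gates=3)
print(out.shape)  # (2, 6)
```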
  {
    "library": "scipy",
    "name": "to_discrete",
    "source_code": "def to_discrete(self, dt, method='zoh', alpha=None):\n    return StateSpace(*cont2discrete((self.A, self.B, self.C, self.D), dt, method=method, alpha=alpha)[:-1], dt=dt)",
    "docstring": "Returns the discretized system. Parameters: See for details. Returns ------- sys: instance of and",
    "type": "method",
    "file_path": "scipy\\scipy\\signal\\_ltisys.py",
    "ast_data": "FunctionDef name:to_discrete arg:self arg:dt arg:method arg:alpha arguments arg arg arg arg Return return:yes Call Call"
  },
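A small usage sketch with a continuous-time integrator, using the public `scipy.signal.StateSpace`; under zero-order hold the discretized matrices are `A_d = 1` and `B_d = dt`:

```python
from scipy.signal import StateSpace

sys_c = StateSpace([[0.0]], [[1.0]], [[1.0]], [[0.0]])  # dx/dt = u, y = x
sys_d = sys_c.to_discrete(0.1, method='zoh')
print(sys_d.A, sys_d.B, sys_d.dt)  # [[1.]] [[0.1]] 0.1
```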
  {
    "library": "matplotlib",
    "name": "active_toggle",
    "source_code": "@property\ndef active_toggle(self):\n    return self._toggled",
    "docstring": "Currently toggled tools.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\backend_managers.py",
    "ast_data": "FunctionDef name:active_toggle arg:self arguments arg Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "mean_gamma_deviance",
    "source_code": "@validate_params({'y_true': ['array-like'], 'y_pred': ['array-like'], 'sample_weight': ['array-like', None]}, prefer_skip_nested_validation=True)\ndef mean_gamma_deviance(y_true, y_pred, *, sample_weight=None):\n    return mean_tweedie_deviance(y_true, y_pred, sample_weight=sample_weight, power=2)",
    "docstring": "Mean Gamma deviance regression loss. Gamma deviance is equivalent to the Tweedie deviance with the power parameter . It is invariant to scaling of the target variable, and measures relative errors. Read more in the :ref:. Parameters ---------- y_true : array-like of shape (n_samples,) Ground truth (correct) target values. Requires y_true > 0. y_pred : array-like of shape (n_samples,) Estimated target values. Requires y_pred > 0. sample_weight : array-like of shape (n_samples,), default=None Sample weights. Returns ------- loss : float A non-negative floating point value (the best value is 0.0). Examples -------- >>> from sklearn.metrics import mean_gamma_deviance >>> y_true = [2, 0.5, 1, 4] >>> y_pred = [0.5, 0.5, 2., 2.] >>> mean_gamma_deviance(y_true, y_pred) 1.0568...",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\metrics\\_regression.py",
    "ast_data": "FunctionDef name:mean_gamma_deviance arg:y_true arg:y_pred arguments arg arg arg Return return:yes Call Call"
  },
  {
    "library": "scrapy",
    "name": "referer_str",
    "source_code": "def referer_str(request: Request) -> str | None:\n    referrer = request.headers.get('Referer')\n    if referrer is None:\n        return referrer\n    return to_unicode(referrer, errors='replace')",
    "docstring": "Return Referer HTTP header suitable for logging.",
    "type": "function",
    "file_path": "scrapy\\scrapy\\utils\\request.py",
    "ast_data": "FunctionDef name:referer_str arg:request arguments arg Assign Call If Compare Return return:yes Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "HybridModel",
    "source_code": "class HybridModel(torch.nn.Module):\n\n    def __init__(self, emb_rref_list, device):\n        super().__init__()\n        self.emb_rref_list = emb_rref_list\n        fc1 = torch.nn.Linear(512, 256)\n        fc2 = torch.nn.Linear(256, 128)\n        relu = torch.nn.ReLU()\n        fc3 = torch.nn.Linear(128, 64)\n        fc4 = torch.nn.Linear(64, 32)\n        fc5 = torch.nn.Linear(32, 8)\n        sec = nn.Sequential(fc1, fc2, relu, fc3, fc4, fc5)\n        self.ddp = DDP(sec.to(device), device_ids=[device])\n        self.device = device\n\n    def forward(self, indices, offsets):\n        emb_lookups = []\n        for emb_rref in self.emb_rref_list:\n            emb_lookups.append(emb_rref.rpc_sync().forward(indices, offsets))\n            emb_lookups_cat = torch.cat(emb_lookups, dim=1)\n        assert NUM_PS * EMBEDDING_DIM >= 512\n        dim_normalizer = int(NUM_PS * EMBEDDING_DIM / 512)\n        emb_lookups_reshaped = emb_lookups_cat.reshape([emb_lookups_cat.shape[0] * dim_normalizer, 512])\n        return self.ddp(emb_lookups_reshaped)",
    "docstring": "The model consists of a sparse part and a dense part. The dense part is an nn.Linear module that is replicated across all trainers using DistributedDataParallel. The sparse part has nn.EmbeddingBags stored on multiple parameter servers. The model holds a Remote Reference to the embedding tables on the parameter servers.",
    "type": "class",
    "file_path": "pytorch\\torch\\distributed\\benchmarks\\benchmark_ddp_rpc.py",
    "ast_data": "ClassDef name:HybridModel FunctionDef name:__init__ arg:self arg:emb_rref_list arg:device arguments arg arg arg Call Call Assign Assign Call Assign Call Assign Call Assign Call Assign Call Assign Call Assign Call Assign Call Call Assign FunctionDef name:forward arg:self arg:indices arg:offsets arguments arg arg arg Assign For Call Call Call Assign Call Compare Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "FunctionVisitor",
    "source_code": "class FunctionVisitor(transformer.Base):\n\n    def __init__(self, source_info, graphs, resolver):\n        super(FunctionVisitor, self).__init__(source_info)\n        self.graphs = graphs\n        self.resolver = resolver\n\n    def visit_FunctionDef(self, node):\n        subgraph = self.graphs[node]\n        scope = anno.getanno(node, annos.NodeAnno.ARGS_AND_BODY_SCOPE)\n        closure_types = anno.getanno(node, anno.Static.CLOSURE_TYPES, {})\n        analyzer = Analyzer(subgraph, self.resolver, self.ctx.info.namespace, scope, closure_types)\n        analyzer.visit_forward()\n        node.body = self.visit_block(node.body)\n        return node",
    "docstring": "AST visitor that applies type inference to each function separately.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\autograph\\pyct\\static_analysis\\type_inference.py",
    "ast_data": "ClassDef name:FunctionVisitor FunctionDef name:__init__ arg:self arg:source_info arg:graphs arg:resolver arguments arg arg arg arg Call Call Assign Assign FunctionDef name:visit_FunctionDef arg:self arg:node arguments arg arg Assign Assign Call Assign Call Assign Call Call Assign Call Return return:yes"
  },
  {
    "library": "django",
    "name": "get_redirect_url",
    "source_code": "def get_redirect_url(self, *args, **kwargs):\n    if self.url:\n        url = self.url % kwargs\n    elif self.pattern_name:\n        url = reverse(self.pattern_name, args=args, kwargs=kwargs)\n    else:\n        return None\n    args = self.request.META.get('QUERY_STRING', '')\n    if args and self.query_string:\n        url = '%s?%s' % (url, args)\n    return url",
    "docstring": "Return the URL redirect to. Keyword arguments from the URL pattern match generating the redirect request are provided as kwargs to this method.",
    "type": "method",
    "file_path": "django\\django\\views\\generic\\base.py",
    "ast_data": "FunctionDef name:get_redirect_url arg:self arguments arg arg arg If Assign If Assign Call Return return:no Assign Call If BoolOp Assign Return return:yes"
  },
  {
    "library": "numpy",
    "name": "get_masked_subclass",
    "source_code": "def get_masked_subclass(*arrays):\n    if len(arrays) == 1:\n        arr = arrays[0]\n        if isinstance(arr, MaskedArray):\n            rcls = type(arr)\n        else:\n            rcls = MaskedArray\n    else:\n        arrcls = [type(a) for a in arrays]\n        rcls = arrcls[0]\n        if not issubclass(rcls, MaskedArray):\n            rcls = MaskedArray\n        for cls in arrcls[1:]:\n            if issubclass(cls, rcls):\n                rcls = cls\n    if rcls.__name__ == 'MaskedConstant':\n        return MaskedArray\n    return rcls",
    "docstring": "Return the youngest subclass of MaskedArray from a list of (masked) arrays. In case of siblings, the first listed takes over.",
    "type": "function",
    "file_path": "numpy\\numpy\\ma\\core.py",
    "ast_data": "FunctionDef name:get_masked_subclass arguments arg If Compare Call Assign If Call Assign Call Assign Assign Call Assign If Call Assign For If Call Assign If Compare Return return:yes Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "_check_exclusionary_keywords",
    "source_code": "@staticmethod\ndef _check_exclusionary_keywords(colorizer, **kwargs):\n    if colorizer is not None:\n        if any([val is not None for val in kwargs.values()]):\n            raise ValueError('The `colorizer` keyword cannot be used simultaneously with any of the following keywords: ' + ', '.join((f'`{key}`' for key in kwargs.keys())))",
    "docstring": "Raises a ValueError if any kwarg is not None while colorizer is not None",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\colorizer.py",
    "ast_data": "FunctionDef name:_check_exclusionary_keywords arg:colorizer arguments arg arg If Compare If Call Compare Call Raise Call Call Call"
  },
  {
    "library": "scipy",
    "name": "parse_attribute",
    "source_code": "@classmethod\ndef parse_attribute(cls, name, attr_string):\n    attr_string = attr_string.lower().strip()\n    if attr_string[:len('numeric')] == 'numeric' or attr_string[:len('int')] == 'int' or attr_string[:len('real')] == 'real':\n        return cls(name)\n    else:\n        return None",
    "docstring": "Parse the attribute line if it knows how. Returns the parsed attribute, or None. For numeric attributes, the attribute string would be like 'numeric' or 'int' or 'real'.",
    "type": "method",
    "file_path": "scipy\\scipy\\io\\arff\\_arffread.py",
    "ast_data": "FunctionDef name:parse_attribute arg:cls arg:name arg:attr_string arguments arg arg arg Assign Call Call If BoolOp Compare Call Compare Call Compare Call Return return:yes Call Return return:no"
  },
  {
    "library": "numpy",
    "name": "check_gcc_variable_attribute",
    "source_code": "def check_gcc_variable_attribute(cmd, attribute):\n    cmd._check_compiler()\n    body = textwrap.dedent('\\n        #pragma GCC diagnostic error \"-Wattributes\"\\n        #pragma clang diagnostic error \"-Wattributes\"\\n\\n        int %s foo;\\n\\n        int\\n        main()\\n        {\\n            return 0;\\n        }\\n        ') % (attribute,)\n    return cmd.try_compile(body, None, None) != 0",
    "docstring": "Return True if the given variable attribute is supported.",
    "type": "function",
    "file_path": "numpy\\numpy\\distutils\\command\\autodist.py",
    "ast_data": "FunctionDef name:check_gcc_variable_attribute arg:cmd arg:attribute arguments arg arg Call Assign Call Return return:yes Compare Call"
  },
  {
    "library": "cherrypy",
    "name": "index",
    "source_code": "@cherrypy.expose\ndef index(self):\n    return 'Hello, world!'",
    "docstring": "Render the index page HTML content.",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\lib\\gctools.py",
    "ast_data": "FunctionDef name:index arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_FCLinearWrapper",
    "source_code": "class _FCLinearWrapper(base.Layer):\n\n    def __init__(self, feature_column, units=1, sparse_combiner='sum', weight_collections=None, trainable=True, name=None, **kwargs):\n        super(_FCLinearWrapper, self).__init__(trainable=trainable, name=name, **kwargs)\n        self._feature_column = feature_column\n        self._units = units\n        self._sparse_combiner = sparse_combiner\n        self._weight_collections = weight_collections\n\n    def build(self, _):\n        if isinstance(self._feature_column, _CategoricalColumn):\n            weight = self.add_variable(name='weights', shape=(self._feature_column._num_buckets, self._units), initializer=init_ops.zeros_initializer(), trainable=self.trainable)\n        else:\n            num_elements = self._feature_column._variable_shape.num_elements()\n            weight = self.add_variable(name='weights', shape=[num_elements, self._units], initializer=init_ops.zeros_initializer(), trainable=self.trainable)\n        _add_to_collections(weight, self._weight_collections)\n        self._weight_var = weight\n        self.built = True\n\n    def call(self, builder):\n        weighted_sum = _create_weighted_sum(column=self._feature_column, builder=builder, units=self._units, sparse_combiner=self._sparse_combiner, weight_collections=self._weight_collections, trainable=self.trainable, weight_var=self._weight_var)\n        return weighted_sum",
    "docstring": "Wraps a _FeatureColumn in a layer for use in a linear model. See above.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\feature_column\\feature_column.py",
    "ast_data": "ClassDef name:_FCLinearWrapper FunctionDef name:__init__ arg:self arg:feature_column arg:units arg:sparse_combiner arg:weight_collections arg:trainable arg:name arguments arg arg arg arg arg arg arg arg Call Call Assign Assign Assign Assign FunctionDef name:build arg:self arg:_ arguments arg arg If Call Assign Call Call Assign Call Assign Call Call Call Assign Assign FunctionDef name:call arg:self arg:builder arguments arg arg Assign Call Return return:yes"
  },
  {
    "library": "django",
    "name": "extract",
    "source_code": "def extract(path, to_path):\n    with Archive(path) as archive:\n        archive.extract(to_path)",
    "docstring": "Unpack the tar or zip file at the specified path to the directory specified by to_path.",
    "type": "function",
    "file_path": "django\\django\\utils\\archive.py",
    "ast_data": "FunctionDef name:extract arg:path arg:to_path arguments arg arg With Call Call"
  },
  {
    "library": "pytorch",
    "name": "load_state_dict",
    "source_code": "def load_state_dict(self, state_dict):\n    self.optim.load_state_dict(state_dict)\n    if 'step' in state_dict:\n        self.averager.step = state_dict['step']\n    else:\n        warnings.warn('Loaded state dict does not contain a step counter for an averager. Setting step counter to 0.')\n        self.averager.step = 0",
    "docstring": "This is the same as :class: :meth:, but also restores model averager's step value to the one saved in the provided ``, it will raise a warning and initialize the model averager's step to 0.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\optim\\post_localSGD_optimizer.py",
    "ast_data": "FunctionDef name:load_state_dict arg:self arg:state_dict arguments arg arg Call If Compare Assign Call Assign"
  },
  {
    "library": "pandas",
    "name": "maybe_cast_pointwise_result",
    "source_code": "def maybe_cast_pointwise_result(result: ArrayLike, dtype: DtypeObj, numeric_only: bool=False, same_dtype: bool=True) -> ArrayLike:\n    if isinstance(dtype, ExtensionDtype):\n        cls = dtype.construct_array_type()\n        if same_dtype:\n            result = _maybe_cast_to_extension_array(cls, result, dtype=dtype)\n        else:\n            result = _maybe_cast_to_extension_array(cls, result)\n    elif numeric_only and dtype.kind in 'iufcb' or not numeric_only:\n        result = maybe_downcast_to_dtype(result, dtype)\n    return result",
    "docstring": "Try casting result of a pointwise operation back to the original dtype if appropriate. Parameters ---------- result : array-like Result to cast. dtype : np.dtype or ExtensionDtype Input Series from which result was calculated. numeric_only : bool, default False Whether to cast only numerics or datetimes as well. same_dtype : bool, default True Specify dtype when calling _from_sequence Returns ------- result : array-like result maybe casted to the dtype.",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\dtypes\\cast.py",
    "ast_data": "FunctionDef name:maybe_cast_pointwise_result arg:result arg:dtype arg:numeric_only arg:same_dtype arguments arg arg arg arg If Call Assign Call If Assign Call Assign Call If BoolOp BoolOp Compare Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "uses_star_kwargs_in_call",
    "source_code": "def uses_star_kwargs_in_call(node):\n    if sys.version_info[:2] >= (3, 5):\n        for keyword in node.keywords:\n            if keyword.arg is None:\n                return True\n    elif node.kwargs:\n        return True\n    return False",
    "docstring": "Check if an ast.Call node uses arbitrary-length **kwargs. This function works with the AST call node format of Python3.5+ as well as the different AST format of earlier versions of Python. Args: node: The ast.Call node to check arg values for. Returns: True if the node uses starred variadic positional args or keyword args. False if it does not.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\tools\\compatibility\\ast_edits.py",
    "ast_data": "FunctionDef name:uses_star_kwargs_in_call arg:node arguments arg If Compare For If Compare Return return:yes If Return return:yes Return return:yes"
  },
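On Python 3.5+, `**kwargs` in a call appears as an `ast.keyword` whose `arg` is `None`, which is exactly the condition the helper checks:

```python
import ast

starred = ast.parse("f(a, **extra)", mode="eval").body
print(any(kw.arg is None for kw in starred.keywords))  # True

plain = ast.parse("f(a, b=1)", mode="eval").body
print(any(kw.arg is None for kw in plain.keywords))  # False
```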
  {
    "library": "matplotlib",
    "name": "set_params",
    "source_code": "def set_params(self, minor=None, **kwargs):\n    if minor is not None:\n        self._minor = minor\n    super().set_params(**kwargs)",
    "docstring": "Set parameters within this locator.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\ticker.py",
    "ast_data": "FunctionDef name:set_params arg:self arg:minor arguments arg arg arg If Compare Assign Call Call"
  },
  {
    "library": "scrapy",
    "name": "iter_default_settings",
    "source_code": "def iter_default_settings() -> Iterable[tuple[str, Any]]:\n    for name in dir(default_settings):\n        if name.isupper():\n            yield (name, getattr(default_settings, name))",
    "docstring": "Return the default settings as an iterator of (name, value) tuples",
    "type": "function",
    "file_path": "scrapy\\scrapy\\settings\\__init__.py",
    "ast_data": "FunctionDef name:iter_default_settings arguments For Call If Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_maybe_add_arg_names",
    "source_code": "def _maybe_add_arg_names(self, node, full_name):\n    function_reorders = self._api_change_spec.function_reorders\n    if full_name in function_reorders:\n        if uses_star_args_in_call(node):\n            self.add_log(WARNING, node.lineno, node.col_offset, '(Manual check required) upgrading %s may require re-ordering the call arguments, but it was passed variable-length positional *args. The upgrade script cannot handle these automatically.' % full_name)\n        reordered = function_reorders[full_name]\n        new_args = []\n        new_keywords = []\n        idx = 0\n        for arg in node.args:\n            if sys.version_info[:2] >= (3, 5) and isinstance(arg, ast.Starred):\n                continue\n            keyword_arg = reordered[idx]\n            if keyword_arg:\n                new_keywords.append(ast.keyword(arg=keyword_arg, value=arg))\n            else:\n                new_args.append(arg)\n            idx += 1\n        if new_keywords:\n            self.add_log(INFO, node.lineno, node.col_offset, 'Added keywords to args of function %r' % full_name)\n            node.args = new_args\n            node.keywords = new_keywords + (node.keywords or [])\n            return True\n    return False",
    "docstring": "Make args into keyword args if function called full_name requires it.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\tools\\compatibility\\ast_edits.py",
    "ast_data": "FunctionDef name:_maybe_add_arg_names arg:self arg:node arg:full_name arguments arg arg arg Assign If Compare If Call Call Assign Assign Assign Assign For If BoolOp Compare Call Assign If Call Call Call If Call Assign Assign BoolOp Return return:yes Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "delete_session_tensor",
    "source_code": "@tf_export(v1=['delete_session_tensor'])\ndef delete_session_tensor(handle, name=None):\n    handle_device = TensorHandle._get_device_name(handle)\n    with ops.device(handle_device):\n        holder = array_ops.placeholder(dtypes.string)\n        deleter = gen_data_flow_ops.delete_session_tensor(holder, name=name)\n    return (holder, deleter)",
    "docstring": "Delete the tensor for the given tensor handle. This is EXPERIMENTAL and subject to change. Delete the tensor of a given tensor handle. The tensor is produced in a previous run() and stored in the state of the session. Args: handle: The string representation of a persistent tensor handle. name: Optional name prefix for the return tensor. Returns: A pair of graph elements. The first is a placeholder for feeding a tensor handle and the second is a deletion operation.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\session_ops.py",
    "ast_data": "FunctionDef name:delete_session_tensor arg:handle arg:name arguments arg arg Assign Call With Call Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "logsf",
    "source_code": "def logsf(self, x, *args, **kwds):\n    args, loc, scale = self._parse_args(*args, **kwds)\n    x, loc, scale = map(asarray, (x, loc, scale))\n    args = tuple(map(asarray, args))\n    _a, _b = self._get_support(*args)\n    dtyp = np.promote_types(x.dtype, np.float64)\n    x = np.asarray((x - loc) / scale, dtype=dtyp)\n    cond0 = self._argcheck(*args) & (scale > 0)\n    cond1 = self._open_support_mask(x, *args) & (scale > 0)\n    cond2 = cond0 & (x <= _a)\n    cond = cond0 & cond1\n    output = empty(shape(cond), dtyp)\n    output.fill(-inf)\n    place(output, 1 - cond0 + np.isnan(x), self.badvalue)\n    place(output, cond2, 0.0)\n    if np.any(cond):\n        goodargs = argsreduce(cond, *(x,) + args)\n        place(output, cond, self._logsf(*goodargs))\n    if output.ndim == 0:\n        return output[()]\n    return output",
    "docstring": "Log of the survival function of the given RV. Returns the log of the \"survival function,\" defined as (1 - ), evaluated at . Parameters ---------- x : array_like quantiles arg1, arg2, arg3,... : array_like The shape parameter(s) for the distribution (see docstring of the instance object for more information) loc : array_like, optional location parameter (default=0) scale : array_like, optional scale parameter (default=1) Returns ------- logsf : ndarray Log of the survival function evaluated at .",
    "type": "method",
    "file_path": "scipy\\scipy\\stats\\_distn_infrastructure.py",
    "ast_data": "FunctionDef name:logsf arg:self arg:x arguments arg arg arg arg Assign Call Assign Call Assign Call Call Assign Call Assign Call Assign Call Assign Call Compare Assign Call Compare Assign Compare Assign Assign Call Call Call Call Call Call If Call Assign Call Call Call If Compare Return return:yes Return return:yes"
  },
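A usage sketch through the public `scipy.stats.norm` distribution: `logsf` agrees with `log(1 - cdf)` but is evaluated more stably in the tail:

```python
import numpy as np
from scipy.stats import norm

x = 2.0
print(norm.logsf(x))            # -3.7824...
print(np.log(1 - norm.cdf(x)))  # same value, but loses precision far in the tail
```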
  {
    "library": "matplotlib",
    "name": "_gen_starting_points",
    "source_code": "def _gen_starting_points(shape):\n    ny, nx = shape\n    xfirst = 0\n    yfirst = 1\n    xlast = nx - 1\n    ylast = ny - 1\n    x, y = (0, 0)\n    direction = 'right'\n    for i in range(nx * ny):\n        yield (x, y)\n        if direction == 'right':\n            x += 1\n            if x >= xlast:\n                xlast -= 1\n                direction = 'up'\n        elif direction == 'up':\n            y += 1\n            if y >= ylast:\n                ylast -= 1\n                direction = 'left'\n        elif direction == 'left':\n            x -= 1\n            if x <= xfirst:\n                xfirst += 1\n                direction = 'down'\n        elif direction == 'down':\n            y -= 1\n            if y <= yfirst:\n                yfirst += 1\n                direction = 'right'",
    "docstring": "Yield starting points for streamlines. Trying points on the boundary first gives higher quality streamlines. This algorithm starts with a point on the mask corner and spirals inward. This algorithm is inefficient, but fast compared to rest of streamplot.",
    "type": "function",
    "file_path": "matplotlib\\lib\\matplotlib\\streamplot.py",
    "ast_data": "FunctionDef name:_gen_starting_points arg:shape arguments arg Assign Assign Assign Assign Assign Assign Assign For Call If Compare If Compare Assign If Compare If Compare Assign If Compare If Compare Assign If Compare If Compare Assign"
  },
  {
    "library": "pytorch",
    "name": "get_offset",
    "source_code": "def get_offset(self) -> sympy.Expr:\n    return sympy_subs(self.index, dict.fromkeys(self.var_names, 0))",
    "docstring": "Return the offset by setting every variable to be 0.",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\dependencies.py",
    "ast_data": "FunctionDef name:get_offset arg:self arguments arg Return return:yes Call Call"
  },
  {
    "library": "pandas",
    "name": "validate_dtype_freq",
    "source_code": "def validate_dtype_freq(dtype, freq: BaseOffsetT | BaseOffset | timedelta | str | None) -> BaseOffsetT:\n    if freq is not None:\n        freq = to_offset(freq, is_period=True)\n    if dtype is not None:\n        dtype = pandas_dtype(dtype)\n        if not isinstance(dtype, PeriodDtype):\n            raise ValueError('dtype must be PeriodDtype')\n        if freq is None:\n            freq = dtype.freq\n        elif freq != dtype.freq:\n            raise IncompatibleFrequency('specified freq and dtype are different')\n    return freq",
    "docstring": "If both a dtype and a freq are available, ensure they match. If only dtype is available, extract the implied freq. Parameters ---------- dtype : dtype freq : DateOffset or None Returns ------- freq : DateOffset Raises ------ ValueError : non-period dtype IncompatibleFrequency : mismatch between dtype and freq",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\arrays\\period.py",
    "ast_data": "FunctionDef name:validate_dtype_freq arg:dtype arg:freq arguments arg arg If Compare Assign Call If Compare Assign Call If Call Raise Call If Compare Assign If Compare Raise Call Return return:yes"
  },
  {
    "library": "authlib",
    "name": "RegistrationNotSupportedError",
    "source_code": "class RegistrationNotSupportedError(OAuth2Error):\n    error = 'registration_not_supported'",
    "docstring": "The OP does not support use of the registration parameter.",
    "type": "class",
    "file_path": "authlib\\authlib\\oidc\\core\\errors.py",
    "ast_data": "ClassDef name:RegistrationNotSupportedError Assign"
  },
  {
    "library": "tensorflow",
    "name": "_add_unique_metric_name",
    "source_code": "def _add_unique_metric_name(self, metric_name, metric_fn, output_index):\n    if len(self.output_names) > 1:\n        if not getattr(metric_fn, '_from_serialized', False):\n            metric_name = '%s_%s' % (self.output_names[output_index], metric_name)\n    j = 1\n    base_metric_name = metric_name\n    while metric_name in self.metrics_names:\n        metric_name = '%s_%d' % (base_metric_name, j)\n        j += 1\n    return metric_name",
    "docstring": "Makes the metric name unique. If there are multiple outputs for which the metrics are calculated, the metric names have to be made unique by appending an integer. Args: metric_name: Metric name that corresponds to the metric specified by the user. For example: 'acc'. metric_fn: The Metric object. output_index: The index of the model output for which the metric name is being added. Returns: string, name of the model's unique metric name",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\training_v1.py",
    "ast_data": "FunctionDef name:_add_unique_metric_name arg:self arg:metric_name arg:metric_fn arg:output_index arguments arg arg arg arg If Compare Call If Call Assign Assign Assign While Compare Assign Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "export_memory_timeline",
    "source_code": "def export_memory_timeline(self, path, device_str) -> None:\n    times, sizes = self._coalesce_timeline(device_str)\n    import json\n    with open(path, 'w') as f:\n        json.dump([times, sizes], f)",
    "docstring": "Saves the memory timeline as [times, sizes by category] as a JSON formatted file to the given path for the given device.",
    "type": "method",
    "file_path": "pytorch\\torch\\profiler\\_memory_profiler.py",
    "ast_data": "FunctionDef name:export_memory_timeline arg:self arg:path arg:device_str arguments arg arg arg Assign Call With Call Call"
  },
  {
    "library": "tensorflow",
    "name": "object_name",
    "source_code": "def object_name(self) -> str:\n    return None",
    "docstring": "Returns the local name of the object being restored. Override this method when the local name of object is different than in the checkpoint.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\checkpoint\\checkpoint_adapter.py",
    "ast_data": "FunctionDef name:object_name arg:self arguments arg Return return:no"
  },
  {
    "library": "scikit-learn",
    "name": "clear_data_home",
    "source_code": "@validate_params({'data_home': [str, os.PathLike, None]}, prefer_skip_nested_validation=True)\ndef clear_data_home(data_home=None):\n    data_home = get_data_home(data_home)\n    shutil.rmtree(data_home)",
    "docstring": "Delete all the content of the data home cache. Parameters ---------- data_home : str or path-like, default=None The path to scikit-learn data directory. If , the default path is . Examples -------- >>> from sklearn.datasets import clear_data_home >>> clear_data_home() # doctest: +SKIP",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\datasets\\_base.py",
    "ast_data": "FunctionDef name:clear_data_home arg:data_home arguments arg Assign Call Call Call"
  },
  {
    "library": "django",
    "name": "_get_repr_options",
    "source_code": "def _get_repr_options(self):\n    return {}",
    "docstring": "Return a dict of extra __init__() options to include in the repr.",
    "type": "method",
    "file_path": "django\\django\\db\\models\\expressions.py",
    "ast_data": "FunctionDef name:_get_repr_options arg:self arguments arg Return return:no"
  },
  {
    "library": "scipy",
    "name": "Interpolate",
    "source_code": "class Interpolate(Benchmark):\n    param_names = ['n_samples', 'module']\n    params = [[10, 50, 100], ['numpy', 'scipy']]\n\n    def setup(self, n_samples, module):\n        self.x = np.arange(n_samples)\n        self.y = np.exp(-self.x / 3.0)\n        self.z = np.random.normal(size=self.x.shape)\n\n    def time_interpolate(self, n_samples, module):\n        if module == 'scipy':\n            interpolate.interp1d(self.x, self.y, kind='linear')\n        else:\n            np.interp(self.z, self.x, self.y)",
    "docstring": "Linear Interpolate in scipy and numpy",
    "type": "class",
    "file_path": "scipy\\benchmarks\\benchmarks\\interpolate.py",
    "ast_data": "ClassDef name:Interpolate Assign Assign FunctionDef name:setup arg:self arg:n_samples arg:module arguments arg arg arg Assign Call Assign Call Assign Call FunctionDef name:time_interpolate arg:self arg:n_samples arg:module arguments arg arg arg If Compare Call Call"
  },
  {
    "library": "pytorch",
    "name": "step",
    "source_code": "@_use_grad_for_differentiable\ndef step(self, closure=None):\n    self._cuda_graph_capture_health_check()\n    loss = None\n    if closure is not None:\n        with torch.enable_grad():\n            loss = closure()\n    for group in self.param_groups:\n        params_with_grad: list[Tensor] = []\n        grads: list[Tensor] = []\n        mus: list[Tensor] = []\n        axs: list[Tensor] = []\n        etas: list[Tensor] = []\n        state_steps: list[Tensor] = []\n        has_complex = self._init_group(group, params_with_grad, grads, mus, axs, etas, state_steps)\n        asgd(params_with_grad, grads, axs, mus, etas, state_steps, lambd=group['lambd'], lr=group['lr'], t0=group['t0'], alpha=group['alpha'], weight_decay=group['weight_decay'], foreach=group['foreach'], maximize=group['maximize'], differentiable=group['differentiable'], capturable=group['capturable'], has_complex=has_complex)\n    return loss",
    "docstring": "Perform a single optimization step. Args: closure (Callable, optional): A closure that reevaluates the model and returns the loss.",
    "type": "method",
    "file_path": "pytorch\\torch\\optim\\asgd.py",
    "ast_data": "FunctionDef name:step arg:self arg:closure arguments arg arg Call Assign If Compare With Call Assign Call For Assign Call Call Return return:yes"
  },
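A minimal closure-based usage sketch for `torch.optim.ASGD`; the linear model and squared loss are illustrative assumptions:

```python
import torch

model = torch.nn.Linear(4, 1)
opt = torch.optim.ASGD(model.parameters(), lr=0.01)

def closure():
    opt.zero_grad()
    loss = model(torch.randn(8, 4)).pow(2).mean()
    loss.backward()
    return loss

loss = opt.step(closure)  # the closure is re-evaluated under torch.enable_grad()
print(loss.item())
```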
  {
    "library": "pytorch",
    "name": "info_dict",
    "source_code": "def info_dict(self) -> dict[str, Any]:\n    return {'backend': 'subgraph', 'kernel_name': self.name}",
    "docstring": "Information returned here is logged to the autotune log file when that is enabled.",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\codegen\\subgraph.py",
    "ast_data": "FunctionDef name:info_dict arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_from_nested_row_partitions",
    "source_code": "@classmethod\ndef _from_nested_row_partitions(cls, flat_values, nested_row_partitions, name=None, validate=True):\n    if not isinstance(validate, bool):\n        raise TypeError(f'Argument `validate` must have type bool. Received {validate}.')\n    if isinstance(nested_row_partitions, RowPartition):\n        raise TypeError(f'Argument `nested_row_partitions` must be a list of RowPartitions. Received {nested_row_partitions}.')\n    if isinstance(nested_row_partitions, tensor_lib.Tensor):\n        raise TypeError(f'Argument `nested_row_partitions` must be a list of RowPartitions. Received {nested_row_partitions}.')\n    with ops.name_scope(name, 'RaggedFromNestedRowPartitions', [flat_values] + list(nested_row_partitions)):\n        result = flat_values\n        for partition in reversed(nested_row_partitions):\n            result = cls._from_row_partition(result, partition, validate=validate)\n        return result",
    "docstring": "Creates a from a nested list of row partitions. Equivalent to: Args: flat_values: A potentially ragged tensor. nested_row_partitions: A list of row partitions. The th element is used as the row partition for the th ragged dimension. name: A name prefix for the RaggedTensor (optional). validate: If true, then use assertions to check that the arguments form a valid . Note: these assertions incur a runtime cost, since they must be checked for each tensor value. Returns: A (or if is empty).",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\ragged_tensor.py",
    "ast_data": "FunctionDef name:_from_nested_row_partitions arg:cls arg:flat_values arg:nested_row_partitions arg:name arg:validate arguments arg arg arg arg arg If Call Raise Call If Call Raise Call If Call Raise Call With Call Call Assign For Call Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "BetaWithSoftplusConcentration",
    "source_code": "class BetaWithSoftplusConcentration(Beta):\n\n    @deprecation.deprecated('2019-01-01', 'Use `tfd.Beta(tf.nn.softplus(concentration1), tf.nn.softplus(concentration2))` instead.', warn_once=True)\n    def __init__(self, concentration1, concentration0, validate_args=False, allow_nan_stats=True, name='BetaWithSoftplusConcentration'):\n        parameters = dict(locals())\n        with ops.name_scope(name, values=[concentration1, concentration0]) as name:\n            super(BetaWithSoftplusConcentration, self).__init__(concentration1=nn.softplus(concentration1, name='softplus_concentration1'), concentration0=nn.softplus(concentration0, name='softplus_concentration0'), validate_args=validate_args, allow_nan_stats=allow_nan_stats, name=name)\n        self._parameters = parameters",
    "docstring": "Beta with softplus transform of and .",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\distributions\\beta.py",
    "ast_data": "ClassDef name:BetaWithSoftplusConcentration FunctionDef name:__init__ arg:self arg:concentration1 arg:concentration0 arg:validate_args arg:allow_nan_stats arg:name arguments arg arg arg arg arg arg Assign Call Call With Call Call Call Call Call Assign Call"
  },
  {
    "library": "kornia",
    "name": "tz",
    "source_code": "@tz.setter\ndef tz(self, value: Union[Tensor, float]) -> 'PinholeCamera':\n    self.extrinsics[..., 2, -1] = value\n    return self",
    "docstring": "Set the y-coordinate of the translation vector with the given value.",
    "type": "method",
    "file_path": "kornia\\kornia\\geometry\\camera\\pinhole.py",
    "ast_data": "FunctionDef name:tz arg:self arg:value arguments arg arg Assign Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "score",
    "source_code": "def score(self, X, y=None):\n    pass",
    "docstring": "Return the score of the model on the data . Parameters ---------- X : array-like of shape (n_samples, n_features) Test samples. y : Ignored Not used, present for API consistency by convention. Returns ------- score : float",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\base.py",
    "ast_data": "FunctionDef name:score arg:self arg:X arg:y arguments arg arg arg"
  },
  {
    "library": "matplotlib",
    "name": "bone",
    "source_code": "def bone() -> None:\n    set_cmap('bone')",
    "docstring": "Set the colormap to 'bone'. This changes the default colormap as well as the colormap of the current image if there is one. See `` for more information.",
    "type": "function",
    "file_path": "matplotlib\\lib\\matplotlib\\pyplot.py",
    "ast_data": "FunctionDef name:bone arguments Call"
  },
  {
    "library": "matplotlib",
    "name": "__call__",
    "source_code": "def __call__(self, transform_xy, x1, y1, x2, y2):\n    tbbox = self._find_transformed_bbox(_User2DTransform(transform_xy, None), Bbox.from_extents(x1, y1, x2, y2))\n    return (tbbox.x0, tbbox.x1, tbbox.y0, tbbox.y1)",
    "docstring": "Compute an approximation of the bounding box obtained by applying *transform_xy* to the box delimited by ``, the padding is computed by expanding the span covered by the extremal coordinates by these fractions.",
    "type": "method",
    "file_path": "matplotlib\\lib\\mpl_toolkits\\axisartist\\grid_finder.py",
    "ast_data": "FunctionDef name:__call__ arg:self arg:transform_xy arg:x1 arg:y1 arg:x2 arg:y2 arguments arg arg arg arg arg arg Assign Call Call Call Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "fit",
    "source_code": "def fit(self, X, y=None, **fit_params):\n    if _routing_enabled():\n        routed_params = process_routing(self, 'fit', **fit_params)\n    else:\n        routed_params = Bunch()\n        for name, _ in self.transformer_list:\n            routed_params[name] = Bunch(fit={})\n            routed_params[name].fit = fit_params\n    transformers = self._parallel_func(X, y, _fit_one, routed_params)\n    if not transformers:\n        return self\n    self._update_transformer_list(transformers)\n    return self",
    "docstring": "Fit all transformers using X. Parameters ---------- X : iterable or array-like, depending on transformers Input data, used to fit transformers. y : array-like of shape (n_samples, n_outputs), default=None Targets for supervised learning. **fit_params : dict, default=None - If (default): Parameters directly passed to the methods of the sub-transformers. - If : Parameters safely routed to the methods of the sub-transformers. See :ref: for more details. .. versionchanged:: 1.5 can be routed via metadata routing API. Returns ------- self : object FeatureUnion class instance.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\pipeline.py",
    "ast_data": "FunctionDef name:fit arg:self arg:X arg:y arguments arg arg arg arg If Call Assign Call Assign Call For Assign Call Assign Assign Call If Return return:yes Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "__repr__",
    "source_code": "def __repr__(self):\n    list_of_reprs = [repr(item) for item in self]\n    if len(list_of_reprs) == 0:\n        return self._get_name() + '()'\n    start_end_indices = [[0, 0]]\n    repeated_blocks = [list_of_reprs[0]]\n    for i, r in enumerate(list_of_reprs[1:], 1):\n        if r == repeated_blocks[-1]:\n            start_end_indices[-1][1] += 1\n            continue\n        start_end_indices.append([i, i])\n        repeated_blocks.append(r)\n    lines = []\n    main_str = self._get_name() + '('\n    for (start_id, end_id), b in zip(start_end_indices, repeated_blocks):\n        local_repr = f'({start_id}): {b}'\n        if start_id != end_id:\n            n = end_id - start_id + 1\n            local_repr = f'({start_id}-{end_id}): {n} x {b}'\n        local_repr = _addindent(local_repr, 2)\n        lines.append(local_repr)\n    main_str += '\\n  ' + '\\n  '.join(lines) + '\\n'\n    main_str += ')'\n    return main_str",
    "docstring": "Return a custom repr for ModuleList that compresses repeated module representations.",
    "type": "method",
    "file_path": "pytorch\\torch\\nn\\modules\\container.py",
    "ast_data": "FunctionDef name:__repr__ arg:self arguments arg Assign Call If Compare Call Return return:yes Call Assign Assign For Call If Compare Call Call Assign Assign Call For Call Assign If Compare Assign Assign Assign Call Call Call Return return:yes"
  },
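The compression is visible with any run of identical submodules; a small sketch:

```python
import torch.nn as nn

layers = nn.ModuleList([nn.Linear(8, 8) for _ in range(3)] + [nn.ReLU()])
print(layers)
# ModuleList(
#   (0-2): 3 x Linear(in_features=8, out_features=8, bias=True)
#   (3): ReLU()
# )
```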
  {
    "library": "tensorflow",
    "name": "_get_dense_tensor_internal",
    "source_code": "def _get_dense_tensor_internal(self, transformation_cache, state_manager):\n    _check_invalid_cases(self._embedding_lookup_device)\n    is_cpu = self._embedding_lookup_device == EmbeddingDevice.CPU\n    is_cpu = is_cpu or _is_running_on_cpu()\n    if is_cpu:\n        return super(_TPUSharedDeviceSpecificEmbeddingColumnV2, self)._get_dense_tensor_internal(transformation_cache, state_manager)\n    if self._embedding_lookup_device == EmbeddingDevice.TPU_EMBEDDING_CORE:\n        return super(_TPUSharedDeviceSpecificEmbeddingColumnV2, self)._get_dense_tensor_internal(transformation_cache, state_manager)\n    if tpu.under_tpu_inference_context():\n        sparse_tensor = transformation_cache.get(self.categorical_column.name, state_manager)\n\n        def host_computation():\n            return pad_sparse_embedding_lookup_indices(sparse_tensor, self._tensor_core_shape[1])\n        values, mask = tpu_replication.outside_compilation(host_computation)\n    else:\n        values = transformation_cache.get(self.categorical_column.name, state_manager)\n        mask = transformation_cache.get(self.categorical_column.name + _TENSOR_CORE_MASK_KEY_SUFFIX, state_manager)\n    embedding_weights = self.shared_embedding_column_creator.embedding_weights\n    return sparse_embedding_aggregate_slice(embedding_weights, (values, mask), self.get_combiner())",
    "docstring": "Private method that follows _get_dense_tensor_internal.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\tpu\\feature_column_v2.py",
    "ast_data": "FunctionDef name:_get_dense_tensor_internal arg:self arg:transformation_cache arg:state_manager arguments arg arg arg Call Assign Compare Assign BoolOp Call If Return return:yes Call Call If Compare Return return:yes Call Call If Call Assign Call FunctionDef name:host_computation arguments Return return:yes Call Assign Call Assign Call Assign Call Assign Return return:yes Call Call"
  },
  {
    "library": "kornia",
    "name": "RgbToRgb255",
    "source_code": "class RgbToRgb255(Module):\n\n    def forward(self, image: Tensor) -> Tensor:\n        return rgb_to_rgb255(image)",
    "docstring": "Convert an image from RGB to RGB [0, 255] for visualization purposes. Returns: RGB version of the image. Shape: - image: :math: - output: :math: Example: >>> input = torch.rand(2, 3, 4, 5) >>> rgb = RgbToRgb255() >>> output = rgb(input) # 2x3x4x5",
    "type": "class",
    "file_path": "kornia\\kornia\\color\\rgb.py",
    "ast_data": "ClassDef name:RgbToRgb255 FunctionDef name:forward arg:self arg:image arguments arg arg Return return:yes Call"
  },
  {
    "library": "sphinx",
    "name": "PyVariable",
    "source_code": "class PyVariable(PyObject):\n    option_spec: ClassVar[OptionSpec] = PyObject.option_spec.copy()\n    option_spec.update({'type': directives.unchanged, 'value': directives.unchanged})\n\n    def handle_signature(self, sig: str, signode: desc_signature) -> tuple[str, str]:\n        fullname, prefix = super().handle_signature(sig, signode)\n        typ = self.options.get('type')\n        if typ:\n            annotations = _parse_annotation(typ, self.env)\n            signode += addnodes.desc_annotation(typ, '', addnodes.desc_sig_punctuation('', ':'), addnodes.desc_sig_space(), *annotations)\n        value = self.options.get('value')\n        if value:\n            signode += addnodes.desc_annotation(value, '', addnodes.desc_sig_space(), addnodes.desc_sig_punctuation('', '='), addnodes.desc_sig_space(), nodes.Text(value))\n        return (fullname, prefix)\n\n    def get_index_text(self, modname: str, name_cls: tuple[str, str]) -> str:\n        name, _cls = name_cls\n        if modname:\n            return _('%s (in module %s)') % (name, modname)\n        else:\n            return _('%s (built-in variable)') % name",
    "docstring": "Description of a variable.",
    "type": "class",
    "file_path": "sphinx\\sphinx\\domains\\python\\__init__.py",
    "ast_data": "ClassDef name:PyVariable Call Call FunctionDef name:handle_signature arg:self arg:sig arg:signode arguments arg arg arg Assign Call Call Assign Call If Assign Call Call Call Call Assign Call If Call Call Call Call Call Return return:yes FunctionDef name:get_index_text arg:self arg:modname arg:name_cls arguments arg arg arg Assign If Return return:yes Call Return return:yes Call"
  },
  {
    "library": "django",
    "name": "can_migrate",
    "source_code": "def can_migrate(self, connection):\n    if self.proxy or self.swapped or (not self.managed):\n        return False\n    if isinstance(connection, str):\n        connection = connections[connection]\n    if self.required_db_vendor:\n        return self.required_db_vendor == connection.vendor\n    if self.required_db_features:\n        return all((getattr(connection.features, feat, False) for feat in self.required_db_features))\n    return True",
    "docstring": "Return True if the model can/should be migrated on the . can be either a real connection or a connection alias.",
    "type": "method",
    "file_path": "django\\django\\db\\models\\options.py",
    "ast_data": "FunctionDef name:can_migrate arg:self arg:connection arguments arg arg If BoolOp Return return:yes If Call Assign If Return return:yes Compare If Return return:yes Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "graph_parents",
    "source_code": "@property\ndef graph_parents(self):\n    return self._graph_parents",
    "docstring": "Returns this 's graph_parents as a Python list.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\distributions\\bijector_impl.py",
    "ast_data": "FunctionDef name:graph_parents arg:self arguments arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "make_simplify_with_ranges_cache",
    "source_code": "def make_simplify_with_ranges_cache(self) -> Callable[[Expr, VarRanges], Expr]:\n    cache: dict[tuple[Any, ...], Expr] = {}\n    replacement_count = len(self.replacements)\n\n    def simplify_with_ranges(expr: Expr, var_ranges: VarRanges) -> Expr:\n        nonlocal replacement_count\n        if replacement_count != len(self.replacements):\n            cache.clear()\n            replacement_count = len(self.replacements)\n        key = (expr, *var_ranges.items())\n        result = cache.get(key, None)\n        if result is None:\n            result = self._simplify_with_ranges(expr, var_ranges)\n            cache[key] = result\n            if result != expr:\n                cache[result, *var_ranges.items()] = result\n        return result\n    return simplify_with_ranges",
    "docstring": "self._simplify_with_ranges() can be expensive, cache its results",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\sizevars.py",
    "ast_data": "FunctionDef name:make_simplify_with_ranges_cache arg:self arguments arg Assign Call FunctionDef name:simplify_with_ranges arg:expr arg:var_ranges arguments arg arg If Compare Call Call Assign Call Assign Call Assign Call If Compare Assign Call Assign If Compare Assign Call Return return:yes Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "set_gid",
    "source_code": "def set_gid(self, gid):\n    self._gid = gid",
    "docstring": "Set the (group) id for the artist. Parameters ---------- gid : str",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\artist.py",
    "ast_data": "FunctionDef name:set_gid arg:self arg:gid arguments arg arg Assign"
  },
  {
    "library": "pytorch",
    "name": "is_available",
    "source_code": "@_lru_cache\ndef is_available() -> bool:\n    return _opt_einsum is not None",
    "docstring": "Return a bool indicating if opt_einsum is currently available. You must install opt-einsum in order for torch to automatically optimize einsum. To make opt-einsum available, you can install it along with torch: ``. If the package is installed, torch will import it automatically and use it accordingly. Use this function to check whether opt-einsum was installed and properly imported by torch.",
    "type": "function",
    "file_path": "pytorch\\torch\\backends\\opt_einsum\\__init__.py",
    "ast_data": "FunctionDef name:is_available arguments Return return:yes Compare"
  },
  {
    "library": "tensorflow",
    "name": "CloudTPUPreemptedHook",
    "source_code": "class CloudTPUPreemptedHook(session_run_hook.SessionRunHook):\n\n    def __init__(self, cluster):\n        self._cluster = cluster\n\n    def after_create_session(self, session, coord):\n        if tpu_cluster_resolver.is_running_in_gce():\n            self._tpu_poller = _TPUPollingThread(self._cluster, session)\n            self._tpu_poller.start()\n\n    def end(self, session):\n        self._tpu_poller.stop()",
    "docstring": "The SessionRunHook for preemptible Cloud TPUs. This is an implementation of SessionRunHook for the pre-emptible Google Cloud TPU service. It attempts to close the session if the TPU is preempted, and exits the coordinator process if the session cannot be closed.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\tpu\\preempted_hook.py",
    "ast_data": "ClassDef name:CloudTPUPreemptedHook FunctionDef name:__init__ arg:self arg:cluster arguments arg arg Assign FunctionDef name:after_create_session arg:self arg:session arg:coord arguments arg arg arg If Call Assign Call Call FunctionDef name:end arg:self arg:session arguments arg arg Call"
  },
  {
    "library": "authlib",
    "name": "JWEAlgorithmBase",
    "source_code": "class JWEAlgorithmBase(metaclass=ABCMeta):\n    EXTRA_HEADERS = None\n    name = None\n    description = None\n    algorithm_type = 'JWE'\n    algorithm_location = 'alg'\n\n    def prepare_key(self, raw_data):\n        raise NotImplementedError\n\n    def generate_preset(self, enc_alg, key):\n        raise NotImplementedError",
    "docstring": "Base interface for all JWE algorithms.",
    "type": "class",
    "file_path": "authlib\\authlib\\jose\\rfc7516\\models.py",
    "ast_data": "ClassDef name:JWEAlgorithmBase Assign Assign Assign Assign Assign FunctionDef name:prepare_key arg:self arg:raw_data arguments arg arg Raise FunctionDef name:generate_preset arg:self arg:enc_alg arg:key arguments arg arg arg Raise"
  },
  {
    "library": "tensorflow",
    "name": "TFLiteConverterMetrics",
    "source_code": "class TFLiteConverterMetrics(TFLiteMetrics):\n\n    def __del__(self):\n        pass\n\n    def set_export_required(self):\n        pass\n\n    def export_metrics(self):\n        pass",
    "docstring": "Similar to TFLiteMetrics but specialized for converter.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\lite\\python\\metrics\\metrics_portable.py",
    "ast_data": "ClassDef name:TFLiteConverterMetrics FunctionDef name:__del__ arg:self arguments arg FunctionDef name:set_export_required arg:self arguments arg FunctionDef name:export_metrics arg:self arguments arg"
  },
  {
    "library": "scikit-learn",
    "name": "predict",
    "source_code": "def predict(self, X):\n    check_is_fitted(self)\n    X = validate_data(self, X, reset=False)\n    return self._estimate_weighted_log_prob(X).argmax(axis=1)",
    "docstring": "Predict the labels for the data samples in X using trained model. Parameters ---------- X : array-like of shape (n_samples, n_features) List of n_features-dimensional data points. Each row corresponds to a single data point. Returns ------- labels : array, shape (n_samples,) Component labels.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\mixture\\_base.py",
    "ast_data": "FunctionDef name:predict arg:self arg:X arguments arg arg Call Assign Call Return return:yes Call Call"
  },
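  {
    "library": "scikit-learn",
    "name": "usage_sketch_mixture_predict",
    "docstring": "Illustrative usage sketch, not from the scikit-learn source: fits a two-component GaussianMixture (a concrete subclass of the mixture base class above) and calls predict, which returns the per-sample argmax of the estimated weighted log-probabilities. The synthetic data and seeds are arbitrary.",
    "source_code": "import numpy as np\nfrom sklearn.mixture import GaussianMixture\n\n# Two well-separated Gaussian blobs.\nrng = np.random.default_rng(0)\nX = np.vstack([rng.normal(0.0, 1.0, (50, 2)), rng.normal(5.0, 1.0, (50, 2))])\n\ngm = GaussianMixture(n_components=2, random_state=0).fit(X)\nlabels = gm.predict(X)  # argmax of the weighted log-probabilities per sample\nprint(labels.shape)     # (100,)",
    "type": "example"
  },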
  {
    "library": "django",
    "name": "get_table_list",
    "source_code": "def get_table_list(self, cursor):\n    cursor.execute(\"\\n            SELECT\\n                user_tables.table_name,\\n                't',\\n                user_tab_comments.comments\\n            FROM user_tables\\n            LEFT OUTER JOIN\\n                user_tab_comments\\n                ON user_tab_comments.table_name = user_tables.table_name\\n            WHERE\\n                NOT EXISTS (\\n                    SELECT 1\\n                    FROM user_mviews\\n                    WHERE user_mviews.mview_name = user_tables.table_name\\n                )\\n            UNION ALL\\n            SELECT view_name, 'v', NULL FROM user_views\\n            UNION ALL\\n            SELECT mview_name, 'v', NULL FROM user_mviews\\n        \")\n    return [TableInfo(self.identifier_converter(row[0]), row[1], row[2]) for row in cursor.fetchall()]",
    "docstring": "Return a list of table and view names in the current database.",
    "type": "method",
    "file_path": "django\\django\\db\\backends\\oracle\\introspection.py",
    "ast_data": "FunctionDef name:get_table_list arg:self arg:cursor arguments arg arg Call Return return:yes Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "MemRecordsAcc",
    "source_code": "class MemRecordsAcc:\n\n    def __init__(self, mem_records):\n        self._mem_records = mem_records\n        self._start_nses: list[int] = []\n        self._indices: list[int] = []\n        if len(mem_records) > 0:\n            tmp = sorted([(r[0].start_ns(), i) for i, r in enumerate(mem_records)])\n            self._start_nses, self._indices = zip(*tmp)\n\n    def in_interval(self, start_us, end_us):\n        start_idx = bisect.bisect_left(self._start_nses, start_us * 1000)\n        end_idx = bisect.bisect_right(self._start_nses, end_us * 1000)\n        for i in range(start_idx, end_idx):\n            yield self._mem_records[self._indices[i]]",
    "docstring": "Acceleration structure for accessing mem_records in interval.",
    "type": "class",
    "file_path": "pytorch\\torch\\autograd\\profiler_util.py",
    "ast_data": "ClassDef name:MemRecordsAcc FunctionDef name:__init__ arg:self arg:mem_records arguments arg arg Assign If Compare Call Assign Call Call Call Assign Call FunctionDef name:in_interval arg:self arg:start_us arg:end_us arguments arg arg arg Assign Call Assign Call For Call"
  },
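  {
    "library": "pytorch",
    "name": "usage_sketch_interval_acceleration",
    "docstring": "Illustrative sketch, not part of the pytorch source: a minimal stand-in for MemRecordsAcc showing the sort-once / bisect-per-query pattern. IntervalAcc and the (start_ns, payload) record shape are hypothetical simplifications of the profiler's mem_records.",
    "source_code": "import bisect\n\nclass IntervalAcc:\n    # Sort records by start time once, then answer interval queries\n    # with two binary searches, as MemRecordsAcc does.\n    def __init__(self, records):  # records: iterable of (start_ns, payload)\n        ordered = sorted(records)\n        self._starts = [s for s, _ in ordered]\n        self._payloads = [p for _, p in ordered]\n\n    def in_interval(self, start_us, end_us):\n        lo = bisect.bisect_left(self._starts, start_us * 1000)\n        hi = bisect.bisect_right(self._starts, end_us * 1000)\n        for i in range(lo, hi):\n            yield self._payloads[i]\n\nacc = IntervalAcc([(2500, 'a'), (1200, 'b'), (9000, 'c')])\nprint(list(acc.in_interval(1, 3)))  # ['b', 'a']: starts within [1000, 3000] ns",
    "type": "example"
  },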
  {
    "library": "tensorflow",
    "name": "_CategoricalColumn",
    "source_code": "class _CategoricalColumn(_FeatureColumn):\n    IdWeightPair = collections.namedtuple('IdWeightPair', ['id_tensor', 'weight_tensor'])\n\n    @abc.abstractproperty\n    def _num_buckets(self):\n        pass\n\n    @abc.abstractmethod\n    def _get_sparse_tensors(self, inputs, weight_collections=None, trainable=None):\n        pass",
    "docstring": "Represents a categorical feature. WARNING: Do not subclass this layer unless you know what you are doing: the API is subject to future changes. A categorical feature typically handled with a of IDs.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\feature_column\\feature_column.py",
    "ast_data": "ClassDef name:_CategoricalColumn Assign Call FunctionDef name:_num_buckets arg:self arguments arg FunctionDef name:_get_sparse_tensors arg:self arg:inputs arg:weight_collections arg:trainable arguments arg arg arg arg"
  },
  {
    "library": "scrapy",
    "name": "is_unverifiable",
    "source_code": "def is_unverifiable(self) -> bool:\n    return cast(bool, self.request.meta.get('is_unverifiable', False))",
    "docstring": "Unverifiable should indicate whether the request is unverifiable, as defined by RFC 2965. It defaults to False. An unverifiable request is one whose URL the user did not have the option to approve. For example, if the request is for an image in an HTML document, and the user had no option to approve the automatic fetching of the image, this should be true.",
    "type": "method",
    "file_path": "scrapy\\scrapy\\http\\cookies.py",
    "ast_data": "FunctionDef name:is_unverifiable arg:self arguments arg Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_is_filepath",
    "source_code": "def _is_filepath(output_stream):\n    return isinstance(output_stream, str) and output_stream.startswith('file://')",
    "docstring": "Returns True if output_stream is a file path.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\logging_ops.py",
    "ast_data": "FunctionDef name:_is_filepath arg:output_stream arguments arg Return return:yes BoolOp Call Call"
  },
  {
    "library": "pytorch",
    "name": "gather",
    "source_code": "def gather(tensor, dst=0, group=group.WORLD):\n    return _Gather.apply(dst, group, tensor)",
    "docstring": "Gathers a list of tensors in a single process. Arguments: tensor (Tensor): Input tensor. dst (int, optional): Destination rank (default is 0). group (ProcessGroup, optional): The process group to work on. Returns: tuple[Tensor]: List of appropriately-sized tensors with the gathered data.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\nn\\functional.py",
    "ast_data": "FunctionDef name:gather arg:tensor arg:dst arg:group arguments arg arg arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "_register_pre_backward_hooks",
    "source_code": "@no_type_check\ndef _register_pre_backward_hooks(state: _FSDPState, module: nn.Module, outputs: Any, handle: FlatParamHandle) -> None:\n    if not torch.is_grad_enabled():\n        return outputs\n    if state._is_root:\n        state._post_backward_callback_queued = False\n    if handle:\n        handle._needs_pre_backward_unshard = False\n        handle._ran_pre_backward_hook = False\n\n    def _register_hook(t: torch.Tensor) -> torch.Tensor:\n        if t.requires_grad:\n            t.register_hook(torch.utils.hooks.unserializable_hook(functools.partial(_pre_backward_hook, state, module, handle)))\n            if handle:\n                handle._needs_pre_backward_unshard = True\n        return t\n    return _apply_to_tensors(_register_hook, outputs)",
    "docstring": "Registers pre-backward hooks on the tensors that require gradients in the forward pass outputs ``. Args: module (nn.Module): Fully sharded module (see [Note: Fully Sharded Module]). Returns: Forward pass outputs with pre-backward hooks registered to tensors that require gradients.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\fsdp\\_runtime_utils.py",
    "ast_data": "FunctionDef name:_register_pre_backward_hooks arg:state arg:module arg:outputs arg:handle arguments arg arg arg arg If Call Return return:yes If Assign If Assign Assign FunctionDef name:_register_hook arg:t arguments arg If Call Call Call If Assign Return return:yes Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "node_name",
    "source_code": "@property\ndef node_name(self):\n    return self._node_name",
    "docstring": "Name of the node from which the tensor value was dumped. Returns: () name of the node watched by the debug op.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\debug\\lib\\debug_data.py",
    "ast_data": "FunctionDef name:node_name arg:self arguments arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "get_default_qat_module_mappings",
    "source_code": "def get_default_qat_module_mappings() -> dict[Callable, Any]:\n    return copy.deepcopy(DEFAULT_QAT_MODULE_MAPPINGS)",
    "docstring": "Get default module mapping for quantization aware training",
    "type": "function",
    "file_path": "pytorch\\torch\\ao\\quantization\\quantization_mappings.py",
    "ast_data": "FunctionDef name:get_default_qat_module_mappings arguments Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "_is_metadata_of",
    "source_code": "def _is_metadata_of(group: Node, parent_group: Node) -> bool:\n    if group._v_depth <= parent_group._v_depth:\n        return False\n    current = group\n    while current._v_depth > 1:\n        parent = current._v_parent\n        if parent == parent_group and current._v_name == 'meta':\n            return True\n        current = current._v_parent\n    return False",
    "docstring": "Check if a given group is a metadata group for a given parent_group.",
    "type": "function",
    "file_path": "pandas\\pandas\\io\\pytables.py",
    "ast_data": "FunctionDef name:_is_metadata_of arg:group arg:parent_group arguments arg arg If Compare Return return:yes Assign While Compare Assign If BoolOp Compare Compare Return return:yes Assign Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "_make_getset_interval",
    "source_code": "def _make_getset_interval(method_name, lim_name, attr_name):\n\n    def getter(self):\n        return getattr(getattr(self.axes, lim_name), attr_name)\n\n    def setter(self, vmin, vmax, ignore=False):\n        if ignore:\n            setattr(getattr(self.axes, lim_name), attr_name, (vmin, vmax))\n        else:\n            oldmin, oldmax = getter(self)\n            if oldmin < oldmax:\n                setter(self, min(vmin, vmax, oldmin), max(vmin, vmax, oldmax), ignore=True)\n            else:\n                setter(self, max(vmin, vmax, oldmin), min(vmin, vmax, oldmax), ignore=True)\n        self.stale = True\n    getter.__name__ = f'get_{method_name}_interval'\n    setter.__name__ = f'set_{method_name}_interval'\n    return (getter, setter)",
    "docstring": "Helper to generate `` implementations.",
    "type": "function",
    "file_path": "matplotlib\\lib\\matplotlib\\axis.py",
    "ast_data": "FunctionDef name:_make_getset_interval arg:method_name arg:lim_name arg:attr_name arguments arg arg arg FunctionDef name:getter arg:self arguments arg Return return:yes Call Call FunctionDef name:setter arg:self arg:vmin arg:vmax arg:ignore arguments arg arg arg arg If Call Call Assign Call If Compare Call Call Call Call Call Call Assign Assign Assign Return return:yes"
  },
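  {
    "library": "matplotlib",
    "name": "usage_sketch_closure_getset",
    "docstring": "Illustrative sketch, not from the matplotlib source: a toy analogue of the closure-factory pattern above, using a plain dict instead of an Axes. make_getset and the 'view' key are hypothetical; only the non-ignore merging branch is modeled.",
    "source_code": "def make_getset(attr_name):\n    # Build paired getter/setter closures over a dict-backed object,\n    # echoing how _make_getset_interval generates axis accessors.\n    def getter(obj):\n        return obj[attr_name]\n\n    def setter(obj, vmin, vmax):\n        old_min, old_max = getter(obj)\n        # Like the ignore=False branch above: expand the interval\n        # rather than blindly overwriting it.\n        obj[attr_name] = (min(vmin, vmax, old_min), max(vmin, vmax, old_max))\n\n    getter.__name__ = f'get_{attr_name}'\n    setter.__name__ = f'set_{attr_name}'\n    return getter, setter\n\nget_view, set_view = make_getset('view')\nstate = {'view': (0.0, 1.0)}\nset_view(state, 0.5, 2.0)\nprint(get_view(state))  # (0.0, 2.0): the interval grew to cover both ranges",
    "type": "example"
  },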
  {
    "library": "scikit-learn",
    "name": "transform",
    "source_code": "def transform(self, raw_documents):\n    check_is_fitted(self, msg='The TF-IDF vectorizer is not fitted')\n    X = super().transform(raw_documents)\n    return self._tfidf.transform(X, copy=False)",
    "docstring": "Transform documents to document-term matrix. Uses the vocabulary and document frequencies (df) learned by fit (or fit_transform). Parameters ---------- raw_documents : iterable An iterable which generates either str, unicode or file objects. Returns ------- X : sparse matrix of (n_samples, n_features) Tf-idf-weighted document-term matrix.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\feature_extraction\\text.py",
    "ast_data": "FunctionDef name:transform arg:self arg:raw_documents arguments arg arg Call Assign Call Call Return return:yes Call"
  },
  {
    "library": "sphinx",
    "name": "result_nodes",
    "source_code": "def result_nodes(self, document: nodes.document, env: BuildEnvironment, node: Element, is_ref: bool) -> tuple[list[Node], list[system_message]]:\n    return ([node], [])",
    "docstring": "Called before returning the finished nodes. *node* is the reference node if one was created (*is_ref* is then true), else the content node. This method can add other nodes and must return a `` tuple (the usual return value of a role function).",
    "type": "method",
    "file_path": "sphinx\\sphinx\\roles.py",
    "ast_data": "FunctionDef name:result_nodes arg:self arg:document arg:env arg:node arg:is_ref arguments arg arg arg arg arg Return return:yes"
  },
  {
    "library": "scipy",
    "name": "LagrangianHessian",
    "source_code": "class LagrangianHessian:\n\n    def __init__(self, n, objective_hess, constraints_hess):\n        self.n = n\n        self.objective_hess = objective_hess\n        self.constraints_hess = constraints_hess\n\n    def __call__(self, x, v_eq, v_ineq=None):\n        if v_ineq is None:\n            v_ineq = np.empty(0)\n        H_objective = self.objective_hess(x)\n        H_constraints = self.constraints_hess(x, v_eq, v_ineq)\n\n        def matvec(p):\n            return H_objective.dot(p) + H_constraints.dot(p)\n        return LinearOperator((self.n, self.n), matvec)",
    "docstring": "The Hessian of the Lagrangian as LinearOperator. The Lagrangian is computed as the objective function plus all the constraints multiplied with some numbers (Lagrange multipliers).",
    "type": "class",
    "file_path": "scipy\\scipy\\optimize\\_trustregion_constr\\minimize_trustregion_constr.py",
    "ast_data": "ClassDef name:LagrangianHessian FunctionDef name:__init__ arg:self arg:n arg:objective_hess arg:constraints_hess arguments arg arg arg arg Assign Assign Assign FunctionDef name:__call__ arg:self arg:x arg:v_eq arg:v_ineq arguments arg arg arg arg If Compare Assign Call Assign Call Assign Call FunctionDef name:matvec arg:p arguments arg Return return:yes Call Call Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "summer",
    "source_code": "def summer() -> None:\n    set_cmap('summer')",
    "docstring": "Set the colormap to 'summer'. This changes the default colormap as well as the colormap of the current image if there is one. See `` for more information.",
    "type": "function",
    "file_path": "matplotlib\\lib\\matplotlib\\pyplot.py",
    "ast_data": "FunctionDef name:summer arguments Call"
  },
  {
    "library": "cryptography",
    "name": "sign",
    "source_code": "def sign(self, private_key: CertificateIssuerPrivateKeyTypes, algorithm: _AllowedHashTypes | None, backend: typing.Any=None, *, rsa_padding: padding.PSS | padding.PKCS1v15 | None=None, ecdsa_deterministic: bool | None=None) -> CertificateSigningRequest:\n    if self._subject_name is None:\n        raise ValueError('A CertificateSigningRequest must have a subject')\n    if rsa_padding is not None:\n        if not isinstance(rsa_padding, (padding.PSS, padding.PKCS1v15)):\n            raise TypeError('Padding must be PSS or PKCS1v15')\n        if not isinstance(private_key, rsa.RSAPrivateKey):\n            raise TypeError('Padding is only supported for RSA keys')\n    if ecdsa_deterministic is not None:\n        if not isinstance(private_key, ec.EllipticCurvePrivateKey):\n            raise TypeError('Deterministic ECDSA is only supported for EC keys')\n    return rust_x509.create_x509_csr(self, private_key, algorithm, rsa_padding, ecdsa_deterministic)",
    "docstring": "Signs the request using the requestor's private key.",
    "type": "method",
    "file_path": "cryptography\\src\\cryptography\\x509\\base.py",
    "ast_data": "FunctionDef name:sign arg:self arg:private_key arg:algorithm arg:backend arguments arg arg arg arg arg arg If Compare Raise Call If Compare If Call Raise Call If Call Raise Call If Compare If Call Raise Call Return return:yes Call"
  },
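  {
    "library": "cryptography",
    "name": "usage_sketch_csr_sign",
    "docstring": "Illustrative usage sketch based on the public cryptography API: builds a minimal CSR with CertificateSigningRequestBuilder and signs it with an EC key, exercising the sign() method above. The curve and common name are arbitrary choices.",
    "source_code": "from cryptography import x509\nfrom cryptography.hazmat.primitives import hashes\nfrom cryptography.hazmat.primitives.asymmetric import ec\nfrom cryptography.x509.oid import NameOID\n\n# Build and sign a minimal CSR; sign() raises ValueError when no\n# subject has been set, as shown in the method above.\nkey = ec.generate_private_key(ec.SECP256R1())\ncsr = x509.CertificateSigningRequestBuilder().subject_name(x509.Name([x509.NameAttribute(NameOID.COMMON_NAME, 'example.com')])).sign(key, hashes.SHA256())\nprint(csr.is_signature_valid)  # True",
    "type": "example"
  },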
  {
    "library": "pytorch",
    "name": "install",
    "source_code": "def install(*, venv: Venv, packages: Iterable[str], subcommand: str='checkout', branch: str | None=None, logger: logging.Logger) -> None:\n    use_existing = subcommand == 'checkout'\n    if use_existing:\n        venv.ensure()\n    else:\n        venv.create(remove_if_exists=True)\n    packages = [p for p in packages if p != 'torch']\n    dependencies = venv.pip_download('torch', prerelease=True)\n    torch_wheel = [dep for dep in dependencies if dep.name.startswith('torch-') and dep.name.endswith('.whl')]\n    if len(torch_wheel) != 1:\n        raise RuntimeError(f'Expected exactly one torch wheel, got {torch_wheel}')\n    torch_wheel = torch_wheel[0]\n    dependencies = [deps for deps in dependencies if deps != torch_wheel]\n    install_packages(venv, [*packages, *map(str, dependencies)])\n    with venv.extracted_wheel(torch_wheel) as wheel_site_dir:\n        if subcommand == 'checkout':\n            checkout_nightly_version(cast(str, branch), wheel_site_dir)\n        elif subcommand == 'pull':\n            pull_nightly_version(wheel_site_dir)\n        else:\n            raise ValueError(f'Subcommand {subcommand} must be one of: checkout, pull.')\n        move_nightly_files(wheel_site_dir)\n    write_pth(venv)\n    logger.info('-------\\nPyTorch Development Environment set up!\\nPlease activate to enable this environment:\\n\\n  $ %s', venv.activate_command)",
    "docstring": "Development install of PyTorch",
    "type": "function",
    "file_path": "pytorch\\tools\\nightly.py",
    "ast_data": "FunctionDef name:install arguments arg arg arg arg arg Assign Compare If Call Call Assign Compare Assign Call Assign BoolOp Call Call If Compare Call Raise Call Assign Assign Compare Call Call With Call If Compare Call Call If Compare Call Raise Call Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_get_op_control_flow_context",
    "source_code": "def _get_op_control_flow_context(self, op):\n    op_control_flow_context = op._control_flow_context\n    if control_flow_util.IsLoopExit(op):\n        op_control_flow_context = op_control_flow_context.outer_context\n    return op_control_flow_context",
    "docstring": "Returns the control flow of the given op. Args: op: tf.Operation for which the control flow context is requested. Returns: op_control_flow_context: which the is control flow context of the given op. If the operation type is LoopExit, returns the outer control flow context.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\tpu\\tensor_tracer.py",
    "ast_data": "FunctionDef name:_get_op_control_flow_context arg:self arg:op arguments arg arg Assign If Call Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_get_slice_dim_and_num_slices",
    "source_code": "def _get_slice_dim_and_num_slices(slicing):\n    for slice_dim, num_slices in enumerate(slicing):\n        if num_slices > 1:\n            break\n    else:\n        slice_dim = 0\n        num_slices = 1\n    return (slice_dim, num_slices)",
    "docstring": "Get slicing dimension and number of slices from the partitioner output.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\variable_scope.py",
    "ast_data": "FunctionDef name:_get_slice_dim_and_num_slices arg:slicing arguments arg For Call If Compare Assign Assign Return return:yes"
  },
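  {
    "library": "tensorflow",
    "name": "usage_sketch_for_else_slicing",
    "docstring": "Illustrative sketch, not from the tensorflow source: a standalone copy of the for/else idiom used by _get_slice_dim_and_num_slices, where the else branch supplies the default when no dimension is partitioned. first_sliced_dim is a hypothetical name.",
    "source_code": "def first_sliced_dim(slicing):\n    # for/else: the else clause runs only when the loop finishes\n    # without hitting `break`, i.e. no dimension has more than one slice.\n    for slice_dim, num_slices in enumerate(slicing):\n        if num_slices > 1:\n            break\n    else:\n        slice_dim, num_slices = 0, 1\n    return slice_dim, num_slices\n\nprint(first_sliced_dim([1, 4, 1]))  # (1, 4): dimension 1 is partitioned\nprint(first_sliced_dim([1, 1, 1]))  # (0, 1): the else fallback",
    "type": "example"
  },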
  {
    "library": "tensorflow",
    "name": "gen_register_op",
    "source_code": "def gen_register_op(source, method_prefix=None):\n    mlir_funcs = [op_reg_gen(func) for name, func in tf_inspect.getmembers(source, tf_inspect.isfunction) if not method_prefix or name.startswith(method_prefix)]\n    headers = '\\n#include \"tensorflow/core/framework/op.h\"\\n\\nnamespace tensorflow {\\n  '\n    code = '\\n'.join(mlir_funcs)\n    return headers + code + '}  // namespace tensorflow\\n'",
    "docstring": "Parse a python code and emit the TFR functions from a target class.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\compiler\\mlir\\tfr\\python\\op_reg_gen.py",
    "ast_data": "FunctionDef name:gen_register_op arg:source arg:method_prefix arguments arg arg Assign Call Call BoolOp Call Assign Assign Call Return return:yes"
  },
  {
    "library": "scipy",
    "name": "idctn",
    "source_code": "def idctn(x, type=2, shape=None, axes=None, norm=None, overwrite_x=False):\n    type = _inverse_typemap[type]\n    shape = _good_shape(x, shape, axes)\n    return _pocketfft.dctn(x, type, shape, axes, norm, overwrite_x)",
    "docstring": "Return multidimensional Discrete Cosine Transform along the specified axes. Parameters ---------- x : array_like The input array. type : {1, 2, 3, 4}, optional Type of the DCT (see Notes). Default type is 2. shape : int or array_like of ints or None, optional The shape of the result. If both and (see below) are None, is `shapeaxesshape`shape[i] >> import numpy as np >>> from scipy.fftpack import dctn, idctn >>> rng = np.random.default_rng() >>> y = rng.standard_normal((16, 16)) >>> np.allclose(y, idctn(dctn(y, norm='ortho'), norm='ortho')) True",
    "type": "function",
    "file_path": "scipy\\scipy\\fftpack\\_realtransforms.py",
    "ast_data": "FunctionDef name:idctn arg:x arg:type arg:shape arg:axes arg:norm arg:overwrite_x arguments arg arg arg arg arg arg Assign Assign Call Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "axisinfo",
    "source_code": "def axisinfo(self, unit, axis):\n    tz = unit\n    majloc = AutoDateLocator(tz=tz, interval_multiples=self._interval_multiples)\n    majfmt = AutoDateFormatter(majloc, tz=tz)\n    datemin = datetime.date(1970, 1, 1)\n    datemax = datetime.date(1970, 1, 2)\n    return units.AxisInfo(majloc=majloc, majfmt=majfmt, label='', default_limits=(datemin, datemax))",
    "docstring": "Return the for *unit*. *unit* is a instance or None. The *axis* argument is required but not used.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\dates.py",
    "ast_data": "FunctionDef name:axisinfo arg:self arg:unit arg:axis arguments arg arg arg Assign Assign Call Assign Call Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_streaming_confusion_matrix",
    "source_code": "def _streaming_confusion_matrix(labels, predictions, num_classes, weights=None):\n    total_cm = metric_variable([num_classes, num_classes], dtypes.float64, name='total_confusion_matrix')\n    predictions = math_ops.cast(predictions, dtypes.int64)\n    labels = math_ops.cast(labels, dtypes.int64)\n    num_classes = math_ops.cast(num_classes, dtypes.int64)\n    if predictions.get_shape().ndims > 1:\n        predictions = array_ops.reshape(predictions, [-1])\n    if labels.get_shape().ndims > 1:\n        labels = array_ops.reshape(labels, [-1])\n    if weights is not None and weights.get_shape().ndims > 1:\n        weights = array_ops.reshape(weights, [-1])\n    current_cm = confusion_matrix.confusion_matrix(labels, predictions, num_classes, weights=weights, dtype=dtypes.float64)\n    update_op = state_ops.assign_add(total_cm, current_cm)\n    return (total_cm, update_op)",
    "docstring": "Calculate a streaming confusion matrix. Calculates a confusion matrix. For estimation over a stream of data, the function creates an operation. Args: labels: A of ground truth labels with shape [batch size] and of type or . The tensor will be flattened if its rank > 1. predictions: A of prediction results for semantic labels, whose shape is [batch size] and type or . The tensor will be flattened if its rank > 1. num_classes: The possible number of labels the prediction task can have. This value must be provided, since a confusion matrix of dimension = [num_classes, num_classes] will be allocated. weights: Optional whose rank is either 0, or the same rank as , and must be broadcastable to (i.e., all dimensions must be either , or the same as the corresponding dimension). Returns: total_cm: A representing the confusion matrix. update_op: An operation that increments the confusion matrix.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\metrics_impl.py",
    "ast_data": "FunctionDef name:_streaming_confusion_matrix arg:labels arg:predictions arg:num_classes arg:weights arguments arg arg arg arg Assign Call Assign Call Assign Call Assign Call If Compare Call Assign Call If Compare Call Assign Call If BoolOp Compare Compare Call Assign Call Assign Call Assign Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "__call__",
    "source_code": "def __call__(self, func, in_spec, *flat_args, **_unused):\n    assert isinstance(func, _op_types) or pytree._is_constant_holder(func)\n    assert len(_unused) == 0\n    return impl(func, in_spec, *flat_args)",
    "docstring": "Functions that take in non-graphable types cannot directly be put into FX graph. Given func(*args, **kwargs), if all of the non-graphable types are pytrees, then we're able to store a call to flat_apply(func, in_spec, *flat_args) in the FX graph. The semantics of flat_apply(func, in_spec, *flat_args) are roughly equivalent to: >>> def flat_apply_impl(func, in_spec, *flat_args): >>> args, kwargs = pytree.tree_unflatten(flat_args, in_spec) >>> output = func(*args, **kwargs) >>> return output flat_apply supports the following two cases: - an input type is a container type (e.g. of tensors) registered as a pytree. We'll tree_flatten the input type and store the spec. - an input type is a constant type (i.e. torch.compile will specialize on it) registered with pytree.register_constant. The constant type goes directly into the spec.",
    "type": "method",
    "file_path": "pytorch\\torch\\_higher_order_ops\\flat_apply.py",
    "ast_data": "FunctionDef name:__call__ arg:self arg:func arg:in_spec arguments arg arg arg arg arg BoolOp Call Call Compare Call Return return:yes Call"
  },
  {
    "library": "kornia",
    "name": "differentiable_clipping",
    "source_code": "def differentiable_clipping(input: Tensor, min_val: Optional[float]=None, max_val: Optional[float]=None, scale: float=0.02) -> Tensor:\n    output: Tensor = input.clone()\n    if max_val is not None:\n        output[output > max_val] = -scale * (torch.exp(-output[output > max_val] + max_val) - 1.0) + max_val\n    if min_val is not None:\n        output[output < min_val] = scale * (torch.exp(output[output < min_val] - min_val) - 1.0) + min_val\n    return output",
    "docstring": "Clip via a differentiable and soft approximation of the clipping operation. Args: input (Tensor): Input tensor of any shape. min_val (Optional[float]): Minimum value. max_val (Optional[float]): Maximum value. scale (float): Scale value. Default 0.02. Returns: output (Tensor): Clipped output tensor of the same shape as the input tensor.",
    "type": "function",
    "file_path": "kornia\\kornia\\utils\\misc.py",
    "ast_data": "FunctionDef name:differentiable_clipping arg:input arg:min_val arg:max_val arg:scale arguments arg arg arg arg Call If Compare Assign Compare Call Compare If Compare Assign Compare Call Compare Return return:yes"
  },
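  {
    "library": "kornia",
    "name": "usage_sketch_differentiable_clipping",
    "docstring": "Illustrative sketch reproducing only the max_val branch of differentiable_clipping above, to show why the exponential is used: unlike a hard clamp, the gradient with respect to inputs beyond the bound stays nonzero. soft_clip_upper is a hypothetical name; the input values are arbitrary.",
    "source_code": "import torch\n\ndef soft_clip_upper(x, max_val, scale=0.02):\n    # Values above max_val decay smoothly toward max_val instead of\n    # being truncated, so gradients keep flowing through the clip.\n    out = x.clone()\n    over = out > max_val\n    out[over] = -scale * (torch.exp(-out[over] + max_val) - 1.0) + max_val\n    return out\n\nx = torch.tensor([0.5, 1.0, 1.5], requires_grad=True)\ny = soft_clip_upper(x, max_val=1.0)\ny.sum().backward()\nprint(y)       # last entry lands just above 1.0 rather than at exactly 1.0\nprint(x.grad)  # small but nonzero gradient where x exceeds max_val",
    "type": "example"
  },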
  {
    "library": "sphinx",
    "name": "separator_node",
    "source_code": "@staticmethod\ndef separator_node(*, name: str, max_len: int) -> nodes.Text:\n    if name:\n        return nodes.Text(' ::= '.rjust(max_len - len(name) + 5))\n    return nodes.Text(' ' * (max_len + 5))",
    "docstring": "Return seperator between 'name' and 'tokens'.",
    "type": "method",
    "file_path": "sphinx\\sphinx\\domains\\std\\__init__.py",
    "ast_data": "FunctionDef name:separator_node arguments arg arg If Return return:yes Call Call Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "get_bwd_recv_ops",
    "source_code": "def get_bwd_recv_ops(self, bwd_chunk_id: int) -> list[dist.P2POp]:\n    if not self.has_backward or self.is_last:\n        return []\n    recv_infos = self.grad_recv_info[bwd_chunk_id]\n    return self._get_recv_ops(recv_infos)",
    "docstring": "Returns a list of ops that are needed to receive the gradients for this stage.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\pipelining\\stage.py",
    "ast_data": "FunctionDef name:get_bwd_recv_ops arg:self arg:bwd_chunk_id arguments arg arg If BoolOp Return return:no Assign Return return:yes Call"
  },
  {
    "library": "kornia",
    "name": "matrix",
    "source_code": "def matrix(self) -> Tensor:\n    z = zeros_like(self.fx)\n    row1 = stack((self.fx, z, self.cx), -1)\n    row2 = stack((z, self.fy, self.cy), -1)\n    row3 = stack((z, z, z), -1)\n    K = stack((row1, row2, row3), -2)\n    K[..., -1, -1] = 1.0\n    return K",
    "docstring": "Return the camera matrix. The matrix is of the form: .. math:: \\begin{bmatrix} fx & 0 & cx \\\\ 0 & fy & cy \\\\ 0 & 0 & 1\\end{bmatrix} Example: >>> cam = CameraModel(ImageSize(480, 640), CameraModelType.PINHOLE, torch.Tensor([1.0, 2.0, 3.0, 4.0])) >>> cam.matrix() tensor([[1., 0., 3.], [0., 2., 4.], [0., 0., 1.]])",
    "type": "method",
    "file_path": "kornia\\kornia\\sensors\\camera\\camera_model.py",
    "ast_data": "FunctionDef name:matrix arg:self arguments arg Assign Call Assign Call Assign Call Assign Call Assign Call Assign Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "split",
    "source_code": "def split(self, X, y, groups=None):\n    if groups is not None:\n        warnings.warn(f'The groups parameter is ignored by {self.__class__.__name__}', UserWarning)\n    y = check_array(y, input_name='y', ensure_2d=False, dtype=None)\n    return super().split(X, y, groups)",
    "docstring": "Generate indices to split data into training and test set. Parameters ---------- X : array-like of shape (n_samples, n_features) Training data, where is the number of samples and is the number of features. Note that providing `random_state` to an integer.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\model_selection\\_split.py",
    "ast_data": "FunctionDef name:split arg:self arg:X arg:y arg:groups arguments arg arg arg arg If Compare Call Assign Call Return return:yes Call Call"
  },
  {
    "library": "django",
    "name": "contribute_to_class",
    "source_code": "def contribute_to_class(self, cls, name, private_only=False):\n    self.set_attributes_from_name(name)\n    self.model = cls\n    cls._meta.add_field(self, private=private_only)\n    if self.column:\n        setattr(cls, self.attname, self.descriptor_class(self))\n    if self.choices is not None:\n        if 'get_%s_display' % self.name not in cls.__dict__:\n            setattr(cls, 'get_%s_display' % self.name, partialmethod(cls._get_FIELD_display, field=self))",
    "docstring": "Register the field with the model class it belongs to. If private_only is True, create a separate instance of this field for every subclass of cls, even if cls is not an abstract model.",
    "type": "method",
    "file_path": "django\\django\\db\\models\\fields\\__init__.py",
    "ast_data": "FunctionDef name:contribute_to_class arg:self arg:cls arg:name arg:private_only arguments arg arg arg arg Call Assign Call If Call Call If Compare If Compare Call Call"
  },
  {
    "library": "tensorflow",
    "name": "result_type",
    "source_code": "def result_type(*arrays_and_dtypes):\n    return _result_type_impl(*arrays_and_dtypes)",
    "docstring": "Determine the result promotion dtype using the JNP-like promotion system. Args: *arrays_and_dtypes: A list of Tensors, Variables, NumPy arrays or python numbers. Returns: The result promotion type from all the inputs.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\flexible_dtypes.py",
    "ast_data": "FunctionDef name:result_type arguments arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "element_spec",
    "source_code": "@property\ndef element_spec(self):\n    if self._enable_get_next_as_optional and self._strategy.extended._in_multi_worker_mode():\n        return nest.map_structure(_rebatch_as_dynamic, self._element_spec, expand_composites=False)\n    return self._element_spec",
    "docstring": "The type specification of an element of this dataset.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\input_lib.py",
    "ast_data": "FunctionDef name:element_spec arg:self arguments arg If BoolOp Call Return return:yes Call Return return:yes"
  },
  {
    "library": "uvicorn",
    "name": "message_with_placeholders",
    "source_code": "def message_with_placeholders(message: Any) -> Any:\n    new_message = message.copy()\n    for attr in PLACEHOLDER_FORMAT.keys():\n        if message.get(attr) is not None:\n            content = message[attr]\n            placeholder = PLACEHOLDER_FORMAT[attr].format(length=len(content))\n            new_message[attr] = placeholder\n    return new_message",
    "docstring": "Return an ASGI message, with any body-type content omitted and replaced with a placeholder.",
    "type": "function",
    "file_path": "uvicorn\\uvicorn\\middleware\\message_logger.py",
    "ast_data": "FunctionDef name:message_with_placeholders arg:message arguments arg Assign Call For Call If Compare Call Assign Assign Call Call Assign Return return:yes"
  },
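  {
    "library": "uvicorn",
    "name": "usage_sketch_message_with_placeholders",
    "docstring": "Illustrative sketch, not the uvicorn source: a self-contained version of message_with_placeholders with a hypothetical PLACEHOLDER_FORMAT table, showing how body-like attributes are swapped for length placeholders before logging.",
    "source_code": "# PLACEHOLDER_FORMAT here is an assumed stand-in for uvicorn's constant:\n# a map from message attributes to templates with a `length` field.\nPLACEHOLDER_FORMAT = {'body': '<{length} bytes>', 'bytes': '<{length} bytes>'}\n\ndef with_placeholders(message):\n    new_message = dict(message)\n    for attr, template in PLACEHOLDER_FORMAT.items():\n        if message.get(attr) is not None:\n            new_message[attr] = template.format(length=len(message[attr]))\n    return new_message\n\nmsg = {'type': 'http.response.body', 'body': b'hello world'}\nprint(with_placeholders(msg))  # body replaced by '<11 bytes>'",
    "type": "example"
  },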
  {
    "library": "tensorflow",
    "name": "hessians",
    "source_code": "@tf_export(v1=['hessians'])\ndef hessians(ys, xs, name='hessians', colocate_gradients_with_ops=False, gate_gradients=False, aggregation_method=None):\n    xs = gradients_util._AsList(xs)\n    kwargs = {'colocate_gradients_with_ops': colocate_gradients_with_ops, 'gate_gradients': gate_gradients, 'aggregation_method': aggregation_method}\n    hessians = []\n    _gradients = gradients(ys, xs, **kwargs)\n    for gradient, x in zip(_gradients, xs):\n        gradient = array_ops.reshape(gradient, [-1])\n        n = array_ops.size(x)\n        loop_vars = [array_ops.constant(0, dtypes.int32), tensor_array_ops.TensorArray(x.dtype, n)]\n        _, hessian = while_loop.while_loop(lambda j, _: j < n, lambda j, result: (j + 1, result.write(j, gradients(gradient[j], x)[0])), loop_vars)\n        _shape = array_ops.shape(x)\n        _reshaped_hessian = array_ops.reshape(hessian.stack(), array_ops.concat((_shape, _shape), 0))\n        hessians.append(_reshaped_hessian)\n    return hessians",
    "docstring": "Constructs the Hessian of sum of with respect to in . adds ops to the graph to output the Hessian matrix of with respect to . It returns a list of of length where each tensor is the Hessian of . The Hessian is a matrix of second-order partial derivatives of a scalar tensor (see for more details). Args: ys: A or list of tensors to be differentiated. xs: A or list of tensors to be used for differentiation. name: Optional name to use for grouping all the gradient ops together. defaults to 'hessians'. colocate_gradients_with_ops: See documentation for details. gate_gradients: See documentation for details. aggregation_method: See documentation for details. Returns: A list of Hessian matrices of for each in . Raises: LookupError: if one of the operations between and does not have a registered gradient function.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\gradients_impl.py",
    "ast_data": "FunctionDef name:hessians arg:ys arg:xs arg:name arg:colocate_gradients_with_ops arg:gate_gradients arg:aggregation_method arguments arg arg arg arg arg arg Assign Call Assign Assign Assign Call For Call Assign Call Assign Call Assign Call Call Assign Call arguments arg arg Compare arguments arg arg Call Call Assign Call Assign Call Call Call Call Return return:yes Call"
  },
  {
    "library": "numpy",
    "name": "_command_line_ok",
    "source_code": "def _command_line_ok(_cache=None):\n    if _cache:\n        return _cache[0]\n    elif _cache is None:\n        _cache = []\n    ok = True\n    display_opts = ['--' + n for n in Distribution.display_option_names]\n    for o in Distribution.display_options:\n        if o[1]:\n            display_opts.append('-' + o[1])\n    for arg in sys.argv:\n        if arg.startswith('--help') or arg == '-h' or arg in display_opts:\n            ok = False\n            break\n    _cache.append(ok)\n    return ok",
    "docstring": "Return True if command line does not contain any help or display requests.",
    "type": "function",
    "file_path": "numpy\\numpy\\distutils\\core.py",
    "ast_data": "FunctionDef name:_command_line_ok arg:_cache arguments arg If Return return:yes If Compare Assign Assign Assign For If Call For If BoolOp Call Compare Compare Assign Call Return return:yes"
  },
  {
    "library": "numpy",
    "name": "istitle",
    "source_code": "def istitle(self):\n    return istitle(self)",
    "docstring": "Returns true for each element if the element is a titlecased string and there is at least one character, false otherwise. See Also -------- char.istitle",
    "type": "method",
    "file_path": "numpy\\numpy\\_core\\defchararray.py",
    "ast_data": "FunctionDef name:istitle arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "inverse_transform",
    "source_code": "def inverse_transform(self, X):\n    check_is_fitted(self)\n    X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy, dtype=FLOAT_DTYPES, force_writeable=True, ensure_all_finite='allow-nan')\n    if sparse.issparse(X):\n        if self.with_scaling:\n            inplace_column_scale(X, self.scale_)\n    else:\n        if self.with_scaling:\n            X *= self.scale_\n        if self.with_centering:\n            X += self.center_\n    return X",
    "docstring": "Scale back the data to the original representation. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) The rescaled data to be transformed back. Returns ------- X_original : {ndarray, sparse matrix} of shape (n_samples, n_features) Transformed array.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\preprocessing\\_data.py",
    "ast_data": "FunctionDef name:inverse_transform arg:self arg:X arguments arg arg Call Assign Call If Call If Call If If Return return:yes"
  },
  {
    "library": "django",
    "name": "_copy_permissions",
    "source_code": "@staticmethod\ndef _copy_permissions(mode, filename):\n    if mode & stat.S_IROTH:\n        os.chmod(filename, mode)",
    "docstring": "If the file in the archive has some permissions (this assumes a file won't be writable/executable without being readable), apply those permissions to the unarchived file.",
    "type": "method",
    "file_path": "django\\django\\utils\\archive.py",
    "ast_data": "FunctionDef name:_copy_permissions arg:mode arg:filename arguments arg arg If Call"
  },
  {
    "library": "scikit-learn",
    "name": "dict_learning",
    "source_code": "@validate_params({'X': ['array-like'], 'method': [StrOptions({'lars', 'cd'})], 'return_n_iter': ['boolean'], 'method_max_iter': [Interval(Integral, 0, None, closed='left')]}, prefer_skip_nested_validation=False)\ndef dict_learning(X, n_components, *, alpha, max_iter=100, tol=1e-08, method='lars', n_jobs=None, dict_init=None, code_init=None, callback=None, verbose=False, random_state=None, return_n_iter=False, positive_dict=False, positive_code=False, method_max_iter=1000):\n    estimator = DictionaryLearning(n_components=n_components, alpha=alpha, max_iter=max_iter, tol=tol, fit_algorithm=method, n_jobs=n_jobs, dict_init=dict_init, callback=callback, code_init=code_init, verbose=verbose, random_state=random_state, positive_code=positive_code, positive_dict=positive_dict, transform_max_iter=method_max_iter).set_output(transform='default')\n    code = estimator.fit_transform(X)\n    if return_n_iter:\n        return (code, estimator.components_, estimator.error_, estimator.n_iter_)\n    return (code, estimator.components_, estimator.error_)",
    "docstring": "Solve a dictionary learning matrix factorization problem. Finds the best dictionary and the corresponding sparse code for approximating the data matrix X by solving:: (U^*, V^*) = argmin 0.5 || X - U V ||_Fro^2 + alpha * || U ||_1,1 (U,V) with || V_k ||_2 = 1 for all 0 'lars'linear_model.lars_path'cd'linear_model.Lassojoblib.parallel_backendGlossary code_initdict_initcode_initdict_initGlossary return_n_iterU`: >>> np.mean(U == 0) np.float64(0.62) We can compare the average squared euclidean norm of the reconstruction error of the sparse coded signal relative to the squared euclidean norm of the original signal: >>> X_hat = U @ V >>> np.mean(np.sum((X_hat - X) ** 2, axis=1) / np.sum(X ** 2, axis=1)) np.float64(0.0192)",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\decomposition\\_dict_learning.py",
    "ast_data": "FunctionDef name:dict_learning arg:X arg:n_components arguments arg arg arg arg arg arg arg arg arg arg arg arg arg arg arg arg Assign Call Call Assign Call If Return return:yes Return return:yes Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "FUNCTION_MATCH",
    "source_code": "def FUNCTION_MATCH(self, guard: Guard):\n    if self.serialization_mode == 'save':\n        raise RuntimeError('FUNCTION_MATCH guard cannot be serialized.')\n    return self.ID_MATCH(guard)",
    "docstring": "things like torch.add and user defined functions",
    "type": "method",
    "file_path": "pytorch\\torch\\_dynamo\\guards.py",
    "ast_data": "FunctionDef name:FUNCTION_MATCH arg:self arg:guard arguments arg arg If Compare Raise Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_if_spawn_run_and_exit",
    "source_code": "def _if_spawn_run_and_exit():\n    is_spawned = '-c' in sys.argv[1:] and sys.argv[sys.argv.index('-c') + 1].startswith('from multiprocessing.')\n    if not is_spawned:\n        return\n    cmd = sys.argv[sys.argv.index('-c') + 1]\n    sys.argv = sys.argv[0:1]\n    exec(cmd)\n    sys.exit(0)",
    "docstring": "If spawned process, run requested spawn task and exit. Else a no-op.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\multi_process_lib.py",
    "ast_data": "FunctionDef name:_if_spawn_run_and_exit arguments Assign BoolOp Compare Call Call If Return return:no Assign Call Assign Call Call"
  },
  {
    "library": "tensorflow",
    "name": "record_thread_local_summary_state",
    "source_code": "def record_thread_local_summary_state(self):\n    summary_state = summary_ops_v2._summary_state\n    self._summary_step = summary_state.step\n    self._summary_writer = summary_state.writer\n    self._summary_recording = summary_state.is_recording\n    self._summary_recording_distribution_strategy = summary_state.is_recording_distribution_strategy",
    "docstring": "Record the thread local summary state in self.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\mirrored_run.py",
    "ast_data": "FunctionDef name:record_thread_local_summary_state arg:self arguments arg Assign Assign Assign Assign Assign"
  },
  {
    "library": "numpy",
    "name": "_view_roi",
    "source_code": "def _view_roi(array, original_area_slice, axis):\n    axis += 1\n    sl = (slice(None),) * axis + original_area_slice[axis:]\n    return array[sl]",
    "docstring": "Get a view of the current region of interest during iterative padding. When padding multiple dimensions iteratively corner values are unnecessarily overwritten multiple times. This function reduces the working area for the first dimensions so that corners are excluded. Parameters ---------- array : ndarray The array with the region of interest. original_area_slice : tuple of slices Denotes the area with original values of the unpadded array. axis : int The currently padded dimension assuming that is padded before + 1. Returns ------- roi : ndarray The region of interest of the original .",
    "type": "function",
    "file_path": "numpy\\numpy\\lib\\_arraypad_impl.py",
    "ast_data": "FunctionDef name:_view_roi arg:array arg:original_area_slice arg:axis arguments arg arg arg Assign Call Return return:yes"
  },
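  {
    "library": "numpy",
    "name": "usage_sketch_view_roi",
    "docstring": "Illustrative sketch, not the numpy source: a standalone copy of _view_roi with a small worked example showing that the result is a writable view whose trailing axes are restricted to the unpadded core.",
    "source_code": "import numpy as np\n\ndef view_roi(array, original_area_slice, axis):\n    # Keep the full extent up to and including `axis`; restrict the\n    # later axes to the original (unpadded) area so that corner\n    # regions are excluded from subsequent padding passes.\n    axis += 1\n    sl = (slice(None),) * axis + original_area_slice[axis:]\n    return array[sl]\n\na = np.zeros((6, 6))\norig = (slice(2, 4), slice(2, 4))  # the unpadded 2x2 core\nroi = view_roi(a, orig, axis=0)\nprint(roi.shape)   # (6, 2): full rows, original columns only\nroi[:] = 1         # it is a view, so writes reach `a`\nprint(int(a.sum()))  # 12",
    "type": "example"
  },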
  {
    "library": "tensorflow",
    "name": "trace",
    "source_code": "@tf_export('autograph.trace')\ndef trace(*args):\n    print(*args)",
    "docstring": "Traces argument information at compilation time. is useful when debugging, and it always executes during the tracing phase, that is, when the TF graph is constructed. _Example usage_ Args: *args: Arguments to print to .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\autograph\\utils\\ag_logging.py",
    "ast_data": "FunctionDef name:trace arguments arg Call Call"
  },
  {
    "library": "scikit-learn",
    "name": "_get_mask",
    "source_code": "def _get_mask(X, value_to_mask):\n    if not sp.issparse(X):\n        return _get_dense_mask(X, value_to_mask)\n    Xt = _get_dense_mask(X.data, value_to_mask)\n    sparse_constructor = sp.csr_matrix if X.format == 'csr' else sp.csc_matrix\n    Xt_sparse = sparse_constructor((Xt, X.indices.copy(), X.indptr.copy()), shape=X.shape, dtype=bool)\n    return Xt_sparse",
    "docstring": "Compute the boolean mask X == value_to_mask. Parameters ---------- X : {ndarray, sparse matrix} of shape (n_samples, n_features) Input data, where `` is the number of features. value_to_mask : {int, float} The value which is to be masked in X. Returns ------- X_mask : {ndarray, sparse matrix} of shape (n_samples, n_features) Missing mask.",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\utils\\_mask.py",
    "ast_data": "FunctionDef name:_get_mask arg:X arg:value_to_mask arguments arg arg If Call Return return:yes Call Assign Call Assign Compare Assign Call Call Call Return return:yes"
  },
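  {
    "library": "scikit-learn",
    "name": "usage_sketch_sparse_mask",
    "docstring": "Illustrative sketch of the sparse/dense branching in _get_mask above, with the NaN handling of the real _get_dense_mask helper omitted for brevity. get_mask is a hypothetical standalone name.",
    "source_code": "import numpy as np\nfrom scipy import sparse as sp\n\ndef get_mask(X, value_to_mask):\n    # Dense input: direct elementwise comparison. Sparse input: compare\n    # only the stored values and rebuild a boolean matrix that reuses\n    # the original sparsity structure.\n    if not sp.issparse(X):\n        return X == value_to_mask\n    data_mask = X.data == value_to_mask\n    ctor = sp.csr_matrix if X.format == 'csr' else sp.csc_matrix\n    return ctor((data_mask, X.indices.copy(), X.indptr.copy()), shape=X.shape, dtype=bool)\n\nX = sp.csr_matrix(np.array([[0.0, -1.0], [-1.0, 3.0]]))\nprint(get_mask(X, -1.0).toarray())  # [[False  True], [ True False]]",
    "type": "example"
  },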
  {
    "library": "kornia",
    "name": "translate_x",
    "source_code": "def translate_x(probability: float, magnitude: int) -> OperationBase:\n    magnitudes = linspace(-0.5, 0.5, 11)\n    return TranslateX(None, probability, magnitude_range=(magnitudes[magnitude].item(), magnitudes[magnitude + 1].item()), symmetric_megnitude=False)",
    "docstring": "Return TranslateX op.",
    "type": "function",
    "file_path": "kornia\\kornia\\augmentation\\auto\\autoaugment\\ops.py",
    "ast_data": "FunctionDef name:translate_x arg:probability arg:magnitude arguments arg arg Assign Call Return return:yes Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "_ScheduleForwardOnly",
    "source_code": "class _ScheduleForwardOnly(PipelineScheduleSingle):\n\n    def _step_microbatches(self, arg_mbs: Optional[list]=None, kwarg_mbs: Optional[list]=None, target_mbs: Optional[list]=None, losses: Optional[list]=None):\n        if target_mbs is not None or losses is not None:\n            raise RuntimeError('Forward-only schedule does not support loss computation')\n        arg_mbs, kwarg_mbs = self._check_inputs(arg_mbs, kwarg_mbs, target_mbs, losses)\n        if not self._stage_initialized:\n            self._initialize_stage(arg_mbs[0], kwarg_mbs[0])\n        fwd_sends_to_wait: list[list[dist.Work]] = []\n        for i in range(self._n_microbatches):\n            with record_function(f'Forward {i}'):\n                ops = self._stage.get_fwd_recv_ops(i)\n                works = _sorted_batch_p2p(ops, desc='fwd_recv')\n                for work in works.values():\n                    _wait_batch_p2p(work)\n                self._stage.forward_one_chunk(i, arg_mbs[i], kwarg_mbs[i])\n                ops = self._stage.get_fwd_send_ops(i)\n                works = _sorted_batch_p2p(ops, desc='fwd_send')\n                fwd_sends_to_wait.extend(works.values())\n            logger.debug('[%s] Forwarded microbatch %s', self._stage.stage_index, i)\n        for work in fwd_sends_to_wait:\n            _wait_batch_p2p(work)",
    "docstring": "The forward-only schedule. Will go through all the microbatches and perform only the forward pass",
    "type": "class",
    "file_path": "pytorch\\torch\\distributed\\pipelining\\schedules.py",
    "ast_data": "ClassDef name:_ScheduleForwardOnly FunctionDef name:_step_microbatches arg:self arg:arg_mbs arg:kwarg_mbs arg:target_mbs arg:losses arguments arg arg arg arg arg If BoolOp Compare Compare Raise Call Assign Call If Call For Call With Call Assign Call Assign Call For Call Call Call Assign Call Assign Call Call Call Call For Call"
  },
  {
    "library": "tensorflow",
    "name": "update_docstrings_with_api_lists",
    "source_code": "def update_docstrings_with_api_lists():\n    _update_docstring_with_api_list(dispatch_for_unary_elementwise_apis, _UNARY_ELEMENTWISE_APIS)\n    _update_docstring_with_api_list(dispatch_for_binary_elementwise_apis, _BINARY_ELEMENTWISE_APIS)\n    _update_docstring_with_api_list(dispatch_for_binary_elementwise_assert_apis, _BINARY_ELEMENTWISE_ASSERT_APIS)\n    _update_docstring_with_api_list(dispatch_for_api, _TYPE_BASED_DISPATCH_SIGNATURES)",
    "docstring": "Updates the docstrings of dispatch decorators with API lists. Updates docstrings for , , and , by replacing the string '>' with a list of APIs that have been registered for that decorator.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\util\\dispatch.py",
    "ast_data": "FunctionDef name:update_docstrings_with_api_lists arguments Call Call Call Call"
  },
  {
    "library": "cryptography",
    "name": "encode_public",
    "source_code": "def encode_public(self, public_key: ec.EllipticCurvePublicKey, f_pub: _FragList) -> None:\n    point = public_key.public_bytes(Encoding.X962, PublicFormat.UncompressedPoint)\n    f_pub.put_sshstr(self.ssh_curve_name)\n    f_pub.put_sshstr(point)",
    "docstring": "Write ECDSA public key",
    "type": "method",
    "file_path": "cryptography\\src\\cryptography\\hazmat\\primitives\\serialization\\ssh.py",
    "ast_data": "FunctionDef name:encode_public arg:self arg:public_key arg:f_pub arguments arg arg arg Assign Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "legacy_saveable_name",
    "source_code": "def legacy_saveable_name(name):\n\n    def decorator(cls_or_obj):\n        setattr(cls_or_obj, _LEGACY_SAVEABLE_NAME, name)\n        return cls_or_obj\n    return decorator",
    "docstring": "Decorator to set the local name to use in the Checkpoint. Needed for migrating certain Trackables (see next paragraph) from the legacy to the new function. This decorator should be used if the SaveableObject generates tensors with different names from the name that is passed to the factory. Example migration: *Before* *After* Args: name: String name of the SaveableObject factory (the key returned in the function) Returns: A decorator.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\checkpoint\\saveable_compat.py",
    "ast_data": "FunctionDef name:legacy_saveable_name arg:name arguments arg FunctionDef name:decorator arg:cls_or_obj arguments arg Call Return return:yes Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_shared_object_saving_scope",
    "source_code": "def _shared_object_saving_scope():\n    return getattr(SHARED_OBJECT_SAVING, 'scope', None)",
    "docstring": "Get the current shared object saving scope in a threadsafe manner.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\utils\\generic_utils.py",
    "ast_data": "FunctionDef name:_shared_object_saving_scope arguments Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "get_endpoint",
    "source_code": "def get_endpoint(self) -> str:\n    return f'{self._host}:{self._port}'",
    "docstring": "Return the etcd server endpoint (host:port).",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\elastic\\rendezvous\\etcd_server.py",
    "ast_data": "FunctionDef name:get_endpoint arg:self arguments arg Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "_get_ticker_locator_formatter",
    "source_code": "def _get_ticker_locator_formatter(self):\n    locator = self._locator\n    formatter = self._formatter\n    minorlocator = self._minorlocator\n    if isinstance(self.norm, colors.BoundaryNorm):\n        b = self.norm.boundaries\n        if locator is None:\n            locator = ticker.FixedLocator(b, nbins=10)\n        if minorlocator is None:\n            minorlocator = ticker.FixedLocator(b)\n    elif isinstance(self.norm, colors.NoNorm):\n        if locator is None:\n            nv = len(self._values)\n            base = 1 + int(nv / 10)\n            locator = ticker.IndexLocator(base=base, offset=0.5)\n    elif self.boundaries is not None:\n        b = self._boundaries[self._inside]\n        if locator is None:\n            locator = ticker.FixedLocator(b, nbins=10)\n    else:\n        if locator is None:\n            locator = self.long_axis.get_major_locator()\n        if minorlocator is None:\n            minorlocator = self.long_axis.get_minor_locator()\n    if minorlocator is None:\n        minorlocator = ticker.NullLocator()\n    if formatter is None:\n        formatter = self.long_axis.get_major_formatter()\n    self._locator = locator\n    self._formatter = formatter\n    self._minorlocator = minorlocator\n    _log.debug('locator: %r', locator)",
    "docstring": "Return the `` of the colorbar. If they have not been defined (i.e. are *None*), the formatter and locator are retrieved from the axis, or from the value of the boundaries for a boundary norm. Called by update_ticks...",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\colorbar.py",
    "ast_data": "FunctionDef name:_get_ticker_locator_formatter arg:self arguments arg Assign Assign Assign If Call Assign If Compare Assign Call If Compare Assign Call If Call If Compare Assign Call Assign Call Assign Call If Compare Assign If Compare Assign Call If Compare Assign Call If Compare Assign Call If Compare Assign Call If Compare Assign Call Assign Assign Assign Call"
  },
  {
    "library": "pytorch",
    "name": "zeros",
    "source_code": "def zeros(*size, requires_grad: bool=False, dtype: Optional[torch.dtype]=None, layout: torch.layout=torch.strided, device_mesh: Optional[DeviceMesh]=None, placements: Optional[Sequence[Placement]]=None) -> DTensor:\n    torch_size = normalize_to_torch_size(size)\n    return _dtensor_init_helper(torch.zeros, torch_size, dtype=dtype, layout=layout, requires_grad=requires_grad, device_mesh=device_mesh, placements=placements)",
    "docstring": "Returns a :class: filled with the scalar value 0. Args: size (int...): a sequence of integers defining the shape of the output :class:. Can be a variable number of arguments or a collection like a list or tuple. E.g.: zeros(1,2,3..) or zeros([1,2,3..]) or zeros((1,2,3..)) Keyword args: requires_grad (bool, optional): If autograd should record operations on the returned :class:. Default: `torch.dtypeDTensortorch.set_default_dtypetorch.layoutDTensorDeviceMeshPlacementDTensor` object on each rank",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\tensor\\_api.py",
    "ast_data": "FunctionDef name:zeros arguments arg arg arg arg arg arg Assign Call Return return:yes Call"
  },
  {
    "library": "kornia",
    "name": "identity_matrix",
    "source_code": "def identity_matrix(self, input: Tensor) -> Tensor:\n    raise NotImplementedError",
    "docstring": "Return identity matrix.",
    "type": "method",
    "file_path": "kornia\\kornia\\augmentation\\container\\base.py",
    "ast_data": "FunctionDef name:identity_matrix arg:self arg:input arguments arg arg Raise"
  },
  {
    "library": "pytorch",
    "name": "get_float32_matmul_precision",
    "source_code": "def get_float32_matmul_precision() -> str:\n    return _C._get_float32_matmul_precision()",
    "docstring": "Returns the current value of float32 matrix multiplication precision. Refer to :func: documentation for more details.",
    "type": "function",
    "file_path": "pytorch\\torch\\__init__.py",
    "ast_data": "FunctionDef name:get_float32_matmul_precision arguments Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "primes_from_2_to",
    "source_code": "def primes_from_2_to(n: int) -> np.ndarray:\n    sieve = np.ones(n // 3 + (n % 6 == 2), dtype=bool)\n    for i in range(1, int(n ** 0.5) // 3 + 1):\n        k = 3 * i + 1 | 1\n        sieve[k * k // 3::2 * k] = False\n        sieve[k * (k - 2 * (i & 1) + 4) // 3::2 * k] = False\n    return np.r_[2, 3, 3 * np.nonzero(sieve)[0][1:] + 1 | 1]",
    "docstring": "Prime numbers from 2 to *n*. Parameters ---------- n : int Sup bound with `_.",
    "type": "function",
    "file_path": "scipy\\scipy\\stats\\_qmc.py",
    "ast_data": "FunctionDef name:primes_from_2_to arg:n arguments arg Assign Call Compare For Call Call Assign Assign Assign Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "trace_logger",
    "source_code": "def trace_logger(self) -> JitTypeTraceStoreLogger:\n    return JitTypeTraceStoreLogger(self.trace_store())",
    "docstring": "Return a JitCallTraceStoreLogger that logs to the configured trace store.",
    "type": "method",
    "file_path": "pytorch\\torch\\jit\\_monkeytype_config.py",
    "ast_data": "FunctionDef name:trace_logger arg:self arguments arg Return return:yes Call Call"
  },
  {
    "library": "matplotlib",
    "name": "release",
    "source_code": "def release(self, o):\n    if not self.available(o):\n        raise ValueError('you do not own this lock')\n    self._owner = None",
    "docstring": "Release the lock from *o*.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\widgets.py",
    "ast_data": "FunctionDef name:release arg:self arg:o arguments arg arg If Call Raise Call Assign"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, filenames, compression_type=None, buffer_size=None, name=None):\n    self._filenames = filenames\n    self._compression_type = convert.optional_param_to_tensor('compression_type', compression_type, argument_default='', argument_dtype=dtypes.string)\n    self._buffer_size = convert.optional_param_to_tensor('buffer_size', buffer_size, argument_default=_DEFAULT_TF_RECORD_BUFFER_SIZE_BYTES)\n    self._name = name\n    variant_tensor = gen_dataset_ops.tf_record_dataset(self._filenames, self._compression_type, self._buffer_size, metadata=self._metadata.SerializeToString())\n    super(_TFRecordDataset, self).__init__(variant_tensor)",
    "docstring": "Creates a . Args: filenames: A tensor containing one or more filenames. compression_type: (Optional.) A scalar evaluating to one of (no compression), , or . buffer_size: (Optional.) A scalar representing the number of bytes in the read buffer. 0 means no buffering. name: (Optional.) A name for the tf.data operation.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\data\\ops\\readers.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:filenames arg:compression_type arg:buffer_size arg:name arguments arg arg arg arg arg Assign Assign Call Assign Call Assign Assign Call Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_FlatMapDataset",
    "source_code": "class _FlatMapDataset(dataset_ops.UnaryDataset):\n\n    def __init__(self, input_dataset, map_func, name=None):\n        self._input_dataset = input_dataset\n        self._map_func = structured_function.StructuredFunctionWrapper(map_func, self._transformation_name(), dataset=input_dataset)\n        if not isinstance(self._map_func.output_structure, dataset_ops.DatasetSpec):\n            raise TypeError(f'The `map_func` argument must return a `Dataset` object. Got {dataset_ops.get_type(self._map_func.output_structure)!r}.')\n        self._structure = self._map_func.output_structure._element_spec\n        self._name = name\n        variant_tensor = gen_dataset_ops.flat_map_dataset(input_dataset._variant_tensor, self._map_func.function.captured_inputs, f=self._map_func.function, **self._common_args)\n        super().__init__(input_dataset, variant_tensor)\n\n    def _functions(self):\n        return [self._map_func]\n\n    @property\n    def element_spec(self):\n        return self._structure\n\n    def _transformation_name(self):\n        return 'Dataset.flat_map()'",
    "docstring": "A that maps a function over its input and flattens the result.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\data\\ops\\flat_map_op.py",
    "ast_data": "ClassDef name:_FlatMapDataset FunctionDef name:__init__ arg:self arg:input_dataset arg:map_func arg:name arguments arg arg arg arg Assign Assign Call Call If Call Raise Call Call Assign Assign Assign Call Call Call FunctionDef name:_functions arg:self arguments arg Return return:yes FunctionDef name:element_spec arg:self arguments arg Return return:yes FunctionDef name:_transformation_name arg:self arguments arg Return return:yes"
  },
  {
    "library": "pandas",
    "name": "is_monotonic_decreasing",
    "source_code": "@property\ndef is_monotonic_decreasing(self) -> bool:\n    return self._engine.is_monotonic_decreasing",
    "docstring": "Return a boolean if the values are equal or decreasing. Returns ------- bool See Also -------- Index.is_monotonic_increasing : Check if the values are equal or increasing. Examples -------- >>> pd.Index([3, 2, 1]).is_monotonic_decreasing True >>> pd.Index([3, 2, 2]).is_monotonic_decreasing True >>> pd.Index([3, 1, 2]).is_monotonic_decreasing False",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\indexes\\base.py",
    "ast_data": "FunctionDef name:is_monotonic_decreasing arg:self arguments arg Return return:yes"
  },
  {
    "library": "scipy",
    "name": "random",
    "source_code": "def random(self, n: IntNumber=1) -> np.ndarray:\n    base_samples = self._standard_normal_samples(n)\n    return self._correlate(base_samples)",
    "docstring": "Draw QMC samples from the multivariate Normal. Parameters ---------- n : int, optional Number of samples to generate in the parameter space. Default is 1. Returns ------- sample : array_like (n, d) Sample.",
    "type": "method",
    "file_path": "scipy\\scipy\\stats\\_qmc.py",
    "ast_data": "FunctionDef name:random arg:self arg:n arguments arg arg Assign Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "variable_shape",
    "source_code": "@property\ndef variable_shape(self):\n    return tensor_shape.TensorShape(self.shape)",
    "docstring": "Returns a representing the shape of sequence input.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\feature_column\\sequence_feature_column.py",
    "ast_data": "FunctionDef name:variable_shape arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "get_sparse_tensors",
    "source_code": "def get_sparse_tensors(self, transformation_cache, state_manager):\n    return CategoricalColumn.IdWeightPair(transformation_cache.get(self, state_manager), None)",
    "docstring": "See base class.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\feature_column\\feature_column_v2.py",
    "ast_data": "FunctionDef name:get_sparse_tensors arg:self arg:transformation_cache arg:state_manager arguments arg arg arg Return return:yes Call Call"
  },
  {
    "library": "pandas",
    "name": "PandasBufferPyarrow",
    "source_code": "class PandasBufferPyarrow(Buffer):\n\n    def __init__(self, buffer: pa.Buffer, *, length: int) -> None:\n        self._buffer = buffer\n        self._length = length\n\n    @property\n    def bufsize(self) -> int:\n        return self._buffer.size\n\n    @property\n    def ptr(self) -> int:\n        return self._buffer.address\n\n    def __dlpack__(self) -> Any:\n        raise NotImplementedError\n\n    def __dlpack_device__(self) -> tuple[DlpackDeviceType, int | None]:\n        return (DlpackDeviceType.CPU, None)\n\n    def __repr__(self) -> str:\n        return 'PandasBuffer[pyarrow](' + str({'bufsize': self.bufsize, 'ptr': self.ptr, 'device': 'CPU'}) + ')'",
    "docstring": "Data in the buffer is guaranteed to be contiguous in memory.",
    "type": "class",
    "file_path": "pandas\\pandas\\core\\interchange\\buffer.py",
    "ast_data": "ClassDef name:PandasBufferPyarrow FunctionDef name:__init__ arg:self arg:buffer arguments arg arg arg Assign Assign FunctionDef name:bufsize arg:self arguments arg Return return:yes FunctionDef name:ptr arg:self arguments arg Return return:yes FunctionDef name:__dlpack__ arg:self arguments arg Raise FunctionDef name:__dlpack_device__ arg:self arguments arg Return return:yes FunctionDef name:__repr__ arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "_check_empty_inputs",
    "source_code": "def _check_empty_inputs(samples, axis):\n    if not any((sample.size == 0 for sample in samples)):\n        return None\n    output_shape = _broadcast_array_shapes_remove_axis(samples, axis)\n    output = np.ones(output_shape) * _get_nan(*samples)\n    return output",
    "docstring": "Check for empty sample; return appropriate output for a vectorized hypotest",
    "type": "function",
    "file_path": "scipy\\scipy\\stats\\_axis_nan_policy.py",
    "ast_data": "FunctionDef name:_check_empty_inputs arg:samples arg:axis arguments arg arg If Call Compare Return return:no Assign Call Assign Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "keymap",
    "source_code": "def keymap(func, d, factory=dict):\n    rv = factory()\n    rv.update(zip(map(func, d.keys()), d.values()))\n    return rv",
    "docstring": "Apply function to keys of dictionary >>> bills = {\"Alice\": [20, 15, 30], \"Bob\": [10, 35]} >>> keymap(str.lower, bills) # doctest: +SKIP {'alice': [20, 15, 30], 'bob': [10, 35]} See Also: valmap itemmap",
    "type": "function",
    "file_path": "pytorch\\torch\\fx\\experimental\\unification\\unification_tools.py",
    "ast_data": "FunctionDef name:keymap arg:func arg:d arg:factory arguments arg arg arg Assign Call Call Call Call Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "forward",
    "source_code": "def forward(self, query: torch.Tensor, key: torch.Tensor, value: torch.Tensor) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]:\n    return (self.query_proj(query), self.key_proj(key), self.value_proj(value))",
    "docstring": "Projects the input sequences using in-proj layers. Args: query, key, value (Tensors): sequence to be projected Shape: - query, key, value: :math: - Output: :math: where S is the sequence length, N is the batch size, and E is the embedding dimension.",
    "type": "method",
    "file_path": "pytorch\\benchmarks\\functional_autograd_benchmark\\torchaudio_models.py",
    "ast_data": "FunctionDef name:forward arg:self arg:query arg:key arg:value arguments arg arg arg arg Return return:yes Call Call Call"
  },
  {
    "library": "matplotlib",
    "name": "clabel",
    "source_code": "def clabel(self, *args, **kwargs):\n    return None",
    "docstring": "Currently not implemented for 3D Axes, and returns *None*.",
    "type": "method",
    "file_path": "matplotlib\\lib\\mpl_toolkits\\mplot3d\\axes3d.py",
    "ast_data": "FunctionDef name:clabel arg:self arguments arg arg arg Return return:no"
  },
  {
    "library": "pytorch",
    "name": "_dtype_match",
    "source_code": "def _dtype_match(self, op: 'cutlass_library.gemm_op.GemmOperation') -> bool:\n    X = self.input_nodes[0]\n    W = self.input_nodes[1]\n    accumulator_torch_dtype = cutlass_utils.get_accumulator_dtype([X.get_dtype(), W.get_dtype()])\n    if not (cutlass_utils.dtype_match(X.get_dtype(), op.A.element) and cutlass_utils.dtype_match(W.get_dtype(), op.B.element) and cutlass_utils.dtype_match(self.output_node.get_layout().dtype, op.D.element) and cutlass_utils.dtype_match(accumulator_torch_dtype, op.accumulator_type())):\n        return False\n    return True",
    "docstring": "Checking dtypes of A, B, acc, D here. Empirically speaking, CUTLASS2x ops have same dtype for C and D.",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\codegen\\cuda\\gemm_template.py",
    "ast_data": "FunctionDef name:_dtype_match arg:self arg:op arguments arg arg Assign Assign Assign Call Call Call If BoolOp Call Call Call Call Call Call Call Call Return return:yes Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "Analyzer",
    "source_code": "class Analyzer(cfg.GraphVisitor):\n\n    def __init__(self, graph, external_defs):\n        super(Analyzer, self).__init__(graph)\n        self.external_defs = external_defs\n\n    def init_state(self, _):\n        return _NodeState()\n\n    def visit_node(self, node):\n        prev_defs_out = self.out[node]\n        if node is self.graph.entry:\n            defs_in = _NodeState(self.external_defs)\n        else:\n            defs_in = prev_defs_out\n        for n in node.prev:\n            defs_in |= self.out[n]\n        defs_out = defs_in\n        if isinstance(node.ast_node, (gast.Lambda, gast.FunctionDef)):\n            defs_out += node.ast_node\n        self.in_[node] = defs_in\n        self.out[node] = defs_out\n        return prev_defs_out != defs_out",
    "docstring": "CFG visitor that determines reaching definitions at statement level.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\autograph\\pyct\\static_analysis\\reaching_fndefs.py",
    "ast_data": "ClassDef name:Analyzer FunctionDef name:__init__ arg:self arg:graph arg:external_defs arguments arg arg arg Call Call Assign FunctionDef name:init_state arg:self arg:_ arguments arg arg Return return:yes Call FunctionDef name:visit_node arg:self arg:node arguments arg arg Assign If Compare Assign Call Assign For Assign If Call Assign Assign Return return:yes Compare"
  },
  {
    "library": "seaborn",
    "name": "_check_grouping_vars",
    "source_code": "def _check_grouping_vars(self, param: str, data_vars: list[str], stacklevel: int=2) -> None:\n    param_vars = getattr(self, param)\n    undefined = set(param_vars) - set(data_vars)\n    if undefined:\n        param = f'{self.__class__.__name__}.{param}'\n        names = ', '.join((f'{x!r}' for x in undefined))\n        msg = f'Undefined variable(s) passed for {param}: {names}.'\n        warnings.warn(msg, stacklevel=stacklevel)",
    "docstring": "Warn if vars are named in parameter without being present in the data.",
    "type": "method",
    "file_path": "seaborn\\seaborn\\_stats\\base.py",
    "ast_data": "FunctionDef name:_check_grouping_vars arg:self arg:param arg:data_vars arg:stacklevel arguments arg arg arg arg Assign Call Assign Call Call If Assign Assign Call Assign Call"
  },
  {
    "library": "matplotlib",
    "name": "detrend_mean",
    "source_code": "def detrend_mean(x, axis=None):\n    x = np.asarray(x)\n    if axis is not None and axis + 1 > x.ndim:\n        raise ValueError('axis(=%s) out of bounds' % axis)\n    return x - x.mean(axis, keepdims=True)",
    "docstring": "Return *x* minus the mean(*x*). Parameters ---------- x : array or sequence Array or sequence containing the data Can have any dimensionality axis : int The axis along which to take the mean. See for a description of this argument. See Also -------- detrend_linear : Another detrend algorithm. detrend_none : Another detrend algorithm. detrend : A wrapper around all the detrend algorithms.",
    "type": "function",
    "file_path": "matplotlib\\lib\\matplotlib\\mlab.py",
    "ast_data": "FunctionDef name:detrend_mean arg:x arg:axis arguments arg arg Assign Call If BoolOp Compare Compare Raise Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "register_kl",
    "source_code": "def register_kl(type_p, type_q):\n    if not isinstance(type_p, type) and issubclass(type_p, Distribution):\n        raise TypeError(f'Expected type_p to be a Distribution subclass but got {type_p}')\n    if not isinstance(type_q, type) and issubclass(type_q, Distribution):\n        raise TypeError(f'Expected type_q to be a Distribution subclass but got {type_q}')\n\n    def decorator(fun):\n        _KL_REGISTRY[type_p, type_q] = fun\n        _KL_MEMOIZE.clear()\n        return fun\n    return decorator",
    "docstring": "Decorator to register a pairwise function with :meth:. Usage:: @register_kl(Normal, Normal) def kl_normal_normal(p, q): # insert implementation here Lookup returns the most specific (type,type) match ordered by subclass. If the match is ambiguous, a is raised. For example to resolve the ambiguous situation:: @register_kl(BaseP, DerivedQ) def kl_version1(p, q): ... @register_kl(DerivedP, BaseQ) def kl_version2(p, q): ... you should register a third most-specific implementation, e.g.:: register_kl(DerivedP, DerivedQ)(kl_version1) # Break the tie. Args: type_p (type): A subclass of :class:. type_q (type): A subclass of :class:.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributions\\kl.py",
    "ast_data": "FunctionDef name:register_kl arg:type_p arg:type_q arguments arg arg If BoolOp Call Call Raise Call If BoolOp Call Call Raise Call FunctionDef name:decorator arg:fun arguments arg Assign Call Return return:yes Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_mark_as_return",
    "source_code": "def _mark_as_return(tensor):\n    if not tensor_util.is_tf_type(tensor):\n        return tensor\n    return_tensor = acd.mark_as_return(tensor)\n    if getattr(tensor, '_keras_mask', None) is not None:\n        return_tensor._keras_mask = acd.mark_as_return(tensor._keras_mask)\n    else:\n        return_tensor._keras_mask = None\n    if getattr(tensor, '_tfp_distribution', None) is not None:\n        return_tensor._tfp_distribution = tensor._tfp_distribution\n    return return_tensor",
    "docstring": "Marks as the return value for automatic control deps.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\base_layer_utils.py",
    "ast_data": "FunctionDef name:_mark_as_return arg:tensor arguments arg If Call Return return:yes Assign Call If Compare Call Assign Call Assign If Compare Call Assign Return return:yes"
  },
  {
    "library": "pandas",
    "name": "isin",
    "source_code": "def isin(self, values: ArrayLike) -> npt.NDArray[np.bool_]:\n    null_mask = np.asarray(isna(values))\n    code_values = self.categories.get_indexer_for(values)\n    code_values = code_values[null_mask | (code_values >= 0)]\n    return algorithms.isin(self.codes, code_values)",
    "docstring": "Check whether are contained in Categorical. Return a boolean NumPy Array showing whether each element in the Categorical matches an element in the passed sequence of exactly. Parameters ---------- values : np.ndarray or ExtensionArray The sequence of values to test. Passing in a single string will raise a `values` will raise an error. Use a list of one element instead: >>> s.isin([\"llama\"]) array([ True, False, True, False, True, False])",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\arrays\\categorical.py",
    "ast_data": "FunctionDef name:isin arg:self arg:values arguments arg arg Assign Call Call Assign Call Assign Compare Return return:yes Call"
  },
  {
    "library": "kornia",
    "name": "_torch_inverse_cast",
    "source_code": "def _torch_inverse_cast(input: Tensor) -> Tensor:\n    if not isinstance(input, Tensor):\n        raise AssertionError(f'Input must be Tensor. Got: {type(input)}.')\n    dtype: torch.dtype = input.dtype\n    if dtype not in (torch.float32, torch.float64):\n        dtype = torch.float32\n    return torch.linalg.inv(input.to(dtype)).to(input.dtype)",
    "docstring": "Make torch.inverse work with other than fp32/64. The function torch.inverse is only implemented for fp32/64 which makes impossible to be used by fp16 or others. What this function does, is cast input data type to fp32, apply torch.inverse, and cast back to the input dtype.",
    "type": "function",
    "file_path": "kornia\\kornia\\utils\\helpers.py",
    "ast_data": "FunctionDef name:_torch_inverse_cast arg:input arguments arg If Call Raise Call Call If Compare Assign Return return:yes Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "APIChangeSpec",
    "source_code": "class APIChangeSpec:\n\n    def preprocess(self, root_node):\n        return (root_node, [], [])\n\n    def clear_preprocessing(self):\n        pass",
    "docstring": "This class defines the transformations that need to happen. This class must provide the following fields: * : maps function names to a map of old -> new argument names * : maps function names to new function names * : a set of function names that have changed (for notifications) * : maps functions whose argument order has changed to the list of arguments in the new order * : maps full names of functions to warnings that will be printed out if the function is used. (e.g. tf.nn.convolution()) * : maps function names to custom handlers * : maps module names to warnings that will be printed if the module is still used after all other transformations have run * : maps import name (must be a short name without '.') to ImportRename instance. For an example, see .",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\tools\\compatibility\\ast_edits.py",
    "ast_data": "ClassDef name:APIChangeSpec FunctionDef name:preprocess arg:self arg:root_node arguments arg arg Return return:yes FunctionDef name:clear_preprocessing arg:self arguments arg"
  },
  {
    "library": "tensorflow",
    "name": "streaming",
    "source_code": "@property\ndef streaming(self):\n    return self._streaming",
    "docstring": "Whether can be called twice without resetting the state.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\base_preprocessing_layer.py",
    "ast_data": "FunctionDef name:streaming arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "retrieve_clang_version",
    "source_code": "def retrieve_clang_version(clang_executable):\n    stderr = open(os.devnull, 'wb')\n    curr_version = run_shell([clang_executable, '--version'], allow_non_zero=True, stderr=stderr)\n    curr_version_split = curr_version.lower().split('clang version ')\n    if len(curr_version_split) > 1:\n        curr_version = curr_version_split[1].split()[0].split('git')\n    if len(curr_version) > 1:\n        print('WARNING: current clang installation is not a release version.\\n')\n    curr_version = curr_version[0]\n    curr_version_int = convert_version_to_int(curr_version)\n    if not curr_version_int:\n        print('WARNING: current clang installation version unknown.\\n')\n        return None\n    print('You have Clang %s installed.\\n' % curr_version)\n    return curr_version",
    "docstring": "Retrieve installed clang version. Args: clang_executable: (String) path to clang executable Returns: The clang version detected.",
    "type": "function",
    "file_path": "tensorflow\\configure.py",
    "ast_data": "FunctionDef name:retrieve_clang_version arg:clang_executable arguments arg Assign Call Assign Call Assign Call Call If Compare Call Assign Call Call If Compare Call Call Assign Assign Call If Call Return return:no Call Return return:yes"
  },
  {
    "library": "scipy",
    "name": "_cumulative_simpson_equal_intervals",
    "source_code": "def _cumulative_simpson_equal_intervals(y: np.ndarray, dx: np.ndarray) -> np.ndarray:\n    d = dx[..., :-1]\n    f1 = y[..., :-2]\n    f2 = y[..., 1:-1]\n    f3 = y[..., 2:]\n    return d / 3 * (5 * f1 / 4 + 2 * f2 - f3 / 4)",
    "docstring": "Calculate the Simpson integrals for all h1 intervals assuming equal interval widths. The function can also be used to calculate the integral for all h2 intervals by reversing the inputs, and .",
    "type": "function",
    "file_path": "scipy\\scipy\\integrate\\_quadrature.py",
    "ast_data": "FunctionDef name:_cumulative_simpson_equal_intervals arg:y arg:dx arguments arg arg Assign Assign Assign Assign Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "set_pad",
    "source_code": "def set_pad(self, val):\n    self._apply_params(pad=val)\n    self.stale = True",
    "docstring": "Set the tick label pad in points Parameters ---------- val : float",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\axis.py",
    "ast_data": "FunctionDef name:set_pad arg:self arg:val arguments arg arg Call Assign"
  },
  {
    "library": "pytorch",
    "name": "_loss_fn",
    "source_code": "def _loss_fn(loss: Callable[[torch.Tensor, torch.Tensor], torch.Tensor], x: object, y: object) -> object:\n    if isinstance(x, torch.Tensor) and isinstance(y, torch.Tensor):\n        return loss(x.to(torch.float32), y.to(torch.float32))\n    elif isinstance(x, (list, tuple)) and isinstance(y, (list, tuple)):\n        return type(x)([_loss_fn(loss, e1, e2) for e1, e2 in zip(x, y)])\n    elif isinstance(x, dict) and isinstance(y, dict):\n        return {k: _loss_fn(loss, e, y[k]) for k, e in x.items()}\n    else:\n        return None",
    "docstring": "The returned loss will have the same structure as and , e.g. if both are Tensor, we'll return a Tensor if both are list, we'll return a list of Tensors if both are dict, we'll return a dict with the same key, and value being the loss between the two Tensors",
    "type": "function",
    "file_path": "pytorch\\torch\\ao\\quantization\\pt2e\\_numeric_debugger.py",
    "ast_data": "FunctionDef name:_loss_fn arg:loss arg:x arg:y arguments arg arg arg If BoolOp Call Call Return return:yes Call Call Call If BoolOp Call Call Return return:yes Call Call Call Call If BoolOp Call Call Return return:yes Call Call Return return:no"
  },
  {
    "library": "tensorflow",
    "name": "avg_pool_v2",
    "source_code": "@tf_export('nn.avg_pool', v1=['nn.avg_pool_v2'])\n@dispatch.add_dispatch_support\ndef avg_pool_v2(input, ksize, strides, padding, data_format=None, name=None):\n    if input.shape is not None:\n        n = len(input.shape) - 2\n    elif data_format is not None:\n        n = len(data_format) - 2\n    else:\n        raise ValueError(f'`input` must have a static shape or `data_format` must be given. Received: input.shape={input.shape} and data_format={data_format}')\n    if not 1 <= n <= 3:\n        raise ValueError(f'`input.shape.rank` must be 3, 4 or 5. Received: input.shape={input.shape} of rank {n + 2}.')\n    if data_format is None:\n        channel_index = n + 1\n    else:\n        channel_index = 1 if data_format.startswith('NC') else n + 1\n    ksize = _get_sequence(ksize, n, channel_index, 'ksize')\n    strides = _get_sequence(strides, n, channel_index, 'strides')\n    avg_pooling_ops = {1: avg_pool1d, 2: gen_nn_ops.avg_pool, 3: gen_nn_ops.avg_pool3d}\n    op = avg_pooling_ops[n]\n    return op(input, ksize=ksize, strides=strides, padding=padding, data_format=data_format, name=name)",
    "docstring": "Performs the avg pooling on the input. Each entry in is the mean of the corresponding size window in . Args: input: Tensor of rank N+2, of shape if does not start with \"NC\" (default), or if data_format starts with \"NC\". Pooling happens over the spatial dimensions only. ksize: An int or list of that has length , or . The size of the window for each dimension of the input tensor. strides: An int or list of that has length , or . The stride of the sliding window for each dimension of the input tensor. padding: A string, either or . The padding algorithm. See [here]( for more information. data_format: A string. Specifies the channel dimension. For N=1 it can be either \"NWC\" (default) or \"NCW\", for N=2 it can be either \"NHWC\" (default) or \"NCHW\" and for N=3 either \"NDHWC\" (default) or \"NCDHW\". name: Optional name for the operation. Returns: A of format specified by . The average pooled output tensor.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\nn_ops.py",
    "ast_data": "FunctionDef name:avg_pool_v2 arg:input arg:ksize arg:strides arg:padding arg:data_format arg:name arguments arg arg arg arg arg arg If Compare Assign Call If Compare Assign Call Raise Call If Compare Raise Call If Compare Assign Assign Call Assign Call Assign Call Assign Assign Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "top",
    "source_code": "def top(self) -> _ModuleMeta:\n    if self.is_empty_or_root():\n        return _ModuleMeta.create_root()\n    return self._module_stack[-1]",
    "docstring": "Returns the top module meta in the stack. I.e., the meta for leaf module. Example: Consider the following module stack: stack = [GPT, block1, Attention_1, MLP] stack.top() == MLP",
    "type": "method",
    "file_path": "pytorch\\torch\\onnx\\_internal\\fx\\passes\\modularization.py",
    "ast_data": "FunctionDef name:top arg:self arguments arg If Call Return return:yes Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "is_debug_node",
    "source_code": "def is_debug_node(node_name):\n    return node_name.startswith('__dbg_')",
    "docstring": "Determine whether a node name is that of a debug node. Such nodes are inserted by TensorFlow core upon request in RunOptions.debug_options.debug_tensor_watch_opts. Args: node_name: Name of the node. Returns: A bool indicating whether the input argument is the name of a debug node.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\debug\\lib\\debug_graphs.py",
    "ast_data": "FunctionDef name:is_debug_node arg:node_name arguments arg Return return:yes Call"
  },
  {
    "library": "authlib",
    "name": "check_verifier",
    "source_code": "def check_verifier(self, verifier):\n    raise NotImplementedError()",
    "docstring": "A method to check if the given verifier matches this temporary credential. For instance that this temporary credential has recorded the value in database as column ``:: def check_verifier(self, verifier): return self.oauth_verifier == verifier :return: Boolean",
    "type": "method",
    "file_path": "authlib\\authlib\\oauth1\\rfc5849\\models.py",
    "ast_data": "FunctionDef name:check_verifier arg:self arg:verifier arguments arg arg Raise Call"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, fetches):\n    values = _get_attrs_values(fetches)\n    self._fetch_type = type(fetches)\n    self._mappers = [_FetchMapper.for_fetch(fetch) for fetch in values]\n    self._unique_fetches, self._value_indices = _uniquify_fetches(self._mappers)",
    "docstring": "Creates a _AttrsFetchMapper. Args: fetches: An instance of an attrs decorated class.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\client\\session.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:fetches arguments arg arg Assign Call Assign Call Assign Call Assign Call"
  },
  {
    "library": "scipy",
    "name": "estimate",
    "source_code": "def estimate(self, f, a, b, args=()):\n    raise NotImplementedError",
    "docstring": "Calculate estimate of integral of in rectangular region described by corners and `ffestimatefest`.",
    "type": "method",
    "file_path": "scipy\\scipy\\integrate\\_rules\\_base.py",
    "ast_data": "FunctionDef name:estimate arg:self arg:f arg:a arg:b arg:args arguments arg arg arg arg arg Raise"
  },
  {
    "library": "tensorflow",
    "name": "TableStacking",
    "source_code": "@dataclasses.dataclass\nclass TableStacking:\n    stacked_table_to_tables: Dict[str, TableConfig] = _fielddict()\n    quantization_configs: Dict[str, QuantizationConfig] = _fielddict()\n    table_name_to_table: Dict[str, TableConfig] = _fielddict()\n    table_to_padding_rows: Dict[str, int] = _fielddict()\n    table_to_padding_columns: Dict[str, int] = _fielddict()\n    table_to_sample_count: Dict[str, int] = _fielddict()\n    table_to_layout: Dict[str, sparse_core_layout_pb2.SparseCoreTableLayout] = _fielddict()\n    table_to_stacked_table_offset: Dict[str, Tuple[str, int, int]] = _fielddict()\n    feature_to_sample_offset: Dict[str, int] = _fielddict()",
    "docstring": "Information about how we stack tables.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\tpu\\tpu_embedding_v3.py",
    "ast_data": "ClassDef name:TableStacking Call Call Call Call Call Call Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "from_file",
    "source_code": "@classmethod\ndef from_file(cls, filename, shared, size):\n    _warn_typed_storage_removal()\n    if cls == TypedStorage:\n        raise RuntimeError('from_file can only be called on derived classes')\n    untyped_storage = UntypedStorage.from_file(filename, shared, size * torch._utils._element_size(cls.dtype))\n    storage = cls(wrap_storage=untyped_storage)\n    return storage",
    "docstring": "from_file(filename, shared=False, size=0) -> Storage Creates a CPU storage backed by a memory-mapped file. If `mmap(2) call `_) size (int): number of elements in the storage",
    "type": "method",
    "file_path": "pytorch\\torch\\storage.py",
    "ast_data": "FunctionDef name:from_file arg:cls arg:filename arg:shared arg:size arguments arg arg arg arg Call If Compare Raise Call Assign Call Call Assign Call Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "_check_response_method",
    "source_code": "def _check_response_method(estimator, response_method):\n    if isinstance(response_method, str):\n        list_methods = [response_method]\n    else:\n        list_methods = response_method\n    prediction_method = [getattr(estimator, method, None) for method in list_methods]\n    prediction_method = reduce(lambda x, y: x or y, prediction_method)\n    if prediction_method is None:\n        raise AttributeError(f'{estimator.__class__.__name__} has none of the following attributes: {', '.join(list_methods)}.')\n    return prediction_method",
    "docstring": "Check if is available in estimator and return it. .. versionadded:: 1.3 Parameters ---------- estimator : estimator instance Classifier or regressor to check. response_method : {\"predict_proba\", \"predict_log_proba\", \"decision_function\", \"predict\"} or list of such str Specifies the response method to use get prediction from an estimator (i.e. :term:, :term:, :term: or :term:). Possible choices are: - if , it corresponds to the name to the method to return; - if a list of , it provides the method names in order of preference. The method returned corresponds to the first method in the list and which is implemented by . Returns ------- prediction_method : callable Prediction method of estimator. Raises ------ AttributeError If is not available in .",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\utils\\validation.py",
    "ast_data": "FunctionDef name:_check_response_method arg:estimator arg:response_method arguments arg arg If Call Assign Assign Assign Call Assign Call arguments arg arg BoolOp If Compare Raise Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "extract_system_builds",
    "source_code": "def extract_system_builds(filepath):\n    lib_names = []\n    system_build_files = []\n    current_name = None\n    with open(filepath, 'r') as f:\n        for line in f:\n            line = line.strip()\n            if line.startswith('name = '):\n                current_name = line[7:-1].strip('\"')\n            elif line.startswith('system_build_file = '):\n                lib_names.append(current_name)\n                system_build_spec = line.split('=')[-1].split('\"')[1]\n                assert system_build_spec.startswith('//')\n                system_build_files.append(system_build_spec[2:].replace(':', os.sep))\n    return (lib_names, system_build_files)",
    "docstring": "Extract the 'name' argument of all rules with a system_build_file argument.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\tools\\ci_build\\builds\\check_system_libs.py",
    "ast_data": "FunctionDef name:extract_system_builds arg:filepath arguments arg Assign Assign Assign With Call For Assign Call If Call Assign Call If Call Call Assign Call Call Call Call Call Return return:yes"
  },
  {
    "library": "pandas",
    "name": "_is_immutable",
    "source_code": "@property\ndef _is_immutable(self) -> bool:\n    return False",
    "docstring": "Can arrays with this dtype be modified with __setitem__? If not, return True. Immutable arrays are expected to raise TypeError on __setitem__ calls.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\dtypes\\base.py",
    "ast_data": "FunctionDef name:_is_immutable arg:self arguments arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_find_localzeros",
    "source_code": "@classmethod\ndef _find_localzeros(cls, values, **options):\n    other_values = set()\n    num_value = None\n    for arg in values:\n        if arg.is_Number:\n            if num_value is None:\n                num_value = arg\n            elif cls is Max:\n                num_value = max(num_value, arg)\n            elif cls is Min:\n                num_value = min(num_value, arg)\n            else:\n                raise AssertionError(f'impossible {cls}')\n        else:\n            other_values.add(arg)\n    if num_value is None:\n        return other_values\n    if len(other_values) == 0:\n        return {num_value}\n    if len(other_values) == 1:\n        other_value = next(iter(other_values))\n        if num_value in (0.0, 0) and other_value.is_nonnegative:\n            return other_values if cls is Max else {num_value}\n        if num_value == 1 and other_value.is_positive:\n            return other_values if cls is Max else {num_value}\n    other_values.add(num_value)\n    return other_values",
    "docstring": "Sequentially allocate values to localzeros. When a value is identified as being more extreme than another member it replaces that member; if this is never true, then the value is simply appended to the localzeros. Unlike the sympy implementation, we only look for zero and one, we don't do generic is connected test pairwise which is slow",
    "type": "method",
    "file_path": "pytorch\\torch\\utils\\_sympy\\functions.py",
    "ast_data": "FunctionDef name:_find_localzeros arg:cls arg:values arguments arg arg arg Assign Call Assign For If If Compare Assign If Compare Assign Call If Compare Assign Call Raise Call Call If Compare Return return:yes If Compare Call Return return:yes If Compare Call Assign Call Call If BoolOp Compare Return return:yes Compare If BoolOp Compare Return return:yes Compare Call Return return:yes"
  },
  {
    "library": "pygame",
    "name": "close",
    "source_code": "def close(self):\n    _check_init()\n    if self._input is not None:\n        self._input.Close()\n    self._input = None",
    "docstring": "closes a midi stream, flushing any pending buffers. Input.close(): return None PortMidi attempts to close open streams when the application exits -- this is particularly difficult under Windows.",
    "type": "method",
    "file_path": "pygame\\src_py\\midi.py",
    "ast_data": "FunctionDef name:close arg:self arguments arg Call If Compare Call Assign"
  },
  {
    "library": "pytorch",
    "name": "CompiledTritonKernels",
    "source_code": "@clear_on_fresh_inductor_cache\nclass CompiledTritonKernels:\n    _cache: dict[str, CodeCacheFuture] = {}\n\n    @staticmethod\n    def key(kernel_src: str):\n        return code_hash(kernel_src, extra=torch_key())\n\n    @staticmethod\n    def save(kernel_src: str, future: CodeCacheFuture):\n        key = CompiledTritonKernels.key(kernel_src)\n        CompiledTritonKernels._cache[key] = future\n\n    @staticmethod\n    def get(kernel_src: str) -> Optional[CodeCacheFuture]:\n        key = CompiledTritonKernels.key(kernel_src)\n        return CompiledTritonKernels._cache.get(key, None)\n\n    @staticmethod\n    def cache_clear():\n        CompiledTritonKernels._cache = {}\n\n    @staticmethod\n    def remove_future(kernel_src: str) -> None:\n        key = CompiledTritonKernels.key(kernel_src)\n        if key in CompiledTritonKernels._cache:\n            del CompiledTritonKernels._cache[key]",
    "docstring": "In memory cache for storing compiled triton kernels. Each triton kernel is keyed by the hash of its source code. Each value stored in the cache is a return value of AsyncCompile.triton(). Currently, the cache stores Future objects, but it should be generalizable for any kernels.",
    "type": "class",
    "file_path": "pytorch\\torch\\_inductor\\async_compile.py",
    "ast_data": "ClassDef name:CompiledTritonKernels FunctionDef name:key arg:kernel_src arguments arg Return return:yes Call Call FunctionDef name:save arg:kernel_src arg:future arguments arg arg Assign Call Assign FunctionDef name:get arg:kernel_src arguments arg Assign Call Return return:yes Call FunctionDef name:cache_clear arguments Assign FunctionDef name:remove_future arg:kernel_src arguments arg Assign Call If Compare"
  },
  {
    "library": "tensorflow",
    "name": "PaddingFIFOQueue",
    "source_code": "@tf_export('queue.PaddingFIFOQueue', v1=['queue.PaddingFIFOQueue', 'io.PaddingFIFOQueue', 'PaddingFIFOQueue'])\n@deprecation.deprecated_endpoints(['io.PaddingFIFOQueue', 'PaddingFIFOQueue'])\nclass PaddingFIFOQueue(QueueBase):\n\n    def __init__(self, capacity, dtypes, shapes, names=None, shared_name=None, name='padding_fifo_queue'):\n        dtypes = _as_type_list(dtypes)\n        shapes = _as_shape_list(shapes, dtypes, unknown_dim_allowed=True)\n        names = _as_name_list(names, dtypes)\n        if len(dtypes) != len(shapes):\n            raise ValueError(f'Shapes must be provided for all components, but received {len(dtypes)} dtypes and {len(shapes)} shapes.')\n        queue_ref = gen_data_flow_ops.padding_fifo_queue_v2(component_types=dtypes, shapes=shapes, capacity=capacity, shared_name=_shared_name(shared_name), name=name)\n        super(PaddingFIFOQueue, self).__init__(dtypes, shapes, names, queue_ref)",
    "docstring": "A FIFOQueue that supports batching variable-sized tensors by padding. A may contain components with dynamic shape, while also supporting . See the constructor for more details. See for a description of the methods on this class.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\data_flow_ops.py",
    "ast_data": "ClassDef name:PaddingFIFOQueue FunctionDef name:__init__ arg:self arg:capacity arg:dtypes arg:shapes arg:names arg:shared_name arg:name arguments arg arg arg arg arg arg arg Assign Call Assign Call Assign Call If Compare Call Call Raise Call Call Call Assign Call Call Call Call Call Call"
  },
  {
    "library": "pandas",
    "name": "_get_block_manager_axis",
    "source_code": "@final\n@classmethod\ndef _get_block_manager_axis(cls, axis: Axis) -> AxisInt:\n    axis = cls._get_axis_number(axis)\n    ndim = cls._AXIS_LEN\n    if ndim == 2:\n        return 1 - axis\n    return axis",
    "docstring": "Map the axis to the block_manager axis.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\generic.py",
    "ast_data": "FunctionDef name:_get_block_manager_axis arg:cls arg:axis arguments arg arg Assign Call Assign If Compare Return return:yes Return return:yes"
  },
  {
    "library": "django",
    "name": "extent",
    "source_code": "@property\ndef extent(self):\n    from .point import Point\n    env = self.envelope\n    if isinstance(env, Point):\n        xmin, ymin = env.tuple\n        xmax, ymax = (xmin, ymin)\n    else:\n        xmin, ymin = env[0][0]\n        xmax, ymax = env[0][2]\n    return (xmin, ymin, xmax, ymax)",
    "docstring": "Return the extent of this geometry as a 4-tuple, consisting of (xmin, ymin, xmax, ymax).",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\geos\\geometry.py",
    "ast_data": "FunctionDef name:extent arg:self arguments arg Assign If Call Assign Assign Assign Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "flatten_per_replica_values",
    "source_code": "def flatten_per_replica_values(distribution_strategy, per_replica_values):\n    return [e for flattened in nest.flatten(per_replica_values) for e in distribution_strategy.unwrap(flattened)]",
    "docstring": "Unwraps and flattens a nest of PerReplica parameters. PerReplica values have one value associated with each device. Each entry in the PerReplica dict has a device and the corresponding value on the device as the . In this function we take a PerReplica value or a list of PerReplica values and return all the values in the PerReplica dict. Args: distribution_strategy: DistributionStrategy used to distribute training and validation. per_replica_values: List of PerReplica object or a single PerReplica object. Returns: List of values of all the PerReplica objects.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\distribute\\distributed_training_utils_v1.py",
    "ast_data": "FunctionDef name:flatten_per_replica_values arg:distribution_strategy arg:per_replica_values arguments arg arg Return return:yes Call Call"
  },
  {
    "library": "seaborn",
    "name": "set",
    "source_code": "def set(self, **kwargs):\n    for ax in self.axes.flat:\n        if ax is not None:\n            ax.set(**kwargs)\n    return self",
    "docstring": "Set attributes on each subplot Axes.",
    "type": "method",
    "file_path": "seaborn\\seaborn\\axisgrid.py",
    "ast_data": "FunctionDef name:set arg:self arguments arg arg For If Compare Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "remove_boolean_dispatch_from_name",
    "source_code": "def remove_boolean_dispatch_from_name(p) -> Any:\n    if p is F.fractional_max_pool2d:\n        return 'torch.nn.functional.fractional_max_pool2d'\n    elif p is F.fractional_max_pool3d:\n        return 'torch.nn.functional.fractional_max_pool3d'\n    elif p is F.max_pool1d:\n        return 'torch.nn.functional.max_pool1d'\n    elif p is F.max_pool2d:\n        return 'torch.nn.functional.max_pool2d'\n    elif p is F.max_pool3d:\n        return 'torch.nn.functional.max_pool3d'\n    elif p is F.adaptive_max_pool1d:\n        return 'torch.nn.functional.adaptive_max_pool1d'\n    elif p is F.adaptive_max_pool2d:\n        return 'torch.nn.functional.adaptive_max_pool2d'\n    elif p is F.adaptive_max_pool3d:\n        return 'torch.nn.functional.adaptive_max_pool3d'\n    assert 'boolean_dispatch' not in str(p), f'{p} does not have a human readable representation in ' + 'quantization documentation'\n    return p",
    "docstring": "Some ops have a default string representation such as '.fn at 0x7ff1106bf280>', this function replaces them with the hardcoded function names.",
    "type": "function",
    "file_path": "pytorch\\torch\\ao\\quantization\\backend_config\\utils.py",
    "ast_data": "FunctionDef name:remove_boolean_dispatch_from_name arg:p arguments arg If Compare Return return:yes If Compare Return return:yes If Compare Return return:yes If Compare Return return:yes If Compare Return return:yes If Compare Return return:yes If Compare Return return:yes If Compare Return return:yes Compare Call Return return:yes"
  },
  {
    "library": "sphinx",
    "name": "get_theme_dirs",
    "source_code": "def get_theme_dirs(self) -> list[_StrPath]:\n    return list(map(_StrPath, self._dirs))",
    "docstring": "Return a list of theme directories, beginning with this theme's, then the base theme's, then that one's base theme's, etc.",
    "type": "method",
    "file_path": "sphinx\\sphinx\\theming.py",
    "ast_data": "FunctionDef name:get_theme_dirs arg:self arguments arg Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_with_index_update_helper",
    "source_code": "def _with_index_update_helper(update_method, a, slice_spec, updates):\n    if isinstance(slice_spec, bool) or (isinstance(slice_spec, core_tf_types.Tensor) and slice_spec.dtype == dtypes.bool) or (isinstance(slice_spec, (np.ndarray, np_arrays.ndarray)) and slice_spec.dtype == np.bool_):\n        slice_spec = nonzero(slice_spec)\n    if not isinstance(slice_spec, tuple):\n        slice_spec = _as_spec_tuple(slice_spec)\n    a_dtype = a.dtype\n    a, updates = _promote_dtype_binary(a, updates)\n    result_t = _slice_helper(a, slice_spec, update_method, updates)\n    return result_t.astype(a_dtype)",
    "docstring": "Implementation of ndarray._with_index_*.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\numpy_ops\\np_array_ops.py",
    "ast_data": "FunctionDef name:_with_index_update_helper arg:update_method arg:a arg:slice_spec arg:updates arguments arg arg arg arg If BoolOp Call BoolOp Call Compare BoolOp Call Compare Assign Call If Call Assign Call Assign Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "step",
    "source_code": "def step(self):\n    self.optim.step()\n    self.averager.average_parameters(params=self.param_groups)",
    "docstring": "Performs a single optimization step (parameter update).",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\optim\\post_localSGD_optimizer.py",
    "ast_data": "FunctionDef name:step arg:self arguments arg Call Call"
  },
  {
    "library": "matplotlib",
    "name": "ConversionInterface",
    "source_code": "class ConversionInterface:\n\n    @staticmethod\n    def axisinfo(unit, axis):\n        return None\n\n    @staticmethod\n    def default_units(x, axis):\n        return None\n\n    @staticmethod\n    def convert(obj, unit, axis):\n        return obj",
    "docstring": "The minimal interface for a converter to take custom data types (or sequences) and convert them to values Matplotlib can use.",
    "type": "class",
    "file_path": "matplotlib\\lib\\matplotlib\\units.py",
    "ast_data": "ClassDef name:ConversionInterface FunctionDef name:axisinfo arg:unit arg:axis arguments arg arg Return return:no FunctionDef name:default_units arg:x arg:axis arguments arg arg Return return:no FunctionDef name:convert arg:obj arg:unit arg:axis arguments arg arg arg Return return:yes"
  },
  {
    "library": "scrapy",
    "name": "CachingThreadedResolver",
    "source_code": "@implementer(IResolverSimple)\nclass CachingThreadedResolver(ThreadedResolver):\n\n    def __init__(self, reactor: ReactorBase, cache_size: int, timeout: float):\n        super().__init__(reactor)\n        dnscache.limit = cache_size\n        self.timeout = timeout\n\n    @classmethod\n    def from_crawler(cls, crawler: Crawler, reactor: ReactorBase) -> Self:\n        if crawler.settings.getbool('DNSCACHE_ENABLED'):\n            cache_size = crawler.settings.getint('DNSCACHE_SIZE')\n        else:\n            cache_size = 0\n        return cls(reactor, cache_size, crawler.settings.getfloat('DNS_TIMEOUT'))\n\n    def install_on_reactor(self) -> None:\n        self.reactor.installResolver(self)\n\n    def getHostByName(self, name: str, timeout: Sequence[int]=()) -> Deferred[str]:\n        if name in dnscache:\n            return defer.succeed(dnscache[name])\n        timeout = (self.timeout,)\n        d = super().getHostByName(name, timeout)\n        if dnscache.limit:\n            d.addCallback(self._cache_result, name)\n        return d\n\n    def _cache_result(self, result: Any, name: str) -> Any:\n        dnscache[name] = result\n        return result",
    "docstring": "Default caching resolver. IPv4 only, supports setting a timeout value for DNS requests.",
    "type": "class",
    "file_path": "scrapy\\scrapy\\resolver.py",
    "ast_data": "ClassDef name:CachingThreadedResolver FunctionDef name:__init__ arg:self arg:reactor arg:cache_size arg:timeout arguments arg arg arg arg Call Call Assign Assign FunctionDef name:from_crawler arg:cls arg:crawler arg:reactor arguments arg arg arg If Call Assign Call Assign Return return:yes Call Call FunctionDef name:install_on_reactor arg:self arguments arg Call FunctionDef name:getHostByName arg:self arg:name arg:timeout arguments arg arg arg If Compare Return return:yes Call Assign Assign Call Call If Call Return return:yes FunctionDef name:_cache_result arg:self arg:result arg:name arguments arg arg arg Assign Return return:yes Call"
  },
  {
    "library": "authlib",
    "name": "validate_request_uri_parameter_supported",
    "source_code": "def validate_request_uri_parameter_supported(self):\n    _validate_boolean_value(self, 'request_uri_parameter_supported')",
    "docstring": "OPTIONAL. Boolean value specifying whether the OP supports use of the request_uri parameter, with true indicating support. If omitted, the default value is true.",
    "type": "method",
    "file_path": "authlib\\authlib\\oidc\\discovery\\models.py",
    "ast_data": "FunctionDef name:validate_request_uri_parameter_supported arg:self arguments arg Call"
  },
  {
    "library": "numpy",
    "name": "parse_cmd",
    "source_code": "def parse_cmd():\n    if len(sys.argv) == 3:\n        if sys.argv[1][-4:] == '.lib' and sys.argv[2][-4:] == '.def':\n            libfile, deffile = sys.argv[1:]\n        elif sys.argv[1][-4:] == '.def' and sys.argv[2][-4:] == '.lib':\n            deffile, libfile = sys.argv[1:]\n        else:\n            print(\"I'm assuming that your first argument is the library\")\n            print('and the second is the DEF file.')\n    elif len(sys.argv) == 2:\n        if sys.argv[1][-4:] == '.def':\n            deffile = sys.argv[1]\n            libfile = 'python%s.lib' % py_ver\n        elif sys.argv[1][-4:] == '.lib':\n            deffile = None\n            libfile = sys.argv[1]\n    else:\n        libfile = 'python%s.lib' % py_ver\n        deffile = None\n    return (libfile, deffile)",
    "docstring": "Parses the command-line arguments. libfile, deffile = parse_cmd()",
    "type": "function",
    "file_path": "numpy\\numpy\\distutils\\lib2def.py",
    "ast_data": "FunctionDef name:parse_cmd arguments If Compare Call If BoolOp Compare Compare Assign If BoolOp Compare Compare Assign Call Call If Compare Call If Compare Assign Assign If Compare Assign Assign Assign Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "serialize",
    "source_code": "@abc.abstractmethod\ndef serialize(self, accumulator):\n    pass",
    "docstring": "Serialize an accumulator for a remote call. This function serializes an accumulator to be sent to a remote process. Args: accumulator: The accumulator to serialize. Returns: A byte string representing the passed accumulator.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\base_preprocessing_layer.py",
    "ast_data": "FunctionDef name:serialize arg:self arg:accumulator arguments arg arg"
  },
  {
    "library": "pytorch",
    "name": "gen_predict_fn_def",
    "source_code": "def gen_predict_fn_def(self):\n    return 'def get_best_choices(self, context: AHContext) -> Optional[list[tuple[float, int]]]:'",
    "docstring": "Generates the definition of the predict function.",
    "type": "method",
    "file_path": "pytorch\\torchgen\\_autoheuristic\\train_decision.py",
    "ast_data": "FunctionDef name:gen_predict_fn_def arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "saveables",
    "source_code": "@property\ndef saveables(self):\n    return self._saveables",
    "docstring": "Returns a list of SaveableObjects generated from the Trackable object.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\training\\saving\\saveable_object_util.py",
    "ast_data": "FunctionDef name:saveables arg:self arguments arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "check_sparse_tensor_invariants",
    "source_code": "class check_sparse_tensor_invariants:\n\n    @staticmethod\n    def is_enabled():\n        return torch._C._check_sparse_tensor_invariants()\n\n    @staticmethod\n    def enable():\n        torch._C._set_check_sparse_tensor_invariants(True)\n\n    @staticmethod\n    def disable():\n        torch._C._set_check_sparse_tensor_invariants(False)\n\n    def __init__(self, enable=True):\n        self.state = enable\n        self.saved_state: Optional[bool] = None\n\n    def __enter__(self):\n        if self.saved_state is not None:\n            raise RuntimeError('This context manager instance is already activated. Use a different context manager instance for context nesting.')\n        self.saved_state = self.is_enabled()\n        torch._C._set_check_sparse_tensor_invariants(self.state)\n\n    def __exit__(self, type, value, traceback):\n        assert self.saved_state is not None\n        torch._C._set_check_sparse_tensor_invariants(self.saved_state)\n        self.saved_state = None\n\n    def __call__(self, mth):\n\n        def test_mth(*args, **kwargs):\n            with type(self)(self.state):\n                return mth(*args, **kwargs)\n        return test_mth",
    "docstring": "A tool to control checking sparse tensor invariants. The following options exists to manage sparsr tensor invariants checking in sparse tensor construction: 1. Using a context manager: .. code:: python with torch.sparse.check_sparse_tensor_invariants(): run_my_model() 2. Using a procedural approach: .. code:: python prev_checks_enabled = torch.sparse.check_sparse_tensor_invariants.is_enabled() torch.sparse.check_sparse_tensor_invariants.enable() run_my_model() if not prev_checks_enabled: torch.sparse.check_sparse_tensor_invariants.disable() 3. Using function decoration: .. code:: python @torch.sparse.check_sparse_tensor_invariants() def run_my_model(): ... run_my_model() 4. Using `crow_indices[..., -1] == nnz` is not satisfied.",
    "type": "class",
    "file_path": "pytorch\\torch\\sparse\\__init__.py",
    "ast_data": "ClassDef name:check_sparse_tensor_invariants FunctionDef name:is_enabled arguments Return return:yes Call FunctionDef name:enable arguments Call FunctionDef name:disable arguments Call FunctionDef name:__init__ arg:self arg:enable arguments arg arg Assign FunctionDef name:__enter__ arg:self arguments arg If Compare Raise Call Assign Call Call FunctionDef name:__exit__ arg:self arg:type arg:value arg:traceback arguments arg arg arg arg Compare Call Assign FunctionDef name:__call__ arg:self arg:mth arguments arg arg FunctionDef name:test_mth arguments arg arg With Call Call Return return:yes Call Return return:yes"
  },
  {
    "library": "pandas",
    "name": "reorder_levels",
    "source_code": "def reorder_levels(self, order: Sequence[Level]) -> Series:\n    if not isinstance(self.index, MultiIndex):\n        raise Exception('Can only reorder levels on a hierarchical axis.')\n    result = self.copy(deep=False)\n    assert isinstance(result.index, MultiIndex)\n    result.index = result.index.reorder_levels(order)\n    return result",
    "docstring": "Rearrange index levels using input order. May not drop or duplicate levels. Parameters ---------- order : list of int representing new level order Reference level by number or key. Returns ------- Series Type of caller with index as MultiIndex (new object). See Also -------- DataFrame.reorder_levels : Rearrange index or column levels using input ``. Examples -------- >>> arrays = [ ... np.array([\"dog\", \"dog\", \"cat\", \"cat\", \"bird\", \"bird\"]), ... np.array([\"white\", \"black\", \"white\", \"black\", \"white\", \"black\"]), ... ] >>> s = pd.Series([1, 2, 3, 3, 5, 2], index=arrays) >>> s dog white 1 black 2 cat white 3 black 3 bird white 5 black 2 dtype: int64 >>> s.reorder_levels([1, 0]) white dog 1 black dog 2 white cat 3 black cat 3 white bird 5 black bird 2 dtype: int64",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\series.py",
    "ast_data": "FunctionDef name:reorder_levels arg:self arg:order arguments arg arg If Call Raise Call Assign Call Call Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "update_restore_inputs",
    "source_code": "def update_restore_inputs(self, checkpoint_key: str, shape_and_slice_spec: str) -> tuple[Sequence[str], Sequence[str]]:\n    logging.vlog(1, 'Updating restore v2 inputs for %s[%s]: %s', checkpoint_key, self._object_local_name, shape_and_slice_spec)\n    slices = []\n    first_layout = self._from_shard_layouts[0]\n    full_vocab_size = first_layout.total_rows_per_sparse_core_shard * first_layout.num_sparse_cores\n    stack_dim = first_layout.unsharded_padded_shape[1]\n    full_shape = [full_vocab_size, stack_dim]\n    logging.vlog(1, 'Read checkpoint_key %s: %s', checkpoint_key, full_shape)\n    slices.append(_shard_info_str(full_shape, trackable_base.ShardInfo(offset=[0, 0], shape=full_shape)))\n    return ([checkpoint_key], slices)",
    "docstring": "Return the full shape of the stacked that is passed into restore_v2. This shape information is required by the restore_v2 process to ensure it loads the complete tensor from the checkpoint. The full tensor is required to perform resharding operations. Args: checkpoint_key: The input checkpoint key to be read. shape_and_slice_spec: The shape and slice spec of the checkpoint key to be read. Returns: A tuple of (keys, slices) that should be passed to restore_v2 in order to reshard according to the resharding plan. The restored tensors from restore_v2 op will usually be passed to reshard method of this class to get the final resharded value.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\tpu\\tpu_embedding_v3_checkpoint_adapter.py",
    "ast_data": "FunctionDef name:update_restore_inputs arg:self arg:checkpoint_key arg:shape_and_slice_spec arguments arg arg arg Call Assign Assign Assign Assign Assign Call Call Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "named_parameters",
    "source_code": "@compatibility(is_backward_compatible=False)\ndef named_parameters(self) -> Iterator[tuple[str, torch.nn.Parameter]]:\n    for param_name in self.graph_signature.parameters:\n        yield (param_name, self.state_dict[param_name])",
    "docstring": "Returns an iterator over original module parameters, yielding both the name of the parameter as well as the parameter itself.",
    "type": "method",
    "file_path": "pytorch\\torch\\export\\exported_program.py",
    "ast_data": "FunctionDef name:named_parameters arg:self arguments arg For Call"
  },
  {
    "library": "pandas",
    "name": "_read",
    "source_code": "def _read(filepath_or_buffer: FilePath | ReadCsvBuffer[bytes] | ReadCsvBuffer[str], kwds) -> DataFrame | TextFileReader:\n    if kwds.get('parse_dates', None) is None:\n        if kwds.get('date_format', None) is None:\n            kwds['parse_dates'] = False\n        else:\n            kwds['parse_dates'] = True\n    iterator = kwds.get('iterator', False)\n    chunksize = kwds.get('chunksize', None)\n    errors = kwds.get('encoding_errors', 'strict')\n    if not isinstance(errors, str):\n        raise ValueError(f'encoding_errors must be a string, got {type(errors).__name__}')\n    if kwds.get('engine') == 'pyarrow':\n        if iterator:\n            raise ValueError(\"The 'iterator' option is not supported with the 'pyarrow' engine\")\n        if chunksize is not None:\n            raise ValueError(\"The 'chunksize' option is not supported with the 'pyarrow' engine\")\n    else:\n        chunksize = validate_integer('chunksize', chunksize, 1)\n    nrows = kwds.get('nrows', None)\n    _validate_names(kwds.get('names', None))\n    parser = TextFileReader(filepath_or_buffer, **kwds)\n    if chunksize or iterator:\n        return parser\n    with parser:\n        return parser.read(nrows)",
    "docstring": "Generic reader of line files.",
    "type": "function",
    "file_path": "pandas\\pandas\\io\\parsers\\readers.py",
    "ast_data": "FunctionDef name:_read arg:filepath_or_buffer arg:kwds arguments arg arg If Compare Call If Compare Call Assign Assign Assign Call Assign Call Assign Call If Call Raise Call Call If Compare Call If Raise Call If Compare Raise Call Assign Call Assign Call Call Call Assign Call If BoolOp Return return:yes With Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_get_node_attribute_at_index",
    "source_code": "def _get_node_attribute_at_index(self, node_index, attr, attr_name):\n    if not self._inbound_nodes:\n        raise RuntimeError('The layer has never been called and thus has no defined ' + attr_name + '.')\n    if not len(self._inbound_nodes) > node_index:\n        raise ValueError('Asked to get ' + attr_name + ' at node ' + str(node_index) + ', but the layer has only ' + str(len(self._inbound_nodes)) + ' inbound nodes.')\n    values = getattr(self._inbound_nodes[node_index], attr)\n    if isinstance(values, list) and len(values) == 1:\n        return values[0]\n    else:\n        return values",
    "docstring": "Private utility to retrieves an attribute (e.g. inputs) from a node. This is used to implement the methods: - get_input_shape_at - get_output_shape_at - get_input_at etc... Args: node_index: Integer index of the node from which to retrieve the attribute. attr: Exact node attribute name. attr_name: Human-readable attribute name, for error messages. Returns: The layer's attribute at the node of index . Raises: RuntimeError: If the layer has no inbound nodes, or if called in Eager mode. ValueError: If the index provided does not match any node.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\base_layer_v1.py",
    "ast_data": "FunctionDef name:_get_node_attribute_at_index arg:self arg:node_index arg:attr arg:attr_name arguments arg arg arg arg If Raise Call If Compare Call Raise Call Call Call Call Assign Call If BoolOp Call Compare Call Return return:yes Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_lookup_args",
    "source_code": "def _lookup_args(self, args: tuple[Any, ...]) -> tuple[Any, ...]:\n    return tuple((self.buffer_to_node[arg] if isinstance(arg, str) else arg.inner_expr if isinstance(arg, SymbolicCallArg) else arg for arg in args))",
    "docstring": "Maps call args back to FX nodes.",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\codegen\\wrapper_fxir.py",
    "ast_data": "FunctionDef name:_lookup_args arg:self arg:args arguments arg arg Return return:yes Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "FlushExecutionFiles",
    "source_code": "def FlushExecutionFiles(self):\n    _pywrap_debug_events_writer.FlushExecutionFiles(self._dump_root)",
    "docstring": "Flush the execution debug event files. Causes the current content of the cyclic buffers to be written to the .execution and .graph_execution_traces debug events files. Also clears those cyclic buffers.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\debug\\lib\\debug_events_writer.py",
    "ast_data": "FunctionDef name:FlushExecutionFiles arg:self arguments arg Call"
  },
  {
    "library": "tensorflow",
    "name": "compute_mask",
    "source_code": "@generic_utils.default\ndef compute_mask(self, inputs, mask=None):\n    if not self._supports_masking:\n        if any((m is not None for m in nest.flatten(mask))):\n            raise TypeError('Layer ' + self.name + ' does not support masking, but was passed an input_mask: ' + str(mask))\n        return None\n    return mask",
    "docstring": "Computes an output mask tensor. Args: inputs: Tensor or list of tensors. mask: Tensor or list of tensors. Returns: None or a tensor (or list of tensors, one per output tensor of the layer).",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\base_layer.py",
    "ast_data": "FunctionDef name:compute_mask arg:self arg:inputs arg:mask arguments arg arg arg If If Call Compare Call Raise Call Call Return return:no Return return:yes"
  },
  {
    "library": "django",
    "name": "Trans",
    "source_code": "class Trans:\n\n    def __getattr__(self, real_name):\n        from django.conf import settings\n        if settings.USE_I18N:\n            from django.utils.translation import trans_real as trans\n            from django.utils.translation.reloader import translation_file_changed, watch_for_translation_changes\n            autoreload_started.connect(watch_for_translation_changes, dispatch_uid='translation_file_changed')\n            file_changed.connect(translation_file_changed, dispatch_uid='translation_file_changed')\n        else:\n            from django.utils.translation import trans_null as trans\n        setattr(self, real_name, getattr(trans, real_name))\n        return getattr(trans, real_name)",
    "docstring": "The purpose of this class is to store the actual translation function upon receiving the first call to that function. After this is done, changes to USE_I18N will have no effect to which function is served upon request. If your tests rely on changing USE_I18N, you can delete all the functions from _trans.__dict__. Note that storing the function with setattr will have a noticeable performance effect, as access to the function goes the normal path, instead of using __getattr__.",
    "type": "class",
    "file_path": "django\\django\\utils\\translation\\__init__.py",
    "ast_data": "ClassDef name:Trans FunctionDef name:__getattr__ arg:self arg:real_name arguments arg arg If Call Call Call Call Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "size",
    "source_code": "@abstractmethod\ndef size(self) -> int:\n    pass",
    "docstring": "Size of the column, in elements. Corresponds to DataFrame.num_rows() if column is a single chunk; equal to size of this current chunk otherwise.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\interchange\\dataframe_protocol.py",
    "ast_data": "FunctionDef name:size arg:self arguments arg"
  },
  {
    "library": "scipy",
    "name": "_timeit_fast",
    "source_code": "def _timeit_fast(stmt='pass', setup='pass', repeat=3):\n    timer = timeit.Timer(stmt, setup)\n    x = 0\n    for p in range(0, 10):\n        number = 10 ** p\n        x = timer.timeit(number)\n        if x >= 0.005 / 10:\n            break\n    if x > 1:\n        best = x\n    else:\n        number *= 10\n        r = timer.repeat(repeat, number)\n        best = min(r)\n    sec = best / number\n    return sec",
    "docstring": "Returns the time the statement/function took, in seconds. Faster, less precise version of IPython's timeit. can be a statement written as a string or a callable. Will do only 1 loop (like IPython's timeit) with no repetitions (unlike IPython) for very slow functions. For fast functions, only does enough loops to take 5 ms, which seems to produce similar results (on Windows at least), and avoids doing an extraneous cycle that isn't measured.",
    "type": "function",
    "file_path": "scipy\\scipy\\signal\\_signaltools.py",
    "ast_data": "FunctionDef name:_timeit_fast arg:stmt arg:setup arg:repeat arguments arg arg arg Assign Call Assign For Call Assign Assign Call If Compare If Compare Assign Assign Call Assign Call Assign Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "_safe_inv22_vectorized",
    "source_code": "def _safe_inv22_vectorized(M):\n    _api.check_shape((None, 2, 2), M=M)\n    M_inv = np.empty_like(M)\n    prod1 = M[:, 0, 0] * M[:, 1, 1]\n    delta = prod1 - M[:, 0, 1] * M[:, 1, 0]\n    rank2 = np.abs(delta) > 1e-08 * np.abs(prod1)\n    if np.all(rank2):\n        delta_inv = 1.0 / delta\n    else:\n        delta_inv = np.zeros(M.shape[0])\n        delta_inv[rank2] = 1.0 / delta[rank2]\n    M_inv[:, 0, 0] = M[:, 1, 1] * delta_inv\n    M_inv[:, 0, 1] = -M[:, 0, 1] * delta_inv\n    M_inv[:, 1, 0] = -M[:, 1, 0] * delta_inv\n    M_inv[:, 1, 1] = M[:, 0, 0] * delta_inv\n    return M_inv",
    "docstring": "Inversion of arrays of (2, 2) matrices, returns 0 for rank-deficient matrices. *M* : array of (2, 2) matrices to inverse, shape (n, 2, 2)",
    "type": "function",
    "file_path": "matplotlib\\lib\\matplotlib\\tri\\_triinterpolate.py",
    "ast_data": "FunctionDef name:_safe_inv22_vectorized arg:M arguments arg Call Assign Call Assign Assign Assign Compare Call Call If Call Assign Assign Call Assign Assign Assign Assign Assign Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "expand_to_tensor_dim",
    "source_code": "def expand_to_tensor_dim(t, n):\n    if t == Dyn:\n        dims = [Dyn] * n\n        return TensorType(tuple(dims))\n    elif isinstance(t, TensorType):\n        if len(t.__args__) != n:\n            raise TypeError(f'Cannot extend tensor. Tensor {t} has rank {len(t.__args__)}. It should have rank {n}')\n        return t\n    else:\n        raise TypeError(f'Cannot match the type {t}')",
    "docstring": "Expand a type to the desired tensor dimension if possible Raise an error otherwise. - t is the given type - n is a number of dimensions to expand to",
    "type": "function",
    "file_path": "pytorch\\torch\\fx\\experimental\\graph_gradual_typechecker.py",
    "ast_data": "FunctionDef name:expand_to_tensor_dim arg:t arg:n arguments arg arg If Compare Assign Return return:yes Call Call If Call If Compare Call Raise Call Call Return return:yes Raise Call"
  },
  {
    "library": "tensorflow",
    "name": "_write_custom_summaries",
    "source_code": "def _write_custom_summaries(self, step, logs=None):\n    logs = logs or {}\n    if context.executing_eagerly():\n        with self.writer.as_default(), summary_ops_v2.record_if(True):\n            for name, value in logs.items():\n                if isinstance(value, np.ndarray):\n                    value = value.item()\n                summary_ops_v2.scalar(name, value, step=step)\n    else:\n        for name, value in logs.items():\n            if isinstance(value, np.ndarray):\n                value = value.item()\n            summary = tf_summary.Summary()\n            summary_value = summary.value.add()\n            summary_value.simple_value = value\n            summary_value.tag = name\n            self.writer.add_summary(summary, step)\n    self.writer.flush()",
    "docstring": "Writes metrics out as custom scalar summaries. Args: step: the global step to use for TensorBoard. logs: dict. Keys are scalar summary names, values are NumPy scalars.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\callbacks_v1.py",
    "ast_data": "FunctionDef name:_write_custom_summaries arg:self arg:step arg:logs arguments arg arg arg Assign BoolOp If Call With Call Call For Call If Call Assign Call Call For Call If Call Assign Call Assign Call Assign Call Assign Assign Call Call"
  },
  {
    "library": "tensorflow",
    "name": "key_dtype",
    "source_code": "@property\ndef key_dtype(self):\n    return self._key_dtype",
    "docstring": "The expected table key dtype.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\lookup_ops.py",
    "ast_data": "FunctionDef name:key_dtype arg:self arguments arg Return return:yes"
  },
  {
    "library": "pandas",
    "name": "_sanitize_non_ordered",
    "source_code": "def _sanitize_non_ordered(data) -> None:\n    if isinstance(data, (set, frozenset)):\n        raise TypeError(f\"'{type(data).__name__}' type is unordered\")",
    "docstring": "Raise only for unordered sets, e.g., not for dict_keys",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\construction.py",
    "ast_data": "FunctionDef name:_sanitize_non_ordered arg:data arguments arg If Call Raise Call Call"
  },
  {
    "library": "tensorflow",
    "name": "as_proto",
    "source_code": "def as_proto(self):\n    if self._dims is None:\n        return tensor_shape_pb2.TensorShapeProto(unknown_rank=True)\n    else:\n        return tensor_shape_pb2.TensorShapeProto(dim=[tensor_shape_pb2.TensorShapeProto.Dim(size=-1 if d is None else d) for d in self._dims])",
    "docstring": "Returns this shape as a .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\tensor_shape.py",
    "ast_data": "FunctionDef name:as_proto arg:self arguments arg If Compare Return return:yes Call Return return:yes Call Call Compare"
  },
  {
    "library": "tensorflow",
    "name": "get_loss_reduction",
    "source_code": "@tf_export(v1=['distribute.get_loss_reduction'])\ndef get_loss_reduction():\n    if not distribute_lib.get_strategy()._scale_loss_for_estimator:\n        return ReduceOp.SUM\n    last_reduction = ops.get_default_graph()._last_loss_reduction\n    if last_reduction == losses_impl.Reduction.SUM or last_reduction == 'sum':\n        return ReduceOp.SUM\n    return ReduceOp.MEAN",
    "docstring": "corresponding to the last loss reduction. Returns: corresponding to the last loss reduction for estimator and v1 optimizer use case. otherwise.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\distribute_utils.py",
    "ast_data": "FunctionDef name:get_loss_reduction arguments If Call Return return:yes Assign Call If BoolOp Compare Compare Return return:yes Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "aggregate_and_return_name_for_output",
    "source_code": "def aggregate_and_return_name_for_output(self, fused_op_name, output_index, out_graphdef):\n    del fused_op_name, output_index, out_graphdef\n    raise RuntimeError('Unimplemented abstract method.')",
    "docstring": "Add node(s) to graph representing output operands and returns type. Args: fused_op_name: name of the fused op stub name. output_index: Output index that we are currently processing from stub. out_graphdef: The destination graphdef we are currently building up. Returns: The datatype of this identity. Raises: RuntimeError: if the method is not implemented.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\lite\\python\\op_hint.py",
    "ast_data": "FunctionDef name:aggregate_and_return_name_for_output arg:self arg:fused_op_name arg:output_index arg:out_graphdef arguments arg arg arg arg Raise Call"
  },
  {
    "library": "tensorflow",
    "name": "ZerosLike",
    "source_code": "def ZerosLike(op, index):\n    if not util.IsSwitch(op):\n        return _ZerosLikeV2(op, index)\n    else:\n        return _ZerosLikeV1(op, index)",
    "docstring": "Create zeros_like for the specified output of an op.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\control_flow_state.py",
    "ast_data": "FunctionDef name:ZerosLike arg:op arg:index arguments arg arg If Call Return return:yes Call Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "RegularPolyCollection",
    "source_code": "class RegularPolyCollection(_CollectionWithSizes):\n    _path_generator = mpath.Path.unit_regular_polygon\n    _factor = np.pi ** (-1 / 2)\n\n    def __init__(self, numsides, *, rotation=0, sizes=(1,), **kwargs):\n        super().__init__(**kwargs)\n        self.set_sizes(sizes)\n        self._numsides = numsides\n        self._paths = [self._path_generator(numsides)]\n        self._rotation = rotation\n        self.set_transform(transforms.IdentityTransform())\n\n    def get_numsides(self):\n        return self._numsides\n\n    def get_rotation(self):\n        return self._rotation\n\n    @artist.allow_rasterization\n    def draw(self, renderer):\n        self.set_sizes(self._sizes, self.get_figure(root=True).dpi)\n        self._transforms = [transforms.Affine2D(x).rotate(-self._rotation).get_matrix() for x in self._transforms]\n        Collection.draw(self, renderer)",
    "docstring": "A collection of n-sided regular polygons.",
    "type": "class",
    "file_path": "matplotlib\\lib\\matplotlib\\collections.py",
    "ast_data": "ClassDef name:RegularPolyCollection Assign Assign FunctionDef name:__init__ arg:self arg:numsides arguments arg arg arg arg arg Call Call Call Assign Assign Call Assign Call Call FunctionDef name:get_numsides arg:self arguments arg Return return:yes FunctionDef name:get_rotation arg:self arguments arg Return return:yes FunctionDef name:draw arg:self arg:renderer arguments arg arg Call Call Assign Call Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "wrap_inline",
    "source_code": "def wrap_inline(fn: Callable[_P, _R]) -> Callable[_P, _R]:\n\n    @functools.wraps(fn)\n    def inner(*args: _P.args, **kwargs: _P.kwargs) -> _R:\n        return fn(*args, **kwargs)\n    return inner",
    "docstring": "Create an extra frame around fn that is not in skipfiles.",
    "type": "function",
    "file_path": "pytorch\\torch\\_dynamo\\external_utils.py",
    "ast_data": "FunctionDef name:wrap_inline arg:fn arguments arg FunctionDef name:inner arguments arg arg Return return:yes Call Call Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "predict_proba",
    "source_code": "def predict_proba(self, X):\n    check_is_fitted(self)\n    n_classes = self.n_classes_\n    if n_classes == 1:\n        return np.ones((_num_samples(X), 1))\n    decision = self.decision_function(X)\n    return self._compute_proba_from_decision(decision, n_classes)",
    "docstring": "Predict class probabilities for X. The predicted class probabilities of an input sample is computed as the weighted mean predicted class probabilities of the classifiers in the ensemble. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) The training input samples. Sparse matrix can be CSC, CSR, COO, DOK, or LIL. COO, DOK, and LIL are converted to CSR. Returns ------- p : ndarray of shape (n_samples, n_classes) The class probabilities of the input samples. The order of outputs is the same of that of the :term: attribute.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\ensemble\\_weight_boosting.py",
    "ast_data": "FunctionDef name:predict_proba arg:self arg:X arguments arg arg Call Assign If Compare Return return:yes Call Call Assign Call Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "_on_mouse_button",
    "source_code": "def _on_mouse_button(self, event):\n    event.Skip()\n    self._set_capture(event.ButtonDown() or event.ButtonDClick())\n    x, y = self._mpl_coords(event)\n    button_map = {wx.MOUSE_BTN_LEFT: MouseButton.LEFT, wx.MOUSE_BTN_MIDDLE: MouseButton.MIDDLE, wx.MOUSE_BTN_RIGHT: MouseButton.RIGHT, wx.MOUSE_BTN_AUX1: MouseButton.BACK, wx.MOUSE_BTN_AUX2: MouseButton.FORWARD}\n    button = event.GetButton()\n    button = button_map.get(button, button)\n    modifiers = self._mpl_modifiers(event)\n    if event.ButtonDown():\n        MouseEvent('button_press_event', self, x, y, button, modifiers=modifiers, guiEvent=event)._process()\n    elif event.ButtonDClick():\n        MouseEvent('button_press_event', self, x, y, button, dblclick=True, modifiers=modifiers, guiEvent=event)._process()\n    elif event.ButtonUp():\n        MouseEvent('button_release_event', self, x, y, button, modifiers=modifiers, guiEvent=event)._process()",
    "docstring": "Start measuring on an axis.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\backends\\backend_wx.py",
    "ast_data": "FunctionDef name:_on_mouse_button arg:self arg:event arguments arg arg Call Call BoolOp Call Call Assign Call Assign Assign Call Assign Call Assign Call If Call Call Call If Call Call Call If Call Call Call"
  },
  {
    "library": "matplotlib",
    "name": "connect_event",
    "source_code": "def connect_event(self, event, callback):\n    cid = self.canvas.mpl_connect(event, callback)\n    self._cids.append(cid)",
    "docstring": "Connect a callback function with an event. This should be used in lieu of `` since this function stores callback ids for later clean up.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\widgets.py",
    "ast_data": "FunctionDef name:connect_event arg:self arg:event arg:callback arguments arg arg arg Assign Call Call"
  },
  {
    "library": "scikit-learn",
    "name": "is_classifier",
    "source_code": "def is_classifier(estimator):\n    if isinstance(estimator, type):\n        warnings.warn(f'passing a class to {print(inspect.stack()[0][3])} is deprecated and will be removed in 1.8. Use an instance of the class instead.', FutureWarning)\n        return getattr(estimator, '_estimator_type', None) == 'classifier'\n    return get_tags(estimator).estimator_type == 'classifier'",
    "docstring": "Return True if the given estimator is (probably) a classifier. Parameters ---------- estimator : object Estimator object to test. Returns ------- out : bool True if estimator is a classifier and False otherwise. Examples -------- >>> from sklearn.base import is_classifier >>> from sklearn.cluster import KMeans >>> from sklearn.svm import SVC, SVR >>> classifier = SVC() >>> regressor = SVR() >>> kmeans = KMeans() >>> is_classifier(classifier) True >>> is_classifier(regressor) False >>> is_classifier(kmeans) False",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\base.py",
    "ast_data": "FunctionDef name:is_classifier arg:estimator arguments arg If Call Call Call Call Return return:yes Compare Call Return return:yes Compare Call"
  },
  {
    "library": "pandas",
    "name": "ensure_index_from_sequences",
    "source_code": "def ensure_index_from_sequences(sequences, names=None) -> Index:\n    from pandas.core.indexes.api import default_index\n    from pandas.core.indexes.multi import MultiIndex\n    if len(sequences) == 0:\n        return default_index(0)\n    elif len(sequences) == 1:\n        if names is not None:\n            names = names[0]\n        return Index(maybe_sequence_to_range(sequences[0]), name=names)\n    else:\n        return MultiIndex.from_arrays(sequences, names=names)",
    "docstring": "Construct an index from sequences of data. A single sequence returns an Index. Many sequences returns a MultiIndex. Parameters ---------- sequences : sequence of sequences names : sequence of str Returns ------- index : Index or MultiIndex Examples -------- >>> ensure_index_from_sequences([[1, 2, 4]], names=[\"name\"]) Index([1, 2, 4], dtype='int64', name='name') >>> ensure_index_from_sequences([[\"a\", \"a\"], [\"a\", \"b\"]], names=[\"L1\", \"L2\"]) MultiIndex([('a', 'a'), ('a', 'b')], names=['L1', 'L2']) See Also -------- ensure_index",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\indexes\\base.py",
    "ast_data": "FunctionDef name:ensure_index_from_sequences arg:sequences arg:names arguments arg arg If Compare Call Return return:yes Call If Compare Call If Compare Assign Return return:yes Call Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "register_load_state_dict_post_hook",
    "source_code": "def register_load_state_dict_post_hook(self, hook: Callable[['Optimizer'], None], prepend: bool=False) -> RemovableHandle:\n    handle = hooks.RemovableHandle(self._optimizer_load_state_dict_post_hooks)\n    self._optimizer_load_state_dict_post_hooks[handle.id] = hook\n    if prepend:\n        self._optimizer_load_state_dict_post_hooks.move_to_end(handle.id, last=False)\n    return handle",
    "docstring": "Register a load_state_dict post-hook which will be called after :meth: is called. It should have the following signature:: hook(optimizer) -> None The `torch.utils.hooks.RemoveableHandle`",
    "type": "method",
    "file_path": "pytorch\\torch\\optim\\optimizer.py",
    "ast_data": "FunctionDef name:register_load_state_dict_post_hook arg:self arg:hook arg:prepend arguments arg arg arg Assign Call Assign If Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "get_extents",
    "source_code": "def get_extents(self, transform=None, **kwargs):\n    from .transforms import Bbox\n    if transform is not None:\n        self = transform.transform_path(self)\n    if self.codes is None:\n        xys = self.vertices\n    elif len(np.intersect1d(self.codes, [Path.CURVE3, Path.CURVE4])) == 0:\n        xys = self.vertices[np.isin(self.codes, [Path.MOVETO, Path.LINETO])]\n    else:\n        xys = []\n        for curve, code in self.iter_bezier(**kwargs):\n            _, dzeros = curve.axis_aligned_extrema()\n            xys.append(curve([0, *dzeros, 1]))\n        xys = np.concatenate(xys)\n    if len(xys):\n        return Bbox([xys.min(axis=0), xys.max(axis=0)])\n    else:\n        return Bbox.null()",
    "docstring": "Get Bbox of the path. Parameters ---------- transform : , optional Transform to apply to path before computing extents, if any. **kwargs Forwarded to . Returns ------- matplotlib.transforms.Bbox The extents of the path Bbox([[xmin, ymin], [xmax, ymax]])",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\path.py",
    "ast_data": "FunctionDef name:get_extents arg:self arg:transform arguments arg arg arg If Compare Assign Call If Compare Assign If Compare Call Call Assign Call Assign For Call Assign Call Call Call Assign Call If Call Return return:yes Call Call Call Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "get_label_position",
    "source_code": "def get_label_position(self):\n    return self.label_position",
    "docstring": "Return the label position (top or bottom)",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\axis.py",
    "ast_data": "FunctionDef name:get_label_position arg:self arguments arg Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "_transformed_cube",
    "source_code": "def _transformed_cube(self, vals):\n    minx, maxx, miny, maxy, minz, maxz = vals\n    xyzs = [(minx, miny, minz), (maxx, miny, minz), (maxx, maxy, minz), (minx, maxy, minz), (minx, miny, maxz), (maxx, miny, maxz), (maxx, maxy, maxz), (minx, maxy, maxz)]\n    return proj3d._proj_points(xyzs, self.M)",
    "docstring": "Return cube with limits from *vals* transformed by self.M.",
    "type": "method",
    "file_path": "matplotlib\\lib\\mpl_toolkits\\mplot3d\\axes3d.py",
    "ast_data": "FunctionDef name:_transformed_cube arg:self arg:vals arguments arg arg Assign Assign Return return:yes Call"
  },
  {
    "library": "django",
    "name": "load_handler",
    "source_code": "def load_handler(path, *args, **kwargs):\n    return import_string(path)(*args, **kwargs)",
    "docstring": "Given a path to a handler, return an instance of that handler. E.g.:: >>> from django.http import HttpRequest >>> request = HttpRequest() >>> load_handler( ... 'django.core.files.uploadhandler.TemporaryFileUploadHandler', ... request, ... )",
    "type": "function",
    "file_path": "django\\django\\core\\files\\uploadhandler.py",
    "ast_data": "FunctionDef name:load_handler arg:path arguments arg arg arg Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "record_summaries_every_n_global_steps",
    "source_code": "def record_summaries_every_n_global_steps(n, global_step=None):\n    if global_step is None:\n        global_step = training_util.get_or_create_global_step()\n    with ops.device('cpu:0'):\n        should = lambda: math_ops.equal(global_step % n, 0)\n        if not context.executing_eagerly():\n            should = should()\n    return record_if(should)",
    "docstring": "Sets the should_record_summaries Tensor to true if global_step % n == 0.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\summary_ops_v2.py",
    "ast_data": "FunctionDef name:record_summaries_every_n_global_steps arg:n arg:global_step arguments arg arg If Compare Assign Call With Call Assign arguments Call If Call Assign Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "pad_sequence",
    "source_code": "def pad_sequence(sequences: Union[Tensor, list[Tensor]], batch_first: bool=False, padding_value: float=0.0, padding_side: str='right') -> Tensor:\n    if not (torch.jit.is_tracing() or torch.jit.is_scripting()):\n        if not isinstance(sequences, Iterable):\n            msg = f'pad_sequence: Expected iterable for input sequences, but got arg of type: {type(sequences)}'\n            raise RuntimeError(msg)\n        sequences = tuple(sequences)\n    elif isinstance(sequences, torch.Tensor):\n        sequences = sequences.unbind(0)\n    return torch._C._nn.pad_sequence(sequences, batch_first, padding_value, padding_side)",
    "docstring": "Pad a list of variable length Tensors with :attr:. `sequencesLbatch_firstsequencesTbatch_first` otherwise",
    "type": "function",
    "file_path": "pytorch\\torch\\nn\\utils\\rnn.py",
    "ast_data": "FunctionDef name:pad_sequence arg:sequences arg:batch_first arg:padding_value arg:padding_side arguments arg arg arg arg If BoolOp Call Call If Call Assign Call Raise Call Assign Call If Call Assign Call Return return:yes Call"
  },
  {
    "library": "seaborn",
    "name": "_init_mutable_colormap",
    "source_code": "def _init_mutable_colormap():\n    greys = color_palette('Greys', 256)\n    cmap = LinearSegmentedColormap.from_list('interactive', greys)\n    cmap._init()\n    cmap._set_extremes()\n    return cmap",
    "docstring": "Create a matplotlib colormap that will be updated by the widgets.",
    "type": "function",
    "file_path": "seaborn\\seaborn\\widgets.py",
    "ast_data": "FunctionDef name:_init_mutable_colormap arguments Assign Call Assign Call Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_register_deepcopy_hook",
    "source_code": "def _register_deepcopy_hook(self, f):\n    assert callable(f), 'deepcopy hook must be a callable.'\n    self._deepcopy_hooks.append(f)",
    "docstring": "Takes a callable which will be called when we deepcopy this graph module. The callable takes the resulting deepcopied graph module.",
    "type": "method",
    "file_path": "pytorch\\torch\\fx\\graph_module.py",
    "ast_data": "FunctionDef name:_register_deepcopy_hook arg:self arg:f arguments arg arg Call Call"
  },
  {
    "library": "seaborn",
    "name": "_get_boolean_mapping",
    "source_code": "def _get_boolean_mapping(self, scale: Boolean, data: Series) -> Mapping:\n    values = self._get_values(scale, [True, False])\n\n    def mapping(x):\n        out = np.full(len(x), np.nan)\n        use = np.isfinite(x)\n        out[use] = np.where(x[use], *values)\n        return out\n    return mapping",
    "docstring": "Identify evenly-spaced values using interval or explicit mapping.",
    "type": "method",
    "file_path": "seaborn\\seaborn\\_core\\properties.py",
    "ast_data": "FunctionDef name:_get_boolean_mapping arg:self arg:scale arg:data arguments arg arg arg Assign Call FunctionDef name:mapping arg:x arguments arg Assign Call Call Assign Call Assign Call Return return:yes Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "split",
    "source_code": "def split(self, X, y, groups=None):\n    if groups is not None:\n        warnings.warn(f'The groups parameter is ignored by {self.__class__.__name__}', UserWarning)\n    y = check_array(y, input_name='y', ensure_2d=False, dtype=None)\n    return super().split(X, y, groups)",
    "docstring": "Generate indices to split data into training and test set. Parameters ---------- X : array-like of shape (n_samples, n_features) Training data, where is the number of samples and is the number of features. Note that providing `random_state` to an integer.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\model_selection\\_split.py",
    "ast_data": "FunctionDef name:split arg:self arg:X arg:y arg:groups arguments arg arg arg arg If Compare Call Assign Call Return return:yes Call Call"
  },
  {
    "library": "pandas",
    "name": "_hash_pandas_object",
    "source_code": "def _hash_pandas_object(self, *, encoding: str, hash_key: str, categorize: bool) -> npt.NDArray[np.uint64]:\n    from pandas.core.util.hashing import hash_array\n    values, _ = self._values_for_factorize()\n    return hash_array(values, encoding=encoding, hash_key=hash_key, categorize=categorize)",
    "docstring": "Hook for hash_pandas_object. Default is to use the values returned by _values_for_factorize. Parameters ---------- encoding : str Encoding for data & key when strings. hash_key : str Hash_key for string key to encode. categorize : bool Whether to first categorize object arrays before hashing. This is more efficient when the array contains duplicate values. Returns ------- np.ndarray[uint64] An array of hashed values. See Also -------- api.extensions.ExtensionArray._values_for_factorize : Return an array and missing value suitable for factorization. util.hash_array : Given a 1d array, return an array of hashed values. Examples -------- >>> pd.array([1, 2])._hash_pandas_object( ... encoding=\"utf-8\", hash_key=\"1000000000000000\", categorize=False ... ) array([ 6238072747940578789, 15839785061582574730], dtype=uint64)",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\arrays\\base.py",
    "ast_data": "FunctionDef name:_hash_pandas_object arg:self arguments arg arg arg arg Assign Call Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "_ixs",
    "source_code": "def _ixs(self, i: int, axis: AxisInt=0) -> Series:\n    if axis == 0:\n        new_mgr = self._mgr.fast_xs(i)\n        result = self._constructor_sliced_from_mgr(new_mgr, axes=new_mgr.axes)\n        result._name = self.index[i]\n        return result.__finalize__(self)\n    else:\n        col_mgr = self._mgr.iget(i)\n        return self._box_col_values(col_mgr, i)",
    "docstring": "Parameters ---------- i : int axis : int Returns ------- Series",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\frame.py",
    "ast_data": "FunctionDef name:_ixs arg:self arg:i arg:axis arguments arg arg arg If Compare Assign Call Assign Call Assign Return return:yes Call Assign Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "val_and_grad_function",
    "source_code": "def val_and_grad_function(f, params=None):\n\n    def decorated(*args, **kwds):\n        dy = kwds.pop('dy', None)\n        if kwds:\n            raise ValueError('Functions to be differentiated cannot receive keyword arguments.')\n        val, vjp = make_vjp(f, params)(*args, **kwds)\n        return (val, vjp(dy=dy))\n    return decorated",
    "docstring": "Returns a function that computes f and its derivative w.r.t. params. Example: Args: f: function to be differentiated. If returns a scalar, this scalar will be differentiated. If returns a tensor or list of tensors, by default a scalar will be computed by adding all their values to produce a single scalar. If desired, the tensors can be elementwise multiplied by the tensors passed as the keyword argument to the returned gradient function. params: list of parameter names of f or list of integers indexing the parameters with respect to which we'll differentiate. Passing differentiates with respect to all parameters. Returns: function which, when called, returns the value of f and the gradient of f with respect to all of . The function takes an extra optional keyword argument \"dy\". Setting it allows computation of vector jacobian products for vectors other than the vector of ones. Raises: ValueError: if the params are not all strings or all integers.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\backprop.py",
    "ast_data": "FunctionDef name:val_and_grad_function arg:f arg:params arguments arg arg FunctionDef name:decorated arguments arg arg Assign Call If Raise Call Assign Call Call Return return:yes Call Return return:yes"
  },
  {
    "library": "scipy",
    "name": "create_spline",
    "source_code": "def create_spline(y, yp, x, h):\n    from scipy.interpolate import PPoly\n    n, m = y.shape\n    c = np.empty((4, n, m - 1), dtype=y.dtype)\n    slope = (y[:, 1:] - y[:, :-1]) / h\n    t = (yp[:, :-1] + yp[:, 1:] - 2 * slope) / h\n    c[0] = t / h\n    c[1] = (slope - yp[:, :-1]) / h - t\n    c[2] = yp[:, :-1]\n    c[3] = y[:, :-1]\n    c = np.moveaxis(c, 1, 0)\n    return PPoly(c, x, extrapolate=True, axis=1)",
    "docstring": "Create a cubic spline given values and derivatives. Formulas for the coefficients are taken from interpolate.CubicSpline. Returns ------- sol : PPoly Constructed spline as a PPoly instance.",
    "type": "function",
    "file_path": "scipy\\scipy\\integrate\\_bvp.py",
    "ast_data": "FunctionDef name:create_spline arg:y arg:yp arg:x arg:h arguments arg arg arg arg Assign Assign Call Assign Assign Assign Assign Assign Assign Assign Call Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "day_name",
    "source_code": "def day_name(self, locale=None) -> npt.NDArray[np.object_]:\n    values = self._local_timestamps()\n    result = fields.get_date_name_field(values, 'day_name', locale=locale, reso=self._creso)\n    result = self._maybe_mask_results(result, fill_value=None)\n    if using_string_dtype():\n        from pandas import StringDtype, array as pd_array\n        return pd_array(result, dtype=StringDtype(na_value=np.nan))\n    return result",
    "docstring": "Return the day names with specified locale. Parameters ---------- locale : str, optional Locale determining the language in which to return the day name. Default is English locale (`` will return day names in Brazilian Portuguese language. >>> idx = pd.date_range(start=\"2018-01-01\", freq=\"D\", periods=3) >>> idx DatetimeIndex(['2018-01-01', '2018-01-02', '2018-01-03'], dtype='datetime64[ns]', freq='D') >>> idx.day_name(locale=\"pt_BR.utf8\") # doctest: +SKIP Index(['Segunda', 'Terça', 'Quarta'], dtype='object')",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\arrays\\datetimes.py",
    "ast_data": "FunctionDef name:day_name arg:self arg:locale arguments arg arg Assign Call Assign Call Assign Call If Call Return return:yes Call Call Return return:yes"
  },
  {
    "library": "scipy",
    "name": "hb_write",
    "source_code": "def hb_write(path_or_open_file, m, hb_info=None):\n    m = m.tocsc(copy=False)\n    if hb_info is None:\n        hb_info = HBInfo.from_data(m)\n\n    def _set_matrix(fid):\n        hb = HBFile(fid, hb_info)\n        return hb.write_matrix(m)\n    if hasattr(path_or_open_file, 'write'):\n        return _set_matrix(path_or_open_file)\n    else:\n        with open(path_or_open_file, 'w') as f:\n            return _set_matrix(f)",
    "docstring": "Write HB-format file. Parameters ---------- path_or_open_file : path-like or file-like If a file-like object, it is used as-is. Otherwise, it is opened before writing. m : sparse array or matrix the sparse array to write hb_info : HBInfo contains the meta-data for write Returns ------- None Notes ----- At the moment not the full Harwell-Boeing format is supported. Supported features are: - assembled, non-symmetric, real matrices - integer for pointer/indices - exponential format for float values, and int format Examples -------- We can read and write a harwell-boeing format file: >>> from scipy.io import hb_read, hb_write >>> from scipy.sparse import csr_array, eye >>> data = csr_array(eye(3)) # create a sparse array >>> hb_write(\"data.hb\", data) # write a hb file >>> print(hb_read(\"data.hb\", spmatrix=False)) # read a hb file Coords Values (0, 0) 1.0 (1, 1) 1.0 (2, 2) 1.0",
    "type": "function",
    "file_path": "scipy\\scipy\\io\\_harwell_boeing\\hb.py",
    "ast_data": "FunctionDef name:hb_write arg:path_or_open_file arg:m arg:hb_info arguments arg arg arg Assign Call If Compare Assign Call FunctionDef name:_set_matrix arg:fid arguments arg Assign Call Return return:yes Call If Call Return return:yes Call With Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "reshape_scale",
    "source_code": "def reshape_scale(scale: torch.Tensor, axis: int, input: torch.Tensor) -> torch.Tensor:\n    new_shape = [1] * input.ndim\n    new_shape[axis] = input.size(axis)\n    return scale.view(new_shape)",
    "docstring": "Reshapes the scale so that we can multiply it to the input by the given axis.",
    "type": "function",
    "file_path": "pytorch\\torch\\ao\\quantization\\fx\\_equalize.py",
    "ast_data": "FunctionDef name:reshape_scale arg:scale arg:axis arg:input arguments arg arg arg Assign Assign Call Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "_kneighbors_reduce_func",
    "source_code": "def _kneighbors_reduce_func(self, dist, start, n_neighbors, return_distance):\n    sample_range = np.arange(dist.shape[0])[:, None]\n    neigh_ind = np.argpartition(dist, n_neighbors - 1, axis=1)\n    neigh_ind = neigh_ind[:, :n_neighbors]\n    neigh_ind = neigh_ind[sample_range, np.argsort(dist[sample_range, neigh_ind])]\n    if return_distance:\n        if self.effective_metric_ == 'euclidean':\n            result = (np.sqrt(dist[sample_range, neigh_ind]), neigh_ind)\n        else:\n            result = (dist[sample_range, neigh_ind], neigh_ind)\n    else:\n        result = neigh_ind\n    return result",
    "docstring": "Reduce a chunk of distances to the nearest neighbors. Callback to :func: Parameters ---------- dist : ndarray of shape (n_samples_chunk, n_samples) The distance matrix. start : int The index in X which the first row of dist corresponds to. n_neighbors : int Number of neighbors required for each sample. return_distance : bool Whether or not to return the distances. Returns ------- dist : array of shape (n_samples_chunk, n_neighbors) Returned only if . neigh : array of shape (n_samples_chunk, n_neighbors) The neighbors indices.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\neighbors\\_base.py",
    "ast_data": "FunctionDef name:_kneighbors_reduce_func arg:self arg:dist arg:start arg:n_neighbors arg:return_distance arguments arg arg arg arg arg Assign Call Assign Call Assign Assign Call If If Compare Assign Call Assign Assign Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "get_alignment",
    "source_code": "def get_alignment(self):\n    return self._legend_box.align",
    "docstring": "Get the alignment value of the legend box",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\legend.py",
    "ast_data": "FunctionDef name:get_alignment arg:self arguments arg Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "set_forward_navigation_events",
    "source_code": "def set_forward_navigation_events(self, forward):\n    self._forward_navigation_events = forward",
    "docstring": "Set how pan/zoom events are forwarded to Axes below this one. Parameters ---------- forward : bool or \"auto\" Possible values: - True: Forward events to other axes with lower or equal zorder. - False: Events are only executed on this axes. - \"auto\": Default behaviour (*True* for axes with an invisible patch and *False* otherwise) See Also -------- matplotlib.axes.Axes.set_navigate",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\axes\\_base.py",
    "ast_data": "FunctionDef name:set_forward_navigation_events arg:self arg:forward arguments arg arg Assign"
  },
  {
    "library": "django",
    "name": "get_ordering",
    "source_code": "def get_ordering(self):\n    return self.ordering",
    "docstring": "Return the field or fields to use for ordering the queryset.",
    "type": "method",
    "file_path": "django\\django\\views\\generic\\list.py",
    "ast_data": "FunctionDef name:get_ordering arg:self arguments arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "store_output",
    "source_code": "def store_output(self, dst: ir.Buffer, src: ir.Buffer, orig_src: Optional[ir.Buffer]=None, epilogue_nodes: Optional[list[ir.IRNode]]=None, offsets: Optional[list[Any]]=None, reindexers: Optional[list[Optional[Callable[[list[Any]], list[Any]]]]]=None):\n    assert isinstance(dst, (ir.Buffer, ir.ReinterpretView))\n    assert dst.get_size() == src.get_size(), f'dst={dst!r}, src={src!r}'\n    if offsets:\n        offsets = parse_expr_with_index_symbols(offsets)\n    if epilogue_nodes:\n        with LocalBufferContext(self.args) as scope:\n            assert orig_src is not None\n            if orig_src.get_name() != src.get_name():\n                scope.add_local_buffer(src, [orig_src])\n                epilogue_nodes = scope.localize_nodes(epilogue_nodes)\n            return self.store_pointwise_nodes(dst, epilogue_nodes, offsets, reindexers)\n    elif dst.get_name() != src.get_name():\n        copy = L.copy(dst, src).data.data\n        with LocalBufferContext(self.args) as scope:\n            scope.add_local_buffer(src)\n            return self.store_pointwise_nodes(dst, [copy])\n    else:\n        assert dst.layout == src.layout, f'dst={dst!r}, src={src!r}'\n        return ''",
    "docstring": "Store the buffer to the buffer. The size of and should match. If is provided, the buffer is firstly computed with the epilogues before stored to . The are all pointwise. Notes: 1. and buffer could be the same buffer in which case we are doing in-place compute and stores. In case are not provided, we do nothing. 2. The , if exist, have computations on before storing to but since they come form the original Inductor IR, they might need to be adjusted before working with and as outlined below: a) or buffer could be a sub-slice of the ranges the work on. In this case, the could be provided to adjust the indices passed to during codegen and the data ranges are also configured according to the sizes of and . b) might be indexed in a different way as the , hence a is needed on the indices to to match the indexing of . c) If is local, we need to add a local buffer for it and localize the buffer in with .",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\codegen\\cpp_template_kernel.py",
    "ast_data": "FunctionDef name:store_output arg:self arg:dst arg:src arg:orig_src arg:epilogue_nodes arg:offsets arg:reindexers arguments arg arg arg arg arg arg arg Call Compare Call Call If Assign Call If With Call Compare If Compare Call Call Call Assign Call Return return:yes Call If Compare Call Call Assign Call With Call Call Return return:yes Call Compare Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "update",
    "source_code": "def update(self, parameters: Union[Mapping[str, Any], ParameterDict]) -> None:\n    if not isinstance(parameters, container_abcs.Iterable):\n        raise TypeError('ParametersDict.update should be called with an iterable of key/value pairs, but got ' + type(parameters).__name__)\n    if isinstance(parameters, (OrderedDict, ParameterDict)):\n        for key, parameter in parameters.items():\n            self[key] = parameter\n    elif isinstance(parameters, container_abcs.Mapping):\n        for key, parameter in sorted(parameters.items()):\n            self[key] = parameter\n    else:\n        for j, p in enumerate(parameters):\n            if not isinstance(p, container_abcs.Iterable):\n                raise TypeError('ParameterDict update sequence element #' + str(j) + ' should be Iterable; is' + type(p).__name__)\n            if not len(p) == 2:\n                raise ValueError('ParameterDict update sequence element #' + str(j) + ' has length ' + str(len(p)) + '; 2 is required')\n            self[p[0]] = p[1]",
    "docstring": "Update the :class: with key-value pairs from `parameters~torch.nn.ParameterDict~torch.nn.Parameter~torch.nn.Parameter`)",
    "type": "method",
    "file_path": "pytorch\\torch\\nn\\modules\\container.py",
    "ast_data": "FunctionDef name:update arg:self arg:parameters arguments arg arg If Call Raise Call Call If Call For Call Assign If Call For Call Call Assign For Call If Call Raise Call Call Call If Compare Call Raise Call Call Call Call Assign"
  },
  {
    "library": "pytorch",
    "name": "in_interval",
    "source_code": "def in_interval(self, start_us, end_us):\n    start_idx = bisect.bisect_left(self._start_nses, start_us * 1000)\n    end_idx = bisect.bisect_right(self._start_nses, end_us * 1000)\n    for i in range(start_idx, end_idx):\n        yield self._mem_records[self._indices[i]]",
    "docstring": "Return all records in the given interval To maintain backward compatibility, convert us to ns in function",
    "type": "method",
    "file_path": "pytorch\\torch\\autograd\\profiler_util.py",
    "ast_data": "FunctionDef name:in_interval arg:self arg:start_us arg:end_us arguments arg arg arg Assign Call Assign Call For Call"
  },
  {
    "library": "matplotlib",
    "name": "get_antialiased",
    "source_code": "def get_antialiased(self):\n    return self._antialiased",
    "docstring": "Return whether antialiasing is used for drawing.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\patches.py",
    "ast_data": "FunctionDef name:get_antialiased arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "Hunk",
    "source_code": "@dataclasses.dataclass(frozen=True)\nclass Hunk:\n    file: str\n    start: int\n    length: int\n    lines: list[str]\n\n    def added_lines(self) -> Generator[tuple[int, str], None, None]:\n        current_line_no = self.start\n        for line in self.lines:\n            if line.startswith('+'):\n                yield (current_line_no, line[1:])\n                current_line_no += 1\n            elif line.startswith('-'):\n                continue\n            else:\n                current_line_no += 1",
    "docstring": "Represents a hunk of a diff.",
    "type": "class",
    "file_path": "tensorflow\\third_party\\xla\\build_tools\\lint\\diff_parser.py",
    "ast_data": "ClassDef name:Hunk FunctionDef name:added_lines arg:self arguments arg Assign For If Call If Call Call"
  },
  {
    "library": "pytorch",
    "name": "fcn_resnet50",
    "source_code": "def fcn_resnet50(pretrained=False, progress=True, num_classes=21, aux_loss=None, **kwargs):\n    return _load_model('fcn', 'resnet50', pretrained, progress, num_classes, aux_loss, **kwargs)",
    "docstring": "Constructs a Fully-Convolutional Network model with a ResNet-50 backbone. Args: pretrained (bool): If True, returns a model pre-trained on COCO train2017 which contains the same classes as Pascal VOC progress (bool): If True, displays a progress bar of the download to stderr",
    "type": "function",
    "file_path": "pytorch\\benchmarks\\functional_autograd_benchmark\\torchvision_models.py",
    "ast_data": "FunctionDef name:fcn_resnet50 arg:pretrained arg:progress arg:num_classes arg:aux_loss arguments arg arg arg arg arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "extract_model_metrics",
    "source_code": "def extract_model_metrics(model):\n    if getattr(model, '_compile_metrics', None):\n        return {m.name: m for m in model._compile_metric_functions}\n    return None",
    "docstring": "Convert metrics from a Keras model API to dictionary. This is used for converting Keras models to SavedModels. Args: model: A object. Returns: Dictionary mapping metric names to metric instances. May return if the model does not contain any metrics.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\saving\\saving_utils.py",
    "ast_data": "FunctionDef name:extract_model_metrics arg:model arguments arg If Call Return return:yes Return return:no"
  },
  {
    "library": "pytorch",
    "name": "failures",
    "source_code": "@property\ndef failures(self) -> dict[int, WRAPPED_EXCEPTION]:\n    return self._failures",
    "docstring": "Return a dictionary mapping node ranks to their associated exceptions in case of failure.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\checkpoint\\api.py",
    "ast_data": "FunctionDef name:failures arg:self arguments arg Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "get_angle",
    "source_code": "def get_angle(self):\n    return self._angle",
    "docstring": "Return the angle of the ellipse.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\patches.py",
    "ast_data": "FunctionDef name:get_angle arg:self arguments arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "InputDim",
    "source_code": "@dataclass\nclass InputDim(DimSpec):\n    input_dim: int",
    "docstring": "Output dimension maps directly to an input dimension.",
    "type": "class",
    "file_path": "pytorch\\torch\\distributed\\tensor\\_ops\\_view_ops.py",
    "ast_data": "ClassDef name:InputDim"
  },
  {
    "library": "tensorflow",
    "name": "_get_intermediates",
    "source_code": "def _get_intermediates(func_graph):\n    intermediates = []\n    for op in func_graph.get_operations():\n        for t in op.outputs:\n            if t in func_graph.inputs:\n                continue\n            if t in func_graph.outputs:\n                continue\n            if t.dtype is dtypes.resource:\n                continue\n            if op.type == 'MutexLock':\n                continue\n            intermediates.append(t)\n    return intermediates",
    "docstring": "Returns intermediate tensors of for gradient computation.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\cond_v2.py",
    "ast_data": "FunctionDef name:_get_intermediates arg:func_graph arguments arg Assign For Call For If Compare If Compare If Compare If Compare Call Return return:yes"
  },
  {
    "library": "pandas",
    "name": "as_json_table_type",
    "source_code": "def as_json_table_type(x: DtypeObj) -> str:\n    if is_integer_dtype(x):\n        return 'integer'\n    elif is_bool_dtype(x):\n        return 'boolean'\n    elif is_numeric_dtype(x):\n        return 'number'\n    elif lib.is_np_dtype(x, 'M') or isinstance(x, (DatetimeTZDtype, PeriodDtype)):\n        return 'datetime'\n    elif lib.is_np_dtype(x, 'm'):\n        return 'duration'\n    elif isinstance(x, ExtensionDtype):\n        return 'any'\n    elif is_string_dtype(x):\n        return 'string'\n    else:\n        return 'any'",
    "docstring": "Convert a NumPy / pandas type to its corresponding json_table. Parameters ---------- x : np.dtype or ExtensionDtype Returns ------- str the Table Schema data types Notes ----- This table shows the relationship between NumPy / pandas dtypes, and Table Schema dtypes. ============== ================= Pandas type Table Schema type ============== ================= int64 integer float64 number bool boolean datetime64[ns] datetime timedelta64[ns] duration object str categorical any =============== =================",
    "type": "function",
    "file_path": "pandas\\pandas\\io\\json\\_table_schema.py",
    "ast_data": "FunctionDef name:as_json_table_type arg:x arguments arg If Call Return return:yes If Call Return return:yes If Call Return return:yes If BoolOp Call Call Return return:yes If Call Return return:yes If Call Return return:yes If Call Return return:yes Return return:yes"
  },
  {
    "library": "scipy",
    "name": "fftshift",
    "source_code": "def fftshift(x, axes=None):\n    xp = array_namespace(x)\n    if hasattr(xp, 'fft'):\n        return xp.fft.fftshift(x, axes=axes)\n    x = np.asarray(x)\n    y = np.fft.fftshift(x, axes=axes)\n    return xp.asarray(y)",
    "docstring": "Shift the zero-frequency component to the center of the spectrum. This function swaps half-spaces for all axes listed (defaults to all). Note that `fftshift`. Examples -------- >>> import numpy as np >>> freqs = np.fft.fftfreq(10, 0.1) >>> freqs array([ 0., 1., 2., ..., -3., -2., -1.]) >>> np.fft.fftshift(freqs) array([-5., -4., -3., -2., -1., 0., 1., 2., 3., 4.]) Shift the zero-frequency component only along the second axis: >>> freqs = np.fft.fftfreq(9, d=1./9).reshape(3, 3) >>> freqs array([[ 0., 1., 2.], [ 3., 4., -4.], [-3., -2., -1.]]) >>> np.fft.fftshift(freqs, axes=(1,)) array([[ 2., 0., 1.], [-4., 3., 4.], [-1., -3., -2.]])",
    "type": "function",
    "file_path": "scipy\\scipy\\fft\\_helper.py",
    "ast_data": "FunctionDef name:fftshift arg:x arg:axes arguments arg arg Assign Call If Call Return return:yes Call Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "cryptography",
    "name": "validate_for_algorithm",
    "source_code": "@abc.abstractmethod\ndef validate_for_algorithm(self, algorithm: CipherAlgorithm) -> None:\n    pass",
    "docstring": "Checks that all the necessary invariants of this (mode, algorithm) combination are met.",
    "type": "method",
    "file_path": "cryptography\\src\\cryptography\\hazmat\\primitives\\ciphers\\modes.py",
    "ast_data": "FunctionDef name:validate_for_algorithm arg:self arg:algorithm arguments arg arg"
  },
  {
    "library": "scipy",
    "name": "_len_guards",
    "source_code": "def _len_guards(M):\n    if int(M) != M or M < 0:\n        raise ValueError('Window length M must be a non-negative integer')\n    return M <= 1",
    "docstring": "Handle small or incorrect window lengths",
    "type": "function",
    "file_path": "scipy\\scipy\\signal\\windows\\_windows.py",
    "ast_data": "FunctionDef name:_len_guards arg:M arguments arg If BoolOp Compare Call Compare Raise Call Return return:yes Compare"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, call_collection, call_fn, name, input_signature):\n    self.call_collection = call_collection\n    self.input_signature = input_signature\n    self.wrapped_call = def_function.function(layer_call_wrapper(call_collection, call_fn, name), input_signature=input_signature)\n    self.original_layer_call = call_collection.layer_call_method",
    "docstring": "Initializes a LayerCall object. Args: call_collection: a LayerCallCollection, which contains the other layer call functions (e.g. call_with_conditional_losses, call). These functions should be traced with the same arguments. call_fn: A call function. name: Name of the call function. input_signature: Input signature of call_fn (can be None).",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\saving\\saved_model\\save_impl.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:call_collection arg:call_fn arg:name arg:input_signature arguments arg arg arg arg arg Assign Assign Assign Call Call Assign"
  },
  {
    "library": "matplotlib",
    "name": "_get_animated_artists",
    "source_code": "def _get_animated_artists(self):\n    return tuple((a for ax_ in self.ax.get_figure().get_axes() for a in ax_.get_children() if a.get_animated() and a not in self.artists))",
    "docstring": "Convenience method to get all animated artists of the figure containing this widget, excluding those already present in self.artists. The returned tuple is not sorted by 'z_order': z_order sorting is valid only when considering all artists and not only a subset of all artists.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\widgets.py",
    "ast_data": "FunctionDef name:_get_animated_artists arg:self arguments arg Return return:yes Call Call Call Call BoolOp Call Compare"
  },
  {
    "library": "pytorch",
    "name": "mark_static_address",
    "source_code": "@forbid_in_graph\ndef mark_static_address(t, guard=True):\n    if not isinstance(t, torch.Tensor):\n        raise TypeError(f'mark_static_address expects a tensor but recieved {type(t)}')\n    if guard:\n        t._dynamo_static_input_type = 'guarded'\n    else:\n        t._dynamo_static_input_type = 'unguarded'",
    "docstring": "Marks an input tensor whose data_ptr will not change across multiple calls to a dynamo-compiled function. This indicates to cudagraphs that an extra allocation is not needed for this input. The data_ptr will be guarded if guard=True. Note: Tensors marked in this way will be kept alive until is called.",
    "type": "function",
    "file_path": "pytorch\\torch\\_dynamo\\decorators.py",
    "ast_data": "FunctionDef name:mark_static_address arg:t arg:guard arguments arg arg If Call Raise Call Call If Assign Assign"
  },
  {
    "library": "tensorflow",
    "name": "verify_loop_init_vars",
    "source_code": "def verify_loop_init_vars(init_vars, symbol_names, first_iter_vars=None, extra_message=None):\n    if not symbol_names:\n        return\n    if first_iter_vars is None:\n        first_iter_vars = (None,) * len(symbol_names)\n    assert len(symbol_names) == len(init_vars)\n    assert len(symbol_names) == len(first_iter_vars)\n    for name, val, fi_val in zip(symbol_names, init_vars, first_iter_vars):\n        if isinstance(val, variables.UndefinedReturnValue):\n            if fi_val:\n                raise ValueError('the return value from a TensorFlow loop may only be a {}; got {}'.format(LEGAL_LOOP_TYPES, type(fi_val)))\n            else:\n                raise NotImplementedError('a return statement cannot be placed inside this TensorFlow loop; this may happen if a return statement depends on a static Python condition such as a hyperparameter')\n        error_msg = None\n        if val is None:\n            error_msg = \"'{}' is not allowed to be None before the loop\".format(name)\n        elif isinstance(val, variables.Undefined):\n            error_msg = \"'{}' must be defined before the loop\".format(name)\n            if extra_message:\n                error_msg += '\\n' + extra_message\n        if error_msg is not None:\n            raise ValueError(error_msg)",
    "docstring": "Ensures that all values in the state are valid to use in a TF loop. The init_vars may contain placeholder values derived from first_iter_vars. Args: init_vars: initial loop variables (as taken before entering the loop) symbol_names: corresponding names of the initial loop variables first_iter_vars: loop variables after one iteration of the loop extra_message: an extra string to append to the error message, in case of \"undefined variable\" errors (see variables.Undefined)",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\autograph\\operators\\control_flow.py",
    "ast_data": "FunctionDef name:verify_loop_init_vars arg:init_vars arg:symbol_names arg:first_iter_vars arg:extra_message arguments arg arg arg arg If Return return:no If Compare Assign Call Compare Call Call Compare Call Call For Call If Call If Raise Call Call Call Raise Call Assign If Compare Assign Call If Call Assign Call If If Compare Raise Call"
  },
  {
    "library": "tensorflow",
    "name": "create_feed_dict_from_input_data",
    "source_code": "def create_feed_dict_from_input_data(input_data: RepresentativeSample, signature_def: meta_graph_pb2.SignatureDef) -> Mapping[str, np.ndarray]:\n    feed_dict = {}\n    for input_key, input_value in input_data.items():\n        input_tensor_name = signature_def.inputs[input_key].name\n        value = input_value\n        if isinstance(input_value, core.Tensor):\n            value = input_value.eval()\n        feed_dict[input_tensor_name] = value\n    return feed_dict",
    "docstring": "Constructs a feed_dict from input data. Note: This function should only be used in graph mode. This is a helper function that converts an 'input key -> input value' mapping to a feed dict. A feed dict is an 'input tensor name -> input value' mapping and can be directly passed to the argument of . Args: input_data: Input key -> input value mapping. The input keys should match the input keys of . signature_def: A SignatureDef representing the function that is an input to. Returns: Feed dict, which is intended to be used as input for . It is essentially a mapping: input tensor name -> input value. Note that the input value in the feed dict is not a .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\compiler\\mlir\\quantization\\tensorflow\\python\\representative_dataset.py",
    "ast_data": "FunctionDef name:create_feed_dict_from_input_data arg:input_data arg:signature_def arguments arg arg Assign For Call Assign Assign If Call Assign Call Assign Return return:yes"
  },
  {
    "library": "kornia",
    "name": "apply_transform",
    "source_code": "def apply_transform(self, input: Tensor, params: Dict[str, Tensor], flags: Dict[str, Any], transform: Optional[Tensor]=None) -> Tensor:\n    return self._fn(input=input, params=params, flags=flags, transform=transform)",
    "docstring": "Apply random gaussian gradient illumination to the input image.",
    "type": "method",
    "file_path": "kornia\\kornia\\augmentation\\_2d\\intensity\\gaussian_illumination.py",
    "ast_data": "FunctionDef name:apply_transform arg:self arg:input arg:params arg:flags arg:transform arguments arg arg arg arg arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "filter_def_file",
    "source_code": "def filter_def_file(def_file: str, filter_file: str, filtered_file: str) -> None:\n    with open(filter_file, 'r', encoding='utf-8') as filter_file_handle:\n        filter_json: Dict[str, Any] = json.load(filter_file_handle)\n        inclusion_patterns: List[str] = filter_json['global'] + ['EXPORTS', '*;*']\n        incl_patterns: List[Pattern[str]] = [re.compile(re.escape(p).replace('\\\\*', '.*')) for p in inclusion_patterns]\n        exclusion_patterns: List[str] = filter_json['local']\n        excl_patterns: List[Pattern[str]] = [re.compile(re.escape(p).replace('\\\\*', '.*')) for p in exclusion_patterns]\n    with open(def_file, 'r') as orig_file, open(filtered_file, 'w') as filt_file:\n        for l in orig_file:\n            if not matches_any(excl_patterns, l) or matches_any(incl_patterns, l):\n                filt_file.write(l)",
    "docstring": "Filters a windows .def file based on a filter .json. Args: def_file: The path to the input windows .def file. filter_file: The path to the filter file (JSON format). filtered_file: The path to the output filtered windows .def file.",
    "type": "function",
    "file_path": "tensorflow\\third_party\\xla\\third_party\\py\\rules_pywrap\\def_file_filter_tool.py",
    "ast_data": "FunctionDef name:filter_def_file arg:def_file arg:filter_file arg:filtered_file arguments arg arg arg With Call Call Call Call Call Call Call Call With Call Call For If BoolOp Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_unflatten_first_dim",
    "source_code": "def _unflatten_first_dim(x, first_dim):\n    old_shape = array_ops.shape(x)\n    first_dim = math_ops.cast(first_dim, old_shape.dtype)\n    second_dim = constant_op.constant([-1], dtype=old_shape.dtype)\n    new_shape = array_ops.concat([first_dim, second_dim, old_shape[1:]], axis=0)\n    return array_ops.reshape(x, new_shape)",
    "docstring": "Splits first dimension into [first_dim, -1].",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\parallel_for\\pfor.py",
    "ast_data": "FunctionDef name:_unflatten_first_dim arg:x arg:first_dim arguments arg arg Assign Call Assign Call Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "cherrypy",
    "name": "wait",
    "source_code": "def wait(self, state, interval=0.1, channel=None):\n    if isinstance(state, (tuple, list)):\n        if self.state not in state:\n            events = tuple([self._get_state_event(s) for s in state])\n            win32event.WaitForMultipleObjects(events, 0, win32event.INFINITE)\n    elif self.state != state:\n        event = self._get_state_event(state)\n        win32event.WaitForSingleObject(event, win32event.INFINITE)",
    "docstring": "Wait for the given state(s), KeyboardInterrupt or SystemExit. Since this class uses native win32event objects, the interval argument is ignored.",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\process\\win32.py",
    "ast_data": "FunctionDef name:wait arg:self arg:state arg:interval arg:channel arguments arg arg arg arg If Call If Compare Assign Call Call Call If Compare Assign Call Call"
  },
  {
    "library": "tensorflow",
    "name": "sess_str",
    "source_code": "@property\ndef sess_str(self):\n    raise NotImplementedError('sess_str')",
    "docstring": "The TensorFlow process to which this session will connect.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\client\\session.py",
    "ast_data": "FunctionDef name:sess_str arg:self arguments arg Raise Call"
  },
  {
    "library": "matplotlib",
    "name": "rowspan",
    "source_code": "@property\ndef rowspan(self):\n    ncols = self.get_gridspec().ncols\n    return range(self.num1 // ncols, self.num2 // ncols + 1)",
    "docstring": "The rows spanned by this subplot, as a object.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\gridspec.py",
    "ast_data": "FunctionDef name:rowspan arg:self arguments arg Assign Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_RandomGenerator",
    "source_code": "class _RandomGenerator:\n\n    def __init__(self, seed=None):\n        super(_RandomGenerator, self).__init__()\n        if seed is not None:\n            self.seed = [seed, 0]\n        else:\n            self.seed = None\n\n    def random_normal(self, shape, mean=0.0, stddev=1, dtype=dtypes.float32):\n        if self.seed:\n            op = stateless_random_ops.stateless_random_normal\n        else:\n            op = random_ops.random_normal\n        return op(shape=shape, mean=mean, stddev=stddev, dtype=dtype, seed=self.seed)\n\n    def random_uniform(self, shape, minval, maxval, dtype):\n        if self.seed:\n            op = stateless_random_ops.stateless_random_uniform\n        else:\n            op = random_ops.random_uniform\n        return op(shape=shape, minval=minval, maxval=maxval, dtype=dtype, seed=self.seed)\n\n    def truncated_normal(self, shape, mean, stddev, dtype):\n        if self.seed:\n            op = stateless_random_ops.stateless_truncated_normal\n        else:\n            op = random_ops.truncated_normal\n        return op(shape=shape, mean=mean, stddev=stddev, dtype=dtype, seed=self.seed)",
    "docstring": "Random generator that selects appropriate random ops.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\init_ops_v2.py",
    "ast_data": "ClassDef name:_RandomGenerator FunctionDef name:__init__ arg:self arg:seed arguments arg arg Call Call If Compare Assign Assign FunctionDef name:random_normal arg:self arg:shape arg:mean arg:stddev arg:dtype arguments arg arg arg arg arg If Assign Assign Return return:yes Call FunctionDef name:random_uniform arg:self arg:shape arg:minval arg:maxval arg:dtype arguments arg arg arg arg arg If Assign Assign Return return:yes Call FunctionDef name:truncated_normal arg:self arg:shape arg:mean arg:stddev arg:dtype arguments arg arg arg arg arg If Assign Assign Return return:yes Call"
  },
  {
    "library": "kornia",
    "name": "from_tensor",
    "source_code": "@classmethod\ndef from_tensor(cls, boxes: torch.Tensor | list[torch.Tensor], mode: str='xyxy', validate_boxes: bool=True) -> Boxes:\n    quadrilaterals: torch.Tensor | list[torch.Tensor]\n    if isinstance(boxes, torch.Tensor):\n        quadrilaterals = _boxes_to_quadrilaterals(boxes, mode=mode, validate_boxes=validate_boxes)\n    else:\n        quadrilaterals = [_boxes_to_quadrilaterals(box, mode, validate_boxes) for box in boxes]\n    return cls(quadrilaterals, False, mode)",
    "docstring": "Create :class: from boxes stored in another format. Args: boxes: 2D boxes, shape of :math:, :math:, :math: or :math:. mode: The format in which the boxes are provided. * 'xyxy': boxes are assumed to be in the format `(N, 4)(B, N, 4)(N, 4)(B, N, 4)(N, 4)(B, N, 4)(N, 4, 2)(B, N, 4, 2)(N, 4, 2)(B, N, 4, 2)Boxesboxes`. Examples: >>> boxes_xyxy = torch.as_tensor([[0, 3, 1, 4], [5, 1, 8, 4]]) >>> boxes = Boxes.from_tensor(boxes_xyxy, mode='xyxy') >>> boxes.data # (2, 4, 2) tensor([[[0., 3.], [0., 3.], [0., 3.], [0., 3.]], [[5., 1.], [7., 1.], [7., 3.], [5., 3.]]])",
    "type": "method",
    "file_path": "kornia\\kornia\\geometry\\boxes.py",
    "ast_data": "FunctionDef name:from_tensor arg:cls arg:boxes arg:mode arg:validate_boxes arguments arg arg arg arg If Call Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "head",
    "source_code": "@final\ndef head(self, n: int=5) -> Self:\n    return self.iloc[:n].copy()",
    "docstring": "Return the first rows. This function exhibits the same behavior as `nnnn` >>> df.head(-3) animal 0 alligator 1 bee 2 falcon 3 lion 4 monkey 5 parrot",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\generic.py",
    "ast_data": "FunctionDef name:head arg:self arg:n arguments arg arg Return return:yes Call"
  },
  {
    "library": "kornia",
    "name": "gaussian",
    "source_code": "def gaussian(window_size: int, sigma: Tensor | float, *, mean: Optional[Union[Tensor, float]]=None, device: Optional[Device]=None, dtype: Optional[Dtype]=None) -> Tensor:\n    if isinstance(sigma, float):\n        sigma = tensor([[sigma]], device=device, dtype=dtype)\n    KORNIA_CHECK_IS_TENSOR(sigma)\n    KORNIA_CHECK_SHAPE(sigma, ['B', '1'])\n    batch_size = sigma.shape[0]\n    mean = float(window_size // 2) if mean is None else mean\n    if isinstance(mean, float):\n        mean = tensor([[mean]], device=sigma.device, dtype=sigma.dtype)\n    KORNIA_CHECK_IS_TENSOR(mean)\n    KORNIA_CHECK_SHAPE(mean, ['B', '1'])\n    x = (torch.arange(window_size, device=sigma.device, dtype=sigma.dtype) - mean).expand(batch_size, -1)\n    if window_size % 2 == 0:\n        x = x + 0.5\n    gauss = torch.exp(-x.pow(2.0) / (2 * sigma.pow(2.0)))\n    return gauss / gauss.sum(-1, keepdim=True)",
    "docstring": "Compute the gaussian values based on the window and sigma values. Args: window_size: the size which drives the filter amount. sigma: gaussian standard deviation. If a tensor, should be in a shape :math: mean: Mean of the Gaussian function (center). If not provided, it defaults to window_size // 2. If a tensor, should be in a shape :math: device: This value will be used if sigma is a float. Device desired to compute. dtype: This value will be used if sigma is a float. Dtype desired for compute. Returns: A tensor withshape :math:, with Gaussian values.",
    "type": "function",
    "file_path": "kornia\\kornia\\filters\\kernels.py",
    "ast_data": "FunctionDef name:gaussian arg:window_size arg:sigma arguments arg arg arg arg arg If Call Assign Call Call Call Assign Assign Compare Call If Call Assign Call Call Call Assign Call Call If Compare Assign Assign Call Call Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "benchmark_gpu",
    "source_code": "@time_and_count\ndef benchmark_gpu(self: Self, _callable: Callable[[], Any], estimation_iters: int=5, memory_warmup_iters: int=100, benchmark_iters: int=100, max_benchmark_duration: int=25, **kwargs: Any) -> float:\n    torch.cuda.synchronize()\n    _callable()\n    torch.cuda.synchronize()\n    buffer = torch.empty(self.L2_cache_size // 4, dtype=torch.int, device='cuda')\n    buffer.zero_()\n    event_pairs = self.get_event_pairs(estimation_iters)\n    for start_event, end_event in event_pairs:\n        buffer.zero_()\n        start_event.record()\n        _callable()\n        end_event.record()\n    torch.cuda.synchronize()\n    estimated_timing = self.get_event_pairs_min_timing(event_pairs)\n    benchmark_iters = max(min(benchmark_iters, int(max_benchmark_duration // estimated_timing)), 1)\n    for _ in range(memory_warmup_iters):\n        buffer.zero_()\n    event_pairs = self.get_event_pairs(benchmark_iters)\n    for start_event, end_event in event_pairs:\n        buffer.zero_()\n        start_event.record()\n        _callable()\n        end_event.record()\n    torch.cuda.synchronize()\n    benchmarked_timing = self.get_event_pairs_min_timing(event_pairs)\n    del buffer\n    return min(estimated_timing, benchmarked_timing)",
    "docstring": "Benchmark a GPU callable using a custom benchmarking implementation. Arguments: - _callable: The callable to benchmark. Keyword Arguments: - estimation_iters: Optionally, the number of iterations to run during runtime estimation. - memory_warmup_iters: Optionally, the number of iterations to flush the L2 cache before starting benchmarking. - benchmark_iters: Optionally, the number of iterations to run during the benchmarking. - max_benchmark_duration: Optionally, the maximum duration of the benchmarking, in milliseconds. An estimated duration is calculated based on the values of and , along with the estimated runtime of and various other factors, and we then shrink to fit in the alloted maximum duration. - **kwargs: Additional kwargs that may be passed to the fallback. Returns: - The minimum runtime of , in milliseconds.",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\runtime\\benchmarking.py",
    "ast_data": "FunctionDef name:benchmark_gpu arg:self arg:_callable arg:estimation_iters arg:memory_warmup_iters arg:benchmark_iters arg:max_benchmark_duration arguments arg arg arg arg arg arg arg Call Call Call Assign Call Call Assign Call For Call Call Call Call Call Assign Call Assign Call Call Call For Call Call Assign Call For Call Call Call Call Call Assign Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "_init_with_copy",
    "source_code": "def _init_with_copy(self, other: LoopBody, args):\n    indexing_exprs = other.indexing_from_args(args)\n    self.indexing_exprs = {name: V.graph.sizevars.simplify_with_ranges(expr, self.var_ranges) for name, expr in indexing_exprs.items()}\n    self.subblocks = {k: v.clone(self) for k, v in other.subblocks.items()}\n    self.indirect_vars = other.indirect_vars\n    self.indirect_var_ranges = other.indirect_var_ranges\n    self.memory_usage = other.memory_usage\n    self.op_counts = other.op_counts\n    self.root_block = other.root_block.clone(self)\n    submodules = {**other.submodules}\n    submodules.pop('get_index')\n    self.submodules = {'get_index': self.get_index, **{k: v.clone(self) for k, v in submodules.items()}}",
    "docstring": "_init_with_tracing() is slow, so this is a fast path in the case where we are just reordering/merging/splitting the args of an existing LoopBody.",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\loop_body.py",
    "ast_data": "FunctionDef name:_init_with_copy arg:self arg:other arg:args arguments arg arg arg Assign Call Assign Call Call Assign Call Call Assign Assign Assign Assign Assign Call Assign Call Assign Call Call"
  },
  {
    "library": "pandas",
    "name": "_where",
    "source_code": "def _where(self, mask: npt.NDArray[np.bool_], value) -> Self:\n    result = self.copy()\n    if is_list_like(value):\n        val = value[~mask]\n    else:\n        val = value\n    result[~mask] = val\n    return result",
    "docstring": "Analogue to np.where(mask, self, value) Parameters ---------- mask : np.ndarray[bool] value : scalar or listlike Returns ------- same type as self",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\arrays\\base.py",
    "ast_data": "FunctionDef name:_where arg:self arg:mask arg:value arguments arg arg arg Assign Call If Call Assign Assign Assign Return return:yes"
  },
  {
    "library": "scipy",
    "name": "ker_zeros",
    "source_code": "def ker_zeros(nt):\n    if not isscalar(nt) or floor(nt) != nt or nt <= 0:\n        raise ValueError('nt must be positive integer scalar.')\n    return _specfun.klvnzo(nt, 3)",
    "docstring": "Compute nt zeros of the Kelvin function ker. Parameters ---------- nt : int Number of zeros to compute. Must be positive. Returns ------- ndarray First zeros of the Kelvin function. See Also -------- ker References ---------- .. [1] Zhang, Shanjie and Jin, Jianming. \"Computation of Special Functions\", John Wiley and Sons, 1996.",
    "type": "function",
    "file_path": "scipy\\scipy\\special\\_basic.py",
    "ast_data": "FunctionDef name:ker_zeros arg:nt arguments arg If BoolOp Call Compare Call Compare Raise Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "create_pytorch_op_test_case",
    "source_code": "def create_pytorch_op_test_case(op_bench, test_config):\n    test_case = PyTorchOperatorTestCase(op_bench, test_config)\n    test_config = test_case.test_config\n    op = test_case.op_bench\n    func_name = f'{op.module_name()}{test_case.framework}{str(test_config)}'\n    return (func_name, test_case)",
    "docstring": "This method is used to generate est. func_name is a global unique string. For PyTorch add operator with M=8, N=2, K=1, tag = long, here are the values for the members in test_case: op.module_name: add framework: PyTorch test_config: TestConfig(test_name='add_M8_N2_K1', input_config='M: 8, N: 2, K: 1', tag='long', run_backward=False) func_name: addPyTorchTestConfig(test_name='add_M8_N2_K1', input_config='M: 8, N: 2, K: 1', tag='long', run_backward=False)",
    "type": "function",
    "file_path": "pytorch\\benchmarks\\operator_benchmark\\benchmark_pytorch.py",
    "ast_data": "FunctionDef name:create_pytorch_op_test_case arg:op_bench arg:test_config arguments arg arg Assign Call Assign Assign Assign Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "get_const_dim_count",
    "source_code": "def get_const_dim_count(node_def):\n    const_value = values_from_const(node_def)\n    return const_value.ndim",
    "docstring": "Get the number of dimensions for a Const node. Args: node_def: Const NodeDef. Returns: Number of dimensions for the Const node.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\tools\\optimize_for_inference_lib.py",
    "ast_data": "FunctionDef name:get_const_dim_count arg:node_def arguments arg Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "sort_tensors_and_ops",
    "source_code": "def sort_tensors_and_ops(graph):\n    graph_wrapper = collections.namedtuple('GraphWrapper', ['graph', 'operations', 'op_to_idx', 'tensors', 'tensor_to_idx', 'contains_cycle', 'topological_order_or_cycle'])\n    contains_cycle, topological_order_or_cycle = topological_sort(graph)\n    if not contains_cycle:\n        operations = topological_order_or_cycle\n    else:\n        operations = graph.get_operations()\n    op_to_idx = {op.name: index for index, op in enumerate(operations)}\n    tensors = []\n    for op in operations:\n        tensors.extend(op.outputs)\n    tensor_to_idx = {tensor.name: index for index, tensor in enumerate(tensors)}\n    return graph_wrapper(graph=graph, operations=operations, op_to_idx=op_to_idx, tensors=tensors, tensor_to_idx=tensor_to_idx, contains_cycle=contains_cycle, topological_order_or_cycle=topological_order_or_cycle)",
    "docstring": "Returns a wrapper that has consistent tensor and op orders.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\tpu\\tensor_tracer_report.py",
    "ast_data": "FunctionDef name:sort_tensors_and_ops arg:graph arguments arg Assign Call Assign Call If Assign Assign Call Assign Call Assign For Call Assign Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "add_images",
    "source_code": "def add_images(self, tag, img_tensor, global_step=None, walltime=None, dataformats='NCHW'):\n    torch._C._log_api_usage_once('tensorboard.logging.add_images')\n    self._get_file_writer().add_summary(image(tag, img_tensor, dataformats=dataformats), global_step, walltime)",
    "docstring": "Add batched image data to summary. Note that this requires the `(N, 3, H, W)` is specified, other shape will be accepted. e.g. NCHW or NHWC. Examples:: from torch.utils.tensorboard import SummaryWriter import numpy as np img_batch = np.zeros((16, 3, 100, 100)) for i in range(16): img_batch[i, 0] = np.arange(0, 10000).reshape(100, 100) / 10000 / 16 * i img_batch[i, 1] = (1 - np.arange(0, 10000).reshape(100, 100) / 10000) / 16 * i writer = SummaryWriter() writer.add_images('my_image_batch', img_batch, 0) writer.close() Expected result: .. image:: _static/img/tensorboard/add_images.png :scale: 30 %",
    "type": "method",
    "file_path": "pytorch\\torch\\utils\\tensorboard\\writer.py",
    "ast_data": "FunctionDef name:add_images arg:self arg:tag arg:img_tensor arg:global_step arg:walltime arg:dataformats arguments arg arg arg arg arg arg Call Call Call Call"
  },
  {
    "library": "numpy",
    "name": "addIgnorableRoutine",
    "source_code": "def addIgnorableRoutine(self, rname):\n    rname = rname.lower()\n    routine = UnknownFortranRoutine(rname)\n    self.names_to_routines[rname] = routine",
    "docstring": "Add a routine that we don't want to consider when looking at dependencies.",
    "type": "method",
    "file_path": "numpy\\numpy\\linalg\\lapack_lite\\make_lite.py",
    "ast_data": "FunctionDef name:addIgnorableRoutine arg:self arg:rname arguments arg arg Assign Call Assign Call Assign"
  },
  {
    "library": "pytorch",
    "name": "max_memory_cached",
    "source_code": "@deprecated('`torch.cuda.max_memory_cached` has been renamed to `torch.cuda.max_memory_reserved`', category=FutureWarning)\ndef max_memory_cached(device: 'Device'=None) -> int:\n    return max_memory_reserved(device=device)",
    "docstring": "Deprecated; see :func:.",
    "type": "function",
    "file_path": "pytorch\\torch\\cuda\\memory.py",
    "ast_data": "FunctionDef name:max_memory_cached arg:device arguments arg Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "is_aat_form",
    "source_code": "def is_aat_form(operators):\n    operators = list(operators)\n    if not operators:\n        raise ValueError('AAT form is undefined for empty operators')\n    if len(operators) % 2:\n        return False\n    return all((is_adjoint_pair(operators[i], operators[-1 - i]) for i in range(len(operators) // 2)))",
    "docstring": "Returns True if operators is of the form A @ A.H, possibly recursively.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\linalg\\linear_operator_util.py",
    "ast_data": "FunctionDef name:is_aat_form arg:operators arguments arg Assign Call If Raise Call If Call Return return:yes Return return:yes Call Call Call Call"
  },
  {
    "library": "numpy",
    "name": "lagval2d",
    "source_code": "def lagval2d(x, y, c):\n    return pu._valnd(lagval, c, x, y)",
    "docstring": "Evaluate a 2-D Laguerre series at points (x, y). This function returns the values: .. math:: p(x,y) = \\sum_{i,j} c_{i,j} * L_i(x) * L_j(y) The parameters and are converted to arrays only if they are tuples or a lists, otherwise they are treated as a scalars and they must have the same shape after conversion. In either case, either and or their elements must support multiplication and addition both with themselves and with the elements of . If is a 1-D array a one is implicitly appended to its shape to make it 2-D. The shape of the result will be c.shape[2:] + x.shape. Parameters ---------- x, y : array_like, compatible objects The two dimensional series is evaluated at the points `xyxycxy`. See Also -------- lagval, laggrid2d, lagval3d, laggrid3d Examples -------- >>> from numpy.polynomial.laguerre import lagval2d >>> c = [[1, 2],[3, 4]] >>> lagval2d(1, 1, c) 1.0",
    "type": "function",
    "file_path": "numpy\\numpy\\polynomial\\laguerre.py",
    "ast_data": "FunctionDef name:lagval2d arg:x arg:y arg:c arguments arg arg arg Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "__setitem__",
    "source_code": "def __setitem__(self, key, value) -> None:\n    raise NotImplementedError(f'{type(self)} does not implement __setitem__.')",
    "docstring": "Set one or more values inplace. This method is not required to satisfy the pandas extension array interface. Parameters ---------- key : int, ndarray, or slice When called from, e.g. ``. Returns ------- None",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\arrays\\base.py",
    "ast_data": "FunctionDef name:__setitem__ arg:self arg:key arg:value arguments arg arg arg Raise Call Call"
  },
  {
    "library": "scipy",
    "name": "f_oneway",
    "source_code": "def f_oneway(*args):\n    data = argstoarray(*args)\n    ngroups = len(data)\n    ntot = data.count()\n    sstot = (data ** 2).sum() - data.sum() ** 2 / float(ntot)\n    ssbg = (data.count(-1) * (data.mean(-1) - data.mean()) ** 2).sum()\n    sswg = sstot - ssbg\n    dfbg = ngroups - 1\n    dfwg = ntot - ngroups\n    msb = ssbg / float(dfbg)\n    msw = sswg / float(dfwg)\n    f = msb / msw\n    prob = special.fdtrc(dfbg, dfwg, f)\n    return F_onewayResult(f, prob)",
    "docstring": "Performs a 1-way ANOVA, returning an F-value and probability given any number of groups. From Heiman, pp.394-7. Usage: `` is 2 or more arrays, one per treatment group. Returns ------- statistic : float The computed F-value of the test. pvalue : float The associated p-value from the F-distribution.",
    "type": "function",
    "file_path": "scipy\\scipy\\stats\\_mstats_basic.py",
    "ast_data": "FunctionDef name:f_oneway arguments arg Assign Call Assign Call Assign Call Assign Call Call Call Assign Call Call Call Call Assign Assign Assign Assign Call Assign Call Assign Assign Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "reduction",
    "source_code": "def reduction(size_hints, reduction_hint=False, triton_meta=None, filename=None, inductor_meta=None):\n    inductor_meta = {} if inductor_meta is None else inductor_meta\n    inductor_meta['reduction_hint'] = reduction_hint\n    if inductor_meta.get('no_x_dim'):\n        size_hints['x'] = 1\n    assert triton_meta is not None\n    configs = _reduction_configs(size_hints=size_hints, inductor_meta=inductor_meta)\n    return cached_autotune(size_hints, configs=configs, triton_meta=triton_meta, inductor_meta=inductor_meta, heuristic_type=HeuristicType.REDUCTION, filename=filename)",
    "docstring": "args to @triton.heuristics()",
    "type": "function",
    "file_path": "pytorch\\torch\\_inductor\\runtime\\triton_heuristics.py",
    "ast_data": "FunctionDef name:reduction arg:size_hints arg:reduction_hint arg:triton_meta arg:filename arg:inductor_meta arguments arg arg arg arg arg Assign Compare Assign If Call Assign Compare Assign Call Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "_bin_data",
    "source_code": "def _bin_data(self, X, is_training_data):\n    description = 'training' if is_training_data else 'validation'\n    if self.verbose:\n        print('Binning {:.3f} GB of {} data: '.format(X.nbytes / 1000000000.0, description), end='', flush=True)\n    tic = time()\n    if is_training_data:\n        X_binned = self._bin_mapper.fit_transform(X)\n    else:\n        X_binned = self._bin_mapper.transform(X)\n        X_binned = np.ascontiguousarray(X_binned)\n    toc = time()\n    if self.verbose:\n        duration = toc - tic\n        print('{:.3f} s'.format(duration))\n    return X_binned",
    "docstring": "Bin data X. If is_training_data, then fit the _bin_mapper attribute. Else, the binned data is converted to a C-contiguous array.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\ensemble\\_hist_gradient_boosting\\gradient_boosting.py",
    "ast_data": "FunctionDef name:_bin_data arg:self arg:X arg:is_training_data arguments arg arg arg Assign If Call Call Assign Call If Assign Call Assign Call Assign Call Assign Call If Assign Call Call Return return:yes"
  },
  {
    "library": "sphinx",
    "name": "getannotations",
    "source_code": "def getannotations(obj: Any) -> Mapping[str, Any]:\n    __annotations__ = safe_getattr(obj, '__annotations__', None)\n    if isinstance(__annotations__, Mapping):\n        return __annotations__\n    return {}",
    "docstring": "Safely get the `` attribute of an object.",
    "type": "function",
    "file_path": "sphinx\\sphinx\\util\\inspect.py",
    "ast_data": "FunctionDef name:getannotations arg:obj arguments arg Assign Call If Call Return return:yes Return return:no"
  },
  {
    "library": "tensorflow",
    "name": "var",
    "source_code": "@doc_controls.do_not_generate_docs\ndef var(x, axis=None, keepdims=False):\n    if x.dtype.base_dtype == dtypes_module.bool:\n        x = math_ops.cast(x, floatx())\n    return math_ops.reduce_variance(x, axis=axis, keepdims=keepdims)",
    "docstring": "Variance of a tensor, alongside the specified axis. Args: x: A tensor or variable. axis: An integer, the axis to compute the variance. keepdims: A boolean, whether to keep the dimensions or not. If is , the rank of the tensor is reduced by 1. If is , the reduced dimension is retained with length 1. Returns: A tensor with the variance of elements of .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\backend.py",
    "ast_data": "FunctionDef name:var arg:x arg:axis arg:keepdims arguments arg arg arg If Compare Assign Call Call Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "RollingGroupby",
    "source_code": "class RollingGroupby(BaseWindowGroupby, Rolling):\n    _attributes = Rolling._attributes + BaseWindowGroupby._attributes\n\n    def _get_window_indexer(self) -> GroupbyIndexer:\n        rolling_indexer: type[BaseIndexer]\n        indexer_kwargs: dict[str, Any] | None = None\n        index_array = self._index_array\n        if isinstance(self.window, BaseIndexer):\n            rolling_indexer = type(self.window)\n            indexer_kwargs = self.window.__dict__.copy()\n            assert isinstance(indexer_kwargs, dict)\n            indexer_kwargs.pop('index_array', None)\n            window = self.window\n        elif self._win_freq_i8 is not None:\n            rolling_indexer = VariableWindowIndexer\n            window = self._win_freq_i8\n        else:\n            rolling_indexer = FixedWindowIndexer\n            window = self.window\n        window_indexer = GroupbyIndexer(index_array=index_array, window_size=window, groupby_indices=self._grouper.indices, window_indexer=rolling_indexer, indexer_kwargs=indexer_kwargs)\n        return window_indexer\n\n    def _validate_datetimelike_monotonic(self) -> None:\n        if self._on.hasnans:\n            self._raise_monotonic_error('values must not have NaT')\n        for group_indices in self._grouper.indices.values():\n            group_on = self._on.take(group_indices)\n            if not (group_on.is_monotonic_increasing or group_on.is_monotonic_decreasing):\n                on = 'index' if self.on is None else self.on\n                raise ValueError(f'Each group within {on} must be monotonic. Sort the values in {on} first.')",
    "docstring": "Provide a rolling groupby implementation.",
    "type": "class",
    "file_path": "pandas\\pandas\\core\\window\\rolling.py",
    "ast_data": "ClassDef name:RollingGroupby Assign FunctionDef name:_get_window_indexer arg:self arguments arg Assign If Call Assign Call Assign Call Call Call Assign If Compare Assign Assign Assign Assign Assign Call Return return:yes FunctionDef name:_validate_datetimelike_monotonic arg:self arguments arg If Call For Call Assign Call If BoolOp Assign Compare Raise Call"
  },
  {
    "library": "pandas",
    "name": "_gotitem",
    "source_code": "def _gotitem(self, key, ndim, subset=None) -> Self:\n    return self",
    "docstring": "Sub-classes to define. Return a sliced object. Parameters ---------- key : string / list of selections ndim : {1, 2} Requested ndim of result. subset : object, default None Subset to act on.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\series.py",
    "ast_data": "FunctionDef name:_gotitem arg:self arg:key arg:ndim arg:subset arguments arg arg arg arg Return return:yes"
  },
  {
    "library": "kornia",
    "name": "q",
    "source_code": "@property\ndef q(self) -> Tensor:\n    return self.data",
    "docstring": "Return the underlying data with shape :math:. Alias for :func:",
    "type": "method",
    "file_path": "kornia\\kornia\\geometry\\quaternion.py",
    "ast_data": "FunctionDef name:q arg:self arguments arg Return return:yes"
  },
  {
    "library": "django",
    "name": "reset",
    "source_code": "def reset(self, context):\n    context.render_context[self] = itertools_cycle(self.cyclevars)",
    "docstring": "Reset the cycle iteration back to the beginning.",
    "type": "method",
    "file_path": "django\\django\\template\\defaulttags.py",
    "ast_data": "FunctionDef name:reset arg:self arg:context arguments arg arg Assign Call"
  },
  {
    "library": "scipy",
    "name": "Ursem01",
    "source_code": "class Ursem01(Benchmark):\n\n    def __init__(self, dimensions=2):\n        Benchmark.__init__(self, dimensions)\n        self._bounds = [(-2.5, 3.0), (-2.0, 2.0)]\n        self.global_optimum = [[1.69714, 0.0]]\n        self.fglob = -4.81681406371\n\n    def fun(self, x, *args):\n        self.nfev += 1\n        return -sin(2 * x[0] - 0.5 * pi) - 3.0 * cos(x[1]) - 0.5 * x[0]",
    "docstring": "Ursem 1 objective function. This class defines the Ursem 1 [1]_ global optimization problem. This is a unimodal minimization problem defined as follows: .. math:: f_{\\text{Ursem01}}(x) = - \\sin(2x_1 - 0.5 \\pi) - 3 \\cos(x_2) - 0.5 x_1 with :math: and :math:. *Global optimum*: :math: for :math: .. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions For Global Optimization Problems Int. Journal of Mathematical Modelling and Numerical Optimisation, 2013, 4, 150-194.",
    "type": "class",
    "file_path": "scipy\\benchmarks\\benchmarks\\go_benchmark_functions\\go_funcs_U.py",
    "ast_data": "ClassDef name:Ursem01 FunctionDef name:__init__ arg:self arg:dimensions arguments arg arg Call Assign Assign Assign FunctionDef name:fun arg:self arg:x arguments arg arg arg Return return:yes Call Call"
  },
  {
    "library": "kornia",
    "name": "orientation_diff",
    "source_code": "def orientation_diff(o1: Tensor, o2: Tensor) -> Tensor:\n    diff = o2 - o1\n    diff[diff < -180] += 360\n    diff[diff >= 180] -= 360\n    return diff",
    "docstring": "Orientation difference between two tensors.",
    "type": "function",
    "file_path": "kornia\\kornia\\feature\\adalam\\utils.py",
    "ast_data": "FunctionDef name:orientation_diff arg:o1 arg:o2 arguments arg arg Assign Compare Compare Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "get_tightbbox",
    "source_code": "def get_tightbbox(self, renderer=None):\n    bbox = self.get_window_extent(renderer)\n    if self.get_clip_on():\n        clip_box = self.get_clip_box()\n        if clip_box is not None:\n            bbox = Bbox.intersection(bbox, clip_box)\n        clip_path = self.get_clip_path()\n        if clip_path is not None and bbox is not None:\n            clip_path = clip_path.get_fully_transformed_path()\n            bbox = Bbox.intersection(bbox, clip_path.get_extents())\n    return bbox",
    "docstring": "Like , but includes any clipping. Parameters ---------- renderer : subclass, optional renderer that will be used to draw the figures (i.e. `.Bbox` or None The enclosing bounding box (in figure pixel coordinates). Returns None if clipping results in no intersection.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\artist.py",
    "ast_data": "FunctionDef name:get_tightbbox arg:self arg:renderer arguments arg arg Assign Call If Call Assign Call If Compare Assign Call Assign Call If BoolOp Compare Compare Assign Call Assign Call Call Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "load_data",
    "source_code": "@memory.cache\ndef load_data(dtype=np.float32, order='C', shuffle=True, seed=0):\n    print('Loading dataset...')\n    data = fetch_openml('mnist_784', as_frame=True)\n    X = check_array(data['data'], dtype=dtype, order=order)\n    y = data['target']\n    if shuffle:\n        X, y = _shuffle(X, y, random_state=seed)\n    X /= 255\n    return (X, y)",
    "docstring": "Load the data, then cache and memmap the train/test split",
    "type": "function",
    "file_path": "scikit-learn\\benchmarks\\bench_tsne_mnist.py",
    "ast_data": "FunctionDef name:load_data arg:dtype arg:order arg:shuffle arg:seed arguments arg arg arg arg Call Assign Call Assign Call Assign If Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_check_trace_files",
    "source_code": "def _check_trace_files(self):\n    if not self._parameters.trace_dir:\n        return\n    if self._parameters.trace_mode == tensor_tracer_flags.TRACE_MODE_SUMMARY:\n        return\n    if not gfile.Exists(self._parameters.trace_dir):\n        file_io.recursive_create_dir(self._parameters.trace_dir)\n        if not gfile.Exists(self._parameters.trace_dir):\n            raise RuntimeError('Failed to create trace directory at %s' % self._parameters.trace_dir)",
    "docstring": "Checks if any requirements for trace files are satisfied.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\tpu\\tensor_tracer.py",
    "ast_data": "FunctionDef name:_check_trace_files arg:self arguments arg If Return return:no If Compare Return return:no If Call Call If Call Raise Call"
  },
  {
    "library": "tensorflow",
    "name": "is_inside_loop",
    "source_code": "@property\ndef is_inside_loop(self) -> bool:\n    return self._is_inside_loop",
    "docstring": "Returns true if the while_loop was created inside the pfor.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\parallel_for\\pfor.py",
    "ast_data": "FunctionDef name:is_inside_loop arg:self arguments arg Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "print_outlier_ratio",
    "source_code": "def print_outlier_ratio(y):\n    uniq, cnt = np.unique(y, return_counts=True)\n    print('----- Target count values: ')\n    for u, c in zip(uniq, cnt):\n        print('------ %s -> %d occurrences' % (str(u), c))\n    print('----- Outlier ratio: %.5f' % (np.min(cnt) / len(y)))",
    "docstring": "Helper function to show the distinct value count of element in the target. Useful indicator for the datasets used in bench_isolation_forest.py.",
    "type": "function",
    "file_path": "scikit-learn\\benchmarks\\bench_isolation_forest.py",
    "ast_data": "FunctionDef name:print_outlier_ratio arg:y arguments arg Assign Call Call For Call Call Call Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "called_with_tracing",
    "source_code": "def called_with_tracing(self, function_name, omit_warning):\n    self._call_count += 1\n    self._calls_per_tracings.append(1)\n    while self._calls_per_tracings:\n        if self._call_count - self._calls_per_tracings[0] > FREQUENT_TRACING_WARNING_MAX_CALL_HISTORY:\n            self._call_count -= self._calls_per_tracings.pop(0)\n        else:\n            break\n    if omit_warning or self._total_warning_count >= FREQUENT_TRACING_WARNING_MAX_WARNING_PER_DETECTOR:\n        return\n    if len(self._calls_per_tracings) >= FREQUENT_TRACING_WARNING_THRESHOLD:\n        self._total_warning_count += 1\n        logging.warning('{} out of the last {} calls to {} triggered tf.function retracing. Tracing is expensive and the excessive number of tracings could be due to (1) creating @tf.function repeatedly in a loop, (2) passing tensors with different shapes, (3) passing Python objects instead of tensors. For (1), please define your @tf.function outside of the loop. For (2), @tf.function has reduce_retracing=True option that can avoid unnecessary retracing. For (3), please refer to https://www.tensorflow.org/guide/function#controlling_retracing and https://www.tensorflow.org/api_docs/python/tf/function for  more details.'.format(len(self._calls_per_tracings), self._call_count, function_name))",
    "docstring": "Updates the list of most recent calls' tracing information. Warns the user when recent calls caused retracing too often. Args: function_name: the python function being traced. omit_warning: If 'True', this call will not warn the user even if retracing happens too often.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\polymorphic_function\\polymorphic_function.py",
    "ast_data": "FunctionDef name:called_with_tracing arg:self arg:function_name arg:omit_warning arguments arg arg arg Call While If Compare Call If BoolOp Compare Return return:no If Compare Call Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_Consumers",
    "source_code": "def _Consumers(t, func_graphs):\n    consumers = t.consumers()\n    for func in func_graphs:\n        for input_t, placeholder in _Captures(func):\n            if input_t is t:\n                consumers.extend(_Consumers(placeholder, func_graphs))\n    return consumers",
    "docstring": "Returns the consumers of t, crossing closure boundaries where necessary. Args: t: Tensor func_graphs: a list of FuncGraphs that may have captured t. Returns: A list of tensors. The tensors will be from the current graph and/or func_graphs.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\gradients_util.py",
    "ast_data": "FunctionDef name:_Consumers arg:t arg:func_graphs arguments arg arg Assign Call For For Call If Compare Call Call Return return:yes"
  },
  {
    "library": "pandas",
    "name": "_from_mgr",
    "source_code": "@final\n@classmethod\ndef _from_mgr(cls, mgr: Manager, axes: list[Index]) -> Self:\n    obj = cls.__new__(cls)\n    NDFrame.__init__(obj, mgr)\n    return obj",
    "docstring": "Construct a new object of this type from a Manager object and axes. Parameters ---------- mgr : Manager Must have the same ndim as cls. axes : list[Index] Notes ----- The axes must match mgr.axes, but are required for future-proofing in the event that axes are refactored out of the Manager objects.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\generic.py",
    "ast_data": "FunctionDef name:_from_mgr arg:cls arg:mgr arg:axes arguments arg arg arg Assign Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "call_for_each_replica",
    "source_code": "def call_for_each_replica(self, fn, args=(), kwargs=None):\n    _require_cross_replica_or_default_context_extended(self)\n    if kwargs is None:\n        kwargs = {}\n    with self._container_strategy().scope():\n        return self._call_for_each_replica(fn, args, kwargs)",
    "docstring": "Run once per replica. may call to access methods such as and . is used to communicate between the replicas and re-enter the cross-replica context. All replicas pause their execution having encountered a call. After that the -function is executed. Its results are then unwrapped and given back to each replica call. After that execution resumes until is complete or encounters another . Example: Args: fn: function to run (will be run once per replica). args: Tuple or list with positional arguments for . kwargs: Dict with keyword arguments for . Returns: Merged return value of across all replicas.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\distribute_lib.py",
    "ast_data": "FunctionDef name:call_for_each_replica arg:self arg:fn arg:args arg:kwargs arguments arg arg arg arg Call If Compare Assign With Call Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "filter",
    "source_code": "def filter(self, filter_fn: Callable[[str], bool]) -> 'FunctionCounts':\n    return FunctionCounts(tuple((i for i in self if filter_fn(i.function))), self.inclusive)",
    "docstring": "Keep only the elements where applied to function name returns True.",
    "type": "method",
    "file_path": "pytorch\\torch\\utils\\benchmark\\utils\\valgrind_wrapper\\timer_interface.py",
    "ast_data": "FunctionDef name:filter arg:self arg:filter_fn arguments arg arg Return return:yes Call Call Call"
  },
  {
    "library": "django",
    "name": "fetch_returned_insert_rows",
    "source_code": "def fetch_returned_insert_rows(self, cursor):\n    return cursor.fetchall()",
    "docstring": "Given a cursor object that has just performed an INSERT...RETURNING statement into a table, return the tuple of returned data.",
    "type": "method",
    "file_path": "django\\django\\db\\backends\\mysql\\operations.py",
    "ast_data": "FunctionDef name:fetch_returned_insert_rows arg:self arg:cursor arguments arg arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "stack_trace",
    "source_code": "@property\ndef stack_trace(self) -> Optional[str]:\n    return self.meta.get('stack_trace', None)",
    "docstring": "Return the Python stack trace that was recorded during tracing, if any. When traced with fx.Tracer, this property is usually populated by . To record stack traces during tracing for debug purposes, set on the instance. When traced with dynamo, this property will be populated by default by . stack_trace would have the innermost frame at the end of the string.",
    "type": "method",
    "file_path": "pytorch\\torch\\fx\\node.py",
    "ast_data": "FunctionDef name:stack_trace arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "contains",
    "source_code": "def contains(self, mouseevent):\n    if self._different_canvas(mouseevent) or not self.get_visible():\n        return (False, {})\n    x, y = (mouseevent.x, mouseevent.y)\n    inside = self.get_window_extent().contains(x, y)\n    return (inside, {})",
    "docstring": "Test whether the mouse event occurred within the image.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\image.py",
    "ast_data": "FunctionDef name:contains arg:self arg:mouseevent arguments arg arg If BoolOp Call Call Return return:yes Assign Assign Call Call Return return:yes"
  },
  {
    "library": "kornia",
    "name": "pixel_format",
    "source_code": "@property\ndef pixel_format(self) -> PixelFormat:\n    return self._pixel_format",
    "docstring": "Return the pixel format.",
    "type": "method",
    "file_path": "kornia\\kornia\\image\\image.py",
    "ast_data": "FunctionDef name:pixel_format arg:self arguments arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "this_before_that_pass_constraint",
    "source_code": "@compatibility(is_backward_compatible=False)\ndef this_before_that_pass_constraint(this: Callable, that: Callable) -> Callable:\n\n    def depends_on(a: Callable, b: Callable):\n        return a != that or b != this\n    return depends_on",
    "docstring": "Defines a partial order ('depends on' function) where must occur before . For example, the following pass list and constraint list would be invalid. Args: this (Callable): pass which should occur first that (Callable): pass which should occur later Returns: depends_on (Callable[[Object, Object], bool]",
    "type": "function",
    "file_path": "pytorch\\torch\\fx\\passes\\infra\\pass_manager.py",
    "ast_data": "FunctionDef name:this_before_that_pass_constraint arg:this arg:that arguments arg arg FunctionDef name:depends_on arg:a arg:b arguments arg arg Return return:yes BoolOp Compare Compare Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "run_node",
    "source_code": "def run_node(self, n: torch.fx.Node) -> Any:\n    with self._set_current_node(n):\n        if (rule := get_type_promotion_rule(n, self.type_promotion_table)):\n            self._maybe_promote_node(n, rule)\n    return super().run_node(n)",
    "docstring": "This method is an override which inserts type promotion nodes as needed. For each node, an initial check is conducted to determine if a type promotion rule is applicable. If a relevant rule exists, type casting nodes are introduced for the corresponding arguments. The OpOverload of the node is updated to one that accommodates the promoted types. Should the output type be different, type casting node is inserted for this output. The call is guaranteed to be invoked for each node. In the case of new or modified nodes, the result of is used to update its value.",
    "type": "method",
    "file_path": "pytorch\\torch\\onnx\\_internal\\fx\\passes\\type_promotion.py",
    "ast_data": "FunctionDef name:run_node arg:self arg:n arguments arg arg With Call If Call Call Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_arg_min_flops",
    "source_code": "@ops.RegisterStatistics('ArgMin', 'flops')\ndef _arg_min_flops(graph, node):\n    return _reduction_op_flops(graph, node, reduce_flops=1, finalize_flops=0)",
    "docstring": "Compute flops for ArgMin operation.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\profiler\\internal\\flops_registry.py",
    "ast_data": "FunctionDef name:_arg_min_flops arg:graph arg:node arguments arg arg Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_revive_from_config",
    "source_code": "def _revive_from_config(self, identifier, metadata, node_id):\n    if identifier == constants.METRIC_IDENTIFIER:\n        obj = self._revive_metric_from_config(metadata)\n    else:\n        obj = self._revive_graph_network(identifier, metadata, node_id) or self._revive_layer_or_model_from_config(metadata, node_id)\n    if obj is None:\n        return (None, None)\n    setter = self._config_node_setter(_revive_setter)\n    self._add_children_recreated_from_config(obj, self._proto.nodes[node_id], node_id)\n    return (obj, setter)",
    "docstring": "Revives a layer/model from config, or returns None.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\saving\\saved_model\\load.py",
    "ast_data": "FunctionDef name:_revive_from_config arg:self arg:identifier arg:metadata arg:node_id arguments arg arg arg arg If Compare Assign Call Assign BoolOp Call Call If Compare Return return:no Assign Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "metadata",
    "source_code": "@abc.abstractmethod\ndef metadata(self) -> dict:\n    raise NotImplementedError",
    "docstring": "Return the metadata.",
    "type": "method",
    "file_path": "pytorch\\torch\\autograd\\graph.py",
    "ast_data": "FunctionDef name:metadata arg:self arguments arg Raise"
  },
  {
    "library": "tensorflow",
    "name": "_update",
    "source_code": "def _update(strategy, var, update_fn, args):\n    assert distribute_lib.in_cross_replica_context(), '_update can only be called in cross-replica context'\n    if distribute_lib.get_update_replica_id() is not None:\n        return update_fn(var, *args)\n    else:\n        return strategy.extended.update(var, update_fn, args)",
    "docstring": "Applies updates depending on the context.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\training\\moving_averages.py",
    "ast_data": "FunctionDef name:_update arg:strategy arg:var arg:update_fn arg:args arguments arg arg arg arg Call If Compare Call Return return:yes Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, limit=100, history_file_path=None):\n    self._commands = []\n    self._limit = limit\n    self._history_file_path = history_file_path or self._get_default_history_file_path()\n    self._load_history_from_file()",
    "docstring": "CommandHistory constructor. Args: limit: Maximum number of the most recent commands that this instance keeps track of, as an int. history_file_path: (str) Manually specified path to history file. Used in testing.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\debug\\cli\\debugger_cli_common.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:limit arg:history_file_path arguments arg arg arg Assign Assign Assign BoolOp Call Call"
  },
  {
    "library": "kornia",
    "name": "adjust_log",
    "source_code": "def adjust_log(image: Tensor, gain: float=1, inv: bool=False, clip_output: bool=True) -> Tensor:\n    KORNIA_CHECK_IS_TENSOR(image, 'Expected shape (*, H, W)')\n    if inv:\n        img_adjust = (2 ** image - 1) * gain\n    else:\n        img_adjust = (1 + image).log2() * gain\n    if clip_output:\n        img_adjust = img_adjust.clamp(min=0.0, max=1.0)\n    return img_adjust",
    "docstring": "Adjust log correction on the input image tensor. The input image is expected to be in the range of [0, 1]. Reference: [1]: Args: image: Image to be adjusted in the shape of :math:. gain: The multiplier of logarithmic function. inv: If is set to True the function will return the inverse logarithmic correction. clip_output: Whether to clip the output image with range of [0, 1]. Returns: Adjusted tensor in the shape of :math:. Example: >>> x = torch.zeros(1, 1, 2, 2) >>> adjust_log(x, inv=True) tensor([[[[0., 0.], [0., 0.]]]])",
    "type": "function",
    "file_path": "kornia\\kornia\\enhance\\adjust.py",
    "ast_data": "FunctionDef name:adjust_log arg:image arg:gain arg:inv arg:clip_output arguments arg arg arg arg Call If Assign Assign Call If Assign Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "__init__",
    "source_code": "def __init__(self, left=None, bottom=None, right=None, top=None, wspace=None, hspace=None):\n    for key in ['left', 'bottom', 'right', 'top', 'wspace', 'hspace']:\n        setattr(self, key, mpl.rcParams[f'figure.subplot.{key}'])\n    self.update(left, bottom, right, top, wspace, hspace)",
    "docstring": "Defaults are given by :rc:. Parameters ---------- left : float, optional The position of the left edge of the subplots, as a fraction of the figure width. right : float, optional The position of the right edge of the subplots, as a fraction of the figure width. bottom : float, optional The position of the bottom edge of the subplots, as a fraction of the figure height. top : float, optional The position of the top edge of the subplots, as a fraction of the figure height. wspace : float, optional The width of the padding between subplots, as a fraction of the average Axes width. hspace : float, optional The height of the padding between subplots, as a fraction of the average Axes height.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\gridspec.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:left arg:bottom arg:right arg:top arg:wspace arg:hspace arguments arg arg arg arg arg arg arg For Call Call"
  },
  {
    "library": "pandas",
    "name": "from_spmatrix",
    "source_code": "@classmethod\ndef from_spmatrix(cls, data, index=None, columns=None) -> DataFrame:\n    from pandas._libs.sparse import IntIndex\n    from pandas import DataFrame\n    data = data.tocsc()\n    index, columns = cls._prep_index(data, index, columns)\n    n_rows, n_columns = data.shape\n    data.sort_indices()\n    indices = data.indices\n    indptr = data.indptr\n    array_data = data.data\n    dtype = SparseDtype(array_data.dtype)\n    arrays = []\n    for i in range(n_columns):\n        sl = slice(indptr[i], indptr[i + 1])\n        idx = IntIndex(n_rows, indices[sl], check_integrity=False)\n        arr = SparseArray._simple_new(array_data[sl], idx, dtype)\n        arrays.append(arr)\n    return DataFrame._from_arrays(arrays, columns=columns, index=index, verify_integrity=False)",
    "docstring": "Create a new DataFrame from a scipy sparse matrix. Parameters ---------- data : scipy.sparse.spmatrix Must be convertible to csc format. index, columns : Index, optional Row and column labels to use for the resulting DataFrame. Defaults to a RangeIndex. Returns ------- DataFrame Each column of the DataFrame is stored as a :class:. See Also -------- DataFrame.sparse.to_coo : Return the contents of the frame as a sparse SciPy COO matrix. Examples -------- >>> import scipy.sparse >>> mat = scipy.sparse.eye(3, dtype=int) >>> pd.DataFrame.sparse.from_spmatrix(mat) 0 1 2 0 1 0 0 1 0 1 0 2 0 0 1",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\arrays\\sparse\\accessor.py",
    "ast_data": "FunctionDef name:from_spmatrix arg:cls arg:data arg:index arg:columns arguments arg arg arg arg Assign Call Assign Call Assign Call Assign Assign Assign Assign Call Assign For Call Assign Call Assign Call Assign Call Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "captures",
    "source_code": "@property\ndef captures(self):\n    return self._function_captures.by_val_capture_tuples",
    "docstring": "Order list of tuples containing external and internal captures.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\func_graph.py",
    "ast_data": "FunctionDef name:captures arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_restore_from_tensors",
    "source_code": "def _restore_from_tensors(self, restored_tensors):\n    with ops.name_scope('%s_table_restore' % self._name):\n        with ops.colocate_with(self.resource_handle):\n            return gen_lookup_ops.lookup_table_import_v2(self.resource_handle, restored_tensors['-keys'], restored_tensors['-values'])",
    "docstring": "Implements checkpointing protocols for .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\lookup_ops.py",
    "ast_data": "FunctionDef name:_restore_from_tensors arg:self arg:restored_tensors arguments arg arg With Call With Call Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "TextParser",
    "source_code": "def TextParser(*args, **kwds) -> TextFileReader:\n    kwds['engine'] = 'python'\n    return TextFileReader(*args, **kwds)",
    "docstring": "Converts lists of lists/tuples into DataFrames with proper type inference and optional (e.g. string to datetime) conversion. Also enables iterating lazily over chunks of large files Parameters ---------- data : file-like object or list delimiter : separator character to use dialect : str or csv.Dialect instance, optional Ignored if delimiter is longer than 1 character names : sequence, default header : int, default 0 Row to use to parse column labels. Defaults to the first row. Prior rows will be discarded index_col : int or list, optional Column or columns to use as the (possibly hierarchical) index has_index_names: bool, default False True if the cols defined in index_col have an index name and are not in the header. na_values : scalar, str, list-like, or dict, optional Additional strings to recognize as NA/NaN. keep_default_na : bool, default True thousands : str, optional Thousands separator comment : str, optional Comment out remainder of line parse_dates : bool, default False date_format : str or dict of column -> format, default `Nonehighlegacyround_trip` for the round-trip converter.",
    "type": "function",
    "file_path": "pandas\\pandas\\io\\parsers\\readers.py",
    "ast_data": "FunctionDef name:TextParser arguments arg arg Assign Return return:yes Call"
  },
  {
    "library": "cherrypy",
    "name": "__init__",
    "source_code": "def __init__(self, *args, **kwargs):\n    self.bus = kwargs.pop('bus', None)\n    super(PerpetualTimer, self).__init__(*args, **kwargs)",
    "docstring": "Override parent constructor to allow 'bus' to be provided.",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\process\\plugins.py",
    "ast_data": "FunctionDef name:__init__ arg:self arguments arg arg arg Assign Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "xavier_uniform_",
    "source_code": "def xavier_uniform_(tensor: Tensor, gain: float=1.0, generator: _Optional[torch.Generator]=None) -> Tensor:\n    fan_in, fan_out = _calculate_fan_in_and_fan_out(tensor)\n    std = gain * math.sqrt(2.0 / float(fan_in + fan_out))\n    a = math.sqrt(3.0) * std\n    return _no_grad_uniform_(tensor, -a, a, generator)",
    "docstring": "Fill the input with values using a Xavier uniform distribution. The method is described in - Glorot, X. & Bengio, Y. (2010). The resulting tensor will have values sampled from :math: where .. math:: a = \\text{gain} \\times \\sqrt{\\frac{6}{\\text{fan\\_in} + \\text{fan\\_out}}} Also known as Glorot initialization. Args: tensor: an n-dimensional gain: an optional scaling factor generator: the torch Generator to sample from (default: None) Examples: >>> w = torch.empty(3, 5) >>> nn.init.xavier_uniform_(w, gain=nn.init.calculate_gain('relu')) Note: Be aware that ``.",
    "type": "function",
    "file_path": "pytorch\\torch\\nn\\init.py",
    "ast_data": "FunctionDef name:xavier_uniform_ arg:tensor arg:gain arg:generator arguments arg arg arg Assign Call Assign Call Call Assign Call Return return:yes Call"
  },
  {
    "library": "numpy",
    "name": "abspath",
    "source_code": "def abspath(self, path):\n    from urllib.parse import urlparse\n    splitpath = path.split(self._destpath, 2)\n    if len(splitpath) > 1:\n        path = splitpath[1]\n    scheme, netloc, upath, uparams, uquery, ufrag = urlparse(path)\n    netloc = self._sanitize_relative_path(netloc)\n    upath = self._sanitize_relative_path(upath)\n    return os.path.join(self._destpath, netloc, upath)",
    "docstring": "Return absolute path of file in the DataSource directory. If is an URL, then will return either the location the file exists locally or the location it would exist when opened using the method. Parameters ---------- path : str or pathlib.Path Can be a local file or a remote URL. Returns ------- out : str Complete path, including the destination directory. Notes ----- The functionality is based on .",
    "type": "method",
    "file_path": "numpy\\numpy\\lib\\_datasource.py",
    "ast_data": "FunctionDef name:abspath arg:self arg:path arguments arg arg Assign Call If Compare Call Assign Assign Call Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "score_fusion",
    "source_code": "@staticmethod\ndef score_fusion(scheduler: Scheduler, node1: BaseSchedulerNode, node2: BaseSchedulerNode) -> Sortable:\n    memory_score = scheduler.score_fusion_memory(node1, node2)\n    proximity_score = -max(abs(node1.min_order - node2.max_order), abs(node2.min_order - node1.max_order))\n    if node2.is_template():\n        template_score = 0\n    else:\n        template_score = 1 + (node1.is_template() == config.epilogue_fusion_first and memory_score > 0)\n    return (template_score, node1.is_reduction() == node2.is_reduction() and memory_score > 0, memory_score, proximity_score)",
    "docstring": "Assign a score (higher comes first) to the fusion of node1 and node2. When different fusions conflict with each other, this is the way we decide what order to run them in. Our current score is based on: - The type of fusion (template/reduction/etc) - Estimate of the saved memory operations - Fusions closer together in original graph order",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\choices.py",
    "ast_data": "FunctionDef name:score_fusion arg:scheduler arg:node1 arg:node2 arguments arg arg arg Assign Call Assign Call Call Call If Call Assign Assign BoolOp Compare Call Compare Return return:yes BoolOp Compare Call Call Compare"
  },
  {
    "library": "scipy",
    "name": "TtestResult",
    "source_code": "class TtestResult(TtestResultBase):\n\n    def __init__(self, statistic, pvalue, df, alternative, standard_error, estimate, statistic_np=None, xp=None):\n        super().__init__(statistic, pvalue, df=df)\n        self._alternative = alternative\n        self._standard_error = standard_error\n        self._estimate = estimate\n        self._statistic_np = statistic if statistic_np is None else statistic_np\n        self._dtype = statistic.dtype\n        self._xp = array_namespace(statistic, pvalue) if xp is None else xp\n\n    def confidence_interval(self, confidence_level=0.95):\n        low, high = _t_confidence_interval(self.df, self._statistic_np, confidence_level, self._alternative, self._dtype, self._xp)\n        low = low * self._standard_error + self._estimate\n        high = high * self._standard_error + self._estimate\n        return ConfidenceInterval(low=low, high=high)",
    "docstring": "Result of a t-test. See the documentation of the particular t-test function for more information about the definition of the statistic and meaning of the confidence interval. Attributes ---------- statistic : float or array The t-statistic of the sample. pvalue : float or array The p-value associated with the given alternative. df : float or array The number of degrees of freedom used in calculation of the t-statistic; this is one less than the size of the sample (`lowhigh`.",
    "type": "class",
    "file_path": "scipy\\scipy\\stats\\_stats_py.py",
    "ast_data": "ClassDef name:TtestResult FunctionDef name:__init__ arg:self arg:statistic arg:pvalue arg:df arg:alternative arg:standard_error arg:estimate arg:statistic_np arg:xp arguments arg arg arg arg arg arg arg arg arg Call Call Assign Assign Assign Assign Compare Assign Assign Compare Call FunctionDef name:confidence_interval arg:self arg:confidence_level arguments arg arg Assign Call Assign Assign Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "translate",
    "source_code": "def translate(self, tx, ty):\n    self._mtx[0, 2] += tx\n    self._mtx[1, 2] += ty\n    self.invalidate()\n    return self",
    "docstring": "Add a translation in place. Returns *self*, so this method can easily be chained with more calls to :meth:, :meth:, :meth: and :meth:.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\transforms.py",
    "ast_data": "FunctionDef name:translate arg:self arg:tx arg:ty arguments arg arg arg Call Return return:yes"
  },
  {
    "library": "numpy",
    "name": "dump_properties",
    "source_code": "def dump_properties(self):\n    props = []\n    for key in list(self.executables.keys()) + ['version', 'libraries', 'library_dirs', 'object_switch', 'compile_switch']:\n        if hasattr(self, key):\n            v = getattr(self, key)\n            props.append((key, None, '= ' + repr(v)))\n    props.sort()\n    pretty_printer = FancyGetopt(props)\n    for l in pretty_printer.generate_help('%s instance properties:' % self.__class__.__name__):\n        if l[:4] == '  --':\n            l = '  ' + l[4:]\n        print(l)",
    "docstring": "Print out the attributes of a compiler instance.",
    "type": "method",
    "file_path": "numpy\\numpy\\distutils\\fcompiler\\__init__.py",
    "ast_data": "FunctionDef name:dump_properties arg:self arguments arg Assign For Call Call If Call Assign Call Call Call Call Assign Call For Call If Compare Assign Call"
  },
  {
    "library": "tensorflow",
    "name": "_alloc_flow_id",
    "source_code": "def _alloc_flow_id(self) -> int:\n    flow_id = self._next_flow_id\n    self._next_flow_id += 1\n    return flow_id",
    "docstring": "Allocate a flow Id.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\client\\timeline.py",
    "ast_data": "FunctionDef name:_alloc_flow_id arg:self arguments arg Assign Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "post_starting_merge_comment",
    "source_code": "def post_starting_merge_comment(repo: GitRepo, pr: GitHubPR, explainer: TryMergeExplainer, dry_run: bool, ignore_current_checks_info: Optional[list[tuple[str, Optional[str], Optional[int]]]]=None) -> None:\n    gh_post_pr_comment(pr.org, pr.project, pr.pr_num, explainer.get_merge_message(ignore_current_checks_info), dry_run=dry_run)\n    if pr.is_ghstack_pr():\n        for additional_prs, _ in get_ghstack_prs(repo, pr):\n            if additional_prs.pr_num != pr.pr_num:\n                gh_post_pr_comment(additional_prs.org, additional_prs.project, additional_prs.pr_num, f'Starting merge as part of PR stack under #{pr.pr_num}', dry_run=dry_run)",
    "docstring": "Post the initial merge starting message on the PR. Also post a short message on all PRs in the stack.",
    "type": "function",
    "file_path": "pytorch\\.github\\scripts\\trymerge.py",
    "ast_data": "FunctionDef name:post_starting_merge_comment arg:repo arg:pr arg:explainer arg:dry_run arg:ignore_current_checks_info arguments arg arg arg arg arg Call Call If Call For Call If Compare Call"
  },
  {
    "library": "pandas",
    "name": "value_counts",
    "source_code": "def value_counts(self, dropna: bool=True) -> Series:\n    from pandas import Index, Series\n    keys, counts, _ = algos.value_counts_arraylike(self.sp_values, dropna=dropna)\n    fcounts = self.sp_index.ngaps\n    if fcounts > 0 and (not self._null_fill_value or not dropna):\n        mask = isna(keys) if self._null_fill_value else keys == self.fill_value\n        if mask.any():\n            counts[mask] += fcounts\n        else:\n            keys = np.insert(keys, 0, self.fill_value)\n            counts = np.insert(counts, 0, fcounts)\n    if not isinstance(keys, ABCIndex):\n        index = Index(keys)\n    else:\n        index = keys\n    return Series(counts, index=index, copy=False)",
    "docstring": "Returns a Series containing counts of unique values. Parameters ---------- dropna : bool, default True Don't include counts of NaN, even if NaN is in sp_values. Returns ------- counts : Series",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\arrays\\sparse\\array.py",
    "ast_data": "FunctionDef name:value_counts arg:self arg:dropna arguments arg arg Assign Call Assign If BoolOp Compare BoolOp Assign Call Compare If Call Assign Call Assign Call If Call Assign Call Assign Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_process_cond_unstacked",
    "source_code": "def _process_cond_unstacked(self, conditions, indices, inputs, output_tas):\n    not_all_done = array_ops.reshape(conditions, [])\n    new_output_tas = []\n    for i, out_ta in enumerate(output_tas):\n        inp = inputs[i]\n        new_output_tas.append(tf_cond.cond(not_all_done, lambda: out_ta, lambda: out_ta.write(0, inp)))\n    return (not_all_done, indices, inputs, new_output_tas)",
    "docstring": "Handles case when condition is unstacked. Note that all iterations end together. So we don't need to partition the inputs. When all iterations are done, we write the inputs to the TensorArrays. Note that we only write to index 0 of output_tas. Since all iterations end together, they can all be output together.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\parallel_for\\pfor.py",
    "ast_data": "FunctionDef name:_process_cond_unstacked arg:self arg:conditions arg:indices arg:inputs arg:output_tas arguments arg arg arg arg arg Assign Call Assign For Call Assign Call Call arguments arguments Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "GlobalAveragePooling3D",
    "source_code": "class GlobalAveragePooling3D(GlobalPooling3D):\n\n    def call(self, inputs):\n        if self.data_format == 'channels_last':\n            return backend.mean(inputs, axis=[1, 2, 3], keepdims=self.keepdims)\n        else:\n            return backend.mean(inputs, axis=[2, 3, 4], keepdims=self.keepdims)",
    "docstring": "Global Average pooling operation for 3D data. Args: data_format: A string, one of (default) or . The ordering of the dimensions in the inputs. corresponds to inputs with shape while corresponds to inputs with shape . It defaults to the value found in your Keras config file at . If you never set it, then it will be \"channels_last\". keepdims: A boolean, whether to keep the spatial dimensions or not. If is (default), the rank of the tensor is reduced for spatial dimensions. If is , the spatial dimensions are retained with length 1. The behavior is the same as for or . Input shape: - If : 5D tensor with shape: - If : 5D tensor with shape: Output shape: - If =False: 2D tensor with shape . - If =True: - If : 5D tensor with shape - If : 5D tensor with shape",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\layers\\pooling.py",
    "ast_data": "ClassDef name:GlobalAveragePooling3D FunctionDef name:call arg:self arg:inputs arguments arg arg If Compare Return return:yes Call Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "flipy",
    "source_code": "def flipy(self):\n    return True",
    "docstring": "Return whether y values increase from top to bottom. Note that this only affects drawing of texts.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\backend_bases.py",
    "ast_data": "FunctionDef name:flipy arg:self arguments arg Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "_rotation_coords",
    "source_code": "def _rotation_coords(self):\n    norm_elev = art3d._norm_angle(self.elev)\n    norm_azim = art3d._norm_angle(self.azim)\n    norm_roll = art3d._norm_angle(self.roll)\n    coords = f'elevation={norm_elev:.0f}°, azimuth={norm_azim:.0f}°, roll={norm_roll:.0f}°'.replace('-', '−')\n    return coords",
    "docstring": "Return the rotation angles as a string.",
    "type": "method",
    "file_path": "matplotlib\\lib\\mpl_toolkits\\mplot3d\\axes3d.py",
    "ast_data": "FunctionDef name:_rotation_coords arg:self arguments arg Assign Call Assign Call Assign Call Assign Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "should_use_cooperative_reduction",
    "source_code": "@staticmethod\ndef should_use_cooperative_reduction(features: SIMDKernelFeatures) -> bool:\n    if config.triton.force_cooperative_reductions:\n        return True\n    if not config.triton.cooperative_reductions or V.graph.get_current_device_or_throw().type == 'cpu':\n        return False\n    xhint = V.graph.sizevars.size_hint(features.numel, fallback=2)\n    if xhint <= 8:\n        threshold = 32768 * xhint\n    elif xhint <= 16:\n        threshold = 2097152\n    else:\n        return False\n    return V.graph.sizevars.statically_known_geq(features.reduction_numel, threshold)",
    "docstring": "Heuristic to decide if a cooperative reduction should be used.",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\choices.py",
    "ast_data": "FunctionDef name:should_use_cooperative_reduction arg:features arguments arg If Return return:yes If BoolOp Compare Call Return return:yes Assign Call If Compare Assign If Compare Assign Return return:yes Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "set_up_planner",
    "source_code": "@abc.abstractmethod\ndef set_up_planner(self, state_dict: STATE_DICT_TYPE, storage_meta: Optional[StorageMeta]=None, is_coordinator: bool=False) -> None:\n    pass",
    "docstring": "Initialize this planner to save ``. Implementations should save those values as they won't be provided lated in the save process. This is called on all ranks.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\checkpoint\\planner.py",
    "ast_data": "FunctionDef name:set_up_planner arg:self arg:state_dict arg:storage_meta arg:is_coordinator arguments arg arg arg arg"
  },
  {
    "library": "django",
    "name": "__init__",
    "source_code": "def __init__(self, func):\n    self.__dict__['_setupfunc'] = func\n    super().__init__()",
    "docstring": "Pass in a callable that returns the object to be wrapped. If copies are made of the resulting SimpleLazyObject, which can happen in various circumstances within Django, then you must ensure that the callable can be safely run more than once and will return the same value.",
    "type": "method",
    "file_path": "django\\django\\utils\\functional.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:func arguments arg arg Assign Call Call"
  },
  {
    "library": "tensorflow",
    "name": "error_code",
    "source_code": "@property\ndef error_code(self):\n    return self._error_code",
    "docstring": "The integer error code that describes the error.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\errors_impl.py",
    "ast_data": "FunctionDef name:error_code arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "get_associated_prs",
    "source_code": "def get_associated_prs(api: github_api.GitHubAPI, commit_hashes: Sequence[str]) -> Generator[int, None, None]:\n    regex = re.compile('PR #(\\\\d+)')\n    for commit_hash in commit_hashes:\n        response = api.get_commit('openxla/xla', commit_hash)\n        message = response['commit']['message']\n        if (maybe_match := regex.match(message)):\n            pr_number = maybe_match.group(1)\n            print(f'Found PR #{pr_number} associated with commit hash {commit_hash}')\n            yield int(pr_number)\n    print(f\"Didn't find any PRs associated with commit hashes: {commit_hashes}\")",
    "docstring": "Finds PRs associated with commits. Arguments: api: GitHubAPI object which will be used to make requests commit_hashes: A sequence of SHAs which may have PRs associated with them Yields: Associated pairs of (PR number, SHA), both as strings",
    "type": "function",
    "file_path": "tensorflow\\third_party\\xla\\.github\\workflows\\rollback_notification.py",
    "ast_data": "FunctionDef name:get_associated_prs arg:api arg:commit_hashes arguments arg arg Assign Call For Assign Call Assign If Call Assign Call Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "get_data",
    "source_code": "def get_data(self, name: str, return_original: bool=True):\n    if name not in self.data_groups:\n        raise ValueError('data with specified name does not exist')\n    if return_original:\n        if not parametrize.is_parametrized(self._container, name):\n            raise ValueError('mask squashed - original mask value does not exist')\n        data = getattr(self._container.parametrizations, name).original\n        return data\n    else:\n        return getattr(self._container, name)",
    "docstring": "Returns weight tensor (or data) Args: - name: name of the data to be returned - return_original returns weight tensor without applying parametrization if True else - returns the sparsified version (parametrized)",
    "type": "method",
    "file_path": "pytorch\\torch\\ao\\pruning\\_experimental\\data_sparsifier\\base_data_sparsifier.py",
    "ast_data": "FunctionDef name:get_data arg:self arg:name arg:return_original arguments arg arg arg If Compare Raise Call If If Call Raise Call Assign Call Return return:yes Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "scatter",
    "source_code": "@tf_should_use.should_use_result\ndef scatter(self, indices, value, name=None):\n    with ops.name_scope(name, 'TensorArrayScatter', [self._handle, value, indices]):\n        value = ops.convert_to_tensor(value, preferred_dtype=self._dtype, name='value')\n        _check_dtypes(value, self._dtype)\n        if not context.executing_eagerly():\n            self._check_element_shape(value.shape[1:])\n        with self._maybe_colocate_with(value):\n            flow_out = gen_data_flow_ops.tensor_array_scatter_v3(handle=self._handle, indices=indices, value=value, flow_in=self._flow, name=name)\n        return build_ta_with_new_flow(self, flow_out)",
    "docstring": "See TensorArray.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\tensor_array_ops.py",
    "ast_data": "FunctionDef name:scatter arg:self arg:indices arg:value arg:name arguments arg arg arg arg With Call Assign Call Call If Call Call With Call Assign Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_append_activity_regularizer_loss",
    "source_code": "def _append_activity_regularizer_loss(layer, call_fn_with_losses, activity_regularizer_fn):\n\n    def fn(inputs, *args, **kwargs):\n        outputs, losses = call_fn_with_losses(inputs, *args, **kwargs)\n        losses.append(activity_regularizer_fn(outputs))\n        return (outputs, losses)\n    return _create_call_fn_decorator(layer, fn)",
    "docstring": "Appends activity regularizer loss to losses returned by the wrapped fn.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\saving\\saved_model\\save_impl.py",
    "ast_data": "FunctionDef name:_append_activity_regularizer_loss arg:layer arg:call_fn_with_losses arg:activity_regularizer_fn arguments arg arg arg FunctionDef name:fn arg:inputs arguments arg arg arg Assign Call Call Call Return return:yes Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "_mpl_coords",
    "source_code": "def _mpl_coords(self, xy=None):\n    if xy is None:\n        surface = self.get_native().get_surface()\n        is_over, x, y, mask = surface.get_device_position(self.get_display().get_default_seat().get_pointer())\n    else:\n        x, y = xy\n    x = x * self.device_pixel_ratio\n    y = self.figure.bbox.height - y * self.device_pixel_ratio\n    return (x, y)",
    "docstring": "Convert the *xy* position of a GTK event, or of the current cursor position if *xy* is None, to Matplotlib coordinates. GTK use logical pixels, but the figure is scaled to physical pixels for rendering. Transform to physical pixels so that all of the down-stream transforms work as expected. Also, the origin is different and needs to be corrected.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\backends\\backend_gtk4.py",
    "ast_data": "FunctionDef name:_mpl_coords arg:self arg:xy arguments arg arg If Compare Assign Call Call Assign Call Call Call Call Assign Assign Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_cartesian_product",
    "source_code": "def _cartesian_product(first, second):\n    return [os.path.join(f, s) for f in first for s in second]",
    "docstring": "Returns all path combinations of first and second.",
    "type": "function",
    "file_path": "tensorflow\\third_party\\xla\\third_party\\gpus\\find_cuda_config.py",
    "ast_data": "FunctionDef name:_cartesian_product arg:first arg:second arguments arg arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "cleanup_recompute_tags",
    "source_code": "def cleanup_recompute_tags(joint_module: fx.GraphModule) -> fx.GraphModule:\n    for node in joint_module.graph.nodes:\n        if must_recompute(node):\n            for user in node.users:\n                if must_recompute(user) and user.meta['ac_graph_id'] > node.meta['ac_graph_id']:\n                    node.meta['recompute'] = CheckpointPolicy.MUST_SAVE\n            if node.meta.get('has_backward_hook', False) and (not any((must_recompute(user) for user in node.users))):\n                node.meta['recompute'] = CheckpointPolicy.MUST_SAVE\n    return joint_module",
    "docstring": "If there are two consecutive checkpointed blocks with no operator in between, we would still want to stash the tensor at the boundary of checkpointed blocks. The following pass makes the last output node non-recomputable to allow for that.",
    "type": "function",
    "file_path": "pytorch\\torch\\_functorch\\partitioners.py",
    "ast_data": "FunctionDef name:cleanup_recompute_tags arg:joint_module arguments arg For If Call For If BoolOp Call Compare Assign If BoolOp Call Call Call Assign Return return:yes"
  },
  {
    "library": "django",
    "name": "get_month_format",
    "source_code": "def get_month_format(self):\n    return self.month_format",
    "docstring": "Get a month format string in strptime syntax to be used to parse the month from url variables.",
    "type": "method",
    "file_path": "django\\django\\views\\generic\\dates.py",
    "ast_data": "FunctionDef name:get_month_format arg:self arguments arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "__init__",
    "source_code": "def __init__(self, path: str, fqn_to_index_mapping: dict[str, int], token: Optional[str]=None) -> None:\n    from huggingface_hub import HfFileSystem\n    if HfFileSystem.protocol not in fsspec.available_protocols():\n        fsspec.register_implementation(HfFileSystem.protocol, HfFileSystem)\n    if token is not None:\n        super().__init__(path=path, token=token, serialization_format=SerializationFormat.SAFETENSORS)\n    else:\n        super().__init__(path=path, serialization_format=SerializationFormat.SAFETENSORS)\n    self._fqn_to_index_mapping: dict[str, int] = fqn_to_index_mapping",
    "docstring": "Initialize the huggingface writer pointing to path. Args: path: hf directory where the checkpoint will be written to. Should begin with hf://. token: The token to use to authenticate with huggingface hub. fqn_to_index_mapping: A mapping from tensor FQN to the index of the file that the tensor should be written to. Indices are from 1 to N, where N is the number of files.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\checkpoint\\_hf_storage.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:path arg:fqn_to_index_mapping arg:token arguments arg arg arg arg If Compare Call Call If Compare Call Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "partial_run",
    "source_code": "def partial_run(self, handle, fetches, feed_dict=None):\n    raise NotImplementedError('partial_run')",
    "docstring": "Continues the execution with additional feeds and fetches.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\client\\session.py",
    "ast_data": "FunctionDef name:partial_run arg:self arg:handle arg:fetches arg:feed_dict arguments arg arg arg arg Raise Call"
  },
  {
    "library": "matplotlib",
    "name": "set_data",
    "source_code": "def set_data(self, x=None, y=None, dx=None, dy=None, width=None):\n    if x is not None:\n        self._x = x\n    if y is not None:\n        self._y = y\n    if dx is not None:\n        self._dx = dx\n    if dy is not None:\n        self._dy = dy\n    if width is not None:\n        self._width = width\n    self._patch_transform = transforms.Affine2D().scale(np.hypot(self._dx, self._dy), self._width).rotate(np.arctan2(self._dy, self._dx)).translate(self._x, self._y).frozen()",
    "docstring": "Set x, y, dx, dy and width. Values left as None will not be updated. Parameters ---------- x, y : float or None, default: None The x and y coordinates of the arrow base. dx, dy : float or None, default: None The length of the arrow along x and y direction. width : float or None, default: None Width of full arrow tail.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\patches.py",
    "ast_data": "FunctionDef name:set_data arg:self arg:x arg:y arg:dx arg:dy arg:width arguments arg arg arg arg arg arg If Compare Assign If Compare Assign If Compare Assign If Compare Assign If Compare Assign Assign Call Call Call Call Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_SparseMatrixAddGrad",
    "source_code": "@ops.RegisterGradient('SparseMatrixAdd')\ndef _SparseMatrixAddGrad(op: ops.Operation, grad):\n    a_csr, b_csr, alpha, beta = op.inputs\n    return (sparse_csr_matrix_ops.sparse_matrix_mul(_PruneCSRMatrix(grad, a_csr), alpha), sparse_csr_matrix_ops.sparse_matrix_mul(_PruneCSRMatrix(grad, b_csr), beta), None, None)",
    "docstring": "Gradient for sparse_matrix_add op.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\linalg\\sparse\\sparse_csr_matrix_grad.py",
    "ast_data": "FunctionDef name:_SparseMatrixAddGrad arg:op arg:grad arguments arg arg Assign Return return:yes Call Call Call Call Call"
  },
  {
    "library": "matplotlib",
    "name": "YearLocator",
    "source_code": "class YearLocator(RRuleLocator):\n\n    def __init__(self, base=1, month=1, day=1, tz=None):\n        rule = rrulewrapper(YEARLY, interval=base, bymonth=month, bymonthday=day, **self.hms0d)\n        super().__init__(rule, tz=tz)\n        self.base = ticker._Edge_integer(base, 0)\n\n    def _create_rrule(self, vmin, vmax):\n        ymin = max(self.base.le(vmin.year) * self.base.step, 1)\n        ymax = min(self.base.ge(vmax.year) * self.base.step, 9999)\n        c = self.rule._construct\n        replace = {'year': ymin, 'month': c.get('bymonth', 1), 'day': c.get('bymonthday', 1), 'hour': 0, 'minute': 0, 'second': 0}\n        start = vmin.replace(**replace)\n        stop = start.replace(year=ymax)\n        self.rule.set(dtstart=start, until=stop)\n        return (start, stop)",
    "docstring": "Make ticks on a given day of each year that is a multiple of base. Examples:: # Tick every year on Jan 1st locator = YearLocator() # Tick every 5 years on July 4th locator = YearLocator(5, month=7, day=4)",
    "type": "class",
    "file_path": "matplotlib\\lib\\matplotlib\\dates.py",
    "ast_data": "ClassDef name:YearLocator FunctionDef name:__init__ arg:self arg:base arg:month arg:day arg:tz arguments arg arg arg arg arg Assign Call Call Call Assign Call FunctionDef name:_create_rrule arg:self arg:vmin arg:vmax arguments arg arg arg Assign Call Call Assign Call Call Assign Assign Call Call Assign Call Assign Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "CumulativeDistributionTransform",
    "source_code": "class CumulativeDistributionTransform(Transform):\n    bijective = True\n    codomain = constraints.unit_interval\n    sign = +1\n\n    def __init__(self, distribution: Distribution, cache_size: int=0) -> None:\n        super().__init__(cache_size=cache_size)\n        self.distribution = distribution\n\n    @property\n    def domain(self) -> Optional[constraints.Constraint]:\n        return self.distribution.support\n\n    def _call(self, x):\n        return self.distribution.cdf(x)\n\n    def _inverse(self, y):\n        return self.distribution.icdf(y)\n\n    def log_abs_det_jacobian(self, x, y):\n        return self.distribution.log_prob(x)\n\n    def with_cache(self, cache_size=1):\n        if self._cache_size == cache_size:\n            return self\n        return CumulativeDistributionTransform(self.distribution, cache_size=cache_size)",
    "docstring": "Transform via the cumulative distribution function of a probability distribution. Args: distribution (Distribution): Distribution whose cumulative distribution function to use for the transformation. Example:: # Construct a Gaussian copula from a multivariate normal. base_dist = MultivariateNormal( loc=torch.zeros(2), scale_tril=LKJCholesky(2).sample(), ) transform = CumulativeDistributionTransform(Normal(0, 1)) copula = TransformedDistribution(base_dist, [transform])",
    "type": "class",
    "file_path": "pytorch\\torch\\distributions\\transforms.py",
    "ast_data": "ClassDef name:CumulativeDistributionTransform Assign Assign Assign FunctionDef name:__init__ arg:self arg:distribution arg:cache_size arguments arg arg arg Call Call Assign FunctionDef name:domain arg:self arguments arg Return return:yes FunctionDef name:_call arg:self arg:x arguments arg arg Return return:yes Call FunctionDef name:_inverse arg:self arg:y arguments arg arg Return return:yes Call FunctionDef name:log_abs_det_jacobian arg:self arg:x arg:y arguments arg arg arg Return return:yes Call FunctionDef name:with_cache arg:self arg:cache_size arguments arg arg If Compare Return return:yes Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "can_zoom",
    "source_code": "def can_zoom(self):\n    return False",
    "docstring": "Return whether this Axes supports the zoom box button functionality. A polar Axes does not support zoom boxes.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\projections\\polar.py",
    "ast_data": "FunctionDef name:can_zoom arg:self arguments arg Return return:yes"
  },
  {
    "library": "django",
    "name": "auth_code",
    "source_code": "def auth_code(self, target):\n    return capi.get_auth_code(self.ptr, target if target is None else force_bytes(target))",
    "docstring": "Return the authority code for the given string target node.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\gdal\\srs.py",
    "ast_data": "FunctionDef name:auth_code arg:self arg:target arguments arg arg Return return:yes Call Compare Call"
  },
  {
    "library": "numpy",
    "name": "have_f90c",
    "source_code": "def have_f90c(self):\n    simple_fortran_subroutine = '\\n        subroutine simple\\n        end\\n        '\n    config_cmd = self.get_config_cmd()\n    flag = config_cmd.try_compile(simple_fortran_subroutine, lang='f90')\n    return flag",
    "docstring": "Check for availability of Fortran 90 compiler. Use it inside source generating function to ensure that setup distribution instance has been initialized. Notes ----- True if a Fortran 90 compiler is available (because a simple Fortran 90 code was able to be compiled successfully)",
    "type": "method",
    "file_path": "numpy\\numpy\\distutils\\misc_util.py",
    "ast_data": "FunctionDef name:have_f90c arg:self arguments arg Assign Assign Call Assign Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "compute_normalized_l2_error",
    "source_code": "@maybe_dequantize_first_two_tensor_args_and_handle_tuples\ndef compute_normalized_l2_error(x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:\n    return torch.sqrt(((x - y) ** 2).sum() / (x ** 2).sum())",
    "docstring": "Computes the normalized L2 error between and . Args: x: Tensor or tuple of tensors y: Tensor or tuple of tensors Return: float or tuple of floats",
    "type": "function",
    "file_path": "pytorch\\torch\\ao\\ns\\fx\\utils.py",
    "ast_data": "FunctionDef name:compute_normalized_l2_error arg:x arg:y arguments arg arg Return return:yes Call Call Call"
  },
  {
    "library": "scikit-learn",
    "name": "fit_predict",
    "source_code": "def fit_predict(self, X, y=None):\n    self.fit(X)\n    return self.labels_",
    "docstring": "Cluster X and return the associated cluster labels. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features), or ndarray of shape (n_samples, n_samples) A feature array, or array of distances between samples if . y : None Ignored. Returns ------- y : ndarray of shape (n_samples,) Cluster labels.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\cluster\\_hdbscan\\hdbscan.py",
    "ast_data": "FunctionDef name:fit_predict arg:self arg:X arg:y arguments arg arg arg Call Return return:yes"
  },
  {
    "library": "pandas",
    "name": "argmin",
    "source_code": "def argmin(self, skipna: bool=True) -> int:\n    validate_bool_kwarg(skipna, 'skipna')\n    if not skipna and self._hasna:\n        raise ValueError('Encountered an NA value with skipna=False')\n    return nargminmax(self, 'argmin')",
    "docstring": "Return the index of minimum value. In case of multiple occurrences of the minimum value, the index corresponding to the first occurrence is returned. Parameters ---------- skipna : bool, default True Returns ------- int See Also -------- ExtensionArray.argmax : Return the index of the maximum value. Examples -------- >>> arr = pd.array([3, 1, 2, 5, 4]) >>> arr.argmin() np.int64(1)",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\arrays\\base.py",
    "ast_data": "FunctionDef name:argmin arg:self arg:skipna arguments arg arg Call If BoolOp Raise Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_old_get_dense_tensor_internal",
    "source_code": "def _old_get_dense_tensor_internal(self, sparse_tensors, weight_collections, trainable):\n    embedding_shape = (self.categorical_column._num_buckets, self.dimension)\n    if weight_collections and ops.GraphKeys.GLOBAL_VARIABLES not in weight_collections:\n        weight_collections.append(ops.GraphKeys.GLOBAL_VARIABLES)\n    embedding_weights = variable_scope.get_variable(name='embedding_weights', shape=embedding_shape, dtype=dtypes.float32, initializer=self.initializer, trainable=self.trainable and trainable, collections=weight_collections)\n    return self._get_dense_tensor_internal_helper(sparse_tensors, embedding_weights)",
    "docstring": "Private method that follows the signature of _get_dense_tensor.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\feature_column\\feature_column_v2.py",
    "ast_data": "FunctionDef name:_old_get_dense_tensor_internal arg:self arg:sparse_tensors arg:weight_collections arg:trainable arguments arg arg arg arg Assign If BoolOp Compare Call Assign Call BoolOp Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "BatchSampler",
    "source_code": "class BatchSampler(Sampler[list[int]]):\n\n    def __init__(self, sampler: Union[Sampler[int], Iterable[int]], batch_size: int, drop_last: bool) -> None:\n        if not isinstance(batch_size, int) or isinstance(batch_size, bool) or batch_size <= 0:\n            raise ValueError(f'batch_size should be a positive integer value, but got batch_size={batch_size}')\n        if not isinstance(drop_last, bool):\n            raise ValueError(f'drop_last should be a boolean value, but got drop_last={drop_last}')\n        self.sampler = sampler\n        self.batch_size = batch_size\n        self.drop_last = drop_last\n\n    def __iter__(self) -> Iterator[list[int]]:\n        sampler_iter = iter(self.sampler)\n        if self.drop_last:\n            args = [sampler_iter] * self.batch_size\n            for batch_droplast in zip(*args):\n                yield [*batch_droplast]\n        else:\n            batch = [*itertools.islice(sampler_iter, self.batch_size)]\n            while batch:\n                yield batch\n                batch = [*itertools.islice(sampler_iter, self.batch_size)]\n\n    def __len__(self) -> int:\n        if self.drop_last:\n            return len(self.sampler) // self.batch_size\n        else:\n            return (len(self.sampler) + self.batch_size - 1) // self.batch_size",
    "docstring": "Wraps another sampler to yield a mini-batch of indices. Args: sampler (Sampler or Iterable): Base sampler. Can be any iterable object batch_size (int): Size of mini-batch. drop_last (bool): If `` Example: >>> list(BatchSampler(SequentialSampler(range(10)), batch_size=3, drop_last=False)) [[0, 1, 2], [3, 4, 5], [6, 7, 8], [9]] >>> list(BatchSampler(SequentialSampler(range(10)), batch_size=3, drop_last=True)) [[0, 1, 2], [3, 4, 5], [6, 7, 8]]",
    "type": "class",
    "file_path": "pytorch\\torch\\utils\\data\\sampler.py",
    "ast_data": "ClassDef name:BatchSampler FunctionDef name:__init__ arg:self arg:sampler arg:batch_size arg:drop_last arguments arg arg arg arg If BoolOp Call Call Compare Raise Call If Call Raise Call Assign Assign Assign FunctionDef name:__iter__ arg:self arguments arg Assign Call If Assign For Call Assign Call While Assign Call FunctionDef name:__len__ arg:self arguments arg If Return return:yes Call Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "_convert_key",
    "source_code": "def _convert_key(self, key):\n    if self.ndim == 1 and len(key) > 1:\n        key = (key,)\n    return key",
    "docstring": "Require they keys to be the same type as the index. (so we don't fallback)",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\indexing.py",
    "ast_data": "FunctionDef name:_convert_key arg:self arg:key arguments arg arg If BoolOp Compare Compare Call Assign Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "int",
    "source_code": "def int(self):\n    return self._to(torch.int)",
    "docstring": "Casts this storage to int type.",
    "type": "method",
    "file_path": "pytorch\\torch\\storage.py",
    "ast_data": "FunctionDef name:int arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "get_submatrix",
    "source_code": "def get_submatrix(self, i, data):\n    data = check_array(data, accept_sparse='csr')\n    row_ind, col_ind = self.get_indices(i)\n    return data[row_ind[:, np.newaxis], col_ind]",
    "docstring": "Return the submatrix corresponding to bicluster . Parameters ---------- i : int The index of the cluster. data : array-like of shape (n_samples, n_features) The data. Returns ------- submatrix : ndarray of shape (n_rows, n_cols) The submatrix corresponding to bicluster . Notes ----- Works with sparse matrices. Only works if `` attributes exist.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\base.py",
    "ast_data": "FunctionDef name:get_submatrix arg:self arg:i arg:data arguments arg arg arg Assign Call Assign Call Return return:yes"
  },
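  get_submatrix comes from sklearn's BiclusterMixin; a hedged usage sketch with SpectralCoclustering (any estimator exposing rows_/columns_ would work the same way):
  ```python
  import numpy as np
  from sklearn.cluster import SpectralCoclustering

  data = np.random.default_rng(0).random((10, 10)) + 0.1  # strictly positive entries
  model = SpectralCoclustering(n_clusters=2, random_state=0).fit(data)
  sub = model.get_submatrix(0, data)  # rows and columns assigned to bicluster 0
  print(sub.shape)
  ```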
  {
    "library": "scipy",
    "name": "time_interpolate_eval",
    "source_code": "def time_interpolate_eval(self, n_samples, method):\n    self.interpolator(self.xp)",
    "docstring": "Time the evaluation.",
    "type": "method",
    "file_path": "scipy\\benchmarks\\benchmarks\\interpolate.py",
    "ast_data": "FunctionDef name:time_interpolate_eval arg:self arg:n_samples arg:method arguments arg arg arg Call"
  },
  {
    "library": "tensorflow",
    "name": "body",
    "source_code": "def body(i, *args):\n    del args\n    fn_result = fn(ctx, iterator.get_next())\n    flat_last_step_outputs = nest.flatten(ctx.last_step_outputs)\n    with ops.control_dependencies([fn_result]):\n        return [i + 1] + flat_last_step_outputs",
    "docstring": "A wrapper around to create the while loop body.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\one_device_strategy.py",
    "ast_data": "FunctionDef name:body arg:i arguments arg arg Assign Call Call Assign Call With Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "infodict",
    "source_code": "def infodict(self):\n    return self._ensure_file().infoDict",
    "docstring": "Return a modifiable information dictionary object (see PDF reference section 10.2.1 'Document Information Dictionary').",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\backends\\backend_pdf.py",
    "ast_data": "FunctionDef name:infodict arg:self arguments arg Return return:yes Call"
  },
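  The same infoDict is reachable through the public PdfPages API; a small sketch of setting PDF metadata (keys follow the PDF Document Information Dictionary):
  ```python
  import matplotlib.pyplot as plt
  from matplotlib.backends.backend_pdf import PdfPages

  with PdfPages("report.pdf") as pdf:
      fig, ax = plt.subplots()
      ax.plot([0, 1], [0, 1])
      pdf.savefig(fig)
      info = pdf.infodict()          # modifiable information dictionary
      info["Title"] = "Example report"
      info["Author"] = "Jane Doe"
  ```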
  {
    "library": "scipy",
    "name": "to_zpk",
    "source_code": "def to_zpk(self):\n    return copy.deepcopy(self)",
    "docstring": "Return a copy of the current 'ZerosPolesGain' system. Returns ------- sys : instance of The current system (copy)",
    "type": "method",
    "file_path": "scipy\\scipy\\signal\\_ltisys.py",
    "ast_data": "FunctionDef name:to_zpk arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "LazyConvTranspose2d",
    "source_code": "class LazyConvTranspose2d(_LazyConvXdMixin, ConvTranspose2d):\n    cls_to_become = ConvTranspose2d\n\n    def __init__(self, out_channels: int, kernel_size: _size_2_t, stride: _size_2_t=1, padding: _size_2_t=0, output_padding: _size_2_t=0, groups: int=1, bias: bool=True, dilation: int=1, padding_mode: str='zeros', device=None, dtype=None) -> None:\n        factory_kwargs = {'device': device, 'dtype': dtype}\n        super().__init__(0, 0, kernel_size, stride, padding, output_padding, groups, False, dilation, padding_mode, **factory_kwargs)\n        self.weight = UninitializedParameter(**factory_kwargs)\n        self.out_channels = out_channels\n        if bias:\n            self.bias = UninitializedParameter(**factory_kwargs)\n\n    def _get_num_spatial_dims(self) -> int:\n        return 2",
    "docstring": "A :class: module with lazy initialization of the `ConvTranspose2dweightbiastorch.nn.modules.lazy.LazyModuleMixintorch.nn.ConvTranspose2dtorch.nn.modules.lazy.LazyModuleMixin`",
    "type": "class",
    "file_path": "pytorch\\torch\\nn\\modules\\conv.py",
    "ast_data": "ClassDef name:LazyConvTranspose2d Assign FunctionDef name:__init__ arg:self arg:out_channels arg:kernel_size arg:stride arg:padding arg:output_padding arg:groups arg:bias arg:dilation arg:padding_mode arg:device arg:dtype arguments arg arg arg arg arg arg arg arg arg arg arg arg Assign Call Call Assign Call Assign If Assign Call FunctionDef name:_get_num_spatial_dims arg:self arguments arg Return return:yes"
  },
  {
    "library": "cherrypy",
    "name": "_setup",
    "source_code": "def _setup(self):\n    conf = self._merged_args()\n    p = conf.pop('priority', None)\n    cherrypy.serving.request.hooks.attach('before_handler', self._wrapper, priority=p, **conf)",
    "docstring": "Wire caching into ``.",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\_cptools.py",
    "ast_data": "FunctionDef name:_setup arg:self arguments arg Assign Call Assign Call Call"
  },
  {
    "library": "pandas",
    "name": "_reset_block_mgr_locs",
    "source_code": "def _reset_block_mgr_locs(nbs: list[Block], locs) -> None:\n    for nb in nbs:\n        nblocs = locs[nb.mgr_locs.indexer]\n        nb.mgr_locs = nblocs",
    "docstring": "Reset mgr_locs to correspond to our original DataFrame.",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\internals\\ops.py",
    "ast_data": "FunctionDef name:_reset_block_mgr_locs arg:nbs arg:locs arguments arg arg For Assign Assign"
  },
  {
    "library": "tensorflow",
    "name": "eq",
    "source_code": "def eq(a, b):\n    if tensor_util.is_tf_type(a) or tensor_util.is_tf_type(b):\n        return _tf_equal(a, b)\n    return _py_equal(a, b)",
    "docstring": "Functional form of \"equal\".",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\autograph\\operators\\logical.py",
    "ast_data": "FunctionDef name:eq arg:a arg:b arguments arg arg If BoolOp Call Call Return return:yes Call Return return:yes Call"
  },
  {
    "library": "sphinx",
    "name": "_handle_long_word",
    "source_code": "def _handle_long_word(self, reversed_chunks: list[str], cur_line: list[str], cur_len: int, width: int) -> None:\n    space_left = max(width - cur_len, 1)\n    if self.break_long_words:\n        l, r = self._break_word(reversed_chunks[-1], space_left)\n        cur_line.append(l)\n        reversed_chunks[-1] = r\n    elif not cur_line:\n        cur_line.append(reversed_chunks.pop())",
    "docstring": "Override original method for using self._break_word() instead of slice.",
    "type": "method",
    "file_path": "sphinx\\sphinx\\writers\\text.py",
    "ast_data": "FunctionDef name:_handle_long_word arg:self arg:reversed_chunks arg:cur_line arg:cur_len arg:width arguments arg arg arg arg arg Assign Call If Assign Call Call Assign If Call Call"
  },
  {
    "library": "pytorch",
    "name": "NearlyDiagonalSparsifier",
    "source_code": "class NearlyDiagonalSparsifier(base_sparsifier.BaseSparsifier):\n\n    def __init__(self, nearliness: int=1):\n        defaults = {'nearliness': nearliness}\n        super().__init__(defaults=defaults)\n\n    def update_mask(self, module, tensor_name, nearliness, **kwargs):\n        mask = getattr(module.parametrizations, tensor_name)[0].mask\n        mask.data = torch.zeros_like(mask)\n        if nearliness <= 0:\n            return\n        tensor = getattr(module, tensor_name)\n        height, width = tensor.shape\n        if nearliness % 2 == 0:\n            raise ValueError('nearliness can only be an odd number')\n        dist_to_diagonal = nearliness // 2\n        if dist_to_diagonal >= min(height, width):\n            raise ValueError('nearliness cannot be larger than the dimensions of tensor.')\n        for row in range(0, height):\n            low = max(0, row - dist_to_diagonal)\n            high = min(width, row + dist_to_diagonal + 1)\n            mask[row, low:high].fill_(1)",
    "docstring": "Nearly Diagonal Sparsifier This sparsifier creates a nearly diagonal mask to be applied to the weight matrix. Nearly Diagonal Matrix is a matrix that contains non-zero elements near the diagonal and the rest are zero. An example of a nearly diagonal matrix with degree (or nearliness) 3 and 5 are follows respectively. 1 1 0 0 1 1 1 0 1 1 1 0 1 1 1 1 0 1 1 1 1 1 1 1 0 0 1 1 0 1 1 1 Note that a nearly diagonal matrix with degree 1 is just a matrix with main diagonal populated This sparsifier is controlled by one variable: 1. defines the number of non-zero diagonal lines that are closest to the main diagonal. Currently - supports only odd number Note: This can be accelerated (vectorized) once the Spdiagonal feature (PR: #78439) is landed or the banded matrix feature is landed: Args: nearliness: The degree of nearliness (default = 1)",
    "type": "class",
    "file_path": "pytorch\\torch\\ao\\pruning\\sparsifier\\nearly_diagonal_sparsifier.py",
    "ast_data": "ClassDef name:NearlyDiagonalSparsifier FunctionDef name:__init__ arg:self arg:nearliness arguments arg arg Assign Call Call FunctionDef name:update_mask arg:self arg:module arg:tensor_name arg:nearliness arguments arg arg arg arg arg Assign Call Assign Call If Compare Return return:no Assign Call Assign If Compare Raise Call Assign If Compare Call Raise Call For Call Assign Call Assign Call Call"
  },
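  The banding logic in update_mask is easy to replay standalone; a sketch (nearly_diagonal_mask is our name, not the library's) that reproduces the degree-3 matrix from the docstring:
  ```python
  import torch

  def nearly_diagonal_mask(height: int, width: int, nearliness: int) -> torch.Tensor:
      # keep entries within nearliness // 2 of the main diagonal, zero the rest
      if nearliness % 2 == 0:
          raise ValueError("nearliness can only be an odd number")
      dist = nearliness // 2
      mask = torch.zeros(height, width)
      for row in range(height):
          mask[row, max(0, row - dist):min(width, row + dist + 1)] = 1
      return mask

  print(nearly_diagonal_mask(4, 4, nearliness=3))
  # tensor([[1., 1., 0., 0.],
  #         [1., 1., 1., 0.],
  #         [0., 1., 1., 1.],
  #         [0., 0., 1., 1.]])
  ```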
  {
    "library": "pytorch",
    "name": "get_framework_stack",
    "source_code": "def get_framework_stack(num_frames: int=25, cpp: bool=False) -> list[dict[str, Any]]:\n    from torch.fx.experimental.symbolic_shapes import uninteresting_files\n    from torch.utils._traceback import CapturedTraceback\n    tb = CapturedTraceback.extract(cpp=cpp).summary()\n    tb = [frame for frame in tb if frame.filename.endswith('.py') and frame.filename not in uninteresting_files() or ('at::' in frame.name or 'torch::' in frame.name)]\n    return from_traceback(tb[-1 * num_frames:])",
    "docstring": "Returns the traceback for the user stack and the framework stack",
    "type": "function",
    "file_path": "pytorch\\torch\\_logging\\structured.py",
    "ast_data": "FunctionDef name:get_framework_stack arg:num_frames arg:cpp arguments arg arg Assign Call Call Assign BoolOp BoolOp Call Compare Call BoolOp Compare Compare Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "connection_info",
    "source_code": "def connection_info():\n    result = ['{fig} - {socket}'.format(fig=manager.canvas.figure.get_label() or f'Figure {manager.num}', socket=manager.web_sockets) for manager in Gcf.get_all_fig_managers()]\n    if not is_interactive():\n        result.append(f'Figures pending show: {len(Gcf.figs)}')\n    return '\\n'.join(result)",
    "docstring": "Return a string showing the figure and connection status for the backend. This is intended as a diagnostic tool, and not for general use.",
    "type": "function",
    "file_path": "matplotlib\\lib\\matplotlib\\backends\\backend_nbagg.py",
    "ast_data": "FunctionDef name:connection_info arguments Assign Call BoolOp Call Call If Call Call Call Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "__init__",
    "source_code": "def __init__(self, nrows, ncols, height_ratios=None, width_ratios=None):\n    if not isinstance(nrows, Integral) or nrows <= 0:\n        raise ValueError(f'Number of rows must be a positive integer, not {nrows!r}')\n    if not isinstance(ncols, Integral) or ncols <= 0:\n        raise ValueError(f'Number of columns must be a positive integer, not {ncols!r}')\n    self._nrows, self._ncols = (nrows, ncols)\n    self.set_height_ratios(height_ratios)\n    self.set_width_ratios(width_ratios)",
    "docstring": "Parameters ---------- nrows, ncols : int The number of rows and columns of the grid. width_ratios : array-like of length *ncols*, optional Defines the relative widths of the columns. Each column gets a relative width of ``. If not given, all rows will have the same height.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\gridspec.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:nrows arg:ncols arg:height_ratios arg:width_ratios arguments arg arg arg arg arg If BoolOp Call Compare Raise Call If BoolOp Call Compare Raise Call Assign Call Call"
  },
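  A brief usage sketch of the ratio parameters validated above (standard matplotlib API):
  ```python
  import matplotlib.pyplot as plt
  from matplotlib.gridspec import GridSpec

  fig = plt.figure()
  gs = GridSpec(2, 2, figure=fig, width_ratios=[2, 1], height_ratios=[1, 3])
  ax_top = fig.add_subplot(gs[0, :])    # top row spans both columns
  ax_left = fig.add_subplot(gs[1, 0])   # gets 2/3 of the width
  ax_right = fig.add_subplot(gs[1, 1])  # gets 1/3 of the width
  ```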
  {
    "library": "pytorch",
    "name": "_insert_module_as_submodule",
    "source_code": "def _insert_module_as_submodule(self, mod: torch.nn.Module) -> str:\n    idx = 0\n    mod_name = mod.__class__.__name__.lower()\n    path = f'{mod_name}_{idx}'\n    while hasattr(self.root, path):\n        path = f'{mod_name}_{idx}'\n        idx += 1\n    self.root.add_module(path, mod)\n    return path",
    "docstring": "Helper method which tries to insert a module that was not declared as submodule.",
    "type": "method",
    "file_path": "pytorch\\torch\\fx\\experimental\\meta_tracer.py",
    "ast_data": "FunctionDef name:_insert_module_as_submodule arg:self arg:mod arguments arg arg Assign Assign Call Assign While Call Assign Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "narrow_tensor_by_index",
    "source_code": "def narrow_tensor_by_index(tensor: torch.Tensor, offsets: Sequence[int], sizes: Sequence[int]) -> torch.Tensor:\n    narrowed_tensor = tensor\n    for idx, (offset, size) in enumerate(zip(offsets, sizes)):\n        if size < tensor.size(idx):\n            narrowed_tensor = narrowed_tensor.narrow(idx, offset, size)\n    return narrowed_tensor",
    "docstring": "Narrow the tensor according to ``.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\_shard\\_utils.py",
    "ast_data": "FunctionDef name:narrow_tensor_by_index arg:tensor arg:offsets arg:sizes arguments arg arg arg Assign For Call Call If Compare Call Assign Call Return return:yes"
  },
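  The helper is private to torch.distributed, but its narrowing loop is plain torch; a standalone sketch (narrow_by_index is our name):
  ```python
  import torch

  def narrow_by_index(tensor, offsets, sizes):
      # narrow each dimension whose requested size is smaller than the full extent
      out = tensor
      for dim, (offset, size) in enumerate(zip(offsets, sizes)):
          if size < tensor.size(dim):
              out = out.narrow(dim, offset, size)
      return out

  t = torch.arange(24).reshape(4, 6)
  print(narrow_by_index(t, offsets=(1, 2), sizes=(2, 3)))
  # rows 1..2 and columns 2..4 of t, returned as a view
  ```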
  {
    "library": "tensorflow",
    "name": "ObjectIdentityWeakKeyDictionary",
    "source_code": "class ObjectIdentityWeakKeyDictionary(ObjectIdentityDictionary):\n    __slots__ = ['__weakref__']\n\n    def _wrap_key(self, key):\n        return _WeakObjectIdentityWrapper(key)\n\n    def __len__(self):\n        return len(list(self._storage))\n\n    def __iter__(self):\n        keys = self._storage.keys()\n        for key in keys:\n            unwrapped = key.unwrapped\n            if unwrapped is None:\n                del self[key]\n            else:\n                yield unwrapped",
    "docstring": "Like weakref.WeakKeyDictionary, but compares objects with \"is\".",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\utils\\object_identity.py",
    "ast_data": "ClassDef name:ObjectIdentityWeakKeyDictionary Assign FunctionDef name:_wrap_key arg:self arg:key arguments arg arg Return return:yes Call FunctionDef name:__len__ arg:self arguments arg Return return:yes Call Call FunctionDef name:__iter__ arg:self arguments arg Assign Call For Assign If Compare"
  },
  {
    "library": "django",
    "name": "_switch_to_test_user",
    "source_code": "def _switch_to_test_user(self, parameters):\n    real_settings = settings.DATABASES[self.connection.alias]\n    real_settings['SAVED_USER'] = self.connection.settings_dict['SAVED_USER'] = self.connection.settings_dict['USER']\n    real_settings['SAVED_PASSWORD'] = self.connection.settings_dict['SAVED_PASSWORD'] = self.connection.settings_dict['PASSWORD']\n    real_test_settings = real_settings['TEST']\n    test_settings = self.connection.settings_dict['TEST']\n    real_test_settings['USER'] = real_settings['USER'] = test_settings['USER'] = self.connection.settings_dict['USER'] = parameters['user']\n    real_settings['PASSWORD'] = self.connection.settings_dict['PASSWORD'] = parameters['password']",
    "docstring": "Switch to the user that's used for creating the test database. Oracle doesn't have the concept of separate databases under the same user, so a separate user is used; see _create_test_db(). The main user is also needed for cleanup when testing is completed, so save its credentials in the SAVED_USER/SAVED_PASSWORD key in the settings dict.",
    "type": "method",
    "file_path": "django\\django\\db\\backends\\oracle\\creation.py",
    "ast_data": "FunctionDef name:_switch_to_test_user arg:self arg:parameters arguments arg arg Assign Assign Assign Assign Assign Assign Assign"
  },
  {
    "library": "scipy",
    "name": "SeekEmulatingReader",
    "source_code": "class SeekEmulatingReader:\n\n    def __init__(self, reader):\n        self.reader = reader\n        self.pos = 0\n\n    def read(self, size=-1, /):\n        data = self.reader.read(size)\n        self.pos += len(data)\n        return data\n\n    def seek(self, offset, whence=os.SEEK_SET, /):\n        match whence:\n            case os.SEEK_SET if offset >= self.pos:\n                self.read(offset - self.pos)\n            case os.SEEK_CUR if offset >= 0:\n                self.read(offset)\n            case os.SEEK_END if offset == 0:\n                self.read()\n            case _:\n                raise io.UnsupportedOperation('SeekEmulatingReader was asked to emulate a seek operation it does not support.')\n        return self.pos\n\n    def tell(self):\n        return self.pos\n\n    def close(self):\n        self.reader.close()\n\n    def flush(self):\n        raise io.UnsupportedOperation(\"SeekEmulatingReader can't flush.\")",
    "docstring": "Tracks stream position, provides tell(), and emulates only those seeks that can be supported by reading forward. Other seeks raise io.UnsupportedOperation. Note that this class implements only the minimum necessary to keep wavfile.read() happy.",
    "type": "class",
    "file_path": "scipy\\scipy\\io\\wavfile.py",
    "ast_data": "ClassDef name:SeekEmulatingReader FunctionDef name:__init__ arg:self arg:reader arguments arg arg Assign Assign FunctionDef name:read arguments arg arg Assign Call Call Return return:yes FunctionDef name:seek arguments arg arg arg Compare Call Compare Call Compare Call Raise Call Return return:yes FunctionDef name:tell arg:self arguments arg Return return:yes FunctionDef name:close arg:self arguments arg Call FunctionDef name:flush arg:self arguments arg Raise Call"
  },
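  Illustrative only, since the class is private to scipy.io.wavfile: given the SeekEmulatingReader class from the entry above in scope, a forward-only reader (e.g. a pipe) gains emulated forward seeks:
  ```python
  import io
  import os

  class ForwardOnly:
      """A reader that supports read() but not seek(), like a pipe."""
      def __init__(self, data: bytes):
          self._buf = io.BytesIO(data)
      def read(self, size=-1):
          return self._buf.read(size)
      def close(self):
          self._buf.close()

  stream = SeekEmulatingReader(ForwardOnly(b"RIFF....WAVEdata"))
  stream.seek(4)               # emulated by reading 4 bytes forward
  print(stream.tell())         # 4
  stream.seek(2, os.SEEK_CUR)  # forward-relative seeks also work
  print(stream.tell())         # 6
  ```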
  {
    "library": "scikit-learn",
    "name": "check_clusterer_compute_labels_predict",
    "source_code": "@ignore_warnings(category=FutureWarning)\ndef check_clusterer_compute_labels_predict(name, clusterer_orig):\n    X, y = make_blobs(n_samples=20, random_state=0)\n    clusterer = clone(clusterer_orig)\n    set_random_state(clusterer)\n    if hasattr(clusterer, 'compute_labels'):\n        X_pred1 = clusterer.fit(X).predict(X)\n        clusterer.set_params(compute_labels=False)\n        X_pred2 = clusterer.fit(X).predict(X)\n        assert_array_equal(X_pred1, X_pred2)",
    "docstring": "Check that predict is invariant of compute_labels.",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\utils\\estimator_checks.py",
    "ast_data": "FunctionDef name:check_clusterer_compute_labels_predict arg:name arg:clusterer_orig arguments arg arg Assign Call Assign Call Call If Call Assign Call Call Call Assign Call Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "AllocationTreeNode",
    "source_code": "class AllocationTreeNode:\n\n    def allocate(self, block: Allocation, is_last: bool) -> bool:\n        return False\n\n    def get_live_ranges(self) -> LiveRanges:\n        raise NotImplementedError\n\n    def get_size_hint(self) -> int:\n        raise NotImplementedError\n\n    def get_symbolic_size(self) -> sympy.Expr:\n        raise NotImplementedError\n\n    def finalize(self, pool, offset) -> AllocationTreeNode:\n        return self\n\n    def is_empty(self):\n        return False",
    "docstring": "Abstract base class for nodes in allocation pool.",
    "type": "class",
    "file_path": "pytorch\\torch\\_inductor\\codegen\\memory_planning.py",
    "ast_data": "ClassDef name:AllocationTreeNode FunctionDef name:allocate arg:self arg:block arg:is_last arguments arg arg arg Return return:yes FunctionDef name:get_live_ranges arg:self arguments arg Raise FunctionDef name:get_size_hint arg:self arguments arg Raise FunctionDef name:get_symbolic_size arg:self arguments arg Raise FunctionDef name:finalize arg:self arg:pool arg:offset arguments arg arg arg Return return:yes FunctionDef name:is_empty arg:self arguments arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "keyrefs",
    "source_code": "def keyrefs(self):\n    return list(self.data)",
    "docstring": "Return a list of weak references to the keys. The references are not guaranteed to be 'live' at the time they are used, so the result of calling the references needs to be checked before being used. This can be used to avoid creating references that will cause the garbage collector to keep the keys around longer than needed.",
    "type": "method",
    "file_path": "pytorch\\torch\\utils\\weak.py",
    "ast_data": "FunctionDef name:keyrefs arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "_get_sharding_prop_cache_info",
    "source_code": "def _get_sharding_prop_cache_info():\n    from torch.distributed.tensor._api import DTensor\n    return DTensor._op_dispatcher.sharding_propagator.propagate_op_sharding.cache_info()",
    "docstring": "Get the cache info for the sharding propagation cache, used for debugging purpose only. This would return a named tuple showing hits, misses, maxsize and cursize of the sharding propagator cache.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\tensor\\debug\\__init__.py",
    "ast_data": "FunctionDef name:_get_sharding_prop_cache_info arguments Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "insert",
    "source_code": "def insert(self, loc: int, item) -> Self:\n    loc = validate_insert_loc(loc, len(self))\n    item_arr = type(self)._from_sequence([item], dtype=self.dtype)\n    return type(self)._concat_same_type([self[:loc], item_arr, self[loc:]])",
    "docstring": "Insert an item at the given position. Parameters ---------- loc : int Index where the needs to be inserted. item : scalar-like Value to be inserted. Returns ------- ExtensionArray With inserted at . See Also -------- Index.insert: Make new Index inserting new item at location. Notes ----- This method should be both type and dtype-preserving. If the item cannot be held in an array of this type/dtype, either ValueError or TypeError should be raised. The default implementation relies on _from_sequence to raise on invalid items. Examples -------- >>> arr = pd.array([1, 2, 3]) >>> arr.insert(2, -1) [1, 2, -1, 3] Length: 4, dtype: Int64",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\arrays\\base.py",
    "ast_data": "FunctionDef name:insert arg:self arg:loc arg:item arguments arg arg arg Assign Call Call Assign Call Call Return return:yes Call Call"
  },
  {
    "library": "django",
    "name": "get_max_num",
    "source_code": "def get_max_num(self, request, obj=None, **kwargs):\n    return self.max_num",
    "docstring": "Hook for customizing the max number of extra inline forms.",
    "type": "method",
    "file_path": "django\\django\\contrib\\admin\\options.py",
    "ast_data": "FunctionDef name:get_max_num arg:self arg:request arg:obj arguments arg arg arg arg Return return:yes"
  },
  {
    "library": "pandas",
    "name": "pred",
    "source_code": "def pred(part) -> bool:\n    if isinstance(part, tuple):\n        return any((isinstance(s, slice) or is_list_like(s) for s in part))\n    else:\n        return isinstance(part, slice) or is_list_like(part)",
    "docstring": "Returns ------- bool True if slice does *not* reduce, False if is a tuple.",
    "type": "function",
    "file_path": "pandas\\pandas\\io\\formats\\style_render.py",
    "ast_data": "FunctionDef name:pred arg:part arguments arg If Call Return return:yes Call BoolOp Call Call Return return:yes BoolOp Call Call"
  },
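  A self-contained replay of the predicate (using the public pandas.api.types.is_list_like) to make the return values concrete:
  ```python
  from pandas.api.types import is_list_like

  def pred(part) -> bool:
      # True when the indexer keeps a dimension: a slice or list-like,
      # or a tuple containing at least one such entry
      if isinstance(part, tuple):
          return any(isinstance(s, slice) or is_list_like(s) for s in part)
      return isinstance(part, slice) or is_list_like(part)

  print(pred(slice(0, 2)))       # True: a slice does not reduce
  print(pred([0, 1]))            # True: list-like keeps the dimension
  print(pred(3))                 # False: a scalar label reduces
  print(pred((slice(None), 0)))  # True: the tuple contains a slice
  ```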
  {
    "library": "authlib",
    "name": "get_jti",
    "source_code": "def get_jti(self, client, grant_type, user, scope) -> str:\n    return generate_token(16)",
    "docstring": "JWT ID. Create an unique identifier for the token. Developers MAY re-implement this method:: def get_jti(self, client, grant_type, user scope): return generate_random_string(16)",
    "type": "method",
    "file_path": "authlib\\authlib\\oauth2\\rfc9068\\token.py",
    "ast_data": "FunctionDef name:get_jti arg:self arg:client arg:grant_type arg:user arg:scope arguments arg arg arg arg arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "DistributeConfig",
    "source_code": "class DistributeConfig(collections.namedtuple('DistributeConfig', ['train_distribute', 'eval_distribute', 'remote_cluster'])):\n\n    def __new__(cls, train_distribute=None, eval_distribute=None, remote_cluster=None):\n        return super(DistributeConfig, cls).__new__(cls, train_distribute, eval_distribute, remote_cluster)",
    "docstring": "A config tuple for distribution strategies. Attributes: train_distribute: a object for training. eval_distribute: an optional object for evaluation. remote_cluster: a dict, or object specifying the cluster configurations. If this is given, the method will be running as a standalone client which connects to the cluster for training.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\distribute_config.py",
    "ast_data": "ClassDef name:DistributeConfig Call FunctionDef name:__new__ arg:cls arg:train_distribute arg:eval_distribute arg:remote_cluster arguments arg arg arg arg Return return:yes Call Call"
  },
  {
    "library": "pandas",
    "name": "insert",
    "source_code": "def insert(self, loc: int, column: Hashable, value: object, allow_duplicates: bool | lib.NoDefault=lib.no_default) -> None:\n    if allow_duplicates is lib.no_default:\n        allow_duplicates = False\n    if allow_duplicates and (not self.flags.allows_duplicate_labels):\n        raise ValueError(\"Cannot specify 'allow_duplicates=True' when 'self.flags.allows_duplicate_labels' is False.\")\n    if not allow_duplicates and column in self.columns:\n        raise ValueError(f'cannot insert {column}, already exists')\n    if not is_integer(loc):\n        raise TypeError('loc must be int')\n    loc = int(loc)\n    if isinstance(value, DataFrame) and len(value.columns) > 1:\n        raise ValueError(f'Expected a one-dimensional object, got a DataFrame with {len(value.columns)} columns instead.')\n    elif isinstance(value, DataFrame):\n        value = value.iloc[:, 0]\n    value, refs = self._sanitize_column(value)\n    self._mgr.insert(loc, column, value, refs=refs)",
    "docstring": "Insert column into DataFrame at specified location. Raises a ValueError if is already contained in the DataFrame, unless is set to True. Parameters ---------- loc : int Insertion index. Must verify 0 >> df = pd.DataFrame({\"col1\": [1, 2], \"col2\": [3, 4]}) >>> df col1 col2 0 1 3 1 2 4 >>> df.insert(1, \"newcol\", [99, 99]) >>> df col1 newcol col2 0 1 99 3 1 2 99 4 >>> df.insert(0, \"col1\", [100, 100], allow_duplicates=True) >>> df col1 col1 newcol col2 0 100 1 99 3 1 100 2 99 4 Notice that pandas uses index alignment in case of from type : >>> df.insert(0, \"col0\", pd.Series([5, 6], index=[1, 2])) >>> df col0 col1 col1 newcol col2 0 NaN 100 1 99 3 1 5.0 100 2 99 4",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\frame.py",
    "ast_data": "FunctionDef name:insert arg:self arg:loc arg:column arg:value arg:allow_duplicates arguments arg arg arg arg arg If Compare Assign If BoolOp Raise Call If BoolOp Compare Raise Call If Call Raise Call Assign Call If BoolOp Call Compare Call Raise Call Call If Call Assign Assign Call Call"
  },
  {
    "library": "pytorch",
    "name": "_get_qconfig_mapping",
    "source_code": "def _get_qconfig_mapping(obj: Any, dict_key: str) -> Optional[QConfigMapping]:\n    if isinstance(obj, QConfigMapping) or obj is None:\n        return obj\n    if isinstance(obj, dict):\n        return QConfigMapping.from_dict(obj)\n    raise ValueError(f\"\"\"Expected QConfigMapping in prepare_custom_config_dict[\"{dict_key}\"], got '{type(obj)}'\"\"\")",
    "docstring": "Convert the given object into a QConfigMapping if possible, else throw an exception.",
    "type": "method",
    "file_path": "pytorch\\torch\\ao\\quantization\\fx\\custom_config.py",
    "ast_data": "FunctionDef name:_get_qconfig_mapping arg:obj arg:dict_key arguments arg arg If BoolOp Call Compare Return return:yes If Call Return return:yes Call Raise Call Call"
  },
  {
    "library": "django",
    "name": "get_sequences",
    "source_code": "def get_sequences(self, cursor, table_name, table_fields=()):\n    raise NotImplementedError('subclasses of BaseDatabaseIntrospection may require a get_sequences() method')",
    "docstring": "Return a list of introspected sequences for table_name. Each sequence is a dict: {'table': , 'column': }. An optional 'name' key can be added if the backend supports named sequences.",
    "type": "method",
    "file_path": "django\\django\\db\\backends\\base\\introspection.py",
    "ast_data": "FunctionDef name:get_sequences arg:self arg:cursor arg:table_name arg:table_fields arguments arg arg arg arg Raise Call"
  },
  {
    "library": "pytorch",
    "name": "kind",
    "source_code": "def kind(self) -> SchemaKind:\n    is_out = bool(self.arguments.out)\n    is_scratch = bool([arg for arg in self.arguments.out if arg.name.startswith('_scratch_')])\n    is_inplace = self.name.name.inplace\n    is_mutable = any((a.annotation is not None and a.annotation.is_write for a in self.arguments.post_self_positional))\n    assert not (is_out and is_inplace)\n    if is_inplace:\n        return SchemaKind.inplace\n    elif is_scratch:\n        assert is_out, 'invariant: all scratch operators are expected to be out= operators too'\n        return SchemaKind.scratch\n    elif is_out:\n        assert not is_scratch, 'We should not categorize a scratch op as an out variant. Check if the order of if statements are expected!'\n        return SchemaKind.out\n    elif is_mutable:\n        return SchemaKind.mutable\n    else:\n        return SchemaKind.functional",
    "docstring": "What kind of schema is this? A functional schema is one that returns a newly allocated output; an inplace schema modifies the self argument inplace; an out schema writes the result into an explicitly provided out argument.",
    "type": "method",
    "file_path": "pytorch\\torchgen\\model.py",
    "ast_data": "FunctionDef name:kind arg:self arguments arg Assign Call Assign Call Call Assign Assign Call BoolOp Compare BoolOp If Return return:yes If Return return:yes If Return return:yes If Return return:yes Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_set_ddp_runtime_logging_sample_rate",
    "source_code": "def _set_ddp_runtime_logging_sample_rate(self, sample_rate):\n    if sample_rate < 1:\n        self._log_and_throw(ValueError, 'DDP runtime logging sample rate should be equal or greater than 1')\n    self.reducer._set_ddp_runtime_logging_sample_rate(sample_rate)",
    "docstring": "Set sample_rate of collecting runtime stats. This interface allows users to set sample_rate of collecting runtime stats. The runtime stats will be recorded for the first 10 iterations, after 10 iterations runtime stats will be recorded once every \"sample_rate\" training iterations. In default, runtime stats are recorded for the first 10 iterations, after 10 iterations runtime stats are recorded once every \"kDDPRuntimeLoggingSampleRate=100\" training iterations. This is a prototype interface and subject to change in the future.",
    "type": "method",
    "file_path": "pytorch\\torch\\nn\\parallel\\distributed.py",
    "ast_data": "FunctionDef name:_set_ddp_runtime_logging_sample_rate arg:self arg:sample_rate arguments arg arg If Compare Call Call"
  },
  {
    "library": "kornia",
    "name": "KORNIA_CHECK_SAME_DEVICE",
    "source_code": "def KORNIA_CHECK_SAME_DEVICE(x: Tensor, y: Tensor, raises: bool=True) -> bool:\n    if x.device != y.device:\n        if raises:\n            raise TypeError(f'Not same device for tensors. Got: {x.device} and {y.device}')\n        return False\n    return True",
    "docstring": "Check whether two tensor in the same device. Args: x: first tensor to evaluate. y: sencod tensor to evaluate. msg: message to show in the exception. raises: bool indicating whether an exception should be raised upon failure. Raises: TypeException: if the two tensors are not in the same device and raises is True. Example: >>> x1 = torch.rand(2, 3, 3) >>> x2 = torch.rand(1, 3, 1) >>> KORNIA_CHECK_SAME_DEVICE(x1, x2) True",
    "type": "function",
    "file_path": "kornia\\kornia\\core\\check.py",
    "ast_data": "FunctionDef name:KORNIA_CHECK_SAME_DEVICE arg:x arg:y arg:raises arguments arg arg arg If Compare If Raise Call Return return:yes Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "get_config",
    "source_code": "def get_config(self):\n    return {'reduction': self.reduction, 'name': self.name}",
    "docstring": "Returns the config dictionary for a instance.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\losses.py",
    "ast_data": "FunctionDef name:get_config arg:self arguments arg Return return:yes"
  },
  {
    "library": "authlib",
    "name": "authorize_access_token",
    "source_code": "def authorize_access_token(self, request, **kwargs):\n    if request.method == 'GET':\n        error = request.GET.get('error')\n        if error:\n            description = request.GET.get('error_description')\n            raise OAuthError(error=error, description=description)\n        params = {'code': request.GET.get('code'), 'state': request.GET.get('state')}\n    else:\n        params = {'code': request.POST.get('code'), 'state': request.POST.get('state')}\n    state_data = self.framework.get_state_data(request.session, params.get('state'))\n    self.framework.clear_state_data(request.session, params.get('state'))\n    params = self._format_state_params(state_data, params)\n    claims_options = kwargs.pop('claims_options', None)\n    claims_cls = kwargs.pop('claims_cls', None)\n    leeway = kwargs.pop('leeway', 120)\n    token = self.fetch_access_token(**params, **kwargs)\n    if 'id_token' in token and 'nonce' in state_data:\n        userinfo = self.parse_id_token(token, nonce=state_data['nonce'], claims_options=claims_options, claims_cls=claims_cls, leeway=leeway)\n        token['userinfo'] = userinfo\n    return token",
    "docstring": "Fetch access token in one step. :param request: HTTP request instance from Django view. :return: A token dict.",
    "type": "method",
    "file_path": "authlib\\authlib\\integrations\\django_client\\apps.py",
    "ast_data": "FunctionDef name:authorize_access_token arg:self arg:request arguments arg arg arg If Compare Assign Call If Assign Call Raise Call Assign Call Call Assign Call Call Assign Call Call Call Call Assign Call Assign Call Assign Call Assign Call Assign Call If BoolOp Compare Compare Assign Call Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_get_compatible_spec",
    "source_code": "def _get_compatible_spec(value_or_spec1, value_or_spec2):\n    spec1 = _get_spec_for(value_or_spec1)\n    spec2 = _get_spec_for(value_or_spec2)\n    common = spec1._without_tensor_names().most_specific_common_supertype([spec2._without_tensor_names()])\n    if common is None:\n        raise TypeError(f'No common supertype of {spec1} and {spec2}.')\n    return common",
    "docstring": "Returns the most specific compatible spec. Args: value_or_spec1: A TypeSpecs or a value that has a defined TypeSpec. value_or_spec2: A TypeSpecs or a value that has a defined TypeSpec. Returns: The most specific compatible TypeSpecs of the input. Raises: ValueError: If value_or_spec1 is not compatible with value_or_spec2.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\cond_v2.py",
    "ast_data": "FunctionDef name:_get_compatible_spec arg:value_or_spec1 arg:value_or_spec2 arguments arg arg Assign Call Assign Call Assign Call Call Call If Compare Raise Call Return return:yes"
  },
  {
    "library": "sphinx",
    "name": "SphinxStyle",
    "source_code": "class SphinxStyle(Style):\n    background_color = '#eeffcc'\n    default_style = ''\n    styles = {**FriendlyStyle.styles, Generic.Output: '#333', Comment: 'italic #408090', Number: '#208050'}",
    "docstring": "Like friendly, but a bit darker to enhance contrast on the green background.",
    "type": "class",
    "file_path": "sphinx\\sphinx\\pygments_styles.py",
    "ast_data": "ClassDef name:SphinxStyle Assign Assign Assign"
  },
  {
    "library": "tensorflow",
    "name": "topology",
    "source_code": "@property\ndef topology(self) -> Topology:\n    return self._topology",
    "docstring": "A that describes the TPU topology.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\tpu\\device_assignment.py",
    "ast_data": "FunctionDef name:topology arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_num_buckets",
    "source_code": "@property\ndef _num_buckets(self):\n    return self.hash_bucket_size",
    "docstring": "Returns number of buckets in this sparse feature.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\feature_column\\feature_column.py",
    "ast_data": "FunctionDef name:_num_buckets arg:self arguments arg Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "set_usetex",
    "source_code": "def set_usetex(self, val):\n    self._usetex = mpl._val_or_rc(val, 'text.usetex')",
    "docstring": "Set whether to use TeX's math mode for rendering numbers in the formatter.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\ticker.py",
    "ast_data": "FunctionDef name:set_usetex arg:self arg:val arguments arg arg Assign Call"
  },
  {
    "library": "tensorflow",
    "name": "_CreateShapeFromNumpy",
    "source_code": "def _CreateShapeFromNumpy(ndarray):\n    element_type = types_.MAP_DTYPE_TO_RECORD[str(ndarray.dtype)].primitive_type\n    dimensions = ndarray.shape\n    if _np.isfortran(ndarray):\n        layout = range(ndarray.ndim)\n    else:\n        layout = list(reversed(range(ndarray.ndim)))\n    return Shape(element_type, dimensions, layout)",
    "docstring": "Create a Shape from a given Numpy array. Args: ndarray: Numpy array. Returns: A Shape object.",
    "type": "function",
    "file_path": "tensorflow\\third_party\\xla\\xla\\python_api\\xla_shape.py",
    "ast_data": "FunctionDef name:_CreateShapeFromNumpy arg:ndarray arguments arg Assign Call Assign If Call Assign Call Assign Call Call Call Return return:yes Call"
  },
  {
    "library": "sphinx",
    "name": "isproperty",
    "source_code": "def isproperty(obj: Any) -> TypeIs[property | cached_property[Any]]:\n    return isinstance(obj, property | cached_property)",
    "docstring": "Check if the object is property (possibly cached).",
    "type": "function",
    "file_path": "sphinx\\sphinx\\util\\inspect.py",
    "ast_data": "FunctionDef name:isproperty arg:obj arguments arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "_reset",
    "source_code": "def _reset(self):\n    self.best = self.mode_worse\n    self.cooldown_counter = 0\n    self.num_bad_epochs = 0",
    "docstring": "Reset num_bad_epochs counter and cooldown counter.",
    "type": "method",
    "file_path": "pytorch\\torch\\optim\\lr_scheduler.py",
    "ast_data": "FunctionDef name:_reset arg:self arguments arg Assign Assign Assign"
  },
  {
    "library": "matplotlib",
    "name": "get_rasterized",
    "source_code": "def get_rasterized(self):\n    return self._rasterized",
    "docstring": "Return whether the artist is to be rasterized.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\artist.py",
    "ast_data": "FunctionDef name:get_rasterized arg:self arguments arg Return return:yes"
  },
  {
    "library": "authlib",
    "name": "RevocationEndpoint",
    "source_code": "class RevocationEndpoint(TokenEndpoint):\n    ENDPOINT_NAME = 'revocation'\n\n    def authenticate_token(self, request, client):\n        self.check_params(request, client)\n        token = self.query_token(request.form['token'], request.form.get('token_type_hint'))\n        if token and (not token.check_client(client)):\n            raise InvalidGrantError()\n        return token\n\n    def check_params(self, request, client):\n        if 'token' not in request.form:\n            raise InvalidRequestError()\n        hint = request.form.get('token_type_hint')\n        if hint and hint not in self.SUPPORTED_TOKEN_TYPES:\n            raise UnsupportedTokenTypeError()\n\n    def create_endpoint_response(self, request):\n        client = self.authenticate_endpoint_client(request)\n        token = self.authenticate_token(request, client)\n        if token:\n            self.revoke_token(token, request)\n            self.server.send_signal('after_revoke_token', token=token, client=client)\n        return (200, {}, default_json_headers)\n\n    def query_token(self, token_string, token_type_hint):\n        raise NotImplementedError()\n\n    def revoke_token(self, token, request):\n        raise NotImplementedError()",
    "docstring": "Implementation of revocation endpoint which is described in _. .. _RFC7009:",
    "type": "class",
    "file_path": "authlib\\authlib\\oauth2\\rfc7009\\revocation.py",
    "ast_data": "ClassDef name:RevocationEndpoint Assign FunctionDef name:authenticate_token arg:self arg:request arg:client arguments arg arg arg Call Assign Call Call If BoolOp Call Raise Call Return return:yes FunctionDef name:check_params arg:self arg:request arg:client arguments arg arg arg If Compare Raise Call Assign Call If BoolOp Compare Raise Call FunctionDef name:create_endpoint_response arg:self arg:request arguments arg arg Assign Call Assign Call If Call Call Return return:yes FunctionDef name:query_token arg:self arg:token_string arg:token_type_hint arguments arg arg arg Raise Call FunctionDef name:revoke_token arg:self arg:token arg:request arguments arg arg arg Raise Call"
  },
  {
    "library": "kornia",
    "name": "efficientvit_backbone_b0",
    "source_code": "def efficientvit_backbone_b0(**kwargs: dict[str, Any]) -> EfficientViTBackbone:\n    backbone = EfficientViTBackbone(width_list=[8, 16, 32, 64, 128], depth_list=[1, 2, 2, 2, 2], dim=16, **build_kwargs_from_config(kwargs, EfficientViTBackbone))\n    return backbone",
    "docstring": "Create EfficientViT B0.",
    "type": "function",
    "file_path": "kornia\\kornia\\contrib\\models\\efficient_vit\\backbone.py",
    "ast_data": "FunctionDef name:efficientvit_backbone_b0 arguments arg Assign Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_is_conv_transpose_node",
    "source_code": "def _is_conv_transpose_node(n: Node):\n    return n.op == 'call_function' and n.target in [torch.ops.aten.conv_transpose1d, torch.ops.aten.conv_transpose1d.default, torch.ops.aten.conv_transpose2d, torch.ops.aten.conv_transpose2d.input]",
    "docstring": "Return whether the node refers to an aten conv_transpose op.",
    "type": "function",
    "file_path": "pytorch\\torch\\ao\\quantization\\pt2e\\utils.py",
    "ast_data": "FunctionDef name:_is_conv_transpose_node arg:n arguments arg Return return:yes BoolOp Compare Compare"
  },
  {
    "library": "sphinx",
    "name": "isfunction",
    "source_code": "def isfunction(obj: Any) -> TypeIs[types.FunctionType]:\n    return inspect.isfunction(unpartial(obj))",
    "docstring": "Check if the object is a user-defined function. Partial objects are unwrapped before checking them. .. seealso:: :external+python:func:",
    "type": "function",
    "file_path": "sphinx\\sphinx\\util\\inspect.py",
    "ast_data": "FunctionDef name:isfunction arg:obj arguments arg Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "indirect_indexing",
    "source_code": "def indirect_indexing(self, x: T, size: sympy.Expr, check: bool=True, wrap_neg=True) -> sympy.Expr:\n    raise NotImplementedError",
    "docstring": "Convert an integral x into a sympy.Expr that can be subsequently used in indexing computation. 'size' represents an upper bound on what valid indexes can be; when 'check' is True, we check that the x is in bounds. NB: This is typically mandatory to implement for any analysis, because you MUST return a valid sympy.Expr of some sort (even if it's a meaningless symbol).",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\ops_handler.py",
    "ast_data": "FunctionDef name:indirect_indexing arg:self arg:x arg:size arg:check arg:wrap_neg arguments arg arg arg arg arg Raise"
  },
  {
    "library": "pytorch",
    "name": "namedtuple_fields",
    "source_code": "@functools.lru_cache(1)\ndef namedtuple_fields(cls) -> tuple[str, ...]:\n    if cls is slice:\n        return ('start', 'stop', 'step')\n    assert issubclass(cls, tuple)\n    if hasattr(cls, '_fields'):\n        return cls._fields\n\n    @dataclasses.dataclass\n    class Marker:\n        index: int\n    assert cls.__module__ == 'torch.return_types'\n    obj = cls(map(Marker, range(cls.n_fields)))\n    fields: dict[str, int] = {}\n    for name in dir(obj):\n        if name[0] != '_' and isinstance(getattr(obj, name), Marker):\n            fields[name] = getattr(obj, name).index\n    assert len(fields) == cls.n_fields\n    return tuple(sorted(fields, key=fields.get))",
    "docstring": "Get the fields of a namedtuple or a torch.return_types.* quasi-namedtuple",
    "type": "function",
    "file_path": "pytorch\\torch\\_dynamo\\utils.py",
    "ast_data": "FunctionDef name:namedtuple_fields arg:cls arguments arg If Compare Return return:yes Call If Call Return return:yes ClassDef name:Marker Compare Assign Call Call Call For Call If BoolOp Compare Call Call Assign Call Compare Call Return return:yes Call Call Call"
  },
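  Assuming the private import path from the entry above still holds, the two fast paths look like this:
  ```python
  from collections import namedtuple

  from torch._dynamo.utils import namedtuple_fields  # private helper shown above

  Point = namedtuple("Point", ["x", "y"])
  print(namedtuple_fields(Point))  # ('x', 'y') via the _fields fast path
  print(namedtuple_fields(slice))  # ('start', 'stop', 'step') special case
  ```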
  {
    "library": "scipy",
    "name": "get_shape",
    "source_code": "def get_shape(self):\n    return self._shape",
    "docstring": "Get shape of a sparse matrix.",
    "type": "method",
    "file_path": "scipy\\scipy\\sparse\\_dok.py",
    "ast_data": "FunctionDef name:get_shape arg:self arguments arg Return return:yes"
  },
  {
    "library": "pandas",
    "name": "idxmin",
    "source_code": "def idxmin(self, axis: Axis=0, skipna: bool=True, numeric_only: bool=False) -> Series:\n    axis = self._get_axis_number(axis)\n    if self.empty and len(self.axes[axis]):\n        axis_dtype = self.axes[axis].dtype\n        return self._constructor_sliced(dtype=axis_dtype)\n    if numeric_only:\n        data = self._get_numeric_data()\n    else:\n        data = self\n    res = data._reduce(nanops.nanargmin, 'argmin', axis=axis, skipna=skipna, numeric_only=False)\n    indices = res._values\n    if (indices == -1).any():\n        warnings.warn(f'The behavior of {type(self).__name__}.idxmin with all-NA values, or any-NA and skipna=False, is deprecated. In a future version this will raise ValueError', FutureWarning, stacklevel=find_stack_level())\n    index = data._get_axis(axis)\n    result = algorithms.take(index._values, indices, allow_fill=True, fill_value=index._na_value)\n    final_result = data._constructor_sliced(result, index=data._get_agg_axis(axis))\n    return final_result.__finalize__(self, method='idxmin')",
    "docstring": "Return index of first occurrence of minimum over requested axis. NA/null values are excluded. Parameters ---------- axis : {{0 or 'index', 1 or 'columns'}}, default 0 The axis to use. 0 or 'index' for row-wise, 1 or 'columns' for column-wise. skipna : bool, default True Exclude NA/null values. If the entire DataFrame is NA, or if `floatintboolean`. >>> df.idxmin(axis=\"columns\") Pork consumption Wheat Products co2_emissions Beef consumption dtype: object",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\frame.py",
    "ast_data": "FunctionDef name:idxmin arg:self arg:axis arg:skipna arg:numeric_only arguments arg arg arg arg Assign Call If BoolOp Call Assign Return return:yes Call If Assign Call Assign Assign Call Assign If Call Compare Call Call Call Assign Call Assign Call Assign Call Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_assign_sub_flops",
    "source_code": "@ops.RegisterStatistics('AssignSub', 'flops')\ndef _assign_sub_flops(graph, node):\n    return _unary_op_flops(graph, node)",
    "docstring": "Compute flops for AssignSub operation.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\profiler\\internal\\flops_registry.py",
    "ast_data": "FunctionDef name:_assign_sub_flops arg:graph arg:node arguments arg arg Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "bf16_compress_wrapper",
    "source_code": "def bf16_compress_wrapper(hook: Callable[[Any, dist.GradBucket], torch.futures.Future[torch.Tensor]]) -> Callable[[Any, dist.GradBucket], torch.futures.Future[torch.Tensor]]:\n\n    def bf16_compress_wrapper_hook(hook_state, bucket: dist.GradBucket) -> torch.futures.Future[torch.Tensor]:\n        bucket.set_buffer(bucket.buffer().to(torch.bfloat16))\n        fut = hook(hook_state, bucket)\n\n        def decompress(fut):\n            decompressed_tensor = bucket.buffer()\n            decompressed_tensor.copy_(fut.value())\n            return decompressed_tensor\n        return fut.then(decompress)\n    return bf16_compress_wrapper_hook",
    "docstring": "Warning: This API is experimental, and it requires NCCL version later than 2.9.6. This wrapper casts the input gradient tensor of a given DDP communication hook to half-precision _ (``. Example:: >>> # xdoctest: +SKIP >>> state = PowerSGDState(process_group=process_group, matrix_approximation_rank=1, start_powerSGD_iter=10) >>> ddp_model.register_comm_hook(state, bf16_compress_wrapper(powerSGD_hook))",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\algorithms\\ddp_comm_hooks\\default_hooks.py",
    "ast_data": "FunctionDef name:bf16_compress_wrapper arg:hook arguments arg FunctionDef name:bf16_compress_wrapper_hook arg:hook_state arg:bucket arguments arg arg Call Call Call Assign Call FunctionDef name:decompress arg:fut arguments arg Assign Call Call Call Return return:yes Return return:yes Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "string_join",
    "source_code": "@tf_export('strings.join', v1=['strings.join', 'string_join'])\n@dispatch.add_dispatch_support\n@deprecation.deprecated_endpoints('string_join')\ndef string_join(inputs, separator='', name=None):\n    return gen_string_ops.string_join(inputs, separator=separator, name=name)",
    "docstring": "Perform element-wise concatenation of a list of string tensors. Given a list of string tensors of same shape, performs element-wise concatenation of the strings of the same index in all tensors. >>> tf.strings.join(['abc','def']).numpy() b'abcdef' >>> tf.strings.join([['abc','123'], ... ['def','456'], ... ['ghi','789']]).numpy() array([b'abcdefghi', b'123456789'], dtype=object) >>> tf.strings.join([['abc','123'], ... ['def','456']], ... separator=\" \").numpy() array([b'abc def', b'123 456'], dtype=object) The reduction version of this elementwise operation is Args: inputs: A list of objects of same size and dtype. separator: A string added between each string being joined. name: A name for the operation (optional). Returns: A tensor.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\string_ops.py",
    "ast_data": "FunctionDef name:string_join arg:inputs arg:separator arg:name arguments arg arg arg Return return:yes Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "false_positives",
    "source_code": "@tf_export(v1=['metrics.false_positives'])\ndef false_positives(labels, predictions, weights=None, metrics_collections=None, updates_collections=None, name=None):\n    if context.executing_eagerly():\n        raise RuntimeError('tf.metrics.false_positives is not supported when eager execution is enabled.')\n    with variable_scope.variable_scope(name, 'false_positives', (predictions, labels, weights)):\n        predictions, labels, weights = _remove_squeezable_dimensions(predictions=math_ops.cast(predictions, dtype=dtypes.bool), labels=math_ops.cast(labels, dtype=dtypes.bool), weights=weights)\n        is_false_positive = math_ops.logical_and(math_ops.equal(labels, False), math_ops.equal(predictions, True))\n        return _count_condition(is_false_positive, weights, metrics_collections, updates_collections)",
    "docstring": "Sum the weights of false positives. If is , weights default to 1. Use weights of 0 to mask values. Args: labels: The ground truth values, a whose dimensions must match . Will be cast to . predictions: The predicted values, a of arbitrary dimensions. Will be cast to . weights: Optional whose rank is either 0, or the same rank as , and must be broadcastable to (i.e., all dimensions must be either , or the same as the corresponding dimension). metrics_collections: An optional list of collections that the metric value variable should be added to. updates_collections: An optional list of collections that the metric update ops should be added to. name: An optional variable_scope name. Returns: value_tensor: A representing the current value of the metric. update_op: An operation that accumulates the error from a batch of data. Raises: ValueError: If and have mismatched shapes, or if is not and its shape doesn't match , or if either or are not a list or tuple. RuntimeError: If eager execution is enabled.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\metrics_impl.py",
    "ast_data": "FunctionDef name:false_positives arg:labels arg:predictions arg:weights arg:metrics_collections arg:updates_collections arg:name arguments arg arg arg arg arg arg If Call Raise Call With Call Assign Call Call Call Assign Call Call Call Return return:yes Call Call"
  },
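A minimal usage sketch for the `false_positives` entry above. It is a TF1-style streaming metric, so the sketch assumes the `tf.compat.v1` shim is available and runs in graph mode; the toy tensors are illustrative only:

```python
import tensorflow.compat.v1 as tf

tf.disable_eager_execution()  # the metric raises RuntimeError under eager execution

labels = tf.constant([True, False, True, False])
predictions = tf.constant([True, True, False, False])

# value is the running total; update_op folds a batch into it
value, update_op = tf.metrics.false_positives(labels, predictions)

with tf.Session() as sess:
    sess.run(tf.local_variables_initializer())  # metric variables live in local collections
    sess.run(update_op)
    print(sess.run(value))  # 1.0 -- only index 1 is (label=False, prediction=True)
```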
  {
    "library": "scikit-learn",
    "name": "transform",
    "source_code": "def transform(self, X):\n    X_ordinal, X_known_mask = self._transform(X, handle_unknown='ignore', ensure_all_finite='allow-nan')\n    if self.target_type_ == 'multiclass':\n        X_out = np.empty((X_ordinal.shape[0], X_ordinal.shape[1] * len(self.classes_)), dtype=np.float64)\n    else:\n        X_out = np.empty_like(X_ordinal, dtype=np.float64)\n    self._transform_X_ordinal(X_out, X_ordinal, ~X_known_mask, slice(None), self.encodings_, self.target_mean_)\n    return X_out",
    "docstring": "Transform X with the target encoding. .. note:: does not equal because a :term: scheme is used in for encoding. See the :ref:. for details. Parameters ---------- X : array-like of shape (n_samples, n_features) The data to determine the categories of each feature. Returns ------- X_trans : ndarray of shape (n_samples, n_features) or (n_samples, (n_features * n_classes)) Transformed input.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\preprocessing\\_target_encoder.py",
    "ast_data": "FunctionDef name:transform arg:self arg:X arguments arg arg Assign Call If Compare Assign Call Call Assign Call Call Call Return return:yes"
  },
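A short sketch of the `TargetEncoder.transform` entry above, using toy data (the feature values and targets are made up for illustration):

```python
import numpy as np
from sklearn.preprocessing import TargetEncoder

X = np.array([["dog"], ["cat"], ["dog"], ["cat"], ["fish"]], dtype=object)
y = np.array([1.0, 0.0, 1.0, 0.0, 1.0])

enc = TargetEncoder(random_state=0)
enc.fit(X, y)

# transform() applies encodings learned on the full training set, which is why
# fit(X, y).transform(X) differs from fit_transform(X, y) -- the latter uses an
# internal cross-fitting scheme, as the docstring notes.
print(enc.transform(X).shape)  # (5, 1): one encoded column per input feature
```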
  {
    "library": "tensorflow",
    "name": "cross_replica_sum",
    "source_code": "@tf_export(v1=['tpu.cross_replica_sum'])\ndef cross_replica_sum(x, group_assignment=None, name=None):\n    if group_assignment is None:\n        group_assignment = _create_default_group_assignment()\n    return gen_tpu_ops.cross_replica_sum(x, group_assignment, name=name)",
    "docstring": "Sum the input tensor across replicas according to group_assignment. Args: x: The local tensor to the sum. group_assignment: Optional 2d int32 lists with shape [num_groups, num_replicas_per_group]. represents the replica ids in the ith subgroup. name: Optional op name. Returns: A which is summed across replicas.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\tpu\\ops\\tpu_ops.py",
    "ast_data": "FunctionDef name:cross_replica_sum arg:x arg:group_assignment arg:name arguments arg arg arg If Compare Assign Call Return return:yes Call Call"
  },
  {
    "library": "matplotlib",
    "name": "_clean_event",
    "source_code": "def _clean_event(self, event):\n    if event.xdata is None:\n        event = self._prev_event\n    else:\n        event = copy.copy(event)\n    event.xdata, event.ydata = self._get_data(event)\n    self._prev_event = event\n    return event",
    "docstring": "Preprocess an event: - Replace *event* by the previous event if *event* has no `` from this widget's Axes, and clip them to the axes limits. - Update the previous event.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\widgets.py",
    "ast_data": "FunctionDef name:_clean_event arg:self arg:event arguments arg arg If Compare Assign Assign Call Assign Call Assign Return return:yes"
  },
  {
    "library": "scipy",
    "name": "construct_lcb_delaunay",
    "source_code": "def construct_lcb_delaunay(self, v_min, ind=None):\n    cbounds = [[x_b_i[0], x_b_i[1]] for x_b_i in self.bounds]\n    return cbounds",
    "docstring": "Construct locally (approximately) convex bounds Parameters ---------- v_min : Vertex object The minimizer vertex Returns ------- cbounds : list of lists List of size dimension with length-2 list of bounds for each dimension.",
    "type": "method",
    "file_path": "scipy\\scipy\\optimize\\_shgo.py",
    "ast_data": "FunctionDef name:construct_lcb_delaunay arg:self arg:v_min arg:ind arguments arg arg arg Assign Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "extract_settings_user_opt_in_from_text",
    "source_code": "def extract_settings_user_opt_in_from_text(rollout_state: str) -> tuple[str, str]:\n    rollout_state_parts = rollout_state.split('---')\n    if len(rollout_state_parts) >= 2:\n        return (rollout_state_parts[0], rollout_state_parts[1])\n    else:\n        return ('', rollout_state)",
    "docstring": "Extracts the text with settings, if any, and the opted in users from the rollout state. If the issue body contains \"---\" then the text above that is the settings and the text below is the list of opted in users. If it doesn't contain \"---\" then the settings are empty and the rest is the users.",
    "type": "function",
    "file_path": "pytorch\\.github\\scripts\\runner_determinator.py",
    "ast_data": "FunctionDef name:extract_settings_user_opt_in_from_text arg:rollout_state arguments arg Assign Call If Compare Call Return return:yes Return return:yes"
  },
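The `extract_settings_user_opt_in_from_text` helper above is self-contained, so its behavior can be reproduced directly; the issue body below is hypothetical:

```python
def extract_settings_user_opt_in_from_text(rollout_state: str) -> tuple[str, str]:
    # Mirrors the entry above: text above "---" is settings, below is the users.
    parts = rollout_state.split("---")
    if len(parts) >= 2:
        return (parts[0], parts[1])
    return ("", rollout_state)

issue_body = "experiments:\n  lf: 25\n---\n@alice\n@bob"  # hypothetical issue body
settings, users = extract_settings_user_opt_in_from_text(issue_body)
print(repr(settings))  # 'experiments:\n  lf: 25\n'
print(repr(users))     # '\n@alice\n@bob'
```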
  {
    "library": "tensorflow",
    "name": "_get_deprecated_positional_arguments",
    "source_code": "def _get_deprecated_positional_arguments(names_to_ok_vals, arg_spec):\n    arg_space = arg_spec.args + arg_spec.kwonlyargs\n    arg_name_to_pos = {name: pos for pos, name in enumerate(arg_space)}\n    deprecated_positional_args = {}\n    for arg_name, spec in iter(names_to_ok_vals.items()):\n        if arg_name in arg_name_to_pos:\n            pos = arg_name_to_pos[arg_name]\n            deprecated_positional_args[arg_name] = DeprecatedArgSpec(pos, spec.has_ok_value, spec.ok_value)\n    return deprecated_positional_args",
    "docstring": "Builds a dictionary from deprecated arguments to their spec. Returned dict is keyed by argument name. Each value is a DeprecatedArgSpec with the following fields: position: The zero-based argument position of the argument within the signature. None if the argument isn't found in the signature. ok_values: Values of this argument for which warning will be suppressed. Args: names_to_ok_vals: dict from string arg_name to a list of values, possibly empty, which should not elicit a warning. arg_spec: Output from tf_inspect.getfullargspec on the called function. Returns: Dictionary from arg_name to DeprecatedArgSpec.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\util\\deprecation.py",
    "ast_data": "FunctionDef name:_get_deprecated_positional_arguments arg:names_to_ok_vals arg:arg_spec arguments arg arg Assign Assign Call Assign For Call Call If Compare Assign Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "CreateShapeFromDtypeAndTuple",
    "source_code": "def CreateShapeFromDtypeAndTuple(dtype, shape_tuple):\n    element_type = types_.MAP_DTYPE_TO_RECORD[str(dtype)].primitive_type\n    return Shape(element_type, shape_tuple)",
    "docstring": "Create a shape from a Numpy dtype and a sequence of nonnegative integers. Args: dtype: a numpy dtype, e.g. np.dtype('int32'). shape_tuple: a sequence of nonnegative integers. Returns: A Shape object.",
    "type": "function",
    "file_path": "tensorflow\\third_party\\xla\\xla\\python_api\\xla_shape.py",
    "ast_data": "FunctionDef name:CreateShapeFromDtypeAndTuple arg:dtype arg:shape_tuple arguments arg arg Assign Call Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "sampling_custom",
    "source_code": "def sampling_custom(self, n, dim):\n    if self.n_sampled == 0:\n        self.C = self.sampling_function(n, dim)\n    else:\n        self.C = self.sampling_function(n, dim)\n    for i in range(len(self.bounds)):\n        self.C[:, i] = self.C[:, i] * (self.bounds[i][1] - self.bounds[i][0]) + self.bounds[i][0]\n    return self.C",
    "docstring": "Generates uniform sampling points in a hypercube and scales the points to the bound limits.",
    "type": "method",
    "file_path": "scipy\\scipy\\optimize\\_shgo.py",
    "ast_data": "FunctionDef name:sampling_custom arg:self arg:n arg:dim arguments arg arg arg If Compare Assign Call Assign Call For Call Call Assign Return return:yes"
  },
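The core of `sampling_custom` above is an affine rescaling of unit-hypercube samples onto the bound limits. A standalone sketch of just that scaling step (the function name and toy bounds are illustrative, not part of scipy):

```python
import numpy as np

def scale_to_bounds(C, bounds):
    """Affinely map points from the unit hypercube onto per-dimension bounds."""
    C = np.array(C, dtype=float, copy=True)
    for i, (lb, ub) in enumerate(bounds):
        C[:, i] = C[:, i] * (ub - lb) + lb  # same formula as sampling_custom
    return C

rng = np.random.default_rng(0)
pts = scale_to_bounds(rng.random((4, 2)), [(-1.0, 1.0), (0.0, 10.0)])
print(pts.min(axis=0), pts.max(axis=0))  # stays within [-1, 1] and [0, 10]
```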
  {
    "library": "scipy",
    "name": "random_state",
    "source_code": "@property\ndef random_state(self):\n    return self._random_state",
    "docstring": "Get or set the Generator object for generating random variates. If is None (or ), the singleton is used. If is an int, a new `seedseed` instance then that instance is used.",
    "type": "method",
    "file_path": "scipy\\scipy\\stats\\_multivariate.py",
    "ast_data": "FunctionDef name:random_state arg:self arguments arg Return return:yes"
  },
  {
    "library": "django",
    "name": "is_ignored_path",
    "source_code": "def is_ignored_path(path, ignore_patterns):\n    path = Path(path)\n\n    def ignore(pattern):\n        return fnmatch.fnmatchcase(path.name, pattern) or fnmatch.fnmatchcase(str(path), pattern)\n    return any((ignore(pattern) for pattern in normalize_path_patterns(ignore_patterns)))",
    "docstring": "Check if the given path should be ignored or not based on matching one of the glob style .",
    "type": "function",
    "file_path": "django\\django\\core\\management\\utils.py",
    "ast_data": "FunctionDef name:is_ignored_path arg:path arg:ignore_patterns arguments arg arg Assign Call FunctionDef name:ignore arg:pattern arguments arg Return return:yes BoolOp Call Call Call Return return:yes Call Call Call"
  },
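A simplified sketch of the `is_ignored_path` logic above; it matches each pattern against both the file name and the full path. The real helper also normalizes the patterns via `normalize_path_patterns()` first, which this sketch omits:

```python
import fnmatch
from pathlib import Path

def is_ignored(path, ignore_patterns):
    # Match each glob pattern against the basename and the full path string.
    p = Path(path)
    return any(
        fnmatch.fnmatchcase(p.name, pat) or fnmatch.fnmatchcase(str(p), pat)
        for pat in ignore_patterns
    )

print(is_ignored("locale/en/django.po", ["*.po"]))  # True: basename matches
print(is_ignored("docs/conf.py", ["docs/*"]))       # True: full path matches
print(is_ignored("app/models.py", ["tests/*"]))     # False
```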
  {
    "library": "matplotlib",
    "name": "convert_yunits",
    "source_code": "def convert_yunits(self, y):\n    ax = getattr(self, 'axes', None)\n    if ax is None or ax.yaxis is None:\n        return y\n    return ax.yaxis.convert_units(y)",
    "docstring": "Convert *y* using the unit type of the yaxis. If the artist is not contained in an Axes or if the yaxis does not have units, *y* itself is returned.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\artist.py",
    "ast_data": "FunctionDef name:convert_yunits arg:self arg:y arguments arg arg Assign Call If BoolOp Compare Compare Return return:yes Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "_lazy_init",
    "source_code": "@no_type_check\ndef _lazy_init(state: _FSDPState, root_module: nn.Module) -> _FSDPState:\n    if state._is_root is not None:\n        return\n    if not state._device_handle.is_available():\n        raise RuntimeError('FSDP does not support CPU only execution')\n    state._is_root = True\n    _assert_in_training_states(state, [TrainingState.IDLE])\n    _check_flat_params_on_expected_device(state, root_module)\n    state._all_fsdp_states = traversal_utils._get_fsdp_states(root_module)\n    _init_streams(state)\n    buffers, buffer_dtypes = _get_buffers_and_dtypes_for_computation(state, root_module)\n    _cast_buffers_to_dtype_and_device(buffers, buffer_dtypes, state.compute_device)\n    state._exec_order_data.init(state, root_module, state.process_group)\n    _share_state_and_init_handle_attrs(state, root_module)\n    return state",
    "docstring": "Performs initialization lazily, typically right before the first forward pass. The laziness is needed to ensure that the parameter device/dtype and the FSDP hierarchy have finalized. This method's actual logic only runs on the root FSDP instance, which performs initialization for all non-root FSDP instances to avoid partial initialization. For the non-composable code path, `` should be the same, namely the FSDP instance itself.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\fsdp\\_runtime_utils.py",
    "ast_data": "FunctionDef name:_lazy_init arg:state arg:root_module arguments arg arg If Compare Return return:no If Call Raise Call Assign Call Call Assign Call Call Assign Call Call Call Call Return return:yes"
  },
  {
    "library": "django",
    "name": "state_forwards",
    "source_code": "def state_forwards(self, app_label, state):\n    raise NotImplementedError('subclasses of Operation must provide a state_forwards() method')",
    "docstring": "Take the state from the previous migration, and mutate it so that it matches what this migration would perform.",
    "type": "method",
    "file_path": "django\\django\\db\\migrations\\operations\\base.py",
    "ast_data": "FunctionDef name:state_forwards arg:self arg:app_label arg:state arguments arg arg arg Raise Call"
  },
  {
    "library": "pytorch",
    "name": "autocast",
    "source_code": "class autocast(torch.amp.autocast_mode.autocast):\n\n    @deprecated(\"`torch.cpu.amp.autocast(args...)` is deprecated. Please use `torch.amp.autocast('cpu', args...)` instead.\", category=FutureWarning)\n    def __init__(self, enabled: bool=True, dtype: torch.dtype=torch.bfloat16, cache_enabled: bool=True):\n        if torch._jit_internal.is_scripting():\n            self._enabled = enabled\n            self.device = 'cpu'\n            self.fast_dtype = dtype\n            return\n        super().__init__('cpu', enabled=enabled, dtype=dtype, cache_enabled=cache_enabled)\n\n    def __enter__(self):\n        if torch._jit_internal.is_scripting():\n            return self\n        return super().__enter__()\n\n    def __exit__(self, exc_type: Any, exc_val: Any, exc_tb: Any):\n        if torch._jit_internal.is_scripting():\n            return\n        return super().__exit__(exc_type, exc_val, exc_tb)\n\n    def __call__(self, func):\n        if torch._jit_internal.is_scripting():\n            return func\n        return super().__call__(func)",
    "docstring": "See :class:. `` instead.",
    "type": "class",
    "file_path": "pytorch\\torch\\cpu\\amp\\autocast_mode.py",
    "ast_data": "ClassDef name:autocast FunctionDef name:__init__ arg:self arg:enabled arg:dtype arg:cache_enabled arguments arg arg arg arg If Call Assign Assign Assign Return return:no Call Call Call FunctionDef name:__enter__ arg:self arguments arg If Call Return return:yes Return return:yes Call Call FunctionDef name:__exit__ arg:self arg:exc_type arg:exc_val arg:exc_tb arguments arg arg arg arg If Call Return return:no Return return:yes Call Call FunctionDef name:__call__ arg:self arg:func arguments arg arg If Call Return return:yes Return return:yes Call Call"
  },
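A minimal sketch of the replacement API that the deprecation notice above points to; the toy tensors are illustrative:

```python
import torch

x = torch.randn(4, 4)
w = torch.randn(4, 4)

# Preferred spelling of the deprecated torch.cpu.amp.autocast(...):
with torch.amp.autocast("cpu", dtype=torch.bfloat16):
    y = x @ w  # matmuls run in bfloat16 inside the autocast region

print(y.dtype)  # torch.bfloat16
```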
  {
    "library": "kornia",
    "name": "HourglassBackbone",
    "source_code": "class HourglassBackbone(Module):\n\n    def __init__(self, input_channel: int=1, depth: int=4, num_stacks: int=2, num_blocks: int=1, num_classes: int=5) -> None:\n        super().__init__()\n        self.head = MultitaskHead\n        self.net = hg(HourglassConfig(depth, num_stacks, num_blocks, num_classes, input_channel, head=self.head))\n\n    def forward(self, input_images: Tensor) -> Tensor:\n        return self.net(input_images)",
    "docstring": "Hourglass network, taken from Args: input_channel: number of input channels. depth: number of residual blocks per hourglass module. num_stacks: number of hourglass modules stacked together. num_blocks: number of layers in each residual block. num_classes: number of heads for the output of a hourglass module.",
    "type": "class",
    "file_path": "kornia\\kornia\\feature\\sold2\\backbones.py",
    "ast_data": "ClassDef name:HourglassBackbone FunctionDef name:__init__ arg:self arg:input_channel arg:depth arg:num_stacks arg:num_blocks arg:num_classes arguments arg arg arg arg arg arg Call Call Assign Assign Call Call FunctionDef name:forward arg:self arg:input_images arguments arg arg Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "_sort_features",
    "source_code": "def _sort_features(self, X, vocabulary):\n    sorted_features = sorted(vocabulary.items())\n    map_index = np.empty(len(sorted_features), dtype=X.indices.dtype)\n    for new_val, (term, old_val) in enumerate(sorted_features):\n        vocabulary[term] = new_val\n        map_index[old_val] = new_val\n    X.indices = map_index.take(X.indices, mode='clip')\n    return X",
    "docstring": "Sort features by name Returns a reordered matrix and modifies the vocabulary in place",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\feature_extraction\\text.py",
    "ast_data": "FunctionDef name:_sort_features arg:self arg:X arg:vocabulary arguments arg arg arg Assign Call Call Assign Call Call For Call Assign Assign Assign Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "__setstate__",
    "source_code": "def __setstate__(self, state):\n    self.process_group = distributed_c10d._get_default_group()\n    logger.warning('NOTE: Process group will be set to a default group (i.e. the world size).                If a different group is desired, please set `self.process_group` after PowerSGD state is loaded.')\n    for slot, value in state.items():\n        setattr(self, slot, value)",
    "docstring": "Take a provided `` is set to default.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\algorithms\\ddp_comm_hooks\\powerSGD_hook.py",
    "ast_data": "FunctionDef name:__setstate__ arg:self arg:state arguments arg arg Assign Call Call For Call Call"
  },
  {
    "library": "scipy",
    "name": "find_minimum",
    "source_code": "def find_minimum(f, init, /, *, args=(), tolerances=None, maxiter=100, callback=None):\n\n    def reformat_result(res_in):\n        res_out = _RichResult()\n        res_out.status = res_in.status\n        res_out.success = res_in.success\n        res_out.x = res_in.x\n        res_out.f_x = res_in.fun\n        res_out.nfev = res_in.nfev\n        res_out.nit = res_in.nit\n        res_out.bracket = (res_in.xl, res_in.xm, res_in.xr)\n        res_out.f_bracket = (res_in.fl, res_in.fm, res_in.fr)\n        res_out._order_keys = ['success', 'status', 'x', 'f_x', 'nfev', 'nit', 'bracket', 'f_bracket']\n        return res_out\n    xl, xm, xr = init\n    default_tolerances = dict(xatol=None, xrtol=None, fatol=None, frtol=None)\n    tolerances = {} if tolerances is None else tolerances\n    default_tolerances.update(tolerances)\n    tolerances = default_tolerances\n    if callable(callback):\n\n        def _callback(res):\n            return callback(reformat_result(res))\n    else:\n        _callback = callback\n    res = _chandrupatla_minimize(f, xl, xm, xr, args=args, **tolerances, maxiter=maxiter, callback=_callback)\n    return reformat_result(res)",
    "docstring": "Find the minimum of an unimodal, real-valued function of a real variable. For each element of the output of , seeks the scalar minimizer that minimizes the element. This function currently uses Chandrupatla's bracketing minimization algorithm [1]_ and therefore requires argument to provide a three-point minimization bracket: `ffind_minimumbracket_miniumbracket_minimumfind_minimum` at once: >>> c = np.asarray([1, 1.5, 2]) >>> res_bracket = elementwise.bracket_minimum(f, 0, args=(c,)) >>> res_bracket.bracket (array([0. , 0.5, 0.5]), array([0.5, 1.5, 1.5]), array([1.5, 2.5, 2.5])) >>> res_minimum = elementwise.find_minimum(f, res_bracket.bracket, args=(c,)) >>> res_minimum.x array([1.00000001, 1.5 , 2. ]) >>> res_minimum.f_x array([2., 2., 2.])",
    "type": "function",
    "file_path": "scipy\\scipy\\optimize\\_elementwise.py",
    "ast_data": "FunctionDef name:find_minimum arguments arg arg arg arg arg arg FunctionDef name:reformat_result arg:res_in arguments arg Assign Call Assign Assign Assign Assign Assign Assign Assign Assign Assign Return return:yes Assign Assign Call Assign Compare Call Assign If Call FunctionDef name:_callback arg:res arguments arg Return return:yes Call Call Assign Assign Call Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "_dots_per_unit",
    "source_code": "def _dots_per_unit(self, units):\n    bb = self.axes.bbox\n    vl = self.axes.viewLim\n    return _api.check_getitem({'x': bb.width / vl.width, 'y': bb.height / vl.height, 'xy': np.hypot(*bb.size) / np.hypot(*vl.size), 'width': bb.width, 'height': bb.height, 'dots': 1.0, 'inches': self.axes.get_figure(root=True).dpi}, units=units)",
    "docstring": "Return a scale factor for converting from units to pixels.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\quiver.py",
    "ast_data": "FunctionDef name:_dots_per_unit arg:self arg:units arguments arg arg Assign Assign Return return:yes Call Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "serialize",
    "source_code": "def serialize(self, exported_program: ep.ExportedProgram) -> _SerializedProgram:\n    exported_program.validate()\n    gm_serializer = GraphModuleSerializer(exported_program.graph_signature, exported_program.module_call_graph)\n    serialized_graph_module = gm_serializer.serialize(exported_program.graph_module)\n    serialized_range_constraints = serialize_range_constraints(exported_program.range_constraints)\n    constants: dict[str, Any] = gm_serializer.custom_objs.copy()\n    for n, t in exported_program.constants.items():\n        assert n not in constants\n        constants[n] = t\n    serialized_ep = ExportedProgram(graph_module=serialized_graph_module, opset_version=self.opset_version, range_constraints=serialized_range_constraints, schema_version=SchemaVersion(major=SCHEMA_VERSION[0], minor=SCHEMA_VERSION[1]), verifiers=[v.dialect for v in exported_program.verifiers], torch_version=torch.__version__)\n    canonicalize(serialized_ep, set(constants.keys()))\n    new_state_dict = remove_proxy_from_state_dict(exported_program.state_dict, in_place=False)\n    return _SerializedProgram(serialized_ep, serialize_torch_artifact(new_state_dict, self.pickle_protocol), serialize_torch_artifact(constants, self.pickle_protocol), serialize_torch_artifact(exported_program.example_inputs, self.pickle_protocol))",
    "docstring": "Args: exported_program: Exported Program to serialize",
    "type": "method",
    "file_path": "pytorch\\torch\\_export\\serde\\serialize.py",
    "ast_data": "FunctionDef name:serialize arg:self arg:exported_program arguments arg arg Call Assign Call Assign Call Assign Call Call For Call Compare Assign Assign Call Call Call Call Call Assign Call Return return:yes Call Call Call Call"
  },
  {
    "library": "scikit-learn",
    "name": "transform",
    "source_code": "def transform(self, X):\n    check_is_fitted(self)\n    X = validate_data(self, X, reset=False)\n    Ih = np.eye(len(self.components_))\n    X_transformed = X - self.mean_\n    Wpsi = self.components_ / self.noise_variance_\n    cov_z = linalg.inv(Ih + np.dot(Wpsi, self.components_.T))\n    tmp = np.dot(X_transformed, Wpsi.T)\n    X_transformed = np.dot(tmp, cov_z)\n    return X_transformed",
    "docstring": "Apply dimensionality reduction to X using the model. Compute the expected mean of the latent variables. See Barber, 21.2.33 (or Bishop, 12.66). Parameters ---------- X : array-like of shape (n_samples, n_features) Training data. Returns ------- X_new : ndarray of shape (n_samples, n_components) The latent variables of X.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\decomposition\\_factor_analysis.py",
    "ast_data": "FunctionDef name:transform arg:self arg:X arguments arg arg Call Assign Call Assign Call Call Assign Assign Assign Call Call Assign Call Assign Call Return return:yes"
  },
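A short usage sketch for the `FactorAnalysis.transform` entry above, on synthetic data (real use would have correlated features):

```python
import numpy as np
from sklearn.decomposition import FactorAnalysis

rng = np.random.RandomState(0)
X = rng.randn(200, 6)  # toy data for shape illustration only

fa = FactorAnalysis(n_components=2, random_state=0).fit(X)
Z = fa.transform(X)  # expected posterior mean of the latent variables
print(Z.shape)       # (200, 2)
```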
  {
    "library": "matplotlib",
    "name": "_decade_greater",
    "source_code": "def _decade_greater(x, base):\n    if x < 0:\n        return -_decade_less(-x, base)\n    greater = _decade_greater_equal(x, base)\n    if greater == x:\n        greater *= base\n    return greater",
    "docstring": "Return the smallest integer power of *base* that's greater than *x*. If *x* is negative, the exponent will be *smaller*.",
    "type": "function",
    "file_path": "matplotlib\\lib\\matplotlib\\ticker.py",
    "ast_data": "FunctionDef name:_decade_greater arg:x arg:base arguments arg arg If Compare Return return:yes Call Assign Call If Compare Return return:yes"
  },
  {
    "library": "pandas",
    "name": "construct_from_string",
    "source_code": "@classmethod\ndef construct_from_string(cls, string: str) -> Self:\n    if not isinstance(string, str):\n        raise TypeError(f\"'construct_from_string' expects a string, got {type(string)}\")\n    assert isinstance(cls.name, str), (cls, type(cls.name))\n    if string != cls.name:\n        raise TypeError(f\"Cannot construct a '{cls.__name__}' from '{string}'\")\n    return cls()",
    "docstring": "Construct this type from a string. This is useful mainly for data types that accept parameters. For example, a period dtype accepts a frequency parameter that can be set as ``. Returns ------- ExtensionDtype Instance of the dtype. Raises ------ TypeError If a class cannot be constructed from this 'string'. Examples -------- For extension dtypes with arguments the following may be an adequate implementation. >>> import re >>> @classmethod ... def construct_from_string(cls, string): ... pattern = re.compile(r\"^my_type\\[(?P.+)\\]$\") ... match = pattern.match(string) ... if match: ... return cls(**match.groupdict()) ... else: ... raise TypeError( ... f\"Cannot construct a '{cls.__name__}' from '{string}'\" ... )",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\dtypes\\base.py",
    "ast_data": "FunctionDef name:construct_from_string arg:cls arg:string arguments arg arg If Call Raise Call Call Call Call If Compare Raise Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_pop_writer",
    "source_code": "def _pop_writer(self):\n    if self.update_freq == 'epoch':\n        return\n    previous_context = self._prev_summary_state.pop()\n    previous_context[1].__exit__(*sys.exc_info())\n    previous_context[0].__exit__(*sys.exc_info())",
    "docstring": "Pops the current writer.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\callbacks.py",
    "ast_data": "FunctionDef name:_pop_writer arg:self arguments arg If Compare Return return:no Assign Call Call Call Call Call"
  },
  {
    "library": "matplotlib",
    "name": "_process_args",
    "source_code": "def _process_args(self, *args, **kwargs):\n    if isinstance(args[0], TriContourSet):\n        C = args[0]._contour_generator\n        if self.levels is None:\n            self.levels = args[0].levels\n        self.zmin = args[0].zmin\n        self.zmax = args[0].zmax\n        self._mins = args[0]._mins\n        self._maxs = args[0]._maxs\n    else:\n        from matplotlib import _tri\n        tri, z = self._contour_args(args, kwargs)\n        C = _tri.TriContourGenerator(tri.get_cpp_triangulation(), z)\n        self._mins = [tri.x.min(), tri.y.min()]\n        self._maxs = [tri.x.max(), tri.y.max()]\n    self._contour_generator = C\n    return kwargs",
    "docstring": "Process args and kwargs.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\tri\\_tricontour.py",
    "ast_data": "FunctionDef name:_process_args arg:self arguments arg arg arg If Call Assign If Compare Assign Assign Assign Assign Assign Assign Call Assign Call Call Assign Call Call Assign Call Call Assign Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "add_leaf_node",
    "source_code": "def add_leaf_node(self, leaf_node: _LeafNode) -> None:\n    if self.is_same_module_as(leaf_node) or leaf_node.fx_op == 'call_module':\n        self._nodes.append(leaf_node)\n    elif leaf_node.fx_op == 'placeholder':\n        self._nodes.append(leaf_node)\n    elif self.is_parent_module_of(leaf_node):\n        last_node = self._nodes[-1] if self._nodes else None\n        if isinstance(last_node, _ModuleNode) and (last_node.is_parent_module_of(leaf_node) or last_node.is_same_module_as(leaf_node)):\n            last_node.add_leaf_node(leaf_node)\n        else:\n            stack_meta = copy.deepcopy(self.stack_meta)\n            stack_meta.push(leaf_node.stack_meta[len(self.stack_meta)])\n            last_node = _ModuleNode(self._reference_module, stack_meta)\n            self._nodes.append(last_node)\n            last_node.add_leaf_node(leaf_node)\n    else:\n        raise AssertionError(f'Node {leaf_node} ({leaf_node.stack_meta}) does not belong to module {self._stack_meta}.')",
    "docstring": "Adds a leaf node to the module. The leaf node must belong to the same or a child module. This method will recursively construct _ModuleNode instance based on the stack_meta information of the leaf node.",
    "type": "method",
    "file_path": "pytorch\\torch\\onnx\\_internal\\fx\\passes\\modularization.py",
    "ast_data": "FunctionDef name:add_leaf_node arg:self arg:leaf_node arguments arg arg If BoolOp Call Compare Call If Compare Call If Call Assign If BoolOp Call BoolOp Call Call Call Assign Call Call Call Assign Call Call Call Raise Call"
  },
  {
    "library": "tensorflow",
    "name": "_ReuseMode",
    "source_code": "class _ReuseMode(enum.Enum):\n    AUTO_REUSE = 1",
    "docstring": "Mode for variable access within a variable scope.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\variable_scope.py",
    "ast_data": "ClassDef name:_ReuseMode Assign"
  },
  {
    "library": "scipy",
    "name": "rankdata",
    "source_code": "def rankdata(data, axis=None, use_missing=False):\n\n    def _rank1d(data, use_missing=False):\n        n = data.count()\n        rk = np.empty(data.size, dtype=float)\n        idx = data.argsort()\n        rk[idx[:n]] = np.arange(1, n + 1)\n        if use_missing:\n            rk[idx[n:]] = (n + 1) / 2.0\n        else:\n            rk[idx[n:]] = 0\n        repeats = find_repeats(data.copy())\n        for r in repeats[0]:\n            condition = (data == r).filled(False)\n            rk[condition] = rk[condition].mean()\n        return rk\n    data = ma.array(data, copy=False)\n    if axis is None:\n        if data.ndim > 1:\n            return _rank1d(data.ravel(), use_missing).reshape(data.shape)\n        else:\n            return _rank1d(data, use_missing)\n    else:\n        return ma.apply_along_axis(_rank1d, axis, data, use_missing).view(ndarray)",
    "docstring": "Returns the rank (also known as order statistics) of each data point along the given axis. If some values are tied, their rank is averaged. If some values are masked, their rank is set to 0 if use_missing is False, or set to the average rank of the unmasked values if use_missing is True. Parameters ---------- data : sequence Input data. The data is transformed to a masked array axis : {None,int}, optional Axis along which to perform the ranking. If None, the array is first flattened. An exception is raised if the axis is specified for arrays with a dimension larger than 2 use_missing : bool, optional Whether the masked values have a rank of 0 (False) or equal to the average rank of the unmasked values (True).",
    "type": "function",
    "file_path": "scipy\\scipy\\stats\\_mstats_basic.py",
    "ast_data": "FunctionDef name:rankdata arg:data arg:axis arg:use_missing arguments arg arg arg FunctionDef name:_rank1d arg:data arg:use_missing arguments arg arg Assign Call Assign Call Assign Call Assign Call If Assign Assign Assign Call Call For Assign Call Compare Assign Call Return return:yes Assign Call If Compare If Compare Return return:yes Call Call Call Return return:yes Call Return return:yes Call Call"
  },
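A small sketch of the masked-value behavior described in the `rankdata` entry above, using a toy masked array:

```python
import numpy.ma as ma
from scipy.stats.mstats import rankdata

x = ma.array([40, 10, 30, 10, 50], mask=[0, 0, 0, 0, 1])  # last value masked

# Tied 10s share the average rank 1.5; the masked entry gets rank 0 by default
print(rankdata(x))                    # [4.  1.5 3.  1.5 0. ]
# ...or the average rank (n + 1) / 2 of the unmasked values when requested
print(rankdata(x, use_missing=True))  # [4.  1.5 3.  1.5 2.5]
```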
  {
    "library": "matplotlib",
    "name": "set_fontstretch",
    "source_code": "def set_fontstretch(self, stretch):\n    self._fontproperties.set_stretch(stretch)\n    self.stale = True",
    "docstring": "Set the font stretch (horizontal condensation or expansion). Parameters ---------- stretch : {a numeric value in range 0-1000, 'ultra-condensed', 'extra-condensed', 'condensed', 'semi-condensed', 'normal', 'semi-expanded', 'expanded', 'extra-expanded', 'ultra-expanded'} See Also -------- .font_manager.FontProperties.set_stretch",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\text.py",
    "ast_data": "FunctionDef name:set_fontstretch arg:self arg:stretch arguments arg arg Call Assign"
  },
  {
    "library": "scikit-learn",
    "name": "dump",
    "source_code": "def dump(obj, fp):\n    encoder = ArffEncoder()\n    generator = encoder.iter_encode(obj)\n    last_row = next(generator)\n    for row in generator:\n        fp.write(last_row + '\\n')\n        last_row = row\n    fp.write(last_row)\n    return fp",
    "docstring": "Serialize an object representing the ARFF document to a given file-like object. :param obj: a dictionary. :param fp: a file-like object.",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\externals\\_arff.py",
    "ast_data": "FunctionDef name:dump arg:obj arg:fp arguments arg arg Assign Call Assign Call Assign Call For Call Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "is_local_mode",
    "source_code": "def is_local_mode() -> bool:\n    return not jobs()",
    "docstring": "Returns true if DTensor shall run in local mode.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\dtensor\\python\\config.py",
    "ast_data": "FunctionDef name:is_local_mode arguments Return return:yes Call"
  },
  {
    "library": "kornia",
    "name": "nerf_model",
    "source_code": "@property\ndef nerf_model(self) -> Module | None:\n    return self._nerf_model",
    "docstring": "Returns the NeRF model.",
    "type": "method",
    "file_path": "kornia\\kornia\\nerf\\nerf_solver.py",
    "ast_data": "FunctionDef name:nerf_model arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, path):\n    if isinstance(path, os.PathLike):\n        path = os.fspath(path)\n    with ops.init_scope(), ops.device('CPU'):\n        self._path = ops.convert_to_tensor(path, dtype=dtypes.string, name='asset_path')",
    "docstring": "Record the full path to the asset.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\trackable\\asset.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:path arguments arg arg If Call Assign Call With Call Call Assign Call"
  },
  {
    "library": "pytorch",
    "name": "wait_tensor",
    "source_code": "def wait_tensor(tensor):\n    return torch.ops._c10d_functional.wait_tensor(tensor)",
    "docstring": "Wait on a tensor returned by the collectives ops. Waiting follows device semantics, which means blocking on CPU and synchronizing streams on CUDA.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\_functional_collectives.py",
    "ast_data": "FunctionDef name:wait_tensor arg:tensor arguments arg Return return:yes Call"
  },
  {
    "library": "kornia",
    "name": "origin",
    "source_code": "@property\ndef origin(self) -> Tensor:\n    return self._origin",
    "docstring": "Return the line origin point.",
    "type": "method",
    "file_path": "kornia\\kornia\\geometry\\line.py",
    "ast_data": "FunctionDef name:origin arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "xw_plus_b",
    "source_code": "@tf_export(v1=['nn.xw_plus_b'])\n@dispatch.add_dispatch_support\ndef xw_plus_b(x, weights, biases, name=None):\n    with ops.name_scope(name, 'xw_plus_b', [x, weights, biases]) as name:\n        x = ops.convert_to_tensor(x, name='x')\n        weights = ops.convert_to_tensor(weights, name='weights')\n        biases = ops.convert_to_tensor(biases, name='biases')\n        mm = math_ops.matmul(x, weights)\n        return bias_add(mm, biases, name=name)",
    "docstring": "Computes matmul(x, weights) + biases. Args: x: a 2D tensor. Dimensions typically: batch, in_units weights: a 2D tensor. Dimensions typically: in_units, out_units biases: a 1D tensor. Dimensions: out_units name: A name for the operation (optional). If not specified \"xw_plus_b\" is used. Returns: A 2-D Tensor computing matmul(x, weights) + biases. Dimensions typically: batch, out_units.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\nn_ops.py",
    "ast_data": "FunctionDef name:xw_plus_b arg:x arg:weights arg:biases arg:name arguments arg arg arg arg With Call Assign Call Assign Call Assign Call Assign Call Return return:yes Call Call"
  },
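A minimal sketch of `xw_plus_b` above via its `tf.compat.v1` export, with toy shapes matching the docstring's conventions:

```python
import tensorflow as tf

x = tf.constant([[1.0, 2.0]])    # batch=1, in_units=2
w = tf.constant([[3.0], [4.0]])  # in_units=2, out_units=1
b = tf.constant([0.5])           # out_units=1

y = tf.compat.v1.nn.xw_plus_b(x, w, b)  # matmul(x, w) + b
print(y.numpy())  # [[11.5]] = 1*3 + 2*4 + 0.5
```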
  {
    "library": "pandas",
    "name": "set_ordered",
    "source_code": "def set_ordered(self, value: bool) -> Self:\n    new_dtype = CategoricalDtype(self.categories, ordered=value)\n    cat = self.copy()\n    NDArrayBacked.__init__(cat, cat._ndarray, new_dtype)\n    return cat",
    "docstring": "Set the ordered attribute to the boolean value. Parameters ---------- value : bool Set whether this categorical is ordered (True) or not (False).",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\arrays\\categorical.py",
    "ast_data": "FunctionDef name:set_ordered arg:self arg:value arguments arg arg Assign Call Assign Call Call Return return:yes"
  },
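A short sketch of `Categorical.set_ordered` above; note it returns a new categorical rather than mutating in place:

```python
import pandas as pd

cat = pd.Categorical(["b", "a", "c"], categories=["a", "b", "c"])
ordered = cat.set_ordered(True)  # returns a new Categorical; cat is unchanged

print(cat.ordered, ordered.ordered)  # False True
print(ordered.min())                 # 'a' -- order-based operations now work
```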
  {
    "library": "pytorch",
    "name": "insert",
    "source_code": "def insert(self, index: int, module: Module) -> Self:\n    if not isinstance(module, Module):\n        raise AssertionError(f'module should be of type: {Module}')\n    n = len(self._modules)\n    if not -n <= index <= n:\n        raise IndexError(f'Index out of range: {index}')\n    if index < 0:\n        index += n\n    for i in range(n, index, -1):\n        self._modules[str(i)] = self._modules[str(i - 1)]\n    self._modules[str(index)] = module\n    return self",
    "docstring": "Inserts a module into the Sequential container at the specified index. Args: index (int): The index to insert the module. module (Module): The module to be inserted. Example:: >>> import torch.nn as nn >>> n = nn.Sequential(nn.Linear(1, 2), nn.Linear(2, 3)) >>> n.insert(0, nn.Linear(3, 4)) Sequential( (0): Linear(in_features=3, out_features=4, bias=True) (1): Linear(in_features=1, out_features=2, bias=True) (2): Linear(in_features=2, out_features=3, bias=True) )",
    "type": "method",
    "file_path": "pytorch\\torch\\nn\\modules\\container.py",
    "ast_data": "FunctionDef name:insert arg:self arg:index arg:module arguments arg arg arg If Call Raise Call Assign Call If Compare Raise Call If Compare For Call Assign Call Call Assign Call Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "_parallel_pairwise",
    "source_code": "def _parallel_pairwise(X, Y, func, n_jobs, **kwds):\n    if Y is None:\n        Y = X\n    X, Y, dtype = _return_float_dtype(X, Y)\n    if effective_n_jobs(n_jobs) == 1:\n        return func(X, Y, **kwds)\n    fd = delayed(_dist_wrapper)\n    ret = np.empty((X.shape[0], Y.shape[0]), dtype=dtype, order='F')\n    Parallel(backend='threading', n_jobs=n_jobs)((fd(func, ret, s, X, Y[s], **kwds) for s in gen_even_slices(_num_samples(Y), effective_n_jobs(n_jobs))))\n    if (X is Y or Y is None) and func is euclidean_distances:\n        np.fill_diagonal(ret, 0)\n    return ret",
    "docstring": "Break the pairwise matrix in n_jobs even slices and compute them using multithreading.",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\metrics\\pairwise.py",
    "ast_data": "FunctionDef name:_parallel_pairwise arg:X arg:Y arg:func arg:n_jobs arguments arg arg arg arg arg If Compare Assign Assign Call If Compare Call Return return:yes Call Assign Call Assign Call Call Call Call Call Call Call If BoolOp BoolOp Compare Compare Compare Call Return return:yes"
  },
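`_parallel_pairwise` is private; the public entry point that dispatches to it is `pairwise_distances` with `n_jobs` set. A sketch on toy data:

```python
import numpy as np
from sklearn.metrics import pairwise_distances

X = np.random.RandomState(0).randn(100, 5)

# n_jobs > 1 routes through the threaded, slice-wise helper above
D = pairwise_distances(X, metric="euclidean", n_jobs=2)
print(D.shape)             # (100, 100)
print(D.diagonal().max())  # 0.0 -- the diagonal is zeroed when X is Y
```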
  {
    "library": "scikit-learn",
    "name": "error_norm",
    "source_code": "def error_norm(self, comp_cov, norm='frobenius', scaling=True, squared=True):\n    error = comp_cov - self.covariance_\n    if norm == 'frobenius':\n        squared_norm = np.sum(error ** 2)\n    elif norm == 'spectral':\n        squared_norm = np.amax(linalg.svdvals(np.dot(error.T, error)))\n    else:\n        raise NotImplementedError('Only spectral and frobenius norms are implemented')\n    if scaling:\n        squared_norm = squared_norm / error.shape[0]\n    if squared:\n        result = squared_norm\n    else:\n        result = np.sqrt(squared_norm)\n    return result",
    "docstring": "Compute the Mean Squared Error between two covariance estimators. Parameters ---------- comp_cov : array-like of shape (n_features, n_features) The covariance to compare with. norm : {\"frobenius\", \"spectral\"}, default=\"frobenius\" The type of norm used to compute the error. Available error types: - 'frobenius' (default): sqrt(tr(A^t.A)) - 'spectral': sqrt(max(eigenvalues(A^t.A)) where A is the error `selfcomp_cov` covariance estimators.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\covariance\\_empirical_covariance.py",
    "ast_data": "FunctionDef name:error_norm arg:self arg:comp_cov arg:norm arg:scaling arg:squared arguments arg arg arg arg arg Assign If Compare Assign Call If Compare Assign Call Call Call Raise Call If Assign If Assign Assign Call Return return:yes"
  },
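A usage sketch for `error_norm` above, comparing a fitted empirical covariance against the identity (the true covariance of the synthetic data):

```python
import numpy as np
from sklearn.covariance import EmpiricalCovariance

rng = np.random.RandomState(0)
X = rng.randn(500, 3)  # standard normal, so the true covariance is eye(3)

cov = EmpiricalCovariance().fit(X)

print(cov.error_norm(np.eye(3)))                                  # Frobenius, squared, scaled
print(cov.error_norm(np.eye(3), norm="spectral", squared=False))  # spectral norm instead
```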
  {
    "library": "tensorflow",
    "name": "_without_tensor_names",
    "source_code": "def _without_tensor_names(self) -> 'TensorSpec':\n    if self.name is None:\n        return self\n    else:\n        return TensorSpec(self.shape, self.dtype)",
    "docstring": "Returns a version of with the name removed.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\tensor.py",
    "ast_data": "FunctionDef name:_without_tensor_names arg:self arguments arg If Compare Return return:yes Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "get_sample_data",
    "source_code": "def get_sample_data(fname, asfileobj=True):\n    path = _get_data_path('sample_data', fname)\n    if asfileobj:\n        suffix = path.suffix.lower()\n        if suffix == '.gz':\n            return gzip.open(path)\n        elif suffix in ['.npy', '.npz']:\n            return np.load(path)\n        elif suffix in ['.csv', '.xrc', '.txt']:\n            return path.open('r')\n        else:\n            return path.open('rb')\n    else:\n        return str(path)",
    "docstring": "Return a sample data file. *fname* is a path relative to the :file: directory. If *asfileobj* is return a file object, otherwise just a file path. Sample data files are stored in the 'mpl-data/sample_data' directory within the Matplotlib package. If the filename ends in .gz, the file is implicitly ungzipped. If the filename ends with .npy or .npz, and *asfileobj* is , the file is loaded with .",
    "type": "function",
    "file_path": "matplotlib\\lib\\matplotlib\\cbook.py",
    "ast_data": "FunctionDef name:get_sample_data arg:fname arg:asfileobj arguments arg arg Assign Call If Assign Call If Compare Return return:yes Call If Compare Return return:yes Call If Compare Return return:yes Call Return return:yes Call Return return:yes Call"
  },
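A short sketch of `get_sample_data` above; it assumes `msft.csv` is still bundled in Matplotlib's sample data (it has been historically):

```python
import matplotlib.cbook as cbook

# .csv files are opened in text mode when asfileobj=True (the default)
with cbook.get_sample_data("msft.csv") as f:
    print(f.readline().strip())  # header row of the sample CSV

# asfileobj=False returns the path as a plain string instead
print(cbook.get_sample_data("msft.csv", asfileobj=False))
```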
  {
    "library": "tensorflow",
    "name": "_make_key_func",
    "source_code": "def _make_key_func(self, key_func, input_dataset):\n    self._key_func = structured_function.StructuredFunctionWrapper(key_func, self._transformation_name(), dataset=input_dataset)\n    if not self._key_func.output_structure.is_compatible_with(tensor_spec.TensorSpec([], dtypes.int64)):\n        raise ValueError(f'Invalid `key_func`. Expected `key_func` to return a scalar tf.int64 tensor, but instead `key_func` has output types={self._key_func.output_types} and shapes={self._key_func.output_shapes}.')",
    "docstring": "Make wrapping defun for key_func.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\data\\experimental\\ops\\grouping.py",
    "ast_data": "FunctionDef name:_make_key_func arg:self arg:key_func arg:input_dataset arguments arg arg arg Assign Call Call If Call Call Raise Call"
  },
  {
    "library": "tensorflow",
    "name": "from_config",
    "source_code": "@classmethod\ndef from_config(cls, config, custom_objects=None, columns_by_name=None):\n    from tensorflow.python.feature_column.serialization import deserialize_feature_column\n    _check_config_keys(config, cls._fields)\n    kwargs = _standardize_and_copy_config(config)\n    kwargs['categorical_column'] = deserialize_feature_column(config['categorical_column'], custom_objects, columns_by_name)\n    return cls(**kwargs)",
    "docstring": "See 'FeatureColumn` base class.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\feature_column\\feature_column_v2.py",
    "ast_data": "FunctionDef name:from_config arg:cls arg:config arg:custom_objects arg:columns_by_name arguments arg arg arg arg Call Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "ReduceOp",
    "source_code": "@tf_export('distribute.ReduceOp')\nclass ReduceOp(enum.Enum):\n    SUM = 'SUM'\n    MEAN = 'MEAN'\n\n    @staticmethod\n    def from_variable_aggregation(aggregation):\n        mapping = {variable_scope.VariableAggregation.SUM: ReduceOp.SUM, variable_scope.VariableAggregation.MEAN: ReduceOp.MEAN}\n        reduce_op = mapping.get(aggregation)\n        if not reduce_op:\n            raise ValueError('Could not convert from `tf.VariableAggregation` %s to`tf.distribute.ReduceOp` type' % aggregation)\n        return reduce_op",
    "docstring": "Indicates how a set of values should be reduced. * : Add all the values. * : Take the arithmetic mean (\"average\") of the values.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\reduce_util.py",
    "ast_data": "ClassDef name:ReduceOp Assign Assign FunctionDef name:from_variable_aggregation arg:aggregation arguments arg Assign Assign Call If Raise Call Return return:yes Call"
  },
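A minimal sketch of the `ReduceOp` enum above, including the aggregation-mapping helper:

```python
import tensorflow as tf

print(tf.distribute.ReduceOp.SUM)  # ReduceOp.SUM

# Map a variable-aggregation mode to the matching reduce op
op = tf.distribute.ReduceOp.from_variable_aggregation(tf.VariableAggregation.MEAN)
print(op)  # ReduceOp.MEAN
```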
  {
    "library": "authlib",
    "name": "validate_azp",
    "source_code": "def validate_azp(self):\n    aud = self.get('aud')\n    client_id = self.params.get('client_id')\n    required = False\n    if aud and client_id:\n        if isinstance(aud, list) and len(aud) == 1:\n            aud = aud[0]\n        if aud != client_id:\n            required = True\n    azp = self.get('azp')\n    if required and (not azp):\n        raise MissingClaimError('azp')\n    if azp and client_id and (azp != client_id):\n        raise InvalidClaimError('azp')",
    "docstring": "OPTIONAL. Authorized party - the party to which the ID Token was issued. If present, it MUST contain the OAuth 2.0 Client ID of this party. This Claim is only needed when the ID Token has a single audience value and that audience is different than the authorized party. It MAY be included even when the authorized party is the same as the sole audience. The azp value is a case sensitive string containing a StringOrURI value.",
    "type": "method",
    "file_path": "authlib\\authlib\\oidc\\core\\claims.py",
    "ast_data": "FunctionDef name:validate_azp arg:self arguments arg Assign Call Assign Call Assign If BoolOp If BoolOp Call Compare Call Assign If Compare Assign Assign Call If BoolOp Raise Call If BoolOp Compare Raise Call"
  },
  {
    "library": "tensorflow",
    "name": "_enforce_names_consistency",
    "source_code": "def _enforce_names_consistency(specs):\n\n    def _has_name(spec):\n        return hasattr(spec, 'name') and spec.name is not None\n\n    def _clear_name(spec):\n        spec = copy.deepcopy(spec)\n        if hasattr(spec, 'name'):\n            spec._name = None\n        return spec\n    flat_specs = nest.flatten(specs)\n    name_inconsistency = any((_has_name(s) for s in flat_specs)) and (not all((_has_name(s) for s in flat_specs)))\n    if name_inconsistency:\n        specs = nest.map_structure(_clear_name, specs)\n    return specs",
    "docstring": "Enforces that either all specs have names or none do.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\saving\\saving_utils.py",
    "ast_data": "FunctionDef name:_enforce_names_consistency arg:specs arguments arg FunctionDef name:_has_name arg:spec arguments arg Return return:yes BoolOp Call Compare FunctionDef name:_clear_name arg:spec arguments arg Assign Call If Call Assign Return return:yes Assign Call Assign BoolOp Call Call Call Call If Assign Call Return return:yes"
  },
  {
    "library": "sphinx",
    "name": "visit_AnnAssign",
    "source_code": "def visit_AnnAssign(self, node: ast.AnnAssign) -> None:\n    self.visit_Assign(node)",
    "docstring": "Handles AnnAssign node and pick up a variable comment.",
    "type": "method",
    "file_path": "sphinx\\sphinx\\pycode\\parser.py",
    "ast_data": "FunctionDef name:visit_AnnAssign arg:self arg:node arguments arg arg Call"
  },
  {
    "library": "pytorch",
    "name": "_prepare_video",
    "source_code": "def _prepare_video(V):\n    b, t, c, h, w = V.shape\n    if V.dtype == np.uint8:\n        V = np.float32(V) / 255.0\n\n    def is_power2(num):\n        return num != 0 and num & num - 1 == 0\n    if not is_power2(V.shape[0]):\n        len_addition = int(2 ** V.shape[0].bit_length() - V.shape[0])\n        V = np.concatenate((V, np.zeros(shape=(len_addition, t, c, h, w))), axis=0)\n    n_rows = 2 ** ((b.bit_length() - 1) // 2)\n    n_cols = V.shape[0] // n_rows\n    V = np.reshape(V, newshape=(n_rows, n_cols, t, c, h, w))\n    V = np.transpose(V, axes=(2, 0, 4, 1, 5, 3))\n    V = np.reshape(V, newshape=(t, n_rows * h, n_cols * w, c))\n    return V",
    "docstring": "Convert a 5D tensor into 4D tensor. Convesrion is done from [batchsize, time(frame), channel(color), height, width] (5D tensor) to [time(frame), new_width, new_height, channel] (4D tensor). A batch of images are spreaded to a grid, which forms a frame. e.g. Video with batchsize 16 will have a 4x4 grid.",
    "type": "function",
    "file_path": "pytorch\\torch\\utils\\tensorboard\\_utils.py",
    "ast_data": "FunctionDef name:_prepare_video arg:V arguments arg Assign If Compare Assign Call FunctionDef name:is_power2 arg:num arguments arg Return return:yes BoolOp Compare Compare If Call Assign Call Call Assign Call Call Assign Call Assign Assign Call Assign Call Assign Call Return return:yes"
  },
  {
    "library": "django",
    "name": "unget",
    "source_code": "def unget(self, bytes):\n    if not bytes:\n        return\n    self._update_unget_history(len(bytes))\n    self.position -= len(bytes)\n    self._leftover = bytes + self._leftover",
    "docstring": "Place bytes back onto the front of the lazy stream. Future calls to read() will return those bytes first. The stream position and thus tell() will be rewound.",
    "type": "method",
    "file_path": "django\\django\\http\\multipartparser.py",
    "ast_data": "FunctionDef name:unget arg:self arg:bytes arguments arg arg If Return return:no Call Call Call Assign"
  },
  {
    "library": "scikit-learn",
    "name": "probA_",
    "source_code": "@property\ndef probA_(self):\n    return self._probA",
    "docstring": "Parameter learned in Platt scaling when . Returns ------- ndarray of shape (n_classes * (n_classes - 1) / 2)",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\svm\\_base.py",
    "ast_data": "FunctionDef name:probA_ arg:self arguments arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "prepare_detailed_calibration",
    "source_code": "def prepare_detailed_calibration(self) -> GraphModule:\n    if self._prepared_flag:\n        raise ValueError('Already ran preparing detailed callibration. Run the report generation next after callibration.')\n    insert_observers_fqns: dict[str, Any] = {}\n    for detector in self._desired_report_detectors:\n        obs_fqn_to_info = detector.determine_observer_insert_points(self._model)\n        insert_observers_fqns.update(obs_fqn_to_info)\n        self._detector_name_to_observer_fqns[detector.get_detector_name()] = set(obs_fqn_to_info.keys())\n    for observer_fqn in insert_observers_fqns:\n        target_node = insert_observers_fqns[observer_fqn][DETECTOR_TARGET_NODE_KEY]\n        insert_obs = insert_observers_fqns[observer_fqn][DETECTOR_OBS_TO_INSERT_KEY]\n        insert_post = insert_observers_fqns[observer_fqn][DETECTOR_IS_POST_OBS_KEY]\n        observer_args = insert_observers_fqns[observer_fqn][DETECTOR_OBS_ARGS_KEY]\n        self._insert_observer_around_module(observer_fqn, target_node, insert_obs, observer_args, insert_post)\n    self._prepared_flag = True\n    return self._model",
    "docstring": "Takes in a graph model and inserts the following observers: - ModelReportObserver Each observer is inserted based on the desired_reports into the relevant locations Right now, each report in self._desired_detector_names has independent insertions However, if a module already has a Observer of the same type, the insertion will not occur This is because all of the same type of Observer collect same information, so redundant Returns the same GraphModule with the observers inserted",
    "type": "method",
    "file_path": "pytorch\\torch\\ao\\quantization\\fx\\_model_report\\model_report.py",
    "ast_data": "FunctionDef name:prepare_detailed_calibration arg:self arguments arg If Raise Call For Assign Call Call Assign Call Call Call For Assign Assign Assign Assign Call Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "colocate_with",
    "source_code": "@tf_contextlib.contextmanager\ndef colocate_with(self, op, ignore_existing=False) -> Iterator[None]:\n    if op is None and (not ignore_existing):\n        raise ValueError('Trying to reset colocation (op is None) but ignore_existing is not True')\n    op, device_only_candidate = _op_to_colocate_with(op, self)\n    device_fn_tmp = self._device_function_stack\n    self._device_function_stack = traceable_stack.TraceableStack()\n    if ignore_existing:\n        current_stack = self._colocation_stack\n        self._colocation_stack = traceable_stack.TraceableStack()\n    if op is not None:\n        self._colocation_stack.push_obj(op, offset=4)\n        if device_only_candidate is not None:\n            self._colocation_stack.push_obj(device_only_candidate, offset=4)\n    elif not ignore_existing:\n        raise ValueError('Trying to reset colocation (op is None) but ignore_existing is not True')\n    try:\n        yield\n    finally:\n        self._device_function_stack = device_fn_tmp\n        if op is not None:\n            self._colocation_stack.pop_obj()\n            if device_only_candidate is not None:\n                self._colocation_stack.pop_obj()\n        if ignore_existing:\n            self._colocation_stack = current_stack",
    "docstring": "Returns a context manager that specifies an op to colocate with. Note: this function is not for public use, only for internal libraries. For example: and will always be colocated with , no matter where is eventually placed. **NOTE** Using a colocation scope resets any existing device constraints. If is then must be and the new scope resets all colocation and device constraints. Args: op: The op to colocate all created ops with, or . ignore_existing: If true, only applies colocation of this op within the context, rather than applying all colocation properties on the stack. If is , this value must be . Raises: ValueError: if op is None but ignore_existing is False. Yields: A context manager that specifies the op with which to colocate newly created ops.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\ops.py",
    "ast_data": "FunctionDef name:colocate_with arg:self arg:op arg:ignore_existing arguments arg arg arg If BoolOp Compare Raise Call Assign Call Assign Assign Call If Assign Assign Call If Compare Call If Compare Call If Raise Call Try Assign If Compare Call If Compare Call If Assign"
  },
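A small graph-mode sketch of `colocate_with` above; it assumes the TF2 `tf.Graph` API, on which this internal method is defined:

```python
import tensorflow as tf

g = tf.Graph()
with g.as_default():
    a = tf.constant(1.0, name="a")
    with g.colocate_with(a.op):
        b = tf.constant(2.0, name="b")  # created inside the colocation scope

# b carries a colocation constraint pointing at a
print(b.op.colocation_groups())  # [b'loc:@a']
```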
  {
    "library": "pandas",
    "name": "_parser_dispatch",
    "source_code": "def _parser_dispatch(flavor: HTMLFlavors | None) -> type[_HtmlFrameParser]:\n    valid_parsers = list(_valid_parsers.keys())\n    if flavor not in valid_parsers:\n        raise ValueError(f'{flavor!r} is not a valid flavor, valid flavors are {valid_parsers}')\n    if flavor in ('bs4', 'html5lib'):\n        import_optional_dependency('html5lib')\n        import_optional_dependency('bs4')\n    else:\n        import_optional_dependency('lxml.etree')\n    return _valid_parsers[flavor]",
    "docstring": "Choose the parser based on the input flavor. Parameters ---------- flavor : {{\"lxml\", \"html5lib\", \"bs4\"}} or None The type of parser to use. This must be a valid backend. Returns ------- cls : _HtmlFrameParser subclass The parser class based on the requested input flavor. Raises ------ ValueError * If is not a valid backend. ImportError * If you do not have the requested",
    "type": "function",
    "file_path": "pandas\\pandas\\io\\html.py",
    "ast_data": "FunctionDef name:_parser_dispatch arg:flavor arguments arg Assign Call Call If Compare Raise Call If Compare Call Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "get_training_arg_index",
    "source_code": "def get_training_arg_index(call_fn):\n    argspec = tf_inspect.getfullargspec(call_fn)\n    if argspec.varargs:\n        if 'training' in argspec.kwonlyargs or argspec.varkw:\n            return -1\n        return None\n    else:\n        arg_list = argspec.args\n        if tf_inspect.ismethod(call_fn):\n            arg_list = arg_list[1:]\n        if 'training' in arg_list:\n            return arg_list.index('training')\n        elif 'training' in argspec.kwonlyargs or argspec.varkw:\n            return -1\n        return None",
    "docstring": "Returns the index of 'training' in the layer call function arguments. Args: call_fn: Call function. Returns: - n: index of 'training' in the call function arguments. - -1: if 'training' is not found in the arguments, but layer.call accepts variable keyword arguments - None: if layer doesn't expect a training argument.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\saving\\saved_model\\utils.py",
    "ast_data": "FunctionDef name:get_training_arg_index arg:call_fn arguments arg Assign Call If If BoolOp Compare Return return:yes Return return:no Assign If Call Assign If Compare Return return:yes Call If BoolOp Compare Return return:yes Return return:no"
  },
  {
    "library": "sphinx",
    "name": "IndexDirective",
    "source_code": "class IndexDirective(SphinxDirective):\n    has_content = False\n    required_arguments = 1\n    optional_arguments = 0\n    final_argument_whitespace = True\n    option_spec: ClassVar[OptionSpec] = {'name': directives.unchanged}\n\n    def run(self) -> list[Node]:\n        arguments = self.arguments[0].split('\\n')\n        if 'name' in self.options:\n            targetname = self.options['name']\n            targetnode = nodes.target('', '', names=[targetname])\n        else:\n            targetid = 'index-%s' % self.env.new_serialno('index')\n            targetnode = nodes.target('', '', ids=[targetid])\n        self.state.document.note_explicit_target(targetnode)\n        indexnode = addnodes.index()\n        indexnode['entries'] = []\n        indexnode['inline'] = False\n        self.set_source_info(indexnode)\n        for entry in arguments:\n            indexnode['entries'].extend(process_index_entry(entry, targetnode['ids'][0]))\n        return [indexnode, targetnode]",
    "docstring": "Directive to add entries to the index.",
    "type": "class",
    "file_path": "sphinx\\sphinx\\domains\\index.py",
    "ast_data": "ClassDef name:IndexDirective Assign Assign Assign Assign FunctionDef name:run arg:self arguments arg Assign Call If Compare Assign Assign Call Assign Call Assign Call Call Assign Call Assign Assign Call For Call Call Return return:yes"
  },
  {
    "library": "django",
    "name": "tuple",
    "source_code": "@property\ndef tuple(self):\n    return tuple((g.tuple for g in self))",
    "docstring": "Return a tuple of all the coordinates in this Geometry Collection",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\geos\\collections.py",
    "ast_data": "FunctionDef name:tuple arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "element_spec",
    "source_code": "@property\ndef element_spec(self):\n    if self._enable_get_next_as_optional and self._strategy.extended._in_multi_worker_mode():\n        return nest.map_structure(_rebatch_as_dynamic, self._element_spec, expand_composites=False)\n    return self._element_spec",
    "docstring": "The type specification of an element of this dataset.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\input_lib.py",
    "ast_data": "FunctionDef name:element_spec arg:self arguments arg If BoolOp Call Return return:yes Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "text",
    "source_code": "@_docstring.interpd\ndef text(self, x, y, s, fontdict=None, **kwargs):\n    effective_kwargs = {'transform': self.transSubfigure, **(fontdict if fontdict is not None else {}), **kwargs}\n    text = Text(x=x, y=y, text=s, **effective_kwargs)\n    text.set_figure(self)\n    text.stale_callback = _stale_figure_callback\n    self.texts.append(text)\n    text._remove_method = self.texts.remove\n    self.stale = True\n    return text",
    "docstring": "Add text to figure. Parameters ---------- x, y : float The position to place the text. By default, this is in figure coordinates, floats in [0, 1]. The coordinate system can be changed using the *transform* keyword. s : str The text string. fontdict : dict, optional A dictionary to override the default text properties. If not given, the defaults are determined by :rc:. Properties passed as *kwargs* override the corresponding ones given in *fontdict*. Returns ------- Other Parameters ---------------- **kwargs : properties Other miscellaneous text parameters. %(Text:kwdoc)s See Also -------- .Axes.text .pyplot.text",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\figure.py",
    "ast_data": "FunctionDef name:text arg:self arg:x arg:y arg:s arg:fontdict arguments arg arg arg arg arg arg Assign Compare Assign Call Call Assign Call Assign Assign Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "JointOutputResult",
    "source_code": "@dataclass(frozen=True)\nclass JointOutputResult:\n    grad_input: ComputedBuffer\n    captured_grads_compute: list[ComputedBuffer]\n    captured_grads: list[Optional[TensorBox]]\n    mutated_grads: list[TensorBox]",
    "docstring": "Results from processing joint outputs.",
    "type": "class",
    "file_path": "pytorch\\torch\\_inductor\\kernel\\flex_attention.py",
    "ast_data": "ClassDef name:JointOutputResult Call"
  },
  {
    "library": "tensorflow",
    "name": "extend_comp_items",
    "source_code": "def extend_comp_items(self, context_word, new_comp_items):\n    if context_word not in self._comp_dict:\n        raise KeyError('Context word \"%s\" has not been registered' % context_word)\n    self._comp_dict[context_word].extend(new_comp_items)\n    self._comp_dict[context_word] = sorted(self._comp_dict[context_word])",
    "docstring": "Add a list of completion items to a completion context. Args: context_word: A single completion word as a string. The extension will also apply to all other context words of the same context. new_comp_items: (list of str) New completion items to add. Raises: KeyError: if the context word has not been registered.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\debug\\cli\\debugger_cli_common.py",
    "ast_data": "FunctionDef name:extend_comp_items arg:self arg:context_word arg:new_comp_items arguments arg arg arg If Compare Raise Call Call Assign Call"
  },
  {
    "library": "pytorch",
    "name": "_need_skip_config",
    "source_code": "def _need_skip_config(self, quantization_config: Optional[QuantizationConfig]) -> bool:\n    if quantization_config is None:\n        return False\n    need_skip = False\n    current_mode = self._get_current_quantization_mode()\n    if current_mode.qat_state is not None and current_mode.qat_state != quantization_config.is_qat:\n        warnings.warn('Mixed QAT and Non-QAT quantization config is not supported.')\n        need_skip = True\n    if current_mode.dynamic_state is not None:\n        input_activation_spec = quantization_config.input_activation\n        if input_activation_spec is not None and current_mode.dynamic_state != input_activation_spec.is_dynamic:\n            warnings.warn('Mixed dynamic and static quantization config is not supported.')\n            need_skip = True\n    return need_skip",
    "docstring": "Check if the provided quantization config is valid for X86InductorQuantizer. Mixed static/dynamic configurations or mixed QAT/non-QAT configurations are not supported. To avoid such a mix, we compare the incoming configuration with current configuration status. Refer the definition for all possible modes.",
    "type": "method",
    "file_path": "pytorch\\torch\\ao\\quantization\\quantizer\\x86_inductor_quantizer.py",
    "ast_data": "FunctionDef name:_need_skip_config arg:self arg:quantization_config arguments arg arg If Compare Return return:yes Assign Assign Call If BoolOp Compare Compare Call Assign If Compare Assign If BoolOp Compare Compare Call Assign Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "fit_transform",
    "source_code": "@_fit_context(prefer_skip_nested_validation=True)\ndef fit_transform(self, X, y=None, W=None, H=None):\n    X = validate_data(self, X, accept_sparse=('csr', 'csc'), dtype=[np.float64, np.float32])\n    with config_context(assume_finite=True):\n        W, H, n_iter = self._fit_transform(X, W=W, H=H)\n    self.reconstruction_err_ = _beta_divergence(X, W, H, self._beta_loss, square_root=True)\n    self.n_components_ = H.shape[0]\n    self.components_ = H\n    self.n_iter_ = n_iter\n    return W",
    "docstring": "Learn a NMF model for the data X and returns the transformed data. This is more efficient than calling fit followed by transform. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) Training vector, where is the number of samples and is the number of features. y : Ignored Not used, present for API consistency by convention. W : array-like of shape (n_samples, n_components), default=None If , it is used as initial guess for the solution. If , uses the initialisation method specified in . H : array-like of shape (n_components, n_features), default=None If , it is used as initial guess for the solution. If , uses the initialisation method specified in . Returns ------- W : ndarray of shape (n_samples, n_components) Transformed data.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\decomposition\\_nmf.py",
    "ast_data": "FunctionDef name:fit_transform arg:self arg:X arg:y arg:W arg:H arguments arg arg arg arg arg Assign Call With Call Assign Call Assign Call Assign Assign Assign Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, dtype, size=None, dynamic_size=None, clear_after_read=None, tensor_array_name=None, handle=None, flow=None, infer_shape=True, element_shape=None, colocate_with_first_write_call=True, name=None):\n    del (flow, tensor_array_name, name)\n    if handle is not None:\n        raise ValueError('TensorArray handles are not supported when eager execution is enabled.')\n    if size is None:\n        raise ValueError('Size must be declared for TensorArrays when eager execution is enabled.')\n    self._handle = None\n    self._flow = constant_op.constant(0, dtype=dtypes.int32)\n    self._infer_shape = infer_shape\n    self._element_shape = tensor_shape.as_shape(element_shape)\n    self._colocate_with_first_write_call = colocate_with_first_write_call\n    self._dtype = dtypes.as_dtype(dtype).base_dtype\n    self._dynamic_size = dynamic_size or False\n    self._clear_after_read = True if clear_after_read is None else clear_after_read\n    self._previously_read_indices = []\n    if isinstance(size, ops.EagerTensor):\n        size = size.numpy()\n    self._tensor_array = [None for _ in range(size)]",
    "docstring": "Constructs a TensorArray compatible with eager execution. Args: dtype: (required) data type of the TensorArray. size: (optional) int32 scalar : the size of the TensorArray. Required if handle is not provided. dynamic_size: (optional) Python bool: If true, writes to the TensorArray can grow the TensorArray past its initial size. Default: False. clear_after_read: Boolean (optional, default: True). If True, clear TensorArray values after reading them. This disables read-many semantics, but allows early release of memory. tensor_array_name: unused. handle: unsupported. flow: unsupported. infer_shape: used for error checking, same semantics as TensorArray. element_shape: used for error checking, same semantics as TensorArray. colocate_with_first_write_call: unsupported. name: unsupported. Raises: ValueError: handle or flow are supplied, or if size is not supplied.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\tensor_array_ops.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:dtype arg:size arg:dynamic_size arg:clear_after_read arg:tensor_array_name arg:handle arg:flow arg:infer_shape arg:element_shape arg:colocate_with_first_write_call arg:name arguments arg arg arg arg arg arg arg arg arg arg arg arg If Compare Raise Call If Compare Raise Call Assign Assign Call Assign Assign Call Assign Assign Call Assign BoolOp Assign Compare Assign If Call Assign Call Assign Call"
  },
  {
    "library": "tensorflow",
    "name": "_run_with_profiling",
    "source_code": "def _run_with_profiling(self, run_start_resp, fetches, feed_dict, options, run_metadata, callable_runner, callable_runner_args, callable_options):\n    decorated_run_options = None\n    if callable_options:\n        callable_options_id = id(callable_options)\n        if callable_options_id not in self._cached_callables_from_options:\n            new_callable_options = config_pb2.CallableOptions()\n            new_callable_options.CopyFrom(callable_options)\n            decorated_run_options = new_callable_options.run_options\n    else:\n        decorated_run_options = options or config_pb2.RunOptions()\n    self._decorate_run_options_for_profile(decorated_run_options)\n    run_metadata = run_metadata or config_pb2.RunMetadata()\n    if callable_runner:\n        retvals = callable_runner(*callable_runner_args, options=decorated_run_options, run_metadata=run_metadata)\n    elif callable_options:\n        callable_object = self._sess._make_callable_from_options(new_callable_options)\n        retvals = callable_object(*callable_runner_args, run_metadata=run_metadata)\n    else:\n        retvals = self._sess.run(fetches, feed_dict=feed_dict, options=decorated_run_options, run_metadata=run_metadata)\n    return (retvals, OnRunEndRequest(run_start_resp.action, run_metadata=run_metadata, client_graph_def=self._sess.graph.as_graph_def()))",
    "docstring": "Perform a session.run() or callable with profiling.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\debug\\wrappers\\framework.py",
    "ast_data": "FunctionDef name:_run_with_profiling arg:self arg:run_start_resp arg:fetches arg:feed_dict arg:options arg:run_metadata arg:callable_runner arg:callable_runner_args arg:callable_options arguments arg arg arg arg arg arg arg arg arg Assign If Assign Call If Compare Assign Call Call Assign Assign BoolOp Call Call Assign BoolOp Call If Assign Call If Assign Call Assign Call Assign Call Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "read_var",
    "source_code": "def read_var(self, replica_local_var):\n    if distribute_utils.is_sync_on_read(replica_local_var):\n        return replica_local_var._get_cross_replica()\n    assert distribute_utils.is_mirrored(replica_local_var)\n    return array_ops.identity(replica_local_var._get())",
    "docstring": "Read the aggregate value of a replica-local variable.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\mirrored_strategy.py",
    "ast_data": "FunctionDef name:read_var arg:self arg:replica_local_var arguments arg arg If Call Return return:yes Call Call Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "_IVals",
    "source_code": "class _IVals:\n\n    def __init__(self):\n        self.node_names_by_fqn = defaultdict(set)\n\n    def _is_mutable(self, target):\n        if isinstance(target, torch._ops.OpOverload):\n            return target._schema.is_mutable\n        return False\n\n    def read(self, mf, node):\n        assert node.op == 'call_function'\n        b = self._is_mutable(node.target)\n        print('Checking mutability', node.target, b)\n        if not b:\n            fqn, _ = next(reversed(node.meta['nn_module_stack'].values()))\n            self.node_names_by_fqn[fqn].add(node.name)\n        return mf.remap_input(node.args[0])\n\n    def update(self, partitions):\n        for shared_submodules in partitions:\n            for entry in shared_submodules:\n                graph = entry.module.graph\n                node_names = self.node_names_by_fqn[entry.fqn]\n                nodes = [n for n in graph.nodes if n.name in node_names]\n                for node in nodes:\n                    with graph.inserting_after(node):\n                        new_node = graph.create_node('call_function', torch.ops.aten.copy_.default, (node.args[0], node))\n                        new_node.meta = copy.copy(node.meta)",
    "docstring": "Collect the intermediate values of mutations in a graph. Example: in the following graph, suppose that buf_in and buf_out are the input and output values of a buffer. buf_in = placeholder() ... ival1 = f0(buf_in, ...) # inside self.n0(...) ... ival2 = f1(ival1, ...) # inside self.n1(...) ... buf_out = f2(ival2, ...) # inside self.n2(...) return buf_out, ... Here ival1 and ival2 are intermediate values created inside calls to n0 and n1 respectively, and used inside calls to n1 and n2 respectively.",
    "type": "class",
    "file_path": "pytorch\\torch\\export\\unflatten.py",
    "ast_data": "ClassDef name:_IVals FunctionDef name:__init__ arg:self arguments arg Assign Call FunctionDef name:_is_mutable arg:self arg:target arguments arg arg If Call Return return:yes Return return:yes FunctionDef name:read arg:self arg:mf arg:node arguments arg arg arg Compare Assign Call Call If Assign Call Call Call Call Return return:yes Call FunctionDef name:update arg:self arg:partitions arguments arg arg For For Assign Assign Assign Compare For With Call Assign Call Assign Call"
  },
  {
    "library": "pytorch",
    "name": "is_valid",
    "source_code": "def is_valid(self) -> bool:\n    module = getattr(torch.ops, self.namespace)\n    py_op = getattr(module, self.op_name, None)\n    if py_op is None:\n        logger.warning('Cannot find op: %s in module: %s', self.op_name, self.namespace)\n        return False\n    if not isinstance(py_op, torch._ops.OpOverloadPacket):\n        logger.warning('Op: torch.ops.%s.%s is not an OpOverloadPacket, got: %s', self.namespace, self.op_name, type(py_op))\n        return False\n    return True",
    "docstring": "Check if the rule is valid.",
    "type": "method",
    "file_path": "pytorch\\torch\\onnx\\_internal\\fx\\passes\\type_promotion.py",
    "ast_data": "FunctionDef name:is_valid arg:self arguments arg Assign Call Assign Call If Compare Call Return return:yes If Call Call Call Return return:yes Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "out_of_date",
    "source_code": "def out_of_date(original, derived, includes=None):\n    if not os.path.exists(derived):\n        return True\n    if includes is None:\n        includes = []\n    files_to_check = [original, *includes]\n\n    def out_of_date_one(original, derived_mtime):\n        return os.path.exists(original) and derived_mtime < os.stat(original).st_mtime\n    derived_mtime = os.stat(derived).st_mtime\n    return any((out_of_date_one(f, derived_mtime) for f in files_to_check))",
    "docstring": "Return whether *derived* is out-of-date relative to *original* or any of the RST files included in it using the RST include directive (*includes*). *derived* and *original* are full paths, and *includes* is optionally a list of full paths which may have been included in the *original*.",
    "type": "function",
    "file_path": "matplotlib\\lib\\matplotlib\\sphinxext\\plot_directive.py",
    "ast_data": "FunctionDef name:out_of_date arg:original arg:derived arg:includes arguments arg arg arg If Call Return return:yes If Compare Assign Assign FunctionDef name:out_of_date_one arg:original arg:derived_mtime arguments arg arg Return return:yes BoolOp Call Compare Call Assign Call Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "batch_shape_tensor",
    "source_code": "def batch_shape_tensor(self, name='batch_shape_tensor'):\n    with self._name_scope(name):\n        if self.batch_shape.is_fully_defined():\n            return ops.convert_to_tensor(self.batch_shape.as_list(), dtype=dtypes.int32, name='batch_shape')\n        return self._batch_shape_tensor()",
    "docstring": "Shape of a single sample from a single event index as a 1-D . The batch dimensions are indexes into independent, non-identical parameterizations of this distribution. Args: name: name to give to the op Returns: batch_shape: .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\distributions\\distribution.py",
    "ast_data": "FunctionDef name:batch_shape_tensor arg:self arg:name arguments arg arg With Call If Call Return return:yes Call Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "block_depth",
    "source_code": "@property\ndef block_depth(self):\n    return self._block_depth",
    "docstring": "Depth of recursively defined circulant blocks defining this . With the dense representation of this , means is symmetric circulant. For example, means is block symmetric circulant with symmetric circulant blocks. For example, with , , , symmetric circulant, means is block symmetric circulant with block symmetric circulant blocks. Returns: Python .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\linalg\\linear_operator_circulant.py",
    "ast_data": "FunctionDef name:block_depth arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "dtype",
    "source_code": "@property\ndef dtype(self):\n    return self._variable.dtype",
    "docstring": "The dtype of the underlying variable, before any casts are done.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\mixed_precision\\autocast_variable.py",
    "ast_data": "FunctionDef name:dtype arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "ReplicaGroup",
    "source_code": "class ReplicaGroup:\n    __slots__ = ('replica_ids',)\n\n    def __init__(self):\n        self.replica_ids = []",
    "docstring": "Python representation of a xla.ReplicaGroup protobuf.",
    "type": "class",
    "file_path": "tensorflow\\third_party\\xla\\xla\\python\\xla_client.py",
    "ast_data": "ClassDef name:ReplicaGroup Assign FunctionDef name:__init__ arg:self arguments arg Assign"
  },
  {
    "library": "pytorch",
    "name": "make_load",
    "source_code": "def make_load(self, name, indices, mask):\n    assert isinstance(indices, (list, tuple))\n    assert isinstance(name, str)\n    assert isinstance(mask, str)\n    stride = self.named_input_nodes[name].get_stride()\n    indices = list(map(OpOverrides.paren, indices))\n    assert len(indices) == len(stride)\n    index = ' + '.join((f'{texpr(self.rename_indexing(s))} * {i}' for s, i in zip(stride, indices)))\n    return f'tl.load({name} + ({index}), {mask}, other=0.0)'",
    "docstring": "Optional helper called from template code to generate the code needed to load from an tensor.",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\select_algorithm.py",
    "ast_data": "FunctionDef name:make_load arg:self arg:name arg:indices arg:mask arguments arg arg arg arg Call Call Call Assign Call Assign Call Call Compare Call Call Assign Call Call Call Call Return return:yes"
  },
  {
    "library": "pandas",
    "name": "validate_percentile",
    "source_code": "def validate_percentile(q: float | Iterable[float]) -> np.ndarray:\n    q_arr = np.asarray(q)\n    msg = 'percentiles should all be in the interval [0, 1]'\n    if q_arr.ndim == 0:\n        if not 0 <= q_arr <= 1:\n            raise ValueError(msg)\n    elif not all((0 <= qs <= 1 for qs in q_arr)):\n        raise ValueError(msg)\n    return q_arr",
    "docstring": "Validate percentiles (used by describe and quantile). This function checks if the given float or iterable of floats is a valid percentile otherwise raises a ValueError. Parameters ---------- q: float or iterable of floats A single percentile or an iterable of percentiles. Returns ------- ndarray An ndarray of the percentiles if valid. Raises ------ ValueError if percentiles are not in given interval([0, 1]).",
    "type": "function",
    "file_path": "pandas\\pandas\\util\\_validators.py",
    "ast_data": "FunctionDef name:validate_percentile arg:q arguments arg Assign Call Assign If Compare If Compare Raise Call If Call Compare Raise Call Return return:yes"
  },
  {
    "library": "sphinx",
    "name": "_load_modules",
    "source_code": "def _load_modules(mod_name: str, ignored_module_exps: Iterable[re.Pattern[str]]) -> Set[str]:\n    if any((exp.match(mod_name) for exp in ignored_module_exps)):\n        return set()\n    mod = import_module(mod_name)\n    modules = {mod_name}\n    if mod.__spec__ is None:\n        return modules\n    search_locations = mod.__spec__.submodule_search_locations\n    for _, sub_mod_name, sub_mod_ispkg in pkgutil.iter_modules(search_locations):\n        if sub_mod_name == '__main__':\n            continue\n        if sub_mod_ispkg:\n            modules |= _load_modules(f'{mod_name}.{sub_mod_name}', ignored_module_exps)\n        else:\n            if any((exp.match(sub_mod_name) for exp in ignored_module_exps)):\n                continue\n            modules.add(f'{mod_name}.{sub_mod_name}')\n    return modules",
    "docstring": "Recursively load all submodules. :param mod_name: The name of a module to load submodules for. :param ignored_module_exps: A list of regexes for modules to ignore. :returns: A set of modules names including the provided module name, `` could not be loaded.",
    "type": "function",
    "file_path": "sphinx\\sphinx\\ext\\coverage.py",
    "ast_data": "FunctionDef name:_load_modules arg:mod_name arg:ignored_module_exps arguments arg arg If Call Call Return return:yes Call Assign Call Assign If Compare Return return:yes Assign For Call If Compare If Call If Call Call Call Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "BadRelationFormat",
    "source_code": "class BadRelationFormat(ArffException):\n    message = 'Bad @RELATION format, at line %d.'",
    "docstring": "Error raised when the relation declaration is in an invalid format.",
    "type": "class",
    "file_path": "scikit-learn\\sklearn\\externals\\_arff.py",
    "ast_data": "ClassDef name:BadRelationFormat Assign"
  },
  {
    "library": "pytorch",
    "name": "has_no_children_ignoring_parametrizations",
    "source_code": "def has_no_children_ignoring_parametrizations(module):\n    if len(module._modules) == 0:\n        return True\n    elif is_parametrized(module):\n        return len(module._modules) == 1 and 'parametrizations' in module._modules\n    else:\n        return False",
    "docstring": "Checks if module._modules is empty or if module is a parametrization, checks that module._modules only has the 'parametrizations' module",
    "type": "function",
    "file_path": "pytorch\\torch\\ao\\quantization\\utils.py",
    "ast_data": "FunctionDef name:has_no_children_ignoring_parametrizations arg:module arguments arg If Compare Call Return return:yes If Call Return return:yes BoolOp Compare Call Compare Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "save",
    "source_code": "def save(obj: object, f: FileLike, pickle_module: Any=pickle, pickle_protocol: int=DEFAULT_PROTOCOL, _use_new_zipfile_serialization: bool=True, _disable_byteorder_record: bool=False) -> None:\n    torch._C._log_api_usage_once('torch.save')\n    _check_dill_version(pickle_module)\n    _check_save_filelike(f)\n    if isinstance(f, (str, os.PathLike)):\n        f = os.fspath(f)\n    if _use_new_zipfile_serialization:\n        with _open_zipfile_writer(f) as opened_zipfile:\n            _save(obj, opened_zipfile, pickle_module, pickle_protocol, _disable_byteorder_record)\n            return\n    else:\n        global _serialization_tls\n        if _serialization_tls.skip_data:\n            raise RuntimeError('Cannot use skip_data=True with _use_new_zipfile_serialization=False')\n        with _open_file_like(f, 'wb') as opened_file:\n            _legacy_save(obj, opened_file, pickle_module, pickle_protocol)",
    "docstring": "save(obj, f, pickle_module=pickle, pickle_protocol=2, _use_new_zipfile_serialization=True) Saves an object to a disk file. See also: :ref: See :ref: for more advanced tools to manipulate a checkpoint. Args: obj: saved object f: a file-like object (has to implement write and flush) or a string or os.PathLike object containing a file name pickle_module: module used for pickling metadata and objects pickle_protocol: can be specified to override the default protocol .. note:: A common PyTorch convention is to save tensors using .pt file extension. .. note:: PyTorch preserves storage sharing across serialization. See :ref: for more details. .. note:: The 1.6 release of PyTorch switched ``. Example: >>> # xdoctest: +SKIP(\"makes cwd dirty\") >>> # Save to file >>> x = torch.tensor([0, 1, 2, 3, 4]) >>> torch.save(x, \"tensor.pt\") >>> # Save to io.BytesIO buffer >>> buffer = io.BytesIO() >>> torch.save(x, buffer)",
    "type": "function",
    "file_path": "pytorch\\torch\\serialization.py",
    "ast_data": "FunctionDef name:save arg:obj arg:f arg:pickle_module arg:pickle_protocol arg:_use_new_zipfile_serialization arg:_disable_byteorder_record arguments arg arg arg arg arg arg Call Call Call If Call Assign Call If With Call Call Return return:no If Raise Call With Call Call"
  },
  {
    "library": "numpy",
    "name": "english_lower",
    "source_code": "def english_lower(s):\n    lowered = s.translate(LOWER_TABLE)\n    return lowered",
    "docstring": "Apply English case rules to convert ASCII strings to all lower case. This is an internal utility function to replace calls to str.lower() such that we can avoid changing behavior with changing locales. In particular, Turkish has distinct dotted and dotless variants of the Latin letter \"I\" in both lowercase and uppercase. Thus, \"I\".lower() != \"i\" in a \"tr\" locale. Parameters ---------- s : str Returns ------- lowered : str Examples -------- >>> from numpy._core.numerictypes import english_lower >>> english_lower('ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789_') 'abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyz0123456789_' >>> english_lower('') ''",
    "type": "function",
    "file_path": "numpy\\numpy\\_core\\_string_helpers.py",
    "ast_data": "FunctionDef name:english_lower arg:s arguments arg Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_rsqrt_flops",
    "source_code": "@ops.RegisterStatistics('Rsqrt', 'flops')\ndef _rsqrt_flops(graph, node):\n    return _unary_op_flops(graph, node, ops_per_element=2)",
    "docstring": "Compute flops for Rsqrt operation.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\profiler\\internal\\flops_registry.py",
    "ast_data": "FunctionDef name:_rsqrt_flops arg:graph arg:node arguments arg arg Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "check_perf_csv",
    "source_code": "def check_perf_csv(filename, threshold, threshold_scale):\n    df = pd.read_csv(filename)\n    failed = []\n    for _, row in df.iterrows():\n        model_name = row['name']\n        speedup = row['speedup']\n        if speedup < threshold * threshold_scale:\n            failed.append(model_name)\n        print(f'{model_name:34} {speedup}')\n    if failed:\n        print(textwrap.dedent(f'\\n                Error {len(failed)} models performance regressed\\n                    {' '.join(failed)}\\n                '))\n        sys.exit(1)",
    "docstring": "Basic performance checking.",
    "type": "function",
    "file_path": "pytorch\\benchmarks\\dynamo\\check_perf_csv.py",
    "ast_data": "FunctionDef name:check_perf_csv arg:filename arg:threshold arg:threshold_scale arguments arg arg arg Assign Call Assign For Call Assign Assign If Compare Call Call If Call Call Call Call Call"
  },
  {
    "library": "authlib",
    "name": "validate_revocation_endpoint",
    "source_code": "def validate_revocation_endpoint(self):\n    url = self.get('revocation_endpoint')\n    if url and (not is_secure_transport(url)):\n        raise ValueError('\"revocation_endpoint\" MUST use \"https\" scheme')",
    "docstring": "OPTIONAL. URL of the authorization server's OAuth 2.0 revocation endpoint [RFC7009].",
    "type": "method",
    "file_path": "authlib\\authlib\\oauth2\\rfc8414\\models.py",
    "ast_data": "FunctionDef name:validate_revocation_endpoint arg:self arguments arg Assign Call If BoolOp Call Raise Call"
  },
  {
    "library": "scipy",
    "name": "_fix_shape",
    "source_code": "def _fix_shape(x, shape, axes):\n    must_copy = False\n    index = [slice(None)] * x.ndim\n    for n, ax in zip(shape, axes):\n        if x.shape[ax] >= n:\n            index[ax] = slice(0, n)\n        else:\n            index[ax] = slice(0, x.shape[ax])\n            must_copy = True\n    index = tuple(index)\n    if not must_copy:\n        return (x[index], False)\n    s = list(x.shape)\n    for n, axis in zip(shape, axes):\n        s[axis] = n\n    z = np.zeros(s, x.dtype)\n    z[index] = x[index]\n    return (z, True)",
    "docstring": "Internal auxiliary function for _raw_fft, _raw_fftnd.",
    "type": "function",
    "file_path": "scipy\\scipy\\fft\\_pocketfft\\helper.py",
    "ast_data": "FunctionDef name:_fix_shape arg:x arg:shape arg:axes arguments arg arg arg Assign Assign Call For Call If Compare Assign Call Assign Call Assign Assign Call If Return return:yes Assign Call For Call Assign Assign Call Assign Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "score_samples",
    "source_code": "def score_samples(self, X):\n    check_is_fitted(self)\n    return -self.mahalanobis(X)",
    "docstring": "Compute the negative Mahalanobis distances. Parameters ---------- X : array-like of shape (n_samples, n_features) The data matrix. Returns ------- negative_mahal_distances : array-like of shape (n_samples,) Opposite of the Mahalanobis distances.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\covariance\\_elliptic_envelope.py",
    "ast_data": "FunctionDef name:score_samples arg:self arg:X arguments arg arg Call Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "dtype_counts",
    "source_code": "@property\ndef dtype_counts(self) -> Mapping[str, int]:\n    return self.info.dtype_counts",
    "docstring": "Mapping dtype - number of counts.",
    "type": "method",
    "file_path": "pandas\\pandas\\io\\formats\\info.py",
    "ast_data": "FunctionDef name:dtype_counts arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "read_all",
    "source_code": "def read_all(self):\n    return [wv.get() for wv in self._per_worker_vars._values]",
    "docstring": "Synchronously read variables from all workers into a list of Tensors.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\ps_values.py",
    "ast_data": "FunctionDef name:read_all arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "_remainder_matrix_power",
    "source_code": "def _remainder_matrix_power(A, t):\n    A = np.asarray(A)\n    if len(A.shape) != 2 or A.shape[0] != A.shape[1]:\n        raise ValueError('input must be a square array')\n    n, n = A.shape\n    if np.array_equal(A, np.triu(A)):\n        Z = None\n        T = A\n    elif np.isrealobj(A):\n        T, Z = schur(A)\n        if not np.array_equal(T, np.triu(T)):\n            T, Z = rsf2csf(T, Z)\n    else:\n        T, Z = schur(A, output='complex')\n    T_diag = np.diag(T)\n    if np.count_nonzero(T_diag) != n:\n        raise FractionalMatrixPowerError('cannot use inverse scaling and squaring to find the fractional matrix power of a singular matrix')\n    if np.isrealobj(T) and np.min(T_diag) < 0:\n        T = T.astype(complex)\n    U = _remainder_matrix_power_triu(T, t)\n    if Z is not None:\n        ZH = np.conjugate(Z).T\n        return Z.dot(U).dot(ZH)\n    else:\n        return U",
    "docstring": "Compute the fractional power of a matrix, for fractions -1 < t < 1. This uses algorithm (3.1) of [1]_. The Pade approximation itself uses algorithm (4.1) of [2]_. Parameters ---------- A : (N, N) array_like Matrix whose fractional power to evaluate. t : float Fractional power between -1 and 1 exclusive. Returns ------- X : (N, N) array_like The fractional power of the matrix. References ---------- .. [1] Nicholas J. Higham and Lijing Lin (2013) \"An Improved Schur-Pade Algorithm for Fractional Powers of a Matrix and their Frechet Derivatives.\" .. [2] Nicholas J. Higham and Lijing lin (2011) \"A Schur-Pade Algorithm for Fractional Powers of a Matrix.\" SIAM Journal on Matrix Analysis and Applications, 32 (3). pp. 1056-1078. ISSN 0895-4798",
    "type": "function",
    "file_path": "scipy\\scipy\\linalg\\_matfuncs_inv_ssq.py",
    "ast_data": "FunctionDef name:_remainder_matrix_power arg:A arg:t arguments arg arg Assign Call If BoolOp Compare Call Compare Raise Call Assign If Call Call Assign Assign If Call Assign Call If Call Call Assign Call Assign Call Assign Call If Compare Call Raise Call If BoolOp Call Compare Call Assign Call Assign Call If Compare Assign Call Return return:yes Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "pow",
    "source_code": "@tf_export('math.pow', 'pow')\n@dispatch.register_binary_elementwise_api\n@dispatch.add_dispatch_support\ndef pow(x, y, name=None):\n    with ops.name_scope(name, 'Pow', [x]) as name:\n        return gen_math_ops._pow(x, y, name=name)",
    "docstring": "Computes the power of one value to another. Given a tensor and a tensor , this operation computes \\\\(x^y\\\\) for corresponding elements in and . For example: Args: x: A of type , , , , , , or . y: A of type , , , , , , or . name: A name for the operation (optional). Returns: A .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\math_ops.py",
    "ast_data": "FunctionDef name:pow arg:x arg:y arg:name arguments arg arg arg With Call Return return:yes Call Call"
  },
  {
    "library": "django",
    "name": "point_count",
    "source_code": "@property\ndef point_count(self):\n    return sum((self[i].point_count for i in range(self.geom_count)))",
    "docstring": "Return the number of Points in this Polygon.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\gdal\\geometries.py",
    "ast_data": "FunctionDef name:point_count arg:self arguments arg Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "get_cpu_indeg_count",
    "source_code": "def get_cpu_indeg_count(self, graph: fx.Graph) -> dict[fx.Node, int]:\n    cpu_indeg: dict[fx.Node, int] = Counter()\n    for node in graph.nodes:\n        cpu_count = 0\n\n        def add_cpu_inp(node):\n            nonlocal cpu_count\n            device = self.get_node_device(node)\n            cpu_count += device is not None and device.type == 'cpu'\n        pytree.tree_map_only(fx.Node, add_cpu_inp, (node.args, node.kwargs))\n        if cpu_count:\n            cpu_indeg[node] = cpu_count\n    return cpu_indeg",
    "docstring": "Get the number of cpu inputs to a node",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\fx_passes\\post_grad.py",
    "ast_data": "FunctionDef name:get_cpu_indeg_count arg:self arg:graph arguments arg arg Call For Assign FunctionDef name:add_cpu_inp arg:node arguments arg Assign Call BoolOp Compare Compare Call If Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "Callable",
    "source_code": "@tf_export('types.experimental.Callable', v1=[])\nclass Callable(metaclass=abc.ABCMeta):\n\n    @property\n    @abc.abstractmethod\n    def function_type(self) -> FunctionType:\n        pass\n\n    def __call__(self, *args, **kwargs):\n        pass",
    "docstring": "Base class for TF callables like those created by tf.function. Note: Callables are conceptually very similar to : a is a kind of callable.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\types\\core.py",
    "ast_data": "ClassDef name:Callable FunctionDef name:function_type arg:self arguments arg FunctionDef name:__call__ arg:self arguments arg arg arg Call"
  },
  {
    "library": "tensorflow",
    "name": "_TensorArrayReadGrad",
    "source_code": "@ops.RegisterGradient('TensorArrayRead')\n@ops.RegisterGradient('TensorArrayReadV2')\n@ops.RegisterGradient('TensorArrayReadV3')\ndef _TensorArrayReadGrad(op: ops.Operation, grad):\n    handle = op.inputs[0]\n    index = op.inputs[1]\n    flow = op.inputs[2]\n    dtype = op.get_attr('dtype')\n    grad_source = _GetGradSource(grad)\n    g = tensor_array_ops.TensorArray(dtype=dtype, handle=handle, flow=flow, colocate_with_first_write_call=False).grad(source=grad_source, flow=flow)\n    w_g = g.write(index, grad)\n    return [None, None, w_g.flow]",
    "docstring": "Gradient for TensorArrayRead. Args: op: Forward TensorArrayRead op. grad: Gradient to TensorArrayRead. Returns: A flow , which can be used in control dependencies to force the write of to the gradient .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\tensor_array_grad.py",
    "ast_data": "FunctionDef name:_TensorArrayReadGrad arg:op arg:grad arguments arg arg Assign Assign Assign Assign Call Assign Call Assign Call Call Assign Call Return return:yes Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "WrapActivationCheckpoint",
    "source_code": "class WrapActivationCheckpoint(HigherOrderOperator):\n\n    def __init__(self) -> None:\n        super().__init__('wrap_activation_checkpoint', cacheable=False)\n\n    def __call__(self, function, *args, **kwargs):\n        import torch.fx.traceback as fx_traceback\n        from torch.fx import Interpreter\n        kwargs['use_reentrant'] = False\n        kwargs['preserve_rng_state'] = False\n        with fx_traceback.preserve_node_meta():\n            from torch.utils.checkpoint import checkpoint\n            return checkpoint(Interpreter(function).run, *args, **kwargs)",
    "docstring": "This operator is used to wrap torch.utils.checkpoint. This avoids TorchDynamo to look into saved tensor hooks and directly passes the control to AOT Autograd, which is ok with tracing saved tensor hooks. As a result of AOT tracing torch.utils.checkpoint code, we have a backward graph with recomputed forward nodes. However, we might deprecate this operator soon. The difficulty arises in the functionalization of rng ops. Today, there are two different functionalization of rng ops - one at AOT autograd and other at Inductor. And they are difficult to map to each other. The rng states also complicate pattern matching in Inductor. Due to the ease of implementation, we are currently inclined towards functionalization at Inductor level, which means that duplication/recomputation is done as a compiler pass in the partitioners. See TagActivationCheckpoint for more information.",
    "type": "class",
    "file_path": "pytorch\\torch\\_higher_order_ops\\wrap.py",
    "ast_data": "ClassDef name:WrapActivationCheckpoint FunctionDef name:__init__ arg:self arguments arg Call Call FunctionDef name:__call__ arg:self arg:function arguments arg arg arg arg Assign Assign With Call Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_symptom_msg",
    "source_code": "def _symptom_msg(self, msg):\n    return 'Symptom: ' + msg",
    "docstring": "Return the structured Symptom message.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\tpu\\client\\client.py",
    "ast_data": "FunctionDef name:_symptom_msg arg:self arg:msg arguments arg arg Return return:yes"
  },
  {
    "library": "pandas",
    "name": "_get_registered_option",
    "source_code": "def _get_registered_option(key: str):\n    return _registered_options.get(key)",
    "docstring": "Retrieves the option metadata if is a registered option. Returns ------- RegisteredOption (namedtuple) if key is deprecated, None otherwise",
    "type": "function",
    "file_path": "pandas\\pandas\\_config\\config.py",
    "ast_data": "FunctionDef name:_get_registered_option arg:key arguments arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "_set_nested_attr",
    "source_code": "def _set_nested_attr(obj: nn.Module, names: list[str], value: Tensor) -> None:\n    if len(names) == 1:\n        setattr(obj, names[0], value)\n    else:\n        _set_nested_attr(getattr(obj, names[0]), names[1:], value)",
    "docstring": "Set the attribute specified by the given list of names to value. For example, to set the attribute obj.conv.weight, use _del_nested_attr(obj, ['conv', 'weight'], value)",
    "type": "function",
    "file_path": "pytorch\\benchmarks\\functional_autograd_benchmark\\utils.py",
    "ast_data": "FunctionDef name:_set_nested_attr arg:obj arg:names arg:value arguments arg arg arg If Compare Call Call Call Call"
  },
  {
    "library": "scipy",
    "name": "_log_real_standardize",
    "source_code": "def _log_real_standardize(x):\n    shape = x.shape\n    x = np.atleast_1d(x)\n    real = np.real(x).astype(x.dtype)\n    complex = np.imag(x)\n    y = real\n    negative = np.exp(complex * 1j) < 0.5\n    y[negative] = y[negative] + np.pi * 1j\n    return y.reshape(shape)[()]",
    "docstring": "Standardizes the (complex) logarithm of a real number. The logarithm of a real number may be represented by a complex number with imaginary part that is a multiple of pi*1j. Even multiples correspond with a positive real and odd multiples correspond with a negative real. Given a logarithm of a real number , this function returns an equivalent representation in a standard form: the log of a positive real has imaginary part and the log of a negative real has imaginary part .",
    "type": "function",
    "file_path": "scipy\\scipy\\stats\\_distribution_infrastructure.py",
    "ast_data": "FunctionDef name:_log_real_standardize arg:x arguments arg Assign Assign Call Assign Call Call Assign Call Assign Assign Compare Call Assign Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "capabilities",
    "source_code": "def capabilities(self):\n    return {'boolean indexing': True, 'data-dependent shapes': True, 'max dimensions': 64}",
    "docstring": "Return a dictionary of array API library capabilities. The resulting dictionary has the following keys: - **\"boolean indexing\"**: boolean indicating whether an array library supports boolean indexing. Always `` for PyTorch. See for more details. See Also -------- __array_namespace_info__.default_device, __array_namespace_info__.default_dtypes, __array_namespace_info__.dtypes, __array_namespace_info__.devices Returns ------- capabilities : dict A dictionary of array API library capabilities. Examples -------- >>> info = xp.__array_namespace_info__() >>> info.capabilities() {'boolean indexing': True, 'data-dependent shapes': True, 'max dimensions': 64}",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\externals\\array_api_compat\\torch\\_info.py",
    "ast_data": "FunctionDef name:capabilities arg:self arguments arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "adamw",
    "source_code": "def adamw(params: list[Tensor], grads: list[Tensor], exp_avgs: list[Tensor], exp_avg_sqs: list[Tensor], max_exp_avg_sqs: list[Tensor], state_steps: list[Tensor], foreach: Optional[bool]=None, capturable: bool=False, differentiable: bool=False, fused: Optional[bool]=None, grad_scale: Optional[Tensor]=None, found_inf: Optional[Tensor]=None, has_complex: bool=False, *, amsgrad: bool, beta1: float, beta2: float, lr: Union[float, Tensor], weight_decay: float, eps: float, maximize: bool):\n    adam(params, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, foreach=foreach, capturable=capturable, differentiable=differentiable, fused=fused, grad_scale=grad_scale, found_inf=found_inf, has_complex=has_complex, amsgrad=amsgrad, beta1=beta1, beta2=beta2, lr=lr, weight_decay=weight_decay, eps=eps, maximize=maximize, decoupled_weight_decay=True)",
    "docstring": "Functional API that performs AdamW algorithm computation. See :class: for details.",
    "type": "function",
    "file_path": "pytorch\\torch\\optim\\adamw.py",
    "ast_data": "FunctionDef name:adamw arg:params arg:grads arg:exp_avgs arg:exp_avg_sqs arg:max_exp_avg_sqs arg:state_steps arg:foreach arg:capturable arg:differentiable arg:fused arg:grad_scale arg:found_inf arg:has_complex arguments arg arg arg arg arg arg arg arg arg arg arg arg arg arg arg arg arg arg arg arg Call"
  },
  {
    "library": "numpy",
    "name": "trim",
    "source_code": "def trim(self, tol=0):\n    coef = pu.trimcoef(self.coef, tol)\n    return self.__class__(coef, self.domain, self.window, self.symbol)",
    "docstring": "Remove trailing coefficients Remove trailing coefficients until a coefficient is reached whose absolute value greater than or the beginning of the series is reached. If all the coefficients would be removed the series is set to `tol` will be removed. Returns ------- new_series : series New instance of series with trimmed coefficients.",
    "type": "method",
    "file_path": "numpy\\numpy\\polynomial\\_polybase.py",
    "ast_data": "FunctionDef name:trim arg:self arg:tol arguments arg arg Assign Call Return return:yes Call"
  },
  {
    "library": "numpy",
    "name": "check_funcs_once",
    "source_code": "def check_funcs_once(self, funcs, headers=None, include_dirs=None, libraries=None, library_dirs=None, decl=False, call=False, call_args=None):\n    self._check_compiler()\n    body = []\n    if decl:\n        for f, v in decl.items():\n            if v:\n                body.append('int %s (void);' % f)\n    body.append('#ifdef _MSC_VER')\n    for func in funcs:\n        body.append('#pragma function(%s)' % func)\n    body.append('#endif')\n    body.append('int main (void) {')\n    if call:\n        for f in funcs:\n            if f in call and call[f]:\n                if not (call_args and f in call_args and call_args[f]):\n                    args = ''\n                else:\n                    args = call_args[f]\n                body.append('  %s(%s);' % (f, args))\n            else:\n                body.append('  %s;' % f)\n    else:\n        for f in funcs:\n            body.append('  %s;' % f)\n    body.append('  return 0;')\n    body.append('}')\n    body = '\\n'.join(body) + '\\n'\n    return self.try_link(body, headers, include_dirs, libraries, library_dirs)",
    "docstring": "Check a list of functions at once. This is useful to speed up things, since all the functions in the funcs list will be put in one compilation unit. Arguments --------- funcs : seq list of functions to test include_dirs : seq list of header paths libraries : seq list of libraries to link the code snippet to library_dirs : seq list of library paths decl : dict for every (key, value), the declaration in the value will be used for function in key. If a function is not in the dictionary, no declaration will be used. call : dict for every item (f, value), if the value is True, a call will be done to the function f.",
    "type": "method",
    "file_path": "numpy\\numpy\\distutils\\command\\config.py",
    "ast_data": "FunctionDef name:check_funcs_once arg:self arg:funcs arg:headers arg:include_dirs arg:libraries arg:library_dirs arg:decl arg:call arg:call_args arguments arg arg arg arg arg arg arg arg arg Call Assign If For Call If Call Call For Call Call Call If For If BoolOp Compare If BoolOp Compare Assign Assign Call Call For Call Call Call Assign Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "get_matching_activations",
    "source_code": "def get_matching_activations(float_module: nn.Module, q_module: nn.Module) -> dict[str, dict[str, torch.Tensor]]:\n    torch._C._log_api_usage_once('quantization_api._numeric_suite.get_matching_activations')\n    float_dict = get_logger_dict(float_module)\n    quantized_dict = get_logger_dict(q_module)\n    act_dict: dict[str, dict] = {}\n    for key in quantized_dict:\n        if len(quantized_dict[key]['tensor_val']) == 0:\n            continue\n        match_key = _find_match(sorted(float_dict, reverse=True), key, 'stats')\n        if match_key is not None:\n            act_dict[key] = {}\n            act_dict[key]['float'] = float_dict[match_key]['tensor_val']\n            act_dict[key]['quantized'] = quantized_dict[key]['tensor_val']\n    return act_dict",
    "docstring": "Find the matching activation between float and quantized modules. Args: float_module: float module used to generate the q_module q_module: module quantized from float_module Return: act_dict: dict with key corresponding to quantized module names and each entry being a dictionary with two keys 'float' and 'quantized', containing the matching float and quantized activations",
    "type": "function",
    "file_path": "pytorch\\torch\\ao\\ns\\_numeric_suite.py",
    "ast_data": "FunctionDef name:get_matching_activations arg:float_module arg:q_module arguments arg arg Call Assign Call Assign Call For If Compare Call Assign Call Call If Compare Assign Assign Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "no_gradient",
    "source_code": "@deprecation.deprecated_endpoints('NotDifferentiable', 'NoGradient')\n@tf_export('no_gradient', v1=['no_gradient', 'NotDifferentiable', 'NoGradient'])\ndef no_gradient(op_type: str) -> None:\n    if not isinstance(op_type, str):\n        raise TypeError('op_type must be a string')\n    gradient_registry.register(None, op_type)",
    "docstring": "Specifies that ops of type is not differentiable. This function should *not* be used for operations that have a well-defined gradient that is not yet implemented. This function is only used when defining a new op type. It may be used for ops such as that are not differentiable. For example: The gradient computed for 'op_type' will then propagate zeros. For ops that have a well-defined gradient but are not yet implemented, no declaration should be made, and an error *must* be thrown if an attempt to request its gradient is made. Args: op_type: The string type of an operation. This corresponds to the field for the proto that defines the operation. Raises: TypeError: If is not a string.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\ops.py",
    "ast_data": "FunctionDef name:no_gradient arg:op_type arguments arg If Call Raise Call Call Call Call"
  },
  {
    "library": "authlib",
    "name": "revoke_token",
    "source_code": "def revoke_token(self, url, token=None, token_type_hint=None, body=None, auth=None, headers=None, **kwargs):\n    if auth is None:\n        auth = self.client_auth(self.revocation_endpoint_auth_method)\n    return self._handle_token_hint('revoke_token_request', url, token=token, token_type_hint=token_type_hint, body=body, auth=auth, headers=headers, **kwargs)",
    "docstring": "Revoke token method defined via _. :param url: Revoke Token endpoint, must be HTTPS. :param token: The token to be revoked. :param token_type_hint: The type of the token that to be revoked. It can be \"access_token\" or \"refresh_token\". :param body: Optional application/x-www-form-urlencoded body to add the include in the token request. Prefer kwargs over body. :param auth: An auth tuple or method as accepted by requests. :param headers: Dict to default request headers with. :return: Revocation Response .. _:",
    "type": "method",
    "file_path": "authlib\\authlib\\oauth2\\client.py",
    "ast_data": "FunctionDef name:revoke_token arg:self arg:url arg:token arg:token_type_hint arg:body arg:auth arg:headers arguments arg arg arg arg arg arg arg arg If Compare Assign Call Return return:yes Call"
  },
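A minimal sketch against an RFC7009-compliant revocation endpoint, using authlib's requests integration; the URL and credentials are placeholders.

```python
from authlib.integrations.requests_client import OAuth2Session

session = OAuth2Session("client_id", "client_secret")
# Revoke an access token; the endpoint infers client auth from the session.
resp = session.revoke_token(
    "https://provider.example.com/oauth/revoke",
    token="the-access-token",
    token_type_hint="access_token",
)
print(resp.status_code)
```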
  {
    "library": "scrapy",
    "name": "download_request",
    "source_code": "def download_request(self, request: Request, spider: Spider) -> Deferred[Response]:\n    agent = ScrapyAgent(contextFactory=self._contextFactory, pool=self._pool, maxsize=getattr(spider, 'download_maxsize', self._default_maxsize), warnsize=getattr(spider, 'download_warnsize', self._default_warnsize), fail_on_dataloss=self._fail_on_dataloss, crawler=self._crawler)\n    return agent.download_request(request)",
    "docstring": "Return a deferred for the HTTP download",
    "type": "method",
    "file_path": "scrapy\\scrapy\\core\\downloader\\handlers\\http11.py",
    "ast_data": "FunctionDef name:download_request arg:self arg:request arg:spider arguments arg arg arg Assign Call Call Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "get_chief_queue_runner",
    "source_code": "def get_chief_queue_runner(self):\n    if self._gradients_applied is False:\n        raise ValueError('Should be called after apply_gradients().')\n    return self._chief_queue_runner",
    "docstring": "Returns the QueueRunner for the chief to execute. This includes the operations to synchronize replicas: aggregate gradients, apply to variables, increment global step, insert tokens to token queue. Note that this can only be called after calling apply_gradients() which actually generates this queuerunner. Returns: A for chief to execute. Raises: ValueError: If this is called before apply_gradients().",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\training\\sync_replicas_optimizer.py",
    "ast_data": "FunctionDef name:get_chief_queue_runner arg:self arguments arg If Compare Raise Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_FunctionCallsTracker",
    "source_code": "class _FunctionCallsTracker(transformer.Base):\n\n    def __init__(self, ctx, first_argument_name):\n        super(_FunctionCallsTracker, self).__init__(ctx)\n        self.first_argument_name = first_argument_name\n        self.calls = set()\n\n    def visit_Name(self, node):\n        node = self.generic_visit(node)\n        if isinstance(node.ctx, gast.Load) and node.id in self.ctx.info.namespace:\n            anno.setanno(node, 'static_value', self.ctx.info.namespace[node.id])\n        return node\n\n    def visit_Attribute(self, node):\n        node = self.generic_visit(node)\n        parent_val = anno.getanno(node.value, 'static_value', default=None)\n        if parent_val is not None:\n            if hasattr(parent_val, node.attr):\n                anno.setanno(node, 'static_value', getattr(parent_val, node.attr))\n        return node\n\n    def visit_Call(self, node):\n        node = self.generic_visit(node)\n        if node.args and anno.getanno(node.args[0], anno.Basic.QN, None) == self.first_argument_name:\n            fn_object = anno.getanno(node.func, 'static_value', None)\n            if fn_object is not None:\n                self.calls.add(fn_object)\n        return node",
    "docstring": "Tracks any function calls made with a given first argument name.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\gradient_input_output_exclusions.py",
    "ast_data": "ClassDef name:_FunctionCallsTracker FunctionDef name:__init__ arg:self arg:ctx arg:first_argument_name arguments arg arg arg Call Call Assign Assign Call FunctionDef name:visit_Name arg:self arg:node arguments arg arg Assign Call If BoolOp Call Compare Call Return return:yes FunctionDef name:visit_Attribute arg:self arg:node arguments arg arg Assign Call Assign Call If Compare If Call Call Call Return return:yes FunctionDef name:visit_Call arg:self arg:node arguments arg arg Assign Call If BoolOp Compare Call Assign Call If Compare Call Return return:yes"
  },
  {
    "library": "sphinx",
    "name": "toc_add_files",
    "source_code": "def toc_add_files(self, refnodes: list[dict[str, Any]]) -> None:\n    refnodes.insert(0, {'level': 1, 'refuri': html.escape(self.config.root_doc + self.out_suffix), 'text': ssp(html.escape(self.env.titles[self.config.root_doc].astext()))})\n    for file, text in reversed(self.config.epub_pre_files):\n        refnodes.insert(0, {'level': 1, 'refuri': html.escape(file), 'text': ssp(html.escape(text))})\n    for file, text in self.config.epub_post_files:\n        refnodes.append({'level': 1, 'refuri': html.escape(file), 'text': ssp(html.escape(text))})",
    "docstring": "Add the root_doc, pre and post files to a list of refnodes.",
    "type": "method",
    "file_path": "sphinx\\sphinx\\builders\\_epub_base.py",
    "ast_data": "FunctionDef name:toc_add_files arg:self arg:refnodes arguments arg arg Call Call Call Call Call For Call Call Call Call Call For Call Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "num_partitioned_dimensions",
    "source_code": "@property\ndef num_partitioned_dimensions(self):\n    return len(self._partitioned_dim_sizes)",
    "docstring": "The number of partitioned dimensions in this shape.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\ragged_tensor_shape.py",
    "ast_data": "FunctionDef name:num_partitioned_dimensions arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "SymIntSymbolicContext",
    "source_code": "@dataclass(frozen=True)\nclass SymIntSymbolicContext(SymbolicContext):\n    constraint: DimConstraint",
    "docstring": "Data structure specifying any constraints on a SymInt input",
    "type": "class",
    "file_path": "pytorch\\torch\\fx\\experimental\\symbolic_shapes.py",
    "ast_data": "ClassDef name:SymIntSymbolicContext Call"
  },
  {
    "library": "scipy",
    "name": "_lazy_valid_checks",
    "source_code": "def _lazy_valid_checks(*args, throw=False, warning=False, materialize=False, xp):\n    conds = xp.concat([xp.reshape(cond, (1,)) for cond, _ in args])\n    lazy = is_lazy_array(conds)\n    if not throw and (not warning) or (lazy and (not materialize)):\n        out = ~xp.any(conds)\n        return out if lazy else bool(out)\n    if is_dask(xp):\n        conds = conds.compute()\n    conds = [bool(cond) for cond in conds]\n    for cond, (_, msg) in zip(conds, args):\n        if throw and cond:\n            raise ValueError(msg)\n        elif warning and cond:\n            warnings.warn(msg, ClusterWarning, stacklevel=3)\n    return not any(conds)",
    "docstring": "Validate a set of conditions on the contents of possibly lazy arrays. Parameters ---------- args : tuples of (Array, str) The first element of each tuple must be a 0-dimensional Array that evaluates to bool; the second element must be the message to convey if the first element evaluates to True. throw: bool Set to True to if is True. warning: bool Set to True to issue a warning with message if is True. materialize: bool Set to True to force materialization of lazy arrays when throw=True or warning=True. If the inputs are lazy and materialize=False, ignore the and flags. xp: module Array API namespace Returns ------- If xp is an eager backend (e.g. numpy) and all conditions are False, return True. If throw is True, raise. Otherwise, return False. If xp is a lazy backend (e.g. Dask or JAX), return a 0-dimensional bool Array.",
    "type": "function",
    "file_path": "scipy\\scipy\\cluster\\hierarchy.py",
    "ast_data": "FunctionDef name:_lazy_valid_checks arguments arg arg arg arg arg Assign Call Call Assign Call If BoolOp BoolOp BoolOp Assign Call Return return:yes Call If Call Assign Call Assign Call For Call If BoolOp Raise Call If BoolOp Call Return return:yes Call"
  },
  {
    "library": "numpy",
    "name": "not_equal",
    "source_code": "@array_function_dispatch(_binary_op_dispatcher)\ndef not_equal(x1, x2):\n    return compare_chararrays(x1, x2, '!=', True)",
    "docstring": "Return (x1 != x2) element-wise. Unlike , this comparison is performed by first stripping whitespace characters from the end of the string. This behavior is provided for backward-compatibility with numarray. Parameters ---------- x1, x2 : array_like of str or unicode Input arrays of the same shape. Returns ------- out : ndarray Output array of bools. See Also -------- equal, greater_equal, less_equal, greater, less Examples -------- >>> import numpy as np >>> x1 = np.array(['a', 'b', 'c']) >>> np.char.not_equal(x1, 'b') array([ True, False, True])",
    "type": "function",
    "file_path": "numpy\\numpy\\_core\\defchararray.py",
    "ast_data": "FunctionDef name:not_equal arg:x1 arg:x2 arguments arg arg Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_as_index",
    "source_code": "def _as_index(idx, need_scalar=True):\n    if isinstance(idx, (numbers.Integral, tensor_shape.Dimension)):\n        return (idx, True)\n    data = asarray(idx)\n    if data.dtype == dtypes.bool:\n        if data.shape.ndims != 1:\n            raise NotImplementedError('Need rank 1 for bool index %s' % idx)\n        data = array_ops.where_v2(data)\n        data = array_ops.reshape(data, [-1])\n    if need_scalar and data.shape.rank not in (None, 0):\n        raise IndexError(_SLICE_ERROR + ', got {!r}'.format(idx))\n    np_dtype = data.dtype.as_numpy_dtype\n    if not np.issubdtype(np_dtype, np.integer):\n        raise IndexError(_SLICE_ERROR + ', got {!r}'.format(idx))\n    if data.dtype not in (dtypes.int64, dtypes.int32):\n        promoted_dtype = np.promote_types(np.int32, np_dtype)\n        if promoted_dtype == np.int32:\n            data = math_ops.cast(data, dtypes.int32)\n        elif promoted_dtype == np.int64:\n            data = math_ops.cast(data, dtypes.int64)\n        else:\n            raise IndexError(_SLICE_ERROR + ', got {!r}'.format(idx))\n    return (data, data.shape.rank == 0)",
    "docstring": "Helper function to parse idx as an index. Args: idx: index need_scalar: If idx needs to be a scalar value. Returns: A pair, (indx, bool). First one is the parsed index and can be a tensor, or scalar integer / Dimension. Second one is True if rank is known to be 0. Raises: IndexError: For incorrect indices.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\numpy_ops\\np_array_ops.py",
    "ast_data": "FunctionDef name:_as_index arg:idx arg:need_scalar arguments arg arg If Call Return return:yes Assign Call If Compare If Compare Raise Call Assign Call Assign Call If BoolOp Compare Raise Call Call Assign If Call Raise Call Call If Compare Assign Call If Compare Assign Call If Compare Assign Call Raise Call Call Return return:yes Compare"
  },
  {
    "library": "pandas",
    "name": "transform",
    "source_code": "@final\ndef transform(self, arg, *args, **kwargs):\n    return self._selected_obj.groupby(self._timegrouper).transform(arg, *args, **kwargs)",
    "docstring": "Call function producing a like-indexed Series on each group. Return a Series with the transformed values. Parameters ---------- arg : function To apply to each group. Should return a Series with the same index. *args, **kwargs Additional arguments and keywords. Returns ------- Series A Series with the transformed values, maintaining the same index as the original object. See Also -------- core.resample.Resampler.apply : Apply a function along each group. core.resample.Resampler.aggregate : Aggregate using one or more operations over the specified axis. Examples -------- >>> s = pd.Series([1, 2], index=pd.date_range(\"20180101\", periods=2, freq=\"1h\")) >>> s 2018-01-01 00:00:00 1 2018-01-01 01:00:00 2 Freq: h, dtype: int64 >>> resampled = s.resample(\"15min\") >>> resampled.transform(lambda x: (x - x.mean()) / x.std()) 2018-01-01 00:00:00 NaN 2018-01-01 01:00:00 NaN Freq: h, dtype: float64",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\resample.py",
    "ast_data": "FunctionDef name:transform arg:self arg:arg arguments arg arg arg arg Return return:yes Call Call"
  },
  {
    "library": "matplotlib",
    "name": "font_size",
    "source_code": "@property\ndef font_size(self):\n    return self.font.size",
    "docstring": "The font size.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\dviread.py",
    "ast_data": "FunctionDef name:font_size arg:self arguments arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "main_hook",
    "source_code": "def main_hook(self) -> None:\n    pass",
    "docstring": "Call this hook while there exists a non-joined process to shadow collective communications in a training iteration. Training iteration i.e., in one forward pass, backward pass, and optimizer step.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\algorithms\\join.py",
    "ast_data": "FunctionDef name:main_hook arg:self arguments arg"
  },
  {
    "library": "pytorch",
    "name": "__init__",
    "source_code": "def __init__(self, name: str, input_nodes: list[Buffer], layout: Layout, input_reorder: Optional[list[int]]=None) -> None:\n    super().__init__(name)\n    self.input_nodes = input_nodes\n    self.output_node: Buffer = Buffer(name='buf_out', layout=layout)\n    self.input_reorder = input_reorder\n    self.layout = layout",
    "docstring": "Baseclass for CUDA C++ Templates, derived from KernelTemplate. Not to be instantiated directly. Args: name (str): The name of the CUDATemplate object. input_nodes (List[IRNode]): A list of input IRNodes. layout (Layout): The layout of the output buffer / tensor. input_reorder (Optional[List[int]]): An optional list that specifies the order of the input nodes.",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\codegen\\cuda\\cuda_template.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:name arg:input_nodes arg:layout arg:input_reorder arguments arg arg arg arg arg Call Call Assign Call Assign Assign"
  },
  {
    "library": "sphinx",
    "name": "invoke_role",
    "source_code": "def invoke_role(self, role: tuple[str, str]) -> tuple[list[Node], list[system_message]]:\n    _deprecation_warning(__name__, f'{self.__class__.__name__}.invoke_role', '', remove=(9, 0))\n    domain = self.env.get_domain(role[0])\n    if domain:\n        role_func = domain.role(role[1])\n        assert role_func is not None\n        return role_func(':'.join(role), self.rawtext, self.text, self.lineno, self.inliner, self.options, self.content)\n    else:\n        return ([], [])",
    "docstring": "Invoke the role described by a `` pair.",
    "type": "method",
    "file_path": "sphinx\\sphinx\\ext\\intersphinx\\_resolve.py",
    "ast_data": "FunctionDef name:invoke_role arg:self arg:role arguments arg arg Call Assign Call If Assign Call Compare Return return:yes Call Call Return return:no"
  },
  {
    "library": "tensorflow",
    "name": "_solve",
    "source_code": "def _solve(self, rhs, adjoint=False, adjoint_arg=False):\n    logging.warn('Using (possibly slow) default implementation of solve.  Requires conversion to a dense matrix and O(N^3) operations.')\n    return self._dense_solve(rhs, adjoint=adjoint, adjoint_arg=adjoint_arg)",
    "docstring": "Default implementation of _solve.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\linalg\\linear_operator.py",
    "ast_data": "FunctionDef name:_solve arg:self arg:rhs arg:adjoint arg:adjoint_arg arguments arg arg arg arg Call Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "get_metadata_routing",
    "source_code": "def get_metadata_routing(self):\n    router = MetadataRouter(owner=self.__class__.__name__).add(estimator=self.estimator, method_mapping=MethodMapping().add(caller='fit', callee='fit').add(caller='fit', callee='score').add(caller='score', callee='score').add(caller='predict', callee='predict'))\n    return router",
    "docstring": "Get metadata routing of this object. Please check :ref: on how the routing mechanism works. .. versionadded:: 1.5 Returns ------- routing : MetadataRouter A :class: encapsulating routing information.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\linear_model\\_ransac.py",
    "ast_data": "FunctionDef name:get_metadata_routing arg:self arguments arg Assign Call Call Call Call Call Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "max_memory_allocated",
    "source_code": "def max_memory_allocated(device: Optional[_device_t]=None) -> int:\n    if not is_initialized():\n        return 0\n    return memory_stats(device).get('dram', 0).get('peak_bytes', 0)",
    "docstring": "Return the maximum memory allocated in bytes for a given device. Args: device (torch.device, str, or int, optional) selected device. Returns statistics for the current device, given by current_device(), if device is None (default).",
    "type": "function",
    "file_path": "pytorch\\torch\\mtia\\memory.py",
    "ast_data": "FunctionDef name:max_memory_allocated arg:device arguments arg If Call Return return:yes Return return:yes Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "has_free_symbols",
    "source_code": "def has_free_symbols(val: IterateExprs) -> bool:\n    return not all((e.is_number or e.is_Boolean for e in _iterate_exprs(val)))",
    "docstring": "Faster version of bool(free_symbols(val))",
    "type": "function",
    "file_path": "pytorch\\torch\\fx\\experimental\\symbolic_shapes.py",
    "ast_data": "FunctionDef name:has_free_symbols arg:val arguments arg Return return:yes Call BoolOp Call"
  },
  {
    "library": "tensorflow",
    "name": "RNNAttributes",
    "source_code": "class RNNAttributes(SerializedAttributes.with_attributes('RNNAttributes', checkpointable_objects=['states'], copy_from=[LayerAttributes])):\n    pass",
    "docstring": "RNN checkpointable objects + functions that are saved to the SavedModel. List of all attributes: All attributes from LayerAttributes (including CommonEndpoints) states: List of state variables",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\saving\\saved_model\\serialized_attributes.py",
    "ast_data": "ClassDef name:RNNAttributes Call"
  },
  {
    "library": "pytorch",
    "name": "tanh",
    "source_code": "def tanh(input):\n    return input.tanh()",
    "docstring": "tanh(input) -> Tensor Applies element-wise, :math: See :class: for more details.",
    "type": "function",
    "file_path": "pytorch\\torch\\nn\\functional.py",
    "ast_data": "FunctionDef name:tanh arg:input arguments arg Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "predict_factor",
    "source_code": "def predict_factor(h_abs, h_abs_old, error_norm, error_norm_old):\n    if error_norm_old is None or h_abs_old is None or error_norm == 0:\n        multiplier = 1\n    else:\n        multiplier = h_abs / h_abs_old * (error_norm_old / error_norm) ** 0.25\n    with np.errstate(divide='ignore'):\n        factor = min(1, multiplier) * error_norm ** (-0.25)\n    return factor",
    "docstring": "Predict by which factor to increase/decrease the step size. The algorithm is described in [1]_. Parameters ---------- h_abs, h_abs_old : float Current and previous values of the step size, can be None (see Notes). error_norm, error_norm_old : float Current and previous values of the error norm, can be None (see Notes). Returns ------- factor : float Predicted factor. Notes ----- If and are both not None then a two-step algorithm is used, otherwise a one-step algorithm is used. References ---------- .. [1] E. Hairer, S. P. Norsett G. Wanner, \"Solving Ordinary Differential Equations II: Stiff and Differential-Algebraic Problems\", Sec. IV.8.",
    "type": "function",
    "file_path": "scipy\\scipy\\integrate\\_ivp\\radau.py",
    "ast_data": "FunctionDef name:predict_factor arg:h_abs arg:h_abs_old arg:error_norm arg:error_norm_old arguments arg arg arg arg If BoolOp Compare Compare Compare Assign Assign With Call Assign Call Return return:yes"
  },
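A standalone numeric walk-through of the two-step controller above, on sample values: the step size grew from 0.1 to 0.12 while the error norm dropped from 0.8 to 0.5, so a further increase is predicted.

```python
h_abs, h_abs_old = 0.12, 0.1
error_norm, error_norm_old = 0.5, 0.8

# Two-step predictive part: ratio of step sizes times ratio of error norms.
multiplier = h_abs / h_abs_old * (error_norm_old / error_norm) ** 0.25
# Cap the multiplier at 1, then scale by the current error norm.
factor = min(1, multiplier) * error_norm ** -0.25
print(factor)  # ~1.19: the step size may grow by about 19%
```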
  {
    "library": "pytorch",
    "name": "ListGetItemSource",
    "source_code": "@dataclasses.dataclass(frozen=True)\nclass ListGetItemSource(GetItemSource):\n\n    def reconstruct(self, codegen: 'PyCodegen'):\n        codegen.add_push_null(lambda: codegen.load_import_from(utils.__name__, 'list_getitem'))\n        codegen(self.base)\n        if self.index_is_slice:\n            raise RuntimeError('List[slice] is a temporary object and should not have a source')\n        else:\n            codegen.append_output(codegen.create_load_const(self.index))\n        codegen.extend_output(create_call_function(2, False))\n\n    def name(self):\n        assert not isinstance(self.index, Source)\n        if self.index_is_slice:\n            raise RuntimeError('List[slice] is a temporary object and should not have a source')\n        else:\n            return f'list.__getitem__({self.base.name()}, {self.index!r})'",
    "docstring": "Same as GetItemSource with reconstruct and name overridden to be list specific.",
    "type": "class",
    "file_path": "pytorch\\torch\\_dynamo\\source.py",
    "ast_data": "ClassDef name:ListGetItemSource FunctionDef name:reconstruct arg:self arg:codegen arguments arg arg Call arguments Call Call If Raise Call Call Call Call Call FunctionDef name:name arg:self arguments arg Call If Raise Call Return return:yes Call Call"
  },
  {
    "library": "matplotlib",
    "name": "get_name",
    "source_code": "def get_name(self):\n    return get_font(findfont(self)).family_name",
    "docstring": "Return the name of the font that best matches the font properties.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\font_manager.py",
    "ast_data": "FunctionDef name:get_name arg:self arguments arg Return return:yes Call Call"
  },
  {
    "library": "scipy",
    "name": "_qmvn",
    "source_code": "def _qmvn(m, covar, low, high, rng, lattice='cbc', n_batches=10):\n    cho, lo, hi = _permuted_cholesky(covar, low, high)\n    if not cho.flags.c_contiguous:\n        cho = cho.copy()\n    n = cho.shape[0]\n    q, n_qmc_samples = _cbc_lattice(n - 1, max(m // n_batches, 1))\n    rndm = rng.random(size=(n_batches, n))\n    prob, est_error, n_samples = _qmvn_inner(q, rndm, int(n_qmc_samples), int(n_batches), cho, lo, hi)\n    return (prob, est_error, n_samples)",
    "docstring": "Multivariate normal integration over box bounds. Parameters ---------- m : int > n_batches The number of points to sample. This number will be divided into batches that apply random offsets of the sampling lattice for each batch in order to estimate the error. covar : (n, n) float array Possibly singular, positive semidefinite symmetric covariance matrix. low, high : (n,) float array The low and high integration bounds. rng : Generator, optional default_rng(), yada, yada lattice : 'cbc' or callable The type of lattice rule to use to construct the integration points. n_batches : int > 0, optional The number of QMC batches to apply. Returns ------- prob : float The estimated probability mass within the bounds. est_error : float 3 times the standard error of the batch estimates.",
    "type": "function",
    "file_path": "scipy\\scipy\\stats\\_qmvnt.py",
    "ast_data": "FunctionDef name:_qmvn arg:m arg:covar arg:low arg:high arg:rng arg:lattice arg:n_batches arguments arg arg arg arg arg arg arg Assign Call If Assign Call Assign Assign Call Call Assign Call Assign Call Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_flatten_dims_0_and_1",
    "source_code": "def _flatten_dims_0_and_1(t):\n    if isinstance(t, ragged_tensor.RaggedTensor):\n        return t.values\n    else:\n        t_shape = array_ops.shape(t)\n        return array_ops.reshape(t, array_ops.concat([[-1], t_shape[2:]], axis=0))",
    "docstring": "Returns a copy of with the outer two dimensions merged.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\ragged_gather_ops.py",
    "ast_data": "FunctionDef name:_flatten_dims_0_and_1 arg:t arguments arg If Call Return return:yes Assign Call Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "_should_free_in_backward",
    "source_code": "@no_type_check\ndef _should_free_in_backward(state: _FSDPState, handle: FlatParamHandle) -> bool:\n    if not handle.uses_sharded_strategy:\n        return False\n    return state._sync_gradients or handle._sharding_strategy in RESHARD_AFTER_FORWARD_HANDLE_STRATEGIES",
    "docstring": "Returns whether FSDP should free the unsharded flat parameter in the post-backward or not.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\fsdp\\_runtime_utils.py",
    "ast_data": "FunctionDef name:_should_free_in_backward arg:state arg:handle arguments arg arg If Return return:yes Return return:yes BoolOp Compare"
  },
  {
    "library": "scikit-learn",
    "name": "InputTags",
    "source_code": "@dataclass(slots=True)\nclass InputTags:\n    one_d_array: bool = False\n    two_d_array: bool = True\n    three_d_array: bool = False\n    sparse: bool = False\n    categorical: bool = False\n    string: bool = False\n    dict: bool = False\n    positive_only: bool = False\n    allow_nan: bool = False\n    pairwise: bool = False",
    "docstring": "Tags for the input data. Parameters ---------- one_d_array : bool, default=False Whether the input can be a 1D array. two_d_array : bool, default=True Whether the input can be a 2D array. Note that most common tests currently run only if this flag is set to `np.nanXfitTruemetricaffinitykernelmeta-estimatorsklearn.utils.metaestimators._safe_splitpositive_only`.",
    "type": "class",
    "file_path": "scikit-learn\\sklearn\\utils\\_tags.py",
    "ast_data": "ClassDef name:InputTags Call"
  },
  {
    "library": "tensorflow",
    "name": "is_ref",
    "source_code": "def is_ref(x):\n    return isinstance(x, variables_module.Variable) or (isinstance(x, module.Module) and hasattr(x, 'dtype') and hasattr(x, 'shape'))",
    "docstring": "Evaluates if the object has reference semantics. An object is deemed \"reference\" if it is a instance or is derived from a with and properties. Args: x: Any object. Returns: is_ref: Python indicating input is has nonreference semantics, i.e., is a or a with and properties.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\linalg\\linear_operator_util.py",
    "ast_data": "FunctionDef name:is_ref arg:x arguments arg Return return:yes BoolOp Call BoolOp Call Call Call"
  },
  {
    "library": "kornia",
    "name": "merge",
    "source_code": "def merge(self, boxes: Boxes, inplace: bool=False) -> Boxes:\n    data = torch.cat([self._data, boxes.data], dim=1)\n    if inplace:\n        self._data = data\n        return self\n    obj = self.clone()\n    obj._data = data\n    return obj",
    "docstring": "Merge boxes. Say, current instance holds :math: and the incoming boxes holds :math:, the merge results in :math:. Args: boxes: 2D boxes. inplace: do transform in-place and return self.",
    "type": "method",
    "file_path": "kornia\\kornia\\geometry\\boxes.py",
    "ast_data": "FunctionDef name:merge arg:self arg:boxes arg:inplace arguments arg arg arg Assign Call If Assign Return return:yes Assign Call Assign Return return:yes"
  },
  {
    "library": "numpy",
    "name": "_NumpyPlugin",
    "source_code": "class _NumpyPlugin(Plugin):\n\n    def get_type_analyze_hook(self, fullname: str) -> _HookFunc | None:\n        if fullname in _PRECISION_DICT:\n            return _hook\n        return None\n\n    def get_additional_deps(self, file: MypyFile) -> list[tuple[int, str, int]]:\n        fullname = file.fullname\n        if fullname == 'numpy':\n            _override_imports(file, f'{_MODULE}._extended_precision', imports=[(v, v) for v in _EXTENDED_PRECISION_LIST])\n        elif fullname == 'numpy.ctypeslib':\n            _override_imports(file, 'ctypes', imports=[(_C_INTP, '_c_intp')])\n        return [(PRI_MED, fullname, -1)]",
    "docstring": "A mypy plugin for handling versus numpy-specific typing tasks.",
    "type": "class",
    "file_path": "numpy\\numpy\\typing\\mypy_plugin.py",
    "ast_data": "ClassDef name:_NumpyPlugin FunctionDef name:get_type_analyze_hook arg:self arg:fullname arguments arg arg If Compare Return return:yes Return return:no FunctionDef name:get_additional_deps arg:self arg:file arguments arg arg Assign If Compare Call If Compare Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "DenseColumn",
    "source_code": "@tf_export('__internal__.feature_column.DenseColumn', v1=[])\nclass DenseColumn(fc_types.FeatureColumn):\n\n    @abc.abstractproperty\n    def variable_shape(self):\n        pass\n\n    @abc.abstractmethod\n    def get_dense_tensor(self, transformation_cache, state_manager):\n        pass",
    "docstring": "Represents a column which can be represented as . Some examples of this type are: numeric_column, embedding_column, indicator_column.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\feature_column\\feature_column_v2.py",
    "ast_data": "ClassDef name:DenseColumn FunctionDef name:variable_shape arg:self arguments arg FunctionDef name:get_dense_tensor arg:self arg:transformation_cache arg:state_manager arguments arg arg arg Call"
  },
  {
    "library": "pandas",
    "name": "__init__",
    "source_code": "@doc(storage_options=_shared_docs['storage_options'])\ndef __init__(self, filepath_or_buffer, storage_options: StorageOptions | None=None, engine_kwargs: dict | None=None) -> None:\n    err_msg = 'Install xlrd >= 2.0.1 for xls Excel support'\n    import_optional_dependency('xlrd', extra=err_msg)\n    super().__init__(filepath_or_buffer, storage_options=storage_options, engine_kwargs=engine_kwargs)",
    "docstring": "Reader using xlrd engine. Parameters ---------- filepath_or_buffer : str, path object or Workbook Object to be parsed. {storage_options} engine_kwargs : dict, optional Arbitrary keyword arguments passed to excel engine.",
    "type": "method",
    "file_path": "pandas\\pandas\\io\\excel\\_xlrd.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:filepath_or_buffer arg:storage_options arg:engine_kwargs arguments arg arg arg arg Assign Call Call Call Call"
  },
  {
    "library": "pandas",
    "name": "_get_same_shape_values",
    "source_code": "def _get_same_shape_values(lblk: Block, rblk: Block, left_ea: bool, right_ea: bool) -> tuple[ArrayLike, ArrayLike]:\n    lvals = lblk.values\n    rvals = rblk.values\n    assert rblk.mgr_locs.is_slice_like, rblk.mgr_locs\n    if not (left_ea or right_ea):\n        lvals = lvals[rblk.mgr_locs.indexer, :]\n        assert lvals.shape == rvals.shape, (lvals.shape, rvals.shape)\n    elif left_ea and right_ea:\n        assert lvals.shape == rvals.shape, (lvals.shape, rvals.shape)\n    elif right_ea:\n        lvals = lvals[rblk.mgr_locs.indexer, :]\n        assert lvals.shape[0] == 1, lvals.shape\n        lvals = lvals[0, :]\n    else:\n        assert rvals.shape[0] == 1, rvals.shape\n        rvals = rvals[0, :]\n    return (lvals, rvals)",
    "docstring": "Slice lblk.values to align with rblk. Squeeze if we have EAs.",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\internals\\ops.py",
    "ast_data": "FunctionDef name:_get_same_shape_values arg:lblk arg:rblk arg:left_ea arg:right_ea arguments arg arg arg arg Assign Assign If BoolOp Assign Compare If BoolOp Compare If Assign Compare Assign Compare Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_estimate_step_duration",
    "source_code": "def _estimate_step_duration(self, current, now):\n    if current:\n        if self._time_after_first_step is not None and current > 1:\n            time_per_unit = (now - self._time_after_first_step) / (current - 1)\n        else:\n            time_per_unit = (now - self._start) / current\n        if current == 1:\n            self._time_after_first_step = now\n        return time_per_unit\n    else:\n        return 0",
    "docstring": "Estimate the duration of a single step. Given the step number and the corresponding time this function returns an estimate for how long a single step takes. If this is called before one step has been completed (i.e. ) then zero is given as an estimate. The duration estimate ignores the duration of the (assumed to be non-representative) first step for estimates when more steps are available (i.e. ). Args: current: Index of current step. now: The current time. Returns: Estimate of the duration of a single step.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\utils\\generic_utils.py",
    "ast_data": "FunctionDef name:_estimate_step_duration arg:self arg:current arg:now arguments arg arg arg If If BoolOp Compare Compare Assign Assign If Compare Assign Return return:yes Return return:yes"
  },
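A standalone numeric illustration of the estimator above, with made-up timings: after the first step the timer resets, so from step 2 onward the (slow) first step is excluded from the average.

```python
start = 0.0
time_after_first_step = 5.0   # first step took 5s (e.g. tracing overhead)
current, now = 4, 11.0        # the next three steps took 6s in total

# Per-step estimate excluding the first step, as in the branch above.
time_per_unit = (now - time_after_first_step) / (current - 1)
print(time_per_unit)  # 2.0 seconds/step, not (11 - 0) / 4 = 2.75
```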
  {
    "library": "django",
    "name": "get_migratable_models",
    "source_code": "def get_migratable_models(self, app_config, db, include_auto_created=False):\n    models = app_config.get_models(include_auto_created=include_auto_created)\n    return [model for model in models if self.allow_migrate_model(db, model)]",
    "docstring": "Return app models allowed to be migrated on provided db.",
    "type": "method",
    "file_path": "django\\django\\db\\utils.py",
    "ast_data": "FunctionDef name:get_migratable_models arg:self arg:app_config arg:db arg:include_auto_created arguments arg arg arg arg Assign Call Return return:yes Call"
  },
  {
    "library": "scrapy",
    "name": "disconnect_all",
    "source_code": "def disconnect_all(self, signal: Any, **kwargs: Any) -> None:\n    kwargs.setdefault('sender', self.sender)\n    _signal.disconnect_all(signal, **kwargs)",
    "docstring": "Disconnect all receivers from the given signal. :param signal: the signal to disconnect from :type signal: object",
    "type": "method",
    "file_path": "scrapy\\scrapy\\signalmanager.py",
    "ast_data": "FunctionDef name:disconnect_all arg:self arg:signal arguments arg arg arg Call Call"
  },
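A minimal sketch of how this sits in a Scrapy extension, assuming access to the crawler's SignalManager via `from_crawler`; `MyExtension` is hypothetical.

```python
from scrapy import signals

class MyExtension:
    @classmethod
    def from_crawler(cls, crawler):
        ext = cls()
        # Attach a receiver to the spider_opened signal.
        crawler.signals.connect(ext.spider_opened, signal=signals.spider_opened)
        # Later, drop every receiver still attached to this signal:
        # crawler.signals.disconnect_all(signals.spider_opened)
        return ext

    def spider_opened(self, spider):
        spider.logger.info("opened %s", spider.name)
```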
  {
    "library": "pandas",
    "name": "current_year",
    "source_code": "@staticmethod\ndef current_year(context):\n    context['current_year'] = datetime.datetime.now().year\n    return context",
    "docstring": "Add the current year to the context, so it can be used for the copyright note, or other places where it is needed.",
    "type": "method",
    "file_path": "pandas\\web\\pandas_web.py",
    "ast_data": "FunctionDef name:current_year arg:context arguments arg Assign Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "get_unit",
    "source_code": "def get_unit(self):\n    return self._unit",
    "docstring": "Return the unit for input to the transform used by ``.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\text.py",
    "ast_data": "FunctionDef name:get_unit arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_build_distributed_network",
    "source_code": "def _build_distributed_network(model, strategy, mode, inputs=None, targets=None):\n    with backend.get_graph().as_default(), strategy.scope():\n        distributed_model = strategy.extended.call_for_each_replica(_build_network_on_replica, args=(model, mode, inputs, targets))\n        set_distributed_model(model, mode, distributed_model)",
    "docstring": "Create a cloned model on each replica.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\distribute\\distributed_training_utils_v1.py",
    "ast_data": "FunctionDef name:_build_distributed_network arg:model arg:strategy arg:mode arg:inputs arg:targets arguments arg arg arg arg arg With Call Call Call Assign Call Call"
  },
  {
    "library": "tensorflow",
    "name": "preemption_message",
    "source_code": "@property\ndef preemption_message(self):\n    return self._preemption_message",
    "docstring": "Returns the preemption message.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\failure_handling\\preemption_watcher.py",
    "ast_data": "FunctionDef name:preemption_message arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "__str__",
    "source_code": "def __str__(self):\n    info = {'section': self._section, 'config': self.config, 'req_type': self._req_type, 'req': str(self.req), 'range': str(self.range), 'exclude': str(self.exclude), 'include': str(self.include), 'init': str(self._initialized)}\n    req_str = '\\n >>> _Reqs Instance <<<\\n'\n    req_str += 'Section: {section}\\n'\n    req_str += 'Configuration name: {config}\\n'\n    req_str += 'Requirement type: {req_type}\\n'\n    req_str += 'Requirement: {req}\\n'\n    req_str += 'Range: {range}\\n'\n    req_str += 'Exclude: {exclude}\\n'\n    req_str += 'Include: {include}\\n'\n    req_str += 'Initialized: {init}\\n\\n'\n    return req_str.format(**info)",
    "docstring": "Prints a requirement and its components. Returns: String that has concatenated information about a requirement.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\tools\\tensorflow_builder\\compat_checker\\compat_checker.py",
    "ast_data": "FunctionDef name:__str__ arg:self arguments arg Assign Call Call Call Call Call Assign Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "maximum",
    "source_code": "def maximum(inputs, **kwargs):\n    return Maximum(**kwargs)(inputs)",
    "docstring": "Functional interface to compute maximum (element-wise) list of . This is equivalent to the layer. For example: Args: inputs: A list of input tensors (at least 2) of same shape. **kwargs: Standard layer keyword arguments. Returns: A tensor (of same shape as input tensor) with the element-wise maximum of the inputs. Raises: ValueError: If input tensors are of different shape.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\layers\\merge.py",
    "ast_data": "FunctionDef name:maximum arg:inputs arguments arg arg Return return:yes Call Call"
  },
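A minimal functional-API sketch of the helper above, via the public `tf.keras.layers.maximum` alias; shapes are arbitrary placeholders.

```python
import tensorflow as tf

x1 = tf.keras.layers.Input(shape=(8,))
x2 = tf.keras.layers.Input(shape=(8,))
# Element-wise max of the two inputs; output has the same shape.
out = tf.keras.layers.maximum([x1, x2])
model = tf.keras.Model(inputs=[x1, x2], outputs=out)
```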
  {
    "library": "pytorch",
    "name": "_remove_effect_tokens",
    "source_code": "def _remove_effect_tokens(ep: ExportedProgram) -> ExportedProgram:\n    num_tokens: int = 0\n    input_token_names: list[str] = []\n    new_input_specs: list[InputSpec] = []\n    for inp in ep.graph_signature.input_specs:\n        if inp.kind == InputKind.TOKEN:\n            num_tokens += 1\n            assert isinstance(inp.arg, TokenArgument)\n            input_token_names.append(inp.arg.name)\n        else:\n            new_input_specs.append(inp)\n    num_out_tokens: int = 0\n    new_output_specs: list[OutputSpec] = []\n    output_token_names: list[OutputSpec] = []\n    for out in ep.graph_signature.output_specs:\n        if out.kind == OutputKind.TOKEN:\n            num_out_tokens += 1\n            output_token_names.append(out.arg.name)\n        else:\n            new_output_specs.append(out)\n    ep.graph_signature.input_specs = new_input_specs\n    ep.graph_signature.output_specs = new_output_specs\n    assert num_tokens == num_out_tokens\n    with ep.graph_module._set_replace_hook(ep.graph_signature.get_replace_hook()):\n        _remove_effect_tokens_from_graph_helper(ep, num_tokens, input_token_names, output_token_names)\n    return ep",
    "docstring": "Removes the existance of tokens from the exported program, including: - Removes the input and output tokens - Replaces with_effects(token, func, args) with just func(args) This function does an inplace modification on the given ExportedProgram.",
    "type": "function",
    "file_path": "pytorch\\torch\\export\\_remove_effect_tokens_pass.py",
    "ast_data": "FunctionDef name:_remove_effect_tokens arg:ep arguments arg For If Compare Call Call Call For If Compare Call Call Assign Assign Compare With Call Call Call Return return:yes"
  },
  {
    "library": "pandas",
    "name": "sunday_to_monday",
    "source_code": "def sunday_to_monday(dt: datetime) -> datetime:\n    if dt.weekday() == 6:\n        return dt + timedelta(1)\n    return dt",
    "docstring": "If holiday falls on Sunday, use day thereafter (Monday) instead.",
    "type": "function",
    "file_path": "pandas\\pandas\\tseries\\holiday.py",
    "ast_data": "FunctionDef name:sunday_to_monday arg:dt arguments arg If Compare Call Return return:yes Call Return return:yes"
  },
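The observance rule in action: 2023-01-01 fell on a Sunday, so the observed holiday moves to Monday 2023-01-02, while a Saturday date is left unchanged.

```python
from datetime import datetime
from pandas.tseries.holiday import sunday_to_monday

print(sunday_to_monday(datetime(2023, 1, 1)))    # 2023-01-02 (Monday)
print(sunday_to_monday(datetime(2022, 12, 31)))  # unchanged (Saturday)
```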
  {
    "library": "authlib",
    "name": "get_auth_time",
    "source_code": "def get_auth_time(self, user) -> Optional[int]:\n    return None",
    "docstring": "User authentication time. Time when the End-User authentication occurred. Its value is a JSON number representing the number of seconds from 1970-01-01T0:0:0Z as measured in UTC until the date/time. Developers MAY re-implement this method:: def get_auth_time(self, user): return datetime.timestamp(user.get_auth_time())",
    "type": "method",
    "file_path": "authlib\\authlib\\oauth2\\rfc9068\\token.py",
    "ast_data": "FunctionDef name:get_auth_time arg:self arg:user arguments arg arg Return return:no"
  },
  {
    "library": "scikit-learn",
    "name": "_perplexity_precomp_distr",
    "source_code": "def _perplexity_precomp_distr(self, X, doc_topic_distr=None, sub_sampling=False):\n    if doc_topic_distr is None:\n        doc_topic_distr = self._unnormalized_transform(X)\n    else:\n        n_samples, n_components = doc_topic_distr.shape\n        if n_samples != X.shape[0]:\n            raise ValueError('Number of samples in X and doc_topic_distr do not match.')\n        if n_components != self.n_components:\n            raise ValueError('Number of topics does not match.')\n    current_samples = X.shape[0]\n    bound = self._approx_bound(X, doc_topic_distr, sub_sampling)\n    if sub_sampling:\n        word_cnt = X.sum() * (float(self.total_samples) / current_samples)\n    else:\n        word_cnt = X.sum()\n    perword_bound = bound / word_cnt\n    return np.exp(-1.0 * perword_bound)",
    "docstring": "Calculate approximate perplexity for data X with ability to accept precomputed doc_topic_distr Perplexity is defined as exp(-1. * log-likelihood per word) Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) Document word matrix. doc_topic_distr : ndarray of shape (n_samples, n_components), default=None Document topic distribution. If it is None, it will be generated by applying transform on X. Returns ------- score : float Perplexity score.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\decomposition\\_lda.py",
    "ast_data": "FunctionDef name:_perplexity_precomp_distr arg:self arg:X arg:doc_topic_distr arg:sub_sampling arguments arg arg arg arg If Compare Assign Call Assign If Compare Raise Call If Compare Raise Call Assign Assign Call If Assign Call Call Assign Call Assign Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "from_string",
    "source_code": "@classmethod\ndef from_string(cls, layout_str: str) -> 'Layout':\n    return cls._new_object(layout_str=layout_str)",
    "docstring": "Creates an instance from a human-readable string.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\dtensor\\python\\layout.py",
    "ast_data": "FunctionDef name:from_string arg:cls arg:layout_str arguments arg arg Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "_factorize_int",
    "source_code": "def _factorize_int(n):\n    factors = set()\n    for p in primes_from_2_to(int(np.sqrt(n)) + 1):\n        while not n % p:\n            factors.add(p)\n            n //= p\n        if n == 1:\n            break\n    if n != 1:\n        factors.add(n)\n    return sorted(factors)",
    "docstring": "Return a sorted list of the unique prime factors of a positive integer.",
    "type": "function",
    "file_path": "scipy\\scipy\\stats\\_qmvnt.py",
    "ast_data": "FunctionDef name:_factorize_int arg:n arguments arg Assign Call For Call Call Call While Call If Compare If Compare Call Return return:yes Call"
  },
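A standalone copy of the helper above for illustration, with plain trial division swapped in for the `primes_from_2_to` sieve (harmless, since composite divisors never fire after their prime factors are removed); 360 = 2^3 * 3^2 * 5, so only the unique primes come back.

```python
import numpy as np

def factorize_int(n):
    factors = set()
    # Trial-divide up to sqrt(n); dividing out each factor keeps n shrinking.
    for p in range(2, int(np.sqrt(n)) + 2):
        while not n % p:
            factors.add(p)
            n //= p
        if n == 1:
            break
    if n != 1:  # whatever remains is a prime factor > sqrt(original n)
        factors.add(n)
    return sorted(factors)

print(factorize_int(360))  # [2, 3, 5]
```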
  {
    "library": "numpy",
    "name": "getdoc",
    "source_code": "def getdoc(self):\n    meth = getattr(MaskedArray, self.__name__, None) or getattr(np, self.__name__, None)\n    signature = self.__name__ + get_object_signature(meth)\n    if meth is not None:\n        doc = f'    {signature}\\n{getattr(meth, '__doc__', None)}'\n        return doc",
    "docstring": "Return the doc of the function (from the doc of the method).",
    "type": "method",
    "file_path": "numpy\\numpy\\ma\\core.py",
    "ast_data": "FunctionDef name:getdoc arg:self arguments arg Assign BoolOp Call Call Assign Call If Compare Assign Call Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "_joint_log_likelihood",
    "source_code": "def _joint_log_likelihood(self, X):\n    return safe_sparse_dot(X, self.feature_log_prob_.T) + self.class_log_prior_",
    "docstring": "Calculate the posterior log probability of the samples X",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\naive_bayes.py",
    "ast_data": "FunctionDef name:_joint_log_likelihood arg:self arg:X arguments arg arg Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "_equal_values",
    "source_code": "def _equal_values(self: BlockManager, other: BlockManager) -> bool:\n    return blockwise_all(self, other, array_equals)",
    "docstring": "Used in .equals defined in base class. Only check the column values assuming shape and indexes have already been checked.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\internals\\managers.py",
    "ast_data": "FunctionDef name:_equal_values arg:self arg:other arguments arg arg Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "_IndexSlice",
    "source_code": "class _IndexSlice:\n\n    def __getitem__(self, arg):\n        return arg",
    "docstring": "Create an object to more easily perform multi-index slicing. See Also -------- MultiIndex.remove_unused_levels : New MultiIndex with no unused levels. Notes ----- See :ref: for further info on slicing a MultiIndex. Examples -------- >>> midx = pd.MultiIndex.from_product([[\"A0\", \"A1\"], [\"B0\", \"B1\", \"B2\", \"B3\"]]) >>> columns = [\"foo\", \"bar\"] >>> dfmi = pd.DataFrame( ... np.arange(16).reshape((len(midx), len(columns))), ... index=midx, ... columns=columns, ... ) Using the default slice command: >>> dfmi.loc[(slice(None), slice(\"B0\", \"B1\")), :] foo bar A0 B0 0 1 B1 2 3 A1 B0 8 9 B1 10 11 Using the IndexSlice class for a more intuitive command: >>> idx = pd.IndexSlice >>> dfmi.loc[idx[:, \"B0\":\"B1\"], :] foo bar A0 B0 0 1 B1 2 3 A1 B0 8 9 B1 10 11",
    "type": "class",
    "file_path": "pandas\\pandas\\core\\indexing.py",
    "ast_data": "ClassDef name:_IndexSlice FunctionDef name:__getitem__ arg:self arg:arg arguments arg arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "PostLocalSGDState",
    "source_code": "class PostLocalSGDState:\n    __slots__ = ['process_group', 'subgroup', 'start_localSGD_iter', 'post_local_gradient_allreduce', 'iter']\n\n    def __init__(self, process_group, subgroup, start_localSGD_iter, post_local_gradient_allreduce=True):\n        logger.info('Local SGD will be started after %s iterations', start_localSGD_iter)\n        self.process_group = process_group\n        self.subgroup = subgroup\n        self.start_localSGD_iter = start_localSGD_iter\n        self.post_local_gradient_allreduce = post_local_gradient_allreduce\n        self.iter = 0\n\n    def maybe_increase_iter(self, bucket):\n        if bucket.is_last():\n            self.iter += 1\n        if self.iter == self.start_localSGD_iter:\n            logger.info('Start to apply local SGD after %s iterations.', self.iter)",
    "docstring": "Store state for all-reducing gradients globally until given step, then locally after. Stores the state for all-reducing gradients globally using `` may be worth tuning, because both true and false may give a faster convergence.",
    "type": "class",
    "file_path": "pytorch\\torch\\distributed\\algorithms\\ddp_comm_hooks\\post_localSGD_hook.py",
    "ast_data": "ClassDef name:PostLocalSGDState Assign FunctionDef name:__init__ arg:self arg:process_group arg:subgroup arg:start_localSGD_iter arg:post_local_gradient_allreduce arguments arg arg arg arg arg Call Assign Assign Assign Assign Assign FunctionDef name:maybe_increase_iter arg:self arg:bucket arguments arg arg If Call If Compare Call"
  },
  {
    "library": "tensorflow",
    "name": "RaggedTensor",
    "source_code": "class RaggedTensor(object):\n    pass",
    "docstring": "Interface for internal isinstance checks to ops/ragged/ragged_tensor.py. This helps to avoid circular dependencies.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\types\\internal.py",
    "ast_data": "ClassDef name:RaggedTensor"
  },
  {
    "library": "tensorflow",
    "name": "devices",
    "source_code": "@property\n@deprecation.deprecated(None, 'Please avoid relying on devices property.')\ndef devices(self):\n    require_replica_context(self)\n    return (device_util.current(),)",
    "docstring": "Returns the devices this replica is to be executed on, as a tuple of strings. NOTE: For and , this returns a nested list of device strings, e.g., [[\"GPU:0\"]].",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\distribute_lib.py",
    "ast_data": "FunctionDef name:devices arg:self arguments arg Call Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "named_parameters",
    "source_code": "def named_parameters(self, remove_duplicate: bool=True) -> Iterable[tuple[str, torch.Tensor]]:\n    yield from self.module.named_parameters(remove_duplicate=remove_duplicate)",
    "docstring": "Iterate over all the parameters in the module.",
    "type": "method",
    "file_path": "pytorch\\torch\\nn\\utils\\_named_member_accessor.py",
    "ast_data": "FunctionDef name:named_parameters arg:self arg:remove_duplicate arguments arg arg Call"
  },
  {
    "library": "tensorflow",
    "name": "global_policy",
    "source_code": "def global_policy():\n    if _global_policy is None:\n        if base_layer_utils.v2_dtype_behavior_enabled():\n            return Policy(backend.floatx())\n        else:\n            return Policy('_infer')\n    return _global_policy",
    "docstring": "Returns the global dtype policy. The global policy is the default used for layers, if no policy is passed to the layer constructor. If no policy has been set with , this will return a policy constructed from (floatx defaults to float32). >>> tf.keras.mixed_precision.global_policy() >>> tf.keras.layers.Dense(10).dtype_policy # Defaults to the global policy If TensorFlow 2 behavior has been disabled with , this will instead return a special \"_infer\" policy which infers the dtype from the dtype of the first input the first time the layer is called. This behavior matches the behavior that existed in TensorFlow 1. See for more information on policies. Returns: The global Policy.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\mixed_precision\\policy.py",
    "ast_data": "FunctionDef name:global_policy arguments If Compare If Call Return return:yes Call Call Return return:yes Call Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "_fit_one",
    "source_code": "def _fit_one(transformer, X, y, weight, message_clsname='', message=None, params=None):\n    with _print_elapsed_time(message_clsname, message):\n        return transformer.fit(X, y, **params['fit'])",
    "docstring": "Fits ``.",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\pipeline.py",
    "ast_data": "FunctionDef name:_fit_one arg:transformer arg:X arg:y arg:weight arg:message_clsname arg:message arg:params arguments arg arg arg arg arg arg arg With Call Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "ThetaAxis",
    "source_code": "class ThetaAxis(maxis.XAxis):\n    __name__ = 'thetaaxis'\n    axis_name = 'theta'\n    _tick_class = ThetaTick\n\n    def _wrap_locator_formatter(self):\n        self.set_major_locator(ThetaLocator(self.get_major_locator()))\n        self.set_major_formatter(ThetaFormatter())\n        self.isDefault_majloc = True\n        self.isDefault_majfmt = True\n\n    def clear(self):\n        super().clear()\n        self.set_ticks_position('none')\n        self._wrap_locator_formatter()\n\n    def _set_scale(self, value, **kwargs):\n        if value != 'linear':\n            raise NotImplementedError('The xscale cannot be set on a polar plot')\n        super()._set_scale(value, **kwargs)\n        self.get_major_locator().set_params(steps=[1, 1.5, 3, 4.5, 9, 10])\n        self._wrap_locator_formatter()\n\n    def _copy_tick_props(self, src, dest):\n        if src is None or dest is None:\n            return\n        super()._copy_tick_props(src, dest)\n        trans = dest._get_text1_transform()[0]\n        dest.label1.set_transform(trans + dest._text1_translate)\n        trans = dest._get_text2_transform()[0]\n        dest.label2.set_transform(trans + dest._text2_translate)",
    "docstring": "A theta Axis. This overrides certain properties of an to provide special-casing for an angular axis.",
    "type": "class",
    "file_path": "matplotlib\\lib\\matplotlib\\projections\\polar.py",
    "ast_data": "ClassDef name:ThetaAxis Assign Assign Assign FunctionDef name:_wrap_locator_formatter arg:self arguments arg Call Call Call Call Call Assign Assign FunctionDef name:clear arg:self arguments arg Call Call Call Call FunctionDef name:_set_scale arg:self arg:value arguments arg arg arg If Compare Raise Call Call Call Call Call Call FunctionDef name:_copy_tick_props arg:self arg:src arg:dest arguments arg arg arg If BoolOp Compare Compare Return return:no Call Call Assign Call Call Assign Call Call"
  },
  {
    "library": "matplotlib",
    "name": "transform_point",
    "source_code": "def transform_point(self, point):\n    if len(point) != self.input_dims:\n        raise ValueError(\"The length of 'point' must be 'self.input_dims'\")\n    return self.transform(point)",
    "docstring": "Return a transformed point. This function is only kept for backcompatibility; the more general method is capable of transforming both a list of points and a single point. The point is given as a sequence of length :attr:. The transformed point is returned as a sequence of length :attr:.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\transforms.py",
    "ast_data": "FunctionDef name:transform_point arg:self arg:point arguments arg arg If Compare Call Raise Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "matmul",
    "source_code": "def matmul(A: Optional[Tensor], B: Tensor) -> Tensor:\n    if A is None:\n        return B\n    if is_sparse(A):\n        return torch.sparse.mm(A, B)\n    return torch.matmul(A, B)",
    "docstring": "Multiply two matrices. If A is None, return B. A can be sparse or dense. B is always dense.",
    "type": "function",
    "file_path": "pytorch\\torch\\_linalg_utils.py",
    "ast_data": "FunctionDef name:matmul arg:A arg:B arguments arg arg If Compare Return return:yes If Call Return return:yes Call Return return:yes Call"
  },
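A quick check of the dispatch above; note `torch._linalg_utils` is a private module, so this import is for illustration only. Sparse A falls through to `torch.sparse.mm` (which returns a dense result here), dense A to `torch.matmul`, and A=None returns B unchanged.

```python
import torch
from torch._linalg_utils import matmul

B = torch.eye(3)
A_dense = torch.rand(3, 3)
A_sparse = A_dense.to_sparse()

assert torch.allclose(matmul(A_dense, B), A_dense)   # dense path
assert torch.allclose(matmul(A_sparse, B), A_dense)  # sparse path
assert matmul(None, B) is B                          # passthrough
```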
  {
    "library": "pytorch",
    "name": "maybe_increase_iter",
    "source_code": "def maybe_increase_iter(self, bucket):\n    if bucket.is_last():\n        self.iter += 1\n    if self.iter == self.start_localSGD_iter:\n        logger.info('Start to apply local SGD after %s iterations.', self.iter)",
    "docstring": "Track iterations and trigger log message at start of local SGD.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\algorithms\\ddp_comm_hooks\\post_localSGD_hook.py",
    "ast_data": "FunctionDef name:maybe_increase_iter arg:self arg:bucket arguments arg arg If Call If Compare Call"
  },
  {
    "library": "sphinx",
    "name": "ensuredir",
    "source_code": "def ensuredir(file: str | os.PathLike[str]) -> None:\n    os.makedirs(file, exist_ok=True)",
    "docstring": "Ensure that a path exists.",
    "type": "function",
    "file_path": "sphinx\\sphinx\\util\\osutil.py",
    "ast_data": "FunctionDef name:ensuredir arg:file arguments arg Call"
  },
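Since `ensuredir` is a thin wrapper over `os.makedirs(..., exist_ok=True)`, repeated calls are idempotent; a small sketch using only the standard library (paths are illustrative):

```python
import os
import tempfile

base = tempfile.mkdtemp()
nested = os.path.join(base, "a", "b", "c")
os.makedirs(nested, exist_ok=True)  # creates all intermediate directories
os.makedirs(nested, exist_ok=True)  # second call is a no-op, no error
print(os.path.isdir(nested))        # True
```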
  {
    "library": "pytorch",
    "name": "Split",
    "source_code": "@dataclass\nclass Split(DimSpec):\n    input_dim: DimSpec\n    group_shape: Shape\n    split_id: int\n\n    @classmethod\n    def new(cls, dim: DimSpec, group_shape: tuple[int, ...], idx: int) -> DimSpec:\n        assert len(group_shape) > 0\n        if len(group_shape) == 1:\n            assert idx == 0\n            return dim\n        elif group_shape[idx] == 1:\n            return Singleton()\n        else:\n            group_mapping = list(enumerate(((s, i) for i, s in enumerate(group_shape) if s != 1)))\n            new_group_shape = tuple((m[1][0] for m in group_mapping))\n            new_idx = next(filter(lambda x: x[1][1] == idx, group_mapping))[0]\n            return Split(dim, new_group_shape, new_idx)\n\n    def inputs(self) -> Iterable[DimSpec]:\n        return (self.input_dim,)",
    "docstring": "This dimension is a member of a decomposition of the input dim. Note that input_dim itself could be a Flattened set of input dims.",
    "type": "class",
    "file_path": "pytorch\\torch\\distributed\\tensor\\_ops\\_view_ops.py",
    "ast_data": "ClassDef name:Split FunctionDef name:new arg:cls arg:dim arg:group_shape arg:idx arguments arg arg arg arg Compare Call If Compare Call Compare Return return:yes If Compare Return return:yes Call Assign Call Call Call Compare Assign Call Assign Call Call arguments arg Compare Return return:yes Call FunctionDef name:inputs arg:self arguments arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_match",
    "source_code": "def _match(modules: dict[str, nn.ModuleDict], node: Node, current: Union[nn.Module, Any]) -> bool:\n    if isinstance(current, type) and issubclass(current, MatchAllNode):\n        return True\n    if not isinstance(node, Node):\n        return False\n    if isinstance(current, type) and issubclass(current, torch.nn.Module):\n        return node.op == 'call_module' and parametrize.type_before_parametrizations(modules[node.target]) == current\n    elif callable(current):\n        return node.op == 'call_function' and node.target is current\n    elif isinstance(current, str):\n        return node.target == current\n    return False",
    "docstring": "checks to see if a single node of a pattern matches",
    "type": "function",
    "file_path": "pytorch\\torch\\ao\\pruning\\_experimental\\pruner\\match_utils.py",
    "ast_data": "FunctionDef name:_match arg:modules arg:node arg:current arguments arg arg arg If BoolOp Call Call Return return:yes If Call Return return:yes If BoolOp Call Call Return return:yes BoolOp Compare Compare Call If Call Return return:yes BoolOp Compare Compare If Call Return return:yes Compare Return return:yes"
  },
  {
    "library": "pandas",
    "name": "is_bool",
    "source_code": "@final\n@property\ndef is_bool(self) -> bool:\n    return self.values.dtype == np.dtype(bool)",
    "docstring": "We can be bool if a) we are bool dtype or b) object dtype with bool objects.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\internals\\blocks.py",
    "ast_data": "FunctionDef name:is_bool arg:self arguments arg Return return:yes Compare Call"
  },
  {
    "library": "tensorflow",
    "name": "ProfileAnalysisStub",
    "source_code": "class ProfileAnalysisStub(object):\n\n    def __init__(self, channel):\n        self.NewSession = channel.unary_unary('/tensorflow.ProfileAnalysis/NewSession', request_serializer=third__party_dot_tensorflow_dot_core_dot_profiler_dot_profiler__analysis__pb2.NewProfileSessionRequest.SerializeToString, response_deserializer=third__party_dot_tensorflow_dot_core_dot_profiler_dot_profiler__analysis__pb2.NewProfileSessionResponse.FromString)\n        self.EnumSessions = channel.unary_unary('/tensorflow.ProfileAnalysis/EnumSessions', request_serializer=third__party_dot_tensorflow_dot_core_dot_profiler_dot_profiler__analysis__pb2.EnumProfileSessionsAndToolsRequest.SerializeToString, response_deserializer=third__party_dot_tensorflow_dot_core_dot_profiler_dot_profiler__analysis__pb2.EnumProfileSessionsAndToolsResponse.FromString)\n        self.GetSessionToolData = channel.unary_unary('/tensorflow.ProfileAnalysis/GetSessionToolData', request_serializer=third__party_dot_tensorflow_dot_core_dot_profiler_dot_profiler__analysis__pb2.ProfileSessionDataRequest.SerializeToString, response_deserializer=third__party_dot_tensorflow_dot_core_dot_profiler_dot_profiler__analysis__pb2.ProfileSessionDataResponse.FromString)",
    "docstring": "////////////////////////////////////////////////////////////////////////////// ProfileAnalysis service provide entry point for profiling TPU and for serving profiled data to Tensorboard through GRPC //////////////////////////////////////////////////////////////////////////////",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\tpu\\profiler\\profiler_analysis_pb2_grpc.py",
    "ast_data": "ClassDef name:ProfileAnalysisStub FunctionDef name:__init__ arg:self arg:channel arguments arg arg Assign Call Assign Call Assign Call"
  },
  {
    "library": "tensorflow",
    "name": "reciprocal_no_nan",
    "source_code": "@tf_export('math.reciprocal_no_nan')\n@dispatch.register_unary_elementwise_api\n@dispatch.add_dispatch_support\ndef reciprocal_no_nan(x, name=None):\n    with ops.name_scope(name, 'reciprocal_no_nan', [x]) as scope:\n        x = ops.convert_to_tensor(x, name='x')\n        one = constant_op.constant(1, dtype=x.dtype.base_dtype, name='one')\n        return gen_math_ops.div_no_nan(one, x, name=scope)",
    "docstring": "Performs a safe reciprocal operation, element wise. If a particular element is zero, the reciprocal for that element is also set to zero. For example: Args: x: A of type , , or . name: A name for the operation (optional). Returns: A of same shape and type as . Raises: TypeError: x must be of a valid dtype.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\math_ops.py",
    "ast_data": "FunctionDef name:reciprocal_no_nan arg:x arg:name arguments arg arg With Call Assign Call Assign Call Return return:yes Call Call"
  },
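A short usage sketch of the public endpoint `tf.math.reciprocal_no_nan` (requires TensorFlow; values illustrative):

```python
import tensorflow as tf

x = tf.constant([2.0, 0.5, 0.0, 4.0])
y = tf.math.reciprocal_no_nan(x)
# zero inputs map to zero instead of inf/nan
print(y.numpy())  # [0.5  2.   0.   0.25]
```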
  {
    "library": "cherrypy",
    "name": "start",
    "source_code": "def start(self):\n    from flup.server.fcgi import WSGIServer\n    self.fcgiserver = WSGIServer(*self.args, **self.kwargs)\n    self.fcgiserver._installSignalHandlers = lambda: None\n    self.fcgiserver._oldSIGs = []\n    self.ready = True\n    self.fcgiserver.run()",
    "docstring": "Start the FCGI server.",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\process\\servers.py",
    "ast_data": "FunctionDef name:start arg:self arguments arg Assign Call Assign arguments Assign Assign Call"
  },
  {
    "library": "pandas",
    "name": "parse_table_schema",
    "source_code": "def parse_table_schema(json, precise_float: bool) -> DataFrame:\n    table = ujson_loads(json, precise_float=precise_float)\n    col_order = [field['name'] for field in table['schema']['fields']]\n    df = DataFrame(table['data'], columns=col_order)[col_order]\n    dtypes = {field['name']: convert_json_field_to_pandas_type(field) for field in table['schema']['fields']}\n    if 'timedelta64' in dtypes.values():\n        raise NotImplementedError('table=\"orient\" can not yet read ISO-formatted Timedelta data')\n    df = df.astype(dtypes)\n    if 'primaryKey' in table['schema']:\n        df = df.set_index(table['schema']['primaryKey'])\n        if len(df.index.names) == 1:\n            if df.index.name == 'index':\n                df.index.name = None\n        else:\n            df.index.names = [None if x.startswith('level_') else x for x in df.index.names]\n    return df",
    "docstring": "Builds a DataFrame from a given schema Parameters ---------- json : A JSON table schema precise_float : bool Flag controlling precision when decoding string to double values, as dictated by `DataFrame.to_jsonIndexDataFrameIndexMultiIndexIndexMultiIndex` names starting with 'level_' are not supported. See Also -------- build_table_schema : Inverse function. pandas.read_json",
    "type": "function",
    "file_path": "pandas\\pandas\\io\\json\\_table_schema.py",
    "ast_data": "FunctionDef name:parse_table_schema arg:json arg:precise_float arguments arg arg Assign Call Assign Assign Call Assign Call If Compare Call Raise Call Assign Call If Compare Assign Call If Compare Call If Compare Assign Assign Call Return return:yes"
  },
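This parser backs `pd.read_json(..., orient='table')`; a round-trip sketch with only public pandas APIs:

```python
import pandas as pd
from io import StringIO

df = pd.DataFrame({"a": [1, 2], "b": ["x", "y"]})
payload = df.to_json(orient="table")   # emits a Table Schema plus the data
restored = pd.read_json(StringIO(payload), orient="table")
print(restored.dtypes)                 # dtypes recovered from the schema
```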
  {
    "library": "pytorch",
    "name": "flex_attention_backward_grid",
    "source_code": "def flex_attention_backward_grid(batch_size, q_heads, num_queries, d_model, kv_heads, num_key_value, meta):\n    import triton\n    return (triton.cdiv(num_queries, meta['BLOCK_M2']) * (q_heads // kv_heads) + triton.cdiv(num_key_value, meta['BLOCK_N1']), 1, batch_size * kv_heads)",
    "docstring": "How is this kernel parallelized? Currently this is only parallelizing over batch* kv_heads, but we can, and want to parallelize over ceil_div(q_heads//kv_heads * num_key_value, key_value_block_size). To do this will either require atomic updates to some grad values or to have a two pass kernel design.",
    "type": "function",
    "file_path": "pytorch\\torch\\_inductor\\kernel\\flex_attention.py",
    "ast_data": "FunctionDef name:flex_attention_backward_grid arg:batch_size arg:q_heads arg:num_queries arg:d_model arg:kv_heads arg:num_key_value arg:meta arguments arg arg arg arg arg arg arg Return return:yes Call Call"
  },
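The grid computation is plain ceiling division; a dependency-free sketch of the same arithmetic (the local `cdiv` stands in for `triton.cdiv`, and the numbers are illustrative):

```python
def cdiv(a: int, b: int) -> int:
    # ceiling division without floats
    return -(-a // b)

def backward_grid(batch_size, q_heads, num_queries, kv_heads, num_key_value, meta):
    # one grid axis covers both the dq blocks (per q-head group) and the dk/dv blocks
    return (
        cdiv(num_queries, meta["BLOCK_M2"]) * (q_heads // kv_heads)
        + cdiv(num_key_value, meta["BLOCK_N1"]),
        1,
        batch_size * kv_heads,
    )

print(backward_grid(2, 8, 1024, 2, 1024, {"BLOCK_M2": 64, "BLOCK_N1": 64}))
# (80, 1, 4): 16 query blocks * 4 head groups + 16 key/value blocks
```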
  {
    "library": "pytorch",
    "name": "_iteration_result_is_significant",
    "source_code": "def _iteration_result_is_significant(self, iters, run_time_sec, curr_test_total_time, has_explicit_iteration_count):\n    return (iters > self.max_iters or run_time_sec > self.predefined_minimum_secs or has_explicit_iteration_count) and curr_test_total_time > self.args.min_time_per_test",
    "docstring": "This function decides whether the measured time can be reported based on the following conditions: 1) the number of iterations is larger than the max_iters. 2) the execution time is larger than the predefined minimum_time 3) the execution time is larger than user defined minimum_time",
    "type": "method",
    "file_path": "pytorch\\benchmarks\\operator_benchmark\\benchmark_core.py",
    "ast_data": "FunctionDef name:_iteration_result_is_significant arg:self arg:iters arg:run_time_sec arg:curr_test_total_time arg:has_explicit_iteration_count arguments arg arg arg arg arg Return return:yes BoolOp BoolOp Compare Compare Compare"
  },
  {
    "library": "tensorflow",
    "name": "_is_eager",
    "source_code": "def _is_eager(self):\n    return all((isinstance(t, ops.EagerTensor) for t in (self.indices, self.values, self.dense_shape)))",
    "docstring": "Returns True if this was constructed in eager execution. Requires that each individual component of (, and ) is an instance of .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\sparse_tensor.py",
    "ast_data": "FunctionDef name:_is_eager arg:self arguments arg Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "dropout",
    "source_code": "def dropout(inputs, rate=0.5, noise_shape=None, seed=None, training=False, name=None):\n    warnings.warn('`tf.layers.dropout` is deprecated and will be removed in a future version. Please use `tf.keras.layers.Dropout` instead.')\n    layer = Dropout(rate, noise_shape=noise_shape, seed=seed, name=name)\n    return layer.apply(inputs, training=training)",
    "docstring": "Applies Dropout to the input. Dropout consists in randomly setting a fraction of input units to 0 at each update during training time, which helps prevent overfitting. The units that are kept are scaled by , so that their sum is unchanged at training time and inference time. Args: inputs: Tensor input. rate: The dropout rate, between 0 and 1. E.g. \"rate=0.1\" would drop out 10% of input units. noise_shape: 1D tensor of type representing the shape of the binary dropout mask that will be multiplied with the input. For instance, if your inputs have shape , and you want the dropout mask to be the same for all timesteps, you can use . seed: A Python integer. Used to create random seeds. See for behavior. training: Either a Python boolean, or a TensorFlow boolean scalar tensor (e.g. a placeholder). Whether to return the output in training mode (apply dropout) or in inference mode (return the input untouched). name: The name of the layer (string). Returns: Output tensor. Raises: ValueError: if eager execution is enabled.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\legacy_tf_layers\\core.py",
    "ast_data": "FunctionDef name:dropout arg:inputs arg:rate arg:noise_shape arg:seed arg:training arg:name arguments arg arg arg arg arg arg Call Assign Call Return return:yes Call"
  },
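The deprecation message points at `tf.keras.layers.Dropout`; a minimal sketch of the replacement, including the shared-mask `noise_shape` trick described above (shapes illustrative):

```python
import tensorflow as tf

x = tf.ones((4, 10, 8))  # (batch, timesteps, features)
layer = tf.keras.layers.Dropout(rate=0.5, noise_shape=(4, 1, 8), seed=0)
y = layer(x, training=True)  # the same mask is applied to every timestep
print(y.shape)               # (4, 10, 8)
```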
  {
    "library": "tensorflow",
    "name": "inverse",
    "source_code": "def inverse(self, name: str='inverse') -> 'LinearOperator':\n    if self.is_square is False:\n        raise ValueError('Cannot take the Inverse: This operator represents a non square matrix.')\n    if self.is_non_singular is False:\n        raise ValueError('Cannot take the Inverse: This operator represents a singular matrix.')\n    with self._name_scope(name):\n        return self._linop_inverse()",
    "docstring": "Returns the Inverse of this . Given representing this , return a representing . Args: name: A name scope to use for ops added by this method. Returns: representing inverse of this matrix. Raises: ValueError: When the is not hinted to be .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\linalg\\linear_operator.py",
    "ast_data": "FunctionDef name:inverse arg:self arg:name arguments arg arg If Compare Raise Call If Compare Raise Call With Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "register_op",
    "source_code": "def register_op(self, function: onnxscript.OnnxFunction | onnxscript.TracedOnnxFunction, namespace: str, op_name: str, overload: str | None=None, is_complex: bool=False) -> None:\n    internal_name_instance = registration.OpName.from_name_parts(namespace=namespace, op_name=op_name, overload=overload)\n    symbolic_function = registration.ONNXFunction(onnx_function=function, op_full_name=internal_name_instance.qualified_name(), is_custom=True, is_complex=is_complex)\n    self._register(internal_name_instance, symbolic_function)",
    "docstring": "Registers a custom operator: torch.ops.... Args: function: The onnx-sctip function to register. namespace: The namespace of the operator to register. op_name: The name of the operator to register. overload: The overload of the operator to register. If it's default overload, leave it to None. is_complex: Whether the function is a function that handles complex valued inputs. Raises: ValueError: If the name is not in the form of 'namespace::op'.",
    "type": "method",
    "file_path": "pytorch\\torch\\onnx\\_internal\\_exporter_legacy.py",
    "ast_data": "FunctionDef name:register_op arg:self arg:function arg:namespace arg:op_name arg:overload arg:is_complex arguments arg arg arg arg arg arg Assign Call Assign Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "TraceableStack",
    "source_code": "class TraceableStack(Generic[T]):\n\n    def __init__(self, existing_stack: Optional[list[TraceableObject[T]]]=None):\n        self._stack: list[TraceableObject[T]] = existing_stack[:] if existing_stack else []\n\n    def push_obj(self, obj: T, offset: int=0):\n        traceable_obj = TraceableObject(obj)\n        self._stack.append(traceable_obj)\n        return traceable_obj.set_filename_and_line_from_caller(offset + 1)\n\n    def pop_obj(self) -> T:\n        return self._stack.pop().obj\n\n    def peek_top_obj(self) -> T:\n        return self._stack[-1].obj\n\n    def peek_objs(self) -> Iterator[T]:\n        return (t_obj.obj for t_obj in reversed(self._stack))\n\n    def peek_traceable_objs(self) -> Iterator[TraceableObject[T]]:\n        return reversed(self._stack)\n\n    def __len__(self) -> int:\n        return len(self._stack)\n\n    def copy(self) -> 'TraceableStack[T]':\n        return TraceableStack(self._stack)",
    "docstring": "A stack of TraceableObjects.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\traceable_stack.py",
    "ast_data": "ClassDef name:TraceableStack FunctionDef name:__init__ arg:self arg:existing_stack arguments arg arg FunctionDef name:push_obj arg:self arg:obj arg:offset arguments arg arg arg Assign Call Call Return return:yes Call FunctionDef name:pop_obj arg:self arguments arg Return return:yes Call FunctionDef name:peek_top_obj arg:self arguments arg Return return:yes FunctionDef name:peek_objs arg:self arguments arg Return return:yes Call FunctionDef name:peek_traceable_objs arg:self arguments arg Return return:yes Call FunctionDef name:__len__ arg:self arguments arg Return return:yes Call FunctionDef name:copy arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "GlobalOpDispatcher",
    "source_code": "@tf_export('__internal__.dispatch.GlobalOpDispatcher', v1=[])\nclass GlobalOpDispatcher(object):\n    NOT_SUPPORTED = OpDispatcher.NOT_SUPPORTED\n\n    def handle(self, op, args, kwargs):\n        pass\n\n    def register(self):\n        _GLOBAL_DISPATCHERS.append(self)",
    "docstring": "Abstract base class for TensorFlow global operator dispatchers.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\util\\dispatch.py",
    "ast_data": "ClassDef name:GlobalOpDispatcher Assign FunctionDef name:handle arg:self arg:op arg:args arg:kwargs arguments arg arg arg arg FunctionDef name:register arg:self arguments arg Call Call"
  },
  {
    "library": "pytorch",
    "name": "_get_default_signal",
    "source_code": "def _get_default_signal() -> signal.Signals:\n    if IS_WINDOWS:\n        return signal.CTRL_C_EVENT\n    else:\n        return signal.SIGTERM",
    "docstring": "Get the default termination signal. SIGTERM for unix, CTRL_C_EVENT for windows.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\elastic\\multiprocessing\\api.py",
    "ast_data": "FunctionDef name:_get_default_signal arguments If Return return:yes Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "python",
    "source_code": "def python(self, *args: str, python: Path | str | None=None, **popen_kwargs: Any) -> subprocess.CompletedProcess[str]:\n    if python is None:\n        python = self.executable\n    cmd = [str(python), *args]\n    env = popen_kwargs.pop('env', None) or {}\n    return subprocess.run(cmd, check=True, text=True, encoding='utf-8', env={**self._env, **env}, **popen_kwargs)",
    "docstring": "Run a Python command in the virtual environment.",
    "type": "method",
    "file_path": "pytorch\\tools\\nightly.py",
    "ast_data": "FunctionDef name:python arg:self arguments arg arg arg arg If Compare Assign Assign Call Assign BoolOp Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "init",
    "source_code": "def init():\n    torch._C._lazy_ts_backend._init()",
    "docstring": "Initializes the lazy Torchscript backend",
    "type": "function",
    "file_path": "pytorch\\torch\\_lazy\\ts_backend.py",
    "ast_data": "FunctionDef name:init arguments Call"
  },
  {
    "library": "sphinx",
    "name": "convert",
    "source_code": "def convert(self, _from: str | os.PathLike[str], _to: str | os.PathLike[str]) -> bool:\n    raise NotImplementedError",
    "docstring": "Convert an image file to the expected format. *_from* is a path of the source image file, and *_to* is a path of the destination file.",
    "type": "method",
    "file_path": "sphinx\\sphinx\\transforms\\post_transforms\\images.py",
    "ast_data": "FunctionDef name:convert arg:self arg:_from arg:_to arguments arg arg arg Raise"
  },
  {
    "library": "django",
    "name": "TemplateSyntaxError",
    "source_code": "class TemplateSyntaxError(Exception):\n    pass",
    "docstring": "The exception used for syntax errors during parsing or rendering.",
    "type": "class",
    "file_path": "django\\django\\template\\exceptions.py",
    "ast_data": "ClassDef name:TemplateSyntaxError"
  },
  {
    "library": "tensorflow",
    "name": "test_main",
    "source_code": "@tf_export('__internal__.distribute.multi_process_runner.test_main', v1=[])\ndef test_main():\n    old_tear_down_module = getattr(sys.modules['__main__'], 'tearDownModule', None)\n\n    def tear_down_module():\n        _shutdown_all_pool_runners()\n        if old_tear_down_module is not None:\n            old_tear_down_module()\n    setattr(sys.modules['__main__'], 'tearDownModule', tear_down_module)\n    multi_process_lib.test_main()",
    "docstring": "Main function to be called within of a test file. Any test module that uses must call this instead of regular inside block, or an error will be raised when is used. This method takes care of needed initialization for launching multiple subprocesses. Example:",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\multi_process_runner.py",
    "ast_data": "FunctionDef name:test_main arguments Assign Call FunctionDef name:tear_down_module arguments Call If Compare Call Call Call Call"
  },
  {
    "library": "cryptography",
    "name": "algorithm",
    "source_code": "@property\n@abc.abstractmethod\ndef algorithm(self) -> HashAlgorithm:\n    pass",
    "docstring": "A HashAlgorithm that will be used by this context.",
    "type": "method",
    "file_path": "cryptography\\src\\cryptography\\hazmat\\primitives\\hashes.py",
    "ast_data": "FunctionDef name:algorithm arg:self arguments arg"
  },
  {
    "library": "pytorch",
    "name": "__init__",
    "source_code": "def __init__(self, gm: torch.fx.GraphModule, has_user_defined_triton_kernels: bool=False) -> None:\n    self._stream = io.BytesIO()\n    super().__init__(self._stream)\n    self.dispatch_table = copyreg.dispatch_table.copy()\n    self.dispatch_table.update({FakeTensor: functools.partial(self._reduce_fake_tensor), torch.Tensor: functools.partial(self._reduce_tensor), torch.nn.parameter.Parameter: functools.partial(self._reduce_tensor), torch.SymInt: functools.partial(self._reduce_symint), torch.fx.experimental._backward_state.BackwardState: functools.partial(self._reduce_unsupported)})\n    if has_user_defined_triton_kernels:\n        self.dispatch_table[gm.__class__] = functools.partial(self._reduce_graph_module)\n    self.fast = True",
    "docstring": "Create an FX graph pickler. If include_non_inlined=True, then pickling will include the _values_ for all Tensors. (Note that any tensors are constants attached as attributes to the GraphModule). Otherwise, pickling will include only the metadata for these tensors.",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\codecache.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:gm arg:has_user_defined_triton_kernels arguments arg arg arg Assign Call Call Call Assign Call Call Call Call Call Call Call If Assign Call Assign"
  },
  {
    "library": "scikit-learn",
    "name": "_check_warnings",
    "source_code": "def _check_warnings(self, *, method, params):\n    getattr(self, method)._check_warnings(params=params)",
    "docstring": "Check whether metadata is passed which is marked as WARN. If any metadata is passed which is marked as WARN, a warning is raised. Parameters ---------- method : str The name of the method for which the warnings should be checked. params : dict The metadata passed to a method.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\utils\\_metadata_requests.py",
    "ast_data": "FunctionDef name:_check_warnings arg:self arguments arg arg arg Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_run_benchmarks",
    "source_code": "def _run_benchmarks(regex):\n    registry = list(GLOBAL_BENCHMARK_REGISTRY)\n    selected_benchmarks = []\n    for benchmark in registry:\n        benchmark_name = '%s.%s' % (benchmark.__module__, benchmark.__name__)\n        attrs = dir(benchmark)\n        benchmark_instance = None\n        for attr in attrs:\n            if not attr.startswith('benchmark'):\n                continue\n            candidate_benchmark_fn = getattr(benchmark, attr)\n            if not callable(candidate_benchmark_fn):\n                continue\n            full_benchmark_name = '%s.%s' % (benchmark_name, attr)\n            if regex == 'all' or re.search(regex, full_benchmark_name):\n                selected_benchmarks.append(full_benchmark_name)\n                benchmark_instance = benchmark_instance or benchmark()\n                instance_benchmark_fn = getattr(benchmark_instance, attr)\n                instance_benchmark_fn()\n    if not selected_benchmarks:\n        raise ValueError(\"No benchmarks matched the pattern: '{}'\".format(regex))",
    "docstring": "Run benchmarks that match regex . This function goes through the global benchmark registry, and matches benchmark class and method names of the form to the given regex. If a method matches, it is run. Args: regex: The string regular expression to match Benchmark classes against. Raises: ValueError: If no benchmarks were selected by the input regex.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\platform\\benchmark.py",
    "ast_data": "FunctionDef name:_run_benchmarks arg:regex arguments arg Assign Call Assign For Assign Assign Call Assign For If Call Assign Call If Call Assign If BoolOp Compare Call Call Assign BoolOp Call Assign Call Call If Raise Call Call"
  },
  {
    "library": "pytorch",
    "name": "_all_gather_embedding_bag_input",
    "source_code": "def _all_gather_embedding_bag_input(input, per_sample_weights, offsets, pg):\n    input_to_gather = [input]\n    if per_sample_weights is not None:\n        input_to_gather.append(per_sample_weights)\n    if offsets is not None:\n        input_to_gather.append(offsets.clone().resize_(input.size()))\n    gathered_inputs = all_gather(torch.stack(input_to_gather), group=pg)\n    gathered_per_sample_weights = None\n    if per_sample_weights is not None:\n        gathered_per_sample_weights = [t[1] for t in gathered_inputs]\n    gathered_offsets = None\n    if offsets is not None:\n        idx = 2 if per_sample_weights is not None else 1\n        gathered_offsets = [t[idx].resize_(offsets.size()).to(offsets.dtype) for t in gathered_inputs]\n    gathered_inputs = [t[0].to(input.dtype) for t in gathered_inputs]\n    return (gathered_inputs, gathered_per_sample_weights, gathered_offsets)",
    "docstring": "In case we need to gather input and all other parameters of embeddingBag ops, we need to stack all input together to perform `` collective communication just once. Note that since offsets does not share the same size as input and is always smaller than input, we resize it during the communication. Args: input: tensor to be applied op on. per_sample_weights: weights for weighted sum mode. offsets: when input is 1D. offsets determines the starting index position of each bag (sequence) in input. pg: process group. Returns: gathered_inputs: list of input tensor gathered from each rank. gathered_per_sample_weights: list of per_sample_weights from each rank. gathered_offsets: list of offsets from each rank.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\_shard\\sharding_spec\\chunk_sharding_spec_ops\\embedding_bag.py",
    "ast_data": "FunctionDef name:_all_gather_embedding_bag_input arg:input arg:per_sample_weights arg:offsets arg:pg arguments arg arg arg arg Assign If Compare Call If Compare Call Call Call Call Assign Call Call Assign If Compare Assign Assign If Compare Assign Compare Assign Call Call Call Assign Call Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "_raise_or_return",
    "source_code": "def _raise_or_return():\n    if raise_unknown:\n        input = input_name if input_name else 'data'\n        raise ValueError(f'Unknown label type for {input}: {y!r}')\n    else:\n        return 'unknown'",
    "docstring": "Depending on the value of raise_unknown, either raise an error or return 'unknown'.",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\utils\\multiclass.py",
    "ast_data": "FunctionDef name:_raise_or_return arguments If Assign Raise Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "ThetaFormatter",
    "source_code": "class ThetaFormatter(mticker.Formatter):\n\n    def __call__(self, x, pos=None):\n        vmin, vmax = self.axis.get_view_interval()\n        d = np.rad2deg(abs(vmax - vmin))\n        digits = max(-int(np.log10(d) - 1.5), 0)\n        return f'{np.rad2deg(x):0.{digits}f}°'",
    "docstring": "Used to format the *theta* tick labels. Converts the native unit of radians into degrees and adds a degree symbol.",
    "type": "class",
    "file_path": "matplotlib\\lib\\matplotlib\\projections\\polar.py",
    "ast_data": "ClassDef name:ThetaFormatter FunctionDef name:__call__ arg:self arg:x arg:pos arguments arg arg arg Assign Call Assign Call Call Assign Call Call Call Return return:yes Call"
  },
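The digit heuristic above trades decimal places against the angular span of the view; a standalone sketch of the same formula (the `format_theta` helper is hypothetical, written here only to exercise the logic):

```python
import numpy as np

def format_theta(x_rad: float, vmin: float, vmax: float) -> str:
    # wider angular views get fewer decimal digits
    d = np.rad2deg(abs(vmax - vmin))
    digits = max(-int(np.log10(d) - 1.5), 0)
    return f"{np.rad2deg(x_rad):0.{digits}f}°"

print(format_theta(np.pi / 4, 0.0, 2 * np.pi))  # '45°' over a full circle
```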
  {
    "library": "sphinx",
    "name": "nested_parse_to_nodes",
    "source_code": "def nested_parse_to_nodes(state: RSTState, text: str | StringList, *, source: str='<generated text>', offset: int=0, allow_section_headings: bool=True, keep_title_context: bool=False) -> list[Node]:\n    document = state.document\n    content = _text_to_string_list(text, source=source, tab_width=document.settings.tab_width)\n    node = Element()\n    node.document = document\n    if keep_title_context:\n        state.nested_parse(content, offset, node, match_titles=allow_section_headings)\n    else:\n        with _fresh_title_style_context(state):\n            state.nested_parse(content, offset, node, match_titles=allow_section_headings)\n    return node.children",
    "docstring": "Parse *text* into nodes. :param state: The state machine state. Must be a subclass of `` nodes. :param keep_title_context: If this is False (the default), then *content* is parsed as if it were an independent document, meaning that title decorations (e.g. underlines) do not need to match the surrounding document. This is useful when the parsed content comes from a completely different context, such as docstrings. If this is True, then title underlines must match those in the surrounding document, otherwise the behaviour is undefined. .. versionadded:: 7.4",
    "type": "function",
    "file_path": "sphinx\\sphinx\\util\\parsing.py",
    "ast_data": "FunctionDef name:nested_parse_to_nodes arg:state arg:text arguments arg arg arg arg arg arg Assign Assign Call Assign Call Assign If Call With Call Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "_init",
    "source_code": "def _init(self):\n    raise NotImplementedError('Abstract class only')",
    "docstring": "Generate the lookup table, ``.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\colors.py",
    "ast_data": "FunctionDef name:_init arg:self arguments arg Raise Call"
  },
  {
    "library": "cherrypy",
    "name": "_compat",
    "source_code": "def _compat(self, config):\n    for section, conf in config.items():\n        if isinstance(conf, dict):\n            for k in conf:\n                if k in self.obsolete:\n                    warnings.warn('%r is obsolete. Use %r instead.\\nsection: [%s]' % (k, self.obsolete[k], section))\n                elif k in self.deprecated:\n                    warnings.warn('%r is deprecated. Use %r instead.\\nsection: [%s]' % (k, self.deprecated[k], section))\n        elif section in self.obsolete:\n            warnings.warn('%r is obsolete. Use %r instead.' % (section, self.obsolete[section]))\n        elif section in self.deprecated:\n            warnings.warn('%r is deprecated. Use %r instead.' % (section, self.deprecated[section]))",
    "docstring": "Process config and warn on each obsolete or deprecated entry.",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\_cpchecker.py",
    "ast_data": "FunctionDef name:_compat arg:self arg:config arguments arg arg For Call If Call For If Compare Call If Compare Call If Compare Call If Compare Call"
  },
  {
    "library": "pytorch",
    "name": "MemPool",
    "source_code": "class MemPool(_MemPool):\n\n    def __init__(self, allocator: Optional[_cuda_CUDAAllocator]=None, use_on_oom: bool=False):\n        super().__init__(allocator, True, use_on_oom)\n\n    @property\n    def id(self) -> tuple[int, int]:\n        return super().id\n\n    @property\n    def allocator(self) -> Optional[_cuda_CUDAAllocator]:\n        return super().allocator\n\n    def use_count(self) -> int:\n        return super().use_count()\n\n    def snapshot(self):\n        try:\n            ctx = MemPoolContext(self)\n            snapshot = torch.cuda.memory_snapshot()\n        finally:\n            del ctx\n        return snapshot",
    "docstring": "MemPool represents a pool of memory in a caching allocator. Currently, it's just the ID of the pool object maintained in the CUDACachingAllocator. Args: allocator(torch._C._cuda_CUDAAllocator, optional): a torch._C._cuda_CUDAAllocator object that can be used to define how memory gets allocated in the pool. If :attr: is `` (default), memory allocation follows the default/ current configuration of the CUDACachingAllocator. use_on_oom(bool): a bool that indicates if this pool can be used as a last resort if a memory allocation outside of the pool fails due to Out Of Memory. This is False by default.",
    "type": "class",
    "file_path": "pytorch\\torch\\cuda\\memory.py",
    "ast_data": "ClassDef name:MemPool FunctionDef name:__init__ arg:self arg:allocator arg:use_on_oom arguments arg arg arg Call Call FunctionDef name:id arg:self arguments arg Return return:yes Call FunctionDef name:allocator arg:self arguments arg Return return:yes Call FunctionDef name:use_count arg:self arguments arg Return return:yes Call Call FunctionDef name:snapshot arg:self arguments arg Try Assign Call Assign Call Return return:yes"
  },
  {
    "library": "numpy",
    "name": "hermemul",
    "source_code": "def hermemul(c1, c2):\n    [c1, c2] = pu.as_series([c1, c2])\n    if len(c1) > len(c2):\n        c = c2\n        xs = c1\n    else:\n        c = c1\n        xs = c2\n    if len(c) == 1:\n        c0 = c[0] * xs\n        c1 = 0\n    elif len(c) == 2:\n        c0 = c[0] * xs\n        c1 = c[1] * xs\n    else:\n        nd = len(c)\n        c0 = c[-2] * xs\n        c1 = c[-1] * xs\n        for i in range(3, len(c) + 1):\n            tmp = c0\n            nd = nd - 1\n            c0 = hermesub(c[-i] * xs, c1 * (nd - 1))\n            c1 = hermeadd(tmp, hermemulx(c1))\n    return hermeadd(c0, hermemulx(c1))",
    "docstring": "Multiply one Hermite series by another. Returns the product of two Hermite series * . The arguments are sequences of coefficients, from lowest order \"term\" to highest, e.g., [1,2,3] represents the series ``. Parameters ---------- c1, c2 : array_like 1-D arrays of Hermite series coefficients ordered from low to high. Returns ------- out : ndarray Of Hermite series coefficients representing their product. See Also -------- hermeadd, hermesub, hermemulx, hermediv, hermepow Notes ----- In general, the (polynomial) product of two C-series results in terms that are not in the Hermite polynomial basis set. Thus, to express the product as a Hermite series, it is necessary to \"reproject\" the product onto said basis set, which may produce \"unintuitive\" (but correct) results; see Examples section below. Examples -------- >>> from numpy.polynomial.hermite_e import hermemul >>> hermemul([1, 2, 3], [0, 1, 2]) array([14., 15., 28., 7., 6.])",
    "type": "function",
    "file_path": "numpy\\numpy\\polynomial\\hermite_e.py",
    "ast_data": "FunctionDef name:hermemul arg:c1 arg:c2 arguments arg arg Assign Call If Compare Call Call Assign Assign Assign Assign If Compare Call Assign Assign If Compare Call Assign Assign Assign Call Assign Assign For Call Call Assign Assign Assign Call Assign Call Call Return return:yes Call Call"
  },
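The Examples section of the docstring runs as-is; a self-contained check:

```python
import numpy as np
from numpy.polynomial.hermite_e import hermemul

out = hermemul([1, 2, 3], [0, 1, 2])
print(out)  # [14. 15. 28.  7.  6.]
assert np.allclose(out, [14.0, 15.0, 28.0, 7.0, 6.0])
```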
  {
    "library": "tensorflow",
    "name": "_save_representative_dataset",
    "source_code": "def _save_representative_dataset(representative_dataset: repr_dataset.RepresentativeDatasetOrMapping, signature_def_map: _SignatureDefMap) -> Mapping[str, _RepresentativeDatasetFile]:\n    if isinstance(representative_dataset, Mapping):\n        if set(signature_def_map.keys()) != set(representative_dataset.keys()):\n            raise ValueError(f'The signature keys and the keys of representative dataset map do not match. Signature keys: {set(signature_def_map.keys())}, representative dataset map: {set(representative_dataset.keys())}.')\n        representative_dataset_map = representative_dataset\n    elif len(signature_def_map.keys()) > 1:\n        raise ValueError(f'Representative dataset is not a mapping (got: {type(representative_dataset)}), but there is more than one signature key provided. Please provide a map of {{signature_key -> dataset}} with more than one signature key.')\n    else:\n        representative_dataset_map = {list(signature_def_map.keys())[0]: representative_dataset}\n    path_map = {}\n    expected_input_key_map = {}\n    for signature_key, signature_def in signature_def_map.items():\n        _, path_map[signature_key] = tempfile.mkstemp(suffix='.tfrecord', prefix=signature_key)\n        expected_input_key_map[signature_key] = signature_def.inputs.keys()\n    return repr_dataset.TfRecordRepresentativeDatasetSaver(path_map=path_map, expected_input_key_map=expected_input_key_map).save(representative_dataset_map)",
    "docstring": "Saves the representative dataset to temporary TFRecord files. Args: representative_dataset: Representative dataset used for the calibration step. Representative datasets should exist for each signature def key in . signature_def_map: Signature def key -> SignatureDef mapping. Returns: A map from signature key to the saved representative dataset file.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\compiler\\mlir\\quantization\\tensorflow\\python\\quantize_model.py",
    "ast_data": "FunctionDef name:_save_representative_dataset arg:representative_dataset arg:signature_def_map arguments arg arg If Call If Compare Call Call Call Call Raise Call Call Call Call Call Assign If Compare Call Call Raise Call Call Assign Call Call Assign Assign For Call Assign Call Assign Call Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "set_module_name",
    "source_code": "def set_module_name(self, module_name: str, qconfig_list: list[QConfigAny]) -> QConfigMultiMapping:\n    self._insert_qconfig_list('module_name_qconfigs', [module_name], qconfig_list)\n    return self",
    "docstring": "Set module_name QConfigs see :func: for more info",
    "type": "method",
    "file_path": "pytorch\\torch\\ao\\ns\\fx\\qconfig_multi_mapping.py",
    "ast_data": "FunctionDef name:set_module_name arg:self arg:module_name arg:qconfig_list arguments arg arg arg Call Return return:yes"
  },
  {
    "library": "cherrypy",
    "name": "_is_daemonized",
    "source_code": "def _is_daemonized(self):\n    return self._original_pid != os.getpid() and (not os.isatty(sys.stdin.fileno()))",
    "docstring": "Check if current process is running as a daemon. The criteria to determine the condition is to verify if the current pid is not the same as the one that got used on the initial construction of the plugin *and* the stdin is not connected to a terminal. The sole validation of the tty is not enough when the plugin is executing inside other process like in a CI tool (Buildbot, Jenkins).",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\process\\plugins.py",
    "ast_data": "FunctionDef name:_is_daemonized arg:self arguments arg Return return:yes BoolOp Compare Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "_x_log_x",
    "source_code": "def _x_log_x(tensor):\n    return torch.special.xlogy(tensor, tensor)",
    "docstring": "Utility function for calculating x log x",
    "type": "function",
    "file_path": "pytorch\\torch\\distributions\\kl.py",
    "ast_data": "FunctionDef name:_x_log_x arg:tensor arguments arg Return return:yes Call"
  },
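`torch.special.xlogy` evaluates to 0 where both arguments are 0, which is exactly the convention entropy-style sums need; a short sketch:

```python
import torch

p = torch.tensor([0.5, 0.25, 0.25, 0.0])
x_log_x = torch.special.xlogy(p, p)  # 0 * log(0) -> 0, no NaN
entropy = -x_log_x.sum()
print(entropy.item())                # ~1.0397 nats
```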
  {
    "library": "kornia",
    "name": "_max_blur_pool_by_kernel2d",
    "source_code": "def _max_blur_pool_by_kernel2d(input: Tensor, kernel: Tensor, stride: int, max_pool_size: int, ceil_mode: bool) -> Tensor:\n    KORNIA_CHECK(len(kernel.shape) == 4 and kernel.shape[-2] == kernel.shape[-1], f'Invalid kernel shape. Expect CxC_outxNxN, Got {kernel.shape}')\n    input = F.max_pool2d(input, kernel_size=max_pool_size, padding=0, stride=1, ceil_mode=ceil_mode)\n    padding = _compute_zero_padding((kernel.shape[-2], kernel.shape[-1]))\n    return F.conv2d(input, kernel, padding=padding, stride=stride, groups=input.size(1))",
    "docstring": "Compute max_blur_pool by a given :math: kernel.",
    "type": "function",
    "file_path": "kornia\\kornia\\filters\\blur_pool.py",
    "ast_data": "FunctionDef name:_max_blur_pool_by_kernel2d arg:input arg:kernel arg:stride arg:max_pool_size arg:ceil_mode arguments arg arg arg arg arg Call BoolOp Compare Call Compare Assign Call Assign Call Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "set_handle_data",
    "source_code": "def set_handle_data(target_t, handle_data):\n    if handle_data is None or not handle_data.is_set or (not handle_data.shape_and_type):\n        return\n    if isinstance(target_t, core.Value):\n        target_t._handle_data = handle_data\n        return\n    with target_t.graph._c_graph.get() as c_graph:\n        pywrap_tf_session.SetHandleShapeAndType(c_graph, target_t._as_tf_output(), handle_data.SerializeToString())",
    "docstring": "Sets handle data on the giver tensor.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\handle_data_util.py",
    "ast_data": "FunctionDef name:set_handle_data arg:target_t arg:handle_data arguments arg arg If BoolOp Compare Return return:no If Call Assign Return return:no With Call Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "name",
    "source_code": "@property\ndef name(self):\n    return self._name",
    "docstring": "Returns the string name of this .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\distributions\\bijector_impl.py",
    "ast_data": "FunctionDef name:name arg:self arguments arg Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "_check_precomputed",
    "source_code": "def _check_precomputed(X):\n    if not issparse(X):\n        X = check_array(X, ensure_non_negative=True, input_name='X')\n        return X\n    else:\n        graph = X\n    if graph.format not in ('csr', 'csc', 'coo', 'lil'):\n        raise TypeError('Sparse matrix in {!r} format is not supported due to its handling of explicit zeros'.format(graph.format))\n    copied = graph.format != 'csr'\n    graph = check_array(graph, accept_sparse='csr', ensure_non_negative=True, input_name='precomputed distance matrix')\n    graph = sort_graph_by_row_values(graph, copy=not copied, warn_when_not_sorted=True)\n    return graph",
    "docstring": "Check precomputed distance matrix. If the precomputed distance matrix is sparse, it checks that the non-zero entries are sorted by distances. If not, the matrix is copied and sorted. Parameters ---------- X : {sparse matrix, array-like}, (n_samples, n_samples) Distance matrix to other samples. X may be a sparse matrix, in which case only non-zero elements may be considered neighbors. Returns ------- X : {sparse matrix, array-like}, (n_samples, n_samples) Distance matrix to other samples. X may be a sparse matrix, in which case only non-zero elements may be considered neighbors.",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\neighbors\\_base.py",
    "ast_data": "FunctionDef name:_check_precomputed arg:X arguments arg If Call Assign Call Return return:yes Assign If Compare Raise Call Call Assign Compare Assign Call Assign Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "get_transform_rotates_text",
    "source_code": "def get_transform_rotates_text(self):\n    return self._transform_rotates_text",
    "docstring": "Return whether rotations of the transform affect the text direction.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\text.py",
    "ast_data": "FunctionDef name:get_transform_rotates_text arg:self arguments arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "stream",
    "source_code": "def stream(stream: Optional['torch.xpu.Stream']) -> StreamContext:\n    return StreamContext(stream)",
    "docstring": "Wrap around the Context-manager StreamContext that selects a given stream. Arguments: stream (Stream): selected stream. This manager is a no-op if it's ``.",
    "type": "function",
    "file_path": "pytorch\\torch\\xpu\\__init__.py",
    "ast_data": "FunctionDef name:stream arg:stream arguments arg Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "predict_log_proba",
    "source_code": "@available_if(_check_proba)\ndef predict_log_proba(self, X):\n    return np.log(self.predict_proba(X))",
    "docstring": "Log of probability estimates. This method is only available for log loss and modified Huber loss. When loss=\"modified_huber\", probability estimates may be hard zeros and ones, so taking the logarithm is not possible. See `self.classes_`.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\linear_model\\_stochastic_gradient.py",
    "ast_data": "FunctionDef name:predict_log_proba arg:self arg:X arguments arg arg Return return:yes Call Call Call"
  },
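A usage sketch with the public estimator (scikit-learn >= 1.1 spells the loss 'log_loss'; the data is illustrative):

```python
import numpy as np
from sklearn.linear_model import SGDClassifier

X = np.array([[0.0], [1.0], [2.0], [3.0]])
y = np.array([0, 0, 1, 1])
clf = SGDClassifier(loss="log_loss", random_state=0).fit(X, y)
log_probs = clf.predict_log_proba(X)  # elementwise log of predict_proba
assert np.allclose(np.exp(log_probs).sum(axis=1), 1.0)  # rows are distributions
```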
  {
    "library": "pygame",
    "name": "remove",
    "source_code": "def remove(self, *groups):\n    has = self.__g.__contains__\n    for group in groups:\n        if hasattr(group, '_spritegroup'):\n            if has(group):\n                group.remove_internal(self)\n                self.remove_internal(group)\n        else:\n            self.remove(*group)",
    "docstring": "remove the sprite from groups Sprite.remove(*groups): return None Any number of Group instances can be passed as arguments. The Sprite will be removed from the Groups it is currently a member of.",
    "type": "method",
    "file_path": "pygame\\src_py\\sprite.py",
    "ast_data": "FunctionDef name:remove arg:self arguments arg arg Assign For If Call If Call Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "ArgumentParser",
    "source_code": "class ArgumentParser(argparse.ArgumentParser):\n\n    def __init__(self, prog: str | None=None, usage: str | None=None, description: str | None=None, epilog: str | None=None, is_fixer: bool=False, **kwargs: Any) -> None:\n        super().__init__(prog, usage, description, None, **kwargs)\n        self._epilog = epilog\n        help = 'A list of files or directories to lint'\n        self.add_argument('files', nargs='*', help=help)\n        help = 'Fix lint errors if possible' if is_fixer else argparse.SUPPRESS\n        self.add_argument('-f', '--fix', action='store_true', help=help)\n        help = \"Run for lintrunner and print LintMessages which aren't edits\"\n        self.add_argument('-l', '--lintrunner', action='store_true', help=help)\n        help = 'Print more debug info'\n        self.add_argument('-v', '--verbose', action='store_true', help=help)\n\n    def exit(self, status: int=0, message: str | None=None) -> Never:\n        argv = sys.argv[1:]\n        if self._epilog and (not status) and ('-h' in argv) or '--help' in argv:\n            print(self._epilog)\n        super().exit(status, message)",
    "docstring": "Adds better help formatting and default arguments to argparse.ArgumentParser",
    "type": "class",
    "file_path": "pytorch\\tools\\linter\\adapters\\_linter.py",
    "ast_data": "ClassDef name:ArgumentParser FunctionDef name:__init__ arg:self arg:prog arg:usage arg:description arg:epilog arg:is_fixer arguments arg arg arg arg arg arg arg Call Call Assign Assign Call Assign Call Assign Call Assign Call FunctionDef name:exit arg:self arg:status arg:message arguments arg arg arg Assign If BoolOp BoolOp Compare Compare Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "build",
    "source_code": "def build(self):\n    if self._built:\n        return\n    self._variables = self._create_variables_and_slots()\n    self._built = True",
    "docstring": "Create variables and slots variables for TPU embeddings.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\tpu\\tpu_embedding_base.py",
    "ast_data": "FunctionDef name:build arg:self arguments arg If Return return:no Assign Call Assign"
  },
  {
    "library": "django",
    "name": "adapt_decimalfield_value",
    "source_code": "def adapt_decimalfield_value(self, value, max_digits=None, decimal_places=None):\n    return value",
    "docstring": "Transform a decimal.Decimal value to an object compatible with what is expected by the backend driver for decimal (numeric) columns.",
    "type": "method",
    "file_path": "django\\django\\db\\backends\\base\\operations.py",
    "ast_data": "FunctionDef name:adapt_decimalfield_value arg:self arg:value arg:max_digits arg:decimal_places arguments arg arg arg arg Return return:yes"
  },
  {
    "library": "pandas",
    "name": "all_not_none",
    "source_code": "def all_not_none(*args) -> bool:\n    return all((arg is not None for arg in args))",
    "docstring": "Returns a boolean indicating if all arguments are not None.",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\common.py",
    "ast_data": "FunctionDef name:all_not_none arguments arg Return return:yes Call Compare"
  },
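The helper treats falsy-but-not-None values as present and is vacuously True with no arguments; a quick check of the same one-liner:

```python
def all_not_none(*args) -> bool:
    return all(arg is not None for arg in args)

print(all_not_none(1, "x", 0.0))  # True: 0.0 is falsy but not None
print(all_not_none(1, None))      # False
print(all_not_none())             # True: vacuously true
```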
  {
    "library": "tensorflow",
    "name": "assert_integer",
    "source_code": "@tf_export(v1=['debugging.assert_integer', 'assert_integer'])\n@dispatch.add_dispatch_support\n@deprecation.deprecated_endpoints('assert_integer')\ndef assert_integer(x, message=None, name=None):\n    with ops.name_scope(name, 'assert_integer', [x]):\n        x = ops.convert_to_tensor(x, name='x')\n        if not x.dtype.is_integer:\n            if context.executing_eagerly():\n                name = 'tensor'\n            else:\n                name = x.name\n            err_msg = '%sExpected \"x\" to be integer type.  Found: %s of dtype %s' % (_message_prefix(message), name, x.dtype)\n            raise TypeError(err_msg)\n        return control_flow_ops.no_op('statically_determined_was_integer')",
    "docstring": "Assert that is of integer dtype. Example of adding a dependency to an operation: Args: x: whose basetype is integer and is not quantized. message: A string to prefix to the default message. name: A name for this operation (optional). Defaults to \"assert_integer\". Raises: TypeError: If is anything other than non-quantized integer. Returns: A that does nothing. Type can be determined statically.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\check_ops.py",
    "ast_data": "FunctionDef name:assert_integer arg:x arg:message arg:name arguments arg arg arg With Call Assign Call If If Call Assign Assign Assign Call Raise Call Return return:yes Call Call Call"
  },
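A sketch of eager usage through the v1 endpoint (`tf.compat.v1.debugging.assert_integer`): the op passes silently for integer dtypes and raises `TypeError` otherwise:

```python
import tensorflow as tf

tf.compat.v1.debugging.assert_integer(tf.constant([1, 2]))  # passes silently
try:
    tf.compat.v1.debugging.assert_integer(tf.constant([1.0]))
except TypeError as e:
    print("rejected:", type(e).__name__)  # float input is rejected
```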
  {
    "library": "pytorch",
    "name": "canonicalize",
    "source_code": "def canonicalize(self):\n    sizevars = V.graph.sizevars\n    sizes = self.get_size()\n    strides = self.get_stride()\n    strides = [sizevars.size_hint(x) for x in strides]\n    index_vars = [sympy_index_symbol(f'd{i}') for i in range(len(sizes))]\n    index_order = sorted(range(len(strides)), key=strides.__getitem__, reverse=True)\n    lookup = {pos: idx for idx, pos in enumerate(index_order)}\n    order = [lookup[i] for i in range(len(lookup))]\n    index_vars = [index_vars[i] for i in order]\n    indexer = self.make_indexer()\n    index = indexer(index_vars)\n    new_sizes, reindex, _prune = V.graph.sizevars._simplify_loops(index_vars, sizes, [index])\n    _, add_var = var_builder('c')\n    replacement = dict(zip(index_vars, reindex([add_var(x) for x in new_sizes])))\n    index = sympy_subs(sympy.expand(index), replacement)\n    return (index, tuple(new_sizes))",
    "docstring": "Manually get canonicalization of the output index",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\ir.py",
    "ast_data": "FunctionDef name:canonicalize arg:self arguments arg Assign Assign Call Assign Call Assign Call Assign Call Call Call Assign Call Call Call Assign Call Assign Call Call Assign Assign Call Assign Call Assign Call Assign Call Assign Call Call Call Call Assign Call Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "list_objects",
    "source_code": "def list_objects(graph_view, skip_slot_variables=False):\n    trackable_objects = objects_ids_and_slot_variables_and_paths(graph_view, skip_slot_variables)[0]\n    return trackable_objects",
    "docstring": "Traverse the object graph and list all accessible objects.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\checkpoint\\util.py",
    "ast_data": "FunctionDef name:list_objects arg:graph_view arg:skip_slot_variables arguments arg arg Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "get_relevant_paths_and_versions",
    "source_code": "def get_relevant_paths_and_versions(self, config: 'XLAConfigOptions'):\n    if self.ld_library_path is None:\n        self.ld_library_path = os.environ.get('LD_LIBRARY_PATH', None)\n    if config.host_compiler == HostCompiler.CLANG:\n        self.clang_path = _find_executable_or_die('clang', self.clang_path)\n        self.clang_major_version = self.clang_major_version or _get_clang_major_version(self.clang_path)\n        self.lld_path = self.lld_path or shutil.which('ld.lld')\n    elif config.host_compiler == HostCompiler.GCC:\n        self.gcc_path = _find_executable_or_die('gcc', self.gcc_path)\n        self.gcc_major_version = self.gcc_major_version or _get_gcc_major_version(self.gcc_path)\n    if config.backend == Backend.CUDA:\n        if config.cuda_compiler == CudaCompiler.CLANG:\n            self.clang_path = _find_executable_or_die('clang', self.clang_path)\n        if not self.cuda_compute_capabilities:\n            self.cuda_compute_capabilities = _get_cuda_compute_capabilities_or_die()",
    "docstring": "Gets paths and versions as needed by the config. Args: config: XLAConfigOptions instance that determines what paths and versions to try to autoconfigure.",
    "type": "method",
    "file_path": "tensorflow\\third_party\\xla\\build_tools\\configure\\configure.py",
    "ast_data": "FunctionDef name:get_relevant_paths_and_versions arg:self arg:config arguments arg arg If Compare Assign Call If Compare Assign Call Assign BoolOp Call Assign BoolOp Call If Compare Assign Call Assign BoolOp Call If Compare If Compare Assign Call If Assign Call"
  },
  {
    "library": "matplotlib",
    "name": "Formatter",
    "source_code": "class Formatter(TickHelper):\n    locs = []\n\n    def __call__(self, x, pos=None):\n        raise NotImplementedError('Derived must override')\n\n    def format_ticks(self, values):\n        self.set_locs(values)\n        return [self(value, i) for i, value in enumerate(values)]\n\n    def format_data(self, value):\n        return self.__call__(value)\n\n    def format_data_short(self, value):\n        return self.format_data(value)\n\n    def get_offset(self):\n        return ''\n\n    def set_locs(self, locs):\n        self.locs = locs\n\n    @staticmethod\n    def fix_minus(s):\n        return s.replace('-', '−') if mpl.rcParams['axes.unicode_minus'] else s\n\n    def _set_locator(self, locator):\n        pass",
    "docstring": "Create a string based on a tick value and location.",
    "type": "class",
    "file_path": "matplotlib\\lib\\matplotlib\\ticker.py",
    "ast_data": "ClassDef name:Formatter Assign FunctionDef name:__call__ arg:self arg:x arg:pos arguments arg arg arg Raise Call FunctionDef name:format_ticks arg:self arg:values arguments arg arg Call Return return:yes Call Call FunctionDef name:format_data arg:self arg:value arguments arg arg Return return:yes Call FunctionDef name:format_data_short arg:self arg:value arguments arg arg Return return:yes Call FunctionDef name:get_offset arg:self arguments arg Return return:yes FunctionDef name:set_locs arg:self arg:locs arguments arg arg Assign FunctionDef name:fix_minus arg:s arguments arg Return return:yes Call FunctionDef name:_set_locator arg:self arg:locator arguments arg arg"
  },
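The Formatter API above is the extension point for custom tick labels: subclass it and override __call__, which receives the tick value and its position index. A minimal sketch (the KiloFormatter class and sample data are hypothetical, not part of matplotlib):

```python
import matplotlib.pyplot as plt
from matplotlib.ticker import Formatter

class KiloFormatter(Formatter):
    """Hypothetical formatter rendering tick values in thousands."""
    def __call__(self, x, pos=None):
        # fix_minus swaps the ASCII hyphen for a unicode minus
        # when rcParams['axes.unicode_minus'] is enabled.
        return self.fix_minus(f"{x / 1000:g}k")

fig, ax = plt.subplots()
ax.plot([0, 5000, 10000], [1, 4, 9])
ax.xaxis.set_major_formatter(KiloFormatter())  # labels: 0k, 5k, 10k
```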
  {
    "library": "pandas",
    "name": "waitForPaste",
    "source_code": "def waitForPaste(timeout=None):\n    startTime = time.time()\n    while True:\n        clipboardText = paste()\n        if clipboardText != '':\n            return clipboardText\n        time.sleep(0.01)\n        if timeout is not None and time.time() > startTime + timeout:\n            raise PyperclipTimeoutException('waitForPaste() timed out after ' + str(timeout) + ' seconds.')",
    "docstring": "This function call blocks until a non-empty text string exists on the clipboard. It returns this text. This function raises PyperclipTimeoutException if timeout was set to a number of seconds that has elapsed without non-empty text being put on the clipboard.",
    "type": "function",
    "file_path": "pandas\\pandas\\io\\clipboard\\__init__.py",
    "ast_data": "FunctionDef name:waitForPaste arg:timeout arguments arg Assign Call While Assign Call If Compare Return return:yes Call If BoolOp Compare Compare Call Raise Call Call"
  },
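waitForPaste is a poll-until-ready loop, and the same pattern generalizes to any blocking read with a timeout. A sketch under the assumption of an arbitrary read_source callable standing in for paste():

```python
import time

def wait_for_value(read_source, timeout=None, interval=0.01):
    """Poll read_source() until it returns a truthy value or timeout expires."""
    start = time.time()
    while True:
        value = read_source()
        if value:
            return value
        time.sleep(interval)  # avoid a busy-wait
        if timeout is not None and time.time() > start + timeout:
            raise TimeoutError(f"timed out after {timeout} seconds")
```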
  {
    "library": "tensorflow",
    "name": "_write",
    "source_code": "def _write(self, file_prefix, session=None, options=None):\n    start_time = time.time()\n    output = self._saver.save(file_prefix=file_prefix, session=session, options=options)\n    end_time = time.time()\n    metrics.AddCheckpointWriteDuration(api_label=_CHECKPOINT_V1, microseconds=_get_duration_microseconds(start_time, end_time))\n    global _END_TIME_OF_LAST_WRITE\n    with _END_TIME_OF_LAST_WRITE_LOCK:\n        metrics.AddTrainingTimeSaved(api_label=_CHECKPOINT_V1, microseconds=_get_duration_microseconds(_END_TIME_OF_LAST_WRITE, end_time))\n        if checkpoint_context.in_preemption_save_context():\n            _preemption_checkpoint_saved_time_usecs.get_cell().increase_by(_get_duration_microseconds(_END_TIME_OF_LAST_WRITE, end_time))\n        _END_TIME_OF_LAST_WRITE = end_time\n    if tensor_util.is_tf_type(output):\n        if context.executing_eagerly():\n            output = compat.as_str(output.numpy())\n    else:\n        output = compat.as_str(output)\n    if options is not None and options.experimental_write_callbacks is not None:\n        _execute_callbacks(options.experimental_write_callbacks, output)\n    metrics.RecordCheckpointSize(api_label=_CHECKPOINT_V1, filesize=_get_checkpoint_size(output))\n    return output",
    "docstring": "Writes a training checkpoint. The checkpoint includes variables created by this object and any trackable objects it depends on at the time is called. does not number checkpoints, increment , or update the metadata used by . It is primarily intended for use by higher level checkpoint management utilities. provides a very basic implementation of these features. Args: file_prefix: A prefix to use for the checkpoint filenames (/path/to/directory/and_a_prefix). session: The session to evaluate variables in. Ignored when executing eagerly. If not provided when graph building, the default session is used. options: Optional object. Returns: The full path to the checkpoint (i.e. ).",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\checkpoint\\checkpoint.py",
    "ast_data": "FunctionDef name:_write arg:self arg:file_prefix arg:session arg:options arguments arg arg arg arg Assign Call Assign Call Assign Call Call Call With Call Call If Call Call Call Call Assign If Call If Call Assign Call Call Assign Call If BoolOp Compare Compare Call Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "num_replicas_in_sync",
    "source_code": "@property\ndef num_replicas_in_sync(self):\n    return self._extended._num_replicas_in_sync",
    "docstring": "Returns number of replicas over which gradients are aggregated.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\distribute_lib.py",
    "ast_data": "FunctionDef name:num_replicas_in_sync arg:self arguments arg Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "_copy_collection_props",
    "source_code": "def _copy_collection_props(self, legend_handle, orig_handle):\n    legend_handle.set_color(orig_handle.get_color()[0])\n    legend_handle.set_linestyle(orig_handle.get_linestyle()[0])",
    "docstring": "Copy properties from the *orig_handle* to the *legend_handle*.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\legend_handler.py",
    "ast_data": "FunctionDef name:_copy_collection_props arg:self arg:legend_handle arg:orig_handle arguments arg arg arg Call Call Call Call"
  },
  {
    "library": "matplotlib",
    "name": "VBoxDivider",
    "source_code": "class VBoxDivider(SubplotDivider):\n\n    def new_locator(self, ny, ny1=None):\n        return super().new_locator(0, ny, 0, ny1)\n\n    def _locate(self, nx, ny, nx1, ny1, axes, renderer):\n        ny += self._yrefindex\n        ny1 += self._yrefindex\n        fig_w, fig_h = self._fig.bbox.size / self._fig.dpi\n        x, y, w, h = self.get_position_runtime(axes, renderer)\n        summed_hs = self.get_vertical_sizes(renderer)\n        equal_ws = self.get_horizontal_sizes(renderer)\n        y0, x0, oy, ww = _locate(y, x, h, w, summed_hs, equal_ws, fig_h, fig_w, self.get_anchor())\n        if ny1 is None:\n            ny1 = -1\n        x1, w1 = (x0, ww)\n        y1, h1 = (y0 + oy[ny] / fig_h, (oy[ny1] - oy[ny]) / fig_h)\n        return mtransforms.Bbox.from_bounds(x1, y1, w1, h1)",
    "docstring": "A for laying out axes vertically, while ensuring that they have equal widths.",
    "type": "class",
    "file_path": "matplotlib\\lib\\mpl_toolkits\\axes_grid1\\axes_divider.py",
    "ast_data": "ClassDef name:VBoxDivider FunctionDef name:new_locator arg:self arg:ny arg:ny1 arguments arg arg arg Return return:yes Call Call FunctionDef name:_locate arg:self arg:nx arg:ny arg:nx1 arg:ny1 arg:axes arg:renderer arguments arg arg arg arg arg arg arg Assign Assign Call Assign Call Assign Call Assign Call Call If Compare Assign Assign Assign Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "get_markeredgewidth",
    "source_code": "def get_markeredgewidth(self):\n    return self._markeredgewidth",
    "docstring": "Return the marker edge width in points. See also .",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\lines.py",
    "ast_data": "FunctionDef name:get_markeredgewidth arg:self arguments arg Return return:yes"
  },
  {
    "library": "kornia",
    "name": "mod",
    "source_code": "def mod(a: Tensor, b: int) -> Tensor:\n    return a - a // b * b",
    "docstring": "Compute the modulo operation for two numbers. This function calculates the remainder of the division of 'a' by 'b' using the formula: a - (a // b) * b, which is equivalent to the modulo operation. Args: a: The dividend. b: The divisor. Returns: The remainder of a divided by b. Example: >>> mod(7, 3) 1",
    "type": "function",
    "file_path": "kornia\\kornia\\contrib\\models\\rt_detr\\post_processor.py",
    "ast_data": "FunctionDef name:mod arg:a arg:b arguments arg arg Return return:yes"
  },
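The identity the docstring describes can be checked directly against Python's % operator; a quick sketch with a small integer tensor:

```python
import torch

a = torch.tensor([7, -7, 12])
b = 3
# a - (a // b) * b uses floor division, so it matches % for positive b,
# including the negative-dividend case (-7 % 3 == 2).
assert torch.equal(a - a // b * b, a % b)
```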
  {
    "library": "pytorch",
    "name": "MeshTopoInfo",
    "source_code": "@dataclass\nclass MeshTopoInfo:\n    mesh: DeviceMesh\n    mesh_dim_devices: list[int]\n    mesh_dim_bandwidth: list[float]\n    mesh_dim_latency: list[float]\n\n    @staticmethod\n    @lru_cache(None)\n    def build_from_mesh(mesh: DeviceMesh) -> 'MeshTopoInfo':\n        num_devices_per_host = _mesh_resources.num_devices_per_host(mesh.device_type)\n        base_bw = 87.7\n        mesh_dim_bandwidth = [base_bw] * mesh.ndim\n        mesh_dim_latency = [0.6] * mesh.ndim\n        mesh_dim_devices = [1] * mesh.ndim\n        total_num_devices = 1\n        for mesh_dim in reversed(range(mesh.ndim)):\n            num_devices = mesh.size(mesh_dim)\n            mesh_dim_devices[mesh_dim] = num_devices\n            total_num_devices *= num_devices\n            if total_num_devices > num_devices_per_host:\n                mesh_dim_bandwidth[mesh_dim] *= 0.22\n                mesh_dim_latency[mesh_dim] = 2.7\n        return MeshTopoInfo(mesh, mesh_dim_devices, mesh_dim_bandwidth, mesh_dim_latency)",
    "docstring": "Mesh information for collective cost estimation",
    "type": "class",
    "file_path": "pytorch\\torch\\distributed\\tensor\\_collective_utils.py",
    "ast_data": "ClassDef name:MeshTopoInfo FunctionDef name:build_from_mesh arg:mesh arguments arg Assign Call Assign Assign Assign Assign Assign For Call Call Assign Call Assign If Compare Assign Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_preprocess_conv3d_input",
    "source_code": "def _preprocess_conv3d_input(x, data_format):\n    tf_data_format = 'NDHWC'\n    if data_format == 'channels_first':\n        if not _has_nchw_support():\n            x = array_ops.transpose(x, (0, 2, 3, 4, 1))\n        else:\n            tf_data_format = 'NCDHW'\n    return (x, tf_data_format)",
    "docstring": "Transpose and cast the input before the conv3d. Args: x: input tensor. data_format: string, or . Returns: A tensor.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\backend.py",
    "ast_data": "FunctionDef name:_preprocess_conv3d_input arg:x arg:data_format arguments arg arg Assign If Compare If Call Assign Call Assign Return return:yes"
  },
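The permutation (0, 2, 3, 4, 1) moves the channel axis last; the same reshaping shown standalone with NumPy:

```python
import numpy as np

x = np.zeros((2, 3, 4, 5, 6))             # NCDHW: batch, channels, depth, H, W
x_ndhwc = np.transpose(x, (0, 2, 3, 4, 1))
print(x_ndhwc.shape)                       # (2, 4, 5, 6, 3) -> NDHWC
```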
  {
    "library": "tensorflow",
    "name": "stack",
    "source_code": "def stack(self, name=None):\n    with ops.name_scope(name, 'TensorArrayV2Stack', [self._flow]):\n        if not self._dynamic_size and self._size is not None:\n            ta_size = tensor_util.constant_value(self._size)\n        else:\n            ta_size = -1\n        value = list_ops.tensor_list_stack(input_handle=self._flow, element_dtype=self._dtype, num_elements=ta_size, element_shape=self.element_shape)\n        return value",
    "docstring": "See TensorArray.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\tensor_array_ops.py",
    "ast_data": "FunctionDef name:stack arg:self arg:name arguments arg arg With Call If BoolOp Compare Assign Call Assign Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_flat_tensor_specs",
    "source_code": "@property\ndef _flat_tensor_specs(self):\n    return nest.flatten(self._component_specs, expand_composites=True)",
    "docstring": "A list of TensorSpecs compatible with self._to_tensor_list(v).",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\type_spec.py",
    "ast_data": "FunctionDef name:_flat_tensor_specs arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "StaticTableHandler",
    "source_code": "class StaticTableHandler(TrackableWeightHandler):\n\n    def __init__(self, getter_lambda):\n        self._num_tensors = 2\n        self._getter = getter_lambda\n        self._distribute_strategy = distribute_lib.get_strategy()\n\n        def raise_error(_):\n            raise RuntimeError('This layer contains a static lookup table, which cannot be changed via set_weights().')\n        self._setter = raise_error",
    "docstring": "Wrapper for handling weight collection for static hash tables.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\base_layer_utils.py",
    "ast_data": "ClassDef name:StaticTableHandler FunctionDef name:__init__ arg:self arg:getter_lambda arguments arg arg Assign Assign Assign Call FunctionDef name:raise_error arg:_ arguments arg Raise Call Assign"
  },
  {
    "library": "tensorflow",
    "name": "stft",
    "source_code": "@tf_export('signal.stft')\n@dispatch.add_dispatch_support\ndef stft(signals, frame_length, frame_step, fft_length=None, window_fn=window_ops.hann_window, pad_end=False, name=None):\n    with ops.name_scope(name, 'stft', [signals, frame_length, frame_step]):\n        signals = ops.convert_to_tensor(signals, name='signals')\n        signals.shape.with_rank_at_least(1)\n        frame_length = ops.convert_to_tensor(frame_length, name='frame_length')\n        frame_length.shape.assert_has_rank(0)\n        frame_step = ops.convert_to_tensor(frame_step, name='frame_step')\n        frame_step.shape.assert_has_rank(0)\n        if fft_length is None:\n            fft_length = _enclosing_power_of_two(frame_length)\n        else:\n            fft_length = ops.convert_to_tensor(fft_length, name='fft_length')\n        framed_signals = shape_ops.frame(signals, frame_length, frame_step, pad_end=pad_end)\n        if window_fn is not None:\n            window = window_fn(frame_length, dtype=framed_signals.dtype)\n            framed_signals *= window\n        return fft_ops.rfft(framed_signals, [fft_length])",
    "docstring": "Computes the [Short-time Fourier Transform][stft] of . Implemented with TPU/GPU-compatible ops and supports gradients. Args: signals: A / of real-valued signals. frame_length: An integer scalar . The window length in samples. frame_step: An integer scalar . The number of samples to step. fft_length: An integer scalar . The size of the FFT to apply. If not provided, uses the smallest power of 2 enclosing . window_fn: A callable that takes a window length and a keyword argument and returns a of samples in the provided datatype. If set to , no windowing is used. pad_end: Whether to pad the end of with zeros when the provided frame length and step produces a frame that lies partially past its end. name: An optional name for the operation. Returns: A of / STFT values where is (the unique components of the FFT). Raises: ValueError: If is not at least rank 1, is not scalar, or is not scalar. [stft]:",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\signal\\spectral_ops.py",
    "ast_data": "FunctionDef name:stft arg:signals arg:frame_length arg:frame_step arg:fft_length arg:window_fn arg:pad_end arg:name arguments arg arg arg arg arg arg arg With Call Assign Call Call Assign Call Call Assign Call Call If Compare Assign Call Assign Call Assign Call If Compare Assign Call Return return:yes Call Call"
  },
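A usage sketch of the public tf.signal.stft entry point on a synthetic tone; the sample rate and tone frequency are arbitrary choices for illustration:

```python
import numpy as np
import tensorflow as tf

sr = 16000
t = np.arange(sr, dtype=np.float32) / sr
signal = tf.constant(np.sin(2 * np.pi * 440.0 * t).astype(np.float32))

# frame_length=256 means fft_length defaults to 256 (the enclosing power
# of 2), so each frame yields 256 // 2 + 1 = 129 unique FFT bins.
spec = tf.signal.stft(signal, frame_length=256, frame_step=128)
print(spec.shape, spec.dtype)  # (num_frames, 129) complex64
```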
  {
    "library": "scipy",
    "name": "_kolmogni",
    "source_code": "def _kolmogni(n, p, q):\n    if np.isnan(n):\n        return n\n    if int(n) != n or n <= 0:\n        return np.nan\n    if p <= 0:\n        return 1.0 / n\n    if q <= 0:\n        return 1.0\n    delta = np.exp((np.log(p) - scipy.special.loggamma(n + 1)) / n)\n    if delta <= 1.0 / n:\n        return (delta + 1.0 / n) / 2\n    x = -np.expm1(np.log(q / 2.0) / n)\n    if x >= 1 - 1.0 / n:\n        return x\n    x1 = scu._kolmogci(p) / np.sqrt(n)\n    x1 = min(x1, 1.0 - 1.0 / n)\n\n    def _f(x):\n        return _kolmogn(n, x) - p\n    return scipy.optimize.brentq(_f, 1.0 / n, x1, xtol=1e-14)",
    "docstring": "Computes the PPF/ISF of kolmogn. n of type integer, n>= 1 p is the CDF, q the SF, p+q=1",
    "type": "function",
    "file_path": "scipy\\scipy\\stats\\_ksstats.py",
    "ast_data": "FunctionDef name:_kolmogni arg:n arg:p arg:q arguments arg arg arg If Call Return return:yes If BoolOp Compare Call Compare Return return:yes If Compare Return return:yes If Compare Return return:yes Assign Call Call Call If Compare Return return:yes Assign Call Call If Compare Return return:yes Assign Call Call Assign Call FunctionDef name:_f arg:x arguments arg Return return:yes Call Return return:yes Call"
  },
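The final brentq call is a standard monotone-inversion trick: a PPF is the root of CDF(x) - p on a bracketing interval. The same idea shown on a distribution with a known inverse, for comparison:

```python
from scipy.optimize import brentq
from scipy.stats import norm

p = 0.9
# Invert the normal CDF by root-finding on a bracket known to contain the root.
x = brentq(lambda x: norm.cdf(x) - p, -10.0, 10.0, xtol=1e-14)
print(x, norm.ppf(p))  # both ~1.2815515655
```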
  {
    "library": "tensorflow",
    "name": "OptionalSpec",
    "source_code": "@tf_export('OptionalSpec', v1=['OptionalSpec', 'data.experimental.OptionalStructure'])\nclass OptionalSpec(type_spec.TypeSpec):\n    __slots__ = ['_element_spec']\n\n    def __init__(self, element_spec):\n        super().__init__()\n        self._element_spec = element_spec\n\n    @property\n    def value_type(self):\n        return _OptionalImpl\n\n    def _serialize(self):\n        return (self._element_spec,)\n\n    @property\n    def _component_specs(self):\n        return [tensor_spec.TensorSpec((), dtypes.variant)]\n\n    def _to_components(self, value):\n        return [value._variant_tensor]\n\n    def _from_components(self, flat_value):\n        return _OptionalImpl(flat_value[0], self._element_spec)\n\n    @staticmethod\n    def from_value(value):\n        return OptionalSpec(value.element_spec)\n\n    def _to_legacy_output_types(self):\n        return self\n\n    def _to_legacy_output_shapes(self):\n        return self\n\n    def _to_legacy_output_classes(self):\n        return self",
    "docstring": "Type specification for . For instance, can be used to define a tf.function that takes as an input argument: >>> @tf.function(input_signature=[tf.OptionalSpec( ... tf.TensorSpec(shape=(), dtype=tf.int32, name=None))]) ... def maybe_square(optional): ... if optional.has_value(): ... x = optional.get_value() ... return x * x ... return -1 >>> optional = tf.experimental.Optional.from_value(5) >>> print(maybe_square(optional)) tf.Tensor(25, shape=(), dtype=int32) Attributes: element_spec: A (nested) structure of objects that represents the type specification of the optional element.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\data\\ops\\optional_ops.py",
    "ast_data": "ClassDef name:OptionalSpec Assign FunctionDef name:__init__ arg:self arg:element_spec arguments arg arg Call Call Assign FunctionDef name:value_type arg:self arguments arg Return return:yes FunctionDef name:_serialize arg:self arguments arg Return return:yes FunctionDef name:_component_specs arg:self arguments arg Return return:yes Call FunctionDef name:_to_components arg:self arg:value arguments arg arg Return return:yes FunctionDef name:_from_components arg:self arg:flat_value arguments arg arg Return return:yes Call FunctionDef name:from_value arg:value arguments arg Return return:yes Call FunctionDef name:_to_legacy_output_types arg:self arguments arg Return return:yes FunctionDef name:_to_legacy_output_shapes arg:self arguments arg Return return:yes FunctionDef name:_to_legacy_output_classes arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "broadcast_dynamic_shape",
    "source_code": "def broadcast_dynamic_shape(shape_x: DynamicRaggedShape, shape_y: DynamicRaggedShape) -> DynamicRaggedShape:\n    if not isinstance(shape_x, DynamicRaggedShape):\n        raise TypeError('shape_x must be a DynamicRaggedShape')\n    if not isinstance(shape_y, DynamicRaggedShape):\n        raise TypeError('shape_y must be a DynamicRaggedShape')\n    return broadcast_dynamic_shape_extended(shape_x, shape_y)[0]",
    "docstring": "Returns the shape formed by broadcasting two shapes to be compatible. 1. If shape_x and shape_y both have row_partitions, then fail if their dtypes don't match. 2. If neither has row_partitions and they have different dtypes, go with int64. 3. If one has row_partitions, go with that dtype. Args: shape_x: A shape_y: A Returns: A . Raises: ValueError: If and are not broadcast-compatible.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\dynamic_ragged_shape.py",
    "ast_data": "FunctionDef name:broadcast_dynamic_shape arg:shape_x arg:shape_y arguments arg arg If Call Raise Call If Call Raise Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "reduce_any",
    "source_code": "def reduce_any(input_tensor, axis=None, keepdims=False):\n    v = get_static_value(input_tensor)\n    if v is None:\n        return math_ops.reduce_any(input_tensor, axis=axis, keepdims=keepdims)\n    else:\n        return v.any(axis=axis, keepdims=keepdims)",
    "docstring": "A version of tf.reduce_any that eagerly evaluates if possible.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\numpy_ops\\np_utils.py",
    "ast_data": "FunctionDef name:reduce_any arg:input_tensor arg:axis arg:keepdims arguments arg arg arg Assign Call If Compare Return return:yes Call Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "_read_old_value_labels",
    "source_code": "def _read_old_value_labels(self) -> None:\n    assert self._dtype is not None\n    offset = self._nobs * self._dtype.itemsize\n    self._path_or_buf.seek(self._data_location + offset)\n    while True:\n        if not self._path_or_buf.read(2):\n            break\n        self._path_or_buf.seek(-2, os.SEEK_CUR)\n        n = self._read_uint16()\n        labname = self._decode(self._path_or_buf.read(9))\n        self._path_or_buf.read(1)\n        codes = np.frombuffer(self._path_or_buf.read(2 * n), dtype=f'{self._byteorder}i2', count=n)\n        self._value_label_dict[labname] = {}\n        for i in range(n):\n            self._value_label_dict[labname][codes[i]] = self._decode(self._path_or_buf.read(8))",
    "docstring": "Reads value labels with fixed-length strings (105 and earlier format)",
    "type": "method",
    "file_path": "pandas\\pandas\\io\\stata.py",
    "ast_data": "FunctionDef name:_read_old_value_labels arg:self arguments arg Compare Assign Call While If Call Call Assign Call Assign Call Call Call Assign Call Call Assign For Call Assign Call Call"
  },
  {
    "library": "cherrypy",
    "name": "expires",
    "source_code": "def expires(secs=0, force=False, debug=False):\n    response = cherrypy.serving.response\n    headers = response.headers\n    cacheable = False\n    if not force:\n        for indicator in ('Etag', 'Last-Modified', 'Age', 'Expires'):\n            if indicator in headers:\n                cacheable = True\n                break\n    if not cacheable and (not force):\n        if debug:\n            cherrypy.log('request is not cacheable', 'TOOLS.EXPIRES')\n    else:\n        if debug:\n            cherrypy.log('request is cacheable', 'TOOLS.EXPIRES')\n        if isinstance(secs, datetime.timedelta):\n            secs = 86400 * secs.days + secs.seconds\n        if secs == 0:\n            if force or 'Pragma' not in headers:\n                headers['Pragma'] = 'no-cache'\n            if cherrypy.serving.request.protocol >= (1, 1):\n                if force or 'Cache-Control' not in headers:\n                    headers['Cache-Control'] = 'no-cache, must-revalidate'\n            expiry = httputil.HTTPDate(1169942400.0)\n        else:\n            expiry = httputil.HTTPDate(response.time + secs)\n        if force or 'Expires' not in headers:\n            headers['Expires'] = expiry",
    "docstring": "Tool for influencing cache mechanisms using the 'Expires' header. secs Must be either an int or a datetime.timedelta, and indicates the number of seconds between response.time and when the response should expire. The 'Expires' header will be set to response.time + secs. If secs is zero, the 'Expires' header is set one year in the past, and the following \"cache prevention\" headers are also set: * Pragma: no-cache * Cache-Control': no-cache, must-revalidate force If False, the following headers are checked: * Etag * Last-Modified * Age * Expires If any are already present, none of the above response headers are set.",
    "type": "function",
    "file_path": "cherrypy\\cherrypy\\lib\\caching.py",
    "ast_data": "FunctionDef name:expires arg:secs arg:force arg:debug arguments arg arg arg Assign Assign Assign If For If Compare Assign If BoolOp If Call If Call If Call Assign If Compare If BoolOp Compare Assign If Compare If BoolOp Compare Assign Assign Call Assign Call If BoolOp Compare Assign"
  },
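As a CherryPy tool, expires is normally enabled per handler or via config rather than called directly. A minimal sketch (the App class and handler names are hypothetical):

```python
import cherrypy

class App:
    @cherrypy.expose
    @cherrypy.tools.expires(secs=3600)            # Expires: response.time + 1h
    def cached(self):
        return "cache me"

    @cherrypy.expose
    @cherrypy.tools.expires(secs=0, force=True)   # sets cache-prevention headers
    def fresh(self):
        return "always revalidate"

# Equivalent config-based form:
# cherrypy.quickstart(App(), '/', {'/': {'tools.expires.on': True,
#                                        'tools.expires.secs': 3600}})
```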
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, tensor_proto, initialized=True):\n    self._tensor_proto = tensor_proto\n    self._initialized = initialized",
    "docstring": "Constructor. Args: tensor_proto: the object that cannot be represented as a object. initialized: () whether the Tensor is initialized.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\debug\\lib\\debug_data.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:tensor_proto arg:initialized arguments arg arg arg Assign Assign"
  },
  {
    "library": "pytorch",
    "name": "register",
    "source_code": "def register(self, func: Callable, source: str, lib, *, allow_override=False) -> RegistrationHandle:\n    if not allow_override:\n        if self.kernel is not None:\n            raise RuntimeError(f'register_fake(...): the operator {self.qualname} already has an fake impl registered at {self.kernel.source}.')\n        if torch._C._dispatch_has_kernel_for_dispatch_key(self.qualname, 'Meta'):\n            raise RuntimeError(f\"register_fake(...): the operator {self.qualname} already has an DispatchKey::Meta implementation via a pre-existing torch.library or TORCH_LIBRARY registration. Please either remove that registration or don't call register_fake.\")\n        if torch._C._dispatch_has_kernel_for_dispatch_key(self.qualname, 'CompositeImplicitAutograd'):\n            raise RuntimeError(f'register_fake(...): the operator {self.qualname} already has an implementation for this device type via a pre-existing registration to DispatchKey::CompositeImplicitAutograd.CompositeImplicitAutograd operators do not need an fake impl; instead, the operator will decompose into its constituents and those can have fake impls defined on them.')\n    kernel = Kernel(func, source)\n    self.kernels.append(kernel)\n\n    def deregister_fake_kernel():\n        self.kernels.remove(kernel)\n    meta_kernel = construct_meta_kernel(self.qualname, self)\n    lib.impl(self.qualname, meta_kernel, 'Meta', allow_override=allow_override)\n    handle = RegistrationHandle(deregister_fake_kernel)\n    return handle",
    "docstring": "Register an fake impl. Returns a RegistrationHandle that one can use to de-register this fake impl.",
    "type": "method",
    "file_path": "pytorch\\torch\\_library\\fake_impl.py",
    "ast_data": "FunctionDef name:register arg:self arg:func arg:source arg:lib arguments arg arg arg arg arg If If Compare Raise Call If Call Raise Call If Call Raise Call Assign Call Call FunctionDef name:deregister_fake_kernel arguments Call Assign Call Call Assign Call Return return:yes"
  },
  {
    "library": "sphinx",
    "name": "unparse",
    "source_code": "def unparse(node: ast.AST | None, code: str='') -> str | None:\n    if node is None:\n        return None\n    elif isinstance(node, str):\n        return node\n    return _UnparseVisitor(code).visit(node)",
    "docstring": "Unparse an AST to string.",
    "type": "function",
    "file_path": "sphinx\\sphinx\\pycode\\ast.py",
    "ast_data": "FunctionDef name:unparse arg:node arg:code arguments arg arg If Compare Return return:no If Call Return return:yes Return return:yes Call Call"
  },
  {
    "library": "pandas",
    "name": "from_numpy_dtype",
    "source_code": "@classmethod\ndef from_numpy_dtype(cls, dtype: np.dtype) -> BaseMaskedDtype:\n    if dtype.kind == 'b':\n        from pandas.core.arrays.boolean import BooleanDtype\n        return BooleanDtype()\n    elif dtype.kind in 'iu':\n        from pandas.core.arrays.integer import NUMPY_INT_TO_DTYPE\n        return NUMPY_INT_TO_DTYPE[dtype]\n    elif dtype.kind == 'f':\n        from pandas.core.arrays.floating import NUMPY_FLOAT_TO_DTYPE\n        return NUMPY_FLOAT_TO_DTYPE[dtype]\n    else:\n        raise NotImplementedError(dtype)",
    "docstring": "Construct the MaskedDtype corresponding to the given numpy dtype.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\dtypes\\dtypes.py",
    "ast_data": "FunctionDef name:from_numpy_dtype arg:cls arg:dtype arguments arg arg If Compare Return return:yes Call If Compare Return return:yes If Compare Return return:yes Raise Call"
  },
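The dispatch above keys on NumPy's single-character dtype kind codes; a quick look at the codes involved:

```python
import numpy as np

for dt in map(np.dtype, [np.bool_, np.int32, np.uint8, np.float64]):
    print(dt, "->", dt.kind)
# bool -> b, int32 -> i, uint8 -> u, float64 -> f
```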
  {
    "library": "pytorch",
    "name": "__call__",
    "source_code": "def __call__(self, body_fn, args, kwargs, hints):\n    if not isinstance(args, tuple):\n        raise RuntimeError(f'args must be a tuple, got {type(args)}')\n    if not all((isinstance(t, (torch.Tensor, int, float, bool)) for t in args)):\n        raise RuntimeError(f'args must be a tuple of tensors, ints, floats, or bools, got {args}')\n    if not isinstance(kwargs, dict):\n        raise RuntimeError(f'kwargs must be a dict, got {type(kwargs)}')\n    if len(kwargs) > 0:\n        raise RuntimeError(f'kwargs except for hints are not supported, got {kwargs}')\n    if not isinstance(hints, dict):\n        raise RuntimeError(f'hints must be a dict, got {type(hints)}')\n    for k, v in hints.items():\n        if not isinstance(k, str):\n            raise RuntimeError(f'hints key must be a str, got {k}.')\n        if not isinstance(v, (int, float, bool, str)):\n            raise RuntimeError(f'hints must be a dict containing int, float, bool or str value, got value {v} for key {k}.')\n    return super().__call__(body_fn, args, kwargs, hints)",
    "docstring": "Call implementation of hints_wrapper Args: body_fn (Callable): A callable function that is within the scope that is being traced. args (Tuple of torch.Tensor/int/float/bool): A tuple of inputs to body_fn. kwargs (dict): Keyword argument to the body_fn. hints (dict): A dict of context hints which could be passed to backend compiler.",
    "type": "method",
    "file_path": "pytorch\\torch\\_higher_order_ops\\hints_wrap.py",
    "ast_data": "FunctionDef name:__call__ arg:self arg:body_fn arg:args arg:kwargs arg:hints arguments arg arg arg arg arg If Call Raise Call Call If Call Call Raise Call If Call Raise Call Call If Compare Call Raise Call If Call Raise Call Call For Call If Call Raise Call If Call Raise Call Return return:yes Call Call"
  },
  {
    "library": "scipy",
    "name": "trim",
    "source_code": "def trim(a, limits=None, inclusive=(True, True), relative=False, axis=None):\n    if relative:\n        return trimr(a, limits=limits, inclusive=inclusive, axis=axis)\n    else:\n        return trima(a, limits=limits, inclusive=inclusive)",
    "docstring": "Trims an array by masking the data outside some given limits. Returns a masked version of the input array. %s Examples -------- >>> from scipy.stats.mstats import trim >>> z = [ 1, 2, 3, 4, 5, 6, 7, 8, 9,10] >>> print(trim(z,(3,8))) [-- -- 3 4 5 6 7 8 -- --] >>> print(trim(z,(0.1,0.2),relative=True)) [-- 2 3 4 5 6 7 8 -- --]",
    "type": "function",
    "file_path": "scipy\\scipy\\stats\\_mstats_basic.py",
    "ast_data": "FunctionDef name:trim arg:a arg:limits arg:inclusive arg:relative arg:axis arguments arg arg arg arg arg If Return return:yes Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_matrix_exp_pade7",
    "source_code": "def _matrix_exp_pade7(matrix):\n    b = [17297280.0, 8648640.0, 1995840.0, 277200.0, 25200.0, 1512.0, 56.0]\n    b = [constant_op.constant(x, matrix.dtype) for x in b]\n    ident = linalg_ops.eye(array_ops.shape(matrix)[-2], batch_shape=array_ops.shape(matrix)[:-2], dtype=matrix.dtype)\n    matrix_2 = math_ops.matmul(matrix, matrix)\n    matrix_4 = math_ops.matmul(matrix_2, matrix_2)\n    matrix_6 = math_ops.matmul(matrix_4, matrix_2)\n    tmp = matrix_6 + b[5] * matrix_4 + b[3] * matrix_2 + b[1] * ident\n    matrix_u = math_ops.matmul(matrix, tmp)\n    matrix_v = b[6] * matrix_6 + b[4] * matrix_4 + b[2] * matrix_2 + b[0] * ident\n    return (matrix_u, matrix_v)",
    "docstring": "7th-order Pade approximant for matrix exponential.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\linalg\\linalg_impl.py",
    "ast_data": "FunctionDef name:_matrix_exp_pade7 arg:matrix arguments arg Assign Assign Call Assign Call Call Call Assign Call Assign Call Assign Call Assign Assign Call Assign Return return:yes"
  },
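The U/V split feeds the rational approximant expm(A) ≈ (V - U)^{-1}(V + U). A NumPy check of the same [7/7] Padé coefficients against scipy.linalg.expm, valid for small-norm matrices (the library pairs this with scaling-and-squaring for larger norms):

```python
import numpy as np
from scipy.linalg import expm

b = [17297280.0, 8648640.0, 1995840.0, 277200.0, 25200.0, 1512.0, 56.0]
A = 0.01 * np.arange(9.0).reshape(3, 3)   # small norm, so no scaling needed
I = np.eye(3)
A2 = A @ A
A4 = A2 @ A2
A6 = A4 @ A2
U = A @ (A6 + b[5] * A4 + b[3] * A2 + b[1] * I)
V = b[6] * A6 + b[4] * A4 + b[2] * A2 + b[0] * I
print(np.allclose(np.linalg.solve(V - U, V + U), expm(A)))  # True
```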
  {
    "library": "numpy",
    "name": "place",
    "source_code": "@array_function_dispatch(_place_dispatcher)\ndef place(arr, mask, vals):\n    return _place(arr, mask, vals)",
    "docstring": "Change elements of an array based on conditional and input values. Similar to `placevalsmaskcopytomaskextractplaceaamaskvalsa` are to be masked, this sequence must be non-empty. See Also -------- copyto, put, take, extract Examples -------- >>> import numpy as np >>> arr = np.arange(6).reshape(2, 3) >>> np.place(arr, arr>2, [44, 55]) >>> arr array([[ 0, 1, 2], [44, 55, 44]])",
    "type": "function",
    "file_path": "numpy\\numpy\\lib\\_function_base_impl.py",
    "ast_data": "FunctionDef name:place arg:arr arg:mask arg:vals arguments arg arg arg Return return:yes Call Call"
  },
  {
    "library": "numpy",
    "name": "__next__",
    "source_code": "def __next__(self):\n    d = next(self.dataiter)\n    if self.maskiter is not None:\n        m = next(self.maskiter)\n        if isinstance(m, np.void):\n            return mvoid(d, mask=m, hardmask=self.ma._hardmask)\n        elif m:\n            return masked\n    return d",
    "docstring": "Return the next value, or raise StopIteration. Examples -------- >>> import numpy as np >>> x = np.ma.array([3, 2], mask=[0, 1]) >>> fl = x.flat >>> next(fl) 3 >>> next(fl) masked >>> next(fl) Traceback (most recent call last): ... StopIteration",
    "type": "method",
    "file_path": "numpy\\numpy\\ma\\core.py",
    "ast_data": "FunctionDef name:__next__ arg:self arguments arg Assign Call If Compare Assign Call If Call Return return:yes Call If Return return:yes Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "is_subtype_of",
    "source_code": "def is_subtype_of(self, other: trace.TraceType) -> bool:\n    return self == other",
    "docstring": "See tf.types.experimental.TraceType base class.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\dtypes.py",
    "ast_data": "FunctionDef name:is_subtype_of arg:self arg:other arguments arg arg Return return:yes Compare"
  },
  {
    "library": "pytorch",
    "name": "query",
    "source_code": "def query(self) -> bool:\n    return super().query()",
    "docstring": "Check if all the work submitted has been completed. Returns: A boolean indicating if all kernels in this stream are completed.",
    "type": "method",
    "file_path": "pytorch\\torch\\cuda\\streams.py",
    "ast_data": "FunctionDef name:query arg:self arguments arg Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "variables_accessed",
    "source_code": "def variables_accessed(variables):\n    accessed = []\n    for variable in variables:\n        if variable.trainable:\n            accessed.extend(_variables_override(variable))\n    for var in accessed:\n        pywrap_tfe.TFE_Py_TapeVariableAccessed(var)\n        pywrap_tfe.TFE_Py_VariableWatcherVariableAccessed(var)",
    "docstring": "Notifies all tapes in the stack that variables have been accessed. Only trainable variables are marked as accessed. Args: variables: iterable of variables to mark as accessed.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\tape.py",
    "ast_data": "FunctionDef name:variables_accessed arg:variables arguments arg Assign For If Call Call For Call Call"
  },
  {
    "library": "tensorflow",
    "name": "Mirrored",
    "source_code": "@tf_export('types.experimental.distributed.Mirrored', v1=[])\nclass Mirrored(DistributedValues):\n    pass",
    "docstring": "Holds a distributed value: a map from replica id to synchronized values. values are for which we know that the value on all replicas is the same. values are kept synchronized by the distribution strategy in use, while values are left unsynchronized. values typically represent model weights. We can safely read a value in a cross-replica context by using the value on any replica, while values should not be read or manipulated directly by the user in a cross-replica context.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\types\\distribute.py",
    "ast_data": "ClassDef name:Mirrored Call"
  },
  {
    "library": "tensorflow",
    "name": "get_string",
    "source_code": "def get_string(self, byte_count=_MAX_INT):\n    return self.fdp.ConsumeString(byte_count)",
    "docstring": "Consume a string with given constraints based on a consumed bool. Args: byte_count: Byte count that defaults to _MAX_INT. Returns: Consumed string based on input bytes and constraints.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\security\\fuzzing\\python_fuzzing.py",
    "ast_data": "FunctionDef name:get_string arg:self arg:byte_count arguments arg arg Return return:yes Call"
  },
  {
    "library": "django",
    "name": "geom_typeid",
    "source_code": "@property\ndef geom_typeid(self):\n    return capi.geos_typeid(self.ptr)",
    "docstring": "Return an integer representing the Geometry type.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\geos\\geometry.py",
    "ast_data": "FunctionDef name:geom_typeid arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "date_format",
    "source_code": "@property\ndef date_format(self) -> str:\n    return self._date_format",
    "docstring": "Format string for dates written into Excel files (e.g. 'YYYY-MM-DD').",
    "type": "method",
    "file_path": "pandas\\pandas\\io\\excel\\_base.py",
    "ast_data": "FunctionDef name:date_format arg:self arguments arg Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "_init_status",
    "source_code": "def _init_status(self, actives):\n    self._active_check_colors = self._checks.get_facecolor()\n    if len(self._active_check_colors) == 1:\n        self._active_check_colors = np.repeat(self._active_check_colors, len(actives), axis=0)\n    self._checks.set_facecolor([ec if active else 'none' for ec, active in zip(self._active_check_colors, actives)])",
    "docstring": "Initialize properties to match active status. The user may have passed custom colours in *check_props* to the constructor, or to , so we need to modify the visibility after getting whatever the user set.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\widgets.py",
    "ast_data": "FunctionDef name:_init_status arg:self arg:actives arguments arg arg Assign Call If Compare Call Assign Call Call Call Call"
  },
  {
    "library": "django",
    "name": "technical_500_response",
    "source_code": "def technical_500_response(request, exc_type, exc_value, tb, status_code=500):\n    reporter = get_exception_reporter_class(request)(request, exc_type, exc_value, tb)\n    if request.accepts('text/html'):\n        html = reporter.get_traceback_html()\n        return HttpResponse(html, status=status_code)\n    else:\n        text = reporter.get_traceback_text()\n        return HttpResponse(text, status=status_code, content_type='text/plain; charset=utf-8')",
    "docstring": "Create a technical server error response. The last three arguments are the values returned from sys.exc_info() and friends.",
    "type": "function",
    "file_path": "django\\django\\views\\debug.py",
    "ast_data": "FunctionDef name:technical_500_response arg:request arg:exc_type arg:exc_value arg:tb arg:status_code arguments arg arg arg arg arg Assign Call Call If Call Assign Call Return return:yes Call Assign Call Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "agg_dict_like",
    "source_code": "def agg_dict_like(self) -> DataFrame | Series:\n    return self.agg_or_apply_dict_like(op_name='agg')",
    "docstring": "Compute aggregation in the case of a dict-like argument. Returns ------- Result of aggregation.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\apply.py",
    "ast_data": "FunctionDef name:agg_dict_like arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_infer_inputs",
    "source_code": "def _infer_inputs(self, layer_node_id, convert_to_shapes=False):\n    call_fn_id = self._search_for_child_node(layer_node_id, ['call_and_return_all_conditional_losses'])\n    if call_fn_id is None:\n        return None\n    concrete_functions = self._proto.nodes[call_fn_id].function.concrete_functions\n    if not concrete_functions:\n        return None\n    call_fn_name = concrete_functions[0]\n    call_fn_proto = self._proto.concrete_functions[call_fn_name]\n    structured_input_signature = nested_structure_coder.decode_proto(call_fn_proto.canonicalized_input_signature)\n    inputs = structured_input_signature[0][0]\n    if convert_to_shapes:\n        return nest.map_structure(lambda spec: spec.shape, inputs)\n    else:\n        return inputs",
    "docstring": "Infers input shape of layer from SavedModel functions.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\saving\\saved_model\\load.py",
    "ast_data": "FunctionDef name:_infer_inputs arg:self arg:layer_node_id arg:convert_to_shapes arguments arg arg arg Assign Call If Compare Return return:no Assign If Return return:no Assign Assign Assign Call Assign If Return return:yes Call arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_autopacking_conversion_function",
    "source_code": "def _autopacking_conversion_function(v, dtype=None, name=None, as_ref=False):\n    if as_ref or _should_not_autopack(v):\n        return NotImplemented\n    inferred_dtype = _get_dtype_from_nested_lists(v)\n    if inferred_dtype is None:\n        return NotImplemented\n    if dtype is None:\n        dtype = inferred_dtype\n    elif dtype != inferred_dtype:\n        v = nest.map_structure(_cast_nested_seqs_to_dtype(dtype), v)\n    return _autopacking_helper(v, dtype, name or 'packed')",
    "docstring": "Tensor conversion function that automatically packs arguments.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\array_ops.py",
    "ast_data": "FunctionDef name:_autopacking_conversion_function arg:v arg:dtype arg:name arg:as_ref arguments arg arg arg arg If BoolOp Call Return return:yes Assign Call If Compare Return return:yes If Compare Assign If Compare Assign Call Call Return return:yes Call BoolOp"
  },
  {
    "library": "tensorflow",
    "name": "__call__",
    "source_code": "def __call__(self, shape, dtype=dtypes.float32, **kwargs):\n    self._validate_kwargs(kwargs)\n    dtype = _assert_float_dtype(dtype)\n    scale = self.scale\n    fan_in, fan_out = _compute_fans(shape)\n    if _PARTITION_SHAPE in kwargs:\n        shape = kwargs[_PARTITION_SHAPE]\n    if self.mode == 'fan_in':\n        scale /= max(1.0, fan_in)\n    elif self.mode == 'fan_out':\n        scale /= max(1.0, fan_out)\n    else:\n        scale /= max(1.0, (fan_in + fan_out) / 2.0)\n    if self.distribution == 'truncated_normal':\n        stddev = math.sqrt(scale) / 0.8796256610342398\n        return self._random_generator.truncated_normal(shape, 0.0, stddev, dtype)\n    elif self.distribution == 'untruncated_normal':\n        stddev = math.sqrt(scale)\n        return self._random_generator.random_normal(shape, 0.0, stddev, dtype)\n    else:\n        limit = math.sqrt(3.0 * scale)\n        return self._random_generator.random_uniform(shape, -limit, limit, dtype)",
    "docstring": "Returns a tensor object initialized as specified by the initializer. Args: shape: Shape of the tensor. dtype: Optional dtype of the tensor. Only floating point types are supported. **kwargs: Additional keyword arguments. Raises: ValueError: If the dtype is not floating point",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\init_ops_v2.py",
    "ast_data": "FunctionDef name:__call__ arg:self arg:shape arg:dtype arguments arg arg arg arg Call Assign Call Assign Assign Call If Compare Assign If Compare Call If Compare Call Call If Compare Assign Call Return return:yes Call If Compare Assign Call Return return:yes Call Assign Call Return return:yes Call"
  },
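The fan-based scaling reduces to simple arithmetic: for mode='fan_in' with a truncated normal, the draw's stddev works out as below. The 0.8796... constant is the standard deviation of a unit normal truncated to two sigmas, which the division compensates for:

```python
import math

scale, fan_in = 1.0, 256
stddev = math.sqrt(scale / max(1.0, fan_in)) / 0.8796256610342398
print(round(stddev, 4))  # 0.0711
```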
  {
    "library": "tensorflow",
    "name": "_getargspec",
    "source_code": "def _getargspec(target):\n    fullargspecs = getfullargspec(target)\n    defaults = fullargspecs.defaults or ()\n    if fullargspecs.kwonlydefaults:\n        defaults += tuple(fullargspecs.kwonlydefaults.values())\n    if not defaults:\n        defaults = None\n    argspecs = ArgSpec(args=fullargspecs.args + fullargspecs.kwonlyargs, varargs=fullargspecs.varargs, keywords=fullargspecs.varkw, defaults=defaults)\n    return argspecs",
    "docstring": "A python3 version of getargspec. Calls and assigns args, varargs, varkw, and defaults to a python 2/3 compatible . The parameter name 'varkw' is changed to 'keywords' to fit the struct. Args: target: the target object to inspect. Returns: An ArgSpec with args, varargs, keywords, and defaults parameters from FullArgSpec.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\util\\tf_inspect.py",
    "ast_data": "FunctionDef name:_getargspec arg:target arguments arg Assign Call Assign BoolOp If Call Call If Assign Assign Call Return return:yes"
  },
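For reference, what getfullargspec reports, including the keyword-only defaults that the helper above folds into a single defaults tuple:

```python
from inspect import getfullargspec

def f(a, b=1, *args, c=2, **kw):
    pass

spec = getfullargspec(f)
print(spec.args)            # ['a', 'b']
print(spec.defaults)        # (1,)
print(spec.kwonlyargs)      # ['c']
print(spec.kwonlydefaults)  # {'c': 2}
print(spec.varkw)           # 'kw'
```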
  {
    "library": "scikit-learn",
    "name": "transform",
    "source_code": "def transform(self, X):\n    check_is_fitted(self)\n    X = validate_data(self, X, accept_sparse='csr', reset=False, dtype=(np.float64, np.float32))\n    return self._mean_hiddens(X)",
    "docstring": "Compute the hidden layer activation probabilities, P(h=1|v=X). Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) The data to be transformed. Returns ------- h : ndarray of shape (n_samples, n_components) Latent representations of the data.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\neural_network\\_rbm.py",
    "ast_data": "FunctionDef name:transform arg:self arg:X arguments arg arg Call Assign Call Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "paired_euclidean_distances",
    "source_code": "@validate_params({'X': ['array-like', 'sparse matrix'], 'Y': ['array-like', 'sparse matrix']}, prefer_skip_nested_validation=True)\ndef paired_euclidean_distances(X, Y):\n    X, Y = check_paired_arrays(X, Y)\n    return row_norms(X - Y)",
    "docstring": "Compute the paired euclidean distances between X and Y. Read more in the :ref:. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) Input array/matrix X. Y : {array-like, sparse matrix} of shape (n_samples, n_features) Input array/matrix Y. Returns ------- distances : ndarray of shape (n_samples,) Output array/matrix containing the calculated paired euclidean distances. Examples -------- >>> from sklearn.metrics.pairwise import paired_euclidean_distances >>> X = [[0, 0, 0], [1, 1, 1]] >>> Y = [[1, 0, 0], [1, 1, 0]] >>> paired_euclidean_distances(X, Y) array([1., 1.])",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\metrics\\pairwise.py",
    "ast_data": "FunctionDef name:paired_euclidean_distances arg:X arg:Y arguments arg arg Assign Call Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "clear_inductor_caches",
    "source_code": "def clear_inductor_caches() -> None:\n    for obj in _registered_caches:\n        obj.cache_clear()",
    "docstring": "Clear all registered caches.",
    "type": "function",
    "file_path": "pytorch\\torch\\_inductor\\utils.py",
    "ast_data": "FunctionDef name:clear_inductor_caches arguments For Call"
  },
  {
    "library": "tensorflow",
    "name": "assign_sub",
    "source_code": "def assign_sub(self, delta, use_locking=False, name=None, read_value=True):\n    raise NotImplementedError",
    "docstring": "Subtracts a value from this variable. This is essentially a shortcut for . Args: delta: A . The value to subtract from this variable. use_locking: If , use locking during the operation. name: The name of the operation to be created read_value: if True, will return something which evaluates to the new value of the variable; if False will return the assign op. Returns: The updated variable. If is false, instead returns None in Eager mode and the assign op in graph mode.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\variables.py",
    "ast_data": "FunctionDef name:assign_sub arg:self arg:delta arg:use_locking arg:name arg:read_value arguments arg arg arg arg arg Raise"
  },
  {
    "library": "pytorch",
    "name": "qualified_module_class_name",
    "source_code": "@property\ndef qualified_module_class_name(self) -> str:\n    if self._module_class is None:\n        return ''\n    mod_cls = self._module_class\n    if isinstance(mod_cls, type):\n        mod_cls = mod_cls.__module__ + '.' + mod_cls.__qualname__\n    return mod_cls.replace('.', '_')",
    "docstring": "Qualified name of the module class. E.g. .",
    "type": "method",
    "file_path": "pytorch\\torch\\onnx\\_internal\\fx\\passes\\modularization.py",
    "ast_data": "FunctionDef name:qualified_module_class_name arg:self arguments arg If Compare Return return:yes Assign If Call Assign Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "replace_method_name",
    "source_code": "def replace_method_name(self, signature_key, method_name, tags=None):\n    if not signature_key:\n        raise ValueError('`signature_key` must be defined.')\n    if not method_name:\n        raise ValueError('`method_name` must be defined.')\n    if tags is not None and (not isinstance(tags, list)):\n        tags = [tags]\n    found_match = False\n    for meta_graph_def in self._saved_model.meta_graphs:\n        if tags is None or set(tags) == set(meta_graph_def.meta_info_def.tags):\n            if signature_key not in meta_graph_def.signature_def:\n                raise ValueError(f\"MetaGraphDef associated with tags {tags} does not have a signature_def with key: '{signature_key}'. This means either you specified the wrong signature key or forgot to put the signature_def with the corresponding key in your SavedModel.\")\n            meta_graph_def.signature_def[signature_key].method_name = method_name\n            found_match = True\n    if not found_match:\n        raise ValueError(f'MetaGraphDef associated with tags {tags} could not be found in SavedModel. This means either you specified invalid tags or your SavedModel does not have a MetaGraphDef with the specified tags.')",
    "docstring": "Replaces the method_name in the specified signature_def. This will match and replace multiple sig defs iff tags is None (i.e when multiple s have a signature_def with the same key). If tags is not None, this will only replace a single signature_def in the with matching tags. Args: signature_key: Key of the signature_def to be updated. method_name: new method_name to replace the existing one. tags: A tag or sequence of tags identifying the to update. If None, all meta graphs will be updated. Raises: ValueError: if signature_key or method_name are not defined or if no metagraphs were found with the associated tags or if no meta graph has a signature_def that matches signature_key.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\saved_model\\method_name_updater.py",
    "ast_data": "FunctionDef name:replace_method_name arg:self arg:signature_key arg:method_name arg:tags arguments arg arg arg arg If Raise Call If Raise Call If BoolOp Compare Call Assign Assign For If BoolOp Compare Compare Call Call If Compare Raise Call Assign Assign If Raise Call"
  },
  {
    "library": "kornia",
    "name": "_LearnableBlock",
    "source_code": "class _LearnableBlock(nn.Sequential):\n\n    def __init__(self, in_channels: int=10) -> None:\n        super().__init__()\n        self.conv0 = _KeyNetConvBlock(in_channels)\n        self.conv1 = _KeyNetConvBlock()\n        self.conv2 = _KeyNetConvBlock()\n\n    def forward(self, x: Tensor) -> Tensor:\n        x = self.conv2(self.conv1(self.conv0(x)))\n        return x",
    "docstring": "Helper class for KeyNet. It defines the learnable blocks within the Key.Net",
    "type": "class",
    "file_path": "kornia\\kornia\\feature\\keynet.py",
    "ast_data": "ClassDef name:_LearnableBlock FunctionDef name:__init__ arg:self arg:in_channels arguments arg arg Call Call Assign Call Assign Call Assign Call FunctionDef name:forward arg:self arg:x arguments arg arg Assign Call Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_natural_params",
    "source_code": "@property\ndef _natural_params(self) -> tuple[Tensor, ...]:\n    raise NotImplementedError",
    "docstring": "Abstract method for natural parameters. Returns a tuple of Tensors based on the distribution",
    "type": "method",
    "file_path": "pytorch\\torch\\distributions\\exp_family.py",
    "ast_data": "FunctionDef name:_natural_params arg:self arguments arg Raise"
  },
  {
    "library": "django",
    "name": "has_key",
    "source_code": "def has_key(self, key, version=None):\n    return self.get(key, self._missing_key, version=version) is not self._missing_key",
    "docstring": "Return True if the key is in the cache and has not expired.",
    "type": "method",
    "file_path": "django\\django\\core\\cache\\backends\\base.py",
    "ast_data": "FunctionDef name:has_key arg:self arg:key arg:version arguments arg arg arg Return return:yes Compare Call"
  },
  {
    "library": "numpy",
    "name": "compress_nd",
    "source_code": "def compress_nd(x, axis=None):\n    x = asarray(x)\n    m = getmask(x)\n    if axis is None:\n        axis = tuple(range(x.ndim))\n    else:\n        axis = normalize_axis_tuple(axis, x.ndim)\n    if m is nomask or not m.any():\n        return x._data\n    if m.all():\n        return nxarray([])\n    data = x._data\n    for ax in axis:\n        axes = tuple(list(range(ax)) + list(range(ax + 1, x.ndim)))\n        data = data[(slice(None),) * ax + (~m.any(axis=axes),)]\n    return data",
    "docstring": "Suppress slices from multiple dimensions which contain masked values. Parameters ---------- x : array_like, MaskedArray The array to operate on. If not a MaskedArray instance (or if no array elements are masked), is interpreted as a MaskedArray with set to . axis : tuple of ints or int, optional Which dimensions to suppress slices from can be configured with this parameter. - If axis is a tuple of ints, those are the axes to suppress slices from. - If axis is an int, then that is the only axis to suppress slices from. - If axis is None, all axis are selected. Returns ------- compress_array : ndarray The compressed array. Examples -------- >>> import numpy as np >>> arr = [[1, 2], [3, 4]] >>> mask = [[0, 1], [0, 0]] >>> x = np.ma.array(arr, mask=mask) >>> np.ma.compress_nd(x, axis=0) array([[3, 4]]) >>> np.ma.compress_nd(x, axis=1) array([[1], [3]]) >>> np.ma.compress_nd(x) array([[3]])",
    "type": "function",
    "file_path": "numpy\\numpy\\ma\\extras.py",
    "ast_data": "FunctionDef name:compress_nd arg:x arg:axis arguments arg arg Assign Call Assign Call If Compare Assign Call Call Assign Call If BoolOp Compare Call Return return:yes If Call Return return:yes Call Assign For Assign Call Call Call Call Call Assign Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_retrieve_recv_activations",
    "source_code": "def _retrieve_recv_activations(self, fwd_chunk_id: int):\n    recv_infos = self.args_recv_info[fwd_chunk_id]\n    activations = self._map_tensor_from_recv_info(recv_infos)\n    return activations",
    "docstring": "Retrieve the activations received for the current stage during forward.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\pipelining\\stage.py",
    "ast_data": "FunctionDef name:_retrieve_recv_activations arg:self arg:fwd_chunk_id arguments arg arg Assign Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "setup_bare_concrete_function",
    "source_code": "def setup_bare_concrete_function(saved_bare_concrete_function, concrete_functions):\n    concrete_function = concrete_functions[saved_bare_concrete_function.concrete_function_name]\n    concrete_function._arg_keywords = saved_bare_concrete_function.argument_keywords\n    concrete_function._num_positional_args = saved_bare_concrete_function.allowed_positional_arguments\n    if saved_bare_concrete_function.HasField('function_spec'):\n        function_spec = _deserialize_function_spec_as_nonmethod(saved_bare_concrete_function.function_spec)\n        set_preinitialized_function_spec(concrete_function, function_spec)\n    concrete_function.add_to_graph()\n    return concrete_function",
    "docstring": "Makes a restored bare concrete function callable.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\saved_model\\function_deserialization.py",
    "ast_data": "FunctionDef name:setup_bare_concrete_function arg:saved_bare_concrete_function arg:concrete_functions arguments arg arg Assign Assign Assign If Call Assign Call Call Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "update",
    "source_code": "def update(self, *, left=_UNSET, bottom=_UNSET, right=_UNSET, top=_UNSET, wspace=_UNSET, hspace=_UNSET):\n    if left is not _UNSET:\n        self.left = left\n    if bottom is not _UNSET:\n        self.bottom = bottom\n    if right is not _UNSET:\n        self.right = right\n    if top is not _UNSET:\n        self.top = top\n    if wspace is not _UNSET:\n        self.wspace = wspace\n    if hspace is not _UNSET:\n        self.hspace = hspace\n    for figmanager in _pylab_helpers.Gcf.figs.values():\n        for ax in figmanager.canvas.figure.axes:\n            if ax.get_subplotspec() is not None:\n                ss = ax.get_subplotspec().get_topmost_subplotspec()\n                if ss.get_gridspec() == self:\n                    fig = ax.get_figure(root=False)\n                    ax._set_position(ax.get_subplotspec().get_position(fig))",
    "docstring": "Update the subplot parameters of the grid. Parameters that are not explicitly given are not changed. Setting a parameter to *None* resets it to :rc:. Parameters ---------- left, right, top, bottom : float or None, optional Extent of the subplots as a fraction of figure width or height. wspace, hspace : float or None, optional Spacing between the subplots as a fraction of the average subplot width / height.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\gridspec.py",
    "ast_data": "FunctionDef name:update arg:self arguments arg arg arg arg arg arg arg If Compare Assign If Compare Assign If Compare Assign If Compare Assign If Compare Assign If Compare Assign For Call For If Compare Call Assign Call Call If Compare Call Assign Call Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "_check_single_device_module",
    "source_code": "def _check_single_device_module(module: nn.Module, ignored_params: set[nn.Parameter], device_id: Optional[Union[int, torch.device]]) -> None:\n    devices = {param.device for param in _get_orig_params(module, ignored_params)}\n    if len(devices) == 2 and torch.device('cpu') in devices:\n        if device_id is None:\n            raise RuntimeError('To support a module with both CPU and GPU params, please pass in device_id argument.')\n    elif len(devices) > 1:\n        raise RuntimeError(f'FSDP only supports single device modules but got params on {devices}')",
    "docstring": "Raise an error if ``. Thus, after this method, the module must be either fully on the CPU or fully on a non-CPU device.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\fsdp\\_init_utils.py",
    "ast_data": "FunctionDef name:_check_single_device_module arg:module arg:ignored_params arg:device_id arguments arg arg arg Assign Call If BoolOp Compare Call Compare Call If Compare Raise Call If Compare Call Raise Call"
  },
  {
    "library": "pandas",
    "name": "sum",
    "source_code": "@deprecate_nonkeyword_arguments(version='4.0', allowed_args=['self'], name='sum')\ndef sum(self, axis: Axis | None=0, skipna: bool=True, numeric_only: bool=False, min_count: int=0, **kwargs) -> Series:\n    result = super().sum(axis=axis, skipna=skipna, numeric_only=numeric_only, min_count=min_count, **kwargs)\n    if isinstance(result, Series):\n        result = result.__finalize__(self, method='sum')\n    return result",
    "docstring": "Return the sum of the values over the requested axis. This is equivalent to the method `Series` handles all-NA and empty series identically. >>> pd.Series([np.nan]).sum() 0.0 >>> pd.Series([np.nan]).sum(min_count=1) nan",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\frame.py",
    "ast_data": "FunctionDef name:sum arg:self arg:axis arg:skipna arg:numeric_only arg:min_count arguments arg arg arg arg arg arg Assign Call Call If Call Assign Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "core_assignment",
    "source_code": "@property\ndef core_assignment(self) -> np.ndarray:\n    return self._core_assignment",
    "docstring": "The logical to physical core mapping. Returns: An integer numpy array of rank 3, with shape . Maps (replica, logical core) pairs to physical topology coordinates.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\tpu\\device_assignment.py",
    "ast_data": "FunctionDef name:core_assignment arg:self arguments arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "symbolic_sizes_strides",
    "source_code": "def symbolic_sizes_strides(self, ex: torch.Tensor) -> tuple[Sequence[Union[int, Expr]], Sequence[Union[int, Expr]]]:\n    if self.reuse_shape_env:\n        return (convert_shape_to_inductor(ex.size()), convert_shape_to_inductor(ex.stride()))\n    else:\n        from torch._dynamo.source import ConstantSource\n        source = ConstantSource(f'__inductor_unknown_tensor_{len(self._shape_env.var_to_val)}')\n        size, stride, _ = self._shape_env.create_symbolic_sizes_strides_storage_offset(ex, source)\n    r_size = [i.node.expr if isinstance(i, torch.SymInt) else i for i in size]\n    r_stride = [i.node.expr if isinstance(i, torch.SymInt) else i for i in stride]\n    return (r_size, r_stride)",
    "docstring": "Support dynamic shapes and dynamic strides by assigning variables to each dimension. We duck-shape tensors, so if two tensors have the same size they get assigned the same symbolic variable.",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\graph.py",
    "ast_data": "FunctionDef name:symbolic_sizes_strides arg:self arg:ex arguments arg arg If Return return:yes Call Call Call Call Assign Call Call Assign Call Assign Call Assign Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "get_zorder",
    "source_code": "def get_zorder(self):\n    return self.zorder",
    "docstring": "Return the artist's zorder.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\artist.py",
    "ast_data": "FunctionDef name:get_zorder arg:self arguments arg Return return:yes"
  },
  {
    "library": "virtualenv",
    "name": "quote",
    "source_code": "@staticmethod\ndef quote(string):\n    max_sharps = 0\n    current_sharps = 0\n    for char in string:\n        if char == '#':\n            current_sharps += 1\n            max_sharps = max(current_sharps, max_sharps)\n        else:\n            current_sharps = 0\n    wrapping = '#' * (max_sharps + 1)\n    return f\"r{wrapping}'{string}'{wrapping}\"",
    "docstring": "Nushell supports raw strings like: r###'this is a string'###. This method finds the maximum continuous sharps in the string and then quote it with an extra sharp.",
    "type": "method",
    "file_path": "virtualenv\\src\\virtualenv\\activation\\nushell\\__init__.py",
    "ast_data": "FunctionDef name:quote arg:string arguments arg Assign Assign For If Compare Assign Call Assign Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_formatter",
    "source_code": "def _formatter(x):\n    if isinstance(x, np.ndarray):\n        if x.size != 0:\n            return np.array2string(x, separator=', ')\n        else:\n            return repr(x.tolist())\n    else:\n        return str(x)",
    "docstring": "Separate Numpy array elements with comma.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\ragged_tensor.py",
    "ast_data": "FunctionDef name:_formatter arg:x arguments arg If Call If Compare Return return:yes Call Return return:yes Call Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "_get_input_node_specs",
    "source_code": "def _get_input_node_specs(node: Node, placement_strategies: dict[Node, PlacementStrategy]) -> tuple[DTensorSpec, ...]:\n    input_specs_list: list[DTensorSpec] = []\n    for input_arg in node.all_input_nodes:\n        if input_arg in placement_strategies:\n            output_spec = placement_strategies[input_arg].output_specs\n            assert isinstance(output_spec, DTensorSpec)\n            input_specs_list.append(output_spec)\n        else:\n            raise ValueError(f'{input_arg} does not have output_spec populated.')\n    return tuple(input_specs_list)",
    "docstring": "Get the input specs of a node.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\tensor\\experimental\\_tp_transform.py",
    "ast_data": "FunctionDef name:_get_input_node_specs arg:node arg:placement_strategies arguments arg arg For If Compare Assign Call Call Raise Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "get_platform",
    "source_code": "def get_platform():\n    global PLATFORM\n    cmd = 'uname'\n    out, err = run_shell_cmd(cmd)\n    platform_detected = out.strip().lower()\n    if platform_detected != 'linux':\n        if err and FLAGS.debug:\n            print('Error in detecting platform:\\n %s' % str(err))\n        print('Error: Detected unsupported operating system.\\nStopping...')\n        sys.exit(1)\n    else:\n        PLATFORM = platform_detected\n    return PLATFORM",
    "docstring": "Retrieves platform information. Currently the script only support linux. If other platoforms such as Windows or MacOS is detected, it throws an error and terminates. Returns: String that is platform type. e.g. 'linux'",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\tools\\tensorflow_builder\\config_detector\\config_detector.py",
    "ast_data": "FunctionDef name:get_platform arguments Assign Assign Call Assign Call Call If Compare If BoolOp Call Call Call Call Assign Return return:yes"
  },
  {
    "library": "scipy",
    "name": "blocksize",
    "source_code": "@property\ndef blocksize(self) -> tuple:\n    return self.data.shape[1:]",
    "docstring": "Block size of the matrix.",
    "type": "method",
    "file_path": "scipy\\scipy\\sparse\\_bsr.py",
    "ast_data": "FunctionDef name:blocksize arg:self arguments arg Return return:yes"
  },
  {
    "library": "scipy",
    "name": "_ScipyBackend",
    "source_code": "class _ScipyBackend:\n    __ua_domain__ = 'numpy.scipy.fft'\n\n    @staticmethod\n    def __ua_function__(method, args, kwargs):\n        fn = getattr(_basic_backend, method.__name__, None)\n        if fn is None:\n            fn = getattr(_realtransforms_backend, method.__name__, None)\n        if fn is None:\n            fn = getattr(_fftlog_backend, method.__name__, None)\n        if fn is None:\n            return NotImplemented\n        return fn(*args, **kwargs)",
    "docstring": "The default backend for fft calculations Notes ----- We use the domain `` as well.",
    "type": "class",
    "file_path": "scipy\\scipy\\fft\\_backend.py",
    "ast_data": "ClassDef name:_ScipyBackend Assign FunctionDef name:__ua_function__ arg:method arg:args arg:kwargs arguments arg arg arg Assign Call If Compare Assign Call If Compare Assign Call If Compare Return return:yes Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "_is_prefix",
    "source_code": "def _is_prefix(candidate, target):\n    return len(candidate) < len(target) and target[:len(candidate)] == candidate",
    "docstring": "Check whether is a prefix of .",
    "type": "function",
    "file_path": "pytorch\\torch\\export\\unflatten.py",
    "ast_data": "FunctionDef name:_is_prefix arg:candidate arg:target arguments arg arg Return return:yes BoolOp Compare Call Call Compare Call"
  },
  {
    "library": "tensorflow",
    "name": "_get_op_range",
    "source_code": "def _get_op_range(self):\n    found, op_range = self.get_flag_value(FLAG_NAME_OP_RANGE)\n    if not found or not op_range:\n        op_range = (-1, -1)\n        return op_range\n    match = _OP_RANGE_PAT.match(op_range)\n    if not match:\n        op_range = (-1, -1)\n        return op_range\n    op_range = (int(match.group(1)), int(match.group(2)))\n    return op_range",
    "docstring": "Sets the index range of the Ops that we will consider tracing.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\tpu\\tensor_tracer_flags.py",
    "ast_data": "FunctionDef name:_get_op_range arg:self arguments arg Assign Call If BoolOp Assign Return return:yes Assign Call If Assign Return return:yes Assign Call Call Call Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "SimplePatchShadow",
    "source_code": "class SimplePatchShadow(AbstractPathEffect):\n\n    def __init__(self, offset=(2, -2), shadow_rgbFace=None, alpha=None, rho=0.3, **kwargs):\n        super().__init__(offset)\n        if shadow_rgbFace is None:\n            self._shadow_rgbFace = shadow_rgbFace\n        else:\n            self._shadow_rgbFace = mcolors.to_rgba(shadow_rgbFace)\n        if alpha is None:\n            alpha = 0.3\n        self._alpha = alpha\n        self._rho = rho\n        self._gc = kwargs\n\n    def draw_path(self, renderer, gc, tpath, affine, rgbFace):\n        gc0 = renderer.new_gc()\n        gc0.copy_properties(gc)\n        if self._shadow_rgbFace is None:\n            r, g, b = (rgbFace or (1.0, 1.0, 1.0))[:3]\n            shadow_rgbFace = (r * self._rho, g * self._rho, b * self._rho)\n        else:\n            shadow_rgbFace = self._shadow_rgbFace\n        gc0.set_foreground('none')\n        gc0.set_alpha(self._alpha)\n        gc0.set_linewidth(0)\n        gc0 = self._update_gc(gc0, self._gc)\n        renderer.draw_path(gc0, tpath, affine + self._offset_transform(renderer), shadow_rgbFace)\n        gc0.restore()",
    "docstring": "A simple shadow via a filled patch.",
    "type": "class",
    "file_path": "matplotlib\\lib\\matplotlib\\patheffects.py",
    "ast_data": "ClassDef name:SimplePatchShadow FunctionDef name:__init__ arg:self arg:offset arg:shadow_rgbFace arg:alpha arg:rho arguments arg arg arg arg arg arg Call Call If Compare Assign Assign Call If Compare Assign Assign Assign Assign FunctionDef name:draw_path arg:self arg:renderer arg:gc arg:tpath arg:affine arg:rgbFace arguments arg arg arg arg arg arg Assign Call Call If Compare Assign BoolOp Assign Assign Call Call Call Assign Call Call Call Call"
  },
  {
    "library": "kornia",
    "name": "get_laf_pts_to_draw",
    "source_code": "def get_laf_pts_to_draw(LAF: Tensor, img_idx: int=0) -> Tuple[List[int], List[int]]:\n    KORNIA_CHECK_LAF(LAF)\n    pts = laf_to_boundary_points(LAF[img_idx:img_idx + 1])[0]\n    pts_np = pts.detach().permute(1, 0, 2).cpu()\n    return (pts_np[..., 0].tolist(), pts_np[..., 1].tolist())",
    "docstring": "Return list for drawing LAFs (local features). Args: LAF: :math: img_idx: which points to output. Returns: List of boundary points x, y` Examples: x, y = get_laf_pts_to_draw(LAF, img_idx) plt.figure() plt.imshow(kornia.utils.tensor_to_image(img[img_idx])) plt.plot(x, y, 'r') plt.show()",
    "type": "function",
    "file_path": "kornia\\kornia\\feature\\laf.py",
    "ast_data": "FunctionDef name:get_laf_pts_to_draw arg:LAF arg:img_idx arguments arg arg Call Assign Call Assign Call Call Call Return return:yes Call Call"
  },
  {
    "library": "scrapy",
    "name": "_handle_events",
    "source_code": "def _handle_events(self, events: list[Event]) -> None:\n    for event in events:\n        if isinstance(event, ConnectionTerminated):\n            self.connection_terminated(event)\n        elif isinstance(event, DataReceived):\n            self.data_received(event)\n        elif isinstance(event, ResponseReceived):\n            self.response_received(event)\n        elif isinstance(event, StreamEnded):\n            self.stream_ended(event)\n        elif isinstance(event, StreamReset):\n            self.stream_reset(event)\n        elif isinstance(event, WindowUpdated):\n            self.window_updated(event)\n        elif isinstance(event, SettingsAcknowledged):\n            self.settings_acknowledged(event)\n        elif isinstance(event, UnknownFrameReceived):\n            logger.warning('Unknown frame received: %s', event.frame)",
    "docstring": "Private method which acts as a bridge between the events received from the HTTP/2 data and IH2EventsHandler Arguments: events -- A list of events that the remote peer triggered by sending data",
    "type": "method",
    "file_path": "scrapy\\scrapy\\core\\http2\\protocol.py",
    "ast_data": "FunctionDef name:_handle_events arg:self arg:events arguments arg arg For If Call Call If Call Call If Call Call If Call Call If Call Call If Call Call If Call Call If Call Call"
  },
  {
    "library": "pytorch",
    "name": "CompiledForward",
    "source_code": "@dataclass\nclass CompiledForward(FxGraphCacheLoadable):\n\n    def _is_backward(self) -> bool:\n        return False",
    "docstring": "Cacheable entry for a forward function",
    "type": "class",
    "file_path": "pytorch\\torch\\_functorch\\_aot_autograd\\autograd_cache.py",
    "ast_data": "ClassDef name:CompiledForward FunctionDef name:_is_backward arg:self arguments arg Return return:yes"
  },
  {
    "library": "kornia",
    "name": "build_act",
    "source_code": "def build_act(name: Optional[str], **kwargs: dict[str, Any]) -> Union[nn.Module, None]:\n    if name in REGISTERED_ACT_DICT:\n        act_cls = REGISTERED_ACT_DICT[name]\n        args = build_kwargs_from_config(kwargs, act_cls)\n        return act_cls(**args)\n    return None",
    "docstring": "Return activation op.",
    "type": "function",
    "file_path": "kornia\\kornia\\contrib\\models\\efficient_vit\\nn\\act.py",
    "ast_data": "FunctionDef name:build_act arg:name arguments arg arg If Compare Assign Assign Call Return return:yes Call Return return:no"
  },
  {
    "library": "pytorch",
    "name": "sdpa_flop_count",
    "source_code": "def sdpa_flop_count(query_shape, key_shape, value_shape):\n    b, h, s_q, d_q = query_shape\n    _b2, _h2, s_k, _d2 = key_shape\n    _b3, _h3, _s3, d_v = value_shape\n    assert b == _b2 == _b3 and h == _h2 == _h3 and (d_q == _d2) and (s_k == _s3) and (d_q == _d2)\n    total_flops = 0\n    total_flops += bmm_flop((b * h, s_q, d_q), (b * h, d_q, s_k))\n    total_flops += bmm_flop((b * h, s_q, s_k), (b * h, s_k, d_v))\n    return total_flops",
    "docstring": "Count flops for self-attention. NB: We can assume that value_shape == key_shape",
    "type": "function",
    "file_path": "pytorch\\torch\\utils\\flop_counter.py",
    "ast_data": "FunctionDef name:sdpa_flop_count arg:query_shape arg:key_shape arg:value_shape arguments arg arg arg Assign Assign Assign BoolOp Compare Compare Compare Compare Compare Assign Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_pretty_print",
    "source_code": "def _pretty_print(data_item, summarize):\n    if isinstance(data_item, tensor_lib.Tensor):\n        arr = data_item.numpy()\n        if np.isscalar(arr):\n            return str(arr)\n        else:\n            flat = arr.reshape((-1,))\n            lst = [str(x) for x in flat[:summarize]]\n            if len(lst) < flat.size:\n                lst.append('...')\n            return str(lst)\n    else:\n        return str(data_item)",
    "docstring": "Format a data item for use in an error message in eager mode. Args: data_item: One of the items in the \"data\" argument to an assert_* function. Can be a Tensor or a scalar value. summarize: How many elements to retain of each tensor-valued entry in data. Returns: An appropriate string representation of data_item",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\check_ops.py",
    "ast_data": "FunctionDef name:_pretty_print arg:data_item arg:summarize arguments arg arg If Call Assign Call If Call Return return:yes Call Assign Call Assign Call If Compare Call Call Return return:yes Call Return return:yes Call"
  },
  {
    "library": "kornia",
    "name": "validate_tensor",
    "source_code": "def validate_tensor(self, input: Tensor) -> None:\n    _validate_input_dtype(input, accepted_dtypes=[float16, float32, float64])\n    if len(input.shape) != 5:\n        raise RuntimeError(f'Expect (B, C, D, H, W). Got {input.shape}.')",
    "docstring": "Check if the input tensor is formatted as expected.",
    "type": "method",
    "file_path": "kornia\\kornia\\augmentation\\_3d\\base.py",
    "ast_data": "FunctionDef name:validate_tensor arg:self arg:input arguments arg arg Call If Compare Call Raise Call"
  },
  {
    "library": "cherrypy",
    "name": "__init__",
    "source_code": "def __init__(self, callable, *args, **kwargs):\n    self.callable = callable\n    self.args = args\n    self.kwargs = kwargs",
    "docstring": "Initialize the page handler.",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\_cpdispatch.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:callable arguments arg arg arg arg Assign Assign Assign"
  },
  {
    "library": "pytorch",
    "name": "transform_tensor",
    "source_code": "def transform_tensor(self, read_item: ReadItem, tensor: torch.Tensor):\n    return narrow_tensor_by_index(tensor, read_item.dest_offsets, read_item.lengths)",
    "docstring": "Extension from the planner interface to make it easy to extend the default planner.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\checkpoint\\default_planner.py",
    "ast_data": "FunctionDef name:transform_tensor arg:self arg:read_item arg:tensor arguments arg arg arg Return return:yes Call"
  },
  {
    "library": "sphinx",
    "name": "get_lvar_names",
    "source_code": "def get_lvar_names(node: ast.AST, self: ast.arg | None=None) -> list[str]:\n    if self:\n        self_id = self.arg\n    node_name = node.__class__.__name__\n    if node_name in {'Constant', 'Index', 'Slice', 'Subscript'}:\n        raise TypeError('%r does not create new variable' % node)\n    if node_name == 'Name':\n        if self is None or node.id == self_id:\n            return [node.id]\n        else:\n            raise TypeError('The assignment %r is not instance variable' % node)\n    elif node_name in {'Tuple', 'List'}:\n        members = []\n        for elt in node.elts:\n            with contextlib.suppress(TypeError):\n                members.extend(get_lvar_names(elt, self))\n        return members\n    elif node_name == 'Attribute':\n        if node.value.__class__.__name__ == 'Name' and self and (node.value.id == self_id):\n            return ['%s' % get_lvar_names(node.attr, self)[0]]\n        else:\n            raise TypeError('The assignment %r is not instance variable' % node)\n    elif node_name == 'str':\n        return [node]\n    elif node_name == 'Starred':\n        return get_lvar_names(node.value, self)\n    else:\n        raise NotImplementedError('Unexpected node name %r' % node_name)",
    "docstring": "Convert assignment-AST to variable names. This raises if the assignment does not create new variable:: ary[0] = 'foo' dic['bar'] = 'baz' # => TypeError",
    "type": "function",
    "file_path": "sphinx\\sphinx\\pycode\\parser.py",
    "ast_data": "FunctionDef name:get_lvar_names arg:node arg:self arguments arg arg If Assign Assign If Compare Raise Call If Compare If BoolOp Compare Compare Return return:yes Raise Call If Compare Assign For With Call Call Call Return return:yes If Compare If BoolOp Compare Compare Return return:yes Call Raise Call If Compare Return return:yes If Compare Return return:yes Call Raise Call"
  },
  {
    "library": "pytorch",
    "name": "_all_gather_dtensor",
    "source_code": "def _all_gather_dtensor(tensor: DTensor, parent_mesh: Optional[DeviceMesh]) -> torch.Tensor:\n    assert parent_mesh == tensor.device_mesh\n    placements = list(copy.deepcopy(tensor.placements))\n    for i in range(0, len(placements) - 1):\n        placements[i] = Replicate()\n    tensor = tensor.redistribute(device_mesh=tensor.device_mesh, placements=placements)\n    return tensor.to_local()",
    "docstring": "All gather a DTensor in its FSDP dimension and return the local tensor.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\tensor\\parallel\\fsdp.py",
    "ast_data": "FunctionDef name:_all_gather_dtensor arg:tensor arg:parent_mesh arguments arg arg Compare Assign Call Call For Call Call Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "FilledArrow",
    "source_code": "class FilledArrow(SimpleArrow):\n    _ARROW_STYLE = '-|>'\n\n    def __init__(self, axis_artist, line_path, transform, line_mutation_scale, facecolor):\n        super().__init__(axis_artist, line_path, transform, line_mutation_scale)\n        self.set_facecolor(facecolor)",
    "docstring": "The artist class that will be returned for FilledArrow style.",
    "type": "class",
    "file_path": "matplotlib\\lib\\mpl_toolkits\\axisartist\\axisline_style.py",
    "ast_data": "ClassDef name:FilledArrow Assign FunctionDef name:__init__ arg:self arg:axis_artist arg:line_path arg:transform arg:line_mutation_scale arg:facecolor arguments arg arg arg arg arg arg Call Call Call"
  },
  {
    "library": "scipy",
    "name": "check_gradient",
    "source_code": "def check_gradient(fcn, Dfcn, x0, args=(), col_deriv=0):\n    x = atleast_1d(x0)\n    n = len(x)\n    x = x.reshape((n,))\n    fvec = atleast_1d(fcn(x, *args))\n    m = len(fvec)\n    fvec = fvec.reshape((m,))\n    ldfjac = m\n    fjac = atleast_1d(Dfcn(x, *args))\n    fjac = fjac.reshape((m, n))\n    if col_deriv == 0:\n        fjac = transpose(fjac)\n    xp = zeros((n,), float)\n    err = zeros((m,), float)\n    fvecp = None\n    _minpack._chkder(m, n, x, fvec, fjac, ldfjac, xp, fvecp, 1, err)\n    fvecp = atleast_1d(fcn(xp, *args))\n    fvecp = fvecp.reshape((m,))\n    _minpack._chkder(m, n, x, fvec, fjac, ldfjac, xp, fvecp, 2, err)\n    good = prod(greater(err, 0.5), axis=0)\n    return (good, err)",
    "docstring": "Perform a simple check on the gradient for correctness.",
    "type": "function",
    "file_path": "scipy\\scipy\\optimize\\_minpack_py.py",
    "ast_data": "FunctionDef name:check_gradient arg:fcn arg:Dfcn arg:x0 arg:args arg:col_deriv arguments arg arg arg arg arg Assign Call Assign Call Assign Call Assign Call Call Assign Call Assign Call Assign Assign Call Call Assign Call If Compare Assign Call Assign Call Assign Call Assign Call Assign Call Call Assign Call Call Assign Call Call Return return:yes"
  },
  {
    "library": "pygame",
    "name": "get_sprites_from_layer",
    "source_code": "def get_sprites_from_layer(self, layer):\n    sprites = []\n    sprites_append = sprites.append\n    sprite_layers = self._spritelayers\n    for spr in self._spritelist:\n        if sprite_layers[spr] == layer:\n            sprites_append(spr)\n        elif sprite_layers[spr] > layer:\n            break\n    return sprites",
    "docstring": "return all sprites from a layer ordered as they where added LayeredUpdates.get_sprites_from_layer(layer): return sprites Returns all sprites from a layer. The sprites are ordered in the sequence that they where added. (The sprites are not removed from the layer.",
    "type": "method",
    "file_path": "pygame\\src_py\\sprite.py",
    "ast_data": "FunctionDef name:get_sprites_from_layer arg:self arg:layer arguments arg arg Assign Assign Assign For If Compare Call If Compare Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "restore_from_strided_representation",
    "source_code": "def restore_from_strided_representation(args):\n    new_args = []\n    args = list(args)\n    while args:\n        a = args.pop(0)\n        if a == STRIDED_REPRESENTATION:\n            d, values = (args.pop(0), args.pop(0))\n            if d['layout'] is torch.sparse_coo:\n                a = torch.sparse_coo_tensor(d['indices'], values, size=d['shape'], is_coalesced=d['is_coalesced'])\n            elif d['layout'] in sparse_compressed_layouts:\n                a = torch.sparse_compressed_tensor(d['compressed_indices'], d['plain_indices'], values, size=d['shape'], layout=d['layout'])\n            else:\n                raise NotImplementedError(f'conversion of {d['layout']} strided representation to tensor')\n        new_args.append(a)\n    return tuple(new_args)",
    "docstring": "Restore non-strided differentiable tensosr from their strided representations.",
    "type": "function",
    "file_path": "pytorch\\torch\\sparse\\__init__.py",
    "ast_data": "FunctionDef name:restore_from_strided_representation arg:args arguments arg Assign Assign Call While Assign Call If Compare Assign Call Call If Compare Assign Call If Compare Assign Call Raise Call Call Return return:yes Call"
  },
  {
    "library": "sphinx",
    "name": "IndexInSectionTitleTransform",
    "source_code": "class IndexInSectionTitleTransform(SphinxPostTransform):\n    default_priority = 400\n    formats = ('latex',)\n\n    def run(self, **kwargs: Any) -> None:\n        for node in list(self.document.findall(nodes.title)):\n            if isinstance(node.parent, nodes.section):\n                for i, index in enumerate(node.findall(addnodes.index)):\n                    node.remove(index)\n                    node.parent.insert(i + 1, index)",
    "docstring": "Move index nodes in section title to outside of the title. LaTeX index macro is not compatible with some handling of section titles such as uppercasing done on LaTeX side (cf. fncychap handling of ``). Moving the index node to after the title node fixes that. Before:: blah blah blah blah blah blah ... After:: blah blah blah blah blah blah ...",
    "type": "class",
    "file_path": "sphinx\\sphinx\\builders\\latex\\transforms.py",
    "ast_data": "ClassDef name:IndexInSectionTitleTransform Assign Assign FunctionDef name:run arg:self arguments arg arg For Call Call If Call For Call Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "_MixedPrecision",
    "source_code": "@dataclass\nclass _MixedPrecision:\n    param_dtype: Optional[torch.dtype] = None\n    reduce_dtype: Optional[torch.dtype] = None\n    buffer_dtype: Optional[torch.dtype] = None",
    "docstring": "This configures DDP-native mixed precision training. Attributes: param_dtype (torch.dtype): This specifies the dtype for model parameters, inputs (when `` would result in communication occurring in fp16.",
    "type": "class",
    "file_path": "pytorch\\torch\\nn\\parallel\\distributed.py",
    "ast_data": "ClassDef name:_MixedPrecision"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, name, description, *labels):\n    super(BoolGauge, self).__init__('BoolGauge', _bool_gauge_methods, len(labels), name, description, *labels)",
    "docstring": "Creates a new BoolGauge. Args: name: name of the new metric. description: description of the new metric. *labels: The label list of the new metric.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\monitoring.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:name arg:description arguments arg arg arg arg Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "test_main",
    "source_code": "def test_main():\n    global _test_main_called\n    _test_main_called = True\n    os.environ['TF_FORCE_GPU_ALLOW_GROWTH'] = 'true'\n    if _is_enabled():\n        _set_spawn_exe_path()\n        _if_spawn_run_and_exit()\n    test.main()",
    "docstring": "Main function to be called within of a test file.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\multi_process_lib.py",
    "ast_data": "FunctionDef name:test_main arguments Assign Assign If Call Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "MSPS",
    "source_code": "class MSPS(NamedTuple):\n    func_names: set[str]\n    op_idx: int\n    memory: int\n    runtime: float\n    msps: float",
    "docstring": "Represents Memory and Runtime Statistics for an operator/operator group. Attributes: func_names (set[str]): Set of operator/operator group names. op_idx (int): Operator index (group head index incase of operator groups). memory (int): Memory usage in bytes. runtime (float): Runtime in milliseconds. msps (float): Memory per second calculated as memory/runtime.",
    "type": "class",
    "file_path": "pytorch\\torch\\distributed\\_tools\\sac_estimator.py",
    "ast_data": "ClassDef name:MSPS"
  },
  {
    "library": "pandas",
    "name": "__arrow_c_stream__",
    "source_code": "def __arrow_c_stream__(self, requested_schema=None):\n    pa = import_optional_dependency('pyarrow', min_version='14.0.0')\n    if requested_schema is not None:\n        requested_schema = pa.Schema._import_from_c_capsule(requested_schema)\n    table = pa.Table.from_pandas(self, schema=requested_schema)\n    return table.__arrow_c_stream__()",
    "docstring": "Export the pandas DataFrame as an Arrow C stream PyCapsule. This relies on pyarrow to convert the pandas DataFrame to the Arrow format (and follows the default behaviour of `` in its handling of the index, i.e. store the index as a column except for RangeIndex). This conversion is not necessarily zero-copy. Parameters ---------- requested_schema : PyCapsule, default None The schema to which the dataframe should be casted, passed as a PyCapsule containing a C ArrowSchema representation of the requested schema. Returns ------- PyCapsule",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\frame.py",
    "ast_data": "FunctionDef name:__arrow_c_stream__ arg:self arg:requested_schema arguments arg arg Assign Call If Compare Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "kornia",
    "name": "_get_window_grid_kernel2d",
    "source_code": "def _get_window_grid_kernel2d(h: int, w: int, device: Optional[torch.device]=None) -> Tensor:\n    if device is None:\n        device = torch.device('cpu')\n    window_grid2d = create_meshgrid(h, w, False, device=device)\n    window_grid2d = normalize_pixel_coordinates(window_grid2d, h, w)\n    conv_kernel = window_grid2d.permute(3, 0, 1, 2)\n    return conv_kernel",
    "docstring": "Generate a kernel to with window coordinates, residual to window center. Args: h: kernel height. w: kernel width. device: device, on which generate. Returns: conv_kernel [2x1xhxw]",
    "type": "function",
    "file_path": "kornia\\kornia\\geometry\\subpix\\spatial_soft_argmax.py",
    "ast_data": "FunctionDef name:_get_window_grid_kernel2d arg:h arg:w arg:device arguments arg arg arg If Compare Assign Call Assign Call Assign Call Assign Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "_get_cached_or_new",
    "source_code": "@classmethod\ndef _get_cached_or_new(cls):\n    return cls._get_cached_or_new_impl(cls._build_latex_header())",
    "docstring": "Return the previous LatexManager if the header and tex system did not change, or a new instance otherwise.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\backends\\backend_pgf.py",
    "ast_data": "FunctionDef name:_get_cached_or_new arg:cls arguments arg Return return:yes Call Call"
  },
  {
    "library": "pygame",
    "name": "display_capture_pin_properties",
    "source_code": "def display_capture_pin_properties(self):\n    self.dev.displaycapturepinproperties()",
    "docstring": "Displays a dialog containing the property page of the capture pin. For WDM drivers you may find the option to select the resolution most likely here.",
    "type": "method",
    "file_path": "pygame\\src_py\\_camera_vidcapture.py",
    "ast_data": "FunctionDef name:display_capture_pin_properties arg:self arguments arg Call"
  },
  {
    "library": "tensorflow",
    "name": "from_config",
    "source_code": "@classmethod\ndef from_config(cls, config):\n    return cls(**config)",
    "docstring": "Instantiates a from its config. Args: config: Output of . Returns: A instance.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\optimizer_v2\\learning_rate_schedule.py",
    "ast_data": "FunctionDef name:from_config arg:cls arg:config arguments arg arg Return return:yes Call"
  },
  {
    "library": "kornia",
    "name": "create_padding_tuple",
    "source_code": "def create_padding_tuple(padding: PadType, unpadding: bool=False) -> FullPadType:\n    padding = cast(TuplePadType, _pair(padding))\n    if len(padding) not in [2, 4]:\n        raise AssertionError(f'{('Unpadding' if unpadding else 'Padding')} must be either an int, tuple of two ints or tuple of four ints')\n    if len(padding) == 2:\n        pad_vert = _pair(padding[0])\n        pad_horz = _pair(padding[1])\n    else:\n        pad_vert = padding[:2]\n        pad_horz = padding[2:]\n    padding = cast(FullPadType, pad_horz + pad_vert)\n    return padding",
    "docstring": "Create argument for padding op.",
    "type": "function",
    "file_path": "kornia\\kornia\\contrib\\extract_patches.py",
    "ast_data": "FunctionDef name:create_padding_tuple arg:padding arg:unpadding arguments arg arg Assign Call Call If Compare Call Raise Call If Compare Call Assign Call Assign Call Assign Assign Assign Call Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "n_features_in_",
    "source_code": "@property\ndef n_features_in_(self):\n    return self.transformer_list[0][1].n_features_in_",
    "docstring": "Number of features seen during :term:.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\pipeline.py",
    "ast_data": "FunctionDef name:n_features_in_ arg:self arguments arg Return return:yes"
  },
  {
    "library": "scipy",
    "name": "median",
    "source_code": "@lazy_cython\ndef median(y):\n    return linkage(y, method='median', metric='euclidean')",
    "docstring": "Perform median/WPGMC linkage. See for more information on the return structure and algorithm. The following are common calling conventions: 1. `linkagescipy.cluster.hierarchy.linkagescipy.cluster.hierarchy.fclusterscipy.cluster.hierarchy.dendrogram` can be used to generate a plot of the dendrogram.",
    "type": "function",
    "file_path": "scipy\\scipy\\cluster\\hierarchy.py",
    "ast_data": "FunctionDef name:median arg:y arguments arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "_inplace_buffer_and_input_mutations",
    "source_code": "def _inplace_buffer_and_input_mutations(graph: torch.fx.Graph, graph_signature: ExportGraphSignature) -> None:\n    output_node = next(iter(reversed(graph.nodes)))\n    assert output_node.op == 'output' and len(output_node.args) == 1\n    return_args = output_node.args[0]\n    input_name_to_node = {node.name: node for node in graph.nodes if node.op == 'placeholder'}\n    mutation_name_to_input_name = {}\n    buffer_fqn_to_input_name = {buffer_fqn: k for k, buffer_fqn in graph_signature.inputs_to_buffers.items()}\n    mutation_name_to_input_name = {k: buffer_fqn_to_input_name[buffer_fqn] for k, buffer_fqn in graph_signature.buffers_to_mutate.items()}\n    mutation_name_to_input_name.update(graph_signature.user_inputs_to_mutate)\n    num_mutations = len(mutation_name_to_input_name)\n    for mutation in return_args[:num_mutations]:\n        input_name = mutation_name_to_input_name[mutation.name]\n        input_node = input_name_to_node[input_name]\n        with graph.inserting_after(mutation):\n            new_node = graph.create_node('call_function', torch.ops.aten.copy_.default, (input_node, mutation))\n            for k, v in mutation.meta.items():\n                new_node.meta[k] = v\n        mutation.replace_all_uses_with(new_node, lambda x: x is not new_node)\n    user_outputs = tuple(return_args[num_mutations:])\n    output_node.args = (user_outputs,)",
    "docstring": "Transform buffer and input mutations from their functionalized form into copy_ nodes in the graph. Functionalization represents a buffer mutation by passing the buffer as an input and output. For example, consider the eager code: def forward(self, x): self.buffer += x return x * x This corresponds to a graph that looks like: def forward(self, buffer, x): mutated_buffer = aten.add(buffer, x) mul = aten.mul(x, x) return (mutated_buffer, mul) We want to inplace this into something that looks like the original eager code: def forward(self, buffer, x): mutated_buffer = aten.add(buffer, x) buffer.copy_(mutated_buffer) mul = aten.mul(x, x) return (mul,) Input mutations are handled similarly.",
    "type": "function",
    "file_path": "pytorch\\torch\\export\\unflatten.py",
    "ast_data": "FunctionDef name:_inplace_buffer_and_input_mutations arg:graph arg:graph_signature arguments arg arg Assign Call Call Call BoolOp Compare Compare Call Assign Assign Compare Assign Assign Call Assign Call Call Assign Call For Assign Assign With Call Assign Call For Call Assign Call arguments arg Compare Assign Call Assign"
  },
  {
    "library": "matplotlib",
    "name": "get_y",
    "source_code": "def get_y(self):\n    return self._y",
    "docstring": "Return the bottom coord of the rectangle.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\patches.py",
    "ast_data": "FunctionDef name:get_y arg:self arguments arg Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "set_visible",
    "source_code": "def set_visible(self, value):\n    for artist in self.artists:\n        artist.set_visible(value)",
    "docstring": "Set the visibility state of the handles artist.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\widgets.py",
    "ast_data": "FunctionDef name:set_visible arg:self arg:value arguments arg arg For Call"
  },
  {
    "library": "pytorch",
    "name": "SymbolicContext",
    "source_code": "@dataclass(frozen=True)\nclass SymbolicContext:\n    pass",
    "docstring": "Data structure specifying how we should create symbols in ``; e.g., should they be static or dynamic. This is an abstract base class because we are probably going to add another version of this that says \"use exactly these SymInts, don't allocate fresh symbols.\"",
    "type": "class",
    "file_path": "pytorch\\torch\\fx\\experimental\\symbolic_shapes.py",
    "ast_data": "ClassDef name:SymbolicContext Call"
  },
  {
    "library": "kornia",
    "name": "make_input_only_sequential",
    "source_code": "def make_input_only_sequential(module: 'K.container.ImageSequentialBase') -> Callable[P, Tensor]:\n\n    def f(*args: P.args, **kwargs: P.kwargs) -> Tensor:\n        return module(*args, **kwargs)\n    return f",
    "docstring": "Disable all other additional inputs (e.g. ) for ImageSequential.",
    "type": "function",
    "file_path": "kornia\\kornia\\augmentation\\container\\ops.py",
    "ast_data": "FunctionDef name:make_input_only_sequential arg:module arguments arg FunctionDef name:f arguments arg arg Return return:yes Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "update_background",
    "source_code": "def update_background(self, event):\n    if not self.useblit:\n        return\n    artists = sorted(self.artists + self._get_animated_artists(), key=lambda a: a.get_zorder())\n    needs_redraw = any((artist.get_visible() for artist in artists))\n    with ExitStack() as stack:\n        if needs_redraw:\n            for artist in artists:\n                stack.enter_context(artist._cm_set(visible=False))\n            self.canvas.draw()\n        self.background = self.canvas.copy_from_bbox(self.ax.bbox)\n    if needs_redraw:\n        for artist in artists:\n            self.ax.draw_artist(artist)",
    "docstring": "Force an update of the background.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\widgets.py",
    "ast_data": "FunctionDef name:update_background arg:self arg:event arguments arg arg If Return return:no Assign Call Call arguments arg Call Assign Call Call With Call If For Call Call Call Assign Call If For Call"
  },
  {
    "library": "tensorflow",
    "name": "extract_stack",
    "source_code": "def extract_stack(stacklevel=1):\n    thread_key = _get_thread_key()\n    return _tf_stack.extract_stack(_source_mapper_stacks[thread_key][-1].internal_map, _source_filter_stacks[thread_key][-1].internal_set, stacklevel)",
    "docstring": "An eager-friendly alternative to traceback.extract_stack. Args: stacklevel: number of initial frames to skip when producing the stack. Returns: A list-like FrameSummary containing StackFrame-like objects, which are namedtuple-like objects with the following fields: filename, lineno, name, line, meant to masquerade as traceback.FrameSummary objects.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\util\\tf_stack.py",
    "ast_data": "FunctionDef name:extract_stack arg:stacklevel arguments arg Assign Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "reorder_accumulate_grad_nodes",
    "source_code": "def reorder_accumulate_grad_nodes(self):\n    for node in self.fx_tracer.graph.find_nodes(op='call_function', target=torch.ops.inductor.accumulate_grad_.default):\n        param_node, grad_node = (node.args[0], node.args[1])\n        getitem_node = None\n        if grad_node.target == operator.getitem:\n            getitem_node = grad_node\n            grad_node = getitem_node.args[0]\n        arg = max([param_node, grad_node])\n        if arg is not node.prev and (not self.is_placeholder(arg)):\n            arg.append(node)\n            if getitem_node is not None:\n                arg.append(getitem_node)",
    "docstring": "Usage of AOTAutograd causes all the accumulate_grad_ nodes to get pushed to the end of the graph. This differs from eager mode, which schedules them as soon as possible. This pass attempts to reorder the graph to mimic eager behavior.",
    "type": "method",
    "file_path": "pytorch\\torch\\_dynamo\\compiled_autograd.py",
    "ast_data": "FunctionDef name:reorder_accumulate_grad_nodes arg:self arguments arg For Call Assign Assign If Compare Assign Assign Assign Call If BoolOp Compare Call Call If Compare Call"
  },
  {
    "library": "pytorch",
    "name": "WorkerFailed",
    "source_code": "class WorkerFailed(Exception):\n\n    def __init__(self, cmd: str, wrapped_trace: Optional[str]=None) -> None:\n        self.cmd: str = cmd\n        self.wrapped_trace: Optional[str] = wrapped_trace\n        super().__init__()",
    "docstring": "Raised in the main process when a worker failure is detected.",
    "type": "class",
    "file_path": "pytorch\\benchmarks\\instruction_counts\\execution\\runner.py",
    "ast_data": "ClassDef name:WorkerFailed FunctionDef name:__init__ arg:self arg:cmd arg:wrapped_trace arguments arg arg arg Call Call"
  },
  {
    "library": "kornia",
    "name": "filter_and_match_lines",
    "source_code": "def filter_and_match_lines(self, scores: Tensor) -> Tensor:\n    KORNIA_CHECK_SHAPE(scores, ['M', 'N', 'n', 'n'])\n    line_scores1 = scores.max(3)[0]\n    valid_scores1 = line_scores1 != -1\n    line_scores1 = (line_scores1 * valid_scores1).sum(2) / valid_scores1.sum(2)\n    line_scores2 = scores.max(2)[0]\n    valid_scores2 = line_scores2 != -1\n    line_scores2 = (line_scores2 * valid_scores2).sum(2) / valid_scores2.sum(2)\n    line_scores = (line_scores1 + line_scores2) / 2\n    topk_lines = torch.argsort(line_scores, dim=1)[:, -self.top_k_candidates:]\n    top_scores = torch.take_along_dim(scores, topk_lines[:, :, None, None], dim=1)\n    top_scores = concatenate([top_scores, torch.flip(top_scores, dims=[-1])], 1)\n    n_lines1, top2k, n, m = top_scores.shape\n    top_scores = top_scores.reshape((n_lines1 * top2k, n, m))\n    nw_scores = self.needleman_wunsch(top_scores)\n    nw_scores = nw_scores.reshape(n_lines1, top2k)\n    matches = torch.remainder(torch.argmax(nw_scores, dim=1), top2k // 2)\n    matches = topk_lines[torch.arange(n_lines1), matches]\n    return matches",
    "docstring": "Use scores to keep the top k best lines. Compute the Needleman- Wunsch algorithm on each candidate pairs, and keep the highest score. Args: scores: a (N, M, n, n) Tensor containing the pairwise scores of the elements to match. Returns: matches: a (N) Tensor containing the indices of the best match",
    "type": "method",
    "file_path": "kornia\\kornia\\feature\\sold2\\sold2.py",
    "ast_data": "FunctionDef name:filter_and_match_lines arg:self arg:scores arguments arg arg Call Assign Call Assign Compare Assign Call Call Assign Call Assign Compare Assign Call Call Assign Assign Call Assign Call Assign Call Call Assign Assign Call Assign Call Assign Call Assign Call Call Assign Call Return return:yes"
  },
  {
    "library": "scipy",
    "name": "assert_almost_equal",
    "source_code": "def assert_almost_equal(actual, desired, decimal=7, *args, **kwds):\n    rtol, atol = (0, 1.5 * 10 ** (-decimal))\n    return xp_assert_close(actual, desired, *args, atol=atol, rtol=rtol, check_dtype=False, check_shape=False, **kwds)",
    "docstring": "Backwards compatible replacement. In new code, use xp_assert_close instead.",
    "type": "function",
    "file_path": "scipy\\scipy\\_lib\\_array_api.py",
    "ast_data": "FunctionDef name:assert_almost_equal arg:actual arg:desired arg:decimal arguments arg arg arg arg arg Assign Return return:yes Call"
  },
  {
    "library": "django",
    "name": "close",
    "source_code": "def close(self, **kwargs):\n    pass",
    "docstring": "Close the cache connection",
    "type": "method",
    "file_path": "django\\django\\core\\cache\\backends\\base.py",
    "ast_data": "FunctionDef name:close arg:self arguments arg arg"
  },
  {
    "library": "scikit-learn",
    "name": "_mean_hiddens",
    "source_code": "def _mean_hiddens(self, v):\n    p = safe_sparse_dot(v, self.components_.T)\n    p += self.intercept_hidden_\n    return expit(p, out=p)",
    "docstring": "Computes the probabilities P(h=1|v). Parameters ---------- v : ndarray of shape (n_samples, n_features) Values of the visible layer. Returns ------- h : ndarray of shape (n_samples, n_components) Corresponding mean field values for the hidden layer.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\neural_network\\_rbm.py",
    "ast_data": "FunctionDef name:_mean_hiddens arg:self arg:v arguments arg arg Assign Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "mem_efficient_sdp_enabled",
    "source_code": "def mem_efficient_sdp_enabled():\n    return torch._C._get_mem_efficient_sdp_enabled()",
    "docstring": ".. warning:: This flag is beta and subject to change. Returns whether memory efficient scaled dot product attention is enabled or not.",
    "type": "function",
    "file_path": "pytorch\\torch\\backends\\cuda\\__init__.py",
    "ast_data": "FunctionDef name:mem_efficient_sdp_enabled arguments Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_get_layer_broadcasters_from_rps",
    "source_code": "def _get_layer_broadcasters_from_rps(zero_broadcaster, source_rps, target_rps):\n    if not isinstance(zero_broadcaster, _LayerBroadcaster):\n        raise TypeError('Not a _LayerBroadcaster: ' + str(zero_broadcaster))\n    assert len(source_rps) == len(target_rps)\n    if not source_rps:\n        return [zero_broadcaster]\n    next_broadcaster = zero_broadcaster.next_layer(source_rps[0], target_rps[0])\n    tail_broadcasters = _get_layer_broadcasters_from_rps(next_broadcaster, source_rps[1:], target_rps[1:])\n    return [zero_broadcaster] + tail_broadcasters",
    "docstring": "Get LayerBroadcasters from RowPartitions. *--zero_broadcaster->* | | source_rps[0] target_rps[0] | | V V *---result[1]------->* | | source_rps[1] target_rps[1] | | V V *---result[2]------->* . . . *---result[k-1]----->* | | source_rps[k] target_rps[k] | | V V *---result[k]------->* Note: result[0] = zero_broadcaster Args: zero_broadcaster: a broadcaster between the source and target row partitions' rows, and equal to result[0]. source_rps: source row partitions. target_rps: target row partitions (same length as source_rps). Returns: result: a list of LayerBroadcasters.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\dynamic_ragged_shape.py",
    "ast_data": "FunctionDef name:_get_layer_broadcasters_from_rps arg:zero_broadcaster arg:source_rps arg:target_rps arguments arg arg arg If Call Raise Call Call Compare Call Call If Return return:yes Assign Call Assign Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_take_first_line",
    "source_code": "def _take_first_line(text: str) -> str:\n    lines = text.split('\\n', maxsplit=1)\n    first_line = lines[0]\n    if len(lines) > 1:\n        first_line += '[...]'\n    return first_line",
    "docstring": "Take the first line of a text.",
    "type": "function",
    "file_path": "pytorch\\torch\\onnx\\_internal\\exporter\\_capture_strategies.py",
    "ast_data": "FunctionDef name:_take_first_line arg:text arguments arg Assign Call Assign If Compare Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "set_array",
    "source_code": "def set_array(self, A):\n    height, width = self._coordinates.shape[0:-1]\n    if self._shading == 'flat':\n        h, w = (height - 1, width - 1)\n    else:\n        h, w = (height, width)\n    ok_shapes = [(h, w, 3), (h, w, 4), (h, w), (h * w,)]\n    if A is not None:\n        shape = np.shape(A)\n        if shape not in ok_shapes:\n            raise ValueError(f'For X ({width}) and Y ({height}) with {self._shading} shading, A should have shape {' or '.join(map(str, ok_shapes))}, not {A.shape}')\n    return super().set_array(A)",
    "docstring": "Set the data values. Parameters ---------- A : array-like The mesh data. Supported array shapes are: - (M, N) or (M*N,): a mesh with scalar data. The values are mapped to colors using normalization and a colormap. See parameters *norm*, *cmap*, *vmin*, *vmax*. - (M, N, 3): an image with RGB values (0-1 float or 0-255 int). - (M, N, 4): an image with RGBA values (0-1 float or 0-255 int), i.e. including transparency. If the values are provided as a 2D grid, the shape must match the coordinates grid. If the values are 1D, they are reshaped to 2D. M, N follow from the coordinates grid, where the coordinates grid shape is (M, N) for 'gouraud' *shading* and (M+1, N+1) for 'flat' shading.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\collections.py",
    "ast_data": "FunctionDef name:set_array arg:self arg:A arguments arg arg Assign If Compare Assign Assign Assign If Compare Assign Call If Compare Raise Call Call Call Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "_unroll_reduction_fn",
    "source_code": "@staticmethod\ndef _unroll_reduction_fn(inner_fn: Callable[[Sequence[_IntLike], Sequence[_IntLike]], OpsValue], reduction_ranges: Sequence[_IntLike], reduction_type: str, src_dtype: torch.dtype) -> Callable[[Sequence[_IntLike]], OpsValue]:\n    reduction_ranges = [V.graph.sizevars.evaluate_static_shape(x) for x in reduction_ranges]\n    combine_fn = get_reduction_combine_fn(reduction_type, src_dtype)\n\n    def fn(index: Sequence[_IntLike]) -> Any:\n        return functools.reduce(combine_fn, (value_fn(index, rindex) for rindex in itertools.product(*[range(x) for x in reduction_ranges])))\n    value_fn: Callable[[Sequence[_IntLike], Sequence[_IntLike]], Any]\n    if reduction_type in ('argmin', 'argmax'):\n        flatten_index = FixedLayout(None, None, reduction_ranges, FlexibleLayout.contiguous_strides(reduction_ranges)).make_indexer()\n\n        def value_fn(index: Sequence[_IntLike], rindex: Sequence[_IntLike]) -> tuple[OpsValue, OpsValue]:\n            rindex = [sympy.expand(i) for i in rindex]\n            return (inner_fn(index, rindex), ops.index_expr(flatten_index(rindex), torch.int64))\n        return lambda index: fn(index)[1]\n    else:\n        value_fn = inner_fn\n        return fn",
    "docstring": "Convert inner_fn from a reduction to an pointwise",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\ir.py",
    "ast_data": "FunctionDef name:_unroll_reduction_fn arg:inner_fn arg:reduction_ranges arg:reduction_type arg:src_dtype arguments arg arg arg arg Assign Call Assign Call FunctionDef name:fn arg:index arguments arg Return return:yes Call Call Call Call If Compare Assign Call Call Call FunctionDef name:value_fn arg:index arg:rindex arguments arg arg Assign Call Return return:yes Call Call Call Return return:yes arguments arg Call Assign Return return:yes"
  },
  {
    "library": "pandas",
    "name": "wrap_function",
    "source_code": "def wrap_function(func):\n\n    def wrapper(*args, **kwargs):\n        result = func(*args, **kwargs)\n        if isinstance(result, str):\n            result = np.array(result, dtype=object)\n        return result\n    return wrapper",
    "docstring": "Wrap user supplied function to work around numpy issue. see",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\apply.py",
    "ast_data": "FunctionDef name:wrap_function arg:func arguments arg FunctionDef name:wrapper arguments arg arg Assign Call If Call Assign Call Return return:yes Return return:yes"
  },
  {
    "library": "django",
    "name": "use_fieldset",
    "source_code": "@property\ndef use_fieldset(self):\n    return self.field.widget.use_fieldset",
    "docstring": "Return the value of this BoundField widget's use_fieldset attribute.",
    "type": "method",
    "file_path": "django\\django\\forms\\boundfield.py",
    "ast_data": "FunctionDef name:use_fieldset arg:self arguments arg Return return:yes"
  },
  {
    "library": "kornia",
    "name": "RawToRgb2x2Downscaled",
    "source_code": "class RawToRgb2x2Downscaled(Module):\n\n    def __init__(self, cfa: CFA) -> None:\n        super().__init__()\n        self.cfa = cfa\n\n    def forward(self, image: Tensor) -> Tensor:\n        return raw_to_rgb_2x2_downscaled(image, cfa=self.cfa)",
    "docstring": "Module version of the :func: function. The image width and height have to be divisible by two. The image data is assumed to be in the range of (0, 1). Shape: - image: :math: - output: :math: Example: >>> rawinput = torch.rand(2, 1, 4, 6) >>> rgb_downscale = RawToRgb2x2Downscaled(CFA.RG) >>> output = rgb_downscale(rawinput) # 2x3x2x3",
    "type": "class",
    "file_path": "kornia\\kornia\\color\\raw.py",
    "ast_data": "ClassDef name:RawToRgb2x2Downscaled FunctionDef name:__init__ arg:self arg:cfa arguments arg arg Call Call Assign FunctionDef name:forward arg:self arg:image arguments arg arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "apply",
    "source_code": "@classmethod\ndef apply(cls, module, name):\n    return super().apply(module, name)",
    "docstring": "Add pruning on the fly and reparametrization of a tensor. Adds the forward pre-hook that enables pruning on the fly and the reparametrization of a tensor in terms of the original tensor and the pruning mask. Args: module (nn.Module): module containing the tensor to prune name (str): parameter name within `` on which pruning will act.",
    "type": "method",
    "file_path": "pytorch\\torch\\nn\\utils\\prune.py",
    "ast_data": "FunctionDef name:apply arg:cls arg:module arg:name arguments arg arg arg Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_add_to_collections",
    "source_code": "def _add_to_collections(var, weight_collections):\n    for weight_collection in weight_collections:\n        if weight_collection == ops.GraphKeys.GLOBAL_VARIABLES:\n            continue\n        if isinstance(var, variables.PartitionedVariable):\n            for constituent_var in list(var):\n                ops.add_to_collection(weight_collection, constituent_var)\n        else:\n            ops.add_to_collection(weight_collection, var)",
    "docstring": "Adds a var to the list of weight_collections provided. Handles the case for partitioned and non-partitioned variables. Args: var: A variable or Partitioned Variable. weight_collections: List of collections to add variable to.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\feature_column\\feature_column.py",
    "ast_data": "FunctionDef name:_add_to_collections arg:var arg:weight_collections arguments arg arg For If Compare If Call For Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "upsample",
    "source_code": "def upsample(input, size=None, scale_factor=None, mode='nearest', align_corners=None):\n    warnings.warn('`nn.functional.upsample` is deprecated. Use `nn.functional.interpolate` instead.', stacklevel=2)\n    return interpolate(input, size, scale_factor, mode, align_corners)",
    "docstring": "Upsample input. Provided tensor is upsampled to either the given :attr: or the given :attr: .. warning:: This function is deprecated in favor of :func:. This is equivalent with `modemini-batch x channels x [optional depth] x [optional height] x widthnearestlinearbilinearbicubictrilinearscale_factormodelinearbilineartrilinear~torch.nn.Upsample` for concrete examples on how this affects the outputs.",
    "type": "function",
    "file_path": "pytorch\\torch\\nn\\functional.py",
    "ast_data": "FunctionDef name:upsample arg:input arg:size arg:scale_factor arg:mode arg:align_corners arguments arg arg arg arg arg Call Return return:yes Call"
  },
  {
    "library": "django",
    "name": "formfield",
    "source_code": "def formfield(self, **kwargs):\n    defaults = {}\n    if hasattr(self.remote_field, 'get_related_field'):\n        limit_choices_to = self.remote_field.limit_choices_to\n        defaults.update({'limit_choices_to': limit_choices_to})\n    defaults.update(kwargs)\n    return super().formfield(**defaults)",
    "docstring": "Pass `` to the field being constructed.",
    "type": "method",
    "file_path": "django\\django\\db\\models\\fields\\related.py",
    "ast_data": "FunctionDef name:formfield arg:self arguments arg arg Assign If Call Assign Call Call Return return:yes Call Call"
  },
  {
    "library": "scipy",
    "name": "dt",
    "source_code": "@property\ndef dt(self):\n    return self._dt",
    "docstring": "Return the sampling time of the system, for systems.",
    "type": "method",
    "file_path": "scipy\\scipy\\signal\\_ltisys.py",
    "ast_data": "FunctionDef name:dt arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "make_tensor",
    "source_code": "def make_tensor(v, arg_name):\n    if isinstance(v, tensor_pb2.TensorProto):\n        return v\n    elif isinstance(v, str):\n        pb = tensor_pb2.TensorProto()\n        text_format.Merge(v, pb)\n        return pb\n    raise TypeError(\"Don't know how to convert %s to a TensorProto for argument '%s'.\" % (repr(v), arg_name))",
    "docstring": "Ensure v is a TensorProto.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\execute.py",
    "ast_data": "FunctionDef name:make_tensor arg:v arg:arg_name arguments arg arg If Call Return return:yes If Call Assign Call Call Return return:yes Raise Call Call"
  },
  {
    "library": "pytorch",
    "name": "_scalar",
    "source_code": "def _scalar(x: Any) -> Number | None:\n    if isinstance(x, torch.Tensor) and x.shape == ():\n        return x.item()\n    return None",
    "docstring": "Convert a scalar tensor into a Python value.",
    "type": "function",
    "file_path": "pytorch\\torch\\onnx\\symbolic_helper.py",
    "ast_data": "FunctionDef name:_scalar arg:x arguments arg If BoolOp Call Compare Return return:yes Call Return return:no"
  },
  {
    "library": "django",
    "name": "get_current_timezone_name",
    "source_code": "def get_current_timezone_name():\n    return _get_timezone_name(get_current_timezone())",
    "docstring": "Return the name of the currently active time zone.",
    "type": "function",
    "file_path": "django\\django\\utils\\timezone.py",
    "ast_data": "FunctionDef name:get_current_timezone_name arguments Return return:yes Call Call"
  },
  {
    "library": "matplotlib",
    "name": "set_canvas",
    "source_code": "def set_canvas(self, canvas):\n    self.canvas = canvas",
    "docstring": "Set the canvas that contains the figure Parameters ---------- canvas : FigureCanvas",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\figure.py",
    "ast_data": "FunctionDef name:set_canvas arg:self arg:canvas arguments arg arg Assign"
  },
  {
    "library": "pytorch",
    "name": "_wrap_fx_args_as_onnxscript_args",
    "source_code": "def _wrap_fx_args_as_onnxscript_args(complete_args: list[fx_type_utils.Argument], complete_kwargs: dict[str, fx_type_utils.Argument], fx_name_to_onnxscript_value: dict[str, onnxscript_graph_building.TorchScriptTensor | tuple[onnxscript_graph_building.TorchScriptTensor, ...]], tracer: onnxscript_graph_building.TorchScriptTracingEvaluator) -> tuple[Sequence[onnxscript_graph_building.TorchScriptTensor | str | int | float | bool | list | complex | None], dict[str, fx_type_utils.Argument]]:\n    onnxscript_args = tuple((_retrieve_or_adapt_input_to_graph_set(arg, fx_name_to_onnxscript_value, tracer) for arg in complete_args))\n    onnxscript_kwargs = filter_incompatible_and_dtype_convert_kwargs(complete_kwargs)\n    return (onnxscript_args, onnxscript_kwargs)",
    "docstring": "Map all FX arguments of a node to arguments in TorchScript graph.",
    "type": "function",
    "file_path": "pytorch\\torch\\onnx\\_internal\\fx\\fx_onnx_interpreter.py",
    "ast_data": "FunctionDef name:_wrap_fx_args_as_onnxscript_args arg:complete_args arg:complete_kwargs arg:fx_name_to_onnxscript_value arg:tracer arguments arg arg arg arg Assign Call Call Assign Call Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "_InstancesOf",
    "source_code": "class _InstancesOf(_Constraint):\n\n    def __init__(self, type):\n        super().__init__()\n        self.type = type\n\n    def is_satisfied_by(self, val):\n        return isinstance(val, self.type)\n\n    def __str__(self):\n        return f'an instance of {_type_name(self.type)!r}'",
    "docstring": "Constraint representing instances of a given type. Parameters ---------- type : type The valid type.",
    "type": "class",
    "file_path": "scikit-learn\\sklearn\\utils\\_param_validation.py",
    "ast_data": "ClassDef name:_InstancesOf FunctionDef name:__init__ arg:self arg:type arguments arg arg Call Call Assign FunctionDef name:is_satisfied_by arg:self arg:val arguments arg arg Return return:yes Call FunctionDef name:__str__ arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "django",
    "name": "DjangoTemplates",
    "source_code": "class DjangoTemplates(EngineMixin, BaseRenderer):\n    backend = DjangoTemplates",
    "docstring": "Load Django templates from the built-in widget templates in django/forms/templates and from apps' 'templates' directory.",
    "type": "class",
    "file_path": "django\\django\\forms\\renderers.py",
    "ast_data": "ClassDef name:DjangoTemplates Assign"
  },
  {
    "library": "tensorflow",
    "name": "function_type",
    "source_code": "@property\ndef function_type(self):\n    return self._function_type",
    "docstring": "Return the FunctionType associated with this ConcreteFunction.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\polymorphic_function\\concrete_function.py",
    "ast_data": "FunctionDef name:function_type arg:self arguments arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "get_tracker_snapshot",
    "source_code": "def get_tracker_snapshot(self, type: str='current') -> dict[torch.device, dict[str, int]]:\n    if type == 'current':\n        return deepcopy(self._curr_mem_snap)\n    elif type == 'peak':\n        return deepcopy(self._peak_mem_snap)\n    else:\n        raise ValueError(f'Invalid type {type}')",
    "docstring": "Capture a snapshot of the memory usage breakdown per device, based on the specified type. Args: type (str): The type of snapshot to capture. Can be \"current\" for the current memory usage or \"peak\" for the peak memory usage. Defaults to \"current\". Returns: Dict[torch.device, Dict[str, int]]: A dictionary where each key is a torch.device, and each value is another dictionary. This inner dictionary has keys representing memory reference types as defined in `` and values representing the amount of memory consumed in bytes. Raises: ValueError: If an invalid type is specified.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\_tools\\mem_tracker.py",
    "ast_data": "FunctionDef name:get_tracker_snapshot arg:self arg:type arguments arg arg If Compare Return return:yes Call If Compare Return return:yes Call Raise Call"
  },
  {
    "library": "pytorch",
    "name": "synchronize",
    "source_code": "def synchronize(device: _device_t=None) -> None:\n    _lazy_init()\n    device = _get_device_index(device, optional=True)\n    return torch._C._xpu_synchronize(device)",
    "docstring": "Wait for all kernels in all streams on a XPU device to complete. Args: device (torch.device or int, optional): device for which to synchronize. It uses the current device, given by :func:, if :attr: is `` (default).",
    "type": "function",
    "file_path": "pytorch\\torch\\xpu\\__init__.py",
    "ast_data": "FunctionDef name:synchronize arg:device arguments arg Call Assign Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "gen_custom_ops_registration",
    "source_code": "def gen_custom_ops_registration(*, native_functions: Sequence[NativeFunction], selector: SelectiveBuilder, kernel_index: ETKernelIndex, rocm: bool) -> tuple[str, str]:\n    dispatch_key = DispatchKey.CPU\n    backend_index = kernel_index._to_backend_index()\n    static_init_dispatch_registrations = ''\n    ns_grouped_native_functions: dict[str, list[NativeFunction]] = defaultdict(list)\n    for native_function in native_functions:\n        ns_grouped_native_functions[native_function.namespace].append(native_function)\n    for namespace, functions in ns_grouped_native_functions.items():\n        if len(functions) == 0:\n            continue\n        dispatch_registrations_body = '\\n'.join(list(concatMap(dest.RegisterDispatchKey(backend_index, Target.REGISTRATION, selector, rocm=rocm, symint=False, class_method_name=None, skip_dispatcher_op_registration=False), functions)))\n        static_init_dispatch_registrations += f'\\nTORCH_LIBRARY_IMPL({namespace}, {dispatch_key}, m) {{\\n{dispatch_registrations_body}\\n}}'\n    anonymous_definition = '\\n'.join(list(concatMap(dest.RegisterDispatchKey(backend_index, Target.ANONYMOUS_DEFINITION, selector, rocm=rocm, symint=False, class_method_name=None, skip_dispatcher_op_registration=False), native_functions)))\n    return (anonymous_definition, static_init_dispatch_registrations)",
    "docstring": "Generate custom ops registration code for dest.RegisterDispatchKey. :param native_functions: a sequence of :param selector: for selective build. :param kernel_index: kernels for all the ops. :param rocm: bool for dest.RegisterDispatchKey. :return: generated C++ code to register custom operators into PyTorch",
    "type": "function",
    "file_path": "pytorch\\torchgen\\executorch\\api\\custom_ops.py",
    "ast_data": "FunctionDef name:gen_custom_ops_registration arguments arg arg arg arg Assign Assign Call Assign Call For Call For Call If Compare Call Assign Call Call Call Call Assign Call Call Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_strip_padding",
    "source_code": "def _strip_padding(tensors, pad_len):\n    if not tensors:\n        raise ValueError('tensors cannot be empty')\n    shape = tensors[0].shape\n    if len(shape) > 1:\n        raise ValueError('tensors must be 1D')\n    prefix_len = int(shape[0] - pad_len)\n    if prefix_len < 0:\n        raise ValueError('pad_len longer than tensor')\n    stripped = []\n    for t in tensors:\n        with ops.colocate_with(t):\n            stripped.append(array_ops.slice(t, [0], [prefix_len]))\n    return stripped",
    "docstring": "Strip the suffix padding added by _padded_split. Args: tensors: list of of identical length 1D tensors. pad_len: number of elements to be stripped from the end of each tensor. Returns: list of which are the stripped inputs. Raises: ValueError: tensors must be a non-empty list of 1D tensors, and each must be longer than pad_len.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\v1\\all_reduce.py",
    "ast_data": "FunctionDef name:_strip_padding arg:tensors arg:pad_len arguments arg arg If Raise Call Assign If Compare Call Raise Call Assign Call If Compare Raise Call Assign For With Call Call Call Return return:yes"
  },
  {
    "library": "django",
    "name": "_check_exclude",
    "source_code": "def _check_exclude(self, obj):\n    if obj.exclude is None:\n        return []\n    elif not isinstance(obj.exclude, (list, tuple)):\n        return must_be('a list or tuple', option='exclude', obj=obj, id='admin.E014')\n    field_counts = collections.Counter(obj.exclude)\n    if (duplicate_fields := [field for field, count in field_counts.items() if count > 1]):\n        return [checks.Error(\"The value of 'exclude' contains duplicate field(s).\", hint='Remove duplicates of %s.' % ', '.join(map(repr, duplicate_fields)), obj=obj.__class__, id='admin.E015')]\n    else:\n        return []",
    "docstring": "Check that exclude is a sequence without duplicates.",
    "type": "method",
    "file_path": "django\\django\\contrib\\admin\\checks.py",
    "ast_data": "FunctionDef name:_check_exclude arg:self arg:obj arguments arg arg If Compare Return return:no If Call Return return:yes Call Assign Call If Call Compare Return return:yes Call Call Call Return return:no"
  },
  {
    "library": "django",
    "name": "hasz",
    "source_code": "@property\ndef hasz(self):\n    return capi.geos_hasz(self.ptr)",
    "docstring": "Return whether the geometry has a Z dimension.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\geos\\geometry.py",
    "ast_data": "FunctionDef name:hasz arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "_process_parse_dates_argument",
    "source_code": "def _process_parse_dates_argument(parse_dates):\n    if parse_dates is True or parse_dates is None or parse_dates is False:\n        parse_dates = []\n    elif not hasattr(parse_dates, '__iter__'):\n        parse_dates = [parse_dates]\n    return parse_dates",
    "docstring": "Process parse_dates argument for read_sql functions",
    "type": "function",
    "file_path": "pandas\\pandas\\io\\sql.py",
    "ast_data": "FunctionDef name:_process_parse_dates_argument arg:parse_dates arguments arg If BoolOp Compare Compare Compare Assign If Call Assign Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "fit_transform",
    "source_code": "@_fit_context(prefer_skip_nested_validation=True)\ndef fit_transform(self, X, y=None):\n    self._fit_transform(X)\n    return self.embedding_",
    "docstring": "Compute the embedding vectors for data X and transform X. Parameters ---------- X : array-like of shape (n_samples, n_features) Training set. y : Ignored Not used, present here for API consistency by convention. Returns ------- X_new : array-like, shape (n_samples, n_components) Returns the instance itself.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\manifold\\_locally_linear.py",
    "ast_data": "FunctionDef name:fit_transform arg:self arg:X arg:y arguments arg arg arg Call Return return:yes Call"
  },
  {
    "library": "numpy",
    "name": "as_ctypes_type",
    "source_code": "@set_module('numpy.ctypeslib')\ndef as_ctypes_type(dtype):\n    return _ctype_from_dtype(np.dtype(dtype))",
    "docstring": "Convert a dtype into a ctypes type. Parameters ---------- dtype : dtype The dtype to convert Returns ------- ctype A ctype scalar, union, array, or struct Raises ------ NotImplementedError If the conversion is not possible Notes ----- This function does not losslessly round-trip in either direction. `ctypes.Structurectypes.Unionctypes.Unionctypes.Structure`\\ s - insert padding fields Examples -------- Converting a simple dtype: >>> dt = np.dtype('int8') >>> ctype = np.ctypeslib.as_ctypes_type(dt) >>> ctype Converting a structured dtype: >>> dt = np.dtype([('x', 'i4'), ('y', 'f4')]) >>> ctype = np.ctypeslib.as_ctypes_type(dt) >>> ctype",
    "type": "function",
    "file_path": "numpy\\numpy\\ctypeslib\\_ctypeslib.py",
    "ast_data": "FunctionDef name:as_ctypes_type arg:dtype arguments arg Return return:yes Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "graph_defs_equal",
    "source_code": "@tf_export('__internal__.graph_util.graph_defs_equal', v1=[])\ndef graph_defs_equal(graph_def_1: graph_pb2.GraphDef, graph_def_2: graph_pb2.GraphDef, treat_nan_as_equal: bool=False) -> bool:\n    if not isinstance(graph_def_1, graph_pb2.GraphDef):\n        raise TypeError(f'graph_def_1 must be a graph_pb2.GraphDef proto, but got type {type(graph_def_1)}.')\n    if not isinstance(graph_def_2, graph_pb2.GraphDef):\n        raise TypeError(f'graph_def_2 must be a graph_pb2.GraphDef proto, but got type {type(graph_def_2)}.')\n    options = _proto_comparators.ProtoComparisonOptions(treat_nan_as_equal)\n    return _proto_comparators.EqualsGraphDef(graph_def_1.SerializeToString(), graph_def_2.SerializeToString(), options)",
    "docstring": "Returns True iff the graph def arguments are structurally equivalent. The notion of equivalence encoded here checks that the set of NodeDefs in the GraphDef's function library and main graph body are identical. Additionally, it checks that the functions in the function library are equal as sets. Example usage: Args: graph_def_1: Instance of to compare. graph_def_2: Instance of to compare. treat_nan_as_equal: Boolean indicating whether or not to treat nan floating-point values as equal. This is crucial for any equivalence relation defined over GraphDefs, to ensure symmetry. Returns: Boolean indicating structural equivalence as described above. Raises: TypeError: If either of the GraphDefs are not instances of .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\graph_util_impl.py",
    "ast_data": "FunctionDef name:graph_defs_equal arg:graph_def_1 arg:graph_def_2 arg:treat_nan_as_equal arguments arg arg arg If Call Raise Call Call If Call Raise Call Call Assign Call Return return:yes Call Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "_need_to_materialize_module",
    "source_code": "def _need_to_materialize_module(module: nn.Module, ignored_params: set[nn.Parameter], ignored_modules: set[nn.Module]) -> tuple[bool, bool]:\n    managed_params = list(_get_orig_params(module, ignored_params))\n    is_meta_module = any((param.is_meta for param in managed_params))\n    for submodule in module.modules():\n        if submodule in ignored_modules:\n            continue\n        for buf in submodule.buffers(recurse=False):\n            is_meta_module |= buf.is_meta\n    is_torchdistX_deferred_init = not is_meta_module and _TORCHDISTX_AVAIL and any((fake.is_fake(param) for param in managed_params))\n    return (is_meta_module, is_torchdistX_deferred_init)",
    "docstring": "Return if `` needs to be materialized.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\fsdp\\_init_utils.py",
    "ast_data": "FunctionDef name:_need_to_materialize_module arg:module arg:ignored_params arg:ignored_modules arguments arg arg arg Assign Call Call Assign Call For Call If Compare For Call Assign BoolOp Call Call Return return:yes"
  },
  {
    "library": "kornia",
    "name": "forward",
    "source_code": "@torch.inference_mode()\ndef forward(self, images: Union[Tensor, list[Tensor]]) -> Union[Tensor, list[Tensor]]:\n    outputs: Union[Tensor, list[Tensor]]\n    if isinstance(images, (list, tuple)):\n        outputs = []\n        for image in images:\n            image = self.pre_processor(image[None])\n            output = self.model(image)\n            output = self.post_processor(output)\n            outputs.append(output[0])\n    else:\n        images = self.pre_processor(images)\n        outputs = self.model(images)\n        outputs = self.post_processor(outputs)\n    return outputs",
    "docstring": "Forward pass of the semantic segmentation model. Args: images: If list of RGB images. Each image is a Tensor with shape :math:. If Tensor, a Tensor with shape :math:. Returns: output tensor.",
    "type": "method",
    "file_path": "kornia\\kornia\\models\\segmentation\\base.py",
    "ast_data": "FunctionDef name:forward arg:self arg:images arguments arg arg If Call Assign For Assign Call Assign Call Assign Call Call Assign Call Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "django",
    "name": "pack",
    "source_code": "def pack(structure, data):\n    return struct.pack('<' + structure, *data)",
    "docstring": "Pack data into hex string with little endian format.",
    "type": "function",
    "file_path": "django\\django\\contrib\\gis\\db\\backends\\postgis\\pgraster.py",
    "ast_data": "FunctionDef name:pack arg:structure arg:data arguments arg arg Return return:yes Call"
  },
  {
    "library": "authlib",
    "name": "get_scope",
    "source_code": "def get_scope(self):\n    raise NotImplementedError()",
    "docstring": "A method to get scope of the authorization code. For instance, the column is called ``:: def get_scope(self): return self.scope :return: scope string",
    "type": "method",
    "file_path": "authlib\\authlib\\oauth2\\rfc6749\\models.py",
    "ast_data": "FunctionDef name:get_scope arg:self arguments arg Raise Call"
  },
  {
    "library": "tensorflow",
    "name": "wrap_or_unwrap",
    "source_code": "@tf_export('__internal__.tracking.wrap', v1=[])\ndef wrap_or_unwrap(value):\n    if isinstance(value, NoDependency):\n        return value.value\n    if isinstance(value, base.Trackable):\n        return value\n    elif type(value) == dict:\n        return _DictWrapper(value)\n    elif type(value) == collections.OrderedDict:\n        return _DictWrapper(value)\n    elif type(value) == list:\n        return ListWrapper(value)\n    elif isinstance(value, tuple) and _should_wrap_tuple(value):\n        return _TupleWrapper(value)\n    else:\n        return value",
    "docstring": "Wraps input value into trackable data structures. This is mostly useful for containers like list, dict, etc, which could contain trackable objects in it. Wrapped data structure will be tracked when associated with a , so that save model/checkpoint can properly track the dependency. It will also unwrap NoDependency objects. Args: value: the input object to be wrapped. Returns: Wrapped trackable data structure.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\trackable\\data_structures.py",
    "ast_data": "FunctionDef name:wrap_or_unwrap arg:value arguments arg If Call Return return:yes If Call Return return:yes If Compare Call Return return:yes Call If Compare Call Return return:yes Call If Compare Call Return return:yes Call If BoolOp Call Call Return return:yes Call Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "_update_line_limits",
    "source_code": "def _update_line_limits(self, line):\n    path = line.get_path()\n    if path.vertices.size == 0:\n        return\n    line_trf = line.get_transform()\n    if line_trf == self.transData:\n        data_path = path\n    elif any(line_trf.contains_branch_seperately(self.transData)):\n        trf_to_data = line_trf - self.transData\n        if self.transData.is_affine:\n            line_trans_path = line._get_transformed_path()\n            na_path, _ = line_trans_path.get_transformed_path_and_affine()\n            data_path = trf_to_data.transform_path_affine(na_path)\n        else:\n            data_path = trf_to_data.transform_path(path)\n    else:\n        data_path = path\n    if not data_path.vertices.size:\n        return\n    updatex, updatey = line_trf.contains_branch_seperately(self.transData)\n    if self.name != 'rectilinear':\n        if updatex and line_trf == self.get_yaxis_transform():\n            updatex = False\n        if updatey and line_trf == self.get_xaxis_transform():\n            updatey = False\n    self.dataLim.update_from_path(data_path, self.ignore_existing_data_limits, updatex=updatex, updatey=updatey)\n    self.ignore_existing_data_limits = False",
    "docstring": "Figures out the data limit of the given line, updating .",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\axes\\_base.py",
    "ast_data": "FunctionDef name:_update_line_limits arg:self arg:line arguments arg arg Assign Call If Compare Return return:no Assign Call If Compare Assign If Call Call Assign If Assign Call Assign Call Assign Call Assign Call Assign If Return return:no Assign Call If Compare If BoolOp Compare Call Assign If BoolOp Compare Call Assign Call Assign"
  },
  {
    "library": "scipy",
    "name": "__call__",
    "source_code": "def __call__(self, *, res_new, res_old):\n    return bool(self.accept_reject(res_new, res_old))",
    "docstring": "f_new and f_old are mandatory in kwargs",
    "type": "method",
    "file_path": "scipy\\scipy\\optimize\\_basinhopping.py",
    "ast_data": "FunctionDef name:__call__ arg:self arguments arg arg arg Return return:yes Call Call"
  },
  {
    "library": "numpy",
    "name": "_flop_count",
    "source_code": "def _flop_count(idx_contraction, inner, num_terms, size_dictionary):\n    overall_size = _compute_size_by_dict(idx_contraction, size_dictionary)\n    op_factor = max(1, num_terms - 1)\n    if inner:\n        op_factor += 1\n    return overall_size * op_factor",
    "docstring": "Computes the number of FLOPS in the contraction. Parameters ---------- idx_contraction : iterable The indices involved in the contraction inner : bool Does this contraction require an inner product? num_terms : int The number of terms in a contraction size_dictionary : dict The size of each of the indices in idx_contraction Returns ------- flop_count : int The total number of FLOPS required for the contraction. Examples -------- >>> _flop_count('abc', False, 1, {'a': 2, 'b':3, 'c':5}) 30 >>> _flop_count('abc', True, 2, {'a': 2, 'b':3, 'c':5}) 60",
    "type": "function",
    "file_path": "numpy\\numpy\\_core\\einsumfunc.py",
    "ast_data": "FunctionDef name:_flop_count arg:idx_contraction arg:inner arg:num_terms arg:size_dictionary arguments arg arg arg arg Assign Call Assign Call If Return return:yes"
  },
  {
    "library": "sphinx",
    "name": "pending_logging",
    "source_code": "@contextmanager\ndef pending_logging() -> Iterator[MemoryHandler]:\n    logger = logging.getLogger(NAMESPACE)\n    try:\n        with suppress_logging() as memhandler:\n            yield memhandler\n    finally:\n        memhandler.flushTo(logger)",
    "docstring": "Context manager to postpone logging all logs temporarily. For example:: >>> with pending_logging(): >>> logger.warning('Warning message!') # not flushed yet >>> some_long_process() >>> Warning message! # the warning is flushed here",
    "type": "function",
    "file_path": "sphinx\\sphinx\\util\\logging.py",
    "ast_data": "FunctionDef name:pending_logging arguments Assign Call Try With Call Call"
  },
  {
    "library": "scipy",
    "name": "McCormick",
    "source_code": "class McCormick(Benchmark):\n\n    def __init__(self, dimensions=2):\n        Benchmark.__init__(self, dimensions)\n        self._bounds = [(-1.5, 4.0), (-3.0, 3.0)]\n        self.global_optimum = [[-0.5471975602214493, -1.547197559268372]]\n        self.fglob = -1.913222954981037\n\n    def fun(self, x, *args):\n        self.nfev += 1\n        return sin(x[0] + x[1]) + (x[0] - x[1]) ** 2 - 1.5 * x[0] + 2.5 * x[1] + 1",
    "docstring": "McCormick objective function. This class defines the McCormick [1]_ global optimization problem. This is a multimodal minimization problem defined as follows: .. math:: f_{\\text{McCormick}}(x) = - x_{1} + 2 x_{2} + \\left(x_{1} - x_{2}\\right)^{2} + \\sin\\left(x_{1} + x_{2}\\right) + 1 with :math:, :math:. *Global optimum*: :math: for :math: .. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions For Global Optimization Problems Int. Journal of Mathematical Modelling and Numerical Optimisation, 2013, 4, 150-194.",
    "type": "class",
    "file_path": "scipy\\benchmarks\\benchmarks\\go_benchmark_functions\\go_funcs_M.py",
    "ast_data": "ClassDef name:McCormick FunctionDef name:__init__ arg:self arg:dimensions arguments arg arg Call Assign Assign Assign FunctionDef name:fun arg:self arg:x arguments arg arg arg Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "dot",
    "source_code": "def dot(self, other):\n    if np.isscalar(other):\n        return self * other\n    else:\n        return self @ other",
    "docstring": "Ordinary dot product Examples -------- >>> import numpy as np >>> from scipy.sparse import csr_array >>> A = csr_array([[1, 2, 0], [0, 0, 3], [4, 0, 5]]) >>> v = np.array([1, 0, -1]) >>> A.dot(v) array([ 1, -3, -1], dtype=int64)",
    "type": "method",
    "file_path": "scipy\\scipy\\sparse\\_base.py",
    "ast_data": "FunctionDef name:dot arg:self arg:other arguments arg arg If Call Return return:yes Return return:yes"
  },
  {
    "library": "pandas",
    "name": "_validate_skipfooter_arg",
    "source_code": "def _validate_skipfooter_arg(skipfooter: int) -> int:\n    if not is_integer(skipfooter):\n        raise ValueError('skipfooter must be an integer')\n    if skipfooter < 0:\n        raise ValueError('skipfooter cannot be negative')\n    return skipfooter",
    "docstring": "Validate the 'skipfooter' parameter. Checks whether 'skipfooter' is a non-negative integer. Raises a ValueError if that is not the case. Parameters ---------- skipfooter : non-negative integer The number of rows to skip at the end of the file. Returns ------- validated_skipfooter : non-negative integer The original input if the validation succeeds. Raises ------ ValueError : 'skipfooter' was not a non-negative integer.",
    "type": "function",
    "file_path": "pandas\\pandas\\io\\parsers\\python_parser.py",
    "ast_data": "FunctionDef name:_validate_skipfooter_arg arg:skipfooter arguments arg If Call Raise Call If Compare Raise Call Return return:yes"
  },
  {
    "library": "django",
    "name": "__html__",
    "source_code": "def __html__(self):\n    return self",
    "docstring": "Return the html representation of a string for interoperability. This allows other template engines to understand Django's SafeData.",
    "type": "method",
    "file_path": "django\\django\\utils\\safestring.py",
    "ast_data": "FunctionDef name:__html__ arg:self arguments arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_get_module",
    "source_code": "def _get_module(node: Node, named_modules: dict[str, torch.nn.Module]) -> Optional[torch.nn.Module]:\n    if node.op == 'call_module' and str(node.target) in named_modules:\n        return named_modules[str(node.target)]\n    else:\n        return None",
    "docstring": "If refers to a call_module node, return the module, else None.",
    "type": "function",
    "file_path": "pytorch\\torch\\ao\\quantization\\fx\\utils.py",
    "ast_data": "FunctionDef name:_get_module arg:node arg:named_modules arguments arg arg If BoolOp Compare Compare Call Return return:yes Call Return return:no"
  },
  {
    "library": "tensorflow",
    "name": "initialized_value",
    "source_code": "@deprecated(None, 'Use Variable.read_value. Variables in 2.X are initialized automatically both in eager and graph (inside tf.defun) contexts.')\ndef initialized_value(self):\n    raise NotImplementedError",
    "docstring": "Returns the value of the initialized variable. You should use this instead of the variable itself to initialize another variable with a value that depends on the value of this variable. Returns: A holding the value of this variable after its initializer has run.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\variables.py",
    "ast_data": "FunctionDef name:initialized_value arg:self arguments arg Raise Call"
  },
  {
    "library": "tensorflow",
    "name": "_make_grouped_mirrored",
    "source_code": "def _make_grouped_mirrored(values):\n    if len(values) == 1:\n        return values_lib.Mirrored(values)\n    g = control_flow_ops.group(values)\n    if not all((tensor_util.is_tf_type(v) for v in values)):\n        return g\n    with_dep = []\n    for v in values:\n        with ops.device(v.device), ops.control_dependencies([g]):\n            with_dep.append(array_ops.identity(v))\n    return values_lib.Mirrored(with_dep)",
    "docstring": "Convert per-replica list into Mirrored type with grouping.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\distribute_utils.py",
    "ast_data": "FunctionDef name:_make_grouped_mirrored arg:values arguments arg If Compare Call Return return:yes Call Assign Call If Call Call Return return:yes Assign For With Call Call Call Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "shape",
    "source_code": "@property\ndef shape(self):\n    nrows = self._row_partition.static_nrows\n    ncols = self._row_partition.static_uniform_row_length\n    value_shape = self._values.shape[1:]\n    return tensor_shape.TensorShape([nrows, ncols]).concatenate(value_shape)",
    "docstring": "The statically known shape of this ragged tensor. Returns: A containing the statically known shape of this ragged tensor. Ragged dimensions have a size of . Examples: >>> tf.ragged.constant([[0], [1, 2]]).shape TensorShape([2, None]) >>> tf.ragged.constant([[[0, 1]], [[1, 2], [3, 4]]], ragged_rank=1).shape TensorShape([2, None, 2])",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\ragged_tensor.py",
    "ast_data": "FunctionDef name:shape arg:self arguments arg Assign Assign Assign Return return:yes Call Call"
  },
  {
    "library": "pandas",
    "name": "_get_skiprows",
    "source_code": "def _get_skiprows(skiprows: int | Sequence[int] | slice | None) -> int | Sequence[int]:\n    if isinstance(skiprows, slice):\n        start, step = (skiprows.start or 0, skiprows.step or 1)\n        return list(range(start, skiprows.stop, step))\n    elif isinstance(skiprows, numbers.Integral) or is_list_like(skiprows):\n        return cast('int | Sequence[int]', skiprows)\n    elif skiprows is None:\n        return 0\n    raise TypeError(f'{type(skiprows).__name__} is not a valid type for skipping rows')",
    "docstring": "Get an iterator given an integer, slice or container. Parameters ---------- skiprows : int, slice, container The iterator to use to skip rows; can also be a slice. Raises ------ TypeError * If is not a slice, integer, or Container Returns ------- it : iterable A proper iterator to use to skip rows of a DataFrame.",
    "type": "function",
    "file_path": "pandas\\pandas\\io\\html.py",
    "ast_data": "FunctionDef name:_get_skiprows arg:skiprows arguments arg If Call Assign BoolOp BoolOp Return return:yes Call Call If BoolOp Call Call Return return:yes Call If Compare Return return:yes Raise Call Call"
  },
  {
    "library": "django",
    "name": "wkt",
    "source_code": "@property\ndef wkt(self):\n    return wkt_w(dim=3 if self.hasz else 2, trim=True).write(self).decode()",
    "docstring": "Return the WKT (Well-Known Text) representation of this Geometry.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\geos\\geometry.py",
    "ast_data": "FunctionDef name:wkt arg:self arguments arg Return return:yes Call Call Call"
  },
  {
    "library": "django",
    "name": "get_prefix",
    "source_code": "def get_prefix(self):\n    return self.prefix",
    "docstring": "Return the prefix to use for forms.",
    "type": "method",
    "file_path": "django\\django\\views\\generic\\edit.py",
    "ast_data": "FunctionDef name:get_prefix arg:self arguments arg Return return:yes"
  },
  {
    "library": "pandas",
    "name": "isna",
    "source_code": "def isna(self) -> npt.NDArray[np.bool_]:\n    return self._codes == -1",
    "docstring": "Detect missing values Missing values (-1 in .codes) are detected. Returns ------- np.ndarray[bool] of whether my values are null See Also -------- isna : Top-level isna. isnull : Alias of isna. Categorical.notna : Boolean inverse of Categorical.isna.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\arrays\\categorical.py",
    "ast_data": "FunctionDef name:isna arg:self arguments arg Return return:yes Compare"
  },
  {
    "library": "tensorflow",
    "name": "create_noop_writer",
    "source_code": "@tf_export('summary.create_noop_writer', v1=[])\ndef create_noop_writer():\n    return _NoopSummaryWriter()",
    "docstring": "Returns a summary writer that does nothing. This is useful as a placeholder in code that expects a context manager.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\summary_ops_v2.py",
    "ast_data": "FunctionDef name:create_noop_writer arguments Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "import_module",
    "source_code": "def import_module(self, module):\n    importlib.import_module(module)",
    "docstring": "Imports a Python module that has torch.library registrations. Generally, to extend PyTorch with custom operators, a user will create a Python module whose import triggers registration of the custom operators via a torch.ops.load_library call or a call to one or more torch.library.* APIs. It is unexpected for Python modules to have side effects, so some linters and formatters will complain. Use this API to import Python modules that contain these torch.library side effects. Args: module (str): The name of the Python module to import",
    "type": "method",
    "file_path": "pytorch\\torch\\_ops.py",
    "ast_data": "FunctionDef name:import_module arg:self arg:module arguments arg arg Call"
  },
  {
    "library": "matplotlib",
    "name": "_from_ordinalf",
    "source_code": "def _from_ordinalf(x, tz=None):\n    tz = _get_tzinfo(tz)\n    dt = np.datetime64(get_epoch()) + np.timedelta64(int(np.round(x * MUSECONDS_PER_DAY)), 'us')\n    if dt < np.datetime64('0001-01-01') or dt >= np.datetime64('10000-01-01'):\n        raise ValueError(f'Date ordinal {x} converts to {dt} (using epoch {get_epoch()}), but Matplotlib dates must be between year 0001 and 9999.')\n    dt = dt.tolist()\n    dt = dt.replace(tzinfo=dateutil.tz.gettz('UTC'))\n    dt = dt.astimezone(tz)\n    if np.abs(x) > 70 * 365:\n        ms = round(dt.microsecond / 20) * 20\n        if ms == 1000000:\n            dt = dt.replace(microsecond=0) + datetime.timedelta(seconds=1)\n        else:\n            dt = dt.replace(microsecond=ms)\n    return dt",
    "docstring": "Convert Gregorian float of the date, preserving hours, minutes, seconds and microseconds. Return value is a . The input date *x* is a float in ordinal days at UTC, and the output will be the specified object corresponding to that time in timezone *tz*, or if *tz* is `timezone`.",
    "type": "function",
    "file_path": "matplotlib\\lib\\matplotlib\\dates.py",
    "ast_data": "FunctionDef name:_from_ordinalf arg:x arg:tz arguments arg arg Assign Call Assign Call Call Call Call Call If BoolOp Compare Call Compare Call Raise Call Call Assign Call Assign Call Call Assign Call If Compare Call Assign Call If Compare Assign Call Call Assign Call Return return:yes"
  },
  {
    "library": "django",
    "name": "num_points",
    "source_code": "@property\ndef num_points(self):\n    return self.point_count",
    "docstring": "Alias for (same name method in GEOS API.)",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\gdal\\geometries.py",
    "ast_data": "FunctionDef name:num_points arg:self arguments arg Return return:yes"
  },
  {
    "library": "numpy",
    "name": "write_array_header_2_0",
    "source_code": "@set_module('numpy.lib.format')\ndef write_array_header_2_0(fp, d):\n    _write_array_header(fp, d, (2, 0))",
    "docstring": "Write the header for an array using the 2.0 format. The 2.0 format allows storing very large structured arrays. Parameters ---------- fp : filelike object d : dict This has the appropriate entries for writing its string representation to the header of the file.",
    "type": "function",
    "file_path": "numpy\\numpy\\lib\\_format_impl.py",
    "ast_data": "FunctionDef name:write_array_header_2_0 arg:fp arg:d arguments arg arg Call Call"
  },
  {
    "library": "tensorflow",
    "name": "base_operator",
    "source_code": "@property\ndef base_operator(self):\n    return self._base_operator",
    "docstring": "If this operator is , this is the .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\linalg\\linear_operator_low_rank_update.py",
    "ast_data": "FunctionDef name:base_operator arg:self arguments arg Return return:yes"
  },
  {
    "library": "scrapy",
    "name": "open",
    "source_code": "def open(self, spider: Spider) -> Deferred[None] | None:\n    pass",
    "docstring": "Called when the spider is opened by the engine. It receives the spider instance as argument and it's useful to execute initialization code. :param spider: the spider object for the current crawl :type spider: :class:",
    "type": "method",
    "file_path": "scrapy\\scrapy\\core\\scheduler.py",
    "ast_data": "FunctionDef name:open arg:self arg:spider arguments arg arg"
  },
  {
    "library": "tensorflow",
    "name": "_slice_ragged_row_dimension",
    "source_code": "def _slice_ragged_row_dimension(rt_input, row_key):\n    if row_key.start is None and row_key.stop is None and (row_key.step is None):\n        return rt_input\n    new_starts = rt_input.row_splits[:-1][row_key]\n    new_limits = rt_input.row_splits[1:][row_key]\n    zero_pad = array_ops.zeros([1], rt_input.row_splits.dtype)\n    if row_key.step is None or row_key.step == 1:\n        new_splits = array_ops.concat([zero_pad[array_ops.size(new_starts):], new_starts[:1], new_limits], axis=0)\n        values_start = new_splits[0]\n        values_limit = new_splits[-1]\n        return ragged_tensor.RaggedTensor.from_row_splits(rt_input.values[values_start:values_limit], new_splits - values_start, validate=False)\n    else:\n        return _build_ragged_tensor_from_value_ranges(new_starts, new_limits, 1, rt_input.values)",
    "docstring": "Slice the outer dimension of according to the given . Args: rt_input: The to slice. row_key: The object that should be used to slice . Returns: A containing the indicated slice of .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\ragged_getitem.py",
    "ast_data": "FunctionDef name:_slice_ragged_row_dimension arg:rt_input arg:row_key arguments arg arg If BoolOp Compare Compare Compare Return return:yes Assign Assign Assign Call If BoolOp Compare Compare Assign Call Call Assign Assign Return return:yes Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "as_integer_ratio",
    "source_code": "def as_integer_ratio(self) -> tuple['SymInt', builtins.int]:\n    return (self, 1)",
    "docstring": "Represent this int as an exact integer ratio",
    "type": "method",
    "file_path": "pytorch\\torch\\__init__.py",
    "ast_data": "FunctionDef name:as_integer_ratio arg:self arguments arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "__call__",
    "source_code": "@staticmethod\ndef __call__(fn, fallback_fn=lambda: None):\n    fallback_fn()",
    "docstring": "fn gets called at compile time in TorchDynamo, calls fallback_fn otherwise",
    "type": "method",
    "file_path": "pytorch\\torch\\_dynamo\\comptime.py",
    "ast_data": "FunctionDef name:__call__ arg:fn arg:fallback_fn arguments arg arg arguments Call"
  },
  {
    "library": "pytorch",
    "name": "Shard",
    "source_code": "@dataclass\nclass Shard:\n    __slots__ = ['tensor', 'metadata']\n    tensor: torch.Tensor\n    metadata: ShardMetadata\n\n    def __post_init__(self) -> None:\n        if list(self.tensor.size()) != self.metadata.shard_sizes:\n            raise ValueError(f'Shard tensor size does not match with metadata.shard_lengths! Found shard tensor size: {list(self.tensor.size())}, metadata.shard_lengths: {self.metadata.shard_sizes}, ')\n        placement_device = self.metadata.placement\n        if placement_device is not None and placement_device.device() != self.tensor.device:\n            raise ValueError(f\"Local shard tensor device does not match with local Shard's placement! Found local shard tensor device: {self.tensor.device}, local shard metadata placement device: {placement_device.device()}\")\n\n    @classmethod\n    def from_tensor_and_offsets(cls, tensor: torch.Tensor, shard_offsets: list[int], rank: int) -> 'Shard':\n        shard_sizes = list(tensor.size())\n        placement = _remote_device(f'rank:{rank}/{str(tensor.device)}')\n        shard_meta = ShardMetadata(shard_offsets=shard_offsets, shard_sizes=shard_sizes, placement=placement)\n        return Shard(tensor, shard_meta)",
    "docstring": "Container which holds the data for a shard as a Tensor and also the associated metadata for that shard. Args: tensor(torch.Tensor): Local tensor for the shard. metadata(:class ): The metadata for the shard, including offsets, lengths and device placement.",
    "type": "class",
    "file_path": "pytorch\\torch\\distributed\\_shard\\sharded_tensor\\shard.py",
    "ast_data": "ClassDef name:Shard Assign FunctionDef name:__post_init__ arg:self arguments arg If Compare Call Call Raise Call Call Call Assign If BoolOp Compare Compare Call Raise Call Call FunctionDef name:from_tensor_and_offsets arg:cls arg:tensor arg:shard_offsets arg:rank arguments arg arg arg arg Assign Call Call Assign Call Call Assign Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_serialize",
    "source_code": "@abc.abstractmethod\ndef _serialize(self):\n    raise NotImplementedError('%s._serialize()' % type(self).__name__)",
    "docstring": "Returns a nested tuple containing the state of this TypeSpec. The serialization may contain the following value types: boolean, integer, string, float, None, , , , , , and nested tuples, namedtuples, dicts, and OrderedDicts of any of the above. This method is used to provide default definitions for: equality testing (__eq__, __ne__), hashing (__hash__), pickling (__reduce__), string representation (__repr__), , , and protobuf serialization (e.g. TensorInfo and StructuredValue).",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\type_spec.py",
    "ast_data": "FunctionDef name:_serialize arg:self arguments arg Raise Call Call"
  },
  {
    "library": "cherrypy",
    "name": "errmsg",
    "source_code": "def errmsg(self, s):\n    return 'Digest Authorization header: %s' % s",
    "docstring": "Make an error message for HTTP Digest Authorization.",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\lib\\auth_digest.py",
    "ast_data": "FunctionDef name:errmsg arg:self arg:s arguments arg arg Return return:yes"
  },
  {
    "library": "scrapy",
    "name": "deferred_from_coro",
    "source_code": "def deferred_from_coro(o: Awaitable[_T] | _T2) -> Deferred[_T] | _T2:\n    if isinstance(o, Deferred):\n        return o\n    if inspect.isawaitable(o):\n        if not is_asyncio_reactor_installed():\n            return Deferred.fromCoroutine(cast(Coroutine[Deferred[Any], Any, _T], o))\n        event_loop = _get_asyncio_event_loop()\n        return Deferred.fromFuture(asyncio.ensure_future(o, loop=event_loop))\n    return o",
    "docstring": "Converts a coroutine or other awaitable object into a Deferred, or returns the object as is if it isn't a coroutine.",
    "type": "function",
    "file_path": "scrapy\\scrapy\\utils\\defer.py",
    "ast_data": "FunctionDef name:deferred_from_coro arg:o arguments arg If Call Return return:yes If Call If Call Return return:yes Call Call Assign Call Return return:yes Call Call Return return:yes"
  },
  {
    "library": "numpy",
    "name": "traverse",
    "source_code": "def traverse(obj, visit, parents=[], result=None, *args, **kwargs):\n    if _is_visit_pair(obj):\n        if obj[0] == 'parent_block':\n            return obj\n        new_result = visit(obj, parents, result, *args, **kwargs)\n        if new_result is not None:\n            assert _is_visit_pair(new_result)\n            return new_result\n        parent = obj\n        result_key, obj = obj\n    else:\n        parent = (None, obj)\n        result_key = None\n    if isinstance(obj, list):\n        new_result = []\n        for index, value in enumerate(obj):\n            new_index, new_item = traverse((index, value), visit, parents + [parent], result, *args, **kwargs)\n            if new_index is not None:\n                new_result.append(new_item)\n    elif isinstance(obj, dict):\n        new_result = {}\n        for key, value in obj.items():\n            new_key, new_value = traverse((key, value), visit, parents + [parent], result, *args, **kwargs)\n            if new_key is not None:\n                new_result[new_key] = new_value\n    else:\n        new_result = obj\n    if result_key is None:\n        return new_result\n    return (result_key, new_result)",
    "docstring": "Traverse f2py data structure with the following visit function: def visit(item, parents, result, *args, **kwargs): \"\"\" parents is a list of key-\"f2py data structure\" pairs from which items are taken from. result is a f2py data structure that is filled with the return value of the visit function. item is 2-tuple (index, value) if parents[-1][1] is a list item is 2-tuple (key, value) if parents[-1][1] is a dict The return value of visit must be None, or of the same kind as item, that is, if parents[-1] is a list, the return value must be 2-tuple (new_index, new_value), or if parents[-1] is a dict, the return value must be 2-tuple (new_key, new_value). If new_index or new_value is None, the return value of visit is ignored, that is, it will not be added to the result. If the return value is None, the content of obj will be traversed, otherwise not. \"\"\"",
    "type": "function",
    "file_path": "numpy\\numpy\\f2py\\crackfortran.py",
    "ast_data": "FunctionDef name:traverse arg:obj arg:visit arg:parents arg:result arguments arg arg arg arg arg arg If Call If Compare Return return:yes Assign Call If Compare Call Return return:yes Assign Assign Assign Assign If Call Assign For Call Assign Call If Compare Call If Call Assign For Call Assign Call If Compare Assign Assign If Compare Return return:yes Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "shape_n",
    "source_code": "@tf_export('shape_n')\n@dispatch.add_dispatch_support\ndef shape_n(input, out_type=dtypes.int32, name=None):\n    return gen_array_ops.shape_n(input, out_type=out_type, name=name)",
    "docstring": "Returns shape of a list of tensors. Given a list of tensors, is much faster than applying to each tensor individually. >>> a = tf.ones([1, 2]) >>> b = tf.ones([2, 3]) >>> c = tf.ones([3, 4]) >>> tf.shape_n([a, b, c]) [, , ] Args: input: A list of at least 1 object with the same dtype. out_type: The specified output type of the operation ( or ). Defaults to (optional). name: A name for the operation (optional). Returns: A list of specifying the shape of each input tensor with type of .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\array_ops.py",
    "ast_data": "FunctionDef name:shape_n arg:input arg:out_type arg:name arguments arg arg arg Return return:yes Call Call"
  },
  {
    "library": "pandas",
    "name": "validate_cum_func_with_skipna",
    "source_code": "def validate_cum_func_with_skipna(skipna: bool, args, kwargs, name) -> bool:\n    if not is_bool(skipna):\n        args = (skipna,) + args\n        skipna = True\n    elif isinstance(skipna, np.bool_):\n        skipna = bool(skipna)\n    validate_cum_func(args, kwargs, fname=name)\n    return skipna",
    "docstring": "If this function is called via the 'numpy' library, the third parameter in its signature is 'dtype', which takes either a 'numpy' dtype or 'None', so check if the 'skipna' parameter is a boolean or not",
    "type": "function",
    "file_path": "pandas\\pandas\\compat\\numpy\\function.py",
    "ast_data": "FunctionDef name:validate_cum_func_with_skipna arg:skipna arg:args arg:kwargs arg:name arguments arg arg arg arg If Call Assign Assign If Call Assign Call Call Return return:yes"
  },
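A standalone sketch of the idea behind this validator (not pandas' actual code): when numpy dispatches `cumsum(dtype, ...)` to a pandas object, the first positional argument is a dtype rather than `skipna`, so it is shuffled back into `args`.

```python
# Minimal re-implementation of the skipna/dtype shuffle, for illustration only.
import numpy as np

def normalize_skipna(skipna, args):
    if not isinstance(skipna, (bool, np.bool_)):
        # First positional arg was numpy's `dtype`; restore the default skipna.
        args = (skipna,) + args
        skipna = True
    return bool(skipna), args

print(normalize_skipna(False, ()))  # (False, ())
print(normalize_skipna(None, ()))   # (True, (None,)) -- numpy's dtype=None case
```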
  {
    "library": "numpy",
    "name": "masked_less",
    "source_code": "def masked_less(x, value, copy=True):\n    return masked_where(less(x, value), x, copy=copy)",
    "docstring": "Mask an array where less than a given value. This function is a shortcut to `condition` = (x >> import numpy as np >>> import numpy.ma as ma >>> a = np.arange(4) >>> a array([0, 1, 2, 3]) >>> ma.masked_less(a, 2) masked_array(data=[--, --, 2, 3], mask=[ True, True, False, False], fill_value=999999)",
    "type": "function",
    "file_path": "numpy\\numpy\\ma\\core.py",
    "ast_data": "FunctionDef name:masked_less arg:x arg:value arg:copy arguments arg arg arg Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "load_binary",
    "source_code": "def load_binary(self, package: str, resource: str) -> bytes:\n    path = self._zipfile_path(package, resource)\n    return self.zip_reader.get_record(path)",
    "docstring": "Load raw bytes. Args: package (str): The name of module package (e.g. ``). resource (str): The unique name for the resource. Returns: bytes: The loaded data.",
    "type": "method",
    "file_path": "pytorch\\torch\\package\\package_importer.py",
    "ast_data": "FunctionDef name:load_binary arg:self arg:package arg:resource arguments arg arg arg Assign Call Return return:yes Call"
  },
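A usage sketch under the assumption that a package archive was written with `torch.package.PackageExporter.save_binary`; the file name, package, and resource names here are hypothetical.

```python
# Hypothetical round-trip: read raw bytes back out of a torch.package archive.
from torch.package import PackageImporter

importer = PackageImporter("model_archive.pt")           # hypothetical path
raw: bytes = importer.load_binary("my_pkg", "blob.bin")  # hypothetical names
```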
  {
    "library": "tensorflow",
    "name": "make_one_shot_iterator",
    "source_code": "def make_one_shot_iterator(self):\n    return self._make_one_shot_iterator()",
    "docstring": "Get a one time use iterator for DistributedDatasetV1. Note: This API is deprecated. Please use to iterate over the dataset or to create an iterator. Returns: A DistributedIteratorV1 instance.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\v1\\input_lib.py",
    "ast_data": "FunctionDef name:make_one_shot_iterator arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "seaborn",
    "name": "regress_out",
    "source_code": "def regress_out(self, a, b):\n    a_mean = a.mean()\n    a = a - a_mean\n    b = b - b.mean()\n    b = np.c_[b]\n    a_prime = a - b.dot(np.linalg.pinv(b).dot(a))\n    return np.asarray(a_prime + a_mean).reshape(a.shape)",
    "docstring": "Regress b from a keeping a's original mean.",
    "type": "method",
    "file_path": "seaborn\\seaborn\\regression.py",
    "ast_data": "FunctionDef name:regress_out arg:self arg:a arg:b arguments arg arg arg Assign Call Assign Assign Call Assign Assign Call Call Call Return return:yes Call Call"
  },
  {
    "library": "django",
    "name": "xframe_options_deny",
    "source_code": "def xframe_options_deny(view_func):\n    if iscoroutinefunction(view_func):\n\n        async def _view_wrapper(*args, **kwargs):\n            response = await view_func(*args, **kwargs)\n            if response.get('X-Frame-Options') is None:\n                response['X-Frame-Options'] = 'DENY'\n            return response\n    else:\n\n        def _view_wrapper(*args, **kwargs):\n            response = view_func(*args, **kwargs)\n            if response.get('X-Frame-Options') is None:\n                response['X-Frame-Options'] = 'DENY'\n            return response\n    return wraps(view_func)(_view_wrapper)",
    "docstring": "Modify a view function so its response has the X-Frame-Options HTTP header set to 'DENY' as long as the response doesn't already have that header set. Usage: @xframe_options_deny def some_view(request): ...",
    "type": "function",
    "file_path": "django\\django\\views\\decorators\\clickjacking.py",
    "ast_data": "FunctionDef name:xframe_options_deny arg:view_func arguments arg If Call AsyncFunctionDef name:_view_wrapper arguments arg arg Assign Call If Compare Call Assign Return return:yes FunctionDef name:_view_wrapper arguments arg arg Assign Call If Compare Call Assign Return return:yes Return return:yes Call Call"
  },
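A runnable sketch of the usage pattern from the docstring, covering the async branch the decorator handles as well:

```python
from django.http import HttpResponse
from django.views.decorators.clickjacking import xframe_options_deny

@xframe_options_deny
def some_view(request):
    return HttpResponse("response gains X-Frame-Options: DENY")

@xframe_options_deny
async def some_async_view(request):
    # The coroutine wrapper branch applies the same header after awaiting.
    return HttpResponse("same header on the awaited response")
```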
  {
    "library": "matplotlib",
    "name": "_update_label_position",
    "source_code": "def _update_label_position(self, renderer):\n    if not self._autolabelpos:\n        return\n    bboxes, bboxes2 = self._get_tick_boxes_siblings(renderer=renderer)\n    x, y = self.label.get_position()\n    if self.label_position == 'bottom':\n        bbox = mtransforms.Bbox.union([*bboxes, self.axes.spines.get('bottom', self.axes).get_window_extent()])\n        self.label.set_position((x, bbox.y0 - self.labelpad * self.get_figure(root=True).dpi / 72))\n    else:\n        bbox = mtransforms.Bbox.union([*bboxes2, self.axes.spines.get('top', self.axes).get_window_extent()])\n        self.label.set_position((x, bbox.y1 + self.labelpad * self.get_figure(root=True).dpi / 72))",
    "docstring": "Update the label position based on the bounding box enclosing all the ticklabels and axis spine",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\axis.py",
    "ast_data": "FunctionDef name:_update_label_position arg:self arg:renderer arguments arg arg If Return return:no Assign Call Assign Call If Compare Assign Call Call Call Call Call Assign Call Call Call Call Call"
  },
  {
    "library": "pandas",
    "name": "_has_valid_setitem_indexer",
    "source_code": "def _has_valid_setitem_indexer(self, indexer) -> bool:\n    if isinstance(indexer, dict):\n        raise IndexError('iloc cannot enlarge its target object')\n    if isinstance(indexer, ABCDataFrame):\n        raise TypeError('DataFrame indexer for .iloc is not supported. Consider using .loc with a DataFrame indexer for automatic alignment.')\n    if not isinstance(indexer, tuple):\n        indexer = _tuplify(self.ndim, indexer)\n    for ax, i in zip(self.obj.axes, indexer):\n        if isinstance(i, slice):\n            pass\n        elif is_list_like_indexer(i):\n            pass\n        elif is_integer(i):\n            if i >= len(ax):\n                raise IndexError('iloc cannot enlarge its target object')\n        elif isinstance(i, dict):\n            raise IndexError('iloc cannot enlarge its target object')\n    return True",
    "docstring": "Validate that a positional indexer cannot enlarge its target will raise if needed, does not modify the indexer externally. Returns ------- bool",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\indexing.py",
    "ast_data": "FunctionDef name:_has_valid_setitem_indexer arg:self arg:indexer arguments arg arg If Call Raise Call If Call Raise Call If Call Assign Call For Call If Call If Call If Call If Compare Call Raise Call If Call Raise Call Return return:yes"
  },
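The behavior this validator enforces, shown through the public `.iloc` API:

```python
import pandas as pd

s = pd.Series([1, 2, 3])
s.iloc[1] = 10          # in-bounds positional set: allowed
try:
    s.iloc[5] = 99      # out-of-bounds integer: rejected by the validator
except IndexError as e:
    print(e)            # iloc cannot enlarge its target object
```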
  {
    "library": "cryptography",
    "name": "tag",
    "source_code": "@property\n@abc.abstractmethod\ndef tag(self) -> bytes | None:\n    pass",
    "docstring": "The value of the tag supplied to the constructor of this mode.",
    "type": "method",
    "file_path": "cryptography\\src\\cryptography\\hazmat\\primitives\\ciphers\\modes.py",
    "ast_data": "FunctionDef name:tag arg:self arguments arg"
  },
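A concrete mode implementing this abstract property is `modes.GCM`, where the tag is supplied up front for decryption; a short sketch:

```python
from cryptography.hazmat.primitives.ciphers import modes

# For decryption, GCM takes the authentication tag at construction (16 bytes).
mode = modes.GCM(b"\x00" * 12, tag=b"\x01" * 16)
print(mode.tag)  # b'\x01\x01...' -- the value handed to the constructor
```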
  {
    "library": "pytorch",
    "name": "flat_param_to",
    "source_code": "def flat_param_to(self, *args, **kwargs):\n    self.flat_param.data = self.flat_param.to(*args, **kwargs)\n    if self._use_orig_params:\n        if self.is_sharded(self.flat_param):\n            self._use_sharded_views()\n        else:\n            self._use_unsharded_views(as_params=True)",
    "docstring": "Wrap an in-place call to ``.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\fsdp\\_flat_param.py",
    "ast_data": "FunctionDef name:flat_param_to arg:self arguments arg arg arg Assign Call If If Call Call Call"
  },
  {
    "library": "cherrypy",
    "name": "__init__",
    "source_code": "def __init__(self):\n    self._populate_known_types()",
    "docstring": "Initialize Checker instance.",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\_cpchecker.py",
    "ast_data": "FunctionDef name:__init__ arg:self arguments arg Call"
  },
  {
    "library": "scikit-learn",
    "name": "_check_weights_parameters",
    "source_code": "def _check_weights_parameters(self):\n    if self.weight_concentration_prior is None:\n        self.weight_concentration_prior_ = 1.0 / self.n_components\n    else:\n        self.weight_concentration_prior_ = self.weight_concentration_prior",
    "docstring": "Check the parameter of the Dirichlet distribution.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\mixture\\_bayesian_mixture.py",
    "ast_data": "FunctionDef name:_check_weights_parameters arg:self arguments arg If Compare Assign Assign"
  },
  {
    "library": "tensorflow",
    "name": "to_tensors",
    "source_code": "@doc_controls.do_not_doc_inheritable\ndef to_tensors(self, value):\n    tensors = []\n    nest.map_structure(lambda spec, v: tensors.extend(spec.to_tensors(v)), self._component_specs, self._to_components(value))\n    return tensors",
    "docstring": "See TraceType base class for details. Do not override.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\type_spec.py",
    "ast_data": "FunctionDef name:to_tensors arg:self arg:value arguments arg arg Assign Call arguments arg arg Call Call Call Return return:yes"
  },
  {
    "library": "pandas",
    "name": "box_series",
    "source_code": "@box(SeriesType)\ndef box_series(typ, val, c):\n    series = cgutils.create_struct_proxy(typ)(c.context, c.builder, value=val)\n    series_const_obj = c.pyapi.unserialize(c.pyapi.serialize_object(Series._from_mgr))\n    mgr_const_obj = c.pyapi.unserialize(c.pyapi.serialize_object(SingleBlockManager.from_array))\n    index_obj = c.box(typ.index, series.index)\n    array_obj = c.box(typ.as_array, series.values)\n    name_obj = c.box(typ.namety, series.name)\n    mgr_obj = c.pyapi.call_function_objargs(mgr_const_obj, (array_obj, index_obj))\n    mgr_axes_obj = c.pyapi.object_getattr_string(mgr_obj, 'axes')\n    series_obj = c.pyapi.call_function_objargs(series_const_obj, (mgr_obj, mgr_axes_obj))\n    c.pyapi.object_setattr_string(series_obj, '_name', name_obj)\n    c.pyapi.decref(series_const_obj)\n    c.pyapi.decref(mgr_axes_obj)\n    c.pyapi.decref(mgr_obj)\n    c.pyapi.decref(mgr_const_obj)\n    c.pyapi.decref(index_obj)\n    c.pyapi.decref(array_obj)\n    c.pyapi.decref(name_obj)\n    return series_obj",
    "docstring": "Convert a native series structure to a Series object.",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\_numba\\extensions.py",
    "ast_data": "FunctionDef name:box_series arg:typ arg:val arg:c arguments arg arg arg Assign Call Call Assign Call Call Assign Call Call Assign Call Assign Call Assign Call Assign Call Assign Call Assign Call Call Call Call Call Call Call Call Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "_attach_model_to_data_sparsifier",
    "source_code": "def _attach_model_to_data_sparsifier(module, data_sparsifier, config=None):\n    if config is None:\n        config = {}\n    for name, parameter in module.named_parameters():\n        if type(parameter) in SUPPORTED_TYPES:\n            valid_name = _get_valid_name(name)\n            data_sparsifier.add_data(name=valid_name, data=parameter, **config.get(valid_name, {}))",
    "docstring": "Attaches a data sparsifier to all the layers of the module. Essentially, loop over all the weight parameters in the module and attach it to the data sparsifier. Note:: The '.' in the layer names are replaced with '_' (refer to _get_valid_name() below) before attaching to the sparsifier. This is because, the data sparsifier uses a dummy model inside to store the weight parameters.",
    "type": "function",
    "file_path": "pytorch\\torch\\ao\\pruning\\_experimental\\data_sparsifier\\lightning\\callbacks\\_data_sparstity_utils.py",
    "ast_data": "FunctionDef name:_attach_model_to_data_sparsifier arg:module arg:data_sparsifier arg:config arguments arg arg arg If Compare Assign For Call If Compare Call Assign Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "__make_cmp_key",
    "source_code": "def __make_cmp_key(self, value):\n    if isinstance(value, (int, float, bool, np.generic, dtypes.DType, TypeSpec, tensor_shape.TensorShape)):\n        return value\n    if isinstance(value, compat.bytes_or_text_types):\n        return value\n    if value is None:\n        return value\n    if isinstance(value, dict):\n        return tuple([tuple([self.__make_cmp_key(key), self.__make_cmp_key(value[key])]) for key in sorted(value.keys())])\n    if isinstance(value, tuple):\n        return tuple([self.__make_cmp_key(v) for v in value])\n    if isinstance(value, list):\n        return (list, tuple([self.__make_cmp_key(v) for v in value]))\n    if isinstance(value, np.ndarray):\n        return (np.ndarray, value.shape, TypeSpec.__nested_list_to_tuple(value.tolist()))\n    raise ValueError(f'Cannot generate a hashable key for {self} because the _serialize() method returned an unsupported value of type {type(value)}')",
    "docstring": "Converts to a hashable key.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\type_spec.py",
    "ast_data": "FunctionDef name:__make_cmp_key arg:self arg:value arguments arg arg If Call Return return:yes If Call Return return:yes If Compare Return return:yes If Call Return return:yes Call Call Call Call Call Call If Call Return return:yes Call Call If Call Return return:yes Call Call If Call Return return:yes Call Call Raise Call Call"
  },
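A standalone sketch of the same canonicalization idea (not TensorFlow's private method): turn nested dicts, lists, and tuples into hashable tuples usable as cache keys.

```python
def make_key(value):
    # Recursively convert containers into hashable, order-stable tuples.
    if isinstance(value, dict):
        return tuple(sorted((k, make_key(v)) for k, v in value.items()))
    if isinstance(value, list):
        return (list, tuple(make_key(v) for v in value))  # tag lists vs tuples
    if isinstance(value, tuple):
        return tuple(make_key(v) for v in value)
    return value

print(hash(make_key({"b": (3,), "a": [1, 2]})))  # nested value is now hashable
```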
  {
    "library": "tensorflow",
    "name": "_cast_single_input",
    "source_code": "def _cast_single_input(self, x):\n    if self._should_cast_single_input(x):\n        return math_ops.cast(x, self._compute_dtype_object)\n    else:\n        return x",
    "docstring": "Cast a single Tensor or TensorSpec to the compute dtype.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\base_layer.py",
    "ast_data": "FunctionDef name:_cast_single_input arg:self arg:x arguments arg arg If Call Return return:yes Call Return return:yes"
  },
  {
    "library": "django",
    "name": "__call__",
    "source_code": "def __call__(self, value):\n    regex_matches = self.regex.search(str(value))\n    invalid_input = regex_matches if self.inverse_match else not regex_matches\n    if invalid_input:\n        raise ValidationError(self.message, code=self.code, params={'value': value})",
    "docstring": "Validate that the input contains (or does *not* contain, if inverse_match is True) a match for the regular expression.",
    "type": "method",
    "file_path": "django\\django\\core\\validators.py",
    "ast_data": "FunctionDef name:__call__ arg:self arg:value arguments arg arg Assign Call Call Assign If Raise Call"
  },
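Public-API usage that exercises this `__call__`, using a plain (non-lazy) message so no Django settings are required:

```python
from django.core.exceptions import ValidationError
from django.core.validators import RegexValidator

v = RegexValidator(r"^[a-z]+$", message="lowercase only")
v("abc")                  # matches: returns silently
try:
    v("ABC")              # no match: __call__ raises
except ValidationError as e:
    print(e.messages)     # ['lowercase only']
```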
  {
    "library": "pytorch",
    "name": "onednn_fusion_enabled",
    "source_code": "def onednn_fusion_enabled():\n    return torch._C._jit_llga_enabled()",
    "docstring": "Return whether onednn JIT fusion is enabled.",
    "type": "function",
    "file_path": "pytorch\\torch\\jit\\__init__.py",
    "ast_data": "FunctionDef name:onednn_fusion_enabled arguments Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "to_graph_v1",
    "source_code": "@tf_export(v1=['autograph.to_graph'])\ndef to_graph_v1(entity, recursive=True, arg_values=None, arg_types=None, experimental_optional_features=None):\n    del arg_types\n    del arg_values\n    return to_graph(entity, recursive=recursive, experimental_optional_features=experimental_optional_features)",
    "docstring": "Converts a Python entity into a TensorFlow graph. Also see: , . Unlike , is a low-level transpiler that converts Python code to TensorFlow graph code. It does not implement any caching, variable management or create any actual ops, and is best used where greater control over the generated TensorFlow graph is desired. Another difference from is that will not wrap the graph into a TensorFlow function or a Python callable. Internally, uses . _Example Usage_ Supported Python entities include: * functions * classes * object methods Functions are converted into new functions with converted code. Classes are converted by generating a new class whose methods use converted code. Methods are converted into unbound function that have an additional first argument called . Args: entity: Python callable or class to convert. recursive: Whether to recursively convert any functions that the converted function may call. arg_values: Deprecated. arg_types: Deprecated. experimental_optional_features: , a tuple of, or a single value. Returns: Same as , the converted Python function or class. Raises: ValueError: If the entity could not be converted.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\autograph\\impl\\api.py",
    "ast_data": "FunctionDef name:to_graph_v1 arg:entity arg:recursive arg:arg_values arg:arg_types arg:experimental_optional_features arguments arg arg arg arg arg Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "default_restore_location",
    "source_code": "def default_restore_location(storage, location):\n    for _, _, fn in _package_registry:\n        result = fn(storage, location)\n        if result is not None:\n            return result\n    raise RuntimeError(\"don't know how to restore data location of \" + torch.typename(storage) + ' (tagged with ' + location + ')')",
    "docstring": "Restores using a deserializer function registered for the . This function looks in the registry for deserializer functions that match the . If found, it attempts to use them, in priority order, to restore until one returns a not result. If no deserializer can be found in the registry, or all found fail to bear a result, it raises a . Args: storage (STORAGE): the storage object to restore location (str): the location tag associated with the storage object Returns: storage: Optional[STORAGE] Raises: RuntimeError: If no deserializer matching is found in the registry or if all matching ones return .",
    "type": "function",
    "file_path": "pytorch\\torch\\serialization.py",
    "ast_data": "FunctionDef name:default_restore_location arg:storage arg:location arguments arg arg For Assign Call If Compare Return return:yes Raise Call Call"
  },
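A minimal sketch of the registry lookup using the built-in CPU deserializer; `default_restore_location` is importable from `torch.serialization`:

```python
import torch
from torch.serialization import default_restore_location

storage = torch.UntypedStorage(16)  # a CPU storage
# The registered CPU deserializer matches the "cpu" tag and returns it as-is.
restored = default_restore_location(storage, "cpu")
assert restored is storage
```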
  {
    "library": "tensorflow",
    "name": "_copy_functions_to_graph_def",
    "source_code": "def _copy_functions_to_graph_def(self, graph_def, starting_bytesize) -> None:\n    bytesize = starting_bytesize\n    for f in self._functions.values():\n        bytesize += f.cached_definition.ByteSize()\n        if bytesize >= 1 << 31 or bytesize < 0:\n            raise ValueError('GraphDef cannot be larger than 2GB.')\n        graph_def.library.function.extend([f.cached_definition])\n        if getattr(f, 'grad_func_name', None):\n            grad_def = function_pb2.GradientDef()\n            grad_def.function_name = f.name\n            grad_def.gradient_func = f.grad_func_name\n            graph_def.library.gradient.extend([grad_def])",
    "docstring": "If this graph contains functions, copy them to .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\ops.py",
    "ast_data": "FunctionDef name:_copy_functions_to_graph_def arg:self arg:graph_def arg:starting_bytesize arguments arg arg arg Assign For Call Call If BoolOp Compare Compare Raise Call Call If Call Assign Call Assign Assign Call"
  },
  {
    "library": "matplotlib",
    "name": "get_active",
    "source_code": "@classmethod\ndef get_active(cls):\n    return next(reversed(cls.figs.values())) if cls.figs else None",
    "docstring": "Return the active manager, or *None* if there is no manager.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\_pylab_helpers.py",
    "ast_data": "FunctionDef name:get_active arg:cls arguments arg Return return:yes Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "add_ck_gemm_choices",
    "source_code": "@staticmethod\ndef add_ck_gemm_choices(choices, layout, input_nodes, alpha=1, beta=0, input_reorder=None):\n    template = CKGemmTemplate(input_nodes, layout, alpha=alpha, beta=beta, input_reorder=input_reorder)\n    ops = template.gen_ops()\n    for op in ops:\n        template.maybe_append_choice(choices, op=op.op, kBatch=op.kBatch)",
    "docstring": "Add Composable Kernel Universal GEMM instance choices to the auto-tuning list.",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\codegen\\rocm\\ck_universal_gemm_template.py",
    "ast_data": "FunctionDef name:add_ck_gemm_choices arg:choices arg:layout arg:input_nodes arg:alpha arg:beta arg:input_reorder arguments arg arg arg arg arg arg Assign Call Assign Call For Call"
  },
  {
    "library": "tensorflow",
    "name": "set_cc_opt_flags",
    "source_code": "def set_cc_opt_flags(environ_cp):\n    if is_ppc64le():\n        default_cc_opt_flags = '-mcpu=native'\n    elif is_windows():\n        default_cc_opt_flags = '/arch:AVX'\n    else:\n        default_cc_opt_flags = '-Wno-sign-compare'\n    question = 'Please specify optimization flags to use during compilation when bazel option \"--config=opt\" is specified [Default is %s]: ' % default_cc_opt_flags\n    cc_opt_flags = get_from_env_or_user_or_default(environ_cp, 'CC_OPT_FLAGS', question, default_cc_opt_flags)\n    for opt in cc_opt_flags.split():\n        write_to_bazelrc('build:opt --copt=%s' % opt)\n        write_to_bazelrc('build:opt --host_copt=%s' % opt)",
    "docstring": "Set up architecture-dependent optimization flags. Also append CC optimization flags to bazel.rc.. Args: environ_cp: copy of the os.environ.",
    "type": "function",
    "file_path": "tensorflow\\configure.py",
    "ast_data": "FunctionDef name:set_cc_opt_flags arg:environ_cp arguments arg If Call Assign If Call Assign Assign Assign Assign Call For Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "stride",
    "source_code": "def stride(self, node: IRNode, index: int, default_value: int=0) -> str:\n    if node is None:\n        return str(default_value)\n    index = _normalize_idx(index, len(node.get_size()))\n    if index < 0:\n        return str(default_value)\n    stride = node.get_stride()[index]\n    if V.graph.sizevars.statically_known_leq(stride, 1):\n        return str(stride)\n    return self.find_symbol(node, 'stride', dim=index) or str(stride)",
    "docstring": "Hook called from template code to get the stride of an arg. Generates code which represents stride of a given node at index. If node is None, returns default_value. TODO: Will add needed args to pass it in if it is dynamic.",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\codegen\\cuda\\cuda_kernel.py",
    "ast_data": "FunctionDef name:stride arg:self arg:node arg:index arg:default_value arguments arg arg arg arg If Compare Return return:yes Call Assign Call Call Call If Compare Return return:yes Call Assign Call If Call Return return:yes Call Return return:yes BoolOp Call Call"
  },
  {
    "library": "matplotlib",
    "name": "set_slope",
    "source_code": "def set_slope(self, slope):\n    if self._xy2 is None:\n        self._slope = slope\n    else:\n        raise ValueError(\"Cannot set a 'slope' value while 'xy2' is set; they differ but their functionalities overlap\")",
    "docstring": "Set the *slope* value of the line. .. note:: You can only set *slope* if the line was created using the *slope* parameter. If the line was created using *xy2*, please use . Parameters ---------- slope : float The slope of the line.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\lines.py",
    "ast_data": "FunctionDef name:set_slope arg:self arg:slope arguments arg arg If Compare Assign Raise Call"
  },
  {
    "library": "pytorch",
    "name": "__new__",
    "source_code": "@staticmethod\n@torch._disable_dynamo\ndef __new__(cls, local_tensor: torch.Tensor, spec: DTensorSpec, *, requires_grad: bool) -> 'DTensor':\n    if local_tensor.requires_grad and (not requires_grad):\n        warnings.warn(\"To construct DTensor from torch.Tensor, it's recommended to use local_tensor.detach() and make requires_grad consistent.\")\n    assert spec.tensor_meta is not None, 'TensorMeta should not be None!'\n    r = torch.Tensor._make_wrapper_subclass(cls, spec.tensor_meta.shape, strides=spec.tensor_meta.stride, dtype=local_tensor.dtype, device=local_tensor.device, layout=local_tensor.layout, requires_grad=requires_grad)\n    r._spec = spec\n    r._local_tensor = local_tensor\n    return r",
    "docstring": "Construct a DTensor from a local tensor, device mesh, and placement and other tensor properties (i.e. shape, requires_grad, strides, etc). .. note:: This is not a public API and it's only supposed to be used by the operator implementations and internals. If you want to construct a DTensor from a local tensor, consider using ``.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\tensor\\_api.py",
    "ast_data": "FunctionDef name:__new__ arg:cls arg:local_tensor arg:spec arguments arg arg arg arg If BoolOp Call Compare Assign Call Assign Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_convert_all_to_tensors",
    "source_code": "def _convert_all_to_tensors(values, dtype=None, dtype_hint=None):\n    target_dtype = _get_target_dtype([x for x, _ in values], dtype, dtype_hint)\n    convert_behavior = dtype is None\n    if convert_behavior:\n        return [None if x is None else ops.convert_to_tensor(x, dtype=target_dtype, name=name) for x, name in values]\n    else:\n        return [None if x is None else math_ops.cast(x, dtype=target_dtype, name=name) for x, name in values]",
    "docstring": "Convert a list of objects to tensors of the same dtype.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\row_partition.py",
    "ast_data": "FunctionDef name:_convert_all_to_tensors arg:values arg:dtype arg:dtype_hint arguments arg arg arg Assign Call Assign Compare If Return return:yes Compare Call Return return:yes Compare Call"
  },
  {
    "library": "pytorch",
    "name": "wait",
    "source_code": "def wait(self):\n    has_warned = False\n    start_time = time.time()\n    while os.path.exists(self.lock_file_path):\n        time.sleep(self.wait_seconds)\n        if self.warn_after_seconds is not None:\n            if time.time() - start_time > self.warn_after_seconds and (not has_warned):\n                warnings.warn(f'Waited on lock file \"{self.lock_file_path}\" for {self.warn_after_seconds} seconds.')\n                has_warned = True",
    "docstring": "Periodically sleeps for a certain amount until the baton is released. The amount of time slept depends on the `` parameter passed to the constructor.",
    "type": "method",
    "file_path": "pytorch\\torch\\utils\\file_baton.py",
    "ast_data": "FunctionDef name:wait arg:self arguments arg Assign Assign Call While Call Call If Compare If BoolOp Compare Call Call Assign"
  },
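A usage sketch of `FileBaton` serializing one-time work across processes; the lock path is hypothetical:

```python
from torch.utils.file_baton import FileBaton

baton = FileBaton("/tmp/one_time_build.lock")  # hypothetical path
if baton.try_acquire():
    try:
        pass  # do the one-time work here
    finally:
        baton.release()
else:
    baton.wait()  # poll every `wait_seconds` until the owner releases the lock
```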
  {
    "library": "kornia",
    "name": "IntegralTensor",
    "source_code": "class IntegralTensor(Module):\n\n    def __init__(self, dim: Optional[Tuple[int, ...]]=None) -> None:\n        super().__init__()\n        self.dim = dim\n\n    def forward(self, input: Tensor) -> Tensor:\n        return integral_tensor(input, self.dim)",
    "docstring": "Calculates integral of the input tensor. Args: image: the input tensor with shape :math:. Returns: Integral tensor for the input tensor with shape :math:. Shape: - Input: :math: - Output: :math: Examples: >>> input = torch.ones(3, 5) >>> dim = (-2, -1) >>> output = IntegralTensor(dim)(input) >>> output tensor([[ 1., 2., 3., 4., 5.], [ 2., 4., 6., 8., 10.], [ 3., 6., 9., 12., 15.]])",
    "type": "class",
    "file_path": "kornia\\kornia\\enhance\\integral.py",
    "ast_data": "ClassDef name:IntegralTensor FunctionDef name:__init__ arg:self arg:dim arguments arg arg Call Call Assign FunctionDef name:forward arg:self arg:input arguments arg arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "_get_node_namespace",
    "source_code": "def _get_node_namespace(node: torch.fx.Node) -> tuple[str, list[str], list[str]]:\n    nn_module_stack = node.meta.get('nn_module_stack')\n    logger.debug('%s', nn_module_stack)\n    if nn_module_stack is None:\n        logger.warning(\"nn_module_stack not found for node '%s'. Skip adding metadata...\", node.name)\n        return (f'{node.name}: {node.target}', [str(node.target)], [node.name])\n    namespaces = []\n    class_hierarchy = []\n    name_scopes = []\n    for name, nn_module in nn_module_stack.values():\n        name_scopes.append(name)\n        nn_module_name = _get_qualified_module_name(nn_module)\n        class_hierarchy.append(nn_module_name)\n        namespaces.append(f'{name}: {_get_qualified_module_name(nn_module)}')\n    namespaces.append(f'{node.name}: {node.target}')\n    class_hierarchy.append(str(node.target))\n    name_scopes.append(node.name)\n    return ('/'.join(namespaces), class_hierarchy, name_scopes)",
    "docstring": "Get the namespace and scope of the node. Example:: { 'L__self__': ('', ), 'L__self___avgpool': ('avgpool', ) } Will yield namespace: \": torchvision.models.resnet.ResNet/avgpool: torch.nn.modules.pooling.AdaptiveAvgPool2d/node_name: node_target\" class_hierarchy: [\"torchvision.models.resnet.ResNet\", \"torch.nn.modules.pooling.AdaptiveAvgPool2d\", ] name_scopes: [\"\", \"avgpool\", ] Args: node: The node to get the namespace and scope of. Returns: (namespace, class_hierarchy, name_scope)",
    "type": "function",
    "file_path": "pytorch\\torch\\onnx\\_internal\\exporter\\_core.py",
    "ast_data": "FunctionDef name:_get_node_namespace arg:node arguments arg Assign Call Call If Compare Call Return return:yes Call Assign Assign Assign For Call Call Assign Call Call Call Call Call Call Call Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_get_required_param_names",
    "source_code": "def _get_required_param_names(sig):\n    params = []\n    for p in sig.parameters.values():\n        if p.kind == p.VAR_POSITIONAL:\n            continue\n        if p.kind == p.VAR_KEYWORD:\n            continue\n        if p.default is not p.empty:\n            continue\n        params.append(p.name)\n    return params",
    "docstring": "Returns a list of required parameter names from a python Signature.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\util\\dispatch.py",
    "ast_data": "FunctionDef name:_get_required_param_names arg:sig arguments arg Assign For Call If Compare If Compare If Compare Call Return return:yes"
  },
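The same filtering expressed directly against `inspect.signature`, for illustration:

```python
import inspect

def f(a, b, c=1, *args, d=2, **kwargs): ...

sig = inspect.signature(f)
required = [p.name for p in sig.parameters.values()
            if p.kind not in (p.VAR_POSITIONAL, p.VAR_KEYWORD)
            and p.default is p.empty]
print(required)  # ['a', 'b']
```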
  {
    "library": "tensorflow",
    "name": "set_visible_devices",
    "source_code": "def set_visible_devices(self, devices, device_type=None):\n    self._initialize_physical_devices()\n    if not isinstance(devices, list):\n        devices = [devices]\n    for d in devices:\n        if d not in self._physical_devices:\n            raise ValueError('Unrecognized device: %s' % repr(d))\n        if device_type is not None and d.device_type != device_type:\n            raise ValueError('Unrecognized device: %s' % repr(d))\n    visible_device_list = []\n    if device_type is not None:\n        visible_device_list = [d for d in self._visible_device_list if d.device_type != device_type]\n    visible_device_list += devices\n    if self._visible_device_list == visible_device_list:\n        return\n    if self._context_handle is not None:\n        raise RuntimeError('Visible devices cannot be modified after being initialized')\n    self._visible_device_list = visible_device_list",
    "docstring": "Set the list of visible devices.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\context.py",
    "ast_data": "FunctionDef name:set_visible_devices arg:self arg:devices arg:device_type arguments arg arg arg Call If Call Assign For If Compare Raise Call Call If BoolOp Compare Compare Raise Call Call Assign If Compare Assign Compare If Compare Return return:no If Compare Raise Call Assign"
  },
  {
    "library": "pytorch",
    "name": "FlattenOutputStep",
    "source_code": "class FlattenOutputStep(OutputAdaptStep):\n\n    def apply(self, model_outputs: Any, model: torch.nn.Module | Callable | torch_export.ExportedProgram | None=None) -> Sequence[Any]:\n        return pytree.tree_leaves(model_outputs)",
    "docstring": "Flatten nested collection types and return a flat list of elements. ONNX can't represent collection types (e.g., dictionary, tuple of tuple of tensor, etc). NOTE: Ideally we would want to use `SpecTree` can be validate for new model outputs. However, this is not possible currently because we never have access to real PyTorch model outputs during export. Only traced outputs may be available, but they are not an accurate reflection of the original PyTorch model outputs format as they are typically in their own unique format, depending on the tracing strategy.",
    "type": "class",
    "file_path": "pytorch\\torch\\onnx\\_internal\\io_adapter.py",
    "ast_data": "ClassDef name:FlattenOutputStep FunctionDef name:apply arg:self arg:model_outputs arg:model arguments arg arg arg Return return:yes Call"
  },
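What `pytree.tree_leaves` does to a nested output, assuming a recent torch where `torch.utils._pytree.tree_leaves` (a private helper) exists:

```python
import torch
from torch.utils import _pytree as pytree  # private helper; API may change

out = {"logits": torch.ones(2), "aux": (torch.zeros(1), [torch.ones(1)])}
print(pytree.tree_leaves(out))  # flat list of the three tensors, in order
```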
  {
    "library": "tensorflow",
    "name": "NanTensorHook",
    "source_code": "@tf_export(v1=['train.NanTensorHook'])\nclass NanTensorHook(session_run_hook.SessionRunHook):\n\n    def __init__(self, loss_tensor, fail_on_nan_loss=True):\n        self._loss_tensor = loss_tensor\n        self._fail_on_nan_loss = fail_on_nan_loss\n\n    def before_run(self, run_context):\n        return SessionRunArgs(self._loss_tensor)\n\n    def after_run(self, run_context, run_values):\n        if np.isnan(run_values.results):\n            failure_message = 'Model diverged with loss = NaN.'\n            if self._fail_on_nan_loss:\n                logging.error(failure_message)\n                raise NanLossDuringTrainingError\n            else:\n                logging.warning(failure_message)\n                run_context.request_stop()",
    "docstring": "Monitors the loss tensor and stops training if loss is NaN. Can either fail with exception or just stop training.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\training\\basic_session_run_hooks.py",
    "ast_data": "ClassDef name:NanTensorHook FunctionDef name:__init__ arg:self arg:loss_tensor arg:fail_on_nan_loss arguments arg arg arg Assign Assign FunctionDef name:before_run arg:self arg:run_context arguments arg arg Return return:yes Call FunctionDef name:after_run arg:self arg:run_context arg:run_values arguments arg arg arg If Call Assign If Call Raise Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "benchmark",
    "source_code": "@time_and_count\ndef benchmark(self: Self, fn: Callable[..., Any], fn_args: tuple[Any, ...], fn_kwargs: dict[str, Any], **kwargs: Any) -> float:\n    inferred_device = None\n    for arg_or_kwarg in chain(fn_args, fn_kwargs.values()):\n        if not isinstance(arg_or_kwarg, torch.Tensor):\n            continue\n        if inferred_device is None:\n            inferred_device = arg_or_kwarg.device\n        elif arg_or_kwarg.device != inferred_device:\n            raise ValueError(\"Can't safely infer the device type of `fn` with multiple device types in `fn_args` and `fn_kwargs`!\")\n    if inferred_device is None:\n        raise ValueError(\"Can't safely infer the device type of `fn` with no device types in `fn_args` or `fn_kwargs`! You should be calling `.benchmark_cpu` or `.benchmark_gpu` directly.\")\n    _callable = lambda: fn(*fn_args, **fn_kwargs)\n    if inferred_device == torch.device('cpu'):\n        return self.benchmark_cpu(_callable, **kwargs)\n    return self.benchmark_gpu(_callable, **kwargs)",
    "docstring": "Benchmark and return the runtime, in milliseconds (the actual runtime calculation is dictated by the benchmarking implementation, but may be one of [mean, median, minimum, etc.]). Functions as a convenience wrapper around device-specific implementations, like and . Raises if we can't safely infer the device type of ; for example, if multiple device types are found in and , or if no device types are found. Arguments: - fn: The function to benchmark. - fn_args: The function's arguments. - fn_kwargs: The function's kwargs. Keyword Arguments: - **kwargs: The benchmarking implementation's kwargs. Returns: - The runtime of , in milliseconds.",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\runtime\\benchmarking.py",
    "ast_data": "FunctionDef name:benchmark arg:self arg:fn arg:fn_args arg:fn_kwargs arguments arg arg arg arg arg Assign For Call Call If Call If Compare Assign If Compare Raise Call If Compare Raise Call Assign arguments Call If Compare Call Return return:yes Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_validate_synchronization",
    "source_code": "def _validate_synchronization(kwargs):\n    synchronization = kwargs.get('synchronization', vs.VariableSynchronization.AUTO)\n    if synchronization == vs.VariableSynchronization.NONE:\n        raise ValueError('`NONE` variable synchronization mode is not supported with tf.distribute strategy. Please change the `synchronization` for variable: ' + str(kwargs['name']))\n    if synchronization not in (vs.VariableSynchronization.ON_READ, vs.VariableSynchronization.ON_WRITE, vs.VariableSynchronization.AUTO):\n        raise ValueError('Invalid variable synchronization mode: %s for variable: %s' % (synchronization, kwargs['name']))\n    if synchronization == vs.VariableSynchronization.AUTO:\n        return vs.VariableSynchronization.ON_WRITE\n    return synchronization",
    "docstring": "Validate that given synchronization value is valid.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\distribute_utils.py",
    "ast_data": "FunctionDef name:_validate_synchronization arg:kwargs arguments arg Assign Call If Compare Raise Call Call If Compare Raise Call If Compare Return return:yes Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "__init__",
    "source_code": "def __init__(self, name: str, make_fx_graph: Callable[..., Any]):\n    self.name = f'{name}_{next(SubgraphTemplate.index_counter)}'\n    self.make_fx_graph = make_fx_graph",
    "docstring": "Initialize a subgraph template. Args: name: The name of this template graph: The FX graph",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\codegen\\subgraph.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:name arg:make_fx_graph arguments arg arg arg Assign Call Assign"
  },
  {
    "library": "pytorch",
    "name": "_inject_new_class",
    "source_code": "def _inject_new_class(module: Module) -> None:\n    cls = module.__class__\n\n    def default_deepcopy(self, memo):\n        obj = memo.get(id(self), None)\n        if obj is not None:\n            return obj\n        replica = self.__new__(self.__class__)\n        memo[id(self)] = replica\n        replica.__dict__ = deepcopy(self.__dict__, memo)\n        slots_to_save = copyreg._slotnames(self.__class__)\n        for slot in slots_to_save:\n            if hasattr(self, slot):\n                setattr(replica, slot, deepcopy(getattr(self, slot), memo))\n        return replica\n\n    def getstate(self):\n        raise RuntimeError('Serialization of parametrized modules is only supported through state_dict(). See:\\nhttps://pytorch.org/tutorials/beginner/saving_loading_models.html#saving-loading-a-general-checkpoint-for-inference-and-or-resuming-training')\n    dct = {'__getstate__': getstate}\n    if not hasattr(cls, '__deepcopy__'):\n        dct['__deepcopy__'] = default_deepcopy\n    param_cls = type(f'Parametrized{cls.__name__}', (cls,), dct)\n    module.__class__ = param_cls",
    "docstring": "Set up a module to be parametrized. This works by substituting the class of the module by a class that extends it to be able to inject a property Args: module (nn.Module): module into which to inject the property",
    "type": "function",
    "file_path": "pytorch\\torch\\nn\\utils\\parametrize.py",
    "ast_data": "FunctionDef name:_inject_new_class arg:module arguments arg Assign FunctionDef name:default_deepcopy arg:self arg:memo arguments arg arg Assign Call Call If Compare Return return:yes Assign Call Assign Call Assign Call Assign Call For If Call Call Call Call Return return:yes FunctionDef name:getstate arg:self arguments arg Raise Call Assign If Call Assign Assign Call Assign"
  },
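Public-API usage that triggers `_inject_new_class` under the hood: registering a parametrization swaps the module's class for a `Parametrized*` subclass.

```python
import torch.nn as nn
import torch.nn.utils.parametrize as parametrize

class Symmetric(nn.Module):
    def forward(self, W):
        # Rebuild the weight as a symmetric matrix from its upper triangle.
        return W.triu() + W.triu(1).transpose(-1, -2)

layer = nn.Linear(3, 3)
parametrize.register_parametrization(layer, "weight", Symmetric())
print(type(layer).__name__)  # ParametrizedLinear
```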
  {
    "library": "numpy",
    "name": "trace",
    "source_code": "@array_function_dispatch(_trace_dispatcher)\ndef trace(x, /, *, offset=0, dtype=None):\n    return _core_trace(x, offset, axis1=-2, axis2=-1, dtype=dtype)",
    "docstring": "Returns the sum along the specified diagonals of a matrix (or a stack of matrices) `numpy.tracenumpy.traceoffset` argument: >>> a = np.arange(9).reshape((3, 3)); a array([[0, 1, 2], [3, 4, 5], [6, 7, 8]]) >>> np.linalg.trace(a, offset=1) # First superdiagonal 6 >>> np.linalg.trace(a, offset=2) # Second superdiagonal 2 >>> np.linalg.trace(a, offset=-1) # First subdiagonal 10 >>> np.linalg.trace(a, offset=-2) # Second subdiagonal 6",
    "type": "function",
    "file_path": "numpy\\numpy\\linalg\\_linalg.py",
    "ast_data": "FunctionDef name:trace arguments arg arg arg Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "solve_constraints",
    "source_code": "def solve_constraints(self):\n    self.passes = _topological_sort_passes(self.passes, self.constraints)\n    self._validated = True",
    "docstring": "Finds a valid traversal order based on the given constraints and orders the passes based on this order. If a circular dependency exists between the constraints and steps = 1, then we will raise an error because if steps != 1 this means that we will re-run the passes, allowing for circular dependencies.",
    "type": "method",
    "file_path": "pytorch\\torch\\fx\\passes\\infra\\pass_manager.py",
    "ast_data": "FunctionDef name:solve_constraints arg:self arguments arg Assign Call Assign"
  },
  {
    "library": "tensorflow",
    "name": "experimental_get_tracing_count",
    "source_code": "def experimental_get_tracing_count(self):\n    return len(self._function_cache)",
    "docstring": "Returns the number of times the function has been traced. For more information on when a function is traced and when it is traced multiple times see Example: >>> @tf.function ... def double(a): ... return a + a >>> double(tf.constant(1)) >>> double(tf.constant(2)) >>> double.experimental_get_tracing_count() 1 >>> double(tf.constant(\"a\")) >>> double.experimental_get_tracing_count() 2 The first time experimental_get_tracing_count is called it returns 1, as the function is traced the first time it is called, and the second time the same graph is used since we're calling it with a parameter of the same type. The second time experimental_get_tracing_count is called it returns 2, as we called double with a different argument type, and so it was traced again.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\polymorphic_function\\polymorphic_function.py",
    "ast_data": "FunctionDef name:experimental_get_tracing_count arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "numpy",
    "name": "rpartition",
    "source_code": "@set_module('numpy.char')\ndef rpartition(a, sep):\n    return np.stack(strings_rpartition(a, sep), axis=-1)",
    "docstring": "Partition (split) each element around the right-most separator. Calls :meth: element-wise. For each element in , split the element as the last occurrence of , and return 3 strings containing the part before the separator, the separator itself, and the part after the separator. If the separator is not found, return 3 strings containing the string itself, followed by two empty strings. Parameters ---------- a : array-like, with `` dtype, depending on input types. The output array will have an extra dimension with 3 elements per input element. See Also -------- str.rpartition Examples -------- >>> import numpy as np >>> a = np.array(['aAaAaA', ' aA ', 'abBABba']) >>> np.char.rpartition(a, 'A') array([['aAaAa', 'A', ''], [' a', 'A', ' '], ['abB', 'A', 'Bba']], dtype='<U5')",
    "type": "function",
    "file_path": "numpy\\numpy\\_core\\defchararray.py",
    "ast_data": "FunctionDef name:rpartition arg:a arg:sep arguments arg arg Return return:yes Call Call Call"
  },
  {
    "library": "matplotlib",
    "name": "set_mutation_scale",
    "source_code": "def set_mutation_scale(self, scale):\n    self._mutation_scale = scale\n    self.stale = True",
    "docstring": "Set the mutation scale. Parameters ---------- scale : float",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\patches.py",
    "ast_data": "FunctionDef name:set_mutation_scale arg:self arg:scale arguments arg arg Assign Assign"
  },
  {
    "library": "tensorflow",
    "name": "_get_tensor_name",
    "source_code": "def _get_tensor_name(node_name, output_slot):\n    return '%s:%d' % (node_name, output_slot)",
    "docstring": "Get tensor name given node name and output slot index. Args: node_name: Name of the node that outputs the tensor, as a string. output_slot: Output slot index of the tensor, as an integer. Returns: Name of the tensor, as a string.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\debug\\lib\\debug_data.py",
    "ast_data": "FunctionDef name:_get_tensor_name arg:node_name arg:output_slot arguments arg arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "all_to_all_v2",
    "source_code": "def all_to_all_v2(t, group_size, group_key, instance_key, communication_hint='auto', timeout=0, ordering_token=None, name=None):\n    if ordering_token is not None:\n        ordering_token = [ordering_token]\n    else:\n        ordering_token = []\n    return gen_collective_ops.collective_all_to_all_v2(t, group_size=group_size, group_key=group_key, instance_key=instance_key, communication_hint=communication_hint.lower(), timeout_seconds=timeout, is_stateless=False, ordering_token=ordering_token, name=name)",
    "docstring": "Exchanges tensors mutually. Args: t: a . The first dimension should have the length as the size of the group. is sent to within the group. group_size: an int32 tensor, the total number of tensors to be mutually exchanged. Each must reside on a different device. Should be a positive integer. group_key: an int32 tensor identifying the group of devices. instance_key: an int32 tensor identifying the participating group of Ops. communication_hint: preferred collective communication. The implementation may fall back to another mechanism. Options include and . timeout: a float. If set to a non zero, set a completion timeout to detect staleness. If the timer goes off, a DeadlineExceededError is raised. The timeout value in seconds. This feature is experimental. ordering_token: a resource tensor on the same device as the op to order the collectives in a per-device manner by auto control dependency. This argument can be omited when there is one collective Op per , or when explicit control dependency is used instead of auto control dependency. name: name of the Op. Returns: An Op implementing the distributed operation.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\collective_ops.py",
    "ast_data": "FunctionDef name:all_to_all_v2 arg:t arg:group_size arg:group_key arg:instance_key arg:communication_hint arg:timeout arg:ordering_token arg:name arguments arg arg arg arg arg arg arg arg If Compare Assign Assign Return return:yes Call Call"
  },
  {
    "library": "scikit-learn",
    "name": "has_openmp_flags",
    "source_code": "def has_openmp_flags(target):\n    target_sources = target['target_sources']\n    target_use_openmp_flags = any((has_source_openmp_flags(target_source) for target_source in target_sources))\n    if not target_use_openmp_flags:\n        return False\n    assert len(target_sources) == 2\n    compiler_source, linker_source = target_sources\n    assert 'compiler' in compiler_source\n    assert 'linker' in linker_source\n    compiler_use_openmp_flags = any(('openmp' in arg for arg in compiler_source['parameters']))\n    linker_use_openmp_flags = any(('openmp' in arg for arg in linker_source['parameters']))\n    assert compiler_use_openmp_flags == linker_use_openmp_flags\n    return compiler_use_openmp_flags",
    "docstring": "Return whether target sources use OpenMP flags. Make sure that both compiler and linker source use OpenMP. Look at docstring to see what looks like.",
    "type": "function",
    "file_path": "scikit-learn\\build_tools\\check-meson-openmp-dependencies.py",
    "ast_data": "FunctionDef name:has_openmp_flags arg:target arguments arg Assign Assign Call Call If Return return:yes Compare Call Assign Compare Compare Assign Call Compare Assign Call Compare Compare Return return:yes"
  },
  {
    "library": "kornia",
    "name": "axis_angle_to_quaternion",
    "source_code": "def axis_angle_to_quaternion(axis_angle: Tensor) -> Tensor:\n    if not torch.is_tensor(axis_angle):\n        raise TypeError(f'Input type is not a Tensor. Got {type(axis_angle)}')\n    if not axis_angle.shape[-1] == 3:\n        raise ValueError(f'Input must be a tensor of shape Nx3 or 3. Got {axis_angle.shape}')\n    a0: Tensor = axis_angle[..., 0:1]\n    a1: Tensor = axis_angle[..., 1:2]\n    a2: Tensor = axis_angle[..., 2:3]\n    theta_squared: Tensor = a0 * a0 + a1 * a1 + a2 * a2\n    theta: Tensor = torch.sqrt(theta_squared)\n    half_theta: Tensor = theta * 0.5\n    mask: Tensor = theta_squared > 0.0\n    ones: Tensor = torch.ones_like(half_theta)\n    k_neg: Tensor = 0.5 * ones\n    k_pos: Tensor = sin(half_theta) / theta\n    k: Tensor = where(mask, k_pos, k_neg)\n    w: Tensor = where(mask, cos(half_theta), ones)\n    quaternion: Tensor = torch.zeros(size=(*axis_angle.shape[:-1], 4), dtype=axis_angle.dtype, device=axis_angle.device)\n    quaternion[..., 1:2] = a0 * k\n    quaternion[..., 2:3] = a1 * k\n    quaternion[..., 3:4] = a2 * k\n    quaternion[..., 0:1] = w\n    return quaternion",
    "docstring": "Convert an axis angle to a quaternion. The quaternion vector has components in (w, x, y, z) format. Adapted from ceres C++ library: ceres-solver/include/ceres/rotation.h Args: axis_angle: tensor with axis angle in radians. Return: tensor with quaternion. Shape: - Input: :math: where means, any number of dimensions - Output: :math: Example: >>> axis_angle = tensor((0., 1., 0.)) >>> axis_angle_to_quaternion(axis_angle) tensor([0.8776, 0.0000, 0.4794, 0.0000])",
    "type": "function",
    "file_path": "kornia\\kornia\\geometry\\conversions.py",
    "ast_data": "FunctionDef name:axis_angle_to_quaternion arg:axis_angle arguments arg If Call Raise Call Call If Compare Raise Call Call Compare Call Call Call Call Call Call Assign Assign Assign Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "master",
    "source_code": "def master(self, task_type=None, task_id=None, rpc_layer=None):\n    session_master = _get_value_in_tfconfig(_SESSION_MASTER_KEY, self._port)\n    if session_master is not None:\n        return session_master\n    cluster_spec = self.cluster_spec()\n    if not cluster_spec.jobs or (len(cluster_spec.jobs) == 1 and len(cluster_spec.job_tasks(cluster_spec.jobs[0])) == 1):\n        return ''\n    task_type = task_type if task_type is not None else self.task_type\n    task_id = task_id if task_id is not None else self.task_id\n    rpc_layer = rpc_layer if rpc_layer is not None else self.rpc_layer\n    return format_master_url(cluster_spec.task_address(task_type, task_id), rpc_layer)",
    "docstring": "Returns the master address to use when creating a TensorFlow session. Note: this is only useful for TensorFlow 1.x. Args: task_type: (String, optional) Overrides and sets the task_type of the master. task_id: (Integer, optional) Overrides and sets the task id of the master. rpc_layer: (String, optional) Overrides and sets the protocol over which TensorFlow nodes communicate with each other. Returns: The address of the master. Raises: RuntimeError: If the task_type or task_id is not specified and the SageMaker environment variables does not contain a task section.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\cluster_resolver\\sagemaker_cluster_resolver.py",
    "ast_data": "FunctionDef name:master arg:self arg:task_type arg:task_id arg:rpc_layer arguments arg arg arg arg Assign Call If Compare Return return:yes Assign Call If BoolOp BoolOp Compare Call Compare Call Call Return return:yes Assign Compare Assign Compare Assign Compare Return return:yes Call Call"
  },
  {
    "library": "scipy",
    "name": "kernel_vector",
    "source_code": "def kernel_vector(x, y, kernel_func, out):\n    for i in range(y.shape[0]):\n        out[i] = kernel_func(np.linalg.norm(x - y[i]))",
    "docstring": "Evaluate RBFs, with centers at , at the point .",
    "type": "function",
    "file_path": "scipy\\scipy\\interpolate\\_rbfinterp_pythran.py",
    "ast_data": "FunctionDef name:kernel_vector arg:x arg:y arg:kernel_func arg:out arguments arg arg arg arg For Call Assign Call Call"
  },
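A self-contained run of the same routine with a Gaussian RBF (the kernel choice here is illustrative, not scipy's):

```python
import numpy as np

def kernel_vector(x, y, kernel_func, out):
    # Evaluate the RBF at ||x - y[i]|| for each center y[i].
    for i in range(y.shape[0]):
        out[i] = kernel_func(np.linalg.norm(x - y[i]))

x = np.array([0.0, 0.0])
y = np.array([[0.0, 0.0], [1.0, 0.0]])
out = np.empty(2)
kernel_vector(x, y, lambda r: np.exp(-r**2), out)
print(out)  # [1.0, exp(-1)]
```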
  {
    "library": "pytorch",
    "name": "FailedMatch",
    "source_code": "class FailedMatch(RuntimeError):\n    format_string: str\n\n    def __init__(self, format_string: str, *args: Any, **kwargs: Any) -> None:\n        self.format_string = format_string\n        if len(format_string) > 200:\n            raise RuntimeError(f'Format string too long - use lazy construction of strings instead. Format string is\\n {format_string}')\n        self.args = args\n        self.kwargs = kwargs\n\n    def __str__(self) -> str:\n        return self.format_string.format(*self.args, **self.kwargs)\n\n    def __bool__(self) -> bool:\n        return False",
    "docstring": "Represents a unsuccessful match. The object is returned to represent a failure to match a pattern.",
    "type": "class",
    "file_path": "pytorch\\torch\\_inductor\\pattern_matcher.py",
    "ast_data": "ClassDef name:FailedMatch FunctionDef name:__init__ arg:self arg:format_string arguments arg arg arg arg Assign If Compare Call Raise Call Assign Assign FunctionDef name:__str__ arg:self arguments arg Return return:yes Call FunctionDef name:__bool__ arg:self arguments arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "real_inputs",
    "source_code": "@property\ndef real_inputs(self):\n    return _real_inputs._get_handler()",
    "docstring": "non-fake example inputs",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\virtualized.py",
    "ast_data": "FunctionDef name:real_inputs arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "numpy",
    "name": "less",
    "source_code": "@array_function_dispatch(_binary_op_dispatcher)\ndef less(x1, x2):\n    return compare_chararrays(x1, x2, '<', True)",
    "docstring": "Return (x1 >> import numpy as np >>> x1 = np.array(['a', 'b', 'c']) >>> np.char.less(x1, 'b') array([True, False, False])",
    "type": "function",
    "file_path": "numpy\\numpy\\_core\\defchararray.py",
    "ast_data": "FunctionDef name:less arg:x1 arg:x2 arguments arg arg Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "initialize_local_variables",
    "source_code": "@tf_export(v1=['initialize_local_variables'])\n@tf_should_use.should_use_result\n@deprecated('2017-03-02', 'Use `tf.local_variables_initializer` instead.')\ndef initialize_local_variables():\n    return local_variables_initializer()",
    "docstring": "See .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\variables.py",
    "ast_data": "FunctionDef name:initialize_local_variables arguments Return return:yes Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "LazyConv2d",
    "source_code": "class LazyConv2d(_LazyConvXdMixin, Conv2d):\n    cls_to_become = Conv2d\n\n    def __init__(self, out_channels: int, kernel_size: _size_2_t, stride: _size_2_t=1, padding: _size_2_t=0, dilation: _size_2_t=1, groups: int=1, bias: bool=True, padding_mode: str='zeros', device=None, dtype=None) -> None:\n        factory_kwargs = {'device': device, 'dtype': dtype}\n        super().__init__(0, 0, kernel_size, stride, padding, dilation, groups, False, padding_mode, **factory_kwargs)\n        self.weight = UninitializedParameter(**factory_kwargs)\n        self.out_channels = out_channels\n        if bias:\n            self.bias = UninitializedParameter(**factory_kwargs)\n\n    def _get_num_spatial_dims(self) -> int:\n        return 2",
    "docstring": "A :class: module with lazy initialization of the `Conv2dweightbiastorch.nn.modules.lazy.LazyModuleMixintorch.nn.Conv2dtorch.nn.modules.lazy.LazyModuleMixin`",
    "type": "class",
    "file_path": "pytorch\\torch\\nn\\modules\\conv.py",
    "ast_data": "ClassDef name:LazyConv2d Assign FunctionDef name:__init__ arg:self arg:out_channels arg:kernel_size arg:stride arg:padding arg:dilation arg:groups arg:bias arg:padding_mode arg:device arg:dtype arguments arg arg arg arg arg arg arg arg arg arg arg Assign Call Call Assign Call Assign If Assign Call FunctionDef name:_get_num_spatial_dims arg:self arguments arg Return return:yes"
  },
  {
    "library": "pandas",
    "name": "get_config",
    "source_code": "def get_config():\n    cfg = VersioneerConfig()\n    cfg.VCS = 'git'\n    cfg.style = 'pep440'\n    cfg.tag_prefix = 'v'\n    cfg.parentdir_prefix = 'pandas-'\n    cfg.versionfile_source = 'pandas/_version.py'\n    cfg.verbose = False\n    return cfg",
    "docstring": "Create, populate and return the VersioneerConfig() object.",
    "type": "function",
    "file_path": "pandas\\pandas\\_version.py",
    "ast_data": "FunctionDef name:get_config arguments Assign Call Assign Assign Assign Assign Assign Assign Return return:yes"
  },
  {
    "library": "scipy",
    "name": "splrep",
    "source_code": "def splrep(x, y, w=None, xb=None, xe=None, k=3, task=0, s=None, t=None, full_output=0, per=0, quiet=1):\n    res = _impl.splrep(x, y, w, xb, xe, k, task, s, t, full_output, per, quiet)\n    return res",
    "docstring": "Find the B-spline representation of a 1-D curve. .. legacy:: function Specifically, we recommend using in new code. Given the set of data points `. >>> import numpy as np >>> import matplotlib.pyplot as plt >>> from scipy.interpolate import splev, splrep >>> x = np.linspace(0, 10, 10) >>> y = np.sin(x) >>> spl = splrep(x, y) >>> x2 = np.linspace(0, 10, 200) >>> y2 = splev(x2, spl) >>> plt.plot(x, y, 'o', x2, y2) >>> plt.show()",
    "type": "function",
    "file_path": "scipy\\scipy\\interpolate\\_fitpack_py.py",
    "ast_data": "FunctionDef name:splrep arg:x arg:y arg:w arg:xb arg:xe arg:k arg:task arg:s arg:t arg:full_output arg:per arg:quiet arguments arg arg arg arg arg arg arg arg arg arg arg arg Assign Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "register_full_backward_hook",
    "source_code": "def register_full_backward_hook(self, hook: Callable[['Module', _grad_t, _grad_t], Union[None, _grad_t]], prepend: bool=False) -> RemovableHandle:\n    if self._is_full_backward_hook is False:\n        raise RuntimeError('Cannot use both regular backward hooks and full backward hooks on a single Module. Please use only one of them.')\n    self._is_full_backward_hook = True\n    handle = RemovableHandle(self._backward_hooks)\n    self._backward_hooks[handle.id] = hook\n    if prepend:\n        self._backward_hooks.move_to_end(handle.id, last=False)\n    return handle",
    "docstring": "Register a backward hook on the module. The hook will be called every time the gradients with respect to a module are computed, and its firing rules are as follows: 1. Ordinarily, the hook fires when the gradients are computed with respect to the module inputs. 2. If none of the module inputs require gradients, the hook will fire when the gradients are computed with respect to module outputs. 3. If none of the module outputs require gradients, then the hooks will not fire. The hook should have the following signature:: hook(module, grad_input, grad_output) -> tuple(Tensor) or None The :attr: and :attr: are tuples that contain the gradients with respect to the inputs and outputs respectively. The hook should not modify its arguments, but it can optionally return a new gradient with respect to the input that will be used in place of :attr: in subsequent computations. :attr: will only correspond to the inputs given as positional arguments and all kwarg arguments are ignored. Entries in :attr: and :attr: will be `torch.nn.Moduletorch.nn.Moduleregister_module_full_backward_hooktorch.utils.hooks.RemovableHandle`",
    "type": "method",
    "file_path": "pytorch\\torch\\nn\\modules\\module.py",
    "ast_data": "FunctionDef name:register_full_backward_hook arg:self arg:hook arg:prepend arguments arg arg arg If Compare Raise Call Assign Assign Call Assign If Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "parse",
    "source_code": "def parse(src, preamble_len=0, single_node=True):\n    module_node = gast.parse(src)\n    nodes = module_node.body\n    if preamble_len:\n        nodes = nodes[preamble_len:]\n    if single_node:\n        if len(nodes) != 1:\n            raise ValueError('expected exactly one node, got {}'.format(nodes))\n        return nodes[0]\n    return nodes",
    "docstring": "Returns the AST of given piece of code. Args: src: Text preamble_len: Int, indicates leading nodes in the parsed AST which should be dropped. single_node: Bool, whether is assumed to be represented by exactly one AST node. Returns: ast.AST",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\autograph\\pyct\\parser.py",
    "ast_data": "FunctionDef name:parse arg:src arg:preamble_len arg:single_node arguments arg arg arg Assign Call Assign If Assign If If Compare Call Raise Call Call Return return:yes Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "override_error_code_in_rootcause_data",
    "source_code": "def override_error_code_in_rootcause_data(self, rootcause_error_file: str, rootcause_error: dict[str, Any], error_code: int=0):\n    if 'message' not in rootcause_error:\n        logger.warning('child error file (%s) does not have field `message`. \\ncannot override error code: %s', rootcause_error_file, error_code)\n    elif isinstance(rootcause_error['message'], str):\n        logger.warning('child error file (%s) has a new message format. \\nskipping error code override', rootcause_error_file)\n    else:\n        rootcause_error['message']['errorCode'] = error_code",
    "docstring": "Modify the rootcause_error read from the file, to correctly set the exit code.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\elastic\\multiprocessing\\errors\\error_handler.py",
    "ast_data": "FunctionDef name:override_error_code_in_rootcause_data arg:self arg:rootcause_error_file arg:rootcause_error arg:error_code arguments arg arg arg arg If Compare Call If Call Call Assign"
  },
  {
    "library": "tensorflow",
    "name": "ExitResult",
    "source_code": "def ExitResult(self, result):\n    if self._outer_context:\n\n        def fn(x):\n            self._outer_context.AddName(x.name)\n            return x\n        nest.map_structure(fn, result, expand_composites=True)",
    "docstring": "Make a list of tensors available in the outer context.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\control_flow_ops.py",
    "ast_data": "FunctionDef name:ExitResult arg:self arg:result arguments arg arg If FunctionDef name:fn arg:x arguments arg Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "create_args",
    "source_code": "def create_args(parser=None):\n    parser.add_argument('--multi-instance', '--multi_instance', action='store_true', default=False, help='Enable multi-instance, by default one instance per node')\n    parser.add_argument('-m', '--module', default=False, action='store_true', help='Changes each process to interpret the launch script as a python module, executing with the same behavior as\"python -m\".')\n    parser.add_argument('--no-python', '--no_python', default=False, action='store_true', help='Do not prepend the --program script with \"python\" - just exec it directly. Useful when the script is not a Python script.')\n    _add_memory_allocator_params(parser)\n    _add_kmp_iomp_params(parser)\n    _add_multi_instance_params(parser)\n    parser.add_argument('program', type=str, help='The full path to the program/script to be launched. followed by all the arguments for the script')\n    parser.add_argument('program_args', nargs=REMAINDER)",
    "docstring": "Parse the command line options. @retval ArgumentParser",
    "type": "function",
    "file_path": "pytorch\\torch\\backends\\xeon\\run_cpu.py",
    "ast_data": "FunctionDef name:create_args arg:parser arguments arg Call Call Call Call Call Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_get_training_eval_metrics",
    "source_code": "def _get_training_eval_metrics(self):\n    metrics = []\n    metrics.extend(getattr(self, '_output_loss_metrics', None) or [])\n    metrics.extend(getattr(self, 'metrics', None) or [])\n    return metrics",
    "docstring": "Returns all the metrics that are to be reported. This includes the output loss metrics, compile metrics/weighted metrics, add_metric metrics.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\training_v1.py",
    "ast_data": "FunctionDef name:_get_training_eval_metrics arg:self arguments arg Assign Call BoolOp Call Call BoolOp Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "update_op",
    "source_code": "@abc.abstractmethod\ndef update_op(self, optimizer, g):\n    raise NotImplementedError('Calling an abstract method.')",
    "docstring": "Returns the update ops for updating the variable.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\training\\optimizer.py",
    "ast_data": "FunctionDef name:update_op arg:self arg:optimizer arg:g arguments arg arg arg Raise Call"
  },
  {
    "library": "pytorch",
    "name": "lookup_backend",
    "source_code": "def lookup_backend(compiler_fn):\n    if isinstance(compiler_fn, str):\n        if compiler_fn not in _BACKENDS:\n            _lazy_import()\n        if compiler_fn not in _BACKENDS:\n            from ..exc import InvalidBackend\n            raise InvalidBackend(name=compiler_fn)\n        if compiler_fn not in _COMPILER_FNS:\n            entry_point = _BACKENDS[compiler_fn]\n            register_backend(compiler_fn=entry_point.load(), name=compiler_fn)\n        compiler_fn = _COMPILER_FNS[compiler_fn]\n    return compiler_fn",
    "docstring": "Expand backend strings to functions",
    "type": "function",
    "file_path": "pytorch\\torch\\_dynamo\\backends\\registry.py",
    "ast_data": "FunctionDef name:lookup_backend arg:compiler_fn arguments arg If Call If Compare Call If Compare Raise Call If Compare Assign Call Call Assign Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "flatten_refinement_rule",
    "source_code": "@register_refinement_rule(torch.flatten)\ndef flatten_refinement_rule(n: Node):\n    assert isinstance(n.args[0], Node)\n    eq_const = []\n    start_dim = 1\n    end_dim = -1\n    if len(n.args) > 1:\n        assert isinstance(n.args[1], int)\n        start_dim = n.args[1]\n    if len(n.args) > 2:\n        assert isinstance(n.args[2], int)\n        end_dim = n.args[2]\n    if isinstance(n.type, TensorType) and isinstance(n.args[0].type, TensorType):\n        l = len(n.type.__args__)\n        arg_type = n.args[0].type\n        start_dim = l if start_dim == -1 else start_dim\n        end_dim = l + end_dim + 1 if end_dim < 0 else end_dim + 1\n        for t1, t2 in zip(n.type.__args__[0:start_dim], arg_type.__args__[0:start_dim]):\n            eq_const.append(Equality(t1, t2))\n        for t1, t2 in zip(n.type.__args__[end_dim:], arg_type.__args__[end_dim:]):\n            eq_const.append(Equality(t1, t2))\n    return eq_const",
    "docstring": "Generates equality constraints between the dimensions of the input and output that will not be involved in the flatten operation",
    "type": "function",
    "file_path": "pytorch\\torch\\fx\\experimental\\graph_gradual_typechecker.py",
    "ast_data": "FunctionDef name:flatten_refinement_rule arg:n arguments arg Call Assign Assign Assign If Compare Call Call Assign If Compare Call Call Assign If BoolOp Call Call Assign Call Assign Assign Compare Assign Compare For Call Call Call For Call Call Call Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "kleene_and",
    "source_code": "def kleene_and(left: bool | libmissing.NAType | np.ndarray, right: bool | libmissing.NAType | np.ndarray, left_mask: np.ndarray | None, right_mask: np.ndarray | None) -> tuple[npt.NDArray[np.bool_], npt.NDArray[np.bool_]]:\n    if left_mask is None:\n        return kleene_and(right, left, right_mask, left_mask)\n    if not isinstance(left, np.ndarray):\n        raise TypeError('Either `left` or `right` need to be a np.ndarray.')\n    raise_for_nan(right, method='and')\n    if right is libmissing.NA:\n        result = np.zeros_like(left)\n    else:\n        result = left & right\n    if right_mask is None:\n        if right is libmissing.NA:\n            mask = left & ~left_mask | left_mask\n        else:\n            mask = left_mask.copy()\n            if right is False:\n                mask[:] = False\n    else:\n        left_false = ~(left | left_mask)\n        right_false = ~(right | right_mask)\n        mask = left_mask & ~right_false | right_mask & ~left_false\n    return (result, mask)",
    "docstring": "Boolean `leftright` value is a scalar. Returns ------- result, mask: ndarray[bool] The result of the logical xor, and the new mask.",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\ops\\mask_ops.py",
    "ast_data": "FunctionDef name:kleene_and arg:left arg:right arg:left_mask arg:right_mask arguments arg arg arg arg If Compare Return return:yes Call If Call Raise Call Call If Compare Assign Call Assign If Compare If Compare Assign Assign Call If Compare Assign Assign Assign Assign Return return:yes"
  },
  {
    "library": "sphinx",
    "name": "getLogger",
    "source_code": "def getLogger(name: str) -> SphinxLoggerAdapter:\n    logger = logging.getLogger(NAMESPACE + '.' + name)\n    logger.disabled = False\n    return SphinxLoggerAdapter(logger, {})",
    "docstring": "Get logger wrapped by :class:. Sphinx logger always uses `` namespace to be independent from settings of root logger. It ensures logging is consistent even if a third-party extension or imported application resets logger settings. Example usage:: >>> from sphinx.util import logging >>> logger = logging.getLogger(__name__) >>> logger.info('Hello, this is an extension!') Hello, this is an extension!",
    "type": "function",
    "file_path": "sphinx\\sphinx\\util\\logging.py",
    "ast_data": "FunctionDef name:getLogger arg:name arguments arg Assign Call Assign Return return:yes Call"
  },
  {
    "library": "scrapy",
    "name": "log",
    "source_code": "def log(self, message: Any, level: int=logging.DEBUG, **kw: Any) -> None:\n    self.logger.log(level, message, **kw)",
    "docstring": "Log the given message at the given log level This helper wraps a log call to the logger within the spider, but you can use it directly (e.g. Spider.logger.info('msg')) or use any other Python logger too.",
    "type": "method",
    "file_path": "scrapy\\scrapy\\spiders\\__init__.py",
    "ast_data": "FunctionDef name:log arg:self arg:message arg:level arguments arg arg arg arg Call"
  },
  {
    "library": "tensorflow",
    "name": "_maybe_select_class_id",
    "source_code": "def _maybe_select_class_id(labels, predictions_idx, selected_id=None):\n    if selected_id is None:\n        return (labels, predictions_idx)\n    return (_select_class_id(labels, selected_id), _select_class_id(predictions_idx, selected_id))",
    "docstring": "If class ID is specified, filter all other classes. Args: labels: or with shape [D1, ... DN, num_labels], where N >= 1 and num_labels is the number of target classes for the associated prediction. Commonly, N=1 and has shape [batch_size, num_labels]. [D1, ... DN] must match . predictions_idx: of class IDs, with shape [D1, ... DN, k] where N >= 1. Commonly, N=1 and has shape [batch size, k]. selected_id: Int id to select. Returns: Tuple of and , possibly with classes removed.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\metrics_impl.py",
    "ast_data": "FunctionDef name:_maybe_select_class_id arg:labels arg:predictions_idx arg:selected_id arguments arg arg arg If Compare Return return:yes Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "SubprocPickler",
    "source_code": "class SubprocPickler:\n\n    def dumps(self, obj: object) -> bytes:\n        return pickle.dumps(obj, pickle.HIGHEST_PROTOCOL)\n\n    def loads(self, data: bytes) -> object:\n        return pickle.loads(data)",
    "docstring": "Allows a caller to provide a custom pickler for passing data with the subprocess.",
    "type": "class",
    "file_path": "pytorch\\torch\\_inductor\\compile_worker\\subproc_pool.py",
    "ast_data": "ClassDef name:SubprocPickler FunctionDef name:dumps arg:self arg:obj arguments arg arg Return return:yes Call FunctionDef name:loads arg:self arg:data arguments arg arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "list_directory",
    "source_code": "@tf_export(v1=['gfile.ListDirectory'])\ndef list_directory(dirname):\n    return list_directory_v2(dirname)",
    "docstring": "Returns a list of entries contained within a directory. The list is in arbitrary order. It does not contain the special entries \".\" and \"..\". Args: dirname: string, path to a directory Returns: [filename1, filename2, ... filenameN] as strings Raises: errors.NotFoundError if directory doesn't exist",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\lib\\io\\file_io.py",
    "ast_data": "FunctionDef name:list_directory arg:dirname arguments arg Return return:yes Call Call"
  },
  {
    "library": "scikit-learn",
    "name": "LinearModel",
    "source_code": "class LinearModel(BaseEstimator, metaclass=ABCMeta):\n\n    @abstractmethod\n    def fit(self, X, y):\n        pass\n\n    def _decision_function(self, X):\n        check_is_fitted(self)\n        X = validate_data(self, X, accept_sparse=['csr', 'csc', 'coo'], reset=False)\n        coef_ = self.coef_\n        if coef_.ndim == 1:\n            return X @ coef_ + self.intercept_\n        else:\n            return X @ coef_.T + self.intercept_\n\n    def predict(self, X):\n        return self._decision_function(X)\n\n    def _set_intercept(self, X_offset, y_offset, X_scale):\n        xp, _ = get_namespace(X_offset, y_offset, X_scale)\n        if self.fit_intercept:\n            coef_ = xp.astype(self.coef_, X_scale.dtype, copy=False)\n            coef_ = self.coef_ = xp.divide(coef_, X_scale)\n            if coef_.ndim == 1:\n                intercept_ = y_offset - X_offset @ coef_\n            else:\n                intercept_ = y_offset - X_offset @ coef_.T\n            self.intercept_ = intercept_\n        else:\n            self.intercept_ = 0.0",
    "docstring": "Base class for Linear Models",
    "type": "class",
    "file_path": "scikit-learn\\sklearn\\linear_model\\_base.py",
    "ast_data": "ClassDef name:LinearModel FunctionDef name:fit arg:self arg:X arg:y arguments arg arg arg FunctionDef name:_decision_function arg:self arg:X arguments arg arg Call Assign Call Assign If Compare Return return:yes Return return:yes FunctionDef name:predict arg:self arg:X arguments arg arg Return return:yes Call FunctionDef name:_set_intercept arg:self arg:X_offset arg:y_offset arg:X_scale arguments arg arg arg arg Assign Call If Assign Call Assign Call If Compare Assign Assign Assign Assign"
  },
  {
    "library": "pandas",
    "name": "_color_in_style",
    "source_code": "def _color_in_style(style: str) -> bool:\n    return not set(mpl.colors.BASE_COLORS).isdisjoint(style)",
    "docstring": "Check if there is a color letter in the style string.",
    "type": "function",
    "file_path": "pandas\\pandas\\plotting\\_matplotlib\\core.py",
    "ast_data": "FunctionDef name:_color_in_style arg:style arguments arg Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_SingleWorkerCallableIterator",
    "source_code": "class _SingleWorkerCallableIterator(object):\n\n    def __init__(self, fn, worker, devices):\n        self._fn = fn\n        self._worker = worker\n        self._devices = devices\n\n    def get_next(self, device, name=None):\n        del device, name\n        with ops.device(self._worker):\n            return self._fn()\n\n    def get_next_as_list(self, name=None):\n        del name\n        with ops.device(self._worker):\n            data_list = [self._fn() for _ in self._devices]\n            return data_list\n\n    def get_next_as_optional_list(self):\n        with ops.device(self._worker):\n            data_list = [optional_ops.Optional.from_value(self._fn()) for _ in self._devices]\n            return data_list\n\n    def initialize(self):\n        return []",
    "docstring": "Iterator for a single tensor-returning callable.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\v1\\input_lib.py",
    "ast_data": "ClassDef name:_SingleWorkerCallableIterator FunctionDef name:__init__ arg:self arg:fn arg:worker arg:devices arguments arg arg arg arg Assign Assign Assign FunctionDef name:get_next arg:self arg:device arg:name arguments arg arg arg With Call Return return:yes Call FunctionDef name:get_next_as_list arg:self arg:name arguments arg arg With Call Assign Call Return return:yes FunctionDef name:get_next_as_optional_list arg:self arguments arg With Call Assign Call Call Return return:yes FunctionDef name:initialize arg:self arguments arg Return return:no"
  },
  {
    "library": "pytorch",
    "name": "dependency_graph_string",
    "source_code": "def dependency_graph_string(self) -> str:\n    return self.dependency_graph.to_dot()",
    "docstring": "Returns digraph string representation of dependencies in package. Returns: A string representation of dependencies in package.",
    "type": "method",
    "file_path": "pytorch\\torch\\package\\package_exporter.py",
    "ast_data": "FunctionDef name:dependency_graph_string arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "get_group_stride",
    "source_code": "def get_group_stride(self):\n    _size = self.get_size()\n    _stride = self.get_stride()\n    return ([_size, []], _stride)",
    "docstring": "get output sizes and strides, for template_codegen",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\ir.py",
    "ast_data": "FunctionDef name:get_group_stride arg:self arguments arg Assign Call Assign Call Return return:yes"
  },
  {
    "library": "kornia",
    "name": "pinhole_matrix",
    "source_code": "def pinhole_matrix(pinholes: Tensor, eps: float=1e-06) -> Tensor:\n    if not (len(pinholes.shape) == 2 and pinholes.shape[1] == 12):\n        raise AssertionError(pinholes.shape)\n    fx, fy, cx, cy = torch.chunk(pinholes[..., :4], 4, dim=1)\n    k = eye(4, device=pinholes.device, dtype=pinholes.dtype) + eps\n    k = k.view(1, 4, 4).repeat(pinholes.shape[0], 1, 1)\n    k[..., 0, 0:1] = fx\n    k[..., 0, 2:3] = cx\n    k[..., 1, 1:2] = fy\n    k[..., 1, 2:3] = cy\n    return k",
    "docstring": "Return the pinhole matrix from a pinhole model. .. note:: This method is going to be deprecated in version 0.2 in favour of :attr:. Args: pinholes: tensor of pinhole models. eps: epsilon for numerical stability. Returns: tensor of pinhole matrices. Shape: - Input: :math: - Output: :math: Example: >>> rng = torch.manual_seed(0) >>> pinhole = torch.rand(1, 12) # Nx12 >>> pinhole_matrix(pinhole) # Nx4x4 tensor([[[4.9626e-01, 1.0000e-06, 8.8477e-02, 1.0000e-06], [1.0000e-06, 7.6822e-01, 1.3203e-01, 1.0000e-06], [1.0000e-06, 1.0000e-06, 1.0000e+00, 1.0000e-06], [1.0000e-06, 1.0000e-06, 1.0000e-06, 1.0000e+00]]])",
    "type": "function",
    "file_path": "kornia\\kornia\\geometry\\camera\\pinhole.py",
    "ast_data": "FunctionDef name:pinhole_matrix arg:pinholes arg:eps arguments arg arg If BoolOp Compare Call Compare Raise Call Assign Call Assign Call Assign Call Call Assign Assign Assign Assign Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "constant_to_device",
    "source_code": "def constant_to_device(self, device: torch.device) -> IRNode:\n    return self.data.constant_to_device(device)",
    "docstring": "Move this to a given device. Requires that all reads are to constants.",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\ir.py",
    "ast_data": "FunctionDef name:constant_to_device arg:self arg:device arguments arg arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "_prune_redundant_deps",
    "source_code": "def _prune_redundant_deps(node: BaseSchedulerNode, name_to_fused_node: dict[str, BaseSchedulerNode], name_to_buf: dict[str, SchedulerBuffer]) -> None:\n    name_to_dep_count: Counter[str] = collections.Counter()\n    for dep in node.unmet_dependencies:\n        if not isinstance(dep, WeakDep):\n            op_name = name_to_buf[dep.name].defining_op_name()\n            name_to_dep_count[name_to_fused_node[op_name].get_name()] += 1\n\n    def should_prune(dep: Dep) -> bool:\n        if isinstance(dep, WeakDep):\n            op_name = name_to_buf[dep.name].defining_op_name()\n            is_redundant = name_to_dep_count[name_to_fused_node[op_name].get_name()] > 0\n            is_self_dep = name_to_fused_node[op_name] == node\n            return is_redundant or is_self_dep\n        else:\n            return False\n    deps_to_prune = OrderedSet((dep for dep in node.unmet_dependencies if should_prune(dep)))\n    if deps_to_prune:\n        node.unmet_dependencies = node.unmet_dependencies - deps_to_prune\n        node.set_read_writes(node.read_writes.remove_reads(deps_to_prune))",
    "docstring": "Prunes weakdeps intended for mutation ordering on an upstream fused node if after fusion there is another dependency on the fused upstream node, making the weakdep redundant In essence this enforces an ordering on fusions. As fusions occur, weakdeps will be incrementally removed, enabling other fusions, ensuring they are fused in order.",
    "type": "function",
    "file_path": "pytorch\\torch\\_inductor\\scheduler.py",
    "ast_data": "FunctionDef name:_prune_redundant_deps arg:node arg:name_to_fused_node arg:name_to_buf arguments arg arg arg Call For If Call Assign Call Call FunctionDef name:should_prune arg:dep arguments arg If Call Assign Call Assign Compare Call Assign Compare Return return:yes BoolOp Return return:yes Assign Call Call If Assign Call Call"
  },
  {
    "library": "tensorflow",
    "name": "ProcessUnusedLoopExits",
    "source_code": "def ProcessUnusedLoopExits(self, pending_count, to_ops_set):\n    loop_exits = []\n    for grad_state in self._map.values():\n        for y in grad_state.forward_loop_exits:\n            if pending_count[y.op] == 0:\n                grad_state.pending_exits_count -= 1\n                if y.op not in to_ops_set:\n                    grad_state.unused_exits.append(y)\n                if grad_state.pending_exits_count == 0:\n                    loop_exits.extend(grad_state.unused_exits)\n        for y in grad_state.forward_context.loop_enters:\n            if pending_count[y.op] == 0:\n                pending_count[y.op] = 1\n    return loop_exits",
    "docstring": "Process all the \"unused\" loop exits. The \"unused\" exits of the loops are added to . An exit is unused if its pending_count is 0. If there is an exit with real gradient, all these deferred exits will enter the backprop loop with zero gradient. Otherwise, they will enter the backprop loop with None. As an example, people often write: The exit node for x2 is not included by the betweenness analysis. But we need to backprop x2 if x2 is involved in computing v1. Args: pending_count: The number of backprop inputs for every op. to_ops_set: The set of ops for ys in gradients(ys, xs) Returns: The set of unused loop exits that we know at this point we need to backprop.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\control_flow_state.py",
    "ast_data": "FunctionDef name:ProcessUnusedLoopExits arg:self arg:pending_count arg:to_ops_set arguments arg arg arg Assign For Call For If Compare If Compare Call If Compare Call For If Compare Assign Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "trunc_to_int",
    "source_code": "def trunc_to_int(self, x: T, dtype: torch.dtype) -> T:\n    raise NotImplementedError",
    "docstring": "Convert x to dtype with truncation semantics (similar to how the int constructor works in Python). In Inductor codegen, this just decays to trunc and then to_dtype, but this composite operation helps roundtrips for Sympy evaluation. dtype is taken as an explicit parameter because the desired output dtype is typically the index dtype, which may vary between int32 and int64 depending on if we've shown that all the indexing operations can be done in int32.",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\ops_handler.py",
    "ast_data": "FunctionDef name:trunc_to_int arg:self arg:x arg:dtype arguments arg arg arg Raise"
  },
  {
    "library": "pytorch",
    "name": "ShapeComputeModule",
    "source_code": "class ShapeComputeModule(torch.nn.Module):\n    pass",
    "docstring": "Code-gen-ed module for tensor shape computation. module.prepare will mutate ser_model according to the computed operand shapes, based on the shapes of args. Returns a list of output templates.",
    "type": "class",
    "file_path": "pytorch\\torch\\backends\\_nnapi\\prepare.py",
    "ast_data": "ClassDef name:ShapeComputeModule"
  },
  {
    "library": "cherrypy",
    "name": "__init__",
    "source_code": "def __init__(self, callable, name=None):\n    Tool.__init__(self, None, callable, name)",
    "docstring": "Initialize an error tool.",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\_cptools.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:callable arg:name arguments arg arg arg Call"
  },
  {
    "library": "scikit-learn",
    "name": "_samme_proba",
    "source_code": "def _samme_proba(estimator, n_classes, X):\n    proba = estimator.predict_proba(X)\n    np.clip(proba, np.finfo(proba.dtype).eps, None, out=proba)\n    log_proba = np.log(proba)\n    return (n_classes - 1) * (log_proba - 1.0 / n_classes * log_proba.sum(axis=1)[:, np.newaxis])",
    "docstring": "Calculate algorithm 4, step 2, equation c) of Zhu et al [1]. References ---------- .. [1] J. Zhu, H. Zou, S. Rosset, T. Hastie, \"Multi-class AdaBoost\", 2009.",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\ensemble\\_weight_boosting.py",
    "ast_data": "FunctionDef name:_samme_proba arg:estimator arg:n_classes arg:X arguments arg arg arg Assign Call Call Call Assign Call Return return:yes Call"
  },
  {
    "library": "django",
    "name": "password_change",
    "source_code": "def password_change(self, request, extra_context=None):\n    from django.contrib.admin.forms import AdminPasswordChangeForm\n    from django.contrib.auth.views import PasswordChangeView\n    url = reverse('admin:password_change_done', current_app=self.name)\n    defaults = {'form_class': self.password_change_form or AdminPasswordChangeForm, 'success_url': url, 'extra_context': {**self.each_context(request), **(extra_context or {})}}\n    if self.password_change_template is not None:\n        defaults['template_name'] = self.password_change_template\n    request.current_app = self.name\n    return PasswordChangeView.as_view(**defaults)(request)",
    "docstring": "Handle the \"change password\" task -- both form display and validation.",
    "type": "method",
    "file_path": "django\\django\\contrib\\admin\\sites.py",
    "ast_data": "FunctionDef name:password_change arg:self arg:request arg:extra_context arguments arg arg arg Assign Call Assign BoolOp Call BoolOp If Compare Assign Assign Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "concatenate_unique",
    "source_code": "def concatenate_unique(la, lb):\n    la_set = set(la)\n    for l in lb:\n        if l not in la_set:\n            la.append(l)\n            la_set.add(l)\n    return la",
    "docstring": "Add all the elements of to if they are not there already. The elements added to maintain ordering with respect to . Args: la: List of Python objects. lb: List of Python objects. Returns: : The list with missing elements from .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\op_selector.py",
    "ast_data": "FunctionDef name:concatenate_unique arg:la arg:lb arguments arg arg Assign Call For If Compare Call Call Return return:yes"
  },
  {
    "library": "django",
    "name": "staticfiles_urlpatterns",
    "source_code": "def staticfiles_urlpatterns(prefix=None):\n    if prefix is None:\n        prefix = settings.STATIC_URL\n    return static(prefix, view=serve)",
    "docstring": "Helper function to return a URL pattern for serving static files.",
    "type": "function",
    "file_path": "django\\django\\contrib\\staticfiles\\urls.py",
    "ast_data": "FunctionDef name:staticfiles_urlpatterns arg:prefix arguments arg If Compare Assign Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "_print_tree",
    "source_code": "def _print_tree(self):\n    self._cpp_trifinder.print_tree()",
    "docstring": "Print a text representation of the node tree, which is useful for debugging purposes.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\tri\\_trifinder.py",
    "ast_data": "FunctionDef name:_print_tree arg:self arguments arg Call"
  },
  {
    "library": "tensorflow",
    "name": "_from_fields_and_rank",
    "source_code": "@classmethod\ndef _from_fields_and_rank(cls, fields, rank):\n    shape = None\n    for k, v in fields.items():\n        field_shape_untruncated = _dynamic_ragged_shape_spec_from_spec(v)\n        if field_shape_untruncated is None:\n            raise ValueError(f'Cannot convert spec of {k}.')\n        untruncated_rank = field_shape_untruncated.rank\n        if untruncated_rank is not None and untruncated_rank < rank:\n            raise ValueError(f'Rank of field {k} is {untruncated_rank}, but must be at least {rank}.')\n        field_shape = field_shape_untruncated._truncate(rank)\n        if shape is None:\n            shape = field_shape\n        else:\n            shape = shape._merge_with(field_shape)\n    return StructuredTensor.Spec(_ragged_shape=shape, _fields=fields)",
    "docstring": "Creates a spec of a StructuredTensor with fields and rank.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\structured\\structured_tensor.py",
    "ast_data": "FunctionDef name:_from_fields_and_rank arg:cls arg:fields arg:rank arguments arg arg arg Assign For Call Assign Call If Compare Raise Call Assign If BoolOp Compare Compare Raise Call Assign Call If Compare Assign Assign Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "placeholder_value",
    "source_code": "@doc_controls.do_not_doc_inheritable\ndef placeholder_value(self, placeholder_context):\n    if placeholder_context.unnest_only:\n        return self\n    component_placeholders = nest.map_structure(lambda x: x.placeholder_value(placeholder_context), self._component_specs)\n    return self._from_components(component_placeholders)",
    "docstring": "Value used for tracing a function signature with this TraceType. WARNING: Do not override. Args: placeholder_context: A class container for context information when creating a placeholder value. Returns: A placeholder whose components are recursively composed of placeholders themselves.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\type_spec.py",
    "ast_data": "FunctionDef name:placeholder_value arg:self arg:placeholder_context arguments arg arg If Return return:yes Assign Call arguments arg Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "compute",
    "source_code": "def compute(i, tas):\n    elems_value_batchable = [ta.read(i) for ta in elems_batchable_ta]\n    elems_value_flat = _elems_value_batchable_to_flat(elems_value_batchable, elems_flat_signature)\n    elems_value = elems_unflatten(elems_value_flat)\n    ag_ctx = autograph_ctx.control_status_ctx()\n    autographed_fn = autograph.tf_convert(fn, ag_ctx)\n    result_value = autographed_fn(elems_value)\n    nest.assert_same_structure(fn_output_signature or elems, result_value)\n    result_value_flat = nest.flatten(result_value)\n    result_value_batchable = _result_value_flat_to_batchable(result_value_flat, result_flat_signature)\n    tas = [ta.write(i, value) for ta, value in zip(tas, result_value_batchable)]\n    return (i + 1, tas)",
    "docstring": "The loop body of map_fn. Args: i: the loop counter tas: the flat TensorArray accumulator list Returns: (i + 1, tas): the updated counter + updated TensorArrays Raises: TypeError: if fn_output_signature and result_value structure don't match ValueType: if fn_output_signature and result_value lengths don't match",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\map_fn.py",
    "ast_data": "FunctionDef name:compute arg:i arg:tas arguments arg arg Assign Call Assign Call Assign Call Assign Call Assign Call Assign Call Call BoolOp Assign Call Assign Call Assign Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "add_to_collections",
    "source_code": "def add_to_collections(self, names, value) -> None:\n    names = (names,) if isinstance(names, str) else set(names)\n    for name in names:\n        self.add_to_collection(name, value)",
    "docstring": "Stores in the collections given by . Note that collections are not sets, so it is possible to add a value to a collection several times. This function makes sure that duplicates in are ignored, but it will not check for pre-existing membership of in any of the collections in . can be any iterable, but if is a string, it is treated as a single collection name. Args: names: The keys for the collections to add to. The class contains many standard names for collections. value: The value to add to the collections.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\ops.py",
    "ast_data": "FunctionDef name:add_to_collections arg:self arg:names arg:value arguments arg arg arg Assign Call Call For Call"
  },
  {
    "library": "pytorch",
    "name": "masked",
    "source_code": "def masked(self, mask: T, body: Callable[[], T], other: T) -> T:\n    raise NotImplementedError",
    "docstring": "Computes body, but only perform loads/stores if the boolean mask evaluates to true. For example, you would use this if you needed to perform an indirect load that may not be valid on some elements; without masking, invalid accesses can cause IMAs. When mask is true, the result is the result of body; otherwise it is other. Here, needs to be a constant. Contrast this with ops.where, which can multiplex between two values that have been unconditionally computed.",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\ops_handler.py",
    "ast_data": "FunctionDef name:masked arg:self arg:mask arg:body arg:other arguments arg arg arg arg Raise"
  },
  {
    "library": "scipy",
    "name": "make",
    "source_code": "def make(self, src_templ, evaldict=None, addsource=False, **attrs):\n    src = src_templ % vars(self)\n    evaldict = evaldict or {}\n    mo = DEF.match(src)\n    if mo is None:\n        raise SyntaxError(f'not a valid function template\\n{src}')\n    name = mo.group(1)\n    names = set([name] + [arg.strip(' *') for arg in self.shortsignature.split(',')])\n    for n in names:\n        if n in ('_func_', '_call_'):\n            raise NameError(f'{n} is overridden in\\n{src}')\n    if not src.endswith('\\n'):\n        src += '\\n'\n    filename = f'<decorator-gen-{next(self._compile_count)}>'\n    try:\n        code = compile(src, filename, 'single')\n        exec(code, evaldict)\n    except:\n        print('Error in generated code:', file=sys.stderr)\n        print(src, file=sys.stderr)\n        raise\n    func = evaldict[name]\n    if addsource:\n        attrs['__source__'] = src\n    self.update(func, **attrs)\n    return func",
    "docstring": "Make a new function from a given template and update the signature",
    "type": "method",
    "file_path": "scipy\\scipy\\_lib\\decorator.py",
    "ast_data": "FunctionDef name:make arg:self arg:src_templ arg:evaldict arg:addsource arguments arg arg arg arg arg Assign Call Assign BoolOp Assign Call If Compare Raise Call Assign Call Assign Call Call Call For If Compare Raise Call If Call Assign Call Try Assign Call Call ExceptHandler Call Call Raise Assign If Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_analyze_tab_complete_input",
    "source_code": "def _analyze_tab_complete_input(self, text):\n    text = text.lstrip()\n    if not text:\n        context = ''\n        prefix = ''\n        except_last_word = ''\n    else:\n        items = text.split(' ')\n        if len(items) == 1:\n            context = ''\n            prefix = items[0]\n            except_last_word = ''\n        else:\n            context = items[0]\n            prefix = items[-1]\n            except_last_word = ' '.join(items[:-1]) + ' '\n    return (context, prefix, except_last_word)",
    "docstring": "Analyze raw input to tab-completer. Args: text: (str) the full, raw input text to be tab-completed. Returns: context: (str) the context str. For example, If text == \"print_tensor softmax\", returns \"print_tensor\". If text == \"print\", returns \"\". If text == \"\", returns \"\". prefix: (str) the prefix to be tab-completed, from the last word. For example, if text == \"print_tensor softmax\", returns \"softmax\". If text == \"print\", returns \"print\". If text == \"\", returns \"\". except_last_word: (str) the input text, except the last word. For example, if text == \"print_tensor softmax\", returns \"print_tensor\". If text == \"print_tensor -a softmax\", returns \"print_tensor -a\". If text == \"print\", returns \"\". If text == \"\", returns \"\".",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\debug\\cli\\base_ui.py",
    "ast_data": "FunctionDef name:_analyze_tab_complete_input arg:self arg:text arguments arg arg Assign Call If Assign Assign Assign Assign Call If Compare Call Assign Assign Assign Assign Assign Assign Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "__init__",
    "source_code": "@compatibility(is_backward_compatible=True)\ndef __init__(self, autowrap_modules: tuple[ModuleType]=(math,), autowrap_functions: tuple[Callable, ...]=(), param_shapes_constant: bool=False) -> None:\n    super().__init__()\n    self._autowrap_function_ids: set[int] = {id(value) for name, value in chain.from_iterable((m.__dict__.items() for m in autowrap_modules)) if not name.startswith('_') and callable(value)}\n    self._autowrap_function_ids.update({id(f) for f in autowrap_functions})\n    self._autowrap_search: list[ModuleType] = list(autowrap_modules)\n    self.param_shapes_constant = param_shapes_constant\n    self.submodule_paths: Optional[dict[torch.nn.Module, str]] = None\n    self.root_module_name: str = ''\n    self.scope = Scope('', None)\n    self.module_stack = collections.OrderedDict()\n    self.num_calls: dict[str, int] = {}\n    self.node_name_to_scope: dict[str, tuple[str, type]] = {}",
    "docstring": "Construct a Tracer object. Args: autowrap_modules (Tuple[ModuleType]): defaults to , Python modules whose functions should be wrapped automatically without needing to use fx.wrap(). Backward-compatibility for this parameter is guaranteed. autowrap_functions (Tuple[Callable, ...]): defaults to , Python functions that should be wrapped automatically without needing to use fx.wrap(). Backward compatibility for this parameter is guaranteed. param_shapes_constant (bool): When this flag is set, calls to shape, size and a few other shape like attributes of a module's parameter will be evaluated directly, rather than returning a new Proxy value for an attribute access. Backward compatibility for this parameter is guaranteed.",
    "type": "method",
    "file_path": "pytorch\\torch\\fx\\_symbolic_trace.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:autowrap_modules arg:autowrap_functions arg:param_shapes_constant arguments arg arg arg arg Call Call Call Call Call BoolOp Call Call Call Call Call Assign Assign Call Assign Call Call"
  },
  {
    "library": "tensorflow",
    "name": "scatter_nd_update",
    "source_code": "def scatter_nd_update(self, indices, updates, name=None):\n    return self._lazy_read(gen_state_ops.resource_scatter_nd_update(self.handle, indices, ops.convert_to_tensor(updates, self.dtype), name=name))",
    "docstring": "Applies sparse assignment to individual values or slices in a Variable. is a with rank and is a of rank . must be integer tensor, containing indices into . It must be shape where . The innermost dimension of (with length ) corresponds to indices into elements (if ) or slices (if ) along the th dimension of . is of rank with shape: For example, say we want to add 4 scattered elements to a rank-1 tensor to 8 elements. In Python, that update would look like this: The resulting update to ref would look like this: [1, 11, 3, 10, 9, 6, 7, 12] See for more details about how to make updates to slices. Args: indices: The indices to be used in the operation. updates: The values to be used in the operation. name: the name of the operation. Returns: The updated variable.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\resource_variable_ops.py",
    "ast_data": "FunctionDef name:scatter_nd_update arg:self arg:indices arg:updates arg:name arguments arg arg arg arg Return return:yes Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "reverse",
    "source_code": "@dispatch.dispatch_for_api(array_ops.reverse)\ndef reverse(tensor: ragged_tensor.Ragged, axis, name=None):\n    type_error_msg = '`axis` must be a list of int or a constant tensorwhen reversing axes in a ragged tensor'\n    with ops.name_scope(name, 'Reverse', [tensor, axis]):\n        if isinstance(axis, tensor_lib.Tensor):\n            axis = tensor_util.constant_value(axis)\n            if axis is None:\n                raise TypeError(type_error_msg)\n        elif not (isinstance(axis, (list, tuple)) and all((isinstance(dim, int) for dim in axis))):\n            raise TypeError(type_error_msg)\n        tensor = ragged_tensor.convert_to_tensor_or_ragged_tensor(tensor, name='tensor')\n        axis = [array_ops.get_positive_axis(dim, tensor.shape.rank, 'axis[%d]' % i, 'rank(tensor)') for i, dim in enumerate(axis)]\n        slices = [slice(None)] * (max(axis) + 1 if axis else 0)\n        for dim in axis:\n            slices[dim] = slice(None, None, -1)\n        return tensor[tuple(slices)]",
    "docstring": "Reverses a RaggedTensor along the specified axes. #### Example: >>> data = tf.ragged.constant([ ... [[1, 2], [3, 4]], [[5, 6]], [[7, 8], [9, 10], [11, 12]]]) >>> tf.reverse(data, axis=[0, 2]) Args: tensor: A 'RaggedTensor' to reverse. axis: A list or tuple of 'int' or a constant 1D 'tf.Tensor'. The indices of the axes to reverse. name: A name prefix for the returned tensor (optional). Returns: A 'RaggedTensor'.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\ragged_array_ops.py",
    "ast_data": "FunctionDef name:reverse arg:tensor arg:axis arg:name arguments arg arg arg Assign With Call If Call Assign Call If Compare Raise Call If BoolOp Call Call Call Raise Call Assign Call Assign Call Call Assign Call Call For Assign Call Return return:yes Call Call"
  },
  {
    "library": "pandas",
    "name": "_initialize_memory_usage",
    "source_code": "def _initialize_memory_usage(memory_usage: bool | str | None=None) -> bool | str:\n    if memory_usage is None:\n        memory_usage = get_option('display.memory_usage')\n    return memory_usage",
    "docstring": "Get memory usage based on inputs and display options.",
    "type": "function",
    "file_path": "pandas\\pandas\\io\\formats\\info.py",
    "ast_data": "FunctionDef name:_initialize_memory_usage arg:memory_usage arguments arg If Compare Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_XLogyGrad",
    "source_code": "@ops.RegisterGradient('Xlogy')\ndef _XLogyGrad(op: ops.Operation, grad):\n    x = op.inputs[0]\n    y = op.inputs[1]\n    sx = array_ops.shape(x)\n    sy = array_ops.shape(y)\n    rx, ry = gen_array_ops.broadcast_gradient_args(sx, sy)\n    with ops.control_dependencies([grad]):\n        not_zero_x = math_ops.cast(math_ops.not_equal(x, math_ops.cast(0.0, dtype=x.dtype)), dtype=x.dtype)\n        partial_x = gen_math_ops.xlogy(not_zero_x, y)\n        partial_y = gen_math_ops.xdivy(x, y)\n        return (array_ops.reshape(math_ops.reduce_sum(partial_x * grad, rx), sx), array_ops.reshape(math_ops.reduce_sum(partial_y * grad, ry), sy))",
    "docstring": "Returns gradient of xlogy(x, y) with respect to x and y.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\math_grad.py",
    "ast_data": "FunctionDef name:_XLogyGrad arg:op arg:grad arguments arg arg Assign Assign Assign Call Assign Call Assign Call With Call Assign Call Call Call Assign Call Assign Call Return return:yes Call Call Call Call Call"
  },
  {
    "library": "scipy",
    "name": "_hdsd_1D",
    "source_code": "def _hdsd_1D(data, prob):\n    xsorted = np.sort(data.compressed())\n    n = len(xsorted)\n    hdsd = np.empty(len(prob), float64)\n    if n < 2:\n        hdsd.flat = np.nan\n    vv = np.arange(n) / float(n - 1)\n    betacdf = beta.cdf\n    for i, p in enumerate(prob):\n        _w = betacdf(vv, n * p, n * (1 - p))\n        w = _w[1:] - _w[:-1]\n        mx_ = np.zeros_like(xsorted)\n        mx_[1:] = np.cumsum(w * xsorted[:-1])\n        mx_[:-1] += np.cumsum(w[::-1] * xsorted[:0:-1])[::-1]\n        hdsd[i] = np.sqrt(mx_.var() * (n - 1))\n    return hdsd",
    "docstring": "Computes the std error for 1D arrays.",
    "type": "function",
    "file_path": "scipy\\scipy\\stats\\_mstats_extras.py",
    "ast_data": "FunctionDef name:_hdsd_1D arg:data arg:prob arguments arg arg Assign Call Call Assign Call Assign Call Call If Compare Assign Assign Call Call Assign For Call Assign Call Assign Assign Call Assign Call Call Assign Call Call Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "density",
    "source_code": "def density(w):\n    if hasattr(w, 'toarray'):\n        d = float(w.nnz) / (w.shape[0] * w.shape[1])\n    else:\n        d = 0 if w is None else float((w != 0).sum()) / w.size\n    return d",
    "docstring": "Compute density of a sparse vector. Parameters ---------- w : {ndarray, sparse matrix} The input data can be numpy ndarray or a sparse matrix. Returns ------- float The density of w, between 0 and 1. Examples -------- >>> from scipy import sparse >>> from sklearn.utils.extmath import density >>> X = sparse.random(10, 10, density=0.25, random_state=0) >>> density(X) 0.25",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\utils\\extmath.py",
    "ast_data": "FunctionDef name:density arg:w arguments arg If Call Assign Call Assign Compare Call Call Compare Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_process_cond_unstacked",
    "source_code": "def _process_cond_unstacked(self, conditions, indices, inputs, output_tas):\n    not_all_done = array_ops.reshape(conditions, [])\n    return (not_all_done, indices, inputs, output_tas)",
    "docstring": "Handles case when condition is pfor loop invariant.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\parallel_for\\pfor.py",
    "ast_data": "FunctionDef name:_process_cond_unstacked arg:self arg:conditions arg:indices arg:inputs arg:output_tas arguments arg arg arg arg arg Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "dynamic",
    "source_code": "@property\ndef dynamic(self):\n    return isinstance(self._loss_scale, _DynamicLossScaleState)",
    "docstring": "Bool indicating whether dynamic loss scaling is used.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\mixed_precision\\loss_scale_optimizer.py",
    "ast_data": "FunctionDef name:dynamic arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_GenerateArgsSpec",
    "source_code": "def _GenerateArgsSpec(doc):\n    args_spec = []\n    doc = re.search('\\\\(.*\\\\)', doc)\n    if not doc:\n        return None\n    doc = doc.group().strip('(').strip(')')\n    doc_split = doc.split(',')\n    for s in doc_split:\n        arg = re.search('\\\\w+', s)\n        if not arg:\n            return None\n        args_spec.append(f\"'{arg.group()}'\")\n    return ', '.join(args_spec)",
    "docstring": "Generate args spec from a method docstring.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\tools\\api\\lib\\python_object_to_proto_visitor.py",
    "ast_data": "FunctionDef name:_GenerateArgsSpec arg:doc arguments arg Assign Assign Call If Return return:no Assign Call Call Call Assign Call For Assign Call If Return return:no Call Call Return return:yes Call"
  },
  {
    "library": "authlib",
    "name": "validate_requested_scope",
    "source_code": "def validate_requested_scope(self, scope):\n    if scope and self.scopes_supported:\n        scopes = set(scope_to_list(scope))\n        if not set(self.scopes_supported).issuperset(scopes):\n            raise InvalidScopeError()",
    "docstring": "Validate if requested scope is supported by Authorization Server. Developers CAN re-write this method to meet your needs.",
    "type": "method",
    "file_path": "authlib\\authlib\\oauth2\\rfc6749\\authorization_server.py",
    "ast_data": "FunctionDef name:validate_requested_scope arg:self arg:scope arguments arg arg If BoolOp Assign Call Call If Call Call Raise Call"
  },
  {
    "library": "seaborn",
    "name": "fit_poly",
    "source_code": "def fit_poly(self, grid, order):\n\n    def reg_func(_x, _y):\n        return np.polyval(np.polyfit(_x, _y, order), grid)\n    x, y = (self.x, self.y)\n    yhat = reg_func(x, y)\n    if self.ci is None:\n        return (yhat, None)\n    yhat_boots = algo.bootstrap(x, y, func=reg_func, n_boot=self.n_boot, units=self.units, seed=self.seed)\n    return (yhat, yhat_boots)",
    "docstring": "Regression using numpy polyfit for higher-order trends.",
    "type": "method",
    "file_path": "seaborn\\seaborn\\regression.py",
    "ast_data": "FunctionDef name:fit_poly arg:self arg:grid arg:order arguments arg arg arg FunctionDef name:reg_func arg:_x arg:_y arguments arg arg Return return:yes Call Call Assign Assign Call If Compare Return return:yes Assign Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "extract_weights",
    "source_code": "def extract_weights(mod: nn.Module) -> tuple[tuple[Tensor, ...], tuple[str, ...], dict[str, list[str]]]:\n    return _extract_members(mod, mod.named_parameters, nn.Parameter)",
    "docstring": "This function removes all the Parameters from the model and return them as a tuple as well as their original attribute names. The weights must be re-loaded with before the model can be used again. Note that this function modifies the model in place and after this call, mod.parameters() will be empty.",
    "type": "function",
    "file_path": "pytorch\\torch\\_functorch\\make_functional.py",
    "ast_data": "FunctionDef name:extract_weights arg:mod arguments arg Return return:yes Call"
  },
  {
    "library": "numpy",
    "name": "Isin",
    "source_code": "class Isin(Benchmark):\n    param_names = ['size', 'highest_element']\n    params = [[10, 100000, 3000000], [10, 10000, int(100000000.0)]]\n\n    def setup(self, size, highest_element):\n        self.array = np.random.randint(low=0, high=highest_element, size=size)\n        self.in_array = np.random.randint(low=0, high=highest_element, size=size)\n\n    def time_isin(self, size, highest_element):\n        np.isin(self.array, self.in_array)",
    "docstring": "Benchmarks for .",
    "type": "class",
    "file_path": "numpy\\benchmarks\\benchmarks\\bench_lib.py",
    "ast_data": "ClassDef name:Isin Assign Assign Call FunctionDef name:setup arg:self arg:size arg:highest_element arguments arg arg arg Assign Call Assign Call FunctionDef name:time_isin arg:self arg:size arg:highest_element arguments arg arg arg Call"
  },
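What this benchmark times is a plain membership test; a quick example of `np.isin` itself:

```python
import numpy as np

array = np.random.randint(0, 100, size=10)
in_array = np.random.randint(0, 100, size=10)

# Boolean mask: which elements of `array` also occur in `in_array`.
mask = np.isin(array, in_array)
print(array[mask])
```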
  {
    "library": "tensorflow",
    "name": "_get_tensors_for_gradient",
    "source_code": "def _get_tensors_for_gradient(x):\n    if not isinstance(x, composite_tensor.CompositeTensor):\n        return x\n    if not isinstance(x, CompositeTensorGradientProtocol):\n        raise ValueError(f'Type {type(x).__name__} is not supported as a gradient source or gradient target.')\n    composite_gradient = x.__composite_gradient__\n    gradient_components = composite_gradient.get_gradient_components(x)\n    if gradient_components is x:\n        return x\n    return nest.map_structure(_get_tensors_for_gradient, gradient_components)",
    "docstring": "Returns the Tensors in that should be differentiated. Args: x: A or . Returns: A or a nested structure of .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\composite_tensor_gradient.py",
    "ast_data": "FunctionDef name:_get_tensors_for_gradient arg:x arguments arg If Call Return return:yes If Call Raise Call Call Assign Assign Call If Compare Return return:yes Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "value_tensors",
    "source_code": "def value_tensors(self, shape_and_slices: Optional[str]=None) -> Mapping[str, tensor.Tensor]:\n    value_tensors = {}\n    for serialized_tensor in self.object_proto.attributes:\n        checkpoint_key = serialized_tensor.checkpoint_key\n        io_device = self._checkpoint.options.experimental_io_device or 'cpu:0'\n        with ops.init_scope():\n            with ops.device(io_device):\n                if shape_and_slices is not None and serialized_tensor.name in shape_and_slices:\n                    shape_and_slice = shape_and_slices[serialized_tensor.name]\n                else:\n                    shape_and_slice = ''\n                checkpoint_keys, full_shape_and_slices = self.callback.update_restore_inputs(checkpoint_key, shape_and_slice)\n                dtypes = []\n                for key in checkpoint_keys:\n                    dtype = self._checkpoint.dtype_map[key]\n                    dtypes.append(dtype.base_dtype)\n                restored_values = io_ops.restore_v2(prefix=self._checkpoint.save_path_tensor, tensor_names=checkpoint_keys, shape_and_slices=full_shape_and_slices, dtypes=dtypes, name='%s_checkpoint_read' % (serialized_tensor.name,))\n                value = self.callback.reshard(restored_values, shape_and_slice)\n            value_tensors[serialized_tensor.name] = array_ops.identity(value)\n    return value_tensors",
    "docstring": "Create value s for this object's attributes. Does not require that the Python object has been created. Used for restore-on-create when executing eagerly. Args: shape_and_slices: A dict mapping from object attribute names to a shape and slice string that will be passed to a RestoreV2 op. If the dict is None or if an object attribute is not in the dict, the full tensor will be restored. Returns: A dictionary mapping from object attribute names to s.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\checkpoint\\restore.py",
    "ast_data": "FunctionDef name:value_tensors arg:self arg:shape_and_slices arguments arg arg Assign For Assign Assign BoolOp With Call With Call If BoolOp Compare Compare Assign Assign Assign Call Assign For Assign Call Assign Call Assign Call Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "from_keras_model_file",
    "source_code": "@classmethod\n@_deprecation.deprecated(None, 'Use `lite.TFLiteConverter.from_keras_model_file` instead.')\ndef from_keras_model_file(cls, model_file, input_arrays=None, input_shapes=None, output_arrays=None):\n    return TFLiteConverter.from_keras_model_file(model_file, input_arrays, input_shapes, output_arrays)",
    "docstring": "Creates a TocoConverter class from a tf.keras model file.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\lite\\python\\lite.py",
    "ast_data": "FunctionDef name:from_keras_model_file arg:cls arg:model_file arg:input_arrays arg:input_shapes arg:output_arrays arguments arg arg arg arg arg Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "is_torchdynamo_compiling",
    "source_code": "def is_torchdynamo_compiling():\n    return False",
    "docstring": "Can't import torchdynamo in torchdeploy builds currently.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\_functional_collectives.py",
    "ast_data": "FunctionDef name:is_torchdynamo_compiling arguments Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, object_local_name: str, checkpoint_local_names: Sequence[str], to_shard_layout: Optional[Sequence[sparse_core_layout_pb2.SparseCoreTableLayout]]=None, to_unshard_layout: Optional[Sequence[sparse_core_layout_pb2.SparseCoreTableLayout]]=None):\n    self._object_local_name = object_local_name\n    self._checkpoint_local_names = checkpoint_local_names\n    self._to_shard_layout = to_shard_layout\n    self._to_unshard_layout = to_unshard_layout\n    self._main_checkpoint_name = checkpoint_local_names[0]",
    "docstring": "Initializes Reshard callback. Args: object_local_name: The local name of the object being restored. checkpoint_local_names: The local names of the checkpoint positions that need to be read. to_shard_layout: (Optional) Target layouts as specified in the embedding being restored. to_unshard_layout: (Optional) Layouts as stored in checkpoint being restored from.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\tpu\\tpu_embedding_v3_checkpoint_adapter.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:object_local_name arg:checkpoint_local_names arg:to_shard_layout arg:to_unshard_layout arguments arg arg arg arg arg Assign Assign Assign Assign Assign"
  },
  {
    "library": "scipy",
    "name": "all_of_type",
    "source_code": "def all_of_type(arg_type):\n\n    def outer(func):\n\n        @functools.wraps(func)\n        def inner(*args, **kwargs):\n            extracted_args = func(*args, **kwargs)\n            return tuple((Dispatchable(arg, arg_type) if not isinstance(arg, Dispatchable) else arg for arg in extracted_args))\n        return inner\n    return outer",
    "docstring": "Marks all unmarked arguments as a given type. Examples -------- >>> @all_of_type(str) ... def f(a, b): ... return a, Dispatchable(b, int) >>> f('a', 1) (, value='a'>, , value=1>)",
    "type": "function",
    "file_path": "scipy\\scipy\\_lib\\_uarray\\_backend.py",
    "ast_data": "FunctionDef name:all_of_type arg:arg_type arguments arg FunctionDef name:outer arg:func arguments arg FunctionDef name:inner arguments arg arg Assign Call Return return:yes Call Call Call Call Return return:yes Return return:yes"
  },
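To see the decorator in action outside scipy, the sketch below pairs it with a minimal stand-in `Dispatchable` (illustrative only; scipy's real class lives in `scipy._lib._uarray`):

```python
import functools

class Dispatchable:
    # Minimal stand-in for scipy's Dispatchable, for illustration only.
    def __init__(self, value, arg_type):
        self.value, self.type = value, arg_type
    def __repr__(self):
        return f"<Dispatchable: type={self.type!r}, value={self.value!r}>"

def all_of_type(arg_type):
    def outer(func):
        @functools.wraps(func)
        def inner(*args, **kwargs):
            # Wrap every returned value that is not already marked.
            return tuple(
                arg if isinstance(arg, Dispatchable) else Dispatchable(arg, arg_type)
                for arg in func(*args, **kwargs)
            )
        return inner
    return outer

@all_of_type(str)
def f(a, b):
    return a, Dispatchable(b, int)

print(f("a", 1))
```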
  {
    "library": "cherrypy",
    "name": "Toolbox",
    "source_code": "class Toolbox(object):\n\n    def __init__(self, namespace):\n        self.namespace = namespace\n\n    def __setattr__(self, name, value):\n        if isinstance(value, Tool):\n            if value._name is None:\n                value._name = name\n            value.namespace = self.namespace\n        object.__setattr__(self, name, value)\n\n    def __enter__(self):\n        cherrypy.serving.request.toolmaps[self.namespace] = map = {}\n\n        def populate(k, v):\n            toolname, arg = k.split('.', 1)\n            bucket = map.setdefault(toolname, {})\n            bucket[arg] = v\n        return populate\n\n    def __exit__(self, exc_type, exc_val, exc_tb):\n        map = cherrypy.serving.request.toolmaps.get(self.namespace)\n        if map:\n            for name, settings in map.items():\n                if settings.get('on', False):\n                    tool = getattr(self, name)\n                    tool._setup()\n\n    def register(self, point, **kwargs):\n\n        def decorator(func):\n            attr_name = kwargs.get('name', func.__name__)\n            tool = Tool(point, func, **kwargs)\n            setattr(self, attr_name, tool)\n            return func\n        return decorator",
    "docstring": "A collection of Tools. This object also functions as a config namespace handler for itself. Custom toolboxes should be added to each Application's toolboxes dict.",
    "type": "class",
    "file_path": "cherrypy\\cherrypy\\_cptools.py",
    "ast_data": "ClassDef name:Toolbox FunctionDef name:__init__ arg:self arg:namespace arguments arg arg Assign FunctionDef name:__setattr__ arg:self arg:name arg:value arguments arg arg arg If Call If Compare Assign Assign Call FunctionDef name:__enter__ arg:self arguments arg Assign FunctionDef name:populate arg:k arg:v arguments arg arg Assign Call Assign Call Assign Return return:yes FunctionDef name:__exit__ arg:self arg:exc_type arg:exc_val arg:exc_tb arguments arg arg arg arg Assign Call If For Call If Call Assign Call Call FunctionDef name:register arg:self arg:point arguments arg arg arg FunctionDef name:decorator arg:func arguments arg Assign Call Assign Call Call Return return:yes Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "checkpoint_id",
    "source_code": "@property\ndef checkpoint_id(self) -> Union[str, os.PathLike]:\n    return self.path",
    "docstring": "return the checkpoint_id that will be used to save the checkpoint.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\checkpoint\\filesystem.py",
    "ast_data": "FunctionDef name:checkpoint_id arg:self arguments arg Return return:yes"
  },
  {
    "library": "pandas",
    "name": "_get_reconciled_name_object",
    "source_code": "def _get_reconciled_name_object(self, other) -> MultiIndex:\n    names = self._maybe_match_names(other)\n    if self.names != names:\n        return self.rename(names)\n    return self",
    "docstring": "If the result of a set operation will be self, return self, unless the names change, in which case make a shallow copy of self.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\indexes\\multi.py",
    "ast_data": "FunctionDef name:_get_reconciled_name_object arg:self arg:other arguments arg arg Assign Call If Compare Return return:yes Call Return return:yes"
  },
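The visible effect of this reconciliation shows up in set operations: level names that match survive, names that differ become None. A quick pandas check:

```python
import pandas as pd

left = pd.MultiIndex.from_tuples([(1, "a"), (2, "b")], names=["x", "y"])
right = pd.MultiIndex.from_tuples([(1, "a"), (3, "c")], names=["x", "z"])

# Matching level names are kept; mismatched ones are dropped to None.
print(left.union(right).names)  # FrozenList(['x', None])
```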
  {
    "library": "authlib",
    "name": "validate_token",
    "source_code": "def validate_token(self, token, scopes, request):\n    raise NotImplementedError()",
    "docstring": "A method to validate if the authorized token is valid, if it has the permission on the given scopes. Developers MUST re-implement this method. e.g, check if token is expired, revoked:: def validate_token(self, token, scopes, request): if not token: raise InvalidTokenError() if token.is_expired() or token.is_revoked(): raise InvalidTokenError() if not match_token_scopes(token, scopes): raise InsufficientScopeError()",
    "type": "method",
    "file_path": "authlib\\authlib\\oauth2\\rfc6749\\resource_protector.py",
    "ast_data": "FunctionDef name:validate_token arg:self arg:token arg:scopes arg:request arguments arg arg arg arg Raise Call"
  },
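The docstring's inline sketch can be made runnable; below is a self-contained version with stand-in exception classes and a hypothetical `match_token_scopes` helper over dict tokens (a real authlib validator works against its own token model and exceptions):

```python
class InvalidTokenError(Exception): ...
class InsufficientScopeError(Exception): ...

def match_token_scopes(token, scopes):
    # Hypothetical helper: every required scope must be granted to the token.
    return not scopes or set(scopes) <= set(token.get("scope", "").split())

def validate_token(token, scopes, request=None):
    if not token:
        raise InvalidTokenError()
    if token.get("expired") or token.get("revoked"):
        raise InvalidTokenError()
    if not match_token_scopes(token, scopes):
        raise InsufficientScopeError()

validate_token({"scope": "profile email"}, ["profile"])
print("token ok")
```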
  {
    "library": "tensorflow",
    "name": "visit",
    "source_code": "def visit(unused_path, unused_parent, children):\n    for child in children:\n        _, attr = tf_decorator.unwrap(child[1])\n        api_names_v1 = [name for name in tf_export.get_v1_names(attr) if '.__internal__.' not in name]\n        api_names_v2 = tf_export.get_v2_names(attr)\n        if not api_names_v2:\n            api_names_v2 = [name for name in api_names_v1 if name in all_v2_names]\n        deprecated_api_names = set(api_names_v1) - set(api_names_v2)\n        for name in deprecated_api_names:\n            renames.add((name, get_canonical_name(api_names_v2, name)))",
    "docstring": "Visitor that collects rename strings to add to rename_line_set.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\tools\\compatibility\\update\\generate_v2_renames_map.py",
    "ast_data": "FunctionDef name:visit arg:unused_path arg:unused_parent arg:children arguments arg arg arg For Assign Call Assign Call Compare Assign Call If Assign Compare Assign Call Call For Call Call"
  },
  {
    "library": "authlib",
    "name": "get_client_by_id",
    "source_code": "def get_client_by_id(self, client_id):\n    raise NotImplementedError()",
    "docstring": "Get client instance with the given ``. :param client_id: A string of client_id :return: Client instance",
    "type": "method",
    "file_path": "authlib\\authlib\\oauth1\\rfc5849\\base_server.py",
    "ast_data": "FunctionDef name:get_client_by_id arg:self arg:client_id arguments arg arg Raise Call"
  },
  {
    "library": "pytorch",
    "name": "__init__",
    "source_code": "def __init__(self, name: str, enable_df_api_tracing=False) -> None:\n    self.name = name\n    self.enable_df_api_tracing = enable_df_api_tracing",
    "docstring": "Define a functional datapipe. Args: enable_df_api_tracing - if set, any returned DataPipe would accept DataFrames API in tracing mode.",
    "type": "method",
    "file_path": "pytorch\\torch\\utils\\data\\datapipes\\_decorator.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:name arg:enable_df_api_tracing arguments arg arg arg Assign Assign"
  },
  {
    "library": "cherrypy",
    "name": "_handle_signal",
    "source_code": "def _handle_signal(self, signum=None, frame=None):\n    signame = self.signals[signum]\n    self.bus.log('Caught signal %s.' % signame)\n    self.bus.publish(signame)",
    "docstring": "Python signal handler (self.set_handler subscribes it for you).",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\process\\plugins.py",
    "ast_data": "FunctionDef name:_handle_signal arg:self arg:signum arg:frame arguments arg arg arg Assign Call Call"
  },
  {
    "library": "scipy",
    "name": "MemoizeDer",
    "source_code": "class MemoizeDer:\n\n    def __init__(self, fun):\n        self.fun = fun\n        self.vals = None\n        self.x = None\n        self.n_calls = 0\n\n    def __call__(self, x, *args):\n        if self.vals is None or x != self.x:\n            fg = self.fun(x, *args)\n            self.x = x\n            self.n_calls += 1\n            self.vals = fg[:]\n        return self.vals[0]\n\n    def fprime(self, x, *args):\n        if self.vals is None or x != self.x:\n            self(x, *args)\n        return self.vals[1]\n\n    def fprime2(self, x, *args):\n        if self.vals is None or x != self.x:\n            self(x, *args)\n        return self.vals[2]\n\n    def ncalls(self):\n        return self.n_calls",
    "docstring": "Decorator that caches the value and derivative(s) of function each time it is called. This is a simplistic memoizer that calls and caches a single value of `argsargsx` changes, and only rarely, if at all, does x assume the same value more than once.",
    "type": "class",
    "file_path": "scipy\\scipy\\optimize\\_root_scalar.py",
    "ast_data": "ClassDef name:MemoizeDer FunctionDef name:__init__ arg:self arg:fun arguments arg arg Assign Assign Assign Assign FunctionDef name:__call__ arg:self arg:x arguments arg arg arg If BoolOp Compare Compare Assign Call Assign Assign Return return:yes FunctionDef name:fprime arg:self arg:x arguments arg arg arg If BoolOp Compare Compare Call Return return:yes FunctionDef name:fprime2 arg:self arg:x arguments arg arg arg If BoolOp Compare Compare Call Return return:yes FunctionDef name:ncalls arg:self arguments arg Return return:yes"
  },
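Typical usage pairs the memoizer with a function that returns value and derivative together, so `fprime` is served from the cache instead of re-evaluating; a short sketch assuming `MemoizeDer` as defined above:

```python
# A function that computes value and derivative in one evaluation.
def f_and_grad(x):
    return (x**3 - 1, 3 * x**2)

m = MemoizeDer(f_and_grad)
print(m(2.0))         # 7.0, evaluated once and cached
print(m.fprime(2.0))  # 12.0, served from the cache for the same x
print(m.ncalls())     # 1
```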
  {
    "library": "scikit-learn",
    "name": "predict_proba",
    "source_code": "def predict_proba(self, X, check_input=True):\n    check_is_fitted(self)\n    X = self._validate_X_predict(X, check_input)\n    proba = self.tree_.predict(X)\n    if self.n_outputs_ == 1:\n        return proba[:, :self.n_classes_]\n    else:\n        all_proba = []\n        for k in range(self.n_outputs_):\n            proba_k = proba[:, k, :self.n_classes_[k]]\n            all_proba.append(proba_k)\n        return all_proba",
    "docstring": "Predict class probabilities of the input samples X. The predicted class probability is the fraction of samples of the same class in a leaf. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) The input samples. Internally, it will be converted to `classes_`.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\tree\\_classes.py",
    "ast_data": "FunctionDef name:predict_proba arg:self arg:X arg:check_input arguments arg arg arg Call Assign Call Assign Call If Compare Return return:yes Assign For Call Assign Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_deregister_orig_params",
    "source_code": "def _deregister_orig_params(state: _FSDPState, module: nn.Module) -> None:\n    handle = _module_handle(state, module)\n    if not handle:\n        return\n    _p_assert(handle._use_orig_params, f'Inconsistent `_use_orig_params` -- FSDP: {state._use_orig_params} handle: {handle._use_orig_params}')\n    handle._deregister_orig_params()\n    _register_flat_param(state, module)",
    "docstring": "Deregisters the original parameters; registers the ``.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\fsdp\\_unshard_param_utils.py",
    "ast_data": "FunctionDef name:_deregister_orig_params arg:state arg:module arguments arg arg Assign Call If Return return:no Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "schema",
    "source_code": "@staticmethod\ndef schema(obj, method) -> torch.FunctionSchema:\n    assert isinstance(obj, torch._inductor.ir.TorchBindObject)\n    val = obj.get_real_obj()\n    schema = val._get_method(method).schema\n    schema_str = str(schema)\n    new_schema_str = f'call_torchbind({str(schema.arguments[0].real_type)} {schema.arguments[0].name},'\n    first_comma_index = schema_str.find(',')\n    if first_comma_index == -1:\n        first_comma_index = schema_str.rfind(') ->')\n    new_schema_str = new_schema_str + ' str method' + schema_str[first_comma_index:]\n    new_schema = torch._C.parse_schema(new_schema_str)\n    return new_schema",
    "docstring": "Returns the schema of ``.",
    "type": "method",
    "file_path": "pytorch\\torch\\_higher_order_ops\\torchbind.py",
    "ast_data": "FunctionDef name:schema arg:obj arg:method arguments arg arg Call Assign Call Assign Call Assign Call Assign Call Assign Call If Compare Assign Call Assign Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "LookupInterface",
    "source_code": "class LookupInterface(resource.TrackableResource):\n\n    def __init__(self, key_dtype, value_dtype):\n        self._key_dtype = dtypes.as_dtype(key_dtype)\n        self._value_dtype = dtypes.as_dtype(value_dtype)\n        super(LookupInterface, self).__init__()\n\n    def _create_resource(self):\n        raise NotImplementedError\n\n    @property\n    def key_dtype(self):\n        return self._key_dtype\n\n    @property\n    def value_dtype(self):\n        return self._value_dtype\n\n    @property\n    def name(self):\n        return NotImplementedError\n\n    def size(self, name=None):\n        raise NotImplementedError\n\n    def lookup(self, keys, name=None):\n        raise NotImplementedError\n\n    def __getitem__(self, keys):\n        return self.lookup(keys)",
    "docstring": "Represent a lookup table that persists across different steps.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\lookup_ops.py",
    "ast_data": "ClassDef name:LookupInterface FunctionDef name:__init__ arg:self arg:key_dtype arg:value_dtype arguments arg arg arg Assign Call Assign Call Call Call FunctionDef name:_create_resource arg:self arguments arg Raise FunctionDef name:key_dtype arg:self arguments arg Return return:yes FunctionDef name:value_dtype arg:self arguments arg Return return:yes FunctionDef name:name arg:self arguments arg Return return:yes FunctionDef name:size arg:self arg:name arguments arg arg Raise FunctionDef name:lookup arg:self arg:keys arg:name arguments arg arg arg Raise FunctionDef name:__getitem__ arg:self arg:keys arguments arg arg Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "set_aspect",
    "source_code": "def set_aspect(self, aspect):\n    self._divider.set_aspect(aspect)",
    "docstring": "Set the aspect of the SubplotDivider.",
    "type": "method",
    "file_path": "matplotlib\\lib\\mpl_toolkits\\axes_grid1\\axes_grid.py",
    "ast_data": "FunctionDef name:set_aspect arg:self arg:aspect arguments arg arg Call"
  },
  {
    "library": "numpy",
    "name": "irfft2",
    "source_code": "@array_function_dispatch(_fftn_dispatcher)\ndef irfft2(a, s=None, axes=(-2, -1), norm=None, out=None):\n    return irfftn(a, s, axes, norm, out=None)",
    "docstring": "Computes the inverse of . Parameters ---------- a : array_like The input array s : sequence of ints, optional Shape of the real output to the inverse FFT. .. versionchanged:: 2.0 If it is `saxesssaxesnumpy.fftirfft2irfftnirfftn`. Examples -------- >>> import numpy as np >>> a = np.mgrid[:5, :5][0] >>> A = np.fft.rfft2(a) >>> np.fft.irfft2(A, s=a.shape) array([[0., 0., 0., 0., 0.], [1., 1., 1., 1., 1.], [2., 2., 2., 2., 2.], [3., 3., 3., 3., 3.], [4., 4., 4., 4., 4.]])",
    "type": "function",
    "file_path": "numpy\\numpy\\fft\\_pocketfft.py",
    "ast_data": "FunctionDef name:irfft2 arg:a arg:s arg:axes arg:norm arg:out arguments arg arg arg arg arg Return return:yes Call Call"
  },
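A round trip shows why passing `s` matters when the last axis length is odd (here 5; without `s` the inverse would reconstruct an even length of 4):

```python
import numpy as np

a = np.arange(20.0).reshape(4, 5)
A = np.fft.rfft2(a)

# Passing s restores the original (odd) last-axis length exactly.
back = np.fft.irfft2(A, s=a.shape)
print(np.allclose(a, back))  # True
```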
  {
    "library": "pytorch",
    "name": "Scope",
    "source_code": "@compatibility(is_backward_compatible=False)\nclass Scope:\n\n    def __init__(self, module_path: str, module_type: Any):\n        super().__init__()\n        self.module_path = module_path\n        self.module_type = module_type",
    "docstring": "Scope object that records the module path and the module type of a module. Scope is used to track the information of the module that contains a Node in a Graph of GraphModule. For example:: class Sub(torch.nn.Module): def forward(self, x): # This will be a call_method Node in GraphModule, # scope for this would be (module_path=\"sub\", module_type=Sub) return x.transpose(1, 2) class M(torch.nn.Module): def __init__(self) -> None: self.sub = Sub() def forward(self, x): # This will be a call_method Node as well, # scope for this would be (module_path=\"\", None) x = x.transpose(1, 2) x = self.sub(x) return x",
    "type": "class",
    "file_path": "pytorch\\torch\\fx\\proxy.py",
    "ast_data": "ClassDef name:Scope FunctionDef name:__init__ arg:self arg:module_path arg:module_type arguments arg arg arg Call Call Assign Assign Call"
  },
  {
    "library": "matplotlib",
    "name": "set_view_interval",
    "source_code": "def set_view_interval(self, vmin, vmax, ignore=False):\n    raise NotImplementedError('Derived must override')",
    "docstring": "Set the axis view limits. This method is for internal use; Matplotlib users should typically use e.g. or . If *ignore* is False (the default), this method will never reduce the preexisting view limits, only expand them if *vmin* or *vmax* are not within them. Moreover, the order of *vmin* and *vmax* does not matter; the orientation of the axis will not change. If *ignore* is True, the view limits will be set exactly to `` in that order.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\axis.py",
    "ast_data": "FunctionDef name:set_view_interval arg:self arg:vmin arg:vmax arg:ignore arguments arg arg arg arg Raise Call"
  },
  {
    "library": "tensorflow",
    "name": "num_tpus_per_task",
    "source_code": "@property\ndef num_tpus_per_task(self):\n    return self._device_coordinates.shape[1]",
    "docstring": "Returns the number of TPU devices per task in the TPU slice.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\tpu\\topology.py",
    "ast_data": "FunctionDef name:num_tpus_per_task arg:self arguments arg Return return:yes"
  },
  {
    "library": "numpy",
    "name": "arccos",
    "source_code": "@set_module('numpy.lib.scimath')\n@array_function_dispatch(_unary_dispatcher)\ndef arccos(x):\n    x = _fix_real_abs_gt_1(x)\n    return nx.arccos(x)",
    "docstring": "Compute the inverse cosine of x. Return the \"principal value\" (for a description of this, see ) of the inverse cosine of . For real such that `abs(x) >> import numpy as np >>> np.set_printoptions(precision=4) >>> np.emath.arccos(1) # a scalar is returned 0.0 >>> np.emath.arccos([1,2]) array([0.-0.j , 0.-1.317j])",
    "type": "function",
    "file_path": "numpy\\numpy\\lib\\_scimath_impl.py",
    "ast_data": "FunctionDef name:arccos arg:x arguments arg Assign Call Return return:yes Call Call Call"
  },
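The docstring examples, runnable as-is; real inputs with `|x| <= 1` stay real, while `|x| > 1` promotes to complex:

```python
import numpy as np

np.set_printoptions(precision=4)
print(np.emath.arccos(1))       # 0.0 (real result for |x| <= 1)
print(np.emath.arccos([1, 2]))  # [0.-0.j 0.-1.317j] (|x| > 1 goes complex)
```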
  {
    "library": "tensorflow",
    "name": "_flatten_array_with_offset",
    "source_code": "def _flatten_array_with_offset(ids, offset_delta, num_rows):\n    offset_delta = math_ops.cast(offset_delta, ids.dtype)\n    n = math_ops.cast(num_rows, dtype=ids.dtype)\n    offsets = math_ops.range(start=0, limit=n * offset_delta, delta=offset_delta, dtype=ids.dtype)\n    offsets = array_ops.expand_dims(offsets, -1)\n    ids += offsets\n    return array_ops.reshape(ids, [-1])",
    "docstring": "Flattens a rank 2 tensor, adding an offset to each row.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\parallel_for\\pfor.py",
    "ast_data": "FunctionDef name:_flatten_array_with_offset arg:ids arg:offset_delta arg:num_rows arguments arg arg arg Assign Call Assign Call Assign Call Assign Call Return return:yes Call"
  },
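The same offset trick in plain NumPy (toy values are ours): each row gets `row_index * offset_delta` added before flattening, so ids from different rows stay distinct:

```python
import numpy as np

ids = np.array([[0, 2], [1, 1], [2, 0]])
offset_delta, num_rows = 3, 3

# One offset per row, broadcast across the row's entries.
offsets = np.arange(0, num_rows * offset_delta, offset_delta)[:, None]
flat = (ids + offsets).reshape(-1)
print(flat)  # [0 2 4 4 8 6]
```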
  {
    "library": "tensorflow",
    "name": "merge_all",
    "source_code": "@tf_export(v1=['summary.merge_all'])\ndef merge_all(key=_ops.GraphKeys.SUMMARIES, scope=None, name=None):\n    if _context.executing_eagerly():\n        raise RuntimeError('Merging tf.summary.* ops is not compatible with eager execution. Use tf.contrib.summary instead.')\n    summary_ops = _ops.get_collection(key, scope=scope)\n    if not summary_ops:\n        return None\n    else:\n        return merge(summary_ops, name=name)",
    "docstring": "Merges all summaries collected in the default graph. Args: key: used to collect the summaries. Defaults to . scope: Optional scope used to filter the summary ops, using . name: A name for the operation (optional). Returns: If no summaries were collected, returns None. Otherwise returns a scalar of type containing the serialized protocol buffer resulting from the merging. Raises: RuntimeError: If called with eager execution enabled. @compatibility(TF2) This API is not compatible with eager execution or . To migrate to TF2, this API can be omitted entirely, because in TF2 individual summary ops, like , write directly to the default summary writer if one is active. Thus, it's not necessary to merge summaries or to manually add the resulting merged summary output to the writer. See the usage example shown below. For a comprehensive migration guide, please follow [Migrating tf.summary usage to TF 2.0]( #### TF1 & TF2 Usage Example TF1: TF2: @end_compatibility",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\summary\\summary.py",
    "ast_data": "FunctionDef name:merge_all arg:key arg:scope arg:name arguments arg arg arg If Call Raise Call Assign Call If Return return:no Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "welford_reduce_final_reduction",
    "source_code": "def welford_reduce_final_reduction(self, buffer, result_mean, result_m2, result_weight, mean, m2, weight, dim, dtype):\n    values = self._welford(buffer, mean, m2, weight, dim, dtype)\n    result_exprs = [result_mean, result_m2, result_weight]\n    for result_expr, value in zip(result_exprs, values):\n        buffer.splice(f'{result_expr} = {value}')\n    return (result_mean, result_m2, result_weight)",
    "docstring": "Helper to codegen call to triton_helpers.welford",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\codegen\\triton.py",
    "ast_data": "FunctionDef name:welford_reduce_final_reduction arg:self arg:buffer arg:result_mean arg:result_m2 arg:result_weight arg:mean arg:m2 arg:weight arg:dim arg:dtype arguments arg arg arg arg arg arg arg arg arg arg Assign Call Assign For Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "set_np_doc_form",
    "source_code": "def set_np_doc_form(value):\n    global _np_doc_form\n    _np_doc_form = value",
    "docstring": "Selects the form of the original numpy docstrings. This function sets a global variable that controls how a tf-numpy symbol's docstring should refer to the original numpy docstring. If is , the numpy docstring will be verbatim copied into the tf-numpy docstring. Otherwise, a link to the original numpy docstring will be added. Which numpy version the link points to depends on : * : the current stable version; * : the current development version; * pattern : will be treated as a version number, e.g. '1.16'. Args: value: the value to set the global variable to.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\numpy_ops\\np_utils.py",
    "ast_data": "FunctionDef name:set_np_doc_form arg:value arguments arg Assign"
  },
  {
    "library": "scipy",
    "name": "prune",
    "source_code": "def prune(self):\n    R, C = self.blocksize\n    M, N = self.shape\n    if len(self.indptr) != M // R + 1:\n        raise ValueError('index pointer has invalid length')\n    bnnz = self.indptr[-1]\n    if len(self.indices) < bnnz:\n        raise ValueError('indices array has too few elements')\n    if len(self.data) < bnnz:\n        raise ValueError('data array has too few elements')\n    self.data = self.data[:bnnz]\n    self.indices = self.indices[:bnnz]",
    "docstring": "Remove empty space after all non-zero elements.",
    "type": "method",
    "file_path": "scipy\\scipy\\sparse\\_bsr.py",
    "ast_data": "FunctionDef name:prune arg:self arguments arg Assign Assign If Compare Call Raise Call Assign If Compare Call Raise Call If Compare Call Raise Call Assign Assign"
  },
  {
    "library": "pytorch",
    "name": "inlined_graph",
    "source_code": "@property\ndef inlined_graph(self):\n    return self.forward.inlined_graph",
    "docstring": "Return a string representation of the internal graph for the `interpreting-graphs` for details.",
    "type": "method",
    "file_path": "pytorch\\torch\\jit\\_script.py",
    "ast_data": "FunctionDef name:inlined_graph arg:self arguments arg Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "set_fontstyle",
    "source_code": "def set_fontstyle(self, fontstyle):\n    self._fontproperties.set_style(fontstyle)\n    self.stale = True",
    "docstring": "Set the font style. Parameters ---------- fontstyle : {'normal', 'italic', 'oblique'} See Also -------- .font_manager.FontProperties.set_style",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\text.py",
    "ast_data": "FunctionDef name:set_fontstyle arg:self arg:fontstyle arguments arg arg Call Assign"
  },
  {
    "library": "numpy",
    "name": "upper",
    "source_code": "@set_module('numpy.strings')\n@array_function_dispatch(_unary_op_dispatcher)\ndef upper(a):\n    a_arr = np.asarray(a)\n    return _vec_string(a_arr, a_arr.dtype, 'upper')",
    "docstring": "Return an array with the elements converted to uppercase. Calls :meth: element-wise. For 8-bit strings, this method is locale-dependent. Parameters ---------- a : array-like, with `` dtype, depending on input types See Also -------- str.upper Examples -------- >>> import numpy as np >>> c = np.array(['a1b c', '1bca', 'bca1']); c array(['a1b c', '1bca', 'bca1'], dtype='>> np.strings.upper(c) array(['A1B C', '1BCA', 'BCA1'], dtype='<U5')",
    "type": "function",
    "file_path": "numpy\\numpy\\_core\\strings.py",
    "ast_data": "FunctionDef name:upper arg:a arguments arg Assign Call Return return:yes Call Call Call"
  },
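Runnable form of the docstring example (note `np.strings` is the NumPy >= 2.0 namespace; older versions expose the same operation as `np.char.upper`):

```python
import numpy as np

c = np.array(["a1b c", "1bca", "bca1"])
print(np.strings.upper(c))  # ['A1B C' '1BCA' 'BCA1']
```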
  {
    "library": "scikit-learn",
    "name": "_get_feature_names",
    "source_code": "def _get_feature_names(X):\n    feature_names = None\n    if _is_pandas_df(X):\n        feature_names = np.asarray(X.columns, dtype=object)\n    elif hasattr(X, '__dataframe__'):\n        df_protocol = X.__dataframe__()\n        feature_names = np.asarray(list(df_protocol.column_names()), dtype=object)\n    if feature_names is None or len(feature_names) == 0:\n        return\n    types = sorted((t.__qualname__ for t in set((type(v) for v in feature_names))))\n    if len(types) > 1 and 'str' in types:\n        raise TypeError(f'Feature names are only supported if all input features have string names, but your input has {types} as feature name / column name types. If you want feature names to be stored and validated, you must convert them all to strings, by using X.columns = X.columns.astype(str) for example. Otherwise you can remove feature / column names from your input data, or convert them all to a non-string data type.')\n    if len(types) == 1 and types[0] == 'str':\n        return feature_names",
    "docstring": "Get feature names from X. Support for other array containers should place its implementation here. Parameters ---------- X : {ndarray, dataframe} of shape (n_samples, n_features) Array container to extract feature names. - pandas dataframe : The columns will be considered to be feature names. If the dataframe contains non-string feature names, is returned. - All other array containers will return . Returns ------- names: ndarray or None Feature names of . Unrecognized array containers will return .",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\utils\\validation.py",
    "ast_data": "FunctionDef name:_get_feature_names arg:X arguments arg Assign If Call Assign Call If Call Assign Call Assign Call Call Call If BoolOp Compare Compare Call Return return:no Assign Call Call Call If BoolOp Compare Call Compare Raise Call If BoolOp Compare Call Compare Return return:yes"
  },
  {
    "library": "scipy",
    "name": "dice",
    "source_code": "def dice(u, v, w=None):\n    u = _validate_vector(u)\n    v = _validate_vector(v)\n    if w is not None:\n        w = _validate_weights(w)\n    if u.dtype == v.dtype == bool and w is None:\n        ntt = (u & v).sum()\n    else:\n        dtype = np.result_type(int, u.dtype, v.dtype)\n        u = u.astype(dtype)\n        v = v.astype(dtype)\n        if w is None:\n            ntt = (u * v).sum()\n        else:\n            ntt = (u * v * w).sum()\n    nft, ntf = _nbool_correspond_ft_tf(u, v, w=w)\n    return float((ntf + nft) / np.array(2.0 * ntt + ntf + nft))",
    "docstring": "Compute the Dice dissimilarity between two boolean 1-D arrays. The Dice dissimilarity between and , is .. math:: \\frac{c_{TF} + c_{FT}} {2c_{TT} + c_{FT} + c_{TF}} where :math: is the number of occurrences of :math: and :math: for :math:`k >> from scipy.spatial import distance >>> distance.dice([1, 0, 0], [0, 1, 0]) 1.0 >>> distance.dice([1, 0, 0], [1, 1, 0]) 0.3333333333333333 >>> distance.dice([1, 0, 0], [2, 0, 0]) -0.3333333333333333",
    "type": "function",
    "file_path": "scipy\\scipy\\spatial\\distance.py",
    "ast_data": "FunctionDef name:dice arg:u arg:v arg:w arguments arg arg arg Assign Call Assign Call If Compare Assign Call If BoolOp Compare Compare Assign Call Assign Call Assign Call Assign Call If Compare Assign Call Assign Call Assign Call Return return:yes Call Call"
  },
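A worked instance of the formula, with the counts annotated:

```python
from scipy.spatial import distance

# dice = (c_TF + c_FT) / (2*c_TT + c_FT + c_TF) for boolean vectors.
u = [True, False, False]
v = [True, True, False]
# c_TT = 1, c_FT = 1, c_TF = 0  ->  (0 + 1) / (2*1 + 1 + 0) = 1/3
print(distance.dice(u, v))  # 0.3333333333333333
```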
  {
    "library": "numpy",
    "name": "check_restrict",
    "source_code": "def check_restrict(cmd):\n    cmd._check_compiler()\n    body = textwrap.dedent('\\n        static int static_func (char * %(restrict)s a)\\n        {\\n            return 0;\\n        }\\n        ')\n    for kw in ['restrict', '__restrict__', '__restrict']:\n        st = cmd.try_compile(body % {'restrict': kw}, None, None)\n        if st:\n            return kw\n    return ''",
    "docstring": "Return the restrict identifier (may be empty).",
    "type": "function",
    "file_path": "numpy\\numpy\\distutils\\command\\autodist.py",
    "ast_data": "FunctionDef name:check_restrict arg:cmd arguments arg Call Assign Call For Assign Call If Return return:yes Return return:yes"
  },
  {
    "library": "django",
    "name": "length",
    "source_code": "@property\ndef length(self):\n    return capi.geos_length(self.ptr, byref(c_double()))",
    "docstring": "Return the length of this Geometry (e.g., 0 for point, or the circumference of a Polygon).",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\geos\\geometry.py",
    "ast_data": "FunctionDef name:length arg:self arguments arg Return return:yes Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, perm, dtype=dtypes.float32, is_non_singular=None, is_self_adjoint=None, is_positive_definite=None, is_square=None, name='LinearOperatorPermutation'):\n    parameters = dict(perm=perm, dtype=dtype, is_non_singular=is_non_singular, is_self_adjoint=is_self_adjoint, is_positive_definite=is_positive_definite, is_square=is_square, name=name)\n    with ops.name_scope(name, values=[perm]):\n        self._perm = linear_operator_util.convert_nonref_to_tensor(perm, name='perm')\n        self._check_perm(self._perm)\n        if is_non_singular is False:\n            raise ValueError(f'A Permutation operator is always non-singular. Expected argument `is_non_singular` to be True. Received: {is_non_singular}.')\n        if is_square is False:\n            raise ValueError(f'A Permutation operator is always square. Expected argument `is_square` to be True. Received: {is_square}.')\n        is_square = True\n        super(LinearOperatorPermutation, self).__init__(dtype=dtype, is_non_singular=is_non_singular, is_self_adjoint=is_self_adjoint, is_positive_definite=is_positive_definite, is_square=is_square, parameters=parameters, name=name)",
    "docstring": "Initialize a . Args: perm: Shape Integer with . An integer vector that represents the permutation to apply. Note that this argument is same as . However, this permutation is applied on the rows, while the permutation in is applied on the dimensions of the . is required to have unique entries from . dtype: The of arguments to this operator. Default: . Allowed dtypes: , , , , . is_non_singular: Expect that this operator is non-singular. is_self_adjoint: Expect that this operator is equal to its hermitian transpose. This is autoset to true is_positive_definite: Expect that this operator is positive definite, meaning the quadratic form has positive real part for all nonzero . Note that we do not require the operator to be self-adjoint to be positive-definite. See: This is autoset to false. is_square: Expect that this operator acts like square [batch] matrices. This is autoset to true. name: A name for this . Raises: ValueError: is not , is not or is not .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\linalg\\linear_operator_permutation.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:perm arg:dtype arg:is_non_singular arg:is_self_adjoint arg:is_positive_definite arg:is_square arg:name arguments arg arg arg arg arg arg arg arg Assign Call With Call Assign Call Call If Compare Raise Call If Compare Raise Call Assign Call Call"
  },
  {
    "library": "pytorch",
    "name": "_make_tensor_from_meta",
    "source_code": "def _make_tensor_from_meta(example: Union[torch.Tensor, FakeTensor], device: torch.device) -> torch.Tensor:\n    return torch.empty(example.size(), dtype=example.dtype, layout=example.layout, device=device)",
    "docstring": "Create a real tensor from a tensor.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\pipelining\\stage.py",
    "ast_data": "FunctionDef name:_make_tensor_from_meta arg:example arg:device arguments arg arg Return return:yes Call Call"
  },
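The same pattern outside pipelining: materialize a real (uninitialized) tensor from a meta tensor's metadata. This is a sketch of the technique only; `_make_tensor_from_meta` itself is internal:

```python
import torch

# A meta tensor carries shape/dtype/layout but no storage.
example = torch.empty(2, 3, dtype=torch.float16, device="meta")

# torch.empty with the same attributes allocates real storage on a device.
real = torch.empty(example.size(), dtype=example.dtype,
                   layout=example.layout, device=torch.device("cpu"))
print(real.shape, real.dtype, real.device)  # torch.Size([2, 3]) torch.float16 cpu
```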
  {
    "library": "matplotlib",
    "name": "set_3d_properties",
    "source_code": "def set_3d_properties(self, zs, zdir, axlim_clip=False):\n    self.update_scalarmappable()\n    offsets = self.get_offsets()\n    if len(offsets) > 0:\n        xs, ys = offsets.T\n    else:\n        xs = []\n        ys = []\n    self._offsets3d = juggle_axes(xs, ys, np.atleast_1d(zs), zdir)\n    self._z_markers_idx = slice(-1)\n    self._vzs = None\n    self._axlim_clip = axlim_clip\n    self.stale = True",
    "docstring": "Set the *z* positions and direction of the patches. Parameters ---------- zs : float or array of floats The location or locations to place the patches in the collection along the *zdir* axis. zdir : {'x', 'y', 'z'} Plane to plot patches orthogonal to. All patches must have the same direction. See for a description of the values. axlim_clip : bool, default: False Whether to hide patches with a vertex outside the axes view limits. .. versionadded:: 3.10",
    "type": "method",
    "file_path": "matplotlib\\lib\\mpl_toolkits\\mplot3d\\art3d.py",
    "ast_data": "FunctionDef name:set_3d_properties arg:self arg:zs arg:zdir arg:axlim_clip arguments arg arg arg arg Call Assign Call If Compare Call Assign Assign Assign Assign Call Call Assign Call Assign Assign Assign"
  },
  {
    "library": "tensorflow",
    "name": "squeeze_v2",
    "source_code": "@tf_export('squeeze', v1=[])\n@dispatch.add_dispatch_support\ndef squeeze_v2(input, axis=None, name=None):\n    return squeeze(input, axis, name)",
    "docstring": "Removes dimensions of size 1 from the shape of a tensor. Given a tensor , this operation returns a tensor of the same type with all dimensions of size 1 removed. If you don't want to remove all size 1 dimensions, you can remove specific size 1 dimensions by specifying . For example: Or, to remove specific size 1 dimensions: Unlike the older op , this op does not accept a deprecated argument. Note: if is a , then this operation takes time, where is the number of elements in the squeezed dimensions. Note: If squeeze is performed on dimensions of unknown sizes, then the returned Tensor will be of unknown shape. A common situation is when the first (batch) dimension is of size , returns `axis=Tensorinputints[][-rank(input), rank(input))inputRaggedTensorTensorinputinput`, but has one or more dimensions of size 1 removed. Raises: ValueError: The input cannot be converted to a tensor, or the specified axis cannot be squeezed.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\array_ops.py",
    "ast_data": "FunctionDef name:squeeze_v2 arg:input arg:axis arg:name arguments arg arg arg Return return:yes Call Call"
  },
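A quick demonstration of the two modes, squeezing everything versus a specific axis (requires TensorFlow installed):

```python
import tensorflow as tf

t = tf.zeros([1, 2, 1, 3, 1])
print(tf.squeeze(t).shape)            # (2, 3) -- all size-1 dims removed
print(tf.squeeze(t, axis=[2]).shape)  # (1, 2, 3, 1) -- only axis 2 removed
```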
  {
    "library": "matplotlib",
    "name": "_set_transforms",
    "source_code": "def _set_transforms(self):\n    ax = self.axes\n    fig = self.get_figure(root=False)\n    if self._units == 'xy':\n        sc = 1\n    elif self._units == 'x':\n        sc = ax.bbox.width / ax.viewLim.width\n    elif self._units == 'y':\n        sc = ax.bbox.height / ax.viewLim.height\n    elif self._units == 'inches':\n        sc = fig.dpi\n    elif self._units == 'points':\n        sc = fig.dpi / 72.0\n    elif self._units == 'width':\n        sc = ax.bbox.width\n    elif self._units == 'height':\n        sc = ax.bbox.height\n    elif self._units == 'dots':\n        sc = 1.0\n    else:\n        raise ValueError(f'Unrecognized units: {self._units!r}')\n    self._transforms = np.zeros((len(self._widths), 3, 3))\n    widths = self._widths * sc\n    heights = self._heights * sc\n    sin_angle = np.sin(self._angles)\n    cos_angle = np.cos(self._angles)\n    self._transforms[:, 0, 0] = widths * cos_angle\n    self._transforms[:, 0, 1] = heights * -sin_angle\n    self._transforms[:, 1, 0] = widths * sin_angle\n    self._transforms[:, 1, 1] = heights * cos_angle\n    self._transforms[:, 2, 2] = 1.0\n    _affine = transforms.Affine2D\n    if self._units == 'xy':\n        m = ax.transData.get_affine().get_matrix().copy()\n        m[:2, 2:] = 0\n        self.set_transform(_affine(m))",
    "docstring": "Calculate transforms immediately before drawing.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\collections.py",
    "ast_data": "FunctionDef name:_set_transforms arg:self arguments arg Assign Assign Call If Compare Assign If Compare Assign If Compare Assign If Compare Assign If Compare Assign If Compare Assign If Compare Assign If Compare Assign Raise Call Assign Call Call Assign Assign Assign Call Assign Call Assign Assign Assign Assign Assign Assign If Compare Assign Call Call Call Assign Call Call"
  },
  {
    "library": "pytorch",
    "name": "register_optimizer_step_pre_hook",
    "source_code": "def register_optimizer_step_pre_hook(hook: GlobalOptimizerPreHook) -> RemovableHandle:\n    handle = hooks.RemovableHandle(_global_optimizer_pre_hooks)\n    _global_optimizer_pre_hooks[handle.id] = hook\n    return handle",
    "docstring": "Register a pre hook common to all optimizers. The hook should have the following signature:: hook(optimizer, args, kwargs) -> None or modified args and kwargs Args: hook (Callable): A user defined hook which is registered on all optimizers. Returns: :class:: a handle that can be used to remove the added hook by calling ``",
    "type": "function",
    "file_path": "pytorch\\torch\\optim\\optimizer.py",
    "ast_data": "FunctionDef name:register_optimizer_step_pre_hook arg:hook arguments arg Assign Call Assign Return return:yes"
  },
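A usage sketch, assuming the function is reachable via the `torch.optim.optimizer` module path; the hook fires before every `step()` of every optimizer until the handle is removed:

```python
import torch

def log_step(optimizer, args, kwargs):
    # Global pre-hook: runs before every optimizer.step().
    print(f"stepping {type(optimizer).__name__}")

handle = torch.optim.optimizer.register_optimizer_step_pre_hook(log_step)

p = torch.nn.Parameter(torch.ones(2))
opt = torch.optim.SGD([p], lr=0.1)
p.grad = torch.ones_like(p)
opt.step()       # prints "stepping SGD"
handle.remove()  # deregister the global hook
```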
  {
    "library": "tensorflow",
    "name": "append",
    "source_code": "def append(self, line, font_attr_segs=None):\n    self._lines.append(line)\n    if font_attr_segs:\n        self._font_attr_segs[len(self._lines) - 1] = font_attr_segs",
    "docstring": "Append a single line of text. Args: line: (str) The text to be added to the end. font_attr_segs: (list of tuples) Font attribute segments of the appended line.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\debug\\cli\\debugger_cli_common.py",
    "ast_data": "FunctionDef name:append arg:self arg:line arg:font_attr_segs arguments arg arg arg Call If Assign Call"
  },
  {
    "library": "tensorflow",
    "name": "set_floatx",
    "source_code": "def set_floatx(value):\n    global _FLOATX\n    if value not in {'float16', 'float32', 'float64'}:\n        raise ValueError('Unknown floatx type: ' + str(value))\n    _FLOATX = str(value)",
    "docstring": "Sets the default float type. Note: It is not recommended to set this to float16 for training, as this will likely cause numeric stability issues. Instead, mixed precision, which is using a mix of float16 and float32, can be used by calling . See the [mixed precision guide]( for details. Args: value: String; , , or . Example: >>> tf.keras.backend.floatx() 'float32' >>> tf.keras.backend.set_floatx('float64') >>> tf.keras.backend.floatx() 'float64' >>> tf.keras.backend.set_floatx('float32') Raises: ValueError: In case of invalid value.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\backend_config.py",
    "ast_data": "FunctionDef name:set_floatx arg:value arguments arg If Compare Raise Call Call Assign Call"
  },
  {
    "library": "pytorch",
    "name": "register_backend",
    "source_code": "@classmethod\ndef register_backend(cls, name, func, extended_api=False, devices: Optional[Union[str, list[str]]]=None) -> None:\n    if not hasattr(Backend, name.upper()):\n        setattr(Backend, name.upper(), name.lower())\n    if name.lower() not in Backend.backend_list:\n        Backend.backend_list.append(name.lower())\n    if devices is not None:\n        for device in devices:\n            if device != 'cpu' and device != 'cuda':\n                Backend.default_device_backend_map[device] = name.lower()\n    Backend.backend_type_map[name.lower()] = ProcessGroup.BackendType.CUSTOM\n    if devices is None:\n        warnings.warn(f'Device capability of {name} unspecified, assuming `cpu` and `cuda`. Please specify it via the `devices` argument of `register_backend`.')\n        Backend.backend_capability[name.lower()] = ['cpu', 'cuda']\n    elif isinstance(devices, str):\n        Backend.backend_capability[name.lower()] = [devices]\n    else:\n        Backend.backend_capability[name.lower()] = devices\n    Backend._plugins[name.upper()] = Backend._BackendPlugin(func, extended_api)",
    "docstring": "Register a new backend with the given name and instantiating function. This class method is used by 3rd party `None`, assuming both \"cpu\" and \"cuda\" .. note:: This support of 3rd party backend is experimental and subject to change.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\distributed_c10d.py",
    "ast_data": "FunctionDef name:register_backend arg:cls arg:name arg:func arg:extended_api arg:devices arguments arg arg arg arg arg If Call Call Call Call Call If Compare Call Call Call If Compare For If BoolOp Compare Compare Assign Call Assign Call If Compare Call Assign Call If Call Assign Call Assign Call Assign Call Call"
  },
  {
    "library": "cherrypy",
    "name": "read",
    "source_code": "def read(self, size=None, fp_out=None):\n    return self.fp.read(size, fp_out)",
    "docstring": "Read bytes from the connection.",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\_cpreqbody.py",
    "ast_data": "FunctionDef name:read arg:self arg:size arg:fp_out arguments arg arg arg Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "score_samples",
    "source_code": "def score_samples(self, X):\n    check_is_fitted(self)\n    X = validate_data(self, X, reset=False)\n    Xr = X - self.mean_\n    precision = self.get_precision()\n    n_features = X.shape[1]\n    log_like = -0.5 * (Xr * np.dot(Xr, precision)).sum(axis=1)\n    log_like -= 0.5 * (n_features * log(2.0 * np.pi) - fast_logdet(precision))\n    return log_like",
    "docstring": "Compute the log-likelihood of each sample. Parameters ---------- X : ndarray of shape (n_samples, n_features) The data. Returns ------- ll : ndarray of shape (n_samples,) Log-likelihood of each sample under the current model.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\decomposition\\_factor_analysis.py",
    "ast_data": "FunctionDef name:score_samples arg:self arg:X arguments arg arg Call Assign Call Assign Assign Call Assign Assign Call Call Call Call Return return:yes"
  },
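The per-sample quantity being computed is the Gaussian log-density under the model's mean and precision: `ll_i = -0.5 * (x_i - mu) @ P @ (x_i - mu) - 0.5 * (d*log(2*pi) - logdet(P))`. A NumPy sketch of the same arithmetic, with synthetic data and a jittered covariance so the inverse exists:

```python
import numpy as np

rng = np.random.default_rng(0)
X = rng.normal(size=(5, 3))
mu = X.mean(axis=0)
P = np.linalg.inv(np.cov(X, rowvar=False) + 1e-6 * np.eye(3))  # precision

Xr = X - mu
quad = -0.5 * (Xr * (Xr @ P)).sum(axis=1)        # quadratic form per sample
const = -0.5 * (X.shape[1] * np.log(2 * np.pi)
                - np.linalg.slogdet(P)[1])       # normalization term
print(quad + const)  # one log-likelihood per sample
```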
  {
    "library": "numpy",
    "name": "wrap_unlinkable_objects",
    "source_code": "def wrap_unlinkable_objects(self, objects, output_dir, extra_dll_dir):\n    raise NotImplementedError()",
    "docstring": "Convert a set of object files that are not compatible with the default linker, to a file that is compatible. Parameters ---------- objects : list List of object files to include. output_dir : str Output directory to place generated object files. extra_dll_dir : str Output directory to place extra DLL files that need to be included on Windows. Returns ------- converted_objects : list of str List of converted object files. Note that the number of output files is not necessarily the same as inputs.",
    "type": "method",
    "file_path": "numpy\\numpy\\distutils\\fcompiler\\__init__.py",
    "ast_data": "FunctionDef name:wrap_unlinkable_objects arg:self arg:objects arg:output_dir arg:extra_dll_dir arguments arg arg arg arg Raise Call"
  },
  {
    "library": "pandas",
    "name": "file_path_to_url",
    "source_code": "def file_path_to_url(path: str) -> str:\n    from urllib.request import pathname2url\n    return urljoin('file:', pathname2url(path))",
    "docstring": "converts an absolute native path to a FILE URL. Parameters ---------- path : a path in native format Returns ------- a valid FILE URL",
    "type": "function",
    "file_path": "pandas\\pandas\\io\\common.py",
    "ast_data": "FunctionDef name:file_path_to_url arg:path arguments arg Return return:yes Call Call"
  },
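Runnable as-is on POSIX paths (Windows drive letters and backslashes go through `pathname2url`'s own escaping):

```python
from urllib.request import pathname2url
from urllib.parse import urljoin

def file_path_to_url(path: str) -> str:
    # Join the file: scheme with the percent-encoded native path.
    return urljoin("file:", pathname2url(path))

print(file_path_to_url("/tmp/data.csv"))  # file:///tmp/data.csv
```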
  {
    "library": "kornia",
    "name": "AdjustContrast",
    "source_code": "class AdjustContrast(Module):\n\n    def __init__(self, contrast_factor: Union[float, Tensor]) -> None:\n        super().__init__()\n        self.contrast_factor: Union[float, Tensor] = contrast_factor\n\n    def forward(self, input: Tensor) -> Tensor:\n        return adjust_contrast(input, self.contrast_factor)",
    "docstring": "Adjust Contrast of an image. This implementation aligns OpenCV, not PIL. Hence, the output differs from TorchVision. The input image is expected to be in the range of [0, 1]. Args: contrast_factor: Contrast adjust factor per element in the batch. 0 generates a completely black image, 1 does not modify the input image while any other non-negative number modify the brightness by this factor. Shape: - Input: Image/Input to be adjusted in the shape of :math:. - Output: Adjusted image in the shape of :math:. Example: >>> x = torch.ones(1, 1, 3, 3) >>> AdjustContrast(0.5)(x) tensor([[[[0.5000, 0.5000, 0.5000], [0.5000, 0.5000, 0.5000], [0.5000, 0.5000, 0.5000]]]]) >>> x = torch.ones(2, 5, 3, 3) >>> y = torch.ones(2) >>> AdjustContrast(y)(x).shape torch.Size([2, 5, 3, 3])",
    "type": "class",
    "file_path": "kornia\\kornia\\enhance\\adjust.py",
    "ast_data": "ClassDef name:AdjustContrast FunctionDef name:__init__ arg:self arg:contrast_factor arguments arg arg Call Call FunctionDef name:forward arg:self arg:input arguments arg arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "update_kwarg",
    "source_code": "@compatibility(is_backward_compatible=True)\ndef update_kwarg(self, key: str, arg: Argument) -> None:\n    self.kwargs = {**self.kwargs, key: arg}",
    "docstring": "Update an existing keyword argument to contain the new value ``",
    "type": "method",
    "file_path": "pytorch\\torch\\fx\\node.py",
    "ast_data": "FunctionDef name:update_kwarg arg:self arg:key arg:arg arguments arg arg arg Assign Call"
  },
  {
    "library": "tensorflow",
    "name": "expint",
    "source_code": "@tf_export('math.special.expint')\n@dispatch.register_unary_elementwise_api\n@dispatch.add_dispatch_support\ndef expint(x, name=None):\n    with ops.name_scope(name, 'expint', [x]):\n        return gen_special_math_ops.expint(x)",
    "docstring": "Computes the Exponential integral of element-wise. The Exponential integral is defined as the integral of from to , with the domain of definition all positive real numbers. >>> tf.math.special.expint([1., 1.1, 2.1, 4.1]).numpy() array([ 1.8951179, 2.1673784, 5.3332353, 21.048464], dtype=float32) This implementation is based off of the Cephes math library. Args: x: A or . Must be one of the following types: , . name: A name for the operation (optional). Returns: A or , respectively. Has the same type as . @compatibility(scipy) Equivalent to scipy.special.expi @end_compatibility",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\special_math_ops.py",
    "ast_data": "FunctionDef name:expint arg:x arg:name arguments arg arg With Call Return return:yes Call Call"
  },
  {
    "library": "scikit-learn",
    "name": "_update_feature_log_prob",
    "source_code": "@abstractmethod\ndef _update_feature_log_prob(self, alpha):\n    pass",
    "docstring": "Update feature log probabilities based on counts. This method is called each time or update the model. Parameters ---------- alpha : float smoothing parameter. See :meth:.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\naive_bayes.py",
    "ast_data": "FunctionDef name:_update_feature_log_prob arg:self arg:alpha arguments arg arg"
  },
  {
    "library": "kornia",
    "name": "deprecated",
    "source_code": "def deprecated(replace_with: Optional[str]=None, version: Optional[str]=None, extra_reason: Optional[str]=None) -> Any:\n\n    def _deprecated(func: Callable[..., Any]) -> Any:\n\n        @wraps(func)\n        def wrapper(*args: Any, **kwargs: Any) -> Any:\n            name = ''\n            beginning = f'Since kornia {version} the ' if version is not None else ''\n            if isclass(func):\n                name = func.__class__.__name__\n            if isfunction(func):\n                name = func.__name__\n            warnings.simplefilter('always', DeprecationWarning)\n            if replace_with is not None:\n                warnings.warn(f'{beginning}`{name}` is deprecated in favor of `{replace_with}`.{extra_reason}', category=DeprecationWarning, stacklevel=2)\n            else:\n                warnings.warn(f'{beginning}`{name}` is deprecated and will be removed in the future versions.{extra_reason}', category=DeprecationWarning, stacklevel=2)\n            warnings.simplefilter('default', DeprecationWarning)\n            return func(*args, **kwargs)\n        return wrapper\n    return _deprecated",
    "docstring": "Mark methods as deprecated.",
    "type": "function",
    "file_path": "kornia\\kornia\\utils\\helpers.py",
    "ast_data": "FunctionDef name:deprecated arg:replace_with arg:version arg:extra_reason arguments arg arg arg FunctionDef name:_deprecated arg:func arguments arg FunctionDef name:wrapper arguments arg arg Assign Assign Compare If Call Assign If Call Assign Call If Compare Call Call Call Return return:yes Call Call Return return:yes Return return:yes"
  },
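A sketch of applying the decorator; the function below is hypothetical. Note that `extra_reason` is interpolated directly into the message, so passing a string (rather than leaving the `None` default) avoids a literal "None" in the warning:

```python
from kornia.utils.helpers import deprecated

@deprecated(replace_with="new_resize", version="0.7", extra_reason=" Use the new API instead.")
def old_resize(x):
    return x

# Warns: "Since kornia 0.7 the `old_resize` is deprecated in favor of
# `new_resize`. Use the new API instead."
old_resize(1.0)
```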
  {
    "library": "pytorch",
    "name": "AsyncCheckpointerType",
    "source_code": "class AsyncCheckpointerType(Enum):\n    THREAD = 'thread'\n    PROCESS = 'process'",
    "docstring": "Enum for async checkpointer type.",
    "type": "class",
    "file_path": "pytorch\\torch\\distributed\\checkpoint\\state_dict_saver.py",
    "ast_data": "ClassDef name:AsyncCheckpointerType Assign Assign"
  },
  {
    "library": "scipy",
    "name": "xp_default_dtype",
    "source_code": "def xp_default_dtype(xp):\n    if is_torch(xp):\n        return xp.get_default_dtype()\n    else:\n        return xp.float64",
    "docstring": "Query the namespace-dependent default floating-point dtype.",
    "type": "function",
    "file_path": "scipy\\scipy\\_lib\\_array_api.py",
    "ast_data": "FunctionDef name:xp_default_dtype arg:xp arguments arg If Call Return return:yes Call Return return:yes"
  },
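Since this is a private SciPy helper, the sketch below only illustrates the dispatch it performs, assuming the NumPy namespace; with a torch namespace the helper would defer to `torch.get_default_dtype()`:

```python
import numpy as np
from scipy._lib._array_api import array_namespace, xp_default_dtype  # internal API

xp = array_namespace(np.ones(3))  # array-api-compat wrapper around numpy
print(xp_default_dtype(xp))       # float64: the non-torch branch
```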
  {
    "library": "tensorflow",
    "name": "_prefix",
    "source_code": "@property\ndef _prefix(self):\n    return self._checkpoint_prefix",
    "docstring": "A common prefix for all checkpoints saved with this manager. For example, if (a constructor argument) were , would be and checkpoints would generally be numbered , , and so on. Each checkpoint has several associated files (e.g. ). Returns: A string prefix.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\checkpoint\\checkpoint_management.py",
    "ast_data": "FunctionDef name:_prefix arg:self arguments arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_get_default_group",
    "source_code": "def _get_default_group() -> ProcessGroup:\n    if not is_initialized():\n        raise ValueError('Default process group has not been initialized, please make sure to call init_process_group.')\n    if TYPE_CHECKING:\n        return not_none(GroupMember.WORLD)\n    else:\n        return GroupMember.WORLD",
    "docstring": "Get the default process group created by init_process_group.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\distributed_c10d.py",
    "ast_data": "FunctionDef name:_get_default_group arguments If Call Raise Call If Return return:yes Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_verify_static_batch_size_equality",
    "source_code": "def _verify_static_batch_size_equality(tensors, columns):\n    expected_batch_size = None\n    for i in range(0, len(tensors)):\n        if tensors[i].shape.dims[0].value is not None:\n            if expected_batch_size is None:\n                bath_size_column_index = i\n                expected_batch_size = tensors[i].shape.dims[0]\n            elif not expected_batch_size.is_compatible_with(tensors[i].shape.dims[0]):\n                raise ValueError('Batch size (first dimension) of each feature must be same. Batch size of columns ({}, {}): ({}, {})'.format(columns[bath_size_column_index].name, columns[i].name, expected_batch_size, tensors[i].shape.dims[0]))",
    "docstring": "Validates that the first dim (batch size) of all tensors are equal or None. Args: tensors: list of tensors to check. columns: list of feature columns matching tensors. Will be used for error messaging. Raises: ValueError: if one of the tensors has a variant batch size",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\feature_column\\feature_column.py",
    "ast_data": "FunctionDef name:_verify_static_batch_size_equality arg:tensors arg:columns arguments arg arg Assign For Call Call If Compare If Compare Assign Assign If Call Raise Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_broadcast_half",
    "source_code": "def _broadcast_half(ac_0: _LayerBroadcaster, a_1: RowPartition) -> Tuple[_LayerBroadcaster, RowPartition]:\n    c_1 = ac_0.broadcast_row_partition(a_1)\n    old_value_rowids = array_ops.gather(ac_0.gather_index, c_1.value_rowids())\n    old_row_starts = array_ops.gather(a_1.row_splits(), old_value_rowids)\n    gather_index = old_row_starts + c_1.offsets_in_rows()\n    return [_LayerBroadcaster.from_gather_index(gather_index), c_1]",
    "docstring": "Does a NOOP broadcast of a_1. *-ac_0-->* | | a_1 c_1 | | V V *-ac_1-->* Note that by definition this cannot fail: there is always a well-defined NOOP broadcast. This is usually intended as half of broadcasting two shapes together. Args: ac_0: previous LayerBroadcaster a_1: previous RowPartition Returns: [ac_1, c_1] where ac_1 is the next LayerBroadcaster, and c_1 is the broadcast RowPartition",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\dynamic_ragged_shape.py",
    "ast_data": "FunctionDef name:_broadcast_half arg:ac_0 arg:a_1 arguments arg arg Assign Call Assign Call Call Assign Call Call Assign Call Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "__init__",
    "source_code": "def __init__(self, ax, loc=None, bbox=None, **kwargs):\n    super().__init__()\n    if isinstance(loc, str):\n        if loc not in self.codes:\n            raise ValueError('Unrecognized location {!r}. Valid locations are\\n\\t{}'.format(loc, '\\n\\t'.join(self.codes)))\n        loc = self.codes[loc]\n    self.set_figure(ax.get_figure(root=False))\n    self._axes = ax\n    self._loc = loc\n    self._bbox = bbox\n    ax._unstale_viewLim()\n    self.set_transform(ax.transAxes)\n    self._cells = {}\n    self._edges = None\n    self._autoColumns = []\n    self._autoFontsize = True\n    self._internal_update(kwargs)\n    self.set_clip_on(False)",
    "docstring": "Parameters ---------- ax : The to plot the table into. loc : str, optional The position of the cell with respect to *ax*. This must be one of the . bbox : or [xmin, ymin, width, height], optional A bounding box to draw the table into. If this is not *None*, this overrides *loc*. Other Parameters ---------------- **kwargs properties.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\table.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:ax arg:loc arg:bbox arguments arg arg arg arg arg Call Call If Call If Compare Raise Call Call Call Assign Call Call Assign Assign Assign Call Call Assign Assign Assign Assign Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_prepare_sample_weights",
    "source_code": "def _prepare_sample_weights(self, sample_weights=None):\n    if sample_weights is not None:\n        if len(sample_weights) != len(self._training_endpoints):\n            raise ValueError('Provided sample weights must have same length as the number of outputs. Expected: {}, got: {}.'.format(len(self._training_endpoints), len(sample_weights)))\n    else:\n        sample_weights = [None] * len(self._training_endpoints)\n    for endpoint, weight in zip(self._training_endpoints, sample_weights):\n        endpoint.populate_sample_weight(weight, endpoint.sample_weight_mode)",
    "docstring": "Sets sample weight attribute on the model.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\training_v1.py",
    "ast_data": "FunctionDef name:_prepare_sample_weights arg:self arg:sample_weights arguments arg arg If Compare If Compare Call Call Raise Call Call Call Call Assign Call For Call Call"
  },
  {
    "library": "pytorch",
    "name": "get_use_env",
    "source_code": "def get_use_env(args) -> bool:\n    if not hasattr(args, 'use_env'):\n        return True\n    return args.use_env",
    "docstring": "Retrieve `` and will be deprecated in future releases.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\run.py",
    "ast_data": "FunctionDef name:get_use_env arg:args arguments arg If Call Return return:yes Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "reduce_any",
    "source_code": "@dispatch.dispatch_for_api(math_ops.reduce_any)\ndef reduce_any(input_tensor: ragged_tensor.Ragged, axis=None, keepdims=None, name=None):\n    with ops.name_scope(name, 'RaggedReduceAny', [input_tensor, axis]):\n        return _cast(reduce_sum(_cast(input_tensor, dtypes.int32), axis, keepdims), dtypes.bool)",
    "docstring": "For docs, see: _RAGGED_REDUCE_DOCSTRING.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\ragged_math_ops.py",
    "ast_data": "FunctionDef name:reduce_any arg:input_tensor arg:axis arg:keepdims arg:name arguments arg arg arg arg With Call Return return:yes Call Call Call Call"
  },
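The dispatch registration above is what lets the public `tf.reduce_any` work row-wise on ragged inputs; a small illustration:

```python
import tensorflow as tf

rt = tf.ragged.constant([[True, False], [False]])
print(tf.reduce_any(rt, axis=1))  # tf.Tensor([ True False], shape=(2,), dtype=bool)
```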
  {
    "library": "scikit-learn",
    "name": "classes_",
    "source_code": "@property\ndef classes_(self):\n    return self.steps[-1][1].classes_",
    "docstring": "The classes labels. Only exist if the last step is a classifier.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\pipeline.py",
    "ast_data": "FunctionDef name:classes_ arg:self arguments arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "DedupList",
    "source_code": "class DedupList(Generic[T]):\n\n    def __init__(self, items: Optional[list[T]]=None, membership: Optional[OrderedSet[T]]=None) -> None:\n        self.items = items or []\n        self.membership = membership or OrderedSet()\n\n    def append(self, node_user: T) -> None:\n        if node_user in self.membership:\n            return\n        self.items.append(node_user)\n        self.membership.add(node_user)\n\n    def __add__(self, other: DedupList[T]) -> DedupList[T]:\n        new_membership = OrderedSet.union(self.membership, other.membership)\n        new_items = self.items + [x for x in other.items if x not in self.membership]\n        return DedupList(new_items, new_membership)",
    "docstring": "This data structure behaves like a list except it makes sure the elements remain unique. Normally one could use a OrderedSet/dict for this purpose however the list in question gets elements appended as it is being iterated over which means that we need to keep the list semantics.",
    "type": "class",
    "file_path": "pytorch\\torch\\_inductor\\scheduler.py",
    "ast_data": "ClassDef name:DedupList FunctionDef name:__init__ arg:self arg:items arg:membership arguments arg arg arg Assign BoolOp Assign BoolOp Call FunctionDef name:append arg:self arg:node_user arguments arg arg If Compare Return return:no Call Call FunctionDef name:__add__ arg:self arg:other arguments arg arg Assign Call Assign Compare Return return:yes Call"
  },
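A behavior sketch; note that `__init__` does not backfill `membership` from `items`, so deduplication only applies to elements added through `append` (or unioned via `+`):

```python
from torch._inductor.scheduler import DedupList  # internal API

dl = DedupList()
for x in [1, 2, 2, 3]:
    dl.append(x)          # the second 2 is a no-op

other = DedupList()
other.append(3)
other.append(4)

merged = dl + other       # union that preserves order
print(merged.items)       # [1, 2, 3, 4]
```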
  {
    "library": "pandas",
    "name": "dispatch_fill_zeros",
    "source_code": "def dispatch_fill_zeros(op, left, right, result):\n    if op is divmod:\n        result = (mask_zero_div_zero(left, right, result[0]), _fill_zeros(result[1], left, right))\n    elif op is roperator.rdivmod:\n        result = (mask_zero_div_zero(right, left, result[0]), _fill_zeros(result[1], right, left))\n    elif op is operator.floordiv:\n        result = mask_zero_div_zero(left, right, result)\n    elif op is roperator.rfloordiv:\n        result = mask_zero_div_zero(right, left, result)\n    elif op is operator.mod:\n        result = _fill_zeros(result, left, right)\n    elif op is roperator.rmod:\n        result = _fill_zeros(result, right, left)\n    return result",
    "docstring": "Call _fill_zeros with the appropriate fill value depending on the operation, with special logic for divmod and rdivmod. Parameters ---------- op : function (operator.add, operator.div, ...) left : object (np.ndarray for non-reversed ops) We have excluded ExtensionArrays here right : object (np.ndarray for reversed ops) We have excluded ExtensionArrays here result : ndarray Returns ------- result : np.ndarray Notes ----- For divmod and rdivmod, the parameter and returned is a 2-tuple of ndarray objects.",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\ops\\missing.py",
    "ast_data": "FunctionDef name:dispatch_fill_zeros arg:op arg:left arg:right arg:result arguments arg arg arg arg If Compare Assign Call Call If Compare Assign Call Call If Compare Assign Call If Compare Assign Call If Compare Assign Call If Compare Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "has_value",
    "source_code": "@abc.abstractmethod\ndef has_value(self, name=None):\n    raise NotImplementedError('Optional.has_value()')",
    "docstring": "Returns a tensor that evaluates to if this optional has a value. >>> optional = tf.experimental.Optional.from_value(42) >>> print(optional.has_value()) tf.Tensor(True, shape=(), dtype=bool) Args: name: (Optional.) A name for the created operation. Returns: A scalar of type .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\data\\ops\\optional_ops.py",
    "ast_data": "FunctionDef name:has_value arg:self arg:name arguments arg arg Raise Call"
  },
  {
    "library": "seaborn",
    "name": "light_palette",
    "source_code": "def light_palette(color, n_colors=6, reverse=False, as_cmap=False, input='rgb'):\n    rgb = _color_to_rgb(color, input)\n    hue, sat, _ = husl.rgb_to_husl(*rgb)\n    gray_s, gray_l = (0.15 * sat, 95)\n    gray = _color_to_rgb((hue, gray_s, gray_l), input='husl')\n    colors = [rgb, gray] if reverse else [gray, rgb]\n    return blend_palette(colors, n_colors, as_cmap)",
    "docstring": "Make a sequential palette that blends from light to `choose_light_paletteinputmatplotlib.colors.ListedColormapmatplotlib.colors.ListedColormap` See Also -------- dark_palette : Create a sequential palette with dark low values. diverging_palette : Create a diverging palette with two colors. Examples -------- .. include:: ../docstrings/light_palette.rst",
    "type": "function",
    "file_path": "seaborn\\seaborn\\palettes.py",
    "ast_data": "FunctionDef name:light_palette arg:color arg:n_colors arg:reverse arg:as_cmap arg:input arguments arg arg arg arg arg Assign Call Assign Call Assign Assign Call Assign Return return:yes Call"
  },
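Typical calls, sketched from the signature above:

```python
import seaborn as sns

colors = sns.light_palette("seagreen", n_colors=4)        # list of RGB tuples
cmap = sns.light_palette("seagreen", as_cmap=True)        # colormap object
husl_pal = sns.light_palette((260, 75, 60), input="husl") # tuple in husl space
```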
  {
    "library": "seaborn",
    "name": "pipe",
    "source_code": "def pipe(self, func, *args, **kwargs):\n    return func(self, *args, **kwargs)",
    "docstring": "Pass the grid to a user-supplied function and return its value. The must accept an object of this type for its first positional argument. Additional arguments are passed through. The return value of becomes the return value of this method. See the method if you want to return self instead. Added in v0.12.0.",
    "type": "method",
    "file_path": "seaborn\\seaborn\\axisgrid.py",
    "ast_data": "FunctionDef name:pipe arg:self arg:func arguments arg arg arg arg Return return:yes Call"
  },
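A sketch of the pass-through behavior on a `FacetGrid`; the data frame is illustrative:

```python
import pandas as pd
import seaborn as sns

df = pd.DataFrame({"x": [1, 2, 3], "y": [4, 5, 6], "g": ["a", "a", "b"]})
grid = sns.FacetGrid(df, col="g")
# pipe returns whatever the callable returns (here, the suptitle Text object)
title = grid.pipe(lambda g: g.figure.suptitle("demo"))
```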
  {
    "library": "matplotlib",
    "name": "get_facecolor",
    "source_code": "def get_facecolor(self):\n    return self.patch.get_facecolor()",
    "docstring": "Get the face color of the Figure rectangle.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\figure.py",
    "ast_data": "FunctionDef name:get_facecolor arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "replace_input_with",
    "source_code": "@compatibility(is_backward_compatible=True)\ndef replace_input_with(self, old_input: 'Node', new_input: 'Node') -> None:\n\n    def maybe_replace_node(n: Node) -> Node:\n        return new_input if n == old_input else n\n    m = self.graph.owning_module\n    if getattr(m, '_replace_hooks', None):\n        for replace_hook in m._replace_hooks:\n            replace_hook(old=old_input, new=new_input.name, user=self)\n    new_args = _fx_map_arg(self.args, maybe_replace_node)\n    new_kwargs = _fx_map_arg(self.kwargs, maybe_replace_node)\n    assert isinstance(new_args, tuple)\n    assert isinstance(new_kwargs, dict)\n    self._update_args_kwargs(new_args, new_kwargs)",
    "docstring": "Loop through input nodes of ``.",
    "type": "method",
    "file_path": "pytorch\\torch\\fx\\node.py",
    "ast_data": "FunctionDef name:replace_input_with arg:self arg:old_input arg:new_input arguments arg arg arg FunctionDef name:maybe_replace_node arg:n arguments arg Return return:yes Compare Assign If Call For Call Assign Call Assign Call Call Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_in_multi_worker_mode",
    "source_code": "def _in_multi_worker_mode(self):\n    return False",
    "docstring": "Whether this strategy indicates working in multi-worker settings.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\one_device_strategy.py",
    "ast_data": "FunctionDef name:_in_multi_worker_mode arg:self arguments arg Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "get_n_splits",
    "source_code": "def get_n_splits(self, X=None, y=None, groups=None):\n    return len(self.cv)",
    "docstring": "Returns the number of splitting iterations in the cross-validator. Parameters ---------- X : object Always ignored, exists for compatibility. y : object Always ignored, exists for compatibility. groups : object Always ignored, exists for compatibility. Returns ------- n_splits : int Returns the number of splitting iterations in the cross-validator.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\model_selection\\_split.py",
    "ast_data": "FunctionDef name:get_n_splits arg:self arg:X arg:y arg:groups arguments arg arg arg arg Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "is_monotonic_decreasing",
    "source_code": "@property\ndef is_monotonic_decreasing(self) -> bool:\n    from pandas import Index\n    return Index(self).is_monotonic_decreasing",
    "docstring": "Return True if values in the object are monotonically decreasing. Returns ------- bool See Also -------- Series.is_monotonic_increasing : Return boolean if values in the object are monotonically increasing. Examples -------- >>> s = pd.Series([3, 2, 2, 1]) >>> s.is_monotonic_decreasing True >>> s = pd.Series([1, 2, 3]) >>> s.is_monotonic_decreasing False",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\base.py",
    "ast_data": "FunctionDef name:is_monotonic_decreasing arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "django",
    "name": "get_dated_queryset",
    "source_code": "def get_dated_queryset(self, **lookup):\n    qs = self.get_queryset().filter(**lookup)\n    date_field = self.get_date_field()\n    allow_future = self.get_allow_future()\n    allow_empty = self.get_allow_empty()\n    paginate_by = self.get_paginate_by(qs)\n    if not allow_future:\n        now = timezone.now() if self.uses_datetime_field else timezone_today()\n        qs = qs.filter(**{'%s__lte' % date_field: now})\n    if not allow_empty:\n        is_empty = not qs if paginate_by is None else not qs.exists()\n        if is_empty:\n            raise Http404(_('No %(verbose_name_plural)s available') % {'verbose_name_plural': qs.model._meta.verbose_name_plural})\n    return qs",
    "docstring": "Get a queryset properly filtered according to and any extra lookup kwargs.",
    "type": "method",
    "file_path": "django\\django\\views\\generic\\dates.py",
    "ast_data": "FunctionDef name:get_dated_queryset arg:self arguments arg arg Assign Call Call Assign Call Assign Call Assign Call Assign Call If Assign Call Call Assign Call If Assign Compare Call If Raise Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "get_state_dict_type",
    "source_code": "@staticmethod\ndef get_state_dict_type(module: nn.Module) -> StateDictSettings:\n    state_dict_settings: Optional[StateDictSettings] = None\n    for submodule in FullyShardedDataParallel.fsdp_modules(module):\n        if state_dict_settings is None:\n            state_dict_settings = StateDictSettings(state_dict_type=submodule._state_dict_type, state_dict_config=submodule._state_dict_config, optim_state_dict_config=submodule._optim_state_dict_config)\n            _set_optim_use_dtensor(submodule, state_dict_settings)\n        else:\n            submodule_settings = StateDictSettings(submodule._state_dict_type, submodule._state_dict_config, submodule._optim_state_dict_config)\n            assert state_dict_settings == submodule_settings, f'All FSDP modules must have the same state dict settings.Got {submodule_settings} and {state_dict_settings}.'\n            _set_optim_use_dtensor(submodule, submodule_settings)\n    return state_dict_settings",
    "docstring": "Get the state_dict_type and the corresponding configurations for the FSDP modules rooted at `` for different FSDP submodules differ.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\fsdp\\fully_sharded_data_parallel.py",
    "ast_data": "FunctionDef name:get_state_dict_type arg:module arguments arg For Call If Compare Assign Call Call Assign Call Compare Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_TileGradShape",
    "source_code": "def _TileGradShape(op: ops.Operation):\n    multiples_shape = op.inputs[1].get_shape().with_rank(1)\n    input_shape = op.inputs[0].get_shape().with_rank(multiples_shape[0])\n    multiples = tensor_util.constant_value_as_shape(op.inputs[1]).with_rank(input_shape.ndims)\n    if multiples.ndims is None:\n        return [tensor_shape.unknown_shape()]\n    else:\n        output_dims = []\n        for dim, multiple in zip(input_shape.dims, multiples.dims):\n            output_dims.append(dim // multiple)\n        return [tensor_shape.TensorShape(output_dims)]",
    "docstring": "Shape function for the TileGrad op.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\array_ops.py",
    "ast_data": "FunctionDef name:_TileGradShape arg:op arguments arg Assign Call Call Assign Call Call Assign Call Call If Compare Return return:yes Call Assign For Call Call Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "get_view_interval",
    "source_code": "def get_view_interval(self):\n    raise NotImplementedError('Derived must override')",
    "docstring": "Return the `` view limits of this axis.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\axis.py",
    "ast_data": "FunctionDef name:get_view_interval arg:self arguments arg Raise Call"
  },
  {
    "library": "django",
    "name": "__getattr__",
    "source_code": "def __getattr__(self, name):\n    if (_wrapped := self._wrapped) is empty:\n        self._setup(name)\n        _wrapped = self._wrapped\n    val = getattr(_wrapped, name)\n    if name in {'MEDIA_URL', 'STATIC_URL'} and val is not None:\n        val = self._add_script_prefix(val)\n    elif name == 'SECRET_KEY' and (not val):\n        raise ImproperlyConfigured('The SECRET_KEY setting must not be empty.')\n    self.__dict__[name] = val\n    return val",
    "docstring": "Return the value of a setting and cache it in self.__dict__.",
    "type": "method",
    "file_path": "django\\django\\conf\\__init__.py",
    "ast_data": "FunctionDef name:__getattr__ arg:self arg:name arguments arg arg If Compare Call Assign Assign Call If BoolOp Compare Compare Assign Call If BoolOp Compare Raise Call Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "copy_to_device",
    "source_code": "@tf_export('data.experimental.copy_to_device')\ndef copy_to_device(target_device, source_device='/cpu:0'):\n\n    def _apply_fn(dataset):\n        return _CopyToDeviceDataset(dataset, target_device=target_device, source_device=source_device)\n    return _apply_fn",
    "docstring": "A transformation that copies dataset elements to the given . Args: target_device: The name of a device to which elements will be copied. source_device: The original device on which will be placed. Returns: A transformation function, which can be passed to .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\data\\experimental\\ops\\prefetching_ops.py",
    "ast_data": "FunctionDef name:copy_to_device arg:target_device arg:source_device arguments arg arg FunctionDef name:_apply_fn arg:dataset arguments arg Return return:yes Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "map_arg",
    "source_code": "@compatibility(is_backward_compatible=True)\ndef map_arg(a: ArgumentT, fn: Callable[[Node], Argument]) -> ArgumentT:\n    assert callable(fn), 'torch.fx.map_arg(a, fn): fn must be a callable'\n    return _fx_map_arg(a, fn)",
    "docstring": "Apply fn recursively to each Node appearing in arg. arg may be a list, tuple, slice, or dict with string keys: the return value will have the same type and structure.",
    "type": "function",
    "file_path": "pytorch\\torch\\fx\\node.py",
    "ast_data": "FunctionDef name:map_arg arg:a arg:fn arguments arg arg Call Return return:yes Call Call"
  },
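A sketch showing that `map_arg` preserves container structure while transforming each `Node`:

```python
import torch.fx
from torch.fx import map_arg

gm = torch.fx.symbolic_trace(lambda x, y: x + y)
add_node = next(n for n in gm.graph.nodes if n.op == "call_function")
# Replace every Node in the args tuple with its name; the tuple shape survives.
print(map_arg(add_node.args, lambda n: n.name))  # ('x', 'y')
```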
  {
    "library": "tensorflow",
    "name": "delete_file_v2",
    "source_code": "@tf_export('io.gfile.remove')\ndef delete_file_v2(path):\n    _pywrap_file_io.DeleteFile(compat.path_to_bytes(path))",
    "docstring": "Deletes the path located at 'path'. Args: path: string, a path Raises: errors.OpError: Propagates any errors reported by the FileSystem API. E.g., if the path does not exist.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\lib\\io\\file_io.py",
    "ast_data": "FunctionDef name:delete_file_v2 arg:path arguments arg Call Call Call"
  },
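`delete_file_v2` is exported as `tf.io.gfile.remove`; a minimal round trip:

```python
import tensorflow as tf

path = "/tmp/gfile_demo.txt"
with tf.io.gfile.GFile(path, "w") as f:
    f.write("data")
tf.io.gfile.remove(path)
print(tf.io.gfile.exists(path))  # False
```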
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, reduction=losses_utils.ReductionV2.AUTO, name=None):\n    losses_utils.ReductionV2.validate(reduction)\n    self.reduction = reduction\n    self.name = name\n    self._allow_sum_over_batch_size = False\n    self._set_name_scope()",
    "docstring": "Initializes class. Args: reduction: Type of to apply to loss. Default value is . indicates that the reduction option will be determined by the usage context. For almost all cases this defaults to . When used with , outside of built-in training loops such as and , using or will raise an error. Please see this custom training [tutorial]( for more details. name: Optional name for the instance.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\losses.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:reduction arg:name arguments arg arg arg Call Assign Assign Assign Call"
  },
  {
    "library": "tensorflow",
    "name": "from_row_limits",
    "source_code": "@classmethod\ndef from_row_limits(cls, row_limits, validate=True, dtype=None, dtype_hint=None):\n    if not isinstance(validate, bool):\n        raise TypeError('validate must have type bool')\n    with ops.name_scope(None, 'RowPartitionFromRowLimits', [row_limits]):\n        row_limits = cls._convert_row_partition(row_limits, 'row_limits', dtype_hint=dtype_hint, dtype=dtype)\n        row_limits.shape.assert_has_rank(1)\n        if validate:\n            msg = 'Arguments to from_row_limits do not form a valid RaggedTensor'\n            checks = [check_ops.assert_rank(row_limits, 1, message=msg), check_ops.assert_non_negative(row_limits[:1], message=msg), _assert_monotonic_increasing(row_limits, message=msg)]\n            row_limits = control_flow_ops.with_dependencies(checks, row_limits)\n        zero = array_ops.zeros([1], row_limits.dtype)\n        row_splits = array_ops.concat([zero, row_limits], axis=0)\n        return cls(row_splits=row_splits, internal=_row_partition_factory_key)",
    "docstring": "Creates a with rows partitioned by . Equivalent to: . Args: row_limits: A 1-D integer tensor with shape . Must be sorted in ascending order. validate: If true, then use assertions to check that the arguments form a valid . dtype: Optional dtype for the RowPartition. If missing, the type is inferred from the type of , dtype_hint, or tf.int64. dtype_hint: Optional dtype for the RowPartition, used when dtype is None. In some cases, a caller may not have a dtype in mind when converting to a tensor, so dtype_hint can be used as a soft preference. If the conversion to is not possible, this argument has no effect. Returns: A .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\row_partition.py",
    "ast_data": "FunctionDef name:from_row_limits arg:cls arg:row_limits arg:validate arg:dtype arg:dtype_hint arguments arg arg arg arg arg If Call Raise Call With Call Assign Call Call If Assign Assign Call Call Call Assign Call Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "resolution",
    "source_code": "@property\ndef resolution(self) -> str:\n    return self._resolution_obj.attrname",
    "docstring": "Returns day, hour, minute, second, millisecond or microsecond",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\arrays\\datetimelike.py",
    "ast_data": "FunctionDef name:resolution arg:self arguments arg Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "get_metadata_routing",
    "source_code": "def get_metadata_routing(self):\n    router = MetadataRouter(owner=self.__class__.__name__).add(estimator=self.estimator, method_mapping=MethodMapping().add(caller='partial_fit', callee='partial_fit').add(caller='fit', callee='fit'))\n    return router",
    "docstring": "Get metadata routing of this object. Please check :ref: on how the routing mechanism works. .. versionadded:: 1.3 Returns ------- routing : MetadataRouter A :class: encapsulating routing information.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\multioutput.py",
    "ast_data": "FunctionDef name:get_metadata_routing arg:self arguments arg Assign Call Call Call Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "is_empty_shard",
    "source_code": "def is_empty_shard(self) -> bool:\n    return self._storage_meta.size[0] == 0 and self._storage_meta.size[1] == 0",
    "docstring": "Returns a :class: object indicating if the local tensor on current rank is an empty tensor",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\tensor\\_shards_wrapper.py",
    "ast_data": "FunctionDef name:is_empty_shard arg:self arguments arg Return return:yes BoolOp Compare Compare"
  },
  {
    "library": "tensorflow",
    "name": "evaluate",
    "source_code": "def evaluate(self, tensors):\n    sess = ops.get_default_session() or self.cached_session()\n    return sess.run(tensors)",
    "docstring": "Evaluates tensors and returns numpy values. Args: tensors: A Tensor or a nested list/tuple of Tensors. Returns: tensors numpy values.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\platform\\benchmark.py",
    "ast_data": "FunctionDef name:evaluate arg:self arg:tensors arguments arg arg Assign BoolOp Call Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "scatter_nd_add",
    "source_code": "def scatter_nd_add(self, indices, updates, name=None):\n    raise NotImplementedError",
    "docstring": "Applies sparse addition to individual values or slices in a Variable. The Variable has rank and is a of rank . must be integer tensor, containing indices into self. It must be shape where . The innermost dimension of (with length ) corresponds to indices into elements (if ) or slices (if ) along the th dimension of self. is of rank with shape: For example, say we want to add 4 scattered elements to a rank-1 tensor to 8 elements. In Python, that update would look like this: The resulting update to v would look like this: [1, 13, 3, 14, 14, 6, 7, 20] See for more details about how to make updates to slices. Args: indices: The indices to be used in the operation. updates: The values to be used in the operation. name: the name of the operation. Returns: The updated variable.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\variables.py",
    "ast_data": "FunctionDef name:scatter_nd_add arg:self arg:indices arg:updates arg:name arguments arg arg arg arg Raise"
  },
  {
    "library": "tensorflow",
    "name": "get",
    "source_code": "def get(identifier):\n    if isinstance(identifier, dict):\n        return deserialize(identifier)\n    if isinstance(identifier, (int, float)):\n        return loss_scale_module.FixedLossScale(identifier)\n    if identifier == 'dynamic':\n        return loss_scale_module.DynamicLossScale()\n    if isinstance(identifier, loss_scale_module.LossScale):\n        return identifier\n    elif identifier is None:\n        return None\n    else:\n        raise ValueError('Could not interpret loss scale identifier: %s' % identifier)",
    "docstring": "Get a loss scale object.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\mixed_precision\\loss_scale.py",
    "ast_data": "FunctionDef name:get arg:identifier arguments arg If Call Return return:yes Call If Call Return return:yes Call If Compare Return return:yes Call If Call Return return:yes If Compare Return return:no Raise Call"
  },
  {
    "library": "pandas",
    "name": "_maybe_convert_setitem_value",
    "source_code": "def _maybe_convert_setitem_value(self, value):\n    if is_scalar(value):\n        if isna(value):\n            value = None\n        elif not isinstance(value, str):\n            raise TypeError(f\"Invalid value '{value}' for dtype 'str'. Value should be a string or missing value, got '{type(value).__name__}' instead.\")\n    else:\n        value = np.array(value, dtype=object, copy=True)\n        value[isna(value)] = None\n        for v in value:\n            if not (v is None or isinstance(v, str)):\n                raise TypeError(\"Invalid value for dtype 'str'. Value should be a string or missing value (or array of those).\")\n    return super()._maybe_convert_setitem_value(value)",
    "docstring": "Maybe convert value to be pyarrow compatible.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\arrays\\string_arrow.py",
    "ast_data": "FunctionDef name:_maybe_convert_setitem_value arg:self arg:value arguments arg arg If Call If Call Assign If Call Raise Call Call Assign Call Assign Call For If BoolOp Compare Call Raise Call Return return:yes Call Call"
  },
  {
    "library": "pandas",
    "name": "_raw_hex_id",
    "source_code": "def _raw_hex_id(obj) -> str:\n    packed = struct.pack('@P', id(obj))\n    return ''.join([_replacer(x) for x in packed])",
    "docstring": "Return the padded hexadecimal id of ``.",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\computation\\scope.py",
    "ast_data": "FunctionDef name:_raw_hex_id arg:obj arguments arg Assign Call Call Return return:yes Call Call"
  },
  {
    "library": "matplotlib",
    "name": "MaxExtent",
    "source_code": "class MaxExtent(_Base):\n\n    def __init__(self, artist_list, w_or_h):\n        self._artist_list = artist_list\n        _api.check_in_list(['width', 'height'], w_or_h=w_or_h)\n        self._w_or_h = w_or_h\n\n    def add_artist(self, a):\n        self._artist_list.append(a)\n\n    def get_size(self, renderer):\n        rel_size = 0.0\n        extent_list = [getattr(a.get_window_extent(renderer), self._w_or_h) / a.figure.dpi for a in self._artist_list]\n        abs_size = max(extent_list, default=0)\n        return (rel_size, abs_size)",
    "docstring": "Size whose absolute part is either the largest width or the largest height of the given *artist_list*.",
    "type": "class",
    "file_path": "matplotlib\\lib\\mpl_toolkits\\axes_grid1\\axes_size.py",
    "ast_data": "ClassDef name:MaxExtent FunctionDef name:__init__ arg:self arg:artist_list arg:w_or_h arguments arg arg arg Assign Call Assign FunctionDef name:add_artist arg:self arg:a arguments arg arg Call FunctionDef name:get_size arg:self arg:renderer arguments arg arg Assign Assign Call Call Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "shape_from_pyval",
    "source_code": "def shape_from_pyval(pyval, layout: Sequence[int] | None=None):\n\n    def convert(pyval):\n        if isinstance(pyval, tuple):\n            if layout is not None:\n                raise NotImplementedError('shape_from_pyval does not support layouts for tuple shapes')\n            return Shape.tuple_shape(tuple((convert(elt) for elt in pyval)))\n        else:\n            return Shape.array_shape(pyval.dtype, np.shape(pyval), layout)\n    return convert(pyval)",
    "docstring": "Returns a Shape that describes a tuple-tree of Numpy arrays.",
    "type": "function",
    "file_path": "tensorflow\\third_party\\xla\\xla\\python\\xla_client.py",
    "ast_data": "FunctionDef name:shape_from_pyval arg:pyval arg:layout arguments arg arg FunctionDef name:convert arg:pyval arguments arg If Call If Compare Raise Call Return return:yes Call Call Call Return return:yes Call Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "celu",
    "source_code": "def celu(input: Tensor, scale: float, zero_point: int, alpha: float=1.0) -> Tensor:\n    if not input.is_quantized:\n        raise ValueError(\"Input to 'quantized.celu' must be quantized!\")\n    return torch.ops.quantized.celu(input, scale, zero_point, alpha)",
    "docstring": "celu(input, scale, zero_point, alpha=1.) -> Tensor Applies the quantized CELU function element-wise. .. math:: \\text{CELU}(x) = \\max(0,x) + \\min(0, \\alpha * (\\exp(x / \\alpha) - 1)) Args: input: quantized input alpha: the :math: value for the CELU formulation. Default: 1.0",
    "type": "function",
    "file_path": "pytorch\\torch\\ao\\nn\\quantized\\functional.py",
    "ast_data": "FunctionDef name:celu arg:input arg:scale arg:zero_point arg:alpha arguments arg arg arg arg If Raise Call Return return:yes Call"
  },
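A usage sketch: the functional requires an already-quantized tensor, so quantize first:

```python
import torch
from torch.ao.nn.quantized.functional import celu

x = torch.randn(4)
qx = torch.quantize_per_tensor(x, scale=0.1, zero_point=0, dtype=torch.quint8)
out = celu(qx, scale=0.1, zero_point=0, alpha=1.0)  # quantized CELU, same scheme
print(out.dequantize())
```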
  {
    "library": "scipy",
    "name": "extract_lsq_problems",
    "source_code": "def extract_lsq_problems():\n    problems = {}\n    for name, problem_class in inspect.getmembers(sys.modules[__name__], inspect.isclass):\n        if name != 'LSQBenchmarkProblem' and issubclass(problem_class, LSQBenchmarkProblem) and hasattr(problem_class, 'INITIAL_GUESSES'):\n            for i, x0 in enumerate(problem_class.INITIAL_GUESSES):\n                if len(problem_class.INITIAL_GUESSES) > 1:\n                    key_name = f'{name}_{i}'\n                else:\n                    key_name = name\n                problems[key_name] = problem_class(x0)\n    return problems",
    "docstring": "Extract all least squares problems in this file for benchmarking. Returns ------- dict, str -> LSQBenchmarkProblem The key is a problem name. The value is an instance of LSQBenchmarkProblem.",
    "type": "function",
    "file_path": "scipy\\benchmarks\\benchmarks\\lsq_problems.py",
    "ast_data": "FunctionDef name:extract_lsq_problems arguments Assign For Call If BoolOp Compare Call Call For Call If Compare Call Assign Assign Assign Call Return return:yes"
  },
  {
    "library": "scipy",
    "name": "in_tempdir",
    "source_code": "@contextmanager\ndef in_tempdir():\n    pwd = os.getcwd()\n    d = mkdtemp()\n    os.chdir(d)\n    yield d\n    os.chdir(pwd)\n    rmtree(d)",
    "docstring": "Create, return, and change directory to a temporary directory Examples -------- >>> import os >>> my_cwd = os.getcwd() >>> with in_tempdir() as tmpdir: ... _ = open('test.txt', 'wt').write('some text') ... assert os.path.isfile('test.txt') ... assert os.path.isfile(os.path.join(tmpdir, 'test.txt')) >>> os.path.exists(tmpdir) False >>> os.getcwd() == my_cwd True",
    "type": "function",
    "file_path": "scipy\\scipy\\_lib\\_tmpdirs.py",
    "ast_data": "FunctionDef name:in_tempdir arguments Assign Call Assign Call Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "experimental_as_proto",
    "source_code": "def experimental_as_proto(self) -> types_pb2.SerializedDType:\n    return types_pb2.SerializedDType(datatype=self._type_enum)",
    "docstring": "Returns a proto representation of the Dtype instance.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\dtypes.py",
    "ast_data": "FunctionDef name:experimental_as_proto arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "kornia",
    "name": "_generate_anchors",
    "source_code": "@staticmethod\ndef _generate_anchors(spatial_shapes: list[tuple[int, int]], grid_size: float=0.05, eps: float=0.01, device: Optional[torch.device]=None, dtype: Optional[torch.dtype]=None) -> tuple[Tensor, Tensor]:\n    anchors_list: list[Tensor] = []\n    for i, (h, w) in enumerate(spatial_shapes):\n        grid_y, grid_x = torch_meshgrid([torch.arange(h, device=device, dtype=dtype), torch.arange(w, device=device, dtype=dtype)], indexing='ij')\n        grid_xy = torch.stack([grid_x, grid_y], -1)\n        wh = torch.empty(2, device=device, dtype=dtype)\n        wh[0] = w\n        wh[1] = h\n        grid_xy = (grid_xy + 0.5) / wh\n        grid_wh = torch.ones_like(grid_xy) * grid_size * 2.0 ** i\n        anchors_list.append(concatenate([grid_xy, grid_wh], -1).reshape(-1, h * w, 4))\n    anchors = concatenate(anchors_list, 1)\n    valid_mask = ((anchors > eps) * (anchors < 1 - eps)).all(-1, keepdim=True)\n    anchors = torch.log(anchors / (1 - anchors))\n    inf_t = torch.empty(1, device=device, dtype=dtype)\n    inf_t[0] = float('inf')\n    anchors = torch.where(valid_mask, anchors, inf_t)\n    return (anchors, valid_mask)",
    "docstring": "Generate anchors for RT-DETR. Args: spatial_shapes: shape (width, height) of the feature maps grid_size: size of the grid eps: specify the minimum and maximum size of the anchors device: device to place the anchors dtype: data type for the anchors Returns: logit of anchors and mask",
    "type": "method",
    "file_path": "kornia\\kornia\\contrib\\models\\rt_detr\\architecture\\rtdetr_head.py",
    "ast_data": "FunctionDef name:_generate_anchors arg:spatial_shapes arg:grid_size arg:eps arg:device arg:dtype arguments arg arg arg arg arg For Call Assign Call Call Call Assign Call Assign Call Assign Assign Assign Assign Call Call Call Call Assign Call Assign Call Compare Compare Assign Call Assign Call Assign Call Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "first_layer",
    "source_code": "@classmethod\ndef first_layer(cls, nrows_source, nrows_target):\n    gather_index = _first_layer_gather_index(nrows_source, nrows_target)\n    return _LayerBroadcaster.from_gather_index(gather_index)",
    "docstring": "Create a broadcaster from a gather_index.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\dynamic_ragged_shape.py",
    "ast_data": "FunctionDef name:first_layer arg:cls arg:nrows_source arg:nrows_target arguments arg arg arg Assign Call Return return:yes Call"
  },
  {
    "library": "cherrypy",
    "name": "new_func_strip_path",
    "source_code": "def new_func_strip_path(func_name):\n    filename, line, name = func_name\n    if filename.endswith('__init__.py'):\n        return (os.path.basename(filename[:-12]) + filename[-12:], line, name)\n    return (os.path.basename(filename), line, name)",
    "docstring": "Add `` modules' parents. This makes the profiler output more readable.",
    "type": "function",
    "file_path": "cherrypy\\cherrypy\\lib\\profiler.py",
    "ast_data": "FunctionDef name:new_func_strip_path arg:func_name arguments arg Assign If Call Return return:yes Call Return return:yes Call"
  },
  {
    "library": "django",
    "name": "fetch_returned_insert_rows",
    "source_code": "def fetch_returned_insert_rows(self, cursor):\n    return cursor.fetchall()",
    "docstring": "Given a cursor object that has just performed an INSERT...RETURNING statement into a table, return the tuple of returned data.",
    "type": "method",
    "file_path": "django\\django\\db\\backends\\postgresql\\operations.py",
    "ast_data": "FunctionDef name:fetch_returned_insert_rows arg:self arg:cursor arguments arg arg Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "_e_step",
    "source_code": "def _e_step(self, X, cal_sstats, random_init, parallel=None):\n    random_state = self.random_state_ if random_init else None\n    n_jobs = effective_n_jobs(self.n_jobs)\n    if parallel is None:\n        parallel = Parallel(n_jobs=n_jobs, verbose=max(0, self.verbose - 1))\n    results = parallel((delayed(_update_doc_distribution)(X[idx_slice, :], self.exp_dirichlet_component_, self.doc_topic_prior_, self.max_doc_update_iter, self.mean_change_tol, cal_sstats, random_state) for idx_slice in gen_even_slices(X.shape[0], n_jobs)))\n    doc_topics, sstats_list = zip(*results)\n    doc_topic_distr = np.vstack(doc_topics)\n    if cal_sstats:\n        suff_stats = np.zeros(self.components_.shape, dtype=self.components_.dtype)\n        for sstats in sstats_list:\n            suff_stats += sstats\n        suff_stats *= self.exp_dirichlet_component_\n    else:\n        suff_stats = None\n    return (doc_topic_distr, suff_stats)",
    "docstring": "E-step in EM update. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) Document word matrix. cal_sstats : bool Parameter that indicate whether to calculate sufficient statistics or not. Set `doc_topic_distrgammasuff_statscal_sstats == False`, it will be None.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\decomposition\\_lda.py",
    "ast_data": "FunctionDef name:_e_step arg:self arg:X arg:cal_sstats arg:random_init arg:parallel arguments arg arg arg arg arg Assign Assign Call If Compare Assign Call Call Assign Call Call Call Call Assign Call Assign Call If Assign Call For Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "send_eager_tracebacks",
    "source_code": "def send_eager_tracebacks(destinations, origin_stack, send_source=True):\n    _send_call_tracebacks(destinations, origin_stack, is_eager_execution=True, send_source=send_source)",
    "docstring": "Send the tracebacks of an eager execution call to debug server(s). Args: destinations: gRPC destination addresses, a or a of s, e.g., \"localhost:4242\". If a , gRPC requests containing the same origin_stack: The traceback of the eager operation invocation. send_source: Whether the source files involved in the op tracebacks but outside the TensorFlow library are to be sent.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\debug\\lib\\source_remote.py",
    "ast_data": "FunctionDef name:send_eager_tracebacks arg:destinations arg:origin_stack arg:send_source arguments arg arg arg Call"
  },
  {
    "library": "pytorch",
    "name": "_pack_hook_tp",
    "source_code": "def _pack_hook_tp(mesh: DeviceMesh, input_reshard_dim: int, x: torch.Tensor) -> Any:\n    if isinstance(x, DTensor) and all((p.is_replicate() for p in x._spec.placements)):\n        return x.redistribute(device_mesh=mesh, placements=[Shard(input_reshard_dim)])\n    elif not isinstance(x, DTensor) and isinstance(x, torch.Tensor) and (x.numel() >= mesh.size()):\n        return DTensor.from_local(x, device_mesh=mesh).redistribute(device_mesh=mesh, placements=[Shard(input_reshard_dim)]).to_local()\n    else:\n        return x",
    "docstring": "Hook function called after FWD to shard input.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\tensor\\parallel\\input_reshard.py",
    "ast_data": "FunctionDef name:_pack_hook_tp arg:mesh arg:input_reshard_dim arg:x arguments arg arg arg If BoolOp Call Call Call Return return:yes Call Call If BoolOp Call Call Compare Call Call Return return:yes Call Call Call Call Return return:yes"
  },
  {
    "library": "scipy",
    "name": "register",
    "source_code": "def register(*types):\n    check(types)\n\n    def dec(f):\n        check(getfullargspec(f).args, operator.lt, ' in ' + f.__name__)\n        typemap[types] = f\n        return f\n    return dec",
    "docstring": "Decorator to register an implementation for the given types",
    "type": "function",
    "file_path": "scipy\\scipy\\_lib\\decorator.py",
    "ast_data": "FunctionDef name:register arguments arg Call FunctionDef name:dec arg:f arguments arg Call Call Assign Return return:yes Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "prepare_wheel_srcs",
    "source_code": "def prepare_wheel_srcs(headers: list[str], srcs: list[str], dests: list[str], aot: list[str], srcs_dir: str, version: str) -> None:\n    prepare_headers(headers, os.path.join(srcs_dir, 'tensorflow/include'))\n    prepare_srcs(srcs, dests, srcs_dir)\n    prepare_aot(aot, os.path.join(srcs_dir, 'tensorflow/xla_aot_runtime_src'))\n    create_init_files(os.path.join(srcs_dir, 'tensorflow'))\n    shutil.move(os.path.join(srcs_dir, 'tensorflow/tools/pip_package/MANIFEST.in'), os.path.join(srcs_dir, 'MANIFEST.in'))\n    shutil.move(os.path.join(srcs_dir, 'tensorflow/tools/pip_package/THIRD_PARTY_NOTICES.txt'), os.path.join(srcs_dir, 'tensorflow/THIRD_PARTY_NOTICES.txt'))\n    update_xla_tsl_imports(os.path.join(srcs_dir, 'tensorflow'))\n    if dests:\n        return\n    if not is_windows():\n        rename_libtensorflow(os.path.join(srcs_dir, 'tensorflow'), version)\n    if not is_macos() and (not is_windows()):\n        patch_so(srcs_dir)",
    "docstring": "Rearrange source and header files. Args: headers: a list of paths to header files. srcs: a list of paths to the rest of files. dests: a list of paths to files with srcs files destinations. aot: a list of paths to files that should be in xla_aot directory. srcs_dir: directory to copy files to. version: tensorflow version.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\tools\\pip_package\\build_pip_package.py",
    "ast_data": "FunctionDef name:prepare_wheel_srcs arg:headers arg:srcs arg:dests arg:aot arg:srcs_dir arg:version arguments arg arg arg arg arg arg Call Call Call Call Call Call Call Call Call Call Call Call Call Call Call If Return return:no If Call Call Call If BoolOp Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "visit",
    "source_code": "def visit(unused_path, unused_parent, children):\n    for child in children:\n        _, attr = tf_decorator.unwrap(child[1])\n        api_names_v1 = ['tf.' + name for name in tf_export.get_v1_names(attr)]\n        if any((name in function_names for name in api_names_v1)):\n            for name in api_names_v1:\n                function_name_v1_to_attr[name] = attr\n        api_names_v2 = ['tf.' + name for name in tf_export.get_v2_names(attr)]\n        for name in api_names_v2:\n            function_name_v2_to_attr[name] = attr",
    "docstring": "Visitor that collects arguments for reordered functions.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\tools\\compatibility\\update\\generate_v2_reorders_map.py",
    "ast_data": "FunctionDef name:visit arg:unused_path arg:unused_parent arg:children arguments arg arg arg For Assign Call Assign Call If Call Compare For Assign Assign Call For Assign"
  },
  {
    "library": "tensorflow",
    "name": "_serialize_to_tensors",
    "source_code": "def _serialize_to_tensors(self):\n    raise NotImplementedError",
    "docstring": "Gathers tensors to save to the checkpoint. You should only override and if you are defining a custom resource or variable with custom ops. Otherwise, please store the state of your trackable in objects and add them to Trackable object hierarchy using (for subclasses of ) or overriding the method. For an example of a valid implementation of these two methods, please see . **Invalid implementation** In this example, can be saved and restored from checkpoints, but is incompatible with SavedModel, which tries to convert the serialize/restore functions into tf.functions. This fails because attribute assignment () is not graph-friendly. **Suggested fix** If the attribute should be saved to the checkpoint, then convert it a . **TF1 Saver Compatibility** If your Trackable needs to be comatible with , implement . **AsyncCheckpoint Support** If your Trackable implements , needs to be implemented as well to support asynchronous checkpoint. Returns: A dictionary mapping names to tensors.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\trackable\\base.py",
    "ast_data": "FunctionDef name:_serialize_to_tensors arg:self arguments arg Raise"
  },
  {
    "library": "pytorch",
    "name": "RendezvousStateError",
    "source_code": "class RendezvousStateError(RendezvousError):\n    pass",
    "docstring": "Raised when the state of a rendezvous is corrupt.",
    "type": "class",
    "file_path": "pytorch\\torch\\distributed\\elastic\\rendezvous\\api.py",
    "ast_data": "ClassDef name:RendezvousStateError"
  },
  {
    "library": "pytorch",
    "name": "run_backward",
    "source_code": "def run_backward(self, num_runs, print_per_iter=False):\n    for _ in range(num_runs):\n        self.mean.backward(retain_graph=True)",
    "docstring": "Run the backward path of an op in many iterations",
    "type": "method",
    "file_path": "pytorch\\benchmarks\\operator_benchmark\\benchmark_pytorch.py",
    "ast_data": "FunctionDef name:run_backward arg:self arg:num_runs arg:print_per_iter arguments arg arg arg For Call Call"
  },
  {
    "library": "tensorflow",
    "name": "byte_swap_tflite_buffer",
    "source_code": "def byte_swap_tflite_buffer(tflite_model, from_endiness, to_endiness):\n    if tflite_model is None:\n        return None\n    model = convert_bytearray_to_object(tflite_model)\n    byte_swap_tflite_model_obj(model, from_endiness, to_endiness)\n    return convert_object_to_bytearray(model)",
    "docstring": "Generates a new model byte array after byte swapping its buffers field. Args: tflite_model: TFLite flatbuffer in a byte array. from_endiness: The original endianness format of the buffers in tflite_model. to_endiness: The destined endianness format of the buffers in tflite_model. Returns: TFLite flatbuffer in a byte array, after being byte swapped to to_endiness format.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\lite\\tools\\flatbuffer_utils.py",
    "ast_data": "FunctionDef name:byte_swap_tflite_buffer arg:tflite_model arg:from_endiness arg:to_endiness arguments arg arg arg If Compare Return return:no Assign Call Call Return return:yes Call"
  },
  {
    "library": "django",
    "name": "point_count",
    "source_code": "@property\ndef point_count(self):\n    return capi.get_point_count(self.ptr)",
    "docstring": "Return the number of Points in this Geometry.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\gdal\\geometries.py",
    "ast_data": "FunctionDef name:point_count arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "cherrypy",
    "name": "handler",
    "source_code": "def handler(self, *args, **kwargs):\n\n    @expose\n    def handle_func(*a, **kw):\n        handled = self.callable(*args, **self._merged_args(kwargs))\n        if not handled:\n            raise cherrypy.NotFound()\n        return cherrypy.serving.response.body\n    return handle_func",
    "docstring": "Use this tool as a CherryPy page handler. For example:: class Root: nav = tools.staticdir.handler(section=\"/nav\", dir=\"nav\", root=absDir)",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\_cptools.py",
    "ast_data": "FunctionDef name:handler arg:self arguments arg arg arg FunctionDef name:handle_func arguments arg arg Assign Call Call If Raise Call Return return:yes Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "number_of_partitions",
    "source_code": "@property\ndef number_of_partitions(self):\n    return self._number_of_partitions",
    "docstring": "Returns the number of partitions of the policy or None if unspecified.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\tpu\\tpu_sharding.py",
    "ast_data": "FunctionDef name:number_of_partitions arg:self arguments arg Return return:yes"
  },
  {
    "library": "cherrypy",
    "name": "__str__",
    "source_code": "def __str__(self):\n    return str(self.uuid4)",
    "docstring": "Return UUID4 and keep it for future calls.",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\_cprequest.py",
    "ast_data": "FunctionDef name:__str__ arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "get_nodes",
    "source_code": "def get_nodes(self) -> Sequence[BaseSchedulerNode]:\n    return list(itertools.chain.from_iterable((x.get_nodes() for x in self.snodes)))",
    "docstring": "Returns all nodes contained in this kernel, unpacking fused nodes into their constituent scheduler nodes.",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\scheduler.py",
    "ast_data": "FunctionDef name:get_nodes arg:self arguments arg Return return:yes Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "__len__",
    "source_code": "def __len__(self):\n    if self._dims is None:\n        raise ValueError('Cannot take the length of shape with unknown rank.')\n    return len(self._dims)",
    "docstring": "Returns the rank of this shape, or raises ValueError if unspecified.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\tensor_shape.py",
    "ast_data": "FunctionDef name:__len__ arg:self arguments arg If Compare Raise Call Return return:yes Call"
  },
  {
    "library": "cryptography",
    "name": "private_numbers",
    "source_code": "@abc.abstractmethod\ndef private_numbers(self) -> DSAPrivateNumbers:\n    pass",
    "docstring": "Returns a DSAPrivateNumbers.",
    "type": "method",
    "file_path": "cryptography\\src\\cryptography\\hazmat\\primitives\\asymmetric\\dsa.py",
    "ast_data": "FunctionDef name:private_numbers arg:self arguments arg"
  },
  {
    "library": "tensorflow",
    "name": "_add_variable_with_custom_getter",
    "source_code": "def _add_variable_with_custom_getter(self, name, shape=None, dtype=dtypes.float32, initializer=None, getter=None, overwrite=False, **kwargs_for_getter):\n    self._maybe_initialize_trackable()\n    with ops.init_scope():\n        if context.executing_eagerly():\n            checkpoint_initializer = self._preload_simple_restoration(name=name)\n        else:\n            checkpoint_initializer = None\n        if checkpoint_initializer is not None and (not (isinstance(initializer, CheckpointInitialValueCallable) and initializer.restore_uid > checkpoint_initializer.restore_uid)):\n            initializer = checkpoint_initializer\n    new_variable = getter(name=name, shape=shape, dtype=dtype, initializer=initializer, **kwargs_for_getter)\n    if not overwrite or isinstance(new_variable, Trackable):\n        return self._track_trackable(new_variable, name=name, overwrite=overwrite)\n    else:\n        return new_variable",
    "docstring": "Restore-on-create for a variable be saved with this . If the user has requested that this object or another which depends on this object be restored from a checkpoint (deferred loading before variable object creation), may be ignored and the value from the checkpoint used instead. Args: name: A name for the variable. Must be unique within this object. shape: The shape of the variable. dtype: The data type of the variable. initializer: The initializer to use. Ignored if there is a deferred restoration stored in the Trackable. getter: The getter to wrap which actually fetches the variable. overwrite: If True, disables unique name and type checks. **kwargs_for_getter: Passed to the getter. Returns: The new variable object. Raises: ValueError: If the variable name is not unique.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\trackable\\base.py",
    "ast_data": "FunctionDef name:_add_variable_with_custom_getter arg:self arg:name arg:shape arg:dtype arg:initializer arg:getter arg:overwrite arguments arg arg arg arg arg arg arg arg Call With Call If Call Assign Call Assign If BoolOp Compare BoolOp Call Compare Assign Assign Call If BoolOp Call Return return:yes Call Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "_oas",
    "source_code": "def _oas(X, *, assume_centered=False):\n    if len(X.shape) == 2 and X.shape[1] == 1:\n        if not assume_centered:\n            X = X - X.mean()\n        return (np.atleast_2d((X ** 2).mean()), 0.0)\n    n_samples, n_features = X.shape\n    emp_cov = empirical_covariance(X, assume_centered=assume_centered)\n    alpha = np.mean(emp_cov ** 2)\n    mu = np.trace(emp_cov) / n_features\n    mu_squared = mu ** 2\n    num = alpha + mu_squared\n    den = (n_samples + 1) * (alpha - mu_squared / n_features)\n    shrinkage = 1.0 if den == 0 else min(num / den, 1.0)\n    shrunk_cov = (1.0 - shrinkage) * emp_cov\n    shrunk_cov.flat[::n_features + 1] += shrinkage * mu\n    return (shrunk_cov, shrinkage)",
    "docstring": "Estimate covariance with the Oracle Approximating Shrinkage algorithm. The formulation is based on [1]_. [1] \"Shrinkage algorithms for MMSE covariance estimation.\", Chen, Y., Wiesel, A., Eldar, Y. C., & Hero, A. O. IEEE Transactions on Signal Processing, 58(10), 5016-5029, 2010.",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\covariance\\_shrunk_covariance.py",
    "ast_data": "FunctionDef name:_oas arg:X arguments arg arg If BoolOp Compare Call Compare If Assign Call Return return:yes Call Call Assign Assign Call Assign Call Assign Call Assign Assign Assign Assign Compare Call Assign Return return:yes"
  },
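Since `_oas` is a closed-form estimator, the shrinkage value is easy to re-derive; a minimal sketch (synthetic data, values illustrative; assumes only the public `sklearn.covariance` API):

```python
# Re-derive the OAS shrinkage arithmetic above and check it against the
# public OAS estimator, which calls _oas internally.
import numpy as np
from sklearn.covariance import OAS, empirical_covariance

rng = np.random.RandomState(0)
X = rng.normal(size=(50, 5))

n_samples, n_features = X.shape
emp_cov = empirical_covariance(X)
alpha = np.mean(emp_cov ** 2)
mu = np.trace(emp_cov) / n_features
num = alpha + mu ** 2
den = (n_samples + 1) * (alpha - mu ** 2 / n_features)
shrinkage = 1.0 if den == 0 else min(num / den, 1.0)

est = OAS().fit(X)
assert np.isclose(est.shrinkage_, shrinkage)
```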
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, config, start=True):\n    if config.dispatcher_address is None:\n        raise ValueError('Must specify a `dispatcher_address` in the `config` passed to `WorkerServer`.')\n    if isinstance(config, service_config_pb2.WorkerConfig):\n        config_proto = config\n    else:\n        config_proto = service_config_pb2.WorkerConfig(dispatcher_address=config.dispatcher_address, worker_address=config.worker_address, port=config.port, protocol=config.protocol, heartbeat_interval_ms=config.heartbeat_interval_ms, dispatcher_timeout_ms=config.dispatcher_timeout_ms, data_transfer_protocol=config.data_transfer_protocol, data_transfer_address=config.data_transfer_address)\n    self._server = _pywrap_server_lib.TF_DATA_NewWorkerServer(config_proto.SerializeToString())\n    if start:\n        self._server.start()",
    "docstring": "Creates a new worker server. Args: config: A configuration. start: (Optional.) Boolean, indicating whether to start the server after creating it. Defaults to True.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\data\\experimental\\service\\server_lib.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:config arg:start arguments arg arg arg If Compare Raise Call If Call Assign Assign Call Assign Call Call If Call"
  },
  {
    "library": "django",
    "name": "make_context",
    "source_code": "def make_context(context, request=None, **kwargs):\n    if context is not None and (not isinstance(context, dict)):\n        raise TypeError('context must be a dict rather than %s.' % context.__class__.__name__)\n    if request is None:\n        context = Context(context, **kwargs)\n    else:\n        original_context = context\n        context = RequestContext(request, **kwargs)\n        if original_context:\n            context.push(original_context)\n    return context",
    "docstring": "Create a suitable Context from a plain dict and optionally an HttpRequest.",
    "type": "function",
    "file_path": "django\\django\\template\\context.py",
    "ast_data": "FunctionDef name:make_context arg:context arg:request arguments arg arg arg If BoolOp Compare Call Raise Call If Compare Assign Call Assign Assign Call If Call Return return:yes"
  },
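A minimal usage sketch of `make_context`, assuming a standalone run where Django settings are configured inline:

```python
# Sketch of make_context dispatch: plain dict -> Context, non-dict -> TypeError.
from django.conf import settings

if not settings.configured:
    settings.configure()  # minimal standalone configuration

from django.template.context import Context, make_context

ctx = make_context({"user": "alice"})
assert isinstance(ctx, Context)

try:
    make_context("not-a-dict")  # non-dict raises TypeError
except TypeError:
    pass

# With request=<HttpRequest>, a RequestContext is returned and the original
# dict is pushed on top of it.
```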
  {
    "library": "pandas",
    "name": "_remove_empty_lines",
    "source_code": "def _remove_empty_lines(self, lines: list[list[T]]) -> list[list[T]]:\n    ret = [line for line in lines if len(line) > 1 or (len(line) == 1 and (not isinstance(line[0], str) or line[0].strip()))]\n    return ret",
    "docstring": "Iterate through the lines and remove any that are either empty or contain only one whitespace value Parameters ---------- lines : list of list of Scalars The array of lines that we are to filter. Returns ------- filtered_lines : list of list of Scalars The same array of lines with the \"empty\" ones removed.",
    "type": "method",
    "file_path": "pandas\\pandas\\io\\parsers\\python_parser.py",
    "ast_data": "FunctionDef name:_remove_empty_lines arg:self arg:lines arguments arg arg Assign BoolOp Compare Call BoolOp Compare Call BoolOp Call Call Return return:yes"
  },
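The filtering rule is compact enough to mirror standalone; a sketch with illustrative inputs:

```python
# A line survives if it has more than one value, or its single value is
# either not a string or a non-blank string.
def remove_empty_lines(lines):
    return [
        line for line in lines
        if len(line) > 1
        or (len(line) == 1 and (not isinstance(line[0], str) or line[0].strip()))
    ]

assert remove_empty_lines([["a", "b"], [""], ["  "], [3], []]) == [["a", "b"], [3]]
```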
  {
    "library": "tensorflow",
    "name": "is_device_spec",
    "source_code": "def is_device_spec(obj):\n    return isinstance(obj, device_spec.DeviceSpecV2)",
    "docstring": "Abstract away the fact that DeviceSpecV2 is the base class.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\device.py",
    "ast_data": "FunctionDef name:is_device_spec arg:obj arguments arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_check_full_tensor_value",
    "source_code": "def _check_full_tensor_value(self, tensor_value, wall_time, op_type, output_slot, execution_index=None, graph_execution_trace_index=None):\n    size = np.size(tensor_value)\n    if not size or not np.issubdtype(tensor_value.dtype, np.floating):\n        return\n    is_inf = np.isinf(tensor_value)\n    num_neg_inf = np.count_nonzero(np.logical_and(is_inf, np.less(tensor_value, 0.0)))\n    num_pos_inf = np.count_nonzero(np.logical_and(is_inf, np.greater(tensor_value, 0.0)))\n    num_nan = np.count_nonzero(np.isnan(tensor_value))\n    if num_neg_inf or num_pos_inf or num_nan:\n        self._alerts.append(InfNanAlert(wall_time, op_type, output_slot, size=size, num_neg_inf=num_neg_inf, num_pos_inf=num_pos_inf, num_nan=num_nan, execution_index=execution_index, graph_execution_trace_index=graph_execution_trace_index))",
    "docstring": "Check a full tensor value. Appends to the list of alerts if any inf or nan is found in the full tensor value. Args: tensor_value: The full tensor value as a . wall_time: Wall timestamp for the execution event that generated the tensor value. op_type: Op type executed. output_slot: The output slot of the op. execution_index: Index to the top-level execution event. graph_execution_trace_index: Index to the intra-graph execution trace (if applicable.)",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\debug\\lib\\debug_events_monitors.py",
    "ast_data": "FunctionDef name:_check_full_tensor_value arg:self arg:tensor_value arg:wall_time arg:op_type arg:output_slot arg:execution_index arg:graph_execution_trace_index arguments arg arg arg arg arg arg arg Assign Call If BoolOp Call Return return:no Assign Call Assign Call Call Call Assign Call Call Call Assign Call Call If BoolOp Call Call"
  },
  {
    "library": "scikit-learn",
    "name": "ConvergenceWarning",
    "source_code": "class ConvergenceWarning(UserWarning):\n    pass",
    "docstring": "Custom warning to capture convergence problems .. versionchanged:: 0.18 Moved from sklearn.utils.",
    "type": "class",
    "file_path": "scikit-learn\\sklearn\\exceptions.py",
    "ast_data": "ClassDef name:ConvergenceWarning"
  },
  {
    "library": "tensorflow",
    "name": "FlatbufferToDict",
    "source_code": "def FlatbufferToDict(fb, preserve_as_numpy):\n    if isinstance(fb, int) or isinstance(fb, float) or isinstance(fb, str):\n        return fb\n    elif hasattr(fb, '__dict__'):\n        result = {}\n        for attribute_name in dir(fb):\n            attribute = fb.__getattribute__(attribute_name)\n            if not callable(attribute) and attribute_name[0] != '_':\n                snake_name = CamelCaseToSnakeCase(attribute_name)\n                preserve = True if attribute_name == 'buffers' else preserve_as_numpy\n                result[snake_name] = FlatbufferToDict(attribute, preserve)\n        return result\n    elif isinstance(fb, np.ndarray):\n        return fb if preserve_as_numpy else fb.tolist()\n    elif hasattr(fb, '__len__'):\n        return [FlatbufferToDict(entry, preserve_as_numpy) for entry in fb]\n    else:\n        return fb",
    "docstring": "Converts a hierarchy of FB objects into a nested dict. We avoid transforming big parts of the flat buffer into python arrays. This speeds conversion from ten minutes to a few seconds on big graphs. Args: fb: a flat buffer structure. (i.e. ModelT) preserve_as_numpy: true if all downstream np.arrays should be preserved. false if all downstream np.array should become python arrays Returns: A dictionary representing the flatbuffer rather than a flatbuffer object.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\lite\\tools\\visualize.py",
    "ast_data": "FunctionDef name:FlatbufferToDict arg:fb arg:preserve_as_numpy arguments arg arg If BoolOp Call Call Call Return return:yes If Call Assign For Call Assign Call If BoolOp Call Compare Assign Call Assign Compare Assign Call Return return:yes If Call Return return:yes Call If Call Return return:yes Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "__call__",
    "source_code": "def __call__(self, path, parent, children):\n    if tf_inspect.ismodule(parent) and len(path.split('.')) > 10:\n        raise RuntimeError('Modules nested too deep:\\n%s.%s\\n\\nThis is likely a problem with an accidental public import.' % (self._root_name, path))\n    full_path = '.'.join([self._root_name, path]) if path else self._root_name\n    for name, child in list(children):\n        if self._is_private(full_path, name, child):\n            children.remove((name, child))\n    self._visitor(path, parent, children)\n    for name, child in list(children):\n        if self._do_not_descend(full_path, name):\n            children.remove((name, child))",
    "docstring": "Visitor interface, see for details.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\tools\\common\\public_api.py",
    "ast_data": "FunctionDef name:__call__ arg:self arg:path arg:parent arg:children arguments arg arg arg arg If BoolOp Call Compare Call Call Raise Call Assign Call For Call If Call Call Call For Call If Call Call"
  },
  {
    "library": "kornia",
    "name": "AdjustSigmoid",
    "source_code": "class AdjustSigmoid(Module):\n\n    def __init__(self, cutoff: float=0.5, gain: float=10, inv: bool=False) -> None:\n        super().__init__()\n        self.cutoff: float = cutoff\n        self.gain: float = gain\n        self.inv: bool = inv\n\n    def forward(self, image: Tensor) -> Tensor:\n        return adjust_sigmoid(image, cutoff=self.cutoff, gain=self.gain, inv=self.inv)",
    "docstring": "Adjust the contrast of an image tensor or performs sigmoid correction on the input image tensor. The input image is expected to be in the range of [0, 1]. Reference: [1]: Gustav J. Braun, \"Image Lightness Rescaling Using Sigmoidal Contrast Enhancement Functions\", Args: image: Image to be adjusted in the shape of :math:. cutoff: The cutoff of sigmoid function. gain: The multiplier of sigmoid function. inv: If is set to True the function will return the negative sigmoid correction. Example: >>> x = torch.ones(1, 1, 2, 2) >>> AdjustSigmoid(gain=0)(x) tensor([[[[0.5000, 0.5000], [0.5000, 0.5000]]]])",
    "type": "class",
    "file_path": "kornia\\kornia\\enhance\\adjust.py",
    "ast_data": "ClassDef name:AdjustSigmoid FunctionDef name:__init__ arg:self arg:cutoff arg:gain arg:inv arguments arg arg arg arg Call Call FunctionDef name:forward arg:self arg:image arguments arg arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "_prepare",
    "source_code": "def _prepare(self, *args, **kwargs):\n    for config in self.groups:\n        module = config['module']\n        tensor_name = config['tensor_name']\n        parametrization = config.get('parametrization', FakeSparsity)\n        mask = config.get('mask', torch.ones_like(getattr(module, tensor_name)))\n        self.state[config['tensor_fqn']]['mask'] = mask\n        parametrize.register_parametrization(module, tensor_name, parametrization(mask))",
    "docstring": "Adds mask parametrization to the layer weight",
    "type": "method",
    "file_path": "pytorch\\torch\\ao\\pruning\\sparsifier\\base_sparsifier.py",
    "ast_data": "FunctionDef name:_prepare arg:self arguments arg arg arg For Assign Assign Assign Call Assign Call Call Call Assign Call Call"
  },
  {
    "library": "django",
    "name": "extent",
    "source_code": "@property\ndef extent(self):\n    xval = self.origin.x + self.scale.x * self.width\n    yval = self.origin.y + self.scale.y * self.height\n    xmin = min(xval, self.origin.x)\n    xmax = max(xval, self.origin.x)\n    ymin = min(yval, self.origin.y)\n    ymax = max(yval, self.origin.y)\n    return (xmin, ymin, xmax, ymax)",
    "docstring": "Return the extent as a 4-tuple (xmin, ymin, xmax, ymax).",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\gdal\\raster\\source.py",
    "ast_data": "FunctionDef name:extent arg:self arguments arg Assign Assign Assign Call Assign Call Assign Call Assign Call Return return:yes"
  },
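A worked example of the extent arithmetic, using hypothetical origin/scale/size values (the negative y-scale is the common north-up raster convention, so ymin lands below the origin):

```python
# 4x3 raster anchored at (100, 200) with pixel size (10, -10).
origin_x, origin_y = 100.0, 200.0
scale_x, scale_y = 10.0, -10.0
width, height = 4, 3

xval = origin_x + scale_x * width    # 140.0
yval = origin_y + scale_y * height   # 170.0
extent = (min(xval, origin_x), min(yval, origin_y),
          max(xval, origin_x), max(yval, origin_y))
assert extent == (100.0, 170.0, 140.0, 200.0)
```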
  {
    "library": "scipy",
    "name": "MatReadError",
    "source_code": "class MatReadError(Exception):\n    pass",
    "docstring": "Exception indicating a read issue.",
    "type": "class",
    "file_path": "scipy\\scipy\\io\\matlab\\_miobase.py",
    "ast_data": "ClassDef name:MatReadError"
  },
  {
    "library": "pytorch",
    "name": "forward",
    "source_code": "def forward(self, x: torch.Tensor) -> torch.Tensor:\n    weight_quant_dequant = self.get_weight()\n    result = F.conv3d(x, weight_quant_dequant, self.bias, self.stride, self.padding, self.dilation, self.groups)\n    return result",
    "docstring": "we have: w(float) -- quant - dequant x(float) ------------- F.conv3d --- In the full model, we will see w(float) -- quant - *dequant x -- quant --- *dequant -- *F.conv3d --- *quant - dequant and the backend should be able to fuse the ops with into a quantized conv3d",
    "type": "method",
    "file_path": "pytorch\\torch\\ao\\nn\\quantized\\reference\\modules\\conv.py",
    "ast_data": "FunctionDef name:forward arg:self arg:x arguments arg arg Assign Call Assign Call Return return:yes"
  },
  {
    "library": "pandas",
    "name": "to_numpy",
    "source_code": "def to_numpy(self, dtype: npt.DTypeLike | None=None, copy: bool=False, na_value: object=lib.no_default, **kwargs) -> np.ndarray:\n    if isinstance(self.dtype, ExtensionDtype):\n        return self.array.to_numpy(dtype, copy=copy, na_value=na_value, **kwargs)\n    elif kwargs:\n        bad_keys = next(iter(kwargs.keys()))\n        raise TypeError(f\"to_numpy() got an unexpected keyword argument '{bad_keys}'\")\n    fillna = na_value is not lib.no_default and (not (na_value is np.nan and np.issubdtype(self.dtype, np.floating)))\n    values = self._values\n    if fillna and self.hasnans:\n        if not can_hold_element(values, na_value):\n            values = np.asarray(values, dtype=dtype)\n        else:\n            values = values.copy()\n        values[np.asanyarray(isna(self))] = na_value\n    result = np.asarray(values, dtype=dtype)\n    if copy and (not fillna) or not copy:\n        if np.shares_memory(self._values[:2], result[:2]):\n            if not copy:\n                result = result.view()\n                result.flags.writeable = False\n            else:\n                result = result.copy()\n    return result",
    "docstring": "A NumPy ndarray representing the values in this Series or Index. Parameters ---------- dtype : str or numpy.dtype, optional The dtype to pass to :meth:. copy : bool, default False Whether to ensure that the returned value is not a view on another array. Note that `dtypeselfselfSeries.arraydtypeTimestamp` to return an ndarray of native datetime64 values. The values are converted to UTC and the timezone info is dropped. >>> ser.to_numpy(dtype=\"datetime64[ns]\") ... # doctest: +ELLIPSIS array(['1999-12-31T23:00:00.000000000', '2000-01-01T23:00:00...'], dtype='datetime64[ns]')",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\base.py",
    "ast_data": "FunctionDef name:to_numpy arg:self arg:dtype arg:copy arg:na_value arguments arg arg arg arg arg If Call Return return:yes Call If Assign Call Call Call Raise Call Assign BoolOp Compare BoolOp Compare Call Assign If BoolOp If Call Assign Call Assign Call Assign Call Call Assign Call If BoolOp BoolOp If Call If Assign Call Assign Assign Call Return return:yes"
  },
  {
    "library": "scipy",
    "name": "_pomeranz_compute_j1j2",
    "source_code": "def _pomeranz_compute_j1j2(i, n, ll, ceilf, roundf):\n    if i == 0:\n        j1, j2 = (-ll - ceilf - 1, ll + ceilf - 1)\n    else:\n        ip1div2, ip1mod2 = divmod(i + 1, 2)\n        if ip1mod2 == 0:\n            if ip1div2 == n + 1:\n                j1, j2 = (n - ll - ceilf - 1, n + ll + ceilf - 1)\n            else:\n                j1, j2 = (ip1div2 - 1 - ll - roundf - 1, ip1div2 + ll - 1 + ceilf - 1)\n        else:\n            j1, j2 = (ip1div2 - 1 - ll - 1, ip1div2 + ll + roundf - 1)\n    return (max(j1 + 2, 0), min(j2, n))",
    "docstring": "Compute the endpoints of the interval for row i.",
    "type": "function",
    "file_path": "scipy\\scipy\\stats\\_ksstats.py",
    "ast_data": "FunctionDef name:_pomeranz_compute_j1j2 arg:i arg:n arg:ll arg:ceilf arg:roundf arguments arg arg arg arg arg If Compare Assign Assign Call If Compare If Compare Assign Assign Assign Return return:yes Call Call"
  },
  {
    "library": "cherrypy",
    "name": "readlines",
    "source_code": "def readlines(self, sizehint=0):\n    total = 0\n    lines = []\n    line = self.readline()\n    while line:\n        lines.append(line)\n        total += len(line)\n        if 0 < sizehint <= total:\n            break\n        line = self.readline()\n    return lines",
    "docstring": "Read a list of lines from file, counting bytes.",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\lib\\cpstats.py",
    "ast_data": "FunctionDef name:readlines arg:self arg:sizehint arguments arg arg Assign Assign Assign Call While Call Call If Compare Assign Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "set_tick_params",
    "source_code": "def set_tick_params(self, which='major', reset=False, **kwargs):\n    _api.check_in_list(['major', 'minor', 'both'], which=which)\n    kwtrans = self._translate_tick_params(kwargs)\n    if reset:\n        if which in ['major', 'both']:\n            self._reset_major_tick_kw()\n            self._major_tick_kw.update(kwtrans)\n        if which in ['minor', 'both']:\n            self._reset_minor_tick_kw()\n            self._minor_tick_kw.update(kwtrans)\n        self.reset_ticks()\n    else:\n        if which in ['major', 'both']:\n            self._major_tick_kw.update(kwtrans)\n            for tick in self.majorTicks:\n                tick._apply_params(**kwtrans)\n        if which in ['minor', 'both']:\n            self._minor_tick_kw.update(kwtrans)\n            for tick in self.minorTicks:\n                tick._apply_params(**kwtrans)\n        if 'label1On' in kwtrans or 'label2On' in kwtrans:\n            self.offsetText.set_visible(self._major_tick_kw.get('label1On', False) or self._major_tick_kw.get('label2On', False))\n        if 'labelcolor' in kwtrans:\n            self.offsetText.set_color(kwtrans['labelcolor'])\n    self.stale = True",
    "docstring": "Set appearance parameters for ticks, ticklabels, and gridlines. For documentation of keyword arguments, see :meth:. See Also -------- .Axis.get_tick_params View the current style settings for ticks, ticklabels, and gridlines.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\axis.py",
    "ast_data": "FunctionDef name:set_tick_params arg:self arg:which arg:reset arguments arg arg arg arg Call Assign Call If If Compare Call Call If Compare Call Call Call If Compare Call For Call If Compare Call For Call If BoolOp Compare Compare Call BoolOp Call Call If Compare Call Assign"
  },
  {
    "library": "tensorflow",
    "name": "get_ops_and_kernels",
    "source_code": "def get_ops_and_kernels(proto_fileformat, proto_files, default_ops_str):\n    ops = set()\n    for proto_file in proto_files:\n        tf_logging.info('Loading proto file %s', proto_file)\n        if proto_fileformat == 'ops_list':\n            ops = ops.union(_get_ops_from_ops_list(proto_file))\n            continue\n        file_data = gfile.GFile(proto_file, 'rb').read()\n        if proto_fileformat == 'rawproto':\n            graph_def = graph_pb2.GraphDef.FromString(file_data)\n        else:\n            assert proto_fileformat == 'textproto'\n            graph_def = text_format.Parse(file_data, graph_pb2.GraphDef())\n        ops = ops.union(_get_ops_from_graphdef(graph_def))\n    if default_ops_str and default_ops_str != 'all':\n        for s in default_ops_str.split(','):\n            op, kernel = s.split(':')\n            op_and_kernel = (op, kernel)\n            if op_and_kernel not in ops:\n                ops.add(op_and_kernel)\n    return sorted(ops)",
    "docstring": "Gets the ops and kernels needed from the model files.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\tools\\selective_registration_header_lib.py",
    "ast_data": "FunctionDef name:get_ops_and_kernels arg:proto_fileformat arg:proto_files arg:default_ops_str arguments arg arg arg Assign Call For Call If Compare Assign Call Call Assign Call Call If Compare Assign Call Compare Assign Call Call Assign Call Call If BoolOp Compare For Call Assign Call Assign If Compare Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_ExitGrad",
    "source_code": "@ops.RegisterGradient('Exit')\ndef _ExitGrad(op, grad):\n    graph = ops.get_default_graph()\n    op_ctxt = op._get_control_flow_context()\n    grad_ctxt = graph._get_control_flow_context()\n    if not grad_ctxt.back_prop:\n        return None\n    if op_ctxt.grad_state:\n        raise TypeError('Second-order gradient for while loops not supported.')\n    if isinstance(grad, tensor.Tensor):\n        grad_ctxt.AddName(grad.name)\n    else:\n        if not isinstance(grad, (indexed_slices.IndexedSlices, sparse_tensor.SparseTensor)):\n            raise TypeError(f'Type {type(grad)} not supported, must be either`indexed_slices.IndexedSlices` or `SparseTensor`.')\n        grad_ctxt.AddName(grad.values.name)\n        grad_ctxt.AddName(grad.indices.name)\n        dense_shape = grad.dense_shape\n        if dense_shape is not None:\n            grad_ctxt.AddName(dense_shape.name)\n    grad_ctxt.Enter()\n    result = control_flow_ops._Enter(grad, grad_ctxt.name, is_constant=False, parallel_iterations=grad_ctxt.parallel_iterations, name='b_exit')\n    grad_ctxt.loop_enters.append(result)\n    grad_ctxt.Exit()\n    return result",
    "docstring": "Gradients for an exit op are calculated using an Enter op.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\control_flow_grad.py",
    "ast_data": "FunctionDef name:_ExitGrad arg:op arg:grad arguments arg arg Assign Call Assign Call Assign Call If Return return:no If Raise Call If Call Call If Call Raise Call Call Call Call Assign If Compare Call Call Assign Call Call Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "shutdown",
    "source_code": "@_require_initialized\ndef shutdown(graceful=True, timeout=DEFAULT_SHUTDOWN_TIMEOUT):\n    if graceful:\n        try:\n            agent = _get_current_rpc_agent()\n            if not isinstance(agent, TensorPipeAgent) or agent.is_static_group:\n                _wait_all_workers(timeout)\n                _delete_all_user_and_unforked_owner_rrefs()\n                agent.join(shutdown=True, timeout=timeout)\n            else:\n                my_worker_info = agent.get_worker_info()\n                my_name = my_worker_info.name\n                with _group_membership_management(agent.store, my_name, False):\n                    all_worker_infos = agent.get_worker_infos()\n                    for worker in all_worker_infos:\n                        if worker.name != my_name:\n                            rpc_sync(worker.name, _update_group_membership, args=(my_worker_info, [], {}, False))\n                    agent.join(shutdown=True, timeout=timeout)\n        finally:\n            _finalize_shutdown()\n    else:\n        _finalize_shutdown()",
    "docstring": "Perform a shutdown of the RPC agent, and then destroy the RPC agent. This stops the local agent from accepting outstanding requests, and shuts down the RPC framework by terminating all RPC threads. If `~torch.futures.Future~torch.distributed.rpc.rpc_async~torch.distributed.init_process_group` API for more details. For example, export MASTER_ADDR=localhost export MASTER_PORT=5678 Then run the following code in two different processes: >>> # xdoctest: +SKIP >>> # On worker 0: >>> import torch >>> import torch.distributed.rpc as rpc >>> rpc.init_rpc(\"worker0\", rank=0, world_size=2) >>> # do some work >>> result = rpc.rpc_sync(\"worker1\", torch.add, args=(torch.ones(1), 1)) >>> # ready to shutdown >>> rpc.shutdown() >>> # On worker 1: >>> import torch.distributed.rpc as rpc >>> rpc.init_rpc(\"worker1\", rank=1, world_size=2) >>> # wait for worker 0 to finish work, and then shutdown. >>> rpc.shutdown()",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\rpc\\api.py",
    "ast_data": "FunctionDef name:shutdown arg:graceful arg:timeout arguments arg arg If Try Assign Call If BoolOp Call Call Call Call Assign Call Assign With Call Assign Call For If Compare Call Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "range_dimension_tensor",
    "source_code": "def range_dimension_tensor(self, name='range_dimension_tensor'):\n    with self._name_scope(name):\n        return self._range_dimension_tensor()",
    "docstring": "Dimension (in the sense of vector spaces) of the range of this operator. Determined at runtime. If this operator acts like the batch matrix with , then this returns . Args: name: A name for this . Returns:",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\linalg\\linear_operator.py",
    "ast_data": "FunctionDef name:range_dimension_tensor arg:self arg:name arguments arg arg With Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "filter_empty_gradients",
    "source_code": "def filter_empty_gradients(grads_and_vars):\n    grads_and_vars = tuple(grads_and_vars)\n    if not grads_and_vars:\n        return grads_and_vars\n    filtered = []\n    vars_with_empty_grads = []\n    for grad, var in grads_and_vars:\n        if grad is None:\n            vars_with_empty_grads.append(var)\n        else:\n            filtered.append((grad, var))\n    filtered = tuple(filtered)\n    if not filtered:\n        raise ValueError('No gradients provided for any variable: %s.' % ([v.name for _, v in grads_and_vars],))\n    if vars_with_empty_grads:\n        logging.warning('Gradients do not exist for variables %s when minimizing the loss.', [v.name for v in vars_with_empty_grads])\n    return filtered",
    "docstring": "Filter out pairs that have a gradient equal to .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\optimizer_v2\\utils.py",
    "ast_data": "FunctionDef name:filter_empty_gradients arg:grads_and_vars arguments arg Assign Call If Return return:yes Assign Assign For If Compare Call Call Assign Call If Raise Call If Call Return return:yes"
  },
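A standalone sketch of the filtering rule with stand-in variable objects (`SimpleNamespace` here is illustrative; the real function also logs a warning for the dropped variables):

```python
# Mirror the None-gradient filter on a small gradient/variable list.
from types import SimpleNamespace

v1, v2 = SimpleNamespace(name="w"), SimpleNamespace(name="b")
grads_and_vars = [(0.5, v1), (None, v2)]

filtered = tuple((g, v) for g, v in grads_and_vars if g is not None)
assert filtered == ((0.5, v1),)
```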
  {
    "library": "pytorch",
    "name": "run",
    "source_code": "def run(self, grid_x: int, grid_y: int, grid_z: int, stream: int, *args: Unpack[tuple[object, ...]]) -> None:\n    from torch._C import _StaticCudaLauncher\n    assert self.function is not None\n    if self.has_global_scratch:\n        arg_tys = self.arg_tys + 'O'\n        args = (*args, None)\n    else:\n        arg_tys = self.arg_tys\n    assert len(args) == len(arg_tys)\n    _StaticCudaLauncher._launch_kernel(self.function, grid_x, grid_y, grid_z, self.num_warps, self.shared, arg_tys, args, stream)",
    "docstring": "Actually run the kernel at runtime. This function is the hot codepath.",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\runtime\\static_cuda_launcher.py",
    "ast_data": "FunctionDef name:run arg:self arg:grid_x arg:grid_y arg:grid_z arg:stream arguments arg arg arg arg arg arg Compare If Assign Assign Assign Compare Call Call Call"
  },
  {
    "library": "pandas",
    "name": "agg_series",
    "source_code": "@final\ndef agg_series(self, obj: Series, func: Callable, preserve_dtype: bool=False) -> ArrayLike:\n    if not isinstance(obj._values, np.ndarray):\n        preserve_dtype = True\n    result = self._aggregate_series_pure_python(obj, func)\n    npvalues = lib.maybe_convert_objects(result, try_float=False)\n    if preserve_dtype:\n        out = maybe_cast_pointwise_result(npvalues, obj.dtype, numeric_only=True)\n    else:\n        out = npvalues\n    return out",
    "docstring": "Parameters ---------- obj : Series func : function taking a Series and returning a scalar-like preserve_dtype : bool Whether the aggregation is known to be dtype-preserving. Returns ------- np.ndarray or ExtensionArray",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\groupby\\ops.py",
    "ast_data": "FunctionDef name:agg_series arg:self arg:obj arg:func arg:preserve_dtype arguments arg arg arg arg If Call Assign Assign Call Assign Call If Assign Call Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "assign_device",
    "source_code": "@classmethod\ndef assign_device(cls, core):\n    return Sharding(proto=xla_data_pb2.OpSharding(type=xla_data_pb2.OpSharding.MAXIMAL, tile_assignment_dimensions=[1], tile_assignment_devices=[core]))",
    "docstring": "Returns an AssignDevice sharding attribute. This causes an op to be computed in its entirety only on one core in the XLA device. Args: core: The core to assign this Op to.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\compiler\\xla\\experimental\\xla_sharding.py",
    "ast_data": "FunctionDef name:assign_device arg:cls arg:core arguments arg arg Return return:yes Call Call"
  },
  {
    "library": "scikit-learn",
    "name": "build_preprocessor",
    "source_code": "def build_preprocessor(self):\n    if self.preprocessor is not None:\n        return self.preprocessor\n    if not self.strip_accents:\n        strip_accents = None\n    elif callable(self.strip_accents):\n        strip_accents = self.strip_accents\n    elif self.strip_accents == 'ascii':\n        strip_accents = strip_accents_ascii\n    elif self.strip_accents == 'unicode':\n        strip_accents = strip_accents_unicode\n    else:\n        raise ValueError('Invalid value for \"strip_accents\": %s' % self.strip_accents)\n    return partial(_preprocess, accent_function=strip_accents, lower=self.lowercase)",
    "docstring": "Return a function to preprocess the text before tokenization. Returns ------- preprocessor: callable A function to preprocess the text before tokenization.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\feature_extraction\\text.py",
    "ast_data": "FunctionDef name:build_preprocessor arg:self arguments arg If Compare Return return:yes If Assign If Call Assign If Compare Assign If Compare Assign Raise Call Return return:yes Call"
  },
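A usage sketch through the public `CountVectorizer`, which exposes `build_preprocessor()`; the sample string is illustrative:

```python
# With strip_accents="unicode", the returned callable lowercases the text
# and then strips accents.
from sklearn.feature_extraction.text import CountVectorizer

vec = CountVectorizer(strip_accents="unicode", lowercase=True)
preprocess = vec.build_preprocessor()
assert preprocess("Café Déjà Vu") == "cafe deja vu"
```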
  {
    "library": "matplotlib",
    "name": "__init__",
    "source_code": "def __init__(self, rad=0.0):\n    self.rad = rad",
    "docstring": "Parameters ---------- rad : float Curvature of the curve.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\patches.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:rad arguments arg arg Assign"
  },
  {
    "library": "tensorflow",
    "name": "object_id",
    "source_code": "@property\ndef object_id(self) -> int:\n    return self._object_id",
    "docstring": "Returns the object identifier of this tensor (integer).",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\client\\timeline.py",
    "ast_data": "FunctionDef name:object_id arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "op_signature_def",
    "source_code": "def op_signature_def(op, key):\n    return build_signature_def(outputs={key: utils.build_tensor_info_from_op(op)})",
    "docstring": "Creates a signature def with the output pointing to an op. Note that op isn't strictly enforced to be an Op object, and may be a Tensor. It is recommended to use the build_signature_def() function for Tensors. Args: op: An Op (or possibly Tensor). key: Key to graph element in the SignatureDef outputs. Returns: A SignatureDef with a single output pointing to the op.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\saved_model\\signature_def_utils_impl.py",
    "ast_data": "FunctionDef name:op_signature_def arg:op arg:key arguments arg arg Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "ismethod",
    "source_code": "def ismethod(object):\n    return _inspect.ismethod(tf_decorator.unwrap(object)[1])",
    "docstring": "TFDecorator-aware replacement for inspect.ismethod.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\util\\tf_inspect.py",
    "ast_data": "FunctionDef name:ismethod arg:object arguments arg Return return:yes Call Call"
  },
  {
    "library": "pandas",
    "name": "future_version_msg",
    "source_code": "def future_version_msg(version: str | None) -> str:\n    if version is None:\n        return 'In a future version of pandas'\n    else:\n        return f'Starting with pandas version {version}'",
    "docstring": "Specify which version of pandas the deprecation will take place in.",
    "type": "function",
    "file_path": "pandas\\pandas\\util\\_decorators.py",
    "ast_data": "FunctionDef name:future_version_msg arg:version arguments arg If Compare Return return:yes Return return:yes"
  },
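A standalone mirror of the helper for illustration:

```python
# Re-implementation of future_version_msg; behavior matches the record above.
def future_version_msg(version=None):
    if version is None:
        return "In a future version of pandas"
    return f"Starting with pandas version {version}"

assert future_version_msg() == "In a future version of pandas"
assert future_version_msg("3.0") == "Starting with pandas version 3.0"
```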
  {
    "library": "matplotlib",
    "name": "set_params",
    "source_code": "def set_params(self, numticks=None, symthresh=None, base=None, subs=None):\n    if numticks is not None:\n        self.numticks = numticks\n    if symthresh is not None:\n        self.symthresh = symthresh\n    if base is not None:\n        self.base = base\n    if subs is not None:\n        self.subs = subs if len(subs) > 0 else None",
    "docstring": "Set parameters within this locator.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\ticker.py",
    "ast_data": "FunctionDef name:set_params arg:self arg:numticks arg:symthresh arg:base arg:subs arguments arg arg arg arg arg If Compare Assign If Compare Assign If Compare Assign If Compare Assign Compare Call"
  },
  {
    "library": "numpy",
    "name": "_gridnd",
    "source_code": "def _gridnd(val_f, c, *args):\n    for xi in args:\n        c = val_f(xi, c)\n    return c",
    "docstring": "Helper function used to implement the `` functions for more detail",
    "type": "function",
    "file_path": "numpy\\numpy\\polynomial\\polyutils.py",
    "ast_data": "FunctionDef name:_gridnd arg:val_f arg:c arguments arg arg arg For Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_find_shape_dtype",
    "source_code": "def _find_shape_dtype(fields: Mapping[str, _FieldValue], nrows: Optional[tensor.Tensor], row_partitions: Optional[Sequence[RowPartition]]) -> dtypes.DType:\n    field_dtypes = [_field_shape_dtype(v) for v in fields.values()]\n    nrows_dtypes = [nrows.dtype] if isinstance(nrows, tensor.Tensor) else []\n    rp_dtypes = [] if row_partitions is None else [rp.dtype for rp in row_partitions]\n    all_dtypes = field_dtypes + nrows_dtypes + rp_dtypes\n    if dtypes.int64 in all_dtypes:\n        return dtypes.int64\n    if dtypes.int32 in all_dtypes:\n        return dtypes.int32\n    return dtypes.int64",
    "docstring": "Return a consistent dtype for fields, nrows, & row_partitions. In the future, the default will switch from int64 to int32, but for now, we stick with int64. Args: fields: the fields of the StructuredTensor. nrows: the nrows of the StructuredTensor row_partitions: the row_partitions of the StructuredTensor. Returns: If anything requires int64, then return int64. If int32 is explicitly specified, return int32. Otherwise, return int64.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\structured\\structured_tensor.py",
    "ast_data": "FunctionDef name:_find_shape_dtype arg:fields arg:nrows arg:row_partitions arguments arg arg arg Assign Call Call Assign Call Assign Compare Assign If Compare Return return:yes If Compare Return return:yes Return return:yes"
  },
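The precedence rule reduces to a few comparisons; a standalone sketch using dtype names as stand-ins for `tf.DType` objects:

```python
# int64 anywhere forces int64; otherwise an explicit int32 yields int32;
# the default is int64.
def find_shape_dtype(all_dtypes):
    if "int64" in all_dtypes:
        return "int64"
    if "int32" in all_dtypes:
        return "int32"
    return "int64"

assert find_shape_dtype(["int32", "int64"]) == "int64"
assert find_shape_dtype(["int32"]) == "int32"
assert find_shape_dtype([]) == "int64"
```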
  {
    "library": "pytorch",
    "name": "predict",
    "source_code": "def predict(self, model, df, feature_columns):\n    predictions = model.predict(df[feature_columns])\n    proba = model.predict_proba(df[feature_columns])\n    leaf_ids = model.apply(df[feature_columns])\n    return (predictions, proba, leaf_ids)",
    "docstring": "Returns the predictions, probabilities, and leaf ids for a given dataframe.",
    "type": "method",
    "file_path": "pytorch\\torchgen\\_autoheuristic\\train_decision.py",
    "ast_data": "FunctionDef name:predict arg:self arg:model arg:df arg:feature_columns arguments arg arg arg arg Assign Call Assign Call Assign Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_topological_sort_passes",
    "source_code": "def _topological_sort_passes(passes: list[Callable], constraints: list[Callable]) -> list[Callable]:\n    if len(constraints) == 0:\n        return passes\n    graph: dict[Callable, list[Callable]] = {p: [] for p in passes}\n    indegree_map: dict[Callable, int] = dict.fromkeys(passes, 0)\n    candidates: Queue = Queue()\n    for a in passes:\n        for b in passes:\n            if a == b:\n                continue\n            for constraint in constraints:\n                if not constraint(a, b):\n                    graph[b].append(a)\n                    indegree_map[a] += 1\n        if indegree_map[a] == 0:\n            candidates.put(a)\n    visited: dict[Callable, bool] = dict.fromkeys(passes, False)\n    sorted_passes: list[Callable] = []\n    while not candidates.empty():\n        p = candidates.get()\n        sorted_passes.append(p)\n        visited[p] = True\n        for n in graph[p]:\n            if not visited[n]:\n                indegree_map[n] -= 1\n                if indegree_map[n] == 0:\n                    candidates.put(n)\n    cycle_passes = list(filter(lambda p: indegree_map[p] != 0, indegree_map.keys()))\n    if len(cycle_passes) != 0:\n        error = f'Circular dependency detected within the following passes: {cycle_passes}'\n        raise RuntimeError(error)\n    return sorted_passes",
    "docstring": "Args passes: Passes that we are ordering constraints: Constraints applied on these passes Returns A sorted list of callables and a boolean of if a circular dependency existed",
    "type": "function",
    "file_path": "pytorch\\torch\\fx\\passes\\infra\\pass_manager.py",
    "ast_data": "FunctionDef name:_topological_sort_passes arg:passes arg:constraints arguments arg arg If Compare Call Return return:yes Call Call For For If Compare For If Call Call If Compare Call Call While Call Assign Call Call Assign For If If Compare Call Assign Call Call arguments arg Compare Call If Compare Call Assign Raise Call Return return:yes"
  },
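A sketch of the constraint convention the sorter assumes: `constraint(a, b)` returning False records the edge b -> a, i.e. "a must run after b". The closure below mirrors the shape of torch.fx's `this_before_that_pass_constraint`; the pass functions are illustrative stand-ins:

```python
# Build a constraint that forces `this` to run before `that`.
def this_before_that(this, that):
    def constraint(a, b):
        # Forbid the ordering where `that` would precede `this`.
        return not (a is that and b is this)
    return constraint

def pass_a(m): return m
def pass_b(m): return m

c = this_before_that(pass_b, pass_a)   # pass_b must come before pass_a
assert c(pass_a, pass_b) is False      # pass_a may not precede pass_b
assert c(pass_b, pass_a) is True
```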
  {
    "library": "scrapy",
    "name": "BaseDupeFilter",
    "source_code": "class BaseDupeFilter:\n\n    @classmethod\n    def from_settings(cls, settings: BaseSettings) -> Self:\n        warnings.warn(f'{cls.__name__}.from_settings() is deprecated, use from_crawler() instead.', category=ScrapyDeprecationWarning, stacklevel=2)\n        return cls()\n\n    @classmethod\n    def from_crawler(cls, crawler: Crawler) -> Self:\n        return cls()\n\n    def request_seen(self, request: Request) -> bool:\n        return False\n\n    def open(self) -> Deferred[None] | None:\n        pass\n\n    def close(self, reason: str) -> Deferred[None] | None:\n        pass\n\n    def log(self, request: Request, spider: Spider) -> None:\n        warn('Calling BaseDupeFilter.log() is deprecated.', ScrapyDeprecationWarning, stacklevel=2)",
    "docstring": "Dummy duplicate request filtering class (:setting:) that does not filter out any request.",
    "type": "class",
    "file_path": "scrapy\\scrapy\\dupefilters.py",
    "ast_data": "ClassDef name:BaseDupeFilter FunctionDef name:from_settings arg:cls arg:settings arguments arg arg Call Return return:yes Call FunctionDef name:from_crawler arg:cls arg:crawler arguments arg arg Return return:yes Call FunctionDef name:request_seen arg:self arg:request arguments arg arg Return return:yes FunctionDef name:open arg:self arguments arg FunctionDef name:close arg:self arg:reason arguments arg arg FunctionDef name:log arg:self arg:request arg:spider arguments arg arg arg Call"
  },
  {
    "library": "scipy",
    "name": "LuInv",
    "source_code": "class LuInv(LinearOperator):\n\n    def __init__(self, M):\n        self.M_lu = lu_factor(M)\n        self.shape = M.shape\n        self.dtype = M.dtype\n\n    def _matvec(self, x):\n        return lu_solve(self.M_lu, x)",
    "docstring": "LuInv: helper class to repeatedly solve M*x=b using an LU-decomposition of M",
    "type": "class",
    "file_path": "scipy\\scipy\\sparse\\linalg\\_eigen\\arpack\\arpack.py",
    "ast_data": "ClassDef name:LuInv FunctionDef name:__init__ arg:self arg:M arguments arg arg Assign Call Assign Assign FunctionDef name:_matvec arg:self arg:x arguments arg arg Return return:yes Call"
  },
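A usage sketch of the LuInv pattern with plain scipy building blocks: factor M once, then apply M^{-1} repeatedly behind the `LinearOperator` interface:

```python
# Factor once, solve many times: the core idea behind LuInv.
import numpy as np
from scipy.linalg import lu_factor, lu_solve
from scipy.sparse.linalg import LinearOperator

M = np.array([[4.0, 1.0],
              [2.0, 3.0]])
M_lu = lu_factor(M)
Minv = LinearOperator(M.shape, matvec=lambda x: lu_solve(M_lu, x), dtype=M.dtype)

b = np.array([1.0, 2.0])
assert np.allclose(M @ Minv.matvec(b), b)  # M * (M^{-1} b) == b
```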
  {
    "library": "numpy",
    "name": "fortranSourceLines",
    "source_code": "def fortranSourceLines(fo):\n    numberingiter = LineIterator(fo)\n    with_extra = itertools.chain(numberingiter, [''])\n    pushbackiter = PushbackIterator(with_extra)\n    for line in pushbackiter:\n        t = lineType(line)\n        if t == COMMENT:\n            continue\n        elif t == STATEMENT:\n            lines = [line]\n            for next_line in pushbackiter:\n                t = lineType(next_line)\n                if t == CONTINUATION:\n                    lines.append(next_line[6:])\n                else:\n                    pushbackiter.pushback(next_line)\n                    break\n            yield (numberingiter.lineno, ''.join(lines))\n        else:\n            raise ValueError('jammed: continuation line not expected: %s:%d' % (fo.name, numberingiter.lineno))",
    "docstring": "Return an iterator over statement lines of a Fortran source file. Comment and blank lines are stripped out, and continuation lines are merged.",
    "type": "function",
    "file_path": "numpy\\numpy\\linalg\\lapack_lite\\fortran.py",
    "ast_data": "FunctionDef name:fortranSourceLines arg:fo arguments arg Assign Call Assign Call Assign Call For Assign Call If Compare If Compare Assign For Assign Call If Compare Call Call Call Raise Call"
  },
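The fixed-form rule the iterator relies on: a non-blank character in column 6 marks a continuation, and columns 7+ are appended to the running statement. A minimal illustration:

```python
# Merge a fixed-form continuation line into its statement, as the
# generator above does with next_line[6:].
lines = [
    "      X = 1.0",
    "     & + 2.0",   # column 6 is '&' -> continuation
]
merged = lines[0] + lines[1][6:]
assert merged == "      X = 1.0 + 2.0"
```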
  {
    "library": "matplotlib",
    "name": "drag_pan",
    "source_code": "def drag_pan(self, button, key, x, y):\n    points = self._get_pan_points(button, key, x, y)\n    if points is not None:\n        self.set_xlim(points[:, 0])\n        self.set_ylim(points[:, 1])",
    "docstring": "Called when the mouse moves during a pan operation. Parameters ---------- button : The pressed mouse button. key : str or None The pressed key, if any. x, y : float The mouse coordinates in display coords. Notes ----- This is intended to be overridden by new projection types.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\axes\\_base.py",
    "ast_data": "FunctionDef name:drag_pan arg:self arg:button arg:key arg:x arg:y arguments arg arg arg arg arg Assign Call If Compare Call Call"
  },
  {
    "library": "scipy",
    "name": "tocsr",
    "source_code": "def tocsr(self, copy=False):\n    if self.ndim > 2:\n        raise ValueError(f'Cannot convert. CSR must be 1D or 2D. Got {self.ndim}D')\n    if self.nnz == 0:\n        return self._csr_container(self.shape, dtype=self.dtype)\n    else:\n        from ._csr import csr_array\n        arrays = self._coo_to_compressed(csr_array._swap, copy=copy)\n        indptr, indices, data, shape = arrays\n        x = self._csr_container((data, indices, indptr), shape=self.shape)\n        if not self.has_canonical_format:\n            x.sum_duplicates()\n        return x",
    "docstring": "Convert this array/matrix to Compressed Sparse Row format Duplicate entries will be summed together. Examples -------- >>> from numpy import array >>> from scipy.sparse import coo_array >>> row = array([0, 0, 1, 3, 1, 0, 0]) >>> col = array([0, 2, 1, 3, 1, 0, 0]) >>> data = array([1, 1, 1, 1, 1, 1, 1]) >>> A = coo_array((data, (row, col)), shape=(4, 4)).tocsr() >>> A.toarray() array([[3, 0, 1, 0], [0, 2, 0, 0], [0, 0, 0, 0], [0, 0, 0, 1]])",
    "type": "method",
    "file_path": "scipy\\scipy\\sparse\\_coo.py",
    "ast_data": "FunctionDef name:tocsr arg:self arg:copy arguments arg arg If Compare Raise Call If Compare Return return:yes Call Assign Call Assign Assign Call If Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "numa_aware_check",
    "source_code": "def numa_aware_check(self, core_list):\n    cores_numa_map = self.logical_core_node_map\n    numa_ids = []\n    for core in core_list:\n        numa_id = cores_numa_map[core]\n        if numa_id not in numa_ids:\n            numa_ids.append(numa_id)\n    if len(numa_ids) > 1:\n        logger.warning('Numa Aware: cores:%s on different NUMA nodes:%s. To avoid this behavior, please use --ncores-per-instance knob to make sure number of cores is divisible by --ncores-per-instance. Alternatively, please use --skip-cross-node-cores knob.', str(core_list), str(numa_ids))\n    if len(numa_ids) == 0:\n        raise RuntimeError('invalid number of NUMA nodes; please make sure numa_ids >= 1')\n    return numa_ids",
    "docstring": "Check whether all cores in core_list are in the same NUMA node. Cross NUMA will reduce performance. We strongly advice to not use cores on different nodes.",
    "type": "method",
    "file_path": "pytorch\\torch\\backends\\xeon\\run_cpu.py",
    "ast_data": "FunctionDef name:numa_aware_check arg:self arg:core_list arguments arg arg Assign Assign For Assign If Compare Call If Compare Call Call Call Call If Compare Call Raise Call Return return:yes"
  },
  {
    "library": "kornia",
    "name": "depth_from_point",
    "source_code": "def depth_from_point(R: Tensor, t: Tensor, X: Tensor) -> Tensor:\n    X_tmp = R @ X.transpose(-2, -1)\n    X_out = X_tmp[..., 2, :] + t[..., 2, :]\n    return X_out",
    "docstring": "Return the depth of a point transformed by a rigid transform. Args: R: The rotation matrix with shape :math:. t: The translation vector with shape :math:. X: The 3d points with shape :math:. Returns: The depth value per point with shape :math:.",
    "type": "function",
    "file_path": "kornia\\kornia\\geometry\\epipolar\\projection.py",
    "ast_data": "FunctionDef name:depth_from_point arg:R arg:t arg:X arguments arg arg arg Assign Call Assign Return return:yes"
  },
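A worked example, importing from the module path given in the record (identity rotation plus a z-translation of 5 shifts each point's depth by 5):

```python
# depth = (R @ X^T)[..., 2, :] + t[..., 2, :]; with R = I this is X_z + t_z.
import torch
from kornia.geometry.epipolar.projection import depth_from_point

R = torch.eye(3)[None]                      # (1, 3, 3)
t = torch.tensor([[[0.0], [0.0], [5.0]]])   # (1, 3, 1)
X = torch.tensor([[[1.0, 2.0, 3.0]]])       # (1, 1, 3)

depth = depth_from_point(R, t, X)
assert torch.allclose(depth, torch.tensor([[8.0]]))  # 3.0 + 5.0
```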
  {
    "library": "kornia",
    "name": "HsvToRgb",
    "source_code": "class HsvToRgb(Module):\n    ONNX_DEFAULT_INPUTSHAPE: ClassVar[list[int]] = [-1, 3, -1, -1]\n    ONNX_DEFAULT_OUTPUTSHAPE: ClassVar[list[int]] = [-1, 3, -1, -1]\n\n    def forward(self, image: torch.Tensor) -> torch.Tensor:\n        return hsv_to_rgb(image)",
    "docstring": "Convert an image from HSV to RGB. H channel values are assumed to be in the range 0..2pi. S and V are in the range 0..1. Returns: RGB version of the image. Shape: - image: :math: - output: :math: Example: >>> input = torch.rand(2, 3, 4, 5) >>> rgb = HsvToRgb() >>> output = rgb(input) # 2x3x4x5",
    "type": "class",
    "file_path": "kornia\\kornia\\color\\hsv.py",
    "ast_data": "ClassDef name:HsvToRgb FunctionDef name:forward arg:self arg:image arguments arg arg Return return:yes Call"
  },
  {
    "library": "django",
    "name": "as_hidden",
    "source_code": "def as_hidden(self, attrs=None, **kwargs):\n    return self.as_widget(self.field.hidden_widget(), attrs, **kwargs)",
    "docstring": "Return a string of HTML for representing this as an .",
    "type": "method",
    "file_path": "django\\django\\forms\\boundfield.py",
    "ast_data": "FunctionDef name:as_hidden arg:self arg:attrs arguments arg arg arg Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_cudnn_rnn_backward",
    "source_code": "@ops.RegisterGradient('CudnnRNN')\ndef _cudnn_rnn_backward(op: ops.Operation, *grads):\n    if not op.get_attr('is_training'):\n        raise ValueError('To use CudnnRNN in gradients, is_training must be set to True.')\n    return gen_cudnn_rnn_ops.cudnn_rnn_backprop(input=op.inputs[0], input_h=op.inputs[1], input_c=op.inputs[2], params=op.inputs[3], output=op.outputs[0], output_h=op.outputs[1], output_c=op.outputs[2], output_backprop=grads[0], output_h_backprop=grads[1], output_c_backprop=grads[2], reserve_space=op.outputs[3], dropout=op.get_attr('dropout'), seed=op.get_attr('seed'), seed2=op.get_attr('seed2'), rnn_mode=op.get_attr('rnn_mode'), input_mode=op.get_attr('input_mode'), direction=op.get_attr('direction'))",
    "docstring": "Gradients for the CudnnRNN op.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\cudnn_rnn_grad.py",
    "ast_data": "FunctionDef name:_cudnn_rnn_backward arg:op arguments arg arg If Call Raise Call Return return:yes Call Call Call Call Call Call Call Call"
  },
  {
    "library": "scipy",
    "name": "_fractional_power_pade",
    "source_code": "def _fractional_power_pade(R, t, m):\n    if m < 1 or int(m) != m:\n        raise ValueError('expected a positive integer m')\n    if not -1 < t < 1:\n        raise ValueError('expected -1 < t < 1')\n    R = np.asarray(R)\n    if len(R.shape) != 2 or R.shape[0] != R.shape[1]:\n        raise ValueError('expected an upper triangular square matrix')\n    n, n = R.shape\n    ident = np.identity(n)\n    Y = R * _fractional_power_pade_constant(2 * m, t)\n    for j in range(2 * m - 1, 0, -1):\n        rhs = R * _fractional_power_pade_constant(j, t)\n        Y = solve_triangular(ident + Y, rhs)\n    U = ident + Y\n    if not np.array_equal(U, np.triu(U)):\n        raise Exception('U is not upper triangular')\n    return U",
    "docstring": "Evaluate the Pade approximation of a fractional matrix power. Evaluate the degree-m Pade approximation of R to the fractional matrix power t using the continued fraction in bottom-up fashion using algorithm (4.1) in [1]_. Parameters ---------- R : (N, N) array_like Upper triangular matrix whose fractional power to evaluate. t : float Fractional power between -1 and 1 exclusive. m : positive integer Degree of Pade approximation. Returns ------- U : (N, N) array_like The degree-m Pade approximation of R to the fractional power t. This matrix will be upper triangular. References ---------- .. [1] Nicholas J. Higham and Lijing lin (2011) \"A Schur-Pade Algorithm for Fractional Powers of a Matrix.\" SIAM Journal on Matrix Analysis and Applications, 32 (3). pp. 1056-1078. ISSN 0895-4798",
    "type": "function",
    "file_path": "scipy\\scipy\\linalg\\_matfuncs_inv_ssq.py",
    "ast_data": "FunctionDef name:_fractional_power_pade arg:R arg:t arg:m arguments arg arg arg If BoolOp Compare Compare Call Raise Call If Compare Raise Call Assign Call If BoolOp Compare Call Compare Raise Call Assign Assign Call Assign Call For Call Assign Call Assign Call Assign If Call Call Raise Call Return return:yes"
  },
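This helper sits behind scipy's public `fractional_matrix_power`; a quick sanity check through that public API (the matrix is illustrative):

```python
# A^(1/2) squared should recover A for a well-conditioned triangular matrix.
import numpy as np
from scipy.linalg import fractional_matrix_power

A = np.array([[4.0, 1.0],
              [0.0, 9.0]])
root = fractional_matrix_power(A, 0.5)
assert np.allclose(root @ root, A)
```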
  {
    "library": "matplotlib",
    "name": "select",
    "source_code": "def select(self):\n    if sys.platform == 'win32':\n        self.dc.SelectObject(self.bitmap)\n        self.IsSelected = True",
    "docstring": "Select the current bitmap into this wxDC instance.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\backends\\backend_wx.py",
    "ast_data": "FunctionDef name:select arg:self arguments arg If Compare Call Assign"
  },
  {
    "library": "tensorflow",
    "name": "_minimum_flops",
    "source_code": "@ops.RegisterStatistics('Minimum', 'flops')\ndef _minimum_flops(graph, node):\n    return _binary_per_element_op_flops(graph, node)",
    "docstring": "Compute flops for Minimum operation.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\profiler\\internal\\flops_registry.py",
    "ast_data": "FunctionDef name:_minimum_flops arg:graph arg:node arguments arg arg Return return:yes Call Call"
  },
  {
    "library": "django",
    "name": "builtin_template_path",
    "source_code": "def builtin_template_path(name):\n    return Path(__file__).parent / 'templates' / name",
    "docstring": "Return a path to a builtin template. Avoid calling this function at the module level or in a class-definition because __file__ may not exist, e.g. in frozen environments.",
    "type": "function",
    "file_path": "django\\django\\views\\static.py",
    "ast_data": "FunctionDef name:builtin_template_path arg:name arguments arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "is_last",
    "source_code": "@property\ndef is_last(self):\n    return self.stage_index == self.num_stages - 1",
    "docstring": "Returns true if this stage is the last stage in the pipeline.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\pipelining\\stage.py",
    "ast_data": "FunctionDef name:is_last arg:self arguments arg Return return:yes Compare"
  },
  {
    "library": "kornia",
    "name": "get_branges_scales",
    "source_code": "def get_branges_scales(x, sample_drop_ratio=0.0):\n    b, n, d = x.shape\n    sample_subset_size = max(int(b * (1 - sample_drop_ratio)), 1)\n    brange = torch.randperm(b, device=x.device)[:sample_subset_size]\n    residual_scale_factor = b / sample_subset_size\n    return (brange, residual_scale_factor)",
    "docstring": "Add bernoulli sampled range and scale.",
    "type": "function",
    "file_path": "kornia\\kornia\\feature\\dedode\\transformer\\layers\\block.py",
    "ast_data": "FunctionDef name:get_branges_scales arg:x arg:sample_drop_ratio arguments arg arg Assign Assign Call Call Assign Call Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "Iterator",
    "source_code": "class Iterator(object):\n\n    def get_next(self):\n        pass\n\n    def __next__(self):\n        pass\n\n    def __iter__(self):\n        pass",
    "docstring": "Interface for distributed iterators.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\types\\distribute.py",
    "ast_data": "ClassDef name:Iterator FunctionDef name:get_next arg:self arguments arg FunctionDef name:__next__ arg:self arguments arg FunctionDef name:__iter__ arg:self arguments arg"
  },
  {
    "library": "authlib",
    "name": "headers",
    "source_code": "@property\ndef headers(self):\n    if self.type == 'json':\n        return self['header']",
    "docstring": "Alias of `` for JSON typed JWS.",
    "type": "method",
    "file_path": "authlib\\authlib\\jose\\rfc7515\\models.py",
    "ast_data": "FunctionDef name:headers arg:self arguments arg If Compare Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "collect_producer_nodes",
    "source_code": "def collect_producer_nodes(node: Node) -> Optional[list[Node]]:\n    nodes = [node]\n    frontier = [node]\n    while frontier:\n        node = frontier.pop()\n        all_args = list(node.args) + list(node.kwargs.values())\n        for arg in all_args:\n            if not isinstance(arg, Node):\n                continue\n            if arg.op == 'placeholder':\n                return None\n            nodes.append(arg)\n            if not (arg.op == 'call_function' and arg.target == getattr):\n                frontier.append(arg)\n    return nodes",
    "docstring": "Starting from a target node, trace back until we hit inpu or getattr node. This is used to extract the chain of operators starting from getattr to the target node, for example def forward(self, x): observed = self.observer(self.weight) return F.linear(x, observed) collect_producer_nodes(observed) will either return a list of nodes that produces the observed node or None if we can't extract a self contained graph without free variables(inputs of the forward function).",
    "type": "function",
    "file_path": "pytorch\\torch\\ao\\quantization\\fx\\utils.py",
    "ast_data": "FunctionDef name:collect_producer_nodes arg:node arguments arg Assign Assign While Assign Call Assign Call Call Call For If Call If Compare Return return:no Call If BoolOp Compare Compare Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "parse_readable_time_str",
    "source_code": "def parse_readable_time_str(time_str):\n\n    def parse_positive_float(value_str):\n        value = float(value_str)\n        if value < 0:\n            raise ValueError('Invalid time %s. Time value must be positive.' % value_str)\n        return value\n    time_str = time_str.strip()\n    if time_str.endswith('us'):\n        return int(parse_positive_float(time_str[:-2]))\n    elif time_str.endswith('ms'):\n        return int(parse_positive_float(time_str[:-2]) * 1000.0)\n    elif time_str.endswith('s'):\n        return int(parse_positive_float(time_str[:-1]) * 1000000.0)\n    return int(parse_positive_float(time_str))",
    "docstring": "Parses a time string in the format N, Nus, Nms, Ns. Args: time_str: () string consisting of an integer time value optionally followed by 'us', 'ms', or 's' suffix. If suffix is not specified, value is assumed to be in microseconds. (e.g. 100us, 8ms, 5s, 100). Returns: Microseconds value.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\debug\\cli\\command_parser.py",
    "ast_data": "FunctionDef name:parse_readable_time_str arg:time_str arguments arg FunctionDef name:parse_positive_float arg:value_str arguments arg Assign Call If Compare Raise Call Return return:yes Assign Call If Call Return return:yes Call Call If Call Return return:yes Call Call If Call Return return:yes Call Call Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_set_c_attrs",
    "source_code": "def _set_c_attrs(self, attrs):\n    for name, attr_value in attrs.items():\n        serialized = attr_value.SerializeToString()\n        with self._c_func.get() as func:\n            c_api.TF_FunctionSetAttrValueProto(func, compat.as_str(name), serialized)",
    "docstring": "Sets as attributes of self._c_func. Requires that self._c_func is not None. Args: attrs: a dictionary from attribute name to attribute proto value",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\function.py",
    "ast_data": "FunctionDef name:_set_c_attrs arg:self arg:attrs arguments arg arg For Call Assign Call With Call Call Call"
  },
  {
    "library": "seaborn",
    "name": "scale",
    "source_code": "def scale(self, **scales: Scale) -> Plot:\n    new = self._clone()\n    new._scales.update(scales)\n    return new",
    "docstring": "Specify mappings from data units to visual properties. Keywords correspond to variables defined in the plot, including coordinate variables (, ) and semantic variables (, , etc.). A number of \"magic\" arguments are accepted, including: - The name of a transform (e.g., , ) - The name of a palette (e.g., , ) - A tuple of values, defining the output range (e.g. ) - A dict, implying a :class: scale (e.g. ) - A list of values, implying a :class: scale (e.g. ) For more explicit control, pass a scale spec object such as :class: or :class:. Or pass to use an \"identity\" scale, which treats data values as literally encoding visual properties. Examples -------- .. include:: ../docstrings/objects.Plot.scale.rst",
    "type": "method",
    "file_path": "seaborn\\seaborn\\_core\\plot.py",
    "ast_data": "FunctionDef name:scale arg:self arguments arg arg Assign Call Call Return return:yes"
  },
  {
    "library": "authlib",
    "name": "get_token_grant",
    "source_code": "def get_token_grant(self, request):\n    for grant_cls, extensions in self._token_grants:\n        if grant_cls.check_token_endpoint(request):\n            return _create_grant(grant_cls, extensions, request, self)\n    raise UnsupportedGrantTypeError(request.payload.grant_type)",
    "docstring": "Find the token grant for current request. :param request: OAuth2Request instance. :return: grant instance",
    "type": "method",
    "file_path": "authlib\\authlib\\oauth2\\rfc6749\\authorization_server.py",
    "ast_data": "FunctionDef name:get_token_grant arg:self arg:request arguments arg arg For If Call Return return:yes Call Raise Call"
  },
  {
    "library": "pytorch",
    "name": "get_closure",
    "source_code": "def get_closure(fn):\n    captures = {}\n    captures.update(fn.__globals__)\n    for index, captured_name in enumerate(fn.__code__.co_freevars):\n        captures[captured_name] = fn.__closure__[index].cell_contents\n    return captures",
    "docstring": "Get a dictionary of closed over variables from a function",
    "type": "function",
    "file_path": "pytorch\\torch\\_jit_internal.py",
    "ast_data": "FunctionDef name:get_closure arg:fn arguments arg Assign Call For Call Assign Return return:yes"
  },
  {
    "library": "scipy",
    "name": "LimitedParamBenchmark",
    "source_code": "class LimitedParamBenchmark(Benchmark):\n    num_param_combinations = 0\n\n    def setup(self, *args, **kwargs):\n        slow = is_xslow()\n        if slow:\n            return\n        param_seed = kwargs.pop('param_seed', None)\n        if param_seed is None:\n            param_seed = 1\n        params = kwargs.pop('params', None)\n        if params is None:\n            params = self.params\n        num_param_combinations = kwargs.pop('num_param_combinations', None)\n        if num_param_combinations is None:\n            num_param_combinations = self.num_param_combinations\n        all_choices = list(itertools.product(*params))\n        rng = random.Random(param_seed)\n        rng.shuffle(all_choices)\n        active_choices = all_choices[:num_param_combinations]\n        if args not in active_choices:\n            raise NotImplementedError('skipped')",
    "docstring": "Limits parameter combinations to choices, chosen pseudo-randomly with fixed seed. Raises NotImplementedError (skip) if not in active set.",
    "type": "class",
    "file_path": "scipy\\benchmarks\\benchmarks\\common.py",
    "ast_data": "ClassDef name:LimitedParamBenchmark Assign FunctionDef name:setup arg:self arguments arg arg arg Assign Call If Return return:no Assign Call If Compare Assign Assign Call If Compare Assign Assign Call If Compare Assign Assign Call Call Assign Call Call Assign If Compare Raise Call"
  },
  {
    "library": "pytorch",
    "name": "_tensor_description_to_json",
    "source_code": "@classmethod\ndef _tensor_description_to_json(cls, tensor_desc):\n    if tensor_desc is None:\n        return None\n    return {'element': cls._enum_to_json(tensor_desc.element), 'layout': cls._enum_to_json(tensor_desc.layout), 'alignment': tensor_desc.alignment, 'complex_transform': cls._enum_to_json(tensor_desc.complex_transform)}",
    "docstring": "Convert TensorDescription to JSON dict. Args: tensor_desc: TensorDescription object Returns: dict: Dictionary representation",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\codegen\\cuda\\serialization.py",
    "ast_data": "FunctionDef name:_tensor_description_to_json arg:cls arg:tensor_desc arguments arg arg If Compare Return return:no Return return:yes Call Call Call"
  },
  {
    "library": "numpy",
    "name": "laggrid3d",
    "source_code": "def laggrid3d(x, y, z, c):\n    return pu._gridnd(lagval, c, x, y, z)",
    "docstring": "Evaluate a 3-D Laguerre series on the Cartesian product of x, y, and z. This function returns the values: .. math:: p(a,b,c) = \\sum_{i,j,k} c_{i,j,k} * L_i(a) * L_j(b) * L_k(c) where the points `axbyczxyzxyzxyzccxyzxyzcxy`. See Also -------- lagval, lagval2d, laggrid2d, lagval3d Examples -------- >>> from numpy.polynomial.laguerre import laggrid3d >>> c = [[[1, 2], [3, 4]], [[5, 6], [7, 8]]] >>> laggrid3d([0, 1], [0, 1], [2, 4], c) array([[[ -4., -44.], [ -2., -18.]], [[ -2., -14.], [ -1., -5.]]])",
    "type": "function",
    "file_path": "numpy\\numpy\\polynomial\\laguerre.py",
    "ast_data": "FunctionDef name:laggrid3d arg:x arg:y arg:z arg:c arguments arg arg arg arg Return return:yes Call"
  },
  {
    "library": "authlib",
    "name": "InvalidRequestURIError",
    "source_code": "class InvalidRequestURIError(OAuth2Error):\n    error = 'invalid_request_uri'",
    "docstring": "The request_uri in the Authorization Request returns an error or contains invalid data.",
    "type": "class",
    "file_path": "authlib\\authlib\\oidc\\core\\errors.py",
    "ast_data": "ClassDef name:InvalidRequestURIError Assign"
  },
  {
    "library": "tensorflow",
    "name": "_AddRestoreOps",
    "source_code": "def _AddRestoreOps(self, filename_tensor, saveables, restore_sequentially, reshape, preferred_shard=-1, name='restore_all'):\n    all_tensors = self.bulk_restore(filename_tensor, saveables, preferred_shard, restore_sequentially)\n    assign_ops = []\n    idx = 0\n    for saveable in saveables:\n        shapes = None\n        if reshape:\n            shapes = []\n            for spec in saveable.specs:\n                v = spec.tensor\n                shape = v.get_shape()\n                if not shape.is_fully_defined():\n                    shape = array_ops.shape(v)\n                shapes.append(shape)\n        saveable_tensors = all_tensors[idx:idx + len(saveable.specs)]\n        idx += len(saveable.specs)\n        assign_ops.append(saveable.restore(saveable_tensors, shapes))\n    return control_flow_ops.group(*assign_ops, name=name)",
    "docstring": "Add operations to restore saveables. Args: filename_tensor: Tensor for the path of the file to load. saveables: A list of SaveableObject objects. restore_sequentially: True if we want to restore variables sequentially within a shard. reshape: True if we want to reshape loaded tensors to the shape of the corresponding variable. preferred_shard: Shard to open first when loading a sharded file. name: Name for the returned op. Returns: An Operation that restores the variables.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\training\\saver.py",
    "ast_data": "FunctionDef name:_AddRestoreOps arg:self arg:filename_tensor arg:saveables arg:restore_sequentially arg:reshape arg:preferred_shard arg:name arguments arg arg arg arg arg arg arg Assign Call Assign Assign For Assign If Assign For Assign Assign Call If Call Assign Call Call Assign Call Call Call Call Return return:yes Call"
  },
  {
    "library": "scrapy",
    "name": "url_is_from_any_domain",
    "source_code": "def url_is_from_any_domain(url: UrlT, domains: Iterable[str]) -> bool:\n    host = _parse_url(url).netloc.lower()\n    if not host:\n        return False\n    domains = [d.lower() for d in domains]\n    return any((host == d or host.endswith(f'.{d}') for d in domains))",
    "docstring": "Return True if the url belongs to any of the given domains",
    "type": "function",
    "file_path": "scrapy\\scrapy\\utils\\url.py",
    "ast_data": "FunctionDef name:url_is_from_any_domain arg:url arg:domains arguments arg arg Assign Call Call If Return return:yes Assign Call Return return:yes Call BoolOp Compare Call"
  },
  {
    "library": "scipy",
    "name": "_nnlf_and_penalty",
    "source_code": "def _nnlf_and_penalty(self, x, args):\n    if isinstance(x, CensoredData):\n        xs = x._supported(*self._get_support(*args))\n        n_bad = len(x) - len(xs)\n        i1, i2 = xs._interval.T\n        terms = [self._logpdf(xs._uncensored, *args), self._logcdf(xs._left, *args), self._logsf(xs._right, *args), np.log(self._delta_cdf(i1, i2, *args))]\n    else:\n        cond0 = ~self._support_mask(x, *args)\n        n_bad = np.count_nonzero(cond0)\n        if n_bad > 0:\n            x = argsreduce(~cond0, x)[0]\n        terms = [self._logpdf(x, *args)]\n    totals, bad_counts = zip(*[_sum_finite(term) for term in terms])\n    total = sum(totals)\n    n_bad += sum(bad_counts)\n    return -total + n_bad * _LOGXMAX * 100",
    "docstring": "Compute the penalized negative log-likelihood for the \"standardized\" data (i.e. already shifted by loc and scaled by scale) for the shape parameters in . can be a 1D numpy array or a CensoredData instance.",
    "type": "method",
    "file_path": "scipy\\scipy\\stats\\_distn_infrastructure.py",
    "ast_data": "FunctionDef name:_nnlf_and_penalty arg:self arg:x arg:args arguments arg arg arg If Call Assign Call Call Assign Call Call Assign Assign Call Call Call Call Call Assign Call Assign Call If Compare Assign Call Assign Call Assign Call Call Assign Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "InconsistentMetadata",
    "source_code": "class InconsistentMetadata(Exception):\n    pass",
    "docstring": "Exception that is thrown when AutoHeuristic tries to log data to a file where the metadata stored in the file does not match the metadata it would store if the file didn't exist.",
    "type": "class",
    "file_path": "pytorch\\torch\\_inductor\\autoheuristic\\autoheuristic.py",
    "ast_data": "ClassDef name:InconsistentMetadata"
  },
  {
    "library": "scikit-learn",
    "name": "fit",
    "source_code": "def fit(self, X, y, sample_weight=None, **fit_params):\n    assert _num_samples(X) == _num_samples(y)\n    if self.methods_to_check == 'all' or 'fit' in self.methods_to_check:\n        X, y = self._check_X_y(X, y, should_be_fitted=False)\n    self.n_features_in_ = np.shape(X)[1]\n    self.classes_ = np.unique(check_array(y, ensure_2d=False, allow_nd=True))\n    if self.expected_fit_params:\n        missing = set(self.expected_fit_params) - set(fit_params)\n        if missing:\n            raise AssertionError(f'Expected fit parameter(s) {list(missing)} not seen.')\n        for key, value in fit_params.items():\n            if _num_samples(value) != _num_samples(X):\n                raise AssertionError(f'Fit parameter {key} has length {_num_samples(value)}; expected {_num_samples(X)}.')\n    if self.expected_sample_weight:\n        if sample_weight is None:\n            raise AssertionError('Expected sample_weight to be passed')\n        _check_sample_weight(sample_weight, X)\n    return self",
    "docstring": "Fit classifier. Parameters ---------- X : array-like of shape (n_samples, n_features) Training vector, where is the number of samples and is the number of features. y : array-like of shape (n_samples, n_outputs) or (n_samples,), default=None Target relative to X for classification or regression; None for unsupervised learning. sample_weight : array-like of shape (n_samples,), default=None Sample weights. If None, then samples are equally weighted. **fit_params : dict of string -> object Parameters passed to the `` method of the estimator Returns ------- self",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\utils\\_mocking.py",
    "ast_data": "FunctionDef name:fit arg:self arg:X arg:y arg:sample_weight arguments arg arg arg arg arg Compare Call Call If BoolOp Compare Compare Assign Call Assign Call Assign Call Call If Assign Call Call If Raise Call Call For Call If Compare Call Call Raise Call Call Call If If Compare Raise Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_set_gradients_and_temporaries",
    "source_code": "def _set_gradients_and_temporaries(self) -> None:\n    for event in self._op_tree.dfs():\n        for _, p_grad in extract_gradients(event):\n            self._categories.set_by_id(p_grad, Category.GRADIENT)\n    for node in self._data_flow_graph.flow_nodes:\n        for i in node.intermediates:\n            self._categories.set_by_key(i, Category.TEMPORARY)",
    "docstring": "Mark Tensors which are unambiguous and simple to reason about.",
    "type": "method",
    "file_path": "pytorch\\torch\\profiler\\_memory_profiler.py",
    "ast_data": "FunctionDef name:_set_gradients_and_temporaries arg:self arguments arg For Call For Call Call For For Call"
  },
  {
    "library": "scipy",
    "name": "backtracking",
    "source_code": "def backtracking(A, g, x, p, theta, p_dot_g, lb, ub):\n    alpha = 1\n    while True:\n        x_new, _ = reflective_transformation(x + alpha * p, lb, ub)\n        step = x_new - x\n        cost_change = -evaluate_quadratic(A, g, step)\n        if cost_change > -0.1 * alpha * p_dot_g:\n            break\n        alpha *= 0.5\n    active = find_active_constraints(x_new, lb, ub)\n    if np.any(active != 0):\n        x_new, _ = reflective_transformation(x + theta * alpha * p, lb, ub)\n        x_new = make_strictly_feasible(x_new, lb, ub, rstep=0)\n        step = x_new - x\n        cost_change = -evaluate_quadratic(A, g, step)\n    return (x, step, cost_change)",
    "docstring": "Find an appropriate step size using backtracking line search.",
    "type": "function",
    "file_path": "scipy\\scipy\\optimize\\_lsq\\trf_linear.py",
    "ast_data": "FunctionDef name:backtracking arg:A arg:g arg:x arg:p arg:theta arg:p_dot_g arg:lb arg:ub arguments arg arg arg arg arg arg arg arg Assign While Assign Call Assign Assign Call If Compare Assign Call If Call Compare Assign Call Assign Call Assign Assign Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "ExportDynamoConfig",
    "source_code": "@dataclasses.dataclass\nclass ExportDynamoConfig:\n    allow_rnn: bool = True",
    "docstring": "Manage Export-specific configurations of Dynamo.",
    "type": "class",
    "file_path": "pytorch\\torch\\_export\\__init__.py",
    "ast_data": "ClassDef name:ExportDynamoConfig"
  },
  {
    "library": "kornia",
    "name": "layout",
    "source_code": "@property\ndef layout(self) -> ImageLayout:\n    return self._layout",
    "docstring": "Return the image layout.",
    "type": "method",
    "file_path": "kornia\\kornia\\image\\image.py",
    "ast_data": "FunctionDef name:layout arg:self arguments arg Return return:yes"
  },
  {
    "library": "pandas",
    "name": "_get_join_indexers",
    "source_code": "def _get_join_indexers(self) -> tuple[npt.NDArray[np.intp] | None, npt.NDArray[np.intp] | None]:\n    assert self.how != 'asof'\n    return get_join_indexers(self.left_join_keys, self.right_join_keys, sort=self.sort, how=self.how)",
    "docstring": "return the join indexers",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\reshape\\merge.py",
    "ast_data": "FunctionDef name:_get_join_indexers arg:self arguments arg Compare Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "experimental_from_proto",
    "source_code": "@classmethod\ndef experimental_from_proto(cls, proto: tensor_shape_pb2.TensorShapeProto) -> 'TensorShape':\n    return TensorShape(proto)",
    "docstring": "Returns a TensorShape instance based on the serialized proto.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\tensor_shape.py",
    "ast_data": "FunctionDef name:experimental_from_proto arg:cls arg:proto arguments arg arg Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "ToolEvent",
    "source_code": "class ToolEvent:\n\n    def __init__(self, name, sender, tool, data=None):\n        self.name = name\n        self.sender = sender\n        self.tool = tool\n        self.data = data",
    "docstring": "Event for tool manipulation (add/remove).",
    "type": "class",
    "file_path": "matplotlib\\lib\\matplotlib\\backend_managers.py",
    "ast_data": "ClassDef name:ToolEvent FunctionDef name:__init__ arg:self arg:name arg:sender arg:tool arg:data arguments arg arg arg arg arg Assign Assign Assign Assign"
  },
  {
    "library": "tensorflow",
    "name": "with_empty_output",
    "source_code": "def with_empty_output(self):\n    self._options['output'] = 'none'\n    return self",
    "docstring": "Do not generate side-effect outputs.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\profiler\\option_builder.py",
    "ast_data": "FunctionDef name:with_empty_output arg:self arguments arg Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_tpu_service",
    "source_code": "def _tpu_service(self):\n    if self._service:\n        return self._service\n    if not _GOOGLE_API_CLIENT_INSTALLED:\n        raise RuntimeError('Missing runtime dependency on the Google API client. Run `pip install cloud-tpu-client` to fix.')\n    credentials = self._credentials\n    if credentials is None or credentials == 'default':\n        credentials = client.GoogleCredentials.get_application_default()\n    if self._discovery_url:\n        return discovery.build('tpu', 'v1', credentials=credentials, discoveryServiceUrl=self._discovery_url, cache_discovery=False)\n    else:\n        return discovery.build('tpu', 'v1', credentials=credentials, cache_discovery=False)",
    "docstring": "Creates a new Cloud TPU API object. This works around an issue where the underlying HTTP connection sometimes times out when the script has been running for too long. Other methods in this object call this method to get a new API object whenever they need to communicate with the Cloud API. Raises: RuntimeError: If the dependent Python packages are missing. Returns: A Google Cloud TPU API object.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\tpu\\client\\client.py",
    "ast_data": "FunctionDef name:_tpu_service arg:self arguments arg If Return return:yes If Raise Call Assign If BoolOp Compare Compare Assign Call If Return return:yes Call Return return:yes Call"
  },
  {
    "library": "django",
    "name": "_get",
    "source_code": "def _get(self, *args, **kwargs):\n    return (self.deserialize_messages(self.request.session.get(self.session_key)), True)",
    "docstring": "Retrieve a list of messages from the request's session. This storage always stores everything it is given, so return True for the all_retrieved flag.",
    "type": "method",
    "file_path": "django\\django\\contrib\\messages\\storage\\session.py",
    "ast_data": "FunctionDef name:_get arg:self arguments arg arg arg Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "get_verbosity",
    "source_code": "@tf_export(v1=['logging.get_verbosity'])\ndef get_verbosity():\n    return get_logger().getEffectiveLevel()",
    "docstring": "Return how much logging output will be produced.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\platform\\tf_logging.py",
    "ast_data": "FunctionDef name:get_verbosity arguments Return return:yes Call Call Call"
  },
  {
    "library": "cherrypy",
    "name": "do_login",
    "source_code": "def do_login(self, username, password, from_page='..', **kwargs):\n    response = cherrypy.serving.response\n    error_msg = self.check_username_and_password(username, password)\n    if error_msg:\n        body = self.login_screen(from_page, username, error_msg)\n        response.body = body\n        if 'Content-Length' in response.headers:\n            del response.headers['Content-Length']\n        return True\n    else:\n        cherrypy.serving.request.login = username\n        cherrypy.session[self.session_key] = username\n        self.on_login(username)\n        raise cherrypy.HTTPRedirect(from_page or '/')",
    "docstring": "Login. May raise redirect, or return True if request handled.",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\lib\\cptools.py",
    "ast_data": "FunctionDef name:do_login arg:self arg:username arg:password arg:from_page arguments arg arg arg arg arg Assign Assign Call If Assign Call Assign If Compare Return return:yes Assign Assign Call Raise Call BoolOp"
  },
  {
    "library": "tensorflow",
    "name": "_variance_scale_term",
    "source_code": "def _variance_scale_term(self):\n    c0 = self.total_concentration[..., array_ops.newaxis]\n    return math_ops.sqrt((1.0 + c0 / self.total_count) / (1.0 + c0))",
    "docstring": "Helper to and which computes a shared scale.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\distributions\\dirichlet_multinomial.py",
    "ast_data": "FunctionDef name:_variance_scale_term arg:self arguments arg Assign Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_trackable_children",
    "source_code": "def _trackable_children(self, save_type=trackable.SaveType.CHECKPOINT, **kwargs):\n    if context.executing_eagerly():\n        graph_key = None\n    else:\n        graph = ops.get_default_graph()\n        graph_key = graph._graph_key\n    weights = {}\n    for (name, g), v in sorted(self._weights.items(), key=lambda i: i[0][0]):\n        if g == graph_key:\n            weights[name] = v\n    weights.update(super(_DynamicLossScaleState, self)._trackable_children(save_type, **kwargs))\n    return weights",
    "docstring": "From Trackable. Gather graph-specific weights to save.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\mixed_precision\\loss_scale_optimizer.py",
    "ast_data": "FunctionDef name:_trackable_children arg:self arg:save_type arguments arg arg arg If Call Assign Assign Call Assign Assign For Call Call arguments arg If Compare Assign Call Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "list_directory_v2",
    "source_code": "@tf_export('io.gfile.listdir')\ndef list_directory_v2(path):\n    if not is_directory(path):\n        raise errors.NotFoundError(node_def=None, op=None, message='Could not find directory {}'.format(path))\n    return [compat.as_str_any(filename) for filename in _pywrap_file_io.GetChildren(compat.path_to_bytes(path))]",
    "docstring": "Returns a list of entries contained within a directory. The list is in arbitrary order. It does not contain the special entries \".\" and \"..\". Args: path: string, path to a directory Returns: [filename1, filename2, ... filenameN] as strings Raises: errors.NotFoundError if directory doesn't exist",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\lib\\io\\file_io.py",
    "ast_data": "FunctionDef name:list_directory_v2 arg:path arguments arg If Call Raise Call Call Return return:yes Call Call Call Call"
  },
  {
    "library": "numpy",
    "name": "__ifloordiv__",
    "source_code": "def __ifloordiv__(self, other):\n    other_data = getdata(other)\n    dom_mask = _DomainSafeDivide().__call__(self._data, other_data)\n    other_mask = getmask(other)\n    new_mask = mask_or(other_mask, dom_mask)\n    if dom_mask.any():\n        _, fval = ufunc_fills[np.floor_divide]\n        other_data = np.where(dom_mask, other_data.dtype.type(fval), other_data)\n    self._mask |= new_mask\n    other_data = np.where(self._mask, other_data.dtype.type(1), other_data)\n    self._data.__ifloordiv__(other_data)\n    return self",
    "docstring": "Floor divide self by other in-place.",
    "type": "method",
    "file_path": "numpy\\numpy\\ma\\core.py",
    "ast_data": "FunctionDef name:__ifloordiv__ arg:self arg:other arguments arg arg Assign Call Assign Call Call Assign Call Assign Call If Call Assign Assign Call Call Assign Call Call Call Return return:yes"
  },
  {
    "library": "pandas",
    "name": "_set_names",
    "source_code": "def _set_names(self, names, *, level=None) -> None:\n    if names is not None and (not is_list_like(names)):\n        raise ValueError('Names should be list-like for a MultiIndex')\n    names = list(names)\n    if level is not None and len(names) != len(level):\n        raise ValueError('Length of names must match length of level.')\n    if level is None and len(names) != self.nlevels:\n        raise ValueError('Length of names must match number of levels in MultiIndex.')\n    if level is None:\n        level = range(self.nlevels)\n    else:\n        level = (self._get_level_number(lev) for lev in level)\n    for lev, name in zip(level, names):\n        if name is not None:\n            if not is_hashable(name):\n                raise TypeError(f'{type(self).__name__}.name must be a hashable type')\n        self._names[lev] = name\n    self._reset_cache('levels')",
    "docstring": "Set new names on index. Each name has to be a hashable type. Parameters ---------- values : str or sequence name(s) to set level : int, level name, or sequence of int/level names (default None) If the index is a MultiIndex (hierarchical), level(s) to set (None for all levels). Otherwise level must be None Raises ------ TypeError if each name is not hashable. Notes ----- sets names on levels. WARNING: mutates! Note that you generally want to set this *after* changing levels, so that it only acts on copies",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\indexes\\multi.py",
    "ast_data": "FunctionDef name:_set_names arg:self arg:names arguments arg arg arg If BoolOp Compare Call Raise Call Assign Call If BoolOp Compare Compare Call Call Raise Call If BoolOp Compare Compare Call Raise Call If Compare Assign Call Assign Call For Call If Compare If Call Raise Call Call Assign Call"
  },
  {
    "library": "numpy",
    "name": "integ",
    "source_code": "def integ(self, m=1, k=[], lbnd=None):\n    off, scl = self.mapparms()\n    if lbnd is None:\n        lbnd = 0\n    else:\n        lbnd = off + scl * lbnd\n    coef = self._int(self.coef, m, k, lbnd, 1.0 / scl)\n    return self.__class__(coef, self.domain, self.window, self.symbol)",
    "docstring": "Integrate. Return a series instance that is the definite integral of the current series. Parameters ---------- m : non-negative int The number of integrations to perform. k : array_like Integration constants. The first constant is applied to the first integration, the second to the second, and so on. The list of values must less than or equal to in length and any missing values are set to zero. lbnd : Scalar The lower bound of the definite integral. Returns ------- new_series : series A new series representing the integral. The domain is the same as the domain of the integrated series.",
    "type": "method",
    "file_path": "numpy\\numpy\\polynomial\\_polybase.py",
    "ast_data": "FunctionDef name:integ arg:self arg:m arg:k arg:lbnd arguments arg arg arg arg Assign Call If Compare Assign Assign Assign Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "nodes_filter",
    "source_code": "def nodes_filter(nodes: list[torch.fx.Node], node_call_back) -> list[torch.fx.Node]:\n    return [node for node in nodes if node_call_back(node)]",
    "docstring": "Returns the nodes that match the node_call_back as a list.",
    "type": "function",
    "file_path": "pytorch\\torch\\_export\\utils.py",
    "ast_data": "FunctionDef name:nodes_filter arg:nodes arg:node_call_back arguments arg arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "stack",
    "source_code": "def stack(self, name=None):\n    with ops.colocate_with(self._handle):\n        with ops.name_scope(name, 'TensorArrayStack', [self._handle]):\n            value = self.gather(math_ops.range(0, self.size()), name=name)\n            if self.element_shape and (not self._dynamic_size) and (self._size is not None):\n                value.set_shape([tensor_util.constant_value(self._size)] + self.element_shape.dims)\n            return value",
    "docstring": "See TensorArray.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\tensor_array_ops.py",
    "ast_data": "FunctionDef name:stack arg:self arg:name arguments arg arg With Call With Call Assign Call Call Call If BoolOp Compare Call Call Return return:yes"
  },
  {
    "library": "pandas",
    "name": "_tag",
    "source_code": "@staticmethod\ndef _tag(val: str | bytes, tag: str) -> bytes:\n    if isinstance(val, str):\n        val = bytes(val, 'utf-8')\n    return bytes('<' + tag + '>', 'utf-8') + val + bytes('</' + tag + '>', 'utf-8')",
    "docstring": "Surround val with",
    "type": "method",
    "file_path": "pandas\\pandas\\io\\stata.py",
    "ast_data": "FunctionDef name:_tag arg:val arg:tag arguments arg arg If Call Assign Call Return return:yes Call Call"
  },
  {
    "library": "pandas",
    "name": "is_valid",
    "source_code": "@property\ndef is_valid(self) -> bool:\n    return self.lhs in self.queryables",
    "docstring": "return True if this is a valid field",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\computation\\pytables.py",
    "ast_data": "FunctionDef name:is_valid arg:self arguments arg Return return:yes Compare"
  },
  {
    "library": "django",
    "name": "linenumbers",
    "source_code": "@register.filter(is_safe=True, needs_autoescape=True)\n@stringfilter\ndef linenumbers(value, autoescape=True):\n    lines = value.split('\\n')\n    width = str(len(str(len(lines))))\n    if not autoescape or isinstance(value, SafeData):\n        for i, line in enumerate(lines):\n            lines[i] = ('%0' + width + 'd. %s') % (i + 1, line)\n    else:\n        for i, line in enumerate(lines):\n            lines[i] = ('%0' + width + 'd. %s') % (i + 1, escape(line))\n    return mark_safe('\\n'.join(lines))",
    "docstring": "Display text with line numbers.",
    "type": "function",
    "file_path": "django\\django\\template\\defaultfilters.py",
    "ast_data": "FunctionDef name:linenumbers arg:value arg:autoescape arguments arg arg Assign Call Assign Call Call Call Call If BoolOp Call For Call Assign For Call Assign Call Return return:yes Call Call Call"
  },
  {
    "library": "django",
    "name": "get_week_format",
    "source_code": "def get_week_format(self):\n    return self.week_format",
    "docstring": "Get a week format string in strptime syntax to be used to parse the week from url variables.",
    "type": "method",
    "file_path": "django\\django\\views\\generic\\dates.py",
    "ast_data": "FunctionDef name:get_week_format arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "StateManager",
    "source_code": "class StateManager(object):\n\n    def create_variable(self, feature_column, name, shape, dtype=None, trainable=True, use_resource=True, initializer=None):\n        del feature_column, name, shape, dtype, trainable, use_resource, initializer\n        raise NotImplementedError('StateManager.create_variable')\n\n    def add_variable(self, feature_column, var):\n        del feature_column, var\n        raise NotImplementedError('StateManager.add_variable')\n\n    def get_variable(self, feature_column, name):\n        del feature_column, name\n        raise NotImplementedError('StateManager.get_var')\n\n    def add_resource(self, feature_column, name, resource):\n        del feature_column, name, resource\n        raise NotImplementedError('StateManager.add_resource')\n\n    def has_resource(self, feature_column, name):\n        del feature_column, name\n        raise NotImplementedError('StateManager.has_resource')\n\n    def get_resource(self, feature_column, name):\n        del feature_column, name\n        raise NotImplementedError('StateManager.get_resource')",
    "docstring": "Manages the state associated with FeatureColumns. Some s create variables or resources to assist their computation. The is responsible for creating and storing these objects since s are supposed to be stateless configuration only.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\feature_column\\feature_column_v2.py",
    "ast_data": "ClassDef name:StateManager FunctionDef name:create_variable arg:self arg:feature_column arg:name arg:shape arg:dtype arg:trainable arg:use_resource arg:initializer arguments arg arg arg arg arg arg arg arg Raise Call FunctionDef name:add_variable arg:self arg:feature_column arg:var arguments arg arg arg Raise Call FunctionDef name:get_variable arg:self arg:feature_column arg:name arguments arg arg arg Raise Call FunctionDef name:add_resource arg:self arg:feature_column arg:name arg:resource arguments arg arg arg arg Raise Call FunctionDef name:has_resource arg:self arg:feature_column arg:name arguments arg arg arg Raise Call FunctionDef name:get_resource arg:self arg:feature_column arg:name arguments arg arg arg Raise Call"
  },
  {
    "library": "pytorch",
    "name": "_maybe_insert_input_observers_for_node",
    "source_code": "def _maybe_insert_input_observers_for_node(node: Node, qconfig: QConfigAny, model: torch.nn.Module, named_modules: dict[str, torch.nn.Module], obs_or_fq_map: dict[EdgeOrNode, ObserverOrFakeQuantize], is_qat: bool) -> None:\n    new_args = []\n    for arg in node.args:\n        new_arg = _maybe_insert_input_observer_for_arg_or_kwarg(node, arg, qconfig, model, named_modules, obs_or_fq_map, is_qat)\n        new_args.append(new_arg)\n    assert node.target == torch.ops.aten.clone.default or node.target == torch.ops.aten.zeros_like.default or node.target == torch.ops.aten.gelu.default or (len(node.kwargs) == 0), ' expecting kwargs for aten op IR to be empty'\n    node.args = tuple(new_args)",
    "docstring": "If needed, inserts observers to the input args and kwargs of . Note: modifies inplace. For example, if cur_node needs an observer after prev_node, we change from prev_node -> cur_node To prev_node -> obs -> cur_node",
    "type": "function",
    "file_path": "pytorch\\torch\\ao\\quantization\\pt2e\\prepare.py",
    "ast_data": "FunctionDef name:_maybe_insert_input_observers_for_node arg:node arg:qconfig arg:model arg:named_modules arg:obs_or_fq_map arg:is_qat arguments arg arg arg arg arg arg Assign For Assign Call Call BoolOp Compare Compare Compare Compare Call Assign Call"
  },
  {
    "library": "matplotlib",
    "name": "set_gamma",
    "source_code": "def set_gamma(self, gamma):\n    self._gamma = gamma\n    self._init()",
    "docstring": "Set a new gamma value and regenerate colormap.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\colors.py",
    "ast_data": "FunctionDef name:set_gamma arg:self arg:gamma arguments arg arg Assign Call"
  },
  {
    "library": "pytorch",
    "name": "get_validators",
    "source_code": "def get_validators() -> tuple[str, str]:\n    return torch._C._cuda_tunableop_get_validators()",
    "docstring": "Return the TunableOp validators.",
    "type": "function",
    "file_path": "pytorch\\torch\\cuda\\tunable.py",
    "ast_data": "FunctionDef name:get_validators arguments Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "take",
    "source_code": "def take(self, indices, *, allow_fill: bool=False, fill_value=None, axis=None, **kwargs) -> Self:\n    nv.validate_take((), kwargs)\n    fill_left = fill_right = fill_value\n    if allow_fill:\n        fill_left, fill_right = self._validate_scalar(fill_value)\n    left_take = take(self._left, indices, allow_fill=allow_fill, fill_value=fill_left)\n    right_take = take(self._right, indices, allow_fill=allow_fill, fill_value=fill_right)\n    return self._shallow_copy(left_take, right_take)",
    "docstring": "Take elements from the IntervalArray. Parameters ---------- indices : sequence of integers Indices to be taken. allow_fill : bool, default False How to handle negative values in . * False: negative values in indicate positional indices from the right (the default). This is similar to :func:. * True: negative values in indicate missing values. These values are set to . Any other other negative values raise a `allow_fillfill_valuefill_valueindicesallow_fill` is True.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\arrays\\interval.py",
    "ast_data": "FunctionDef name:take arg:self arg:indices arguments arg arg arg arg arg arg Call Assign If Assign Call Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "numpy",
    "name": "is_cached",
    "source_code": "def is_cached(self):\n    return self.cache_infile and self.hit_cache",
    "docstring": "Returns True if the class loaded from the cache file",
    "type": "method",
    "file_path": "numpy\\numpy\\distutils\\ccompiler_opt.py",
    "ast_data": "FunctionDef name:is_cached arg:self arguments arg Return return:yes BoolOp"
  },
  {
    "library": "tensorflow",
    "name": "_get",
    "source_code": "def _get(self):\n    if values_util.is_saving_non_distributed():\n        return self._primary\n    replica_id = values_util.get_current_replica_id_as_int()\n    if replica_id is None:\n        return self._get_cross_replica()\n    else:\n        return self._get_replica(replica_id)",
    "docstring": "Returns the value for the current device or raises a ValueError.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\values.py",
    "ast_data": "FunctionDef name:_get arg:self arguments arg If Call Return return:yes Assign Call If Compare Return return:yes Call Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "validate_min_itemsize",
    "source_code": "def validate_min_itemsize(self, min_itemsize) -> None:\n    if min_itemsize is None:\n        return\n    if not isinstance(min_itemsize, dict):\n        return\n    q = self.queryables()\n    for k in min_itemsize:\n        if k == 'values':\n            continue\n        if k not in q:\n            raise ValueError(f'min_itemsize has the key [{k}] which is not an axis or data_column')",
    "docstring": "validate the min_itemsize doesn't contain items that are not in the axes this needs data_columns to be defined",
    "type": "method",
    "file_path": "pandas\\pandas\\io\\pytables.py",
    "ast_data": "FunctionDef name:validate_min_itemsize arg:self arg:min_itemsize arguments arg arg If Compare Return return:no If Call Return return:no Assign Call For If Compare If Compare Raise Call"
  },
  {
    "library": "django",
    "name": "import_wkt",
    "source_code": "def import_wkt(self, wkt):\n    capi.from_wkt(self.ptr, byref(c_char_p(force_bytes(wkt))))",
    "docstring": "Import the Spatial Reference from OGC WKT (string)",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\gdal\\srs.py",
    "ast_data": "FunctionDef name:import_wkt arg:self arg:wkt arguments arg arg Call Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "dynamic_size",
    "source_code": "@property\ndef dynamic_size(self):\n    return self._implementation._dynamic_size",
    "docstring": "Python bool; if the TensorArray can grow dynamically.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\tensor_array_ops.py",
    "ast_data": "FunctionDef name:dynamic_size arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "convert_saved_model",
    "source_code": "@convert_phase(Component.CONVERT_TF_TO_TFLITE_MODEL, SubComponent.CONVERT_SAVED_MODEL)\ndef convert_saved_model(**kwargs):\n    model_flags = build_model_flags(**kwargs)\n    conversion_flags = build_conversion_flags(**kwargs)\n    data = convert(model_flags, conversion_flags, input_data_str=None, debug_info_str=None)\n    return data",
    "docstring": "Converts a SavedModel using TF Lite converter.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\lite\\python\\convert.py",
    "ast_data": "FunctionDef name:convert_saved_model arguments arg Assign Call Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "bind_object",
    "source_code": "def bind_object(self, trackable):\n    checkpoint = self.checkpoint\n    checkpoint.all_python_objects.add(trackable)\n    current_assignment = checkpoint.object_by_proto_id.get(self._proto_id, None)\n    checkpoint.matched_proto_ids.add(self._proto_id)\n    if current_assignment is None:\n        checkpoint.object_by_proto_id[self._proto_id] = trackable\n        return True\n    else:\n        if current_assignment is not trackable:\n            logging.warning(f'Inconsistent references when loading the checkpoint into this object graph. For example, in the saved checkpoint object, `model.layer.weight` and `model.layer_copy.weight` reference the same variable, while in the current object these are two different variables. The referenced variables are:({current_assignment} and {trackable}).')\n        return False",
    "docstring": "Set a checkpointobject correspondence. Args: trackable: The object to record a correspondence for. Returns: True if this is a new assignment, False if this object has already been mapped to a checkpointed proto. Raises: AssertionError: If another object is already bound to the proto.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\checkpoint\\restore.py",
    "ast_data": "FunctionDef name:bind_object arg:self arg:trackable arguments arg arg Assign Call Assign Call Call If Compare Assign Return return:yes If Compare Call Return return:yes"
  },
  {
    "library": "sphinx",
    "name": "add_object_type",
    "source_code": "def add_object_type(self, directivename: str, rolename: str, indextemplate: str='', parse_node: Callable[[BuildEnvironment, str, addnodes.desc_signature], str] | None=None, ref_nodeclass: type[nodes.TextElement] | None=None, objname: str='', doc_field_types: Sequence[Field]=(), override: bool=False) -> None:\n    self.registry.add_object_type(directivename, rolename, indextemplate, parse_node, ref_nodeclass, objname, doc_field_types, override=override)",
    "docstring": "Register a new object type. This method is a very convenient way to add a new :term: type that can be cross-referenced. It will do this: - Create a new directive (called *directivename*) for documenting an object. It will automatically add index entries if *indextemplate* is nonempty; if given, it must contain exactly one instance of `conf.pyfunctionxref-syntax`). If *override* is True, the given object_type is forcedly installed even if an object_type having the same name is already installed. .. versionchanged:: 1.8 Add *override* keyword.",
    "type": "method",
    "file_path": "sphinx\\sphinx\\application.py",
    "ast_data": "FunctionDef name:add_object_type arg:self arg:directivename arg:rolename arg:indextemplate arg:parse_node arg:ref_nodeclass arg:objname arg:doc_field_types arg:override arguments arg arg arg arg arg arg arg arg arg Call"
  },
  {
    "library": "tensorflow",
    "name": "write",
    "source_code": "def write(self, index, value, name=None):\n    with ops.name_scope(name, 'TensorArrayV2Write', [self._flow, index, value]):\n        value = ops.convert_to_tensor(value, preferred_dtype=self._dtype, name='value')\n        _check_dtypes(value, self._dtype)\n        self._check_element_shape(value.shape)\n        flow_out = list_ops.tensor_list_set_item(input_handle=self._flow, index=index, item=value, resize_if_index_out_of_bounds=self._dynamic_size, name=name)\n        return build_ta_with_new_flow(self, flow_out)",
    "docstring": "See TensorArray.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\tensor_array_ops.py",
    "ast_data": "FunctionDef name:write arg:self arg:index arg:value arg:name arguments arg arg arg arg With Call Assign Call Call Call Assign Call Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "_missing_raise",
    "source_code": "def _missing_raise():\n    raise ClusterError('One of the clusters is empty. Re-run kmeans with a different initialization.')",
    "docstring": "Raise a ClusterError when called.",
    "type": "function",
    "file_path": "scipy\\scipy\\cluster\\vq.py",
    "ast_data": "FunctionDef name:_missing_raise arguments Raise Call"
  },
  {
    "library": "pytorch",
    "name": "recompile",
    "source_code": "@compatibility(is_backward_compatible=True)\ndef recompile(self) -> PythonCode:\n    if isinstance(self._graph._codegen, _PyTreeCodeGen):\n        self._in_spec = self._graph._codegen.pytree_info.in_spec\n        self._out_spec = self._graph._codegen.pytree_info.out_spec\n    python_code = self._graph.python_code(root_module='self')\n    self._code = python_code.src\n    self._lineno_map = python_code._lineno_map\n    cls = type(self)\n    co_fields = self._graph._co_fields if hasattr(self._graph, '_co_fields') else {}\n    cls.forward = _forward_from_src(self._code, python_code.globals, co_fields)\n    cls_call = cls.__call__ if '__call__' in vars(cls) else None\n    if '_wrapped_call' not in vars(cls):\n        cls._wrapped_call = _WrappedCall(cls, cls_call)\n\n    def call_wrapped(self, *args, **kwargs):\n        return self._wrapped_call(self, *args, **kwargs)\n    cls.__call__ = call_wrapped\n    return python_code",
    "docstring": "Recompile this GraphModule from its `` will be out of date.",
    "type": "method",
    "file_path": "pytorch\\torch\\fx\\graph_module.py",
    "ast_data": "FunctionDef name:recompile arg:self arguments arg If Call Assign Assign Assign Call Assign Assign Assign Call Assign Call Assign Call Assign Compare Call If Compare Call Assign Call FunctionDef name:call_wrapped arg:self arguments arg arg arg Return return:yes Call Assign Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_scale_loss_for_estimator_enabled",
    "source_code": "@tf_contextlib.contextmanager\ndef _scale_loss_for_estimator_enabled(self):\n    self._scale_loss_for_estimator = True\n    try:\n        yield\n    finally:\n        self._scale_loss_for_estimator = False",
    "docstring": "Scope which sets a flag used for scaling losses in optimizer. Yields: is a context manager with a side effect, but doesn't return a value.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\distribute_lib.py",
    "ast_data": "FunctionDef name:_scale_loss_for_estimator_enabled arg:self arguments arg Assign Try Assign"
  },
  {
    "library": "pytorch",
    "name": "_get_storage_alignment",
    "source_code": "def _get_storage_alignment() -> int:\n    from torch.utils.serialization import config\n    return config.save.storage_alignment",
    "docstring": "Gets alignment for storages in torch.save files/ Defaults to 64. Returns: storage_alginment: int",
    "type": "function",
    "file_path": "pytorch\\torch\\serialization.py",
    "ast_data": "FunctionDef name:_get_storage_alignment arguments Return return:yes"
  },
  {
    "library": "numpy",
    "name": "translate",
    "source_code": "def translate(self, table, deletechars=None):\n    return asarray(translate(self, table, deletechars))",
    "docstring": "For each element in , return a copy of the string where all characters occurring in the optional argument are removed, and the remaining characters have been mapped through the given translation table. See Also -------- char.translate",
    "type": "method",
    "file_path": "numpy\\numpy\\_core\\defchararray.py",
    "ast_data": "FunctionDef name:translate arg:self arg:table arg:deletechars arguments arg arg arg Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "SumOverBatchSize",
    "source_code": "class SumOverBatchSize(Reduce):\n\n    def __init__(self, name='sum_over_batch_size', dtype=None):\n        super(SumOverBatchSize, self).__init__(reduction=metrics_utils.Reduction.SUM_OVER_BATCH_SIZE, name=name, dtype=dtype)",
    "docstring": "Computes the weighted sum over batch size of the given values. For example, if values is [1, 3, 5, 7] then the metric value is 4. If the weights were specified as [1, 1, 0, 0] then the value would be 1. This metric creates two variables, and that are used to compute the average of . This average is ultimately returned as sum over batch size which is an idempotent operation that simply divides by . If is , weights default to 1. Use of 0 to mask values.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\metrics.py",
    "ast_data": "ClassDef name:SumOverBatchSize FunctionDef name:__init__ arg:self arg:name arg:dtype arguments arg arg arg Call Call"
  },
  {
    "library": "tensorflow",
    "name": "sequence_length_from_sparse_tensor",
    "source_code": "def sequence_length_from_sparse_tensor(sp_tensor, num_elements=1):\n    with ops.name_scope(None, 'sequence_length') as name_scope:\n        row_ids = sp_tensor.indices[:, 0]\n        column_ids = sp_tensor.indices[:, 1]\n        column_ids += array_ops.ones_like(column_ids)\n        seq_length = math_ops.segment_max(column_ids, segment_ids=row_ids)\n        seq_length = math_ops.cast(math_ops.ceil(seq_length / num_elements), dtypes.int64)\n        n_pad = array_ops.shape(sp_tensor)[:1] - array_ops.shape(seq_length)[:1]\n        padding = array_ops.zeros(n_pad, dtype=seq_length.dtype)\n        return array_ops.concat([seq_length, padding], axis=0, name=name_scope)",
    "docstring": "Returns a [batch_size] Tensor with per-example sequence length.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\feature_column\\utils.py",
    "ast_data": "FunctionDef name:sequence_length_from_sparse_tensor arg:sp_tensor arg:num_elements arguments arg arg With Call Assign Assign Call Assign Call Assign Call Call Assign Call Call Assign Call Return return:yes Call"
  },
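The core of sequence_length_from_sparse_tensor is a per-row max over (column index + 1), ceil-divided by num_elements, with zero padding for rows that have no entries. A plain-NumPy sketch of that logic (the indices and batch size here are made-up inputs):

    import numpy as np

    indices = np.array([[0, 0], [0, 1], [0, 2], [1, 0]])  # (row, col) pairs
    batch_size, num_elements = 3, 1

    seq_length = np.zeros(batch_size, dtype=np.int64)
    for row, col in indices:
        # per-example length is the largest column index + 1
        seq_length[row] = max(seq_length[row], col + 1)
    seq_length = np.ceil(seq_length / num_elements).astype(np.int64)
    print(seq_length)  # [3 1 0]; row 2 has no entries, so it pads to 0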
  {
    "library": "scipy",
    "name": "parse_attribute",
    "source_code": "@classmethod\ndef parse_attribute(cls, name, attr_string):\n    attr_string = attr_string.lower().strip()\n    if attr_string[:len('string')] == 'string':\n        return cls(name)\n    else:\n        return None",
    "docstring": "Parse the attribute line if it knows how. Returns the parsed attribute, or None. For string attributes, the attribute string would be like 'string'.",
    "type": "method",
    "file_path": "scipy\\scipy\\io\\arff\\_arffread.py",
    "ast_data": "FunctionDef name:parse_attribute arg:cls arg:name arg:attr_string arguments arg arg arg Assign Call Call If Compare Call Return return:yes Call Return return:no"
  },
  {
    "library": "tensorflow",
    "name": "scatter_max",
    "source_code": "def scatter_max(self, sparse_delta, use_locking=False, name=None):\n    if not isinstance(sparse_delta, indexed_slices.IndexedSlices):\n        raise TypeError('sparse_delta is not IndexedSlices: %s' % sparse_delta)\n    return gen_state_ops.scatter_max(self._variable, sparse_delta.indices, sparse_delta.values, use_locking=use_locking, name=name)",
    "docstring": "Updates this variable with the max of and itself. Args: sparse_delta: to use as an argument of max with this variable. use_locking: If , use locking during the operation. name: the name of the operation. Returns: A that will hold the new value of this variable after the scattered maximization has completed. Raises: TypeError: if is not an .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\ref_variable.py",
    "ast_data": "FunctionDef name:scatter_max arg:self arg:sparse_delta arg:use_locking arg:name arguments arg arg arg arg If Call Raise Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "reduced_shape",
    "source_code": "def reduced_shape(input_shape, axes):\n    constant_input_shape = tensor_util.constant_value(input_shape)\n    if constant_input_shape is not None:\n        constant_axes = tensor_util.constant_value(axes)\n        if constant_axes is not None:\n            constant_axes = np.array(constant_axes, dtype=np.int32)\n            constant_input_shape = np.array(constant_input_shape, dtype=np.int32)\n            constant_input_shape[constant_axes] = 1\n            return constant_input_shape\n    axes = ops.convert_to_tensor(axes)\n    input_rank = array_ops.size(input_shape, out_type=axes.dtype)\n    axes = (axes + input_rank) % input_rank\n    axes_shape = array_ops.shape(axes)\n    return gen_data_flow_ops.dynamic_stitch([range(input_rank), axes], [input_shape, array_ops.ones(axes_shape, dtype=input_shape.dtype)])",
    "docstring": "Helper function for reduction ops. Args: input_shape: 1-D Tensor, the shape of the Tensor being reduced. axes: 1-D Tensor, the reduction axes. Returns: A 1-D Tensor, the output shape as if keepdims were set to True.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\math_ops.py",
    "ast_data": "FunctionDef name:reduced_shape arg:input_shape arg:axes arguments arg arg Assign Call If Compare Assign Call If Compare Assign Call Assign Call Assign Return return:yes Assign Call Assign Call Assign Assign Call Return return:yes Call Call Call"
  },
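The constant fast path of reduced_shape is easy to check by hand: reduced axes collapse to size one, matching keepdims=True semantics. A NumPy sketch of that branch (shapes chosen arbitrarily):

    import numpy as np

    input_shape = np.array([2, 3, 5, 7], dtype=np.int32)
    axes = np.array([1, 2], dtype=np.int32)
    out = input_shape.copy()
    out[axes] = 1  # reduced dimensions are kept with size 1
    print(out)  # [2 1 1 7]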
  {
    "library": "tensorflow",
    "name": "_populate_recipient_maps",
    "source_code": "def _populate_recipient_maps(self):\n    for node in self._node_inputs:\n        inputs = self._node_inputs[node]\n        for inp in inputs:\n            inp = get_node_name(inp)\n            if inp not in self._node_recipients:\n                self._node_recipients[inp] = []\n            self._node_recipients[inp].append(node)\n            if inp in self._ref_args:\n                if inp not in self._node_reversed_ref_inputs:\n                    self._node_reversed_ref_inputs[inp] = []\n                self._node_reversed_ref_inputs[inp].append(node)\n    for node in self._node_ctrl_inputs:\n        ctrl_inputs = self._node_ctrl_inputs[node]\n        for ctrl_inp in ctrl_inputs:\n            if ctrl_inp in self._copy_send_nodes:\n                continue\n            if ctrl_inp not in self._node_ctrl_recipients:\n                self._node_ctrl_recipients[ctrl_inp] = []\n            self._node_ctrl_recipients[ctrl_inp].append(node)",
    "docstring": "Populate the map from node name to recipient(s) of its output(s). This method also populates the input map based on reversed ref edges.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\debug\\lib\\debug_graphs.py",
    "ast_data": "FunctionDef name:_populate_recipient_maps arg:self arguments arg For Assign For Assign Call If Compare Assign Call If Compare If Compare Assign Call For Assign For If Compare If Compare Assign Call"
  },
  {
    "library": "tensorflow",
    "name": "_convert_composite_tensor",
    "source_code": "def _convert_composite_tensor(value, expected_type, path, context):\n    if context == _ConversionContext.SPEC:\n        if not (isinstance(value, type_spec.TypeSpec) and _issubclass(value.value_type, expected_type)):\n            raise TypeError(f'{''.join(path)}: expected a TypeSpec for {expected_type.__name__!r}, got {type(value).__name__!r}')\n        return value\n    if not isinstance(value, expected_type):\n        raise TypeError(f'{''.join(path)}: expected {expected_type.__name__!r}, got {type(value).__name__!r}')\n    return value",
    "docstring": "Converts to a value of type .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\extension_type_field.py",
    "ast_data": "FunctionDef name:_convert_composite_tensor arg:value arg:expected_type arg:path arg:context arguments arg arg arg arg If Compare If BoolOp Call Call Raise Call Call Call Return return:yes If Call Raise Call Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_special_op_to_preserve_cia",
    "source_code": "def _special_op_to_preserve_cia(*args, **kwargs):\n    return NotImplemented",
    "docstring": "This is an special marker that tells our infra that we shouldn't decompose this op.",
    "type": "function",
    "file_path": "pytorch\\torch\\_export\\utils.py",
    "ast_data": "FunctionDef name:_special_op_to_preserve_cia arguments arg arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "get_deterministic_debug_mode",
    "source_code": "def get_deterministic_debug_mode() -> builtins.int:\n    if _C._get_deterministic_algorithms():\n        if _C._get_deterministic_algorithms_warn_only():\n            return 1\n        else:\n            return 2\n    else:\n        return 0",
    "docstring": "Returns the current value of the debug mode for deterministic operations. Refer to :func: documentation for more details.",
    "type": "function",
    "file_path": "pytorch\\torch\\__init__.py",
    "ast_data": "FunctionDef name:get_deterministic_debug_mode arguments If Call If Call Return return:yes Return return:yes Return return:yes"
  },
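The three return values map to the string modes accepted by the public setter torch.set_deterministic_debug_mode ("default" -> 0, "warn" -> 1, "error" -> 2); a short check:

    import torch

    torch.set_deterministic_debug_mode("warn")
    assert torch.get_deterministic_debug_mode() == 1
    torch.set_deterministic_debug_mode("default")
    assert torch.get_deterministic_debug_mode() == 0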
  {
    "library": "django",
    "name": "__call__",
    "source_code": "async def __call__(self, scope, receive, send):\n    if scope['type'] != 'http':\n        raise ValueError('Django can only handle ASGI/HTTP connections, not %s.' % scope['type'])\n    async with ThreadSensitiveContext():\n        await self.handle(scope, receive, send)",
    "docstring": "Async entrypoint - parses the request and hands off to get_response.",
    "type": "method",
    "file_path": "django\\django\\core\\handlers\\asgi.py",
    "ast_data": "AsyncFunctionDef name:__call__ arg:self arg:scope arg:receive arg:send arguments arg arg arg arg If Compare Raise Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "map_kernel_groups_to_node_sizes",
    "source_code": "@classmethod\ndef map_kernel_groups_to_node_sizes(cls, groups: Sequence[sympy.Expr], lengths: Sequence[Sequence[sympy.Expr]], set_ranges) -> list[list[sympy.Expr]]:\n    if len(lengths) == len(groups) and all((V.graph.sizevars.simplify(sympy_product(x) - g) == 0 for x, g in zip(lengths, groups))):\n        return set_ranges(*lengths)\n    new_ranges, return_getters_groups = cls._split_iteration_ranges(groups, lengths)\n    itervars = [*itertools.chain.from_iterable(set_ranges(*new_ranges))]\n    return [[fn(itervars) for fn in fns] for fns in return_getters_groups]",
    "docstring": "We may want to fuse into a tiled kernel with groups (s0, s1). To do this we need to split up the iteration space of i0 into something like: for i1 in s0: for i2 in s1: i0 = i1*s1 + i2 .... This function matches and resplits lengths to the groups of this kernel to enable tiled + non-tiled fusions.",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\codegen\\simd.py",
    "ast_data": "FunctionDef name:map_kernel_groups_to_node_sizes arg:cls arg:groups arg:lengths arg:set_ranges arguments arg arg arg arg If BoolOp Compare Call Call Call Compare Call Call Call Return return:yes Call Assign Call Assign Call Call Return return:yes Call"
  },
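The re-indexing the docstring sketches can be verified in plain Python: a flat index i0 over a space of size s0*s1 is recovered from tiled indices (i1, i2) via i0 = i1*s1 + i2 (the sizes below are arbitrary):

    s0, s1 = 3, 4
    flat = []
    for i1 in range(s0):
        for i2 in range(s1):
            flat.append(i1 * s1 + i2)  # i0 = i1*s1 + i2
    assert flat == list(range(s0 * s1))  # the split covers i0 exactly once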
  {
    "library": "pandas",
    "name": "is_monotonic_decreasing",
    "source_code": "@cache_readonly\ndef is_monotonic_decreasing(self) -> bool:\n    return self[::-1].is_monotonic_increasing",
    "docstring": "Return True if the IntervalIndex is monotonic decreasing (only equal or decreasing values), else False",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\indexes\\interval.py",
    "ast_data": "FunctionDef name:is_monotonic_decreasing arg:self arguments arg Return return:yes"
  },
  {
    "library": "pandas",
    "name": "nlargest",
    "source_code": "def nlargest(self, n: int=5, keep: Literal['first', 'last', 'all']='first') -> Series:\n    return selectn.SelectNSeries(self, n=n, keep=keep).nlargest()",
    "docstring": "Return the largest elements. Parameters ---------- n : int, default 5 Return this many descending sorted values. keep : {'first', 'last', 'all'}, default 'first' When there are duplicate values that cannot all fit in a Series of elements: - `nnnnnnnnnkeepnn` with all duplicates kept. Note that the returned Series has five elements due to the three duplicates. >>> s.nlargest(3, keep=\"all\") France 65000000 Italy 59000000 Malta 434000 Maldives 434000 Brunei 434000 dtype: int64",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\series.py",
    "ast_data": "FunctionDef name:nlargest arg:self arg:n arg:keep arguments arg arg arg Return return:yes Call Call"
  },
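A runnable version of the docstring's example, reconstructing the population Series it refers to (the numbers follow the sample output above):

    import pandas as pd

    s = pd.Series({"France": 65000000, "Italy": 59000000, "Malta": 434000,
                   "Maldives": 434000, "Brunei": 434000})
    # keep="first" returns exactly n rows; keep="all" keeps every duplicate
    # of the smallest retained value, so more than n rows can come back.
    print(s.nlargest(3))
    print(s.nlargest(3, keep="all"))  # five rows due to the three ties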
  {
    "library": "matplotlib",
    "name": "_sci",
    "source_code": "def _sci(self, im):\n    _api.check_isinstance((mcoll.Collection, mimage.AxesImage), im=im)\n    if im not in self._children:\n        raise ValueError('Argument must be an image or collection in this Axes')\n    self._current_image = im",
    "docstring": "Set the current image. This image will be the target of colormap functions like `~.pyplot.clim`. The current image is an attribute of the current Axes.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\axes\\_base.py",
    "ast_data": "FunctionDef name:_sci arg:self arg:im arguments arg arg Call If Compare Raise Call Assign"
  },
  {
    "library": "tensorflow",
    "name": "device",
    "source_code": "@property\ndef device(self):\n    return self.handle.device",
    "docstring": "The device this variable is on.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\resource_variable_ops.py",
    "ast_data": "FunctionDef name:device arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_load",
    "source_code": "def _load(self):\n    module = importlib.import_module(self.__name__)\n    self._tfll_parent_module_globals[self._tfll_local_name] = module\n    if self._tfll_warning:\n        logging.warning(self._tfll_warning)\n        self._tfll_warning = None\n    self.__dict__.update(module.__dict__)\n    return module",
    "docstring": "Load the module and insert it into the parent's globals.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\util\\lazy_loader.py",
    "ast_data": "FunctionDef name:_load arg:self arguments arg Assign Call Assign If Call Assign Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_load_container_from_state",
    "source_code": "def _load_container_from_state(self, states, data_groups, container_state_dict):\n    for name, state in states.items():\n        config_name = data_groups.get(name, None)\n        if config_name is None:\n            raise RuntimeError(f'Error loading {name}')\n        parametrized_name = f'parametrizations.{name}.original'\n        parametrized = False\n        data = container_state_dict.get(name, None)\n        if name in container_state_dict:\n            data = container_state_dict.get(name)\n        elif parametrized_name in container_state_dict:\n            data = container_state_dict.get(parametrized_name)\n            parametrized = True\n        else:\n            raise RuntimeError(f'Error loading {name}')\n        self._container.register_buffer(name=name, tensor=data)\n        if parametrized:\n            mask = state.get('mask', torch.ones_like(data))\n            param_class = data_groups.get('parametrization', utils.FakeSparsity)\n            parametrize.register_parametrization(self._container, name, param_class(mask))",
    "docstring": "This restores the state of the container specifically based on the data present in state and data_groups If the data was parametrized, then the data would be added to the container and then parametrized, else it would just add the attribute the container.",
    "type": "method",
    "file_path": "pytorch\\torch\\ao\\pruning\\_experimental\\data_sparsifier\\base_data_sparsifier.py",
    "ast_data": "FunctionDef name:_load_container_from_state arg:self arg:states arg:data_groups arg:container_state_dict arguments arg arg arg arg For Call Assign Call If Compare Raise Call Assign Assign Assign Call If Compare Assign Call If Compare Assign Call Assign Raise Call Call If Assign Call Call Assign Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "dropout3d",
    "source_code": "def dropout3d(input: Tensor, p: float=0.5, training: bool=True, inplace: bool=False) -> Tensor:\n    if has_torch_function_unary(input):\n        return handle_torch_function(dropout3d, (input,), input, p=p, training=training, inplace=inplace)\n    if p < 0.0 or p > 1.0:\n        raise ValueError(f'dropout probability has to be between 0 and 1, but got {p}')\n    inp_dim = input.dim()\n    if inp_dim not in (4, 5):\n        warn_msg = f'dropout3d: Received a {inp_dim}-D input to dropout3d, which is deprecated and will result in an error in a future release. To retain the behavior and silence this warning, please use dropout instead. Note that dropout3d exists to provide channel-wise dropout on inputs with 3 spatial dimensions, a channel dimension, and an optional batch dimension (i.e. 4D or 5D inputs).'\n        warnings.warn(warn_msg)\n    is_batched = inp_dim == 5\n    if not is_batched:\n        input = input.unsqueeze_(0) if inplace else input.unsqueeze(0)\n    result = _VF.feature_dropout_(input, p, training) if inplace else _VF.feature_dropout(input, p, training)\n    if not is_batched:\n        result = result.squeeze_(0) if inplace else result.squeeze(0)\n    return result",
    "docstring": "Randomly zero out entire channels (a channel is a 3D feature map). For example, the :math:-th channel of the :math:-th sample in the batched input is a 3D tensor :math: of the input tensor. Each channel will be zeroed out independently on every forward call with probability :attr: using samples from a Bernoulli distribution. See :class: for details. Args: p: probability of a channel to be zeroed. Default: 0.5 training: apply dropout if is ``",
    "type": "function",
    "file_path": "pytorch\\torch\\nn\\functional.py",
    "ast_data": "FunctionDef name:dropout3d arg:input arg:p arg:training arg:inplace arguments arg arg arg arg If Call Return return:yes Call If BoolOp Compare Compare Raise Call Assign Call If Compare Assign Call Assign Compare If Assign Call Call Assign Call Call If Assign Call Call Return return:yes"
  },
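Channel-wise behavior is easiest to see on a constant input: a dropped channel is all zeros and surviving channels are rescaled by 1/(1-p). A short sketch with the public torch.nn.functional API (the shapes are arbitrary):

    import torch
    import torch.nn.functional as F

    # 5D input: (batch, channels, depth, height, width)
    x = torch.ones(2, 4, 3, 3, 3)
    out = F.dropout3d(x, p=0.5, training=True)
    per_channel = out.flatten(2).sum(-1)  # (2, 4); zeros mark dropped channels
    print(per_channel)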
  {
    "library": "django",
    "name": "new_file",
    "source_code": "def new_file(self, field_name, file_name, content_type, content_length, charset=None, content_type_extra=None):\n    self.field_name = field_name\n    self.file_name = file_name\n    self.content_type = content_type\n    self.content_length = content_length\n    self.charset = charset\n    self.content_type_extra = content_type_extra",
    "docstring": "Signal that a new file has been started. Warning: As with any data from the client, you should not trust content_length (and sometimes won't even get it).",
    "type": "method",
    "file_path": "django\\django\\core\\files\\uploadhandler.py",
    "ast_data": "FunctionDef name:new_file arg:self arg:field_name arg:file_name arg:content_type arg:content_length arg:charset arg:content_type_extra arguments arg arg arg arg arg arg arg Assign Assign Assign Assign Assign Assign"
  },
  {
    "library": "tensorflow",
    "name": "reshard",
    "source_code": "def reshard(self, checkpoint_values: List[tensor.Tensor], shape_and_slice_spec: List[str]) -> tensor.Tensor:\n    del shape_and_slice_spec\n    if len(checkpoint_values) != 1:\n        raise ValueError('Default reshard expects a single checkpoint value.')\n    return checkpoint_values[0]",
    "docstring": "Reshards the checkpoint values as read from the checkpoint file. Override this to reshard/modify the restored values Args: checkpoint_values: The values returned by the restore op, as read from file. shape_and_slice_spec: The shape and slice spec required by the caller. Returns: List of restored Tensor values after being resharded.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\checkpoint\\checkpoint_adapter.py",
    "ast_data": "FunctionDef name:reshard arg:self arg:checkpoint_values arg:shape_and_slice_spec arguments arg arg arg If Compare Call Raise Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_constrain_unify",
    "source_code": "@record_shapeenv_event()\ndef _constrain_unify(self, a: SymInt, b: SymInt) -> None:\n    if not isinstance(a, SymInt):\n        if not isinstance(b, SymInt):\n            assert a == b\n        else:\n            assert isinstance(b.node.expr, sympy.Symbol), 'constraining non-Symbols NYI'\n            assert b.node.shape_env is self\n            self.replacements[b.node.expr] = sympy.Integer(a)\n    else:\n        assert isinstance(a.node.expr, sympy.Symbol), 'constraining non-Symbols NYI'\n        assert a.node.shape_env is self\n        if not isinstance(b, SymInt):\n            self.replacements[a.node.expr] = sympy.Integer(b)\n        else:\n            assert a.node.shape_env is b.node.shape_env\n            assert isinstance(b.node.expr, sympy.Symbol), 'constraining non-Symbols NYI'\n            new_var = self._find(a.node.expr)\n            self.replacements[b.node.expr] = new_var",
    "docstring": "Given two SymInts, constrain them so that they must be equal. NB: this will not work with SymInts that represent nontrivial expressions (yet!)",
    "type": "method",
    "file_path": "pytorch\\torch\\fx\\experimental\\symbolic_shapes.py",
    "ast_data": "FunctionDef name:_constrain_unify arg:self arg:a arg:b arguments arg arg arg If Call If Call Compare Call Compare Assign Call Call Compare If Call Assign Call Compare Call Assign Call Assign Call"
  },
  {
    "library": "scipy",
    "name": "python",
    "source_code": "@click.option('--pythonpath', '-p', metavar='PYTHONPATH', default=None, help='Paths to prepend to PYTHONPATH')\n@spin.util.extend_command(spin.cmds.meson.python)\ndef python(*, parent_callback, pythonpath, **kwargs):\n    _set_pythonpath(pythonpath)\n    parent_callback(**kwargs)",
    "docstring": "🐍 Launch Python shell with PYTHONPATH set OPTIONS are passed through directly to Python, e.g.: spin python -c 'import sys; print(sys.path)'",
    "type": "function",
    "file_path": "scipy\\.spin\\cmds.py",
    "ast_data": "FunctionDef name:python arguments arg arg arg Call Call Call Call"
  },
  {
    "library": "numpy",
    "name": "min",
    "source_code": "@array_function_dispatch(_min_dispatcher)\ndef min(a, axis=None, out=None, keepdims=np._NoValue, initial=np._NoValue, where=np._NoValue):\n    return _wrapreduction(a, np.minimum, 'min', axis, None, out, keepdims=keepdims, initial=initial, where=where)",
    "docstring": "Return the minimum of an array or minimum along an axis. Parameters ---------- a : array_like Input data. axis : None or int or tuple of ints, optional Axis or axes along which to operate. By default, flattened input is used. If this is a tuple of ints, the minimum is selected over multiple axes, instead of a single axis or all the axes as before. out : ndarray, optional Alternative output array in which to place the result. Must be of the same shape and buffer length as the expected output. See :ref: for more details. keepdims : bool, optional If this is set to True, the axes which are reduced are left in the result as dimensions with size one. With this option, the result will broadcast correctly against the input array. If the default value is passed, then will not be passed through to the `ndarraykeepdims~numpy.ufunc.reduce~numpy.ufunc.reduceaaxisaxisaxis~numpy.min` argument. >>> np.min([6], initial=5) 5 >>> min([6], default=5) 6",
    "type": "function",
    "file_path": "numpy\\numpy\\_core\\fromnumeric.py",
    "ast_data": "FunctionDef name:min arg:a arg:axis arg:out arg:keepdims arg:initial arg:where arguments arg arg arg arg arg arg Return return:yes Call Call"
  },
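A few self-contained calls covering the parameters discussed above (axis, keepdims, initial, where); the last line mirrors the documented semantics where `initial` takes part in the reduction and `where` masks elements:

    import numpy as np

    a = np.arange(4).reshape(2, 2)  # [[0, 1], [2, 3]]
    print(np.min(a))                          # 0
    print(np.min(a, axis=1))                  # [0 2]
    print(np.min(a, axis=1, keepdims=True))   # [[0] [2]]
    print(np.min(a, initial=-1))              # -1 (initial participates)
    print(np.min(a, where=[False, True], initial=10, axis=1))  # [1 3]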
  {
    "library": "tensorflow",
    "name": "num_buckets",
    "source_code": "@property\ndef num_buckets(self):\n    return self.hash_bucket_size",
    "docstring": "Returns number of buckets in this sparse feature.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\feature_column\\feature_column_v2.py",
    "ast_data": "FunctionDef name:num_buckets arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_environment_var_to_network_endpoints",
    "source_code": "def _environment_var_to_network_endpoints(endpoints):\n    for endpoint in endpoints.split(','):\n        grpc_prefix = 'grpc://'\n        if endpoint.startswith(grpc_prefix):\n            endpoint = endpoint.split(grpc_prefix)[1]\n        parts = endpoint.split(':')\n        ip_address = parts[0]\n        port = _DEFAULT_ENDPOINT_PORT\n        if len(parts) > 1:\n            port = parts[1]\n        yield {'ipAddress': ip_address, 'port': port}",
    "docstring": "Yields a dict with ip address and port.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\tpu\\client\\client.py",
    "ast_data": "FunctionDef name:_environment_var_to_network_endpoints arg:endpoints arguments arg For Call Assign If Call Assign Call Assign Call Assign Assign If Compare Call Assign"
  },
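A standalone re-implementation of the same parsing, useful for seeing the expected output shape; the default-port value below is an assumption for illustration (the real constant lives alongside the function in client.py):

    _DEFAULT_ENDPOINT_PORT = "8470"  # assumed default, for illustration only

    def parse_endpoints(endpoints):
        for endpoint in endpoints.split(","):
            endpoint = endpoint.removeprefix("grpc://")
            ip_address, _, port = endpoint.partition(":")
            yield {"ipAddress": ip_address, "port": port or _DEFAULT_ENDPOINT_PORT}

    print(list(parse_endpoints("grpc://10.0.0.1:8470,10.0.0.2")))
    # [{'ipAddress': '10.0.0.1', 'port': '8470'}, {'ipAddress': '10.0.0.2', 'port': '8470'}]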
  {
    "library": "pytorch",
    "name": "_get_path_of_module",
    "source_code": "def _get_path_of_module(root: torch.nn.Module, submodule: torch.nn.Module) -> Optional[str]:\n    for n, p in root.named_modules():\n        if submodule is p:\n            return n\n    return None",
    "docstring": "Get the path (fully qualified name) of a submodule Example:: >> class M(torch.nn.Module): def __init__(self) -> None: self.linear = torch.nn.Linear(5, 5) def forward(self, x): return self.linear(x) >> m = M() >> l = m.linear >> _get_path_of_module(m, l) \"linear\"",
    "type": "function",
    "file_path": "pytorch\\torch\\ao\\quantization\\utils.py",
    "ast_data": "FunctionDef name:_get_path_of_module arg:root arg:submodule arguments arg arg For Call If Compare Return return:yes Return return:no"
  },
  {
    "library": "django",
    "name": "_set_creation_counter",
    "source_code": "def _set_creation_counter(self):\n    self.creation_counter = BaseManager.creation_counter\n    BaseManager.creation_counter += 1",
    "docstring": "Set the creation counter value for this instance and increment the class-level copy.",
    "type": "method",
    "file_path": "django\\django\\db\\models\\manager.py",
    "ast_data": "FunctionDef name:_set_creation_counter arg:self arguments arg Assign"
  },
  {
    "library": "pytorch",
    "name": "causal_lower_right",
    "source_code": "def causal_lower_right(*size) -> CausalBias:\n    assert len(size) == 2, 'causal_lower_right only supports 2D tensors'\n    seq_len_q, seq_len_kv = size\n    return CausalBias(CausalVariant.LOWER_RIGHT, seq_len_q, seq_len_kv)",
    "docstring": "Creates a lower-right triangular causal bias. This function generates a lower-right triangular matrix to represent causal attention bias with a diagonal offset set so that the inclusive values are aligned to the lower right corner of the matrix. The equivalent pytorch code for constructing this bias is: .. code-block:: python diagonal_offset = size[1] - size[0] torch.tril( torch.ones(size, dtype=torch.bool), diagonal=diagonal_offset, ) For instance, with , the materialized bias tensor will be: .. code-block:: text [[1, 1, 0, 0], [1, 1, 1, 0], [1, 1, 1, 1]] Args: size: The size of the bias matrix. Returns: CausalBias: The LOWER_RIGHT triangular causal bias variant.",
    "type": "function",
    "file_path": "pytorch\\torch\\nn\\attention\\bias.py",
    "ast_data": "FunctionDef name:causal_lower_right arguments arg Compare Call Assign Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "get_config",
    "source_code": "@abc.abstractmethod\ndef get_config(self):\n    config = {'name': self._name}\n    if self.clipnorm is not None:\n        config['clipnorm'] = self.clipnorm\n    if self.clipvalue is not None:\n        config['clipvalue'] = self.clipvalue\n    if self.global_clipnorm is not None:\n        config['global_clipnorm'] = self.global_clipnorm\n    return config",
    "docstring": "Returns the config of the optimizer. An optimizer config is a Python dictionary (serializable) containing the configuration of an optimizer. The same optimizer can be reinstantiated later (without any saved state) from this configuration. Returns: Python dictionary.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\optimizer_v2\\optimizer_v2.py",
    "ast_data": "FunctionDef name:get_config arg:self arguments arg Assign If Compare Assign If Compare Assign If Compare Assign Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "update",
    "source_code": "def update(self):\n    self._nav_stack.clear()\n    self.set_history_buttons()",
    "docstring": "Reset the Axes stack.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\backend_bases.py",
    "ast_data": "FunctionDef name:update arg:self arguments arg Call Call"
  },
  {
    "library": "tensorflow",
    "name": "variables",
    "source_code": "@property\ndef variables(self):\n    return tuple(self._func_graph.variables)",
    "docstring": "Sequence of variables for this function.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\polymorphic_function\\concrete_function.py",
    "ast_data": "FunctionDef name:variables arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_self_suppression",
    "source_code": "def _self_suppression(iou, _, iou_sum, iou_threshold):\n    batch_size = array_ops.shape(iou)[0]\n    can_suppress_others = math_ops.cast(array_ops.reshape(math_ops.reduce_max(iou, 1) < iou_threshold, [batch_size, -1, 1]), iou.dtype)\n    iou_after_suppression = array_ops.reshape(math_ops.cast(math_ops.reduce_max(can_suppress_others * iou, 1) < iou_threshold, iou.dtype), [batch_size, -1, 1]) * iou\n    iou_sum_new = math_ops.reduce_sum(iou_after_suppression, [1, 2])\n    return [iou_after_suppression, math_ops.reduce_any(iou_sum - iou_sum_new > iou_threshold), iou_sum_new, iou_threshold]",
    "docstring": "Suppress boxes in the same tile. Compute boxes that cannot be suppressed by others (i.e., can_suppress_others), and then use them to suppress boxes in the same tile. Args: iou: a tensor of shape [batch_size, num_boxes_with_padding] representing intersection over union. iou_sum: a scalar tensor. iou_threshold: a scalar tensor. Returns: iou_suppressed: a tensor of shape [batch_size, num_boxes_with_padding]. iou_diff: a scalar tensor representing whether any box is supressed in this step. iou_sum_new: a scalar tensor of shape [batch_size] that represents the iou sum after suppression. iou_threshold: a scalar tensor.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\image_ops_impl.py",
    "ast_data": "FunctionDef name:_self_suppression arg:iou arg:_ arg:iou_sum arg:iou_threshold arguments arg arg arg arg Assign Call Assign Call Call Compare Call Assign Call Call Compare Call Assign Call Return return:yes Call Compare"
  },
  {
    "library": "cherrypy",
    "name": "HitCounter",
    "source_code": "class HitCounter:\n    _cp_config = {'tools.sessions.on': True}\n\n    @cherrypy.expose\n    def index(self):\n        count = cherrypy.session.get('count', 0) + 1\n        cherrypy.session['count'] = count\n        return \"\\n            During your current session, you've viewed this\\n            page %s times! Your life is a patio of fun!\\n        \" % count",
    "docstring": "Hit counter app.",
    "type": "class",
    "file_path": "cherrypy\\cherrypy\\tutorial\\tut07_sessions.py",
    "ast_data": "ClassDef name:HitCounter Assign FunctionDef name:index arg:self arguments arg Assign Call Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_loop_fn_has_config",
    "source_code": "def _loop_fn_has_config(loop_fn):\n    if tf_inspect.isfunction(loop_fn):\n        argspec = tf_inspect.getargspec(loop_fn)\n        return PFOR_CONFIG_ARG in argspec.args\n    elif isinstance(loop_fn, functools.partial):\n        fn = loop_fn.func\n        argspec = tf_inspect.getargspec(fn)\n        return PFOR_CONFIG_ARG in argspec.args and PFOR_CONFIG_ARG not in loop_fn.keywords\n    else:\n        loop_class = tf_decorator.unwrap(loop_fn)[1]\n        if not hasattr(loop_class, '__call__'):\n            raise ValueError('`loop_fn` object did not have a __call__ method')\n        argspec = tf_inspect.getargspec(loop_class.__call__)\n        return PFOR_CONFIG_ARG in argspec.args",
    "docstring": "Test if has a argument.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\parallel_for\\control_flow_ops.py",
    "ast_data": "FunctionDef name:_loop_fn_has_config arg:loop_fn arguments arg If Call Assign Call Return return:yes Compare If Call Assign Assign Call Return return:yes BoolOp Compare Compare Assign Call If Call Raise Call Assign Call Return return:yes Compare"
  },
  {
    "library": "tensorflow",
    "name": "stateless_random_crop",
    "source_code": "@tf_export('image.stateless_random_crop', v1=[])\n@dispatch.add_dispatch_support\ndef stateless_random_crop(value, size, seed, name=None):\n    with ops.name_scope(name, 'random_crop', [value, size]) as name:\n        value = ops.convert_to_tensor(value, name='value')\n        size = ops.convert_to_tensor(size, dtype=dtypes.int32, name='size')\n        shape = array_ops.shape(value)\n        check = control_flow_assert.Assert(math_ops.reduce_all(shape >= size), ['Need value.shape >= size, got ', shape, size], summarize=1000)\n        shape = control_flow_ops.with_dependencies([check], shape)\n        limit = shape - size + 1\n        offset = stateless_random_ops.stateless_random_uniform(array_ops.shape(shape), dtype=size.dtype, maxval=size.dtype.max, seed=seed) % limit\n        return array_ops.slice(value, offset, size, name=name)",
    "docstring": "Randomly crops a tensor to a given size in a deterministic manner. Slices a shape portion out of at a uniformly chosen offset. Requires . If a dimension should not be cropped, pass the full size of that dimension. For example, RGB images can be cropped with . Guarantees the same results given the same independent of how many times the function is called, and independent of global seed settings (e.g. ). Usage Example: >>> image = [[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]] >>> seed = (1, 2) >>> tf.image.stateless_random_crop(value=image, size=(1, 2, 3), seed=seed) Args: value: Input tensor to crop. size: 1-D tensor with size the rank of . seed: A shape [2] Tensor, the seed to the random number generator. Must have dtype or . (When using XLA, only is allowed.) name: A name for this operation (optional). Returns: A cropped tensor of the same rank as and shape .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\random_crop_ops.py",
    "ast_data": "FunctionDef name:stateless_random_crop arg:value arg:size arg:seed arg:name arguments arg arg arg arg With Call Assign Call Assign Call Assign Call Assign Call Call Compare Assign Call Assign Assign Call Call Return return:yes Call Call"
  },
  {
    "library": "scipy",
    "name": "_sum_finite",
    "source_code": "def _sum_finite(x):\n    finite_x = np.isfinite(x)\n    bad_count = finite_x.size - np.count_nonzero(finite_x)\n    return (np.sum(x[finite_x]), bad_count)",
    "docstring": "For a 1D array x, return a tuple containing the sum of the finite values of x and the number of nonfinite values. This is a utility function used when evaluating the negative loglikelihood for a distribution and an array of samples. Examples -------- >>> import numpy as np >>> from scipy.stats._distn_infrastructure import _sum_finite >>> tot, nbad = _sum_finite(np.array([-2, -np.inf, 5, 1])) >>> tot 4.0 >>> nbad 1",
    "type": "function",
    "file_path": "scipy\\scipy\\stats\\_distn_infrastructure.py",
    "ast_data": "FunctionDef name:_sum_finite arg:x arguments arg Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, dtype, ragged_rank, row_splits_dtype=dtypes.int64):\n    row_splits_dtype = dtypes.as_dtype(row_splits_dtype)\n    self._dtype = dtype\n    self._ragged_rank = ragged_rank\n    self._row_splits_dtype = row_splits_dtype",
    "docstring": "Initializes a RaggedTensorType object. Args: dtype: data type of the 's inner values. ragged_rank: ragged_rank of the declared . row_splits_dtype: data type for the 's row splits. One of: or .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\ragged_tensor.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:dtype arg:ragged_rank arg:row_splits_dtype arguments arg arg arg arg Assign Call Assign Assign Assign"
  },
  {
    "library": "sphinx",
    "name": "get_config",
    "source_code": "def get_config(self, section: str, name: str, default: Any=_NO_DEFAULT) -> Any:\n    if section == 'theme':\n        if name == 'stylesheet':\n            value = ', '.join(self.stylesheets) or default\n        elif name == 'sidebars':\n            value = ', '.join(self.sidebar_templates) or default\n        elif name == 'pygments_style':\n            value = self.pygments_style_default or default\n        elif name == 'pygments_dark_style':\n            value = self.pygments_style_dark or default\n        else:\n            value = default\n    elif section == 'options':\n        value = self._options.get(name, default)\n    else:\n        msg = __('Theme configuration sections other than [theme] and [options] are not supported (tried to get a value from %r).')\n        raise ThemeError(msg)\n    if value is _NO_DEFAULT:\n        msg = __('setting %s.%s occurs in none of the searched theme configs') % (section, name)\n        raise ThemeError(msg)\n    return value",
    "docstring": "Return the value for a theme configuration setting, searching the base theme chain.",
    "type": "method",
    "file_path": "sphinx\\sphinx\\theming.py",
    "ast_data": "FunctionDef name:get_config arg:self arg:section arg:name arg:default arguments arg arg arg arg If Compare If Compare Assign BoolOp Call If Compare Assign BoolOp Call If Compare Assign BoolOp If Compare Assign BoolOp Assign If Compare Assign Call Assign Call Raise Call If Compare Assign Call Raise Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "short",
    "source_code": "def short(self):\n    return self._to(torch.short)",
    "docstring": "Casts this storage to short type.",
    "type": "method",
    "file_path": "pytorch\\torch\\storage.py",
    "ast_data": "FunctionDef name:short arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "django",
    "name": "_get",
    "source_code": "def _get(self, *args, **kwargs):\n    all_messages = []\n    for storage in self.storages:\n        messages, all_retrieved = storage._get()\n        if messages is None:\n            break\n        if messages:\n            self._used_storages.add(storage)\n        all_messages.extend(messages)\n        if all_retrieved:\n            break\n    return (all_messages, all_retrieved)",
    "docstring": "Get a single list of messages from all storage backends.",
    "type": "method",
    "file_path": "django\\django\\contrib\\messages\\storage\\fallback.py",
    "ast_data": "FunctionDef name:_get arg:self arguments arg arg arg Assign For Assign Call If Compare If Call Call If Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "erfinv",
    "source_code": "def erfinv(x, name='erfinv'):\n    with ops.name_scope(name, values=[x]):\n        x = ops.convert_to_tensor(x, name='x')\n        if x.dtype.as_numpy_dtype not in [np.float32, np.float64]:\n            raise TypeError('x.dtype=%s is not handled, see docstring for supported types.' % x.dtype)\n        return ndtri((x + 1.0) / 2.0) / np.sqrt(2)",
    "docstring": "The inverse function for erf, the error function. Args: x: of type , . name: Python string. A name for the operation (default=\"erfinv\"). Returns: x: with . Raises: TypeError: if is not floating-type.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\distributions\\special_math.py",
    "ast_data": "FunctionDef name:erfinv arg:x arg:name arguments arg arg With Call Assign Call If Compare Raise Call Return return:yes Call Call"
  },
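The implementation leans on the identity erfinv(x) = ndtri((x + 1)/2) / sqrt(2), i.e. the inverse error function expressed through the inverse standard-normal CDF. A quick numerical check of that identity with SciPy's public equivalents:

    import numpy as np
    from scipy.special import erfinv, ndtri

    x = np.linspace(-0.9, 0.9, 5)
    assert np.allclose(erfinv(x), ndtri((x + 1.0) / 2.0) / np.sqrt(2.0))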
  {
    "library": "scikit-learn",
    "name": "partial_fit",
    "source_code": "@_available_if_estimator_has('partial_fit')\ndef partial_fit(self, X, y, sample_weight=None, **partial_fit_params):\n    super().partial_fit(X, y, sample_weight=sample_weight, **partial_fit_params)",
    "docstring": "Incrementally fit the model to data, for each output variable. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) The input data. y : {array-like, sparse matrix} of shape (n_samples, n_outputs) Multi-output targets. sample_weight : array-like of shape (n_samples,), default=None Sample weights. If , then samples are equally weighted. Only supported if the underlying regressor supports sample weights. **partial_fit_params : dict of str -> object Parameters passed to the `enable_metadata_routing=TrueUser Guide `. .. versionadded:: 1.3 Returns ------- self : object Returns a fitted instance.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\multioutput.py",
    "ast_data": "FunctionDef name:partial_fit arg:self arg:X arg:y arg:sample_weight arguments arg arg arg arg arg Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "class_and_config_for_serialized_keras_object",
    "source_code": "def class_and_config_for_serialized_keras_object(config, module_objects=None, custom_objects=None, printable_module_name='object'):\n    if not isinstance(config, dict) or 'class_name' not in config or 'config' not in config:\n        raise ValueError('Improper config format: ' + str(config))\n    class_name = config['class_name']\n    cls = get_registered_object(class_name, custom_objects, module_objects)\n    if cls is None:\n        raise ValueError('Unknown {}: {}. Please ensure this object is passed to the `custom_objects` argument. See https://www.tensorflow.org/guide/keras/save_and_serialize#registering_the_custom_object for details.'.format(printable_module_name, class_name))\n    cls_config = config['config']\n    if isinstance(cls_config, list):\n        return (cls, cls_config)\n    deserialized_objects = {}\n    for key, item in cls_config.items():\n        if key == 'name':\n            deserialized_objects[key] = item\n        elif isinstance(item, dict) and '__passive_serialization__' in item:\n            deserialized_objects[key] = deserialize_keras_object(item, module_objects=module_objects, custom_objects=custom_objects, printable_module_name='config_item')\n        elif isinstance(item, str) and tf_inspect.isfunction(get_registered_object(item, custom_objects)):\n            deserialized_objects[key] = get_registered_object(item, custom_objects)\n    for key, item in deserialized_objects.items():\n        cls_config[key] = deserialized_objects[key]\n    return (cls, cls_config)",
    "docstring": "Returns the class name and config for a serialized keras object.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\utils\\generic_utils.py",
    "ast_data": "FunctionDef name:class_and_config_for_serialized_keras_object arg:config arg:module_objects arg:custom_objects arg:printable_module_name arguments arg arg arg arg If BoolOp Call Compare Compare Raise Call Call Assign Assign Call If Compare Raise Call Call Assign If Call Return return:yes Assign For Call If Compare Assign If BoolOp Call Compare Assign Call If BoolOp Call Call Call Assign Call For Call Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_make_callable_from_options",
    "source_code": "def _make_callable_from_options(self, callable_options):\n    self._extend_graph()\n    return BaseSession._Callable(self, callable_options)",
    "docstring": "Returns a handle to a \"callable\" with the given options. Args: callable_options: A protocol buffer message describing the computation that will be performed by the callable. Returns: A handle to the new callable.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\client\\session.py",
    "ast_data": "FunctionDef name:_make_callable_from_options arg:self arg:callable_options arguments arg arg Call Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "fetch_file",
    "source_code": "def fetch_file(url, folder=None, local_filename=None, sha256=None, n_retries=3, delay=1):\n    folder_from_url, filename_from_url = _derive_folder_and_filename_from_url(url)\n    if local_filename is None:\n        local_filename = filename_from_url\n    if folder is None:\n        folder = Path(get_data_home()) / folder_from_url\n        makedirs(folder, exist_ok=True)\n    remote_metadata = RemoteFileMetadata(filename=local_filename, url=url, checksum=sha256)\n    return _fetch_remote(remote_metadata, dirname=folder, n_retries=n_retries, delay=delay)",
    "docstring": "Fetch a file from the web if not already present in the local folder. If the file already exists locally (and the SHA256 checksums match when provided), the path to the local file is returned without re-downloading. .. versionadded:: 1.6 Parameters ---------- url : str URL of the file to download. folder : str or Path, default=None Directory to save the file to. If None, the file is downloaded in a folder with a name derived from the URL host name and path under scikit-learn data home folder. local_filename : str, default=None Name of the file to save. If None, the filename is inferred from the URL. sha256 : str, default=None SHA256 checksum of the file. If None, no checksum is verified. n_retries : int, default=3 Number of retries when HTTP errors are encountered. delay : int, default=1 Number of seconds between retries. Returns ------- file_path : Path Full path of the downloaded file.",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\datasets\\_base.py",
    "ast_data": "FunctionDef name:fetch_file arg:url arg:folder arg:local_filename arg:sha256 arg:n_retries arg:delay arguments arg arg arg arg arg arg Assign Call If Compare Assign If Compare Assign Call Call Call Assign Call Return return:yes Call"
  },
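Typical use of the public sklearn.datasets.fetch_file API described above; the URL is a placeholder, and passing a sha256 digest enables checksum verification:

    from sklearn.datasets import fetch_file

    path = fetch_file(
        "https://example.com/datasets/my_data.csv",  # placeholder URL
        sha256=None,  # supply a hex digest to verify the download
    )
    print(path)  # Path under the scikit-learn data home, derived from the URL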
  {
    "library": "matplotlib",
    "name": "get_cursor_data",
    "source_code": "def get_cursor_data(self, event):\n    return None",
    "docstring": "Return the cursor data for a given event. .. note:: This method is intended to be overridden by artist subclasses. As an end-user of Matplotlib you will most likely not call this method yourself. Cursor data can be used by Artists to provide additional context information for a given event. The default implementation just returns *None*. Subclasses can override the method and return arbitrary data. However, when doing so, they must ensure that can convert the data to a string representation. The only current use case is displaying the z-value of an in the status bar of a plot window, while moving the mouse. Parameters ---------- event : See Also -------- format_cursor_data",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\artist.py",
    "ast_data": "FunctionDef name:get_cursor_data arg:self arg:event arguments arg arg Return return:no"
  },
  {
    "library": "tensorflow",
    "name": "_EndPoint",
    "source_code": "class _EndPoint(collections.namedtuple('_EndPoint', ['convertible', 'index'])):\n    __slots__ = ()\n\n    def __str__(self):\n        return '{}[{}]'.format(self.convertible, self.index)",
    "docstring": "An endpoint in a graph.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\convert_to_constants.py",
    "ast_data": "ClassDef name:_EndPoint Call Assign FunctionDef name:__str__ arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "main",
    "source_code": "def main() -> None:\n    args = parse_arguments()\n    pr_number = args.PR_NUMBER\n    custom_target_dir = args.directory\n    strip_count = args.strip\n    if custom_target_dir:\n        if not os.path.isdir(custom_target_dir):\n            print(f\"Error: The specified target directory '{custom_target_dir}' does not exist.\")\n            sys.exit(1)\n        target_dir = custom_target_dir\n        print(f'Using custom target directory: {target_dir}')\n    else:\n        target_dir = get_pytorch_path()\n    repo_url = 'https://github.com/pytorch/pytorch'\n    with tempfile.TemporaryDirectory() as tmpdirname:\n        patch_file = download_patch(pr_number, repo_url, tmpdirname)\n        apply_patch(patch_file, target_dir, strip_count)",
    "docstring": "Main function to orchestrate the patch download and application process. Steps: 1. Parse command-line arguments to get the PR number, optional target directory, and strip count. 2. Retrieve the local PyTorch installation path or use the provided target directory. 3. Download the patch for the provided PR number. 4. Apply the patch to the specified directory with the given strip count.",
    "type": "function",
    "file_path": "pytorch\\tools\\nightly_hotpatch.py",
    "ast_data": "FunctionDef name:main arguments Assign Call Assign Assign Assign If If Call Call Call Assign Call Assign Call Assign With Call Assign Call Call"
  },
  {
    "library": "kornia",
    "name": "laplacian",
    "source_code": "def laplacian(input: Tensor, kernel_size: tuple[int, int] | int, border_type: str='reflect', normalized: bool=True) -> Tensor:\n    kernel = get_laplacian_kernel2d(kernel_size, device=input.device, dtype=input.dtype)[None, ...]\n    if normalized:\n        kernel = normalize_kernel2d(kernel)\n    return filter2d(input, kernel, border_type)",
    "docstring": "Create an operator that returns a tensor using a Laplacian filter. .. image:: _static/img/laplacian.png The operator smooths the given tensor with a laplacian kernel by convolving it to each channel. It supports batched operation. Args: input: the input image tensor with shape :math:. kernel_size: the size of the kernel. border_type: the padding mode to be applied before convolving. The expected modes are: `(B, C, H, W)here `__. Examples: >>> input = torch.rand(2, 4, 5, 5) >>> output = laplacian(input, 3) >>> output.shape torch.Size([2, 4, 5, 5])",
    "type": "function",
    "file_path": "kornia\\kornia\\filters\\laplacian.py",
    "ast_data": "FunctionDef name:laplacian arg:input arg:kernel_size arg:border_type arg:normalized arguments arg arg arg arg Assign Call If Assign Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "new",
    "source_code": "@staticmethod\ndef new(obj):\n    if isinstance(obj, training_lib.Model):\n        return ModelAttributes()\n    elif isinstance(obj, metrics.Metric):\n        return MetricAttributes()\n    elif isinstance(obj, recurrent.RNN):\n        return RNNAttributes()\n    elif isinstance(obj, base_layer.Layer):\n        return LayerAttributes()\n    else:\n        raise TypeError('Internal error during serialization: Expected Keras Layer object, got {} of type {}'.format(obj, type(obj)))",
    "docstring": "Returns a new SerializedAttribute object.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\saving\\saved_model\\serialized_attributes.py",
    "ast_data": "FunctionDef name:new arg:obj arguments arg If Call Return return:yes Call If Call Return return:yes Call If Call Return return:yes Call If Call Return return:yes Call Raise Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "from_dict",
    "source_code": "@staticmethod\ndef from_dict(chunk_dims: dict[str, int]):\n    kwargs_chunk_spec = map_aggregate(chunk_dims, lambda dim: TensorChunkSpec(dim))\n    return kwargs_chunk_spec",
    "docstring": "A helper for creating a dictionary of from a dictionary of chunk dimensions (int's). Example: >>> # xdoctest: +SKIP >>> # Chunk dimension 0 for the \"id\" argument, 1 for the \"mask\" argument >>> kwargs_chunk_spec = TensorChunkSpec.from_dict({\"id\": 0, \"mask\": 1})",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\pipelining\\microbatch.py",
    "ast_data": "FunctionDef name:from_dict arg:chunk_dims arguments arg Assign Call arguments arg Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "unpack",
    "source_code": "def unpack(self, summed_device_grad_packs):\n    aggregated_device_grads = []\n    for summed_device_grad_packs, device_grads_and_vars, device_shapes, device_sizes in zip(summed_device_grad_packs, self.grouped_grads_and_vars, self.all_device_shapes, self.all_device_sizes):\n        with ops.colocate_with(summed_device_grad_packs[0][0]):\n            device_grad_packs = [g for g, _ in summed_device_grad_packs]\n            device_grads_concat = array_ops.concat(device_grad_packs, 0)\n            grads_with_sizes = array_ops.split(device_grads_concat, device_sizes)\n            grads_with_shapes = [array_ops.reshape(grad, shape) for shape, grad in zip(device_shapes, grads_with_sizes)]\n            summed_device_grads = [(g, v) for g, (_, v) in zip(grads_with_shapes, device_grads_and_vars)]\n            aggregated_device_grads.append(summed_device_grads)\n    return aggregated_device_grads",
    "docstring": "Reverse the pack.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\cross_device_ops.py",
    "ast_data": "FunctionDef name:unpack arg:self arg:summed_device_grad_packs arguments arg arg Assign For Call With Call Assign Assign Call Assign Call Assign Call Call Assign Call Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "set_figheight",
    "source_code": "def set_figheight(self, val, forward=True):\n    self.set_size_inches(self.get_figwidth(), val, forward=forward)",
    "docstring": "Set the height of the figure in inches. Parameters ---------- val : float forward : bool See . See Also -------- matplotlib.figure.Figure.set_figwidth matplotlib.figure.Figure.set_size_inches",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\figure.py",
    "ast_data": "FunctionDef name:set_figheight arg:self arg:val arg:forward arguments arg arg arg Call Call"
  },
  {
    "library": "kornia",
    "name": "shear_x",
    "source_code": "def shear_x(min_mag: float, max_mag: float) -> OperationBase:\n    if min_mag != -max_mag:\n        raise ValueError(f'{ShearX.__name__} is a symmetric operation that `- min_mag == max_mag`. Got [{min_mag}, {max_mag}]')\n    return ShearX(None, 1.0, magnitude_range=(0.0, max_mag), symmetric_megnitude=True)",
    "docstring": "Return ShearX op.",
    "type": "function",
    "file_path": "kornia\\kornia\\augmentation\\auto\\rand_augment\\ops.py",
    "ast_data": "FunctionDef name:shear_x arg:min_mag arg:max_mag arguments arg arg If Compare Raise Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "__cuda_array_interface__",
    "source_code": "@property\ndef __cuda_array_interface__(self):\n    if has_torch_function_unary(self):\n        return handle_torch_function(Tensor.__cuda_array_interface__.__get__, (self,), self)\n    if not self.is_cuda:\n        raise AttributeError(f\"Can't get __cuda_array_interface__ on non-CUDA tensor type: {self.type()} If CUDA data is required use tensor.cuda() to copy tensor to device memory.\")\n    if self.is_sparse:\n        raise AttributeError(f\"Can't get __cuda_array_interface__ on sparse type: {self.type()} Use Tensor.to_dense() to convert to a dense tensor first.\")\n    if self.requires_grad:\n        raise RuntimeError(\"Can't get __cuda_array_interface__ on Variable that requires grad. If gradients aren't required, use var.detach() to get Variable that doesn't require grad.\")\n    typestr = _dtype_to_typestr(self.dtype)\n    itemsize = self.element_size()\n    shape = tuple(self.shape)\n    if self.is_contiguous():\n        strides = None\n    else:\n        strides = tuple((s * itemsize for s in self.stride()))\n    data_ptr = self.data_ptr() if self.numel() > 0 else 0\n    data = (data_ptr, False)\n    return dict(typestr=typestr, shape=shape, strides=strides, data=data, version=2)",
    "docstring": "Array view description for cuda tensors. See:",
    "type": "method",
    "file_path": "pytorch\\torch\\_tensor.py",
    "ast_data": "FunctionDef name:__cuda_array_interface__ arg:self arguments arg If Call Return return:yes Call If Raise Call Call If Raise Call Call If Raise Call Assign Call Assign Call Assign Call If Call Assign Assign Call Call Assign Compare Call Call Assign Return return:yes Call"
  },
  {
    "library": "django",
    "name": "do_cache",
    "source_code": "@register.tag('cache')\ndef do_cache(parser, token):\n    nodelist = parser.parse(('endcache',))\n    parser.delete_first_token()\n    tokens = token.split_contents()\n    if len(tokens) < 3:\n        raise TemplateSyntaxError(\"'%r' tag requires at least 2 arguments.\" % tokens[0])\n    if len(tokens) > 3 and tokens[-1].startswith('using='):\n        cache_name = parser.compile_filter(tokens[-1].removeprefix('using='))\n        tokens = tokens[:-1]\n    else:\n        cache_name = None\n    return CacheNode(nodelist, parser.compile_filter(tokens[1]), tokens[2], [parser.compile_filter(t) for t in tokens[3:]], cache_name)",
    "docstring": "This will cache the contents of a template fragment for a given amount of time. Usage:: {% load cache %} {% cache [expire_time] [fragment_name] %} .. some expensive processing .. {% endcache %} This tag also supports varying by a list of arguments:: {% load cache %} {% cache [expire_time] [fragment_name] [var1] [var2] .. %} .. some expensive processing .. {% endcache %} Optionally the cache to use may be specified thus:: {% cache .... using=\"cachename\" %} Each unique set of arguments will result in a unique cache entry.",
    "type": "function",
    "file_path": "django\\django\\templatetags\\cache.py",
    "ast_data": "FunctionDef name:do_cache arg:parser arg:token arguments arg arg Assign Call Call Assign Call If Compare Call Raise Call If BoolOp Compare Call Call Assign Call Call Assign Assign Return return:yes Call Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "get_run_key",
    "source_code": "def get_run_key(feed_dict, fetches):\n    return json.dumps(RunKey(get_flattened_names(feed_dict), get_flattened_names(fetches)))",
    "docstring": "Summarize the names of feeds and fetches as a RunKey JSON string. Args: feed_dict: The feed_dict given to the call. fetches: The fetches from the call. Returns: A JSON Array consisting of two items. They first items is a flattened Array of the names of the feeds. The second item is a flattened Array of the names of the fetches.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\debug\\lib\\common.py",
    "ast_data": "FunctionDef name:get_run_key arg:feed_dict arg:fetches arguments arg arg Return return:yes Call Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "reduce_sum",
    "source_code": "@dispatch.dispatch_for_api(math_ops.reduce_sum)\ndef reduce_sum(input_tensor: ragged_tensor.Ragged, axis=None, keepdims=None, name=None):\n    return ragged_reduce_aggregate(reduce_op=math_ops.reduce_sum, unsorted_segment_op=math_ops.unsorted_segment_sum, rt_input=input_tensor, axis=axis, keepdims=keepdims, name=name or 'RaggedReduceSum')",
    "docstring": "For docs, see: _RAGGED_REDUCE_DOCSTRING.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\ragged_math_ops.py",
    "ast_data": "FunctionDef name:reduce_sum arg:input_tensor arg:axis arg:keepdims arg:name arguments arg arg arg arg Return return:yes Call BoolOp Call"
  },
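Because of the `dispatch_for_api` decorator, calling the public `tf.math.reduce_sum` with a `RaggedTensor` routes through this overload; a short sketch:

```python
import tensorflow as tf

# The dispatch decorator above routes ragged inputs through this overload.
rt = tf.ragged.constant([[1, 2, 3], [4, 5]])
print(tf.math.reduce_sum(rt, axis=1))  # per-row sums: [6 9]
print(tf.math.reduce_sum(rt))          # total: 15
```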
  {
    "library": "seaborn",
    "name": "standardize",
    "source_code": "def standardize(self, val: Any) -> Any:\n    return val",
    "docstring": "Coerce flexible property value to standardized representation.",
    "type": "method",
    "file_path": "seaborn\\seaborn\\_core\\properties.py",
    "ast_data": "FunctionDef name:standardize arg:self arg:val arguments arg arg Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "_get_axis_line_edge_points",
    "source_code": "def _get_axis_line_edge_points(self, minmax, maxmin, position=None):\n    mb = [minmax, maxmin]\n    mb_rev = mb[::-1]\n    mm = [[mb, mb_rev, mb_rev], [mb_rev, mb_rev, mb], [mb, mb, mb]]\n    mm = mm[self.axes._vertical_axis][self._axinfo['i']]\n    juggled = self._axinfo['juggled']\n    edge_point_0 = mm[0].copy()\n    if position == 'lower' and mm[1][juggled[-1]] < mm[0][juggled[-1]] or (position == 'upper' and mm[1][juggled[-1]] > mm[0][juggled[-1]]):\n        edge_point_0[juggled[-1]] = mm[1][juggled[-1]]\n    else:\n        edge_point_0[juggled[0]] = mm[1][juggled[0]]\n    edge_point_1 = edge_point_0.copy()\n    edge_point_1[juggled[1]] = mm[1][juggled[1]]\n    return (edge_point_0, edge_point_1)",
    "docstring": "Get the edge points for the black bolded axis line.",
    "type": "method",
    "file_path": "matplotlib\\lib\\mpl_toolkits\\mplot3d\\axis3d.py",
    "ast_data": "FunctionDef name:_get_axis_line_edge_points arg:self arg:minmax arg:maxmin arg:position arguments arg arg arg arg Assign Assign Assign Assign Assign Assign Call If BoolOp BoolOp Compare Compare BoolOp Compare Compare Assign Assign Assign Call Assign Return return:yes"
  },
  {
    "library": "authlib",
    "name": "get_issuer",
    "source_code": "def get_issuer(self) -> str:\n    raise NotImplementedError()",
    "docstring": "The OP's Issuer Identifier URL. The value is used to fill the `` claim that is mandatory in signed userinfo:: def get_issuer(self) -> str: return \" This method must be implemented by developers to support JWT userinfo.",
    "type": "method",
    "file_path": "authlib\\authlib\\oidc\\core\\userinfo.py",
    "ast_data": "FunctionDef name:get_issuer arg:self arguments arg Raise Call"
  },
  {
    "library": "seaborn",
    "name": "Dash",
    "source_code": "@document_properties\n@dataclass\nclass Dash(Paths):\n    width: MappableFloat = Mappable(0.8, grouping=False)\n\n    def _setup_segments(self, data, orient):\n        ori = ['x', 'y'].index(orient)\n        xys = data[['x', 'y']].to_numpy().astype(float)\n        segments = np.stack([xys, xys], axis=1)\n        segments[:, 0, ori] -= data['width'] / 2\n        segments[:, 1, ori] += data['width'] / 2\n        return segments",
    "docstring": "A line mark drawn as an oriented segment for each datapoint. Examples -------- .. include:: ../docstrings/objects.Dash.rst",
    "type": "class",
    "file_path": "seaborn\\seaborn\\_marks\\line.py",
    "ast_data": "ClassDef name:Dash Call FunctionDef name:_setup_segments arg:self arg:data arg:orient arguments arg arg arg Assign Call Assign Call Call Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_SparseSoftmaxGrad",
    "source_code": "@ops.RegisterGradient('SparseSoftmax')\ndef _SparseSoftmaxGrad(op: ops.Operation, grad):\n    indices, shape = (op.inputs[0], op.inputs[2])\n    out_vals = op.outputs[0]\n    sp_output = sparse_tensor.SparseTensor(indices, out_vals, shape)\n    sp_grad = sparse_tensor.SparseTensor(indices, grad, shape)\n    sp_product = sparse_tensor.SparseTensor(indices, sp_output.values * sp_grad.values, shape)\n    sum_reduced = -sparse_ops.sparse_reduce_sum(sp_product, [-1], keepdims=True)\n    sp_sum = sparse_ops.sparse_dense_cwise_add(sp_grad, sum_reduced)\n    grad_x = sp_sum.values * sp_output.values\n    return [None, grad_x, None]",
    "docstring": "Gradients for SparseSoftmax. The calculation is the same as SoftmaxGrad: grad_x = grad_softmax * softmax - sum(grad_softmax * softmax) * softmax where we now only operate on the non-zero values present in the SparseTensors. Args: op: the SparseSoftmax op. grad: the upstream gradient w.r.t. the non-zero SparseSoftmax output values. Returns: Gradients w.r.t. the input (sp_indices, sp_values, sp_shape).",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\sparse_grad.py",
    "ast_data": "FunctionDef name:_SparseSoftmaxGrad arg:op arg:grad arguments arg arg Assign Assign Assign Call Assign Call Assign Call Assign Call Assign Call Assign Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "restore_saveables",
    "source_code": "def restore_saveables(self, tensor_saveables: Dict[str, saveable_object.SaveableObject], python_positions: List[restore_lib.CheckpointPosition], registered_savers: Optional[Dict[str, Dict[str, base.Trackable]]]=None, reader: py_checkpoint_reader.NewCheckpointReader=None) -> Optional[List[ops.Operation]]:\n    del registered_savers\n    restore_ops = []\n    if python_positions:\n        if reader is None:\n            reader = py_checkpoint_reader.NewCheckpointReader(self.save_path_string)\n        for position in python_positions:\n            key = position.object_proto.attributes[0].checkpoint_key\n            position.trackable.deserialize(reader.get_tensor(key))\n    if tensor_saveables:\n        validated_saveables = saveable_object_util.validate_and_slice_inputs(tensor_saveables)\n        validated_names = set((saveable.name for saveable in validated_saveables))\n        if set(tensor_saveables.keys()) != validated_names:\n            raise AssertionError('Saveable keys changed when validating. Got back %s, was expecting %s' % (tensor_saveables.keys(), validated_names))\n        new_restore_ops = _DSaver(self._mesh, validated_saveables).restore(self.save_path_tensor, self.options)\n        if not context.executing_eagerly():\n            for name, restore_op in sorted(new_restore_ops.items()):\n                restore_ops.append(restore_op)\n                assert name not in self.restore_ops_by_name\n                self.restore_ops_by_name[name] = restore_op\n    return restore_ops",
    "docstring": "Run or build restore operations for SaveableObjects. Args: tensor_saveables: s which correspond to Tensors. python_positions: s which correspond to Trackables bound to the checkpoint. registered_savers: a dict mapping saver names-> object name -> Trackable. This argument is not implemented for DTensorCheckpoint. reader: A CheckpointReader. Creates one lazily if None. Returns: When graph building, a list of restore operations, either cached or newly created, to restore .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\dtensor\\python\\d_checkpoint.py",
    "ast_data": "FunctionDef name:restore_saveables arg:self arg:tensor_saveables arg:python_positions arg:registered_savers arg:reader arguments arg arg arg arg arg Assign If If Compare Assign Call For Assign Call Call If Assign Call Assign Call If Compare Call Call Raise Call Call Assign Call Call If Call For Call Call Call Compare Assign Return return:yes"
  },
  {
    "library": "numpy",
    "name": "Ufunc",
    "source_code": "class Ufunc:\n\n    def __init__(self, nin, nout, identity, docstring, typereso, *type_descriptions, signature=None, indexed=''):\n        self.nin = nin\n        self.nout = nout\n        if identity is None:\n            identity = None_\n        self.identity = identity\n        self.docstring = docstring\n        self.typereso = typereso\n        self.type_descriptions = []\n        self.signature = signature\n        self.indexed = indexed\n        for td in type_descriptions:\n            self.type_descriptions.extend(td)\n        for td in self.type_descriptions:\n            td.finish_signature(self.nin, self.nout)\n        check_td_order(self.type_descriptions)",
    "docstring": "Description of a ufunc. Attributes ---------- nin : number of input arguments nout : number of output arguments identity : identity element for a two-argument function (like Zero) docstring : docstring for the ufunc typereso: type resolver function of type PyUFunc_TypeResolutionFunc type_descriptions : TypeDescription objects signature: a generalized ufunc signature (like for matmul) indexed: add indexed loops (ufunc.at) for these type characters",
    "type": "class",
    "file_path": "numpy\\numpy\\_core\\code_generators\\generate_umath.py",
    "ast_data": "ClassDef name:Ufunc FunctionDef name:__init__ arg:self arg:nin arg:nout arg:identity arg:docstring arg:typereso arguments arg arg arg arg arg arg arg arg arg Assign Assign If Compare Assign Assign Assign Assign Assign Assign Assign For Call For Call Call"
  },
  {
    "library": "scipy",
    "name": "_create_f_oneway_nan_result",
    "source_code": "def _create_f_oneway_nan_result(shape, axis, samples):\n    axis = normalize_axis_index(axis, len(shape))\n    shp = shape[:axis] + shape[axis + 1:]\n    f = np.full(shp, fill_value=_get_nan(*samples))\n    prob = f.copy()\n    return F_onewayResult(f[()], prob[()])",
    "docstring": "This is a helper function for f_oneway for creating the return values in certain degenerate conditions. It creates return values that are all nan with the appropriate shape for the given and .",
    "type": "function",
    "file_path": "scipy\\scipy\\stats\\_stats_py.py",
    "ast_data": "FunctionDef name:_create_f_oneway_nan_result arg:shape arg:axis arg:samples arguments arg arg arg Assign Call Call Assign Assign Call Call Assign Call Return return:yes Call"
  },
  {
    "library": "cherrypy",
    "name": "__init__",
    "source_code": "def __init__(self, path=None):\n    if path is None:\n        request = cherrypy.serving.request\n        path = request.script_name + request.path_info\n    self.args = (path,)\n    HTTPError.__init__(self, 404, \"The path '%s' was not found.\" % path)",
    "docstring": "Initialize an HTTP Not Found error.",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\_cperror.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:path arguments arg arg If Compare Assign Assign Assign Call"
  },
  {
    "library": "tensorflow",
    "name": "size",
    "source_code": "def size(self):\n    return len(self._funcs)",
    "docstring": "Returns how many functions are currently registered.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\script_ops.py",
    "ast_data": "FunctionDef name:size arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "django",
    "name": "remove",
    "source_code": "def remove(self, val):\n    del self[self.index(val)]",
    "docstring": "Standard list remove method",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\geos\\mutable_list.py",
    "ast_data": "FunctionDef name:remove arg:self arg:val arguments arg arg Call"
  },
  {
    "library": "tensorflow",
    "name": "_dimension_sizes",
    "source_code": "def _dimension_sizes(x):\n    dynamic_shape = array_ops.shape(x)\n    rank = x.get_shape().rank\n    rank_is_known = rank is not None\n    if rank_is_known and rank == 0:\n        return (1,)\n    if rank_is_known and rank > 0:\n        static_shape = x.get_shape().as_list()\n        sizes = [int(size) if size is not None else dynamic_shape[i] for i, size in enumerate(static_shape)]\n        return sizes\n    has_rank_zero = math_ops.equal(array_ops.rank(x), 0)\n    return cond.cond(has_rank_zero, lambda: array_ops.constant([1]), lambda: dynamic_shape)",
    "docstring": "Gets the dimension sizes of a tensor . If a size can be determined statically it is returned as an integer, otherwise as a tensor. If is a scalar it is treated as rank 1 size 1. Args: x: A . Returns: Dimension sizes.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\check_ops.py",
    "ast_data": "FunctionDef name:_dimension_sizes arg:x arguments arg Assign Call Assign Call Assign Compare If BoolOp Compare Return return:yes If BoolOp Compare Assign Call Call Assign Compare Call Call Return return:yes Assign Call Call Return return:yes Call arguments Call arguments"
  },
  {
    "library": "matplotlib",
    "name": "to_dense",
    "source_code": "def to_dense(self):\n    ret = np.zeros([self.n, self.m], dtype=np.float64)\n    nvals = self.vals.size\n    for i in range(nvals):\n        ret[self.rows[i], self.cols[i]] += self.vals[i]\n    return ret",
    "docstring": "Return a dense matrix representing self, mainly for debugging purposes.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\tri\\_triinterpolate.py",
    "ast_data": "FunctionDef name:to_dense arg:self arguments arg Assign Call Assign For Call Return return:yes"
  },
  {
    "library": "django",
    "name": "unsign",
    "source_code": "def unsign(self, value, max_age=None):\n    result = super().unsign(value)\n    value, timestamp = result.rsplit(self.sep, 1)\n    timestamp = b62_decode(timestamp)\n    if max_age is not None:\n        if isinstance(max_age, datetime.timedelta):\n            max_age = max_age.total_seconds()\n        age = time.time() - timestamp\n        if age > max_age:\n            raise SignatureExpired('Signature age %s > %s seconds' % (age, max_age))\n    return value",
    "docstring": "Retrieve original value and check it wasn't signed more than max_age seconds ago.",
    "type": "method",
    "file_path": "django\\django\\core\\signing.py",
    "ast_data": "FunctionDef name:unsign arg:self arg:value arg:max_age arguments arg arg arg Assign Call Call Assign Call Assign Call If Compare If Call Assign Call Assign Call If Compare Raise Call Return return:yes"
  },
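A runnable sketch of the timestamped round trip; the standalone `settings.configure` call and the key value are placeholders so the example works outside a Django project:

```python
import datetime

from django.conf import settings

# Standalone settings so the sketch runs outside a project; the key is a
# placeholder, not a real secret.
settings.configure(SECRET_KEY="insecure-example-key")

from django.core.signing import SignatureExpired, TimestampSigner

signer = TimestampSigner()
token = signer.sign("hello")

# max_age may be given in seconds or as a timedelta.
assert signer.unsign(token, max_age=60) == "hello"
assert signer.unsign(token, max_age=datetime.timedelta(minutes=1)) == "hello"

try:
    signer.unsign(token, max_age=-1)  # negative max_age forces expiry here
except SignatureExpired as exc:
    print(exc)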
  {
    "library": "pytorch",
    "name": "get",
    "source_code": "def get(self, key: str, default: Optional[Any]=None) -> Any:\n    return self[key] if key in self else default",
    "docstring": "Return the parameter associated with key if present. Otherwise return default if provided, None if not. Args: key (str): key to get from the ParameterDict default (Parameter, optional): value to return if key not present",
    "type": "method",
    "file_path": "pytorch\\torch\\nn\\modules\\container.py",
    "ast_data": "FunctionDef name:get arg:self arg:key arg:default arguments arg arg arg Return return:yes Compare"
  },
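A short usage sketch of the dict-style lookup with a fallback default:

```python
import torch
import torch.nn as nn

params = nn.ParameterDict({"weight": nn.Parameter(torch.zeros(3))})
print(params.get("weight").shape)      # torch.Size([3])
print(params.get("bias"))              # None when the key is absent
print(params.get("bias", "fallback"))  # explicit default is returned instead
```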
  {
    "library": "pytorch",
    "name": "apply_replacements",
    "source_code": "def apply_replacements(replacements: dict[str, str], text: str) -> str:\n    for before, after in replacements.items():\n        text = text.replace(before, after)\n    return text",
    "docstring": "Applies the given replacements within the text. Args: replacements (dict): Mapping of str -> str replacements. text (str): Text in which to make replacements. Returns: Text with replacements applied, if any.",
    "type": "function",
    "file_path": "pytorch\\tools\\setup_helpers\\gen_version_header.py",
    "ast_data": "FunctionDef name:apply_replacements arg:replacements arg:text arguments arg arg For Call Assign Call Return return:yes"
  },
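Since this is a pure string transform, it is easy to exercise in isolation; the helper is restated inline so the sketch is self-contained, and the `@...@` keys are illustrative placeholders rather than the exact markers the version-header generator uses:

```python
# Restated inline from the entry above so the sketch is self-contained.
def apply_replacements(replacements: dict[str, str], text: str) -> str:
    for before, after in replacements.items():
        text = text.replace(before, after)
    return text

template = "#define TORCH_VERSION \"@VERSION@+@SHA@\""  # hypothetical markers
print(apply_replacements({"@VERSION@": "2.1.0", "@SHA@": "deadbeef"}, template))
# #define TORCH_VERSION "2.1.0+deadbeef"
```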
  {
    "library": "tensorflow",
    "name": "variable_creator_scope",
    "source_code": "def variable_creator_scope(self, next_creator, **kwargs):\n    collections = kwargs.pop('collections', None)\n    v = None\n    with ops.name_scope(kwargs.get('name', None), 'Variable', skip_on_eager=False) as name:\n        variable_name = ops.name_from_scope_name(name)\n        kwargs['name'] = name\n    if self._share_variables:\n        v = self._variables_by_name.get(variable_name, None)\n    if v is None:\n        v = next_creator(**kwargs)\n        self._variables_by_name[variable_name] = v\n    if collections is None:\n        collections = [ops.GraphKeys.GLOBAL_VARIABLES]\n    if v.trainable and ops.GraphKeys.TRAINABLE_VARIABLES not in collections:\n        collections = list(collections) + [ops.GraphKeys.TRAINABLE_VARIABLES]\n    ops.add_to_collections(collections, v)\n    return v",
    "docstring": "Creates variables & adds them to collections to match legacy code.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\wrap_function.py",
    "ast_data": "FunctionDef name:variable_creator_scope arg:self arg:next_creator arguments arg arg arg Assign Call Assign With Call Call Assign Call Assign If Assign Call If Compare Assign Call Assign If Compare Assign If BoolOp Compare Assign Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "avg_pool",
    "source_code": "@tf_export(v1=['nn.avg_pool', 'nn.avg_pool2d'])\n@dispatch.add_dispatch_support\ndef avg_pool(value, ksize, strides, padding, data_format='NHWC', name=None, input=None):\n    with ops.name_scope(name, 'AvgPool', [value]) as name:\n        value = deprecation.deprecated_argument_lookup('input', input, 'value', value)\n        if data_format is None:\n            data_format = 'NHWC'\n        channel_index = 1 if data_format.startswith('NC') else 3\n        ksize = _get_sequence(ksize, 2, channel_index, 'ksize')\n        strides = _get_sequence(strides, 2, channel_index, 'strides')\n        return gen_nn_ops.avg_pool(value, ksize=ksize, strides=strides, padding=padding, data_format=data_format, name=name)",
    "docstring": "Performs the average pooling on the input. Each entry in is the mean of the corresponding size window in . Args: value: A 4-D of shape and type , , , , or . ksize: An int or list of that has length , or . The size of the window for each dimension of the input tensor. strides: An int or list of that has length , or . The stride of the sliding window for each dimension of the input tensor. padding: A string, either or . The padding algorithm. See the \"returns\" section of for details. data_format: A string. 'NHWC' and 'NCHW' are supported. name: Optional name for the operation. input: Alias for value. Returns: A with the same type as . The average pooled output tensor.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\nn_ops.py",
    "ast_data": "FunctionDef name:avg_pool arg:value arg:ksize arg:strides arg:padding arg:data_format arg:name arg:input arguments arg arg arg arg arg arg arg With Call Assign Call If Compare Assign Assign Call Assign Call Assign Call Return return:yes Call Call"
  },
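A minimal sketch through the v1 endpoint documented above; the integer `ksize`/`strides` are expanded per dimension by `_get_sequence`:

```python
import tensorflow as tf

x = tf.reshape(tf.range(16, dtype=tf.float32), [1, 4, 4, 1])
y = tf.compat.v1.nn.avg_pool(x, ksize=2, strides=2, padding="VALID")
print(y.shape)                # (1, 2, 2, 1)
print(y[0, 0, 0, 0].numpy())  # 2.5 == mean of the top-left 2x2 window
```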
  {
    "library": "tensorflow",
    "name": "reduce_concat",
    "source_code": "def reduce_concat(self, x):\n    return self.reduce(lambda y: y, x)",
    "docstring": "Performs a concat reduction on across pfor iterations. Note that this currently may not work inside a control flow construct. Args: x: an unvectorized Tensor. Returns: A Tensor that has rank one higher than . The value is the vectorized version of , i.e. stacking the value of across different pfor iterations.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\parallel_for\\pfor.py",
    "ast_data": "FunctionDef name:reduce_concat arg:self arg:x arguments arg arg Return return:yes Call arguments arg"
  },
  {
    "library": "pytorch",
    "name": "_validate_device_dim_dtype_shape",
    "source_code": "@classmethod\ndef _validate_device_dim_dtype_shape(cls, original_tensor: torch.Tensor) -> None:\n    if not original_tensor.is_cuda:\n        raise RuntimeError(f'Error original_tensor.device= {original_tensor.device} is not supported! Only CUDA tensors are currently supported.')\n    if original_tensor.dim() != 2:\n        raise RuntimeError(f'Error original_tensor.dim = {original_tensor.dim()} is not supported! Only 2d tensors are currently supported.')\n    if not original_tensor.is_contiguous():\n        raise RuntimeError('Error original_tensor is not contiguous!Only contiguous tensors are currently supported.')\n    if original_tensor.dtype not in cls._DTYPE_SHAPE_CONSTRAINTS:\n        raise RuntimeError(f'Error original_tensor.dtype {original_tensor.dtype} is not a supported dtype for {cls}!')\n    m, n = original_tensor.shape\n    min_rows = cls._DTYPE_SHAPE_CONSTRAINTS[original_tensor.dtype].sparse_min_rows\n    min_cols = cls._DTYPE_SHAPE_CONSTRAINTS[original_tensor.dtype].sparse_min_cols\n    if m < min_rows or m % min_rows or n < min_cols or n % min_cols:\n        raise RuntimeError(f'Error original_tensor.shape {original_tensor.shape} is not supported! Both dimensions must be larger or equal than and a multiple of ({min_rows}, {min_cols})')",
    "docstring": "Assert that the given tensor is valid for semi-structured sparse compression.",
    "type": "method",
    "file_path": "pytorch\\torch\\sparse\\semi_structured.py",
    "ast_data": "FunctionDef name:_validate_device_dim_dtype_shape arg:cls arg:original_tensor arguments arg arg If Raise Call If Compare Call Raise Call Call If Call Raise Call If Compare Raise Call Assign Assign Assign If BoolOp Compare Compare Raise Call"
  },
  {
    "library": "tensorflow",
    "name": "_BesselK0eGrad",
    "source_code": "@ops.RegisterGradient('BesselK0e')\ndef _BesselK0eGrad(op: ops.Operation, grad):\n    x = op.inputs[0]\n    y = op.outputs[0]\n    with ops.control_dependencies([grad]):\n        partial_x = y - special_math_ops.bessel_k1e(x)\n        return grad * partial_x",
    "docstring": "Compute gradient of bessel_k0e(x) with respect to its argument.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\math_grad.py",
    "ast_data": "FunctionDef name:_BesselK0eGrad arg:op arg:grad arguments arg arg Assign Assign With Call Assign Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "Future",
    "source_code": "class Future(torch._C.Future, Generic[T], metaclass=_PyFutureMeta):\n\n    def __init__(self, *, devices: Optional[list[Union[int, str, torch.device]]]=None):\n        if devices is None:\n            devices = []\n        super().__init__([torch.device(d) for d in devices])\n\n    def done(self) -> bool:\n        return super().done()\n\n    def wait(self) -> T:\n        return super().wait()\n\n    def value(self) -> T:\n        return super().value()\n\n    def then(self, callback: Callable[[Future[T]], S]) -> Future[S]:\n        return cast(Future[S], super().then(callback))\n\n    def add_done_callback(self, callback: Callable[[Future[T]], None]) -> None:\n        super().add_done_callback(callback)\n\n    def set_result(self, result: T) -> None:\n        super().set_result(result)\n\n    def set_exception(self, result: T) -> None:\n        assert isinstance(result, Exception), f'{result} is of type {type(result)}, not an Exception.'\n\n        def raise_error(fut_result):\n            raise fut_result\n        super()._set_unwrap_func(raise_error)\n        self.set_result(result)",
    "docstring": "Wrapper around a `~torch.distributed.rpc.rpc_async`. It also exposes a set of APIs to add callback functions and set results. .. warning:: GPU support is a beta feature, subject to changes.",
    "type": "class",
    "file_path": "pytorch\\torch\\futures\\__init__.py",
    "ast_data": "ClassDef name:Future FunctionDef name:__init__ arg:self arguments arg arg If Compare Assign Call Call Call FunctionDef name:done arg:self arguments arg Return return:yes Call Call FunctionDef name:wait arg:self arguments arg Return return:yes Call Call FunctionDef name:value arg:self arguments arg Return return:yes Call Call FunctionDef name:then arg:self arg:callback arguments arg arg Return return:yes Call Call Call FunctionDef name:add_done_callback arg:self arg:callback arguments arg arg Call Call FunctionDef name:set_result arg:self arg:result arguments arg arg Call Call FunctionDef name:set_exception arg:self arg:result arguments arg arg Call Call FunctionDef name:raise_error arg:fut_result arguments arg Raise Call Call Call"
  },
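A short sketch of the chaining API; `then()` returns a new `Future` that completes with the callback's result once the original future is marked done:

```python
import torch

fut = torch.futures.Future()

# The callback receives the completed future; then() returns a new Future.
chained = fut.then(lambda f: f.wait() + 1)

fut.set_result(41)
assert fut.done() and fut.value() == 41
assert chained.wait() == 42
```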
  {
    "library": "cherrypy",
    "name": "__getitem__",
    "source_code": "def __getitem__(self, key):\n    if not self.loaded:\n        self.load()\n    return self._data[key]",
    "docstring": "Retrieve a session-stored object.",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\lib\\sessions.py",
    "ast_data": "FunctionDef name:__getitem__ arg:self arg:key arguments arg arg If Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "trainable_variables",
    "source_code": "@property\ndef trainable_variables(self):\n    return tuple(self._func_graph.trainable_variables)",
    "docstring": "Sequence of trainable variables for this function.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\polymorphic_function\\concrete_function.py",
    "ast_data": "FunctionDef name:trainable_variables arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "numpy",
    "name": "msvc_runtime_library",
    "source_code": "def msvc_runtime_library():\n    ver = msvc_runtime_major()\n    if ver:\n        if ver < 140:\n            return 'msvcr%i' % ver\n        else:\n            return 'vcruntime%i' % ver\n    else:\n        return None",
    "docstring": "Return name of MSVC runtime library if Python was built with MSVC >= 7",
    "type": "function",
    "file_path": "numpy\\numpy\\distutils\\misc_util.py",
    "ast_data": "FunctionDef name:msvc_runtime_library arguments Assign Call If If Compare Return return:yes Return return:yes Return return:no"
  },
  {
    "library": "scikit-learn",
    "name": "_check_parameters",
    "source_code": "@abstractmethod\ndef _check_parameters(self, X):\n    pass",
    "docstring": "Check initial parameters of the derived class. Parameters ---------- X : array-like of shape (n_samples, n_features)",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\mixture\\_base.py",
    "ast_data": "FunctionDef name:_check_parameters arg:self arg:X arguments arg arg"
  },
  {
    "library": "kornia",
    "name": "ycbcr_to_rgb",
    "source_code": "def ycbcr_to_rgb(image: Tensor) -> Tensor:\n    if not isinstance(image, Tensor):\n        raise TypeError(f'Input type is not a Tensor. Got {type(image)}')\n    if len(image.shape) < 3 or image.shape[-3] != 3:\n        raise ValueError(f'Input size must have a shape of (*, 3, H, W). Got {image.shape}')\n    y: Tensor = image[..., 0, :, :]\n    cb: Tensor = image[..., 1, :, :]\n    cr: Tensor = image[..., 2, :, :]\n    delta: float = 0.5\n    cb_shifted: Tensor = cb - delta\n    cr_shifted: Tensor = cr - delta\n    r: Tensor = y + 1.403 * cr_shifted\n    g: Tensor = y - 0.714 * cr_shifted - 0.344 * cb_shifted\n    b: Tensor = y + 1.773 * cb_shifted\n    return torch.stack([r, g, b], -3).clamp(0, 1)",
    "docstring": "Convert an YCbCr image to RGB. The image data is assumed to be in the range of (0, 1). Args: image: YCbCr Image to be converted to RGB with shape :math:. Returns: RGB version of the image with shape :math:. Examples: >>> input = torch.rand(2, 3, 4, 5) >>> output = ycbcr_to_rgb(input) # 2x3x4x5",
    "type": "function",
    "file_path": "kornia\\kornia\\color\\ycbcr.py",
    "ast_data": "FunctionDef name:ycbcr_to_rgb arg:image arguments arg If Call Raise Call Call If BoolOp Compare Call Compare Raise Call Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "get_host",
    "source_code": "def get_host(self) -> str:\n    return self._host",
    "docstring": "Return the host the server is running on.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\elastic\\rendezvous\\etcd_server.py",
    "ast_data": "FunctionDef name:get_host arg:self arguments arg Return return:yes"
  },
  {
    "library": "scipy",
    "name": "Deb03",
    "source_code": "class Deb03(Benchmark):\n    change_dimensionality = True\n\n    def __init__(self, dimensions=2):\n        Benchmark.__init__(self, dimensions)\n        self._bounds = list(zip([0.0] * self.N, [1.0] * self.N))\n        self.global_optimum = [[0.93388314, 0.68141781]]\n        self.fglob = -1.0\n\n    def fun(self, x, *args):\n        self.nfev += 1\n        return -(1.0 / self.N) * sum(sin(5 * pi * (x ** 0.75 - 0.05)) ** 6.0)",
    "docstring": "Deb 3 objective function. This class defines the Deb 3 [1]_ global optimization problem. This is a multimodal minimization problem defined as follows: .. math:: f_{\\text{Deb03}}(x) = - \\frac{1}{N} \\sum_{i=1}^n \\sin^6 \\left[ 5 \\pi \\left ( x_i^{3/4} - 0.05 \\right) \\right ] Here, :math: represents the number of dimensions and :math: for :math:. *Global optimum*: :math:. The number of global minima is :math: that are evenly spaced in the function landscape, where :math: represents the dimension of the problem. .. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions For Global Optimization Problems Int. Journal of Mathematical Modelling and Numerical Optimisation, 2013, 4, 150-194.",
    "type": "class",
    "file_path": "scipy\\benchmarks\\benchmarks\\go_benchmark_functions\\go_funcs_D.py",
    "ast_data": "ClassDef name:Deb03 Assign FunctionDef name:__init__ arg:self arg:dimensions arguments arg arg Call Assign Call Call Assign Assign FunctionDef name:fun arg:self arg:x arguments arg arg arg Return return:yes Call Call"
  },
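The reported optimum can be checked by hand using only the expression from `fun` above; each term hits a minimum of the sixth-power sine, so the average comes out at the stated `fglob`:

```python
import numpy as np

# Evaluate the objective at the listed 2-D global optimum.
x = np.array([0.93388314, 0.68141781])
f = -(1.0 / x.size) * np.sum(np.sin(5 * np.pi * (x ** 0.75 - 0.05)) ** 6.0)
print(f)  # ~ -1.0, matching fglob
```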
  {
    "library": "pytorch",
    "name": "scatter",
    "source_code": "def scatter(tensors, src=0, group=group.WORLD):\n    return _Scatter.apply(src, group, *tensors)",
    "docstring": "Scatters a list of tensors to all processes in a group. Each process will receive exactly one tensor and store its data in the `. src (int, optional): Source rank (default is 0). group (ProcessGroup, optional): The process group to work on. Returns: Tensor: Output tensor from the scatter operation.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\nn\\functional.py",
    "ast_data": "FunctionDef name:scatter arg:tensors arg:src arg:group arguments arg arg arg Return return:yes Call"
  },
  {
    "library": "django",
    "name": "MessageMiddleware",
    "source_code": "class MessageMiddleware(MiddlewareMixin):\n\n    def process_request(self, request):\n        request._messages = default_storage(request)\n\n    def process_response(self, request, response):\n        if hasattr(request, '_messages'):\n            unstored_messages = request._messages.update(response)\n            if unstored_messages and settings.DEBUG:\n                raise ValueError('Not all temporary messages could be stored.')\n        return response",
    "docstring": "Middleware that handles temporary messages.",
    "type": "class",
    "file_path": "django\\django\\contrib\\messages\\middleware.py",
    "ast_data": "ClassDef name:MessageMiddleware FunctionDef name:process_request arg:self arg:request arguments arg arg Assign Call FunctionDef name:process_response arg:self arg:request arg:response arguments arg arg arg If Call Assign Call If BoolOp Raise Call Return return:yes"
  },
  {
    "library": "django",
    "name": "clear_checkbox_id",
    "source_code": "def clear_checkbox_id(self, name):\n    return name + '_id'",
    "docstring": "Given the name of the clear checkbox input, return the HTML id for it.",
    "type": "method",
    "file_path": "django\\django\\forms\\widgets.py",
    "ast_data": "FunctionDef name:clear_checkbox_id arg:self arg:name arguments arg arg Return return:yes"
  },
  {
    "library": "seaborn",
    "name": "join",
    "source_code": "def join(self, data: DataSource, variables: dict[str, VariableSpec] | None) -> PlotData:\n    if data is None:\n        data = self.source_data\n    if not variables:\n        variables = self.source_vars\n    disinherit = [k for k, v in variables.items() if v is None]\n    new = PlotData(data, variables)\n    drop_cols = [k for k in self.frame if k in new.frame or k in disinherit]\n    parts = [self.frame.drop(columns=drop_cols), new.frame]\n    frame = pd.concat(parts, axis=1, sort=False, copy=False)\n    names = {k: v for k, v in self.names.items() if k not in disinherit}\n    names.update(new.names)\n    ids = {k: v for k, v in self.ids.items() if k not in disinherit}\n    ids.update(new.ids)\n    new.frame = frame\n    new.names = names\n    new.ids = ids\n    new.source_data = self.source_data\n    new.source_vars = self.source_vars\n    return new",
    "docstring": "Add, replace, or drop variables and return as a new dataset.",
    "type": "method",
    "file_path": "seaborn\\seaborn\\_core\\data.py",
    "ast_data": "FunctionDef name:join arg:self arg:data arg:variables arguments arg arg arg If Compare Assign If Assign Assign Call Compare Assign Call Assign BoolOp Compare Compare Assign Call Assign Call Assign Call Compare Call Assign Call Compare Call Assign Assign Assign Assign Assign Return return:yes"
  },
  {
    "library": "numpy",
    "name": "sort_complex",
    "source_code": "@array_function_dispatch(_sort_complex)\ndef sort_complex(a):\n    b = array(a, copy=True)\n    b.sort()\n    if not issubclass(b.dtype.type, _nx.complexfloating):\n        if b.dtype.char in 'bhBH':\n            return b.astype('F')\n        elif b.dtype.char == 'g':\n            return b.astype('G')\n        else:\n            return b.astype('D')\n    else:\n        return b",
    "docstring": "Sort a complex array using the real part first, then the imaginary part. Parameters ---------- a : array_like Input array Returns ------- out : complex ndarray Always returns a sorted complex array. Examples -------- >>> import numpy as np >>> np.sort_complex([5, 3, 6, 2, 1]) array([1.+0.j, 2.+0.j, 3.+0.j, 5.+0.j, 6.+0.j]) >>> np.sort_complex([1 + 2j, 2 - 1j, 3 - 2j, 3 - 3j, 3 + 5j]) array([1.+2.j, 2.-1.j, 3.-3.j, 3.-2.j, 3.+5.j])",
    "type": "function",
    "file_path": "numpy\\numpy\\lib\\_function_base_impl.py",
    "ast_data": "FunctionDef name:sort_complex arg:a arguments arg Assign Call Call If Call If Compare Return return:yes Call If Compare Return return:yes Call Return return:yes Call Return return:yes Call"
  },
  {
    "library": "django",
    "name": "SpatialiteSpatialRefSys",
    "source_code": "class SpatialiteSpatialRefSys(models.Model, SpatialRefSysMixin):\n    srid = models.IntegerField(primary_key=True)\n    auth_name = models.CharField(max_length=256)\n    auth_srid = models.IntegerField()\n    ref_sys_name = models.CharField(max_length=256)\n    proj4text = models.CharField(max_length=2048)\n    srtext = models.CharField(max_length=2048)\n\n    class Meta:\n        app_label = 'gis'\n        db_table = 'spatial_ref_sys'\n        managed = False\n\n    @property\n    def wkt(self):\n        return self.srtext",
    "docstring": "The 'spatial_ref_sys' table from SpatiaLite.",
    "type": "class",
    "file_path": "django\\django\\contrib\\gis\\db\\backends\\spatialite\\models.py",
    "ast_data": "ClassDef name:SpatialiteSpatialRefSys Assign Call Assign Call Assign Call Assign Call Assign Call Assign Call ClassDef name:Meta Assign Assign Assign FunctionDef name:wkt arg:self arguments arg Return return:yes"
  },
  {
    "library": "seaborn",
    "name": "_add_axis_labels",
    "source_code": "def _add_axis_labels(self, ax, default_x='', default_y=''):\n    if not ax.get_xlabel():\n        x_visible = any((t.get_visible() for t in ax.get_xticklabels()))\n        ax.set_xlabel(self.variables.get('x', default_x), visible=x_visible)\n    if not ax.get_ylabel():\n        y_visible = any((t.get_visible() for t in ax.get_yticklabels()))\n        ax.set_ylabel(self.variables.get('y', default_y), visible=y_visible)",
    "docstring": "Add axis labels if not present, set visibility to match ticklabels.",
    "type": "method",
    "file_path": "seaborn\\seaborn\\_base.py",
    "ast_data": "FunctionDef name:_add_axis_labels arg:self arg:ax arg:default_x arg:default_y arguments arg arg arg arg If Call Assign Call Call Call Call Call If Call Assign Call Call Call Call Call"
  },
  {
    "library": "scikit-learn",
    "name": "_fit",
    "source_code": "def _fit(self, X, n_components, random_state):\n    transform_algorithm = 'lasso_' + self.method\n    est = MiniBatchDictionaryLearning(n_components=n_components, alpha=self.alpha, max_iter=self.max_iter, dict_init=None, batch_size=self.batch_size, shuffle=self.shuffle, n_jobs=self.n_jobs, fit_algorithm=self.method, random_state=random_state, transform_algorithm=transform_algorithm, transform_alpha=self.alpha, verbose=self.verbose, callback=self.callback, tol=self.tol, max_no_improvement=self.max_no_improvement)\n    est.set_output(transform='default')\n    est.fit(X.T)\n    self.components_, self.n_iter_ = (est.transform(X.T).T, est.n_iter_)\n    components_norm = np.linalg.norm(self.components_, axis=1)[:, np.newaxis]\n    components_norm[components_norm == 0] = 1\n    self.components_ /= components_norm\n    self.n_components_ = len(self.components_)\n    return self",
    "docstring": "Specialized for MiniBatchSparsePCA.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\decomposition\\_sparse_pca.py",
    "ast_data": "FunctionDef name:_fit arg:self arg:X arg:n_components arg:random_state arguments arg arg arg arg Assign Assign Call Call Call Assign Call Assign Call Assign Compare Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "composite_tensor_from_variant",
    "source_code": "def composite_tensor_from_variant(encoded, type_spec, name=None):\n    if not isinstance(encoded, tensor.Tensor):\n        raise TypeError(f'Expected `encoded` to be a Tensor, got {encoded!r}.')\n    if encoded.dtype != dtypes.variant:\n        raise TypeError(f'Expected `encoded` to have dtype=variant, got {encoded!r}.')\n    encoded.shape.assert_is_compatible_with(())\n    metadata = composite_tensor_variant_pb2.CompositeTensorVariantMetadata()\n    metadata.type_spec_proto.CopyFrom(nested_structure_coder.encode_structure(type_spec).type_spec_value)\n    component_dtypes = [t.dtype for t in nest.flatten(type_spec, expand_composites=True)]\n    components = gen_composite_tensor_ops.CompositeTensorVariantToComponents(encoded=encoded, metadata=metadata.SerializeToString(), Tcomponents=component_dtypes, name=name)\n    return nest.pack_sequence_as(type_spec, components, expand_composites=True)",
    "docstring": "Returns the value encoded by a variant scalar tensor. Args: encoded: A Tensor returned by . type_spec: The of the original value. This is used to determine the number and types of the component tensors that comprise the decoded value. Must be compatible with the serilized in . name: Optional name for the operation. Returns: An value that is compatible with . Raises: TypeError: If is not a Tensor with dtype=variant. InvalidArgumentError: If is not compatible with .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\composite_tensor_ops.py",
    "ast_data": "FunctionDef name:composite_tensor_from_variant arg:encoded arg:type_spec arg:name arguments arg arg arg If Call Raise Call If Compare Raise Call Call Assign Call Call Call Assign Call Assign Call Call Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "get_axislabel_pos_angle",
    "source_code": "def get_axislabel_pos_angle(self, axes):\n    return dict(left=((0.0, 0.5), 90), right=((1.0, 0.5), 90), bottom=((0.5, 0.0), 0), top=((0.5, 1.0), 0))[self._loc]",
    "docstring": "Return the label reference position in transAxes. get_label_transform() returns a transform of (transAxes+offset)",
    "type": "method",
    "file_path": "matplotlib\\lib\\mpl_toolkits\\axisartist\\axislines.py",
    "ast_data": "FunctionDef name:get_axislabel_pos_angle arg:self arg:axes arguments arg arg Return return:yes Call"
  },
  {
    "library": "django",
    "name": "get_related_models_tuples",
    "source_code": "def get_related_models_tuples(model):\n    return {(rel_mod._meta.app_label, rel_mod._meta.model_name) for rel_mod in _get_related_models(model)}",
    "docstring": "Return a list of typical (app_label, model_name) tuples for all related models for the given model.",
    "type": "function",
    "file_path": "django\\django\\db\\migrations\\state.py",
    "ast_data": "FunctionDef name:get_related_models_tuples arg:model arguments arg Return return:yes Call"
  },
  {
    "library": "authlib",
    "name": "generate_signature_base_string",
    "source_code": "def generate_signature_base_string(request):\n    host = request.headers.get('Host', None)\n    return construct_base_string(request.method, request.uri, request.params, host)",
    "docstring": "Generate signature base string from request.",
    "type": "function",
    "file_path": "authlib\\authlib\\oauth1\\rfc5849\\signature.py",
    "ast_data": "FunctionDef name:generate_signature_base_string arg:request arguments arg Assign Call Return return:yes Call"
  },
  {
    "library": "kornia",
    "name": "shape",
    "source_code": "@property\ndef shape(self) -> Tuple[int, ...]:\n    return tuple(self.data.shape)",
    "docstring": "Return the shape of the underlying data with shape :math:.",
    "type": "method",
    "file_path": "kornia\\kornia\\geometry\\quaternion.py",
    "ast_data": "FunctionDef name:shape arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "django",
    "name": "ProcessFormView",
    "source_code": "class ProcessFormView(View):\n\n    def get(self, request, *args, **kwargs):\n        return self.render_to_response(self.get_context_data())\n\n    def post(self, request, *args, **kwargs):\n        form = self.get_form()\n        if form.is_valid():\n            return self.form_valid(form)\n        else:\n            return self.form_invalid(form)\n\n    def put(self, *args, **kwargs):\n        return self.post(*args, **kwargs)",
    "docstring": "Render a form on GET and processes it on POST.",
    "type": "class",
    "file_path": "django\\django\\views\\generic\\edit.py",
    "ast_data": "ClassDef name:ProcessFormView FunctionDef name:get arg:self arg:request arguments arg arg arg arg Return return:yes Call Call FunctionDef name:post arg:self arg:request arguments arg arg arg arg Assign Call If Call Return return:yes Call Return return:yes Call FunctionDef name:put arg:self arguments arg arg arg Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "_as_tf",
    "source_code": "def _as_tf(self):\n    if isinstance(self, TransferFunction):\n        return self\n    else:\n        return self.to_tf()",
    "docstring": "Convert to system, without copying. Returns ------- sys: ZerosPolesGain The system. If the class is already an instance of then this instance is returned.",
    "type": "method",
    "file_path": "scipy\\scipy\\signal\\_ltisys.py",
    "ast_data": "FunctionDef name:_as_tf arg:self arguments arg If Call Return return:yes Return return:yes Call"
  },
  {
    "library": "cherrypy",
    "name": "quickstart",
    "source_code": "def quickstart(root=None, script_name='', config=None):\n    if config:\n        _global_conf_alias.update(config)\n    tree.mount(root, script_name, config)\n    engine.signals.subscribe()\n    engine.start()\n    engine.block()",
    "docstring": "Mount the given root, start the builtin server (and engine), then block. root: an instance of a \"controller class\" (a collection of page handler methods) which represents the root of the application. script_name: a string containing the \"mount point\" of the application. This should start with a slash, and be the path portion of the URL at which to mount the given root. For example, if root.index() will handle requests to \" then the script_name argument would be \"/dept/app1\". It MUST NOT end in a slash. If the script_name refers to the root of the URI, it MUST be an empty string (not \"/\"). config: a file or dict containing application config. If this contains a [global] section, those entries will be used in the global (site-wide) config.",
    "type": "function",
    "file_path": "cherrypy\\cherrypy\\__init__.py",
    "ast_data": "FunctionDef name:quickstart arg:root arg:script_name arg:config arguments arg arg arg If Call Call Call Call Call"
  },
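A minimal sketch of the documented usage; the port is an arbitrary choice for the example, and the call is guarded because `quickstart()` blocks until the engine stops:

```python
import cherrypy

class Root:
    @cherrypy.expose
    def index(self):
        return "Hello, world!"

if __name__ == "__main__":
    # Mount at the URI root, so script_name is the empty string.
    cherrypy.quickstart(Root(), "", {"global": {"server.socket_port": 8080}})
```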
  {
    "library": "matplotlib",
    "name": "get_axes_pad",
    "source_code": "def get_axes_pad(self):\n    return (self._horiz_pad_size.fixed_size, self._vert_pad_size.fixed_size)",
    "docstring": "Return the axes padding. Returns ------- hpad, vpad Padding (horizontal pad, vertical pad) in inches.",
    "type": "method",
    "file_path": "matplotlib\\lib\\mpl_toolkits\\axes_grid1\\axes_grid.py",
    "ast_data": "FunctionDef name:get_axes_pad arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "emit_obj_delete",
    "source_code": "def emit_obj_delete(self, category: str, name: str, timestamp: int, pid: int, tid: int, object_id: int) -> None:\n    event = self._create_event('D', category, name, pid, tid, timestamp)\n    event['id'] = object_id\n    self._events.append(event)",
    "docstring": "Adds an object deletion event to the trace. Args: category: The event category as a string. name: The event name as a string. timestamp: The timestamp of this event as a long integer. pid: Identifier of the process generating this event as an integer. tid: Identifier of the thread generating this event as an integer. object_id: Identifier of the object as an integer.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\client\\timeline.py",
    "ast_data": "FunctionDef name:emit_obj_delete arg:self arg:category arg:name arg:timestamp arg:pid arg:tid arg:object_id arguments arg arg arg arg arg arg arg Assign Call Assign Call"
  },
  {
    "library": "cherrypy",
    "name": "id",
    "source_code": "@property\ndef id(self):\n    return self._id",
    "docstring": "Return the current session id.",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\lib\\sessions.py",
    "ast_data": "FunctionDef name:id arg:self arguments arg Return return:yes"
  },
  {
    "library": "scipy",
    "name": "dctn",
    "source_code": "@_dispatch\ndef dctn(x, type=2, s=None, axes=None, norm=None, overwrite_x=False, workers=None, *, orthogonalize=None):\n    return (Dispatchable(x, np.ndarray),)",
    "docstring": "Return multidimensional Discrete Cosine Transform along the specified axes. Parameters ---------- x : array_like The input array. type : {1, 2, 3, 4}, optional Type of the DCT (see Notes). Default type is 2. s : int or array_like of ints or None, optional The shape of the result. If both and (see below) are None, is `saxess`s[i] >> import numpy as np >>> from scipy.fft import dctn, idctn >>> rng = np.random.default_rng() >>> y = rng.standard_normal((16, 16)) >>> np.allclose(y, idctn(dctn(y))) True",
    "type": "function",
    "file_path": "scipy\\scipy\\fft\\_realtransforms.py",
    "ast_data": "FunctionDef name:dctn arg:x arg:type arg:s arg:axes arg:norm arg:overwrite_x arg:workers arguments arg arg arg arg arg arg arg arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "increment_version",
    "source_code": "def increment_version(tensor: Union[torch.Tensor, Iterable[torch.Tensor]]) -> None:\n    if isinstance(tensor, torch.Tensor):\n        tensor = (tensor,)\n    torch._C._increment_version(tensor)",
    "docstring": "Update autograd metadata tracking whether the given Tensor was modified in place. This is to enable more accurate error checking within the autograd engine. It is already done automatically by PyTorch functions and within custom Function when mark_dirty() is called appropriately so you only need to call this explicitly if you are doing inplace operation on the Tensor data in a way that Pytorch doesn't know about. For example a custom kernel that reads the Tensor data_ptr and modifies the memory inplace based on this pointer. Can accept either a tensor, or a list of tensors. Note that incrementing the version counter multiple times for a single inplace operation is not problematic. Note that if you pass in tensor constructed under torch.inference_mode(), we will not bump its version counter (because your tensor does not have one).",
    "type": "function",
    "file_path": "pytorch\\torch\\autograd\\graph.py",
    "ast_data": "FunctionDef name:increment_version arg:tensor arguments arg If Call Assign Call"
  },
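A small sketch of the observable effect, assuming a recent PyTorch where `torch.autograd.graph.increment_version` is exported; `_version` is an internal attribute read here only for illustration:

```python
import torch
from torch.autograd.graph import increment_version

t = torch.zeros(2)
before = t._version  # internal version counter, read for illustration
increment_version(t)
assert t._version == before + 1
```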
  {
    "library": "django",
    "name": "watched_files",
    "source_code": "def watched_files(self, include_globs=True):\n    yield from iter_all_python_module_files()\n    yield from self.extra_files\n    if include_globs:\n        for directory, patterns in self.directory_globs.items():\n            for pattern in patterns:\n                yield from directory.glob(pattern)",
    "docstring": "Yield all files that need to be watched, including module files and files within globs.",
    "type": "method",
    "file_path": "django\\django\\utils\\autoreload.py",
    "ast_data": "FunctionDef name:watched_files arg:self arg:include_globs arguments arg arg Call If For Call For Call"
  },
  {
    "library": "tensorflow",
    "name": "representative_batch_size",
    "source_code": "def representative_batch_size(self):\n    return self.batch_size()",
    "docstring": "Return a representative size for batches in the dataset. This is not guaranteed to be the batch size for all batches in the dataset. It just needs to be a rough approximation for batch sizes in the dataset. Returns: int, a representative size for batches found in the dataset, or None if it is unknown.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\data_adapter.py",
    "ast_data": "FunctionDef name:representative_batch_size arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "rank",
    "source_code": "def rank(values: ArrayLike, axis: AxisInt=0, method: str='average', na_option: str='keep', ascending: bool=True, pct: bool=False) -> npt.NDArray[np.float64]:\n    is_datetimelike = needs_i8_conversion(values.dtype)\n    values = _ensure_data(values)\n    if values.ndim == 1:\n        ranks = algos.rank_1d(values, is_datetimelike=is_datetimelike, ties_method=method, ascending=ascending, na_option=na_option, pct=pct)\n    elif values.ndim == 2:\n        ranks = algos.rank_2d(values, axis=axis, is_datetimelike=is_datetimelike, ties_method=method, ascending=ascending, na_option=na_option, pct=pct)\n    else:\n        raise TypeError('Array with ndim > 2 are not supported.')\n    return ranks",
    "docstring": "Rank the values along a given axis. Parameters ---------- values : np.ndarray or ExtensionArray Array whose values will be ranked. The number of dimensions in this array must not exceed 2. axis : int, default 0 Axis over which to perform rankings. method : {'average', 'min', 'max', 'first', 'dense'}, default 'average' The method by which tiebreaks are broken during the ranking. na_option : {'keep', 'top'}, default 'keep' The method by which NaNs are placed in the ranking. - ``: replace each NaN with either +/- inf so that they there are ranked at the top ascending : bool, default True Whether or not the elements should be ranked in ascending order. pct : bool, default False Whether or not to the display the returned rankings in integer form (e.g. 1, 2, 3) or in percentile form (e.g. 0.333..., 0.666..., 1).",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\algorithms.py",
    "ast_data": "FunctionDef name:rank arg:values arg:axis arg:method arg:na_option arg:ascending arg:pct arguments arg arg arg arg arg arg Assign Call Assign Call If Compare Assign Call If Compare Assign Call Raise Call Return return:yes"
  },
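This helper is private, but the public `Series.rank` exposes the same `method`/`na_option` behavior, which makes the options easy to demonstrate:

```python
import numpy as np
import pandas as pd

s = pd.Series([3.0, np.nan, 1.0, 3.0])
print(s.rank(method="average", na_option="keep").tolist())
# [2.5, nan, 1.0, 2.5]  -- NaN keeps a NaN rank
print(s.rank(method="average", na_option="top").tolist())
# [3.5, 1.0, 2.0, 3.5]  -- NaN is ranked first
```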
  {
    "library": "sphinx",
    "name": "TableCell",
    "source_code": "class TableCell:\n\n    def __init__(self, table: Table, row: int, col: int) -> None:\n        if table.cells[row, col] == 0:\n            raise IndexError\n        self.table = table\n        self.cell_id = table.cells[row, col]\n        self.row = row\n        self.col = col\n        while table.cells[self.row - 1, self.col] == self.cell_id:\n            self.row -= 1\n        while table.cells[self.row, self.col - 1] == self.cell_id:\n            self.col -= 1\n\n    @property\n    def width(self) -> int:\n        width = 0\n        while self.table.cells[self.row, self.col + width] == self.cell_id:\n            width += 1\n        return width\n\n    @property\n    def height(self) -> int:\n        height = 0\n        while self.table.cells[self.row + height, self.col] == self.cell_id:\n            height += 1\n        return height",
    "docstring": "Data of a cell in a table.",
    "type": "class",
    "file_path": "sphinx\\sphinx\\writers\\latex.py",
    "ast_data": "ClassDef name:TableCell FunctionDef name:__init__ arg:self arg:table arg:row arg:col arguments arg arg arg arg If Compare Raise Assign Assign Assign Assign While Compare While Compare FunctionDef name:width arg:self arguments arg Assign While Compare Return return:yes FunctionDef name:height arg:self arguments arg Assign While Compare Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "_type_name",
    "source_code": "def _type_name(t):\n    module = t.__module__\n    qualname = t.__qualname__\n    if module == 'builtins':\n        return qualname\n    elif t == Real:\n        return 'float'\n    elif t == Integral:\n        return 'int'\n    return f'{module}.{qualname}'",
    "docstring": "Convert type into human readable string.",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\utils\\_param_validation.py",
    "ast_data": "FunctionDef name:_type_name arg:t arguments arg Assign Assign If Compare Return return:yes If Compare Return return:yes If Compare Return return:yes Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "load_nvprof",
    "source_code": "def load_nvprof(path):\n    return EventList(parse_nvprof_trace(path))",
    "docstring": "Open an nvprof trace file and parses autograd annotations. Args: path (str): path to nvprof trace",
    "type": "function",
    "file_path": "pytorch\\torch\\autograd\\profiler.py",
    "ast_data": "FunctionDef name:load_nvprof arg:path arguments arg Return return:yes Call Call"
  },
  {
    "library": "numpy",
    "name": "rfind",
    "source_code": "def rfind(self, sub, start=0, end=None):\n    return rfind(self, sub, start, end)",
    "docstring": "For each element in , return the highest index in the string where substring is found, such that is contained within [, ]. See Also -------- char.rfind",
    "type": "method",
    "file_path": "numpy\\numpy\\_core\\defchararray.py",
    "ast_data": "FunctionDef name:rfind arg:self arg:sub arg:start arg:end arguments arg arg arg arg Return return:yes Call"
  },
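The free-function counterpart `np.char.rfind` shows the same semantics without constructing a chararray; `-1` marks elements where the substring is absent:

```python
import numpy as np

a = np.array(["abcba", "xyz"])
print(np.char.rfind(a, "b"))        # [ 3 -1]  highest match index, -1 if absent
print(np.char.rfind(a, "b", 0, 2))  # [ 1 -1]  search restricted by start/end
```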
  {
    "library": "matplotlib",
    "name": "FontConstantsBase",
    "source_code": "class FontConstantsBase:\n    script_space: T.ClassVar[float] = 0.05\n    subdrop: T.ClassVar[float] = 0.4\n    sup1: T.ClassVar[float] = 0.7\n    sub1: T.ClassVar[float] = 0.3\n    sub2: T.ClassVar[float] = 0.5\n    delta: T.ClassVar[float] = 0.025\n    delta_slanted: T.ClassVar[float] = 0.2\n    delta_integral: T.ClassVar[float] = 0.1",
    "docstring": "A set of constants that controls how certain things, such as sub- and superscripts are laid out. These are all metrics that can't be reliably retrieved from the font metrics in the font itself.",
    "type": "class",
    "file_path": "matplotlib\\lib\\matplotlib\\_mathtext.py",
    "ast_data": "ClassDef name:FontConstantsBase"
  },
  {
    "library": "matplotlib",
    "name": "__init__",
    "source_code": "def __init__(self, verts, *args, zsort='average', shade=False, lightsource=None, axlim_clip=False, **kwargs):\n    if shade:\n        normals = _generate_normals(verts)\n        facecolors = kwargs.get('facecolors', None)\n        if facecolors is not None:\n            kwargs['facecolors'] = _shade_colors(facecolors, normals, lightsource)\n        edgecolors = kwargs.get('edgecolors', None)\n        if edgecolors is not None:\n            kwargs['edgecolors'] = _shade_colors(edgecolors, normals, lightsource)\n        if facecolors is None and edgecolors is None:\n            raise ValueError('You must provide facecolors, edgecolors, or both for shade to work.')\n    super().__init__(verts, *args, **kwargs)\n    if isinstance(verts, np.ndarray):\n        if verts.ndim != 3:\n            raise ValueError('verts must be a list of (N, 3) array-like')\n    elif any((len(np.shape(vert)) != 2 for vert in verts)):\n        raise ValueError('verts must be a list of (N, 3) array-like')\n    self.set_zsort(zsort)\n    self._codes3d = None\n    self._axlim_clip = axlim_clip",
    "docstring": "Parameters ---------- verts : list of (N, 3) array-like The sequence of polygons [*verts0*, *verts1*, ...] where each element *verts_i* defines the vertices of polygon *i* as a 2D array-like of shape (N, 3). zsort : {'average', 'min', 'max'}, default: 'average' The calculation method for the z-order. See for details. shade : bool, default: False Whether to shade *facecolors* and *edgecolors*. When activating *shade*, *facecolors* and/or *edgecolors* must be provided. .. versionadded:: 3.7 lightsource : , optional The lightsource to use when *shade* is True. .. versionadded:: 3.7 axlim_clip : bool, default: False Whether to hide polygons with a vertex outside the view limits. .. versionadded:: 3.10 *args, **kwargs All other parameters are forwarded to . Notes ----- Note that this class does a bit of magic with the _facecolors and _edgecolors properties.",
    "type": "method",
    "file_path": "matplotlib\\lib\\mpl_toolkits\\mplot3d\\art3d.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:verts arguments arg arg arg arg arg arg arg arg If Assign Call Assign Call If Compare Assign Call Assign Call If Compare Assign Call If BoolOp Compare Compare Raise Call Call Call If Call If Compare Raise Call If Call Compare Call Call Raise Call Call Assign Assign"
  },
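A small usage sketch: building a `Poly3DCollection` from two triangles with `shade=True`, which per the docstring requires `facecolors` and/or `edgecolors` (shading needs matplotlib >= 3.7):

```python
import matplotlib.pyplot as plt
import numpy as np
from mpl_toolkits.mplot3d.art3d import Poly3DCollection

# Two triangles, each an (N, 3) array of vertices.
verts = [
    np.array([[0, 0, 0], [1, 0, 0], [0, 1, 0]]),
    np.array([[0, 0, 1], [1, 0, 1], [0, 1, 1]]),
]

fig = plt.figure()
ax = fig.add_subplot(projection="3d")

# shade=True requires facecolors (and/or edgecolors) to be given.
poly = Poly3DCollection(verts, facecolors=["tab:blue", "tab:orange"],
                        shade=True, zsort="average")
ax.add_collection3d(poly)
ax.set(xlim=(0, 1), ylim=(0, 1), zlim=(0, 1))
plt.show()
```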
  {
    "library": "authlib",
    "name": "validate_id_token_encrypted_response_alg",
    "source_code": "def validate_id_token_encrypted_response_alg(self):\n    self._validate_claim_value('id_token_encrypted_response_alg')",
    "docstring": "JWE alg algorithm [JWA] REQUIRED for encrypting the ID Token issued to this Client. If this is requested, the response will be signed then encrypted, with the result being a Nested JWT, as defined in [JWT]. The default, if omitted, is that no encryption is performed.",
    "type": "method",
    "file_path": "authlib\\authlib\\oidc\\registration\\claims.py",
    "ast_data": "FunctionDef name:validate_id_token_encrypted_response_alg arg:self arguments arg Call"
  },
  {
    "library": "tensorflow",
    "name": "emit_obj_create",
    "source_code": "def emit_obj_create(self, category: str, name: str, timestamp: int, pid: int, tid: int, object_id: int) -> None:\n    event = self._create_event('N', category, name, pid, tid, timestamp)\n    event['id'] = object_id\n    self._events.append(event)",
    "docstring": "Adds an object creation event to the trace. Args: category: The event category as a string. name: The event name as a string. timestamp: The timestamp of this event as a long integer. pid: Identifier of the process generating this event as an integer. tid: Identifier of the thread generating this event as an integer. object_id: Identifier of the object as an integer.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\client\\timeline.py",
    "ast_data": "FunctionDef name:emit_obj_create arg:self arg:category arg:name arg:timestamp arg:pid arg:tid arg:object_id arguments arg arg arg arg arg arg arg Assign Call Assign Call"
  },
  {
    "library": "sphinx",
    "name": "install_dispatcher",
    "source_code": "def install_dispatcher(app: Sphinx, docname: str, source: list[str]) -> None:\n    dispatcher = IntersphinxDispatcher()\n    dispatcher.enable()",
    "docstring": "Enable IntersphinxDispatcher. .. note:: The installed dispatcher will be uninstalled on disabling sphinx_domain automatically.",
    "type": "function",
    "file_path": "sphinx\\sphinx\\ext\\intersphinx\\_resolve.py",
    "ast_data": "FunctionDef name:install_dispatcher arg:app arg:docname arg:source arguments arg arg arg Assign Call Call"
  },
  {
    "library": "pytorch",
    "name": "generate_equalization_mapping",
    "source_code": "def generate_equalization_mapping(self) -> QConfigMapping:\n    detector_qconfig_info_combined = self._generate_module_fqn_to_detector_info_mapping(self._update_detector_equalization_qconfig_info)\n    mapping: QConfigMapping = self._generate_qconfig_mapping_helper(detector_qconfig_info_combined, self._equalization_config_generator)\n    return mapping",
    "docstring": "Generates a QConfigMapping based on the suggestions of the ModelReport API for equalization. The generated mapping encompasses all the different types of feedback from the input-weight equalization detector. These configs are based on the suggestions provided by the ModelReport API and can only be generated once the reports have been generated. Returns a QConfigMapping for the equalization configuration",
    "type": "method",
    "file_path": "pytorch\\torch\\ao\\quantization\\fx\\_model_report\\model_report.py",
    "ast_data": "FunctionDef name:generate_equalization_mapping arg:self arguments arg Assign Call Call Return return:yes"
  },
  {
    "library": "pandas",
    "name": "isna",
    "source_code": "def isna(self) -> npt.NDArray[np.bool_]:\n    null_count = self._pa_array.null_count\n    if null_count == 0:\n        return np.zeros(len(self), dtype=np.bool_)\n    elif null_count == len(self):\n        return np.ones(len(self), dtype=np.bool_)\n    return self._pa_array.is_null().to_numpy()",
    "docstring": "Boolean NumPy array indicating if each value is missing. This should return a 1-D array the same length as 'self'.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\arrays\\arrow\\array.py",
    "ast_data": "FunctionDef name:isna arg:self arguments arg Assign If Compare Return return:yes Call Call If Compare Call Return return:yes Call Call Return return:yes Call Call"
  },
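The two early returns mean fully-valid and fully-null arrays never round-trip through pyarrow. Observable behavior via the public pandas API (assumes pyarrow is installed):

```python
import pandas as pd

# "int64[pyarrow]" is backed by ArrowExtensionArray.
arr = pd.array([1, None, 3], dtype="int64[pyarrow]")
print(arr.isna())   # [False  True False] -- general path via is_null()

# null_count == 0 / null_count == len(self) hit the fast paths above.
print(pd.array([1, 2], dtype="int64[pyarrow]").isna())        # [False False]
print(pd.array([None, None], dtype="int64[pyarrow]").isna())  # [ True  True]
```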
  {
    "library": "pytorch",
    "name": "linalg_replicate_strategy",
    "source_code": "@register_op_strategy([aten._linalg_svd.default, aten.linalg_qr.default, aten.diagonal_copy.default, aten.diag_embed.default, aten.diag.default, aten.diagonal.default, aten.tril.default, aten.triu.default, aten._linalg_eigh.default, aten.upsample_bicubic2d.default, aten.upsample_bilinear2d.default, aten.upsample_linear1d.default, aten.upsample_nearest2d.default, aten.upsample_trilinear3d.default], schema_info=RuntimeSchemaInfo(1))\ndef linalg_replicate_strategy(op_schema: OpSchema) -> OpStrategy:\n    args_schema = op_schema.args_schema\n    input_strategy = args_schema[0]\n    assert isinstance(input_strategy, OpStrategy), f'{input_strategy}'\n    mesh = input_strategy.mesh\n    output_strategies: list[PlacementStrategy] = []\n    for placement_strategy in input_strategy.strategies:\n        replicate_placements = tuple((Replicate() for _ in range(mesh.ndim)))\n        replicate_spec = DTensorSpec(mesh=mesh, placements=replicate_placements, tensor_meta=placement_strategy.output_spec.tensor_meta)\n        redistribute_cost = [generate_redistribute_costs(input_strategy, replicate_spec)]\n        replicate_strategy = PlacementStrategy(output_specs=replicate_spec, input_specs=(replicate_spec,), redistribute_cost=redistribute_cost)\n        output_strategies.append(replicate_strategy)\n    return OpStrategy(output_strategies)",
    "docstring": "Since we do not have a simple way to compute some linear algebra operations like SVD or QR decomposition, always fall back to replicate.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\tensor\\_ops\\_math_ops.py",
    "ast_data": "FunctionDef name:linalg_replicate_strategy arg:op_schema arguments arg Assign Assign Call Assign For Assign Call Call Call Assign Call Assign Call Assign Call Call Return return:yes Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "from_structured_signature",
    "source_code": "def from_structured_signature(input_signature=None, output_signature=None, capture_types=None, are_keyword_args_also_positional=False) -> FunctionType:\n    if input_signature is None:\n        input_signature = ((), {})\n    args, kwargs = input_signature\n    parameters = []\n    for i, arg in enumerate(args):\n        parameters.append(Parameter('arg_' + str(i), Parameter.POSITIONAL_ONLY, False, trace_type.from_value(arg, trace_type.InternalTracingContext(is_legacy_signature=True))))\n    keyword_arg_kind = Parameter.POSITIONAL_OR_KEYWORD if are_keyword_args_also_positional else Parameter.KEYWORD_ONLY\n    for name, kwarg in kwargs.items():\n        parameters.append(Parameter(sanitize_arg_name(name), keyword_arg_kind, False, trace_type.from_value(kwarg, trace_type.InternalTracingContext(is_legacy_signature=True))))\n    return_type = trace_type.from_value(output_signature, trace_type.InternalTracingContext(is_legacy_signature=True))\n    return FunctionType(parameters, capture_types or {}, return_annotation=return_type)",
    "docstring": "Generates a FunctionType from legacy signature representation.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\core\\function\\polymorphism\\function_type.py",
    "ast_data": "FunctionDef name:from_structured_signature arg:input_signature arg:output_signature arg:capture_types arg:are_keyword_args_also_positional arguments arg arg arg arg If Compare Assign Assign Assign For Call Call Call Call Call Call Assign For Call Call Call Call Call Call Assign Call Call Return return:yes Call BoolOp"
  },
  {
    "library": "django",
    "name": "compile_filter",
    "source_code": "def compile_filter(self, token):\n    return FilterExpression(token, self)",
    "docstring": "Convenient wrapper for FilterExpression",
    "type": "method",
    "file_path": "django\\django\\template\\base.py",
    "ast_data": "FunctionDef name:compile_filter arg:self arg:token arguments arg arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_is_generator_like",
    "source_code": "def _is_generator_like(data):\n    return hasattr(data, '__next__') or hasattr(data, 'next') or isinstance(data, (Sequence, iterator_ops.Iterator, iterator_ops.IteratorBase))",
    "docstring": "Checks if data is a generator, Sequence, or Iterator.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\callbacks.py",
    "ast_data": "FunctionDef name:_is_generator_like arg:data arguments arg Return return:yes BoolOp Call Call Call"
  },
  {
    "library": "django",
    "name": "get_filter_kwargs_for_object",
    "source_code": "def get_filter_kwargs_for_object(self, obj):\n    return {self.name: getattr(obj, self.attname)}",
    "docstring": "Return a dict that when passed as kwargs to self.model.filter(), would yield all instances having the same value for this field as obj has.",
    "type": "method",
    "file_path": "django\\django\\db\\models\\fields\\__init__.py",
    "ast_data": "FunctionDef name:get_filter_kwargs_for_object arg:self arg:obj arguments arg arg Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "set_dpi",
    "source_code": "def set_dpi(self, val):\n    self._parent.dpi = val\n    self.stale = True",
    "docstring": "Set the resolution of parent figure in dots-per-inch. Parameters ---------- val : float",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\figure.py",
    "ast_data": "FunctionDef name:set_dpi arg:self arg:val arguments arg arg Assign Assign"
  },
  {
    "library": "scikit-learn",
    "name": "_with_config_and_warning_filters",
    "source_code": "def _with_config_and_warning_filters(delayed_func, config, warning_filters):\n    if hasattr(delayed_func, 'with_config_and_warning_filters'):\n        return delayed_func.with_config_and_warning_filters(config, warning_filters)\n    else:\n        warnings.warn('`sklearn.utils.parallel.Parallel` needs to be used in conjunction with `sklearn.utils.parallel.delayed` instead of `joblib.delayed` to correctly propagate the scikit-learn configuration to the joblib workers.', UserWarning)\n        return delayed_func",
    "docstring": "Helper function that intends to attach a config to a delayed function.",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\utils\\parallel.py",
    "ast_data": "FunctionDef name:_with_config_and_warning_filters arg:delayed_func arg:config arg:warning_filters arguments arg arg arg If Call Return return:yes Call Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "get_next_color",
    "source_code": "def get_next_color(self):\n    entry = self._cycler_items[self._idx]\n    if 'color' in entry:\n        self._idx = (self._idx + 1) % len(self._cycler_items)\n        return entry['color']\n    else:\n        return 'k'",
    "docstring": "Return the next color in the cycle.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\axes\\_base.py",
    "ast_data": "FunctionDef name:get_next_color arg:self arguments arg Assign If Compare Assign Call Return return:yes Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "__init__",
    "source_code": "def __init__(self, *, pad=1.08, h_pad=None, w_pad=None, rect=(0, 0, 1, 1), **kwargs):\n    super().__init__(**kwargs)\n    for td in ['pad', 'h_pad', 'w_pad', 'rect']:\n        self._params[td] = None\n    self.set(pad=pad, h_pad=h_pad, w_pad=w_pad, rect=rect)",
    "docstring": "Initialize tight_layout engine. Parameters ---------- pad : float, default: 1.08 Padding between the figure edge and the edges of subplots, as a fraction of the font size. h_pad, w_pad : float Padding (height/width) between edges of adjacent subplots. Defaults to *pad*. rect : tuple (left, bottom, right, top), default: (0, 0, 1, 1). rectangle in normalized figure coordinates that the subplots (including labels) will fit into.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\layout_engine.py",
    "ast_data": "FunctionDef name:__init__ arg:self arguments arg arg arg arg arg arg Call Call For Assign Call"
  },
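Unlike a one-off `fig.tight_layout()` call, installing the engine makes the padding re-apply on every draw. A minimal sketch:

```python
import matplotlib.pyplot as plt
from matplotlib.layout_engine import TightLayoutEngine

fig, axs = plt.subplots(2, 2)
for ax in axs.flat:
    ax.set_title("panel")

# Persistent tight layout: pad is in multiples of the font size.
fig.set_layout_engine(TightLayoutEngine(pad=1.2, h_pad=0.5, w_pad=0.5))
plt.show()
```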
  {
    "library": "pytorch",
    "name": "_validate_and_set_stage_mapping",
    "source_code": "def _validate_and_set_stage_mapping(self, actions: dict[int, list[Optional[_Action]]]) -> None:\n    self.stage_index_to_group_rank = _validate_schedule(actions, self.pp_group_size, self._num_stages, self._n_microbatches)\n    for stage in self._stages:\n        stage.stage_index_to_group_rank = self.stage_index_to_group_rank",
    "docstring": "Allocates the stage index to rank mapping which is needed for communication",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\pipelining\\schedules.py",
    "ast_data": "FunctionDef name:_validate_and_set_stage_mapping arg:self arg:actions arguments arg arg Assign Call For Assign"
  },
  {
    "library": "tensorflow",
    "name": "set_tf_cuda_clang",
    "source_code": "def set_tf_cuda_clang(environ_cp):\n    question = 'Do you want to use clang as CUDA compiler?'\n    yes_reply = 'Clang will be used as CUDA compiler.'\n    no_reply = 'nvcc will be used as CUDA compiler.'\n    set_action_env_var(environ_cp, 'TF_CUDA_CLANG', None, True, question=question, yes_reply=yes_reply, no_reply=no_reply, bazel_config_name='cuda_clang')",
    "docstring": "set TF_CUDA_CLANG action_env. Args: environ_cp: copy of the os.environ.",
    "type": "function",
    "file_path": "tensorflow\\configure.py",
    "ast_data": "FunctionDef name:set_tf_cuda_clang arg:environ_cp arguments arg Assign Assign Assign Call"
  },
  {
    "library": "pytorch",
    "name": "NullKernelHandler",
    "source_code": "class NullKernelHandler(NullHandler):\n\n    def __init__(self):\n        super().__init__()\n        self.removed_buffers = OrderedSet[Any]()\n        self.inplaced_to_remove = OrderedSet[Any]()\n        self.index_dtype = 'tl.int64'\n\n    def get_index_dtype_as_torch_dtype(self):\n        import torch\n        if self.index_dtype == 'tl.int64':\n            return torch.int64\n        elif self.index_dtype == 'tl.int32':\n            return torch.int32\n        else:\n            raise ValueError(f'Unknown dtype: {self.index_dtype}')",
    "docstring": "We need access in DeferredLine class when there is no kernel in the context. This happens when codegening the wrapper. Initialize and explicitly so we don't need call 'getattr' with default value which is error prone to typo in attribute name.",
    "type": "class",
    "file_path": "pytorch\\torch\\_inductor\\virtualized.py",
    "ast_data": "ClassDef name:NullKernelHandler FunctionDef name:__init__ arg:self arguments arg Call Call Assign Call Assign Call Assign FunctionDef name:get_index_dtype_as_torch_dtype arg:self arguments arg If Compare Return return:yes If Compare Return return:yes Raise Call"
  },
  {
    "library": "django",
    "name": "Stylesheet",
    "source_code": "class Stylesheet:\n\n    def __init__(self, url, mimetype='', media='screen'):\n        self._url = url\n        self._mimetype = mimetype\n        self.media = media\n\n    @property\n    def url(self):\n        return iri_to_uri(self._url)\n\n    @property\n    def mimetype(self):\n        if self._mimetype == '':\n            return _guess_stylesheet_mimetype(self.url)[0]\n        return self._mimetype\n\n    def __str__(self):\n        data = [f'href=\"{self.url}\"']\n        if self.mimetype is not None:\n            data.append(f'type=\"{self.mimetype}\"')\n        if self.media is not None:\n            data.append(f'media=\"{self.media}\"')\n        return ' '.join(data)\n\n    def __repr__(self):\n        return repr((self.url, self.mimetype, self.media))",
    "docstring": "An RSS stylesheet",
    "type": "class",
    "file_path": "django\\django\\utils\\feedgenerator.py",
    "ast_data": "ClassDef name:Stylesheet FunctionDef name:__init__ arg:self arg:url arg:mimetype arg:media arguments arg arg arg arg Assign Assign Assign FunctionDef name:url arg:self arguments arg Return return:yes Call FunctionDef name:mimetype arg:self arguments arg If Compare Return return:yes Call Return return:yes FunctionDef name:__str__ arg:self arguments arg Assign If Compare Call If Compare Call Return return:yes Call FunctionDef name:__repr__ arg:self arguments arg Return return:yes Call"
  },
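A short sketch of the class in isolation (it ships in `django.utils.feedgenerator` in recent Django releases); the exact guessed mimetype depends on the platform's `mimetypes` tables:

```python
from django.utils.feedgenerator import Stylesheet

sheet = Stylesheet("/static/feed.xsl", media="screen")

# mimetype falls back to a guess based on the URL's extension.
print(sheet.mimetype)
# __str__ assembles the href/type/media attribute string.
print(str(sheet))   # href="/static/feed.xsl" type="..." media="screen"
```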
  {
    "library": "kornia",
    "name": "apply_transform_keypoint",
    "source_code": "def apply_transform_keypoint(self, input: Keypoints, params: Dict[str, Tensor], flags: Dict[str, Any], transform: Optional[Tensor]=None) -> Keypoints:\n    raise NotImplementedError",
    "docstring": "Process keypoints corresponding to the inputs that are transformed.",
    "type": "method",
    "file_path": "kornia\\kornia\\augmentation\\base.py",
    "ast_data": "FunctionDef name:apply_transform_keypoint arg:self arg:input arg:params arg:flags arg:transform arguments arg arg arg arg arg Raise"
  },
  {
    "library": "django",
    "name": "SwappableTuple",
    "source_code": "class SwappableTuple(tuple):\n\n    def __new__(cls, value, setting):\n        self = tuple.__new__(cls, value)\n        self.setting = setting\n        return self",
    "docstring": "Subclass of tuple so Django can tell this was originally a swappable dependency when it reads the migration file.",
    "type": "class",
    "file_path": "django\\django\\db\\migrations\\migration.py",
    "ast_data": "ClassDef name:SwappableTuple FunctionDef name:__new__ arg:cls arg:value arg:setting arguments arg arg arg Assign Call Assign Return return:yes"
  },
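The trick generalizes: `tuple` is immutable, so extra state has to be attached in `__new__` rather than `__init__`. A generic sketch of the same pattern (`TaggedTuple` and its field names are hypothetical):

```python
class TaggedTuple(tuple):
    """A tuple that remembers where its value came from."""

    def __new__(cls, value, tag):
        # tuple contents are fixed at allocation time, hence __new__.
        self = tuple.__new__(cls, value)
        self.tag = tag
        return self


dep = TaggedTuple(("auth", "0001_initial"), tag="AUTH_USER_MODEL")
print(dep)                      # ('auth', '0001_initial')
print(dep.tag)                  # 'AUTH_USER_MODEL'
print(isinstance(dep, tuple))   # True
```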
  {
    "library": "pytorch",
    "name": "set_module_name_qconfig",
    "source_code": "@_config_checker\ndef set_module_name_qconfig(self, module_name: str, quantization_config: Optional[QuantizationConfig]):\n    self.module_name_qconfig[module_name] = quantization_config\n    return self",
    "docstring": "Set quantization_config for a submodule with name: , for example: quantizer.set_module_name_qconfig(\"blocks.sub\"), it will quantize all supported operator/operator patterns in the submodule with this module name with the given The supported operators include and .",
    "type": "method",
    "file_path": "pytorch\\torch\\ao\\quantization\\quantizer\\x86_inductor_quantizer.py",
    "ast_data": "FunctionDef name:set_module_name_qconfig arg:self arg:module_name arg:quantization_config arguments arg arg arg Assign Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "predict",
    "source_code": "def predict(self, X, return_std=False):\n    y_mean = self._decision_function(X)\n    if return_std is False:\n        return y_mean\n    else:\n        col_index = self.lambda_ < self.threshold_lambda\n        X = _safe_indexing(X, indices=col_index, axis=1)\n        sigmas_squared_data = (np.dot(X, self.sigma_) * X).sum(axis=1)\n        y_std = np.sqrt(sigmas_squared_data + 1.0 / self.alpha_)\n        return (y_mean, y_std)",
    "docstring": "Predict using the linear model. In addition to the mean of the predictive distribution, also its standard deviation can be returned. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) Samples. return_std : bool, default=False Whether to return the standard deviation of posterior prediction. Returns ------- y_mean : array-like of shape (n_samples,) Mean of predictive distribution of query points. y_std : array-like of shape (n_samples,) Standard deviation of predictive distribution of query points.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\linear_model\\_bayes.py",
    "ast_data": "FunctionDef name:predict arg:self arg:X arg:return_std arguments arg arg arg Assign Call If Compare Return return:yes Assign Compare Assign Call Assign Call Call Assign Call Return return:yes"
  },
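Given the file and the `lambda_`/`threshold_lambda` pruning, this appears to be ARD regression's `predict`; a minimal sketch of requesting the posterior predictive standard deviation alongside the mean:

```python
import numpy as np
from sklearn.linear_model import ARDRegression

rng = np.random.RandomState(0)
X = rng.randn(50, 3)
y = X @ np.array([1.0, 0.0, -2.0]) + 0.1 * rng.randn(50)

model = ARDRegression().fit(X, y)

# With return_std=True the predictive std accompanies the mean.
y_mean, y_std = model.predict(X[:3], return_std=True)
print(y_mean.shape, y_std.shape)   # (3,) (3,)
```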
  {
    "library": "tensorflow",
    "name": "stop_loop",
    "source_code": "def stop_loop(self):\n    pass",
    "docstring": "Called when the thread stops.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\training\\coordinator.py",
    "ast_data": "FunctionDef name:stop_loop arg:self arguments arg"
  },
  {
    "library": "tensorflow",
    "name": "get_next",
    "source_code": "def get_next(self, name=None):\n    raise NotImplementedError('Iterating over an `AsyncDistributedIterator` is not supported right now.')",
    "docstring": "Returns the next input from the iterator for all replicas.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\coordinator\\values.py",
    "ast_data": "FunctionDef name:get_next arg:self arg:name arguments arg arg Raise Call"
  },
  {
    "library": "tensorflow",
    "name": "_process_queue",
    "source_code": "def _process_queue(self):\n    self._maybe_delay()\n    while self._should_worker_thread_run:\n        closure = self._cluster.closure_queue.get(tag=self.worker_index)\n        if not self._should_worker_thread_run or closure is None:\n            if closure is not None:\n                closure.mark_cancelled()\n            return\n        if isinstance(closure, ResourceClosure):\n            self._process_resource_closure(closure)\n        else:\n            self._process_closure(closure)\n        del closure",
    "docstring": "Function running in a worker thread to process closure queues.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\coordinator\\cluster_coordinator.py",
    "ast_data": "FunctionDef name:_process_queue arg:self arguments arg Call While Assign Call If BoolOp Compare If Compare Call Return return:no If Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "_prepare_forward_infra",
    "source_code": "def _prepare_forward_infra(self, num_microbatches: int, args: tuple[Any, ...], kwargs: Optional[dict[str, Any]]=None) -> tuple[Any, ...]:\n    for chunk in range(num_microbatches):\n        self.args_recv_info[chunk] = self._create_act_recv_info()\n    self.act_send_info = self._create_act_send_info()\n    return tuple()",
    "docstring": "Create send/recv infrastructures for activations (during forward)",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\pipelining\\stage.py",
    "ast_data": "FunctionDef name:_prepare_forward_infra arg:self arg:num_microbatches arg:args arg:kwargs arguments arg arg arg arg For Call Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_load_execution",
    "source_code": "def _load_execution(self):\n    execution_iter = self._reader.execution_iterator()\n    for debug_event, offset in execution_iter:\n        self._execution_digests.append(_execution_digest_from_debug_event_proto(debug_event, offset))\n        if self._monitors:\n            execution = _execution_from_debug_event_proto(debug_event, offset)\n            for monitor in self._monitors:\n                monitor.on_execution(len(self._execution_digests) - 1, execution)",
    "docstring": "Incrementally read the .execution file.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\debug\\lib\\debug_events_reader.py",
    "ast_data": "FunctionDef name:_load_execution arg:self arguments arg Assign Call For Call Call If Assign Call For Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_build_meta_graph",
    "source_code": "def _build_meta_graph(obj, signatures, options: save_options.SaveOptions, meta_graph_def: meta_graph_pb2.MetaGraphDef=None):\n    with save_context.save_context(options):\n        return _build_meta_graph_impl(obj, signatures, options, meta_graph_def)",
    "docstring": "Creates a MetaGraph under a save context. Args: obj: A trackable object to build the MetaGraph from. signatures: Can be a with an input signature specified or the result of on a -decorated function . may also be a dictionary, in which case it maps from signature keys to instances. If None, finds signature to export from the -decorated methods in . options: object that specifies options for saving. meta_graph_def: Optional, the MetaGraphDef proto fill. Raises: AssertionError: If is executing inside a . ValueError: If is not trackable. Returns: meta_graph_def: Filled MetaGraphDef proto exported_graph: object generated from . object_saver: of the and its dependencies. asset_info: tuple containing external assets in the . saveable_view.nodes: _SaveableView nodes. saveable_view.node_paths: _SaveableView paths.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\saved_model\\save.py",
    "ast_data": "FunctionDef name:_build_meta_graph arg:obj arg:signatures arg:options arg:meta_graph_def arguments arg arg arg arg With Call Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "_sanitize_ndim",
    "source_code": "def _sanitize_ndim(result: ArrayLike, data, dtype: DtypeObj | None, index: Index | None, *, allow_2d: bool=False) -> ArrayLike:\n    if getattr(result, 'ndim', 0) == 0:\n        raise ValueError('result should be arraylike with ndim > 0')\n    if result.ndim == 1:\n        result = _maybe_repeat(result, index)\n    elif result.ndim > 1:\n        if isinstance(data, np.ndarray):\n            if allow_2d:\n                return result\n            raise ValueError(f'Data must be 1-dimensional, got ndarray of shape {data.shape} instead')\n        if is_object_dtype(dtype) and isinstance(dtype, ExtensionDtype):\n            result = com.asarray_tuplesafe(data, dtype=np.dtype('object'))\n            cls = dtype.construct_array_type()\n            result = cls._from_sequence(result, dtype=dtype)\n        else:\n            result = com.asarray_tuplesafe(data, dtype=dtype)\n    return result",
    "docstring": "Ensure we have a 1-dimensional result array.",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\construction.py",
    "ast_data": "FunctionDef name:_sanitize_ndim arg:result arg:data arg:dtype arg:index arguments arg arg arg arg arg If Compare Call Raise Call If Compare Assign Call If Compare If Call If Return return:yes Raise Call If BoolOp Call Call Assign Call Call Assign Call Assign Call Assign Call Return return:yes"
  },
  {
    "library": "django",
    "name": "center",
    "source_code": "@register.filter(is_safe=True)\n@stringfilter\ndef center(value, arg):\n    return value.center(int(arg))",
    "docstring": "Center the value in a field of a given width.",
    "type": "function",
    "file_path": "django\\django\\template\\defaultfilters.py",
    "ast_data": "FunctionDef name:center arg:value arg:arg arguments arg arg Return return:yes Call Call Call"
  },
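In a template this is used as `{{ value|center:"11" }}`; calling the filter function directly shows the same `str.center` behavior (the width argument is coerced with `int`):

```python
from django.template.defaultfilters import center

# 6-character string centered in a field of width 11:
# 2 spaces of padding on the left, 3 on the right.
print(repr(center("django", "11")))   # '  django   '
```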
  {
    "library": "pandas",
    "name": "last",
    "source_code": "@final\ndef last(self, numeric_only: bool=False, min_count: int=-1, skipna: bool=True) -> NDFrameT:\n\n    def last_compat(obj: NDFrameT):\n\n        def last(x: Series):\n            arr = x.array[notna(x.array)]\n            if not len(arr):\n                return x.array.dtype.na_value\n            return arr[-1]\n        if isinstance(obj, DataFrame):\n            return obj.apply(last)\n        elif isinstance(obj, Series):\n            return last(obj)\n        else:\n            raise TypeError(type(obj))\n    return self._agg_general(numeric_only=numeric_only, min_count=min_count, alias='last', npfunc=last_compat, skipna=skipna)",
    "docstring": "Compute the last entry of each column within each group. Defaults to skipping NA elements. Parameters ---------- numeric_only : bool, default False Include only float, int, boolean columns. If None, will attempt to use everything, then use only numeric data. min_count : int, default -1 The required number of valid values to perform the operation. If fewer than `` valid values are present the result will be NA. skipna : bool, default True Exclude NA/null values. If an entire group is NA, the result will be NA. .. versionadded:: 2.2.1 Returns ------- Series or DataFrame Last of values within each group. See Also -------- DataFrame.groupby : Apply a function groupby to each row or column of a DataFrame. core.groupby.DataFrameGroupBy.first : Compute the first non-null entry of each column. core.groupby.DataFrameGroupBy.nth : Take the nth row from each group. Examples -------- >>> df = pd.DataFrame(dict(A=[1, 1, 3], B=[5, None, 6], C=[1, 2, 3])) >>> df.groupby(\"A\").last() B C A 1 5.0 2 3 6.0 3",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\groupby\\groupby.py",
    "ast_data": "FunctionDef name:last arg:self arg:numeric_only arg:min_count arg:skipna arguments arg arg arg arg FunctionDef name:last_compat arg:obj arguments arg FunctionDef name:last arg:x arguments arg Assign Call If Call Return return:yes Return return:yes If Call Return return:yes Call If Call Return return:yes Call Raise Call Call Return return:yes Call"
  },
  {
    "library": "cherrypy",
    "name": "callable",
    "source_code": "def callable(self, *args, **kwargs):\n    innerfunc = cherrypy.serving.request.handler\n\n    def wrap(*args, **kwargs):\n        return self.newhandler(innerfunc, *args, **kwargs)\n    cherrypy.serving.request.handler = wrap",
    "docstring": "Decorate a request handler with a handler tool callable.",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\_cptools.py",
    "ast_data": "FunctionDef name:callable arg:self arguments arg arg arg Assign FunctionDef name:wrap arguments arg arg Return return:yes Call Assign"
  },
  {
    "library": "pandas",
    "name": "result_ilocs",
    "source_code": "@final\n@cache_readonly\ndef result_ilocs(self) -> npt.NDArray[np.intp]:\n    ids = self.ids\n    if self.has_dropped_na:\n        mask = np.where(ids >= 0)\n        null_gaps = np.cumsum(ids == -1)[mask]\n        ids = ids[mask]\n    result = get_group_index_sorter(ids, self.ngroups)\n    if self.has_dropped_na:\n        result += np.take(null_gaps, result)\n    return result",
    "docstring": "Get the original integer locations of result_index in the input.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\groupby\\ops.py",
    "ast_data": "FunctionDef name:result_ilocs arg:self arguments arg Assign If Assign Call Compare Assign Call Compare Assign Assign Call If Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "assign_add",
    "source_code": "def assign_add(self, delta, use_locking=False, name=None, read_value=True):\n    raise NotImplementedError",
    "docstring": "Adds a value to this variable. This is essentially a shortcut for . Args: delta: A . The value to add to this variable. use_locking: If , use locking during the operation. name: The name of the operation to be created read_value: if True, will return something which evaluates to the new value of the variable; if False will return the assign op. Returns: The updated variable. If is false, instead returns None in Eager mode and the assign op in graph mode.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\variables.py",
    "ast_data": "FunctionDef name:assign_add arg:self arg:delta arg:use_locking arg:name arg:read_value arguments arg arg arg arg arg Raise"
  },
  {
    "library": "pandas",
    "name": "IOArgs",
    "source_code": "@dataclasses.dataclass\nclass IOArgs:\n    filepath_or_buffer: str | BaseBuffer\n    encoding: str\n    mode: str\n    compression: CompressionDict\n    should_close: bool = False",
    "docstring": "Return value of io/common.py:_get_filepath_or_buffer.",
    "type": "class",
    "file_path": "pandas\\pandas\\io\\common.py",
    "ast_data": "ClassDef name:IOArgs"
  },
  {
    "library": "numpy",
    "name": "_fromnxfunction_single",
    "source_code": "class _fromnxfunction_single(_fromnxfunction):\n\n    def __call__(self, x, *args, **params):\n        func = getattr(np, self.__name__)\n        if isinstance(x, ndarray):\n            _d = func(x.__array__(), *args, **params)\n            _m = func(getmaskarray(x), *args, **params)\n            return masked_array(_d, mask=_m)\n        else:\n            _d = func(np.asarray(x), *args, **params)\n            _m = func(getmaskarray(x), *args, **params)\n            return masked_array(_d, mask=_m)",
    "docstring": "A version of that is called with a single array argument followed by auxiliary args that are passed verbatim for both the data and mask calls.",
    "type": "class",
    "file_path": "numpy\\numpy\\ma\\extras.py",
    "ast_data": "ClassDef name:_fromnxfunction_single FunctionDef name:__call__ arg:self arg:x arguments arg arg arg arg Assign Call If Call Assign Call Call Assign Call Call Return return:yes Call Assign Call Call Assign Call Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_FakeServer",
    "source_code": "class _FakeServer(object):\n\n    def start(self):\n        logging.info('Creating a remote session to start a TensorFlow server, target = %r, session_config=%r', target, session_config)\n        session.Session(target=target, config=session_config)\n\n    def join(self):\n        while True:\n            time.sleep(5)",
    "docstring": "A fake server that runs a master session.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\distribute_coordinator.py",
    "ast_data": "ClassDef name:_FakeServer FunctionDef name:start arg:self arguments arg Call Call FunctionDef name:join arg:self arguments arg While Call"
  },
  {
    "library": "scipy",
    "name": "_distinct_permutations",
    "source_code": "def _distinct_permutations(iterable):\n    items = sorted(iterable)\n    size = len(items)\n    while True:\n        yield tuple(items)\n        for i in range(size - 2, -1, -1):\n            if items[i] < items[i + 1]:\n                break\n        else:\n            return\n        for j in range(size - 1, i, -1):\n            if items[i] < items[j]:\n                break\n        items[i], items[j] = (items[j], items[i])\n        items[i + 1:] = items[:i - size:-1]",
    "docstring": "Find the number of distinct permutations of elements of .",
    "type": "function",
    "file_path": "scipy\\scipy\\integrate\\_rules\\_genz_malik.py",
    "ast_data": "FunctionDef name:_distinct_permutations arg:iterable arguments arg Assign Call Assign Call While Call For Call If Compare Return return:no For Call If Compare Assign Assign"
  },
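The generator produces each multiset permutation exactly once, in lexicographic order, via the classic next-permutation sweep. Its output can be cross-checked against deduplicated `itertools.permutations`; importing the private scipy name is for illustration only and may move between releases:

```python
from itertools import permutations

from scipy.integrate._rules._genz_malik import _distinct_permutations  # private

items = [1, 1, 2]
got = list(_distinct_permutations(items))
print(got)  # [(1, 1, 2), (1, 2, 1), (2, 1, 1)] -- 3 results, not 3! = 6

# Naive route for comparison: generate all n! tuples, then deduplicate.
assert got == sorted(set(permutations(items)))
```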
  {
    "library": "numpy",
    "name": "_parse_multi_target",
    "source_code": "def _parse_multi_target(self, targets):\n    if not targets:\n        self.dist_fatal(\"empty multi-target '()'\")\n    if not all([self.feature_is_exist(tar) for tar in targets]):\n        self.dist_fatal('invalid target name in multi-target', targets)\n    if not all([tar in self.parse_baseline_names or tar in self.parse_dispatch_names for tar in targets]):\n        return None\n    targets = self.feature_ahead(targets)\n    if not targets:\n        return None\n    targets = self.feature_sorted(targets)\n    targets = tuple(targets)\n    return targets",
    "docstring": "validate multi targets that defined between parentheses()",
    "type": "method",
    "file_path": "numpy\\numpy\\distutils\\ccompiler_opt.py",
    "ast_data": "FunctionDef name:_parse_multi_target arg:self arg:targets arguments arg arg If Call If Call Call Call If Call BoolOp Compare Compare Return return:no Assign Call If Return return:no Assign Call Assign Call Return return:yes"
  },
  {
    "library": "pandas",
    "name": "length",
    "source_code": "@property\ndef length(self) -> Index:\n    return Index(self._data.length, copy=False)",
    "docstring": "Calculate the length of each interval in the IntervalIndex. This method returns a new Index containing the lengths of each interval in the IntervalIndex. The length of an interval is defined as the difference between its end and its start. Returns ------- Index An Index containing the lengths of each interval. See Also -------- Interval.length : Return the length of the Interval. Examples -------- >>> intervals = pd.IntervalIndex.from_arrays( ... [1, 2, 3], [4, 5, 6], closed=\"right\" ... ) >>> intervals.length Index([3, 3, 3], dtype='int64') >>> intervals = pd.IntervalIndex.from_tuples([(1, 5), (6, 10), (11, 15)]) >>> intervals.length Index([4, 4, 4], dtype='int64')",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\indexes\\interval.py",
    "ast_data": "FunctionDef name:length arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "tmean",
    "source_code": "@xp_capabilities()\n@_axis_nan_policy_factory(lambda x: x, n_outputs=1, default_axis=None, result_to_tuple=lambda x, _: (x,))\ndef tmean(a, limits=None, inclusive=(True, True), axis=None):\n    xp = array_namespace(a)\n    a, mask = _put_val_to_limits(a, limits, inclusive, val=0.0, xp=xp)\n    sum = xp.sum(a, axis=axis, dtype=a.dtype)\n    n = xp.sum(xp.asarray(~mask, dtype=a.dtype), axis=axis, dtype=a.dtype)\n    mean = xpx.apply_where(n != 0, (sum, n), operator.truediv, fill_value=xp.nan)\n    return mean[()] if mean.ndim == 0 else mean",
    "docstring": "Compute the trimmed mean. This function finds the arithmetic mean of given values, ignoring values outside the given . Parameters ---------- a : array_like Array of values. limits : None or (lower limit, upper limit), optional Values in the input array less than the lower limit or greater than the upper limit will be ignored. When limits is None (default), then all values are used. Either of the limit values in the tuple can also be None representing a half-open interval. inclusive : (bool, bool), optional A tuple consisting of the (lower flag, upper flag). These flags determine whether values exactly equal to the lower or upper limits are included. The default value is (True, True). axis : int or None, optional Axis along which to compute test. Default is None. Returns ------- tmean : ndarray Trimmed mean. See Also -------- trim_mean : Returns mean after trimming a proportion from both tails. Examples -------- >>> import numpy as np >>> from scipy import stats >>> x = np.arange(20) >>> stats.tmean(x) 9.5 >>> stats.tmean(x, (3,17)) 10.0",
    "type": "function",
    "file_path": "scipy\\scipy\\stats\\_stats_py.py",
    "ast_data": "FunctionDef name:tmean arg:a arg:limits arg:inclusive arg:axis arguments arg arg arg arg Assign Call Assign Call Assign Call Assign Call Call Assign Call Compare Return return:yes Compare Call Call arguments arg arguments arg arg"
  },
  {
    "library": "pytorch",
    "name": "save_global_state",
    "source_code": "def save_global_state(self, out=None):\n    global_state = cast(dict[str, tuple[Callable[..., Any], bool]], out if out is not None else self.tracing_context.global_context.global_state)\n    global_state['grad_enabled'] = (torch.set_grad_enabled, torch.is_grad_enabled())\n    global_state['autocast_enabled'] = (functools.partial(torch.set_autocast_enabled, 'cuda'), torch.is_autocast_enabled('cuda'))\n    global_state['autocast_cpu_enabled'] = (functools.partial(torch.set_autocast_enabled, 'cpu'), torch.is_autocast_enabled('cpu'))\n    global_state['autocast_gpu_dtype'] = (functools.partial(torch.set_autocast_dtype, 'cuda'), torch.get_autocast_dtype('cuda'))\n    global_state['autocast_cpu_dtype'] = (functools.partial(torch.set_autocast_dtype, 'cpu'), torch.get_autocast_dtype('cpu'))\n    global_state['autocast_cache_enabled'] = (torch.set_autocast_cache_enabled, torch.is_autocast_cache_enabled())",
    "docstring": "Saves to out if it is provided. Else saves to the tracing context's global_state.",
    "type": "method",
    "file_path": "pytorch\\torch\\_dynamo\\output_graph.py",
    "ast_data": "FunctionDef name:save_global_state arg:self arg:out arguments arg arg Assign Call Compare Assign Call Assign Call Call Assign Call Call Assign Call Call Assign Call Call Assign Call"
  },
  {
    "library": "scipy",
    "name": "C",
    "source_code": "@property\ndef C(self):\n    return self._C",
    "docstring": "Output matrix of the system.",
    "type": "method",
    "file_path": "scipy\\scipy\\signal\\_ltisys.py",
    "ast_data": "FunctionDef name:C arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "while_stmt",
    "source_code": "def while_stmt(test, body, get_state, set_state, symbol_names, opts):\n    with func_graph.FuncGraph('tmp').as_default():\n        init_test = test()\n    if tensors.is_dense_tensor(init_test):\n        _tf_while_stmt(test, body, get_state, set_state, symbol_names, opts)\n        return\n    if not init_test:\n        return\n    body()\n    _py_while_stmt(test, body, get_state, set_state, opts)",
    "docstring": "Functional form of a while statement. The loop operates on a so-called state, which includes all symbols that are variant across loop iterations. In what follows we refer to state as either a tuple of entities that represent an actual state, or a list of arguments of the corresponding types. The inputs and outputs of the callables representing the loop blocks are not explicit - instead, these functions must use nonlocal/global for side effects. The inputs and outputs are instead controlled by the set_state/get_state functions. Args: test: Callable with boolean return type. The loop condition. body: Callable representing the actual loop body. get_state: Additional callable which can capture additional state (such as the values of composite symbols). This is only useful when staging the loop. set_state: Additional callable which save values captured by get_state back into the Python environment. This is only useful when staging the loop. symbol_names: Tuple containing the names of all loop variables. opts: Optional dict of extra loop parameters. Returns: Tuple containing the final state.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\autograph\\operators\\control_flow.py",
    "ast_data": "FunctionDef name:while_stmt arg:test arg:body arg:get_state arg:set_state arg:symbol_names arg:opts arguments arg arg arg arg arg arg With Call Call Assign Call If Call Call Return return:no If Return return:no Call Call"
  },
  {
    "library": "tensorflow",
    "name": "TrainOutput",
    "source_code": "class TrainOutput(_SupervisedOutput):\n\n    def _get_signature_def_fn(self):\n        return signature_def_utils.supervised_train_signature_def",
    "docstring": "Represents the output of a supervised training process. This class generates the appropriate signature def for exporting training output by type-checking and wrapping loss, predictions, and metrics values.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\saved_model\\model_utils\\export_output.py",
    "ast_data": "ClassDef name:TrainOutput FunctionDef name:_get_signature_def_fn arg:self arguments arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "setup_dom_indexing",
    "source_code": "def setup_dom_indexing(self):\n    prefix = 'i' if self.inside_reduction else 'o'\n    if prefix in self.dom_renames:\n        return self.dom_renames[prefix]\n    renames = {}\n    for var in self.halide_vars.keys():\n        if not self.inside_reduction and var in self.reduction_renames:\n            continue\n        m = re.match('^h(\\\\d+)$', var.name)\n        assert m\n        renames[var] = sympy_index_symbol(f'h{prefix}{m.group(1)}')\n    self.codegen_rdom(f'{prefix}dom', {rv: self.halide_vars[v] for v, rv in renames.items()})\n    self.dom_renames[prefix] = renames\n    return renames",
    "docstring": "RDom based indexing uses explicit iteration ranges for Func updates",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\codegen\\halide.py",
    "ast_data": "FunctionDef name:setup_dom_indexing arg:self arguments arg Assign If Compare Return return:yes Assign For Call If BoolOp Compare Assign Call Assign Call Call Call Call Assign Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "transform",
    "source_code": "def transform(self, X):\n    check_is_fitted(self, 'categories_')\n    X_int, X_mask = self._transform(X, handle_unknown=self.handle_unknown, ensure_all_finite='allow-nan', ignore_category_indices=self._missing_indices)\n    X_trans = X_int.astype(self.dtype, copy=False)\n    for cat_idx, missing_idx in self._missing_indices.items():\n        X_missing_mask = X_int[:, cat_idx] == missing_idx\n        X_trans[X_missing_mask, cat_idx] = self.encoded_missing_value\n    if self.handle_unknown == 'use_encoded_value':\n        X_trans[~X_mask] = self.unknown_value\n    return X_trans",
    "docstring": "Transform X to ordinal codes. Parameters ---------- X : array-like of shape (n_samples, n_features) The data to encode. Returns ------- X_out : ndarray of shape (n_samples, n_features) Transformed input.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\preprocessing\\_encoders.py",
    "ast_data": "FunctionDef name:transform arg:self arg:X arguments arg arg Call Assign Call Assign Call For Call Assign Compare Assign If Compare Assign Return return:yes"
  },
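A usage sketch showing all three code paths in `transform`: ordinary categories, missing values remapped to `encoded_missing_value`, and unknowns remapped to `unknown_value` (requires a recent scikit-learn with `encoded_missing_value` support):

```python
import numpy as np
from sklearn.preprocessing import OrdinalEncoder

X_train = np.array([["cat"], ["dog"], [np.nan]], dtype=object)
enc = OrdinalEncoder(handle_unknown="use_encoded_value", unknown_value=-1,
                     encoded_missing_value=-2).fit(X_train)

# Known categories get their ordinal code, NaN maps to encoded_missing_value,
# and categories never seen in fit map to unknown_value.
X_test = np.array([["dog"], [np.nan], ["fish"]], dtype=object)
print(enc.transform(X_test).ravel())   # [ 1. -2. -1.]
```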
  {
    "library": "django",
    "name": "formfield_for_foreignkey",
    "source_code": "def formfield_for_foreignkey(self, db_field, request, **kwargs):\n    db = kwargs.get('using')\n    if 'widget' not in kwargs:\n        if db_field.name in self.get_autocomplete_fields(request):\n            kwargs['widget'] = AutocompleteSelect(db_field, self.admin_site, using=db)\n        elif db_field.name in self.raw_id_fields:\n            kwargs['widget'] = widgets.ForeignKeyRawIdWidget(db_field.remote_field, self.admin_site, using=db)\n        elif db_field.name in self.radio_fields:\n            kwargs['widget'] = widgets.AdminRadioSelect(attrs={'class': get_ul_class(self.radio_fields[db_field.name])})\n            kwargs['empty_label'] = kwargs.get('empty_label', _('None')) if db_field.blank else None\n    if 'queryset' not in kwargs:\n        queryset = self.get_field_queryset(db, db_field, request)\n        if queryset is not None:\n            kwargs['queryset'] = queryset\n    return db_field.formfield(**kwargs)",
    "docstring": "Get a form Field for a ForeignKey.",
    "type": "method",
    "file_path": "django\\django\\contrib\\admin\\options.py",
    "ast_data": "FunctionDef name:formfield_for_foreignkey arg:self arg:db_field arg:request arguments arg arg arg arg Assign Call If Compare If Compare Call Assign Call If Compare Assign Call If Compare Assign Call Call Assign Call Call If Compare Assign Call If Compare Assign Return return:yes Call"
  },
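The canonical use is overriding this hook in a `ModelAdmin` to swap the widget or narrow the queryset before delegating upward; a sketch with a hypothetical `Car` model and its `owner` foreign key:

```python
from django.contrib import admin
from django.contrib.auth import get_user_model


class CarAdmin(admin.ModelAdmin):  # Car and its "owner" FK are hypothetical
    def formfield_for_foreignkey(self, db_field, request, **kwargs):
        if db_field.name == "owner":
            # Pre-filter the dropdown before the default widget logic runs.
            kwargs["queryset"] = get_user_model().objects.filter(is_staff=True)
        return super().formfield_for_foreignkey(db_field, request, **kwargs)
```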
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, learning_rate, initial_accumulator_value=0.1, l1_regularization_strength=0.0, l2_regularization_strength=0.0, use_locking=False, name='ProximalAdagrad'):\n    if initial_accumulator_value <= 0.0:\n        raise ValueError('initial_accumulator_value must be positive: %s' % initial_accumulator_value)\n    super(ProximalAdagradOptimizer, self).__init__(use_locking, name)\n    self._learning_rate = learning_rate\n    self._initial_accumulator_value = initial_accumulator_value\n    self._l1_regularization_strength = l1_regularization_strength\n    self._l2_regularization_strength = l2_regularization_strength\n    self._l1_regularization_strength_tensor = None\n    self._l2_regularization_strength_tensor = None\n    self._learning_rate_tensor = None",
    "docstring": "Construct a new ProximalAdagrad optimizer. Args: learning_rate: A or a floating point value. The learning rate. initial_accumulator_value: A floating point value. Starting value for the accumulators, must be positive. l1_regularization_strength: A float value, must be greater than or equal to zero. l2_regularization_strength: A float value, must be greater than or equal to zero. use_locking: If use locks for update operations. name: Optional name prefix for the operations created when applying gradients. Defaults to \"Adagrad\". Raises: ValueError: If the is invalid.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\training\\proximal_adagrad.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:learning_rate arg:initial_accumulator_value arg:l1_regularization_strength arg:l2_regularization_strength arg:use_locking arg:name arguments arg arg arg arg arg arg arg If Compare Raise Call Call Call Assign Assign Assign Assign Assign Assign Assign"
  },
  {
    "library": "scikit-learn",
    "name": "partial_fit",
    "source_code": "@_fit_context(prefer_skip_nested_validation=True)\ndef partial_fit(self, X, y=None):\n    return self",
    "docstring": "Only validates estimator's parameters. This method allows to: (i) validate the estimator's parameters and (ii) be consistent with the scikit-learn transformer API. Parameters ---------- X : ndarray of shape [n_samples, n_features] Training data. y : Ignored Not used, present for API consistency by convention. Returns ------- self : object HashingVectorizer instance.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\feature_extraction\\text.py",
    "ast_data": "FunctionDef name:partial_fit arg:self arg:X arg:y arguments arg arg arg Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "update_from",
    "source_code": "def update_from(self, other):\n    artist.Artist.update_from(self, other)\n    self._antialiaseds = other._antialiaseds\n    self._mapped_colors = other._mapped_colors\n    self._edge_is_mapped = other._edge_is_mapped\n    self._original_edgecolor = other._original_edgecolor\n    self._edgecolors = other._edgecolors\n    self._face_is_mapped = other._face_is_mapped\n    self._original_facecolor = other._original_facecolor\n    self._facecolors = other._facecolors\n    self._linewidths = other._linewidths\n    self._linestyles = other._linestyles\n    self._us_linestyles = other._us_linestyles\n    self._pickradius = other._pickradius\n    self._hatch = other._hatch\n    self._hatchcolors = other._hatchcolors\n    self._A = other._A\n    self.norm = other.norm\n    self.cmap = other.cmap\n    self.stale = True",
    "docstring": "Copy properties from other to self.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\collections.py",
    "ast_data": "FunctionDef name:update_from arg:self arg:other arguments arg arg Call Assign Assign Assign Assign Assign Assign Assign Assign Assign Assign Assign Assign Assign Assign Assign Assign Assign Assign"
  },
  {
    "library": "tensorflow",
    "name": "_create_none_optionals",
    "source_code": "def _create_none_optionals(func_graph, n):\n    with func_graph.as_default():\n        return [gen_optional_ops.optional_none() for _ in range(n)]",
    "docstring": "Creates optionals in func_graph. Args: func_graph: FuncGraph. n: the number of optionals to make. Returns: A list of tensors in func_graph.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\cond_v2.py",
    "ast_data": "FunctionDef name:_create_none_optionals arg:func_graph arg:n arguments arg arg With Call Return return:yes Call Call"
  },
  {
    "library": "matplotlib",
    "name": "set_z",
    "source_code": "def set_z(self, z):\n    self._z = z\n    self.stale = True",
    "docstring": "Set the *z* position of the text. Parameters ---------- z : float",
    "type": "method",
    "file_path": "matplotlib\\lib\\mpl_toolkits\\mplot3d\\art3d.py",
    "ast_data": "FunctionDef name:set_z arg:self arg:z arguments arg arg Assign Assign"
  },
  {
    "library": "pytorch",
    "name": "node_ctor_inputs",
    "source_code": "def node_ctor_inputs(schema: LazyIrSchema) -> str:\n    node_ctor_values = [node_ctor_arg_rvalue_string(arg) for arg in schema.filtered_args()]\n    return ', '.join(node_ctor_values)",
    "docstring": "Produce a formatted string with the arguments as passed into the constructor of a node class.",
    "type": "function",
    "file_path": "pytorch\\torchgen\\dest\\lazy_ir.py",
    "ast_data": "FunctionDef name:node_ctor_inputs arg:schema arguments arg Assign Call Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "true_positives_at_thresholds",
    "source_code": "@tf_export(v1=['metrics.true_positives_at_thresholds'])\ndef true_positives_at_thresholds(labels, predictions, thresholds, weights=None, metrics_collections=None, updates_collections=None, name=None):\n    if context.executing_eagerly():\n        raise RuntimeError('tf.metrics.true_positives_at_thresholds is not supported when eager execution is enabled.')\n    with variable_scope.variable_scope(name, 'true_positives', (predictions, labels, weights)):\n        values, update_ops = _confusion_matrix_at_thresholds(labels, predictions, thresholds, weights=weights, includes=('tp',))\n        tp_value = _aggregate_variable(values['tp'], metrics_collections)\n        if updates_collections:\n            ops.add_to_collections(updates_collections, update_ops['tp'])\n        return (tp_value, update_ops['tp'])",
    "docstring": "Computes true positives at provided threshold values. If is , weights default to 1. Use weights of 0 to mask values. Args: labels: A whose shape matches . Will be cast to . predictions: A floating point of arbitrary shape and whose values are in the range . thresholds: A python list or tuple of float thresholds in . weights: Optional whose rank is either 0, or the same rank as , and must be broadcastable to (i.e., all dimensions must be either , or the same as the corresponding dimension). metrics_collections: An optional list of collections that should be added to. updates_collections: An optional list of collections that should be added to. name: An optional variable_scope name. Returns: true_positives: A float of shape . update_op: An operation that updates the variable and returns its current value. Raises: ValueError: If and have mismatched shapes, or if is not and its shape doesn't match , or if either or are not a list or tuple. RuntimeError: If eager execution is enabled.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\metrics_impl.py",
    "ast_data": "FunctionDef name:true_positives_at_thresholds arg:labels arg:predictions arg:thresholds arg:weights arg:metrics_collections arg:updates_collections arg:name arguments arg arg arg arg arg arg arg If Call Raise Call With Call Assign Call Assign Call If Call Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "finalize",
    "source_code": "def finalize(self):\n    self.endStream()\n    self._write_annotations()\n    self.writeFonts()\n    self.writeExtGSTates()\n    self._write_soft_mask_groups()\n    self.writeHatches()\n    self.writeGouraudTriangles()\n    xobjects = {name: ob for image, name, ob in self._images.values()}\n    for tup in self.markers.values():\n        xobjects[tup[0]] = tup[1]\n    for name, value in self.multi_byte_charprocs.items():\n        xobjects[name] = value\n    for name, path, trans, ob, join, cap, padding, filled, stroked in self.paths:\n        xobjects[name] = ob\n    self.writeObject(self.XObjectObject, xobjects)\n    self.writeImages()\n    self.writeMarkers()\n    self.writePathCollectionTemplates()\n    self.writeObject(self.pagesObject, {'Type': Name('Pages'), 'Kids': self.pageList, 'Count': len(self.pageList)})\n    self.writeInfoDict()\n    self.writeXref()\n    self.writeTrailer()",
    "docstring": "Write out the various deferred objects and the pdf end matter.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\backends\\backend_pdf.py",
    "ast_data": "FunctionDef name:finalize arg:self arguments arg Call Call Call Call Call Call Call Assign Call For Call Assign For Call Assign For Assign Call Call Call Call Call Call Call Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "close",
    "source_code": "def close(self):\n    with self._not_full:\n        self._closed = True\n        self._not_full.notify_all()",
    "docstring": "Closes the queue, causing any pending or future calls to fail.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\summary\\writer\\event_file_writer.py",
    "ast_data": "FunctionDef name:close arg:self arguments arg With Assign Call"
  },
  {
    "library": "tensorflow",
    "name": "parse_example_spec",
    "source_code": "@property\ndef parse_example_spec(self):\n    return self.categorical_column.parse_example_spec",
    "docstring": "See base class.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\feature_column\\feature_column_v2.py",
    "ast_data": "FunctionDef name:parse_example_spec arg:self arguments arg Return return:yes"
  },
  {
    "library": "pandas",
    "name": "normalize_keyword_aggregation",
    "source_code": "def normalize_keyword_aggregation(kwargs: dict) -> tuple[MutableMapping[Hashable, list[AggFuncTypeBase]], tuple[str, ...], npt.NDArray[np.intp]]:\n    from pandas.core.indexes.base import Index\n    aggspec = defaultdict(list)\n    order = []\n    columns = tuple(kwargs.keys())\n    for column, aggfunc in kwargs.values():\n        aggspec[column].append(aggfunc)\n        order.append((column, com.get_callable_name(aggfunc) or aggfunc))\n    uniquified_order = _make_unique_kwarg_list(order)\n    aggspec_order = [(column, com.get_callable_name(aggfunc) or aggfunc) for column, aggfuncs in aggspec.items() for aggfunc in aggfuncs]\n    uniquified_aggspec = _make_unique_kwarg_list(aggspec_order)\n    col_idx_order = Index(uniquified_aggspec).get_indexer(uniquified_order)\n    return (aggspec, columns, col_idx_order)",
    "docstring": "Normalize user-provided \"named aggregation\" kwargs. Transforms from the new `` style kwargs to the old Dict[str, List[scalar]]]. Parameters ---------- kwargs : dict Returns ------- aggspec : dict The transformed kwargs. columns : tuple[str, ...] The user-provided keys. col_idx_order : List[int] List of columns indices. Examples -------- >>> normalize_keyword_aggregation({\"output\": (\"input\", \"sum\")}) (defaultdict(, {'input': ['sum']}), ('output',), array([0]))",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\apply.py",
    "ast_data": "FunctionDef name:normalize_keyword_aggregation arg:kwargs arguments arg Assign Call Assign Assign Call Call For Call Call Call BoolOp Call Assign Call Assign BoolOp Call Call Assign Call Assign Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_expand_ellipsis",
    "source_code": "def _expand_ellipsis(key_list, num_remaining_dims):\n    if num_remaining_dims is None:\n        raise ValueError('Ellipsis not supported for unknown shape RaggedTensors')\n    num_indices = sum((1 for idx in key_list if idx is not array_ops.newaxis))\n    if num_indices > num_remaining_dims + 1:\n        raise IndexError('Too many indices for RaggedTensor')\n    elif num_indices == num_remaining_dims + 1:\n        return key_list[1:]\n    else:\n        return [slice(None, None, None)] + key_list",
    "docstring": "Expands the ellipsis at the start of . Assumes that the first element of is Ellipsis. This will either remove the Ellipsis (if it corresponds to zero indices) or prepend a new (if it corresponds to more than zero indices). Args: key_list: The arguments to . num_remaining_dims: The number of dimensions remaining. Returns: A copy of with he ellipsis expanded. Raises: ValueError: If ragged_rank.shape.ndims is None IndexError: If there are too many elements in .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\ragged_getitem.py",
    "ast_data": "FunctionDef name:_expand_ellipsis arg:key_list arg:num_remaining_dims arguments arg arg If Compare Raise Call Assign Call Compare If Compare Raise Call If Compare Return return:yes Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "device_index",
    "source_code": "class device_index:\n\n    def __init__(self, device: Optional[int], /) -> None:\n        self.idx = device\n        self.prev_idx = -1\n\n    def __enter__(self) -> None:\n        if self.idx is not None:\n            self.prev_idx = torch._C._accelerator_exchangeDevice(self.idx)\n\n    def __exit__(self, *args: object) -> Literal[False]:\n        if self.idx is not None:\n            torch._C._accelerator_maybeExchangeDevice(self.prev_idx)\n        return False",
    "docstring": "Context manager to set the current device index for the current :ref:. Temporarily changes the current device index to the specified value for the duration of the context, and automatically restores the previous device index when exiting the context. Args: device (Optional[int]): a given device index to temporarily set. If None, no device index switching occurs. Examples: >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_CUDA) >>> # Set device 0 as the current device temporarily >>> with torch.accelerator.device_index(0): ... # Code here runs with device 0 as the current device ... pass >>> # Original device is now restored >>> # No-op when None is passed >>> with torch.accelerator.device_index(None): ... # No device switching occurs ... pass",
    "type": "class",
    "file_path": "pytorch\\torch\\accelerator\\__init__.py",
    "ast_data": "ClassDef name:device_index FunctionDef name:__init__ arguments arg arg Assign Assign FunctionDef name:__enter__ arg:self arguments arg If Compare Assign Call FunctionDef name:__exit__ arg:self arguments arg arg If Compare Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "get_window_extent",
    "source_code": "def get_window_extent(self, renderer=None):\n    return self.bbox",
    "docstring": "Return the Axes bounding box in display space. This bounding box does not include the spines, ticks, ticklabels, or other labels. For a bounding box including these elements use . See Also -------- matplotlib.axes.Axes.get_tightbbox matplotlib.axis.Axis.get_tightbbox matplotlib.spines.Spine.get_window_extent",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\axes\\_base.py",
    "ast_data": "FunctionDef name:get_window_extent arg:self arg:renderer arguments arg arg Return return:yes"
  },
  {
    "library": "kornia",
    "name": "ssim_loss",
    "source_code": "def ssim_loss(img1: torch.Tensor, img2: torch.Tensor, window_size: int, max_val: float=1.0, eps: float=1e-12, reduction: str='mean', padding: str='same') -> torch.Tensor:\n    ssim_map: torch.Tensor = metrics.ssim(img1, img2, window_size, max_val, eps, padding)\n    loss = torch.clamp((1.0 - ssim_map) / 2, min=0, max=1)\n    if reduction == 'mean':\n        loss = torch.mean(loss)\n    elif reduction == 'sum':\n        loss = torch.sum(loss)\n    elif reduction == 'none':\n        pass\n    else:\n        raise NotImplementedError('Invalid reduction option.')\n    return loss",
    "docstring": "Compute a loss based on the SSIM measurement. The loss, or the Structural dissimilarity (DSSIM) is described as: .. math:: \\text{loss}(x, y) = \\frac{1 - \\text{SSIM}(x, y)}{2} See :meth: for details about SSIM. Args: img1: the first input image with shape :math:. img2: the second input image with shape :math:. window_size: the size of the gaussian kernel to smooth the images. max_val: the dynamic range of the images. eps: Small value for numerically stability when dividing. reduction : Specifies the reduction to apply to the output: ``. Whether to only use the \"valid\" convolution area to compute SSIM to match the MATLAB implementation of original SSIM paper. Returns: The loss based on the ssim index. Examples: >>> input1 = torch.rand(1, 4, 5, 5) >>> input2 = torch.rand(1, 4, 5, 5) >>> loss = ssim_loss(input1, input2, 5)",
    "type": "function",
    "file_path": "kornia\\kornia\\losses\\ssim.py",
    "ast_data": "FunctionDef name:ssim_loss arg:img1 arg:img2 arg:window_size arg:max_val arg:eps arg:reduction arg:padding arguments arg arg arg arg arg arg arg Call Assign Call If Compare Assign Call If Compare Assign Call If Compare Raise Call Return return:yes"
  },
  {
    "library": "scrapy",
    "name": "parallel_async",
    "source_code": "def parallel_async(async_iterable: AsyncIterator[_T], count: int, callable: Callable[Concatenate[_T, _P], Deferred[Any] | None], *args: _P.args, **named: _P.kwargs) -> Deferred[list[tuple[bool, Iterator[Deferred[Any]]]]]:\n    coop = Cooperator()\n    work: Iterator[Deferred[Any]] = _AsyncCooperatorAdapter(async_iterable, callable, *args, **named)\n    dl: Deferred[list[tuple[bool, Iterator[Deferred[Any]]]]] = DeferredList([coop.coiterate(work) for _ in range(count)])\n    return dl",
    "docstring": "Like `` but for async iterators",
    "type": "function",
    "file_path": "scrapy\\scrapy\\utils\\defer.py",
    "ast_data": "FunctionDef name:parallel_async arg:async_iterable arg:count arg:callable arguments arg arg arg arg arg Assign Call Call Call Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "UserDefinedListVariable",
    "source_code": "class UserDefinedListVariable(UserDefinedObjectVariable):\n    _nonvar_fields = UserDefinedObjectVariable._nonvar_fields\n\n    def __init__(self, value, list_vt=None, **kwargs):\n        super().__init__(value, **kwargs)\n        self._list_vt = list_vt\n        if self._list_vt is None:\n            assert self.source is None, 'list_vt must be constructed by builder.py when source is present'\n            self._list_vt = variables.ListVariable([], mutation_type=ValueMutationNew())\n\n    def call_method(self, tx, name, args: 'list[VariableTracker]', kwargs: 'dict[str, VariableTracker]') -> 'VariableTracker':\n        assert self._list_vt is not None\n        method = self._maybe_get_baseclass_method(name)\n        if method in list_methods:\n            return self._list_vt.call_method(tx, name, args, kwargs)\n        return super().call_method(tx, name, args, kwargs)\n\n    def unpack_var_sequence(self, tx):\n        assert self._list_vt is not None\n        if type(self.value).__iter__ is list.__iter__:\n            return self._list_vt.unpack_var_sequence(tx)\n        raise NotImplementedError\n\n    def is_underlying_vt_modified(self, side_effects):\n        return side_effects.is_modified(self._list_vt)",
    "docstring": "Represents user defined objects that are subclasses of lists. Internally, it uses a ListVariable to represent the list part of the variable tracker. For everything else, it falls back to UserDefinedObjectVariable.",
    "type": "class",
    "file_path": "pytorch\\torch\\_dynamo\\variables\\user_defined.py",
    "ast_data": "ClassDef name:UserDefinedListVariable Assign FunctionDef name:__init__ arg:self arg:value arg:list_vt arguments arg arg arg arg Call Call Assign If Compare Compare Assign Call Call FunctionDef name:call_method arg:self arg:tx arg:name arg:args arg:kwargs arguments arg arg arg arg arg Compare Assign Call If Compare Return return:yes Call Return return:yes Call Call FunctionDef name:unpack_var_sequence arg:self arg:tx arguments arg arg Compare If Compare Call Return return:yes Call Raise FunctionDef name:is_underlying_vt_modified arg:self arg:side_effects arguments arg arg Return return:yes Call"
  },
  {
    "library": "kornia",
    "name": "to_pil",
    "source_code": "def to_pil(self, x: Any) -> Image.Image:\n    if isinstance(x, (Tensor,)):\n        x = x.cpu().detach() * 255\n        if x.dim() == 3:\n            x = x.permute(1, 2, 0)\n            return Image.fromarray(x.byte().numpy())\n        elif x.dim() == 4:\n            x = x.permute(0, 2, 3, 1)\n            return [Image.fromarray(_x.byte().numpy()) for _x in x]\n        else:\n            raise NotImplementedError\n    if isinstance(x, (np.ndarray,)):\n        raise NotImplementedError\n    if isinstance(x, (Image.Image,)):\n        return x\n    raise TypeError('Input type not supported')",
    "docstring": "Convert input to PIL image. Args: x: The input to convert. Returns: Image.Image: The converted PIL image.",
    "type": "method",
    "file_path": "kornia\\kornia\\core\\module.py",
    "ast_data": "FunctionDef name:to_pil arg:self arg:x arguments arg arg If Call Assign Call Call If Compare Call Assign Call Return return:yes Call Call Call If Compare Call Assign Call Return return:yes Call Call Call Raise If Call Raise If Call Return return:yes Raise Call"
  },
  {
    "library": "pandas",
    "name": "__array__",
    "source_code": "def __array__(self, dtype: NpDtype | None=None, copy: bool | None=None) -> np.ndarray:\n    if copy is False:\n        raise ValueError('Unable to avoid copy while creating an array as requested.')\n    elif copy is None:\n        copy = False\n    return self.to_numpy(dtype=dtype, copy=copy)",
    "docstring": "Correctly construct numpy arrays when passed to .",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\arrays\\arrow\\array.py",
    "ast_data": "FunctionDef name:__array__ arg:self arg:dtype arg:copy arguments arg arg arg If Compare Raise Call If Compare Assign Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "set_dtype_conversion_mode",
    "source_code": "def set_dtype_conversion_mode(dtype_conversion_mode) -> None:\n    global _dtype_conversion_mode\n    _dtype_conversion_mode = _get_promo_mode_enum(dtype_conversion_mode)",
    "docstring": "Enables the specified dtype conversion mode. Args: dtype_conversion_mode: a string that specifies dtype conversion mode. This string corresponds to a PromoMode Enum and can be 'off', 'legacy', 'safe' or 'all'.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\ops.py",
    "ast_data": "FunctionDef name:set_dtype_conversion_mode arg:dtype_conversion_mode arguments arg Assign Call"
  },
  {
    "library": "scipy",
    "name": "ifft",
    "source_code": "def ifft(x, n=None, axis=-1, overwrite_x=False):\n    return _pocketfft.ifft(x, n, axis, None, overwrite_x)",
    "docstring": "Return discrete inverse Fourier transform of real or complex sequence. The returned complex array contains `xxnnx` is real, a \"real IFFT\" algorithm is automatically used, which roughly halves the computation time. Examples -------- >>> from scipy.fftpack import fft, ifft >>> import numpy as np >>> x = np.arange(5) >>> np.allclose(ifft(fft(x)), x, atol=1e-15) # within numerical accuracy. True",
    "type": "function",
    "file_path": "scipy\\scipy\\fftpack\\_basic.py",
    "ast_data": "FunctionDef name:ifft arg:x arg:n arg:axis arg:overwrite_x arguments arg arg arg arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "code_with_constants",
    "source_code": "@property\ndef code_with_constants(self):\n    r = self.forward.code_with_constants\n    return (r[0], ConstMap(r[1]))",
    "docstring": "Return a tuple. Returns a tuple of: [0] a pretty-printed representation (as valid Python syntax) of the internal graph for the `codeinspecting-code` for details.",
    "type": "method",
    "file_path": "pytorch\\torch\\jit\\_script.py",
    "ast_data": "FunctionDef name:code_with_constants arg:self arguments arg Assign Return return:yes Call"
  },
  {
    "library": "django",
    "name": "default_urlconf",
    "source_code": "def default_urlconf(request):\n    with builtin_template_path('default_urlconf.html').open(encoding='utf-8') as fh:\n        t = DEBUG_ENGINE.from_string(fh.read())\n    c = Context({'version': get_docs_version()})\n    return HttpResponse(t.render(c))",
    "docstring": "Create an empty URLconf 404 error response.",
    "type": "function",
    "file_path": "django\\django\\views\\debug.py",
    "ast_data": "FunctionDef name:default_urlconf arg:request arguments arg With Call Call Assign Call Call Assign Call Call Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "get_env_variable_or_raise",
    "source_code": "def get_env_variable_or_raise(env_name: str) -> str:\n    value = os.environ.get(env_name, None)\n    if value is None:\n        msg = f'Environment variable {env_name} expected, but not set'\n        raise ValueError(msg)\n    return value",
    "docstring": "Tries to retrieve environment variable. Raises `` if no environment variable found. Args: env_name (str): Name of the env variable",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\elastic\\utils\\api.py",
    "ast_data": "FunctionDef name:get_env_variable_or_raise arg:env_name arguments arg Assign Call If Compare Assign Raise Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "BinConstraintD",
    "source_code": "class BinConstraintD(BinaryConstraint):\n\n    def __init__(self, lhs, rhs, op):\n        assert is_algebraic_expression(lhs) or is_dim(lhs) or is_bool_expr(lhs)\n        assert is_algebraic_expression(rhs) or is_dim(rhs) or is_bool_expr(rhs)\n        super().__init__(lhs, rhs, op)\n\n    def __eq__(self, other):\n        return super().__eq__(other)",
    "docstring": "Binary constraints about dimensions",
    "type": "class",
    "file_path": "pytorch\\torch\\fx\\experimental\\migrate_gradual_types\\constraint.py",
    "ast_data": "ClassDef name:BinConstraintD FunctionDef name:__init__ arg:self arg:lhs arg:rhs arg:op arguments arg arg arg arg BoolOp Call Call Call BoolOp Call Call Call Call Call FunctionDef name:__eq__ arg:self arg:other arguments arg arg Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "get",
    "source_code": "def get(self):\n    self._wait_and_maybe_error()\n    return self._copy_to_local()",
    "docstring": "Retrieve value with no caching to ensure we get the up-to-date value.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\coordinator\\values.py",
    "ast_data": "FunctionDef name:get arg:self arguments arg Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "start",
    "source_code": "def start():\n    check_error(cudart().cudaProfilerStart())",
    "docstring": "Starts cuda profiler data collection. .. warning:: Raises CudaError in case of it is unable to start the profiler.",
    "type": "function",
    "file_path": "pytorch\\torch\\cuda\\profiler.py",
    "ast_data": "FunctionDef name:start arguments Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "make_gradient_clipnorm_fn",
    "source_code": "def make_gradient_clipnorm_fn(clipnorm):\n    if clipnorm is None:\n        return lambda grads_and_vars: grads_and_vars\n\n    def gradient_clipnorm_fn(grads_and_vars):\n        if isinstance(distribute_lib.get_strategy(), (central_storage_strategy.CentralStorageStrategy, central_storage_strategy.CentralStorageStrategyV1)):\n            raise ValueError('`clipnorm` is not supported with `CenteralStorageStrategy`')\n        clipped_grads_and_vars = [(clip_ops.clip_by_norm(g, clipnorm), v) for g, v in grads_and_vars]\n        return clipped_grads_and_vars\n    return gradient_clipnorm_fn",
    "docstring": "Creates a gradient transformation function for clipping by norm.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\optimizer_v2\\utils.py",
    "ast_data": "FunctionDef name:make_gradient_clipnorm_fn arg:clipnorm arguments arg If Compare Return return:yes arguments arg FunctionDef name:gradient_clipnorm_fn arg:grads_and_vars arguments arg If Call Call Raise Call Assign Call Return return:yes Return return:yes"
  },
  {
    "library": "kornia",
    "name": "RgbToLuv",
    "source_code": "class RgbToLuv(Module):\n    ONNX_DEFAULT_INPUTSHAPE: ClassVar[list[int]] = [-1, 3, -1, -1]\n    ONNX_DEFAULT_OUTPUTSHAPE: ClassVar[list[int]] = [-1, 3, -1, -1]\n\n    def forward(self, image: torch.Tensor) -> torch.Tensor:\n        return rgb_to_luv(image)",
    "docstring": "Convert an image from RGB to Luv. The image data is assumed to be in the range of :math:. Luv color is computed using the D65 illuminant and Observer 2. Returns: Luv version of the image. Shape: - image: :math: - output: :math: Examples: >>> input = torch.rand(2, 3, 4, 5) >>> luv = RgbToLuv() >>> output = luv(input) # 2x3x4x5 Reference: [1] [2] [3]",
    "type": "class",
    "file_path": "kornia\\kornia\\color\\luv.py",
    "ast_data": "ClassDef name:RgbToLuv FunctionDef name:forward arg:self arg:image arguments arg arg Return return:yes Call"
  },
  {
    "library": "kornia",
    "name": "from_coeffs",
    "source_code": "@classmethod\ndef from_coeffs(cls, w: float, x: float, y: float, z: float) -> 'Quaternion':\n    return cls(tensor([w, x, y, z]))",
    "docstring": "Create a quaternion from the data coefficients. Args: w: a float representing the :math: component. x: a float representing the :math: component. y: a float representing the :math: component. z: a float representing the :math: component. Example: >>> q = Quaternion.from_coeffs(1., 0., 0., 0.) >>> q.data Parameter containing: tensor([1., 0., 0., 0.], requires_grad=True)",
    "type": "method",
    "file_path": "kornia\\kornia\\geometry\\quaternion.py",
    "ast_data": "FunctionDef name:from_coeffs arg:cls arg:w arg:x arg:y arg:z arguments arg arg arg arg arg Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "ones_like_v2",
    "source_code": "@tf_export('ones_like', v1=[])\n@dispatch.register_unary_elementwise_api\n@dispatch.add_dispatch_support\ndef ones_like_v2(input, dtype=None, name=None, layout=None):\n    with ops.name_scope(name, 'ones_like', [input]) as name:\n        return array_like_impl(ones, gen_array_ops.ones_like, input, dtype, name, optimize=True, layout=layout)",
    "docstring": "Creates a tensor of all ones that has the same shape as the input. See also . Given a single tensor (), this operation returns a tensor of the same type and shape as with all elements set to 1. Optionally, you can use to specify a new type for the returned tensor. For example: >>> tensor = tf.constant([[1, 2, 3], [4, 5, 6]]) >>> tf.ones_like(tensor) Note that the layout of the input tensor is not preserved if the op is used inside tf.function. To obtain a tensor with the same layout as the input, chain the returned value to a . Args: input: A . dtype: A type for the returned . Must be , , , , , , , , , , , or . name: A name for the operation (optional). layout: Optional, . If provided, the result is a [DTensor]( with the provided layout. Returns: A with all elements set to one.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\array_ops.py",
    "ast_data": "FunctionDef name:ones_like_v2 arg:input arg:dtype arg:name arg:layout arguments arg arg arg arg With Call Return return:yes Call Call"
  },
  {
    "library": "django",
    "name": "_mysql_storage_engine",
    "source_code": "@cached_property\ndef _mysql_storage_engine(self):\n    return self.connection.mysql_server_data['default_storage_engine']",
    "docstring": "Internal method used in Django tests. Don't rely on this from your code",
    "type": "method",
    "file_path": "django\\django\\db\\backends\\mysql\\features.py",
    "ast_data": "FunctionDef name:_mysql_storage_engine arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "ThreadingOptions",
    "source_code": "@deprecation.deprecated_endpoints('data.experimental.ThreadingOptions')\n@tf_export('data.experimental.ThreadingOptions', 'data.ThreadingOptions')\nclass ThreadingOptions(options_lib.OptionsBase):\n    max_intra_op_parallelism = options_lib.create_option(name='max_intra_op_parallelism', ty=int, docstring='If set, it overrides the maximum degree of intra-op parallelism.')\n    private_threadpool_size = options_lib.create_option(name='private_threadpool_size', ty=int, docstring='If set, the dataset will use a private threadpool of the given size. The value 0 can be used to indicate that the threadpool size should be determined at runtime based on the number of available CPU cores.')\n\n    def _to_proto(self):\n        pb = dataset_options_pb2.ThreadingOptions()\n        if self.max_intra_op_parallelism is not None:\n            pb.max_intra_op_parallelism = self.max_intra_op_parallelism\n        if self.private_threadpool_size is not None:\n            pb.private_threadpool_size = self.private_threadpool_size\n        return pb\n\n    def _from_proto(self, pb):\n        if pb.WhichOneof('optional_max_intra_op_parallelism') is not None:\n            self.max_intra_op_parallelism = pb.max_intra_op_parallelism\n        if pb.WhichOneof('optional_private_threadpool_size') is not None:\n            self.private_threadpool_size = pb.private_threadpool_size",
    "docstring": "Represents options for dataset threading. You can set the threading options of a dataset through the property of ; the property is an instance of .",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\data\\ops\\options.py",
    "ast_data": "ClassDef name:ThreadingOptions Assign Call Assign Call FunctionDef name:_to_proto arg:self arguments arg Assign Call If Compare Assign If Compare Assign Return return:yes FunctionDef name:_from_proto arg:self arg:pb arguments arg arg If Compare Call Assign If Compare Call Assign Call Call"
  },
  {
    "library": "pytorch",
    "name": "mark_dirty",
    "source_code": "@abstractmethod\ndef mark_dirty(self) -> None:\n    pass",
    "docstring": "Mark the local state as dirty.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\elastic\\rendezvous\\dynamic_rendezvous.py",
    "ast_data": "FunctionDef name:mark_dirty arg:self arguments arg"
  },
  {
    "library": "tensorflow",
    "name": "__iter__",
    "source_code": "def __iter__(self):\n    return iter(self._variable_list)",
    "docstring": "Return an iterable for accessing the underlying partition Variables.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\variables.py",
    "ast_data": "FunctionDef name:__iter__ arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "output_node",
    "source_code": "def output_node(gm: torch.fx.GraphModule) -> Node:\n    last_node = next(iter(reversed(gm.graph.nodes)))\n    assert last_node.op == 'output'\n    return last_node",
    "docstring": "Get the output node from an FX graph",
    "type": "function",
    "file_path": "pytorch\\torch\\_inductor\\utils.py",
    "ast_data": "FunctionDef name:output_node arg:gm arguments arg Assign Call Call Call Compare Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "step_increment",
    "source_code": "@property\ndef step_increment(self):\n    return self._step_increment",
    "docstring": "The number to increment the step for methods.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\data_adapter.py",
    "ast_data": "FunctionDef name:step_increment arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_fix_start_index",
    "source_code": "def _fix_start_index(index, rank, num_row_partitions):\n    if index < 0:\n        if rank is None:\n            raise ValueError('Rank must be known to use __getitem__ on a negative index.')\n        index = rank + index\n    if index < 0:\n        index = 0\n    if num_row_partitions > 0 and index <= num_row_partitions + 1:\n        return index\n    if index == 0:\n        return index\n    if rank is None:\n        raise ValueError('Rank must be known to use __getitem__ on a large index.')\n    if index >= rank:\n        index = rank\n    return index",
    "docstring": "Slice indexes are always silently truncated.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\dynamic_ragged_shape.py",
    "ast_data": "FunctionDef name:_fix_start_index arg:index arg:rank arg:num_row_partitions arguments arg arg arg If Compare If Compare Raise Call Assign If Compare Assign If BoolOp Compare Compare Return return:yes If Compare Return return:yes If Compare Raise Call If Compare Assign Return return:yes"
  },
  {
    "library": "django",
    "name": "get_default",
    "source_code": "def get_default(self):\n    return self._get_default()",
    "docstring": "Return the default value for this field.",
    "type": "method",
    "file_path": "django\\django\\db\\models\\fields\\__init__.py",
    "ast_data": "FunctionDef name:get_default arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "kornia",
    "name": "camtoworld_vision_to_graphics_Rt",
    "source_code": "def camtoworld_vision_to_graphics_Rt(R: Tensor, t: Tensor) -> tuple[Tensor, Tensor]:\n    KORNIA_CHECK_SHAPE(R, ['B', '3', '3'])\n    KORNIA_CHECK_SHAPE(t, ['B', '3', '1'])\n    mat4x4 = camtoworld_vision_to_graphics_4x4(Rt_to_matrix4x4(R, t))\n    return matrix4x4_to_Rt(mat4x4)",
    "docstring": "Convert graphics coordinate frame (e.g. OpenGL) to vision coordinate frame (e.g. OpenCV.). I.e. flips y and z axis. Graphics convention: [+x, +y, +z] == [right, up, backwards]. Vision convention: [+x, +y, +z] == [right, down, forwards] Args: R: Rotation matrix, :math: t: Translation matrix :math:. Returns: R: Rotation matrix, :math: t: Translation matrix :math:. Example: >>> R, t = torch.eye(3)[None], torch.ones(3).reshape(1, 3, 1) >>> camtoworld_vision_to_graphics_Rt(R, t) (tensor([[[ 1., 0., 0.], [ 0., -1., 0.], [ 0., 0., -1.]]]), tensor([[[1.], [1.], [1.]]]))",
    "type": "function",
    "file_path": "kornia\\kornia\\geometry\\conversions.py",
    "ast_data": "FunctionDef name:camtoworld_vision_to_graphics_Rt arg:R arg:t arguments arg arg Call Call Assign Call Call Return return:yes Call"
  },
  {
    "library": "django",
    "name": "process_response",
    "source_code": "def process_response(self, request, response):\n    if not self._should_update_cache(request, response):\n        return response\n    if response.streaming or response.status_code not in (200, 304):\n        return response\n    if not request.COOKIES and response.cookies and has_vary_header(response, 'Cookie'):\n        return response\n    if 'private' in response.get('Cache-Control', ()):\n        return response\n    timeout = self.page_timeout\n    if timeout is None:\n        timeout = get_max_age(response)\n        if timeout is None:\n            timeout = self.cache_timeout\n        elif timeout == 0:\n            return response\n    patch_response_headers(response, timeout)\n    if timeout and response.status_code == 200:\n        cache_key = learn_cache_key(request, response, timeout, self.key_prefix, cache=self.cache)\n        if hasattr(response, 'render') and callable(response.render):\n            response.add_post_render_callback(lambda r: self.cache.set(cache_key, r, timeout))\n        else:\n            self.cache.set(cache_key, response, timeout)\n    return response",
    "docstring": "Set the cache, if needed.",
    "type": "method",
    "file_path": "django\\django\\middleware\\cache.py",
    "ast_data": "FunctionDef name:process_response arg:self arg:request arg:response arguments arg arg arg If Call Return return:yes If BoolOp Compare Return return:yes If BoolOp Call Return return:yes If Compare Call Return return:yes Assign If Compare Assign Call If Compare Assign If Compare Return return:yes Call If BoolOp Compare Assign Call If BoolOp Call Call Call arguments arg Call Call Return return:yes"
  },
  {
    "library": "seaborn",
    "name": "set_xticklabels",
    "source_code": "def set_xticklabels(self, labels=None, step=None, **kwargs):\n    for ax in self.axes.flat:\n        curr_ticks = ax.get_xticks()\n        ax.set_xticks(curr_ticks)\n        if labels is None:\n            curr_labels = [label.get_text() for label in ax.get_xticklabels()]\n            if step is not None:\n                xticks = ax.get_xticks()[::step]\n                curr_labels = curr_labels[::step]\n                ax.set_xticks(xticks)\n            ax.set_xticklabels(curr_labels, **kwargs)\n        else:\n            ax.set_xticklabels(labels, **kwargs)\n    return self",
    "docstring": "Set x axis tick labels of the grid.",
    "type": "method",
    "file_path": "seaborn\\seaborn\\axisgrid.py",
    "ast_data": "FunctionDef name:set_xticklabels arg:self arg:labels arg:step arguments arg arg arg arg For Assign Call Call If Compare Assign Call Call If Compare Assign Call Assign Call Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, use_locking, name, use_own_namescope_for_non_slot_vars=False):\n    if not name:\n        raise ValueError('Must specify the optimizer name')\n    self._use_locking = use_locking\n    self._name = name\n    self._use_own_namescope_for_non_slot_vars = use_own_namescope_for_non_slot_vars\n    if self._use_own_namescope_for_non_slot_vars:\n        with variable_scope.variable_scope(None, default_name=self._name) as vs:\n            self._non_slot_variable_scope = vs\n    self._slots = {}\n    self._non_slot_dict = {}\n    self._deferred_slot_restorations = {}",
    "docstring": "Create a new Optimizer. This must be called by the constructors of subclasses. Args: use_locking: Bool. If True apply use locks to prevent concurrent updates to variables. name: A non-empty string. The name to use for accumulators created for the optimizer. use_own_namescope_for_non_slot_vars: If True, use a root namescope under self._name for non-slot variables. Raises: ValueError: If name is malformed.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\training\\optimizer.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:use_locking arg:name arg:use_own_namescope_for_non_slot_vars arguments arg arg arg arg If Raise Call Assign Assign Assign If With Call Assign Assign Assign Assign"
  },
  {
    "library": "pytorch",
    "name": "is_none_layout",
    "source_code": "def is_none_layout(buf_name: str) -> bool:\n    buf = self.name_to_buf.get(buf_name, None)\n    if buf is None:\n        return False\n    if isinstance(buf.node.layout, NoneLayout):\n        if isinstance(buf.node, ir.MutationOutput) and (real_name := self.mutation_real_name.get(buf_name, None)):\n            return is_none_layout(real_name)\n        return True\n    return False",
    "docstring": "Checks if buf_name is NoneLayout. Buffers with NoneLayout is not allocated so graph partition should not take it as inputs or outputs.",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\scheduler.py",
    "ast_data": "FunctionDef name:is_none_layout arg:buf_name arguments arg Assign Call If Compare Return return:yes If Call If BoolOp Call Call Return return:yes Call Return return:yes Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "AddInnerOp",
    "source_code": "def AddInnerOp(self, op: ops.Operation):\n    if self._outer_context:\n        self._outer_context.AddInnerOp(op)",
    "docstring": "Notifies a scope about an operator added to an inner scope.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\control_flow_ops.py",
    "ast_data": "FunctionDef name:AddInnerOp arg:self arg:op arguments arg arg If Call"
  },
  {
    "library": "pytorch",
    "name": "share_memory_",
    "source_code": "def share_memory_(self):\n    _warn_typed_storage_removal()\n    return self._share_memory_()",
    "docstring": "See :meth:",
    "type": "method",
    "file_path": "pytorch\\torch\\storage.py",
    "ast_data": "FunctionDef name:share_memory_ arg:self arguments arg Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "set_fuser_method",
    "source_code": "def set_fuser_method(self, fuser_method: Callable) -> BackendPatternConfig:\n    self.fuser_method = fuser_method\n    return self",
    "docstring": "Set the function that specifies how to fuse this BackendPatternConfig's pattern. The first argument of this function should be , and the rest of the arguments should be the items in the tuple pattern. The return value of this function should be the resulting fused module. For example, the fuser method for the pattern can be: def fuse_linear_relu(is_qat, linear, relu): return torch.ao.nn.intrinsic.LinearReLU(linear, relu) For a more complicated example, see",
    "type": "method",
    "file_path": "pytorch\\torch\\ao\\quantization\\backend_config\\backend_config.py",
    "ast_data": "FunctionDef name:set_fuser_method arg:self arg:fuser_method arguments arg arg Assign Return return:yes"
  },
  {
    "library": "authlib",
    "name": "create_authorization_url",
    "source_code": "async def create_authorization_url(self, redirect_uri=None, **kwargs):\n    metadata = await self.load_server_metadata()\n    authorization_endpoint = self.authorize_url or metadata.get('authorization_endpoint')\n    if not authorization_endpoint:\n        raise RuntimeError('Missing \"authorize_url\" value')\n    if self.authorize_params:\n        kwargs.update(self.authorize_params)\n    async with self._get_oauth_client(**metadata) as client:\n        client.redirect_uri = redirect_uri\n        return self._create_oauth2_authorization_url(client, authorization_endpoint, **kwargs)",
    "docstring": "Generate the authorization url and state for HTTP redirect. :param redirect_uri: Callback or redirect URI for authorization. :param kwargs: Extra parameters to include. :return: dict",
    "type": "method",
    "file_path": "authlib\\authlib\\integrations\\base_client\\async_app.py",
    "ast_data": "AsyncFunctionDef name:create_authorization_url arg:self arg:redirect_uri arguments arg arg arg Assign Call Assign BoolOp Call If Raise Call If Call Call Assign Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "make_dvi",
    "source_code": "@classmethod\ndef make_dvi(cls, tex, fontsize):\n    basefile = cls.get_basefile(tex, fontsize)\n    dvifile = '%s.dvi' % basefile\n    if not os.path.exists(dvifile):\n        texfile = Path(cls.make_tex(tex, fontsize))\n        cwd = Path(dvifile).parent\n        with TemporaryDirectory(dir=cwd) as tmpdir:\n            tmppath = Path(tmpdir)\n            cls._run_checked_subprocess(['latex', '-interaction=nonstopmode', '--halt-on-error', f'--output-directory={tmppath.name}', f'{texfile.name}'], tex, cwd=cwd)\n            (tmppath / Path(dvifile).name).replace(dvifile)\n    return dvifile",
    "docstring": "Generate a dvi file containing latex's layout of tex string. Return the file name.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\texmanager.py",
    "ast_data": "FunctionDef name:make_dvi arg:cls arg:tex arg:fontsize arguments arg arg arg Assign Call Assign If Call Assign Call Call Assign Call With Call Assign Call Call Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "reduce_scatter",
    "source_code": "@_exception_logger\ndef reduce_scatter(output, input_list, op=ReduceOp.SUM, group=None, async_op=False):\n    _check_single_tensor(output, 'output')\n    _check_tensor_list(input_list, 'input_list')\n    _ensure_all_tensors_same_dtype(output, input_list)\n    if _rank_not_in_group(group):\n        _warn_not_in_group('reduce_scatter')\n        return\n    opts = ReduceScatterOptions()\n    opts.reduceOp = op\n    opts.asyncOp = async_op\n    group = group or _get_default_group()\n    work = group.reduce_scatter([output], [input_list], opts)\n    if async_op:\n        return work\n    elif work is not None:\n        work.wait()",
    "docstring": "Reduces, then scatters a list of tensors to all processes in a group. Args: output (Tensor): Output tensor. input_list (list[Tensor]): List of tensors to reduce and scatter. op (optional): One of the values from `` enum. Specifies an operation used for element-wise reductions. group (ProcessGroup, optional): The process group to work on. If None, the default process group will be used. async_op (bool, optional): Whether this op should be an async op. Returns: Async work handle, if async_op is set to True. None, if not async_op or if not part of the group.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\distributed_c10d.py",
    "ast_data": "FunctionDef name:reduce_scatter arg:output arg:input_list arg:op arg:group arg:async_op arguments arg arg arg arg arg Call Call Call If Call Call Return return:no Assign Call Assign Assign Assign BoolOp Call Assign Call If Return return:yes If Compare Call"
  },
  {
    "library": "matplotlib",
    "name": "__call__",
    "source_code": "def __call__(self, x0, y0, width, height, mutation_size):\n    pad = mutation_size * self.pad\n    width = width + 2 * pad\n    height = height + 2 * pad\n    x0, y0 = (x0 - pad, y0 - pad)\n    x1, y1 = (x0 + width, y0 + height)\n    return Path([(x0, y0), (x1, y0), (x1, y1), (x0, y1), (x0 - pad, (y0 + y1) / 2), (x0, y0), (x0, y0)], closed=True)",
    "docstring": "Given the location and size of the box, return the path of the box around it. Rotation is automatically taken care of. Parameters ---------- x0, y0, width, height : float Box location and size. mutation_size : float Reference scale for the mutation, typically the text font size.",
    "type": "method",
    "file_path": "matplotlib\\galleries\\users_explain\\text\\annotations.py",
    "ast_data": "FunctionDef name:__call__ arg:self arg:x0 arg:y0 arg:width arg:height arg:mutation_size arguments arg arg arg arg arg arg Assign Assign Assign Assign Assign Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "definition",
    "source_code": "@property\ndef definition(self) -> function_pb2.FunctionDef:\n    return self._bound_context.get_function_def(self.name)",
    "docstring": "Current FunctionDef in the Runtime.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\polymorphic_function\\atomic_function.py",
    "ast_data": "FunctionDef name:definition arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "transform_feature",
    "source_code": "@abc.abstractmethod\ndef transform_feature(self, transformation_cache, state_manager):\n    pass",
    "docstring": "Returns intermediate representation (usually a ). Uses to create an intermediate representation (usually a ) that other feature columns can use. Example usage of : Let's say a Feature column depends on raw feature ('raw') and another (input_fc). To access corresponding s, transformation_cache will be used as follows: Args: transformation_cache: A object to access features. state_manager: A to create / access resources such as lookup tables. Returns: Transformed feature .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\feature_column\\feature_column_v2_types.py",
    "ast_data": "FunctionDef name:transform_feature arg:self arg:transformation_cache arg:state_manager arguments arg arg arg"
  },
  {
    "library": "tensorflow",
    "name": "apply_to_tensor",
    "source_code": "def apply_to_tensor(self, tensor, assign_tuple_sharding=False, use_sharding_op=False, unspecified_dims=None):\n    if unspecified_dims:\n        assert use_sharding_op and (not assign_tuple_sharding)\n    proto = self._proto\n    if isinstance(tensor, resource_variable_ops.BaseResourceVariable) and context.xla_sharding_for_resource_variables_enabled():\n        if assign_tuple_sharding:\n            proto = self._create_tuple_proto(num_outputs=1)\n        tensor._set_xla_sharding(proto)\n        return tensor\n    if use_sharding_op:\n        if assign_tuple_sharding:\n            proto = self._create_tuple_proto(num_outputs=1)\n            tensor = tf2xla.sharding(tensor, sharding=proto.SerializeToString())\n        else:\n            tensor = tf2xla.sharding(tensor, sharding=proto.SerializeToString(), unspecified_dims=unspecified_dims or [])\n    elif assign_tuple_sharding or len(tensor.op.outputs) > 1:\n        proto = self._get_or_create_tuple_proto(tensor.op)\n        tuple_shardings = list(proto.tuple_shardings)\n        tuple_shardings[tensor.value_index] = self._proto\n        proto = xla_data_pb2.OpSharding(type=xla_data_pb2.OpSharding.TUPLE, tuple_shardings=tuple_shardings)\n    tensor.op._set_attr('_XlaSharding', attr_value_pb2.AttrValue(s=proto.SerializeToString()))\n    return tensor",
    "docstring": "Applies this Sharding attribute to . Args: tensor: A tf.Tensor to split. assign_tuple_sharding: If the sharding type should be a tuple. use_sharding_op: Whether to create a sharding op on . unspecified_dims: An optional list of dimensions unspecified. Returns: The tensor with Sharding attribute.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\compiler\\xla\\experimental\\xla_sharding.py",
    "ast_data": "FunctionDef name:apply_to_tensor arg:self arg:tensor arg:assign_tuple_sharding arg:use_sharding_op arg:unspecified_dims arguments arg arg arg arg arg If BoolOp Assign If BoolOp Call Call If Assign Call Call Return return:yes If If Assign Call Assign Call Call Assign Call Call BoolOp If BoolOp Compare Call Assign Call Assign Call Assign Assign Call Call Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_get_raw_feature_as_tensor",
    "source_code": "def _get_raw_feature_as_tensor(self, key):\n    raw_feature = self._features[key]\n    feature_tensor = sparse_tensor_lib.convert_to_tensor_or_sparse_tensor(raw_feature)\n\n    def expand_dims(input_tensor):\n        if isinstance(input_tensor, sparse_tensor_lib.SparseTensor):\n            return sparse_ops.sparse_reshape(input_tensor, [array_ops.shape(input_tensor)[0], 1])\n        else:\n            return array_ops.expand_dims(input_tensor, -1)\n    rank = feature_tensor.get_shape().ndims\n    if rank is not None:\n        if rank == 0:\n            raise ValueError('Feature (key: {}) cannot have rank 0. Given: {}'.format(key, feature_tensor))\n        return feature_tensor if rank != 1 else expand_dims(feature_tensor)\n    with ops.control_dependencies([check_ops.assert_positive(array_ops.rank(feature_tensor), message='Feature (key: {}) cannot have rank 0. Given: {}'.format(key, feature_tensor))]):\n        return cond.cond(math_ops.equal(1, array_ops.rank(feature_tensor)), lambda: expand_dims(feature_tensor), lambda: feature_tensor)",
    "docstring": "Gets the raw_feature (keyed by ) as . The raw feature is converted to (sparse) tensor and maybe expand dim. For both and , the rank will be expanded (to 2) if the rank is 1. This supports dynamic rank also. For rank 0 raw feature, will error out as it is not supported. Args: key: A key to access the raw feature. Returns: A or . Raises: ValueError: if the raw feature has rank 0.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\feature_column\\feature_column_v2.py",
    "ast_data": "FunctionDef name:_get_raw_feature_as_tensor arg:self arg:key arguments arg arg Assign Assign Call FunctionDef name:expand_dims arg:input_tensor arguments arg If Call Return return:yes Call Call Return return:yes Call Assign Call If Compare If Compare Raise Call Call Return return:yes Compare Call With Call Call Call Call Return return:yes Call Call Call arguments Call arguments"
  },
  {
    "library": "tensorflow",
    "name": "get_float",
    "source_code": "def get_float(self, min_float=_MIN_FLOAT, max_float=_MAX_FLOAT):\n    return self.fdp.ConsumeFloatInRange(min_float, max_float)",
    "docstring": "Consume a float with given constraints. Args: min_float: Minimum allowed float. max_float: Maximum allowed float. Returns: Consumed float based on input bytes and constraints.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\security\\fuzzing\\python_fuzzing.py",
    "ast_data": "FunctionDef name:get_float arg:self arg:min_float arg:max_float arguments arg arg arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "clean_tag",
    "source_code": "def clean_tag(name):\n    if name is not None:\n        new_name = _INVALID_TAG_CHARACTERS.sub('_', name)\n        new_name = new_name.lstrip('/')\n        if new_name != name:\n            tf_logging.info('Summary name %s is illegal; using %s instead.' % (name, new_name))\n            name = new_name\n    return name",
    "docstring": "Cleans a tag. Removes illegal characters for instance. Args: name: The original tag name to be processed. Returns: The cleaned tag name.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\summary_op_util.py",
    "ast_data": "FunctionDef name:clean_tag arg:name arguments arg If Compare Assign Call Assign Call If Compare Call Assign Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "_patch_raw_predict",
    "source_code": "@contextmanager\ndef _patch_raw_predict(estimator, raw_predictions):\n    orig_raw_predict = estimator._raw_predict\n\n    def _patched_raw_predicts(*args, **kwargs):\n        return raw_predictions\n    estimator._raw_predict = _patched_raw_predicts\n    yield estimator\n    estimator._raw_predict = orig_raw_predict",
    "docstring": "Context manager that patches _raw_predict to return raw_predictions. is typically a precomputed array to avoid redundant state-wise computations fitting with early stopping enabled: in this case is incrementally updated whenever we add a tree to the boosted ensemble. Note: this makes fitting HistGradientBoosting* models inherently non thread safe at fit time. However thread-safety at fit time was never guaranteed nor enforced for scikit-learn estimators in general. Thread-safety at prediction/transform time is another matter as those operations are typically side-effect free and therefore often thread-safe by default for most scikit-learn models and would like to keep it that way. Therefore this context manager should only be used at fit time. TODO: in the future, we could explore the possibility to extend the scorer public API to expose a way to compute vales from raw predictions. That would probably require also making the scorer aware of the inverse link function used by the estimator which is typically private API for now, hence the need for this patching mechanism.",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\ensemble\\_hist_gradient_boosting\\gradient_boosting.py",
    "ast_data": "FunctionDef name:_patch_raw_predict arg:estimator arg:raw_predictions arguments arg arg Assign FunctionDef name:_patched_raw_predicts arguments arg arg Return return:yes Assign Assign"
  },
  {
    "library": "pygame",
    "name": "_set_visible",
    "source_code": "def _set_visible(self, val):\n    self._visible = val\n    if self.dirty < 2:\n        self.dirty = 1",
    "docstring": "set the visible value (0 or 1) and makes the sprite dirty",
    "type": "method",
    "file_path": "pygame\\src_py\\sprite.py",
    "ast_data": "FunctionDef name:_set_visible arg:self arg:val arguments arg arg Assign If Compare Assign"
  },
  {
    "library": "matplotlib",
    "name": "_premultiplied_argb32_to_unmultiplied_rgba8888",
    "source_code": "def _premultiplied_argb32_to_unmultiplied_rgba8888(buf):\n    rgba = np.take(buf, [2, 1, 0, 3] if sys.byteorder == 'little' else [1, 2, 3, 0], axis=2)\n    rgb = rgba[..., :-1]\n    alpha = rgba[..., -1]\n    mask = alpha != 0\n    for channel in np.rollaxis(rgb, -1):\n        channel[mask] = (channel[mask].astype(int) * 255 + alpha[mask] // 2) // alpha[mask]\n    return rgba",
    "docstring": "Convert a premultiplied ARGB32 buffer to an unmultiplied RGBA8888 buffer.",
    "type": "function",
    "file_path": "matplotlib\\lib\\matplotlib\\cbook.py",
    "ast_data": "FunctionDef name:_premultiplied_argb32_to_unmultiplied_rgba8888 arg:buf arguments arg Assign Call Compare Assign Assign Assign Compare For Call Assign Call Return return:yes"
  },
  {
    "library": "numpy",
    "name": "truncate",
    "source_code": "def truncate(self, size):\n    isize = int(size)\n    if isize != size or isize < 1:\n        raise ValueError('size must be a positive integer')\n    if isize >= len(self.coef):\n        coef = self.coef\n    else:\n        coef = self.coef[:isize]\n    return self.__class__(coef, self.domain, self.window, self.symbol)",
    "docstring": "Truncate series to length . Reduce the series to length by discarding the high degree terms. The value of must be a positive integer. This can be useful in least squares where the coefficients of the high degree terms may be very small. Parameters ---------- size : positive int The series is reduced to length by discarding the high degree terms. The value of must be a positive integer. Returns ------- new_series : series New instance of series with truncated coefficients.",
    "type": "method",
    "file_path": "numpy\\numpy\\polynomial\\_polybase.py",
    "ast_data": "FunctionDef name:truncate arg:self arg:size arguments arg arg Assign Call If BoolOp Compare Compare Raise Call If Compare Call Assign Assign Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "gen_ops",
    "source_code": "def gen_ops(self):\n    filtered_instances = list(filter(self.filter_op, ops()))\n    random.seed(-11)\n    chosen_instances = random.sample(filtered_instances, min(len(filtered_instances), config.rocm.ck_tile_max_profiling_configs)) if config.rocm.ck_tile_max_profiling_configs else filtered_instances\n    log.debug('generated %d ck instances after filter: %s', len(chosen_instances), chosen_instances)\n    return chosen_instances",
    "docstring": "Creates a list of instances that match the GEMM operation this template represents. The instances are guaranteed to have the correct layout, dtype and dimension padding for the GEMM input arguments. An instance may invalidate the GEMM configuration at runtime. Such instances will be assigned +inf runtime by the autotune process.",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\codegen\\rocm\\ck_tile_universal_gemm_template.py",
    "ast_data": "FunctionDef name:gen_ops arg:self arguments arg Assign Call Call Call Call Assign Call Call Call Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "merge_from",
    "source_code": "def merge_from(self, other):\n    assert not self.is_final\n    if self.parent is not None:\n        assert other.parent is not None\n        self.parent.merge_from(other.parent)\n    self.isolated_names.update(other.isolated_names)\n    self.read.update(other.read)\n    self.modified.update(other.modified)\n    self.bound.update(other.bound)\n    self.deleted.update(other.deleted)\n    self.annotations.update(other.annotations)\n    self.params.update(other.params)",
    "docstring": "Adds all activity from another scope to this scope.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\autograph\\pyct\\static_analysis\\activity.py",
    "ast_data": "FunctionDef name:merge_from arg:self arg:other arguments arg arg If Compare Compare Call Call Call Call Call Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_recreate_default",
    "source_code": "def _recreate_default(self, proto, node_id, deps):\n    factory = {'user_object': lambda: self._recreate_user_object(proto.user_object, node_id), 'function': lambda: self._recreate_function(proto.function, deps), 'bare_concrete_function': functools.partial(self._recreate_bare_concrete_function, proto=proto.bare_concrete_function, dependencies=deps), 'variable': lambda: self._recreate_variable(proto.variable), 'captured_tensor': functools.partial(self._get_tensor_from_fn, proto.captured_tensor)}\n    kind = proto.WhichOneof('kind')\n    if kind not in factory:\n        raise ValueError(f'Unknown SavedObject type: {kind}. Expected one of {list(factory.keys())}.')\n    return factory[kind]()",
    "docstring": "Creates a Python object from a SavedObject protocol buffer.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\saved_model\\load.py",
    "ast_data": "FunctionDef name:_recreate_default arg:self arg:proto arg:node_id arg:deps arguments arg arg arg arg Assign arguments Call arguments Call Call arguments Call Call Assign Call If Compare Raise Call Call Call Return return:yes Call"
  },
  {
    "library": "sphinx",
    "name": "NewLineStreamHandler",
    "source_code": "class NewLineStreamHandler(logging.StreamHandler['SafeEncodingWriter']):\n\n    def emit(self, record: logging.LogRecord) -> None:\n        try:\n            self.acquire()\n            if getattr(record, 'nonl', False):\n                self.terminator = ''\n            super().emit(record)\n        finally:\n            self.terminator = '\\n'\n            self.release()",
    "docstring": "StreamHandler which switches line terminator by record.nonl flag.",
    "type": "class",
    "file_path": "sphinx\\sphinx\\util\\logging.py",
    "ast_data": "ClassDef name:NewLineStreamHandler FunctionDef name:emit arg:self arg:record arguments arg arg Try Call If Call Assign Call Call Assign Call"
  },
  {
    "library": "cherrypy",
    "name": "NullHandler",
    "source_code": "class NullHandler(logging.Handler):\n\n    def handle(self, record):\n        pass\n\n    def emit(self, record):\n        pass\n\n    def createLock(self):\n        self.lock = None",
    "docstring": "A no-op logging handler to silence the logging.lastResort handler.",
    "type": "class",
    "file_path": "cherrypy\\cherrypy\\_cplogging.py",
    "ast_data": "ClassDef name:NullHandler FunctionDef name:handle arg:self arg:record arguments arg arg FunctionDef name:emit arg:self arg:record arguments arg arg FunctionDef name:createLock arg:self arguments arg Assign"
  },
  {
    "library": "kornia",
    "name": "unproject",
    "source_code": "def unproject(self, point_2d: Tensor, depth: Tensor) -> Tensor:\n    P = self.intrinsics @ self.extrinsics\n    P_inv = _torch_inverse_cast(P)\n    return transform_points(P_inv, convert_points_to_homogeneous(point_2d) * depth)",
    "docstring": "Unproject a 2d point in 3d. Transform coordinates in the pixel frame to the world frame. Args: point_2d: tensor containing the 2d to be projected to world coordinates. The shape of the tensor can be :math:. depth: tensor containing the depth value of each 2d points. The tensor shape must be equal to point2d :math:. normalize: whether to normalize the pointcloud. This must be set to when the depth is represented as the Euclidean ray length from the camera position. Returns: tensor of (x, y, z) world coordinates with shape :math:. Example: >>> _ = torch.manual_seed(0) >>> x = torch.rand(1, 2) >>> depth = torch.ones(1, 1) >>> K = torch.eye(4)[None] >>> E = torch.eye(4)[None] >>> h = torch.ones(1) >>> w = torch.ones(1) >>> pinhole = kornia.geometry.camera.PinholeCamera(K, E, h, w) >>> pinhole.unproject(x, depth) tensor([[0.4963, 0.7682, 1.0000]])",
    "type": "method",
    "file_path": "kornia\\kornia\\geometry\\camera\\pinhole.py",
    "ast_data": "FunctionDef name:unproject arg:self arg:point_2d arg:depth arguments arg arg arg Assign Assign Call Return return:yes Call Call"
  },
  {
    "library": "django",
    "name": "migrate",
    "source_code": "def migrate(self, targets, plan=None, state=None, fake=False, fake_initial=False):\n    if plan == []:\n        if not self.recorder.has_table():\n            return self._create_project_state(with_applied_migrations=False)\n    else:\n        self.recorder.ensure_schema()\n    if plan is None:\n        plan = self.migration_plan(targets)\n    full_plan = self.migration_plan(self.loader.graph.leaf_nodes(), clean_start=True)\n    all_forwards = all((not backwards for mig, backwards in plan))\n    all_backwards = all((backwards for mig, backwards in plan))\n    if not plan:\n        if state is None:\n            state = self._create_project_state(with_applied_migrations=True)\n    elif all_forwards == all_backwards:\n        raise InvalidMigrationPlan('Migration plans with both forwards and backwards migrations are not supported. Please split your migration process into separate plans of only forwards OR backwards migrations.', plan)\n    elif all_forwards:\n        if state is None:\n            state = self._create_project_state(with_applied_migrations=True)\n        state = self._migrate_all_forwards(state, plan, full_plan, fake=fake, fake_initial=fake_initial)\n    else:\n        state = self._migrate_all_backwards(plan, full_plan, fake=fake)\n    self.check_replacements()\n    return state",
    "docstring": "Migrate the database up to the given targets. Django first needs to create all project states before a migration is (un)applied and in a second step run all the database operations.",
    "type": "method",
    "file_path": "django\\django\\db\\migrations\\executor.py",
    "ast_data": "FunctionDef name:migrate arg:self arg:targets arg:plan arg:state arg:fake arg:fake_initial arguments arg arg arg arg arg arg If Compare If Call Return return:yes Call Call If Compare Assign Call Assign Call Call Assign Call Assign Call If If Compare Assign Call If Compare Raise Call If If Compare Assign Call Assign Call Assign Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "variable_scope",
    "source_code": "@property\ndef variable_scope(self):\n    return self._variable_scope",
    "docstring": "Returns the variable scope object created by this Template.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\template.py",
    "ast_data": "FunctionDef name:variable_scope arg:self arguments arg Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "get_x",
    "source_code": "def get_x(self):\n    return self._x",
    "docstring": "Return the left coord of the rectangle.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\patches.py",
    "ast_data": "FunctionDef name:get_x arg:self arguments arg Return return:yes"
  },
  {
    "library": "django",
    "name": "list",
    "source_code": "def list(self, ignore_patterns):\n    for storage in self.storages.values():\n        if storage.exists(''):\n            for path in utils.get_files(storage, ignore_patterns):\n                yield (path, storage)",
    "docstring": "List all files in all app storages.",
    "type": "method",
    "file_path": "django\\django\\contrib\\staticfiles\\finders.py",
    "ast_data": "FunctionDef name:list arg:self arg:ignore_patterns arguments arg arg For Call If Call For Call"
  },
  {
    "library": "pandas",
    "name": "unpack_zerodim_and_defer",
    "source_code": "def unpack_zerodim_and_defer(name: str) -> Callable[[F], F]:\n\n    def wrapper(method: F) -> F:\n        return _unpack_zerodim_and_defer(method, name)\n    return wrapper",
    "docstring": "Boilerplate for pandas conventions in arithmetic and comparison methods. Parameters ---------- name : str Returns ------- decorator",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\ops\\common.py",
    "ast_data": "FunctionDef name:unpack_zerodim_and_defer arg:name arguments arg FunctionDef name:wrapper arg:method arguments arg Return return:yes Call Return return:yes"
  },
  {
    "library": "kornia",
    "name": "random",
    "source_code": "@classmethod\ndef random(cls, batch_size: Optional[int]=None, device: Optional[Device]=None, dtype: Dtype=None) -> Se3:\n    shape: tuple[int, ...]\n    if batch_size is None:\n        shape = ()\n    else:\n        KORNIA_CHECK(batch_size >= 1, msg='batch_size must be positive')\n        shape = (batch_size,)\n    r = So3.random(batch_size, device, dtype)\n    t = Vector3.random(shape, device, dtype)\n    return cls(r, t)",
    "docstring": "Create a Se3 group representing a random transformation. Args: batch_size: the batch size of the underlying data. device: device to place the result on. dtype: dtype of the result. Example: >>> s = Se3.random() >>> s = Se3.random(batch_size=3)",
    "type": "method",
    "file_path": "kornia\\kornia\\geometry\\liegroup\\se3.py",
    "ast_data": "FunctionDef name:random arg:cls arg:batch_size arg:device arg:dtype arguments arg arg arg arg If Compare Assign Call Compare Assign Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "authlib",
    "name": "plaintext_signature",
    "source_code": "def plaintext_signature(client_secret, token_secret):\n    signature = escape(client_secret or '')\n    signature += '&'\n    signature += escape(token_secret or '')\n    return signature",
    "docstring": "Generate signature via PLAINTEXT method, per _. The \"PLAINTEXT\" method does not employ a signature algorithm. It MUST be used with a transport-layer mechanism such as TLS or SSL (or sent over a secure channel with equivalent protections). It does not utilize the signature base string or the \"oauth_timestamp\" and \"oauth_nonce\" parameters. .. _:",
    "type": "function",
    "file_path": "authlib\\authlib\\oauth1\\rfc5849\\signature.py",
    "ast_data": "FunctionDef name:plaintext_signature arg:client_secret arg:token_secret arguments arg arg Assign Call BoolOp Call BoolOp Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "format_as_single_line",
    "source_code": "def format_as_single_line(self, prefix=None, divider=' | ', enabled_item_attrs=None, disabled_item_attrs=None):\n    if enabled_item_attrs is not None and (not isinstance(enabled_item_attrs, list)):\n        enabled_item_attrs = [enabled_item_attrs]\n    if disabled_item_attrs is not None and (not isinstance(disabled_item_attrs, list)):\n        disabled_item_attrs = [disabled_item_attrs]\n    menu_line = prefix if prefix is not None else ''\n    attr_segs = []\n    for item in self._items:\n        menu_line += item.caption\n        item_name_begin = len(menu_line) - len(item.caption)\n        if item.is_enabled():\n            final_attrs = [item]\n            if enabled_item_attrs:\n                final_attrs.extend(enabled_item_attrs)\n            attr_segs.append((item_name_begin, len(menu_line), final_attrs))\n        elif disabled_item_attrs:\n            attr_segs.append((item_name_begin, len(menu_line), disabled_item_attrs))\n        menu_line += divider\n    return RichTextLines(menu_line, font_attr_segs={0: attr_segs})",
    "docstring": "Format the menu as a single-line RichTextLines object. Args: prefix: (str) String added to the beginning of the line. divider: (str) The dividing string between the menu items. enabled_item_attrs: (list or str) Attributes applied to each enabled menu item, e.g., [\"bold\", \"underline\"]. disabled_item_attrs: (list or str) Attributes applied to each disabled menu item, e.g., [\"red\"]. Returns: (RichTextLines) A single-line output representing the menu, with font_attr_segs marking the individual menu items.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\debug\\cli\\debugger_cli_common.py",
    "ast_data": "FunctionDef name:format_as_single_line arg:self arg:prefix arg:divider arg:enabled_item_attrs arg:disabled_item_attrs arguments arg arg arg arg arg If BoolOp Compare Call Assign If BoolOp Compare Call Assign Assign Compare Assign For Assign Call Call If Call Assign If Call Call Call If Call Call Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "_write_cells",
    "source_code": "def _write_cells(self, cells, sheet_name: str | None=None, startrow: int=0, startcol: int=0, freeze_panes: tuple[int, int] | None=None) -> None:\n    raise NotImplementedError",
    "docstring": "Write given formatted cells into Excel an excel sheet Parameters ---------- cells : generator cell of formatted data to save to Excel sheet sheet_name : str, default None Name of Excel sheet, if None, then use self.cur_sheet startrow : upper left cell row to dump data frame startcol : upper left cell column to dump data frame freeze_panes: int tuple of length 2 contains the bottom-most row and right-most column to freeze",
    "type": "method",
    "file_path": "pandas\\pandas\\io\\excel\\_base.py",
    "ast_data": "FunctionDef name:_write_cells arg:self arg:cells arg:sheet_name arg:startrow arg:startcol arg:freeze_panes arguments arg arg arg arg arg arg Raise"
  },
  {
    "library": "tensorflow",
    "name": "ensure_initialized",
    "source_code": "def ensure_initialized():\n    context().ensure_initialized()",
    "docstring": "Initialize the context.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\context.py",
    "ast_data": "FunctionDef name:ensure_initialized arguments Call Call"
  },
  {
    "library": "matplotlib",
    "name": "get_color",
    "source_code": "def get_color(self):\n    return self.get_colors()[0]",
    "docstring": "Return the color of the lines used to mark each event.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\collections.py",
    "ast_data": "FunctionDef name:get_color arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "forward",
    "source_code": "@staticmethod\ndef forward(ctx, input, mask):\n    ctx.save_for_backward(mask)\n    if mask is not None:\n        ctx.mark_non_differentiable(mask)\n    return helper(input, mask)",
    "docstring": "Return input with masked-out elements eliminated for the given operations.",
    "type": "method",
    "file_path": "pytorch\\torch\\masked\\_ops.py",
    "ast_data": "FunctionDef name:forward arg:ctx arg:input arg:mask arguments arg arg arg Call If Compare Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "kl_divergence",
    "source_code": "@dispatch.add_dispatch_support\ndef kl_divergence(y_true, y_pred):\n    y_pred = tensor_conversion.convert_to_tensor_v2_with_dispatch(y_pred)\n    y_true = math_ops.cast(y_true, y_pred.dtype)\n    y_true = backend.clip(y_true, backend.epsilon(), 1)\n    y_pred = backend.clip(y_pred, backend.epsilon(), 1)\n    return math_ops.reduce_sum(y_true * math_ops.log(y_true / y_pred), axis=-1)",
    "docstring": "Computes Kullback-Leibler divergence loss between and . See: Standalone usage: >>> y_true = np.random.randint(0, 2, size=(2, 3)).astype(np.float64) >>> y_pred = np.random.random(size=(2, 3)) >>> loss = tf.keras.losses.kullback_leibler_divergence(y_true, y_pred) >>> assert loss.shape == (2,) >>> y_true = tf.keras.backend.clip(y_true, 1e-7, 1) >>> y_pred = tf.keras.backend.clip(y_pred, 1e-7, 1) >>> assert np.array_equal( ... loss.numpy(), np.sum(y_true * np.log(y_true / y_pred), axis=-1)) Args: y_true: Tensor of true targets. y_pred: Tensor of predicted targets. Returns: A with loss. Raises: TypeError: If cannot be cast to the .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\losses.py",
    "ast_data": "FunctionDef name:kl_divergence arg:y_true arg:y_pred arguments arg arg Assign Call Assign Call Assign Call Call Assign Call Call Return return:yes Call Call"
  },
  {
    "library": "matplotlib",
    "name": "_SwitchableDateConverter",
    "source_code": "class _SwitchableDateConverter:\n\n    @staticmethod\n    def _get_converter():\n        converter_cls = {'concise': ConciseDateConverter, 'auto': DateConverter}[mpl.rcParams['date.converter']]\n        interval_multiples = mpl.rcParams['date.interval_multiples']\n        return converter_cls(interval_multiples=interval_multiples)\n\n    def axisinfo(self, *args, **kwargs):\n        return self._get_converter().axisinfo(*args, **kwargs)\n\n    def default_units(self, *args, **kwargs):\n        return self._get_converter().default_units(*args, **kwargs)\n\n    def convert(self, *args, **kwargs):\n        return self._get_converter().convert(*args, **kwargs)",
    "docstring": "Helper converter-like object that generates and dispatches to temporary ConciseDateConverter or DateConverter instances based on :rc: and :rc:.",
    "type": "class",
    "file_path": "matplotlib\\lib\\matplotlib\\dates.py",
    "ast_data": "ClassDef name:_SwitchableDateConverter FunctionDef name:_get_converter arguments Assign Assign Return return:yes Call FunctionDef name:axisinfo arg:self arguments arg arg arg Return return:yes Call Call FunctionDef name:default_units arg:self arguments arg arg arg Return return:yes Call Call FunctionDef name:convert arg:self arguments arg arg arg Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "convert_to_strided_representation",
    "source_code": "def convert_to_strided_representation(args):\n    if not isinstance(args, (list, tuple)):\n        args = (args,)\n    new_args: list[Any] = []\n    for obj in args:\n        if isinstance(obj, torch.Tensor) and obj.requires_grad and (obj.layout in sparse_layouts):\n            d = dict(layout=obj.layout, shape=obj.shape)\n            if not masked:\n                batch_dim = obj.ndim - obj.dense_dim() - obj.sparse_dim()\n                blocksize = obj.values().shape[batch_dim + 1:batch_dim + 3] if obj.layout in sparse_block_layouts else None\n                full_mask = torch.ones(obj.shape, device=obj.device, dtype=torch.bool).to_sparse(layout=obj.layout, blocksize=blocksize, dense_dim=obj.dense_dim())\n                obj = obj.to_dense().sparse_mask(full_mask)\n            if obj.layout is torch.sparse_coo:\n                d.update(indices=obj._indices(), is_coalesced=obj.is_coalesced())\n                values = obj._values()\n            elif obj.layout in {torch.sparse_csr, torch.sparse_bsr}:\n                d.update(compressed_indices=obj.crow_indices(), plain_indices=obj.col_indices())\n                values = obj.values()\n            else:\n                d.update(compressed_indices=obj.ccol_indices(), plain_indices=obj.row_indices())\n                values = obj.values()\n            new_args.extend((STRIDED_REPRESENTATION, d, values.requires_grad_(True)))\n        else:\n            new_args.append(obj)\n    return tuple(new_args)",
    "docstring": "Convert differentiable non-strided tensors to a representation containing differentiable strided tensors.",
    "type": "function",
    "file_path": "pytorch\\torch\\sparse\\__init__.py",
    "ast_data": "FunctionDef name:convert_to_strided_representation arg:args arguments arg If Call Assign For If BoolOp Call Compare Assign Call If Assign Call Call Assign Compare Call Assign Call Call Call Assign Call Call If Compare Call Call Call Assign Call If Compare Call Call Call Assign Call Call Call Call Assign Call Call Call Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "extract_arguments",
    "source_code": "def extract_arguments(start, string):\n    arguments = []\n    closures = {'<': 0, '(': 0}\n    current_position = start\n    argument_start_pos = current_position + 1\n    while current_position < len(string):\n        if string[current_position] == '(':\n            closures['('] += 1\n        elif string[current_position] == ')':\n            closures['('] -= 1\n        elif string[current_position] == '<':\n            closures['<'] += 1\n        elif string[current_position] == '>' and string[current_position - 1] != '-' and (closures['<'] > 0):\n            closures['<'] -= 1\n        if closures['('] == 0 and closures['<'] == 0:\n            arguments.append({'start': argument_start_pos, 'end': current_position})\n            break\n        if closures['('] == 1 and closures['<'] == 0 and (string[current_position] == ','):\n            arguments.append({'start': argument_start_pos, 'end': current_position})\n            argument_start_pos = current_position + 1\n        current_position += 1\n    return arguments",
    "docstring": "Return the list of arguments in the upcoming function parameter closure. Example: string (input): '(blocks, threads, 0, THCState_getCurrentStream(state))' arguments (output): '[{'start': 1, 'end': 7}, {'start': 8, 'end': 16}, {'start': 17, 'end': 19}, {'start': 20, 'end': 53}]'",
    "type": "function",
    "file_path": "pytorch\\torch\\utils\\hipify\\hipify_python.py",
    "ast_data": "FunctionDef name:extract_arguments arg:start arg:string arguments arg arg Assign Assign Assign Assign While Compare Call If Compare If Compare If Compare If BoolOp Compare Compare Compare If BoolOp Compare Compare Call If BoolOp Compare Compare Compare Call Assign Return return:yes"
  },
  {
    "library": "scipy",
    "name": "_process_size_shape",
    "source_code": "@staticmethod\ndef _process_size_shape(size, r, c):\n    shape = (len(r), len(c))\n    if size is None:\n        return (1, shape)\n    size = np.atleast_1d(size)\n    if not np.issubdtype(size.dtype, np.integer) or np.any(size < 0):\n        raise ValueError('`size` must be a non-negative integer or `None`')\n    return (np.prod(size), tuple(size) + shape)",
    "docstring": "Compute the number of samples to be drawn and the shape of the output",
    "type": "method",
    "file_path": "scipy\\scipy\\stats\\_multivariate.py",
    "ast_data": "FunctionDef name:_process_size_shape arg:size arg:r arg:c arguments arg arg arg Assign Call Call If Compare Return return:yes Assign Call If BoolOp Call Call Compare Raise Call Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "memory_cached",
    "source_code": "@deprecated('`torch.cuda.memory_cached` has been renamed to `torch.cuda.memory_reserved`', category=FutureWarning)\ndef memory_cached(device: 'Device'=None) -> int:\n    return memory_reserved(device=device)",
    "docstring": "Deprecated; see :func:.",
    "type": "function",
    "file_path": "pytorch\\torch\\cuda\\memory.py",
    "ast_data": "FunctionDef name:memory_cached arg:device arguments arg Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "update_mem_consumed",
    "source_code": "def update_mem_consumed(self, st: torch.UntypedStorage) -> int:\n    if st.size() != self.size:\n        self.size = st.size()\n        self.mem_consumed = self._calculate_mem_consumed()\n    return self.mem_consumed",
    "docstring": "Updates and returns the memory consumed if the storage size has changed. Args: st (torch.UntypedStorage): The tensor storage to check for size updates. Returns: int: The updated memory consumed in bytes.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\_tools\\mem_tracker.py",
    "ast_data": "FunctionDef name:update_mem_consumed arg:self arg:st arguments arg arg If Compare Call Assign Call Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "dynamic_counter",
    "source_code": "@property\ndef dynamic_counter(self):\n    if isinstance(self._loss_scale, _DynamicLossScaleState):\n        return self._loss_scale.counter\n    else:\n        return None",
    "docstring": "The number of steps since the loss scale was last increased or decreased. This is None if is False. The counter is incremented every step. Once it reaches , the loss scale will be doubled and the counter will be reset back to zero. If nonfinite gradients are encountered, the loss scale will be halved and the counter will be reset back to zero.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\mixed_precision\\loss_scale_optimizer.py",
    "ast_data": "FunctionDef name:dynamic_counter arg:self arguments arg If Call Return return:yes Return return:no"
  },
  {
    "library": "django",
    "name": "rel_db_type",
    "source_code": "def rel_db_type(self, connection):\n    if connection.features.related_fields_match_type:\n        return self.db_type(connection)\n    else:\n        return self.integer_field_class().db_type(connection=connection)",
    "docstring": "Return the data type that a related field pointing to this field should use. In most cases, a foreign key pointing to a positive integer primary key will have an integer column data type but some databases (e.g. MySQL) have an unsigned integer type. In that case (related_fields_match_type=True), the primary key should return its db_type.",
    "type": "method",
    "file_path": "django\\django\\db\\models\\fields\\__init__.py",
    "ast_data": "FunctionDef name:rel_db_type arg:self arg:connection arguments arg arg If Return return:yes Call Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "merge_consecutive_inputs",
    "source_code": "def merge_consecutive_inputs(self, inputs: list[Union[torch.fx.Node, int]]) -> list[Union[torch.fx.Node, _Range]]:\n    merged_ranges = []\n    cur_range = None\n    for input_ in inputs:\n        if isinstance(input_, int):\n            if not cur_range:\n                cur_range = [input_, input_]\n            elif input_ == cur_range[1] + 1:\n                cur_range[1] += 1\n            else:\n                merged_ranges.append(tuple(cur_range))\n                cur_range = [input_, input_]\n        else:\n            if cur_range:\n                merged_ranges.append(tuple(cur_range))\n                cur_range = None\n            merged_ranges.append(input_)\n    if cur_range:\n        merged_ranges.append(tuple(cur_range))\n    return merged_ranges",
    "docstring": "Merge consecutive inputs going into a user node. For e.g. [arg0, 0, 1, 2, arg1] -> [arg0, (0, 2), arg1]",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\fx_passes\\split_cat.py",
    "ast_data": "FunctionDef name:merge_consecutive_inputs arg:self arg:inputs arguments arg arg Assign Assign For If Call If Assign If Compare Call Call Assign If Call Call Assign Call If Call Call Return return:yes"
  },
  {
    "library": "numpy",
    "name": "mapparms",
    "source_code": "def mapparms(self):\n    return pu.mapparms(self.domain, self.window)",
    "docstring": "Return the mapping parameters. The returned values define a linear map `` is defined by the equations:: L(l1) = l2 L(r1) = r2",
    "type": "method",
    "file_path": "numpy\\numpy\\polynomial\\_polybase.py",
    "ast_data": "FunctionDef name:mapparms arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "forward",
    "source_code": "def forward(self, input: Tensor, output_size: Optional[list[int]]=None) -> Tensor:\n    if self.padding_mode != 'zeros':\n        raise ValueError('Only `zeros` padding mode is supported for ConvTranspose2d')\n    assert isinstance(self.padding, tuple)\n    num_spatial_dims = 2\n    output_padding = self._output_padding(input, output_size, self.stride, self.padding, self.kernel_size, num_spatial_dims, self.dilation)\n    return F.conv_transpose2d(input, self.weight, self.bias, self.stride, self.padding, output_padding, self.groups, self.dilation)",
    "docstring": "Performs the forward pass. Attributes: input (Tensor): The input tensor. output_size (list[int], optional): A list of integers representing the size of the output tensor. Default is None.",
    "type": "method",
    "file_path": "pytorch\\torch\\nn\\modules\\conv.py",
    "ast_data": "FunctionDef name:forward arg:self arg:input arg:output_size arguments arg arg arg If Compare Raise Call Call Assign Assign Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "to_cpu",
    "source_code": "@contextlib.contextmanager\ndef to_cpu(self):\n    self._check_sharded_strategy()\n    _p_assert(self.flat_param.size() == self.flat_param._unpadded_unsharded_size, f'Expects size {self.flat_param._unpadded_unsharded_size} but got {self.flat_param.size()}')\n    self._check_on_compute_device(self.flat_param)\n    _p_assert(_same_storage(self.flat_param, self._get_padded_unsharded_flat_param()), 'Expects the unpadded parameter to be a view into the padded parameter')\n    self.flat_param_to(torch.device('cpu'))\n    self._free_unsharded_flat_param()\n    try:\n        yield\n    finally:\n        _p_assert(self.flat_param.size() == self.flat_param._unpadded_unsharded_size, f'Expects size {self.flat_param._unpadded_unsharded_size} but got {self.flat_param.size()}')\n        padded_unsharded_flat_param = self._alloc_padded_unsharded_flat_param()\n        padded_unsharded_flat_param[:self.flat_param.numel()].copy_(self.flat_param)\n        self._use_unsharded_flat_param(padded_unsharded_flat_param)",
    "docstring": "Move the unpadded unsharded flat parameter to CPU while in the context and moves it back to the previous device upon exit. For now, this assumes the `` 's data is the unpadded unsharded flat parameter on the compute device, and the handle uses a sharded strategy. Postcondition: Same as the precondition.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\fsdp\\_flat_param.py",
    "ast_data": "FunctionDef name:to_cpu arg:self arguments arg Call Call Compare Call Call Call Call Call Call Call Call Call Try Call Compare Call Call Assign Call Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "index_expr",
    "source_code": "def index_expr(self, expr: sympy.Expr, dtype: torch.dtype) -> T:\n    raise NotImplementedError",
    "docstring": "Converts a sympy expression into a scalar of type dtype. expr is typically an indexing expression, thus the name; however, it can also be used in non-indexing situations.",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\ops_handler.py",
    "ast_data": "FunctionDef name:index_expr arg:self arg:expr arg:dtype arguments arg arg arg Raise"
  },
  {
    "library": "sphinx",
    "name": "dump",
    "source_code": "def dump(self, stream: _WritableStream[str] | _WritableStream[bytes], format: Any) -> None:\n    if isinstance(format, str):\n        format = self.formats[format]\n    format.dump(self.freeze(), stream)",
    "docstring": "Dump the frozen index to a stream.",
    "type": "method",
    "file_path": "sphinx\\sphinx\\search\\__init__.py",
    "ast_data": "FunctionDef name:dump arg:self arg:stream arg:format arguments arg arg arg If Call Assign Call Call"
  },
  {
    "library": "pytorch",
    "name": "reshape_inference_rule",
    "source_code": "@register_inference_rule(torch.reshape)\ndef reshape_inference_rule(n: Node):\n    assert isinstance(n.args[0], Node)\n    t1 = n.args[0].type\n    assert isinstance(n.args[1], list)\n    t2 = n.args[1]\n    t2_type = TensorType([Dyn if elem == -1 else elem for elem in t2])\n    if t1 == Dyn:\n        n.type = t2_type\n        return t2_type\n    elif isinstance(t1, TensorType):\n        assert isinstance(t1, TensorType)\n        a = [e if e != Dyn else 1 for e in t1.__args__]\n        p1 = reduce(operator.mul, a)\n        p2 = reduce(operator.mul, t2)\n        if p1 % p2 == 0 or p2 % p1 == 0:\n            n.type = t2_type\n            return t2_type\n        else:\n            raise TypeError(f'Cannot reshape in node {n} from {t1} to {t2_type}')\n    else:\n        raise TypeError(f'Cannot reshape in node {n} from {t1} to {t2_type}')",
    "docstring": "Without dynamism, the rule checks that the product of the elements of the argument tensor type is equal to the product of the elements of the required shape. We gradualize this rule by adding a case to handle fully dynamic input as well as input where some of the tensor dimensions are unknown. In this case we check for divisibility",
    "type": "function",
    "file_path": "pytorch\\torch\\fx\\experimental\\graph_gradual_typechecker.py",
    "ast_data": "FunctionDef name:reshape_inference_rule arg:n arguments arg Call Assign Call Assign Assign Call Compare If Compare Assign Return return:yes If Call Call Assign Compare Assign Call Assign Call If BoolOp Compare Compare Assign Return return:yes Raise Call Raise Call Call"
  },
  {
    "library": "cherrypy",
    "name": "delete",
    "source_code": "def delete(self):\n    uri = cherrypy.url(qs=cherrypy.serving.request.query_string)\n    self.store.pop(uri, None)",
    "docstring": "Remove ALL cached variants of the current resource.",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\lib\\caching.py",
    "ast_data": "FunctionDef name:delete arg:self arguments arg Assign Call Call"
  },
  {
    "library": "cryptography",
    "name": "key_size",
    "source_code": "@property\n@abc.abstractmethod\ndef key_size(self) -> int:\n    pass",
    "docstring": "The bit length of the prime modulus.",
    "type": "method",
    "file_path": "cryptography\\src\\cryptography\\hazmat\\primitives\\asymmetric\\dh.py",
    "ast_data": "FunctionDef name:key_size arg:self arguments arg"
  },
  {
    "library": "django",
    "name": "pbkdf2",
    "source_code": "def pbkdf2(password, salt, iterations, dklen=0, digest=None):\n    if digest is None:\n        digest = hashlib.sha256\n    dklen = dklen or None\n    password = force_bytes(password)\n    salt = force_bytes(salt)\n    return hashlib.pbkdf2_hmac(digest().name, password, salt, iterations, dklen)",
    "docstring": "Return the hash of password using pbkdf2.",
    "type": "function",
    "file_path": "django\\django\\utils\\crypto.py",
    "ast_data": "FunctionDef name:pbkdf2 arg:password arg:salt arg:iterations arg:dklen arg:digest arguments arg arg arg arg arg If Compare Assign Assign BoolOp Assign Call Assign Call Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_op_to_colocate_with",
    "source_code": "def _op_to_colocate_with(v, graph) -> tuple[Optional[Operation], Optional[Callable[[], None]]]:\n    if v is None:\n        return (None, None)\n    if isinstance(v, Operation):\n        return (v, None)\n    if hasattr(v, 'handle') and isinstance(v.handle, tensor_lib.Tensor):\n        device_only_candidate = lambda: None\n        device_only_candidate.device = v.device\n        device_only_candidate.name = v.name\n        if graph.building_function:\n            return (graph.capture(v.handle).op, device_only_candidate)\n        else:\n            return (v.handle.op, device_only_candidate)\n    if isinstance(v, EagerTensor) and (not context.executing_eagerly()):\n        return (convert_to_tensor(v, as_ref=True).op, None)\n    elif isinstance(v, internal.NativeObject):\n        return (v.op, None)\n    else:\n        return (convert_to_tensor(v, as_ref=True).op, None)",
    "docstring": "Operation object corresponding to v to use for colocation constraints.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\ops.py",
    "ast_data": "FunctionDef name:_op_to_colocate_with arg:v arg:graph arguments arg arg If Compare Return return:no If Call Return return:yes If BoolOp Call Call Assign arguments Assign Assign If Return return:yes Call Return return:yes If BoolOp Call Call Return return:yes Call If Call Return return:yes Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "dtype",
    "source_code": "@property\ndef dtype(self):\n    return self._dtype",
    "docstring": "The of elements in this tensor.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\tensor.py",
    "ast_data": "FunctionDef name:dtype arg:self arguments arg Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "CirclePolygon",
    "source_code": "class CirclePolygon(RegularPolygon):\n\n    def __str__(self):\n        s = 'CirclePolygon((%g, %g), radius=%g, resolution=%d)'\n        return s % (self.xy[0], self.xy[1], self.radius, self.numvertices)\n\n    @_docstring.interpd\n    def __init__(self, xy, radius=5, *, resolution=20, **kwargs):\n        super().__init__(xy, resolution, radius=radius, orientation=0, **kwargs)",
    "docstring": "A polygon-approximation of a circle patch.",
    "type": "class",
    "file_path": "matplotlib\\lib\\matplotlib\\patches.py",
    "ast_data": "ClassDef name:CirclePolygon FunctionDef name:__str__ arg:self arguments arg Assign Return return:yes FunctionDef name:__init__ arg:self arg:xy arg:radius arguments arg arg arg arg arg Call Call"
  },
  {
    "library": "tensorflow",
    "name": "use_tfrt",
    "source_code": "@use_tfrt.setter\ndef use_tfrt(self, tfrt):\n    if not isinstance(tfrt, bool):\n        raise ValueError('Expecting a boolean but got %s' % type(tfrt))\n    if self._use_tfrt != tfrt:\n        if self._initialized:\n            raise ValueError('use_tfrt should be set before being initialized.')\n        self._use_tfrt = tfrt",
    "docstring": "Sets whether to use TFRT.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\context.py",
    "ast_data": "FunctionDef name:use_tfrt arg:self arg:tfrt arguments arg arg If Call Raise Call Call If Compare If Raise Call Assign"
  },
  {
    "library": "scikit-learn",
    "name": "diag",
    "source_code": "def diag(self, X):\n    return np.ones(X.shape[0])",
    "docstring": "Returns the diagonal of the kernel k(X, X). The result of this method is identical to np.diag(self(X)); however, it can be evaluated more efficiently since only the diagonal is evaluated. Parameters ---------- X : ndarray of shape (n_samples_X, n_features) Left argument of the returned kernel k(X, Y) Returns ------- K_diag : ndarray of shape (n_samples_X,) Diagonal of kernel k(X, X)",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\gaussian_process\\kernels.py",
    "ast_data": "FunctionDef name:diag arg:self arg:X arguments arg arg Return return:yes Call"
  },
  {
    "library": "django",
    "name": "_topology",
    "source_code": "def _topology(self, func, other):\n    if not isinstance(other, OGRGeometry):\n        raise TypeError('Must use another OGRGeometry object for topology operations!')\n    return func(self.ptr, other.ptr)",
    "docstring": "A generalized function for topology operations, takes a GDAL function and the other geometry to perform the operation on.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\gdal\\geometries.py",
    "ast_data": "FunctionDef name:_topology arg:self arg:func arg:other arguments arg arg arg If Call Raise Call Return return:yes Call"
  },
  {
    "library": "django",
    "name": "detect_conflicts",
    "source_code": "def detect_conflicts(self):\n    seen_apps = {}\n    conflicting_apps = set()\n    for app_label, migration_name in self.graph.leaf_nodes():\n        if app_label in seen_apps:\n            conflicting_apps.add(app_label)\n        seen_apps.setdefault(app_label, set()).add(migration_name)\n    return {app_label: sorted(seen_apps[app_label]) for app_label in conflicting_apps}",
    "docstring": "Look through the loaded graph and detect any conflicts - apps with more than one leaf migration. Return a dict of the app labels that conflict with the migration names that conflict.",
    "type": "method",
    "file_path": "django\\django\\db\\migrations\\loader.py",
    "ast_data": "FunctionDef name:detect_conflicts arg:self arguments arg Assign Assign Call For Call If Compare Call Call Call Call Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "ortho_group_gen",
    "source_code": "class ortho_group_gen(multi_rv_generic):\n\n    def __init__(self, seed=None):\n        super().__init__(seed)\n        self.__doc__ = doccer.docformat(self.__doc__)\n\n    def __call__(self, dim=None, seed=None):\n        return ortho_group_frozen(dim, seed=seed)\n\n    def _process_parameters(self, dim):\n        if dim is None or not np.isscalar(dim) or dim < 0 or (dim != int(dim)):\n            raise ValueError('Dimension of rotation must be specified,and must be a scalar nonnegative integer.')\n        return dim\n\n    def rvs(self, dim, size=1, random_state=None):\n        random_state = self._get_random_state(random_state)\n        size = int(size)\n        dim = self._process_parameters(dim)\n        size = (size,) if size > 1 else ()\n        z = random_state.normal(size=size + (dim, dim))\n        q, r = np.linalg.qr(z)\n        d = r.diagonal(offset=0, axis1=-2, axis2=-1)\n        q *= (d / abs(d))[..., np.newaxis, :]\n        return q",
    "docstring": "An Orthogonal matrix (O(N)) random variable. Return a random orthogonal matrix, drawn from the O(N) Haar distribution (the only uniform distribution on O(N)). The keyword specifies the dimension N. Methods ------- rvs(dim=None, size=1, random_state=None) Draw random samples from O(N). Parameters ---------- dim : scalar Dimension of matrices seed : {None, int, np.random.RandomState, np.random.Generator}, optional Used for drawing random variates. If is , the singleton is used. If is an int, a new `seedNonespecial_ortho_groupmath-ph/0609050v2dim` parameter, returning a \"frozen\" ortho_group random variable: >>> rv = ortho_group(5) >>> # Frozen object with the same methods but holding the >>> # dimension parameter fixed. See Also -------- special_ortho_group",
    "type": "class",
    "file_path": "scipy\\scipy\\stats\\_multivariate.py",
    "ast_data": "ClassDef name:ortho_group_gen FunctionDef name:__init__ arg:self arg:seed arguments arg arg Call Call Assign Call FunctionDef name:__call__ arg:self arg:dim arg:seed arguments arg arg arg Return return:yes Call FunctionDef name:_process_parameters arg:self arg:dim arguments arg arg If BoolOp Compare Call Compare Compare Call Raise Call Return return:yes FunctionDef name:rvs arg:self arg:dim arg:size arg:random_state arguments arg arg arg arg Assign Call Assign Call Assign Call Assign Compare Assign Call Assign Call Assign Call Call Return return:yes"
  },
  {
    "library": "sphinx",
    "name": "is_inline",
    "source_code": "def is_inline(self, node: Element) -> bool:\n    return isinstance(node.parent, nodes.TextElement)",
    "docstring": "Check whether a node represents an inline element.",
    "type": "method",
    "file_path": "sphinx\\sphinx\\writers\\latex.py",
    "ast_data": "FunctionDef name:is_inline arg:self arg:node arguments arg arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "output_types",
    "source_code": "@property\n@deprecation.deprecated(None, 'Use `tf.compat.v1.data.get_output_types(iterator)`.')\ndef output_types(self):\n    return nest.map_structure(lambda component_spec: component_spec._to_legacy_output_types(), self._element_spec)",
    "docstring": "Returns the type of each component of an element of this iterator. Returns: A (nested) structure of objects corresponding to each component of an element of this dataset.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\data\\ops\\iterator_ops.py",
    "ast_data": "FunctionDef name:output_types arg:self arguments arg Return return:yes Call arguments arg Call Call"
  },
  {
    "library": "tensorflow",
    "name": "benchmark_generation_time",
    "source_code": "def benchmark_generation_time(output_token_len):\n    timestamp_start = datetime.datetime.now()\n    reply = sampler.chat(prompt, max_new_tokens=output_token_len)\n    timestamp_end = datetime.datetime.now()\n    timer_delta = timestamp_end - timestamp_start\n    if output_token_len == OUTPUT_TOKEN_LEN:\n        print(reply)\n    return timer_delta.total_seconds() * 1000",
    "docstring": "Benchmark generation time given output token length.",
    "type": "function",
    "file_path": "tensorflow\\third_party\\xla\\xla\\backends\\cpu\\benchmarks\\e2e\\gemma2\\flax_2b\\benchmark.py",
    "ast_data": "FunctionDef name:benchmark_generation_time arg:output_token_len arguments arg Assign Call Assign Call Assign Call Assign If Compare Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "_find_process_group",
    "source_code": "def _find_process_group(self):\n    for period in reversed(self._periods):\n        if self.step % period == 0:\n            return self.period_process_group_dict[period]\n    return None",
    "docstring": "Return a process group as the value of an `` if not found.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\algorithms\\model_averaging\\hierarchical_model_averager.py",
    "ast_data": "FunctionDef name:_find_process_group arg:self arguments arg For Call If Compare Return return:yes Return return:no"
  },
  {
    "library": "scipy",
    "name": "from_number",
    "source_code": "@classmethod\ndef from_number(cls, n, min=None):\n    width = number_digits(n) + 1\n    if n < 0:\n        width += 1\n    repeat = 80 // width\n    return cls(width, min, repeat=repeat)",
    "docstring": "Given an integer, returns a \"reasonable\" IntFormat instance to represent any number between 0 and n if n > 0, -n and n if n < 0 Parameters ---------- n : int max number one wants to be able to represent min : int minimum number of characters to use for the format Returns ------- res : IntFormat IntFormat instance with reasonable (see Notes) computed width Notes ----- Reasonable should be understood as the minimal string length necessary without losing precision. For example, IntFormat.from_number(1) will return an IntFormat instance of width 2, so that any 0 and 1 may be represented as 1-character strings without loss of information.",
    "type": "method",
    "file_path": "scipy\\scipy\\io\\_harwell_boeing\\_fortran_format_parser.py",
    "ast_data": "FunctionDef name:from_number arg:cls arg:n arg:min arguments arg arg arg Assign Call If Compare Assign Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "CustomPolicy",
    "source_code": "class CustomPolicy(_Policy):\n\n    def __init__(self, lambda_fn: Callable[[nn.Module], Union[bool, dict[str, Any]]]):\n        self._lambda_fn = lambda_fn\n\n    def _run_policy(self, root_module: nn.Module, ignored_modules: set[nn.Module], root_kwargs: dict[str, Any]) -> dict[nn.Module, dict[str, Any]]:\n        target_module_to_kwargs: dict[nn.Module, dict[str, Any]] = {}\n        for module in root_module.modules():\n            if module in ignored_modules:\n                continue\n            res = self._lambda_fn(module)\n            if not isinstance(res, (dict, bool)):\n                raise ValueError(f'The lambda_fn passed to CustomPolicy should return False/True or a kwarg dict, but it returned {res}')\n            if not res:\n                continue\n            kwargs = copy.copy(root_kwargs)\n            if isinstance(res, dict):\n                kwargs.update(res)\n            target_module_to_kwargs[module] = kwargs\n        return target_module_to_kwargs",
    "docstring": "This policy takes in a lambda function that maps a given ``, then the module has the API applied with the root's kwargs. - If the function returns a non-empty dictionary, then the module has the API applied, and the dictionary overrides the root's kwargs. Example:: >>> # xdoctest: +SKIP(\"undefined variables\") >>> model = init_transformer_model(...) >>> def lambda_fn(module: nn.Module): >>> if module is model.lm_head: >>> return {\"sharding_strategy\": ShardingStrategy.SHARD_GRAD_OP} >>> elif isinstance(module, TransformerBlock): >>> return True >>> return False >>> policy = CustomPolicy(lambda_fn) >>> fsdp_model = FSDP(model, auto_wrap_policy=policy)",
    "type": "class",
    "file_path": "pytorch\\torch\\distributed\\fsdp\\wrap.py",
    "ast_data": "ClassDef name:CustomPolicy FunctionDef name:__init__ arg:self arg:lambda_fn arguments arg arg Assign FunctionDef name:_run_policy arg:self arg:root_module arg:ignored_modules arg:root_kwargs arguments arg arg arg arg For Call If Compare Assign Call If Call Raise Call If Assign Call If Call Call Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "convert_fields_for_spec",
    "source_code": "def convert_fields_for_spec(fields, field_values):\n    _convert_fields(fields, field_values, context=_ConversionContext.SPEC)",
    "docstring": "Type-checks and converts field values for a TypeSpec (in place). This is similar to , except that we expect a for tensor-like types. In particular, if the of a field is or a subclass, then the corresponding value in is expected to contain a (rather than a value described by that ). Args: fields: A list of objects. field_values: A mapping field names to values. Must contain an entry for each field. I.e., must be equal to . Raises: ValueError: If the keys of do not match the names of the fields in . TypeError: If any value in does not have the type indicated by the corresponding object.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\extension_type_field.py",
    "ast_data": "FunctionDef name:convert_fields_for_spec arg:fields arg:field_values arguments arg arg Call"
  },
  {
    "library": "pytorch",
    "name": "get_symbolic_size",
    "source_code": "def get_symbolic_size(self) -> sympy.Expr:\n    raise NotImplementedError",
    "docstring": "Number of bytes needed at runtime",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\codegen\\memory_planning.py",
    "ast_data": "FunctionDef name:get_symbolic_size arg:self arguments arg Raise"
  },
  {
    "library": "django",
    "name": "attach_file",
    "source_code": "def attach_file(self, path, mimetype=None):\n    path = Path(path)\n    with path.open('rb') as file:\n        content = file.read()\n        self.attach(path.name, content, mimetype)",
    "docstring": "Attach a file from the filesystem. Set the mimetype to DEFAULT_ATTACHMENT_MIME_TYPE if it isn't specified and cannot be guessed. For a text/* mimetype (guessed or specified), decode the file's content as UTF-8. If that fails, set the mimetype to DEFAULT_ATTACHMENT_MIME_TYPE and don't decode the content.",
    "type": "method",
    "file_path": "django\\django\\core\\mail\\message.py",
    "ast_data": "FunctionDef name:attach_file arg:self arg:path arg:mimetype arguments arg arg arg Assign Call With Call Assign Call Call"
  },
  {
    "library": "cherrypy",
    "name": "bound_addr",
    "source_code": "@property\ndef bound_addr(self):\n    host, port = self.bind_addr\n    if port == 0 and self.httpserver.socket:\n        port = self.httpserver.socket.getsockname()[1]\n    return (host, port)",
    "docstring": "The bind address. If it's an ephemeral port and the socket has been bound, return the actual port bound.",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\process\\servers.py",
    "ast_data": "FunctionDef name:bound_addr arg:self arguments arg Assign If BoolOp Compare Assign Call Return return:yes"
  },
  {
    "library": "pandas",
    "name": "_apply_rule",
    "source_code": "def _apply_rule(self, dates: DatetimeIndex) -> DatetimeIndex:\n    if dates.empty:\n        return dates.copy()\n    if self.observance is not None:\n        return dates.map(lambda d: self.observance(d))\n    if self.offset is not None:\n        if not isinstance(self.offset, list):\n            offsets = [self.offset]\n        else:\n            offsets = self.offset\n        for offset in offsets:\n            with warnings.catch_warnings():\n                warnings.simplefilter('ignore', PerformanceWarning)\n                dates += offset\n    return dates",
    "docstring": "Apply the given offset/observance to a DatetimeIndex of dates. Parameters ---------- dates : DatetimeIndex Dates to apply the given offset/observance rule Returns ------- Dates with rules applied",
    "type": "method",
    "file_path": "pandas\\pandas\\tseries\\holiday.py",
    "ast_data": "FunctionDef name:_apply_rule arg:self arg:dates arguments arg arg If Return return:yes Call If Compare Return return:yes Call arguments arg Call If Compare If Call Assign Assign For With Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "is_frozen_graph",
    "source_code": "def is_frozen_graph(sess):\n    for op in sess.graph.get_operations():\n        if op.type.startswith('Variable') or op.type.endswith('VariableOp'):\n            return False\n    return True",
    "docstring": "Determines if the graph is frozen. Determines if a graph has previously been frozen by checking for any operations of type Variable*. If variables are found, the graph is not frozen. Args: sess: TensorFlow Session. Returns: Bool.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\lite\\python\\util.py",
    "ast_data": "FunctionDef name:is_frozen_graph arg:sess arguments arg For Call If BoolOp Call Call Return return:yes Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "unzip_folder",
    "source_code": "def unzip_folder(zip_file_path: Path, unzip_to_folder: Path) -> None:\n    print(f'Unzipping {zip_file_path}')\n    print(f'       to {unzip_to_folder}')\n    shutil.unpack_archive(zip_file_path, unzip_to_folder, 'zip')",
    "docstring": "Returns the path to the unzipped folder",
    "type": "function",
    "file_path": "pytorch\\.github\\scripts\\file_io_utils.py",
    "ast_data": "FunctionDef name:unzip_folder arg:zip_file_path arg:unzip_to_folder arguments arg arg Call Call Call"
  },
  {
    "library": "kornia",
    "name": "fit_line",
    "source_code": "def fit_line(points: Tensor, weights: Optional[Tensor]=None) -> ParametrizedLine:\n    KORNIA_CHECK_IS_TENSOR(points, 'points must be a tensor')\n    KORNIA_CHECK_SHAPE(points, ['B', 'N', 'D'])\n    mean = points.mean(-2, True)\n    A = points - mean\n    if weights is not None:\n        KORNIA_CHECK_IS_TENSOR(weights, 'weights must be a tensor')\n        KORNIA_CHECK_SHAPE(weights, ['B', 'N'])\n        KORNIA_CHECK(points.shape[0] == weights.shape[0])\n        A = A.transpose(-2, -1) @ torch.diag_embed(weights) @ A\n    else:\n        A = A.transpose(-2, -1) @ A\n    _, _, V = _torch_svd_cast(A)\n    V = V.transpose(-2, -1)\n    direction = V[..., 0, :]\n    origin = mean[..., 0, :]\n    return ParametrizedLine(origin, direction)",
    "docstring": "Fit a line from a set of points. Args: points: tensor containing a batch of sets of n-dimensional points. The expected shape of the tensor is :math:. weights: weights to use to solve the equations system. The expected shape of the tensor is :math:. Return: A tensor containing the direction of the fited line of shape :math:. Example: >>> points = torch.rand(2, 10, 3) >>> weights = torch.ones(2, 10) >>> line = fit_line(points, weights) >>> line.direction.shape torch.Size([2, 3])",
    "type": "function",
    "file_path": "kornia\\kornia\\geometry\\line.py",
    "ast_data": "FunctionDef name:fit_line arg:points arg:weights arguments arg arg Call Call Assign Call Assign If Compare Call Call Call Compare Assign Call Call Assign Call Assign Call Assign Call Assign Assign Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "is_macos13_or_newer",
    "source_code": "@_lru_cache\ndef is_macos13_or_newer(minor: int=0) -> bool:\n    return torch._C._mps_is_on_macos_or_newer(13, minor)",
    "docstring": "Return a bool indicating whether MPS is running on MacOS 13 or newer.",
    "type": "function",
    "file_path": "pytorch\\torch\\backends\\mps\\__init__.py",
    "ast_data": "FunctionDef name:is_macos13_or_newer arg:minor arguments arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_is_trainable",
    "source_code": "def _is_trainable(tensor):\n    if not backprop_util.IsTrainable(tensor):\n        return False\n    if tensor.op.type == 'TensorListPopBack' and tensor.value_index == 0:\n        assert tensor.dtype == dtypes.variant\n        element_type = tensor.op.get_attr('element_dtype')\n        return backprop_util.IsTrainable(element_type)\n    return True",
    "docstring": "Returns whether the given tensor is trainable.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\while_v2.py",
    "ast_data": "FunctionDef name:_is_trainable arg:tensor arguments arg If Call Return return:yes If BoolOp Compare Compare Compare Assign Call Return return:yes Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_constant_eager_impl",
    "source_code": "def _constant_eager_impl(ctx, value, dtype, shape, verify_shape) -> ops._EagerTensorBase:\n    t = convert_to_eager_tensor(value, ctx, dtype)\n    if shape is None:\n        return t\n    shape = tensor_shape.as_shape(shape)\n    if shape == t.shape:\n        return t\n    if verify_shape:\n        raise TypeError(f'Expected Tensor {t} (converted from {value}) with shape {tuple(shape)}, but got shape {tuple(t.shape)}.')\n    num_t = t.shape.num_elements()\n    if num_t == shape.num_elements():\n        return _eager_reshape(t, shape.as_list(), ctx)\n    if num_t == 1:\n        if t.dtype == dtypes.bool:\n            with ops.device('/device:CPU:0'):\n                x = _eager_fill(shape.as_list(), _eager_identity(t, ctx), ctx)\n            return _eager_identity(x, ctx)\n        else:\n            return _eager_fill(shape.as_list(), t, ctx)\n    raise TypeError(f'Eager execution of tf.constant with unsupported shape. Tensor {t} (converted from {value}) has {num_t:d} elements, but got `shape` {shape} with {shape.num_elements()} elements).')",
    "docstring": "Creates a constant on the current device.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\constant_op.py",
    "ast_data": "FunctionDef name:_constant_eager_impl arg:ctx arg:value arg:dtype arg:shape arg:verify_shape arguments arg arg arg arg arg Assign Call If Compare Return return:yes Assign Call If Compare Return return:yes If Raise Call Call Call Assign Call If Compare Call Return return:yes Call Call If Compare If Compare With Call Assign Call Call Call Return return:yes Call Return return:yes Call Call Raise Call Call"
  },
  {
    "library": "pytorch",
    "name": "fuse_fx",
    "source_code": "def fuse_fx(model: torch.nn.Module, fuse_custom_config: Union[FuseCustomConfig, dict[str, Any], None]=None, backend_config: Union[BackendConfig, dict[str, Any], None]=None) -> GraphModule:\n    if fuse_custom_config is None:\n        fuse_custom_config = FuseCustomConfig()\n    if isinstance(fuse_custom_config, dict):\n        warnings.warn('Passing a fuse_custom_config_dict to fuse is deprecated and will not be supported in a future version. Please pass in a FuseCustomConfig instead.', FutureWarning, stacklevel=2)\n        fuse_custom_config = FuseCustomConfig.from_dict(fuse_custom_config)\n    torch._C._log_api_usage_once('quantization_api.quantize_fx.fuse_fx')\n    preserved_attr_names = fuse_custom_config.preserved_attributes\n    preserved_attrs = {attr: getattr(model, attr) for attr in preserved_attr_names if hasattr(model, attr)}\n    graph_module = torch.fx.symbolic_trace(model)\n    _attach_meta_to_node_if_not_exist(graph_module)\n    graph_module = _fuse_fx(graph_module, False, fuse_custom_config, backend_config)\n    attach_preserved_attrs_to_model(graph_module, preserved_attrs)\n    return graph_module",
    "docstring": "Fuse modules like conv+bn, conv+bn+relu etc, model must be in eval mode. Fusion rules are defined in torch.ao.quantization.fx.fusion_pattern.py Args: * (torch.nn.Module): a torch.nn.Module model * (FuseCustomConfig): custom configurations for fuse_fx. See :class: for more details Example:: from torch.ao.quantization import fuse_fx m = Model().eval() m = fuse_fx(m)",
    "type": "function",
    "file_path": "pytorch\\torch\\ao\\quantization\\quantize_fx.py",
    "ast_data": "FunctionDef name:fuse_fx arg:model arg:fuse_custom_config arg:backend_config arguments arg arg arg If Compare Assign Call If Call Call Assign Call Call Assign Assign Call Call Assign Call Call Assign Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "unstack",
    "source_code": "@tf_should_use.should_use_result\ndef unstack(self, value, name=None):\n    with ops.name_scope(name, 'TensorArrayUnstack', [self._flow, value]):\n        value = ops.convert_to_tensor(value, preferred_dtype=self._dtype, name='value')\n        _check_dtypes(value, self._dtype)\n        self._check_element_shape(value.shape[1:])\n        flow_out = list_ops.tensor_list_from_tensor(tensor=value, element_shape=value.shape[1:])\n        return build_ta_with_new_flow(self, flow_out)",
    "docstring": "See TensorArray.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\tensor_array_ops.py",
    "ast_data": "FunctionDef name:unstack arg:self arg:value arg:name arguments arg arg arg With Call Assign Call Call Call Assign Call Return return:yes Call"
  },
  {
    "library": "kornia",
    "name": "to_tensor",
    "source_code": "def to_tensor(self, x: Any) -> Tensor:\n    if isinstance(x, (str,)):\n        return kornia.io.load_image(x, kornia.io.ImageLoadType.UNCHANGED) / 255\n    if isinstance(x, (Tensor,)):\n        return x\n    if isinstance(x, (np.ndarray,)):\n        return kornia.utils.image.image_to_tensor(x) / 255\n    if isinstance(x, (Image.Image,)):\n        return from_numpy(np.array(x)).permute(2, 0, 1).float() / 255\n    raise TypeError('Input type not supported')",
    "docstring": "Convert input to tensor. Supports image path, numpy array, PIL image, and raw tensor. Args: x: The input to convert. Returns: Tensor: The converted tensor.",
    "type": "method",
    "file_path": "kornia\\kornia\\core\\module.py",
    "ast_data": "FunctionDef name:to_tensor arg:self arg:x arguments arg arg If Call Return return:yes Call If Call Return return:yes If Call Return return:yes Call If Call Return return:yes Call Call Call Call Raise Call"
  },
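A minimal standalone sketch of the same isinstance-dispatch pattern, assuming only numpy and torch (it omits kornia's image-path and PIL branches and the HWC-to-CHW permute that `image_to_tensor` performs):

```python
from typing import Any

import numpy as np
import torch

def to_unit_tensor(x: Any) -> torch.Tensor:
    # Tensors pass through; uint8 arrays are rescaled to [0, 1] like the /255
    # above. Unsupported inputs fail loudly, mirroring the original TypeError.
    if isinstance(x, torch.Tensor):
        return x
    if isinstance(x, np.ndarray):
        t = torch.from_numpy(x)
        return t.float() / 255 if x.dtype == np.uint8 else t.float()
    raise TypeError("Input type not supported")

img = (np.random.rand(32, 32, 3) * 255).astype(np.uint8)
print(float(to_unit_tensor(img).max()) <= 1.0)  # True
```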
  {
    "library": "scikit-learn",
    "name": "from_scorer",
    "source_code": "@classmethod\ndef from_scorer(cls, scorer, response_method, thresholds):\n    instance = cls(score_func=scorer._score_func, sign=scorer._sign, response_method=response_method, thresholds=thresholds, kwargs=scorer._kwargs)\n    instance._metadata_request = scorer._get_metadata_request()\n    return instance",
    "docstring": "Create a continuous scorer from a normal scorer.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\metrics\\_scorer.py",
    "ast_data": "FunctionDef name:from_scorer arg:cls arg:scorer arg:response_method arg:thresholds arguments arg arg arg arg Assign Call Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "write",
    "source_code": "@tf_export('summary.write', v1=[])\ndef write(tag, tensor, step=None, metadata=None, name=None):\n    with ops.name_scope(name, 'write_summary') as scope:\n        if _summary_state.writer is None:\n            return constant_op.constant(False)\n        if step is None:\n            step = get_step()\n        if metadata is None:\n            serialized_metadata = b''\n        elif hasattr(metadata, 'SerializeToString'):\n            serialized_metadata = metadata.SerializeToString()\n        else:\n            serialized_metadata = metadata\n\n        def record():\n            if step is None:\n                raise ValueError('No step set. Please specify one either through the `step` argument or through tf.summary.experimental.set_step()')\n            with ops.device('cpu:0'):\n                summary_tensor = tensor() if callable(tensor) else array_ops.identity(tensor)\n                writer = _summary_state.writer\n                summary_value = _maybe_convert_tensor_to_dtensor(writer, summary_tensor)\n                step_value = _maybe_convert_tensor_to_dtensor(writer, step)\n                write_summary_op = gen_summary_ops.write_summary(writer._resource, step_value, summary_value, tag, serialized_metadata, name=scope)\n                with ops.control_dependencies([write_summary_op]):\n                    return constant_op.constant(True)\n        op = smart_cond.smart_cond(should_record_summaries(), record, _nothing, name='summary_cond')\n        if not context.executing_eagerly():\n            ops.add_to_collection(ops.GraphKeys._SUMMARY_COLLECTION, op)\n        return op",
    "docstring": "Writes a generic summary to the default SummaryWriter if one exists. This exists primarily to support the definition of type-specific summary ops like scalar() and image(), and is not intended for direct use unless defining a new type-specific summary op. Args: tag: string tag used to identify the summary (e.g. in TensorBoard), usually generated with tensor: the Tensor holding the summary data to write or a callable that returns this Tensor. If a callable is passed, it will only be called when a default SummaryWriter exists and the recording condition specified by is met. step: Explicit -castable monotonic step value for this summary. If omitted, this defaults to , which must not be None. metadata: Optional SummaryMetadata, as a proto or serialized bytes name: Optional string name for this op. Returns: True on success, or false if no summary was written because no default summary writer was available. Raises: ValueError: if a default writer exists, but no step was provided and is None.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\summary_ops_v2.py",
    "ast_data": "FunctionDef name:write arg:tag arg:tensor arg:step arg:metadata arg:name arguments arg arg arg arg arg With Call If Compare Return return:yes Call If Compare Assign Call If Compare Assign If Call Assign Call Assign FunctionDef name:record arguments If Compare Raise Call With Call Assign Call Call Call Assign Assign Call Assign Call Assign Call With Call Return return:yes Call Assign Call Call If Call Call Return return:yes Call"
  },
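Typical direct use of this primitive looks like the following (the log directory is hypothetical); type-specific ops such as tf.summary.scalar delegate to the same machinery:

```python
import tensorflow as tf

writer = tf.summary.create_file_writer("/tmp/tb_logs")  # hypothetical path
with writer.as_default():
    # step is passed explicitly; otherwise tf.summary.experimental.set_step()
    # must have been called, or record() raises the ValueError described above.
    ok = tf.summary.write("my_tag", tf.constant(0.5), step=0)
print(ok)  # tf.Tensor(True, ...) since a default writer exists
```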
  {
    "library": "pytorch",
    "name": "inialize_from_graph",
    "source_code": "@classmethod\ndef inialize_from_graph(cls, joint_graph: Graph, all_recomputable_banned_nodes: list[Node], recorded_knapsack_input_memories: list[float], recorded_knapsack_input_runtimes: list[float]) -> 'GraphInfoProvider':\n    graph_nodes_in_order = [node.name for node in joint_graph.nodes]\n    graph_edges = [(node.name, user.name) for node in joint_graph.nodes for user in node.users]\n    all_recomputable_banned_node_names = [node.name for node in all_recomputable_banned_nodes]\n    return cls(graph_nodes_in_order=graph_nodes_in_order, graph_edges=graph_edges, all_recomputable_banned_nodes=all_recomputable_banned_node_names, recorded_knapsack_input_memories=recorded_knapsack_input_memories, recorded_knapsack_input_runtimes=recorded_knapsack_input_runtimes, joint_graph=joint_graph)",
    "docstring": "Enables initialization from a joint graph.",
    "type": "method",
    "file_path": "pytorch\\torch\\_functorch\\_activation_checkpointing\\graph_info_provider.py",
    "ast_data": "FunctionDef name:inialize_from_graph arg:cls arg:joint_graph arg:all_recomputable_banned_nodes arg:recorded_knapsack_input_memories arg:recorded_knapsack_input_runtimes arguments arg arg arg arg arg Assign Assign Assign Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "_set_max_features",
    "source_code": "def _set_max_features(self):\n    if isinstance(self.max_features, str):\n        if self.max_features == 'auto':\n            if is_classifier(self):\n                max_features = max(1, int(np.sqrt(self.n_features_in_)))\n            else:\n                max_features = self.n_features_in_\n        elif self.max_features == 'sqrt':\n            max_features = max(1, int(np.sqrt(self.n_features_in_)))\n        else:\n            max_features = max(1, int(np.log2(self.n_features_in_)))\n    elif self.max_features is None:\n        max_features = self.n_features_in_\n    elif isinstance(self.max_features, Integral):\n        max_features = self.max_features\n    else:\n        max_features = max(1, int(self.max_features * self.n_features_in_))\n    self.max_features_ = max_features",
    "docstring": "Set self.max_features_.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\ensemble\\_gb.py",
    "ast_data": "FunctionDef name:_set_max_features arg:self arguments arg If Call If Compare If Call Assign Call Call Call Assign If Compare Assign Call Call Call Assign Call Call Call If Compare Assign If Call Assign Assign Call Call Assign"
  },
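A standalone sketch of the same resolution rules (the function name is hypothetical; only the arithmetic mirrors the method above):

```python
from numbers import Integral

import numpy as np

def resolve_max_features(max_features, n_features_in, classifier=False):
    # Strings map to heuristics: 'auto' means sqrt for classifiers and all
    # features for regressors; 'sqrt' and 'log2' are literal; None keeps all;
    # ints pass through; floats are treated as a fraction of the features.
    if isinstance(max_features, str):
        if max_features == "sqrt" or (max_features == "auto" and classifier):
            return max(1, int(np.sqrt(n_features_in)))
        if max_features == "auto":
            return n_features_in
        return max(1, int(np.log2(n_features_in)))
    if max_features is None:
        return n_features_in
    if isinstance(max_features, Integral):
        return max_features
    return max(1, int(max_features * n_features_in))

print(resolve_max_features("sqrt", 100))  # 10
print(resolve_max_features(0.25, 100))    # 25
```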
  {
    "library": "pytorch",
    "name": "choose_qparams_affine_with_min_max",
    "source_code": "def choose_qparams_affine_with_min_max(min_val: torch.Tensor, max_val: torch.Tensor, mapping_type: MappingType, block_size: tuple[int, ...], target_dtype: torch.dtype, quant_min: Optional[int]=None, quant_max: Optional[int]=None, eps: Optional[float]=None, scale_dtype: Optional[torch.dtype]=None, zero_point_dtype: Optional[torch.dtype]=None, preserve_zero: bool=True, zero_point_domain: Optional[ZeroPointDomain]=ZeroPointDomain.INT) -> tuple[torch.Tensor, torch.Tensor]:\n    return _choose_qparams_affine(None, mapping_type.name, block_size, target_dtype, quant_min, quant_max, eps, scale_dtype, zero_point_dtype, preserve_zero, zero_point_domain.name if zero_point_domain is not None else None, min_val, max_val)",
    "docstring": "A variant of :func: operator that pass in min_val and max_val directly instead of deriving these from a single input. This is used for observers in static quantization where min_val and max_val may be obtained through tracking all the data in calibration data set. Args: Mostly same as :func:. with one difference: instead of passing in Tensor and use that to calculate min_val/max_val and then scale/zero_point, we pass in min_val/max_val directly",
    "type": "function",
    "file_path": "pytorch\\torch\\ao\\quantization\\pt2e\\_affine_quantization.py",
    "ast_data": "FunctionDef name:choose_qparams_affine_with_min_max arg:min_val arg:max_val arg:mapping_type arg:block_size arg:target_dtype arg:quant_min arg:quant_max arg:eps arg:scale_dtype arg:zero_point_dtype arg:preserve_zero arg:zero_point_domain arguments arg arg arg arg arg arg arg arg arg arg arg arg Return return:yes Call Compare"
  },
  {
    "library": "matplotlib",
    "name": "is_writable_file_like",
    "source_code": "def is_writable_file_like(obj):\n    return callable(getattr(obj, 'write', None))",
    "docstring": "Return whether *obj* looks like a file object with a *write* method.",
    "type": "function",
    "file_path": "matplotlib\\lib\\matplotlib\\cbook.py",
    "ast_data": "FunctionDef name:is_writable_file_like arg:obj arguments arg Return return:yes Call Call"
  },
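The check is pure duck typing, so any object with a callable `write` attribute qualifies:

```python
import io

from matplotlib.cbook import is_writable_file_like

print(is_writable_file_like(io.StringIO()))  # True: has a callable .write
print(is_writable_file_like("out.txt"))      # False: plain str has no .write
print(is_writable_file_like(object()))       # False
```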
  {
    "library": "matplotlib",
    "name": "_set_axes_scale",
    "source_code": "def _set_axes_scale(self, value, **kwargs):\n    name = self._get_axis_name()\n    old_default_lims = self.get_major_locator().nonsingular(-np.inf, np.inf)\n    for ax in self._get_shared_axes():\n        ax._axis_map[name]._set_scale(value, **kwargs)\n        ax._update_transScale()\n        ax.stale = True\n    new_default_lims = self.get_major_locator().nonsingular(-np.inf, np.inf)\n    if old_default_lims != new_default_lims:\n        self.axes.autoscale_view(**{f'scale{k}': k == name for k in self.axes._axis_names})",
    "docstring": "Set this Axis' scale. Parameters ---------- value : str or The axis scale type to apply. Valid string values are the names of scale classes (\"linear\", \"log\", \"function\",...). These may be the names of any of the :ref: or of any custom scales registered using . **kwargs If *value* is a string, keywords are passed to the instantiation method of the respective class.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\axis.py",
    "ast_data": "FunctionDef name:_set_axes_scale arg:self arg:value arguments arg arg arg Assign Call Assign Call Call For Call Call Call Assign Assign Call Call If Compare Call Compare"
  },
  {
    "library": "tensorflow",
    "name": "_continuously_readline_from_sub",
    "source_code": "def _continuously_readline_from_sub(self, pipe_r, task_type, task_id):\n    with os.fdopen(pipe_r.fileno(), 'r', closefd=False) as reader:\n        for line in reader:\n            task_string = '[{}-{}]:'.format(task_type, task_id)\n            formatted_line = '{} {}'.format(task_string.ljust(14), line)\n            if self._stream_output:\n                print(formatted_line, end='', flush=True)\n            if self._return_output:\n                self._streaming_queue.put(formatted_line)",
    "docstring": "Function to continuously read lines from subprocesses.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\multi_process_runner.py",
    "ast_data": "FunctionDef name:_continuously_readline_from_sub arg:self arg:pipe_r arg:task_type arg:task_id arguments arg arg arg arg With Call Call For Assign Call Assign Call Call If Call If Call"
  },
  {
    "library": "tensorflow",
    "name": "_num_workers",
    "source_code": "def _num_workers(self) -> int:\n    return self._server.num_workers()",
    "docstring": "Returns the number of workers registered with the dispatcher.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\data\\experimental\\service\\server_lib.py",
    "ast_data": "FunctionDef name:_num_workers arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "apply",
    "source_code": "@classmethod\ndef apply(cls, module, name, amount, importance_scores=None):\n    return super().apply(module, name, amount=amount, importance_scores=importance_scores)",
    "docstring": "Add pruning on the fly and reparametrization of a tensor. Adds the forward pre-hook that enables pruning on the fly and the reparametrization of a tensor in terms of the original tensor and the pruning mask. Args: module (nn.Module): module containing the tensor to prune name (str): parameter name within ``, it represents the absolute number of parameters to prune. importance_scores (torch.Tensor): tensor of importance scores (of same shape as module parameter) used to compute mask for pruning. The values in this tensor indicate the importance of the corresponding elements in the parameter being pruned. If unspecified or None, the module parameter will be used in its place.",
    "type": "method",
    "file_path": "pytorch\\torch\\nn\\utils\\prune.py",
    "ast_data": "FunctionDef name:apply arg:cls arg:module arg:name arg:amount arg:importance_scores arguments arg arg arg arg arg Return return:yes Call Call"
  },
  {
    "library": "numpy",
    "name": "hermadd",
    "source_code": "def hermadd(c1, c2):\n    return pu._add(c1, c2)",
    "docstring": "Add one Hermite series to another. Returns the sum of two Hermite series + . The arguments are sequences of coefficients ordered from lowest order term to highest, i.e., [1,2,3] represents the series ``. Parameters ---------- c1, c2 : array_like 1-D arrays of Hermite series coefficients ordered from low to high. Returns ------- out : ndarray Array representing the Hermite series of their sum. See Also -------- hermsub, hermmulx, hermmul, hermdiv, hermpow Notes ----- Unlike multiplication, division, etc., the sum of two Hermite series is a Hermite series (without having to \"reproject\" the result onto the basis set) so addition, just like that of \"standard\" polynomials, is simply \"component-wise.\" Examples -------- >>> from numpy.polynomial.hermite import hermadd >>> hermadd([1, 2, 3], [1, 2, 3, 4]) array([2., 4., 6., 4.])",
    "type": "function",
    "file_path": "numpy\\numpy\\polynomial\\hermite.py",
    "ast_data": "FunctionDef name:hermadd arg:c1 arg:c2 arguments arg arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "call_method",
    "source_code": "@compatibility(is_backward_compatible=True)\ndef call_method(self, method_name: str, args: Optional[tuple['Argument', ...]]=None, kwargs: Optional[dict[str, 'Argument']]=None, type_expr: Optional[Any]=None) -> Node:\n    return self.create_node('call_method', method_name, args, kwargs, type_expr=type_expr)",
    "docstring": "Insert a `Graph.create_node`.",
    "type": "method",
    "file_path": "pytorch\\torch\\fx\\graph.py",
    "ast_data": "FunctionDef name:call_method arg:self arg:method_name arg:args arg:kwargs arg:type_expr arguments arg arg arg arg arg Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "get_message",
    "source_code": "def get_message(self):\n    lines = []\n    lines.append('in user code:')\n    lines.append('')\n    for frame_info in reversed(self.translated_stack):\n        if traceback_utils.is_traceback_filtering_enabled() and (not traceback_utils.include_frame(frame_info.filename)):\n            continue\n        formatted_line = f'    File \"{frame_info.filename}\", line {frame_info.lineno}, in {frame_info.function_name}'\n        if frame_info.is_converted:\n            formatted_line += '  *'\n        elif frame_info.is_allowlisted:\n            formatted_line += '  **'\n        lines.append(formatted_line)\n        if frame_info.code is None:\n            code_snippet = '<source unavailable>'\n        else:\n            code_snippet = frame_info.code.strip()\n        lines.append('        {}'.format(code_snippet))\n    lines.append('')\n    message_lines = self.cause_message.split('\\n')\n    for i in range(len(message_lines)):\n        message_lines[i] = '    ' + message_lines[i]\n    lines.extend(message_lines)\n    lines.append('')\n    return '\\n'.join(lines)",
    "docstring": "Returns the message for the underlying exception.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\autograph\\pyct\\error_utils.py",
    "ast_data": "FunctionDef name:get_message arg:self arguments arg Assign Call Call For Call If BoolOp Call Call Assign If If Call If Compare Assign Assign Call Call Call Call Assign Call For Call Call Assign Call Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "_DecoratorContextManager",
    "source_code": "class _DecoratorContextManager:\n\n    def __call__(self, orig_func: F) -> F:\n        if inspect.isclass(orig_func):\n            warnings.warn('Decorating classes is deprecated and will be disabled in future versions. You should only decorate functions or methods. To preserve the current behavior of class decoration, you can directly decorate the `__init__` method and nothing else.', FutureWarning, stacklevel=2)\n            func = cast(F, lambda *args, **kwargs: orig_func(*args, **kwargs))\n        else:\n            func = orig_func\n        return cast(F, context_decorator(self.clone, func))\n\n    def __enter__(self) -> None:\n        raise NotImplementedError\n\n    def __exit__(self, exc_type: Any, exc_value: Any, traceback: Any) -> None:\n        raise NotImplementedError\n\n    def clone(self):\n        return self.__class__()",
    "docstring": "Allow a context manager to be used as a decorator.",
    "type": "class",
    "file_path": "pytorch\\torch\\utils\\_contextlib.py",
    "ast_data": "ClassDef name:_DecoratorContextManager FunctionDef name:__call__ arg:self arg:orig_func arguments arg arg If Call Call Assign Call arguments arg arg Call Assign Return return:yes Call Call FunctionDef name:__enter__ arg:self arguments arg Raise FunctionDef name:__exit__ arg:self arg:exc_type arg:exc_value arg:traceback arguments arg arg arg arg Raise FunctionDef name:clone arg:self arguments arg Return return:yes Call"
  },
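torch.no_grad is one such subclass, so a single class serves as both a context manager and a decorator; `clone()` is what gives each decorated call a fresh context instance:

```python
import torch

@torch.no_grad()  # decorator form, routed through context_decorator(self.clone, fn)
def inference_step(x: torch.Tensor) -> torch.Tensor:
    return x * 2

with torch.no_grad():  # plain context-manager form
    y = torch.ones(3) + 1

print(inference_step(torch.ones(3, requires_grad=True)).requires_grad)  # False
print(y.requires_grad)  # False
```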
  {
    "library": "pytorch",
    "name": "Quantize",
    "source_code": "class Quantize(torch.nn.Module):\n    scale: torch.Tensor\n    zero_point: torch.Tensor\n\n    def __init__(self, scale, zero_point, dtype, factory_kwargs=None):\n        factory_kwargs = torch.nn.factory_kwargs(factory_kwargs)\n        super().__init__()\n        self.register_buffer('scale', torch.tensor([scale], **factory_kwargs))\n        self.register_buffer('zero_point', torch.tensor([zero_point], dtype=torch.long, **{k: v for k, v in factory_kwargs.items() if k != 'dtype'}))\n        self.dtype = dtype\n\n    def forward(self, X):\n        return torch.quantize_per_tensor(X, float(self.scale), int(self.zero_point), self.dtype)\n\n    @staticmethod\n    def from_float(mod, use_precomputed_fake_quant=False):\n        assert hasattr(mod, 'activation_post_process')\n        scale, zero_point = mod.activation_post_process.calculate_qparams()\n        return Quantize(scale.float().item(), zero_point.long().item(), mod.activation_post_process.dtype)\n\n    def extra_repr(self):\n        return f'scale={self.scale}, zero_point={self.zero_point}, dtype={self.dtype}'",
    "docstring": "Quantizes an incoming tensor Args: : scale of the output Quantized Tensor : zero_point of output Quantized Tensor : data type of output Quantized Tensor : Dictionary of kwargs used for configuring initialization of internal buffers. Currently, and are supported. Example: will initialize internal buffers as type on the current CUDA device. Note that only applies to floating-point buffers. Examples:: >>> t = torch.tensor([[1., -1.], [1., -1.]]) >>> scale, zero_point, dtype = 1.0, 2, torch.qint8 >>> qm = Quantize(scale, zero_point, dtype) >>> # xdoctest: +SKIP >>> qt = qm(t) >>> print(qt) tensor([[ 1., -1.], [ 1., -1.]], size=(2, 2), dtype=torch.qint8, scale=1.0, zero_point=2)",
    "type": "class",
    "file_path": "pytorch\\torch\\ao\\nn\\quantized\\modules\\__init__.py",
    "ast_data": "ClassDef name:Quantize FunctionDef name:__init__ arg:self arg:scale arg:zero_point arg:dtype arg:factory_kwargs arguments arg arg arg arg arg Assign Call Call Call Call Call Call Call Call Compare Assign FunctionDef name:forward arg:self arg:X arguments arg arg Return return:yes Call Call Call FunctionDef name:from_float arg:mod arg:use_precomputed_fake_quant arguments arg arg Call Assign Call Return return:yes Call Call Call Call Call FunctionDef name:extra_repr arg:self arguments arg Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "_process",
    "source_code": "def _process(self):\n    self.canvas.callbacks.process(self.name, self)\n    self.guiEvent = None",
    "docstring": "Process this event on ``.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\backend_bases.py",
    "ast_data": "FunctionDef name:_process arg:self arguments arg Call Assign"
  },
  {
    "library": "pytorch",
    "name": "_lower_weight_only_weighted_ref_module",
    "source_code": "def _lower_weight_only_weighted_ref_module(model: GraphModule):\n    named_modules = dict(model.named_modules(remove_duplicate=False))\n    for n in model.graph.nodes:\n        if n.op != 'call_module' or type(named_modules[str(n.target)]) not in set(WEIGHT_ONLY_LOWER_MODULE_MAP.keys()):\n            continue\n        ref_node = n\n        ref_module = named_modules[str(ref_node.target)]\n        ref_class = type(ref_module)\n        q_class = WEIGHT_ONLY_LOWER_MODULE_MAP.get(ref_class)\n        q_module = q_class.from_reference(ref_module)\n        parent_name, module_name = _parent_name(ref_node.target)\n        setattr(named_modules[parent_name], module_name, q_module)",
    "docstring": "Traverse the graph and find ref_module patterns and replace them with the weight only quantized version of the ref module.",
    "type": "function",
    "file_path": "pytorch\\torch\\ao\\quantization\\fx\\_lower_to_native_backend.py",
    "ast_data": "FunctionDef name:_lower_weight_only_weighted_ref_module arg:model arguments arg Assign Call Call For If BoolOp Compare Compare Call Call Call Call Assign Assign Call Assign Call Assign Call Assign Call Assign Call Call"
  },
  {
    "library": "pandas",
    "name": "select_describe_func",
    "source_code": "def select_describe_func(data: Series) -> Callable:\n    if is_bool_dtype(data.dtype):\n        return describe_categorical_1d\n    elif is_numeric_dtype(data):\n        return describe_numeric_1d\n    elif data.dtype.kind == 'M' or isinstance(data.dtype, DatetimeTZDtype):\n        return describe_timestamp_1d\n    elif data.dtype.kind == 'm':\n        return describe_numeric_1d\n    else:\n        return describe_categorical_1d",
    "docstring": "Select proper function for describing series based on data type. Parameters ---------- data : Series Series to be described.",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\methods\\describe.py",
    "ast_data": "FunctionDef name:select_describe_func arg:data arguments arg If Call Return return:yes If Call Return return:yes If BoolOp Compare Call Return return:yes If Compare Return return:yes Return return:yes"
  },
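A sketch of the same dtype dispatch using only public pandas predicates (the DatetimeTZDtype special case collapses into kind 'M' here, since that dtype reports kind 'M' as well):

```python
import pandas as pd
from pandas.api.types import is_bool_dtype, is_numeric_dtype

def describe_kind(s: pd.Series) -> str:
    # Mirrors select_describe_func's ordering: bools are summarized as
    # categoricals, timedeltas (kind 'm') as numerics.
    if is_bool_dtype(s.dtype):
        return "categorical"
    if is_numeric_dtype(s):
        return "numeric"
    if s.dtype.kind == "M":
        return "timestamp"
    if s.dtype.kind == "m":
        return "numeric"
    return "categorical"

print(describe_kind(pd.Series([1.0, 2.0])))                      # numeric
print(describe_kind(pd.Series(pd.to_datetime(["2024-01-01"]))))  # timestamp
print(describe_kind(pd.Series(["a", "b"])))                      # categorical
```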
  {
    "library": "pytorch",
    "name": "_polynomial_value",
    "source_code": "def _polynomial_value(poly, x, zero_power, transition):\n    res = zero_power.clone()\n    for k in range(poly.size(-1) - 2, -1, -1):\n        res = transition(res, x, poly[..., k])\n    return res",
    "docstring": "A generic method for computing poly(x) using the Horner's rule. Args: poly (Tensor): the (possibly batched) 1D Tensor representing polynomial coefficients such that poly[..., i] = (a_{i_0}, ..., a{i_n} (==1)), and poly(x) = poly[..., 0] * zero_power + ... + poly[..., n] * x^n x (Tensor): the value (possible batched) to evalate the polynomial at. zero_power (Tensor): the representation of . It is application-specific. transition (Callable): the function that accepts some intermediate result , the and a specific polynomial coefficient for some iteration . It basically performs one iteration of the Horner's rule defined as . Note that is not a parameter, because the step depends on , whether it is a vector, a matrix, or something else, so this functionality is delegated to the user.",
    "type": "function",
    "file_path": "pytorch\\torch\\_lobpcg.py",
    "ast_data": "FunctionDef name:_polynomial_value arg:poly arg:x arg:zero_power arg:transition arguments arg arg arg arg Assign Call For Call Call Assign Call Return return:yes"
  },
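With a scalar transition `res * x + coeff`, the helper reduces to textbook Horner evaluation; a minimal check under the docstring's assumptions (trailing coefficient 1, `zero_power` representing x^0):

```python
import torch

def polynomial_value(poly, x, zero_power, transition):
    # Same loop as _polynomial_value: fold coefficients a_{n-1}..a_0 into res.
    res = zero_power.clone()
    for k in range(poly.size(-1) - 2, -1, -1):
        res = transition(res, x, poly[..., k])
    return res

poly = torch.tensor([6.0, 5.0, 1.0])  # 6 + 5x + x^2, leading coefficient == 1
x = torch.tensor(2.0)
one = torch.tensor(1.0)               # representation of x^0
val = polynomial_value(poly, x, one, lambda res, x, c: res * x + c)
print(val)  # tensor(20.) == 4 + 10 + 6
```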
  {
    "library": "pytorch",
    "name": "constant_",
    "source_code": "@_sharded_op_impl(torch.nn.init.constant_)\ndef constant_(types, args=(), kwargs=None, pg=None):\n    validate_param(kwargs, 'kwargs')\n    sharded_tensor = kwargs['tensor']\n    validate_param(sharded_tensor, 'tensor')\n    val = kwargs['val']\n    validate_param(val, 'val')\n    for shard in sharded_tensor.local_shards():\n        torch.nn.init.constant_(shard.tensor, val=val)\n    return sharded_tensor",
    "docstring": "Fills the input ShardedTensor with the value \\text{val}val. Args: tensor: tensor sharded across devices val: the value to fill the tensor with",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\_shard\\sharded_tensor\\_ops\\init.py",
    "ast_data": "FunctionDef name:constant_ arg:types arg:args arg:kwargs arg:pg arguments arg arg arg arg Call Assign Call Assign Call For Call Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "update",
    "source_code": "def update(self, partitions):\n    for shared_submodules in partitions:\n        for entry in shared_submodules:\n            graph = entry.module.graph\n            node_names = self.node_names_by_fqn[entry.fqn]\n            nodes = [n for n in graph.nodes if n.name in node_names]\n            for node in nodes:\n                with graph.inserting_after(node):\n                    new_node = graph.create_node('call_function', torch.ops.aten.copy_.default, (node.args[0], node))\n                    new_node.meta = copy.copy(node.meta)",
    "docstring": "Update states corresponding to intermediate values that were read.",
    "type": "method",
    "file_path": "pytorch\\torch\\export\\unflatten.py",
    "ast_data": "FunctionDef name:update arg:self arg:partitions arguments arg arg For For Assign Assign Assign Compare For With Call Assign Call Assign Call"
  },
  {
    "library": "django",
    "name": "serve",
    "source_code": "def serve(self, request):\n    return serve(request, self.file_path(request.path), insecure=True)",
    "docstring": "Serve the request path.",
    "type": "method",
    "file_path": "django\\django\\contrib\\staticfiles\\handlers.py",
    "ast_data": "FunctionDef name:serve arg:self arg:request arguments arg arg Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "get_inductor_decomp_graphs",
    "source_code": "def get_inductor_decomp_graphs(model: nn.Module, args, kwargs):\n    compiled_mod = aot_module(model, fw_compiler=fwd_bwd_compiler, decompositions=inductor_decomps)\n    output = compiled_mod(*args, **kwargs)\n    if output.ndim != 0:\n        output = output.sum()\n    output.backward()\n    assert len(graphs) == 2\n    return graphs",
    "docstring": "Obtain forward and backward graphs of a model with inductor decompositions using tracing and aot_module. Convenient util to get the fwd and bwd graphs of an arbitrary model with inductor decompositions. Note that this would simply do tracing with aot_module and don't ensure correctness. This is useful to track the ops needed in DTensor.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\tensor\\debug\\_op_coverage.py",
    "ast_data": "FunctionDef name:get_inductor_decomp_graphs arg:model arg:args arg:kwargs arguments arg arg arg Assign Call Assign Call If Compare Assign Call Call Compare Call Return return:yes"
  },
  {
    "library": "kornia",
    "name": "unpad",
    "source_code": "def unpad(self, padding_size: Tensor) -> 'Keypoints3D':\n    raise NotImplementedError",
    "docstring": "Pad a bounding keypoints. Args: padding_size: (B, 6)",
    "type": "method",
    "file_path": "kornia\\kornia\\geometry\\keypoints.py",
    "ast_data": "FunctionDef name:unpad arg:self arg:padding_size arguments arg arg Raise"
  },
  {
    "library": "tensorflow",
    "name": "_header_paths",
    "source_code": "def _header_paths():\n    return ['', 'include', 'include/cuda', 'include/*-linux-gnu', 'extras/CUPTI/include', 'include/cuda/CUPTI', 'local/cuda/extras/CUPTI/include', 'targets/x86_64-linux/include']",
    "docstring": "Returns hard-coded set of relative paths to look for header files.",
    "type": "function",
    "file_path": "tensorflow\\third_party\\xla\\third_party\\gpus\\find_cuda_config.py",
    "ast_data": "FunctionDef name:_header_paths arguments Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, saved_model_dir, saved_model_tags=None, saved_model_exported_names=None, trackable_obj=None):\n    super(TFLiteSavedModelConverterV2, self).__init__()\n    self.saved_model_dir = saved_model_dir\n    self._saved_model_tags = saved_model_tags\n    self._saved_model_exported_names = saved_model_exported_names\n    self._trackable_obj = trackable_obj\n    self._parse_saved_model_args(always_enable_saved_model_import=True)",
    "docstring": "Constructor for TFLiteConverter. Args: saved_model_dir: Directory of the SavedModel. saved_model_tags: Set of tags identifying the MetaGraphDef within the SavedModel to analyze. All tags in the tag set must be present. (default {tf.saved_model.SERVING}). saved_model_exported_names: Names to be exported when the saved model import path is on. trackable_obj: tf.AutoTrackable object associated with . A reference to this object needs to be maintained so that Variables do not get garbage collected since functions have a weak reference to Variables. This is only required when the tf.AutoTrackable object is not maintained by the user (e.g. ).",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\lite\\python\\lite.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:saved_model_dir arg:saved_model_tags arg:saved_model_exported_names arg:trackable_obj arguments arg arg arg arg arg Call Call Assign Assign Assign Assign Call"
  },
  {
    "library": "django",
    "name": "Transform",
    "source_code": "class Transform(RegisterLookupMixin, Func):\n    bilateral = False\n    arity = 1\n\n    @property\n    def lhs(self):\n        return self.get_source_expressions()[0]\n\n    def get_bilateral_transforms(self):\n        if hasattr(self.lhs, 'get_bilateral_transforms'):\n            bilateral_transforms = self.lhs.get_bilateral_transforms()\n        else:\n            bilateral_transforms = []\n        if self.bilateral:\n            bilateral_transforms.append(self.__class__)\n        return bilateral_transforms",
    "docstring": "RegisterLookupMixin() is first so that get_lookup() and get_transform() first examine self and then check output_field.",
    "type": "class",
    "file_path": "django\\django\\db\\models\\lookups.py",
    "ast_data": "ClassDef name:Transform Assign Assign FunctionDef name:lhs arg:self arguments arg Return return:yes Call FunctionDef name:get_bilateral_transforms arg:self arguments arg If Call Assign Call Assign If Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "stop",
    "source_code": "def stop(self, threads=None, close_summary_writer=True, ignore_live_threads=False):\n    self._coord.request_stop()\n    try:\n        self._coord.join(threads, stop_grace_period_secs=self._stop_grace_secs, ignore_live_threads=ignore_live_threads)\n    finally:\n        if close_summary_writer and self._summary_writer:\n            self._summary_writer.add_session_log(SessionLog(status=SessionLog.STOP))\n            self._summary_writer.close()\n            self._graph_added_to_summary = False",
    "docstring": "Stop the services and the coordinator. This does not close the session. Args: threads: Optional list of threads to join with the coordinator. If , defaults to the threads running the standard services, the threads started for , and the threads started by the method. To wait on additional threads, pass the list in this parameter. close_summary_writer: Whether to close the . Defaults to if the summary writer was created by the supervisor, otherwise. ignore_live_threads: If ignores threads that remain running after a grace period when joining threads via the coordinator, instead of raising a RuntimeError.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\training\\supervisor.py",
    "ast_data": "FunctionDef name:stop arg:self arg:threads arg:close_summary_writer arg:ignore_live_threads arguments arg arg arg arg Call Try Call If BoolOp Call Call Call Assign"
  },
  {
    "library": "tensorflow",
    "name": "object_path_to_string",
    "source_code": "def object_path_to_string(node_path_arr):\n    return '/'.join((escape_local_name(trackable.name) for trackable in node_path_arr))",
    "docstring": "Converts a list of nodes to a string.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\trackable\\trackable_utils.py",
    "ast_data": "FunctionDef name:object_path_to_string arg:node_path_arr arguments arg Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "owner_set",
    "source_code": "@property\ndef owner_set(self):\n    owners = set()\n    if self.has_attr() or self.has_subscript():\n        owners.add(self.parent)\n        owners.update(self.parent.owner_set)\n    return owners",
    "docstring": "Returns all the symbols (simple or composite) that own this QN. In other words, if this symbol was modified, the symbols in the owner set may also be affected. Examples: 'a.b[c.d]' has two owners, 'a' and 'a.b'",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\autograph\\pyct\\qual_names.py",
    "ast_data": "FunctionDef name:owner_set arg:self arguments arg Assign Call If BoolOp Call Call Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_l2_normalize_data",
    "source_code": "@classmethod\ndef _l2_normalize_data(cls, inputs):\n    output = []\n    for inp in inputs:\n        with ops.colocate_with(inp, ignore_existing=True):\n            output.append(nn_impl.l2_normalize(inp, dim=1))\n    return output",
    "docstring": "Normalized the input data.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\clustering_ops.py",
    "ast_data": "FunctionDef name:_l2_normalize_data arg:cls arg:inputs arguments arg arg Assign For With Call Call Call Return return:yes"
  },
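The math behind nn_impl.l2_normalize(inp, dim=1) is row-wise division by the Euclidean norm; a numpy illustration (TensorFlow additionally guards the denominator with a small epsilon):

```python
import numpy as np

x = np.array([[3.0, 4.0], [1.0, 0.0]])
norms = np.linalg.norm(x, axis=1, keepdims=True)  # per-row L2 norms
print(x / norms)  # [[0.6 0.8], [1. 0.]] -- every row now has unit length
```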
  {
    "library": "pytorch",
    "name": "reduction",
    "source_code": "def reduction(self, dtype: torch.dtype, src_dtype: torch.dtype, reduction_type: ReductionType, value: T) -> Union[T, tuple[T, ...]]:\n    raise NotImplementedError",
    "docstring": "Perform a 'reduction_type' reduction on 'value' of dtype 'src_dtype', using 'dtype' as the accumulation dtype for the reduction. The result is an intermediate computation which should be stored to the final location using 'ops.store_reduction'. Valid reduction types are . For Welford reduction types, this function returns multiple outputs; consult reduction_num_outputs to determine the amount in metaprogramming applications.",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\ops_handler.py",
    "ast_data": "FunctionDef name:reduction arg:self arg:dtype arg:src_dtype arg:reduction_type arg:value arguments arg arg arg arg arg Raise"
  },
  {
    "library": "matplotlib",
    "name": "set_ydata",
    "source_code": "def set_ydata(self, y):\n    if not np.iterable(y):\n        raise RuntimeError('y must be a sequence')\n    self._yorig = copy.copy(y)\n    self._invalidy = True\n    self.stale = True",
    "docstring": "Set the data array for y. Parameters ---------- y : 1D array See Also -------- set_data set_xdata",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\lines.py",
    "ast_data": "FunctionDef name:set_ydata arg:self arg:y arguments arg arg If Call Raise Call Assign Call Assign Assign"
  },
  {
    "library": "scipy",
    "name": "_do_extrapolate",
    "source_code": "def _do_extrapolate(fill_value):\n    return isinstance(fill_value, str) and fill_value == 'extrapolate'",
    "docstring": "Helper to check if fill_value == \"extrapolate\" without warnings",
    "type": "function",
    "file_path": "scipy\\scipy\\interpolate\\_interpolate.py",
    "ast_data": "FunctionDef name:_do_extrapolate arg:fill_value arguments arg Return return:yes BoolOp Call Compare"
  },
  {
    "library": "pytorch",
    "name": "set_fused_module",
    "source_code": "def set_fused_module(self, fused_module: type[torch.nn.Module]) -> BackendPatternConfig:\n    self.fused_module = fused_module\n    return self",
    "docstring": "Set the module that represents the fused implementation for this pattern.",
    "type": "method",
    "file_path": "pytorch\\torch\\ao\\quantization\\backend_config\\backend_config.py",
    "ast_data": "FunctionDef name:set_fused_module arg:self arg:fused_module arguments arg arg Assign Return return:yes"
  },
  {
    "library": "pandas",
    "name": "get_context",
    "source_code": "def get_context(config_fname: str, **kwargs):\n    with open(config_fname, encoding='utf-8') as f:\n        context = yaml.safe_load(f)\n    context['source_path'] = os.path.dirname(config_fname)\n    context.update(kwargs)\n    preprocessors = (get_callable(context_prep) for context_prep in context['main']['context_preprocessors'])\n    for preprocessor in preprocessors:\n        context = preprocessor(context)\n        msg = f'{preprocessor.__name__} is missing the return statement'\n        assert context is not None, msg\n    return context",
    "docstring": "Load the config yaml as the base context, and enrich it with the information added by the context preprocessors defined in the file.",
    "type": "function",
    "file_path": "pandas\\web\\pandas_web.py",
    "ast_data": "FunctionDef name:get_context arg:config_fname arguments arg arg With Call Assign Call Assign Call Call Assign Call For Assign Call Assign Compare Return return:yes"
  },
  {
    "library": "django",
    "name": "projected",
    "source_code": "@property\ndef projected(self):\n    return bool(capi.isprojected(self.ptr))",
    "docstring": "Return True if this SpatialReference is a projected coordinate system (root node is PROJCS).",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\gdal\\srs.py",
    "ast_data": "FunctionDef name:projected arg:self arguments arg Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "where",
    "source_code": "@tf_export.tf_export('experimental.numpy.where', v1=[])\n@np_utils.np_doc_only('where')\ndef where(condition, x=None, y=None):\n    condition = asarray(condition, dtype=np.bool_)\n    if x is None and y is None:\n        return nonzero(condition)\n    elif x is not None and y is not None:\n        x, y = _promote_dtype(x, y)\n        return array_ops.where_v2(condition, x, y)\n    raise ValueError('Both x and y must be ndarrays, or both must be None.')",
    "docstring": "Raises ValueError if exactly one of x or y is not None.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\numpy_ops\\np_array_ops.py",
    "ast_data": "FunctionDef name:where arg:condition arg:x arg:y arguments arg arg arg Assign Call If BoolOp Compare Compare Return return:yes Call If BoolOp Compare Compare Assign Call Return return:yes Call Raise Call Call Call"
  },
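Both call forms, matching the docstring's contract that x and y must be given together or not at all:

```python
import tensorflow.experimental.numpy as tnp

cond = tnp.asarray([True, False, True])
print(tnp.where(cond))            # one arg: indices of True entries (nonzero)
print(tnp.where(cond, 1.0, 0.0))  # three args: elementwise select -> [1. 0. 1.]
# tnp.where(cond, 1.0)  # exactly one of x/y given -> ValueError
```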
  {
    "library": "tensorflow",
    "name": "_set_state_variables",
    "source_code": "def _set_state_variables(self, updates):\n    if not self.built:\n        raise RuntimeError('_set_state_variables() must be called after build().')\n    with ops.init_scope():\n        for var_name, value in updates.items():\n            self.state_variables[var_name].assign(value)",
    "docstring": "Directly update the internal state of this Layer. This method expects a string-keyed dict of {state_variable_name: state}. The precise nature of the state, and the names associated, are describe by the subclasses of CombinerPreprocessingLayer. Args: updates: A string keyed dict of weights to update. Raises: RuntimeError: if 'build()' was not called before 'set_processing_state'.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\base_preprocessing_layer.py",
    "ast_data": "FunctionDef name:_set_state_variables arg:self arg:updates arguments arg arg If Raise Call With Call For Call Call"
  },
  {
    "library": "pytorch",
    "name": "average_parameters",
    "source_code": "def average_parameters(self, params: Union[Iterable[torch.nn.Parameter], Iterable[dict[str, torch.nn.Parameter]]]):\n    if self.step >= self.warmup_steps and (self.step - self.warmup_steps) % self.period == 0:\n        utils.average_parameters_or_parameter_groups(params, _not_none(self.process_group))\n    self.step += 1",
    "docstring": "Averages parameters or parameter groups of an optimizer if `` is increased by 1 at each iteration in the training loop. Args: params: The parameters of a model or parameter groups of an optimizer.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\algorithms\\model_averaging\\averagers.py",
    "ast_data": "FunctionDef name:average_parameters arg:self arg:params arguments arg arg If BoolOp Compare Compare Call Call"
  },
  {
    "library": "pytorch",
    "name": "buffer_names",
    "source_code": "def buffer_names(self, ignore_integer_index: bool=True) -> OrderedSet[str]:\n    names: OrderedSet[str] = OrderedSet()\n    for dep in self.reads_and_writes():\n        if not isinstance(dep, MemoryDep):\n            continue\n        if not ignore_integer_index or not isinstance(dep.index, (int, sympy.Integer)):\n            names.add(dep.name)\n    return names",
    "docstring": "Integer index is used for load_seed.",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\dependencies.py",
    "ast_data": "FunctionDef name:buffer_names arg:self arg:ignore_integer_index arguments arg arg Call For Call If Call If BoolOp Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "remove_custom",
    "source_code": "def remove_custom(self, opset: OpsetVersion) -> None:\n    if not self._functions.overridden(opset):\n        warnings.warn(f\"No custom function registered for '{self._name}' opset {opset}\")\n        return\n    self._functions.remove_override(opset)",
    "docstring": "Removes a custom symbolic function. Args: opset: The opset version of the custom function to remove.",
    "type": "method",
    "file_path": "pytorch\\torch\\onnx\\_internal\\registration.py",
    "ast_data": "FunctionDef name:remove_custom arg:self arg:opset arguments arg arg If Call Call Return return:no Call"
  },
  {
    "library": "tensorflow",
    "name": "NormalWithSoftplusScale",
    "source_code": "class NormalWithSoftplusScale(Normal):\n\n    @deprecation.deprecated('2019-01-01', 'Use `tfd.Normal(loc, tf.nn.softplus(scale)) instead.', warn_once=True)\n    def __init__(self, loc, scale, validate_args=False, allow_nan_stats=True, name='NormalWithSoftplusScale'):\n        parameters = dict(locals())\n        with ops.name_scope(name, values=[scale]) as name:\n            super(NormalWithSoftplusScale, self).__init__(loc=loc, scale=nn.softplus(scale, name='softplus_scale'), validate_args=validate_args, allow_nan_stats=allow_nan_stats, name=name)\n        self._parameters = parameters",
    "docstring": "Normal with softplus applied to .",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\distributions\\normal.py",
    "ast_data": "ClassDef name:NormalWithSoftplusScale FunctionDef name:__init__ arg:self arg:loc arg:scale arg:validate_args arg:allow_nan_stats arg:name arguments arg arg arg arg arg arg Assign Call Call With Call Call Call Call Assign Call"
  },
  {
    "library": "scipy",
    "name": "time_interpolate",
    "source_code": "def time_interpolate(self, n_samples, method):\n    interpolate.interp1d(self.x, self.y, kind=method)",
    "docstring": "Time the construction overhead.",
    "type": "method",
    "file_path": "scipy\\benchmarks\\benchmarks\\interpolate.py",
    "ast_data": "FunctionDef name:time_interpolate arg:self arg:n_samples arg:method arguments arg arg arg Call"
  },
  {
    "library": "tensorflow",
    "name": "_assign_if_finite",
    "source_code": "def _assign_if_finite(var, value):\n    return cond.cond(math_ops.is_finite(value), lambda: _op_in_graph_mode(var.assign(value)), control_flow_ops.no_op)",
    "docstring": "Assigns a value to a variable if the value is finite.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\mixed_precision\\loss_scale_optimizer.py",
    "ast_data": "FunctionDef name:_assign_if_finite arg:var arg:value arguments arg arg Return return:yes Call Call arguments Call Call"
  },
  {
    "library": "pytorch",
    "name": "AliasesNewOutput",
    "source_code": "class AliasesNewOutput(OutputAliasInfo):\n    __slots__ = ['index']\n    index: int\n\n    def __init__(self, index: int) -> None:\n        assert isinstance(index, int)\n        self.index = index",
    "docstring": "Marks that the graph output aliases an index in the new, returned outputs",
    "type": "class",
    "file_path": "pytorch\\torch\\_inductor\\cudagraph_trees.py",
    "ast_data": "ClassDef name:AliasesNewOutput Assign FunctionDef name:__init__ arg:self arg:index arguments arg arg Call Assign"
  },
  {
    "library": "pandas",
    "name": "_scalar_divlike_op",
    "source_code": "def _scalar_divlike_op(self, other, op):\n    if isinstance(other, self._recognized_scalars):\n        other = Timedelta(other)\n        if cast('Timedelta | NaTType', other) is NaT:\n            res = np.empty(self.shape, dtype=np.float64)\n            res.fill(np.nan)\n            return res\n        return op(self._ndarray, other)\n    else:\n        if op in [roperator.rtruediv, roperator.rfloordiv]:\n            raise TypeError(f'Cannot divide {type(other).__name__} by {type(self).__name__}')\n        result = op(self._ndarray, other)\n        freq = None\n        if self.freq is not None:\n            freq = self.freq / other\n            if freq.nanos == 0 and self.freq.nanos != 0:\n                freq = None\n        return type(self)._simple_new(result, dtype=result.dtype, freq=freq)",
    "docstring": "Shared logic for __truediv__, __rtruediv__, __floordiv__, __rfloordiv__ with scalar 'other'.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\arrays\\timedeltas.py",
    "ast_data": "FunctionDef name:_scalar_divlike_op arg:self arg:other arg:op arguments arg arg arg If Call Assign Call If Compare Call Assign Call Call Return return:yes Return return:yes Call If Compare Raise Call Call Call Assign Call Assign If Compare Assign If BoolOp Compare Compare Assign Return return:yes Call Call"
  },
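Both scalar branches are visible from the public API, assuming a NaT-like divisor expressed as np.timedelta64('NaT') (which the first branch converts to NaT): division by a timedelta scalar yields floats, division by a plain number rescales and keeps the timedelta dtype:

```python
import numpy as np
import pandas as pd

tdi = pd.to_timedelta(["1 days", "2 days"])
print(tdi / pd.Timedelta("1 days"))  # Index([1.0, 2.0]): timedelta/timedelta -> float
print(tdi / np.timedelta64("NaT"))   # all-NaN float result (the NaT branch)
print(tdi / 2)                       # TimedeltaIndex(['0 days 12:00:00', '1 days'])
```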
  {
    "library": "tensorflow",
    "name": "_MaxGrad",
    "source_code": "@ops.RegisterGradient('Max')\ndef _MaxGrad(op: ops.Operation, grad):\n    return _MinOrMaxGrad(op, grad)",
    "docstring": "Gradient for Max.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\math_grad.py",
    "ast_data": "FunctionDef name:_MaxGrad arg:op arg:grad arguments arg arg Return return:yes Call Call"
  },
  {
    "library": "scipy",
    "name": "sorted_indices",
    "source_code": "def sorted_indices(self):\n    A = self.copy()\n    A.sort_indices()\n    return A",
    "docstring": "Return a copy of this array/matrix with sorted indices",
    "type": "method",
    "file_path": "scipy\\scipy\\sparse\\_compressed.py",
    "ast_data": "FunctionDef name:sorted_indices arg:self arguments arg Assign Call Call Return return:yes"
  },
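The only difference from sort_indices() is copy versus in-place; a CSR built directly from (data, indices, indptr) can start unsorted:

```python
import numpy as np
from scipy.sparse import csr_matrix

# Row 0 stores columns in the order [2, 0]: structurally valid, but unsorted.
A = csr_matrix((np.array([1.0, 2.0]), np.array([2, 0]), np.array([0, 2, 2])),
               shape=(2, 3))
print(A.has_sorted_indices)  # False
B = A.sorted_indices()       # copy with sorted indices
print(B.indices, A.indices)  # [0 2] [2 0] -- the original is untouched
A.sort_indices()             # the in-place counterpart
print(A.indices)             # [0 2]
```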
  {
    "library": "pytorch",
    "name": "TensorType",
    "source_code": "@compatibility(is_backward_compatible=False)\nclass TensorType:\n\n    def __init__(self, dim):\n        self.__origin__ = TensorType\n        self.__args__ = dim\n\n    def __repr__(self):\n        return f'TensorType[{self.__args__}]'\n\n    def __eq__(self, other):\n        if isinstance(other, self.__class__):\n            return list(self.__args__) == list(other.__args__)\n        else:\n            return False\n\n    @staticmethod\n    def __class_getitem__(*args):\n        if len(args) == 1 and isinstance(args[0], tuple):\n            args = args[0]\n        return TensorType(tuple(args))",
    "docstring": "TensorType defines a type for tensors, which consists of a list of dimensions. Example: class M(torch.nn.Module): def forward(self, x:TensorType((1,2,3, Dyn)), y:TensorType((1,2,3, Dyn))): return torch.add(x, y)",
    "type": "class",
    "file_path": "pytorch\\torch\\fx\\tensor_type.py",
    "ast_data": "ClassDef name:TensorType FunctionDef name:__init__ arg:self arg:dim arguments arg arg Assign Assign FunctionDef name:__repr__ arg:self arguments arg Return return:yes FunctionDef name:__eq__ arg:self arg:other arguments arg arg If Call Return return:yes Compare Call Call Return return:yes FunctionDef name:__class_getitem__ arguments arg If BoolOp Compare Call Call Assign Return return:yes Call Call Call"
  },
  {
    "library": "kornia",
    "name": "resize_back",
    "source_code": "def resize_back(self, images: Tensor, target_images: Tensor) -> Tensor:\n    if isinstance(target_images, Tensor):\n        return resize(images, target_images.shape[-2:])\n    raise RuntimeError",
    "docstring": "Resize the input images back to the original size of target images. Args: images: The input images to be resized. target_images: The target images whose size is used as the reference for resizing. Returns: The resized images.",
    "type": "method",
    "file_path": "kornia\\kornia\\models\\_hf_models\\hf_onnx_community.py",
    "ast_data": "FunctionDef name:resize_back arg:self arg:images arg:target_images arguments arg arg arg If Call Return return:yes Call Raise"
  },
  {
    "library": "pygame",
    "name": "__len__",
    "source_code": "def __len__(self):\n    return len(self.sprites())",
    "docstring": "return number of sprites in group Group.len(group): return int Returns the number of sprites contained in the group.",
    "type": "method",
    "file_path": "pygame\\src_py\\sprite.py",
    "ast_data": "FunctionDef name:__len__ arg:self arguments arg Return return:yes Call Call"
  },
  {
    "library": "django",
    "name": "SynchronousOnlyOperation",
    "source_code": "class SynchronousOnlyOperation(Exception):\n    pass",
    "docstring": "The user tried to call a sync-only function from an async context.",
    "type": "class",
    "file_path": "django\\django\\core\\exceptions.py",
    "ast_data": "ClassDef name:SynchronousOnlyOperation"
  },
  {
    "library": "tensorflow",
    "name": "SharedObjectLoadingScope",
    "source_code": "class SharedObjectLoadingScope(object):\n\n    def __enter__(self):\n        if _shared_object_disabled():\n            return NoopLoadingScope()\n        global SHARED_OBJECT_LOADING\n        SHARED_OBJECT_LOADING.scope = self\n        self._obj_ids_to_obj = {}\n        return self\n\n    def get(self, object_id):\n        if object_id is None:\n            return\n        return self._obj_ids_to_obj.get(object_id)\n\n    def set(self, object_id, obj):\n        if object_id is None:\n            return\n        self._obj_ids_to_obj[object_id] = obj\n\n    def __exit__(self, *args, **kwargs):\n        global SHARED_OBJECT_LOADING\n        SHARED_OBJECT_LOADING.scope = NoopLoadingScope()",
    "docstring": "A context manager for keeping track of loaded objects. During the deserialization process, we may come across objects that are shared across multiple layers. In order to accurately restore the network structure to its original state, allows us to re-use shared objects rather than cloning them.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\utils\\generic_utils.py",
    "ast_data": "ClassDef name:SharedObjectLoadingScope FunctionDef name:__enter__ arg:self arguments arg If Call Return return:yes Call Assign Assign Return return:yes FunctionDef name:get arg:self arg:object_id arguments arg arg If Compare Return return:no Return return:yes Call FunctionDef name:set arg:self arg:object_id arg:obj arguments arg arg arg If Compare Return return:no Assign FunctionDef name:__exit__ arg:self arguments arg arg arg Assign Call"
  },
  {
    "library": "scikit-learn",
    "name": "_solve_W",
    "source_code": "def _solve_W(self, X, H, max_iter):\n    avg = np.sqrt(X.mean() / self._n_components)\n    W = np.full((X.shape[0], self._n_components), avg, dtype=X.dtype)\n    W_buffer = W.copy()\n    l1_reg_W, _, l2_reg_W, _ = self._compute_regularization(X)\n    for _ in range(max_iter):\n        W, *_ = _multiplicative_update_w(X, W, H, self._beta_loss, l1_reg_W, l2_reg_W, self._gamma)\n        W_diff = linalg.norm(W - W_buffer) / linalg.norm(W)\n        if self.tol > 0 and W_diff <= self.tol:\n            break\n        W_buffer[:] = W\n    return W",
    "docstring": "Minimize the objective function w.r.t W. Update W with H being fixed, until convergence. This is the heart of but it's also used during when doing fresh restarts.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\decomposition\\_nmf.py",
    "ast_data": "FunctionDef name:_solve_W arg:self arg:X arg:H arg:max_iter arguments arg arg arg arg Assign Call Call Assign Call Assign Call Assign Call For Call Assign Call Assign Call Call If BoolOp Compare Compare Assign Return return:yes"
  },
  {
    "library": "scipy",
    "name": "apply_filter",
    "source_code": "def apply_filter(self, x, axis=-1, mode='constant', cval=0):\n    output_len = _output_len(self._h_len_orig, x.shape[axis], self._up, self._down)\n    output_shape = np.asarray(x.shape, dtype=np.int64)\n    output_shape[axis] = output_len\n    out = np.zeros(output_shape, dtype=self._output_type, order='C')\n    axis = axis % x.ndim\n    mode = _check_mode(mode)\n    _apply(np.asarray(x, self._output_type), self._h_trans_flip, out, self._up, self._down, axis, mode, cval)\n    return out",
    "docstring": "Apply the prepared filter to the specified axis of N-D signal x.",
    "type": "method",
    "file_path": "scipy\\scipy\\signal\\_upfirdn.py",
    "ast_data": "FunctionDef name:apply_filter arg:self arg:x arg:axis arg:mode arg:cval arguments arg arg arg arg arg Assign Call Assign Call Assign Assign Call Assign Assign Call Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_make_input_fn_iterator",
    "source_code": "def _make_input_fn_iterator(self, input_fn, replication_mode=distribute_lib.InputReplicationMode.PER_WORKER):\n    if self._cluster_spec:\n        input_pipeline_id = multi_worker_util.id_in_cluster(self._cluster_spec, self._task_type, self._task_id)\n        num_input_pipelines = multi_worker_util.worker_count(self._cluster_spec, self._task_type)\n    else:\n        input_pipeline_id = 0\n        num_input_pipelines = 1\n    input_context = distribute_lib.InputContext(num_input_pipelines=num_input_pipelines, input_pipeline_id=input_pipeline_id, num_replicas_in_sync=self._num_replicas_in_sync)\n    return input_lib_v1.InputFunctionIterator(input_fn, self._input_workers, [input_context], self._container_strategy())",
    "docstring": "Distributes the dataset to each local GPU.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\parameter_server_strategy.py",
    "ast_data": "FunctionDef name:_make_input_fn_iterator arg:self arg:input_fn arg:replication_mode arguments arg arg arg If Assign Call Assign Call Assign Assign Assign Call Return return:yes Call Call"
  },
  {
    "library": "scipy",
    "name": "parse_data",
    "source_code": "def parse_data(self, data_str):\n    if '?' in data_str:\n        return np.nan\n    else:\n        return float(data_str)",
    "docstring": "Parse a value of this type. Parameters ---------- data_str : str string to convert Returns ------- f : float where float can be nan Examples -------- >>> from scipy.io.arff._arffread import NumericAttribute >>> atr = NumericAttribute('atr') >>> atr.parse_data('1') 1.0 >>> atr.parse_data('1\\n') 1.0 >>> atr.parse_data('?\\n') nan",
    "type": "method",
    "file_path": "scipy\\scipy\\io\\arff\\_arffread.py",
    "ast_data": "FunctionDef name:parse_data arg:self arg:data_str arguments arg arg If Compare Return return:yes Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "create_local_config_python",
    "source_code": "def create_local_config_python(dst_dir: str) -> None:\n    numpy_include_dir = 'external/pypi_numpy/site-packages/numpy/_core/include'\n    if not os.path.exists(numpy_include_dir):\n        numpy_include_dir = 'external/pypi_numpy/site-packages/numpy/core/include'\n    shutil.copytree(numpy_include_dir, os.path.join(dst_dir, 'numpy_include'))\n    if is_windows():\n        path = 'external/python_*/include'\n    else:\n        path = 'external/python_*/include/python*'\n    shutil.copytree(glob.glob(path)[0], os.path.join(dst_dir, 'python_include'))",
    "docstring": "Copy python and numpy header files to the destination directory.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\tools\\pip_package\\build_pip_package.py",
    "ast_data": "FunctionDef name:create_local_config_python arg:dst_dir arguments arg Assign If Call Assign Call Call If Call Assign Assign Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "ExportOptions",
    "source_code": "@dataclasses.dataclass\nclass ExportOptions:\n    export_params: bool = True\n    verbose: bool = False\n    training: _C_onnx.TrainingMode = _C_onnx.TrainingMode.EVAL\n    input_names: Optional[Sequence[str]] = None\n    output_names: Optional[Sequence[str]] = None\n    operator_export_type: _C_onnx.OperatorExportTypes = _C_onnx.OperatorExportTypes.ONNX\n    opset_version: Optional[int] = None\n    do_constant_folding: bool = True\n    dynamic_axes: Optional[Mapping[str, Union[Mapping[int, str], Sequence[int]]]] = None\n    keep_initializers_as_inputs: Optional[bool] = None\n    custom_opsets: Optional[Mapping[str, int]] = None\n    export_modules_as_functions: Union[bool, set[type[torch.nn.Module]]] = False",
    "docstring": "Arguments used by :func:.",
    "type": "class",
    "file_path": "pytorch\\torch\\onnx\\_experimental.py",
    "ast_data": "ClassDef name:ExportOptions"
  },
  {
    "library": "numpy",
    "name": "encode",
    "source_code": "def encode(self, encoding=None, errors=None):\n    return encode(self, encoding, errors)",
    "docstring": "Calls :meth: element-wise. See Also -------- char.encode",
    "type": "method",
    "file_path": "numpy\\numpy\\_core\\defchararray.py",
    "ast_data": "FunctionDef name:encode arg:self arg:encoding arg:errors arguments arg arg arg Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "win",
    "source_code": "@property\ndef win(self) -> np.ndarray:\n    return self._win",
    "docstring": "Window function as real- or complex-valued 1d array. This attribute is read-only, since depends on it. To make this array immutable, its WRITEABLE flag is set to `winwinm_num`. hop: ime increment in signal samples for sliding window. win: Window function as real- or complex-valued 1d array. numpy.ndarray.setflags: Modify array flags. ShortTimeFFT: Class this property belongs to.",
    "type": "method",
    "file_path": "scipy\\scipy\\signal\\_short_time_fft.py",
    "ast_data": "FunctionDef name:win arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "matches",
    "source_code": "def matches(self, parent, field, child):\n    if self.parent is ANY or isinstance(parent, self.parent):\n        pass\n    else:\n        return False\n    if self.field is ANY or field == self.field:\n        pass\n    else:\n        return False\n    return self.child is ANY or isinstance(child, self.child)",
    "docstring": "Computes whether this pattern matches the given edge.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\autograph\\pyct\\common_transformers\\anf.py",
    "ast_data": "FunctionDef name:matches arg:self arg:parent arg:field arg:child arguments arg arg arg arg If BoolOp Compare Call Return return:yes If BoolOp Compare Compare Return return:yes Return return:yes BoolOp Compare Call"
  },
  {
    "library": "numpy",
    "name": "msvc_runtime_major",
    "source_code": "def msvc_runtime_major():\n    major = {1300: 70, 1310: 71, 1400: 80, 1500: 90, 1600: 100, 1900: 140}.get(msvc_runtime_version(), None)\n    return major",
    "docstring": "Return major version of MSVC runtime coded like get_build_msvc_version",
    "type": "function",
    "file_path": "numpy\\numpy\\distutils\\misc_util.py",
    "ast_data": "FunctionDef name:msvc_runtime_major arguments Assign Call Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "classproperty",
    "source_code": "class classproperty:\n\n    def __init__(self, fget, fset=None, fdel=None, doc=None):\n        self._fget = fget\n        if fset is not None or fdel is not None:\n            raise ValueError('classproperty only implements fget.')\n        self.fset = fset\n        self.fdel = fdel\n        self._doc = doc\n\n    def __get__(self, instance, owner):\n        return self._fget(owner)\n\n    @property\n    def fget(self):\n        return self._fget",
    "docstring": "Like , but also triggers on access via the class, and it is the *class* that's passed as argument. Examples -------- :: class C: @classproperty def foo(cls): return cls.__name__ assert C.foo == \"C\"",
    "type": "class",
    "file_path": "matplotlib\\lib\\matplotlib\\_api\\__init__.py",
    "ast_data": "ClassDef name:classproperty FunctionDef name:__init__ arg:self arg:fget arg:fset arg:fdel arg:doc arguments arg arg arg arg arg Assign If BoolOp Compare Compare Raise Call Assign Assign Assign FunctionDef name:__get__ arg:self arg:instance arg:owner arguments arg arg arg Return return:yes Call FunctionDef name:fget arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "get_module_docstring",
    "source_code": "def get_module_docstring(module_name, package, api_name):\n    for version in _API_VERSIONS:\n        compat_prefix = _COMPAT_MODULE_TEMPLATE % version\n        if module_name.startswith(compat_prefix):\n            module_name = module_name[len(compat_prefix):].strip('.')\n    docstring_module_name = module_name\n    doc_sources = doc_srcs.get_doc_sources(api_name)\n    if module_name in doc_sources:\n        docsrc = doc_sources[module_name]\n        if docsrc.docstring:\n            return docsrc.docstring\n        if docsrc.docstring_module_name:\n            docstring_module_name = docsrc.docstring_module_name\n    if package != 'tf_keras':\n        docstring_module_name = package + '.' + docstring_module_name\n    if docstring_module_name in sys.modules and sys.modules[docstring_module_name].__doc__:\n        return sys.modules[docstring_module_name].__doc__\n    return 'Public API for tf.%s namespace.' % module_name",
    "docstring": "Get docstring for the given module. This method looks for docstring in the following order: 1. Checks if module has a docstring specified in doc_srcs. 2. Checks if module has a docstring source module specified in doc_srcs. If it does, gets docstring from that module. 3. Checks if module with module_name exists under base package. If it does, gets docstring from that module. 4. Returns a default docstring. Args: module_name: module name relative to tensorflow (excluding 'tensorflow.' prefix) to get a docstring for. package: Base python package containing python with target tf_export decorators. api_name: API you want to generate Currently, only . Returns: One-line docstring to describe the module.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\tools\\api\\generator\\create_python_api.py",
    "ast_data": "FunctionDef name:get_module_docstring arg:module_name arg:package arg:api_name arguments arg arg arg For Assign If Call Assign Call Call Assign Assign Call If Compare Assign If Return return:yes If Assign If Compare Assign If BoolOp Compare Return return:yes Return return:yes"
  },
  {
    "library": "seaborn",
    "name": "xkcd_palette",
    "source_code": "def xkcd_palette(colors):\n    palette = [xkcd_rgb[name] for name in colors]\n    return color_palette(palette, len(palette))",
    "docstring": "Make a palette with color names from the xkcd color survey. See xkcd for the full list of colors: This is just a simple wrapper around the dictionary. Parameters ---------- colors : list of strings List of keys in the dictionary. Returns ------- palette A list of colors as RGB tuples. See Also -------- crayon_palette : Make a palette with Crayola crayon colors.",
    "type": "function",
    "file_path": "seaborn\\seaborn\\palettes.py",
    "ast_data": "FunctionDef name:xkcd_palette arg:colors arguments arg Assign Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "load_text",
    "source_code": "def load_text(self, package: str, resource: str, encoding: str='utf-8', errors: str='strict') -> str:\n    data = self.load_binary(package, resource)\n    return data.decode(encoding, errors)",
    "docstring": "Load a string. Args: package (str): The name of module package (e.g. ``. Returns: str: The loaded text.",
    "type": "method",
    "file_path": "pytorch\\torch\\package\\package_importer.py",
    "ast_data": "FunctionDef name:load_text arg:self arg:package arg:resource arg:encoding arg:errors arguments arg arg arg arg arg Assign Call Return return:yes Call"
  },
  {
    "library": "scrapy",
    "name": "get_response",
    "source_code": "def get_response(self) -> Deferred[Response]:\n    return self._deferred_response",
    "docstring": "Simply return a Deferred which fires when response from the asynchronous request is available",
    "type": "method",
    "file_path": "scrapy\\scrapy\\core\\http2\\stream.py",
    "ast_data": "FunctionDef name:get_response arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "is_in_eager_or_tf_function",
    "source_code": "def is_in_eager_or_tf_function():\n    return context.executing_eagerly() or is_in_tf_function()",
    "docstring": "Returns if in eager mode or inside of a tf.function.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\base_layer_utils.py",
    "ast_data": "FunctionDef name:is_in_eager_or_tf_function arguments Return return:yes BoolOp Call Call"
  },
  {
    "library": "kornia",
    "name": "identity_matrix",
    "source_code": "def identity_matrix(self, input: Tensor) -> Tensor:\n    if self.contains_3d_augmentation:\n        return eye_like(4, input)\n    else:\n        return eye_like(3, input)",
    "docstring": "Return identity matrix.",
    "type": "method",
    "file_path": "kornia\\kornia\\augmentation\\container\\augment.py",
    "ast_data": "FunctionDef name:identity_matrix arg:self arg:input arguments arg arg If Return return:yes Call Return return:yes Call"
  },
  {
    "library": "numpy",
    "name": "fromarrays",
    "source_code": "def fromarrays(arraylist, dtype=None, shape=None, formats=None, names=None, titles=None, aligned=False, byteorder=None, fill_value=None):\n    datalist = [ma.getdata(x) for x in arraylist]\n    masklist = [np.atleast_1d(ma.getmaskarray(x)) for x in arraylist]\n    _array = np.rec.fromarrays(datalist, dtype=dtype, shape=shape, formats=formats, names=names, titles=titles, aligned=aligned, byteorder=byteorder).view(mrecarray)\n    _array._mask.flat = list(zip(*masklist))\n    if fill_value is not None:\n        _array.fill_value = fill_value\n    return _array",
    "docstring": "Creates a mrecarray from a (flat) list of masked arrays. Parameters ---------- arraylist : sequence A list of (masked) arrays. Each element of the sequence is first converted to a masked array if needed. If a 2D array is passed as argument, it is processed line by line dtype : {None, dtype}, optional Data type descriptor. shape : {None, integer}, optional Number of records. If None, shape is defined from the shape of the first array in the list. formats : {None, sequence}, optional Sequence of formats for each individual field. If None, the formats will be autodetected by inspecting the fields and selecting the highest dtype possible. names : {None, sequence}, optional Sequence of the names of each field. fill_value : {None, sequence}, optional Sequence of data to be used as filling values. Notes ----- Lists of tuples should be preferred over lists of lists for faster processing.",
    "type": "function",
    "file_path": "numpy\\numpy\\ma\\mrecords.py",
    "ast_data": "FunctionDef name:fromarrays arg:arraylist arg:dtype arg:shape arg:formats arg:names arg:titles arg:aligned arg:byteorder arg:fill_value arguments arg arg arg arg arg arg arg arg arg Assign Call Assign Call Call Assign Call Call Assign Call Call If Compare Assign Return return:yes"
  },
  {
    "library": "cryptography",
    "name": "public_bytes_raw",
    "source_code": "@abc.abstractmethod\ndef public_bytes_raw(self) -> bytes:\n    pass",
    "docstring": "The raw bytes of the public key. Equivalent to public_bytes(Raw, Raw).",
    "type": "method",
    "file_path": "cryptography\\src\\cryptography\\hazmat\\primitives\\asymmetric\\ed448.py",
    "ast_data": "FunctionDef name:public_bytes_raw arg:self arguments arg"
  },
  {
    "library": "pygame",
    "name": "init",
    "source_code": "def init():\n    if not _module_init():\n        _pypm.Initialize()\n        _module_init(True)\n        atexit.register(quit)",
    "docstring": "initialize the midi module pygame.midi.init(): return None Call the initialisation function before using the midi module. It is safe to call this more than once.",
    "type": "function",
    "file_path": "pygame\\src_py\\midi.py",
    "ast_data": "FunctionDef name:init arguments If Call Call Call Call"
  },
  {
    "library": "pandas",
    "name": "_categories_match_up_to_permutation",
    "source_code": "def _categories_match_up_to_permutation(self, other: Categorical) -> bool:\n    return hash(self.dtype) == hash(other.dtype)",
    "docstring": "Returns True if categoricals are the same dtype same categories, and same ordered Parameters ---------- other : Categorical Returns ------- bool",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\arrays\\categorical.py",
    "ast_data": "FunctionDef name:_categories_match_up_to_permutation arg:self arg:other arguments arg arg Return return:yes Compare Call Call"
  },
  {
    "library": "tensorflow",
    "name": "embedding_tables",
    "source_code": "@property\ndef embedding_tables(self) -> Dict[tpu_embedding_v2_utils.TableConfig, tf_variables.Variable]:\n    self._maybe_build()\n    return {table: self._variables[table.name]['parameters'] for table in self._table_config}",
    "docstring": "Returns a dict of embedding tables, keyed by .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\tpu\\tpu_embedding_v1.py",
    "ast_data": "FunctionDef name:embedding_tables arg:self arguments arg Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_call_unconverted",
    "source_code": "def _call_unconverted(f, args, kwargs, options, update_cache=True):\n    if update_cache:\n        conversion.cache_allowlisted(f, options)\n    if inspect.ismethod(f) and isinstance(f.__self__, tf_method_target.TfMethodTarget):\n        return f.__self__.call(args, kwargs)\n    if kwargs is not None:\n        return f(*args, **kwargs)\n    return f(*args)",
    "docstring": "Calls the original function without converting with AutoGraph.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\autograph\\impl\\api.py",
    "ast_data": "FunctionDef name:_call_unconverted arg:f arg:args arg:kwargs arg:options arg:update_cache arguments arg arg arg arg arg If Call If BoolOp Call Call Return return:yes Call If Compare Return return:yes Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "get_state",
    "source_code": "@abstractmethod\ndef get_state(self) -> Optional[tuple[bytes, Token]]:\n    pass",
    "docstring": "Get the rendezvous state. Returns: A tuple of the encoded rendezvous state and its fencing token or `` if no state is found in the backend. Raises: RendezvousConnectionError: The connection to the backend has failed. RendezvousStateError: The rendezvous state is corrupt.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\elastic\\rendezvous\\dynamic_rendezvous.py",
    "ast_data": "FunctionDef name:get_state arg:self arguments arg"
  },
  {
    "library": "tensorflow",
    "name": "get_min_max_value",
    "source_code": "def get_min_max_value(self) -> tuple[float, float]:\n    return self._get_min_max_value_by_expanding_range(self._num_bins // 2)",
    "docstring": "Finds min and max starting from the center index. The HistogramMseSymmetric method starts from the center bin and expands the range to both sides. This works better when the data is well-centered. Returns: (min_value, max_value): Min and max calculated using the method starting from center and expanding.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\compiler\\mlir\\quantization\\tensorflow\\calibrator\\calibration_algorithm.py",
    "ast_data": "FunctionDef name:get_min_max_value arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "get_property_stubs",
    "source_code": "def get_property_stubs(nn_module):\n    module_ty = type(nn_module)\n    properties_asts = get_class_properties(module_ty, self_name='RecursiveScriptModule')\n    rcbs = {}\n    for name in dir(module_ty):\n        item = getattr(module_ty, name, None)\n        if isinstance(item, property):\n            if not item.fget:\n                raise RuntimeError(f'Property {name} of {nn_module.__name__} must have a getter')\n            rcbs[name] = _jit_internal.createResolutionCallbackFromClosure(item.fget)\n    stubs = [PropertyStub(rcbs[ast.name().name], ast) for ast in properties_asts]\n    return stubs",
    "docstring": "Create property stubs for the properties of the module by creating method stubs for the getter and setter.",
    "type": "function",
    "file_path": "pytorch\\torch\\jit\\_recursive.py",
    "ast_data": "FunctionDef name:get_property_stubs arg:nn_module arguments arg Assign Call Assign Call Assign For Call Assign Call If Call If Raise Call Assign Call Assign Call Call Return return:yes"
  },
  {
    "library": "kornia",
    "name": "forward",
    "source_code": "def forward(self, patch: torch.Tensor) -> torch.Tensor:\n    KORNIA_CHECK_SHAPE(patch, ['B', '1', 'H', 'W'])\n    self.weighting = self.weighting.to(patch.dtype).to(patch.device)\n    grads: torch.Tensor = self.gradient(patch) * self.weighting\n    gx: torch.Tensor = grads[:, :, 0]\n    gy: torch.Tensor = grads[:, :, 1]\n    ellipse_shape = torch.cat([gx.pow(2).mean(dim=2).mean(dim=2, keepdim=True), (gx * gy).mean(dim=2).mean(dim=2, keepdim=True), gy.pow(2).mean(dim=2).mean(dim=2, keepdim=True)], dim=2)\n    bad_mask = ((ellipse_shape < self.eps).float().sum(dim=2, keepdim=True) >= 2).to(ellipse_shape.dtype)\n    circular_shape = torch.tensor([1.0, 0.0, 1.0]).to(ellipse_shape.device).to(ellipse_shape.dtype).view(1, 1, 3)\n    ellipse_shape = ellipse_shape * (1.0 - bad_mask) + circular_shape * bad_mask\n    ellipse_shape = ellipse_shape / ellipse_shape.max(dim=2, keepdim=True)[0]\n    return ellipse_shape",
    "docstring": "Run forward. Args: patch: :math: Returns: torch.Tensor: ellipse_shape :math:",
    "type": "method",
    "file_path": "kornia\\kornia\\feature\\affine_shape.py",
    "ast_data": "FunctionDef name:forward arg:self arg:patch arguments arg arg Call Assign Call Call Call Assign Call Call Call Call Call Call Call Call Call Assign Call Compare Call Call Compare Assign Call Call Call Call Assign Assign Call Return return:yes"
  },
  {
    "library": "pandas",
    "name": "jit_user_function",
    "source_code": "def jit_user_function(func: Callable) -> Callable:\n    if TYPE_CHECKING:\n        import numba\n    else:\n        numba = import_optional_dependency('numba')\n    if numba.extending.is_jitted(func):\n        numba_func = func\n    elif getattr(np, func.__name__, False) is func or isinstance(func, types.BuiltinFunctionType):\n        numba_func = func\n    else:\n        numba_func = numba.extending.register_jitable(func)\n    return numba_func",
    "docstring": "If user function is not jitted already, mark the user's function as jitable. Parameters ---------- func : function user defined function Returns ------- function Numba JITed function, or function marked as JITable by numba",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\util\\numba_.py",
    "ast_data": "FunctionDef name:jit_user_function arg:func arguments arg If Assign Call If Call Assign If BoolOp Compare Call Call Assign Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_py_lazy_or",
    "source_code": "def _py_lazy_or(cond, b):\n    return cond or b()",
    "docstring": "Lazy-eval equivalent of \"or\" in Python.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\autograph\\operators\\logical.py",
    "ast_data": "FunctionDef name:_py_lazy_or arg:cond arg:b arguments arg arg Return return:yes BoolOp Call"
  },
  {
    "library": "tensorflow",
    "name": "LearningRateSchedule",
    "source_code": "class LearningRateSchedule(object):\n\n    @abc.abstractmethod\n    def __call__(self, step):\n        raise NotImplementedError('Learning rate schedule must override __call__')\n\n    @abc.abstractmethod\n    def get_config(self):\n        raise NotImplementedError('Learning rate schedule must override get_config')\n\n    @classmethod\n    def from_config(cls, config):\n        return cls(**config)",
    "docstring": "The learning rate schedule base class. You can use a learning rate schedule to modulate how the learning rate of your optimizer changes over time. Several built-in learning rate schedules are available, such as or : A instance can be passed in as the argument of any optimizer. To implement your own schedule object, you should implement the method, which takes a argument (scalar integer tensor, the current training step count). Like for any other Keras object, you can also optionally make your object serializable by implementing the and methods. Example:",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\optimizer_v2\\learning_rate_schedule.py",
    "ast_data": "ClassDef name:LearningRateSchedule FunctionDef name:__call__ arg:self arg:step arguments arg arg Raise Call FunctionDef name:get_config arg:self arguments arg Raise Call FunctionDef name:from_config arg:cls arg:config arguments arg arg Return return:yes Call"
  },
  {
    "library": "django",
    "name": "_alter_column_database_default_sql",
    "source_code": "def _alter_column_database_default_sql(self, model, old_field, new_field, drop=False):\n    if drop:\n        sql = self.sql_alter_column_no_default\n        default_sql = ''\n        params = []\n    else:\n        sql = self.sql_alter_column_default\n        default_sql, params = self.db_default_sql(new_field)\n    new_db_params = new_field.db_parameters(connection=self.connection)\n    return (sql % {'column': self.quote_name(new_field.column), 'type': new_db_params['type'], 'default': default_sql}, params)",
    "docstring": "Hook to specialize column database default alteration. Return a (sql, params) fragment to add or drop (depending on the drop argument) a default to new_field's column.",
    "type": "method",
    "file_path": "django\\django\\db\\backends\\base\\schema.py",
    "ast_data": "FunctionDef name:_alter_column_database_default_sql arg:self arg:model arg:old_field arg:new_field arg:drop arguments arg arg arg arg arg If Assign Assign Assign Assign Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "cherrypy",
    "name": "__call__",
    "source_code": "def __call__(self, path_info):\n    func = self.find_handler(path_info)\n    if func:\n        cherrypy.serving.request.handler = LateParamPageHandler(func)\n    else:\n        cherrypy.serving.request.handler = cherrypy.NotFound()",
    "docstring": "Set handler and config for the current request.",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\_cpdispatch.py",
    "ast_data": "FunctionDef name:__call__ arg:self arg:path_info arguments arg arg Assign Call If Assign Call Assign Call"
  },
  {
    "library": "numpy",
    "name": "strides_as",
    "source_code": "def strides_as(self, obj):\n    if self._zerod:\n        return None\n    return (obj * self._arr.ndim)(*self._arr.strides)",
    "docstring": "Return the strides tuple as an array of some other c-types type. For example: ``.",
    "type": "method",
    "file_path": "numpy\\numpy\\_core\\_internal.py",
    "ast_data": "FunctionDef name:strides_as arg:self arg:obj arguments arg arg If Return return:no Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, driver_name, data_source_name, query, output_types):\n    self._driver_name = ops.convert_to_tensor(driver_name, dtype=dtypes.string, name='driver_name')\n    self._data_source_name = ops.convert_to_tensor(data_source_name, dtype=dtypes.string, name='data_source_name')\n    self._query = ops.convert_to_tensor(query, dtype=dtypes.string, name='query')\n    self._element_spec = nest.map_structure(lambda dtype: tensor_spec.TensorSpec([], dtype), output_types)\n    variant_tensor = gen_experimental_dataset_ops.sql_dataset(self._driver_name, self._data_source_name, self._query, **self._flat_structure)\n    super(SqlDatasetV2, self).__init__(variant_tensor)",
    "docstring": "Creates a . Args: driver_name: A 0-D tensor containing the database type. Currently, the only supported value is 'sqlite'. data_source_name: A 0-D tensor containing a connection string to connect to the database. query: A 0-D tensor containing the SQL query to execute. output_types: A tuple of objects representing the types of the columns returned by .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\data\\experimental\\ops\\readers.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:driver_name arg:data_source_name arg:query arg:output_types arguments arg arg arg arg arg Assign Call Assign Call Assign Call Assign Call arguments arg Call Assign Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "solvevec",
    "source_code": "def solvevec(self, rhs, adjoint=False, name='solve'):\n    with self._name_scope(name):\n        block_dimensions = self._block_domain_dimensions() if adjoint else self._block_range_dimensions()\n        if linear_operator_util.arg_is_blockwise(block_dimensions, rhs, -1):\n            for i, block in enumerate(rhs):\n                if not isinstance(block, linear_operator.LinearOperator):\n                    block = tensor_conversion.convert_to_tensor_v2_with_dispatch(block)\n                    self._check_input_dtype(block)\n                    block_dimensions[i].assert_is_compatible_with(block.shape[-1])\n                    rhs[i] = block\n            rhs_mat = [array_ops.expand_dims(block, axis=-1) for block in rhs]\n            solution_mat = self.solve(rhs_mat, adjoint=adjoint)\n            return [array_ops.squeeze(x, axis=-1) for x in solution_mat]\n        rhs = tensor_conversion.convert_to_tensor_v2_with_dispatch(rhs, name='rhs')\n        self._check_input_dtype(rhs)\n        op_dimension = self.domain_dimension if adjoint else self.range_dimension\n        op_dimension.assert_is_compatible_with(rhs.shape[-1])\n        rhs_mat = array_ops.expand_dims(rhs, axis=-1)\n        solution_mat = self.solve(rhs_mat, adjoint=adjoint)\n        return array_ops.squeeze(solution_mat, axis=-1)",
    "docstring": "Solve single equation with best effort: . The returned will be close to an exact solution if is well conditioned. Otherwise closeness will vary. See class docstring for details. Examples: Args: rhs: with same as this operator, or list of s (for blockwise operators). s are treated as [batch] vectors, meaning for every set of leading dimensions, the last dimension defines a vector. See class docstring for definition of compatibility regarding batch dimensions. adjoint: Python . If , solve the system involving the adjoint of this : . name: A name scope to use for ops added by this method. Returns: with shape and same as . Raises: NotImplementedError: If or is False.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\linalg\\linear_operator_block_lower_triangular.py",
    "ast_data": "FunctionDef name:solvevec arg:self arg:rhs arg:adjoint arg:name arguments arg arg arg arg With Call Assign Call Call If Call For Call If Call Assign Call Call Call Assign Assign Call Assign Call Return return:yes Call Assign Call Call Assign Call Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_case_create_default_action",
    "source_code": "def _case_create_default_action(predicates, actions):\n    k = len(predicates) - 1\n    predicate, action = (predicates[k], actions[k])\n    other_predicates, other_actions = (predicates[:k], actions[:k])\n\n    def default_action():\n        others_msg = 'Implementation error: selected default action #%d was called, but some of other predicates are True: ' % k\n        default_msg = ('Input error: None of conditions evaluated as True:', array_ops_stack.stack(predicates, name='preds_c'))\n        with ops.control_dependencies([_assert_at_most_n_true(other_predicates, n=0, msg=others_msg), control_flow_assert.Assert(predicate, data=default_msg)]):\n            return action()\n    return (default_action, other_predicates, other_actions)",
    "docstring": "Creates default action for a list of actions and their predicates. It uses the input actions to select an arbitrary as default and makes sure that corresponding predicates have valid values. Args: predicates: a list of bool scalar tensors actions: a list of callable objects which return tensors. Returns: a callable",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\control_flow_case.py",
    "ast_data": "FunctionDef name:_case_create_default_action arg:predicates arg:actions arguments arg arg Assign Call Assign Assign FunctionDef name:default_action arguments Assign Assign Call With Call Call Call Return return:yes Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_choose_qparams_per_token_asymmetric_impl",
    "source_code": "@impl(quantized_decomposed_lib, '_choose_qparams_per_token_asymmetric_impl', 'CompositeImplicitAutograd')\ndef _choose_qparams_per_token_asymmetric_impl(input: torch.Tensor, dtype: torch.dtype) -> tuple[torch.Tensor, torch.Tensor]:\n    qmin, qmax = (-128, 127)\n    min_val = torch.amin(input, dim=-1, keepdim=True)\n    max_val = torch.amax(input, dim=-1, keepdim=True)\n    min_val_neg = torch.min(min_val, torch.zeros_like(min_val))\n    max_val_pos = torch.max(max_val, torch.zeros_like(max_val))\n    eps = torch.finfo(torch.float32).eps\n    scale = (max_val_pos - min_val_neg) / float(qmax - qmin)\n    scale = scale.clamp(min=eps)\n    descaled_min = min_val_neg / scale\n    descaled_max = max_val_pos / scale\n    zero_point_from_min_error = qmin + descaled_min\n    zero_point_from_max_error = qmax + descaled_max\n    zero_point = torch.where(zero_point_from_min_error + zero_point_from_max_error > 0, qmin - descaled_min, qmax - descaled_max)\n    zero_point = torch.clamp(zero_point, qmin, qmax).round()\n    return (scale.to(torch.float64), zero_point.to(torch.int64))",
    "docstring": "Choose quantization parameters for per token quantization. This means for a N dimension Tensor (M1, M2, ...Mn, N), we calculate scales/zero_points for each N elements and quantize every N elements with the same quantization parameter. The dimension for scales/zero_points will be (M1 * M2 ... * Mn) Args: input (torch.Tensor): original float32/float16 Tensor dtype (torch.dtype): dtype (e.g. torch.uint8) for input Tensor Returns: scales and zero_points, both float32 Tensors",
    "type": "function",
    "file_path": "pytorch\\torch\\ao\\quantization\\fx\\_decomposed.py",
    "ast_data": "FunctionDef name:_choose_qparams_per_token_asymmetric_impl arg:input arg:dtype arguments arg arg Assign Assign Call Assign Call Assign Call Call Assign Call Call Assign Call Assign Call Assign Call Assign Assign Assign Assign Assign Call Compare Assign Call Call Return return:yes Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_copy_trackable_to_cpu",
    "source_code": "def _copy_trackable_to_cpu(self, object_map):\n    pass",
    "docstring": "For implementing async checkpointing.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\sharded_variable.py",
    "ast_data": "FunctionDef name:_copy_trackable_to_cpu arg:self arg:object_map arguments arg arg"
  },
  {
    "library": "tensorflow",
    "name": "UnbatchBenchmark",
    "source_code": "class UnbatchBenchmark(benchmark_base.DatasetBenchmarkBase):\n\n    def benchmark_native_unbatch(self):\n        batch_sizes = [1, 2, 5, 10, 20, 50]\n        num_elements = 10000\n        for batch_size in batch_sizes:\n            dataset = dataset_ops.Dataset.from_tensors('element').repeat(None)\n            dataset = dataset.batch(batch_size)\n            dataset = dataset.unbatch()\n            self.run_and_report_benchmark(dataset=dataset, num_elements=num_elements, iters=5, extras={'model_name': 'unbatch.benchmark.1', 'parameters': '%d' % batch_size}, name='native_batch_size_%d' % batch_size)\n\n    def benchmark_old_unbatch_implementation(self):\n        batch_sizes = [1, 2, 5, 10, 20, 50]\n        num_elements = 10000\n        for batch_size in batch_sizes:\n            dataset = dataset_ops.Dataset.from_tensors('element').repeat(None)\n            dataset = dataset.batch(batch_size)\n            dataset = dataset.flat_map(dataset_ops.Dataset.from_tensor_slices)\n            self.run_and_report_benchmark(dataset=dataset, num_elements=num_elements, iters=5, extras={'model_name': 'unbatch.benchmark.2', 'parameters': '%d' % batch_size}, name='unfused_batch_size_%d' % batch_size)",
    "docstring": "Benchmarks for .",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\data\\experimental\\benchmarks\\unbatch_benchmark.py",
    "ast_data": "ClassDef name:UnbatchBenchmark FunctionDef name:benchmark_native_unbatch arg:self arguments arg Assign Assign For Assign Call Call Assign Call Assign Call Call FunctionDef name:benchmark_old_unbatch_implementation arg:self arguments arg Assign Assign For Assign Call Call Assign Call Assign Call Call"
  },
  {
    "library": "tensorflow",
    "name": "wrap_with_training_arg",
    "source_code": "def wrap_with_training_arg(*args, **kwargs):\n    training_arg_index = get_training_arg_index(original_call)\n    training = get_training_arg(training_arg_index, args, kwargs)\n    if training is None:\n        training = default_training_value or K.learning_phase()\n    args = list(args)\n    kwargs = kwargs.copy()\n\n    def replace_training_and_call(training):\n        set_training_arg(training, training_arg_index, args, kwargs)\n        return wrapped_call(*args, **kwargs)\n    return control_flow_util.smart_cond(training, lambda: replace_training_and_call(True), lambda: replace_training_and_call(False))",
    "docstring": "Wrap the function, and set training argument.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\saving\\saved_model\\utils.py",
    "ast_data": "FunctionDef name:wrap_with_training_arg arguments arg arg Assign Call Assign Call If Compare Assign BoolOp Call Assign Call Assign Call FunctionDef name:replace_training_and_call arg:training arguments arg Call Return return:yes Call Return return:yes Call arguments Call arguments Call"
  },
  {
    "library": "scipy",
    "name": "find_active_constraints",
    "source_code": "def find_active_constraints(x, lb, ub, rtol=1e-10):\n    active = np.zeros_like(x, dtype=int)\n    if rtol == 0:\n        active[x <= lb] = -1\n        active[x >= ub] = 1\n        return active\n    lower_dist = x - lb\n    upper_dist = ub - x\n    lower_threshold = rtol * np.maximum(1, np.abs(lb))\n    upper_threshold = rtol * np.maximum(1, np.abs(ub))\n    lower_active = np.isfinite(lb) & (lower_dist <= np.minimum(upper_dist, lower_threshold))\n    active[lower_active] = -1\n    upper_active = np.isfinite(ub) & (upper_dist <= np.minimum(lower_dist, upper_threshold))\n    active[upper_active] = 1\n    return active",
    "docstring": "Determine which constraints are active in a given point. The threshold is computed using and the absolute value of the closest bound. Returns ------- active : ndarray of int with shape of x Each component shows whether the corresponding constraint is active: * 0 - a constraint is not active. * -1 - a lower bound is active. * 1 - a upper bound is active.",
    "type": "function",
    "file_path": "scipy\\scipy\\optimize\\_lsq\\common.py",
    "ast_data": "FunctionDef name:find_active_constraints arg:x arg:lb arg:ub arg:rtol arguments arg arg arg arg Assign Call If Compare Assign Compare Assign Compare Return return:yes Assign Assign Assign Call Call Assign Call Call Assign Call Compare Call Assign Assign Call Compare Call Assign Return return:yes"
  },
  {
    "library": "django",
    "name": "__getitem__",
    "source_code": "def __getitem__(self, index):\n    if 0 <= index < self.geom_count:\n        return OGRGeometry(capi.clone_geom(capi.get_geom_ref(self.ptr, index)), self.srs)\n    else:\n        raise IndexError('Index out of range when accessing rings of a polygon: %s.' % index)",
    "docstring": "Get the ring at the specified index.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\gdal\\geometries.py",
    "ast_data": "FunctionDef name:__getitem__ arg:self arg:index arguments arg arg If Compare Return return:yes Call Call Call Raise Call"
  },
  {
    "library": "django",
    "name": "resolve",
    "source_code": "def resolve(self, path, create_if_missing=False, leaf_cls=None, check_exists=True):\n    path_segments = list(pathlib.Path(path).parts)\n    current_node = self\n    while path_segments:\n        path_segment = path_segments.pop(0)\n        if isinstance(current_node, InMemoryFileNode):\n            path_segments = os.path.split(path)\n            current_path = '/'.join(path_segments[:path_segments.index(path_segment)])\n            raise NotADirectoryError(errno.ENOTDIR, os.strerror(errno.ENOTDIR), current_path)\n        current_node = current_node._resolve_child(path_segment, create_if_missing, leaf_cls if len(path_segments) == 0 else InMemoryDirNode)\n        if current_node is None:\n            break\n    if current_node is None and check_exists:\n        raise FileNotFoundError(errno.ENOENT, os.strerror(errno.ENOENT), path)\n    if leaf_cls and (not isinstance(current_node, leaf_cls)):\n        error_cls, error_code = (NotADirectoryError, errno.ENOTDIR) if leaf_cls is InMemoryDirNode else (IsADirectoryError, errno.EISDIR)\n        raise error_cls(error_code, os.strerror(error_code), path)\n    return current_node",
    "docstring": "Navigate current directory tree, returning node matching path or creating a new one, if missing. - path: path of the node to search - create_if_missing: create nodes if not exist. Defaults to False. - leaf_cls: expected type of leaf node. Defaults to None. - check_exists: if True and the leaf node does not exist, raise a FileNotFoundError. Defaults to True.",
    "type": "method",
    "file_path": "django\\django\\core\\files\\storage\\memory.py",
    "ast_data": "FunctionDef name:resolve arg:self arg:path arg:create_if_missing arg:leaf_cls arg:check_exists arguments arg arg arg arg arg Assign Call Call Assign While Assign Call If Call Assign Call Assign Call Call Raise Call Call Assign Call Compare Call If Compare If BoolOp Compare Raise Call Call If BoolOp Call Assign Compare Raise Call Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "transform_bbox",
    "source_code": "def transform_bbox(self, bbox):\n    return Bbox(self.transform(bbox.get_points()))",
    "docstring": "Transform the given bounding box. For smarter transforms including caching (a common requirement in Matplotlib), see .",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\transforms.py",
    "ast_data": "FunctionDef name:transform_bbox arg:self arg:bbox arguments arg arg Return return:yes Call Call Call"
  },
  {
    "library": "django",
    "name": "_fix_geometry_collection",
    "source_code": "@classmethod\ndef _fix_geometry_collection(cls, coll):\n    coll = coll.clone()\n    for i, geom in enumerate(coll):\n        if isinstance(geom, Polygon):\n            coll[i] = cls._fix_polygon(geom, clone=False)\n    return coll",
    "docstring": "Fix polygon orientations in geometry collections as described in __init__().",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\db\\backends\\oracle\\adapter.py",
    "ast_data": "FunctionDef name:_fix_geometry_collection arg:cls arg:coll arguments arg arg Assign Call For Call If Call Assign Call Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "_check_density",
    "source_code": "def _check_density(density, n_features):\n    if density == 'auto':\n        density = 1 / np.sqrt(n_features)\n    elif density <= 0 or density > 1:\n        raise ValueError('Expected density in range ]0, 1], got: %r' % density)\n    return density",
    "docstring": "Factorize density check according to Li et al.",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\random_projection.py",
    "ast_data": "FunctionDef name:_check_density arg:density arg:n_features arguments arg arg If Compare Assign Call If BoolOp Compare Compare Raise Call Return return:yes"
  },
  {
    "library": "pandas",
    "name": "column_arrays",
    "source_code": "@property\ndef column_arrays(self) -> list[np.ndarray]:\n    result: list[np.ndarray | None] = [None] * len(self.items)\n    for blk in self.blocks:\n        mgr_locs = blk._mgr_locs\n        values = blk.array_values._values_for_json()\n        if values.ndim == 1:\n            result[mgr_locs[0]] = values\n        else:\n            for i, loc in enumerate(mgr_locs):\n                result[loc] = values[i]\n    return result",
    "docstring": "Used in the JSON C code to access column arrays. This optimizes compared to using by converting each Warning! This doesn't handle Copy-on-Write, so should be used with caution (current use case of consuming this in the JSON code is fine).",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\internals\\managers.py",
    "ast_data": "FunctionDef name:column_arrays arg:self arguments arg Call For Assign Assign Call If Compare Assign For Call Assign Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "_release",
    "source_code": "def _release(self, event):\n    pass",
    "docstring": "Button release event handler.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\widgets.py",
    "ast_data": "FunctionDef name:_release arg:self arg:event arguments arg arg"
  },
  {
    "library": "pytorch",
    "name": "BinaryConstraint",
    "source_code": "class BinaryConstraint(Constraint):\n\n    def __init__(self, lhs, rhs, op):\n        self.lhs = lhs\n        self.rhs = rhs\n        self.op = op\n\n    def __eq__(self, other):\n        if isinstance(other, BinaryConstraint):\n            return self.lhs == other.lhs and self.rhs == other.rhs and (self.op == other.op)\n        else:\n            return False\n\n    def __repr__(self):\n        return f'({self.lhs} {self.op} {self.rhs})'",
    "docstring": "Represents all binary operations",
    "type": "class",
    "file_path": "pytorch\\torch\\fx\\experimental\\migrate_gradual_types\\constraint.py",
    "ast_data": "ClassDef name:BinaryConstraint FunctionDef name:__init__ arg:self arg:lhs arg:rhs arg:op arguments arg arg arg arg Assign Assign Assign FunctionDef name:__eq__ arg:self arg:other arguments arg arg If Call Return return:yes BoolOp Compare Compare Compare Return return:yes FunctionDef name:__repr__ arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "RaggedTensorType",
    "source_code": "class RaggedTensorType:\n\n    def __init__(self, dtype, ragged_rank, row_splits_dtype=dtypes.int64):\n        row_splits_dtype = dtypes.as_dtype(row_splits_dtype)\n        self._dtype = dtype\n        self._ragged_rank = ragged_rank\n        self._row_splits_dtype = row_splits_dtype\n    dtype = property(lambda self: self._dtype)\n    ragged_rank = property(lambda self: self._ragged_rank)\n    row_splits_dtype = property(lambda self: self._row_splits_dtype)\n\n    def __repr__(self):\n        return 'RaggedTensorType(%r, %r, %r)' % (self.dtype, self.ragged_rank, self.row_splits_dtype)",
    "docstring": "Encoding of a static type for a . Use this type to express/declare that an output must have the type of .",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\ragged_tensor.py",
    "ast_data": "ClassDef name:RaggedTensorType FunctionDef name:__init__ arg:self arg:dtype arg:ragged_rank arg:row_splits_dtype arguments arg arg arg arg Assign Call Assign Assign Assign Assign Call arguments arg Assign Call arguments arg Assign Call arguments arg FunctionDef name:__repr__ arg:self arguments arg Return return:yes"
  },
  {
    "library": "numpy",
    "name": "_join",
    "source_code": "@array_function_dispatch(_join_dispatcher)\ndef _join(sep, seq):\n    return _to_bytes_or_str_array(_vec_string(sep, np.object_, 'join', (seq,)), seq)",
    "docstring": "Return a string which is the concatenation of the strings in the sequence . Calls :meth: element-wise. Parameters ---------- sep : array-like, with `` dtype, depending on input types See Also -------- str.join Examples -------- >>> import numpy as np >>> np.strings.join('-', 'osd') # doctest: +SKIP array('o-s-d', dtype='>> np.strings.join(['-', '.'], ['ghc', 'osd']) # doctest: +SKIP array(['g-h-c', 'o.s.d'], dtype='<U5') # doctest: +SKIP",
    "type": "function",
    "file_path": "numpy\\numpy\\_core\\strings.py",
    "ast_data": "FunctionDef name:_join arg:sep arg:seq arguments arg arg Return return:yes Call Call Call"
  },
  {
    "library": "scipy",
    "name": "_ci_upper",
    "source_code": "def _ci_upper(table, alpha):\n    if _sample_odds_ratio(table) == np.inf:\n        return np.inf\n    x, M, n, N = _hypergeom_params_from_table(table)\n    nc = _solve(lambda nc: -nchypergeom_fisher.cdf(x, M, n, N, nc) + alpha)\n    return nc",
    "docstring": "Compute the upper end of the confidence interval.",
    "type": "function",
    "file_path": "scipy\\scipy\\stats\\_odds_ratio.py",
    "ast_data": "FunctionDef name:_ci_upper arg:table arg:alpha arguments arg arg If Compare Call Return return:yes Assign Call Assign Call arguments arg Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "MinuteLocator",
    "source_code": "class MinuteLocator(RRuleLocator):\n\n    def __init__(self, byminute=None, interval=1, tz=None):\n        if byminute is None:\n            byminute = range(60)\n        rule = rrulewrapper(MINUTELY, byminute=byminute, interval=interval, bysecond=0)\n        super().__init__(rule, tz=tz)",
    "docstring": "Make ticks on occurrences of each minute.",
    "type": "class",
    "file_path": "matplotlib\\lib\\matplotlib\\dates.py",
    "ast_data": "ClassDef name:MinuteLocator FunctionDef name:__init__ arg:self arg:byminute arg:interval arg:tz arguments arg arg arg arg If Compare Assign Call Assign Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "_replace_names",
    "source_code": "def _replace_names(shape_expr: str, rename_mapping: dict[str, str]) -> str:\n    for old_name, new_name in rename_mapping.items():\n        shape_expr = re.sub(f'(?<!\\\\w){re.escape(old_name)}(?!\\\\w)', new_name, shape_expr)\n    return shape_expr",
    "docstring": "Replace all known names in a shape expression with new names.",
    "type": "function",
    "file_path": "pytorch\\torch\\onnx\\_internal\\exporter\\_ir_passes.py",
    "ast_data": "FunctionDef name:_replace_names arg:shape_expr arg:rename_mapping arguments arg arg For Call Assign Call Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "get_supylabel",
    "source_code": "def get_supylabel(self):\n    text_obj = self._supylabel\n    return '' if text_obj is None else text_obj.get_text()",
    "docstring": "Return the supylabel as string or an empty string if not set.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\figure.py",
    "ast_data": "FunctionDef name:get_supylabel arg:self arguments arg Assign Return return:yes Compare Call"
  },
  {
    "library": "tensorflow",
    "name": "_compat_summary_scope",
    "source_code": "@contextlib.contextmanager\ndef _compat_summary_scope(name, family):\n    with _summary_op_util.summary_scope(name, family) as (tag, _):\n        with _summary_op_util.summary_scope(name='', family=None):\n            yield tag",
    "docstring": "Handles argument for v2 op invocation in v1.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\summary\\summary.py",
    "ast_data": "FunctionDef name:_compat_summary_scope arg:name arg:family arguments arg arg With Call With Call"
  },
  {
    "library": "scipy",
    "name": "pdf",
    "source_code": "@abstractmethod\ndef pdf(self, x, /, *, method):\n    raise NotImplementedError()",
    "docstring": "Probability density function The probability density function (\"PDF\"), denoted :math:, is the probability *per unit length* that the random variable will assume the value :math:. Mathematically, it can be defined as the derivative of the cumulative distribution function :math:: .. math:: f(x) = \\frac{d}{dx} F(x) accepts for :math:. Parameters ---------- x : array_like The argument of the PDF. method : {None, 'formula', 'logexp'} The strategy used to evaluate the PDF. By default (`methodmethodx[l, r]0x r11pdf` elsewhere. References ---------- .. [1] Probability density function, *Wikipedia*, Examples -------- Instantiate a distribution with the desired parameters: >>> from scipy import stats >>> X = stats.Uniform(a=-1., b=1.) Evaluate the PDF at the desired argument: >>> X.pdf(0.25) 0.5",
    "type": "method",
    "file_path": "scipy\\scipy\\stats\\_probability_distribution.py",
    "ast_data": "FunctionDef name:pdf arguments arg arg arg Raise Call"
  },
  {
    "library": "cherrypy",
    "name": "get",
    "source_code": "def get(self):\n    request = cherrypy.serving.request\n    self.tot_gets += 1\n    uri = cherrypy.url(qs=request.query_string)\n    uricache = self.store.get(uri)\n    if uricache is None:\n        return None\n    header_values = [request.headers.get(h, '') for h in uricache.selecting_headers]\n    variant = uricache.wait(key=tuple(sorted(header_values)), timeout=self.antistampede_timeout, debug=self.debug)\n    if variant is not None:\n        self.tot_hist += 1\n    return variant",
    "docstring": "Return the current variant if in the cache, else None.",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\lib\\caching.py",
    "ast_data": "FunctionDef name:get arg:self arguments arg Assign Assign Call Assign Call If Compare Return return:no Assign Call Assign Call Call Call If Compare Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "device_ids",
    "source_code": "@property\ndef device_ids(self):\n    if self._device_ids is None:\n        with ops.init_scope():\n            device_ids_list = []\n            for index, device in enumerate(self.components):\n                with ops.device(device):\n                    device_ids_list.append(array_ops.identity(constant_op.constant(index)))\n            self._device_ids = self.pack(device_ids_list)\n    return self._device_ids",
    "docstring": "A parallel tensor with scalar integers numbering component devices. Each device ID is placed on its corresponding device, in the same order as the constructor argument. Returns: A parallel tensor containing 0 on the first device, 1 on the second, etc.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\parallel_device\\parallel_device.py",
    "ast_data": "FunctionDef name:device_ids arg:self arguments arg If Compare With Call Assign For Call With Call Call Call Call Assign Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "set_axis",
    "source_code": "def set_axis(self, axis):\n    self._axis = axis",
    "docstring": "Select axis. Parameters ---------- axis : {\"both\", \"x\", \"y\"}",
    "type": "method",
    "file_path": "matplotlib\\lib\\mpl_toolkits\\axisartist\\axis_artist.py",
    "ast_data": "FunctionDef name:set_axis arg:self arg:axis arguments arg arg Assign"
  },
  {
    "library": "tensorflow",
    "name": "name",
    "source_code": "@property\ndef name(self):\n    return self._name",
    "docstring": "The name of this object. Used for checkpointing.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\tpu_replicated_variable.py",
    "ast_data": "FunctionDef name:name arg:self arguments arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_compute_compressed_swizzled_bitmask",
    "source_code": "def _compute_compressed_swizzled_bitmask(dense):\n    int_bitmask = dense.bool().to(torch.uint8)\n    bitmask_8x8_chunks = int_bitmask.unfold(0, 8, 8).unfold(1, 8, 8)\n    bitmask_4x4_chunks = bitmask_8x8_chunks.unfold(2, 4, 4).unfold(3, 4, 4)\n    bitmask_binary_representation = bitmask_4x4_chunks.reshape(*bitmask_4x4_chunks.shape[:2], 4, 2, 8)\n    powers_of_two = 2 ** torch.arange(8, dtype=torch.float, device='cuda')\n    compressed_swizzled_bitmask = (bitmask_binary_representation.to(torch.float) @ powers_of_two).to(torch.uint8)\n    return compressed_swizzled_bitmask",
    "docstring": "Calculates the compressed swizzled bitmask from a dense tensor",
    "type": "function",
    "file_path": "pytorch\\torch\\sparse\\_semi_structured_conversions.py",
    "ast_data": "FunctionDef name:_compute_compressed_swizzled_bitmask arg:dense arguments arg Assign Call Call Assign Call Call Assign Call Call Assign Call Assign Call Assign Call Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "get_helper",
    "source_code": "def get_helper(self):\n    return self._axis_artist_helper",
    "docstring": "Return axis artist helper instance.",
    "type": "method",
    "file_path": "matplotlib\\lib\\mpl_toolkits\\axisartist\\axis_artist.py",
    "ast_data": "FunctionDef name:get_helper arg:self arguments arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_builtin_constant_ids",
    "source_code": "@FunctionIdSet\ndef _builtin_constant_ids() -> dict[int, str]:\n    rv = {id(v): f'builtins.{k}' for k, v in builtins.__dict__.items() if not k.startswith('_') and (not callable(v))}\n    return rv",
    "docstring": "Collects constant builtins by eliminating callable items.",
    "type": "function",
    "file_path": "pytorch\\torch\\_dynamo\\trace_rules.py",
    "ast_data": "FunctionDef name:_builtin_constant_ids arguments Assign Call Call BoolOp Call Call Return return:yes"
  },
  {
    "library": "django",
    "name": "label_tag",
    "source_code": "def label_tag(self, contents=None, attrs=None, label_suffix=None, tag=None):\n    contents = contents or self.label\n    if label_suffix is None:\n        label_suffix = self.field.label_suffix if self.field.label_suffix is not None else self.form.label_suffix\n    if label_suffix and contents and (contents[-1] not in _(':?.!')):\n        contents = format_html('{}{}', contents, label_suffix)\n    widget = self.field.widget\n    id_ = widget.attrs.get('id') or self.auto_id\n    if id_:\n        id_for_label = widget.id_for_label(id_)\n        if id_for_label:\n            attrs = {**(attrs or {}), 'for': id_for_label}\n        if self.field.required and hasattr(self.form, 'required_css_class'):\n            attrs = attrs or {}\n            if 'class' in attrs:\n                attrs['class'] += ' ' + self.form.required_css_class\n            else:\n                attrs['class'] = self.form.required_css_class\n    context = {'field': self, 'label': contents, 'attrs': attrs, 'use_tag': bool(id_), 'tag': tag or 'label'}\n    return self.form.render(self.form.template_name_label, context)",
    "docstring": "Wrap the given contents in a , if the field has an ID attribute. contents should be mark_safe'd to avoid HTML escaping. If contents aren't given, use the field's HTML-escaped label. If attrs are given, use them as HTML attributes on the tag. label_suffix overrides the form's label_suffix.",
    "type": "method",
    "file_path": "django\\django\\forms\\boundfield.py",
    "ast_data": "FunctionDef name:label_tag arg:self arg:contents arg:attrs arg:label_suffix arg:tag arguments arg arg arg arg arg Assign BoolOp If Compare Assign Compare If BoolOp Compare Call Assign Call Assign Assign BoolOp Call If Assign Call If Assign BoolOp If BoolOp Call Assign BoolOp If Compare Assign Assign Call BoolOp Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_infer_graph",
    "source_code": "def _infer_graph(self, inputs, clusters):\n    assert isinstance(inputs, list)\n    scores = self._distance_graph(inputs, clusters, self._distance_metric)\n    output = []\n    if self._distance_metric == COSINE_DISTANCE and (not self._clusters_l2_normalized()):\n        with ops.colocate_with(clusters, ignore_existing=True):\n            clusters = nn_impl.l2_normalize(clusters, axis=1)\n    for inp, score in zip(inputs, scores):\n        with ops.colocate_with(inp, ignore_existing=True):\n            indices, distances = gen_clustering_ops.nearest_neighbors(inp, clusters, 1)\n            if self._distance_metric == COSINE_DISTANCE:\n                distances *= 0.5\n            output.append((score, array_ops.squeeze(distances, [-1]), array_ops.squeeze(indices, [-1])))\n    return zip(*output)",
    "docstring": "Maps input to closest cluster and the score. Args: inputs: list of input Tensors. clusters: Tensor of cluster centers. Returns: List of tuple, where each value in tuple corresponds to a value in inp. The tuple has following three elements: all_scores: distance of each input to each cluster center. score: distance of each input to closest cluster center. cluster_idx: index of cluster center closest to the corresponding input.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\clustering_ops.py",
    "ast_data": "FunctionDef name:_infer_graph arg:self arg:inputs arg:clusters arguments arg arg arg Call Assign Call Assign If BoolOp Compare Call With Call Assign Call For Call With Call Assign Call If Compare Call Call Call Return return:yes Call"
  },
  {
    "library": "django",
    "name": "sequence_list",
    "source_code": "def sequence_list(self):\n    sequence_list = []\n    with self.connection.cursor() as cursor:\n        for model in self.get_migratable_models():\n            if not model._meta.managed:\n                continue\n            if model._meta.swapped:\n                continue\n            sequence_list.extend(self.get_sequences(cursor, model._meta.db_table, model._meta.local_fields))\n            for f in model._meta.local_many_to_many:\n                if f.remote_field.through._meta.auto_created:\n                    sequence = self.get_sequences(cursor, f.m2m_db_table())\n                    sequence_list.extend(sequence or [{'table': f.m2m_db_table(), 'column': None}])\n    return sequence_list",
    "docstring": "Return a list of information about all DB sequences for all models in all apps.",
    "type": "method",
    "file_path": "django\\django\\db\\backends\\base\\introspection.py",
    "ast_data": "FunctionDef name:sequence_list arg:self arguments arg Assign With Call For Call If If Call Call For If Assign Call Call Call BoolOp Call Return return:yes"
  },
  {
    "library": "scipy",
    "name": "dump",
    "source_code": "def dump(self):\n    header = [self.title.ljust(72) + self.key.ljust(8)]\n    header.append(f'{self.total_nlines:14d}{self.pointer_nlines:14d}{self.indices_nlines:14d}{self.values_nlines:14d}')\n    header.append(f'{self.mxtype.fortran_format.ljust(14):14s}{self.nrows:14d}{self.ncols:14d}{self.nnon_zeros:14d}{0:14d}')\n    pffmt = self.pointer_format.fortran_format\n    iffmt = self.indices_format.fortran_format\n    vffmt = self.values_format.fortran_format\n    header.append(f'{pffmt.ljust(16):16s}{iffmt.ljust(16):16s}{vffmt.ljust(20):20s}')\n    return '\\n'.join(header)",
    "docstring": "Gives the header corresponding to this instance as a string.",
    "type": "method",
    "file_path": "scipy\\scipy\\io\\_harwell_boeing\\hb.py",
    "ast_data": "FunctionDef name:dump arg:self arguments arg Assign Call Call Call Call Call Assign Assign Assign Call Call Call Call Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "_compute_partial_dependence_recursion",
    "source_code": "def _compute_partial_dependence_recursion(self, grid, target_features):\n    grid = np.asarray(grid, dtype=DTYPE, order='C')\n    target_features = np.asarray(target_features, dtype=np.intp, order='C')\n    averaged_predictions = np.zeros(shape=grid.shape[0], dtype=np.float64, order='C')\n    for tree in self.estimators_:\n        tree.tree_.compute_partial_dependence(grid, target_features, averaged_predictions)\n    averaged_predictions /= len(self.estimators_)\n    return averaged_predictions",
    "docstring": "Fast partial dependence computation. Parameters ---------- grid : ndarray of shape (n_samples, n_target_features), dtype=DTYPE The grid points on which the partial dependence should be evaluated. target_features : ndarray of shape (n_target_features), dtype=np.intp The set of target features for which the partial dependence should be evaluated. Returns ------- averaged_predictions : ndarray of shape (n_samples,) The value of the partial dependence function on each grid point.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\ensemble\\_forest.py",
    "ast_data": "FunctionDef name:_compute_partial_dependence_recursion arg:self arg:grid arg:target_features arguments arg arg arg Assign Call Assign Call Assign Call For Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, num_tasks):\n    self._num_tasks = num_tasks\n    self._next_task = 0",
    "docstring": "Create a new . Args: num_tasks: Number of ps tasks to cycle among.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\training\\device_setter.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:num_tasks arguments arg arg Assign Assign"
  },
  {
    "library": "matplotlib",
    "name": "autoscale",
    "source_code": "def autoscale(self, A):\n    if A is None:\n        raise TypeError('You must first set_array for mappable')\n    self.norm.autoscale(A)",
    "docstring": "Autoscale the scalar limits on the norm instance using the current array",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\colorizer.py",
    "ast_data": "FunctionDef name:autoscale arg:self arg:A arguments arg arg If Compare Raise Call Call"
  },
  {
    "library": "pandas",
    "name": "DatabaseError",
    "source_code": "class DatabaseError(OSError):\n    pass",
    "docstring": "Error is raised when executing SQL with bad syntax or SQL that throws an error. Raised by :func: when a bad SQL statement is passed in. See Also -------- read_sql : Read SQL query or database table into a DataFrame. Examples -------- >>> from sqlite3 import connect >>> conn = connect(\":memory:\") >>> pd.read_sql(\"select * test\", conn) # doctest: +SKIP",
    "type": "class",
    "file_path": "pandas\\pandas\\errors\\__init__.py",
    "ast_data": "ClassDef name:DatabaseError"
  },
  {
    "library": "pytorch",
    "name": "set",
    "source_code": "def set(self, key, value):\n    self.client.set(key=self.prefix + self._encode(key), value=self._encode(value))",
    "docstring": "Write a key/value pair into ``.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\elastic\\rendezvous\\etcd_store.py",
    "ast_data": "FunctionDef name:set arg:self arg:key arg:value arguments arg arg arg Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "make_call_op_in_graph",
    "source_code": "def make_call_op_in_graph(atomic: AtomicFunction, tensor_inputs: Sequence[core.Tensor], context_call_attrs: Dict[str, Any]):\n    graph = ops.get_default_graph()\n    graph._add_function_recursive(atomic)\n    op = partitioned_call_op(name=atomic.name, args=tensor_inputs, is_stateful=atomic.call_options.is_stateful, tout=[o.dtype.as_datatype_enum for o in atomic.function_type.flat_outputs], config=context_call_attrs['config_proto'], executor_type=context_call_attrs['executor_type'], xla_compile_attr=atomic.cached_definition.attr.get(attributes_lib.XLA_COMPILE, None))\n    _set_read_only_resource_inputs_attr(op, atomic.graph)\n    ops.set_int_list_attr(op, acd.COLLECTIVE_MANAGER_IDS, atomic._call_options.collective_manager_ids_used)\n    return op.outputs",
    "docstring": "Adds an AtomicFunction to graph.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\polymorphic_function\\atomic_function.py",
    "ast_data": "FunctionDef name:make_call_op_in_graph arg:atomic arg:tensor_inputs arg:context_call_attrs arguments arg arg arg Assign Call Call Assign Call Call Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, string_table):\n    self._string_table = string_table\n    self._function_key_to_function = {}",
    "docstring": "Constructor. Args: string_table: A object.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\profiler\\pprof_profiler.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:string_table arguments arg arg Assign Assign"
  },
  {
    "library": "tensorflow",
    "name": "ConditionalExpressionTransformer",
    "source_code": "class ConditionalExpressionTransformer(converter.Base):\n\n    def visit_IfExp(self, node):\n        template = '\\n        ag__.if_exp(\\n            test,\\n            lambda: true_expr,\\n            lambda: false_expr,\\n            expr_repr)\\n    '\n        expr_repr = parser.unparse(node.test, include_encoding_marker=False).strip()\n        return templates.replace_as_expression(template, test=node.test, true_expr=node.body, false_expr=node.orelse, expr_repr=gast.Constant(expr_repr, kind=None))",
    "docstring": "Converts conditional expressions to functional form.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\autograph\\converters\\conditional_expressions.py",
    "ast_data": "ClassDef name:ConditionalExpressionTransformer FunctionDef name:visit_IfExp arg:self arg:node arguments arg arg Assign Assign Call Call Return return:yes Call Call"
  },
  {
    "library": "scikit-learn",
    "name": "get_feature_names_out",
    "source_code": "def get_feature_names_out(self, input_features=None):\n    check_is_fitted(self, 'n_features_in_')\n    if self.voting == 'soft' and (not self.flatten_transform):\n        raise ValueError(\"get_feature_names_out is not supported when `voting='soft'` and `flatten_transform=False`\")\n    _check_feature_names_in(self, input_features, generate_names=False)\n    class_name = self.__class__.__name__.lower()\n    active_names = [name for name, est in self.estimators if est != 'drop']\n    if self.voting == 'hard':\n        return np.asarray([f'{class_name}_{name}' for name in active_names], dtype=object)\n    n_classes = len(self.classes_)\n    names_out = [f'{class_name}_{name}{i}' for name in active_names for i in range(n_classes)]\n    return np.asarray(names_out, dtype=object)",
    "docstring": "Get output feature names for transformation. Parameters ---------- input_features : array-like of str or None, default=None Not used, present here for API consistency by convention. Returns ------- feature_names_out : ndarray of str objects Transformed feature names.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\ensemble\\_voting.py",
    "ast_data": "FunctionDef name:get_feature_names_out arg:self arg:input_features arguments arg arg Call If BoolOp Compare Raise Call Call Assign Call Assign Compare If Compare Return return:yes Call Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_fill_object_graph_proto",
    "source_code": "def _fill_object_graph_proto(graph_view, trackable_objects, node_ids, slot_variables):\n    object_graph_proto = trackable_object_graph_pb2.TrackableObjectGraph()\n    for checkpoint_id, trackable in enumerate(trackable_objects):\n        assert node_ids[trackable] == checkpoint_id\n        object_proto = object_graph_proto.nodes.add(slot_variables=slot_variables.get(trackable, ()))\n        for child in graph_view.list_children(trackable):\n            object_proto.children.add(node_id=node_ids[child.ref], local_name=child.name)\n    return object_graph_proto",
    "docstring": "Name non-slot s and add them to .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\checkpoint\\save_util_v1.py",
    "ast_data": "FunctionDef name:_fill_object_graph_proto arg:graph_view arg:trackable_objects arg:node_ids arg:slot_variables arguments arg arg arg arg Assign Call For Call Compare Assign Call Call For Call Call Return return:yes"
  },
  {
    "library": "kornia",
    "name": "to_dlpack",
    "source_code": "def to_dlpack(self) -> DLPack:\n    return to_dlpack(self.data)",
    "docstring": "Return a DLPack capsule from the image tensor.",
    "type": "method",
    "file_path": "kornia\\kornia\\image\\image.py",
    "ast_data": "FunctionDef name:to_dlpack arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "get_test_runner",
    "source_code": "def get_test_runner(project_module):\n    __import__(project_module)\n    test = sys.modules[project_module].test\n    version = sys.modules[project_module].__version__\n    mod_path = sys.modules[project_module].__file__\n    mod_path = os.path.abspath(os.path.join(os.path.dirname(mod_path)))\n    return (test, version, mod_path)",
    "docstring": "get Test Runner from locally installed/built project",
    "type": "function",
    "file_path": "scipy\\dev.py",
    "ast_data": "FunctionDef name:get_test_runner arg:project_module arguments arg Call Assign Assign Assign Assign Call Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_FirstOrderTapeGradientFunctions",
    "source_code": "class _FirstOrderTapeGradientFunctions(_TapeGradientFunctions):\n\n    def __init__(self, func_graph: func_graph_module.FuncGraph, attrs, func_graph_deleter, forwardprop_input_indices, delayed_rewrite_functions, need_gradients_for_jvps):\n        super().__init__(func_graph, attrs, func_graph_deleter, forwardprop_input_indices, delayed_rewrite_functions, need_gradients_for_jvps)\n        self._func_graph_deleter = func_graph_deleter\n        self._forwardprop_input_indices = forwardprop_input_indices\n\n    def _forward_and_backward_functions(self, inference_args, input_tangents):\n        outputs = self._func_graph.outputs[:self._num_inference_outputs]\n        return self._build_functions_for_outputs(outputs, inference_args, input_tangents)",
    "docstring": "Caches tape-friendly functions for first-order gradients.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\polymorphic_function\\concrete_function.py",
    "ast_data": "ClassDef name:_FirstOrderTapeGradientFunctions FunctionDef name:__init__ arg:self arg:func_graph arg:attrs arg:func_graph_deleter arg:forwardprop_input_indices arg:delayed_rewrite_functions arg:need_gradients_for_jvps arguments arg arg arg arg arg arg arg Call Call Assign Assign FunctionDef name:_forward_and_backward_functions arg:self arg:inference_args arg:input_tangents arguments arg arg arg Assign Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "set_state",
    "source_code": "def set_state(self, state: bytes, token: Optional[Token]=None) -> Optional[tuple[bytes, Token, bool]]:\n    base64_state_str: str = b64encode(state).decode()\n    if token:\n        if not isinstance(token, bytes):\n            result = self.get_state()\n            if result is not None:\n                tmp = (*result, False)\n                return tmp\n            return None\n        token = token.decode()\n    else:\n        token = self._NULL_SENTINEL\n    base64_state: bytes = self._call_store('compare_set', self._key, token, base64_state_str)\n    state_token_pair = self._decode_state(base64_state)\n    if state_token_pair is None:\n        return None\n    new_state, new_token = state_token_pair\n    return (new_state, new_token, new_state == state)",
    "docstring": "See base class.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\elastic\\rendezvous\\c10d_rendezvous_backend.py",
    "ast_data": "FunctionDef name:set_state arg:self arg:state arg:token arguments arg arg arg Call Call If If Call Assign Call If Compare Assign Return return:yes Return return:no Assign Call Assign Call Assign Call If Compare Return return:no Assign Return return:yes Compare"
  },
  {
    "library": "pytorch",
    "name": "register_state_dict_pre_hook",
    "source_code": "def register_state_dict_pre_hook(self, hook):\n    handle = RemovableHandle(self._state_dict_pre_hooks)\n    self._state_dict_pre_hooks[handle.id] = hook\n    return handle",
    "docstring": "Register a pre-hook for the :meth: method. It should have the following signature:: hook(module, prefix, keep_vars) -> None The registered hooks can be used to perform pre-processing before the `` call is made.",
    "type": "method",
    "file_path": "pytorch\\torch\\nn\\modules\\module.py",
    "ast_data": "FunctionDef name:register_state_dict_pre_hook arg:self arg:hook arguments arg arg Assign Call Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "batch_norm_op",
    "source_code": "def batch_norm_op(tensor, mean, variance, beta, gamma, scale):\n    test_util.set_producer_version(ops.get_default_graph(), 8)\n    return gen_nn_ops._batch_norm_with_global_normalization(tensor, mean, variance, beta, gamma, 0.001, scale)",
    "docstring": "Fused kernel for batch normalization.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\batch_norm_benchmark.py",
    "ast_data": "FunctionDef name:batch_norm_op arg:tensor arg:mean arg:variance arg:beta arg:gamma arg:scale arguments arg arg arg arg arg arg Call Call Return return:yes Call"
  },
  {
    "library": "django",
    "name": "ImproperlyConfigured",
    "source_code": "class ImproperlyConfigured(Exception):\n    pass",
    "docstring": "Django is somehow improperly configured",
    "type": "class",
    "file_path": "django\\django\\core\\exceptions.py",
    "ast_data": "ClassDef name:ImproperlyConfigured"
  },
  {
    "library": "pandas",
    "name": "NDArrayBackedExtensionBlock",
    "source_code": "class NDArrayBackedExtensionBlock(EABackedBlock):\n    values: NDArrayBackedExtensionArray\n\n    @property\n    def is_view(self) -> bool:\n        return self.values._ndarray.base is not None",
    "docstring": "Block backed by an NDArrayBackedExtensionArray",
    "type": "class",
    "file_path": "pandas\\pandas\\core\\internals\\blocks.py",
    "ast_data": "ClassDef name:NDArrayBackedExtensionBlock FunctionDef name:is_view arg:self arguments arg Return return:yes Compare"
  },
  {
    "library": "tensorflow",
    "name": "ExecutableLocation",
    "source_code": "@tf_export('distribute.cluster_resolver.KubernetesExecutableLocation')\nclass ExecutableLocation(enum.Enum):\n    WITHIN_CLUSTER = 0\n    OFF_CLUSTER = 1",
    "docstring": "Defines where the executable runs on. This is used to determine how to resolve the configuration to talk with the kube api server. means that the TensorFlow code you are running is running in a pod within the cluster itself. means any other enviroment outside the cluster.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\cluster_resolver\\kubernetes_cluster_resolver.py",
    "ast_data": "ClassDef name:ExecutableLocation Assign Assign Call"
  },
  {
    "library": "scikit-learn",
    "name": "apply_where",
    "source_code": "def apply_where(cond: Array, args: Array | tuple[Array, ...], f1: Callable[..., Array], f2: Callable[..., Array] | None=None, /, *, fill_value: Array | complex | None=None, xp: ModuleType | None=None) -> Array:\n    if (f2 is None) == (fill_value is None):\n        msg = 'Exactly one of `fill_value` or `f2` must be given.'\n        raise TypeError(msg)\n    args_ = list(args) if isinstance(args, tuple) else [args]\n    del args\n    xp = array_namespace(cond, fill_value, *args_) if xp is None else xp\n    if isinstance(fill_value, int | float | complex | NoneType):\n        cond, *args_ = xp.broadcast_arrays(cond, *args_)\n    else:\n        cond, fill_value, *args_ = xp.broadcast_arrays(cond, fill_value, *args_)\n    if is_dask_namespace(xp):\n        meta_xp = meta_namespace(cond, fill_value, *args_, xp=xp)\n        return xp.map_blocks(_apply_where, cond, f1, f2, fill_value, *args_, xp=meta_xp)\n    return _apply_where(cond, f1, f2, fill_value, *args_, xp=xp)",
    "docstring": "Run one of two elementwise functions depending on a condition. Equivalent to `fill_valuef1f2condargscondargscondfill_valuecondcondargsf2condargsf1condf2fill_valuecondf1fill_valuef2f1condf2f1f2` are applied to the individual chunks and should use functions from the namespace of the chunks. Examples -------- >>> import array_api_strict as xp >>> import array_api_extra as xpx >>> a = xp.asarray([5, 4, 3]) >>> b = xp.asarray([0, 2, 2]) >>> def f(a, b): ... return a // b >>> xpx.apply_where(b != 0, (a, b), f, fill_value=xp.nan) array([ nan, 2., 1.])",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\externals\\array_api_extra\\_lib\\_funcs.py",
    "ast_data": "FunctionDef name:apply_where arguments arg arg arg arg arg arg If Compare Compare Compare Assign Raise Call Assign Call Call Assign Compare Call If Call Assign Call Assign Call If Call Assign Call Return return:yes Call Return return:yes Call"
  },
  {
    "library": "django",
    "name": "AdminSplitDateTime",
    "source_code": "class AdminSplitDateTime(forms.SplitDateTimeWidget):\n    template_name = 'admin/widgets/split_datetime.html'\n\n    def __init__(self, attrs=None):\n        widgets = [BaseAdminDateWidget, BaseAdminTimeWidget]\n        forms.MultiWidget.__init__(self, widgets, attrs)\n\n    def get_context(self, name, value, attrs):\n        context = super().get_context(name, value, attrs)\n        context['date_label'] = _('Date:')\n        context['time_label'] = _('Time:')\n        return context",
    "docstring": "A SplitDateTime Widget that has some admin-specific styling.",
    "type": "class",
    "file_path": "django\\django\\contrib\\admin\\widgets.py",
    "ast_data": "ClassDef name:AdminSplitDateTime Assign FunctionDef name:__init__ arg:self arg:attrs arguments arg arg Assign Call FunctionDef name:get_context arg:self arg:name arg:value arg:attrs arguments arg arg arg arg Assign Call Call Assign Call Assign Call Return return:yes"
  },
  {
    "library": "pandas",
    "name": "sp_values",
    "source_code": "@property\ndef sp_values(self) -> np.ndarray:\n    return self._sparse_values",
    "docstring": "An ndarray containing the non- `datafill_value` points, as decimal. Examples -------- >>> from pandas.arrays import SparseArray >>> s = SparseArray([0, 0, 1, 0, 2], fill_value=0) >>> s.sp_values array([1, 2])",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\arrays\\sparse\\array.py",
    "ast_data": "FunctionDef name:sp_values arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_create_graph_constant",
    "source_code": "def _create_graph_constant(value, dtype, shape, name, verify_shape, allow_broadcast) -> tensor_lib.Tensor:\n    g = get_default_graph()\n    tensor_value = attr_value_pb2.AttrValue()\n    tensor_value.tensor.CopyFrom(tensor_util.make_tensor_proto(value, dtype=dtype, shape=shape, verify_shape=verify_shape, allow_broadcast=allow_broadcast))\n    dtype_value = attr_value_pb2.AttrValue(type=tensor_value.tensor.dtype)\n    attrs = {'value': tensor_value, 'dtype': dtype_value}\n    const_tensor = g._create_op_internal('Const', [], [dtype_value.type], attrs=attrs, name=name).outputs[0]\n    if op_callbacks.should_invoke_op_callbacks():\n        callback_outputs = op_callbacks.invoke_op_callbacks('Const', tuple(), attrs, (const_tensor,), op_name=name, graph=g)\n        if callback_outputs is not None:\n            [const_tensor] = callback_outputs\n    return const_tensor",
    "docstring": "Create a graph constant and invoke constant callbacks.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\ops.py",
    "ast_data": "FunctionDef name:_create_graph_constant arg:value arg:dtype arg:shape arg:name arg:verify_shape arg:allow_broadcast arguments arg arg arg arg arg arg Assign Call Assign Call Call Call Assign Call Assign Assign Call If Call Assign Call Call If Compare Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_normalize_value_destination_pairs",
    "source_code": "def _normalize_value_destination_pairs(value_destination_pairs):\n    result = []\n    value_destination_pairs = list(value_destination_pairs)\n    if not isinstance(value_destination_pairs, (list, tuple)):\n        raise ValueError('`value_destination_pairs` should be a list or tuple')\n    for pair in value_destination_pairs:\n        if not isinstance(pair, tuple):\n            raise ValueError('Each element of `value_destination_pairs` should be a tuple.')\n        if len(pair) != 2:\n            raise ValueError('Each element of `value_destination_pairs` should be a tuple of size 2.')\n        per_replica = _make_tensor_into_per_replica(pair[0])\n        result.append((per_replica, pair[1]))\n    return result",
    "docstring": "Converts each tensor into a PerReplica object in the input list.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\cross_device_ops.py",
    "ast_data": "FunctionDef name:_normalize_value_destination_pairs arg:value_destination_pairs arguments arg Assign Assign Call If Call Raise Call For If Call Raise Call If Compare Call Raise Call Assign Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "assert_hermitian_spectrum",
    "source_code": "def assert_hermitian_spectrum(self, name='assert_hermitian_spectrum'):\n    eps = np.finfo(self.dtype.real_dtype.as_numpy_dtype).eps\n    with self._name_scope(name):\n        max_err = eps * self.domain_dimension_tensor()\n        imag_convolution_kernel = math_ops.imag(self.convolution_kernel())\n        return check_ops.assert_less(math_ops.abs(imag_convolution_kernel), max_err, message='Spectrum was not Hermitian')",
    "docstring": "Returns an that asserts this operator has Hermitian spectrum. This operator corresponds to a real-valued matrix if and only if its spectrum is Hermitian. Args: name: A name to give this . Returns: An that asserts this operator has Hermitian spectrum.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\linalg\\linear_operator_circulant.py",
    "ast_data": "FunctionDef name:assert_hermitian_spectrum arg:self arg:name arguments arg arg Assign Call With Call Assign Call Assign Call Call Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, cluster_resolver=None, communication_options=None):\n    if communication_options is None:\n        communication_options = collective_util.Options()\n    super(CollectiveAllReduceStrategy, self).__init__(CollectiveAllReduceExtended(self, cluster_resolver=cluster_resolver, communication_options=communication_options))\n    distribute_lib.distribution_strategy_gauge.get_cell('V2').set('MultiWorkerMirroredStrategy')\n    distribute_lib.distribution_strategy_replica_gauge.get_cell('num_workers').set(self.extended._num_workers)\n    distribute_lib.distribution_strategy_replica_gauge.get_cell('num_replicas_per_worker').set(self.extended._num_devices_per_worker)",
    "docstring": "Creates the strategy. Args: cluster_resolver: optional . If , is used. communication_options: optional . This configures the default options for cross device communications. It can be overridden by options provided to the communication APIs like . See for details.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\collective_all_reduce_strategy.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:cluster_resolver arg:communication_options arguments arg arg arg If Compare Assign Call Call Call Call Call Call Call Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "get_cudnn_version",
    "source_code": "def get_cudnn_version(run_lambda):\n    if get_platform() == 'win32':\n        system_root = os.environ.get('SYSTEMROOT', 'C:\\\\Windows')\n        cuda_path = os.environ.get('CUDA_PATH', '%CUDA_PATH%')\n        where_cmd = os.path.join(system_root, 'System32', 'where')\n        cudnn_cmd = '{} /R \"{}\\\\bin\" cudnn*.dll'.format(where_cmd, cuda_path)\n    elif get_platform() == 'darwin':\n        cudnn_cmd = 'ls /usr/local/cuda/lib/libcudnn*'\n    else:\n        cudnn_cmd = 'ldconfig -p | grep libcudnn | rev | cut -d\" \" -f1 | rev'\n    rc, out, _ = run_lambda(cudnn_cmd)\n    if len(out) == 0 or (rc != 1 and rc != 0):\n        l = os.environ.get('CUDNN_LIBRARY')\n        if l is not None and os.path.isfile(l):\n            return os.path.realpath(l)\n        return None\n    files_set = set()\n    for fn in out.split('\\n'):\n        fn = os.path.realpath(fn)\n        if os.path.isfile(fn):\n            files_set.add(fn)\n    if not files_set:\n        return None\n    files = sorted(files_set)\n    if len(files) == 1:\n        return files[0]\n    result = '\\n'.join(files)\n    return 'Probably one of the following:\\n{}'.format(result)",
    "docstring": "Return a list of libcudnn.so; it's hard to tell which one is being used.",
    "type": "function",
    "file_path": "pytorch\\torch\\utils\\collect_env.py",
    "ast_data": "FunctionDef name:get_cudnn_version arg:run_lambda arguments arg If Compare Call Assign Call Assign Call Assign Call Assign Call If Compare Call Assign Assign Assign Call If BoolOp Compare Call BoolOp Compare Compare Assign Call If BoolOp Compare Call Return return:yes Call Return return:no Assign Call For Call Assign Call If Call Call If Return return:no Assign Call If Compare Call Return return:yes Assign Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "extend",
    "source_code": "def extend(self, sequential: Iterable[Module]) -> Self:\n    for layer in sequential:\n        self.append(layer)\n    return self",
    "docstring": "Extends the current Sequential container with layers from another Sequential container. Args: sequential (Sequential): A Sequential container whose layers will be added to the current container. Example:: >>> import torch.nn as nn >>> n = nn.Sequential(nn.Linear(1, 2), nn.Linear(2, 3)) >>> other = nn.Sequential(nn.Linear(3, 4), nn.Linear(4, 5)) >>> n.extend(other) # or Sequential( (0): Linear(in_features=1, out_features=2, bias=True) (1): Linear(in_features=2, out_features=3, bias=True) (2): Linear(in_features=3, out_features=4, bias=True) (3): Linear(in_features=4, out_features=5, bias=True) )",
    "type": "method",
    "file_path": "pytorch\\torch\\nn\\modules\\container.py",
    "ast_data": "FunctionDef name:extend arg:self arg:sequential arguments arg arg For Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "control_flow_v2_enabled",
    "source_code": "@tf_export(v1=['control_flow_v2_enabled'])\ndef control_flow_v2_enabled():\n    return control_flow_util.EnableControlFlowV2(ops.get_default_graph())",
    "docstring": "Returns if v2 control flow is enabled. Note: v2 control flow is always enabled inside of tf.function.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\control_flow_v2_toggles.py",
    "ast_data": "FunctionDef name:control_flow_v2_enabled arguments Return return:yes Call Call Call"
  },
  {
    "library": "pandas",
    "name": "datetime_column_to_ndarray",
    "source_code": "def datetime_column_to_ndarray(col: Column) -> tuple[np.ndarray | pd.Series, Any]:\n    buffers = col.get_buffers()\n    _, col_bit_width, format_str, _ = col.dtype\n    dbuf, _ = buffers['data']\n    data = buffer_to_ndarray(dbuf, (DtypeKind.INT, col_bit_width, getattr(ArrowCTypes, f'INT{col_bit_width}'), Endianness.NATIVE), offset=col.offset, length=col.size())\n    data = parse_datetime_format_str(format_str, data)\n    data = set_nulls(data, col, buffers['validity'])\n    return (data, buffers)",
    "docstring": "Convert a column holding DateTime data to a NumPy array. Parameters ---------- col : Column Returns ------- tuple Tuple of np.ndarray holding the data and the memory owner object that keeps the memory alive.",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\interchange\\from_dataframe.py",
    "ast_data": "FunctionDef name:datetime_column_to_ndarray arg:col arguments arg Assign Call Assign Assign Assign Call Call Call Assign Call Assign Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "enter_dual_level",
    "source_code": "def enter_dual_level():\n    global _current_level\n    new_level = torch._C._enter_dual_level()\n    if new_level != _current_level + 1:\n        raise RuntimeError('Entering a new forward AD level but the current level is not valid. Make sure you did not modified it directly.')\n    _current_level = new_level\n    return new_level",
    "docstring": "Enter a new forward grad level. This level can be used to make and unpack dual Tensors to compute forward gradients. This function also updates the current level that is used by default by the other functions in this API.",
    "type": "function",
    "file_path": "pytorch\\torch\\autograd\\forward_ad.py",
    "ast_data": "FunctionDef name:enter_dual_level arguments Assign Call If Compare Raise Call Assign Return return:yes"
  },
  {
    "library": "scipy",
    "name": "derivative",
    "source_code": "def derivative(self, x, der=1):\n    x, x_shape = self._prepare_x(x)\n    y = self._evaluate_derivatives(x, der + 1, all_lower=False)\n    return self._finish_y(y, x_shape)",
    "docstring": "Evaluate a single derivative of the polynomial at the point x. Parameters ---------- x : array_like Point or points at which to evaluate the derivatives der : integer, optional Which derivative to evaluate (default: first derivative). This number includes the function value as 0th derivative. Returns ------- d : ndarray Derivative interpolated at the x-points. Shape of is determined by replacing the interpolation axis in the original array with the shape of .",
    "type": "method",
    "file_path": "scipy\\scipy\\interpolate\\_polyint.py",
    "ast_data": "FunctionDef name:derivative arg:self arg:x arg:der arguments arg arg arg Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "__str__",
    "source_code": "def __str__(self) -> str:\n    return f'_S({self.dim}, {self.split_factor})'",
    "docstring": "human readable representation of the _StridedShard placement",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\tensor\\placement_types.py",
    "ast_data": "FunctionDef name:__str__ arg:self arguments arg Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "_mesh",
    "source_code": "def _mesh(self):\n    y, _ = self._proportional_y()\n    if isinstance(self.norm, (colors.BoundaryNorm, colors.NoNorm)) or self.boundaries is not None:\n        y = y * (self.vmax - self.vmin) + self.vmin\n    else:\n        with self.norm.callbacks.blocked(), cbook._setattr_cm(self.norm, vmin=self.vmin, vmax=self.vmax):\n            y = self.norm.inverse(y)\n    self._y = y\n    X, Y = np.meshgrid([0.0, 1.0], y)\n    if self.orientation == 'vertical':\n        return (X, Y)\n    else:\n        return (Y, X)",
    "docstring": "Return the coordinate arrays for the colorbar pcolormesh/patches. These are scaled between vmin and vmax, and already handle colorbar orientation.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\colorbar.py",
    "ast_data": "FunctionDef name:_mesh arg:self arguments arg Assign Call If BoolOp Call Compare Assign With Call Call Assign Call Assign Assign Call If Compare Return return:yes Return return:yes"
  },
  {
    "library": "scipy",
    "name": "PenHolder",
    "source_code": "class PenHolder(Benchmark):\n\n    def __init__(self, dimensions=2):\n        Benchmark.__init__(self, dimensions)\n        self._bounds = list(zip([-11.0] * self.N, [11.0] * self.N))\n        self.global_optimum = [[-9.646167708023526, 9.6461676710434]]\n        self.fglob = -0.9635348327265058\n\n    def fun(self, x, *args):\n        self.nfev += 1\n        a = abs(1.0 - sqrt(x[0] ** 2 + x[1] ** 2) / pi)\n        b = cos(x[0]) * cos(x[1]) * exp(a)\n        return -exp(-abs(b) ** (-1))",
    "docstring": "PenHolder objective function. This class defines the PenHolder [1]_ global optimization problem. This is a multimodal minimization problem defined as follows: .. math:: f_{\\text{PenHolder}}(x) = -e^{\\left|{e^{-\\left|{- \\frac{\\sqrt{x_{1}^{2} + x_{2}^{2}}}{\\pi} + 1}\\right|} \\cos\\left(x_{1}\\right) \\cos\\left(x_{2}\\right)}\\right|^{-1}} with :math: for :math:. *Global optimum*: :math: for :math: for :math: .. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions For Global Optimization Problems Int. Journal of Mathematical Modelling and Numerical Optimisation, 2013, 4, 150-194.",
    "type": "class",
    "file_path": "scipy\\benchmarks\\benchmarks\\go_benchmark_functions\\go_funcs_P.py",
    "ast_data": "ClassDef name:PenHolder FunctionDef name:__init__ arg:self arg:dimensions arguments arg arg Call Assign Call Call Assign Assign FunctionDef name:fun arg:self arg:x arguments arg arg arg Assign Call Call Assign Call Call Call Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_raise_error_for_incorrect_control_flow_context",
    "source_code": "def _raise_error_for_incorrect_control_flow_context(self):\n    graph = ops.get_default_graph()\n    in_tpu_ctx = False\n    while graph is not None:\n        ctx = graph._get_control_flow_context()\n        while ctx is not None:\n            if isinstance(ctx, tpu_replication.TPUReplicateContext):\n                in_tpu_ctx = True\n                break\n            ctx = ctx.outer_context\n        if in_tpu_ctx:\n            break\n        graph = getattr(graph, 'outer_graph', None)\n    if graph != ops.get_default_graph() and in_tpu_ctx:\n        raise RuntimeError('Current graph {} does not match graph which contains TPUReplicateContext {}. This is most likely due to the fact that enqueueing embedding data is called inside control flow or a tf.function inside `strategy.run`. This is not supported because outside compilation fails to extract the enqueue ops as the head of a computation.'.format(ops.get_default_graph(), graph))\n    return in_tpu_ctx",
    "docstring": "Raises an error if we are not in the TPUReplicateContext.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\tpu\\tpu_embedding_v3.py",
    "ast_data": "FunctionDef name:_raise_error_for_incorrect_control_flow_context arg:self arguments arg Assign Call Assign While Compare Assign Call While Compare If Call Assign Assign If Assign Call If BoolOp Compare Call Raise Call Call Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "get_pagecount",
    "source_code": "def get_pagecount(self):\n    return self._n_figures",
    "docstring": "Return the current number of pages in the multipage pdf file.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\backends\\backend_pgf.py",
    "ast_data": "FunctionDef name:get_pagecount arg:self arguments arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_torchscript_type_to_python_type",
    "source_code": "def _torchscript_type_to_python_type(ts_type: 'torch._C.JitType') -> Any:\n    return eval(ts_type.annotation_str, _type_eval_globals)",
    "docstring": "Convert a TorchScript type to a Python type (including subtypes) via eval'ing the annotation_str. _type_eval_globals sets up expressions like \"List\" and \"Future\" to map to actual types (typing.List and jit.Future)",
    "type": "function",
    "file_path": "pytorch\\torch\\fx\\operator_schemas.py",
    "ast_data": "FunctionDef name:_torchscript_type_to_python_type arg:ts_type arguments arg Return return:yes Call"
  },
  {
    "library": "scrapy",
    "name": "process",
    "source_code": "def process(self, msg: str, kwargs: MutableMapping[str, Any]) -> tuple[str, MutableMapping[str, Any]]:\n    if isinstance(kwargs.get('extra'), MutableMapping):\n        kwargs['extra'].update(self.extra)\n    else:\n        kwargs['extra'] = self.extra\n    return (msg, kwargs)",
    "docstring": "Method that augments logging with additional 'extra' data",
    "type": "method",
    "file_path": "scrapy\\scrapy\\utils\\log.py",
    "ast_data": "FunctionDef name:process arg:self arg:msg arg:kwargs arguments arg arg arg If Call Call Call Assign Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_check_ignored_states",
    "source_code": "def _check_ignored_states(ignored_states: list[Any], passed_as_ignored_states: bool) -> None:\n    if len(ignored_states) == 0:\n        return\n    if passed_as_ignored_states:\n        all_params = all((isinstance(state, nn.Parameter) for state in ignored_states))\n        all_modules = all((isinstance(state, nn.Module) for state in ignored_states))\n        if not all_params and (not all_modules):\n            sorted_types = sorted({type(state) for state in ignored_states}, key=repr)\n            raise ValueError(f'ignored_states expects all nn.Parameter or all nn.Module list elements but got types {sorted_types}')\n    elif not all((isinstance(state, nn.Module) for state in ignored_states)):\n        sorted_types = sorted({type(state) for state in ignored_states}, key=repr)\n        raise ValueError(f'ignored_modules expects nn.Module list elements but got types {sorted_types}')",
    "docstring": "Check that the ignored states are uniformly parameters or uniformly modules. We may remove this check in the future if we permit mixing.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\fsdp\\_init_utils.py",
    "ast_data": "FunctionDef name:_check_ignored_states arg:ignored_states arg:passed_as_ignored_states arguments arg arg If Compare Call Return return:no If Assign Call Call Assign Call Call If BoolOp Assign Call Call Raise Call If Call Call Assign Call Call Raise Call"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, custom_op_registerers=None, **kwargs):\n    self._custom_op_registerers = custom_op_registerers or []\n    super(InterpreterWithCustomOps, self).__init__(**kwargs)",
    "docstring": "Constructor. Args: custom_op_registerers: List of str (symbol names) or functions that take a pointer to a MutableOpResolver and register a custom op. When passing functions, use a pybind function that takes a uintptr_t that can be recast as a pointer to a MutableOpResolver. **kwargs: Additional arguments passed to Interpreter. Raises: ValueError: If the interpreter was unable to create.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\lite\\python\\interpreter.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:custom_op_registerers arguments arg arg arg Assign BoolOp Call Call"
  },
  {
    "library": "tensorflow",
    "name": "get_lib",
    "source_code": "@tf_export('sysconfig.get_lib')\ndef get_lib():\n    import tensorflow as tf\n    return _os_path.join(_os_path.dirname(tf.__file__))",
    "docstring": "Get the directory containing the TensorFlow framework library. Returns: The directory as string.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\platform\\sysconfig.py",
    "ast_data": "FunctionDef name:get_lib arguments Return return:yes Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "metrics",
    "source_code": "@property\ndef metrics(self):\n    metrics = []\n    if self._is_compiled:\n        if not hasattr(self, '_v1_compile_was_called'):\n            return super(Model, self).metrics\n        metrics += self._compile_metric_functions\n    metrics.extend(self._metrics)\n    metrics.extend(_get_metrics_from_layers(list(self._flatten_layers(include_self=False, recursive=False))))\n    return metrics",
    "docstring": "Returns the model's metrics added using , APIs.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\training_v1.py",
    "ast_data": "FunctionDef name:metrics arg:self arguments arg Assign If If Call Return return:yes Call Call Call Call Call Call Return return:yes"
  },
  {
    "library": "numpy",
    "name": "parse_structure",
    "source_code": "def parse_structure(astr):\n    spanlist = []\n    ind = 0\n    while True:\n        m = routine_start_re.search(astr, ind)\n        if m is None:\n            break\n        start = m.start()\n        if function_start_re.match(astr, start, m.end()):\n            while True:\n                i = astr.rfind('\\n', ind, start)\n                if i == -1:\n                    break\n                start = i\n                if astr[i:i + 7] != '\\n     $':\n                    break\n        start += 1\n        m = routine_end_re.search(astr, m.end())\n        ind = end = m and m.end() - 1 or len(astr)\n        spanlist.append((start, end))\n    return spanlist",
    "docstring": "Return a list of tuples for each function or subroutine each tuple is the start and end of a subroutine or function to be expanded.",
    "type": "function",
    "file_path": "numpy\\numpy\\distutils\\from_template.py",
    "ast_data": "FunctionDef name:parse_structure arg:astr arguments arg Assign Assign While Assign Call If Compare Assign Call If Call Call While Assign Call If Compare Assign If Compare Assign Call Call Assign BoolOp BoolOp Call Call Call Return return:yes"
  },
  {
    "library": "pandas",
    "name": "equals",
    "source_code": "def equals(self, other: object) -> bool:\n    if isinstance(other, RangeIndex):\n        return self._range == other._range\n    return super().equals(other)",
    "docstring": "Determines if two Index objects contain the same elements.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\indexes\\range.py",
    "ast_data": "FunctionDef name:equals arg:self arg:other arguments arg arg If Call Return return:yes Compare Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "numpy",
    "source_code": "def numpy(self):\n    if not isinstance(self.tensor, ops.EagerTensor):\n        raise ValueError('WeakTensor.numpy() is only supported in eager mode.')\n    return self.tensor.numpy()",
    "docstring": "Copy of the contents of this EagerWeakTensor into a NumPy array or scalar.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\weak_tensor.py",
    "ast_data": "FunctionDef name:numpy arg:self arguments arg If Call Raise Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_serialize_to_tensors",
    "source_code": "def _serialize_to_tensors(self):\n    tensors = self.export()\n    return {'-keys': tensors[0], '-values': tensors[1]}",
    "docstring": "Implements checkpointing protocols for .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\lookup_ops.py",
    "ast_data": "FunctionDef name:_serialize_to_tensors arg:self arguments arg Assign Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_IntegerLessThan",
    "source_code": "class _IntegerLessThan(Constraint):\n    is_discrete = True\n\n    def __init__(self, upper_bound):\n        self.upper_bound = upper_bound\n        super().__init__()\n\n    def check(self, value):\n        return (value % 1 == 0) & (value <= self.upper_bound)\n\n    def __repr__(self):\n        fmt_string = self.__class__.__name__[1:]\n        fmt_string += f'(upper_bound={self.upper_bound})'\n        return fmt_string",
    "docstring": "Constrain to an integer interval .",
    "type": "class",
    "file_path": "pytorch\\torch\\distributions\\constraints.py",
    "ast_data": "ClassDef name:_IntegerLessThan Assign FunctionDef name:__init__ arg:self arg:upper_bound arguments arg arg Assign Call Call FunctionDef name:check arg:self arg:value arguments arg arg Return return:yes Compare Compare FunctionDef name:__repr__ arg:self arguments arg Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "initialize",
    "source_code": "def initialize(self):\n    if ops.executing_eagerly_outside_functions():\n        self._iterator._eager_reset()\n        return []\n    else:\n        return [self._iterator.initializer]",
    "docstring": "Initialize underlying iterator. In eager execution, this simply recreates the underlying iterator. In graph execution, it returns the initializer ops for the underlying iterator. Returns: A list of any initializer ops that should be run.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\v1\\input_lib.py",
    "ast_data": "FunctionDef name:initialize arg:self arguments arg If Call Call Return return:no Return return:yes"
  },
  {
    "library": "cherrypy",
    "name": "__bytes__",
    "source_code": "def __bytes__(self):\n    return ntob(self.__str__())",
    "docstring": "Turn the HTTP header value string representation to bytes.",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\lib\\httputil.py",
    "ast_data": "FunctionDef name:__bytes__ arg:self arguments arg Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "apply",
    "source_code": "@classmethod\ndef apply(cls, module, name, amount, n, dim, importance_scores=None):\n    return super().apply(module, name, amount=amount, n=n, dim=dim, importance_scores=importance_scores)",
    "docstring": "Add pruning on the fly and reparametrization of a tensor. Adds the forward pre-hook that enables pruning on the fly and the reparametrization of a tensor in terms of the original tensor and the pruning mask. Args: module (nn.Module): module containing the tensor to prune name (str): parameter name within `torch.norm`. dim (int): index of the dim along which we define channels to prune. importance_scores (torch.Tensor): tensor of importance scores (of same shape as module parameter) used to compute mask for pruning. The values in this tensor indicate the importance of the corresponding elements in the parameter being pruned. If unspecified or None, the module parameter will be used in its place.",
    "type": "method",
    "file_path": "pytorch\\torch\\nn\\utils\\prune.py",
    "ast_data": "FunctionDef name:apply arg:cls arg:module arg:name arg:amount arg:n arg:dim arg:importance_scores arguments arg arg arg arg arg arg arg Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "forward",
    "source_code": "def forward(self, x: torch.Tensor) -> torch.Tensor:\n    return F.rms_norm(x, self.normalized_shape, self.weight, self.eps)",
    "docstring": "Runs forward pass.",
    "type": "method",
    "file_path": "pytorch\\torch\\nn\\modules\\normalization.py",
    "ast_data": "FunctionDef name:forward arg:self arg:x arguments arg arg Return return:yes Call"
  },
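This `forward` belongs to `nn.RMSNorm`; a minimal usage sketch (RMSNorm is only available in recent PyTorch releases):

```python
import torch
from torch import nn

norm = nn.RMSNorm(normalized_shape=8)
x = torch.randn(2, 8)
y = norm(x)  # dispatches to forward(), i.e. F.rms_norm(...)
print(y.shape)  # torch.Size([2, 8])
```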
  {
    "library": "tensorflow",
    "name": "_get_callback_model",
    "source_code": "def _get_callback_model(self):\n    if hasattr(self, '_replicated_model') and self._replicated_model:\n        return self._replicated_model\n    if hasattr(self, 'callback_model') and self.callback_model:\n        return self.callback_model\n    return self",
    "docstring": "Returns the Callback Model for this Model.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\training_v1.py",
    "ast_data": "FunctionDef name:_get_callback_model arg:self arguments arg If BoolOp Call Return return:yes If BoolOp Call Return return:yes Return return:yes"
  },
  {
    "library": "scipy",
    "name": "points",
    "source_code": "@property\ndef points(self):\n    return []",
    "docstring": "Any problematic points introduced by the transformation. These should be specified as points where ``.",
    "type": "method",
    "file_path": "scipy\\scipy\\integrate\\_cubature.py",
    "ast_data": "FunctionDef name:points arg:self arguments arg Return return:no"
  },
  {
    "library": "scikit-learn",
    "name": "create_or_update_comment",
    "source_code": "def create_or_update_comment(comment, message, repo, pr_number, token):\n    if comment is not None:\n        print('updating existing comment')\n        response = requests.patch(f'https://api.github.com/repos/{repo}/issues/comments/{comment['id']}', headers=get_headers(token), json={'body': message})\n    else:\n        print('creating new comment')\n        response = requests.post(f'https://api.github.com/repos/{repo}/issues/{pr_number}/comments', headers=get_headers(token), json={'body': message})\n    response.raise_for_status()",
    "docstring": "Create a new comment or update existing one.",
    "type": "function",
    "file_path": "scikit-learn\\build_tools\\get_comment.py",
    "ast_data": "FunctionDef name:create_or_update_comment arg:comment arg:message arg:repo arg:pr_number arg:token arguments arg arg arg arg arg If Compare Call Assign Call Call Call Assign Call Call Call"
  },
  {
    "library": "django",
    "name": "__eq__",
    "source_code": "def __eq__(self, other):\n    if not isinstance(other, BaseContext):\n        return NotImplemented\n    return self.flatten() == other.flatten()",
    "docstring": "Compare two contexts by comparing theirs 'dicts' attributes.",
    "type": "method",
    "file_path": "django\\django\\template\\context.py",
    "ast_data": "FunctionDef name:__eq__ arg:self arg:other arguments arg arg If Call Return return:yes Return return:yes Compare Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_eager_identity",
    "source_code": "def _eager_identity(tensor, ctx):\n    attrs = ('T', tensor.dtype.as_datatype_enum)\n    [result] = execute.execute(b'Identity', 1, inputs=[tensor], attrs=attrs, ctx=ctx)\n    return result",
    "docstring": "Eager-only version of Identity op; requires tensor is an eager Tensor.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\constant_op.py",
    "ast_data": "FunctionDef name:_eager_identity arg:tensor arg:ctx arguments arg arg Assign Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "disable_non_trt_optimizers_in_rewriter_config",
    "source_code": "def disable_non_trt_optimizers_in_rewriter_config(rewriter_config):\n    off = rewriter_config_pb2.RewriterConfig.OFF\n    rewriter_config.arithmetic_optimization = off\n    rewriter_config.auto_mixed_precision = off\n    rewriter_config.auto_parallel.enable = False\n    rewriter_config.constant_folding = off\n    rewriter_config.debug_stripper = off\n    rewriter_config.dependency_optimization = off\n    rewriter_config.disable_meta_optimizer = False\n    rewriter_config.disable_model_pruning = True\n    rewriter_config.function_optimization = off\n    rewriter_config.implementation_selector = off\n    rewriter_config.layout_optimizer = off\n    rewriter_config.loop_optimization = off\n    rewriter_config.memory_optimization = rewriter_config_pb2.RewriterConfig.NO_MEM_OPT\n    rewriter_config.min_graph_nodes = -1\n    rewriter_config.pin_to_host_optimization = off\n    rewriter_config.remapping = off\n    rewriter_config.scoped_allocator_optimization = off\n    rewriter_config.shape_optimization = off",
    "docstring": "Modifies rewriter_config to disable all non-TRT optimizations.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\compiler\\tensorrt\\utils.py",
    "ast_data": "FunctionDef name:disable_non_trt_optimizers_in_rewriter_config arg:rewriter_config arguments arg Assign Assign Assign Assign Assign Assign Assign Assign Assign Assign Assign Assign Assign Assign Assign Assign Assign Assign Assign"
  },
  {
    "library": "tensorflow",
    "name": "shape_tensor",
    "source_code": "def shape_tensor(self, name='shape_tensor'):\n    with self._name_scope(name):\n        if self.shape.is_fully_defined():\n            return linear_operator_util.shape_tensor(self.shape.as_list())\n        else:\n            return self._shape_tensor()",
    "docstring": "Shape of this , determined at runtime. If this operator acts like the batch matrix with , then this returns a holding , equivalent to . Args: name: A name for this . Returns:",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\linalg\\linear_operator.py",
    "ast_data": "FunctionDef name:shape_tensor arg:self arg:name arguments arg arg With Call If Call Return return:yes Call Call Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "mod",
    "source_code": "def mod(self, other, level=None, fill_value=None, axis: Axis=0) -> Series:\n    return self._flex_method(other, operator.mod, level=level, fill_value=fill_value, axis=axis)",
    "docstring": "Return Modulo of series and other, element-wise (binary operator ). Equivalent to `Python documentation `_ for more details. Examples -------- >>> a = pd.Series([1, 1, 1, np.nan], index=[\"a\", \"b\", \"c\", \"d\"]) >>> a a 1.0 b 1.0 c 1.0 d NaN dtype: float64 >>> b = pd.Series([1, np.nan, 1, np.nan], index=[\"a\", \"b\", \"d\", \"e\"]) >>> b a 1.0 b NaN d 1.0 e NaN dtype: float64 >>> a.mod(b, fill_value=0) a 0.0 b NaN c NaN d 0.0 e NaN dtype: float64",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\series.py",
    "ast_data": "FunctionDef name:mod arg:self arg:other arg:level arg:fill_value arg:axis arguments arg arg arg arg arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "init_op",
    "source_code": "@property\ndef init_op(self):\n    return self._init_op",
    "docstring": "Return the Init Op used by the supervisor. Returns: An Op or .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\training\\supervisor.py",
    "ast_data": "FunctionDef name:init_op arg:self arguments arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "compute_file_sha256",
    "source_code": "def compute_file_sha256(path: str) -> str:\n    if not os.path.exists(path):\n        return ''\n    hash = hashlib.sha256()\n    with open(path, 'rb') as f:\n        for b in f:\n            hash.update(b)\n    return hash.hexdigest()",
    "docstring": "Compute the SHA256 hash of a file and return it as a hex string.",
    "type": "function",
    "file_path": "pytorch\\tools\\linter\\adapters\\s3_init.py",
    "ast_data": "FunctionDef name:compute_file_sha256 arg:path arguments arg If Call Return return:yes Assign Call With Call For Call Return return:yes Call"
  },
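A self-contained sanity check of the hashing helper against `hashlib` directly (the function body is restated here so the snippet runs on its own):

```python
import hashlib
import os
import tempfile

def compute_file_sha256(path: str) -> str:
    if not os.path.exists(path):
        return ""
    h = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in f:  # iterates line-wise; fine for hashing
            h.update(chunk)
    return h.hexdigest()

with tempfile.NamedTemporaryFile(delete=False) as f:
    f.write(b"hello world\n")
    path = f.name

assert compute_file_sha256(path) == hashlib.sha256(b"hello world\n").hexdigest()
assert compute_file_sha256(path + ".missing") == ""  # missing file -> ''
os.unlink(path)
```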
  {
    "library": "cherrypy",
    "name": "start",
    "source_code": "def start(self):\n    from flup.server.scgi import WSGIServer\n    self.scgiserver = WSGIServer(*self.args, **self.kwargs)\n    self.scgiserver._installSignalHandlers = lambda: None\n    self.scgiserver._oldSIGs = []\n    self.ready = True\n    self.scgiserver.run()",
    "docstring": "Start the SCGI server.",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\process\\servers.py",
    "ast_data": "FunctionDef name:start arg:self arguments arg Assign Call Assign arguments Assign Assign Call"
  },
  {
    "library": "pytorch",
    "name": "_build_rpc_profiling_key",
    "source_code": "def _build_rpc_profiling_key(exec_type, func_name, current_worker_name, dst_worker_name):\n    profile_key = f'rpc_{exec_type.value}#{func_name}({current_worker_name} -> {dst_worker_name})'\n    return profile_key",
    "docstring": "Builds the key that RPC calls are profiled with using the autograd profiler. This will be the name of the corresponding Event recorded in the profiler. Args: exec_type (RPCExecMode): Type of RPC/RRef call func_name (str): Name of function being profiled. current_worker_name (str): Name of current worker. dst_worker_name (str): Name of the destination worker. Returns: String representing profiling key",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\rpc\\internal.py",
    "ast_data": "FunctionDef name:_build_rpc_profiling_key arg:exec_type arg:func_name arg:current_worker_name arg:dst_worker_name arguments arg arg arg arg Assign Return return:yes"
  },
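The key format is easy to see with a stand-in enum (the real `RPCExecMode` lives in `torch.distributed.rpc`; the value below is illustrative):

```python
from enum import Enum

class RPCExecMode(Enum):  # stand-in for torch's enum
    SYNC = "sync"

exec_type, func_name = RPCExecMode.SYNC, "my_func"
key = f"rpc_{exec_type.value}#{func_name}(worker0 -> worker1)"
print(key)  # rpc_sync#my_func(worker0 -> worker1)
```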
  {
    "library": "numpy",
    "name": "ifft",
    "source_code": "@array_function_dispatch(_fft_dispatcher)\ndef ifft(a, n=None, axis=-1, norm=None, out=None):\n    a = asarray(a)\n    if n is None:\n        n = a.shape[axis]\n    output = _raw_fft(a, n, axis, False, False, norm, out=out)\n    return output",
    "docstring": "Compute the one-dimensional inverse discrete Fourier Transform. This function computes the inverse of the one-dimensional *n*-point discrete Fourier transform computed by . In other words, `numpy.fftfftnumpy.fftnnaxisnumpy.fftaxisaxisaxisaifftnifft`. Examples -------- >>> import numpy as np >>> np.fft.ifft([0, 4, 0, 0]) array([ 1.+0.j, 0.+1.j, -1.+0.j, 0.-1.j]) # may vary Create and plot a band-limited signal with random phases: >>> import matplotlib.pyplot as plt >>> t = np.arange(400) >>> n = np.zeros((400,), dtype=complex) >>> n[40:60] = np.exp(1j*np.random.uniform(0, 2*np.pi, (20,))) >>> s = np.fft.ifft(n) >>> plt.plot(t, s.real, label='real') [] >>> plt.plot(t, s.imag, '--', label='imaginary') [] >>> plt.legend() >>> plt.show()",
    "type": "function",
    "file_path": "numpy\\numpy\\fft\\_pocketfft.py",
    "ast_data": "FunctionDef name:ifft arg:a arg:n arg:axis arg:norm arg:out arguments arg arg arg arg arg Assign Call If Compare Assign Assign Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "segment_sqrt_n",
    "source_code": "@dispatch.dispatch_for_api(math_ops.unsorted_segment_sqrt_n)\ndef segment_sqrt_n(data: ragged_tensor.RaggedOrDense, segment_ids: ragged_tensor.RaggedOrDense, num_segments, name=None):\n    with ops.name_scope(name, 'RaggedSegmentSqrtN', [data, segment_ids, num_segments]):\n        total = segment_sum(data, segment_ids, num_segments)\n        ones = ragged_tensor.RaggedTensor.from_nested_row_splits(array_ops.ones_like(data.flat_values), data.nested_row_splits, validate=False)\n        count = segment_sum(ones, segment_ids, num_segments)\n        if ragged_tensor.is_ragged(total):\n            return total.with_flat_values(total.flat_values / math_ops.sqrt(count.flat_values))\n        else:\n            return total / math_ops.sqrt(count)",
    "docstring": "For docs, see: _RAGGED_SEGMENT_DOCSTRING.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\ragged_math_ops.py",
    "ast_data": "FunctionDef name:segment_sqrt_n arg:data arg:segment_ids arg:num_segments arg:name arguments arg arg arg arg With Call Assign Call Assign Call Call Assign Call If Call Return return:yes Call Call Return return:yes Call Call"
  },
  {
    "library": "scikit-learn",
    "name": "_global_clustering",
    "source_code": "def _global_clustering(self, X=None):\n    clusterer = self.n_clusters\n    centroids = self.subcluster_centers_\n    compute_labels = X is not None and self.compute_labels\n    not_enough_centroids = False\n    if isinstance(clusterer, Integral):\n        clusterer = AgglomerativeClustering(n_clusters=self.n_clusters)\n        if len(centroids) < self.n_clusters:\n            not_enough_centroids = True\n    self._subcluster_norms = row_norms(self.subcluster_centers_, squared=True)\n    if clusterer is None or not_enough_centroids:\n        self.subcluster_labels_ = np.arange(len(centroids))\n        if not_enough_centroids:\n            warnings.warn('Number of subclusters found (%d) by BIRCH is less than (%d). Decrease the threshold.' % (len(centroids), self.n_clusters), ConvergenceWarning)\n    else:\n        self.subcluster_labels_ = clusterer.fit_predict(self.subcluster_centers_)\n    if compute_labels:\n        self.labels_ = self._predict(X)",
    "docstring": "Global clustering for the subclusters obtained after fitting",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\cluster\\_birch.py",
    "ast_data": "FunctionDef name:_global_clustering arg:self arg:X arguments arg arg Assign Assign Assign BoolOp Compare Assign If Call Assign Call If Compare Call Assign Assign Call If BoolOp Compare Assign Call Call If Call Call Assign Call If Assign Call"
  },
  {
    "library": "tensorflow",
    "name": "push_forwardprop_state",
    "source_code": "@contextlib.contextmanager\ndef push_forwardprop_state():\n    try:\n        pywrap_tfe.TFE_Py_ForwardAccumulatorPushState()\n        yield\n    finally:\n        pywrap_tfe.TFE_Py_ForwardAccumulatorPopState()",
    "docstring": "Temporarily push or pop transient state for accumulators in the active set. Allows an accumulator which is currently processing an operation to temporarily reset its state. This is useful when building forwardprop versions of functions, where an accumulator will trigger function building and then must process captured symbolic tensors while building it. Without pushing and popping, accumulators ignore operations executed as a direct result of their own jvp computations. Yields: None (used for its side effect).",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\forwardprop_util.py",
    "ast_data": "FunctionDef name:push_forwardprop_state arguments Try Call Call"
  },
  {
    "library": "django",
    "name": "update",
    "source_code": "def update(self, other_dict):\n    if not hasattr(other_dict, '__getitem__'):\n        raise TypeError('other_dict must be a mapping (dictionary-like) object.')\n    if isinstance(other_dict, BaseContext):\n        other_dict = other_dict.dicts[1:].pop()\n    return ContextDict(self, other_dict)",
    "docstring": "Push other_dict to the stack of dictionaries in the Context",
    "type": "method",
    "file_path": "django\\django\\template\\context.py",
    "ast_data": "FunctionDef name:update arg:self arg:other_dict arguments arg arg If Call Raise Call If Call Assign Call Return return:yes Call"
  },
  {
    "library": "authlib",
    "name": "check_response_type",
    "source_code": "def check_response_type(self, response_type):\n    raise NotImplementedError()",
    "docstring": "Validate if the client can handle the given response_type. There are two response types defined by RFC6749: code and token. For instance, there is a `` column in your client:: def check_response_type(self, response_type): return response_type in self.response_types :param response_type: the requested response_type string. :return: bool",
    "type": "method",
    "file_path": "authlib\\authlib\\oauth2\\rfc6749\\models.py",
    "ast_data": "FunctionDef name:check_response_type arg:self arg:response_type arguments arg arg Raise Call"
  },
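A minimal concrete client following the pattern the docstring suggests, assuming the client record stores its allowed types in a `response_types` list (illustrative attribute name):

```python
from authlib.oauth2.rfc6749.models import ClientMixin

class MyClient(ClientMixin):
    def __init__(self, response_types):
        self.response_types = response_types

    def check_response_type(self, response_type):
        # RFC 6749 defines the "code" and "token" response types.
        return response_type in self.response_types

client = MyClient(["code"])
assert client.check_response_type("code")
assert not client.check_response_type("token")
```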
  {
    "library": "scikit-learn",
    "name": "split",
    "source_code": "def split(self, X, y=None, groups=None):\n    X, y, groups = indexable(X, y, groups)\n    for train, test in self._iter_indices(X, y, groups):\n        yield (train, test)",
    "docstring": "Generate indices to split data into training and test set. Parameters ---------- X : array-like of shape (n_samples, n_features) Training data, where is the number of samples and is the number of features. y : array-like of shape (n_samples,) The target variable for supervised learning problems. groups : array-like of shape (n_samples,), default=None Group labels for the samples used while splitting the dataset into train/test set. Yields ------ train : ndarray The training set indices for that split. test : ndarray The testing set indices for that split. Notes ----- Randomized CV splitters may return different results for each call of split. You can make the results identical by setting to an integer.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\model_selection\\_split.py",
    "ast_data": "FunctionDef name:split arg:self arg:X arg:y arg:groups arguments arg arg arg arg Assign Call For Call"
  },
  {
    "library": "pytorch",
    "name": "_local_post_state_dict_hook",
    "source_code": "@no_type_check\ndef _local_post_state_dict_hook(module: nn.Module, fsdp_state: _FSDPState, state_dict: dict[str, Any], prefix: str) -> dict[str, Any]:\n    _replace_by_prefix(state_dict, f'{prefix}{FSDP_PREFIX}', prefix)\n    if not _has_fsdp_params(fsdp_state, module):\n        return state_dict\n    assert _module_handle(fsdp_state, module), 'Should have returned early'\n    flat_param = _module_handle(fsdp_state, module).flat_param\n    full_numel = flat_param._unpadded_unsharded_size.numel()\n    shard_offset = flat_param.numel() * fsdp_state.rank\n    valid_data_size = flat_param.numel() - flat_param._shard_numel_padded\n    if valid_data_size > 0:\n        flat_param = flat_param[:valid_data_size].view(valid_data_size)\n        local_shards = [Shard.from_tensor_and_offsets(flat_param, [shard_offset], fsdp_state.rank)]\n    else:\n        local_shards = []\n    sharded_tensor = init_from_local_shards(local_shards, full_numel, process_group=fsdp_state.process_group)\n    if fsdp_state._state_dict_config.offload_to_cpu:\n        sharded_tensor = sharded_tensor.cpu()\n    state_dict[f'{prefix}{FLAT_PARAM}'] = sharded_tensor\n    return state_dict",
    "docstring": "This hook create a ShardedTensor from the local flat_param and replace the state_dict[f\"{prefix}{FLAT_PARAM}] with the ShardedTensor. No copy will happen. The underlying storage is the same.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\fsdp\\_state_dict_utils.py",
    "ast_data": "FunctionDef name:_local_post_state_dict_hook arg:module arg:fsdp_state arg:state_dict arg:prefix arguments arg arg arg arg Call If Call Return return:yes Call Assign Call Assign Call Assign Call Assign Call If Compare Assign Call Assign Call Assign Assign Call If Assign Call Assign Return return:yes"
  },
  {
    "library": "kornia",
    "name": "match_snn",
    "source_code": "def match_snn(desc1: Tensor, desc2: Tensor, th: float=0.8, dm: Optional[Tensor]=None) -> Tuple[Tensor, Tensor]:\n    KORNIA_CHECK_SHAPE(desc1, ['B', 'DIM'])\n    KORNIA_CHECK_SHAPE(desc2, ['B', 'DIM'])\n    if desc2.shape[0] < 2:\n        return _no_match(desc1)\n    distance_matrix = _get_lazy_distance_matrix(desc1, desc2, dm)\n    vals, idxs_in_2 = torch.topk(distance_matrix, 2, dim=1, largest=False)\n    ratio = vals[:, 0] / vals[:, 1]\n    mask = ratio <= th\n    match_dists = ratio[mask]\n    if len(match_dists) == 0:\n        return _no_match(distance_matrix)\n    idxs_in1 = torch.arange(0, idxs_in_2.size(0), device=distance_matrix.device)[mask]\n    idxs_in_2 = idxs_in_2[:, 0][mask]\n    matches_idxs = concatenate([idxs_in1.view(-1, 1), idxs_in_2.view(-1, 1)], 1)\n    return (match_dists.view(-1, 1), matches_idxs.view(-1, 2))",
    "docstring": "Find nearest neighbors in desc2 for each vector in desc1. The method satisfies first to second nearest neighbor distance <= th. If the distance matrix dm is not provided, :py:func: is used. Args: desc1: Batch of descriptors of a shape :math:. desc2: Batch of descriptors of a shape :math:. th: distance ratio threshold. dm: Tensor containing the distances from each descriptor in desc1 to each descriptor in desc2, shape of :math:. Return: - Descriptor distance of matching descriptors, shape of :math:. - Long tensor indexes of matching descriptors in desc1 and desc2. Shape: :math:, where 0 <= B3 <= B1.",
    "type": "function",
    "file_path": "kornia\\kornia\\feature\\matching.py",
    "ast_data": "FunctionDef name:match_snn arg:desc1 arg:desc2 arg:th arg:dm arguments arg arg arg arg Call Call If Compare Return return:yes Call Assign Call Assign Call Assign Assign Compare Assign If Compare Call Return return:yes Call Assign Call Call Assign Assign Call Call Call Return return:yes Call Call"
  },
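A hedged usage sketch with random descriptors; with random data the ratio test may reject everything, in which case both returned tensors are empty:

```python
import torch
from kornia.feature import match_snn

desc1 = torch.rand(10, 128)  # (B1, D)
desc2 = torch.rand(20, 128)  # (B2, D)

dists, idxs = match_snn(desc1, desc2, th=0.99)
# dists: (B3, 1) first-to-second NN ratios; idxs: (B3, 2) pairs into desc1/desc2.
print(dists.shape, idxs.shape)
```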
  {
    "library": "seaborn",
    "name": "_define_support_univariate",
    "source_code": "def _define_support_univariate(self, x, weights):\n    kde = self._fit(x, weights)\n    bw = np.sqrt(kde.covariance.squeeze())\n    grid = self._define_support_grid(x, bw, self.cut, self.clip, self.gridsize)\n    return grid",
    "docstring": "Create a 1D grid of evaluation points.",
    "type": "method",
    "file_path": "seaborn\\seaborn\\_statistics.py",
    "ast_data": "FunctionDef name:_define_support_univariate arg:self arg:x arg:weights arguments arg arg arg Assign Call Assign Call Call Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_check_output_shapes_fully_defined",
    "source_code": "def _check_output_shapes_fully_defined(self):\n    for (path, _), output_shape in zip(nest.flatten_with_joined_string_paths(self._feature_config), self._output_shapes):\n        if not output_shape.is_fully_defined():\n            raise ValueError(f'Input Feature {path} has output shape set as {output_shape} which is not fully defined. Please specify the fully defined shape in either FeatureConfig or for the build method.')",
    "docstring": "Check if the output shape is fully defined.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\tpu\\tpu_embedding_v2.py",
    "ast_data": "FunctionDef name:_check_output_shapes_fully_defined arg:self arguments arg For Call Call If Call Raise Call"
  },
  {
    "library": "tensorflow",
    "name": "create_init_files",
    "source_code": "def create_init_files(dst_dir: str) -> None:\n    for root, _, files in os.walk(dst_dir):\n        if any((file.endswith('.py') or file.endswith('.so') for file in files)):\n            curr_dir = root\n            while curr_dir != dst_dir:\n                init_path = os.path.join(curr_dir, '__init__.py')\n                if not os.path.exists(init_path):\n                    open(init_path, 'w').close()\n                curr_dir = os.path.dirname(curr_dir)",
    "docstring": "Create __init__.py files.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\tools\\pip_package\\utils\\utils.py",
    "ast_data": "FunctionDef name:create_init_files arg:dst_dir arguments arg For Call If Call BoolOp Call Call Assign While Compare Assign Call If Call Call Call Assign Call"
  },
  {
    "library": "scipy",
    "name": "chebys",
    "source_code": "def chebys(n, monic=False):\n    if n < 0:\n        raise ValueError('n must be nonnegative.')\n    if n == 0:\n        n1 = n + 1\n    else:\n        n1 = n\n    x, w = roots_chebys(n1)\n    if n == 0:\n        x, w = ([], [])\n    hn = pi\n    kn = 1.0\n    p = orthopoly1d(x, w, hn, kn, wfunc=lambda x: sqrt(1 - x * x / 4.0), limits=(-2, 2), monic=monic)\n    if not monic:\n        factor = (n + 1.0) / p(2)\n        p._scale(factor)\n        p.__dict__['_eval_func'] = lambda x: _ufuncs.eval_chebys(n, x)\n    return p",
    "docstring": "Chebyshev polynomial of the second kind on :math:. Defined as :math: where :math: is the nth Chebychev polynomial of the second kind. Parameters ---------- n : int Degree of the polynomial. monic : bool, optional If , scale the leading coefficient to be 1. Default is . Returns ------- S : orthopoly1d Chebyshev polynomial of the second kind on :math:. See Also -------- chebyu : Chebyshev polynomial of the second kind Notes ----- The polynomials :math: are orthogonal over :math: with weight function :math:. References ---------- .. [1] Abramowitz and Stegun, \"Handbook of Mathematical Functions\" Section 22. National Bureau of Standards, 1972.",
    "type": "function",
    "file_path": "scipy\\scipy\\special\\_orthogonal.py",
    "ast_data": "FunctionDef name:chebys arg:n arg:monic arguments arg arg If Compare Raise Call If Compare Assign Assign Assign Call If Compare Assign Assign Assign Assign Call arguments arg Call If Assign Call Call Assign arguments arg Call Return return:yes"
  },
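A quick numerical check of the defining identity S_n(x) = U_n(x/2) on [-2, 2], via `scipy.special.chebyu`:

```python
import numpy as np
from scipy.special import chebys, chebyu

S3 = chebys(3)
U3 = chebyu(3)
x = np.linspace(-2, 2, 9)
# S_n(x) should agree with U_n(x / 2) on the interval [-2, 2].
np.testing.assert_allclose(S3(x), U3(x / 2), atol=1e-8)
```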
  {
    "library": "pytorch",
    "name": "format_frame",
    "source_code": "def format_frame(frame, *, base=None, line=False):\n    extra_line = ''\n    if line:\n        extra_line = f'{frame.line}  # '\n    return f'{extra_line}{shorten_filename(frame.filename, base=base)}:{frame.lineno} in {frame.name}'",
    "docstring": "Format a FrameSummary in a short way, without printing full absolute path or code. The idea is the result fits on a single line.",
    "type": "function",
    "file_path": "pytorch\\torch\\utils\\_traceback.py",
    "ast_data": "FunctionDef name:format_frame arg:frame arguments arg arg arg Assign If Assign Return return:yes Call"
  },
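A short sketch using a `FrameSummary` from the standard `traceback` module (`format_frame` is a private torch utility, so treat the import as internal):

```python
import traceback
from torch.utils._traceback import format_frame

frame = traceback.extract_stack()[-1]  # FrameSummary for this very line
print(format_frame(frame))             # e.g. demo.py:4 in <module>
print(format_frame(frame, line=True))  # same, prefixed with the source line
```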
  {
    "library": "tensorflow",
    "name": "__truediv__",
    "source_code": "def __truediv__(self, other):\n    raise TypeError(\"unsupported operand type(s) for /: 'Dimension' and '{}', please use // instead\".format(type(other).__name__))",
    "docstring": "Use via instead. This function exists only to have a better error message. Instead of: , this function will explicitly call for usage of instead. Args: other: Another . Raises: TypeError.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\tensor_shape.py",
    "ast_data": "FunctionDef name:__truediv__ arg:self arg:other arguments arg arg Raise Call Call Call"
  },
  {
    "library": "pandas",
    "name": "is_exists",
    "source_code": "@property\ndef is_exists(self) -> bool:\n    return 'table' in self.group",
    "docstring": "has this table been created",
    "type": "method",
    "file_path": "pandas\\pandas\\io\\pytables.py",
    "ast_data": "FunctionDef name:is_exists arg:self arguments arg Return return:yes Compare"
  },
  {
    "library": "scipy",
    "name": "argmax",
    "source_code": "def argmax(self, axis=None, out=None, *, explicit=False):\n    return self._argminmax(axis, out, np.argmax, np.greater, explicit)",
    "docstring": "Return indices of maximum elements along an axis. By default, implicit zero elements are taken into account. If there are several minimum values, the index of the first occurrence is returned. If is set, only explicitly stored elements will be considered. Parameters ---------- axis : {-2, -1, 0, 1, None}, optional Axis along which the argmax is computed. If None (default), index of the maximum element in the flatten data is returned. out : None, optional This argument is in the signature *solely* for NumPy compatibility reasons. Do not pass in anything except for the default value, as this argument is not used. explicit : {False, True} optional (default: False) When set to True, only explicitly stored elements will be considered. If axis is not None and an axis has no stored elements, argmax is undefined, so the index `axis` is 1.",
    "type": "method",
    "file_path": "scipy\\scipy\\sparse\\_data.py",
    "ast_data": "FunctionDef name:argmax arg:self arg:axis arg:out arguments arg arg arg arg Return return:yes Call"
  },
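A sketch of the `explicit` switch on a small CSR array; note the keyword is a newer SciPy addition, so this assumes a recent release:

```python
import numpy as np
from scipy.sparse import csr_array

A = csr_array(np.array([[0, -3, 0],
                        [0,  0, -1]]))
# Implicit zeros participate by default: each row's max is 0 at column 0.
print(A.argmax(axis=1))                 # [0 0]
# Only stored entries are considered with explicit=True.
print(A.argmax(axis=1, explicit=True))  # [1 2]
```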
  {
    "library": "pytorch",
    "name": "FileOpenerIterDataPipe",
    "source_code": "@functional_datapipe('open_files')\nclass FileOpenerIterDataPipe(IterDataPipe[tuple[str, IOBase]]):\n\n    def __init__(self, datapipe: Iterable[str], mode: str='r', encoding: Optional[str]=None, length: int=-1):\n        super().__init__()\n        self.datapipe: Iterable = datapipe\n        self.mode: str = mode\n        self.encoding: Optional[str] = encoding\n        if self.mode not in ('b', 't', 'rb', 'rt', 'r'):\n            raise ValueError(f'Invalid mode {mode}')\n        if 'b' in mode and encoding is not None:\n            raise ValueError(\"binary mode doesn't take an encoding argument\")\n        self.length: int = length\n\n    def __iter__(self):\n        yield from get_file_binaries_from_pathnames(self.datapipe, self.mode, self.encoding)\n\n    def __len__(self):\n        if self.length == -1:\n            raise TypeError(f\"{type(self).__name__} instance doesn't have valid length\")\n        return self.length",
    "docstring": "Given pathnames, opens files and yield pathname and file stream in a tuple (functional name: ``. length: Nominal length of the datapipe Note: The opened file handles will be closed by Python's GC periodically. Users can choose to close them explicitly. Example: >>> # xdoctest: +SKIP >>> from torchdata.datapipes.iter import FileLister, FileOpener, StreamReader >>> dp = FileLister(root=\".\").filter(lambda fname: fname.endswith('.txt')) >>> dp = FileOpener(dp) >>> dp = StreamReader(dp) >>> list(dp) [('./abc.txt', 'abc')]",
    "type": "class",
    "file_path": "pytorch\\torch\\utils\\data\\datapipes\\iter\\fileopener.py",
    "ast_data": "ClassDef name:FileOpenerIterDataPipe FunctionDef name:__init__ arg:self arg:datapipe arg:mode arg:encoding arg:length arguments arg arg arg arg arg Call Call If Compare Raise Call If BoolOp Compare Compare Raise Call FunctionDef name:__iter__ arg:self arguments arg Call FunctionDef name:__len__ arg:self arguments arg If Compare Raise Call Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "gpu_use_nccl_communication",
    "source_code": "def gpu_use_nccl_communication() -> bool:\n    return os.environ.get('DTENSOR_GPU_USE_NCCL_COMMUNICATION', '0') != '0'",
    "docstring": "Return True if environment indicates NCCL shall be used for GPU.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\dtensor\\python\\config.py",
    "ast_data": "FunctionDef name:gpu_use_nccl_communication arguments Return return:yes Compare Call"
  },
  {
    "library": "numpy",
    "name": "masked_not_equal",
    "source_code": "def masked_not_equal(x, value, copy=True):\n    return masked_where(not_equal(x, value), x, copy=copy)",
    "docstring": "Mask an array where *not* equal to a given value. This function is a shortcut to `condition` = (x != value). See Also -------- masked_where : Mask where a condition is met. Examples -------- >>> import numpy as np >>> import numpy.ma as ma >>> a = np.arange(4) >>> a array([0, 1, 2, 3]) >>> ma.masked_not_equal(a, 2) masked_array(data=[--, --, 2, --], mask=[ True, True, False, True], fill_value=999999)",
    "type": "function",
    "file_path": "numpy\\numpy\\ma\\core.py",
    "ast_data": "FunctionDef name:masked_not_equal arg:x arg:value arg:copy arguments arg arg arg Return return:yes Call Call"
  },
  {
    "library": "numpy",
    "name": "_izip_fields",
    "source_code": "def _izip_fields(iterable):\n    for element in iterable:\n        if hasattr(element, '__iter__') and (not isinstance(element, str)):\n            yield from _izip_fields(element)\n        elif isinstance(element, np.void) and len(tuple(element)) == 1:\n            yield from _izip_fields(element)\n        else:\n            yield element",
    "docstring": "Returns an iterator of concatenated fields from a sequence of arrays.",
    "type": "function",
    "file_path": "numpy\\numpy\\lib\\recfunctions.py",
    "ast_data": "FunctionDef name:_izip_fields arg:iterable arguments arg For If BoolOp Call Call Call If BoolOp Call Compare Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_expand_variables",
    "source_code": "def _expand_variables(input_str, cmake_vars):\n\n    def replace(match):\n        if match.group(1) in cmake_vars:\n            return cmake_vars[match.group(1)]\n        return ''\n    return _CMAKE_ATVAR_REGEX.sub(replace, _CMAKE_VAR_REGEX.sub(replace, input_str))",
    "docstring": "Expands ${VARIABLE}s and @VARIABLE@s in 'input_str', using dictionary 'cmake_vars'. Args: input_str: the string containing ${VARIABLE} or @VARIABLE@ expressions to expand. cmake_vars: a dictionary mapping variable names to their values. Returns: The expanded string.",
    "type": "function",
    "file_path": "tensorflow\\third_party\\xla\\third_party\\llvm_openmp\\expand_cmake_vars.py",
    "ast_data": "FunctionDef name:_expand_variables arg:input_str arg:cmake_vars arguments arg arg FunctionDef name:replace arg:match arguments arg If Compare Call Return return:yes Call Return return:yes Return return:yes Call Call"
  },
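The two regexes are module-level constants not shown here; a self-contained sketch with plausible stand-in patterns (not the real `_CMAKE_VAR_REGEX`/`_CMAKE_ATVAR_REGEX` definitions):

```python
import re

_CMAKE_VAR_REGEX = re.compile(r"\$\{([A-Za-z_0-9]+)\}")  # ${VAR}
_CMAKE_ATVAR_REGEX = re.compile(r"@([A-Za-z_0-9]+)@")    # @VAR@

def expand_variables(input_str, cmake_vars):
    def replace(match):
        # Unknown variables expand to the empty string, as above.
        return cmake_vars.get(match.group(1), "")
    # Expand ${...} first, then @...@, mirroring the nested sub() calls.
    return _CMAKE_ATVAR_REGEX.sub(replace, _CMAKE_VAR_REGEX.sub(replace, input_str))

print(expand_variables("prefix ${FOO}/@BAR@", {"FOO": "/usr", "BAR": "lib"}))
# prefix /usr/lib
```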
  {
    "library": "pytorch",
    "name": "_join_cuda_home",
    "source_code": "def _join_cuda_home(*paths) -> str:\n    if CUDA_HOME is None:\n        raise OSError('CUDA_HOME environment variable is not set. Please set it to your CUDA install root.')\n    return os.path.join(CUDA_HOME, *paths)",
    "docstring": "Join paths with CUDA_HOME, or raises an error if it CUDA_HOME is not set. This is basically a lazy way of raising an error for missing $CUDA_HOME only once we need to get any CUDA-specific path.",
    "type": "function",
    "file_path": "pytorch\\torch\\utils\\cpp_extension.py",
    "ast_data": "FunctionDef name:_join_cuda_home arguments arg If Compare Raise Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_call_loss",
    "source_code": "def _call_loss(inputs, ragged_output):\n    r = loss_fn(*inputs)\n    if ragged_output and (not isinstance(r, ragged_tensor.RaggedTensor)):\n        r = ragged_tensor.RaggedTensor.from_tensor(r)\n    elif not ragged_output and isinstance(r, ragged_tensor.RaggedTensor):\n        r = r.to_tensor()\n    return r",
    "docstring": "Adapt the result to ragged or dense tensor according to the expected output type. This is done so that all the return values of the map operation have the same type.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\losses.py",
    "ast_data": "FunctionDef name:_call_loss arg:inputs arg:ragged_output arguments arg arg Assign Call If BoolOp Call Assign Call If BoolOp Call Assign Call Return return:yes"
  },
  {
    "library": "pandas",
    "name": "_accumulate",
    "source_code": "def _accumulate(self, name: str, *, skipna: bool=True, **kwargs) -> ExtensionArray:\n    raise NotImplementedError(f'cannot perform {name} with type {self.dtype}')",
    "docstring": "Return an ExtensionArray performing an accumulation operation. The underlying data type might change. Parameters ---------- name : str Name of the function, supported values are: - cummin - cummax - cumsum - cumprod skipna : bool, default True If True, skip NA values. **kwargs Additional keyword arguments passed to the accumulation function. Currently, there is no supported kwarg. Returns ------- array An array performing the accumulation operation. Raises ------ NotImplementedError : subclass does not define accumulations See Also -------- api.extensions.ExtensionArray._concat_same_type : Concatenate multiple array of this dtype. api.extensions.ExtensionArray.view : Return a view on the array. api.extensions.ExtensionArray._explode : Transform each element of list-like to a row. Examples -------- >>> arr = pd.array([1, 2, 3]) >>> arr._accumulate(name=\"cumsum\") [1, 3, 6] Length: 3, dtype: Int64",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\arrays\\base.py",
    "ast_data": "FunctionDef name:_accumulate arg:self arg:name arguments arg arg arg arg Raise Call"
  },
  {
    "library": "numpy",
    "name": "add_library",
    "source_code": "def add_library(self, name, sources, **build_info):\n    self._add_library(name, sources, None, build_info)\n    dist = self.get_distribution()\n    if dist is not None:\n        self.warn('distutils distribution has been initialized, it may be too late to add a library ' + name)",
    "docstring": "Add library to configuration. Parameters ---------- name : str Name of the extension. sources : sequence List of the sources. The list of sources may contain functions (called source generators) which must take an extension instance and a build directory as inputs and return a source file or list of source files or None. If None is returned then no sources are generated. If the Extension instance has no sources after processing all source generators, then no extension module is built. build_info : dict, optional The following keys are allowed: * depends * macros * include_dirs * extra_compiler_args * extra_f77_compile_args * extra_f90_compile_args * f2py_options * language",
    "type": "method",
    "file_path": "numpy\\numpy\\distutils\\misc_util.py",
    "ast_data": "FunctionDef name:add_library arg:self arg:name arg:sources arguments arg arg arg arg Call Assign Call If Compare Call"
  },
  {
    "library": "tensorflow",
    "name": "merge",
    "source_code": "def merge(self, other):\n    if other.number_of_shards is not None:\n        self.set_number_of_shards(other.number_of_shards)\n    if other.shard_dimension is not None:\n        self.set_shard_dimension(other.shard_dimension)",
    "docstring": "Merges the policy of another policy into the current policy. Args: other: The policy to merge into this one. Raises: ValueError: If this policy has been frozen and the merge conflicts with the frozen policy.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\tpu\\tpu_sharding.py",
    "ast_data": "FunctionDef name:merge arg:self arg:other arguments arg arg If Compare Call If Compare Call"
  },
  {
    "library": "tensorflow",
    "name": "_may_reduce_to_scalar",
    "source_code": "def _may_reduce_to_scalar(keepdims, axis, output):\n    if not _has_fully_defined_shape(output) and (not keepdims) and (axis is None):\n        output.set_shape(())\n    return output",
    "docstring": "Set a reduction's output shape to be a scalar if we are certain.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\math_ops.py",
    "ast_data": "FunctionDef name:_may_reduce_to_scalar arg:keepdims arg:axis arg:output arguments arg arg arg If BoolOp Call Compare Call Return return:yes"
  },
  {
    "library": "pandas",
    "name": "__repr__",
    "source_code": "def __repr__(self) -> str_type:\n    return str(self)",
    "docstring": "Return a string representation for a particular object.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\dtypes\\dtypes.py",
    "ast_data": "FunctionDef name:__repr__ arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "_plot_average_dependence",
    "source_code": "def _plot_average_dependence(self, avg_preds, feature_values, ax, pd_line_idx, line_kw, categorical, bar_kw):\n    if categorical:\n        bar_idx = np.unravel_index(pd_line_idx, self.bars_.shape)\n        self.bars_[bar_idx] = ax.bar(feature_values, avg_preds, **bar_kw)[0]\n        ax.tick_params(axis='x', rotation=90)\n    else:\n        line_idx = np.unravel_index(pd_line_idx, self.lines_.shape)\n        self.lines_[line_idx] = ax.plot(feature_values, avg_preds, **line_kw)[0]",
    "docstring": "Plot the average partial dependence. Parameters ---------- avg_preds : ndarray of shape (n_grid_points,) The average predictions for all points of for a given feature for all samples in . feature_values : ndarray of shape (n_grid_points,) The feature values for which the predictions have been computed. ax : Matplotlib axes The axis on which to plot the average PD. pd_line_idx : int The sequential index of the plot. It will be unraveled to find the matching 2D position in the grid layout. line_kw : dict Dict with keywords passed when plotting the PD plot. categorical : bool Whether feature is categorical. bar_kw: dict Dict with keywords passed when plotting the PD bars (categorical).",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\inspection\\_plot\\partial_dependence.py",
    "ast_data": "FunctionDef name:_plot_average_dependence arg:self arg:avg_preds arg:feature_values arg:ax arg:pd_line_idx arg:line_kw arg:categorical arg:bar_kw arguments arg arg arg arg arg arg arg arg If Assign Call Assign Call Call Assign Call Assign Call"
  },
  {
    "library": "tensorflow",
    "name": "split",
    "source_code": "@tf_should_use.should_use_result\ndef split(self, value, lengths, name=None):\n    return self._implementation.split(value, lengths, name=name)",
    "docstring": "Split the values of a into the TensorArray. Args: value: (N+1)-D. Tensor of type . The Tensor to split. lengths: 1-D. int32 vector with the lengths to use when splitting along its first dimension. name: A name for the operation (optional). Returns: A new TensorArray object with flow that ensures the split occurs. Use this object for all subsequent operations. Raises: ValueError: if the shape inference fails.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\tensor_array_ops.py",
    "ast_data": "FunctionDef name:split arg:self arg:value arg:lengths arg:name arguments arg arg arg arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "OutputAdapter",
    "source_code": "class OutputAdapter:\n\n    def __init__(self, steps: list[OutputAdaptStep] | None=None):\n        self._steps = steps or []\n\n    def append_step(self, step: OutputAdaptStep) -> None:\n        self._steps.append(step)\n\n    def apply(self, model_outputs: Any, model: torch.nn.Module | Callable | torch_export.ExportedProgram | None=None) -> Sequence[torch.Tensor | int | float | bool | str]:\n        for step in self._steps:\n            model_outputs = step.apply(model_outputs, model=model)\n        return model_outputs",
    "docstring": "A class that adapts the PyTorch model outputs to exported ONNX model outputs format.",
    "type": "class",
    "file_path": "pytorch\\torch\\onnx\\_internal\\io_adapter.py",
    "ast_data": "ClassDef name:OutputAdapter FunctionDef name:__init__ arg:self arg:steps arguments arg arg Assign BoolOp FunctionDef name:append_step arg:self arg:step arguments arg arg Call FunctionDef name:apply arg:self arg:model_outputs arg:model arguments arg arg arg For Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "observe_object_name",
    "source_code": "def observe_object_name(name):\n    OBSERVED_NAMES.add(name)",
    "docstring": "Observe a name and make sure it won't be used by .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\backend.py",
    "ast_data": "FunctionDef name:observe_object_name arg:name arguments arg Call"
  },
  {
    "library": "numpy",
    "name": "__add__",
    "source_code": "def __add__(self, other):\n    if self._delegate_binop(other):\n        return NotImplemented\n    return add(self, other)",
    "docstring": "Add self to other, and return a new masked array.",
    "type": "method",
    "file_path": "numpy\\numpy\\ma\\core.py",
    "ast_data": "FunctionDef name:__add__ arg:self arg:other arguments arg arg If Call Return return:yes Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "_make_sparse",
    "source_code": "def _make_sparse(arr: np.ndarray, kind: SparseIndexKind='block', fill_value=None, dtype: np.dtype | None=None):\n    assert isinstance(arr, np.ndarray)\n    if arr.ndim > 1:\n        raise TypeError('expected dimension <= 1 data')\n    if fill_value is None:\n        fill_value = na_value_for_dtype(arr.dtype)\n    if isna(fill_value):\n        mask = notna(arr)\n    else:\n        if is_string_dtype(arr.dtype):\n            arr = arr.astype(object)\n        if is_object_dtype(arr.dtype):\n            mask = splib.make_mask_object_ndarray(arr, fill_value)\n        else:\n            mask = arr != fill_value\n    length = len(arr)\n    if length != len(mask):\n        indices = mask.sp_index.indices\n    else:\n        indices = mask.nonzero()[0].astype(np.int32)\n    index = make_sparse_index(length, indices, kind)\n    sparsified_values = arr[mask]\n    if dtype is not None:\n        sparsified_values = ensure_wrapped_if_datetimelike(sparsified_values)\n        sparsified_values = astype_array(sparsified_values, dtype=dtype)\n        sparsified_values = np.asarray(sparsified_values)\n    return (sparsified_values, index, fill_value)",
    "docstring": "Convert ndarray to sparse format Parameters ---------- arr : ndarray kind : {'block', 'integer'} fill_value : NaN or another value dtype : np.dtype, optional copy : bool, default False Returns ------- (sparse_values, index, fill_value) : (ndarray, SparseIndex, Scalar)",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\arrays\\sparse\\array.py",
    "ast_data": "FunctionDef name:_make_sparse arg:arr arg:kind arg:fill_value arg:dtype arguments arg arg arg arg Call If Compare Raise Call If Compare Assign Call If Call Assign Call If Call Assign Call If Call Assign Call Assign Compare Assign Call If Compare Call Assign Assign Call Call Assign Call Assign If Compare Assign Call Assign Call Assign Call Return return:yes"
  },
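The public entry point that exercises this helper is `pd.arrays.SparseArray`; a short sketch of the sparsification it performs:

```python
import numpy as np
import pandas as pd

arr = pd.arrays.SparseArray(np.array([0, 0, 1, 2, 0]), fill_value=0)
print(arr.sp_values)         # [1 2]  -- only non-fill entries are stored
print(arr.sp_index.indices)  # [2 3]  -- their positions in the dense array
```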
  {
    "library": "matplotlib",
    "name": "get_rgb",
    "source_code": "def get_rgb(self):\n    return self._rgb",
    "docstring": "Return a tuple of three or four floats from 0-1.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\backend_bases.py",
    "ast_data": "FunctionDef name:get_rgb arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_is_loop_edge",
    "source_code": "def _is_loop_edge(op):\n    return op.type in ['NextIteration']",
    "docstring": "Returns true if the op is the end of a while-loop creating a cycle.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\tpu\\tensor_tracer_report.py",
    "ast_data": "FunctionDef name:_is_loop_edge arg:op arguments arg Return return:yes Compare"
  },
  {
    "library": "pytorch",
    "name": "ReorderInfo",
    "source_code": "@dataclass\nclass ReorderInfo:\n    initial_exposed: float = -1\n    final_exposed: float = -1\n    limiting_factor: str = 'None'\n    moves: int = 0\n\n    @property\n    def improvement(self):\n        return self.initial_exposed - self.final_exposed",
    "docstring": "Debug info describing how an individual snode was reordered",
    "type": "class",
    "file_path": "pytorch\\torch\\_inductor\\comms.py",
    "ast_data": "ClassDef name:ReorderInfo FunctionDef name:improvement arg:self arguments arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_infinite_like",
    "source_code": "def _infinite_like(tensor):\n    return torch.full_like(tensor, inf)",
    "docstring": "Helper function for obtaining infinite KL Divergence throughout",
    "type": "function",
    "file_path": "pytorch\\torch\\distributions\\kl.py",
    "ast_data": "FunctionDef name:_infinite_like arg:tensor arguments arg Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "update_dtype",
    "source_code": "def update_dtype(self, dtype) -> SparseDtype:\n    from pandas.core.dtypes.astype import astype_array\n    from pandas.core.dtypes.common import pandas_dtype\n    cls = type(self)\n    dtype = pandas_dtype(dtype)\n    if not isinstance(dtype, cls):\n        if not isinstance(dtype, np.dtype):\n            raise TypeError('sparse arrays of extension dtypes not supported')\n        fv_asarray = np.atleast_1d(np.array(self.fill_value))\n        fvarr = astype_array(fv_asarray, dtype)\n        fill_value = fvarr[0]\n        dtype = cls(dtype, fill_value=fill_value)\n    return dtype",
    "docstring": "Convert the SparseDtype to a new dtype. This takes care of converting the `dtypedtypedtypedtype` to an integer dtype). Examples -------- >>> SparseDtype(int, 0).update_dtype(float) Sparse[float64, 0.0] >>> SparseDtype(int, 1).update_dtype(SparseDtype(float, np.nan)) Sparse[float64, nan]",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\dtypes\\dtypes.py",
    "ast_data": "FunctionDef name:update_dtype arg:self arg:dtype arguments arg arg Assign Call Assign Call If Call If Call Raise Call Assign Call Call Assign Call Assign Assign Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "ToolQuit",
    "source_code": "class ToolQuit(ToolBase):\n    description = 'Quit the figure'\n    default_keymap = property(lambda self: mpl.rcParams['keymap.quit'])\n\n    def trigger(self, sender, event, data=None):\n        Gcf.destroy_fig(self.figure)",
    "docstring": "Tool to call the figure manager destroy method.",
    "type": "class",
    "file_path": "matplotlib\\lib\\matplotlib\\backend_tools.py",
    "ast_data": "ClassDef name:ToolQuit Assign Assign Call arguments arg FunctionDef name:trigger arg:self arg:sender arg:event arg:data arguments arg arg arg arg Call"
  },
  {
    "library": "sphinx",
    "name": "captioned_literal_block",
    "source_code": "class captioned_literal_block(nodes.container):\n    pass",
    "docstring": "A node for a container of literal_block having a caption.",
    "type": "class",
    "file_path": "sphinx\\sphinx\\builders\\latex\\nodes.py",
    "ast_data": "ClassDef name:captioned_literal_block"
  },
  {
    "library": "django",
    "name": "id_for_label",
    "source_code": "def id_for_label(self, id_, index='0'):\n    if id_ and self.add_id_index:\n        id_ = '%s_%s' % (id_, index)\n    return id_",
    "docstring": "Use an incremented id for each option where the main widget references the zero index.",
    "type": "method",
    "file_path": "django\\django\\forms\\widgets.py",
    "ast_data": "FunctionDef name:id_for_label arg:self arg:id_ arg:index arguments arg arg arg If BoolOp Assign Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "has_fignum",
    "source_code": "@classmethod\ndef has_fignum(cls, num):\n    return num in cls.figs",
    "docstring": "Return whether figure number *num* exists.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\_pylab_helpers.py",
    "ast_data": "FunctionDef name:has_fignum arg:cls arg:num arguments arg arg Return return:yes Compare"
  },
  {
    "library": "matplotlib",
    "name": "to_dict",
    "source_code": "def to_dict(self):\n    return self.__dict__.copy()",
    "docstring": "Return a copy of the subplot parameters as a dict.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\gridspec.py",
    "ast_data": "FunctionDef name:to_dict arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_sort_function_defs",
    "source_code": "def _sort_function_defs(library, function_deps):\n    edges = collections.defaultdict(list)\n    in_count = collections.defaultdict(lambda: 0)\n    for fname, deps in function_deps.items():\n        for dep in deps:\n            edges[dep].append(fname)\n            in_count[fname] += 1\n    ready = [fdef.signature.name for fdef in library.function if in_count[fdef.signature.name] == 0]\n    output = []\n    while ready:\n        node = ready.pop()\n        output.append(node)\n        for dest in edges[node]:\n            in_count[dest] -= 1\n            if not in_count[dest]:\n                ready.append(dest)\n    if len(output) != len(library.function):\n        failed_to_resolve = sorted(set(in_count.keys()) - set(output))\n        raise ValueError('There is a cyclic dependency between functions. ', f'Could not resolve {failed_to_resolve}.')\n    reverse = {fdef.signature.name: fdef for fdef in library.function}\n    return [reverse[x] for x in output]",
    "docstring": "Return a topologic sort of FunctionDefs in a library.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\saved_model\\function_deserialization.py",
    "ast_data": "FunctionDef name:_sort_function_defs arg:library arg:function_deps arguments arg arg Assign Call Assign Call arguments For Call For Call Assign Compare Assign While Assign Call Call For If Call If Compare Call Call Assign Call Call Call Call Raise Call Assign Return return:yes"
  },
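The same Kahn-style topological sort, stripped of the FunctionDef bookkeeping, as a standalone illustration:

```python
import collections

def topo_sort(nodes, deps):
    edges = collections.defaultdict(list)
    in_count = {n: 0 for n in nodes}
    for name, ds in deps.items():
        for dep in ds:
            edges[dep].append(name)
            in_count[name] += 1
    ready = [n for n in nodes if in_count[n] == 0]
    output = []
    while ready:
        node = ready.pop()
        output.append(node)
        for dest in edges[node]:
            in_count[dest] -= 1
            if not in_count[dest]:
                ready.append(dest)
    if len(output) != len(nodes):
        raise ValueError("cyclic dependency between functions")
    return output

# 'f' calls 'g', which calls 'h': dependencies come first in the output.
print(topo_sort(["f", "g", "h"], {"f": ["g"], "g": ["h"], "h": []}))
# ['h', 'g', 'f']
```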
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, inputs, num_clusters, initial_clusters, distance_metric, random_seed, kmeans_plus_plus_num_retries, kmc2_chain_length, cluster_centers, cluster_centers_updated, cluster_centers_initialized):\n    self._inputs = inputs\n    self._num_clusters = num_clusters\n    self._initial_clusters = initial_clusters\n    self._distance_metric = distance_metric\n    self._seed = random_seed\n    self._kmeans_plus_plus_num_retries = kmeans_plus_plus_num_retries\n    self._kmc2_chain_length = kmc2_chain_length\n    self._cluster_centers = cluster_centers\n    self._cluster_centers_updated = cluster_centers_updated\n    self._cluster_centers_initialized = cluster_centers_initialized\n    self._num_selected = array_ops.shape(self._cluster_centers)[0]\n    self._num_remaining = self._num_clusters - self._num_selected\n    self._num_data = math_ops.add_n([array_ops.shape(i)[0] for i in self._inputs])",
    "docstring": "Creates an op factory. Args: inputs: See KMeans constructor. num_clusters: An integer Tensor providing the number of clusters. initial_clusters: See KMeans constructor. distance_metric: See KMeans constructor. random_seed: See KMeans constructor. kmeans_plus_plus_num_retries: See KMeans constructor. kmc2_chain_length: See KMeans constructor. cluster_centers: The TF variable holding the initial centers. It may already contain some centers when the op is executed. cluster_centers_updated: A second TF variable to hold a copy of the initial centers, used for full-batch mode. In mini-batch mode, cluster_centers_updated is the same variable as cluster_centers. cluster_centers_initialized: A boolean TF variable that will be set to true when all the initial centers have been chosen.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\clustering_ops.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:inputs arg:num_clusters arg:initial_clusters arg:distance_metric arg:random_seed arg:kmeans_plus_plus_num_retries arg:kmc2_chain_length arg:cluster_centers arg:cluster_centers_updated arg:cluster_centers_initialized arguments arg arg arg arg arg arg arg arg arg arg arg Assign Assign Assign Assign Assign Assign Assign Assign Assign Assign Assign Call Assign Assign Call Call"
  },
  {
    "library": "sphinx",
    "name": "Catalog",
    "source_code": "class Catalog:\n    __slots__ = ('metadata',)\n\n    def __init__(self) -> None:\n        self.metadata: dict[str, list[tuple[str, int, str]]] = {}\n\n    def add(self, msg: str, origin: Element | MsgOrigin) -> None:\n        if not hasattr(origin, 'uid'):\n            return\n        msg_metadata = self.metadata.setdefault(msg, [])\n        line = line if (line := origin.line) is not None else -1\n        msg_metadata.append((origin.source or '', line, origin.uid))\n\n    def __iter__(self) -> Iterator[Message]:\n        for message, msg_metadata in self.metadata.items():\n            positions = sorted(set(map(operator.itemgetter(0, 1), msg_metadata)))\n            uuids = list(map(operator.itemgetter(2), msg_metadata))\n            yield Message(text=message, locations=positions, uuids=uuids)\n\n    @property\n    def messages(self) -> list[str]:\n        return list(self.metadata)",
    "docstring": "Catalog of translatable messages.",
    "type": "class",
    "file_path": "sphinx\\sphinx\\builders\\gettext.py",
    "ast_data": "ClassDef name:Catalog Assign FunctionDef name:__init__ arg:self arguments arg FunctionDef name:add arg:self arg:msg arg:origin arguments arg arg arg If Call Return return:no Assign Call Assign Compare Call BoolOp FunctionDef name:__iter__ arg:self arguments arg For Call Assign Call Call Call Call Assign Call Call Call Call FunctionDef name:messages arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "all_reduce",
    "source_code": "def all_reduce(t, group_size, group_key, instance_key, merge_op='Add', final_op='Id', subdiv_offsets=(0,), communication_hint='auto', timeout=0):\n    if group_size < 1:\n        raise ValueError(f'Parameter `group_size` to all_reduce must be at least 1. Received: {group_size}.')\n    return gen_collective_ops.collective_reduce(t, group_size=group_size, group_key=group_key, instance_key=instance_key, merge_op=merge_op, final_op=final_op, subdiv_offsets=subdiv_offsets, communication_hint=communication_hint.lower(), timeout_seconds=timeout)",
    "docstring": "Reduces tensors collectively, across devices. Args: t: the tensor to be reduced. group_size: the total number of tensors to be collectively reduced. Each must reside on a different device. Should be a positive integer. group_key: an integer identifying the group of devices. instance_key: an integer identifying the participating group of Ops. merge_op: string naming the binary Op to be applied to compute each partial reduction. final_op: string naming the unary Op to be applied to each fully reduced value. Can be 'Id' for no operation. subdiv_offsets: a list of integer offsets into the tensor at which each independent subdivision should begin. Use [0] if no subdivision should be done. communication_hint: preferred collective communication. The implementation may fall back to another mechanism. Options include , , and . timeout: a float. If set to a non zero, set a completion timeout to detect staleness. If the timer goes off, a DeadlineExceededError is raised. The timeout value in seconds. This feature is experimental. Returns: An Op implementing the distributed reduction. Raises: ValueError: if any of the input parameter constraints are not met.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\collective_ops.py",
    "ast_data": "FunctionDef name:all_reduce arg:t arg:group_size arg:group_key arg:instance_key arg:merge_op arg:final_op arg:subdiv_offsets arg:communication_hint arg:timeout arguments arg arg arg arg arg arg arg arg arg If Compare Raise Call Return return:yes Call Call"
  },
  {
    "library": "pandas",
    "name": "_scalar_from_string",
    "source_code": "def _scalar_from_string(self, value: str) -> DTScalarOrNaT:\n    raise AbstractMethodError(self)",
    "docstring": "Construct a scalar type from a string. Parameters ---------- value : str Returns ------- Period, Timestamp, or Timedelta, or NaT Whatever the type of `` before unboxing the result.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\arrays\\datetimelike.py",
    "ast_data": "FunctionDef name:_scalar_from_string arg:self arg:value arguments arg arg Raise Call"
  },
  {
    "library": "pytorch",
    "name": "emit_nvtx",
    "source_code": "class emit_nvtx:\n\n    def __init__(self, enabled=True, record_shapes=False):\n        self.enabled = enabled\n        self.entered = False\n        self.record_shapes = record_shapes\n\n    def __enter__(self):\n        if not self.enabled:\n            return\n        if self.entered:\n            raise RuntimeError('NVTX annotation context manager is not reentrant')\n        self.entered = True\n        torch.cuda.synchronize()\n        _run_on_profiler_start()\n        _enable_profiler(ProfilerConfig(ProfilerState.NVTX, self.record_shapes, False, False, False, False, _ExperimentalConfig()), set())\n        return self\n\n    def __exit__(self, exc_type, exc_val, exc_tb):\n        if not self.enabled:\n            return\n        torch.cuda.synchronize()\n        _disable_profiler()\n        _run_on_profiler_stop()\n        return False",
    "docstring": "Context manager that makes every autograd operation emit an NVTX range. It is useful when running the program under nvprof:: nvprof --profile-from-start off -o trace_name.prof -- Unfortunately, there's no way to force nvprof to flush the data it collected to disk, so for CUDA profiling one has to use this context manager to annotate nvprof traces and wait for the process to exit before inspecting them. Then, either NVIDIA Visual Profiler (nvvp) can be used to visualize the timeline, or :func: can load the results for inspection e.g. in Python REPL. .. warning: This context manager should not be called recursively, i.e. at most one instance should be enabled at any given time. Args: enabled (bool, optional): Setting `emit_nvtxemit_nvtxseqseq` numbers in forward-pass ranges is not guaranteed to be 1 to 1. The sequence numbers alone may not be enough to fully disambiguate which forward function created which backward Function object. You may need to make a judgment based on analytic knowledge of what the expected correspondence should be.",
    "type": "class",
    "file_path": "pytorch\\torch\\autograd\\profiler.py",
    "ast_data": "ClassDef name:emit_nvtx FunctionDef name:__init__ arg:self arg:enabled arg:record_shapes arguments arg arg arg Assign Assign Assign FunctionDef name:__enter__ arg:self arguments arg If Return return:no If Raise Call Assign Call Call Call Call Call Call Return return:yes FunctionDef name:__exit__ arg:self arg:exc_type arg:exc_val arg:exc_tb arguments arg arg arg arg If Return return:no Call Call Call Return return:yes"
  },
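A hedged usage sketch for `emit_nvtx`: the context manager only emits annotations, so the process must be run under an external profiler (nvprof/Nsight) for the ranges to be captured. The model and input below are placeholders and assume a CUDA device is available:

```python
# Annotate autograd ops with NVTX ranges while an external profiler records.
import torch

model = torch.nn.Linear(8, 8).cuda()
x = torch.randn(4, 8, device='cuda')
with torch.autograd.profiler.emit_nvtx():
    model(x).sum().backward()
```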
  {
    "library": "pytorch",
    "name": "NoopObserver",
    "source_code": "class NoopObserver(ObserverBase):\n\n    def __init__(self, dtype=torch.float16, custom_op_name='') -> None:\n        super().__init__(dtype=dtype, is_dynamic=False)\n        self.dtype = dtype\n        self.custom_op = custom_op_name\n\n    def forward(self, x):\n        return x\n\n    @torch.jit.export\n    def calculate_qparams(self):\n        raise Exception('calculate_qparams should not be called for NoopObserver')",
    "docstring": "Observer that doesn't do anything and just passes its configuration to the quantized module's ``. Primarily used for quantization to float16 which doesn't require determining ranges. Args: dtype: Quantized data type custom_op_name: (temporary) specify this observer for an operator that doesn't require any observation (Can be used in Graph Mode Passes for special case ops).",
    "type": "class",
    "file_path": "pytorch\\torch\\ao\\quantization\\observer.py",
    "ast_data": "ClassDef name:NoopObserver FunctionDef name:__init__ arg:self arg:dtype arg:custom_op_name arguments arg arg arg Call Call Assign Assign FunctionDef name:forward arg:self arg:x arguments arg arg Return return:yes FunctionDef name:calculate_qparams arg:self arguments arg Raise Call"
  },
  {
    "library": "tensorflow",
    "name": "RepeatVector",
    "source_code": "class RepeatVector(Layer):\n\n    def __init__(self, n, **kwargs):\n        super(RepeatVector, self).__init__(**kwargs)\n        self.n = n\n        if not isinstance(n, int):\n            raise TypeError(f'Expected an integer value for `n`, got {type(n)}.')\n        self.input_spec = InputSpec(ndim=2)\n\n    def compute_output_shape(self, input_shape):\n        input_shape = tensor_shape.TensorShape(input_shape).as_list()\n        return tensor_shape.TensorShape([input_shape[0], self.n, input_shape[1]])\n\n    def call(self, inputs):\n        return K.repeat(inputs, self.n)\n\n    def get_config(self):\n        config = {'n': self.n}\n        base_config = super(RepeatVector, self).get_config()\n        return dict(list(base_config.items()) + list(config.items()))",
    "docstring": "Repeats the input n times. Example: Args: n: Integer, repetition factor. Input shape: 2D tensor of shape . Output shape: 3D tensor of shape .",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\layers\\core.py",
    "ast_data": "ClassDef name:RepeatVector FunctionDef name:__init__ arg:self arg:n arguments arg arg arg Call Call Assign If Call Raise Call Call Assign Call FunctionDef name:compute_output_shape arg:self arg:input_shape arguments arg arg Assign Call Call Return return:yes Call FunctionDef name:call arg:self arg:inputs arguments arg arg Return return:yes Call FunctionDef name:get_config arg:self arguments arg Assign Assign Call Call Return return:yes Call Call Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_AddShardedSaveOps",
    "source_code": "def _AddShardedSaveOps(self, filename_tensor, per_device):\n    if self._write_version == saver_pb2.SaverDef.V2:\n        return self._AddShardedSaveOpsForV2(filename_tensor, per_device)\n    num_shards = len(per_device)\n    sharded_saves = []\n    num_shards_tensor = constant_op.constant(num_shards, name='num_shards')\n    for shard, (device, saveables) in enumerate(per_device):\n        with ops.device(device):\n            sharded_filename = self.sharded_filename(filename_tensor, shard, num_shards_tensor)\n            sharded_saves.append(self._AddSaveOps(sharded_filename, saveables))\n    with ops.control_dependencies([x.op for x in sharded_saves]):\n        return gen_io_ops.sharded_filespec(filename_tensor, num_shards_tensor)",
    "docstring": "Add ops to save the params per shard. Args: filename_tensor: a scalar String Tensor. per_device: A list of (device, BaseSaverBuilder.SaveableObject) pairs, as returned by _GroupByDevices(). Returns: An op to save the variables.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\training\\saver.py",
    "ast_data": "FunctionDef name:_AddShardedSaveOps arg:self arg:filename_tensor arg:per_device arguments arg arg arg If Compare Return return:yes Call Assign Call Assign Assign Call For Call With Call Assign Call Call Call With Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "CachingScopeLocal",
    "source_code": "class CachingScopeLocal(threading.local):\n\n    def __init__(self):\n        super(CachingScopeLocal, self).__init__()\n        self.new_cache_scope_count = 0\n        self.cache_scope_exited_count = 0\n\n    def enter_scope(self):\n        self.new_cache_scope_count += 1\n\n    def exit_scope(self):\n        self.cache_scope_exited_count += 1\n\n    def in_caching_scope(self):\n        return self.new_cache_scope_count > self.cache_scope_exited_count",
    "docstring": "Class for maintaining thread local state for caching scope.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\distribute_utils.py",
    "ast_data": "ClassDef name:CachingScopeLocal FunctionDef name:__init__ arg:self arguments arg Call Call Assign Assign FunctionDef name:enter_scope arg:self arguments arg FunctionDef name:exit_scope arg:self arguments arg FunctionDef name:in_caching_scope arg:self arguments arg Return return:yes Compare"
  },
  {
    "library": "pytorch",
    "name": "set_name",
    "source_code": "def set_name(self, name: str) -> None:\n    if self._thread:\n        raise RuntimeError('The timer has already started.')\n    self._name = name",
    "docstring": "Set the name of the timer. The specified name will be assigned to the background thread and serves for debugging and troubleshooting purposes.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\elastic\\rendezvous\\utils.py",
    "ast_data": "FunctionDef name:set_name arg:self arg:name arguments arg arg If Raise Call Assign"
  },
  {
    "library": "pytorch",
    "name": "load_ratio_left",
    "source_code": "def load_ratio_left(M: int, N: int, O: int, P: int, m: int, n: int, o: int, p: int) -> float:\n    base = M * N + N * O + M * O + O * P\n    gemm = ceildiv(M, m) * ceildiv(P, p) * ceildiv(O, o) * (o * p + ceildiv(N, n) * (m * n + n * o))\n    return base / gemm",
    "docstring": "compute the ratio of estimated numbers of loads in baseline and b2bgemm M, N, O, P are matrix sizes m, n, o, p are block sizes | | baseline (lower bound) | b2bgemm | load | M * N + N * O + M * O + O * P | M / m * P / p * O / o * (o * p + N / n * (m * n + n * o)) | store | M * O + M * P | M * P b2bgemm is always better on stores, but for loads we need to find out beneficial cases using this function",
    "type": "function",
    "file_path": "pytorch\\torch\\_inductor\\fx_passes\\b2b_gemm.py",
    "ast_data": "FunctionDef name:load_ratio_left arg:M arg:N arg:O arg:P arg:m arg:n arg:o arg:p arguments arg arg arg arg arg arg arg arg Assign Assign Call Call Call Call Return return:yes"
  },
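Since `load_ratio_left` is pure arithmetic, a quick evaluation shows how it is meant to be read. `ceildiv` is reimplemented here under the usual ceiling-division assumption, and the sizes are made up:

```python
# Reimplementation with an explicit ceildiv; a ratio > 1 means b2bgemm is
# estimated to perform fewer loads than the baseline.
def ceildiv(a: int, b: int) -> int:
    return -(-a // b)

def load_ratio_left(M, N, O, P, m, n, o, p):
    base = M * N + N * O + M * O + O * P
    gemm = (ceildiv(M, m) * ceildiv(P, p) * ceildiv(O, o)
            * (o * p + ceildiv(N, n) * (m * n + n * o)))
    return base / gemm

print(load_ratio_left(1024, 1024, 1024, 1024, 128, 128, 64, 64))
```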
  {
    "library": "tensorflow",
    "name": "_is_array_like",
    "source_code": "def _is_array_like(v):\n    return hasattr(v, '__getitem__') and hasattr(v, 'shape') and hasattr(v, 'dtype') and hasattr(v, '__len__')",
    "docstring": "Return True if v is a Tensor, array, or is array-like.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\data_adapter.py",
    "ast_data": "FunctionDef name:_is_array_like arg:v arguments arg Return return:yes BoolOp Call Call Call Call"
  },
  {
    "library": "kornia",
    "name": "_normalize_input",
    "source_code": "@staticmethod\ndef _normalize_input(x: torch.Tensor, eps: float=1e-06) -> torch.Tensor:\n    sp, mp = torch.std_mean(x, dim=(-3, -2, -1), keepdim=True)\n    return (x - mp.detach()) / (sp.detach() + eps)",
    "docstring": "Normalize the input by batch.",
    "type": "method",
    "file_path": "kornia\\kornia\\feature\\affine_shape.py",
    "ast_data": "FunctionDef name:_normalize_input arg:x arg:eps arguments arg arg Assign Call Return return:yes Call Call"
  },
  {
    "library": "scikit-learn",
    "name": "is_torch_namespace",
    "source_code": "@lru_cache(100)\ndef is_torch_namespace(xp: Namespace) -> bool:\n    return xp.__name__ in {'torch', _compat_module_name() + '.torch'}",
    "docstring": "Returns True if is a PyTorch namespace. This includes both PyTorch itself and the version wrapped by array-api-compat. See Also -------- array_namespace is_numpy_namespace is_cupy_namespace is_ndonnx_namespace is_dask_namespace is_jax_namespace is_pydata_sparse_namespace is_array_api_strict_namespace",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\externals\\array_api_compat\\common\\_helpers.py",
    "ast_data": "FunctionDef name:is_torch_namespace arg:xp arguments arg Return return:yes Compare Call Call"
  },
  {
    "library": "pytorch",
    "name": "OpName",
    "source_code": "@dataclasses.dataclass(frozen=True, eq=True)\nclass OpName:\n    namespace: str\n    op_name: str\n    overload: str\n\n    @classmethod\n    def from_name_parts(cls, namespace: str, op_name: str, overload: str | None=None) -> OpName:\n        if overload is None or overload == '':\n            overload = 'default'\n        return cls(namespace, op_name, overload)\n\n    @classmethod\n    def from_qualified_name(cls, qualified_name: str) -> OpName:\n        namespace, opname_overload = qualified_name.split('::')\n        op_name, *overload = opname_overload.split('.', 1)\n        overload = overload[0] if overload else 'default'\n        return cls(namespace, op_name, overload)\n\n    @classmethod\n    def from_op_overload(cls, op_overload: torch._ops.OpOverload) -> OpName:\n        return cls.from_qualified_name(op_overload.name())\n\n    @classmethod\n    def from_builtin_function(cls, builtin_function: types.BuiltinFunctionType) -> OpName:\n        op = builtin_function.__name__\n        module = builtin_function.__module__\n        return cls.from_qualified_name(module + '::' + op)\n\n    def qualified_name(self) -> str:\n        return f'{self.namespace}::{self.op_name}.{self.overload}'",
    "docstring": "A class representing an operator name in internal ONNX converter.",
    "type": "class",
    "file_path": "pytorch\\torch\\onnx\\_internal\\fx\\registration.py",
    "ast_data": "ClassDef name:OpName FunctionDef name:from_name_parts arg:cls arg:namespace arg:op_name arg:overload arguments arg arg arg arg If BoolOp Compare Compare Assign Return return:yes Call FunctionDef name:from_qualified_name arg:cls arg:qualified_name arguments arg arg Assign Call Assign Call Assign Return return:yes Call FunctionDef name:from_op_overload arg:cls arg:op_overload arguments arg arg Return return:yes Call Call FunctionDef name:from_builtin_function arg:cls arg:builtin_function arguments arg arg Assign Assign Return return:yes Call FunctionDef name:qualified_name arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "django",
    "name": "concrete_fields",
    "source_code": "@cached_property\ndef concrete_fields(self):\n    return make_immutable_fields_list('concrete_fields', (f for f in self.fields if f.concrete))",
    "docstring": "Return a list of all concrete fields on the model and its parents. Private API intended only to be used by Django itself; get_fields() combined with filtering of field properties is the public API for obtaining this field list.",
    "type": "method",
    "file_path": "django\\django\\db\\models\\options.py",
    "ast_data": "FunctionDef name:concrete_fields arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "_validate_variable_name",
    "source_code": "def _validate_variable_name(self, name: str) -> str:\n    for c in name:\n        if (c < 'A' or c > 'Z') and (c < 'a' or c > 'z') and (c < '0' or c > '9') and (c != '_'):\n            name = name.replace(c, '_')\n    return name",
    "docstring": "Validate variable names for Stata export. Parameters ---------- name : str Variable name Returns ------- str The validated name with invalid characters replaced with underscores. Notes ----- Stata 114 and 117 support ascii characters in a-z, A-Z, 0-9 and _.",
    "type": "method",
    "file_path": "pandas\\pandas\\io\\stata.py",
    "ast_data": "FunctionDef name:_validate_variable_name arg:self arg:name arguments arg arg For If BoolOp BoolOp Compare Compare BoolOp Compare Compare BoolOp Compare Compare Compare Assign Call Return return:yes"
  },
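A standalone sketch of the same sanitization rule as `_validate_variable_name` (every character outside `[A-Za-z0-9_]` becomes an underscore), written as a comprehension rather than in-place replacement:

```python
# Keep ASCII letters, digits and '_'; replace everything else with '_'.
def validate_variable_name(name: str) -> str:
    return ''.join(
        c if (c.isascii() and (c.isalnum() or c == '_')) else '_'
        for c in name
    )

print(validate_variable_name('gdp per-capita (2020)'))  # gdp_per_capita__2020_
```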
  {
    "library": "django",
    "name": "_get_obj_does_not_exist_redirect",
    "source_code": "def _get_obj_does_not_exist_redirect(self, request, opts, object_id):\n    msg = _('%(name)s with ID “%(key)s” doesn’t exist. Perhaps it was deleted?') % {'name': opts.verbose_name, 'key': unquote(object_id)}\n    self.message_user(request, msg, messages.WARNING)\n    url = reverse('admin:index', current_app=self.admin_site.name)\n    return HttpResponseRedirect(url)",
    "docstring": "Create a message informing the user that the object doesn't exist and return a redirect to the admin index page.",
    "type": "method",
    "file_path": "django\\django\\contrib\\admin\\options.py",
    "ast_data": "FunctionDef name:_get_obj_does_not_exist_redirect arg:self arg:request arg:opts arg:object_id arguments arg arg arg arg Assign Call Call Call Assign Call Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "_simplify_cells",
    "source_code": "def _simplify_cells(d):\n    for key in d:\n        if isinstance(d[key], mat_struct):\n            d[key] = _matstruct_to_dict(d[key])\n        elif _has_struct(d[key]):\n            d[key] = _inspect_cell_array(d[key])\n    return d",
    "docstring": "Convert mat objects in dict to nested dicts.",
    "type": "function",
    "file_path": "scipy\\scipy\\io\\matlab\\_mio5.py",
    "ast_data": "FunctionDef name:_simplify_cells arg:d arguments arg For If Call Assign Call If Call Assign Call Return return:yes"
  },
  {
    "library": "pandas",
    "name": "_classes_and_not_datetimelike",
    "source_code": "def _classes_and_not_datetimelike(*klasses) -> Callable:\n    return lambda tipo: issubclass(tipo, klasses) and (not issubclass(tipo, (np.datetime64, np.timedelta64)))",
    "docstring": "Evaluate if the tipo is a subclass of the klasses and not a datetimelike.",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\dtypes\\common.py",
    "ast_data": "FunctionDef name:_classes_and_not_datetimelike arguments arg Return return:yes arguments arg BoolOp Call Call"
  },
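The predicate factory above exists because `np.timedelta64` subclasses `np.signedinteger`, so a plain `issubclass` check against integer types would wrongly accept it. A small local reimplementation to demonstrate:

```python
import numpy as np

def classes_and_not_datetimelike(*klasses):
    return lambda tipo: (issubclass(tipo, klasses)
                         and not issubclass(tipo, (np.datetime64, np.timedelta64)))

is_plain_integer = classes_and_not_datetimelike(np.integer)
print(is_plain_integer(np.int64))              # True
print(is_plain_integer(np.timedelta64))        # False, despite the next line
print(issubclass(np.timedelta64, np.integer))  # True -- the trap being avoided
```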
  {
    "library": "pytorch",
    "name": "_get_instruction_front",
    "source_code": "def _get_instruction_front(instructions: list[Instruction], idx: int):\n    target = instructions[idx]\n    for offset in (1, 2, 3):\n        if idx >= offset and instructions[idx - offset].opcode == dis.EXTENDED_ARG:\n            target = instructions[idx - offset]\n        else:\n            break\n    return target",
    "docstring": "i.e. get the first EXTENDED_ARG instruction (if any) when targeting instructions[idx] with a jump.",
    "type": "function",
    "file_path": "pytorch\\torch\\_dynamo\\bytecode_transformation.py",
    "ast_data": "FunctionDef name:_get_instruction_front arg:instructions arg:idx arguments arg arg Assign For If BoolOp Compare Compare Assign Return return:yes"
  },
  {
    "library": "scipy",
    "name": "_augknt",
    "source_code": "def _augknt(x, k):\n    return np.r_[(x[0],) * k, x, (x[-1],) * k]",
    "docstring": "Construct a knot vector appropriate for the order-k interpolation.",
    "type": "function",
    "file_path": "scipy\\scipy\\interpolate\\_bsplines.py",
    "ast_data": "FunctionDef name:_augknt arg:x arg:k arguments arg arg Return return:yes"
  },
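A quick NumPy demonstration of the knot augmentation in `_augknt`: the boundary knots are replicated `k` times, which clamps a degree-`k` B-spline at the interval ends:

```python
import numpy as np

def augknt(x, k):
    # np.r_ concatenates the k-fold repeated endpoints with the interior knots
    return np.r_[(x[0],) * k, x, (x[-1],) * k]

print(augknt(np.array([0.0, 1.0, 2.0, 3.0]), 2))
# [0. 0. 0. 1. 2. 3. 3. 3.]
```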
  {
    "library": "scikit-learn",
    "name": "predict",
    "source_code": "@available_if(_estimator_has('predict', delegates=('final_estimator_', 'final_estimator')))\ndef predict(self, X, **predict_params):\n    if _routing_enabled():\n        routed_params = process_routing(self, 'predict', **predict_params)\n    else:\n        routed_params = Bunch()\n        routed_params.final_estimator_ = Bunch(predict={})\n        routed_params.final_estimator_.predict = predict_params\n    y_pred = super().predict(X, **routed_params.final_estimator_['predict'])\n    if isinstance(self._label_encoder, list):\n        y_pred = np.array([self._label_encoder[target_idx].inverse_transform(target) for target_idx, target in enumerate(y_pred.T)]).T\n    else:\n        y_pred = self._label_encoder.inverse_transform(y_pred)\n    return y_pred",
    "docstring": "Predict target for X. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) Training vectors, where is the number of samples and is the number of features. **predict_params : dict of str -> obj Parameters to the called by the . Note that this may be used to return uncertainties from some estimators with or . Be aware that it will only account for uncertainty in the final estimator. - If (default): Parameters directly passed to the method of the . - If : Parameters safely routed to the method of the . See :ref: for more details. .. versionchanged:: 1.6 can be routed via metadata routing API. Returns ------- y_pred : ndarray of shape (n_samples,) or (n_samples, n_output) Predicted targets.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\ensemble\\_stacking.py",
    "ast_data": "FunctionDef name:predict arg:self arg:X arguments arg arg arg If Call Assign Call Assign Call Assign Call Assign Assign Call Call If Call Assign Call Call Call Assign Call Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "reverse_sequence",
    "source_code": "@tf_export(v1=['reverse_sequence'])\n@deprecation.deprecated_args(None, 'seq_dim is deprecated, use seq_axis instead', 'seq_dim')\n@deprecation.deprecated_args(None, 'batch_dim is deprecated, use batch_axis instead', 'batch_dim')\ndef reverse_sequence(input, seq_lengths, seq_axis=None, batch_axis=None, name=None, seq_dim=None, batch_dim=None):\n    seq_axis = deprecation.deprecated_argument_lookup('seq_axis', seq_axis, 'seq_dim', seq_dim)\n    batch_axis = deprecation.deprecated_argument_lookup('batch_axis', batch_axis, 'batch_dim', batch_dim)\n    return gen_array_ops.reverse_sequence(input=input, seq_lengths=seq_lengths, seq_dim=seq_axis, batch_dim=batch_axis, name=name)",
    "docstring": "Reverses variable length slices. This op first slices along the dimension , and for each slice , reverses the first elements along the dimension . The elements of must obey TensorTensorint32int64input.dims(batch_axis)max(seq_lengths) <= input.dims(seq_axis)intint0`. The dimension along which reversal is performed. name: A name for the operation (optional). Returns: A Tensor. Has the same type as input.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\array_ops.py",
    "ast_data": "FunctionDef name:reverse_sequence arg:input arg:seq_lengths arg:seq_axis arg:batch_axis arg:name arg:seq_dim arg:batch_dim arguments arg arg arg arg arg arg arg Assign Call Assign Call Return return:yes Call Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_compute_device_summary_from_list",
    "source_code": "def _compute_device_summary_from_list(name, device_assignment_list, prefix=''):\n    if not device_assignment_list:\n        message = \"No device assignments were active during op '%s' creation.\"\n        message %= name\n        return prefix + message\n    str_list = []\n    str_list.append(\"%sDevice assignments active during op '%s' creation:\" % (prefix, name))\n    for traceable_obj in device_assignment_list:\n        location_summary = '<{file}:{line}>'.format(file=traceable_obj.filename, line=traceable_obj.lineno)\n        subs = {'prefix': prefix, 'indent': '  ', 'dev_name': traceable_obj.obj, 'loc': location_summary}\n        str_list.append('{prefix}{indent}with tf.device({dev_name}): {loc}'.format(**subs))\n    return '\\n'.join(str_list)",
    "docstring": "Return a summary of an op's device function stack. Args: name: The name of the op. device_assignment_list: The op._device_assignments list. prefix: An optional string prefix used before each line of the multi- line string returned by this function. Returns: A multi-line string similar to: Device assignments active during op 'foo' creation: with tf.device(/cpu:0): with tf.device(some_func): The first line will have no padding to its left by default. Subsequent lines will have two spaces of left-padding. Use the prefix argument to increase indentation.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\error_interpolation.py",
    "ast_data": "FunctionDef name:_compute_device_summary_from_list arg:name arg:device_assignment_list arg:prefix arguments arg arg arg If Assign Return return:yes Assign Call For Assign Call Assign Call Call Return return:yes Call"
  },
  {
    "library": "django",
    "name": "constraint_checks_disabled",
    "source_code": "@contextmanager\ndef constraint_checks_disabled(self):\n    disabled = self.disable_constraint_checking()\n    try:\n        yield\n    finally:\n        if disabled:\n            self.enable_constraint_checking()",
    "docstring": "Disable foreign key constraint checking.",
    "type": "method",
    "file_path": "django\\django\\db\\backends\\base\\base.py",
    "ast_data": "FunctionDef name:constraint_checks_disabled arg:self arguments arg Assign Call Try If Call"
  },
  {
    "library": "pytorch",
    "name": "WrappedFunction",
    "source_code": "@dataclasses.dataclass(frozen=True)\nclass WrappedFunction:\n    model: Callable[..., Any]\n    static_input_idxs: Sequence[int]\n    id: FunctionID\n    constants: tuple[torch.Tensor, ...]\n    placeholders: Sequence[PlaceholderInfo]\n    mutated_input_idxs: Sequence[int]",
    "docstring": "Represents a function that you want to record for CUDA graph replay, with a little more metadata so we can identify if we have an applicable CUDA graph in our CUDA graph tree for it.",
    "type": "class",
    "file_path": "pytorch\\torch\\_inductor\\cudagraph_utils.py",
    "ast_data": "ClassDef name:WrappedFunction Call"
  },
  {
    "library": "django",
    "name": "error",
    "source_code": "def error(request, message, extra_tags='', fail_silently=False):\n    add_message(request, constants.ERROR, message, extra_tags=extra_tags, fail_silently=fail_silently)",
    "docstring": "Add a message with the `` level.",
    "type": "function",
    "file_path": "django\\django\\contrib\\messages\\api.py",
    "ast_data": "FunctionDef name:error arg:request arg:message arg:extra_tags arg:fail_silently arguments arg arg arg arg Call"
  },
  {
    "library": "numpy",
    "name": "gnu_version_match",
    "source_code": "def gnu_version_match(self, version_string):\n    while version_string.startswith('gfortran: warning'):\n        version_string = version_string[version_string.find('\\n') + 1:].strip()\n    if len(version_string) <= 20:\n        m = re.search('([0-9.]+)', version_string)\n        if m:\n            if version_string.startswith('GNU Fortran'):\n                return ('g77', m.group(1))\n            elif m.start() == 0:\n                return ('gfortran', m.group(1))\n    else:\n        m = re.search('GNU Fortran\\\\s+95.*?([0-9-.]+)', version_string)\n        if m:\n            return ('gfortran', m.group(1))\n        m = re.search('GNU Fortran.*?\\\\-?([0-9-.]+\\\\.[0-9-.]+)', version_string)\n        if m:\n            v = m.group(1)\n            if v.startswith(('0', '2', '3')):\n                return ('g77', v)\n            else:\n                return ('gfortran', v)\n    err = 'A valid Fortran version was not found in this string:\\n'\n    raise ValueError(err + version_string)",
    "docstring": "Handle the different versions of GNU fortran compilers",
    "type": "method",
    "file_path": "numpy\\numpy\\distutils\\fcompiler\\gnu.py",
    "ast_data": "FunctionDef name:gnu_version_match arg:self arg:version_string arguments arg arg While Call Assign Call Call If Compare Call Assign Call If If Call Return return:yes Call If Compare Call Return return:yes Call Assign Call If Return return:yes Call Assign Call If Assign Call If Call Return return:yes Return return:yes Assign Raise Call"
  },
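The branching in `gnu_version_match` hinges on a couple of regexes; the checks below mirror its long-string branch on invented sample strings (they are not captured compiler output):

```python
import re

for s in ['GNU Fortran (GCC) 4.8.5', 'GNU Fortran 3.4.6 (something)']:
    m = re.search(r'GNU Fortran.*?\-?([0-9-.]+\.[0-9-.]+)', s)
    if m:
        v = m.group(1)
        # versions starting 0/2/3 are treated as g77, newer ones as gfortran
        compiler = 'g77' if v.startswith(('0', '2', '3')) else 'gfortran'
        print(compiler, v)  # gfortran 4.8.5 / g77 3.4.6
```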
  {
    "library": "pytorch",
    "name": "local_chunks",
    "source_code": "@property\ndef local_chunks(self) -> list[ChunkStorageMetadata]:\n    return self._storage_meta.chunks",
    "docstring": "Returns a :class: object corresponding to the metadata for each tensor shard",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\tensor\\_shards_wrapper.py",
    "ast_data": "FunctionDef name:local_chunks arg:self arguments arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "compute_mask",
    "source_code": "def compute_mask(self, t, default_mask):\n\n    def _combine_masks(method, t, mask):\n        new_mask = mask\n        new_mask = new_mask.to(dtype=t.dtype)\n        if method.PRUNING_TYPE == 'unstructured':\n            slc = mask == 1\n        elif method.PRUNING_TYPE == 'structured':\n            if not hasattr(method, 'dim'):\n                raise AttributeError('Pruning methods of PRUNING_TYPE \"structured\" need to have the attribute `dim` defined.')\n            n_dims = t.dim()\n            dim = method.dim\n            if dim < 0:\n                dim = n_dims + dim\n            if dim < 0:\n                raise IndexError(f'Index is out of bounds for tensor with dimensions {n_dims}')\n            keep_channel = mask.sum(dim=[d for d in range(n_dims) if d != dim]) != 0\n            slc = [slice(None)] * n_dims\n            slc[dim] = keep_channel\n        elif method.PRUNING_TYPE == 'global':\n            n_dims = len(t.shape)\n            slc = [slice(None)] * n_dims\n        else:\n            raise ValueError(f'Unrecognized PRUNING_TYPE {method.PRUNING_TYPE}')\n        partial_mask = method.compute_mask(t[slc], default_mask=mask[slc])\n        new_mask[slc] = partial_mask.to(dtype=new_mask.dtype)\n        return new_mask\n    method = self._pruning_methods[-1]\n    mask = _combine_masks(method, t, default_mask)\n    return mask",
    "docstring": "Apply the latest ``).",
    "type": "method",
    "file_path": "pytorch\\torch\\nn\\utils\\prune.py",
    "ast_data": "FunctionDef name:compute_mask arg:self arg:t arg:default_mask arguments arg arg arg FunctionDef name:_combine_masks arg:method arg:t arg:mask arguments arg arg arg Assign Assign Call If Compare Assign Compare If Compare If Call Raise Call Assign Call Assign If Compare Assign If Compare Raise Call Assign Compare Call Call Compare Assign Call Assign If Compare Assign Call Assign Call Raise Call Assign Call Assign Call Return return:yes Assign Assign Call Return return:yes"
  },
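A hedged usage sketch showing where `PruningContainer.compute_mask` comes into play: applying `torch.nn.utils.prune.l1_unstructured` twice to the same parameter stacks both methods in a container, and the second mask is computed only on entries the first mask kept:

```python
import torch
import torch.nn.utils.prune as prune

linear = torch.nn.Linear(4, 4)
prune.l1_unstructured(linear, name='weight', amount=0.5)  # first mask
prune.l1_unstructured(linear, name='weight', amount=0.5)  # combined with it
# second pruning acts only on surviving entries, so ~25% of weights remain
print(linear.weight_mask.mean())
```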
  {
    "library": "pytorch",
    "name": "set_dtype_configs",
    "source_code": "def set_dtype_configs(self, dtype_configs: list[DTypeConfig]) -> BackendPatternConfig:\n    self.dtype_configs = dtype_configs\n    return self",
    "docstring": "Set the supported data types passed as arguments to quantize ops in the reference model spec, overriding all previously registered data types.",
    "type": "method",
    "file_path": "pytorch\\torch\\ao\\quantization\\backend_config\\backend_config.py",
    "ast_data": "FunctionDef name:set_dtype_configs arg:self arg:dtype_configs arguments arg arg Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "outer_graph",
    "source_code": "@outer_graph.setter\ndef outer_graph(self, new_outer_graph):\n    self._weak_outer_graph = weakref.ref(new_outer_graph)",
    "docstring": "Sets to .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\func_graph.py",
    "ast_data": "FunctionDef name:outer_graph arg:self arg:new_outer_graph arguments arg arg Assign Call"
  },
  {
    "library": "matplotlib",
    "name": "copy",
    "source_code": "def copy(self):\n    return copy.copy(self)",
    "docstring": "Return a copy of self.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\font_manager.py",
    "ast_data": "FunctionDef name:copy arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "softmax",
    "source_code": "def softmax(X, copy=True):\n    xp, is_array_api_compliant = get_namespace(X)\n    if copy:\n        X = xp.asarray(X, copy=True)\n    max_prob = xp.reshape(xp.max(X, axis=1), (-1, 1))\n    X -= max_prob\n    if _is_numpy_namespace(xp):\n        np.exp(X, out=np.asarray(X))\n    else:\n        X = xp.exp(X)\n    sum_prob = xp.reshape(xp.sum(X, axis=1), (-1, 1))\n    X /= sum_prob\n    return X",
    "docstring": "Calculate the softmax function. The softmax function is calculated by np.exp(X) / np.sum(np.exp(X), axis=1) This will cause overflow when large values are exponentiated. Hence the largest value in each row is subtracted from each data point to prevent this. Parameters ---------- X : array-like of float of shape (M, N) Argument to the logistic function. copy : bool, default=True Copy X or not. Returns ------- out : ndarray of shape (M, N) Softmax function evaluated at every point in x.",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\utils\\extmath.py",
    "ast_data": "FunctionDef name:softmax arg:X arg:copy arguments arg arg Assign Call If Assign Call Assign Call Call If Call Call Call Assign Call Assign Call Call Return return:yes"
  },
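A NumPy-only sketch of the same max-shift trick used in `softmax` above (without the array-API dispatch), showing why the shift prevents overflow:

```python
import numpy as np

def softmax(X):
    # subtracting the row max makes every exponent <= 0, so exp() cannot
    # overflow; the shift cancels out in the normalization
    X = X - X.max(axis=1, keepdims=True)
    np.exp(X, out=X)
    X /= X.sum(axis=1, keepdims=True)
    return X

print(softmax(np.array([[1000.0, 1001.0], [0.0, 0.0]])))
# [[0.26894142 0.73105858]
#  [0.5        0.5       ]]
```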
  {
    "library": "tensorflow",
    "name": "_enclosing_tpu_context_and_graph",
    "source_code": "def _enclosing_tpu_context_and_graph() -> Tuple[Any, Any]:\n    graph = ops.get_default_graph()\n    while graph is not None:\n        context_ = graph._get_control_flow_context()\n        while context_ is not None:\n            if isinstance(context_, TPUReplicateContext):\n                return (context_, graph)\n            context_ = context_.outer_context\n        graph = getattr(graph, 'outer_graph', None)\n    raise ValueError(\"get_replicated_var_handle() called without TPUReplicateContext. This shouldn't happen. Please file a bug.\")",
    "docstring": "Returns the TPUReplicateContext and its associated graph.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\tpu\\tpu_replication.py",
    "ast_data": "FunctionDef name:_enclosing_tpu_context_and_graph arguments Assign Call While Compare Assign Call While Compare If Call Return return:yes Assign Assign Call Raise Call"
  },
  {
    "library": "tensorflow",
    "name": "RemapOperator",
    "source_code": "def RemapOperator(opcode_name):\n    old_name_to_new_name = {'CONVOLUTION': 'CONV_2D', 'DEPTHWISE_CONVOLUTION': 'DEPTHWISE_CONV_2D', 'AVERAGE_POOL': 'AVERAGE_POOL_2D', 'MAX_POOL': 'MAX_POOL_2D', 'L2_POOL': 'L2_POOL_2D', 'SIGMOID': 'LOGISTIC', 'L2NORM': 'L2_NORMALIZATION', 'LOCAL_RESPONSE_NORM': 'LOCAL_RESPONSE_NORMALIZATION', 'Basic_RNN': 'RNN'}\n    return old_name_to_new_name[opcode_name] if opcode_name in old_name_to_new_name else opcode_name",
    "docstring": "Go from old schema op name to new schema op name. Args: opcode_name: String representing the ops (see :schema.fbs). Returns: Converted opcode_name from V1 to V2.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\lite\\schema\\upgrade_schema.py",
    "ast_data": "FunctionDef name:RemapOperator arg:opcode_name arguments arg Assign Return return:yes Compare"
  },
  {
    "library": "tensorflow",
    "name": "name",
    "source_code": "@property\ndef name(self):\n    return self._handle_name",
    "docstring": "The name of the handle for this variable.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\resource_variable_ops.py",
    "ast_data": "FunctionDef name:name arg:self arguments arg Return return:yes"
  },
  {
    "library": "sphinx",
    "name": "add_css_file",
    "source_code": "def add_css_file(self, filename: str, priority: int=500, **kwargs: Any) -> None:\n    logger.debug('[app] adding stylesheet: %r', filename)\n    self.registry.add_css_files(filename, priority=priority, **kwargs)\n    with contextlib.suppress(AttributeError):\n        self.builder.add_css_file(filename, priority=priority, **kwargs)",
    "docstring": "Register a stylesheet to include in the HTML output. :param filename: The name of a CSS file that the default HTML template will include. It must be relative to the HTML static path, or a full URI with scheme. :param priority: Files are included in ascending order of priority. If multiple CSS files have the same priority, those files will be included in order of registration. See list of \"priority range for CSS files\" below. :param kwargs: Extra keyword arguments are included as attributes of the ```html_css_fileshtml-page-contextdocumentation `. And it allows keyword arguments as attributes of link tag. .. versionchanged:: 3.5 Take priority argument. Allow to add a CSS file to the specific page.",
    "type": "method",
    "file_path": "sphinx\\sphinx\\application.py",
    "ast_data": "FunctionDef name:add_css_file arg:self arg:filename arg:priority arguments arg arg arg arg Call Call With Call Call"
  },
  {
    "library": "pytorch",
    "name": "select_decomp_table",
    "source_code": "def select_decomp_table() -> dict[Any, Callable[..., Any]]:\n    if config.fallback_random:\n        return decompositions\n    return fast_random_decomps()",
    "docstring": "decomps can change based on config",
    "type": "function",
    "file_path": "pytorch\\torch\\_inductor\\decomposition.py",
    "ast_data": "FunctionDef name:select_decomp_table arguments If Return return:yes Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "flush",
    "source_code": "def flush(self):\n    if self._writable_file:\n        self._writable_file.flush()",
    "docstring": "Flushes the Writable file. This only ensures that the data has made its way out of the process without any guarantees on whether it's written to disk. This means that the data would survive an application crash but not necessarily an OS crash.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\lib\\io\\file_io.py",
    "ast_data": "FunctionDef name:flush arg:self arguments arg If Call"
  },
  {
    "library": "scikit-learn",
    "name": "build_analyzer",
    "source_code": "def build_analyzer(self):\n    if callable(self.analyzer):\n        return partial(_analyze, analyzer=self.analyzer, decoder=self.decode)\n    preprocess = self.build_preprocessor()\n    if self.analyzer == 'char':\n        return partial(_analyze, ngrams=self._char_ngrams, preprocessor=preprocess, decoder=self.decode)\n    elif self.analyzer == 'char_wb':\n        return partial(_analyze, ngrams=self._char_wb_ngrams, preprocessor=preprocess, decoder=self.decode)\n    elif self.analyzer == 'word':\n        stop_words = self.get_stop_words()\n        tokenize = self.build_tokenizer()\n        self._check_stop_words_consistency(stop_words, preprocess, tokenize)\n        return partial(_analyze, ngrams=self._word_ngrams, tokenizer=tokenize, preprocessor=preprocess, decoder=self.decode, stop_words=stop_words)\n    else:\n        raise ValueError('%s is not a valid tokenization scheme/analyzer' % self.analyzer)",
    "docstring": "Return a callable to process input data. The callable handles preprocessing, tokenization, and n-grams generation. Returns ------- analyzer: callable A function to handle preprocessing, tokenization and n-grams generation.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\feature_extraction\\text.py",
    "ast_data": "FunctionDef name:build_analyzer arg:self arguments arg If Call Return return:yes Call Assign Call If Compare Return return:yes Call If Compare Return return:yes Call If Compare Assign Call Assign Call Call Return return:yes Call Raise Call"
  },
  {
    "library": "matplotlib",
    "name": "get_hatch",
    "source_code": "def get_hatch(self):\n    return self._hatch",
    "docstring": "Get the current hatch style.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\backend_bases.py",
    "ast_data": "FunctionDef name:get_hatch arg:self arguments arg Return return:yes"
  },
  {
    "library": "kornia",
    "name": "_get_center_kernel3d",
    "source_code": "def _get_center_kernel3d(d: int, h: int, w: int, device: Optional[torch.device]=None) -> Tensor:\n    if device is None:\n        torch.device('cpu')\n    center_kernel = zeros(3, 3, d, h, w, device=device)\n    if h % 2 != 0:\n        h_i1 = h // 2\n        h_i2 = h // 2 + 1\n    else:\n        h_i1 = h // 2 - 1\n        h_i2 = h // 2 + 1\n    if w % 2 != 0:\n        w_i1 = w // 2\n        w_i2 = w // 2 + 1\n    else:\n        w_i1 = w // 2 - 1\n        w_i2 = w // 2 + 1\n    if d % 2 != 0:\n        d_i1 = d // 2\n        d_i2 = d // 2 + 1\n    else:\n        d_i1 = d // 2 - 1\n        d_i2 = d // 2 + 1\n    center_num = float((h_i2 - h_i1) * (w_i2 - w_i1) * (d_i2 - d_i1))\n    center_kernel[(0, 1, 2), (0, 1, 2), d_i1:d_i2, h_i1:h_i2, w_i1:w_i2] = 1.0 / center_num\n    return center_kernel",
    "docstring": "Generate a kernel to return center coordinates, when applied with F.conv2d to 3d coordinates grid. Args: d: kernel depth. h: kernel height. w: kernel width. device: device, on which generate. Returns: conv_kernel [3x3xdxhxw].",
    "type": "function",
    "file_path": "kornia\\kornia\\geometry\\subpix\\spatial_soft_argmax.py",
    "ast_data": "FunctionDef name:_get_center_kernel3d arg:d arg:h arg:w arg:device arguments arg arg arg arg If Compare Call Assign Call If Compare Assign Assign Assign Assign If Compare Assign Assign Assign Assign If Compare Assign Assign Assign Assign Assign Call Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "NamedDistribution",
    "source_code": "class NamedDistribution(object):\n\n    def __init__(self, name, distribution_fn, required_gpus=None, required_physical_gpus=0, required_tpu=False, use_cloud_tpu=False, has_chief=False, num_workers=1, num_ps=0, share_gpu=True, pool_runner_fn=None, no_xla=False):\n        object.__init__(self)\n        self._name = name\n        self._distribution_fn = distribution_fn\n        self.required_gpus = required_gpus\n        self.required_physical_gpus = required_physical_gpus\n        self.required_tpu = required_tpu\n        self.use_cloud_tpu = use_cloud_tpu\n        self.has_chief = has_chief\n        self.num_workers = num_workers\n        self.num_ps = num_ps\n        self.share_gpu = share_gpu\n        self._pool_runner_fn = pool_runner_fn\n        self.no_xla = no_xla\n\n    @property\n    def runner(self):\n        if self._pool_runner_fn is not None:\n            return self._pool_runner_fn()\n        return None\n\n    @property\n    def strategy(self):\n        return self._distribution_fn()\n\n    def __repr__(self):\n        return self._name",
    "docstring": "Wraps a and adds a name for test titles.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\combinations.py",
    "ast_data": "ClassDef name:NamedDistribution FunctionDef name:__init__ arg:self arg:name arg:distribution_fn arg:required_gpus arg:required_physical_gpus arg:required_tpu arg:use_cloud_tpu arg:has_chief arg:num_workers arg:num_ps arg:share_gpu arg:pool_runner_fn arg:no_xla arguments arg arg arg arg arg arg arg arg arg arg arg arg arg Call Assign Assign Assign Assign Assign Assign Assign Assign Assign Assign Assign Assign FunctionDef name:runner arg:self arguments arg If Compare Return return:yes Call Return return:no FunctionDef name:strategy arg:self arguments arg Return return:yes Call FunctionDef name:__repr__ arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_verify_output_dir",
    "source_code": "def _verify_output_dir(output_dir: Optional[str], overwrite: bool) -> None:\n    dir_not_empty = output_dir is not None and file_io.file_exists_v2(output_dir) and file_io.list_directory_v2(output_dir)\n    if dir_not_empty and (not overwrite):\n        raise FileExistsError(f'Output directory already exists: {output_dir} . Please set overwrite_output_directory to true to overwrite the existing directory.')",
    "docstring": "Verifies the output directory. Raises an error if is not suitable for writing the output saved model. Args: output_dir: Output directory. overwrite: An option allowing to overwrite the existing output directory if set to true. Does not actually create or modify the in this function. Raises: FileExistsError: Iff is not empty and is false.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\compiler\\mlir\\quantization\\tensorflow\\python\\quantize_model.py",
    "ast_data": "FunctionDef name:_verify_output_dir arg:output_dir arg:overwrite arguments arg arg Assign BoolOp Compare Call Call If BoolOp Raise Call"
  },
  {
    "library": "uvicorn",
    "name": "ServerState",
    "source_code": "class ServerState:\n\n    def __init__(self) -> None:\n        self.total_requests = 0\n        self.connections: set[Protocols] = set()\n        self.tasks: set[asyncio.Task[None]] = set()\n        self.default_headers: list[tuple[bytes, bytes]] = []",
    "docstring": "Shared servers state that is available between all protocol instances.",
    "type": "class",
    "file_path": "uvicorn\\uvicorn\\server.py",
    "ast_data": "ClassDef name:ServerState FunctionDef name:__init__ arg:self arguments arg Assign Call Call"
  },
  {
    "library": "django",
    "name": "supports_transactions",
    "source_code": "@cached_property\ndef supports_transactions(self):\n    with self.connection.cursor() as cursor:\n        cursor.execute('CREATE TABLE ROLLBACK_TEST (X INT)')\n        self.connection.set_autocommit(False)\n        cursor.execute('INSERT INTO ROLLBACK_TEST (X) VALUES (8)')\n        self.connection.rollback()\n        self.connection.set_autocommit(True)\n        cursor.execute('SELECT COUNT(X) FROM ROLLBACK_TEST')\n        count, = cursor.fetchone()\n        cursor.execute('DROP TABLE ROLLBACK_TEST')\n    return count == 0",
    "docstring": "Confirm support for transactions.",
    "type": "method",
    "file_path": "django\\django\\db\\backends\\base\\features.py",
    "ast_data": "FunctionDef name:supports_transactions arg:self arguments arg With Call Call Call Call Call Call Call Assign Call Call Return return:yes Compare"
  },
  {
    "library": "tensorflow",
    "name": "add_to_graph",
    "source_code": "def add_to_graph(self, g=None, overwrite=False):\n    if not context.executing_eagerly() and (not g):\n        g = ops.get_default_graph()\n    if g is not None:\n        g._add_function_recursive(self._delayed_rewrite_functions.forward())",
    "docstring": "Registers the function, adds it to the graph g or default graph. Args: g: If specified, registers the function with this graph. Defaults to the current context (either the default graph or the eager context). overwrite: A bool. If True, its forward function will overwrite any existing function of the same signature name in the graph .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\polymorphic_function\\concrete_function.py",
    "ast_data": "FunctionDef name:add_to_graph arg:self arg:g arg:overwrite arguments arg arg arg If BoolOp Call Assign Call If Compare Call Call"
  },
  {
    "library": "tensorflow",
    "name": "scatter_mul",
    "source_code": "def scatter_mul(self, sparse_delta, use_locking=False, name=None):\n    if not isinstance(sparse_delta, indexed_slices.IndexedSlices):\n        raise TypeError('sparse_delta is not IndexedSlices: %s' % sparse_delta)\n    return gen_state_ops.scatter_mul(self._variable, sparse_delta.indices, sparse_delta.values, use_locking=use_locking, name=name)",
    "docstring": "Multiply this variable by . Args: sparse_delta: to multiply this variable by. use_locking: If , use locking during the operation. name: the name of the operation. Returns: A that will hold the new value of this variable after the scattered multiplication has completed. Raises: TypeError: if is not an .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\ref_variable.py",
    "ast_data": "FunctionDef name:scatter_mul arg:self arg:sparse_delta arg:use_locking arg:name arguments arg arg arg arg If Call Raise Call Return return:yes Call"
  },
  {
    "library": "django",
    "name": "lower",
    "source_code": "@register.filter(is_safe=True)\n@stringfilter\ndef lower(value):\n    return value.lower()",
    "docstring": "Convert a string into all lowercase.",
    "type": "function",
    "file_path": "django\\django\\template\\defaultfilters.py",
    "ast_data": "FunctionDef name:lower arg:value arguments arg Return return:yes Call Call"
  },
  {
    "library": "scikit-learn",
    "name": "_finalize_sample_weight",
    "source_code": "def _finalize_sample_weight(self, sample_weight, y):\n    if self.class_weight is None:\n        return sample_weight\n    expanded_class_weight = compute_sample_weight(self.class_weight, y)\n    if sample_weight is not None:\n        return sample_weight * expanded_class_weight\n    else:\n        return expanded_class_weight",
    "docstring": "Adjust sample_weights with class_weights.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\ensemble\\_hist_gradient_boosting\\gradient_boosting.py",
    "ast_data": "FunctionDef name:_finalize_sample_weight arg:self arg:sample_weight arg:y arguments arg arg arg If Compare Return return:yes Assign Call If Compare Return return:yes Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "close",
    "source_code": "def close(self):\n    if self._session and (not self._closed):\n        self._closed = True\n        tf_session.TF_CloseSession(self._session)",
    "docstring": "Closes this session. Calling this method frees all resources associated with the session. Raises: tf.errors.OpError: Or one of its subclasses if an error occurs while closing the TensorFlow session.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\client\\session.py",
    "ast_data": "FunctionDef name:close arg:self arguments arg If BoolOp Assign Call"
  },
  {
    "library": "tensorflow",
    "name": "_annotate_variable_ops",
    "source_code": "def _annotate_variable_ops(func, graph_def):\n    ph_shape_map = {}\n    for ph, var in zip(func.graph.internal_captures, func.variables):\n        ph_shape_map[ph.name] = var.shape\n    name_to_node = {node.name: node for node in graph_def.node}\n    for node in graph_def.node:\n        if node.op == 'ReadVariableOp' or node.op == 'ResourceGather':\n            node_ = node\n            while name_to_node[node_.input[0]].op == 'Identity':\n                node_ = name_to_node[node_.input[0]]\n            ph_name = node_.input[0] + ':0'\n            if ph_name in ph_shape_map:\n                shape = ph_shape_map[ph_name]\n                node.attr['_shape'].shape.CopyFrom(shape.as_proto())\n            else:\n                raise RuntimeError('Not found in the function captures: {}'.format(ph_name))",
    "docstring": "Annotates variable operations with custom attribute. This is required for the converters and shape inference. The graph definition is modified in-place. Args: func: Function represented by the graph definition. graph_def: Graph definition to be annotated in-place. Raises: RuntimeError: if some shapes cannot be annotated.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\compiler\\tensorrt\\trt_convert.py",
    "ast_data": "FunctionDef name:_annotate_variable_ops arg:func arg:graph_def arguments arg arg Assign For Call Assign Assign For If BoolOp Compare Compare Assign While Compare Assign Assign If Compare Assign Call Call Raise Call Call"
  },
  {
    "library": "pytorch",
    "name": "codegen_template",
    "source_code": "def codegen_template(self, template_node: BaseSchedulerNode, epilogue_nodes: Sequence[BaseSchedulerNode], prologue_nodes: Sequence[BaseSchedulerNode]):\n    assert self.is_rocm_cpp_template(template_node), 'Template node passed to ROCmScheduler.codegen_template must be a SchedulerNode that wraps a ROCmTemplateBuffer'\n    template_node = cast(SchedulerNode, template_node)\n    _, (_numel, rnumel) = template_node.group\n    assert rnumel == 1\n    ctb: ROCmTemplateBuffer = cast(ROCmTemplateBuffer, template_node.node)\n    kernel, render = ctb.make_kernel_render(ctb)\n    with kernel:\n        template_node.mark_run()\n        src_code = render()\n    with V.set_kernel_handler(kernel):\n        node_schedule = [template_node]\n        kernel_name = self.define_kernel(src_code, node_schedule)\n    kernel.call_kernel(kernel_name, ctb)\n    V.graph.removed_buffers |= kernel.removed_buffers\n    self.free_buffers_in_scheduler()",
    "docstring": "Codegen a ROCm template, possibly with fused epilogues",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\codegen\\rocm\\rocm_cpp_scheduling.py",
    "ast_data": "FunctionDef name:codegen_template arg:self arg:template_node arg:epilogue_nodes arg:prologue_nodes arguments arg arg arg arg Call Assign Call Assign Compare Call Assign Call With Call Assign Call With Call Assign Assign Call Call Call"
  },
  {
    "library": "kornia",
    "name": "load_config",
    "source_code": "@classmethod\ndef load_config(cls, url: str, download: bool=True, **kwargs: Any) -> dict[str, Any]:\n    if url.startswith(('http:', 'https:')):\n        file_path = cls.download_to_cache(url, os.path.split(url)[-1], download=download, suffix='.json', **kwargs)\n        with open(file_path) as f:\n            json_data = json.load(f)\n            return json_data\n    if not download:\n        raise RuntimeError(f'File `{url}` not found. You may set `download=True`.')\n    raise RuntimeError(f'File `{file_path}` not found.')",
    "docstring": "Load JSON config from the specified URL. Args: url: The URL of the preprocessor config to load. download: If True, the config will be downloaded if it's not already in the local cache. kwargs: Additional download arguments. Returns: dict[str, Any]: The loaded preprocessor config.",
    "type": "method",
    "file_path": "kornia\\kornia\\onnx\\utils.py",
    "ast_data": "FunctionDef name:load_config arg:cls arg:url arg:download arguments arg arg arg arg If Call Assign Call Call With Call Assign Call Return return:yes If Raise Call Raise Call"
  },
  {
    "library": "tensorflow",
    "name": "tile",
    "source_code": "@dispatch.add_dispatch_support\n@doc_controls.do_not_generate_docs\ndef tile(x, n):\n    if isinstance(n, int):\n        n = [n]\n    return array_ops.tile(x, n)",
    "docstring": "Creates a tensor by tiling by . Args: x: A tensor or variable n: A list of integer. The length must be the same as the number of dimensions in . Returns: A tiled tensor.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\backend.py",
    "ast_data": "FunctionDef name:tile arg:x arg:n arguments arg arg If Call Assign Return return:yes Call"
  },
  {
    "library": "numpy",
    "name": "diagflat",
    "source_code": "@array_function_dispatch(_diag_dispatcher)\ndef diagflat(v, k=0):\n    conv = _array_converter(v)\n    v, = conv.as_arrays(subok=False)\n    v = v.ravel()\n    s = len(v)\n    n = s + abs(k)\n    res = zeros((n, n), v.dtype)\n    if k >= 0:\n        i = arange(0, n - k, dtype=intp)\n        fi = i + k + i * n\n    else:\n        i = arange(0, n + k, dtype=intp)\n        fi = i + (i - k) * n\n    res.flat[fi] = v\n    return conv.wrap(res)",
    "docstring": "Create a two-dimensional array with the flattened input as a diagonal. Parameters ---------- v : array_like Input data, which is flattened and set as the -th diagonal of the output. k : int, optional Diagonal to set; 0, the default, corresponds to the \"main\" diagonal, a positive (negative) giving the number of the diagonal above (below) the main. Returns ------- out : ndarray The 2-D output array. See Also -------- diag : MATLAB work-alike for 1-D and 2-D arrays. diagonal : Return specified diagonals. trace : Sum along diagonals. Examples -------- >>> import numpy as np >>> np.diagflat([[1,2], [3,4]]) array([[1, 0, 0, 0], [0, 2, 0, 0], [0, 0, 3, 0], [0, 0, 0, 4]]) >>> np.diagflat([1,2], 1) array([[0, 1, 0], [0, 0, 2], [0, 0, 0]])",
    "type": "function",
    "file_path": "numpy\\numpy\\lib\\_twodim_base_impl.py",
    "ast_data": "FunctionDef name:diagflat arg:v arg:k arguments arg arg Assign Call Assign Call Assign Call Assign Call Assign Call Assign Call If Compare Assign Call Assign Assign Call Assign Assign Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_OptionsDataset",
    "source_code": "class _OptionsDataset(UnaryUnchangedStructureDataset):\n\n    def __init__(self, input_dataset, options, name=None):\n        self._input_dataset = input_dataset\n        options_pb = dataset_options_pb2.Options()\n        options_pb.CopyFrom(options._to_proto())\n        self._name = name\n        with ops.colocate_with(input_dataset._variant_tensor):\n            variant_tensor = gen_dataset_ops.options_dataset(input_dataset._variant_tensor, options_pb.SerializeToString(), **self._common_args)\n        super(_OptionsDataset, self).__init__(input_dataset, variant_tensor)\n        if self._options_attr:\n            self._options_attr._set_mutable(True)\n            self._options_attr = self._options_attr.merge(options)\n        else:\n            self._options_attr = options\n        self._options_attr._set_mutable(False)",
    "docstring": "An identity that stores options.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\data\\ops\\dataset_ops.py",
    "ast_data": "ClassDef name:_OptionsDataset FunctionDef name:__init__ arg:self arg:input_dataset arg:options arg:name arguments arg arg arg arg Assign Assign Call Call Call Assign With Call Assign Call Call Call Call If Call Assign Call Assign Call"
  },
  {
    "library": "pandas",
    "name": "render_pep440_pre",
    "source_code": "def render_pep440_pre(pieces):\n    if pieces['closest-tag']:\n        if pieces['distance']:\n            tag_version, post_version = pep440_split_post(pieces['closest-tag'])\n            rendered = tag_version\n            if post_version is not None:\n                rendered += f'.post{post_version + 1}.dev{pieces['distance']}'\n            else:\n                rendered += f'.post0.dev{pieces['distance']}'\n        else:\n            rendered = pieces['closest-tag']\n    else:\n        rendered = f'0.post0.dev{pieces['distance']}'\n    return rendered",
    "docstring": "TAG[.postN.devDISTANCE] -- No -dirty. Exceptions: 1: no tags. 0.post0.devDISTANCE",
    "type": "function",
    "file_path": "pandas\\pandas\\_version.py",
    "ast_data": "FunctionDef name:render_pep440_pre arg:pieces arguments arg If If Assign Call Assign If Compare Assign Assign Return return:yes"
  },
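For reference, a self-contained sketch of the branches above; `pep440_split_post` is a stand-in re-implementation (the real helper lives alongside `render_pep440_pre` in the same module):

```python
# Splits e.g. '2.1.0.post1' -> ('2.1.0', 1) and '2.1.0' -> ('2.1.0', None).
def pep440_split_post(ver):
    vc = ver.split('.post')
    return vc[0], (int(vc[1] or 0) if len(vc) == 2 else None)

def render_pep440_pre(pieces):
    if pieces['closest-tag']:
        if pieces['distance']:
            tag_version, post_version = pep440_split_post(pieces['closest-tag'])
            rendered = tag_version
            if post_version is not None:
                rendered += f'.post{post_version + 1}.dev{pieces["distance"]}'
            else:
                rendered += f'.post0.dev{pieces["distance"]}'
        else:
            rendered = pieces['closest-tag']
    else:
        rendered = f'0.post0.dev{pieces["distance"]}'
    return rendered

print(render_pep440_pre({'closest-tag': '2.1.0', 'distance': 0}))        # 2.1.0
print(render_pep440_pre({'closest-tag': '2.1.0', 'distance': 5}))        # 2.1.0.post0.dev5
print(render_pep440_pre({'closest-tag': '2.1.0.post1', 'distance': 3}))  # 2.1.0.post2.dev3
print(render_pep440_pre({'closest-tag': None, 'distance': 7}))           # 0.post0.dev7
```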
  {
    "library": "scikit-learn",
    "name": "get_metadata_routing",
    "source_code": "def get_metadata_routing(self):\n    router = MetadataRouter(owner=self.__class__.__name__).add_self_request(self).add(estimator=self.estimator, method_mapping=MethodMapping().add(caller='fit', callee='fit').add(caller='partial_fit', callee='partial_fit'))\n    return router",
    "docstring": "Get metadata routing of this object. Please check :ref: on how the routing mechanism works. .. versionadded:: 1.4 Returns ------- routing : MetadataRouter A :class: encapsulating routing information.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\multiclass.py",
    "ast_data": "FunctionDef name:get_metadata_routing arg:self arguments arg Assign Call Call Call Call Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_handle_output_node",
    "source_code": "def _handle_output_node(node: torch.fx.Node, node_name_to_values: dict[str, ir.Value | Sequence[ir.Value]], graph_like: ir.Graph | ir.Function) -> None:\n    for output in node.args[0]:\n        output_value_name = output.name\n        assert isinstance(output_value_name, str), f'Bug: Expected {output_value_name!r} to be a string'\n        values = node_name_to_values[output_value_name]\n        if isinstance(values, Sequence):\n            graph_like.outputs.extend(values)\n            return\n        graph_like.outputs.append(values)",
    "docstring": "Handle an output node by adding the output to the graph's outputs. Args: node: The FX node to translate. node_name_to_values: A mapping of FX node names to their produced ONNX ``. graph_like: The ONNX graph at construction.",
    "type": "function",
    "file_path": "pytorch\\torch\\onnx\\_internal\\exporter\\_core.py",
    "ast_data": "FunctionDef name:_handle_output_node arg:node arg:node_name_to_values arg:graph_like arguments arg arg arg For Assign Call Assign If Call Call Return return:no Call"
  },
  {
    "library": "tensorflow",
    "name": "rank",
    "source_code": "@tf_export('rank')\n@dispatch.add_dispatch_support\ndef rank(input, name=None):\n    return rank_internal(input, name, optimize=True)",
    "docstring": "Returns the rank of a tensor. See also . Returns a 0-D representing the rank of . For example: **Note**: The rank of a tensor is not the same as the rank of a matrix. The rank of a tensor is the number of indices required to uniquely select each element of the tensor. Rank is also known as \"order\", \"degree\", or \"ndims.\" Args: input: A or . name: A name for the operation (optional). Returns: A of type . @compatibility(numpy) Equivalent to np.ndim @end_compatibility",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\array_ops.py",
    "ast_data": "FunctionDef name:rank arg:input arg:name arguments arg arg Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_get_op_details",
    "source_code": "def _get_op_details(self, op_index):\n    operand_types = [self._get_tensor_details(tensor_idx, subgraph_index=0)['dtype'] for tensor_idx in self._interpreter.NodeInputs(op_index) if tensor_idx != -1]\n    result_types = [self._get_tensor_details(tensor_idx, subgraph_index=0)['dtype'] for tensor_idx in self._interpreter.NodeOutputs(op_index) if tensor_idx != -1]\n    details = {'index': int(op_index), 'op_name': self._interpreter.NodeName(op_index), 'inputs': self._interpreter.NodeInputs(op_index), 'outputs': self._interpreter.NodeOutputs(op_index), 'operand_types': operand_types, 'result_types': result_types}\n    return details",
    "docstring": "Gets a dictionary with arrays of ids for tensors involved with an op. Args: op_index: Operation/node index of node to query. Returns: a dictionary containing the index, op name, and arrays with lists of the indices and types for the inputs and outputs of the op/nodes.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\lite\\python\\interpreter.py",
    "ast_data": "FunctionDef name:_get_op_details arg:self arg:op_index arguments arg arg Assign Call Call Compare Assign Call Call Compare Assign Call Call Call Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "_add_tool_cbk",
    "source_code": "def _add_tool_cbk(self, event):\n    if getattr(event.tool, 'cursor', None) is not None:\n        self.toolmanager.toolmanager_connect(f'tool_trigger_{event.tool.name}', self._tool_trigger_cbk)",
    "docstring": "Process every newly added tool.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\backend_tools.py",
    "ast_data": "FunctionDef name:_add_tool_cbk arg:self arg:event arguments arg arg If Compare Call Call"
  },
  {
    "library": "scipy",
    "name": "Lint",
    "source_code": "@cli.cls_cmd('lint')\nclass Lint:\n    fix = Option(['--fix'], default=False, is_flag=True, help='Attempt to auto-fix errors')\n    all = Option(['--all'], default=False, is_flag=True, help='lint all files instead of just modified files.')\n\n    @classmethod\n    def run(cls, fix, all):\n        run_doit_task({'lint': {'fix': fix, 'all': all}, 'check_unicode': {}, 'check_testname': {}, 'check_python_h_first': {}})",
    "docstring": ":dash: Run linter on modified (or all) files and check for disallowed Unicode characters and possibly-invalid test names.",
    "type": "class",
    "file_path": "scipy\\dev.py",
    "ast_data": "ClassDef name:Lint Assign Call Assign Call FunctionDef name:run arg:cls arg:fix arg:all arguments arg arg arg Call Call"
  },
  {
    "library": "tensorflow",
    "name": "read_graph_op_creation_stack_trace",
    "source_code": "def read_graph_op_creation_stack_trace(self, graph_op_creation_digest):\n    return (graph_op_creation_digest.host_name, [self._stack_frame_by_id[frame_id][1:] for frame_id in graph_op_creation_digest.stack_frame_ids])",
    "docstring": "Read the stack trace of a given graph op creation object. Args: graph_op_creation_digest: The GraphOpCreationDigest object of interest. Returns: A tuple consisting of: 1. The host name. 2. The stack trace, as a list of (file_path, lineno, func) tuples.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\debug\\lib\\debug_events_reader.py",
    "ast_data": "FunctionDef name:read_graph_op_creation_stack_trace arg:self arg:graph_op_creation_digest arguments arg arg Return return:yes"
  },
  {
    "library": "pandas",
    "name": "_convert_to_number_format",
    "source_code": "@classmethod\ndef _convert_to_number_format(cls, number_format_dict):\n    return number_format_dict['format_code']",
    "docstring": "Convert `` to an openpyxl v2.1.0 number format initializer. Parameters ---------- number_format_dict : dict A dict with zero or more of the following keys. 'format_code' : str Returns ------- number_format : str",
    "type": "method",
    "file_path": "pandas\\pandas\\io\\excel\\_openpyxl.py",
    "ast_data": "FunctionDef name:_convert_to_number_format arg:cls arg:number_format_dict arguments arg arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "radam",
    "source_code": "@_disable_dynamo_if_unsupported(single_tensor_fn=_single_tensor_radam)\ndef radam(params: list[Tensor], grads: list[Tensor], exp_avgs: list[Tensor], exp_avg_sqs: list[Tensor], state_steps: list[Tensor], decoupled_weight_decay: bool=False, foreach: Optional[bool]=None, differentiable: bool=False, capturable: bool=False, has_complex: bool=False, maximize: bool=False, *, beta1: float, beta2: float, lr: float, weight_decay: float, eps: float):\n    if not all((isinstance(t, torch.Tensor) for t in state_steps)):\n        raise RuntimeError('API has changed, `state_steps` argument must contain a list of singleton tensors')\n    if foreach is None:\n        _, foreach = _default_to_fused_or_foreach(params, differentiable, use_fused=False)\n    if foreach and torch.jit.is_scripting():\n        raise RuntimeError('torch.jit.script not supported with foreach optimizers')\n    if foreach and (not torch.jit.is_scripting()):\n        func = _multi_tensor_radam\n    else:\n        func = _single_tensor_radam\n    func(params, grads, exp_avgs, exp_avg_sqs, state_steps, beta1=beta1, beta2=beta2, lr=lr, weight_decay=weight_decay, eps=eps, maximize=maximize, decoupled_weight_decay=decoupled_weight_decay, differentiable=differentiable, capturable=capturable, has_complex=has_complex)",
    "docstring": "Functional API that performs RAdam algorithm computation. See :class: for details.",
    "type": "function",
    "file_path": "pytorch\\torch\\optim\\radam.py",
    "ast_data": "FunctionDef name:radam arg:params arg:grads arg:exp_avgs arg:exp_avg_sqs arg:state_steps arg:decoupled_weight_decay arg:foreach arg:differentiable arg:capturable arg:has_complex arg:maximize arguments arg arg arg arg arg arg arg arg arg arg arg arg arg arg arg arg If Call Call Raise Call If Compare Assign Call If BoolOp Call Raise Call If BoolOp Call Assign Assign Call Call"
  },
  {
    "library": "tensorflow",
    "name": "categorical",
    "source_code": "@tf_export('random.categorical')\n@dispatch.add_dispatch_support\ndef categorical(logits, num_samples, dtype=None, seed=None, name=None):\n    with ops.name_scope(name, 'categorical', [logits]):\n        return multinomial_categorical_impl(logits, num_samples, dtype, seed)",
    "docstring": "Draws samples from a categorical distribution. Example: Args: logits: 2-D Tensor with shape . Each slice represents the unnormalized log-probabilities for all classes. num_samples: 0-D. Number of independent samples to draw for each row slice. dtype: The integer type of the output: or . Defaults to . seed: A Python integer. Used to create a random seed for the distribution. See for behavior. name: Optional name for the operation. Returns: The drawn samples of shape .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\random_ops.py",
    "ast_data": "FunctionDef name:categorical arg:logits arg:num_samples arg:dtype arg:seed arg:name arguments arg arg arg arg arg With Call Return return:yes Call Call"
  },
  {
    "library": "scikit-learn",
    "name": "_handle_zeros_in_scale",
    "source_code": "def _handle_zeros_in_scale(scale, copy=True, constant_mask=None):\n    if np.isscalar(scale):\n        if scale == 0.0:\n            scale = 1.0\n        return scale\n    else:\n        xp, _ = get_namespace(scale)\n        if constant_mask is None:\n            constant_mask = scale < 10 * xp.finfo(scale.dtype).eps\n        if copy:\n            scale = xp.asarray(scale, copy=True)\n        scale[constant_mask] = 1.0\n        return scale",
    "docstring": "Set scales of near constant features to 1. The goal is to avoid division by very small or zero values. Near constant features are detected automatically by identifying scales close to machine precision unless they are precomputed by the caller and passed with the kwarg. Typically for standard scaling, the scales are the standard deviation while near constant features are better detected on the computed variances which are closer to machine precision by construction.",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\preprocessing\\_data.py",
    "ast_data": "FunctionDef name:_handle_zeros_in_scale arg:scale arg:copy arg:constant_mask arguments arg arg arg If Call If Compare Assign Return return:yes Assign Call If Compare Assign Compare Call If Assign Call Assign Return return:yes"
  },
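The core guard is easy to see in isolation: scales within a few machine epsilons of zero are replaced by 1.0 so dividing by them becomes a no-op for constant features. A NumPy-only sketch:

```python
import numpy as np

# Scales at machine precision are treated as constant features and set to 1.0.
scale = np.array([2.5, 0.0, 1e-20, 3.0])
constant_mask = scale < 10 * np.finfo(scale.dtype).eps
safe = scale.copy()
safe[constant_mask] = 1.0
print(safe)  # [2.5 1.  1.  3. ]
```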
  {
    "library": "tensorflow",
    "name": "standardize_sample_or_class_weights",
    "source_code": "def standardize_sample_or_class_weights(x_weight, output_names, weight_type):\n    if x_weight is None or (isinstance(x_weight, (list, tuple)) and len(x_weight) == 0):\n        return [None for _ in output_names]\n    if len(output_names) == 1:\n        if isinstance(x_weight, (list, tuple)) and len(x_weight) == 1:\n            return x_weight\n        if isinstance(x_weight, dict) and output_names[0] in x_weight:\n            return [x_weight[output_names[0]]]\n        else:\n            return [x_weight]\n    if isinstance(x_weight, (list, tuple)):\n        if len(x_weight) != len(output_names):\n            raise ValueError('Provided `' + weight_type + '` was a list of ' + str(len(x_weight)) + ' elements, but the model has ' + str(len(output_names)) + ' outputs. You should provide one `' + weight_type + '`array per model output.')\n        return x_weight\n    if isinstance(x_weight, collections.abc.Mapping):\n        generic_utils.check_for_unexpected_keys(weight_type, x_weight, output_names)\n        x_weights = []\n        for name in output_names:\n            x_weights.append(x_weight.get(name))\n        return x_weights\n    else:\n        raise TypeError('The model has multiple outputs, so `' + weight_type + '` should be either a list or a dict. Provided `' + weight_type + '` type not understood: ' + str(x_weight))",
    "docstring": "Maps or to model outputs. Args: x_weight: User-provided or argument. output_names: List of output names (strings) in the model. weight_type: A string used purely for exception printing. Returns: A list of or where there are exactly one element per model output. Raises: ValueError: In case of invalid user-provided argument.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\training_utils_v1.py",
    "ast_data": "FunctionDef name:standardize_sample_or_class_weights arg:x_weight arg:output_names arg:weight_type arguments arg arg arg If BoolOp Compare BoolOp Call Compare Call Return return:yes If Compare Call If BoolOp Call Compare Call Return return:yes If BoolOp Call Compare Return return:yes Return return:yes If Call If Compare Call Call Raise Call Call Call Call Call Return return:yes If Call Call Assign For Call Call Return return:yes Raise Call Call"
  },
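The dict branch for multi-output models reduces to one `dict.get` per output name; a tiny illustration with made-up names:

```python
# Each output name picks up its weight; names missing from the dict map to None.
output_names = ['main', 'aux']
x_weight = {'main': [0.5, 1.0]}
x_weights = [x_weight.get(name) for name in output_names]
print(x_weights)  # [[0.5, 1.0], None]
```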
  {
    "library": "pandas",
    "name": "_can_fast_transpose",
    "source_code": "@property\ndef _can_fast_transpose(self) -> bool:\n    return False",
    "docstring": "Is transposing an array with this dtype zero-copy? Only relevant for cases where _supports_2d is True.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\dtypes\\base.py",
    "ast_data": "FunctionDef name:_can_fast_transpose arg:self arguments arg Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "_count",
    "source_code": "def _count(self, X, Y):\n    check_non_negative(X, 'ComplementNB (input X)')\n    self.feature_count_ += safe_sparse_dot(Y.T, X)\n    self.class_count_ += Y.sum(axis=0)\n    self.feature_all_ = self.feature_count_.sum(axis=0)",
    "docstring": "Count feature occurrences.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\naive_bayes.py",
    "ast_data": "FunctionDef name:_count arg:self arg:X arg:Y arguments arg arg arg Call Call Call Assign Call"
  },
  {
    "library": "matplotlib",
    "name": "get_transformed_path_and_affine",
    "source_code": "def get_transformed_path_and_affine(self):\n    self._revalidate()\n    return (self._transformed_path, self.get_affine())",
    "docstring": "Return a copy of the child path, with the non-affine part of the transform already applied, along with the affine part of the path necessary to complete the transformation.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\transforms.py",
    "ast_data": "FunctionDef name:get_transformed_path_and_affine arg:self arguments arg Call Return return:yes Call"
  },
  {
    "library": "sphinx",
    "name": "is_supported",
    "source_code": "def is_supported(self) -> bool:\n    if self.builders and self.app.builder.name not in self.builders:\n        return False\n    return not self.formats or self.app.builder.format in self.formats",
    "docstring": "Check this transform working for current builder.",
    "type": "method",
    "file_path": "sphinx\\sphinx\\transforms\\post_transforms\\__init__.py",
    "ast_data": "FunctionDef name:is_supported arg:self arguments arg If BoolOp Compare Return return:yes Return return:yes BoolOp Compare"
  },
  {
    "library": "pytorch",
    "name": "opening_comment_lines",
    "source_code": "@cached_property\ndef opening_comment_lines(self) -> int:\n    it = (i for i, s in enumerate(self.lines) if not s.startswith('#'))\n    return next(it, 0)",
    "docstring": "The number of comments at the very top of the file.",
    "type": "method",
    "file_path": "pytorch\\tools\\linter\\adapters\\_linter.py",
    "ast_data": "FunctionDef name:opening_comment_lines arg:self arguments arg Assign Call Call Return return:yes Call"
  },
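The property is the classic `enumerate`/`next`-with-default idiom; standalone:

```python
# Index of the first line that is not a '#' comment; the default 0 covers
# an empty (or all-comment) file, matching the property's behavior.
lines = ['# header', '# license', 'import os', '# not counted']
it = (i for i, s in enumerate(lines) if not s.startswith('#'))
print(next(it, 0))  # 2
```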
  {
    "library": "tensorflow",
    "name": "recursive_create_dir",
    "source_code": "@tf_export(v1=['gfile.MakeDirs'])\ndef recursive_create_dir(dirname):\n    recursive_create_dir_v2(dirname)",
    "docstring": "Creates a directory and all parent/intermediate directories. It succeeds if dirname already exists and is writable. Args: dirname: string, name of the directory to be created Raises: errors.OpError: If the operation fails.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\lib\\io\\file_io.py",
    "ast_data": "FunctionDef name:recursive_create_dir arg:dirname arguments arg Call Call"
  },
  {
    "library": "pytorch",
    "name": "create_tiling",
    "source_code": "@classmethod\ndef create_tiling(cls, pw_tiling: Sequence[sympy.Expr], reduction_tiling: Sequence[sympy.Expr]) -> dict[str, sympy.Expr]:\n    pw_prefixes = ['z', 'y', 'x'][-len(pw_tiling):]\n    reduction_prefixes = ['r0_', 'r1_'][:len(reduction_tiling)]\n    return immutable_dict([*zip(pw_prefixes, pw_tiling), *zip(reduction_prefixes, reduction_tiling)])",
    "docstring": "Create a tiling dict from pointwise and reduction splits.",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\codegen\\simd.py",
    "ast_data": "FunctionDef name:create_tiling arg:cls arg:pw_tiling arg:reduction_tiling arguments arg arg arg Assign Call Assign Call Return return:yes Call Call Call"
  },
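A sketch of the prefix pairing in `create_tiling`, with plain ints standing in for the sympy expressions and a plain dict in place of `immutable_dict`:

```python
# Pointwise splits take the trailing prefixes from ['z', 'y', 'x'];
# reduction splits take the leading prefixes from ['r0_', 'r1_'].
pw_tiling = [64, 32]        # two pointwise splits -> prefixes 'y', 'x'
reduction_tiling = [128]    # one reduction split  -> prefix 'r0_'

pw_prefixes = ['z', 'y', 'x'][-len(pw_tiling):]
reduction_prefixes = ['r0_', 'r1_'][:len(reduction_tiling)]
tiling = dict([*zip(pw_prefixes, pw_tiling), *zip(reduction_prefixes, reduction_tiling)])
print(tiling)  # {'y': 64, 'x': 32, 'r0_': 128}
```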
  {
    "library": "scipy",
    "name": "abs",
    "source_code": "def abs(X, /):\n    return FoldedDistribution(X)",
    "docstring": "Absolute value of a random variable Parameters ---------- X : The random variable :math:. Returns ------- Y : A random variable :math:. Examples -------- Suppose we have a normally distributed random variable :math:: >>> import numpy as np >>> from scipy import stats >>> X = stats.Normal() We wish to have a random variable :math: distributed according to the folded normal distribution; that is, a random variable :math:. >>> Y = stats.abs(X) The PDF of the distribution in the left half plane is \"folded\" over to the right half plane. Because the normal PDF is symmetric, the resulting PDF is zero for negative arguments and doubled for positive arguments. >>> import matplotlib.pyplot as plt >>> x = np.linspace(0, 5, 300) >>> ax = plt.gca() >>> Y.plot(x='x', y='pdf', t=('x', -1, 5), ax=ax) >>> plt.plot(x, 2 * X.pdf(x), '--') >>> plt.legend(('PDF of ', 'Doubled PDF of ')) >>> plt.show()",
    "type": "function",
    "file_path": "scipy\\scipy\\stats\\_distribution_infrastructure.py",
    "ast_data": "FunctionDef name:abs arguments arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "res_unop",
    "source_code": "def res_unop(self, ns, types_ns, node, opnd):\n    raise NotImplementedError('subclasses must implement')",
    "docstring": "Resolves the return type of a unary operation.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\autograph\\pyct\\static_analysis\\type_inference.py",
    "ast_data": "FunctionDef name:res_unop arg:self arg:ns arg:types_ns arg:node arg:opnd arguments arg arg arg arg arg Raise Call"
  },
  {
    "library": "kornia",
    "name": "to_onnx",
    "source_code": "def to_onnx(self, onnx_name: Optional[str]=None, include_pre_and_post_processor: bool=True, save: bool=True, additional_metadata: Optional[List[Tuple[str, str]]]=None, **kwargs: Any) -> 'onnx.ModelProto':\n    if onnx_name is None:\n        onnx_name = f'kornia_{self.name}.onnx'\n    return super().to_onnx(onnx_name, input_shape=[-1, 3, self.input_image_size or -1, self.input_image_size or -1], output_shape=[-1, 3, self.output_image_size or -1, self.output_image_size or -1], pseudo_shape=[1, 3, self.pseudo_image_size or 352, self.pseudo_image_size or 352], model=self if include_pre_and_post_processor else self.model, save=save, additional_metadata=additional_metadata, **kwargs)",
    "docstring": "Export the current super resolution model to an ONNX model file. Args: onnx_name: The name of the output ONNX file. If not provided, a default name in the format \"Kornia-.onnx\" will be used. image_size: The size to which input images will be resized during preprocessing. If None, image_size will be dynamic. For DexiNed, recommended scale is 352. include_pre_and_post_processor: Whether to include the pre-processor and post-processor in the exported model. save: If to save the model or load it. additional_metadata: Additional metadata to add to the ONNX model. kwargs: Additional arguments for converting to onnx.",
    "type": "method",
    "file_path": "kornia\\kornia\\models\\super_resolution\\base.py",
    "ast_data": "FunctionDef name:to_onnx arg:self arg:onnx_name arg:include_pre_and_post_processor arg:save arg:additional_metadata arguments arg arg arg arg arg arg If Compare Assign Return return:yes Call Call BoolOp BoolOp BoolOp BoolOp BoolOp BoolOp"
  },
  {
    "library": "tensorflow",
    "name": "_Ndtr",
    "source_code": "def _Ndtr(x):\n    half_sqrt_2 = constant_op.constant(0.5 * np.sqrt(2.0), dtype=x.dtype, name='half_sqrt_2')\n    w = x * half_sqrt_2\n    z = math_ops.abs(w)\n    y = array_ops.where(z < half_sqrt_2, 1.0 + math_ops.erf(w), array_ops.where(w > 0.0, 2.0 - math_ops.erfc(z), math_ops.erfc(z)))\n    return 0.5 * y",
    "docstring": "Normal distribution function.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\random_grad.py",
    "ast_data": "FunctionDef name:_Ndtr arg:x arguments arg Assign Call Call Assign Assign Call Assign Call Compare Call Call Compare Call Call Return return:yes"
  },
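The same three-branch formula, written as a scalar Python function with `math.erf`/`math.erfc` (a re-derivation for illustration, not TensorFlow code): the erf form is used near zero for accuracy, and erfc handles the tails.

```python
import math

def ndtr(x):
    # Standard normal CDF via erf/erfc, mirroring _Ndtr's branch structure.
    half_sqrt_2 = 0.5 * math.sqrt(2.0)
    w = x * half_sqrt_2
    z = abs(w)
    if z < half_sqrt_2:
        y = 1.0 + math.erf(w)
    elif w > 0.0:
        y = 2.0 - math.erfc(z)
    else:
        y = math.erfc(z)
    return 0.5 * y

print(ndtr(0.0))   # 0.5
print(ndtr(1.96))  # ~0.975
```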
  {
    "library": "matplotlib",
    "name": "from_extents",
    "source_code": "@staticmethod\ndef from_extents(*args, minpos=None):\n    bbox = Bbox(np.reshape(args, (2, 2)))\n    if minpos is not None:\n        bbox._minpos[:] = minpos\n    return bbox",
    "docstring": "Create a new Bbox from *left*, *bottom*, *right* and *top*. The *y*-axis increases upwards. Parameters ---------- left, bottom, right, top : float The four extents of the bounding box. minpos : float or None If this is supplied, the Bbox will have a minimum positive value set. This is useful when dealing with logarithmic scales and other scales where negative bounds result in floating point errors.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\transforms.py",
    "ast_data": "FunctionDef name:from_extents arguments arg arg Assign Call Call If Compare Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "name",
    "source_code": "@property\ndef name(self) -> str:\n    return self._context_name",
    "docstring": "Context name for the while loop.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\parallel_for\\pfor.py",
    "ast_data": "FunctionDef name:name arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_shift_right_arithmetic_helper",
    "source_code": "def _shift_right_arithmetic_helper(x, y, name=None):\n    assert y.dtype == x.dtype\n    dtype = x.dtype\n    unsigned = dtype in _UNSIGNED_TO_SIGNED_TABLE\n    if unsigned:\n        signed_dtype = _UNSIGNED_TO_SIGNED_TABLE[dtype]\n        x = math_ops.cast(x, signed_dtype)\n        y = math_ops.cast(y, signed_dtype)\n    output = bitwise_ops.right_shift(x, y, name=name)\n    if unsigned:\n        output = math_ops.cast(output, dtype)\n    return output",
    "docstring": "Performs an integer right arithmetic shift irrespective of input type.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\compiler\\tf2xla\\python\\xla.py",
    "ast_data": "FunctionDef name:_shift_right_arithmetic_helper arg:x arg:y arg:name arguments arg arg arg Compare Assign Assign Compare If Assign Assign Call Assign Call Assign Call If Assign Call Return return:yes"
  },
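The unsigned-to-signed round trip can be reproduced in NumPy, whose `>>` on signed integers is already arithmetic:

```python
import numpy as np

# Cast the unsigned value through its signed twin so the shift replicates
# the sign bit, then cast back to the original dtype.
x = np.uint8(0b10110000)        # 176
signed = x.astype(np.int8)      # -80: same bit pattern
shifted = signed >> 2           # arithmetic shift keeps the sign bit
print(shifted.astype(np.uint8))  # 236 == 0b11101100
```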
  {
    "library": "pytorch",
    "name": "is_shared",
    "source_code": "def is_shared(self):\n    if has_torch_function_unary(self):\n        return handle_torch_function(Tensor.is_shared, (self,), self)\n    return self._typed_storage()._is_shared()",
    "docstring": "Checks if tensor is in shared memory. This is always `` for CUDA tensors.",
    "type": "method",
    "file_path": "pytorch\\torch\\_tensor.py",
    "ast_data": "FunctionDef name:is_shared arg:self arguments arg If Call Return return:yes Call Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "lecun_normal",
    "source_code": "@tf_export(v1=['initializers.lecun_normal'])\ndef lecun_normal(seed=None):\n    return VarianceScaling(scale=1.0, mode='fan_in', distribution='truncated_normal', seed=seed)",
    "docstring": "LeCun normal initializer. It draws samples from a truncated normal distribution centered on 0 with standard deviation (after truncation) given by where is the number of input units in the weight tensor. Args: seed: A Python integer. Used to seed the random generator. Returns: An initializer. References: - Self-Normalizing Neural Networks, [Klambauer et al., 2017]( # pylint: disable=line-too-long ([pdf]( - Efficient Backprop, [Lecun et al., 1998](",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\init_ops.py",
    "ast_data": "FunctionDef name:lecun_normal arg:seed arguments arg Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "parse_segments",
    "source_code": "def parse_segments(raw_segments: list[list[int]]) -> list[LlvmCoverageSegment]:\n    ret: list[LlvmCoverageSegment] = []\n    for raw_segment in raw_segments:\n        assert len(raw_segment) == 5 or len(raw_segment) == 6, 'list is not compatible with llvmcom export:'\n        ' Expected to have 5 or 6 elements'\n        if len(raw_segment) == 5:\n            ret.append(LlvmCoverageSegment(raw_segment[0], raw_segment[1], raw_segment[2], raw_segment[3], raw_segment[4], None))\n        else:\n            ret.append(LlvmCoverageSegment(*raw_segment))\n    return ret",
    "docstring": "Creates LlvmCoverageSegment from a list of lists in llvm export json. each segment is represented by 5-element array.",
    "type": "function",
    "file_path": "pytorch\\tools\\code_coverage\\package\\tool\\parser\\llvm_coverage_segment.py",
    "ast_data": "FunctionDef name:parse_segments arg:raw_segments arguments arg For BoolOp Compare Call Compare Call If Compare Call Call Call Call Call Return return:yes"
  },
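A sketch of the padding behavior with a hypothetical `NamedTuple` standing in for `LlvmCoverageSegment` (the field names here are invented): 5-element raw segments get a trailing `None`, 6-element ones are unpacked directly.

```python
from typing import NamedTuple, Optional

class Seg(NamedTuple):  # hypothetical stand-in for LlvmCoverageSegment
    line: int
    col: int
    count: int
    has_count: int
    is_region_entry: int
    is_gap_region: Optional[int] = None

raw_segments = [[1, 1, 3, 1, 1], [4, 9, 0, 1, 1, 0]]
segs = [Seg(*(r if len(r) == 6 else r + [None])) for r in raw_segments]
print(segs[0].is_gap_region, segs[1].is_gap_region)  # None 0
```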
  {
    "library": "django",
    "name": "IOBase",
    "source_code": "class IOBase(GEOSBase):\n\n    def __init__(self):\n        self.ptr = self._constructor()\n        self.destructor.func",
    "docstring": "Base class for GEOS I/O objects.",
    "type": "class",
    "file_path": "django\\django\\contrib\\gis\\geos\\prototypes\\io.py",
    "ast_data": "ClassDef name:IOBase FunctionDef name:__init__ arg:self arguments arg Assign Call"
  },
  {
    "library": "tensorflow",
    "name": "full_job_name",
    "source_code": "@tf_export('experimental.dtensor.full_job_name', v1=[])\ndef full_job_name(task_id: Optional[int]=None) -> str:\n    if task_id is None:\n        task_id = client_id()\n    if num_clients() == 1 and task_id != 0:\n        raise ValueError(f'Unexpected task ID {task_id} in local runs')\n    return f'{job_name()}/replica:0/task:{task_id}'",
    "docstring": "Returns the fully qualified TF job name for this or another task.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\dtensor\\python\\config.py",
    "ast_data": "FunctionDef name:full_job_name arg:task_id arguments arg If Compare Assign Call If BoolOp Compare Call Compare Raise Call Return return:yes Call Call"
  },
  {
    "library": "scipy",
    "name": "__call__",
    "source_code": "def __call__(self, t, *args, **kwargs):\n    raise NotImplementedError",
    "docstring": "Apply the transformation to `` and multiply by the Jacobian determinant. This should be the new integrand after the transformation has been applied so that the following is satisfied:: f_transformed = _VariableTransform(f) cubature(f, a, b) == cubature( f_transformed, *f_transformed.transformed_limits(a, b), )",
    "type": "method",
    "file_path": "scipy\\scipy\\integrate\\_cubature.py",
    "ast_data": "FunctionDef name:__call__ arg:self arg:t arguments arg arg arg arg Raise"
  },
  {
    "library": "tensorflow",
    "name": "dtype",
    "source_code": "@property\ndef dtype(self):\n    return self._values.dtype",
    "docstring": "The of values in this tensor.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\ragged_tensor.py",
    "ast_data": "FunctionDef name:dtype arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, cluster_spec, initializer=None, share_gpu=True):\n    _active_pool_runners.add(self)\n    self._cluster_spec = cluster_spec\n    self._initializer = initializer\n    self._share_gpu = share_gpu\n    self._conn = {}\n    self._runner = None",
    "docstring": "Creates a multi-process pool runner. Args: cluster_spec: Dict for cluster spec. The following is an example of cluster with three workers. {\"worker\": [\"worker0.example.com:2222\", \"worker1.example.com:2222\", \"worker2.example.com:2222\"]} initializer: a callable to called at the startup of worker processes. share_gpu: Whether to share GPUs among workers. If False, each worker is assigned different GPUs in a roundrobin fashion. Raises: RuntimeError: if is not called. ValueError: if there are more than one chief in the .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\multi_process_runner.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:cluster_spec arg:initializer arg:share_gpu arguments arg arg arg arg Call Assign Assign Assign Assign Assign"
  },
  {
    "library": "pytorch",
    "name": "start",
    "source_code": "def start(self):\n    entry = os.path.join(os.path.dirname(__file__), '__autotune_main__.py')\n    subproc_read_fd, write_fd = os.pipe()\n    read_fd, subproc_write_fd = os.pipe()\n    self.write_pipe = os.fdopen(write_fd, 'wb')\n    self.read_pipe = os.fdopen(read_fd, 'rb')\n    self.selector = selectors.DefaultSelector()\n    self.selector.register(self.read_pipe, selectors.EVENT_READ)\n    cmd = [sys.executable, entry, f'--parent={os.getpid()}', f'--read-fd={str(subproc_read_fd)}', f'--write-fd={str(subproc_write_fd)}']\n    extra_env = {'PYTHONPATH': os.environ.get('TORCH_CUSTOM_PYTHONPATH', os.pathsep.join(sys.path)), 'TORCH_WARM_POOL': '0', 'LD_LIBRARY_PATH': get_ld_library_path()}\n    if self.device is not None:\n        extra_env[CUDA_VISIBLE_DEVICES] = str(self.device)\n    self.process = subprocess.Popen(cmd, env={**os.environ, **extra_env}, pass_fds=(subproc_read_fd, subproc_write_fd))\n    os.close(subproc_read_fd)\n    os.close(subproc_write_fd)\n    self.running = True",
    "docstring": "Start the benchmarking subprocess.",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\autotune_process.py",
    "ast_data": "FunctionDef name:start arg:self arguments arg Assign Call Call Assign Call Assign Call Assign Call Assign Call Assign Call Call Assign Call Call Call Assign Call Call Call If Compare Assign Call Assign Call Call Call Assign"
  },
  {
    "library": "tensorflow",
    "name": "encode_png",
    "source_code": "@tf_export('io.encode_png', 'image.encode_png')\n@dispatch.add_dispatch_support\ndef encode_png(image, compression=-1, name=None):\n    return gen_image_ops.encode_png(ops.convert_to_tensor(image), compression, name)",
    "docstring": "PNG-encode an image. is a rank-N Tensor of type uint8 or uint16 with shape , where is: * 1: for grayscale. * 2: for grayscale + alpha. * 3: for RGB. * 4: for RGBA. The ZLIB compression level, , can be -1 for the PNG-encoder default or a value from 0 to 9. 9 is the highest compression level, generating the smallest output, but is slower. Args: image: A . Must be one of the following types: , . Rank N >= 3 with shape . compression: An optional . Defaults to . Compression level. name: A name for the operation (optional). Returns: A of type .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\image_ops_impl.py",
    "ast_data": "FunctionDef name:encode_png arg:image arg:compression arg:name arguments arg arg arg Return return:yes Call Call Call"
  },
  {
    "library": "numpy",
    "name": "new_fcompiler",
    "source_code": "def new_fcompiler(plat=None, compiler=None, verbose=0, dry_run=0, force=0, requiref90=False, c_compiler=None):\n    global failed_fcompilers\n    fcompiler_key = (plat, compiler)\n    if fcompiler_key in failed_fcompilers:\n        return None\n    load_all_fcompiler_classes()\n    if plat is None:\n        plat = os.name\n    if compiler is None:\n        compiler = get_default_fcompiler(plat, requiref90=requiref90, c_compiler=c_compiler)\n    if compiler in fcompiler_class:\n        module_name, klass, long_description = fcompiler_class[compiler]\n    elif compiler in fcompiler_aliases:\n        module_name, klass, long_description = fcompiler_aliases[compiler]\n    else:\n        msg = \"don't know how to compile Fortran code on platform '%s'\" % plat\n        if compiler is not None:\n            msg = msg + \" with '%s' compiler.\" % compiler\n            msg = msg + ' Supported compilers are: %s)' % ','.join(fcompiler_class.keys())\n        log.warn(msg)\n        failed_fcompilers.add(fcompiler_key)\n        return None\n    compiler = klass(verbose=verbose, dry_run=dry_run, force=force)\n    compiler.c_compiler = c_compiler\n    return compiler",
    "docstring": "Generate an instance of some FCompiler subclass for the supplied platform/compiler combination.",
    "type": "function",
    "file_path": "numpy\\numpy\\distutils\\fcompiler\\__init__.py",
    "ast_data": "FunctionDef name:new_fcompiler arg:plat arg:compiler arg:verbose arg:dry_run arg:force arg:requiref90 arg:c_compiler arguments arg arg arg arg arg arg arg Assign If Compare Return return:no Call If Compare Assign If Compare Assign Call If Compare Assign If Compare Assign Assign If Compare Assign Assign Call Call Call Call Return return:no Assign Call Assign Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "predict_log_proba",
    "source_code": "def predict_log_proba(self, X):\n    return np.log(self.predict_proba(X))",
    "docstring": "Predict class log-probabilities for X. The predicted class log-probabilities of an input sample is computed as the weighted mean predicted class log-probabilities of the classifiers in the ensemble. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) The training input samples. Sparse matrix can be CSC, CSR, COO, DOK, or LIL. COO, DOK, and LIL are converted to CSR. Returns ------- p : ndarray of shape (n_samples, n_classes) The class probabilities of the input samples. The order of outputs is the same of that of the :term: attribute.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\ensemble\\_weight_boosting.py",
    "ast_data": "FunctionDef name:predict_log_proba arg:self arg:X arguments arg arg Return return:yes Call Call"
  },
  {
    "library": "scrapy",
    "name": "defer_succeed",
    "source_code": "def defer_succeed(result: _T) -> Deferred[_T]:\n    from twisted.internet import reactor\n    d: Deferred[_T] = Deferred()\n    reactor.callLater(_DEFER_DELAY, d.callback, result)\n    return d",
    "docstring": "Same as twisted.internet.defer.succeed but delay calling callback until next reactor loop It delays by 100ms so reactor has a chance to go through readers and writers before attending pending delayed calls, so do not set delay to zero.",
    "type": "function",
    "file_path": "scrapy\\scrapy\\utils\\defer.py",
    "ast_data": "FunctionDef name:defer_succeed arg:result arguments arg Call Call Return return:yes"
  },
  {
    "library": "pygame",
    "name": "add_internal",
    "source_code": "def add_internal(self, sprite, layer=None):\n    self.spritedict[sprite] = None",
    "docstring": "For adding a sprite to this group internally. :param sprite: The sprite we are adding. :param layer: the layer to add to, if the group type supports layers",
    "type": "method",
    "file_path": "pygame\\src_py\\sprite.py",
    "ast_data": "FunctionDef name:add_internal arg:self arg:sprite arg:layer arguments arg arg arg Assign"
  },
  {
    "library": "kornia",
    "name": "SimpleKD",
    "source_code": "class SimpleKD(nn.Module):\n\n    def __init__(self, patch_size: int=32, kernel_type: str='polar', whitening: str='pcawt', training_set: str='liberty', output_dims: int=128) -> None:\n        super().__init__()\n        relative: bool = kernel_type == 'polar'\n        sigma: float = 1.4 * (patch_size / 64)\n        self.patch_size = patch_size\n        smoothing = GaussianBlur2d((5, 5), (sigma, sigma), 'replicate')\n        gradients = MKDGradients()\n        ori = EmbedGradients(patch_size=patch_size, relative=relative)\n        ese = ExplicitSpacialEncoding(kernel_type=kernel_type, fmap_size=patch_size, in_dims=ori.kernel.d)\n        wh = Whitening(whitening, load_whitening_model(kernel_type, training_set), in_dims=ese.odims, output_dims=output_dims)\n        self.features = nn.Sequential(smoothing, gradients, ori, ese, wh)\n\n    def forward(self, x: Tensor) -> Tensor:\n        return self.features(x)",
    "docstring": "Example to write custom Kernel Descriptors.",
    "type": "class",
    "file_path": "kornia\\kornia\\feature\\mkd.py",
    "ast_data": "ClassDef name:SimpleKD FunctionDef name:__init__ arg:self arg:patch_size arg:kernel_type arg:whitening arg:training_set arg:output_dims arguments arg arg arg arg arg arg Call Call Compare Assign Assign Call Assign Call Assign Call Assign Call Assign Call Call Assign Call FunctionDef name:forward arg:self arg:x arguments arg arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "braced_sets",
    "source_code": "@cached_property\ndef braced_sets(self) -> list[list[TokenInfo]]:\n    return [self.tokens[b:e + 1] for b, e in self.bracket_pairs.items() if self.is_braced_set(b, e)]",
    "docstring": "A list of lists of tokens, each representing a braced set, like {1}",
    "type": "method",
    "file_path": "pytorch\\tools\\linter\\adapters\\set_linter.py",
    "ast_data": "FunctionDef name:braced_sets arg:self arguments arg Return return:yes Call Call"
  },
  {
    "library": "scikit-learn",
    "name": "make_predictor",
    "source_code": "def make_predictor(self, binning_thresholds):\n    predictor_nodes = np.zeros(self.n_nodes, dtype=PREDICTOR_RECORD_DTYPE)\n    binned_left_cat_bitsets = np.zeros((self.n_categorical_splits, 8), dtype=X_BITSET_INNER_DTYPE)\n    raw_left_cat_bitsets = np.zeros((self.n_categorical_splits, 8), dtype=X_BITSET_INNER_DTYPE)\n    _fill_predictor_arrays(predictor_nodes, binned_left_cat_bitsets, raw_left_cat_bitsets, self.root, binning_thresholds, self.n_bins_non_missing)\n    return TreePredictor(predictor_nodes, binned_left_cat_bitsets, raw_left_cat_bitsets)",
    "docstring": "Make a TreePredictor object out of the current tree. Parameters ---------- binning_thresholds : array-like of floats Corresponds to the bin_thresholds_ attribute of the BinMapper. For each feature, this stores: - the bin frontiers for continuous features - the unique raw category values for categorical features Returns ------- A TreePredictor object.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\ensemble\\_hist_gradient_boosting\\grower.py",
    "ast_data": "FunctionDef name:make_predictor arg:self arg:binning_thresholds arguments arg arg Assign Call Assign Call Assign Call Call Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "__iter__",
    "source_code": "def __iter__(self):\n    for p in self.param_grid:\n        items = sorted(p.items())\n        if not items:\n            yield {}\n        else:\n            keys, values = zip(*items)\n            for v in product(*values):\n                params = dict(zip(keys, v))\n                yield params",
    "docstring": "Iterate over the points in the grid. Returns ------- params : iterator over dict of str to any Yields dictionaries mapping each estimator parameter to one of its allowed values.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\model_selection\\_search.py",
    "ast_data": "FunctionDef name:__iter__ arg:self arguments arg For Assign Call Call If Assign Call For Call Assign Call Call"
  },
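The expansion is just sorted items plus `itertools.product`; standalone, with an illustrative grid:

```python
from itertools import product

# Sorting the keys makes the iteration order deterministic; product expands
# every combination of per-key value lists.
param_grid = {'kernel': ['rbf', 'linear'], 'C': [1, 10]}
items = sorted(param_grid.items())   # [('C', [1, 10]), ('kernel', [...])]
keys, values = zip(*items)
for combo in product(*values):
    print(dict(zip(keys, combo)))
# {'C': 1, 'kernel': 'rbf'} ... four dicts in total
```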
  {
    "library": "tensorflow",
    "name": "_container_strategy",
    "source_code": "def _container_strategy(self):\n    container_strategy = self._container_strategy_weakref()\n    assert container_strategy is not None\n    return container_strategy",
    "docstring": "Get the containing . This should not generally be needed except when creating a new and to validate that the caller is in the correct . Returns: The such that is .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\distribute_lib.py",
    "ast_data": "FunctionDef name:_container_strategy arg:self arguments arg Assign Call Compare Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "config_info_line",
    "source_code": "def config_info_line(name, help_text):\n    print('\\t--config=%-12s\\t# %s' % (name, help_text))",
    "docstring": "Helper function to print formatted help text for Bazel config options.",
    "type": "function",
    "file_path": "tensorflow\\configure.py",
    "ast_data": "FunctionDef name:config_info_line arg:name arg:help_text arguments arg arg Call"
  },
  {
    "library": "kornia",
    "name": "add_residual",
    "source_code": "def add_residual(x, brange, residual, residual_scale_factor, scaling_vector=None):\n    if scaling_vector is None:\n        x_flat = x.flatten(1)\n        residual = residual.flatten(1)\n        x_plus_residual = torch.index_add(x_flat, 0, brange, residual.to(dtype=x.dtype), alpha=residual_scale_factor)\n    else:\n        x_plus_residual = scaled_index_add(x, brange, residual.to(dtype=x.dtype), scaling=scaling_vector, alpha=residual_scale_factor)\n    return x_plus_residual",
    "docstring": "Add residual connections.",
    "type": "function",
    "file_path": "kornia\\kornia\\feature\\dedode\\transformer\\layers\\block.py",
    "ast_data": "FunctionDef name:add_residual arg:x arg:brange arg:residual arg:residual_scale_factor arg:scaling_vector arguments arg arg arg arg arg If Compare Assign Call Assign Call Assign Call Call Assign Call Call Return return:yes"
  },
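On the non-scaled path the whole function is one `torch.index_add`: rows selected by `brange` receive `alpha * residual`, everything else passes through. A minimal demonstration:

```python
import torch

x = torch.zeros(4, 3)
brange = torch.tensor([0, 2])       # which rows kept a residual branch
residual = torch.ones(2, 3)
out = torch.index_add(x, 0, brange, residual, alpha=0.5)
print(out[:, 0])  # tensor([0.5000, 0.0000, 0.5000, 0.0000])
```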
  {
    "library": "django",
    "name": "get_version",
    "source_code": "def get_version(version=None):\n    version = get_complete_version(version)\n    main = get_main_version(version)\n    sub = ''\n    if version[3] == 'alpha' and version[4] == 0:\n        git_changeset = get_git_changeset()\n        if git_changeset:\n            sub = '.dev%s' % git_changeset\n    elif version[3] != 'final':\n        mapping = {'alpha': 'a', 'beta': 'b', 'rc': 'rc'}\n        sub = mapping[version[3]] + str(version[4])\n    return main + sub",
    "docstring": "Return a PEP 440-compliant version number from VERSION.",
    "type": "function",
    "file_path": "django\\django\\utils\\version.py",
    "ast_data": "FunctionDef name:get_version arg:version arguments arg Assign Call Assign Call Assign If BoolOp Compare Compare Assign Call If Assign If Compare Assign Assign Call Return return:yes"
  },
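Worked examples of the tuple-to-PEP 440 mapping (simplified: `get_git_changeset` is skipped, and the main version is not trimmed to two components the way Django's `get_main_version` does for a zero patch level):

```python
# Stable-release tuples render as-is; pre-releases append a/b/rc plus a number.
mapping = {'alpha': 'a', 'beta': 'b', 'rc': 'rc'}

def render(version):
    main = '.'.join(str(x) for x in version[:3])
    if version[3] == 'final':
        return main
    return main + mapping[version[3]] + str(version[4])

print(render((5, 0, 0, 'final', 0)))  # 5.0.0
print(render((5, 1, 0, 'beta', 2)))   # 5.1.0b2
print(render((5, 1, 0, 'rc', 1)))     # 5.1.0rc1
```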
  {
    "library": "tensorflow",
    "name": "create_variable",
    "source_code": "def create_variable(self, feature_column, name, shape, dtype=None, trainable=True, use_resource=True, initializer=None):\n    if name in self._cols_to_vars_map[feature_column]:\n        raise ValueError('Variable already exists.')\n    with trackable.no_manual_dependency_tracking_scope(self._layer):\n        var = self._layer.add_weight(name=name, shape=shape, dtype=dtype, initializer=initializer, trainable=self._trainable and trainable, use_resource=use_resource, getter=variable_scope.get_variable)\n    if isinstance(var, variables.PartitionedVariable):\n        for v in var:\n            part_name = name + '/' + str(v._get_save_slice_info().var_offset[0])\n            self._layer._track_trackable(v, feature_column.name + '/' + part_name)\n    elif isinstance(var, trackable.Trackable):\n        self._layer._track_trackable(var, feature_column.name + '/' + name)\n    self._cols_to_vars_map[feature_column][name] = var\n    return var",
    "docstring": "Creates a new variable. Args: feature_column: A object this variable corresponds to. name: variable name. shape: variable shape. dtype: The type of the variable. Defaults to or . trainable: Whether this variable is trainable or not. use_resource: If true, we use resource variables. Otherwise we use RefVariable. initializer: initializer instance (callable). Returns: The created variable.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\feature_column\\feature_column_v2.py",
    "ast_data": "FunctionDef name:create_variable arg:self arg:feature_column arg:name arg:shape arg:dtype arg:trainable arg:use_resource arg:initializer arguments arg arg arg arg arg arg arg arg If Compare Raise Call With Call Assign Call BoolOp If Call For Assign Call Call Call If Call Call Assign Return return:yes"
  },
  {
    "library": "kornia",
    "name": "get_sift_pooling_kernel",
    "source_code": "def get_sift_pooling_kernel(ksize: int=25) -> Tensor:\n    ks_2: float = float(ksize) / 2.0\n    xc2 = ks_2 - (torch.arange(ksize).float() + 0.5 - ks_2).abs()\n    kernel = torch.ger(xc2, xc2) / ks_2 ** 2\n    return kernel",
    "docstring": "Return a weighted pooling kernel for SIFT descriptor. Args: ksize: kernel_size. Returns: the pooling kernel with shape :math:.",
    "type": "function",
    "file_path": "kornia\\kornia\\feature\\siftdesc.py",
    "ast_data": "FunctionDef name:get_sift_pooling_kernel arg:ksize arguments arg Call Assign Call Call Call Assign Call Return return:yes"
  },
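A NumPy transcription showing the triangular weighting and the outer product (the torch version uses `torch.ger`, which is the same outer product):

```python
import numpy as np

# Weights fall off linearly from the patch centre; the outer product of the
# 1-D triangle with itself gives the 2-D pooling kernel, peaking at 1.0.
ksize = 5
ks_2 = ksize / 2.0
xc2 = ks_2 - np.abs(np.arange(ksize) + 0.5 - ks_2)
kernel = np.outer(xc2, xc2) / ks_2 ** 2
print(kernel.round(2))  # centre entry is 1.0
```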
  {
    "library": "scipy",
    "name": "axis_reverse",
    "source_code": "def axis_reverse(a, axis=-1):\n    return axis_slice(a, step=-1, axis=axis)",
    "docstring": "Reverse the 1-D slices of along axis . Returns axis_slice(a, step=-1, axis=axis).",
    "type": "function",
    "file_path": "scipy\\scipy\\signal\\_arraytools.py",
    "ast_data": "FunctionDef name:axis_reverse arg:a arg:axis arguments arg arg Return return:yes Call"
  },
  {
    "library": "numpy",
    "name": "asmatrix",
    "source_code": "@set_module('numpy')\ndef asmatrix(data, dtype=None):\n    return matrix(data, dtype=dtype, copy=False)",
    "docstring": "Interpret the input as a matrix. Unlike , does not make a copy if the input is already a matrix or an ndarray. Equivalent to `data` interpreted as a matrix. Examples -------- >>> import numpy as np >>> x = np.array([[1, 2], [3, 4]]) >>> m = np.asmatrix(x) >>> x[0,0] = 5 >>> m matrix([[5, 2], [3, 4]])",
    "type": "function",
    "file_path": "numpy\\numpy\\matrixlib\\defmatrix.py",
    "ast_data": "FunctionDef name:asmatrix arg:data arg:dtype arguments arg arg Return return:yes Call Call"
  },
  {
    "library": "matplotlib",
    "name": "compress_csc",
    "source_code": "def compress_csc(self):\n    _, unique, indices = np.unique(self.rows + self.n * self.cols, return_index=True, return_inverse=True)\n    self.rows = self.rows[unique]\n    self.cols = self.cols[unique]\n    self.vals = np.bincount(indices, weights=self.vals)",
    "docstring": "Compress rows, cols, vals / summing duplicates. Sort for csc format.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\tri\\_triinterpolate.py",
    "ast_data": "FunctionDef name:compress_csc arg:self arguments arg Assign Call Assign Assign Assign Call"
  },
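The dedupe-and-sum trick is worth seeing in isolation: each (row, col) pair is encoded as a single integer key, then `np.unique` picks one representative per key and `np.bincount` sums the values that share it.

```python
import numpy as np

n = 4  # number of rows
rows = np.array([0, 2, 0, 1])
cols = np.array([1, 0, 1, 3])
vals = np.array([1.0, 2.0, 3.0, 4.0])  # (0, 1) appears twice

_, unique, indices = np.unique(rows + n * cols, return_index=True, return_inverse=True)
print(rows[unique], cols[unique])          # deduplicated, csc-sorted coordinates
print(np.bincount(indices, weights=vals))  # [2. 4. 4.] -- (0, 1) summed to 4.0
```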
  {
    "library": "tensorflow",
    "name": "_add_varlen_feature",
    "source_code": "def _add_varlen_feature(self, key, feature):\n    if not feature.dtype:\n        raise ValueError(f'Missing type for feature {key}. Received feature={feature}')\n    self._add_sparse_key(key, feature.dtype)",
    "docstring": "Adds a VarLenFeature.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\parsing_config.py",
    "ast_data": "FunctionDef name:_add_varlen_feature arg:self arg:key arg:feature arguments arg arg arg If Raise Call Call"
  },
  {
    "library": "pytorch",
    "name": "essential_node_count",
    "source_code": "def essential_node_count(self) -> int:\n    return sum((1 for n in self.graph.nodes() if n.kind() not in self._EXCLUDED_NODE_KINDS))",
    "docstring": "Return the number of nodes in the subgraph excluding those in .",
    "type": "method",
    "file_path": "pytorch\\torch\\onnx\\verification.py",
    "ast_data": "FunctionDef name:essential_node_count arg:self arguments arg Return return:yes Call Call Compare Call"
  },
  {
    "library": "tensorflow",
    "name": "configure_ios",
    "source_code": "def configure_ios(environ_cp):\n    if not is_macos():\n        return\n    if not get_var(environ_cp, 'TF_CONFIGURE_IOS', 'iOS', False):\n        return\n    for filepath in APPLE_BAZEL_FILES:\n        existing_filepath = os.path.join(_TF_WORKSPACE_ROOT, filepath + '.apple')\n        renamed_filepath = os.path.join(_TF_WORKSPACE_ROOT, filepath)\n        symlink_force(existing_filepath, renamed_filepath)\n    for filepath in IOS_FILES:\n        filename = os.path.basename(filepath)\n        new_filepath = os.path.join(_TF_WORKSPACE_ROOT, filename)\n        symlink_force(filepath, new_filepath)",
    "docstring": "Configures TensorFlow for iOS builds.",
    "type": "function",
    "file_path": "tensorflow\\configure.py",
    "ast_data": "FunctionDef name:configure_ios arg:environ_cp arguments arg If Call Return return:no If Call Return return:no For Assign Call Assign Call Call For Assign Call Assign Call Call"
  },
  {
    "library": "tensorflow",
    "name": "res_compare",
    "source_code": "def res_compare(self, ns, types_ns, node, left, right):\n    raise NotImplementedError('subclasses must implement')",
    "docstring": "Resolves the return type of a unary operation.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\autograph\\pyct\\static_analysis\\type_inference.py",
    "ast_data": "FunctionDef name:res_compare arg:self arg:ns arg:types_ns arg:node arg:left arg:right arguments arg arg arg arg arg arg Raise Call"
  },
  {
    "library": "tensorflow",
    "name": "ragged_one_hot",
    "source_code": "@dispatch.dispatch_for_api(array_ops.one_hot)\ndef ragged_one_hot(indices: ragged_tensor.Ragged, depth, on_value=None, off_value=None, axis=None, dtype=None, name=None):\n    if isinstance(axis, int) and axis >= 0:\n        if axis <= indices.ragged_rank:\n            raise ValueError('axis (%d) must be greater than indices.ragged_rank (%d).' % (axis, indices.ragged_rank))\n        axis -= indices.ragged_rank\n    with ops.name_scope(name, 'RaggedOneHot', [indices, depth, on_value, off_value, axis]):\n        indices = ragged_tensor.convert_to_tensor_or_ragged_tensor(indices, name='indices')\n        return indices.with_flat_values(array_ops.one_hot(indices.flat_values, depth, on_value, off_value, axis, dtype, name))",
    "docstring": "Applies tf.one_hot along the values of a RaggedTensor.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\ragged_array_ops.py",
    "ast_data": "FunctionDef name:ragged_one_hot arg:indices arg:depth arg:on_value arg:off_value arg:axis arg:dtype arg:name arguments arg arg arg arg arg arg arg If BoolOp Call Compare If Compare Raise Call With Call Assign Call Return return:yes Call Call Call"
  },
  {
    "library": "authlib",
    "name": "decrypt",
    "source_code": "def decrypt(self, ciphertext, aad, iv, tag, key):\n    self.check_iv(iv)\n    chacha = ChaCha20Poly1305(key)\n    return chacha.decrypt(iv, ciphertext + tag, aad)",
    "docstring": "Content Decryption with AEAD_CHACHA20_POLY1305. :param ciphertext: ciphertext in bytes :param aad: additional authenticated data in bytes :param iv: initialization vector in bytes :param tag: authentication tag in bytes :param key: encrypted key in bytes :return: message",
    "type": "method",
    "file_path": "authlib\\authlib\\jose\\drafts\\_jwe_enc_cryptography.py",
    "ast_data": "FunctionDef name:decrypt arg:self arg:ciphertext arg:aad arg:iv arg:tag arg:key arguments arg arg arg arg arg arg Call Assign Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "scope",
    "source_code": "def scope(self, dep: MemoryDep) -> MemoryEstimate:\n    if self.inside_reduction and (self.has_reduction_var(dep.index) or dep.is_indirect()):\n        return self.loops[-1]\n    return self.outside_loop",
    "docstring": "Determine how a read/write should be categorized",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\codegen\\simd_kernel_features.py",
    "ast_data": "FunctionDef name:scope arg:self arg:dep arguments arg arg If BoolOp BoolOp Call Call Return return:yes Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "pull_nightly_version",
    "source_code": "@timed('Pulling nightly PyTorch')\ndef pull_nightly_version(site_dir: Path) -> None:\n    nightly_version = _nightly_version(site_dir)\n    cmd = git('merge', nightly_version)\n    subprocess.check_call(cmd)",
    "docstring": "Fetches the nightly version and then merges it .",
    "type": "function",
    "file_path": "pytorch\\tools\\nightly.py",
    "ast_data": "FunctionDef name:pull_nightly_version arg:site_dir arguments arg Assign Call Assign Call Call Call"
  },
  {
    "library": "matplotlib",
    "name": "_get_imagesave_wildcards",
    "source_code": "def _get_imagesave_wildcards(self):\n    default_filetype = self.get_default_filetype()\n    filetypes = self.get_supported_filetypes_grouped()\n    sorted_filetypes = sorted(filetypes.items())\n    wildcards = []\n    extensions = []\n    filter_index = 0\n    for i, (name, exts) in enumerate(sorted_filetypes):\n        ext_list = ';'.join(['*.%s' % ext for ext in exts])\n        extensions.append(exts[0])\n        wildcard = f'{name} ({ext_list})|{ext_list}'\n        if default_filetype in exts:\n            filter_index = i\n        wildcards.append(wildcard)\n    wildcards = '|'.join(wildcards)\n    return (wildcards, extensions, filter_index)",
    "docstring": "Return the wildcard string for the filesave dialog.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\backends\\backend_wx.py",
    "ast_data": "FunctionDef name:_get_imagesave_wildcards arg:self arguments arg Assign Call Assign Call Assign Call Call Assign Assign Assign For Call Assign Call Call Assign If Compare Assign Call Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "jvp",
    "source_code": "def jvp(self, primals, unconnected_gradients=UnconnectedGradients.NONE):\n    unconnected_gradients = UnconnectedGradients(unconnected_gradients)\n    if self._accumulator is None:\n        raise ValueError('Called jvp() without first tracing anything.')\n\n    def _fetch_jvp(tensor):\n        if hasattr(tensor, 'handle'):\n            unwrapped_tensor = ops.convert_to_tensor(tensor.handle)\n        else:\n            unwrapped_tensor = tensor\n        result = pywrap_tfe.TFE_Py_ForwardAccumulatorJVP(self._accumulator, unwrapped_tensor)\n        if result is None and unconnected_gradients == UnconnectedGradients.ZERO:\n            result = array_ops.zeros_like(tensor)\n        return result\n    return nest.map_structure(_fetch_jvp, primals)",
    "docstring": "Fetches the Jacobian-vector product computed for . Note that this method performs no computation, and simply looks up a JVP that was already computed (unlike backprop using a , where the computation happens on the call to ). Args: primals: A watched Tensor or structure of Tensors to fetch the JVPs for. unconnected_gradients: A value which can either hold 'none' or 'zero' and alters the value which will be returned if no JVP was computed for . The possible values and effects are detailed in 'tf.UnconnectedGradients' and it defaults to 'none'. Returns: Tensors with the same shapes and dtypes as , or None if no JVP is available.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\forwardprop.py",
    "ast_data": "FunctionDef name:jvp arg:self arg:primals arg:unconnected_gradients arguments arg arg arg Assign Call If Compare Raise Call FunctionDef name:_fetch_jvp arg:tensor arguments arg If Call Assign Call Assign Assign Call If BoolOp Compare Compare Assign Call Return return:yes Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "with_accounted_types",
    "source_code": "def with_accounted_types(self, account_type_regexes):\n    self._options['account_type_regexes'] = copy.copy(account_type_regexes)\n    return self",
    "docstring": "Selectively counting statistics based on node types. Here, 'types' means the profiler nodes' properties. Profiler by default consider device name (e.g. /job:xx/.../device:GPU:0) and operation type (e.g. MatMul) as profiler nodes' properties. User can also associate customized 'types' to profiler nodes through OpLogProto proto. For example, user can select profiler nodes placed on gpu:0 with: If none of a node's properties match the specified regexes, the node is not displayed nor accounted. Args: account_type_regexes: A list of regexes specifying the types. Returns: self.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\profiler\\option_builder.py",
    "ast_data": "FunctionDef name:with_accounted_types arg:self arg:account_type_regexes arguments arg arg Assign Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "calculate_qparams",
    "source_code": "@abstractmethod\ndef calculate_qparams(self) -> tuple[torch.Tensor, torch.Tensor]:\n    pass",
    "docstring": "Calculate quantization parameter based on the stats attached to the observer module and returns a tuple of scale and zero_point Tensor",
    "type": "method",
    "file_path": "pytorch\\torch\\ao\\quantization\\observer.py",
    "ast_data": "FunctionDef name:calculate_qparams arg:self arguments arg"
  },
  {
    "library": "pygame",
    "name": "_clip_and_draw_aaline",
    "source_code": "def _clip_and_draw_aaline(surf, rect, color, line, blend):\n    if not clip_line(line, BoundingBox(rect.x - 1, rect.y - 1, rect.x + rect.w, rect.y + rect.h), use_float=True):\n        return\n    _draw_aaline(surf, color, Point(line[0], line[1]), Point(line[2], line[3]), blend)\n    return",
    "docstring": "draw anti-aliased line between two endpoints.",
    "type": "function",
    "file_path": "pygame\\src_py\\draw_py.py",
    "ast_data": "FunctionDef name:_clip_and_draw_aaline arg:surf arg:rect arg:color arg:line arg:blend arguments arg arg arg arg arg If Call Call Return return:no Call Call Call Return return:no"
  },
  {
    "library": "kornia",
    "name": "sigmoid_log_double_softmax",
    "source_code": "def sigmoid_log_double_softmax(sim: Tensor, z0: Tensor, z1: Tensor) -> Tensor:\n    b, m, n = sim.shape\n    certainties = F.logsigmoid(z0) + F.logsigmoid(z1).transpose(1, 2)\n    scores0 = F.log_softmax(sim, 2)\n    scores1 = F.log_softmax(sim.transpose(-1, -2).contiguous(), 2).transpose(-1, -2)\n    scores = sim.new_full((b, m + 1, n + 1), 0)\n    scores[:, :m, :n] = scores0 + scores1 + certainties\n    scores[:, :-1, -1] = F.logsigmoid(-z0.squeeze(-1))\n    scores[:, -1, :-1] = F.logsigmoid(-z1.squeeze(-1))\n    return scores",
    "docstring": "Create the log assignment matrix from logits and similarity.",
    "type": "function",
    "file_path": "kornia\\kornia\\feature\\lightglue.py",
    "ast_data": "FunctionDef name:sigmoid_log_double_softmax arg:sim arg:z0 arg:z1 arguments arg arg arg Assign Assign Call Call Call Assign Call Assign Call Call Call Call Assign Call Assign Assign Call Call Assign Call Call Return return:yes"
  },
  {
    "library": "kornia",
    "name": "random",
    "source_code": "@classmethod\ndef random(cls, batch_size: Optional[int]=None, device: Optional[Device]=None, dtype: Optional[Dtype]=None) -> So2:\n    if batch_size is not None:\n        KORNIA_CHECK(batch_size >= 1, msg='batch_size must be positive')\n        real_data = rand((batch_size,), device=device, dtype=dtype)\n        imag_data = rand((batch_size,), device=device, dtype=dtype)\n    else:\n        real_data = rand((), device=device, dtype=dtype)\n        imag_data = rand((), device=device, dtype=dtype)\n    return cls(complex(real_data, imag_data))",
    "docstring": "Create a So2 group representing a random rotation. Args: batch_size: the batch size of the underlying data. device: device to place the result on. dtype: dtype of the result. Example: >>> s = So2.random() >>> s = So2.random(batch_size=3)",
    "type": "method",
    "file_path": "kornia\\kornia\\geometry\\liegroup\\so2.py",
    "ast_data": "FunctionDef name:random arg:cls arg:batch_size arg:device arg:dtype arguments arg arg arg arg If Compare Call Compare Assign Call Assign Call Assign Call Assign Call Return return:yes Call Call"
  },
  {
    "library": "sphinx",
    "name": "is_smartquotable",
    "source_code": "def is_smartquotable(node: Node) -> bool:\n    for pnode in traverse_parent(node.parent):\n        if isinstance(pnode, NON_SMARTQUOTABLE_PARENT_NODES):\n            return False\n        if pnode.get('support_smartquotes', None) is False:\n            return False\n    return getattr(node, 'support_smartquotes', None) is not False",
    "docstring": "Check whether the node is smart-quotable or not.",
    "type": "function",
    "file_path": "sphinx\\sphinx\\util\\nodes.py",
    "ast_data": "FunctionDef name:is_smartquotable arg:node arguments arg For Call If Call Return return:yes If Compare Call Return return:yes Return return:yes Compare Call"
  },
  {
    "library": "kornia",
    "name": "project",
    "source_code": "def project(self, points: Vector3) -> Vector2:\n    return self.distortion.distort(self.params, self.projection.project(points))",
    "docstring": "Projects 3D points to 2D camera plane. Args: points: Vector3 representing 3D points. Returns: Vector2 representing the projected 2D points. Example: >>> points = Vector3(torch.Tensor([1.0, 1.0, 1.0])) >>> cam = CameraModel(ImageSize(480, 640), CameraModelType.PINHOLE, torch.Tensor([328., 328., 320., 240.])) >>> cam.project(points) x: 648.0 y: 568.0",
    "type": "method",
    "file_path": "kornia\\kornia\\sensors\\camera\\camera_model.py",
    "ast_data": "FunctionDef name:project arg:self arg:points arguments arg arg Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "SessionInterface",
    "source_code": "class SessionInterface(object):\n\n    @property\n    def graph(self):\n        raise NotImplementedError('graph')\n\n    @property\n    def sess_str(self):\n        raise NotImplementedError('sess_str')\n\n    def run(self, fetches, feed_dict=None, options=None, run_metadata=None):\n        raise NotImplementedError('run')\n\n    def partial_run_setup(self, fetches, feeds=None):\n        raise NotImplementedError('partial_run_setup')\n\n    def partial_run(self, handle, fetches, feed_dict=None):\n        raise NotImplementedError('partial_run')",
    "docstring": "Base class for implementations of TensorFlow client sessions.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\client\\session.py",
    "ast_data": "ClassDef name:SessionInterface FunctionDef name:graph arg:self arguments arg Raise Call FunctionDef name:sess_str arg:self arguments arg Raise Call FunctionDef name:run arg:self arg:fetches arg:feed_dict arg:options arg:run_metadata arguments arg arg arg arg arg Raise Call FunctionDef name:partial_run_setup arg:self arg:fetches arg:feeds arguments arg arg arg Raise Call FunctionDef name:partial_run arg:self arg:handle arg:fetches arg:feed_dict arguments arg arg arg arg Raise Call"
  },
  {
    "library": "pytorch",
    "name": "is_pruned",
    "source_code": "def is_pruned(module):\n    for _, submodule in module.named_modules():\n        for hook in submodule._forward_pre_hooks.values():\n            if isinstance(hook, BasePruningMethod):\n                return True\n    return False",
    "docstring": "Check if a module is pruned by looking for pruning pre-hooks. Check whether `BasePruningMethod` is pruned. Examples: >>> from torch.nn.utils import prune >>> m = nn.Linear(5, 7) >>> print(prune.is_pruned(m)) False >>> prune.random_unstructured(m, name='weight', amount=0.2) >>> print(prune.is_pruned(m)) True",
    "type": "function",
    "file_path": "pytorch\\torch\\nn\\utils\\prune.py",
    "ast_data": "FunctionDef name:is_pruned arg:module arguments arg For Call For Call If Call Return return:yes Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "non_debug_graph_def",
    "source_code": "@property\ndef non_debug_graph_def(self):\n    self._reconstruct_non_debug_graph_def()\n    return self._non_debug_graph_def",
    "docstring": "The GraphDef without the Copy* and Debug* nodes added by the debugger.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\debug\\lib\\debug_graphs.py",
    "ast_data": "FunctionDef name:non_debug_graph_def arg:self arguments arg Call Return return:yes"
  },
  {
    "library": "pandas",
    "name": "_DataFrameTableBuilder",
    "source_code": "class _DataFrameTableBuilder(_TableBuilderAbstract):\n\n    def __init__(self, *, info: DataFrameInfo) -> None:\n        self.info: DataFrameInfo = info\n\n    def get_lines(self) -> list[str]:\n        self._lines = []\n        if self.col_count == 0:\n            self._fill_empty_info()\n        else:\n            self._fill_non_empty_info()\n        return self._lines\n\n    def _fill_empty_info(self) -> None:\n        self.add_object_type_line()\n        self.add_index_range_line()\n        self._lines.append(f'Empty {type(self.data).__name__}\\n')\n\n    @abstractmethod\n    def _fill_non_empty_info(self) -> None:\n        pass\n\n    @property\n    def data(self) -> DataFrame:\n        return self.info.data\n\n    @property\n    def ids(self) -> Index:\n        return self.info.ids\n\n    @property\n    def col_count(self) -> int:\n        return self.info.col_count\n\n    def add_memory_usage_line(self) -> None:\n        self._lines.append(f'memory usage: {self.memory_usage_string}')",
    "docstring": "Abstract builder for dataframe info table. Parameters ---------- info : DataFrameInfo. Instance of DataFrameInfo.",
    "type": "class",
    "file_path": "pandas\\pandas\\io\\formats\\info.py",
    "ast_data": "ClassDef name:_DataFrameTableBuilder FunctionDef name:__init__ arg:self arguments arg arg FunctionDef name:get_lines arg:self arguments arg Assign If Compare Call Call Return return:yes FunctionDef name:_fill_empty_info arg:self arguments arg Call Call Call Call FunctionDef name:_fill_non_empty_info arg:self arguments arg FunctionDef name:data arg:self arguments arg Return return:yes FunctionDef name:ids arg:self arguments arg Return return:yes FunctionDef name:col_count arg:self arguments arg Return return:yes FunctionDef name:add_memory_usage_line arg:self arguments arg Call"
  },
  {
    "library": "sphinx",
    "name": "isclassmethod",
    "source_code": "def isclassmethod(obj: Any, cls: Any=None, name: str | None=None) -> TypeIs[classmethod[Any, Any, Any]]:\n    if isinstance(obj, classmethod):\n        return True\n    if ismethod(obj) and obj.__self__ is not None and isclass(obj.__self__):\n        return True\n    if cls and name:\n        sentinel = object()\n        for basecls in getmro(cls):\n            meth = basecls.__dict__.get(name, sentinel)\n            if meth is not sentinel:\n                return isclassmethod(meth)\n    return False",
    "docstring": "Check if the object is a :class:.",
    "type": "function",
    "file_path": "sphinx\\sphinx\\util\\inspect.py",
    "ast_data": "FunctionDef name:isclassmethod arg:obj arg:cls arg:name arguments arg arg arg If Call Return return:yes If BoolOp Call Compare Call Return return:yes If BoolOp Assign Call For Call Assign Call If Compare Return return:yes Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "halt_ordering",
    "source_code": "@deprecated('`halt_ordering` is deprecated, you can safely remove this call.', category=FutureWarning)\ndef halt_ordering():\n    pass",
    "docstring": "Deprecated interface to temporarily disable ordering.",
    "type": "function",
    "file_path": "pytorch\\torch\\fx\\experimental\\unification\\multipledispatch\\dispatcher.py",
    "ast_data": "FunctionDef name:halt_ordering arguments Call"
  },
  {
    "library": "pytorch",
    "name": "AutoUnset",
    "source_code": "class AutoUnset(enum.Enum):\n    token = 0",
    "docstring": "The identity element of our semilattice, a generic \"don't know\" element that is always subsumed when we get more information.",
    "type": "class",
    "file_path": "pytorch\\torch\\_dynamo\\pgo.py",
    "ast_data": "ClassDef name:AutoUnset Assign"
  },
  {
    "library": "tensorflow",
    "name": "_get_device_name",
    "source_code": "@staticmethod\ndef _get_device_name(handle):\n    handle_str = compat.as_str_any(handle)\n    return pydev.canonical_name(handle_str.split(';')[-1])",
    "docstring": "The device name encoded in the handle.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\session_ops.py",
    "ast_data": "FunctionDef name:_get_device_name arg:handle arguments arg Assign Call Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "QuantizationAnnotation",
    "source_code": "@dataclass\nclass QuantizationAnnotation:\n    input_qspec_map: dict[Node, Optional[QuantizationSpecBase]] = field(default_factory=dict)\n    output_qspec: Optional[QuantizationSpecBase] = None\n    allow_implicit_sharing: bool = True\n    _annotated: bool = False",
    "docstring": "How are input arguemnt or output should be quantized, expressed as QuantizationSpec, this corresponds to how a Tensor in the operator Graph is observed (PTQ) or fake quantized (QAT)",
    "type": "class",
    "file_path": "pytorch\\torch\\ao\\quantization\\quantizer\\quantizer.py",
    "ast_data": "ClassDef name:QuantizationAnnotation Call"
  },
  {
    "library": "matplotlib",
    "name": "_on_motion",
    "source_code": "def _on_motion(self, event):\n    event.Skip()\n    MouseEvent('motion_notify_event', self, *self._mpl_coords(event), buttons=self._mpl_buttons(), modifiers=self._mpl_modifiers(event), guiEvent=event)._process()",
    "docstring": "Start measuring on an axis.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\backends\\backend_wx.py",
    "ast_data": "FunctionDef name:_on_motion arg:self arg:event arguments arg arg Call Call Call Call Call Call"
  },
  {
    "library": "scipy",
    "name": "_read_structure",
    "source_code": "def _read_structure(f, array_desc, struct_desc):\n    nrows = array_desc['nelements']\n    columns = struct_desc['tagtable']\n    dtype = []\n    for col in columns:\n        if col['structure'] or col['array']:\n            dtype.append(((col['name'].lower(), col['name']), np.object_))\n        elif col['typecode'] in DTYPE_DICT:\n            dtype.append(((col['name'].lower(), col['name']), DTYPE_DICT[col['typecode']]))\n        else:\n            raise Exception(f'Variable type {col['typecode']} not implemented')\n    structure = np.rec.recarray((nrows,), dtype=dtype)\n    for i in range(nrows):\n        for col in columns:\n            dtype = col['typecode']\n            if col['structure']:\n                structure[col['name']][i] = _read_structure(f, struct_desc['arrtable'][col['name']], struct_desc['structtable'][col['name']])\n            elif col['array']:\n                structure[col['name']][i] = _read_array(f, dtype, struct_desc['arrtable'][col['name']])\n            else:\n                structure[col['name']][i] = _read_data(f, dtype)\n    if array_desc['ndims'] > 1:\n        dims = array_desc['dims'][:int(array_desc['ndims'])]\n        dims.reverse()\n        structure = structure.reshape(dims)\n    return structure",
    "docstring": "Read a structure, with the array and structure descriptors given as and respectively.",
    "type": "function",
    "file_path": "scipy\\scipy\\io\\_idl.py",
    "ast_data": "FunctionDef name:_read_structure arg:f arg:array_desc arg:struct_desc arguments arg arg arg Assign Assign Assign For If BoolOp Call Call If Compare Call Call Raise Call Assign Call For Call For Assign If Assign Call If Assign Call Assign Call If Compare Assign Call Call Assign Call Return return:yes"
  },
  {
    "library": "pandas",
    "name": "_aggregate_with_numba",
    "source_code": "@final\ndef _aggregate_with_numba(self, func, *args, engine_kwargs=None, **kwargs):\n    data = self._obj_with_exclusions\n    df = data if data.ndim == 2 else data.to_frame()\n    starts, ends, sorted_index, sorted_data = self._numba_prep(df)\n    numba_.validate_udf(func)\n    args, kwargs = prepare_function_arguments(func, args, kwargs, num_required_args=2)\n    numba_agg_func = numba_.generate_numba_agg_func(func, **get_jit_arguments(engine_kwargs))\n    result = numba_agg_func(sorted_data, sorted_index, starts, ends, len(df.columns), *args)\n    index = self._grouper.result_index\n    if data.ndim == 1:\n        result_kwargs = {'name': data.name}\n        result = result.ravel()\n    else:\n        result_kwargs = {'columns': data.columns}\n    res = data._constructor(result, index=index, **result_kwargs)\n    if not self.as_index:\n        res = self._insert_inaxis_grouper(res)\n        res.index = default_index(len(res))\n    return res",
    "docstring": "Perform groupby aggregation routine with the numba engine. This routine mimics the data splitting routine of the DataSplitter class to generate the indices of each group in the sorted data and then passes the data and indices into a Numba jitted function.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\groupby\\groupby.py",
    "ast_data": "FunctionDef name:_aggregate_with_numba arg:self arg:func arguments arg arg arg arg arg Assign Assign Compare Call Assign Call Call Assign Call Assign Call Call Assign Call Call Assign If Compare Assign Assign Call Assign Assign Call If Assign Call Assign Call Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "autoscale",
    "source_code": "def autoscale(self):\n    self._colorizer.autoscale(self._A)",
    "docstring": "Autoscale the scalar limits on the norm instance using the current array",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\colorizer.py",
    "ast_data": "FunctionDef name:autoscale arg:self arguments arg Call"
  },
  {
    "library": "sphinx",
    "name": "NoUri",
    "source_code": "class NoUri(Exception):\n    pass",
    "docstring": "Raised by builder.get_relative_uri() or from missing-reference handlers if there is no URI available.",
    "type": "class",
    "file_path": "sphinx\\sphinx\\errors.py",
    "ast_data": "ClassDef name:NoUri"
  },
  {
    "library": "scikit-learn",
    "name": "_return_float_dtype",
    "source_code": "def _return_float_dtype(X, Y):\n    if not issparse(X) and (not isinstance(X, np.ndarray)):\n        X = np.asarray(X)\n    if Y is None:\n        Y_dtype = X.dtype\n    elif not issparse(Y) and (not isinstance(Y, np.ndarray)):\n        Y = np.asarray(Y)\n        Y_dtype = Y.dtype\n    else:\n        Y_dtype = Y.dtype\n    if X.dtype == Y_dtype == np.float32:\n        dtype = np.float32\n    else:\n        dtype = float\n    return (X, Y, dtype)",
    "docstring": "1. If dtype of X and Y is float32, then dtype float32 is returned. 2. Else dtype float is returned.",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\metrics\\pairwise.py",
    "ast_data": "FunctionDef name:_return_float_dtype arg:X arg:Y arguments arg arg If BoolOp Call Call Assign Call If Compare Assign If BoolOp Call Call Assign Call Assign Assign If Compare Assign Assign Return return:yes"
  },
  {
    "library": "numpy",
    "name": "feature_is_supported",
    "source_code": "@_Cache.me\ndef feature_is_supported(self, name, force_flags=None, macros=[]):\n    assert name.isupper()\n    assert force_flags is None or isinstance(force_flags, list)\n    supported = name in self.feature_supported\n    if supported:\n        for impl in self.feature_implies(name):\n            if not self.feature_test(impl, force_flags, macros=macros):\n                return False\n        if not self.feature_test(name, force_flags, macros=macros):\n            return False\n    return supported",
    "docstring": "Check if a certain CPU feature is supported by the platform and compiler. Parameters ---------- name : str CPU feature name in uppercase. force_flags : list or None, optional If None(default), default compiler flags for every CPU feature will be used during test. macros : list of tuples, optional A list of C macro definitions.",
    "type": "method",
    "file_path": "numpy\\numpy\\distutils\\ccompiler_opt.py",
    "ast_data": "FunctionDef name:feature_is_supported arg:self arg:name arg:force_flags arg:macros arguments arg arg arg arg Call BoolOp Compare Call Assign Compare If For Call If Call Return return:yes If Call Return return:yes Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "from_str",
    "source_code": "@classmethod\ndef from_str(cls, vm: str) -> Union['Std', dict[int, 'Std']]:\n\n    def to_std(v: str) -> Std:\n        s = Std(int(v))\n        if s in Std:\n            return s\n    if re.match(_VALUE_REGEX, vm):\n        return to_std(vm)\n    elif re.match(_MAPPING_REGEX, vm):\n        d: dict[int, Std] = {}\n        for m in vm.split(','):\n            i, v = m.split(':')\n            d[int(i)] = to_std(v)\n        return d\n    else:\n        raise ValueError(f'{vm} does not match: <{_VALUE_REGEX}> or <{_MAPPING_REGEX}>')",
    "docstring": "Example: :: from_str(\"0\") -> Std.NONE from_str(\"1\") -> Std.OUT from_str(\"0:3,1:0,2:1,3:2\") -> {0: Std.ALL, 1: Std.NONE, 2: Std.OUT, 3: Std.ERR} Any other input raises an exception",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\elastic\\multiprocessing\\api.py",
    "ast_data": "FunctionDef name:from_str arg:cls arg:vm arguments arg arg FunctionDef name:to_std arg:v arguments arg Assign Call Call If Compare Return return:yes If Call Return return:yes Call If Call For Call Assign Call Assign Call Call Return return:yes Raise Call"
  },
  {
    "library": "django",
    "name": "ask_rename_model",
    "source_code": "def ask_rename_model(self, old_model_state, new_model_state):\n    msg = 'Was the model %s.%s renamed to %s? [y/N]'\n    return self._boolean_input(msg % (old_model_state.app_label, old_model_state.name, new_model_state.name), False)",
    "docstring": "Was this model really renamed?",
    "type": "method",
    "file_path": "django\\django\\db\\migrations\\questioner.py",
    "ast_data": "FunctionDef name:ask_rename_model arg:self arg:old_model_state arg:new_model_state arguments arg arg arg Assign Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_compute_task_and_cores_to_replicas",
    "source_code": "def _compute_task_and_cores_to_replicas(core_assignment, topology):\n    task_and_cores_to_replicas = {}\n    for replica in range(core_assignment.shape[0]):\n        for logical_core in range(core_assignment.shape[1]):\n            coordinates = core_assignment[replica, logical_core, :]\n            task_id = topology.task_ordinal_at_coordinates(coordinates)\n            if task_id not in task_and_cores_to_replicas:\n                task_and_cores_to_replicas[task_id] = {}\n            if logical_core not in task_and_cores_to_replicas[task_id]:\n                task_and_cores_to_replicas[task_id][logical_core] = set()\n            task_and_cores_to_replicas[task_id][logical_core].add(replica)\n    task_to_sorted_replica_id = {}\n    for task, core_to_replicas in task_and_cores_to_replicas.items():\n        core_to_sorted_replicas = {}\n        for core, replicas in core_to_replicas.items():\n            core_to_sorted_replicas[core] = sorted(replicas)\n        task_to_sorted_replica_id[task] = core_to_sorted_replicas\n    return task_to_sorted_replica_id",
    "docstring": "Computes a nested dict which maps task and logical core to replicas.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\tpu\\device_assignment.py",
    "ast_data": "FunctionDef name:_compute_task_and_cores_to_replicas arg:core_assignment arg:topology arguments arg arg Assign For Call For Call Assign Assign Call If Compare Assign If Compare Assign Call Call Assign For Call Assign For Call Assign Call Assign Return return:yes"
  },
  {
    "library": "sphinx",
    "name": "RemoveTranslatableInline",
    "source_code": "class RemoveTranslatableInline(SphinxTransform):\n    default_priority = 999\n\n    def apply(self, **kwargs: Any) -> None:\n        from sphinx.builders.gettext import MessageCatalogBuilder\n        if isinstance(self.app.builder, MessageCatalogBuilder):\n            return\n        matcher = NodeMatcher(nodes.inline, translatable=Any)\n        for inline in matcher.findall(self.document):\n            inline.parent.remove(inline)\n            inline.parent += inline.children",
    "docstring": "Remove inline nodes used for translation as placeholders.",
    "type": "class",
    "file_path": "sphinx\\sphinx\\transforms\\i18n.py",
    "ast_data": "ClassDef name:RemoveTranslatableInline Assign FunctionDef name:apply arg:self arguments arg arg If Call Return return:no Assign Call For Call Call"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, sess, thread_name_filter=None, pass_through_operrors=False):\n    _check_type(sess, (session.BaseSession, monitored_session.MonitoredSession))\n    self._sess = sess\n    self._thread_name_filter_pattern = re.compile(thread_name_filter) if thread_name_filter else None\n    self._pass_through_operrors = pass_through_operrors\n    self._run_call_count = 0\n    response = self.on_session_init(OnSessionInitRequest(self._sess))\n    _check_type(response, OnSessionInitResponse)\n    if response.action == OnSessionInitAction.PROCEED:\n        pass\n    elif response.action == OnSessionInitAction.REMOTE_INSTR_LOOP:\n        raise NotImplementedError('OnSessionInitAction REMOTE_INSTR_LOOP has not been implemented.')\n    else:\n        raise ValueError('Invalid OnSessionInitAction value: %s' % response.action)\n    self._default_session_context_manager = None\n    self._cached_callables_from_options = {}",
    "docstring": "Constructor of . Args: sess: An (unwrapped) TensorFlow session instance. It should be a subtype of or . thread_name_filter: Regular-expression filter (allowlist) for name(s) of thread(s) on which the wrapper session will be active. This regular expression is used in a start-anchored fashion on the thread name, i.e., by applying the method of the compiled pattern. The default means that the wrapper session will be active on all threads. E.g., r\"MainThread$\", r\"QueueRunnerThread.*\". pass_through_operrors: If True, all captured OpErrors will be propagated. By default this captures all OpErrors. Raises: ValueError: On invalid value. NotImplementedError: If a non-DirectSession sess object is received.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\debug\\wrappers\\framework.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:sess arg:thread_name_filter arg:pass_through_operrors arguments arg arg arg arg Call Assign Assign Call Assign Assign Assign Call Call Call If Compare If Compare Raise Call Raise Call Assign Assign"
  },
  {
    "library": "scikit-learn",
    "name": "AgglomerationTransform",
    "source_code": "class AgglomerationTransform(TransformerMixin):\n\n    def transform(self, X):\n        check_is_fitted(self)\n        X = validate_data(self, X, reset=False)\n        if self.pooling_func == np.mean and (not issparse(X)):\n            size = np.bincount(self.labels_)\n            n_samples = X.shape[0]\n            nX = np.array([np.bincount(self.labels_, X[i, :]) / size for i in range(n_samples)])\n        else:\n            nX = [self.pooling_func(X[:, self.labels_ == l], axis=1) for l in np.unique(self.labels_)]\n            nX = np.array(nX).T\n        return nX\n\n    def inverse_transform(self, X):\n        check_is_fitted(self)\n        unil, inverse = np.unique(self.labels_, return_inverse=True)\n        return X[..., inverse]",
    "docstring": "A class for feature agglomeration via the transform interface.",
    "type": "class",
    "file_path": "scikit-learn\\sklearn\\cluster\\_feature_agglomeration.py",
    "ast_data": "ClassDef name:AgglomerationTransform FunctionDef name:transform arg:self arg:X arguments arg arg Call Assign Call If BoolOp Compare Call Assign Call Assign Assign Call Call Call Assign Call Compare Call Assign Call Return return:yes FunctionDef name:inverse_transform arg:self arg:X arguments arg arg Call Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "get_build_info",
    "source_code": "@tf_export('sysconfig.get_build_info')\ndef get_build_info():\n    return build_info.build_info",
    "docstring": "Get a dictionary describing TensorFlow's build environment. Values are generated when TensorFlow is compiled, and are static for each TensorFlow package. The return value is a dictionary with string keys such as: - cuda_version - cudnn_version - is_cuda_build - is_rocm_build - msvcp_dll_names - nvcuda_dll_name - cudart_dll_name - cudnn_dll_name Note that the actual keys and values returned by this function is subject to change across different versions of TensorFlow or across platforms. Returns: A Dictionary describing TensorFlow's build environment.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\platform\\sysconfig.py",
    "ast_data": "FunctionDef name:get_build_info arguments Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "fit",
    "source_code": "@_fit_context(prefer_skip_nested_validation=True)\ndef fit(self, X, y=None):\n    validate_data(self, X, accept_sparse='csr')\n    return self",
    "docstring": "Only validates estimator's parameters. This method allows to: (i) validate the estimator's parameters and (ii) be consistent with the scikit-learn transformer API. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) The data. y : None Ignored. Returns ------- self : object Fitted transformer.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\preprocessing\\_data.py",
    "ast_data": "FunctionDef name:fit arg:self arg:X arg:y arguments arg arg arg Call Return return:yes Call"
  },
  {
    "library": "sphinx",
    "name": "pending_xref",
    "source_code": "class pending_xref(nodes.Inline, nodes.Element):\n    child_text_separator = ''",
    "docstring": "Node for cross-references that cannot be resolved without complete information about all documents. These nodes are resolved before writing output, in BuildEnvironment.resolve_references.",
    "type": "class",
    "file_path": "sphinx\\sphinx\\addnodes.py",
    "ast_data": "ClassDef name:pending_xref Assign"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, functions, inference_args, input_tangents, tape_watching):\n    self._functions = functions\n    self._inference_args = inference_args\n    self._input_tangents = input_tangents\n    self._tape_watching = tape_watching",
    "docstring": "Collects information about the function call. Args: functions: An object which produces forward and backward functions, either a _DelayedRewriteGradientFunctions or a _TapeGradientFunctions object. inference_args: A flat list of Tensors, arguments to the inference function. input_tangents: A flat list of Tensors, jvps associated with . tape_watching: Boolean, with True indicating that recording is necessary.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\polymorphic_function\\concrete_function.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:functions arg:inference_args arg:input_tangents arg:tape_watching arguments arg arg arg arg arg Assign Assign Assign Assign"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, num_input_pipelines=1, input_pipeline_id=0, num_replicas_in_sync=1):\n    self._num_input_pipelines = num_input_pipelines\n    self._input_pipeline_id = input_pipeline_id\n    self._num_replicas_in_sync = num_replicas_in_sync",
    "docstring": "Initializes an InputContext object. Args: num_input_pipelines: the number of input pipelines in a cluster. input_pipeline_id: the current input pipeline id, should be an int in [0,). num_replicas_in_sync: the number of replicas that are in sync.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\distribute_lib.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:num_input_pipelines arg:input_pipeline_id arg:num_replicas_in_sync arguments arg arg arg arg Assign Assign Assign"
  },
  {
    "library": "scikit-learn",
    "name": "_identity",
    "source_code": "def _identity(X):\n    return X",
    "docstring": "The identity function.",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\preprocessing\\_function_transformer.py",
    "ast_data": "FunctionDef name:_identity arg:X arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "format_import",
    "source_code": "def format_import(self, source_module_name, source_name, dest_name):\n    if self._lazy_loading:\n        return \"  '%s': ('%s', '%s'),\" % (dest_name, source_module_name, source_name)\n    elif source_module_name:\n        if source_name == dest_name:\n            return 'from %s import %s' % (source_module_name, source_name)\n        else:\n            return 'from %s import %s as %s' % (source_module_name, source_name, dest_name)\n    elif source_name == dest_name:\n        return 'import %s' % source_name\n    else:\n        return 'import %s as %s' % (source_name, dest_name)",
    "docstring": "Formats import statement. Args: source_module_name: (string) Source module to import from. source_name: (string) Source symbol name to import. dest_name: (string) Destination alias name. Returns: An import statement string.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\tools\\api\\generator\\create_python_api.py",
    "ast_data": "FunctionDef name:format_import arg:self arg:source_module_name arg:source_name arg:dest_name arguments arg arg arg arg If Return return:yes If If Compare Return return:yes Return return:yes If Compare Return return:yes Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "ignore_fresh_unbacked_symbols",
    "source_code": "@contextmanager\ndef ignore_fresh_unbacked_symbols(self) -> Iterator[None]:\n    prev = self._ignore_fresh_unbacked_symbols_set(True)\n    try:\n        yield\n    finally:\n        self._ignore_fresh_unbacked_symbols_set(prev)",
    "docstring": "Indicates that the newly allocated unbacked SymInts are being discarded",
    "type": "method",
    "file_path": "pytorch\\torch\\fx\\experimental\\symbolic_shapes.py",
    "ast_data": "FunctionDef name:ignore_fresh_unbacked_symbols arg:self arguments arg Assign Call Try Call"
  },
  {
    "library": "pytorch",
    "name": "cache_info",
    "source_code": "@classmethod\ndef cache_info(cls) -> DispatchCacheInfo:\n    return DispatchCacheInfo(FakeTensorMode.cache_hits, FakeTensorMode.cache_misses, dict(FakeTensorMode.cache_bypasses), len(FakeTensorMode.cache))",
    "docstring": "Query the state of the dispatch cache.",
    "type": "method",
    "file_path": "pytorch\\torch\\_subclasses\\fake_tensor.py",
    "ast_data": "FunctionDef name:cache_info arg:cls arguments arg Return return:yes Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "is_initialized",
    "source_code": "def is_initialized(self, name=None):\n    return gen_resource_variable_ops.var_is_initialized_op(self.handle, name)",
    "docstring": "Checks whether a resource variable has been initialized. Outputs boolean scalar indicating whether the tensor has been initialized. Args: name: A name for the operation (optional). Returns: A of type .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\resource_variable_ops.py",
    "ast_data": "FunctionDef name:is_initialized arg:self arg:name arguments arg arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "tree_leaves_with_path",
    "source_code": "def tree_leaves_with_path(tree: PyTree, is_leaf: Optional[Callable[[PyTree], bool]]=None) -> list[tuple[KeyPath, Any]]:\n    return list(_generate_key_paths((), tree, is_leaf))",
    "docstring": "Gets the leaves of a pytree like `tree_flatten_with_path_fnregister_pytree_nodeTrue`, the whole subtree being treated as a leaf. Otherwise, the default pytree registry will be used to determine a node is a leaf or not. If the function is not specified, the default pytree registry will be used. Returns: A list of (key path, leaf) pairs.",
    "type": "function",
    "file_path": "pytorch\\torch\\utils\\_pytree.py",
    "ast_data": "FunctionDef name:tree_leaves_with_path arg:tree arg:is_leaf arguments arg arg Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "get_trace_id",
    "source_code": "def get_trace_id(self):\n    if self.profiler is None:\n        return None\n    return self.profiler.trace_id",
    "docstring": "Returns the current trace ID.",
    "type": "method",
    "file_path": "pytorch\\torch\\profiler\\profiler.py",
    "ast_data": "FunctionDef name:get_trace_id arg:self arguments arg If Compare Return return:no Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "is_valid_experiment_name",
    "source_code": "def is_valid_experiment_name(experiment_name: str) -> bool:\n    valid_char_regex = '^[a-zA-Z0-9]([\\\\w-]*[a-zA-Z0-9])?$'\n    valid = bool(re.match(valid_char_regex, experiment_name))\n    if valid:\n        return True\n    log.error(f\"Invalid experiment name: {experiment_name}. Experiment names should only contain alphanumeric characters, '_', and '-'. They cannot contain spaces, and the special characters '_' and '-' cannot be the first or last characters.\")\n    return False",
    "docstring": "Check if the experiment name is valid. A valid name: - Contains only alphanumeric characters and the special characters \"_\" & \"-\" - The special characters \"_\" & \"-\" shouldn't be the first or last characters - Cannot contain spaces",
    "type": "function",
    "file_path": "pytorch\\.github\\scripts\\runner_determinator.py",
    "ast_data": "FunctionDef name:is_valid_experiment_name arg:experiment_name arguments arg Assign Assign Call Call If Return return:yes Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "comment_on_gh",
    "source_code": "def comment_on_gh(self, comment):\n    with tempfile.NamedTemporaryFile(mode='w', delete=False) as f:\n        f.write(comment)\n        filename = f.name\n    issue_number = '93794'\n    if self.args.dtypes[0] == 'float32':\n        issue_number = '93518'\n    subprocess.check_call([self.args.dashboard_gh_cli_path, 'issue', 'comment', '--repo=https://github.com/pytorch/pytorch.git', issue_number, '-F', filename])\n    os.remove(filename)",
    "docstring": "Send a commment to dashboard",
    "type": "method",
    "file_path": "pytorch\\benchmarks\\dynamo\\runner.py",
    "ast_data": "FunctionDef name:comment_on_gh arg:self arg:comment arguments arg arg With Call Call Assign Assign If Compare Assign Call Call"
  },
  {
    "library": "tensorflow",
    "name": "on_core_metadata_event",
    "source_code": "def on_core_metadata_event(self, event):\n    raise NotImplementedError('on_core_metadata_event() is not implemented in the base servicer class')",
    "docstring": "Callback for core metadata. Args: event: The Event proto that carries a JSON string in its field. Returns: or an proto to be sent back to the client. If , an proto construct with the default no-arg constructor will be sent back to the client.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\debug\\lib\\grpc_debug_server.py",
    "ast_data": "FunctionDef name:on_core_metadata_event arg:self arg:event arguments arg arg Raise Call"
  },
  {
    "library": "scrapy",
    "name": "process_chain",
    "source_code": "def process_chain(callbacks: Iterable[Callable[Concatenate[_T, _P], _T]], input: _T, *a: _P.args, **kw: _P.kwargs) -> Deferred[_T]:\n    d: Deferred[_T] = Deferred()\n    for x in callbacks:\n        d.addCallback(x, *a, **kw)\n    d.callback(input)\n    return d",
    "docstring": "Return a Deferred built by chaining the given callbacks",
    "type": "function",
    "file_path": "scrapy\\scrapy\\utils\\defer.py",
    "ast_data": "FunctionDef name:process_chain arg:callbacks arg:input arguments arg arg arg arg Call For Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "choose_qparams_per_token",
    "source_code": "@impl(quantized_decomposed_lib, 'choose_qparams_per_token', 'CompositeExplicitAutograd')\ndef choose_qparams_per_token(input: torch.Tensor, dtype: torch.dtype) -> tuple[torch.Tensor, torch.Tensor]:\n    scales = input.abs().amax(dim=-1, keepdim=True)\n    if scales.dtype == torch.float16:\n        scales = scales.float()\n    if dtype == torch.int8:\n        n_bits = 8\n        quant_max = 2 ** (n_bits - 1) - 1\n    else:\n        raise Exception(f'unsupported dtype in choose_qparams_per_token: {dtype}')\n    scales = scales.clamp(min=1e-05).div(quant_max)\n    zero_points = torch.zeros_like(scales)\n    return (scales, zero_points)",
    "docstring": "Choose quantization parameters for per token quantization. This means for a N dimension Tensor (M1, M2, ...Mn, N), we calculate scales/zero_points for each N elements and quantize every N elements with the same quantization parameter. The dimension for scales/zero_points will be (M1 * M2 ... * Mn) Args: input (torch.Tensor): original float32/float16 Tensor dtype (torch.dtype): dtype (e.g. torch.uint8) for input Tensor Returns: scales and zero_points, both float32 Tensors",
    "type": "function",
    "file_path": "pytorch\\torch\\ao\\quantization\\fx\\_decomposed.py",
    "ast_data": "FunctionDef name:choose_qparams_per_token arg:input arg:dtype arguments arg arg Assign Call Call If Compare Assign Call If Compare Assign Assign Raise Call Assign Call Call Assign Call Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "integrate_gaussian",
    "source_code": "def integrate_gaussian(self, mean, cov):\n    mean = atleast_1d(squeeze(mean))\n    cov = atleast_2d(cov)\n    if mean.shape != (self.d,):\n        raise ValueError(f'mean does not have dimension {self.d}')\n    if cov.shape != (self.d, self.d):\n        raise ValueError(f'covariance does not have dimension {self.d}')\n    mean = mean[:, newaxis]\n    sum_cov = self.covariance + cov\n    sum_cov_chol = linalg.cho_factor(sum_cov)\n    diff = self.dataset - mean\n    tdiff = linalg.cho_solve(sum_cov_chol, diff)\n    sqrt_det = np.prod(np.diagonal(sum_cov_chol[0]))\n    norm_const = power(2 * pi, sum_cov.shape[0] / 2.0) * sqrt_det\n    energies = np_vecdot(diff, tdiff, axis=0) / 2.0\n    result = np_vecdot(exp(-energies), self.weights, axis=0) / norm_const\n    return result",
    "docstring": "Multiply estimated density by a multivariate Gaussian and integrate over the whole space. Parameters ---------- mean : aray_like A 1-D array, specifying the mean of the Gaussian. cov : array_like A 2-D array, specifying the covariance matrix of the Gaussian. Returns ------- result : scalar The value of the integral. Raises ------ ValueError If the mean or covariance of the input Gaussian differs from the KDE's dimensionality.",
    "type": "method",
    "file_path": "scipy\\scipy\\stats\\_kde.py",
    "ast_data": "FunctionDef name:integrate_gaussian arg:self arg:mean arg:cov arguments arg arg arg Assign Call Call Assign Call If Compare Raise Call If Compare Raise Call Assign Assign Assign Call Assign Assign Call Assign Call Call Assign Call Assign Call Assign Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "state_dict",
    "source_code": "def state_dict(self) -> dict[str, Any]:\n    state_dict = self._optimizer.state_dict()\n    param_groups = state_dict['param_groups']\n    ret_state = {self.ordered_param_keys[st_key]: state_val for st_key, state_val in state_dict['state'].items()}\n    ret_groups = []\n    for group in param_groups:\n        param_keys = [self.ordered_param_keys[param] for param in group['params']]\n        ret_group = {'params': sorted(param_keys)}\n        for k, v in group.items():\n            if k != 'params':\n                ret_group[k] = deepcopy(v)\n        ret_groups.append(ret_group)\n    return self._post_state_dict({'state': ret_state, 'param_groups': ret_groups})",
    "docstring": "Return the `` of the optimizer. Instead of using number to index parameters, we will use module fully qualified name (FQN) as the key.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\optim\\named_optimizer.py",
    "ast_data": "FunctionDef name:state_dict arg:self arguments arg Assign Call Assign Assign Call Assign For Assign Assign Call For Call If Compare Assign Call Call Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "_local_reachability_density",
    "source_code": "def _local_reachability_density(self, distances_X, neighbors_indices):\n    dist_k = self._distances_fit_X_[neighbors_indices, self.n_neighbors_ - 1]\n    reach_dist_array = np.maximum(distances_X, dist_k)\n    return 1.0 / (np.mean(reach_dist_array, axis=1) + 1e-10)",
    "docstring": "The local reachability density (LRD) The LRD of a sample is the inverse of the average reachability distance of its k-nearest neighbors. Parameters ---------- distances_X : ndarray of shape (n_queries, self.n_neighbors) Distances to the neighbors (in the training samples ) of each query point to compute the LRD. neighbors_indices : ndarray of shape (n_queries, self.n_neighbors) Neighbors indices (of each query point) among training samples self._fit_X. Returns ------- local_reachability_density : ndarray of shape (n_queries,) The local reachability density of each sample.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\neighbors\\_lof.py",
    "ast_data": "FunctionDef name:_local_reachability_density arg:self arg:distances_X arg:neighbors_indices arguments arg arg arg Assign Assign Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "IntGauge",
    "source_code": "class IntGauge(Metric):\n    __slots__ = []\n\n    def __init__(self, name, description, *labels):\n        super(IntGauge, self).__init__('IntGauge', _int_gauge_methods, len(labels), name, description, *labels)\n\n    def get_cell(self, *labels):\n        return IntGaugeCell(super(IntGauge, self).get_cell(*labels))",
    "docstring": "A stateful class for updating a gauge-like integer metric. This class encapsulates a set of integer values (or a single value for a label-less metric). Each value is identified by a tuple of labels. The class allows the user to set each value.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\monitoring.py",
    "ast_data": "ClassDef name:IntGauge Assign FunctionDef name:__init__ arg:self arg:name arg:description arguments arg arg arg arg Call Call Call FunctionDef name:get_cell arg:self arguments arg arg Return return:yes Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "gen_consistency_constraints",
    "source_code": "def gen_consistency_constraints(constraint: Constraint, counter: int):\n    all_constraints = []\n    for i in range(1, MAX_TENSOR_RANK + 1):\n        new_dims_rhs_1, counter = gen_tensor_dims(i, counter)\n        new_dims_rhs_2, counter = gen_tensor_dims(i, counter)\n        nat_constraints = gen_nat_constraints(new_dims_rhs_1 + new_dims_rhs_2)\n        c_tensor_i = Conj([BinConstraintT(constraint.lhs, TensorType(new_dims_rhs_1), op_eq), BinConstraintT(constraint.rhs, TensorType(new_dims_rhs_2), op_eq)] + [BinConstraintD(d1, d2, op_consistency) for d1, d2 in zip(new_dims_rhs_1, new_dims_rhs_2)] + nat_constraints)\n        all_constraints.append(c_tensor_i)\n    return (all_constraints, counter)",
    "docstring": "Args: constraint: Consistency constraint on tensors counter: for variable tracking Returns: Equality and consistency constraints on dimensions",
    "type": "function",
    "file_path": "pytorch\\torch\\fx\\experimental\\migrate_gradual_types\\constraint_transformation.py",
    "ast_data": "FunctionDef name:gen_consistency_constraints arg:constraint arg:counter arguments arg arg Assign For Call Assign Call Assign Call Assign Call Assign Call Call Call Call Call Call Call Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "scale",
    "source_code": "def scale(self, xscale, yscale):\n    for c in self._cells.values():\n        c.set_width(c.get_width() * xscale)\n        c.set_height(c.get_height() * yscale)",
    "docstring": "Scale column widths by *xscale* and row heights by *yscale*.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\table.py",
    "ast_data": "FunctionDef name:scale arg:self arg:xscale arg:yscale arguments arg arg arg For Call Call Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "set_default_tensor_type",
    "source_code": "def set_default_tensor_type(t: _Union[type['torch.Tensor'], str], /) -> None:\n    if isinstance(t, str):\n        t = _import_dotted_name(t)\n    _C._set_default_tensor_type(t)",
    "docstring": ".. warning:: This function is deprecated as of PyTorch 2.1, please use :func: and :func: as alternatives. Sets the default `torch.tensor`. Args: t (type or string): the floating point tensor type or its name Example:: >>> # xdoctest: +SKIP(\"Other tests may have changed the default type. Can we reset it?\") >>> torch.tensor([1.2, 3]).dtype # initial default for floating point is torch.float32 torch.float32 >>> torch.set_default_tensor_type(torch.DoubleTensor) >>> torch.tensor([1.2, 3]).dtype # a new floating point tensor torch.float64",
    "type": "function",
    "file_path": "pytorch\\torch\\__init__.py",
    "ast_data": "FunctionDef name:set_default_tensor_type arguments arg If Call Assign Call Call"
  },
  {
    "library": "django",
    "name": "skew",
    "source_code": "@property\ndef skew(self):\n    return TransformPoint(self, 'skew')",
    "docstring": "Skew of pixels (rotation parameters).",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\gdal\\raster\\source.py",
    "ast_data": "FunctionDef name:skew arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_with_precomputed_row_splits",
    "source_code": "def _with_precomputed_row_splits(self):\n    return RowPartition(row_splits=self.row_splits(), row_lengths=self._row_lengths, value_rowids=self._value_rowids, nrows=self._nrows, uniform_row_length=self._uniform_row_length, nvals=self._nvals, internal=_row_partition_factory_key)",
    "docstring": "Returns a copy of with precomputed.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\row_partition.py",
    "ast_data": "FunctionDef name:_with_precomputed_row_splits arg:self arguments arg Return return:yes Call Call"
  },
  {
    "library": "matplotlib",
    "name": "__init__",
    "source_code": "def __init__(self, width, height, xdescent=0.0, ydescent=0.0, clip=False):\n    super().__init__()\n    self.width = width\n    self.height = height\n    self.xdescent = xdescent\n    self.ydescent = ydescent\n    self._clip_children = clip\n    self.offset_transform = mtransforms.Affine2D()\n    self.dpi_transform = mtransforms.Affine2D()",
    "docstring": "Parameters ---------- width, height : float Width and height of the container box. xdescent, ydescent : float Descent of the box in x- and y-direction. clip : bool Whether to clip the children to the box.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\offsetbox.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:width arg:height arg:xdescent arg:ydescent arg:clip arguments arg arg arg arg arg arg Call Call Assign Assign Assign Assign Assign Assign Call Assign Call"
  },
  {
    "library": "pandas",
    "name": "_get_group_names",
    "source_code": "def _get_group_names(regex: re.Pattern) -> list[Hashable] | range:\n    rng = range(regex.groups)\n    names = {v: k for k, v in regex.groupindex.items()}\n    if not names:\n        return rng\n    result: list[Hashable] = [names.get(1 + i, i) for i in rng]\n    arr = np.array(result)\n    if arr.dtype.kind == 'i' and lib.is_range_indexer(arr, len(arr)):\n        return rng\n    return result",
    "docstring": "Get named groups from compiled regex. Unnamed groups are numbered. Parameters ---------- regex : compiled regex Returns ------- list of column labels",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\strings\\accessor.py",
    "ast_data": "FunctionDef name:_get_group_names arg:regex arguments arg Assign Call Assign Call If Return return:yes Call Assign Call If BoolOp Compare Call Call Return return:yes Return return:yes"
  },
  {
    "library": "django",
    "name": "__init__",
    "source_code": "def __init__(self, url, length, mime_type):\n    self.length, self.mime_type = (length, mime_type)\n    self.url = iri_to_uri(url)",
    "docstring": "All args are expected to be strings",
    "type": "method",
    "file_path": "django\\django\\utils\\feedgenerator.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:url arg:length arg:mime_type arguments arg arg arg arg Assign Assign Call"
  },
  {
    "library": "tensorflow",
    "name": "_search_for_child_node",
    "source_code": "def _search_for_child_node(self, parent_id, path_to_child):\n    if not path_to_child:\n        return parent_id\n    for child in self._proto.nodes[parent_id].children:\n        if child.local_name == path_to_child[0]:\n            return self._search_for_child_node(child.node_id, path_to_child[1:])\n    return None",
    "docstring": "Returns node id of child node. A helper method for traversing the object graph proto. As an example, say that the object graph proto in the SavedModel contains an object with the following child and grandchild attributes: This method can be used to retrieve the node id of using the parent's node id by calling: . Args: parent_id: node id of parent node path_to_child: list of children names. Returns: node_id of child, or None if child isn't found.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\saving\\saved_model\\load.py",
    "ast_data": "FunctionDef name:_search_for_child_node arg:self arg:parent_id arg:path_to_child arguments arg arg arg If Return return:yes For If Compare Return return:yes Call Return return:no"
  },
  {
    "library": "matplotlib",
    "name": "_3d_extend_contour",
    "source_code": "def _3d_extend_contour(self, cset, stride=5):\n    dz = (cset.levels[1] - cset.levels[0]) / 2\n    polyverts = []\n    colors = []\n    for idx, level in enumerate(cset.levels):\n        path = cset.get_paths()[idx]\n        subpaths = [*path._iter_connected_components()]\n        color = cset.get_edgecolor()[idx]\n        top = art3d._paths_to_3d_segments(subpaths, level - dz)\n        bot = art3d._paths_to_3d_segments(subpaths, level + dz)\n        if not len(top[0]):\n            continue\n        nsteps = max(round(len(top[0]) / stride), 2)\n        stepsize = (len(top[0]) - 1) / (nsteps - 1)\n        polyverts.extend([(top[0][round(i * stepsize)], top[0][round((i + 1) * stepsize)], bot[0][round((i + 1) * stepsize)], bot[0][round(i * stepsize)]) for i in range(round(nsteps) - 1)])\n        colors.extend([color] * (round(nsteps) - 1))\n    self.add_collection3d(art3d.Poly3DCollection(np.array(polyverts), facecolors=colors, edgecolors=colors, shade=True))\n    cset.remove()",
    "docstring": "Extend a contour in 3D by creating",
    "type": "method",
    "file_path": "matplotlib\\lib\\mpl_toolkits\\mplot3d\\axes3d.py",
    "ast_data": "FunctionDef name:_3d_extend_contour arg:self arg:cset arg:stride arguments arg arg arg Assign Assign Assign For Call Assign Call Assign Call Assign Call Assign Call Assign Call If Call Assign Call Call Call Assign Call Call Call Call Call Call Call Call Call Call Call Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_common_args",
    "source_code": "@property\ndef _common_args(self):\n    return {'metadata': self._metadata.SerializeToString(), 'output_shapes': self._flat_shapes, 'output_types': self._flat_types}",
    "docstring": "Helper for generating arguments that are common across most dataset ops. Most dataset op constructors expect and arguments that represent the flattened structure of an element, as well as a argument for additional metadata such as user-defined dataset name. This helper function generates common attributes as a keyword argument dictionary, allowing implementations to pass to the op constructor. Returns: A dictionary of keyword arguments that can be passed to a dataset op constructor.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\data\\ops\\dataset_ops.py",
    "ast_data": "FunctionDef name:_common_args arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "ClosureInputError",
    "source_code": "class ClosureInputError(Exception):\n\n    def __init__(self, original_exception):\n        if isinstance(original_exception, (ClosureInputError, ClosureAbortedError)):\n            self.original_exception = original_exception.original_exception\n        else:\n            self.original_exception = original_exception\n        message = 'Input has an error, the original exception is %r, error message is %s.' % (self.original_exception, str(self.original_exception))\n        super().__init__(message)\n        self.with_traceback(original_exception.__traceback__)",
    "docstring": "Wrapper for errors from resource building. When a closure starts, it first checks for errors in any of its inputs, which are RemoteValues from resource closures. If there were any errors, it wraps the exception in this class and raises so it can be handled by the worker failure handler. Attributes: original_exception:",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\coordinator\\cluster_coordinator.py",
    "ast_data": "ClassDef name:ClosureInputError FunctionDef name:__init__ arg:self arg:original_exception arguments arg arg If Call Assign Assign Assign Call Call Call Call"
  },
  {
    "library": "scipy",
    "name": "VertexCube",
    "source_code": "class VertexCube(VertexBase):\n\n    def __init__(self, x, nn=None, index=None):\n        super().__init__(x, nn=nn, index=index)\n\n    def connect(self, v):\n        if v is not self and v not in self.nn:\n            self.nn.add(v)\n            v.nn.add(self)\n\n    def disconnect(self, v):\n        if v in self.nn:\n            self.nn.remove(v)\n            v.nn.remove(self)",
    "docstring": "Vertex class to be used for a pure simplicial complex with no associated differential geometry (single level domain that exists in R^n)",
    "type": "class",
    "file_path": "scipy\\scipy\\optimize\\_shgo_lib\\_vertex.py",
    "ast_data": "ClassDef name:VertexCube FunctionDef name:__init__ arg:self arg:x arg:nn arg:index arguments arg arg arg arg Call Call FunctionDef name:connect arg:self arg:v arguments arg arg If BoolOp Compare Compare Call Call FunctionDef name:disconnect arg:self arg:v arguments arg arg If Compare Call Call"
  },
  {
    "library": "tensorflow",
    "name": "inplace_add",
    "source_code": "@deprecation.deprecated(None, 'Prefer tf.tensor_scatter_nd_add, which offers the same functionality with well-defined read-write semantics.')\ndef inplace_add(x, i, v):\n    return alias_inplace_add(gen_array_ops.deep_copy(x), i, v)",
    "docstring": "Applies an inplace add on input x at index i with value v. Note that this function is not actually inplace - it allocates a copy of x. The utility is not avoiding memory copies but rather specifying a sparse update. If i is None, x and v must be the same shape. Computes y = x; y += v; If i is a scalar, x has a rank 1 higher than v's. Computes y = x; y[i, :] += v; Otherwise, x and v must have the same rank. Computes y = x; y[i, :] += v; Args: x: A Tensor. i: None, a scalar or a vector. v: A Tensor. Returns: Returns y, which is guaranteed not to be an alias of x.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\inplace_ops.py",
    "ast_data": "FunctionDef name:inplace_add arg:x arg:i arg:v arguments arg arg arg Return return:yes Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "get_slot_names",
    "source_code": "def get_slot_names(self):\n    return self._slot_names",
    "docstring": "A list of names for this optimizer's slots.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\optimizer_v2\\optimizer_v2.py",
    "ast_data": "FunctionDef name:get_slot_names arg:self arguments arg Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "__call__",
    "source_code": "def __call__(self, X, Y=None, eval_gradient=False):\n    X = np.atleast_2d(X)\n    length_scale = _check_length_scale(X, self.length_scale)\n    if Y is None:\n        dists = pdist(X / length_scale, metric='sqeuclidean')\n        K = np.exp(-0.5 * dists)\n        K = squareform(K)\n        np.fill_diagonal(K, 1)\n    else:\n        if eval_gradient:\n            raise ValueError('Gradient can only be evaluated when Y is None.')\n        dists = cdist(X / length_scale, Y / length_scale, metric='sqeuclidean')\n        K = np.exp(-0.5 * dists)\n    if eval_gradient:\n        if self.hyperparameter_length_scale.fixed:\n            return (K, np.empty((X.shape[0], X.shape[0], 0)))\n        elif not self.anisotropic or length_scale.shape[0] == 1:\n            K_gradient = (K * squareform(dists))[:, :, np.newaxis]\n            return (K, K_gradient)\n        elif self.anisotropic:\n            K_gradient = (X[:, np.newaxis, :] - X[np.newaxis, :, :]) ** 2 / length_scale ** 2\n            K_gradient *= K[..., np.newaxis]\n            return (K, K_gradient)\n    else:\n        return K",
    "docstring": "Return the kernel k(X, Y) and optionally its gradient. Parameters ---------- X : ndarray of shape (n_samples_X, n_features) Left argument of the returned kernel k(X, Y) Y : ndarray of shape (n_samples_Y, n_features), default=None Right argument of the returned kernel k(X, Y). If None, k(X, X) if evaluated instead. eval_gradient : bool, default=False Determines whether the gradient with respect to the log of the kernel hyperparameter is computed. Only supported when Y is None. Returns ------- K : ndarray of shape (n_samples_X, n_samples_Y) Kernel k(X, Y) K_gradient : ndarray of shape (n_samples_X, n_samples_X, n_dims), optional The gradient of the kernel k(X, X) with respect to the log of the hyperparameter of the kernel. Only returned when is True.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\gaussian_process\\kernels.py",
    "ast_data": "FunctionDef name:__call__ arg:self arg:X arg:Y arg:eval_gradient arguments arg arg arg arg Assign Call Assign Call If Compare Assign Call Assign Call Assign Call Call If Raise Call Assign Call Assign Call If If Return return:yes Call If BoolOp Compare Assign Call Return return:yes If Assign Return return:yes Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "output_types",
    "source_code": "@property\n@deprecation.deprecated(None, 'Use `tf.compat.v1.data.get_output_types(iterator)`.')\ndef output_types(self):\n    return nest.map_structure(lambda component_spec: component_spec._to_legacy_output_types(), self._element_spec)",
    "docstring": "Returns the type of each component of an element of this iterator. Returns: A (nested) structure of objects corresponding to each component of an element of this dataset.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\data\\ops\\iterator_ops.py",
    "ast_data": "FunctionDef name:output_types arg:self arguments arg Return return:yes Call arguments arg Call Call"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, clean_stop_exception_types=None):\n    if clean_stop_exception_types is None:\n        clean_stop_exception_types = (errors.OutOfRangeError,)\n    self._clean_stop_exception_types = tuple(clean_stop_exception_types)\n    self._lock = threading.Lock()\n    self._stop_event = threading.Event()\n    self._exc_info_to_raise = None\n    self._joined = False\n    self._registered_threads = set()",
    "docstring": "Create a new Coordinator. Args: clean_stop_exception_types: Optional tuple of Exception types that should cause a clean stop of the coordinator. If an exception of one of these types is reported to the coordinator will behave as if was called. Defaults to which is used by input queues to signal the end of input. When feeding training data from a Python iterator it is common to add to this list.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\training\\coordinator.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:clean_stop_exception_types arguments arg arg If Compare Assign Assign Call Assign Call Assign Call Assign Assign Assign Call"
  },
  {
    "library": "tensorflow",
    "name": "add_trace",
    "source_code": "def add_trace(self, *args, **kwargs):\n    args = list(args)\n    kwargs = kwargs.copy()\n    for fn in self._functions.values():\n        if self._expects_training_arg:\n\n            def trace_with_training(value, fn=fn):\n                utils.set_training_arg(value, self._training_arg_index, args, kwargs)\n                add_trace_to_queue(fn, args, kwargs, value)\n            trace_with_training(True)\n            trace_with_training(False)\n        else:\n            add_trace_to_queue(fn, args, kwargs)",
    "docstring": "Traces all functions with the same args and kwargs. Args: *args: Positional args passed to the original function. **kwargs: Keyword args passed to the original function.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\saving\\saved_model\\save_impl.py",
    "ast_data": "FunctionDef name:add_trace arg:self arguments arg arg arg Assign Call Assign Call For Call If FunctionDef name:trace_with_training arg:value arg:fn arguments arg arg Call Call Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "to_parted",
    "source_code": "def to_parted(self) -> 'Layout':\n    return Layout._new_object(layout=super().to_parted())",
    "docstring": "Returns a \"parted\" layout from a static layout. A parted layout contains axes that are treated as independent by most of SPMD expanders. FIXME(b/285905569): The exact semantics is still being investigated.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\dtensor\\python\\layout.py",
    "ast_data": "FunctionDef name:to_parted arg:self arguments arg Return return:yes Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "temperature",
    "source_code": "def temperature(device: Optional[Union[Device, int]]=None) -> int:\n    if not torch.version.hip:\n        handle = _get_pynvml_handler(device)\n        return pynvml.nvmlDeviceGetTemperature(handle, 0)\n    else:\n        return _get_amdsmi_temperature(device)",
    "docstring": "Return the average temperature of the GPU sensor in Degrees C (Centigrades). The average temperature is computed based on past sample period as given by . Args: device (torch.device or int, optional): selected device. Returns statistic for the current device, given by :func:, if :attr: is `` (default). Warning: Each sample period may be between 1 second and 1/6 second, depending on the product being queried.",
    "type": "function",
    "file_path": "pytorch\\torch\\cuda\\__init__.py",
    "ast_data": "FunctionDef name:temperature arg:device arguments arg If Assign Call Return return:yes Call Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "check_transformers_unfitted_stateless",
    "source_code": "@ignore_warnings(category=FutureWarning)\ndef check_transformers_unfitted_stateless(name, transformer):\n    rng = np.random.RandomState(0)\n    X = rng.uniform(size=(20, 5))\n    X = _enforce_estimator_tags_X(transformer, X)\n    transformer = clone(transformer)\n    X_trans = transformer.transform(X)\n    assert X_trans.shape[0] == X.shape[0]",
    "docstring": "Check that using transform without prior fitting doesn't raise a NotFittedError for stateless transformers.",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\utils\\estimator_checks.py",
    "ast_data": "FunctionDef name:check_transformers_unfitted_stateless arg:name arg:transformer arguments arg arg Assign Call Assign Call Assign Call Assign Call Assign Call Compare Call"
  },
  {
    "library": "pandas",
    "name": "register_pandas_matplotlib_converters",
    "source_code": "def register_pandas_matplotlib_converters(func: F) -> F:\n\n    @functools.wraps(func)\n    def wrapper(*args, **kwargs):\n        with pandas_converters():\n            return func(*args, **kwargs)\n    return cast(F, wrapper)",
    "docstring": "Decorator applying pandas_converters.",
    "type": "function",
    "file_path": "pandas\\pandas\\plotting\\_matplotlib\\converter.py",
    "ast_data": "FunctionDef name:register_pandas_matplotlib_converters arg:func arguments arg FunctionDef name:wrapper arguments arg arg With Call Return return:yes Call Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "__init__",
    "source_code": "def __init__(self, target: str, allow_outputs: bool=False) -> None:\n    self.target = target\n    self.allow_outputs = allow_outputs\n    assert isinstance(target, str), f'target should be a string representing the device type. Got: {type(target).__name__}'",
    "docstring": "Move constructors from cpu to the target_device. Sweeps through the module, looking for constructor nodes that can be moved to the target_device. A constructor node can be moved to the target_device iff all of its users can also be moved (tested by cannot_be_moved). Otherwise, all dependent constructor nodes won't be moved. - target: target device type - allow_outputs: allow outputs to be moved",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\fx_passes\\post_grad.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:target arg:allow_outputs arguments arg arg arg Assign Assign Call Call"
  },
  {
    "library": "tensorflow",
    "name": "uncompress",
    "source_code": "def uncompress(element, output_spec):\n    flat_types = structure.get_flat_tensor_types(output_spec)\n    flat_shapes = structure.get_flat_tensor_shapes(output_spec)\n    tensor_list = ged_ops.uncompress_element(element, output_types=flat_types, output_shapes=flat_shapes)\n    return structure.from_tensor_list(output_spec, tensor_list)",
    "docstring": "Uncompress a compressed dataset element. Args: element: A scalar variant tensor to uncompress. The element should have been created by calling . output_spec: A nested structure of representing the type(s) of the uncompressed element. Returns: The uncompressed element.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\data\\experimental\\ops\\compression_ops.py",
    "ast_data": "FunctionDef name:uncompress arg:element arg:output_spec arguments arg arg Assign Call Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "_g_sig_digits",
    "source_code": "def _g_sig_digits(value, delta):\n    if delta == 0:\n        if value == 0:\n            return 3\n        delta = abs(np.spacing(value))\n    return max(0, (math.floor(math.log10(abs(value))) + 1 if value else 1) - math.floor(math.log10(delta))) if math.isfinite(value) else 0",
    "docstring": "Return the number of significant digits to %g-format *value*, assuming that it is known with an error of *delta*.",
    "type": "function",
    "file_path": "matplotlib\\lib\\matplotlib\\cbook.py",
    "ast_data": "FunctionDef name:_g_sig_digits arg:value arg:delta arguments arg arg If Compare If Compare Return return:yes Assign Call Call Return return:yes Call Call Call Call Call Call Call"
  },
  {
    "library": "matplotlib",
    "name": "shade",
    "source_code": "def shade(self, data, cmap, norm=None, blend_mode='overlay', vmin=None, vmax=None, vert_exag=1, dx=1, dy=1, fraction=1, **kwargs):\n    if vmin is None:\n        vmin = data.min()\n    if vmax is None:\n        vmax = data.max()\n    if norm is None:\n        norm = Normalize(vmin=vmin, vmax=vmax)\n    rgb0 = cmap(norm(data))\n    rgb1 = self.shade_rgb(rgb0, elevation=data, blend_mode=blend_mode, vert_exag=vert_exag, dx=dx, dy=dy, fraction=fraction, **kwargs)\n    rgb0[..., :3] = rgb1[..., :3]\n    return rgb0",
    "docstring": "Combine colormapped data values with an illumination intensity map (a.k.a. \"hillshade\") of the values. Parameters ---------- data : 2D array-like The height values used to generate a shaded map. cmap : The colormap used to color the *data* array. Note that this must be a instance. For example, rather than passing in `~matplotlib.colors.Normalize~numpy.ndarray` An (M, N, 4) array of floats ranging between 0-1.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\colors.py",
    "ast_data": "FunctionDef name:shade arg:self arg:data arg:cmap arg:norm arg:blend_mode arg:vmin arg:vmax arg:vert_exag arg:dx arg:dy arg:fraction arguments arg arg arg arg arg arg arg arg arg arg arg arg If Compare Assign Call If Compare Assign Call If Compare Assign Call Assign Call Call Assign Call Assign Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "DynamicShapesSpec",
    "source_code": "@dataclasses.dataclass\nclass DynamicShapesSpec:\n    dynamic_shapes: Union[dict[str, Any], tuple[Any], list[Any], None]\n    dims: dict[str, RootDim]",
    "docstring": "This stores a dynamic_shapes spec for de/serialization.",
    "type": "class",
    "file_path": "pytorch\\torch\\_export\\serde\\dynamic_shapes.py",
    "ast_data": "ClassDef name:DynamicShapesSpec"
  },
  {
    "library": "matplotlib",
    "name": "__init__",
    "source_code": "def __init__(self, marker, fillstyle=None, transform=None, capstyle=None, joinstyle=None):\n    self._marker_function = None\n    self._user_transform = transform\n    self._user_capstyle = CapStyle(capstyle) if capstyle is not None else None\n    self._user_joinstyle = JoinStyle(joinstyle) if joinstyle is not None else None\n    self._set_fillstyle(fillstyle)\n    self._set_marker(marker)",
    "docstring": "Parameters ---------- marker : str, array-like, Path, MarkerStyle - Another instance of copies the details of that *marker*. - For other possible marker values, see the module docstring . fillstyle : str, default: :rc: One of 'full', 'left', 'right', 'bottom', 'top', 'none'. transform : , optional Transform that will be combined with the native transform of the marker. capstyle : or %(CapStyle)s, optional Cap style that will override the default cap style of the marker. joinstyle : or %(JoinStyle)s, optional Join style that will override the default join style of the marker.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\markers.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:marker arg:fillstyle arg:transform arg:capstyle arg:joinstyle arguments arg arg arg arg arg arg Assign Assign Assign Compare Call Assign Compare Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "graph",
    "source_code": "class graph:\n    default_capture_stream: typing.Optional['torch.cuda.Stream'] = None\n\n    def __init__(self, cuda_graph, pool=None, stream=None, capture_error_mode: str='global'):\n        if self.__class__.default_capture_stream is None:\n            self.__class__.default_capture_stream = torch.cuda.Stream()\n        self.pool = () if pool is None else (pool,)\n        self.capture_stream = stream if stream is not None else self.__class__.default_capture_stream\n        assert self.capture_stream is not None\n        self.stream_ctx = torch.cuda.stream(self.capture_stream)\n        self.cuda_graph = cuda_graph\n        self.capture_error_mode = capture_error_mode\n\n    def __enter__(self):\n        torch.cuda.synchronize()\n        gc.collect()\n        torch.cuda.empty_cache()\n        self.stream_ctx.__enter__()\n        self.cuda_graph.capture_begin(*self.pool, capture_error_mode=self.capture_error_mode)\n\n    def __exit__(self, exc_type, exc_value, traceback):\n        self.cuda_graph.capture_end()\n        self.stream_ctx.__exit__(exc_type, exc_value, traceback)",
    "docstring": "Context-manager that captures CUDA work into a :class: object for later replay. See :ref: for a general introduction, detailed use, and constraints. Arguments: cuda_graph (torch.cuda.CUDAGraph): Graph object used for capture. pool (optional): Opaque token (returned by a call to :func: or :meth:) hinting this graph's capture may share memory from the specified pool. See :ref:. stream (torch.cuda.Stream, optional): If supplied, will be set as the current stream in the context. If not supplied, `cudaStreamCaptureMode ` argument to this capture. .. warning:: This API is in beta and may change in future releases. .. _cudaStreamCaptureMode:",
    "type": "class",
    "file_path": "pytorch\\torch\\cuda\\graphs.py",
    "ast_data": "ClassDef name:graph FunctionDef name:__init__ arg:self arg:cuda_graph arg:pool arg:stream arg:capture_error_mode arguments arg arg arg arg arg If Compare Assign Call Assign Compare Assign Compare Compare Assign Call Assign Assign FunctionDef name:__enter__ arg:self arguments arg Call Call Call Call Call FunctionDef name:__exit__ arg:self arg:exc_type arg:exc_value arg:traceback arguments arg arg arg arg Call Call"
  },
  {
    "library": "kornia",
    "name": "get_pinhole",
    "source_code": "def get_pinhole(self, idx: int) -> PinholeCamera:\n    height: Tensor = self.height[..., idx]\n    width: Tensor = self.width[..., idx]\n    intrinsics: Tensor = self.intrinsics[:, idx]\n    extrinsics: Tensor = self.extrinsics[:, idx]\n    return PinholeCamera(intrinsics, extrinsics, height, width)",
    "docstring": "Return a PinholeCamera object with parameters such as Bx4x4.",
    "type": "method",
    "file_path": "kornia\\kornia\\geometry\\camera\\pinhole.py",
    "ast_data": "FunctionDef name:get_pinhole arg:self arg:idx arguments arg arg Return return:yes Call"
  },
  {
    "library": "kornia",
    "name": "sample_points_2d",
    "source_code": "def sample_points_2d(self, heights: Tensor, widths: Tensor, num_img_rays: Tensor) -> Dict[int, RaySampler.Points2D]:\n    num_img_rays = num_img_rays.int()\n    points2d_as_flat_tensors: Dict[int, RaySampler.Points2D_FlatTensors] = {}\n    for camera_id, (height, width, n) in enumerate(zip(heights.tolist(), widths.tolist(), num_img_rays.tolist())):\n        y_rand = torch.trunc(torch.rand(n, device=self._device, dtype=self._dtype) * height)\n        x_rand = torch.trunc(torch.rand(n, device=self._device, dtype=self._dtype) * width)\n        RaySampler._add_points2d_as_flat_tensors_to_num_ray_dict(n, x_rand, y_rand, camera_id, points2d_as_flat_tensors)\n    return RaySampler._build_num_ray_dict_of_points2d(points2d_as_flat_tensors)",
    "docstring": "Randomly sample pixel points in 2d. Args: heights: tensor that holds scene camera image heights (can vary between cameras): math: . widths: tensor that holds scene camera image widths (can vary between cameras): math: . num_img_rays: tensor that holds the number of rays to randomly cast from each scene camera: math: . Returns: dictionary of Points2D objects that holds information on pixel 2d coordinates of each ray and the camera id it was casted by: Dict[int, Points2D]",
    "type": "method",
    "file_path": "kornia\\kornia\\nerf\\samplers.py",
    "ast_data": "FunctionDef name:sample_points_2d arg:self arg:heights arg:widths arg:num_img_rays arguments arg arg arg arg Assign Call For Call Call Call Call Call Assign Call Call Assign Call Call Call Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "__init__",
    "source_code": "def __init__(self, canvas, animated_artists=()):\n    self.canvas = canvas\n    self._bg = None\n    self._artists = []\n    for a in animated_artists:\n        self.add_artist(a)\n    self.cid = canvas.mpl_connect('draw_event', self.on_draw)",
    "docstring": "Parameters ---------- canvas : FigureCanvasAgg The canvas to work with, this only works for subclasses of the Agg canvas which have the and methods. animated_artists : Iterable[Artist] List of the artists to manage",
    "type": "method",
    "file_path": "matplotlib\\galleries\\users_explain\\animations\\blitting.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:canvas arg:animated_artists arguments arg arg arg Assign Assign Assign For Call Assign Call"
  },
  {
    "library": "numpy",
    "name": "add_installed_library",
    "source_code": "def add_installed_library(self, name, sources, install_dir, build_info=None):\n    if not build_info:\n        build_info = {}\n    install_dir = os.path.join(self.package_path, install_dir)\n    self._add_library(name, sources, install_dir, build_info)\n    self.installed_libraries.append(InstallableLib(name, build_info, install_dir))",
    "docstring": "Similar to add_library, but the specified library is installed. Most C libraries used with `add_libraryget_infoadd_npy_pkg_config` for more information).",
    "type": "method",
    "file_path": "numpy\\numpy\\distutils\\misc_util.py",
    "ast_data": "FunctionDef name:add_installed_library arg:self arg:name arg:sources arg:install_dir arg:build_info arguments arg arg arg arg arg If Assign Assign Call Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "run",
    "source_code": "def run(self, *args: Any, initial_env: dict[torch.fx.Node, Any] | None=None, enable_io_processing: bool=True) -> Any:\n    self.verification_infos = []\n    self._args = args\n    return super().run(*args, initial_env=initial_env, enable_io_processing=enable_io_processing)",
    "docstring": "Run the interpreter with the given input arguments. This method executes the model and populates the :attr: attribute with the verification information for each value. Args: args: The input arguments for the model. initial_env: The initial environment for the interpreter. enable_io_processing: Whether to enable IO processing. Returns: Any: The result of executing the model.",
    "type": "method",
    "file_path": "pytorch\\torch\\onnx\\_internal\\exporter\\_verification.py",
    "ast_data": "FunctionDef name:run arg:self arguments arg arg arg arg Assign Assign Return return:yes Call Call"
  },
  {
    "library": "matplotlib",
    "name": "mouse_init",
    "source_code": "def mouse_init(self, rotate_btn=1, pan_btn=2, zoom_btn=3):\n    self.button_pressed = None\n    self._rotate_btn = np.atleast_1d(rotate_btn).tolist()\n    self._pan_btn = np.atleast_1d(pan_btn).tolist()\n    self._zoom_btn = np.atleast_1d(zoom_btn).tolist()",
    "docstring": "Set the mouse buttons for 3D rotation and zooming. Parameters ---------- rotate_btn : int or list of int, default: 1 The mouse button or buttons to use for 3D rotation of the Axes. pan_btn : int or list of int, default: 2 The mouse button or buttons to use to pan the 3D Axes. zoom_btn : int or list of int, default: 3 The mouse button or buttons to use to zoom the 3D Axes.",
    "type": "method",
    "file_path": "matplotlib\\lib\\mpl_toolkits\\mplot3d\\axes3d.py",
    "ast_data": "FunctionDef name:mouse_init arg:self arg:rotate_btn arg:pan_btn arg:zoom_btn arguments arg arg arg arg Assign Assign Call Call Assign Call Call Assign Call Call"
  },
  {
    "library": "pytorch",
    "name": "collect_fw_donated_buffer_idxs",
    "source_code": "def collect_fw_donated_buffer_idxs(fw_ins: list[Optional[FakeTensor]], user_fw_outs: list[Optional[FakeTensor]], bw_outs: list[Optional[FakeTensor]], saved_tensors: list[FakeTensor]) -> list[int]:\n    storage_refs = set()\n    for t in itertools.chain(fw_ins, user_fw_outs, bw_outs):\n        if t is not None and isinstance(t, FakeTensor) and (not is_sparse_any(t)):\n            storage_refs.add(StorageWeakRef(t.untyped_storage()))\n    num_saved_tensor = len(saved_tensors)\n    donated_buffer_idxs = []\n    for i in range(num_saved_tensor):\n        t = saved_tensors[i]\n        if t is not None and (not is_sparse_any(t)) and (StorageWeakRef(t.untyped_storage()) not in storage_refs):\n            donated_buffer_idxs.append(i)\n    return donated_buffer_idxs",
    "docstring": "Checks if the saved tensors are donated buffers, which means a saved tensor is not an alias of any tensors in fw_ins, user_fw_outs, and bw_outs.",
    "type": "function",
    "file_path": "pytorch\\torch\\_functorch\\_aot_autograd\\jit_compile_runtime_wrappers.py",
    "ast_data": "FunctionDef name:collect_fw_donated_buffer_idxs arg:fw_ins arg:user_fw_outs arg:bw_outs arg:saved_tensors arguments arg arg arg arg Assign Call For Call If BoolOp Compare Call Call Call Call Call Assign Call Assign For Call Assign If BoolOp Compare Call Compare Call Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_get_uncompiled_header",
    "source_code": "@classmethod\ndef _get_uncompiled_header(cls, device: str) -> str | None:\n    return None",
    "docstring": "Given a device type, returns the path to a CPP header file to be precompiled.",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\codecache.py",
    "ast_data": "FunctionDef name:_get_uncompiled_header arg:cls arg:device arguments arg arg Return return:no"
  },
  {
    "library": "matplotlib",
    "name": "number",
    "source_code": "@property\ndef number(self):\n    if hasattr(self, '_number'):\n        return self._number\n    else:\n        raise AttributeError(\"'Figure' object has no attribute 'number'. In the future thiswill change to returning 'None' instead.\")",
    "docstring": "The figure id, used to identify figures in .",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\figure.py",
    "ast_data": "FunctionDef name:number arg:self arguments arg If Call Return return:yes Raise Call"
  },
  {
    "library": "pytorch",
    "name": "__str__",
    "source_code": "def __str__(self) -> str:\n    args: list[str] = []\n    for field in dataclasses.fields(self):\n        if field.name == 'wrapper':\n            continue\n        val = getattr(self, field.name)\n        args.append(f'{field.name}={(val.get_name() if field.type is ir.Buffer else val)}')\n    return f'{type(self).__name__}({', '.join(args)})'",
    "docstring": "Emits a string representation that fits on one line.",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\codegen\\wrapper.py",
    "ast_data": "FunctionDef name:__str__ arg:self arguments arg For Call If Compare Assign Call Call Compare Call Return return:yes Call Call"
  },
  {
    "library": "numpy",
    "name": "ModuleDeprecationWarning",
    "source_code": "class ModuleDeprecationWarning(DeprecationWarning):\n    pass",
    "docstring": "Module deprecation warning. .. warning:: This warning should not be used, since nose testing is not relevant anymore. The nose tester turns ordinary Deprecation warnings into test failures. That makes it hard to deprecate whole modules, because they get imported by default. So this is a special Deprecation warning that the nose tester will let pass without making tests fail.",
    "type": "class",
    "file_path": "numpy\\numpy\\exceptions.py",
    "ast_data": "ClassDef name:ModuleDeprecationWarning"
  },
  {
    "library": "django",
    "name": "add_base_argument",
    "source_code": "def add_base_argument(self, parser, *args, **kwargs):\n    for arg in args:\n        if arg in self.suppressed_base_arguments:\n            kwargs['help'] = argparse.SUPPRESS\n            break\n    parser.add_argument(*args, **kwargs)",
    "docstring": "Call the parser's add_argument() method, suppressing the help text according to BaseCommand.suppressed_base_arguments.",
    "type": "method",
    "file_path": "django\\django\\core\\management\\base.py",
    "ast_data": "FunctionDef name:add_base_argument arg:self arg:parser arguments arg arg arg arg For If Compare Assign Call"
  },
  {
    "library": "pandas",
    "name": "round",
    "source_code": "def round(self, decimals: int=0) -> Self:\n    return self._constructor(self.to_series().round(decimals))",
    "docstring": "Round each value in the Index to the given number of decimals. Parameters ---------- decimals : int, optional Number of decimal places to round to. If decimals is negative, it specifies the number of positions to the left of the decimal point. Returns ------- Index A new Index with the rounded values. Examples -------- >>> import pandas as pd >>> idx = pd.Index([10.1234, 20.5678, 30.9123, 40.4567, 50.7890]) >>> idx.round(decimals=2) Index([10.12, 20.57, 30.91, 40.46, 50.79], dtype='float64')",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\indexes\\base.py",
    "ast_data": "FunctionDef name:round arg:self arg:decimals arguments arg arg Return return:yes Call Call Call"
  },
  {
    "library": "numpy",
    "name": "cyg2win32",
    "source_code": "def cyg2win32(path: str) -> str:\n    if sys.platform != 'cygwin':\n        return path\n    return subprocess.check_output(['/usr/bin/cygpath', '--windows', path], text=True)",
    "docstring": "Convert a path from Cygwin-native to Windows-native. Uses the cygpath utility (part of the Base install) to do the actual conversion. Falls back to returning the original path if this fails. Handles the default `` Parameters ---------- path : str The path to convert Returns ------- converted_path : str The converted path Notes ----- Documentation for cygpath utility: Documentation for the C function it wraps:",
    "type": "function",
    "file_path": "numpy\\numpy\\distutils\\misc_util.py",
    "ast_data": "FunctionDef name:cyg2win32 arg:path arguments arg If Compare Return return:yes Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "python_grad_func",
    "source_code": "@property\ndef python_grad_func(self):\n    return self._python_grad_func",
    "docstring": "Python gradient function callable.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\function.py",
    "ast_data": "FunctionDef name:python_grad_func arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "from_config",
    "source_code": "@classmethod\ndef from_config(cls, config, custom_objects=None, columns_by_name=None):\n    from tensorflow.python.feature_column.serialization import deserialize_feature_column\n    _check_config_keys(config, cls._fields)\n    kwargs = _standardize_and_copy_config(config)\n    kwargs['categorical_column'] = deserialize_feature_column(config['categorical_column'], custom_objects, columns_by_name)\n    return cls(**kwargs)",
    "docstring": "See 'FeatureColumn` base class.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\feature_column\\feature_column_v2.py",
    "ast_data": "FunctionDef name:from_config arg:cls arg:config arg:custom_objects arg:columns_by_name arguments arg arg arg arg Call Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "set_epsilon",
    "source_code": "def set_epsilon(value):\n    global _EPSILON\n    _EPSILON = value",
    "docstring": "Sets the value of the fuzz factor used in numeric expressions. Args: value: float. New value of epsilon. Example: >>> tf.keras.backend.epsilon() 1e-07 >>> tf.keras.backend.set_epsilon(1e-5) >>> tf.keras.backend.epsilon() 1e-05 >>> tf.keras.backend.set_epsilon(1e-7)",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\backend_config.py",
    "ast_data": "FunctionDef name:set_epsilon arg:value arguments arg Assign"
  },
  {
    "library": "matplotlib",
    "name": "cla",
    "source_code": "def cla(self):\n    if self._subclass_uses_cla:\n        self.__clear()\n    else:\n        self.clear()",
    "docstring": "Clear the Axes.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\axes\\_base.py",
    "ast_data": "FunctionDef name:cla arg:self arguments arg If Call Call"
  },
  {
    "library": "matplotlib",
    "name": "set_heights",
    "source_code": "def set_heights(self, heights):\n    self._heights = 0.5 * np.asarray(heights).ravel()\n    self.stale = True",
    "docstring": "Set the lengths of second axes (e.g., minor axes).",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\collections.py",
    "ast_data": "FunctionDef name:set_heights arg:self arg:heights arguments arg arg Assign Call Call Assign"
  },
  {
    "library": "matplotlib",
    "name": "set",
    "source_code": "def set(self, other):\n    if np.any(self._points != other.get_points()):\n        self._points = other.get_points()\n        self.invalidate()",
    "docstring": "Set this bounding box from the \"frozen\" bounds of another .",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\transforms.py",
    "ast_data": "FunctionDef name:set arg:self arg:other arguments arg arg If Call Compare Call Assign Call Call"
  },
  {
    "library": "pandas",
    "name": "isnull",
    "source_code": "@doc(NDFrame.isna, klass=_shared_doc_kwargs['klass'])\ndef isnull(self) -> Series:\n    return super().isnull()",
    "docstring": "Series.isnull is an alias for Series.isna.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\series.py",
    "ast_data": "FunctionDef name:isnull arg:self arguments arg Return return:yes Call Call Call"
  },
  {
    "library": "kornia",
    "name": "original_res_logits",
    "source_code": "def original_res_logits(self, input_size: tuple[int, int], original_size: tuple[int, int], image_size_encoder: Optional[tuple[int, int]]) -> Tensor:\n    x = self.logits\n    if isinstance(image_size_encoder, tuple):\n        x = resize(x, size=image_size_encoder, interpolation='bilinear', align_corners=False, antialias=False)\n    x = x[..., :input_size[0], :input_size[1]]\n    x = resize(x, size=original_size, interpolation='bilinear', align_corners=False, antialias=False)\n    self._original_res_logits = x\n    return self._original_res_logits",
    "docstring": "Remove padding and upscale the logits to the original image size. Resize to image encoder input -> remove padding (bottom and right) -> Resize to original size .. note:: This method set a internal which will be used if available for the binary masks. Args: input_size: The size of the image input to the model, in (H, W) format. Used to remove padding. original_size: The original size of the image before resizing for input to the model, in (H, W) format. image_size_encoder: The size of the input image for image encoder, in (H, W) format. Used to resize the logits back to encoder resolution before remove the padding. Returns: Batched logits in :math: format, where (H, W) is given by original_size.",
    "type": "method",
    "file_path": "kornia\\kornia\\contrib\\models\\structures.py",
    "ast_data": "FunctionDef name:original_res_logits arg:self arg:input_size arg:original_size arg:image_size_encoder arguments arg arg arg arg Assign If Call Assign Call Assign Assign Call Assign Return return:yes"
  },
  {
    "library": "numpy",
    "name": "outer",
    "source_code": "def outer(self, a, b):\n    da, db = (getdata(a), getdata(b))\n    d = self.f.outer(da, db)\n    ma = getmask(a)\n    mb = getmask(b)\n    if ma is nomask and mb is nomask:\n        m = nomask\n    else:\n        ma = getmaskarray(a)\n        mb = getmaskarray(b)\n        m = umath.logical_or.outer(ma, mb)\n    if not m.ndim and m:\n        return masked\n    if m is not nomask:\n        np.copyto(d, da, where=m)\n    if not d.shape:\n        return d\n    masked_d = d.view(get_masked_subclass(a, b))\n    masked_d._mask = m\n    return masked_d",
    "docstring": "Return the function applied to the outer product of a and b.",
    "type": "method",
    "file_path": "numpy\\numpy\\ma\\core.py",
    "ast_data": "FunctionDef name:outer arg:self arg:a arg:b arguments arg arg arg Assign Call Call Assign Call Assign Call Assign Call If BoolOp Compare Compare Assign Assign Call Assign Call Assign Call If BoolOp Return return:yes If Compare Call If Return return:yes Assign Call Call Assign Return return:yes"
  },
  {
    "library": "pandas",
    "name": "to_feather",
    "source_code": "def to_feather(self, path: FilePath | WriteBuffer[bytes], **kwargs) -> None:\n    from pandas.io.feather_format import to_feather\n    to_feather(self, path, **kwargs)",
    "docstring": "Write a DataFrame to the binary Feather format. Parameters ---------- path : str, path object, file-like object String, path object (implementing `pyarrow.feather.write_feathercompressioncompression_levelchunksizeversionfeather file to_parquet`. Examples -------- >>> df = pd.DataFrame([[1, 2, 3], [4, 5, 6]]) >>> df.to_feather(\"file.feather\") # doctest: +SKIP",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\frame.py",
    "ast_data": "FunctionDef name:to_feather arg:self arg:path arguments arg arg arg Call"
  },
  {
    "library": "matplotlib",
    "name": "set_label_minor",
    "source_code": "def set_label_minor(self, labelOnlyBase):\n    self.labelOnlyBase = labelOnlyBase",
    "docstring": "Switch minor tick labeling on or off. Parameters ---------- labelOnlyBase : bool If True, label ticks only at integer powers of base.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\ticker.py",
    "ast_data": "FunctionDef name:set_label_minor arg:self arg:labelOnlyBase arguments arg arg Assign"
  },
  {
    "library": "django",
    "name": "get_current_timezone",
    "source_code": "def get_current_timezone():\n    return getattr(_active, 'value', get_default_timezone())",
    "docstring": "Return the currently active time zone as a tzinfo instance.",
    "type": "function",
    "file_path": "django\\django\\utils\\timezone.py",
    "ast_data": "FunctionDef name:get_current_timezone arguments Return return:yes Call Call"
  },
  {
    "library": "matplotlib",
    "name": "_unikey_or_keysym_to_mplkey",
    "source_code": "def _unikey_or_keysym_to_mplkey(unikey, keysym):\n    if unikey and unikey.isprintable():\n        return unikey\n    key = keysym.lower()\n    if key.startswith('kp_'):\n        key = key[3:]\n    if key.startswith('page_'):\n        key = key.replace('page_', 'page')\n    if key.endswith(('_l', '_r')):\n        key = key[:-2]\n    if sys.platform == 'darwin' and key == 'meta':\n        key = 'cmd'\n    key = {'return': 'enter', 'prior': 'pageup', 'next': 'pagedown'}.get(key, key)\n    return key",
    "docstring": "Convert a Unicode key or X keysym to a Matplotlib key name. The Unicode key is checked first; this avoids having to list most printable keysyms such as ``.",
    "type": "function",
    "file_path": "matplotlib\\lib\\matplotlib\\cbook.py",
    "ast_data": "FunctionDef name:_unikey_or_keysym_to_mplkey arg:unikey arg:keysym arguments arg arg If BoolOp Call Return return:yes Assign Call If Call Assign If Call Assign Call If Call Assign If BoolOp Compare Compare Assign Assign Call Return return:yes"
  },
  {
    "library": "kornia",
    "name": "_combine",
    "source_code": "def _combine(self, *args: list[onnx.ModelProto], io_maps: Optional[list[tuple[str, str]]]=None) -> onnx.ModelProto:\n    if len(args) == 0:\n        raise ValueError('No operators found.')\n    combined_op = args[0]\n    combined_op = onnx.compose.add_prefix(combined_op, prefix=f'K{str(0).zfill(2)}-')\n    for i, op in enumerate(args[1:]):\n        next_op = onnx.compose.add_prefix(op, prefix=f'K{str(i + 1).zfill(2)}-')\n        if io_maps is None:\n            io_map = [(f'K{str(i).zfill(2)}-output', f'K{str(i + 1).zfill(2)}-input')]\n        else:\n            io_map = [(f'K{str(i).zfill(2)}-{it[0]}', f'K{str(i + 1).zfill(2)}-{it[1]}') for it in io_maps[i]]\n        combined_op = onnx.compose.merge_models(combined_op, next_op, io_map=io_map)\n    return combined_op",
    "docstring": "Combine the provided ONNX models into a single ONNX graph. Optionally, map inputs and outputs between operators using the . Args: args: list of onnx operations. io_maps: A list of list of tuples representing input-output mappings for combining the models. Example: [[(model1_output_name, model2_input_name)], [(model2_output_name, model3_input_name)]]. Returns: onnx.ModelProto: The combined ONNX model as a single ONNX graph.",
    "type": "method",
    "file_path": "kornia\\kornia\\core\\mixin\\onnx.py",
    "ast_data": "FunctionDef name:_combine arg:self arguments arg arg arg If Compare Call Raise Call Assign Assign Call Call Call For Call Assign Call Call Call If Compare Assign Call Call Call Call Assign Call Call Call Call Assign Call Return return:yes"
  },
  {
    "library": "django",
    "name": "keep_lazy_text",
    "source_code": "def keep_lazy_text(func):\n    return keep_lazy(str)(func)",
    "docstring": "A decorator for functions that accept lazy arguments and return text.",
    "type": "function",
    "file_path": "django\\django\\utils\\functional.py",
    "ast_data": "FunctionDef name:keep_lazy_text arg:func arguments arg Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_all_nested_row_partitions",
    "source_code": "def _all_nested_row_partitions(rt):\n    if isinstance(rt, tensor_lib.Tensor):\n        if rt.shape.rank <= 1:\n            return ()\n        else:\n            rt2 = ragged_tensor.RaggedTensor.from_tensor(rt)\n            return rt2._nested_row_partitions\n    else:\n        tail_partitions = _all_nested_row_partitions(rt.flat_values)\n        head_partitions = rt._nested_row_partitions\n        return head_partitions + tail_partitions",
    "docstring": "Returns all nested row partitions in rt, including for dense dimensions.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\structured\\structured_array_ops.py",
    "ast_data": "FunctionDef name:_all_nested_row_partitions arg:rt arguments arg If Call If Compare Return return:no Assign Call Return return:yes Assign Call Assign Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "get_function_hessians",
    "source_code": "def get_function_hessians(self, alpha, J, ecc, dofs):\n    d2sdksi2 = self.get_d2Sidksij2(alpha, ecc)\n    d2fdksi2 = dofs @ d2sdksi2\n    H_rot = self.get_Hrot_from_J(J)\n    d2fdx2 = d2fdksi2 @ H_rot\n    return _transpose_vectorized(d2fdx2)",
    "docstring": "Parameters ---------- *alpha* is a (N x 3 x 1) array (array of column-matrices) of barycentric coordinates *J* is a (N x 2 x 2) array of jacobian matrices (jacobian matrix at triangle first apex) *ecc* is a (N x 3 x 1) array (array of column-matrices) of triangle eccentricities *dofs* is a (N x 1 x 9) arrays (arrays of row-matrices) of computed degrees of freedom. Returns ------- Returns the values of interpolated function 2nd-derivatives [d2z/dx2, d2z/dy2, d2z/dxdy] in global coordinates at locations alpha, as a column-matrices of shape (N x 3 x 1).",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\tri\\_triinterpolate.py",
    "ast_data": "FunctionDef name:get_function_hessians arg:self arg:alpha arg:J arg:ecc arg:dofs arguments arg arg arg arg arg Assign Call Assign Assign Call Assign Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "display_tpot",
    "source_code": "def display_tpot():\n    e2e_latency_mean = statistics.mean(latency_list)\n    ttft_mean = statistics.mean(ttft_ms_list)\n    generation_time_mean = e2e_latency_mean - ttft_mean\n    tpot = generation_time_mean / (OUTPUT_TOKEN_LEN - 1)\n    print(f'TPOT: {round(tpot, 2)} ms')",
    "docstring": "Calculate the time per output token.",
    "type": "function",
    "file_path": "tensorflow\\third_party\\xla\\xla\\backends\\cpu\\benchmarks\\e2e\\gemma2\\pytorch_2b\\benchmark.py",
    "ast_data": "FunctionDef name:display_tpot arguments Assign Call Assign Call Assign Assign Call Call"
  },
  {
    "library": "sphinx",
    "name": "get_terminal_width",
    "source_code": "def get_terminal_width() -> int:\n    return shutil.get_terminal_size().columns - 1",
    "docstring": "Return the width of the terminal in columns.",
    "type": "function",
    "file_path": "sphinx\\sphinx\\util\\console.py",
    "ast_data": "FunctionDef name:get_terminal_width arguments Return return:yes Call"
  },
  {
    "library": "django",
    "name": "convert_extent",
    "source_code": "def convert_extent(self, box):\n    if box is None:\n        return None\n    shell = GEOSGeometry(box).shell\n    xmin, ymin = shell[0][:2]\n    xmax, ymax = shell[2][:2]\n    return (xmin, ymin, xmax, ymax)",
    "docstring": "Convert the polygon data received from SpatiaLite to min/max values.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\db\\backends\\spatialite\\operations.py",
    "ast_data": "FunctionDef name:convert_extent arg:self arg:box arguments arg arg If Compare Return return:no Assign Call Assign Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "get_state_shape",
    "source_code": "def get_state_shape(s):\n    c = _concat(batch_size, s)\n    size = array_ops.zeros(c, dtype=dtype)\n    if not context.executing_eagerly():\n        c_static = _concat(batch_size, s, static=True)\n        size.set_shape(c_static)\n    return size",
    "docstring": "Combine s with batch_size to get a proper tensor shape.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\layers\\legacy_rnn\\rnn_cell_impl.py",
    "ast_data": "FunctionDef name:get_state_shape arg:s arguments arg Assign Call Assign Call If Call Assign Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_StatelessRandomGammaV2Grad",
    "source_code": "@ops.RegisterGradient('StatelessRandomGammaV2')\ndef _StatelessRandomGammaV2Grad(op: ops.Operation, grad):\n    shape = op.inputs[0]\n    alpha = op.inputs[2]\n    sample = op.outputs[0]\n    with ops.control_dependencies([grad]):\n        return (None, None, _StatelessGammaGradAlpha(shape, alpha, sample, grad))",
    "docstring": "Returns the gradient of a Gamma sample w.r.t. alpha. The gradient is computed using implicit differentiation (Figurnov et al., 2018). Args: op: A operation. We assume that the inputs to the operation are , and tensors, and the output is the tensor. grad: The incoming gradient of the same shape as . Returns: A with derivatives . References: Implicit Reparameterization Gradients: [Figurnov et al., 2018] ( ([pdf] (",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\random_grad.py",
    "ast_data": "FunctionDef name:_StatelessRandomGammaV2Grad arg:op arg:grad arguments arg arg Assign Assign Assign With Call Return return:yes Call Call"
  },
  {
    "library": "seaborn",
    "name": "dropna",
    "source_code": "def dropna(self, *vars):\n    vals = [getattr(self, var) for var in vars]\n    vals = [v for v in vals if v is not None]\n    not_na = np.all(np.column_stack([pd.notnull(v) for v in vals]), axis=1)\n    for var in vars:\n        val = getattr(self, var)\n        if val is not None:\n            setattr(self, var, val[not_na])",
    "docstring": "Remove observations with missing data.",
    "type": "method",
    "file_path": "seaborn\\seaborn\\regression.py",
    "ast_data": "FunctionDef name:dropna arg:self arguments arg arg Assign Call Assign Compare Assign Call Call Call For Assign Call If Compare Call"
  },
  {
    "library": "scipy",
    "name": "_pow10m1",
    "source_code": "def _pow10m1(x):\n    return np.expm1(_POW10_LOG10 * x)",
    "docstring": "10 ** x - 1 for x near 0",
    "type": "function",
    "file_path": "scipy\\scipy\\signal\\_filter_design.py",
    "ast_data": "FunctionDef name:_pow10m1 arg:x arguments arg Return return:yes Call"
  },
  {
    "library": "virtualenv",
    "name": "add_parser_arguments",
    "source_code": "@classmethod\ndef add_parser_arguments(cls, parser, interpreter, meta, app_data):\n    parser.add_argument('dest', help='directory to create virtualenv at', type=cls.validate_dest)\n    parser.add_argument('--clear', dest='clear', action='store_true', help='remove the destination directory if exist before starting (will overwrite files otherwise)', default=False)\n    parser.add_argument('--no-vcs-ignore', dest='no_vcs_ignore', action='store_true', help=\"don't create VCS ignore directive in the destination directory\", default=False)",
    "docstring": "Add CLI arguments for the creator. :param parser: the CLI parser :param app_data: the application data folder :param interpreter: the interpreter we're asked to create virtual environment for :param meta: value as returned by :meth:",
    "type": "method",
    "file_path": "virtualenv\\src\\virtualenv\\create\\creator.py",
    "ast_data": "FunctionDef name:add_parser_arguments arg:cls arg:parser arg:interpreter arg:meta arg:app_data arguments arg arg arg arg arg Call Call Call"
  },
  {
    "library": "scipy",
    "name": "CarromTable",
    "source_code": "class CarromTable(Benchmark):\n\n    def __init__(self, dimensions=2):\n        Benchmark.__init__(self, dimensions)\n        self._bounds = list(zip([-10.0] * self.N, [10.0] * self.N))\n        self.global_optimum = [(9.646157266348881, 9.646134286497169), (-9.646157266348881, 9.646134286497169), (9.646157266348881, -9.646134286497169), (-9.646157266348881, -9.646134286497169)]\n        self.fglob = -24.15681551650653\n\n    def fun(self, x, *args):\n        self.nfev += 1\n        u = cos(x[0]) * cos(x[1])\n        v = sqrt(x[0] ** 2 + x[1] ** 2)\n        return -(u * exp(abs(1 - v / pi))) ** 2 / 30.0",
    "docstring": "CarromTable objective function. The CarromTable [1]_ global optimization problem is a multimodal minimization problem defined as follows: .. math:: f_{\\text{CarromTable}}(x) = - \\frac{1}{30}\\left(\\cos(x_1) cos(x_2) e^{\\left|1 - \\frac{\\sqrt{x_1^2 + x_2^2}}{\\pi}\\right|}\\right)^2 with :math: for :math:. *Global optimum*: :math: for :math: for :math: .. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions For Global Optimization Problems Int. Journal of Mathematical Modelling and Numerical Optimisation, 2013, 4, 150-194.",
    "type": "class",
    "file_path": "scipy\\benchmarks\\benchmarks\\go_benchmark_functions\\go_funcs_C.py",
    "ast_data": "ClassDef name:CarromTable FunctionDef name:__init__ arg:self arg:dimensions arguments arg arg Call Assign Call Call Assign Assign FunctionDef name:fun arg:self arg:x arguments arg arg arg Assign Call Call Assign Call Return return:yes Call Call"
  },
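A quick numerical check of the CarromTable definition above: evaluating the objective at one of the listed global optima should reproduce `fglob`. A small numpy sketch, with the function body transcribed from `fun`:

```python
import numpy as np

def carrom_table(x):
    # Transcribed from CarromTable.fun above.
    u = np.cos(x[0]) * np.cos(x[1])
    v = np.sqrt(x[0] ** 2 + x[1] ** 2)
    return -(u * np.exp(abs(1 - v / np.pi))) ** 2 / 30.0

x_star = (9.646157266348881, 9.646134286497169)
print(carrom_table(x_star))  # approximately -24.15681551650653 (fglob)
```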
  {
    "library": "kornia",
    "name": "projections_from_fundamental",
    "source_code": "def projections_from_fundamental(F_mat: Tensor) -> Tensor:\n    KORNIA_CHECK_SHAPE(F_mat, ['*', '3', '3'])\n    R1 = eye_like(3, F_mat)\n    t1 = vec_like(3, F_mat)\n    Ft_mat = F_mat.transpose(-2, -1)\n    _, e2 = _nullspace(Ft_mat)\n    R2 = cross_product_matrix(e2) @ F_mat\n    t2 = e2[..., :, None]\n    P1 = torch.cat([R1, t1], dim=-1)\n    P2 = torch.cat([R2, t2], dim=-1)\n    return stack([P1, P2], dim=-1)",
    "docstring": "Get the projection matrices from the Fundamental Matrix. Args: F_mat: the fundamental matrix with the shape :math:. Returns: The projection matrices with shape :math:.",
    "type": "function",
    "file_path": "kornia\\kornia\\geometry\\epipolar\\projection.py",
    "ast_data": "FunctionDef name:projections_from_fundamental arg:F_mat arguments arg Call Assign Call Assign Call Assign Call Assign Call Assign Call Assign Assign Call Assign Call Return return:yes Call"
  },
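The construction used above is the classical canonical pair P1 = [I | 0], P2 = [[e2]_x F | e2], where e2 is the left epipole (null vector of F^T). A minimal numpy sketch of the same recipe, using an SVD for the nullspace (one standard choice; the kornia helper `_nullspace` may differ in details):

```python
import numpy as np

def projections_from_F(F):
    # Left epipole e2: null vector of F^T, taken from the SVD.
    _, _, vt = np.linalg.svd(F.T)
    e2 = vt[-1]
    # Skew-symmetric cross-product matrix [e2]_x.
    e2x = np.array([[0.0, -e2[2], e2[1]],
                    [e2[2], 0.0, -e2[0]],
                    [-e2[1], e2[0], 0.0]])
    P1 = np.hstack([np.eye(3), np.zeros((3, 1))])  # canonical camera [I | 0]
    P2 = np.hstack([e2x @ F, e2.reshape(3, 1)])    # [[e2]_x F | e2]
    return P1, P2
```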
  {
    "library": "kornia",
    "name": "forward",
    "source_code": "def forward(self, patch: torch.Tensor) -> torch.Tensor:\n    xy = self.features(self._normalize_input(patch)).view(-1, 2)\n    angle = torch.atan2(xy[:, 0] + 1e-08, xy[:, 1] + self.eps)\n    return angle",
    "docstring": "Run forward. Args: patch: :math: Returns: angle in radians: :math:",
    "type": "method",
    "file_path": "kornia\\kornia\\feature\\orientation.py",
    "ast_data": "FunctionDef name:forward arg:self arg:patch arguments arg arg Assign Call Call Call Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_to_proto",
    "source_code": "def _to_proto(self) -> data_service_pb2.ProcessingModeDef.ShardingPolicy:\n    if self == ShardingPolicy.OFF:\n        return data_service_pb2.ProcessingModeDef.OFF\n    if self == ShardingPolicy.DYNAMIC:\n        return data_service_pb2.ProcessingModeDef.DYNAMIC\n    if self == ShardingPolicy.FILE:\n        return data_service_pb2.ProcessingModeDef.FILE\n    if self == ShardingPolicy.DATA:\n        return data_service_pb2.ProcessingModeDef.DATA\n    if self == ShardingPolicy.FILE_OR_DATA:\n        return data_service_pb2.ProcessingModeDef.FILE_OR_DATA\n    if self == ShardingPolicy.HINT:\n        return data_service_pb2.ProcessingModeDef.HINT\n    raise ValueError(f'Unable to convert sharding policy {self!r} to proto.')",
    "docstring": "Converts the policy to ProcessingModeDef proto enum.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\data\\experimental\\ops\\data_service_ops.py",
    "ast_data": "FunctionDef name:_to_proto arg:self arguments arg If Compare Return return:yes If Compare Return return:yes If Compare Return return:yes If Compare Return return:yes If Compare Return return:yes If Compare Return return:yes Raise Call"
  },
  {
    "library": "tensorflow",
    "name": "transform_feature",
    "source_code": "def transform_feature(self, transformation_cache, state_manager):\n    weight_tensor = transformation_cache.get(self.weight_feature_key, state_manager)\n    sparse_weight_tensor = self._transform_weight_tensor(weight_tensor)\n    sparse_categorical_tensor = _to_sparse_input_and_drop_ignore_values(transformation_cache.get(self.categorical_column, state_manager))\n    return (sparse_categorical_tensor, sparse_weight_tensor)",
    "docstring": "Applies weights to tensor generated from '.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\feature_column\\feature_column_v2.py",
    "ast_data": "FunctionDef name:transform_feature arg:self arg:transformation_cache arg:state_manager arguments arg arg arg Assign Call Assign Call Assign Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "maybe_set_is_frozen_param",
    "source_code": "def maybe_set_is_frozen_param(t: torch.Tensor) -> None:\n    if _freezing_active():\n        t._is_frozen_param = True",
    "docstring": "Mark the provided tensor as a frozen param if freezing is active.",
    "type": "function",
    "file_path": "pytorch\\torch\\_inductor\\freezing_utils.py",
    "ast_data": "FunctionDef name:maybe_set_is_frozen_param arg:t arguments arg If Call Assign"
  },
  {
    "library": "tensorflow",
    "name": "NonNeg",
    "source_code": "class NonNeg(Constraint):\n\n    def __call__(self, w):\n        return w * math_ops.cast(math_ops.greater_equal(w, 0.0), backend.floatx())",
    "docstring": "Constrains the weights to be non-negative. Also available via the shortcut function .",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\constraints.py",
    "ast_data": "ClassDef name:NonNeg FunctionDef name:__call__ arg:self arg:w arguments arg arg Return return:yes Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "register_module_full_backward_pre_hook",
    "source_code": "def register_module_full_backward_pre_hook(hook: Callable[['Module', _grad_t], Union[None, _grad_t]]) -> RemovableHandle:\n    handle = RemovableHandle(_global_backward_pre_hooks)\n    _global_backward_pre_hooks[handle.id] = hook\n    return handle",
    "docstring": "Register a backward pre-hook common to all the modules. .. warning :: This adds global state to the module and it is only intended for debugging/profiling purposes. Hooks registered using this function behave in the same way as those registered by :meth:. Refer to its documentation for more details. Hooks registered using this function will be called before hooks registered using :meth:. Returns: :class:: a handle that can be used to remove the added hook by calling ``",
    "type": "function",
    "file_path": "pytorch\\torch\\nn\\modules\\module.py",
    "ast_data": "FunctionDef name:register_module_full_backward_pre_hook arg:hook arguments arg Assign Call Assign Return return:yes"
  },
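A small usage sketch for the global hook above. `log_grad_output` is a hypothetical hook that just prints shapes; a full backward pre-hook receives `(module, grad_output)` and returning `None` leaves the gradients unchanged:

```python
import torch
import torch.nn as nn
from torch.nn.modules.module import register_module_full_backward_pre_hook

def log_grad_output(module, grad_output):
    # Runs before each module's backward; returning None keeps grads as-is.
    print(type(module).__name__, [g.shape for g in grad_output])

handle = register_module_full_backward_pre_hook(log_grad_output)
net = nn.Linear(4, 2)
net(torch.randn(3, 4)).sum().backward()
handle.remove()  # global hooks should be removed once done
```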
  {
    "library": "pandas",
    "name": "_get_plot_backend",
    "source_code": "def _get_plot_backend(backend: str | None=None):\n    backend_str: str = backend or get_option('plotting.backend')\n    if backend_str in _backends:\n        return _backends[backend_str]\n    module = _load_backend(backend_str)\n    _backends[backend_str] = module\n    return module",
    "docstring": "Return the plotting backend to use (e.g. ). The plotting system of pandas uses matplotlib by default, but the idea here is that it can also work with other third-party backends. This function returns the module which provides a top-level method that will actually do the plotting. The backend is specified from a string, which either comes from the keyword argument , or, if not specified, from the option . All the rest of the code in this file uses the backend specified there for the plotting. The backend is imported lazily, as matplotlib is a soft dependency, and pandas can be used without it being installed. Notes ----- Modifies with imported backend as a side effect.",
    "type": "function",
    "file_path": "pandas\\pandas\\plotting\\_core.py",
    "ast_data": "FunctionDef name:_get_plot_backend arg:backend arguments arg BoolOp Call If Compare Return return:yes Assign Call Assign Return return:yes"
  },
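Usage sketch for the backend resolution described above: an explicit keyword wins, otherwise the global option is consulted (matplotlib must be installed for this to run):

```python
import pandas as pd

# Backend resolution order: per-call keyword first, then the global option.
pd.set_option("plotting.backend", "matplotlib")  # the default backend

df = pd.DataFrame({"x": [1, 2, 3]})
ax = df.plot()                      # uses the global option
ax = df.plot(backend="matplotlib")  # overrides per call
```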
  {
    "library": "kornia",
    "name": "trans_y",
    "source_code": "@classmethod\ndef trans_y(cls, y: Tensor) -> Se3:\n    zs = zeros_like(y)\n    return cls.trans(zs, y, zs)",
    "docstring": "Construct a y-axis translation. Args: y: the y-axis translation.",
    "type": "method",
    "file_path": "kornia\\kornia\\geometry\\liegroup\\se3.py",
    "ast_data": "FunctionDef name:trans_y arg:cls arg:y arguments arg arg Assign Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "select_one_layer_lstm_function",
    "source_code": "def select_one_layer_lstm_function(input, hx, params):\n\n    def use_mkldnn(input, hx, params):\n        if not torch._C._get_mkldnn_enabled():\n            return False\n        tensors = [input] + list(hx) + list(chain.from_iterable(params))\n        devices = {t.device for t in tensors}\n        if len(devices) != 1:\n            return False\n        device = devices.pop()\n        if device != torch.device('cpu'):\n            return False\n        dtypes = {t.dtype for t in tensors}\n        for dtype in dtypes:\n            if dtype not in [torch.float, torch.bfloat16]:\n                return False\n        if input.requires_grad:\n            return False\n        has_projections = hx[0].size(2) != hx[1].size(2)\n        if has_projections:\n            return False\n        return True\n    if use_mkldnn(input, hx, params):\n        return mkldnn_one_layer_lstm\n    else:\n        return one_layer_lstm",
    "docstring": "Check whether we could use decompose lstm with mkldnn_rnn_layer. All the below conditions need to be met: * `` to LSTM * params: the weight and bias tensors of LSTM",
    "type": "function",
    "file_path": "pytorch\\torch\\_decomp\\decompositions.py",
    "ast_data": "FunctionDef name:select_one_layer_lstm_function arg:input arg:hx arg:params arguments arg arg arg FunctionDef name:use_mkldnn arg:input arg:hx arg:params arguments arg arg arg If Call Return return:yes Assign Call Call Call Assign If Compare Call Return return:yes Assign Call If Compare Call Return return:yes Assign For If Compare Return return:yes If Return return:yes Assign Compare Call Call If Return return:yes Return return:yes If Call Return return:yes Return return:yes"
  },
  {
    "library": "django",
    "name": "Group",
    "source_code": "class Group(models.Model):\n    name = models.CharField(_('name'), max_length=150, unique=True)\n    permissions = models.ManyToManyField(Permission, verbose_name=_('permissions'), blank=True)\n    objects = GroupManager()\n\n    class Meta:\n        verbose_name = _('group')\n        verbose_name_plural = _('groups')\n\n    def __str__(self):\n        return self.name\n\n    def natural_key(self):\n        return (self.name,)",
    "docstring": "Groups are a generic way of categorizing users to apply permissions, or some other label, to those users. A user can belong to any number of groups. A user in a group automatically has all the permissions granted to that group. For example, if the group 'Site editors' has the permission can_edit_home_page, any user in that group will have that permission. Beyond permissions, groups are a convenient way to categorize users to apply some label, or extended functionality, to them. For example, you could create a group 'Special users', and you could write code that would do special things to those users -- such as giving them access to a members-only portion of your site, or sending them members-only email messages.",
    "type": "class",
    "file_path": "django\\django\\contrib\\auth\\models.py",
    "ast_data": "ClassDef name:Group Assign Call Call Assign Call Call Assign Call ClassDef name:Meta Assign Call Assign Call FunctionDef name:__str__ arg:self arguments arg Return return:yes FunctionDef name:natural_key arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "outside_or_skip_tpu_context",
    "source_code": "@contextlib.contextmanager\ndef outside_or_skip_tpu_context():\n    ctx, graph = enclosing_tpu_context_and_graph()\n    if ctx is None:\n        yield\n    else:\n        saved_context = graph._get_control_flow_context()\n        graph._set_control_flow_context(ctx.outer_context)\n        yield\n        graph._set_control_flow_context(saved_context)",
    "docstring": "Returns a context manager that skips current enclosing context if there is any.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\tpu_util.py",
    "ast_data": "FunctionDef name:outside_or_skip_tpu_context arguments Assign Call If Compare Assign Call Call Call"
  },
  {
    "library": "kornia",
    "name": "preprocess_keypoints",
    "source_code": "def preprocess_keypoints(input: Union[Tensor, Keypoints]) -> Keypoints:\n    if isinstance(input, Tensor):\n        if not (len(input.shape) == 3 and input.shape[1:] == torch.Size([2])):\n            raise RuntimeError(f'Only BxNx2 tensor is supported. Got {input.shape}.')\n        input = Keypoints(input, False)\n    if isinstance(input, Keypoints):\n        raise RuntimeError(f'Expect `Keypoints` type. Got {type(input)}.')\n    return input",
    "docstring": "Preprocess input keypoints.",
    "type": "function",
    "file_path": "kornia\\kornia\\augmentation\\utils\\helpers.py",
    "ast_data": "FunctionDef name:preprocess_keypoints arg:input arguments arg If Call If BoolOp Compare Call Compare Call Raise Call Assign Call If Call Raise Call Call Return return:yes"
  },
  {
    "library": "django",
    "name": "ring",
    "source_code": "@property\ndef ring(self):\n    return capi.geos_isring(self.ptr)",
    "docstring": "Return whether or not the geometry is a ring.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\geos\\geometry.py",
    "ast_data": "FunctionDef name:ring arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "MatplotlibDeprecationWarning",
    "source_code": "class MatplotlibDeprecationWarning(DeprecationWarning):\n    pass",
    "docstring": "A class for issuing deprecation warnings for Matplotlib users.",
    "type": "class",
    "file_path": "matplotlib\\lib\\matplotlib\\_api\\deprecation.py",
    "ast_data": "ClassDef name:MatplotlibDeprecationWarning"
  },
  {
    "library": "django",
    "name": "attr_value",
    "source_code": "def attr_value(self, target, index=0):\n    if not isinstance(target, str) or not isinstance(index, int):\n        raise TypeError\n    return capi.get_attr_value(self.ptr, force_bytes(target), index)",
    "docstring": "The attribute value for the given target node (e.g. 'PROJCS'). The index keyword specifies an index of the child node to return.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\gdal\\srs.py",
    "ast_data": "FunctionDef name:attr_value arg:self arg:target arg:index arguments arg arg arg If BoolOp Call Call Raise Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_to_batched_tensor_list",
    "source_code": "def _to_batched_tensor_list(self, value: composite_tensor.CompositeTensor) -> List['core_types.Symbol']:\n    get_spec_tensor_list = lambda spec, v: batchable_to_tensor_list(spec, v, minimum_rank=1) if isinstance(spec, BatchableTypeSpec) else spec._to_tensor_list(v)\n    component_batched_tensor_lists = nest.map_structure(get_spec_tensor_list, self._component_specs, self._to_components(value))\n    tensor_list = nest.flatten(component_batched_tensor_lists)\n    if any((t.shape.ndims == 0 for t in tensor_list)):\n        raise ValueError(f'While converting {value} to a list of tensors for batching, found a scalar item which cannot be batched.')\n    return tensor_list",
    "docstring": "Encodes as a flat list of each with rank>0.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\type_spec.py",
    "ast_data": "FunctionDef name:_to_batched_tensor_list arg:self arg:value arguments arg arg Assign arguments arg arg Call Call Call Assign Call Call Assign Call If Call Compare Raise Call Return return:yes"
  },
  {
    "library": "django",
    "name": "check_filterable",
    "source_code": "def check_filterable(self, expression):\n    if hasattr(expression, 'resolve_expression') and (not getattr(expression, 'filterable', True)):\n        raise NotSupportedError(expression.__class__.__name__ + ' is disallowed in the filter clause.')\n    if hasattr(expression, 'get_source_expressions'):\n        for expr in expression.get_source_expressions():\n            self.check_filterable(expr)",
    "docstring": "Raise an error if expression cannot be used in a WHERE clause.",
    "type": "method",
    "file_path": "django\\django\\db\\models\\sql\\query.py",
    "ast_data": "FunctionDef name:check_filterable arg:self arg:expression arguments arg arg If BoolOp Call Call Raise Call If Call For Call Call"
  },
  {
    "library": "matplotlib",
    "name": "get_geometry",
    "source_code": "def get_geometry(self):\n    return (self._nrows, self._ncols)",
    "docstring": "Return a tuple containing the number of rows and columns in the grid.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\gridspec.py",
    "ast_data": "FunctionDef name:get_geometry arg:self arguments arg Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "set_functions",
    "source_code": "def set_functions(self, functions):\n    if isinstance(functions, tuple) and len(functions) == 2 and callable(functions[0]) and callable(functions[1]):\n        self._functions = functions\n    elif isinstance(functions, Transform):\n        self._functions = (functions.transform, lambda x: functions.inverted().transform(x))\n    elif functions is None:\n        self._functions = (lambda x: x, lambda x: x)\n    else:\n        raise ValueError('functions argument of secondary Axes must be a two-tuple of callable functions with the first function being the transform and the second being the inverse')\n    self._set_scale()",
    "docstring": "Set how the secondary axis converts limits from the parent Axes. Parameters ---------- functions : 2-tuple of func, or with an inverse. Transform between the parent axis values and the secondary axis values. If supplied as a 2-tuple of functions, the first function is the forward transform function and the second is the inverse transform. If a transform is supplied, then the transform must have an inverse.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\axes\\_secondary_axes.py",
    "ast_data": "FunctionDef name:set_functions arg:self arg:functions arguments arg arg If BoolOp Call Compare Call Call Call Assign If Call Assign arguments arg Call Call If Compare Assign arguments arg arguments arg Raise Call Call"
  },
  {
    "library": "pytorch",
    "name": "dtype_to_type_ctor",
    "source_code": "def dtype_to_type_ctor(dtype: torch.dtype) -> Callable[[NumberType], NumberType]:\n    assert isinstance(dtype, torch.dtype)\n    if dtype is torch.bool:\n        return lambda x: bool(x)\n    if dtype in _integer_dtypes:\n        return sym_int\n    if dtype.is_floating_point:\n        return sym_float\n    if dtype in _complex_dtypes:\n        return lambda x: complex(x)\n    raise ValueError('Invalid dtype!')",
    "docstring": "Computes the corresponding Python type constructor for the given dtype.",
    "type": "function",
    "file_path": "pytorch\\torch\\_prims_common\\__init__.py",
    "ast_data": "FunctionDef name:dtype_to_type_ctor arg:dtype arguments arg Call If Compare Return return:yes arguments arg Call If Compare Return return:yes If Return return:yes If Compare Return return:yes arguments arg Call Raise Call"
  },
  {
    "library": "matplotlib",
    "name": "get_fontext_synonyms",
    "source_code": "def get_fontext_synonyms(fontext):\n    return {'afm': ['afm'], 'otf': ['otf', 'ttc', 'ttf'], 'ttc': ['otf', 'ttc', 'ttf'], 'ttf': ['otf', 'ttc', 'ttf']}[fontext]",
    "docstring": "Return a list of file extensions that are synonyms for the given file extension *fileext*.",
    "type": "function",
    "file_path": "matplotlib\\lib\\matplotlib\\font_manager.py",
    "ast_data": "FunctionDef name:get_fontext_synonyms arg:fontext arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "quantile",
    "source_code": "def quantile(self, value, name='quantile'):\n    return self._call_quantile(value, name)",
    "docstring": "Quantile function. Aka \"inverse cdf\" or \"percent point function\". Given random variable and , the is: Args: value: or . name: Python prepended to names of ops created by this function. Returns: quantile: a of shape with values of type .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\distributions\\distribution.py",
    "ast_data": "FunctionDef name:quantile arg:self arg:value arg:name arguments arg arg arg Return return:yes Call"
  },
  {
    "library": "django",
    "name": "is_hidden",
    "source_code": "@property\ndef is_hidden(self):\n    return self.field.widget.is_hidden",
    "docstring": "Return True if this BoundField's widget is hidden.",
    "type": "method",
    "file_path": "django\\django\\forms\\boundfield.py",
    "ast_data": "FunctionDef name:is_hidden arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "fetch",
    "source_code": "def fetch(self, val):\n\n    def _maybe_fetch(val):\n        if isinstance(val, RemoteValue):\n            return val.fetch()\n        else:\n            return val\n    return nest.map_structure(_maybe_fetch, val)",
    "docstring": "Blocking call to fetch results from the remote values. This is a wrapper around for a structure; it returns the execution results of s. If not ready, wait for them while blocking the caller. Example: Args: val: The value to fetch the results from. If this is structure of , will be called on the individual to get the result. Returns: If is a or a structure of s, return the fetched values immediately if they are available, or block the call until they are available, and return the fetched values with the same structure. If is other types, return it as-is.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\coordinator\\cluster_coordinator.py",
    "ast_data": "FunctionDef name:fetch arg:self arg:val arguments arg arg FunctionDef name:_maybe_fetch arg:val arguments arg If Call Return return:yes Call Return return:yes Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "_zero_many",
    "source_code": "def _zero_many(self, i, j):\n    i, j, M, N = self._prepare_indices(i, j)\n    n_samples = len(i)\n    offsets = np.empty(n_samples, dtype=self.indices.dtype)\n    ret = csr_sample_offsets(M, N, self.indptr, self.indices, n_samples, i, j, offsets)\n    if ret == 1:\n        self.sum_duplicates()\n        csr_sample_offsets(M, N, self.indptr, self.indices, n_samples, i, j, offsets)\n    self.data[offsets[offsets > -1]] = 0",
    "docstring": "Sets value at each (i, j) to zero, preserving sparsity structure. Here (i,j) index major and minor respectively.",
    "type": "method",
    "file_path": "scipy\\scipy\\sparse\\_compressed.py",
    "ast_data": "FunctionDef name:_zero_many arg:self arg:i arg:j arguments arg arg arg Assign Call Assign Call Assign Call Assign Call If Compare Call Call Assign Compare"
  },
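`_zero_many` is internal, but its effect is visible through public indexing: assigning zero to an existing entry changes the stored value while keeping the sparsity structure, until `eliminate_zeros()` is called. A small sketch:

```python
import numpy as np
from scipy.sparse import csr_matrix

A = csr_matrix(np.array([[1.0, 2.0], [0.0, 3.0]]))
print(A.nnz)         # 3 stored entries
A[0, 1] = 0          # zero an existing entry: value changes, structure kept
print(A.nnz)         # still 3 (an explicit zero is stored)
A.eliminate_zeros()  # drop explicit zeros only when asked to
print(A.nnz)         # 2
```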
  {
    "library": "tensorflow",
    "name": "_dimension",
    "source_code": "def _dimension(self, index: int) -> Optional[int]:\n    if index == 0:\n        if self._row_partitions:\n            return self._row_partitions[0].nrows\n        elif self.inner_rank is None:\n            return None\n        elif self.inner_rank == 0:\n            raise ValueError('Index out of range: 0.')\n        else:\n            return tensor_shape.dimension_value(self._static_inner_shape[0])\n    if index <= len(self._row_partitions):\n        return self._row_partitions[index - 1].uniform_row_length\n    relative_index = index - self.num_row_partitions\n    if self.inner_rank is None:\n        return None\n    elif self.inner_rank <= relative_index:\n        raise ValueError(f'Index out of range: {index}.')\n    else:\n        return tensor_shape.dimension_value(self._static_inner_shape[relative_index])",
    "docstring": "Get the size of dimension index, if known statically.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\dynamic_ragged_shape.py",
    "ast_data": "FunctionDef name:_dimension arg:self arg:index arguments arg arg If Compare If Return return:yes If Compare Return return:no If Compare Raise Call Return return:yes Call If Compare Call Return return:yes Assign If Compare Return return:no If Compare Raise Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "clear_control_inputs",
    "source_code": "@tf_contextlib.contextmanager\ndef clear_control_inputs():\n    control_flow_context = ops.get_default_graph()._get_control_flow_context()\n    with ops.control_dependencies(None):\n        ops.get_default_graph()._set_control_flow_context(control_flow_context)\n        yield",
    "docstring": "Clears the control inputs but preserves the ControlFlowContext. This is needed to preserve the XLAControlFlowControl when clearing control inputs for the gradient accumulators in while_v2. does not allow that. Yields: A context manager in which the ops created will not have any control inputs by default but the control flow context is the same.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\control_flow_util_v2.py",
    "ast_data": "FunctionDef name:clear_control_inputs arguments Assign Call Call With Call Call Call"
  },
  {
    "library": "authlib",
    "name": "create_token_response",
    "source_code": "@hooked\ndef create_token_response(self):\n    token = self.generate_token(scope=self.request.payload.scope, include_refresh_token=False)\n    log.debug('Issue token %r to %r', token, self.client)\n    self.save_token(token)\n    return (200, token, self.TOKEN_RESPONSE_HEADER)",
    "docstring": "If the access token request is valid and authorized, the authorization server issues an access token as described in Section 5.1. A refresh token SHOULD NOT be included. If the request failed client authentication or is invalid, the authorization server returns an error response as described in Section 5.2. An example successful response: .. code-block:: http HTTP/1.1 200 OK Content-Type: application/json Cache-Control: no-store Pragma: no-cache { \"access_token\":\"2YotnFZFEjr1zCsicMWpAA\", \"token_type\":\"example\", \"expires_in\":3600, \"example_parameter\":\"example_value\" } :returns: (status_code, body, headers)",
    "type": "method",
    "file_path": "authlib\\authlib\\oauth2\\rfc6749\\grants\\client_credentials.py",
    "ast_data": "FunctionDef name:create_token_response arg:self arguments arg Assign Call Call Call Return return:yes"
  },
  {
    "library": "django",
    "name": "xml",
    "source_code": "@property\ndef xml(self, dialect=''):\n    return capi.to_xml(self.ptr, byref(c_char_p()), force_bytes(dialect))",
    "docstring": "Return the XML representation of this Spatial Reference.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\gdal\\srs.py",
    "ast_data": "FunctionDef name:xml arg:self arg:dialect arguments arg arg Return return:yes Call Call Call Call"
  },
  {
    "library": "django",
    "name": "get_db_converters",
    "source_code": "def get_db_converters(self, expression):\n    return []",
    "docstring": "Return a list of functions needed to convert field data. Some field types on some backends do not provide data in the correct format, this is the hook for converter functions.",
    "type": "method",
    "file_path": "django\\django\\db\\backends\\base\\operations.py",
    "ast_data": "FunctionDef name:get_db_converters arg:self arg:expression arguments arg arg Return return:no"
  },
  {
    "library": "tensorflow",
    "name": "set_shard_dimension",
    "source_code": "def set_shard_dimension(self, shard_dimension):\n    if self._frozen:\n        if self._shard_dimension != shard_dimension:\n            raise ValueError(\"Can't set shard dimension to %d since it has been frozen to use %d.\" % (shard_dimension, self._shard_dimension))\n    else:\n        self._shard_dimension = tensor_shape.as_dimension(shard_dimension)",
    "docstring": "Sets the shard dimension for the current policy. If the policy has been frozen then shard_dimension must match the existing setting. Args: shard_dimension: The shard dimension to use in the policy. Raises: ValueError: If the policy has been frozen and shard_dimension differs from the frozen value, or shard_dimension can't be interpreted as a Dimension.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\tpu\\tpu_sharding.py",
    "ast_data": "FunctionDef name:set_shard_dimension arg:self arg:shard_dimension arguments arg arg If If Compare Raise Call Assign Call"
  },
  {
    "library": "kornia",
    "name": "_get_weight_url",
    "source_code": "def _get_weight_url(variant: str) -> str:\n    KORNIA_CHECK(variant in _AVAILABLE_WEIGHTS, f'Variant {variant} does not have pre-trained checkpoint')\n    model_type, patch_size = variant.split('/')\n    return f'https://huggingface.co/kornia/{model_type}{patch_size}_augreg_i21k_r224/resolve/main/{model_type}-{patch_size}.pth'",
    "docstring": "Return the URL of the model weights.",
    "type": "function",
    "file_path": "kornia\\kornia\\contrib\\vit.py",
    "ast_data": "FunctionDef name:_get_weight_url arg:variant arguments arg Call Compare Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "add_update",
    "source_code": "@doc_controls.do_not_doc_inheritable\ndef add_update(self, updates, inputs=None):\n    if inputs is not None:\n        tf_logging.warning('`add_update` `inputs` kwarg has been deprecated. You no longer need to pass a value to `inputs` as it is being automatically inferred.')\n    call_context = base_layer_utils.call_context()\n    if call_context.in_keras_graph:\n        return\n    if not call_context.frozen:\n        for update in nest.flatten(updates):\n            if callable(update):\n                update()",
    "docstring": "Add update op(s), potentially dependent on layer inputs. Weight updates (for instance, the updates of the moving mean and variance in a BatchNormalization layer) may be dependent on the inputs passed when calling a layer. Hence, when reusing the same layer on different inputs and , some entries in may be dependent on and some on . This method automatically keeps track of dependencies. This call is ignored when eager execution is enabled (in that case, variable updates are run on the fly and thus do not need to be tracked for later execution). Args: updates: Update op, or list/tuple of update ops, or zero-arg callable that returns an update op. A zero-arg callable should be passed in order to disable running the updates by setting on this Layer, when executing in Eager mode. inputs: Deprecated, will be automatically inferred.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\base_layer.py",
    "ast_data": "FunctionDef name:add_update arg:self arg:updates arg:inputs arguments arg arg arg If Compare Call Assign Call If Return return:no If For Call If Call Call"
  },
  {
    "library": "scikit-learn",
    "name": "_SetOutputMixin",
    "source_code": "class _SetOutputMixin:\n\n    def __init_subclass__(cls, auto_wrap_output_keys=('transform',), **kwargs):\n        super().__init_subclass__(**kwargs)\n        if not (isinstance(auto_wrap_output_keys, tuple) or auto_wrap_output_keys is None):\n            raise ValueError('auto_wrap_output_keys must be None or a tuple of keys.')\n        if auto_wrap_output_keys is None:\n            cls._sklearn_auto_wrap_output_keys = set()\n            return\n        method_to_key = {'transform': 'transform', 'fit_transform': 'transform'}\n        cls._sklearn_auto_wrap_output_keys = set()\n        for method, key in method_to_key.items():\n            if not hasattr(cls, method) or key not in auto_wrap_output_keys:\n                continue\n            cls._sklearn_auto_wrap_output_keys.add(key)\n            if method not in cls.__dict__:\n                continue\n            wrapped_method = _wrap_method_output(getattr(cls, method), key)\n            setattr(cls, method, wrapped_method)\n\n    @available_if(_auto_wrap_is_configured)\n    def set_output(self, *, transform=None):\n        if transform is None:\n            return self\n        if not hasattr(self, '_sklearn_output_config'):\n            self._sklearn_output_config = {}\n        self._sklearn_output_config['transform'] = transform\n        return self",
    "docstring": "Mixin that dynamically wraps methods to return container based on config. Currently wraps and and configures it based on of the global configuration. is only defined if is defined and is the default value.",
    "type": "class",
    "file_path": "scikit-learn\\sklearn\\utils\\_set_output.py",
    "ast_data": "ClassDef name:_SetOutputMixin FunctionDef name:__init_subclass__ arg:cls arg:auto_wrap_output_keys arguments arg arg arg Call Call If BoolOp Call Compare Raise Call If Compare Assign Call Return return:no Assign Assign Call For Call If BoolOp Call Compare Call If Compare Assign Call Call Call FunctionDef name:set_output arg:self arguments arg arg If Compare Return return:yes If Call Assign Assign Return return:yes Call"
  },
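Usage sketch for the wrapping above via the public `set_output` API (scikit-learn >= 1.2 with pandas installed):

```python
import pandas as pd
from sklearn.preprocessing import StandardScaler

X = pd.DataFrame({"a": [1.0, 2.0, 3.0], "b": [10.0, 20.0, 30.0]})
scaler = StandardScaler().set_output(transform="pandas")
out = scaler.fit_transform(X)
print(type(out).__name__)  # DataFrame, instead of the default ndarray
print(list(out.columns))   # ['a', 'b'], column names preserved
```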
  {
    "library": "tensorflow",
    "name": "scope",
    "source_code": "def scope(self):\n    return super(OneDeviceStrategy, self).scope()",
    "docstring": "Returns a context manager selecting this Strategy as current. Inside a code block, this thread will use a variable creator set by , and will enter its \"cross-replica context\". In , all variables created inside will be on specified at strategy construction time. See example in the docs for this class. Returns: A context manager to use for creating variables with this strategy.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\one_device_strategy.py",
    "ast_data": "FunctionDef name:scope arg:self arguments arg Return return:yes Call Call"
  },
  {
    "library": "scikit-learn",
    "name": "estimators_samples_",
    "source_code": "@property\ndef estimators_samples_(self):\n    return [sample_indices for sample_indices in self._get_estimators_indices()]",
    "docstring": "The subset of drawn samples for each base estimator. Returns a dynamically generated list of indices identifying the samples used for fitting each member of the ensemble, i.e., the in-bag samples. Note: the list is re-created at each call to the property in order to reduce the object memory footprint by not storing the sampling data. Thus fetching the property may be slower than expected.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\ensemble\\_forest.py",
    "ast_data": "FunctionDef name:estimators_samples_ arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_create_variables_and_slots",
    "source_code": "def _create_variables_and_slots(self) -> Dict[str, Dict[str, tf_variables.Variable]]:\n    self._track_restore_info_for_cpu()\n    variables = {}\n    stacked_variables = self._create_variables_from_stacked_tables()\n    for table in self._table_config:\n        if table.name in stacked_variables:\n            variables[table.name] = {'parameters': stacked_variables[table.name]}\n        else:\n            variables[table.name] = self._create_variables(table, trainable=True)\n    return variables",
    "docstring": "Create variables for TPU embeddings. Returns: A dict of dicts. The outer dict is keyed by the table names and the inner dicts are keyed by 'parameters' and the slot variable names.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\tpu\\tpu_embedding_for_serving.py",
    "ast_data": "FunctionDef name:_create_variables_and_slots arg:self arguments arg Call Assign Assign Call For If Compare Assign Assign Call Return return:yes"
  },
  {
    "library": "scipy",
    "name": "_updated_ctor_param",
    "source_code": "def _updated_ctor_param(self):\n    dct = self._ctor_param.copy()\n    dct['a'] = self.a\n    dct['b'] = self.b\n    dct['badvalue'] = self.badvalue\n    dct['moment_tol'] = self.moment_tol\n    dct['inc'] = self.inc\n    dct['name'] = self.name\n    dct['shapes'] = self.shapes\n    return dct",
    "docstring": "Return the current version of _ctor_param, possibly updated by user. Used by freezing. Keep this in sync with the signature of __init__.",
    "type": "method",
    "file_path": "scipy\\scipy\\stats\\_distn_infrastructure.py",
    "ast_data": "FunctionDef name:_updated_ctor_param arg:self arguments arg Assign Call Assign Assign Assign Assign Assign Assign Assign Return return:yes"
  },
  {
    "library": "kornia",
    "name": "from_rt",
    "source_code": "@classmethod\ndef from_rt(cls, rotation: So3 | So2 | Tensor | Quaternion, translation: Tensor, frame_src: str | None=None, frame_dst: str | None=None) -> NamedPose | None:\n    if isinstance(rotation, (So3, Quaternion)):\n        return cls(Se3(rotation, translation), frame_src, frame_dst)\n    elif isinstance(rotation, So2):\n        return cls(Se2(rotation, translation), frame_src, frame_dst)\n    elif isinstance(rotation, Tensor):\n        check_matrix_shape(rotation)\n        dim = rotation.shape[-1]\n        RT = eye(dim + 1, device=rotation.device, dtype=rotation.dtype)\n        RT[..., :dim, :dim] = rotation\n        RT[..., :dim, dim] = translation\n        if dim == 2:\n            return cls(Se2.from_matrix(RT), frame_src, frame_dst)\n        elif dim == 3:\n            return cls(Se3.from_matrix(RT), frame_src, frame_dst)\n    else:\n        raise ValueError(f'R must be either So2, So3, Quaternion, or Tensor, got {type(rotation)}')\n    return None",
    "docstring": "Construct NamedPose from rotation and translation. Args: rotation: Rotation part of the pose. translation: Translation part of the pose. frame_src: Name of the source frame. frame_dst: Name of the destination frame. Returns: NamedPose constructed from rotation and translation. Example: >>> b_from_a_rot = So3.identity() >>> b_from_a_trans = torch.tensor([1., 2., 3.]) >>> b_from_a = NamedPose.from_rt(b_from_a_rot, b_from_a_trans, frame_src=\"frame_a\", frame_dst=\"frame_b\") >>> b_from_a NamedPose(dst_from_src=rotation: Parameter containing: tensor([1., 0., 0., 0.], requires_grad=True) translation: Parameter containing: tensor([1., 2., 3.], requires_grad=True), frame_src: frame_a -> frame_dst: frame_b)",
    "type": "method",
    "file_path": "kornia\\kornia\\geometry\\pose.py",
    "ast_data": "FunctionDef name:from_rt arg:cls arg:rotation arg:translation arg:frame_src arg:frame_dst arguments arg arg arg arg arg If Call Return return:yes Call Call If Call Return return:yes Call Call If Call Call Assign Assign Call Assign Assign If Compare Return return:yes Call Call If Compare Return return:yes Call Call Raise Call Call Return return:no"
  },
  {
    "library": "matplotlib",
    "name": "end",
    "source_code": "def end(self, tag=None, indent=True):\n    if tag:\n        assert self.__tags, f'unbalanced end({tag})'\n        assert _escape_cdata(tag) == self.__tags[-1], f'expected end({self.__tags[-1]}), got {tag}'\n    else:\n        assert self.__tags, 'unbalanced end()'\n    tag = self.__tags.pop()\n    if self.__data:\n        self.__flush(indent)\n    elif self.__open:\n        self.__open = 0\n        self.__write('/>\\n')\n        return\n    if indent:\n        self.__write(self.__indentation[:len(self.__tags)])\n    self.__write(f'</{tag}>\\n')",
    "docstring": "Close the current element (opened by the most recent call to :meth:). Parameters ---------- tag Element tag. If given, the tag must match the start tag. If omitted, the current element is closed. indent : bool, default: True",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\backends\\backend_svg.py",
    "ast_data": "FunctionDef name:end arg:self arg:tag arg:indent arguments arg arg arg If Compare Call Assign Call If Call If Assign Call Return return:no If Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "_find",
    "source_code": "@_lru_cache\n@record_shapeenv_event()\ndef _find(self, a: sympy.Symbol) -> sympy.Expr:\n    if a not in self.replacements:\n        return a\n    res = self.replacements[a]\n    cur_replace = {s: self._find(s) for s in res.free_symbols}\n    replaced, changed = self.replacements[a]._xreplace(cur_replace)\n    if changed:\n        self._set_replacement(a, replaced, 'find')\n    return self.replacements[a]",
    "docstring": "Implements a DSU-like algorithm to find the variable that represents a Also handles transitive non-identity replacements. a: b + c c: d",
    "type": "method",
    "file_path": "pytorch\\torch\\fx\\experimental\\symbolic_shapes.py",
    "ast_data": "FunctionDef name:_find arg:self arg:a arguments arg arg If Compare Return return:yes Assign Assign Call Assign Call If Call Return return:yes Call"
  },
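The path-compression idea above, stripped of ShapeEnv details, on a plain dict of hypothetical replacements (the real `_find` also rewrites compound expressions like `b + c` via `_xreplace`):

```python
# Hypothetical replacement chains, not ShapeEnv internals.
replacements = {"a": "b", "b": "c", "c": "d"}

def find(x):
    if x not in replacements:
        return x            # x is its own root
    root = find(replacements[x])
    replacements[x] = root  # path compression: point x straight at the root
    return root

print(find("a"))     # 'd'
print(replacements)  # {'a': 'd', 'b': 'd', 'c': 'd'}
```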
  {
    "library": "pytorch",
    "name": "from_pretrained",
    "source_code": "@classmethod\ndef from_pretrained(cls, embeddings, freeze=True, padding_idx=None, max_norm=None, norm_type=2.0, scale_grad_by_freq=False, sparse=False):\n    assert embeddings.dim() == 2, 'Embeddings parameter is expected to be 2-dimensional'\n    rows, cols = embeddings.shape\n    embedding = cls(num_embeddings=rows, embedding_dim=cols, _weight=embeddings, _freeze=freeze, padding_idx=padding_idx, max_norm=max_norm, norm_type=norm_type, scale_grad_by_freq=scale_grad_by_freq, sparse=sparse)\n    return embedding",
    "docstring": "Create Embedding instance from given 2-dimensional FloatTensor. Args: embeddings (Tensor): FloatTensor containing weights for the Embedding. First dimension is being passed to Embedding as `padding_idxpadding_idx`. sparse (bool, optional): See module initialization documentation. Examples:: >>> # FloatTensor containing pretrained weights >>> weight = torch.FloatTensor([[1, 2.3, 3], [4, 5.1, 6.3]]) >>> embedding = nn.Embedding.from_pretrained(weight) >>> # Get embeddings for index 1 >>> input = torch.LongTensor([1]) >>> # xdoctest: +IGNORE_WANT(\"non-deterministic\") >>> embedding(input) tensor([[ 4.0000, 5.1000, 6.3000]])",
    "type": "method",
    "file_path": "pytorch\\torch\\nn\\modules\\sparse.py",
    "ast_data": "FunctionDef name:from_pretrained arg:cls arg:embeddings arg:freeze arg:padding_idx arg:max_norm arg:norm_type arg:scale_grad_by_freq arg:sparse arguments arg arg arg arg arg arg arg arg Compare Call Assign Assign Call Return return:yes"
  },
  {
    "library": "kornia",
    "name": "RandomGaussianBlurGenerator",
    "source_code": "class RandomGaussianBlurGenerator(RandomGeneratorBase):\n\n    def __init__(self, sigma: Union[Tuple[float, float], Tensor]=(0.1, 2.0)) -> None:\n        super().__init__()\n        if sigma[1] < sigma[0]:\n            raise TypeError(f'sigma_max should be higher than sigma_min: {sigma} passed.')\n        self.sigma = sigma\n        self.sigma_sampler: UniformDistribution\n\n    def __repr__(self) -> str:\n        repr_buf = f'sigma={self.sigma}'\n        return repr_buf\n\n    def make_samplers(self, device: torch.device, dtype: torch.dtype) -> None:\n        if not isinstance(self.sigma, torch.Tensor):\n            sigma = torch.tensor(self.sigma, device=device, dtype=dtype)\n        else:\n            sigma = self.sigma.to(device=device, dtype=dtype)\n        _joint_range_check(sigma, 'sigma', (0, float('inf')))\n        self.sigma_sampler = UniformDistribution(sigma[0], sigma[1], validate_args=False)\n\n    def forward(self, batch_shape: Tuple[int, ...], same_on_batch: bool=False) -> Dict[str, Tensor]:\n        batch_size = batch_shape[0]\n        _common_param_check(batch_size, same_on_batch)\n        sigma = _adapted_rsampling((batch_size,), self.sigma_sampler, same_on_batch)\n        return {'sigma': sigma}",
    "docstring": "Generate random gaussian blur parameters for a batch of images. Args: sigma: The range to uniformly sample the standard deviation for the Gaussian kernel. Returns: A dict of parameters to be passed for transformation. - sigma: element-wise standard deviation with a shape of (B,). Note: The generated random numbers are not reproducible across different devices and dtypes. By default, the parameters will be generated on CPU in float32. This can be changed by calling ``.",
    "type": "class",
    "file_path": "kornia\\kornia\\augmentation\\random_generator\\_2d\\gaussian_blur.py",
    "ast_data": "ClassDef name:RandomGaussianBlurGenerator FunctionDef name:__init__ arg:self arg:sigma arguments arg arg Call Call If Compare Raise Call Assign FunctionDef name:__repr__ arg:self arguments arg Assign Return return:yes FunctionDef name:make_samplers arg:self arg:device arg:dtype arguments arg arg arg If Call Assign Call Assign Call Call Call Assign Call FunctionDef name:forward arg:self arg:batch_shape arg:same_on_batch arguments arg arg arg Assign Call Assign Call Return return:yes"
  },
  {
    "library": "django",
    "name": "get_preferred_type",
    "source_code": "def get_preferred_type(self, media_types):\n    if not media_types or not self.accepted_types:\n        return None\n    desired_types = [(accepted_type, media_type) for media_type in media_types if (accepted_type := self.accepted_type(media_type)) is not None]\n    if not desired_types:\n        return None\n    return min(desired_types, key=lambda t: self.accepted_types.index(t[0]))[1]",
    "docstring": "Select the preferred media type from the provided options.",
    "type": "method",
    "file_path": "django\\django\\http\\request.py",
    "ast_data": "FunctionDef name:get_preferred_type arg:self arg:media_types arguments arg arg If BoolOp Return return:no Assign Compare Call If Return return:no Return return:yes Call arguments arg Call"
  },
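A simplified sketch of the selection rule above: among the server-offered types, pick the one matching the earliest entry in the client's ordered list. Here accepted types are plain strings in preference order; q-values and wildcard matching, which the real `accepted_type`/`accepted_types` machinery handles, are omitted:

```python
# Client's accepted types in preference order (hypothetical data).
accepted_types = ["text/html", "application/json", "*/*"]

def get_preferred_type(media_types):
    matches = [(accepted_types.index(m), m)
               for m in media_types if m in accepted_types]
    return min(matches)[1] if matches else None

print(get_preferred_type(["application/json", "text/html"]))  # text/html
```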
  {
    "library": "seaborn",
    "name": "_dodge",
    "source_code": "def _dodge(self, keys, data):\n    if 'hue' not in self.variables:\n        return\n    hue_idx = self._hue_map.levels.index(keys['hue'])\n    n = len(self._hue_map.levels)\n    data['width'] /= n\n    full_width = data['width'] * n\n    offset = data['width'] * hue_idx + data['width'] / 2 - full_width / 2\n    data[self.orient] += offset",
    "docstring": "Apply a dodge transform to coordinates in place.",
    "type": "method",
    "file_path": "seaborn\\seaborn\\categorical.py",
    "ast_data": "FunctionDef name:_dodge arg:self arg:keys arg:data arguments arg arg arg If Compare Return return:no Assign Call Assign Call Assign Assign"
  },
  {
    "library": "tensorflow",
    "name": "_gather_names",
    "source_code": "def _gather_names(tensor_info):\n    return {tensor_info[key].name.split(':')[0] for key in tensor_info}",
    "docstring": "Get the node names from a TensorInfo.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\compiler\\tensorrt\\trt_convert.py",
    "ast_data": "FunctionDef name:_gather_names arg:tensor_info arguments arg Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "DayLocator",
    "source_code": "class DayLocator(RRuleLocator):\n\n    def __init__(self, bymonthday=None, interval=1, tz=None):\n        if interval != int(interval) or interval < 1:\n            raise ValueError('interval must be an integer greater than 0')\n        if bymonthday is None:\n            bymonthday = range(1, 32)\n        rule = rrulewrapper(DAILY, bymonthday=bymonthday, interval=interval, **self.hms0d)\n        super().__init__(rule, tz=tz)",
    "docstring": "Make ticks on occurrences of each day of the month. For example, 1, 15, 30.",
    "type": "class",
    "file_path": "matplotlib\\lib\\matplotlib\\dates.py",
    "ast_data": "ClassDef name:DayLocator FunctionDef name:__init__ arg:self arg:bymonthday arg:interval arg:tz arguments arg arg arg arg If BoolOp Compare Call Compare Raise Call If Compare Assign Call Assign Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "_get_param_buffer_mapping",
    "source_code": "def _get_param_buffer_mapping(original_module: torch.nn.Module, traced_module: torch.nn.Module) -> dict[str, str]:\n    param_lookup: dict[int, str] = {}\n    buffer_lookup: dict[int, str] = {}\n    for name, param in original_module.named_parameters(remove_duplicate=False):\n        param_lookup[id(param)] = name\n    for name, buffer in original_module.named_buffers(remove_duplicate=False):\n        buffer_lookup[id(buffer)] = name\n    param_buffer_table: dict[str, str] = {}\n    for dynamo_name, dynamo_param in traced_module.named_parameters(remove_duplicate=False):\n        assert dynamo_name not in param_buffer_table\n        if id(dynamo_param) in param_lookup:\n            param_buffer_table[dynamo_name] = param_lookup[id(dynamo_param)]\n    for dynamo_name, dynamo_buffer in traced_module.named_buffers(remove_duplicate=False):\n        assert dynamo_name not in param_buffer_table\n        if id(dynamo_buffer) in buffer_lookup:\n            param_buffer_table[dynamo_name] = buffer_lookup[id(dynamo_buffer)]\n    return param_buffer_table",
    "docstring": "Returns a mapping of parameter/buffer names from the new module to the original model. This is to help with restoring the FQN for parameter/buffers of a traced module to what the original module contains.",
    "type": "function",
    "file_path": "pytorch\\torch\\export\\_trace.py",
    "ast_data": "FunctionDef name:_get_param_buffer_mapping arg:original_module arg:traced_module arguments arg arg For Call Assign Call For Call Assign Call For Call Compare If Compare Call Assign Call For Call Compare If Compare Call Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_initialize_variables",
    "source_code": "def _initialize_variables(session):\n    variables = _get_variables(get_graph())\n    candidate_vars = []\n    for v in variables:\n        if not getattr(v, '_keras_initialized', False):\n            candidate_vars.append(v)\n    if candidate_vars:\n        is_initialized = session.run([variable_v1.is_variable_initialized(v) for v in candidate_vars])\n        should_be_initialized = [not is_initialized[n] and v.initializer is not None for n, v in enumerate(candidate_vars)]\n        uninitialized_vars = []\n        for flag, v in zip(should_be_initialized, candidate_vars):\n            if flag:\n                uninitialized_vars.append(v)\n            v._keras_initialized = True\n        if uninitialized_vars:\n            session.run(variables_module.variables_initializer(uninitialized_vars))",
    "docstring": "Utility to initialize uninitialized variables on the fly.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\backend.py",
    "ast_data": "FunctionDef name:_initialize_variables arg:session arguments arg Assign Call Call Assign For If Call Call If Assign Call Call Assign BoolOp Compare Call Assign For Call If Call Assign If Call Call"
  },
  {
    "library": "pytorch",
    "name": "_private_register_pytree_node",
    "source_code": "def _private_register_pytree_node(cls: type[Any], flatten_fn: FlattenFunc, unflatten_fn: UnflattenFunc, *, serialized_type_name: Optional[str]=None, to_dumpable_context: Optional[ToDumpableContextFn]=None, from_dumpable_context: Optional[FromDumpableContextFn]=None) -> None:\n    if not optree.is_structseq_class(cls):\n        optree.register_pytree_node(cls, flatten_fn, _reverse_args(unflatten_fn), namespace='torch')",
    "docstring": "This is an internal function that is used to register a pytree node type for the C++ pytree only. End-users should use :func: instead.",
    "type": "function",
    "file_path": "pytorch\\torch\\utils\\_cxx_pytree.py",
    "ast_data": "FunctionDef name:_private_register_pytree_node arg:cls arg:flatten_fn arg:unflatten_fn arguments arg arg arg arg arg arg If Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_get_executor_init",
    "source_code": "def _get_executor_init(self, workers):\n\n    def pool_fn(seqs):\n        pool = get_pool_class(True)(workers, initializer=init_pool_generator, initargs=(seqs, None, get_worker_id_queue()))\n        _DATA_POOLS.add(pool)\n        return pool\n    return pool_fn",
    "docstring": "Gets the Pool initializer for multiprocessing. Args: workers: Number of workers. Returns: Function, a Function to initialize the pool",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\utils\\data_utils.py",
    "ast_data": "FunctionDef name:_get_executor_init arg:self arg:workers arguments arg arg FunctionDef name:pool_fn arg:seqs arguments arg Assign Call Call Call Call Return return:yes Return return:yes"
  },
  {
    "library": "scipy",
    "name": "kei_zeros",
    "source_code": "def kei_zeros(nt):\n    if not isscalar(nt) or floor(nt) != nt or nt <= 0:\n        raise ValueError('nt must be positive integer scalar.')\n    return _specfun.klvnzo(nt, 4)",
    "docstring": "Compute nt zeros of the Kelvin function kei. Parameters ---------- nt : int Number of zeros to compute. Must be positive. Returns ------- ndarray First zeros of the Kelvin function. See Also -------- kei References ---------- .. [1] Zhang, Shanjie and Jin, Jianming. \"Computation of Special Functions\", John Wiley and Sons, 1996.",
    "type": "function",
    "file_path": "scipy\\scipy\\special\\_basic.py",
    "ast_data": "FunctionDef name:kei_zeros arg:nt arguments arg If BoolOp Call Compare Call Compare Raise Call Return return:yes Call"
  },
  {
    "library": "django",
    "name": "UUIDTextMixin",
    "source_code": "class UUIDTextMixin:\n\n    def process_rhs(self, qn, connection):\n        if not connection.features.has_native_uuid_field:\n            from django.db.models.functions import Replace\n            if self.rhs_is_direct_value():\n                self.rhs = Value(self.rhs)\n            self.rhs = Replace(self.rhs, Value('-'), Value(''), output_field=CharField())\n        rhs, params = super().process_rhs(qn, connection)\n        return (rhs, params)",
    "docstring": "Strip hyphens from a value when filtering a UUIDField on backends without a native datatype for UUID.",
    "type": "class",
    "file_path": "django\\django\\db\\models\\lookups.py",
    "ast_data": "ClassDef name:UUIDTextMixin FunctionDef name:process_rhs arg:self arg:qn arg:connection arguments arg arg arg If If Call Assign Call Assign Call Call Call Call Assign Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "sparse_reorder",
    "source_code": "@tf_export('sparse.reorder', v1=['sparse.reorder', 'sparse_reorder'])\n@deprecation.deprecated_endpoints('sparse_reorder')\ndef sparse_reorder(sp_input, name=None):\n    sp_input = _convert_to_sparse_tensor(sp_input)\n    reordered_ind, reordered_val = gen_sparse_ops.sparse_reorder(sp_input.indices, sp_input.values, sp_input.dense_shape, name=name)\n    if sp_input.get_shape().is_fully_defined():\n        dense_shape = sp_input.get_shape().as_list()\n        return sparse_tensor.SparseTensor(reordered_ind, reordered_val, dense_shape)\n    else:\n        dense_shape = array_ops.identity(sp_input.dense_shape)\n        sp_output = sparse_tensor.SparseTensor(reordered_ind, reordered_val, dense_shape)\n        sp_output.set_shape(sp_input.shape)\n        return sp_output",
    "docstring": "Reorders a into the canonical, row-major ordering. Note that by convention, all sparse ops preserve the canonical ordering along increasing dimension number. The only time ordering can be violated is during manual manipulation of the indices and values to add entries. Reordering does not affect the shape of the . For example, if has shape and / : [0, 3]: b [0, 1]: a [3, 1]: d [2, 0]: c then the output will be a of shape and / : [0, 1]: a [0, 3]: b [2, 0]: c [3, 1]: d Args: sp_input: The input . name: A name prefix for the returned tensors (optional) Returns: A with the same shape and non-empty values, but in canonical ordering. Raises: TypeError: If is not a .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\sparse_ops.py",
    "ast_data": "FunctionDef name:sparse_reorder arg:sp_input arg:name arguments arg arg Assign Call Assign Call If Call Call Assign Call Call Return return:yes Call Assign Call Assign Call Call Return return:yes Call Call"
  },
  {
    "library": "kornia",
    "name": "make_samplers",
    "source_code": "def make_samplers(self, device: torch.device, dtype: torch.dtype) -> None:\n    gain = _range_bound(self.gain, 'gain').to(device, dtype)\n    self.gain_sampler = UniformDistribution(gain[0], gain[1], validate_args=False)\n    sign = _range_bound(self.sign, 'sign', bounds=(-1.0, 1.0), center=0.0).to(device, dtype)\n    self.sign_sampler = UniformDistribution(sign[0], sign[1], validate_args=False)\n    self.directions_sampler = UniformDistribution(0, 4, validate_args=False)",
    "docstring": "Create samplers for generating random gaussian illumination parameters.",
    "type": "method",
    "file_path": "kornia\\kornia\\augmentation\\random_generator\\_2d\\linear_illumination.py",
    "ast_data": "FunctionDef name:make_samplers arg:self arg:device arg:dtype arguments arg arg arg Assign Call Call Assign Call Assign Call Call Assign Call Assign Call"
  },
  {
    "library": "numpy",
    "name": "_parse_policy_not_keepsort",
    "source_code": "def _parse_policy_not_keepsort(self, has_baseline, final_targets, extra_flags):\n    final_targets = self.feature_sorted(final_targets, reverse=True)\n    return (has_baseline, final_targets, extra_flags)",
    "docstring": "sorted depend on the highest interest",
    "type": "method",
    "file_path": "numpy\\numpy\\distutils\\ccompiler_opt.py",
    "ast_data": "FunctionDef name:_parse_policy_not_keepsort arg:self arg:has_baseline arg:final_targets arg:extra_flags arguments arg arg arg arg Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "ops_used_by_graph_def",
    "source_code": "def ops_used_by_graph_def(graph_def):\n    name_to_function = {}\n    for fun in graph_def.library.function:\n        name_to_function[fun.signature.name] = fun\n    used_ops = set()\n    functions_to_process = []\n\n    def mark_op_as_used(op):\n        if op not in used_ops and op in name_to_function:\n            functions_to_process.append(name_to_function[op])\n        used_ops.add(op)\n\n    def process_node(node):\n        mark_op_as_used(node.op)\n        if node.op in ['PartitionedCall', 'StatefulPartitionedCall']:\n            mark_op_as_used(node.attr['f'].func.name)\n    for node in graph_def.node:\n        process_node(node)\n    while functions_to_process:\n        fun = functions_to_process.pop()\n        for node in fun.node_def:\n            process_node(node)\n    return [op for op in used_ops if op not in name_to_function]",
    "docstring": "Collect the list of ops used by a graph. Does not validate that the ops are all registered. Args: graph_def: A proto, as from . Returns: A list of strings, each naming an op used by the graph.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\meta_graph.py",
    "ast_data": "FunctionDef name:ops_used_by_graph_def arg:graph_def arguments arg Assign For Assign Assign Call Assign FunctionDef name:mark_op_as_used arg:op arguments arg If BoolOp Compare Compare Call Call FunctionDef name:process_node arg:node arguments arg Call If Compare Call For Call While Assign Call For Call Return return:yes Compare"
  },
  {
    "library": "seaborn",
    "name": "_plot",
    "source_code": "def _plot(self, split_generator: Callable[[], Generator], scales: dict[str, Scale], orient: str) -> None:\n    raise NotImplementedError()",
    "docstring": "Main interface for creating a plot.",
    "type": "method",
    "file_path": "seaborn\\seaborn\\_marks\\base.py",
    "ast_data": "FunctionDef name:_plot arg:self arg:split_generator arg:scales arg:orient arguments arg arg arg arg Raise Call"
  },
  {
    "library": "matplotlib",
    "name": "compute_geom_weights",
    "source_code": "def compute_geom_weights(self):\n    weights = np.zeros([np.size(self._triangles, 0), 3])\n    tris_pts = self._tris_pts\n    for ipt in range(3):\n        p0 = tris_pts[:, ipt % 3, :]\n        p1 = tris_pts[:, (ipt + 1) % 3, :]\n        p2 = tris_pts[:, (ipt - 1) % 3, :]\n        alpha1 = np.arctan2(p1[:, 1] - p0[:, 1], p1[:, 0] - p0[:, 0])\n        alpha2 = np.arctan2(p2[:, 1] - p0[:, 1], p2[:, 0] - p0[:, 0])\n        angle = np.abs((alpha2 - alpha1) / np.pi % 1)\n        weights[:, ipt] = 0.5 - np.abs(angle - 0.5)\n    return weights",
    "docstring": "Build the (nelems, 3) weights coeffs of _triangles angles, renormalized so that np.sum(weights, axis=1) == np.ones(nelems)",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\tri\\_triinterpolate.py",
    "ast_data": "FunctionDef name:compute_geom_weights arg:self arguments arg Assign Call Call Assign For Call Assign Assign Assign Assign Call Assign Call Assign Call Assign Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "__init__",
    "source_code": "def __init__(self, role: str, rank: int, local_world_size: int):\n    self.role = role\n    self.rank = rank\n    self.local_world_size = local_world_size",
    "docstring": "Initialize the agent class instance. Args: role (str): user-defined role for the workers with this spec rank (int): the rank of the agent local_world_size (int): number of local workers to run",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\elastic\\agent\\server\\api.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:role arg:rank arg:local_world_size arguments arg arg arg arg Assign Assign Assign"
  },
  {
    "library": "pytorch",
    "name": "tree_unflatten",
    "source_code": "def tree_unflatten(leaves: Iterable[Any], treespec: TreeSpec) -> PyTree:\n    if not _is_pytreespec_instance(treespec):\n        raise TypeError(f'tree_unflatten(leaves, treespec): Expected `treespec` to be instance of PyTreeSpec but got item of type {type(treespec)}.')\n    return optree.tree_unflatten(treespec, leaves)",
    "docstring": "Reconstruct a pytree from the treespec and the leaves. The inverse of :func:. >>> tree = {\"b\": (2, [3, 4]), \"a\": 1, \"c\": None, \"d\": 5} >>> leaves, treespec = tree_flatten(tree) >>> tree == tree_unflatten(leaves, treespec) True Args: leaves (iterable): The list of leaves to use for reconstruction. The list must match the number of leaves of the treespec. treespec (TreeSpec): The treespec to reconstruct. Returns: The reconstructed pytree, containing the ``.",
    "type": "function",
    "file_path": "pytorch\\torch\\utils\\_cxx_pytree.py",
    "ast_data": "FunctionDef name:tree_unflatten arg:leaves arg:treespec arguments arg arg If Call Raise Call Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "StagingError",
    "source_code": "class StagingError(AutoGraphError):\n    pass",
    "docstring": "Raised during the staging (i.e. Python execution) of converted code.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\autograph\\impl\\api.py",
    "ast_data": "ClassDef name:StagingError"
  },
  {
    "library": "tensorflow",
    "name": "_export_model_variables",
    "source_code": "def _export_model_variables(model, saved_model_path):\n    _get_or_create_variables_dir(saved_model_path)\n    checkpoint_prefix = _get_variables_path(saved_model_path)\n    model.save_weights(checkpoint_prefix, save_format='tf', overwrite=True)\n    return checkpoint_prefix",
    "docstring": "Saves model weights in checkpoint format under variables folder.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\saving\\saved_model_experimental.py",
    "ast_data": "FunctionDef name:_export_model_variables arg:model arg:saved_model_path arguments arg arg Call Assign Call Call Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "_chisquare",
    "source_code": "def _chisquare(f_obs, f_exp):\n    f_obs = np.asarray(f_obs, dtype=np.float64)\n    k = len(f_obs)\n    chisq = f_obs\n    chisq -= f_exp\n    chisq **= 2\n    with np.errstate(invalid='ignore'):\n        chisq /= f_exp\n    chisq = chisq.sum(axis=0)\n    return (chisq, special.chdtrc(k - 1, chisq))",
    "docstring": "Fast replacement for scipy.stats.chisquare. Version from with additional optimizations.",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\feature_selection\\_univariate_selection.py",
    "ast_data": "FunctionDef name:_chisquare arg:f_obs arg:f_exp arguments arg arg Assign Call Assign Call Assign With Call Assign Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_values",
    "source_code": "@property\ndef _values(self):\n    return self",
    "docstring": "Collect values for TrackableDataStructure.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\trackable\\data_structures.py",
    "ast_data": "FunctionDef name:_values arg:self arguments arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_is_valid_grouped_gemm_fusion",
    "source_code": "def _is_valid_grouped_gemm_fusion(computation_nodes):\n    computation_op = mkldnn._linear_pointwise.default\n    act = computation_nodes[0].args[0]\n    wgt = computation_nodes[0].args[1]\n    wgt_size = wgt.meta.get('val').size()\n    return len(computation_nodes) >= 2 and all((node.target == computation_op and node.args[0] == act and (node.args[1].meta.get('val').size() == wgt_size) and (node.args[1] != wgt or gemm_idx == 0) for gemm_idx, node in enumerate(computation_nodes)))",
    "docstring": "Here we check: 1. More than 1 GEMM nodes has been found. 2. All the GEMM nodes share the same activation. 3. All the GEMM nodes have same weight size but different wgt node.",
    "type": "function",
    "file_path": "pytorch\\torch\\_inductor\\fx_passes\\mkldnn_fusion.py",
    "ast_data": "FunctionDef name:_is_valid_grouped_gemm_fusion arg:computation_nodes arguments arg Assign Assign Assign Assign Call Call Return return:yes BoolOp Compare Call Call BoolOp Compare Compare Compare Call Call BoolOp Compare Compare Call"
  },
  {
    "library": "pytorch",
    "name": "gen_dvar",
    "source_code": "def gen_dvar(curr):\n    curr += 1\n    return (DVar(curr), curr)",
    "docstring": "Generate a dimension variable :param curr: the current counter :return: a dimension variable and an updated counter",
    "type": "function",
    "file_path": "pytorch\\torch\\fx\\experimental\\migrate_gradual_types\\util.py",
    "ast_data": "FunctionDef name:gen_dvar arg:curr arguments arg Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "_ica_def",
    "source_code": "def _ica_def(X, tol, g, fun_args, max_iter, w_init):\n    n_components = w_init.shape[0]\n    W = np.zeros((n_components, n_components), dtype=X.dtype)\n    n_iter = []\n    for j in range(n_components):\n        w = w_init[j, :].copy()\n        w /= np.sqrt((w ** 2).sum())\n        for i in range(max_iter):\n            gwtx, g_wtx = g(np.dot(w.T, X), fun_args)\n            w1 = (X * gwtx).mean(axis=1) - g_wtx.mean() * w\n            _gs_decorrelation(w1, W, j)\n            w1 /= np.sqrt((w1 ** 2).sum())\n            lim = np.abs(np.abs((w1 * w).sum()) - 1)\n            w = w1\n            if lim < tol:\n                break\n        n_iter.append(i + 1)\n        W[j, :] = w\n    return (W, max(n_iter))",
    "docstring": "Deflationary FastICA using fun approx to neg-entropy function Used internally by FastICA.",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\decomposition\\_fastica.py",
    "ast_data": "FunctionDef name:_ica_def arg:X arg:tol arg:g arg:fun_args arg:max_iter arg:w_init arguments arg arg arg arg arg arg Assign Assign Call Assign For Call Assign Call Call Call For Call Assign Call Call Assign Call Call Call Call Call Assign Call Call Call Assign If Compare Call Assign Return return:yes Call"
  },
  {
    "library": "cherrypy",
    "name": "__set__",
    "source_code": "def __set__(self, obj, value):\n    if isinstance(value, str):\n        raise ValueError(self.unicode_err)\n    elif isinstance(value, list):\n        if any((isinstance(item, str) for item in value)):\n            raise ValueError(self.unicode_err)\n    obj._body = encoding.prepare_iter(value)",
    "docstring": "Set a response body through the descriptor protocol. Convert the given value to an iterable object.",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\_cprequest.py",
    "ast_data": "FunctionDef name:__set__ arg:self arg:obj arg:value arguments arg arg arg If Call Raise Call If Call If Call Call Raise Call Assign Call"
  },
  {
    "library": "scipy",
    "name": "compute_banded_symmetric_XT_W_Y",
    "source_code": "def compute_banded_symmetric_XT_W_Y(X, w, Y):\n    W_Y = np.copy(Y)\n    W_Y[2] *= w\n    for i in range(2):\n        W_Y[i, 2 - i:] *= w[:-2 + i]\n        W_Y[3 + i, :-1 - i] *= w[1 + i:]\n    n = X.shape[1]\n    res = np.zeros((4, n))\n    for i in range(n):\n        for j in range(min(n - i, 4)):\n            res[-j - 1, i + j] = sum(X[j:, i] * W_Y[:5 - j, i + j])\n    return res",
    "docstring": "Assuming that the product :math: is symmetric and both `X^T YX^T W Y` is 7-banded. It is also symmetric, so we can store only unique diagonals.",
    "type": "function",
    "file_path": "scipy\\scipy\\interpolate\\_bsplines.py",
    "ast_data": "FunctionDef name:compute_banded_symmetric_XT_W_Y arg:X arg:w arg:Y arguments arg arg arg Assign Call For Call Assign Assign Call For Call For Call Call Assign Call Return return:yes"
  },
  {
    "library": "kornia",
    "name": "inverse_boxes",
    "source_code": "def inverse_boxes(self, input: Union[Tensor, Boxes], params: List[ParamItem], extra_args: Optional[Dict[str, Any]]=None) -> Union[Tensor, Boxes]:\n    if isinstance(input, Tensor):\n        batchsize, frame_num = (input.size(0), input.size(1))\n        input = Boxes.from_tensor(input.view(-1, input.size(2), input.size(3), input.size(4)), mode='vertices_plus')\n        input = super().inverse_boxes(input, params, extra_args=extra_args)\n        input = input.data.view(batchsize, frame_num, -1, 4, 2)\n    else:\n        input = super().inverse_boxes(input, params, extra_args=extra_args)\n    return input",
    "docstring": "Transform bounding boxes. Args: input: tensor with shape :math:. If input is a type, the internal shape is :math:. params: params for the sequence. extra_args: Optional dictionary of extra arguments with specific options for different input types.",
    "type": "method",
    "file_path": "kornia\\kornia\\augmentation\\container\\video.py",
    "ast_data": "FunctionDef name:inverse_boxes arg:self arg:input arg:params arg:extra_args arguments arg arg arg arg If Call Assign Call Call Assign Call Call Call Call Call Assign Call Call Assign Call Assign Call Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "draw_bbox",
    "source_code": "def draw_bbox(bbox, renderer, color='k', trans=None):\n    r = Rectangle(xy=bbox.p0, width=bbox.width, height=bbox.height, edgecolor=color, fill=False, clip_on=False)\n    if trans is not None:\n        r.set_transform(trans)\n    r.draw(renderer)",
    "docstring": "A debug function to draw a rectangle around the bounding box returned by an artist's to test whether the artist is returning the correct bbox.",
    "type": "function",
    "file_path": "matplotlib\\lib\\matplotlib\\patches.py",
    "ast_data": "FunctionDef name:draw_bbox arg:bbox arg:renderer arg:color arg:trans arguments arg arg arg arg Assign Call If Compare Call Call"
  },
  {
    "library": "scrapy",
    "name": "parse_row",
    "source_code": "def parse_row(self, response: Response, row: dict[str, str]) -> Any:\n    raise NotImplementedError",
    "docstring": "This method must be overridden with your custom spider functionality",
    "type": "method",
    "file_path": "scrapy\\scrapy\\spiders\\feed.py",
    "ast_data": "FunctionDef name:parse_row arg:self arg:response arg:row arguments arg arg arg Raise"
  },
  {
    "library": "tensorflow",
    "name": "_remove_ancillary_layers",
    "source_code": "def _remove_ancillary_layers(model, layer_map, layers):\n    ancillary_layers = []\n    if not model._is_graph_network:\n        return (layers, ancillary_layers)\n    depths = [depth for depth in model._nodes_by_depth.keys() if depth < 0]\n    depths.sort(reverse=True)\n    for depth in depths:\n        for node in model._nodes_by_depth[depth]:\n            ancillary_layers.append(layer_map[node.outbound_layer])\n    return ([l for l in layers if l not in ancillary_layers], ancillary_layers)",
    "docstring": "Removes and returns any ancillary layers from based on . Ancillary layers are part of the model topology but not used to compute the model outputs, e.g., layers from and . Args: model: A Keras Model. layer_map: A map to from layers in the to those in . layers: A list of all layers. Returns: Two lists of layers: (1) with the ancillary layers removed, and (2) the ancillary layers.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\models.py",
    "ast_data": "FunctionDef name:_remove_ancillary_layers arg:model arg:layer_map arg:layers arguments arg arg arg Assign If Return return:yes Assign Call Compare Call For For Call Return return:yes Compare"
  },
  {
    "library": "tensorflow",
    "name": "high",
    "source_code": "@property\ndef high(self):\n    return self._high",
    "docstring": "Upper boundary of the output interval.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\distributions\\uniform.py",
    "ast_data": "FunctionDef name:high arg:self arguments arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "SACTradeOffStats",
    "source_code": "@dataclass\nclass SACTradeOffStats:\n    n_segments: int\n    slopes: list[float]\n    intercepts: list[float]\n    fit_breaks: list[float]\n    tradeoff_curve: OrderedDict[float, float]\n    sac_memory: int\n    sac_runtime: float",
    "docstring": "Stores statistics for activation-checkpointing trade-off. Attributes: n_segments (int): Number of piecewise linear segments fitted to the trade-off curve. slopes (List[float]): Slopes of the pieces of linear segments fitted to the trade-off curve. intercepts (List[float]): Intercepts of the of the pieces of linear segments fitted to the trade-off curve. fit_breaks (List[float]): Breakpoints of the of the pieces of linear segments fitted to the trade-off curve. tradeoff_curve (OrderedDict[float, float]): Trade-off curve data of memory discarded vs recomputation time. sac_memory (int): Total memory of operations available for activation checkpointing in bytes. sac_runtime (float): Total runtime of operations available for activation checkpointing in milliseconds.",
    "type": "class",
    "file_path": "pytorch\\torch\\distributed\\_tools\\sac_estimator.py",
    "ast_data": "ClassDef name:SACTradeOffStats"
  },
  {
    "library": "sphinx",
    "name": "request",
    "source_code": "def request(self, method: str, url: str, _user_agent: str='', _tls_info: tuple[bool, str | dict[str, str] | None]=(), **kwargs: Any) -> requests.Response:\n    headers = kwargs.setdefault('headers', {})\n    headers.setdefault('User-Agent', _user_agent or _USER_AGENT)\n    if _tls_info:\n        tls_verify, tls_cacerts = _tls_info\n        verify = bool(kwargs.get('verify', tls_verify))\n        kwargs.setdefault('verify', verify and _get_tls_cacert(url, tls_cacerts))\n    else:\n        verify = kwargs.get('verify', True)\n    if verify:\n        return super().request(method, url, **kwargs)\n    with warnings.catch_warnings():\n        warnings.filterwarnings('ignore', category=InsecureRequestWarning)\n        return super().request(method, url, **kwargs)",
    "docstring": "Sends a request with an HTTP verb and url. This sets up User-Agent header and TLS verification automatically.",
    "type": "method",
    "file_path": "sphinx\\sphinx\\util\\requests.py",
    "ast_data": "FunctionDef name:request arg:self arg:method arg:url arg:_user_agent arg:_tls_info arguments arg arg arg arg arg arg Assign Call Call BoolOp If Assign Assign Call Call Call BoolOp Call Assign Call If Return return:yes Call Call With Call Call Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_concat_vectors",
    "source_code": "def _concat_vectors(*args):\n    args_ = [_static_value(x) for x in args]\n    if any((x_ is None for x_ in args_)):\n        return array_ops.concat(args, 0)\n    return constant_op.constant([x_ for vec_ in args_ for x_ in vec_])",
    "docstring": "Convenience function which concatenates input vectors.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\distributions\\transformed_distribution.py",
    "ast_data": "FunctionDef name:_concat_vectors arguments arg Assign Call If Call Compare Return return:yes Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "_partition_value",
    "source_code": "def _partition_value(self, tensor: torch.Tensor, mesh: DeviceMesh, mesh_dim: int) -> torch.Tensor:\n    if self.reduce_op in ('max', 'min'):\n        return tensor\n    elif self.reduce_op == 'sum':\n        if self.norm_type == 0:\n            raise NotImplementedError(f'Unsupported norm type:: {self.norm_type}')\n        elif self.norm_type == 1:\n            return tensor / mesh.size(mesh_dim)\n        assert isinstance(self.norm_type, (int, float))\n        return tensor / math.pow(mesh.size(mesh_dim), 1 / self.norm_type)\n    raise NotImplementedError(self.reduce_op)",
    "docstring": "For example, consider 4 ranks, a (3,) replicated tensor, and 2-norm: Ranks 0 and 1: sqrt(t1^2 + t2^2 + t3^3) To convert from replicated to partial, we want f(x) such that sqrt(t1^2 + t2^2 + t3^3) = sqrt(4f(t1)^2 + 4f(t2)^2 + 4f(t3)^2) = sqrt(4) sqrt(f(t1)^2 + f(t2)^2 + f(t3)^2). One such f(x) is f(x) = x / sqrt(4). This generalizes to d ranks and p-norm as f(x) = x / d^(1/p).",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\tensor\\_ops\\_math_ops.py",
    "ast_data": "FunctionDef name:_partition_value arg:self arg:tensor arg:mesh arg:mesh_dim arguments arg arg arg arg If Compare Return return:yes If Compare If Compare Raise Call If Compare Return return:yes Call Call Return return:yes Call Call Raise Call"
  },
  {
    "library": "scikit-learn",
    "name": "fit_predict",
    "source_code": "def fit_predict(self, X, y=None, **kwargs):\n    self.fit(X, **kwargs)\n    return self.labels_",
    "docstring": "Perform clustering on and returns cluster labels. Parameters ---------- X : array-like of shape (n_samples, n_features) Input data. y : Ignored Not used, present for API consistency by convention. **kwargs : dict Arguments to be passed to ``. .. versionadded:: 1.4 Returns ------- labels : ndarray of shape (n_samples,), dtype=np.int64 Cluster labels.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\base.py",
    "ast_data": "FunctionDef name:fit_predict arg:self arg:X arg:y arguments arg arg arg arg Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "set_capstyle",
    "source_code": "@_docstring.interpd\ndef set_capstyle(self, s):\n    cs = CapStyle(s)\n    self._capstyle = cs\n    self.stale = True",
    "docstring": "Set the . The default capstyle is 'round' for and 'butt' for all other patches. Parameters ---------- s : or %(CapStyle)s",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\patches.py",
    "ast_data": "FunctionDef name:set_capstyle arg:self arg:s arguments arg arg Assign Call Assign Assign"
  },
  {
    "library": "sphinx",
    "name": "findall",
    "source_code": "def findall(self, node: Node) -> Iterator[N]:\n    for found in node.findall(self):\n        yield cast('N', found)",
    "docstring": "An alternative to with improved type safety. While the object can be used as an argument to , doing so confounds type checkers' ability to determine the return type of the iterator.",
    "type": "method",
    "file_path": "sphinx\\sphinx\\util\\nodes.py",
    "ast_data": "FunctionDef name:findall arg:self arg:node arguments arg arg For Call Call"
  },
  {
    "library": "tensorflow",
    "name": "match_next_flag",
    "source_code": "@staticmethod\ndef match_next_flag(tt_flags, pos):\n    match = _FLAG_DOUBLE_QUOTE_PAT.match(tt_flags, pos)\n    if match:\n        return (match, True)\n    match = _FLAG_SINGLE_QUOTE_PAT.match(tt_flags, pos)\n    if match:\n        return (match, True)\n    match = _FLAG_NO_QUOTE_PAT.match(tt_flags, pos)\n    if match:\n        return (match, True)\n    match = _FLAG_NO_EQUAL_PAT.match(tt_flags, pos)\n    if match:\n        return (match, False)\n    return (None, False)",
    "docstring": "Returns the match for the next TensorTracer flag. Args: tt_flags: a string that contains the flags. pos: where in flags to start the search. Returns: A pair where the first element is the regular-expression match found and the second element indicates if the match has a value.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\tpu\\tensor_tracer_flags.py",
    "ast_data": "FunctionDef name:match_next_flag arg:tt_flags arg:pos arguments arg arg Assign Call If Return return:yes Assign Call If Return return:yes Assign Call If Return return:yes Assign Call If Return return:yes Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "set_position_3d",
    "source_code": "def set_position_3d(self, xyz, zdir=None):\n    super().set_position(xyz[:2])\n    self.set_z(xyz[2])\n    if zdir is not None:\n        self._dir_vec = get_dir_vector(zdir)",
    "docstring": "Set the (*x*, *y*, *z*) position of the text. Parameters ---------- xyz : (float, float, float) The position in 3D space. zdir : {'x', 'y', 'z', None, 3-tuple} The direction of the text. If unspecified, the *zdir* will not be changed. See for a description of the values.",
    "type": "method",
    "file_path": "matplotlib\\lib\\mpl_toolkits\\mplot3d\\art3d.py",
    "ast_data": "FunctionDef name:set_position_3d arg:self arg:xyz arg:zdir arguments arg arg arg Call Call Call If Compare Assign Call"
  },
  {
    "library": "tensorflow",
    "name": "to_proto",
    "source_code": "def to_proto(self, export_scope=None):\n    if context.executing_eagerly():\n        raise RuntimeError('This operation is not supported when eager execution is enabled.')\n    if export_scope is None or self.handle.name.startswith(export_scope):\n        var_def = variable_pb2.VariableDef()\n        var_def.variable_name = ops.strip_name_scope(self.handle.name, export_scope)\n        if self._initial_value is not None:\n            var_def.initial_value_name = ops.strip_name_scope(self._initial_value.name, export_scope)\n        var_def.initializer_name = ops.strip_name_scope(self.initializer.name, export_scope)\n        if self._cached_value is not None:\n            var_def.snapshot_name = ops.strip_name_scope(self._cached_value.name, export_scope)\n        else:\n            var_def.snapshot_name = ops.strip_name_scope(self._graph_element.name, export_scope)\n        var_def.is_resource = True\n        var_def.trainable = self.trainable\n        var_def.synchronization = self.synchronization.value\n        var_def.aggregation = self.aggregation.value\n        if self._save_slice_info:\n            var_def.save_slice_info_def.MergeFrom(self._save_slice_info.to_proto(export_scope=export_scope))\n        return var_def\n    else:\n        return None",
    "docstring": "Converts a to a protocol buffer. Args: export_scope: Optional . Name scope to remove. Raises: RuntimeError: If run in EAGER mode. Returns: A protocol buffer, or if the is not in the specified name scope.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\resource_variable_ops.py",
    "ast_data": "FunctionDef name:to_proto arg:self arg:export_scope arguments arg arg If Call Raise Call If BoolOp Compare Call Assign Call Assign Call If Compare Assign Call Assign Call If Compare Assign Call Assign Call Assign Assign Assign Assign If Call Call Return return:yes Return return:no"
  },
  {
    "library": "tensorflow",
    "name": "element_to_bucket_id",
    "source_code": "def element_to_bucket_id(*args):\n    seq_length = element_length_func(*args)\n    boundaries = list(bucket_boundaries)\n    buckets_min = [np.iinfo(np.int32).min] + boundaries\n    buckets_max = boundaries + [np.iinfo(np.int32).max]\n    conditions_c = math_ops.logical_and(math_ops.less_equal(buckets_min, seq_length), math_ops.less(seq_length, buckets_max))\n    bucket_id = math_ops.reduce_min(array_ops.where(conditions_c))\n    return bucket_id",
    "docstring": "Return int64 id of the length bucket for this element.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\data\\ops\\dataset_ops.py",
    "ast_data": "FunctionDef name:element_to_bucket_id arguments arg Assign Call Assign Call Assign Call Assign Call Assign Call Call Call Assign Call Call Return return:yes"
  },
  {
    "library": "django",
    "name": "CallableChoiceIterator",
    "source_code": "class CallableChoiceIterator(BaseChoiceIterator):\n\n    def __init__(self, func):\n        self.func = func\n\n    def __iter__(self):\n        yield from normalize_choices(self.func())",
    "docstring": "Iterator to lazily normalize choices generated by a callable.",
    "type": "class",
    "file_path": "django\\django\\utils\\choices.py",
    "ast_data": "ClassDef name:CallableChoiceIterator FunctionDef name:__init__ arg:self arg:func arguments arg arg Assign FunctionDef name:__iter__ arg:self arguments arg Call Call"
  },
  {
    "library": "numpy",
    "name": "ljust",
    "source_code": "def ljust(self, width, fillchar=' '):\n    return asarray(ljust(self, width, fillchar))",
    "docstring": "Return an array with the elements of left-justified in a string of length . See Also -------- char.ljust",
    "type": "method",
    "file_path": "numpy\\numpy\\_core\\defchararray.py",
    "ast_data": "FunctionDef name:ljust arg:self arg:width arg:fillchar arguments arg arg arg Return return:yes Call Call"
  },
  {
    "library": "kornia",
    "name": "get_transformation_matrix",
    "source_code": "def get_transformation_matrix(self, input: Tensor, params: Optional[List[ParamItem]]=None, recompute: bool=False, extra_args: Optional[Dict[str, Any]]=None) -> Optional[Tensor]:\n    raise NotImplementedError",
    "docstring": "Compute the transformation matrix according to the provided parameters. Args: input: the input tensor. params: params for the sequence. recompute: if to recompute the transformation matrix according to the params. default: False. extra_args: Optional dictionary of extra arguments with specific options for different input types.",
    "type": "method",
    "file_path": "kornia\\kornia\\augmentation\\container\\base.py",
    "ast_data": "FunctionDef name:get_transformation_matrix arg:self arg:input arg:params arg:recompute arg:extra_args arguments arg arg arg arg arg Raise"
  },
  {
    "library": "scikit-learn",
    "name": "add_dummy_feature",
    "source_code": "@validate_params({'X': ['array-like', 'sparse matrix'], 'value': [Interval(Real, None, None, closed='neither')]}, prefer_skip_nested_validation=True)\ndef add_dummy_feature(X, value=1.0):\n    X = check_array(X, accept_sparse=['csc', 'csr', 'coo'], dtype=FLOAT_DTYPES)\n    n_samples, n_features = X.shape\n    shape = (n_samples, n_features + 1)\n    if sparse.issparse(X):\n        if X.format == 'coo':\n            col = X.col + 1\n            col = np.concatenate((np.zeros(n_samples), col))\n            row = np.concatenate((np.arange(n_samples), X.row))\n            data = np.concatenate((np.full(n_samples, value), X.data))\n            return sparse.coo_matrix((data, (row, col)), shape)\n        elif X.format == 'csc':\n            indptr = X.indptr + n_samples\n            indptr = np.concatenate((np.array([0]), indptr))\n            indices = np.concatenate((np.arange(n_samples), X.indices))\n            data = np.concatenate((np.full(n_samples, value), X.data))\n            return sparse.csc_matrix((data, indices, indptr), shape)\n        else:\n            klass = X.__class__\n            return klass(add_dummy_feature(X.tocoo(), value))\n    else:\n        return np.hstack((np.full((n_samples, 1), value), X))",
    "docstring": "Augment dataset with an additional dummy feature. This is useful for fitting an intercept term with implementations which cannot otherwise fit it directly. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) Data. value : float Value to use for the dummy feature. Returns ------- X : {ndarray, sparse matrix} of shape (n_samples, n_features + 1) Same data with dummy feature added as first column. Examples -------- >>> from sklearn.preprocessing import add_dummy_feature >>> add_dummy_feature([[0, 1], [1, 0]]) array([[1., 0., 1.], [1., 1., 0.]])",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\preprocessing\\_data.py",
    "ast_data": "FunctionDef name:add_dummy_feature arg:X arg:value arguments arg arg Assign Call Assign Assign If Call If Compare Assign Assign Call Call Assign Call Call Assign Call Call Return return:yes Call If Compare Assign Assign Call Call Assign Call Call Assign Call Call Return return:yes Call Assign Return return:yes Call Call Call Return return:yes Call Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "PerTensor",
    "source_code": "@dataclass(frozen=True)\nclass PerTensor(Granularity):\n    pass",
    "docstring": "Represents per-tensor granularity in quantization. This granularity type calculates the quantization parameters based off the entire tensor.",
    "type": "class",
    "file_path": "pytorch\\torch\\ao\\quantization\\observer.py",
    "ast_data": "ClassDef name:PerTensor Call"
  },
  {
    "library": "pytorch",
    "name": "_check_orig_params_flattened",
    "source_code": "def _check_orig_params_flattened(fsdp_module, ignored_params: set[nn.Parameter]) -> None:\n    for param_name, param in _named_parameters_with_duplicates(fsdp_module):\n        if param not in ignored_params and (not _is_fsdp_flattened(param)):\n            raise RuntimeError(f'Found an unflattened parameter: {param_name}; {param.size()} {param.__class__}')",
    "docstring": "Check that original parameters in ``. This should be called as a sanity check after flattening the wrapped module's parameters.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\fsdp\\_init_utils.py",
    "ast_data": "FunctionDef name:_check_orig_params_flattened arg:fsdp_module arg:ignored_params arguments arg arg For Call If BoolOp Compare Call Raise Call Call"
  },
  {
    "library": "pytorch",
    "name": "TensorChunkSpec",
    "source_code": "class TensorChunkSpec:\n\n    def __init__(self, split_dim):\n        self.split_dim = split_dim\n    split_dim: int\n\n    def __repr__(self):\n        return f'{self.__class__.__module__}.{self.__class__.__name__}({self.split_dim})'\n\n    def __str__(self):\n        return f'TensorChunkSpec({self.split_dim})'\n\n    @staticmethod\n    def from_tuple(chunk_dims: tuple[int, ...]):\n        args_chunk_spec = map_aggregate(chunk_dims, lambda dim: TensorChunkSpec(dim))\n        return args_chunk_spec\n\n    @staticmethod\n    def from_dict(chunk_dims: dict[str, int]):\n        kwargs_chunk_spec = map_aggregate(chunk_dims, lambda dim: TensorChunkSpec(dim))\n        return kwargs_chunk_spec",
    "docstring": "Class used to specify chunking of inputs",
    "type": "class",
    "file_path": "pytorch\\torch\\distributed\\pipelining\\microbatch.py",
    "ast_data": "ClassDef name:TensorChunkSpec FunctionDef name:__init__ arg:self arg:split_dim arguments arg arg Assign FunctionDef name:__repr__ arg:self arguments arg Return return:yes FunctionDef name:__str__ arg:self arguments arg Return return:yes FunctionDef name:from_tuple arg:chunk_dims arguments arg Assign Call arguments arg Call Return return:yes FunctionDef name:from_dict arg:chunk_dims arguments arg Assign Call arguments arg Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_get_replica_order",
    "source_code": "def _get_replica_order(self):\n    if not self._enable_data_reorder:\n        return None\n    tpu_devices = self._tpu_devices[:, 0]\n    devices_with_ids = []\n    for i, tpu_device in enumerate(tpu_devices):\n        spec = tf_device.DeviceSpec.from_string(tpu_device)\n        devices_with_ids.append(((spec.job, spec.replica, spec.device_type, spec.task, spec.device_index), i))\n    return [i for _, i in sorted(devices_with_ids)]",
    "docstring": "Get the replica order based on the tpu device order. For example, if the tpu_devices are: '/job:worker/replica:0/task:0/device:TPU:0', '/job:worker/replica:0/task:0/device:TPU:2', '/job:worker/replica:0/task:1/device:TPU:0', '/job:worker/replica:0/task:1/device:TPU:2', '/job:worker/replica:0/task:1/device:TPU:6', '/job:worker/replica:0/task:1/device:TPU:4', '/job:worker/replica:0/task:0/device:TPU:6', '/job:worker/replica:0/task:0/device:TPU:4', the returned replica order will be: [0, 1, 7, 6, 2, 3, 5, 4] This replica order will be used to reorder the data returned by the iterators, so that they can be placed on the same node as their computation graphs. Returns: A list containing the order ids of corresponding TPU devices.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\tpu_strategy.py",
    "ast_data": "FunctionDef name:_get_replica_order arg:self arguments arg If Return return:no Assign Assign For Call Assign Call Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "replace_variables_with_atoms",
    "source_code": "def replace_variables_with_atoms(values):\n\n    def _replace_resource_variable_with_atom(x):\n        if _pywrap_utils.IsResourceVariable(x):\n            return 0\n        else:\n            return x\n    return nest.map_structure(_replace_resource_variable_with_atom, values)",
    "docstring": "Replaces s in with tf.nest atoms. This function is mostly for backward compatibility. Historically, s are treated as tf.nest atoms. This is no longer the case after becoming . Unfortunately, tf.nest doesn't allow customization of what objects are treated as atoms. Calling this function to manually convert s to atoms to avoid breaking tf.assert_same_structure with inputs of a and an atom, like a . The specific implementation uses 0 as the tf.nest atom, but other tf.nest atoms could also serve the purpose. Note, the of None is not a tf.nest atom. Objects other than s in will be returned unchanged. Note: this function does not look into s. Replacing s in a with atoms will change the of the , which violates the semantics of and tf.nest. So s in s will be returned as they are. Args: values: A nested structure of s, or any other objects. Returns: A new structure with s in converted to atoms.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\util\\variable_utils.py",
    "ast_data": "FunctionDef name:replace_variables_with_atoms arg:values arguments arg FunctionDef name:_replace_resource_variable_with_atom arg:x arguments arg If Call Return return:yes Return return:yes Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_FractionalAvgPoolGrad",
    "source_code": "@ops.RegisterGradient('FractionalAvgPool')\ndef _FractionalAvgPoolGrad(op: ops.Operation, grad_0, unused_grad_1, unused_grad_2):\n    return gen_nn_ops.fractional_avg_pool_grad(op.inputs[0].get_shape(), grad_0, op.outputs[1], op.outputs[2], op.get_attr('overlapping'))",
    "docstring": "Returns gradient for FractionalAvgPool. Since FractionalAvgPool has three outputs, there are three gradients passed in for each of the outputs. Only the first one is useful, the other two gradients are empty. Args: op: The FractionalAvgPoolOp. grad_0: Gradient with respect to op.outputs[0] unused_grad_1: Gradient with respect to op.outputs[1]/row_seq. It is empty. unused_grad_2: Gradient with respect to op.outputs[2]/col_seq. It is empty. Returns: Input backprop for FractionalAvgPool op.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\nn_grad.py",
    "ast_data": "FunctionDef name:_FractionalAvgPoolGrad arg:op arg:grad_0 arg:unused_grad_1 arg:unused_grad_2 arguments arg arg arg arg Return return:yes Call Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_TensorSliceDataset",
    "source_code": "class _TensorSliceDataset(dataset_ops.DatasetSource):\n\n    def __init__(self, element, is_files=False, name=None):\n        element = structure.normalize_element(element)\n        batched_spec = structure.type_spec_from_value(element)\n        self._tensors = structure.to_batched_tensor_list(batched_spec, element)\n        if not self._tensors:\n            raise ValueError('Invalid `element`. `element` should not be empty.')\n        self._structure = nest.map_structure(lambda component_spec: component_spec._unbatch(), batched_spec)\n        self._name = name\n        batch_dim = tensor_shape.Dimension(tensor_shape.dimension_value(self._tensors[0].get_shape()[0]))\n        for t in self._tensors[1:]:\n            batch_dim.assert_is_compatible_with(tensor_shape.Dimension(tensor_shape.dimension_value(t.get_shape()[0])))\n        variant_tensor = gen_dataset_ops.tensor_slice_dataset(self._tensors, output_shapes=structure.get_flat_tensor_shapes(self._structure), is_files=is_files, metadata=self._metadata.SerializeToString())\n        super().__init__(variant_tensor)\n\n    @property\n    def element_spec(self):\n        return self._structure",
    "docstring": "A of slices from a dataset element.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\data\\ops\\from_tensor_slices_op.py",
    "ast_data": "ClassDef name:_TensorSliceDataset FunctionDef name:__init__ arg:self arg:element arg:is_files arg:name arguments arg arg arg arg Assign Call Assign Call Assign Call If Raise Call Assign Call arguments arg Call Assign Assign Call Call Call For Call Call Call Call Assign Call Call Call Call Call FunctionDef name:element_spec arg:self arguments arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_use_flex_decoding",
    "source_code": "def _use_flex_decoding(query, kv_indices, kernel_options, enable_gqa):\n    force_flex = kernel_options.get('FORCE_USE_FLEX_ATTENTION', False)\n    short_query_length = V.graph.sizevars.evaluate_expr(sympy.Lt(query.get_size()[-2], 128))\n    non_zero_length = V.graph.sizevars.evaluate_expr(sympy.Gt(query.get_size()[-2], 0))\n    static_batch = isinstance(query.get_size()[0], (int, sympy.Integer))\n    static_num_heads = isinstance(query.get_size()[1], (int, sympy.Integer))\n    if enable_gqa:\n        valid_block_mask_num_heads = V.graph.sizevars.evaluate_expr(sympy.Eq(kv_indices.get_size()[1], 1))\n    else:\n        valid_block_mask_num_heads = V.graph.sizevars.evaluate_expr(sympy.Or(sympy.Eq(kv_indices.get_size()[1], 1), sympy.Eq(kv_indices.get_size()[1], query.get_size()[1])))\n    return not force_flex and short_query_length and static_batch and static_num_heads and non_zero_length and valid_block_mask_num_heads",
    "docstring": "Decide which kernel to use, return true if use flex decoding kernel. Note: Since the number of splits is calculated based of the the number of batch and head dims we need to ensure that the batch and head dims are statically known. Otherwise we just use the main flex_attention kernel.",
    "type": "function",
    "file_path": "pytorch\\torch\\_inductor\\kernel\\flex_attention.py",
    "ast_data": "FunctionDef name:_use_flex_decoding arg:query arg:kv_indices arg:kernel_options arg:enable_gqa arguments arg arg arg arg Assign Call Assign Call Call Call Assign Call Call Call Assign Call Call Assign Call Call If Assign Call Call Call Assign Call Call Call Call Call Call Call Return return:yes BoolOp"
  },
  {
    "library": "pytorch",
    "name": "check_env",
    "source_code": "class check_env(Action):\n\n    def __init__(self, dest, default=False, **kwargs) -> None:\n        env_name = f'PET_{dest.upper()}'\n        default = bool(int(os.environ.get(env_name, '1' if default else '0')))\n        super().__init__(dest=dest, const=True, default=default, nargs=0, **kwargs)\n\n    def __call__(self, parser, namespace, values, option_string=None):\n        setattr(namespace, self.dest, self.const)",
    "docstring": "Check whether the env var `` otherwise. Example: :: parser.add_argument(\"--verbose\", action=check_env) ./program -> args.verbose=False ./program --verbose -> args.verbose=True PET_VERBOSE=1 ./program -> args.verbose=True PET_VERBOSE=0 ./program -> args.verbose=False PET_VERBOSE=0 ./program --verbose -> args.verbose=True Anti-pattern (don't do this): :: parser.add_argument(\"--verbose\", action=check_env, default=True) ./program -> args.verbose=True ./program --verbose -> args.verbose=True PET_VERBOSE=1 ./program -> args.verbose=True PET_VERBOSE=0 ./program -> args.verbose=False",
    "type": "class",
    "file_path": "pytorch\\torch\\distributed\\argparse_util.py",
    "ast_data": "ClassDef name:check_env FunctionDef name:__init__ arg:self arg:dest arg:default arguments arg arg arg arg Assign Call Assign Call Call Call Call Call FunctionDef name:__call__ arg:self arg:parser arg:namespace arg:values arg:option_string arguments arg arg arg arg arg Call"
  },
  {
    "library": "pandas",
    "name": "infer_objects",
    "source_code": "@final\ndef infer_objects(self, copy: bool=True) -> Index:\n    if self._is_multi:\n        raise NotImplementedError('infer_objects is not implemented for MultiIndex. Use index.to_frame().infer_objects() instead.')\n    if self.dtype != object:\n        return self.copy() if copy else self\n    values = self._values\n    values = cast('npt.NDArray[np.object_]', values)\n    res_values = lib.maybe_convert_objects(values, convert_non_numeric=True)\n    if copy and res_values is values:\n        return self.copy()\n    result = Index(res_values, name=self.name)\n    if not copy and res_values is values and (self._references is not None):\n        result._references = self._references\n        result._references.add_index_reference(result)\n    return result",
    "docstring": "If we have an object dtype, try to infer a non-object dtype. Parameters ---------- copy : bool, default True Whether to make a copy in cases where no inference occurs.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\indexes\\base.py",
    "ast_data": "FunctionDef name:infer_objects arg:self arg:copy arguments arg arg If Raise Call If Compare Return return:yes Call Assign Assign Call Assign Call If BoolOp Compare Return return:yes Call Assign Call If BoolOp Compare Compare Assign Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_set_ddp_sink_clone",
    "source_code": "def _set_ddp_sink_clone(self, val: bool):\n    self._ddp_sink_clone = val",
    "docstring": "Sets whether or not DDPSink should clone the output tensors or not. The default is True since if the loss is modified in place we run into the view is modified in-place error. Although, cloning the tensors can add significant memory and performance hit if the number and size of tensors are large. As a result, this can be set to False if you are not modifying the loss in place.",
    "type": "method",
    "file_path": "pytorch\\torch\\nn\\parallel\\distributed.py",
    "ast_data": "FunctionDef name:_set_ddp_sink_clone arg:self arg:val arguments arg arg Assign"
  },
  {
    "library": "virtualenv",
    "name": "Seeder",
    "source_code": "class Seeder(ABC):\n\n    def __init__(self, options, enabled) -> None:\n        self.enabled = enabled\n        self.env = options.env\n\n    @classmethod\n    def add_parser_arguments(cls, parser, interpreter, app_data):\n        raise NotImplementedError\n\n    @abstractmethod\n    def run(self, creator):\n        raise NotImplementedError",
    "docstring": "A seeder will install some seed packages into a virtual environment.",
    "type": "class",
    "file_path": "virtualenv\\src\\virtualenv\\seed\\seeder.py",
    "ast_data": "ClassDef name:Seeder FunctionDef name:__init__ arg:self arg:options arg:enabled arguments arg arg arg Assign Assign FunctionDef name:add_parser_arguments arg:cls arg:parser arg:interpreter arg:app_data arguments arg arg arg arg Raise FunctionDef name:run arg:self arg:creator arguments arg arg Raise"
  },
  {
    "library": "django",
    "name": "NodeNotFoundError",
    "source_code": "class NodeNotFoundError(LookupError):\n\n    def __init__(self, message, node, origin=None):\n        self.message = message\n        self.origin = origin\n        self.node = node\n\n    def __str__(self):\n        return self.message\n\n    def __repr__(self):\n        return 'NodeNotFoundError(%r)' % (self.node,)",
    "docstring": "An attempt on a node is made that is not available in the graph.",
    "type": "class",
    "file_path": "django\\django\\db\\migrations\\exceptions.py",
    "ast_data": "ClassDef name:NodeNotFoundError FunctionDef name:__init__ arg:self arg:message arg:node arg:origin arguments arg arg arg arg Assign Assign Assign FunctionDef name:__str__ arg:self arguments arg Return return:yes FunctionDef name:__repr__ arg:self arguments arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "override",
    "source_code": "def override(self, key: _K, value: _V) -> None:\n    self._overrides[key] = value\n    self._merged[key] = value",
    "docstring": "Overrides a base key-value with a new pair.",
    "type": "method",
    "file_path": "pytorch\\torch\\onnx\\_internal\\registration.py",
    "ast_data": "FunctionDef name:override arg:self arg:key arg:value arguments arg arg arg Assign Assign"
  },
  {
    "library": "tensorflow",
    "name": "NameListToString",
    "source_code": "def NameListToString(name_list):\n    if isinstance(name_list, str):\n        return name_list\n    else:\n        result = ''\n        if name_list is not None:\n            for val in name_list:\n                result = result + chr(int(val))\n        return result",
    "docstring": "Converts a list of integers to the equivalent ASCII string.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\lite\\tools\\visualize.py",
    "ast_data": "FunctionDef name:NameListToString arg:name_list arguments arg If Call Return return:yes Assign If Compare For Assign Call Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "set_pickradius",
    "source_code": "def set_pickradius(self, pickradius):\n    if not isinstance(pickradius, Real) or pickradius < 0:\n        raise ValueError('pick radius should be a distance')\n    self._pickradius = pickradius",
    "docstring": "Set the depth of the axis used by the picker. Parameters ---------- pickradius : float The acceptance radius for containment tests. See also .",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\axis.py",
    "ast_data": "FunctionDef name:set_pickradius arg:self arg:pickradius arguments arg arg If BoolOp Call Compare Raise Call Assign"
  },
  {
    "library": "numpy",
    "name": "join",
    "source_code": "@staticmethod\ndef join(argv):\n    raise NotImplementedError",
    "docstring": "Join a list of arguments into a command line string",
    "type": "method",
    "file_path": "numpy\\numpy\\distutils\\_shell_utils.py",
    "ast_data": "FunctionDef name:join arg:argv arguments arg Raise"
  },
  {
    "library": "tensorflow",
    "name": "_composite_to_tensors",
    "source_code": "def _composite_to_tensors(value, is_batched=False):\n    if _should_expand_composite(value):\n        spec = value._type_spec\n        if not isinstance(spec, type_spec.BatchableTypeSpec):\n            raise ValueError(f'CompositeTensor instance {value} returned from parallel_for or vectorized_map loop body must provide a `BatchableTypeSpec` (saw: {spec}).')\n        if is_batched:\n            return spec._to_batched_tensor_list(value)\n        return spec._to_tensor_list(value)\n    return value",
    "docstring": "Converts a CompositeTensor into a list of stackable tensors.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\parallel_for\\control_flow_ops.py",
    "ast_data": "FunctionDef name:_composite_to_tensors arg:value arg:is_batched arguments arg arg If Call Assign If Call Raise Call If Return return:yes Call Return return:yes Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "_request_autoscale_view",
    "source_code": "def _request_autoscale_view(self, axis='all', tight=None):\n    axis_names = _api.check_getitem({**{k: [k] for k in self._axis_names}, 'all': self._axis_names}, axis=axis)\n    for name in axis_names:\n        self._stale_viewlims[name] = True\n    if tight is not None:\n        self._tight = tight",
    "docstring": "Mark a single axis, or all of them, as stale wrt. autoscaling. No computation is performed until the next autoscaling; thus, separate calls to control individual axises incur negligible performance cost. Parameters ---------- axis : str, default: \"all\" Either an element of ``, or \"all\". tight : bool or None, default: None",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\axes\\_base.py",
    "ast_data": "FunctionDef name:_request_autoscale_view arg:self arg:axis arg:tight arguments arg arg arg Assign Call For Assign If Compare Assign"
  },
  {
    "library": "tensorflow",
    "name": "UpSampling1D",
    "source_code": "class UpSampling1D(Layer):\n\n    def __init__(self, size=2, **kwargs):\n        super(UpSampling1D, self).__init__(**kwargs)\n        self.size = int(size)\n        self.input_spec = InputSpec(ndim=3)\n\n    def compute_output_shape(self, input_shape):\n        input_shape = tensor_shape.TensorShape(input_shape).as_list()\n        size = self.size * input_shape[1] if input_shape[1] is not None else None\n        return tensor_shape.TensorShape([input_shape[0], size, input_shape[2]])\n\n    def call(self, inputs):\n        output = backend.repeat_elements(inputs, self.size, axis=1)\n        return output\n\n    def get_config(self):\n        config = {'size': self.size}\n        base_config = super(UpSampling1D, self).get_config()\n        return dict(list(base_config.items()) + list(config.items()))",
    "docstring": "Upsampling layer for 1D inputs. Repeats each temporal step times along the time axis. Examples: >>> input_shape = (2, 2, 3) >>> x = np.arange(np.prod(input_shape)).reshape(input_shape) >>> print(x) [[[ 0 1 2] [ 3 4 5]] [[ 6 7 8] [ 9 10 11]]] >>> y = tf.keras.layers.UpSampling1D(size=2)(x) >>> print(y) tf.Tensor( [[[ 0 1 2] [ 0 1 2] [ 3 4 5] [ 3 4 5]] [[ 6 7 8] [ 6 7 8] [ 9 10 11] [ 9 10 11]]], shape=(2, 4, 3), dtype=int64) Args: size: Integer. Upsampling factor. Input shape: 3D tensor with shape: . Output shape: 3D tensor with shape: .",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\layers\\convolutional.py",
    "ast_data": "ClassDef name:UpSampling1D FunctionDef name:__init__ arg:self arg:size arguments arg arg arg Call Call Assign Call Assign Call FunctionDef name:compute_output_shape arg:self arg:input_shape arguments arg arg Assign Call Call Assign Compare Return return:yes Call FunctionDef name:call arg:self arg:inputs arguments arg arg Assign Call Return return:yes FunctionDef name:get_config arg:self arguments arg Assign Assign Call Call Return return:yes Call Call Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_gather_serialized_tensors",
    "source_code": "def _gather_serialized_tensors(self, object_graph_tensor=None):\n    serialized_tensors, feed_additions, registered_savers, graph_proto = save_util.serialize_graph_view(self._graph_view, self._object_map, cache=self._cache)\n    if self._saveables_cache is not None:\n        self._saveables_cache = saveable_object_util.serialized_tensors_to_saveable_cache(serialized_tensors)\n    if object_graph_tensor is None:\n        with ops.device('/cpu:0'):\n            object_graph_tensor = constant_op.constant(graph_proto.SerializeToString(), dtype=dtypes.string)\n    else:\n        feed_additions.update({object_graph_tensor: graph_proto.SerializeToString()})\n    assert base.OBJECT_GRAPH_PROTO_KEY not in serialized_tensors.get(None, {})\n    serialized_tensors.setdefault(None, {})[base.OBJECT_GRAPH_PROTO_KEY] = object_graph_tensor\n    return (serialized_tensors, feed_additions, registered_savers, graph_proto)",
    "docstring": "Gathers tensors to save to ckpt and includes the object graph proto.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\checkpoint\\checkpoint.py",
    "ast_data": "FunctionDef name:_gather_serialized_tensors arg:self arg:object_graph_tensor arguments arg arg Assign Call If Compare Assign Call If Compare With Call Assign Call Call Call Call Compare Call Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "dense_labels_to_sparse",
    "source_code": "def dense_labels_to_sparse(dense, length):\n    flat_values = array_ops.reshape(dense, [-1])\n    flat_indices = math_ops.range(array_ops.shape(flat_values, out_type=dtypes.int64)[0])\n    mask = array_ops.sequence_mask(length, maxlen=array_ops.shape(dense)[1])\n    flat_mask = array_ops.reshape(mask, [-1])\n    indices = array_ops.expand_dims(array_ops.boolean_mask(flat_indices, flat_mask), 1)\n    values = array_ops.boolean_mask(flat_values, flat_mask)\n    sparse = sparse_tensor.SparseTensor(indices=indices, values=math_ops.cast(values, dtypes.int32), dense_shape=array_ops.shape(flat_values, out_type=dtypes.int64))\n    reshaped = sparse_ops.sparse_reshape(sparse, array_ops.shape(dense))\n    max_length = math_ops.reduce_max(length)\n    return sparse_tensor.SparseTensor(indices=reshaped.indices, values=reshaped.values, dense_shape=[math_ops.cast(reshaped.dense_shape[0], dtypes.int64), math_ops.cast(max_length, dtypes.int64)])",
    "docstring": "Convert dense labels with sequence lengths to sparse tensor. Args: dense: tensor of shape [batch, max_length] length: int tensor of shape [batch] The length of each sequence in dense. Returns: tf.sparse.SparseTensor with values only for the valid elements of sequences.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\ctc_ops.py",
    "ast_data": "FunctionDef name:dense_labels_to_sparse arg:dense arg:length arguments arg arg Assign Call Assign Call Call Assign Call Call Assign Call Assign Call Call Assign Call Assign Call Call Call Assign Call Call Assign Call Return return:yes Call Call Call"
  },
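A hedged NumPy analogue of the masking step inside `dense_labels_to_sparse`: build a sequence mask from `length`, then keep only the valid (row, col) positions and their values. The real function assembles these into a `tf.sparse.SparseTensor`; this sketch only mirrors the index/value selection:

```python
import numpy as np

# Keep only the first length[i] entries of each row as (indices, values).
dense = np.array([[1, 2, 0], [3, 0, 0]])
length = np.array([2, 1])
mask = np.arange(dense.shape[1]) < length[:, None]  # sequence mask
indices = np.argwhere(mask)   # valid (row, col) positions
values = dense[mask]          # corresponding label values
print(indices.tolist())       # [[0, 0], [0, 1], [1, 0]]
print(values.tolist())        # [1, 2, 3]
```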
  {
    "library": "cherrypy",
    "name": "_format",
    "source_code": "def _format(self, obj, descend=True):\n    if inspect.isframe(obj):\n        filename, lineno, func, context, index = inspect.getframeinfo(obj)\n        return \"<frame of function '%s'>\" % func\n    if not descend:\n        return self.peek(repr(obj))\n    if isinstance(obj, dict):\n        return '{' + ', '.join(['%s: %s' % (self._format(k, descend=False), self._format(v, descend=False)) for k, v in obj.items()]) + '}'\n    elif isinstance(obj, list):\n        return '[' + ', '.join([self._format(item, descend=False) for item in obj]) + ']'\n    elif isinstance(obj, tuple):\n        return '(' + ', '.join([self._format(item, descend=False) for item in obj]) + ')'\n    r = self.peek(repr(obj))\n    if isinstance(obj, (str, int, float)):\n        return r\n    return '%s: %s' % (type(obj), r)",
    "docstring": "Return a string representation of a single object.",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\lib\\gctools.py",
    "ast_data": "FunctionDef name:_format arg:self arg:obj arg:descend arguments arg arg arg If Call Assign Call Return return:yes If Return return:yes Call Call If Call Return return:yes Call Call Call Call If Call Return return:yes Call Call If Call Return return:yes Call Call Assign Call Call If Call Return return:yes Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "missing_devices",
    "source_code": "@property\ndef missing_devices(self):\n    return self._missing_devices",
    "docstring": "Array of indices of missing devices.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\tpu\\topology.py",
    "ast_data": "FunctionDef name:missing_devices arg:self arguments arg Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "_get_vector",
    "source_code": "def _get_vector(self, segments3d):\n    if isinstance(segments3d, np.ndarray):\n        _api.check_shape((None, None, 3), segments3d=segments3d)\n        if isinstance(segments3d, np.ma.MaskedArray):\n            self._faces = segments3d.data\n            self._invalid_vertices = segments3d.mask.any(axis=-1)\n        else:\n            self._faces = segments3d\n            self._invalid_vertices = False\n    else:\n        num_faces = len(segments3d)\n        num_verts = np.fromiter(map(len, segments3d), dtype=np.intp)\n        max_verts = num_verts.max(initial=0)\n        segments = np.empty((num_faces, max_verts, 3))\n        for i, face in enumerate(segments3d):\n            segments[i, :len(face)] = face\n        self._faces = segments\n        self._invalid_vertices = np.arange(max_verts) >= num_verts[:, None]",
    "docstring": "Optimize points for projection. Parameters ---------- segments3d : NumPy array or list of NumPy arrays List of vertices of the boundary of every segment. If all paths are of equal length and this argument is a NumPy array, then it should be of shape (num_faces, num_vertices, 3).",
    "type": "method",
    "file_path": "matplotlib\\lib\\mpl_toolkits\\mplot3d\\art3d.py",
    "ast_data": "FunctionDef name:_get_vector arg:self arg:segments3d arguments arg arg If Call Call If Call Assign Assign Call Assign Assign Assign Call Assign Call Call Assign Call Assign Call For Call Assign Call Assign Assign Compare Call"
  },
  {
    "library": "pytorch",
    "name": "is_built",
    "source_code": "def is_built():\n    return torch._C._has_cuda",
    "docstring": "Return whether PyTorch is built with CUDA support. Note that this doesn't necessarily mean CUDA is available; just that if this PyTorch binary were run on a machine with working CUDA drivers and devices, we would be able to use it.",
    "type": "function",
    "file_path": "pytorch\\torch\\backends\\cuda\\__init__.py",
    "ast_data": "FunctionDef name:is_built arguments Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, metric_name, metric_methods, label_length, *args):\n    self._metric_name = metric_name\n    self._metric_methods = metric_methods\n    self._label_length = label_length\n    if label_length >= len(self._metric_methods):\n        raise ValueError('Cannot create {} metric with label >= {}'.format(self._metric_name, len(self._metric_methods)))\n    self._metric = self._metric_methods[self._label_length].create(*args)",
    "docstring": "Creates a new metric. Args: metric_name: name of the metric class. metric_methods: list of swig metric methods. label_length: length of label args. *args: the arguments to call create method.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\monitoring.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:metric_name arg:metric_methods arg:label_length arguments arg arg arg arg arg Assign Assign Assign If Compare Call Raise Call Call Call Assign Call"
  },
  {
    "library": "pytorch",
    "name": "FlatArgsAdapter",
    "source_code": "class FlatArgsAdapter(abc.ABC):\n\n    @abc.abstractmethod\n    def adapt(self, target_spec: pytree.TreeSpec, input_spec: pytree.TreeSpec, input_args: list[Any], metadata: Optional[dict[str, Any]]=None, obj: Optional[Any]=None) -> list[Any]:\n        ...",
    "docstring": "Adapts input arguments with ``.",
    "type": "class",
    "file_path": "pytorch\\torch\\export\\unflatten.py",
    "ast_data": "ClassDef name:FlatArgsAdapter FunctionDef name:adapt arg:self arg:target_spec arg:input_spec arg:input_args arg:metadata arg:obj arguments arg arg arg arg arg arg"
  },
  {
    "library": "tensorflow",
    "name": "psnr",
    "source_code": "@tf_export('image.psnr')\n@dispatch.add_dispatch_support\ndef psnr(a, b, max_val, name=None):\n    with ops.name_scope(name, 'PSNR', [a, b]):\n        max_val = math_ops.cast(max_val, a.dtype)\n        max_val = convert_image_dtype(max_val, dtypes.float32)\n        a = convert_image_dtype(a, dtypes.float32)\n        b = convert_image_dtype(b, dtypes.float32)\n        mse = math_ops.reduce_mean(math_ops.squared_difference(a, b), [-3, -2, -1])\n        psnr_val = math_ops.subtract(20 * math_ops.log(max_val) / math_ops.log(10.0), np.float32(10 / np.log(10)) * math_ops.log(mse), name='psnr')\n        _, _, checks = _verify_compatible_image_shapes(a, b)\n        with ops.control_dependencies(checks):\n            return array_ops.identity(psnr_val)",
    "docstring": "Returns the Peak Signal-to-Noise Ratio between a and b. This is intended to be used on signals (or images). Produces a PSNR value for each image in batch. The last three dimensions of input are expected to be [height, width, depth]. Example: Args: a: First set of images. b: Second set of images. max_val: The dynamic range of the images (i.e., the difference between the maximum the and minimum allowed values). name: Namespace to embed the computation in. Returns: The scalar PSNR between a and b. The returned tensor has type and shape [batch_size, 1].",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\image_ops_impl.py",
    "ast_data": "FunctionDef name:psnr arg:a arg:b arg:max_val arg:name arguments arg arg arg arg With Call Assign Call Assign Call Assign Call Assign Call Assign Call Call Assign Call Call Call Call Call Call Assign Call With Call Return return:yes Call Call"
  },
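A short usage sketch for the public `tf.image.psnr` API, assuming a recent TensorFlow install; identical inputs give infinite PSNR, so the example compares each image against a noisy copy:

```python
import tensorflow as tf

# One PSNR value per image in the batch.
a = tf.random.uniform((2, 32, 32, 3))
noise = tf.random.normal((2, 32, 32, 3), stddev=0.05)
b = tf.clip_by_value(a + noise, 0.0, 1.0)
print(tf.image.psnr(a, b, max_val=1.0))
```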
  {
    "library": "numpy",
    "name": "legline",
    "source_code": "def legline(off, scl):\n    if scl != 0:\n        return np.array([off, scl])\n    else:\n        return np.array([off])",
    "docstring": "Legendre series whose graph is a straight line. Parameters ---------- off, scl : scalars The specified line is given by ``. See Also -------- numpy.polynomial.polynomial.polyline numpy.polynomial.chebyshev.chebline numpy.polynomial.laguerre.lagline numpy.polynomial.hermite.hermline numpy.polynomial.hermite_e.hermeline Examples -------- >>> import numpy.polynomial.legendre as L >>> L.legline(3,2) array([3, 2]) >>> L.legval(-3, L.legline(3,2)) # should be -3 -3.0",
    "type": "function",
    "file_path": "numpy\\numpy\\polynomial\\legendre.py",
    "ast_data": "FunctionDef name:legline arg:off arg:scl arguments arg arg If Compare Return return:yes Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "deserialize",
    "source_code": "def deserialize(proto):\n    _, type_registrations = _REVIVED_TYPE_REGISTRY.get(proto.identifier, (None, None))\n    if type_registrations is not None:\n        for type_registration in type_registrations:\n            if type_registration.should_load(proto):\n                return (type_registration.from_proto(proto), type_registration.setter)\n    return None",
    "docstring": "Create a trackable object from a SavedUserObject proto. Args: proto: A SavedUserObject to deserialize. Returns: A tuple of (trackable, assignment_fn) where assignment_fn has the same signature as setattr and should be used to add dependencies to when they are available.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\saved_model\\revived_types.py",
    "ast_data": "FunctionDef name:deserialize arg:proto arguments arg Assign Call If Compare For If Call Return return:yes Call Return return:no"
  },
  {
    "library": "pandas",
    "name": "dropna",
    "source_code": "def dropna(self, how: AnyAll='any') -> Self:\n    if how not in ('any', 'all'):\n        raise ValueError(f'invalid how option: {how}')\n    if self.hasnans:\n        res_values = self._values[~self._isnan]\n        return type(self)._simple_new(res_values, name=self.name)\n    return self._view()",
    "docstring": "Return Index without NA/NaN values. Parameters ---------- how : {'any', 'all'}, default 'any' If the Index is a MultiIndex, drop the value when any or all levels are NaN. Returns ------- Index Returns an Index object after removing NA/NaN values. See Also -------- Index.fillna : Fill NA/NaN values with the specified value. Index.isna : Detect missing values. Examples -------- >>> idx = pd.Index([1, np.nan, 3]) >>> idx.dropna() Index([1.0, 3.0], dtype='float64')",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\indexes\\base.py",
    "ast_data": "FunctionDef name:dropna arg:self arg:how arguments arg arg If Compare Raise Call If Assign Return return:yes Call Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "module_display_name",
    "source_code": "@property\ndef module_display_name(self) -> str:\n    return self.top().module_display_name",
    "docstring": "Returns the module display name of the top module.",
    "type": "method",
    "file_path": "pytorch\\torch\\onnx\\_internal\\fx\\passes\\modularization.py",
    "ast_data": "FunctionDef name:module_display_name arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "_move_exported_model_to_train",
    "source_code": "def _move_exported_model_to_train(model: torch.fx.GraphModule):\n    is_training = getattr(model, _EXPORTED_TRAINING_ATTR, False)\n    if is_training:\n        return model\n    setattr(model, _EXPORTED_TRAINING_ATTR, True)\n    _replace_dropout(model, train_to_eval=False)\n    _replace_batchnorm(model, train_to_eval=False)\n    return model",
    "docstring": "Move an exported GraphModule to train mode. This is equivalent to model.train() but only for certain special ops like dropout, batchnorm. QAT users should call this before performing training on the model. This call is idempotent; if the model is already in train mode, nothing will happen.",
    "type": "function",
    "file_path": "pytorch\\torch\\ao\\quantization\\pt2e\\export_utils.py",
    "ast_data": "FunctionDef name:_move_exported_model_to_train arg:model arguments arg Assign Call If Return return:yes Call Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "type_spec",
    "source_code": "@property\ndef type_spec(self):\n    return self._type_spec",
    "docstring": "Returns the symbolically inferred for this Keras output.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\keras_tensor.py",
    "ast_data": "FunctionDef name:type_spec arg:self arguments arg Return return:yes"
  },
  {
    "library": "cryptography",
    "name": "key_size",
    "source_code": "@property\n@abc.abstractmethod\ndef key_size(self) -> int:\n    pass",
    "docstring": "The bit length of the public modulus.",
    "type": "method",
    "file_path": "cryptography\\src\\cryptography\\hazmat\\primitives\\asymmetric\\rsa.py",
    "ast_data": "FunctionDef name:key_size arg:self arguments arg"
  },
  {
    "library": "tensorflow",
    "name": "watch",
    "source_code": "def watch(self, tensor):\n    for t in _extract_tensors_and_variables(tensor):\n        if not backprop_util.IsTrainable(t):\n            logging.log_first_n(logging.WARN, 'The dtype of the watched tensor must be floating (e.g. tf.float32), got %r', 5, t.dtype)\n        if hasattr(t, 'handle'):\n            tape.watch_variable(self._tape, t)\n        else:\n            tape.watch(self._tape, t)",
    "docstring": "Ensures that is being traced by this tape. Args: tensor: a Tensor/Variable or list of Tensors/Variables. Raises: ValueError: if it encounters something that is not a tensor.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\backprop.py",
    "ast_data": "FunctionDef name:watch arg:self arg:tensor arguments arg arg For Call If Call Call If Call Call Call"
  },
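The `watch` method above backs the public `tf.GradientTape.watch` API; a minimal usage sketch that traces a constant the tape would otherwise ignore:

```python
import tensorflow as tf

x = tf.constant(3.0)
with tf.GradientTape() as tape:
    tape.watch(x)  # ensure the constant `x` is traced
    y = x * x
print(tape.gradient(y, x))  # tf.Tensor(6.0, shape=(), dtype=float32)
```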
  {
    "library": "django",
    "name": "DistanceField",
    "source_code": "class DistanceField(models.FloatField):\n\n    def __init__(self, geo_field):\n        super().__init__()\n        self.geo_field = geo_field\n\n    def get_prep_value(self, value):\n        if isinstance(value, Distance):\n            return value\n        return super().get_prep_value(value)\n\n    def get_db_prep_value(self, value, connection, prepared=False):\n        if not isinstance(value, Distance):\n            return value\n        distance_att = connection.ops.get_distance_att_for_field(self.geo_field)\n        if not distance_att:\n            raise ValueError('Distance measure is supplied, but units are unknown for result.')\n        return getattr(value, distance_att)\n\n    def from_db_value(self, value, expression, connection):\n        if value is None:\n            return\n        distance_att = connection.ops.get_distance_att_for_field(self.geo_field)\n        return Distance(**{distance_att: value}) if distance_att else value\n\n    def get_internal_type(self):\n        return 'DistanceField'",
    "docstring": "Wrapper for Distance values.",
    "type": "class",
    "file_path": "django\\django\\contrib\\gis\\db\\models\\sql\\conversion.py",
    "ast_data": "ClassDef name:DistanceField FunctionDef name:__init__ arg:self arg:geo_field arguments arg arg Call Call Assign FunctionDef name:get_prep_value arg:self arg:value arguments arg arg If Call Return return:yes Return return:yes Call Call FunctionDef name:get_db_prep_value arg:self arg:value arg:connection arg:prepared arguments arg arg arg arg If Call Return return:yes Assign Call If Raise Call Return return:yes Call FunctionDef name:from_db_value arg:self arg:value arg:expression arg:connection arguments arg arg arg arg If Compare Return return:no Assign Call Return return:yes Call FunctionDef name:get_internal_type arg:self arguments arg Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "get_position",
    "source_code": "def get_position(self, original=False):\n    if original:\n        return self._originalPosition.frozen()\n    else:\n        locator = self.get_axes_locator()\n        if not locator:\n            self.apply_aspect()\n        return self._position.frozen()",
    "docstring": "Return the position of the Axes within the figure as a . Parameters ---------- original : bool If `.set_position.Bbox`",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\axes\\_base.py",
    "ast_data": "FunctionDef name:get_position arg:self arg:original arguments arg arg If Return return:yes Call Assign Call If Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "enable_history_recording",
    "source_code": "@contextlib.contextmanager\ndef enable_history_recording() -> Generator[None, None, None]:\n    enabled = torch._C._cuda_isHistoryEnabled()\n    try:\n        if not enabled:\n            torch.cuda.memory._record_memory_history()\n        yield\n    finally:\n        if not enabled:\n            torch.cuda.memory._record_memory_history(None)",
    "docstring": "Turns on history recording in the CUDA Caching Allocator",
    "type": "function",
    "file_path": "pytorch\\torch\\_inductor\\cudagraph_trees.py",
    "ast_data": "FunctionDef name:enable_history_recording arguments Assign Call Try If Call If Call"
  },
  {
    "library": "pytorch",
    "name": "sym_eq",
    "source_code": "def sym_eq(x: _T, y: _T) -> BoolLikeType:\n    if isinstance(x, (tuple, list)) and isinstance(y, (list, tuple)):\n        if len(x) != len(y):\n            return False\n        return functools.reduce(operator.and_, map(sym_eq, x, y), True)\n    elif isinstance(x, (int, torch.SymInt)) and isinstance(y, (int, torch.SymInt)):\n        return x == y\n    else:\n        raise AssertionError(f'unexpected sym_eq between {type(x)} {type(y)}')",
    "docstring": "Like ==, but when run on list/tuple, it will recursively test equality and use sym_and to join the results together, without guarding.",
    "type": "function",
    "file_path": "pytorch\\torch\\fx\\experimental\\symbolic_shapes.py",
    "ast_data": "FunctionDef name:sym_eq arg:x arg:y arguments arg arg If BoolOp Call Call If Compare Call Call Return return:yes Return return:yes Call Call If BoolOp Call Call Return return:yes Compare Raise Call Call Call"
  },
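`sym_eq` reduces to ordinary `==` on plain ints, so its recursive list/tuple handling can be exercised without symbolic shapes. The import path below assumes a recent PyTorch where `torch.fx.experimental.symbolic_shapes` exposes it:

```python
from torch.fx.experimental.symbolic_shapes import sym_eq

print(sym_eq(3, 3))                      # True
print(sym_eq((1, 2), [1, 2]))            # True: tuple vs. list is allowed
print(sym_eq([1, [2, 3]], [1, [2, 4]]))  # False: inner element differs
```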
  {
    "library": "scipy",
    "name": "read_sub_array",
    "source_code": "def read_sub_array(self, hdr, copy=True):\n    dt = hdr.dtype\n    num_bytes = reduce(mul, hdr.dims, np.int64(dt.itemsize))\n    if num_bytes > _MAX_INTP:\n        raise ValueError(f\"Variable '{hdr.name.decode('latin1')}' has byte length longer than largest possible NumPy array on this platform.\")\n    buffer = self.mat_stream.read(num_bytes)\n    if len(buffer) != num_bytes:\n        raise ValueError(f\"Not enough bytes to read matrix '{hdr.name.decode('latin1')}'; is this a badly-formed file? Consider listing matrices with `whosmat` and loading named matrices with `variable_names` kwarg to `loadmat`\")\n    arr = np.ndarray(shape=hdr.dims, dtype=dt, buffer=buffer, order='F')\n    if copy:\n        arr = arr.copy()\n    return arr",
    "docstring": "Mat4 read using header dtype and dims Parameters ---------- hdr : object object with attributes `hdrhdr`",
    "type": "method",
    "file_path": "scipy\\scipy\\io\\matlab\\_mio4.py",
    "ast_data": "FunctionDef name:read_sub_array arg:self arg:hdr arg:copy arguments arg arg arg Assign Assign Call Call If Compare Raise Call Call Assign Call If Compare Call Raise Call Call Assign Call If Assign Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "load_backend_module",
    "source_code": "def load_backend_module(self, backend):\n    module_name = self._backend_module_name(backend)\n    return importlib.import_module(module_name)",
    "docstring": "Load and return the module containing the specified backend. Parameters ---------- backend : str Name of backend to load. Returns ------- Module Module containing backend.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\backends\\registry.py",
    "ast_data": "FunctionDef name:load_backend_module arg:self arg:backend arguments arg arg Assign Call Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "end_pan",
    "source_code": "def end_pan(self):\n    del self._pan_start",
    "docstring": "Called when a pan operation completes (when the mouse button is up.) Notes ----- This is intended to be overridden by new projection types.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\axes\\_base.py",
    "ast_data": "FunctionDef name:end_pan arg:self arguments arg"
  },
  {
    "library": "tensorflow",
    "name": "_deserialize_function_spec_as_nonmethod",
    "source_code": "def _deserialize_function_spec_as_nonmethod(function_spec_proto):\n    typeless_fullargspec = nested_structure_coder.decode_proto(function_spec_proto.fullargspec)\n    if function_spec_proto.is_method or (typeless_fullargspec.args and typeless_fullargspec.args[0] == 'self'):\n        if not typeless_fullargspec.args:\n            raise NotImplementedError(\"Cannot deserialize a method function without a named 'self' argument.\")\n        args = typeless_fullargspec.args[1:]\n    else:\n        args = typeless_fullargspec.args\n    fullargspec = tf_inspect.FullArgSpec(args=args, varargs=typeless_fullargspec.varargs, varkw=typeless_fullargspec.varkw, defaults=typeless_fullargspec.defaults, kwonlyargs=typeless_fullargspec.kwonlyargs, kwonlydefaults=typeless_fullargspec.kwonlydefaults, annotations=typeless_fullargspec.annotations)\n    input_signature = nested_structure_coder.decode_proto(function_spec_proto.input_signature)\n    jit_compile = {saved_object_graph_pb2.FunctionSpec.JitCompile.DEFAULT: None, saved_object_graph_pb2.FunctionSpec.JitCompile.ON: True, saved_object_graph_pb2.FunctionSpec.JitCompile.OFF: False}.get(function_spec_proto.jit_compile)\n    return function_type_utils.FunctionSpec.from_fullargspec_and_signature(fullargspec=fullargspec, input_signature=input_signature, jit_compile=jit_compile)",
    "docstring": "Deserialize a FunctionSpec object from its proto representation.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\saved_model\\function_deserialization.py",
    "ast_data": "FunctionDef name:_deserialize_function_spec_as_nonmethod arg:function_spec_proto arguments arg Assign Call If BoolOp BoolOp Compare If Raise Call Assign Assign Assign Call Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "_get_encoded_fill_value",
    "source_code": "def _get_encoded_fill_value(self):\n    if '_FillValue' in self._attributes:\n        fill_value = np.array(self._attributes['_FillValue'], dtype=self.data.dtype).tobytes()\n        if len(fill_value) == self.itemsize():\n            return fill_value\n        else:\n            return self._default_encoded_fill_value()\n    else:\n        return self._default_encoded_fill_value()",
    "docstring": "Returns the encoded fill value for this variable as bytes. This is taken from either the _FillValue attribute, or the default fill value for this variable's data type.",
    "type": "method",
    "file_path": "scipy\\scipy\\io\\_netcdf.py",
    "ast_data": "FunctionDef name:_get_encoded_fill_value arg:self arguments arg If Compare Assign Call Call If Compare Call Call Return return:yes Return return:yes Call Return return:yes Call"
  },
  {
    "library": "django",
    "name": "lon_lat",
    "source_code": "def lon_lat(self, query):\n    data = self.city(query)\n    return (data['longitude'], data['latitude'])",
    "docstring": "Return a tuple of the (longitude, latitude) for the given query.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\geoip2.py",
    "ast_data": "FunctionDef name:lon_lat arg:self arg:query arguments arg arg Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "TextFileIndex",
    "source_code": "@tf_export('lookup.TextFileIndex')\nclass TextFileIndex:\n    WHOLE_LINE = -2\n    LINE_NUMBER = -1",
    "docstring": "The key and value content to get from each line. This class defines the key and value used for . The key and value content to get from each line is specified either by the following, or a value . * means use the line number starting from zero, expects data type int64. * means use the whole line content, expects data type string. A value means use the index (starting at zero) of the split line based on .",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\lookup_ops.py",
    "ast_data": "ClassDef name:TextFileIndex Assign Assign Call"
  },
  {
    "library": "matplotlib",
    "name": "get_kern",
    "source_code": "def get_kern(self, font1: str, fontclass1: str, sym1: str, fontsize1: float, font2: str, fontclass2: str, sym2: str, fontsize2: float, dpi: float) -> float:\n    return 0.0",
    "docstring": "Get the kerning distance for font between *sym1* and *sym2*. See for a detailed description of the parameters.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\_mathtext.py",
    "ast_data": "FunctionDef name:get_kern arg:self arg:font1 arg:fontclass1 arg:sym1 arg:fontsize1 arg:font2 arg:fontclass2 arg:sym2 arg:fontsize2 arg:dpi arguments arg arg arg arg arg arg arg arg arg arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "set_last_step_output",
    "source_code": "def set_last_step_output(self, name, output, reduce_op=None):\n    if distribute_lib.in_cross_replica_context():\n        self._last_step_outputs_reduce_ops[name] = reduce_op\n        if reduce_op is None:\n            self._last_step_outputs[name] = output\n        else:\n            distribution = distribute_lib.get_strategy()\n            self._last_step_outputs[name] = distribution.reduce(reduce_op, output, axis=None)\n    else:\n        assert reduce_op is not None\n\n        def merge_fn(distribution, value):\n            self._last_step_outputs[name] = distribution.reduce(reduce_op, value, axis=None)\n            self._last_step_outputs_reduce_ops[name] = reduce_op\n        distribute_lib.get_replica_context().merge_call(merge_fn, args=(output,))",
    "docstring": "Set with to be outputted from the last step. Args: name: String, name to identify the output. Doesn't need to match tensor name. output: The tensors that should be outputted with . See below for actual types supported. reduce_op: Reduction method to use to reduce outputs from multiple replicas. Required if is called in a replica context. Optional in cross_replica_context. When present, the outputs from all the replicas are reduced using the current distribution strategy's method. Hence, the type of must be what's supported by the corresponding method. For e.g. if using MirroredStrategy and reduction is set, output must be a value. The reduce method is also recorded in a dictionary for later interpreting of the outputs as already reduced or not.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\input_lib.py",
    "ast_data": "FunctionDef name:set_last_step_output arg:self arg:name arg:output arg:reduce_op arguments arg arg arg arg If Call Assign If Compare Assign Assign Call Assign Call Compare FunctionDef name:merge_fn arg:distribution arg:value arguments arg arg Assign Call Assign Call Call"
  },
  {
    "library": "pytorch",
    "name": "aten_abs",
    "source_code": "@onnx_impl((aten.abs.default, operator.abs), trace_only=True)\ndef aten_abs(self: TRealOrUInt8) -> TRealOrUInt8:\n    return op.Abs(self)",
    "docstring": "abs(Tensor self) -> Tensor",
    "type": "function",
    "file_path": "pytorch\\torch\\onnx\\_internal\\exporter\\_torchlib\\ops\\core.py",
    "ast_data": "FunctionDef name:aten_abs arg:self arguments arg Return return:yes Call Call"
  },
  {
    "library": "scipy",
    "name": "mquantiles_cimj",
    "source_code": "def mquantiles_cimj(data, prob=(0.25, 0.5, 0.75), alpha=0.05, axis=None):\n    alpha = min(alpha, 1 - alpha)\n    z = norm.ppf(1 - alpha / 2.0)\n    xq = mstats.mquantiles(data, prob, alphap=0, betap=0, axis=axis)\n    smj = mjci(data, prob, axis=axis)\n    return (xq - z * smj, xq + z * smj)",
    "docstring": "Computes the alpha confidence interval for the selected quantiles of the data, with Maritz-Jarrett estimators. Parameters ---------- data : ndarray Data array. prob : sequence, optional Sequence of quantiles to compute. alpha : float, optional Confidence level of the intervals. axis : int or None, optional Axis along which to compute the quantiles. If None, use a flattened array. Returns ------- ci_lower : ndarray The lower boundaries of the confidence interval. Of the same length as . ci_upper : ndarray The upper boundaries of the confidence interval. Of the same length as .",
    "type": "function",
    "file_path": "scipy\\scipy\\stats\\_mstats_extras.py",
    "ast_data": "FunctionDef name:mquantiles_cimj arg:data arg:prob arg:alpha arg:axis arguments arg arg arg arg Assign Call Assign Call Assign Call Assign Call Return return:yes"
  },
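A usage sketch for `mquantiles_cimj` via its public home in `scipy.stats.mstats`; the printed bounds depend on the random draw:

```python
import numpy as np
from scipy.stats.mstats import mquantiles_cimj

# 95% Maritz-Jarrett confidence intervals for the quartiles.
rng = np.random.default_rng(0)
data = rng.normal(size=200)
lo, hi = mquantiles_cimj(data, prob=(0.25, 0.5, 0.75), alpha=0.05)
print(lo, hi)  # lower/upper bounds, one pair per quantile
```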
  {
    "library": "scipy",
    "name": "_row_count",
    "source_code": "def _row_count(A):\n    tol = 1e-13\n    return np.array((abs(A) > tol).sum(axis=1)).flatten()",
    "docstring": "Counts the number of nonzeros in each row of input array A. Nonzeros are defined as any element with absolute value greater than tol = 1e-13. This value should probably be an input to the function. Parameters ---------- A : 2-D array An array representing a matrix Returns ------- rowcount : 1-D array Number of nonzeros in each row of A",
    "type": "function",
    "file_path": "scipy\\scipy\\optimize\\_remove_redundancy.py",
    "ast_data": "FunctionDef name:_row_count arg:A arguments arg Assign Return return:yes Call Call Call Compare Call"
  },
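The whole of `_row_count` is one thresholded reduction; a standalone NumPy rendition on a small dense matrix makes the `tol = 1e-13` cutoff visible:

```python
import numpy as np

A = np.array([[1.0, 0.0, 2.0],
              [0.0, 0.0, 1e-15]])  # 1e-15 falls below tol, so it doesn't count
print((np.abs(A) > 1e-13).sum(axis=1))  # [2 0]
```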
  {
    "library": "pytorch",
    "name": "check_fp_or_complex",
    "source_code": "def check_fp_or_complex(dtype: torch.dtype, fn_name: str, allow_low_precision_dtypes: bool=True):\n    torch._check(is_float_dtype(dtype) or is_complex_dtype(dtype), lambda: f'{fn_name}: Expected a floating point or complex tensor as input. Got {dtype}')\n    torch._check(allow_low_precision_dtypes or not is_low_precision_dtype(dtype), lambda: f'{fn_name}: Half precision dtypes not supported. Got {dtype}')",
    "docstring": "Checks whether the input is floating point or complex. If allow_low_precision_dtypes is True, it allows having float16, bfloat16, and complex32",
    "type": "function",
    "file_path": "pytorch\\torch\\_prims_common\\__init__.py",
    "ast_data": "FunctionDef name:check_fp_or_complex arg:dtype arg:fn_name arg:allow_low_precision_dtypes arguments arg arg arg Call BoolOp Call Call arguments Call BoolOp Call arguments"
  },
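A hedged usage sketch for `check_fp_or_complex`, assuming it is importable from `torch._prims_common` (where the file above defines it); the second call violates the low-precision rule, so it is wrapped to show the error:

```python
import torch
from torch._prims_common import check_fp_or_complex

check_fp_or_complex(torch.float32, "my_op")  # passes silently
try:
    check_fp_or_complex(torch.float16, "my_op",
                        allow_low_precision_dtypes=False)
except Exception as e:
    print(e)  # my_op: Half precision dtypes not supported. Got torch.float16
```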
  {
    "library": "django",
    "name": "installed_models",
    "source_code": "def installed_models(self, tables):\n    tables = set(map(self.identifier_converter, tables))\n    return {m for m in self.get_migratable_models() if self.identifier_converter(m._meta.db_table) in tables}",
    "docstring": "Return a set of all models represented by the provided list of table names.",
    "type": "method",
    "file_path": "django\\django\\db\\backends\\base\\introspection.py",
    "ast_data": "FunctionDef name:installed_models arg:self arg:tables arguments arg arg Assign Call Call Return return:yes Call Compare Call"
  },
  {
    "library": "numpy",
    "name": "update_executables",
    "source_code": "def update_executables(self):\n    pass",
    "docstring": "Called at the beginning of customisation. Subclasses should override this if they need to set up the executables dictionary. Note that self.find_executables() is run afterwards, so the self.executables dictionary values can contain or as the command, which will be replaced by the found F77 or F90 compiler.",
    "type": "method",
    "file_path": "numpy\\numpy\\distutils\\fcompiler\\__init__.py",
    "ast_data": "FunctionDef name:update_executables arg:self arguments arg"
  },
  {
    "library": "tensorflow",
    "name": "_copy_trackable_to_cpu",
    "source_code": "def _copy_trackable_to_cpu(self, object_map):\n    self._v._copy_trackable_to_cpu(object_map)\n    if self not in object_map:\n        object_map[self] = CachingVariable(object_map[self._v])",
    "docstring": "For implementing .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\ps_values.py",
    "ast_data": "FunctionDef name:_copy_trackable_to_cpu arg:self arg:object_map arguments arg arg Call If Compare Assign Call"
  },
  {
    "library": "tensorflow",
    "name": "flatten",
    "source_code": "def flatten(sequence):\n    flat_sequence = nest.flatten(sequence, expand_composites=True)\n    return [item.flow if isinstance(item, tensor_array_ops.TensorArray) else item for item in flat_sequence]",
    "docstring": "Like nest.flatten w/ expand_composites, but returns flow for TensorArrays. Args: sequence: A nested structure of Tensors, CompositeTensors, and TensorArrays. Returns: A list of tensors.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\func_graph.py",
    "ast_data": "FunctionDef name:flatten arg:sequence arguments arg Assign Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "scan_body",
    "source_code": "def scan_body(scan_state, scan_inputs):\n    loop_vars, iterate = (scan_state, scan_inputs)\n    set_state(loop_vars)\n\n    def main_path():\n        body(iterate)\n        new_loop_vars = get_state()\n        control_flow.verify_tf_loop_vars(init_vars, loop_vars, new_loop_vars, symbol_names, opts, check_shapes=False)\n        return new_loop_vars\n    if extra_test is not None:\n        extra_cond = extra_test()\n        new_loop_vars = cond.cond(extra_cond, main_path, lambda: loop_vars)\n    else:\n        extra_cond = (constant_op.constant(True),)\n        new_loop_vars = main_path()\n    scan_outputs = (new_loop_vars, extra_cond)\n    new_scan_state = new_loop_vars\n    return (new_scan_state, scan_outputs)",
    "docstring": "Main body of the Dataset.scan.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\data\\ops\\dataset_autograph.py",
    "ast_data": "FunctionDef name:scan_body arg:scan_state arg:scan_inputs arguments arg arg Assign Call FunctionDef name:main_path arguments Call Assign Call Call Return return:yes If Compare Assign Call Assign Call arguments Assign Call Assign Call Assign Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_structured_output_mapping",
    "source_code": "def _structured_output_mapping(fetched):\n    lifted = lift_map[fetched]\n    if isinstance(lifted, ops.Operation):\n        return None\n    return lifted",
    "docstring": "callback for",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\wrap_function.py",
    "ast_data": "FunctionDef name:_structured_output_mapping arg:fetched arguments arg Assign If Call Return return:no Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "SaliencyPruner",
    "source_code": "class SaliencyPruner(BaseStructuredSparsifier):\n\n    def update_mask(self, module, tensor_name, **kwargs):\n        weights = getattr(module, tensor_name)\n        mask = getattr(module.parametrizations, tensor_name)[0].mask\n        if weights.dim() <= 1:\n            raise Exception('Structured pruning can only be applied to a 2+dim weight tensor!')\n        saliency = -weights.norm(dim=tuple(range(1, weights.dim())), p=1)\n        assert saliency.shape == mask.shape\n        num_to_pick = int(len(mask) * kwargs['sparsity_level'])\n        prune = saliency.topk(num_to_pick).indices\n        mask.data[prune] = False",
    "docstring": "Prune rows based on the saliency (L1 norm) of each row. This pruner works on N-Dimensional weight tensors. For each row, we will calculate the saliency, whic is the sum the L1 norm of all weights in that row. We expect that the resulting saliency vector has the same shape as our mask. We then pick elements to remove until we reach the target sparsity_level.",
    "type": "class",
    "file_path": "pytorch\\torch\\ao\\pruning\\_experimental\\pruner\\saliency_pruner.py",
    "ast_data": "ClassDef name:SaliencyPruner FunctionDef name:update_mask arg:self arg:module arg:tensor_name arguments arg arg arg arg Assign Call Assign Call If Compare Call Raise Call Assign Call Call Call Call Compare Assign Call Call Assign Call Assign"
  },
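The scoring logic of `update_mask` in isolation, as a plain-tensor sketch (no parametrizations): rank rows by negative L1 norm and mask the lowest-saliency ones. The 0.5 sparsity level is illustrative:

```python
import torch

weights = torch.randn(8, 16)
# Negative L1 norm per row, so topk picks the rows with the smallest norms.
saliency = -weights.norm(p=1, dim=tuple(range(1, weights.dim())))
num_to_pick = int(len(weights) * 0.5)       # 50% sparsity
prune = saliency.topk(num_to_pick).indices  # lowest-saliency rows
mask = torch.ones(8, dtype=torch.bool)
mask[prune] = False
print(mask)  # False marks pruned rows
```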
  {
    "library": "pytorch",
    "name": "_RelaxedConstraint",
    "source_code": "@dataclasses.dataclass\nclass _RelaxedConstraint(_ConstraintTarget):\n\n    @property\n    def serializable_spec(self):\n        return {'t_id': self.t_id, 'dim': self.dim}",
    "docstring": "This represents a dim marked with Dim.AUTO/DYNAMIC (i.e. mark_dynamic() or maybe_mark_dynamic()), which leaves relations & min/max ranges for inference, instead of requiring explicit specification. The intention is for constraint violations to not be raised if produce_guards() finds equalities or relations between a _RelaxedConstraint and another type of _Constraint.",
    "type": "class",
    "file_path": "pytorch\\torch\\export\\dynamic_shapes.py",
    "ast_data": "ClassDef name:_RelaxedConstraint FunctionDef name:serializable_spec arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_truncate",
    "source_code": "def _truncate(self, new_rank: int) -> 'DynamicRaggedShape.Spec':\n    if self.rank is None:\n        return self._set_rank_if_unknown(new_rank)._truncate(new_rank)\n    if new_rank == 0:\n        return DynamicRaggedShape.Spec._from_tensor_shape([], 0, self.dtype)\n    if new_rank == 1:\n        vector_size = self._dimension(0)\n        return DynamicRaggedShape.Spec._from_tensor_shape([vector_size], 0, self.dtype)\n    if new_rank < self.num_row_partitions + 1:\n        new_row_partitions = self._row_partitions[:new_rank - 1]\n        new_static_inner_shape = tensor_shape.TensorShape([new_row_partitions[-1].nvals])\n        return DynamicRaggedShape.Spec(row_partitions=new_row_partitions, static_inner_shape=new_static_inner_shape, dtype=self.dtype)\n    else:\n        remainder = new_rank - self.num_row_partitions\n        new_static_inner_shape = self._static_inner_shape[:remainder]\n        return DynamicRaggedShape.Spec(row_partitions=self._row_partitions, static_inner_shape=new_static_inner_shape, dtype=self.dtype)",
    "docstring": "Truncate a ragged shape spec. For example, if the original spec s was for a shape: [3, [4, 1], 2, 7] Then truncate_dynamic_ragged_shape_spec(s, 3) is a spec for: [3, [4, 1], 2] Args: new_rank: the new rank Returns: A truncated DynamicRaggedShape.Spec.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\dynamic_ragged_shape.py",
    "ast_data": "FunctionDef name:_truncate arg:self arg:new_rank arguments arg arg If Compare Return return:yes Call Call If Compare Return return:yes Call If Compare Assign Call Return return:yes Call If Compare Assign Assign Call Return return:yes Call Assign Assign Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "_filter_nodes_map",
    "source_code": "def _filter_nodes_map(nodes_map: dict[Node, Node]) -> dict[Node, Node]:\n    new_nodes_map: dict[Node, Node] = {}\n    for pattern_node, graph_node in nodes_map.items():\n        if graph_node is None:\n            continue\n        if pattern_node.op == 'placeholder':\n            continue\n        new_nodes_map[pattern_node] = graph_node\n    return new_nodes_map",
    "docstring": "Return a filtered returned from the subgraph rewriter. The filtered will contain only nodes that are actually matched in the pattern, excluding None or placeholder nodes.",
    "type": "function",
    "file_path": "pytorch\\torch\\ao\\quantization\\pt2e\\qat_utils.py",
    "ast_data": "FunctionDef name:_filter_nodes_map arg:nodes_map arguments arg For Call If Compare If Compare Assign Return return:yes"
  },
  {
    "library": "numpy",
    "name": "get_language",
    "source_code": "def get_language(sources):\n    language = None\n    for source in sources:\n        if isinstance(source, str):\n            if f90_ext_match(source):\n                language = 'f90'\n                break\n            elif fortran_ext_match(source):\n                language = 'f77'\n    return language",
    "docstring": "Determine language value (c,f77,f90) from sources",
    "type": "function",
    "file_path": "numpy\\numpy\\distutils\\misc_util.py",
    "ast_data": "FunctionDef name:get_language arg:sources arguments arg Assign For If Call If Call Assign If Call Assign Return return:yes"
  },
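A usage sketch, assuming `numpy.distutils.misc_util` is still importable (the module was removed in NumPy 2.0); `.f90` sources win over `.f` ones because the loop breaks on the first f90 match:

```python
from numpy.distutils.misc_util import get_language

print(get_language(['a.c', 'b.f']))    # 'f77'
print(get_language(['a.f', 'b.f90']))  # 'f90'
print(get_language(['main.c']))        # None
```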
  {
    "library": "tensorflow",
    "name": "Serializable",
    "source_code": "class Serializable(metaclass=abc.ABCMeta):\n\n    @classmethod\n    @abc.abstractmethod\n    def experimental_type_proto(cls) -> Type[message.Message]:\n        raise NotImplementedError\n\n    @classmethod\n    @abc.abstractmethod\n    def experimental_from_proto(cls, proto: message.Message) -> 'Serializable':\n        raise NotImplementedError\n\n    @abc.abstractmethod\n    def experimental_as_proto(self) -> message.Message:\n        raise NotImplementedError",
    "docstring": "TraceTypes implementing this additional interface are portable.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\core\\function\\trace_type\\serialization.py",
    "ast_data": "ClassDef name:Serializable FunctionDef name:experimental_type_proto arg:cls arguments arg Raise FunctionDef name:experimental_from_proto arg:cls arg:proto arguments arg arg Raise FunctionDef name:experimental_as_proto arg:self arguments arg Raise"
  },
  {
    "library": "cryptography",
    "name": "__eq__",
    "source_code": "@abc.abstractmethod\ndef __eq__(self, other: object) -> bool:\n    pass",
    "docstring": "Checks equality.",
    "type": "method",
    "file_path": "cryptography\\src\\cryptography\\hazmat\\primitives\\asymmetric\\dsa.py",
    "ast_data": "FunctionDef name:__eq__ arg:self arg:other arguments arg arg"
  },
  {
    "library": "tensorflow",
    "name": "experimental_local_results",
    "source_code": "def experimental_local_results(self, value):\n    return super(OneDeviceStrategy, self).experimental_local_results(value)",
    "docstring": "Returns the list of all local per-replica values contained in . In , the is always expected to be a single value, so the result is just the value in a tuple. Args: value: A value returned by , , , or a variable created in . Returns: A tuple of values contained in . If represents a single value, this returns",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\one_device_strategy.py",
    "ast_data": "FunctionDef name:experimental_local_results arg:self arg:value arguments arg arg Return return:yes Call Call"
  },
  {
    "library": "matplotlib",
    "name": "_release",
    "source_code": "def _release(self, event):\n    self._set_cursor(False)\n    if not self._interactive:\n        self._selection_artist.set_visible(False)\n    if self._active_handle is None and self._selection_completed and self.ignore_event_outside:\n        return\n    vmin, vmax = self.extents\n    span = vmax - vmin\n    if span <= self.minspan:\n        self.set_visible(False)\n        if self._selection_completed:\n            self.onselect(vmin, vmax)\n        self._selection_completed = False\n    else:\n        self.onselect(vmin, vmax)\n        self._selection_completed = True\n    self.update()\n    self._active_handle = None\n    return False",
    "docstring": "Button release event handler.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\widgets.py",
    "ast_data": "FunctionDef name:_release arg:self arg:event arguments arg arg Call If Call If BoolOp Compare Return return:no Assign Assign If Compare Call If Call Assign Call Assign Call Assign Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "__init__",
    "source_code": "def __init__(self, on_exit: OnExitType):\n    self._on_exit = on_exit\n    self._metrics: dict[str, Any] = {}\n    self._start_time_ns: int = 0",
    "docstring": "Similar to MetricsContext, but used to gather the runtime metrics that are decoupled from compilation, where there's not a natural place to insert a context manager.",
    "type": "method",
    "file_path": "pytorch\\torch\\_dynamo\\metrics_context.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:on_exit arguments arg arg Assign"
  },
  {
    "library": "kornia",
    "name": "forward",
    "source_code": "def forward(self, x: Tensor) -> Tensor:\n    if x.ndim < 1:\n        raise ValueError('Input tensor represents a scalar')\n    if x.shape[-1] != self._num_dims:\n        raise ValueError(f'Input tensor number of dimensions {x.shape[-1]} does not match instantiated dimensionality {self._num_dims}')\n    return torch.cat([fn(x) for fn in self._embed_fns], dim=-1)",
    "docstring": "Apply positional encoding to input. Args: x: Positionsl (or directional) tensor to encode: Tensor Returns: Tensor with encoded position/direction: Tensor",
    "type": "method",
    "file_path": "kornia\\kornia\\nerf\\positional_encoder.py",
    "ast_data": "FunctionDef name:forward arg:self arg:x arguments arg arg If Compare Raise Call If Compare Raise Call Return return:yes Call Call"
  },
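A hypothetical sketch of the sin/cos embedding functions that `forward` concatenates, in the NeRF style the file path suggests; `num_freqs`, `freq_bands`, and `embed_fns` are illustrative stand-ins for the instance attributes:

```python
import torch

# Sin/cos at geometrically spaced frequencies (illustrative names).
num_freqs = 4
freq_bands = 2.0 ** torch.arange(num_freqs)
embed_fns = []
for freq in freq_bands:
    embed_fns.append(lambda x, f=freq: torch.sin(x * f))
    embed_fns.append(lambda x, f=freq: torch.cos(x * f))

x = torch.rand(10, 3)  # 10 positions in 3-D
encoded = torch.cat([fn(x) for fn in embed_fns], dim=-1)
print(encoded.shape)   # torch.Size([10, 24]) = 3 dims * 2 fns * 4 freqs
```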
  {
    "library": "django",
    "name": "SuspiciousSession",
    "source_code": "class SuspiciousSession(SuspiciousOperation):\n    pass",
    "docstring": "The session may be tampered with",
    "type": "class",
    "file_path": "django\\django\\contrib\\sessions\\exceptions.py",
    "ast_data": "ClassDef name:SuspiciousSession"
  },
  {
    "library": "tensorflow",
    "name": "get_contents",
    "source_code": "def get_contents():\n    contents = ''\n    contents += _GENERATED_FILE_HEADER + _INCLUDES\n    contents += get_function('OpGradientUnusedInputIndices', get_entries('inputs'))\n    contents += get_function('OpGradientUnusedOutputIndices', get_entries('outputs'))\n    return contents",
    "docstring": "Returns contents for the generated file.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\gradient_input_output_exclusions.py",
    "ast_data": "FunctionDef name:get_contents arguments Assign Call Call Call Call Return return:yes"
  },
  {
    "library": "scipy",
    "name": "create_multimethod",
    "source_code": "def create_multimethod(*args, **kwargs):\n\n    def wrapper(a):\n        return generate_multimethod(a, *args, **kwargs)\n    return wrapper",
    "docstring": "Creates a decorator for generating multimethods. This function creates a decorator that can be used with an argument extractor in order to generate a multimethod. Other than for the argument extractor, all arguments are passed on to :obj:. See Also -------- generate_multimethod Generates a multimethod.",
    "type": "function",
    "file_path": "scipy\\scipy\\_lib\\_uarray\\_backend.py",
    "ast_data": "FunctionDef name:create_multimethod arguments arg arg FunctionDef name:wrapper arg:a arguments arg Return return:yes Call Return return:yes"
  },
  {
    "library": "sphinx",
    "name": "add_enumerable_node",
    "source_code": "def add_enumerable_node(self, node: type[Element], figtype: str, title_getter: TitleGetter | None=None, override: bool=False, **kwargs: tuple[_NodeHandler, _NodeHandler]) -> None:\n    self.registry.add_enumerable_node(node, figtype, title_getter, override=override)\n    self.add_node(node, override=override, **kwargs)",
    "docstring": "Register a Docutils node class as a numfig target. Sphinx numbers the node automatically. And then the users can refer it using :rst:role:. :param node: A node class :param figtype: The type of enumerable nodes. Each figtype has individual numbering sequences. As system figtypes, `refadd_node`) :param override: If true, install the node forcedly even if another node is already installed as the same name .. versionadded:: 1.4",
    "type": "method",
    "file_path": "sphinx\\sphinx\\application.py",
    "ast_data": "FunctionDef name:add_enumerable_node arg:self arg:node arg:figtype arg:title_getter arg:override arguments arg arg arg arg arg arg Call Call"
  },
  {
    "library": "scipy",
    "name": "roots_chebyu",
    "source_code": "def roots_chebyu(n, mu=False):\n    m = int(n)\n    if n < 1 or n != m:\n        raise ValueError('n must be a positive integer.')\n    t = np.arange(m, 0, -1) * pi / (m + 1)\n    x = np.cos(t)\n    w = pi * np.sin(t) ** 2 / (m + 1)\n    if mu:\n        return (x, w, pi / 2)\n    else:\n        return (x, w)",
    "docstring": "Gauss-Chebyshev (second kind) quadrature. Computes the sample points and weights for Gauss-Chebyshev quadrature. The sample points are the roots of the nth degree Chebyshev polynomial of the second kind, :math:. These sample points and weights correctly integrate polynomials of degree :math: or less over the interval :math: with weight function :math:. See 22.2.5 in [AS]_ for details. Parameters ---------- n : int quadrature order mu : bool, optional If True, return the sum of the weights, optional. Returns ------- x : ndarray Sample points w : ndarray Weights mu : float Sum of the weights See Also -------- scipy.integrate.fixed_quad References ---------- .. [AS] Milton Abramowitz and Irene A. Stegun, eds. Handbook of Mathematical Functions with Formulas, Graphs, and Mathematical Tables. New York: Dover, 1972.",
    "type": "function",
    "file_path": "scipy\\scipy\\special\\_orthogonal.py",
    "ast_data": "FunctionDef name:roots_chebyu arg:n arg:mu arguments arg arg Assign Call If BoolOp Compare Compare Raise Call Assign Call Assign Call Assign Call If Return return:yes Return return:yes"
  },
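A quick check of the quadrature rule: with the implicit weight `sqrt(1 - x**2)`, the nodes and weights from `scipy.special.roots_chebyu` integrate `x**2` over [-1, 1] to the exact value `pi/8`:

```python
import numpy as np
from scipy.special import roots_chebyu

x, w = roots_chebyu(5)
approx = np.sum(w * x**2)  # approximates integral of x^2 * sqrt(1-x^2)
print(approx, np.pi / 8)   # both ~ 0.3926990...
```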
  {
    "library": "matplotlib",
    "name": "sca",
    "source_code": "def sca(self, a):\n    self._axstack.bubble(a)\n    self._axobservers.process('_axes_change_event', self)\n    return a",
    "docstring": "Set the current Axes to be *a* and return *a*.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\figure.py",
    "ast_data": "FunctionDef name:sca arg:self arg:a arguments arg arg Call Call Return return:yes"
  },
  {
    "library": "scipy",
    "name": "_antiderivative_inplace",
    "source_code": "def _antiderivative_inplace(self, nu, axis):\n    if nu <= 0:\n        return self._derivative_inplace(-nu, axis)\n    ndim = len(self.x)\n    axis = axis % ndim\n    perm = list(range(ndim))\n    perm[0], perm[axis] = (perm[axis], perm[0])\n    perm = perm + list(range(ndim, self.c.ndim))\n    c = self.c.transpose(perm)\n    c2 = np.zeros((c.shape[0] + nu,) + c.shape[1:], dtype=c.dtype)\n    c2[:-nu] = c\n    factor = spec.poch(np.arange(c.shape[0], 0, -1), nu)\n    c2[:-nu] /= factor[(slice(None),) + (None,) * (c.ndim - 1)]\n    perm2 = list(range(c2.ndim))\n    perm2[1], perm2[ndim + axis] = (perm2[ndim + axis], perm2[1])\n    c2 = c2.transpose(perm2)\n    c2 = c2.copy()\n    _ppoly.fix_continuity(c2.reshape(c2.shape[0], c2.shape[1], -1), self.x[axis], nu - 1)\n    c2 = c2.transpose(perm2)\n    c2 = c2.transpose(perm)\n    self.c = c2",
    "docstring": "Compute 1-D antiderivative along a selected dimension May result to non-contiguous c array.",
    "type": "method",
    "file_path": "scipy\\scipy\\interpolate\\_interpolate.py",
    "ast_data": "FunctionDef name:_antiderivative_inplace arg:self arg:nu arg:axis arguments arg arg arg If Compare Return return:yes Call Assign Call Assign Assign Call Call Assign Assign Call Call Assign Call Assign Call Assign Assign Call Call Call Assign Call Call Assign Assign Call Assign Call Call Call Assign Call Assign Call Assign"
  },
  {
    "library": "matplotlib",
    "name": "__init__",
    "source_code": "def __init__(self, widthB=1.0, lengthB=0.2, angleB=0):\n    super().__init__(widthB=widthB, lengthB=lengthB, angleB=angleB)",
    "docstring": "Parameters ---------- widthB : float, default: 1.0 Width of the bracket. lengthB : float, default: 0.2 Length of the bracket. angleB : float, default: 0 degrees Orientation of the bracket, as a counterclockwise angle. 0 degrees means perpendicular to the line.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\patches.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:widthB arg:lengthB arg:angleB arguments arg arg arg arg Call Call"
  },
  {
    "library": "numpy",
    "name": "sorted_block",
    "source_code": "@staticmethod\n@memoize\ndef sorted_block(size, dtype, block_size, rnd):\n    a = np.arange(size, dtype=dtype)\n    b = []\n    if size < block_size:\n        return a\n    block_num = size // block_size\n    for i in range(block_num):\n        b.extend(a[i::block_num])\n    return np.array(b)",
    "docstring": "Returns an array with blocks that are all sorted.",
    "type": "method",
    "file_path": "numpy\\benchmarks\\benchmarks\\bench_function_base.py",
    "ast_data": "FunctionDef name:sorted_block arg:size arg:dtype arg:block_size arg:rnd arguments arg arg arg arg Assign Call Assign If Compare Return return:yes Assign For Call Call Return return:yes Call"
  },
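A standalone sketch of the interleaving trick used by the benchmark helper above (sorted_block_sketch is an illustrative name, not part of numpy): taking strided slices a[i::block_num] of an arange produces block_num runs that are each internally sorted.

```python
import numpy as np

def sorted_block_sketch(size: int, block_size: int) -> np.ndarray:
    # Mirror of the benchmark helper: split 0..size-1 into block_num
    # strided slices; concatenating them yields sorted runs of roughly
    # block_size elements each.
    a = np.arange(size)
    if size < block_size:
        return a
    block_num = size // block_size
    return np.concatenate([a[i::block_num] for i in range(block_num)])

out = sorted_block_sketch(12, 4)
# three interleaved slices -> [0, 3, 6, 9, 1, 4, 7, 10, 2, 5, 8, 11]
print(out)
```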
  {
    "library": "tensorflow",
    "name": "_get_all_trackables",
    "source_code": "def _get_all_trackables(root, exclude_set):\n    all_trackables = trackable_view.TrackableView(root=root).descendants()\n    trackable_index = 0\n    while trackable_index < len(all_trackables) and exclude_set:\n        if all_trackables[trackable_index] in exclude_set:\n            exclude_set.discard(all_trackables[trackable_index])\n            all_trackables.pop(trackable_index)\n        else:\n            trackable_index += 1\n\n    def _trackable_needs_to_be_saved(obj):\n        if hasattr(obj, '__dict__'):\n            if '_serialize_to_tensors' in obj.__dict__ or '_gather_saveables_for_checkpoint' in obj.__dict__ or '_copy_trackable_to_cpu' in obj.__dict__:\n                return True\n        for t in type(obj).mro():\n            if t is base.Trackable:\n                continue\n            elif '_serialize_to_tensors' in t.__dict__ or '_gather_saveables_for_checkpoint' in t.__dict__ or '_copy_trackable_to_cpu' in t.__dict__:\n                return True\n        return False\n    saveable_trackables = [x for x in all_trackables if _trackable_needs_to_be_saved(x)]\n    return (saveable_trackables, all_trackables)",
    "docstring": "Return the list of checkpointable trackables dependent on . Args: root: The root trackable from where we get all its dependent trackables. exclude_set: An ObjectIdentitySet of Trackables to exclude before returning. Each element in is a specific instance of a and appears precisely once in . Returns: saveable_trackables: All trackables that are saveable in (see definition of \"saveable\" in ). A subset of . all_trackables: All trackables returned by 's after excluding . A superset of .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\checkpoint\\async_checkpoint_helper.py",
    "ast_data": "FunctionDef name:_get_all_trackables arg:root arg:exclude_set arguments arg arg Assign Call Call Assign While BoolOp Compare Call If Compare Call Call FunctionDef name:_trackable_needs_to_be_saved arg:obj arguments arg If Call If BoolOp Compare Compare Compare Return return:yes For Call Call If Compare If BoolOp Compare Compare Compare Return return:yes Return return:yes Assign Call Return return:yes"
  },
  {
    "library": "cryptography",
    "name": "key_size",
    "source_code": "@property\n@abc.abstractmethod\ndef key_size(self) -> int:\n    pass",
    "docstring": "The bit length of the prime modulus.",
    "type": "method",
    "file_path": "cryptography\\src\\cryptography\\hazmat\\primitives\\asymmetric\\dsa.py",
    "ast_data": "FunctionDef name:key_size arg:self arguments arg"
  },
  {
    "library": "pandas",
    "name": "maybe_adjust_figure",
    "source_code": "def maybe_adjust_figure(fig: Figure, *args, **kwargs) -> None:\n    if do_adjust_figure(fig):\n        fig.subplots_adjust(*args, **kwargs)",
    "docstring": "Call fig.subplots_adjust unless fig has constrained_layout enabled.",
    "type": "function",
    "file_path": "pandas\\pandas\\plotting\\_matplotlib\\tools.py",
    "ast_data": "FunctionDef name:maybe_adjust_figure arg:fig arguments arg arg arg If Call Call"
  },
  {
    "library": "pandas",
    "name": "_maybe_convert_setitem_value",
    "source_code": "def _maybe_convert_setitem_value(self, value):\n    if lib.is_scalar(value):\n        if isna(value):\n            value = self.dtype.na_value\n        elif not isinstance(value, str):\n            raise TypeError(f\"Invalid value '{value}' for dtype '{self.dtype}'. Value should be a string or missing value, got '{type(value).__name__}' instead.\")\n    else:\n        value = extract_array(value, extract_numpy=True)\n        if not is_array_like(value):\n            value = np.asarray(value, dtype=object)\n        elif isinstance(value.dtype, type(self.dtype)):\n            return value\n        else:\n            value = np.asarray(value)\n        if len(value) and (not lib.is_string_array(value, skipna=True)):\n            raise TypeError(\"Invalid value for dtype 'str'. Value should be a string or missing value (or array of those).\")\n    return value",
    "docstring": "Maybe convert value to be pyarrow compatible.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\arrays\\string_.py",
    "ast_data": "FunctionDef name:_maybe_convert_setitem_value arg:self arg:value arguments arg arg If Call If Call Assign If Call Raise Call Call Assign Call If Call Assign Call If Call Call Return return:yes Assign Call If BoolOp Call Call Raise Call Return return:yes"
  },
  {
    "library": "kornia",
    "name": "build_norm",
    "source_code": "def build_norm(name: str='bn2d', num_features: Optional[int]=None, **kwargs: Any) -> Optional[nn.Module]:\n    if name in ['ln', 'ln2d']:\n        kwargs['normalized_shape'] = num_features\n    else:\n        kwargs['num_features'] = num_features\n    if name in REGISTERED_NORM_DICT:\n        norm_cls = REGISTERED_NORM_DICT[name]\n        args = build_kwargs_from_config(kwargs, norm_cls)\n        return norm_cls(**args)\n    else:\n        return None",
    "docstring": "Return norm op.",
    "type": "function",
    "file_path": "kornia\\kornia\\contrib\\models\\efficient_vit\\nn\\norm.py",
    "ast_data": "FunctionDef name:build_norm arg:name arg:num_features arguments arg arg arg If Compare Assign Assign If Compare Assign Assign Call Return return:yes Call Return return:no"
  },
  {
    "library": "django",
    "name": "_check_fields",
    "source_code": "@classmethod\ndef _check_fields(cls, **kwargs):\n    errors = []\n    for field in cls._meta.local_fields:\n        errors.extend(field.check(**kwargs))\n    for field in cls._meta.local_many_to_many:\n        errors.extend(field.check(from_model=cls, **kwargs))\n    return errors",
    "docstring": "Perform all field checks.",
    "type": "method",
    "file_path": "django\\django\\db\\models\\base.py",
    "ast_data": "FunctionDef name:_check_fields arg:cls arguments arg arg Assign For Call Call For Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "lazy_bind",
    "source_code": "def lazy_bind(concrete_type, unbound_method):\n\n    def lazy_binding_method(cpp_module, *args):\n\n        def init_fn(script_module):\n            orig_class = concrete_type.py_class\n            for name in dir(orig_class):\n                item = getattr(orig_class, name, None)\n                if _jit_internal.is_ignored_fn(item):\n                    setattr(script_module, name, item)\n            for name, value in concrete_type.get_constants().items():\n                setattr(script_module, name, value)\n        script_module = torch.jit.RecursiveScriptModule._construct(cpp_module, init_fn)\n        method = types.MethodType(unbound_method, script_module)\n        return method(*args)\n    lazy_binding_method.original_fn = unbound_method\n    lazy_binding_method.__name__ = unbound_method.__name__\n    torch._jit_internal.copy_torchscript_modifier(unbound_method, lazy_binding_method)\n    return lazy_binding_method",
    "docstring": "Return a function that lazily binds to a provided Module IValue, then invokes the method. We do this so that any Python shenanigans that will poison type sharing are impossible at compile time.",
    "type": "function",
    "file_path": "pytorch\\torch\\jit\\_recursive.py",
    "ast_data": "FunctionDef name:lazy_bind arg:concrete_type arg:unbound_method arguments arg arg FunctionDef name:lazy_binding_method arg:cpp_module arguments arg arg FunctionDef name:init_fn arg:script_module arguments arg Assign For Call Assign Call If Call Call For Call Call Call Assign Call Assign Call Return return:yes Call Assign Assign Call Return return:yes"
  },
  {
    "library": "seaborn",
    "name": "VarType",
    "source_code": "class VarType(UserString):\n    allowed = ('numeric', 'datetime', 'categorical', 'boolean', 'unknown')\n\n    def __init__(self, data):\n        assert data in self.allowed, data\n        super().__init__(data)\n\n    def __eq__(self, other):\n        assert other in self.allowed, other\n        return self.data == other",
    "docstring": "Prevent comparisons elsewhere in the library from using the wrong name. Errors are simple assertions because users should not be able to trigger them. If that changes, they should be more verbose.",
    "type": "class",
    "file_path": "seaborn\\seaborn\\_core\\rules.py",
    "ast_data": "ClassDef name:VarType Assign FunctionDef name:__init__ arg:self arg:data arguments arg arg Compare Call Call FunctionDef name:__eq__ arg:self arg:other arguments arg arg Compare Return return:yes Compare"
  },
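The VarType pattern above (a UserString that asserts membership in a closed vocabulary at construction and comparison time) is easy to reuse; here is a minimal standalone sketch of the same idea (ClosedString is an illustrative name, not part of seaborn).

```python
from collections import UserString

class ClosedString(UserString):
    """A string restricted to a fixed vocabulary; comparisons against
    out-of-vocabulary values fail loudly instead of silently being False."""
    allowed = ("numeric", "datetime", "categorical", "boolean", "unknown")

    def __init__(self, data):
        assert data in self.allowed, data
        super().__init__(data)

    def __eq__(self, other):
        assert other in self.allowed, other
        return self.data == other

v = ClosedString("numeric")
assert v == "numeric"   # fine: both sides are in the vocabulary
# v == "numerc" would raise AssertionError, catching the typo at the
# comparison site rather than quietly returning False
```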
  {
    "library": "tensorflow",
    "name": "get_trtengineop_io_nodes_count",
    "source_code": "def get_trtengineop_io_nodes_count(node, key):\n    return len(node.attr[key].list.type)",
    "docstring": "Returns the number of input/output nodes of a TRTEngineOp.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\compiler\\tensorrt\\utils.py",
    "ast_data": "FunctionDef name:get_trtengineop_io_nodes_count arg:node arg:key arguments arg arg Return return:yes Call"
  },
  {
    "library": "numpy",
    "name": "_get_data",
    "source_code": "def _get_data(self):\n    return ndarray.view(self, self._baseclass)",
    "docstring": "Returns the underlying data, as a view of the masked array. If the underlying data is a subclass of :class:, it is returned as such. >>> x = np.ma.array(np.matrix([[1, 2], [3, 4]]), mask=[[0, 1], [1, 0]]) >>> x.data matrix([[1, 2], [3, 4]]) The type of the data can be accessed through the :attr: attribute.",
    "type": "method",
    "file_path": "numpy\\numpy\\ma\\core.py",
    "ast_data": "FunctionDef name:_get_data arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "disable_lower_using_switch_merge",
    "source_code": "def disable_lower_using_switch_merge(graph_def):\n    output_graph_def = graph_pb2.GraphDef()\n    output_graph_def.CopyFrom(graph_def)\n\n    def disable_control_flow_lowering(node):\n        if node.op in _CONTROL_FLOW_OPS:\n            node.attr['_lower_using_switch_merge'].b = False\n    for node in output_graph_def.node:\n        disable_control_flow_lowering(node)\n    if output_graph_def.library:\n        for func in output_graph_def.library.function:\n            for node in func.node_def:\n                disable_control_flow_lowering(node)\n    return output_graph_def",
    "docstring": "Set '_lower_using_switch_merge' attributes to False. Sets the attribute to False in the NodeDefs in the main graph and the NodeDefs in each function's graph. Args: graph_def: GraphDef proto. Returns: GraphDef",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\convert_to_constants.py",
    "ast_data": "FunctionDef name:disable_lower_using_switch_merge arg:graph_def arguments arg Assign Call Call FunctionDef name:disable_control_flow_lowering arg:node arguments arg If Compare Assign For Call If For For Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "lookup",
    "source_code": "def lookup(self, keys, name=None):\n    key_tensor = keys\n    if isinstance(keys, (sparse_tensor.SparseTensor, internal.RaggedTensor)):\n        key_tensor = keys.values\n    if keys.dtype.base_dtype != self._key_dtype:\n        raise TypeError(f'Dtype of argument `keys` must be {self._key_dtype}, received: {keys.dtype}')\n    with ops.name_scope(name, '%s_Lookup' % self.name, (self.resource_handle, key_tensor, self._default_value)):\n        values = gen_lookup_ops.lookup_table_find_v2(self.resource_handle, key_tensor, self._default_value)\n    values.set_shape(key_tensor.get_shape())\n    if isinstance(keys, sparse_tensor.SparseTensor):\n        return sparse_tensor.SparseTensor(keys.indices, values, keys.dense_shape)\n    elif isinstance(keys, internal.RaggedTensor):\n        return keys.with_values(values)\n    else:\n        return values",
    "docstring": "Looks up in a table, outputs the corresponding values. The is used for keys not present in the table. Args: keys: Keys to look up. May be either a or dense . name: A name for the operation (optional). Returns: A if keys are sparse, a if keys are ragged, otherwise a dense . Raises: TypeError: when or doesn't match the table data types.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\lookup_ops.py",
    "ast_data": "FunctionDef name:lookup arg:self arg:keys arg:name arguments arg arg arg Assign If Call Assign If Compare Raise Call With Call Assign Call Call Call If Call Return return:yes Call If Call Return return:yes Call Return return:yes"
  },
  {
    "library": "scipy",
    "name": "Mishra03",
    "source_code": "class Mishra03(Benchmark):\n\n    def __init__(self, dimensions=2):\n        Benchmark.__init__(self, dimensions)\n        self._bounds = list(zip([-10.0] * self.N, [10.0] * self.N))\n        self.global_optimum = [[-9.99378322, -9.99918927]]\n        self.fglob = -0.19990562\n\n    def fun(self, x, *args):\n        self.nfev += 1\n        return 0.01 * (x[0] + x[1]) + sqrt(abs(cos(sqrt(abs(x[0] ** 2 + x[1] ** 2)))))",
    "docstring": "Mishra 3 objective function. This class defines the Mishra 3 [1]_ global optimization problem. This is a multimodal minimization problem defined as follows: .. math:: f_{\\text{Mishra03}}(x) = \\sqrt{\\lvert \\cos{\\sqrt{\\lvert x_1^2 + x_2^2 \\rvert}} \\rvert} + 0.01(x_1 + x_2) with :math: for :math:. *Global optimum*: :math: for :math: .. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions For Global Optimization Problems Int. Journal of Mathematical Modelling and Numerical Optimisation, 2013, 4, 150-194. TODO: I think that Jamil#76 has the wrong global minimum, a smaller one is possible",
    "type": "class",
    "file_path": "scipy\\benchmarks\\benchmarks\\go_benchmark_functions\\go_funcs_M.py",
    "ast_data": "ClassDef name:Mishra03 FunctionDef name:__init__ arg:self arg:dimensions arguments arg arg Call Assign Call Call Assign Assign FunctionDef name:fun arg:self arg:x arguments arg arg arg Return return:yes Call Call Call Call Call"
  },
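The recorded optimum can be sanity-checked directly from the formula in the docstring, independent of the benchmark harness (mishra03 below is an illustrative standalone re-statement, not the library class):

```python
import numpy as np

def mishra03(x):
    # f(x) = sqrt(|cos(sqrt(|x1^2 + x2^2|))|) + 0.01 * (x1 + x2)
    return (np.sqrt(np.abs(np.cos(np.sqrt(np.abs(x[0]**2 + x[1]**2)))))
            + 0.01 * (x[0] + x[1]))

x_star = np.array([-9.99378322, -9.99918927])
print(mishra03(x_star))   # ~ -0.1999, consistent with fglob above
```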
  {
    "library": "pytorch",
    "name": "kwargs",
    "source_code": "@kwargs.setter\ndef kwargs(self, k: dict[str, Argument]) -> None:\n    self._update_args_kwargs(self._args, k)",
    "docstring": "Set the dict of kwargs to this Node. The interpretation of arguments depends on the node's opcode. See the `` docstring for more information.",
    "type": "method",
    "file_path": "pytorch\\torch\\fx\\node.py",
    "ast_data": "FunctionDef name:kwargs arg:self arg:k arguments arg arg Call"
  },
  {
    "library": "pytorch",
    "name": "ptr",
    "source_code": "def ptr(self, node: IRNode) -> str:\n    if node is None:\n        return 'nullptr'\n    arg_name = self.arg_name(node)\n    if arg_name is None:\n        return 'nullptr'\n    offset = self.offset(node)\n    return arg_name if offset == '0' else f'{arg_name} + {offset}'",
    "docstring": "Generates code which represents pointer of a given node.",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\codegen\\cuda\\cuda_kernel.py",
    "ast_data": "FunctionDef name:ptr arg:self arg:node arguments arg arg If Compare Return return:yes Assign Call If Compare Return return:yes Assign Call Return return:yes Compare"
  },
  {
    "library": "scipy",
    "name": "_split",
    "source_code": "def _split(x, indices_or_sections, axis, xp):\n    Ntotal = x.shape[axis]\n    Nsections = len(indices_or_sections) + 1\n    div_points = [0] + list(indices_or_sections) + [Ntotal]\n    sub_arys = []\n    sary = _swapaxes(x, axis, 0, xp=xp)\n    for i in range(Nsections):\n        st = div_points[i]\n        end = div_points[i + 1]\n        sub_arys.append(_swapaxes(sary[st:end, ...], axis, 0, xp=xp))\n    return sub_arys",
    "docstring": "A simplified version of np.split, with being an list.",
    "type": "function",
    "file_path": "scipy\\scipy\\signal\\_signaltools.py",
    "ast_data": "FunctionDef name:_split arg:x arg:indices_or_sections arg:axis arg:xp arguments arg arg arg arg Assign Assign Call Assign Call Assign Assign Call For Call Assign Assign Call Call Return return:yes"
  },
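The helper above mirrors np.split called with a list of split indices; the div_points bookkeeping it does corresponds to the cut positions in plain numpy, which is easy to check:

```python
import numpy as np

x = np.arange(24).reshape(4, 6)

# np.split with a list of indices cuts [0:2], [2:5], [5:] along axis=1,
# exactly the div_points bookkeeping in _split above.
parts = np.split(x, [2, 5], axis=1)
assert [p.shape for p in parts] == [(4, 2), (4, 3), (4, 1)]
```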
  {
    "library": "tensorflow",
    "name": "_get_results_for_monotonic_comparison",
    "source_code": "def _get_results_for_monotonic_comparison(x, compare_op):\n    x = array_ops.reshape(x, [-1])\n    if not is_numeric_tensor(x):\n        raise TypeError('Expected x to be numeric, instead found: %s' % x)\n    is_shorter_than_two = math_ops.less(array_ops.size(x), 2)\n    short_result = lambda: ops.convert_to_tensor([], dtype=bool)\n    s_len = array_ops.shape(x) - 1\n    diff = lambda: compare_op(array_ops.strided_slice(x, [1], [1] + s_len), array_ops.strided_slice(x, [0], s_len))\n    return cond.cond(is_shorter_than_two, short_result, diff)",
    "docstring": "Gets the difference x[1:] - x[:-1].",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\check_ops.py",
    "ast_data": "FunctionDef name:_get_results_for_monotonic_comparison arg:x arg:compare_op arguments arg arg Assign Call If Call Raise Call Assign Call Call Assign arguments Call Assign Call Assign arguments Call Call Call Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "get_xy2",
    "source_code": "def get_xy2(self):\n    return self._xy2",
    "docstring": "Return the *xy2* value of the line.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\lines.py",
    "ast_data": "FunctionDef name:get_xy2 arg:self arguments arg Return return:yes"
  },
  {
    "library": "kornia",
    "name": "camtoworld_graphics_to_vision_4x4",
    "source_code": "def camtoworld_graphics_to_vision_4x4(extrinsics_graphics: Tensor) -> Tensor:\n    KORNIA_CHECK_SHAPE(extrinsics_graphics, ['B', '4', '4'])\n    invert_yz = tensor([[[1, 0, 0, 0], [0, -1, 0, 0], [0, 0, -1, 0], [0, 0, 0, 1.0]]], dtype=extrinsics_graphics.dtype, device=extrinsics_graphics.device)\n    return extrinsics_graphics @ invert_yz",
    "docstring": "Convert graphics coordinate frame (e.g. OpenGL) to vision coordinate frame (e.g. OpenCV.). I.e. flips y and z axis. Graphics convention: [+x, +y, +z] == [right, up, backwards]. Vision convention: [+x, +y, +z] == [right, down, forwards]. Args: extrinsics_graphics: pose matrix :math:. Returns: extrinsics: pose matrix :math:. Example: >>> ext = torch.eye(4)[None] >>> camtoworld_graphics_to_vision_4x4(ext) tensor([[[ 1., 0., 0., 0.], [ 0., -1., 0., 0.], [ 0., 0., -1., 0.], [ 0., 0., 0., 1.]]])",
    "type": "function",
    "file_path": "kornia\\kornia\\geometry\\conversions.py",
    "ast_data": "FunctionDef name:camtoworld_graphics_to_vision_4x4 arg:extrinsics_graphics arguments arg Call Assign Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "inverse",
    "source_code": "def inverse(self, value):\n    raise ValueError('BoundaryNorm is not invertible')",
    "docstring": "Raises ------ ValueError BoundaryNorm is not invertible, so calling this method will always raise an error",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\colors.py",
    "ast_data": "FunctionDef name:inverse arg:self arg:value arguments arg arg Raise Call"
  },
  {
    "library": "tensorflow",
    "name": "_is_in_control_flow",
    "source_code": "def _is_in_control_flow(self, op):\n    return control_flow_util.IsInCond(op)",
    "docstring": "Returns true if the given op is inside a tf.cond or in tf.while_loop. Args: op: A tensorflow op that should be checked whether in control flow or not. Returns: A boolean value whether the op is in control flow or not.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\tpu\\tensor_tracer.py",
    "ast_data": "FunctionDef name:_is_in_control_flow arg:self arg:op arguments arg arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "reduce_join_v2",
    "source_code": "@tf_export('strings.reduce_join', v1=[])\n@dispatch.add_dispatch_support\ndef reduce_join_v2(inputs, axis=None, keepdims=False, separator='', name=None):\n    with ops.name_scope(None, 'ReduceJoin', [inputs, axis]):\n        inputs_t = ops.convert_to_tensor(inputs)\n        axis = _reduce_join_reduction_dims(inputs_t, axis)\n        return gen_string_ops.reduce_join(inputs=inputs_t, reduction_indices=axis, keep_dims=keepdims, separator=separator, name=name)",
    "docstring": "Joins all strings into a single string, or joins along an axis. This is the reduction operation for the elementwise op. >>> tf.strings.reduce_join([['abc','123'], ... ['def','456']]).numpy() b'abc123def456' >>> tf.strings.reduce_join([['abc','123'], ... ['def','456']], axis=-1).numpy() array([b'abc123', b'def456'], dtype=object) >>> tf.strings.reduce_join([['abc','123'], ... ['def','456']], ... axis=-1, ... separator=\" \").numpy() array([b'abc 123', b'def 456'], dtype=object) Args: inputs: A tensor. axis: Which axis to join along. The default behavior is to join all elements, producing a scalar. keepdims: If true, retains reduced dimensions with length 1. separator: a string added between each string being joined. name: A name for the operation (optional). Returns: A tensor.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\string_ops.py",
    "ast_data": "FunctionDef name:reduce_join_v2 arg:inputs arg:axis arg:keepdims arg:separator arg:name arguments arg arg arg arg arg With Call Assign Call Assign Call Return return:yes Call Call"
  },
  {
    "library": "scipy",
    "name": "init_population_lhs",
    "source_code": "def init_population_lhs(self):\n    rng = self.random_number_generator\n    segsize = 1.0 / self.num_population_members\n    samples = segsize * rng.uniform(size=self.population_shape) + np.linspace(0.0, 1.0, self.num_population_members, endpoint=False)[:, np.newaxis]\n    self.population = np.zeros_like(samples)\n    for j in range(self.parameter_count):\n        order = rng.permutation(range(self.num_population_members))\n        self.population[:, j] = samples[order, j]\n    self.population_energies = np.full(self.num_population_members, np.inf)\n    self._nfev = 0",
    "docstring": "Initializes the population with Latin Hypercube Sampling. Latin Hypercube Sampling ensures that each parameter is uniformly sampled over its range.",
    "type": "method",
    "file_path": "scipy\\scipy\\optimize\\_differentialevolution.py",
    "ast_data": "FunctionDef name:init_population_lhs arg:self arguments arg Assign Assign Assign Call Call Assign Call For Call Assign Call Call Assign Assign Call Assign"
  },
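For the stratification idea described above (each parameter sampled exactly once per equal-probability segment of its range), SciPy also exposes a public generator, scipy.stats.qmc.LatinHypercube; a minimal sketch of the defining property:

```python
import numpy as np
from scipy.stats import qmc

sampler = qmc.LatinHypercube(d=2, seed=0)
sample = sampler.random(n=8)          # 8 points in [0, 1)^2

# Latin Hypercube property: along each dimension, exactly one point
# falls in each of the 8 equal-width bins.
bins = np.floor(sample * 8).astype(int)
for j in range(2):
    assert sorted(bins[:, j]) == list(range(8))
```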
  {
    "library": "pytorch",
    "name": "_init_template",
    "source_code": "def _init_template(self, module_interface_cls, enable_moving_cpu_tensors_to_cuda):\n    generated_module = instantiator.instantiate_scriptable_remote_module_template(module_interface_cls, enable_moving_cpu_tensors_to_cuda)\n    self.generated_methods = generated_module._generated_methods",
    "docstring": "Instantiate template on local side.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\nn\\api\\remote_module.py",
    "ast_data": "FunctionDef name:_init_template arg:self arg:module_interface_cls arg:enable_moving_cpu_tensors_to_cuda arguments arg arg arg Assign Call Assign"
  },
  {
    "library": "cherrypy",
    "name": "annotated_file",
    "source_code": "def annotated_file(self, filename, statements, excluded, missing):\n    with open(filename, 'r') as source:\n        lines = source.readlines()\n    buffer = []\n    for lineno, line in enumerate(lines):\n        lineno += 1\n        line = line.strip('\\n\\r')\n        empty_the_buffer = True\n        if lineno in excluded:\n            template = TEMPLATE_LOC_EXCLUDED\n        elif lineno in missing:\n            template = TEMPLATE_LOC_NOT_COVERED\n        elif lineno in statements:\n            template = TEMPLATE_LOC_COVERED\n        else:\n            empty_the_buffer = False\n            buffer.append((lineno, line))\n        if empty_the_buffer:\n            for lno, pastline in buffer:\n                yield (template % (lno, html.escape(pastline)))\n            buffer = []\n            yield (template % (lineno, html.escape(line)))",
    "docstring": "Annotate given file with coverage information.",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\lib\\covercp.py",
    "ast_data": "FunctionDef name:annotated_file arg:self arg:filename arg:statements arg:excluded arg:missing arguments arg arg arg arg arg With Call Assign Call Assign For Call Assign Call Assign If Compare Assign If Compare Assign If Compare Assign Assign Call If For Call Assign Call"
  },
  {
    "library": "tensorflow",
    "name": "SaveableCompatibilityConverter",
    "source_code": "class SaveableCompatibilityConverter(trackable.Trackable):\n    __slots__ = ('_obj', '_saveables')\n\n    def __init__(self, obj, saveables):\n        self._obj = obj\n        self._saveables = saveables\n\n    @property\n    def obj(self):\n        return self._obj\n\n    @property\n    def saveables(self):\n        return self._saveables\n\n    def _serialize_to_tensors(self):\n        return saveable_object_to_tensor_dict(self.saveables)\n\n    def _restore_from_tensors(self, restored_tensors):\n        expected_keys = []\n        for saveable in self.saveables:\n            expected_keys.extend((trackable_utils.extract_local_name(_convert_to_string(spec.name)) for spec in saveable.specs))\n        if set(expected_keys) != restored_tensors.keys():\n            raise ValueError(f'Could not restore object {self._obj} because not all expected tensors were in the checkpoint.\\n\\tExpected: {expected_keys}\\n\\tGot: {list(restored_tensors.keys())}')\n        return saveable_object_to_restore_fn(self.saveables)(restored_tensors)",
    "docstring": "Converts object's to functions used in TF2 checkpointing. A class that converts a Trackable object's to save and restore functions with the same signatures as and . This class also produces a method for filling the object proto.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\training\\saving\\saveable_object_util.py",
    "ast_data": "ClassDef name:SaveableCompatibilityConverter Assign FunctionDef name:__init__ arg:self arg:obj arg:saveables arguments arg arg arg Assign Assign FunctionDef name:obj arg:self arguments arg Return return:yes FunctionDef name:saveables arg:self arguments arg Return return:yes FunctionDef name:_serialize_to_tensors arg:self arguments arg Return return:yes Call FunctionDef name:_restore_from_tensors arg:self arg:restored_tensors arguments arg arg Assign For Call Call Call If Compare Call Call Raise Call Call Call Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "gather_initializers",
    "source_code": "def gather_initializers(root_trackable):\n    trackable_objects = list_objects(root_trackable)\n    return [c.initializer for c in trackable_objects if hasattr(c, 'initializer') and c.initializer is not None]",
    "docstring": "Traverse the object graph and find initialization ops. Looks for objects which are dependencies of and which have an property. Includes initializers for slot variables only if the variable they are slotting for and the optimizer are dependencies of (i.e. if they would be saved with a checkpoint). Args: root_trackable: A object to gather initializers for. Returns: A list of initialization ops.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\checkpoint\\checkpoint.py",
    "ast_data": "FunctionDef name:gather_initializers arg:root_trackable arguments arg Assign Call Return return:yes BoolOp Call Compare"
  },
  {
    "library": "sphinx",
    "name": "IntersphinxRoleResolver",
    "source_code": "class IntersphinxRoleResolver(ReferencesResolver):\n    default_priority = ReferencesResolver.default_priority - 1\n\n    def run(self, **kwargs: Any) -> None:\n        for node in self.document.findall(pending_xref):\n            if 'intersphinx' not in node:\n                continue\n            contnode = cast('nodes.TextElement', node[0].deepcopy())\n            inv_name = node['inventory']\n            if inv_name is not None:\n                assert inventory_exists(self.env, inv_name)\n                newnode = resolve_reference_in_inventory(self.env, inv_name, node, contnode)\n            else:\n                newnode = resolve_reference_any_inventory(self.env, False, node, contnode)\n            if newnode is None:\n                typ = node['reftype']\n                msg = __('external %s:%s reference target not found: %s') % (node['refdomain'], typ, node['reftarget'])\n                LOGGER.warning(msg, location=node, type='ref', subtype=typ)\n                node.replace_self(contnode)\n            else:\n                node.replace_self(newnode)",
    "docstring": "pending_xref node resolver for intersphinx role. This resolves pending_xref nodes generated by :intersphinx:***: role.",
    "type": "class",
    "file_path": "sphinx\\sphinx\\ext\\intersphinx\\_resolve.py",
    "ast_data": "ClassDef name:IntersphinxRoleResolver Assign FunctionDef name:run arg:self arguments arg arg For Call If Compare Assign Call Call Assign If Compare Call Assign Call Assign Call If Compare Assign Assign Call Call Call Call"
  },
  {
    "library": "numpy",
    "name": "markinnerspaces",
    "source_code": "def markinnerspaces(line):\n    fragment = ''\n    inside = False\n    current_quote = None\n    escaped = ''\n    for c in line:\n        if escaped == '\\\\' and c in ['\\\\', \"'\", '\"']:\n            fragment += c\n            escaped = c\n            continue\n        if not inside and c in [\"'\", '\"']:\n            current_quote = c\n        if c == current_quote:\n            inside = not inside\n        elif c == ' ' and inside:\n            fragment += '@_@'\n            continue\n        fragment += c\n        escaped = c\n    return fragment",
    "docstring": "The function replace all spaces in the input variable line which are surrounded with quotation marks, with the triplet \"@_@\". For instance, for the input \"a 'b c'\" the function returns \"a 'b@_@c'\" Parameters ---------- line : str Returns ------- str",
    "type": "function",
    "file_path": "numpy\\numpy\\f2py\\crackfortran.py",
    "ast_data": "FunctionDef name:markinnerspaces arg:line arguments arg Assign Assign Assign Assign For If BoolOp Compare Compare Assign If BoolOp Compare Assign If Compare Assign If BoolOp Compare Assign Return return:yes"
  },
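Assuming the function is importable from numpy.f2py.crackfortran (the module this entry points at), the docstring's example can be reproduced directly:

```python
from numpy.f2py.crackfortran import markinnerspaces

assert markinnerspaces("a 'b c'") == "a 'b@_@c'"
# spaces outside quotes are untouched; both quote styles are handled
assert markinnerspaces('x "y z" w') == 'x "y@_@z" w'
```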
  {
    "library": "cherrypy",
    "name": "index",
    "source_code": "@cherrypy.expose\ndef index(self):\n    return 'We have an <a href=\"show_msg\">important message</a> for you!'",
    "docstring": "Produce HTTP response body of hello world app index URI.",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\tutorial\\tut02_expose_methods.py",
    "ast_data": "FunctionDef name:index arg:self arguments arg Return return:yes"
  },
  {
    "library": "pandas",
    "name": "interpolate_2d_inplace",
    "source_code": "def interpolate_2d_inplace(data: np.ndarray, index: Index, axis: AxisInt, method: str='linear', limit: int | None=None, limit_direction: str='forward', limit_area: str | None=None, fill_value: Any | None=None, mask=None, **kwargs) -> None:\n    clean_interp_method(method, index, **kwargs)\n    if is_valid_na_for_dtype(fill_value, data.dtype):\n        fill_value = na_value_for_dtype(data.dtype, compat=False)\n    if method == 'time':\n        if not needs_i8_conversion(index.dtype):\n            raise ValueError('time-weighted interpolation only works on Series or DataFrames with a DatetimeIndex')\n        method = 'values'\n    limit_direction = validate_limit_direction(limit_direction)\n    limit_area_validated = validate_limit_area(limit_area)\n    limit = algos.validate_limit(nobs=None, limit=limit)\n    indices = _index_to_interp_indices(index, method)\n\n    def func(yvalues: np.ndarray) -> None:\n        _interpolate_1d(indices=indices, yvalues=yvalues, method=method, limit=limit, limit_direction=limit_direction, limit_area=limit_area_validated, fill_value=fill_value, bounds_error=False, mask=mask, **kwargs)\n    np.apply_along_axis(func, axis, data)",
    "docstring": "Column-wise application of _interpolate_1d. Notes ----- Alters 'data' in-place. The signature does differ from _interpolate_1d because it only includes what is needed for Block.interpolate.",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\missing.py",
    "ast_data": "FunctionDef name:interpolate_2d_inplace arg:data arg:index arg:axis arg:method arg:limit arg:limit_direction arg:limit_area arg:fill_value arg:mask arguments arg arg arg arg arg arg arg arg arg arg Call If Call Assign Call If Compare If Call Raise Call Assign Assign Call Assign Call Assign Call Assign Call FunctionDef name:func arg:yvalues arguments arg Call Call"
  },
  {
    "library": "tensorflow",
    "name": "OutsideCompilationV2Context",
    "source_code": "class OutsideCompilationV2Context(control_flow_ops.ControlFlowContext):\n\n    def __init__(self, name: Text, is_map_outside_compilation=False):\n        control_flow_ops.ControlFlowContext.__init__(self)\n        self._name = name\n        self._is_map_outside_compilation = is_map_outside_compilation\n\n    def AddOp(self, op: ops.Operation) -> None:\n        if self._outer_context:\n            self._outer_context.AddOp(op)\n        self._set_outside_compilation_attributes(op)\n\n    def AddInnerOp(self, op: ops.Operation) -> None:\n        if self._outer_context:\n            self._outer_context.AddInnerOp(op)\n        self._set_outside_compilation_attributes(op)\n\n    def to_control_flow_context_def(self, context_def, export_scope=None):\n        raise NotImplementedError\n\n    def _set_outside_compilation_attributes(self, op: ops.Operation) -> None:\n        op._set_attr(_OUTSIDE_COMPILATION_ATTR, attr_value_pb2.AttrValue(s=compat.as_bytes(self._name)))\n        if self._is_map_outside_compilation:\n            op._set_attr(_MAP_OUTSIDE_COMPILATION_ATTR, attr_value_pb2.AttrValue(b=True))",
    "docstring": "The context for outside compilation in Tensorflow 2.0. Every op added in this context will be assigned an _xla_outside_compilation attribute.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\tpu\\tpu_replication.py",
    "ast_data": "ClassDef name:OutsideCompilationV2Context FunctionDef name:__init__ arg:self arg:name arg:is_map_outside_compilation arguments arg arg arg Call Assign Assign FunctionDef name:AddOp arg:self arg:op arguments arg arg If Call Call FunctionDef name:AddInnerOp arg:self arg:op arguments arg arg If Call Call FunctionDef name:to_control_flow_context_def arg:self arg:context_def arg:export_scope arguments arg arg arg Raise FunctionDef name:_set_outside_compilation_attributes arg:self arg:op arguments arg arg Call Call Call If Call Call"
  },
  {
    "library": "scipy",
    "name": "_diff_dual_poly",
    "source_code": "def _diff_dual_poly(j, k, y, d, t):\n    if d == 0:\n        return _dual_poly(j, k, t, y)\n    if d == k:\n        return poch(1, k)\n    comb = list(combinations(range(j + 1, j + k + 1), d))\n    res = 0\n    for i in range(len(comb) * len(comb[0])):\n        res += np.prod([y - t[j + p] for p in range(1, k + 1) if j + p not in comb[i // d]])\n    return res",
    "docstring": "d-th derivative of the dual polynomial $p_{j,k}(y)$",
    "type": "function",
    "file_path": "scipy\\scipy\\interpolate\\_bsplines.py",
    "ast_data": "FunctionDef name:_diff_dual_poly arg:j arg:k arg:y arg:d arg:t arguments arg arg arg arg arg If Compare Return return:yes Call If Compare Return return:yes Call Assign Call Call Call Assign For Call Call Call Call Call Compare Return return:yes"
  },
  {
    "library": "scipy",
    "name": "aps13_f",
    "source_code": "def aps13_f(x):\n    if x == 0:\n        return 0\n    y = 1 / x ** 2\n    if y > _MAX_EXPABLE:\n        return 0\n    return x / np.exp(y)",
    "docstring": "Function with *all* derivatives 0 at the root",
    "type": "function",
    "file_path": "scipy\\scipy\\optimize\\_tstutils.py",
    "ast_data": "FunctionDef name:aps13_f arg:x arguments arg If Compare Return return:yes Assign If Compare Return return:yes Return return:yes Call"
  },
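Because every derivative vanishes at the root, this test function is a worst case for Newton-type methods, while a bracketing method such as scipy.optimize.brentq still converges. A standalone sketch using the same formula (aps13 is an illustrative re-statement; writing the exponent negatively makes the overflow guard in the original unnecessary):

```python
import numpy as np
from scipy.optimize import brentq

def aps13(x):
    # x * exp(-1/x**2): all derivatives are 0 at the root x = 0.
    # exp(-1/x**2) merely underflows to 0 near the root, so no guard
    # against exp overflow is needed in this form.
    if x == 0:
        return 0.0
    return x * np.exp(-1.0 / x**2)

root = brentq(aps13, -1.0, 1.0)   # f(-1) < 0 < f(1) brackets the root
print(root)   # very close to 0; the flat region makes high accuracy hard
```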
  {
    "library": "pytorch",
    "name": "is_onnx",
    "source_code": "def is_onnx(domain: str) -> bool:\n    return domain == 'onnx'",
    "docstring": "Check if the domain is official.",
    "type": "function",
    "file_path": "pytorch\\torch\\onnx\\_internal\\jit_utils.py",
    "ast_data": "FunctionDef name:is_onnx arg:domain arguments arg Return return:yes Compare"
  },
  {
    "library": "seaborn",
    "name": "add_gutters",
    "source_code": "def add_gutters(self, points, center, trans_fwd, trans_inv):\n    half_width = self.width / 2\n    low_gutter = trans_inv(trans_fwd(center) - half_width)\n    off_low = points < low_gutter\n    if off_low.any():\n        points[off_low] = low_gutter\n    high_gutter = trans_inv(trans_fwd(center) + half_width)\n    off_high = points > high_gutter\n    if off_high.any():\n        points[off_high] = high_gutter\n    gutter_prop = (off_high + off_low).sum() / len(points)\n    if gutter_prop > self.warn_thresh:\n        msg = '{:.1%} of the points cannot be placed; you may want to decrease the size of the markers or use stripplot.'.format(gutter_prop)\n        warnings.warn(msg, UserWarning)\n    return points",
    "docstring": "Stop points from extending beyond their territory.",
    "type": "method",
    "file_path": "seaborn\\seaborn\\categorical.py",
    "ast_data": "FunctionDef name:add_gutters arg:self arg:points arg:center arg:trans_fwd arg:trans_inv arguments arg arg arg arg arg Assign Assign Call Call Assign Compare If Call Assign Assign Call Call Assign Compare If Call Assign Assign Call Call If Compare Assign Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_ready_to_schedule",
    "source_code": "def _ready_to_schedule(action: Optional[_Action], prev_actions: set[_Action]) -> bool:\n    if action is None:\n        return True\n    elif action.computation_type == F and (not action.stage_index == 0):\n        if _Action(action.stage_index, RECV_F, action.microbatch_index) in prev_actions:\n            return True\n        elif _Action(action.stage_index - 1, F, action.microbatch_index) in prev_actions:\n            return True\n        return False\n    elif action.computation_type in (BACKWARD_INPUT, FULL_BACKWARD) and (not action.stage_index == num_stages - 1):\n        if _Action(action.stage_index, RECV_B, action.microbatch_index) in prev_actions:\n            return True\n        elif _Action(action.stage_index + 1, BACKWARD_INPUT, action.microbatch_index) in prev_actions:\n            return True\n        elif _Action(action.stage_index + 1, FULL_BACKWARD, action.microbatch_index) in prev_actions:\n            return True\n        return False\n    else:\n        return True",
    "docstring": "We don't put our own recv ops in the schedule, we let a sender on another rank put our recv ops in place. This helps ensure a sane (non-hanging) ordering of sends and recvs. But it also means we might not be able to schedule our next compute action yet.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\pipelining\\schedules.py",
    "ast_data": "FunctionDef name:_ready_to_schedule arg:action arg:prev_actions arguments arg arg If Compare Return return:yes If BoolOp Compare Compare If Compare Call Return return:yes If Compare Call Return return:yes Return return:yes If BoolOp Compare Compare If Compare Call Return return:yes If Compare Call Return return:yes If Compare Call Return return:yes Return return:yes Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "all_input_nodes",
    "source_code": "@property\ndef all_input_nodes(self) -> list['Node']:\n    return list(self._input_nodes.keys())",
    "docstring": "Return all Nodes that are inputs to this Node. This is equivalent to iterating over ``, in that order.",
    "type": "method",
    "file_path": "pytorch\\torch\\fx\\node.py",
    "ast_data": "FunctionDef name:all_input_nodes arg:self arguments arg Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "wrapped_cond",
    "source_code": "def wrapped_cond(loop_counter, maximum_iterations_arg, *args):\n    pred = cond(*_pack_sequence_as(loop_vars_signature, flat_orig_loop_vars, args))\n    if tensor_util.is_tf_type(pred) and (pred.shape.dims is None or pred.shape.dims):\n        pred = array_ops.squeeze_v2(pred)\n    if maximum_iterations is None:\n        return pred\n    else:\n        return math_ops.logical_and(loop_counter < maximum_iterations_arg, pred)",
    "docstring": "Extra wrapper that can handle the extra counter loop_var.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\while_v2.py",
    "ast_data": "FunctionDef name:wrapped_cond arg:loop_counter arg:maximum_iterations_arg arguments arg arg arg Assign Call Call If BoolOp Call BoolOp Compare Assign Call If Compare Return return:yes Return return:yes Call Compare"
  },
  {
    "library": "tensorflow",
    "name": "_get_control_flow_context",
    "source_code": "def _get_control_flow_context(self):\n    return self._control_flow_context",
    "docstring": "Returns the control flow context of this op. Returns: A context object.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\ops.py",
    "ast_data": "FunctionDef name:_get_control_flow_context arg:self arguments arg Return return:yes"
  },
  {
    "library": "numpy",
    "name": "_parse_input_dimensions",
    "source_code": "def _parse_input_dimensions(args, input_core_dims):\n    broadcast_args = []\n    dim_sizes = {}\n    for arg, core_dims in zip(args, input_core_dims):\n        _update_dim_sizes(dim_sizes, arg, core_dims)\n        ndim = arg.ndim - len(core_dims)\n        dummy_array = np.lib.stride_tricks.as_strided(0, arg.shape[:ndim])\n        broadcast_args.append(dummy_array)\n    broadcast_shape = np.lib._stride_tricks_impl._broadcast_shape(*broadcast_args)\n    return (broadcast_shape, dim_sizes)",
    "docstring": "Parse broadcast and core dimensions for vectorize with a signature. Arguments --------- args : Tuple[ndarray, ...] Tuple of input arguments to examine. input_core_dims : List[Tuple[str, ...]] List of core dimensions corresponding to each input. Returns ------- broadcast_shape : Tuple[int, ...] Common shape to broadcast all non-core dimensions to. dim_sizes : Dict[str, int] Common sizes for named core dimensions.",
    "type": "function",
    "file_path": "numpy\\numpy\\lib\\_function_base_impl.py",
    "ast_data": "FunctionDef name:_parse_input_dimensions arg:args arg:input_core_dims arguments arg arg Assign Assign For Call Call Assign Call Assign Call Call Assign Call Return return:yes"
  },
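The dimension bookkeeping above is what backs the public np.vectorize(..., signature=...) feature; a small example of the broadcast/core-dimension split it computes (pairwise_dot is an illustrative name):

```python
import numpy as np

# core dimension '(n)' is consumed by the dot product; the remaining
# leading dimensions are broadcast together, as in _parse_input_dimensions.
pairwise_dot = np.vectorize(lambda a, b: a @ b, signature='(n),(n)->()')

a = np.random.rand(4, 1, 3)   # broadcast dims (4, 1), core dim n=3
b = np.random.rand(5, 3)      # broadcast dims (5,),  core dim n=3
out = pairwise_dot(a, b)
assert out.shape == (4, 5)    # the broadcast_shape of (4, 1) and (5,)
```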
  {
    "library": "pytorch",
    "name": "get_fusion_pair_priority",
    "source_code": "def get_fusion_pair_priority(self, node1: BaseSchedulerNode, node2: BaseSchedulerNode) -> int:\n    return 0",
    "docstring": "Return an unsigned integer which represents the priority of this fusion pair. The smaller is with higher priority.",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\scheduler.py",
    "ast_data": "FunctionDef name:get_fusion_pair_priority arg:self arg:node1 arg:node2 arguments arg arg arg Return return:yes"
  },
  {
    "library": "scipy",
    "name": "_arc_jac_sc1",
    "source_code": "def _arc_jac_sc1(w, m):\n    zcomplex = _arc_jac_sn(1j * w, m)\n    if abs(zcomplex.real) > 1e-14:\n        raise ValueError\n    return zcomplex.imag",
    "docstring": "Real inverse Jacobian sc, with complementary modulus Solve for z in w = sc(z, 1-m) w - real scalar m - modulus From [1], sc(z, m) = -i * sn(i * z, 1 - m) References ---------- # noqa: E501 .. [1] \"Representations through other Jacobi functions\"",
    "type": "function",
    "file_path": "scipy\\scipy\\signal\\_filter_design.py",
    "ast_data": "FunctionDef name:_arc_jac_sc1 arg:w arg:m arguments arg arg Assign Call If Compare Call Raise Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_MatrixTriangularSolveGrad",
    "source_code": "@ops.RegisterGradient('MatrixTriangularSolve')\ndef _MatrixTriangularSolveGrad(op: ops.Operation, grad):\n    a = op.inputs[0]\n    b = op.inputs[1]\n    adjoint_a = op.get_attr('adjoint')\n    lower_a = op.get_attr('lower')\n    c = op.outputs[0]\n    grad_b = linalg_ops.matrix_triangular_solve(a, grad, lower=lower_a, adjoint=not adjoint_a)\n    if adjoint_a:\n        grad_a = -math_ops.matmul(c, grad_b, adjoint_b=True)\n    else:\n        grad_a = -math_ops.matmul(grad_b, c, adjoint_b=True)\n    if lower_a:\n        grad_a = array_ops.matrix_band_part(grad_a, -1, 0)\n    else:\n        grad_a = array_ops.matrix_band_part(grad_a, 0, -1)\n    if a.shape.is_fully_defined() and b.shape.is_fully_defined() and (a.shape[:-2] == b.shape[:-2]):\n        return (grad_a, grad_b)\n    a_shape = array_ops.shape(a)\n    b_shape = array_ops.shape(b)\n    ra, rb = array_ops.broadcast_gradient_args(a_shape[:-2], b_shape[:-2])\n    grad_a = array_ops.reshape(math_ops.reduce_sum(grad_a, axis=ra), a_shape)\n    grad_b = array_ops.reshape(math_ops.reduce_sum(grad_b, axis=rb), b_shape)\n    return (grad_a, grad_b)",
    "docstring": "Gradient for MatrixTriangularSolve.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\linalg_grad.py",
    "ast_data": "FunctionDef name:_MatrixTriangularSolveGrad arg:op arg:grad arguments arg arg Assign Assign Assign Call Assign Call Assign Assign Call If Assign Call Assign Call If Assign Call Assign Call If BoolOp Call Call Compare Return return:yes Assign Call Assign Call Assign Call Assign Call Call Assign Call Call Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "_graph_is_connected",
    "source_code": "def _graph_is_connected(graph):\n    if sparse.issparse(graph):\n        accept_large_sparse = sp_version >= parse_version('1.11.3')\n        graph = check_array(graph, accept_sparse=True, accept_large_sparse=accept_large_sparse)\n        n_connected_components, _ = connected_components(graph)\n        return n_connected_components == 1\n    else:\n        return _graph_connected_component(graph, 0).sum() == graph.shape[0]",
    "docstring": "Return whether the graph is connected (True) or Not (False). Parameters ---------- graph : {array-like, sparse matrix} of shape (n_samples, n_samples) Adjacency matrix of the graph, non-zero weight means an edge between the nodes. Returns ------- is_connected : bool True means the graph is fully connected and False means not.",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\manifold\\_spectral_embedding.py",
    "ast_data": "FunctionDef name:_graph_is_connected arg:graph arguments arg If Call Assign Compare Call Assign Call Assign Call Return return:yes Compare Return return:yes Compare Call Call"
  },
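The connectivity test above reduces, in the sparse case, to scipy.sparse.csgraph.connected_components; a minimal check on a graph with two components:

```python
import numpy as np
from scipy.sparse import csr_array
from scipy.sparse.csgraph import connected_components

# two disjoint edges: {0, 1} and {2, 3}
graph = csr_array(np.array([
    [0, 1, 0, 0],
    [1, 0, 0, 0],
    [0, 0, 0, 1],
    [0, 0, 1, 0],
]))
n_components, labels = connected_components(graph)
print(n_components, labels)   # 2, e.g. [0 0 1 1] -> graph is not connected
```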
  {
    "library": "pytorch",
    "name": "_create_act_recv_info",
    "source_code": "def _create_act_recv_info(self):\n\n    def create_recv_tensor(placeholder, arg_node):\n        example_value = placeholder.meta['val']\n        if arg_node.op == 'placeholder':\n            return _RootArgPlaceholder(example_value)\n        while arg_node.target is operator.getitem:\n            arg_node = arg_node.args[0]\n        assert arg_node.op == 'call_module', f'Expecting call_module, got {arg_node.op}'\n        src_stage = self.get_stage_index_of_submod(arg_node.name)\n        logger.debug(\"%s Creating recv buffer for input '%s' : %s, %s\", self.log_prefix, placeholder.name, example_value.shape, example_value.dtype)\n        buffer = _make_tensor_from_meta(example_value, self.device)\n        if self.has_backward:\n            buffer.requires_grad_(True)\n        return _RecvInfo(arg_node.name, src_stage, buffer)\n    args_recv_info: list[InputInfo] = []\n    placeholders = filter(lambda node: node.op == 'placeholder', self.submod.graph.nodes)\n    for placeholder, arg_node in zip(placeholders, self.node.args):\n        recv_info = create_recv_tensor(placeholder, arg_node)\n        args_recv_info.append(recv_info)\n    logger.debug('%s Activation recv / args info: %s', self.log_prefix, args_recv_info)\n    return tuple(args_recv_info)",
    "docstring": "Create a tuple of for inputs to the stage.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\pipelining\\stage.py",
    "ast_data": "FunctionDef name:_create_act_recv_info arg:self arguments arg FunctionDef name:create_recv_tensor arg:placeholder arg:arg_node arguments arg arg Assign If Compare Return return:yes Call While Compare Assign Compare Assign Call Call Assign Call If Call Return return:yes Call Assign Call arguments arg Compare For Call Assign Call Call Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "get_config",
    "source_code": "def get_config(self):\n    return {'name': self.name, 'dtype': self.dtype}",
    "docstring": "Returns the serializable config of the metric.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\metrics.py",
    "ast_data": "FunctionDef name:get_config arg:self arguments arg Return return:yes"
  },
  {
    "library": "numpy",
    "name": "lagvander",
    "source_code": "def lagvander(x, deg):\n    ideg = pu._as_int(deg, 'deg')\n    if ideg < 0:\n        raise ValueError('deg must be non-negative')\n    x = np.array(x, copy=None, ndmin=1) + 0.0\n    dims = (ideg + 1,) + x.shape\n    dtyp = x.dtype\n    v = np.empty(dims, dtype=dtyp)\n    v[0] = x * 0 + 1\n    if ideg > 0:\n        v[1] = 1 - x\n        for i in range(2, ideg + 1):\n            v[i] = (v[i - 1] * (2 * i - 1 - x) - v[i - 2] * (i - 1)) / i\n    return np.moveaxis(v, 0, -1)",
    "docstring": "Pseudo-Vandermonde matrix of given degree. Returns the pseudo-Vandermonde matrix of degree and sample points . The pseudo-Vandermonde matrix is defined by .. math:: V[..., i] = L_i(x) where ``0 >> import numpy as np >>> from numpy.polynomial.laguerre import lagvander >>> x = np.array([0, 1, 2]) >>> lagvander(x, 3) array([[ 1. , 1. , 1. , 1. ], [ 1. , 0. , -0.5 , -0.66666667], [ 1. , -1. , -1. , -0.33333333]])",
    "type": "function",
    "file_path": "numpy\\numpy\\polynomial\\laguerre.py",
    "ast_data": "FunctionDef name:lagvander arg:x arg:deg arguments arg arg Assign Call If Compare Raise Call Assign Call Assign Assign Assign Call Assign If Compare Assign For Call Assign Return return:yes Call"
  },
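The equivalence that makes the pseudo-Vandermonde matrix useful (matrix product versus direct series evaluation) can be checked with the public numpy API:

```python
import numpy as np
from numpy.polynomial.laguerre import lagvander, lagval

x = np.linspace(0.0, 2.0, 5)
c = np.array([1.0, -2.0, 0.5, 3.0])   # Laguerre coefficients, degree 3

V = lagvander(x, 3)                   # shape (5, 4)
# V @ c evaluates the Laguerre series, matching lagval up to roundoff
assert np.allclose(V @ c, lagval(x, c))
```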
  {
    "library": "django",
    "name": "BaseFinder",
    "source_code": "class BaseFinder:\n\n    def check(self, **kwargs):\n        raise NotImplementedError('subclasses may provide a check() method to verify the finder is configured correctly.')\n\n    def _check_deprecated_find_param(self, **kwargs):\n        return _check_deprecated_find_param(class_name=self.__class__.__qualname__, stacklevel=4, **kwargs)\n\n    def find(self, path, find_all=False, **kwargs):\n        raise NotImplementedError('subclasses of BaseFinder must provide a find() method')\n\n    def list(self, ignore_patterns):\n        raise NotImplementedError('subclasses of BaseFinder must provide a list() method')",
    "docstring": "A base file finder to be used for custom staticfiles finder classes.",
    "type": "class",
    "file_path": "django\\django\\contrib\\staticfiles\\finders.py",
    "ast_data": "ClassDef name:BaseFinder FunctionDef name:check arg:self arguments arg arg Raise Call FunctionDef name:_check_deprecated_find_param arg:self arguments arg arg Return return:yes Call FunctionDef name:find arg:self arg:path arg:find_all arguments arg arg arg arg Raise Call FunctionDef name:list arg:self arg:ignore_patterns arguments arg arg Raise Call"
  },
  {
    "library": "pandas",
    "name": "_reduce_axis1",
    "source_code": "def _reduce_axis1(self, name: str, func, skipna: bool) -> Series:\n    if name == 'all':\n        result = np.ones(len(self), dtype=bool)\n        ufunc = np.logical_and\n    elif name == 'any':\n        result = np.zeros(len(self), dtype=bool)\n        ufunc = np.logical_or\n    else:\n        raise NotImplementedError(name)\n    for blocks in self._mgr.blocks:\n        middle = func(blocks.values, axis=0, skipna=skipna)\n        result = ufunc(result, middle)\n    res_ser = self._constructor_sliced(result, index=self.index, copy=False)\n    return res_ser",
    "docstring": "Special case for _reduce to try to avoid a potentially-expensive transpose. Apply the reduction block-wise along axis=1 and then reduce the resulting 1D arrays.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\frame.py",
    "ast_data": "FunctionDef name:_reduce_axis1 arg:self arg:name arg:func arg:skipna arguments arg arg arg arg If Compare Assign Call Call Assign If Compare Assign Call Call Assign Raise Call For Assign Call Assign Call Assign Call Return return:yes"
  },
  {
    "library": "django",
    "name": "__init__",
    "source_code": "def __init__(self, model, data, mapping, layer=0, source_srs=None, encoding='utf-8', transaction_mode='commit_on_success', transform=True, unique=None, using=None):\n    if isinstance(data, (str, Path)):\n        self.ds = DataSource(data, encoding=encoding)\n    else:\n        self.ds = data\n    self.layer = self.ds[layer]\n    self.using = using if using is not None else router.db_for_write(model)\n    connection = connections[self.using]\n    self.spatial_backend = connection.ops\n    self.mapping = mapping\n    self.model = model\n    self.check_layer()\n    if connection.features.supports_transform:\n        self.geo_field = self.geometry_field()\n    else:\n        transform = False\n    if transform:\n        self.source_srs = self.check_srs(source_srs)\n        self.transform = self.coord_transform()\n    else:\n        self.transform = transform\n    if encoding:\n        from codecs import lookup\n        lookup(encoding)\n        self.encoding = encoding\n    else:\n        self.encoding = None\n    if unique:\n        self.check_unique(unique)\n        transaction_mode = 'autocommit'\n        self.unique = unique\n    else:\n        self.unique = None\n    self.transaction_mode = transaction_mode\n    if transaction_mode == 'autocommit':\n        self.transaction_decorator = None\n    elif transaction_mode == 'commit_on_success':\n        self.transaction_decorator = transaction.atomic\n    else:\n        raise LayerMapError('Unrecognized transaction mode: %s' % transaction_mode)",
    "docstring": "A LayerMapping object is initialized using the given Model (not an instance), a DataSource (or string path to an OGR-supported data file), and a mapping dictionary. See the module level docstring for more details and keyword argument usage.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\utils\\layermapping.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:model arg:data arg:mapping arg:layer arg:source_srs arg:encoding arg:transaction_mode arg:transform arg:unique arg:using arguments arg arg arg arg arg arg arg arg arg arg arg If Call Assign Call Assign Assign Assign Compare Call Assign Assign Assign Assign Call If Assign Call Assign If Assign Call Assign Call Assign If Call Assign Assign If Call Assign Assign Assign Assign If Compare Assign If Compare Assign Raise Call"
  },
  {
    "library": "matplotlib",
    "name": "__iter__",
    "source_code": "def __iter__(self):\n    for name in self._registered:\n        if self.is_available(name):\n            yield name",
    "docstring": "Iterate over names of available writer class.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\animation.py",
    "ast_data": "FunctionDef name:__iter__ arg:self arguments arg For If Call"
  },
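A usage sketch against the public registry instance, `matplotlib.animation.writers`; which names appear depends on the external tools installed on the machine:

```python
# Iterating the registry only yields writers whose external dependencies
# (e.g. ffmpeg) are actually available on this system.
from matplotlib import animation

print(list(animation.writers))                 # e.g. ['pillow', 'ffmpeg', 'html']
print(animation.writers.is_available("ffmpeg"))  # True if ffmpeg is installed
```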
  {
    "library": "scipy",
    "name": "docs",
    "source_code": "@click.option('--list-targets', '-t', default=False, is_flag=True, help='List doc targets')\n@click.option('--no-cache', default=False, is_flag=True, help='Forces a full rebuild of the docs. Note that this may be ' + 'needed in order to make docstring changes in C/Cython files ' + 'show up.')\n@spin.util.extend_command(spin.cmds.meson.docs)\ndef docs(*, parent_callback, sphinx_target, clean, jobs, list_targets, no_cache, **kwargs):\n    meson.docs.ignore_unknown_options = True\n    if clean:\n        cwd = os.getcwd()\n        os.chdir(os.path.join(cwd, 'doc'))\n        subprocess.call(['make', 'clean'], cwd=os.getcwd())\n        clean = False\n        os.chdir(cwd)\n    SPHINXOPTS = '-W'\n    if no_cache:\n        SPHINXOPTS += ' -E'\n    SPHINXOPTS = os.environ.get('SPHINXOPTS', '') + SPHINXOPTS\n    os.environ['SPHINXOPTS'] = SPHINXOPTS\n    sphinx_target = 'html'\n    parent_callback(**{'sphinx_target': sphinx_target, 'clean': clean, 'jobs': jobs, **kwargs})",
    "docstring": "📖 Build Sphinx documentation By default, SPHINXOPTS=\"-W\", raising errors on warnings. To build without raising on warnings: SPHINXOPTS=\"\" spin docs To list all Sphinx targets: spin docs targets To build another Sphinx target: spin docs TARGET E.g., to build a zipfile of the html docs for distribution: spin docs dist",
    "type": "function",
    "file_path": "scipy\\.spin\\cmds.py",
    "ast_data": "FunctionDef name:docs arguments arg arg arg arg arg arg arg Assign If Assign Call Call Call Call Call Assign Call Assign If Assign Call Assign Assign Call Call Call Call"
  },
  {
    "library": "scipy",
    "name": "trace",
    "source_code": "def trace(self, offset=0):\n    return self.diagonal(k=offset).sum()",
    "docstring": "Returns the sum along diagonals of the sparse array/matrix. Parameters ---------- offset : int, optional Which diagonal to get, corresponding to elements a[i, i+offset]. Default: 0 (the main diagonal).",
    "type": "method",
    "file_path": "scipy\\scipy\\sparse\\_base.py",
    "ast_data": "FunctionDef name:trace arg:self arg:offset arguments arg arg Return return:yes Call Call"
  },
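A small usage example; `trace()` is simply the sum of the chosen diagonal:

```python
# trace() with and without an offset on a small sparse array.
import numpy as np
from scipy.sparse import csr_array

a = csr_array(np.arange(9).reshape(3, 3))
print(a.trace())          # 0 + 4 + 8 = 12.0
print(a.trace(offset=1))  # 1 + 5 = 6.0
```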
  {
    "library": "tensorflow",
    "name": "serialize",
    "source_code": "def serialize(self):\n    return gen_boosted_trees_ops.boosted_trees_serialize_ensemble(self.resource_handle)",
    "docstring": "Serializes the ensemble into proto and returns the serialized proto. Returns: stamp_token: int64 scalar Tensor to denote the stamp of the resource. serialized_proto: string scalar Tensor of the serialized proto.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\boosted_trees_ops.py",
    "ast_data": "FunctionDef name:serialize arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, learning_rate, momentum, use_locking=False, name='Momentum', use_nesterov=False):\n    super(MomentumOptimizer, self).__init__(use_locking, name)\n    self._learning_rate = learning_rate\n    self._momentum = momentum\n    self._use_nesterov = use_nesterov",
    "docstring": "Construct a new Momentum optimizer. Args: learning_rate: A or a floating point value. The learning rate. momentum: A or a floating point value. The momentum. use_locking: If use locks for update operations. name: Optional name prefix for the operations created when applying gradients. Defaults to \"Momentum\". use_nesterov: If use Nesterov Momentum. See (Sutskever et al., 2013). This implementation always computes gradients at the value of the variable(s) passed to the optimizer. Using Nesterov Momentum makes the variable(s) track the values called in the paper. This implementation is an approximation of the original formula, valid for high values of momentum. It will compute the \"adjusted gradient\" in NAG by assuming that the new gradient will be estimated by the current average gradient plus the product of momentum and the change in the average gradient. References: On the importance of initialization and momentum in deep learning: [Sutskever et al., 2013] ( ([pdf](",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\training\\momentum.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:learning_rate arg:momentum arg:use_locking arg:name arg:use_nesterov arguments arg arg arg arg arg arg Call Call Assign Assign Assign"
  },
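A short usage sketch; in TF2 this legacy optimizer is reachable under `tf.compat.v1`:

```python
# Construct the legacy TF1-style Momentum optimizer with Nesterov momentum.
import tensorflow as tf

opt = tf.compat.v1.train.MomentumOptimizer(
    learning_rate=0.01, momentum=0.9, use_nesterov=True)
```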
  {
    "library": "tensorflow",
    "name": "_transform_gradients",
    "source_code": "def _transform_gradients(self, grads_and_vars):\n    if self._clipvalue is not None:\n        grads_and_vars = self._clipvalue_fn(grads_and_vars)\n    if self._clipnorm is not None:\n        grads_and_vars = self._clipnorm_fn(grads_and_vars)\n    if self._global_clipnorm is not None:\n        grads_and_vars = self._global_clipnorm_fn(grads_and_vars)\n    for fn in self.gradient_transformers:\n        grads_and_vars = fn(grads_and_vars)\n    return grads_and_vars",
    "docstring": "Called in after aggregation.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\optimizer_v2\\optimizer_v2.py",
    "ast_data": "FunctionDef name:_transform_gradients arg:self arg:grads_and_vars arguments arg arg If Compare Assign Call If Compare Assign Call If Compare Assign Call For Assign Call Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "_warn_mkl_vcomp",
    "source_code": "def _warn_mkl_vcomp(self, n_active_threads):\n    warnings.warn(f'KMeans is known to have a memory leak on Windows with MKL, when there are less chunks than available threads. You can avoid it by setting the environment variable OMP_NUM_THREADS={n_active_threads}.')",
    "docstring": "Warn when vcomp and mkl are both present",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\cluster\\_kmeans.py",
    "ast_data": "FunctionDef name:_warn_mkl_vcomp arg:self arg:n_active_threads arguments arg arg Call"
  },
  {
    "library": "scikit-learn",
    "name": "make_constraint",
    "source_code": "def make_constraint(constraint):\n    if isinstance(constraint, str) and constraint == 'array-like':\n        return _ArrayLikes()\n    if isinstance(constraint, str) and constraint == 'sparse matrix':\n        return _SparseMatrices()\n    if isinstance(constraint, str) and constraint == 'random_state':\n        return _RandomStates()\n    if constraint is callable:\n        return _Callables()\n    if constraint is None:\n        return _NoneConstraint()\n    if isinstance(constraint, type):\n        return _InstancesOf(constraint)\n    if isinstance(constraint, (Interval, StrOptions, Options, HasMethods, MissingValues)):\n        return constraint\n    if isinstance(constraint, str) and constraint == 'boolean':\n        return _Booleans()\n    if isinstance(constraint, str) and constraint == 'verbose':\n        return _VerboseHelper()\n    if isinstance(constraint, str) and constraint == 'cv_object':\n        return _CVObjects()\n    if isinstance(constraint, Hidden):\n        constraint = make_constraint(constraint.constraint)\n        constraint.hidden = True\n        return constraint\n    if isinstance(constraint, str) and constraint == 'nan' or (isinstance(constraint, float) and np.isnan(constraint)):\n        return _NanConstraint()\n    raise ValueError(f'Unknown constraint type: {constraint}')",
    "docstring": "Convert the constraint into the appropriate Constraint object. Parameters ---------- constraint : object The constraint to convert. Returns ------- constraint : instance of _Constraint The converted constraint.",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\utils\\_param_validation.py",
    "ast_data": "FunctionDef name:make_constraint arg:constraint arguments arg If BoolOp Call Compare Return return:yes Call If BoolOp Call Compare Return return:yes Call If BoolOp Call Compare Return return:yes Call If Compare Return return:yes Call If Compare Return return:yes Call If Call Return return:yes Call If Call Return return:yes If BoolOp Call Compare Return return:yes Call If BoolOp Call Compare Return return:yes Call If BoolOp Call Compare Return return:yes Call If Call Assign Call Assign Return return:yes If BoolOp BoolOp Call Compare BoolOp Call Call Return return:yes Call Raise Call"
  },
  {
    "library": "pytorch",
    "name": "_partition_parameters",
    "source_code": "def _partition_parameters(self, params_per_rank: Optional[list[list[torch.Tensor]]]=None) -> list[list[dict]]:\n    if params_per_rank is None:\n        if len(self._partition_parameters_cache) == 0:\n            self._partition_parameters_cache = [[] for _ in range(self.world_size)]\n            sizes = [0] * self.world_size\n            for param_group in self.param_groups:\n                param_group_params_per_rank: list[list] = [[] for _ in range(self.world_size)]\n                params_sorted = sorted(param_group['params'], key=lambda t: t.numel(), reverse=True)\n                for param in params_sorted:\n                    rank = self._get_min_index(sizes)\n                    param_group_params_per_rank[rank].append(param)\n                    sizes[rank] += param.numel()\n                self._partition_param_group(param_group, param_group_params_per_rank)\n        return self._partition_parameters_cache\n    assert len(self._partition_parameters_cache) == 0, 'Specifying `params_per_rank` should only be done when the parameters have not been partitioned yet'\n    if len(self.param_groups) != 1:\n        raise RuntimeError('Specifying `params_per_rank` only supports a single parameter group')\n    self._verify_params_per_rank(params_per_rank)\n    self._partition_parameters_cache = [[] for _ in range(self.world_size)]\n    param_group = self.param_groups[0]\n    self._partition_param_group(param_group, params_per_rank)\n    return self._partition_parameters_cache",
    "docstring": "Partitions parameters across distributed data parallel ranks. Arguments: params_per_rank (list[list[torch.Tensor]], optional): a :class: of length world size containing :class: s of parameters to assign to each rank; this provides a way to specify a partition manually. If `listlistdictstep_validate_params_per_rankZeroRedundancyOptimizer` instance is using more than one parameter group.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\optim\\zero_redundancy_optimizer.py",
    "ast_data": "FunctionDef name:_partition_parameters arg:self arg:params_per_rank arguments arg arg If Compare If Compare Call Assign Call Assign For Call Assign Call arguments arg Call For Assign Call Call Call Call Return return:yes Compare Call If Compare Call Raise Call Call Assign Call Assign Call Return return:yes"
  },
  {
    "library": "kornia",
    "name": "_torch_svd_cast",
    "source_code": "def _torch_svd_cast(input: Tensor) -> Tuple[Tensor, Tensor, Tensor]:\n    dtype = input.dtype\n    if dtype not in (torch.float32, torch.float64):\n        dtype = torch.float32\n    out1, out2, out3H = torch.linalg.svd(input.to(dtype))\n    if torch_version_ge(1, 11):\n        out3 = out3H.mH\n    else:\n        out3 = out3H.transpose(-1, -2)\n    return (out1.to(input.dtype), out2.to(input.dtype), out3.to(input.dtype))",
    "docstring": "Make torch.svd work with other than fp32/64. The function torch.svd is only implemented for fp32/64 which makes impossible to be used by fp16 or others. What this function does, is cast input data type to fp32, apply torch.svd, and cast back to the input dtype. NOTE: in torch 1.8.1 this function is recommended to use as torch.linalg.svd",
    "type": "function",
    "file_path": "kornia\\kornia\\utils\\helpers.py",
    "ast_data": "FunctionDef name:_torch_svd_cast arg:input arguments arg Assign If Compare Assign Assign Call Call If Call Assign Assign Call Return return:yes Call Call Call"
  },
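The cast-compute-cast-back pattern the helper implements, sketched with public APIs only (note the helper additionally returns V rather than the Vh that `torch.linalg.svd` yields):

```python
# fp16 input is promoted to fp32 for the SVD, and the factors are cast back.
import torch

x = torch.randn(4, 3, dtype=torch.float16)
u, s, vh = torch.linalg.svd(x.to(torch.float32))
u, s, vh = u.to(x.dtype), s.to(x.dtype), vh.to(x.dtype)
# The kornia helper would return v = vh.mH (i.e. V, not Vh).
```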
  {
    "library": "matplotlib",
    "name": "set_3d_properties",
    "source_code": "def set_3d_properties(self, verts, zs=0, zdir='z', axlim_clip=False):\n    zs = np.broadcast_to(zs, len(verts))\n    self._segment3d = [juggle_axes(x, y, z, zdir) for (x, y), z in zip(verts, zs)]\n    self._axlim_clip = axlim_clip",
    "docstring": "Set the *z* position and direction of the patch. Parameters ---------- verts : zs : float The location along the *zdir* axis in 3D space to position the patch. zdir : {'x', 'y', 'z'} Plane to plot patch orthogonal to. Default: 'z'. See for a description of the values. axlim_clip : bool, default: False Whether to hide patches with a vertex outside the axes view limits. .. versionadded:: 3.10",
    "type": "method",
    "file_path": "matplotlib\\lib\\mpl_toolkits\\mplot3d\\art3d.py",
    "ast_data": "FunctionDef name:set_3d_properties arg:self arg:verts arg:zs arg:zdir arg:axlim_clip arguments arg arg arg arg arg Assign Call Call Assign Call Call Assign"
  },
  {
    "library": "authlib",
    "name": "JWEAlgorithmWithTagAwareKeyAgreement",
    "source_code": "class JWEAlgorithmWithTagAwareKeyAgreement(JWEAlgorithmBase, metaclass=ABCMeta):\n\n    def generate_keys_and_prepare_headers(self, enc_alg, key, sender_key, preset=None):\n        raise NotImplementedError\n\n    def agree_upon_key_and_wrap_cek(self, enc_alg, headers, key, sender_key, epk, cek, tag):\n        raise NotImplementedError\n\n    def wrap(self, enc_alg, headers, key, sender_key, preset=None):\n        raise NotImplementedError\n\n    def unwrap(self, enc_alg, ek, headers, key, sender_key, tag=None):\n        raise NotImplementedError",
    "docstring": "Interface for JWE algorithm with tag-aware key agreement (in key agreement with key wrapping mode). ECDH-1PU is an example of such an algorithm.",
    "type": "class",
    "file_path": "authlib\\authlib\\jose\\rfc7516\\models.py",
    "ast_data": "ClassDef name:JWEAlgorithmWithTagAwareKeyAgreement FunctionDef name:generate_keys_and_prepare_headers arg:self arg:enc_alg arg:key arg:sender_key arg:preset arguments arg arg arg arg arg Raise FunctionDef name:agree_upon_key_and_wrap_cek arg:self arg:enc_alg arg:headers arg:key arg:sender_key arg:epk arg:cek arg:tag arguments arg arg arg arg arg arg arg arg Raise FunctionDef name:wrap arg:self arg:enc_alg arg:headers arg:key arg:sender_key arg:preset arguments arg arg arg arg arg arg Raise FunctionDef name:unwrap arg:self arg:enc_alg arg:ek arg:headers arg:key arg:sender_key arg:tag arguments arg arg arg arg arg arg arg Raise"
  },
  {
    "library": "tensorflow",
    "name": "_validate_state_spec",
    "source_code": "@staticmethod\ndef _validate_state_spec(cell_state_sizes, init_state_specs):\n    validation_error = ValueError('An `initial_state` was passed that is not compatible with `cell.state_size`. Received `state_spec`={}; however `cell.state_size` is {}'.format(init_state_specs, cell_state_sizes))\n    flat_cell_state_sizes = nest.flatten(cell_state_sizes)\n    flat_state_specs = nest.flatten(init_state_specs)\n    if len(flat_cell_state_sizes) != len(flat_state_specs):\n        raise validation_error\n    for cell_state_spec, cell_state_size in zip(flat_state_specs, flat_cell_state_sizes):\n        if not tensor_shape.TensorShape(cell_state_spec.shape[1:]).is_compatible_with(tensor_shape.TensorShape(cell_state_size)):\n            raise validation_error",
    "docstring": "Validate the state spec between the initial_state and the state_size. Args: cell_state_sizes: list, the attribute from the cell. init_state_specs: list, the from the initial_state that is passed in . Raises: ValueError: When initial state spec is not compatible with the state size.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\layers\\recurrent.py",
    "ast_data": "FunctionDef name:_validate_state_spec arg:cell_state_sizes arg:init_state_specs arguments arg arg Assign Call Call Assign Call Assign Call If Compare Call Call Raise For Call If Call Call Call Raise"
  },
  {
    "library": "django",
    "name": "set_level",
    "source_code": "def set_level(request, level):\n    if not hasattr(request, '_messages'):\n        return False\n    request._messages.level = level\n    return True",
    "docstring": "Set the minimum level of messages to be recorded, and return ``, use the default level (see the get_level() function).",
    "type": "function",
    "file_path": "django\\django\\contrib\\messages\\api.py",
    "ast_data": "FunctionDef name:set_level arg:request arg:level arguments arg arg If Call Return return:yes Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "average_pooling2d",
    "source_code": "def average_pooling2d(inputs, pool_size, strides, padding='valid', data_format='channels_last', name=None):\n    warnings.warn('`tf.layers.average_pooling2d` is deprecated and will be removed in a future version. Please use `tf.keras.layers.AveragePooling2D` instead.')\n    layer = AveragePooling2D(pool_size=pool_size, strides=strides, padding=padding, data_format=data_format, name=name)\n    return layer.apply(inputs)",
    "docstring": "Average pooling layer for 2D inputs (e.g. images). Args: inputs: The tensor over which to pool. Must have rank 4. pool_size: An integer or tuple/list of 2 integers: (pool_height, pool_width) specifying the size of the pooling window. Can be a single integer to specify the same value for all spatial dimensions. strides: An integer or tuple/list of 2 integers, specifying the strides of the pooling operation. Can be a single integer to specify the same value for all spatial dimensions. padding: A string. The padding method, either 'valid' or 'same'. Case-insensitive. data_format: A string. The ordering of the dimensions in the inputs. (default) and are supported. corresponds to inputs with shape while corresponds to inputs with shape . name: A string, the name of the layer. Returns: Output tensor. Raises: ValueError: if eager execution is enabled.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\legacy_tf_layers\\pooling.py",
    "ast_data": "FunctionDef name:average_pooling2d arg:inputs arg:pool_size arg:strides arg:padding arg:data_format arg:name arguments arg arg arg arg arg arg Call Assign Call Return return:yes Call"
  },
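The migration the deprecation warning asks for, as a short sketch:

```python
# Replacement named in the warning: tf.keras.layers.AveragePooling2D.
import tensorflow as tf

pool = tf.keras.layers.AveragePooling2D(pool_size=2, strides=2, padding="valid")
x = tf.random.normal([1, 8, 8, 3])   # rank-4 input, channels_last
y = pool(x)                          # shape (1, 4, 4, 3)
```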
  {
    "library": "tensorflow",
    "name": "_save_variable_devices",
    "source_code": "def _save_variable_devices(self):\n    return self != VariablePolicy.NONE",
    "docstring": "Checks whether variable devices should be saved.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\saved_model\\save_options.py",
    "ast_data": "FunctionDef name:_save_variable_devices arg:self arguments arg Return return:yes Compare"
  },
  {
    "library": "tensorflow",
    "name": "from_proto",
    "source_code": "@classmethod\ndef from_proto(cls, proto: Any) -> 'Parameter':\n    deserialized_type_constraint = serialization.deserialize(proto.type_constraint) if proto.HasField('type_constraint') else None\n    return Parameter(proto.name, PROTO_TO_PY_ENUM[proto.kind], proto.is_optional, deserialized_type_constraint)",
    "docstring": "Generate a Parameter from the proto representation.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\core\\function\\polymorphism\\function_type.py",
    "ast_data": "FunctionDef name:from_proto arg:cls arg:proto arguments arg arg Assign Call Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_UnsortedSegmentMaxGrad",
    "source_code": "@ops.RegisterGradient('UnsortedSegmentMax')\ndef _UnsortedSegmentMaxGrad(op: ops.Operation, grad):\n    return _UnsortedSegmentMinOrMaxGrad(op, grad)",
    "docstring": "Gradient for UnsortedSegmentMax.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\math_grad.py",
    "ast_data": "FunctionDef name:_UnsortedSegmentMaxGrad arg:op arg:grad arguments arg arg Return return:yes Call Call"
  },
  {
    "library": "cryptography",
    "name": "authenticate_additional_data",
    "source_code": "@abc.abstractmethod\ndef authenticate_additional_data(self, data: Buffer) -> None:\n    pass",
    "docstring": "Authenticates the provided bytes.",
    "type": "method",
    "file_path": "cryptography\\src\\cryptography\\hazmat\\primitives\\ciphers\\base.py",
    "ast_data": "FunctionDef name:authenticate_additional_data arg:self arg:data arguments arg arg"
  },
  {
    "library": "tensorflow",
    "name": "logits",
    "source_code": "@property\ndef logits(self):\n    return self._logits",
    "docstring": "Vector of coordinatewise logits.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\distributions\\categorical.py",
    "ast_data": "FunctionDef name:logits arg:self arguments arg Return return:yes"
  },
  {
    "library": "django",
    "name": "FallbackStorage",
    "source_code": "class FallbackStorage(BaseStorage):\n    storage_classes = (CookieStorage, SessionStorage)\n\n    def __init__(self, *args, **kwargs):\n        super().__init__(*args, **kwargs)\n        self.storages = [storage_class(*args, **kwargs) for storage_class in self.storage_classes]\n        self._used_storages = set()\n\n    def _get(self, *args, **kwargs):\n        all_messages = []\n        for storage in self.storages:\n            messages, all_retrieved = storage._get()\n            if messages is None:\n                break\n            if messages:\n                self._used_storages.add(storage)\n            all_messages.extend(messages)\n            if all_retrieved:\n                break\n        return (all_messages, all_retrieved)\n\n    def _store(self, messages, response, *args, **kwargs):\n        for storage in self.storages:\n            if messages:\n                messages = storage._store(messages, response, remove_oldest=False)\n            elif storage in self._used_storages:\n                storage._store([], response)\n                self._used_storages.remove(storage)\n        return messages",
    "docstring": "Try to store all messages in the first backend. Store any unstored messages in each subsequent backend.",
    "type": "class",
    "file_path": "django\\django\\contrib\\messages\\storage\\fallback.py",
    "ast_data": "ClassDef name:FallbackStorage Assign FunctionDef name:__init__ arg:self arguments arg arg arg Call Call Assign Call Assign Call FunctionDef name:_get arg:self arguments arg arg arg Assign For Assign Call If Compare If Call Call If Return return:yes FunctionDef name:_store arg:self arg:messages arg:response arguments arg arg arg arg arg For If Assign Call If Compare Call Call Return return:yes"
  },
  {
    "library": "kornia",
    "name": "build_2d_sincos_pos_emb",
    "source_code": "@staticmethod\ndef build_2d_sincos_pos_emb(w: int, h: int, embed_dim: int, temp: float=10000.0, device: Optional[torch.device]=None, dtype: Optional[torch.dtype]=None) -> Tensor:\n    xs = torch.arange(w, device=device, dtype=dtype)\n    ys = torch.arange(h, device=device, dtype=dtype)\n    grid_x, grid_y = torch_meshgrid([xs, ys], indexing='ij')\n    pos_dim = embed_dim // 4\n    omega = torch.arange(pos_dim, device=device, dtype=dtype) / pos_dim\n    omega = 1.0 / temp ** omega\n    out_x = grid_x.reshape(-1, 1) * omega.view(1, -1)\n    out_y = grid_y.reshape(-1, 1) * omega.view(1, -1)\n    pos_emb = concatenate([out_x.sin(), out_x.cos(), out_y.sin(), out_y.cos()], 1)\n    return pos_emb.unsqueeze(1)",
    "docstring": "Construct 2D sin-cos positional embeddings. Args: w: width of the image or feature map h: height of the image or feature map embed_dim: embedding dimension temp: temperature coefficient device: device to place the positional embeddings dtype: data type of the positional embeddings Returns: positional embeddings, shape :math:",
    "type": "method",
    "file_path": "kornia\\kornia\\contrib\\models\\rt_detr\\architecture\\hybrid_encoder.py",
    "ast_data": "FunctionDef name:build_2d_sincos_pos_emb arg:w arg:h arg:embed_dim arg:temp arg:device arg:dtype arguments arg arg arg arg arg arg Assign Call Assign Call Assign Call Assign Assign Call Assign Assign Call Call Assign Call Call Assign Call Call Call Call Call Return return:yes Call"
  },
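A plain-PyTorch sketch of the same construction, substituting `torch.meshgrid` and `torch.cat` for the kornia helpers; the final shape matches the `(w*h, 1, embed_dim)` output noted above:

```python
# 2D sin-cos positional embeddings from scratch.
import torch

w, h, embed_dim, temp = 4, 4, 16, 10000.0
xs = torch.arange(w, dtype=torch.float32)
ys = torch.arange(h, dtype=torch.float32)
gx, gy = torch.meshgrid(xs, ys, indexing="ij")
pos_dim = embed_dim // 4
omega = 1.0 / temp ** (torch.arange(pos_dim, dtype=torch.float32) / pos_dim)
out_x = gx.reshape(-1, 1) * omega.view(1, -1)
out_y = gy.reshape(-1, 1) * omega.view(1, -1)
pos = torch.cat([out_x.sin(), out_x.cos(), out_y.sin(), out_y.cos()], dim=1)
print(pos.unsqueeze(1).shape)  # torch.Size([16, 1, 16]) == (w*h, 1, embed_dim)
```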
  {
    "library": "pytorch",
    "name": "_verify_same_dense_param_type",
    "source_code": "def _verify_same_dense_param_type(self) -> None:\n    typename = torch.typename(self._all_params[0])\n    if self._all_params[0].is_sparse:\n        raise ValueError(f'ZeroRedundancyOptimizer only supports using the same dense type for all parameters but got {typename}')\n    for param in self._all_params[1:]:\n        other_typename = torch.typename(param)\n        if other_typename != typename:\n            raise ValueError(f'ZeroRedundancyOptimizer only supports using the same dense type for all parameters but got both {typename} and {other_typename}')",
    "docstring": "Verify that all parameters are of the same dense type. The method assumes that `` contains sparse parameters or parameters of varying dense types. NOTE: This method can be removed once support for sparse parameters and varying parameter types is added.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\optim\\zero_redundancy_optimizer.py",
    "ast_data": "FunctionDef name:_verify_same_dense_param_type arg:self arguments arg Assign Call If Raise Call For Assign Call If Compare Raise Call"
  },
  {
    "library": "scikit-learn",
    "name": "size",
    "source_code": "def size(x):\n    return math.prod(x.shape)",
    "docstring": "Return the total number of elements of x. Parameters ---------- x : array Array instance from NumPy or an array API compatible library. Returns ------- out : int Total number of elements.",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\utils\\_array_api.py",
    "ast_data": "FunctionDef name:size arg:x arguments arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "_canonicalize_fft_c2r_shape_and_dim_args",
    "source_code": "def _canonicalize_fft_c2r_shape_and_dim_args(fname: str, input: TensorLikeType, s: Optional[ShapeType], dim: Optional[DimsType]) -> _CanonicalizeC2rReturn:\n    shape, dim = _canonicalize_fft_shape_and_dim_args(input, s, dim)\n    torch._check(len(shape) > 0, lambda: f'{fname} must transform at least one axis')\n    if s is None or s[-1] == -1:\n        last_dim_size = 2 * (input.shape[dim[-1]] - 1)\n    else:\n        last_dim_size = shape[-1]\n    torch._check(last_dim_size >= 1, lambda: f'Invalid number of data points ({last_dim_size}) specified')\n    shape_list = list(shape)\n    shape_list[-1] = last_dim_size // 2 + 1\n    return _CanonicalizeC2rReturn(shape=tuple(shape_list), dim=dim, last_dim_size=last_dim_size)",
    "docstring": "Canonicalize shape and dim arguments for n-dimensional c2r transforms, as well as calculating the last_dim_size which is shape[dim[-1]] for the output",
    "type": "function",
    "file_path": "pytorch\\torch\\_refs\\fft.py",
    "ast_data": "FunctionDef name:_canonicalize_fft_c2r_shape_and_dim_args arg:fname arg:input arg:s arg:dim arguments arg arg arg arg Assign Call Call Compare Call arguments If BoolOp Compare Compare Assign Assign Call Compare arguments Assign Call Assign Return return:yes Call Call"
  },
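The default `last_dim_size` mirrors the behavior of the public `torch.fft.irfft`: for a complex input of length m along the transformed dim, the real output has 2 * (m - 1) points, as this sketch shows:

```python
# Round trip: 10 real points -> 6 complex bins -> 10 real points by default.
import torch

x = torch.fft.rfft(torch.randn(10))   # 10 real points -> 6 complex bins
y = torch.fft.irfft(x)                # default n = 2 * (6 - 1) = 10
print(x.shape, y.shape)               # torch.Size([6]) torch.Size([10])
```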
  {
    "library": "cherrypy",
    "name": "readline",
    "source_code": "def readline(self, size=None):\n    return self.fp.readline(size)",
    "docstring": "Read a line of bytes from the connection.",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\_cpreqbody.py",
    "ast_data": "FunctionDef name:readline arg:self arg:size arguments arg arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "_find_onnx_data_type",
    "source_code": "def _find_onnx_data_type(torch_input: fx_type_utils.TensorLike | str | int | float | bool | list | tuple | complex | None) -> set[str]:\n    if isinstance(torch_input, fx_type_utils.TensorLike) and torch_input.dtype is not None:\n        return fx_type_utils.from_torch_dtype_to_onnx_dtype_str(torch_input.dtype)\n    if isinstance(torch_input, (int, float, bool, str, complex)):\n        return fx_type_utils.from_torch_dtype_to_onnx_dtype_str(type(torch_input))\n    if isinstance(torch_input, (list, tuple)) and torch_input:\n        the_first_non_none_item = next((item for item in torch_input if item is not None), None)\n        set_dtype = _find_onnx_data_type(the_first_non_none_item)\n        if any((isinstance(input, fx_type_utils.TensorLike) for input in torch_input)):\n            return {f'seq({dtype})' for dtype in set_dtype}\n        else:\n            return set_dtype\n    if torch_input is None or (isinstance(torch_input, fx_type_utils.TensorLike) and torch_input.dtype is None) or (isinstance(torch_input, (list, tuple)) and (not torch_input)):\n        return set()\n    raise RuntimeError(f'Unknown input type from input: {torch_input}')",
    "docstring": "Convert inputs data type from torch acceptable dtype to the compatible onnx dtype string.",
    "type": "function",
    "file_path": "pytorch\\torch\\onnx\\_internal\\fx\\onnxfunction_dispatcher.py",
    "ast_data": "FunctionDef name:_find_onnx_data_type arg:torch_input arguments arg If BoolOp Call Compare Return return:yes Call If Call Return return:yes Call Call If BoolOp Call Assign Call Compare Assign Call If Call Call Return return:yes Return return:yes If BoolOp Compare BoolOp Call Compare BoolOp Call Return return:yes Call Raise Call"
  },
  {
    "library": "django",
    "name": "__getitem__",
    "source_code": "def __getitem__(self, target):\n    if isinstance(target, tuple):\n        return self.attr_value(*target)\n    else:\n        return self.attr_value(target)",
    "docstring": "Return the value of the given string attribute node, None if the node doesn't exist. Can also take a tuple as a parameter, (target, child), where child is the index of the attribute in the WKT. For example: >>> wkt = 'GEOGCS[\"WGS 84\", DATUM[\"WGS_1984, ... AUTHORITY[\"EPSG\",\"4326\"]]' >>> srs = SpatialReference(wkt) # could also use 'WGS84', or 4326 >>> print(srs['GEOGCS']) WGS 84 >>> print(srs['DATUM']) WGS_1984 >>> print(srs['AUTHORITY']) EPSG >>> print(srs['AUTHORITY', 1]) # The authority value 4326 >>> print(srs['TOWGS84', 4]) # the fourth value in this wkt 0 >>> # For the units authority, have to use the pipe symbole. >>> print(srs['UNIT|AUTHORITY']) EPSG >>> print(srs['UNIT|AUTHORITY', 1]) # The authority value for the units 9122",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\gdal\\srs.py",
    "ast_data": "FunctionDef name:__getitem__ arg:self arg:target arguments arg arg If Call Return return:yes Call Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "XinSheYang03",
    "source_code": "class XinSheYang03(Benchmark):\n    change_dimensionality = True\n\n    def __init__(self, dimensions=2):\n        Benchmark.__init__(self, dimensions)\n        self._bounds = list(zip([-20.0] * self.N, [20.0] * self.N))\n        self.global_optimum = [[0 for _ in range(self.N)]]\n        self.fglob = -1.0\n\n    def fun(self, x, *args):\n        self.nfev += 1\n        beta, m = (15.0, 5.0)\n        u = sum((x / beta) ** (2 * m))\n        v = sum(x ** 2)\n        w = prod(cos(x) ** 2)\n        return exp(-u) - 2 * exp(-v) * w",
    "docstring": "Xin-She Yang 3 objective function. This class defines the Xin-She Yang 3 [1]_ global optimization problem. This is a multimodal minimization problem defined as follows: .. math:: f_{\\text{XinSheYang03}}(x) = e^{-\\sum_{i=1}^{n} (x_i/\\beta)^{2m}} - 2e^{-\\sum_{i=1}^{n} x_i^2} \\prod_{i=1}^{n} \\cos^2(x_i) Where, in this exercise, :math: and :math:. Here, :math: represents the number of dimensions and :math: for :math:. *Global optimum*: :math: for :math: for :math: .. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions For Global Optimization Problems Int. Journal of Mathematical Modelling and Numerical Optimisation, 2013, 4, 150-194.",
    "type": "class",
    "file_path": "scipy\\benchmarks\\benchmarks\\go_benchmark_functions\\go_funcs_X.py",
    "ast_data": "ClassDef name:XinSheYang03 Assign FunctionDef name:__init__ arg:self arg:dimensions arguments arg arg Call Assign Call Call Assign Call Assign FunctionDef name:fun arg:self arg:x arguments arg arg arg Assign Assign Call Assign Call Assign Call Call Return return:yes Call Call"
  },
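Evaluating the formula directly at the global optimum confirms `fglob`: with x = 0, both sums vanish and the cosine product is 1, so f(0) = 1 - 2 = -1.

```python
# Direct evaluation of the Xin-She Yang 3 formula at x = 0 (beta=15, m=5).
import numpy as np

x = np.zeros(2)
beta, m = 15.0, 5.0
u = np.sum((x / beta) ** (2 * m))
v = np.sum(x ** 2)
w = np.prod(np.cos(x) ** 2)
print(np.exp(-u) - 2 * np.exp(-v) * w)  # -1.0
```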
  {
    "library": "kornia",
    "name": "_build_num_ray_dict_of_points2d",
    "source_code": "@staticmethod\ndef _build_num_ray_dict_of_points2d(points2d_as_flat_tensors: Dict[int, Points2D_FlatTensors]) -> Dict[int, Points2D]:\n    num_ray_dict_of_points2d: Dict[int, RaySampler.Points2D] = {}\n    for n, points2d_as_flat_tensor in points2d_as_flat_tensors.items():\n        num_cams = len(points2d_as_flat_tensor._camera_ids)\n        points_2d = torch.stack((points2d_as_flat_tensor._x, points2d_as_flat_tensor._y)).permute(1, 0).reshape(num_cams, -1, 2)\n        num_ray_dict_of_points2d[n] = RaySampler.Points2D(points_2d, points2d_as_flat_tensor._camera_ids)\n    return num_ray_dict_of_points2d",
    "docstring": "Build a dictionary of ray pixel points, by total number of rays as key. The dictionary groups rays by the total amount of rays, which allows the case of casting different number of rays from each scene camera. Args: points2d_as_flat_tensors: dictionary of pixel coordinates grouped by total number of rays: Dict[int, Points2D_FlatTensors] Returns: dictionary of Points2D objects that holds information on pixel 2d coordinates of each ray and the camera id it was casted by: Dict[int, Points2D]",
    "type": "method",
    "file_path": "kornia\\kornia\\nerf\\samplers.py",
    "ast_data": "FunctionDef name:_build_num_ray_dict_of_points2d arg:points2d_as_flat_tensors arguments arg For Call Assign Call Assign Call Call Call Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "run",
    "source_code": "def run(self, fn, args=(), kwargs=None, options=None):\n    validate_run_function(fn)\n    fn, args, kwargs = _maybe_partial_apply_variables(fn, args, kwargs)\n    fn = autograph.tf_convert(fn, autograph_ctx.control_status_ctx())\n    options = options or distribute_lib.RunOptions()\n    return self.extended.tpu_run(fn, args, kwargs, options)",
    "docstring": "See base class.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\tpu_strategy.py",
    "ast_data": "FunctionDef name:run arg:self arg:fn arg:args arg:kwargs arg:options arguments arg arg arg arg arg Call Assign Call Assign Call Call Assign BoolOp Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "display_modulewise_stats",
    "source_code": "def display_modulewise_stats(self, depth: int=2) -> None:\n    print('Pre-Forward Execution Order: ')\n    for mod_fqn in self.mod_fw_pre_order:\n        mod_depth = mod_fqn.count('.') + 1\n        if mod_depth > depth:\n            continue\n        print(mod_fqn)\n    print('Pre-Backward Execution Order: ')\n    for mod_fqn in self.mod_bw_pre_order:\n        mod_depth = mod_fqn.count('.') + 1\n        if mod_depth > depth:\n            continue\n        print(mod_fqn)\n    for mod_fqn, runtimes in self.mod_runtimes.items():\n        mod_depth = mod_fqn.count('.') + 1\n        if mod_depth > depth:\n            continue\n        print(f'{mod_fqn} fw: {runtimes.get('fw', 0.0):.3f}ms bw: {runtimes.get('bw', 0.0):.3f}ms')",
    "docstring": "Displays module-wise statistics collected by ``. Prints the pre-forward and pre-backward execution orders. Displays the module-wise forward and backward runtimes in milliseconds. Args: depth (int): The maximum depth of module hierarchy to display (default to 2).",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\_tools\\runtime_estimator.py",
    "ast_data": "FunctionDef name:display_modulewise_stats arg:self arg:depth arguments arg arg Call For Assign Call If Compare Call Call For Assign Call If Compare Call For Call Assign Call If Compare Call Call Call"
  },
  {
    "library": "django",
    "name": "ExpressionList",
    "source_code": "class ExpressionList(Func):\n    template = '%(expressions)s'\n\n    def __str__(self):\n        return self.arg_joiner.join((str(arg) for arg in self.source_expressions))\n\n    def as_sql(self, *args, **kwargs):\n        if not self.source_expressions:\n            return ('', ())\n        return super().as_sql(*args, **kwargs)\n\n    def as_sqlite(self, compiler, connection, **extra_context):\n        return self.as_sql(compiler, connection, **extra_context)\n\n    def get_group_by_cols(self):\n        group_by_cols = []\n        for expr in self.get_source_expressions():\n            group_by_cols.extend(expr.get_group_by_cols())\n        return group_by_cols",
    "docstring": "An expression containing multiple expressions. Can be used to provide a list of expressions as an argument to another expression, like a partition clause.",
    "type": "class",
    "file_path": "django\\django\\db\\models\\expressions.py",
    "ast_data": "ClassDef name:ExpressionList Assign FunctionDef name:__str__ arg:self arguments arg Return return:yes Call Call FunctionDef name:as_sql arg:self arguments arg arg arg If Return return:yes Return return:yes Call Call FunctionDef name:as_sqlite arg:self arg:compiler arg:connection arguments arg arg arg arg Return return:yes Call FunctionDef name:get_group_by_cols arg:self arguments arg Assign For Call Call Call Return return:yes"
  },
  {
    "library": "pandas",
    "name": "_is_dtype_compat",
    "source_code": "def _is_dtype_compat(self, other: Index) -> Categorical:\n    if isinstance(other.dtype, CategoricalDtype):\n        cat = extract_array(other)\n        cat = cast(Categorical, cat)\n        if not cat._categories_match_up_to_permutation(self._values):\n            raise TypeError('categories must match existing categories when appending')\n    elif other._is_multi:\n        raise TypeError('MultiIndex is not dtype-compatible with CategoricalIndex')\n    else:\n        values = other\n        cat = Categorical(other, dtype=self.dtype)\n        other = CategoricalIndex(cat)\n        if not other.isin(values).all():\n            raise TypeError('cannot append a non-category item to a CategoricalIndex')\n        cat = other._values\n        if not ((cat == values) | isna(cat) & isna(values)).all():\n            raise TypeError('categories must match existing categories when appending')\n    return cat",
    "docstring": "*this is an internal non-public method* provide a comparison between the dtype of self and other (coercing if needed) Parameters ---------- other : Index Returns ------- Categorical Raises ------ TypeError if the dtypes are not compatible",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\indexes\\category.py",
    "ast_data": "FunctionDef name:_is_dtype_compat arg:self arg:other arguments arg arg If Call Assign Call Assign Call If Call Raise Call If Raise Call Assign Assign Call Assign Call If Call Call Raise Call Assign If Call Compare Call Call Raise Call Return return:yes"
  },
  {
    "library": "sphinx",
    "name": "is_url",
    "source_code": "def is_url(url: str) -> bool:\n    return bool(url) and '://' in url",
    "docstring": "Check *url* is URL or not.",
    "type": "function",
    "file_path": "sphinx\\sphinx\\util\\_uri.py",
    "ast_data": "FunctionDef name:is_url arg:url arguments arg Return return:yes BoolOp Call Compare"
  },
  {
    "library": "tensorflow",
    "name": "write_metrics_raw",
    "source_code": "def write_metrics_raw(out: TextIO, metrics: list[list[str]]):\n    for _, value, unit in metrics:\n        out.write(f'{value} {unit}\\n')",
    "docstring": "Formats metrics in raw.",
    "type": "function",
    "file_path": "tensorflow\\third_party\\xla\\xla\\backends\\gpu\\codegen\\tools\\ncu_rep_lib.py",
    "ast_data": "FunctionDef name:write_metrics_raw arg:out arg:metrics arguments arg arg For Call"
  },
  {
    "library": "pytorch",
    "name": "EqualizationQConfig",
    "source_code": "class EqualizationQConfig(namedtuple('EqualizationQConfig', ['input_activation', 'weight'])):\n    __slots__ = ()\n\n    def __new__(cls, input_activation=torch.nn.Identity, weight=torch.nn.Identity):\n        if isinstance(input_activation, nn.Module) or isinstance(weight, nn.Module):\n            raise ValueError('EqualizationQConfig received observer instance, please pass observer class instead. ' + 'Use MyObserver.with_args(x=1) to override arguments to constructor if needed')\n        self = super().__new__(cls, input_activation, weight)\n        return self",
    "docstring": "Describes how to quantize a layer or a part of the network specifically for input-weight equalization by providing settings (observer classes) for inputs, outputs, and weights. Note that EqualizationQConfig needs to contain observer **classes** (like MinMaxObserver) or a callable that returns instances on invocation, not the concrete observer instances themselves. Quantization function will instantiate observers multiple times for each of the layers. Observer classes have usually reasonable default arguments, but they can be overwritten with method (that behaves like functools.partial): my_qconfig = EqualizationQConfig(input_activation=_InputEqualizationObserver.with_args(dtype=torch.qint8), weight=_WeightEqualizationObserver.with_args(dtype=torch.qint8))",
    "type": "class",
    "file_path": "pytorch\\torch\\ao\\quantization\\fx\\_equalize.py",
    "ast_data": "ClassDef name:EqualizationQConfig Call Assign FunctionDef name:__new__ arg:cls arg:input_activation arg:weight arguments arg arg arg If BoolOp Call Call Raise Call Assign Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "lookup",
    "source_code": "def lookup(self, keys, name=None):\n    raise NotImplementedError",
    "docstring": "Looks up in a table, outputs the corresponding values.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\lookup_ops.py",
    "ast_data": "FunctionDef name:lookup arg:self arg:keys arg:name arguments arg arg arg Raise"
  },
  {
    "library": "sphinx",
    "name": "format_type",
    "source_code": "def format_type(self, type_: str) -> tuple[nodes.field, list[system_message]]:\n    parsed, msgs = self.parse_inline(type_, lineno=self.lineno)\n    field = nodes.field('', nodes.field_name('', _('Type')), nodes.field_body('', *parsed))\n    return (field, msgs)",
    "docstring": "Formats the `` option.",
    "type": "method",
    "file_path": "sphinx\\sphinx\\domains\\std\\__init__.py",
    "ast_data": "FunctionDef name:format_type arg:self arg:type_ arguments arg arg Assign Call Assign Call Call Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "convert_to_symint",
    "source_code": "def convert_to_symint(i: Union[int, sympy.Expr]) -> Union[int, torch.SymInt]:\n    from .virtualized import V\n    return i if isinstance(i, int) else int(i) if isinstance(i, sympy.Integer) else V.graph.sizevars.shape_env.create_symintnode(i, hint=None)",
    "docstring": "Like convert_shape_to_symint, but operates on a single expression.",
    "type": "function",
    "file_path": "pytorch\\torch\\_inductor\\utils.py",
    "ast_data": "FunctionDef name:convert_to_symint arg:i arguments arg Return return:yes Call Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_FunctionConverterDataInEager",
    "source_code": "class _FunctionConverterDataInEager(_FunctionConverterData):\n\n    def _eval(self, tensor):\n        return tensor.numpy()",
    "docstring": "Container for ConcreteFunction-based conversion data in Eager mode.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\convert_to_constants.py",
    "ast_data": "ClassDef name:_FunctionConverterDataInEager FunctionDef name:_eval arg:self arg:tensor arguments arg arg Return return:yes Call"
  },
  {
    "library": "django",
    "name": "__init__",
    "source_code": "def __init__(self, params):\n    params = params.copy()\n    self.name = params.pop('NAME')\n    self.dirs = list(params.pop('DIRS'))\n    self.app_dirs = params.pop('APP_DIRS')\n    if params:\n        raise ImproperlyConfigured('Unknown parameters: {}'.format(', '.join(params)))",
    "docstring": "Initialize the template engine. is a dict of configuration settings.",
    "type": "method",
    "file_path": "django\\django\\template\\backends\\base.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:params arguments arg arg Assign Call Assign Call Assign Call Call Assign Call If Raise Call Call Call"
  },
  {
    "library": "pandas",
    "name": "validate_endpoints",
    "source_code": "def validate_endpoints(closed: str | None) -> tuple[bool, bool]:\n    left_closed = False\n    right_closed = False\n    if closed is None:\n        left_closed = True\n        right_closed = True\n    elif closed == 'left':\n        left_closed = True\n    elif closed == 'right':\n        right_closed = True\n    else:\n        raise ValueError(\"Closed has to be either 'left', 'right' or None\")\n    return (left_closed, right_closed)",
    "docstring": "Check that the argument is among [None, \"left\", \"right\"] Parameters ---------- closed : {None, \"left\", \"right\"} Returns ------- left_closed : bool right_closed : bool Raises ------ ValueError : if argument is not among valid values",
    "type": "function",
    "file_path": "pandas\\pandas\\util\\_validators.py",
    "ast_data": "FunctionDef name:validate_endpoints arg:closed arguments arg Assign Assign If Compare Assign Assign If Compare Assign If Compare Assign Raise Call Return return:yes"
  },
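A usage sketch; note the import path is a private pandas module, so it is not a stable public API:

```python
# The validator maps the `closed` argument to (left_closed, right_closed).
from pandas.util._validators import validate_endpoints

print(validate_endpoints(None))     # (True, True)
print(validate_endpoints("left"))   # (True, False)
print(validate_endpoints("right"))  # (False, True)
```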
  {
    "library": "tensorflow",
    "name": "_maybe_assert_valid_concentration",
    "source_code": "def _maybe_assert_valid_concentration(self, concentration, validate_args):\n    if not validate_args:\n        return concentration\n    concentration = distribution_util.embed_check_categorical_event_shape(concentration)\n    return control_flow_ops.with_dependencies([check_ops.assert_positive(concentration, message='Concentration parameter must be positive.')], concentration)",
    "docstring": "Checks the validity of the concentration parameter.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\distributions\\dirichlet_multinomial.py",
    "ast_data": "FunctionDef name:_maybe_assert_valid_concentration arg:self arg:concentration arg:validate_args arguments arg arg arg If Return return:yes Assign Call Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "as_default",
    "source_code": "def as_default(self, step=None):\n    if context.executing_eagerly() and self._closed:\n        raise RuntimeError(f'SummaryWriter {self!r} is already closed')\n    return super().as_default(step)",
    "docstring": "See .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\summary_ops_v2.py",
    "ast_data": "FunctionDef name:as_default arg:self arg:step arguments arg arg If BoolOp Call Raise Call Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "_OverlapStatus",
    "source_code": "class _OverlapStatus(enum.IntEnum):\n    UNINITIALIZED = 0\n    DDP_HAS_REBUILT_BUCKETS = 1\n    INITIALIZED = 2",
    "docstring": "Define possible statuses that :class: can be in when overlapping with :class:. Attributes: ``: The ZeRO instance is fully initialized and can now optimize parameters.",
    "type": "class",
    "file_path": "pytorch\\torch\\distributed\\optim\\zero_redundancy_optimizer.py",
    "ast_data": "ClassDef name:_OverlapStatus Assign Assign Assign"
  },
  {
    "library": "tensorflow",
    "name": "_create_op_internal",
    "source_code": "def _create_op_internal(self, op_type, inputs, dtypes=None, input_types=None, name=None, attrs=None, op_def=None, compute_device=True) -> 'Operation':\n    self._check_not_finalized()\n    if name is None:\n        name = op_type\n    if name and name[-1] == '/':\n        name = name_from_scope_name(name)\n    else:\n        name = self.unique_name(name)\n    node_def = _NodeDef(op_type, name, attrs)\n    input_ops = set((t.op for t in inputs))\n    control_inputs = self._control_dependencies_for_inputs(input_ops)\n    with self._mutation_lock():\n        ret = Operation.from_node_def(node_def, self, inputs=inputs, output_types=dtypes, control_inputs=control_inputs, input_types=input_types, original_op=self._default_original_op, op_def=op_def)\n        self._create_op_helper(ret, compute_device=compute_device)\n    return ret",
    "docstring": "Creates an in this graph. Implements without the overhead of the deprecation wrapper. Args: op_type: The type to create. This corresponds to the field for the proto that defines the operation. inputs: A list of objects that will be inputs to the . dtypes: (Optional) A list of objects that will be the types of the tensors that the operation produces. input_types: (Optional.) A list of s that will be the types of the tensors that the operation consumes. By default, uses the base of each input in . Operations that expect reference-typed inputs must specify explicitly. name: (Optional.) A string name for the operation. If not specified, a name is generated based on . attrs: (Optional.) A dictionary where the key is the attribute name (a string) and the value is the respective attribute of the proto that will represent the operation (an proto). op_def: (Optional.) The proto that describes the that the operation will have. compute_device: (Optional.) If True, device functions will be executed to compute the device property of the Operation. Raises: ValueError: if colocation conflicts with existing device assignment. Returns: An object.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\ops.py",
    "ast_data": "FunctionDef name:_create_op_internal arg:self arg:op_type arg:inputs arg:dtypes arg:input_types arg:name arg:attrs arg:op_def arg:compute_device arguments arg arg arg arg arg arg arg arg arg Call If Compare Assign If BoolOp Compare Assign Call Assign Call Assign Call Assign Call Assign Call With Call Assign Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_dedup_strings",
    "source_code": "def _dedup_strings(device_strs):\n    new_device_strs = []\n    for device_str, vals in itertools.groupby(device_strs):\n        num = len(list(vals))\n        if num == 1:\n            new_device_strs.append(device_str)\n        else:\n            new_device_strs.append('%s (x%d)' % (device_str, num))\n    return new_device_strs",
    "docstring": "Groups together consecutive identical strings. For example, given: ['GPU 1', 'GPU 2', 'GPU 2', 'GPU 3', 'GPU 3', 'GPU 3'] This function returns: ['GPU 1', 'GPU 2 (x2)', 'GPU 3 (x3)'] Args: device_strs: A list of strings, each representing a device. Returns: A copy of the input, but identical consecutive strings are merged into a single string.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\mixed_precision\\device_compatibility_check.py",
    "ast_data": "FunctionDef name:_dedup_strings arg:device_strs arguments arg Assign For Call Assign Call Call If Compare Call Call Return return:yes"
  },
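The grouping is ordinary `itertools.groupby` over consecutive runs, as this standalone sketch shows:

```python
# Merge consecutive identical device strings, annotating repeat counts.
import itertools

devices = ['GPU 1', 'GPU 2', 'GPU 2', 'GPU 3', 'GPU 3', 'GPU 3']
out = []
for name, run in itertools.groupby(devices):
    n = len(list(run))
    out.append(name if n == 1 else '%s (x%d)' % (name, n))
print(out)  # ['GPU 1', 'GPU 2 (x2)', 'GPU 3 (x3)']
```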
  {
    "library": "pytorch",
    "name": "FsspecWriter",
    "source_code": "class FsspecWriter(FileSystemWriter):\n\n    def __init__(self, path: Union[str, os.PathLike], single_file_per_rank: bool=True, sync_files: bool=True, thread_count: int=1, per_thread_copy_ahead: int=10000000, overwrite: bool=True, _extensions: Optional[Sequence[StreamTransformExtension]]=None, serialization_format: SerializationFormat=SerializationFormat.TORCH_SAVE, **kwargs) -> None:\n        super().__init__(path, single_file_per_rank, sync_files, thread_count, per_thread_copy_ahead, overwrite=overwrite, _extensions=_extensions, serialization_format=serialization_format)\n        self.fs = FileSystem()\n        self.path = self.fs.init_path(path, **kwargs)\n\n    @classmethod\n    def validate_checkpoint_id(cls, checkpoint_id: Union[str, os.PathLike]) -> bool:\n        return FileSystem.validate_checkpoint_id(checkpoint_id)",
    "docstring": "Basic implementation of StorageWriter using FFspec. This implementation makes the following assumptions and simplifications: * The checkpoint path is an empty or non-existing directory. * File creation is atomic The checkpoint consist of one file per write request plus a file with the serialized metadata.",
    "type": "class",
    "file_path": "pytorch\\torch\\distributed\\checkpoint\\_fsspec_filesystem.py",
    "ast_data": "ClassDef name:FsspecWriter FunctionDef name:__init__ arg:self arg:path arg:single_file_per_rank arg:sync_files arg:thread_count arg:per_thread_copy_ahead arg:overwrite arg:_extensions arg:serialization_format arguments arg arg arg arg arg arg arg arg arg arg Call Call Assign Call Assign Call FunctionDef name:validate_checkpoint_id arg:cls arg:checkpoint_id arguments arg arg Return return:yes Call"
  },
  {
    "library": "kornia",
    "name": "_validate_input_shape",
    "source_code": "def _validate_input_shape(input: Tensor, channel_index: int, number: int) -> bool:\n    return input.shape[channel_index] == number",
    "docstring": "Validate if an input has the right shape. e.g. to check if an input is channel first. If channel first, the second channel of an RGB input shall be fixed to 3. To verify using: _validate_input_shape(input, 1, 3) Args: input: Tensor channel_index: int number: int Returns: bool",
    "type": "function",
    "file_path": "kornia\\kornia\\augmentation\\utils\\helpers.py",
    "ast_data": "FunctionDef name:_validate_input_shape arg:input arg:channel_index arg:number arguments arg arg arg Return return:yes Compare"
  },
  {
    "library": "pytorch",
    "name": "store",
    "source_code": "def store(self, name: str, index: sympy.Expr, value: CSEVariable, mode: StoreMode=None) -> None:\n    assert isinstance(value, HalideCSEVariable)\n    var = self.args.output(name)\n    index = self.prepare_indexing(index)\n    var, dims = self.indexing_to_dimensions(var, index, True)\n    if self.is_indirect_indexing(index) or mode is not None:\n        replacements = self.setup_dom_indexing()\n        index_str = self.make_index_str(dims, replacements)\n        value_str = value.subs_str(replacements)\n        undef_dims = ', '.join(['hl.Var()'] * len(dims)) or '()'\n        self.body.writeline(DeferredLine(name, f'{var}[{undef_dims}] = hl.undef({var}.type())'))\n    else:\n        index_str = self.make_index_str(dims, zero_vars=True)\n        value_str = str(value)\n    dtype = V.graph.get_dtype(name)\n    if mode is None:\n        line = f'{var}[{index_str}] = hl.cast({halide_type(dtype)}, {value_str})'\n    elif mode == 'atomic_add':\n        line = f'{var}[{index_str}] += hl.cast({halide_type(dtype)}, {value_str})'\n    else:\n        raise NotImplementedError(f'store mode={mode}')\n    self.body.writeline(DeferredLine(name, line))",
    "docstring": "Codegen a store to an OutputBuffer",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\codegen\\halide.py",
    "ast_data": "FunctionDef name:store arg:self arg:name arg:index arg:value arg:mode arguments arg arg arg arg arg Call Assign Call Assign Call Assign Call If BoolOp Call Compare Assign Call Assign Call Assign Call Assign BoolOp Call Call Call Call Assign Call Assign Call Assign Call If Compare Assign Call If Compare Assign Call Raise Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "use_test_undeclared_outputs_dir",
    "source_code": "def use_test_undeclared_outputs_dir(self):\n    return self.is_flag_on(FLAG_NAME_USE_TEST_UNDECLARED_OUTPUTS_DIR)",
    "docstring": "Decides the output directory of the report and trace files. Args: None. Returns: True if the output files should be written to the test-undeclared-outputs-directory defined via an env variable.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\tpu\\tensor_tracer_flags.py",
    "ast_data": "FunctionDef name:use_test_undeclared_outputs_dir arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "pop",
    "source_code": "def pop(self, item: Hashable) -> Series:\n    return super().pop(item=item)",
    "docstring": "Return item and drop it from DataFrame. Raise KeyError if not found. Parameters ---------- item : label Label of column to be popped. Returns ------- Series Series representing the item that is dropped. See Also -------- DataFrame.drop: Drop specified labels from rows or columns. DataFrame.drop_duplicates: Return DataFrame with duplicate rows removed. Examples -------- >>> df = pd.DataFrame( ... [ ... (\"falcon\", \"bird\", 389.0), ... (\"parrot\", \"bird\", 24.0), ... (\"lion\", \"mammal\", 80.5), ... (\"monkey\", \"mammal\", np.nan), ... ], ... columns=(\"name\", \"class\", \"max_speed\"), ... ) >>> df name class max_speed 0 falcon bird 389.0 1 parrot bird 24.0 2 lion mammal 80.5 3 monkey mammal NaN >>> df.pop(\"class\") 0 bird 1 bird 2 mammal 3 mammal Name: class, dtype: object >>> df name max_speed 0 falcon 389.0 1 parrot 24.0 2 lion 80.5 3 monkey NaN",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\frame.py",
    "ast_data": "FunctionDef name:pop arg:self arg:item arguments arg arg Return return:yes Call Call"
  },
  {
    "library": "matplotlib",
    "name": "split_pulls",
    "source_code": "def split_pulls(all_issues, project='matplotlib/matplotlib'):\n    pulls = []\n    issues = []\n    for i in all_issues:\n        if is_pull_request(i):\n            pull = get_pull_request(project, i['number'], auth=True)\n            pulls.append(pull)\n        else:\n            issues.append(i)\n    return (issues, pulls)",
    "docstring": "Split a list of closed issues into non-PR Issues and Pull Requests.",
    "type": "function",
    "file_path": "matplotlib\\tools\\github_stats.py",
    "ast_data": "FunctionDef name:split_pulls arg:all_issues arg:project arguments arg arg Assign Assign For If Call Assign Call Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "isbuiltin",
    "source_code": "def isbuiltin(f):\n    if id(f) in _BUILTIN_FUNCTION_IDS:\n        return True\n    elif isinstance(f, types.BuiltinFunctionType):\n        return True\n    elif inspect.isbuiltin(f):\n        return True\n    elif f is eval:\n        return True\n    else:\n        return False",
    "docstring": "Returns True if the argument is a built-in function.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\autograph\\pyct\\inspect_utils.py",
    "ast_data": "FunctionDef name:isbuiltin arg:f arguments arg If Compare Call Return return:yes If Call Return return:yes If Call Return return:yes If Compare Return return:yes Return return:yes"
  },
  {
    "library": "kornia",
    "name": "yuv_to_rgb",
    "source_code": "def yuv_to_rgb(image: Tensor) -> Tensor:\n    if not isinstance(image, Tensor):\n        raise TypeError(f'Input type is not a Tensor. Got {type(image)}')\n    if image.dim() < 3 or image.shape[-3] != 3:\n        raise ValueError(f'Input size must have a shape of (*, 3, H, W). Got {image.shape}')\n    y: Tensor = image[..., 0, :, :]\n    u: Tensor = image[..., 1, :, :]\n    v: Tensor = image[..., 2, :, :]\n    r: Tensor = y + 1.14 * v\n    g: Tensor = y + -0.396 * u - 0.581 * v\n    b: Tensor = y + 2.029 * u\n    out: Tensor = torch.stack([r, g, b], -3)\n    return out",
    "docstring": "Convert an YUV image to RGB. The image data is assumed to be in the range of :math: for luma (Y). The ranges of U and V are :math: and :math:, respectively. YUV formula follows M/PAL values (see _, Table 2, items 2.5 and 2.6). Args: image: YUV Image to be converted to RGB with shape :math:. Returns: RGB version of the image with shape :math:. Example: >>> input = torch.rand(2, 3, 4, 5) >>> output = yuv_to_rgb(input) # 2x3x4x5",
    "type": "function",
    "file_path": "kornia\\kornia\\color\\yuv.py",
    "ast_data": "FunctionDef name:yuv_to_rgb arg:image arguments arg If Call Raise Call Call If BoolOp Compare Call Compare Raise Call Call Return return:yes"
  },
  {
    "library": "pandas",
    "name": "describe_numeric_1d",
    "source_code": "def describe_numeric_1d(series: Series, percentiles: Sequence[float]) -> Series:\n    from pandas import Series\n    formatted_percentiles = format_percentiles(percentiles)\n    if len(percentiles) == 0:\n        quantiles = []\n    else:\n        quantiles = series.quantile(percentiles).tolist()\n    stat_index = ['count', 'mean', 'std', 'min'] + formatted_percentiles + ['max']\n    d = [series.count(), series.mean(), series.std(), series.min()] + quantiles + [series.max()]\n    dtype: DtypeObj | None\n    if isinstance(series.dtype, ExtensionDtype):\n        if isinstance(series.dtype, ArrowDtype):\n            if series.dtype.kind == 'm':\n                dtype = None\n            else:\n                import pyarrow as pa\n                dtype = ArrowDtype(pa.float64())\n        else:\n            dtype = Float64Dtype()\n    elif series.dtype.kind in 'iufb':\n        dtype = np.dtype('float')\n    else:\n        dtype = None\n    return Series(d, index=stat_index, name=series.name, dtype=dtype)",
    "docstring": "Describe series containing numerical data. Parameters ---------- series : Series Series to be described. percentiles : list-like of numbers The percentiles to include in the output.",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\methods\\describe.py",
    "ast_data": "FunctionDef name:describe_numeric_1d arg:series arg:percentiles arguments arg arg Assign Call If Compare Call Assign Assign Call Call Assign Assign Call Call Call Call Call If Call If Call If Compare Assign Assign Call Call Assign Call If Compare Assign Call Assign Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "_score",
    "source_code": "def _score(self, method_caller, estimator, X, y_true, **kwargs):\n    self._warn_overlap(message='There is an overlap between set kwargs of this scorer instance and passed metadata. Please pass them either as kwargs to `make_scorer` or metadata, but not both.', kwargs=kwargs)\n    pos_label = None if is_regressor(estimator) else self._get_pos_label()\n    response_method = _check_response_method(estimator, self._response_method)\n    y_pred = method_caller(estimator, _get_response_method_name(response_method), X, pos_label=pos_label)\n    scoring_kwargs = {**self._kwargs, **kwargs}\n    return self._sign * self._score_func(y_true, y_pred, **scoring_kwargs)",
    "docstring": "Evaluate the response method of on and . Parameters ---------- method_caller : callable Returns predictions given an estimator, method name, and other arguments, potentially caching results. estimator : object Trained estimator to use for scoring. X : {array-like, sparse matrix} Test data that will be fed to clf.decision_function or clf.predict_proba. y_true : array-like Gold standard target values for X. These must be class labels, not decision function values. **kwargs : dict Other parameters passed to the scorer. Refer to :func: for more details. Returns ------- score : float Score function applied to prediction of estimator on X.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\metrics\\_scorer.py",
    "ast_data": "FunctionDef name:_score arg:self arg:method_caller arg:estimator arg:X arg:y_true arguments arg arg arg arg arg arg Call Assign Call Call Assign Call Assign Call Call Assign Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "generate_pip_from_conda",
    "source_code": "def generate_pip_from_conda(conda_path: pathlib.Path, pip_path: pathlib.Path, compare: bool=False) -> bool:\n    with conda_path.open() as file:\n        deps = yaml.safe_load(file)['dependencies']\n    pip_deps = []\n    for dep in deps:\n        if isinstance(dep, str):\n            conda_dep = conda_package_to_pip(dep)\n            if conda_dep:\n                pip_deps.append(conda_dep)\n        elif isinstance(dep, dict) and len(dep) == 1 and ('pip' in dep):\n            pip_deps.extend(dep['pip'])\n        else:\n            raise ValueError(f'Unexpected dependency {dep}')\n    header = f'# This file is auto-generated from {conda_path.name}, do not modify.\\n# See that file for comments about the need/usage of each dependency.\\n\\n'\n    pip_content = header + '\\n'.join(pip_deps) + '\\n'\n    with open(pathlib.Path(conda_path.parent, 'pyproject.toml'), 'rb') as fd:\n        meta = tomllib.load(fd)\n    for requirement in meta['build-system']['requires']:\n        if 'setuptools' in requirement:\n            pip_content += requirement\n            pip_content += '\\n'\n    if compare:\n        with pip_path.open() as file:\n            return pip_content != file.read()\n    with pip_path.open('w') as file:\n        file.write(pip_content)\n    return False",
    "docstring": "Generate the pip dependencies file from the conda file, or compare that they are synchronized (`environment.ymlrequirements-dev.txt`). Returns ------- bool True if the comparison fails, False otherwise",
    "type": "function",
    "file_path": "pandas\\scripts\\generate_pip_deps_from_conda.py",
    "ast_data": "FunctionDef name:generate_pip_from_conda arg:conda_path arg:pip_path arg:compare arguments arg arg arg With Call Assign Call Assign For If Call Assign Call If Call If BoolOp Call Compare Call Compare Call Raise Call Assign Assign Call With Call Call Assign Call For If Compare If With Call Return return:yes Compare Call With Call Call Return return:yes"
  },
  {
    "library": "cherrypy",
    "name": "get_error_page",
    "source_code": "def get_error_page(self, *args, **kwargs):\n    return get_error_page(*args, **kwargs)",
    "docstring": "Compose an HTML page with error information.",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\_cperror.py",
    "ast_data": "FunctionDef name:get_error_page arg:self arguments arg arg arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_count_condition",
    "source_code": "def _count_condition(values, weights=None, metrics_collections=None, updates_collections=None):\n    check_ops.assert_type(values, dtypes.bool)\n    count = metric_variable([], dtypes.float32, name='count')\n    values = math_ops.cast(values, dtypes.float32)\n    if weights is not None:\n        with ops.control_dependencies((check_ops.assert_rank_in(weights, (0, array_ops.rank(values))),)):\n            weights = math_ops.cast(weights, dtypes.float32)\n            values = math_ops.multiply(values, weights)\n    value_tensor = _aggregate_variable(count, metrics_collections)\n    update_op = state_ops.assign_add(count, math_ops.reduce_sum(values))\n    if updates_collections:\n        ops.add_to_collections(updates_collections, update_op)\n    return (value_tensor, update_op)",
    "docstring": "Sums the weights of cases where the given values are True. If is , weights default to 1. Use weights of 0 to mask values. Args: values: A of arbitrary size. weights: Optional whose rank is either 0, or the same rank as , and must be broadcastable to (i.e., all dimensions must be either , or the same as the corresponding dimension). metrics_collections: An optional list of collections that the metric value variable should be added to. updates_collections: An optional list of collections that the metric update ops should be added to. Returns: value_tensor: A representing the current value of the metric. update_op: An operation that accumulates the error from a batch of data. Raises: ValueError: If is not and its shape doesn't match , or if either or are not a list or tuple.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\metrics_impl.py",
    "ast_data": "FunctionDef name:_count_condition arg:values arg:weights arg:metrics_collections arg:updates_collections arguments arg arg arg arg Call Assign Call Assign Call If Compare With Call Call Call Assign Call Assign Call Assign Call Assign Call Call If Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "log_tpu_embedding_configuration",
    "source_code": "def log_tpu_embedding_configuration(config: tpu_embedding_configuration_pb2.TPUEmbeddingConfiguration) -> None:\n    logging.info('Beginning log of TPUEmbeddingConfiguration.')\n    for line in str(config).splitlines():\n        logging.info(line)\n    logging.info('Done with log of TPUEmbeddingConfiguration.')",
    "docstring": "Logs a TPUEmbeddingConfiguration proto across multiple statements. Args: config: TPUEmbeddingConfiguration proto to log. Necessary because logging.info has a maximum length to each log statement, which particularly large configs can exceed.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\tpu\\tpu_embedding_v2_utils.py",
    "ast_data": "FunctionDef name:log_tpu_embedding_configuration arg:config arguments arg Call For Call Call Call Call"
  },
  {
    "library": "matplotlib",
    "name": "rotation",
    "source_code": "@property\ndef rotation(self):\n    return np.rad2deg(self._rotation)",
    "docstring": "Rotation in degree in interval [-45°, 45°]. The rotation is limited in range to keep the implementation simple.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\widgets.py",
    "ast_data": "FunctionDef name:rotation arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_get_context_device_type",
    "source_code": "def _get_context_device_type():\n    current_device = context.context().device_name\n    if current_device is None:\n        return None\n    return device.DeviceSpec.from_string(current_device).device_type",
    "docstring": "Parses the current context and returns the device type, eg CPU/GPU.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\ctc_ops.py",
    "ast_data": "FunctionDef name:_get_context_device_type arguments Assign Call If Compare Return return:no Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "FunctionID",
    "source_code": "@dataclasses.dataclass(frozen=True)\nclass FunctionID:\n    id: int",
    "docstring": "Unique counter of a function wrapped in cudagraphify_impl",
    "type": "class",
    "file_path": "pytorch\\torch\\_inductor\\cudagraph_utils.py",
    "ast_data": "ClassDef name:FunctionID Call"
  },
  {
    "library": "pytorch",
    "name": "set_detect_anomaly",
    "source_code": "class set_detect_anomaly:\n\n    def __init__(self, mode: bool, check_nan: bool=True) -> None:\n        self.prev = torch.is_anomaly_enabled()\n        self.prev_check_nan = torch.is_anomaly_check_nan_enabled()\n        torch.set_anomaly_enabled(mode, check_nan)\n\n    def __enter__(self) -> None:\n        pass\n\n    def __exit__(self, *args: object) -> None:\n        torch.set_anomaly_enabled(self.prev, self.prev_check_nan)",
    "docstring": "Context-manager that sets the anomaly detection for the autograd engine on or off. `mode`). check_nan (bool): Flag whether to raise an error when the backward generate \"nan\"",
    "type": "class",
    "file_path": "pytorch\\torch\\autograd\\anomaly_mode.py",
    "ast_data": "ClassDef name:set_detect_anomaly FunctionDef name:__init__ arg:self arg:mode arg:check_nan arguments arg arg arg Assign Call Assign Call Call FunctionDef name:__enter__ arg:self arguments arg FunctionDef name:__exit__ arg:self arguments arg arg Call"
  },
  {
    "library": "sphinx",
    "name": "on_builder_inited",
    "source_code": "def on_builder_inited(app: Sphinx) -> None:\n    domain = app.env.domains['duration']\n    domain.clear()",
    "docstring": "Initialize DurationDomain on bootstrap. This clears the results of the last build.",
    "type": "function",
    "file_path": "sphinx\\sphinx\\ext\\duration.py",
    "ast_data": "FunctionDef name:on_builder_inited arg:app arguments arg Assign Call"
  },
  {
    "library": "scikit-learn",
    "name": "get_feature_names_out",
    "source_code": "def get_feature_names_out(self, input_features=None):\n    check_is_fitted(self, 'n_features_in_')\n    input_features = _check_feature_names_in(self, input_features)\n    names = self.initial_imputer_.get_feature_names_out(input_features)\n    return self._concatenate_indicator_feature_names_out(names, input_features)",
    "docstring": "Get output feature names for transformation. Parameters ---------- input_features : array-like of str or None, default=None Input features. - If is , then is used as feature names in. If is not defined, then the following input feature names are generated: . - If is an array-like, then must match if is defined. Returns ------- feature_names_out : ndarray of str objects Transformed feature names.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\impute\\_iterative.py",
    "ast_data": "FunctionDef name:get_feature_names_out arg:self arg:input_features arguments arg arg Call Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "sphinx",
    "name": "LiteralBlockTransform",
    "source_code": "class LiteralBlockTransform(SphinxPostTransform):\n    default_priority = 400\n    formats = ('latex',)\n\n    def run(self, **kwargs: Any) -> None:\n        matcher = NodeMatcher(nodes.container, literal_block=True)\n        for node in matcher.findall(self.document):\n            newnode = captioned_literal_block('', *node.children, **node.attributes)\n            node.replace_self(newnode)",
    "docstring": "Replace container nodes for literal_block by captioned_literal_block.",
    "type": "class",
    "file_path": "sphinx\\sphinx\\builders\\latex\\transforms.py",
    "ast_data": "ClassDef name:LiteralBlockTransform Assign Assign FunctionDef name:run arg:self arguments arg arg Assign Call For Call Assign Call Call"
  },
  {
    "library": "pytorch",
    "name": "_reorder_tensors_as",
    "source_code": "def _reorder_tensors_as(tensors, ordered_tensors):\n    type_dict = defaultdict(list)\n    for tensor in tensors:\n        type_dict[tensor.type()].append(tensor)\n    type_dict_ = {t: iter(coll) for t, coll in type_dict.items()}\n    return tuple((next(type_dict_[tensor.type()]) for tensor in ordered_tensors))",
    "docstring": "Assume that tensors are of same order as ordered_tensors within their types, e.g., from _take_tensors. Reorder them to be of same order as ordered_tensors. Args: tensors (Iterable[Tensor]): tensors to be reordered. They should be of the same order as ordered_tensors within their own types. ordered_tensors (Iterable[Tensor]): tensors whose order will be the reference. Returns: Ordered tuple of tensors with contents from tensors and order of ordered_tensors.",
    "type": "function",
    "file_path": "pytorch\\torch\\_utils.py",
    "ast_data": "FunctionDef name:_reorder_tensors_as arg:tensors arg:ordered_tensors arguments arg arg Assign Call For Call Call Assign Call Call Return return:yes Call Call Call"
  },
  {
    "library": "pandas",
    "name": "_upsample",
    "source_code": "def _upsample(self, method, limit: int | None=None, fill_value=None):\n    if self._from_selection:\n        raise ValueError('Upsampling from level= or on= selection is not supported, use .set_index(...) to explicitly set index to datetime-like')\n    ax = self.ax\n    obj = self._selected_obj\n    binner = self.binner\n    res_index = self._adjust_binner_for_upsample(binner)\n    if limit is None and to_offset(ax.inferred_freq) == self.freq and (len(obj) == len(res_index)):\n        result = obj.copy()\n        result.index = res_index\n    else:\n        if method == 'asfreq':\n            method = None\n        result = obj.reindex(res_index, method=method, limit=limit, fill_value=fill_value)\n    return self._wrap_result(result)",
    "docstring": "Parameters ---------- method : string {'backfill', 'bfill', 'pad', 'ffill', 'asfreq'} method for upsampling limit : int, default None Maximum size gap to fill when reindexing fill_value : scalar, default None Value to use for missing values",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\resample.py",
    "ast_data": "FunctionDef name:_upsample arg:self arg:method arg:limit arg:fill_value arguments arg arg arg arg If Raise Call Assign Assign Assign Assign Call If BoolOp Compare Compare Call Compare Call Call Assign Call Assign If Compare Assign Assign Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_assert_non_singular",
    "source_code": "def _assert_non_singular(self):\n    logging.warn('Using (possibly slow) default implementation of assert_non_singular.  Requires conversion to a dense matrix and O(N^3) operations.')\n    if self._can_use_cholesky():\n        return self.assert_positive_definite()\n    else:\n        singular_values = linalg_ops.svd(self.to_dense(), compute_uv=False)\n        cond = math_ops.reduce_max(singular_values, axis=-1) / math_ops.reduce_min(singular_values, axis=-1)\n        return check_ops.assert_less(cond, self._max_condition_number_to_be_non_singular(), message='Singular matrix up to precision epsilon.')",
    "docstring": "Private default implementation of _assert_non_singular.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\linalg\\linear_operator.py",
    "ast_data": "FunctionDef name:_assert_non_singular arg:self arguments arg Call If Call Return return:yes Call Assign Call Call Assign Call Call Return return:yes Call Call"
  },
  {
    "library": "seaborn",
    "name": "_preprocess_colors",
    "source_code": "def _preprocess_colors(self, data, colors, axis):\n    labels = None\n    if colors is not None:\n        if isinstance(colors, (pd.DataFrame, pd.Series)):\n            if not hasattr(data, 'index') and axis == 0 or (not hasattr(data, 'columns') and axis == 1):\n                axis_name = 'col' if axis else 'row'\n                msg = f\"{axis_name}_colors indices can't be matched with data indices. Provide {axis_name}_colors as a non-indexed datatype, e.g. by using `.to_numpy()``\"\n                raise TypeError(msg)\n            if axis == 0:\n                colors = colors.reindex(data.index)\n            else:\n                colors = colors.reindex(data.columns)\n            colors = colors.astype(object).fillna('white')\n            if isinstance(colors, pd.DataFrame):\n                labels = list(colors.columns)\n                colors = colors.T.values\n            else:\n                if colors.name is None:\n                    labels = ['']\n                else:\n                    labels = [colors.name]\n                colors = colors.values\n        colors = _convert_colors(colors)\n    return (colors, labels)",
    "docstring": "Preprocess {row/col}_colors to extract labels and convert colors.",
    "type": "method",
    "file_path": "seaborn\\seaborn\\matrix.py",
    "ast_data": "FunctionDef name:_preprocess_colors arg:self arg:data arg:colors arg:axis arguments arg arg arg arg Assign If Compare If Call If BoolOp BoolOp Call Compare BoolOp Call Compare Assign Assign Raise Call If Compare Assign Call Assign Call Assign Call Call If Call Assign Call Assign If Compare Assign Assign Assign Assign Call Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "_message_with_time",
    "source_code": "def _message_with_time(source, message, time):\n    start_message = '[%s] ' % source\n    if time > 60:\n        time_str = '%4.1fmin' % (time / 60)\n    else:\n        time_str = ' %5.1fs' % time\n    end_message = ' %s, total=%s' % (message, time_str)\n    dots_len = 70 - len(start_message) - len(end_message)\n    return '%s%s%s' % (start_message, dots_len * '.', end_message)",
    "docstring": "Create one line message for logging purposes. Parameters ---------- source : str String indicating the source or the reference of the message. message : str Short message. time : int Time in seconds.",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\utils\\_user_interface.py",
    "ast_data": "FunctionDef name:_message_with_time arg:source arg:message arg:time arguments arg arg arg Assign If Compare Assign Assign Assign Assign Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_EmbeddingColumnLayer",
    "source_code": "class _EmbeddingColumnLayer(base.Layer):\n\n    def __init__(self, embedding_shape, initializer, weight_collections=None, trainable=True, name=None, **kwargs):\n        super(_EmbeddingColumnLayer, self).__init__(trainable=trainable, name=name, **kwargs)\n        self._embedding_shape = embedding_shape\n        self._initializer = initializer\n        self._weight_collections = weight_collections\n\n    def set_weight_collections(self, weight_collections):\n        self._weight_collections = weight_collections\n\n    def build(self, _):\n        self._embedding_weight_var = self.add_variable(name='embedding_weights', shape=self._embedding_shape, dtype=dtypes.float32, initializer=self._initializer, trainable=self.trainable)\n        if self._weight_collections and (not context.executing_eagerly()):\n            _add_to_collections(self._embedding_weight_var, self._weight_collections)\n        self.built = True\n\n    def call(self, _):\n        return self._embedding_weight_var",
    "docstring": "A layer that stores all the state required for a embedding column.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\feature_column\\feature_column.py",
    "ast_data": "ClassDef name:_EmbeddingColumnLayer FunctionDef name:__init__ arg:self arg:embedding_shape arg:initializer arg:weight_collections arg:trainable arg:name arguments arg arg arg arg arg arg arg Call Call Assign Assign Assign FunctionDef name:set_weight_collections arg:self arg:weight_collections arguments arg arg Assign FunctionDef name:build arg:self arg:_ arguments arg arg Assign Call If BoolOp Call Call Assign FunctionDef name:call arg:self arg:_ arguments arg arg Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "_XT_CenterStackOp",
    "source_code": "class _XT_CenterStackOp(sparse.linalg.LinearOperator):\n\n    def __init__(self, X, X_mean, sqrt_sw):\n        n_samples, n_features = X.shape\n        super().__init__(X.dtype, (n_features + 1, n_samples))\n        self.X = X\n        self.X_mean = X_mean\n        self.sqrt_sw = sqrt_sw\n\n    def _matvec(self, v):\n        v = v.ravel()\n        n_features = self.shape[0]\n        res = np.empty(n_features, dtype=self.X.dtype)\n        res[:-1] = safe_sparse_dot(self.X.T, v, dense_output=True) - self.X_mean * self.sqrt_sw.dot(v)\n        res[-1] = np.dot(v, self.sqrt_sw)\n        return res\n\n    def _matmat(self, v):\n        n_features = self.shape[0]\n        res = np.empty((n_features, v.shape[1]), dtype=self.X.dtype)\n        res[:-1] = safe_sparse_dot(self.X.T, v, dense_output=True) - self.X_mean[:, None] * self.sqrt_sw.dot(v)\n        res[-1] = np.dot(self.sqrt_sw, v)\n        return res",
    "docstring": "Behaves as transposed centered and scaled X with an intercept column. This operator behaves as np.hstack([X - sqrt_sw[:, None] * X_mean, sqrt_sw[:, None]]).T",
    "type": "class",
    "file_path": "scikit-learn\\sklearn\\linear_model\\_ridge.py",
    "ast_data": "ClassDef name:_XT_CenterStackOp FunctionDef name:__init__ arg:self arg:X arg:X_mean arg:sqrt_sw arguments arg arg arg arg Assign Call Call Assign Assign Assign FunctionDef name:_matvec arg:self arg:v arguments arg arg Assign Call Assign Assign Call Assign Call Call Assign Call Return return:yes FunctionDef name:_matmat arg:self arg:v arguments arg arg Assign Assign Call Assign Call Call Assign Call Return return:yes"
  },
  {
    "library": "numpy",
    "name": "__mul__",
    "source_code": "def __mul__(self, i):\n    return asarray(multiply(self, i))",
    "docstring": "Return (self * i), that is string multiple concatenation, element-wise. See Also -------- multiply",
    "type": "method",
    "file_path": "numpy\\numpy\\_core\\defchararray.py",
    "ast_data": "FunctionDef name:__mul__ arg:self arg:i arguments arg arg Return return:yes Call Call"
  },
  {
    "library": "matplotlib",
    "name": "_contains",
    "source_code": "def _contains(self, event):\n    return self._selection_artist.contains(event, radius=0)[0]",
    "docstring": "Return True if event is within the patch.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\widgets.py",
    "ast_data": "FunctionDef name:_contains arg:self arg:event arguments arg arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_GatherGrad",
    "source_code": "@ops.RegisterGradient('ResourceGather')\ndef _GatherGrad(op, grad):\n    handle = op.inputs[0]\n    indices = op.inputs[1]\n    params_shape = variable_shape(handle)\n    size = array_ops.expand_dims(array_ops.size(indices), 0)\n    values_shape = array_ops.concat([size, params_shape[1:]], 0)\n    values = array_ops.reshape(grad, values_shape)\n    indices = array_ops.reshape(indices, size)\n    return (indexed_slices.IndexedSlices(values, indices, params_shape), None)",
    "docstring": "Gradient for gather op.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\resource_variable_ops.py",
    "ast_data": "FunctionDef name:_GatherGrad arg:op arg:grad arguments arg arg Assign Assign Assign Call Assign Call Call Assign Call Assign Call Assign Call Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "_get_min_index",
    "source_code": "def _get_min_index(self, values: list[int], disallowed_indices: Optional[set[int]]=None) -> int:\n    min_index = -1\n    min_value = float('inf')\n    for i, value in enumerate(values):\n        if disallowed_indices and i in disallowed_indices:\n            continue\n        if value < min_value:\n            min_value = value\n            min_index = i\n    assert min_index >= 0, 'All indices are disallowed'\n    return min_index",
    "docstring": "Return `list` of values. disallowed_indices (Optional[set[int]]): indices that are disallowed from being the returned min index.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\optim\\zero_redundancy_optimizer.py",
    "ast_data": "FunctionDef name:_get_min_index arg:self arg:values arg:disallowed_indices arguments arg arg arg Assign Assign Call For Call If BoolOp Compare If Compare Assign Assign Compare Return return:yes"
  },
  {
    "library": "seaborn",
    "name": "_violin_scale_backcompat",
    "source_code": "def _violin_scale_backcompat(self, scale, scale_hue, density_norm, common_norm):\n    if scale is not deprecated:\n        density_norm = scale\n        msg = f'\\n\\nThe `scale` parameter has been renamed and will be removed in v0.15.0. Pass `density_norm={scale!r}` for the same effect.'\n        warnings.warn(msg, FutureWarning, stacklevel=3)\n    if scale_hue is not deprecated:\n        common_norm = scale_hue\n        msg = f'\\n\\nThe `scale_hue` parameter has been replaced and will be removed in v0.15.0. Pass `common_norm={not scale_hue}` for the same effect.'\n        warnings.warn(msg, FutureWarning, stacklevel=3)\n    return (density_norm, common_norm)",
    "docstring": "Provide two cycles of backcompat for scale kwargs",
    "type": "method",
    "file_path": "seaborn\\seaborn\\categorical.py",
    "ast_data": "FunctionDef name:_violin_scale_backcompat arg:self arg:scale arg:scale_hue arg:density_norm arg:common_norm arguments arg arg arg arg arg If Compare Assign Assign Call If Compare Assign Assign Call Return return:yes"
  },
  {
    "library": "scipy",
    "name": "impulse",
    "source_code": "def impulse(self, X0=None, T=None, N=None):\n    return impulse(self, X0=X0, T=T, N=N)",
    "docstring": "Return the impulse response of a continuous-time system. See for details.",
    "type": "method",
    "file_path": "scipy\\scipy\\signal\\_ltisys.py",
    "ast_data": "FunctionDef name:impulse arg:self arg:X0 arg:T arg:N arguments arg arg arg arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "squeeze",
    "source_code": "@dispatch.add_dispatch_support\n@doc_controls.do_not_generate_docs\ndef squeeze(x, axis):\n    return array_ops.squeeze(x, [axis])",
    "docstring": "Removes a 1-dimension from the tensor at index \"axis\". Args: x: A tensor or variable. axis: Axis to drop. Returns: A tensor with the same data as but reduced dimensions.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\backend.py",
    "ast_data": "FunctionDef name:squeeze arg:x arg:axis arguments arg arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "name",
    "source_code": "@property\ndef name(self):\n    return '{}_indicator'.format(self.categorical_column.name)",
    "docstring": "See base class.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\feature_column\\feature_column_v2.py",
    "ast_data": "FunctionDef name:name arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "prev_fast_len",
    "source_code": "def prev_fast_len(target, real=False):\n    pass",
    "docstring": "Find the previous fast size of input data to ``. Useful for discarding a minimal number of samples before FFT. SciPy's FFT algorithms gain their speed by a recursive divide and conquer strategy. This relies on efficient functions for small prime factors of the input length. Thus, the transforms are fastest when using composites of the prime factors handled by the fft implementation. If there are efficient functions for all radices >> from scipy import fft >>> import numpy as np >>> rng = np.random.default_rng() >>> max_len = 93059 # prime length is worst case for speed >>> a = rng.standard_normal(max_len) >>> b = fft.fft(a) Performing FFT on the maximum fast length less than max_len reduces the computation time to 1.5 ms, a speedup of 10.5 times: >>> fft.prev_fast_len(max_len, real=True) 92160 >>> c = fft.fft(a[:92160]) # discard last 899 samples",
    "type": "function",
    "file_path": "scipy\\scipy\\fft\\_helper.py",
    "ast_data": "FunctionDef name:prev_fast_len arg:target arg:real arguments arg arg"
  },
  {
    "library": "matplotlib",
    "name": "__init__",
    "source_code": "def __init__(self, offset=(0, 0), **kwargs):\n    super().__init__(offset=offset)\n    self.patch = mpatches.PathPatch([], **kwargs)",
    "docstring": "Parameters ---------- offset : (float, float), default: (0, 0) The (x, y) offset to apply to the path, in points. **kwargs All keyword arguments are passed through to the :class: constructor. The properties which cannot be overridden are \"path\", \"clip_box\" \"transform\" and \"clip_path\".",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\patheffects.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:offset arguments arg arg arg Call Call Assign Call"
  },
  {
    "library": "pandas",
    "name": "adjoin",
    "source_code": "def adjoin(space: int, *lists: list[str], **kwargs: Any) -> str:\n    strlen = kwargs.pop('strlen', len)\n    justfunc = kwargs.pop('justfunc', _adj_justify)\n    newLists = []\n    lengths = [max(map(strlen, x)) + space for x in lists[:-1]]\n    lengths.append(max(map(len, lists[-1])))\n    maxLen = max(map(len, lists))\n    for i, lst in enumerate(lists):\n        nl = justfunc(lst, lengths[i], mode='left')\n        nl = [' ' * lengths[i]] * (maxLen - len(lst)) + nl\n        newLists.append(nl)\n    toJoin = zip(*newLists)\n    return '\\n'.join((''.join(lines) for lines in toJoin))",
    "docstring": "Glues together two sets of strings using the amount of space requested. The idea is to prettify. ---------- space : int number of spaces for padding lists : str list of str which being joined strlen : callable function used to calculate the length of each str. Needed for unicode handling. justfunc : callable function used to justify str. Needed for unicode handling.",
    "type": "function",
    "file_path": "pandas\\pandas\\io\\formats\\printing.py",
    "ast_data": "FunctionDef name:adjoin arg:space arguments arg arg arg Assign Call Assign Call Assign Assign Call Call Call Call Call Assign Call Call For Call Assign Call Assign Call Call Assign Call Return return:yes Call Call"
  },
  {
    "library": "django",
    "name": "InsertVar",
    "source_code": "class InsertVar:\n    types = {'AutoField': int, 'BigAutoField': int, 'SmallAutoField': int, 'IntegerField': int, 'BigIntegerField': int, 'SmallIntegerField': int, 'PositiveBigIntegerField': int, 'PositiveSmallIntegerField': int, 'PositiveIntegerField': int, 'BooleanField': int, 'FloatField': Database.DB_TYPE_BINARY_DOUBLE, 'DateTimeField': Database.DB_TYPE_TIMESTAMP, 'DateField': Database.Date, 'DecimalField': decimal.Decimal}\n\n    def __init__(self, field):\n        internal_type = getattr(field, 'target_field', field).get_internal_type()\n        self.db_type = self.types.get(internal_type, str)\n        self.bound_param = None\n\n    def bind_parameter(self, cursor):\n        self.bound_param = cursor.cursor.var(self.db_type)\n        return self.bound_param\n\n    def get_value(self):\n        return self.bound_param.getvalue()",
    "docstring": "A late-binding cursor variable that can be passed to Cursor.execute as a parameter, in order to receive the id of the row created by an insert statement.",
    "type": "class",
    "file_path": "django\\django\\db\\backends\\oracle\\utils.py",
    "ast_data": "ClassDef name:InsertVar Assign FunctionDef name:__init__ arg:self arg:field arguments arg arg Assign Call Call Assign Call Assign FunctionDef name:bind_parameter arg:self arg:cursor arguments arg arg Assign Call Return return:yes FunctionDef name:get_value arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "register_sharding_prop_rule",
    "source_code": "def register_sharding_prop_rule(self, op_overload: OpOverload, rule_func: Callable[[OpSchema], OutputSharding], schema_info: Optional[RuntimeSchemaInfo]=None):\n    self.op_to_rules[op_overload] = rule_func\n    if schema_info is not None:\n        self.op_to_schema_info[op_overload] = schema_info",
    "docstring": "Register a sharding propagation rule for an operator.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\tensor\\_sharding_prop.py",
    "ast_data": "FunctionDef name:register_sharding_prop_rule arg:self arg:op_overload arg:rule_func arg:schema_info arguments arg arg arg arg Assign If Compare Assign"
  },
  {
    "library": "tensorflow",
    "name": "merge_device",
    "source_code": "def merge_device(spec):\n    if isinstance(spec, MergeDevice):\n        return spec\n    merger = _cached_mergers.get(spec)\n    if merger:\n        return merger\n    merger = MergeDevice(spec)\n    _cached_mergers[spec] = merger\n    return merger",
    "docstring": "Returns a device function that merges devices specifications. This can be used to merge partial specifications of devices. The innermost setting for a device field takes precedence. For example: with tf.device(merge_device(\"/device:GPU:0\")) # Nodes created here have device \"/device:GPU:0\" with tf.device(merge_device(\"/job:worker\")): # Nodes created here have device \"/job:worker/device:GPU:0\" with tf.device(merge_device(\"/device:CPU:0\")): # Nodes created here have device \"/job:worker/device:CPU:0\" with tf.device(merge_device(\"/job:ps\")): # Nodes created here have device \"/job:ps/device:CPU:0\" Args: spec: A or a device spec string (partially) describing the device that should be used for all nodes created in the scope of the returned device function's with block. Returns: A MergeDevice object with the above-described behavior. Raises: ValueError: if the spec was not valid.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\device.py",
    "ast_data": "FunctionDef name:merge_device arg:spec arguments arg If Call Return return:yes Assign Call If Return return:yes Assign Call Assign Return return:yes"
  },
  {
    "library": "authlib",
    "name": "authenticate_user",
    "source_code": "def authenticate_user(self, username, password):\n    raise NotImplementedError()",
    "docstring": "Validate the resource owner password credentials using its existing password validation algorithm:: def authenticate_user(self, username, password): user = get_user_by_username(username) if user.check_password(password): return user",
    "type": "method",
    "file_path": "authlib\\authlib\\oauth2\\rfc6749\\grants\\resource_owner_password_credentials.py",
    "ast_data": "FunctionDef name:authenticate_user arg:self arg:username arg:password arguments arg arg arg Raise Call"
  },
  {
    "library": "pytorch",
    "name": "_create_operator_type_filter",
    "source_code": "def _create_operator_type_filter(operator_type: Callable) -> FilterFn:\n\n    def operator_type_filter(nodes: list[Node]):\n        num_nodes_with_operator_type = sum((node.target == operator_type for node in nodes))\n        if num_nodes_with_operator_type > 1:\n            raise NotImplementedError(f'Several nodes within a single pattern are {operator_type}.')\n        return num_nodes_with_operator_type == 1\n    return operator_type_filter",
    "docstring": "Create a filter function for a given operator type. The filter function takes a list of nodes and returns True if it contains exactly one node with the specified operator type, False otherwise. For example: linear_1: \"f32[3, 10]\" = torch.ops.aten.linear.default(...) # comes from a module with name relu: \"f32[3, 10]\" = torch.ops.aten.relu.default(linear_1); # comes from a module with name >> operator_type_filter = _create_operator_type_filter(torch.ops.aten.linear.default) >> print(operator_type_filter([relu, linear_1])) # True # These two nodes are determined by function and the second node is .",
    "type": "function",
    "file_path": "pytorch\\torch\\ao\\quantization\\quantizer\\x86_inductor_quantizer.py",
    "ast_data": "FunctionDef name:_create_operator_type_filter arg:operator_type arguments arg FunctionDef name:operator_type_filter arg:nodes arguments arg Assign Call Compare If Compare Raise Call Return return:yes Compare Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "thresholds",
    "source_code": "@property\ndef thresholds(self):\n    return list(self._thresholds)",
    "docstring": "The thresholds used for evaluating AUC.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\metrics.py",
    "ast_data": "FunctionDef name:thresholds arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "get_scale_names",
    "source_code": "def get_scale_names():\n    return sorted(_scale_mapping)",
    "docstring": "Return the names of the available scales.",
    "type": "function",
    "file_path": "matplotlib\\lib\\matplotlib\\scale.py",
    "ast_data": "FunctionDef name:get_scale_names arguments Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "forward",
    "source_code": "def forward(self):\n    forward_function = self._functions.forward(self._inference_args, self._input_tangents)\n    return (forward_function, self._inference_args + self._input_tangents)",
    "docstring": "Builds or retrieves a forward function for this call.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\polymorphic_function\\concrete_function.py",
    "ast_data": "FunctionDef name:forward arg:self arguments arg Assign Call Return return:yes"
  },
  {
    "library": "scipy",
    "name": "_order_cluster_tree",
    "source_code": "def _order_cluster_tree(Z):\n    q = deque()\n    tree = to_tree(Z)\n    q.append(tree)\n    nodes = []\n    while q:\n        node = q.popleft()\n        if not node.is_leaf():\n            bisect.insort_left(nodes, node)\n            q.append(node.get_right())\n            q.append(node.get_left())\n    return nodes",
    "docstring": "Return clustering nodes in bottom-up order by distance. Parameters ---------- Z : scipy.cluster.linkage array The linkage matrix. Returns ------- nodes : list A list of ClusterNode objects.",
    "type": "function",
    "file_path": "scipy\\scipy\\cluster\\hierarchy.py",
    "ast_data": "FunctionDef name:_order_cluster_tree arg:Z arguments arg Assign Call Assign Call Call Assign While Assign Call If Call Call Call Call Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "restore_thread_local_summary_state",
    "source_code": "def restore_thread_local_summary_state(self):\n    summary_state = summary_ops_v2._summary_state\n    summary_state.step = self._summary_step\n    summary_state.writer = self._summary_writer\n    summary_state.is_recording = self._summary_recording\n    summary_state.is_recording_distribution_strategy = self._summary_recording_distribution_strategy",
    "docstring": "Restore thread local summary state from self.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\mirrored_run.py",
    "ast_data": "FunctionDef name:restore_thread_local_summary_state arg:self arguments arg Assign Assign Assign Assign Assign"
  },
  {
    "library": "tensorflow",
    "name": "_BesselY0Grad",
    "source_code": "@ops.RegisterGradient('BesselY0')\ndef _BesselY0Grad(op: ops.Operation, grad):\n    x = op.inputs[0]\n    with ops.control_dependencies([grad]):\n        partial_x = -special_math_ops.bessel_y1(x)\n        return grad * partial_x",
    "docstring": "Compute gradient of bessel_y0(x) with respect to its argument.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\math_grad.py",
    "ast_data": "FunctionDef name:_BesselY0Grad arg:op arg:grad arguments arg arg Assign With Call Assign Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "decorator",
    "source_code": "def decorator(arg):\n    class_name = name if name is not None else arg.__name__\n    registered_name = package + '>' + class_name\n    if tf_inspect.isclass(arg) and (not hasattr(arg, 'get_config')):\n        raise ValueError('Cannot register a class that does not have a get_config() method.')\n    if registered_name in _GLOBAL_CUSTOM_OBJECTS:\n        raise ValueError('%s has already been registered to %s' % (registered_name, _GLOBAL_CUSTOM_OBJECTS[registered_name]))\n    if arg in _GLOBAL_CUSTOM_NAMES:\n        raise ValueError('%s has already been registered to %s' % (arg, _GLOBAL_CUSTOM_NAMES[arg]))\n    _GLOBAL_CUSTOM_OBJECTS[registered_name] = arg\n    _GLOBAL_CUSTOM_NAMES[arg] = registered_name\n    return arg",
    "docstring": "Registers a class with the Keras serialization framework.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\utils\\generic_utils.py",
    "ast_data": "FunctionDef name:decorator arg:arg arguments arg Assign Compare Assign If BoolOp Call Call Raise Call If Compare Raise Call If Compare Raise Call Assign Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "tfdbg_run_id",
    "source_code": "def tfdbg_run_id(self):\n    return self._tfdbg_run_id",
    "docstring": "Get the run ID of the instrumented TensorFlow program.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\debug\\lib\\debug_events_reader.py",
    "ast_data": "FunctionDef name:tfdbg_run_id arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "save",
    "source_code": "def save(model, filepath, overwrite, include_optimizer, signatures=None, options=None, save_traces=True):\n    if not overwrite and os.path.exists(filepath):\n        proceed = ask_to_proceed_with_overwrite(filepath)\n        if not proceed:\n            return\n    if save_traces:\n        if save_impl.should_skip_serialization(model):\n            saving_utils.raise_model_input_error(model)\n    if not include_optimizer:\n        orig_optimizer = model.optimizer\n        model.optimizer = None\n        model._delete_tracking('optimizer')\n    with K.deprecated_internal_learning_phase_scope(0):\n        with utils.keras_option_scope(save_traces):\n            saved_nodes, node_paths = save_lib.save_and_return_nodes(model, filepath, signatures, options)\n        metadata = generate_keras_metadata(saved_nodes, node_paths)\n    with gfile.GFile(os.path.join(filepath, constants.SAVED_METADATA_PATH), 'wb') as w:\n        w.write(metadata.SerializeToString(deterministic=True))\n    if not include_optimizer:\n        model.optimizer = orig_optimizer",
    "docstring": "Saves a model as a SavedModel to the filepath. Args: model: Keras model instance to be saved. filepath: String path to save the model. overwrite: whether to overwrite the existing filepath. include_optimizer: If True, save the model's optimizer state. signatures: Signatures to save with the SavedModel. Applicable to the 'tf' format only. Please see the argument in for details. options: (only applies to SavedModel format) object that specifies options for saving to SavedModel. save_traces: (only applies to SavedModel format) When enabled, the SavedModel will store the function traces for each layer. This can be disabled, so that only the configs of each layer are stored. Defaults to . Disabling this will decrease serialization time and reduce file size, but it requires that all custom layers/models implement a method. Raises: ValueError: if the model's inputs have not been defined.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\saving\\saved_model\\save.py",
    "ast_data": "FunctionDef name:save arg:model arg:filepath arg:overwrite arg:include_optimizer arg:signatures arg:options arg:save_traces arguments arg arg arg arg arg arg arg If BoolOp Call Assign Call If Return return:no If If Call Call If Assign Assign Call With Call With Call Assign Call Assign Call With Call Call Call Call If Assign"
  },
  {
    "library": "scikit-learn",
    "name": "iteration_ends",
    "source_code": "def iteration_ends(self, time_step):\n    if self.lr_schedule == 'invscaling':\n        self.learning_rate = float(self.learning_rate_init) / (time_step + 1) ** self.power_t",
    "docstring": "Perform updates to learning rate and potential other states at the end of an iteration Parameters ---------- time_step : int number of training samples trained on so far, used to update learning rate for 'invscaling'",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\neural_network\\_stochastic_optimizers.py",
    "ast_data": "FunctionDef name:iteration_ends arg:self arg:time_step arguments arg arg If Compare Assign Call"
  },
  {
    "library": "tensorflow",
    "name": "from_spec",
    "source_code": "@classmethod\ndef from_spec(cls, spec, name=None):\n    return cls(spec.shape, spec.dtype, name or spec.name)",
    "docstring": "Returns a with the same shape and dtype as . >>> spec = tf.TensorSpec(shape=[8, 3], dtype=tf.int32, name=\"OriginalName\") >>> tf.TensorSpec.from_spec(spec, \"NewName\") TensorSpec(shape=(8, 3), dtype=tf.int32, name='NewName') Args: spec: The used to create the new . name: The name for the new . Defaults to .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\tensor.py",
    "ast_data": "FunctionDef name:from_spec arg:cls arg:spec arg:name arguments arg arg arg Return return:yes Call BoolOp"
  },
  {
    "library": "pytorch",
    "name": "get_attr",
    "source_code": "@compatibility(is_backward_compatible=True)\ndef get_attr(self, target: 'Target', args: tuple[Argument, ...], kwargs: dict[str, Any]) -> Any:\n    assert isinstance(target, str)\n    return self.fetch_attr(target)",
    "docstring": "Execute a `Node `__ for details on semantics args (Tuple): Tuple of positional args for this invocation kwargs (Dict): Dict of keyword arguments for this invocation Return: Any: The value of the attribute that was retrieved",
    "type": "method",
    "file_path": "pytorch\\torch\\fx\\interpreter.py",
    "ast_data": "FunctionDef name:get_attr arg:self arg:target arg:args arg:kwargs arguments arg arg arg arg Call Return return:yes Call Call"
  },
  {
    "library": "kornia",
    "name": "marginal_pdf",
    "source_code": "def marginal_pdf(values: Tensor, bins: Tensor, sigma: Tensor, epsilon: float=1e-10) -> Tuple[Tensor, Tensor]:\n    if not isinstance(values, Tensor):\n        raise TypeError(f'Input values type is not a Tensor. Got {type(values)}')\n    if not isinstance(bins, Tensor):\n        raise TypeError(f'Input bins type is not a Tensor. Got {type(bins)}')\n    if not isinstance(sigma, Tensor):\n        raise TypeError(f'Input sigma type is not a Tensor. Got {type(sigma)}')\n    if not values.dim() == 3:\n        raise ValueError(f'Input values must be a of the shape BxNx1. Got {values.shape}')\n    if not bins.dim() == 1:\n        raise ValueError(f'Input bins must be a of the shape NUM_BINS. Got {bins.shape}')\n    if not sigma.dim() == 0:\n        raise ValueError(f'Input sigma must be a of the shape 1. Got {sigma.shape}')\n    residuals = values - bins.unsqueeze(0).unsqueeze(0)\n    kernel_values = torch.exp(-0.5 * (residuals / sigma).pow(2))\n    pdf = torch.mean(kernel_values, dim=1)\n    normalization = torch.sum(pdf, dim=1).unsqueeze(1) + epsilon\n    pdf = pdf / normalization\n    return (pdf, kernel_values)",
    "docstring": "Calculate the marginal probability distribution function of the input based on the number of histogram bins. Args: values: shape [BxNx1]. bins: shape [NUM_BINS]. sigma: shape [1], gaussian smoothing factor. epsilon: scalar, for numerical stability. Returns: Tuple[Tensor, Tensor]: - Tensor: shape [BxN]. - Tensor: shape [BxNxNUM_BINS].",
    "type": "function",
    "file_path": "kornia\\kornia\\enhance\\histogram.py",
    "ast_data": "FunctionDef name:marginal_pdf arg:values arg:bins arg:sigma arg:epsilon arguments arg arg arg arg If Call Raise Call Call If Call Raise Call Call If Call Raise Call Call If Compare Call Raise Call If Compare Call Raise Call If Compare Call Raise Call Assign Call Call Assign Call Call Assign Call Assign Call Call Assign Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "atleast_2d",
    "source_code": "def atleast_2d(*tensors):\n    if has_torch_function(tensors):\n        return handle_torch_function(atleast_2d, tensors, *tensors)\n    if len(tensors) == 1:\n        tensors = tensors[0]\n    return _VF.atleast_2d(tensors)",
    "docstring": "Returns a 2-dimensional view of each input tensor with zero dimensions. Input tensors with two or more dimensions are returned as-is. Args: input (Tensor or list of Tensors) Returns: output (Tensor or tuple of Tensors) Example:: >>> x = torch.tensor(1.) >>> x tensor(1.) >>> torch.atleast_2d(x) tensor([[1.]]) >>> x = torch.arange(4).view(2, 2) >>> x tensor([[0, 1], [2, 3]]) >>> torch.atleast_2d(x) tensor([[0, 1], [2, 3]]) >>> x = torch.tensor(0.5) >>> y = torch.tensor(1.) >>> torch.atleast_2d((x, y)) (tensor([[0.5000]]), tensor([[1.]]))",
    "type": "function",
    "file_path": "pytorch\\torch\\functional.py",
    "ast_data": "FunctionDef name:atleast_2d arguments arg If Call Return return:yes Call If Compare Call Assign Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "_DataPipeType",
    "source_code": "class _DataPipeType:\n\n    def __init__(self, param):\n        self.param = param\n\n    def __repr__(self):\n        return _type_repr(self.param)\n\n    def __eq__(self, other):\n        if isinstance(other, _DataPipeType):\n            return self.param == other.param\n        return NotImplemented\n\n    def __hash__(self):\n        return hash(self.param)\n\n    def issubtype(self, other):\n        if isinstance(other.param, _GenericAlias):\n            if getattr(other.param, '__origin__', None) is Generic:\n                return True\n        if isinstance(other, _DataPipeType):\n            return issubtype(self.param, other.param)\n        if isinstance(other, type):\n            return issubtype(self.param, other)\n        raise TypeError(f\"Expected '_DataPipeType' or 'type', but found {type(other)}\")\n\n    def issubtype_of_instance(self, other):\n        return issubinstance(other, self.param)",
    "docstring": "Save type annotation in .",
    "type": "class",
    "file_path": "pytorch\\torch\\utils\\data\\datapipes\\_typing.py",
    "ast_data": "ClassDef name:_DataPipeType FunctionDef name:__init__ arg:self arg:param arguments arg arg Assign FunctionDef name:__repr__ arg:self arguments arg Return return:yes Call FunctionDef name:__eq__ arg:self arg:other arguments arg arg If Call Return return:yes Compare Return return:yes FunctionDef name:__hash__ arg:self arguments arg Return return:yes Call FunctionDef name:issubtype arg:self arg:other arguments arg arg If Call If Compare Call Return return:yes If Call Return return:yes Call If Call Return return:yes Call Raise Call Call FunctionDef name:issubtype_of_instance arg:self arg:other arguments arg arg Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "get_precision",
    "source_code": "def get_precision(self):\n    xp, is_array_api_compliant = get_namespace(self.components_)\n    n_features = self.components_.shape[1]\n    if self.n_components_ == 0:\n        return xp.eye(n_features) / self.noise_variance_\n    if is_array_api_compliant:\n        linalg_inv = xp.linalg.inv\n    else:\n        linalg_inv = linalg.inv\n    if self.noise_variance_ == 0.0:\n        return linalg_inv(self.get_covariance())\n    components_ = self.components_\n    exp_var = self.explained_variance_\n    if self.whiten:\n        components_ = components_ * xp.sqrt(exp_var[:, np.newaxis])\n    exp_var_diff = exp_var - self.noise_variance_\n    exp_var_diff = xp.where(exp_var > self.noise_variance_, exp_var_diff, xp.asarray(0.0, device=device(exp_var)))\n    precision = components_ @ components_.T / self.noise_variance_\n    _fill_or_add_to_diagonal(precision, 1.0 / exp_var_diff, xp)\n    precision = components_.T @ linalg_inv(precision) @ components_\n    precision /= -self.noise_variance_ ** 2\n    _fill_or_add_to_diagonal(precision, 1.0 / self.noise_variance_, xp)\n    return precision",
    "docstring": "Compute data precision matrix with the generative model. Equals the inverse of the covariance but computed with the matrix inversion lemma for efficiency. Returns ------- precision : array, shape=(n_features, n_features) Estimated precision of data.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\decomposition\\_base.py",
    "ast_data": "FunctionDef name:get_precision arg:self arguments arg Assign Call Assign If Compare Return return:yes Call If Assign Assign If Compare Return return:yes Call Call Assign Assign If Assign Call Assign Assign Call Compare Call Call Assign Call Assign Call Call Return return:yes"
  },
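A usage sketch for `get_precision`, here via scikit-learn's `PCA` (one of the estimators that exposes this base-class method); the data and component count are arbitrary:

```python
import numpy as np
from sklearn.decomposition import PCA

rng = np.random.default_rng(0)
X = rng.normal(size=(200, 5))

pca = PCA(n_components=2).fit(X)
precision = pca.get_precision()  # (n_features, n_features)

# The precision is (numerically) the inverse of the model covariance,
# computed via the matrix inversion lemma rather than a direct inverse.
cov = pca.get_covariance()
assert np.allclose(cov @ precision, np.eye(5), atol=1e-6)
```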
  {
    "library": "tensorflow",
    "name": "WriteExecution",
    "source_code": "def WriteExecution(self, execution):\n    debug_event = debug_event_pb2.DebugEvent(execution=execution)\n    self._EnsureTimestampAdded(debug_event)\n    _pywrap_debug_events_writer.WriteExecution(self._dump_root, debug_event)",
    "docstring": "Write a Execution proto with the writer. Args: execution: An Execution proto, describing a TensorFlow op or graph execution event.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\debug\\lib\\debug_events_writer.py",
    "ast_data": "FunctionDef name:WriteExecution arg:self arg:execution arguments arg arg Assign Call Call Call"
  },
  {
    "library": "matplotlib",
    "name": "make_norm_from_scale",
    "source_code": "def make_norm_from_scale(scale_cls, base_norm_cls=None, *, init=None):\n    if base_norm_cls is None:\n        return functools.partial(make_norm_from_scale, scale_cls, init=init)\n    if isinstance(scale_cls, functools.partial):\n        scale_args = scale_cls.args\n        scale_kwargs_items = tuple(scale_cls.keywords.items())\n        scale_cls = scale_cls.func\n    else:\n        scale_args = scale_kwargs_items = ()\n    if init is None:\n\n        def init(vmin=None, vmax=None, clip=False):\n            pass\n    return _make_norm_from_scale(scale_cls, scale_args, scale_kwargs_items, base_norm_cls, inspect.signature(init))",
    "docstring": "Decorator for building a subclass from a subclass. After :: @make_norm_from_scale(scale_cls) class norm_cls(Normalize): ... *norm_cls* is filled with methods so that normalization computations are forwarded to *scale_cls* (i.e., *scale_cls* is the scale that would be used for the colorbar of a mappable normalized with *norm_cls*). If *init* is not passed, then the constructor signature of *norm_cls* will be `make_norm_from_scale`, and forward the remaining bound values (including any defaults defined by the signature) to the *scale_cls* constructor.",
    "type": "function",
    "file_path": "matplotlib\\lib\\matplotlib\\colors.py",
    "ast_data": "FunctionDef name:make_norm_from_scale arg:scale_cls arg:base_norm_cls arguments arg arg arg If Compare Return return:yes Call If Call Assign Assign Call Call Assign Assign If Compare FunctionDef name:init arg:vmin arg:vmax arg:clip arguments arg arg arg Return return:yes Call Call"
  },
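A sketch of how the decorator is used, mirroring the way matplotlib defines `LogNorm` in `colors.py`; treat it as illustrative rather than the exact upstream definition:

```python
import functools
import matplotlib.colors as mcolors
import matplotlib.scale as mscale

@mcolors.make_norm_from_scale(
    functools.partial(mscale.LogScale, nonpositive="mask"))
class MyLogNorm(mcolors.Normalize):
    """Normalize on a log scale; computations are forwarded to LogScale."""

norm = MyLogNorm(vmin=1, vmax=100)  # default init: (vmin, vmax, clip)
print(norm(10))                     # 0.5 -- halfway between 1 and 100 in log space
```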
  {
    "library": "django",
    "name": "_filter_actions_by_permissions",
    "source_code": "def _filter_actions_by_permissions(self, request, actions):\n    filtered_actions = []\n    for action in actions:\n        callable = action[0]\n        if not hasattr(callable, 'allowed_permissions'):\n            filtered_actions.append(action)\n            continue\n        permission_checks = (getattr(self, 'has_%s_permission' % permission) for permission in callable.allowed_permissions)\n        if any((has_permission(request) for has_permission in permission_checks)):\n            filtered_actions.append(action)\n    return filtered_actions",
    "docstring": "Filter out any actions that the user doesn't have access to.",
    "type": "method",
    "file_path": "django\\django\\contrib\\admin\\options.py",
    "ast_data": "FunctionDef name:_filter_actions_by_permissions arg:self arg:request arg:actions arguments arg arg arg Assign For Assign If Call Call Assign Call If Call Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, handle, dtype, session):\n    self._handle = compat.as_str_any(handle)\n    self._resource_handle = None\n    self._dtype = dtype\n    self._session = session\n    self._auto_gc_enabled = True",
    "docstring": "Constructs a new tensor handle. A tensor handle for a persistent tensor is a python string that has the form of \"tensor_name;unique_id;device_name\". Args: handle: A tensor handle. dtype: The data type of the tensor represented by . session: The session in which the tensor is produced.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\session_ops.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:handle arg:dtype arg:session arguments arg arg arg arg Assign Call Assign Assign Assign Assign"
  },
  {
    "library": "matplotlib",
    "name": "toggle",
    "source_code": "def toggle(self, all=None, ticks=None, ticklabels=None, label=None):\n    if all:\n        _ticks, _ticklabels, _label = (True, True, True)\n    elif all is not None:\n        _ticks, _ticklabels, _label = (False, False, False)\n    else:\n        _ticks, _ticklabels, _label = (None, None, None)\n    if ticks is not None:\n        _ticks = ticks\n    if ticklabels is not None:\n        _ticklabels = ticklabels\n    if label is not None:\n        _label = label\n    if _ticks is not None:\n        self.major_ticks.set_visible(_ticks)\n        self.minor_ticks.set_visible(_ticks)\n    if _ticklabels is not None:\n        self.major_ticklabels.set_visible(_ticklabels)\n        self.minor_ticklabels.set_visible(_ticklabels)\n    if _label is not None:\n        self.label.set_visible(_label)",
    "docstring": "Toggle visibility of ticks, ticklabels, and (axis) label. To turn all off, :: axis.toggle(all=False) To turn all off but ticks on :: axis.toggle(all=False, ticks=True) To turn all on but (axis) label off :: axis.toggle(all=True, label=False)",
    "type": "method",
    "file_path": "matplotlib\\lib\\mpl_toolkits\\axisartist\\axis_artist.py",
    "ast_data": "FunctionDef name:toggle arg:self arg:all arg:ticks arg:ticklabels arg:label arguments arg arg arg arg arg If Assign If Compare Assign Assign If Compare Assign If Compare Assign If Compare Assign If Compare Call Call If Compare Call Call If Compare Call"
  },
  {
    "library": "tensorflow",
    "name": "from_obj",
    "source_code": "@staticmethod\ndef from_obj(obj):\n    if obj is None:\n        return VariablePolicy.NONE\n    if isinstance(obj, VariablePolicy):\n        return obj\n    key = str(obj).lower()\n    for policy in VariablePolicy:\n        if key == policy.value:\n            return policy\n    raise ValueError(f'Received invalid VariablePolicy value: {obj}.')",
    "docstring": "Tries to convert to a VariablePolicy instance.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\saved_model\\save_options.py",
    "ast_data": "FunctionDef name:from_obj arg:obj arguments arg If Compare Return return:yes If Call Return return:yes Assign Call Call For If Compare Return return:yes Raise Call"
  },
  {
    "library": "scikit-learn",
    "name": "d2_log_loss_score",
    "source_code": "@validate_params({'y_true': ['array-like'], 'y_pred': ['array-like'], 'sample_weight': ['array-like', None], 'labels': ['array-like', None]}, prefer_skip_nested_validation=True)\ndef d2_log_loss_score(y_true, y_pred, *, sample_weight=None, labels=None):\n    y_pred = check_array(y_pred, ensure_2d=False, dtype='numeric')\n    check_consistent_length(y_pred, y_true, sample_weight)\n    if _num_samples(y_pred) < 2:\n        msg = 'D^2 score is not well-defined with less than two samples.'\n        warnings.warn(msg, UndefinedMetricWarning)\n        return float('nan')\n    numerator = log_loss(y_true=y_true, y_pred=y_pred, normalize=False, sample_weight=sample_weight, labels=labels)\n    weights = _check_sample_weight(sample_weight, y_true)\n    y_true_, weights_ = (np.concatenate([y_true, labels]), np.concatenate([weights, np.zeros_like(weights, shape=len(labels))])) if labels is not None else (y_true, weights)\n    _, y_value_indices = np.unique(y_true_, return_inverse=True)\n    counts = np.bincount(y_value_indices, weights=weights_)\n    y_prob = counts / weights.sum()\n    y_pred_null = np.tile(y_prob, (len(y_true), 1))\n    denominator = log_loss(y_true=y_true, y_pred=y_pred_null, normalize=False, sample_weight=sample_weight, labels=labels)\n    return float(1 - numerator / denominator)",
    "docstring": ":math: score function, fraction of log loss explained. Best possible score is 1.0 and it can be negative (because the model can be arbitrarily worse). A model that always predicts the per-class proportions of , disregarding the input features, gets a D^2 score of 0.0. Read more in the :ref:. .. versionadded:: 1.5 Parameters ---------- y_true : array-like or label indicator matrix The actuals labels for the n_samples samples. y_pred : array-like of shape (n_samples, n_classes) or (n_samples,) Predicted probabilities, as returned by a classifier's predict_proba method. If `~sklearn.preprocessing.LabelBinarizer`. Returns ------- d2 : float or ndarray of floats The D^2 score. Notes ----- This is not a symmetric function. Like R^2, D^2 score may be negative (it need not actually be the square of a quantity D). This metric is not well-defined for a single sample and will return a NaN value if n_samples is less than two.",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\metrics\\_classification.py",
    "ast_data": "FunctionDef name:d2_log_loss_score arg:y_true arg:y_pred arguments arg arg arg arg Assign Call Call If Compare Call Assign Call Return return:yes Call Assign Call Assign Call Assign Compare Call Call Call Call Assign Call Assign Call Assign Call Assign Call Call Assign Call Return return:yes Call Call"
  },
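A quick usage sketch for `d2_log_loss_score`; the values shown are approximate and follow directly from the definition `1 - log_loss(model) / log_loss(null)`:

```python
from sklearn.metrics import d2_log_loss_score

y_true = [0, 1, 1, 0]

# Confident, mostly-correct probabilities -> D^2 well above 0.
y_pred = [[0.9, 0.1], [0.1, 0.9], [0.2, 0.8], [0.8, 0.2]]
print(d2_log_loss_score(y_true, y_pred))  # ~0.76

# The per-class-proportion (null) model scores exactly 0.
y_null = [[0.5, 0.5]] * 4
print(d2_log_loss_score(y_true, y_null))  # 0.0
```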
  {
    "library": "tensorflow",
    "name": "op_scope",
    "source_code": "@tf_export(v1=['op_scope'])\n@tf_contextlib.contextmanager\ndef op_scope(values, name, default_name=None) -> Iterator[Optional[str]]:\n    logging.warn('tf.op_scope(values, name, default_name) is deprecated, use tf.name_scope(name, default_name, values)')\n    with name_scope(name, default_name=default_name, values=values) as scope:\n        yield scope",
    "docstring": "DEPRECATED. Same as name_scope above, just different argument order.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\ops.py",
    "ast_data": "FunctionDef name:op_scope arg:values arg:name arg:default_name arguments arg arg arg Call With Call Call"
  },
  {
    "library": "matplotlib",
    "name": "redraw_in_frame",
    "source_code": "def redraw_in_frame(self):\n    with ExitStack() as stack:\n        for artist in [*self._axis_map.values(), self.title, self._left_title, self._right_title]:\n            stack.enter_context(artist._cm_set(visible=False))\n        self.draw(self.get_figure(root=True).canvas.get_renderer())",
    "docstring": "Efficiently redraw Axes data, but not axis ticks, labels, etc.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\axes\\_base.py",
    "ast_data": "FunctionDef name:redraw_in_frame arg:self arguments arg With Call For Call Call Call Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "unflatten_as_params",
    "source_code": "@contextlib.contextmanager\ndef unflatten_as_params(self) -> Generator:\n    self._use_unsharded_views(as_params=True)\n    try:\n        yield\n    finally:\n        self._use_unsharded_views(as_params=False)",
    "docstring": "Unflatten the original parameters. The function assumes that the flat parameter is unsharded. When in the context, unflattens the original parameters as `` views into the flat parameter.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\fsdp\\_flat_param.py",
    "ast_data": "FunctionDef name:unflatten_as_params arg:self arguments arg Call Try Call"
  },
  {
    "library": "tensorflow",
    "name": "_resource_apply_sparse_duplicate_indices",
    "source_code": "def _resource_apply_sparse_duplicate_indices(self, grad, handle, indices, **kwargs):\n    summed_grad, unique_indices = _deduplicate_indexed_slices(values=grad, indices=indices)\n    return self._resource_apply_sparse(summed_grad, handle, unique_indices, **kwargs)",
    "docstring": "Add ops to apply sparse gradients to , with repeated indices. Optimizers which override this method must deal with repeated indices. See the docstring of for details. By default the correct behavior, to sum non-unique indices and their associated gradients, is enforced by first pre-processing and and passing them on to . Optimizers which deal correctly with duplicate indices may instead override this method to avoid the overhead of summing. Args: grad: a representing the gradient for the affected indices. handle: a of dtype which points to the variable to be updated. indices: a of integral type representing the indices for which the gradient is nonzero. Indices may be repeated. **kwargs: May optionally contain Returns: An which updates the value of the variable.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\optimizer_v2\\optimizer_v2.py",
    "ast_data": "FunctionDef name:_resource_apply_sparse_duplicate_indices arg:self arg:grad arg:handle arg:indices arguments arg arg arg arg arg Assign Call Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "broadcast_shapes",
    "source_code": "def broadcast_shapes(*shapes: tuple[float | None, ...]) -> tuple[int | None, ...]:\n    if not shapes:\n        return ()\n    ndim = max((len(shape) for shape in shapes))\n    out: list[int | None] = []\n    for axis in range(-ndim, 0):\n        sizes = {shape[axis] for shape in shapes if axis >= -len(shape)}\n        none_size = None in sizes or math.nan in sizes\n        sizes -= {1, None, math.nan}\n        if len(sizes) > 1:\n            msg = f'shape mismatch: objects cannot be broadcast to a single shape: {shapes}.'\n            raise ValueError(msg)\n        out.append(None if none_size else cast(int, sizes.pop()) if sizes else 1)\n    return tuple(out)",
    "docstring": "Compute the shape of the broadcasted arrays. Duplicates :func:, with additional support for None and NaN sizes. This is equivalent to `` for unknown sizes. Examples -------- >>> import array_api_extra as xpx >>> xpx.broadcast_shapes((2, 3), (2, 1)) (2, 3) >>> xpx.broadcast_shapes((4, 2, 3), (2, 1), (1, 3)) (4, 2, 3)",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\externals\\array_api_extra\\_lib\\_funcs.py",
    "ast_data": "FunctionDef name:broadcast_shapes arguments arg If Return return:no Assign Call Call For Call Assign Compare Call Assign BoolOp Compare Compare If Compare Call Assign Raise Call Call Call Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "post_compile",
    "source_code": "def post_compile(self, example_inputs: Sequence[InputType], constants: CompiledFxGraphConstants, graph_kwargs: _CompileFxKwargs) -> None:\n    set_tracing_context_output_strides(example_inputs, self)\n    assert graph_kwargs['cudagraphs'] is not None\n    assert graph_kwargs['is_backward'] is not None\n    is_backward = graph_kwargs['is_backward']\n    cudagraphs: BoxedBool = graph_kwargs['cudagraphs']\n    if cudagraphs:\n        if self.disabled_cudagraphs_reason:\n            if 'cuda' in self.device_types:\n                log_cudagraph_skip_and_bump_counter(f'skipping cudagraphs due to {self.disabled_cudagraphs_reason}')\n            else:\n                counters['inductor']['cudagraph_skips'] += 1\n            BoxedBool.disable(cudagraphs)\n        else:\n            if is_backward:\n                assert 'boxed_forward_device_index' in graph_kwargs\n                boxed_forward_device_index = graph_kwargs['boxed_forward_device_index']\n            else:\n                boxed_forward_device_index = graph_kwargs.get('boxed_forward_device_index', None)\n            if config.graph_partition:\n                cudagraph_partition_post_compile(example_inputs, self, cudagraphs, constants.unwrap(self), boxed_forward_device_index)\n            else:\n                cudagraph_post_compile(example_inputs, self, cudagraphs, constants.unwrap(self), boxed_forward_device_index)\n    inputs_to_check = self.inputs_to_check\n    maybe_realign_inputs(cudagraphs, self, inputs_to_check)",
    "docstring": "Run a set of post processing steps after loading from the cache. These involve: - Setting the tracing context output strides - Running cudagraphs if enabled - Realigning inputs This runs whether or not we have a cache hit, and always runs directly after we get a CompiledFxGraph. The results of this function are *not* saved in the cache itself.",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\output_code.py",
    "ast_data": "FunctionDef name:post_compile arg:self arg:example_inputs arg:constants arg:graph_kwargs arguments arg arg arg arg Call Compare Compare Assign If If If Compare Call Call If Compare Assign Assign Call If Call Call Call Call Assign Call"
  },
  {
    "library": "pytorch",
    "name": "end_compile",
    "source_code": "@classmethod\ndef end_compile(cls) -> None:\n    log.debug('TritonBundler.end_compile is called')\n    cls._entries = None\n    cls._static_autotuners = None",
    "docstring": "Finalizes the TritonBundler. If collect is not yet called, it discards the current bundle.",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\triton_bundler.py",
    "ast_data": "FunctionDef name:end_compile arg:cls arguments arg Call Assign Assign"
  },
  {
    "library": "scipy",
    "name": "fft2",
    "source_code": "def fft2(x, shape=None, axes=(-2, -1), overwrite_x=False):\n    return fftn(x, shape, axes, overwrite_x)",
    "docstring": "2-D discrete Fourier transform. Return the 2-D discrete Fourier transform of the 2-D argument . See Also -------- fftn : for detailed information. Examples -------- >>> import numpy as np >>> from scipy.fftpack import fft2, ifft2 >>> y = np.mgrid[:5, :5][0] >>> y array([[0, 0, 0, 0, 0], [1, 1, 1, 1, 1], [2, 2, 2, 2, 2], [3, 3, 3, 3, 3], [4, 4, 4, 4, 4]]) >>> np.allclose(y, ifft2(fft2(y))) True",
    "type": "function",
    "file_path": "scipy\\scipy\\fftpack\\_basic.py",
    "ast_data": "FunctionDef name:fft2 arg:x arg:shape arg:axes arg:overwrite_x arguments arg arg arg arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "get_cell",
    "source_code": "def get_cell(self, *labels):\n    return CounterCell(super(Counter, self).get_cell(*labels))",
    "docstring": "Retrieves the cell.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\monitoring.py",
    "ast_data": "FunctionDef name:get_cell arg:self arguments arg arg Return return:yes Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "take_grad",
    "source_code": "def take_grad(self, num_required, name=None):\n    out = gen_data_flow_ops.resource_accumulator_take_gradient(self._accumulator_ref, num_required, dtype=self._dtype, name=name)\n    out.set_shape(self._shape)\n    return out",
    "docstring": "Attempts to extract the average gradient from the accumulator. The operation blocks until sufficient number of gradients have been successfully applied to the accumulator. Once successful, the following actions are also triggered: - Counter of accumulated gradients is reset to 0. - Aggregated gradient is reset to 0 tensor. - Accumulator's internal time step is incremented by 1. Args: num_required: Number of gradients that needs to have been aggregated name: Optional name for the operation Returns: A tensor holding the value of the average gradient. Raises: InvalidArgumentError: If num_required < 1",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\data_flow_ops.py",
    "ast_data": "FunctionDef name:take_grad arg:self arg:num_required arg:name arguments arg arg arg Assign Call Call Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "Tags",
    "source_code": "@dataclass(slots=True)\nclass Tags:\n    estimator_type: str | None\n    target_tags: TargetTags\n    transformer_tags: TransformerTags | None = None\n    classifier_tags: ClassifierTags | None = None\n    regressor_tags: RegressorTags | None = None\n    array_api_support: bool = False\n    no_validation: bool = False\n    non_deterministic: bool = False\n    requires_fit: bool = True\n    _skip_test: bool = False\n    input_tags: InputTags = field(default_factory=InputTags)",
    "docstring": "Tags for the estimator. See :ref: for more information. Parameters ---------- estimator_type : str or None The type of the estimator. Can be one of: - \"classifier\" - \"regressor\" - \"transformer\" - \"clusterer\" - \"outlier_detector\" - \"density_estimator\" target_tags : :class: The target(y) tags. transformer_tags : :class: or None The transformer tags. classifier_tags : :class: or None The classifier tags. regressor_tags : :class: or None The regressor tags. array_api_support : bool, default=False Whether the estimator supports Array API compatible inputs. no_validation : bool, default=False Whether the estimator skips input-validation. This is only meant for stateless and dummy transformers! non_deterministic : bool, default=False Whether the estimator is not deterministic given a fixed `transformpredictpredict_probadecision_functionInputTags` The input data(X) tags.",
    "type": "class",
    "file_path": "scikit-learn\\sklearn\\utils\\_tags.py",
    "ast_data": "ClassDef name:Tags Call Call"
  },
  {
    "library": "numpy",
    "name": "_replace_dtype_fields",
    "source_code": "def _replace_dtype_fields(dtype, primitive_dtype):\n    dtype = np.dtype(dtype)\n    primitive_dtype = np.dtype(primitive_dtype)\n    return _replace_dtype_fields_recursive(dtype, primitive_dtype)",
    "docstring": "Construct a dtype description list from a given dtype. Returns a new dtype object, with all fields and subtypes in the given type recursively replaced with . Arguments are coerced to dtypes first.",
    "type": "function",
    "file_path": "numpy\\numpy\\ma\\core.py",
    "ast_data": "FunctionDef name:_replace_dtype_fields arg:dtype arg:primitive_dtype arguments arg arg Assign Call Assign Call Return return:yes Call"
  },
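`_replace_dtype_fields` is private; its effect is visible through `np.ma.make_mask_descr`, which uses it to turn every leaf field into a boolean mask field. A small sketch:

```python
import numpy as np

dt = np.dtype([('a', np.float64), ('b', [('c', np.int32), ('d', np.float32)])])
# Every leaf dtype is recursively replaced with bool for the mask descriptor.
print(np.ma.make_mask_descr(dt))
# dtype([('a', '?'), ('b', [('c', '?'), ('d', '?')])])
```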
  {
    "library": "scipy",
    "name": "p_num",
    "source_code": "def p_num(self, n: int) -> int:\n    return self.p_max(n) - self.p_min",
    "docstring": "Number of time slices for an input signal with samples. It is given by = - with typically being negative. A detailed example is provided in the :ref: section of the :ref:. See Also -------- k_min: The smallest possible signal index. k_max: First sample index after signal end not touched by a time slice. lower_border_end: Where pre-padding effects end. p_min: The smallest possible slice index. p_max: Index of first non-overlapping upper time slice. p_range: Determine and validate slice index range. upper_border_begin: Where post-padding effects start. ShortTimeFFT: Class this method belongs to.",
    "type": "method",
    "file_path": "scipy\\scipy\\signal\\_short_time_fft.py",
    "ast_data": "FunctionDef name:p_num arg:self arg:n arguments arg arg Return return:yes Call"
  },
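A usage sketch for `p_num`, assuming a standard `ShortTimeFFT` setup; the window length, hop, and sample count are arbitrary:

```python
from scipy.signal import ShortTimeFFT
from scipy.signal.windows import gaussian

win = gaussian(50, std=12, sym=True)
SFT = ShortTimeFFT(win, hop=10, fs=1000.0)

n = 500  # number of input samples
print(SFT.p_min, SFT.p_max(n), SFT.p_num(n))
# p_num(n) == p_max(n) - p_min, matching the method above.
assert SFT.p_num(n) == SFT.p_max(n) - SFT.p_min
```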
  {
    "library": "tensorflow",
    "name": "get_python_major_version",
    "source_code": "def get_python_major_version(python_bin_path):\n    return run_shell([python_bin_path, '-c', 'import sys; print(sys.version[0])'])",
    "docstring": "Get the python major version.",
    "type": "function",
    "file_path": "tensorflow\\configure.py",
    "ast_data": "FunctionDef name:get_python_major_version arg:python_bin_path arguments arg Return return:yes Call"
  },
  {
    "library": "django",
    "name": "PasswordChangeForm",
    "source_code": "class PasswordChangeForm(SetPasswordForm):\n    error_messages = {**SetPasswordForm.error_messages, 'password_incorrect': _('Your old password was entered incorrectly. Please enter it again.')}\n    old_password = forms.CharField(label=_('Old password'), strip=False, widget=forms.PasswordInput(attrs={'autocomplete': 'current-password', 'autofocus': True}))\n    field_order = ['old_password', 'new_password1', 'new_password2']\n\n    @sensitive_variables('old_password')\n    def clean_old_password(self):\n        old_password = self.cleaned_data['old_password']\n        if not self.user.check_password(old_password):\n            raise ValidationError(self.error_messages['password_incorrect'], code='password_incorrect')\n        return old_password",
    "docstring": "A form that lets a user change their password by entering their old password.",
    "type": "class",
    "file_path": "django\\django\\contrib\\auth\\forms.py",
    "ast_data": "ClassDef name:PasswordChangeForm Assign Call Assign Call Call Call Assign FunctionDef name:clean_old_password arg:self arguments arg Assign If Call Raise Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "num_accelerators",
    "source_code": "def num_accelerators(self, task_type=None, task_id=None, config_proto=None):\n    del task_type, task_id, config_proto\n    if self._num_accelerators is None:\n        return {}\n    return self._num_accelerators",
    "docstring": "Returns the number of accelerator cores per worker. The SimpleClusterResolver does not do automatic detection of accelerators, and thus all arguments are unused and we simply return the value provided in the constructor. Args: task_type: Unused. task_id: Unused. config_proto: Unused.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\cluster_resolver\\cluster_resolver.py",
    "ast_data": "FunctionDef name:num_accelerators arg:self arg:task_type arg:task_id arg:config_proto arguments arg arg arg arg If Compare Return return:no Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "sequence_mask",
    "source_code": "@tf_export('sequence_mask')\n@dispatch.add_dispatch_support\ndef sequence_mask(lengths, maxlen=None, dtype=dtypes.bool, name=None):\n    with ops.name_scope(name, 'SequenceMask', [lengths, maxlen]):\n        lengths = ops.convert_to_tensor(lengths)\n        if maxlen is None:\n            maxlen = gen_math_ops._max(lengths, _all_dimensions(lengths))\n            maxlen = gen_math_ops.maximum(constant(0, maxlen.dtype), maxlen)\n        else:\n            maxlen = ops.convert_to_tensor(maxlen)\n        if maxlen.get_shape().ndims is not None and maxlen.get_shape().ndims != 0:\n            raise ValueError(f\"Argument `maxlen` must be scalar for sequence_mask, received `maxlen` = {maxlen} with shape '{maxlen.get_shape()}' instead\")\n        row_vector = gen_math_ops._range(constant(0, maxlen.dtype), maxlen, constant(1, maxlen.dtype))\n        matrix = gen_math_ops.cast(expand_dims(lengths, -1), maxlen.dtype)\n        result = row_vector < matrix\n        if dtype is None or result.dtype.is_compatible_with(dtype):\n            return result\n        else:\n            return gen_math_ops.cast(result, dtype)",
    "docstring": "Returns a mask tensor representing the first N positions of each cell. If has shape the resulting tensor has dtype and shape , with Examples: Args: lengths: integer tensor, all its values <= maxlen. maxlen: scalar integer tensor, size of last dimension of returned tensor. Default is the maximum value in . dtype: output type of the resulting tensor. name: name of the op. Returns: A mask tensor of shape , cast to specified dtype. Raises: ValueError: if is not a scalar.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\array_ops.py",
    "ast_data": "FunctionDef name:sequence_mask arg:lengths arg:maxlen arg:dtype arg:name arguments arg arg arg arg With Call Assign Call If Compare Assign Call Call Assign Call Call Assign Call If BoolOp Compare Call Compare Call Raise Call Call Assign Call Call Call Assign Call Call Assign Compare If BoolOp Compare Call Return return:yes Return return:yes Call Call"
  },
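A usage sketch for `sequence_mask`; the outputs shown follow directly from the definition `mask[i, j] = j < lengths[i]`:

```python
import tensorflow as tf

print(tf.sequence_mask([1, 3, 2], 5))
# [[ True False False False False]
#  [ True  True  True False False]
#  [ True  True False False False]]

# maxlen defaults to max(lengths); dtype can be changed for numeric masks.
print(tf.sequence_mask([1, 3, 2], dtype=tf.float32))
# [[1. 0. 0.]
#  [1. 1. 1.]
#  [1. 1. 0.]]
```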
  {
    "library": "scipy",
    "name": "_kolmogn_p",
    "source_code": "def _kolmogn_p(n, x):\n    if np.isnan(n):\n        return n\n    if int(n) != n or n <= 0:\n        return np.nan\n    if x >= 1.0 or x <= 0:\n        return 0\n    t = n * x\n    if t <= 1.0:\n        if t <= 0.5:\n            return 0.0\n        if n <= 140:\n            prd = np.prod(np.arange(1, n) * (1.0 / n) * (2 * t - 1))\n        else:\n            prd = np.exp(_log_nfactorial_div_n_pow_n(n) + (n - 1) * np.log(2 * t - 1))\n        return prd * 2 * n ** 2\n    if t >= n - 1:\n        return 2 * (1.0 - x) ** (n - 1) * n\n    if x >= 0.5:\n        return 2 * scipy.stats.ksone.pdf(x, n)\n    delta = x / 2.0 ** 16\n    delta = min(delta, x - 1.0 / n)\n    delta = min(delta, 0.5 - x)\n\n    def _kk(_x):\n        return kolmogn(n, _x)\n    return _derivative(_kk, x, dx=delta, order=5)",
    "docstring": "Computes the PDF for the two-sided Kolmogorov-Smirnov statistic. x must be of type float, n of type integer.",
    "type": "function",
    "file_path": "scipy\\scipy\\stats\\_ksstats.py",
    "ast_data": "FunctionDef name:_kolmogn_p arg:n arg:x arguments arg arg If Call Return return:yes If BoolOp Compare Call Compare Return return:yes If BoolOp Compare Compare Return return:yes Assign If Compare If Compare Return return:yes If Compare Assign Call Call Assign Call Call Call Return return:yes If Compare Return return:yes If Compare Return return:yes Call Assign Assign Call Assign Call FunctionDef name:_kk arg:_x arguments arg Return return:yes Call Return return:yes Call"
  },
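`_kolmogn_p` is internal; its results surface through `scipy.stats.kstwo`, the two-sided Kolmogorov-Smirnov statistic distribution for finite n, which delegates to the `_ksstats` helpers. A minimal sketch:

```python
from scipy.stats import kstwo

# PDF of the two-sided Kolmogorov-Smirnov statistic D_n for n = 10.
n, x = 10, 0.3
print(kstwo.pdf(x, n))  # density at x
print(kstwo.sf(x, n))   # P(D_10 > 0.3)
```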
  {
    "library": "cherrypy",
    "name": "start",
    "source_code": "def start(self):\n    if self.is_set:\n        self.bus.log('Handler for console events already set.', level=20)\n        return\n    result = win32api.SetConsoleCtrlHandler(self.handle, 1)\n    if result == 0:\n        self.bus.log('Could not SetConsoleCtrlHandler (error %r)' % win32api.GetLastError(), level=40)\n    else:\n        self.bus.log('Set handler for console events.', level=20)\n        self.is_set = True",
    "docstring": "Register handling of the console control events.",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\process\\win32.py",
    "ast_data": "FunctionDef name:start arg:self arguments arg If Call Return return:no Assign Call If Compare Call Call Call Assign"
  },
  {
    "library": "pytorch",
    "name": "_InfiniteConstantSampler",
    "source_code": "class _InfiniteConstantSampler(Sampler):\n\n    def __iter__(self):\n        while True:\n            yield None",
    "docstring": "Analogous to `~torch.utils.data.IterableDataset`.",
    "type": "class",
    "file_path": "pytorch\\torch\\utils\\data\\dataloader.py",
    "ast_data": "ClassDef name:_InfiniteConstantSampler FunctionDef name:__iter__ arg:self arguments arg While"
  },
  {
    "library": "pytorch",
    "name": "custom_train_test_split",
    "source_code": "def custom_train_test_split(self, df, test_size=0.2, val_size=0.25, random_state=42):\n    exclude_columns = ['speedup', 'winner', 'target']\n    feature_columns = [col for col in df.columns if col not in exclude_columns and (not col.startswith(CHOICE_COL + '_'))]\n    df['input_id'] = df.groupby(feature_columns).ngroup()\n    unique_inputs = df['input_id'].unique()\n    train_val_inputs, test_inputs = train_test_split(unique_inputs, test_size=test_size, random_state=random_state)\n    train_inputs, val_inputs = train_test_split(train_val_inputs, test_size=val_size, random_state=random_state)\n    train_mask = df['input_id'].isin(train_inputs)\n    val_mask = df['input_id'].isin(val_inputs)\n    test_mask = df['input_id'].isin(test_inputs)\n    df_train = df[train_mask]\n    df_val = df[val_mask]\n    df_test = df[test_mask]\n    df_train = df_train.drop('input_id', axis=1)\n    df_val = df_val.drop('input_id', axis=1)\n    df_test = df_test.drop('input_id', axis=1)\n    return (df_train, df_val, df_test, feature_columns)",
    "docstring": "Splits the dataframe into train, val, and test sets. Also adds other datasets, specified by the user, to the train set. We need to be careful, because we want to make sure that rows with the same input but different choice are kept in the same set, e.g. Rows that looks like this input_1,choice1,... input_1,choice2,... should be in the same set.",
    "type": "method",
    "file_path": "pytorch\\torchgen\\_autoheuristic\\train_regression.py",
    "ast_data": "FunctionDef name:custom_train_test_split arg:self arg:df arg:test_size arg:val_size arg:random_state arguments arg arg arg arg arg Assign Assign BoolOp Compare Call Assign Call Call Assign Call Assign Call Assign Call Assign Call Assign Call Assign Call Assign Assign Assign Assign Call Assign Call Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, coord, timer_interval_secs, target=None, args=None, kwargs=None):\n    if not isinstance(coord, Coordinator):\n        raise ValueError(\"'coord' argument must be a Coordinator: %s\" % coord)\n    super(LooperThread, self).__init__()\n    self.daemon = True\n    self._coord = coord\n    self._timer_interval_secs = timer_interval_secs\n    self._target = target\n    if self._target:\n        self._args = args or ()\n        self._kwargs = kwargs or {}\n    elif args or kwargs:\n        raise ValueError(\"'args' and 'kwargs' argument require that you also pass 'target'\")\n    self._coord.register_thread(self)",
    "docstring": "Create a LooperThread. Args: coord: A Coordinator. timer_interval_secs: Time boundaries at which to call Run(), or None if it should be called back to back. target: Optional callable object that will be executed in the thread. args: Optional arguments to pass to when calling it. kwargs: Optional keyword arguments to pass to when calling it. Raises: ValueError: If one of the arguments is invalid.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\training\\coordinator.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:coord arg:timer_interval_secs arg:target arg:args arg:kwargs arguments arg arg arg arg arg arg If Call Raise Call Call Call Assign Assign Assign Assign If Assign BoolOp Assign BoolOp If BoolOp Raise Call Call"
  },
  {
    "library": "scrapy",
    "name": "origin",
    "source_code": "def origin(self, url: str) -> str | None:\n    return self.strip_url(url, origin_only=True)",
    "docstring": "Return serialized origin (scheme, host, path) for a request or response URL.",
    "type": "method",
    "file_path": "scrapy\\scrapy\\spidermiddlewares\\referer.py",
    "ast_data": "FunctionDef name:origin arg:self arg:url arguments arg arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "AOTDispatchCompiler",
    "source_code": "class AOTDispatchCompiler(Protocol):\n\n    def __call__(self, gm: torch.fx.GraphModule, example_inputs: Sequence[InputType]) -> Any:\n        ...",
    "docstring": "Represents a fw or bw_compiler passed to AOTAutograd.",
    "type": "class",
    "file_path": "pytorch\\torch\\_functorch\\aot_autograd.py",
    "ast_data": "ClassDef name:AOTDispatchCompiler FunctionDef name:__call__ arg:self arg:gm arg:example_inputs arguments arg arg arg"
  },
  {
    "library": "pytorch",
    "name": "requires",
    "source_code": "def requires(self, graph_module: GraphModule) -> None:\n    pass",
    "docstring": "This function will be called before the pass is run and will check that the given graph module contains the preconditions needed to run the pass. It is not required to implement this function. Args: graph_module: The graph module we will run checks on",
    "type": "method",
    "file_path": "pytorch\\torch\\fx\\passes\\infra\\pass_base.py",
    "ast_data": "FunctionDef name:requires arg:self arg:graph_module arguments arg arg"
  },
  {
    "library": "matplotlib",
    "name": "send_message",
    "source_code": "def send_message(self, event):\n    if self.toolmanager.messagelock.locked():\n        return\n    from matplotlib.backend_bases import NavigationToolbar2\n    message = NavigationToolbar2._mouse_event_to_message(event)\n    self.toolmanager.message_event(message, self)",
    "docstring": "Call .",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\backend_tools.py",
    "ast_data": "FunctionDef name:send_message arg:self arg:event arguments arg arg If Call Return return:no Assign Call Call"
  },
  {
    "library": "pytorch",
    "name": "verbose",
    "source_code": "class verbose:\n\n    def __init__(self, level):\n        self.level = level\n\n    def __enter__(self):\n        if self.level == VERBOSE_OFF:\n            return\n        st = torch._C._verbose.mkldnn_set_verbose(self.level)\n        assert st, 'Failed to set MKLDNN into verbose mode. Please consider to disable this verbose scope.'\n        return self\n\n    def __exit__(self, exc_type, exc_val, exc_tb):\n        torch._C._verbose.mkldnn_set_verbose(VERBOSE_OFF)\n        return False",
    "docstring": "On-demand oneDNN (former MKL-DNN) verbosing functionality. To make it easier to debug performance issues, oneDNN can dump verbose messages containing information like kernel size, input data size and execution duration while executing the kernel. The verbosing functionality can be invoked via an environment variable named . However, this methodology dumps messages in all steps. Those are a large amount of verbose messages. Moreover, for investigating the performance issues, generally taking verbose messages for one single iteration is enough. This on-demand verbosing functionality makes it possible to control scope for verbose message dumping. In the following example, verbose messages will be dumped out for the second inference only. .. highlight:: python .. code-block:: python import torch model(data) with torch.backends.mkldnn.verbose(torch.backends.mkldnn.VERBOSE_ON): model(data) Args: level: Verbose level - ``: Enable verbosing, including oneDNN kernel creation",
    "type": "class",
    "file_path": "pytorch\\torch\\backends\\mkldnn\\__init__.py",
    "ast_data": "ClassDef name:verbose FunctionDef name:__init__ arg:self arg:level arguments arg arg Assign FunctionDef name:__enter__ arg:self arguments arg If Compare Return return:no Assign Call Return return:yes FunctionDef name:__exit__ arg:self arg:exc_type arg:exc_val arg:exc_tb arguments arg arg arg arg Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "assert_splits_match",
    "source_code": "def assert_splits_match(nested_splits_lists):\n    error_msg = 'Inputs must have identical ragged splits'\n    for splits_list in nested_splits_lists:\n        if len(splits_list) != len(nested_splits_lists[0]):\n            raise ValueError(error_msg)\n    return [check_ops.assert_equal(s1, s2, message=error_msg) for splits_list in nested_splits_lists[1:] for s1, s2 in zip(nested_splits_lists[0], splits_list)]",
    "docstring": "Checks that the given splits lists are identical. Performs static tests to ensure that the given splits lists are identical, and returns a list of control dependency op tensors that check that they are fully identical. Args: nested_splits_lists: A list of nested_splits_lists, where each split_list is a list of tensors from a , ordered from outermost ragged dimension to innermost ragged dimension. Returns: A list of control dependency op tensors. Raises: ValueError: If the splits are not identical.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\ragged_util.py",
    "ast_data": "FunctionDef name:assert_splits_match arg:nested_splits_lists arguments arg Assign For If Compare Call Call Raise Call Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "RevivedInputLayer",
    "source_code": "class RevivedInputLayer(object):\n\n    @classmethod\n    def _init_from_metadata(cls, metadata):\n        init_args = dict(name=metadata['name'], dtype=metadata['dtype'], sparse=metadata['sparse'], ragged=metadata['ragged'], batch_input_shape=metadata['batch_input_shape'])\n        revived_obj = cls(**init_args)\n        with utils.no_automatic_dependency_tracking_scope(revived_obj):\n            revived_obj._config = metadata['config']\n        return (revived_obj, setattr)\n\n    def get_config(self):\n        return self._config",
    "docstring": "InputLayer loaded from a SavedModel.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\saving\\saved_model\\load.py",
    "ast_data": "ClassDef name:RevivedInputLayer FunctionDef name:_init_from_metadata arg:cls arg:metadata arguments arg arg Assign Call Assign Call With Call Assign Return return:yes FunctionDef name:get_config arg:self arguments arg Return return:yes"
  },
  {
    "library": "scipy",
    "name": "_transform_banded_jac",
    "source_code": "def _transform_banded_jac(bjac):\n    newjac = zeros((bjac.shape[0] + 1, bjac.shape[1]))\n    newjac[1:, ::2] = bjac[:, ::2]\n    newjac[:-1, 1::2] = bjac[:, 1::2]\n    return newjac",
    "docstring": "Convert a real matrix of the form (for example) [0 0 A B] [0 0 0 B] [0 0 C D] [0 0 A D] [E F G H] to [0 F C H] [I J K L] [E J G L] [I 0 K 0] That is, every other column is shifted up one.",
    "type": "function",
    "file_path": "scipy\\scipy\\integrate\\_ode.py",
    "ast_data": "FunctionDef name:_transform_banded_jac arg:bjac arguments arg Assign Call Assign Assign Return return:yes"
  },
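A tiny demo of the column shift, using the integers 1..12 in place of the letters A..L from the docstring (calling the private helper directly from `scipy.integrate._ode` is for illustration only):

```python
import numpy as np
from scipy.integrate._ode import _transform_banded_jac

# Rows [0 0 A B; 0 0 C D; E F G H; I J K L] with A..L = 1..12.
bjac = np.array([[0, 0, 1, 2],
                 [0, 0, 3, 4],
                 [5, 6, 7, 8],
                 [9, 10, 11, 12]], dtype=float)
print(_transform_banded_jac(bjac))
# [[ 0.  0.  0.  2.]
#  [ 0.  0.  1.  4.]
#  [ 0.  6.  3.  8.]
#  [ 5. 10.  7. 12.]
#  [ 9.  0. 11.  0.]]
```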
  {
    "library": "matplotlib",
    "name": "get_zlabel",
    "source_code": "def get_zlabel(self):\n    label = self.zaxis.label\n    return label.get_text()",
    "docstring": "Get the z-label text string.",
    "type": "method",
    "file_path": "matplotlib\\lib\\mpl_toolkits\\mplot3d\\axes3d.py",
    "ast_data": "FunctionDef name:get_zlabel arg:self arguments arg Assign Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "module_to_fqn",
    "source_code": "def module_to_fqn(model: nn.Module, module: nn.Module, prefix: str='') -> Optional[str]:\n    if module is model:\n        return ''\n    for name, child in model.named_children():\n        fqn = module_to_fqn(child, module, '.')\n        if isinstance(fqn, str):\n            return prefix + name + fqn\n    return None",
    "docstring": "Returns the fqn for a module or None if module not a descendent of model.",
    "type": "function",
    "file_path": "pytorch\\torch\\ao\\pruning\\sparsifier\\utils.py",
    "ast_data": "FunctionDef name:module_to_fqn arg:model arg:module arg:prefix arguments arg arg arg If Compare Return return:yes For Call Assign Call If Call Return return:yes Return return:no"
  },
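A usage sketch for `module_to_fqn`, importing it from the path recorded above:

```python
import torch.nn as nn
from torch.ao.pruning.sparsifier.utils import module_to_fqn

model = nn.Sequential(
    nn.Linear(4, 4),
    nn.Sequential(nn.ReLU(), nn.Linear(4, 2)),
)

print(module_to_fqn(model, model[1][1]))      # '1.1' -- nested child
print(module_to_fqn(model, model))            # ''    -- the model itself
print(module_to_fqn(model, nn.Linear(1, 1)))  # None  -- not a descendant
```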
  {
    "library": "pytorch",
    "name": "SubclassSymbolicContext",
    "source_code": "@dataclass(frozen=True)\nclass SubclassSymbolicContext(StatefulSymbolicContext):\n    inner_contexts: dict[str, SymbolicContext] = None\n\n    def __post_init__(self) -> None:\n        super().__post_init__()\n        if self.inner_contexts is None:\n            self.inner_contexts = {}",
    "docstring": "The correct symbolic context for a given inner tensor of a traceable tensor subclass may differ from that of the outer symbolic context. This structure allows for this flexibility, with inner symbolic contexts mapped via attr -> symbolic context.",
    "type": "class",
    "file_path": "pytorch\\torch\\fx\\experimental\\symbolic_shapes.py",
    "ast_data": "ClassDef name:SubclassSymbolicContext FunctionDef name:__post_init__ arg:self arguments arg Call Call If Compare Assign Call"
  },
  {
    "library": "scipy",
    "name": "get_id",
    "source_code": "def get_id(self):\n    return self.id",
    "docstring": "The identifier of the target node. For `ii`. Returns ------- id : int The identifier of the target node.",
    "type": "method",
    "file_path": "scipy\\scipy\\cluster\\hierarchy.py",
    "ast_data": "FunctionDef name:get_id arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "most_recent_n",
    "source_code": "def most_recent_n(self, n):\n    return self._commands[-n:]",
    "docstring": "Look up the n most recent commands. Args: n: Number of most recent commands to look up. Returns: A list of n most recent commands, or all available most recent commands, if n exceeds size of the command history, in chronological order.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\debug\\cli\\debugger_cli_common.py",
    "ast_data": "FunctionDef name:most_recent_n arg:self arg:n arguments arg arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_MEM_FORMAT_ENCODING",
    "source_code": "class _MEM_FORMAT_ENCODING(Enum):\n    TORCH_CONTIGUOUS_FORMAT = 0\n    TORCH_CHANNELS_LAST = 1\n    TORCH_PRESERVE_FORMAT = 2",
    "docstring": "Describe the memory format of a tensor.",
    "type": "class",
    "file_path": "pytorch\\torch\\distributed\\checkpoint\\metadata.py",
    "ast_data": "ClassDef name:_MEM_FORMAT_ENCODING Assign Assign Assign"
  },
  {
    "library": "sphinx",
    "name": "get_assign_targets",
    "source_code": "def get_assign_targets(node: ast.AST) -> list[ast.expr]:\n    if isinstance(node, ast.Assign):\n        return node.targets\n    else:\n        return [node.target]",
    "docstring": "Get list of targets from Assign and AnnAssign node.",
    "type": "function",
    "file_path": "sphinx\\sphinx\\pycode\\parser.py",
    "ast_data": "FunctionDef name:get_assign_targets arg:node arguments arg If Call Return return:yes Return return:yes"
  },
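A sketch of `get_assign_targets` on both node kinds it handles, reimplemented locally since it is a sphinx-internal helper:

```python
import ast

def get_assign_targets(node: ast.AST) -> list[ast.expr]:
    """Get list of targets from Assign and AnnAssign node."""
    if isinstance(node, ast.Assign):
        return node.targets
    else:
        return [node.target]

# "x = y = 1" parses to one Assign with two targets; "z: int = 2" to AnnAssign.
assign, ann_assign = ast.parse("x = y = 1\nz: int = 2").body
print([t.id for t in get_assign_targets(assign)])      # ['x', 'y']
print([t.id for t in get_assign_targets(ann_assign)])  # ['z']
```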
  {
    "library": "authlib",
    "name": "generate",
    "source_code": "def generate(self, grant_type, client, user=None, scope=None, expires_in=None, include_refresh_token=True):\n    scope = self.get_allowed_scope(client, scope)\n    access_token = self.access_token_generator(client=client, grant_type=grant_type, user=user, scope=scope)\n    if expires_in is None:\n        expires_in = self._get_expires_in(client, grant_type)\n    token = {'token_type': 'Bearer', 'access_token': access_token}\n    if expires_in:\n        token['expires_in'] = expires_in\n    if include_refresh_token and self.refresh_token_generator:\n        token['refresh_token'] = self.refresh_token_generator(client=client, grant_type=grant_type, user=user, scope=scope)\n    if scope:\n        token['scope'] = scope\n    return token",
    "docstring": "Generate a bearer token for OAuth 2.0 authorization token endpoint. :param client: the client that making the request. :param grant_type: current requested grant_type. :param user: current authorized user. :param expires_in: if provided, use this value as expires_in. :param scope: current requested scope. :param include_refresh_token: should refresh_token be included. :return: Token dict",
    "type": "method",
    "file_path": "authlib\\authlib\\oauth2\\rfc6750\\token.py",
    "ast_data": "FunctionDef name:generate arg:self arg:grant_type arg:client arg:user arg:scope arg:expires_in arg:include_refresh_token arguments arg arg arg arg arg arg arg Assign Call Assign Call If Compare Assign Call Assign If Assign If BoolOp Assign Call If Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "build",
    "source_code": "def build(self, y_pred):\n    super(LossesContainer, self).build(y_pred)\n    self._losses = self._maybe_broadcast_to_outputs(y_pred, self._losses)\n    self._losses = self._conform_to_outputs(y_pred, self._losses)\n    self._losses = nest.map_structure(self._get_loss_object, self._losses)\n    self._losses = nest.flatten(self._losses)\n    self._loss_weights = self._maybe_broadcast_to_outputs(y_pred, self._loss_weights)\n    self._loss_weights = self._conform_to_outputs(y_pred, self._loss_weights)\n    self._loss_weights = nest.flatten(self._loss_weights)\n    self._create_metrics()\n    self._built = True",
    "docstring": "One-time setup of loss objects.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\compile_utils.py",
    "ast_data": "FunctionDef name:build arg:self arg:y_pred arguments arg arg Call Call Assign Call Assign Call Assign Call Assign Call Assign Call Assign Call Assign Call Call Assign"
  },
  {
    "library": "matplotlib",
    "name": "draw",
    "source_code": "def draw(self):\n    if self._is_drawing:\n        return\n    with cbook._setattr_cm(self, _is_drawing=True):\n        super().draw()\n    self.update()",
    "docstring": "Render the figure and update the macosx canvas.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\backends\\backend_macosx.py",
    "ast_data": "FunctionDef name:draw arg:self arguments arg If Return return:no With Call Call Call Call"
  },
  {
    "library": "matplotlib",
    "name": "get_gridlines",
    "source_code": "def get_gridlines(self, which='major', axis='both'):\n    _api.check_in_list(['both', 'major', 'minor'], which=which)\n    _api.check_in_list(['both', 'x', 'y'], axis=axis)\n    gridlines = []\n    if axis in ('both', 'x'):\n        locs = []\n        y1, y2 = self.axes.get_ylim()\n        if which in ('both', 'major'):\n            locs.extend(self.axes.xaxis.major.locator())\n        if which in ('both', 'minor'):\n            locs.extend(self.axes.xaxis.minor.locator())\n        gridlines.extend(([[x, x], [y1, y2]] for x in locs))\n    if axis in ('both', 'y'):\n        x1, x2 = self.axes.get_xlim()\n        locs = []\n        if self.axes.yaxis._major_tick_kw['gridOn']:\n            locs.extend(self.axes.yaxis.major.locator())\n        if self.axes.yaxis._minor_tick_kw['gridOn']:\n            locs.extend(self.axes.yaxis.minor.locator())\n        gridlines.extend(([[x1, x2], [y, y]] for y in locs))\n    return gridlines",
    "docstring": "Return list of gridline coordinates in data coordinates. Parameters ---------- which : {\"both\", \"major\", \"minor\"} axis : {\"both\", \"x\", \"y\"}",
    "type": "method",
    "file_path": "matplotlib\\lib\\mpl_toolkits\\axisartist\\axislines.py",
    "ast_data": "FunctionDef name:get_gridlines arg:self arg:which arg:axis arguments arg arg arg Call Call Assign If Compare Assign Assign Call If Compare Call Call If Compare Call Call Call If Compare Assign Call Assign If Call Call If Call Call Call Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "score_samples",
    "source_code": "@available_if(_final_estimator_has('score_samples'))\ndef score_samples(self, X):\n    with _raise_or_warn_if_not_fitted(self):\n        Xt = X\n        for _, _, transformer in self._iter(with_final=False):\n            Xt = transformer.transform(Xt)\n        return self.steps[-1][1].score_samples(Xt)",
    "docstring": "Transform the data, and apply with the final estimator. Call of each transformer in the pipeline. The transformed data are finally passed to the final estimator that calls method. Only valid if the final estimator implements . Parameters ---------- X : iterable Data to predict on. Must fulfill input requirements of first step of the pipeline. Returns ------- y_score : ndarray of shape (n_samples,) Result of calling on the final estimator.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\pipeline.py",
    "ast_data": "FunctionDef name:score_samples arg:self arg:X arguments arg arg With Call Assign For Call Assign Call Return return:yes Call Call Call"
  },
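A usage sketch for `Pipeline.score_samples`, here with a final estimator that implements it (`IsolationForest`); the data is synthetic:

```python
import numpy as np
from sklearn.ensemble import IsolationForest
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler

rng = np.random.default_rng(0)
X = rng.normal(size=(100, 3))

pipe = make_pipeline(StandardScaler(), IsolationForest(random_state=0))
pipe.fit(X)
scores = pipe.score_samples(X)  # transform through the scaler, then score
print(scores.shape)             # (100,)
```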
  {
    "library": "pandas",
    "name": "append",
    "source_code": "def append(self, other: Index | Sequence[Index]) -> Index:\n    to_concat = [self]\n    if isinstance(other, (list, tuple)):\n        to_concat += list(other)\n    else:\n        to_concat.append(other)\n    for obj in to_concat:\n        if not isinstance(obj, Index):\n            raise TypeError('all inputs must be Index')\n    names = {obj.name for obj in to_concat}\n    name = None if len(names) > 1 else self.name\n    return self._concat(to_concat, name)",
    "docstring": "Append a collection of Index options together. Parameters ---------- other : Index or list/tuple of indices Single Index or a collection of indices, which can be either a list or a tuple. Returns ------- Index Returns a new Index object resulting from appending the provided other indices to the original Index. See Also -------- Index.insert : Make new Index inserting new item at location. Examples -------- >>> idx = pd.Index([1, 2, 3]) >>> idx.append(pd.Index([4])) Index([1, 2, 3, 4], dtype='int64')",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\indexes\\base.py",
    "ast_data": "FunctionDef name:append arg:self arg:other arguments arg arg Assign If Call Call Call For If Call Raise Call Assign Assign Compare Call Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "post_download",
    "source_code": "def post_download(project, filename, name=None, description=''):\n    if name is None:\n        name = os.path.basename(filename)\n    with open(filename, 'rb') as f:\n        filedata = f.read()\n    url = f'https://api.github.com/repos/{project}/downloads'\n    payload = json.dumps(dict(name=name, size=len(filedata), description=description))\n    response = requests.post(url, data=payload, headers=make_auth_header())\n    response.raise_for_status()\n    reply = json.loads(response.content)\n    s3_url = reply['s3_url']\n    fields = dict(key=reply['path'], acl=reply['acl'], success_action_status=201, Filename=reply['name'], AWSAccessKeyId=reply['accesskeyid'], Policy=reply['policy'], Signature=reply['signature'], file=(reply['name'], filedata))\n    fields['Content-Type'] = reply['mime_type']\n    data, content_type = encode_multipart_formdata(fields)\n    s3r = requests.post(s3_url, data=data, headers={'Content-Type': content_type})\n    return s3r",
    "docstring": "Upload a file to the GitHub downloads area",
    "type": "function",
    "file_path": "matplotlib\\tools\\gh_api.py",
    "ast_data": "FunctionDef name:post_download arg:project arg:filename arg:name arg:description arguments arg arg arg arg If Compare Assign Call With Call Assign Call Assign Assign Call Call Call Assign Call Call Call Assign Call Assign Assign Call Assign Assign Call Assign Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "stop_filter",
    "source_code": "def stop_filter(self, post_processing):\n    orig_img = np.asarray(self.buffer_rgba())\n    slice_y, slice_x = cbook._get_nonzero_slices(orig_img[..., 3])\n    cropped_img = orig_img[slice_y, slice_x]\n    self._renderer = self._filter_renderers.pop()\n    self._update_methods()\n    if cropped_img.size:\n        img, ox, oy = post_processing(cropped_img / 255, self.dpi)\n        gc = self.new_gc()\n        if img.dtype.kind == 'f':\n            img = np.asarray(img * 255.0, np.uint8)\n        self._renderer.draw_image(gc, slice_x.start + ox, int(self.height) - slice_y.stop + oy, img[::-1])",
    "docstring": "Save the current canvas as an image and apply post processing. The *post_processing* function:: def post_processing(image, dpi): # ny, nx, depth = image.shape # image (numpy array) has RGBA channels and has a depth of 4. ... # create a new_image (numpy array of 4 channels, size can be # different). The resulting image may have offsets from # lower-left corner of the original image return new_image, offset_x, offset_y The saved renderer is restored and the returned image from post_processing is plotted (using draw_image) on it.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\backends\\backend_agg.py",
    "ast_data": "FunctionDef name:stop_filter arg:self arg:post_processing arguments arg arg Assign Call Call Assign Call Assign Assign Call Call If Assign Call Assign Call If Compare Assign Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "GroupFusion",
    "source_code": "class GroupFusion(GroupBatchFusionBase):\n    pass",
    "docstring": "Fuse ops in a group way, e.g, fuse mm/addmm of arbitrary input shapes with fbgemm.gmm.",
    "type": "class",
    "file_path": "pytorch\\torch\\_inductor\\fx_passes\\group_batch_fusion.py",
    "ast_data": "ClassDef name:GroupFusion"
  },
  {
    "library": "scipy",
    "name": "__call__",
    "source_code": "def __call__(self, *args, **query_options):\n    xi = _ndim_coords_from_arrays(args, ndim=self.points.shape[1])\n    xi = self._check_call_shape(xi)\n    xi = self._scale_x(xi)\n    xi_flat = xi.reshape(-1, xi.shape[-1])\n    original_shape = xi.shape\n    flattened_shape = xi_flat.shape\n    dist, i = self.tree.query(xi_flat, **query_options)\n    valid_mask = np.isfinite(dist)\n    if self.values.ndim > 1:\n        interp_shape = flattened_shape[:-1] + self.values.shape[1:]\n    else:\n        interp_shape = flattened_shape[:-1]\n    if np.issubdtype(self.values.dtype, np.complexfloating):\n        interp_values = np.full(interp_shape, np.nan, dtype=self.values.dtype)\n    else:\n        interp_values = np.full(interp_shape, np.nan)\n    interp_values[valid_mask] = self.values[i[valid_mask], ...]\n    if self.values.ndim > 1:\n        new_shape = original_shape[:-1] + self.values.shape[1:]\n    else:\n        new_shape = original_shape[:-1]\n    interp_values = interp_values.reshape(new_shape)\n    return interp_values",
    "docstring": "Evaluate interpolator at given points. Parameters ---------- x1, x2, ... xn : array-like of float Points where to interpolate data at. x1, x2, ... xn can be array-like of float with broadcastable shape. or x1 can be array-like of float with shape `scipy.spatial.cKDTree.query` for an overview of the different options. .. versionadded:: 1.12.0",
    "type": "method",
    "file_path": "scipy\\scipy\\interpolate\\_ndgriddata.py",
    "ast_data": "FunctionDef name:__call__ arg:self arguments arg arg arg Assign Call Assign Call Assign Call Assign Call Assign Assign Assign Call Assign Call If Compare Assign Assign If Call Assign Call Assign Call Assign If Compare Assign Assign Assign Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "to_query_string",
    "source_code": "def to_query_string(self):\n    return '&'.join((f'{name}={value}' for name, value in self.attlist()))",
    "docstring": "Generate query string from node attributes.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\sphinxext\\roles.py",
    "ast_data": "FunctionDef name:to_query_string arg:self arguments arg Return return:yes Call Call"
  },
  {
    "library": "matplotlib",
    "name": "disconnect_events",
    "source_code": "def disconnect_events(self):\n    for c in self._cids:\n        self.canvas.mpl_disconnect(c)",
    "docstring": "Disconnect all events created by this widget.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\widgets.py",
    "ast_data": "FunctionDef name:disconnect_events arg:self arguments arg For Call"
  },
  {
    "library": "cherrypy",
    "name": "release_lock",
    "source_code": "def release_lock(self):\n    self.locks[self.id].release()\n    self.locked = False",
    "docstring": "Release the lock on the currently-loaded session data.",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\lib\\sessions.py",
    "ast_data": "FunctionDef name:release_lock arg:self arguments arg Call Assign"
  },
  {
    "library": "tensorflow",
    "name": "angle",
    "source_code": "@tf_export('math.angle', v1=['math.angle', 'angle'])\n@dispatch.register_unary_elementwise_api\n@dispatch.add_dispatch_support\n@deprecation.deprecated_endpoints('angle')\ndef angle(input, name=None):\n    with ops.name_scope(name, 'Angle', [input]) as name:\n        input = ops.convert_to_tensor(input, name='input')\n        if input.dtype.is_complex:\n            return gen_math_ops.angle(input, Tout=input.dtype.real_dtype, name=name)\n        else:\n            return array_ops.where(input < 0, np.pi * array_ops.ones_like(input), array_ops.zeros_like(input))",
    "docstring": "Returns the element-wise argument of a complex (or real) tensor. Given a tensor , this operation returns a tensor of type that is the argument of each element in considered as a complex number. The elements in are considered to be complex numbers of the form \\\\(a + bj\\\\), where *a* is the real part and *b* is the imaginary part. If is real then *b* is zero by definition. The argument returned by this function is of the form \\\\(atan2(b, a)\\\\). If is real, a tensor of all zeros is returned. For example: Args: input: A . Must be one of the following types: , , , . name: A name for the operation (optional). Returns: A of type or .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\math_ops.py",
    "ast_data": "FunctionDef name:angle arg:input arg:name arguments arg arg With Call Assign Call If Return return:yes Call Return return:yes Call Compare Call Call Call Call"
  },
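  {
    "editor_note": "Illustrative entry added by the editor for `tf.math.angle` above: a short sketch of the complex and real cases described in the docstring; printed values in the comments are approximate.",
    "example_code": "import tensorflow as tf\n\nz = tf.constant([-2.25 + 4.75j, 3.25 + 5.75j])\nprint(tf.math.angle(z).numpy())  # approx. [2.0131705, 1.056345]\n\n# For real input, the result is pi where the value is negative, else 0.\nr = tf.constant([-1.0, 2.0])\nprint(tf.math.angle(r).numpy())  # [3.1415927, 0.]"
  },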
  {
    "library": "pytorch",
    "name": "sympy_subs",
    "source_code": "def sympy_subs(expr: sympy.Expr, replacements: dict[sympy.Expr, Any]) -> sympy.Expr:\n\n    def to_symbol(replaced: sympy.Expr, replacement: Union[sympy.Expr, str]) -> sympy.Symbol:\n        assert isinstance(replaced, sympy.Expr)\n        if isinstance(replacement, str):\n            return sympy.Symbol(replacement, integer=replaced.is_integer, nonnegative=replaced.is_nonnegative)\n        else:\n            return replacement\n    return sympy.sympify(expr).xreplace({k: to_symbol(k, v) for k, v in replacements.items()})",
    "docstring": "When the passed replacement symbol v is a string, it is converted to a symbol with name v that have the same replaced expression integer and nonnegative properties.",
    "type": "function",
    "file_path": "pytorch\\torch\\_inductor\\utils.py",
    "ast_data": "FunctionDef name:sympy_subs arg:expr arg:replacements arguments arg arg FunctionDef name:to_symbol arg:replaced arg:replacement arguments arg arg Call If Call Return return:yes Call Return return:yes Return return:yes Call Call Call Call"
  },
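  {
    "editor_note": "Illustrative entry added by the editor for `sympy_subs` above: the helper is a private torch._inductor utility, so this sketch mirrors its xreplace-based behavior with plain sympy, showing the replacement symbol inheriting the integer/nonnegative assumptions of the expression it replaces.",
    "example_code": "import sympy\n\nx = sympy.Symbol('x', integer=True, nonnegative=True)\n# A string replacement becomes a symbol carrying the replaced\n# expression's integer/nonnegative flags, as sympy_subs does.\ny = sympy.Symbol('y', integer=x.is_integer, nonnegative=x.is_nonnegative)\nexpr = x ** 2 + 1\nprint(sympy.sympify(expr).xreplace({x: y}))  # y**2 + 1\nprint(y.is_integer, y.is_nonnegative)  # True True"
  },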
  {
    "library": "pandas",
    "name": "drop_duplicates",
    "source_code": "def drop_duplicates(self, subset: Hashable | Iterable[Hashable] | None=None, *, keep: DropKeep='first', inplace: bool=False, ignore_index: bool=False) -> DataFrame | None:\n    if self.empty:\n        return self.copy(deep=False)\n    inplace = validate_bool_kwarg(inplace, 'inplace')\n    ignore_index = validate_bool_kwarg(ignore_index, 'ignore_index')\n    result = self[-self.duplicated(subset, keep=keep)]\n    if ignore_index:\n        result.index = default_index(len(result))\n    if inplace:\n        self._update_inplace(result)\n        return None\n    else:\n        return result",
    "docstring": "Return DataFrame with duplicate rows removed. Considering certain columns is optional. Indexes, including time indexes are ignored. Parameters ---------- subset : column label or iterable of labels, optional Only consider certain columns for identifying duplicates, by default use all of the columns. keep : {'first', 'last', ``. >>> df.drop_duplicates(subset=[\"brand\", \"style\"], keep=\"last\") brand style rating 1 Yum Yum cup 4.0 2 Indomie cup 3.5 4 Indomie pack 5.0",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\frame.py",
    "ast_data": "FunctionDef name:drop_duplicates arg:self arg:subset arguments arg arg arg arg arg If Return return:yes Call Assign Call Assign Call Assign Call If Assign Call Call If Call Return return:no Return return:yes"
  },
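  {
    "editor_note": "Illustrative entry added by the editor for `DataFrame.drop_duplicates` above: a minimal sketch of the subset, keep and ignore_index parameters; the example data are invented.",
    "example_code": "import pandas as pd\n\ndf = pd.DataFrame({\n    'brand': ['Yum Yum', 'Yum Yum', 'Indomie', 'Indomie', 'Indomie'],\n    'style': ['cup', 'cup', 'cup', 'pack', 'pack'],\n    'rating': [4.0, 4.0, 3.5, 15.0, 5.0],\n})\nprint(df.drop_duplicates())  # drops the duplicated first row\nprint(df.drop_duplicates(subset=['brand'], keep='last'))\nprint(df.drop_duplicates(ignore_index=True))  # relabels the index 0..n-1"
  },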
  {
    "library": "sphinx",
    "name": "extract_summary",
    "source_code": "def extract_summary(doc: list[str], document: Any) -> str:\n\n    def parse(doc: list[str], settings: Any) -> nodes.document:\n        state_machine = RSTStateMachine(state_classes, 'Body')\n        node = new_document('', settings)\n        node.reporter = NullReporter()\n        state_machine.run(doc, node)\n        return node\n    while doc and (not doc[0].strip()):\n        doc.pop(0)\n    for i, piece in enumerate(doc):\n        if not piece.strip():\n            doc = doc[:i]\n            break\n    if doc == []:\n        return ''\n    node = parse(doc, document.settings)\n    if isinstance(node[0], nodes.section):\n        summary = node[0].astext().strip()\n    elif not isinstance(node[0], nodes.paragraph):\n        summary = doc[0].strip()\n    else:\n        sentences = periods_re.split(' '.join(doc))\n        if len(sentences) == 1:\n            summary = sentences[0].strip()\n        else:\n            summary = ''\n            for i in range(len(sentences)):\n                summary = '. '.join(sentences[:i + 1]).rstrip('.') + '.'\n                node[:] = []\n                node = parse(doc, document.settings)\n                if summary.endswith(WELL_KNOWN_ABBREVIATIONS):\n                    pass\n                elif not any(node.findall(nodes.system_message)):\n                    break\n    summary = literal_re.sub('.', summary)\n    return summary",
    "docstring": "Extract summary from docstring.",
    "type": "function",
    "file_path": "sphinx\\sphinx\\ext\\autosummary\\__init__.py",
    "ast_data": "FunctionDef name:extract_summary arg:doc arg:document arguments arg arg FunctionDef name:parse arg:doc arg:settings arguments arg arg Assign Call Assign Call Assign Call Call Return return:yes While BoolOp Call Call For Call If Call Assign If Compare Return return:yes Assign Call If Call Assign Call Call If Call Assign Call Assign Call Call If Compare Call Assign Call Assign For Call Call Assign Call Call Assign Assign Call If Call If Call Call Assign Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "istype",
    "source_code": "def istype(obj, allowed_types):\n    if isinstance(allowed_types, (tuple, list, set)):\n        return type(obj) in allowed_types\n    return type(obj) is allowed_types",
    "docstring": "isinstance() without subclasses",
    "type": "function",
    "file_path": "pytorch\\torch\\_dynamo\\utils.py",
    "ast_data": "FunctionDef name:istype arg:obj arg:allowed_types arguments arg arg If Call Return return:yes Compare Call Return return:yes Compare Call"
  },
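  {
    "editor_note": "Illustrative entry added by the editor for `istype` above: the helper is private to torch._dynamo, so this sketch restates it standalone to contrast exact-type matching with isinstance's subclass matching.",
    "example_code": "def istype(obj, allowed_types):\n    # Exact type check: subclasses do NOT match, unlike isinstance().\n    if isinstance(allowed_types, (tuple, list, set)):\n        return type(obj) in allowed_types\n    return type(obj) is allowed_types\n\nclass MyList(list):\n    pass\n\nprint(isinstance(MyList(), list))  # True: subclasses count\nprint(istype(MyList(), list))      # False: exact type only\nprint(istype([], (list, tuple)))   # True"
  },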
  {
    "library": "pytorch",
    "name": "_block_diag_iterable",
    "source_code": "@register_decomposition(aten.block_diag)\n@out_wrapper()\ndef _block_diag_iterable(tensors: list[TensorLikeType]) -> TensorLikeType:\n    tensors_2d = [tensor.view(1, -1) if tensor.dim() <= 1 else tensor for tensor in tensors]\n    ncols = builtins.sum((tensor.shape[1] for tensor in tensors_2d))\n    device = tensors_2d[0].device\n    result = []\n    col_start = 0\n    for i, tensor in enumerate(tensors_2d):\n        torch._check(tensor.dim() == 2, lambda: f'Input tensors must have 2 or fewer dimensions. Input {i} has {tensor.dim()} dimensions')\n        torch._check(tensor.device == device, lambda: f'Input tensors must all be on the same device. Input 0 is on device {device} and input {i} is on device {tensor.device}.')\n        row, col = tensor.shape\n        left = torch.zeros((row, col_start), device=device, dtype=tensor.dtype)\n        right = torch.zeros((row, ncols - col_start - col), device=device, dtype=tensor.dtype)\n        result += [torch.cat((left, tensor, right), dim=1)]\n        col_start += col\n    return torch.cat(result, dim=0)",
    "docstring": "Reference implementation of torch.block_diag",
    "type": "function",
    "file_path": "pytorch\\torch\\_refs\\__init__.py",
    "ast_data": "FunctionDef name:_block_diag_iterable arg:tensors arguments arg Assign Compare Call Call Assign Call Assign Assign Assign For Call Call Compare Call arguments Call Call Compare arguments Assign Assign Call Assign Call Call Return return:yes Call Call Call"
  },
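  {
    "editor_note": "Illustrative entry added by the editor for the `torch.block_diag` reference implementation above: a sketch calling the public op, including the promotion of 1-D inputs to single-row matrices performed by the first line of the reference.",
    "example_code": "import torch\n\na = torch.ones(2, 2)\nb = torch.full((1, 3), 2.0)\nc = torch.tensor([3.0])  # 1-D input is viewed as a 1 x 1 row block\nout = torch.block_diag(a, b, c)\nprint(out.shape)  # torch.Size([4, 6])\nprint(out)"
  },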
  {
    "library": "django",
    "name": "prepare_lookup_value",
    "source_code": "def prepare_lookup_value(key, value, separator=','):\n    if isinstance(value, list):\n        return [prepare_lookup_value(key, v, separator=separator) for v in value]\n    if key.endswith('__in'):\n        value = value.split(separator)\n    elif key.endswith('__isnull'):\n        value = value.lower() not in ('', 'false', '0')\n    return value",
    "docstring": "Return a lookup value prepared to be used in queryset filtering.",
    "type": "function",
    "file_path": "django\\django\\contrib\\admin\\utils.py",
    "ast_data": "FunctionDef name:prepare_lookup_value arg:key arg:value arg:separator arguments arg arg arg If Call Return return:yes Call If Call Assign Call If Call Assign Compare Call Return return:yes"
  },
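  {
    "editor_note": "Illustrative entry added by the editor for `prepare_lookup_value` above: restated standalone (importing Django admin utilities normally requires configured settings), showing the __in split and the __isnull boolean coercion.",
    "example_code": "def prepare_lookup_value(key, value, separator=','):\n    # Mirror of the admin helper above.\n    if isinstance(value, list):\n        return [prepare_lookup_value(key, v, separator=separator) for v in value]\n    if key.endswith('__in'):\n        value = value.split(separator)\n    elif key.endswith('__isnull'):\n        value = value.lower() not in ('', 'false', '0')\n    return value\n\nprint(prepare_lookup_value('id__in', '1,2,3'))        # ['1', '2', '3']\nprint(prepare_lookup_value('name__isnull', 'False'))  # False\nprint(prepare_lookup_value('name__isnull', '1'))      # True"
  },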
  {
    "library": "pygame",
    "name": "stop",
    "source_code": "def stop(self):\n    pass",
    "docstring": "Not implemented.",
    "type": "method",
    "file_path": "pygame\\src_py\\_camera_vidcapture.py",
    "ast_data": "FunctionDef name:stop arg:self arguments arg"
  },
  {
    "library": "sphinx",
    "name": "desc_sig_literal_string",
    "source_code": "class desc_sig_literal_string(desc_sig_element, _sig_element=True):\n    classes = ['s']",
    "docstring": "Node for a string literal in a signature.",
    "type": "class",
    "file_path": "sphinx\\sphinx\\addnodes.py",
    "ast_data": "ClassDef name:desc_sig_literal_string Assign"
  },
  {
    "library": "matplotlib",
    "name": "invert_xaxis",
    "source_code": "def invert_xaxis(self):\n    raise TypeError('Changing axes limits of a geographic projection is not supported.  Please consider using Cartopy.')",
    "docstring": "Not supported. Please consider using Cartopy.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\projections\\geo.py",
    "ast_data": "FunctionDef name:invert_xaxis arg:self arguments arg Raise Call"
  },
  {
    "library": "pytorch",
    "name": "generate_kernel_call",
    "source_code": "def generate_kernel_call(self, kernel_name: str, call_args, *, device=None, triton=True, arg_types=None, raw_keys=None, raw_args=None, triton_meta=None, original_fxnode_name=None):\n    self.args_to_buffers.update({arg: V.graph.try_get_buffer(arg) for arg in call_args if isinstance(arg, str)})\n    device = device or V.graph.get_current_device_or_throw()\n    self.writeline(KernelCallLine(self, kernel_name=kernel_name, call_args=call_args, raw_keys=raw_keys, raw_args=raw_args, arg_types=arg_types, triton=triton, triton_meta=triton_meta, device=device, graph_name=V.graph.name, original_fxnode_name=original_fxnode_name))",
    "docstring": "Generates kernel call code. triton: Defines whether the backend uses Triton for codegen. Otherwise it uses the CUDA language when gpu=True, and C++ when gpu=False.",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\codegen\\wrapper.py",
    "ast_data": "FunctionDef name:generate_kernel_call arg:self arg:kernel_name arg:call_args arguments arg arg arg arg arg arg arg arg arg arg Call Call Call Assign BoolOp Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "load_checkpoint",
    "source_code": "@tf_export('train.load_checkpoint')\ndef load_checkpoint(ckpt_dir_or_file):\n    filename = _get_checkpoint_filename(ckpt_dir_or_file)\n    if filename is None:\n        raise ValueError(\"Couldn't find 'checkpoint' file or checkpoints in given directory %s\" % ckpt_dir_or_file)\n    return py_checkpoint_reader.NewCheckpointReader(filename)",
    "docstring": "Returns for checkpoint found in . If resolves to a directory with multiple checkpoints, reader for the latest checkpoint is returned. Example usage: Args: ckpt_dir_or_file: Directory with checkpoints file or path to checkpoint file. Returns: object. Raises: ValueError: If resolves to a directory with no checkpoints.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\training\\checkpoint_utils.py",
    "ast_data": "FunctionDef name:load_checkpoint arg:ckpt_dir_or_file arguments arg Assign Call If Compare Raise Call Return return:yes Call Call"
  },
  {
    "library": "numpy",
    "name": "formatargvalues",
    "source_code": "def formatargvalues(args, varargs, varkw, locals, formatarg=str, formatvarargs=lambda name: '*' + name, formatvarkw=lambda name: '**' + name, formatvalue=lambda value: '=' + repr(value), join=joinseq):\n\n    def convert(name, locals=locals, formatarg=formatarg, formatvalue=formatvalue):\n        return formatarg(name) + formatvalue(locals[name])\n    specs = [strseq(arg, convert, join) for arg in args]\n    if varargs:\n        specs.append(formatvarargs(varargs) + formatvalue(locals[varargs]))\n    if varkw:\n        specs.append(formatvarkw(varkw) + formatvalue(locals[varkw]))\n    return '(' + ', '.join(specs) + ')'",
    "docstring": "Format an argument spec from the 4 values returned by getargvalues. The first four arguments are (args, varargs, varkw, locals). The next four arguments are the corresponding optional formatting functions that are called to turn names and values into strings. The ninth argument is an optional function to format the sequence of arguments.",
    "type": "function",
    "file_path": "numpy\\numpy\\_utils\\_inspect.py",
    "ast_data": "FunctionDef name:formatargvalues arg:args arg:varargs arg:varkw arg:locals arg:formatarg arg:formatvarargs arg:formatvarkw arg:formatvalue arg:join arguments arg arg arg arg arg arg arg arg arg arguments arg arguments arg arguments arg Call FunctionDef name:convert arg:name arg:locals arg:formatarg arg:formatvalue arguments arg arg arg arg Return return:yes Call Call Assign Call If Call Call Call If Call Call Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "fuzzy_list_to_dict",
    "source_code": "def fuzzy_list_to_dict(items: list[tuple[str, str]]) -> dict[str, list[str]]:\n    rc: dict[str, list[str]] = defaultdict(list)\n    for key, val in items:\n        rc[key].append(val)\n    return dict(rc)",
    "docstring": "Converts list to dict preserving elements with duplicate keys",
    "type": "function",
    "file_path": "pytorch\\.github\\scripts\\gitutils.py",
    "ast_data": "FunctionDef name:fuzzy_list_to_dict arg:items arguments arg Call For Call Return return:yes Call"
  },
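  {
    "editor_note": "Illustrative entry added by the editor for `fuzzy_list_to_dict` above: restated standalone since the original lives in a repository script, showing how duplicate keys accumulate their values in order.",
    "example_code": "from collections import defaultdict\n\ndef fuzzy_list_to_dict(items):\n    # Group values by key, preserving duplicates in insertion order.\n    rc = defaultdict(list)\n    for key, val in items:\n        rc[key].append(val)\n    return dict(rc)\n\npairs = [('a', '1'), ('b', '2'), ('a', '3')]\nprint(fuzzy_list_to_dict(pairs))  # {'a': ['1', '3'], 'b': ['2']}"
  },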
  {
    "library": "numpy",
    "name": "_display_as_base",
    "source_code": "def _display_as_base(cls):\n    assert issubclass(cls, Exception)\n    cls.__name__ = cls.__base__.__name__\n    return cls",
    "docstring": "A decorator that makes an exception class look like its base. We use this to hide subclasses that are implementation details - the user should catch the base type, which is what the traceback will show them. Classes decorated with this decorator are subject to removal without a deprecation warning.",
    "type": "function",
    "file_path": "numpy\\numpy\\_core\\_exceptions.py",
    "ast_data": "FunctionDef name:_display_as_base arg:cls arguments arg Call Assign Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "mean_poisson_deviance",
    "source_code": "@validate_params({'y_true': ['array-like'], 'y_pred': ['array-like'], 'sample_weight': ['array-like', None]}, prefer_skip_nested_validation=True)\ndef mean_poisson_deviance(y_true, y_pred, *, sample_weight=None):\n    return mean_tweedie_deviance(y_true, y_pred, sample_weight=sample_weight, power=1)",
    "docstring": "Mean Poisson deviance regression loss. Poisson deviance is equivalent to the Tweedie deviance with the power parameter . Read more in the :ref:. Parameters ---------- y_true : array-like of shape (n_samples,) Ground truth (correct) target values. Requires y_true >= 0. y_pred : array-like of shape (n_samples,) Estimated target values. Requires y_pred > 0. sample_weight : array-like of shape (n_samples,), default=None Sample weights. Returns ------- loss : float A non-negative floating point value (the best value is 0.0). Examples -------- >>> from sklearn.metrics import mean_poisson_deviance >>> y_true = [2, 0, 1, 4] >>> y_pred = [0.5, 0.5, 2., 2.] >>> mean_poisson_deviance(y_true, y_pred) 1.4260...",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\metrics\\_regression.py",
    "ast_data": "FunctionDef name:mean_poisson_deviance arg:y_true arg:y_pred arguments arg arg arg Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "fit_generator",
    "source_code": "def fit_generator(self, generator, steps_per_epoch=None, epochs=1, verbose=1, callbacks=None, validation_data=None, validation_steps=None, validation_freq=1, class_weight=None, max_queue_size=10, workers=1, use_multiprocessing=False, shuffle=True, initial_epoch=0):\n    warnings.warn('`model.fit_generator` is deprecated and will be removed in a future version. Please use `Model.fit`, which supports generators.')\n    return self.fit(generator, steps_per_epoch=steps_per_epoch, epochs=epochs, verbose=verbose, callbacks=callbacks, validation_data=validation_data, validation_steps=validation_steps, validation_freq=validation_freq, class_weight=class_weight, max_queue_size=max_queue_size, workers=workers, use_multiprocessing=use_multiprocessing, shuffle=shuffle, initial_epoch=initial_epoch)",
    "docstring": "Fits the model on data yielded batch-by-batch by a Python generator. DEPRECATED: now supports generators, so there is no longer any need to use this endpoint.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\training_v1.py",
    "ast_data": "FunctionDef name:fit_generator arg:self arg:generator arg:steps_per_epoch arg:epochs arg:verbose arg:callbacks arg:validation_data arg:validation_steps arg:validation_freq arg:class_weight arg:max_queue_size arg:workers arg:use_multiprocessing arg:shuffle arg:initial_epoch arguments arg arg arg arg arg arg arg arg arg arg arg arg arg arg arg Call Return return:yes Call"
  },
  {
    "library": "numpy",
    "name": "calc_extra_info",
    "source_code": "def calc_extra_info(self):\n    info = {}\n    for key in ['extra_compile_args', 'extra_link_args']:\n        opt = self.cp.get(self.section, key)\n        opt = _shell_utils.NativeParser.split(opt)\n        if opt:\n            tmp = {key: opt}\n            dict_append(info, **tmp)\n    return info",
    "docstring": "Updates the information in the current information with respect to these flags: extra_compile_args extra_link_args",
    "type": "method",
    "file_path": "numpy\\numpy\\distutils\\system_info.py",
    "ast_data": "FunctionDef name:calc_extra_info arg:self arguments arg Assign For Assign Call Assign Call If Assign Call Return return:yes"
  },
  {
    "library": "pandas",
    "name": "fill_value",
    "source_code": "@property\ndef fill_value(self):\n    return self.dtype.fill_value",
    "docstring": "Elements in that are are not stored. For memory savings, this should be the most common value in the array. See Also -------- SparseDtype : Dtype for data stored in :class:. Series.value_counts : Return a Series containing counts of unique values. Series.fillna : Fill NA/NaN in a Series with a specified value. Examples -------- >>> ser = pd.Series([0, 0, 2, 2, 2], dtype=\"Sparse[int]\") >>> ser.sparse.fill_value 0 >>> spa_dtype = pd.SparseDtype(dtype=np.int32, fill_value=2) >>> ser = pd.Series([0, 0, 2, 2, 2], dtype=spa_dtype) >>> ser.sparse.fill_value 2",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\arrays\\sparse\\array.py",
    "ast_data": "FunctionDef name:fill_value arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "compute_capability_from_device_desc",
    "source_code": "def compute_capability_from_device_desc(device_attrs):\n    match = _PHYSICAL_DEVICE_DESCRIPTION_REGEX.search(device_attrs.physical_device_desc)\n    if not match:\n        return GpuInfo(None, None)\n    cc = (int(match.group(2)), int(match.group(3))) if match.group(2) else None\n    return GpuInfo(match.group(1), cc)",
    "docstring": "Returns the GpuInfo given a DeviceAttributes proto. Args: device_attrs: A DeviceAttributes proto. Returns A gpu_info tuple. Both fields are None if does not have a valid physical_device_desc field.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\gpu_util.py",
    "ast_data": "FunctionDef name:compute_capability_from_device_desc arg:device_attrs arguments arg Assign Call If Return return:yes Call Assign Call Call Call Call Call Return return:yes Call Call"
  },
  {
    "library": "scipy",
    "name": "isspmatrix_dok",
    "source_code": "def isspmatrix_dok(x):\n    return isinstance(x, dok_matrix)",
    "docstring": "Is of dok_array type? Parameters ---------- x object to check for being a dok matrix Returns ------- bool True if is a dok matrix, False otherwise Examples -------- >>> from scipy.sparse import dok_array, dok_matrix, coo_matrix, isspmatrix_dok >>> isspmatrix_dok(dok_matrix([[5]])) True >>> isspmatrix_dok(dok_array([[5]])) False >>> isspmatrix_dok(coo_matrix([[5]])) False",
    "type": "function",
    "file_path": "scipy\\scipy\\sparse\\_dok.py",
    "ast_data": "FunctionDef name:isspmatrix_dok arg:x arguments arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "DummyPass",
    "source_code": "class DummyPass(CustomGraphPass):\n\n    def __call__(self, graph: torch.fx.graph.Graph) -> None:\n        return None\n\n    def uuid(self) -> Optional[Any]:\n        return None",
    "docstring": "A Dummy pass to be used by ConfigFuzzer",
    "type": "class",
    "file_path": "pytorch\\torch\\_inductor\\fuzzer.py",
    "ast_data": "ClassDef name:DummyPass FunctionDef name:__call__ arg:self arg:graph arguments arg arg Return return:no FunctionDef name:uuid arg:self arguments arg Return return:no"
  },
  {
    "library": "scikit-learn",
    "name": "score",
    "source_code": "@available_if(_estimator_has('score'))\ndef score(self, X, y, **score_params):\n    check_is_fitted(self)\n    if _routing_enabled():\n        routed_params = process_routing(self, 'score', **score_params)\n    else:\n        routed_params = Bunch(estimator=Bunch(score=score_params))\n    return self.estimator_.score(self.transform(X), y, **routed_params.estimator.score)",
    "docstring": "Reduce X to the selected features and return the score of the estimator. Parameters ---------- X : array of shape [n_samples, n_features] The input samples. y : array of shape [n_samples] The target values. **score_params : dict - If (default): Parameters directly passed to the `enable_metadata_routing=TruescoreMetadata Routing User Guide rfe.transform(X)y`.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\feature_selection\\_rfe.py",
    "ast_data": "FunctionDef name:score arg:self arg:X arg:y arguments arg arg arg arg Call If Call Assign Call Assign Call Call Return return:yes Call Call Call Call"
  },
  {
    "library": "scipy",
    "name": "from_spline",
    "source_code": "@classmethod\ndef from_spline(cls, tck, extrapolate=None):\n    if isinstance(tck, BSpline):\n        t, c, k = tck.tck\n        if extrapolate is None:\n            extrapolate = tck.extrapolate\n    else:\n        t, c, k = tck\n    cvals = np.empty((k + 1, len(t) - 1), dtype=c.dtype)\n    for m in range(k, -1, -1):\n        y = _fitpack_py.splev(t[:-1], tck, der=m)\n        cvals[k - m, :] = y / spec.gamma(m + 1)\n    return cls.construct_fast(cvals, t, extrapolate)",
    "docstring": "Construct a piecewise polynomial from a spline Parameters ---------- tck A spline, as returned by or a BSpline object. extrapolate : bool or 'periodic', optional If bool, determines whether to extrapolate to out-of-bounds points based on first and last intervals, or to return NaNs. If 'periodic', periodic extrapolation is used. Default is True. Examples -------- Construct an interpolating spline and convert it to a instance >>> import numpy as np >>> from scipy.interpolate import splrep, PPoly >>> x = np.linspace(0, 1, 11) >>> y = np.sin(2*np.pi*x) >>> tck = splrep(x, y, s=0) >>> p = PPoly.from_spline(tck) >>> isinstance(p, PPoly) True Note that this function only supports 1D splines out of the box. If the `splprepBSplinepolystckPPolysplev` ... out, atol=1e-15) True",
    "type": "method",
    "file_path": "scipy\\scipy\\interpolate\\_interpolate.py",
    "ast_data": "FunctionDef name:from_spline arg:cls arg:tck arg:extrapolate arguments arg arg arg If Call Assign If Compare Assign Assign Assign Call Call For Call Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "add_output",
    "source_code": "def add_output(self, *args, **kwargs):\n    return self._outputs.add(*args, **kwargs)",
    "docstring": "Add a wrapped output argument to the hint. Args: *args: The output tensor. **kwargs: \"name\" label \"tag\" a tag to group multiple arguments that will be aggregated. I.e. a string like 'cool_input'. Basically multiple inputs can be added to the same hint for parallel operations that will eventually be combined. An example would be static_rnn which creates multiple copies of state or inputs. \"aggregate\" aggregation strategy that is valid only for tag non None. Acceptable values are OpHint.AGGREGATE_FIRST, OpHint.AGGREGATE_LAST, and OpHint.AGGREGATE_STACK. \"index_override\" The global index to use. This corresponds to the argument order in the final stub that will be generated. Returns: The wrapped output tensor.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\lite\\python\\op_hint.py",
    "ast_data": "FunctionDef name:add_output arg:self arguments arg arg arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "list_dependencies",
    "source_code": "def list_dependencies(self, obj):\n    if obj not in self._children_cache:\n        children = {}\n    else:\n        children = self._children_cache[obj]\n    for name, dep in obj._deserialization_dependencies(children).items():\n        if not isinstance(dep, base.Trackable):\n            raise TypeError(f\"The dependency of type {type(dep)} is not an instance `Trackable`, and can't be saved to SavedModel. Please check the implementation of `_deserialization_dependencies` in the parent object {obj}.\")\n        yield (name, dep)",
    "docstring": "Yields that must be loaded before . Dependencies and children are both dictionaries of . Children define the object graph structure (used in both checkpoints and SavedModel), while dependency defines the order used to load the SavedModel Args: obj: A object Yields: Tuple of dependency names and trackable objects. Raises: TypeError: if any of the returned dependencies are not instances of .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\saved_model\\save.py",
    "ast_data": "FunctionDef name:list_dependencies arg:self arg:obj arguments arg arg If Compare Assign Assign For Call Call If Call Raise Call Call"
  },
  {
    "library": "scikit-learn",
    "name": "LogitLink",
    "source_code": "class LogitLink(BaseLink):\n    interval_y_pred = Interval(0, 1, False, False)\n\n    def link(self, y_pred, out=None):\n        return logit(y_pred, out=out)\n\n    def inverse(self, raw_prediction, out=None):\n        return expit(raw_prediction, out=out)",
    "docstring": "The logit link function g(x)=logit(x).",
    "type": "class",
    "file_path": "scikit-learn\\sklearn\\_loss\\link.py",
    "ast_data": "ClassDef name:LogitLink Assign Call FunctionDef name:link arg:self arg:y_pred arg:out arguments arg arg arg Return return:yes Call FunctionDef name:inverse arg:self arg:raw_prediction arg:out arguments arg arg arg Return return:yes Call"
  },
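  {
    "editor_note": "Illustrative entry added by the editor for `LogitLink` above: the class lives in a private sklearn module, so this sketch uses scipy.special's logit/expit directly; these are the same functions LogitLink.link and LogitLink.inverse delegate to.",
    "example_code": "import numpy as np\nfrom scipy.special import logit, expit\n\n# logit maps probabilities in (0, 1) onto the real line; expit inverts it.\np = np.array([0.1, 0.5, 0.9])\nraw = logit(p)\nprint(raw)  # approx. [-2.197, 0.0, 2.197]\nprint(np.allclose(expit(raw), p))  # True"
  },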
  {
    "library": "scipy",
    "name": "set_initial_value",
    "source_code": "def set_initial_value(self, y, t=0.0):\n    if isscalar(y):\n        y = [y]\n    n_prev = len(self._y)\n    if not n_prev:\n        self.set_integrator('')\n    self._y = asarray(y, self._integrator.scalar)\n    self.t = t\n    self._integrator.reset(len(self._y), self.jac is not None)\n    return self",
    "docstring": "Set initial conditions y(t) = y.",
    "type": "method",
    "file_path": "scipy\\scipy\\integrate\\_ode.py",
    "ast_data": "FunctionDef name:set_initial_value arg:self arg:y arg:t arguments arg arg arg If Call Assign Assign Call If Call Assign Call Assign Call Call Compare Return return:yes"
  },
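  {
    "editor_note": "Illustrative entry added by the editor for `ode.set_initial_value` above: a minimal integration loop; the dopri5 integrator and the decay rate are arbitrary choices for demonstration.",
    "example_code": "from scipy.integrate import ode\n\ndef rhs(t, y):\n    return -0.5 * y  # exponential decay dy/dt = -y/2\n\nsolver = ode(rhs).set_integrator('dopri5')\nsolver.set_initial_value(1.0, t=0.0)  # scalar y(0)=1 is promoted to [1.0]\nwhile solver.successful() and solver.t < 2.0:\n    solver.integrate(solver.t + 0.5)\nprint(solver.t, solver.y)  # y(2) is approx. exp(-1) ~ 0.368"
  },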
  {
    "library": "tensorflow",
    "name": "_process_body",
    "source_code": "def _process_body(self, pfor_input: '_PforInput', inputs_stacked, new_indices, cond_stacked, new_inputs, not_all_done):\n\n    def true_fn(control_inputs, body_pfor, body_output, stacked):\n        converted_control_inp = []\n        for x in control_inputs:\n            for t in x.outputs:\n                converted_control_inp.append(body_pfor._convert_helper(t).t)\n        if stacked:\n            output = body_pfor.convert(body_output)\n        else:\n            output, convert_stacked, _ = body_pfor._convert_helper(body_output)\n            assert convert_stacked == stacked, body_output\n        with ops.control_dependencies(converted_control_inp):\n            return array_ops.identity(output)\n    body_pfor = self._init_pfor(pfor_input.pfor, new_indices, cond_stacked, new_inputs, inputs_stacked)\n    new_outputs = []\n    for i, (body_output, stacked) in enumerate(zip(self._body_outputs, inputs_stacked)):\n        control_inp = self._next_iter_control_inputs[i]\n        out_dtype = body_output.dtype\n        new_output = tf_cond.cond(not_all_done, lambda: true_fn(control_inp, body_pfor, body_output, stacked), lambda: constant_op.constant([], dtype=out_dtype))\n        new_outputs.append(new_output)\n    return new_outputs",
    "docstring": "Convert the body function.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\parallel_for\\pfor.py",
    "ast_data": "FunctionDef name:_process_body arg:self arg:pfor_input arg:inputs_stacked arg:new_indices arg:cond_stacked arg:new_inputs arg:not_all_done arguments arg arg arg arg arg arg arg FunctionDef name:true_fn arg:control_inputs arg:body_pfor arg:body_output arg:stacked arguments arg arg arg arg Assign For For Call Call If Assign Call Assign Call Compare With Call Return return:yes Call Assign Call Assign For Call Call Assign Assign Assign Call arguments Call arguments Call Call Return return:yes"
  },
  {
    "library": "numpy",
    "name": "feature_detect",
    "source_code": "def feature_detect(self, names):\n    names = self.feature_get_til(names, 'implies_detect')\n    detect = []\n    for n in names:\n        d = self.feature_supported[n]\n        detect += d.get('detect', d.get('group', [n]))\n    return detect",
    "docstring": "Return a list of CPU features that required to be detected sorted from the lowest to highest interest.",
    "type": "method",
    "file_path": "numpy\\numpy\\distutils\\ccompiler_opt.py",
    "ast_data": "FunctionDef name:feature_detect arg:self arg:names arguments arg arg Assign Call Assign For Assign Call Call Return return:yes"
  },
  {
    "library": "django",
    "name": "dumps",
    "source_code": "def dumps(self, obj):\n    return ('[' + ','.join(obj) + ']').encode('latin-1')",
    "docstring": "The parameter is an already serialized list of Message objects. No need to serialize it again, only join the list together and encode it.",
    "type": "method",
    "file_path": "django\\django\\contrib\\messages\\storage\\cookie.py",
    "ast_data": "FunctionDef name:dumps arg:self arg:obj arguments arg arg Return return:yes Call Call"
  },
  {
    "library": "pandas",
    "name": "previous_workday",
    "source_code": "def previous_workday(dt: datetime) -> datetime:\n    dt -= timedelta(days=1)\n    while dt.weekday() > 4:\n        dt -= timedelta(days=1)\n    return dt",
    "docstring": "returns previous workday used for observances",
    "type": "function",
    "file_path": "pandas\\pandas\\tseries\\holiday.py",
    "ast_data": "FunctionDef name:previous_workday arg:dt arguments arg Call While Compare Call Call Return return:yes"
  },
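  {
    "editor_note": "Illustrative entry added by the editor for `previous_workday` above: the function is importable from pandas.tseries.holiday; dates are chosen so that both a Monday and a Saturday resolve back to the prior Friday.",
    "example_code": "from datetime import datetime\nfrom pandas.tseries.holiday import previous_workday\n\n# Monday 2024-01-08 steps back to Friday 2024-01-05.\nprint(previous_workday(datetime(2024, 1, 8)))\n# Saturday 2024-01-06 also resolves to the same Friday.\nprint(previous_workday(datetime(2024, 1, 6)))"
  },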
  {
    "library": "tensorflow",
    "name": "dot",
    "source_code": "def dot(inputs, axes, normalize=False, **kwargs):\n    return Dot(axes=axes, normalize=normalize, **kwargs)(inputs)",
    "docstring": "Functional interface to the layer. Args: inputs: A list of input tensors (at least 2). axes: Integer or tuple of integers, axis or axes along which to take the dot product. normalize: Whether to L2-normalize samples along the dot product axis before taking the dot product. If set to True, then the output of the dot product is the cosine proximity between the two samples. **kwargs: Standard layer keyword arguments. Returns: A tensor, the dot product of the samples from the inputs.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\layers\\merge.py",
    "ast_data": "FunctionDef name:dot arg:inputs arg:axes arg:normalize arguments arg arg arg arg Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "dynamic_partition",
    "source_code": "@dispatch.dispatch_for_api(data_flow_ops.dynamic_partition)\ndef dynamic_partition(data: ragged_tensor.RaggedOrDense, partitions: ragged_tensor.RaggedOrDense, num_partitions, name=None):\n    if not isinstance(num_partitions, int) or num_partitions < 0:\n        raise TypeError('num_partitions must be a non-negative integer')\n    result = stack_dynamic_partitions(data, partitions, num_partitions, name)\n    return [result[i] for i in range(num_partitions)]",
    "docstring": "RaggedTensor dispatch override for tf.dynamic_partition.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\ragged_array_ops.py",
    "ast_data": "FunctionDef name:dynamic_partition arg:data arg:partitions arg:num_partitions arg:name arguments arg arg arg arg If BoolOp Call Compare Raise Call Assign Call Return return:yes Call Call"
  },
  {
    "library": "cherrypy",
    "name": "release_lock",
    "source_code": "def release_lock(self, path=None):\n    self.lock.release()\n    self.locked = False",
    "docstring": "Release the lock on the currently-loaded session data.",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\lib\\sessions.py",
    "ast_data": "FunctionDef name:release_lock arg:self arg:path arguments arg arg Call Assign"
  },
  {
    "library": "numpy",
    "name": "rel_path",
    "source_code": "def rel_path(path, parent_path):\n    pd = os.path.realpath(os.path.abspath(parent_path))\n    apath = os.path.realpath(os.path.abspath(path))\n    if len(apath) < len(pd):\n        return path\n    if apath == pd:\n        return ''\n    if pd == apath[:len(pd)]:\n        assert apath[len(pd)] in [os.sep], repr((path, apath[len(pd)]))\n        path = apath[len(pd) + 1:]\n    return path",
    "docstring": "Return path relative to parent_path.",
    "type": "function",
    "file_path": "numpy\\numpy\\distutils\\misc_util.py",
    "ast_data": "FunctionDef name:rel_path arg:path arg:parent_path arguments arg arg Assign Call Call Assign Call Call If Compare Call Call Return return:yes If Compare Return return:yes If Compare Call Compare Call Call Call Assign Call Return return:yes"
  },
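  {
    "editor_note": "Illustrative entry added by the editor for `rel_path` above: restated standalone (the original is a numpy.distutils internal); POSIX-style paths are assumed, and realpath may rewrite symlinked prefixes on a real filesystem.",
    "example_code": "import os\n\ndef rel_path(path, parent_path):\n    # Return path relative to parent_path; unrelated paths pass through.\n    pd = os.path.realpath(os.path.abspath(parent_path))\n    apath = os.path.realpath(os.path.abspath(path))\n    if len(apath) < len(pd):\n        return path\n    if apath == pd:\n        return ''\n    if pd == apath[:len(pd)]:\n        assert apath[len(pd)] == os.sep\n        path = apath[len(pd) + 1:]\n    return path\n\nprint(rel_path('/usr/lib/python3/os.py', '/usr/lib'))  # 'python3/os.py'\nprint(rel_path('/etc/hosts', '/usr/lib'))              # unchanged"
  },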
  {
    "library": "tensorflow",
    "name": "_process_def",
    "source_code": "def _process_def(self, node: Union[ast.ClassDef, ast.FunctionDef]) -> None:\n    for decorator in node.decorator_list:\n        if self._is_export_call(decorator):\n            self._add_exported_symbol(cast(ast.Call, decorator), node.name)\n        else:\n            self.visit(decorator)\n    if isinstance(node, ast.ClassDef):\n        for base in node.bases:\n            self.visit(base)\n        for kw in node.keywords:\n            self.visit(kw)\n    elif isinstance(node, ast.FunctionDef):\n        self.visit(node.args)\n        if node.returns:\n            self.visit(node.returns)\n    for stmt in node.body:\n        self.visit(stmt)",
    "docstring": "Process top-level [Class|Function]Def for potential symbol export.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\tools\\api\\generator2\\extractor\\extractor.py",
    "ast_data": "FunctionDef name:_process_def arg:self arg:node arguments arg arg For If Call Call Call Call If Call For Call For Call If Call Call If Call For Call"
  },
  {
    "library": "cherrypy",
    "name": "items",
    "source_code": "def items(self):\n    if not self.loaded:\n        self.load()\n    return self._data.items()",
    "docstring": "Return an iterable of items as tuples. D.items() -> list of D's (key, value) pairs, as 2-tuples.",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\lib\\sessions.py",
    "ast_data": "FunctionDef name:items arg:self arguments arg If Call Return return:yes Call"
  },
  {
    "library": "django",
    "name": "url",
    "source_code": "def url(self, name):\n    raise NotImplementedError('subclasses of Storage must provide a url() method')",
    "docstring": "Return an absolute URL where the file's contents can be accessed directly by a web browser.",
    "type": "method",
    "file_path": "django\\django\\core\\files\\storage\\base.py",
    "ast_data": "FunctionDef name:url arg:self arg:name arguments arg arg Raise Call"
  },
  {
    "library": "tensorflow",
    "name": "_get_trace_mode",
    "source_code": "def _get_trace_mode(self):\n    found, trace_mode = self.get_flag_value(FLAG_NAME_TRACE_MODE)\n    if not found or not trace_mode:\n        trace_mode = TRACE_MODE_NORM\n    valid_trace_modes = [TRACE_MODE_NAN_INF, TRACE_MODE_PART_TENSOR, TRACE_MODE_FULL_TENSOR, TRACE_MODE_NORM, TRACE_MODE_MAX_ABS, TRACE_MODE_SUMMARY, TRACE_MODE_FULL_TENSOR_SUMMARY, TRACE_MODE_HISTORY]\n    if trace_mode not in valid_trace_modes:\n        raise ValueError('Invalid trace mode \"%s\" given to the Tensor_Tracer.Valid trace modes are: %s' % (trace_mode, valid_trace_modes))\n    return trace_mode",
    "docstring": "Checks if the given trace mode is valid.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\tpu\\tensor_tracer_flags.py",
    "ast_data": "FunctionDef name:_get_trace_mode arg:self arguments arg Assign Call If BoolOp Assign Assign If Compare Raise Call Return return:yes"
  },
  {
    "library": "scipy",
    "name": "OdrError",
    "source_code": "class OdrError(Exception):\n    pass",
    "docstring": "Exception indicating an error in fitting. This is raised by if an error occurs during fitting.",
    "type": "class",
    "file_path": "scipy\\scipy\\odr\\_odrpack.py",
    "ast_data": "ClassDef name:OdrError"
  },
  {
    "library": "tensorflow",
    "name": "manager",
    "source_code": "def manager():\n    _check_initialization()\n    global _manager\n    with _manager_lock:\n        if _manager is None:\n            _manager = multiprocessing.Manager()\n        return _manager",
    "docstring": "Returns the multiprocessing manager object for concurrency tools. The manager object is useful as it controls a server process that holds the python objects that can be shared across processes. This can be used for parent-subprocess communication: Note that the user of multi_process_runner should not create additional objects; doing so can result in segfault in some cases. This method should only be called after multi_process_runner.test_main() is called.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\multi_process_runner.py",
    "ast_data": "FunctionDef name:manager arguments Call With If Compare Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "revive_custom_object",
    "source_code": "def revive_custom_object(identifier, metadata):\n    if ops.executing_eagerly_outside_functions():\n        model_class = training_lib.Model\n    else:\n        model_class = training_lib_v1.Model\n    revived_classes = {constants.INPUT_LAYER_IDENTIFIER: (RevivedInputLayer, input_layer.InputLayer), constants.LAYER_IDENTIFIER: (RevivedLayer, base_layer.Layer), constants.MODEL_IDENTIFIER: (RevivedNetwork, model_class), constants.NETWORK_IDENTIFIER: (RevivedNetwork, functional_lib.Functional), constants.SEQUENTIAL_IDENTIFIER: (RevivedNetwork, models_lib.Sequential)}\n    parent_classes = revived_classes.get(identifier, None)\n    if parent_classes is not None:\n        parent_classes = revived_classes[identifier]\n        revived_cls = type(compat.as_str(metadata['class_name']), parent_classes, {})\n        return revived_cls._init_from_metadata(metadata)\n    else:\n        raise ValueError('Unable to restore custom object of type {} currently. Please make sure that the layer implements `get_config`and `from_config` when saving. In addition, please use the `custom_objects` arg when calling `load_model()`.'.format(identifier))",
    "docstring": "Revives object from SavedModel.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\saving\\saved_model\\load.py",
    "ast_data": "FunctionDef name:revive_custom_object arg:identifier arg:metadata arguments arg arg If Call Assign Assign Assign Assign Call If Compare Assign Assign Call Call Return return:yes Call Raise Call Call"
  },
  {
    "library": "tensorflow",
    "name": "arg_is_blockwise",
    "source_code": "def arg_is_blockwise(block_dimensions, arg, arg_split_dim):\n    if isinstance(arg, (tuple, list)) and len(arg) == len(block_dimensions):\n        if not any((nest.is_nested(x) for x in arg)):\n            return True\n        else:\n            arg_dims = [tensor_conversion.convert_to_tensor_v2_with_dispatch(x).shape[arg_split_dim] for x in arg]\n            self_dims = [dim.value for dim in block_dimensions]\n            if all((self_d is None for self_d in self_dims)):\n                if len(arg_dims) == 1:\n                    return False\n                elif any((dim != arg_dims[0] for dim in arg_dims)):\n                    return True\n                else:\n                    raise ValueError('Parsing of the input structure is ambiguous. Please input a blockwise iterable of `Tensor`s or a single `Tensor`.')\n            if all((self_d == arg_d or self_d is None for self_d, arg_d in zip(self_dims, arg_dims))):\n                return True\n            self_dim = sum((self_d for self_d in self_dims if self_d is not None))\n            if all((s == arg_dims[0] for s in arg_dims)) and arg_dims[0] >= self_dim:\n                return False\n            raise ValueError('Input dimension does not match operator dimension.')\n    else:\n        return False",
    "docstring": "Detect if input should be interpreted as a list of blocks.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\linalg\\linear_operator_util.py",
    "ast_data": "FunctionDef name:arg_is_blockwise arg:block_dimensions arg:arg arg:arg_split_dim arguments arg arg arg If BoolOp Call Compare Call Call If Call Call Return return:yes Assign Call Assign If Call Compare If Compare Call Return return:yes If Call Compare Return return:yes Raise Call If Call BoolOp Compare Compare Call Return return:yes Assign Call Compare If BoolOp Call Compare Compare Return return:yes Raise Call Return return:yes"
  },
  {
    "library": "pandas",
    "name": "_wrap_result",
    "source_code": "def _wrap_result(data, columns, index_col=None, coerce_float: bool=True, parse_dates=None, dtype: DtypeArg | None=None, dtype_backend: DtypeBackend | Literal['numpy']='numpy') -> DataFrame:\n    frame = _convert_arrays_to_dataframe(data, columns, coerce_float, dtype_backend)\n    if dtype:\n        frame = frame.astype(dtype)\n    frame = _parse_date_columns(frame, parse_dates)\n    if index_col is not None:\n        frame = frame.set_index(index_col)\n    return frame",
    "docstring": "Wrap result set of a SQLAlchemy query in a DataFrame.",
    "type": "function",
    "file_path": "pandas\\pandas\\io\\sql.py",
    "ast_data": "FunctionDef name:_wrap_result arg:data arg:columns arg:index_col arg:coerce_float arg:parse_dates arg:dtype arg:dtype_backend arguments arg arg arg arg arg arg arg Assign Call If Assign Call Assign Call If Compare Assign Call Return return:yes"
  },
  {
    "library": "scipy",
    "name": "_extend",
    "source_code": "def _extend(M, sym):\n    if not sym:\n        return (M + 1, True)\n    else:\n        return (M, False)",
    "docstring": "Extend window by 1 sample if needed for DFT-even symmetry",
    "type": "function",
    "file_path": "scipy\\scipy\\signal\\windows\\_windows.py",
    "ast_data": "FunctionDef name:_extend arg:M arg:sym arguments arg arg If Return return:yes Return return:yes"
  },
  {
    "library": "pandas",
    "name": "diff",
    "source_code": "@final\ndef diff(self, periods: int=1) -> Index:\n    return Index(self.to_series().diff(periods))",
    "docstring": "Computes the difference between consecutive values in the Index object. If periods is greater than 1, computes the difference between values that are number of positions apart. Parameters ---------- periods : int, optional The number of positions between the current and previous value to compute the difference with. Default is 1. Returns ------- Index A new Index object with the computed differences. Examples -------- >>> import pandas as pd >>> idx = pd.Index([10, 20, 30, 40, 50]) >>> idx.diff() Index([nan, 10.0, 10.0, 10.0, 10.0], dtype='float64')",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\indexes\\base.py",
    "ast_data": "FunctionDef name:diff arg:self arg:periods arguments arg arg Return return:yes Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "batchable_from_tensor_list",
    "source_code": "def batchable_from_tensor_list(spec, tensor_list):\n    if isinstance(spec, internal.TensorSpec):\n        assert len(tensor_list) == 1\n        return tensor_list[0]\n    elif hasattr(spec, '__batch_encoder__'):\n        encoded_specs = spec.__batch_encoder__.encoding_specs(spec)\n        flat_specs = nest.map_structure(get_batchable_flat_tensor_specs, encoded_specs)\n        encoded_flats = nest.pack_sequence_as(flat_specs, tensor_list)\n        encoded_value = nest.map_structure_up_to(encoded_specs, batchable_from_tensor_list, encoded_specs, encoded_flats)\n        return spec.__batch_encoder__.decode(spec, encoded_value)\n    else:\n        return spec._from_compatible_tensor_list(tensor_list)",
    "docstring": "Returns a value with type decoded from .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\type_spec.py",
    "ast_data": "FunctionDef name:batchable_from_tensor_list arg:spec arg:tensor_list arguments arg arg If Call Compare Call Return return:yes If Call Assign Call Assign Call Assign Call Assign Call Return return:yes Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "is_compatible_with",
    "source_code": "def is_compatible_with(self, other):\n    other = as_dimension(other)\n    return self._value is None or other.value is None or self._value == other.value",
    "docstring": "Returns true if is compatible with this Dimension. Two known Dimensions are compatible if they have the same value. An unknown Dimension is compatible with all other Dimensions. Args: other: Another Dimension. Returns: True if this Dimension and are compatible.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\tensor_shape.py",
    "ast_data": "FunctionDef name:is_compatible_with arg:self arg:other arguments arg arg Assign Call Return return:yes BoolOp Compare Compare Compare"
  },
  {
    "library": "tensorflow",
    "name": "_flatten_modules",
    "source_code": "def _flatten_modules(self, recursive=True, include_self=True):\n    if include_self:\n        yield self\n    trackables = getattr(self, '_self_tracked_trackables', None)\n    if trackables:\n        seen_object_ids = set()\n        deque = collections.deque(trackables)\n        while deque:\n            trackable_obj = deque.popleft()\n            trackable_id = id(trackable_obj)\n            if trackable_id in seen_object_ids:\n                continue\n            seen_object_ids.add(trackable_id)\n            if isinstance(trackable_obj, module.Module) and (not isinstance(trackable_obj, metrics_mod.Metric)):\n                yield trackable_obj\n                if recursive:\n                    subtrackables = getattr(trackable_obj, '_self_tracked_trackables', None)\n                    if subtrackables:\n                        deque.extendleft(reversed(subtrackables))\n            elif isinstance(trackable_obj, data_structures.TrackableDataStructure):\n                tracked_values = trackable_obj._values\n                if tracked_values:\n                    deque.extendleft(reversed(tracked_values))",
    "docstring": "Flattens instances (excluding ). Args: recursive: Whether to recursively flatten through submodules. include_self: Whether to include this instance. Yields: instance tracked by this .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\base_layer.py",
    "ast_data": "FunctionDef name:_flatten_modules arg:self arg:recursive arg:include_self arguments arg arg arg If Assign Call If Assign Call Assign Call While Assign Call Assign Call If Compare Call If BoolOp Call Call If Assign Call If Call Call If Call Assign If Call Call"
  },
  {
    "library": "django",
    "name": "compiler",
    "source_code": "def compiler(self, compiler_name):\n    if self._cache is None:\n        self._cache = import_module(self.compiler_module)\n    return getattr(self._cache, compiler_name)",
    "docstring": "Return the SQLCompiler class corresponding to the given name, in the namespace corresponding to the attribute on this backend.",
    "type": "method",
    "file_path": "django\\django\\db\\backends\\base\\operations.py",
    "ast_data": "FunctionDef name:compiler arg:self arg:compiler_name arguments arg arg If Compare Assign Call Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "_estimate_weighted_log_prob",
    "source_code": "def _estimate_weighted_log_prob(self, X):\n    return self._estimate_log_prob(X) + self._estimate_log_weights()",
    "docstring": "Estimate the weighted log-probabilities, log P(X | Z) + log weights. Parameters ---------- X : array-like of shape (n_samples, n_features) Returns ------- weighted_log_prob : array, shape (n_samples, n_component)",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\mixture\\_base.py",
    "ast_data": "FunctionDef name:_estimate_weighted_log_prob arg:self arg:X arguments arg arg Return return:yes Call Call"
  },
  {
    "library": "pandas",
    "name": "CategoricalConversionWarning",
    "source_code": "class CategoricalConversionWarning(Warning):\n    pass",
    "docstring": "Warning is raised when reading a partial labeled Stata file using a iterator. This warning helps ensure data integrity and alerts users to potential issues during the incremental reading of Stata files with labeled data, allowing for additional checks and adjustments as necessary. See Also -------- read_stata : Read a Stata file into a DataFrame. Categorical : Represents a categorical variable in pandas. Examples -------- >>> from pandas.io.stata import StataReader >>> with StataReader(\"dta_file\", chunksize=2) as reader: # doctest: +SKIP ... for i, block in enumerate(reader): ... print(i, block) ... # CategoricalConversionWarning: One or more series with value labels...",
    "type": "class",
    "file_path": "pandas\\pandas\\errors\\__init__.py",
    "ast_data": "ClassDef name:CategoricalConversionWarning"
  },
  {
    "library": "scikit-learn",
    "name": "_check_initialized",
    "source_code": "def _check_initialized(self):\n    check_is_fitted(self)",
    "docstring": "Check that the estimator is initialized, raising an error if not.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\ensemble\\_gb.py",
    "ast_data": "FunctionDef name:_check_initialized arg:self arguments arg Call"
  },
  {
    "library": "pytorch",
    "name": "add_module_init_func",
    "source_code": "def add_module_init_func(name: str, init_func: Callable[[], None]) -> None:\n    assert '.' not in name, f'Expected a root module name, but got {name}'\n    assert name not in _lazy_module_init\n    _lazy_module_init[name].append(init_func)",
    "docstring": "Register a module without eagerly importing it",
    "type": "function",
    "file_path": "pytorch\\torch\\_dynamo\\trace_rules.py",
    "ast_data": "FunctionDef name:add_module_init_func arg:name arg:init_func arguments arg arg Compare Compare Call"
  },
  {
    "library": "tensorflow",
    "name": "_format_data_list_with_options",
    "source_code": "def _format_data_list_with_options(self, data_list):\n    if self._options and self._options.experimental_replication_mode == InputReplicationMode.PER_REPLICA and (not self._options.experimental_fetch_to_device):\n        return [data_list]\n    else:\n        return data_list",
    "docstring": "Change the data in to a list type if required. The OwnedMultiDeviceIterator returns the list data type, while the PER_REPLICA iterator (when used with prefetch disabled) returns without the enclosed list. This is to fix the inconsistency. Args: data_list: data_list Returns: list",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\input_lib.py",
    "ast_data": "FunctionDef name:_format_data_list_with_options arg:self arg:data_list arguments arg arg If BoolOp Compare Return return:yes Return return:yes"
  },
  {
    "library": "django",
    "name": "_get_media",
    "source_code": "def _get_media(self):\n    media = Media()\n    for w in self.widgets:\n        media += w.media\n    return media",
    "docstring": "Media for a multiwidget is the combination of all media of the subwidgets.",
    "type": "method",
    "file_path": "django\\django\\forms\\widgets.py",
    "ast_data": "FunctionDef name:_get_media arg:self arguments arg Assign Call For Return return:yes"
  },
  {
    "library": "sphinx",
    "name": "is_available",
    "source_code": "def is_available(self) -> bool:\n    raise NotImplementedError",
    "docstring": "Return the image converter is available or not.",
    "type": "method",
    "file_path": "sphinx\\sphinx\\transforms\\post_transforms\\images.py",
    "ast_data": "FunctionDef name:is_available arg:self arguments arg Raise"
  },
  {
    "library": "tensorflow",
    "name": "l2_normalize",
    "source_code": "@dispatch.add_dispatch_support\n@doc_controls.do_not_generate_docs\ndef l2_normalize(x, axis=None):\n    return nn.l2_normalize(x, axis=axis)",
    "docstring": "Normalizes a tensor wrt the L2 norm alongside the specified axis. Args: x: Tensor or variable. axis: axis along which to perform normalization. Returns: A tensor.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\backend.py",
    "ast_data": "FunctionDef name:l2_normalize arg:x arg:axis arguments arg arg Return return:yes Call"
  },
  {
    "library": "django",
    "name": "load",
    "source_code": "def load(self):\n    raise NotImplementedError('subclasses of SessionBase must provide a load() method')",
    "docstring": "Load the session data and return a dictionary.",
    "type": "method",
    "file_path": "django\\django\\contrib\\sessions\\backends\\base.py",
    "ast_data": "FunctionDef name:load arg:self arguments arg Raise Call"
  },
  {
    "library": "scikit-learn",
    "name": "apply",
    "source_code": "def apply(self, X, check_input=True):\n    check_is_fitted(self)\n    X = self._validate_X_predict(X, check_input)\n    return self.tree_.apply(X)",
    "docstring": "Return the index of the leaf that each sample is predicted as. .. versionadded:: 0.17 Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) The input samples. Internally, it will be converted to ``, possibly with gaps in the numbering.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\tree\\_classes.py",
    "ast_data": "FunctionDef name:apply arg:self arg:X arg:check_input arguments arg arg arg Call Assign Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "handle_sym_dispatch",
    "source_code": "def handle_sym_dispatch(func: Callable[_P, R], args: _P.args, kwargs: _P.kwargs) -> R:\n    mode = get_proxy_mode()\n    assert mode\n    with disable_proxy_modes_tracing():\n        types: list[type] = []\n        return mode.__sym_dispatch__(func, types, args, kwargs)",
    "docstring": "Call into the currently active proxy tracing mode to do a SymInt/SymFloat/SymBool dispatch trace on a function that operates on these arguments.",
    "type": "function",
    "file_path": "pytorch\\torch\\fx\\experimental\\proxy_tensor.py",
    "ast_data": "FunctionDef name:handle_sym_dispatch arg:func arg:args arg:kwargs arguments arg arg arg Assign Call With Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "_verify_stack_trace",
    "source_code": "def _verify_stack_trace(graph_module: torch.fx.GraphModule) -> None:\n    for mod in [graph_module, *graph_module.modules()]:\n        if not isinstance(mod, torch.fx.GraphModule):\n            continue\n        for node in graph_module.graph.nodes:\n            stack_trace = node.meta.get('stack_trace', None)\n            if node.op in ['call_function', 'get_attr']:\n                if not (stack_trace is None or isinstance(stack_trace, str)):\n                    raise SpecViolationError(f'Node {node} of type {node.op} has invalid stack_trace metadata, expected a string or None but instead found: {stack_trace}')\n            elif node.op in ['placeholder', 'output']:\n                if stack_trace:\n                    raise SpecViolationError(f'Node {node} of type {node.op} contains stack_trace metadata, expected None but instead found: {stack_trace}')",
    "docstring": "Perform stack trace checks on the graph. Constraints: - None or non-empty str for 'call_function', 'get_attr' - None for 'placeholder', 'output'",
    "type": "function",
    "file_path": "pytorch\\torch\\export\\_trace.py",
    "ast_data": "FunctionDef name:_verify_stack_trace arg:graph_module arguments arg For Call If Call For Assign Call If Compare If BoolOp Compare Call Raise Call If Compare If Raise Call"
  },
  {
    "library": "pytorch",
    "name": "max_pool1d_with_indices",
    "source_code": "def max_pool1d_with_indices(input: Tensor, kernel_size: BroadcastingList1[int], stride: Optional[BroadcastingList1[int]]=None, padding: BroadcastingList1[int]=0, dilation: BroadcastingList1[int]=1, ceil_mode: bool=False, return_indices: bool=False) -> tuple[Tensor, Tensor]:\n    if has_torch_function_unary(input):\n        return handle_torch_function(max_pool1d_with_indices, (input,), input, kernel_size, stride=stride, padding=padding, dilation=dilation, ceil_mode=ceil_mode, return_indices=return_indices)\n    if stride is None:\n        stride = torch.jit.annotate(list[int], [])\n    return torch.max_pool1d_with_indices(input, kernel_size, stride, padding, dilation, ceil_mode)",
    "docstring": "max_pool1d(input, kernel_size, stride=None, padding=0, dilation=1, ceil_mode=False, return_indices=False) Applies a 1D max pooling over an input signal composed of several input planes. .. note:: The order of :attr: and :attr: is different from what seen in :class:, and will change in a future release. See :class: for details. Args: input: input tensor of shape :math:, minibatch dim optional. kernel_size: the size of the window. Can be a single number or a tuple stride: the stride of the window. Can be a single number or a tuple . Default: :attr: padding: Implicit negative infinity padding to be added on both sides, must be >= 0 and 0. ceil_mode: If `ceilfloortorch.nn.functional.max_unpool1d` later",
    "type": "function",
    "file_path": "pytorch\\torch\\nn\\functional.py",
    "ast_data": "FunctionDef name:max_pool1d_with_indices arg:input arg:kernel_size arg:stride arg:padding arg:dilation arg:ceil_mode arg:return_indices arguments arg arg arg arg arg arg arg If Call Return return:yes Call If Compare Assign Call Return return:yes Call"
  },
  {
    "library": "scrapy",
    "name": "pop_stream",
    "source_code": "def pop_stream(self, stream_id: int) -> Stream:\n    stream = self.streams.pop(stream_id)\n    self.metadata['active_streams'] -= 1\n    self._send_pending_requests()\n    return stream",
    "docstring": "Perform cleanup when a stream is closed",
    "type": "method",
    "file_path": "scrapy\\scrapy\\core\\http2\\protocol.py",
    "ast_data": "FunctionDef name:pop_stream arg:self arg:stream_id arguments arg arg Assign Call Call Return return:yes"
  },
  {
    "library": "kornia",
    "name": "get_binary_kernel2d",
    "source_code": "def get_binary_kernel2d(window_size: tuple[int, int] | int, *, device: Optional[Device]=None, dtype: Dtype=torch.float32) -> Tensor:\n    ky, kx = _unpack_2d_ks(window_size)\n    window_range = kx * ky\n    kernel = zeros((window_range, window_range), device=device, dtype=dtype)\n    idx = torch.arange(window_range, device=device)\n    kernel[idx, idx] += 1.0\n    return kernel.view(window_range, 1, ky, kx)",
    "docstring": "Create a binary kernel to extract the patches. If the window size is HxW will create a (H*W)x1xHxW kernel.",
    "type": "function",
    "file_path": "kornia\\kornia\\filters\\kernels.py",
    "ast_data": "FunctionDef name:get_binary_kernel2d arg:window_size arguments arg arg arg Assign Call Assign Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "sphinx",
    "name": "strip_escape_sequences",
    "source_code": "def strip_escape_sequences(text: str, /) -> str:\n    return _ANSI_CODES.sub('', text)",
    "docstring": "Remove the ANSI CSI colors and \"erase in line\" sequences. Other _ (e.g., VT100-specific functions) are not supported. Only control sequences *natively* known to Sphinx (i.e., colour sequences used in Sphinx and \"erase entire line\" (``)) are stripped by this function. .. warning:: This function only for use within Sphinx.. __",
    "type": "function",
    "file_path": "sphinx\\sphinx\\_cli\\util\\errors.py",
    "ast_data": "FunctionDef name:strip_escape_sequences arguments arg Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "set_facecolor",
    "source_code": "def set_facecolor(self, c):\n    if isinstance(c, str) and c.lower() in ('none', 'face'):\n        c = c.lower()\n    self._original_facecolor = c\n    self._set_facecolor(c)",
    "docstring": "Set the facecolor(s) of the collection. *c* can be a color (all patches have same color), or a sequence of colors; if it is a sequence the patches will cycle through the sequence. If *c* is 'none', the patch will not be filled. Parameters ---------- c : :mpltype: or list of :mpltype:",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\collections.py",
    "ast_data": "FunctionDef name:set_facecolor arg:self arg:c arguments arg arg If BoolOp Call Compare Call Assign Call Assign Call"
  },
  {
    "library": "pytorch",
    "name": "_with_args",
    "source_code": "def _with_args(cls_or_self, **kwargs):\n    r = _PartialWrapper(partial(cls_or_self, **kwargs))\n    return r",
    "docstring": "Wrapper that allows creation of class factories. This can be useful when there is a need to create classes with the same constructor arguments, but different instances. Can be used in conjunction with _callable_args Example:: >>> # xdoctest: +SKIP(\"Undefined vars\") >>> Foo.with_args = classmethod(_with_args) >>> foo_builder = Foo.with_args(a=3, b=4).with_args(answer=42) >>> foo_instance1 = foo_builder() >>> foo_instance2 = foo_builder() >>> id(foo_instance1) == id(foo_instance2) False",
    "type": "function",
    "file_path": "pytorch\\torch\\ao\\quantization\\observer.py",
    "ast_data": "FunctionDef name:_with_args arg:cls_or_self arguments arg arg Assign Call Call Return return:yes"
  },
  {
    "library": "pandas",
    "name": "refactor_levels",
    "source_code": "def refactor_levels(level: Level | list[Level] | None, obj: Index) -> list[int]:\n    if level is None:\n        levels_: list[int] = list(range(obj.nlevels))\n    elif isinstance(level, int):\n        levels_ = [level]\n    elif isinstance(level, str):\n        levels_ = [obj._get_level_number(level)]\n    elif isinstance(level, list):\n        levels_ = [obj._get_level_number(lev) if not isinstance(lev, int) else lev for lev in level]\n    else:\n        raise ValueError('`level` must be of type `int`, `str` or list of such')\n    return levels_",
    "docstring": "Returns a consistent levels arg for use in `` Returns ------- list : refactored arg with a list of levels to hide",
    "type": "function",
    "file_path": "pandas\\pandas\\io\\formats\\style_render.py",
    "ast_data": "FunctionDef name:refactor_levels arg:level arg:obj arguments arg arg If Compare Call Call If Call Assign If Call Assign Call If Call Assign Call Call Raise Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "deserialize",
    "source_code": "@classmethod\ndef deserialize(cls, json_str: str) -> 'GemmOperation':\n    json_dict = json.loads(json_str)\n    return cls._json_to_gemm_operation(json_dict)",
    "docstring": "Deserialize JSON string to a GEMM operation. Args: json_str: JSON string of a GEMM operation Returns: GemmOperation: Reconstructed operation",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\codegen\\cuda\\serialization.py",
    "ast_data": "FunctionDef name:deserialize arg:cls arg:json_str arguments arg arg Assign Call Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "__init__",
    "source_code": "def __init__(self, offset=(2, -2), shadow_color='k', alpha=0.3, rho=0.3, **kwargs):\n    super().__init__(offset)\n    if shadow_color is None:\n        self._shadow_color = shadow_color\n    else:\n        self._shadow_color = mcolors.to_rgba(shadow_color)\n    self._alpha = alpha\n    self._rho = rho\n    self._gc = kwargs",
    "docstring": "Parameters ---------- offset : (float, float), default: (2, -2) The (x, y) offset to apply to the path, in points. shadow_color : :mpltype:, default: 'black' The shadow color. A value of `!AbstractPathEffect._update_gc`.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\patheffects.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:offset arg:shadow_color arg:alpha arg:rho arguments arg arg arg arg arg arg Call Call If Compare Assign Assign Call Assign Assign Assign"
  },
  {
    "library": "pytorch",
    "name": "fill_ordered",
    "source_code": "@staticmethod\ndef fill_ordered(sizes, order):\n    assert OrderedSet(range(len(sizes))) == OrderedSet(order), (sizes, order)\n    next_stride = sympy.S.One\n    strides = [None] * len(order)\n    for i in order:\n        strides[i] = next_stride\n        next_stride = next_stride * sizes[i]\n    return strides",
    "docstring": "Create a stride based on the order the dimensions should be filled in. In this format, channels last would be: [1, 3, 2, 0]",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\ir.py",
    "ast_data": "FunctionDef name:fill_ordered arg:sizes arg:order arguments arg arg Compare Call Call Call Call Assign Assign Call For Assign Assign Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "drop_unused_symbols",
    "source_code": "@staticmethod\ndef drop_unused_symbols(index: Union[int, sympy.Expr], var_names: list[sympy.Expr], sizes: list[sympy.Expr]) -> None:\n    if not isinstance(index, sympy.Expr):\n        return\n    free_symbols = index.free_symbols\n    while var_names and var_names[-1] not in free_symbols:\n        var_names.pop()\n        sizes.pop()",
    "docstring": "Reduction has last (reduced) dim in its sizes, but downstream users won't. Normalize this away.",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\dependencies.py",
    "ast_data": "FunctionDef name:drop_unused_symbols arg:index arg:var_names arg:sizes arguments arg arg arg If Call Return return:no Assign While BoolOp Compare Call Call"
  },
  {
    "library": "pytorch",
    "name": "validate_checkpoint_id",
    "source_code": "@classmethod\n@abc.abstractmethod\ndef validate_checkpoint_id(cls, checkpoint_id: Union[str, os.PathLike]) -> bool:\n    ...",
    "docstring": "Check if the given checkpoint_id is supported by the stroage. This allow us to enable automatic storage selection.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\checkpoint\\storage.py",
    "ast_data": "FunctionDef name:validate_checkpoint_id arg:cls arg:checkpoint_id arguments arg arg"
  },
  {
    "library": "scrapy",
    "name": "NotSupported",
    "source_code": "class NotSupported(Exception):\n    pass",
    "docstring": "Indicates a feature or method is not supported",
    "type": "class",
    "file_path": "scrapy\\scrapy\\exceptions.py",
    "ast_data": "ClassDef name:NotSupported"
  },
  {
    "library": "matplotlib",
    "name": "get_xydata",
    "source_code": "def get_xydata(self):\n    if self._invalidy or self._invalidx:\n        self.recache()\n    return self._xy",
    "docstring": "Return the *xy* data as a (N, 2) array.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\lines.py",
    "ast_data": "FunctionDef name:get_xydata arg:self arguments arg If BoolOp Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "variables",
    "source_code": "@property\ndef variables(self):\n    if not self._variables_created:\n        return []\n    return self._template_store.variables()",
    "docstring": "Returns the list of variables created by the Template.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\template.py",
    "ast_data": "FunctionDef name:variables arg:self arguments arg If Return return:no Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "variable_created_in_scope",
    "source_code": "def variable_created_in_scope(self, v):\n    return v._distribute_strategy == self._container_strategy_weakref()",
    "docstring": "Tests whether was created while this strategy scope was active. Variables created inside the strategy scope are \"owned\" by it: >>> strategy = tf.distribute.MirroredStrategy() >>> with strategy.scope(): ... v = tf.Variable(1.) >>> strategy.extended.variable_created_in_scope(v) True Variables created outside the strategy are not owned by it: >>> strategy = tf.distribute.MirroredStrategy() >>> v = tf.Variable(1.) >>> strategy.extended.variable_created_in_scope(v) False Args: v: A instance. Returns: True if was created inside the scope, False if not.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\distribute_lib.py",
    "ast_data": "FunctionDef name:variable_created_in_scope arg:self arg:v arguments arg arg Return return:yes Compare Call"
  },
  {
    "library": "scikit-learn",
    "name": "_compute_log_det_cholesky",
    "source_code": "def _compute_log_det_cholesky(matrix_chol, covariance_type, n_features):\n    if covariance_type == 'full':\n        n_components, _, _ = matrix_chol.shape\n        log_det_chol = np.sum(np.log(matrix_chol.reshape(n_components, -1)[:, ::n_features + 1]), axis=1)\n    elif covariance_type == 'tied':\n        log_det_chol = np.sum(np.log(np.diag(matrix_chol)))\n    elif covariance_type == 'diag':\n        log_det_chol = np.sum(np.log(matrix_chol), axis=1)\n    else:\n        log_det_chol = n_features * np.log(matrix_chol)\n    return log_det_chol",
    "docstring": "Compute the log-det of the cholesky decomposition of matrices. Parameters ---------- matrix_chol : array-like Cholesky decompositions of the matrices. 'full' : shape of (n_components, n_features, n_features) 'tied' : shape of (n_features, n_features) 'diag' : shape of (n_components, n_features) 'spherical' : shape of (n_components,) covariance_type : {'full', 'tied', 'diag', 'spherical'} n_features : int Number of features. Returns ------- log_det_precision_chol : array-like of shape (n_components,) The determinant of the precision matrix for each component.",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\mixture\\_gaussian_mixture.py",
    "ast_data": "FunctionDef name:_compute_log_det_cholesky arg:matrix_chol arg:covariance_type arg:n_features arguments arg arg arg If Compare Assign Assign Call Call Call If Compare Assign Call Call Call If Compare Assign Call Call Assign Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "get_hatch",
    "source_code": "def get_hatch(self):\n    return self._hatch",
    "docstring": "Return the hatching pattern.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\patches.py",
    "ast_data": "FunctionDef name:get_hatch arg:self arguments arg Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "_short_float_fmt",
    "source_code": "def _short_float_fmt(x):\n    return f'{x:f}'.rstrip('0').rstrip('.')",
    "docstring": "Create a short string representation of a float, which is %f formatting with trailing zeros and the decimal point removed.",
    "type": "function",
    "file_path": "matplotlib\\lib\\matplotlib\\backends\\backend_svg.py",
    "ast_data": "FunctionDef name:_short_float_fmt arg:x arguments arg Return return:yes Call Call"
  },
  {
    "library": "scikit-learn",
    "name": "make_known_categories_bitsets",
    "source_code": "def make_known_categories_bitsets(self):\n    categorical_features_indices = np.flatnonzero(self.is_categorical_)\n    n_features = self.is_categorical_.size\n    n_categorical_features = categorical_features_indices.size\n    f_idx_map = np.zeros(n_features, dtype=np.uint32)\n    f_idx_map[categorical_features_indices] = np.arange(n_categorical_features, dtype=np.uint32)\n    known_categories = self.bin_thresholds_\n    known_cat_bitsets = np.zeros((n_categorical_features, 8), dtype=X_BITSET_INNER_DTYPE)\n    for mapped_f_idx, f_idx in enumerate(categorical_features_indices):\n        for raw_cat_val in known_categories[f_idx]:\n            set_bitset_memoryview(known_cat_bitsets[mapped_f_idx], raw_cat_val)\n    return (known_cat_bitsets, f_idx_map)",
    "docstring": "Create bitsets of known categories. Returns ------- - known_cat_bitsets : ndarray of shape (n_categorical_features, 8) Array of bitsets of known categories, for each categorical feature. - f_idx_map : ndarray of shape (n_features,) Map from original feature index to the corresponding index in the known_cat_bitsets array.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\ensemble\\_hist_gradient_boosting\\binning.py",
    "ast_data": "FunctionDef name:make_known_categories_bitsets arg:self arguments arg Assign Call Assign Assign Assign Call Assign Call Assign Assign Call For Call For Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "add_inputs",
    "source_code": "def add_inputs(self, *args, **kwargs):\n    if 'names' in kwargs:\n        return [self._inputs.add(arg, name=name) for arg, name in zip(args, kwargs['names'])]\n    else:\n        return [self._inputs.add(arg) for arg in args]",
    "docstring": "Add a sequence of inputs to the function invocation. Args: *args: List of inputs to be converted (should be Tf.Tensor). **kwargs: This allows 'names' which should be a list of names. Returns: Wrapped inputs (identity standins that have additional metadata). These are also are also tf.Tensor's.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\lite\\python\\op_hint.py",
    "ast_data": "FunctionDef name:add_inputs arg:self arguments arg arg arg If Compare Return return:yes Call Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_wait_for_variable_initialization",
    "source_code": "def _wait_for_variable_initialization(session):\n    all_variables = backend._get_variables(backend.get_graph())\n    candidate_vars = []\n    for v in all_variables:\n        if not getattr(v, '_keras_initialized', False):\n            candidate_vars.append(v)\n    if not candidate_vars:\n        return\n    while True:\n        is_initialized = session.run([variable_v1.is_variable_initialized(v) for v in candidate_vars])\n        uninitialized_vars = []\n        for flag, v in zip(is_initialized, candidate_vars):\n            if not flag:\n                uninitialized_vars.append(v)\n            v._keras_initialized = True\n        if not uninitialized_vars:\n            break",
    "docstring": "Utility to wait for variables to be initialized.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\distribute\\distributed_training_utils_v1.py",
    "ast_data": "FunctionDef name:_wait_for_variable_initialization arg:session arguments arg Assign Call Call Assign For If Call Call If Return return:no While Assign Call Call Assign For Call If Call Assign If"
  },
  {
    "library": "tensorflow",
    "name": "shutdown_accelerator_system",
    "source_code": "@tf_export('experimental.dtensor.shutdown_accelerator_system', 'experimental.dtensor.shutdown_tpu_system', v1=[])\ndef shutdown_accelerator_system() -> None:\n    global _INITIALIZED_ACCELERATOR_SYSTEM_TYPE\n    try:\n        context.async_wait()\n    finally:\n        if not is_initialized():\n            raise ValueError('Accelerator system is not initialized. Call tf.experimental.dtensor.initialize_accelerator_system first.')\n        device_type = _INITIALIZED_ACCELERATOR_SYSTEM_TYPE\n        if not config.is_local_mode():\n            raise ValueError('Shutting down accelerator system under multi-client mode is not supported.')\n        if device_type == 'TPU':\n            tpu_util.shutdown_tpu_system()\n        context._reset_context()\n        context.context()._clear_caches()\n        _INITIALIZED_ACCELERATOR_SYSTEM_TYPE = None",
    "docstring": "Shuts down the accelerator system.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\dtensor\\python\\accelerator_util.py",
    "ast_data": "FunctionDef name:shutdown_accelerator_system arguments Try Call If Call Raise Call Assign If Call Raise Call If Compare Call Call Call Call Assign Call"
  },
  {
    "library": "matplotlib",
    "name": "in_axes",
    "source_code": "def in_axes(self, mouseevent):\n    return self.patch.contains(mouseevent)[0]",
    "docstring": "Return whether the given event (in display coords) is in the Axes.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\axes\\_base.py",
    "ast_data": "FunctionDef name:in_axes arg:self arg:mouseevent arguments arg arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "set_requires_gradient_sync",
    "source_code": "def set_requires_gradient_sync(self, requires_gradient_sync: bool, *, recurse: bool=True) -> None:\n    self_module = cast(nn.Module, self)\n    modules = list(self_module.modules()) if recurse else [self_module]\n    for module in modules:\n        if isinstance(module, FSDPModule):\n            state = module._get_fsdp_state()\n            if (fsdp_param_group := state._fsdp_param_group):\n                fsdp_param_group.reduce_grads = requires_gradient_sync\n                fsdp_param_group.all_reduce_grads = requires_gradient_sync",
    "docstring": "Sets if the module should sync gradients. This can be used to implement gradient accumulation *without communication*. For HSDP, this controls both reduce-scatter and all-reduce together. This is the equivalence of in FSDP1. Args: requires_gradient_sync (bool): Whether to reduce gradients for the module's parameters. recurse (bool): Whether to set for all FSDP submodules or just the passed-in module.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\fsdp\\_fully_shard\\_fully_shard.py",
    "ast_data": "FunctionDef name:set_requires_gradient_sync arg:self arg:requires_gradient_sync arguments arg arg arg Assign Call Assign Call Call For If Call Assign Call If Assign Assign"
  },
  {
    "library": "pytorch",
    "name": "map_placements_after_broadcast",
    "source_code": "def map_placements_after_broadcast(placements: tuple[Placement, ...], shape: torch.Size, broadcast_dims_map: list[int]) -> tuple[Placement, ...]:\n    new_placements: list[Placement] = []\n    for placement in placements:\n        if isinstance(placement, (Replicate, Partial)):\n            new_placements.append(placement)\n        else:\n            assert isinstance(placement, Shard)\n            shard_dim = normalize_dim(placement.dim, len(shape))\n            new_shard_dim = broadcast_dims_map[shard_dim]\n            if new_shard_dim != -1:\n                new_placements.append(Shard(new_shard_dim))\n            else:\n                new_placements.append(Replicate())\n    return tuple(new_placements)",
    "docstring": "Map each placement based on the output shape after broadcast.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\tensor\\_ops\\utils.py",
    "ast_data": "FunctionDef name:map_placements_after_broadcast arg:placements arg:shape arg:broadcast_dims_map arguments arg arg arg For If Call Call Call Assign Call Call Assign If Compare Call Call Call Call Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "_set_oob_score_and_attributes",
    "source_code": "def _set_oob_score_and_attributes(self, X, y, scoring_function=None):\n    self.oob_decision_function_ = super()._compute_oob_predictions(X, y)\n    if self.oob_decision_function_.shape[-1] == 1:\n        self.oob_decision_function_ = self.oob_decision_function_.squeeze(axis=-1)\n    if scoring_function is None:\n        scoring_function = accuracy_score\n    self.oob_score_ = scoring_function(y, np.argmax(self.oob_decision_function_, axis=1))",
    "docstring": "Compute and set the OOB score and attributes. Parameters ---------- X : array-like of shape (n_samples, n_features) The data matrix. y : ndarray of shape (n_samples, n_outputs) The target matrix. scoring_function : callable, default=None Scoring function for OOB score. Defaults to .",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\ensemble\\_forest.py",
    "ast_data": "FunctionDef name:_set_oob_score_and_attributes arg:self arg:X arg:y arg:scoring_function arguments arg arg arg arg Assign Call Call If Compare Assign Call If Compare Assign Assign Call Call"
  },
  {
    "library": "tensorflow",
    "name": "watch_variable",
    "source_code": "def watch_variable(self, v):\n    if isinstance(v, resource_variable_ops.ResourceVariable) and v.handle in self._resource_tensor_inputs:\n        return\n    while self is not None and isinstance(self, FuncGraph):\n        self._watched_variables.add(v)\n        self = self.outer_graph",
    "docstring": "Marks the variable v as accessed while building this graph.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\func_graph.py",
    "ast_data": "FunctionDef name:watch_variable arg:self arg:v arguments arg arg If BoolOp Call Compare Return return:no While BoolOp Compare Call Call Assign"
  },
  {
    "library": "numpy",
    "name": "last_group",
    "source_code": "def last_group(self, getter=None):\n    if self.last:\n        return True\n    return self._compare_group(self.item, self.__next__, getter)",
    "docstring": "Returns true if this item is the end of a new group, where groups mean that some attribute has changed. The getter can be None (the item itself changes), an attribute name like ``, a function, or a dict key or list index.",
    "type": "method",
    "file_path": "numpy\\numpy\\_build_utils\\tempita\\_looper.py",
    "ast_data": "FunctionDef name:last_group arg:self arg:getter arguments arg arg If Return return:yes Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "hermitenorm",
    "source_code": "def hermitenorm(n, monic=False):\n    if n < 0:\n        raise ValueError('n must be nonnegative.')\n    if n == 0:\n        n1 = n + 1\n    else:\n        n1 = n\n    x, w = roots_hermitenorm(n1)\n\n    def wfunc(x):\n        return exp(-x * x / 2.0)\n    if n == 0:\n        x, w = ([], [])\n    hn = sqrt(2 * pi) * _gam(n + 1)\n    kn = 1.0\n    p = orthopoly1d(x, w, hn, kn, wfunc=wfunc, limits=(-inf, inf), monic=monic, eval_func=lambda x: _ufuncs.eval_hermitenorm(n, x))\n    return p",
    "docstring": "Normalized (probabilist's) Hermite polynomial. Defined by .. math:: He_n(x) = (-1)^ne^{x^2/2}\\frac{d^n}{dx^n}e^{-x^2/2}; :math: is a polynomial of degree :math:. Parameters ---------- n : int Degree of the polynomial. monic : bool, optional If , scale the leading coefficient to be 1. Default is . Returns ------- He : orthopoly1d Hermite polynomial. Notes ----- The polynomials :math: are orthogonal over :math: with weight function :math:.",
    "type": "function",
    "file_path": "scipy\\scipy\\special\\_orthogonal.py",
    "ast_data": "FunctionDef name:hermitenorm arg:n arg:monic arguments arg arg If Compare Raise Call If Compare Assign Assign Assign Call FunctionDef name:wfunc arg:x arguments arg Return return:yes Call If Compare Assign Assign Call Call Assign Assign Call arguments arg Call Return return:yes"
  },
  {
    "library": "kornia",
    "name": "calc_ray_t_vals",
    "source_code": "def calc_ray_t_vals(points_3d: Tensor) -> Tensor:\n    t_vals = torch.linalg.norm(points_3d - points_3d[..., 0, :].unsqueeze(-2), dim=-1)\n    return t_vals",
    "docstring": "Calculate t values along rays. Args: points_3d: Points along rays :math: Returns: t values along rays :math:",
    "type": "function",
    "file_path": "kornia\\kornia\\nerf\\samplers.py",
    "ast_data": "FunctionDef name:calc_ray_t_vals arg:points_3d arguments arg Assign Call Call Return return:yes"
  },
  {
    "library": "django",
    "name": "__iter__",
    "source_code": "def __iter__(self):\n    capi.reset_reading(self._ptr)\n    for i in range(self.num_feat):\n        yield Feature(capi.get_next_feature(self._ptr), self)",
    "docstring": "Iterate over each Feature in the Layer.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\gdal\\layer.py",
    "ast_data": "FunctionDef name:__iter__ arg:self arguments arg Call For Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "most_specific_common_supertype",
    "source_code": "def most_specific_common_supertype(self, others):\n    if not all((isinstance(other, TensorArraySpec) for other in others)):\n        return False\n    common_shape = self._element_shape.most_specific_common_supertype((other._element_shape for other in others))\n    if common_shape is None:\n        return None\n    if not all((self._dtype == other._dtype for other in others)):\n        return None\n    if not all((self._dynamic_size == other._dynamic_size for other in others)):\n        return None\n    infer_shape = self._infer_shape and all((other._infer_shape for other in others))\n    return TensorArraySpec(common_shape, self._dtype, self._dynamic_size, infer_shape)",
    "docstring": "Returns the most specific supertype of and . Args: others: A Sequence of . Returns if a supertype does not exist.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\tensor_array_ops.py",
    "ast_data": "FunctionDef name:most_specific_common_supertype arg:self arg:others arguments arg arg If Call Call Return return:yes Assign Call If Compare Return return:no If Call Compare Return return:no If Call Compare Return return:no Assign BoolOp Call Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "get_xmargin",
    "source_code": "def get_xmargin(self):\n    return self._xmargin",
    "docstring": "Retrieve autoscaling margin of the x-axis. .. versionadded:: 3.9 Returns ------- xmargin : float See Also -------- matplotlib.axes.Axes.set_xmargin",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\axes\\_base.py",
    "ast_data": "FunctionDef name:get_xmargin arg:self arguments arg Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "set_math_fontfamily",
    "source_code": "def set_math_fontfamily(self, fontfamily):\n    if fontfamily is None:\n        fontfamily = mpl.rcParams['mathtext.fontset']\n    else:\n        valid_fonts = _validators['mathtext.fontset'].valid.values()\n        _api.check_in_list(valid_fonts, math_fontfamily=fontfamily)\n    self._math_fontfamily = fontfamily",
    "docstring": "Set the font family for text in math mode. If not set explicitly, :rc: will be used. Parameters ---------- fontfamily : str The name of the font family. Available font families are defined in the :ref:. See Also -------- .text.Text.get_math_fontfamily",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\font_manager.py",
    "ast_data": "FunctionDef name:set_math_fontfamily arg:self arg:fontfamily arguments arg arg If Compare Assign Assign Call Call Assign"
  },
  {
    "library": "sphinx",
    "name": "strip_arg_typehint",
    "source_code": "def strip_arg_typehint(s: str) -> str:\n    return s.split(':')[0].strip()",
    "docstring": "Strip a type hint from argument definition.",
    "type": "function",
    "file_path": "sphinx\\sphinx\\ext\\autosummary\\__init__.py",
    "ast_data": "FunctionDef name:strip_arg_typehint arg:s arguments arg Return return:yes Call Call"
  },
  {
    "library": "django",
    "name": "model_fields",
    "source_code": "@cached_property\ndef model_fields(self):\n    converter = connections[self.db].introspection.identifier_converter\n    return {converter(field.column): field for field in self.model._meta.fields if field.column}",
    "docstring": "A dict mapping column names to model field names.",
    "type": "method",
    "file_path": "django\\django\\db\\models\\query.py",
    "ast_data": "FunctionDef name:model_fields arg:self arguments arg Assign Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "impl_abstract",
    "source_code": "def impl_abstract(self, _stacklevel=2) -> typing.Callable:\n\n    def inner(f):\n        self._check_doesnt_have_library_meta_impl()\n        self._register_impl('abstract', f, stacklevel=_stacklevel)\n        location = self._get_impl('abstract').location\n        qualname = self._qualname\n\n        @functools.wraps(f)\n        def f_with_ctx(*args, **kwargs):\n\n            def error_on_ctx():\n                raise RuntimeError(f'Attempted to call get_ctx() for the meta implementation for {qualname}.You have presumably called get_ctx() because the operator has a data-dependent output shape; if so, there is no such meta implementation and this error is the correct behavior. Otherwise, please remove the call to get_ctx() in the implementation registered with impl_abstract at {location}')\n            with torch._library.fake_impl.set_ctx_getter(error_on_ctx):\n                return f(*args, **kwargs)\n        self._lib.impl(self._opname, f_with_ctx, 'Meta')\n        return f\n    return inner",
    "docstring": "This API is deprecated, please use torch.library.custom_op instead",
    "type": "method",
    "file_path": "pytorch\\torch\\_custom_op\\impl.py",
    "ast_data": "FunctionDef name:impl_abstract arg:self arg:_stacklevel arguments arg arg FunctionDef name:inner arg:f arguments arg Call Call Assign Call Assign FunctionDef name:f_with_ctx arguments arg arg FunctionDef name:error_on_ctx arguments Raise Call With Call Return return:yes Call Call Call Return return:yes Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "call_with_captures",
    "source_code": "def call_with_captures(self, args: Sequence[Any], kwargs: Dict[str, Any], captures: Sequence[Any]) -> Any:\n    bound_parameters = self.function_type.bind(*args, **kwargs)\n    tensor_inputs = self.function_type.unpack_inputs(bound_parameters)\n    capture_inputs = self.function_type.unpack_captures(captures)\n    return self.call_preflattened(tensor_inputs + capture_inputs)",
    "docstring": "Calls with args, kwargs, captures and returns structured output.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\polymorphic_function\\atomic_function.py",
    "ast_data": "FunctionDef name:call_with_captures arg:self arg:args arg:kwargs arg:captures arguments arg arg arg arg Assign Call Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "hessian",
    "source_code": "@exposed_in('torch.func')\ndef hessian(func, argnums=0):\n    return jacfwd(jacrev(func, argnums), argnums)",
    "docstring": "Computes the Hessian of `jacfwdjacrev`, which has better operator coverage. A basic usage with a R^N -> R^1 function gives a N x N Hessian: >>> from torch.func import hessian >>> def f(x): >>> return x.sin().sum() >>> >>> x = torch.randn(5) >>> hess = hessian(f)(x) # equivalent to jacfwd(jacrev(f))(x) >>> assert torch.allclose(hess, torch.diag(-x.sin()))",
    "type": "function",
    "file_path": "pytorch\\torch\\_functorch\\eager_transforms.py",
    "ast_data": "FunctionDef name:hessian arg:func arg:argnums arguments arg arg Return return:yes Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "WriteDebuggedGraph",
    "source_code": "def WriteDebuggedGraph(self, debugged_graph):\n    debug_event = debug_event_pb2.DebugEvent(debugged_graph=debugged_graph)\n    self._EnsureTimestampAdded(debug_event)\n    _pywrap_debug_events_writer.WriteDebuggedGraph(self._dump_root, debug_event)",
    "docstring": "Write a DebuggedGraph proto with the writer. Args: debugged_graph: A DebuggedGraph proto, describing the details of a TensorFlow Graph that has completed its construction.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\debug\\lib\\debug_events_writer.py",
    "ast_data": "FunctionDef name:WriteDebuggedGraph arg:self arg:debugged_graph arguments arg arg Assign Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "DebuggingVariable",
    "source_code": "class DebuggingVariable(VariableTracker):\n\n    def __init__(self, value, **kwargs) -> None:\n        super().__init__(**kwargs)\n        self.value = value\n\n    @staticmethod\n    def is_reorderable_logging_function(obj):\n        return callable(obj) and isinstance(obj, (types.FunctionType, types.BuiltinFunctionType)) and (obj in torch._dynamo.config.reorderable_logging_functions)\n\n    def call_function(self, tx: 'InstructionTranslator', args, kwargs):\n        if tx.export:\n            return\n        if not self.can_reorder_logs(self.value, args, kwargs):\n            unimplemented(f'Reordering debugging function {self.value} with inputs {args} {kwargs} is not yet implemented.')\n        tx.debug_locals.append((self, list(args)))\n\n    def reconstruct(self, codegen: 'PyCodegen'):\n        return self.source.reconstruct(codegen)\n\n    @staticmethod\n    def can_reorder_logs(fn, args, kwargs) -> True:\n        allowed_input_types = (variables.TensorVariable, variables.ConstantVariable, StringFormatVariable)\n        flat_args = pytree.tree_leaves([args, kwargs])\n        for arg in flat_args:\n            if not isinstance(arg, allowed_input_types):\n                return False\n        return True",
    "docstring": "Represents a call to a debugging function like print(), or something registered to config.reorderable_logging_functions.",
    "type": "class",
    "file_path": "pytorch\\torch\\_dynamo\\variables\\misc.py",
    "ast_data": "ClassDef name:DebuggingVariable FunctionDef name:__init__ arg:self arg:value arguments arg arg arg Call Call Assign FunctionDef name:is_reorderable_logging_function arg:obj arguments arg Return return:yes BoolOp Call Call Compare FunctionDef name:call_function arg:self arg:tx arg:args arg:kwargs arguments arg arg arg arg If Return return:no If Call Call Call Call FunctionDef name:reconstruct arg:self arg:codegen arguments arg arg Return return:yes Call FunctionDef name:can_reorder_logs arg:fn arg:args arg:kwargs arguments arg arg arg Assign Assign Call For If Call Return return:yes Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "_read_arg",
    "source_code": "def _read_arg(self, nbytes, signed=False):\n    return int.from_bytes(self.file.read(nbytes), 'big', signed=signed)",
    "docstring": "Read and return a big-endian integer *nbytes* long. Signedness is determined by the *signed* keyword.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\dviread.py",
    "ast_data": "FunctionDef name:_read_arg arg:self arg:nbytes arg:signed arguments arg arg arg Return return:yes Call Call"
  },
  {
    "library": "scipy",
    "name": "toeplitz",
    "source_code": "def toeplitz(c, r=None):\n    c = np.asarray(c)\n    if r is None:\n        r = c.conjugate()\n    else:\n        r = np.asarray(r)\n    if c.ndim > 1 or r.ndim > 1:\n        msg = 'Beginning in SciPy 1.17, multidimensional input will be treated as a batch, not `ravel`ed. To preserve the existing behavior and silence this warning, `ravel` arguments before passing them to `toeplitz`.'\n        warnings.warn(msg, FutureWarning, stacklevel=2)\n    c, r = (c.ravel(), r.ravel())\n    vals = np.concatenate((c[::-1], r[1:]))\n    out_shp = (len(c), len(r))\n    n = vals.strides[0]\n    return as_strided(vals[len(c) - 1:], shape=out_shp, strides=(-n, n)).copy()",
    "docstring": "Construct a Toeplitz matrix. The Toeplitz matrix has constant diagonals, with c as its first column and r as its first row. If r is not given, `toeplitzcrcr` is None, was changed in version 0.8.0. The behavior in previous versions was undocumented and is no longer supported. Examples -------- >>> from scipy.linalg import toeplitz >>> toeplitz([1,2,3], [1,4,5,6]) array([[1, 4, 5, 6], [2, 1, 4, 5], [3, 2, 1, 4]]) >>> toeplitz([1.0, 2+3j, 4-1j]) array([[ 1.+0.j, 2.-3.j, 4.+1.j], [ 2.+3.j, 1.+0.j, 2.-3.j], [ 4.-1.j, 2.+3.j, 1.+0.j]])",
    "type": "function",
    "file_path": "scipy\\scipy\\linalg\\_special_matrices.py",
    "ast_data": "FunctionDef name:toeplitz arg:c arg:r arguments arg arg Assign Call If Compare Assign Call Assign Call If BoolOp Compare Compare Assign Call Assign Call Call Assign Call Assign Call Call Assign Return return:yes Call Call Call"
  },
  {
    "library": "scrapy",
    "name": "__init__",
    "source_code": "def __init__(self, dupefilter: BaseDupeFilter, jobdir: str | None=None, dqclass: type[BaseQueue] | None=None, mqclass: type[BaseQueue] | None=None, logunser: bool=False, stats: StatsCollector | None=None, pqclass: type[ScrapyPriorityQueue] | None=None, crawler: Crawler | None=None):\n    self.df: BaseDupeFilter = dupefilter\n    self.dqdir: str | None = self._dqdir(jobdir)\n    self.pqclass: type[ScrapyPriorityQueue] | None = pqclass\n    self.dqclass: type[BaseQueue] | None = dqclass\n    self.mqclass: type[BaseQueue] | None = mqclass\n    self.logunser: bool = logunser\n    self.stats: StatsCollector | None = stats\n    self.crawler: Crawler | None = crawler\n    self._sdqclass: type[BaseQueue] | None = self._get_start_queue_cls(crawler, 'DISK')\n    self._smqclass: type[BaseQueue] | None = self._get_start_queue_cls(crawler, 'MEMORY')",
    "docstring": "Initialize the scheduler. :param dupefilter: An object responsible for checking and filtering duplicate requests. The value for the :setting: setting is used by default. :type dupefilter: :class: instance or similar: any class that implements the interface :param jobdir: The path of a directory to be used for persisting the crawl's state. The value for the :setting: setting is used by default. See :ref:. :type jobdir: :class: or `SCHEDULER_DISK_QUEUESCHEDULER_MEMORY_QUEUESCHEDULER_DEBUGSTATS_CLASSscrapy.statscollectors.StatsCollectorStatsCollectorSCHEDULER_PRIORITY_QUEUEscrapy.crawler.Crawler`",
    "type": "method",
    "file_path": "scrapy\\scrapy\\core\\scheduler.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:dupefilter arg:jobdir arg:dqclass arg:mqclass arg:logunser arg:stats arg:pqclass arg:crawler arguments arg arg arg arg arg arg arg arg arg Call Call Call"
  },
  {
    "library": "cherrypy",
    "name": "request_digest",
    "source_code": "def request_digest(self, ha1, entity_body=''):\n    ha2 = self.HA2(entity_body)\n    if self.qop:\n        req = '%s:%s:%s:%s:%s' % (self.nonce, self.nc, self.cnonce, self.qop, ha2)\n    else:\n        req = '%s:%s' % (self.nonce, ha2)\n    if self.algorithm == 'MD5-sess':\n        ha1 = H('%s:%s:%s' % (ha1, self.nonce, self.cnonce))\n    digest = H('%s:%s' % (ha1, req))\n    return digest",
    "docstring": "Calculate the Request-Digest. See :rfc: section 3.2.2.1. ha1 The HA1 string obtained from the credentials store. entity_body If 'qop' is set to 'auth-int', then A2 includes a hash of the \"entity body\". The entity body is the part of the message which follows the HTTP headers. See :rfc: section 4.3. This refers to the entity the user agent sent in the request which has the Authorization header. Typically GET requests don't have an entity, and POST requests do.",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\lib\\auth_digest.py",
    "ast_data": "FunctionDef name:request_digest arg:self arg:ha1 arg:entity_body arguments arg arg arg Assign Call If Assign Assign If Compare Assign Call Assign Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "get_family",
    "source_code": "def get_family(self):\n    return self._family",
    "docstring": "Return a list of individual font family names or generic family names. The font families or generic font families (which will be resolved from their respective rcParams when searching for a matching font) in the order of preference.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\font_manager.py",
    "ast_data": "FunctionDef name:get_family arg:self arguments arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "strip_overloads",
    "source_code": "def strip_overloads(gm):\n    for node in gm.graph.nodes:\n        if isinstance(node.target, torch._ops.OpOverload):\n            node.target = node.target.overloadpacket\n    gm.recompile()",
    "docstring": "Modifies the target of graph nodes in :attr: to strip overloads. Args: gm(fx.GraphModule): The input Fx graph module to be modified",
    "type": "function",
    "file_path": "pytorch\\torch\\_functorch\\compile_utils.py",
    "ast_data": "FunctionDef name:strip_overloads arg:gm arguments arg For If Call Assign Call"
  },
  {
    "library": "tensorflow",
    "name": "getfullargspec",
    "source_code": "def getfullargspec(obj):\n    decorators, target = tf_decorator.unwrap(obj)\n    for d in decorators:\n        if d.decorator_argspec is not None:\n            return _convert_maybe_argspec_to_fullargspec(d.decorator_argspec)\n    return _getfullargspec(target)",
    "docstring": "TFDecorator-aware replacement for . This wrapper emulates in[^)]* Python2. Args: obj: A callable, possibly decorated. Returns: The that describes the signature of the outermost decorator that changes the callable's signature. If the callable is not decorated, will be called directly on the callable.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\util\\tf_inspect.py",
    "ast_data": "FunctionDef name:getfullargspec arg:obj arguments arg Assign Call For If Compare Return return:yes Call Return return:yes Call"
  },
  {
    "library": "django",
    "name": "year_lookup_bounds_for_date_field",
    "source_code": "def year_lookup_bounds_for_date_field(self, value, iso_year=False):\n    if iso_year:\n        first = datetime.date.fromisocalendar(value, 1, 1)\n        second = datetime.date.fromisocalendar(value + 1, 1, 1) - datetime.timedelta(days=1)\n    else:\n        first = datetime.date(value, 1, 1)\n        second = datetime.date(value, 12, 31)\n    first = self.adapt_datefield_value(first)\n    second = self.adapt_datefield_value(second)\n    return [first, second]",
    "docstring": "Return a two-elements list with the lower and upper bound to be used with a BETWEEN operator to query a DateField value using a year lookup. is an int, containing the looked-up year. If is True, return bounds for ISO-8601 week-numbering years.",
    "type": "method",
    "file_path": "django\\django\\db\\backends\\base\\operations.py",
    "ast_data": "FunctionDef name:year_lookup_bounds_for_date_field arg:self arg:value arg:iso_year arguments arg arg arg If Assign Call Assign Call Call Assign Call Assign Call Assign Call Assign Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "custom_onnx_symbolic",
    "source_code": "def custom_onnx_symbolic(name: str, opset: Union[OpsetVersion, Sequence[OpsetVersion]], decorate: Optional[Sequence[Callable]]=None) -> Callable:\n    return onnx_symbolic(name, opset, decorate, custom=True)",
    "docstring": "Registers a custom symbolic function. Args: name: the qualified name of the function. opset: the opset version of the function. decorate: a sequence of decorators to apply to the function. Returns: The decorator. Raises: ValueError: If the separator '::' is not in the name.",
    "type": "function",
    "file_path": "pytorch\\torch\\onnx\\_internal\\registration.py",
    "ast_data": "FunctionDef name:custom_onnx_symbolic arg:name arg:opset arg:decorate arguments arg arg arg Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "set_fontweight",
    "source_code": "def set_fontweight(self, weight):\n    self._fontproperties.set_weight(weight)\n    self.stale = True",
    "docstring": "Set the font weight. Parameters ---------- weight : {a numeric value in range 0-1000, 'ultralight', 'light', 'normal', 'regular', 'book', 'medium', 'roman', 'semibold', 'demibold', 'demi', 'bold', 'heavy', 'extra bold', 'black'} See Also -------- .font_manager.FontProperties.set_weight",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\text.py",
    "ast_data": "FunctionDef name:set_fontweight arg:self arg:weight arguments arg arg Call Assign"
  },
  {
    "library": "tensorflow",
    "name": "convert_to_tensor",
    "source_code": "@profiler_trace.trace_wrapper('convert_to_tensor')\ndef convert_to_tensor(value, dtype=None, name=None, as_ref=False, preferred_dtype=None, dtype_hint=None, ctx=None, accepted_result_types=(tensor_lib.Tensor,)) -> Union[EagerTensor, SymbolicTensor]:\n    preferred_dtype = preferred_dtype or dtype_hint\n    return tensor_conversion_registry.convert(value, dtype, name, as_ref, preferred_dtype, accepted_result_types)",
    "docstring": "Implementation of the public convert_to_tensor.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\ops.py",
    "ast_data": "FunctionDef name:convert_to_tensor arg:value arg:dtype arg:name arg:as_ref arg:preferred_dtype arg:dtype_hint arg:ctx arg:accepted_result_types arguments arg arg arg arg arg arg arg arg Assign BoolOp Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "inverse_log_det_jacobian",
    "source_code": "def inverse_log_det_jacobian(self, y, event_ndims, name='inverse_log_det_jacobian'):\n    return self._call_inverse_log_det_jacobian(y, event_ndims, name)",
    "docstring": "Returns the (log o det o Jacobian o inverse)(y). Mathematically, returns: . (Recall that: .) Note that is the negative of this function, evaluated at . Args: y: . The input to the \"inverse\" Jacobian determinant evaluation. event_ndims: Number of dimensions in the probabilistic events being transformed. Must be greater than or equal to . The result is summed over the final dimensions to produce a scalar Jacobian determinant for each event, i.e. it has shape dimensions. name: The name to give this op. Returns: , if this bijector is injective. If not injective, returns the tuple of local log det Jacobians, , where is the restriction of to the partition . Raises: TypeError: if is specified and is not . NotImplementedError: if is not implemented.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\distributions\\bijector_impl.py",
    "ast_data": "FunctionDef name:inverse_log_det_jacobian arg:self arg:y arg:event_ndims arg:name arguments arg arg arg arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_get_identity_broadcaster",
    "source_code": "def _get_identity_broadcaster(shape):\n    if shape.rank is None:\n        raise ValueError('Shape must have a defined rank')\n    layers = [_LayerBroadcaster.get_identity_broadcaster(shape._num_slices_in_dimension(i)) for i in range(shape.rank)]\n    return _Broadcaster(shape, shape, layers)",
    "docstring": "Gets a Broadcaster for two identical shapes.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\dynamic_ragged_shape.py",
    "ast_data": "FunctionDef name:_get_identity_broadcaster arg:shape arguments arg If Compare Raise Call Assign Call Call Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "_get_rng_state_offset",
    "source_code": "def _get_rng_state_offset(device: Union[int, str, torch.device]='xpu') -> int:\n    _lazy_init()\n    final_device = _get_device(device)\n    default_generator = _get_generator(final_device)\n    return default_generator.get_offset()",
    "docstring": "Return the random number generator state offset of the specified GPU. Args: device (torch.device or int, optional): The device to return the RNG state offset of. Default: ``, the current XPU device). .. warning:: This function eagerly initializes XPU.",
    "type": "function",
    "file_path": "pytorch\\torch\\xpu\\__init__.py",
    "ast_data": "FunctionDef name:_get_rng_state_offset arg:device arguments arg Call Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "reduce_add",
    "source_code": "def reduce_add(inputs, destination=None):\n    destination = _get_device_index(destination, optional=True)\n    input_size = inputs[0].size()\n    root_index = None\n    for i, inp in enumerate(inputs):\n        assert inp.device.type != 'cpu', 'reduce_add expects all inputs to be on GPUs'\n        if inp.get_device() == destination:\n            root_index = i\n        if inp.size() != input_size:\n            got = 'x'.join((str(x) for x in inp.size()))\n            expected = 'x'.join((str(x) for x in input_size))\n            raise ValueError(f'input {i} has invalid size: got {got}, but expected {expected}')\n    if root_index is None:\n        raise RuntimeError('reduce_add expects destination to be on the same GPU with one of the tensors')\n    if len(inputs) == 1:\n        return inputs[0]\n    if nccl.is_available(inputs):\n        result = torch.empty_like(inputs[root_index])\n        nccl.reduce(inputs, output=result, root=root_index)\n    else:\n        destination_device = torch.device(inputs[root_index].device.type, destination)\n        nonroot = [t for i, t in enumerate(inputs) if i != root_index]\n        result = inputs[root_index] + nonroot[0].to(device=destination_device, non_blocking=True)\n        for other in nonroot[1:]:\n            result.add_(other.to(device=destination_device, non_blocking=True))\n    return result",
    "docstring": "Sum tensors from multiple GPUs. All inputs should have matching shapes, dtype, and layout. The output tensor will be of the same shape, dtype, and layout. Args: inputs (Iterable[Tensor]): an iterable of tensors to add. destination (int, optional): a device on which the output will be placed (default: current device). Returns: A tensor containing an elementwise sum of all inputs, placed on the :attr: device.",
    "type": "function",
    "file_path": "pytorch\\torch\\nn\\parallel\\comm.py",
    "ast_data": "FunctionDef name:reduce_add arg:inputs arg:destination arguments arg arg Assign Call Assign Call Assign For Call Compare If Compare Call Assign If Compare Call Assign Call Call Call Assign Call Call Raise Call If Compare Raise Call If Compare Call Return return:yes If Call Assign Call Call Assign Call Assign Call Compare Assign Call For Call Call Return return:yes"
  },
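A hedged usage sketch of `reduce_add`; it assumes at least two CUDA devices are present and imports the function from the module path listed above:

```python
import torch
from torch.nn.parallel.comm import reduce_add

if torch.cuda.device_count() >= 2:
    a = torch.ones(4, device="cuda:0")
    b = torch.full((4,), 2.0, device="cuda:1")
    # destination must match the device of one of the inputs;
    # the result lands on that device.
    out = reduce_add([a, b], destination=0)
    print(out)  # tensor([3., 3., 3., 3.], device='cuda:0')
```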
  {
    "library": "authlib",
    "name": "fetch_token",
    "source_code": "def fetch_token(self, url=None, body='', method='POST', headers=None, auth=None, grant_type=None, state=None, **kwargs):\n    state = state or self.state\n    authorization_response = kwargs.pop('authorization_response', None)\n    if authorization_response and '#' in authorization_response:\n        return self.token_from_fragment(authorization_response, state)\n    session_kwargs = self._extract_session_request_params(kwargs)\n    if authorization_response and 'code=' in authorization_response:\n        grant_type = 'authorization_code'\n        params = parse_authorization_code_response(authorization_response, state=state)\n        kwargs['code'] = params['code']\n    if grant_type is None:\n        grant_type = self.metadata.get('grant_type')\n    if grant_type is None:\n        grant_type = _guess_grant_type(kwargs)\n        self.metadata['grant_type'] = grant_type\n    body = self._prepare_token_endpoint_body(body, grant_type, **kwargs)\n    if auth is None:\n        auth = self.client_auth(self.token_endpoint_auth_method)\n    if headers is None:\n        headers = DEFAULT_HEADERS\n    if url is None:\n        url = self.metadata.get('token_endpoint')\n    return self._fetch_token(url, body=body, auth=auth, method=method, headers=headers, **session_kwargs)",
    "docstring": "Generic method for fetching an access token from the token endpoint. :param url: Access Token endpoint URL, if not configured, `OAuth2Token` object (a dict too).",
    "type": "method",
    "file_path": "authlib\\authlib\\oauth2\\client.py",
    "ast_data": "FunctionDef name:fetch_token arg:self arg:url arg:body arg:method arg:headers arg:auth arg:grant_type arg:state arguments arg arg arg arg arg arg arg arg arg Assign BoolOp Assign Call If BoolOp Compare Return return:yes Call Assign Call If BoolOp Compare Assign Assign Call Assign If Compare Assign Call If Compare Assign Call Assign Assign Call If Compare Assign Call If Compare Assign If Compare Assign Call Return return:yes Call"
  },
  {
    "library": "numpy",
    "name": "get_wheel_names",
    "source_code": "def get_wheel_names(version):\n    http = urllib3.PoolManager(cert_reqs='CERT_REQUIRED')\n    tmpl = re.compile(f'^.*{PREFIX}-{version}{SUFFIX}')\n    index_url = f'{NAMES_URL}'\n    index_html = http.request('GET', index_url)\n    soup = BeautifulSoup(index_html.data, 'html.parser')\n    return sorted(soup.find_all(string=tmpl))",
    "docstring": "Get wheel names from Anaconda HTML directory. This looks in the Anaconda multibuild-wheels-staging page and parses the HTML to get all the wheel names for a release version. Parameters ---------- version : str The release version. For instance, \"1.18.3\".",
    "type": "function",
    "file_path": "numpy\\tools\\download-wheels.py",
    "ast_data": "FunctionDef name:get_wheel_names arg:version arguments arg Assign Call Assign Call Assign Assign Call Assign Call Return return:yes Call Call"
  },
  {
    "library": "matplotlib",
    "name": "_expression",
    "source_code": "def _expression(initial, tokens, data):\n    delim_stack = []\n    token = initial\n    while True:\n        if token.is_delim():\n            if token.raw in ('[', '{'):\n                delim_stack.append(token)\n            elif token.raw in (']', '}'):\n                if not delim_stack:\n                    raise RuntimeError(f'unmatched closing token {token}')\n                match = delim_stack.pop()\n                if match.raw != token.opposite():\n                    raise RuntimeError(f'opening token {match} closed by {token}')\n                if not delim_stack:\n                    break\n            else:\n                raise RuntimeError(f'unknown delimiter {token}')\n        elif not delim_stack:\n            break\n        token = next(tokens)\n    return _BalancedExpression(initial.pos, data[initial.pos:token.endpos()].decode('ascii', 'replace'))",
    "docstring": "Consume some number of tokens and return a balanced PostScript expression. Parameters ---------- initial : _Token The token that triggered parsing a balanced expression. tokens : iterator of _Token Following tokens. data : bytes Underlying data that the token positions point to. Returns ------- _BalancedExpression",
    "type": "function",
    "file_path": "matplotlib\\lib\\matplotlib\\_type1font.py",
    "ast_data": "FunctionDef name:_expression arg:initial arg:tokens arg:data arguments arg arg arg Assign Assign While If Call If Compare Call If Compare If Raise Call Assign Call If Compare Call Raise Call If Raise Call If Assign Call Return return:yes Call Call Call"
  },
  {
    "library": "matplotlib",
    "name": "draw",
    "source_code": "def draw(self, *args, **kwargs):\n    pass",
    "docstring": "Render the . This method must walk the artist tree, even if no output is produced, because it triggers deferred work that users may want to access before saving output to disk. For example computing limits, auto-limits, and tick values.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\backend_bases.py",
    "ast_data": "FunctionDef name:draw arg:self arguments arg arg arg"
  },
  {
    "library": "pytorch",
    "name": "_ModMemStats",
    "source_code": "class _ModMemStats:\n\n    def __init__(self, mod_fqn: str):\n        self.mod_fqn = mod_fqn\n        self.parameter_mem: int\n        self.buffer_mem: int\n        self.input_mem: int\n        self.output_mem: int\n        self.local_peak: dict[torch.device, int] = {}\n        self.snapshots: dict[_ModState, list[dict[torch.device, dict[str, int]]]] = {}",
    "docstring": "A class to store the memory statistics of a module. Args: mod_fqn (str): The fully qualified name of the module. Attributes: mod_fqn (str): The fully qualified name of the module. parameter_mem (int): The memory usage of the parameters of the module. buffer_mem (int): The memory usage of the buffers of the module. input_mem (int): The memory usage of the inputs to the module. output_mem (int): The memory usage of the outputs from the module. snapshots (Dict[_ModState, Dict[torch.device, Dict[str, int]]]): A dictionary of memory snapshots of the module at different states defined by `_MemRefType` and values as the memory consumed in bytes.",
    "type": "class",
    "file_path": "pytorch\\torch\\distributed\\_tools\\mem_tracker.py",
    "ast_data": "ClassDef name:_ModMemStats FunctionDef name:__init__ arg:self arg:mod_fqn arguments arg arg Assign"
  },
  {
    "library": "pytorch",
    "name": "prepare_local_plan",
    "source_code": "def prepare_local_plan(self, plan: LoadPlan) -> LoadPlan:\n    return plan",
    "docstring": "Implementation of the StorageReader method",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\checkpoint\\format_utils.py",
    "ast_data": "FunctionDef name:prepare_local_plan arg:self arg:plan arguments arg arg Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "_va_for_angle",
    "source_code": "def _va_for_angle(self, angle):\n    anchor_at_left = self.get_horizontalalignment() == 'left'\n    if angle <= 10 or 350 <= angle or 170 <= angle <= 190 or (80 <= angle <= 100) or (260 <= angle <= 280):\n        return 'center'\n    elif 190 < angle < 260 or 10 < angle < 80:\n        return 'baseline' if anchor_at_left else 'top'\n    return 'top' if anchor_at_left else 'baseline'",
    "docstring": "Determines vertical alignment ('va') for rotation_mode \"ytick\" based on the angle of rotation in degrees and the horizontal alignment.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\text.py",
    "ast_data": "FunctionDef name:_va_for_angle arg:self arg:angle arguments arg arg Assign Compare Call If BoolOp Compare Compare Compare Compare Compare Return return:yes If BoolOp Compare Compare Return return:yes Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_flatten_with_inner_dim",
    "source_code": "def _flatten_with_inner_dim(x, dim, x_rank):\n    shape = array_ops.shape(x)\n    x = array_ops.transpose(x, list(range(1, dim)) + [0] + list(range(dim, x_rank)))\n    if dim < x_rank - 1:\n        new_shape_pieces = [shape[1:dim], [-1], shape[dim + 1:]]\n    else:\n        new_shape_pieces = [shape[1:dim], [-1]]\n    new_shape = array_ops.concat(new_shape_pieces, axis=0)\n    return array_ops.reshape(x, new_shape)",
    "docstring": "Merges the first dim with the specified dim.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\parallel_for\\pfor.py",
    "ast_data": "FunctionDef name:_flatten_with_inner_dim arg:x arg:dim arg:x_rank arguments arg arg arg Assign Call Assign Call Call Call Call Call If Compare Assign Assign Assign Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "opcode_to_name",
    "source_code": "def opcode_to_name(model, op_code):\n    op = model.operatorCodes[op_code]\n    code = max(op.builtinCode, op.deprecatedBuiltinCode)\n    for name, value in vars(schema_fb.BuiltinOperator).items():\n        if value == code:\n            return name\n    return None",
    "docstring": "Converts a TFLite op_code to the human readable name. Args: model: The input tflite model. op_code: The op_code to resolve to a readable name. Returns: A string containing the human readable op name, or None if not resolvable.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\lite\\tools\\flatbuffer_utils.py",
    "ast_data": "FunctionDef name:opcode_to_name arg:model arg:op_code arguments arg arg Assign Assign Call For Call Call If Compare Return return:yes Return return:no"
  },
  {
    "library": "scipy",
    "name": "buttap",
    "source_code": "def buttap(N):\n    if abs(int(N)) != N:\n        raise ValueError('Filter order must be a nonnegative integer')\n    z = np.array([])\n    m = np.arange(-N + 1, N, 2)\n    p = -np.exp(1j * pi * m / (2 * N))\n    k = 1\n    return (z, p, k)",
    "docstring": "Return (z,p,k) for analog prototype of Nth-order Butterworth filter. The filter will have an angular (e.g., rad/s) cutoff frequency of 1. See Also -------- butter : Filter design function using this prototype",
    "type": "function",
    "file_path": "scipy\\scipy\\signal\\_filter_design.py",
    "ast_data": "FunctionDef name:buttap arg:N arguments arg If Compare Call Call Raise Call Assign Call Assign Call Assign Call Assign Return return:yes"
  },
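A quick sanity check of the prototype above: `buttap` returns no finite zeros, unit gain, and poles on the unit circle in the left half-plane:

```python
import numpy as np
from scipy.signal import buttap

z, p, k = buttap(4)
print(z)                              # [] -- no finite zeros
print(np.allclose(np.abs(p), 1.0))    # True: poles on the unit circle
print(np.all(p.real < 0))             # True: left half-plane, hence stable
print(k)                              # 1
```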
  {
    "library": "tensorflow",
    "name": "stateless_random_brightness",
    "source_code": "@tf_export('image.stateless_random_brightness', v1=[])\n@dispatch.register_unary_elementwise_api\n@dispatch.add_dispatch_support\ndef stateless_random_brightness(image, max_delta, seed):\n    if max_delta < 0:\n        raise ValueError('max_delta must be non-negative.')\n    delta = stateless_random_ops.stateless_random_uniform(shape=[], minval=-max_delta, maxval=max_delta, seed=seed)\n    return adjust_brightness(image, delta)",
    "docstring": "Adjust the brightness of images by a random factor deterministically. Equivalent to using a randomly picked in the interval . Guarantees the same results given the same independent of how many times the function is called, and independent of global seed settings (e.g. ). Usage Example: >>> x = [[[1.0, 2.0, 3.0], ... [4.0, 5.0, 6.0]], ... [[7.0, 8.0, 9.0], ... [10.0, 11.0, 12.0]]] >>> seed = (1, 2) >>> tf.image.stateless_random_brightness(x, 0.2, seed) Args: image: An image or images to adjust. max_delta: float, must be non-negative. seed: A shape [2] Tensor, the seed to the random number generator. Must have dtype or . (When using XLA, only is allowed.) Returns: The brightness-adjusted image(s). Raises: ValueError: if is negative.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\image_ops_impl.py",
    "ast_data": "FunctionDef name:stateless_random_brightness arg:image arg:max_delta arg:seed arguments arg arg arg If Compare Raise Call Assign Call Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "enter",
    "source_code": "def enter(self, layer, inputs, build_graph, training, saving=None):\n    state = {'layer': layer, 'inputs': inputs, 'build_graph': build_graph, 'training': training, 'saving': saving}\n    return CallContextManager(self, state)",
    "docstring": "Push a Layer and its inputs and state onto the current call context. Args: layer: The whose is currently active. inputs: The inputs to the currently active . build_graph: Whether currently inside a Graph or FuncGraph. training: Whether currently executing in training or inference mode. saving: Whether currently saving to SavedModel. Returns: Context manager.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\base_layer_utils.py",
    "ast_data": "FunctionDef name:enter arg:self arg:layer arg:inputs arg:build_graph arg:training arg:saving arguments arg arg arg arg arg arg Assign Return return:yes Call"
  },
  {
    "library": "cryptography",
    "name": "add_extension",
    "source_code": "def add_extension(self, extval: ExtensionType, critical: bool) -> CertificateSigningRequestBuilder:\n    if not isinstance(extval, ExtensionType):\n        raise TypeError('extension must be an ExtensionType')\n    extension = Extension(extval.oid, critical, extval)\n    _reject_duplicate_extension(extension, self._extensions)\n    return CertificateSigningRequestBuilder(self._subject_name, [*self._extensions, extension], self._attributes)",
    "docstring": "Adds an X.509 extension to the certificate request.",
    "type": "method",
    "file_path": "cryptography\\src\\cryptography\\x509\\base.py",
    "ast_data": "FunctionDef name:add_extension arg:self arg:extval arg:critical arguments arg arg arg If Call Raise Call Assign Call Call Return return:yes Call"
  },
  {
    "library": "cherrypy",
    "name": "next",
    "source_code": "def next(self):\n    return self.__next__()",
    "docstring": "UTF-8-encode the next chunk of the stream.",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\lib\\encoding.py",
    "ast_data": "FunctionDef name:next arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "scrapy",
    "name": "init_request",
    "source_code": "def init_request(self) -> Any:\n    return self.initialized()",
    "docstring": "This function should return one initialization request, with the self.initialized method as callback. When the self.initialized method is called this spider is considered initialized. If you need to perform several requests for initializing your spider, you can do so by using different callbacks. The only requirement is that the final callback (of the last initialization request) must be self.initialized. The default implementation calls self.initialized immediately, and means that no initialization is needed. This method should be overridden only when you need to perform requests to initialize your spider",
    "type": "method",
    "file_path": "scrapy\\scrapy\\spiders\\init.py",
    "ast_data": "FunctionDef name:init_request arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "django",
    "name": "set_autocommit",
    "source_code": "def set_autocommit(self, autocommit, force_begin_transaction_with_broken_autocommit=False):\n    self.validate_no_atomic_block()\n    self.close_if_health_check_failed()\n    self.ensure_connection()\n    start_transaction_under_autocommit = force_begin_transaction_with_broken_autocommit and (not autocommit) and hasattr(self, '_start_transaction_under_autocommit')\n    if start_transaction_under_autocommit:\n        self._start_transaction_under_autocommit()\n    elif autocommit:\n        self._set_autocommit(autocommit)\n    else:\n        with debug_transaction(self, 'BEGIN'):\n            self._set_autocommit(autocommit)\n    self.autocommit = autocommit\n    if autocommit and self.run_commit_hooks_on_set_autocommit_on:\n        self.run_and_clear_commit_hooks()\n        self.run_commit_hooks_on_set_autocommit_on = False",
    "docstring": "Enable or disable autocommit. The usual way to start a transaction is to turn autocommit off. SQLite does not properly start a transaction when disabling autocommit. To avoid this buggy behavior and to actually enter a new transaction, an explicit BEGIN is required. Using force_begin_transaction_with_broken_autocommit=True will issue an explicit BEGIN with SQLite. This option will be ignored for other backends.",
    "type": "method",
    "file_path": "django\\django\\db\\backends\\base\\base.py",
    "ast_data": "FunctionDef name:set_autocommit arg:self arg:autocommit arg:force_begin_transaction_with_broken_autocommit arguments arg arg arg Call Call Call Assign BoolOp Call If Call If Call With Call Call Assign If BoolOp Call Assign"
  },
  {
    "library": "django",
    "name": "module_dir",
    "source_code": "def module_dir(module):\n    paths = list(getattr(module, '__path__', []))\n    if len(paths) == 1:\n        return paths[0]\n    else:\n        filename = getattr(module, '__file__', None)\n        if filename is not None:\n            return os.path.dirname(filename)\n    raise ValueError('Cannot determine directory containing %s' % module)",
    "docstring": "Find the name of the directory that contains a module, if possible. Raise ValueError otherwise, e.g. for namespace packages that are split over several directories.",
    "type": "function",
    "file_path": "django\\django\\utils\\module_loading.py",
    "ast_data": "FunctionDef name:module_dir arg:module arguments arg Assign Call Call If Compare Call Return return:yes Assign Call If Compare Return return:yes Call Raise Call"
  },
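A standalone sketch of the same lookup logic against stdlib modules; the inline re-definition mirrors the Django helper and is for illustration only:

```python
import os
import json

def module_dir(module):
    # Same logic as the Django helper above.
    paths = list(getattr(module, "__path__", []))
    if len(paths) == 1:
        return paths[0]
    filename = getattr(module, "__file__", None)
    if filename is not None:
        return os.path.dirname(filename)
    raise ValueError("Cannot determine directory containing %s" % module)

print(module_dir(json))  # package: single __path__ entry
print(module_dir(os))    # plain module: directory containing os.py
```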
  {
    "library": "scipy",
    "name": "_kpoints",
    "source_code": "def _kpoints(data, k, rng, xp):\n    idx = rng.choice(data.shape[0], size=int(k), replace=False)\n    idx = xp.asarray(idx, dtype=xp.asarray([1]).dtype)\n    return xp.take(data, idx, axis=0)",
    "docstring": "Pick k points at random in data (one row = one observation). Parameters ---------- data : ndarray Expect a rank 1 or 2 array. Rank 1 are assumed to describe one dimensional data, rank 2 multidimensional data, in which case one row is one observation. k : int Number of samples to generate. rng : or Random number generator. Returns ------- x : ndarray A 'k' by 'N' containing the initial centroids",
    "type": "function",
    "file_path": "scipy\\scipy\\cluster\\vq.py",
    "ast_data": "FunctionDef name:_kpoints arg:data arg:k arg:rng arg:xp arguments arg arg arg arg Assign Call Call Assign Call Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "memory_usage",
    "source_code": "def memory_usage(device: Optional[Union[Device, int]]=None) -> int:\n    if not torch.version.hip:\n        handle = _get_pynvml_handler()\n        device = _get_nvml_device_index(device)\n        handle = pynvml.nvmlDeviceGetHandleByIndex(device)\n        return pynvml.nvmlDeviceGetUtilizationRates(handle).memory\n    else:\n        return _get_amdsmi_memory_usage(device)",
    "docstring": "Return the percent of time over the past sample period during which global (device) memory was being read or written as given by . Args: device (torch.device or int, optional): selected device. Returns statistic for the current device, given by :func:, if :attr: is `` (default). Warning: Each sample period may be between 1 second and 1/6 second, depending on the product being queried.",
    "type": "function",
    "file_path": "pytorch\\torch\\cuda\\__init__.py",
    "ast_data": "FunctionDef name:memory_usage arg:device arguments arg If Assign Call Assign Call Assign Call Return return:yes Call Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "_x_slices",
    "source_code": "def _x_slices(self, x: np.ndarray, k_off: int, p0: int, p1: int, padding: PAD_TYPE) -> Generator[np.ndarray, None, None]:\n    if padding not in (padding_types := get_args(PAD_TYPE)):\n        raise ValueError(f'Parameter padding={padding!r} not in {padding_types}!')\n    pad_kws: dict[str, dict] = {'zeros': dict(mode='constant', constant_values=(0, 0)), 'edge': dict(mode='edge'), 'even': dict(mode='reflect', reflect_type='even'), 'odd': dict(mode='reflect', reflect_type='odd')}\n    n, n1 = (x.shape[-1], (p1 - p0) * self.hop)\n    k0 = p0 * self.hop - self.m_num_mid + k_off\n    k1 = k0 + n1 + self.m_num\n    i0, i1 = (max(k0, 0), min(k1, n))\n    pad_width = [(0, 0)] * (x.ndim - 1) + [(-min(k0, 0), max(k1 - n, 0))]\n    x1 = np.pad(x[..., i0:i1], pad_width, **pad_kws[padding])\n    for k_ in range(0, n1, self.hop):\n        yield x1[..., k_:k_ + self.m_num]",
    "docstring": "Generate signal slices along last axis of . This method is only used by . The parameters are described in .",
    "type": "method",
    "file_path": "scipy\\scipy\\signal\\_short_time_fft.py",
    "ast_data": "FunctionDef name:_x_slices arg:self arg:x arg:k_off arg:p0 arg:p1 arg:padding arguments arg arg arg arg arg arg If Compare Call Raise Call Call Call Call Call Assign Assign Assign Assign Call Call Assign Call Call Assign Call For Call"
  },
  {
    "library": "authlib",
    "name": "validate_token_request",
    "source_code": "def validate_token_request(self):\n    client = self.authenticate_token_endpoint_client()\n    log.debug('Validate token request of %r', client)\n    if not client.check_grant_type(self.GRANT_TYPE):\n        raise UnauthorizedClientError(f\"The client is not authorized to use 'grant_type={self.GRANT_TYPE}'\")\n    params = self.request.form\n    if 'username' not in params:\n        raise InvalidRequestError(\"Missing 'username' in request.\")\n    if 'password' not in params:\n        raise InvalidRequestError(\"Missing 'password' in request.\")\n    log.debug('Authenticate user of %r', params['username'])\n    user = self.authenticate_user(params['username'], params['password'])\n    if not user:\n        raise InvalidRequestError(\"Invalid 'username' or 'password' in request.\")\n    self.request.client = client\n    self.request.user = user\n    self.validate_requested_scope()",
    "docstring": "The client makes a request to the token endpoint by adding the following parameters using the \"application/x-www-form-urlencoded\" format per Appendix B with a character encoding of UTF-8 in the HTTP request entity-body: grant_type REQUIRED. Value MUST be set to \"password\". username REQUIRED. The resource owner username. password REQUIRED. The resource owner password. scope OPTIONAL. The scope of the access request as described by Section 3.3. If the client type is confidential or the client was issued client credentials (or assigned other authentication requirements), the client MUST authenticate with the authorization server as described in Section 3.2.1. For example, the client makes the following HTTP request using transport-layer security (with extra line breaks for display purposes only): .. code-block:: http POST /token HTTP/1.1 Host: server.example.com Authorization: Basic czZCaGRSa3F0MzpnWDFmQmF0M2JW Content-Type: application/x-www-form-urlencoded grant_type=password&username=johndoe&password=A3ddj3w",
    "type": "method",
    "file_path": "authlib\\authlib\\oauth2\\rfc6749\\grants\\resource_owner_password_credentials.py",
    "ast_data": "FunctionDef name:validate_token_request arg:self arguments arg Assign Call Call If Call Raise Call Assign If Compare Raise Call If Compare Raise Call Call Assign Call If Raise Call Assign Assign Call"
  },
  {
    "library": "numpy",
    "name": "BlasOptNotFoundError",
    "source_code": "class BlasOptNotFoundError(NotFoundError):\n    pass",
    "docstring": "Optimized (vendor) Blas libraries are not found. Falls back to netlib Blas library which has worse performance. A better performance should be easily gained by switching Blas library.",
    "type": "class",
    "file_path": "numpy\\numpy\\distutils\\system_info.py",
    "ast_data": "ClassDef name:BlasOptNotFoundError"
  },
  {
    "library": "django",
    "name": "get_fixed_timezone",
    "source_code": "def get_fixed_timezone(offset):\n    if isinstance(offset, timedelta):\n        offset = offset.total_seconds() // 60\n    sign = '-' if offset < 0 else '+'\n    hhmm = '%02d%02d' % divmod(abs(offset), 60)\n    name = sign + hhmm\n    return timezone(timedelta(minutes=offset), name)",
    "docstring": "Return a tzinfo instance with a fixed offset from UTC.",
    "type": "function",
    "file_path": "django\\django\\utils\\timezone.py",
    "ast_data": "FunctionDef name:get_fixed_timezone arg:offset arguments arg If Call Assign Call Assign Compare Assign Call Call Assign Return return:yes Call Call"
  },
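A stdlib-only sketch of the same computation, assuming minutes as the offset unit as in the helper above (`fixed_timezone` is a hypothetical name):

```python
from datetime import timedelta, timezone

def fixed_timezone(offset):
    # offset in minutes east of UTC, as in the Django helper
    if isinstance(offset, timedelta):
        offset = offset.total_seconds() // 60
    sign = "-" if offset < 0 else "+"
    hhmm = "%02d%02d" % divmod(abs(offset), 60)
    return timezone(timedelta(minutes=offset), sign + hhmm)

print(fixed_timezone(330))   # +0530
print(fixed_timezone(-270))  # -0430
```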
  {
    "library": "scipy",
    "name": "ellip_normal",
    "source_code": "def ellip_normal(h2, k2, n, p):\n    with np.errstate(all='ignore'):\n        return _ellip_normal_vec(h2, k2, n, p)",
    "docstring": "Ellipsoidal harmonic normalization constants gamma^p_n The normalization constant is defined as .. math:: \\gamma^p_n=8\\int_{0}^{h}dx\\int_{h}^{k}dy \\frac{(y^2-x^2)(E^p_n(y)E^p_n(x))^2}{\\sqrt((k^2-y^2)(y^2-h^2)(h^2-x^2)(k^2-x^2)} Parameters ---------- h2 : float `\\gamma^p_n` See Also -------- ellip_harm, ellip_harm_2 Notes ----- .. versionadded:: 0.15.0 Examples -------- >>> from scipy.special import ellip_normal >>> w = ellip_normal(5,8,3,7) >>> w 1723.38796997",
    "type": "function",
    "file_path": "scipy\\scipy\\special\\_ellip_harm.py",
    "ast_data": "FunctionDef name:ellip_normal arg:h2 arg:k2 arg:n arg:p arguments arg arg arg arg With Call Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "generate",
    "source_code": "def generate(self, v) -> str:\n    val = v.tostring(self.encoding)\n    return f'({self.lhs} {self.op} {val})'",
    "docstring": "create and return the op string for this TermValue",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\computation\\pytables.py",
    "ast_data": "FunctionDef name:generate arg:self arg:v arguments arg arg Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "AddValue",
    "source_code": "def AddValue(self, val):\n    if val.name in self._values:\n        result = self._external_values.get(val.name)\n        result = val if result is None else result\n    else:\n        result = val\n        self._values.add(val.name)\n        if self._outer_context:\n            result = self._outer_context.AddValue(val)\n            self._values.add(result.name)\n            self._external_values[result.name] = result\n        with ops.control_dependencies(None):\n            result = _SwitchRefOrTensor(result, self._pred)[self._branch]\n            if self._outer_context:\n                self._outer_context.AddInnerOp(result.op)\n        result.op.graph.prevent_fetching(result.op)\n        result.op._set_control_flow_context(self)\n        ctxt = self\n        while ctxt is not None:\n            ctxt._values.add(result.name)\n            ctxt = ctxt._outer_context\n        self._external_values[val.name] = result\n    return result",
    "docstring": "Add to the current context and its outer context recursively.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\control_flow_ops.py",
    "ast_data": "FunctionDef name:AddValue arg:self arg:val arguments arg arg If Compare Assign Call Assign Compare Assign Call If Assign Call Call Assign With Call Assign Call If Call Call Call Assign While Compare Call Assign Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "unique",
    "source_code": "@deprecation.deprecated(None, 'Use `tf.data.Dataset.unique(...)')\n@tf_export('data.experimental.unique')\ndef unique():\n\n    def _apply_fn(dataset):\n        return dataset.unique()\n    return _apply_fn",
    "docstring": "Creates a from another , discarding duplicates. Use this transformation to produce a dataset that contains one instance of each unique element in the input. For example: Returns: A transformation function, which can be passed to .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\data\\experimental\\ops\\unique.py",
    "ast_data": "FunctionDef name:unique arguments FunctionDef name:_apply_fn arg:dataset arguments arg Return return:yes Call Return return:yes Call Call"
  },
  {
    "library": "scikit-learn",
    "name": "inverse_transform",
    "source_code": "def inverse_transform(self, X):\n    check_is_fitted(self)\n    X = self._check_input(X, in_fit=False, check_shape=True)\n    if self.standardize:\n        X = self._scaler.inverse_transform(X)\n    inv_fun = {'box-cox': inv_boxcox, 'yeo-johnson': self._yeo_johnson_inverse_transform}[self.method]\n    for i, lmbda in enumerate(self.lambdas_):\n        with np.errstate(invalid='ignore'):\n            X[:, i] = inv_fun(X[:, i], lmbda)\n    return X",
    "docstring": "Apply the inverse power transformation using the fitted lambdas. The inverse of the Box-Cox transformation is given by:: if lambda_ == 0: X_original = exp(X_trans) else: X_original = (X * lambda_ + 1) ** (1 / lambda_) The inverse of the Yeo-Johnson transformation is given by:: if X >= 0 and lambda_ == 0: X_original = exp(X) - 1 elif X >= 0 and lambda_ != 0: X_original = (X * lambda_ + 1) ** (1 / lambda_) - 1 elif X < 0 and lambda_ != 2: X_original = 1 - (-(2 - lambda_) * X + 1) ** (1 / (2 - lambda_)) elif X < 0 and lambda_ == 2: X_original = 1 - exp(-X) Parameters ---------- X : array-like of shape (n_samples, n_features) The transformed data. Returns ------- X_original : ndarray of shape (n_samples, n_features) The original data.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\preprocessing\\_data.py",
    "ast_data": "FunctionDef name:inverse_transform arg:self arg:X arguments arg arg Call Assign Call If Assign Call Assign For Call With Call Assign Call Return return:yes"
  },
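A round-trip check of the fit/transform/inverse pair described above, using scikit-learn's public `PowerTransformer` on strictly positive random data so that both methods are valid:

```python
import numpy as np
from sklearn.preprocessing import PowerTransformer

rng = np.random.RandomState(0)
X = rng.lognormal(size=(100, 2))  # strictly positive, so box-cox applies

for method in ("box-cox", "yeo-johnson"):
    pt = PowerTransformer(method=method, standardize=True)
    X_trans = pt.fit_transform(X)
    X_back = pt.inverse_transform(X_trans)
    print(method, np.allclose(X, X_back))  # True for both methods
```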
  {
    "library": "matplotlib",
    "name": "get_metrics",
    "source_code": "def get_metrics(self, idx):\n    return self._glyph_metrics.get(idx)",
    "docstring": "Return a glyph's TexMetrics, or None if unavailable.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\dviread.py",
    "ast_data": "FunctionDef name:get_metrics arg:self arg:idx arguments arg arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "res_name",
    "source_code": "def res_name(self, ns, types_ns, name):\n    raise NotImplementedError('subclasses must implement')",
    "docstring": "Resolves the type/value an external (e.g. closure, global) variable. Args: ns: namespace types_ns: types namespace name: symbol name Returns: Tuple (type, static_value). The first element is the type to use for inference. The second is the static value to use. Return None to treat it as unknown.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\autograph\\pyct\\static_analysis\\type_inference.py",
    "ast_data": "FunctionDef name:res_name arg:self arg:ns arg:types_ns arg:name arguments arg arg arg arg Raise Call"
  },
  {
    "library": "pytorch",
    "name": "online_softmax_combine",
    "source_code": "@triton.jit\ndef online_softmax_combine(lhs_max, lhs_sum, rhs_max, use_fast_math: tl.constexpr):\n    out_max = maximum(lhs_max, rhs_max)\n    lhs_scale = tl.where(out_max == float('-inf'), 1.0, exp(lhs_max - out_max, use_fast_math))\n    rhs_scale = tl.where(out_max == float('-inf'), 1.0, exp(rhs_max - out_max, use_fast_math))\n    out_sum = lhs_sum * lhs_scale + rhs_scale\n    return (out_max, out_sum)",
    "docstring": "When we do combine, we assume lhs is the accumulator and rhs is the next block of data. Then rhs_sum is always 1. With that assumption, we can save some registers and computation.",
    "type": "function",
    "file_path": "pytorch\\torch\\_inductor\\runtime\\triton_helpers.py",
    "ast_data": "FunctionDef name:online_softmax_combine arg:lhs_max arg:lhs_sum arg:rhs_max arg:use_fast_math arguments arg arg arg arg Assign Call Assign Call Compare Call Call Assign Call Compare Call Call Assign Return return:yes"
  },
  {
    "library": "django",
    "name": "x",
    "source_code": "@property\ndef x(self):\n    return self._listarr(capi.getx)",
    "docstring": "Return the X coordinates in a list.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\gdal\\geometries.py",
    "ast_data": "FunctionDef name:x arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "__call__",
    "source_code": "def __call__(self, dim=None, seed=None):\n    return uniform_direction_frozen(dim, seed=seed)",
    "docstring": "Create a frozen n-dimensional uniform direction distribution. See for more information.",
    "type": "method",
    "file_path": "scipy\\scipy\\stats\\_multivariate.py",
    "ast_data": "FunctionDef name:__call__ arg:self arg:dim arg:seed arguments arg arg arg Return return:yes Call"
  },
  {
    "library": "sphinx",
    "name": "get_inventory_and_name_suffix",
    "source_code": "def get_inventory_and_name_suffix(self, name: str) -> tuple[str | None, str]:\n    assert name.startswith('external'), name\n    suffix = name[9:]\n    if name[8] == '+':\n        inv_name, suffix = suffix.split(':', 1)\n        return (inv_name, suffix)\n    elif name[8] == ':':\n        return (None, suffix)\n    else:\n        msg = f'Malformed :external: role name: {name}'\n        raise ValueError(msg)",
    "docstring": "Extract an inventory name (if any) and `` -- any inventory, explicit domain and name.",
    "type": "method",
    "file_path": "sphinx\\sphinx\\ext\\intersphinx\\_resolve.py",
    "ast_data": "FunctionDef name:get_inventory_and_name_suffix arg:self arg:name arguments arg arg Call Assign If Compare Assign Call Return return:yes If Compare Return return:yes Assign Raise Call"
  },
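A standalone sketch of the branching above (`split_external_role` is a hypothetical name; the role strings are illustrative):

```python
def split_external_role(name):
    # Mirrors the method above: 'external+inv:rest' vs 'external:rest'.
    assert name.startswith("external"), name
    suffix = name[9:]
    if name[8] == "+":
        inv_name, suffix = suffix.split(":", 1)
        return inv_name, suffix
    if name[8] == ":":
        return None, suffix
    raise ValueError(f"Malformed :external: role name: {name}")

print(split_external_role("external+numpy:py:func"))  # ('numpy', 'py:func')
print(split_external_role("external:py:func"))        # (None, 'py:func')
```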
  {
    "library": "tensorflow",
    "name": "_detect_nan_inf",
    "source_code": "def _detect_nan_inf(tensor):\n    if tensor.dtype.is_floating:\n        mask = math_ops.reduce_any(gen_math_ops.logical_or(gen_math_ops.is_nan(tensor), gen_math_ops.is_inf(tensor)))\n        output_tensor = cond.cond(mask, lambda: constant_op.constant([1.0]), lambda: constant_op.constant([0.0]))\n    else:\n        output_tensor = constant_op.constant([0.0])\n    return output_tensor",
    "docstring": "Trace function for detecting any NaN/Inf in the tensor.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\tpu\\tensor_tracer.py",
    "ast_data": "FunctionDef name:_detect_nan_inf arg:tensor arguments arg If Assign Call Call Call Call Assign Call arguments Call arguments Call Assign Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "binary_cross_entropy",
    "source_code": "def binary_cross_entropy(input: Tensor, target: Tensor, weight: Optional[Tensor]=None, size_average: Optional[bool]=None, reduce: Optional[bool]=None, reduction: str='mean') -> Tensor:\n    if has_torch_function_variadic(input, target, weight):\n        return handle_torch_function(binary_cross_entropy, (input, target, weight), input, target, weight=weight, size_average=size_average, reduce=reduce, reduction=reduction)\n    if size_average is not None or reduce is not None:\n        reduction_enum = _Reduction.legacy_get_enum(size_average, reduce)\n    else:\n        reduction_enum = _Reduction.get_enum(reduction)\n    if target.size() != input.size():\n        raise ValueError(f'Using a target size ({target.size()}) that is different to the input size ({input.size()}) is deprecated. Please ensure they have the same size.')\n    if weight is not None:\n        new_size = _infer_size(target.size(), weight.size())\n        weight = weight.expand(new_size)\n    return torch._C._nn.binary_cross_entropy(input, target, weight, reduction_enum)",
    "docstring": "Compute Binary Cross Entropy between the target and input probabilities. See :class: for details. Args: input: Tensor of arbitrary shape as probabilities. target: Tensor of the same shape as input with values between 0 and 1. weight (Tensor, optional): a manual rescaling weight if provided it's repeated to match input tensor shape size_average (bool, optional): Deprecated (see :attr:). reduce (bool, optional): Deprecated (see :attr:). reduction (str, optional): Specifies the reduction to apply to the output: `size_averagereducereduction` Examples:: >>> input = torch.randn(3, 2, requires_grad=True) >>> target = torch.rand(3, 2, requires_grad=False) >>> loss = F.binary_cross_entropy(torch.sigmoid(input), target) >>> loss.backward()",
    "type": "function",
    "file_path": "pytorch\\torch\\nn\\functional.py",
    "ast_data": "FunctionDef name:binary_cross_entropy arg:input arg:target arg:weight arg:size_average arg:reduce arg:reduction arguments arg arg arg arg arg arg If Call Return return:yes Call If BoolOp Compare Compare Assign Call Assign Call If Compare Call Call Raise Call Call Call If Compare Assign Call Call Call Assign Call Return return:yes Call"
  },
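A quick numerical check that `F.binary_cross_entropy` with the default 'mean' reduction matches the textbook formula -(t*log(p) + (1-t)*log(1-p)):

```python
import torch
import torch.nn.functional as F

p = torch.sigmoid(torch.randn(3, 2))  # probabilities in (0, 1)
t = torch.rand(3, 2)                  # soft targets in [0, 1]

loss = F.binary_cross_entropy(p, t)   # default reduction='mean'
manual = -(t * p.log() + (1 - t) * (1 - p).log()).mean()
print(torch.allclose(loss, manual))   # True
```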
  {
    "library": "pytorch",
    "name": "Logger",
    "source_code": "class Logger(nn.Module):\n\n    def __init__(self):\n        super().__init__()\n        self.stats = {}\n        self.dtype = torch.quint8\n\n    def forward(self, x):\n        pass",
    "docstring": "Base class for stats logging",
    "type": "class",
    "file_path": "pytorch\\torch\\ao\\ns\\_numeric_suite.py",
    "ast_data": "ClassDef name:Logger FunctionDef name:__init__ arg:self arguments arg Call Call Assign Assign FunctionDef name:forward arg:self arg:x arguments arg arg"
  },
  {
    "library": "numpy",
    "name": "_hook",
    "source_code": "def _hook(ctx: AnalyzeTypeContext) -> mypy.types.Type:\n    typ, _, api = ctx\n    name = typ.name.split('.')[-1]\n    name_new = _PRECISION_DICT[f'{_MODULE}._nbit.{name}']\n    return cast('TypeAnalyser', api).named_type(name_new)",
    "docstring": "Replace a type-alias with a concrete `` subclass.",
    "type": "function",
    "file_path": "numpy\\numpy\\typing\\mypy_plugin.py",
    "ast_data": "FunctionDef name:_hook arg:ctx arguments arg Assign Assign Call Assign Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "single_batch_iterator",
    "source_code": "def single_batch_iterator(strategy, x, y=None, sample_weight=None, class_weight=None):\n    x, y, sample_weight = _process_tensorlike((x, y, sample_weight))\n    if y is None:\n        data = (x,)\n    elif sample_weight is None:\n        data = (x, y)\n    else:\n        data = (x, y, sample_weight)\n    _check_data_cardinality(data)\n    dataset = dataset_ops.DatasetV2.from_tensors(data)\n    if class_weight:\n        dataset = dataset.map(_make_class_weight_map_fn(class_weight))\n    dataset = strategy.experimental_distribute_dataset(dataset)\n    return iter(dataset)",
    "docstring": "Creates a single-batch dataset.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\data_adapter.py",
    "ast_data": "FunctionDef name:single_batch_iterator arg:strategy arg:x arg:y arg:sample_weight arg:class_weight arguments arg arg arg arg arg Assign Call If Compare Assign If Compare Assign Assign Call Assign Call If Assign Call Call Assign Call Return return:yes Call"
  },
  {
    "library": "cherrypy",
    "name": "ChannelFailures",
    "source_code": "class ChannelFailures(Exception):\n    delimiter = '\\n'\n\n    def __init__(self, *args, **kwargs):\n        super(ChannelFailures, self).__init__(*args, **kwargs)\n        self._exceptions = list()\n\n    def handle_exception(self):\n        self._exceptions.append(sys.exc_info()[1])\n\n    def get_instances(self):\n        return self._exceptions[:]\n\n    def __str__(self):\n        exception_strings = map(repr, self.get_instances())\n        return self.delimiter.join(exception_strings)\n    __repr__ = __str__\n\n    def __bool__(self):\n        return bool(self._exceptions)\n    __nonzero__ = __bool__",
    "docstring": "Exception raised during errors on Bus.publish().",
    "type": "class",
    "file_path": "cherrypy\\cherrypy\\process\\wspbus.py",
    "ast_data": "ClassDef name:ChannelFailures Assign FunctionDef name:__init__ arg:self arguments arg arg arg Call Call Assign Call FunctionDef name:handle_exception arg:self arguments arg Call Call FunctionDef name:get_instances arg:self arguments arg Return return:yes FunctionDef name:__str__ arg:self arguments arg Assign Call Call Return return:yes Call Assign FunctionDef name:__bool__ arg:self arguments arg Return return:yes Call Assign"
  },
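The collect-then-raise pattern used by the class above, sketched in isolation (`Failures` and the listener callables are illustrative):

```python
import sys

class Failures(Exception):
    # Illustrative reimplementation of the collect-then-report pattern.
    def __init__(self):
        super().__init__()
        self._excs = []
    def handle_exception(self):
        self._excs.append(sys.exc_info()[1])
    def __bool__(self):
        return bool(self._excs)
    def __str__(self):
        return "\n".join(map(repr, self._excs))

failures = Failures()
for listener in (lambda: 1 / 0, lambda: None, lambda: int("x")):
    try:
        listener()
    except Exception:
        failures.handle_exception()
if failures:
    print(failures)  # both errors reported together, one per line
```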
  {
    "library": "pytorch",
    "name": "_get_cmake_command",
    "source_code": "@staticmethod\ndef _get_cmake_command() -> str:\n    cmake_command = 'cmake'\n    if IS_WINDOWS:\n        return cmake_command\n    cmake3_version = CMake._get_version(which('cmake3'))\n    cmake_version = CMake._get_version(which('cmake'))\n    _cmake_min_version = LooseVersion('3.18.0')\n    if all((ver is None or ver < _cmake_min_version for ver in [cmake_version, cmake3_version])):\n        raise RuntimeError('no cmake or cmake3 with version >= 3.18.0 found')\n    if cmake3_version is None:\n        cmake_command = 'cmake'\n    elif cmake_version is None:\n        cmake_command = 'cmake3'\n    elif cmake3_version >= cmake_version:\n        cmake_command = 'cmake3'\n    else:\n        cmake_command = 'cmake'\n    return cmake_command",
    "docstring": "Returns cmake command.",
    "type": "method",
    "file_path": "pytorch\\tools\\setup_helpers\\cmake.py",
    "ast_data": "FunctionDef name:_get_cmake_command arguments Assign If Return return:yes Assign Call Call Assign Call Call Assign Call If Call BoolOp Compare Compare Raise Call If Compare Assign If Compare Assign If Compare Assign Assign Return return:yes"
  },
  {
    "library": "scipy",
    "name": "mean",
    "source_code": "def mean(self, mu=0, lmbda=1, a=1, b=1):\n    invalid, args = self._process_shapes(mu, lmbda, a, b)\n    mu, lmbda, a, b = args\n    invalid |= ~(a > 1)\n    mean_x = np.asarray(mu).copy()\n    mean_s2 = np.asarray(b / (a - 1))\n    mean_x[invalid] = np.nan\n    mean_s2[invalid] = np.nan\n    return (mean_x[()], mean_s2[()])",
    "docstring": "The mean of the distribution. Parameters ---------- mu, lmbda, a, b : array_like, optional Shape parameters. and must be greater than zero, and must be greater than one. Returns ------- x, s2 : ndarray The mean of the distribution.",
    "type": "method",
    "file_path": "scipy\\scipy\\stats\\_multivariate.py",
    "ast_data": "FunctionDef name:mean arg:self arg:mu arg:lmbda arg:a arg:b arguments arg arg arg arg arg Assign Call Assign Compare Assign Call Call Assign Call Assign Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "pad_batch",
    "source_code": "def pad_batch(self, *dataset_batch_elements):\n\n    def _pad(batch):\n        padded_dict_batch = {}\n        if isinstance(batch, dict):\n            for key, value in batch.items():\n                padded_dict_batch[key] = _pad(value)\n            return padded_dict_batch\n        rank = len(batch.shape)\n        assert rank > 0\n        missing_count = self.padded_batch_size - self.get_real_batch_size(batch)\n        padding = backend.stack([[0, missing_count]] + [[0, 0]] * (rank - 1))\n        return array_ops.pad(batch, padding, 'constant')\n    if len(dataset_batch_elements) == 1:\n        return _pad(dataset_batch_elements[0])\n    batch_elements = []\n    for batch_element in dataset_batch_elements:\n        batch_elements.append(_pad(batch_element))\n    return tuple(batch_elements)",
    "docstring": "Pads out the batch dimension of a tensor to the complete batch size.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\partial_batch_padding_handler.py",
    "ast_data": "FunctionDef name:pad_batch arg:self arguments arg arg FunctionDef name:_pad arg:batch arguments arg Assign If Call For Call Assign Call Return return:yes Assign Call Compare Assign Call Assign Call Return return:yes Call If Compare Call Return return:yes Call Assign For Call Call Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "_mean",
    "source_code": "def _mean(self, dim, df, scale):\n    return df * scale",
    "docstring": "Mean of the Wishart distribution. Parameters ---------- dim : int Dimension of the scale matrix %(_doc_default_callparams)s Notes ----- As this function does no argument checking, it should not be called directly; use 'mean' instead.",
    "type": "method",
    "file_path": "scipy\\scipy\\stats\\_multivariate.py",
    "ast_data": "FunctionDef name:_mean arg:self arg:dim arg:df arg:scale arguments arg arg arg arg Return return:yes"
  },
  {
    "library": "pandas",
    "name": "_first_fill_value_loc",
    "source_code": "def _first_fill_value_loc(self):\n    if len(self) == 0 or self.sp_index.npoints == len(self):\n        return -1\n    indices = self.sp_index.indices\n    if not len(indices) or indices[0] > 0:\n        return 0\n    diff = np.r_[np.diff(indices), 2]\n    return indices[(diff > 1).argmax()] + 1",
    "docstring": "Get the location of the first fill value. Returns ------- int",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\arrays\\sparse\\array.py",
    "ast_data": "FunctionDef name:_first_fill_value_loc arg:self arguments arg If BoolOp Compare Call Compare Call Return return:yes Assign If BoolOp Call Compare Return return:yes Assign Call Return return:yes Call Compare"
  },
  {
    "library": "sphinx",
    "name": "write_doc",
    "source_code": "def write_doc(self, docname: str, doctree: nodes.document) -> None:\n    self.fix_ids(doctree)\n    self.add_visible_links(doctree, self.config.epub_show_urls)\n    super().write_doc(docname, doctree)",
    "docstring": "Write one document file. This method is overwritten in order to fix fragment identifiers and to add visible external links.",
    "type": "method",
    "file_path": "sphinx\\sphinx\\builders\\_epub_base.py",
    "ast_data": "FunctionDef name:write_doc arg:self arg:docname arg:doctree arguments arg arg arg Call Call Call Call"
  },
  {
    "library": "scrapy",
    "name": "get_processed_request",
    "source_code": "def get_processed_request(self, request: Request, response: Response | None) -> Request | None:\n    return request",
    "docstring": "Return a processed request from the spider output. This method is called with a single request from the start seeds or the spider output. It should return the same or a different request, or `~scrapy.Request~scrapy.http.Response`",
    "type": "method",
    "file_path": "scrapy\\scrapy\\spidermiddlewares\\base.py",
    "ast_data": "FunctionDef name:get_processed_request arg:self arg:request arg:response arguments arg arg arg Return return:yes"
  },
  {
    "library": "scipy",
    "name": "n_th_moment",
    "source_code": "def n_th_moment(n, a, b):\n    ab = np.asarray([a, b])\n    pA, pB = self._pdf(ab, a, b)\n    probs = np.asarray([pA, -pB])\n    cond = probs != 0\n    moments = [0, 1]\n    for k in range(1, n + 1):\n        vals = xpx.apply_where(cond, (probs, ab), lambda x, y: x * y ** (k - 1), fill_value=0)\n        mk = np.sum(vals) + (k - 1) * moments[-2]\n        moments.append(mk)\n    return moments[-1]",
    "docstring": "Returns n-th moment. Defined only if n >= 0. Function cannot broadcast due to the loop over n",
    "type": "method",
    "file_path": "scipy\\scipy\\stats\\_continuous_distns.py",
    "ast_data": "FunctionDef name:n_th_moment arg:n arg:a arg:b arguments arg arg arg Assign Call Assign Call Assign Call Assign Compare Assign For Call Assign Call arguments arg arg Assign Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "update",
    "source_code": "def update(self):\n    if self.ivars['istep'] == 0:\n        X_norm = float(torch.norm(self.X))\n        iX_norm = X_norm ** (-1)\n        A_norm = float(torch.norm(_utils.matmul(self.A, self.X))) * iX_norm\n        B_norm = float(torch.norm(_utils.matmul(self.B, self.X))) * iX_norm\n        self.fvars['X_norm'] = X_norm\n        self.fvars['A_norm'] = A_norm\n        self.fvars['B_norm'] = B_norm\n        self.ivars['iterations_left'] = self.iparams['niter']\n        self.ivars['converged_count'] = 0\n        self.ivars['converged_end'] = 0\n    if self.method == 'ortho':\n        self._update_ortho()\n    else:\n        self._update_basic()\n    self.ivars['iterations_left'] = self.ivars['iterations_left'] - 1\n    self.ivars['istep'] = self.ivars['istep'] + 1",
    "docstring": "Set and update iteration variables.",
    "type": "method",
    "file_path": "pytorch\\torch\\_lobpcg.py",
    "ast_data": "FunctionDef name:update arg:self arguments arg If Compare Assign Call Call Assign Assign Call Call Call Assign Call Call Call Assign Assign Assign Assign Assign Assign If Compare Call Call Assign Assign"
  },
  {
    "library": "tensorflow",
    "name": "heartbeat_enabled",
    "source_code": "@tf_export('experimental.dtensor.heartbeat_enabled', v1=[])\ndef heartbeat_enabled() -> bool:\n    return os.environ.get(_DT_HEARTBEAT_ENABLED, 'true').lower() in ('true', '1')",
    "docstring": "Returns true if DTensor heartbeat service is enabled.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\dtensor\\python\\config.py",
    "ast_data": "FunctionDef name:heartbeat_enabled arguments Return return:yes Compare Call Call Call"
  },
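The same environment-flag convention, sketched as a generic helper (`flag_enabled` and the variable names are hypothetical):

```python
import os

def flag_enabled(var, default="true"):
    # Same truthiness convention as the DTensor helper above:
    # only 'true' and '1' (case-insensitive) count as enabled.
    return os.environ.get(var, default).lower() in ("true", "1")

os.environ["MY_FLAG"] = "1"
print(flag_enabled("MY_FLAG"))     # True
print(flag_enabled("UNSET_FLAG"))  # True (defaults on)
```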
  {
    "library": "scikit-learn",
    "name": "_validate_X_predict",
    "source_code": "def _validate_X_predict(self, X):\n    check_is_fitted(self)\n    if self.estimators_[0]._support_missing_values(X):\n        ensure_all_finite = 'allow-nan'\n    else:\n        ensure_all_finite = True\n    X = validate_data(self, X, dtype=DTYPE, accept_sparse='csr', reset=False, ensure_all_finite=ensure_all_finite)\n    if issparse(X) and (X.indices.dtype != np.intc or X.indptr.dtype != np.intc):\n        raise ValueError('No support for np.int64 index based sparse matrices')\n    return X",
    "docstring": "Validate X whenever one tries to predict, apply, predict_proba.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\ensemble\\_forest.py",
    "ast_data": "FunctionDef name:_validate_X_predict arg:self arg:X arguments arg arg Call If Call Assign Assign Assign Call If BoolOp Call BoolOp Compare Compare Raise Call Return return:yes"
  },
  {
    "library": "scrapy",
    "name": "close",
    "source_code": "def close(self, reason: str) -> Deferred[None] | None:\n    if self.dqs is not None:\n        state = self.dqs.close()\n        assert isinstance(self.dqdir, str)\n        self._write_dqs_state(self.dqdir, state)\n    return self.df.close(reason)",
    "docstring": "(1) dump pending requests to disk if there is a disk queue (2) return the result of the dupefilter's `` method",
    "type": "method",
    "file_path": "scrapy\\scrapy\\core\\scheduler.py",
    "ast_data": "FunctionDef name:close arg:self arg:reason arguments arg arg If Compare Assign Call Call Call Return return:yes Call"
  },
  {
    "library": "pygame",
    "name": "array_red",
    "source_code": "def array_red(surface):\n    size = surface.get_size()\n    array = numpy.empty(size, numpy.uint8)\n    surface_to_array(array, surface, 'R')\n    return array",
    "docstring": "pygame.surfarray.array_red(Surface): return array copy pixel red into a 2d array Copy the pixel red values from a Surface into a 2D array. This will work for any type of Surface format. This function will temporarily lock the Surface as pixels are copied (see the Surface.lock - lock the Surface memory for pixel access method).",
    "type": "function",
    "file_path": "pygame\\src_py\\surfarray.py",
    "ast_data": "FunctionDef name:array_red arg:surface arguments arg Assign Call Assign Call Call Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "_score",
    "source_code": "def _score(self, method_caller, estimator, X, y_true, **kwargs):\n    pos_label = self._get_pos_label()\n    y_score = method_caller(estimator, self._response_method, X, pos_label=pos_label)\n    scoring_kwargs = {**self._kwargs, **kwargs}\n    if isinstance(self._thresholds, Integral):\n        potential_thresholds = np.linspace(np.min(y_score), np.max(y_score), self._thresholds)\n    else:\n        potential_thresholds = np.asarray(self._thresholds)\n    score_thresholds = [self._sign * self._score_func(y_true, _threshold_scores_to_class_labels(y_score, th, estimator.classes_, pos_label), **scoring_kwargs) for th in potential_thresholds]\n    return (np.array(score_thresholds), potential_thresholds)",
    "docstring": "Evaluate predicted target values for X relative to y_true. Parameters ---------- method_caller : callable Returns predictions given an estimator, method name, and other arguments, potentially caching results. estimator : object Trained estimator to use for scoring. X : {array-like, sparse matrix} of shape (n_samples, n_features) Test data that will be fed to estimator.predict. y_true : array-like of shape (n_samples,) Gold standard target values for X. **kwargs : dict Other parameters passed to the scorer. Refer to :func: for more details. Returns ------- scores : ndarray of shape (thresholds,) The scores associated to each threshold. potential_thresholds : ndarray of shape (thresholds,) The potential thresholds used to compute the scores.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\metrics\\_scorer.py",
    "ast_data": "FunctionDef name:_score arg:self arg:method_caller arg:estimator arg:X arg:y_true arguments arg arg arg arg arg arg Assign Call Assign Call Assign If Call Assign Call Call Call Assign Call Assign Call Call Return return:yes Call"
  },
  {
    "library": "django",
    "name": "x",
    "source_code": "@x.setter\ndef x(self, value):\n    self._cs.setOrdinate(0, 0, value)",
    "docstring": "Set the X component of the Point.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\geos\\point.py",
    "ast_data": "FunctionDef name:x arg:self arg:value arguments arg arg Call"
  },
  {
    "library": "tensorflow",
    "name": "_from_pydict",
    "source_code": "@classmethod\ndef _from_pydict(cls, pyval, typespec, path_so_far):\n    if typespec is None:\n        fields = dict(((k, cls._from_pyval(v, None, path_so_far + (k,))) for k, v in pyval.items()))\n    else:\n        spec_shape = typespec._shape\n        field_specs = typespec._field_specs\n        if not (isinstance(typespec, StructuredTensor.Spec) and spec_shape.rank == 0 and (set(pyval) == set(field_specs))):\n            raise ValueError('Value at %r does not match typespec: %r vs %r' % (path_so_far, pyval, typespec))\n        fields = dict(((k, cls._from_pyval(v, field_specs[k], path_so_far + (k,))) for k, v in pyval.items()))\n    return StructuredTensor.from_fields(fields=fields, shape=(), validate=False)",
    "docstring": "Converts python dictionary to a StructuredTensor with rank=0.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\structured\\structured_tensor.py",
    "ast_data": "FunctionDef name:_from_pydict arg:cls arg:pyval arg:typespec arg:path_so_far arguments arg arg arg arg If Compare Assign Call Call Call Assign Assign If BoolOp Call Compare Compare Call Call Raise Call Assign Call Call Call Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "check_arguments",
    "source_code": "def check_arguments(fun, y0, support_complex):\n    y0 = np.asarray(y0)\n    if np.issubdtype(y0.dtype, np.complexfloating):\n        if not support_complex:\n            raise ValueError('`y0` is complex, but the chosen solver does not support integration in a complex domain.')\n        dtype = complex\n    else:\n        dtype = float\n    y0 = y0.astype(dtype, copy=False)\n    if y0.ndim != 1:\n        raise ValueError('`y0` must be 1-dimensional.')\n    if not np.isfinite(y0).all():\n        raise ValueError('All components of the initial state `y0` must be finite.')\n\n    def fun_wrapped(t, y):\n        return np.asarray(fun(t, y), dtype=dtype)\n    return (fun_wrapped, y0)",
    "docstring": "Helper function for checking arguments common to all solvers.",
    "type": "function",
    "file_path": "scipy\\scipy\\integrate\\_ivp\\base.py",
    "ast_data": "FunctionDef name:check_arguments arg:fun arg:y0 arg:support_complex arguments arg arg arg Assign Call If Call If Raise Call Assign Assign Assign Call If Compare Raise Call If Call Call Raise Call FunctionDef name:fun_wrapped arg:t arg:y arguments arg arg Return return:yes Call Call Return return:yes"
  },
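The `check_arguments` helper above normalizes `y0` and wraps `fun` so every solver sees a consistent dtype. A minimal behavior sketch, assuming the private import path from the file_path field above stays importable:
```python
# Behavior sketch for check_arguments: float input passes through,
# complex input is rejected when support_complex=False.
import numpy as np
from scipy.integrate._ivp.base import check_arguments  # private helper, may move

fun_wrapped, y0 = check_arguments(lambda t, y: -y, [1.0, 2.0], support_complex=False)
print(y0.dtype)              # float64
print(fun_wrapped(0.0, y0))  # [-1. -2.]

try:
    check_arguments(lambda t, y: -y, [1 + 2j], support_complex=False)
except ValueError as exc:
    print(exc)  # complains that the chosen solver does not support complex domains
```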
  {
    "library": "pygame",
    "name": "print_",
    "source_code": "def print_(*args, **kwds):\n    msysio.print_(*args, **kwds)",
    "docstring": "Similar to the Python 3.0 print function",
    "type": "function",
    "file_path": "pygame\\buildconfig\\config.py",
    "ast_data": "FunctionDef name:print_ arguments arg arg Call"
  },
  {
    "library": "numpy",
    "name": "expandtabs",
    "source_code": "@set_module('numpy.strings')\n@array_function_dispatch(_expandtabs_dispatcher)\ndef expandtabs(a, tabsize=8):\n    a = np.asanyarray(a)\n    tabsize = np.asanyarray(tabsize)\n    if a.dtype.char == 'T':\n        return _expandtabs(a, tabsize)\n    buffersizes = _expandtabs_length(a, tabsize)\n    out_dtype = f'{a.dtype.char}{buffersizes.max()}'\n    out = np.empty_like(a, shape=buffersizes.shape, dtype=out_dtype)\n    return _expandtabs(a, tabsize, out=out)",
    "docstring": "Return a copy of each string element where all tab characters are replaced by one or more spaces. Calls :meth: element-wise. Return a copy of each string element where all tab characters are replaced by one or more spaces, depending on the current column and the given . The column number is reset to zero after each newline occurring in the string. This doesn't understand other non-printing characters or escape sequences. Parameters ---------- a : array-like, with `tabsize` dtype, depending on input type See Also -------- str.expandtabs Examples -------- >>> import numpy as np >>> a = np.array([' Hello world']) >>> np.strings.expandtabs(a, tabsize=4) # doctest: +SKIP array([' Hello world'], dtype='<U21') # doctest: +SKIP",
    "type": "function",
    "file_path": "numpy\\numpy\\_core\\strings.py",
    "ast_data": "FunctionDef name:expandtabs arg:a arg:tabsize arguments arg arg Assign Call Assign Call If Compare Return return:yes Call Assign Call Assign Call Assign Call Return return:yes Call Call Call"
  },
  {
    "library": "kornia",
    "name": "describe",
    "source_code": "@torch.inference_mode()\ndef describe(self, images: Tensor, keypoints: Optional[Tensor]=None, apply_imagenet_normalization: bool=True) -> Tensor:\n    KORNIA_CHECK_SHAPE(images, ['B', '3', 'H', 'W'])\n    B, C, H, W = images.shape\n    if keypoints is not None:\n        KORNIA_CHECK_SHAPE(keypoints, ['B', 'N', '2'])\n    if apply_imagenet_normalization:\n        images = self.normalizer(images)\n    self.train(False)\n    descriptions = self.descriptor.forward(images)\n    if keypoints is not None:\n        described_keypoints = F.grid_sample(descriptions.float(), keypoints[:, None], mode='bilinear', align_corners=False)[:, :, 0].mT\n        return described_keypoints\n    return descriptions",
    "docstring": "Describe keypoints in the input images. If keypoints are not provided, returns the dense descriptors. Args: images: A tensor of shape :math: containing the input images. keypoints: An optional tensor of shape :math: containing the detected keypoints. apply_imagenet_normalization: Whether to apply ImageNet normalization to the input images. Returns: descriptions: A tensor of shape :math: containing the descriptions of the detected keypoints. If the dense descriptors are requested, the shape is :math:.",
    "type": "method",
    "file_path": "kornia\\kornia\\feature\\dedode\\dedode.py",
    "ast_data": "FunctionDef name:describe arg:self arg:images arg:keypoints arg:apply_imagenet_normalization arguments arg arg arg arg Call Assign If Compare Call If Assign Call Call Assign Call If Compare Assign Call Call Return return:yes Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "Metropolis",
    "source_code": "class Metropolis:\n\n    def __init__(self, T, rng=None):\n        self.beta = 1.0 / T if T != 0 else float('inf')\n        self.rng = check_random_state(rng)\n\n    def accept_reject(self, res_new, res_old):\n        with np.errstate(invalid='ignore'):\n            prod = -(res_new.fun - res_old.fun) * self.beta\n            w = math.exp(min(0, prod))\n        rand = self.rng.uniform()\n        return w >= rand and (res_new.success or not res_old.success)\n\n    def __call__(self, *, res_new, res_old):\n        return bool(self.accept_reject(res_new, res_old))",
    "docstring": "Metropolis acceptance criterion. Parameters ---------- T : float The \"temperature\" parameter for the accept or reject criterion. rng : {None, int, }, optional Random number generator used for acceptance test.",
    "type": "class",
    "file_path": "scipy\\scipy\\optimize\\_basinhopping.py",
    "ast_data": "ClassDef name:Metropolis FunctionDef name:__init__ arg:self arg:T arg:rng arguments arg arg arg Assign Compare Call Assign Call FunctionDef name:accept_reject arg:self arg:res_new arg:res_old arguments arg arg arg With Call Assign Assign Call Call Assign Call Return return:yes BoolOp Compare BoolOp FunctionDef name:__call__ arg:self arguments arg arg arg Return return:yes Call Call"
  },
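The class above is SciPy's internal acceptance test; the same rule can be supplied through basinhopping's public `accept_test` hook, which passes `f_new`/`f_old` floats rather than `OptimizeResult` objects. A minimal sketch (the `MetropolisSketch` name and the toy objective are mine):
```python
# Metropolis rule: always accept downhill moves, accept uphill moves
# with probability exp(-(f_new - f_old) / T).
import math
import numpy as np
from scipy.optimize import basinhopping

class MetropolisSketch:
    def __init__(self, T, seed=0):
        self.beta = 1.0 / T if T != 0 else float("inf")
        self.rng = np.random.default_rng(seed)

    def __call__(self, *, f_new, x_new, f_old, x_old):
        w = math.exp(min(0.0, -(f_new - f_old) * self.beta))
        return w >= self.rng.uniform()

def objective(x):
    return float(x[0] ** 2 + 10 * np.sin(x[0]))

result = basinhopping(objective, x0=[2.0], niter=50,
                      accept_test=MetropolisSketch(T=1.0))
print(result.x)
```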
  {
    "library": "pandas",
    "name": "PrettyDict",
    "source_code": "class PrettyDict(dict[_KT, _VT]):\n\n    def __repr__(self) -> str:\n        return pprint_thing(self)",
    "docstring": "Dict extension to support abbreviated __repr__",
    "type": "class",
    "file_path": "pandas\\pandas\\io\\formats\\printing.py",
    "ast_data": "ClassDef name:PrettyDict FunctionDef name:__repr__ arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "OpsSet",
    "source_code": "@_tf_export('lite.OpsSet')\nclass OpsSet(enum.Enum):\n    TFLITE_BUILTINS = 'TFLITE_BUILTINS'\n    SELECT_TF_OPS = 'SELECT_TF_OPS'\n    TFLITE_BUILTINS_INT8 = 'TFLITE_BUILTINS_INT8'\n    EXPERIMENTAL_TFLITE_BUILTINS_ACTIVATIONS_INT16_WEIGHTS_INT8 = 'EXPERIMENTAL_TFLITE_BUILTINS_ACTIVATIONS_INT16_WEIGHTS_INT8'\n    EXPERIMENTAL_STABLEHLO_OPS = 'EXPERIMENTAL_STABLEHLO_OPS'\n\n    def __str__(self):\n        return str(self.value)\n\n    @staticmethod\n    def get_options():\n        return [str(option) for option in list(OpsSet)]",
    "docstring": "Enum class defining the sets of ops available to generate TFLite models. WARNING: Experimental interface, subject to change.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\lite\\python\\convert.py",
    "ast_data": "ClassDef name:OpsSet Assign Assign Assign Assign Assign FunctionDef name:__str__ arg:self arguments arg Return return:yes Call FunctionDef name:get_options arguments Return return:yes Call Call Call"
  },
  {
    "library": "matplotlib",
    "name": "_get_coordinates_of_block",
    "source_code": "def _get_coordinates_of_block(x, y, width, height, angle=0):\n    vertices = _calculate_quad_point_coordinates(x, y, width, height, angle)\n    pad = 1e-05 if angle % 90 else 0\n    min_x = min((v[0] for v in vertices)) - pad\n    min_y = min((v[1] for v in vertices)) - pad\n    max_x = max((v[0] for v in vertices)) + pad\n    max_y = max((v[1] for v in vertices)) + pad\n    return (tuple(itertools.chain.from_iterable(vertices)), (min_x, min_y, max_x, max_y))",
    "docstring": "Get the coordinates of rotated rectangle and rectangle that covers the rotated rectangle.",
    "type": "function",
    "file_path": "matplotlib\\lib\\matplotlib\\backends\\backend_pdf.py",
    "ast_data": "FunctionDef name:_get_coordinates_of_block arg:x arg:y arg:width arg:height arg:angle arguments arg arg arg arg arg Assign Call Assign Assign Call Assign Call Assign Call Assign Call Return return:yes Call Call"
  },
  {
    "library": "pandas",
    "name": "month_name",
    "source_code": "def month_name(self, locale=None) -> npt.NDArray[np.object_]:\n    values = self._local_timestamps()\n    result = fields.get_date_name_field(values, 'month_name', locale=locale, reso=self._creso)\n    result = self._maybe_mask_results(result, fill_value=None)\n    if using_string_dtype():\n        from pandas import StringDtype, array as pd_array\n        return pd_array(result, dtype=StringDtype(na_value=np.nan))\n    return result",
    "docstring": "Return the month names with specified locale. Parameters ---------- locale : str, optional Locale determining the language in which to return the month name. Default is English locale (`` will return month names in Brazilian Portuguese language. >>> idx = pd.date_range(start=\"2018-01\", freq=\"ME\", periods=3) >>> idx DatetimeIndex(['2018-01-31', '2018-02-28', '2018-03-31'], dtype='datetime64[ns]', freq='ME') >>> idx.month_name(locale=\"pt_BR.utf8\") # doctest: +SKIP Index(['Janeiro', 'Fevereiro', 'Março'], dtype='object')",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\arrays\\datetimes.py",
    "ast_data": "FunctionDef name:month_name arg:self arg:locale arguments arg arg Assign Call Assign Call Assign Call If Call Return return:yes Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_ensure_commit",
    "source_code": "def _ensure_commit(git_sha1: str) -> None:\n    cmd = git('cat-file', '-e', git_sha1 + '^{commit}')\n    p = subprocess.run(cmd, capture_output=True, check=False)\n    if p.returncode == 0:\n        return\n    cmd = git('fetch', GITHUB_REMOTE_URL, git_sha1)\n    subprocess.check_call(cmd)",
    "docstring": "Make sure that we actually have the commit locally",
    "type": "function",
    "file_path": "pytorch\\tools\\nightly.py",
    "ast_data": "FunctionDef name:_ensure_commit arg:git_sha1 arguments arg Assign Call Assign Call If Compare Return return:no Assign Call Call"
  },
  {
    "library": "scikit-learn",
    "name": "setdiff1d",
    "source_code": "def setdiff1d(x1: Array | complex, x2: Array | complex, /, *, assume_unique: bool=False, xp: ModuleType | None=None) -> Array:\n    if xp is None:\n        xp = array_namespace(x1, x2)\n    x1_, x2_ = asarrays(x1, x2, xp=xp)\n    if assume_unique:\n        x1_ = xp.reshape(x1_, (-1,))\n        x2_ = xp.reshape(x2_, (-1,))\n    else:\n        x1_ = xp.unique_values(x1_)\n        x2_ = xp.unique_values(x2_)\n    return x1_[_helpers.in1d(x1_, x2_, assume_unique=True, invert=True, xp=xp)]",
    "docstring": "Find the set difference of two arrays. Return the unique values in that are not in . Parameters ---------- x1 : array | int | float | complex | bool Input array. x2 : array Input comparison array. assume_unique : bool If `x1x2x1x2assume_unique`, but otherwise only sorted if the input is sorted. Examples -------- >>> import array_api_strict as xp >>> import array_api_extra as xpx >>> x1 = xp.asarray([1, 2, 3, 2, 4, 1]) >>> x2 = xp.asarray([3, 4, 5, 6]) >>> xpx.setdiff1d(x1, x2, xp=xp) Array([1, 2], dtype=array_api_strict.int64)",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\externals\\array_api_extra\\_lib\\_funcs.py",
    "ast_data": "FunctionDef name:setdiff1d arguments arg arg arg arg If Compare Assign Call Assign Call If Assign Call Assign Call Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "Wedge",
    "source_code": "@_register_style(_style_list)\nclass Wedge(_Base):\n\n    def __init__(self, tail_width=0.3, shrink_factor=0.5):\n        self.tail_width = tail_width\n        self.shrink_factor = shrink_factor\n        super().__init__()\n\n    def transmute(self, path, mutation_size, linewidth):\n        x0, y0, x1, y1, x2, y2 = self.ensure_quadratic_bezier(path)\n        arrow_path = [(x0, y0), (x1, y1), (x2, y2)]\n        b_plus, b_minus = make_wedged_bezier2(arrow_path, self.tail_width * mutation_size / 2.0, wm=self.shrink_factor)\n        patch_path = [(Path.MOVETO, b_plus[0]), (Path.CURVE3, b_plus[1]), (Path.CURVE3, b_plus[2]), (Path.LINETO, b_minus[2]), (Path.CURVE3, b_minus[1]), (Path.CURVE3, b_minus[0]), (Path.CLOSEPOLY, b_minus[0])]\n        path = Path([p for c, p in patch_path], [c for c, p in patch_path])\n        return (path, True)",
    "docstring": "Wedge(?) shape. Only works with a quadratic Bézier curve. The start point has a width of the *tail_width* and the end point has a width of 0. At the middle, the width is *shrink_factor*x*tail_width*.",
    "type": "class",
    "file_path": "matplotlib\\lib\\matplotlib\\patches.py",
    "ast_data": "ClassDef name:Wedge FunctionDef name:__init__ arg:self arg:tail_width arg:shrink_factor arguments arg arg arg Assign Assign Call Call FunctionDef name:transmute arg:self arg:path arg:mutation_size arg:linewidth arguments arg arg arg arg Assign Call Assign Assign Call Assign Assign Call Return return:yes Call"
  },
  {
    "library": "django",
    "name": "get_fields",
    "source_code": "def get_fields(self, include_parents=True, include_hidden=False):\n    if include_parents is False:\n        include_parents = PROXY_PARENTS\n    return self._get_fields(include_parents=include_parents, include_hidden=include_hidden)",
    "docstring": "Return a list of fields associated to the model. By default, include forward and reverse fields, fields derived from inheritance, but not hidden fields. The returned fields can be changed using the parameters: - include_parents: include fields derived from inheritance - include_hidden: include fields that have a related_name that starts with a \"+\"",
    "type": "method",
    "file_path": "django\\django\\db\\models\\options.py",
    "ast_data": "FunctionDef name:get_fields arg:self arg:include_parents arg:include_hidden arguments arg arg arg If Compare Assign Return return:yes Call"
  },
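`Model._meta.get_fields()` is a documented part of Django's model `_meta` API. A small sketch using the bundled auth app, with settings configured inline so the snippet is self-contained:
```python
# List forward fields, reverse relations, and hidden "+"-named relations.
import django
from django.conf import settings

settings.configure(
    INSTALLED_APPS=["django.contrib.contenttypes", "django.contrib.auth"],
)
django.setup()

from django.contrib.auth.models import User

for field in User._meta.get_fields(include_hidden=True):
    print(field.name, type(field).__name__)
```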
  {
    "library": "scikit-learn",
    "name": "_solve_eigen_covariance",
    "source_code": "def _solve_eigen_covariance(self, alpha, y, sqrt_sw, X_mean, eigvals, V, X):\n    if self.fit_intercept:\n        return self._solve_eigen_covariance_intercept(alpha, y, sqrt_sw, X_mean, eigvals, V, X)\n    return self._solve_eigen_covariance_no_intercept(alpha, y, sqrt_sw, X_mean, eigvals, V, X)",
    "docstring": "Compute dual coefficients and diagonal of G^-1. Used when we have a decomposition of X^T.X (n_samples > n_features and X is sparse).",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\linear_model\\_ridge.py",
    "ast_data": "FunctionDef name:_solve_eigen_covariance arg:self arg:alpha arg:y arg:sqrt_sw arg:X_mean arg:eigvals arg:V arg:X arguments arg arg arg arg arg arg arg arg If Return return:yes Call Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "is_valid_na_for_dtype",
    "source_code": "def is_valid_na_for_dtype(obj, dtype: DtypeObj) -> bool:\n    if not lib.is_scalar(obj) or not isna(obj):\n        return False\n    elif dtype.kind == 'M':\n        if isinstance(dtype, np.dtype):\n            return not isinstance(obj, (np.timedelta64, Decimal))\n        return not isinstance(obj, (np.timedelta64, np.datetime64, Decimal))\n    elif dtype.kind == 'm':\n        return not isinstance(obj, (np.datetime64, Decimal))\n    elif dtype.kind in 'iufc':\n        return obj is not NaT and (not isinstance(obj, (np.datetime64, np.timedelta64)))\n    elif dtype.kind == 'b':\n        return lib.is_float(obj) or obj is None or obj is libmissing.NA\n    elif dtype == _dtype_str:\n        return not isinstance(obj, (np.datetime64, np.timedelta64, Decimal, float))\n    elif dtype == _dtype_object:\n        return True\n    elif isinstance(dtype, PeriodDtype):\n        return not isinstance(obj, (np.datetime64, np.timedelta64, Decimal))\n    elif isinstance(dtype, IntervalDtype):\n        return lib.is_float(obj) or obj is None or obj is libmissing.NA\n    elif isinstance(dtype, CategoricalDtype):\n        return is_valid_na_for_dtype(obj, dtype.categories.dtype)\n    return not isinstance(obj, (np.datetime64, np.timedelta64, Decimal))",
    "docstring": "isna check that excludes incompatible dtypes Parameters ---------- obj : object dtype : np.datetime64, np.timedelta64, DatetimeTZDtype, or PeriodDtype Returns ------- bool",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\dtypes\\missing.py",
    "ast_data": "FunctionDef name:is_valid_na_for_dtype arg:obj arg:dtype arguments arg arg If BoolOp Call Call Return return:yes If Compare If Call Return return:yes Call Return return:yes Call If Compare Return return:yes Call If Compare Return return:yes BoolOp Compare Call If Compare Return return:yes BoolOp Call Compare Compare If Compare Return return:yes Call If Compare Return return:yes If Call Return return:yes Call If Call Return return:yes BoolOp Call Compare Compare If Call Return return:yes Call Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "ClosedFileError",
    "source_code": "class ClosedFileError(Exception):\n    pass",
    "docstring": "Exception is raised when trying to perform an operation on a closed HDFStore file. `` objects. Once an HDFStore is closed, its resources are no longer available, and any further attempt to access data or perform file operations will raise this exception. See Also -------- HDFStore.close : Closes the PyTables file handle. HDFStore.open : Opens the file in the specified mode. HDFStore.is_open : Returns a boolean indicating whether the file is open. Examples -------- >>> store = pd.HDFStore(\"my-store\", \"a\") # doctest: +SKIP >>> store.close() # doctest: +SKIP >>> store.keys() # doctest: +SKIP ... # ClosedFileError: my-store file is not open!",
    "type": "class",
    "file_path": "pandas\\pandas\\errors\\__init__.py",
    "ast_data": "ClassDef name:ClosedFileError"
  },
  {
    "library": "pytorch",
    "name": "_swap_ff_with_fxff",
    "source_code": "def _swap_ff_with_fxff(model: torch.nn.Module) -> None:\n    modules_to_swap = []\n    for name, module in model.named_children():\n        if isinstance(module, torch.ao.nn.quantized.FloatFunctional):\n            modules_to_swap.append(name)\n        else:\n            _swap_ff_with_fxff(module)\n    for name in modules_to_swap:\n        del model._modules[name]\n        model._modules[name] = torch.ao.nn.quantized.FXFloatFunctional()",
    "docstring": "Swap FloatFunctional with FXFloatFunctional",
    "type": "function",
    "file_path": "pytorch\\torch\\ao\\quantization\\quantize_fx.py",
    "ast_data": "FunctionDef name:_swap_ff_with_fxff arg:model arguments arg Assign For Call If Call Call Call For Assign Call"
  },
  {
    "library": "sphinx",
    "name": "get_translation",
    "source_code": "def get_translation(catalog: str, namespace: str='general') -> Callable[[str], str]:\n\n    def gettext(message: str) -> str:\n        if not is_translator_registered(catalog, namespace):\n            return _TranslationProxy(catalog, namespace, message)\n        else:\n            translator = get_translator(catalog, namespace)\n            return translator.gettext(message)\n    return gettext",
    "docstring": "Get a translation function based on the *catalog* and *namespace*. The extension can use this API to translate the messages on the extension:: from pathlib import Path from sphinx.locale import get_translation MESSAGE_CATALOG_NAME = 'myextension' # name of *.pot, *.po and *.mo files _ = get_translation(MESSAGE_CATALOG_NAME) text = _('Hello Sphinx!') def setup(app): package_dir = Path(__file__).resolve().parent locale_dir = package_dir / 'locales' app.add_message_catalog(MESSAGE_CATALOG_NAME, locale_dir) With this code, sphinx searches a message catalog from `language` is used for the searching. .. versionadded:: 1.8",
    "type": "function",
    "file_path": "sphinx\\sphinx\\locale\\__init__.py",
    "ast_data": "FunctionDef name:get_translation arg:catalog arg:namespace arguments arg arg FunctionDef name:gettext arg:message arguments arg If Call Return return:yes Call Assign Call Return return:yes Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_scatter_fold_block_mask",
    "source_code": "def _scatter_fold_block_mask(self, output_shape, dim, indices, block_shape, mask=None, input_shape=None, device=None):\n    if mask is None:\n        assert input_shape is not None\n        mask = torch.ones(input_shape, device=device)\n    mask.scatter_(dim=dim, index=indices, value=0)\n    mask.data = F.fold(mask, output_size=output_shape, kernel_size=block_shape, stride=block_shape)\n    return mask",
    "docstring": "Creates patches of size after scattering the indices.",
    "type": "method",
    "file_path": "pytorch\\torch\\ao\\pruning\\sparsifier\\weight_norm_sparsifier.py",
    "ast_data": "FunctionDef name:_scatter_fold_block_mask arg:self arg:output_shape arg:dim arg:indices arg:block_shape arg:mask arg:input_shape arg:device arguments arg arg arg arg arg arg arg arg If Compare Compare Assign Call Call Assign Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "seed",
    "source_code": "def seed() -> None:\n\n    def cb():\n        idx = current_device()\n        default_generator = torch.cuda.default_generators[idx]\n        default_generator.seed()\n    _lazy_call(cb)",
    "docstring": "Set the seed for generating random numbers to a random number for the current GPU. It's safe to call this function if CUDA is not available; in that case, it is silently ignored. .. warning:: If you are working with a multi-GPU model, this function will only initialize the seed on one GPU. To initialize all GPUs, use :func:.",
    "type": "function",
    "file_path": "pytorch\\torch\\cuda\\random.py",
    "ast_data": "FunctionDef name:seed arguments FunctionDef name:cb arguments Assign Call Assign Call Call"
  },
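A short usage sketch contrasting `torch.cuda.seed()` (random seed, current device only) with the deterministic, all-device alternative the warning points to:
```python
import torch

if torch.cuda.is_available():
    torch.cuda.seed()                 # pick a fresh random seed on the current GPU
    print(torch.cuda.initial_seed())  # inspect the seed that was chosen

torch.cuda.manual_seed_all(1234)      # reproducible alternative, all GPUs
```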
  {
    "library": "django",
    "name": "_create_missing_fk_index",
    "source_code": "def _create_missing_fk_index(self, model, *, fields, expressions=None):\n    first_field_name = None\n    if fields:\n        first_field_name = fields[0]\n    elif expressions and self.connection.features.supports_expression_indexes and isinstance(expressions[0], F) and (LOOKUP_SEP not in expressions[0].name):\n        first_field_name = expressions[0].name\n    if not first_field_name:\n        return\n    first_field = model._meta.get_field(first_field_name)\n    if first_field.get_internal_type() == 'ForeignKey':\n        column = self.connection.introspection.identifier_converter(first_field.column)\n        with self.connection.cursor() as cursor:\n            constraint_names = [name for name, infodict in self.connection.introspection.get_constraints(cursor, model._meta.db_table).items() if infodict['index'] and infodict['columns'][0] == column]\n        if len(constraint_names) == 1:\n            self.execute(self._create_index_sql(model, fields=[first_field], suffix=''))",
    "docstring": "MySQL can remove an implicit FK index on a field when that field is covered by another index like a unique_together. \"covered\" here means that the more complex index has the FK field as its first field (see Manually create an implicit FK index to make it possible to remove the composed index.",
    "type": "method",
    "file_path": "django\\django\\db\\backends\\mysql\\schema.py",
    "ast_data": "FunctionDef name:_create_missing_fk_index arg:self arg:model arguments arg arg arg arg Assign If Assign If BoolOp Call Compare Assign If Return return:no Assign Call If Compare Call Assign Call With Call Assign Call Call BoolOp Compare If Compare Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "init_cellvars",
    "source_code": "def init_cellvars(parent, result: dict[str, VariableTracker], code):\n    side_effects = parent.output.side_effects\n    for name in code.co_cellvars:\n        new_cell = side_effects.track_cell_new()\n        if name in result:\n            side_effects.store_cell(new_cell, result.pop(name))\n        result[name] = new_cell",
    "docstring": "Update to add mapping from local name to new cells created directly by , or update SideEffects in if the a local cell is already in (cell argument).",
    "type": "function",
    "file_path": "pytorch\\torch\\_dynamo\\variables\\functions.py",
    "ast_data": "FunctionDef name:init_cellvars arg:parent arg:result arg:code arguments arg arg arg Assign For Assign Call If Compare Call Call Assign"
  },
  {
    "library": "tensorflow",
    "name": "_lookup_op_type",
    "source_code": "def _lookup_op_type(self, graph_id, op_name):\n    return self._graph_by_id[graph_id].get_op_creation_digest(op_name).op_type",
    "docstring": "Lookup the type of an op by name and the immediately enclosing graph. Args: graph_id: Debugger-generated ID of the immediately-enclosing graph. op_name: Name of the op. Returns: Op type as a str.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\debug\\lib\\debug_events_reader.py",
    "ast_data": "FunctionDef name:_lookup_op_type arg:self arg:graph_id arg:op_name arguments arg arg arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "assert_like_rnncell",
    "source_code": "def assert_like_rnncell(cell_name, cell):\n    conditions = [_hasattr(cell, 'output_size'), _hasattr(cell, 'state_size'), _hasattr(cell, 'get_initial_state') or _hasattr(cell, 'zero_state'), callable(cell)]\n    errors = [\"'output_size' property is missing\", \"'state_size' property is missing\", \"either 'zero_state' or 'get_initial_state' method is required\", 'is not callable']\n    if not all(conditions):\n        errors = [error for error, cond in zip(errors, conditions) if not cond]\n        raise TypeError('The argument {!r} ({}) is not an RNNCell: {}.'.format(cell_name, cell, ', '.join(errors)))",
    "docstring": "Raises a TypeError if cell is not like an RNNCell. NOTE: Do not rely on the error message (in particular in tests) which can be subject to change to increase readability. Use ASSERT_LIKE_RNNCELL_ERROR_REGEXP. Args: cell_name: A string to give a meaningful error referencing to the name of the functionargument. cell: The object which should behave like an RNNCell. Raises: TypeError: A human-friendly exception.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\rnn_cell_impl.py",
    "ast_data": "FunctionDef name:assert_like_rnncell arg:cell_name arg:cell arguments arg arg Assign Call Call BoolOp Call Call Call Assign If Call Assign Call Raise Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "to_variant",
    "source_code": "@tf_export('data.experimental.to_variant')\ndef to_variant(dataset: DatasetV2):\n    return dataset._variant_tensor",
    "docstring": "Returns a variant representing the given dataset. Args: dataset: A . Returns: A scalar tensor representing the given dataset.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\data\\ops\\dataset_ops.py",
    "ast_data": "FunctionDef name:to_variant arg:dataset arguments arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "bundle_randn",
    "source_code": "def bundle_randn(*size, dtype=None):\n    stub = torch.zeros(1, dtype=dtype).expand(*size)\n    return InflatableArg(value=stub, fmt='torch.randn_like({})')",
    "docstring": "Generate a tensor that will be inflated with torch.randn.",
    "type": "function",
    "file_path": "pytorch\\torch\\utils\\bundled_inputs.py",
    "ast_data": "FunctionDef name:bundle_randn arguments arg arg Assign Call Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "create_rng_state",
    "source_code": "@tf_export('random.create_rng_state', 'random.experimental.create_rng_state')\ndef create_rng_state(seed, alg):\n    alg = random_ops_util.convert_alg_to_int(alg)\n    return _make_state_from_seed(seed, alg)",
    "docstring": "Creates a RNG state from an integer or a vector. Example: >>> tf.random.create_rng_state( ... 1234, \"philox\") >>> tf.random.create_rng_state( ... [12, 34], \"threefry\") Args: seed: an integer or 1-D numpy array. alg: the RNG algorithm. Can be a string, an or an integer. Returns: a 1-D numpy array whose size depends on the algorithm.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\stateful_random_ops.py",
    "ast_data": "FunctionDef name:create_rng_state arg:seed arg:alg arguments arg arg Assign Call Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "run",
    "source_code": "def run(fn=None):\n    if fn is not None:\n        fn = innermost_fn(fn)\n        assert callable(fn)\n        return RunOnlyContext()(fn)\n    return RunOnlyContext()",
    "docstring": "Don't do any dynamic compiles, just use prior optimizations",
    "type": "function",
    "file_path": "pytorch\\torch\\_dynamo\\decorators.py",
    "ast_data": "FunctionDef name:run arg:fn arguments arg If Compare Assign Call Call Return return:yes Call Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_build_map",
    "source_code": "def _build_map(outputs):\n    finished_nodes = set()\n    nodes_in_progress = set()\n    nodes_in_decreasing_depth = []\n    layer_indices = {}\n    for output in nest.flatten(outputs):\n        _build_map_helper(output, finished_nodes, nodes_in_progress, nodes_in_decreasing_depth, layer_indices)\n    return (nodes_in_decreasing_depth, layer_indices)",
    "docstring": "This method topologically sorts nodes in order from inputs to outputs. It uses a depth-first search to topologically sort nodes that appear in the _keras_history connectivity metadata of . Args: outputs: the output tensors whose _keras_history metadata should be walked. This may be an arbitrary nested structure. Returns: A tuple like (ordered_nodes, layer_to_first_traversal_index) ordered_nodes: list of nodes appearing in the keras history, topologically sorted from original inputs to the . (If outputs have different sets of ancestors, the inputs to one output may appear after a different output). layer_to_first_traversal_index: A dict mapping layer to the traversal index in the DFS where it is seen. Note: if a layer is shared by several nodes, the dict will only store the index corresponding to the *first* time the layer seen.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\functional.py",
    "ast_data": "FunctionDef name:_build_map arg:outputs arguments arg Assign Call Assign Call Assign Assign For Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_generate_scoped_tracing_options",
    "source_code": "def _generate_scoped_tracing_options(self, scope, scope_type):\n    weak_wrapped_fn = None\n    compile_with_xla = self._jit_compile\n\n    def wrapped_fn(*args, **kwds):\n        default_graph = ops.get_default_graph()\n        with default_graph._variable_creator_scope(scope, priority=50):\n            with OptionalXlaContext(compile_with_xla):\n                out = weak_wrapped_fn().__wrapped__(*args, **kwds)\n            return out\n    weak_wrapped_fn = weakref.ref(wrapped_fn)\n    return self._generate_tracing_options(tf_decorator.make_decorator(self._python_function, wrapped_fn), scope_type)",
    "docstring": "Creates TracingOptions for variable creator scopes.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\polymorphic_function\\polymorphic_function.py",
    "ast_data": "FunctionDef name:_generate_scoped_tracing_options arg:self arg:scope arg:scope_type arguments arg arg arg Assign Assign FunctionDef name:wrapped_fn arguments arg arg Assign Call With Call With Call Assign Call Call Return return:yes Assign Call Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "_annotate_output_for_int8_in_int8_out_pattern",
    "source_code": "def _annotate_output_for_int8_in_int8_out_pattern(self, node: Node) -> None:\n    edge_or_node: tuple[Node, Node]\n    if node.target in int8_in_int8_out_ops and _is_any_annotated([node]):\n        if node.target == torch.ops.aten.max_pool2d.default:\n            maxpool_node = node\n            if not _is_all_annotated([maxpool_node]):\n                return\n            maxpool_node_quantization_annotation = maxpool_node.meta[QUANT_ANNOTATION_KEY] if QUANT_ANNOTATION_KEY in maxpool_node.meta else None\n            if maxpool_node_quantization_annotation and maxpool_node_quantization_annotation._is_output_of_quantized_pattern:\n                input_act = maxpool_node.args[0]\n                assert isinstance(input_act, Node)\n                assert isinstance(maxpool_node, Node)\n                edge_or_node = (input_act, maxpool_node)\n                maxpool_node_quantization_annotation.output_qspec = SharedQuantizationSpec(edge_or_node)\n        else:\n            input_node = node.all_input_nodes[0]\n            self._annotate_output_share_observer_as_input(input_node, node)\n    return",
    "docstring": "Check and insert observer at output of node in int8_in_int8_out_ops if needed. Recipe refers to",
    "type": "method",
    "file_path": "pytorch\\torch\\ao\\quantization\\quantizer\\x86_inductor_quantizer.py",
    "ast_data": "FunctionDef name:_annotate_output_for_int8_in_int8_out_pattern arg:self arg:node arguments arg arg If BoolOp Compare Call If Compare Assign If Call Return return:no Assign Compare If BoolOp Assign Call Call Assign Assign Call Assign Call Return return:no"
  },
  {
    "library": "pytorch",
    "name": "PrependParamsAndBuffersAotAutogradOutputStep",
    "source_code": "class PrependParamsAndBuffersAotAutogradOutputStep(OutputAdaptStep):\n\n    def apply(self, model_outputs: Any, model: torch.nn.Module | Callable | torch_export.ExportedProgram | None=None) -> Sequence[Any]:\n        assert isinstance(model, torch_export.ExportedProgram), \"'model' must be torch_export.ExportedProgram\"\n        ordered_buffers = tuple((model.state_dict[name] if name in model.state_dict else model.constants[name] for name in model.graph_signature.buffers_to_mutate.values()))\n        updated_outputs = (*ordered_buffers, *model_outputs)\n        return updated_outputs",
    "docstring": "Prepend model's mutated buffers to the user output. :func: lifts model's mutated buffers as outputs, thus, they must be added to the user output after the model is executed. Args: model: The PyTorch model with mutated buffers.",
    "type": "class",
    "file_path": "pytorch\\torch\\onnx\\_internal\\io_adapter.py",
    "ast_data": "ClassDef name:PrependParamsAndBuffersAotAutogradOutputStep FunctionDef name:apply arg:self arg:model_outputs arg:model arguments arg arg arg Call Assign Call Compare Call Assign Return return:yes"
  },
  {
    "library": "scrapy",
    "name": "is_generator_with_return_value",
    "source_code": "def is_generator_with_return_value(callable: Callable[..., Any]) -> bool:\n    if callable in _generator_callbacks_cache:\n        return bool(_generator_callbacks_cache[callable])\n\n    def returns_none(return_node: ast.Return) -> bool:\n        value = return_node.value\n        return value is None or (isinstance(value, ast.Constant) and value.value is None)\n    if inspect.isgeneratorfunction(callable):\n        func = callable\n        while isinstance(func, partial):\n            func = func.func\n        src = inspect.getsource(func)\n        pattern = re.compile('(^[\\\\t ]+)')\n        code = pattern.sub('', src)\n        match = pattern.match(src)\n        if match:\n            code = re.sub(f'\\n{match.group(0)}', '\\n', code)\n        tree = ast.parse(code)\n        for node in walk_callable(tree):\n            if isinstance(node, ast.Return) and (not returns_none(node)):\n                _generator_callbacks_cache[callable] = True\n                return bool(_generator_callbacks_cache[callable])\n    _generator_callbacks_cache[callable] = False\n    return bool(_generator_callbacks_cache[callable])",
    "docstring": "Returns True if a callable is a generator function which includes a 'return' statement with a value different than None, False otherwise",
    "type": "function",
    "file_path": "scrapy\\scrapy\\utils\\misc.py",
    "ast_data": "FunctionDef name:is_generator_with_return_value arg:callable arguments arg If Compare Return return:yes Call FunctionDef name:returns_none arg:return_node arguments arg Assign Return return:yes BoolOp Compare BoolOp Call Compare If Call Assign While Call Assign Assign Call Assign Call Assign Call Assign Call If Assign Call Call Assign Call For Call If BoolOp Call Call Assign Return return:yes Call Assign Return return:yes Call"
  },
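A usage sketch of the check above; run it as a script file so `inspect.getsource` can read the callback source:
```python
from scrapy.utils.misc import is_generator_with_return_value

def plain(response):
    yield {"url": response.url}

def with_return(response):
    yield {"url": response.url}
    return "done"  # a non-None return inside a generator

print(is_generator_with_return_value(plain))        # False
print(is_generator_with_return_value(with_return))  # True
```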
  {
    "library": "matplotlib",
    "name": "_convert_units",
    "source_code": "def _convert_units(self):\n    x0 = self.convert_xunits(self._x0)\n    y0 = self.convert_yunits(self._y0)\n    x1 = self.convert_xunits(self._x0 + self._width)\n    y1 = self.convert_yunits(self._y0 + self._height)\n    return (x0, y0, x1, y1)",
    "docstring": "Convert bounds of the rectangle.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\patches.py",
    "ast_data": "FunctionDef name:_convert_units arg:self arguments arg Assign Call Assign Call Assign Call Assign Call Return return:yes"
  },
  {
    "library": "pandas",
    "name": "DtypeKind",
    "source_code": "class DtypeKind(enum.IntEnum):\n    INT = 0\n    UINT = 1\n    FLOAT = 2\n    BOOL = 20\n    STRING = 21\n    DATETIME = 22\n    CATEGORICAL = 23",
    "docstring": "Integer enum for data types. Attributes ---------- INT : int Matches to signed integer data type. UINT : int Matches to unsigned integer data type. FLOAT : int Matches to floating point data type. BOOL : int Matches to boolean data type. STRING : int Matches to string data type (UTF-8 encoded). DATETIME : int Matches to datetime data type. CATEGORICAL : int Matches to categorical data type.",
    "type": "class",
    "file_path": "pandas\\pandas\\core\\interchange\\dataframe_protocol.py",
    "ast_data": "ClassDef name:DtypeKind Assign Assign Assign Assign Assign Assign Assign"
  },
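`DtypeKind` surfaces through the dataframe interchange protocol as the first element of a column's `dtype` tuple; a minimal sketch:
```python
import pandas as pd

df = pd.DataFrame({"a": [1, 2, 3], "b": [1.5, 2.5, 3.5]})
col = df.__dataframe__().get_column_by_name("a")

kind, bitwidth, fmt, endianness = col.dtype
print(kind, bitwidth)  # DtypeKind.INT 64
```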
  {
    "library": "django",
    "name": "common_roots",
    "source_code": "@lru_cache(maxsize=1)\ndef common_roots(paths):\n    path_parts = sorted([x.parts for x in paths], key=len, reverse=True)\n    tree = {}\n    for chunks in path_parts:\n        node = tree\n        for chunk in chunks:\n            node = node.setdefault(chunk, {})\n        node.clear()\n\n    def _walk(node, path):\n        for prefix, child in node.items():\n            yield from _walk(child, [*path, prefix])\n        if not node:\n            yield Path(*path)\n    return tuple(_walk(tree, ()))",
    "docstring": "Return a tuple of common roots that are shared between the given paths. File system watchers operate on directories and aren't cheap to create. Try to find the minimum set of directories to watch that encompass all of the files that need to be watched.",
    "type": "function",
    "file_path": "django\\django\\utils\\autoreload.py",
    "ast_data": "FunctionDef name:common_roots arg:paths arguments arg Assign Call Assign For Assign For Assign Call Call FunctionDef name:_walk arg:node arg:path arguments arg arg For Call Call If Call Return return:yes Call Call Call"
  },
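`common_roots` is internal to `django.utils.autoreload`, but the trie idea it uses can be shown standalone. A self-contained sketch (POSIX-style paths assumed):
```python
# Keep only the shallowest directories: insert longest paths first into a
# prefix trie, clear any node that a shorter path subsumes, then walk leaves.
from pathlib import Path

def common_roots_sketch(paths):
    tree = {}
    for parts in sorted((p.parts for p in paths), key=len, reverse=True):
        node = tree
        for part in parts:
            node = node.setdefault(part, {})
        node.clear()  # a shorter path wipes everything stored beneath it

    def walk(node, prefix):
        for part, child in node.items():
            yield from walk(child, (*prefix, part))
        if not node:
            yield Path(*prefix)

    return tuple(walk(tree, ()))

paths = [Path("/srv/app/static"), Path("/srv/app"), Path("/var/log")]
print(common_roots_sketch(paths))  # (PosixPath('/srv/app'), PosixPath('/var/log'))
```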
  {
    "library": "django",
    "name": "z",
    "source_code": "@property\ndef z(self):\n    if not self.hasz:\n        return None\n    else:\n        return self._listarr(self._cs.getZ)",
    "docstring": "Return a list or numpy array of the Z variable.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\geos\\linestring.py",
    "ast_data": "FunctionDef name:z arg:self arguments arg If Return return:no Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "to_excel",
    "source_code": "@final\n@doc(klass='object', storage_options=_shared_docs['storage_options'], storage_options_versionadded='1.2.0', encoding_parameter='', verbose_parameter='', extra_parameters=textwrap.dedent('        engine_kwargs : dict, optional\\n            Arbitrary keyword arguments passed to excel engine.\\n        '))\ndef to_excel(self, excel_writer: FilePath | WriteExcelBuffer | ExcelWriter, *, sheet_name: str='Sheet1', na_rep: str='', float_format: str | None=None, columns: Sequence[Hashable] | None=None, header: Sequence[Hashable] | bool=True, index: bool=True, index_label: IndexLabel | None=None, startrow: int=0, startcol: int=0, engine: Literal['openpyxl', 'xlsxwriter'] | None=None, merge_cells: bool=True, inf_rep: str='inf', freeze_panes: tuple[int, int] | None=None, storage_options: StorageOptions | None=None, engine_kwargs: dict[str, Any] | None=None) -> None:\n    if engine_kwargs is None:\n        engine_kwargs = {}\n    df = self if isinstance(self, ABCDataFrame) else self.to_frame()\n    from pandas.io.formats.excel import ExcelFormatter\n    formatter = ExcelFormatter(df, na_rep=na_rep, cols=columns, header=header, float_format=float_format, index=index, index_label=index_label, merge_cells=merge_cells, inf_rep=inf_rep)\n    formatter.write(excel_writer, sheet_name=sheet_name, startrow=startrow, startcol=startcol, freeze_panes=freeze_panes, engine=engine, storage_options=storage_options, engine_kwargs=engine_kwargs)",
    "docstring": "Write {klass} to an Excel sheet. To write a single {klass} to an Excel .xlsx file it is only necessary to specify a target file name. To write to multiple sheets it is necessary to create an object with a target file name, and specify a sheet in the file to write to. Multiple sheets may be written to by specifying unique . With all data written to the file it is necessary to save the changes. Note that creating an object with a file name that already exists will result in the contents of the existing file being erased. Parameters ---------- excel_writer : path-like, file-like, or ExcelWriter object File path or existing ExcelWriter. sheet_name : str, default 'Sheet1' Name of sheet which will contain DataFrame. na_rep : str, default '' Missing data representation. float_format : str, optional Format string for floating point numbers. For example `headerindex~DataFrame.to_csvengine` keyword (the default engine is automatically chosen depending on the file extension): >>> df1.to_excel(\"output1.xlsx\", engine=\"xlsxwriter\") # doctest: +SKIP",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\generic.py",
    "ast_data": "FunctionDef name:to_excel arg:self arg:excel_writer arguments arg arg arg arg arg arg arg arg arg arg arg arg arg arg arg arg arg If Compare Assign Assign Call Call Assign Call Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "_PatternInfo",
    "source_code": "@dataclass\nclass _PatternInfo:\n    action: _ModuleProviderAction\n    allow_empty: bool\n    was_matched: bool\n\n    def __init__(self, action, allow_empty):\n        self.action = action\n        self.allow_empty = allow_empty\n        self.was_matched = False",
    "docstring": "Holds :class:-specific info about how to execute matches against",
    "type": "class",
    "file_path": "pytorch\\torch\\package\\package_exporter.py",
    "ast_data": "ClassDef name:_PatternInfo FunctionDef name:__init__ arg:self arg:action arg:allow_empty arguments arg arg arg Assign Assign Assign"
  },
  {
    "library": "tensorflow",
    "name": "in_cross_replica_context",
    "source_code": "@tf_export('distribute.in_cross_replica_context')\ndef in_cross_replica_context():\n    return _get_per_thread_mode().cross_replica_context is not None",
    "docstring": "Returns if in a cross-replica context. See for details. Returns: if in a cross-replica context ( returns ), or if in a replica context ( returns non-).",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\distribute_lib.py",
    "ast_data": "FunctionDef name:in_cross_replica_context arguments Return return:yes Compare Call Call"
  },
  {
    "library": "kornia",
    "name": "multiply_deg_one_poly",
    "source_code": "def multiply_deg_one_poly(a: torch.Tensor, b: torch.Tensor) -> torch.Tensor:\n    return stack([a[:, 0] * b[:, 0], a[:, 0] * b[:, 1] + a[:, 1] * b[:, 0], a[:, 0] * b[:, 2] + a[:, 2] * b[:, 0], a[:, 0] * b[:, 3] + a[:, 3] * b[:, 0], a[:, 1] * b[:, 1], a[:, 1] * b[:, 2] + a[:, 2] * b[:, 1], a[:, 1] * b[:, 3] + a[:, 3] * b[:, 1], a[:, 2] * b[:, 2], a[:, 2] * b[:, 3] + a[:, 3] * b[:, 2], a[:, 3] * b[:, 3]], dim=-1)",
    "docstring": "Multiply two polynomials of the first order [@nister2004efficient]. Args: a: a first order polynomial for variables :math:. b: a first order polynomial for variables :math:. Returns: degree 2 poly with the order :math:.",
    "type": "function",
    "file_path": "kornia\\kornia\\geometry\\solvers\\polynomial_solver.py",
    "ast_data": "FunctionDef name:multiply_deg_one_poly arg:a arg:b arguments arg arg Return return:yes Call"
  },
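A quick numerical check of the coefficient layout documented above: evaluate both degree-one polynomials at a random point and compare their product against the stacked degree-two coefficients (import path taken from the file_path field; it may differ across kornia versions):
```python
import torch
from kornia.geometry.solvers.polynomial_solver import multiply_deg_one_poly

a = torch.randn(1, 4)  # coefficients of a0*x + a1*y + a2*z + a3
b = torch.randn(1, 4)
x, y, z = torch.randn(3)

lhs = (a[0, 0] * x + a[0, 1] * y + a[0, 2] * z + a[0, 3]) * (
    b[0, 0] * x + b[0, 1] * y + b[0, 2] * z + b[0, 3]
)

# Monomial order matching the docstring: x^2, xy, xz, x, y^2, yz, y, z^2, z, 1
monomials = torch.stack(
    [x * x, x * y, x * z, x, y * y, y * z, y, z * z, z, torch.tensor(1.0)]
)
rhs = (multiply_deg_one_poly(a, b)[0] * monomials).sum()
torch.testing.assert_close(lhs, rhs)
```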
  {
    "library": "django",
    "name": "get_default_timezone",
    "source_code": "@functools.lru_cache\ndef get_default_timezone():\n    return zoneinfo.ZoneInfo(settings.TIME_ZONE)",
    "docstring": "Return the default time zone as a tzinfo instance. This is the time zone defined by settings.TIME_ZONE.",
    "type": "function",
    "file_path": "django\\django\\utils\\timezone.py",
    "ast_data": "FunctionDef name:get_default_timezone arguments Return return:yes Call"
  },
  {
    "library": "numpy",
    "name": "_dummy",
    "source_code": "@set_module('numpy.ctypeslib')\ndef _dummy(*args, **kwds):\n    raise ImportError('ctypes is not available.')",
    "docstring": "Dummy object that raises an ImportError if ctypes is not available. Raises ------ ImportError If ctypes is not available.",
    "type": "function",
    "file_path": "numpy\\numpy\\ctypeslib\\_ctypeslib.py",
    "ast_data": "FunctionDef name:_dummy arguments arg arg Raise Call Call"
  },
  {
    "library": "django",
    "name": "ahas_module_perms",
    "source_code": "async def ahas_module_perms(self, app_label):\n    if self.is_active and self.is_superuser:\n        return True\n    return await _auser_has_module_perms(self, app_label)",
    "docstring": "See has_module_perms()",
    "type": "method",
    "file_path": "django\\django\\contrib\\auth\\models.py",
    "ast_data": "AsyncFunctionDef name:ahas_module_perms arg:self arg:app_label arguments arg arg If BoolOp Return return:yes Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "can_encode",
    "source_code": "def can_encode(self, pyobj):\n    return isinstance(pyobj, self.type_spec_class)",
    "docstring": "Returns true if can be encoded as the built-in TypeSpec.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\saved_model\\nested_structure_coder.py",
    "ast_data": "FunctionDef name:can_encode arg:self arg:pyobj arguments arg arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "unsafe_op",
    "source_code": "@staticmethod\ndef unsafe_op(op):\n    if op.type == 'Assign':\n        return True\n    return False",
    "docstring": "Returns True if this op is not safe to be traced.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\tpu\\tensor_tracer.py",
    "ast_data": "FunctionDef name:unsafe_op arg:op arguments arg If Compare Return return:yes Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "get_variant",
    "source_code": "def get_variant(self):\n    return self._variant",
    "docstring": "Return the font variant. Values are: 'normal' or 'small-caps'.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\font_manager.py",
    "ast_data": "FunctionDef name:get_variant arg:self arguments arg Return return:yes"
  },
  {
    "library": "django",
    "name": "to_path",
    "source_code": "def to_path(value):\n    if isinstance(value, Path):\n        return value\n    elif not isinstance(value, str):\n        raise TypeError('Invalid path type: %s' % type(value).__name__)\n    return Path(value)",
    "docstring": "Convert value to a pathlib.Path instance, if not already a Path.",
    "type": "function",
    "file_path": "django\\django\\utils\\_os.py",
    "ast_data": "FunctionDef name:to_path arg:value arguments arg If Call Return return:yes If Call Raise Call Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_finalize_saved_model_layers",
    "source_code": "def _finalize_saved_model_layers(layers):\n    for layer in layers:\n        layer.built = True\n        layer_call = getattr(_get_keras_attr(layer), 'call_and_return_conditional_losses', None)\n        if layer_call and layer_call.concrete_functions:\n            layer.call = utils.use_wrapped_call(layer, layer_call, return_method=True)\n            expects_training_arg = layer._serialized_attributes['metadata']['expects_training_arg']\n            if 'training' in layer_call.function_spec.arg_names:\n                expects_training_arg = True\n            layer._init_call_fn_args(expects_training_arg)\n        else:\n            layer.call = types.MethodType(_unable_to_call_layer_due_to_serialization_issue, layer)\n    for layer in layers:\n        if isinstance(layer, RevivedNetwork):\n            _set_network_attributes_from_metadata(layer)\n            if hasattr(_get_keras_attr(layer), 'call_and_return_conditional_losses'):\n                call_fn = _get_keras_attr(layer).call_and_return_conditional_losses\n                if not call_fn.concrete_functions:\n                    continue\n                if call_fn.input_signature is None:\n                    inputs = infer_inputs_from_restored_call_function(call_fn)\n                else:\n                    inputs = call_fn.input_signature[0]\n                layer._set_inputs(inputs)\n        _restore_layer_unconditional_losses(layer)\n        _restore_layer_activation_loss(layer)\n        _restore_layer_metrics(layer)",
    "docstring": "Runs the final steps of loading Keras Layers from SavedModel.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\saving\\saved_model\\load.py",
    "ast_data": "FunctionDef name:_finalize_saved_model_layers arg:layers arguments arg For Assign Assign Call Call If BoolOp Assign Call Assign If Compare Assign Call Assign Call For If Call Call If Call Call Assign Call If If Compare Assign Call Assign Call Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "register_module_full_backward_hook",
    "source_code": "def register_module_full_backward_hook(hook: Callable[['Module', _grad_t, _grad_t], Union[None, _grad_t]]) -> RemovableHandle:\n    global _global_is_full_backward_hook\n    if _global_is_full_backward_hook is False:\n        raise RuntimeError('Cannot use both regular backward hooks and full backward hooks as a global Module hook. Please use only one of them.')\n    _global_is_full_backward_hook = True\n    handle = RemovableHandle(_global_backward_hooks)\n    _global_backward_hooks[handle.id] = hook\n    return handle",
    "docstring": "Register a backward hook common to all the modules. .. warning :: This adds global state to the module and it is only intended for debugging/profiling purposes. Hooks registered using this function behave in the same way as those registered by :meth:. Refer to its documentation for more details. Hooks registered using this function will be called before hooks registered using :meth:. Returns: :class:: a handle that can be used to remove the added hook by calling ``",
    "type": "function",
    "file_path": "pytorch\\torch\\nn\\modules\\module.py",
    "ast_data": "FunctionDef name:register_module_full_backward_hook arg:hook arguments arg If Compare Raise Call Assign Assign Call Assign Return return:yes"
  },
  {
    "library": "cherrypy",
    "name": "peek",
    "source_code": "def peek(self, s):\n    if len(s) > self.peek_length + 3:\n        half = self.peek_length // 2\n        return s[:half] + '...' + s[-half:]\n    else:\n        return s",
    "docstring": "Return s, restricted to a sane length.",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\lib\\gctools.py",
    "ast_data": "FunctionDef name:peek arg:self arg:s arguments arg arg If Compare Call Assign Return return:yes Return return:yes"
  },
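`peek` reads `self.peek_length` from its instance; a minimal stand-in class (hypothetical, not part of cherrypy) makes the truncation behaviour easy to see:

```python
class Peeker:
    # Hypothetical host object: gctools' real class supplies peek_length the same way.
    peek_length = 8

    def peek(self, s):
        # Strings longer than peek_length + 3 are shortened around an ellipsis.
        if len(s) > self.peek_length + 3:
            half = self.peek_length // 2
            return s[:half] + '...' + s[-half:]
        return s

p = Peeker()
print(p.peek("short"))              # 'short': under the limit, returned unchanged
print(p.peek("a" * 20 + "b" * 20))  # 'aaaa...bbbb': half the budget from each end
```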
  {
    "library": "matplotlib",
    "name": "get_fontfamily",
    "source_code": "def get_fontfamily(self):\n    return self._fontproperties.get_family()",
    "docstring": "Return the list of font families used for font lookup. See Also -------- .font_manager.FontProperties.get_family",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\text.py",
    "ast_data": "FunctionDef name:get_fontfamily arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "numpy",
    "name": "replace_parenthesis",
    "source_code": "def replace_parenthesis(s):\n    left, right = (None, None)\n    mn_i = len(s)\n    for left_, right_ in (('(/', '/)'), '()', '{}', '[]'):\n        i = s.find(left_)\n        if i == -1:\n            continue\n        if i < mn_i:\n            mn_i = i\n            left, right = (left_, right_)\n    if left is None:\n        return (s, {})\n    i = mn_i\n    j = s.find(right, i)\n    while s.count(left, i + 1, j) != s.count(right, i + 1, j):\n        j = s.find(right, j + 1)\n        if j == -1:\n            raise ValueError(f'Mismatch of {left + right} parenthesis in {s!r}')\n    p = {'(': 'ROUND', '[': 'SQUARE', '{': 'CURLY', '(/': 'ROUNDDIV'}[left]\n    k = f'@__f2py_PARENTHESIS_{p}_{COUNTER.__next__()}@'\n    v = s[i + len(left):j]\n    r, d = replace_parenthesis(s[j + len(right):])\n    d[k] = v\n    return (s[:i] + k + r, d)",
    "docstring": "Replace substrings of input that are enclosed in parenthesis. Return a new string and a mapping of replacements.",
    "type": "function",
    "file_path": "numpy\\numpy\\f2py\\symbolic.py",
    "ast_data": "FunctionDef name:replace_parenthesis arg:s arguments arg Assign Assign Call For Assign Call If Compare If Compare Assign Assign If Compare Return return:yes Assign Assign Call While Compare Call Call Assign Call If Compare Raise Call Assign Assign Call Assign Call Assign Call Call Assign Return return:yes"
  },
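A small demonstration of `replace_parenthesis`; note the counter in the placeholder key is process-global, so the exact number varies between runs:

```python
from numpy.f2py.symbolic import replace_parenthesis

new_s, mapping = replace_parenthesis('f(a, g(b)) + x')
print(new_s)    # e.g. 'f@__f2py_PARENTHESIS_ROUND_0@ + x' (counter value may differ)
print(mapping)  # {'@__f2py_PARENTHESIS_ROUND_0@': 'a, g(b)'}
```

The `while` loop is what keeps `g(b)` intact inside the replaced value: it keeps extending `j` until the open and close counts inside the span balance.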
  {
    "library": "tensorflow",
    "name": "cond",
    "source_code": "def cond(pred, true_fn, false_fn):\n    v = get_static_value(pred)\n    if v is None:\n        return tf_cond.cond(pred, true_fn, false_fn)\n    if v:\n        return true_fn()\n    else:\n        return false_fn()",
    "docstring": "A version of tf.cond that tries to evaluate the condition.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\numpy_ops\\np_utils.py",
    "ast_data": "FunctionDef name:cond arg:pred arg:true_fn arg:false_fn arguments arg arg arg Assign Call If Compare Return return:yes Call If Return return:yes Call Return return:yes Call"
  },
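The value of this wrapper is the static short-circuit: if `pred` is known at trace time, no conditional op is emitted at all. A framework-free sketch of the same pattern (names here are illustrative, not TensorFlow's):

```python
def get_static_value(pred):
    # Stand-in for TF's constant folding: only plain Python bools are "static" here.
    return pred if isinstance(pred, bool) else None

def cond(pred, true_fn, false_fn, dynamic_cond=None):
    v = get_static_value(pred)
    if v is None:
        # Not known until runtime: defer to the real conditional (tf_cond.cond upstream).
        return dynamic_cond(pred, true_fn, false_fn)
    # Known now: call the chosen branch directly, never building the other one.
    return true_fn() if v else false_fn()

print(cond(True, lambda: "taken", lambda: "skipped"))  # resolved without a dynamic cond
```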
  {
    "library": "scipy",
    "name": "NoConvergence",
    "source_code": "class NoConvergence(Exception):\n    pass",
    "docstring": "Exception raised when nonlinear solver fails to converge within the specified .",
    "type": "class",
    "file_path": "scipy\\scipy\\optimize\\_nonlin.py",
    "ast_data": "ClassDef name:NoConvergence"
  },
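A usage sketch, assuming `NoConvergence` is importable from `scipy.optimize` alongside the nonlinear solvers that raise it:

```python
import numpy as np
from scipy.optimize import newton_krylov, NoConvergence

# x**2 + 1 has no real root, so the residual can never reach zero.
try:
    newton_krylov(lambda x: x**2 + 1, np.array([1.0]), maxiter=5)
except NoConvergence as exc:
    print("stopped at iterate:", exc.args[0])  # the solver attaches the last iterate
```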
  {
    "library": "seaborn",
    "name": "set_axis_labels",
    "source_code": "def set_axis_labels(self, x_var=None, y_var=None, clear_inner=True, **kwargs):\n    if x_var is not None:\n        self._x_var = x_var\n        self.set_xlabels(x_var, clear_inner=clear_inner, **kwargs)\n    if y_var is not None:\n        self._y_var = y_var\n        self.set_ylabels(y_var, clear_inner=clear_inner, **kwargs)\n    return self",
    "docstring": "Set axis labels on the left column and bottom row of the grid.",
    "type": "method",
    "file_path": "seaborn\\seaborn\\axisgrid.py",
    "ast_data": "FunctionDef name:set_axis_labels arg:self arg:x_var arg:y_var arg:clear_inner arguments arg arg arg arg arg If Compare Assign Call If Compare Assign Call Return return:yes"
  },
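Typical use on a `FacetGrid`, assuming seaborn's bundled `tips` dataset is available; because the method returns `self`, it chains with the other grid setters:

```python
import seaborn as sns

tips = sns.load_dataset("tips")
g = sns.FacetGrid(tips, col="time", row="sex")
g.map_dataframe(sns.scatterplot, x="total_bill", y="tip")
# Labels land only on the left column and bottom row; inner labels are cleared.
g.set_axis_labels("Total bill (USD)", "Tip (USD)").set_titles("{row_name} / {col_name}")
```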
  {
    "library": "tensorflow",
    "name": "list_inputs",
    "source_code": "def list_inputs(self, args, screen_info=None):\n    _ = screen_info\n    parsed = self._arg_parsers['list_inputs'].parse_args(args)\n    output = self._list_inputs_or_outputs(parsed.recursive, parsed.node_name, parsed.depth, parsed.control, parsed.op_type, do_outputs=False)\n    node_name = debug_graphs.get_node_name(parsed.node_name)\n    _add_main_menu(output, node_name=node_name, enable_list_inputs=False)\n    return output",
    "docstring": "Command handler for inputs. Show inputs to a given node. Args: args: Command-line arguments, excluding the command prefix, as a list of str. screen_info: Optional dict input containing screen information such as cols. Returns: Output text lines as a RichTextLines object.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\debug\\cli\\analyzer_cli.py",
    "ast_data": "FunctionDef name:list_inputs arg:self arg:args arg:screen_info arguments arg arg arg Assign Assign Call Assign Call Assign Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_component_specs",
    "source_code": "@abc.abstractproperty\ndef _component_specs(self):\n    raise NotImplementedError('%s._component_specs()' % type(self).__name__)",
    "docstring": "A nested structure of TypeSpecs for this type's components. Returns: A nested structure describing the component encodings that are returned by this TypeSpec's method. In particular, for a TypeSpec and a compatible value :",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\type_spec.py",
    "ast_data": "FunctionDef name:_component_specs arg:self arguments arg Raise Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_tensor_size",
    "source_code": "def _tensor_size(self):\n    if not self.shape.is_fully_defined():\n        return None\n    return np.prod(self.shape.as_list())",
    "docstring": "Returns the number of elements in this Tensor, if fully known.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\numpy_ops\\np_math_ops.py",
    "ast_data": "FunctionDef name:_tensor_size arg:self arguments arg If Call Return return:no Return return:yes Call Call"
  },
  {
    "library": "scikit-learn",
    "name": "fit",
    "source_code": "def fit(self, raw_documents, y=None):\n    self.fit_transform(raw_documents)\n    return self",
    "docstring": "Learn a vocabulary dictionary of all tokens in the raw documents. Parameters ---------- raw_documents : iterable An iterable which generates either str, unicode or file objects. y : None This parameter is ignored. Returns ------- self : object Fitted vectorizer.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\feature_extraction\\text.py",
    "ast_data": "FunctionDef name:fit arg:self arg:raw_documents arg:y arguments arg arg arg Call Return return:yes"
  },
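`fit` is a thin wrapper that discards `fit_transform`'s document-term matrix and returns the fitted vectorizer, which is exactly what you want when only the vocabulary matters:

```python
from sklearn.feature_extraction.text import CountVectorizer

docs = ["the cat sat", "the dog sat", "the dog barked"]
vec = CountVectorizer().fit(docs)   # learns the vocabulary, ignores the matrix
print(sorted(vec.vocabulary_))      # ['barked', 'cat', 'dog', 'sat', 'the']
```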
  {
    "library": "scrapy",
    "name": "_get_serialized_fields",
    "source_code": "def _get_serialized_fields(self, item: Any, default_value: Any=None, include_empty: bool | None=None) -> Iterable[tuple[str, Any]]:\n    item = ItemAdapter(item)\n    if include_empty is None:\n        include_empty = self.export_empty_fields\n    if self.fields_to_export is None:\n        field_iter = item.field_names() if include_empty else item.keys()\n    elif isinstance(self.fields_to_export, Mapping):\n        if include_empty:\n            field_iter = self.fields_to_export.items()\n        else:\n            field_iter = ((x, y) for x, y in self.fields_to_export.items() if x in item)\n    elif include_empty:\n        field_iter = self.fields_to_export\n    else:\n        field_iter = (x for x in self.fields_to_export if x in item)\n    for field_name in field_iter:\n        if isinstance(field_name, str):\n            item_field, output_field = (field_name, field_name)\n        else:\n            item_field, output_field = field_name\n        if item_field in item:\n            field_meta = item.get_field_meta(item_field)\n            value = self.serialize_field(field_meta, output_field, item[item_field])\n        else:\n            value = default_value\n        yield (output_field, value)",
    "docstring": "Return the fields to export as an iterable of tuples (name, serialized_value)",
    "type": "method",
    "file_path": "scrapy\\scrapy\\exporters.py",
    "ast_data": "FunctionDef name:_get_serialized_fields arg:self arg:item arg:default_value arg:include_empty arguments arg arg arg arg Assign Call If Compare Assign If Compare Assign Call Call If Call If Assign Call Assign Call Compare If Assign Assign Compare For If Call Assign Assign If Compare Assign Call Assign Call Assign"
  },
  {
    "library": "pytorch",
    "name": "_report_compression_stats",
    "source_code": "def _report_compression_stats(bucket, state):\n    if bucket.is_last() and state.iter >= state.next_stats_report:\n        stats = state.compression_stats()\n        logger.info('Compression stats: iter %s, total before compression %s, total after compression %s, rate %s', state.iter, stats[1], stats[2], stats[0])\n        state.next_stats_report = state.iter + state.compression_stats_logging_frequency",
    "docstring": "Report compression stats at frequency of `` specified in PowerSGD state.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\algorithms\\ddp_comm_hooks\\powerSGD_hook.py",
    "ast_data": "FunctionDef name:_report_compression_stats arg:bucket arg:state arguments arg arg If BoolOp Call Compare Assign Call Call Assign"
  },
  {
    "library": "pytorch",
    "name": "get_node_context",
    "source_code": "def get_node_context(node, num_nodes=2) -> str:\n    node_contexts = []\n    cur = node\n    for _ in range(num_nodes):\n        node_contexts.append(cur.format_node())\n        if cur.op == 'root':\n            break\n        cur = cur.prev\n    return '\\n'.join(node_contexts[::-1])",
    "docstring": "Returns a string of the last num_nodes nodes in the graph.",
    "type": "function",
    "file_path": "pytorch\\torch\\fx\\_utils.py",
    "ast_data": "FunctionDef name:get_node_context arg:node arg:num_nodes arguments arg arg Assign Assign For Call Call Call If Compare Assign Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "is_compiling",
    "source_code": "def is_compiling() -> bool:\n    if torch.jit.is_scripting():\n        return False\n    else:\n        return _is_compiling_flag",
    "docstring": "Indicates whether a graph is executed/traced as part of torch.compile() or torch.export(). Note that there are 2 other related flags that should deprecated eventually: * torch._dynamo.external_utils.is_compiling() * torch._utils.is_compiling() Example:: >>> def forward(self, x): >>> if not torch.compiler.is_compiling(): >>> pass # ...logic that is not needed in a compiled/traced graph... >>> >>> # ...rest of the function...",
    "type": "function",
    "file_path": "pytorch\\torch\\compiler\\__init__.py",
    "ast_data": "FunctionDef name:is_compiling arguments If Call Return return:yes Return return:yes"
  },
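A runnable version of the pattern the docstring sketches, assuming a torch build where `torch.compiler.is_compiling` and `torch.compile` are both available:

```python
import torch

def forward(x):
    if not torch.compiler.is_compiling():
        # Eager-only side effect that should stay out of the traced graph.
        print("running eagerly")
    return x * 2

forward(torch.ones(2))                  # prints, then computes
torch.compile(forward)(torch.ones(2))   # the guarded branch is skipped during tracing
```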
  {
    "library": "django",
    "name": "pre_sql_setup",
    "source_code": "def pre_sql_setup(self, with_col_aliases=False):\n    self.setup_query(with_col_aliases=with_col_aliases)\n    order_by = self.get_order_by()\n    self.where, self.having, self.qualify = self.query.where.split_having_qualify(must_group_by=self.query.group_by is not None)\n    extra_select = self.get_extra_select(order_by, self.select)\n    self.has_extra_select = bool(extra_select)\n    group_by = self.get_group_by(self.select + extra_select, order_by)\n    return (extra_select, order_by, group_by)",
    "docstring": "Do any necessary class setup immediately prior to producing SQL. This is for things that can't necessarily be done in __init__ because we might not have all the pieces in place at that time.",
    "type": "method",
    "file_path": "django\\django\\db\\models\\sql\\compiler.py",
    "ast_data": "FunctionDef name:pre_sql_setup arg:self arg:with_col_aliases arguments arg arg Call Assign Call Assign Call Compare Assign Call Assign Call Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, boundaries, values, name=None):\n    super(PiecewiseConstantDecay, self).__init__()\n    if len(boundaries) != len(values) - 1:\n        raise ValueError('The length of boundaries should be 1 less than the length of values')\n    self.boundaries = boundaries\n    self.values = values\n    self.name = name",
    "docstring": "Piecewise constant from boundaries and interval values. Args: boundaries: A list of s or s or s with strictly increasing entries, and with all elements having the same type as the optimizer step. values: A list of s or s or s that specifies the values for the intervals defined by . It should have one more element than , and all elements should have the same type. name: A string. Optional name of the operation. Defaults to 'PiecewiseConstant'. Raises: ValueError: if the number of elements in the lists do not match.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\optimizer_v2\\learning_rate_schedule.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:boundaries arg:values arg:name arguments arg arg arg arg Call Call If Compare Call Call Raise Call Assign Assign Assign"
  },
  {
    "library": "pytorch",
    "name": "WrapperModule",
    "source_code": "class WrapperModule:\n\n    def __init__(self, wrapped_type, module_config, debug, save=False):\n        pt_fn = module_config.pt_fn\n        self.module = wrapped_type(pt_fn)\n        self.tensor_inputs = []\n        self.module_name = wrapped_type.__name__\n        for _ in range(module_config.num_params):\n            self.tensor_inputs.append(torch.randn(1))\n        if module_config.graph_mode:\n            self.module = torch.jit.trace(self.module, self.tensor_inputs)\n            if save:\n                file_name = self.module_name + '_' + pt_fn.__name__ + '.pt'\n                torch.jit.save(self.module, file_name)\n                print(f'Generated graph is saved in {file_name}')\n        print(f'Benchmarking module {self.module_name} with fn {pt_fn.__name__}: Graph mode:{module_config.graph_mode}')\n        if debug and isinstance(self.module, torch.jit.ScriptModule):\n            print(self.module.graph)\n            print(self.module.code)\n\n    def forward(self, niters):\n        with torch.no_grad():\n            for _ in range(niters):\n                self.module.forward(*self.tensor_inputs)",
    "docstring": "Wraps the instance of wrapped_type. For graph_mode traces the instance of wrapped_type. Randomaly initializes num_params tensors with single float element. Args: wrapped_type: - Object type to be wrapped. Expects the wrapped_type to: - be constructed with pt_fn specified in module_config. - provide forward method that takes module_config.num_params args. module_config: - Specified pt_fn to construct wrapped_type with, whether graph_mode is enabled, and number of parameters wrapped_type's forward method takes. debug: - Whether debug mode is enabled. save: - In graph mode, whether graph is to be saved.",
    "type": "class",
    "file_path": "pytorch\\benchmarks\\framework_overhead_benchmark\\pt_wrapper_module.py",
    "ast_data": "ClassDef name:WrapperModule FunctionDef name:__init__ arg:self arg:wrapped_type arg:module_config arg:debug arg:save arguments arg arg arg arg arg Assign Assign Call Assign Assign For Call Call Call If Assign Call If Assign Call Call Call If BoolOp Call Call Call FunctionDef name:forward arg:self arg:niters arguments arg arg With Call For Call Call"
  },
  {
    "library": "pandas",
    "name": "clipboard",
    "source_code": "@contextlib.contextmanager\ndef clipboard(hwnd):\n    t = time.time() + 0.5\n    success = False\n    while time.time() < t:\n        success = OpenClipboard(hwnd)\n        if success:\n            break\n        time.sleep(0.01)\n    if not success:\n        raise PyperclipWindowsException('Error calling OpenClipboard')\n    try:\n        yield\n    finally:\n        safeCloseClipboard()",
    "docstring": "Context manager that opens the clipboard and prevents other applications from modifying the clipboard content.",
    "type": "function",
    "file_path": "pandas\\pandas\\io\\clipboard\\__init__.py",
    "ast_data": "FunctionDef name:clipboard arg:hwnd arguments arg Assign Call Assign While Compare Call Assign Call If Call If Raise Call Try Call"
  },
  {
    "library": "pandas",
    "name": "reorder_categories",
    "source_code": "def reorder_categories(self, new_categories, ordered=None) -> Self:\n    if len(self.categories) != len(new_categories) or not self.categories.difference(new_categories).empty:\n        raise ValueError('items in new_categories are not the same as in old categories')\n    return self.set_categories(new_categories, ordered=ordered)",
    "docstring": "Reorder categories as specified in new_categories. `pandas.Series`: >>> ser = pd.Series([\"a\", \"b\", \"c\", \"a\"], dtype=\"category\") >>> ser = ser.cat.reorder_categories([\"c\", \"b\", \"a\"], ordered=True) >>> ser 0 a 1 b 2 c 3 a dtype: category Categories (3, object): ['c' >> ser.sort_values() 2 c 1 b 0 a 3 a dtype: category Categories (3, object): ['c' >> ci = pd.CategoricalIndex([\"a\", \"b\", \"c\", \"a\"]) >>> ci CategoricalIndex(['a', 'b', 'c', 'a'], categories=['a', 'b', 'c'], ordered=False, dtype='category') >>> ci.reorder_categories([\"c\", \"b\", \"a\"], ordered=True) CategoricalIndex(['a', 'b', 'c', 'a'], categories=['c', 'b', 'a'], ordered=True, dtype='category')",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\arrays\\categorical.py",
    "ast_data": "FunctionDef name:reorder_categories arg:self arg:new_categories arg:ordered arguments arg arg arg If BoolOp Compare Call Call Call Raise Call Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "metadata",
    "source_code": "@property\n@abstractmethod\ndef metadata(self) -> dict[str, Any]:\n    pass",
    "docstring": "The metadata for the data frame, as a dictionary with string keys. The contents of may be anything, they are meant for a library to store information that it needs to, e.g., roundtrip losslessly or for two implementations to share data that is not (yet) part of the interchange protocol specification. For avoiding collisions with other entries, please add name the keys with the name of the library followed by a period and the desired name, e.g, ``.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\interchange\\dataframe_protocol.py",
    "ast_data": "FunctionDef name:metadata arg:self arguments arg"
  },
  {
    "library": "tensorflow",
    "name": "_MaximumGrad",
    "source_code": "@ops.RegisterGradient('Maximum')\ndef _MaximumGrad(op: ops.Operation, grad):\n    return _MaximumMinimumGrad(op, grad, math_ops.greater_equal)",
    "docstring": "Returns grad*(x >= y, x < y) with type of grad.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\math_grad.py",
    "ast_data": "FunctionDef name:_MaximumGrad arg:op arg:grad arguments arg arg Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "set_warn_always",
    "source_code": "def set_warn_always(b: builtins.bool, /) -> None:\n    _C._set_warnAlways(b)",
    "docstring": "When this flag is False (default) then some PyTorch warnings may only appear once per process. This helps avoid excessive warning information. Setting it to True causes these warnings to always appear, which may be helpful when debugging. Args: b (:class:): If True, force warnings to always be emitted If False, set to the default behaviour",
    "type": "function",
    "file_path": "pytorch\\torch\\__init__.py",
    "ast_data": "FunctionDef name:set_warn_always arguments arg Call"
  },
  {
    "library": "tensorflow",
    "name": "element_shape",
    "source_code": "@property\ndef element_shape(self):\n    return self._implementation.element_shape",
    "docstring": "The of elements in this TensorArray.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\tensor_array_ops.py",
    "ast_data": "FunctionDef name:element_shape arg:self arguments arg Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "get_autoscale_on",
    "source_code": "def get_autoscale_on(self):\n    return all((axis._get_autoscale_on() for axis in self._axis_map.values()))",
    "docstring": "Return True if each axis is autoscaled, False otherwise.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\axes\\_base.py",
    "ast_data": "FunctionDef name:get_autoscale_on arg:self arguments arg Return return:yes Call Call Call"
  },
  {
    "library": "seaborn",
    "name": "saturate",
    "source_code": "def saturate(color):\n    return set_hls_values(color, s=1)",
    "docstring": "Return a fully saturated color with the same hue. Parameters ---------- color : matplotlib color hex, rgb-tuple, or html color name Returns ------- new_color : rgb tuple saturated color code in RGB tuple representation",
    "type": "function",
    "file_path": "seaborn\\seaborn\\utils.py",
    "ast_data": "FunctionDef name:saturate arg:color arguments arg Return return:yes Call"
  },
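Usage is a single call; `saturate` (and its counterpart `desaturate`) are thin wrappers over `set_hls_values`:

```python
import seaborn as sns

print(sns.saturate("lightblue"))   # RGB tuple with saturation forced to 1, hue preserved
```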
  {
    "library": "tensorflow",
    "name": "_get_all_ranks",
    "source_code": "def _get_all_ranks(st):\n    fields = st.field_names()\n    all_ranks = {(): st.rank}\n    for k in fields:\n        v = st.field_value(k)\n        if isinstance(v, StructuredTensor):\n            for k2, v2 in _get_all_ranks(v).items():\n                all_ranks[(k,) + k2] = v2\n    return all_ranks",
    "docstring": "Get ranks of all submessages of a StructuredTensor.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\structured\\structured_array_ops.py",
    "ast_data": "FunctionDef name:_get_all_ranks arg:st arguments arg Assign Call Assign For Assign Call If Call For Call Call Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_build_network_on_replica",
    "source_code": "def _build_network_on_replica(model, mode, inputs=None, targets=None):\n    from tensorflow.python.keras import models\n    from tensorflow.python.keras.engine import sequential\n    if isinstance(model, sequential.Sequential):\n        updated_model = models._clone_sequential_model(model, input_tensors=inputs, layer_fn=models.share_weights)\n    else:\n        updated_model = models._clone_functional_model(model, input_tensors=inputs, layer_fn=models.share_weights)\n        updated_model._callable_losses = model._callable_losses\n\n    def _upcast_low_precision_outputs(output):\n        if output.dtype == dtypes.bfloat16:\n            return math_ops.cast(output, dtypes.float32)\n        else:\n            return output\n    updated_model.outputs = [_upcast_low_precision_outputs(o) for o in updated_model.outputs]\n    if isinstance(targets, tuple):\n        targets = nest.flatten(targets)\n    if mode == ModeKeys.PREDICT and inputs is not None:\n        _custom_compile_for_predict(updated_model)\n    else:\n        updated_model.compile(model.optimizer, model.loss, metrics=metrics_module.clone_metrics(model._compile_metrics), loss_weights=model.loss_weights, sample_weight_mode=model.sample_weight_mode, weighted_metrics=metrics_module.clone_metrics(model._compile_weighted_metrics), target_tensors=targets)\n    return updated_model",
    "docstring": "Build an updated model on replicas. We create a new Keras model while sharing the variables from the old graph. Building a new sub-graph is required since the original keras model creates placeholders for the input and the output that are not accessible till we call iterator.get_next() inside the step_fn for //. The sharing of weights and layers between the old and the new model guarantee that we're using Strategy variables and any updates on either model are reflected correctly in callbacks and loop iterations. We need to make sure we share the optimizers between the old and the new model as well so that optimizer state is not lost if the user is running fit multiple times. Args: model: Model to be replicated across Replicas mode: Which of fit/eval/predict is building the distributed network inputs: Input variables to be passed to the model targets: Target tensor to be passed to model.compile Returns: A new model with shared layers with the old model.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\distribute\\distributed_training_utils_v1.py",
    "ast_data": "FunctionDef name:_build_network_on_replica arg:model arg:mode arg:inputs arg:targets arguments arg arg arg arg If Call Assign Call Assign Call Assign FunctionDef name:_upcast_low_precision_outputs arg:output arguments arg If Compare Return return:yes Call Return return:yes Assign Call If Call Assign Call If BoolOp Compare Compare Call Call Call Call Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "_n_features_out",
    "source_code": "@property\ndef _n_features_out(self):\n    return self.components_.shape[0]",
    "docstring": "Number of transformed output features.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\neighbors\\_nca.py",
    "ast_data": "FunctionDef name:_n_features_out arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "track_nodes_created_by_last_call",
    "source_code": "def track_nodes_created_by_last_call(layer, created_nodes):\n    if not layer._inbound_nodes:\n        return\n    created_nodes.add(layer._inbound_nodes[-1])\n    prev_layers = layer._inbound_nodes[-1].inbound_layers\n    for prev_layer in nest.flatten(prev_layers):\n        if prev_layer._outbound_nodes:\n            created_nodes.add(prev_layer._outbound_nodes[-1])",
    "docstring": "Adds to the nodes created by the last call to .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\sequential.py",
    "ast_data": "FunctionDef name:track_nodes_created_by_last_call arg:layer arg:created_nodes arguments arg arg If Return return:no Call Assign For Call If Call"
  },
  {
    "library": "pytorch",
    "name": "_get_ddp_logging_data",
    "source_code": "def _get_ddp_logging_data(self):\n    assert self.logger is not None\n    ddp_logging_data = self.logger._get_ddp_logging_data()\n    return {**ddp_logging_data.strs_map, **ddp_logging_data.ints_map}",
    "docstring": "Return a dictionary of logging data for debugging and analysis. This interface can be called after DistributedDataParallel() is constructed. It returns a dictionary of logging data. It could help for debugging and analysis. The logging data includes DistributedDataParallel constructor input parameters, some internal states of DistributedDataParallel and performance metrics. Simply print the dictionary and see what these metrics are. This is a prototype interface and subject to change in the future.",
    "type": "method",
    "file_path": "pytorch\\torch\\nn\\parallel\\distributed.py",
    "ast_data": "FunctionDef name:_get_ddp_logging_data arg:self arguments arg Compare Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_get_help_for_command_prefix",
    "source_code": "def _get_help_for_command_prefix(self, cmd_prefix):\n    lines = []\n    resolved_prefix = self._resolve_prefix(cmd_prefix)\n    if not resolved_prefix:\n        lines.append('Invalid command prefix: \"%s\"' % cmd_prefix)\n        return lines\n    lines.append(resolved_prefix)\n    if resolved_prefix in self._prefix_to_aliases:\n        lines.append(HELP_INDENT + 'Aliases: ' + ', '.join(self._prefix_to_aliases[resolved_prefix]))\n    lines.append('')\n    help_lines = self._prefix_to_help[resolved_prefix].split('\\n')\n    for line in help_lines:\n        lines.append(HELP_INDENT + line)\n    return lines",
    "docstring": "Compile the help information for a given command prefix. Args: cmd_prefix: Command prefix, as the prefix itself or one of its aliases. Returns: A list of str as the help information for cmd_prefix. If the cmd_prefix does not exist, the returned list of str will indicate that.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\debug\\cli\\debugger_cli_common.py",
    "ast_data": "FunctionDef name:_get_help_for_command_prefix arg:self arg:cmd_prefix arguments arg arg Assign Assign Call If Call Return return:yes Call If Compare Call Call Call Assign Call For Call Return return:yes"
  },
  {
    "library": "pandas",
    "name": "__dlpack__",
    "source_code": "def __dlpack__(self) -> Any:\n    return self._x.__dlpack__()",
    "docstring": "Represent this structure as DLPack interface.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\interchange\\buffer.py",
    "ast_data": "FunctionDef name:__dlpack__ arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "django",
    "name": "_coerce_field_name",
    "source_code": "def _coerce_field_name(field_name, field_index):\n    if callable(field_name):\n        if field_name.__name__ == '<lambda>':\n            return 'lambda' + str(field_index)\n        else:\n            return field_name.__name__\n    return field_name",
    "docstring": "Coerce a field_name (which may be a callable) to a string.",
    "type": "function",
    "file_path": "django\\django\\contrib\\admin\\templatetags\\admin_list.py",
    "ast_data": "FunctionDef name:_coerce_field_name arg:field_name arg:field_index arguments arg arg If Call If Compare Return return:yes Call Return return:yes Return return:yes"
  },
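A quick sketch of the three branches; the helper is private to the admin template tags, so the import path is internal:

```python
from django.contrib.admin.templatetags.admin_list import _coerce_field_name  # private

print(_coerce_field_name("created", 0))       # 'created': plain strings pass through

def full_name(obj):
    return f"{obj.first} {obj.last}"

print(_coerce_field_name(full_name, 1))       # 'full_name': named callables use __name__
print(_coerce_field_name(lambda o: o.pk, 2))  # 'lambda2': anonymous callables get the index
```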
  {
    "library": "pandas",
    "name": "_replace_coerce",
    "source_code": "@final\ndef _replace_coerce(self, to_replace, value, mask: npt.NDArray[np.bool_], inplace: bool=True, regex: bool=False) -> list[Block]:\n    if should_use_regex(regex, to_replace):\n        return self._replace_regex(to_replace, value, inplace=inplace, mask=mask)\n    else:\n        if value is None:\n            if mask.any():\n                has_ref = self.refs.has_reference()\n                nb = self.astype(np.dtype(object))\n                if not inplace:\n                    nb = nb.copy()\n                elif inplace and has_ref and nb.refs.has_reference():\n                    nb = nb.copy()\n                putmask_inplace(nb.values, mask, value)\n                return [nb]\n            return [self.copy(deep=False)]\n        return self.replace(to_replace=to_replace, value=value, inplace=inplace, mask=mask)",
    "docstring": "Replace value corresponding to the given boolean array with another value. Parameters ---------- to_replace : object or pattern Scalar to replace or regular expression to match. value : object Replacement object. mask : np.ndarray[bool] True indicate corresponding element is ignored. inplace : bool, default True Perform inplace modification. regex : bool, default False If true, perform regular expression substitution. Returns ------- List[Block]",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\internals\\blocks.py",
    "ast_data": "FunctionDef name:_replace_coerce arg:self arg:to_replace arg:value arg:mask arg:inplace arg:regex arguments arg arg arg arg arg arg If Call Return return:yes Call If Compare If Call Assign Call Assign Call Call If Assign Call If BoolOp Call Assign Call Call Return return:yes Return return:yes Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "count_up_to",
    "source_code": "@deprecated(None, 'Prefer Dataset.range instead.')\ndef count_up_to(self, limit):\n    return state_ops.count_up_to(self._variable, limit=limit)",
    "docstring": "Increments this variable until it reaches . When that Op is run it tries to increment the variable by . If incrementing the variable would bring it above then the Op raises the exception . If no error is raised, the Op outputs the value of the variable before the increment. This is essentially a shortcut for . Args: limit: value at which incrementing the variable raises an error. Returns: A that will hold the variable value before the increment. If no other Op modifies this variable, the values produced will all be distinct.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\ref_variable.py",
    "ast_data": "FunctionDef name:count_up_to arg:self arg:limit arguments arg arg Return return:yes Call Call"
  },
  {
    "library": "pandas",
    "name": "infer_fill_value",
    "source_code": "def infer_fill_value(val):\n    if not is_list_like(val):\n        val = [val]\n    val = np.asarray(val)\n    if val.dtype.kind in 'mM':\n        return np.array('NaT', dtype=val.dtype)\n    elif val.dtype == object:\n        dtype = lib.infer_dtype(ensure_object(val), skipna=False)\n        if dtype in ['datetime', 'datetime64']:\n            return np.array('NaT', dtype=DT64NS_DTYPE)\n        elif dtype in ['timedelta', 'timedelta64']:\n            return np.array('NaT', dtype=TD64NS_DTYPE)\n        return np.array(np.nan, dtype=object)\n    elif val.dtype.kind == 'U':\n        return np.array(np.nan, dtype=val.dtype)\n    return np.nan",
    "docstring": "infer the fill value for the nan/NaT from the provided scalar/ndarray/list-like if we are a NaT, return the correct dtyped element to provide proper block construction",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\dtypes\\missing.py",
    "ast_data": "FunctionDef name:infer_fill_value arg:val arguments arg If Call Assign Assign Call If Compare Return return:yes Call If Compare Assign Call Call If Compare Return return:yes Call If Compare Return return:yes Call Return return:yes Call If Compare Return return:yes Call Return return:yes"
  },
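A sketch of the dtype dispatch; the helper is private, so import it from `pandas.core.dtypes.missing` with that caveat:

```python
import numpy as np
from pandas.core.dtypes.missing import infer_fill_value  # private helper

print(infer_fill_value(np.datetime64("2024-01-01")))  # array('NaT', dtype='datetime64[D]')
print(infer_fill_value(np.timedelta64(1, "s")))       # array('NaT', dtype='timedelta64[s]')
print(infer_fill_value(3.5))                          # nan: plain numerics fall through
```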
  {
    "library": "tensorflow",
    "name": "_with_num_row_partitions",
    "source_code": "def _with_num_row_partitions(self, new_num_row_partitions: int) -> 'DynamicRaggedShape.Spec':\n    rank = self.rank\n    if rank is None:\n        raise ValueError('Changing num_row_partitions with unknown rank unsupported')\n    if new_num_row_partitions > max(rank - 1, 0):\n        raise ValueError('Number of row partitions too large')\n    if new_num_row_partitions < 0:\n        raise ValueError('Number of row partitions negative')\n    if self.num_row_partitions == new_num_row_partitions:\n        return self\n    elif self.num_row_partitions < new_num_row_partitions:\n        rp_delta = new_num_row_partitions - self.num_row_partitions\n        tail_shape = DynamicRaggedShape.Spec._from_tensor_shape(self._static_inner_shape, rp_delta, self.dtype)\n        return DynamicRaggedShape.Spec(row_partitions=self._row_partitions + tail_shape._row_partitions, static_inner_shape=tail_shape._static_inner_shape, dtype=self.dtype)\n    else:\n        assert self.num_row_partitions > new_num_row_partitions\n        new_row_partitions = self._row_partitions[:new_num_row_partitions]\n        last_row_partition = new_row_partitions[-1]\n        old_row_partitions = self._row_partitions[new_num_row_partitions:]\n        new_static_inner_shape = tensor_shape.TensorShape([last_row_partition.nvals] + [x.uniform_row_length for x in old_row_partitions]) + self._static_inner_shape[1:]\n        return DynamicRaggedShape.Spec(new_row_partitions, new_static_inner_shape, self.dtype)",
    "docstring": "Change the number of row partitions in the spec.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\dynamic_ragged_shape.py",
    "ast_data": "FunctionDef name:_with_num_row_partitions arg:self arg:new_num_row_partitions arguments arg arg Assign If Compare Raise Call If Compare Call Raise Call If Compare Raise Call If Compare Return return:yes If Compare Assign Assign Call Return return:yes Call Compare Assign Assign Assign Assign Call Return return:yes Call"
  },
  {
    "library": "django",
    "name": "__init__",
    "source_code": "def __init__(self, request=None, *args, **kwargs):\n    self.request = request\n    self.user_cache = None\n    super().__init__(*args, **kwargs)\n    self.username_field = UserModel._meta.get_field(UserModel.USERNAME_FIELD)\n    username_max_length = self.username_field.max_length or 254\n    self.fields['username'].max_length = username_max_length\n    self.fields['username'].widget.attrs['maxlength'] = username_max_length\n    if self.fields['username'].label is None:\n        self.fields['username'].label = capfirst(self.username_field.verbose_name)",
    "docstring": "The 'request' parameter is set for custom auth use by subclasses. The form data comes in via the standard 'data' kwarg.",
    "type": "method",
    "file_path": "django\\django\\contrib\\auth\\forms.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:request arguments arg arg arg arg Assign Assign Call Call Assign Call Assign BoolOp Assign Assign If Compare Assign Call"
  },
  {
    "library": "django",
    "name": "field_references",
    "source_code": "def field_references(model_tuple, field, reference_model_tuple, reference_field_name=None, reference_field=None):\n    remote_field = field.remote_field\n    if not remote_field:\n        return False\n    references_to = None\n    references_through = None\n    if resolve_relation(remote_field.model, *model_tuple) == reference_model_tuple:\n        to_fields = getattr(field, 'to_fields', None)\n        if reference_field_name is None or to_fields is None or (None in to_fields and (reference_field is None or reference_field.primary_key)) or (reference_field_name in to_fields):\n            references_to = (remote_field, to_fields)\n    through = getattr(remote_field, 'through', None)\n    if through and resolve_relation(through, *model_tuple) == reference_model_tuple:\n        through_fields = remote_field.through_fields\n        if reference_field_name is None or through_fields is None or reference_field_name in through_fields:\n            references_through = (remote_field, through_fields)\n    if not (references_to or references_through):\n        return False\n    return FieldReference(references_to, references_through)",
    "docstring": "Return either False or a FieldReference if references provided context. False positives can be returned if is provided without because of the introspection limitation it incurs. This should not be an issue when this function is used to determine whether or not an optimization can take place.",
    "type": "function",
    "file_path": "django\\django\\db\\migrations\\utils.py",
    "ast_data": "FunctionDef name:field_references arg:model_tuple arg:field arg:reference_model_tuple arg:reference_field_name arg:reference_field arguments arg arg arg arg arg Assign If Return return:yes Assign Assign If Compare Call Assign Call If BoolOp Compare Compare BoolOp Compare BoolOp Compare Compare Assign Assign Call If BoolOp Compare Call Assign If BoolOp Compare Compare Compare Assign If BoolOp Return return:yes Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "_update_props",
    "source_code": "def _update_props(self, props, errfmt):\n    ret = []\n    with cbook._setattr_cm(self, eventson=False):\n        for k, v in props.items():\n            if k == 'axes':\n                ret.append(setattr(self, k, v))\n            else:\n                func = getattr(self, f'set_{k}', None)\n                if not callable(func):\n                    raise AttributeError(errfmt.format(cls=type(self), prop_name=k), name=k)\n                ret.append(func(v))\n    if ret:\n        self.pchanged()\n        self.stale = True\n    return ret",
    "docstring": "Helper for and . *errfmt* is used to generate error messages for invalid property names; it gets formatted with `` for \"{cls}\" and the property name for \"{prop_name}\".",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\artist.py",
    "ast_data": "FunctionDef name:_update_props arg:self arg:props arg:errfmt arguments arg arg arg Assign With Call For Call If Compare Call Call Assign Call If Call Raise Call Call Call Call Call If Call Assign Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "predict_log_proba",
    "source_code": "@available_if(_final_estimator_has('predict_log_proba'))\ndef predict_log_proba(self, X, **params):\n    with _raise_or_warn_if_not_fitted(self):\n        Xt = X\n        if not _routing_enabled():\n            for _, name, transform in self._iter(with_final=False):\n                Xt = transform.transform(Xt)\n            return self.steps[-1][1].predict_log_proba(Xt, **params)\n        routed_params = process_routing(self, 'predict_log_proba', **params)\n        for _, name, transform in self._iter(with_final=False):\n            Xt = transform.transform(Xt, **routed_params[name].transform)\n        return self.steps[-1][1].predict_log_proba(Xt, **routed_params[self.steps[-1][0]].predict_log_proba)",
    "docstring": "Transform the data, and apply with the final estimator. Call of each transformer in the pipeline. The transformed data are finally passed to the final estimator that calls method. Only valid if the final estimator implements . Parameters ---------- X : iterable Data to predict on. Must fulfill input requirements of first step of the pipeline. **params : dict of str -> object - If (default): Parameters to the called at the end of all transformations in the pipeline. - If : Parameters requested and accepted by steps. Each step must have requested certain metadata for these parameters to be forwarded to them. .. versionadded:: 0.20 .. versionchanged:: 1.4 Parameters are now passed to the `enable_metadata_routing=TrueMetadata Routing User Guide predict_log_proba` on the final estimator.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\pipeline.py",
    "ast_data": "FunctionDef name:predict_log_proba arg:self arg:X arguments arg arg arg With Call Assign If Call For Call Assign Call Return return:yes Call Assign Call For Call Assign Call Return return:yes Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "__gt__",
    "source_code": "def __gt__(self, other):\n    return str(self) > str(other)",
    "docstring": "Allows feature columns to be sorted in Python 3 as they are in Python 2. Feature columns need to occasionally be sortable, for example when used as keys in a features dictionary passed to a layer. is called when the \"other\" object being compared during the sort does not have defined. Example: ``` # __lt__ only class class A(): def __lt__(self, other): return str(self) str(other) b = B() b \".",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\feature_column\\feature_column_v2_types.py",
    "ast_data": "FunctionDef name:__gt__ arg:self arg:other arguments arg arg Return return:yes Compare Call Call"
  },
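A framework-free demonstration of that fallback: when the left operand's type cannot handle `<`, Python tries the right operand's reflected `__gt__`, which is the hook feature columns implement:

```python
class Col:
    def __init__(self, name):
        self.name = name

    def __str__(self):
        return self.name

    def __gt__(self, other):
        # Mirrors the feature-column trick: compare string forms.
        return str(self) > str(other)

c = Col("feature_b")
# str.__lt__ returns NotImplemented for the unknown type Col, so Python
# falls back to the reflected call c.__gt__("feature_a").
print("feature_a" < c)   # True
```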
  {
    "library": "tensorflow",
    "name": "_build_advisor_options",
    "source_code": "def _build_advisor_options(options):\n    opts = tfprof_options_pb2.AdvisorOptionsProto()\n    if options is None:\n        return opts\n    for checker, checker_opts in options.items():\n        checker_ops_pb = tfprof_options_pb2.AdvisorOptionsProto.CheckerOption()\n        for k, v in checker_opts.items():\n            checker_ops_pb[k] = v\n        opts.checkers[checker].MergeFrom(checker_ops_pb)\n    return opts",
    "docstring": "Build tfprof.AdvisorOptionsProto. Args: options: A dictionary of options. See ALL_ADVICE example. Returns: tfprof.AdvisorOptionsProto.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\profiler\\model_analyzer.py",
    "ast_data": "FunctionDef name:_build_advisor_options arg:options arguments arg Assign Call If Compare Return return:yes For Call Assign Call For Call Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "list_physical_devices",
    "source_code": "def list_physical_devices(self, device_type=None):\n    self._initialize_physical_devices()\n    if device_type is None:\n        return list(self._physical_devices)\n    return [d for d in self._physical_devices if d.device_type == device_type]",
    "docstring": "List local devices visible to the system. This API allows a client to query the devices before they have been initialized by the eager runtime. Additionally a user can filter by device type, to get only CPUs or GPUs. Args: device_type: Optional device type to limit results to Returns: List of PhysicalDevice objects.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\context.py",
    "ast_data": "FunctionDef name:list_physical_devices arg:self arg:device_type arguments arg arg Call If Compare Return return:yes Call Return return:yes Compare"
  },
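The public `tf.config` API routes to this context method, so the everyday call looks like:

```python
import tensorflow as tf

print(tf.config.list_physical_devices())        # every device the local runtime can see
print(tf.config.list_physical_devices("GPU"))   # filtered by device_type; [] when absent
```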
  {
    "library": "tensorflow",
    "name": "_sort_scores_and_boxes",
    "source_code": "def _sort_scores_and_boxes(scores, boxes):\n    with ops.name_scope('sort_scores_and_boxes'):\n        sorted_scores_indices = sort_ops.argsort(scores, axis=1, direction='DESCENDING')\n        sorted_scores = array_ops.gather(scores, sorted_scores_indices, axis=1, batch_dims=1)\n        sorted_boxes = array_ops.gather(boxes, sorted_scores_indices, axis=1, batch_dims=1)\n    return (sorted_scores, sorted_boxes, sorted_scores_indices)",
    "docstring": "Sort boxes based their score from highest to lowest. Args: scores: a tensor with a shape of [batch_size, num_boxes] representing the scores of boxes. boxes: a tensor with a shape of [batch_size, num_boxes, 4] representing the boxes. Returns: sorted_scores: a tensor with a shape of [batch_size, num_boxes] representing the sorted scores. sorted_boxes: a tensor representing the sorted boxes. sorted_scores_indices: a tensor with a shape of [batch_size, num_boxes] representing the index of the scores in a sorted descending order.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\image_ops_impl.py",
    "ast_data": "FunctionDef name:_sort_scores_and_boxes arg:scores arg:boxes arguments arg arg With Call Assign Call Assign Call Assign Call Return return:yes"
  },
  {
    "library": "kornia",
    "name": "render_gaussian2d",
    "source_code": "def render_gaussian2d(mean: Tensor, std: Tensor, size: tuple[int, int], normalized_coordinates: bool=True) -> Tensor:\n    if not (std.dtype == mean.dtype and std.device == mean.device):\n        raise TypeError('Expected inputs to have the same dtype and device')\n    height, width = size\n    grid = create_meshgrid(height, width, normalized_coordinates, mean.device)\n    grid = grid.to(mean.dtype)\n    pos_x = grid[..., 0].view(height, width)\n    pos_y = grid[..., 1].view(height, width)\n    dist_x = (pos_x - mean[..., 0, None, None]) ** 2\n    dist_y = (pos_y - mean[..., 1, None, None]) ** 2\n    k_x = -0.5 * torch.reciprocal(std[..., 0, None, None])\n    k_y = -0.5 * torch.reciprocal(std[..., 1, None, None])\n    exps_x = torch.exp(dist_x * k_x)\n    exps_y = torch.exp(dist_y * k_y)\n    gauss = exps_x * exps_y\n    val_sum = gauss.sum(-2, keepdim=True).sum(-1, keepdim=True)\n    gauss = _safe_zero_division(gauss, val_sum)\n    return gauss",
    "docstring": "Render the PDF of a 2D Gaussian distribution. Args: mean: the mean location of the Gaussian to render, :math:. Shape: :math:. std: the standard deviation of the Gaussian to render, :math:. Shape :math:. Should be able to be broadcast with . size: the (height, width) of the output image. normalized_coordinates: whether `[-1, 1](*, H, W)`.",
    "type": "function",
    "file_path": "kornia\\kornia\\geometry\\subpix\\dsnt.py",
    "ast_data": "FunctionDef name:render_gaussian2d arg:mean arg:std arg:size arg:normalized_coordinates arguments arg arg arg arg If BoolOp Compare Compare Raise Call Assign Assign Call Assign Call Assign Call Assign Call Assign Assign Assign Call Assign Call Assign Call Assign Call Assign Assign Call Call Assign Call Return return:yes"
  },
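A minimal usage sketch; this assumes `render_gaussian2d` is re-exported from `kornia.geometry.subpix` (otherwise import it from `kornia.geometry.subpix.dsnt`):

```python
import torch
from kornia.geometry.subpix import render_gaussian2d

mean = torch.tensor([[0.0, 0.0]])   # one Gaussian at the origin, shape (B=1, 2)
std = torch.tensor([[0.2, 0.2]])    # shape (1, 2), broadcastable with mean
heatmap = render_gaussian2d(mean, std, (5, 5), normalized_coordinates=True)
print(heatmap.shape)  # torch.Size([1, 5, 5])
print(heatmap.sum())  # ~1.0, since the map is normalised into a PDF
```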
  {
    "library": "matplotlib",
    "name": "InvertedAsinhTransform",
    "source_code": "class InvertedAsinhTransform(Transform):\n    input_dims = output_dims = 1\n\n    def __init__(self, linear_width):\n        super().__init__()\n        self.linear_width = linear_width\n\n    def transform_non_affine(self, values):\n        return self.linear_width * np.sinh(values / self.linear_width)\n\n    def inverted(self):\n        return AsinhTransform(self.linear_width)",
    "docstring": "Hyperbolic sine transformation used by",
    "type": "class",
    "file_path": "matplotlib\\lib\\matplotlib\\scale.py",
    "ast_data": "ClassDef name:InvertedAsinhTransform Assign FunctionDef name:__init__ arg:self arg:linear_width arguments arg arg Call Call Assign FunctionDef name:transform_non_affine arg:self arg:values arguments arg arg Return return:yes Call FunctionDef name:inverted arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "nonsingular",
    "source_code": "def nonsingular(self, vmin, vmax):\n    if not np.isfinite(vmin) or not np.isfinite(vmax):\n        return (date2num(datetime.date(1970, 1, 1)), date2num(datetime.date(1970, 1, 2)))\n    if vmax < vmin:\n        vmin, vmax = (vmax, vmin)\n    unit = self._get_unit()\n    interval = self._get_interval()\n    if abs(vmax - vmin) < 1e-06:\n        vmin -= 2 * unit * interval\n        vmax += 2 * unit * interval\n    return (vmin, vmax)",
    "docstring": "Given the proposed upper and lower extent, adjust the range if it is too close to being singular (i.e. a range of ~0).",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\dates.py",
    "ast_data": "FunctionDef name:nonsingular arg:self arg:vmin arg:vmax arguments arg arg arg If BoolOp Call Call Return return:yes Call Call Call Call If Compare Assign Assign Call Assign Call If Compare Call Return return:yes"
  },
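A sketch of the degenerate-range handling on a concrete locator; `AutoDateLocator` inherits this method from `DateLocator`:

```python
import datetime
from matplotlib.dates import AutoDateLocator, date2num, num2date

loc = AutoDateLocator()
x = date2num(datetime.datetime(2024, 6, 1))
vmin, vmax = loc.nonsingular(x, x)       # identical limits get padded on both sides
print(num2date(vmin) < num2date(vmax))   # True: the range is no longer singular
```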
  {
    "library": "kornia",
    "name": "forward",
    "source_code": "def forward(self, images: Tensor, n: Optional[int]=None, window_size: int=5, score_threshold: float=0.0, pad_if_not_divisible: bool=False) -> list[DISKFeatures]:\n    B = images.shape[0]\n    if pad_if_not_divisible:\n        h, w = images.shape[2:]\n        pd_h = 16 - h % 16 if h % 16 > 0 else 0\n        pd_w = 16 - w % 16 if w % 16 > 0 else 0\n        images = torch.nn.functional.pad(images, (0, pd_w, 0, pd_h), value=0.0)\n    heatmaps, descriptors = self.heatmap_and_dense_descriptors(images)\n    if pad_if_not_divisible:\n        heatmaps = heatmaps[..., :h, :w]\n        descriptors = descriptors[..., :h, :w]\n    keypoints = heatmap_to_keypoints(heatmaps, n=n, window_size=window_size, score_threshold=score_threshold)\n    features = []\n    for i in range(B):\n        features.append(keypoints[i].merge_with_descriptors(descriptors[i]))\n    return features",
    "docstring": "Detect features in an image, returning keypoint locations, descriptors and detection scores. Args: images: The image to detect features in. Shape :math:. n: The maximum number of keypoints to detect. If None, all keypoints are returned. window_size: The size of the non-maxima suppression window used to filter detections. score_threshold: The minimum score a detection must have to be returned. See :py:class: for details. pad_if_not_divisible: if True, the non-16 divisible input is zero-padded to the closest 16-multiply Returns: A list of length :math: containing the detected features.",
    "type": "method",
    "file_path": "kornia\\kornia\\feature\\disk\\disk.py",
    "ast_data": "FunctionDef name:forward arg:self arg:images arg:n arg:window_size arg:score_threshold arg:pad_if_not_divisible arguments arg arg arg arg arg arg Assign If Assign Assign Compare Assign Compare Assign Call Assign Call If Assign Assign Assign Call Assign For Call Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "try_generalizing_function_type",
    "source_code": "def try_generalizing_function_type(self, target: function_type.FunctionType) -> function_type.FunctionType:\n    relaxed = target\n    for other in self._dispatch_table:\n        subtype = relaxed.most_specific_common_subtype([other])\n        if subtype is not None:\n            relaxed = subtype\n    return relaxed",
    "docstring": "Returns a generalized subtype of the one given. This heuristic aims to reduce the number of future traces by computing a type that represents more general function inputs. The original \"experimental_relax_shapes\" heuristic identified a known type which shared a common subtype with the current unknown type and then traced with that common subtype. However, the notion of \"common subtype\" was only limited to shapes. This heuristic extends that to FunctionType. Returns if a generalized subtype can not be found. Args: target: The FunctionType to generalize",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\core\\function\\polymorphism\\type_dispatch.py",
    "ast_data": "FunctionDef name:try_generalizing_function_type arg:self arg:target arguments arg arg Assign For Assign Call If Compare Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_LegacyResourceSummaryWriter",
    "source_code": "class _LegacyResourceSummaryWriter(SummaryWriter):\n\n    def __init__(self, resource, init_op_fn):\n        self._resource = resource\n        self._init_op_fn = init_op_fn\n        init_op = self.init()\n        if context.executing_eagerly():\n            self._resource_deleter = resource_variable_ops.EagerResourceDeleter(handle=self._resource, handle_device='cpu:0')\n        else:\n            ops.add_to_collection(_SUMMARY_WRITER_INIT_COLLECTION_NAME, init_op)\n\n    def init(self):\n        return self._init_op_fn(self._resource)\n\n    def flush(self):\n        with ops.device('cpu:0'):\n            return gen_summary_ops.flush_summary_writer(self._resource)\n\n    def close(self):\n        with ops.control_dependencies([self.flush()]):\n            with ops.device('cpu:0'):\n                return gen_summary_ops.close_summary_writer(self._resource)",
    "docstring": "Legacy resource-backed SummaryWriter for tf.contrib.summary.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\summary_ops_v2.py",
    "ast_data": "ClassDef name:_LegacyResourceSummaryWriter FunctionDef name:__init__ arg:self arg:resource arg:init_op_fn arguments arg arg arg Assign Assign Assign Call If Call Assign Call Call FunctionDef name:init arg:self arguments arg Return return:yes Call FunctionDef name:flush arg:self arguments arg With Call Return return:yes Call FunctionDef name:close arg:self arguments arg With Call Call With Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "strategy_supports_loss_scaling",
    "source_code": "def strategy_supports_loss_scaling():\n    if not distribute_lib.has_strategy():\n        return True\n    strategy = distribute_lib.get_strategy()\n    return isinstance(strategy, (collective_all_reduce_strategy.CollectiveAllReduceStrategy, collective_all_reduce_strategy.CollectiveAllReduceStrategyV1, one_device_strategy.OneDeviceStrategy, one_device_strategy.OneDeviceStrategyV1, mirrored_strategy.MirroredStrategy, mirrored_strategy.MirroredStrategyV1))",
    "docstring": "Returns True if the current Strategy supports loss scaling.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\mixed_precision\\loss_scale_optimizer.py",
    "ast_data": "FunctionDef name:strategy_supports_loss_scaling arguments If Call Return return:yes Assign Call Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "_get_doc_link",
    "source_code": "def _get_doc_link(self):\n    if self.__class__.__module__.split('.')[0] != self._doc_link_module:\n        return ''\n    if self._doc_link_url_param_generator is None:\n        estimator_name = self.__class__.__name__\n        estimator_module = '.'.join(itertools.takewhile(lambda part: not part.startswith('_'), self.__class__.__module__.split('.')))\n        return self._doc_link_template.format(estimator_module=estimator_module, estimator_name=estimator_name)\n    return self._doc_link_template.format(**self._doc_link_url_param_generator())",
    "docstring": "Generates a link to the API documentation for a given estimator. This method generates the link to the estimator's documentation page by using the template defined by the attribute . Returns ------- url : str The URL to the API documentation for this estimator. If the estimator does not belong to module , the empty string (i.e. ) is returned.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\utils\\_repr_html\\base.py",
    "ast_data": "FunctionDef name:_get_doc_link arg:self arguments arg If Compare Call Return return:yes If Compare Assign Assign Call Call arguments arg Call Call Return return:yes Call Return return:yes Call Call"
  },
  {
    "library": "kornia",
    "name": "joint_pdf",
    "source_code": "def joint_pdf(kernel_values1: Tensor, kernel_values2: Tensor, epsilon: float=1e-10) -> Tensor:\n    if not isinstance(kernel_values1, Tensor):\n        raise TypeError(f'Input kernel_values1 type is not a Tensor. Got {type(kernel_values1)}')\n    if not isinstance(kernel_values2, Tensor):\n        raise TypeError(f'Input kernel_values2 type is not a Tensor. Got {type(kernel_values2)}')\n    if not kernel_values1.dim() == 3:\n        raise ValueError(f'Input kernel_values1 must be a of the shape BxN. Got {kernel_values1.shape}')\n    if not kernel_values2.dim() == 3:\n        raise ValueError(f'Input kernel_values2 must be a of the shape BxN. Got {kernel_values2.shape}')\n    if kernel_values1.shape != kernel_values2.shape:\n        raise ValueError(f'Inputs kernel_values1 and kernel_values2 must have the same shape. Got {kernel_values1.shape} and {kernel_values2.shape}')\n    joint_kernel_values = torch.matmul(kernel_values1.transpose(1, 2), kernel_values2)\n    normalization = torch.sum(joint_kernel_values, dim=(1, 2)).view(-1, 1, 1) + epsilon\n    pdf = joint_kernel_values / normalization\n    return pdf",
    "docstring": "Calculate the joint probability distribution function of the input tensors based on the number of histogram bins. Args: kernel_values1: shape [BxNxNUM_BINS]. kernel_values2: shape [BxNxNUM_BINS]. epsilon: scalar, for numerical stability. Returns: shape [BxNUM_BINSxNUM_BINS].",
    "type": "function",
    "file_path": "kornia\\kornia\\enhance\\histogram.py",
    "ast_data": "FunctionDef name:joint_pdf arg:kernel_values1 arg:kernel_values2 arg:epsilon arguments arg arg arg If Call Raise Call Call If Call Raise Call Call If Compare Call Raise Call If Compare Call Raise Call If Compare Raise Call Assign Call Call Assign Call Call Assign Return return:yes"
  },
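
The `joint_pdf` entry above reduces to a batched outer product followed by normalization. A minimal pure-PyTorch sketch of that computation, using hypothetical softmax-based soft-binning weights in place of kornia's kernel construction:

import torch

# Hypothetical soft-binning weights: batch of 1, N=4 samples, 3 bins each.
# Each row sums to 1, mimicking the kernel_values inputs described above.
k1 = torch.softmax(torch.randn(1, 4, 3), dim=-1)
k2 = torch.softmax(torch.randn(1, 4, 3), dim=-1)

# Same steps as joint_pdf: co-occurrence mass per bin pair, then normalize
# each B x NUM_BINS x NUM_BINS slice to sum to ~1.
joint = torch.matmul(k1.transpose(1, 2), k2)                  # (1, 3, 3)
pdf = joint / (joint.sum(dim=(1, 2)).view(-1, 1, 1) + 1e-10)
print(pdf.sum())  # ~1.0
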
  {
    "library": "pytorch",
    "name": "is_structseq_class",
    "source_code": "def is_structseq_class(cls: type) -> bool:\n    return isinstance(cls, type) and cls.__bases__ == (tuple,) and isinstance(getattr(cls, 'n_fields', None), int) and isinstance(getattr(cls, 'n_sequence_fields', None), int) and isinstance(getattr(cls, 'n_unnamed_fields', None), int) and (not bool(cls.__flags__ & Py_TPFLAGS_BASETYPE))",
    "docstring": "Return whether the class is a class of PyStructSequence.",
    "type": "function",
    "file_path": "pytorch\\torch\\utils\\_pytree.py",
    "ast_data": "FunctionDef name:is_structseq_class arg:cls arguments arg Return return:yes BoolOp Call Compare Call Call Call Call Call Call Call"
  },
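
For context on the `is_structseq_class` checks, `os.stat_result` and `time.struct_time` are typical PyStructSequence types. A standard-library-only sketch of the same structural test (omitting the `Py_TPFLAGS_BASETYPE` flag check, which needs the CPython type flags):

import os
import time

def looks_like_structseq(cls):
    # tuple as the sole base, plus the n_*_fields integer markers that
    # PyStructSequence types expose.
    return (isinstance(cls, type)
            and cls.__bases__ == (tuple,)
            and all(isinstance(getattr(cls, name, None), int)
                    for name in ('n_fields', 'n_sequence_fields',
                                 'n_unnamed_fields')))

print(looks_like_structseq(os.stat_result))    # True
print(looks_like_structseq(time.struct_time))  # True
print(looks_like_structseq(tuple))             # False
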
  {
    "library": "numpy",
    "name": "masked_greater",
    "source_code": "def masked_greater(x, value, copy=True):\n    return masked_where(greater(x, value), x, copy=copy)",
    "docstring": "Mask an array where greater than a given value. This function is a shortcut to `condition` = (x > value). See Also -------- masked_where : Mask where a condition is met. Examples -------- >>> import numpy as np >>> import numpy.ma as ma >>> a = np.arange(4) >>> a array([0, 1, 2, 3]) >>> ma.masked_greater(a, 2) masked_array(data=[0, 1, 2, --], mask=[False, False, False, True], fill_value=999999)",
    "type": "function",
    "file_path": "numpy\\numpy\\ma\\core.py",
    "ast_data": "FunctionDef name:masked_greater arg:x arg:value arg:copy arguments arg arg arg Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "get_swa_multi_avg_fn",
    "source_code": "def get_swa_multi_avg_fn():\n\n    @torch.no_grad()\n    def swa_update(averaged_param_list: PARAM_LIST, current_param_list: PARAM_LIST, num_averaged: Union[Tensor, int]):\n        if torch.is_floating_point(averaged_param_list[0]) or torch.is_complex(averaged_param_list[0]):\n            torch._foreach_lerp_(averaged_param_list, current_param_list, 1 / (num_averaged + 1))\n        else:\n            diffs = torch._foreach_sub(current_param_list, averaged_param_list)\n            if isinstance(num_averaged, Tensor):\n                torch._foreach_addcdiv_(averaged_param_list, diffs, [num_averaged + 1] * len(averaged_param_list))\n            else:\n                torch._foreach_add_(averaged_param_list, diffs, alpha=1.0 / (num_averaged + 1))\n    return swa_update",
    "docstring": "Get the function applying stochastic weight average (SWA) across multiple params.",
    "type": "function",
    "file_path": "pytorch\\torch\\optim\\swa_utils.py",
    "ast_data": "FunctionDef name:get_swa_multi_avg_fn arguments FunctionDef name:swa_update arg:averaged_param_list arg:current_param_list arg:num_averaged arguments arg arg arg If BoolOp Call Call Call Assign Call If Call Call Call Call Call Return return:yes"
  },
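
The floating-point branch of `swa_update` above is the incremental mean: avg <- avg + (p - avg) / (n + 1). A small sketch verifying that the lerp form matches a plain mean (public torch ops only):

import torch

params = [torch.randn(3) for _ in range(5)]

# Incremental SWA-style average, as in the floating point branch above.
avg = params[0].clone()
for n, p in enumerate(params[1:], start=1):
    avg.lerp_(p, 1.0 / (n + 1))  # avg += (p - avg) / (n + 1)

print(torch.allclose(avg, torch.stack(params).mean(dim=0)))  # True
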
  {
    "library": "django",
    "name": "__getitem__",
    "source_code": "def __getitem__(self, index):\n    return self.forms[index]",
    "docstring": "Return the form at the given index, based on the rendering order.",
    "type": "method",
    "file_path": "django\\django\\forms\\formsets.py",
    "ast_data": "FunctionDef name:__getitem__ arg:self arg:index arguments arg arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "most_specific_common_supertype",
    "source_code": "def most_specific_common_supertype(self, others: Sequence[trace.TraceType]) -> Optional['Tuple']:\n    if not all((isinstance(other, Tuple) and len(self.components) == len(other.components) for other in others)):\n        return None\n    supertyped_components = []\n    for i, component in enumerate(self.components):\n        supertyped_component = component.most_specific_common_supertype([other.components[i] for other in others])\n        if supertyped_component is None:\n            return None\n        supertyped_components.append(supertyped_component)\n    return Tuple(*supertyped_components)",
    "docstring": "See base class.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\core\\function\\trace_type\\default_types.py",
    "ast_data": "FunctionDef name:most_specific_common_supertype arg:self arg:others arguments arg arg If Call BoolOp Call Compare Call Call Return return:no Assign For Call Assign Call If Compare Return return:no Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "pg_config_info",
    "source_code": "@property\ndef pg_config_info(self) -> list[dict[str, Any]]:\n    config_info: list[dict[str, Any]] = []\n    default_pg_size = _get_group_size(None)\n    for pg in self.pg_map.keys():\n        ranks = self.pg_group_ranks[pg]\n        config_info.append({'pg_name': self.pg_names[pg], 'pg_desc': pg.group_desc, 'backend_config': self.pg_backend_config[pg], 'ranks': list(ranks.keys()) if len(ranks) != default_pg_size else [], 'group_size': len(ranks), 'group_count': self.group_count})\n    return config_info",
    "docstring": "Return a list of dict with process groups and backends. Along with their unique IDs and configurations (types and ranks).",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\distributed_c10d.py",
    "ast_data": "FunctionDef name:pg_config_info arg:self arguments arg Assign Call For Call Assign Call Compare Call Call Call Call Return return:yes"
  },
  {
    "library": "scipy",
    "name": "getdata",
    "source_code": "def getdata(obj, dtype=None, copy=False) -> np.ndarray:\n    data = np.array(obj, dtype=dtype, copy=copy)\n    getdtype(data.dtype)\n    return data",
    "docstring": "This is a wrapper of that will generate a warning if the result is an object array.",
    "type": "function",
    "file_path": "scipy\\scipy\\sparse\\_sputils.py",
    "ast_data": "FunctionDef name:getdata arg:obj arg:dtype arg:copy arguments arg arg arg Assign Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_create_handle_data_proto",
    "source_code": "def _create_handle_data_proto(shape_proto, dtype_enum):\n    variant_shape_and_type_data = cpp_shape_inference_pb2.CppShapeInferenceResult.HandleData()\n    variant_shape_and_type_data.is_set = True\n    variant_shape_and_type_data.shape_and_type.extend([cpp_shape_inference_pb2.CppShapeInferenceResult.HandleShapeAndType(shape=shape_proto, dtype=dtype_enum)])\n    return variant_shape_and_type_data",
    "docstring": "Create handle data based on shape and dtype protos.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\linalg\\sparse\\sparse_csr_matrix_ops.py",
    "ast_data": "FunctionDef name:_create_handle_data_proto arg:shape_proto arg:dtype_enum arguments arg arg Assign Call Assign Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "bessel_j1",
    "source_code": "@tf_export('math.special.bessel_j1')\n@dispatch.register_unary_elementwise_api\n@dispatch.add_dispatch_support\ndef bessel_j1(x, name=None):\n    with ops.name_scope(name, 'bessel_j1', [x]):\n        return gen_special_math_ops.bessel_j1(x)",
    "docstring": "Computes the Bessel j1 function of element-wise. Modified Bessel function of order 1. >>> tf.math.special.bessel_j1([0.5, 1., 2., 4.]).numpy() array([ 0.24226846, 0.44005059, 0.57672481, -0.06604333], dtype=float32) Args: x: A or . Must be one of the following types: , , . name: A name for the operation (optional). Returns: A or , respectively. Has the same type as . @compatibility(scipy) Equivalent to scipy.special.j1 @end_compatibility",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\special_math_ops.py",
    "ast_data": "FunctionDef name:bessel_j1 arg:x arg:name arguments arg arg With Call Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "_fill_in_default_kwargs",
    "source_code": "def _fill_in_default_kwargs(node: torch.fx.Node) -> tuple[list[fx_type_utils.Argument], dict[str, fx_type_utils.Argument]]:\n    if hasattr(node.target, '_schema'):\n        node_schema = node.target._schema\n    else:\n        node_schema = torch.ops.aten.sym_size.int._schema\n    complete_args: list[fx_type_utils.Argument] = []\n    complete_kwargs: dict[str, fx_type_utils.Argument] = {}\n    if inspect.isbuiltin(node.target):\n        complete_args = list(node.args)\n    else:\n        for i, expected_arg in enumerate(node_schema.arguments):\n            if i < len(node.args):\n                complete_args.append(node.args[i])\n            elif expected_arg.name in node.kwargs:\n                complete_kwargs[expected_arg.name] = node.kwargs[expected_arg.name]\n            else:\n                complete_kwargs[expected_arg.name] = expected_arg.default_value\n    return (complete_args, complete_kwargs)",
    "docstring": "Find and Fill in the not provided kwargs with default values.",
    "type": "function",
    "file_path": "pytorch\\torch\\onnx\\_internal\\fx\\fx_onnx_interpreter.py",
    "ast_data": "FunctionDef name:_fill_in_default_kwargs arg:node arguments arg If Call Assign Assign If Call Assign Call For Call If Compare Call Call If Compare Assign Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "get",
    "source_code": "def get(self, key=None, indices=None, name=None):\n    if key is None:\n        return self._popitem(indices=indices, name=name)\n    else:\n        return self._pop(key, indices=indices, name=name)",
    "docstring": "If the key is provided, the associated (key, value) is returned from the staging area. If the key is not in the staging area, this method will block until the associated (key, value) is inserted. If no key is provided and the staging area is ordered, the (key, value) with the smallest key will be returned. Otherwise, a random (key, value) will be returned. If the staging area is empty when this operation executes, it will block until there is an element to dequeue. Args: key: Key associated with the required data (Optional) indices: Partial list of tensors to retrieve (optional). A list of integer or string indices. String indices are only valid if the Staging Area has names associated with it. name: A name for the operation (optional) Returns: The created op",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\data_flow_ops.py",
    "ast_data": "FunctionDef name:get arg:self arg:key arg:indices arg:name arguments arg arg arg arg If Compare Return return:yes Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "BuildLoop",
    "source_code": "def BuildLoop(self, pred, body, loop_vars, shape_invariants, return_same_structure):\n    flat_orig_loop_vars = nest.flatten(loop_vars, expand_composites=True)\n    loop_vars = nest.map_structure(_convert_to_tensor_or_composite_or_tensorarray, loop_vars)\n    flat_loop_vars = nest.map_structure(_convert_tensorarray_to_flow, nest.flatten(loop_vars, expand_composites=True))\n    if shape_invariants is not None:\n        loop_vars_signature = nest.map_structure(_shape_invariant_to_type_spec, loop_vars, shape_invariants)\n    else:\n        loop_vars_signature = nest.map_structure(_shape_invariant_to_type_spec, loop_vars)\n    try:\n        self.Enter()\n        with ops.get_default_graph()._mutation_lock():\n            original_body_result, exit_vars = self._BuildLoop(pred, body, flat_orig_loop_vars, flat_loop_vars, loop_vars_signature)\n    finally:\n        self.Exit()\n    flat_result = nest.flatten(original_body_result, expand_composites=True)\n    exit_vars_with_tensorarrays = nest.map_structure(_convert_flow_to_tensorarray, flat_result, exit_vars)\n    packed_exit_vars = nest.pack_sequence_as(structure=original_body_result, flat_sequence=exit_vars_with_tensorarrays, expand_composites=True)\n    if return_same_structure:\n        return packed_exit_vars\n    else:\n        return packed_exit_vars[0] if len(exit_vars) == 1 else packed_exit_vars",
    "docstring": "Add the loop termination condition and body to the graph.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\control_flow_ops.py",
    "ast_data": "FunctionDef name:BuildLoop arg:self arg:pred arg:body arg:loop_vars arg:shape_invariants arg:return_same_structure arguments arg arg arg arg arg arg Assign Call Assign Call Assign Call Call If Compare Assign Call Assign Call Try Call With Call Call Assign Call Call Assign Call Assign Call Assign Call If Return return:yes Return return:yes Compare Call"
  },
  {
    "library": "tensorflow",
    "name": "SendSourceFiles",
    "source_code": "def SendSourceFiles(self, request, context):\n    context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n    context.set_details('Method not implemented!')\n    raise NotImplementedError('Method not implemented!')",
    "docstring": "Send a collection of source code files being debugged.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\debug\\lib\\debug_service_pb2_grpc.py",
    "ast_data": "FunctionDef name:SendSourceFiles arg:self arg:request arg:context arguments arg arg arg Call Call Raise Call"
  },
  {
    "library": "tensorflow",
    "name": "RestoredSaveableObject",
    "source_code": "class RestoredSaveableObject(saveable_object.SaveableObject):\n\n    def __init__(self, names_and_slices, save_function, restore_function, name):\n        self.save_function = save_function\n        self.restore_function = restore_function\n        if tensor_util.is_tf_type(name):\n            name_tensor = name\n        else:\n            with ops.init_scope():\n                name_tensor = constant_op.constant(name)\n        tensors = save_function(name_tensor)\n        specs = []\n        for (str_name, str_slice), tensor_info in zip(names_and_slices, tensors):\n            specs.append(saveable_object.SaveSpec(tensor_info['tensor'], str_slice, name + str_name))\n        super(RestoredSaveableObject, self).__init__(None, specs, name)\n\n    def restore(self, restored_tensors, restored_shapes):\n        del restored_shapes\n        return self.restore_function(*[restored_tensors[i] for i in range(len(self.specs))])",
    "docstring": "SaveableObject restored from SavedModel using the traced save/restore.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\training\\saving\\saveable_object_util.py",
    "ast_data": "ClassDef name:RestoredSaveableObject FunctionDef name:__init__ arg:self arg:names_and_slices arg:save_function arg:restore_function arg:name arguments arg arg arg arg arg Assign Assign If Call Assign With Call Assign Call Assign Call Assign For Call Call Call Call Call FunctionDef name:restore arg:self arg:restored_tensors arg:restored_shapes arguments arg arg arg Return return:yes Call Call Call"
  },
  {
    "library": "matplotlib",
    "name": "single_shot",
    "source_code": "@property\ndef single_shot(self):\n    return self._single",
    "docstring": "Whether this timer should stop after a single run.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\backend_bases.py",
    "ast_data": "FunctionDef name:single_shot arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "get_legacy_output_classes",
    "source_code": "@tf_export(v1=['data.get_output_classes'])\ndef get_legacy_output_classes(dataset_or_iterator):\n    return nest.map_structure(lambda component_spec: component_spec._to_legacy_output_classes(), get_structure(dataset_or_iterator))",
    "docstring": "Returns the output classes for elements of the input dataset / iterator. Args: dataset_or_iterator: A or . Returns: A (nested) structure of Python objects matching the structure of the dataset / iterator elements and specifying the class of the individual components. @compatibility(TF2) This is a legacy API for inspecting the type signature of dataset elements. In TF 2, you should use the attribute instead. @end_compatibility",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\data\\ops\\dataset_ops.py",
    "ast_data": "FunctionDef name:get_legacy_output_classes arg:dataset_or_iterator arguments arg Return return:yes Call arguments arg Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_load_tf_record",
    "source_code": "def _load_tf_record(self, tf_record_path: str) -> RepresentativeDataset:\n    samples = []\n    with context.eager_mode():\n        for sample_bytes in readers.TFRecordDatasetV2(filenames=[tf_record_path]):\n            sample_proto = _RepresentativeDataSample.FromString(sample_bytes.numpy())\n            sample = {}\n            for input_key, tensor_proto in sample_proto.tensor_proto_inputs.items():\n                sample[input_key] = tensor_util.MakeNdarray(tensor_proto)\n            samples.append(sample)\n    return samples",
    "docstring": "Loads TFRecord containing samples of type.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\compiler\\mlir\\quantization\\tensorflow\\python\\representative_dataset.py",
    "ast_data": "FunctionDef name:_load_tf_record arg:self arg:tf_record_path arguments arg arg Assign With Call For Call Assign Call Call Assign For Call Assign Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_get_op_from_signature_def",
    "source_code": "def _get_op_from_signature_def(meta_graph_def, op_signature_key, import_scope):\n    if op_signature_key in meta_graph_def.signature_def:\n        return signature_def_utils.load_op_from_signature_def(meta_graph_def.signature_def[op_signature_key], op_signature_key, import_scope)\n    else:\n        return None",
    "docstring": "Retrieve op stored in the imported meta graph's signature def.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\saved_model\\loader_impl.py",
    "ast_data": "FunctionDef name:_get_op_from_signature_def arg:meta_graph_def arg:op_signature_key arg:import_scope arguments arg arg arg If Compare Return return:yes Call Return return:no"
  },
  {
    "library": "tensorflow",
    "name": "_cast_indexed_slice_indices",
    "source_code": "def _cast_indexed_slice_indices(a, b):\n    if isinstance(a, indexed_slices.IndexedSlices) and isinstance(b, indexed_slices.IndexedSlices) and (a.indices.dtype != b.indices.dtype):\n        a._indices = math_ops.cast(a.indices, dtypes.int64)\n        b._indices = math_ops.cast(b.indices, dtypes.int64)",
    "docstring": "Cast IndexedSlice.indices from int32 to int64 where necessary. If and are both IndexedSlices, and their indices have different dtypes, then cast both their dtypes to (modifies and in-place). Otherwise, does nothing. Args: a: A value, which may be an IndexedSlices. b: A value, which may be an IndexedSlices.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\cond.py",
    "ast_data": "FunctionDef name:_cast_indexed_slice_indices arg:a arg:b arguments arg arg If BoolOp Call Call Compare Assign Call Assign Call"
  },
  {
    "library": "numpy",
    "name": "check_restrict",
    "source_code": "def check_restrict(self):\n    return check_restrict(self)",
    "docstring": "Return the restrict keyword recognized by the compiler, empty string otherwise.",
    "type": "method",
    "file_path": "numpy\\numpy\\distutils\\command\\config.py",
    "ast_data": "FunctionDef name:check_restrict arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "__init__",
    "source_code": "def __init__(self, lhs, rhs, op):\n    self.lhs = lhs\n    self.rhs = rhs\n    self.op = op",
    "docstring": ":param lhs: lhs of the constraint :param rhs: rhs of the constraint :param op: string representing the operation",
    "type": "method",
    "file_path": "pytorch\\torch\\fx\\experimental\\migrate_gradual_types\\constraint.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:lhs arg:rhs arg:op arguments arg arg arg arg Assign Assign Assign"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, communication=collective_util.CommunicationImplementation.AUTO, cluster_resolver=None):\n    communication_options = collective_util.Options(implementation=communication)\n    super(CollectiveAllReduceStrategyV1, self).__init__(CollectiveAllReduceExtended(self, cluster_resolver=cluster_resolver, communication_options=communication_options))\n    distribute_lib.distribution_strategy_gauge.get_cell('V1').set('MultiWorkerMirroredStrategy')\n    distribute_lib.distribution_strategy_replica_gauge.get_cell('num_workers').set(self.extended._num_workers)\n    distribute_lib.distribution_strategy_replica_gauge.get_cell('num_gpu_per_worker').set(self.extended._num_devices_per_worker if self.extended._local_device_type == 'GPU' else 0)",
    "docstring": "Initializes the object.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\collective_all_reduce_strategy.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:communication arg:cluster_resolver arguments arg arg arg Assign Call Call Call Call Call Call Call Call Call Call Compare"
  },
  {
    "library": "pandas",
    "name": "null_count",
    "source_code": "@property\n@abstractmethod\ndef null_count(self) -> int | None:\n    pass",
    "docstring": "Number of null elements, if known. Note: Arrow uses -1 to indicate \"unknown\", but None seems cleaner.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\interchange\\dataframe_protocol.py",
    "ast_data": "FunctionDef name:null_count arg:self arguments arg"
  },
  {
    "library": "pandas",
    "name": "LossySetitemError",
    "source_code": "class LossySetitemError(Exception):\n    pass",
    "docstring": "Raised when trying to do a __setitem__ on an np.ndarray that is not lossless. Notes ----- This is an internal error.",
    "type": "class",
    "file_path": "pandas\\pandas\\errors\\__init__.py",
    "ast_data": "ClassDef name:LossySetitemError"
  },
  {
    "library": "scikit-learn",
    "name": "_concatenate_predictions",
    "source_code": "def _concatenate_predictions(self, X, predictions):\n    X_meta = []\n    for est_idx, preds in enumerate(predictions):\n        if isinstance(preds, list):\n            for pred in preds:\n                X_meta.append(pred[:, 1:])\n        elif preds.ndim == 1:\n            X_meta.append(preds.reshape(-1, 1))\n        elif self.stack_method_[est_idx] == 'predict_proba' and len(self.classes_) == 2:\n            X_meta.append(preds[:, 1:])\n        else:\n            X_meta.append(preds)\n    self._n_feature_outs = [pred.shape[1] for pred in X_meta]\n    if self.passthrough:\n        X_meta.append(X)\n        if sparse.issparse(X):\n            return sparse.hstack(X_meta, format=X.format)\n    return np.hstack(X_meta)",
    "docstring": "Concatenate the predictions of each first layer learner and possibly the input dataset . If is sparse and is False, the output of will be dense (the predictions). If is sparse and is True, the output of will be sparse. This helper is in charge of ensuring the predictions are 2D arrays and it will drop one of the probability column when using probabilities in the binary case. Indeed, the p(y|c=0) = 1 - p(y|c=1) When type is predict_probapredsndarray(n_samples, n_class)ndarray`. This function will drop one of the probability column in this situation as well.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\ensemble\\_stacking.py",
    "ast_data": "FunctionDef name:_concatenate_predictions arg:self arg:X arg:predictions arguments arg arg arg Assign For Call If Call For Call If Compare Call Call If BoolOp Compare Compare Call Call Call Assign If Call If Call Return return:yes Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "float",
    "source_code": "def float(self) -> Self:\n    return self._apply(lambda t: t.float() if t.is_floating_point() else t)",
    "docstring": "Casts all floating point parameters and buffers to `` datatype. .. note:: This method modifies the module in-place. Returns: Module: self",
    "type": "method",
    "file_path": "pytorch\\torch\\nn\\modules\\module.py",
    "ast_data": "FunctionDef name:float arg:self arguments arg Return return:yes Call arguments arg Call Call"
  },
  {
    "library": "matplotlib",
    "name": "get_color",
    "source_code": "def get_color(self):\n    return self._color",
    "docstring": "Return the line color. See also .",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\lines.py",
    "ast_data": "FunctionDef name:get_color arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_PadGrad",
    "source_code": "def _PadGrad(op: ops.Operation, grad):\n    x = op.inputs[0]\n    a = op.inputs[1]\n    pad_before = array_ops.slice(a, [0, 0], array_ops_stack.stack([array_ops.rank(x), 1]))\n    begin = array_ops.reshape(pad_before, [-1])\n    sizes = array_ops.shape(x, out_type=begin.dtype)\n    x_grad = array_ops.slice(grad, begin, sizes)\n    if len(op.inputs) == 3:\n        return (x_grad, None, None)\n    else:\n        return (x_grad, None)",
    "docstring": "Gradient for Pad.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\array_grad.py",
    "ast_data": "FunctionDef name:_PadGrad arg:op arg:grad arguments arg arg Assign Assign Assign Call Call Call Assign Call Assign Call Assign Call If Compare Call Return return:yes Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "_tool_toggled_cbk",
    "source_code": "def _tool_toggled_cbk(self, event):\n    self.toggle_toolitem(event.tool.name, event.tool.toggled)",
    "docstring": "Capture the 'tool_trigger_[name]' This only gets used for toggled tools.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\backend_bases.py",
    "ast_data": "FunctionDef name:_tool_toggled_cbk arg:self arg:event arguments arg arg Call"
  },
  {
    "library": "tensorflow",
    "name": "_broadcast_shape_helper",
    "source_code": "def _broadcast_shape_helper(shape_x, shape_y):\n    broadcasted_dims = reversed(list(itertools.zip_longest(reversed(shape_x.dims), reversed(shape_y.dims), fillvalue=tensor_shape.Dimension(1))))\n    return_dims = []\n    for dim_x, dim_y in broadcasted_dims:\n        if dim_x.value is None or dim_y.value is None:\n            if dim_x.value is not None and dim_x.value > 1:\n                return_dims.append(dim_x)\n            elif dim_y.value is not None and dim_y.value > 1:\n                return_dims.append(dim_y)\n            else:\n                return_dims.append(None)\n        elif dim_x.value == 1:\n            return_dims.append(dim_y)\n        elif dim_y.value == 1:\n            return_dims.append(dim_x)\n        elif dim_x.value == dim_y.value:\n            return_dims.append(dim_x.merge_with(dim_y))\n        else:\n            return None\n    return return_dims",
    "docstring": "Helper functions for is_broadcast_compatible and broadcast_shape. Args: shape_x: A shape_y: A Returns: Returns None if the shapes are not broadcast compatible, a list of the broadcast dimensions otherwise.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\common_shapes.py",
    "ast_data": "FunctionDef name:_broadcast_shape_helper arg:shape_x arg:shape_y arguments arg arg Assign Call Call Call Call Call Call Assign For If BoolOp Compare Compare If BoolOp Compare Compare Call If BoolOp Compare Compare Call Call If Compare Call If Compare Call If Compare Call Call Return return:no Return return:yes"
  },
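
The pairing logic in `_broadcast_shape_helper` can be sketched with plain ints, where None stands in for an unknown `Dimension` (a simplified illustration, not TensorFlow's API):

import itertools

def broadcast_dims(xs, ys):
    # Right-align the shapes, pad with 1s, and apply the same rules as
    # above; an unknown dim stays unknown unless the other side is > 1.
    out = []
    for x, y in itertools.zip_longest(reversed(xs), reversed(ys), fillvalue=1):
        if x is None or y is None:
            known = x if x is not None else y
            out.append(known if known is not None and known > 1 else None)
        elif x == 1:
            out.append(y)
        elif y == 1:
            out.append(x)
        elif x == y:
            out.append(x)
        else:
            return None  # not broadcast compatible
    return out[::-1]

print(broadcast_dims([2, 3, 1], [3, 4]))  # [2, 3, 4]
print(broadcast_dims([None, 1], [5, 4]))  # [5, 4] (optimistic about None)
print(broadcast_dims([2], [3]))           # None
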
  {
    "library": "django",
    "name": "model_format_dict",
    "source_code": "def model_format_dict(obj):\n    if isinstance(obj, (models.Model, models.base.ModelBase)):\n        opts = obj._meta\n    elif isinstance(obj, models.query.QuerySet):\n        opts = obj.model._meta\n    else:\n        opts = obj\n    return {'verbose_name': opts.verbose_name, 'verbose_name_plural': opts.verbose_name_plural}",
    "docstring": "Return a with keys 'verbose_name' and 'verbose_name_plural', typically for use with string formatting. may be a instance, subclass, or instance.",
    "type": "function",
    "file_path": "django\\django\\contrib\\admin\\utils.py",
    "ast_data": "FunctionDef name:model_format_dict arg:obj arguments arg If Call Assign If Call Assign Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_in_multi_worker_mode",
    "source_code": "def _in_multi_worker_mode(self):\n    return False",
    "docstring": "Whether this strategy indicates working in multi-worker settings.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\mirrored_strategy.py",
    "ast_data": "FunctionDef name:_in_multi_worker_mode arg:self arguments arg Return return:yes"
  },
  {
    "library": "numpy",
    "name": "__sub__",
    "source_code": "def __sub__(self, other):\n    if self._delegate_binop(other):\n        return NotImplemented\n    return subtract(self, other)",
    "docstring": "Subtract other from self, and return a new masked array.",
    "type": "method",
    "file_path": "numpy\\numpy\\ma\\core.py",
    "ast_data": "FunctionDef name:__sub__ arg:self arg:other arguments arg arg If Call Return return:yes Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "HolderModule",
    "source_code": "@compatibility(is_backward_compatible=False)\nclass HolderModule(Module):\n\n    def __init__(self, d):\n        super().__init__()\n        for k, v in d.items():\n            self.add_module(k, v)",
    "docstring": "HolderModule is used to copy all the attributes from original module to submodules that uses the attributes",
    "type": "class",
    "file_path": "pytorch\\torch\\fx\\passes\\utils\\common.py",
    "ast_data": "ClassDef name:HolderModule FunctionDef name:__init__ arg:self arg:d arguments arg arg Call Call For Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "_reshape_to_1d_tensor",
    "source_code": "def _reshape_to_1d_tensor(opset: onnxscript.values.Opset, arg: ir.Value) -> ir.Value:\n    return opset.Reshape(arg, opset.Constant(value=ir.tensor([-1], dtype=ir.DataType.INT64)))",
    "docstring": "Reshape the input to a 1D tensor.",
    "type": "function",
    "file_path": "pytorch\\torch\\onnx\\_internal\\exporter\\_building.py",
    "ast_data": "FunctionDef name:_reshape_to_1d_tensor arg:opset arg:arg arguments arg arg Return return:yes Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_run_using_default_session",
    "source_code": "def _run_using_default_session(operation, feed_dict, graph, session=None) -> None:\n    if session is None:\n        session = stack.get_default_session()\n        if session is None:\n            raise ValueError('Cannot execute operation using `run()`: No default session is registered. Use `with sess.as_default():` or pass an explicit session to `run(session=sess)`')\n        if session.graph is not graph:\n            raise ValueError(\"Cannot use the default session to execute operation: the operation's graph is different from the session's graph. Pass an explicit session to run(session=sess).\")\n    elif session.graph is not graph:\n        raise ValueError(\"Cannot use the given session to execute operation: the operation's graph is different from the session's graph.\")\n    session.run(operation, feed_dict)",
    "docstring": "Uses the default session to run \"operation\". Args: operation: The Operation to be run. feed_dict: A dictionary that maps Tensor objects (or tensor names) to lists, numpy ndarrays, TensorProtos, or strings. graph: The graph in which \"operation\" is defined. session: (Optional) A different session to use to run \"operation\". Raises: ValueError: If no default session is available; the default session does not have \"graph\" as its graph; or if \"session\" is specified, and it does not have \"graph\" as its graph.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\ops.py",
    "ast_data": "FunctionDef name:_run_using_default_session arg:operation arg:feed_dict arg:graph arg:session arguments arg arg arg arg If Compare Assign Call If Compare Raise Call If Compare Raise Call If Compare Raise Call Call"
  },
  {
    "library": "tensorflow",
    "name": "__len__",
    "source_code": "def __len__(self):\n    if not context.executing_eagerly():\n        raise TypeError('`tf.data.Dataset` only supports `len` in eager mode. Use `tf.data.Dataset.cardinality()` instead.')\n    length = self.cardinality()\n    if length.numpy() == INFINITE:\n        raise TypeError('The dataset is infinite.')\n    if length.numpy() == UNKNOWN:\n        raise TypeError('The dataset length is unknown.')\n    return length",
    "docstring": "Returns the length of the dataset if it is known and finite. This method requires that you are running in eager mode, and that the length of the dataset is known and non-infinite. When the length may be unknown or infinite, or if you are running in graph mode, use instead. Returns: An integer representing the length of the dataset. Raises: RuntimeError: If the dataset length is unknown or infinite, or if eager execution is not enabled.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\data\\ops\\dataset_ops.py",
    "ast_data": "FunctionDef name:__len__ arg:self arguments arg If Call Raise Call Assign Call If Compare Call Raise Call If Compare Call Raise Call Return return:yes"
  },
  {
    "library": "scrapy",
    "name": "getdict",
    "source_code": "def getdict(self, name: _SettingsKeyT, default: dict[Any, Any] | None=None) -> dict[Any, Any]:\n    value = self.get(name, default or {})\n    if isinstance(value, str):\n        value = json.loads(value)\n    return dict(value)",
    "docstring": "Get a setting value as a dictionary. If the setting original type is a dictionary, a copy of it will be returned. If it is a string it will be evaluated as a JSON dictionary. In the case that it is a :class: instance itself, it will be converted to a dictionary, containing all its current settings values as they would be returned by :meth:, and losing all information about priority and mutability. :param name: the setting name :type name: str :param default: the value to return if no setting is found :type default: object",
    "type": "method",
    "file_path": "scrapy\\scrapy\\settings\\__init__.py",
    "ast_data": "FunctionDef name:getdict arg:self arg:name arg:default arguments arg arg arg Assign Call BoolOp If Call Assign Call Return return:yes Call"
  },
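
A short usage sketch of the `getdict` behaviors described above, assuming a standard `scrapy.settings.Settings` object:

from scrapy.settings import Settings

s = Settings({'A': {'x': 1}, 'B': '{"y": 2}'})
print(s.getdict('A'))            # {'x': 1} (a copy, not the stored dict)
print(s.getdict('B'))            # {'y': 2} (parsed from the JSON string)
print(s.getdict('MISSING', {}))  # {}
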
  {
    "library": "pytorch",
    "name": "bound_sympy",
    "source_code": "def bound_sympy(self, expr: sympy.Expr, size_oblivious: bool=False) -> ValueRanges:\n    var_to_range = {x: self.var_to_range.get(x, None) for x in expr.free_symbols}\n    if size_oblivious:\n        for x in self.size_like & var_to_range.keys():\n            if var_to_range[x] is not None:\n                var_to_range[x] = ValueRanges(2, int_oo)\n    return bound_sympy(expr, var_to_range)",
    "docstring": "Given a sympy expression, computes a ValueRanges bound for what values it can be",
    "type": "method",
    "file_path": "pytorch\\torch\\fx\\experimental\\symbolic_shapes.py",
    "ast_data": "FunctionDef name:bound_sympy arg:self arg:expr arg:size_oblivious arguments arg arg arg Assign Call If For Call If Compare Assign Call Return return:yes Call"
  },
  {
    "library": "django",
    "name": "get_ordering",
    "source_code": "def get_ordering(self):\n    return '-%s' % self.get_date_field() if self.ordering is None else self.ordering",
    "docstring": "Return the field or fields to use for ordering the queryset; use the date field by default.",
    "type": "method",
    "file_path": "django\\django\\views\\generic\\dates.py",
    "ast_data": "FunctionDef name:get_ordering arg:self arguments arg Return return:yes Compare Call"
  },
  {
    "library": "matplotlib",
    "name": "xmax",
    "source_code": "@property\ndef xmax(self):\n    return np.max(self.get_points()[:, 0])",
    "docstring": "The right edge of the bounding box.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\transforms.py",
    "ast_data": "FunctionDef name:xmax arg:self arguments arg Return return:yes Call Call"
  },
  {
    "library": "django",
    "name": "get_installed_libraries",
    "source_code": "def get_installed_libraries():\n    return {module_name: full_name for module_name, full_name in get_template_tag_modules()}",
    "docstring": "Return the built-in template tag libraries and those from installed applications. Libraries are stored in a dictionary where keys are the individual module names, not the full module paths. Example: django.templatetags.i18n is stored as i18n.",
    "type": "function",
    "file_path": "django\\django\\template\\backends\\django.py",
    "ast_data": "FunctionDef name:get_installed_libraries arguments Return return:yes Call"
  },
  {
    "library": "seaborn",
    "name": "set_axis_labels",
    "source_code": "def set_axis_labels(self, xlabel='', ylabel='', **kwargs):\n    self.ax_joint.set_xlabel(xlabel, **kwargs)\n    self.ax_joint.set_ylabel(ylabel, **kwargs)\n    return self",
    "docstring": "Set axis labels on the bivariate axes. Parameters ---------- xlabel, ylabel : strings Label names for the x and y variables. kwargs : key, value mappings Other keyword arguments are passed to the following functions: - :meth: - :meth: Returns ------- :class: instance Returns `` for easy method chaining.",
    "type": "method",
    "file_path": "seaborn\\seaborn\\axisgrid.py",
    "ast_data": "FunctionDef name:set_axis_labels arg:self arg:xlabel arg:ylabel arguments arg arg arg arg Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "realize_hint",
    "source_code": "def realize_hint(self) -> None:\n    if isinstance(self.data, (Pointwise, Reduction)) and self.data.inner_fn_opcount().nontrivial_read_count > 1:\n        self.realize()",
    "docstring": "Called on buffers we expect to be forced to realize later.",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\ir.py",
    "ast_data": "FunctionDef name:realize_hint arg:self arguments arg If BoolOp Call Compare Call Call"
  },
  {
    "library": "tensorflow",
    "name": "get_int_or_float_list",
    "source_code": "def get_int_or_float_list(self, min_length=_MIN_LENGTH, max_length=_MAX_LENGTH):\n    if self.get_bool():\n        return self.get_int_list(min_length, max_length)\n    else:\n        return self.get_float_list(min_length, max_length)",
    "docstring": "Consume a signed integer or float list with given constraints based on a consumed bool. Args: min_length: The minimum length of the list. max_length: The maximum length of the list. Returns: Consumed integer or float list based on input bytes and constraints.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\security\\fuzzing\\python_fuzzing.py",
    "ast_data": "FunctionDef name:get_int_or_float_list arg:self arg:min_length arg:max_length arguments arg arg arg If Call Return return:yes Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "linear",
    "source_code": "@dispatch.add_dispatch_support\ndef linear(x):\n    return x",
    "docstring": "Linear activation function (pass-through). For example: >>> a = tf.constant([-3.0,-1.0, 0.0,1.0,3.0], dtype = tf.float32) >>> b = tf.keras.activations.linear(a) >>> b.numpy() array([-3., -1., 0., 1., 3.], dtype=float32) Args: x: Input tensor. Returns: The input, unmodified.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\activations.py",
    "ast_data": "FunctionDef name:linear arg:x arguments arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "write_bytecode",
    "source_code": "def write_bytecode(self, install_root):\n    bytecode_file_names = [f'bytecode_{i}.c' for i in range(NUM_BYTECODE_FILES)]\n    bytecode_files = [open(os.path.join(install_root, name), 'w') for name in bytecode_file_names]\n    it = itertools.cycle(bytecode_files)\n    for m in self.frozen_modules:\n        self.write_frozen(m, next(it))\n    for f in bytecode_files:\n        f.close()",
    "docstring": "Write the files containing the frozen bytecode. Shared frozen modules evenly across the files.",
    "type": "method",
    "file_path": "pytorch\\torch\\utils\\_freeze.py",
    "ast_data": "FunctionDef name:write_bytecode arg:self arg:install_root arguments arg arg Assign Call Assign Call Call Assign Call For Call Call For Call"
  },
  {
    "library": "cryptography",
    "name": "private_bytes_raw",
    "source_code": "@abc.abstractmethod\ndef private_bytes_raw(self) -> bytes:\n    pass",
    "docstring": "The raw bytes of the private key. Equivalent to private_bytes(Raw, Raw, NoEncryption()).",
    "type": "method",
    "file_path": "cryptography\\src\\cryptography\\hazmat\\primitives\\asymmetric\\ed448.py",
    "ast_data": "FunctionDef name:private_bytes_raw arg:self arguments arg"
  },
  {
    "library": "scipy",
    "name": "_remove_zero_rows",
    "source_code": "def _remove_zero_rows(A, b):\n    status = 0\n    message = ''\n    i_zero = _row_count(A) == 0\n    A = A[np.logical_not(i_zero), :]\n    if not np.allclose(b[i_zero], 0):\n        status = 2\n        message = 'There is a zero row in A_eq with a nonzero corresponding entry in b_eq. The problem is infeasible.'\n    b = b[np.logical_not(i_zero)]\n    return (A, b, status, message)",
    "docstring": "Eliminates trivial equations from system of equations defined by Ax = b and identifies trivial infeasibilities Parameters ---------- A : 2-D array An array representing the left-hand side of a system of equations b : 1-D array An array representing the right-hand side of a system of equations Returns ------- A : 2-D array An array representing the left-hand side of a system of equations b : 1-D array An array representing the right-hand side of a system of equations status: int An integer indicating the status of the removal operation 0: No infeasibility identified 2: Trivially infeasible message : str A string descriptor of the exit status of the optimization.",
    "type": "function",
    "file_path": "scipy\\scipy\\optimize\\_remove_redundancy.py",
    "ast_data": "FunctionDef name:_remove_zero_rows arg:A arg:b arguments arg arg Assign Assign Assign Compare Call Assign Call If Call Assign Assign Assign Call Return return:yes"
  },
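
A quick NumPy illustration of the test performed by `_remove_zero_rows`: a zero row is only a problem when its right-hand side is nonzero (public NumPy only, not SciPy's private helper):

import numpy as np

A = np.array([[1.0, 2.0], [0.0, 0.0], [3.0, 4.0]])
b = np.array([5.0, 0.0, 6.0])

zero_rows = ~A.any(axis=1)                     # all-zero rows of A
infeasible = not np.allclose(b[zero_rows], 0)  # nonzero b on a zero row?
A_clean, b_clean = A[~zero_rows], b[~zero_rows]

print(infeasible)  # False: the zero row has b == 0, so it is simply dropped
print(A_clean)     # [[1. 2.] [3. 4.]]
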
  {
    "library": "numpy",
    "name": "deriv",
    "source_code": "def deriv(self, m=1):\n    off, scl = self.mapparms()\n    coef = self._der(self.coef, m, scl)\n    return self.__class__(coef, self.domain, self.window, self.symbol)",
    "docstring": "Differentiate. Return a series instance of that is the derivative of the current series. Parameters ---------- m : non-negative int Find the derivative of order . Returns ------- new_series : series A new series representing the derivative. The domain is the same as the domain of the differentiated series.",
    "type": "method",
    "file_path": "numpy\\numpy\\polynomial\\_polybase.py",
    "ast_data": "FunctionDef name:deriv arg:self arg:m arguments arg arg Assign Call Assign Call Return return:yes Call"
  },
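
Usage of `deriv` through the public `numpy.polynomial` interface, which `_polybase` backs:

from numpy.polynomial import Polynomial

p = Polynomial([1, 2, 3])  # 1 + 2x + 3x**2
print(p.deriv().coef)      # [2. 6.]  -> 2 + 6x
print(p.deriv(m=2).coef)   # [6.]     -> 6
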
  {
    "library": "matplotlib",
    "name": "get_children",
    "source_code": "def get_children(self):\n    return self._children",
    "docstring": "Return a list of the child \\s.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\offsetbox.py",
    "ast_data": "FunctionDef name:get_children arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_parse_args",
    "source_code": "def _parse_args(argv):\n    result = {}\n    for arg in argv:\n        k, v = arg.split('=')\n        result[k] = v\n    return result",
    "docstring": "Parses arguments with the form KEY=VALUE into a dictionary.",
    "type": "function",
    "file_path": "tensorflow\\third_party\\xla\\third_party\\llvm_openmp\\expand_cmake_vars.py",
    "ast_data": "FunctionDef name:_parse_args arg:argv arguments arg Assign For Assign Call Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "dimension_size",
    "source_code": "def dimension_size(self, axis):\n    if not isinstance(axis, int):\n        raise TypeError('axis must be an integer')\n    partitioned_ndims = len(self._partitioned_dim_sizes)\n    if axis < partitioned_ndims:\n        return self._partitioned_dim_sizes[axis]\n    else:\n        return self._inner_dim_sizes[axis - partitioned_ndims]",
    "docstring": "Returns the size of slices across the specified dimension.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\ragged_tensor_shape.py",
    "ast_data": "FunctionDef name:dimension_size arg:self arg:axis arguments arg arg If Call Raise Call Assign Call If Compare Return return:yes Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "get_dynamic_quant_module_class",
    "source_code": "def get_dynamic_quant_module_class(float_module_class: Callable, additional_dynamic_quant_mapping: Optional[dict[Callable, Any]]=None) -> Any:\n    if additional_dynamic_quant_mapping is None:\n        additional_dynamic_quant_mapping = {}\n    all_mappings = get_combined_dict(DEFAULT_DYNAMIC_QUANT_MODULE_MAPPINGS, additional_dynamic_quant_mapping)\n    dynamic_quant_module_class = all_mappings.get(float_module_class, None)\n    assert dynamic_quant_module_class is not None, f'Floating point module class {str(float_module_class)}' + ' does not have a corresponding quantized module class'\n    return copy.deepcopy(dynamic_quant_module_class)",
    "docstring": "n Get the dynamically quantized module class corresponding to the floating point module class",
    "type": "function",
    "file_path": "pytorch\\torch\\ao\\quantization\\quantization_mappings.py",
    "ast_data": "FunctionDef name:get_dynamic_quant_module_class arg:float_module_class arg:additional_dynamic_quant_mapping arguments arg arg If Compare Assign Assign Call Assign Call Compare Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "test_function",
    "source_code": "def test_function(iterator):\n    for _ in math_ops.range(self._steps_per_execution):\n        outputs = step_function(self, iterator)\n    return outputs",
    "docstring": "Runs an evaluation execution with multiple steps.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\training.py",
    "ast_data": "FunctionDef name:test_function arg:iterator arguments arg For Call Assign Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "get_kwargs_value",
    "source_code": "def get_kwargs_value(self, arg_name, **kwargs):\n    if arg_name in kwargs:\n        return kwargs.get(arg_name)\n    if arg_name in self.kwargs:\n        return self.kwargs.get(arg_name)\n    if self.allarg_properties and arg_name in self.allarg_properties:\n        return self.allarg_properties.get(arg_name).get('default_value')\n    raise AssertionError(f'{arg_name} not in self.allarg_properties')",
    "docstring": "Given an argument name, queries for values in (in order): 1. any provided kwargs for this function. 2. the class self.kwargs member. 3. any available default arguments in self.allarg_properties.",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\ir.py",
    "ast_data": "FunctionDef name:get_kwargs_value arg:self arg:arg_name arguments arg arg arg If Compare Return return:yes Call If Compare Return return:yes Call If BoolOp Compare Return return:yes Call Call Raise Call"
  },
  {
    "library": "tensorflow",
    "name": "ProtoEq",
    "source_code": "def ProtoEq(a, b):\n\n    def Format(pb):\n        if isinstance(pb, message.Message):\n            return dict(((desc.number, value) for desc, value in pb.ListFields()))\n        elif _IsMap(pb):\n            return dict(pb.items())\n        elif _IsRepeatedContainer(pb):\n            return dict(enumerate(list(pb)))\n        else:\n            return pb\n    a, b = (Format(a), Format(b))\n    if not isinstance(a, dict) or not isinstance(b, dict):\n        return a == b\n    for tag in sorted(set(a.keys()) | set(b.keys())):\n        if tag not in a or tag not in b:\n            return False\n        elif not ProtoEq(a[tag], b[tag]):\n            return False\n    return True",
    "docstring": "Compares two proto2 objects for equality. Recurses into nested messages. Uses list (not set) semantics for comparing repeated fields, ie duplicates and order matter. Args: a: A proto2 message or a primitive. b: A proto2 message or a primitive. Returns: if the messages are equal.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\util\\protobuf\\compare.py",
    "ast_data": "FunctionDef name:ProtoEq arg:a arg:b arguments arg arg FunctionDef name:Format arg:pb arguments arg If Call Return return:yes Call Call If Call Return return:yes Call Call If Call Return return:yes Call Call Call Return return:yes Assign Call Call If BoolOp Call Call Return return:yes Compare For Call Call Call Call Call If BoolOp Compare Compare Return return:yes If Call Return return:yes Return return:yes"
  },
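
The normalization trick in `ProtoEq` (messages, maps, and repeated fields all become dicts before recursing) can be sketched without protobuf:

def nested_eq(a, b):
    # Lists get list semantics, as in ProtoEq: keying elements by their
    # index makes order and duplicates matter.
    def norm(v):
        return dict(enumerate(v)) if isinstance(v, list) else v
    a, b = norm(a), norm(b)
    if not isinstance(a, dict) or not isinstance(b, dict):
        return a == b
    if a.keys() != b.keys():
        return False
    return all(nested_eq(a[k], b[k]) for k in a)

print(nested_eq({'f': [1, 2]}, {'f': [1, 2]}))  # True
print(nested_eq({'f': [1, 2]}, {'f': [2, 1]}))  # False: order matters
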
  {
    "library": "scipy",
    "name": "Shekel07",
    "source_code": "class Shekel07(Benchmark):\n\n    def __init__(self, dimensions=4):\n        Benchmark.__init__(self, dimensions)\n        self._bounds = list(zip([0.0] * self.N, [10.0] * self.N))\n        self.global_optimum = [[4.00057291078, 4.0006893679, 3.99948971076, 3.99960615785]]\n        self.fglob = -10.4029405668\n        self.A = asarray([[4.0, 4.0, 4.0, 4.0], [1.0, 1.0, 1.0, 1.0], [8.0, 8.0, 8.0, 8.0], [6.0, 6.0, 6.0, 6.0], [3.0, 7.0, 3.0, 7.0], [2.0, 9.0, 2.0, 9.0], [5.0, 5.0, 3.0, 3.0]])\n        self.C = asarray([0.1, 0.2, 0.2, 0.4, 0.4, 0.6, 0.3])\n\n    def fun(self, x, *args):\n        self.nfev += 1\n        return -sum(1 / (sum((x - self.A) ** 2, axis=1) + self.C))",
    "docstring": "Shekel 7 objective function. This class defines the Shekel 7 [1]_ global optimization problem. This is a multimodal minimization problem defined as follows: .. math:: f_{\\text{Shekel07}}(x) = \\sum_{i=1}^{m} \\frac{1}{c_{i} + \\sum_{j=1}^{n} (x_{j} - a_{ij})^2 }x_i \\in [0, 10]i = 1, ..., 4f(x) = -10.4028188x_i = 4i = 1, ..., 4` .. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions For Global Optimization Problems Int. Journal of Mathematical Modelling and Numerical Optimisation, 2013, 4, 150-194. TODO: this is a different global minimum compared to Jamil#131. This minimum is obtained after running lots of minimisations! Is there any numerical overflow that causes the minimum solution to not be [4] * N?",
    "type": "class",
    "file_path": "scipy\\benchmarks\\benchmarks\\go_benchmark_functions\\go_funcs_S.py",
    "ast_data": "ClassDef name:Shekel07 FunctionDef name:__init__ arg:self arg:dimensions arguments arg arg Call Assign Call Call Assign Assign Assign Call Assign Call FunctionDef name:fun arg:self arg:x arguments arg arg arg Return return:yes Call Call"
  },
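
Evaluating the `Shekel07` objective at the reported optimum reproduces `fglob` (plain NumPy, reusing the `A` and `C` constants from the class body):

import numpy as np

A = np.array([[4.0, 4.0, 4.0, 4.0], [1.0, 1.0, 1.0, 1.0],
              [8.0, 8.0, 8.0, 8.0], [6.0, 6.0, 6.0, 6.0],
              [3.0, 7.0, 3.0, 7.0], [2.0, 9.0, 2.0, 9.0],
              [5.0, 5.0, 3.0, 3.0]])
C = np.array([0.1, 0.2, 0.2, 0.4, 0.4, 0.6, 0.3])
x = np.array([4.00057291078, 4.0006893679, 3.99948971076, 3.99960615785])

f = -np.sum(1.0 / (np.sum((x - A) ** 2, axis=1) + C))
print(f)  # ~ -10.4029405668, matching self.fglob
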
  {
    "library": "pytorch",
    "name": "validate_dim_length",
    "source_code": "def validate_dim_length(length: int):\n    if isinstance(length, (int, torch.SymInt)):\n        torch._check_is_size(length)\n    else:\n        assert length >= 0",
    "docstring": "Validates that an object represents a valid dimension length.",
    "type": "function",
    "file_path": "pytorch\\torch\\_prims_common\\__init__.py",
    "ast_data": "FunctionDef name:validate_dim_length arg:length arguments arg If Call Call Compare"
  },
  {
    "library": "scikit-learn",
    "name": "key",
    "source_code": "def key(profile):\n    components = profile['name'].lower().split(' ')\n    return ' '.join([components[-1]] + components[:-1])",
    "docstring": "Get a sorting key based on the lower case last name, then firstname",
    "type": "function",
    "file_path": "scikit-learn\\build_tools\\generate_authors_table.py",
    "ast_data": "FunctionDef name:key arg:profile arguments arg Assign Call Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "__getitem__",
    "source_code": "def __getitem__(self, key):\n    if self._dims is not None:\n        if isinstance(key, slice):\n            return TensorShape(self._dims[key])\n        elif self._v2_behavior:\n            return self._dims[key]\n        else:\n            return self.dims[key]\n    elif isinstance(key, slice):\n        start = key.start if key.start is not None else 0\n        stop = key.stop\n        if key.step is not None:\n            raise ValueError('Steps are not yet handled')\n        if stop is None:\n            return unknown_shape()\n        elif start < 0 or stop < 0:\n            return unknown_shape()\n        else:\n            return unknown_shape(rank=stop - start)\n    elif self._v2_behavior:\n        return None\n    else:\n        return Dimension(None)",
    "docstring": "Returns the value of a dimension or a shape, depending on the key. Args: key: If is an integer, returns the dimension at that index; otherwise if is a slice, returns a TensorShape whose dimensions are those selected by the slice from . Returns: An integer if is an integer, or a if is a slice. Raises: ValueError: If is a slice and is completely unknown and the step is set.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\tensor_shape.py",
    "ast_data": "FunctionDef name:__getitem__ arg:self arg:key arguments arg arg If Compare If Call Return return:yes Call If Return return:yes Return return:yes If Call Assign Compare Assign If Compare Raise Call If Compare Return return:yes Call If BoolOp Compare Compare Return return:yes Call Return return:yes Call If Return return:no Return return:yes Call"
  },
  {
    "library": "authlib",
    "name": "revoke_token",
    "source_code": "def revoke_token(self, token, request):\n    token.revoked = True\n    token.save()",
    "docstring": "Mark the give token as revoked.",
    "type": "method",
    "file_path": "authlib\\authlib\\integrations\\django_oauth2\\endpoints.py",
    "ast_data": "FunctionDef name:revoke_token arg:self arg:token arg:request arguments arg arg arg Assign Call"
  },
  {
    "library": "numpy",
    "name": "array_function_from_dispatcher",
    "source_code": "def array_function_from_dispatcher(implementation, module=None, verify=True, docs_from_dispatcher=True):\n\n    def decorator(dispatcher):\n        return array_function_dispatch(dispatcher, module, verify=verify, docs_from_dispatcher=docs_from_dispatcher)(implementation)\n    return decorator",
    "docstring": "Like array_function_dispatcher, but with function arguments flipped.",
    "type": "function",
    "file_path": "numpy\\numpy\\_core\\overrides.py",
    "ast_data": "FunctionDef name:array_function_from_dispatcher arg:implementation arg:module arg:verify arg:docs_from_dispatcher arguments arg arg arg arg FunctionDef name:decorator arg:dispatcher arguments arg Return return:yes Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "Identity",
    "source_code": "@tf_export(v1=['initializers.identity'])\n@deprecation.deprecated_endpoints('initializers.identity')\nclass Identity(Initializer):\n\n    @deprecated_args(None, 'Call initializer instance with the dtype argument instead of passing it to the constructor', 'dtype')\n    def __init__(self, gain=1.0, dtype=dtypes.float32):\n        self.gain = gain\n        self.dtype = _assert_float_dtype(dtypes.as_dtype(dtype))\n\n    def __call__(self, shape, dtype=None, partition_info=None):\n        full_shape = shape if partition_info is None else partition_info.full_shape\n        if len(full_shape) != 2:\n            raise ValueError(f'The tensor to initialize, specified by argument `shape` must be at least two-dimensional. Received shape={shape}')\n        if dtype is None:\n            dtype = self.dtype\n        if isinstance(full_shape, tensor_shape.TensorShape):\n            full_shape = full_shape.as_list()\n        initializer = linalg_ops_impl.eye(*full_shape, dtype=dtype)\n        if partition_info is not None:\n            initializer = array_ops.slice(initializer, partition_info.var_offset, shape)\n        return self.gain * initializer\n\n    def get_config(self):\n        return {'gain': self.gain, 'dtype': self.dtype.name}",
    "docstring": "Initializer that generates the identity matrix. Only use for 2D matrices. Args: gain: Multiplicative factor to apply to the identity matrix. dtype: Default data type, used if no argument is provided when calling the initializer. Only floating point types are supported.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\init_ops.py",
    "ast_data": "ClassDef name:Identity FunctionDef name:__init__ arg:self arg:gain arg:dtype arguments arg arg arg Assign Assign Call Call Call FunctionDef name:__call__ arg:self arg:shape arg:dtype arg:partition_info arguments arg arg arg arg Assign Compare If Compare Call Raise Call If Compare Assign If Call Assign Call Assign Call If Compare Assign Call Return return:yes FunctionDef name:get_config arg:self arguments arg Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "session_manager",
    "source_code": "@property\ndef session_manager(self):\n    return self._session_manager",
    "docstring": "Return the SessionManager used by the Supervisor. Returns: A SessionManager object.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\training\\supervisor.py",
    "ast_data": "FunctionDef name:session_manager arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "handle",
    "source_code": "def handle(self, args, kwargs):\n    if any((isinstance(x, keras_tensor.KerasTensor) for x in nest.flatten([args, kwargs]))):\n        return ClassMethod(self.cls, self.method_name)(args[1:], kwargs)\n    else:\n        return self.NOT_SUPPORTED",
    "docstring": "Handle the specified operation with the specified arguments.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\layers\\core.py",
    "ast_data": "FunctionDef name:handle arg:self arg:args arg:kwargs arguments arg arg arg If Call Call Call Return return:yes Call Call Return return:yes"
  },
  {
    "library": "sphinx",
    "name": "rewrite_github_anchor",
    "source_code": "def rewrite_github_anchor(app: Sphinx, uri: str) -> str | None:\n    parsed = urlparse(uri)\n    if parsed.hostname == 'github.com' and parsed.fragment:\n        prefixed = parsed.fragment.startswith('user-content-')\n        if not prefixed:\n            fragment = f'user-content-{parsed.fragment}'\n            return urlunparse(parsed._replace(fragment=fragment))\n    return None",
    "docstring": "Rewrite anchor name of the hyperlink to github.com The hyperlink anchors in github.com are dynamically generated. This rewrites them before checking and makes them comparable.",
    "type": "function",
    "file_path": "sphinx\\sphinx\\builders\\linkcheck.py",
    "ast_data": "FunctionDef name:rewrite_github_anchor arg:app arg:uri arguments arg arg Assign Call If BoolOp Compare Assign Call If Assign Return return:yes Call Call Return return:no"
  },
  {
    "library": "pytorch",
    "name": "normalize_with_stride_order",
    "source_code": "def normalize_with_stride_order(self, prefix: str='t') -> 'MemoryDep':\n    from torch._inductor import ir\n    strides = V.graph.sizevars.stride_hints(self.index, self.var_names)\n    order = sorted(range(len(strides)), key=strides.__getitem__, reverse=True)\n    stride_reorder = ir.same_reorder(order)\n    sizes = self.size\n    var_names = self.var_names\n    new_reordered_sizes = stride_reorder(sizes)\n    new_reordered_var_names = stride_reorder(var_names)\n    new_simplified_sizes, reindex, _prune = V.graph.sizevars._simplify_loops(new_reordered_var_names, new_reordered_sizes, index_prevent_reordering([self.index], new_reordered_var_names, new_reordered_sizes))\n    var_ranges, add_var = var_builder(prefix)\n    replacement = dict(zip(new_reordered_var_names, reindex([add_var(x) for x in new_simplified_sizes])))\n    new_index = sympy_subs(sympy.expand(self.index), replacement)\n    out = MemoryDep(self.name, new_index, tuple(var_ranges.keys()), tuple(var_ranges.values()))\n    return out",
    "docstring": "Used to decide if two MemoryDep does not equal due to different loop orders. More specifically, when dep1 and dep2 are not equal, we can normalize both and check if they are equal after that. If yes, then the mismatch is caused by different loop orders.",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\dependencies.py",
    "ast_data": "FunctionDef name:normalize_with_stride_order arg:self arg:prefix arguments arg arg Assign Call Assign Call Call Call Assign Call Assign Assign Assign Call Assign Call Assign Call Call Assign Call Assign Call Call Call Call Assign Call Call Assign Call Call Call Call Call Return return:yes"
  },
  {
    "library": "scipy",
    "name": "make_strictly_feasible",
    "source_code": "def make_strictly_feasible(x, lb, ub, rstep=1e-10):\n    x_new = x.copy()\n    active = find_active_constraints(x, lb, ub, rstep)\n    lower_mask = np.equal(active, -1)\n    upper_mask = np.equal(active, 1)\n    if rstep == 0:\n        x_new[lower_mask] = np.nextafter(lb[lower_mask], ub[lower_mask])\n        x_new[upper_mask] = np.nextafter(ub[upper_mask], lb[upper_mask])\n    else:\n        x_new[lower_mask] = lb[lower_mask] + rstep * np.maximum(1, np.abs(lb[lower_mask]))\n        x_new[upper_mask] = ub[upper_mask] - rstep * np.maximum(1, np.abs(ub[upper_mask]))\n    tight_bounds = (x_new < lb) | (x_new > ub)\n    x_new[tight_bounds] = 0.5 * (lb[tight_bounds] + ub[tight_bounds])\n    return x_new",
    "docstring": "Shift a point to the interior of a feasible region. Each element of the returned vector is at least at a relative distance from the closest bound. If `np.nextafter` is used.",
    "type": "function",
    "file_path": "scipy\\scipy\\optimize\\_lsq\\common.py",
    "ast_data": "FunctionDef name:make_strictly_feasible arg:x arg:lb arg:ub arg:rstep arguments arg arg arg arg Assign Call Assign Call Assign Call Assign Call If Compare Assign Call Assign Call Assign Call Call Assign Call Call Assign Compare Compare Assign Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "create_root",
    "source_code": "@classmethod\ndef create_root(cls) -> _ModuleMeta:\n    return _ModuleMeta('', None, ('', None))",
    "docstring": "Create an empty module meta representing root module.",
    "type": "method",
    "file_path": "pytorch\\torch\\onnx\\_internal\\fx\\passes\\modularization.py",
    "ast_data": "FunctionDef name:create_root arg:cls arguments arg Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "fit",
    "source_code": "@_fit_context(prefer_skip_nested_validation=True)\ndef fit(self, X, y=None):\n    X = validate_data(self, X, accept_sparse='csr', dtype=(np.float64, np.float32))\n    n_samples = X.shape[0]\n    rng = check_random_state(self.random_state)\n    self.components_ = np.asarray(rng.normal(0, 0.01, (self.n_components, X.shape[1])), order='F', dtype=X.dtype)\n    self._n_features_out = self.components_.shape[0]\n    self.intercept_hidden_ = np.zeros(self.n_components, dtype=X.dtype)\n    self.intercept_visible_ = np.zeros(X.shape[1], dtype=X.dtype)\n    self.h_samples_ = np.zeros((self.batch_size, self.n_components), dtype=X.dtype)\n    n_batches = int(np.ceil(float(n_samples) / self.batch_size))\n    batch_slices = list(gen_even_slices(n_batches * self.batch_size, n_batches, n_samples=n_samples))\n    verbose = self.verbose\n    begin = time.time()\n    for iteration in range(1, self.n_iter + 1):\n        for batch_slice in batch_slices:\n            self._fit(X[batch_slice], rng)\n        if verbose:\n            end = time.time()\n            print('[%s] Iteration %d, pseudo-likelihood = %.2f, time = %.2fs' % (type(self).__name__, iteration, self.score_samples(X).mean(), end - begin))\n            begin = end\n    return self",
    "docstring": "Fit the model to the data X. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) Training data. y : array-like of shape (n_samples,) or (n_samples, n_outputs), default=None Target values (None for unsupervised transformations). Returns ------- self : BernoulliRBM The fitted model.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\neural_network\\_rbm.py",
    "ast_data": "FunctionDef name:fit arg:self arg:X arg:y arguments arg arg arg Assign Call Assign Assign Call Assign Call Call Assign Assign Call Assign Call Assign Call Assign Call Call Call Assign Call Call Assign Assign Call For Call For Call If Assign Call Call Call Call Call Assign Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "numpy",
    "source_code": "def numpy(self):\n    return _var_to_tensor(self).numpy()",
    "docstring": "Copies the values in this ShardedVariable to a NumPy array. First converts to a single Tensor using the registered conversion function, which concatenates the shards, then uses Tensor.numpy() to convert to a NumPy array. Returns: A NumPy array of the same shape and dtype.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\sharded_variable.py",
    "ast_data": "FunctionDef name:numpy arg:self arguments arg Return return:yes Call Call"
  },
  {
    "library": "numpy",
    "name": "swapcase",
    "source_code": "@set_module('numpy.strings')\n@array_function_dispatch(_unary_op_dispatcher)\ndef swapcase(a):\n    a_arr = np.asarray(a)\n    return _vec_string(a_arr, a_arr.dtype, 'swapcase')",
    "docstring": "Return element-wise a copy of the string with uppercase characters converted to lowercase and vice versa. Calls :meth: element-wise. For 8-bit strings, this method is locale-dependent. Parameters ---------- a : array-like, with `` dtype, depending on input types See Also -------- str.swapcase Examples -------- >>> import numpy as np >>> c=np.array(['a1B c','1b Ca','b Ca1','cA1b'],'S5'); c array(['a1B c', '1b Ca', 'b Ca1', 'cA1b'], dtype='|S5') >>> np.strings.swapcase(c) array(['A1b C', '1B cA', 'B cA1', 'Ca1B'], dtype='|S5')",
    "type": "function",
    "file_path": "numpy\\numpy\\_core\\strings.py",
    "ast_data": "FunctionDef name:swapcase arg:a arguments arg Assign Call Return return:yes Call Call Call"
  },
  {
    "library": "scikit-learn",
    "name": "_dynamic_max_trials",
    "source_code": "def _dynamic_max_trials(n_inliers, n_samples, min_samples, probability):\n    inlier_ratio = n_inliers / float(n_samples)\n    nom = max(_EPSILON, 1 - probability)\n    denom = max(_EPSILON, 1 - inlier_ratio ** min_samples)\n    if nom == 1:\n        return 0\n    if denom == 1:\n        return float('inf')\n    return abs(float(np.ceil(np.log(nom) / np.log(denom))))",
    "docstring": "Determine number trials such that at least one outlier-free subset is sampled for the given inlier/outlier ratio. Parameters ---------- n_inliers : int Number of inliers in the data. n_samples : int Total number of samples in the data. min_samples : int Minimum number of samples chosen randomly from original data. probability : float Probability (confidence) that one outlier-free sample is generated. Returns ------- trials : int Number of trials.",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\linear_model\\_ransac.py",
    "ast_data": "FunctionDef name:_dynamic_max_trials arg:n_inliers arg:n_samples arg:min_samples arg:probability arguments arg arg arg arg Assign Call Assign Call Assign Call If Compare Return return:yes If Compare Return return:yes Call Return return:yes Call Call Call Call Call"
  },
  {
    "library": "matplotlib",
    "name": "set_fontvariant",
    "source_code": "def set_fontvariant(self, variant):\n    self._fontproperties.set_variant(variant)\n    self.stale = True",
    "docstring": "Set the font variant. Parameters ---------- variant : {'normal', 'small-caps'} See Also -------- .font_manager.FontProperties.set_variant",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\text.py",
    "ast_data": "FunctionDef name:set_fontvariant arg:self arg:variant arguments arg arg Call Assign"
  },
  {
    "library": "sphinx",
    "name": "get_translator_class",
    "source_code": "def get_translator_class(self, *args: Any) -> type[nodes.NodeVisitor]:\n    return self.env._registry.get_translator_class(self)",
    "docstring": "Return a class of translator.",
    "type": "method",
    "file_path": "sphinx\\sphinx\\builders\\__init__.py",
    "ast_data": "FunctionDef name:get_translator_class arg:self arguments arg arg Return return:yes Call"
  },
  {
    "library": "numpy",
    "name": "cpu_baseline_names",
    "source_code": "def cpu_baseline_names(self):\n    return self.parse_baseline_names",
    "docstring": "return a list of final CPU baseline feature names",
    "type": "method",
    "file_path": "numpy\\numpy\\distutils\\ccompiler_opt.py",
    "ast_data": "FunctionDef name:cpu_baseline_names arg:self arguments arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "replay",
    "source_code": "def replay(self):\n    super().replay()",
    "docstring": "Replay the CUDA work captured by this graph.",
    "type": "method",
    "file_path": "pytorch\\torch\\cuda\\graphs.py",
    "ast_data": "FunctionDef name:replay arg:self arguments arg Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_load_stack_frames",
    "source_code": "def _load_stack_frames(self):\n    stack_frames_iter = self._reader.stack_frames_iterator()\n    for debug_event, _ in stack_frames_iter:\n        stack_frame_with_id = debug_event.stack_frame_with_id\n        file_line_col = stack_frame_with_id.file_line_col\n        self._unprocessed_stack_frames[stack_frame_with_id.id] = file_line_col\n    unprocessed_stack_frame_ids = tuple(self._unprocessed_stack_frames.keys())\n    for stack_frame_id in unprocessed_stack_frame_ids:\n        file_line_col = self._unprocessed_stack_frames[stack_frame_id]\n        if len(self._host_name_file_path_to_offset) > file_line_col.file_index:\n            host_name, file_path = list(self._host_name_file_path_to_offset.keys())[file_line_col.file_index]\n            self._stack_frame_by_id[stack_frame_id] = (host_name, file_path, file_line_col.line, file_line_col.func)\n        del self._unprocessed_stack_frames[stack_frame_id]",
    "docstring": "Incrementally read the .stack_frames file. This must be called after _load_source_files(). It assumes that the following contract is honored by the writer of the tfdbg v2 data file set: - Before a stack frame is written to the .stack_frames file, the corresponding source file information must have been written to the .source_files file first.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\debug\\lib\\debug_events_reader.py",
    "ast_data": "FunctionDef name:_load_stack_frames arg:self arguments arg Assign Call For Assign Assign Assign Assign Call Call For Assign If Compare Call Assign Call Call Assign"
  },
  {
    "library": "tensorflow",
    "name": "_create_placeholders",
    "source_code": "def _create_placeholders(args, kwargs, arg_names=None):\n    signature_context = trace_type.InternalTracingContext(is_legacy_signature=True)\n    arg_trace_types = trace_type.from_value(tuple(args), signature_context)\n    kwarg_trace_types = trace_type.from_value(kwargs, signature_context)\n    placeholder_mapping = signature_context.get_placeholder_mapping()\n    placeholder_context = trace_type.InternalPlaceholderContext(ops.get_default_graph(), placeholder_mapping)\n    if arg_names is None:\n        arg_names = [None] * len(arg_trace_types.components)\n    func_args = []\n    for name, trace_type_arg in zip(arg_names, arg_trace_types.components):\n        placeholder_context.update_naming_scope(name)\n        placeholder = trace_type_arg.placeholder_value(placeholder_context)\n        func_args.append(placeholder)\n    func_kwargs = {}\n    for name, trace_type_kwarg in zip(*sorted(kwarg_trace_types.mapping.items())):\n        placeholder_context.update_naming_scope(name)\n        placeholder = trace_type_kwarg.placeholder_value(placeholder_context)\n        func_kwargs[name] = placeholder\n    return (tuple(func_args), func_kwargs)",
    "docstring": "Create placeholders given positional args and keyword args.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\func_graph.py",
    "ast_data": "FunctionDef name:_create_placeholders arg:args arg:kwargs arg:arg_names arguments arg arg arg Assign Call Assign Call Call Assign Call Assign Call Assign Call Call If Compare Assign Call Assign For Call Call Assign Call Call Assign For Call Call Call Call Assign Call Assign Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "with_min_float_operations",
    "source_code": "def with_min_float_operations(self, min_float_ops):\n    self._options['min_float_ops'] = min_float_ops\n    return self",
    "docstring": "Only show profiler nodes consuming no less than 'min_float_ops'. Please see on the caveats of calculating float operations. Args: min_float_ops: Only show profiler nodes with float operations no less than this. Returns: self",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\profiler\\option_builder.py",
    "ast_data": "FunctionDef name:with_min_float_operations arg:self arg:min_float_ops arguments arg arg Assign Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "__init__",
    "source_code": "def __init__(self, figure, fh):\n    super().__init__()\n    self.dpi = figure.dpi\n    self.fh = fh\n    self.figure = figure\n    self.image_counter = 0",
    "docstring": "Create a new PGF renderer that translates any drawing instruction into text commands to be interpreted in a latex pgfpicture environment. Attributes ---------- figure : Matplotlib figure to initialize height, width and dpi from. fh : file-like File handle for the output of the drawing commands.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\backends\\backend_pgf.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:figure arg:fh arguments arg arg arg Call Call Assign Assign Assign Assign"
  },
  {
    "library": "tensorflow",
    "name": "slice_arrays",
    "source_code": "def slice_arrays(arrays, indices, contiguous=True):\n    converted_to_list = False\n    if not isinstance(arrays, list):\n        converted_to_list = True\n        arrays = [arrays]\n    if any((tensor_util.is_tf_type(x) for x in arrays)):\n        if not contiguous:\n            entries = [[x[i:i + 1] for i in indices] for x in arrays]\n            slices = [array_ops.concat(x, axis=0) for x in entries]\n        else:\n            slices = [x[indices[0]:indices[-1] + 1] for x in arrays]\n    else:\n        slices = generic_utils.slice_arrays(arrays, indices)\n    if converted_to_list:\n        slices = slices[0]\n    return slices",
    "docstring": "Slices batches out of provided arrays (workaround for eager tensors). Unfortunately eager tensors don't have the same slicing behavior as Numpy arrays (they follow the same slicing behavior as symbolic TF tensors), hence we cannot use directly and we have to implement this workaround based on . This has a performance cost. Args: arrays: Single array or list of arrays. indices: List of indices in the array that should be included in the output batch. contiguous: Boolean flag indicating whether the indices are contiguous. Returns: Slice of data (either single array or list of arrays).",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\training_utils.py",
    "ast_data": "FunctionDef name:slice_arrays arg:arrays arg:indices arg:contiguous arguments arg arg arg Assign If Call Assign Assign If Call Call If Assign Assign Call Assign Assign Call If Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "from_device",
    "source_code": "@classmethod\ndef from_device(cls, device: str) -> 'Layout':\n    return cls.from_single_device_mesh(Mesh.from_device(device))",
    "docstring": "Constructs a single device layout from a single device mesh.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\dtensor\\python\\layout.py",
    "ast_data": "FunctionDef name:from_device arg:cls arg:device arguments arg arg Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "max_unpool3d",
    "source_code": "def max_unpool3d(input: Tensor, indices: Tensor, kernel_size: BroadcastingList3[int], stride: Optional[BroadcastingList3[int]]=None, padding: BroadcastingList3[int]=0, output_size: Optional[BroadcastingList3[int]]=None) -> Tensor:\n    if has_torch_function_unary(input):\n        return handle_torch_function(max_unpool3d, (input,), input, indices, kernel_size, stride=stride, padding=padding, output_size=output_size)\n    kernel_size = _triple(kernel_size)\n    if stride is not None:\n        _stride = _triple(stride)\n    else:\n        _stride = kernel_size\n    padding = _triple(padding)\n    output_size = _unpool_output_size(input, kernel_size, _stride, padding, output_size)\n    return torch._C._nn.max_unpool3d(input, indices, output_size, _stride, padding)",
    "docstring": "Compute a partial inverse of :class:. See :class: for details.",
    "type": "function",
    "file_path": "pytorch\\torch\\nn\\functional.py",
    "ast_data": "FunctionDef name:max_unpool3d arg:input arg:indices arg:kernel_size arg:stride arg:padding arg:output_size arguments arg arg arg arg arg arg If Call Return return:yes Call Assign Call If Compare Assign Call Assign Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "step",
    "source_code": "@_use_grad_for_differentiable\ndef step(self, closure=None):\n    self._cuda_graph_capture_health_check()\n    loss = None\n    if closure is not None:\n        with torch.enable_grad():\n            loss = closure()\n    for group in self.param_groups:\n        params_with_grad: list[Tensor] = []\n        grads: list[Tensor] = []\n        square_avgs: list[Tensor] = []\n        grad_avgs: list[Tensor] = []\n        momentum_buffer_list: list[Tensor] = []\n        state_steps: list[Tensor] = []\n        has_complex = self._init_group(group, params_with_grad, grads, square_avgs, momentum_buffer_list, grad_avgs, state_steps)\n        rmsprop(params_with_grad, grads, square_avgs, grad_avgs, momentum_buffer_list, state_steps, lr=group['lr'], alpha=group['alpha'], eps=group['eps'], weight_decay=group['weight_decay'], momentum=group['momentum'], centered=group['centered'], foreach=group['foreach'], maximize=group['maximize'], differentiable=group['differentiable'], capturable=group['capturable'], has_complex=has_complex)\n    return loss",
    "docstring": "Perform a single optimization step. Args: closure (Callable, optional): A closure that reevaluates the model and returns the loss.",
    "type": "method",
    "file_path": "pytorch\\torch\\optim\\rmsprop.py",
    "ast_data": "FunctionDef name:step arg:self arg:closure arguments arg arg Call Assign If Compare With Call Assign Call For Assign Call Call Return return:yes"
  },
  {
    "library": "django",
    "name": "make_key",
    "source_code": "def make_key(self, key, version=None):\n    if version is None:\n        version = self.version\n    return self.key_func(key, self.key_prefix, version)",
    "docstring": "Construct the key used by all other methods. By default, use the key_func to generate a key (which, by default, prepends the `key_prefix' and 'version'). A different key function can be provided at the time of cache construction; alternatively, you can subclass the cache backend to provide custom key making behavior.",
    "type": "method",
    "file_path": "django\\django\\core\\cache\\backends\\base.py",
    "ast_data": "FunctionDef name:make_key arg:self arg:key arg:version arguments arg arg arg If Compare Assign Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "bessel_i0e",
    "source_code": "@tf_export('math.bessel_i0e', 'math.special.bessel_i0e')\n@dispatch.register_unary_elementwise_api\n@dispatch.add_dispatch_support\ndef bessel_i0e(x, name=None):\n    with ops.name_scope(name, 'bessel_i0e', [x]):\n        return gen_special_math_ops.bessel_i0e(x)",
    "docstring": "Computes the Bessel i0e function of element-wise. Modified Bessel function of order 0. >>> tf.math.special.bessel_i0e([-1., -0.5, 0.5, 1.]).numpy() array([0.46575961, 0.64503527, 0.64503527, 0.46575961], dtype=float32) Args: x: A or . Must be one of the following types: , , . name: A name for the operation (optional). Returns: A or , respectively. Has the same type as . @compatibility(scipy) Equivalent to scipy.special.i0e @end_compatibility",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\special_math_ops.py",
    "ast_data": "FunctionDef name:bessel_i0e arg:x arg:name arguments arg arg With Call Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "from_row_lengths",
    "source_code": "@classmethod\ndef from_row_lengths(cls, row_lengths, validate=True, dtype=None, dtype_hint=None):\n    if not isinstance(validate, bool):\n        raise TypeError('validate must have type bool')\n    with ops.name_scope(None, 'RowPartitionFromRowLengths', [row_lengths]):\n        row_lengths = cls._convert_row_partition(row_lengths, 'row_lengths', dtype_hint=dtype_hint, dtype=dtype)\n        row_lengths.shape.assert_has_rank(1)\n        if validate:\n            msg = 'Arguments to from_row_lengths do not form a valid RowPartition'\n            checks = [check_ops.assert_rank(row_lengths, 1, message=msg), check_ops.assert_non_negative(row_lengths, message=msg)]\n            row_lengths = control_flow_ops.with_dependencies(checks, row_lengths)\n        row_limits = math_ops.cumsum(row_lengths)\n        row_splits = array_ops.concat([[0], row_limits], axis=0)\n        return cls(row_splits=row_splits, row_lengths=row_lengths, internal=_row_partition_factory_key)",
    "docstring": "Creates a with rows partitioned by . This divides a sequence into rows by indicating the length of each row: Args: row_lengths: A 1-D integer tensor with shape . Must be nonnegative. validate: If true, then use assertions to check that the arguments form a valid . dtype: Optional dtype for the RowPartition. If missing, the type is inferred from the type of , dtype_hint, or tf.int64. dtype_hint: Optional dtype for the RowPartition, used when dtype is None. In some cases, a caller may not have a dtype in mind when converting to a tensor, so dtype_hint can be used as a soft preference. If the conversion to is not possible, this argument has no effect. Returns: A .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\row_partition.py",
    "ast_data": "FunctionDef name:from_row_lengths arg:cls arg:row_lengths arg:validate arg:dtype arg:dtype_hint arguments arg arg arg arg arg If Call Raise Call With Call Assign Call Call If Assign Assign Call Call Assign Call Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "_broadcast_arrays",
    "source_code": "def _broadcast_arrays(a, b):\n    x, y = np.broadcast_arrays(a, b)\n    x.flags.writeable = a.flags.writeable\n    y.flags.writeable = b.flags.writeable\n    return (x, y)",
    "docstring": "Same as np.broadcast_arrays(a, b) but old writeability rules. NumPy >= 1.17.0 transitions broadcast_arrays to return read-only arrays. Set writeability explicitly to avoid warnings. Retain the old writeability rules, as our Cython code assumes the old behavior.",
    "type": "function",
    "file_path": "scipy\\scipy\\sparse\\_index.py",
    "ast_data": "FunctionDef name:_broadcast_arrays arg:a arg:b arguments arg arg Assign Call Assign Assign Return return:yes"
  },
  {
    "library": "pandas",
    "name": "items",
    "source_code": "def items(self) -> Iterable[tuple[Hashable, Any]]:\n    return zip(iter(self.index), iter(self))",
    "docstring": "Lazily iterate over (index, value) tuples. This method returns an iterable tuple (index, value). This is convenient if you want to create a lazy iterator. Returns ------- iterable Iterable of tuples containing the (index, value) pairs from a Series. See Also -------- DataFrame.items : Iterate over (column name, Series) pairs. DataFrame.iterrows : Iterate over DataFrame rows as (index, Series) pairs. Examples -------- >>> s = pd.Series([\"A\", \"B\", \"C\"]) >>> for index, value in s.items(): ... print(f\"Index : {index}, Value : {value}\") Index : 0, Value : A Index : 1, Value : B Index : 2, Value : C",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\series.py",
    "ast_data": "FunctionDef name:items arg:self arguments arg Return return:yes Call Call Call"
  },
  {
    "library": "django",
    "name": "has_delete_permission",
    "source_code": "def has_delete_permission(self, request, obj=None):\n    opts = self.opts\n    codename = get_permission_codename('delete', opts)\n    return request.user.has_perm('%s.%s' % (opts.app_label, codename))",
    "docstring": "Return True if the given request has permission to delete the given Django model instance, the default implementation doesn't examine the parameter. Can be overridden by the user in subclasses. In such case it should return True if the given request has permission to delete the model instance. If is None, this should return True if the given request has permission to delete *any* object of the given type.",
    "type": "method",
    "file_path": "django\\django\\contrib\\admin\\options.py",
    "ast_data": "FunctionDef name:has_delete_permission arg:self arg:request arg:obj arguments arg arg arg Assign Assign Call Return return:yes Call"
  },
  {
    "library": "seaborn",
    "name": "unique_dashes",
    "source_code": "def unique_dashes(n):\n    dashes = ['', (4, 1.5), (1, 1), (3, 1.25, 1.5, 1.25), (5, 1, 1, 1)]\n    p = 3\n    while len(dashes) < n:\n        a = itertools.combinations_with_replacement([3, 1.25], p)\n        b = itertools.combinations_with_replacement([4, 1], p)\n        segment_list = itertools.chain(*zip(list(a)[1:-1][::-1], list(b)[1:-1]))\n        for segments in segment_list:\n            gap = min(segments)\n            spec = tuple(itertools.chain(*((seg, gap) for seg in segments)))\n            dashes.append(spec)\n        p += 1\n    return dashes[:n]",
    "docstring": "Build an arbitrarily long list of unique dash styles for lines. Parameters ---------- n : int Number of unique dash specs to generate. Returns ------- dashes : list of strings or tuples Valid arguments for the `matplotlib.lines.Line2D`), the remainder are sequences of long and short dashes.",
    "type": "function",
    "file_path": "seaborn\\seaborn\\_base.py",
    "ast_data": "FunctionDef name:unique_dashes arg:n arguments arg Assign Assign While Compare Call Assign Call Assign Call Assign Call Call Call Call For Assign Call Assign Call Call Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "ylim",
    "source_code": "def ylim(*args, **kwargs) -> tuple[float, float]:\n    ax = gca()\n    if not args and (not kwargs):\n        return ax.get_ylim()\n    ret = ax.set_ylim(*args, **kwargs)\n    return ret",
    "docstring": "Get or set the y-limits of the current Axes. Call signatures:: bottom, top = ylim() # return the current ylim ylim((bottom, top)) # set the ylim to bottom, top ylim(bottom, top) # set the ylim to bottom, top If you do not specify args, you can alternatively pass *bottom* or *top* as kwargs, i.e.:: ylim(top=3) # adjust the top leaving bottom unchanged ylim(bottom=1) # adjust the bottom leaving top unchanged Setting limits turns autoscaling off for the y-axis. Returns ------- bottom, top A tuple of the new y-axis limits. Notes ----- Calling this function with no arguments (e.g. `~.Axes.get_ylim~.Axes.set_ylim` on the current Axes. All arguments are passed though.",
    "type": "function",
    "file_path": "matplotlib\\lib\\matplotlib\\pyplot.py",
    "ast_data": "FunctionDef name:ylim arguments arg arg Assign Call If BoolOp Return return:yes Call Assign Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "all_reduce_coalesced",
    "source_code": "def all_reduce_coalesced(self: list[torch.Tensor], reduceOp: str, group: RANK_TYPES, tag: str='') -> list[torch.Tensor]:\n    group_name = _resolve_group_name(group, tag)\n    tensor_list = torch.ops._c10d_functional.all_reduce_coalesced(self, reduceOp.lower(), group_name)\n    return list(map(_maybe_wrap_tensor, tensor_list))",
    "docstring": "Reduces a list of tensors across all machines in such a way that all get the final result. The all tensors in the input list are left unmodified. Group can be one of: List[int]: ranks participating in the collective. List[List[int]]: 2D mesh of ranks taking part of this collective in MPMD. ProcessGroup: Will perform a collective using the ranks and tag of the PG. DeviceMesh: Do a SPMD collective over all ranks of the mesh (DeviceMesh, int): Do a MPMD collective over one dimension of the DeviceMesh :: N.B. If you pass a PG or a 1D list to perform a MPMD collective, the compiler won't be able to recover that information and perform collective algebraic optimization. Use other forms of input for that.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\_functional_collectives.py",
    "ast_data": "FunctionDef name:all_reduce_coalesced arg:self arg:reduceOp arg:group arg:tag arguments arg arg arg arg Assign Call Assign Call Call Return return:yes Call Call"
  },
  {
    "library": "numpy",
    "name": "IntelCCompilerW",
    "source_code": "class IntelCCompilerW(MSVCCompiler):\n    compiler_type = 'intelw'\n    compiler_cxx = 'icl'\n\n    def __init__(self, verbose=0, dry_run=0, force=0):\n        MSVCCompiler.__init__(self, verbose, dry_run, force)\n        version_match = simple_version_match(start='Intel\\\\(R\\\\).*?32,')\n        self.__version = version_match\n\n    def initialize(self, plat_name=None):\n        MSVCCompiler.initialize(self, plat_name)\n        self.cc = self.find_exe('icl.exe')\n        self.lib = self.find_exe('xilib')\n        self.linker = self.find_exe('xilink')\n        self.compile_options = ['/nologo', '/O3', '/MD', '/W3', '/Qstd=c99']\n        self.compile_options_debug = ['/nologo', '/Od', '/MDd', '/W3', '/Qstd=c99', '/Z7', '/D_DEBUG']",
    "docstring": "A modified Intel compiler compatible with an MSVC-built Python.",
    "type": "class",
    "file_path": "numpy\\numpy\\distutils\\intelccompiler.py",
    "ast_data": "ClassDef name:IntelCCompilerW Assign Assign FunctionDef name:__init__ arg:self arg:verbose arg:dry_run arg:force arguments arg arg arg arg Call Assign Call Assign FunctionDef name:initialize arg:self arg:plat_name arguments arg arg Call Assign Call Assign Call Assign Call Assign Assign"
  },
  {
    "library": "matplotlib",
    "name": "MonthLocator",
    "source_code": "class MonthLocator(RRuleLocator):\n\n    def __init__(self, bymonth=None, bymonthday=1, interval=1, tz=None):\n        if bymonth is None:\n            bymonth = range(1, 13)\n        rule = rrulewrapper(MONTHLY, bymonth=bymonth, bymonthday=bymonthday, interval=interval, **self.hms0d)\n        super().__init__(rule, tz=tz)",
    "docstring": "Make ticks on occurrences of each month, e.g., 1, 3, 12.",
    "type": "class",
    "file_path": "matplotlib\\lib\\matplotlib\\dates.py",
    "ast_data": "ClassDef name:MonthLocator FunctionDef name:__init__ arg:self arg:bymonth arg:bymonthday arg:interval arg:tz arguments arg arg arg arg arg If Compare Assign Call Assign Call Call Call"
  },
  {
    "library": "scikit-learn",
    "name": "_update_fitted_transformers",
    "source_code": "def _update_fitted_transformers(self, transformers):\n    fitted_transformers = iter(transformers)\n    transformers_ = []\n    for name, old, column, _ in self._iter(fitted=False, column_as_labels=False, skip_drop=False, skip_empty_columns=False):\n        if old == 'drop':\n            trans = 'drop'\n        elif _is_empty_column_selection(column):\n            trans = old\n        else:\n            trans = next(fitted_transformers)\n        transformers_.append((name, trans, column))\n    assert not list(fitted_transformers)\n    self.transformers_ = transformers_",
    "docstring": "Set self.transformers_ from given transformers. Parameters ---------- transformers : list of estimators The fitted estimators as the output of . That function doesn't include 'drop' or transformers for which no column is selected. 'drop' is kept as is, and for the no-column transformers the unfitted transformer is put in .",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\compose\\_column_transformer.py",
    "ast_data": "FunctionDef name:_update_fitted_transformers arg:self arg:transformers arguments arg arg Assign Call Assign For Call If Compare Assign If Call Assign Assign Call Call Call Assign"
  },
  {
    "library": "tensorflow",
    "name": "_possibly_broadcast_batch_shape",
    "source_code": "def _possibly_broadcast_batch_shape(self, x):\n    if self._batch_shape_arg is None:\n        return x\n    special_shape = self.batch_shape.concatenate([1, 1])\n    bshape = array_ops.broadcast_static_shape(x.shape, special_shape)\n    if special_shape.is_fully_defined():\n        if bshape == x.shape:\n            return x\n        zeros = array_ops.zeros(shape=special_shape, dtype=self.dtype)\n        return x + zeros\n    special_shape = array_ops.concat((self.batch_shape_tensor(), [1, 1]), 0)\n    zeros = array_ops.zeros(shape=special_shape, dtype=self.dtype)\n    return x + zeros",
    "docstring": "Return 'x', possibly after broadcasting the leading dimensions.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\linalg\\linear_operator_identity.py",
    "ast_data": "FunctionDef name:_possibly_broadcast_batch_shape arg:self arg:x arguments arg arg If Compare Return return:yes Assign Call Assign Call If Call If Compare Return return:yes Assign Call Return return:yes Assign Call Call Assign Call Return return:yes"
  },
  {
    "library": "django",
    "name": "__radd__",
    "source_code": "def __radd__(self, other):\n    return other.__class__([*other, *self])",
    "docstring": "add to another list-like object",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\geos\\mutable_list.py",
    "ast_data": "FunctionDef name:__radd__ arg:self arg:other arguments arg arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_orthogonal_kernel",
    "source_code": "def _orthogonal_kernel(self, ksize, cin, cout):\n    if cin > cout:\n        raise ValueError(f'The number of input channels (cin={cin}) cannot exceed the number of output channels (cout={cout}).')\n    orth = self._orthogonal_matrix(cout)[0:cin, :]\n    if ksize == 1:\n        return array_ops.expand_dims(array_ops.expand_dims(orth, 0), 0)\n    p = self._block_orth(self._symmetric_projection(cout), self._symmetric_projection(cout))\n    for _ in range(ksize - 2):\n        temp = self._block_orth(self._symmetric_projection(cout), self._symmetric_projection(cout))\n        p = self._matrix_conv(p, temp)\n    for i in range(ksize):\n        for j in range(ksize):\n            p[i, j] = math_ops.matmul(orth, p[i, j])\n    return self._dict_to_tensor(p, ksize, ksize)",
    "docstring": "Construct orthogonal kernel for convolution. Args: ksize: Kernel size. cin: Number of input channels. cout: Number of output channels. Returns: An [ksize, ksize, cin, cout] orthogonal kernel. Raises: ValueError: If cin > cout.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\init_ops.py",
    "ast_data": "FunctionDef name:_orthogonal_kernel arg:self arg:ksize arg:cin arg:cout arguments arg arg arg arg If Compare Raise Call Assign Call If Compare Return return:yes Call Call Assign Call Call Call For Call Assign Call Call Call Assign Call For Call For Call Assign Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "identify_gradient",
    "source_code": "def identify_gradient(self, input_tensor):\n    grad_debug_op_name = _tensor_to_grad_debug_op_name(input_tensor, self._uuid)\n    identity_op = gen_array_ops.debug_gradient_ref_identity if input_tensor.dtype._is_ref_dtype else gen_array_ops.debug_gradient_identity\n    debug_grad_identity = identity_op(input_tensor, name=grad_debug_op_name)\n    assert debug_grad_identity.dtype == input_tensor.dtype\n    if debug_grad_identity.op.name != grad_debug_op_name:\n        raise ValueError('The graph already contains an op named %s' % grad_debug_op_name)\n    return debug_grad_identity",
    "docstring": "Create a debug identity tensor that registers and forwards gradients. The side effect of this method is that when gradient tensor(s) are created with respect to the any paths that include the , the gradient tensor(s) with respect to will be registered with this this instance and can later be retrieved, with the methods and . Example: Args: input_tensor: the input object whose related gradient tensors are to be registered with this instance when they are created, e.g., during calls or the construction of optimization (training) op that uses . Returns: A forwarded identity of , as a . Raises: ValueError: If an op with name that duplicates the gradient-debugging op already exists in the graph (highly unlikely).",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\debug\\lib\\debug_gradients.py",
    "ast_data": "FunctionDef name:identify_gradient arg:self arg:input_tensor arguments arg arg Assign Call Assign Assign Call Compare If Compare Raise Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "get_data_ratio",
    "source_code": "def get_data_ratio(self):\n    return 1.0",
    "docstring": "Return the aspect ratio of the data itself. For a polar plot, this should always be 1.0",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\projections\\polar.py",
    "ast_data": "FunctionDef name:get_data_ratio arg:self arguments arg Return return:yes"
  },
  {
    "library": "django",
    "name": "get_previous_week",
    "source_code": "def get_previous_week(self, date):\n    return _get_next_prev(self, date, is_previous=True, period='week')",
    "docstring": "Get the previous valid week.",
    "type": "method",
    "file_path": "django\\django\\views\\generic\\dates.py",
    "ast_data": "FunctionDef name:get_previous_week arg:self arg:date arguments arg arg Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "StemContainer",
    "source_code": "class StemContainer(Container):\n\n    def __init__(self, markerline_stemlines_baseline, **kwargs):\n        markerline, stemlines, baseline = markerline_stemlines_baseline\n        self.markerline = markerline\n        self.stemlines = stemlines\n        self.baseline = baseline\n        super().__init__(markerline_stemlines_baseline, **kwargs)",
    "docstring": "Container for the artists created in a :meth: plot. The container can be treated like a namedtuple `~matplotlib.lines.Line2D~matplotlib.collections.LineCollection~matplotlib.lines.Line2D` The artist of the horizontal baseline.",
    "type": "class",
    "file_path": "matplotlib\\lib\\matplotlib\\container.py",
    "ast_data": "ClassDef name:StemContainer FunctionDef name:__init__ arg:self arg:markerline_stemlines_baseline arguments arg arg arg Assign Assign Assign Assign Call Call"
  },
  {
    "library": "pytorch",
    "name": "reserve",
    "source_code": "def reserve(self, batch_idx: torch.Tensor, seq_len: torch.Tensor) -> None:\n    if seq_len <= self.capacity[batch_idx]:\n        return\n    num_pages_to_allocate = _cdiv(seq_len - self.capacity[batch_idx], self.page_size)\n    assert len(self.empty_pages) >= num_pages_to_allocate, f'requested {num_pages_to_allocate.item()} pages but there are only {len(self.empty_pages)} empty pages'\n    start_page_idx = self.capacity[batch_idx] // self.page_size\n    end_page_idx = start_page_idx + num_pages_to_allocate\n    allocated_pages = torch.tensor(self.empty_pages[-num_pages_to_allocate:], device=num_pages_to_allocate.device)\n    self.empty_pages = self.empty_pages[:-num_pages_to_allocate]\n    self.page_table[batch_idx, start_page_idx:end_page_idx] = allocated_pages\n    self.physical_to_logical[batch_idx, allocated_pages] = torch.arange(start_page_idx.item(), end_page_idx.item(), device=num_pages_to_allocate.device)\n    self.capacity[batch_idx] += num_pages_to_allocate * self.page_size",
    "docstring": "Requests the capacity of a given batch to be at least enough to hold elements. Args: batch_idx (Tensor): batch index to be reserved; shape :math:. seq_len (Tensor): minimum capacity for the given batch; shape :math:.",
    "type": "method",
    "file_path": "pytorch\\torch\\nn\\attention\\experimental\\_paged_attention.py",
    "ast_data": "FunctionDef name:reserve arg:self arg:batch_idx arg:seq_len arguments arg arg arg If Compare Return return:no Assign Call Compare Call Call Call Assign Assign Assign Call Assign Assign Assign Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "serialize",
    "source_code": "def serialize(to_serialize: Serializable) -> SerializedTraceType:\n    if not isinstance(to_serialize, Serializable):\n        raise ValueError('Can not serialize ' + type(to_serialize).__name__ + ' since it is not Serializable. For object ' + str(to_serialize))\n    actual_proto = to_serialize.experimental_as_proto()\n    if not isinstance(actual_proto, to_serialize.experimental_type_proto()):\n        raise ValueError(type(to_serialize).__name__ + ' returned different type of proto than specified by ' + 'experimental_type_proto()')\n    serialized = SerializedTraceType()\n    serialized.representation.Pack(actual_proto)\n    return serialized",
    "docstring": "Converts Serializable to a proto SerializedTraceType.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\core\\function\\trace_type\\serialization.py",
    "ast_data": "FunctionDef name:serialize arg:to_serialize arguments arg If Call Raise Call Call Call Assign Call If Call Call Raise Call Call Assign Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_dump_error_details",
    "source_code": "def _dump_error_details(self, ops, locations):\n    for i in range(0, len(ops)):\n        callstack_dump = self._get_location_string(locations[i])\n        err_string = f'Op: {ops[i]}\\n{callstack_dump}\\n'\n        self._log(err_string)",
    "docstring": "Dump the list of ops and locations.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\lite\\python\\authoring\\authoring.py",
    "ast_data": "FunctionDef name:_dump_error_details arg:self arg:ops arg:locations arguments arg arg arg For Call Call Assign Call Assign Call"
  },
  {
    "library": "django",
    "name": "std",
    "source_code": "@property\ndef std(self):\n    return self.statistics()[3]",
    "docstring": "Return the standard deviation of all pixel values of this band.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\gdal\\raster\\band.py",
    "ast_data": "FunctionDef name:std arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "numpy",
    "name": "_normed_hermite_n",
    "source_code": "def _normed_hermite_n(x, n):\n    if n == 0:\n        return np.full(x.shape, 1 / np.sqrt(np.sqrt(np.pi)))\n    c0 = 0.0\n    c1 = 1.0 / np.sqrt(np.sqrt(np.pi))\n    nd = float(n)\n    for i in range(n - 1):\n        tmp = c0\n        c0 = -c1 * np.sqrt((nd - 1.0) / nd)\n        c1 = tmp + c1 * x * np.sqrt(2.0 / nd)\n        nd = nd - 1.0\n    return c0 + c1 * x * np.sqrt(2)",
    "docstring": "Evaluate a normalized Hermite polynomial. Compute the value of the normalized Hermite polynomial of degree ``. Parameters ---------- x : ndarray of double. Points at which to evaluate the function n : int Degree of the normalized Hermite function to be evaluated. Returns ------- values : ndarray The shape of the return value is described above. Notes ----- This function is needed for finding the Gauss points and integration weights for high degrees. The values of the standard Hermite functions overflow when n >= 207.",
    "type": "function",
    "file_path": "numpy\\numpy\\polynomial\\hermite.py",
    "ast_data": "FunctionDef name:_normed_hermite_n arg:x arg:n arguments arg arg If Compare Return return:yes Call Call Call Assign Assign Call Call Assign Call For Call Assign Assign Call Assign Call Assign Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "_generate_channels_table",
    "source_code": "def _generate_channels_table(self, filtered_data: OrderedDict[str, Any], channel_features: list[str], num_channels: int) -> tuple[list, list]:\n    channel_table: list[list[Any]] = []\n    channel_headers: list[str] = []\n    channel_table_entry_counter: int = 0\n    if len(channel_features) > 0:\n        for module_fqn in filtered_data:\n            for channel in range(num_channels):\n                new_channel_row = [channel_table_entry_counter, module_fqn, channel]\n                for feature in channel_features:\n                    if feature in filtered_data[module_fqn]:\n                        feature_val = filtered_data[module_fqn][feature][channel]\n                    else:\n                        feature_val = 'Not Applicable'\n                    if type(feature_val) is torch.Tensor:\n                        feature_val = feature_val.item()\n                    new_channel_row.append(feature_val)\n                channel_table.append(new_channel_row)\n                channel_table_entry_counter += 1\n    if len(channel_table) != 0:\n        channel_headers = ['idx', 'layer_fqn', 'channel'] + channel_features\n    return (channel_headers, channel_table)",
    "docstring": "Takes in the filtered data and features list and generates the channels headers and table Currently meant to generate the headers and table for both the channels information. Args: filtered_data (OrderedDict[str, Any]): An OrderedDict (sorted in order of model) mapping: module_fqns -> feature_names -> values channel_features (List[str]): A list of the channel level features num_channels (int): Number of channels in the channel data Returns a tuple with: A list of the headers of the channel table A list of lists containing the table information row by row The 0th index row will contain the headers of the columns The rest of the rows will contain data",
    "type": "method",
    "file_path": "pytorch\\torch\\ao\\quantization\\fx\\_model_report\\model_report_visualizer.py",
    "ast_data": "FunctionDef name:_generate_channels_table arg:self arg:filtered_data arg:channel_features arg:num_channels arguments arg arg arg arg If Compare Call For For Call Assign For If Compare Assign Assign If Compare Call Assign Call Call Call If Compare Call Assign Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "set_gapcolor",
    "source_code": "def set_gapcolor(self, gapcolor):\n    if gapcolor is not None:\n        mcolors._check_color_like(color=gapcolor)\n    self._gapcolor = gapcolor\n    self.stale = True",
    "docstring": "Set a color to fill the gaps in the dashed line style. .. note:: Striped lines are created by drawing two interleaved dashed lines. There can be overlaps between those two, which may result in artifacts when using transparency. This functionality is experimental and may change. Parameters ---------- gapcolor : :mpltype: or None The color with which to fill the gaps. If None, the gaps are unfilled.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\lines.py",
    "ast_data": "FunctionDef name:set_gapcolor arg:self arg:gapcolor arguments arg arg If Compare Call Assign Assign"
  },
  {
    "library": "tensorflow",
    "name": "_compute_elemwise_op_output_shape",
    "source_code": "def _compute_elemwise_op_output_shape(self, shape1, shape2):\n    if None in [shape1, shape2]:\n        return None\n    elif len(shape1) < len(shape2):\n        return self._compute_elemwise_op_output_shape(shape2, shape1)\n    elif not shape2:\n        return shape1\n    output_shape = list(shape1[:-len(shape2)])\n    for i, j in zip(shape1[-len(shape2):], shape2):\n        if i is None or j is None:\n            output_shape.append(None)\n        elif i == 1:\n            output_shape.append(j)\n        elif j == 1:\n            output_shape.append(i)\n        else:\n            if i != j:\n                raise ValueError('Operands could not be broadcast together with shapes ' + str(shape1) + ' ' + str(shape2))\n            output_shape.append(i)\n    return tuple(output_shape)",
    "docstring": "Computes the shape of the resultant of an elementwise operation. Args: shape1: tuple or None. Shape of the first tensor shape2: tuple or None. Shape of the second tensor Returns: expected output shape when an element-wise operation is carried out on 2 tensors with shapes shape1 and shape2. tuple or None. Raises: ValueError: if shape1 and shape2 are not compatible for element-wise operations.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\layers\\merge.py",
    "ast_data": "FunctionDef name:_compute_elemwise_op_output_shape arg:self arg:shape1 arg:shape2 arguments arg arg arg If Compare Return return:no If Compare Call Call Return return:yes Call If Return return:yes Assign Call Call For Call Call If BoolOp Compare Compare Call If Compare Call If Compare Call If Compare Raise Call Call Call Call Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "read_coordinates",
    "source_code": "def read_coordinates(self, where=None, start: int | None=None, stop: int | None=None):\n    self.validate_version(where)\n    if not self.infer_axes():\n        return False\n    selection = Selection(self, where=where, start=start, stop=stop)\n    coords = selection.select_coords()\n    if selection.filter is not None:\n        for field, op, filt in selection.filter.format():\n            data = self.read_column(field, start=coords.min(), stop=coords.max() + 1)\n            coords = coords[op(data.iloc[coords - coords.min()], filt).values]\n    return Index(coords)",
    "docstring": "select coordinates (row numbers) from a table; return the coordinates object",
    "type": "method",
    "file_path": "pandas\\pandas\\io\\pytables.py",
    "ast_data": "FunctionDef name:read_coordinates arg:self arg:where arg:start arg:stop arguments arg arg arg arg Call If Call Return return:yes Assign Call Assign Call If Compare For Call Assign Call Call Call Assign Call Call Return return:yes Call"
  },
  {
    "library": "kornia",
    "name": "to_numpy",
    "source_code": "def to_numpy() -> ModuleType:\n    return ivy.transpile(kornia, source='torch', target='numpy')",
    "docstring": "Convert Kornia to NumPy. Transpiles the Kornia library to NumPy using [ivy]( The transpilation process occurs lazily, so the transpilation on a given kornia function/class will only occur when it's called or instantiated for the first time. This will make any functions/classes slow when being used for the first time, but any subsequent uses should be as fast as expected. Return: The Kornia library transpiled to NumPy Example: .. highlight:: python .. code-block:: python import kornia np_kornia = kornia.to_numpy() import numpy as np input = np.random.normal(size=(2, 3, 4, 5)) gray = np_kornia.color.gray.rgb_to_grayscale(input) Note: Ivy does not currently support transpiling trainable modules to NumPy.",
    "type": "function",
    "file_path": "kornia\\kornia\\transpiler\\transpiler.py",
    "ast_data": "FunctionDef name:to_numpy arguments Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "_get_base_knot_positions",
    "source_code": "@staticmethod\ndef _get_base_knot_positions(X, n_knots=10, knots='uniform', sample_weight=None):\n    if knots == 'quantile':\n        percentile_ranks = 100 * np.linspace(start=0, stop=1, num=n_knots, dtype=np.float64)\n        if sample_weight is None:\n            knots = np.percentile(X, percentile_ranks, axis=0)\n        else:\n            knots = np.array([_weighted_percentile(X, sample_weight, percentile_rank) for percentile_rank in percentile_ranks])\n    else:\n        mask = slice(None, None, 1) if sample_weight is None else sample_weight > 0\n        x_min = np.amin(X[mask], axis=0)\n        x_max = np.amax(X[mask], axis=0)\n        knots = np.linspace(start=x_min, stop=x_max, num=n_knots, endpoint=True, dtype=np.float64)\n    return knots",
    "docstring": "Calculate base knot positions. Base knots such that first knot <= feature <= last knot. For the B-spline construction with scipy.interpolate.BSpline, 2*degree knots beyond the base interval are added. Returns ------- knots : ndarray of shape (n_knots, n_features), dtype=np.float64 Knot positions (points) of base interval.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\preprocessing\\_polynomial.py",
    "ast_data": "FunctionDef name:_get_base_knot_positions arg:X arg:n_knots arg:knots arg:sample_weight arguments arg arg arg arg If Compare Assign Call If Compare Assign Call Assign Call Call Assign Compare Call Compare Assign Call Assign Call Assign Call Return return:yes"
  },
  {
    "library": "scipy",
    "name": "discrepancy",
    "source_code": "def discrepancy(sample: 'npt.ArrayLike', *, iterative: bool=False, method: Literal['CD', 'WD', 'MD', 'L2-star']='CD', workers: IntNumber=1) -> float:\n    sample = _ensure_in_unit_hypercube(sample)\n    workers = _validate_workers(workers)\n    methods = {'CD': _cy_wrapper_centered_discrepancy, 'WD': _cy_wrapper_wrap_around_discrepancy, 'MD': _cy_wrapper_mixture_discrepancy, 'L2-star': _cy_wrapper_l2_star_discrepancy}\n    if method in methods:\n        return methods[method](sample, iterative, workers=workers)\n    else:\n        raise ValueError(f'{method!r} is not a valid method. It must be one of {set(methods)!r}')",
    "docstring": "Discrepancy of a given sample. Parameters ---------- sample : array_like (n, d) The sample to compute the discrepancy from. iterative : bool, optional Must be False if not using it for updating the discrepancy. Default is False. Refer to the notes for more details. method : str, optional Type of discrepancy, can be `nn+1update_discrepancy`. >>> disc_init = qmc.discrepancy(space[:-1], iterative=True) >>> disc_init 0.04769081147119336 >>> qmc.update_discrepancy(space[-1], space[:-1], disc_init) 0.008142039609053513",
    "type": "function",
    "file_path": "scipy\\scipy\\stats\\_qmc.py",
    "ast_data": "FunctionDef name:discrepancy arg:sample arguments arg arg arg arg Assign Call Assign Call Assign If Compare Return return:yes Call Raise Call Call"
  },
  {
    "library": "virtualenv",
    "name": "read_data",
    "source_code": "def read_data(file, endian, num=1):\n    res = struct.unpack(endian + 'L' * num, file.read(num * 4))\n    if len(res) == 1:\n        return res[0]\n    return res",
    "docstring": "Read a given number of 32-bits unsigned integers from the given file with the given endianness.",
    "type": "function",
    "file_path": "virtualenv\\src\\virtualenv\\create\\via_global_ref\\builtin\\cpython\\mac_os.py",
    "ast_data": "FunctionDef name:read_data arg:file arg:endian arg:num arguments arg arg arg Assign Call Call If Compare Call Return return:yes Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "get_default_static_quant_reference_module_mappings",
    "source_code": "def get_default_static_quant_reference_module_mappings() -> dict[Callable, Any]:\n    return copy.deepcopy(DEFAULT_REFERENCE_STATIC_QUANT_MODULE_MAPPINGS)",
    "docstring": "Get reference module mapping for post training static quantization",
    "type": "function",
    "file_path": "pytorch\\torch\\ao\\quantization\\quantization_mappings.py",
    "ast_data": "FunctionDef name:get_default_static_quant_reference_module_mappings arguments Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "draggable",
    "source_code": "def draggable(self, state=None, use_blit=False):\n    from matplotlib.offsetbox import DraggableAnnotation\n    is_draggable = self._draggable is not None\n    if state is None:\n        state = not is_draggable\n    if state:\n        if self._draggable is None:\n            self._draggable = DraggableAnnotation(self, use_blit)\n    else:\n        if self._draggable is not None:\n            self._draggable.disconnect()\n        self._draggable = None\n    return self._draggable",
    "docstring": "Set whether the annotation is draggable with the mouse. Parameters ---------- state : bool or None - True or False: set the draggability. - None: toggle the draggability. use_blit : bool, default: False Use blitting for faster image composition. For details see :ref:. Returns ------- DraggableAnnotation or None If the annotation is draggable, the corresponding helper is returned.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\text.py",
    "ast_data": "FunctionDef name:draggable arg:self arg:state arg:use_blit arguments arg arg arg Assign Compare If Compare Assign If If Compare Assign Call If Compare Call Assign Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "split",
    "source_code": "def split(self, X, y=None, groups=None):\n    return super().split(X, y, groups)",
    "docstring": "Generate indices to split data into training and test set. Parameters ---------- X : array-like of shape (n_samples, n_features) Training data, where is the number of samples and is the number of features. y : array-like of shape (n_samples,), default=None The target variable for supervised learning problems. groups : array-like of shape (n_samples,) Group labels for the samples used while splitting the dataset into train/test set. Yields ------ train : ndarray The training set indices for that split. test : ndarray The testing set indices for that split.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\model_selection\\_split.py",
    "ast_data": "FunctionDef name:split arg:self arg:X arg:y arg:groups arguments arg arg arg arg Return return:yes Call Call"
  },
  {
    "library": "django",
    "name": "get_script_prefix",
    "source_code": "def get_script_prefix():\n    return getattr(_prefixes, 'value', '/')",
    "docstring": "Return the currently active script prefix. Useful for client code that wishes to construct their own URLs manually (although accessing the request instance is normally going to be a lot cleaner).",
    "type": "function",
    "file_path": "django\\django\\urls\\base.py",
    "ast_data": "FunctionDef name:get_script_prefix arguments Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "RowIdInitializer",
    "source_code": "@tf_export('tpu.experimental.embedding.RowIdInitializer')\nclass RowIdInitializer:\n\n    def __init__(self, offset: int=0):\n        self.offset = offset\n\n    def __call__(self, shape: Union[Sequence[int], TensorShape], dtype: dtypes.DType) -> core.Tensor:\n        return math_ops.range(start=self.offset, limit=self.offset + shape[0], delta=1, dtype=dtype)[:, None] * array_ops.ones(shape, dtype=dtype)",
    "docstring": "An initializer that initializes the table with vocabulary ids.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\tpu\\tpu_embedding_v2_utils.py",
    "ast_data": "ClassDef name:RowIdInitializer FunctionDef name:__init__ arg:self arg:offset arguments arg arg Assign FunctionDef name:__call__ arg:self arg:shape arg:dtype arguments arg arg arg Return return:yes Call Call Call"
  },
  {
    "library": "kornia",
    "name": "_validate_input",
    "source_code": "def _validate_input(f: Callable[..., Any]) -> Callable[..., Any]:\n\n    @wraps(f)\n    def wrapper(input: Tensor, *args: Any, **kwargs: Any) -> Any:\n        if not torch.is_tensor(input):\n            raise TypeError(f'Input type is not a Tensor. Got {type(input)}')\n        _validate_shape(input.shape, required_shapes=('BCHW',))\n        _validate_input_dtype(input, accepted_dtypes=[torch.float16, torch.float32, torch.float64])\n        return f(input, *args, **kwargs)\n    return wrapper",
    "docstring": "Validate the 2D input of the wrapped function. Args: f: a function that takes the first argument as tensor. Returns: the wrapped function after input is validated.",
    "type": "function",
    "file_path": "kornia\\kornia\\augmentation\\utils\\helpers.py",
    "ast_data": "FunctionDef name:_validate_input arg:f arguments arg FunctionDef name:wrapper arg:input arguments arg arg arg If Call Raise Call Call Call Call Return return:yes Call Call Return return:yes"
  },
  {
    "library": "numpy",
    "name": "upgrade_mapper",
    "source_code": "@classmethod\ndef upgrade_mapper(cls, func, default=None):\n    if callable(func):\n        cls._mapper.insert(-1, (cls._getsubdtype(default), func, default))\n        return\n    elif hasattr(func, '__iter__'):\n        if isinstance(func[0], (tuple, list)):\n            for _ in func:\n                cls._mapper.insert(-1, _)\n            return\n        if default is None:\n            default = [None] * len(func)\n        else:\n            default = list(default)\n            default.append([None] * (len(func) - len(default)))\n        for fct, dft in zip(func, default):\n            cls._mapper.insert(-1, (cls._getsubdtype(dft), fct, dft))",
    "docstring": "Upgrade the mapper of a StringConverter by adding a new function and its corresponding default. The input function (or sequence of functions) and its associated default value (if any) is inserted in penultimate position of the mapper. The corresponding type is estimated from the dtype of the default value. Parameters ---------- func : var Function, or sequence of functions Examples -------- >>> import dateutil.parser >>> import datetime >>> dateparser = dateutil.parser.parse >>> defaultdate = datetime.date(2000, 1, 1) >>> StringConverter.upgrade_mapper(dateparser, default=defaultdate)",
    "type": "method",
    "file_path": "numpy\\numpy\\lib\\_iotools.py",
    "ast_data": "FunctionDef name:upgrade_mapper arg:cls arg:func arg:default arguments arg arg arg If Call Call Call Return return:no If Call If Call For Call Return return:no If Compare Assign Call Assign Call Call Call Call For Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "gen_autograd_functions_lib",
    "source_code": "def gen_autograd_functions_lib(out: str, differentiability_infos: dict[FunctionSchema, dict[str, DifferentiabilityInfo]], template_path: str) -> None:\n    infos = get_infos_with_derivatives_list(differentiability_infos)\n    declarations = [process_function(f, FUNCTION_DECLARATION) for f in infos]\n    definitions = [process_function(f, FUNCTION_DEFINITION) for f in infos]\n    file_basename = 'Functions'\n    fm = FileManager(install_dir=out, template_dir=template_path, dry_run=False)\n    for suffix in ['.h', '.cpp']:\n        fname = file_basename + suffix\n        fm.write_with_template(fname, fname, lambda: {'generated_comment': '@' + f'generated from {fm.template_dir_for_comments()}/{fname}', 'autograd_function_declarations': declarations, 'autograd_function_definitions': definitions})",
    "docstring": "Functions.h and Functions.cpp body These contain the auto-generated subclasses of torch::autograd::Node for each every differentiable torch function.",
    "type": "function",
    "file_path": "pytorch\\tools\\autograd\\gen_autograd_functions.py",
    "ast_data": "FunctionDef name:gen_autograd_functions_lib arg:out arg:differentiability_infos arg:template_path arguments arg arg arg Assign Call Assign Call Assign Call Assign Assign Call For Assign Call arguments Call"
  },
  {
    "library": "scikit-learn",
    "name": "get_metadata_routing",
    "source_code": "def get_metadata_routing(self):\n    router = MetadataRouter(owner=self.__class__.__name__).add(splitter=check_cv(self.cv), method_mapping=MethodMapping().add(callee='split', caller='fit'))\n    return router",
    "docstring": "Get metadata routing of this object. Please check :ref: on how the routing mechanism works. .. versionadded:: 1.5 Returns ------- routing : MetadataRouter A :class: encapsulating routing information.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\covariance\\_graph_lasso.py",
    "ast_data": "FunctionDef name:get_metadata_routing arg:self arguments arg Assign Call Call Call Call Call Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "_count",
    "source_code": "@abstractmethod\ndef _count(self, X, Y):\n    pass",
    "docstring": "Update counts that are used to calculate probabilities. The counts make up a sufficient statistic extracted from the data. Accordingly, this method is called each time or update the model. and must be updated here along with any model specific counts. Parameters ---------- X : {ndarray, sparse matrix} of shape (n_samples, n_features) The input samples. Y : ndarray of shape (n_samples, n_classes) Binarized class labels.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\naive_bayes.py",
    "ast_data": "FunctionDef name:_count arg:self arg:X arg:Y arguments arg arg arg"
  },
  {
    "library": "tensorflow",
    "name": "scatter_min",
    "source_code": "def scatter_min(self, sparse_delta, use_locking=False, name=None):\n    if not isinstance(sparse_delta, indexed_slices.IndexedSlices):\n        raise TypeError(f'Argument `sparse_delta` must be a `tf.IndexedSlices`. Received arg: {sparse_delta}')\n    return self._lazy_read(gen_resource_variable_ops.resource_scatter_min(self.handle, sparse_delta.indices, ops.convert_to_tensor(sparse_delta.values, self.dtype), name=name))",
    "docstring": "Updates this variable with the min of and itself. Args: sparse_delta: to use as an argument of min with this variable. use_locking: If , use locking during the operation. name: the name of the operation. Returns: The updated variable. Raises: TypeError: if is not an .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\resource_variable_ops.py",
    "ast_data": "FunctionDef name:scatter_min arg:self arg:sparse_delta arg:use_locking arg:name arguments arg arg arg arg If Call Raise Call Return return:yes Call Call Call"
  },
  {
    "library": "matplotlib",
    "name": "ioff",
    "source_code": "def ioff() -> AbstractContextManager:\n    stack = ExitStack()\n    stack.callback(ion if isinteractive() else ioff)\n    matplotlib.interactive(False)\n    uninstall_repl_displayhook()\n    return stack",
    "docstring": "Disable interactive mode. See for more details. See Also -------- ion : Enable interactive mode. isinteractive : Whether interactive mode is enabled. show : Show all figures (and maybe block). pause : Show all figures, and block for a time. Notes ----- For a temporary change, this can be used as a context manager:: # if interactive mode is on # then figures will be shown on creation plt.ion() # This figure will be shown immediately fig = plt.figure() with plt.ioff(): # interactive mode will be off # figures will not automatically be shown fig2 = plt.figure() # ... To enable optional usage as a context manager, this function returns a context manager object, which is not intended to be stored or accessed by the user.",
    "type": "function",
    "file_path": "matplotlib\\lib\\matplotlib\\pyplot.py",
    "ast_data": "FunctionDef name:ioff arguments Assign Call Call Call Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "np_doc_only",
    "source_code": "def np_doc_only(np_fun_name, np_fun=None):\n    np_fun_name, np_fun = _prepare_np_fun_name_and_fun(np_fun_name, np_fun)\n\n    def decorator(f):\n        f.__doc__ = _np_doc_helper(f, np_fun, np_fun_name=np_fun_name)\n        return f\n    return decorator",
    "docstring": "Attachs numpy docstring to a function. This differs from np_doc in that it doesn't check for a match in signature. Args: np_fun_name: name for the np_fun symbol. At least one of np_fun or np_fun_name shoud be set. np_fun: (optional) the numpy function whose docstring will be used. Returns: A function decorator that attaches the docstring from to the decorated function.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\numpy_ops\\np_utils.py",
    "ast_data": "FunctionDef name:np_doc_only arg:np_fun_name arg:np_fun arguments arg arg Assign Call FunctionDef name:decorator arg:f arguments arg Assign Call Return return:yes Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_create_polynomial",
    "source_code": "def _create_polynomial(var, coeffs):\n    coeffs = np.array(coeffs, var.dtype.as_numpy_dtype)\n    if not coeffs.size:\n        return array_ops.zeros_like(var)\n    return coeffs[0] + _create_polynomial(var, coeffs[1:]) * var",
    "docstring": "Compute n_th order polynomial via Horner's method.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\distributions\\special_math.py",
    "ast_data": "FunctionDef name:_create_polynomial arg:var arg:coeffs arguments arg arg Assign Call If Return return:yes Call Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "mp_wright_bessel",
    "source_code": "def mp_wright_bessel(a, b, x, dps=50, maxterms=2000):\n    with mp.workdps(dps):\n        a, b, x = (mp.mpf(a), mp.mpf(b), mp.mpf(x))\n        res = mp.nsum(lambda k: x ** k / mp.fac(k) * rgamma_cached(a * k + b, dps=dps), [0, mp.inf], tol=dps, method='s', steps=[maxterms])\n        return mpf2float(res)",
    "docstring": "Compute Wright's generalized Bessel function as Series with mpmath.",
    "type": "function",
    "file_path": "scipy\\scipy\\special\\_precompute\\wright_bessel_data.py",
    "ast_data": "FunctionDef name:mp_wright_bessel arg:a arg:b arg:x arg:dps arg:maxterms arguments arg arg arg arg arg With Call Assign Call Call Call Assign Call arguments arg Call Call Return return:yes Call"
  },
  {
    "library": "cherrypy",
    "name": "_tree_namespace_handler",
    "source_code": "def _tree_namespace_handler(k, v):\n    if isinstance(v, dict):\n        for script_name, app in v.items():\n            cherrypy.tree.graft(app, script_name)\n            msg = 'Mounted: %s on %s' % (app, script_name or '/')\n            cherrypy.engine.log(msg)\n    else:\n        cherrypy.tree.graft(v, v.script_name)\n        cherrypy.engine.log('Mounted: %s on %s' % (v, v.script_name or '/'))",
    "docstring": "Namespace handler for the 'tree' config namespace.",
    "type": "function",
    "file_path": "cherrypy\\cherrypy\\_cpconfig.py",
    "ast_data": "FunctionDef name:_tree_namespace_handler arg:k arg:v arguments arg arg If Call For Call Call Assign BoolOp Call Call Call BoolOp"
  },
  {
    "library": "scipy",
    "name": "_solve_P_Q",
    "source_code": "def _solve_P_Q(U, V, structure=None):\n    P = U + V\n    Q = -U + V\n    if issparse(U) or is_pydata_spmatrix(U):\n        return spsolve(Q, P)\n    elif structure is None:\n        return solve(Q, P)\n    elif structure == UPPER_TRIANGULAR:\n        return solve_triangular(Q, P)\n    else:\n        raise ValueError('unsupported matrix structure: ' + str(structure))",
    "docstring": "A helper function for expm_2009. Parameters ---------- U : ndarray Pade numerator. V : ndarray Pade denominator. structure : str, optional A string describing the structure of both matrices and . Only is currently supported. Notes ----- The argument is inspired by similar args for theano and cvxopt functions.",
    "type": "function",
    "file_path": "scipy\\scipy\\sparse\\linalg\\_matfuncs.py",
    "ast_data": "FunctionDef name:_solve_P_Q arg:U arg:V arg:structure arguments arg arg arg Assign Assign If BoolOp Call Call Return return:yes Call If Compare Return return:yes Call If Compare Return return:yes Call Raise Call Call"
  },
  {
    "library": "matplotlib",
    "name": "limit_range_for_scale",
    "source_code": "def limit_range_for_scale(self, vmin, vmax, minpos):\n    if not np.isfinite(minpos):\n        minpos = 1e-300\n    return (minpos if vmin <= 0 else vmin, minpos if vmax <= 0 else vmax)",
    "docstring": "Limit the domain to positive values.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\scale.py",
    "ast_data": "FunctionDef name:limit_range_for_scale arg:self arg:vmin arg:vmax arg:minpos arguments arg arg arg arg If Call Assign Return return:yes Compare Compare"
  },
  {
    "library": "matplotlib",
    "name": "__call__",
    "source_code": "def __call__(self, x, pos=None):\n    if pos is None or pos >= len(self.seq):\n        return ''\n    else:\n        return self.seq[pos]",
    "docstring": "Return the label that matches the position, regardless of the value. For positions `` is the sequence of strings that this object was initialized with.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\ticker.py",
    "ast_data": "FunctionDef name:__call__ arg:self arg:x arg:pos arguments arg arg arg If BoolOp Compare Compare Call Return return:yes Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "pattern_eq",
    "source_code": "def pattern_eq(self, other: Any) -> bool:\n    return isinstance(other, self.__class__)",
    "docstring": "Compare two s and return true if they are the same. Note this is NOT matching a pattern - it is comparing the pattern structures (for debugging).",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\pattern_matcher.py",
    "ast_data": "FunctionDef name:pattern_eq arg:self arg:other arguments arg arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "merge",
    "source_code": "def merge(inputs, name=None):\n    if any((inp is None for inp in inputs)):\n        raise ValueError('At least one of the merge inputs is None: %s' % inputs)\n    with ops.name_scope(name, 'Merge', inputs) as name:\n        inputs = [ops.internal_convert_to_tensor_or_composite(inp, as_ref=True) for inp in inputs]\n        if all((isinstance(v, tensor_lib.Tensor) for v in inputs)):\n            if all((v.dtype._is_ref_dtype for v in inputs)):\n                return gen_control_flow_ops.ref_merge(inputs, name)\n            else:\n                return gen_control_flow_ops.merge(inputs, name)\n        else:\n            if all((isinstance(v, (indexed_slices.IndexedSlices, tensor_lib.Tensor)) for v in inputs)):\n                inputs = math_ops._as_indexed_slices_list(inputs, optimize=False)\n            for v in inputs:\n                if not isinstance(v, composite_tensor.CompositeTensor):\n                    raise TypeError('Type %s not supported' % type(v))\n            for v in inputs[1:]:\n                nest.assert_same_structure(inputs[0], v, expand_composites=True)\n            flat_inputs = [nest.flatten(v, expand_composites=True) for v in inputs]\n            merged_results = [gen_control_flow_ops.merge(component) for component in zip(*flat_inputs)]\n            flat_merged = [tensor for tensor, _ in merged_results]\n            chosen_index = merged_results[0][1]\n            merged_inputs = nest.pack_sequence_as(inputs[0], flat_merged, expand_composites=True)\n            return (merged_inputs, chosen_index)",
    "docstring": "Returns the value of an available element of . This op tests each of the tensors in in turn to determine if any of them is available. If it finds an available tensor, it returns it and its index in . It is an error if more than one tensor in is available. If no tensor in is available, the returned tensor and index are not set. This op handles both s and . If inputs has a mix of s and , all inputs are converted to IndexedSlices before merging. Args: inputs: The input tensors, at most one of which is available. name: A name for this operation (optional). Returns: A tuple containing the chosen input tensor and its index in . Raises: ValueError: If any of the inputs is None, or inputs are IndexedSlices and some but not all have a dense_shape property.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\control_flow_ops.py",
    "ast_data": "FunctionDef name:merge arg:inputs arg:name arguments arg arg If Call Compare Raise Call With Call Assign Call If Call Call If Call Return return:yes Call Return return:yes Call If Call Call Assign Call For If Call Raise Call Call For Call Assign Call Assign Call Call Assign Assign Assign Call Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "_IterablesNotString",
    "source_code": "class _IterablesNotString(_Constraint):\n\n    def is_satisfied_by(self, val):\n        return isinstance(val, Iterable) and (not isinstance(val, str))\n\n    def __str__(self):\n        return 'an iterable'",
    "docstring": "Constraint representing iterables that are not strings.",
    "type": "class",
    "file_path": "scikit-learn\\sklearn\\utils\\_param_validation.py",
    "ast_data": "ClassDef name:_IterablesNotString FunctionDef name:is_satisfied_by arg:self arg:val arguments arg arg Return return:yes BoolOp Call Call FunctionDef name:__str__ arg:self arguments arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "load_state_dict",
    "source_code": "def load_state_dict(self, state_dict: dict[str, Any]) -> None:\n    ...",
    "docstring": "Restore the object's state from the provided state_dict. Args: state_dict: The state dict to restore from",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\checkpoint\\stateful.py",
    "ast_data": "FunctionDef name:load_state_dict arg:self arg:state_dict arguments arg arg"
  },
  {
    "library": "tensorflow",
    "name": "base_dtype",
    "source_code": "@property\ndef base_dtype(self):\n    if self._is_ref_dtype:\n        return _INTERN_TABLE[self._type_enum - 100]\n    else:\n        return self",
    "docstring": "Returns a non-reference based on this (for TF1). Programs written for TensorFlow 2.x do not need this attribute. It exists only for compatibility with TensorFlow 1.x, which used reference s in the implementation of . In TensorFlow 2.x, is implemented without reference types.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\dtypes.py",
    "ast_data": "FunctionDef name:base_dtype arg:self arguments arg If Return return:yes Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "RangeBenchmark",
    "source_code": "class RangeBenchmark(benchmark_base.DatasetBenchmarkBase):\n\n    def _benchmark_range(self, num_elements, autotune, benchmark_id):\n        options = options_lib.Options()\n        options.autotune.enabled = autotune\n        dataset = dataset_ops.Dataset.range(num_elements)\n        dataset = dataset.with_options(options)\n        self.run_and_report_benchmark(dataset, num_elements=num_elements, extras={'model_name': 'range.benchmark.%d' % benchmark_id, 'parameters': '%d.%s' % (num_elements, autotune)}, name='modeling_%s' % ('on' if autotune else 'off'))\n\n    def benchmark_range_with_modeling(self):\n        self._benchmark_range(num_elements=10000000, autotune=True, benchmark_id=1)\n\n    def benchmark_range_without_modeling(self):\n        self._benchmark_range(num_elements=50000000, autotune=False, benchmark_id=2)",
    "docstring": "Benchmarks for .",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\data\\benchmarks\\range_benchmark.py",
    "ast_data": "ClassDef name:RangeBenchmark FunctionDef name:_benchmark_range arg:self arg:num_elements arg:autotune arg:benchmark_id arguments arg arg arg arg Assign Call Assign Assign Call Assign Call Call FunctionDef name:benchmark_range_with_modeling arg:self arguments arg Call FunctionDef name:benchmark_range_without_modeling arg:self arguments arg Call"
  },
  {
    "library": "matplotlib",
    "name": "set_arrowstyle",
    "source_code": "@_docstring.interpd\ndef set_arrowstyle(self, arrowstyle=None, **kwargs):\n    if arrowstyle is None:\n        return ArrowStyle.pprint_styles()\n    self._arrow_transmuter = ArrowStyle(arrowstyle, **kwargs) if isinstance(arrowstyle, str) else arrowstyle\n    self.stale = True",
    "docstring": "Set the arrow style, possibly with further attributes. Attributes from the previous arrow style are not reused. Without argument (or with `~matplotlib.patches.ArrowStyle.ArrowStyle.ArrowStyle` object, as documented in that class. The following arrow styles are available: %(ArrowStyle:table_and_accepts)s **kwargs Additional attributes for the arrow style. See the table above for supported parameters. Examples -------- :: set_arrowstyle(\"Fancy,head_length=0.2\") set_arrowstyle(\"fancy\", head_length=0.2)",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\patches.py",
    "ast_data": "FunctionDef name:set_arrowstyle arg:self arg:arrowstyle arguments arg arg arg If Compare Return return:yes Call Assign Call Call Assign"
  },
  {
    "library": "pytorch",
    "name": "register_state_dict_post_hook",
    "source_code": "def register_state_dict_post_hook(self, hook):\n    hook._from_public_api = True\n    handle = RemovableHandle(self._state_dict_hooks)\n    self._state_dict_hooks[handle.id] = hook\n    return handle",
    "docstring": "Register a post-hook for the :meth: method. It should have the following signature:: hook(module, state_dict, prefix, local_metadata) -> None The registered hooks can modify the `` inplace.",
    "type": "method",
    "file_path": "pytorch\\torch\\nn\\modules\\module.py",
    "ast_data": "FunctionDef name:register_state_dict_post_hook arg:self arg:hook arguments arg arg Assign Assign Call Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "eval",
    "source_code": "@doc_controls.do_not_generate_docs\ndef eval(x):\n    return get_value(to_dense(x))",
    "docstring": "Evaluates the value of a variable. Args: x: A variable. Returns: A Numpy array. Examples: >>> kvar = tf.keras.backend.variable(np.array([[1, 2], [3, 4]]), ... dtype='float32') >>> tf.keras.backend.eval(kvar) array([[1., 2.], [3., 4.]], dtype=float32)",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\backend.py",
    "ast_data": "FunctionDef name:eval arg:x arguments arg Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, keys, values, key_dtype=None, value_dtype=None, name=None):\n    if not context.executing_eagerly() and ops.get_default_graph()._get_control_flow_context() is not None:\n        with ops.init_scope():\n            self._keys = ops.convert_to_tensor(keys, dtype=key_dtype, name='keys')\n            self._values = ops.convert_to_tensor(values, dtype=value_dtype, name='values')\n    else:\n        self._keys = ops.convert_to_tensor(keys, dtype=key_dtype, name='keys')\n        self._values = ops.convert_to_tensor(values, dtype=value_dtype, name='values')\n    self._name = name if name is not None else 'key_value_init'\n    if context.executing_eagerly():\n        self._name += str(ops.uid())\n    super(KeyValueTensorInitializer, self).__init__(self._keys.dtype, self._values.dtype)",
    "docstring": "Constructs a table initializer object based on keys and values tensors. Args: keys: The tensor for the keys. values: The tensor for the values. key_dtype: The data type. Used when is a python array. value_dtype: The data type. Used when is a python array. name: A name for the operation (optional).",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\lookup_ops.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:keys arg:values arg:key_dtype arg:value_dtype arg:name arguments arg arg arg arg arg arg If BoolOp Call Compare Call Call With Call Assign Call Assign Call Assign Call Assign Call Assign Compare If Call Call Call Call Call"
  },
  {
    "library": "pandas",
    "name": "_numba_agg_general",
    "source_code": "def _numba_agg_general(self, func: Callable, dtype_mapping: dict[np.dtype, Any], engine_kwargs: dict[str, bool] | None, **aggregator_kwargs):\n    if not self.as_index:\n        raise NotImplementedError('as_index=False is not supported. Use .reset_index() instead.')\n    data = self._obj_with_exclusions\n    df = data if data.ndim == 2 else data.to_frame()\n    aggregator = executor.generate_shared_aggregator(func, dtype_mapping, True, **get_jit_arguments(engine_kwargs))\n    ids = self._grouper.ids\n    ngroups = self._grouper.ngroups\n    res_mgr = df._mgr.apply(aggregator, labels=ids, ngroups=ngroups, **aggregator_kwargs)\n    res_mgr.axes[1] = self._grouper.result_index\n    result = df._constructor_from_mgr(res_mgr, axes=res_mgr.axes)\n    if data.ndim == 1:\n        result = result.squeeze('columns')\n        result.name = data.name\n    else:\n        result.columns = data.columns\n    return result",
    "docstring": "Perform groupby with a standard numerical aggregation function (e.g. mean) with Numba.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\groupby\\groupby.py",
    "ast_data": "FunctionDef name:_numba_agg_general arg:self arg:func arg:dtype_mapping arg:engine_kwargs arguments arg arg arg arg arg If Raise Call Assign Assign Compare Call Assign Call Call Assign Assign Assign Call Assign Assign Call If Compare Assign Call Assign Assign Return return:yes"
  },
  {
    "library": "authlib",
    "name": "as_bytes",
    "source_code": "def as_bytes(self, encoding=None, is_private=False, password=None):\n    if encoding is None or encoding == 'PEM':\n        encoding = Encoding.PEM\n    elif encoding == 'DER':\n        encoding = Encoding.DER\n    else:\n        raise ValueError(f'Invalid encoding: {encoding!r}')\n    raw_key = self.as_key(is_private)\n    if is_private:\n        if not raw_key:\n            raise ValueError('This is a public key')\n        if password is None:\n            encryption_algorithm = NoEncryption()\n        else:\n            encryption_algorithm = BestAvailableEncryption(to_bytes(password))\n        return raw_key.private_bytes(encoding=encoding, format=PrivateFormat.PKCS8, encryption_algorithm=encryption_algorithm)\n    return raw_key.public_bytes(encoding=encoding, format=PublicFormat.SubjectPublicKeyInfo)",
    "docstring": "Export key into PEM/DER format bytes. :param encoding: \"PEM\" or \"DER\" :param is_private: export private key or public key :param password: encrypt private key with password :return: bytes",
    "type": "method",
    "file_path": "authlib\\authlib\\jose\\rfc7517\\asymmetric_key.py",
    "ast_data": "FunctionDef name:as_bytes arg:self arg:encoding arg:is_private arg:password arguments arg arg arg arg If BoolOp Compare Compare Assign If Compare Assign Raise Call Assign Call If If Raise Call If Compare Assign Call Assign Call Call Return return:yes Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "IsMerge",
    "source_code": "def IsMerge(op):\n    return op.type == 'Merge' or op.type == 'RefMerge'",
    "docstring": "Return true if is a Merge.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\control_flow_util.py",
    "ast_data": "FunctionDef name:IsMerge arg:op arguments arg Return return:yes BoolOp Compare Compare"
  },
  {
    "library": "scipy",
    "name": "Trefethen",
    "source_code": "class Trefethen(Benchmark):\n\n    def __init__(self, dimensions=2):\n        Benchmark.__init__(self, dimensions)\n        self._bounds = list(zip([-10.0] * self.N, [10.0] * self.N))\n        self.custom_bounds = [(-5, 5), (-5, 5)]\n        self.global_optimum = [[-0.02440307923, 0.2106124261]]\n        self.fglob = -3.3068686474\n\n    def fun(self, x, *args):\n        self.nfev += 1\n        val = 0.25 * x[0] ** 2 + 0.25 * x[1] ** 2\n        val += exp(sin(50.0 * x[0])) - sin(10 * x[0] + 10 * x[1])\n        val += sin(60 * exp(x[1]))\n        val += sin(70 * sin(x[0]))\n        val += sin(sin(80 * x[1]))\n        return val",
    "docstring": "Trefethen objective function. This class defines the Trefethen [1]_ global optimization problem. This is a multimodal minimization problem defined as follows: .. math:: f_{\\text{Trefethen}}(x) = 0.25 x_{1}^{2} + 0.25 x_{2}^{2} + e^{\\sin\\left(50 x_{1}\\right)} - \\sin\\left(10 x_{1} + 10 x_{2}\\right) + \\sin\\left(60 e^{x_{2}}\\right) + \\sin\\left[70 \\sin\\left(x_{1}\\right)\\right] + \\sin\\left[\\sin\\left(80 x_{2}\\right)\\right] with :math: for :math:. *Global optimum*: :math: for :math: .. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions For Global Optimization Problems Int. Journal of Mathematical Modelling and Numerical Optimisation, 2013, 4, 150-194.",
    "type": "class",
    "file_path": "scipy\\benchmarks\\benchmarks\\go_benchmark_functions\\go_funcs_T.py",
    "ast_data": "ClassDef name:Trefethen FunctionDef name:__init__ arg:self arg:dimensions arguments arg arg Call Assign Call Call Assign Assign Assign FunctionDef name:fun arg:self arg:x arguments arg arg arg Assign Call Call Call Call Call Call Call Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_LogMatrixDeterminantGrad",
    "source_code": "@ops.RegisterGradient('LogMatrixDeterminant')\ndef _LogMatrixDeterminantGrad(op: ops.Operation, _, grad_b):\n    a = op.inputs[0]\n    c = op.outputs[1]\n    a_adj_inv = linalg_ops.matrix_inverse(a, adjoint=True)\n    multipliers = array_ops.reshape(grad_b, array_ops.concat([array_ops.shape(c), [1, 1]], 0))\n    return multipliers * a_adj_inv",
    "docstring": "Gradient for LogMatrixDeterminant.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\linalg_grad.py",
    "ast_data": "FunctionDef name:_LogMatrixDeterminantGrad arg:op arg:_ arg:grad_b arguments arg arg arg Assign Assign Assign Call Assign Call Call Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "add_graph",
    "source_code": "def add_graph(self, model, input_to_model=None, verbose=False, use_strict_trace=True):\n    torch._C._log_api_usage_once('tensorboard.logging.add_graph')\n    self._get_file_writer().add_graph(graph(model, input_to_model, verbose, use_strict_trace))",
    "docstring": "Add graph data to summary. Args: model (torch.nn.Module): Model to draw. input_to_model (torch.Tensor or list of torch.Tensor): A variable or a tuple of variables to be fed. verbose (bool): Whether to print graph structure in console. use_strict_trace (bool): Whether to pass keyword argument to . Pass False when you want the tracer to record your mutable container types (list, dict)",
    "type": "method",
    "file_path": "pytorch\\torch\\utils\\tensorboard\\writer.py",
    "ast_data": "FunctionDef name:add_graph arg:self arg:model arg:input_to_model arg:verbose arg:use_strict_trace arguments arg arg arg arg arg Call Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "get_losses_for",
    "source_code": "@doc_controls.do_not_generate_docs\ndef get_losses_for(self, inputs):\n    warnings.warn('`layer.get_losses_for` is deprecated and will be removed in a future version. Please use `layer.losses` instead.')\n    return self.losses",
    "docstring": "Deprecated, do NOT use! Retrieves losses relevant to a specific set of inputs. Args: inputs: Input tensor or list/tuple of input tensors. Returns: List of loss tensors of the layer that depend on .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\base_layer.py",
    "ast_data": "FunctionDef name:get_losses_for arg:self arg:inputs arguments arg arg Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "GradcheckError",
    "source_code": "class GradcheckError(RuntimeError):\n    pass",
    "docstring": "Error raised by :func: and :func:.",
    "type": "class",
    "file_path": "pytorch\\torch\\autograd\\gradcheck.py",
    "ast_data": "ClassDef name:GradcheckError"
  },
  {
    "library": "tensorflow",
    "name": "name",
    "source_code": "@property\ndef name(self):\n    return '{}_shared_embedding'.format(self.categorical_column.name)",
    "docstring": "See base class.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\feature_column\\feature_column_v2.py",
    "ast_data": "FunctionDef name:name arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "identify_axes",
    "source_code": "def identify_axes(ax_dict, fontsize=48):\n    kw = dict(ha='center', va='center', fontsize=fontsize, color='darkgrey')\n    for k, ax in ax_dict.items():\n        ax.text(0.5, 0.5, k, transform=ax.transAxes, **kw)",
    "docstring": "Helper to identify the Axes in the examples below. Draws the label in a large font in the center of the Axes. Parameters ---------- ax_dict : dict[str, Axes] Mapping between the title / label and the Axes. fontsize : int, optional How big the label should be.",
    "type": "function",
    "file_path": "matplotlib\\galleries\\users_explain\\axes\\mosaic.py",
    "ast_data": "FunctionDef name:identify_axes arg:ax_dict arg:fontsize arguments arg arg Assign Call For Call Call"
  },
  {
    "library": "scikit-learn",
    "name": "fit",
    "source_code": "@_fit_context(prefer_skip_nested_validation=True)\ndef fit(self, X, y=None):\n    if self.subsample is not None and self.n_quantiles > self.subsample:\n        raise ValueError('The number of quantiles cannot be greater than the number of samples used. Got {} quantiles and {} samples.'.format(self.n_quantiles, self.subsample))\n    X = self._check_inputs(X, in_fit=True, copy=False)\n    n_samples = X.shape[0]\n    if self.n_quantiles > n_samples:\n        warnings.warn('n_quantiles (%s) is greater than the total number of samples (%s). n_quantiles is set to n_samples.' % (self.n_quantiles, n_samples))\n    self.n_quantiles_ = max(1, min(self.n_quantiles, n_samples))\n    rng = check_random_state(self.random_state)\n    self.references_ = np.linspace(0, 1, self.n_quantiles_, endpoint=True)\n    if sparse.issparse(X):\n        self._sparse_fit(X, rng)\n    else:\n        self._dense_fit(X, rng)\n    return self",
    "docstring": "Compute the quantiles used for transforming. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) The data used to scale along the features axis. If a sparse matrix is provided, it will be converted into a sparse `ignore_implicit_zeros` is False. y : None Ignored. Returns ------- self : object Fitted transformer.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\preprocessing\\_data.py",
    "ast_data": "FunctionDef name:fit arg:self arg:X arg:y arguments arg arg arg If BoolOp Compare Compare Raise Call Call Assign Call Assign If Compare Call Assign Call Call Assign Call Assign Call If Call Call Call Return return:yes Call"
  },
  {
    "library": "seaborn",
    "name": "beeswarm",
    "source_code": "def beeswarm(self, orig_xyr):\n    midline = orig_xyr[0, 0]\n    swarm = np.atleast_2d(orig_xyr[0])\n    for xyr_i in orig_xyr[1:]:\n        neighbors = self.could_overlap(xyr_i, swarm)\n        candidates = self.position_candidates(xyr_i, neighbors)\n        offsets = np.abs(candidates[:, 0] - midline)\n        candidates = candidates[np.argsort(offsets)]\n        new_xyr_i = self.first_non_overlapping_candidate(candidates, neighbors)\n        swarm = np.vstack([swarm, new_xyr_i])\n    return swarm",
    "docstring": "Adjust x position of points to avoid overlaps.",
    "type": "method",
    "file_path": "seaborn\\seaborn\\categorical.py",
    "ast_data": "FunctionDef name:beeswarm arg:self arg:orig_xyr arguments arg arg Assign Assign Call For Assign Call Assign Call Assign Call Assign Call Assign Call Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_get_all_paths",
    "source_code": "def _get_all_paths(st):\n    fields = st.field_names()\n    all_paths = {()}\n    for k in fields:\n        v = st.field_value(k)\n        if isinstance(v, StructuredTensor):\n            all_paths = all_paths.union([(k,) + p for p in _get_all_paths(v)])\n        else:\n            all_paths.add((k,))\n    return all_paths",
    "docstring": "Get all the paths from a StructuredTensor.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\structured\\structured_array_ops.py",
    "ast_data": "FunctionDef name:_get_all_paths arg:st arguments arg Assign Call Assign For Assign Call If Call Assign Call Call Call Return return:yes"
  },
  {
    "library": "authlib",
    "name": "authenticate_token",
    "source_code": "def authenticate_token(self, token_string):\n    raise NotImplementedError()",
    "docstring": "A method to query token from database with the given token string. Developers MUST re-implement this method. For instance:: def authenticate_token(self, token_string): return get_token_from_database(token_string) :param token_string: A string to represent the access_token. :return: token",
    "type": "method",
    "file_path": "authlib\\authlib\\oauth2\\rfc6750\\validator.py",
    "ast_data": "FunctionDef name:authenticate_token arg:self arg:token_string arguments arg arg Raise Call"
  },
  {
    "library": "pytorch",
    "name": "_sort_eps",
    "source_code": "def _sort_eps(eps: tuple[str, ...]) -> tuple[str, ...]:\n\n    def get_execution_provider_priority(ep: str) -> int:\n        if ep == 'CPUExecutionProvider':\n            return 2\n        if ep == 'CUDAExecutionProvider':\n            return 1\n        return 0\n    unique_eps = set(eps)\n    return tuple(sorted(unique_eps, key=get_execution_provider_priority, reverse=True))",
    "docstring": "Sort execution providers in eps based on pre-set priority.",
    "type": "function",
    "file_path": "pytorch\\torch\\onnx\\_internal\\onnxruntime.py",
    "ast_data": "FunctionDef name:_sort_eps arg:eps arguments arg FunctionDef name:get_execution_provider_priority arg:ep arguments arg If Compare Return return:yes If Compare Return return:yes Return return:yes Assign Call Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "add",
    "source_code": "def add(self, fn: Any, context: Optional[FunctionContext]=None) -> None:\n    context = context or FunctionContext()\n    self._primary[context, fn.function_type] = fn\n    if context not in self._dispatch_dict:\n        self._dispatch_dict[context] = type_dispatch.TypeDispatchTable()\n    self._dispatch_dict[context].add_target(fn.function_type)",
    "docstring": "Adds a new function using its function_type. Args: fn: The function to be added to the cache. context: A FunctionContext representing the current context.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\core\\function\\polymorphism\\function_cache.py",
    "ast_data": "FunctionDef name:add arg:self arg:fn arg:context arguments arg arg arg Assign BoolOp Call Assign If Compare Assign Call Call"
  },
  {
    "library": "scikit-learn",
    "name": "predict_log_proba",
    "source_code": "def predict_log_proba(self, X):\n    proba = self.predict_proba(X)\n    if self.n_outputs_ == 1:\n        return np.log(proba)\n    else:\n        return [np.log(p) for p in proba]",
    "docstring": "Return log probability estimates for the test vectors X. Parameters ---------- X : {array-like, object with finite length or shape} Training data. Returns ------- P : ndarray of shape (n_samples, n_classes) or list of such arrays Returns the log probability of the sample for each class in the model, where classes are ordered arithmetically for each output.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\dummy.py",
    "ast_data": "FunctionDef name:predict_log_proba arg:self arg:X arguments arg arg Assign Call If Compare Return return:yes Call Return return:yes Call"
  },
  {
    "library": "django",
    "name": "LoginRequiredMixin",
    "source_code": "class LoginRequiredMixin(AccessMixin):\n\n    def dispatch(self, request, *args, **kwargs):\n        if not request.user.is_authenticated:\n            return self.handle_no_permission()\n        return super().dispatch(request, *args, **kwargs)",
    "docstring": "Verify that the current user is authenticated.",
    "type": "class",
    "file_path": "django\\django\\contrib\\auth\\mixins.py",
    "ast_data": "ClassDef name:LoginRequiredMixin FunctionDef name:dispatch arg:self arg:request arguments arg arg arg arg If Return return:yes Call Return return:yes Call Call"
  },
  {
    "library": "django",
    "name": "OracleGeometryColumns",
    "source_code": "class OracleGeometryColumns(models.Model):\n    table_name = models.CharField(max_length=32)\n    column_name = models.CharField(max_length=1024)\n    srid = models.IntegerField(primary_key=True)\n\n    class Meta:\n        app_label = 'gis'\n        db_table = 'USER_SDO_GEOM_METADATA'\n        managed = False\n\n    def __str__(self):\n        return '%s - %s (SRID: %s)' % (self.table_name, self.column_name, self.srid)\n\n    @classmethod\n    def table_name_col(cls):\n        return 'table_name'\n\n    @classmethod\n    def geom_col_name(cls):\n        return 'column_name'",
    "docstring": "Maps to the Oracle USER_SDO_GEOM_METADATA table.",
    "type": "class",
    "file_path": "django\\django\\contrib\\gis\\db\\backends\\oracle\\models.py",
    "ast_data": "ClassDef name:OracleGeometryColumns Assign Call Assign Call Assign Call ClassDef name:Meta Assign Assign Assign FunctionDef name:__str__ arg:self arguments arg Return return:yes FunctionDef name:table_name_col arg:cls arguments arg Return return:yes FunctionDef name:geom_col_name arg:cls arguments arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "check_max_block",
    "source_code": "def check_max_block(cfg: dict[str, int]):\n    for var, val in cfg.items():\n        block_suffix = 'BLOCK'\n        if block_suffix in var:\n            prefix = var.removesuffix(block_suffix)\n            max_block = TRITON_MAX_BLOCK[prefix]\n            assert val <= max_block, f\"'{var}' too large. Maximum: {max_block}. Actual: {val}.\"",
    "docstring": "Check that block sizes are within the maximum allowed.",
    "type": "function",
    "file_path": "pytorch\\torch\\_inductor\\runtime\\triton_heuristics.py",
    "ast_data": "FunctionDef name:check_max_block arg:cfg arguments arg For Call Assign If Compare Assign Call Assign Compare"
  },
  {
    "library": "scrapy",
    "name": "from_headers",
    "source_code": "def from_headers(self, headers: Mapping[bytes, bytes]) -> type[Response]:\n    cls = Response\n    if b'Content-Type' in headers:\n        cls = self.from_content_type(content_type=headers[b'Content-Type'], content_encoding=headers.get(b'Content-Encoding'))\n    if cls is Response and b'Content-Disposition' in headers:\n        cls = self.from_content_disposition(headers[b'Content-Disposition'])\n    return cls",
    "docstring": "Return the most appropriate Response class by looking at the HTTP headers",
    "type": "method",
    "file_path": "scrapy\\scrapy\\responsetypes.py",
    "ast_data": "FunctionDef name:from_headers arg:self arg:headers arguments arg arg Assign If Compare Assign Call Call If BoolOp Compare Compare Assign Call Return return:yes"
  },
  {
    "library": "django",
    "name": "get_all_objects_for_this_type",
    "source_code": "def get_all_objects_for_this_type(self, **kwargs):\n    return self.model_class()._base_manager.filter(**kwargs)",
    "docstring": "Return all objects of this type for the keyword arguments given.",
    "type": "method",
    "file_path": "django\\django\\contrib\\contenttypes\\models.py",
    "ast_data": "FunctionDef name:get_all_objects_for_this_type arg:self arguments arg arg Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "SendSourceFiles",
    "source_code": "def SendSourceFiles(self, request, context):\n    return debug_service_pb2.EventReply()",
    "docstring": "Base implementation of the handling of SendSourceFiles calls. The base implementation does nothing with the incoming request. Override in an implementation of the server if necessary. Args: request: A proto, containing the path, content, size and last-modified timestamp of source files. context: Server context. Returns: A proto.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\debug\\lib\\grpc_debug_server.py",
    "ast_data": "FunctionDef name:SendSourceFiles arg:self arg:request arg:context arguments arg arg arg Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "is_writeable_array",
    "source_code": "def is_writeable_array(x: object) -> bool:\n    cls = cast(Hashable, type(x))\n    if _issubclass_fast(cls, 'numpy', 'ndarray'):\n        return cast('npt.NDArray', x).flags.writeable\n    res = _is_writeable_cls(cls)\n    if res is not None:\n        return res\n    return hasattr(x, '__array_namespace__')",
    "docstring": "Return False if `x` is not an array API compatible object. Warning ------- As there is no standard way to check if an array is writeable without actually writing to it, this function blindly returns True for all unknown array types.",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\externals\\array_api_compat\\common\\_helpers.py",
    "ast_data": "FunctionDef name:is_writeable_array arg:x arguments arg Assign Call Call If Call Return return:yes Call Assign Call If Compare Return return:yes Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "get_cross_replica_context",
    "source_code": "def get_cross_replica_context():\n    return _get_per_thread_mode().cross_replica_context",
    "docstring": "Returns the current tf.distribute.Strategy if in a cross-replica context. DEPRECATED: Please use and instead. Returns: Returns the current object in a cross-replica context, or . Exactly one of and will return in a particular block.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\distribute_lib.py",
    "ast_data": "FunctionDef name:get_cross_replica_context arguments Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "get_linewidth",
    "source_code": "def get_linewidth(self):\n    return super().get_linewidth()[0]",
    "docstring": "Get the width of the lines used to mark each event.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\collections.py",
    "ast_data": "FunctionDef name:get_linewidth arg:self arguments arg Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "Settings",
    "source_code": "class Settings(NamedTuple):\n    experiments: dict[str, Experiment] = {}",
    "docstring": "Settings for the experiments that can be opted into.",
    "type": "class",
    "file_path": "pytorch\\.github\\scripts\\runner_determinator.py",
    "ast_data": "ClassDef name:Settings"
  },
  {
    "library": "tensorflow",
    "name": "_pyval_update_fields",
    "source_code": "def _pyval_update_fields(pyval, fields, depth):\n    if not isinstance(pyval, (dict, list, tuple)):\n        raise ValueError('Expected dict or nested list/tuple of dict')\n    for key, target in fields.items():\n        for _ in range(1, depth):\n            target = target[-1]\n        target.append(pyval[key] if isinstance(pyval, dict) else [])\n    if isinstance(pyval, (list, tuple)):\n        for child in pyval:\n            _pyval_update_fields(child, fields, depth + 1)",
    "docstring": "Append the field values from to . Args: pyval: A python , or nested list/tuple of , whose value(s) should be appended to . fields: A dictionary mapping string keys to field values. Field values extracted from are appended to this dictionary's values. depth: The depth at which should be appended to the field values.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\structured\\structured_tensor.py",
    "ast_data": "FunctionDef name:_pyval_update_fields arg:pyval arg:fields arg:depth arguments arg arg arg If Call Raise Call For Call For Call Assign Call Call If Call For Call"
  },
  {
    "library": "pytorch",
    "name": "add_custom",
    "source_code": "def add_custom(self, func: Callable, opset: OpsetVersion) -> None:\n    self._functions.override(opset, func)",
    "docstring": "Adds a custom symbolic function. Args: func: The symbolic function to register. opset: The corresponding opset version.",
    "type": "method",
    "file_path": "pytorch\\torch\\onnx\\_internal\\registration.py",
    "ast_data": "FunctionDef name:add_custom arg:self arg:func arg:opset arguments arg arg arg Call"
  },
  {
    "library": "django",
    "name": "upload_complete",
    "source_code": "def upload_complete(self):\n    pass",
    "docstring": "Signal that the upload is complete. Subclasses should perform cleanup that is necessary for this handler.",
    "type": "method",
    "file_path": "django\\django\\core\\files\\uploadhandler.py",
    "ast_data": "FunctionDef name:upload_complete arg:self arguments arg"
  },
  {
    "library": "pandas",
    "name": "_consolidate",
    "source_code": "def _consolidate(blocks: tuple[Block, ...]) -> tuple[Block, ...]:\n    gkey = lambda x: x._consolidate_key\n    grouper = itertools.groupby(sorted(blocks, key=gkey), gkey)\n    new_blocks: list[Block] = []\n    for (_can_consolidate, dtype), group_blocks in grouper:\n        merged_blocks, _ = _merge_blocks(list(group_blocks), dtype=dtype, can_consolidate=_can_consolidate)\n        new_blocks = extend_blocks(merged_blocks, new_blocks)\n    return tuple(new_blocks)",
    "docstring": "Merge blocks having same dtype, exclude non-consolidating blocks",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\internals\\managers.py",
    "ast_data": "FunctionDef name:_consolidate arg:blocks arguments arg Assign arguments arg Assign Call Call For Assign Call Call Assign Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "run_start_callbacks",
    "source_code": "def run_start_callbacks(self) -> None:\n    for callback in self.start_callbacks:\n        callback()",
    "docstring": "Execute all registered start callbacks.",
    "type": "method",
    "file_path": "pytorch\\torch\\_dynamo\\callback.py",
    "ast_data": "FunctionDef name:run_start_callbacks arg:self arguments arg For Call"
  },
  {
    "library": "scipy",
    "name": "__class_getitem__",
    "source_code": "@classmethod\ndef __class_getitem__(cls, arg, /):\n    from types import GenericAlias\n    return GenericAlias(cls, arg)",
    "docstring": "Return a parametrized wrapper around the type. .. versionadded:: 1.16.0 Returns ------- alias : types.GenericAlias A parametrized type. Examples -------- >>> import numpy as np >>> from scipy.sparse import coo_array >>> coo_array[np.int8, tuple[int]] scipy.sparse._coo.coo_array[numpy.int8, tuple[int]]",
    "type": "method",
    "file_path": "scipy\\scipy\\sparse\\_base.py",
    "ast_data": "FunctionDef name:__class_getitem__ arguments arg arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "_awaitable",
    "source_code": "def _awaitable(func, *args, **kwargs):\n    return torch._C._awaitable(func, *args, **kwargs)",
    "docstring": "Create Await object that will call specified functioni with specified args, when it is requested for the result.",
    "type": "function",
    "file_path": "pytorch\\torch\\jit\\_await.py",
    "ast_data": "FunctionDef name:_awaitable arg:func arguments arg arg arg Return return:yes Call"
  },
  {
    "library": "cherrypy",
    "name": "state",
    "source_code": "@state.setter\ndef state(self, value):\n    self._state = value\n    event = self._get_state_event(value)\n    win32event.PulseEvent(event)",
    "docstring": "Set the bus state.",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\process\\win32.py",
    "ast_data": "FunctionDef name:state arg:self arg:value arguments arg arg Assign Assign Call Call"
  },
  {
    "library": "django",
    "name": "set_3d",
    "source_code": "def set_3d(self, value):\n    if value is True:\n        capi.set_3d(self.ptr, 1)\n    elif value is False:\n        capi.set_3d(self.ptr, 0)\n    else:\n        raise ValueError(f\"Input to 'set_3d' must be a boolean, got '{value!r}'.\")",
    "docstring": "Set if this geometry has Z coordinates.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\gdal\\geometries.py",
    "ast_data": "FunctionDef name:set_3d arg:self arg:value arguments arg arg If Compare Call If Compare Call Raise Call"
  },
  {
    "library": "seaborn",
    "name": "_default_discrete",
    "source_code": "def _default_discrete(self):\n    if self.univariate:\n        discrete = self.var_types[self.data_variable] == 'categorical'\n    else:\n        discrete_x = self.var_types['x'] == 'categorical'\n        discrete_y = self.var_types['y'] == 'categorical'\n        discrete = (discrete_x, discrete_y)\n    return discrete",
    "docstring": "Find default values for discrete hist estimation based on variable type.",
    "type": "method",
    "file_path": "seaborn\\seaborn\\distributions.py",
    "ast_data": "FunctionDef name:_default_discrete arg:self arguments arg If Assign Compare Assign Compare Assign Compare Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "record",
    "source_code": "def record(self, flat_outputs, inference_args, input_tangents):\n    backward_function, to_record = self._wrap_backward_function(self._forward_graph, self._backward, flat_outputs)\n    if self._forwardprop_output_indices:\n        record.record_operation_backprop_only(self._forward.cached_definition.signature.name, to_record, inference_args, backward_function)\n        record.record_operation_forwardprop_only(self._forward.cached_definition.signature.name, flat_outputs, inference_args + input_tangents, backward_function, self._forwardprop_output_indices)\n    else:\n        record.record_operation(self._forward.cached_definition.signature.name, to_record, inference_args + input_tangents, backward_function)",
    "docstring": "Record the function call operation. For backprop, indicates the backward function to use and which new Tensors must be watched. For forwardprop from eager, the function call itself will have produced tangents which need to be recorded. Args: flat_outputs: The result of running . inference_args: A flat list of Tensors with inference inputs to the operation. input_tangents: A flat list of Tensors with input tangents consumed by the operation.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\polymorphic_function\\concrete_function.py",
    "ast_data": "FunctionDef name:record arg:self arg:flat_outputs arg:inference_args arg:input_tangents arguments arg arg arg arg Assign Call If Call Call Call"
  },
  {
    "library": "matplotlib",
    "name": "set_vertical",
    "source_code": "def set_vertical(self, v):\n    self._vertical = v",
    "docstring": "Parameters ---------- v : list of :mod: sizes for vertical division",
    "type": "method",
    "file_path": "matplotlib\\lib\\mpl_toolkits\\axes_grid1\\axes_divider.py",
    "ast_data": "FunctionDef name:set_vertical arg:self arg:v arguments arg arg Assign"
  },
  {
    "library": "cryptography",
    "name": "rsa_crt_dmq1",
    "source_code": "def rsa_crt_dmq1(private_exponent: int, q: int) -> int:\n    return private_exponent % (q - 1)",
    "docstring": "Compute the CRT private_exponent % (q - 1) value from the RSA private_exponent (d) and q.",
    "type": "function",
    "file_path": "cryptography\\src\\cryptography\\hazmat\\primitives\\asymmetric\\rsa.py",
    "ast_data": "FunctionDef name:rsa_crt_dmq1 arg:private_exponent arg:q arguments arg arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "validate_destinations",
    "source_code": "def validate_destinations(destinations):\n    if not isinstance(destinations, (value_lib.DistributedValues, tensor_lib.Tensor, indexed_slices.IndexedSlices, ps_values.AggregatingVariable, six.string_types, tpu_values.TPUMirroredVariable)) and (not resource_variable_ops.is_resource_variable(destinations)):\n        raise ValueError('destinations must be one of a `DistributedValues` object, a tf.Variable object, or a device string.')\n    if not check_destinations(destinations):\n        raise ValueError('destinations can not be empty')",
    "docstring": "Validates the is one of expected types.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\cross_device_ops.py",
    "ast_data": "FunctionDef name:validate_destinations arg:destinations arguments arg If BoolOp Call Call Raise Call If Call Raise Call"
  },
  {
    "library": "matplotlib",
    "name": "set_markeredgecolor",
    "source_code": "def set_markeredgecolor(self, ec):\n    self._set_markercolor('markeredgecolor', True, ec)",
    "docstring": "Set the marker edge color. Parameters ---------- ec : :mpltype:",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\lines.py",
    "ast_data": "FunctionDef name:set_markeredgecolor arg:self arg:ec arguments arg arg Call"
  },
  {
    "library": "pytorch",
    "name": "annotate",
    "source_code": "@compatibility(is_backward_compatible=False)\ndef annotate(val, type):\n    if isinstance(val, Proxy):\n        if val.node.type:\n            raise RuntimeError(f'Tried to annotate a value that already had a type on it! Existing type is {val.node.type} and new type is {type}. This could happen if you tried to annotate a function parameter value (in which case you should use the type slot on the function signature) or you called annotate on the same value twice')\n        else:\n            val.node.type = type\n        return val\n    else:\n        return val",
    "docstring": "Annotates a Proxy object with a given type. This function annotates a val with a given type if a type of the val is a torch.fx.Proxy object Args: val (object): An object to be annotated if its type is torch.fx.Proxy. type (object): A type to be assigned to a given proxy object as val. Returns: The given val. Raises: RuntimeError: If a val already has a type in its node.",
    "type": "function",
    "file_path": "pytorch\\torch\\fx\\annotate.py",
    "ast_data": "FunctionDef name:annotate arg:val arg:type arguments arg arg If Call If Raise Call Assign Return return:yes Return return:yes Call"
  },
  {
    "library": "kornia",
    "name": "from_matrix",
    "source_code": "@classmethod\ndef from_matrix(cls, matrix: Tensor) -> 'Quaternion':\n    return cls(rotation_matrix_to_quaternion(matrix))",
    "docstring": "Create a quaternion from a rotation matrix. Args: matrix: the rotation matrix to convert of shape :math:. Example: >>> m = torch.eye(3)[None] >>> q = Quaternion.from_matrix(m) >>> q.data Parameter containing: tensor([[1., 0., 0., 0.]], requires_grad=True)",
    "type": "method",
    "file_path": "kornia\\kornia\\geometry\\quaternion.py",
    "ast_data": "FunctionDef name:from_matrix arg:cls arg:matrix arguments arg arg Return return:yes Call Call"
  },
  {
    "library": "pandas",
    "name": "_creso",
    "source_code": "@cache_readonly\ndef _creso(self) -> int:\n    return abbrev_to_npy_unit(self.unit)",
    "docstring": "The NPY_DATETIMEUNIT corresponding to this dtype's resolution.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\dtypes\\dtypes.py",
    "ast_data": "FunctionDef name:_creso arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "size",
    "source_code": "def size(self, name=None):\n    if name is None:\n        name = '%s_Size' % self._name\n    if self._queue_ref.dtype == _dtypes.resource:\n        return gen_data_flow_ops.queue_size_v2(self._queue_ref, name=name)\n    else:\n        return gen_data_flow_ops.queue_size(self._queue_ref, name=name)",
    "docstring": "Compute the number of elements in this queue. >>> q = tf.queue.FIFOQueue(capacity=10, dtypes=tf.int32) >>> q.enqueue_many(tf.constant([1, 2, 3, 4], dtype=tf.int32)) >>> q.size() Args: name: A name for the operation (optional). Returns: A scalar tensor containing the number of elements in this queue.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\data_flow_ops.py",
    "ast_data": "FunctionDef name:size arg:self arg:name arguments arg arg If Compare Assign If Compare Return return:yes Call Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "_get_body_column_widths",
    "source_code": "def _get_body_column_widths(self) -> Sequence[int]:\n    strcols: Sequence[Sequence[str]] = list(zip(*self.strrows))\n    return [max((len(x) for x in col)) for col in strcols]",
    "docstring": "Get widths of table content columns.",
    "type": "method",
    "file_path": "pandas\\pandas\\io\\formats\\info.py",
    "ast_data": "FunctionDef name:_get_body_column_widths arg:self arguments arg Call Call Return return:yes Call Call"
  },
  {
    "library": "matplotlib",
    "name": "add_container",
    "source_code": "def add_container(self, container):\n    label = container.get_label()\n    if not label:\n        container.set_label('_container%d' % len(self.containers))\n    self.containers.append(container)\n    container._remove_method = self.containers.remove\n    return container",
    "docstring": "Add a to the Axes' containers; return the container.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\axes\\_base.py",
    "ast_data": "FunctionDef name:add_container arg:self arg:container arguments arg arg Assign Call If Call Call Call Assign Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "AutoWidthChar",
    "source_code": "class AutoWidthChar(Hlist):\n\n    def __init__(self, c: str, width: float, state: ParserState, always: bool=False, char_class: type[Char]=Char):\n        alternatives = state.fontset.get_sized_alternatives_for_symbol(state.font, c)\n        state = state.copy()\n        for fontname, sym in alternatives:\n            state.font = fontname\n            char = char_class(sym, state)\n            if char.width >= width:\n                break\n        factor = width / char.width\n        state.fontsize *= factor\n        char = char_class(sym, state)\n        super().__init__([char])\n        self.width = char.width",
    "docstring": "A character as close to the given width as possible. When using a font with multiple width versions of some characters (such as the BaKoMa fonts), the correct glyph will be selected, otherwise this will always just return a scaled version of the glyph.",
    "type": "class",
    "file_path": "matplotlib\\lib\\matplotlib\\_mathtext.py",
    "ast_data": "ClassDef name:AutoWidthChar FunctionDef name:__init__ arg:self arg:c arg:width arg:state arg:always arg:char_class arguments arg arg arg arg arg arg Assign Call Assign Call For Assign Assign Call If Compare Assign Assign Call Call Call Assign"
  },
  {
    "library": "scipy",
    "name": "roots_sh_legendre",
    "source_code": "def roots_sh_legendre(n, mu=False):\n    x, w = roots_legendre(n)\n    x = (x + 1) / 2\n    w /= 2\n    if mu:\n        return (x, w, 1.0)\n    else:\n        return (x, w)",
    "docstring": "Gauss-Legendre (shifted) quadrature. Compute the sample points and weights for Gauss-Legendre quadrature. The sample points are the roots of the nth degree shifted Legendre polynomial :math:. These sample points and weights correctly integrate polynomials of degree :math: or less over the interval :math: with weight function :math:. See 2.2.11 in [AS]_ for details. Parameters ---------- n : int quadrature order mu : bool, optional If True, return the sum of the weights, optional. Returns ------- x : ndarray Sample points w : ndarray Weights mu : float Sum of the weights See Also -------- scipy.integrate.fixed_quad References ---------- .. [AS] Milton Abramowitz and Irene A. Stegun, eds. Handbook of Mathematical Functions with Formulas, Graphs, and Mathematical Tables. New York: Dover, 1972.",
    "type": "function",
    "file_path": "scipy\\scipy\\special\\_orthogonal.py",
    "ast_data": "FunctionDef name:roots_sh_legendre arg:n arg:mu arguments arg arg Assign Call Assign If Return return:yes Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "_is_multitask",
    "source_code": "@abstractmethod\ndef _is_multitask(self):\n    pass",
    "docstring": "Bool indicating if class is meant for multidimensional target.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\linear_model\\_coordinate_descent.py",
    "ast_data": "FunctionDef name:_is_multitask arg:self arguments arg"
  },
  {
    "library": "tensorflow",
    "name": "write",
    "source_code": "def write(self, index, value, name=None):\n    del name\n    self._write(index, value)\n    return self.parent()",
    "docstring": "See TensorArray.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\tensor_array_ops.py",
    "ast_data": "FunctionDef name:write arg:self arg:index arg:value arg:name arguments arg arg arg arg Call Return return:yes Call"
  },
  {
    "library": "django",
    "name": "get_paginator",
    "source_code": "def get_paginator(self, *args, **kwargs):\n    return self.model_admin.get_paginator(self.request, *args, **kwargs)",
    "docstring": "Use the ModelAdmin's paginator.",
    "type": "method",
    "file_path": "django\\django\\contrib\\admin\\views\\autocomplete.py",
    "ast_data": "FunctionDef name:get_paginator arg:self arguments arg arg arg Return return:yes Call"
  },
  {
    "library": "django",
    "name": "chars",
    "source_code": "def chars(self, num, truncate=None, html=False):\n    self._setup()\n    length = int(num)\n    if length <= 0:\n        return ''\n    text = unicodedata.normalize('NFC', self._wrapped)\n    if html:\n        parser = TruncateCharsHTMLParser(length=length, replacement=truncate)\n        parser.feed(text)\n        parser.close()\n        return parser.output\n    return self._text_chars(length, truncate, text)",
    "docstring": "Return the text truncated to be no longer than the specified number of characters. specifies what should be used to notify that the string has been truncated, defaulting to a translatable string of an ellipsis.",
    "type": "method",
    "file_path": "django\\django\\utils\\text.py",
    "ast_data": "FunctionDef name:chars arg:self arg:num arg:truncate arg:html arguments arg arg arg arg Call Assign Call If Compare Return return:yes Assign Call If Assign Call Call Call Return return:yes Return return:yes Call"
  },
  {
    "library": "sphinx",
    "name": "on_source_read",
    "source_code": "def on_source_read(app: Sphinx, docname: str, content: list[str]) -> None:\n    app.env.current_document.reading_started_at = time.monotonic()",
    "docstring": "Start to measure reading duration.",
    "type": "function",
    "file_path": "sphinx\\sphinx\\ext\\duration.py",
    "ast_data": "FunctionDef name:on_source_read arg:app arg:docname arg:content arguments arg arg arg Assign Call"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, fetches, feed_dict, run_options, run_metadata, run_call_count, is_callable_runner=False):\n    self.fetches = fetches\n    self.feed_dict = feed_dict\n    self.run_options = run_options\n    self.run_metadata = run_metadata\n    self.run_call_count = run_call_count\n    self.is_callable_runner = is_callable_runner",
    "docstring": "Constructor of . Args: fetches: Fetch targets of the run() call. feed_dict: The feed dictionary to the run() call. run_options: RunOptions input to the run() call. run_metadata: RunMetadata input to the run() call. The above four arguments are identical to the input arguments to the run() method of a non-wrapped TensorFlow session. run_call_count: 1-based count of how many run calls (including this one) has been invoked. is_callable_runner: (bool) whether a runner returned by Session.make_callable is being run.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\debug\\wrappers\\framework.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:fetches arg:feed_dict arg:run_options arg:run_metadata arg:run_call_count arg:is_callable_runner arguments arg arg arg arg arg arg arg Assign Assign Assign Assign Assign Assign"
  },
  {
    "library": "scikit-learn",
    "name": "_compute_core_distances_",
    "source_code": "def _compute_core_distances_(X, neighbors, min_samples, working_memory):\n    n_samples = X.shape[0]\n    core_distances = np.empty(n_samples)\n    core_distances.fill(np.nan)\n    chunk_n_rows = get_chunk_n_rows(row_bytes=16 * min_samples, max_n_rows=n_samples, working_memory=working_memory)\n    slices = gen_batches(n_samples, chunk_n_rows)\n    for sl in slices:\n        core_distances[sl] = neighbors.kneighbors(X[sl], min_samples)[0][:, -1]\n    return core_distances",
    "docstring": "Compute the k-th nearest neighbor of each sample. Equivalent to neighbors.kneighbors(X, self.min_samples)[0][:, -1] but with more memory efficiency. Parameters ---------- X : array-like of shape (n_samples, n_features) The data. neighbors : NearestNeighbors instance The fitted nearest neighbors estimator. working_memory : int, default=None The sought maximum memory for temporary distance matrix chunks. When None (default), the value of `` is used. Returns ------- core_distances : ndarray of shape (n_samples,) Distance at which each sample becomes a core point. Points which will never be core have a distance of inf.",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\cluster\\_optics.py",
    "ast_data": "FunctionDef name:_compute_core_distances_ arg:X arg:neighbors arg:min_samples arg:working_memory arguments arg arg arg arg Assign Assign Call Call Assign Call Assign Call For Assign Call Return return:yes"
  },
  {
    "library": "numpy",
    "name": "tolist",
    "source_code": "def tolist(self, fill_value=None):\n    if fill_value is not None:\n        return self.filled(fill_value).tolist()\n    result = np.array(self.filled().tolist(), dtype=object)\n    mask = np.array(self._mask.tolist())\n    result[mask] = None\n    return result.tolist()",
    "docstring": "Return the data portion of the array as a list. Data items are converted to the nearest compatible Python type. Masked values are converted to fill_value. If fill_value is None, the corresponding entries in the output list will be ``.",
    "type": "method",
    "file_path": "numpy\\numpy\\ma\\mrecords.py",
    "ast_data": "FunctionDef name:tolist arg:self arg:fill_value arguments arg arg If Compare Return return:yes Call Call Assign Call Call Call Assign Call Call Assign Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "Reference",
    "source_code": "class Reference(_ObjectIdentityWrapper):\n    __slots__ = ()\n    unwrapped = property()\n\n    def deref(self):\n        return self._wrapped",
    "docstring": "Reference that refers an object.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\utils\\object_identity.py",
    "ast_data": "ClassDef name:Reference Assign Assign Call FunctionDef name:deref arg:self arguments arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_resolve_args_by_export_type",
    "source_code": "def _resolve_args_by_export_type(arg_name, arg_value, operator_export_type):\n    return arg_value",
    "docstring": "Resolves the arguments that are ignored when export_type != operator_export_type.ONNX.",
    "type": "function",
    "file_path": "pytorch\\torch\\onnx\\utils.py",
    "ast_data": "FunctionDef name:_resolve_args_by_export_type arg:arg_name arg:arg_value arg:operator_export_type arguments arg arg arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "FakeOp",
    "source_code": "class FakeOp(object):\n\n    def __init__(self):\n        self._device = ''\n\n    @property\n    def type(self):\n        return 'FakeOp'\n\n    @property\n    def device(self):\n        return self._device\n\n    def _set_device(self, device):\n        if isinstance(device, pydev.DeviceSpec):\n            self._device = device.to_string()\n        else:\n            self._device = device\n\n    def _set_device_from_string(self, device_str):\n        self._device = device_str",
    "docstring": "A helper class to determine the current device. Supports only the type and device set/get methods needed to run the graph's _apply_device_function method.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\tpu\\tpu_replication.py",
    "ast_data": "ClassDef name:FakeOp FunctionDef name:__init__ arg:self arguments arg Assign FunctionDef name:type arg:self arguments arg Return return:yes FunctionDef name:device arg:self arguments arg Return return:yes FunctionDef name:_set_device arg:self arg:device arguments arg arg If Call Assign Call Assign FunctionDef name:_set_device_from_string arg:self arg:device_str arguments arg arg Assign"
  },
  {
    "library": "scipy",
    "name": "_trimmed_stde_1D",
    "source_code": "def _trimmed_stde_1D(a, low_limit, up_limit, low_inclusive, up_inclusive):\n    n = a.count()\n    idx = a.argsort()\n    if low_limit:\n        if low_inclusive:\n            lowidx = int(low_limit * n)\n        else:\n            lowidx = np.round(low_limit * n)\n        a[idx[:lowidx]] = masked\n    if up_limit is not None:\n        if up_inclusive:\n            upidx = n - int(n * up_limit)\n        else:\n            upidx = n - np.round(n * up_limit)\n        a[idx[upidx:]] = masked\n    a[idx[:lowidx]] = a[idx[lowidx]]\n    a[idx[upidx:]] = a[idx[upidx - 1]]\n    winstd = a.std(ddof=1)\n    return winstd / ((1 - low_limit - up_limit) * np.sqrt(len(a)))",
    "docstring": "Returns the standard error of the trimmed mean for a 1D input data.",
    "type": "function",
    "file_path": "scipy\\scipy\\stats\\_mstats_basic.py",
    "ast_data": "FunctionDef name:_trimmed_stde_1D arg:a arg:low_limit arg:up_limit arg:low_inclusive arg:up_inclusive arguments arg arg arg arg arg Assign Call Assign Call If If Assign Call Assign Call Assign If Compare If Assign Call Assign Call Assign Assign Assign Assign Call Return return:yes Call Call"
  },
  {
    "library": "scikit-learn",
    "name": "_generate_items",
    "source_code": "def _generate_items(self, items):\n    for item in items:\n        if not is_scalar_nan(item):\n            yield item\n            continue\n        if not hasattr(self, 'nan_count'):\n            self.nan_count = 0\n        self.nan_count += 1",
    "docstring": "Generate items without nans. Stores the nan counts separately.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\utils\\_encode.py",
    "ast_data": "FunctionDef name:_generate_items arg:self arg:items arguments arg arg For If Call If Call Assign"
  },
  {
    "library": "tensorflow",
    "name": "adjoint",
    "source_code": "@tf_export('linalg.adjoint')\n@dispatch.add_dispatch_support\ndef adjoint(matrix, name=None):\n    with ops.name_scope(name, 'adjoint', [matrix]):\n        matrix = ops.convert_to_tensor(matrix, name='matrix')\n        return array_ops.matrix_transpose(matrix, conjugate=True)",
    "docstring": "Transposes the last two dimensions of and conjugates tensor . For example: Args: matrix: A . Must be , , , , or with shape . name: A name to give this (optional). Returns: The adjoint (a.k.a. Hermitian transpose a.k.a. conjugate transpose) of matrix.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\linalg\\linalg_impl.py",
    "ast_data": "FunctionDef name:adjoint arg:matrix arg:name arguments arg arg With Call Assign Call Return return:yes Call Call"
  },
  {
    "library": "scipy",
    "name": "_argmin",
    "source_code": "def _argmin(a, keepdims=False, axis=None):\n    res = np.argmin(a, axis=axis)\n    if keepdims and axis is not None:\n        res = np.expand_dims(res, axis=axis)\n    return res",
    "docstring": "argmin with a parameter. See If axis is not None, a.shape[axis] must be greater than 0.",
    "type": "function",
    "file_path": "scipy\\scipy\\_lib\\_util.py",
    "ast_data": "FunctionDef name:_argmin arg:a arg:keepdims arg:axis arguments arg arg arg Assign Call If BoolOp Compare Assign Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_batch_lowrank_logdet",
    "source_code": "def _batch_lowrank_logdet(W, D, capacitance_tril):\n    return 2 * capacitance_tril.diagonal(dim1=-2, dim2=-1).log().sum(-1) + D.log().sum(-1)",
    "docstring": "Uses \"matrix determinant lemma\":: log|W @ W.T + D| = log|C| + log|D|, where :math: is the capacitance matrix :math:, to compute the log determinant.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributions\\lowrank_multivariate_normal.py",
    "ast_data": "FunctionDef name:_batch_lowrank_logdet arg:W arg:D arg:capacitance_tril arguments arg arg arg Return return:yes Call Call Call Call Call"
  },
  {
    "library": "sphinx",
    "name": "_terms",
    "source_code": "@property\ndef _terms(self) -> dict[str, tuple[str, str]]:\n    return self.data.setdefault('terms', {})",
    "docstring": ".. note:: Will be removed soon. internal use only.",
    "type": "method",
    "file_path": "sphinx\\sphinx\\domains\\std\\__init__.py",
    "ast_data": "FunctionDef name:_terms arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "process_layer",
    "source_code": "def process_layer(layer_data):\n    layer_name = layer_data['name']\n    if layer_name in created_layers:\n        layer = created_layers[layer_name]\n    else:\n        from tensorflow.python.keras.layers import deserialize as deserialize_layer\n        layer = deserialize_layer(layer_data, custom_objects=custom_objects)\n        created_layers[layer_name] = layer\n    node_count_by_layer[layer] = int(_should_skip_first_node(layer))\n    inbound_nodes_data = layer_data['inbound_nodes']\n    inbound_nodes_data = tf_utils.convert_inner_node_data(inbound_nodes_data, wrap=True)\n    for node_data in inbound_nodes_data:\n        add_unprocessed_node(layer, node_data)",
    "docstring": "Deserializes a layer, then call it on appropriate inputs. Args: layer_data: layer config dict. Raises: ValueError: In case of improperly formatted dict.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\functional.py",
    "ast_data": "FunctionDef name:process_layer arg:layer_data arguments arg Assign If Compare Assign Assign Call Assign Assign Call Call Assign Assign Call For Call"
  },
  {
    "library": "numpy",
    "name": "allRoutineNames",
    "source_code": "def allRoutineNames(self):\n    return list(self.names_to_routines.keys())",
    "docstring": "Return the names of all the routines.",
    "type": "method",
    "file_path": "numpy\\numpy\\linalg\\lapack_lite\\make_lite.py",
    "ast_data": "FunctionDef name:allRoutineNames arg:self arguments arg Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "get_cuda_generator_meta_val",
    "source_code": "def get_cuda_generator_meta_val(device_idx: int):\n    return torch.cuda.default_generators[device_idx].clone_state()",
    "docstring": "Get a generator value to use as a meta val newly cloned generator will not contain tensors. it is only Generators that are registered to a CUDAGraph that contain tensors. since this does not contain Tensor it is fine to use in the meta.",
    "type": "function",
    "file_path": "pytorch\\torch\\_functorch\\_aot_autograd\\utils.py",
    "ast_data": "FunctionDef name:get_cuda_generator_meta_val arg:device_idx arguments arg Return return:yes Call"
  },
  {
    "library": "django",
    "name": "__getitem__",
    "source_code": "def __getitem__(self, k):\n    if not isinstance(k, (int, slice)):\n        raise TypeError('QuerySet indices must be integers or slices, not %s.' % type(k).__name__)\n    if isinstance(k, int) and k < 0 or (isinstance(k, slice) and (k.start is not None and k.start < 0 or (k.stop is not None and k.stop < 0))):\n        raise ValueError('Negative indexing is not supported.')\n    if self._result_cache is not None:\n        return self._result_cache[k]\n    if isinstance(k, slice):\n        qs = self._chain()\n        if k.start is not None:\n            start = int(k.start)\n        else:\n            start = None\n        if k.stop is not None:\n            stop = int(k.stop)\n        else:\n            stop = None\n        qs.query.set_limits(start, stop)\n        return list(qs)[::k.step] if k.step else qs\n    qs = self._chain()\n    qs.query.set_limits(k, k + 1)\n    qs._fetch_all()\n    return qs._result_cache[0]",
    "docstring": "Retrieve an item or slice from the set of results.",
    "type": "method",
    "file_path": "django\\django\\db\\models\\query.py",
    "ast_data": "FunctionDef name:__getitem__ arg:self arg:k arguments arg arg If Call Raise Call Call If BoolOp BoolOp Call Compare BoolOp Call BoolOp BoolOp Compare Compare BoolOp Compare Compare Raise Call If Compare Return return:yes If Call Assign Call If Compare Assign Call Assign If Compare Assign Call Assign Call Return return:yes Call Assign Call Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "__gt__",
    "source_code": "def __gt__(self, other):\n    return str(self) > str(other)",
    "docstring": "Allows feature columns to be sorted in Python 3 as they are in Python 2. Feature columns need to occasionally be sortable, for example when used as keys in a features dictionary passed to a layer. is called when the \"other\" object being compared during the sort does not have defined. Example: ``` # __lt__ only class class A(): def __lt__(self, other): return str(self) str(other) b = B() b \".",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\feature_column\\feature_column.py",
    "ast_data": "FunctionDef name:__gt__ arg:self arg:other arguments arg arg Return return:yes Compare Call Call"
  },
  {
    "library": "scikit-learn",
    "name": "_check_boundary_response_method",
    "source_code": "def _check_boundary_response_method(estimator, response_method, class_of_interest):\n    has_classes = hasattr(estimator, 'classes_')\n    if has_classes and _is_arraylike_not_scalar(estimator.classes_[0]):\n        msg = 'Multi-label and multi-output multi-class classifiers are not supported'\n        raise ValueError(msg)\n    if response_method == 'auto':\n        if is_regressor(estimator):\n            prediction_method = 'predict'\n        else:\n            prediction_method = ['decision_function', 'predict_proba', 'predict']\n    else:\n        prediction_method = response_method\n    return prediction_method",
    "docstring": "Validate the response methods to be used with the fitted estimator. Parameters ---------- estimator : object Fitted estimator to check. response_method : {'auto', 'decision_function', 'predict_proba', 'predict'} Specifies whether to use :term:, :term:, :term: as the target response. If set to 'auto', the response method is tried in the before mentioned order. class_of_interest : int, float, bool, str or None The class considered when plotting the decision. Cannot be None if multiclass and is 'predict_proba' or 'decision_function'. .. versionadded:: 1.4 Returns ------- prediction_method : list of str or str The name or list of names of the response methods to use.",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\inspection\\_plot\\decision_boundary.py",
    "ast_data": "FunctionDef name:_check_boundary_response_method arg:estimator arg:response_method arg:class_of_interest arguments arg arg arg Assign Call If BoolOp Call Assign Raise Call If Compare If Call Assign Assign Assign Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "clear",
    "source_code": "def clear(self):\n    self._clear()\n    if self.axis is not None:\n        self.axis.clear()",
    "docstring": "Clear the current spine.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\spines.py",
    "ast_data": "FunctionDef name:clear arg:self arguments arg Call If Compare Call"
  },
  {
    "library": "tensorflow",
    "name": "image",
    "source_code": "def image(name, tensor, bad_color=None, max_images=3, family=None, step=None):\n\n    def function(tag, scope):\n        bad_color_ = constant_op.constant([255, 0, 0, 255], dtype=dtypes.uint8) if bad_color is None else bad_color\n        return gen_summary_ops.write_image_summary(_summary_state.writer._resource, _choose_step(step), tag, array_ops.identity(tensor), bad_color_, max_images, name=scope)\n    return summary_writer_function(name, tensor, function, family=family)",
    "docstring": "Writes an image summary if possible.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\summary_ops_v2.py",
    "ast_data": "FunctionDef name:image arg:name arg:tensor arg:bad_color arg:max_images arg:family arg:step arguments arg arg arg arg arg arg FunctionDef name:function arg:tag arg:scope arguments arg arg Assign Compare Call Return return:yes Call Call Call Return return:yes Call"
  },
  {
    "library": "django",
    "name": "walk_to_end",
    "source_code": "def walk_to_end(ch, input_iter):\n    if ch == '(':\n        nesting = 1\n    else:\n        nesting = 0\n    for ch, escaped in input_iter:\n        if escaped:\n            continue\n        elif ch == '(':\n            nesting += 1\n        elif ch == ')':\n            if not nesting:\n                return\n            nesting -= 1",
    "docstring": "The iterator is currently inside a capturing group. Walk to the close of this group, skipping over any nested groups and handling escaped parentheses correctly.",
    "type": "function",
    "file_path": "django\\django\\utils\\regex_helper.py",
    "ast_data": "FunctionDef name:walk_to_end arg:ch arg:input_iter arguments arg arg If Compare Assign Assign For If If Compare If Compare If Return return:no"
  },
  {
    "library": "pytorch",
    "name": "unify_eq",
    "source_code": "def unify_eq(list_of_eq):\n    lhs, rhs = convert_eq(list_of_eq)\n    return unify(lhs, rhs)",
    "docstring": "Apply unification to a set of equality constraints",
    "type": "function",
    "file_path": "pytorch\\torch\\fx\\experimental\\unify_refinements.py",
    "ast_data": "FunctionDef name:unify_eq arg:list_of_eq arguments arg Assign Call Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "axis0_safe_slice",
    "source_code": "def axis0_safe_slice(X, mask, len_mask):\n    if len_mask != 0:\n        return X[safe_mask(X, mask), :]\n    return np.zeros(shape=(0, X.shape[1]))",
    "docstring": "Return a mask which is safer to use on X than safe_mask. This mask is safer than safe_mask since it returns an empty array, when a sparse matrix is sliced with a boolean mask with all False, instead of raising an unhelpful error in older versions of SciPy. See: Also note that we can avoid doing the dot product by checking if the len_mask is not zero in _huber_loss_and_gradient but this is not going to be the bottleneck, since the number of outliers and non_outliers are typically non-zero and it makes the code tougher to follow. Parameters ---------- X : {array-like, sparse matrix} Data on which to apply mask. mask : ndarray Mask to be used on X. len_mask : int The length of the mask. Returns ------- mask : ndarray Array that is safe to use on X.",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\utils\\_mask.py",
    "ast_data": "FunctionDef name:axis0_safe_slice arg:X arg:mask arg:len_mask arguments arg arg arg If Compare Return return:yes Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "group_by_size",
    "source_code": "def group_by_size(input_tensors, bytes_per_pack):\n    if bytes_per_pack == 0:\n        return [input_tensors]\n    packs = []\n    last_pack_size = 0\n    for value in input_tensors:\n        num_elements = value.shape.num_elements()\n        if num_elements is None:\n            logging.warning('not packing values due to the unknown or inconsistent shape of %s', value)\n            return [input_tensors]\n        size = num_elements * value.dtype.size\n        if not packs or last_pack_size > bytes_per_pack:\n            packs.append([])\n            last_pack_size = 0\n        packs[-1].append(value)\n        last_pack_size += size\n    return packs",
    "docstring": "Groups into chunks of . The method preserves the original order of . The grouping is best effort, each pack could have more or less bytes than . It only groups values with known shape. Args: input_tensors: a list of Tensor. bytes_per_pack: an integer. Returns: A list of packs of Tensor. All values are grouped into one pack if is zero or any of the value has unknown shape.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\cross_device_utils.py",
    "ast_data": "FunctionDef name:group_by_size arg:input_tensors arg:bytes_per_pack arguments arg arg If Compare Return return:yes Assign Assign For Assign Call If Compare Call Return return:yes Assign If BoolOp Compare Call Assign Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "ShardingStrategy",
    "source_code": "class ShardingStrategy(Enum):\n    FULL_SHARD = auto()\n    SHARD_GRAD_OP = auto()\n    NO_SHARD = auto()\n    HYBRID_SHARD = auto()\n    _HYBRID_SHARD_ZERO2 = auto()",
    "docstring": "This specifies the sharding strategy to be used for distributed training by :class:. - `DistributedDataParallel`, except this may provide even higher throughput since the unsharded parameters are not freed after the forward pass, saving the all-gathers in the pre-backward.",
    "type": "class",
    "file_path": "pytorch\\torch\\distributed\\fsdp\\api.py",
    "ast_data": "ClassDef name:ShardingStrategy Assign Call Assign Call Assign Call Assign Call Assign Call"
  },
  {
    "library": "tensorflow",
    "name": "set_clang_cuda_compiler_path",
    "source_code": "def set_clang_cuda_compiler_path(environ_cp):\n    default_clang_path = '/usr/lib/llvm-18/bin/clang'\n    if not os.path.exists(default_clang_path):\n        default_clang_path = '/usr/lib/llvm-17/bin/clang'\n        if not os.path.exists(default_clang_path):\n            default_clang_path = '/usr/lib/llvm-16/bin/clang'\n        if not os.path.exists(default_clang_path):\n            default_clang_path = shutil.which('clang') or ''\n    clang_cuda_compiler_path = prompt_loop_or_load_from_env(environ_cp, var_name='CLANG_CUDA_COMPILER_PATH', var_default=default_clang_path, ask_for_var='Please specify clang path that to be used as host compiler.', check_success=os.path.exists, resolve_symlinks=True, error_msg='Invalid clang path. %s cannot be found.')\n    environ_cp['CLANG_CUDA_COMPILER_PATH'] = clang_cuda_compiler_path\n    write_action_env_to_bazelrc('CLANG_CUDA_COMPILER_PATH', clang_cuda_compiler_path)\n    return clang_cuda_compiler_path",
    "docstring": "Set CLANG_CUDA_COMPILER_PATH.",
    "type": "function",
    "file_path": "tensorflow\\configure.py",
    "ast_data": "FunctionDef name:set_clang_cuda_compiler_path arg:environ_cp arguments arg Assign If Call Assign If Call Assign If Call Assign BoolOp Call Assign Call Assign Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "empty_cache",
    "source_code": "def empty_cache() -> None:\n    torch._C._mps_emptyCache()",
    "docstring": "Releases all unoccupied cached memory currently held by the caching allocator so that those can be used in other GPU applications.",
    "type": "function",
    "file_path": "pytorch\\torch\\mps\\__init__.py",
    "ast_data": "FunctionDef name:empty_cache arguments Call"
  },
  {
    "library": "matplotlib",
    "name": "_get_nth_label_width",
    "source_code": "def _get_nth_label_width(self, nth):\n    fig = self.axes.get_figure(root=False)\n    renderer = fig.get_figure(root=True)._get_renderer()\n    return Text(0, 0, self.get_text(self.labelLevelList[nth], self.labelFmt), figure=fig, fontproperties=self._label_font_props).get_window_extent(renderer).width",
    "docstring": "Return the width of the *nth* label, in pixels.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\contour.py",
    "ast_data": "FunctionDef name:_get_nth_label_width arg:self arg:nth arguments arg arg Assign Call Assign Call Call Return return:yes Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "graph_def",
    "source_code": "@property\ndef graph_def(self):\n    return self._graph.as_graph_def(add_shapes=self._add_shapes)",
    "docstring": "A serializable version of the underlying TensorFlow graph. Returns: A graph_pb2.GraphDef proto containing nodes for all of the Operations in the underlying TensorFlow graph.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\client\\session.py",
    "ast_data": "FunctionDef name:graph_def arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "django",
    "name": "do_pending_operations",
    "source_code": "def do_pending_operations(self, model):\n    key = (model._meta.app_label, model._meta.model_name)\n    for function in self._pending_operations.pop(key, []):\n        function(model)",
    "docstring": "Take a newly-prepared model and pass it to each function waiting for it. This is called at the very end of Apps.register_model().",
    "type": "method",
    "file_path": "django\\django\\apps\\registry.py",
    "ast_data": "FunctionDef name:do_pending_operations arg:self arg:model arguments arg arg Assign For Call Call"
  },
  {
    "library": "pytorch",
    "name": "_propagate_qconfig_helper",
    "source_code": "def _propagate_qconfig_helper(module, qconfig_dict, qconfig_parent=None, prefix='', prepare_custom_config_dict=None):\n    module_qconfig = qconfig_dict.get(type_before_parametrizations(module), qconfig_parent)\n    module_qconfig = qconfig_dict.get(prefix, module_qconfig)\n    module_qconfig = getattr(module, 'qconfig', module_qconfig)\n    torch.ao.quantization.qconfig._assert_valid_qconfig(module_qconfig, module)\n    qconfig_with_device_check = _add_module_to_qconfig_obs_ctr(module_qconfig, module)\n    module.qconfig = qconfig_with_device_check\n    for name, child in module.named_children():\n        module_prefix = prefix + '.' + name if prefix else name\n        if prepare_custom_config_dict is None or not (name in prepare_custom_config_dict.get('non_traceable_module_name', []) or type(child) in prepare_custom_config_dict.get('non_traceable_module_class', [])):\n            _propagate_qconfig_helper(child, qconfig_dict, qconfig_with_device_check, module_prefix)",
    "docstring": "This is a helper function for Args: module: input module qconfig_dict: dictionary that maps from name of submodule to quantization configuration qconfig_parent: quantization config of parent module, we will fallback to this config when there is no specified config for current module prefix: corresponding prefix of the current module, used as key in qconfig_dict prepare_custom_config_dict: dictionary for custom handling of modules see docs for :func: Return: None, module is modified inplace with qconfig attached",
    "type": "function",
    "file_path": "pytorch\\torch\\ao\\quantization\\quantize.py",
    "ast_data": "FunctionDef name:_propagate_qconfig_helper arg:module arg:qconfig_dict arg:qconfig_parent arg:prefix arg:prepare_custom_config_dict arguments arg arg arg arg arg Assign Call Call Assign Call Assign Call Call Assign Call Assign For Call Assign If BoolOp Compare BoolOp Compare Call Compare Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "FunctionDecoratedByContextlibContextManagerVariable",
    "source_code": "class FunctionDecoratedByContextlibContextManagerVariable(LocalGeneratorFunctionVariable):\n\n    def __init__(self, vt, **kwargs):\n        super().__init__(vt, generator_cls=ContextlibContextManagerLocalGeneratorObjectVariable, **kwargs)\n\n    def _build_inline_tracer(self, tx, args, kwargs):\n        tracer = super()._build_inline_tracer(tx, args, kwargs)\n        assert isinstance(tracer, torch._dynamo.symbolic_convert.InliningGeneratorInstructionTranslator)\n        tracer.is_generator_from_ctx_manager = True\n        return tracer",
    "docstring": ".. note:: This is only used when the function is annotated with @contextlib.contextmanager",
    "type": "class",
    "file_path": "pytorch\\torch\\_dynamo\\variables\\functions.py",
    "ast_data": "ClassDef name:FunctionDecoratedByContextlibContextManagerVariable FunctionDef name:__init__ arg:self arg:vt arguments arg arg arg Call Call FunctionDef name:_build_inline_tracer arg:self arg:tx arg:args arg:kwargs arguments arg arg arg arg Assign Call Call Call Assign Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "get_device_type",
    "source_code": "@staticmethod\ndef get_device_type() -> str:\n    return DefaultDeviceType._default_device_type",
    "docstring": "Get the current default device type for checkpointing. Returns: str: The current default device type.",
    "type": "method",
    "file_path": "pytorch\\torch\\utils\\checkpoint.py",
    "ast_data": "FunctionDef name:get_device_type arguments Return return:yes"
  },
  {
    "library": "scipy",
    "name": "add_xi",
    "source_code": "def add_xi(self, xi, yi=None):\n    if yi is not None:\n        if self.yi is None:\n            raise ValueError('No previous yi value to update!')\n        yi = self._reshape_yi(yi, check=True)\n        self.yi = np.vstack((self.yi, yi))\n    elif self.yi is not None:\n        raise ValueError('No update to yi provided!')\n    old_n = self.n\n    self.xi = np.concatenate((self.xi, xi))\n    self.n = len(self.xi)\n    self.wi **= -1\n    old_wi = self.wi\n    self.wi = np.zeros(self.n)\n    self.wi[:old_n] = old_wi\n    for j in range(old_n, self.n):\n        self.wi[:j] *= self._inv_capacity * (self.xi[j] - self.xi[:j])\n        self.wi[j] = np.multiply.reduce(self._inv_capacity * (self.xi[:j] - self.xi[j]))\n    self.wi **= -1\n    self._diff_cij = None\n    self._diff_baryint = None",
    "docstring": "Add more x values to the set to be interpolated The barycentric interpolation algorithm allows easy updating by adding more points for the polynomial to pass through. Parameters ---------- xi : array_like The x coordinates of the points that the polynomial should pass through. yi : array_like, optional The y coordinates of the points the polynomial should pass through. Should have shape `yiyiadd_xi` are not randomly permuted so there is potential for numerical instability, especially for a large number of points. If this happens, please reconstruct interpolation from scratch instead.",
    "type": "method",
    "file_path": "scipy\\scipy\\interpolate\\_polyint.py",
    "ast_data": "FunctionDef name:add_xi arg:self arg:xi arg:yi arguments arg arg arg If Compare If Compare Raise Call Assign Call Assign Call If Compare Raise Call Assign Assign Call Assign Call Assign Assign Call Assign For Call Assign Call Assign Assign"
  },
  {
    "library": "tensorflow",
    "name": "_update_output_shapes",
    "source_code": "def _update_output_shapes(self, incoming_output_shapes: List[TensorShape]):\n    nest.assert_same_structure(self._output_shapes, incoming_output_shapes)\n    updated_output_shapes = []\n    for old_output_shape, incoming_output_shape in zip(self._output_shapes, incoming_output_shapes):\n        if old_output_shape:\n            updated_output_shapes.append(old_output_shape)\n        else:\n            updated_output_shapes.append(incoming_output_shape)\n    self._output_shapes = updated_output_shapes",
    "docstring": "Update the existing output shapes based on the new output shapes. The existing output shapes always have higher piority than the new incoming output shapes. Args: incoming_output_shapes: nested structure of TensorShape to override the existing output shapes.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\tpu\\tpu_embedding_v2.py",
    "ast_data": "FunctionDef name:_update_output_shapes arg:self arg:incoming_output_shapes arguments arg arg Call Assign For Call If Call Call Assign"
  },
  {
    "library": "numpy",
    "name": "rindex",
    "source_code": "def rindex(self, sub, start=0, end=None):\n    return rindex(self, sub, start, end)",
    "docstring": "Like , but raises :exc: when the substring is not found. See Also -------- char.rindex",
    "type": "method",
    "file_path": "numpy\\numpy\\_core\\defchararray.py",
    "ast_data": "FunctionDef name:rindex arg:self arg:sub arg:start arg:end arguments arg arg arg arg Return return:yes Call"
  },
  {
    "library": "cherrypy",
    "name": "socket_host",
    "source_code": "@property\ndef socket_host(self):\n    return self._socket_host",
    "docstring": "The hostname or IP address on which to listen for connections. Host values may be any IPv4 or IPv6 address, or any valid hostname. The string 'localhost' is a synonym for '127.0.0.1' (or '::1', if your hosts file prefers IPv6). The string '0.0.0.0' is a special IPv4 entry meaning \"any active interface\" (INADDR_ANY), and '::' is the similar IN6ADDR_ANY for IPv6. The empty string or None are not allowed.",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\_cpserver.py",
    "ast_data": "FunctionDef name:socket_host arg:self arguments arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "wait_device_ops",
    "source_code": "def wait_device_ops(devices=None):\n    if devices is None:\n        devices = []\n    torch._C._lazy._wait_device_ops(devices=devices)",
    "docstring": "Waits for all the async operations on the given devices to complete. Args: devices (string..., optional): The devices whose async ops need to be waited for. If empty, all the local devices will be waited for.",
    "type": "function",
    "file_path": "pytorch\\torch\\_lazy\\__init__.py",
    "ast_data": "FunctionDef name:wait_device_ops arg:devices arguments arg If Compare Assign Call"
  },
  {
    "library": "pytorch",
    "name": "_is_scalar_list",
    "source_code": "def _is_scalar_list(x: _C.Value) -> bool:\n    x_type = _as_list_type(x.type())\n    if x_type is None:\n        return False\n    scalar_type = _type_utils.JitScalarType.from_value(x)\n    return scalar_type.onnx_compatible()",
    "docstring": "Checks if x is a scalar list, for example: List[float], List[int]. Besides checking the type is ListType, we also check if the data type is a valid ONNX data type.",
    "type": "function",
    "file_path": "pytorch\\torch\\onnx\\symbolic_helper.py",
    "ast_data": "FunctionDef name:_is_scalar_list arg:x arguments arg Assign Call Call If Compare Return return:yes Assign Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_to_dense",
    "source_code": "def _to_dense(self):\n    if self.batch_shape.is_fully_defined():\n        batch_shape = self.batch_shape\n    else:\n        batch_shape = self.batch_shape_tensor()\n    dim_value = tensor_shape.dimension_value(self.domain_dimension)\n    if dim_value is not None:\n        n = dim_value\n    else:\n        n = self.domain_dimension_tensor()\n    eye = linalg_ops.eye(num_rows=n, batch_shape=batch_shape, dtype=self.dtype)\n    return self.matmul(eye)",
    "docstring": "Generic and often inefficient implementation. Override often.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\linalg\\linear_operator.py",
    "ast_data": "FunctionDef name:_to_dense arg:self arguments arg If Call Assign Assign Call Assign Call If Compare Assign Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "clear_error",
    "source_code": "def clear_error(self):\n    pywrap_tfe.TFE_ExecutorClearError(self._handle)",
    "docstring": "Clears errors raised in this executor during execution.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\executor.py",
    "ast_data": "FunctionDef name:clear_error arg:self arguments arg Call"
  },
  {
    "library": "matplotlib",
    "name": "close",
    "source_code": "def close(self):\n    if not self.file.closed:\n        self.file.close()",
    "docstring": "Close the underlying file if it is open.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\dviread.py",
    "ast_data": "FunctionDef name:close arg:self arguments arg If Call"
  },
  {
    "library": "tensorflow",
    "name": "OnRunStartRequest",
    "source_code": "class OnRunStartRequest:\n\n    def __init__(self, fetches, feed_dict, run_options, run_metadata, run_call_count, is_callable_runner=False):\n        self.fetches = fetches\n        self.feed_dict = feed_dict\n        self.run_options = run_options\n        self.run_metadata = run_metadata\n        self.run_call_count = run_call_count\n        self.is_callable_runner = is_callable_runner",
    "docstring": "Request to an on-run-start callback. This callback is invoked during a run() call of the debug-wrapper session, immediately after the run() call counter is incremented.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\debug\\wrappers\\framework.py",
    "ast_data": "ClassDef name:OnRunStartRequest FunctionDef name:__init__ arg:self arg:fetches arg:feed_dict arg:run_options arg:run_metadata arg:run_call_count arg:is_callable_runner arguments arg arg arg arg arg arg arg Assign Assign Assign Assign Assign Assign"
  },
  {
    "library": "django",
    "name": "get_connection",
    "source_code": "def get_connection(using=None):\n    if using is None:\n        using = DEFAULT_DB_ALIAS\n    return connections[using]",
    "docstring": "Get a database connection by name, or the default database connection if no name is provided. This is a private API.",
    "type": "function",
    "file_path": "django\\django\\db\\transaction.py",
    "ast_data": "FunctionDef name:get_connection arg:using arguments arg If Compare Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "get_unique_graph",
    "source_code": "def get_unique_graph(tops, check_types=None, none_if_empty=False):\n    if isinstance(tops, ops.Graph):\n        return tops\n    if not is_iterable(tops):\n        raise TypeError('{} is not iterable'.format(type(tops)))\n    if check_types is None:\n        check_types = (ops.Operation, tensor_lib.Tensor)\n    elif not is_iterable(check_types):\n        check_types = (check_types,)\n    g = None\n    for op in tops:\n        if not isinstance(op, check_types):\n            raise TypeError('Expected a type in ({}), got: {}'.format(', '.join([str(t) for t in check_types]), type(op)))\n        if g is None:\n            g = op.graph\n        elif g._graph_key != op.graph._graph_key:\n            raise ValueError('Operation {} does not belong to given graph'.format(op))\n    if g is None and (not none_if_empty):\n        raise ValueError(\"Can't find the unique graph of an empty list\")\n    return g",
    "docstring": "Return the unique graph used by the all the elements in tops. Args: tops: iterable of elements to check (usually a list of tf.Operation and/or tf.Tensor). Or a tf.Graph. check_types: check that the element in tops are of given type(s). If None, the types (tf.Operation, tf.Tensor) are used. none_if_empty: don't raise an error if tops is an empty list, just return None. Returns: The unique graph used by all the tops. Raises: TypeError: if tops is not a iterable of tf.Operation. ValueError: if the graph is not unique.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\op_selector.py",
    "ast_data": "FunctionDef name:get_unique_graph arg:tops arg:check_types arg:none_if_empty arguments arg arg arg If Call Return return:yes If Call Raise Call Call Call If Compare Assign If Call Assign Assign For If Call Raise Call Call Call Call Call If Compare Assign If Compare Raise Call Call If BoolOp Compare Raise Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_validate_quantized_input_stats",
    "source_code": "def _validate_quantized_input_stats(self, converter_kwargs, quant_mode):\n    quantized_types = frozenset({_dtypes.int8, _dtypes.uint8})\n    requires_quantized_input_stats = (converter_kwargs['inference_type'] in quantized_types or converter_kwargs['inference_input_type'] in quantized_types) and (not quant_mode.is_post_training_integer_quantization())\n    if requires_quantized_input_stats and (not converter_kwargs['quantized_input_stats']):\n        raise ValueError('The `quantized_input_stats` flag must be defined when either `inference_type` flag or `inference_input_type` flag is set to tf.int8 or tf.uint8. Currently, `inference_type={}` and `inference_input_type={}`.'.format(_get_tf_type_name(converter_kwargs['inference_type']), _get_tf_type_name(converter_kwargs['inference_input_type'])))",
    "docstring": "Ensure the flag is provided if required.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\lite\\python\\lite.py",
    "ast_data": "FunctionDef name:_validate_quantized_input_stats arg:self arg:converter_kwargs arg:quant_mode arguments arg arg arg Assign Call Assign BoolOp BoolOp Compare Compare Call If BoolOp Raise Call Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "output_slot",
    "source_code": "@property\ndef output_slot(self):\n    return self._output_slot",
    "docstring": "Output slot index from which the tensor value was dumped. Returns: () output slot index watched by the debug op.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\debug\\lib\\debug_data.py",
    "ast_data": "FunctionDef name:output_slot arg:self arguments arg Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "_compute_oob_predictions",
    "source_code": "def _compute_oob_predictions(self, X, y):\n    if issparse(X):\n        X = X.tocsr()\n    n_samples = y.shape[0]\n    n_outputs = self.n_outputs_\n    if is_classifier(self) and hasattr(self, 'n_classes_'):\n        oob_pred_shape = (n_samples, self.n_classes_[0], n_outputs)\n    else:\n        oob_pred_shape = (n_samples, 1, n_outputs)\n    oob_pred = np.zeros(shape=oob_pred_shape, dtype=np.float64)\n    n_oob_pred = np.zeros((n_samples, n_outputs), dtype=np.int64)\n    n_samples_bootstrap = _get_n_samples_bootstrap(n_samples, self.max_samples)\n    for estimator in self.estimators_:\n        unsampled_indices = _generate_unsampled_indices(estimator.random_state, n_samples, n_samples_bootstrap)\n        y_pred = self._get_oob_predictions(estimator, X[unsampled_indices, :])\n        oob_pred[unsampled_indices, ...] += y_pred\n        n_oob_pred[unsampled_indices, :] += 1\n    for k in range(n_outputs):\n        if (n_oob_pred == 0).any():\n            warn('Some inputs do not have OOB scores. This probably means too few trees were used to compute any reliable OOB estimates.', UserWarning)\n            n_oob_pred[n_oob_pred == 0] = 1\n        oob_pred[..., k] /= n_oob_pred[..., [k]]\n    return oob_pred",
    "docstring": "Compute and set the OOB score. Parameters ---------- X : array-like of shape (n_samples, n_features) The data matrix. y : ndarray of shape (n_samples, n_outputs) The target matrix. Returns ------- oob_pred : ndarray of shape (n_samples, n_classes, n_outputs) or (n_samples, 1, n_outputs) The OOB predictions.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\ensemble\\_forest.py",
    "ast_data": "FunctionDef name:_compute_oob_predictions arg:self arg:X arg:y arguments arg arg arg If Call Assign Call Assign Assign If BoolOp Call Call Assign Assign Assign Call Assign Call Assign Call For Assign Call Assign Call For Call If Call Compare Call Assign Compare Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "dtype",
    "source_code": "def dtype(self, node: IRNode) -> Optional[str]:\n    if node is None:\n        return 'void'\n    return DTYPE_TO_CPP.get(node.get_layout().dtype)",
    "docstring": "Generates code which represents dtype of a given node.",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\codegen\\cuda\\cuda_kernel.py",
    "ast_data": "FunctionDef name:dtype arg:self arg:node arguments arg arg If Compare Return return:yes Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "share_memory_",
    "source_code": "def share_memory_(self):\n    from torch.multiprocessing import get_sharing_strategy\n    if self.device.type in ['cuda', torch._C._get_privateuse1_backend_name()]:\n        pass\n    elif get_sharing_strategy() == 'file_system':\n        self._share_filename_cpu_()\n    else:\n        self._share_fd_cpu_()\n    return self",
    "docstring": "See :meth:",
    "type": "method",
    "file_path": "pytorch\\torch\\storage.py",
    "ast_data": "FunctionDef name:share_memory_ arg:self arguments arg If Compare Call If Compare Call Call Call Return return:yes"
  },
  {
    "library": "kornia",
    "name": "_compute_tensor_center",
    "source_code": "def _compute_tensor_center(tensor: Tensor) -> Tensor:\n    if not 2 <= len(tensor.shape) <= 4:\n        raise AssertionError(f'Must be a 3D tensor as HW, CHW and BCHW. Got {tensor.shape}.')\n    height, width = tensor.shape[-2:]\n    center_x: float = float(width - 1) / 2\n    center_y: float = float(height - 1) / 2\n    center: Tensor = torch.tensor([center_x, center_y], device=tensor.device, dtype=tensor.dtype)\n    return center",
    "docstring": "Compute the center of tensor plane for (H, W), (C, H, W) and (B, C, H, W).",
    "type": "function",
    "file_path": "kornia\\kornia\\geometry\\transform\\affwarp.py",
    "ast_data": "FunctionDef name:_compute_tensor_center arg:tensor arguments arg If Compare Call Raise Call Assign Call Call Call Return return:yes"
  },
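A minimal check of the center formula in `_compute_tensor_center` above, assuming only that torch is available:

```python
import torch

# For an H x W plane the center is ((W - 1) / 2, (H - 1) / 2),
# returned in (x, y) order as in the kornia helper above.
tensor = torch.zeros(3, 5, 7)  # CHW: height 5, width 7
height, width = tensor.shape[-2:]
center = torch.tensor([(width - 1) / 2.0, (height - 1) / 2.0])
print(center)  # tensor([3., 2.])
```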
  {
    "library": "pandas",
    "name": "insert",
    "source_code": "def insert(self, loc: int, item) -> MultiIndex:\n    item = self._validate_fill_value(item)\n    new_levels = []\n    new_codes = []\n    for k, level, level_codes in zip(item, self.levels, self.codes):\n        if k not in level:\n            lev_loc = len(level)\n            level = level.insert(lev_loc, k)\n            if isna(level[lev_loc]):\n                lev_loc = -1\n        else:\n            lev_loc = level.get_loc(k)\n        new_levels.append(level)\n        new_codes.append(np.insert(ensure_int64(level_codes), loc, lev_loc))\n    return MultiIndex(levels=new_levels, codes=new_codes, names=self.names, verify_integrity=False)",
    "docstring": "Make new MultiIndex inserting new item at location Parameters ---------- loc : int item : tuple Must be same length as number of levels in the MultiIndex Returns ------- new_index : Index",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\indexes\\multi.py",
    "ast_data": "FunctionDef name:insert arg:self arg:loc arg:item arguments arg arg arg Assign Call Assign Assign For Call If Compare Assign Call Assign Call If Call Assign Assign Call Call Call Call Call Return return:yes Call"
  },
  {
    "library": "cherrypy",
    "name": "cancel",
    "source_code": "def cancel(self):\n    self.running = False",
    "docstring": "Set a task cancellation flag.",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\process\\plugins.py",
    "ast_data": "FunctionDef name:cancel arg:self arguments arg Assign"
  },
  {
    "library": "tensorflow",
    "name": "_AssertAtLeast3DImage",
    "source_code": "def _AssertAtLeast3DImage(image):\n    return control_flow_ops.with_dependencies(_CheckAtLeast3DImage(image, require_static=False), image)",
    "docstring": "Assert that we are working with a properly shaped image. Performs the check statically if possible (i.e. if the shape is statically known). Otherwise adds a control dependency to an assert op that checks the dynamic shape. Args: image: >= 3-D Tensor of size [*, height, width, depth] Raises: ValueError: if image.shape is not a [>= 3] vector. Returns: If the shape of could be verified statically, is returned unchanged, otherwise there will be a control dependency added that asserts the correct dynamic shape.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\image_ops_impl.py",
    "ast_data": "FunctionDef name:_AssertAtLeast3DImage arg:image arguments arg Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "order_by",
    "source_code": "def order_by(self, attribute):\n    self._options['order_by'] = attribute\n    return self",
    "docstring": "Order the displayed profiler nodes based on a attribute. Supported attribute includes micros, bytes, occurrence, params, etc. Args: attribute: An attribute the profiler node has. Returns: self",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\profiler\\option_builder.py",
    "ast_data": "FunctionDef name:order_by arg:self arg:attribute arguments arg arg Assign Return return:yes"
  },
  {
    "library": "django",
    "name": "has_table",
    "source_code": "def has_table(self):\n    if self._has_table:\n        return True\n    with self.connection.cursor() as cursor:\n        tables = self.connection.introspection.table_names(cursor)\n    self._has_table = self.Migration._meta.db_table in tables\n    return self._has_table",
    "docstring": "Return True if the django_migrations table exists.",
    "type": "method",
    "file_path": "django\\django\\db\\migrations\\recorder.py",
    "ast_data": "FunctionDef name:has_table arg:self arguments arg If Return return:yes With Call Assign Call Assign Compare Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_register_post_forward_hook",
    "source_code": "@no_type_check\ndef _register_post_forward_hook(state: _FSDPState, module: nn.Module) -> None:\n    for forward_handle in state._post_forward_handles:\n        forward_handle.remove()\n    state._post_forward_handles.clear()\n    module_param_handle = state._fully_sharded_module_to_handle.get(module, None)\n    hook = functools.partial(_post_forward, state, module_param_handle, _post_forward_reshard)\n    state._post_forward_handles.append(module.register_forward_hook(hook))",
    "docstring": "Registers a post-forward hook on ``. Even if the module has no handles, we should register the hook since it will register the module's pre-backward hook.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\fsdp\\_runtime_utils.py",
    "ast_data": "FunctionDef name:_register_post_forward_hook arg:state arg:module arguments arg arg For Call Call Assign Call Assign Call Call Call"
  },
  {
    "library": "pandas",
    "name": "_maybe_match_names",
    "source_code": "def _maybe_match_names(self, other):\n    if len(self.names) != len(other.names):\n        return [None] * len(self.names)\n    names = []\n    for a_name, b_name in zip(self.names, other.names):\n        if a_name == b_name:\n            names.append(a_name)\n        else:\n            names.append(None)\n    return names",
    "docstring": "Try to find common names to attach to the result of an operation between a and b. Return a consensus list of names if they match at least partly or list of None if they have completely different names.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\indexes\\multi.py",
    "ast_data": "FunctionDef name:_maybe_match_names arg:self arg:other arguments arg arg If Compare Call Call Return return:yes Call Assign For Call If Compare Call Call Return return:yes"
  },
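A standalone sketch of the consensus-name logic in `_maybe_match_names` above; `match_names` is a hypothetical helper, not pandas API:

```python
def match_names(a_names, b_names):
    # Length mismatch: no consensus is possible, return all-None.
    if len(a_names) != len(b_names):
        return [None] * len(a_names)
    # Keep a name only where both sides agree.
    return [a if a == b else None for a, b in zip(a_names, b_names)]

print(match_names(["x", "y"], ["x", "z"]))  # ['x', None]
print(match_names(["x"], ["x", "y"]))       # [None]
```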
  {
    "library": "tensorflow",
    "name": "get_function",
    "source_code": "def get_function(name, entries):\n    contents = '\\nabsl::optional<tensorflow::gtl::FlatSet<int>> {name}(\\n    const tensorflow::string &op_name) {{\\n  static std::array<OpIndexInfo, {count}> a = {{{{\\n'.format(name=name, count=len(entries) + 1)\n    contents += '      '\n    contents += '\\n      '.join((entries[op_type] for op_type in sorted(entries)))\n    contents += '\\n      {\"VarHandleOp\"},'\n    contents += '\\n  }};\\n  static const auto &m = *OpGradientInfoInit(a);\\n\\n  auto it = m.find(op_name);\\n  if (it != m.end()) {\\n    return it->second;\\n  }\\n  return absl::nullopt;\\n}\\n'\n    return contents",
    "docstring": "Generates lookup function with given name and lookup table entries.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\gradient_input_output_exclusions.py",
    "ast_data": "FunctionDef name:get_function arg:name arg:entries arguments arg arg Assign Call Call Call Call Return return:yes"
  },
  {
    "library": "pygame",
    "name": "inv_frac",
    "source_code": "def inv_frac(value):\n    return 1 - (value - floor(value))",
    "docstring": "return inverse fractional part of x",
    "type": "function",
    "file_path": "pygame\\src_py\\draw_py.py",
    "ast_data": "FunctionDef name:inv_frac arg:value arguments arg Return return:yes Call"
  },
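The identity behind `inv_frac` above, restated as a runnable sketch:

```python
from math import floor

def inv_frac(value):
    # frac(value) = value - floor(value), so this returns 1 - frac(value).
    return 1 - (value - floor(value))

print(inv_frac(2.25))   # 0.75
print(inv_frac(-0.25))  # floor(-0.25) = -1, frac = 0.75, result 0.25
```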
  {
    "library": "matplotlib",
    "name": "get_segments",
    "source_code": "def get_segments(self):\n    segments = []\n    for path in self._paths:\n        vertices = [vertex for vertex, _ in path.iter_segments(simplify=False)]\n        vertices = np.asarray(vertices)\n        segments.append(vertices)\n    return segments",
    "docstring": "Returns ------- list List of segments in the LineCollection. Each list item contains an array of vertices.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\collections.py",
    "ast_data": "FunctionDef name:get_segments arg:self arguments arg Assign For Assign Call Assign Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "log_data_ptr_mismatch",
    "source_code": "def log_data_ptr_mismatch(placeholders: Sequence[PlaceholderInfo], inputs: list[InputType], recorded_data_ptr: Sequence[Optional[int]], target_idxs: Sequence[int], mismatch: CheckInvariantStatus) -> str:\n    assert len(inputs) == len(recorded_data_ptr) and len(inputs) == len(placeholders), 'length mismatch between inputs, recorded_data_ptr, and placeholders'\n    t_tensors = [inputs[i] for i in target_idxs]\n    t_data_ptrs = [recorded_data_ptr[i] for i in target_idxs]\n    error_msg = f'{mismatch}.\\n'\n    for i, (tensor, data_ptr) in enumerate(zip(t_tensors, t_data_ptrs)):\n        assert isinstance(tensor, torch.Tensor)\n        index = target_idxs[i]\n        if tensor.data_ptr() != data_ptr:\n            placeholder = placeholders[index]\n            error_msg = f'{error_msg}input name: {placeholder.name}. data pointer changed from {data_ptr} to {tensor.data_ptr()}. input stack trace: {get_placeholder_stack_trace(placeholder)}\\n'\n    return error_msg",
    "docstring": "Logs the mismatch between input data pointers and recorded data pointers. This checks only idxs in target_idxs.",
    "type": "function",
    "file_path": "pytorch\\torch\\_inductor\\cudagraph_utils.py",
    "ast_data": "FunctionDef name:log_data_ptr_mismatch arg:placeholders arg:inputs arg:recorded_data_ptr arg:target_idxs arg:mismatch arguments arg arg arg arg arg BoolOp Compare Call Call Compare Call Call Assign Assign Assign For Call Call Call Assign If Compare Call Assign Assign Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "isframe",
    "source_code": "def isframe(object):\n    return _inspect.isframe(tf_decorator.unwrap(object)[1])",
    "docstring": "TFDecorator-aware replacement for inspect.ismodule.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\utils\\tf_inspect.py",
    "ast_data": "FunctionDef name:isframe arg:object arguments arg Return return:yes Call Call"
  },
  {
    "library": "scipy",
    "name": "MatFile4Writer",
    "source_code": "class MatFile4Writer:\n\n    def __init__(self, file_stream, oned_as=None):\n        self.file_stream = file_stream\n        if oned_as is None:\n            oned_as = 'row'\n        self.oned_as = oned_as\n        self._matrix_writer = None\n\n    def put_variables(self, mdict, write_header=None):\n        self._matrix_writer = VarWriter4(self)\n        for name, var in mdict.items():\n            self._matrix_writer.write(var, name)",
    "docstring": "Class for writing matlab 4 format files",
    "type": "class",
    "file_path": "scipy\\scipy\\io\\matlab\\_mio4.py",
    "ast_data": "ClassDef name:MatFile4Writer FunctionDef name:__init__ arg:self arg:file_stream arg:oned_as arguments arg arg arg Assign If Compare Assign Assign Assign FunctionDef name:put_variables arg:self arg:mdict arg:write_header arguments arg arg arg Assign Call For Call Call"
  },
  {
    "library": "pytorch",
    "name": "_generate_parameter_and_buffer_placements",
    "source_code": "def _generate_parameter_and_buffer_placements(params_and_buffers: list[str], parallel_strategies: dict[str, ParallelStyle]) -> dict[str, Placement]:\n    parameter_placements: dict[str, Placement] = {}\n    for linear_fqn, parallel_style in parallel_strategies.items():\n        weight_fqn = f'{linear_fqn}.weight'\n        bias_fqn = f'{linear_fqn}.bias'\n        assert weight_fqn in params_and_buffers\n        parameter_placements[weight_fqn] = Shard(0) if parallel_style == ColwiseParallel else Shard(1)\n        if bias_fqn in params_and_buffers:\n            parameter_placements[bias_fqn] = Shard(0) if parallel_style == ColwiseParallel else Replicate()\n    return parameter_placements",
    "docstring": "Build parameter placements based on the give parallel style of linear layers.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\tensor\\experimental\\_tp_transform.py",
    "ast_data": "FunctionDef name:_generate_parameter_and_buffer_placements arg:params_and_buffers arg:parallel_strategies arguments arg arg For Call Assign Assign Compare Assign Compare Call Call If Compare Assign Compare Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_to_string",
    "source_code": "def _to_string(dataset_id) -> str:\n    if isinstance(dataset_id, tensor.Tensor):\n        return dataset_id if dataset_id.dtype == dtypes.string else string_ops.as_string(dataset_id)\n    return dataset_id.decode() if isinstance(dataset_id, bytes) else str(dataset_id)",
    "docstring": "Converts to string.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\data\\experimental\\ops\\data_service_ops.py",
    "ast_data": "FunctionDef name:_to_string arg:dataset_id arguments arg If Call Return return:yes Compare Call Return return:yes Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_maybe_wrap_with_training_arg",
    "source_code": "def _maybe_wrap_with_training_arg(self, call_fn, match_layer_training_arg):\n    if not self.layer._expects_training_arg and self._expects_training_arg:\n        arg_spec = tf_inspect.getfullargspec(call_fn)\n        args = arg_spec.args + ['training']\n        defaults = list(arg_spec.defaults or [])\n        defaults.append(False)\n        new_arg_spec = tf_inspect.FullArgSpec(args=args, varargs=arg_spec.varargs, varkw=arg_spec.varkw, defaults=defaults, kwonlyargs=arg_spec.kwonlyargs, kwonlydefaults=arg_spec.kwonlydefaults, annotations=arg_spec.annotations)\n        self._training_arg_index = len(args) - 1\n        if tf_inspect.ismethod(call_fn):\n            self._training_arg_index -= 1\n\n        def wrap_with_training_arg(*args, **kwargs):\n            if match_layer_training_arg:\n                args = list(args)\n                kwargs = kwargs.copy()\n                utils.remove_training_arg(self._training_arg_index, args, kwargs)\n            return call_fn(*args, **kwargs)\n        return tf_decorator.make_decorator(target=call_fn, decorator_func=wrap_with_training_arg, decorator_argspec=new_arg_spec)\n    return call_fn",
    "docstring": "Wraps call function with added training argument if necessary.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\saving\\saved_model\\save_impl.py",
    "ast_data": "FunctionDef name:_maybe_wrap_with_training_arg arg:self arg:call_fn arg:match_layer_training_arg arguments arg arg arg If BoolOp Assign Call Assign Assign Call BoolOp Call Assign Call Assign Call If Call FunctionDef name:wrap_with_training_arg arguments arg arg If Assign Call Assign Call Call Return return:yes Call Return return:yes Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_add_object_graph_edges",
    "source_code": "def _add_object_graph_edges(self, proto, node_id):\n    obj = self._nodes[node_id]\n    setter = self._node_setters[node_id]\n    for reference in proto.children:\n        setter(obj, reference.local_name, self._nodes[reference.node_id])\n        if reference.local_name == '__call__' and (not callable(obj)):\n            setattr(type(obj), '__call__', _call_attribute)",
    "docstring": "Adds edges from an object to its children.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\saved_model\\load.py",
    "ast_data": "FunctionDef name:_add_object_graph_edges arg:self arg:proto arg:node_id arguments arg arg arg Assign Assign For Call If BoolOp Compare Call Call Call"
  },
  {
    "library": "pandas",
    "name": "max",
    "source_code": "@_period_dispatch\ndef max(self, *, axis: AxisInt | None=None, skipna: bool=True, **kwargs):\n    nv.validate_max((), kwargs)\n    nv.validate_minmax_axis(axis, self.ndim)\n    result = nanops.nanmax(self._ndarray, axis=axis, skipna=skipna)\n    return self._wrap_reduction_result(axis, result)",
    "docstring": "Return the maximum value of the Array or maximum along an axis. See Also -------- numpy.ndarray.max Index.max : Return the maximum value in an Index. Series.max : Return the maximum value in a Series.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\arrays\\datetimelike.py",
    "ast_data": "FunctionDef name:max arg:self arguments arg arg arg arg Call Call Assign Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "set_dir",
    "source_code": "def set_dir(d: Union[str, os.PathLike]) -> None:\n    global _hub_dir\n    _hub_dir = os.path.expanduser(d)",
    "docstring": "Optionally set the Torch Hub directory used to save downloaded models & weights. Args: d (str): path to a local folder to save downloaded models & weights.",
    "type": "function",
    "file_path": "pytorch\\torch\\hub.py",
    "ast_data": "FunctionDef name:set_dir arg:d arguments arg Assign Call"
  },
  {
    "library": "matplotlib",
    "name": "__init__",
    "source_code": "def __init__(self, ax, x=None, y=None, A=None, *, cmap=None, norm=None, colorizer=None, **kwargs):\n    super().__init__(ax, norm=norm, cmap=cmap, colorizer=colorizer)\n    self._internal_update(kwargs)\n    if A is not None:\n        self.set_data(x, y, A)",
    "docstring": "Parameters ---------- ax : The Axes the image will belong to. x, y : 1D array-like, optional Monotonic arrays of length N+1 and M+1, respectively, specifying rectangle boundaries. If not given, will default to `~numpy.ndarray~matplotlib.colors.Colormapimage.cmap~matplotlib.colors.Normalize~matplotlib.artist.Artist` properties",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\image.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:ax arg:x arg:y arg:A arguments arg arg arg arg arg arg arg arg arg Call Call Call If Compare Call"
  },
  {
    "library": "scikit-learn",
    "name": "set_params",
    "source_code": "def set_params(self, **kwargs):\n    estimator = kwargs.pop('estimator', None)\n    if estimator is not None:\n        self.estimator = estimator\n    if kwargs:\n        raise ValueError('You cannot set parameters of the inner estimator in a frozen estimator since calling `fit` has no effect. You can use `frozenestimator.estimator.set_params` to set parameters of the inner estimator.')",
    "docstring": "Set the parameters of this estimator. The only valid key here is . You cannot set the parameters of the inner estimator. Parameters ---------- **kwargs : dict Estimator parameters. Returns ------- self : FrozenEstimator This estimator.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\frozen\\_frozen.py",
    "ast_data": "FunctionDef name:set_params arg:self arguments arg arg Assign Call If Compare Assign If Raise Call"
  },
  {
    "library": "matplotlib",
    "name": "__init__",
    "source_code": "def __init__(self, forward, backward):\n    super().__init__()\n    self._forward = forward\n    self._backward = backward",
    "docstring": "Parameters ---------- forward, backward : callable The forward and backward transforms, taking ``.",
    "type": "method",
    "file_path": "matplotlib\\lib\\mpl_toolkits\\axisartist\\grid_finder.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:forward arg:backward arguments arg arg arg Call Call Assign Assign"
  },
  {
    "library": "tensorflow",
    "name": "_tf_sorted",
    "source_code": "def _tf_sorted(iterable, key, reverse):\n    if reverse is py_builtins.UNSPECIFIED:\n        direction = 'ASCENDING'\n    else:\n        direction = 'DESCENDING'\n    if key is not py_builtins.UNSPECIFIED:\n        mapped = parallel_ops.vectorized_map(key, iterable)\n        if mapped.shape.rank is not None and mapped.shape.rank != 1:\n            raise ValueError('sort only supports only 1D tensors')\n        with ops.control_dependencies([check_ops.assert_rank_v2(mapped, 1, 'sort only supports only 1D tensors')]):\n            order = sort_ops.argsort(mapped, direction=direction)\n            return array_ops.gather_v2(iterable, order)\n    if iterable.shape.rank is not None and iterable.shape.rank != 1:\n        raise ValueError('sort only supports only 1D tensors')\n    with ops.control_dependencies([check_ops.assert_rank_v2(iterable, 1, 'sort only supports only 1D tensors')]):\n        return sort_ops.sort(iterable, direction=direction)",
    "docstring": "Overload of sorted_ for Tensor iterable.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\autograph_ops.py",
    "ast_data": "FunctionDef name:_tf_sorted arg:iterable arg:key arg:reverse arguments arg arg arg If Compare Assign Assign If Compare Assign Call If BoolOp Compare Compare Raise Call With Call Call Assign Call Return return:yes Call If BoolOp Compare Compare Raise Call With Call Call Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "_normalize_json",
    "source_code": "def _normalize_json(data: Any, key_string: str, normalized_dict: dict[str, Any], separator: str) -> dict[str, Any]:\n    if isinstance(data, dict):\n        for key, value in data.items():\n            new_key = f'{key_string}{separator}{key}'\n            if not key_string:\n                new_key = new_key.removeprefix(separator)\n            _normalize_json(data=value, key_string=new_key, normalized_dict=normalized_dict, separator=separator)\n    else:\n        normalized_dict[key_string] = data\n    return normalized_dict",
    "docstring": "Main recursive function Designed for the most basic use case of pd.json_normalize(data) intended as a performance improvement, see #15621 Parameters ---------- data : Any Type dependent on types contained within nested Json key_string : str New key (with separator(s) in) for data normalized_dict : dict The new normalized/flattened Json dict separator : str, default '.' Nested records will generate names separated by sep, e.g., for sep='.', { 'foo' : { 'bar' : 0 } } -> foo.bar",
    "type": "function",
    "file_path": "pandas\\pandas\\io\\json\\_normalize.py",
    "ast_data": "FunctionDef name:_normalize_json arg:data arg:key_string arg:normalized_dict arg:separator arguments arg arg arg arg If Call For Call Assign If Assign Call Call Assign Return return:yes"
  },
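A self-contained re-implementation sketch of the recursion in `_normalize_json` above; `flatten` is a hypothetical name, and the public entry point remains pd.json_normalize:

```python
def flatten(data, key_string="", normalized=None, sep="."):
    # Mirror the recursion above: dictionaries recurse with an extended
    # key, anything else is written out under the accumulated key.
    normalized = {} if normalized is None else normalized
    if isinstance(data, dict):
        for key, value in data.items():
            new_key = f"{key_string}{sep}{key}"
            if not key_string:
                new_key = new_key.removeprefix(sep)
            flatten(value, new_key, normalized, sep)
    else:
        normalized[key_string] = data
    return normalized

print(flatten({"foo": {"bar": 0, "baz": {"qux": 1}}}))
# {'foo.bar': 0, 'foo.baz.qux': 1}
```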
  {
    "library": "tensorflow",
    "name": "is_traceback_filtering_enabled",
    "source_code": "@tf_export('debugging.is_traceback_filtering_enabled')\ndef is_traceback_filtering_enabled():\n    value = getattr(_ENABLE_TRACEBACK_FILTERING, 'value', True)\n    return value",
    "docstring": "Check whether traceback filtering is currently enabled. See also and . Note that filtering out internal frames from the tracebacks of exceptions raised by TensorFlow code is the default behavior. Returns: True if traceback filtering is enabled (e.g. if was called), and False otherwise (e.g. if was called).",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\util\\traceback_utils.py",
    "ast_data": "FunctionDef name:is_traceback_filtering_enabled arguments Assign Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, reduction=losses_utils.ReductionV2.AUTO, name='categorical_hinge'):\n    super().__init__(categorical_hinge, name=name, reduction=reduction)",
    "docstring": "Initializes instance. Args: reduction: Type of to apply to loss. Default value is . indicates that the reduction option will be determined by the usage context. For almost all cases this defaults to . When used with , outside of built-in training loops such as and , using or will raise an error. Please see this custom training [tutorial]( for more details. name: Optional name for the instance. Defaults to 'categorical_hinge'.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\losses.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:reduction arg:name arguments arg arg arg Call Call"
  },
  {
    "library": "tensorflow",
    "name": "sample_weights_mismatch",
    "source_code": "def sample_weights_mismatch(self):\n    return self.sample_weight_mode is not None and self.sample_weight is None or (self.sample_weight_mode is None and self.sample_weight is not None)",
    "docstring": "Check if the sample weight and the mode match or not.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\training_v1.py",
    "ast_data": "FunctionDef name:sample_weights_mismatch arg:self arguments arg Return return:yes BoolOp BoolOp Compare Compare BoolOp Compare Compare"
  },
  {
    "library": "django",
    "name": "cached_property",
    "source_code": "class cached_property:\n    name = None\n\n    @staticmethod\n    def func(instance):\n        raise TypeError('Cannot use cached_property instance without calling __set_name__() on it.')\n\n    def __init__(self, func):\n        self.real_func = func\n        self.__doc__ = getattr(func, '__doc__')\n\n    def __set_name__(self, owner, name):\n        if self.name is None:\n            self.name = name\n            self.func = self.real_func\n        elif name != self.name:\n            raise TypeError('Cannot assign the same cached_property to two different names (%r and %r).' % (self.name, name))\n\n    def __get__(self, instance, cls=None):\n        if instance is None:\n            return self\n        res = instance.__dict__[self.name] = self.func(instance)\n        return res",
    "docstring": "Decorator that converts a method with a single self argument into a property cached on the instance. A cached property can be made out of an existing method: (e.g. ``).",
    "type": "class",
    "file_path": "django\\django\\utils\\functional.py",
    "ast_data": "ClassDef name:cached_property Assign FunctionDef name:func arg:instance arguments arg Raise Call FunctionDef name:__init__ arg:self arg:func arguments arg arg Assign Assign Call FunctionDef name:__set_name__ arg:self arg:owner arg:name arguments arg arg arg If Compare Assign Assign If Compare Raise Call FunctionDef name:__get__ arg:self arg:instance arg:cls arguments arg arg arg If Compare Return return:yes Assign Call Return return:yes"
  },
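A small usage sketch of the descriptor above, assuming Django is installed; the `Report` class is illustrative only. Because `__get__` stores the result in the instance `__dict__`, later attribute lookups never reach the descriptor:

```python
from django.utils.functional import cached_property

class Report:
    @cached_property
    def total(self):
        print("computing...")
        return 40 + 2

r = Report()
print(r.total)                # "computing..." then 42
print(r.total)                # 42, no recomputation
print("total" in r.__dict__)  # True: the result now shadows the descriptor
```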
  {
    "library": "pandas",
    "name": "_putmask",
    "source_code": "def _putmask(self, mask: npt.NDArray[np.bool_], value) -> None:\n    if is_list_like(value):\n        val = value[mask]\n    else:\n        val = value\n    self[mask] = val",
    "docstring": "Analogue to np.putmask(self, mask, value) Parameters ---------- mask : np.ndarray[bool] value : scalar or listlike If listlike, must be arraylike with same length as self. Returns ------- None Notes ----- Unlike np.putmask, we do not repeat listlike values with mismatched length. 'value' should either be a scalar or an arraylike with the same length as self.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\arrays\\base.py",
    "ast_data": "FunctionDef name:_putmask arg:self arg:mask arg:value arguments arg arg arg If Call Assign Assign Assign"
  },
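A contrast sketch for `_putmask` above: NumPy's np.putmask tiles a short value array across the whole target, whereas the extension-array version aligns `value[mask]` and expects matching lengths:

```python
import numpy as np

a = np.arange(6)
mask = np.array([True, False, True, False, True, False])

# np.putmask repeats [10, 20] across all of `a`, so the masked slots at
# even flat indices all receive the tiled value 10.
np.putmask(a, mask, np.array([10, 20]))
print(a)  # [10  1 10  3 10  5]
```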
  {
    "library": "matplotlib",
    "name": "closest",
    "source_code": "def closest(self, x, y):\n    if self.direction == 'horizontal':\n        p_pts = np.array([self.ax.transData.transform((p, 0))[0] for p in self.positions])\n        dist = abs(p_pts - x)\n    else:\n        p_pts = np.array([self.ax.transData.transform((0, p))[1] for p in self.positions])\n        dist = abs(p_pts - y)\n    index = np.argmin(dist)\n    return (index, dist[index])",
    "docstring": "Return index and pixel distance to closest handle. Parameters ---------- x, y : float x, y position from which the distance will be calculated to determinate the closest handle Returns ------- index, distance : index of the handle and its distance from position x, y",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\widgets.py",
    "ast_data": "FunctionDef name:closest arg:self arg:x arg:y arguments arg arg arg If Compare Assign Call Call Assign Call Assign Call Call Assign Call Assign Call Return return:yes"
  },
  {
    "library": "pandas",
    "name": "remove_categories",
    "source_code": "def remove_categories(self, removals) -> Self:\n    from pandas import Index\n    if not is_list_like(removals):\n        removals = [removals]\n    removals = Index(removals).unique().dropna()\n    new_categories = self.dtype.categories.difference(removals, sort=False) if self.dtype.ordered is True else self.dtype.categories.difference(removals)\n    not_included = removals.difference(self.dtype.categories)\n    if len(not_included) != 0:\n        not_included = set(not_included)\n        raise ValueError(f'removals must all be in old categories: {not_included}')\n    return self.set_categories(new_categories, ordered=self.ordered, rename=False)",
    "docstring": "Remove the specified categories. The `` argument must be a subset of the current categories. Any values that were part of the removed categories will be set to NaN. Parameters ---------- removals : category or list of categories The categories which should be removed. Returns ------- Categorical Categorical with removed categories. Raises ------ ValueError If the removals are not contained in the categories See Also -------- rename_categories : Rename categories. reorder_categories : Reorder categories. add_categories : Add new categories. remove_unused_categories : Remove categories which are not used. set_categories : Set the categories to the specified ones. Examples -------- >>> c = pd.Categorical([\"a\", \"c\", \"b\", \"c\", \"d\"]) >>> c ['a', 'c', 'b', 'c', 'd'] Categories (4, object): ['a', 'b', 'c', 'd'] >>> c.remove_categories([\"d\", \"a\"]) [NaN, 'c', 'b', 'c', NaN] Categories (2, object): ['b', 'c']",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\arrays\\categorical.py",
    "ast_data": "FunctionDef name:remove_categories arg:self arg:removals arguments arg arg If Call Assign Assign Call Call Call Assign Compare Call Call Assign Call If Compare Call Assign Call Raise Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "non_deterministic_ints",
    "source_code": "def non_deterministic_ints(shape, dtype=dtypes.int64):\n    return gen_stateful_random_ops.non_deterministic_ints(shape=shape, dtype=dtype)",
    "docstring": "Non-deterministically generates some integers. This op may use some OS-provided source of non-determinism (e.g. an RNG), so each execution will give different results. Args: shape: the shape of the result. dtype: (optional) the dtype of the result. Returns: a tensor whose element values are non-deterministically chosen.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\stateful_random_ops.py",
    "ast_data": "FunctionDef name:non_deterministic_ints arg:shape arg:dtype arguments arg arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "ConverterError",
    "source_code": "class ConverterError(Exception):\n\n    def __init__(self, message):\n        super(ConverterError, self).__init__(message)\n        self.errors = []\n        self._parse_error_message(message)\n\n    def append_error(self, error_data: converter_error_data_pb2.ConverterErrorData):\n        self.errors.append(error_data)\n\n    def _parse_error_message(self, message):\n        error_code_mapping = {'Failed to functionalize Control Flow V1 ops. Consider using Control Flow V2 ops instead. See https://www.tensorflow.org/api_docs/python/tf/compat/v1/enable_control_flow_v2.': converter_error_data_pb2.ConverterErrorData.ERROR_UNSUPPORTED_CONTROL_FLOW_V1}\n        for pattern, error_code in error_code_mapping.items():\n            if pattern in message:\n                error_data = converter_error_data_pb2.ConverterErrorData()\n                error_data.error_message = message\n                error_data.error_code = error_code\n                self.append_error(error_data)\n                return",
    "docstring": "Raised when an error occurs during model conversion.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\lite\\python\\convert_phase.py",
    "ast_data": "ClassDef name:ConverterError FunctionDef name:__init__ arg:self arg:message arguments arg arg Call Call Assign Call FunctionDef name:append_error arg:self arg:error_data arguments arg arg Call FunctionDef name:_parse_error_message arg:self arg:message arguments arg arg Assign For Call If Compare Assign Call Assign Assign Call Return return:no"
  },
  {
    "library": "kornia",
    "name": "from_config",
    "source_code": "@staticmethod\n@abstractmethod\ndef from_config(config: ModelConfig) -> ModelBase[ModelConfig]:\n    raise NotImplementedError",
    "docstring": "Build/load the model. Args: config: The specifications for the model be build/loaded",
    "type": "method",
    "file_path": "kornia\\kornia\\contrib\\models\\base.py",
    "ast_data": "FunctionDef name:from_config arg:config arguments arg Raise"
  },
  {
    "library": "tensorflow",
    "name": "_resource_creator_scope",
    "source_code": "def _resource_creator_scope(self):\n    return None",
    "docstring": "Returns one or a list of ops.resource_creator_scope for some Strategy.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\distribute_lib.py",
    "ast_data": "FunctionDef name:_resource_creator_scope arg:self arguments arg Return return:no"
  },
  {
    "library": "django",
    "name": "escape_filter",
    "source_code": "@register.filter('escape', is_safe=True)\n@stringfilter\ndef escape_filter(value):\n    return conditional_escape(value)",
    "docstring": "Mark the value as a string that should be auto-escaped.",
    "type": "function",
    "file_path": "django\\django\\template\\defaultfilters.py",
    "ast_data": "FunctionDef name:escape_filter arg:value arguments arg Return return:yes Call Call"
  },
  {
    "library": "scipy",
    "name": "_TransposedLinearOperator",
    "source_code": "class _TransposedLinearOperator(LinearOperator):\n\n    def __init__(self, A):\n        shape = (A.shape[1], A.shape[0])\n        super().__init__(dtype=A.dtype, shape=shape)\n        self.A = A\n        self.args = (A,)\n\n    def _matvec(self, x):\n        return np.conj(self.A._rmatvec(np.conj(x)))\n\n    def _rmatvec(self, x):\n        return np.conj(self.A._matvec(np.conj(x)))\n\n    def _matmat(self, x):\n        return np.conj(self.A._rmatmat(np.conj(x)))\n\n    def _rmatmat(self, x):\n        return np.conj(self.A._matmat(np.conj(x)))",
    "docstring": "Transposition of arbitrary Linear Operator",
    "type": "class",
    "file_path": "scipy\\scipy\\sparse\\linalg\\_interface.py",
    "ast_data": "ClassDef name:_TransposedLinearOperator FunctionDef name:__init__ arg:self arg:A arguments arg arg Assign Call Call Assign Assign FunctionDef name:_matvec arg:self arg:x arguments arg arg Return return:yes Call Call Call FunctionDef name:_rmatvec arg:self arg:x arguments arg arg Return return:yes Call Call Call FunctionDef name:_matmat arg:self arg:x arguments arg arg Return return:yes Call Call Call FunctionDef name:_rmatmat arg:self arg:x arguments arg arg Return return:yes Call Call Call"
  },
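A numeric check of the conjugation identity used by `_TransposedLinearOperator` above: the transpose is recovered from the adjoint (`rmatvec`, i.e. A^H x) plus two conjugations:

```python
import numpy as np

rng = np.random.default_rng(0)
A = rng.normal(size=(4, 3)) + 1j * rng.normal(size=(4, 3))
x = rng.normal(size=4) + 1j * rng.normal(size=4)

# A.T @ x == conj(A^H @ conj(x)), matching _matvec above.
lhs = A.T @ x
rhs = np.conj(A.conj().T @ np.conj(x))
print(np.allclose(lhs, rhs))  # True
```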
  {
    "library": "tensorflow",
    "name": "sanitize_arg_name",
    "source_code": "def sanitize_arg_name(name: str) -> str:\n    swapped = ''.join([c if c.isalnum() else '_' for c in name])\n    result = swapped if swapped[0].isalpha() else 'arg_' + swapped\n    global sanitization_warnings_given\n    if name != result and sanitization_warnings_given < MAX_SANITIZATION_WARNINGS:\n        logging.warning('`%s` is not a valid tf.function parameter name. Sanitizing to `%s`.', name, result)\n        sanitization_warnings_given += 1\n    return result",
    "docstring": "Sanitizes function argument names. Matches Python symbol naming rules. Without sanitization, names that are not legal Python parameter names can be set which makes it challenging to represent callables supporting the named calling capability. Args: name: The name to sanitize. Returns: A string that meets Python parameter conventions.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\core\\function\\polymorphism\\function_type.py",
    "ast_data": "FunctionDef name:sanitize_arg_name arg:name arguments arg Assign Call Call Assign Call If BoolOp Compare Compare Call Return return:yes"
  },
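A standalone sketch of the sanitization rule in `sanitize_arg_name` above (without the warning bookkeeping); `sanitize` is a hypothetical name:

```python
def sanitize(name: str) -> str:
    # Replace every non-alphanumeric character with an underscore, then
    # prefix names that do not start with a letter so the result is a
    # valid Python parameter name.
    swapped = "".join(c if c.isalnum() else "_" for c in name)
    return swapped if swapped[0].isalpha() else "arg_" + swapped

print(sanitize("x-y"))    # x_y
print(sanitize("0th"))    # arg_0th
print(sanitize("valid"))  # valid
```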
  {
    "library": "matplotlib",
    "name": "setup",
    "source_code": "def setup(ax, title):\n    ax.yaxis.set_major_locator(ticker.NullLocator())\n    ax.spines[['left', 'right', 'top']].set_visible(False)\n    ax.xaxis.set_major_locator(ticker.MultipleLocator(1.0))\n    ax.xaxis.set_minor_locator(ticker.MultipleLocator(0.25))\n    ax.xaxis.set_ticks_position('bottom')\n    ax.tick_params(which='major', width=1.0, length=5)\n    ax.tick_params(which='minor', width=0.75, length=2.5, labelsize=10)\n    ax.set_xlim(0, 5)\n    ax.set_ylim(0, 1)\n    ax.text(0.0, 0.2, title, transform=ax.transAxes, fontsize=14, fontname='Monospace', color='tab:blue')",
    "docstring": "Set up common parameters for the Axes in the example.",
    "type": "function",
    "file_path": "matplotlib\\galleries\\users_explain\\axes\\axes_ticks.py",
    "ast_data": "FunctionDef name:setup arg:ax arg:title arguments arg arg Call Call Call Call Call Call Call Call Call Call Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_infer_fft_length_for_irfft",
    "source_code": "def _infer_fft_length_for_irfft(input_tensor, fft_rank):\n    fft_shape = input_tensor.get_shape()[-fft_rank:]\n    if not fft_shape.is_fully_defined():\n        fft_length = _array_ops_stack.unstack(_array_ops.shape(input_tensor)[-fft_rank:])\n        fft_length[-1] = _math_ops.maximum(0, 2 * (fft_length[-1] - 1))\n        return _array_ops_stack.stack(fft_length)\n    fft_length = fft_shape.as_list()\n    if fft_length:\n        fft_length[-1] = max(0, 2 * (fft_length[-1] - 1))\n    return _ops.convert_to_tensor(fft_length, _dtypes.int32)",
    "docstring": "Infers the argument for a IRFFT from .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\signal\\fft_ops.py",
    "ast_data": "FunctionDef name:_infer_fft_length_for_irfft arg:input_tensor arg:fft_rank arguments arg arg Assign Call If Call Assign Call Call Assign Call Return return:yes Call Assign Call If Assign Call Return return:yes Call"
  },
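A quick check of the length rule used by `_infer_fft_length_for_irfft` above, using NumPy's FFT for illustration:

```python
import numpy as np

signal = np.arange(8.0)
spectrum = np.fft.rfft(signal)
print(len(spectrum))  # 5 bins for a length-8 signal (n // 2 + 1)

# The inverse transform infers the original length as 2 * (bins - 1).
inferred = 2 * (len(spectrum) - 1)
print(inferred)  # 8
print(np.allclose(np.fft.irfft(spectrum, n=inferred), signal))  # True
```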
  {
    "library": "pandas",
    "name": "get_iterator",
    "source_code": "def get_iterator(self, data: NDFrameT) -> Iterator[tuple[Hashable, NDFrameT]]:\n    splitter = self._get_splitter(data)\n    keys = self.result_index\n    yield from zip(keys, splitter)",
    "docstring": "Groupby iterator Returns ------- Generator yielding sequence of (name, subsetted object) for each group",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\groupby\\ops.py",
    "ast_data": "FunctionDef name:get_iterator arg:self arg:data arguments arg arg Assign Call Assign Call"
  },
  {
    "library": "pytorch",
    "name": "_module_handle",
    "source_code": "@no_type_check\ndef _module_handle(state: _FSDPState, module: nn.Module) -> Optional['FlatParamHandle']:\n    if _is_composable(state):\n        if state._handle is None:\n            return None\n        assert module in state._fully_sharded_module_to_handle, f'Expects a fully sharded module but got {module} on rank {state.rank}'\n        return state._fully_sharded_module_to_handle[module]\n    else:\n        return module._handle",
    "docstring": "Returns the ``.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\fsdp\\_common_utils.py",
    "ast_data": "FunctionDef name:_module_handle arg:state arg:module arguments arg arg If Call If Compare Return return:no Compare Return return:yes Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "_draw_text_as_path",
    "source_code": "def _draw_text_as_path(self, gc, x, y, s, prop, angle, ismath):\n    text2path = self._text2path\n    fontsize = self.points_to_pixels(prop.get_size_in_points())\n    verts, codes = text2path.get_text_path(prop, s, ismath=ismath)\n    path = Path(verts, codes)\n    if self.flipy():\n        width, height = self.get_canvas_width_height()\n        transform = Affine2D().scale(fontsize / text2path.FONT_SCALE).rotate_deg(angle).translate(x, height - y)\n    else:\n        transform = Affine2D().scale(fontsize / text2path.FONT_SCALE).rotate_deg(angle).translate(x, y)\n    color = gc.get_rgb()\n    gc.set_linewidth(0.0)\n    self.draw_path(gc, path, transform, rgbFace=color)",
    "docstring": "Draw the text by converting them to paths using . This private helper supports the same parameters as ; setting *ismath* to \"TeX\" triggers TeX rendering.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\backend_bases.py",
    "ast_data": "FunctionDef name:_draw_text_as_path arg:self arg:gc arg:x arg:y arg:s arg:prop arg:angle arg:ismath arguments arg arg arg arg arg arg arg arg Assign Assign Call Call Assign Call Assign Call If Call Assign Call Assign Call Call Call Call Assign Call Call Call Call Assign Call Call Call"
  },
  {
    "library": "kornia",
    "name": "bilateral_blur",
    "source_code": "def bilateral_blur(input: Tensor, kernel_size: tuple[int, int] | int, sigma_color: float | Tensor, sigma_space: tuple[float, float] | Tensor, border_type: str='reflect', color_distance_type: str='l1') -> Tensor:\n    return _bilateral_blur(input, None, kernel_size, sigma_color, sigma_space, border_type, color_distance_type)",
    "docstring": "Blur a tensor using a Bilateral filter. .. image:: _static/img/bilateral_blur.png The operator is an edge-preserving image smoothing filter. The weight for each pixel in a neighborhood is determined not only by its distance to the center pixel, but also the difference in intensity or color. Arguments: input: the input tensor with shape :math:. kernel_size: the size of the kernel. sigma_color: the standard deviation for intensity/color Gaussian kernel. Smaller values preserve more edges. sigma_space: the standard deviation for spatial Gaussian kernel. This is similar to `gaussian_blur2d()(B, C, H, W)`. Examples: >>> input = torch.rand(2, 4, 5, 5) >>> output = bilateral_blur(input, (3, 3), 0.1, (1.5, 1.5)) >>> output.shape torch.Size([2, 4, 5, 5])",
    "type": "function",
    "file_path": "kornia\\kornia\\filters\\bilateral.py",
    "ast_data": "FunctionDef name:bilateral_blur arg:input arg:kernel_size arg:sigma_color arg:sigma_space arg:border_type arg:color_distance_type arguments arg arg arg arg arg arg Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "uninstall_repl_displayhook",
    "source_code": "def uninstall_repl_displayhook() -> None:\n    global _REPL_DISPLAYHOOK\n    if _REPL_DISPLAYHOOK is _ReplDisplayHook.IPYTHON:\n        from IPython import get_ipython\n        ip = get_ipython()\n        ip.events.unregister('post_execute', _draw_all_if_interactive)\n    _REPL_DISPLAYHOOK = _ReplDisplayHook.NONE",
    "docstring": "Disconnect from the display hook of the current shell.",
    "type": "function",
    "file_path": "matplotlib\\lib\\matplotlib\\pyplot.py",
    "ast_data": "FunctionDef name:uninstall_repl_displayhook arguments If Compare Assign Call Call Assign"
  },
  {
    "library": "tensorflow",
    "name": "_create_definition_if_needed",
    "source_code": "def _create_definition_if_needed(self):\n    with context.graph_mode():\n        self._create_definition_if_needed_impl()",
    "docstring": "Creates the function definition if it's not created yet.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\function.py",
    "ast_data": "FunctionDef name:_create_definition_if_needed arg:self arguments arg With Call Call"
  },
  {
    "library": "scipy",
    "name": "n_th_moment",
    "source_code": "def n_th_moment(n, beta, m):\n    A = (m / beta) ** m * np.exp(-beta ** 2 / 2.0)\n    B = m / beta - beta\n    rhs = 2 ** ((n - 1) / 2.0) * sc.gamma((n + 1) / 2) * (1.0 + (-1) ** n * sc.gammainc((n + 1) / 2, beta ** 2 / 2))\n    lhs = np.zeros(rhs.shape)\n    for k in range(int(n) + 1):\n        lhs += sc.binom(n, k) * B ** (n - k) * (-1) ** k / (m - k - 1) * (m / beta) ** (-m + k + 1)\n    return A * lhs + rhs",
    "docstring": "Returns n-th moment. Defined only if n+1 < m Function cannot broadcast due to the loop over n",
    "type": "method",
    "file_path": "scipy\\scipy\\stats\\_continuous_distns.py",
    "ast_data": "FunctionDef name:n_th_moment arg:n arg:beta arg:m arguments arg arg arg Assign Call Assign Assign Call Call Assign Call For Call Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_cardinality",
    "source_code": "def _cardinality(dataset):\n    if context.executing_eagerly():\n        with ops.device(dataset._variant_tensor.device):\n            return dataset.cardinality().numpy()\n    return cardinality_lib.UNKNOWN",
    "docstring": "Returns the cardinality of the dataset.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\input_lib.py",
    "ast_data": "FunctionDef name:_cardinality arg:dataset arguments arg If Call With Call Return return:yes Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "step",
    "source_code": "@_use_grad_for_differentiable\ndef step(self, closure=None):\n    self._cuda_graph_capture_health_check()\n    loss = None\n    if closure is not None:\n        with torch.enable_grad():\n            loss = closure()\n    for group in self.param_groups:\n        params_with_grad: list[Tensor] = []\n        grads: list[Tensor] = []\n        square_avgs: list[Tensor] = []\n        acc_deltas: list[Tensor] = []\n        state_steps: list[Tensor] = []\n        lr, rho, eps, weight_decay, foreach, maximize, differentiable, capturable = (group['lr'], group['rho'], group['eps'], group['weight_decay'], group['foreach'], group['maximize'], group['differentiable'], group['capturable'])\n        has_complex = self._init_group(group, params_with_grad, grads, square_avgs, acc_deltas, state_steps)\n        adadelta(params_with_grad, grads, square_avgs, acc_deltas, state_steps, lr=lr, rho=rho, eps=eps, weight_decay=weight_decay, foreach=foreach, maximize=maximize, differentiable=differentiable, capturable=capturable, has_complex=has_complex)\n    return loss",
    "docstring": "Perform a single optimization step. Args: closure (Callable, optional): A closure that reevaluates the model and returns the loss.",
    "type": "method",
    "file_path": "pytorch\\torch\\optim\\adadelta.py",
    "ast_data": "FunctionDef name:step arg:self arg:closure arguments arg arg Call Assign If Compare With Call Assign Call For Assign Assign Call Call Return return:yes"
  },
  {
    "library": "scrapy",
    "name": "StreamLogger",
    "source_code": "class StreamLogger:\n\n    def __init__(self, logger: logging.Logger, log_level: int=logging.INFO):\n        self.logger: logging.Logger = logger\n        self.log_level: int = log_level\n        self.linebuf: str = ''\n\n    def write(self, buf: str) -> None:\n        for line in buf.rstrip().splitlines():\n            self.logger.log(self.log_level, line.rstrip())\n\n    def flush(self) -> None:\n        for h in self.logger.handlers:\n            h.flush()",
    "docstring": "Fake file-like stream object that redirects writes to a logger instance Taken from:",
    "type": "class",
    "file_path": "scrapy\\scrapy\\utils\\log.py",
    "ast_data": "ClassDef name:StreamLogger FunctionDef name:__init__ arg:self arg:logger arg:log_level arguments arg arg arg FunctionDef name:write arg:self arg:buf arguments arg arg For Call Call Call Call FunctionDef name:flush arg:self arguments arg For Call"
  },
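A minimal usage sketch for the `StreamLogger` entry above, assuming a standard Scrapy install: swapping `sys.stdout` for a `StreamLogger` routes `print()` output through the logging system (the root handler writes to stderr by default, so no feedback loop occurs).

```python
import logging
import sys

from scrapy.utils.log import StreamLogger

logging.basicConfig(level=logging.INFO)
# Redirect writes to stdout into the 'stdout' logger at INFO level.
sys.stdout = StreamLogger(logging.getLogger("stdout"), logging.INFO)
print("this line is emitted through the logger, not the raw stream")
```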
  {
    "library": "django",
    "name": "__bool__",
    "source_code": "def __bool__(self):\n    return bool(self.children)",
    "docstring": "Return whether or not this node has children.",
    "type": "method",
    "file_path": "django\\django\\utils\\tree.py",
    "ast_data": "FunctionDef name:__bool__ arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "kornia",
    "name": "frame_dst",
    "source_code": "@property\ndef frame_dst(self) -> str:\n    return self._frame_dst",
    "docstring": "Name of the destination frame.",
    "type": "method",
    "file_path": "kornia\\kornia\\geometry\\pose.py",
    "ast_data": "FunctionDef name:frame_dst arg:self arguments arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "Hardshrink",
    "source_code": "class Hardshrink(Module):\n    __constants__ = ['lambd']\n    lambd: float\n\n    def __init__(self, lambd: float=0.5) -> None:\n        super().__init__()\n        self.lambd = lambd\n\n    def forward(self, input: Tensor) -> Tensor:\n        return F.hardshrink(input, self.lambd)\n\n    def extra_repr(self) -> str:\n        return f'{self.lambd}'",
    "docstring": "Applies the Hard Shrinkage (Hardshrink) function element-wise. Hardshrink is defined as: .. math:: \\text{HardShrink}(x) = \\begin{cases} x, & \\text{ if } x > \\lambda \\\\ x, & \\text{ if } x >> m = nn.Hardshrink() >>> input = torch.randn(2) >>> output = m(input)",
    "type": "class",
    "file_path": "pytorch\\torch\\nn\\modules\\activation.py",
    "ast_data": "ClassDef name:Hardshrink Assign FunctionDef name:__init__ arg:self arg:lambd arguments arg arg Call Call Assign FunctionDef name:forward arg:self arg:input arguments arg arg Return return:yes Call FunctionDef name:extra_repr arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "eager_restore",
    "source_code": "def eager_restore(self, trackable):\n    assert context.executing_eagerly()\n    for saveable in self.globally_named_object_attributes(trackable):\n        restored_tensors = []\n        tensor_missing = False\n        for spec in saveable.specs:\n            if spec.name in self.dtype_map:\n                with ops.device('cpu:0'):\n                    restored, = io_ops.restore_v2(prefix=self.save_path, tensor_names=[spec.name], shape_and_slices=[''], dtypes=[self.dtype_map[spec.name]], name='%s_checkpoint_read' % (spec.name,))\n                restored_tensors.append(array_ops.identity(restored))\n            else:\n                tensor_missing = True\n        if tensor_missing:\n            self.unused_attributes.setdefault(trackable, []).append(saveable.name)\n        else:\n            saveable.restore(restored_tensors=restored_tensors, restored_shapes=None)",
    "docstring": "Runs restore ops for 's attributes.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\checkpoint\\checkpoint.py",
    "ast_data": "FunctionDef name:eager_restore arg:self arg:trackable arguments arg arg Call For Call Assign Assign For If Compare With Call Assign Call Call Call Assign If Call Call Call"
  },
  {
    "library": "scipy",
    "name": "_fftconv_faster",
    "source_code": "def _fftconv_faster(x, h, mode):\n    fft_ops, direct_ops = _conv_ops(x.shape, h.shape, mode)\n    offset = -0.001 if x.ndim == 1 else -0.0001\n    constants = {'valid': (1.89095737e-09, 2.1364985e-10, offset), 'full': (1.764907e-09, 2.1414831e-10, offset), 'same': (3.2646654e-09, 2.8478277e-10, offset) if h.size <= x.size else (3.21635404e-09, 1.1773253e-08, -1e-05)} if x.ndim == 1 else {'valid': (1.85927e-09, 2.11242e-08, offset), 'full': (1.99817e-09, 1.66174e-08, offset), 'same': (2.04735e-09, 1.55367e-08, offset)}\n    O_fft, O_direct, O_offset = constants[mode]\n    return O_fft * fft_ops < O_direct * direct_ops + O_offset",
    "docstring": "See if using fftconvolve or convolve is faster. Parameters ---------- x : np.ndarray Signal h : np.ndarray Kernel mode : str Mode passed to convolve Returns ------- fft_faster : bool Notes ----- See docstring of for details on tuning hardware. See pull request 11031 for more detail:",
    "type": "function",
    "file_path": "scipy\\scipy\\signal\\_signaltools.py",
    "ast_data": "FunctionDef name:_fftconv_faster arg:x arg:h arg:mode arguments arg arg arg Assign Call Assign Compare Assign Compare Compare Assign Return return:yes Compare"
  },
  {
    "library": "django",
    "name": "get_safe_request_meta",
    "source_code": "def get_safe_request_meta(self, request):\n    if not hasattr(request, 'META'):\n        return {}\n    return {k: self.cleanse_setting(k, v) for k, v in request.META.items()}",
    "docstring": "Return a dictionary of request.META with sensitive values redacted.",
    "type": "method",
    "file_path": "django\\django\\views\\debug.py",
    "ast_data": "FunctionDef name:get_safe_request_meta arg:self arg:request arguments arg arg If Call Return return:no Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "get_config",
    "source_code": "def get_config(self):\n    from tensorflow.python.feature_column.serialization import serialize_feature_column\n    config = dict(zip(self._fields, self))\n    config['categorical_column'] = serialize_feature_column(self.categorical_column)\n    return config",
    "docstring": "See 'FeatureColumn` base class.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\feature_column\\feature_column_v2.py",
    "ast_data": "FunctionDef name:get_config arg:self arguments arg Assign Call Call Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "to_float",
    "source_code": "@tf_export(v1=['to_float'])\n@dispatch.register_unary_elementwise_api\n@dispatch.add_dispatch_support\n@deprecation.deprecated(date=None, instructions='Use `tf.cast` instead.')\ndef to_float(x, name='ToFloat'):\n    return cast(x, dtypes.float32, name=name)",
    "docstring": "Casts a tensor to type . Args: x: A or or . name: A name for the operation (optional). Returns: A or or with same shape as with type . Raises: TypeError: If cannot be cast to the . @compatibility(TF2) This name was deprecated and removed in TF2, but has an exact replacement . There are no further issues with eager execution or tf.function. Before: >>> tf.compat.v1.to_float(tf.constant(3.14, dtype=tf.double)) After: >>> tf.cast(tf.constant(3.14, dtype=tf.double), tf.float32) @end_compatibility",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\math_ops.py",
    "ast_data": "FunctionDef name:to_float arg:x arg:name arguments arg arg Return return:yes Call Call Call"
  },
  {
    "library": "numpy",
    "name": "_isurl",
    "source_code": "def _isurl(self, path):\n    from urllib.parse import urlparse\n    scheme, netloc, upath, uparams, uquery, ufrag = urlparse(path)\n    return bool(scheme and netloc)",
    "docstring": "Test if path is a net location. Tests the scheme and netloc.",
    "type": "method",
    "file_path": "numpy\\numpy\\lib\\_datasource.py",
    "ast_data": "FunctionDef name:_isurl arg:self arg:path arguments arg arg Assign Call Return return:yes Call BoolOp"
  },
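The check in `_isurl` boils down to "both a scheme and a network location are present". A standalone sketch of the same test, using only the standard library:

```python
from urllib.parse import urlparse

def is_url(path: str) -> bool:
    # A path counts as a net location only if urlparse finds
    # both a scheme (e.g. 'https') and a netloc (e.g. 'example.com').
    parts = urlparse(path)
    return bool(parts.scheme and parts.netloc)

print(is_url("https://example.com/data.csv"))  # True
print(is_url("/tmp/data.csv"))                 # False
```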
  {
    "library": "django",
    "name": "exceeds_maximum_length_ratio",
    "source_code": "def exceeds_maximum_length_ratio(password, max_similarity, value):\n    pwd_len = len(password)\n    length_bound_similarity = max_similarity / 2 * pwd_len\n    value_len = len(value)\n    return pwd_len >= 10 * value_len and value_len < length_bound_similarity",
    "docstring": "Test that value is within a reasonable range of password. The following ratio calculations are based on testing SequenceMatcher like this: for i in range(0,6): print(10**i, SequenceMatcher(a='A', b='A'*(10**i)).quick_ratio()) which yields: 1 1.0 10 0.18181818181818182 100 0.019801980198019802 1000 0.001998001998001998 10000 0.00019998000199980003 100000 1.999980000199998e-05 This means a length_ratio of 10 should never yield a similarity higher than 0.2, for 100 this is down to 0.02 and for 1000 it is 0.002. This can be calculated via 2 / length_ratio. As a result we avoid the potentially expensive sequence matching.",
    "type": "function",
    "file_path": "django\\django\\contrib\\auth\\password_validation.py",
    "ast_data": "FunctionDef name:exceeds_maximum_length_ratio arg:password arg:max_similarity arg:value arguments arg arg arg Assign Call Assign Assign Call Return return:yes BoolOp Compare Compare"
  },
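The docstring above derives its 2 / length_ratio bound from a small SequenceMatcher experiment; this sketch reruns that experiment verbatim so the falloff can be observed directly:

```python
from difflib import SequenceMatcher

# quick_ratio() falls off roughly as 2 / length_ratio as one string
# grows: ~0.18 at ratio 10, ~0.02 at ratio 100, and so on.
for i in range(0, 6):
    ratio = SequenceMatcher(a="A", b="A" * (10 ** i)).quick_ratio()
    print(10 ** i, ratio)
```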
  {
    "library": "tensorflow",
    "name": "spec",
    "source_code": "@property\ndef spec(self):\n    full_shape_str = ' '.join(('%d' % d for d in self.full_shape)) + ' '\n    sl_spec = ':'.join(('%d,%d' % (o, s) for o, s in zip(self.var_offset, self.var_shape)))\n    return full_shape_str + sl_spec",
    "docstring": "Computes the spec string used for saving.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\variables.py",
    "ast_data": "FunctionDef name:spec arg:self arguments arg Assign Call Assign Call Call Return return:yes"
  },
  {
    "library": "django",
    "name": "has_leading_dir",
    "source_code": "def has_leading_dir(self, paths):\n    common_prefix = None\n    for path in paths:\n        prefix, rest = self.split_leading_dir(path)\n        if not prefix:\n            return False\n        elif common_prefix is None:\n            common_prefix = prefix\n        elif prefix != common_prefix:\n            return False\n    return True",
    "docstring": "Return True if all the paths have the same leading path name (i.e., everything is in one subdirectory in an archive).",
    "type": "method",
    "file_path": "django\\django\\utils\\archive.py",
    "ast_data": "FunctionDef name:has_leading_dir arg:self arg:paths arguments arg arg Assign For Assign Call If Return return:yes If Compare Assign If Compare Return return:yes Return return:yes"
  },
  {
    "library": "scipy",
    "name": "pre_order",
    "source_code": "def pre_order(self, func=lambda x: x.id):\n    n = self.count\n    curNode = [None] * (2 * n)\n    lvisited = set()\n    rvisited = set()\n    curNode[0] = self\n    k = 0\n    preorder = []\n    while k >= 0:\n        nd = curNode[k]\n        ndid = nd.id\n        if nd.is_leaf():\n            preorder.append(func(nd))\n            k = k - 1\n        elif ndid not in lvisited:\n            curNode[k + 1] = nd.left\n            lvisited.add(ndid)\n            k = k + 1\n        elif ndid not in rvisited:\n            curNode[k + 1] = nd.right\n            rvisited.add(ndid)\n            k = k + 1\n        else:\n            k = k - 1\n    return preorder",
    "docstring": "Perform pre-order traversal without recursive function calls. When a leaf node is first encountered, ``. If not provided, the index of the original observation to which the node corresponds is used. Returns ------- L : list The pre-order traversal.",
    "type": "method",
    "file_path": "scipy\\scipy\\cluster\\hierarchy.py",
    "ast_data": "FunctionDef name:pre_order arg:self arg:func arguments arg arg arguments arg Assign Assign Assign Call Assign Call Assign Assign Assign While Compare Assign Assign If Call Call Call Assign If Compare Assign Call Assign If Compare Assign Call Assign Assign Return return:yes"
  },
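A short sketch of `pre_order` through SciPy's public API, with hypothetical toy data: build a linkage tree, convert it to `ClusterNode` objects, and collect the leaf ids in pre-order:

```python
import numpy as np
from scipy.cluster.hierarchy import linkage, to_tree

X = np.random.default_rng(0).random((5, 2))  # 5 toy observations
root = to_tree(linkage(X, method="single"))
# The default func returns each leaf's id, i.e. the original observation index.
print(root.pre_order())
```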
  {
    "library": "tensorflow",
    "name": "cluster_resolver",
    "source_code": "@property\ndef cluster_resolver(self):\n    return self.extended._tpu_cluster_resolver",
    "docstring": "Returns the cluster resolver associated with this strategy. provides the associated . If the user provides one in , that instance is returned; if the user does not, a default is provided.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\tpu_strategy.py",
    "ast_data": "FunctionDef name:cluster_resolver arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "matrix_rank",
    "source_code": "@tf_export('linalg.matrix_rank')\n@dispatch.add_dispatch_support\ndef matrix_rank(a, tol=None, validate_args=False, name=None):\n    with ops.name_scope(name or 'matrix_rank'):\n        a = ops.convert_to_tensor(a, dtype_hint=dtypes.float32, name='a')\n        assertions = _maybe_validate_matrix(a, validate_args)\n        if assertions:\n            with ops.control_dependencies(assertions):\n                a = array_ops.identity(a)\n        s = svd(a, compute_uv=False)\n        if tol is None:\n            if a.shape[-2:].is_fully_defined():\n                m = np.max(a.shape[-2:].as_list())\n            else:\n                m = math_ops.reduce_max(array_ops.shape(a)[-2:])\n            eps = np.finfo(a.dtype.as_numpy_dtype).eps\n            tol = eps * math_ops.cast(m, a.dtype) * math_ops.reduce_max(s, axis=-1, keepdims=True)\n        return math_ops.reduce_sum(math_ops.cast(s > tol, dtypes.int32), axis=-1)",
    "docstring": "Compute the matrix rank of one or more matrices. Args: a: (Batch of) -like matrix-shaped (s) which are to be pseudo-inverted. tol: Threshold below which the singular value is counted as 'zero'. Default value: (i.e., ). validate_args: When , additional assertions might be embedded in the graph. Default value: (i.e., no graph assertions are added). name: Python prefixed to ops created by this function. Default value: 'matrix_rank'. Returns: matrix_rank: (Batch of) scalars representing the number of non-zero singular values.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\linalg\\linalg_impl.py",
    "ast_data": "FunctionDef name:matrix_rank arg:a arg:tol arg:validate_args arg:name arguments arg arg arg arg With Call BoolOp Assign Call Assign Call If With Call Assign Call Assign Call If Compare If Call Assign Call Call Assign Call Call Assign Call Assign Call Call Return return:yes Call Call Compare Call"
  },
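A quick sketch of `tf.linalg.matrix_rank` on a deliberately rank-deficient matrix, assuming TensorFlow 2.x eager execution:

```python
import tensorflow as tf

# The second row is exactly twice the first, so the rank is 1, not 2.
a = tf.constant([[1.0, 2.0], [2.0, 4.0]])
print(tf.linalg.matrix_rank(a).numpy())  # -> 1
```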
  {
    "library": "tensorflow",
    "name": "_convert_tf2_model",
    "source_code": "def _convert_tf2_model(flags):\n    if flags.saved_model_dir:\n        converter = lite.TFLiteConverterV2.from_saved_model(flags.saved_model_dir, signature_keys=_parse_array(flags.saved_model_signature_key), tags=_parse_set(flags.saved_model_tag_set))\n    elif flags.keras_model_file:\n        model = keras_deps.get_load_model_function()(flags.keras_model_file)\n        converter = lite.TFLiteConverterV2.from_keras_model(model)\n    converter.experimental_new_converter = flags.experimental_new_converter\n    if flags.experimental_new_quantizer is not None:\n        converter.experimental_new_quantizer = flags.experimental_new_quantizer\n    tflite_model = converter.convert()\n    with gfile.GFile(flags.output_file, 'wb') as f:\n        f.write(tflite_model)",
    "docstring": "Calls function to convert the TensorFlow 2.0 model into a TFLite model. Args: flags: argparse.Namespace object. Raises: ValueError: Unsupported file format.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\lite\\python\\tflite_convert.py",
    "ast_data": "FunctionDef name:_convert_tf2_model arg:flags arguments arg If Assign Call Call Call If Assign Call Call Assign Call Assign If Compare Assign Assign Call With Call Call"
  },
  {
    "library": "pytorch",
    "name": "_get_share_qparams_ops_configs",
    "source_code": "def _get_share_qparams_ops_configs() -> list[BackendPatternConfig]:\n    observation_type = ObservationType.OUTPUT_SHARE_OBSERVER_WITH_INPUT\n    dtype_configs = [qnnpack_default_op_qint8_symmetric_dtype_config, executorch_default_op_quint8_dtype_config]\n    share_qparams_ops = [torch.nn.Flatten, F.adaptive_avg_pool2d, F.elu, F.hardtanh, F.max_pool2d, F.pad, F.relu, F.relu6, F.leaky_relu, F.leaky_relu_, torch.nn.AdaptiveAvgPool2d, torch.nn.ConstantPad2d, torch.nn.ELU, torch.nn.MaxPool2d, torch.nn.ReLU6, torch.nn.Hardtanh, torch.nn.LeakyReLU, torch.clamp, torch.flatten, torch.mean, torch.permute, torch.permute_copy, torch.squeeze, 'clamp', 'mean', 'permute', 'reshape', 'relu', 'relu_', 'squeeze', 'squeeze_', 'leaky_relu']\n    share_qparams_op_configs: list[BackendPatternConfig] = [BackendPatternConfig(op).set_observation_type(observation_type).set_dtype_configs(dtype_configs) for op in share_qparams_ops]\n    return share_qparams_op_configs",
    "docstring": "Return the operator configs for the operators that works for both float and quantized input if input is quantized, the output Tensor shares the same quantization parameter with input. Example operator: avgpool2d, reshape, transpose, maxpool2d Example observed operator: observer_0 - avgpool2d - observer_0 (same observer instance as input)",
    "type": "function",
    "file_path": "pytorch\\torch\\ao\\quantization\\backend_config\\executorch.py",
    "ast_data": "FunctionDef name:_get_share_qparams_ops_configs arguments Assign Assign Assign Call Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "run_feed_keys_info",
    "source_code": "@property\ndef run_feed_keys_info(self):\n    output = self._run_feed_keys_info\n    return output[0] if len(output) == 1 else output",
    "docstring": "Get a str representation of the feed_dict used in the Session.run() call. Returns: If the information is available from one call, a obtained from . If the information is available from multiple calls, a of obtained from . If the information is not available, .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\debug\\lib\\debug_data.py",
    "ast_data": "FunctionDef name:run_feed_keys_info arg:self arguments arg Assign Return return:yes Compare Call"
  },
  {
    "library": "pytorch",
    "name": "FxNetMinimizerBadModuleError",
    "source_code": "@compatibility(is_backward_compatible=False)\nclass FxNetMinimizerBadModuleError(Exception):\n    pass",
    "docstring": "Raised if failed to split out a minimize module",
    "type": "class",
    "file_path": "pytorch\\torch\\fx\\passes\\net_min_base.py",
    "ast_data": "ClassDef name:FxNetMinimizerBadModuleError Call"
  },
  {
    "library": "matplotlib",
    "name": "_set_view",
    "source_code": "def _set_view(self, view):\n    self.set(**view)",
    "docstring": "Apply a previously saved view. This method is called when restoring a view (with the return value of :meth: as argument), such as with the navigation buttons. Subclasses that override :meth: also need to override this method accordingly.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\axes\\_base.py",
    "ast_data": "FunctionDef name:_set_view arg:self arg:view arguments arg arg Call"
  },
  {
    "library": "tensorflow",
    "name": "_maybe_colocate_with",
    "source_code": "@contextlib.contextmanager\ndef _maybe_colocate_with(op: ops.Operation, gradient_uid, colocate_gradients_with_ops):\n    if colocate_gradients_with_ops:\n        with ops._colocate_with_for_gradient(op, gradient_uid):\n            yield\n    else:\n        yield",
    "docstring": "Context to colocate with if .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\gradients_util.py",
    "ast_data": "FunctionDef name:_maybe_colocate_with arg:op arg:gradient_uid arg:colocate_gradients_with_ops arguments arg arg arg If With Call"
  },
  {
    "library": "scikit-learn",
    "name": "biclusters_",
    "source_code": "@property\ndef biclusters_(self):\n    return (self.rows_, self.columns_)",
    "docstring": "Convenient way to get row and column indicators together. Returns the `` members.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\base.py",
    "ast_data": "FunctionDef name:biclusters_ arg:self arguments arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "register_global_feedback",
    "source_code": "def register_global_feedback(self, input_nodes: list[Any], choices: list[ChoiceCaller]) -> None:\n    from torch._inductor.select_algorithm import add_feedback_saver, create_inputs_key, create_precompile_key\n\n    def store_global_feedback(ah_inputs_key: str, ah_precompile_key: str, timings: dict[ChoiceCaller, float], name: str, input_nodes: list[Any], choices: list[ChoiceCaller]) -> None:\n        current_inputs_key = create_inputs_key(input_nodes)\n        if current_inputs_key != ah_inputs_key:\n            return\n        current_precompile_key = create_precompile_key(name, current_inputs_key, choices)\n        if current_precompile_key != ah_precompile_key:\n            return\n        for choice, time in timings.items():\n            self.save_data(choice.autoheuristic_id(), time)\n    inputs_key = create_inputs_key(input_nodes)\n    precompile_key = create_precompile_key(self.name, inputs_key, choices)\n    feedback_saver = partial(store_global_feedback, inputs_key, precompile_key)\n    add_feedback_saver(feedback_saver)",
    "docstring": "Registers a callback in select_algorithm, which is called with the timing of each choice.",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\autoheuristic\\autoheuristic.py",
    "ast_data": "FunctionDef name:register_global_feedback arg:self arg:input_nodes arg:choices arguments arg arg arg FunctionDef name:store_global_feedback arg:ah_inputs_key arg:ah_precompile_key arg:timings arg:name arg:input_nodes arg:choices arguments arg arg arg arg arg arg Assign Call If Compare Return return:no Assign Call If Compare Return return:no For Call Call Call Assign Call Assign Call Assign Call Call"
  },
  {
    "library": "tensorflow",
    "name": "list_physical_devices",
    "source_code": "@tf_export('config.list_physical_devices', 'config.experimental.list_physical_devices')\n@deprecation.deprecated_endpoints('config.experimental.list_physical_devices')\ndef list_physical_devices(device_type=None):\n    return context.context().list_physical_devices(device_type)",
    "docstring": "Return a list of physical devices visible to the host runtime. Physical devices are hardware devices present on the host machine. By default all discovered CPU and GPU devices are considered visible. This API allows querying the physical hardware resources prior to runtime initialization. Thus, giving an opportunity to call any additional configuration APIs. This is in contrast to , which triggers runtime initialization in order to list the configured devices. The following example lists the number of visible GPUs on the host. >>> physical_devices = tf.config.list_physical_devices('GPU') >>> print(\"Num GPUs:\", len(physical_devices)) Num GPUs: ... However, the number of GPUs available to the runtime may change during runtime initialization due to marking certain devices as not visible or configuring multiple logical devices. Args: device_type: (optional string) Only include devices matching this device type. For example \"CPU\" or \"GPU\". Notes: 1. If provided with any numerical values or any string other than supported device type such as 'CPU' it returns an empty list instead of raising error. 2. For default value it returns all physical devices Returns: List of discovered objects",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\config.py",
    "ast_data": "FunctionDef name:list_physical_devices arg:device_type arguments arg Return return:yes Call Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "generate_chrome_trace_format",
    "source_code": "def generate_chrome_trace_format(self, show_dataflow: bool=True, show_memory: bool=False, op_time: str='schedule') -> str:\n    step_stats_analysis = self.analyze_step_stats(show_dataflow=show_dataflow, show_memory=show_memory, op_time=op_time)\n    return step_stats_analysis.chrome_trace.format_to_string(pretty=True)",
    "docstring": "Produces a trace in Chrome Trace Format. Args: show_dataflow: (Optional.) If True, add flow events to the trace connecting producers and consumers of tensors. show_memory: (Optional.) If True, add object snapshot events to the trace showing the sizes and lifetimes of tensors. op_time: (Optional.) How the execution time of op is shown in timeline. Possible values are \"schedule\", \"gpu\" and \"all\". \"schedule\" will show op from the time it is scheduled to the end of the scheduling. Notice by the end of its scheduling its async kernels may not start yet. It is shown using the default value from step_stats. \"gpu\" will show op with the execution time of its kernels on GPU. \"all\" will show op from the start of its scheduling to the end of its last kernel. Returns: A JSON formatted string in Chrome Trace format.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\client\\timeline.py",
    "ast_data": "FunctionDef name:generate_chrome_trace_format arg:self arg:show_dataflow arg:show_memory arg:op_time arguments arg arg arg arg Assign Call Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "_ell",
    "source_code": "def _ell(A, m):\n    if len(A.shape) != 2 or A.shape[0] != A.shape[1]:\n        raise ValueError('expected A to be like a square matrix')\n    c_i = {3: 100800.0, 5: 10059033600.0, 7: 4487938430976000.0, 9: 5.914384781877412e+21, 13: 1.1325077560602111e+35}\n    abs_c_recip = c_i[m]\n    u = 2 ** (-53)\n    A_abs_onenorm = _onenorm_matrix_power_nnm(abs(A), 2 * m + 1)\n    if not A_abs_onenorm:\n        return 0\n    alpha = A_abs_onenorm / (_onenorm(A) * abs_c_recip)\n    log2_alpha_div_u = np.log2(alpha / u)\n    value = int(np.ceil(log2_alpha_div_u / (2 * m)))\n    return max(value, 0)",
    "docstring": "A helper function for expm_2009. Parameters ---------- A : linear operator A linear operator whose norm of power we care about. m : int The power of the linear operator Returns ------- value : int A value related to a bound.",
    "type": "function",
    "file_path": "scipy\\scipy\\sparse\\linalg\\_matfuncs.py",
    "ast_data": "FunctionDef name:_ell arg:A arg:m arguments arg arg If BoolOp Compare Call Compare Raise Call Assign Assign Assign Assign Call Call If Return return:yes Assign Call Assign Call Assign Call Call Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "NotebookFormatter",
    "source_code": "class NotebookFormatter(HTMLFormatter):\n\n    def _get_formatted_values(self) -> dict[int, list[str]]:\n        return {i: self.fmt.format_col(i) for i in range(self.ncols)}\n\n    def _get_columns_formatted_values(self) -> list[str]:\n        return self.columns._format_flat(include_name=False)\n\n    def write_style(self) -> None:\n        template_first = '            <style scoped>'\n        template_last = '            </style>'\n        template_select = '                .dataframe %s {\\n                    %s: %s;\\n                }'\n        element_props = [('tbody tr th:only-of-type', 'vertical-align', 'middle'), ('tbody tr th', 'vertical-align', 'top')]\n        if isinstance(self.columns, MultiIndex):\n            element_props.append(('thead tr th', 'text-align', 'left'))\n            if self.show_row_idx_names:\n                element_props.append(('thead tr:last-of-type th', 'text-align', 'right'))\n        else:\n            element_props.append(('thead th', 'text-align', 'right'))\n        template_mid = '\\n\\n'.join((template_select % t for t in element_props))\n        template = dedent(f'{template_first}\\n{template_mid}\\n{template_last}')\n        self.write(template)\n\n    def render(self) -> list[str]:\n        self.write('<div>')\n        self.write_style()\n        super().render()\n        self.write('</div>')\n        return self.elements",
    "docstring": "Internal class for formatting output data in html for display in Jupyter Notebooks. This class is intended for functionality specific to DataFrame._repr_html_() and DataFrame.to_html(notebook=True)",
    "type": "class",
    "file_path": "pandas\\pandas\\io\\formats\\html.py",
    "ast_data": "ClassDef name:NotebookFormatter FunctionDef name:_get_formatted_values arg:self arguments arg Return return:yes Call Call FunctionDef name:_get_columns_formatted_values arg:self arguments arg Return return:yes Call FunctionDef name:write_style arg:self arguments arg Assign Assign Assign Assign If Call Call If Call Call Assign Call Assign Call Call FunctionDef name:render arg:self arguments arg Call Call Call Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_flip",
    "source_code": "def _flip(image, flip_index, scope_name):\n    with ops.name_scope(None, scope_name, [image]):\n        image = ops.convert_to_tensor(image, name='image')\n        image = _AssertAtLeast3DImage(image)\n        shape = image.get_shape()\n\n        def f_rank3():\n            return fix_image_flip_shape(image, array_ops.reverse(image, [flip_index]))\n\n        def f_rank4():\n            return array_ops.reverse(image, [flip_index + 1])\n        if shape.ndims is None:\n            rank = array_ops.rank(image)\n            return tf_cond.cond(math_ops.equal(rank, 3), f_rank3, f_rank4)\n        elif shape.ndims == 3:\n            return f_rank3()\n        elif shape.ndims == 4:\n            return f_rank4()\n        else:\n            raise ValueError(\"'image' (shape %s)must have either 3 or 4 dimensions.\" % shape)",
    "docstring": "Flip an image either horizontally or vertically. Outputs the contents of flipped along the dimension . See also . Args: image: 4-D Tensor of shape or 3-D Tensor of shape . flip_index: 0 For vertical, 1 for horizontal. scope_name: string, scope name. Returns: A of the same type and shape as . Raises: ValueError: if the shape of not supported.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\image_ops_impl.py",
    "ast_data": "FunctionDef name:_flip arg:image arg:flip_index arg:scope_name arguments arg arg arg With Call Assign Call Assign Call Assign Call FunctionDef name:f_rank3 arguments Return return:yes Call Call FunctionDef name:f_rank4 arguments Return return:yes Call If Compare Assign Call Return return:yes Call Call If Compare Return return:yes Call If Compare Return return:yes Call Raise Call"
  },
  {
    "library": "seaborn",
    "name": "default_range",
    "source_code": "@property\ndef default_range(self) -> tuple[float, float]:\n    base = mpl.rcParams['patch.linewidth']\n    return (base * 0.5, base * 2)",
    "docstring": "Min and max values used by default for semantic mapping.",
    "type": "method",
    "file_path": "seaborn\\seaborn\\_core\\properties.py",
    "ast_data": "FunctionDef name:default_range arg:self arguments arg Assign Return return:yes"
  },
  {
    "library": "django",
    "name": "warp",
    "source_code": "def warp(self, ds_input, resampling='NearestNeighbour', max_error=0.0):\n    ds_input.setdefault('width', self.width)\n    ds_input.setdefault('height', self.height)\n    ds_input.setdefault('srid', self.srs.srid)\n    ds_input.setdefault('origin', self.origin)\n    ds_input.setdefault('scale', self.scale)\n    ds_input.setdefault('skew', self.skew)\n    ds_input.setdefault('driver', self.driver.name)\n    if 'name' not in ds_input:\n        ds_input['name'] = self.name + '_copy.' + self.driver.name\n    if 'datatype' not in ds_input:\n        ds_input['datatype'] = self.bands[0].datatype()\n    ds_input['bands'] = [{'nodata_value': bnd.nodata_value} for bnd in self.bands]\n    target = GDALRaster(ds_input, write=True)\n    algorithm = GDAL_RESAMPLE_ALGORITHMS[resampling]\n    capi.reproject_image(self._ptr, self.srs.wkt.encode(), target._ptr, target.srs.wkt.encode(), algorithm, 0.0, max_error, c_void_p(), c_void_p(), c_void_p())\n    target._flush()\n    return target",
    "docstring": "Return a warped GDALRaster with the given input characteristics. The input is expected to be a dictionary containing the parameters of the target raster. Allowed values are width, height, SRID, origin, scale, skew, datatype, driver, and name (filename). By default, the warp functions keeps all parameters equal to the values of the original source raster. For the name of the target raster, the name of the source raster will be used and appended with _copy. + source_driver_name. In addition, the resampling algorithm can be specified with the \"resampling\" input parameter. The default is NearestNeighbor. For a list of all options consult the GDAL_RESAMPLE_ALGORITHMS constant.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\gdal\\raster\\source.py",
    "ast_data": "FunctionDef name:warp arg:self arg:ds_input arg:resampling arg:max_error arguments arg arg arg arg Call Call Call Call Call Call Call If Compare Assign If Compare Assign Call Assign Assign Call Assign Call Call Call Call Call Call Call Return return:yes"
  },
  {
    "library": "numpy",
    "name": "LineIterator",
    "source_code": "class LineIterator:\n\n    def __init__(self, iterable):\n        object.__init__(self)\n        self.iterable = iter(iterable)\n        self.lineno = 0\n\n    def __iter__(self):\n        return self\n\n    def __next__(self):\n        self.lineno += 1\n        line = next(self.iterable)\n        line = line.rstrip()\n        return line\n    next = __next__",
    "docstring": "LineIterator(iterable) Return rstrip()'d lines from iterable, while keeping a count of the line number in the .lineno attribute.",
    "type": "class",
    "file_path": "numpy\\numpy\\linalg\\lapack_lite\\fortran.py",
    "ast_data": "ClassDef name:LineIterator FunctionDef name:__init__ arg:self arg:iterable arguments arg arg Call Assign Call Assign FunctionDef name:__iter__ arg:self arguments arg Return return:yes FunctionDef name:__next__ arg:self arguments arg Assign Call Assign Call Return return:yes Assign"
  },
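A usage sketch for `LineIterator`; note the class lives in a NumPy build helper (`numpy/linalg/lapack_lite/fortran.py`), not the public API, so this assumes the definition above has been pasted into scope:

```python
# Assumes the LineIterator class shown above is defined in scope.
lines = LineIterator(["first line \n", "second line\n"])
for line in lines:
    # lineno counts consumed lines; trailing whitespace is stripped.
    print(lines.lineno, repr(line))
# 1 'first line'
# 2 'second line'
```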
  {
    "library": "scrapy",
    "name": "_adjust_delay",
    "source_code": "def _adjust_delay(self, slot: Slot, latency: float, response: Response) -> None:\n    target_delay = latency / self.target_concurrency\n    new_delay = (slot.delay + target_delay) / 2.0\n    new_delay = max(target_delay, new_delay)\n    new_delay = min(max(self.mindelay, new_delay), self.maxdelay)\n    if response.status != 200 and new_delay <= slot.delay:\n        return\n    slot.delay = new_delay",
    "docstring": "Define delay adjustment policy",
    "type": "method",
    "file_path": "scrapy\\scrapy\\extensions\\throttle.py",
    "ast_data": "FunctionDef name:_adjust_delay arg:self arg:slot arg:latency arg:response arguments arg arg arg arg Assign Assign Assign Call Assign Call Call If BoolOp Compare Compare Return return:no Assign"
  },
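The update rule in `_adjust_delay` averages the current delay with latency / target_concurrency, never lets it fall below that target, and clamps it to the configured bounds. A standalone sketch with illustrative constants (the real values come from AutoThrottle settings):

```python
def next_delay(delay: float, latency: float, target_concurrency: float = 1.0,
               mindelay: float = 0.25, maxdelay: float = 60.0) -> float:
    target = latency / target_concurrency
    new = max(target, (delay + target) / 2.0)  # move halfway toward target
    return min(max(mindelay, new), maxdelay)   # clamp to [mindelay, maxdelay]

print(next_delay(delay=1.0, latency=0.5))  # -> 0.75
```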
  {
    "library": "matplotlib",
    "name": "get_width",
    "source_code": "def get_width(self):\n    return self._width",
    "docstring": "Return the width (thickness) of the annulus ring.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\patches.py",
    "ast_data": "FunctionDef name:get_width arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_ProcessOutputTensor",
    "source_code": "def _ProcessOutputTensor(self, val):\n    real_val = val\n    if val.name not in self._values:\n        self._values.add(val.name)\n        if self._outer_context:\n            real_val = self._outer_context.AddValue(val)\n            self._values.add(real_val.name)\n            self._external_values[real_val.name] = real_val\n        real_val = _SwitchRefOrTensor(real_val, self._pred)[self._branch]\n        self._external_values[val.name] = real_val\n    else:\n        external_val = self._external_values.get(val.name)\n        if external_val is not None:\n            real_val = external_val\n    return real_val",
    "docstring": "Process an output tensor of a conditional branch.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\control_flow_ops.py",
    "ast_data": "FunctionDef name:_ProcessOutputTensor arg:self arg:val arguments arg arg Assign If Compare Call If Assign Call Call Assign Assign Call Assign Assign Call If Compare Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, name, **kwargs):\n    if enabled:\n        self._traceme = _pywrap_traceme.TraceMe(name, **kwargs)\n    else:\n        self._traceme = None",
    "docstring": "Creates a trace event in the profiler. Args: name: The name of the trace event. **kwargs: Keyword arguments added to the trace event. Both the key and value are of types that can be converted to strings, which will be interpreted by the profiler according to the traceme name. Example usage: The example above uses the keyword argument \"step_num\" to specify the training step being traced.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\profiler\\trace.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:name arguments arg arg arg If Assign Call Assign"
  },
  {
    "library": "scipy",
    "name": "get_boundaries_intersections",
    "source_code": "def get_boundaries_intersections(self, z, d, trust_radius):\n    a = np.dot(d, d)\n    b = 2 * np.dot(z, d)\n    c = np.dot(z, z) - trust_radius ** 2\n    sqrt_discriminant = math.sqrt(b * b - 4 * a * c)\n    aux = b + math.copysign(sqrt_discriminant, b)\n    ta = -aux / (2 * a)\n    tb = -2 * c / aux\n    return sorted([ta, tb])",
    "docstring": "Solve the scalar quadratic equation ``. This is like a line-sphere intersection. Return the two values of t, sorted from low to high.",
    "type": "method",
    "file_path": "scipy\\scipy\\optimize\\_trustregion.py",
    "ast_data": "FunctionDef name:get_boundaries_intersections arg:self arg:z arg:d arg:trust_radius arguments arg arg arg arg Assign Call Assign Call Assign Call Assign Call Assign Call Assign Assign Return return:yes Call"
  },
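A numeric check of the quadratic above: expanding ||z + t d||^2 == trust_radius^2 gives t^2 (d.d) + t (2 z.d) + (z.z - R^2) = 0, and the two roots land exactly on the sphere:

```python
import math
import numpy as np

z, d, R = np.array([1.0, 0.0]), np.array([0.0, 1.0]), 2.0
a, b, c = d @ d, 2 * (z @ d), z @ z - R**2
sqrt_disc = math.sqrt(b * b - 4 * a * c)
aux = b + math.copysign(sqrt_disc, b)  # numerically stable root pairing
ta, tb = -aux / (2 * a), -2 * c / aux
for t in sorted([ta, tb]):
    print(t, np.linalg.norm(z + t * d))  # norm equals R at both roots
```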
  {
    "library": "kornia",
    "name": "default_filename_fcn",
    "source_code": "def default_filename_fcn(epoch: Union[str, int], metric: Union[str, float]) -> str:\n    return f'model_epoch={epoch}_metricValue={metric}.pt'",
    "docstring": "Generate the filename in the model checkpoint.",
    "type": "function",
    "file_path": "kornia\\kornia\\x\\callbacks.py",
    "ast_data": "FunctionDef name:default_filename_fcn arg:epoch arg:metric arguments arg arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_validate_children_inputs_mappings",
    "source_code": "def _validate_children_inputs_mappings(self, children_inputs_mappings):\n    assert isinstance(children_inputs_mappings, dict)\n    assert 'parent_first_child_input' in children_inputs_mappings\n    assert 'parent_last_child_output' in children_inputs_mappings\n    assert 'internal_children_input_output' in children_inputs_mappings\n\n    def assert_dictlist_has_keys(dictlist, keys):\n        for dikt in dictlist:\n            assert isinstance(dikt, dict)\n            for key in keys:\n                assert key in dikt\n    assert_dictlist_has_keys(children_inputs_mappings['parent_first_child_input'], ['parent_ophint_input_index', 'first_child_ophint_input_index'])\n    assert_dictlist_has_keys(children_inputs_mappings['parent_last_child_output'], ['parent_output_index', 'child_output_index'])\n    assert_dictlist_has_keys(children_inputs_mappings['internal_children_input_output'], ['child_input_index', 'child_output_index'])",
    "docstring": "Validate children inputs mappings is in the right format. Args: children_inputs_mappings: the Children ophint inputs/outputs mapping.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\lite\\python\\op_hint.py",
    "ast_data": "FunctionDef name:_validate_children_inputs_mappings arg:self arg:children_inputs_mappings arguments arg arg Call Compare Compare Compare FunctionDef name:assert_dictlist_has_keys arg:dictlist arg:keys arguments arg arg For Call For Compare Call Call Call"
  },
  {
    "library": "scipy",
    "name": "time_cdist",
    "source_code": "def time_cdist(self, num_points, metric):\n    distance.cdist(self.points, self.points, self.metric, w=self.weights, **self.kwargs)",
    "docstring": "Time scipy.spatial.distance.cdist for weighted distance metrics.",
    "type": "method",
    "file_path": "scipy\\benchmarks\\benchmarks\\spatial.py",
    "ast_data": "FunctionDef name:time_cdist arg:self arg:num_points arg:metric arguments arg arg arg Call"
  },
  {
    "library": "pandas",
    "name": "external_values",
    "source_code": "def external_values(self):\n    return self._block.external_values()",
    "docstring": "The array that Series.values returns",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\internals\\managers.py",
    "ast_data": "FunctionDef name:external_values arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "get_instance_key",
    "source_code": "def get_instance_key(self, group_key, device):\n    with self._lock:\n        group = self._instance_key_table.get(group_key, None)\n        if group is None:\n            raise ValueError(f'Group {group_key} is not found.')\n        if device not in group:\n            raise ValueError(f'Device {device} is not present in group {group_key}')\n        v = group[device]\n        group[device] += 1\n        return v",
    "docstring": "Returns a new instance key for use in defining a collective op. You should call this once per each collective op of a collective instance. Args: group_key: the group key returned by get_group_key(). You should not assign the group key yourself. device: a canonical device string. It should be the device this collective op is on. Returns: a new instance key. Raises: ValueError: when the group key is invalid or the device is not in the group.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\cross_device_utils.py",
    "ast_data": "FunctionDef name:get_instance_key arg:self arg:group_key arg:device arguments arg arg arg With Assign Call If Compare Raise Call If Compare Raise Call Assign Return return:yes"
  },
  {
    "library": "pygame",
    "name": "use_arraytype",
    "source_code": "def use_arraytype(arraytype):\n    warnings.warn(DeprecationWarning('only numpy arrays are now supported, this function will be removed in a future version of the module'))\n    arraytype = arraytype.lower()\n    if arraytype != 'numpy':\n        raise ValueError('invalid array type')",
    "docstring": "pygame.sndarray.use_arraytype(arraytype): return None DEPRECATED - only numpy arrays are now supported.",
    "type": "function",
    "file_path": "pygame\\src_py\\sndarray.py",
    "ast_data": "FunctionDef name:use_arraytype arg:arraytype arguments arg Call Call Assign Call If Compare Raise Call"
  },
  {
    "library": "tensorflow",
    "name": "generate_dequeue_op",
    "source_code": "def generate_dequeue_op(self, tpu_device=0):\n    self.freeze()\n    if self._generated_dequeue_op and (not ops.inside_function()):\n        raise ValueError(\"Can't generate two dequeue Ops from the same queue\")\n    self._generated_dequeue_op = True\n    full_name = '%s/dequeue' % self._name\n    sharded_shapes = [policy.get_sharded_shape(shape) for shape, policy in zip(self._tuple_shapes, self._sharding_policies)]\n    with ops.device(tpu_name_util.core(tpu_device)):\n        values = tpu_ops.infeed_dequeue_tuple(dtypes=self._tuple_types, shapes=sharded_shapes, name=full_name)\n    return tag_sharding_attribute_for_dequeued_tensors(values, self._input_partition_dims)",
    "docstring": "Generate TPU dequeue ops. Args: tpu_device: The TPU device ordinal where the infeed instruction should be placed. Returns: A list of Outputs corresponding to a partition of infeed dequeued into XLA, suitable for use within a replicated block. Raises: ValueError: if the types or shapes of the tuple elements have not been set; or if a dequeue op has already been generated.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\tpu\\tpu_feed.py",
    "ast_data": "FunctionDef name:generate_dequeue_op arg:self arg:tpu_device arguments arg arg Call If BoolOp Call Raise Call Assign Assign Assign Call Call With Call Call Assign Call Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "draw",
    "source_code": "def draw(self, renderer):\n    path_in_disp = self._line_transform.transform_path(self._line_path)\n    mutation_size = self.get_mutation_scale()\n    extended_path = self._extend_path(path_in_disp, mutation_size=mutation_size)\n    self._path_original = extended_path\n    FancyArrowPatch.draw(self, renderer)",
    "docstring": "Draw the axis line. 1) Transform the path to the display coordinate. 2) Extend the path to make a room for arrow. 3) Update the path of the FancyArrowPatch. 4) Draw.",
    "type": "method",
    "file_path": "matplotlib\\lib\\mpl_toolkits\\axisartist\\axisline_style.py",
    "ast_data": "FunctionDef name:draw arg:self arg:renderer arguments arg arg Assign Call Assign Call Assign Call Assign Call"
  },
  {
    "library": "tensorflow",
    "name": "_get_arg_names_to_ok_vals",
    "source_code": "def _get_arg_names_to_ok_vals():\n    d = {}\n    for name_or_tuple in deprecated_arg_names_or_tuples:\n        if isinstance(name_or_tuple, tuple):\n            d[name_or_tuple[0]] = DeprecatedArgSpec(-1, True, name_or_tuple[1])\n        else:\n            d[name_or_tuple] = DeprecatedArgSpec(-1, False, None)\n    return d",
    "docstring": "Returns a dict mapping arg_name to DeprecatedArgSpec w/o position.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\util\\deprecation.py",
    "ast_data": "FunctionDef name:_get_arg_names_to_ok_vals arguments Assign For If Call Assign Call Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "yiq_to_rgb",
    "source_code": "@tf_export('image.yiq_to_rgb')\n@dispatch.add_dispatch_support\ndef yiq_to_rgb(images):\n    images = ops.convert_to_tensor(images, name='images')\n    kernel = ops.convert_to_tensor(_yiq_to_rgb_kernel, dtype=images.dtype, name='kernel')\n    ndims = images.get_shape().ndims\n    return math_ops.tensordot(images, kernel, axes=[[ndims - 1], [0]])",
    "docstring": "Converts one or more images from YIQ to RGB. Outputs a tensor of the same shape as the tensor, containing the RGB value of the pixels. The output is only well defined if the Y value in images are in [0,1], I value are in [-0.5957,0.5957] and Q value are in [-0.5226,0.5226]. Args: images: 2-D or higher rank. Image data to convert. Last dimension must be size 3. Returns: images: tensor with the same shape as .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\image_ops_impl.py",
    "ast_data": "FunctionDef name:yiq_to_rgb arg:images arguments arg Assign Call Assign Call Assign Call Return return:yes Call Call"
  },
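A tiny sanity check for `tf.image.yiq_to_rgb`, assuming TensorFlow 2.x: a pixel with full luma and zero chroma maps back to white:

```python
import tensorflow as tf

yiq = tf.constant([[[1.0, 0.0, 0.0]]])   # Y=1, I=0, Q=0
print(tf.image.yiq_to_rgb(yiq).numpy())  # ~[[[1., 1., 1.]]]
```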
  {
    "library": "tensorflow",
    "name": "UnavailableError",
    "source_code": "@tf_export('errors.UnavailableError')\nclass UnavailableError(OpError):\n\n    def __init__(self, node_def, op, message, *args):\n        super(UnavailableError, self).__init__(node_def, op, message, UNAVAILABLE, *args)",
    "docstring": "Raised when the runtime is currently unavailable. This exception is not currently used.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\errors_impl.py",
    "ast_data": "ClassDef name:UnavailableError FunctionDef name:__init__ arg:self arg:node_def arg:op arg:message arguments arg arg arg arg arg Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "inference_fn",
    "source_code": "@property\ndef inference_fn(self):\n    return self._inference_function",
    "docstring": "Return the inference function associated with this ConcreteFunction.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\polymorphic_function\\concrete_function.py",
    "ast_data": "FunctionDef name:inference_fn arg:self arguments arg Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "SegmentedBivarColormap",
    "source_code": "class SegmentedBivarColormap(BivarColormap):\n\n    def __init__(self, patch, N=256, shape='square', origin=(0, 0), name='segmented bivariate colormap'):\n        _api.check_shape((None, None, 3), patch=patch)\n        self.patch = patch\n        super().__init__(N, N, shape, origin, name=name)\n\n    def _init(self):\n        s = self.patch.shape\n        _patch = np.empty((s[0], s[1], 4))\n        _patch[:, :, :3] = self.patch\n        _patch[:, :, 3] = 1\n        transform = mpl.transforms.Affine2D().translate(-0.5, -0.5).scale(self.N / (s[1] - 1), self.N / (s[0] - 1))\n        self._lut = np.empty((self.N, self.N, 4))\n        _image.resample(_patch, self._lut, transform, _image.BILINEAR, resample=False, alpha=1)\n        self._isinit = True",
    "docstring": "BivarColormap object generated by supersampling a regular grid. Parameters ---------- patch : np.array Patch is required to have a shape (k, l, 3), and will get supersampled to a lut of shape (N, N, 4). N : int The number of RGB quantization levels along each axis. shape : {'square', 'circle', 'ignore', 'circleignore'} - If 'square' each variate is clipped to [0,1] independently - If 'circle' the variates are clipped radially to the center of the colormap, and a circular mask is applied when the colormap is displayed - If 'ignore' the variates are not clipped, but instead assigned the 'outside' color - If 'circleignore' a circular mask is applied, but the data is not clipped origin : (float, float) The relative origin of the colormap. Typically (0, 0), for colormaps that are linear on both axis, and (.5, .5) for circular colormaps. Used when getting 1D colormaps from 2D colormaps. name : str, optional The name of the colormap.",
    "type": "class",
    "file_path": "matplotlib\\lib\\matplotlib\\colors.py",
    "ast_data": "ClassDef name:SegmentedBivarColormap FunctionDef name:__init__ arg:self arg:patch arg:N arg:shape arg:origin arg:name arguments arg arg arg arg arg arg Call Assign Call Call FunctionDef name:_init arg:self arguments arg Assign Assign Call Assign Assign Assign Call Call Call Assign Call Call Assign"
  },
  {
    "library": "django",
    "name": "do_get_language_info_list",
    "source_code": "@register.tag('get_language_info_list')\ndef do_get_language_info_list(parser, token):\n    args = token.split_contents()\n    if len(args) != 5 or args[1] != 'for' or args[3] != 'as':\n        raise TemplateSyntaxError(\"'%s' requires 'for sequence as variable' (got %r)\" % (args[0], args[1:]))\n    return GetLanguageInfoListNode(parser.compile_filter(args[2]), args[4])",
    "docstring": "Store a list of language information dictionaries for the given language codes in a context variable. The language codes can be specified either as a list of strings or a settings.LANGUAGES style list (or any sequence of sequences whose first items are language codes). Usage:: {% get_language_info_list for LANGUAGES as langs %} {% for l in langs %} {{ l.code }} {{ l.name }} {{ l.name_translated }} {{ l.name_local }} {{ l.bidi|yesno:\"bi-directional,uni-directional\" }} {% endfor %}",
    "type": "function",
    "file_path": "django\\django\\templatetags\\i18n.py",
    "ast_data": "FunctionDef name:do_get_language_info_list arg:parser arg:token arguments arg arg Assign Call If BoolOp Compare Call Compare Compare Raise Call Return return:yes Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "change_current_allocator",
    "source_code": "def change_current_allocator(allocator: _CUDAAllocator) -> None:\n    torch._C._cuda_changeCurrentAllocator(allocator.allocator())",
    "docstring": "Change the currently used memory allocator to be the one provided. If the current allocator has already been used/initialized, this function will error. Args: allocator (torch.cuda.memory._CUDAAllocator): allocator to be set as the active one. .. note:: See :ref: for details on creating and using a custom allocator",
    "type": "function",
    "file_path": "pytorch\\torch\\cuda\\memory.py",
    "ast_data": "FunctionDef name:change_current_allocator arg:allocator arguments arg Call Call"
  },
  {
    "library": "django",
    "name": "add_field_update",
    "source_code": "def add_field_update(self, field, value, objs):\n    self.field_updates[field, value].append(objs)",
    "docstring": "Schedule a field update. 'objs' must be a homogeneous iterable collection of model instances (e.g. a QuerySet).",
    "type": "method",
    "file_path": "django\\django\\db\\models\\deletion.py",
    "ast_data": "FunctionDef name:add_field_update arg:self arg:field arg:value arg:objs arguments arg arg arg arg Call"
  },
  {
    "library": "tensorflow",
    "name": "graph_execution_trace_to_tensor_value",
    "source_code": "def graph_execution_trace_to_tensor_value(self, trace):\n    debug_event = self._reader.read_graph_execution_traces_event(trace.locator)\n    return _parse_tensor_value(debug_event.graph_execution_trace.tensor_proto)",
    "docstring": "Read full tensor values from an Execution or ExecutionDigest. Args: trace: An or object. Returns: A numpy array representing the output tensor value of the intra-graph tensor execution event.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\debug\\lib\\debug_events_reader.py",
    "ast_data": "FunctionDef name:graph_execution_trace_to_tensor_value arg:self arg:trace arguments arg arg Assign Call Return return:yes Call"
  },
  {
    "library": "kornia",
    "name": "_embed_points",
    "source_code": "def _embed_points(self, points: Tensor, labels: Tensor, pad: bool) -> Tensor:\n    points = points + 0.5\n    if pad:\n        padding_point = zeros((points.shape[0], 1, 2), device=points.device)\n        padding_label = -torch.ones((labels.shape[0], 1), device=labels.device)\n        points = concatenate([points, padding_point], dim=1)\n        labels = concatenate([labels, padding_label], dim=1)\n    point_embedding = self.pe_layer.forward_with_coords(points, self.input_image_size)\n    point_embedding[labels == -1] = 0.0\n    point_embedding[labels == -1] += self.not_a_point_embed.weight\n    point_embedding[labels == 0] += self.point_embeddings[0].weight\n    point_embedding[labels == 1] += self.point_embeddings[1].weight\n    return point_embedding",
    "docstring": "Embeds point prompts.",
    "type": "method",
    "file_path": "kornia\\kornia\\contrib\\models\\sam\\architecture\\prompt_encoder.py",
    "ast_data": "FunctionDef name:_embed_points arg:self arg:points arg:labels arg:pad arguments arg arg arg arg Assign If Assign Call Assign Call Assign Call Assign Call Assign Call Assign Compare Compare Compare Compare Return return:yes"
  },
  {
    "library": "kornia",
    "name": "get_mix_augmentation_indices",
    "source_code": "def get_mix_augmentation_indices(self, named_modules: Iterator[Tuple[str, Module]]) -> List[int]:\n    return [idx for idx, (_, child) in enumerate(named_modules) if isinstance(child, K.MixAugmentationBaseV2)]",
    "docstring": "Get all the mix augmentations since they are label-involved. Special operations needed for label-involved augmentations.",
    "type": "method",
    "file_path": "kornia\\kornia\\augmentation\\container\\image.py",
    "ast_data": "FunctionDef name:get_mix_augmentation_indices arg:self arg:named_modules arguments arg arg Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "set_value",
    "source_code": "@doc_controls.do_not_generate_docs\ndef set_value(x, value):\n    value = numpy_compat.np_asarray(value, dtype=dtype_numpy(x))\n    if ops.executing_eagerly_outside_functions():\n        x.assign(value)\n    else:\n        with get_graph().as_default():\n            tf_dtype = dtypes_module.as_dtype(x.dtype.name.split('_')[0])\n            if hasattr(x, '_assign_placeholder'):\n                assign_placeholder = x._assign_placeholder\n                assign_op = x._assign_op\n            else:\n                placeholder_shape = tensor_shape.TensorShape([None] * value.ndim)\n                assign_placeholder = array_ops.placeholder(tf_dtype, shape=placeholder_shape)\n                assign_op = x.assign(assign_placeholder)\n                x._assign_placeholder = assign_placeholder\n                x._assign_op = assign_op\n            get_session().run(assign_op, feed_dict={assign_placeholder: value})",
    "docstring": "Sets the value of a variable, from a Numpy array. is the complement of , and provides a generic interface for assigning to variables while abstracting away the differences between TensorFlow 1.x and 2.x semantics. {snippet} Args: x: Variable to set to a new value. value: Value to set the tensor to, as a Numpy array (of the same shape).",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\backend.py",
    "ast_data": "FunctionDef name:set_value arg:x arg:value arguments arg arg Assign Call Call If Call Call With Call Call Assign Call Call If Call Assign Assign Assign Call Assign Call Assign Call Assign Assign Call Call"
  },
  {
    "library": "sphinx",
    "name": "PyClasslike",
    "source_code": "class PyClasslike(PyObject):\n    option_spec: ClassVar[OptionSpec] = PyObject.option_spec.copy()\n    option_spec.update({'abstract': directives.flag, 'final': directives.flag})\n    allow_nesting = True\n\n    def get_signature_prefix(self, sig: str) -> Sequence[nodes.Node]:\n        prefix: list[addnodes.desc_sig_element] = []\n        if 'final' in self.options:\n            prefix.extend((addnodes.desc_sig_keyword('', 'final'), addnodes.desc_sig_space()))\n        if 'abstract' in self.options:\n            prefix.extend((addnodes.desc_sig_keyword('', 'abstract'), addnodes.desc_sig_space()))\n        prefix.extend((addnodes.desc_sig_keyword('', self.objtype), addnodes.desc_sig_space()))\n        return prefix\n\n    def get_index_text(self, modname: str, name_cls: tuple[str, str]) -> str:\n        if self.objtype == 'class':\n            if not modname:\n                return _('%s (built-in class)') % name_cls[0]\n            return _('%s (class in %s)') % (name_cls[0], modname)\n        elif self.objtype == 'exception':\n            return name_cls[0]\n        else:\n            return ''",
    "docstring": "Description of a class-like object (classes, interfaces, exceptions).",
    "type": "class",
    "file_path": "sphinx\\sphinx\\domains\\python\\__init__.py",
    "ast_data": "ClassDef name:PyClasslike Call Call Assign FunctionDef name:get_signature_prefix arg:self arg:sig arguments arg arg If Compare Call Call Call If Compare Call Call Call Call Call Call Return return:yes FunctionDef name:get_index_text arg:self arg:modname arg:name_cls arguments arg arg arg If Compare If Return return:yes Call Return return:yes Call If Compare Return return:yes Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "get_antialiased",
    "source_code": "def get_antialiased(self):\n    return self._antialiased",
    "docstring": "Return whether the object should try to do antialiased rendering.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\backend_bases.py",
    "ast_data": "FunctionDef name:get_antialiased arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "tflite_to_tosa_bytecode",
    "source_code": "@tf_export('mlir.experimental.tflite_to_tosa_bytecode')\ndef tflite_to_tosa_bytecode(flatbuffer, bytecode, use_external_constant=False, ordered_input_arrays=None, ordered_output_arrays=None):\n    pywrap_mlir.experimental_tflite_to_tosa_bytecode(flatbuffer, bytecode, use_external_constant, ordered_input_arrays, ordered_output_arrays)",
    "docstring": "Converts TFLite flatbuffer to TOSA dialect in MLIR bytecode. Args: flatbuffer: Path to flatbuffer. bytecode: Path to output bytecode. use_external_constant: Whether to create instead of . ordered_input_arrays: ordered_output_arrays: If ordered_output_arrays is not empty, then the function will only return nodes in ordered_output_arrays in the same order",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\compiler\\mlir\\mlir.py",
    "ast_data": "FunctionDef name:tflite_to_tosa_bytecode arg:flatbuffer arg:bytecode arg:use_external_constant arg:ordered_input_arrays arg:ordered_output_arrays arguments arg arg arg arg arg Call Call"
  },
  {
    "library": "cryptography",
    "name": "public_bytes",
    "source_code": "@abc.abstractmethod\ndef public_bytes(self, encoding: _serialization.Encoding, format: _serialization.PublicFormat) -> bytes:\n    pass",
    "docstring": "The serialized bytes of the public key.",
    "type": "method",
    "file_path": "cryptography\\src\\cryptography\\hazmat\\primitives\\asymmetric\\x448.py",
    "ast_data": "FunctionDef name:public_bytes arg:self arg:encoding arg:format arguments arg arg arg"
  },
  {
    "library": "django",
    "name": "to_current_timezone",
    "source_code": "def to_current_timezone(value):\n    if settings.USE_TZ and value is not None and timezone.is_aware(value):\n        return timezone.make_naive(value)\n    return value",
    "docstring": "When time zone support is enabled, convert aware datetimes to naive datetimes in the current time zone for display.",
    "type": "function",
    "file_path": "django\\django\\forms\\utils.py",
    "ast_data": "FunctionDef name:to_current_timezone arg:value arguments arg If BoolOp Compare Call Return return:yes Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_maybe_shuffle_and_repeat",
    "source_code": "def _maybe_shuffle_and_repeat(dataset, num_epochs, shuffle, shuffle_buffer_size, shuffle_seed):\n    if shuffle:\n        dataset = dataset.shuffle(shuffle_buffer_size, shuffle_seed)\n    if num_epochs != 1:\n        dataset = dataset.repeat(num_epochs)\n    return dataset",
    "docstring": "Optionally shuffle and repeat dataset, as requested.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\data\\experimental\\ops\\readers.py",
    "ast_data": "FunctionDef name:_maybe_shuffle_and_repeat arg:dataset arg:num_epochs arg:shuffle arg:shuffle_buffer_size arg:shuffle_seed arguments arg arg arg arg arg If Assign Call If Compare Assign Call Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "_radius_neighbors_from_graph",
    "source_code": "def _radius_neighbors_from_graph(graph, radius, return_distance):\n    assert graph.format == 'csr'\n    no_filter_needed = bool(graph.data.max() <= radius)\n    if no_filter_needed:\n        data, indices, indptr = (graph.data, graph.indices, graph.indptr)\n    else:\n        mask = graph.data <= radius\n        if return_distance:\n            data = np.compress(mask, graph.data)\n        indices = np.compress(mask, graph.indices)\n        indptr = np.concatenate(([0], np.cumsum(mask)))[graph.indptr]\n    indices = indices.astype(np.intp, copy=no_filter_needed)\n    if return_distance:\n        neigh_dist = _to_object_array(np.split(data, indptr[1:-1]))\n    neigh_ind = _to_object_array(np.split(indices, indptr[1:-1]))\n    if return_distance:\n        return (neigh_dist, neigh_ind)\n    else:\n        return neigh_ind",
    "docstring": "Decompose a nearest neighbors sparse graph into distances and indices. Parameters ---------- graph : sparse matrix of shape (n_samples, n_samples) Neighbors graph as given by or . Matrix should be of format CSR format. radius : float Radius of neighborhoods which should be strictly positive. return_distance : bool Whether or not to return the distances. Returns ------- neigh_dist : ndarray of shape (n_samples,) of arrays Distances to nearest neighbors. Only present if . neigh_ind : ndarray of shape (n_samples,) of arrays Indices of nearest neighbors.",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\neighbors\\_base.py",
    "ast_data": "FunctionDef name:_radius_neighbors_from_graph arg:graph arg:radius arg:return_distance arguments arg arg arg Compare Assign Call Compare Call If Assign Assign Compare If Assign Call Assign Call Assign Call Call Assign Call If Assign Call Call Assign Call Call If Return return:yes Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "_get_clippath_id",
    "source_code": "def _get_clippath_id(self, clippath):\n    if clippath not in self._clip_path_ids:\n        self._clip_path_ids[clippath] = len(self._clip_path_ids)\n    return self._clip_path_ids[clippath]",
    "docstring": "Returns a stable and unique identifier for the *clippath* argument object within the current rendering context. This allows plots that include custom clip paths to produce identical SVG output on each render, provided that the :rc: config setting and the `` build-time environment variable are set to fixed values.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\backends\\backend_svg.py",
    "ast_data": "FunctionDef name:_get_clippath_id arg:self arg:clippath arguments arg arg If Compare Assign Call Return return:yes"
  },
  {
    "library": "sphinx",
    "name": "HandleCodeBlocks",
    "source_code": "class HandleCodeBlocks(SphinxTransform):\n    default_priority = 210\n\n    def apply(self, **kwargs: Any) -> None:\n        for node in self.document.findall(nodes.block_quote):\n            if all((isinstance(child, nodes.doctest_block) for child in node.children)):\n                node.replace_self(node.children)",
    "docstring": "Several code block related transformations.",
    "type": "class",
    "file_path": "sphinx\\sphinx\\transforms\\__init__.py",
    "ast_data": "ClassDef name:HandleCodeBlocks Assign FunctionDef name:apply arg:self arguments arg arg For Call If Call Call Call"
  },
  {
    "library": "numpy",
    "name": "values",
    "source_code": "def values(self):\n    return Mapping.values(self)",
    "docstring": "D.values() returns a set-like object providing a view on the values",
    "type": "method",
    "file_path": "numpy\\numpy\\lib\\_npyio_impl.py",
    "ast_data": "FunctionDef name:values arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "_compute_mi",
    "source_code": "def _compute_mi(x, y, x_discrete, y_discrete, n_neighbors=3):\n    if x_discrete and y_discrete:\n        return mutual_info_score(x, y)\n    elif x_discrete and (not y_discrete):\n        return _compute_mi_cd(y, x, n_neighbors)\n    elif not x_discrete and y_discrete:\n        return _compute_mi_cd(x, y, n_neighbors)\n    else:\n        return _compute_mi_cc(x, y, n_neighbors)",
    "docstring": "Compute mutual information between two variables. This is a simple wrapper which selects a proper function to call based on whether and are discrete or not.",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\feature_selection\\_mutual_info.py",
    "ast_data": "FunctionDef name:_compute_mi arg:x arg:y arg:x_discrete arg:y_discrete arg:n_neighbors arguments arg arg arg arg arg If BoolOp Return return:yes Call If BoolOp Return return:yes Call If BoolOp Return return:yes Call Return return:yes Call"
  },
  {
    "library": "django",
    "name": "filepath_to_uri",
    "source_code": "def filepath_to_uri(path):\n    if path is None:\n        return path\n    return quote(str(path).replace('\\\\', '/'), safe=\"/~!*()'\")",
    "docstring": "Convert a file system path to a URI portion that is suitable for inclusion in a URL. Encode certain chars that would normally be recognized as special chars for URIs. Do not encode the ' character, as it is a valid character within URIs. See the encodeURIComponent() JavaScript function for details.",
    "type": "function",
    "file_path": "django\\django\\utils\\encoding.py",
    "ast_data": "FunctionDef name:filepath_to_uri arg:path arguments arg If Compare Return return:yes Return return:yes Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "get_tensor_filter",
    "source_code": "def get_tensor_filter(self, filter_name):\n    if filter_name not in self._tensor_filters:\n        raise ValueError('There is no tensor filter named \"%s\"' % filter_name)\n    return self._tensor_filters[filter_name]",
    "docstring": "Retrieve filter function by name. Args: filter_name: Name of the filter set during add_tensor_filter() call. Returns: The callable associated with the filter name. Raises: ValueError: If there is no tensor filter of the specified filter name.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\debug\\cli\\analyzer_cli.py",
    "ast_data": "FunctionDef name:get_tensor_filter arg:self arg:filter_name arguments arg arg If Compare Raise Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "enclosing_tpu_context",
    "source_code": "def enclosing_tpu_context():\n    return enclosing_tpu_context_and_graph()[0]",
    "docstring": "Returns the TPUReplicateContext, which exists inside a tpu.rewrite().",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\tpu_util.py",
    "ast_data": "FunctionDef name:enclosing_tpu_context arguments Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "verify_export",
    "source_code": "def verify_export(self, options: VerificationOptions) -> tuple[AssertionError | None, torch.Graph, _OutputsType, _OutputsType]:\n    return verify_aten_graph(self.graph, input_args=self.input_args, params_dict=self.params_dict, export_options=self.export_options, verification_options=options)",
    "docstring": "Verify the export from TorchScript IR graph to ONNX. Export the TorchScript IR graph to ONNX, with the inputs, parameters and export options recorded in this object. Then verify the exported ONNX graph against the original TorchScript IR graph under the provided verification options. Args: options: The verification options. Returns: error: The AssertionError raised during the verification. Returns None if no error is raised. onnx_graph: The exported ONNX graph in TorchScript IR format. onnx_outs: The outputs from running exported ONNX model under the onnx backend in . pt_outs: The outputs from running the TorchScript IR graph.",
    "type": "method",
    "file_path": "pytorch\\torch\\onnx\\verification.py",
    "ast_data": "FunctionDef name:verify_export arg:self arg:options arguments arg arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_parse_example_raw",
    "source_code": "def _parse_example_raw(serialized, names, params, name):\n    if params.num_features == 0:\n        raise ValueError('Must provide at least one feature key.')\n    with ops.name_scope(name, 'ParseExample', [serialized, names]):\n        names = [] if names is None else names\n        serialized = ops.convert_to_tensor(serialized, name='serialized')\n        if params.ragged_keys and serialized.shape.ndims is None:\n            raise ValueError('serialized must have statically-known rank to parse ragged features.')\n        outputs = gen_parsing_ops.parse_example_v2(serialized=serialized, names=names, sparse_keys=params.sparse_keys, dense_keys=params.dense_keys, ragged_keys=params.ragged_keys, dense_defaults=params.dense_defaults_vec, num_sparse=len(params.sparse_keys), sparse_types=params.sparse_types, ragged_value_types=params.ragged_value_types, ragged_split_types=params.ragged_split_types, dense_shapes=params.dense_shapes_as_proto, name=name)\n        sparse_indices, sparse_values, sparse_shapes, dense_values, ragged_values, ragged_row_splits = outputs\n        ragged_tensors = parsing_config._build_ragged_tensors(serialized.shape, ragged_values, ragged_row_splits)\n        sparse_tensors = [sparse_tensor.SparseTensor(ix, val, shape) for ix, val, shape in zip(sparse_indices, sparse_values, sparse_shapes)]\n        return dict(zip(params.sparse_keys + params.dense_keys + params.ragged_keys, sparse_tensors + dense_values + ragged_tensors))",
    "docstring": "Parses protos. Args: serialized: A vector (1-D Tensor) of strings, a batch of binary serialized protos. names: A vector (1-D Tensor) of strings (optional), the names of the serialized protos. params: A containing the parameters for the parse op. name: A name for this operation (optional). Returns: A mapping keys to s and s and s.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\parsing_ops.py",
    "ast_data": "FunctionDef name:_parse_example_raw arg:serialized arg:names arg:params arg:name arguments arg arg arg arg If Compare Raise Call With Call Assign Compare Assign Call If BoolOp Compare Raise Call Assign Call Call Assign Assign Call Assign Call Call Return return:yes Call Call"
  },
  {
    "library": "matplotlib",
    "name": "update_normal",
    "source_code": "def update_normal(self, mappable=None):\n    if mappable:\n        self.mappable = mappable\n    _log.debug('colorbar update normal %r %r', self.mappable.norm, self.norm)\n    self.set_alpha(self.mappable.get_alpha())\n    self.cmap = self.mappable.cmap\n    if self.mappable.norm != self.norm:\n        self.norm = self.mappable.norm\n        self._reset_locator_formatter_scale()\n    self._draw_all()\n    if isinstance(self.mappable, contour.ContourSet):\n        CS = self.mappable\n        if not CS.filled:\n            self.add_lines(CS)\n    self.stale = True",
    "docstring": "Update solid patches, lines, etc. This is meant to be called when the norm of the image or contour plot to which this colorbar belongs changes. If the norm on the mappable is different than before, this resets the locator and formatter for the axis, so if these have been customized, they will need to be customized again. However, if the norm only changes values of *vmin*, *vmax* or *cmap* then the old formatter and locator will be preserved.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\colorbar.py",
    "ast_data": "FunctionDef name:update_normal arg:self arg:mappable arguments arg arg If Assign Call Call Call Assign If Compare Assign Call Call If Call Assign If Call Assign"
  },
  {
    "library": "cherrypy",
    "name": "error_page_namespace",
    "source_code": "def error_page_namespace(k, v):\n    if k != 'default':\n        k = int(k)\n    cherrypy.serving.request.error_page[k] = v",
    "docstring": "Attach error pages declared in config.",
    "type": "function",
    "file_path": "cherrypy\\cherrypy\\_cprequest.py",
    "ast_data": "FunctionDef name:error_page_namespace arg:k arg:v arguments arg arg If Compare Assign Call Assign"
  },
  {
    "library": "tensorflow",
    "name": "_has_precomputed_nrows",
    "source_code": "def _has_precomputed_nrows(self):\n    return self._nrows is not None",
    "docstring": "Returns true if has already been computed. If true, then will return its value without calling any TensorFlow ops.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\row_partition.py",
    "ast_data": "FunctionDef name:_has_precomputed_nrows arg:self arguments arg Return return:yes Compare"
  },
  {
    "library": "tensorflow",
    "name": "softsign",
    "source_code": "@dispatch.add_dispatch_support\ndef softsign(x):\n    return nn.softsign(x)",
    "docstring": "Softsign activation function, . Example Usage: >>> a = tf.constant([-1.0, 0.0, 1.0], dtype = tf.float32) >>> b = tf.keras.activations.softsign(a) >>> b.numpy() array([-0.5, 0. , 0.5], dtype=float32) Args: x: Input tensor. Returns: The softsign activation: .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\activations.py",
    "ast_data": "FunctionDef name:softsign arg:x arguments arg Return return:yes Call"
  },
  {
    "library": "pygame",
    "name": "__repr__",
    "source_code": "def __repr__(self):\n    return '<{klass} @{id:x} {attrs}>'.format(klass=self.__class__.__name__, id=id(self) & 16777215, attrs=' '.join((f'{k}={v!r}' for k, v in self.__dict__.items())))",
    "docstring": "Turn the class into a string.",
    "type": "method",
    "file_path": "pygame\\src_py\\sprite.py",
    "ast_data": "FunctionDef name:__repr__ arg:self arguments arg Return return:yes Call Call Call Call"
  },
  {
    "library": "matplotlib",
    "name": "draw_path",
    "source_code": "def draw_path(self, renderer, gc, tpath, affine, rgbFace):\n    gc0 = renderer.new_gc()\n    gc0.copy_properties(gc)\n    gc0 = self._update_gc(gc0, self._gc)\n    renderer.draw_path(gc0, tpath, affine + self._offset_transform(renderer), rgbFace)\n    gc0.restore()",
    "docstring": "Draw the path with updated gc.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\patheffects.py",
    "ast_data": "FunctionDef name:draw_path arg:self arg:renderer arg:gc arg:tpath arg:affine arg:rgbFace arguments arg arg arg arg arg arg Assign Call Call Assign Call Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_identity",
    "source_code": "def _identity(x):\n    if isinstance(x, tensor_array_ops.TensorArray):\n        return x.identity()\n    elif isinstance(x, ops.Operation):\n        return control_flow_ops.group(x)\n    elif context.executing_eagerly() and x is None:\n        return None\n    else:\n        return array_ops.identity(x)",
    "docstring": "Identity op that recognizes , , and .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\critical_section_ops.py",
    "ast_data": "FunctionDef name:_identity arg:x arguments arg If Call Return return:yes Call If Call Return return:yes Call If BoolOp Call Compare Return return:no Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "get_jit_arguments",
    "source_code": "def get_jit_arguments(engine_kwargs: dict[str, bool] | None=None) -> dict[str, bool]:\n    if engine_kwargs is None:\n        engine_kwargs = {}\n    nopython = engine_kwargs.get('nopython', True)\n    nogil = engine_kwargs.get('nogil', False)\n    parallel = engine_kwargs.get('parallel', False)\n    return {'nopython': nopython, 'nogil': nogil, 'parallel': parallel}",
    "docstring": "Return arguments to pass to numba.JIT, falling back on pandas default JIT settings. Parameters ---------- engine_kwargs : dict, default None user passed keyword arguments for numba.JIT Returns ------- dict[str, bool] nopython, nogil, parallel Raises ------ NumbaUtilError",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\util\\numba_.py",
    "ast_data": "FunctionDef name:get_jit_arguments arg:engine_kwargs arguments arg If Compare Assign Assign Call Assign Call Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "log_prob",
    "source_code": "def log_prob(self, value, name='log_prob'):\n    return self._call_log_prob(value, name)",
    "docstring": "Log probability density/mass function. Args: value: or . name: Python prepended to names of ops created by this function. Returns: log_prob: a of shape with values of type .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\distributions\\distribution.py",
    "ast_data": "FunctionDef name:log_prob arg:self arg:value arg:name arguments arg arg arg Return return:yes Call"
  },
  {
    "library": "scrapy",
    "name": "PostProcessingManager",
    "source_code": "class PostProcessingManager(IOBase):\n\n    def __init__(self, plugins: list[Any], file: IO[bytes], feed_options: dict[str, Any]) -> None:\n        self.plugins = self._load_plugins(plugins)\n        self.file = file\n        self.feed_options = feed_options\n        self.head_plugin = self._get_head_plugin()\n\n    def write(self, data: bytes) -> int:\n        return cast(int, self.head_plugin.write(data))\n\n    def tell(self) -> int:\n        return self.file.tell()\n\n    def close(self) -> None:\n        self.head_plugin.close()\n\n    def writable(self) -> bool:\n        return True\n\n    def _load_plugins(self, plugins: list[Any]) -> list[Any]:\n        return [load_object(plugin) for plugin in plugins]\n\n    def _get_head_plugin(self) -> Any:\n        prev = self.file\n        for plugin in self.plugins[::-1]:\n            prev = plugin(prev, self.feed_options)\n        return prev",
    "docstring": "This will manage and use declared plugins to process data in a pipeline-ish way. :param plugins: all the declared plugins for the feed :type plugins: list :param file: final target file where the processed data will be written :type file: file like object",
    "type": "class",
    "file_path": "scrapy\\scrapy\\extensions\\postprocessing.py",
    "ast_data": "ClassDef name:PostProcessingManager FunctionDef name:__init__ arg:self arg:plugins arg:file arg:feed_options arguments arg arg arg arg Assign Call Assign Assign Assign Call FunctionDef name:write arg:self arg:data arguments arg arg Return return:yes Call Call FunctionDef name:tell arg:self arguments arg Return return:yes Call FunctionDef name:close arg:self arguments arg Call FunctionDef name:writable arg:self arguments arg Return return:yes FunctionDef name:_load_plugins arg:self arg:plugins arguments arg arg Return return:yes Call FunctionDef name:_get_head_plugin arg:self arguments arg Assign For Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "indices",
    "source_code": "@property\ndef indices(self):\n    return self._indices",
    "docstring": "A 1-D containing the indices of the slices.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\indexed_slices.py",
    "ast_data": "FunctionDef name:indices arg:self arguments arg Return return:yes"
  },
  {
    "library": "scipy",
    "name": "ECDFResult",
    "source_code": "@dataclass\nclass ECDFResult:\n    cdf: EmpiricalDistributionFunction\n    sf: EmpiricalDistributionFunction\n\n    def __init__(self, q, cdf, sf, n, d):\n        self.cdf = EmpiricalDistributionFunction(q, cdf, n, d, 'cdf')\n        self.sf = EmpiricalDistributionFunction(q, sf, n, d, 'sf')",
    "docstring": "Result object returned by Attributes ---------- cdf : An object representing the empirical cumulative distribution function. sf : An object representing the complement of the empirical cumulative distribution function.",
    "type": "class",
    "file_path": "scipy\\scipy\\stats\\_survival.py",
    "ast_data": "ClassDef name:ECDFResult FunctionDef name:__init__ arg:self arg:q arg:cdf arg:sf arg:n arg:d arguments arg arg arg arg arg arg Assign Call Assign Call"
  },
  {
    "library": "pandas",
    "name": "convert_from_missing_indexer_tuple",
    "source_code": "def convert_from_missing_indexer_tuple(indexer, axes):\n\n    def get_indexer(_i, _idx):\n        return axes[_i].get_loc(_idx['key']) if isinstance(_idx, dict) else _idx\n    return tuple((get_indexer(_i, _idx) for _i, _idx in enumerate(indexer)))",
    "docstring": "Create a filtered indexer that doesn't have any missing indexers.",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\indexing.py",
    "ast_data": "FunctionDef name:convert_from_missing_indexer_tuple arg:indexer arg:axes arguments arg arg FunctionDef name:get_indexer arg:_i arg:_idx arguments arg arg Return return:yes Call Call Return return:yes Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "get_tracker_issues",
    "source_code": "def get_tracker_issues(org: str, project: str, onto_branch: str) -> list[dict[str, Any]]:\n    version = get_release_version(onto_branch)\n    if not version:\n        return []\n    tracker_issues = gh_query_issues_by_labels(org, project, labels=['release tracker'])\n    if not tracker_issues:\n        return []\n    return [issue for issue in tracker_issues if version in issue.get('title', '')]",
    "docstring": "Find the tracker issue from the repo. The tracker issue needs to have the title like [VERSION] Release Tracker following the convention on PyTorch",
    "type": "function",
    "file_path": "pytorch\\.github\\scripts\\cherry_pick.py",
    "ast_data": "FunctionDef name:get_tracker_issues arg:org arg:project arg:onto_branch arguments arg arg arg Assign Call If Return return:no Assign Call If Return return:no Return return:yes Compare Call"
  },
  {
    "library": "tensorflow",
    "name": "cached_definition",
    "source_code": "@property\ndef cached_definition(self) -> function_pb2.FunctionDef:\n    if self._cached_definition is None:\n        self._cached_definition = self.definition\n    return self._cached_definition",
    "docstring": "Cached FunctionDef (not guaranteed to be fresh).",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\polymorphic_function\\atomic_function.py",
    "ast_data": "FunctionDef name:cached_definition arg:self arguments arg If Compare Assign Return return:yes"
  },
  {
    "library": "django",
    "name": "__init__",
    "source_code": "def __init__(self, regex, **kwargs):\n    kwargs.setdefault('strip', False)\n    super().__init__(**kwargs)\n    self._set_regex(regex)",
    "docstring": "regex can be either a string or a compiled regular expression object.",
    "type": "method",
    "file_path": "django\\django\\forms\\fields.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:regex arguments arg arg arg Call Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "scale_loss_for_distribution",
    "source_code": "def scale_loss_for_distribution(loss_value):\n    num_replicas = distribute_lib.get_strategy().num_replicas_in_sync\n    if num_replicas > 1:\n        loss_value *= 1.0 / num_replicas\n    return loss_value",
    "docstring": "Scales and returns the given loss value by the number of replicas.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\utils\\losses_utils.py",
    "ast_data": "FunctionDef name:scale_loss_for_distribution arg:loss_value arguments arg Assign Call If Compare Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "set_global",
    "source_code": "def set_global(self, global_qconfig_list: list[QConfigAny]) -> QConfigMultiMapping:\n    self._insert_qconfig_list('global_qconfig', [], global_qconfig_list)\n    return self",
    "docstring": "Set global QConfigs see :func: for more info",
    "type": "method",
    "file_path": "pytorch\\torch\\ao\\ns\\fx\\qconfig_multi_mapping.py",
    "ast_data": "FunctionDef name:set_global arg:self arg:global_qconfig_list arguments arg arg Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "AnonymousAxis",
    "source_code": "class AnonymousAxis:\n\n    def __init__(self, value: str) -> None:\n        self.value = int(value)\n        if self.value < 1:\n            raise ValueError(f'Anonymous axis should have positive length, not {self.value}')\n\n    def __repr__(self) -> str:\n        return f'{self.value}-axis'",
    "docstring": "Used by to represent an axis with a size (> 1), but no associated identifier. Note: Different instances of this class are not equal to each other, even if they have the same value.",
    "type": "class",
    "file_path": "pytorch\\functorch\\einops\\_parsing.py",
    "ast_data": "ClassDef name:AnonymousAxis FunctionDef name:__init__ arg:self arg:value arguments arg arg Assign Call If Compare Raise Call FunctionDef name:__repr__ arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_TensorArrayWriteGrad",
    "source_code": "@ops.RegisterGradient('TensorArrayWrite')\n@ops.RegisterGradient('TensorArrayWriteV2')\n@ops.RegisterGradient('TensorArrayWriteV3')\ndef _TensorArrayWriteGrad(op: ops.Operation, flow):\n    handle = op.inputs[0]\n    index = op.inputs[1]\n    dtype = op.get_attr('T')\n    grad_source = _GetGradSource(flow)\n    flow_out = array_ops.identity(op.outputs[0], 'flow_out')\n    with ops.control_dependencies([flow_out]):\n        flow = array_ops.identity(flow, 'write_barrier')\n    g = tensor_array_ops.TensorArray(dtype=dtype, handle=handle, flow=flow, colocate_with_first_write_call=False).grad(source=grad_source, flow=flow)\n    grad = g.read(index)\n    return [None, None, grad, flow]",
    "docstring": "Gradient for TensorArrayWrite. Args: op: Forward TensorArrayWrite op. flow: Gradient flow to TensorArrayWrite. Returns: A grad , the gradient created in an upstream ReadGrad or PackGrad.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\tensor_array_grad.py",
    "ast_data": "FunctionDef name:_TensorArrayWriteGrad arg:op arg:flow arguments arg arg Assign Assign Assign Call Assign Call Assign Call With Call Assign Call Assign Call Call Assign Call Return return:yes Call Call Call"
  },
  {
    "library": "pygame",
    "name": "display_capture_filter_properties",
    "source_code": "def display_capture_filter_properties(self):\n    self.dev.displaycapturefilterproperties()",
    "docstring": "Displays a dialog containing the property page of the capture filter. For VfW drivers you may find the option to select the resolution most likely here.",
    "type": "method",
    "file_path": "pygame\\src_py\\_camera_vidcapture.py",
    "ast_data": "FunctionDef name:display_capture_filter_properties arg:self arguments arg Call"
  },
  {
    "library": "scipy",
    "name": "contains",
    "source_code": "def contains(self, item, parameter_values=None):\n    parameter_values = parameter_values or {}\n    a, b = self.get_numerical_endpoints(parameter_values)\n    left_inclusive, right_inclusive = self.inclusive\n    in_left = item >= a if left_inclusive else item > a\n    in_right = item <= b if right_inclusive else item < b\n    return in_left & in_right",
    "docstring": "Determine whether the argument is contained within the domain. Parameters ---------- item : ndarray The argument parameter_values : dict A dictionary that maps between string variable names and numerical values of parameters, which may define the endpoints. Returns ------- out : bool True if is within the domain; False otherwise.",
    "type": "method",
    "file_path": "scipy\\scipy\\stats\\_distribution_infrastructure.py",
    "ast_data": "FunctionDef name:contains arg:self arg:item arg:parameter_values arguments arg arg arg Assign BoolOp Assign Call Assign Assign Compare Compare Assign Compare Compare Return return:yes"
  },
  {
    "library": "numpy",
    "name": "_parse_policy_keepsort",
    "source_code": "def _parse_policy_keepsort(self, has_baseline, final_targets, extra_flags):\n    self.dist_log(\"policy 'keep_sort' is on, dispatch-able targets\", final_targets, \"\\nare 'not' sorted depend on the highest interest butas specified in the dispatch-able source or the extra group\")\n    return (has_baseline, final_targets, extra_flags)",
    "docstring": "leave a notice that $keep_sort is on",
    "type": "method",
    "file_path": "numpy\\numpy\\distutils\\ccompiler_opt.py",
    "ast_data": "FunctionDef name:_parse_policy_keepsort arg:self arg:has_baseline arg:final_targets arg:extra_flags arguments arg arg arg arg Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_validate_pruning_amount",
    "source_code": "def _validate_pruning_amount(amount, tensor_size):\n    if isinstance(amount, numbers.Integral) and amount > tensor_size:\n        raise ValueError(f'amount={amount} should be smaller than the number of parameters to prune={tensor_size}')",
    "docstring": "Validate that the pruning amount is meaningful wrt to the size of the data. Validation helper to check that the amount of parameters to prune is meaningful wrt to the size of the data (). Args: amount (int or float): quantity of parameters to prune. If float, should be between 0.0 and 1.0 and represent the fraction of parameters to prune. If int, it represents the absolute number of parameters to prune. tensor_size (int): absolute number of parameters in the tensor to prune.",
    "type": "function",
    "file_path": "pytorch\\torch\\nn\\utils\\prune.py",
    "ast_data": "FunctionDef name:_validate_pruning_amount arg:amount arg:tensor_size arguments arg arg If BoolOp Call Compare Raise Call"
  },
  {
    "library": "tensorflow",
    "name": "default_tpu_exit_fn",
    "source_code": "def default_tpu_exit_fn():\n    logging.info('Waiting for workers to exit...')\n    try:\n        context.context().get_config_key_value('BLOCK_TILL_EXIT')\n    except:\n        logging.info('Restarting cluster due to preemption.')\n        sys.exit(42)",
    "docstring": "Default exit function to run after saving checkpoint for TPUStrategy. For TPUStrategy, we want the coordinator to exit after workers are down so that restarted coordinator would not connect to workers scheduled to be preempted. This function achieves so by attempting to get a key-value store from coordination service, which will block until workers are done and then returns with error. Then we have the coordinator sys.exit(42) to re-schedule the job.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\failure_handling\\failure_handling_util.py",
    "ast_data": "FunctionDef name:default_tpu_exit_fn arguments Call Try Call Call ExceptHandler Call Call"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, dataset):\n    self.dataset = dataset\n    elem_spec = self.dataset.element_spec\n    _check_table_initializer_element_spec(elem_spec)\n    key_type = elem_spec[0].dtype\n    value_type = elem_spec[1].dtype\n    super(DatasetInitializer, self).__init__(key_type, value_type)",
    "docstring": "Creates a table initializer from a . Args: dataset: A object that produces tuples of scalars. The first scalar is treated as a key and the second as value. Raises: ValueError if doesn't conform to specifications. Returns: A object",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\data\\experimental\\ops\\lookup_ops.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:dataset arguments arg arg Assign Assign Call Assign Assign Call Call"
  },
  {
    "library": "pytorch",
    "name": "SupportLevel",
    "source_code": "class SupportLevel(Enum):\n    SUPPORTED = 1\n    NOT_SUPPORTED_YET = 0",
    "docstring": "Indicates at what stage the feature used in the example is handled in export.",
    "type": "class",
    "file_path": "pytorch\\torch\\_export\\db\\case.py",
    "ast_data": "ClassDef name:SupportLevel Assign Assign"
  },
  {
    "library": "django",
    "name": "savepoint",
    "source_code": "@async_unsafe\ndef savepoint(self):\n    if not self._savepoint_allowed():\n        return\n    thread_ident = _thread.get_ident()\n    tid = str(thread_ident).replace('-', '')\n    self.savepoint_state += 1\n    sid = 's%s_x%d' % (tid, self.savepoint_state)\n    self.validate_thread_sharing()\n    self._savepoint(sid)\n    return sid",
    "docstring": "Create a savepoint inside the current transaction. Return an identifier for the savepoint that will be used for the subsequent rollback or commit. Do nothing if savepoints are not supported.",
    "type": "method",
    "file_path": "django\\django\\db\\backends\\base\\base.py",
    "ast_data": "FunctionDef name:savepoint arg:self arguments arg If Call Return return:no Assign Call Assign Call Call Assign Call Call Return return:yes"
  },
  {
    "library": "kornia",
    "name": "_get_pyramid_gaussian_kernel",
    "source_code": "def _get_pyramid_gaussian_kernel() -> Tensor:\n    return tensor([[[1.0, 4.0, 6.0, 4.0, 1.0], [4.0, 16.0, 24.0, 16.0, 4.0], [6.0, 24.0, 36.0, 24.0, 6.0], [4.0, 16.0, 24.0, 16.0, 4.0], [1.0, 4.0, 6.0, 4.0, 1.0]]]) / 256.0",
    "docstring": "Return a pre-computed gaussian kernel.",
    "type": "function",
    "file_path": "kornia\\kornia\\geometry\\transform\\pyramid.py",
    "ast_data": "FunctionDef name:_get_pyramid_gaussian_kernel arguments Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "sharded_filename",
    "source_code": "def sharded_filename(self, filename_tensor, shard, num_shards):\n    return gen_io_ops.sharded_filename(filename_tensor, shard, num_shards)",
    "docstring": "Append sharding information to a filename. Args: filename_tensor: A string tensor. shard: Integer. The shard for the filename. num_shards: An int Tensor for the number of shards. Returns: A string tensor.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\training\\saver.py",
    "ast_data": "FunctionDef name:sharded_filename arg:self arg:filename_tensor arg:shard arg:num_shards arguments arg arg arg arg Return return:yes Call"
  },
  {
    "library": "numpy",
    "name": "baseclass",
    "source_code": "@property\ndef baseclass(self):\n    return self._baseclass",
    "docstring": "Class of the underlying data (read-only).",
    "type": "method",
    "file_path": "numpy\\numpy\\ma\\core.py",
    "ast_data": "FunctionDef name:baseclass arg:self arguments arg Return return:yes"
  },
  {
    "library": "django",
    "name": "add_post_render_callback",
    "source_code": "def add_post_render_callback(self, callback):\n    if self._is_rendered:\n        callback(self)\n    else:\n        self._post_render_callbacks.append(callback)",
    "docstring": "Add a new post-rendering callback. If the response has already been rendered, invoke the callback immediately.",
    "type": "method",
    "file_path": "django\\django\\template\\response.py",
    "ast_data": "FunctionDef name:add_post_render_callback arg:self arg:callback arguments arg arg If Call Call"
  },
  {
    "library": "pytorch",
    "name": "set_module_type",
    "source_code": "def set_module_type(self, module_type: Callable, quantization_config: QuantizationConfig):\n    self.module_type_config[module_type] = quantization_config\n    return self",
    "docstring": "Set quantization_config for a submodule with type: , for example: quantizer.set_module_name(Sub) or quantizer.set_module_name(nn.Linear), it will quantize all supported operator/operator patterns in the submodule with this module type with the given",
    "type": "method",
    "file_path": "pytorch\\torch\\ao\\quantization\\quantizer\\xnnpack_quantizer.py",
    "ast_data": "FunctionDef name:set_module_type arg:self arg:module_type arg:quantization_config arguments arg arg arg Assign Return return:yes"
  },
  {
    "library": "scipy",
    "name": "reset",
    "source_code": "def reset(self, n, has_jac):\n    pass",
    "docstring": "Prepare integrator for call: allocate memory, set flags, etc. n - number of equations. has_jac - if user has supplied function for evaluating Jacobian.",
    "type": "method",
    "file_path": "scipy\\scipy\\integrate\\_ode.py",
    "ast_data": "FunctionDef name:reset arg:self arg:n arg:has_jac arguments arg arg arg"
  },
  {
    "library": "scipy",
    "name": "matches",
    "source_code": "def matches(self, parameters):\n    return parameters == set(self.parameters.keys())",
    "docstring": "Checks whether the keyword arguments match the parameterization. Parameters ---------- parameters : set Set of names of parameters passed into the distribution as keyword arguments. Returns ------- out : bool True if the keyword arguments names match the names of the parameters of this parameterization.",
    "type": "method",
    "file_path": "scipy\\scipy\\stats\\_distribution_infrastructure.py",
    "ast_data": "FunctionDef name:matches arg:self arg:parameters arguments arg arg Return return:yes Compare Call Call"
  },
  {
    "library": "pytorch",
    "name": "has_frozen_params",
    "source_code": "def has_frozen_params(gm: torch.fx.GraphModule) -> bool:\n    return getattr(gm, '_has_frozen_params', False)",
    "docstring": "Return True if the gm has frozen parameters.",
    "type": "function",
    "file_path": "pytorch\\torch\\_inductor\\freezing_utils.py",
    "ast_data": "FunctionDef name:has_frozen_params arg:gm arguments arg Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "_solve_eigen_gram",
    "source_code": "def _solve_eigen_gram(self, alpha, y, sqrt_sw, X_mean, eigvals, Q, QT_y):\n    w = 1.0 / (eigvals + alpha)\n    if self.fit_intercept:\n        normalized_sw = sqrt_sw / np.linalg.norm(sqrt_sw)\n        intercept_dim = _find_smallest_angle(normalized_sw, Q)\n        w[intercept_dim] = 0\n    c = np.dot(Q, self._diag_dot(w, QT_y))\n    G_inverse_diag = self._decomp_diag(w, Q)\n    if len(y.shape) != 1:\n        G_inverse_diag = G_inverse_diag[:, np.newaxis]\n    return (G_inverse_diag, c)",
    "docstring": "Compute dual coefficients and diagonal of G^-1. Used when we have a decomposition of X.X^T (n_samples <= n_features).",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\linear_model\\_ridge.py",
    "ast_data": "FunctionDef name:_solve_eigen_gram arg:self arg:alpha arg:y arg:sqrt_sw arg:X_mean arg:eigvals arg:Q arg:QT_y arguments arg arg arg arg arg arg arg arg Assign If Assign Call Assign Call Assign Assign Call Call Assign Call If Compare Call Assign Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "_make_int_array",
    "source_code": "def _make_int_array():\n    return array.array(str('i'))",
    "docstring": "Construct an array.array of a type suitable for scipy.sparse indices.",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\feature_extraction\\text.py",
    "ast_data": "FunctionDef name:_make_int_array arguments Return return:yes Call Call"
  },
  {
    "library": "scipy",
    "name": "todok",
    "source_code": "def todok(self, copy=False):\n    return self.tocoo(copy=copy).todok(copy=False)",
    "docstring": "Convert this array/matrix to Dictionary Of Keys format. With copy=False, the data/indices may be shared between this array/matrix and the resultant dok_array/matrix.",
    "type": "method",
    "file_path": "scipy\\scipy\\sparse\\_base.py",
    "ast_data": "FunctionDef name:todok arg:self arg:copy arguments arg arg Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "bool",
    "source_code": "def bool(self):\n    return self._to(torch.bool)",
    "docstring": "Casts this storage to bool type.",
    "type": "method",
    "file_path": "pytorch\\torch\\storage.py",
    "ast_data": "FunctionDef name:bool arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "generate_qconfig_mapping",
    "source_code": "def generate_qconfig_mapping(self) -> QConfigMapping:\n    detector_qconfig_info_combined = self._generate_module_fqn_to_detector_info_mapping(self._update_detector_quantizaiton_qconfig_info)\n    mapping: QConfigMapping = self._generate_qconfig_mapping_helper(detector_qconfig_info_combined, self._quantization_config_generator)\n    return mapping",
    "docstring": "Generates a QConfigMapping based on the suggestions of the ModelReport API. The generated mapping encompasses all the different types of feedback from the different detectors all into one place. These configs are based on the suggestions provided by the ModelReport API and can only be generated once the reports have been generated. Returns a QConfigMapping for the quantization configuration Note: Throws exception if we try to generate mapping on model we already removed observers from Throws exception if we try to generate mapping without preparing for callibration",
    "type": "method",
    "file_path": "pytorch\\torch\\ao\\quantization\\fx\\_model_report\\model_report.py",
    "ast_data": "FunctionDef name:generate_qconfig_mapping arg:self arguments arg Assign Call Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "set_axisline_style",
    "source_code": "def set_axisline_style(self, axisline_style=None, **kwargs):\n    if axisline_style is None:\n        return AxislineStyle.pprint_styles()\n    if isinstance(axisline_style, AxislineStyle._Base):\n        self._axisline_style = axisline_style\n    else:\n        self._axisline_style = AxislineStyle(axisline_style, **kwargs)\n    self._init_line()",
    "docstring": "Set the axisline style. The new style is completely defined by the passed attributes. Existing style attributes are forgotten. Parameters ---------- axisline_style : str or None The line style, e.g. '->', optionally followed by a comma-separated list of attributes. Alternatively, the attributes can be provided as keywords. If *None* this returns a string containing the available styles. Examples -------- The following two commands are equal: >>> set_axisline_style(\"->,size=1.5\") >>> set_axisline_style(\"->\", size=1.5)",
    "type": "method",
    "file_path": "matplotlib\\lib\\mpl_toolkits\\axisartist\\axis_artist.py",
    "ast_data": "FunctionDef name:set_axisline_style arg:self arg:axisline_style arguments arg arg arg If Compare Return return:yes Call If Call Assign Assign Call Call"
  },
  {
    "library": "cherrypy",
    "name": "__str__",
    "source_code": "def __str__(self):\n    p = [';%s=%s' % (k, v) for k, v in self.params.items()]\n    return str('%s%s' % (self.value, ''.join(p)))",
    "docstring": "Render the HTTP header value as a string.",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\lib\\httputil.py",
    "ast_data": "FunctionDef name:__str__ arg:self arguments arg Assign Call Return return:yes Call Call"
  },
  {
    "library": "pandas",
    "name": "check_dict_or_set_indexers",
    "source_code": "def check_dict_or_set_indexers(key) -> None:\n    if isinstance(key, set) or (isinstance(key, tuple) and any((isinstance(x, set) for x in key))):\n        raise TypeError('Passing a set as an indexer is not supported. Use a list instead.')\n    if isinstance(key, dict) or (isinstance(key, tuple) and any((isinstance(x, dict) for x in key))):\n        raise TypeError('Passing a dict as an indexer is not supported. Use a list instead.')",
    "docstring": "Check if the indexer is or contains a dict or set, which is no longer allowed.",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\indexing.py",
    "ast_data": "FunctionDef name:check_dict_or_set_indexers arg:key arguments arg If BoolOp Call BoolOp Call Call Call Raise Call If BoolOp Call BoolOp Call Call Call Raise Call"
  },
  {
    "library": "tensorflow",
    "name": "PluginAsset",
    "source_code": "class PluginAsset(metaclass=abc.ABCMeta):\n    plugin_name = None\n\n    @abc.abstractmethod\n    def assets(self):\n        raise NotImplementedError()",
    "docstring": "This abstract base class allows TensorBoard to serialize assets to disk. Plugin authors are expected to extend the PluginAsset class, so that it: - has a unique plugin_name - provides an assets method that returns an {asset_name: asset_contents} dictionary. For now, asset_contents are strings, although we may add StringIO support later. LifeCycle of a PluginAsset instance: - It is constructed when get_plugin_asset is called on the class for the first time. - It is configured by code that follows the calls to get_plugin_asset - When the containing graph is serialized by the tf.compat.v1.summary.FileWriter, the writer calls assets and the PluginAsset instance provides its contents to be written to disk.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\summary\\plugin_asset.py",
    "ast_data": "ClassDef name:PluginAsset Assign FunctionDef name:assets arg:self arguments arg Raise Call"
  },
  {
    "library": "sphinx",
    "name": "_group_by_func",
    "source_code": "def _group_by_func(entry: tuple[str, _IndexEntry]) -> str:\n    key, (_targets, _sub_items, category_key) = entry\n    if category_key is not None:\n        return category_key\n    key = key.removeprefix('\\u200f')\n    letter = unicodedata.normalize('NFD', key[0])[0].upper()\n    if letter.isalpha() or letter == '_':\n        return letter\n    return _('Symbols')",
    "docstring": "Group the entries by letter or category key.",
    "type": "function",
    "file_path": "sphinx\\sphinx\\environment\\adapters\\indexentries.py",
    "ast_data": "FunctionDef name:_group_by_func arg:entry arguments arg Assign If Compare Return return:yes Assign Call Assign Call Call If BoolOp Call Compare Return return:yes Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "get_fig_manager",
    "source_code": "@classmethod\ndef get_fig_manager(cls, num):\n    manager = cls.figs.get(num, None)\n    if manager is not None:\n        cls.set_active(manager)\n    return manager",
    "docstring": "If manager number *num* exists, make it the active one and return it; otherwise return *None*.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\_pylab_helpers.py",
    "ast_data": "FunctionDef name:get_fig_manager arg:cls arg:num arguments arg arg Assign Call If Compare Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_get_num_slurm_tasks",
    "source_code": "def _get_num_slurm_tasks():\n    return int(_get_slurm_var('STEP_NUM_TASKS'))",
    "docstring": "Returns the number of SLURM tasks of the current job step. Returns: The number of tasks as an int",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\cluster_resolver\\slurm_cluster_resolver.py",
    "ast_data": "FunctionDef name:_get_num_slurm_tasks arguments Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "scatter_max",
    "source_code": "def scatter_max(self, sparse_delta, use_locking=False, name=None):\n    raise NotImplementedError",
    "docstring": "Updates this variable with the max of and itself. Args: sparse_delta: to use as an argument of max with this variable. use_locking: If , use locking during the operation. name: the name of the operation. Returns: The updated variable. Raises: TypeError: if is not an .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\variables.py",
    "ast_data": "FunctionDef name:scatter_max arg:self arg:sparse_delta arg:use_locking arg:name arguments arg arg arg arg Raise"
  },
  {
    "library": "matplotlib",
    "name": "__init__",
    "source_code": "def __init__(self, widthB=1.0, lengthB=0.2, angleB=None):\n    super().__init__(widthB=widthB, lengthB=lengthB, angleB=angleB)",
    "docstring": "Parameters ---------- widthB : float, default: 1.0 Width of the bracket. lengthB : float, default: 0.2 Length of the bracket. angleB : float, default: 0 degrees Orientation of the bracket, as a counterclockwise angle. 0 degrees means perpendicular to the line.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\patches.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:widthB arg:lengthB arg:angleB arguments arg arg arg arg Call Call"
  },
  {
    "library": "tensorflow",
    "name": "super_in_original_context",
    "source_code": "def super_in_original_context(f, args, caller_fn_scope):\n    if args:\n        return f(*args)\n    ctx_frame = _find_originating_frame(caller_fn_scope, innermost=False)\n    type_arg = ctx_frame.f_locals['__class__']\n    self_arg_name = ctx_frame.f_code.co_varnames[0]\n    self_arg = ctx_frame.f_locals[self_arg_name]\n    return f(type_arg, self_arg)",
    "docstring": "Executes the super function in the context of a specified function. See for the exact details Args: f: Callable, typically the super builtin args: List[Any], the original call arguments caller_fn_scope: Optional[function_wrappers.FunctionScope], the function scope of the converted function in which this call was originally made Returns: The result of calling as if it was called in the frame indicated by .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\autograph\\operators\\py_builtins.py",
    "ast_data": "FunctionDef name:super_in_original_context arg:f arg:args arg:caller_fn_scope arguments arg arg arg If Return return:yes Call Assign Call Assign Assign Assign Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "median",
    "source_code": "@deprecate_nonkeyword_arguments(version='4.0', allowed_args=['self'], name='median')\ndef median(self, axis: Axis | None=0, skipna: bool=True, numeric_only: bool=False, **kwargs) -> Any:\n    return NDFrame.median(self, axis=axis, skipna=skipna, numeric_only=numeric_only, **kwargs)",
    "docstring": "Return the median of the values over the requested axis. Parameters ---------- axis : {index (0)} Axis for the function to be applied on. For this parameter is unused and defaults to 0. For DataFrames, specifying `numeric_onlyTrue` to avoid getting an error. >>> df = pd.DataFrame({\"a\": [1, 2], \"b\": [\"T\", \"Z\"]}, index=[\"tiger\", \"zebra\"]) >>> df.median(numeric_only=True) a 1.5 dtype: float64",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\series.py",
    "ast_data": "FunctionDef name:median arg:self arg:axis arg:skipna arg:numeric_only arguments arg arg arg arg arg Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "add_custom_scalars_multilinechart",
    "source_code": "def add_custom_scalars_multilinechart(self, tags, category='default', title='untitled'):\n    torch._C._log_api_usage_once('tensorboard.logging.add_custom_scalars_multilinechart')\n    layout = {category: {title: ['Multiline', tags]}}\n    self._get_file_writer().add_summary(custom_scalars(layout))",
    "docstring": "Shorthand for creating multilinechart. Similar to `` Examples:: writer.add_custom_scalars_multilinechart(['twse/0050', 'twse/2330'])",
    "type": "method",
    "file_path": "pytorch\\torch\\utils\\tensorboard\\writer.py",
    "ast_data": "FunctionDef name:add_custom_scalars_multilinechart arg:self arg:tags arg:category arg:title arguments arg arg arg arg Call Assign Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "temporal_padding",
    "source_code": "@dispatch.add_dispatch_support\n@doc_controls.do_not_generate_docs\ndef temporal_padding(x, padding=(1, 1)):\n    assert len(padding) == 2\n    pattern = [[0, 0], [padding[0], padding[1]], [0, 0]]\n    return array_ops.pad(x, pattern)",
    "docstring": "Pads the middle dimension of a 3D tensor. Args: x: Tensor or variable. padding: Tuple of 2 integers, how many zeros to add at the start and end of dim 1. Returns: A padded 3D tensor.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\backend.py",
    "ast_data": "FunctionDef name:temporal_padding arg:x arg:padding arguments arg arg Compare Call Assign Return return:yes Call"
  },
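The helper above is a thin wrapper over `tf.pad`; a sketch showing the pad pattern it builds, assuming TF 2.x:

```python
import tensorflow as tf

x = tf.ones((2, 3, 4))              # (batch, time, features)
pattern = [[0, 0], [1, 1], [0, 0]]  # pad only the middle (time) dimension
padded = tf.pad(x, pattern)
print(padded.shape)                 # (2, 5, 4)
```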
  {
    "library": "pytorch",
    "name": "advance_subsystem",
    "source_code": "@classmethod\ndef advance_subsystem(cls, curr_backend: str, curr_subsystem: Subsystem) -> Optional[Subsystem]:\n    print(f'Disabling {curr_subsystem.name} did not fix the issue.')\n    current_subsystems = BACKENDS[curr_backend]\n    current_subsystem_index = next((i for i, subsystem in enumerate(current_subsystems) if subsystem.name == curr_subsystem.name))\n    if current_subsystem_index < len(current_subsystems) - 1:\n        next_subsystem = current_subsystems[current_subsystem_index + 1]\n        cls.update_bisect_status(curr_backend, next_subsystem.name)\n        cls.update_run_state(curr_backend, next_subsystem, 'test_disable')\n        print(f'Moving to the next subsystem: {curr_backend} - {next_subsystem.name}')\n        return next_subsystem\n    else:\n        print(f'All subsystems in {curr_backend} have been checked. The issue is not in this system.')\n        return None",
    "docstring": "Tries to move to the next subsystem within the current system.",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\compiler_bisector.py",
    "ast_data": "FunctionDef name:advance_subsystem arg:cls arg:curr_backend arg:curr_subsystem arguments arg arg arg Call Assign Assign Call Call Compare If Compare Call Assign Call Call Call Return return:yes Call Return return:no"
  },
  {
    "library": "django",
    "name": "builtin_template_path",
    "source_code": "def builtin_template_path(name):\n    return Path(__file__).parent / 'templates' / name",
    "docstring": "Return a path to a builtin template. Avoid calling this function at the module level or in a class-definition because __file__ may not exist, e.g. in frozen environments.",
    "type": "function",
    "file_path": "django\\django\\views\\debug.py",
    "ast_data": "FunctionDef name:builtin_template_path arg:name arguments arg Return return:yes Call"
  },
  {
    "library": "scrapy",
    "name": "url_is_from_spider",
    "source_code": "def url_is_from_spider(url: UrlT, spider: type[Spider]) -> bool:\n    return url_is_from_any_domain(url, [spider.name, *getattr(spider, 'allowed_domains', [])])",
    "docstring": "Return True if the url belongs to the given spider",
    "type": "function",
    "file_path": "scrapy\\scrapy\\utils\\url.py",
    "ast_data": "FunctionDef name:url_is_from_spider arg:url arg:spider arguments arg arg Return return:yes Call Call"
  },
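A usage sketch; the spider's `name` is matched as a domain alongside `allowed_domains` (the class below is hypothetical):

```python
from scrapy import Spider
from scrapy.utils.url import url_is_from_spider

class ExampleSpider(Spider):
    name = "example.com"
    allowed_domains = ["sub.example.org"]

print(url_is_from_spider("https://www.example.com/page", ExampleSpider))  # True
print(url_is_from_spider("https://other.net/", ExampleSpider))            # False
```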
  {
    "library": "pandas",
    "name": "_min_max",
    "source_code": "def _min_max(self, kind: Literal['min', 'max'], skipna: bool) -> Scalar:\n    valid_vals = self._valid_sp_values\n    has_nonnull_fill_vals = not self._null_fill_value and self.sp_index.ngaps > 0\n    if len(valid_vals) > 0:\n        sp_min_max = getattr(valid_vals, kind)()\n        if has_nonnull_fill_vals:\n            func = max if kind == 'max' else min\n            return func(sp_min_max, self.fill_value)\n        elif skipna:\n            return sp_min_max\n        elif self.sp_index.ngaps == 0:\n            return sp_min_max\n        else:\n            return na_value_for_dtype(self.dtype.subtype, compat=False)\n    elif has_nonnull_fill_vals:\n        return self.fill_value\n    else:\n        return na_value_for_dtype(self.dtype.subtype, compat=False)",
    "docstring": "Min/max of non-NA/null values Parameters ---------- kind : {\"min\", \"max\"} skipna : bool Returns ------- scalar",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\arrays\\sparse\\array.py",
    "ast_data": "FunctionDef name:_min_max arg:self arg:kind arg:skipna arguments arg arg arg Assign Assign BoolOp Compare If Compare Call Assign Call Call If Assign Compare Return return:yes Call If Return return:yes If Compare Return return:yes Return return:yes Call If Return return:yes Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "all_to_all",
    "source_code": "def all_to_all(x, concat_dimension, split_dimension, split_count, group_assignment=None, name=None):\n    if group_assignment is None:\n        group_assignment = _create_default_group_assignment()\n    return gen_tpu_ops.all_to_all(x, group_assignment, concat_dimension=concat_dimension, split_dimension=split_dimension, split_count=split_count, name=name)",
    "docstring": "Exchange data across TPU replicas. Args: x: The local tensor. concat_dimension: The dimension number to concatenate. split_dimension: The dimension number to split. split_count: The number of splits, this number must equal to the sub-group size(group_assignment.get_shape()[1]) group_assignment: Optional 2d int32 lists with shape [num_groups, num_replicas_per_group]. represents the replica ids in the ith subgroup. name: Optional op name. Returns: A which is concatenated by data from different replicas.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\tpu\\ops\\tpu_ops.py",
    "ast_data": "FunctionDef name:all_to_all arg:x arg:concat_dimension arg:split_dimension arg:split_count arg:group_assignment arg:name arguments arg arg arg arg arg arg If Compare Assign Call Return return:yes Call"
  },
  {
    "library": "django",
    "name": "get_autocomplete_fields",
    "source_code": "def get_autocomplete_fields(self, request):\n    return self.autocomplete_fields",
    "docstring": "Return a list of ForeignKey and/or ManyToMany fields which should use an autocomplete widget.",
    "type": "method",
    "file_path": "django\\django\\contrib\\admin\\options.py",
    "ast_data": "FunctionDef name:get_autocomplete_fields arg:self arg:request arguments arg arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "bytes_to_readable_str",
    "source_code": "def bytes_to_readable_str(num_bytes, include_b=False):\n    if num_bytes is None:\n        return str(num_bytes)\n    if num_bytes < 1024:\n        result = '%d' % num_bytes\n    elif num_bytes < 1048576:\n        result = '%.2fk' % (num_bytes / 1024.0)\n    elif num_bytes < 1073741824:\n        result = '%.2fM' % (num_bytes / 1048576.0)\n    else:\n        result = '%.2fG' % (num_bytes / 1073741824.0)\n    if include_b:\n        result += 'B'\n    return result",
    "docstring": "Generate a human-readable string representing number of bytes. The units B, kB, MB and GB are used. Args: num_bytes: ( or None) Number of bytes. include_b: () Include the letter B at the end of the unit. Returns: () A string representing the number of bytes in a human-readable way, including a unit at the end.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\debug\\cli\\cli_shared.py",
    "ast_data": "FunctionDef name:bytes_to_readable_str arg:num_bytes arg:include_b arguments arg arg If Compare Return return:yes Call If Compare Assign If Compare Assign If Compare Assign Assign If Return return:yes"
  },
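Expected outputs at the documented thresholds (a sketch; the function lives in a private TF debugger-CLI module, so the import mirrors the file path above):

```python
from tensorflow.python.debug.cli.cli_shared import bytes_to_readable_str

print(bytes_to_readable_str(512))                       # '512'
print(bytes_to_readable_str(2048))                      # '2.00k'
print(bytes_to_readable_str(1048576, include_b=True))   # '1.00MB'
```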
  {
    "library": "tensorflow",
    "name": "repeat",
    "source_code": "@tf_export('repeat')\n@dispatch.add_dispatch_support\ndef repeat(input, repeats, axis=None, name=None):\n    if axis is None:\n        input = reshape(input, [-1])\n        axis = 0\n    return repeat_with_axis(input, repeats, axis, name)",
    "docstring": "Repeat elements of . See also , , . Args: input: An -dimensional Tensor. repeats: An 1-D Tensor. The number of repetitions for each element. repeats is broadcasted to fit the shape of the given axis. must equal if axis is not None. axis: An int. The axis along which to repeat values. By default, (axis=None), use the flattened input array, and return a flat output array. name: A name for the operation. Returns: A Tensor which has the same shape as , except along the given axis. If axis is None then the output array is flattened to match the flattened input array. Example usage: >>> repeat(['a', 'b', 'c'], repeats=[3, 0, 2], axis=0) >>> repeat([[1, 2], [3, 4]], repeats=[2, 3], axis=0) >>> repeat([[1, 2], [3, 4]], repeats=[2, 3], axis=1) >>> repeat(3, repeats=4) >>> repeat([[1,2], [3,4]], repeats=2)",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\array_ops.py",
    "ast_data": "FunctionDef name:repeat arg:input arg:repeats arg:axis arg:name arguments arg arg arg arg If Compare Assign Call Assign Return return:yes Call Call"
  },
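The docstring's examples with their results spelled out, following the `tf.repeat` semantics above:

```python
import tensorflow as tf

tf.repeat(['a', 'b', 'c'], repeats=[3, 0, 2], axis=0)
# [b'a', b'a', b'a', b'c', b'c'] -- 'b' repeated zero times is dropped
tf.repeat([[1, 2], [3, 4]], repeats=[2, 3], axis=0)
# rows repeated 2x and 3x: [[1, 2], [1, 2], [3, 4], [3, 4], [3, 4]]
tf.repeat(3, repeats=4)
# axis=None flattens the scalar first: [3, 3, 3, 3]
```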
  {
    "library": "pandas",
    "name": "select_coords",
    "source_code": "def select_coords(self):\n    start, stop = (self.start, self.stop)\n    nrows = self.table.nrows\n    if start is None:\n        start = 0\n    elif start < 0:\n        start += nrows\n    if stop is None:\n        stop = nrows\n    elif stop < 0:\n        stop += nrows\n    if self.condition is not None:\n        return self.table.table.get_where_list(self.condition.format(), start=start, stop=stop, sort=True)\n    elif self.coordinates is not None:\n        return self.coordinates\n    return np.arange(start, stop)",
    "docstring": "generate the selection",
    "type": "method",
    "file_path": "pandas\\pandas\\io\\pytables.py",
    "ast_data": "FunctionDef name:select_coords arg:self arguments arg Assign Assign If Compare Assign If Compare If Compare Assign If Compare If Compare Return return:yes Call Call If Compare Return return:yes Return return:yes Call"
  },
  {
    "library": "numpy",
    "name": "_reflected_binary_method",
    "source_code": "def _reflected_binary_method(ufunc, name):\n\n    def func(self, other):\n        if _disables_array_ufunc(other):\n            return NotImplemented\n        return ufunc(other, self)\n    func.__name__ = f'__r{name}__'\n    return func",
    "docstring": "Implement a reflected binary method with a ufunc, e.g., __radd__.",
    "type": "function",
    "file_path": "numpy\\numpy\\lib\\mixins.py",
    "ast_data": "FunctionDef name:_reflected_binary_method arg:ufunc arg:name arguments arg arg FunctionDef name:func arg:self arg:other arguments arg arg If Call Return return:yes Return return:yes Call Assign Return return:yes"
  },
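A sketch of how the generated reflected methods are exercised through `NDArrayOperatorsMixin` (the class name below is hypothetical):

```python
import numpy as np

class ArrayLike(np.lib.mixins.NDArrayOperatorsMixin):
    def __init__(self, value):
        self.value = np.asarray(value)

    def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
        # Unwrap ArrayLike operands, apply the ufunc, and rewrap the result.
        inputs = tuple(x.value if isinstance(x, ArrayLike) else x for x in inputs)
        return ArrayLike(getattr(ufunc, method)(*inputs, **kwargs))

# int.__add__ fails on ArrayLike, so __radd__ (generated as above)
# dispatches to np.add(2, self), which routes through __array_ufunc__.
print((2 + ArrayLike([1, 2])).value)  # [3 4]
```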
  {
    "library": "pytorch",
    "name": "save",
    "source_code": "@staticmethod\ndef save(kernel_src: str, future: CodeCacheFuture):\n    key = CompiledTritonKernels.key(kernel_src)\n    CompiledTritonKernels._cache[key] = future",
    "docstring": "Saves a compiled triton kernel to the cache. TODO: We store a LambdaFuture as that's the callable returned by async_compile.triton, but the real type we want to return here is actually an abstract triton kernel. TODO: Source code here is not just the kernel's source code, but also includes the inductor preamble, etc. so it could be less strict.",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\async_compile.py",
    "ast_data": "FunctionDef name:save arg:kernel_src arg:future arguments arg arg Assign Call Assign"
  },
  {
    "library": "scikit-learn",
    "name": "_get_feature_importances",
    "source_code": "def _get_feature_importances(estimator, getter, transform_func=None, norm_order=1):\n    if isinstance(getter, str):\n        if getter == 'auto':\n            if hasattr(estimator, 'coef_'):\n                getter = attrgetter('coef_')\n            elif hasattr(estimator, 'feature_importances_'):\n                getter = attrgetter('feature_importances_')\n            else:\n                raise ValueError(f\"when `importance_getter=='auto'`, the underlying estimator {estimator.__class__.__name__} should have `coef_` or `feature_importances_` attribute. Either pass a fitted estimator to feature selector or call fit before calling transform.\")\n        else:\n            getter = attrgetter(getter)\n    elif not callable(getter):\n        raise ValueError('`importance_getter` has to be a string or `callable`')\n    importances = getter(estimator)\n    if transform_func is None:\n        return importances\n    elif transform_func == 'norm':\n        if importances.ndim == 1:\n            importances = np.abs(importances)\n        else:\n            importances = np.linalg.norm(importances, axis=0, ord=norm_order)\n    elif transform_func == 'square':\n        if importances.ndim == 1:\n            importances = safe_sqr(importances)\n        else:\n            importances = safe_sqr(importances).sum(axis=0)\n    else:\n        raise ValueError(\"Valid values for `transform_func` are None, 'norm' and 'square'. Those two transformation are only supported now\")\n    return importances",
    "docstring": "Retrieve and aggregate (ndim > 1) the feature importances from an estimator. Also optionally applies transformation. Parameters ---------- estimator : estimator A scikit-learn estimator from which we want to get the feature importances. getter : \"auto\", str or callable An attribute or a callable to get the feature importance. If , is expected to expose or . transform_func : {\"norm\", \"square\"}, default=None The transform to apply to the feature importances. By default () no transformation is applied. norm_order : int, default=1 The norm order to apply when . Only applied when . Returns ------- importances : ndarray of shape (n_features,) The features importances, optionally transformed.",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\feature_selection\\_base.py",
    "ast_data": "FunctionDef name:_get_feature_importances arg:estimator arg:getter arg:transform_func arg:norm_order arguments arg arg arg arg If Call If Compare If Call Assign Call If Call Assign Call Raise Call Assign Call If Call Raise Call Assign Call If Compare Return return:yes If Compare If Compare Assign Call Assign Call If Compare If Compare Assign Call Assign Call Call Raise Call Return return:yes"
  },
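This private helper backs `importance_getter` resolution in the feature selectors; a usage sketch through the public `SelectFromModel` API:

```python
from sklearn.datasets import make_classification
from sklearn.feature_selection import SelectFromModel
from sklearn.linear_model import LogisticRegression

X, y = make_classification(n_features=8, random_state=0)
# importance_getter="auto" resolves to coef_ for a linear model;
# multi-dimensional coefs are aggregated with the "norm" transform.
selector = SelectFromModel(LogisticRegression(), importance_getter="auto")
print(selector.fit(X, y).get_support())
```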
  {
    "library": "scikit-learn",
    "name": "_validate_estimator",
    "source_code": "def _validate_estimator(self, default=None):\n    if self.estimator is not None:\n        self.estimator_ = self.estimator\n    else:\n        self.estimator_ = default",
    "docstring": "Check the base estimator. Sets the attributes.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\ensemble\\_base.py",
    "ast_data": "FunctionDef name:_validate_estimator arg:self arg:default arguments arg arg If Compare Assign Assign"
  },
  {
    "library": "pandas",
    "name": "infer_freq",
    "source_code": "def infer_freq(index: DatetimeIndex | TimedeltaIndex | Series | DatetimeLikeArrayMixin) -> str | None:\n    from pandas.core.api import DatetimeIndex\n    if isinstance(index, ABCSeries):\n        values = index._values\n        if not (lib.is_np_dtype(values.dtype, 'mM') or isinstance(values.dtype, DatetimeTZDtype) or values.dtype == object):\n            raise TypeError(f'cannot infer freq from a non-convertible dtype on a Series of {index.dtype}')\n        index = values\n    inferer: _FrequencyInferer\n    if not hasattr(index, 'dtype'):\n        pass\n    elif isinstance(index.dtype, PeriodDtype):\n        raise TypeError('PeriodIndex given. Check the `freq` attribute instead of using infer_freq.')\n    elif lib.is_np_dtype(index.dtype, 'm'):\n        inferer = _TimedeltaFrequencyInferer(index)\n        return inferer.get_freq()\n    elif is_numeric_dtype(index.dtype):\n        raise TypeError(f'cannot infer freq from a non-convertible index of dtype {index.dtype}')\n    if not isinstance(index, DatetimeIndex):\n        index = DatetimeIndex(index)\n    inferer = _FrequencyInferer(index)\n    return inferer.get_freq()",
    "docstring": "Infer the most likely frequency given the input index. This method attempts to deduce the most probable frequency (e.g., 'D' for daily, 'H' for hourly) from a sequence of datetime-like objects. It is particularly useful when the frequency of a time series is not explicitly set or known but can be inferred from its values. Parameters ---------- index : DatetimeIndex, TimedeltaIndex, Series or array-like If passed a Series will use the values of the series (NOT THE INDEX). Returns ------- str or None None if no discernible frequency. Raises ------ TypeError If the index is not datetime-like. ValueError If there are fewer than three values. See Also -------- date_range : Return a fixed frequency DatetimeIndex. timedelta_range : Return a fixed frequency TimedeltaIndex with day as the default. period_range : Return a fixed frequency PeriodIndex. DatetimeIndex.freq : Return the frequency object if it is set, otherwise None. Examples -------- >>> idx = pd.date_range(start=\"2020/12/01\", end=\"2020/12/30\", periods=30) >>> pd.infer_freq(idx) 'D'",
    "type": "function",
    "file_path": "pandas\\pandas\\tseries\\frequencies.py",
    "ast_data": "FunctionDef name:infer_freq arg:index arguments arg If Call Assign If BoolOp Call Call Compare Raise Call Assign If Call If Call Raise Call If Call Assign Call Return return:yes Call If Call Raise Call If Call Assign Call Assign Call Return return:yes Call"
  },
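A short usage sketch (the exact frequency alias returned, e.g. 'h' vs 'H', depends on the pandas version):

```python
import pandas as pd

idx = pd.date_range("2024-01-01", periods=10, freq="D")
print(pd.infer_freq(idx))  # 'D'

s = pd.Series(pd.to_datetime(["2024-01-01", "2024-01-02", "2024-01-03"]))
print(pd.infer_freq(s))    # 'D' -- uses the Series values, not its index
```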
  {
    "library": "django",
    "name": "handle_fk_field",
    "source_code": "def handle_fk_field(self, obj, field):\n    raise NotImplementedError('subclasses of Serializer must provide a handle_fk_field() method')",
    "docstring": "Called to handle a ForeignKey field.",
    "type": "method",
    "file_path": "django\\django\\core\\serializers\\base.py",
    "ast_data": "FunctionDef name:handle_fk_field arg:self arg:obj arg:field arguments arg arg arg Raise Call"
  },
  {
    "library": "numpy",
    "name": "_splitzipext",
    "source_code": "def _splitzipext(self, filename):\n    if self._iszip(filename):\n        return os.path.splitext(filename)\n    else:\n        return (filename, None)",
    "docstring": "Split zip extension from filename and return filename. Returns ------- base, zip_ext : {tuple}",
    "type": "method",
    "file_path": "numpy\\numpy\\lib\\_datasource.py",
    "ast_data": "FunctionDef name:_splitzipext arg:self arg:filename arguments arg arg If Call Return return:yes Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "modification",
    "source_code": "def modification(self, subgraph_number: int, output_name: Optional[str], mask: Optional[str]=None, **fixed_inputs) -> str:\n    num = 0\n    out = None\n    scatters = []\n    while f'mod_{subgraph_number}_{num}' in self.subgraph_bodies:\n        num += 1\n    with self.create_subgraph_body(f'mod_{subgraph_number}_{num}'):\n        subgraph = self._get_subgraph(subgraph_number)\n        modification_handler = ModificationWrapper(self, subgraph_number, fixed_inputs, mask)\n        with V.set_ops_handler(modification_handler):\n            assert isinstance(subgraph, (ir.ComputedBuffer, list)), f'Expected the subgraph to be a ComputedBuffer or a List[ComputedBuffer], got {type(subgraph)}'\n            if isinstance(subgraph, list):\n                for scatter_graph in subgraph:\n                    scatters.append(self._handle_scatter_graph(scatter_graph))\n            elif isinstance(subgraph.data, ir.InputBuffer):\n                out = subgraph.data.make_loader()(())\n            else:\n                out = subgraph.data.inner_fn(())\n        self.codegen_body()\n        if output_name is not None:\n            assert isinstance(output_name, str)\n            assert out is not None\n            self.body.writeline(f'{output_name} = {out.value}')\n        else:\n            assert out is None\n            for scatter in scatters:\n                self.body.writeline(str(scatter))\n        body_val = self.body.getvalue()\n        self.cse.invalidate(OrderedSet())\n        return body_val",
    "docstring": "This creates a modification function for a subgraph. To use this inside a template, the first argument should specify which subgraph to codegen for Args: subgraph_number (int): The index of the subgraph in self.subgraphs output_name (Optional[str]): The name of the output variable to store the result in mask (Optional[str]): An optional mask to use for the store operation. If provided, this mask will be applied to the store.",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\select_algorithm.py",
    "ast_data": "FunctionDef name:modification arg:self arg:subgraph_number arg:output_name arg:mask arguments arg arg arg arg arg Assign Assign Assign While Compare With Call Assign Call Assign Call With Call Call Call If Call For Call Call If Call Assign Call Call Assign Call Call If Compare Call Compare Call Compare For Call Call Assign Call Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "get_schedule_param",
    "source_code": "@abc.abstractmethod\ndef get_schedule_param(self):\n    raise NotImplementedError",
    "docstring": "Abstract method that needs to be implemented by the child class. The expected return type should is a dictionary of name to schedule_param value The returned values will be updated in sparsifier when the scheduler step() function is called. Example: >>> def get_schedule_param(self): ... new_param = {} ... for name in self.sparsifier.data_groups.keys(): ... new_param[name] = self.sparsifier.data_groups[name][self.schedule_param] * 0.5 ... return new_param When the step() function is called, the value in self.sparsifier.data_groups[name][self.schedule_param] would be halved",
    "type": "method",
    "file_path": "pytorch\\torch\\ao\\pruning\\_experimental\\data_scheduler\\base_data_scheduler.py",
    "ast_data": "FunctionDef name:get_schedule_param arg:self arguments arg Raise"
  },
  {
    "library": "matplotlib",
    "name": "_on_key_press",
    "source_code": "def _on_key_press(self, event):\n    pass",
    "docstring": "Key press event handler - for widget-specific key press actions.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\widgets.py",
    "ast_data": "FunctionDef name:_on_key_press arg:self arg:event arguments arg arg"
  },
  {
    "library": "kornia",
    "name": "remap",
    "source_code": "def remap(image: Tensor, map_x: Tensor, map_y: Tensor, mode: str='bilinear', padding_mode: str='zeros', align_corners: Optional[bool]=None, normalized_coordinates: bool=False) -> Tensor:\n    KORNIA_CHECK_SHAPE(image, ['B', 'C', 'H', 'W'])\n    KORNIA_CHECK_SHAPE(map_x, ['B', 'H', 'W'])\n    KORNIA_CHECK_SHAPE(map_y, ['B', 'H', 'W'])\n    batch_size, _, height, width = image.shape\n    map_xy: Tensor = stack([map_x, map_y], -1)\n    if not normalized_coordinates:\n        map_xy = normalize_pixel_coordinates(map_xy, height, width)\n    map_xy = map_xy.expand(batch_size, -1, -1, -1)\n    return F.grid_sample(image, map_xy, mode=mode, padding_mode=padding_mode, align_corners=align_corners)",
    "docstring": "Apply a generic geometrical transformation to an image tensor. .. image:: _static/img/remap.png The function remap transforms the source tensor using the specified map: .. math:: \\text{dst}(x, y) = \\text{src}(map_x(x, y), map_y(x, y)) Args: image: the tensor to remap with shape (B, C, H, W). Where C is the number of channels. map_x: the flow in the x-direction in pixel coordinates. The tensor must be in the shape of (B, H, W). map_y: the flow in the y-direction in pixel coordinates. The tensor must be in the shape of (B, H, W). mode: interpolation mode to calculate output values `kornia.utils.create_meshgrid`.",
    "type": "function",
    "file_path": "kornia\\kornia\\geometry\\transform\\imgwarp.py",
    "ast_data": "FunctionDef name:remap arg:image arg:map_x arg:map_y arg:mode arg:padding_mode arg:align_corners arg:normalized_coordinates arguments arg arg arg arg arg arg arg Call Call Call Assign Call If Assign Call Assign Call Return return:yes Call"
  },
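An identity-mapping sketch using `kornia.utils.create_meshgrid` to build the coordinate maps:

```python
import torch
from kornia.geometry.transform import remap
from kornia.utils import create_meshgrid

img = torch.arange(16.0).view(1, 1, 4, 4)
grid = create_meshgrid(4, 4, normalized_coordinates=False)  # (1, 4, 4, 2), xy order
map_x, map_y = grid[..., 0], grid[..., 1]                   # each (1, 4, 4)
out = remap(img, map_x, map_y, align_corners=True)
print(torch.allclose(out, img))  # True -- the identity grid reproduces the input
```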
  {
    "library": "pytorch",
    "name": "handle_leaf",
    "source_code": "def handle_leaf(node: DecisionTreeNode, indent: str, unsafe_leaves: list[int]) -> str:\n    if node.id in unsafe_leaves:\n        return f'{indent}return None'\n    class_probas = node.class_probs\n    return f'{indent}return {best_probas_and_indices(class_probas)}'",
    "docstring": "This generates the code for a leaf node in the decision tree. If the leaf is unsafe, the learned heuristic will return \"unsure\" (i.e. None).",
    "type": "method",
    "file_path": "pytorch\\torchgen\\_autoheuristic\\ah_tree.py",
    "ast_data": "FunctionDef name:handle_leaf arg:node arg:indent arg:unsafe_leaves arguments arg arg arg If Compare Return return:yes Assign Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "squash_mask",
    "source_code": "def squash_mask(self, attach_sparsify_hook=True, **kwargs):\n    for name, configs in self.data_groups.items():\n        configs['hook'].remove()\n        configs.pop('hook')\n        self.data_groups[name]['hook_state'] = 'None'\n        if attach_sparsify_hook:\n            configs['hook'] = configs['layer'].register_forward_pre_hook(self._sparsify_hook(name))\n        configs['hook_state'] = 'sparsify'",
    "docstring": "Unregisters aggregate hook that was applied earlier and registers sparsification hooks if attach_sparsify_hook = True.",
    "type": "method",
    "file_path": "pytorch\\torch\\ao\\pruning\\_experimental\\activation_sparsifier\\activation_sparsifier.py",
    "ast_data": "FunctionDef name:squash_mask arg:self arg:attach_sparsify_hook arguments arg arg arg For Call Call Call Assign If Assign Call Call Assign"
  },
  {
    "library": "scipy",
    "name": "SR1",
    "source_code": "class SR1(FullHessianUpdateStrategy):\n\n    def __init__(self, min_denominator=1e-08, init_scale='auto'):\n        self.min_denominator = min_denominator\n        super().__init__(init_scale)\n\n    def _update_implementation(self, delta_x, delta_grad):\n        if self.approx_type == 'hess':\n            w = delta_x\n            z = delta_grad\n        else:\n            w = delta_grad\n            z = delta_x\n        Mw = self @ w\n        z_minus_Mw = z - Mw\n        denominator = np.dot(w, z_minus_Mw)\n        if np.abs(denominator) <= self.min_denominator * norm(w) * norm(z_minus_Mw):\n            return\n        if self.approx_type == 'hess':\n            self.B = self._syr(1 / denominator, z_minus_Mw, a=self.B)\n        else:\n            self.H = self._syr(1 / denominator, z_minus_Mw, a=self.H)",
    "docstring": "Symmetric-rank-1 Hessian update strategy. Parameters ---------- min_denominator : float This number, scaled by a normalization factor, defines the minimum denominator magnitude allowed in the update. When the condition is violated we skip the update. By default uses `` shaped, symmetric array is given, this array will be used. Otherwise an error is generated. Set it to 'auto' in order to use an automatic heuristic for choosing the initial scale. The heuristic is described in [1]_, p.143. The default is 'auto'. Notes ----- The update is based on the description in [1]_, p.144-146. References ---------- .. [1] Nocedal, Jorge, and Stephen J. Wright. \"Numerical optimization\" Second Edition (2006).",
    "type": "class",
    "file_path": "scipy\\scipy\\optimize\\_hessian_update_strategy.py",
    "ast_data": "ClassDef name:SR1 FunctionDef name:__init__ arg:self arg:min_denominator arg:init_scale arguments arg arg arg Assign Call Call FunctionDef name:_update_implementation arg:self arg:delta_x arg:delta_grad arguments arg arg arg If Compare Assign Assign Assign Assign Assign Assign Assign Call If Compare Call Call Call Return return:no If Compare Assign Call Assign Call"
  },
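SR1 is typically passed as the `hess` argument of `scipy.optimize.minimize` with the 'trust-constr' method; a minimal sketch:

```python
import numpy as np
from scipy.optimize import SR1, minimize

def rosen(x):
    # Classic Rosenbrock test function with minimum at (1, 1).
    return (1 - x[0]) ** 2 + 100 * (x[1] - x[0] ** 2) ** 2

res = minimize(rosen, x0=np.array([-1.0, 2.0]), method="trust-constr",
               jac="2-point", hess=SR1())
print(res.x)  # close to [1., 1.]
```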
  {
    "library": "kornia",
    "name": "from_axis_angle",
    "source_code": "@classmethod\ndef from_axis_angle(cls, axis_angle: Tensor) -> 'Quaternion':\n    return cls(axis_angle_to_quaternion(axis_angle))",
    "docstring": "Create a quaternion from axis-angle representation. Args: axis_angle: rotation vector of shape :math:. Example: >>> axis_angle = torch.tensor([[1., 0., 0.]]) >>> q = Quaternion.from_axis_angle(axis_angle) >>> q.data Parameter containing: tensor([[0.8776, 0.4794, 0.0000, 0.0000]], requires_grad=True)",
    "type": "method",
    "file_path": "kornia\\kornia\\geometry\\quaternion.py",
    "ast_data": "FunctionDef name:from_axis_angle arg:cls arg:axis_angle arguments arg arg Return return:yes Call Call"
  },
  {
    "library": "scipy",
    "name": "validation_policy",
    "source_code": "@property\ndef validation_policy(self):\n    return self._validation_policy",
    "docstring": "{None, \"skip_all\"}: Specifies the level of input validation to perform. Left unspecified, input validation is performed to ensure appropriate behavior in edge case (e.g. parameters out of domain, argument outside of distribution support, etc.) and improve consistency of output dtype, shape, etc. Use `` to avoid the computational overhead of these checks when rough edges are acceptable.",
    "type": "method",
    "file_path": "scipy\\scipy\\stats\\_distribution_infrastructure.py",
    "ast_data": "FunctionDef name:validation_policy arg:self arguments arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_write_source_string",
    "source_code": "def _write_source_string(self, module_name: str, src: str, is_package: bool=False):\n    extension = '/__init__.py' if is_package else '.py'\n    filename = module_name.replace('.', '/') + extension\n    self._write(filename, src)",
    "docstring": "Write `save_source_string`.",
    "type": "method",
    "file_path": "pytorch\\torch\\package\\package_exporter.py",
    "ast_data": "FunctionDef name:_write_source_string arg:self arg:module_name arg:src arg:is_package arguments arg arg arg arg Assign Assign Call Call"
  },
  {
    "library": "pytorch",
    "name": "_call",
    "source_code": "def _call(self, x):\n    raise NotImplementedError",
    "docstring": "Abstract method to compute forward transformation.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributions\\transforms.py",
    "ast_data": "FunctionDef name:_call arg:self arg:x arguments arg arg Raise"
  },
  {
    "library": "cryptography",
    "name": "name",
    "source_code": "@property\n@abc.abstractmethod\ndef name(self) -> str:\n    pass",
    "docstring": "The name of the curve. e.g. secp256r1.",
    "type": "method",
    "file_path": "cryptography\\src\\cryptography\\hazmat\\primitives\\asymmetric\\ec.py",
    "ast_data": "FunctionDef name:name arg:self arguments arg"
  },
  {
    "library": "tensorflow",
    "name": "_init_from_args",
    "source_code": "def _init_from_args(self, name, shared_name):\n    with ops.name_scope(name, 'CriticalSection', []) as name:\n        with ops.init_scope():\n            container = ops.get_default_graph()._container\n            if shared_name is None:\n                shared_name = name\n            if container is None:\n                container = ''\n            self._handle = gen_resource_variable_ops.mutex_v2(shared_name=shared_name, container=container, name=name)\n            self._signature = (container, shared_name or id(self._handle), _get_device_or_colocation(self._handle))\n    if not context.executing_eagerly():\n        ops.add_to_collections(CRITICAL_SECTIONS, self)",
    "docstring": "Initialize the CriticalSection from constructor arguments.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\critical_section_ops.py",
    "ast_data": "FunctionDef name:_init_from_args arg:self arg:name arg:shared_name arguments arg arg arg With Call With Call Assign Call If Compare Assign If Compare Assign Assign Call Assign BoolOp Call Call If Call Call"
  },
  {
    "library": "pytorch",
    "name": "tensor_default_op",
    "source_code": "@decorator(op)\ndef tensor_default_op(types, args=(), kwargs=None, pg=None):\n    if kwargs is None:\n        kwargs = {}\n    with torch._C.DisableTorchFunctionSubclass():\n        return op(*args, **kwargs)",
    "docstring": "Handles `` to avoid recursions.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\_shard\\common_op_utils.py",
    "ast_data": "FunctionDef name:tensor_default_op arg:types arg:args arg:kwargs arg:pg arguments arg arg arg arg If Compare Assign With Call Return return:yes Call Call"
  },
  {
    "library": "scipy",
    "name": "dstn",
    "source_code": "def dstn(x, type=2, shape=None, axes=None, norm=None, overwrite_x=False):\n    shape = _good_shape(x, shape, axes)\n    return _pocketfft.dstn(x, type, shape, axes, norm, overwrite_x)",
    "docstring": "Return multidimensional Discrete Sine Transform along the specified axes. Parameters ---------- x : array_like The input array. type : {1, 2, 3, 4}, optional Type of the DST (see Notes). Default type is 2. shape : int or array_like of ints or None, optional The shape of the result. If both and (see below) are None, is `shapeaxesshape`shape[i] >> import numpy as np >>> from scipy.fftpack import dstn, idstn >>> rng = np.random.default_rng() >>> y = rng.standard_normal((16, 16)) >>> np.allclose(y, idstn(dstn(y, norm='ortho'), norm='ortho')) True",
    "type": "function",
    "file_path": "scipy\\scipy\\fftpack\\_realtransforms.py",
    "ast_data": "FunctionDef name:dstn arg:x arg:type arg:shape arg:axes arg:norm arg:overwrite_x arguments arg arg arg arg arg arg Assign Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_tf_tensor_list_get_item",
    "source_code": "def _tf_tensor_list_get_item(target, i, opts):\n    if opts.element_dtype is None:\n        raise ValueError('cannot retrieve from a list without knowing its element type; use set_element_type to annotate it')\n    x = list_ops.tensor_list_get_item(target, i, element_dtype=opts.element_dtype)\n    return x",
    "docstring": "Overload of get_item that stages a Tensor list read.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\autograph\\operators\\slices.py",
    "ast_data": "FunctionDef name:_tf_tensor_list_get_item arg:target arg:i arg:opts arguments arg arg arg If Compare Raise Call Assign Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "NormalizeOperators",
    "source_code": "class NormalizeOperators(AnnotateTypesWithSchema):\n    binary_magic_method_remap: dict[Callable[[Any, Any], Any], Callable[[Any, Any], Any]] = {torch.add: operator.add, torch.mul: operator.mul, torch.sub: operator.sub, torch.div: operator.truediv, torch.floor_divide: operator.floordiv, torch.remainder: operator.mod, torch.eq: operator.eq, torch.ne: operator.ne, torch.lt: operator.lt, torch.le: operator.le, torch.gt: operator.gt, torch.ge: operator.ge}\n\n    def call_function(self, target: Target, args: tuple[Argument, ...], kwargs: dict[str, Any]):\n        assert callable(target)\n        if target in self.binary_magic_method_remap:\n            if len(args) != 2:\n                return super().call_function(target, args, kwargs)\n            lhs, rhs = args\n            return super().call_function(target=self.binary_magic_method_remap[target], args=(lhs, rhs), kwargs={})\n        return super().call_function(target, args, kwargs)",
    "docstring": "Normalize callsites that are different ways of \"spelling\" the same invocation into a single, canonical call. Currently supports: 1. Normalize operators (e.g. operator.add) to the ops they ultimately invoke (e.g. torch.add) when it is possible to statically reason that Example usage: m = torchvision.models.resnet18() traced = torch.fx.symbolic_trace(m) traced = NormalizeOperators(traced).transform()",
    "type": "class",
    "file_path": "pytorch\\torch\\fx\\experimental\\normalize.py",
    "ast_data": "ClassDef name:NormalizeOperators FunctionDef name:call_function arg:self arg:target arg:args arg:kwargs arguments arg arg arg arg Call If Compare If Compare Call Return return:yes Call Call Assign Return return:yes Call Call Return return:yes Call Call"
  },
  {
    "library": "matplotlib",
    "name": "get_transform",
    "source_code": "def get_transform(self):\n    return self._transform",
    "docstring": "Return the associated with this scale.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\scale.py",
    "ast_data": "FunctionDef name:get_transform arg:self arguments arg Return return:yes"
  },
  {
    "library": "django",
    "name": "BinaryPredicate",
    "source_code": "class BinaryPredicate(UnaryPredicate):\n    argtypes = [GEOM_PTR, GEOM_PTR]",
    "docstring": "For GEOS binary predicate functions.",
    "type": "class",
    "file_path": "django\\django\\contrib\\gis\\geos\\prototypes\\predicates.py",
    "ast_data": "ClassDef name:BinaryPredicate Assign"
  },
  {
    "library": "matplotlib",
    "name": "isinteractive",
    "source_code": "def isinteractive() -> bool:\n    return matplotlib.is_interactive()",
    "docstring": "Return whether plots are updated after every plotting command. The interactive mode is mainly useful if you build plots from the command line and want to see the effect of each command while you are building the figure. In interactive mode: - newly created figures will be shown immediately; - figures will automatically redraw on change; - will not block by default. In non-interactive mode: - newly created figures and changes to figures will not be reflected until explicitly asked to be; - will block by default. See Also -------- ion : Enable interactive mode. ioff : Disable interactive mode. show : Show all figures (and maybe block). pause : Show all figures, and block for a time.",
    "type": "function",
    "file_path": "matplotlib\\lib\\matplotlib\\pyplot.py",
    "ast_data": "FunctionDef name:isinteractive arguments Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "_create_table_setup",
    "source_code": "def _create_table_setup(self):\n    column_names_and_types = self._get_column_names_and_types(self._sql_type_name)\n    escape = _get_valid_sqlite_name\n    create_tbl_stmts = [escape(cname) + ' ' + ctype for cname, ctype, _ in column_names_and_types]\n    if self.keys is not None and len(self.keys):\n        if not is_list_like(self.keys):\n            keys = [self.keys]\n        else:\n            keys = self.keys\n        cnames_br = ', '.join([escape(c) for c in keys])\n        create_tbl_stmts.append(f'CONSTRAINT {self.name}_pk PRIMARY KEY ({cnames_br})')\n    if self.schema:\n        schema_name = self.schema + '.'\n    else:\n        schema_name = ''\n    create_stmts = ['CREATE TABLE ' + schema_name + escape(self.name) + ' (\\n' + ',\\n  '.join(create_tbl_stmts) + '\\n)']\n    ix_cols = [cname for cname, _, is_index in column_names_and_types if is_index]\n    if ix_cols:\n        cnames = '_'.join(ix_cols)\n        cnames_br = ','.join([escape(c) for c in ix_cols])\n        create_stmts.append('CREATE INDEX ' + escape('ix_' + self.name + '_' + cnames) + 'ON ' + escape(self.name) + ' (' + cnames_br + ')')\n    return create_stmts",
    "docstring": "Return a list of SQL statements that creates a table reflecting the structure of a DataFrame. The first entry will be a CREATE TABLE statement while the rest will be CREATE INDEX statements.",
    "type": "method",
    "file_path": "pandas\\pandas\\io\\sql.py",
    "ast_data": "FunctionDef name:_create_table_setup arg:self arguments arg Assign Call Assign Assign Call If BoolOp Compare Call If Call Assign Assign Assign Call Call Call If Assign Assign Assign Call Call Assign If Assign Call Assign Call Call Call Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "on_epoch_end",
    "source_code": "def on_epoch_end(self, epoch, logs=None):\n    logs = self._process_logs(logs)\n    for callback in self.callbacks:\n        callback.on_epoch_end(epoch, logs)",
    "docstring": "Calls the methods of its callbacks. This function should only be called during TRAIN mode. Args: epoch: Integer, index of epoch. logs: Dict, metric results for this training epoch, and for the validation epoch if validation is performed. Validation result keys are prefixed with .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\callbacks.py",
    "ast_data": "FunctionDef name:on_epoch_end arg:self arg:epoch arg:logs arguments arg arg arg Assign Call For Call"
  },
  {
    "library": "django",
    "name": "wkb",
    "source_code": "@property\ndef wkb(self):\n    return wkb_w(3 if self.hasz else 2).write(self)",
    "docstring": "Return the WKB (Well-Known Binary) representation of this Geometry as a Python memoryview. SRID and Z values are not included, use the property instead.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\geos\\geometry.py",
    "ast_data": "FunctionDef name:wkb arg:self arguments arg Return return:yes Call Call"
  },
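A usage sketch (requires the GEOS library that GeoDjango wraps):

```python
from django.contrib.gis.geos import GEOSGeometry

geom = GEOSGeometry("POINT (1 2)")
wkb = geom.wkb            # a Python memoryview over the WKB bytes
print(bytes(wkb).hex())   # hex-encoded Well-Known Binary, no SRID or Z values
```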
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, statistic_type, value=None) -> None:\n    self.statistic_type = statistic_type\n    self.value = value",
    "docstring": "Sets up the initial placeholders for the statistics.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\ops.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:statistic_type arg:value arguments arg arg arg Assign Assign"
  },
  {
    "library": "tensorflow",
    "name": "read_graph_execution_traces_event",
    "source_code": "def read_graph_execution_traces_event(self, locator):\n    file_index, offset = locator\n    graph_execution_traces_path = self._graph_execution_traces_paths[file_index]\n    with self._reader_read_locks[graph_execution_traces_path]:\n        proto_string = self._get_reader(graph_execution_traces_path).read(offset)[0]\n    return debug_event_pb2.DebugEvent.FromString(proto_string)",
    "docstring": "Read DebugEvent at given offset from given .graph_execution_traces file. Args: locator: A (file_index, offset) tuple that locates the DebugEvent containing the graph execution trace. Returns: A DebugEventProto. Raises: if offset is at a wrong location. if offset is out of range of the file.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\debug\\lib\\debug_events_reader.py",
    "ast_data": "FunctionDef name:read_graph_execution_traces_event arg:self arg:locator arguments arg arg Assign Assign With Assign Call Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "all_sum",
    "source_code": "def all_sum(tensors):\n    return _apply_all_reduce('sum', tensors)",
    "docstring": "Returns a list of tensors with the all-reduce sum across . The computation is done with an all-reduce operation, so if only some of the returned tensors are evaluated then the computation will hang. Args: tensors: The input tensors across which to sum; must be assigned to GPU devices. Returns: List of tensors, each with the sum of the input tensors, where tensor i has the same device as .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\nccl_ops.py",
    "ast_data": "FunctionDef name:all_sum arg:tensors arguments arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "flatten_inference_rule",
    "source_code": "@register_inference_rule(torch.flatten)\ndef flatten_inference_rule(n: Node):\n    assert isinstance(n.args[0], Node)\n    start_dim = 1\n    end_dim = -1\n    if len(n.args) > 1:\n        assert isinstance(n.args[1], int)\n        start_dim = n.args[1]\n    if len(n.args) > 2:\n        assert isinstance(n.args[2], int)\n        end_dim = n.args[2]\n    if n.args[0].type == Dyn and isinstance(n.type, TensorType):\n        n.args[0].type = expand_to_tensor_dim(n.args[0].type, len(n.type.__args__))\n    if isinstance(n.args[0].type, TensorType):\n        output_type = flatten_check(n.args[0].type, start_dim, end_dim)\n        n.type = get_greatest_upper_bound(output_type, n.type)\n    return n.type",
    "docstring": "Applies the flatten shape information to the input then gets the greatest upper bound of the resulting type and the existing type",
    "type": "function",
    "file_path": "pytorch\\torch\\fx\\experimental\\graph_gradual_typechecker.py",
    "ast_data": "FunctionDef name:flatten_inference_rule arg:n arguments arg Call Assign Assign If Compare Call Call Assign If Compare Call Call Assign If BoolOp Compare Call Assign Call Call If Call Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "django",
    "name": "bulk_batch_size",
    "source_code": "def bulk_batch_size(self, fields, objs):\n    fields = list(chain.from_iterable((field.fields if isinstance(field, models.CompositePrimaryKey) else [field] for field in fields)))\n    if len(fields) == 1:\n        return 500\n    elif len(fields) > 1:\n        return self.connection.features.max_query_params // len(fields)\n    else:\n        return len(objs)",
    "docstring": "SQLite has a variable limit defined by SQLITE_LIMIT_VARIABLE_NUMBER (reflected in max_query_params). If there's only a single field to insert, the limit is 500 (SQLITE_MAX_COMPOUND_SELECT).",
    "type": "method",
    "file_path": "django\\django\\db\\backends\\sqlite3\\operations.py",
    "ast_data": "FunctionDef name:bulk_batch_size arg:self arg:fields arg:objs arguments arg arg arg Assign Call Call Call If Compare Call Return return:yes If Compare Call Return return:yes Call Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "_validate_center_shape",
    "source_code": "def _validate_center_shape(self, X, centers):\n    if centers.shape[0] != self.n_clusters:\n        raise ValueError(f'The shape of the initial centers {centers.shape} does not match the number of clusters {self.n_clusters}.')\n    if centers.shape[1] != X.shape[1]:\n        raise ValueError(f'The shape of the initial centers {centers.shape} does not match the number of features of the data {X.shape[1]}.')",
    "docstring": "Check if centers is compatible with X and n_clusters.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\cluster\\_kmeans.py",
    "ast_data": "FunctionDef name:_validate_center_shape arg:self arg:X arg:centers arguments arg arg arg If Compare Raise Call If Compare Raise Call"
  },
  {
    "library": "pytorch",
    "name": "dcp_to_torch_save",
    "source_code": "def dcp_to_torch_save(dcp_checkpoint_dir: Union[str, os.PathLike], torch_save_path: Union[str, os.PathLike]):\n    sd: STATE_DICT_TYPE = {}\n    _load_state_dict(sd, storage_reader=FileSystemReader(dcp_checkpoint_dir), planner=_EmptyStateDictLoadPlanner(), no_dist=True)\n    torch.save(sd, torch_save_path)",
    "docstring": "Given a directory containing a DCP checkpoint, this function will convert it into a Torch save file. Args: dcp_checkpoint_dir: Directory containing the DCP checkpoint. torch_save_path: Filename to store the converted Torch save file. .. warning:: To avoid OOM, it's recommended to only run this function on a single rank.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\checkpoint\\format_utils.py",
    "ast_data": "FunctionDef name:dcp_to_torch_save arg:dcp_checkpoint_dir arg:torch_save_path arguments arg arg Call Call Call Call"
  },
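A minimal invocation sketch; the paths are hypothetical and, per the warning above, it should run on a single rank (the module is available in recent PyTorch releases):

```python
from torch.distributed.checkpoint.format_utils import dcp_to_torch_save

# Convert a sharded DCP checkpoint directory into a single torch.save file.
dcp_to_torch_save("checkpoints/step_1000", "checkpoints/step_1000.pt")
```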
  {
    "library": "django",
    "name": "sql_flush",
    "source_code": "def sql_flush(style, connection, reset_sequences=True, allow_cascade=False):\n    tables = connection.introspection.django_table_names(only_existing=True, include_views=False)\n    return connection.ops.sql_flush(style, tables, reset_sequences=reset_sequences, allow_cascade=allow_cascade)",
    "docstring": "Return a list of the SQL statements used to flush the database.",
    "type": "function",
    "file_path": "django\\django\\core\\management\\sql.py",
    "ast_data": "FunctionDef name:sql_flush arg:style arg:connection arg:reset_sequences arg:allow_cascade arguments arg arg arg arg Assign Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_get_concrete_function_garbage_collected",
    "source_code": "def _get_concrete_function_garbage_collected(self, *args, **kwargs):\n    with self._lock:\n        if self._variable_creation_config is None:\n            initializers = []\n            self._initialize(args, kwargs, add_initializers_to=initializers)\n            self._initialize_uninitialized_variables(initializers)\n    if self._created_variables:\n        return tracing_compilation.trace_function(args, kwargs, dataclasses.replace(self._no_variable_creation_config, bind_graph_to_function=True))\n    elif self._variable_creation_config is not None:\n        concrete = tracing_compilation.trace_function(args, kwargs, dataclasses.replace(self._variable_creation_config, bind_graph_to_function=True))\n        if self._created_variables:\n            raise ValueError('Creating variables on a non-first call to a function decorated with tf.function.')\n        return concrete",
    "docstring": "Returns a specialized to inputs and execution context. Unlike , the graph will be deleted when the returned function is deleted. It's useful to avoid creating a reference cycle when you know for sure that the graph will be no longer used without the returned function. Args: *args: inputs to specialize on. **kwargs: inputs to specialize on. Returns: A TensorFlow function which takes exactly one per argument. Raises: ValueError: if this object has not yet been called on concrete values.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\polymorphic_function\\polymorphic_function.py",
    "ast_data": "FunctionDef name:_get_concrete_function_garbage_collected arg:self arguments arg arg arg With If Compare Assign Call Call If Return return:yes Call Call If Compare Assign Call Call If Raise Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "get_kern_dist",
    "source_code": "def get_kern_dist(self, c1, c2):\n    name1, name2 = (self.get_name_char(c1), self.get_name_char(c2))\n    return self.get_kern_dist_from_name(name1, name2)",
    "docstring": "Return the kerning pair distance (possibly 0) for chars *c1* and *c2*.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\_afm.py",
    "ast_data": "FunctionDef name:get_kern_dist arg:self arg:c1 arg:c2 arguments arg arg arg Assign Call Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "get_weight_key_name",
    "source_code": "def get_weight_key_name(self):\n    raise NotImplementedError('not impl')",
    "docstring": "Return the key name for weights.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\tpu\\feature_column.py",
    "ast_data": "FunctionDef name:get_weight_key_name arg:self arguments arg Raise Call"
  },
  {
    "library": "tensorflow",
    "name": "tensor_name",
    "source_code": "@property\ndef tensor_name(self):\n    return _get_tensor_name(self.node_name, self.output_slot)",
    "docstring": "Name of the tensor watched by the debug op. Returns: () name, in the form of :",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\debug\\lib\\debug_data.py",
    "ast_data": "FunctionDef name:tensor_name arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "find_valid_index",
    "source_code": "def find_valid_index(how: str, is_valid: npt.NDArray[np.bool_]) -> int | None:\n    assert how in ['first', 'last']\n    if len(is_valid) == 0:\n        return None\n    if is_valid.ndim == 2:\n        is_valid = is_valid.any(axis=1)\n    if how == 'first':\n        idxpos = is_valid[:].argmax()\n    elif how == 'last':\n        idxpos = len(is_valid) - 1 - is_valid[::-1].argmax()\n    chk_notna = is_valid[idxpos]\n    if not chk_notna:\n        return None\n    return idxpos",
    "docstring": "Retrieves the positional index of the first valid value. Parameters ---------- how : {'first', 'last'} Use this parameter to change between the first or last valid index. is_valid: np.ndarray Mask to find na_values. Returns ------- int or None",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\missing.py",
    "ast_data": "FunctionDef name:find_valid_index arg:how arg:is_valid arguments arg arg Compare If Compare Call Return return:no If Compare Assign Call If Compare Assign Call If Compare Assign Call Call Assign If Return return:no Return return:yes"
  },
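The argmax-based scan above can be checked standalone with plain NumPy:

```python
import numpy as np

is_valid = np.array([False, True, True, False])
first = is_valid.argmax()                           # 1 -- first True position
last = len(is_valid) - 1 - is_valid[::-1].argmax()  # 2 -- last True position
print(first, last)
```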
  {
    "library": "pytorch",
    "name": "apply_random_seed",
    "source_code": "def apply_random_seed(datapipe: DataPipe, rng: torch.Generator) -> DataPipe:\n    graph = traverse_dps(datapipe)\n    all_pipes = get_all_graph_pipes(graph)\n    cache = set()\n    random_datapipes = []\n    for pipe in all_pipes:\n        if id(pipe) in cache:\n            continue\n        if _is_random_datapipe(pipe):\n            random_datapipes.append(pipe)\n            cache.add(id(pipe))\n    for pipe in random_datapipes:\n        random_seed = int(torch.empty((), dtype=torch.int64).random_(generator=rng).item())\n        pipe.set_seed(random_seed)\n    return datapipe",
    "docstring": "Traverse the graph of ``. Args: datapipe: DataPipe that needs to set randomness rng: Random number generator to generate random seeds",
    "type": "function",
    "file_path": "pytorch\\torch\\utils\\data\\graph_settings.py",
    "ast_data": "FunctionDef name:apply_random_seed arg:datapipe arg:rng arguments arg arg Assign Call Assign Call Assign Call Assign For If Compare Call If Call Call Call Call For Assign Call Call Call Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "prefer_static_value",
    "source_code": "def prefer_static_value(x):\n    static_x = tensor_util.constant_value(x)\n    if static_x is not None:\n        return static_x\n    return x",
    "docstring": "Return static value of tensor if available, else . Args: x: (already converted). Returns: Numpy array (if static value is obtainable), else .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\distributions\\util.py",
    "ast_data": "FunctionDef name:prefer_static_value arg:x arguments arg Assign Call If Compare Return return:yes Return return:yes"
  },
  {
    "library": "scipy",
    "name": "reset",
    "source_code": "def reset(self, func_wrapper, rng_gen, x0=None):\n    if x0 is None:\n        self.current_location = rng_gen.uniform(self.lower, self.upper, size=len(self.lower))\n    else:\n        self.current_location = np.copy(x0)\n    init_error = True\n    reinit_counter = 0\n    while init_error:\n        self.current_energy = func_wrapper.fun(self.current_location)\n        if self.current_energy is None:\n            raise ValueError('Objective function is returning None')\n        if not np.isfinite(self.current_energy):\n            if reinit_counter >= EnergyState.MAX_REINIT_COUNT:\n                init_error = False\n                message = 'Stopping algorithm because function create NaN or (+/-) infinity values even with trying new random parameters'\n                raise ValueError(message)\n            self.current_location = rng_gen.uniform(self.lower, self.upper, size=self.lower.size)\n            reinit_counter += 1\n        else:\n            init_error = False\n        if self.ebest is None and self.xbest is None:\n            self.ebest = self.current_energy\n            self.xbest = np.copy(self.current_location)",
    "docstring": "Initialize current location is the search domain. If is not provided, a random location within the bounds is generated.",
    "type": "method",
    "file_path": "scipy\\scipy\\optimize\\_dual_annealing.py",
    "ast_data": "FunctionDef name:reset arg:self arg:func_wrapper arg:rng_gen arg:x0 arguments arg arg arg arg If Compare Assign Call Call Assign Call Assign Assign While Assign Call If Compare Raise Call If Call If Compare Assign Assign Raise Call Assign Call Assign If BoolOp Compare Compare Assign Assign Call"
  },
  {
    "library": "numpy",
    "name": "array2string",
    "source_code": "@array_function_dispatch(_array2string_dispatcher, module='numpy')\ndef array2string(a, max_line_width=None, precision=None, suppress_small=None, separator=' ', prefix='', style=np._NoValue, formatter=None, threshold=None, edgeitems=None, sign=None, floatmode=None, suffix='', *, legacy=None):\n    overrides = _make_options_dict(precision, threshold, edgeitems, max_line_width, suppress_small, None, None, sign, formatter, floatmode, legacy)\n    options = format_options.get().copy()\n    options.update(overrides)\n    if options['legacy'] <= 113:\n        if style is np._NoValue:\n            style = repr\n        if a.shape == () and a.dtype.names is None:\n            return style(a.item())\n    elif style is not np._NoValue:\n        warnings.warn(\"'style' argument is deprecated and no longer functional except in 1.13 'legacy' mode\", DeprecationWarning, stacklevel=2)\n    if options['legacy'] > 113:\n        options['linewidth'] -= len(suffix)\n    if a.size == 0:\n        return '[]'\n    return _array2string(a, options, separator, prefix)",
    "docstring": "Return a string representation of an array. Parameters ---------- a : ndarray Input array. max_line_width : int, optional Inserts newlines if text is longer than . Defaults to `numpy.timedelta64numpy.datetime64numpy.voidnumpy.bytes_numpy.str_precisionprecisionprecisionprecisionprecisionFalseFalseformatterprecisionarray_reprarray_strarray2string` internally so keywords with the same name should work identically in all three functions. Examples -------- >>> import numpy as np >>> x = np.array([1e-16,1,2,3]) >>> np.array2string(x, precision=2, separator=',', ... suppress_small=True) '[0.,1.,2.,3.]' >>> x = np.arange(3.) >>> np.array2string(x, formatter={'float_kind':lambda x: \"%.2f\" % x}) '[0.00 1.00 2.00]' >>> x = np.arange(3) >>> np.array2string(x, formatter={'int':lambda x: hex(x)}) '[0x0 0x1 0x2]'",
    "type": "function",
    "file_path": "numpy\\numpy\\_core\\arrayprint.py",
    "ast_data": "FunctionDef name:array2string arg:a arg:max_line_width arg:precision arg:suppress_small arg:separator arg:prefix arg:style arg:formatter arg:threshold arg:edgeitems arg:sign arg:floatmode arg:suffix arguments arg arg arg arg arg arg arg arg arg arg arg arg arg arg Assign Call Assign Call Call Call If Compare If Compare Assign If BoolOp Compare Compare Return return:yes Call Call If Compare Call If Compare Call If Compare Return return:yes Return return:yes Call Call"
  },
  {
    "library": "kornia",
    "name": "quaternion_to_rotation_matrix",
    "source_code": "def quaternion_to_rotation_matrix(quaternion: Tensor) -> Tensor:\n    if not isinstance(quaternion, Tensor):\n        raise TypeError(f'Input type is not a Tensor. Got {type(quaternion)}')\n    if not quaternion.shape[-1] == 4:\n        raise ValueError(f'Input must be a tensor of shape (*, 4). Got {quaternion.shape}')\n    quaternion_norm: Tensor = normalize_quaternion(quaternion)\n    w = quaternion_norm[..., 0]\n    x = quaternion_norm[..., 1]\n    y = quaternion_norm[..., 2]\n    z = quaternion_norm[..., 3]\n    tx: Tensor = 2.0 * x\n    ty: Tensor = 2.0 * y\n    tz: Tensor = 2.0 * z\n    twx: Tensor = tx * w\n    twy: Tensor = ty * w\n    twz: Tensor = tz * w\n    txx: Tensor = tx * x\n    txy: Tensor = ty * x\n    txz: Tensor = tz * x\n    tyy: Tensor = ty * y\n    tyz: Tensor = tz * y\n    tzz: Tensor = tz * z\n    one: Tensor = tensor(1.0)\n    matrix_flat: Tensor = stack((one - (tyy + tzz), txy - twz, txz + twy, txy + twz, one - (txx + tzz), tyz - twx, txz - twy, tyz + twx, one - (txx + tyy)), dim=-1)\n    output_shape = [*list(quaternion.shape[:-1]), 3, 3]\n    matrix = matrix_flat.reshape(output_shape)\n    return matrix",
    "docstring": "Convert a quaternion to a rotation matrix. The quaternion should be in (w, x, y, z) format. Args: quaternion: a tensor containing a quaternion to be converted. The tensor can be of shape :math:. Return: the rotation matrix of shape :math:. Example: >>> quaternion = tensor((0., 0., 0., 1.)) >>> quaternion_to_rotation_matrix(quaternion) tensor([[-1., 0., 0.], [ 0., -1., 0.], [ 0., 0., 1.]])",
    "type": "function",
    "file_path": "kornia\\kornia\\geometry\\conversions.py",
    "ast_data": "FunctionDef name:quaternion_to_rotation_matrix arg:quaternion arguments arg If Call Raise Call Call If Compare Raise Call Call Assign Assign Assign Assign Call Call Assign Call Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_assign_add_flops",
    "source_code": "@ops.RegisterStatistics('AssignAdd', 'flops')\ndef _assign_add_flops(graph, node):\n    return _unary_op_flops(graph, node)",
    "docstring": "Compute flops for AssignAdd operation.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\profiler\\internal\\flops_registry.py",
    "ast_data": "FunctionDef name:_assign_add_flops arg:graph arg:node arguments arg arg Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "_get_param_to_param_id_from_optim_input",
    "source_code": "def _get_param_to_param_id_from_optim_input(model: nn.Module, optim_input: Optional[Union[list[dict[str, Any]], Iterable[nn.Parameter]]]=None) -> dict[nn.Parameter, int]:\n    param_id_to_param = _get_param_id_to_param_from_optim_input(model, optim_input)\n    return {param: param_id for param_id, param in param_id_to_param.items()}",
    "docstring": "Constructs the inverse mapping of :func:.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\fsdp\\_optim_utils.py",
    "ast_data": "FunctionDef name:_get_param_to_param_id_from_optim_input arg:model arg:optim_input arguments arg arg Assign Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "_get_abs_string_index",
    "source_code": "def _get_abs_string_index(self, idx):\n    idx = operator.index(idx)\n    if not -len(self) <= idx < len(self):\n        raise IndexError(f'index {idx} is out of range')\n    if idx < 0:\n        idx += len(self)\n    return str(idx)",
    "docstring": "Get the absolute index for the list of modules.",
    "type": "method",
    "file_path": "pytorch\\torch\\nn\\modules\\container.py",
    "ast_data": "FunctionDef name:_get_abs_string_index arg:self arg:idx arguments arg arg Assign Call If Compare Call Call Raise Call If Compare Call Return return:yes Call"
  },
  {
    "library": "django",
    "name": "date_format",
    "source_code": "def date_format(value, format=None, use_l10n=None):\n    return dateformat.format(value, get_format(format or 'DATE_FORMAT', use_l10n=use_l10n))",
    "docstring": "Format a datetime.date or datetime.datetime object using a localizable format. If use_l10n is provided and is not None, that will force the value to be localized (or not), otherwise it's always localized.",
    "type": "function",
    "file_path": "django\\django\\utils\\formats.py",
    "ast_data": "FunctionDef name:date_format arg:value arg:format arg:use_l10n arguments arg arg arg Return return:yes Call Call BoolOp"
  },
  {
    "library": "pytorch",
    "name": "apply",
    "source_code": "def apply(self, *args):\n    backward_fn = self._forward_cls.backward\n    vjp_fn = self._forward_cls.vjp\n    if backward_fn is not Function.backward and vjp_fn is not Function.vjp:\n        raise RuntimeError(\"Implementing both 'backward' and 'vjp' for a custom Function is not allowed. You should only implement one of them.\")\n    user_fn = vjp_fn if vjp_fn is not Function.vjp else backward_fn\n    return user_fn(self, *args)",
    "docstring": "Apply method used when executing this Node during the backward",
    "type": "method",
    "file_path": "pytorch\\torch\\autograd\\function.py",
    "ast_data": "FunctionDef name:apply arg:self arguments arg arg Assign Assign If BoolOp Compare Compare Raise Call Assign Compare Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "_validate_embedding_param",
    "source_code": "def _validate_embedding_param(args, kwargs):\n    input = args[0]\n    weight = args[1]\n    max_norm = kwargs.get('max_norm')\n    scale_grad_by_freq = kwargs.get('scale_grad_by_freq')\n    sparse = kwargs.get('sparse')\n    if not isinstance(input, torch.Tensor):\n        raise TypeError('input need to be torch.Tensor')\n    if not isinstance(weight, ShardedTensor):\n        raise TypeError('weight needs to be ShardedTensor')\n    weight_size = weight.size()\n    if len(weight_size) != 2:\n        raise ValueError('Weight needs to have exactly 2 dims')\n    if int(torch.min(input).item()) < 0:\n        raise ValueError('Index out of range in Input %d %d', int(torch.min(input).item()), weight_size[1])\n    if int(torch.max(input).item()) >= weight_size[0]:\n        raise ValueError('Index out of range in Input %d %d', int(torch.max(input).item()), weight_size[1])\n    if scale_grad_by_freq:\n        raise RuntimeError('nn.Embedding weight sharded with flag on \"scale_grad_by_freq\" not supported!')\n    if sparse:\n        raise RuntimeError('nn.Embedding weight sharded with flag on \"sparse\" not supported!')\n    if max_norm and max_norm <= 0.0:\n        raise ValueError('\"max_norm\" must be larger than zero!')\n    if not isinstance(weight._sharding_spec, ChunkShardingSpec):\n        raise ValueError('Only ChunkShardingSpec supported for ShardedTensor ops!')\n    if len(weight.local_shards()) != 1:\n        raise ValueError('Only one local shard supported!')",
    "docstring": "Validate input params of sharded embedding op. Args: input: list of ID used for lookup. weight: sharded weight tensor. kwargs: same as normal Embedding. Return: None.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\_shard\\sharding_spec\\chunk_sharding_spec_ops\\embedding.py",
    "ast_data": "FunctionDef name:_validate_embedding_param arg:args arg:kwargs arguments arg arg Assign Assign Assign Call Assign Call Assign Call If Call Raise Call If Call Raise Call Assign Call If Compare Call Raise Call If Compare Call Call Call Raise Call Call Call Call If Compare Call Call Call Raise Call Call Call Call If Raise Call If Raise Call If BoolOp Compare Raise Call If Call Raise Call If Compare Call Call Raise Call"
  },
  {
    "library": "pytorch",
    "name": "get_coordinate",
    "source_code": "def get_coordinate(self) -> Optional[list[int]]:\n    return self._coordinate_on_dim if self._coordinate_on_dim else None",
    "docstring": "Return the relative indices of this rank relative to all dimensions of the mesh. If this rank is not part of the mesh, return None.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\device_mesh.py",
    "ast_data": "FunctionDef name:get_coordinate arg:self arguments arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_extract_arch_version",
    "source_code": "def _extract_arch_version(arch_string: str):\n    base = arch_string.split('_')[1]\n    base = base.removesuffix('a')\n    return int(base)",
    "docstring": "Extracts the architecture string from a CUDA version",
    "type": "function",
    "file_path": "pytorch\\torch\\cuda\\__init__.py",
    "ast_data": "FunctionDef name:_extract_arch_version arg:arch_string arguments arg Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "batch_reduce",
    "source_code": "def batch_reduce(self, reduce_op, value_destination_pairs, options=None):\n    if options is None:\n        options = collective_util.Options()\n    if not _validate_value_destination_pairs(value_destination_pairs):\n        value_destination_pairs = _normalize_value_destination_pairs(value_destination_pairs)\n    for _, d in value_destination_pairs:\n        validate_destinations(d)\n    if self._num_between_graph_workers == 1 and _all_devices_match(value_destination_pairs, self._canonicalize_devices) and (len(value_destination_pairs[0][0].values) == 1):\n        return [distribute_utils.regroup(v.values, wrap_class=value_lib.Mirrored) for v, _ in value_destination_pairs]\n    if options is None:\n        options = collective_util.Options()\n    return self.batch_reduce_implementation(reduce_op, value_destination_pairs, options)",
    "docstring": "Reduce values to destinations in batches. See . This can only be called in the cross-replica context. Args: reduce_op: a specifying how values should be combined. value_destination_pairs: a sequence of (value, destinations) pairs. See for descriptions. options: a . See for details. Returns: A list of or , one per pair in . Raises: ValueError: if is not an iterable of tuples of and destinations.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\cross_device_ops.py",
    "ast_data": "FunctionDef name:batch_reduce arg:self arg:reduce_op arg:value_destination_pairs arg:options arguments arg arg arg arg If Compare Assign Call If Call Assign Call For Call If BoolOp Compare Call Compare Call Return return:yes Call If Compare Assign Call Return return:yes Call"
  },
  {
    "library": "cherrypy",
    "name": "CachingTool",
    "source_code": "class CachingTool(Tool):\n\n    def _wrapper(self, **kwargs):\n        request = cherrypy.serving.request\n        if _caching.get(**kwargs):\n            request.handler = None\n        elif request.cacheable:\n            request.hooks.attach('before_finalize', _caching.tee_output, priority=100)\n    _wrapper.priority = 90\n\n    def _setup(self):\n        conf = self._merged_args()\n        p = conf.pop('priority', None)\n        cherrypy.serving.request.hooks.attach('before_handler', self._wrapper, priority=p, **conf)",
    "docstring": "Caching Tool for CherryPy.",
    "type": "class",
    "file_path": "cherrypy\\cherrypy\\_cptools.py",
    "ast_data": "ClassDef name:CachingTool FunctionDef name:_wrapper arg:self arguments arg arg Assign If Call Assign If Call Assign FunctionDef name:_setup arg:self arguments arg Assign Call Assign Call Call"
  },
  {
    "library": "pytorch",
    "name": "pg_group_ranks",
    "source_code": "@property\ndef pg_group_ranks(self) -> dict[ProcessGroup, dict[int, int]]:\n    global _pg_group_ranks\n    return _pg_group_ranks",
    "docstring": "Process group's global rank to local rank mapping. TODO don't expose the map, expose fine grained ops",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\distributed_c10d.py",
    "ast_data": "FunctionDef name:pg_group_ranks arg:self arguments arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_generate_extern_kernel_common",
    "source_code": "def _generate_extern_kernel_common(self, kernel: ir.ExternKernel, out_ir_node: ir.IRNode) -> None:\n    tensor_nodes = tuple((self._generate_buffer(arg) for arg in kernel.inputs))\n    args = tensor_nodes + tuple(kernel.constant_args)\n    kwargs = kernel.kwargs.copy()\n    result_buffer: Optional[str] = None\n    if isinstance(kernel, ir.ExternKernelOut):\n        kwargs['out'] = self.buffer_to_node[out_ir_node.codegen_reference()]\n    elif isinstance(kernel.layout, (ir.Layout, ir.MultiOutputLayout)):\n        result_buffer = kernel.get_name()\n    elif isinstance(kernel.layout, ir.NoneLayout):\n        pass\n    else:\n        raise NotImplementedError(f'Unrecognized output layout: {kernel.layout}')\n    kernel_name = kernel.get_kernel_name()\n    module_name, kernel_name = kernel_name.split('.', 1)\n    op = globals()[module_name]\n    for subname in kernel_name.split('.'):\n        op = getattr(op, subname)\n    fx_node = self.gm.graph.call_function(op, args=args, kwargs=kwargs)\n    if result_buffer:\n        assert 'out' not in kwargs, f\"Extern kernel '{kernel}' has both result and out kwarg. Expected only one.\"\n        fx_node.name = result_buffer\n        self.buffer_to_node[result_buffer] = fx_node\n        arg_tensors = [arg.meta['val'] if isinstance(arg, torch.fx.Node) else arg for arg in args]\n        fx_node.meta['val'] = op(*arg_tensors, **kwargs)",
    "docstring": "Generates FX IR from either ExternKernelAlloc or ExternKernelOut.",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\codegen\\wrapper_fxir.py",
    "ast_data": "FunctionDef name:_generate_extern_kernel_common arg:self arg:kernel arg:out_ir_node arguments arg arg arg Assign Call Call Assign Call Assign Call If Call Assign Call If Call Assign Call If Call Raise Call Assign Call Assign Call Assign Call For Call Assign Call Assign Call If Compare Assign Assign Assign Call Assign Call"
  },
  {
    "library": "matplotlib",
    "name": "get_width_height",
    "source_code": "def get_width_height(self, *, physical=False):\n    return tuple((int(size / (1 if physical else self.device_pixel_ratio)) for size in self.figure.bbox.max))",
    "docstring": "Return the figure width and height in integral points or pixels. When the figure is used on High DPI screens (and the backend supports it), the truncation to integers occurs after scaling by the device pixel ratio. Parameters ---------- physical : bool, default: False Whether to return true physical pixels or logical pixels. Physical pixels may be used by backends that support HiDPI, but still configure the canvas using its actual size. Returns ------- width, height : int The size of the figure, in points or pixels, depending on the backend.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\backend_bases.py",
    "ast_data": "FunctionDef name:get_width_height arg:self arguments arg arg Return return:yes Call Call"
  },
  {
    "library": "django",
    "name": "__init__",
    "source_code": "def __init__(self, source, target):\n    if not isinstance(source, SpatialReference) or not isinstance(target, SpatialReference):\n        raise TypeError('source and target must be of type SpatialReference')\n    self.ptr = capi.new_ct(source._ptr, target._ptr)\n    self._srs1_name = source.name\n    self._srs2_name = target.name",
    "docstring": "Initialize on a source and target SpatialReference objects.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\gdal\\srs.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:source arg:target arguments arg arg arg If BoolOp Call Call Raise Call Assign Call Assign Assign"
  },
  {
    "library": "pytorch",
    "name": "apply_weights",
    "source_code": "def apply_weights(self, state_dict: dict[str, torch.Tensor]) -> None:\n    from torch.onnx._internal.exporter import _core\n    for name, tensor in state_dict.items():\n        if name in self.model.graph.initializers:\n            self.model.graph.initializers[name].const_value = _core.TorchTensor(tensor, name)\n        else:\n            warnings.warn(f\"Weight '{name}' not found in the model. Skipped applying.\", category=torch.onnx.errors.OnnxExporterWarning, stacklevel=1)",
    "docstring": "Apply the weights from the specified state dict to the ONNX model. Use this method to replace FakeTensors or other weights. Args: state_dict: The state dict containing the weights to apply to the ONNX model.",
    "type": "method",
    "file_path": "pytorch\\torch\\onnx\\_internal\\exporter\\_onnx_program.py",
    "ast_data": "FunctionDef name:apply_weights arg:self arg:state_dict arguments arg arg For Call If Compare Assign Call Call"
  },
  {
    "library": "scipy",
    "name": "reset",
    "source_code": "def reset(self) -> 'PoissonDisk':\n    super().reset()\n    self._initialize_grid_pool()\n    return self",
    "docstring": "Reset the engine to base state. Returns ------- engine : PoissonDisk Engine reset to its base state.",
    "type": "method",
    "file_path": "scipy\\scipy\\stats\\_qmc.py",
    "ast_data": "FunctionDef name:reset arg:self arguments arg Call Call Call Return return:yes"
  },
  {
    "library": "sphinx",
    "name": "add_row",
    "source_code": "def add_row(self) -> None:\n    self.current_line += 1\n    self.current_col = 0",
    "docstring": "Add a row to the table, to use with ``.",
    "type": "method",
    "file_path": "sphinx\\sphinx\\writers\\text.py",
    "ast_data": "FunctionDef name:add_row arg:self arguments arg Assign"
  },
  {
    "library": "tensorflow",
    "name": "children",
    "source_code": "def children(self, node_id):\n    return {child.local_name: child.node_id for child in self._object_graph_proto.nodes[node_id].children}",
    "docstring": "Returns all child trackables attached to obj. Args: node_id: Id of the node to return its children. Returns: Dictionary of all children attached to the object with name to node_id.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\checkpoint\\checkpoint_view.py",
    "ast_data": "FunctionDef name:children arg:self arg:node_id arguments arg arg Return return:yes"
  },
  {
    "library": "sphinx",
    "name": "SphinxDanglingReferences",
    "source_code": "class SphinxDanglingReferences(DanglingReferences):\n\n    def apply(self, **kwargs: Any) -> None:\n        try:\n            reporter = self.document.reporter\n            report_level = reporter.report_level\n            reporter.report_level = max(reporter.WARNING_LEVEL, reporter.report_level)\n            super().apply()\n        finally:\n            reporter.report_level = report_level",
    "docstring": "DanglingReferences transform which does not output info messages.",
    "type": "class",
    "file_path": "sphinx\\sphinx\\transforms\\references.py",
    "ast_data": "ClassDef name:SphinxDanglingReferences FunctionDef name:apply arg:self arguments arg arg Try Assign Assign Assign Call Call Call Assign"
  },
  {
    "library": "scipy",
    "name": "_validate_workers",
    "source_code": "def _validate_workers(workers: IntNumber=1) -> IntNumber:\n    workers = int(workers)\n    if workers == -1:\n        workers = os.cpu_count()\n        if workers is None:\n            raise NotImplementedError('Cannot determine the number of cpus using os.cpu_count(), cannot use -1 for the number of workers')\n    elif workers <= 0:\n        raise ValueError(f'Invalid number of workers: {workers}, must be -1 or > 0')\n    return workers",
    "docstring": "Validate based on platform and value. Parameters ---------- workers : int, optional Number of workers to use for parallel processing. If -1 is given all CPU threads are used. Default is 1. Returns ------- Workers : int Number of CPU used by the algorithm",
    "type": "function",
    "file_path": "scipy\\scipy\\stats\\_qmc.py",
    "ast_data": "FunctionDef name:_validate_workers arg:workers arguments arg Assign Call If Compare Assign Call If Compare Raise Call If Compare Raise Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "get_default_filetype",
    "source_code": "@classmethod\ndef get_default_filetype(cls):\n    return rcParams['savefig.format']",
    "docstring": "Return the default savefig file format as specified in :rc:. The returned string does not include a period. This method is overridden in backends that only support a single file type.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\backend_bases.py",
    "ast_data": "FunctionDef name:get_default_filetype arg:cls arguments arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "mark_inconsistent_side_effects",
    "source_code": "def mark_inconsistent_side_effects(self):\n    self.inconsistent_side_effects = True",
    "docstring": "InstructionTranslator has encountered instructions which may cause dynamo to see a different version of history from eager See:",
    "type": "method",
    "file_path": "pytorch\\torch\\_dynamo\\symbolic_convert.py",
    "ast_data": "FunctionDef name:mark_inconsistent_side_effects arg:self arguments arg Assign"
  },
  {
    "library": "tensorflow",
    "name": "_inspect_tensor",
    "source_code": "def _inspect_tensor(tensor):\n    if self._parameters.trace_mode == tensor_tracer_flags.TRACE_MODE_NAN_INF:\n        return cond.cond(math_ops.greater(tensor, 0.0), lambda: 'has NaNs/Infs!', lambda: 'has no NaNs or Infs.')\n    else:\n        return tensor",
    "docstring": "Returns the text to be printed for inspection output.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\tpu\\tensor_tracer.py",
    "ast_data": "FunctionDef name:_inspect_tensor arg:tensor arguments arg If Compare Return return:yes Call Call arguments arguments Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_always_wrap",
    "source_code": "def _always_wrap(strategy):\n    return strategy.extended._in_multi_worker_mode() or len(strategy.extended.worker_devices) > 1",
    "docstring": "Returns whether to always wrap the values in a DistributedValues.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\input_lib.py",
    "ast_data": "FunctionDef name:_always_wrap arg:strategy arguments arg Return return:yes BoolOp Call Compare Call"
  },
  {
    "library": "pytorch",
    "name": "_ConvNd",
    "source_code": "class _ConvNd(torch.nn.modules.conv._ConvNd, ReferenceQuantizedModule):\n    __annotations__ = {'bias': Optional[torch.Tensor]}\n    _IS_REFERENCE = True\n\n    @staticmethod\n    def from_float(cls, float_conv, weight_qparams):\n        qref_conv = cls(float_conv.in_channels, float_conv.out_channels, float_conv.kernel_size, float_conv.stride, float_conv.padding, float_conv.dilation, float_conv.groups, float_conv.bias is not None, float_conv.padding_mode, device=float_conv.weight.device, dtype=float_conv.weight.dtype, weight_qparams=weight_qparams)\n        qref_conv.weight = torch.nn.Parameter(float_conv.weight.detach())\n        if float_conv.bias is not None:\n            qref_conv.bias = torch.nn.Parameter(float_conv.bias.detach())\n        return qref_conv",
    "docstring": "A reference version of nn.quantized.Conv2d we will not pack the parameters in this module, since weight packing is an optimization for quantized backends supported in PyTorch (fbgemm/qnnpack), this is useful when user want to use this module in other backends like Glow.",
    "type": "class",
    "file_path": "pytorch\\torch\\ao\\nn\\quantized\\reference\\modules\\conv.py",
    "ast_data": "ClassDef name:_ConvNd Assign Assign FunctionDef name:from_float arg:cls arg:float_conv arg:weight_qparams arguments arg arg arg Assign Call Compare Assign Call Call If Compare Assign Call Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "print_foo",
    "source_code": "def print_foo(self, filename, **kwargs):\n    self.draw()",
    "docstring": "Write out format foo. This method is normally called via and , which take care of setting the figure facecolor, edgecolor, and dpi to the desired output values, and will restore them to the original values. Therefore, does not need to handle these settings.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\backends\\backend_template.py",
    "ast_data": "FunctionDef name:print_foo arg:self arg:filename arguments arg arg arg Call"
  },
  {
    "library": "pytorch",
    "name": "get_extra_state",
    "source_code": "def get_extra_state(self) -> Any:\n    raise RuntimeError('Reached a code path in Module.get_extra_state() that should never be called. Please file an issue at https://github.com/pytorch/pytorch/issues/new?template=bug-report.yml to report this bug.')",
    "docstring": "Return any extra state to include in the module's state_dict. Implement this and a corresponding :func: for your module if you need to store extra state. This function is called when building the module's . Note that extra state should be picklable to ensure working serialization of the state_dict. We only provide backwards compatibility guarantees for serializing Tensors; other objects may break backwards compatibility if their serialized pickled form changes. Returns: object: Any extra state to store in the module's state_dict",
    "type": "method",
    "file_path": "pytorch\\torch\\nn\\modules\\module.py",
    "ast_data": "FunctionDef name:get_extra_state arg:self arguments arg Raise Call"
  },
  {
    "library": "pandas",
    "name": "view",
    "source_code": "def view(self, dtype: Dtype | None=None) -> ArrayLike:\n    if dtype is not None:\n        raise NotImplementedError(dtype)\n    return self[:]",
    "docstring": "Return a view on the array. Parameters ---------- dtype : str, np.dtype, or ExtensionDtype, optional Default None. Returns ------- ExtensionArray or np.ndarray A view on the :class:'s data. See Also -------- api.extensions.ExtensionArray.ravel: Return a flattened view on input array. Index.view: Equivalent function for Index. ndarray.view: New view of array with the same data. Examples -------- This gives view on the underlying data of an `` will be reflected on the underlying data: >>> arr = pd.array([1, 2, 3]) >>> arr2 = arr.view() >>> arr[0] = 2 >>> arr2 [2, 2, 3] Length: 3, dtype: Int64",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\arrays\\base.py",
    "ast_data": "FunctionDef name:view arg:self arg:dtype arguments arg arg If Compare Raise Call Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "_load_coverage",
    "source_code": "def _load_coverage(F, header_length=6, dtype=np.int16):\n    header = [F.readline() for _ in range(header_length)]\n    make_tuple = lambda t: (t.split()[0], float(t.split()[1]))\n    header = dict([make_tuple(line) for line in header])\n    M = np.loadtxt(F, dtype=dtype)\n    nodata = int(header[b'NODATA_value'])\n    if nodata != -9999:\n        M[nodata] = -9999\n    return M",
    "docstring": "Load a coverage file from an open file object. This will return a numpy array of the given dtype",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\datasets\\_species_distributions.py",
    "ast_data": "FunctionDef name:_load_coverage arg:F arg:header_length arg:dtype arguments arg arg arg Assign Call Call Assign arguments arg Call Call Call Assign Call Call Assign Call Assign Call If Compare Assign Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "add_figure",
    "source_code": "def add_figure(self, tag: str, figure: Union['Figure', list['Figure']], global_step: Optional[int]=None, close: bool=True, walltime: Optional[float]=None) -> None:\n    torch._C._log_api_usage_once('tensorboard.logging.add_figure')\n    if isinstance(figure, list):\n        self.add_image(tag, figure_to_image(figure, close), global_step, walltime, dataformats='NCHW')\n    else:\n        self.add_image(tag, figure_to_image(figure, close), global_step, walltime, dataformats='CHW')",
    "docstring": "Render matplotlib figure into an image and add it to summary. Note that this requires the `` package. Args: tag: Data identifier figure: Figure or a list of figures global_step: Global step value to record close: Flag to automatically close the figure walltime: Optional override default walltime (time.time()) seconds after epoch of event",
    "type": "method",
    "file_path": "pytorch\\torch\\utils\\tensorboard\\writer.py",
    "ast_data": "FunctionDef name:add_figure arg:self arg:tag arg:figure arg:global_step arg:close arg:walltime arguments arg arg arg arg arg arg Call If Call Call Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "dispatch",
    "source_code": "@staticmethod\ndef dispatch(sm: 'SamplingMethod') -> SamplingType:\n    if sm == SamplingMethod.RANDOM:\n        return partial(SamplingMethod._generate_value_for_type, True)\n    elif sm == SamplingMethod.TOGGLE:\n        return partial(SamplingMethod._generate_value_for_type, False)\n    else:\n        raise ValueError(f'malformed sampling method: {sm}')",
    "docstring": "Returns a function that will generate values from a type, based on the SamplingMethod passed in.",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\fuzzer.py",
    "ast_data": "FunctionDef name:dispatch arg:sm arguments arg If Compare Return return:yes Call If Compare Return return:yes Call Raise Call"
  },
  {
    "library": "pytorch",
    "name": "_check_unpickable_fn",
    "source_code": "def _check_unpickable_fn(fn: Callable):\n    if not callable(fn):\n        raise TypeError(f'A callable function is expected, but {type(fn)} is provided.')\n    if isinstance(fn, functools.partial):\n        fn = fn.func\n    if _is_local_fn(fn) and (not dill_available()):\n        warnings.warn('Local function is not supported by pickle, please use regular python function or functools.partial instead.')\n        return\n    if hasattr(fn, '__name__') and fn.__name__ == '<lambda>' and (not dill_available()):\n        warnings.warn('Lambda function is not supported by pickle, please use regular python function or functools.partial instead.')\n        return",
    "docstring": "Check function is pickable or not. If it is a lambda or local function, a UserWarning will be raised. If it's not a callable function, a TypeError will be raised.",
    "type": "function",
    "file_path": "pytorch\\torch\\utils\\data\\datapipes\\utils\\common.py",
    "ast_data": "FunctionDef name:_check_unpickable_fn arg:fn arguments arg If Call Raise Call Call If Call Assign If BoolOp Call Call Call Return return:no If BoolOp Call Compare Call Call Return return:no"
  },
  {
    "library": "tensorflow",
    "name": "to_proto",
    "source_code": "def to_proto(self) -> saver_pb2.SaverDef:\n    filename_tensor = array_ops.placeholder(shape=[], dtype=dtypes.string, name='saver_filename')\n    save_tensor = self._traced_save(filename_tensor)\n    restore_op = self._traced_restore(filename_tensor).op\n    return saver_pb2.SaverDef(filename_tensor_name=filename_tensor.name, save_tensor_name=save_tensor.name, restore_op_name=restore_op.name, version=saver_pb2.SaverDef.V2)",
    "docstring": "Serializes to a SaverDef referencing the current graph.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\checkpoint\\functional_saver.py",
    "ast_data": "FunctionDef name:to_proto arg:self arguments arg Assign Call Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "kornia",
    "name": "differentiable_polynomial_floor",
    "source_code": "def differentiable_polynomial_floor(input: Tensor) -> Tensor:\n    input_floor = input.floor()\n    output: Tensor = input_floor + (input - 0.5 - input_floor) ** 3\n    return output",
    "docstring": "Perform floor via a differentiable operation. Args: input (Tensor): Input tensor of any shape to be floored. Returns: output (Tensor): Pseudo rounded tensor of the same shape as input tensor.",
    "type": "function",
    "file_path": "kornia\\kornia\\utils\\misc.py",
    "ast_data": "FunctionDef name:differentiable_polynomial_floor arg:input arguments arg Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "parse_example_spec",
    "source_code": "@property\ndef parse_example_spec(self):\n    return {self.key: parsing_ops.VarLenFeature(self.dtype)}",
    "docstring": "See base class.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\feature_column\\feature_column_v2.py",
    "ast_data": "FunctionDef name:parse_example_spec arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "django",
    "name": "spaceless",
    "source_code": "@register.tag\ndef spaceless(parser, token):\n    nodelist = parser.parse(('endspaceless',))\n    parser.delete_first_token()\n    return SpacelessNode(nodelist)",
    "docstring": "Remove whitespace between HTML tags, including tab and newline characters. Example usage:: {% spaceless %} Foo {% endspaceless %} This example returns this HTML:: Foo Only space between *tags* is normalized -- not space between tags and text. In this example, the space around `` isn't stripped:: {% spaceless %} Hello {% endspaceless %}",
    "type": "function",
    "file_path": "django\\django\\template\\defaulttags.py",
    "ast_data": "FunctionDef name:spaceless arg:parser arg:token arguments arg arg Assign Call Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "__call__",
    "source_code": "def __call__(self, shape, dtype, axis=0):\n    raise NotImplementedError",
    "docstring": "Partitions the given and returns the partition results. Examples of a partitioner that allocates a fixed number of shards: Args: shape: a , the shape to partition. dtype: a indicating the type of the partition value. axis: The axis to partition along. Default: outermost axis. Returns: A list of integers representing the number of partitions on each axis, where i-th value corresponds to i-th axis.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\sharded_variable.py",
    "ast_data": "FunctionDef name:__call__ arg:self arg:shape arg:dtype arg:axis arguments arg arg arg arg Raise"
  },
  {
    "library": "numpy",
    "name": "zfill",
    "source_code": "def zfill(self, width):\n    return asarray(zfill(self, width))",
    "docstring": "Return the numeric string left-filled with zeros in a string of length . See Also -------- char.zfill",
    "type": "method",
    "file_path": "numpy\\numpy\\_core\\defchararray.py",
    "ast_data": "FunctionDef name:zfill arg:self arg:width arguments arg arg Return return:yes Call Call"
  },
  {
    "library": "django",
    "name": "min_y",
    "source_code": "@property\ndef min_y(self):\n    return self._envelope.MinY",
    "docstring": "Return the value of the minimum Y coordinate.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\gdal\\envelope.py",
    "ast_data": "FunctionDef name:min_y arg:self arguments arg Return return:yes"
  },
  {
    "library": "django",
    "name": "_add_fallback",
    "source_code": "def _add_fallback(self, localedirs=None):\n    if self.__language == settings.LANGUAGE_CODE or self.__language.startswith('en'):\n        return\n    if self.domain == 'django':\n        default_translation = translation(settings.LANGUAGE_CODE)\n    else:\n        default_translation = DjangoTranslation(settings.LANGUAGE_CODE, domain=self.domain, localedirs=localedirs)\n    self.add_fallback(default_translation)",
    "docstring": "Set the GNUTranslations() fallback with the default language.",
    "type": "method",
    "file_path": "django\\django\\utils\\translation\\trans_real.py",
    "ast_data": "FunctionDef name:_add_fallback arg:self arg:localedirs arguments arg arg If BoolOp Compare Call Return return:no If Compare Assign Call Assign Call Call"
  },
  {
    "library": "django",
    "name": "migrations_module",
    "source_code": "@classmethod\ndef migrations_module(cls, app_label):\n    if app_label in settings.MIGRATION_MODULES:\n        return (settings.MIGRATION_MODULES[app_label], True)\n    else:\n        app_package_name = apps.get_app_config(app_label).name\n        return ('%s.%s' % (app_package_name, MIGRATIONS_MODULE_NAME), False)",
    "docstring": "Return the path to the migrations module for the specified app_label and a boolean indicating if the module is specified in settings.MIGRATION_MODULE.",
    "type": "method",
    "file_path": "django\\django\\db\\migrations\\loader.py",
    "ast_data": "FunctionDef name:migrations_module arg:cls arg:app_label arguments arg arg If Compare Return return:yes Assign Call Return return:yes"
  },
  {
    "library": "kornia",
    "name": "_load_ops",
    "source_code": "def _load_ops(self, *args: Union[onnx.ModelProto, str], cache_dir: Optional[str]=None) -> list[onnx.ModelProto]:\n    op_list = []\n    for arg in args:\n        op_list.append(self._load_op(arg, cache_dir=cache_dir))\n    return op_list",
    "docstring": "Load multiple ONNX models or operators and returns them as a list. Args: *args: A variable number of ONNX models (either ONNX ModelProto objects or file paths). For Hugging Face-hosted models, use the format 'hf://model_name'. Valid can be found on Or a URL to the ONNX model. cache_dir: Where to read operations from if stored on disk. Returns: list[onnx.ModelProto]: The loaded ONNX models as a list of ONNX graphs.",
    "type": "method",
    "file_path": "kornia\\kornia\\core\\mixin\\onnx.py",
    "ast_data": "FunctionDef name:_load_ops arg:self arguments arg arg arg Assign For Call Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "_cm_set",
    "source_code": "@contextlib.contextmanager\ndef _cm_set(self, **kwargs):\n    orig_vals = {k: getattr(self, f'get_{k}')() for k in kwargs}\n    try:\n        self.set(**kwargs)\n        yield\n    finally:\n        self.set(**orig_vals)",
    "docstring": "context-manager that restores original values at exit.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\artist.py",
    "ast_data": "FunctionDef name:_cm_set arg:self arguments arg arg Assign Call Call Try Call Call"
  },
  {
    "library": "pytorch",
    "name": "size",
    "source_code": "def size(self, dim: Optional[int]=None) -> Union[int, torch.SymInt]:\n    return self.as_fake().size(dim)",
    "docstring": "Returns the size of the tensor (if dim is None) or the size at the dimension dim. The returned size may be a SymInt.",
    "type": "method",
    "file_path": "pytorch\\torch\\_dynamo\\comptime.py",
    "ast_data": "FunctionDef name:size arg:self arg:dim arguments arg arg Return return:yes Call Call"
  },
  {
    "library": "django",
    "name": "city",
    "source_code": "def city(self, query):\n    response = self._query(query, require_city=True)\n    region = response.subdivisions[0] if response.subdivisions else None\n    return {'accuracy_radius': response.location.accuracy_radius, 'city': response.city.name, 'continent_code': response.continent.code, 'continent_name': response.continent.name, 'country_code': response.country.iso_code, 'country_name': response.country.name, 'is_in_european_union': response.country.is_in_european_union, 'latitude': response.location.latitude, 'longitude': response.location.longitude, 'metro_code': response.location.metro_code, 'postal_code': response.postal.code, 'region_code': region.iso_code if region else None, 'region_name': region.name if region else None, 'time_zone': response.location.time_zone, 'dma_code': response.location.metro_code, 'region': region.iso_code if region else None}",
    "docstring": "Return a dictionary of city information for the given IP address or Fully Qualified Domain Name (FQDN). Some information in the dictionary may be undefined (None).",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\geoip2.py",
    "ast_data": "FunctionDef name:city arg:self arg:query arguments arg arg Assign Call Assign Return return:yes"
  },
  {
    "library": "pandas",
    "name": "is_null_slice",
    "source_code": "def is_null_slice(obj) -> bool:\n    return isinstance(obj, slice) and obj.start is None and (obj.stop is None) and (obj.step is None)",
    "docstring": "We have a null slice.",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\common.py",
    "ast_data": "FunctionDef name:is_null_slice arg:obj arguments arg Return return:yes BoolOp Call Compare Compare Compare"
  },
  {
    "library": "tensorflow",
    "name": "all_prod",
    "source_code": "def all_prod(tensors):\n    return _apply_all_reduce('prod', tensors)",
    "docstring": "Returns a list of tensors with the all-reduce product across . The computation is done with an all-reduce operation, so if only some of the returned tensors are evaluated then the computation will hang. Args: tensors: The input tensors across which to multiply; must be assigned to GPU devices. Returns: List of tensors, each with the product of the input tensors, where tensor i has the same device as .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\nccl_ops.py",
    "ast_data": "FunctionDef name:all_prod arg:tensors arguments arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "_find_producer_matmul",
    "source_code": "def _find_producer_matmul(node: torch.fx.Node) -> Optional[_Matmul]:\n    if node.target == aten.mm.default:\n        return _Matmul.from_match(match=[node])\n    elif node.target == aten._scaled_mm.default:\n        return _ScaledMatmul.from_match(match=[node])\n    elif node.target == aten.reshape.default:\n        reshape_node_1 = node\n        mm_node = reshape_node_1.args[0]\n        assert isinstance(mm_node, torch.fx.Node)\n        if mm_node.target not in (aten.mm.default, aten._scaled_mm.default):\n            return None\n        reshape_node_0 = mm_node.args[0]\n        assert isinstance(reshape_node_0, torch.fx.Node)\n        if reshape_node_0.target != aten.reshape.default:\n            return None\n        if mm_node.target == aten.mm.default:\n            return _Matmul.from_match(match=[reshape_node_0, mm_node, reshape_node_1])\n        elif mm_node.target == aten._scaled_mm.default:\n            return _ScaledMatmul.from_match(match=[reshape_node_0, mm_node, reshape_node_1])\n    return None",
    "docstring": "Returns producer matmul node if found, otherwise returns None.",
    "type": "function",
    "file_path": "pytorch\\torch\\_inductor\\fx_passes\\micro_pipeline_tp.py",
    "ast_data": "FunctionDef name:_find_producer_matmul arg:node arguments arg If Compare Return return:yes Call If Compare Return return:yes Call If Compare Assign Assign Call If Compare Return return:no Assign Call If Compare Return return:no If Compare Return return:yes Call If Compare Return return:yes Call Return return:no"
  },
  {
    "library": "scikit-learn",
    "name": "fit_intercept_only",
    "source_code": "def fit_intercept_only(self, y_true, sample_weight=None):\n    y_pred = np.average(y_true, weights=sample_weight, axis=0)\n    eps = 10 * np.finfo(y_pred.dtype).eps\n    if self.interval_y_pred.low == -np.inf:\n        a_min = None\n    elif self.interval_y_pred.low_inclusive:\n        a_min = self.interval_y_pred.low\n    else:\n        a_min = self.interval_y_pred.low + eps\n    if self.interval_y_pred.high == np.inf:\n        a_max = None\n    elif self.interval_y_pred.high_inclusive:\n        a_max = self.interval_y_pred.high\n    else:\n        a_max = self.interval_y_pred.high - eps\n    if a_min is None and a_max is None:\n        return self.link.link(y_pred)\n    else:\n        return self.link.link(np.clip(y_pred, a_min, a_max))",
    "docstring": "Compute raw_prediction of an intercept-only model. This can be used as initial estimates of predictions, i.e. before the first iteration in fit. Parameters ---------- y_true : array-like of shape (n_samples,) Observed, true target values. sample_weight : None or array of shape (n_samples,) Sample weights. Returns ------- raw_prediction : numpy scalar or array of shape (n_classes,) Raw predictions of an intercept-only model.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\_loss\\loss.py",
    "ast_data": "FunctionDef name:fit_intercept_only arg:self arg:y_true arg:sample_weight arguments arg arg arg Assign Call Assign Call If Compare Assign If Assign Assign If Compare Assign If Assign Assign If BoolOp Compare Compare Return return:yes Call Return return:yes Call Call"
  },
  {
    "library": "scikit-learn",
    "name": "transform",
    "source_code": "@available_if(_can_transform)\ndef transform(self, X, **params):\n    with _raise_or_warn_if_not_fitted(self):\n        _raise_for_params(params, self, 'transform')\n        routed_params = process_routing(self, 'transform', **params)\n        Xt = X\n        for _, name, transform in self._iter():\n            Xt = transform.transform(Xt, **routed_params[name].transform)\n        return Xt",
    "docstring": "Transform the data, and apply with the final estimator. Call of each transformer in the pipeline. The transformed data are finally passed to the final estimator that calls method. Only valid if the final estimator implements . This also works where final estimator is in which case all prior transformations are applied. Parameters ---------- X : iterable Data to transform. Must fulfill input requirements of first step of the pipeline. **params : dict of str -> object Parameters requested and accepted by steps. Each step must have requested certain metadata for these parameters to be forwarded to them. .. versionadded:: 1.4 Only available if . See :ref: for more details. Returns ------- Xt : ndarray of shape (n_samples, n_transformed_features) Transformed data.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\pipeline.py",
    "ast_data": "FunctionDef name:transform arg:self arg:X arguments arg arg arg With Call Call Assign Call Assign For Call Assign Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "check_line_split",
    "source_code": "def check_line_split(code_line):\n    return re.search('\\\\\\\\\\\\s*\\\\n$', code_line)",
    "docstring": "Checks if a line was split with . Args: code_line: A line of Python code Returns: If the line was split with >>> skip_magic(\"!gcloud ml-engine models create ${MODEL} \\\\\\n\") True",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\tools\\compatibility\\ipynb.py",
    "ast_data": "FunctionDef name:check_line_split arg:code_line arguments arg Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "_adjust_binner_for_upsample",
    "source_code": "def _adjust_binner_for_upsample(self, binner):\n    return binner",
    "docstring": "Adjust our binner when upsampling. The range of a new index is allowed to be greater than original range so we don't need to change the length of a binner, GH 13022",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\resample.py",
    "ast_data": "FunctionDef name:_adjust_binner_for_upsample arg:self arg:binner arguments arg arg Return return:yes"
  },
  {
    "library": "pandas",
    "name": "groups",
    "source_code": "@cache_readonly\ndef groups(self):\n    result = {key: value for key, value in zip(self.binlabels, self.bins) if key is not NaT}\n    return result",
    "docstring": "dict {group name -> group labels}",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\groupby\\ops.py",
    "ast_data": "FunctionDef name:groups arg:self arguments arg Assign Call Compare Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "is_concrete_float",
    "source_code": "def is_concrete_float(a: FloatLikeType) -> bool:\n    assert isinstance(a, (SymFloat, float))\n    if isinstance(a, float):\n        return True\n    if isinstance(a.node.expr, sympy.core.numbers.Float):\n        return True\n    return False",
    "docstring": "Utility to check if underlying object in SymInt is concrete value. Also returns true if integer is passed in. Args: a (SymInt or float): Object to test if it float",
    "type": "function",
    "file_path": "pytorch\\torch\\fx\\experimental\\symbolic_shapes.py",
    "ast_data": "FunctionDef name:is_concrete_float arg:a arguments arg Call If Call Return return:yes If Call Return return:yes Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "connect_raise_node",
    "source_code": "def connect_raise_node(self, node, except_guards):\n    for guard in except_guards:\n        if guard in self.raises:\n            self.raises[guard].append(node)\n        else:\n            self.raises[guard] = [node]",
    "docstring": "Adds extra connection between a raise node and containing except guards. The node is a graph node, not an ast node. Args: node: Node except_guards: Tuple[ast.AST, ...], the except sections that guard node",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\autograph\\pyct\\cfg.py",
    "ast_data": "FunctionDef name:connect_raise_node arg:self arg:node arg:except_guards arguments arg arg arg For If Compare Call Assign"
  },
  {
    "library": "seaborn",
    "name": "set_layout_engine",
    "source_code": "def set_layout_engine(fig: Figure, engine: Literal['constrained', 'compressed', 'tight', 'none']) -> None:\n    if hasattr(fig, 'set_layout_engine'):\n        fig.set_layout_engine(engine)\n    elif engine == 'tight':\n        fig.set_tight_layout(True)\n    elif engine == 'constrained':\n        fig.set_constrained_layout(True)\n    elif engine == 'none':\n        fig.set_tight_layout(False)\n        fig.set_constrained_layout(False)",
    "docstring": "Handle changes to auto layout engine interface in 3.6",
    "type": "function",
    "file_path": "seaborn\\seaborn\\_compat.py",
    "ast_data": "FunctionDef name:set_layout_engine arg:fig arg:engine arguments arg arg If Call Call If Compare Call If Compare Call If Compare Call Call"
  },
  {
    "library": "matplotlib",
    "name": "plot_examples",
    "source_code": "def plot_examples(colormaps):\n    np.random.seed(19680801)\n    data = np.random.randn(30, 30)\n    n = len(colormaps)\n    fig, axs = plt.subplots(1, n, figsize=(n * 2 + 2, 3), layout='constrained', squeeze=False)\n    for [ax, cmap] in zip(axs.flat, colormaps):\n        psm = ax.pcolormesh(data, cmap=cmap, rasterized=True, vmin=-4, vmax=4)\n        fig.colorbar(psm, ax=ax)\n    plt.show()",
    "docstring": "Helper function to plot data with associated colormap.",
    "type": "function",
    "file_path": "matplotlib\\galleries\\users_explain\\colors\\colormap-manipulation.py",
    "ast_data": "FunctionDef name:plot_examples arg:colormaps arguments arg Call Assign Call Assign Call Assign Call For Call Assign Call Call Call"
  },
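A hypothetical call to the helper above, comparing the stock viridis map against a truncated copy; the helper itself plus numpy and pyplot are assumed to be in scope, as they are in the gallery script.

import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap

viridis = plt.get_cmap("viridis")
# Build a new colormap from the lower half of viridis.
half_viridis = ListedColormap(viridis(np.linspace(0.0, 0.5, 128)))
plot_examples([viridis, half_viridis])  # one pcolormesh panel per colormap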
  {
    "library": "matplotlib",
    "name": "value_formatter",
    "source_code": "class value_formatter:\n\n    def __init__(self, value):\n        if value is mlab.detrend_none:\n            self._repr = 'mlab.detrend_none'\n        elif value is mlab.window_hanning:\n            self._repr = 'mlab.window_hanning'\n        elif value is np.mean:\n            self._repr = 'np.mean'\n        elif value is _api.deprecation._deprecated_parameter:\n            self._repr = '_api.deprecation._deprecated_parameter'\n        elif isinstance(value, Enum):\n            self._repr = f'{type(value).__name__}.{value.name}'\n        else:\n            self._repr = repr(value)\n\n    def __repr__(self):\n        return self._repr",
    "docstring": "Format function default values as needed for inspect.formatargspec. The interesting part is a hard-coded list of functions used as defaults in pyplot methods.",
    "type": "class",
    "file_path": "matplotlib\\tools\\boilerplate.py",
    "ast_data": "ClassDef name:value_formatter FunctionDef name:__init__ arg:self arg:value arguments arg arg If Compare Assign If Compare Assign If Compare Assign If Compare Assign If Call Assign Call Assign Call FunctionDef name:__repr__ arg:self arguments arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_traced_graph_from_model",
    "source_code": "def _traced_graph_from_model(model: torch.nn.Module | torch.jit.ScriptModule, args: tuple[Any, ...], kwargs: Mapping[str, Any], export_options: _experimental.ExportOptions) -> _C.Graph:\n    training = export_options.training\n    verbose = export_options.verbose\n    with utils.exporter_context(model, training, verbose):\n        export_inputs = _prepare_input_for_export(args, kwargs)\n        model = utils._pre_trace_quant_model(model, export_inputs)\n        jit_graph, _, _, _ = utils._create_jit_graph(model, export_inputs)\n        return jit_graph",
    "docstring": "As part of the ONNX export steps, create a traced JIT graph from a PyTorch model. Args: model: See :func:. args: See :func:. kwargs: See :func:. export_options: See :func:. Returns: jit_graph (_C.Graph): A traced JIT graph.",
    "type": "function",
    "file_path": "pytorch\\torch\\onnx\\verification.py",
    "ast_data": "FunctionDef name:_traced_graph_from_model arg:model arg:args arg:kwargs arg:export_options arguments arg arg arg arg Assign Assign With Call Assign Call Assign Call Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "trace_export",
    "source_code": "@tf_export('summary.trace_export', v1=[])\ndef trace_export(name, step=None, profiler_outdir=None):\n    global _current_trace_context\n    if ops.inside_function():\n        logging.warn('Cannot export trace inside a tf.function.')\n        return\n    if not context.executing_eagerly():\n        logging.warn('Can only export trace while executing eagerly.')\n        return\n    with _current_trace_context_lock:\n        if _current_trace_context is None:\n            raise ValueError('Must enable trace before export through tf.summary.trace_on.')\n        graph, profiler = _current_trace_context\n    run_meta = context.context().export_run_metadata()\n    if graph and (not profiler):\n        run_metadata_graphs(name, run_meta, step)\n    else:\n        run_metadata(name, run_meta, step)\n    if profiler:\n        if profiler_outdir:\n            logging.warn('Ignoring `profiler_outdir` passed to trace_export(). Please pass it to trace_on() instead.')\n        _profiler.stop()\n    trace_off()",
    "docstring": "Stops and exports the active trace as a Summary and/or profile file. Stops the trace and exports all metadata collected during the trace to the default SummaryWriter, if one has been set. Args: name: A name for the summary to be written. step: Explicit -castable monotonic step value for this summary. If omitted, this defaults to , which must not be None. profiler_outdir: This arg is a no-op. Please set this in trace_on(). Raises: ValueError: if a default writer exists, but no step was provided and is None.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\summary_ops_v2.py",
    "ast_data": "FunctionDef name:trace_export arg:name arg:step arg:profiler_outdir arguments arg arg arg If Call Call Return return:no If Call Call Return return:no With If Compare Raise Call Assign Assign Call Call If BoolOp Call Call If If Call Call Call Call"
  },
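A hedged end-to-end sketch of the workflow that `trace_export` terminates: turn tracing on, run a `tf.function` once eagerly so there is something to export, then export under a default writer. The log directory is illustrative.

import tensorflow as tf

writer = tf.summary.create_file_writer("/tmp/tb_logs")  # illustrative path

@tf.function
def square(x):
    return x * x

tf.summary.trace_on(graph=True)   # start collecting graph metadata
square(tf.constant(2.0))          # run once so there is a trace to export
with writer.as_default():
    tf.summary.trace_export(name="square_trace", step=0)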
  {
    "library": "tensorflow",
    "name": "_is_quantized_input_stats_required",
    "source_code": "def _is_quantized_input_stats_required(conversion_flags: _conversion_flags_pb2.ConverterFlags) -> bool:\n    quantized_inference_types = [_types_pb2.QUANTIZED_UINT8, _types_pb2.QUANTIZED_INT8]\n    return (conversion_flags.inference_type in quantized_inference_types or conversion_flags.inference_input_type in quantized_inference_types) and (not conversion_flags.post_training_quantize)",
    "docstring": "Checks if the flag is required for conversion. Args: conversion_flags: A protocol buffer describing the conversion process. Returns: True, if the or the is a quantized type and it is not post training quantization, else False.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\lite\\python\\convert.py",
    "ast_data": "FunctionDef name:_is_quantized_input_stats_required arg:conversion_flags arguments arg Assign Return return:yes BoolOp BoolOp Compare Compare"
  },
  {
    "library": "tensorflow",
    "name": "as_default",
    "source_code": "def as_default(self):\n    return stack.default_session(self)",
    "docstring": "Returns a context manager that makes this object the default session. Use with the keyword to specify that calls to or should be executed in this session. To get the current default session, use . *N.B.* The context manager *does not* close the session when you exit the context, and you must close the session explicitly. Alternatively, you can use to create a session that is automatically closed on exiting the context, including when an uncaught exception is raised. *N.B.* The default session is a property of the current thread. If you create a new thread, and wish to use the default session in that thread, you must explicitly add a in that thread's function. *N.B.* Entering a block does not affect the current default graph. If you are using multiple graphs, and is different from the value of , you must explicitly enter a block to make the default graph. Returns: A context manager using this session as the default session.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\client\\session.py",
    "ast_data": "FunctionDef name:as_default arg:self arguments arg Return return:yes Call"
  },
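A TF1-style sketch of the contract described above: `as_default` installs the session for the current thread but never closes it, so the session is closed explicitly afterwards.

import tensorflow.compat.v1 as tf

tf.disable_eager_execution()
c = tf.constant(42.0)
sess = tf.Session()
with sess.as_default():
    assert tf.get_default_session() is sess
    print(c.eval())   # resolved against the default session
sess.close()          # as_default() did not close it for us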
  {
    "library": "scikit-learn",
    "name": "predict",
    "source_code": "def predict(self, X):\n    if self.methods_to_check == 'all' or 'predict' in self.methods_to_check:\n        X, y = self._check_X_y(X)\n    rng = check_random_state(self.random_state)\n    return rng.choice(self.classes_, size=_num_samples(X))",
    "docstring": "Predict the first class seen in . Parameters ---------- X : array-like of shape (n_samples, n_features) The input data. Returns ------- preds : ndarray of shape (n_samples,) Predictions of the first class seen in .",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\utils\\_mocking.py",
    "ast_data": "FunctionDef name:predict arg:self arg:X arguments arg arg If BoolOp Compare Compare Assign Call Assign Call Return return:yes Call Call"
  },
  {
    "library": "authlib",
    "name": "RequestNotSupportedError",
    "source_code": "class RequestNotSupportedError(OAuth2Error):\n    error = 'request_not_supported'",
    "docstring": "The OP does not support use of the request parameter.",
    "type": "class",
    "file_path": "authlib\\authlib\\oidc\\core\\errors.py",
    "ast_data": "ClassDef name:RequestNotSupportedError Assign"
  },
  {
    "library": "sphinx",
    "name": "get_updated_docs",
    "source_code": "def get_updated_docs(self, app: Sphinx, env: BuildEnvironment) -> list[str]:\n    return []",
    "docstring": "Return a list of docnames to re-read. This method is called after reading the whole of documents. .. seealso:: :event:",
    "type": "method",
    "file_path": "sphinx\\sphinx\\environment\\collectors\\__init__.py",
    "ast_data": "FunctionDef name:get_updated_docs arg:self arg:app arg:env arguments arg arg arg Return return:no"
  },
  {
    "library": "sphinx",
    "name": "_write_py_statistics",
    "source_code": "def _write_py_statistics(self, op: TextIO) -> None:\n    all_modules = frozenset(self.py_documented.keys() | self.py_undocumented.keys())\n    all_objects: Set[str] = set()\n    all_documented_objects: Set[str] = set()\n    for module in all_modules:\n        all_objects |= self.py_documented[module] | self.py_undocumented[module]\n        all_documented_objects |= self.py_documented[module]\n    table = [['Module', 'Coverage', 'Undocumented']]\n    for module in sorted(all_modules):\n        module_objects = self.py_documented[module] | self.py_undocumented[module]\n        if len(module_objects):\n            value = 100.0 * len(self.py_documented[module]) / len(module_objects)\n        else:\n            value = 100.0\n        table.append([module, f'{value:.2f}%', str(len(self.py_undocumented[module]))])\n    if all_objects:\n        table.append(['TOTAL', f'{100 * len(all_documented_objects) / len(all_objects):.2f}%', f'{len(all_objects) - len(all_documented_objects)}'])\n    else:\n        table.append(['TOTAL', '100', '0'])\n    op.writelines((f'{line}\\n' for line in _write_table(table)))",
    "docstring": "Outputs the table of ``.",
    "type": "method",
    "file_path": "sphinx\\sphinx\\ext\\coverage.py",
    "ast_data": "FunctionDef name:_write_py_statistics arg:self arg:op arguments arg arg Assign Call Call Call Call Call For Assign For Call Assign If Call Assign Call Call Assign Call Call Call If Call Call Call Call Call Call Call Call"
  },
  {
    "library": "matplotlib",
    "name": "ymin",
    "source_code": "@property\ndef ymin(self):\n    return np.min(self.get_points()[:, 1])",
    "docstring": "The bottom edge of the bounding box.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\transforms.py",
    "ast_data": "FunctionDef name:ymin arg:self arguments arg Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "patterns_to_regex",
    "source_code": "def patterns_to_regex(allowed_patterns: list[str]) -> Any:\n    rc = '('\n    for idx, pattern in enumerate(allowed_patterns):\n        if idx > 0:\n            rc += '|'\n        pattern_ = PeekableIterator(pattern)\n        assert not any((c in pattern for c in '{}()[]\\\\'))\n        for c in pattern_:\n            if c == '.':\n                rc += '\\\\.'\n            elif c == '+':\n                rc += '\\\\+'\n            elif c == '*':\n                if pattern_.peek() == '*':\n                    next(pattern_)\n                    rc += '.*'\n                else:\n                    rc += '[^/]*'\n            else:\n                rc += c\n    rc += ')'\n    return re.compile(rc)",
    "docstring": "pattern is glob-like, i.e. the only special sequences it has are: - ? - matches single character - * - matches any non-folder separator characters or no character - ** - matches any characters or no character Assuming that patterns are free of braces and backslashes the only character that needs to be escaped are dot and plus",
    "type": "function",
    "file_path": "pytorch\\.github\\scripts\\gitutils.py",
    "ast_data": "FunctionDef name:patterns_to_regex arg:allowed_patterns arguments arg Assign For Call If Compare Assign Call Call Compare For If Compare If Compare If Compare If Compare Call Call Return return:yes Call"
  },
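A standalone illustration of the two interesting translation rules, re-stated directly rather than imported from the CI script; the regex below is what the function would emit for the hypothetical allow-list ['docs/**', 'torch/*.py'].

import re

# "**" becomes ".*" (crosses folder separators), "*" becomes "[^/]*"
# (stops at a separator), and "." is escaped.
rc = re.compile(r"(docs/.*|torch/[^/]*\.py)")

assert rc.fullmatch("docs/source/conf.py")     # ** spans subfolders
assert rc.fullmatch("torch/functional.py")
assert not rc.fullmatch("torch/fx/graph.py")   # * cannot cross the "/"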
  {
    "library": "sphinx",
    "name": "production_definitions",
    "source_code": "@staticmethod\ndef production_definitions(lines: Iterable[str], /) -> Iterator[tuple[str, str, str]]:\n    for line in lines:\n        if ':' not in line:\n            break\n        name, _, tokens = line.partition(':')\n        yield (line, name.strip(), tokens.strip())",
    "docstring": "Yield triples of rawsource, name, definition (tokens).",
    "type": "method",
    "file_path": "sphinx\\sphinx\\domains\\std\\__init__.py",
    "ast_data": "FunctionDef name:production_definitions arguments arg For If Compare Assign Call Call Call"
  },
  {
    "library": "django",
    "name": "get_day_format",
    "source_code": "def get_day_format(self):\n    return self.day_format",
    "docstring": "Get a day format string in strptime syntax to be used to parse the day from url variables.",
    "type": "method",
    "file_path": "django\\django\\views\\generic\\dates.py",
    "ast_data": "FunctionDef name:get_day_format arg:self arguments arg Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "_set_gc_clip",
    "source_code": "def _set_gc_clip(self, gc):\n    if self._clipon:\n        if self.clipbox is not None:\n            gc.set_clip_rectangle(self.clipbox)\n        gc.set_clip_path(self._clippath)\n    else:\n        gc.set_clip_rectangle(None)\n        gc.set_clip_path(None)",
    "docstring": "Set the clip properly for the gc.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\artist.py",
    "ast_data": "FunctionDef name:_set_gc_clip arg:self arg:gc arguments arg arg If If Compare Call Call Call Call"
  },
  {
    "library": "django",
    "name": "BaseCreateView",
    "source_code": "class BaseCreateView(ModelFormMixin, ProcessFormView):\n\n    def get(self, request, *args, **kwargs):\n        self.object = None\n        return super().get(request, *args, **kwargs)\n\n    def post(self, request, *args, **kwargs):\n        self.object = None\n        return super().post(request, *args, **kwargs)",
    "docstring": "Base view for creating a new object instance. This requires subclassing to provide a response mixin.",
    "type": "class",
    "file_path": "django\\django\\views\\generic\\edit.py",
    "ast_data": "ClassDef name:BaseCreateView FunctionDef name:get arg:self arg:request arguments arg arg arg arg Assign Return return:yes Call Call FunctionDef name:post arg:self arg:request arguments arg arg arg arg Assign Return return:yes Call Call"
  },
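In practice the class above is consumed through `CreateView`, which supplies the missing response mixin. A minimal sketch with a hypothetical `Article` model:

from django.views.generic.edit import CreateView

class ArticleCreateView(CreateView):   # CreateView = template mixin + BaseCreateView
    model = Article                    # hypothetical model, assumed to be imported
    fields = ["title", "body"]
    success_url = "/articles/"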
  {
    "library": "pandas",
    "name": "_convert_to_font",
    "source_code": "@classmethod\ndef _convert_to_font(cls, font_dict):\n    from openpyxl.styles import Font\n    _font_key_map = {'sz': 'size', 'b': 'bold', 'i': 'italic', 'u': 'underline', 'strike': 'strikethrough', 'vertalign': 'vertAlign'}\n    font_kwargs = {}\n    for k, v in font_dict.items():\n        k = _font_key_map.get(k, k)\n        if k == 'color':\n            v = cls._convert_to_color(v)\n        font_kwargs[k] = v\n    return Font(**font_kwargs)",
    "docstring": "Convert `` to an openpyxl v2 Font object. Parameters ---------- font_dict : dict A dict with zero or more of the following keys (or their synonyms). 'name' 'size' ('sz') 'bold' ('b') 'italic' ('i') 'underline' ('u') 'strikethrough' ('strike') 'color' 'vertAlign' ('vertalign') 'charset' 'scheme' 'family' 'outline' 'shadow' 'condense' Returns ------- font : openpyxl.styles.Font",
    "type": "method",
    "file_path": "pandas\\pandas\\io\\excel\\_openpyxl.py",
    "ast_data": "FunctionDef name:_convert_to_font arg:cls arg:font_dict arguments arg arg Assign Assign For Call Assign Call If Compare Assign Call Assign Return return:yes Call"
  },
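A sketch of the key normalization performed above: the short spreadsheet-style keys are remapped before being handed to openpyxl's Font constructor, so the two spellings below should be equivalent.

from openpyxl.styles import Font

font_dict = {"b": True, "sz": 12, "u": "single"}
# After the internal key map ('b' -> 'bold', 'sz' -> 'size', 'u' -> 'underline'),
# the conversion amounts to:
font = Font(bold=True, size=12, underline="single")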
  {
    "library": "tensorflow",
    "name": "apply_mask",
    "source_code": "def apply_mask(self, prediction_result):\n    padding_mask = backend.get_value(self.padding_mask)\n    assert len(padding_mask.shape) == 1\n    if len(self.output_shape) == 1:\n        prediction = np.take(prediction_result, np.nonzero(padding_mask[:len(prediction_result)]), axis=0)\n        if prediction.shape[0] == 1:\n            prediction = np.squeeze(prediction, axis=0)\n        return prediction\n    else:\n        predictions = []\n        for i in range(len(self.output_shape)):\n            prediction = prediction_result[i]\n            prediction = np.take(prediction, np.nonzero(padding_mask[:len(prediction)]), axis=0)\n            predictions.append(np.squeeze(prediction))\n        return predictions",
    "docstring": "Removes prediction output that corresponds to padded input.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\partial_batch_padding_handler.py",
    "ast_data": "FunctionDef name:apply_mask arg:self arg:prediction_result arguments arg arg Assign Call Compare Call If Compare Call Assign Call Call Call If Compare Assign Call Return return:yes Assign For Call Call Assign Assign Call Call Call Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "VariableMetaclass",
    "source_code": "class VariableMetaclass(abc.ABCMeta):\n\n    @traceback_utils.filter_traceback\n    def __call__(cls, *args, **kwargs):\n        if hasattr(cls, '_variable_call') and callable(cls._variable_call):\n            variable_call = cls._variable_call(*args, **kwargs)\n            if variable_call is not None:\n                return variable_call\n        return super(VariableMetaclass, cls).__call__(*args, **kwargs)",
    "docstring": "Metaclass to allow construction of tf.Variable to be overridden.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\variables.py",
    "ast_data": "ClassDef name:VariableMetaclass FunctionDef name:__call__ arg:cls arguments arg arg arg If BoolOp Call Call Assign Call If Compare Return return:yes Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_SparseTensorSliceDataset",
    "source_code": "class _SparseTensorSliceDataset(dataset_ops.DatasetSource):\n\n    def __init__(self, sparse_tensor):\n        if not isinstance(sparse_tensor, sparse_tensor_lib.SparseTensor):\n            raise TypeError(f'Invalid `sparse_tensor`. `sparse_tensor` must be a `tf.sparse.SparseTensor`. Got {type(sparse_tensor)}.')\n        self._sparse_tensor = sparse_tensor\n        indices_shape = self._sparse_tensor.indices.get_shape()\n        shape_shape = self._sparse_tensor.dense_shape.get_shape()\n        rank = (indices_shape.dims[1] - 1).merge_with(shape_shape.dims[0] - 1)\n        self._structure = (tensor_spec.TensorSpec([None, rank], dtypes.int64), tensor_spec.TensorSpec([None], self._sparse_tensor.dtype), tensor_spec.TensorSpec([rank], dtypes.int64))\n        variant_tensor = gen_dataset_ops.sparse_tensor_slice_dataset(self._sparse_tensor.indices, self._sparse_tensor.values, self._sparse_tensor.dense_shape)\n        super().__init__(variant_tensor)\n\n    @property\n    def element_spec(self):\n        return self._structure",
    "docstring": "A that splits a rank-N into its rows.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\data\\ops\\from_sparse_tensor_slices_op.py",
    "ast_data": "ClassDef name:_SparseTensorSliceDataset FunctionDef name:__init__ arg:self arg:sparse_tensor arguments arg arg If Call Raise Call Call Assign Assign Call Assign Call Assign Call Assign Call Call Call Assign Call Call Call FunctionDef name:element_spec arg:self arguments arg Return return:yes"
  },
  {
    "library": "scipy",
    "name": "arcsine_gen",
    "source_code": "class arcsine_gen(rv_continuous):\n\n    def _shape_info(self):\n        return []\n\n    def _pdf(self, x):\n        with np.errstate(divide='ignore'):\n            return 1.0 / np.pi / np.sqrt(x * (1 - x))\n\n    def _cdf(self, x):\n        return 2.0 / np.pi * np.arcsin(np.sqrt(x))\n\n    def _ppf(self, q):\n        return np.sin(np.pi / 2.0 * q) ** 2.0\n\n    def _stats(self):\n        mu = 0.5\n        mu2 = 1.0 / 8\n        g1 = 0\n        g2 = -3.0 / 2.0\n        return (mu, mu2, g1, g2)\n\n    def _entropy(self):\n        return -0.24156447527049044",
    "docstring": "An arcsine continuous random variable. %(before_notes)s Notes ----- The probability density function for is: .. math:: f(x) = \\frac{1}{\\pi \\sqrt{x (1-x)}} for :math:. %(after_notes)s %(example)s",
    "type": "class",
    "file_path": "scipy\\scipy\\stats\\_continuous_distns.py",
    "ast_data": "ClassDef name:arcsine_gen FunctionDef name:_shape_info arg:self arguments arg Return return:no FunctionDef name:_pdf arg:self arg:x arguments arg arg With Call Return return:yes Call FunctionDef name:_cdf arg:self arg:x arguments arg arg Return return:yes Call Call FunctionDef name:_ppf arg:self arg:q arguments arg arg Return return:yes Call FunctionDef name:_stats arg:self arguments arg Assign Assign Assign Assign Return return:yes FunctionDef name:_entropy arg:self arguments arg Return return:yes"
  },
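A numerical spot-check of the closed forms in the class above via the public `scipy.stats.arcsine` instance: the CDF is (2/pi) * arcsin(sqrt(x)) and the PPF inverts it.

import numpy as np
from scipy import stats

x = 0.3
assert np.isclose(stats.arcsine.cdf(x), 2 / np.pi * np.arcsin(np.sqrt(x)))
assert np.isclose(stats.arcsine.ppf(stats.arcsine.cdf(x)), x)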
  {
    "library": "pytorch",
    "name": "split_result_tensors",
    "source_code": "def split_result_tensors(result: torch.Tensor, inputs: list[torch.Tensor]) -> tuple[torch.Tensor, ...]:\n    if isinstance(result, torch.fx.Proxy):\n        splits = [0] * len(inputs)\n    else:\n        splits = [x.shape[0] for x in inputs]\n    return torch.split(result, splits)",
    "docstring": "A free function for use in the merge_matmul graph transformation below that splits the output from a merged matmul into the individual results for each input tensor. Arguments: result: The merged matmul result tensor. inputs: The list of inputs that were merged into one for the matmul. Returns: List of matmul results for each input tensor.",
    "type": "function",
    "file_path": "pytorch\\torch\\fx\\experimental\\merge_matmul.py",
    "ast_data": "FunctionDef name:split_result_tensors arg:result arg:inputs arguments arg arg If Call Assign Call Assign Return return:yes Call"
  },
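A sketch of the round trip the merge_matmul pass relies on: concatenate the inputs, multiply once, then split the result rows back per input. The import path is taken from the entry's file_path and should be treated as an assumption.

import torch
from torch.fx.experimental.merge_matmul import split_result_tensors

a, b = torch.randn(2, 4), torch.randn(3, 4)
w = torch.randn(4, 5)
merged = torch.cat([a, b]) @ w                 # one matmul for both inputs
ra, rb = split_result_tensors(merged, [a, b])  # split rows back per input
assert ra.shape == (2, 5) and rb.shape == (3, 5)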
  {
    "library": "tensorflow",
    "name": "_get_batched_dataset",
    "source_code": "def _get_batched_dataset(d):\n    if isinstance(d, dataset_ops.DatasetV1Adapter):\n        d = d._dataset\n    if isinstance(d, (dataset_ops.BatchDataset, batching._MapAndBatchDataset)):\n        return d\n    elif isinstance(d, (dataset_ops.PrefetchDataset, dataset_ops._OptionsDataset)):\n        return _get_batched_dataset(d._input_dataset)\n    raise ValueError('Unable to get batched dataset from the input dataset. `batch` `map_and_batch` need to be the last operations on the dataset. The batch operations can be followed by a prefetch.')",
    "docstring": "Get the batched dataset from .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\input_lib.py",
    "ast_data": "FunctionDef name:_get_batched_dataset arg:d arguments arg If Call Assign If Call Return return:yes If Call Return return:yes Call Raise Call"
  },
  {
    "library": "pytorch",
    "name": "print",
    "source_code": "def print(self):\n    print(str(self))",
    "docstring": "Print formatted table",
    "type": "method",
    "file_path": "pytorch\\torch\\utils\\benchmark\\utils\\compare.py",
    "ast_data": "FunctionDef name:print arg:self arguments arg Call Call"
  },
  {
    "library": "sphinx",
    "name": "new_navpoint",
    "source_code": "def new_navpoint(self, node: dict[str, Any], level: int, incr: bool=True) -> NavPoint:\n    if incr:\n        self.playorder += 1\n    self.tocid += 1\n    return NavPoint(f'navPoint{self.tocid}', self.playorder, node['text'], node['refuri'], [])",
    "docstring": "Create a new entry in the toc from the node at given level.",
    "type": "method",
    "file_path": "sphinx\\sphinx\\builders\\_epub_base.py",
    "ast_data": "FunctionDef name:new_navpoint arg:self arg:node arg:level arg:incr arguments arg arg arg arg If Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "set_snap",
    "source_code": "def set_snap(self, snap):\n    self._snap = snap",
    "docstring": "Set the snap setting which may be: * True: snap vertices to the nearest pixel center * False: leave vertices as-is * None: (auto) If the path contains only rectilinear line segments, round to the nearest pixel center",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\backend_bases.py",
    "ast_data": "FunctionDef name:set_snap arg:self arg:snap arguments arg arg Assign"
  },
  {
    "library": "tensorflow",
    "name": "is_async",
    "source_code": "def is_async():\n    return context().is_async()",
    "docstring": "Returns true if current thread is in async mode.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\context.py",
    "ast_data": "FunctionDef name:is_async arguments Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "nested_value_rowids",
    "source_code": "def nested_value_rowids(self, name=None):\n    with ops.name_scope(name, 'RaggedNestedValueRowIds', [self]):\n        rt_nested_ids = [self.value_rowids()]\n        rt_values = self.values\n        while isinstance(rt_values, RaggedTensor):\n            rt_nested_ids.append(rt_values.value_rowids())\n            rt_values = rt_values.values\n        return tuple(rt_nested_ids)",
    "docstring": "Returns a tuple containing the value_rowids for all ragged dimensions. is a tuple containing the tensors for all ragged dimensions in , ordered from outermost to innermost. In particular, where: * if is a . * otherwise. Args: name: A name prefix for the returned tensors (optional). Returns: A of 1-D integer s. #### Example: >>> rt = tf.ragged.constant( ... [[[[3, 1, 4, 1], [], [5, 9, 2]], [], [[6], []]]]) >>> for i, ids in enumerate(rt.nested_value_rowids()): ... print('row ids for dimension %d: %s' % (i+1, ids.numpy())) row ids for dimension 1: [0 0 0] row ids for dimension 2: [0 0 0 2 2] row ids for dimension 3: [0 0 0 0 2 2 2 3]",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\ragged_tensor.py",
    "ast_data": "FunctionDef name:nested_value_rowids arg:self arg:name arguments arg arg With Call Assign Call Assign While Call Call Call Assign Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "evaluate_distribution_of_results_for_knapsack_algo",
    "source_code": "def evaluate_distribution_of_results_for_knapsack_algo(self, knapsack_algo: Callable[[list[float], list[float], float], tuple[float, list[int], list[int]]], memory_budget_values: list[float]) -> list[dict[str, float]]:\n    results = list()\n    for memory_budget in memory_budget_values:\n        _, saved_nodes, recomputed_nodes = knapsack_algo(self._graph_info_provider.get_knapsack_memory_input(), self._graph_info_provider.get_knapsack_runtime_input(), memory_budget)\n        result = self.evaluate_knapsack_output(saved_nodes_idxs=saved_nodes, recomputable_node_idxs=recomputed_nodes)\n        result['memory_budget'] = memory_budget\n        results.append(result)\n    return results",
    "docstring": "Evaluates the distribution of results for a given knapsack algorithm. Args: knapsack_algo (Callable): The knapsack algorithm to use for evaluation. memory_budget_values (List[float]): A list of memory budgets to evaluate.",
    "type": "method",
    "file_path": "pytorch\\torch\\_functorch\\_activation_checkpointing\\knapsack_evaluator.py",
    "ast_data": "FunctionDef name:evaluate_distribution_of_results_for_knapsack_algo arg:self arg:knapsack_algo arg:memory_budget_values arguments arg arg arg Assign Call For Assign Call Call Call Assign Call Assign Call Return return:yes"
  },
  {
    "library": "cryptography",
    "name": "parameters",
    "source_code": "@abc.abstractmethod\ndef parameters(self) -> DHParameters:\n    pass",
    "docstring": "The DHParameters object associated with this private key.",
    "type": "method",
    "file_path": "cryptography\\src\\cryptography\\hazmat\\primitives\\asymmetric\\dh.py",
    "ast_data": "FunctionDef name:parameters arg:self arguments arg"
  },
  {
    "library": "pytorch",
    "name": "wrapped",
    "source_code": "@functools.wraps(orig_fn)\ndef wrapped(*args, **kwargs):\n    proxy = _find_proxy(args, kwargs)\n    if proxy is not None:\n        return_proxy = proxy.tracer.create_proxy('call_function', orig_fn, args, kwargs)\n        return_proxy.node.meta['is_wrapped'] = True\n        return return_proxy\n    return orig_fn(*args, **kwargs)",
    "docstring": "Given an closed-over `` node to preserve the call to this leaf function directly. Otherwise, just return the results of this function call, as this function is not being traced.",
    "type": "function",
    "file_path": "pytorch\\torch\\fx\\_symbolic_trace.py",
    "ast_data": "FunctionDef name:wrapped arguments arg arg Assign Call If Compare Assign Call Assign Return return:yes Return return:yes Call Call"
  },
  {
    "library": "scipy",
    "name": "_update_parameters",
    "source_code": "def _update_parameters(self, *, validation_policy=None, **params):\n    parameters = original_parameters = self._original_parameters.copy()\n    parameters.update(**params)\n    parameterization = None\n    self._invalid = np.asarray(False)\n    self._any_invalid = False\n    self._shape = tuple()\n    self._ndim = 0\n    self._size = 1\n    self._dtype = np.float64\n    if (validation_policy or self.validation_policy) == _SKIP_ALL:\n        parameters = self._process_parameters(**parameters)\n    elif not len(self._parameterizations):\n        if parameters:\n            message = f'The `{self.__class__.__name__}` distribution family does not accept parameters, but parameters `{set(parameters)}` were provided.'\n            raise ValueError(message)\n    else:\n        parameterization = self._identify_parameterization(parameters)\n        parameters, shape, size, ndim = self._broadcast(parameters)\n        parameters, invalid, any_invalid, dtype = self._validate(parameterization, parameters)\n        parameters = self._process_parameters(**parameters)\n        self._invalid = invalid\n        self._any_invalid = any_invalid\n        self._shape = shape\n        self._size = size\n        self._ndim = ndim\n        self._dtype = dtype\n    self.reset_cache()\n    self._parameters = parameters\n    self._parameterization = parameterization\n    self._original_parameters = original_parameters\n    for name in self._parameters.keys():\n        if hasattr(self.__class__, name):\n            continue\n        setattr(self.__class__, name, property(lambda self_, name_=name: self_._parameters[name_].copy()[()]))",
    "docstring": "Update the numerical values of distribution parameters. Parameters ---------- **params : array_like Desired numerical values of the distribution parameters. Any or all of the parameters initially used to instantiate the distribution may be modified. Parameters used in alternative parameterizations are not accepted. validation_policy : str To be documented. See Question 3 at the top.",
    "type": "method",
    "file_path": "scipy\\scipy\\stats\\_distribution_infrastructure.py",
    "ast_data": "FunctionDef name:_update_parameters arg:self arguments arg arg arg Assign Call Call Assign Assign Call Assign Assign Call Assign Assign Assign If Compare BoolOp Assign Call If Call If Assign Call Raise Call Assign Call Assign Call Assign Call Assign Call Assign Assign Assign Assign Assign Assign Call Assign Assign Assign For Call If Call Call Call arguments arg arg Call"
  },
  {
    "library": "scikit-learn",
    "name": "check_memory",
    "source_code": "def check_memory(memory):\n    if memory is None or isinstance(memory, str):\n        memory = joblib.Memory(location=memory, verbose=0)\n    elif not hasattr(memory, 'cache'):\n        raise ValueError(\"'memory' should be None, a string or have the same interface as joblib.Memory. Got memory='{}' instead.\".format(memory))\n    return memory",
    "docstring": "Check that `joblib.Memory` is not joblib.Memory-like. Examples -------- >>> from sklearn.utils.validation import check_memory >>> check_memory(\"caching_dir\") Memory(location=caching_dir/joblib)",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\utils\\validation.py",
    "ast_data": "FunctionDef name:check_memory arg:memory arguments arg If BoolOp Compare Call Assign Call If Call Raise Call Call Return return:yes"
  },
  {
    "library": "django",
    "name": "UploadedFile",
    "source_code": "class UploadedFile(File):\n\n    def __init__(self, file=None, name=None, content_type=None, size=None, charset=None, content_type_extra=None):\n        super().__init__(file, name)\n        self.size = size\n        self.content_type = content_type\n        self.charset = charset\n        self.content_type_extra = content_type_extra\n\n    def __repr__(self):\n        return '<%s: %s (%s)>' % (self.__class__.__name__, self.name, self.content_type)\n\n    def _get_name(self):\n        return self._name\n\n    def _set_name(self, name):\n        if name is not None:\n            name = os.path.basename(name)\n            if len(name) > 255:\n                name, ext = os.path.splitext(name)\n                ext = ext[:255]\n                name = name[:255 - len(ext)] + ext\n            name = validate_file_name(name)\n        self._name = name\n    name = property(_get_name, _set_name)",
    "docstring": "An abstract uploaded file (`` object behaves somewhat like a file object and represents some file data that the user submitted with a form.",
    "type": "class",
    "file_path": "django\\django\\core\\files\\uploadedfile.py",
    "ast_data": "ClassDef name:UploadedFile FunctionDef name:__init__ arg:self arg:file arg:name arg:content_type arg:size arg:charset arg:content_type_extra arguments arg arg arg arg arg arg arg Call Call Assign Assign Assign Assign FunctionDef name:__repr__ arg:self arguments arg Return return:yes FunctionDef name:_get_name arg:self arguments arg Return return:yes FunctionDef name:_set_name arg:self arg:name arguments arg arg If Compare Assign Call If Compare Call Assign Call Assign Assign Call Assign Call Assign Assign Call"
  },
  {
    "library": "tensorflow",
    "name": "_get_session_manager",
    "source_code": "def _get_session_manager(self):\n    if self._session_manager:\n        return self._session_manager\n    self._session_manager = sm.SessionManager(local_init_op=self._scaffold.local_init_op, local_init_feed_dict=self._scaffold.local_init_feed_dict, ready_op=self._scaffold.ready_op, ready_for_local_init_op=self._scaffold.ready_for_local_init_op, graph=ops.get_default_graph())\n    return self._session_manager",
    "docstring": "Gets or creates a SessionManager.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\training\\monitored_session.py",
    "ast_data": "FunctionDef name:_get_session_manager arg:self arguments arg If Return return:yes Assign Call Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "RegularPolygon",
    "source_code": "class RegularPolygon(Patch):\n\n    def __str__(self):\n        s = 'RegularPolygon((%g, %g), %d, radius=%g, orientation=%g)'\n        return s % (self.xy[0], self.xy[1], self.numvertices, self.radius, self.orientation)\n\n    @_docstring.interpd\n    def __init__(self, xy, numVertices, *, radius=5, orientation=0, **kwargs):\n        self.xy = xy\n        self.numvertices = numVertices\n        self.orientation = orientation\n        self.radius = radius\n        self._path = Path.unit_regular_polygon(numVertices)\n        self._patch_transform = transforms.Affine2D()\n        super().__init__(**kwargs)\n\n    def get_path(self):\n        return self._path\n\n    def get_patch_transform(self):\n        return self._patch_transform.clear().scale(self.radius).rotate(self.orientation).translate(*self.xy)",
    "docstring": "A regular polygon patch.",
    "type": "class",
    "file_path": "matplotlib\\lib\\matplotlib\\patches.py",
    "ast_data": "ClassDef name:RegularPolygon FunctionDef name:__str__ arg:self arguments arg Assign Return return:yes FunctionDef name:__init__ arg:self arg:xy arg:numVertices arguments arg arg arg arg arg arg Assign Assign Assign Assign Assign Call Assign Call Call Call FunctionDef name:get_path arg:self arguments arg Return return:yes FunctionDef name:get_patch_transform arg:self arguments arg Return return:yes Call Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "validate_inputs",
    "source_code": "def validate_inputs(x, y):\n    if isinstance(x, iterator_ops.Iterator) or isinstance(y, iterator_ops.Iterator):\n        raise ValueError('`DistributionStrategy` does not support inputs of type Iterator. You must pass a `tf.data.Dataset` object or a numpy array as input.')",
    "docstring": "Validate inputs when using DistributionStrategy. Args: x: Model Inputs. y: Model Targets. Raises: ValueError: if input is not a Dataset or a numpy array(when we use MirroredStrategy).",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\distribute\\distributed_training_utils_v1.py",
    "ast_data": "FunctionDef name:validate_inputs arg:x arg:y arguments arg arg If BoolOp Call Call Raise Call"
  },
  {
    "library": "pytorch",
    "name": "create_getattr_from_value",
    "source_code": "def create_getattr_from_value(module: torch.nn.Module, graph: Graph, prefix: str, value: Any) -> Node:\n    get_new_attr_name = get_new_attr_name_with_prefix(prefix)\n    attr_name = get_new_attr_name(module)\n    device = assert_and_get_unique_device(module)\n    new_value = value.detach().clone() if isinstance(value, torch.Tensor) else torch.tensor(value, device=device)\n    module.register_buffer(attr_name, new_value)\n    attr_node = graph.create_node('get_attr', attr_name)\n    return attr_node",
    "docstring": "Given a value of any type, creates a getattr node corresponding to the value and registers the value as a buffer to the module.",
    "type": "function",
    "file_path": "pytorch\\torch\\ao\\quantization\\fx\\utils.py",
    "ast_data": "FunctionDef name:create_getattr_from_value arg:module arg:graph arg:prefix arg:value arguments arg arg arg arg Assign Call Assign Call Assign Call Assign Call Call Call Call Call Assign Call Return return:yes"
  },
  {
    "library": "scipy",
    "name": "Trid",
    "source_code": "class Trid(Benchmark):\n    change_dimensionality = True\n\n    def __init__(self, dimensions=6):\n        Benchmark.__init__(self, dimensions)\n        self._bounds = list(zip([-20.0] * self.N, [20.0] * self.N))\n        self.global_optimum = [[6, 10, 12, 12, 10, 6]]\n        self.fglob = -50.0\n\n    def fun(self, x, *args):\n        self.nfev += 1\n        return sum((x - 1.0) ** 2.0) - sum(x[1:] * x[:-1])",
    "docstring": "Trid objective function. This class defines the Trid [1]_ global optimization problem. This is a multimodal minimization problem defined as follows: .. math:: f_{\\text{Trid}}(x) = \\sum_{i=1}^{n} (x_i - 1)^2 - \\sum_{i=2}^{n} x_i x_{i-1} Here, :math: represents the number of dimensions and :math: for :math:. *Global optimum*: :math: for :math: .. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions For Global Optimization Problems Int. Journal of Mathematical Modelling and Numerical Optimisation, 2013, 4, 150-194. TODO Jamil#150, starting index of second summation term should be 2.",
    "type": "class",
    "file_path": "scipy\\benchmarks\\benchmarks\\go_benchmark_functions\\go_funcs_T.py",
    "ast_data": "ClassDef name:Trid Assign FunctionDef name:__init__ arg:self arg:dimensions arguments arg arg Call Assign Call Call Assign Assign FunctionDef name:fun arg:self arg:x arguments arg arg arg Return return:yes Call Call"
  },
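A worked check of the stated optimum for n = 6: the squared-deviation sum is 454 and the cross-term sum is 504, so f(x*) = 454 - 504 = -50, matching `fglob` above.

import numpy as np

x = np.array([6, 10, 12, 12, 10, 6], dtype=float)
value = np.sum((x - 1.0) ** 2) - np.sum(x[1:] * x[:-1])
assert value == -50.0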
  {
    "library": "matplotlib",
    "name": "set_axisbelow",
    "source_code": "def set_axisbelow(self, b):\n    self._axisbelow = axisbelow = validate_axisbelow(b)\n    zorder = {True: 0.5, 'line': 1.5, False: 2.5}[axisbelow]\n    for axis in self._axis_map.values():\n        axis.set_zorder(zorder)\n    self.stale = True",
    "docstring": "Set whether axis ticks and gridlines are above or below most artists. This controls the zorder of the ticks and gridlines. For more information on the zorder see :doc:. Parameters ---------- b : bool or 'line' Possible values: - *True* (zorder = 0.5): Ticks and gridlines are below patches and lines, though still above images. - 'line' (zorder = 1.5): Ticks and gridlines are above patches (e.g. rectangles, with default zorder = 1) but still below lines and markers (with their default zorder = 2). - *False* (zorder = 2.5): Ticks and gridlines are above patches and lines / markers. Notes ----- For more control, call the method of each axis. See Also -------- get_axisbelow",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\axes\\_base.py",
    "ast_data": "FunctionDef name:set_axisbelow arg:self arg:b arguments arg arg Assign Call Assign For Call Call Assign"
  },
  {
    "library": "tensorflow",
    "name": "export_run_metadata",
    "source_code": "def export_run_metadata(self):\n    if not self._context_handle:\n        return None\n    with c_api_util.tf_buffer() as buffer_:\n        pywrap_tfe.TFE_ContextExportRunMetadata(self._context_handle, buffer_)\n        proto_data = pywrap_tf_session.TF_GetBuffer(buffer_)\n    run_metadata = config_pb2.RunMetadata()\n    run_metadata.ParseFromString(compat.as_bytes(proto_data))\n    return run_metadata",
    "docstring": "Returns a RunMetadata proto with accumulated information. The returned protocol buffer contains information since the most recent call to either enable_run_metadata or export_run_metadata. Returns: A RunMetadata protocol buffer. Or None if not enabled.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\context.py",
    "ast_data": "FunctionDef name:export_run_metadata arg:self arguments arg If Return return:no With Call Call Assign Call Assign Call Call Call Return return:yes"
  },
  {
    "library": "numpy",
    "name": "chebgrid3d",
    "source_code": "def chebgrid3d(x, y, z, c):\n    return pu._gridnd(chebval, c, x, y, z)",
    "docstring": "Evaluate a 3-D Chebyshev series on the Cartesian product of x, y, and z. This function returns the values: .. math:: p(a,b,c) = \\sum_{i,j,k} c_{i,j,k} * T_i(a) * T_j(b) * T_k(c) where the points `axbyczxyzxyzxyzccxyzxyzcxy`. See Also -------- chebval, chebval2d, chebgrid2d, chebval3d",
    "type": "function",
    "file_path": "numpy\\numpy\\polynomial\\chebyshev.py",
    "ast_data": "FunctionDef name:chebgrid3d arg:x arg:y arg:z arg:c arguments arg arg arg arg Return return:yes Call"
  },
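A shape sketch of the grid evaluation above: the result covers the full Cartesian product, so 3 x 4 x 5 sample points come back as a (3, 4, 5) array.

import numpy as np
from numpy.polynomial.chebyshev import chebgrid3d

x = np.linspace(-1, 1, 3)
y = np.linspace(-1, 1, 4)
z = np.linspace(-1, 1, 5)
c = np.ones((2, 2, 2))   # coefficients c[i, j, k] for T_i(a) * T_j(b) * T_k(c)
assert chebgrid3d(x, y, z, c).shape == (3, 4, 5)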
  {
    "library": "tensorflow",
    "name": "children",
    "source_code": "@classmethod\ndef children(cls, obj, save_type=base.SaveType.CHECKPOINT, **kwargs):\n    obj._maybe_initialize_trackable()\n    children = {}\n    for name, ref in obj._trackable_children(save_type, **kwargs).items():\n        ref = converter.convert_to_trackable(ref, parent=obj)\n        children[name] = ref\n    return children",
    "docstring": "Returns all child trackables attached to obj. Args: obj: A object. save_type: A string, can be 'savedmodel' or 'checkpoint'. **kwargs: kwargs to use when retrieving the object's children. Returns: Dictionary of all children attached to the object with name to trackable.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\checkpoint\\trackable_view.py",
    "ast_data": "FunctionDef name:children arg:cls arg:obj arg:save_type arguments arg arg arg arg Call Assign For Call Call Assign Call Assign Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "_decode_relation",
    "source_code": "def _decode_relation(self, s):\n    _, v = s.split(' ', 1)\n    v = v.strip()\n    if not _RE_RELATION.match(v):\n        raise BadRelationFormat()\n    res = str(v.strip('\"\\''))\n    return res",
    "docstring": "(INTERNAL) Decodes a relation line. The relation declaration is a line with the format `BadRelationFormat` exception. This method must receive a normalized string, i.e., a string without padding, including the \" \" characters. :param s: a normalized string. :return: a string with the decoded relation name.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\externals\\_arff.py",
    "ast_data": "FunctionDef name:_decode_relation arg:self arg:s arguments arg arg Assign Call Assign Call If Call Raise Call Assign Call Call Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "_max_precision_float_dtype",
    "source_code": "def _max_precision_float_dtype(xp, device):\n    if _is_xp_namespace(xp, 'torch') and str(device).startswith('mps'):\n        return xp.float32\n    return xp.float64",
    "docstring": "Return the float dtype with the highest precision supported by the device.",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\utils\\_array_api.py",
    "ast_data": "FunctionDef name:_max_precision_float_dtype arg:xp arg:device arguments arg arg If BoolOp Call Call Call Return return:yes Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "fit",
    "source_code": "@_fit_context(prefer_skip_nested_validation=True)\ndef fit(self, X, y, sample_weight=None, check_input=True):\n    super()._fit(X, y, sample_weight=sample_weight, check_input=check_input)\n    return self",
    "docstring": "Build a decision tree classifier from the training set (X, y). Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) The training input samples. Internally, it will be converted to ``. y : array-like of shape (n_samples,) or (n_samples, n_outputs) The target values (class labels) as integers or strings. sample_weight : array-like of shape (n_samples,), default=None Sample weights. If None, then samples are equally weighted. Splits that would create child nodes with net zero or negative weight are ignored while searching for a split in each node. Splits are also ignored if they would result in any single class carrying a negative weight in either child node. check_input : bool, default=True Allow to bypass several input checking. Don't use this parameter unless you know what you're doing. Returns ------- self : DecisionTreeClassifier Fitted estimator.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\tree\\_classes.py",
    "ast_data": "FunctionDef name:fit arg:self arg:X arg:y arg:sample_weight arg:check_input arguments arg arg arg arg arg Call Call Return return:yes Call"
  },
  {
    "library": "kornia",
    "name": "AdjustGamma",
    "source_code": "class AdjustGamma(Module):\n\n    def __init__(self, gamma: Union[float, Tensor], gain: Union[float, Tensor]=1.0) -> None:\n        super().__init__()\n        self.gamma: Union[float, Tensor] = gamma\n        self.gain: Union[float, Tensor] = gain\n\n    def forward(self, input: Tensor) -> Tensor:\n        return adjust_gamma(input, self.gamma, self.gain)",
    "docstring": "Perform gamma correction on an image. The input image is expected to be in the range of [0, 1]. Args: gamma: Non negative real number, same as y\\gammay in the equation. gamma larger than 1 make the shadows darker, while gamma smaller than 1 make dark regions lighter. gain: The constant multiplier. Shape: - Input: Image to be adjusted in the shape of :math:. - Output: Adjusted image in the shape of :math:. Example: >>> x = torch.ones(1, 1, 3, 3) >>> AdjustGamma(1.0, 2.0)(x) tensor([[[[1., 1., 1.], [1., 1., 1.], [1., 1., 1.]]]]) >>> x = torch.ones(2, 5, 3, 3) >>> y1 = torch.ones(2) * 1.0 >>> y2 = torch.ones(2) * 2.0 >>> AdjustGamma(y1, y2)(x).shape torch.Size([2, 5, 3, 3])",
    "type": "class",
    "file_path": "kornia\\kornia\\enhance\\adjust.py",
    "ast_data": "ClassDef name:AdjustGamma FunctionDef name:__init__ arg:self arg:gamma arg:gain arguments arg arg arg Call Call FunctionDef name:forward arg:self arg:input arguments arg arg Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "_calculate_null_samples",
    "source_code": "def _calculate_null_samples(data, statistic, n_permutations, batch, rng=None):\n    n_samples = len(data)\n    if n_samples == 1:\n        data = [data[0], -data[0]]\n    data = np.swapaxes(data, 0, -1)\n\n    def statistic_wrapped(*data, axis):\n        data = np.swapaxes(data, 0, -1)\n        if n_samples == 1:\n            data = data[0:1]\n        return statistic(*data, axis=axis)\n    return _calculate_null_pairings(data, statistic_wrapped, n_permutations, batch, rng)",
    "docstring": "Calculate null distribution for paired-sample tests.",
    "type": "function",
    "file_path": "scipy\\scipy\\stats\\_resampling.py",
    "ast_data": "FunctionDef name:_calculate_null_samples arg:data arg:statistic arg:n_permutations arg:batch arg:rng arguments arg arg arg arg arg Assign Call If Compare Assign Assign Call FunctionDef name:statistic_wrapped arguments arg arg Assign Call If Compare Assign Return return:yes Call Return return:yes Call"
  },
  {
    "library": "numpy",
    "name": "leggrid2d",
    "source_code": "def leggrid2d(x, y, c):\n    return pu._gridnd(legval, c, x, y)",
    "docstring": "Evaluate a 2-D Legendre series on the Cartesian product of x and y. This function returns the values: .. math:: p(a,b) = \\sum_{i,j} c_{i,j} * L_i(a) * L_j(b) where the points `axbyxyxyxyccxyxycxy`. See Also -------- legval, legval2d, legval3d, leggrid3d",
    "type": "function",
    "file_path": "numpy\\numpy\\polynomial\\legendre.py",
    "ast_data": "FunctionDef name:leggrid2d arg:x arg:y arg:c arguments arg arg arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "Convert",
    "source_code": "class Convert(Rule):\n\n    def __str__(self):\n        return 'Convert rule for {}'.format(self._prefix)\n\n    def get_action(self, module):\n        if self.matches(module.__name__):\n            return Action.CONVERT\n        return Action.NONE",
    "docstring": "Indicates that this module should be converted.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\autograph\\core\\config_lib.py",
    "ast_data": "ClassDef name:Convert FunctionDef name:__str__ arg:self arguments arg Return return:yes Call FunctionDef name:get_action arg:self arg:module arguments arg arg If Call Return return:yes Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_batch",
    "source_code": "def _batch(self, batch_size):\n    return self._copy(param_specs=nest.map_structure(lambda spec: spec._batch(batch_size), self._param_specs))",
    "docstring": "Returns a TypeSpec representing a batch of objects with this TypeSpec.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\linalg\\linear_operator.py",
    "ast_data": "FunctionDef name:_batch arg:self arg:batch_size arguments arg arg Return return:yes Call Call arguments arg Call"
  },
  {
    "library": "virtualenv",
    "name": "from_dir",
    "source_code": "def from_dir(distribution, version, for_py_version, directories):\n    for folder in directories:\n        for wheel in discover_wheels(folder, distribution, version, for_py_version):\n            return wheel\n    return None",
    "docstring": "Load a compatible wheel from a given folder.",
    "type": "function",
    "file_path": "virtualenv\\src\\virtualenv\\seed\\wheels\\bundle.py",
    "ast_data": "FunctionDef name:from_dir arg:distribution arg:version arg:for_py_version arg:directories arguments arg arg arg arg For For Call Return return:yes Return return:no"
  },
  {
    "library": "tensorflow",
    "name": "assert_same_structure",
    "source_code": "def assert_same_structure(nest1, nest2, check_types=True):\n    nest_util.assert_same_structure(nest_util.Modality.DATA, nest1, nest2, check_types)",
    "docstring": "Asserts that two structures are nested in the same way. Args: nest1: an arbitrarily nested structure. nest2: an arbitrarily nested structure. check_types: if (default) types of sequences should be same as well. For dictionary, \"type\" of dictionary is considered to include its keys. In other words, two dictionaries with different keys are considered to have a different \"type\". If set to , two iterables are considered same as long as they yield the elements that have same structures. Raises: ValueError: If the two structures do not have the same number of elements or if the two structures are not nested in the same way. TypeError: If the two structures differ in the type of sequence in any of their substructures. Only possible if is .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\data\\util\\nest.py",
    "ast_data": "FunctionDef name:assert_same_structure arg:nest1 arg:nest2 arg:check_types arguments arg arg arg Call"
  },
  {
    "library": "django",
    "name": "get_list_or_404",
    "source_code": "def get_list_or_404(klass, *args, **kwargs):\n    queryset = _get_queryset(klass)\n    if not hasattr(queryset, 'filter'):\n        klass__name = klass.__name__ if isinstance(klass, type) else klass.__class__.__name__\n        raise ValueError(\"First argument to get_list_or_404() must be a Model, Manager, or QuerySet, not '%s'.\" % klass__name)\n    obj_list = list(queryset.filter(*args, **kwargs))\n    if not obj_list:\n        raise Http404('No %s matches the given query.' % queryset.model._meta.object_name)\n    return obj_list",
    "docstring": "Use filter() to return a list of objects, or raise an Http404 exception if the list is empty. klass may be a Model, Manager, or QuerySet object. All other passed arguments and keyword arguments are used in the filter() query.",
    "type": "function",
    "file_path": "django\\django\\shortcuts.py",
    "ast_data": "FunctionDef name:get_list_or_404 arg:klass arguments arg arg arg Assign Call If Call Assign Call Raise Call Assign Call Call If Raise Call Return return:yes"
  },
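Typical view-level usage of the shortcut above; `Article` and its `status` field are hypothetical stand-ins for a real model.

from django.shortcuts import get_list_or_404, render

def published_articles(request):
    # Raises Http404 when the filtered queryset is empty.
    articles = get_list_or_404(Article, status="published")  # hypothetical model
    return render(request, "articles/list.html", {"articles": articles})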
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, scores=None, classes=None):\n    if scores is not None and (not (isinstance(scores, tensor.Tensor) and scores.dtype.is_floating)):\n        raise ValueError('Classification scores must be a float32 Tensor; got {}'.format(scores))\n    if classes is not None and (not (isinstance(classes, tensor.Tensor) and dtypes.as_dtype(classes.dtype) == dtypes.string)):\n        raise ValueError('Classification classes must be a string Tensor; got {}'.format(classes))\n    if scores is None and classes is None:\n        raise ValueError('Cannot create a ClassificationOutput with empty arguments. At least one of `scores` and `classes` must be defined.')\n    self._scores = scores\n    self._classes = classes",
    "docstring": "Constructor for . Args: scores: A float giving scores (sometimes but not always interpretable as probabilities) for each class. May be , but only if is set. Interpretation varies-- see class doc. classes: A string giving predicted class labels. May be , but only if is set. Interpretation varies-- see class doc. Raises: ValueError: if neither classes nor scores is set, or one of them is not a with the correct dtype.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\saved_model\\model_utils\\export_output.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:scores arg:classes arguments arg arg arg If BoolOp Compare BoolOp Call Raise Call Call If BoolOp Compare BoolOp Call Compare Call Raise Call Call If BoolOp Compare Compare Raise Call Assign Assign"
  },
  {
    "library": "tensorflow",
    "name": "_BesselJ0Grad",
    "source_code": "@ops.RegisterGradient('BesselJ0')\ndef _BesselJ0Grad(op: ops.Operation, grad):\n    x = op.inputs[0]\n    with ops.control_dependencies([grad]):\n        partial_x = -special_math_ops.bessel_j1(x)\n        return grad * partial_x",
    "docstring": "Compute gradient of bessel_j0(x) with respect to its argument.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\math_grad.py",
    "ast_data": "FunctionDef name:_BesselJ0Grad arg:op arg:grad arguments arg arg Assign With Call Assign Call Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "unit_rectangle",
    "source_code": "@classmethod\ndef unit_rectangle(cls):\n    if cls._unit_rectangle is None:\n        cls._unit_rectangle = cls([[0, 0], [1, 0], [1, 1], [0, 1], [0, 0]], closed=True, readonly=True)\n    return cls._unit_rectangle",
    "docstring": "Return a instance of the unit rectangle from (0, 0) to (1, 1).",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\path.py",
    "ast_data": "FunctionDef name:unit_rectangle arg:cls arguments arg If Compare Assign Call Return return:yes"
  },
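Usage sketch: the returned Path is cached and read-only, so repeated calls hand back the same object:

```python
import matplotlib.pyplot as plt
from matplotlib.patches import PathPatch
from matplotlib.path import Path

rect = Path.unit_rectangle()
assert rect is Path.unit_rectangle()  # cached singleton
print(rect.vertices)                  # [[0,0],[1,0],[1,1],[0,1],[0,0]]

fig, ax = plt.subplots()
ax.add_patch(PathPatch(rect, facecolor="lightblue"))
```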
  {
    "library": "pytorch",
    "name": "pg_map",
    "source_code": "@property\ndef pg_map(self) -> dict[ProcessGroup, tuple[str, Store]]:\n    global _pg_map\n    return _pg_map",
    "docstring": "Provide Mapping from ProcessGroup to backend name and store. For NCCL and GLOO pg, it is a map from ProcessGroup to (Backend, Store) For MPI pg, it is a map from ProcessGroup to (Backend, None) TODO don't expose the map, expose fine grained ops",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\distributed_c10d.py",
    "ast_data": "FunctionDef name:pg_map arg:self arguments arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "in_onnx_export",
    "source_code": "@property\ndef in_onnx_export(self) -> bool:\n    return self._in_onnx_export",
    "docstring": "Whether it is in the middle of ONNX export.",
    "type": "method",
    "file_path": "pytorch\\torch\\onnx\\_globals.py",
    "ast_data": "FunctionDef name:in_onnx_export arg:self arguments arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "StaticAutotunerFuture",
    "source_code": "class StaticAutotunerFuture(CodeCacheFuture):\n\n    def __init__(self, static_autotuner: CachingAutotuner) -> None:\n        self.static_autotuner = static_autotuner\n        self.reload_kernel_from_src: Optional[Callable[[], Any]] = None\n\n    def result(self) -> CachingAutotuner:\n        assert self.reload_kernel_from_src is not None\n        with dynamo_timed('StaticAutotunerFuture.warm_precompile'):\n            self.static_autotuner.recheck_autotune_cache(reload_kernel_from_src=self.reload_kernel_from_src)\n            self.static_autotuner.precompile(warm_cache_only=False, reload_kernel=self.reload_kernel_from_src, static_triton_bundle_key=None)\n            return self.static_autotuner",
    "docstring": "A statically launchable CachingAutotuner, loaded from TritonBundler",
    "type": "class",
    "file_path": "pytorch\\torch\\_inductor\\codecache.py",
    "ast_data": "ClassDef name:StaticAutotunerFuture FunctionDef name:__init__ arg:self arg:static_autotuner arguments arg arg Assign FunctionDef name:result arg:self arguments arg Compare With Call Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "lookup_tensor",
    "source_code": "def lookup_tensor(self, index: MetadataIndex) -> torch.Tensor:\n    return find_state_dict_object(self.state_dict, index)",
    "docstring": "Extension from the planner interface to make it easy to extend the default planner.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\checkpoint\\default_planner.py",
    "ast_data": "FunctionDef name:lookup_tensor arg:self arg:index arguments arg arg Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "_set_lim_and_transforms",
    "source_code": "def _set_lim_and_transforms(self):\n    self.transAxes = mtransforms.BboxTransformTo(self.bbox)\n    self.transScale = mtransforms.TransformWrapper(mtransforms.IdentityTransform())\n    self.transLimits = mtransforms.BboxTransformFrom(mtransforms.TransformedBbox(self._viewLim, self.transScale))\n    self.transData = self.transScale + (self.transLimits + self.transAxes)\n    self._xaxis_transform = mtransforms.blended_transform_factory(self.transData, self.transAxes)\n    self._yaxis_transform = mtransforms.blended_transform_factory(self.transAxes, self.transData)",
    "docstring": "Set the *_xaxis_transform*, *_yaxis_transform*, *transScale*, *transData*, *transLimits* and *transAxes* transformations. .. note:: This method is primarily used by rectilinear projections of the class, and is meant to be overridden by new kinds of projection Axes that need different transformations and limits. (See for an example.)",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\axes\\_base.py",
    "ast_data": "FunctionDef name:_set_lim_and_transforms arg:self arguments arg Assign Call Assign Call Call Assign Call Call Assign Assign Call Assign Call"
  },
  {
    "library": "cherrypy",
    "name": "allow",
    "source_code": "def allow(methods=None, debug=False):\n    if not isinstance(methods, (tuple, list)):\n        methods = [methods]\n    methods = [m.upper() for m in methods if m]\n    if not methods:\n        methods = ['GET', 'HEAD']\n    elif 'GET' in methods and 'HEAD' not in methods:\n        methods.append('HEAD')\n    cherrypy.response.headers['Allow'] = ', '.join(methods)\n    if cherrypy.request.method not in methods:\n        if debug:\n            cherrypy.log('request.method %r not in methods %r' % (cherrypy.request.method, methods), 'TOOLS.ALLOW')\n        raise cherrypy.HTTPError(405)\n    elif debug:\n        cherrypy.log('request.method %r in methods %r' % (cherrypy.request.method, methods), 'TOOLS.ALLOW')",
    "docstring": "Raise 405 if request.method not in methods (default ['GET', 'HEAD']). The given methods are case-insensitive, and may be in any order. If only one method is allowed, you may supply a single string; if more than one, supply a list of strings. Regardless of whether the current method is allowed or not, this also emits an 'Allow' response header, containing the given methods.",
    "type": "function",
    "file_path": "cherrypy\\cherrypy\\lib\\cptools.py",
    "ast_data": "FunctionDef name:allow arg:methods arg:debug arguments arg arg If Call Assign Assign Call If Assign If BoolOp Compare Compare Call Assign Call If Compare If Call Raise Call If Call"
  },
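A usage sketch of the tool applied as a handler decorator; note that HEAD is added automatically whenever GET is allowed:

```python
import cherrypy


class Resource:
    @cherrypy.expose
    @cherrypy.tools.allow(methods=["GET", "POST"])
    def index(self):
        # GET, HEAD (implied by GET) and POST succeed; any other method
        # gets a 405 response carrying "Allow: GET, POST, HEAD".
        return "ok"


cherrypy.quickstart(Resource())
```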
  {
    "library": "numpy",
    "name": "get_printoptions",
    "source_code": "@set_module('numpy')\ndef get_printoptions():\n    opts = format_options.get().copy()\n    opts['legacy'] = {113: '1.13', 121: '1.21', 125: '1.25', 201: '2.1', 202: '2.2', sys.maxsize: False}[opts['legacy']]\n    return opts",
    "docstring": "Return the current print options. Returns ------- print_opts : dict Dictionary of current print options with keys - precision : int - threshold : int - edgeitems : int - linewidth : int - suppress : bool - nanstr : str - infstr : str - sign : str - formatter : dict of callables - floatmode : str - legacy : str or False For a full description of these options, see . See Also -------- set_printoptions, printoptions Examples -------- >>> import numpy as np >>> np.get_printoptions() {'edgeitems': 3, 'threshold': 1000, ..., 'override_repr': None} >>> np.get_printoptions()['linewidth'] 75 >>> np.set_printoptions(linewidth=100) >>> np.get_printoptions()['linewidth'] 100",
    "type": "function",
    "file_path": "numpy\\numpy\\_core\\arrayprint.py",
    "ast_data": "FunctionDef name:get_printoptions arguments Assign Call Call Assign Return return:yes Call"
  },
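The companion context manager `np.printoptions` applies options temporarily, which `get_printoptions` reflects while active:

```python
import numpy as np

before = np.get_printoptions()["precision"]
with np.printoptions(precision=2):
    assert np.get_printoptions()["precision"] == 2
    print(np.array([np.pi]))  # [3.14]
assert np.get_printoptions()["precision"] == before  # restored on exit
```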
  {
    "library": "matplotlib",
    "name": "get_status",
    "source_code": "def get_status(self):\n    return [not colors.same_color(color, colors.to_rgba('none')) for color in self._checks.get_facecolors()]",
    "docstring": "Return a list of the status (True/False) of all of the check buttons.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\widgets.py",
    "ast_data": "FunctionDef name:get_status arg:self arguments arg Return return:yes Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_orthogonal_kernel",
    "source_code": "def _orthogonal_kernel(self, ksize, cin, cout):\n    if cin > cout:\n        raise ValueError(f'The number of input channels (cin={cin}) cannot exceed the number of output channels (cout={cout}).')\n    orth = self._orthogonal_matrix(cout)[0:cin, :]\n    if ksize == 1:\n        return array_ops.expand_dims(orth, 0)\n    p = self._block_orth(self._symmetric_projection(cout))\n    for _ in range(ksize - 2):\n        temp = self._block_orth(self._symmetric_projection(cout))\n        p = self._matrix_conv(p, temp)\n    for i in range(ksize):\n        p[i] = math_ops.matmul(orth, p[i])\n    return self._dict_to_tensor(p, ksize)",
    "docstring": "Construct orthogonal kernel for convolution. Args: ksize: Kernel size. cin: Number of input channels. cout: Number of output channels. Returns: An [ksize, ksize, cin, cout] orthogonal kernel. Raises: ValueError: If cin > cout.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\init_ops.py",
    "ast_data": "FunctionDef name:_orthogonal_kernel arg:self arg:ksize arg:cin arg:cout arguments arg arg arg arg If Compare Raise Call Assign Call If Compare Return return:yes Call Assign Call Call For Call Assign Call Call Assign Call For Call Assign Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_create_mirrored_tpu_variables",
    "source_code": "def _create_mirrored_tpu_variables(**kwargs):\n    initial_value = None\n    value_list = []\n    for i, d in enumerate(devices):\n        with ops.device(d):\n            if i == 0:\n                initial_value = kwargs['initial_value']\n                with maybe_init_scope():\n                    initial_value = initial_value() if callable(initial_value) else initial_value\n            if i > 0:\n                var0name = value_list[0].name.split(':')[0]\n                kwargs['name'] = '%s/replica_%d/' % (var0name, i)\n            kwargs['initial_value'] = initial_value\n            with context.device_policy(context.DEVICE_PLACEMENT_SILENT):\n                v = next_creator(**kwargs)\n            assert not isinstance(v, tpu_values.TPUMirroredVariable)\n            value_list.append(v)\n    return value_list",
    "docstring": "Returns a list of s. The list contains s and can be used to initialize a . Args: **kwargs: the keyword arguments for creating a variable",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\tpu_strategy.py",
    "ast_data": "FunctionDef name:_create_mirrored_tpu_variables arguments arg Assign Assign For Call With Call If Compare Assign With Call Assign Call Call If Compare Assign Call Assign Assign With Call Assign Call Call Call Return return:yes"
  },
  {
    "library": "cherrypy",
    "name": "HandlerWrapperTool",
    "source_code": "class HandlerWrapperTool(Tool):\n\n    def __init__(self, newhandler, point='before_handler', name=None, priority=50):\n        self.newhandler = newhandler\n        self._point = point\n        self._name = name\n        self._priority = priority\n\n    def callable(self, *args, **kwargs):\n        innerfunc = cherrypy.serving.request.handler\n\n        def wrap(*args, **kwargs):\n            return self.newhandler(innerfunc, *args, **kwargs)\n        cherrypy.serving.request.handler = wrap",
    "docstring": "Tool which wraps request.handler in a provided wrapper function. The 'newhandler' arg must be a handler wrapper function that takes a 'next_handler' argument, plus ``. Like all page handler functions, it must return an iterable for use as cherrypy.response.body. For example, to allow your 'inner' page handlers to return dicts which then get interpolated into a template:: def interpolator(next_handler, *args, **kwargs): filename = cherrypy.request.config.get('template') cherrypy.response.template = env.get_template(filename) response_dict = next_handler(*args, **kwargs) return cherrypy.response.template.render(**response_dict) cherrypy.tools.jinja = HandlerWrapperTool(interpolator)",
    "type": "class",
    "file_path": "cherrypy\\cherrypy\\_cptools.py",
    "ast_data": "ClassDef name:HandlerWrapperTool FunctionDef name:__init__ arg:self arg:newhandler arg:point arg:name arg:priority arguments arg arg arg arg arg Assign Assign Assign Assign FunctionDef name:callable arg:self arguments arg arg arg Assign FunctionDef name:wrap arguments arg arg Return return:yes Call Assign"
  },
  {
    "library": "pandas",
    "name": "get_block_type",
    "source_code": "def get_block_type(dtype: DtypeObj) -> type[Block]:\n    if isinstance(dtype, DatetimeTZDtype):\n        return DatetimeLikeBlock\n    elif isinstance(dtype, PeriodDtype):\n        return NDArrayBackedExtensionBlock\n    elif isinstance(dtype, ExtensionDtype):\n        return ExtensionBlock\n    kind = dtype.kind\n    if kind in 'Mm':\n        return DatetimeLikeBlock\n    return NumpyBlock",
    "docstring": "Find the appropriate Block subclass to use for the given values and dtype. Parameters ---------- dtype : numpy or pandas dtype Returns ------- cls : class, subclass of Block",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\internals\\blocks.py",
    "ast_data": "FunctionDef name:get_block_type arg:dtype arguments arg If Call Return return:yes If Call Return return:yes If Call Return return:yes Assign If Compare Return return:yes Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "get_block_args",
    "source_code": "def get_block_args(self) -> list[ConstexprArg]:\n    block_names = {}\n    for sub_kernel in self.sub_kernels:\n        for tree in sub_kernel.range_trees:\n            if tree.is_reduction and (not sub_kernel.inside_reduction or sub_kernel.persistent_reduction):\n                continue\n            if tree.prefix == 'x' and sub_kernel.no_x_dim:\n                continue\n            block_names[f'{tree.prefix.upper()}BLOCK'] = tree.prefix\n    self.block_args = list(block_names.keys())\n    return [ConstexprArg(x) for x in block_names.keys()]",
    "docstring": "Calculate blocks from sub_kernels and range_trees. **Update self.block_args** Return the block args",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\codegen\\triton_combo_kernel.py",
    "ast_data": "FunctionDef name:get_block_args arg:self arguments arg Assign For For If BoolOp BoolOp If BoolOp Compare Assign Call Assign Call Call Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "_LoweringSerializerContextManager",
    "source_code": "class _LoweringSerializerContextManager(contextlib.ExitStack):\n\n    def __init__(self, lowering: _LoweringSerializer) -> None:\n        super().__init__()\n        self.lowering = lowering\n\n    @override\n    def __enter__(self) -> Self:\n        super().__enter__()\n        from . import lowering\n        for k, v in lowering.lowerings.items():\n            name = str(k)\n            if name in self.lowering.fallbacks:\n                if not _is_fallback_handler(v):\n                    self.enter_context(lowering.force_fallback(k))\n        return self",
    "docstring": "Helper for _LoweringSerializer.patch()",
    "type": "class",
    "file_path": "pytorch\\torch\\_inductor\\compile_fx_ext.py",
    "ast_data": "ClassDef name:_LoweringSerializerContextManager FunctionDef name:__init__ arg:self arg:lowering arguments arg arg Call Call Assign FunctionDef name:__enter__ arg:self arguments arg Call Call For Call Assign Call If Compare If Call Call Call Return return:yes"
  },
  {
    "library": "authlib",
    "name": "authenticate_token",
    "source_code": "def authenticate_token(self, request, client):\n    self.check_params(request, client)\n    token = self.query_token(request.form['token'], request.form.get('token_type_hint'))\n    if token and (not token.check_client(client)):\n        raise InvalidGrantError()\n    return token",
    "docstring": "The client constructs the request by including the following parameters using the \"application/x-www-form-urlencoded\" format in the HTTP request entity-body: token REQUIRED. The token that the client wants to get revoked. token_type_hint OPTIONAL. A hint about the type of the token submitted for revocation.",
    "type": "method",
    "file_path": "authlib\\authlib\\oauth2\\rfc7009\\revocation.py",
    "ast_data": "FunctionDef name:authenticate_token arg:self arg:request arg:client arguments arg arg arg Call Assign Call Call If BoolOp Call Raise Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "topological_sort_dfs",
    "source_code": "def topological_sort_dfs(nodes: list[BaseSchedulerNode]) -> list[BaseSchedulerNode]:\n    seen: OrderedSet[BaseSchedulerNode] = OrderedSet()\n    name_to_node: dict[str, BaseSchedulerNode] = dict()\n    result: list[BaseSchedulerNode] = []\n    size_with_reads: dict[BaseSchedulerNode, int] = dict()\n\n    def visit(n: BaseSchedulerNode) -> None:\n        if n not in seen:\n            seen.add(n)\n            dep_nodes = [name_to_node[dep.name] for dep in n.unmet_dependencies if dep.name in name_to_node]\n            for node in sorted(dep_nodes, key=lambda n: (size_with_reads[n], n.mpi_node.index)):\n                visit(node)\n            result.append(n)\n    for node in nodes:\n        for name in node.get_buffer_names():\n            name_to_node[name] = node\n    for node in nodes:\n        size_with_reads[node] = node.mpi_node.size + sum((pred_buf.mpi_buffer.size_free for pred_buf in node.mpi_node.pred_buffers))\n    for node in sorted(nodes, key=lambda n: (size_with_reads[n], n.mpi_node.index)):\n        visit(node)\n    return result",
    "docstring": "This is a DFS topological sort. The setup is similar to in scheduler.py. The difference is the order nodes are visited in the outer loop. In , nodes are visited in their original order. In this function, nodes are visited based on their priority -- for each node, we compute the total memory of all buffers it reads from or writes to, and we visit the nodes in ascending order of this priority.",
    "type": "function",
    "file_path": "pytorch\\torch\\_inductor\\memory.py",
    "ast_data": "FunctionDef name:topological_sort_dfs arg:nodes arguments arg Call Call Call FunctionDef name:visit arg:n arguments arg If Compare Call Assign Compare For Call arguments arg Call Call For For Call Assign For Assign Call For Call arguments arg Call Return return:yes"
  },
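A standalone sketch of the same idea on a toy dependency graph: DFS into unmet dependencies first, visiting candidates in ascending size priority (the names and sizes here are made up for illustration):

```python
def topo_sort_by_priority(deps, size):
    seen, result = set(), []

    def visit(n):
        if n in seen:
            return
        seen.add(n)
        for d in sorted(deps[n], key=lambda m: size[m]):  # cheapest deps first
            visit(d)
        result.append(n)  # emitted only after all dependencies

    for n in sorted(deps, key=lambda m: size[m]):  # priority-ordered outer loop
        visit(n)
    return result


deps = {"a": [], "b": ["a"], "c": ["a"], "d": ["b", "c"]}
size = {"a": 4, "b": 1, "c": 3, "d": 2}
print(topo_sort_by_priority(deps, size))  # ['a', 'b', 'c', 'd']
```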
  {
    "library": "tensorflow",
    "name": "number_of_shards",
    "source_code": "@property\ndef number_of_shards(self):\n    return self._number_of_shards",
    "docstring": "Returns the number of shards in the policy or None if unspecified.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\tpu\\tpu_sharding.py",
    "ast_data": "FunctionDef name:number_of_shards arg:self arguments arg Return return:yes"
  },
  {
    "library": "numpy",
    "name": "unique_counts",
    "source_code": "@array_function_dispatch(_unique_counts_dispatcher)\ndef unique_counts(x):\n    result = unique(x, return_index=False, return_inverse=False, return_counts=True, equal_nan=False)\n    return UniqueCountsResult(*result)",
    "docstring": "Find the unique elements and counts of an input array . This function is an Array API compatible alternative to:: np.unique(x, return_counts=True, equal_nan=False, sorted=False) but returns a namedtuple for easier access to each output. .. note:: This function currently always returns a sorted result, however, this could change in any NumPy minor release. Parameters ---------- x : array_like Input array. It will be flattened if it is not already 1-D. Returns ------- out : namedtuple The result containing: * values - The unique elements of an input array. * counts - The corresponding counts for each unique element. See Also -------- unique : Find the unique elements of an array. Examples -------- >>> import numpy as np >>> x = [1, 1, 2] >>> uniq = np.unique_counts(x) >>> uniq.values array([1, 2]) >>> uniq.counts array([2, 1])",
    "type": "function",
    "file_path": "numpy\\numpy\\lib\\_arraysetops_impl.py",
    "ast_data": "FunctionDef name:unique_counts arg:x arguments arg Assign Call Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_TensorListConcatGrad",
    "source_code": "@ops.RegisterGradient('TensorListConcat')\n@ops.RegisterGradient('TensorListConcatV2')\ndef _TensorListConcatGrad(op: ops.Operation, dtensor, unused_dlengths):\n    dlist = tensor_list_split(dtensor, element_shape=gen_list_ops.tensor_list_element_shape(op.inputs[0], shape_type=dtypes.int32), lengths=op.outputs[1])\n    if op.type == 'TensorListConcatV2':\n        return (dlist, None, None)\n    else:\n        return dlist",
    "docstring": "Gradient function for TensorListConcat.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\list_ops.py",
    "ast_data": "FunctionDef name:_TensorListConcatGrad arg:op arg:dtensor arg:unused_dlengths arguments arg arg arg Assign Call Call If Compare Return return:yes Return return:yes Call Call"
  },
  {
    "library": "django",
    "name": "ListView",
    "source_code": "class ListView(MultipleObjectTemplateResponseMixin, BaseListView):\n    pass",
    "docstring": "Render some list of objects, set by or . can actually be any iterable of items, not just a queryset.",
    "type": "class",
    "file_path": "django\\django\\views\\generic\\list.py",
    "ast_data": "ClassDef name:ListView"
  },
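A minimal subclass sketch, again assuming a hypothetical `Article` model:

```python
from django.views.generic import ListView

from myapp.models import Article  # hypothetical model


class ArticleList(ListView):
    model = Article   # or set `queryset = Article.objects.filter(...)`
    paginate_by = 25
    # Renders "myapp/article_list.html" by default; the objects are
    # available in the template context as object_list / article_list.
```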
  {
    "library": "pytorch",
    "name": "verify",
    "source_code": "def verify(self, ep):\n    epm = ep.module()\n    for args, kwargs in self._examples:\n        torch.export._unlift._check_input_constraints_pre_hook(epm, args, kwargs or {})",
    "docstring": "Verifies that an exported program is valid for each additional input.",
    "type": "method",
    "file_path": "pytorch\\torch\\export\\dynamic_shapes.py",
    "ast_data": "FunctionDef name:verify arg:self arg:ep arguments arg arg Assign Call For Call BoolOp"
  },
  {
    "library": "pytorch",
    "name": "update_usage",
    "source_code": "def update_usage(self, timestep: int):\n    self.live_range = LiveRange(min(timestep, self.live_range.begin), max(timestep, self.live_range.end))",
    "docstring": "Expand self.live_range to include timestep",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\codegen\\memory_planning.py",
    "ast_data": "FunctionDef name:update_usage arg:self arg:timestep arguments arg arg Assign Call Call Call"
  },
  {
    "library": "matplotlib",
    "name": "get_default_handler_map",
    "source_code": "@classmethod\ndef get_default_handler_map(cls):\n    return cls._default_handler_map",
    "docstring": "Return the global default handler map, shared by all legends.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\legend.py",
    "ast_data": "FunctionDef name:get_default_handler_map arg:cls arguments arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "__repr__",
    "source_code": "def __repr__(self) -> str:\n    return f'_MaskPartial(offset_shape={self.offset_shape}, offset_dim={self.offset_dim})'",
    "docstring": "machine readable representation of the MaskPartial placement",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\tensor\\_ops\\_embedding_ops.py",
    "ast_data": "FunctionDef name:__repr__ arg:self arguments arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "lp_pool2d",
    "source_code": "def lp_pool2d(input: Tensor, norm_type: Union[int, float], kernel_size: BroadcastingList2[int], stride: Optional[BroadcastingList2[int]]=None, ceil_mode: bool=False) -> Tensor:\n    if has_torch_function_unary(input):\n        return handle_torch_function(lp_pool2d, (input,), input, norm_type, kernel_size, stride=stride, ceil_mode=ceil_mode)\n    kw, kh = _pair(kernel_size)\n    if stride is not None:\n        out = avg_pool2d(input.pow(norm_type), kernel_size, stride, 0, ceil_mode)\n    else:\n        out = avg_pool2d(input.pow(norm_type), kernel_size, padding=0, ceil_mode=ceil_mode)\n    return (torch.sign(out) * relu(torch.abs(out))).mul(kw * kh).pow(1.0 / norm_type)",
    "docstring": "Apply a 2D power-average pooling over an input signal composed of several input planes. If the sum of all inputs to the power of is zero, the gradient is set to zero as well. See :class: for details.",
    "type": "function",
    "file_path": "pytorch\\torch\\nn\\functional.py",
    "ast_data": "FunctionDef name:lp_pool2d arg:input arg:norm_type arg:kernel_size arg:stride arg:ceil_mode arguments arg arg arg arg arg If Call Return return:yes Call Assign Call If Compare Assign Call Call Assign Call Call Return return:yes Call Call Call Call Call"
  },
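Usage sketch; for non-negative inputs, norm_type=1 reduces the formula to the window sum (kernel area times avg_pool2d), which makes a handy sanity check:

```python
import torch
import torch.nn.functional as F

x = torch.rand(1, 3, 8, 8)  # non-negative inputs
out = F.lp_pool2d(x, norm_type=2, kernel_size=2)
print(out.shape)  # torch.Size([1, 3, 4, 4])

# p=1 over a 2x2 window is just the window sum (= 4 * average):
torch.testing.assert_close(F.lp_pool2d(x, 1, 2), F.avg_pool2d(x, 2) * 4)
```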
  {
    "library": "pytorch",
    "name": "graph",
    "source_code": "@property\ndef graph(self) -> GraphLowering:\n    return _graph._get_handler()",
    "docstring": "The graph currently being generated",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\virtualized.py",
    "ast_data": "FunctionDef name:graph arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "authlib",
    "name": "is_revoked",
    "source_code": "def is_revoked(self):\n    raise NotImplementedError()",
    "docstring": "A method to define if this token is revoked. For instance, there is a boolean column `` in the table:: def is_revoked(self): return self.revoked :return: boolean",
    "type": "method",
    "file_path": "authlib\\authlib\\oauth2\\rfc6749\\models.py",
    "ast_data": "FunctionDef name:is_revoked arg:self arguments arg Raise Call"
  },
  {
    "library": "pytorch",
    "name": "install_guard",
    "source_code": "def install_guard(*guards, skip=0):\n    from torch._guards import TracingContext\n    collect_debug_stack = guards_log.isEnabledFor(logging.DEBUG) or verbose_guards_log.isEnabledFor(logging.DEBUG)\n    add = TracingContext.get().guards_context.dynamo_guards.add\n    for guard in guards:\n        assert isinstance(guard, Guard)\n        add(guard, collect_debug_stack=collect_debug_stack, skip=skip + 1)",
    "docstring": "Add dynamo guards to the current tracing context. Args: guards: guard(s) to add skip: number of stack frames to ignore for debug stack trace",
    "type": "function",
    "file_path": "pytorch\\torch\\_dynamo\\guards.py",
    "ast_data": "FunctionDef name:install_guard arguments arg arg Assign BoolOp Call Call Assign Call For Call Call"
  },
  {
    "library": "tensorflow",
    "name": "get_data_files_path",
    "source_code": "@tf_export(v1=['resource_loader.get_data_files_path'])\ndef get_data_files_path():\n    return _os.path.dirname(_inspect.getfile(_sys._getframe(1)))",
    "docstring": "Get a direct path to the data files colocated with the script. Returns: The directory where files specified in data attribute of py_test and py_binary are stored.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\platform\\resource_loader.py",
    "ast_data": "FunctionDef name:get_data_files_path arguments Return return:yes Call Call Call Call"
  },
  {
    "library": "sphinx",
    "name": "escape_abbr",
    "source_code": "def escape_abbr(text: str) -> str:\n    return re.sub('\\\\.(?=\\\\s|$)', '.\\\\@', text)",
    "docstring": "Adjust spacing after abbreviations.",
    "type": "function",
    "file_path": "sphinx\\sphinx\\writers\\latex.py",
    "ast_data": "FunctionDef name:escape_abbr arg:text arguments arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "UserOptins",
    "source_code": "class UserOptins(dict[str, list[str]]):\n    pass",
    "docstring": "Dictionary of users with a list of features they have opted into",
    "type": "class",
    "file_path": "pytorch\\.github\\scripts\\runner_determinator.py",
    "ast_data": "ClassDef name:UserOptins"
  },
  {
    "library": "kornia",
    "name": "from_euler",
    "source_code": "@classmethod\ndef from_euler(cls, roll: Tensor, pitch: Tensor, yaw: Tensor) -> 'Quaternion':\n    w, x, y, z = quaternion_from_euler(roll=roll, pitch=pitch, yaw=yaw)\n    q = stack((w, x, y, z), -1)\n    return cls(q)",
    "docstring": "Create a quaternion from Euler angles. Args: roll: the roll euler angle. pitch: the pitch euler angle. yaw: the yaw euler angle. Example: >>> roll, pitch, yaw = tensor(0), tensor(1), tensor(0) >>> q = Quaternion.from_euler(roll, pitch, yaw) >>> q.data Parameter containing: tensor([0.8776, 0.0000, 0.4794, 0.0000], requires_grad=True)",
    "type": "method",
    "file_path": "kornia\\kornia\\geometry\\quaternion.py",
    "ast_data": "FunctionDef name:from_euler arg:cls arg:roll arg:pitch arg:yaw arguments arg arg arg arg Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "parse_indices",
    "source_code": "def parse_indices(indices_string):\n    indices_string = re.sub('\\\\s+', '', indices_string)\n    if indices_string.startswith('[') and indices_string.endswith(']'):\n        indices_string = indices_string[1:-1]\n    return [int(element) for element in indices_string.split(',')]",
    "docstring": "Parse a string representing indices. For example, if the input is \"[1, 2, 3]\", the return value will be a list of indices: [1, 2, 3] Args: indices_string: (str) a string representing indices. Can optionally be surrounded by a pair of brackets. Returns: (list of int): Parsed indices.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\debug\\cli\\command_parser.py",
    "ast_data": "FunctionDef name:parse_indices arg:indices_string arguments arg Assign Call If BoolOp Call Call Assign Return return:yes Call Call"
  },
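The parser is self-contained enough to copy verbatim; a standalone sketch exercising the behaviors described in the docstring:

```python
import re


def parse_indices(indices_string):
    indices_string = re.sub(r"\s+", "", indices_string)  # drop all whitespace
    if indices_string.startswith("[") and indices_string.endswith("]"):
        indices_string = indices_string[1:-1]            # strip optional brackets
    return [int(element) for element in indices_string.split(",")]


assert parse_indices("[1, 2, 3]") == [1, 2, 3]
assert parse_indices("4,5") == [4, 5]
```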
  {
    "library": "scikit-learn",
    "name": "make_friedman2",
    "source_code": "@validate_params({'n_samples': [Interval(Integral, 1, None, closed='left')], 'noise': [Interval(Real, 0, None, closed='left')], 'random_state': ['random_state']}, prefer_skip_nested_validation=True)\ndef make_friedman2(n_samples=100, *, noise=0.0, random_state=None):\n    generator = check_random_state(random_state)\n    X = generator.uniform(size=(n_samples, 4))\n    X[:, 0] *= 100\n    X[:, 1] *= 520 * np.pi\n    X[:, 1] += 40 * np.pi\n    X[:, 3] *= 10\n    X[:, 3] += 1\n    y = (X[:, 0] ** 2 + (X[:, 1] * X[:, 2] - 1 / (X[:, 1] * X[:, 3])) ** 2) ** 0.5 + noise * generator.standard_normal(size=n_samples)\n    return (X, y)",
    "docstring": "Generate the \"Friedman #2\" regression problem. This dataset is described in Friedman [1] and Breiman [2]. Inputs are 4 independent features uniformly distributed on the intervals:: 0 Glossary `. Returns ------- X : ndarray of shape (n_samples, 4) The input samples. y : ndarray of shape (n_samples,) The output values. References ---------- .. [1] J. Friedman, \"Multivariate adaptive regression splines\", The Annals of Statistics 19 (1), pages 1-67, 1991. .. [2] L. Breiman, \"Bagging predictors\", Machine Learning 24, pages 123-140, 1996. Examples -------- >>> from sklearn.datasets import make_friedman2 >>> X, y = make_friedman2(random_state=42) >>> X.shape (100, 4) >>> y.shape (100,) >>> list(y[:3]) [np.float64(1229.4), np.float64(27.0), np.float64(65.6)]",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\datasets\\_samples_generator.py",
    "ast_data": "FunctionDef name:make_friedman2 arg:n_samples arguments arg arg arg Assign Call Assign Call Assign Call Return return:yes Call Call Call"
  },
  {
    "library": "matplotlib",
    "name": "draw_path",
    "source_code": "def draw_path(self, renderer, gc, tpath, affine, rgbFace):\n    gc0 = renderer.new_gc()\n    gc0.copy_properties(gc)\n    if self._shadow_rgbFace is None:\n        r, g, b = (rgbFace or (1.0, 1.0, 1.0))[:3]\n        shadow_rgbFace = (r * self._rho, g * self._rho, b * self._rho)\n    else:\n        shadow_rgbFace = self._shadow_rgbFace\n    gc0.set_foreground('none')\n    gc0.set_alpha(self._alpha)\n    gc0.set_linewidth(0)\n    gc0 = self._update_gc(gc0, self._gc)\n    renderer.draw_path(gc0, tpath, affine + self._offset_transform(renderer), shadow_rgbFace)\n    gc0.restore()",
    "docstring": "Overrides the standard draw_path to add the shadow offset and necessary color changes for the shadow.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\patheffects.py",
    "ast_data": "FunctionDef name:draw_path arg:self arg:renderer arg:gc arg:tpath arg:affine arg:rgbFace arguments arg arg arg arg arg arg Assign Call Call If Compare Assign BoolOp Assign Assign Call Call Call Assign Call Call Call Call"
  },
  {
    "library": "matplotlib",
    "name": "back",
    "source_code": "def back(self, *args):\n    self._nav_stack.back()\n    self.set_history_buttons()\n    self._update_view()",
    "docstring": "Move back up the view lim stack. For convenience of being directly connected as a GUI callback, which often get passed additional parameters, this method accepts arbitrary parameters, but does not use them.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\backend_bases.py",
    "ast_data": "FunctionDef name:back arg:self arguments arg arg Call Call Call"
  },
  {
    "library": "pandas",
    "name": "AppendableMultiFrameTable",
    "source_code": "class AppendableMultiFrameTable(AppendableFrameTable):\n    table_type = 'appendable_multiframe'\n    obj_type = DataFrame\n    ndim = 2\n    _re_levels = re.compile('^level_\\\\d+$')\n\n    @property\n    def table_type_short(self) -> str:\n        return 'appendable_multi'\n\n    def write(self, obj, data_columns=None, **kwargs) -> None:\n        if data_columns is None:\n            data_columns = []\n        elif data_columns is True:\n            data_columns = obj.columns.tolist()\n        obj, self.levels = self.validate_multiindex(obj)\n        assert isinstance(self.levels, list)\n        for n in self.levels:\n            if n not in data_columns:\n                data_columns.insert(0, n)\n        super().write(obj=obj, data_columns=data_columns, **kwargs)\n\n    def read(self, where=None, columns=None, start: int | None=None, stop: int | None=None) -> DataFrame:\n        df = super().read(where=where, columns=columns, start=start, stop=stop)\n        df = df.set_index(self.levels)\n        df.index = df.index.set_names([None if self._re_levels.search(name) else name for name in df.index.names])\n        return df",
    "docstring": "a frame with a multi-index",
    "type": "class",
    "file_path": "pandas\\pandas\\io\\pytables.py",
    "ast_data": "ClassDef name:AppendableMultiFrameTable Assign Assign Assign Assign Call FunctionDef name:table_type_short arg:self arguments arg Return return:yes FunctionDef name:write arg:self arg:obj arg:data_columns arguments arg arg arg arg If Compare Assign If Compare Assign Call Assign Call Call For If Compare Call Call Call FunctionDef name:read arg:self arg:where arg:columns arg:start arg:stop arguments arg arg arg arg arg Assign Call Call Assign Call Assign Call Call Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "fit",
    "source_code": "@_fit_context(prefer_skip_nested_validation=True)\ndef fit(self, X, y=None):\n    X = self._validate_input(X, in_fit=True)\n    if self.fill_value is None:\n        if X.dtype.kind in ('i', 'u', 'f'):\n            fill_value = 0\n        else:\n            fill_value = 'missing_value'\n    else:\n        fill_value = self.fill_value\n    if sp.issparse(X):\n        self.statistics_ = self._sparse_fit(X, self.strategy, self.missing_values, fill_value)\n    else:\n        self.statistics_ = self._dense_fit(X, self.strategy, self.missing_values, fill_value)\n    return self",
    "docstring": "Fit the imputer on . Parameters ---------- X : {array-like, sparse matrix}, shape (n_samples, n_features) Input data, where is the number of samples and is the number of features. y : Ignored Not used, present here for API consistency by convention. Returns ------- self : object Fitted estimator.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\impute\\_base.py",
    "ast_data": "FunctionDef name:fit arg:self arg:X arg:y arguments arg arg arg Assign Call If Compare If Compare Assign Assign Assign If Call Assign Call Assign Call Return return:yes Call"
  },
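A typical fit/transform round trip; `statistics_` holds the per-column fill values computed by this method:

```python
import numpy as np
from sklearn.impute import SimpleImputer

X = np.array([[1.0, np.nan], [3.0, 4.0], [np.nan, 6.0]])
imp = SimpleImputer(strategy="mean").fit(X)
print(imp.statistics_)   # per-column means: [2. 5.]
print(imp.transform(X))  # NaNs replaced by those means
```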
  {
    "library": "kornia",
    "name": "Laplacian",
    "source_code": "class Laplacian(Module):\n\n    def __init__(self, kernel_size: tuple[int, int] | int, border_type: str='reflect', normalized: bool=True) -> None:\n        super().__init__()\n        self.kernel_size = kernel_size\n        self.border_type: str = border_type\n        self.normalized: bool = normalized\n\n    def __repr__(self) -> str:\n        return f'{self.__class__.__name__}(kernel_size={self.kernel_size}, normalized={self.normalized}, border_type={self.border_type})'\n\n    def forward(self, input: Tensor) -> Tensor:\n        return laplacian(input, self.kernel_size, self.border_type, self.normalized)",
    "docstring": "Create an operator that returns a tensor using a Laplacian filter. The operator smooths the given tensor with a laplacian kernel by convolving it to each channel. It supports batched operation. Args: kernel_size: the size of the kernel. border_type: the padding mode to be applied before convolving. The expected modes are: `(B, C, H, W)(B, C, H, W)` Examples: >>> input = torch.rand(2, 4, 5, 5) >>> laplace = Laplacian(5) >>> output = laplace(input) >>> output.shape torch.Size([2, 4, 5, 5])",
    "type": "class",
    "file_path": "kornia\\kornia\\filters\\laplacian.py",
    "ast_data": "ClassDef name:Laplacian FunctionDef name:__init__ arg:self arg:kernel_size arg:border_type arg:normalized arguments arg arg arg arg Call Call Assign FunctionDef name:__repr__ arg:self arguments arg Return return:yes FunctionDef name:forward arg:self arg:input arguments arg arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "dtensor_initialize_tpu_system",
    "source_code": "def dtensor_initialize_tpu_system(enable_coordination_service=False):\n    from . import accelerator_util\n    accelerator_util.initialize_accelerator_system('TPU', enable_coordination_service=enable_coordination_service)",
    "docstring": "Deprecated way to initialize the TPU system.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\dtensor\\python\\tpu_util.py",
    "ast_data": "FunctionDef name:dtensor_initialize_tpu_system arg:enable_coordination_service arguments arg Call"
  },
  {
    "library": "scikit-learn",
    "name": "process_routing",
    "source_code": "def process_routing(_obj, _method, /, **kwargs):\n    if not kwargs:\n\n        class EmptyRequest:\n\n            def get(self, name, default=None):\n                return Bunch(**{method: dict() for method in METHODS})\n\n            def __getitem__(self, name):\n                return Bunch(**{method: dict() for method in METHODS})\n\n            def __getattr__(self, name):\n                return Bunch(**{method: dict() for method in METHODS})\n        return EmptyRequest()\n    if not (hasattr(_obj, 'get_metadata_routing') or isinstance(_obj, MetadataRouter)):\n        raise AttributeError(f'The given object ({_obj.__class__.__name__!r}) needs to either implement the routing method `get_metadata_routing` or be a `MetadataRouter` instance.')\n    if _method not in METHODS:\n        raise TypeError(f'Can only route and process input on these methods: {METHODS}, while the passed method is: {_method}.')\n    request_routing = get_routing_for_object(_obj)\n    request_routing.validate_metadata(params=kwargs, method=_method)\n    routed_params = request_routing.route_params(params=kwargs, caller=_method)\n    return routed_params",
    "docstring": "Validate and route input parameters. This function is used inside a router's method, e.g. :term:, to validate the metadata and handle the routing. Assuming this signature of a router's fit method: `~utils.Bunch~sklearn.utils.Bunchobj.get_metadata_routing()`.",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\utils\\_metadata_requests.py",
    "ast_data": "FunctionDef name:process_routing arguments arg arg arg If ClassDef name:EmptyRequest FunctionDef name:get arg:self arg:name arg:default arguments arg arg arg Return return:yes Call Call FunctionDef name:__getitem__ arg:self arg:name arguments arg arg Return return:yes Call Call FunctionDef name:__getattr__ arg:self arg:name arguments arg arg Return return:yes Call Call Return return:yes Call If BoolOp Call Call Raise Call If Compare Raise Call Assign Call Call Assign Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "_add_text",
    "source_code": "def _add_text(self, txt):\n    _api.check_isinstance(mtext.Text, txt=txt)\n    self._set_artist_props(txt)\n    self._children.append(txt)\n    txt._remove_method = self._children.remove\n    self.stale = True\n    return txt",
    "docstring": "Add a to the Axes; return the text.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\axes\\_base.py",
    "ast_data": "FunctionDef name:_add_text arg:self arg:txt arguments arg arg Call Call Call Assign Assign Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "get_transform",
    "source_code": "def get_transform(self):\n    if self._transform is None:\n        self._transform = IdentityTransform()\n    elif not isinstance(self._transform, Transform) and hasattr(self._transform, '_as_mpl_transform'):\n        self._transform = self._transform._as_mpl_transform(self.axes)\n    return self._transform",
    "docstring": "Return the instance used by this artist.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\artist.py",
    "ast_data": "FunctionDef name:get_transform arg:self arguments arg If Compare Assign Call If BoolOp Call Call Assign Call Return return:yes"
  },
  {
    "library": "numpy",
    "name": "__reduce__",
    "source_code": "def __reduce__(self):\n    return (_mrreconstruct, (self.__class__, self._baseclass, (0,), 'b'), self.__getstate__())",
    "docstring": "Return a 3-tuple for pickling a MaskedArray.",
    "type": "method",
    "file_path": "numpy\\numpy\\ma\\mrecords.py",
    "ast_data": "FunctionDef name:__reduce__ arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_snapshot_device_function_stack_metadata",
    "source_code": "def _snapshot_device_function_stack_metadata(self) -> list[traceable_stack.TraceableObject]:\n    snapshot = []\n    for obj in self._device_function_stack.peek_traceable_objs():\n        obj_copy = obj.copy_metadata()\n        obj_copy.obj = obj.obj.display_name\n        snapshot.append(obj_copy)\n    return snapshot",
    "docstring": "Return device function stack as a list of TraceableObjects. Returns: [traceable_stack.TraceableObject, ...] where each TraceableObject's .obj member is a displayable name for the user's argument to Graph.device, and the filename and lineno members point to the code location where Graph.device was called directly or indirectly by the user.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\ops.py",
    "ast_data": "FunctionDef name:_snapshot_device_function_stack_metadata arg:self arguments arg Assign For Call Assign Call Assign Call Return return:yes"
  },
  {
    "library": "scrapy",
    "name": "request_authenticate",
    "source_code": "def request_authenticate(request: Request, username: str, password: str) -> None:\n    warnings.warn('The request_authenticate function is deprecated and will be removed in a future version of Scrapy.', category=ScrapyDeprecationWarning, stacklevel=2)\n    request.headers['Authorization'] = basic_auth_header(username, password)",
    "docstring": "Authenticate the given request (in place) using the HTTP basic access authentication mechanism (RFC 2617) and the given username and password",
    "type": "function",
    "file_path": "scrapy\\scrapy\\utils\\request.py",
    "ast_data": "FunctionDef name:request_authenticate arg:request arg:username arg:password arguments arg arg arg Call Assign Call"
  },
  {
    "library": "kornia",
    "name": "batch_2x2_inv",
    "source_code": "def batch_2x2_inv(m: Tensor, check_dets: bool=False) -> Tensor:\n    a = m[..., 0, 0]\n    b = m[..., 0, 1]\n    c = m[..., 1, 0]\n    d = m[..., 1, 1]\n    minv = torch.empty_like(m)\n    det = a * d - b * c\n    if check_dets:\n        det[torch.abs(det) < 1e-10] = 1e-10\n    minv[..., 0, 0] = d\n    minv[..., 1, 1] = a\n    minv[..., 0, 1] = -b\n    minv[..., 1, 0] = -c\n    return minv / det.unsqueeze(-1).unsqueeze(-1)",
    "docstring": "Returns inverse of batch of 2x2 matrices.",
    "type": "function",
    "file_path": "kornia\\kornia\\feature\\adalam\\utils.py",
    "ast_data": "FunctionDef name:batch_2x2_inv arg:m arg:check_dets arguments arg arg Assign Assign Assign Assign Assign Call Assign If Assign Compare Call Assign Assign Assign Assign Return return:yes Call Call"
  },
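A quick numeric check of the closed-form adjugate-over-determinant formula against torch.linalg.inv (pure torch, independent of kornia):

```python
import torch

m = torch.randn(8, 2, 2) + 2 * torch.eye(2)  # keep determinants away from 0
a, b = m[..., 0, 0], m[..., 0, 1]
c, d = m[..., 1, 0], m[..., 1, 1]
det = (a * d - b * c)[..., None, None]
minv = torch.stack(
    [torch.stack([d, -b], dim=-1), torch.stack([-c, a], dim=-1)], dim=-2
) / det
torch.testing.assert_close(minv, torch.linalg.inv(m), atol=1e-4, rtol=1e-4)
```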
  {
    "library": "tensorflow",
    "name": "element_spec",
    "source_code": "@property\ndef element_spec(self):\n    raise NotImplementedError('DistributedIterator.element_spec() must be implemented in descendants')",
    "docstring": "The type specification of an element of . Example usage: >>> global_batch_size = 16 >>> strategy = tf.distribute.MirroredStrategy([\"GPU:0\", \"GPU:1\"]) >>> dataset = tf.data.Dataset.from_tensors(([1.],[2])).repeat(100).batch(global_batch_size) >>> distributed_iterator = iter(strategy.experimental_distribute_dataset(dataset)) >>> distributed_iterator.element_spec (PerReplicaSpec(TensorSpec(shape=(None, 1), dtype=tf.float32, name=None), TensorSpec(shape=(None, 1), dtype=tf.float32, name=None)), PerReplicaSpec(TensorSpec(shape=(None, 1), dtype=tf.int32, name=None), TensorSpec(shape=(None, 1), dtype=tf.int32, name=None))) Returns: A nested structure of objects matching the structure of an element of this . This returned value is typically a object and specifies the of individual components.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\types\\distribute.py",
    "ast_data": "FunctionDef name:element_spec arg:self arguments arg Raise Call"
  },
  {
    "library": "scrapy",
    "name": "from_body",
    "source_code": "def from_body(self, body: bytes) -> type[Response]:\n    chunk = body[:5000]\n    chunk = to_bytes(chunk)\n    if not binary_is_text(chunk):\n        return self.from_mimetype('application/octet-stream')\n    lowercase_chunk = chunk.lower()\n    if b'<html>' in lowercase_chunk:\n        return self.from_mimetype('text/html')\n    if b'<?xml' in lowercase_chunk:\n        return self.from_mimetype('text/xml')\n    if b'<!doctype html>' in lowercase_chunk:\n        return self.from_mimetype('text/html')\n    return self.from_mimetype('text')",
    "docstring": "Try to guess the appropriate response based on the body content. This method is a bit magic and could be improved in the future, but it's not meant to be used except for special cases where response types cannot be guess using more straightforward methods.",
    "type": "method",
    "file_path": "scrapy\\scrapy\\responsetypes.py",
    "ast_data": "FunctionDef name:from_body arg:self arg:body arguments arg arg Assign Assign Call If Call Return return:yes Call Assign Call If Compare Return return:yes Call If Compare Return return:yes Call If Compare Return return:yes Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "set_difference",
    "source_code": "@tf_export('sets.difference', v1=['sets.difference', 'sets.set_difference'])\n@dispatch.add_dispatch_support\ndef set_difference(a, b, aminusb=True, validate_indices=True):\n    a, b, flipped = _convert_to_tensors_or_sparse_tensors(a, b)\n    if flipped:\n        aminusb = not aminusb\n    return _set_operation(a, b, 'a-b' if aminusb else 'b-a', validate_indices)",
    "docstring": "Compute set difference of elements in last dimension of and . All but the last dimension of and must match. Example: Args: a: or of the same type as . If sparse, indices must be sorted in row-major order. b: or of the same type as . If sparse, indices must be sorted in row-major order. aminusb: Whether to subtract from , vs vice versa. validate_indices: Whether to validate the order and range of sparse indices in and . Returns: A whose shape is the same rank as and , and all but the last dimension the same. Elements along the last dimension contain the differences. Raises: TypeError: If inputs are invalid types, or if and have different types. ValueError: If is sparse and is dense. errors_impl.InvalidArgumentError: If the shapes of and do not match in any dimension other than the last dimension.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\sets_impl.py",
    "ast_data": "FunctionDef name:set_difference arg:a arg:b arg:aminusb arg:validate_indices arguments arg arg arg arg Assign Call If Assign Return return:yes Call Call"
  },
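Usage sketch with dense inputs; the per-row set difference comes back as a SparseTensor:

```python
import tensorflow as tf

a = tf.constant([[1, 2, 3], [4, 5, 6]])
b = tf.constant([[1, 3, 5], [7, 8, 9]])

diff = tf.sets.difference(a, b)          # elements of a not in b, per row
print(tf.sparse.to_dense(diff).numpy())  # [[2 0 0]
                                         #  [4 5 6]]
```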
  {
    "library": "scikit-learn",
    "name": "_joint_probabilities",
    "source_code": "def _joint_probabilities(distances, desired_perplexity, verbose):\n    distances = distances.astype(np.float32, copy=False)\n    conditional_P = _utils._binary_search_perplexity(distances, desired_perplexity, verbose)\n    P = conditional_P + conditional_P.T\n    sum_P = np.maximum(np.sum(P), MACHINE_EPSILON)\n    P = np.maximum(squareform(P) / sum_P, MACHINE_EPSILON)\n    return P",
    "docstring": "Compute joint probabilities p_ij from distances. Parameters ---------- distances : ndarray of shape (n_samples * (n_samples-1) / 2,) Distances of samples are stored as condensed matrices, i.e. we omit the diagonal and duplicate entries and store everything in a one-dimensional array. desired_perplexity : float Desired perplexity of the joint probability distributions. verbose : int Verbosity level. Returns ------- P : ndarray of shape (n_samples * (n_samples-1) / 2,) Condensed joint probability matrix.",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\manifold\\_t_sne.py",
    "ast_data": "FunctionDef name:_joint_probabilities arg:distances arg:desired_perplexity arg:verbose arguments arg arg arg Assign Call Assign Call Assign Assign Call Call Assign Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "get_config",
    "source_code": "def get_config(self):\n    from tensorflow.python.feature_column.serialization import serialize_feature_column\n    config = dict(zip(self._fields, self))\n    config['categorical_column'] = serialize_feature_column(self.categorical_column)\n    config['dtype'] = self.dtype.name\n    return config",
    "docstring": "See 'FeatureColumn` base class.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\feature_column\\feature_column_v2.py",
    "ast_data": "FunctionDef name:get_config arg:self arguments arg Assign Call Call Assign Call Assign Return return:yes"
  },
  {
    "library": "django",
    "name": "endswith_cr",
    "source_code": "def endswith_cr(line):\n    return line.endswith('\\r' if isinstance(line, str) else b'\\r')",
    "docstring": "Return True if line (a text or bytestring) ends with ' '.",
    "type": "function",
    "file_path": "django\\django\\core\\files\\base.py",
    "ast_data": "FunctionDef name:endswith_cr arg:line arguments arg Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "proto",
    "source_code": "@property\ndef proto(self):\n    return self._proto",
    "docstring": "Return the sharding protobuf of type xla_data_pb2.OpSharding.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\compiler\\xla\\experimental\\xla_sharding.py",
    "ast_data": "FunctionDef name:proto arg:self arguments arg Return return:yes"
  },
  {
    "library": "scipy",
    "name": "OdrWarning",
    "source_code": "class OdrWarning(UserWarning):\n    pass",
    "docstring": "Warning indicating that the data passed into ODR will cause problems when passed into 'odr' that the user should be aware of.",
    "type": "class",
    "file_path": "scipy\\scipy\\odr\\_odrpack.py",
    "ast_data": "ClassDef name:OdrWarning"
  },
  {
    "library": "tensorflow",
    "name": "_was_converted",
    "source_code": "def _was_converted(self, t):\n    converted_t = self._conversion_map[t]\n    return converted_t.t is not t",
    "docstring": "True if t is not a conversion of itself.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\parallel_for\\pfor.py",
    "ast_data": "FunctionDef name:_was_converted arg:self arg:t arguments arg arg Assign Return return:yes Compare"
  },
  {
    "library": "sphinx",
    "name": "DirectoryHTMLBuilder",
    "source_code": "class DirectoryHTMLBuilder(StandaloneHTMLBuilder):\n    name = 'dirhtml'\n\n    def get_target_uri(self, docname: str, typ: str | None=None) -> str:\n        if docname == 'index':\n            return ''\n        if docname.endswith(SEP + 'index'):\n            return docname[:-5]\n        return docname + SEP\n\n    def get_output_path(self, page_name: str, /) -> Path:\n        page_parts = page_name.split(SEP)\n        if page_parts[-1] == 'index':\n            page_parts.pop()\n        return Path(self.outdir, *page_parts, f'index{self.out_suffix}')",
    "docstring": "A StandaloneHTMLBuilder that creates all HTML pages as \"index.html\" in a directory given by their pagename, so that generated URLs don't have `` in them.",
    "type": "class",
    "file_path": "sphinx\\sphinx\\builders\\dirhtml.py",
    "ast_data": "ClassDef name:DirectoryHTMLBuilder Assign FunctionDef name:get_target_uri arg:self arg:docname arg:typ arguments arg arg arg If Compare Return return:yes If Call Return return:yes Return return:yes FunctionDef name:get_output_path arguments arg arg Assign Call If Compare Call Return return:yes Call"
  },
  {
    "library": "numpy",
    "name": "intersect1d",
    "source_code": "def intersect1d(ar1, ar2, assume_unique=False):\n    if assume_unique:\n        aux = ma.concatenate((ar1, ar2))\n    else:\n        aux = ma.concatenate((unique(ar1), unique(ar2)))\n    aux.sort()\n    return aux[:-1][aux[1:] == aux[:-1]]",
    "docstring": "Returns the unique elements common to both arrays. Masked values are considered equal one to the other. The output is always a masked array. See for more details. See Also -------- numpy.intersect1d : Equivalent function for ndarrays. Examples -------- >>> import numpy as np >>> x = np.ma.array([1, 3, 3, 3], mask=[0, 0, 0, 1]) >>> y = np.ma.array([3, 1, 1, 1], mask=[0, 0, 0, 1]) >>> np.ma.intersect1d(x, y) masked_array(data=[1, 3, --], mask=[False, False, True], fill_value=999999)",
    "type": "function",
    "file_path": "numpy\\numpy\\ma\\extras.py",
    "ast_data": "FunctionDef name:intersect1d arg:ar1 arg:ar2 arg:assume_unique arguments arg arg arg If Assign Call Assign Call Call Call Call Return return:yes Compare"
  },
  {
    "library": "tensorflow",
    "name": "_gather_saveables_for_checkpoint",
    "source_code": "def _gather_saveables_for_checkpoint(self):\n    return {trackable.VARIABLE_VALUE_KEY: self}",
    "docstring": "For implementing . This object is saveable on its own.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\variables.py",
    "ast_data": "FunctionDef name:_gather_saveables_for_checkpoint arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_original_op",
    "source_code": "@tf_contextlib.contextmanager\ndef _original_op(self, op) -> Iterator[None]:\n    old_original_op = self._default_original_op\n    self._default_original_op = op\n    try:\n        yield\n    finally:\n        self._default_original_op = old_original_op",
    "docstring": "Python 'with' handler to help annotate ops with their originator. An op may have an 'original_op' property that indicates the op on which it was based. For example a replica op is based on the op that was replicated and a gradient op is based on the op that was differentiated. All ops created in the scope of this 'with' handler will have the given 'op' as their original op. Args: op: The Operation that all ops created in this scope will have as their original op. Yields: Nothing.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\ops.py",
    "ast_data": "FunctionDef name:_original_op arg:self arg:op arguments arg arg Assign Assign Try Assign"
  },
  {
    "library": "pytorch",
    "name": "custom_fwd",
    "source_code": "def custom_fwd(fwd=None, *, device_type: str, cast_inputs: Optional[_dtype]=None):\n    if not isinstance(device_type, str):\n        raise ValueError(f'Expected `device_type` of type `str`, got: `{type(device_type)}`')\n    if fwd is None:\n        return functools.partial(custom_fwd, device_type=device_type, cast_inputs=cast_inputs)\n\n    @functools.wraps(fwd)\n    def decorate_fwd(*args, **kwargs):\n        args[0]._dtype = torch.get_autocast_dtype(device_type)\n        if cast_inputs is None:\n            args[0]._fwd_used_autocast = torch.is_autocast_enabled(device_type)\n            return fwd(*args, **kwargs)\n        else:\n            autocast_context = torch.is_autocast_enabled(device_type)\n            args[0]._fwd_used_autocast = False\n            if autocast_context:\n                with autocast(device_type=device_type, enabled=False):\n                    return fwd(*_cast(args, device_type, cast_inputs), **_cast(kwargs, device_type, cast_inputs))\n            else:\n                return fwd(*args, **kwargs)\n    return decorate_fwd",
    "docstring": "Create a helper decorator for `torch.autograd.Functionexample pagetypetorch.deviceTensor.device.typetorch.dtypecustom_fwd` has no effect.",
    "type": "function",
    "file_path": "pytorch\\torch\\amp\\autocast_mode.py",
    "ast_data": "FunctionDef name:custom_fwd arg:fwd arguments arg arg arg If Call Raise Call Call If Compare Return return:yes Call FunctionDef name:decorate_fwd arguments arg arg Assign Call If Compare Assign Call Return return:yes Call Assign Call Assign If With Call Return return:yes Call Call Call Return return:yes Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "name",
    "source_code": "@property\ndef name(self):\n    return self.categorical_column.name",
    "docstring": "See base class.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\feature_column\\feature_column_v2.py",
    "ast_data": "FunctionDef name:name arg:self arguments arg Return return:yes"
  },
  {
    "library": "scipy",
    "name": "register_backend",
    "source_code": "def register_backend(backend):\n    _uarray.register_backend(backend)",
    "docstring": "This utility method sets registers backend for permanent use. It will be tried in the list of backends automatically, unless the `` flag is set on a backend. Note that this method is not thread-safe. Parameters ---------- backend The backend to register.",
    "type": "function",
    "file_path": "scipy\\scipy\\_lib\\_uarray\\_backend.py",
    "ast_data": "FunctionDef name:register_backend arg:backend arguments arg Call"
  },
  {
    "library": "pytorch",
    "name": "transform_dimension",
    "source_code": "def transform_dimension(dimension, counter, dimension_dict):\n    if dimension == Dyn:\n        counter += 1\n        return (D(0, z3.Int(counter)), counter)\n    elif isinstance(dimension, int):\n        return (D(1, dimension), counter)\n    elif isinstance(dimension, DVar):\n        if dimension.c in dimension_dict:\n            return (D(z3.Int(dimension_dict[dimension.c]), z3.Int(dimension.c)), counter)\n        else:\n            counter += 1\n            dimension_dict[dimension.c] = counter\n            return (D(z3.Int(counter), z3.Int(dimension.c)), counter)",
    "docstring": "Takes a dimension variable or a number and transforms it to a tuple according to our scheme Args: dimension: The dimension to be transformed counter: variable tracking Returns: tuple and the current counter",
    "type": "function",
    "file_path": "pytorch\\torch\\fx\\experimental\\migrate_gradual_types\\transform_to_z3.py",
    "ast_data": "FunctionDef name:transform_dimension arg:dimension arg:counter arg:dimension_dict arguments arg arg arg If Compare Return return:yes Call Call If Call Return return:yes Call If Call If Compare Return return:yes Call Call Call Assign Return return:yes Call Call Call"
  },
  {
    "library": "authlib",
    "name": "validate_display_values_supported",
    "source_code": "def validate_display_values_supported(self):\n    values = self.get('display_values_supported')\n    if not values:\n        return\n    if not isinstance(values, list):\n        raise ValueError('\"display_values_supported\" MUST be JSON array')\n    valid_values = {'page', 'popup', 'touch', 'wap'}\n    if not valid_values.issuperset(set(values)):\n        raise ValueError('\"display_values_supported\" contains invalid values')",
    "docstring": "OPTIONAL. JSON array containing a list of the display parameter values that the OpenID Provider supports. These values are described in Section 3.1.2.1 of OpenID Connect Core 1.0.",
    "type": "method",
    "file_path": "authlib\\authlib\\oidc\\discovery\\models.py",
    "ast_data": "FunctionDef name:validate_display_values_supported arg:self arguments arg Assign Call If Return return:no If Call Raise Call Assign If Call Call Raise Call"
  },
  {
    "library": "django",
    "name": "UnrecognizedArchiveFormat",
    "source_code": "class UnrecognizedArchiveFormat(ArchiveException):\n    pass",
    "docstring": "Error raised when passed file is not a recognized archive format.",
    "type": "class",
    "file_path": "django\\django\\utils\\archive.py",
    "ast_data": "ClassDef name:UnrecognizedArchiveFormat"
  },
  {
    "library": "tensorflow",
    "name": "_save",
    "source_code": "def _save(self, session, step):\n    logging.info('Calling checkpoint listeners before saving checkpoint %d...', step)\n    for l in self._listeners:\n        l.before_save(session, step)\n    logging.info('Saving checkpoints for %d into %s.', step, self._save_path)\n    self._get_saver().save(session, self._save_path, global_step=step, write_meta_graph=self._save_graph_def)\n    self._summary_writer.add_session_log(SessionLog(status=SessionLog.CHECKPOINT, checkpoint_path=self._save_path), step)\n    logging.info('Calling checkpoint listeners after saving checkpoint %d...', step)\n    should_stop = False\n    for l in self._listeners:\n        if l.after_save(session, step):\n            logging.info('A CheckpointSaverListener requested that training be stopped. listener: {}'.format(l))\n            should_stop = True\n    return should_stop",
    "docstring": "Saves the latest checkpoint, returns should_stop.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\training\\basic_session_run_hooks.py",
    "ast_data": "FunctionDef name:_save arg:self arg:session arg:step arguments arg arg arg Call For Call Call Call Call Call Call Call Assign For If Call Call Call Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "get_flat_tensor_shapes",
    "source_code": "def get_flat_tensor_shapes(element_spec):\n    return [spec.shape for spec in get_flat_tensor_specs(element_spec)]",
    "docstring": "Returns a list s for the element tensor representation. Args: element_spec: A nested structure of objects representing to element type specification. Returns: A list s for the element tensor representation.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\data\\util\\structure.py",
    "ast_data": "FunctionDef name:get_flat_tensor_shapes arg:element_spec arguments arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "lookup",
    "source_code": "def lookup(self, keys, name=None):\n    if keys.dtype.base_dtype != self._key_dtype:\n        raise TypeError(f'Dtype of argument `keys` must be {self._key_dtype}, received: {keys.dtype}')\n    values = keys\n    if isinstance(keys, (sparse_tensor.SparseTensor, internal.RaggedTensor)):\n        values = keys.values\n    if self._table and self._table.key_dtype.base_dtype == dtypes.int64:\n        values = math_ops.cast(values, dtypes.int64)\n    with ops.name_scope(name, '%s_Lookup' % self.name):\n        buckets = string_ops.string_to_hash_bucket_fast(_as_string(values), num_buckets=self._num_oov_buckets, name='hash_bucket')\n        if self._table:\n            ids = self._table.lookup(values)\n            buckets = math_ops.add(buckets, self._table.size())\n            is_id_non_default = math_ops.not_equal(ids, self._table.default_value)\n            ids = array_ops.where_v2(is_id_non_default, ids, buckets)\n        else:\n            ids = buckets\n    if isinstance(keys, sparse_tensor.SparseTensor):\n        return sparse_tensor.SparseTensor(keys.indices, ids, keys.dense_shape)\n    elif isinstance(keys, internal.RaggedTensor):\n        return keys.with_values(ids)\n    return ids",
    "docstring": "Looks up in the table, outputs the corresponding values. It assigns out-of-vocabulary keys to buckets based in their hashes. Args: keys: Keys to look up. May be either a or dense . name: Optional name for the op. Returns: A if keys are sparse, a if keys are ragged, otherwise a dense . Raises: TypeError: when doesn't match the table key data type.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\lookup_ops.py",
    "ast_data": "FunctionDef name:lookup arg:self arg:keys arg:name arguments arg arg arg If Compare Raise Call Assign If Call Assign If BoolOp Compare Assign Call With Call Assign Call Call If Assign Call Assign Call Call Assign Call Assign Call Assign If Call Return return:yes Call If Call Return return:yes Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "has_default_graph",
    "source_code": "def has_default_graph() -> bool:\n    return len(_default_graph_stack.stack) >= 1",
    "docstring": "Returns True if there is a default graph.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\ops.py",
    "ast_data": "FunctionDef name:has_default_graph arguments Return return:yes Compare Call"
  },
  {
    "library": "pytorch",
    "name": "export_case",
    "source_code": "def export_case(**kwargs):\n\n    def wrapper(m):\n        configs = kwargs\n        module = inspect.getmodule(m)\n        if module in _MODULES:\n            raise RuntimeError('export_case should only be used once per example file.')\n        assert module is not None\n        _MODULES.add(module)\n        module_name = module.__name__.split('.')[-1]\n        case = _make_export_case(m, module_name, configs)\n        register_db_case(case)\n        return case\n    return wrapper",
    "docstring": "Decorator for registering a user provided case into example bank.",
    "type": "function",
    "file_path": "pytorch\\torch\\_export\\db\\case.py",
    "ast_data": "FunctionDef name:export_case arguments arg FunctionDef name:wrapper arg:m arguments arg Assign Assign Call If Compare Raise Call Compare Call Assign Call Assign Call Call Return return:yes Return return:yes"
  },
  {
    "library": "scipy",
    "name": "odds_ratio",
    "source_code": "def odds_ratio(table, *, kind='conditional'):\n    if kind not in ['conditional', 'sample']:\n        raise ValueError(\"`kind` must be 'conditional' or 'sample'.\")\n    c = np.asarray(table)\n    if c.shape != (2, 2):\n        raise ValueError(f'Invalid shape {c.shape}. The input `table` must be of shape (2, 2).')\n    if not np.issubdtype(c.dtype, np.integer):\n        raise ValueError(f'`table` must be an array of integers, but got type {c.dtype}')\n    c = c.astype(np.int64)\n    if np.any(c < 0):\n        raise ValueError('All values in `table` must be nonnegative.')\n    if 0 in c.sum(axis=0) or 0 in c.sum(axis=1):\n        result = OddsRatioResult(_table=c, _kind=kind, statistic=np.nan)\n        return result\n    if kind == 'sample':\n        oddsratio = _sample_odds_ratio(c)\n    else:\n        oddsratio = _conditional_oddsratio(c)\n    result = OddsRatioResult(_table=c, _kind=kind, statistic=oddsratio)\n    return result",
    "docstring": "Compute the odds ratio for a 2x2 contingency table. Parameters ---------- table : array_like of ints A 2x2 contingency table. Elements must be non-negative integers. kind : str, optional Which kind of odds ratio to compute, either the sample odds ratio (`~scipy.stats._result_classes.OddsRatioResultkindkindtableconfidence_intervalhypothesis_odds_ratiohypothesis_odds_ratio`.",
    "type": "function",
    "file_path": "scipy\\scipy\\stats\\_odds_ratio.py",
    "ast_data": "FunctionDef name:odds_ratio arg:table arguments arg arg If Compare Raise Call Assign Call If Compare Raise Call If Call Raise Call Assign Call If Call Compare Raise Call If BoolOp Compare Call Compare Call Assign Call Return return:yes If Compare Assign Call Assign Call Assign Call Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "sandwich_dot",
    "source_code": "def sandwich_dot(X, W):\n    n_samples = X.shape[0]\n    if sparse.issparse(X):\n        return (X.T @ sparse.dia_matrix((W, 0), shape=(n_samples, n_samples)) @ X).toarray()\n    else:\n        WX = W[:, None] * X\n        return X.T @ WX",
    "docstring": "Compute the sandwich product X.T @ diag(W) @ X.",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\linear_model\\_linear_loss.py",
    "ast_data": "FunctionDef name:sandwich_dot arg:X arg:W arguments arg arg Assign If Call Return return:yes Call Call Assign Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "compute_sqnr",
    "source_code": "@maybe_dequantize_first_two_tensor_args_and_handle_tuples\ndef compute_sqnr(x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:\n    Ps = torch.norm(x)\n    Pn = torch.norm(x - y)\n    return 20 * torch.log10(Ps / Pn)",
    "docstring": "Computes the SQNR between and . Args: x: Tensor or tuple of tensors y: Tensor or tuple of tensors Return: float or tuple of floats",
    "type": "function",
    "file_path": "pytorch\\torch\\ao\\ns\\fx\\utils.py",
    "ast_data": "FunctionDef name:compute_sqnr arg:x arg:y arguments arg arg Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "sphinx",
    "name": "OnceFilter",
    "source_code": "class OnceFilter(logging.Filter):\n\n    def __init__(self, name: str='') -> None:\n        super().__init__(name)\n        self.messages: dict[str, list[tuple[object, ...] | Mapping[str, object] | None]] = {}\n\n    def filter(self, record: logging.LogRecord) -> bool:\n        once = getattr(record, 'once', '')\n        if not once:\n            return True\n        else:\n            params = self.messages.setdefault(record.msg, [])\n            if record.args in params:\n                return False\n            params.append(record.args)\n            return True",
    "docstring": "Show the message only once.",
    "type": "class",
    "file_path": "sphinx\\sphinx\\util\\logging.py",
    "ast_data": "ClassDef name:OnceFilter FunctionDef name:__init__ arg:self arg:name arguments arg arg Call Call FunctionDef name:filter arg:self arg:record arguments arg arg Assign Call If Return return:yes Assign Call If Compare Return return:yes Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "get_tight_layout",
    "source_code": "def get_tight_layout(self):\n    return isinstance(self.get_layout_engine(), TightLayoutEngine)",
    "docstring": "Return whether is called when drawing.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\figure.py",
    "ast_data": "FunctionDef name:get_tight_layout arg:self arguments arg Return return:yes Call Call"
  },
  {
    "library": "cherrypy",
    "name": "convert_params",
    "source_code": "def convert_params(exception=ValueError, error=400):\n    request = cherrypy.serving.request\n    types = request.handler.callable.__annotations__\n    with cherrypy.HTTPError.handle(exception, error):\n        for key in set(types).intersection(request.params):\n            request.params[key] = types[key](request.params[key])",
    "docstring": "Convert request params based on function annotations. This function also processes errors that are subclasses of ``. :param BaseException exception: Exception class to catch. :type exception: BaseException :param error: The HTTP status code to return to the client on failure. :type error: int",
    "type": "function",
    "file_path": "cherrypy\\cherrypy\\lib\\cptools.py",
    "ast_data": "FunctionDef name:convert_params arg:exception arg:error arguments arg arg Assign Assign With Call For Call Call Assign Call"
  },
  {
    "library": "django",
    "name": "truncate_name",
    "source_code": "def truncate_name(identifier, length=None, hash_len=4):\n    namespace, name = split_identifier(identifier)\n    if length is None or len(name) <= length:\n        return identifier\n    digest = names_digest(name, length=hash_len)\n    return '%s%s%s' % ('%s\".\"' % namespace if namespace else '', name[:length - hash_len], digest)",
    "docstring": "Shorten an SQL identifier to a repeatable mangled version with the given length. If a quote stripped name contains a namespace, e.g. USERNAME\".\"TABLE, truncate the table portion only.",
    "type": "function",
    "file_path": "django\\django\\db\\backends\\utils.py",
    "ast_data": "FunctionDef name:truncate_name arg:identifier arg:length arg:hash_len arguments arg arg arg Assign Call If BoolOp Compare Compare Call Return return:yes Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_PackGrad",
    "source_code": "@ops.RegisterGradient('Pack')\ndef _PackGrad(op: ops.Operation, grad):\n    return array_ops_stack.unstack(grad, num=op.get_attr('N'), axis=op.get_attr('axis'))",
    "docstring": "Gradient for pack op.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\array_grad.py",
    "ast_data": "FunctionDef name:_PackGrad arg:op arg:grad arguments arg arg Return return:yes Call Call Call Call"
  },
  {
    "library": "kornia",
    "name": "__getitem__",
    "source_code": "def __getitem__(self, idxs: Union[int, List[int]]) -> RayGroup:\n    if not isinstance(self._ray_sampler, RaySampler):\n        raise TypeError('Ray sampler is not initiate yet, please run self.init_ray_dataset() before use it.')\n    origins = self._ray_sampler.origins[idxs]\n    directions = self._ray_sampler.directions[idxs]\n    if self._imgs is None:\n        return (origins, directions, None)\n    camerd_ids = self._ray_sampler.camera_ids[idxs]\n    points_2d = self._ray_sampler.points_2d[idxs]\n    rgbs = None\n    imgs_for_ids = [self._imgs[i] for i in camerd_ids]\n    rgbs = stack([img[:, point2d[1].item(), point2d[0].item()] for img, point2d in zip(imgs_for_ids, points_2d)])\n    rgbs = rgbs.to(dtype=self._dtype) / 255.0\n    return (origins, directions, rgbs)",
    "docstring": "Get a dataset item. Args: idxs: An index or group of indices of ray parameter object: Union[int, List[int]] Return: A ray parameter object that includes ray origins, directions, and rgb values at the ray 2d pixel coordinates: RayGroup",
    "type": "method",
    "file_path": "kornia\\kornia\\nerf\\data_utils.py",
    "ast_data": "FunctionDef name:__getitem__ arg:self arg:idxs arguments arg arg If Call Raise Call Assign Assign If Compare Return return:yes Assign Assign Assign Assign Assign Call Call Call Call Assign Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "get_metrics",
    "source_code": "def get_metrics(self, font: str, font_class: str, sym: str, fontsize: float, dpi: float) -> FontMetrics:\n    info = self._get_info(font, font_class, sym, fontsize, dpi)\n    return info.metrics",
    "docstring": "Parameters ---------- font : str One of the TeX font names: \"tt\", \"it\", \"rm\", \"cal\", \"sf\", \"bf\", \"default\", \"regular\", \"bb\", \"frak\", \"scr\". \"default\" and \"regular\" are synonyms and use the non-math font. font_class : str One of the TeX font names (as for *font*), but **not** \"bb\", \"frak\", or \"scr\". This is used to combine two font classes. The only supported combination currently is ``. sym : str A symbol in raw TeX form, e.g., \"1\", \"x\", or \"\\sigma\". fontsize : float Font size in points. dpi : float Rendering dots-per-inch. Returns ------- FontMetrics",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\_mathtext.py",
    "ast_data": "FunctionDef name:get_metrics arg:self arg:font arg:font_class arg:sym arg:fontsize arg:dpi arguments arg arg arg arg arg arg Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "remove_comp_items",
    "source_code": "def remove_comp_items(self, context_word, comp_items):\n    if context_word not in self._comp_dict:\n        raise KeyError('Context word \"%s\" has not been registered' % context_word)\n    for item in comp_items:\n        self._comp_dict[context_word].remove(item)",
    "docstring": "Remove a list of completion items from a completion context. Args: context_word: A single completion word as a string. The removal will also apply to all other context words of the same context. comp_items: Completion items to remove. Raises: KeyError: if the context word has not been registered.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\debug\\cli\\debugger_cli_common.py",
    "ast_data": "FunctionDef name:remove_comp_items arg:self arg:context_word arg:comp_items arguments arg arg arg If Compare Raise Call For Call"
  },
  {
    "library": "pytorch",
    "name": "HealthCheckServer",
    "source_code": "class HealthCheckServer:\n    _alive_callback: Callable[[], int]\n    _port: int\n    _timeout: int\n\n    def __init__(self, alive_callback: Callable[[], int], port: int, timeout: int) -> None:\n        self._alive_callback = alive_callback\n        self._port = port\n        self._timeout = timeout\n\n    def start(self) -> None:\n        log.warning('No health check server started')\n\n    def stop(self) -> None:\n        log.info('Stopping noop health check server.')",
    "docstring": "Interface for health check monitoring server, which can be extended by starting tcp/http server on the specified port. Args: alive_callback: Callable[[], int], callback to last progress time of agent port: int, port number to start tcp/http server timeout: int, timeout seconds to decide agent is alive/dead",
    "type": "class",
    "file_path": "pytorch\\torch\\distributed\\elastic\\agent\\server\\health_check_server.py",
    "ast_data": "ClassDef name:HealthCheckServer FunctionDef name:__init__ arg:self arg:alive_callback arg:port arg:timeout arguments arg arg arg arg Assign Assign Assign FunctionDef name:start arg:self arguments arg Call FunctionDef name:stop arg:self arguments arg Call"
  },
  {
    "library": "matplotlib",
    "name": "_ensure_has_backend",
    "source_code": "def _ensure_has_backend(self):\n    dict.setdefault(self, 'backend', rcsetup._auto_backend_sentinel)",
    "docstring": "Ensure that a \"backend\" entry exists. Normally, the default matplotlibrc file contains *no* entry for \"backend\" (the corresponding line starts with ##, not #; we fill in _auto_backend_sentinel in that case. However, packagers can set a different default backend (resulting in a normal line) in which case we should *not* fill in _auto_backend_sentinel.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\__init__.py",
    "ast_data": "FunctionDef name:_ensure_has_backend arg:self arguments arg Call"
  },
  {
    "library": "pytorch",
    "name": "_register_custom_op",
    "source_code": "def _register_custom_op(lib):\n    from torch._inductor.decomposition import register_decomposition\n\n    def decorator(fn):\n        from torch._library.infer_schema import infer_schema\n        assert fn.__name__[0] == '_', f'Expecting function name starts with `_`, got {fn.__name__}'\n        assert not any((c in fn.__name__ for c in '.<>')), f'Expecting op to be defined in normal functions, not lambda or local: {fn.__name__}'\n        op_name = fn.__name__[1:]\n        schema = op_name + infer_schema(fn, mutates_args={})\n        lib.define(schema)\n        lib.impl(op_name, fn, 'CompositeImplicitAutograd')\n        lib_namespace = lib.ns\n        op = getattr(getattr(torch.ops, lib_namespace), op_name)\n        register_decomposition([op])(fn)\n        return op\n    return decorator",
    "docstring": "This decorator is used to preserve some high level operators for torch.export.export while still allow them to be decomposed for inductor path requirement: make sure is the operator name you want to register NOTE: This should be applied at the top, after all other decorators have been applied NOTE: We haven't tested the case when accepts tensor subclass instance as input, e.g. uint4 tensor subclass instance, and we'll probably need to figure out what would make sense for downstream system (like executorch) to accept as well Example: lib = torch.library.Library(\"my_namespace', \"FRAGMENT\") register_custom_op = _register_custom_op(lib) @register_custom_op def _the_op_that_needs_to_be_preserved(...) ... # after this, will be preserved as # torch.ops.my_namespace.the_op_that_needs_to_be_preserved operator after # torch.export.export / torch._export.export_for_training",
    "type": "function",
    "file_path": "pytorch\\torch\\ao\\quantization\\pt2e\\_affine_quantization.py",
    "ast_data": "FunctionDef name:_register_custom_op arg:lib arguments arg FunctionDef name:decorator arg:fn arguments arg Compare Call Compare Assign Assign Call Call Call Assign Assign Call Call Call Call Return return:yes Return return:yes"
  },
  {
    "library": "uvicorn",
    "name": "process_subprotocol",
    "source_code": "def process_subprotocol(self, headers: Headers, available_subprotocols: Sequence[Subprotocol] | None) -> Subprotocol | None:\n    return self.accepted_subprotocol",
    "docstring": "We override the standard 'process_subprotocol' behavior here so that we return whatever subprotocol is sent in the 'accept' message.",
    "type": "method",
    "file_path": "uvicorn\\uvicorn\\protocols\\websockets\\websockets_impl.py",
    "ast_data": "FunctionDef name:process_subprotocol arg:self arg:headers arg:available_subprotocols arguments arg arg arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_set_activations",
    "source_code": "def _set_activations(self) -> None:\n    required = {Category.INPUT, Category.ACTIVATION}\n    also_allowed = {Category.PARAMETER, Category.TEMPORARY}\n    for node in self._data_flow_graph.flow_nodes:\n        inputs = {(key, value) for key, (_, value) in node.inputs.items()}\n        input_categories = {self._categories.get(*i) for i in inputs}\n        if input_categories & required and (not input_categories - (required | also_allowed)) and (RecordScope.BACKWARD_FUNCTION not in get_scopes(node._event)):\n            for i in node.outputs.items():\n                self._categories.setdefault_by_version(*i, Category.ACTIVATION)",
    "docstring": "Flood the graph to identify activations.",
    "type": "method",
    "file_path": "pytorch\\torch\\profiler\\_memory_profiler.py",
    "ast_data": "FunctionDef name:_set_activations arg:self arguments arg Assign Assign For Assign Call Assign Call If BoolOp Compare Call For Call Call"
  },
  {
    "library": "tensorflow",
    "name": "ObjectIdentityWeakSet",
    "source_code": "class ObjectIdentityWeakSet(ObjectIdentitySet):\n    __slots__ = ()\n\n    def _wrap_key(self, key):\n        return _WeakObjectIdentityWrapper(key)\n\n    def __len__(self):\n        return len([_ for _ in self])\n\n    def __iter__(self):\n        keys = list(self._storage)\n        for key in keys:\n            unwrapped = key.unwrapped\n            if unwrapped is None:\n                self.discard(key)\n            else:\n                yield unwrapped",
    "docstring": "Like weakref.WeakSet, but compares objects with \"is\".",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\util\\object_identity.py",
    "ast_data": "ClassDef name:ObjectIdentityWeakSet Assign FunctionDef name:_wrap_key arg:self arg:key arguments arg arg Return return:yes Call FunctionDef name:__len__ arg:self arguments arg Return return:yes Call FunctionDef name:__iter__ arg:self arguments arg Assign Call For Assign If Compare Call"
  },
  {
    "library": "pandas",
    "name": "round",
    "source_code": "@final\ndef round(self, decimals: int) -> Self:\n    if not self.is_numeric or self.is_bool:\n        return self.copy(deep=False)\n    values = self.values.round(decimals)\n    refs = None\n    if values is self.values:\n        refs = self.refs\n    return self.make_block_same_class(values, refs=refs)",
    "docstring": "Rounds the values. If the block is not of an integer or float dtype, nothing happens. This is consistent with DataFrame.round behavior. (Note: Series.round would raise) Parameters ---------- decimals: int, Number of decimal places to round to. Caller is responsible for validating this",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\internals\\blocks.py",
    "ast_data": "FunctionDef name:round arg:self arg:decimals arguments arg arg If BoolOp Return return:yes Call Assign Call Assign If Compare Assign Return return:yes Call"
  },
  {
    "library": "seaborn",
    "name": "_LinearPlotter",
    "source_code": "class _LinearPlotter:\n\n    def establish_variables(self, data, **kws):\n        self.data = data\n        any_strings = any([isinstance(v, str) for v in kws.values()])\n        if any_strings and data is None:\n            raise ValueError('Must pass `data` if using named variables.')\n        for var, val in kws.items():\n            if isinstance(val, str):\n                vector = data[val]\n            elif isinstance(val, list):\n                vector = np.asarray(val)\n            else:\n                vector = val\n            if vector is not None and vector.shape != (1,):\n                vector = np.squeeze(vector)\n            if np.ndim(vector) > 1:\n                err = 'regplot inputs must be 1d'\n                raise ValueError(err)\n            setattr(self, var, vector)\n\n    def dropna(self, *vars):\n        vals = [getattr(self, var) for var in vars]\n        vals = [v for v in vals if v is not None]\n        not_na = np.all(np.column_stack([pd.notnull(v) for v in vals]), axis=1)\n        for var in vars:\n            val = getattr(self, var)\n            if val is not None:\n                setattr(self, var, val[not_na])\n\n    def plot(self, ax):\n        raise NotImplementedError",
    "docstring": "Base class for plotting relational data in tidy format. To get anything useful done you'll have to inherit from this, but setup code that can be abstracted out should be put here.",
    "type": "class",
    "file_path": "seaborn\\seaborn\\regression.py",
    "ast_data": "ClassDef name:_LinearPlotter FunctionDef name:establish_variables arg:self arg:data arguments arg arg arg Assign Assign Call Call Call If BoolOp Compare Raise Call For Call If Call Assign If Call Assign Call Assign If BoolOp Compare Compare Assign Call If Compare Call Assign Raise Call Call FunctionDef name:dropna arg:self arguments arg arg Assign Call Assign Compare Assign Call Call Call For Assign Call If Compare Call FunctionDef name:plot arg:self arg:ax arguments arg arg Raise"
  },
  {
    "library": "pandas",
    "name": "nodefault_used_not_only_for_typing",
    "source_code": "def nodefault_used_not_only_for_typing(file_obj: IO[str]) -> Iterable[tuple[int, str]]:\n    contents = file_obj.read()\n    tree = ast.parse(contents)\n    in_annotation = False\n    nodes: list[tuple[bool, ast.AST]] = [(in_annotation, tree)]\n    while nodes:\n        in_annotation, node = nodes.pop()\n        if not in_annotation and (isinstance(node, ast.Name) and node.id == 'NoDefault' or (isinstance(node, ast.Attribute) and node.attr == 'NoDefault')):\n            yield (node.lineno, 'NoDefault is used not only for typing')\n        for name in reversed(node._fields):\n            value = getattr(node, name)\n            if name in {'annotation', 'returns'}:\n                next_in_annotation = True\n            else:\n                next_in_annotation = in_annotation\n            if isinstance(value, ast.AST):\n                nodes.append((next_in_annotation, value))\n            elif isinstance(value, list):\n                nodes.extend(((next_in_annotation, value) for value in reversed(value) if isinstance(value, ast.AST)))",
    "docstring": "Test case where pandas._libs.lib.NoDefault is not used for typing. Parameters ---------- file_obj : IO File-like object containing the Python code to validate. Yields ------ line_number : int Line number of misused lib.NoDefault. msg : str Explanation of the error.",
    "type": "function",
    "file_path": "pandas\\scripts\\validate_unwanted_patterns.py",
    "ast_data": "FunctionDef name:nodefault_used_not_only_for_typing arg:file_obj arguments arg Assign Call Assign Call Assign While Assign Call If BoolOp BoolOp BoolOp Call Compare BoolOp Call Compare For Call Assign Call If Compare Assign Assign If Call Call If Call Call Call Call"
  },
  {
    "library": "sphinx",
    "name": "relfn2path",
    "source_code": "def relfn2path(self, filename: str | Path, docname: str | None=None) -> tuple[str, str]:\n    file_name = Path(filename)\n    if file_name.parts[:1] in {('/',), ('\\\\',)}:\n        abs_fn = self.srcdir.joinpath(*file_name.parts[1:]).resolve()\n    else:\n        if not docname:\n            if self.docname:\n                docname = self.docname\n            else:\n                msg = 'docname'\n                raise KeyError(msg)\n        doc_dir = self.doc2path(docname, base=False).parent\n        abs_fn = self.srcdir.joinpath(doc_dir, file_name).resolve()\n    rel_fn = _relative_path(abs_fn, self.srcdir)\n    return (rel_fn.as_posix(), os.fspath(abs_fn))",
    "docstring": "Return paths to a file referenced from a document, relative to documentation root and absolute. In the input \"filename\", absolute filenames are taken as relative to the source dir, while relative filenames are relative to the dir of the containing document.",
    "type": "method",
    "file_path": "sphinx\\sphinx\\environment\\__init__.py",
    "ast_data": "FunctionDef name:relfn2path arg:self arg:filename arg:docname arguments arg arg arg Assign Call If Compare Assign Call Call If If Assign Assign Raise Call Assign Call Assign Call Call Assign Call Return return:yes Call Call"
  },
  {
    "library": "django",
    "name": "_check_list_editable",
    "source_code": "def _check_list_editable(self, obj):\n    if not isinstance(obj.list_editable, (list, tuple)):\n        return must_be('a list or tuple', option='list_editable', obj=obj, id='admin.E120')\n    else:\n        return list(chain.from_iterable((self._check_list_editable_item(obj, item, 'list_editable[%d]' % index) for index, item in enumerate(obj.list_editable))))",
    "docstring": "Check that list_editable is a sequence of editable fields from list_display without first element.",
    "type": "method",
    "file_path": "django\\django\\contrib\\admin\\checks.py",
    "ast_data": "FunctionDef name:_check_list_editable arg:self arg:obj arguments arg arg If Call Return return:yes Call Return return:yes Call Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "get_or_create_assets_dir",
    "source_code": "def get_or_create_assets_dir(export_dir):\n    assets_destination_dir = get_assets_dir(export_dir)\n    file_io.recursive_create_dir(assets_destination_dir)\n    return assets_destination_dir",
    "docstring": "Return assets sub-directory, or create one if it doesn't exist.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\saved_model\\path_helpers.py",
    "ast_data": "FunctionDef name:get_or_create_assets_dir arg:export_dir arguments arg Assign Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "get_config",
    "source_code": "@generic_utils.default\ndef get_config(self):\n    all_args = tf_inspect.getfullargspec(self.__init__).args\n    config = {'name': self.name, 'trainable': self.trainable}\n    if hasattr(self, '_batch_input_shape'):\n        config['batch_input_shape'] = self._batch_input_shape\n    config['dtype'] = policy.serialize(self._dtype_policy)\n    if hasattr(self, 'dynamic'):\n        if self.dynamic:\n            config['dynamic'] = self.dynamic\n        elif 'dynamic' in all_args:\n            all_args.remove('dynamic')\n    expected_args = config.keys()\n    extra_args = [arg for arg in all_args if arg not in expected_args]\n    if len(extra_args) > 1 and hasattr(self.get_config, '_is_default'):\n        raise NotImplementedError('Layer %s has arguments in `__init__` and therefore must override `get_config`.' % self.__class__.__name__)\n    return config",
    "docstring": "Returns the config of the layer. A layer config is a Python dictionary (serializable) containing the configuration of a layer. The same layer can be reinstantiated later (without its trained weights) from this configuration. The config of a layer does not include connectivity information, nor the layer class name. These are handled by (one layer of abstraction above). Note that does not guarantee to return a fresh copy of dict every time it is called. The callers should make a copy of the returned dict if they want to modify it. Returns: Python dictionary.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\base_layer.py",
    "ast_data": "FunctionDef name:get_config arg:self arguments arg Assign Call Assign If Call Assign Assign Call If Call If Assign If Compare Call Assign Call Assign Compare If BoolOp Compare Call Call Raise Call Return return:yes"
  },
  {
    "library": "django",
    "name": "DisallowedRedirect",
    "source_code": "class DisallowedRedirect(SuspiciousOperation):\n    pass",
    "docstring": "Redirect to scheme not in allowed list",
    "type": "class",
    "file_path": "django\\django\\core\\exceptions.py",
    "ast_data": "ClassDef name:DisallowedRedirect"
  },
  {
    "library": "pytorch",
    "name": "elapsed_time",
    "source_code": "def elapsed_time(self, end_event):\n    return super().elapsed_time(end_event)",
    "docstring": "Return the time elapsed. Time reported in milliseconds after the event was recorded and before the end_event was recorded.",
    "type": "method",
    "file_path": "pytorch\\torch\\xpu\\streams.py",
    "ast_data": "FunctionDef name:elapsed_time arg:self arg:end_event arguments arg arg Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "BaseDigest",
    "source_code": "class BaseDigest:\n\n    def __init__(self, wall_time, locator):\n        self._wall_time = wall_time\n        self._locator = locator\n\n    @property\n    def wall_time(self):\n        return self._wall_time\n\n    @property\n    def locator(self):\n        return self._locator\n\n    def to_json(self):\n        return {'wall_time': self.wall_time}",
    "docstring": "Base class for digest. Properties: wall_time: A timestamp for the digest as a (unit: s). locator: A datum that allows tracng the digest to its original location. It can be either of the two: 1. Bytes offset from the beginning of the file as a single integer, for the case of all digests of the same kind coming from the same file. 2. A tuple of a file index and a byte offset. This applies to case in which the same type of debugger data may come from multiple files, e.g., graph execution traces.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\debug\\lib\\debug_events_reader.py",
    "ast_data": "ClassDef name:BaseDigest FunctionDef name:__init__ arg:self arg:wall_time arg:locator arguments arg arg arg Assign Assign FunctionDef name:wall_time arg:self arguments arg Return return:yes FunctionDef name:locator arg:self arguments arg Return return:yes FunctionDef name:to_json arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "close",
    "source_code": "def close(self, name=None):\n    return gen_control_flow_ops.no_op(name=name)",
    "docstring": "See TensorArray.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\tensor_array_ops.py",
    "ast_data": "FunctionDef name:close arg:self arg:name arguments arg arg Return return:yes Call"
  },
  {
    "library": "cherrypy",
    "name": "index",
    "source_code": "@cherrypy.expose\ndef index(self):\n    return '\\n        <html><body>\\n            <h2>Upload a file</h2>\\n            <form action=\"upload\" method=\"post\" enctype=\"multipart/form-data\">\\n            filename: <input type=\"file\" name=\"myFile\" /><br />\\n            <input type=\"submit\" />\\n            </form>\\n            <h2>Download a file</h2>\\n            <a href=\\'download\\'>This one</a>\\n        </body></html>\\n        '",
    "docstring": "Produce HTTP response body of file upload app index URI.",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\tutorial\\tut09_files.py",
    "ast_data": "FunctionDef name:index arg:self arguments arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "index_indirect_depends_on",
    "source_code": "def index_indirect_depends_on(self, index: sympy.Expr, itervar: sympy.Symbol):\n    return any((self.cse.varname_map[s.name].depends_on(itervar) for s in index.free_symbols if s.name in self.cse.varname_map and isinstance(self.cse.varname_map[s.name], CppCSEVariable)))",
    "docstring": "Check if an index has free symbol CppCSEVariable that depends on .",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\codegen\\cpp.py",
    "ast_data": "FunctionDef name:index_indirect_depends_on arg:self arg:index arg:itervar arguments arg arg arg Return return:yes Call Call BoolOp Compare Call"
  },
  {
    "library": "matplotlib",
    "name": "invert_xaxis",
    "source_code": "def invert_xaxis(self):\n    self.xaxis.set_inverted(not self.xaxis.get_inverted())",
    "docstring": "[*Discouraged*] Invert the x-axis. .. admonition:: Discouraged The use of this method is discouraged. Use instead. See Also -------- get_xinverted get_xlim, set_xlim get_xbound, set_xbound",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\axes\\_base.py",
    "ast_data": "FunctionDef name:invert_xaxis arg:self arguments arg Call Call"
  },
  {
    "library": "pygame",
    "name": "pitch_bend",
    "source_code": "def pitch_bend(self, value=0, channel=0):\n    if not 0 <= channel <= 15:\n        raise ValueError('Channel not between 0 and 15.')\n    if not -8192 <= value <= 8191:\n        raise ValueError(f'Pitch bend value must be between -8192 and +8191, not {value}.')\n    value = value + 8192\n    lsb = value & 127\n    msb = value >> 7\n    self.write_short(224 + channel, lsb, msb)",
    "docstring": "modify the pitch of a channel. Output.pitch_bend(value=0, channel=0) Adjust the pitch of a channel. The value is a signed integer from -8192 to +8191. For example, 0 means \"no change\", +4096 is typically a semitone higher, and -8192 is 1 whole tone lower (though the musical range corresponding to the pitch bend range can also be changed in some synthesizers). If no value is given, the pitch bend is returned to \"no change\".",
    "type": "method",
    "file_path": "pygame\\src_py\\midi.py",
    "ast_data": "FunctionDef name:pitch_bend arg:self arg:value arg:channel arguments arg arg arg If Compare Raise Call If Compare Raise Call Assign Assign Assign Call"
  },
  {
    "library": "pytorch",
    "name": "float8_e4m3fnuz",
    "source_code": "def float8_e4m3fnuz(self):\n    _warn_typed_storage_removal()\n    return self._to(torch.float8_e4m3fnuz)",
    "docstring": "Casts this storage to float8_e4m3fnuz type",
    "type": "method",
    "file_path": "pytorch\\torch\\storage.py",
    "ast_data": "FunctionDef name:float8_e4m3fnuz arg:self arguments arg Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "list_local_devices",
    "source_code": "def list_local_devices(session_config=None):\n\n    def _convert(pb_str):\n        m = device_attributes_pb2.DeviceAttributes()\n        m.ParseFromString(pb_str)\n        return m\n    serialized_config = None\n    if session_config is not None:\n        serialized_config = session_config.SerializeToString()\n    return [_convert(s) for s in _pywrap_device_lib.list_devices(serialized_config)]",
    "docstring": "List the available devices available in the local process. Args: session_config: a session config proto or None to use the default config. Returns: A list of protocol buffers.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\client\\device_lib.py",
    "ast_data": "FunctionDef name:list_local_devices arg:session_config arguments arg FunctionDef name:_convert arg:pb_str arguments arg Assign Call Call Return return:yes Assign If Compare Assign Call Return return:yes Call Call"
  },
  {
    "library": "django",
    "name": "column_sql",
    "source_code": "def column_sql(self, model, field, include_default=False):\n    field_db_params = field.db_parameters(connection=self.connection)\n    column_db_type = field_db_params['type']\n    if column_db_type is None:\n        return (None, None)\n    params = []\n    return (' '.join(self._iter_column_sql(column_db_type, params, model, field, field_db_params, include_default)), params)",
    "docstring": "Return the column definition for a field. The field must already have had set_attributes_from_name() called.",
    "type": "method",
    "file_path": "django\\django\\db\\backends\\base\\schema.py",
    "ast_data": "FunctionDef name:column_sql arg:self arg:model arg:field arg:include_default arguments arg arg arg arg Assign Call Assign If Compare Return return:no Assign Return return:yes Call Call"
  },
  {
    "library": "seaborn",
    "name": "_compute_covariance",
    "source_code": "def _compute_covariance(self):\n    self.factor = self.covariance_factor()\n    if not hasattr(self, '_data_inv_cov'):\n        self._data_covariance = atleast_2d(cov(self.dataset, rowvar=1, bias=False, aweights=self.weights))\n        self._data_inv_cov = linalg.inv(self._data_covariance)\n    self.covariance = self._data_covariance * self.factor ** 2\n    self.inv_cov = self._data_inv_cov / self.factor ** 2\n    self._norm_factor = sqrt(linalg.det(2 * pi * self.covariance))",
    "docstring": "Computes the covariance matrix for each Gaussian kernel using covariance_factor().",
    "type": "method",
    "file_path": "seaborn\\seaborn\\external\\kde.py",
    "ast_data": "FunctionDef name:_compute_covariance arg:self arguments arg Assign Call If Call Assign Call Call Assign Call Assign Assign Assign Call Call"
  },
  {
    "library": "pytorch",
    "name": "_with_callable_args",
    "source_code": "def _with_callable_args(cls_or_self, **kwargs):\n    r = _PartialWrapper(partial(cls_or_self))\n    return r.with_callable_args(**kwargs)",
    "docstring": "Wrapper that allows creation of class factories args that need to be called at construction time. This can be useful when there is a need to create classes with the same constructor arguments, but different instances and those arguments should only be calculated at construction time. Can be used in conjunction with _with_args Example:: >>> # xdoctest: +SKIP(\"Undefined vars\") >>> Foo.with_callable_args = classmethod(_with_callable_args) >>> Foo.with_args = classmethod(_with_args) >>> foo_builder = Foo.with_callable_args(cur_time=get_time_func).with_args(name=\"dan\") >>> foo_instance1 = foo_builder() >>> # wait 50 >>> foo_instance2 = foo_builder() >>> id(foo_instance1.creation_time) == id(foo_instance2.creation_time) False",
    "type": "function",
    "file_path": "pytorch\\torch\\ao\\quantization\\observer.py",
    "ast_data": "FunctionDef name:_with_callable_args arg:cls_or_self arguments arg arg Assign Call Call Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "duplicated",
    "source_code": "def duplicated(self, keep: Literal['first', 'last', False]='first') -> npt.NDArray[np.bool_]:\n    mask = self.isna().astype(np.bool_, copy=False)\n    return duplicated(values=self, keep=keep, mask=mask)",
    "docstring": "Return boolean ndarray denoting duplicate values. Parameters ---------- keep : {'first', 'last', False}, default 'first' - ``. Returns ------- ndarray[bool] With true in indices where elements are duplicated and false otherwise. See Also -------- DataFrame.duplicated : Return boolean Series denoting duplicate rows. Series.duplicated : Indicate duplicate Series values. api.extensions.ExtensionArray.unique : Compute the ExtensionArray of unique values. Examples -------- >>> pd.array([1, 1, 2, 3, 3], dtype=\"Int64\").duplicated() array([False, True, False, False, True])",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\arrays\\base.py",
    "ast_data": "FunctionDef name:duplicated arg:self arg:keep arguments arg arg Assign Call Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "_hook_then_optimizer",
    "source_code": "def _hook_then_optimizer(hook: Callable[[Any, dist.GradBucket], torch.futures.Future[torch.Tensor]], optimizer_state: _OptimizerHookState) -> Callable[[Any, dist.GradBucket], torch.futures.Future[torch.Tensor]]:\n    has_set_params = hasattr(optimizer_state, 'params_to_optimize') and optimizer_state.params_to_optimize is not None\n\n    def hook_then_optimizer_wrapper(hook_state, bucket: dist.GradBucket) -> torch.futures.Future[torch.Tensor]:\n        fut = hook(hook_state, bucket)\n\n        def optimizer_step(fut):\n            gradient_tensors = bucket.gradients()\n            model_params = bucket.parameters()\n            for grad_tensor, model_param in zip(gradient_tensors, model_params):\n                if not has_set_params or model_param in optimizer_state.params_to_optimize:\n                    optimizer_state.functional_optimizer.step_param(model_param, grad_tensor)\n            return bucket.buffer()\n        return fut.then(optimizer_step)\n    return hook_then_optimizer_wrapper",
    "docstring": "Run optimizer in a functional fashion after DDP communication hook.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\algorithms\\ddp_comm_hooks\\optimizer_overlap_hooks.py",
    "ast_data": "FunctionDef name:_hook_then_optimizer arg:hook arg:optimizer_state arguments arg arg Assign BoolOp Call Compare FunctionDef name:hook_then_optimizer_wrapper arg:hook_state arg:bucket arguments arg arg Assign Call FunctionDef name:optimizer_step arg:fut arguments arg Assign Call Assign Call For Call If BoolOp Compare Call Return return:yes Call Return return:yes Call Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "fit",
    "source_code": "@_fit_context(prefer_skip_nested_validation=True)\ndef fit(self, X, y, sample_weight=None, **params):\n    X, y, sample_weight, Y = self._prepare_data(X, y, sample_weight, solver='eigen')\n    target = Y if self.cv is None else y\n    super().fit(X, target, sample_weight=sample_weight, **params)\n    return self",
    "docstring": "Fit Ridge classifier with cv. Parameters ---------- X : ndarray of shape (n_samples, n_features) Training vectors, where is the number of samples and is the number of features. When using GCV, will be cast to float64 if necessary. y : ndarray of shape (n_samples,) Target values. Will be cast to X's dtype if necessary. sample_weight : float or ndarray of shape (n_samples,), default=None Individual weights for each sample. If given a float, every sample will have the same weight. **params : dict, default=None Parameters to be passed to the underlying scorer. .. versionadded:: 1.5 Only available if , which can be set by using `Metadata Routing User Guide ` for more details. Returns ------- self : object Fitted estimator.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\linear_model\\_ridge.py",
    "ast_data": "FunctionDef name:fit arg:self arg:X arg:y arg:sample_weight arguments arg arg arg arg arg Assign Call Assign Compare Call Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "_construct",
    "source_code": "@staticmethod\ndef _construct(cpp_module, init_fn):\n    script_module = RecursiveScriptModule(cpp_module)\n    init_fn(script_module)\n    RecursiveScriptModule._finalize_scriptmodule(script_module)\n    return script_module",
    "docstring": "Construct a RecursiveScriptModule that's ready for use. PyTorch code should use this to construct a RecursiveScriptModule instead of instead of calling directly, as it makes sure the object is properly finalized (and in the future, we may take control of how the RecursiveScriptModule instance is created). Args: cpp_module: The C++ Module that will hold the actual state of this RecursiveScriptModule instance. init_fn: Lambda that initializes the RecursiveScriptModule passed to it.",
    "type": "method",
    "file_path": "pytorch\\torch\\jit\\_script.py",
    "ast_data": "FunctionDef name:_construct arg:cpp_module arg:init_fn arguments arg arg Assign Call Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "checkpoint_sequential",
    "source_code": "def checkpoint_sequential(functions, segments, input, use_reentrant=None, **kwargs):\n    if use_reentrant is None:\n        warnings.warn('torch.utils.checkpoint.checkpoint_sequential: the use_reentrant parameter should be passed explicitly. In version 2.5 we will raise an exception if use_reentrant is not passed. use_reentrant=False is recommended, but if you need to preserve the current default behavior, you can pass use_reentrant=True. Refer to docs for more details on the differences between the two variants.')\n        use_reentrant = True\n    preserve = kwargs.pop('preserve_rng_state', True)\n    if kwargs:\n        raise ValueError('Unexpected keyword arguments: ' + ','.join((arg for arg in kwargs)))\n\n    def run_function(start, end, functions):\n\n        def forward(input):\n            for j in range(start, end + 1):\n                input = functions[j](input)\n            return input\n        return forward\n    if isinstance(functions, torch.nn.Sequential):\n        functions = list(functions.children())\n    segment_size = len(functions) // segments\n    end = -1\n    for start in range(0, segment_size * (segments - 1), segment_size):\n        end = start + segment_size - 1\n        input = checkpoint(run_function(start, end, functions), input, use_reentrant=use_reentrant, preserve_rng_state=preserve)\n    return run_function(end + 1, len(functions) - 1, functions)(input)",
    "docstring": "Checkpoint a sequential model to save memory. Sequential models execute a list of modules/functions in order (sequentially). Therefore, we can divide such a model in various segments and checkpoint each segment. All segments except the last will not store the intermediate activations. The inputs of each checkpointed segment will be saved for re-running the segment in the backward pass. .. warning:: The ` variant, please see :func: for the important considerations and limitations of this variant. It is recommended that you use `torch.nn.Sequentialtorch.nn.Sequentialfunctionsfunctions*inputs` Example: >>> # xdoctest: +SKIP(\"stub\") >>> model = nn.Sequential(...) >>> input_var = checkpoint_sequential(model, chunks, input_var)",
    "type": "function",
    "file_path": "pytorch\\torch\\utils\\checkpoint.py",
    "ast_data": "FunctionDef name:checkpoint_sequential arg:functions arg:segments arg:input arg:use_reentrant arguments arg arg arg arg arg If Compare Call Assign Assign Call If Raise Call Call FunctionDef name:run_function arg:start arg:end arg:functions arguments arg arg arg FunctionDef name:forward arg:input arguments arg For Call Assign Call Return return:yes Return return:yes If Call Assign Call Call Assign Call Assign For Call Assign Assign Call Call Return return:yes Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "AddOp",
    "source_code": "def AddOp(self, op: ops.Operation):\n    if not util.IsInXLAContext(op) and op.type in {'Shape', 'Size', 'Rank'}:\n        grad_ctxt = ops.get_default_graph()._get_control_flow_context()\n        if grad_ctxt:\n            grad_ctxt = grad_ctxt.GetWhileContext()\n            if grad_ctxt.grad_state:\n                op_input_forward_ctxt = util.GetWhileContext(op.inputs[0].op)\n                if op_input_forward_ctxt == grad_ctxt.grad_state.forward_context:\n                    op_input_ctxt = op.inputs[0].op._get_control_flow_context()\n                    op._set_control_flow_context(op_input_ctxt)\n                    op_input_ctxt._AddOpInternal(op)\n                    return\n    self._AddOpInternal(op)",
    "docstring": "Add to the current context.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\control_flow_ops.py",
    "ast_data": "FunctionDef name:AddOp arg:self arg:op arguments arg arg If BoolOp Call Compare Assign Call Call If Assign Call If Assign Call If Compare Assign Call Call Call Return return:no Call"
  },
  {
    "library": "pytorch",
    "name": "BenchmarkTensors",
    "source_code": "@dataclasses.dataclass\nclass BenchmarkTensors:\n    input_tensors: list[torch.Tensor]\n    output_tensor: Optional[torch.Tensor]\n\n    def unpack(self):\n        return (self.input_tensors, self.output_tensor)",
    "docstring": "Represents a set of inputs and outputs for autotuning with a template",
    "type": "class",
    "file_path": "pytorch\\torch\\_inductor\\select_algorithm.py",
    "ast_data": "ClassDef name:BenchmarkTensors FunctionDef name:unpack arg:self arguments arg Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "__init__",
    "source_code": "def __init__(self, matrix=None, **kwargs):\n    super().__init__(**kwargs)\n    if matrix is None:\n        matrix = IdentityTransform._mtx\n    self._mtx = matrix.copy()\n    self._invalid = 0",
    "docstring": "Initialize an Affine transform from a 3x3 numpy float array:: a c e b d f 0 0 1 If *matrix* is None, initialize with the identity transform.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\transforms.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:matrix arguments arg arg arg Call Call If Compare Assign Assign Call Assign"
  },
  {
    "library": "numpy",
    "name": "_add_library",
    "source_code": "def _add_library(self, name, sources, install_dir, build_info):\n    build_info = copy.copy(build_info)\n    build_info['sources'] = sources\n    if not 'depends' in build_info:\n        build_info['depends'] = []\n    self._fix_paths_dict(build_info)\n    self.libraries.append((name, build_info))",
    "docstring": "Common implementation for add_library and add_installed_library. Do not use directly",
    "type": "method",
    "file_path": "numpy\\numpy\\distutils\\misc_util.py",
    "ast_data": "FunctionDef name:_add_library arg:self arg:name arg:sources arg:install_dir arg:build_info arguments arg arg arg arg arg Assign Call Assign If Compare Assign Call Call"
  },
  {
    "library": "matplotlib",
    "name": "symbol",
    "source_code": "@property\ndef symbol(self):\n    symbol = self._symbol\n    if not symbol:\n        symbol = ''\n    elif not self._is_latex and mpl.rcParams['text.usetex']:\n        for spec in '\\\\#$%&~_^{}':\n            symbol = symbol.replace(spec, '\\\\' + spec)\n    return symbol",
    "docstring": "The configured percent symbol as a string. If LaTeX is enabled via :rc:, the special characters `` are automatically escaped in the string.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\ticker.py",
    "ast_data": "FunctionDef name:symbol arg:self arguments arg Assign If Assign If BoolOp For Assign Call Return return:yes"
  },
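A short sketch of how the `symbol` property above is consumed in practice, via `PercentFormatter`; the plotted values are illustrative:

```python
import matplotlib.pyplot as plt
from matplotlib.ticker import PercentFormatter

fig, ax = plt.subplots()
ax.plot([0.0, 0.5, 1.0])
# With xmax=1.0, data in [0, 1] renders as 0%..100%; the formatter's
# .symbol property supplies (and, under usetex, escapes) the percent sign.
ax.yaxis.set_major_formatter(PercentFormatter(xmax=1.0))
```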
  {
    "library": "pytorch",
    "name": "EnableReduction",
    "source_code": "class EnableReduction(NodeScheduleMarker):\n\n    @staticmethod\n    def filter(node_schedule: list[NodeScheduleEntry]) -> Iterable[SchedulerNode]:\n        disabled = False\n        for node in node_schedule:\n            if node in (EnableReduction, DisableReduction):\n                disabled = node is DisableReduction\n            elif disabled:\n                pass\n            else:\n                yield node",
    "docstring": "Marker to end a DisableReduction block.",
    "type": "class",
    "file_path": "pytorch\\torch\\_inductor\\codegen\\simd_kernel_features.py",
    "ast_data": "ClassDef name:EnableReduction FunctionDef name:filter arg:node_schedule arguments arg Assign For If Compare Assign Compare If"
  },
  {
    "library": "authlib",
    "name": "validate_claims_parameter_supported",
    "source_code": "def validate_claims_parameter_supported(self):\n    _validate_boolean_value(self, 'claims_parameter_supported')",
    "docstring": "OPTIONAL. Boolean value specifying whether the OP supports use of the claims parameter, with true indicating support. If omitted, the default value is false.",
    "type": "method",
    "file_path": "authlib\\authlib\\oidc\\discovery\\models.py",
    "ast_data": "FunctionDef name:validate_claims_parameter_supported arg:self arguments arg Call"
  },
  {
    "library": "matplotlib",
    "name": "encode_multipart_formdata",
    "source_code": "def encode_multipart_formdata(fields, boundary=None):\n    from io import BytesIO\n    from requests.packages.urllib3.filepost import choose_boundary, writer, b, get_content_type\n    body = BytesIO()\n    if boundary is None:\n        boundary = choose_boundary()\n    for fieldname, value in iter_fields(fields):\n        body.write(b('--%s\\r\\n' % boundary))\n        if isinstance(value, tuple):\n            filename, data = value\n            writer(body).write('Content-Disposition: form-data; name=\"%s\"; filename=\"%s\"\\r\\n' % (fieldname, filename))\n            body.write(b('Content-Type: %s\\r\\n\\r\\n' % get_content_type(filename)))\n        else:\n            data = value\n            writer(body).write('Content-Disposition: form-data; name=\"%s\"\\r\\n' % fieldname)\n            body.write(b'Content-Type: text/plain\\r\\n\\r\\n')\n        if isinstance(data, int):\n            data = str(data)\n        if isinstance(data, str):\n            writer(body).write(data)\n        else:\n            body.write(data)\n        body.write(b'\\r\\n')\n    body.write(b('--%s--\\r\\n' % boundary))\n    content_type = b('multipart/form-data; boundary=%s' % boundary)\n    return (body.getvalue(), content_type)",
    "docstring": "Encode a dictionary of `mimetools.choose_boundary`.",
    "type": "function",
    "file_path": "matplotlib\\tools\\gh_api.py",
    "ast_data": "FunctionDef name:encode_multipart_formdata arg:fields arg:boundary arguments arg arg Assign Call If Compare Assign Call For Call Call Call If Call Assign Call Call Call Call Call Assign Call Call Call If Call Assign Call If Call Call Call Call Call Call Call Assign Call Return return:yes Call"
  },
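A hypothetical invocation of the helper above (assuming it is importable from the `gh_api` tool); a tuple value marks a file-style field, a plain value a simple form field:

```python
# 'fields' values: plain strings become form fields, (filename, data)
# tuples become file uploads with a guessed Content-Type.
fields = {'name': 'gh_api', 'readme': ('README.txt', 'hello world')}
body, content_type = encode_multipart_formdata(fields)
print(content_type)  # multipart/form-data; boundary=...
```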
  {
    "library": "matplotlib",
    "name": "get_gid",
    "source_code": "def get_gid(self):\n    return self._gid",
    "docstring": "Return the object identifier if one is set, None otherwise.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\backend_bases.py",
    "ast_data": "FunctionDef name:get_gid arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "reduce",
    "source_code": "def reduce(self, initial_state, reduce_func):\n    pass",
    "docstring": "Reduces this iterable object to a single element. The transformation calls successively on each element. The argument is used for the initial state and the final state is returned as the result. Args: initial_state: An element representing the initial state of the reduction. reduce_func: A function that maps to . The structure of must match the structure of . For the first element, is . Returns: The final state of the transformation.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\types\\distribute.py",
    "ast_data": "FunctionDef name:reduce arg:self arg:initial_state arg:reduce_func arguments arg arg arg"
  },
  {
    "library": "matplotlib",
    "name": "update",
    "source_code": "def update(self):\n    cv = self.canvas\n    fig = cv.figure\n    if self._bg is None:\n        self.on_draw(None)\n    else:\n        cv.restore_region(self._bg)\n        self._draw_animated()\n        cv.blit(fig.bbox)\n    cv.flush_events()",
    "docstring": "Update the screen with animated artists.",
    "type": "method",
    "file_path": "matplotlib\\galleries\\users_explain\\animations\\blitting.py",
    "ast_data": "FunctionDef name:update arg:self arguments arg Assign Assign If Compare Call Call Call Call Call"
  },
  {
    "library": "django",
    "name": "JoinPromoter",
    "source_code": "class JoinPromoter:\n\n    def __init__(self, connector, num_children, negated):\n        self.connector = connector\n        self.negated = negated\n        if self.negated:\n            if connector == AND:\n                self.effective_connector = OR\n            else:\n                self.effective_connector = AND\n        else:\n            self.effective_connector = self.connector\n        self.num_children = num_children\n        self.votes = Counter()\n\n    def __repr__(self):\n        return f'{self.__class__.__qualname__}(connector={self.connector!r}, num_children={self.num_children!r}, negated={self.negated!r})'\n\n    def add_votes(self, votes):\n        self.votes.update(votes)\n\n    def update_join_types(self, query):\n        to_promote = set()\n        to_demote = set()\n        for table, votes in self.votes.items():\n            if self.effective_connector == OR and votes < self.num_children:\n                to_promote.add(table)\n            if self.effective_connector == AND or (self.effective_connector == OR and votes == self.num_children):\n                to_demote.add(table)\n        query.promote_joins(to_promote)\n        query.demote_joins(to_demote)\n        return to_demote",
    "docstring": "A class to abstract away join promotion problems for complex filter conditions.",
    "type": "class",
    "file_path": "django\\django\\db\\models\\sql\\query.py",
    "ast_data": "ClassDef name:JoinPromoter FunctionDef name:__init__ arg:self arg:connector arg:num_children arg:negated arguments arg arg arg arg Assign Assign If If Compare Assign Assign Assign Assign Assign Call FunctionDef name:__repr__ arg:self arguments arg Return return:yes FunctionDef name:add_votes arg:self arg:votes arguments arg arg Call FunctionDef name:update_join_types arg:self arg:query arguments arg arg Assign Call Assign Call For Call If BoolOp Compare Compare Call If BoolOp Compare BoolOp Compare Compare Call Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "MatchAllNode",
    "source_code": "class MatchAllNode:\n    pass",
    "docstring": "A node pattern that matches all nodes, used in defining fusion patterns in FX Graph Mode Quantization",
    "type": "class",
    "file_path": "pytorch\\torch\\ao\\quantization\\utils.py",
    "ast_data": "ClassDef name:MatchAllNode"
  },
  {
    "library": "pytorch",
    "name": "__init__",
    "source_code": "def __init__(self, variant: CausalVariant, seq_len_q: int, seq_len_kv: int):\n    assert isinstance(variant, CausalVariant)\n    self.variant = variant\n    self.seq_len_q = seq_len_q\n    self.seq_len_kv = seq_len_kv\n    if seq_len_q > seq_len_kv and variant == CausalVariant.LOWER_RIGHT:\n        warn('Lower right causal bias will produce NaNs in the output when seq_len_q > seq_len_kv!')",
    "docstring": "Initializes the CausalBias instance with a specified variant and sequence lengths. Args: variant (CausalVariant): The type of causal bias to use (either UPPER_LEFT or LOWER_RIGHT). seq_len_q (int): The sequence length of the query tensor. seq_len_kv (int): The sequence length of the key/value tensor. Raises a warning if the LOWER_RIGHT variant is used with seq_len_q > seq_len_kv, as it may produce NaNs.",
    "type": "method",
    "file_path": "pytorch\\torch\\nn\\attention\\bias.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:variant arg:seq_len_q arg:seq_len_kv arguments arg arg arg arg Call Assign Assign Assign If BoolOp Compare Compare Call"
  },
  {
    "library": "scikit-learn",
    "name": "make_distributor_init_64_bits",
    "source_code": "def make_distributor_init_64_bits(distributor_init, vcomp140_dll_filename, msvcp140_dll_filename):\n    with open(distributor_init, 'wt') as f:\n        f.write(textwrap.dedent('\\n            \\'\\'\\'Helper to preload vcomp140.dll and msvcp140.dll to prevent\\n            \"not found\" errors.\\n\\n            Once vcomp140.dll and msvcp140.dll are\\n            preloaded, the namespace is made available to any subsequent\\n            vcomp140.dll and msvcp140.dll. This is\\n            created as part of the scripts that build the wheel.\\n            \\'\\'\\'\\n\\n\\n            import os\\n            import os.path as op\\n            from ctypes import WinDLL\\n\\n\\n            if os.name == \"nt\":\\n                libs_path = op.join(op.dirname(__file__), \".libs\")\\n                vcomp140_dll_filename = op.join(libs_path, \"{0}\")\\n                msvcp140_dll_filename = op.join(libs_path, \"{1}\")\\n                WinDLL(op.abspath(vcomp140_dll_filename))\\n                WinDLL(op.abspath(msvcp140_dll_filename))\\n            '.format(vcomp140_dll_filename, msvcp140_dll_filename)))",
    "docstring": "Create a _distributor_init.py file for 64-bit architectures. This file is imported first when importing the sklearn package so as to pre-load the vendored vcomp140.dll and msvcp140.dll.",
    "type": "function",
    "file_path": "scikit-learn\\build_tools\\github\\vendor.py",
    "ast_data": "FunctionDef name:make_distributor_init_64_bits arg:distributor_init arg:vcomp140_dll_filename arg:msvcp140_dll_filename arguments arg arg arg With Call Call Call Call"
  },
  {
    "library": "sphinx",
    "name": "viewcode_anchor",
    "source_code": "class viewcode_anchor(Element):\n    pass",
    "docstring": "Node for viewcode anchors. This node will be processed in the resolving phase. For viewcode supported builders, they will be all converted to the anchors. For not supported builders, they will be removed.",
    "type": "class",
    "file_path": "sphinx\\sphinx\\ext\\viewcode.py",
    "ast_data": "ClassDef name:viewcode_anchor"
  },
  {
    "library": "tensorflow",
    "name": "add_update",
    "source_code": "@doc_controls.for_subclass_implementers\ndef add_update(self, updates, inputs=None):\n    if inputs is not None:\n        tf_logging.warning('`add_update` `inputs` kwarg has been deprecated. You no longer need to pass a value to `inputs` as it is being automatically inferred.')\n    call_context = base_layer_utils.call_context()\n    if distribute_lib.has_strategy() and distribute_lib.in_cross_replica_context() and (not call_context.saving):\n        return\n    updates = generic_utils.to_list(updates)\n    if call_context.in_call:\n        relevant_inputs = call_context.inputs\n    else:\n        inbound_nodes = getattr(self, '_inbound_nodes', [])\n        relevant_inputs = [node.input_tensors for node in inbound_nodes]\n\n    def process_update(x):\n        if callable(x):\n            update = lambda: process_update(x())\n            return update()\n        elif isinstance(x, ops.Operation):\n            update = x\n        elif hasattr(x, 'op'):\n            update = x.op\n        else:\n            update = tensor_conversion.convert_to_tensor_v2_with_dispatch(x)\n        reachable = tf_utils.get_reachable_from_inputs(relevant_inputs, [update])\n        update._unconditional_update = update not in reachable\n        return update\n    updates = [process_update(x) for x in updates]\n    self._updates.extend(updates)",
    "docstring": "Add update op(s), potentially dependent on layer inputs. Weight updates (for instance, the updates of the moving mean and variance in a BatchNormalization layer) may be dependent on the inputs passed when calling a layer. Hence, when reusing the same layer on different inputs and , some entries in may be dependent on and some on . This method automatically keeps track of dependencies. The method allows to retrieve the updates relevant to a specific set of inputs. This call is ignored when eager execution is enabled (in that case, variable updates are run on the fly and thus do not need to be tracked for later execution). Args: updates: Update op, or list/tuple of update ops, or zero-arg callable that returns an update op. A zero-arg callable should be passed in order to disable running the updates by setting on this Layer, when executing in Eager mode. inputs: Deprecated, will be automatically inferred.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\base_layer_v1.py",
    "ast_data": "FunctionDef name:add_update arg:self arg:updates arg:inputs arguments arg arg arg If Compare Call Assign Call If BoolOp Call Call Return return:no Assign Call If Assign Assign Call Assign FunctionDef name:process_update arg:x arguments arg If Call Assign arguments Call Call Return return:yes Call If Call Assign If Call Assign Assign Call Assign Call Assign Compare Return return:yes Assign Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_PartitionedCall",
    "source_code": "class _PartitionedCall(_FunctionCaller):\n\n    def __init__(self, node, function, enclosing_graph):\n        super(_PartitionedCall, self).__init__(node, function, enclosing_graph, first_function_input=0, type_attribute='Tin', function_attributes=['f'])",
    "docstring": "Specialization of _Node to PartitionedCall-like operations.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\convert_to_constants.py",
    "ast_data": "ClassDef name:_PartitionedCall FunctionDef name:__init__ arg:self arg:node arg:function arg:enclosing_graph arguments arg arg arg arg Call Call"
  },
  {
    "library": "pandas",
    "name": "get_iterator",
    "source_code": "def get_iterator(self, data: NDFrame):\n    slicer = lambda start, edge: data.iloc[start:edge]\n    start: np.int64 | int = 0\n    for edge, label in zip(self.bins, self.binlabels):\n        if label is not NaT:\n            yield (label, slicer(start, edge))\n        start = edge\n    if start < len(data):\n        yield (self.binlabels[-1], slicer(start, None))",
    "docstring": "Groupby iterator Returns ------- Generator yielding sequence of (name, subsetted object) for each group",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\groupby\\ops.py",
    "ast_data": "FunctionDef name:get_iterator arg:self arg:data arguments arg arg Assign arguments arg arg For Call If Compare Call Assign If Compare Call Call"
  },
  {
    "library": "tensorflow",
    "name": "signature",
    "source_code": "def signature(obj, *, follow_wrapped=True):\n    return _inspect.signature(tf_decorator.unwrap(obj)[1], follow_wrapped=follow_wrapped)",
    "docstring": "TFDecorator-aware replacement for inspect.signature.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\util\\tf_inspect.py",
    "ast_data": "FunctionDef name:signature arg:obj arguments arg arg Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_build_node_error_message",
    "source_code": "def _build_node_error_message(op):\n    node_error_message = [f'Detected at node {op.name!r} defined at (most recent call last):']\n    field_dict = _compute_field_dict(op)\n    for frame in field_dict['definition_traceback']:\n        if '<embedded' not in frame:\n            node_error_message.extend([f'  {line}' for line in frame.split('\\n') if line.strip()])\n    node_error_message.append(f'Node: {op.name!r}')\n    return '\\n'.join(node_error_message)",
    "docstring": "Returns the formatted error message for the given op. Args: op: The node. Returns: The formatted error message for the given op with traceback.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\error_interpolation.py",
    "ast_data": "FunctionDef name:_build_node_error_message arg:op arguments arg Assign Assign Call For If Compare Call Call Call Call Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "set_over",
    "source_code": "def set_over(self, color='k', alpha=None):\n    self._rgba_over = to_rgba(color, alpha)\n    if self._isinit:\n        self._set_extremes()",
    "docstring": "Set the color for high out-of-range values.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\colors.py",
    "ast_data": "FunctionDef name:set_over arg:self arg:color arg:alpha arguments arg arg arg Assign Call If Call"
  },
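A small sketch showing the effect of `set_over`; the colormap name and data are illustrative:

```python
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt

data = np.linspace(0, 2, 100).reshape(10, 10)
cmap = mpl.colormaps['viridis'].copy()  # registered colormaps are read-only
cmap.set_over('red')                    # values above vmax render as red
im = plt.imshow(data, cmap=cmap, vmax=1.0)
plt.colorbar(im, extend='max')          # show the over-range arrow
```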
  {
    "library": "scipy",
    "name": "set_meta",
    "source_code": "def set_meta(self, **kwds):\n    self.meta.update(kwds)",
    "docstring": "Update the metadata dictionary with the keywords and data provided here. Examples -------- set_meta(name=\"Exponential\", equation=\"y = a exp(b x) + c\")",
    "type": "method",
    "file_path": "scipy\\scipy\\odr\\_odrpack.py",
    "ast_data": "FunctionDef name:set_meta arg:self arguments arg arg Call"
  },
  {
    "library": "pytorch",
    "name": "pre_load_state_dict_hook",
    "source_code": "def pre_load_state_dict_hook(module, state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs):\n    for submodule_name, submodule in module.named_modules():\n        for attr_name in submodule.__dict__.keys():\n            mod_prefix = prefix + submodule_name\n            key = mod_prefix + ('.' if mod_prefix else '') + attr_name\n            if key in state_dict:\n                if isinstance(state_dict[key], ShardedTensor):\n                    setattr(submodule, attr_name, state_dict[key])",
    "docstring": "Pre-load state dict hook to add ShardedTensor to the module.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\_shard\\sharded_tensor\\__init__.py",
    "ast_data": "FunctionDef name:pre_load_state_dict_hook arg:module arg:state_dict arg:prefix arg:local_metadata arg:strict arg:missing_keys arg:unexpected_keys arg:error_msgs arguments arg arg arg arg arg arg arg arg For Call For Call Assign Assign If Compare If Call Call"
  },
  {
    "library": "pytorch",
    "name": "_finalize_zip",
    "source_code": "def _finalize_zip(self):\n    del self.zip_file\n    if self.buffer:\n        self.buffer.flush()",
    "docstring": "Called at the very end of packaging to leave the zipfile in a closed but valid state.",
    "type": "method",
    "file_path": "pytorch\\torch\\package\\package_exporter.py",
    "ast_data": "FunctionDef name:_finalize_zip arg:self arguments arg If Call"
  },
  {
    "library": "pytorch",
    "name": "remove_small_acc_subgraphs",
    "source_code": "def remove_small_acc_subgraphs(self, subgraphs: list[Subgraph]) -> list[Subgraph]:\n    result: list[Subgraph] = []\n    for subgraph in subgraphs:\n        if subgraph.is_acc:\n            if len(subgraph.nodes) >= self.settings.min_acc_module_size:\n                result.append(subgraph)\n            else:\n                print(f\"Eliminating acc subgraph because it's smaller than the threshold: {len(subgraph.nodes)} < {self.settings.min_acc_module_size}\")\n                if result:\n                    result[-1].nodes.extend(subgraph.nodes)\n                else:\n                    subgraph.is_acc = False\n                    result.append(subgraph)\n        elif result and (not result[-1].is_acc):\n            result[-1].nodes.extend(subgraph.nodes)\n        else:\n            result.append(subgraph)\n    return result",
    "docstring": "This pass finds ACC submodules with less than specified size and merges them with adjacent CPU submodules.",
    "type": "method",
    "file_path": "pytorch\\torch\\fx\\passes\\splitter_base.py",
    "ast_data": "FunctionDef name:remove_small_acc_subgraphs arg:self arg:subgraphs arguments arg arg For If If Compare Call Call Call Call If Call Assign Call If BoolOp Call Call Return return:yes"
  },
  {
    "library": "scipy",
    "name": "IdentityVectorFunction",
    "source_code": "class IdentityVectorFunction(LinearVectorFunction):\n\n    def __init__(self, x0, sparse_jacobian):\n        n = len(x0)\n        if sparse_jacobian or sparse_jacobian is None:\n            A = sps.eye_array(n, format='csr')\n            sparse_jacobian = True\n        else:\n            A = np.eye(n)\n            sparse_jacobian = False\n        super().__init__(A, x0, sparse_jacobian)",
    "docstring": "Identity vector function and its derivatives. The Jacobian is the identity matrix, returned as a dense array when and as a csr matrix otherwise. The Hessian is identically zero and it is returned as a csr matrix.",
    "type": "class",
    "file_path": "scipy\\scipy\\optimize\\_differentiable_functions.py",
    "ast_data": "ClassDef name:IdentityVectorFunction FunctionDef name:__init__ arg:self arg:x0 arg:sparse_jacobian arguments arg arg arg Assign Call If BoolOp Compare Assign Call Assign Assign Call Assign Call Call"
  },
  {
    "library": "pytorch",
    "name": "_get_fsdp_states_with_modules",
    "source_code": "def _get_fsdp_states_with_modules(module: nn.Module) -> tuple[list[_FSDPState], list[nn.Module]]:\n    fsdp_states: list[_FSDPState] = []\n    fsdp_modules: list[nn.Module] = []\n    visited_fsdp_states: set[_FSDPState] = set()\n    visited_modules: set[nn.Module] = set()\n    deque: collections.deque[nn.Module] = collections.deque([module])\n    while deque:\n        submodule = deque.popleft()\n        visited_modules.add(submodule)\n        if not _composable(submodule):\n            continue\n        for child_module in reversed(list(submodule.children())):\n            if child_module not in visited_modules:\n                deque.appendleft(child_module)\n        optional_state = _get_module_fsdp_state(submodule)\n        if optional_state is not None and optional_state not in visited_fsdp_states:\n            visited_fsdp_states.add(optional_state)\n            fsdp_states.append(optional_state)\n            fsdp_modules.append(submodule)\n    return (fsdp_states, fsdp_modules)",
    "docstring": "Returns a tuple containing: 1. A list of the ``).",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\fsdp\\_traversal_utils.py",
    "ast_data": "FunctionDef name:_get_fsdp_states_with_modules arg:module arguments arg Call Call Call While Assign Call Call If Call For Call Call Call If Compare Call Assign Call If BoolOp Compare Compare Call Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "convert_nested_model",
    "source_code": "def convert_nested_model(weights):\n    trainable_weights = weights[:len(layer.trainable_weights)]\n    non_trainable_weights = weights[len(layer.trainable_weights):]\n    new_trainable_weights = []\n    new_non_trainable_weights = []\n    for sublayer in layer.layers:\n        num_trainable_weights = len(sublayer.trainable_weights)\n        num_non_trainable_weights = len(sublayer.non_trainable_weights)\n        if sublayer.weights:\n            preprocessed = preprocess_weights_for_loading(layer=sublayer, weights=trainable_weights[:num_trainable_weights] + non_trainable_weights[:num_non_trainable_weights], original_keras_version=original_keras_version, original_backend=original_backend)\n            new_trainable_weights.extend(preprocessed[:num_trainable_weights])\n            new_non_trainable_weights.extend(preprocessed[num_trainable_weights:])\n            trainable_weights = trainable_weights[num_trainable_weights:]\n            non_trainable_weights = non_trainable_weights[num_non_trainable_weights:]\n    return new_trainable_weights + new_non_trainable_weights",
    "docstring": "Converts layers nested in or . This function uses for converting nested layers. Args: weights: List of weights values (Numpy arrays). Returns: A list of weights values (Numpy arrays).",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\saving\\hdf5_format.py",
    "ast_data": "FunctionDef name:convert_nested_model arg:weights arguments arg Assign Call Assign Call Assign Assign For Assign Call Assign Call If Assign Call Call Call Assign Assign Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "_safe_split",
    "source_code": "def _safe_split(estimator, X, y, indices, train_indices=None):\n    if get_tags(estimator).input_tags.pairwise:\n        if not hasattr(X, 'shape'):\n            raise ValueError('Precomputed kernels or affinity matrices have to be passed as arrays or sparse matrices.')\n        if X.shape[0] != X.shape[1]:\n            raise ValueError('X should be a square kernel matrix')\n        if train_indices is None:\n            X_subset = X[np.ix_(indices, indices)]\n        else:\n            X_subset = X[np.ix_(indices, train_indices)]\n    else:\n        X_subset = _safe_indexing(X, indices)\n    if y is not None:\n        y_subset = _safe_indexing(y, indices)\n    else:\n        y_subset = None\n    return (X_subset, y_subset)",
    "docstring": "Create subset of dataset and properly handle kernels. Slice X, y according to indices for cross-validation, but take care of precomputed kernel-matrices or pairwise affinities / distances. If `` will be use to slice the columns of X. Returns ------- X_subset : array-like, sparse matrix or list Indexed data. y_subset : array-like, sparse matrix or list Indexed targets.",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\utils\\metaestimators.py",
    "ast_data": "FunctionDef name:_safe_split arg:estimator arg:X arg:y arg:indices arg:train_indices arguments arg arg arg arg arg If Call If Call Raise Call If Compare Raise Call If Compare Assign Call Assign Call Assign Call If Compare Assign Call Assign Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_apply_to_modules",
    "source_code": "def _apply_to_modules(root_module: torch.nn.Module, module_fn: Callable, return_fn: Callable, filter_fqns: Optional[list[str]]=None, *args, **kwargs):\n\n    def f(module: torch.nn.Module, prefix: str, tree_level: int, *args, **kwargs):\n        module_fn(module, prefix, tree_level, *args, **kwargs)\n        for submodule_name, submodule in module.named_children():\n            if submodule is None:\n                continue\n            new_prefix = prefix + submodule_name + '.'\n            new_tree_level = tree_level + 1\n            if filter_fqns is not None:\n                for fqn in filter_fqns:\n                    if fqn.startswith(new_prefix):\n                        break\n                else:\n                    if submodule_name == '_fsdp_wrapped_module' or submodule_name == '_dmp_wrapped_module':\n                        new_prefix = prefix\n                    elif submodule_name == 'module':\n                        new_prefix = prefix\n            f(submodule, new_prefix, new_tree_level, *args, **kwargs)\n    f(root_module, '', 0, *args, **kwargs)\n    return return_fn(*args, **kwargs)",
    "docstring": "Performs a pre-order traversal of the modules in the hierarchy rooted at `` is overwritten to remove the prefix.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\fsdp\\_common_utils.py",
    "ast_data": "FunctionDef name:_apply_to_modules arg:root_module arg:module_fn arg:return_fn arg:filter_fqns arguments arg arg arg arg arg arg FunctionDef name:f arg:module arg:prefix arg:tree_level arguments arg arg arg arg arg Call For Call If Compare Assign Assign If Compare For If Call If BoolOp Compare Compare Assign If Compare Assign Call Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "hinge",
    "source_code": "@dispatch.add_dispatch_support\ndef hinge(y_true, y_pred):\n    y_pred = tensor_conversion.convert_to_tensor_v2_with_dispatch(y_pred)\n    y_true = math_ops.cast(y_true, y_pred.dtype)\n    y_true = _maybe_convert_labels(y_true)\n    return backend.mean(math_ops.maximum(1.0 - y_true * y_pred, 0.0), axis=-1)",
    "docstring": "Computes the hinge loss between and . Standalone usage: >>> y_true = np.random.choice([-1, 1], size=(2, 3)) >>> y_pred = np.random.random(size=(2, 3)) >>> loss = tf.keras.losses.hinge(y_true, y_pred) >>> assert loss.shape == (2,) >>> assert np.array_equal( ... loss.numpy(), ... np.mean(np.maximum(1. - y_true * y_pred, 0.), axis=-1)) Args: y_true: The ground truth values. values are expected to be -1 or 1. If binary (0 or 1) labels are provided they will be converted to -1 or 1. shape = . y_pred: The predicted values. shape = . Returns: Hinge loss values. shape = .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\losses.py",
    "ast_data": "FunctionDef name:hinge arg:y_true arg:y_pred arguments arg arg Assign Call Assign Call Assign Call Return return:yes Call Call"
  },
  {
    "library": "cherrypy",
    "name": "createLock",
    "source_code": "def createLock(self):\n    self.lock = None",
    "docstring": "Lock log write with no-op.",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\_cplogging.py",
    "ast_data": "FunctionDef name:createLock arg:self arguments arg Assign"
  },
  {
    "library": "kornia",
    "name": "scale_intrinsics",
    "source_code": "def scale_intrinsics(camera_matrix: Tensor, scale_factor: Union[float, Tensor]) -> Tensor:\n    K_scale = camera_matrix.clone()\n    K_scale[..., 0, 0] *= scale_factor\n    K_scale[..., 1, 1] *= scale_factor\n    K_scale[..., 0, 2] *= scale_factor\n    K_scale[..., 1, 2] *= scale_factor\n    return K_scale",
    "docstring": "Scale a camera matrix containing the intrinsics. Applies the scaling factor to the focal length and center of projection. Args: camera_matrix: the camera calibration matrix containing the intrinsic parameters. The expected shape for the tensor is :math:. scale_factor: the scaling factor to be applied. Returns: The scaled camera matrix with shame shape as input :math:.",
    "type": "function",
    "file_path": "kornia\\kornia\\geometry\\epipolar\\projection.py",
    "ast_data": "FunctionDef name:scale_intrinsics arg:camera_matrix arg:scale_factor arguments arg arg Assign Call Return return:yes"
  },
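A brief sketch of `scale_intrinsics`, assuming the import path implied by the file_path above; the intrinsics are made up for a 640x480 image halved to 320x240:

```python
import torch
from kornia.geometry.epipolar import scale_intrinsics  # assumed public path

K = torch.tensor([[[500.0, 0.0, 320.0],
                   [0.0, 500.0, 240.0],
                   [0.0, 0.0, 1.0]]])
K_half = scale_intrinsics(K, 0.5)
# Focal lengths and principal point are halved; the bottom row is untouched.
```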
  {
    "library": "django",
    "name": "has_perm",
    "source_code": "def has_perm(self, perm, obj=None):\n    if self.is_active and self.is_superuser:\n        return True\n    return _user_has_perm(self, perm, obj)",
    "docstring": "Return True if the user has the specified permission. Query all available auth backends, but return immediately if any backend returns True. Thus, a user who has permission from a single auth backend is assumed to have permission in general. If an object is provided, check permissions for that object.",
    "type": "method",
    "file_path": "django\\django\\contrib\\auth\\models.py",
    "ast_data": "FunctionDef name:has_perm arg:self arg:perm arg:obj arguments arg arg arg If BoolOp Return return:yes Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "_kurtosis",
    "source_code": "def _kurtosis(data):\n    data = np.ravel(data)\n    mu = data.mean()\n    m2 = ((data - mu) ** 2).mean()\n    m4 = ((data - mu) ** 4).mean()\n    return m4 / m2 ** 2 - 3",
    "docstring": "Fisher's excess kurtosis is fourth central moment / variance**2 - 3.",
    "type": "function",
    "file_path": "scipy\\scipy\\stats\\_distn_infrastructure.py",
    "ast_data": "FunctionDef name:_kurtosis arg:data arguments arg Assign Call Assign Call Assign Call Assign Call Return return:yes"
  },
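Since `_kurtosis` is private, here is an equivalent computation spelled out with NumPy to show the moment arithmetic; the sample is illustrative:

```python
import numpy as np

rng = np.random.default_rng(0)
data = rng.normal(size=100_000)

mu = data.mean()
m2 = ((data - mu) ** 2).mean()  # second central moment (variance)
m4 = ((data - mu) ** 4).mean()  # fourth central moment
# Fisher's excess kurtosis: ~0 for a normal sample, since m4/m2**2 -> 3.
print(m4 / m2 ** 2 - 3)
```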
  {
    "library": "django",
    "name": "geom_lib_version",
    "source_code": "def geom_lib_version(self):\n    if self.spatial_version >= (5,):\n        return self.rttopo_version()\n    else:\n        return self.lwgeom_version()",
    "docstring": "Return the version of the version-dependant geom library used by SpatiaLite.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\db\\backends\\spatialite\\operations.py",
    "ast_data": "FunctionDef name:geom_lib_version arg:self arguments arg If Compare Return return:yes Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "rsqrt",
    "source_code": "@tf_export('math.rsqrt', v1=['math.rsqrt', 'rsqrt'])\n@dispatch.register_unary_elementwise_api\n@dispatch.add_dispatch_support\n@deprecation.deprecated_endpoints('rsqrt')\ndef rsqrt(x, name=None):\n    return gen_math_ops.rsqrt(x, name)",
    "docstring": "Computes reciprocal of square root of x element-wise. For example: >>> x = tf.constant([2., 0., -2.]) >>> tf.math.rsqrt(x) Args: x: A . Must be one of the following types: , , , . name: A name for the operation (optional). Returns: A . Has the same type as .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\math_ops.py",
    "ast_data": "FunctionDef name:rsqrt arg:x arg:name arguments arg arg Return return:yes Call Call Call"
  },
  {
    "library": "matplotlib",
    "name": "set",
    "source_code": "def set(self, other):\n    _api.check_isinstance(Affine2DBase, other=other)\n    self._mtx = other.get_matrix()\n    self.invalidate()",
    "docstring": "Set this transformation from the frozen copy of another object.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\transforms.py",
    "ast_data": "FunctionDef name:set arg:self arg:other arguments arg arg Call Assign Call Call"
  },
  {
    "library": "tensorflow",
    "name": "list_children",
    "source_code": "def list_children(self, obj, save_type=base.SaveType.CHECKPOINT, **kwargs):\n    children = []\n    for name, ref in super(ObjectGraphView, self).children(obj, save_type, **kwargs).items():\n        children.append(base.TrackableReference(name, ref))\n    if obj is self.root and self._attached_dependencies:\n        children.extend(self._attached_dependencies)\n    return children",
    "docstring": "Returns list of all child trackables attached to obj. Args: obj: A object. save_type: A string, can be 'savedmodel' or 'checkpoint'. **kwargs: kwargs to use when retrieving the object's children. Returns: List of all children attached to the object.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\checkpoint\\graph_view.py",
    "ast_data": "FunctionDef name:list_children arg:self arg:obj arg:save_type arguments arg arg arg arg Assign For Call Call Call Call Call If BoolOp Compare Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "register_proto_function",
    "source_code": "def register_proto_function(collection_name, proto_type=None, to_proto=None, from_proto=None) -> None:\n    if to_proto and (not callable(to_proto)):\n        raise TypeError('to_proto must be callable.')\n    if from_proto and (not callable(from_proto)):\n        raise TypeError('from_proto must be callable.')\n    _proto_function_registry.register((proto_type, to_proto, from_proto), collection_name)",
    "docstring": "Registers and functions for collection_name. function converts a Python object to the corresponding protocol buffer, and returns the protocol buffer. function converts protocol buffer into a Python object, and returns the object.. Args: collection_name: Name of the collection. proto_type: Protobuf type, such as , , .. to_proto: Function that implements Python object to protobuf conversion. from_proto: Function that implements protobuf to Python object conversion.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\ops.py",
    "ast_data": "FunctionDef name:register_proto_function arg:collection_name arg:proto_type arg:to_proto arg:from_proto arguments arg arg arg arg If BoolOp Call Raise Call If BoolOp Call Raise Call Call"
  },
  {
    "library": "django",
    "name": "__set__",
    "source_code": "def __set__(self, instance, value):\n    gtype = self.field.geom_type\n    if gtype == 'RASTER' and (value is None or isinstance(value, (str, dict, self._klass))):\n        pass\n    elif isinstance(value, self._klass):\n        if value.srid is None:\n            value.srid = self.field.srid\n    elif value is None or isinstance(value, (str, memoryview)):\n        pass\n    else:\n        raise TypeError('Cannot set %s SpatialProxy (%s) with value of type: %s' % (instance.__class__.__name__, gtype, type(value)))\n    instance.__dict__[self.field.attname] = value\n    return value",
    "docstring": "Retrieve the proxied geometry or raster with the corresponding class specified during initialization. To set geometries, use values of None, HEXEWKB, or WKT. To set rasters, use JSON or dict values.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\db\\models\\proxy.py",
    "ast_data": "FunctionDef name:__set__ arg:self arg:instance arg:value arguments arg arg arg Assign If BoolOp Compare BoolOp Compare Call If Call If Compare Assign If BoolOp Compare Call Raise Call Call Assign Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "staged_predict",
    "source_code": "def staged_predict(self, X):\n    if self.n_classes_ == 2:\n        for raw_predictions in self._staged_raw_predict(X):\n            encoded_classes = (raw_predictions.squeeze() >= 0).astype(int)\n            yield self.classes_.take(encoded_classes, axis=0)\n    else:\n        for raw_predictions in self._staged_raw_predict(X):\n            encoded_classes = np.argmax(raw_predictions, axis=1)\n            yield self.classes_.take(encoded_classes, axis=0)",
    "docstring": "Predict class at each stage for X. This method allows monitoring (i.e. determine error on testing set) after each stage. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) The input samples. Internally, it will be converted to ``. Yields ------ y : generator of ndarray of shape (n_samples,) The predicted value of the input samples.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\ensemble\\_gb.py",
    "ast_data": "FunctionDef name:staged_predict arg:self arg:X arguments arg arg If Compare For Call Assign Call Compare Call Call For Call Assign Call Call"
  },
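A minimal sketch of `staged_predict` for monitoring, using a synthetic dataset; the hyperparameters are illustrative:

```python
from sklearn.datasets import make_classification
from sklearn.ensemble import GradientBoostingClassifier

X, y = make_classification(n_samples=200, random_state=0)
clf = GradientBoostingClassifier(n_estimators=5, random_state=0).fit(X, y)

# One prediction array per boosting stage; training accuracy typically rises.
for i, y_stage in enumerate(clf.staged_predict(X)):
    print(f'stage {i}: accuracy={(y_stage == y).mean():.3f}')
```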
  {
    "library": "django",
    "name": "process_clob",
    "source_code": "def process_clob(self, value):\n    return value",
    "docstring": "Return the value of a CLOB column, for backends that return a locator object that requires additional processing.",
    "type": "method",
    "file_path": "django\\django\\db\\backends\\base\\operations.py",
    "ast_data": "FunctionDef name:process_clob arg:self arg:value arguments arg arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "ld",
    "source_code": "def ld(v):\n    if isinstance(v, Undefined):\n        return v.read()\n    return v",
    "docstring": "Load variable operator.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\autograph\\operators\\variables.py",
    "ast_data": "FunctionDef name:ld arg:v arguments arg If Call Return return:yes Call Return return:yes"
  },
  {
    "library": "sphinx",
    "name": "add_lexer",
    "source_code": "def add_lexer(self, alias: str, lexer: type[Lexer]) -> None:\n    logger.debug('[app] adding lexer: %r', (alias, lexer))\n    lexer_classes[alias] = lexer",
    "docstring": "Register a new lexer for source code. Use *lexer* to highlight code blocks with the given language *alias*. .. versionadded:: 0.6 .. versionchanged:: 2.1 Take a lexer class as an argument. .. versionchanged:: 4.0 Removed support for lexer instances as an argument.",
    "type": "method",
    "file_path": "sphinx\\sphinx\\application.py",
    "ast_data": "FunctionDef name:add_lexer arg:self arg:alias arg:lexer arguments arg arg arg Call Assign"
  },
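A short `conf.py` sketch for `add_lexer`; the alias 'mylang' is hypothetical, and `PythonLexer` merely stands in for a custom lexer class:

```python
# conf.py
from pygments.lexers.python import PythonLexer

def setup(app):
    # Blocks written as ``.. code-block:: mylang`` now highlight as Python.
    app.add_lexer('mylang', PythonLexer)
```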
  {
    "library": "pytorch",
    "name": "_HalfOpenInterval",
    "source_code": "class _HalfOpenInterval(Constraint):\n\n    def __init__(self, lower_bound, upper_bound):\n        self.lower_bound = lower_bound\n        self.upper_bound = upper_bound\n        super().__init__()\n\n    def check(self, value):\n        return (self.lower_bound <= value) & (value < self.upper_bound)\n\n    def __repr__(self):\n        fmt_string = self.__class__.__name__[1:]\n        fmt_string += f'(lower_bound={self.lower_bound}, upper_bound={self.upper_bound})'\n        return fmt_string",
    "docstring": "Constrain to a real interval .",
    "type": "class",
    "file_path": "pytorch\\torch\\distributions\\constraints.py",
    "ast_data": "ClassDef name:_HalfOpenInterval FunctionDef name:__init__ arg:self arg:lower_bound arg:upper_bound arguments arg arg arg Assign Assign Call Call FunctionDef name:check arg:self arg:value arguments arg arg Return return:yes Compare Compare FunctionDef name:__repr__ arg:self arguments arg Assign Return return:yes"
  },
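The private class above is exposed through the `constraints.half_open_interval` factory; a small check sketch:

```python
import torch
from torch.distributions import constraints

unit = constraints.half_open_interval(0.0, 1.0)
# Membership is elementwise: lower bound inclusive, upper bound exclusive.
print(unit.check(torch.tensor([0.0, 0.5, 1.0])))  # tensor([ True,  True, False])
print(unit)  # HalfOpenInterval(lower_bound=0.0, upper_bound=1.0)
```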
  {
    "library": "scipy",
    "name": "rosen_hess",
    "source_code": "@xp_capabilities(skip_backends=[('jax.numpy', \"JAX doesn't allow item assignment.\")])\ndef rosen_hess(x):\n    xp = array_namespace(x)\n    x = xp_promote(x, force_floating=True, xp=xp)\n    H = xpx.create_diagonal(-400 * x[:-1], offset=1, xp=xp) - xpx.create_diagonal(400 * x[:-1], offset=-1, xp=xp)\n    diagonal = xp.zeros(x.shape[0], dtype=x.dtype)\n    diagonal = xpx.at(diagonal)[0].set(1200 * x[0] ** 2 - 400 * x[1] + 2)\n    diagonal = xpx.at(diagonal)[-1].set(200)\n    diagonal = xpx.at(diagonal)[1:-1].set(202 + 1200 * x[1:-1] ** 2 - 400 * x[2:])\n    return H + xpx.create_diagonal(diagonal, xp=xp)",
    "docstring": "The Hessian matrix of the Rosenbrock function. Parameters ---------- x : array_like 1-D array of points at which the Hessian matrix is to be computed. Returns ------- rosen_hess : ndarray The Hessian matrix of the Rosenbrock function at . See Also -------- rosen, rosen_der, rosen_hess_prod Examples -------- >>> import numpy as np >>> from scipy.optimize import rosen_hess >>> X = 0.1 * np.arange(4) >>> rosen_hess(X) array([[-38., 0., 0., 0.], [ 0., 134., -40., 0.], [ 0., -40., 130., -80.], [ 0., 0., -80., 200.]])",
    "type": "function",
    "file_path": "scipy\\scipy\\optimize\\_optimize.py",
    "ast_data": "FunctionDef name:rosen_hess arg:x arguments arg Assign Call Assign Call Assign Call Call Assign Call Assign Call Call Assign Call Call Assign Call Call Return return:yes Call Call"
  },
  {
    "library": "matplotlib",
    "name": "__init__",
    "source_code": "@_docstring.interpd\ndef __init__(self, axes, spine_type, path, **kwargs):\n    super().__init__(**kwargs)\n    self.axes = axes\n    self.set_figure(self.axes.get_figure(root=False))\n    self.spine_type = spine_type\n    self.set_facecolor('none')\n    self.set_edgecolor(mpl.rcParams['axes.edgecolor'])\n    self.set_linewidth(mpl.rcParams['axes.linewidth'])\n    self.set_capstyle('projecting')\n    self.axis = None\n    self.set_zorder(2.5)\n    self.set_transform(self.axes.transData)\n    self._bounds = None\n    self._position = None\n    _api.check_isinstance(mpath.Path, path=path)\n    self._path = path\n    self._patch_type = 'line'\n    self._patch_transform = mtransforms.IdentityTransform()",
    "docstring": "Parameters ---------- axes : The instance containing the spine. spine_type : str The spine type. path : The instance used to draw the spine. Other Parameters ---------------- **kwargs Valid keyword arguments are: %(Patch:kwdoc)s",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\spines.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:axes arg:spine_type arg:path arguments arg arg arg arg arg Call Call Assign Call Call Assign Call Call Call Call Assign Call Call Assign Assign Call Assign Assign Assign Call"
  },
  {
    "library": "pytorch",
    "name": "register_forward_pre_hook",
    "source_code": "def register_forward_pre_hook(self, hook: Union[Callable[[T, tuple[Any, ...]], Optional[Any]], Callable[[T, tuple[Any, ...], dict[str, Any]], Optional[tuple[Any, dict[str, Any]]]]], *, prepend: bool=False, with_kwargs: bool=False) -> RemovableHandle:\n    handle = RemovableHandle(self._forward_pre_hooks, extra_dict=self._forward_pre_hooks_with_kwargs)\n    self._forward_pre_hooks[handle.id] = hook\n    if with_kwargs:\n        self._forward_pre_hooks_with_kwargs[handle.id] = True\n    if prepend:\n        self._forward_pre_hooks.move_to_end(handle.id, last=False)\n    return handle",
    "docstring": "Register a forward pre-hook on the module. The hook will be called every time before :func: is invoked. If `torch.nn.Moduletorch.nn.Moduleregister_module_forward_pre_hooktorch.utils.hooks.RemovableHandle`",
    "type": "method",
    "file_path": "pytorch\\torch\\nn\\modules\\module.py",
    "ast_data": "FunctionDef name:register_forward_pre_hook arg:self arg:hook arguments arg arg arg arg Assign Call Assign If Assign If Call Return return:yes"
  },
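A minimal sketch of `register_forward_pre_hook`; the hook name and module are illustrative:

```python
import torch
import torch.nn as nn

layer = nn.Linear(2, 2)

def double_inputs(module, args):
    # Returning a tuple replaces the positional args passed to forward().
    return (args[0] * 2,)

handle = layer.register_forward_pre_hook(double_inputs)
out = layer(torch.ones(1, 2))  # forward() sees the doubled input
handle.remove()                # the returned handle removes the hook
```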
  {
    "library": "pandas",
    "name": "get_resampler_for_grouping",
    "source_code": "def get_resampler_for_grouping(groupby: GroupBy, rule, how=None, fill_method=None, limit: int | None=None, on=None, **kwargs) -> Resampler:\n    tg = TimeGrouper(freq=rule, key=on, **kwargs)\n    resampler = tg._get_resampler(groupby.obj)\n    return resampler._get_resampler_for_grouping(groupby=groupby, key=tg.key)",
    "docstring": "Return our appropriate resampler when grouping as well.",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\resample.py",
    "ast_data": "FunctionDef name:get_resampler_for_grouping arg:groupby arg:rule arg:how arg:fill_method arg:limit arg:on arguments arg arg arg arg arg arg arg Assign Call Assign Call Return return:yes Call"
  },
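`get_resampler_for_grouping` is internal plumbing; the public route that exercises it is `groupby(...).resample(...)`, sketched here with made-up data:

```python
import pandas as pd

idx = pd.date_range('2024-01-01', periods=4, freq='12h')
df = pd.DataFrame({'g': ['a', 'a', 'b', 'b'], 'v': [1, 2, 3, 4]}, index=idx)

# Resample within each group: per-group daily sums of 'v'.
print(df.groupby('g').resample('D')['v'].sum())
```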
  {
    "library": "scipy",
    "name": "_reject_objects",
    "source_code": "def _reject_objects(arr, name):\n    dt = np.asarray(arr).dtype\n    if not (np.issubdtype(dt, np.integer) or dt in [np.bool_, np.float16, np.float32, np.float64, np.complex64, np.complex128]):\n        msg = f'dtype={dt} is not supported by {name} and will raise an error in SciPy 1.17.0. Supported dtypes are: boolean, integer, `np.float16`,`np.float32`, `np.float64`, `np.complex64`, `np.complex128`.'\n        warnings.warn(msg, category=DeprecationWarning, stacklevel=3)",
    "docstring": "Warn if arr.dtype is object or longdouble.",
    "type": "function",
    "file_path": "scipy\\scipy\\signal\\_signaltools.py",
    "ast_data": "FunctionDef name:_reject_objects arg:arr arg:name arguments arg arg Assign Call If BoolOp Call Compare Assign Call"
  },
  {
    "library": "scipy",
    "name": "lqmn",
    "source_code": "def lqmn(m, n, z):\n    if not isscalar(m) or m < 0:\n        raise ValueError('m must be a non-negative integer.')\n    if not isscalar(n) or n < 0:\n        raise ValueError('n must be a non-negative integer.')\n    m, n = (int(m), int(n))\n    mm = max(1, m)\n    nn = max(1, n)\n    z = np.asarray(z)\n    if not np.issubdtype(z.dtype, np.inexact):\n        z = z.astype(np.float64)\n    if np.iscomplexobj(z):\n        q = np.empty((mm + 1, nn + 1) + z.shape, dtype=np.complex128)\n    else:\n        q = np.empty((mm + 1, nn + 1) + z.shape, dtype=np.float64)\n    qd = np.empty_like(q)\n    if z.ndim == 0:\n        _lqmn(z, out=(q, qd))\n    else:\n        _lqmn(z, out=(np.moveaxis(q, (0, 1), (-2, -1)), np.moveaxis(qd, (0, 1), (-2, -1))))\n    return (q[:m + 1, :n + 1], qd[:m + 1, :n + 1])",
    "docstring": "Sequence of associated Legendre functions of the second kind. Computes the associated Legendre function of the second kind of order m and degree n, `Q_n^m(z)` (lower case L) in descriptions of the associated Legendre function z : array_like, complex Input value. Returns ------- Qmn_z : (m+1, n+1) array Values for all orders 0..m and degrees 0..n Qmn_d_z : (m+1, n+1) array Derivatives for all orders 0..m and degrees 0..n References ---------- .. [1] Zhang, Shanjie and Jin, Jianming. \"Computation of Special Functions\", John Wiley and Sons, 1996.",
    "type": "function",
    "file_path": "scipy\\scipy\\special\\_basic.py",
    "ast_data": "FunctionDef name:lqmn arg:m arg:n arg:z arguments arg arg arg If BoolOp Call Compare Raise Call If BoolOp Call Compare Raise Call Assign Call Call Assign Call Assign Call Assign Call If Call Assign Call If Call Assign Call Assign Call Assign Call If Compare Call Call Call Call Return return:yes"
  },
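A quick sketch of calling `lqmn`; the argument values are illustrative:

```python
import numpy as np
from scipy.special import lqmn

# Q_n^m(0.5) and derivatives for all orders 0..2 and degrees 0..2.
q, qd = lqmn(2, 2, 0.5)
print(q.shape, qd.shape)  # (3, 3) (3, 3)
```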
  {
    "library": "matplotlib",
    "name": "add_collection",
    "source_code": "def add_collection(self, collection, autolim=True):\n    _api.check_isinstance(mcoll.Collection, collection=collection)\n    if not collection.get_label():\n        collection.set_label(f'_child{len(self._children)}')\n    self._children.append(collection)\n    collection._remove_method = self._children.remove\n    self._set_artist_props(collection)\n    if collection.get_clip_path() is None:\n        collection.set_clip_path(self.patch)\n    if autolim:\n        self._unstale_viewLim()\n        datalim = collection.get_datalim(self.transData)\n        points = datalim.get_points()\n        if not np.isinf(datalim.minpos).all():\n            points = np.concatenate([points, [datalim.minpos]])\n        self.update_datalim(points)\n    self.stale = True\n    return collection",
    "docstring": "Add a to the Axes; return the collection.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\axes\\_base.py",
    "ast_data": "FunctionDef name:add_collection arg:self arg:collection arg:autolim arguments arg arg arg Call If Call Call Call Call Assign Call If Compare Call Call If Call Assign Call Assign Call If Call Call Assign Call Call Assign Return return:yes"
  },
  {
    "library": "sphinx",
    "name": "apply_post_transforms",
    "source_code": "def apply_post_transforms(self, doctree: nodes.document, docname: str) -> None:\n    backup = self.current_document\n    new = deepcopy(backup)\n    new.docname = docname\n    try:\n        self.current_document = new\n        transformer = SphinxTransformer(doctree)\n        transformer.set_environment(self)\n        transformer.add_transforms(self._registry.get_post_transforms())\n        transformer.apply_transforms()\n    finally:\n        self.current_document = backup\n    self.events.emit('doctree-resolved', doctree, docname)",
    "docstring": "Apply all post-transforms.",
    "type": "method",
    "file_path": "sphinx\\sphinx\\environment\\__init__.py",
    "ast_data": "FunctionDef name:apply_post_transforms arg:self arg:doctree arg:docname arguments arg arg arg Assign Assign Call Assign Try Assign Assign Call Call Call Call Call Assign Call"
  },
  {
    "library": "tensorflow",
    "name": "add_target",
    "source_code": "def add_target(self, target: function_type.FunctionType) -> None:\n    self._dispatch_table[target] = None\n    for request in self._dispatch_cache:\n        if target.is_supertype_of(self._dispatch_cache[request]):\n            self._dispatch_cache[request] = target",
    "docstring": "Adds a new target type.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\core\\function\\polymorphism\\type_dispatch.py",
    "ast_data": "FunctionDef name:add_target arg:self arg:target arguments arg arg Assign For If Call Assign"
  },
  {
    "library": "scikit-learn",
    "name": "PairwiseDistancesBenchmark",
    "source_code": "class PairwiseDistancesBenchmark(Benchmark):\n    param_names = ['representation', 'metric', 'n_jobs']\n    params = (['dense', 'sparse'], ['cosine', 'euclidean', 'manhattan', 'correlation'], Benchmark.n_jobs_vals)\n\n    def setup(self, *params):\n        representation, metric, n_jobs = params\n        if representation == 'sparse' and metric == 'correlation':\n            raise NotImplementedError\n        if Benchmark.data_size == 'large':\n            if metric in ('manhattan', 'correlation'):\n                n_samples = 8000\n            else:\n                n_samples = 24000\n        elif metric in ('manhattan', 'correlation'):\n            n_samples = 4000\n        else:\n            n_samples = 12000\n        data = _random_dataset(n_samples=n_samples, representation=representation)\n        self.X, self.X_val, self.y, self.y_val = data\n        self.pdist_params = {'metric': metric, 'n_jobs': n_jobs}\n\n    def time_pairwise_distances(self, *args):\n        pairwise_distances(self.X, **self.pdist_params)\n\n    def peakmem_pairwise_distances(self, *args):\n        pairwise_distances(self.X, **self.pdist_params)",
    "docstring": "Benchmarks for pairwise distances.",
    "type": "class",
    "file_path": "scikit-learn\\asv_benchmarks\\benchmarks\\metrics.py",
    "ast_data": "ClassDef name:PairwiseDistancesBenchmark Assign Assign FunctionDef name:setup arg:self arguments arg arg Assign If BoolOp Compare Compare Raise If Compare If Compare Assign Assign If Compare Assign Assign Assign Call Assign Assign FunctionDef name:time_pairwise_distances arg:self arguments arg arg Call FunctionDef name:peakmem_pairwise_distances arg:self arguments arg arg Call"
  },
  {
    "library": "django",
    "name": "field_precisions",
    "source_code": "@property\ndef field_precisions(self):\n    return [capi.get_field_precision(capi.get_field_defn(self._ldefn, i)) for i in range(self.num_fields)]",
    "docstring": "Return the field precisions for the features.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\gdal\\layer.py",
    "ast_data": "FunctionDef name:field_precisions arg:self arguments arg Return return:yes Call Call Call"
  },
  {
    "library": "pygame",
    "name": "list_cameras",
    "source_code": "def list_cameras():\n    return [0]",
    "docstring": "Always only lists one camera. Functionality not supported in videocapture module.",
    "type": "function",
    "file_path": "pygame\\src_py\\_camera_vidcapture.py",
    "ast_data": "FunctionDef name:list_cameras arguments Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_get_diag",
    "source_code": "def _get_diag(self):\n    return array_ops.matrix_diag_part(self._tril)",
    "docstring": "Gets the diagonal part of kwarg.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\linalg\\linear_operator_lower_triangular.py",
    "ast_data": "FunctionDef name:_get_diag arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "_should_fallback_to_positional",
    "source_code": "@cache_readonly\ndef _should_fallback_to_positional(self) -> bool:\n    return False",
    "docstring": "Should an integer key be treated as positional?",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\indexes\\range.py",
    "ast_data": "FunctionDef name:_should_fallback_to_positional arg:self arguments arg Return return:yes"
  },
  {
    "library": "scipy",
    "name": "laplace",
    "source_code": "@_ni_docstrings.docfiller\ndef laplace(input, output=None, mode='reflect', cval=0.0, *, axes=None):\n\n    def derivative2(input, axis, output, mode, cval):\n        return correlate1d(input, [1, -2, 1], axis, output, mode, cval, 0)\n    return generic_laplace(input, derivative2, output, mode, cval, axes=axes)",
    "docstring": "N-D Laplace filter based on approximate second derivatives. Parameters ---------- %(input)s %(output)s %(mode_multiple)s %(cval)s axes : tuple of int or None The axes over which to apply the filter. If a tuple is provided, its length must match the number of axes. Returns ------- laplace : ndarray Filtered array. Has the same shape as . Examples -------- >>> from scipy import ndimage, datasets >>> import matplotlib.pyplot as plt >>> fig = plt.figure() >>> plt.gray() # show the filtered result in grayscale >>> ax1 = fig.add_subplot(121) # left side >>> ax2 = fig.add_subplot(122) # right side >>> ascent = datasets.ascent() >>> result = ndimage.laplace(ascent) >>> ax1.imshow(ascent) >>> ax2.imshow(result) >>> plt.show()",
    "type": "function",
    "file_path": "scipy\\scipy\\ndimage\\_filters.py",
    "ast_data": "FunctionDef name:laplace arg:input arg:output arg:mode arg:cval arguments arg arg arg arg arg FunctionDef name:derivative2 arg:input arg:axis arg:output arg:mode arg:cval arguments arg arg arg arg arg Return return:yes Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_FallbackException",
    "source_code": "class _FallbackException(Exception):\n    pass",
    "docstring": "Exception class to handle fallback from the fastpath. The fastpath that we refer to here is the one implemented to reduce per-op overheads (TFE_Py_FastPathExecute_C). If the conditions for executing the op on the fastpath are not met, we fallback to a safer (and more complete) slowpath, and this Exception is raised to signal that transition.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\core.py",
    "ast_data": "ClassDef name:_FallbackException"
  },
  {
    "library": "scipy",
    "name": "cli",
    "source_code": "@click.group(cls=CLI)\n@click.pass_context\ndef cli(ctx, **kwargs):\n    CLI.update_context(ctx, kwargs)",
    "docstring": "Developer Tool for SciPy \bCommands that require a built/installed instance are marked with :wrench:. \b**python dev.py --build-dir my-build test -s stats**",
    "type": "function",
    "file_path": "scipy\\dev.py",
    "ast_data": "FunctionDef name:cli arg:ctx arguments arg arg Call Call"
  },
  {
    "library": "tensorflow",
    "name": "populate_sample_weight",
    "source_code": "def populate_sample_weight(self, sample_weight, sample_weight_mode):\n    if sample_weight is None and (self.should_skip_target_weights() or sample_weight_mode is None or context.executing_eagerly()):\n        self._sample_weight = None\n        return\n    assert sample_weight_mode in ['temporal', 'samplewise']\n    if sample_weight_mode == 'temporal':\n        default_value = [[1.0]]\n        shape = [None, None]\n    else:\n        default_value = [1.0]\n        shape = [None]\n    if sample_weight is not None:\n        if not sample_weight.shape.is_compatible_with(shape):\n            raise ValueError('Received sample weight with shape {}. Expected shape {}.'.format(sample_weight.shape, shape))\n        self._sample_weight = sample_weight\n    else:\n        self._sample_weight = array_ops.placeholder_with_default(constant_op.constant(default_value, dtype=backend.floatx()), shape=shape, name=self.output_name + '_sample_weights')",
    "docstring": "Populate the sample weight and based on the sample weight mode.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\training_v1.py",
    "ast_data": "FunctionDef name:populate_sample_weight arg:self arg:sample_weight arg:sample_weight_mode arguments arg arg arg If BoolOp Compare BoolOp Call Compare Call Assign Return return:no Compare If Compare Assign Assign Assign Assign If Compare If Call Raise Call Call Assign Assign Call Call Call"
  },
  {
    "library": "pandas",
    "name": "dt64arr_to_periodarr",
    "source_code": "def dt64arr_to_periodarr(data, freq, tz=None) -> tuple[npt.NDArray[np.int64], BaseOffset]:\n    if not isinstance(data.dtype, np.dtype) or data.dtype.kind != 'M':\n        raise ValueError(f'Wrong dtype: {data.dtype}')\n    if freq is None:\n        if isinstance(data, ABCIndex):\n            data, freq = (data._values, data.freq)\n        elif isinstance(data, ABCSeries):\n            data, freq = (data._values, data.dt.freq)\n    elif isinstance(data, (ABCIndex, ABCSeries)):\n        data = data._values\n    reso = get_unit_from_dtype(data.dtype)\n    freq = Period._maybe_convert_freq(freq)\n    base = freq._period_dtype_code\n    return (c_dt64arr_to_periodarr(data.view('i8'), base, tz, reso=reso), freq)",
    "docstring": "Convert an datetime-like array to values Period ordinals. Parameters ---------- data : Union[Series[datetime64[ns]], DatetimeIndex, ndarray[datetime64ns]] freq : Optional[Union[str, Tick]] Must match the on the if is a DatetimeIndex or Series. tz : Optional[tzinfo] Returns ------- ordinals : ndarray[int64] freq : Tick The frequency extracted from the Series or DatetimeIndex if that's used.",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\arrays\\period.py",
    "ast_data": "FunctionDef name:dt64arr_to_periodarr arg:data arg:freq arg:tz arguments arg arg arg If BoolOp Call Compare Raise Call If Compare If Call Assign If Call Assign If Call Assign Assign Call Assign Call Assign Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "enforce_output_layout",
    "source_code": "def enforce_output_layout(gm: torch.fx.GraphModule):\n    *_, output_node = gm.graph.nodes\n    out_list = output_node.args[0]\n    with gm.graph.inserting_before(output_node):\n        for n in out_list:\n            if not isinstance(n.meta['val'], torch.Tensor) or not torch._prims_common.is_non_overlapping_and_dense(n.meta['val']):\n                continue\n            ft = n.meta['val']\n            new_node = gm.graph.call_function(prims.inductor_force_stride_order.default, (n, ft.stride()))\n            output_node.replace_input_with(n, new_node)\n    gm.graph.lint()\n    gm.recompile()",
    "docstring": "Make sure the output node's layout does not change due to compiler optimizations by adding aten.as_strided nodes with the expected strides. Only used for inference so we can assume all graph outputs are model outputs.",
    "type": "function",
    "file_path": "pytorch\\torch\\_inductor\\freezing.py",
    "ast_data": "FunctionDef name:enforce_output_layout arg:gm arguments arg Assign Assign With Call For If BoolOp Call Call Assign Assign Call Call Call Call Call"
  },
  {
    "library": "virtualenv",
    "name": "debug",
    "source_code": "@property\ndef debug(self):\n    if self._debug is None and self.exe is not None:\n        self._debug = get_env_debug_info(self.exe, self.debug_script(), self.app_data, self.env)\n    return self._debug",
    "docstring": ":return: debug information about the virtual environment (only valid after :meth: has run)",
    "type": "method",
    "file_path": "virtualenv\\src\\virtualenv\\create\\creator.py",
    "ast_data": "FunctionDef name:debug arg:self arguments arg If BoolOp Compare Compare Assign Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "consistent",
    "source_code": "def consistent(a, b):\n    if not a:\n        return not b or isvariadic(b[0])\n    if not b:\n        return not a or isvariadic(a[0])\n    if len(a) == len(b):\n        return all((issubclass(aa, bb) or issubclass(bb, aa) for aa, bb in zip(a, b)))\n    else:\n        p1 = 0\n        p2 = 0\n        while p1 < len(a) and p2 < len(b):\n            cur_a = a[p1]\n            cur_b = b[p2]\n            if not issubclass(cur_b, cur_a) and (not issubclass(cur_a, cur_b)):\n                return False\n            if not (isvariadic(cur_a) or isvariadic(cur_b)):\n                p1 += 1\n                p2 += 1\n            elif isvariadic(cur_a):\n                p2 += 1\n            elif isvariadic(cur_b):\n                p1 += 1\n        return isvariadic(cur_a) and p2 == len(b) or (isvariadic(cur_b) and p1 == len(a))",
    "docstring": "It is possible for an argument list to satisfy both A and B",
    "type": "function",
    "file_path": "pytorch\\torch\\fx\\experimental\\unification\\multipledispatch\\conflict.py",
    "ast_data": "FunctionDef name:consistent arg:a arg:b arguments arg arg If Return return:yes BoolOp Call If Return return:yes BoolOp Call If Compare Call Call Return return:yes Call BoolOp Call Call Call Assign Assign While BoolOp Compare Call Compare Call Assign Assign If BoolOp Call Call Return return:yes If BoolOp Call Call If Call If Call Return return:yes BoolOp BoolOp Call Compare Call BoolOp Call Compare Call"
  },
  {
    "library": "django",
    "name": "show_plan",
    "source_code": "def show_plan(self, connection, app_names=None):\n    loader = MigrationLoader(connection)\n    graph = loader.graph\n    if app_names:\n        self._validate_app_names(loader, app_names)\n        targets = [key for key in graph.leaf_nodes() if key[0] in app_names]\n    else:\n        targets = graph.leaf_nodes()\n    plan = []\n    seen = set()\n    for target in targets:\n        for migration in graph.forwards_plan(target):\n            if migration not in seen:\n                node = graph.node_map[migration]\n                plan.append(node)\n                seen.add(migration)\n\n    def print_deps(node):\n        out = []\n        for parent in sorted(node.parents):\n            out.append('%s.%s' % parent.key)\n        if out:\n            return ' ... (%s)' % ', '.join(out)\n        return ''\n    for node in plan:\n        deps = ''\n        if self.verbosity >= 2:\n            deps = print_deps(node)\n        if node.key in loader.applied_migrations:\n            self.stdout.write('[X]  %s.%s%s' % (node.key[0], node.key[1], deps))\n        else:\n            self.stdout.write('[ ]  %s.%s%s' % (node.key[0], node.key[1], deps))\n    if not plan:\n        self.stdout.write('(no migrations)', self.style.ERROR)",
    "docstring": "Show all known migrations (or only those of the specified app_names) in the order they will be applied.",
    "type": "method",
    "file_path": "django\\django\\core\\management\\commands\\showmigrations.py",
    "ast_data": "FunctionDef name:show_plan arg:self arg:connection arg:app_names arguments arg arg arg Assign Call Assign If Call Assign Call Compare Assign Call Assign Assign Call For For Call If Compare Assign Call Call FunctionDef name:print_deps arg:node arguments arg Assign For Call Call If Return return:yes Call Return return:yes For Assign If Compare Assign Call If Compare Call Call If Call"
  },
  {
    "library": "pytorch",
    "name": "benchmark_utilization",
    "source_code": "def benchmark_utilization(f, input, trace_folder, optimize_ctx=None, trace_file_name='tmp_chrome_trace', num_runs=1):\n    isExist = os.path.exists(trace_folder)\n    if not isExist:\n        os.makedirs(trace_folder)\n        print('create folder ' + trace_folder)\n    if optimize_ctx is None:\n        optimize_ctx = contextlib.nullcontext()\n    chrome_trace_file_name = os.path.join(trace_folder, trace_file_name + '.json')\n    total_length = dump_chrome_trace(f, input, chrome_trace_file_name, optimize_ctx, [ProfilerActivity.CUDA], num_runs=num_runs, devices=['cuda'])\n    utilization, mm_conv_utilization = compute_utilization(chrome_trace_file_name, total_length)\n    return (utilization, mm_conv_utilization)",
    "docstring": "Benchmark the GPU Utilization and percent of time spent on matmul and convolution operations of running f(input, **kwargs_for_f) with [optimize_ctx] [num_runs] times. It will produce a chrome trace file in trace_folder/trace_file_name.json Example: Args: f: function to benchmark input: input to :attr: trace_folder: name of the folder to store the chrome trace optimize_ctx: the context in which f will run trace_file_name: name of the dumped chrome trace file, default to \"tmp_chrome_trace\" num_runs: number of times to run f, excluding the warm-up runs, default to 1. Return: tuple: (GPU Utilization, percent of time spent on matmul and convolution)",
    "type": "function",
    "file_path": "pytorch\\torch\\_functorch\\benchmark_utils.py",
    "ast_data": "FunctionDef name:benchmark_utilization arg:f arg:input arg:trace_folder arg:optimize_ctx arg:trace_file_name arg:num_runs arguments arg arg arg arg arg arg Assign Call If Call Call If Compare Assign Call Assign Call Assign Call Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "Benchmark",
    "source_code": "class Benchmark(metaclass=_BenchmarkRegistrar):\n\n    @classmethod\n    def is_abstract(cls):\n        return len(cls.mro()) <= 2\n\n    def _get_name(self, overwrite_name=None):\n        stack = tf_inspect.stack()\n        calling_class = None\n        name = None\n        for frame in stack[::-1]:\n            f_locals = frame[0].f_locals\n            f_self = f_locals.get('self', None)\n            if isinstance(f_self, Benchmark):\n                calling_class = f_self\n                name = frame[3]\n                break\n        if calling_class is None:\n            raise ValueError('Unable to determine calling Benchmark class.')\n        name = overwrite_name or name\n        class_name = type(calling_class).__name__\n        name = '%s.%s' % (class_name, name)\n        return name\n\n    def report_benchmark(self, iters=None, cpu_time=None, wall_time=None, throughput=None, extras=None, name=None, metrics=None):\n        name = self._get_name(overwrite_name=name)\n        _global_report_benchmark(name=name, iters=iters, cpu_time=cpu_time, wall_time=wall_time, throughput=throughput, extras=extras, metrics=metrics)",
    "docstring": "Abstract class that provides helper functions for running benchmarks. Any class subclassing this one is immediately registered in the global benchmark registry. Only methods whose names start with the word \"benchmark\" will be run during benchmarking.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\platform\\benchmark.py",
    "ast_data": "ClassDef name:Benchmark FunctionDef name:is_abstract arg:cls arguments arg Return return:yes Compare Call Call FunctionDef name:_get_name arg:self arg:overwrite_name arguments arg arg Assign Call Assign Assign For Assign Assign Call If Call Assign Assign If Compare Raise Call Assign BoolOp Assign Call Assign Return return:yes FunctionDef name:report_benchmark arg:self arg:iters arg:cpu_time arg:wall_time arg:throughput arg:extras arg:name arg:metrics arguments arg arg arg arg arg arg arg arg Assign Call Call"
  },
  {
    "library": "pytorch",
    "name": "__init__",
    "source_code": "def __init__(self, name: str, input_nodes: list[Buffer], layout: Layout, input_reorder: Optional[list[int]]=None) -> None:\n    super().__init__(name)\n    self.input_nodes = input_nodes\n    self.output_node: Buffer = Buffer(name='buf_out', layout=layout)\n    self.input_reorder = input_reorder\n    self.layout = layout",
    "docstring": "Baseclass for ROCm C++ Templates, derived from KernelTemplate. Not to be instantiated directly. Args: name (str): The name of the ROCmTemplate object. input_nodes (List[IRNode]): A list of input IRNodes. layout (Layout): The layout of the output buffer / tensor. input_reorder (Optional[List[int]]): An optional list that specifies the order of the input nodes.",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\codegen\\rocm\\rocm_template.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:name arg:input_nodes arg:layout arg:input_reorder arguments arg arg arg arg arg Call Call Assign Call Assign Assign"
  },
  {
    "library": "numpy",
    "name": "VisibleDeprecationWarning",
    "source_code": "class VisibleDeprecationWarning(UserWarning):\n    pass",
    "docstring": "Visible deprecation warning. By default, python will not show deprecation warnings, so this class can be used when a very visible warning is helpful, for example because the usage is most likely a user bug.",
    "type": "class",
    "file_path": "numpy\\numpy\\exceptions.py",
    "ast_data": "ClassDef name:VisibleDeprecationWarning"
  },
  {
    "library": "tensorflow",
    "name": "nested_row_splits",
    "source_code": "@property\ndef nested_row_splits(self):\n    rt_nested_splits = [self.row_splits]\n    rt_values = self.values\n    while isinstance(rt_values, RaggedTensor):\n        rt_nested_splits.append(rt_values.row_splits)\n        rt_values = rt_values.values\n    return tuple(rt_nested_splits)",
    "docstring": "A tuple containing the row_splits for all ragged dimensions. is a tuple containing the tensors for all ragged dimensions in , ordered from outermost to innermost. In particular, where: * if is a . * otherwise. Returns: A of 1-D integer s. #### Example: >>> rt = tf.ragged.constant( ... [[[[3, 1, 4, 1], [], [5, 9, 2]], [], [[6], []]]]) >>> for i, splits in enumerate(rt.nested_row_splits): ... print('Splits for dimension %d: %s' % (i+1, splits.numpy())) Splits for dimension 1: [0 3] Splits for dimension 2: [0 3 3 5] Splits for dimension 3: [0 4 4 7 8 8]",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\ragged_tensor.py",
    "ast_data": "FunctionDef name:nested_row_splits arg:self arguments arg Assign Assign While Call Call Assign Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "_maybe_convert_arg",
    "source_code": "def _maybe_convert_arg(arg, xp):\n    if isinstance(arg, np.ndarray | np.generic):\n        return xp.asarray(arg)\n    elif isinstance(arg, list | tuple):\n        return type(arg)((_maybe_convert_arg(x, xp) for x in arg))\n    else:\n        return arg",
    "docstring": "Convert arrays/scalars hiding in the sequence .",
    "type": "function",
    "file_path": "scipy\\scipy\\ndimage\\_support_alternative_backends.py",
    "ast_data": "FunctionDef name:_maybe_convert_arg arg:arg arg:xp arguments arg arg If Call Return return:yes Call If Call Return return:yes Call Call Call Return return:yes"
  },
  {
    "library": "scrapy",
    "name": "parse_rows",
    "source_code": "def parse_rows(self, response: Response) -> Any:\n    for row in csviter(response, self.delimiter, self.headers, quotechar=self.quotechar):\n        ret = iterate_spider_output(self.parse_row(response, row))\n        yield from self.process_results(response, ret)",
    "docstring": "Receives a response and a dict (representing each row) with a key for each provided (or detected) header of the CSV file. This spider also gives the opportunity to override adapt_response and process_results methods for pre and post-processing purposes.",
    "type": "method",
    "file_path": "scrapy\\scrapy\\spiders\\feed.py",
    "ast_data": "FunctionDef name:parse_rows arg:self arg:response arguments arg arg For Call Assign Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_as_serialized_graph",
    "source_code": "@deprecation.deprecated_args(None, 'Use external_state_policy instead', 'allow_stateful')\ndef _as_serialized_graph(self, allow_stateful=None, strip_device_assignment=None, external_state_policy=options_lib.ExternalStatePolicy.WARN):\n    if external_state_policy:\n        policy = external_state_policy.value\n        return gen_dataset_ops.dataset_to_graph_v2(self._variant_tensor, external_state_policy=policy, strip_device_assignment=strip_device_assignment)\n    if strip_device_assignment:\n        return gen_dataset_ops.dataset_to_graph(self._variant_tensor, allow_stateful=allow_stateful, strip_device_assignment=strip_device_assignment)\n    return gen_dataset_ops.dataset_to_graph(self._variant_tensor, allow_stateful=allow_stateful)",
    "docstring": "Produces serialized graph representation of the dataset. Args: allow_stateful: If true, we allow stateful ops to be present in the graph def. In that case, the state in these ops would be thrown away. strip_device_assignment: If true, non-local (i.e. job and task) device assignment is stripped from ops in the serialized graph. external_state_policy: The ExternalStatePolicy enum that determines how we handle input pipelines that depend on external state. By default, its set to WARN. Returns: A scalar of type, representing this dataset as a serialized graph.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\data\\ops\\dataset_ops.py",
    "ast_data": "FunctionDef name:_as_serialized_graph arg:self arg:allow_stateful arg:strip_device_assignment arg:external_state_policy arguments arg arg arg arg If Assign Return return:yes Call If Return return:yes Call Return return:yes Call Call"
  },
  {
    "library": "pandas",
    "name": "_parse_td",
    "source_code": "def _parse_td(self, obj):\n    raise AbstractMethodError(self)",
    "docstring": "Return the td elements from a row element. Parameters ---------- obj : node-like A DOM node. Returns ------- list of node-like These are the elements of each row, i.e., the columns.",
    "type": "method",
    "file_path": "pandas\\pandas\\io\\html.py",
    "ast_data": "FunctionDef name:_parse_td arg:self arg:obj arguments arg arg Raise Call"
  },
  {
    "library": "pytorch",
    "name": "free_buffers",
    "source_code": "def free_buffers(self) -> None:\n    for name in sorted(self.buffer_names_to_free - V.graph.removed_buffers - V.graph.wrapper_code.freed):\n        if name in self.name_to_buf:\n            buf = self.name_to_buf[name]\n            if buf.can_free():\n                V.graph.wrapper_code.codegen_free(buf.node)\n        elif name in V.graph.graph_inputs:\n            inp = V.graph.graph_inputs[name]\n            if isinstance(inp, ir.TorchBindObject):\n                V.graph.wrapper_code.codegen_free(inp)\n            elif isinstance(inp, ir.GeneratorState):\n                continue\n            else:\n                storage = inp.data\n                assert isinstance(storage, ir.StorageBox) and storage.is_input_buffer()\n                V.graph.wrapper_code.codegen_free(storage.data)\n    self.buffer_names_to_free.clear()",
    "docstring": "Free any buffers that are no longer needed",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\scheduler.py",
    "ast_data": "FunctionDef name:free_buffers arg:self arguments arg For Call If Compare Assign If Call Call If Compare Assign If Call Call If Call Assign BoolOp Call Call Call Call"
  },
  {
    "library": "matplotlib",
    "name": "detrend_none",
    "source_code": "def detrend_none(x, axis=None):\n    return x",
    "docstring": "Return *x*: no detrending. Parameters ---------- x : any object An object containing the data axis : int This parameter is ignored. It is included for compatibility with detrend_mean See Also -------- detrend_mean : Another detrend algorithm. detrend_linear : Another detrend algorithm. detrend : A wrapper around all the detrend algorithms.",
    "type": "function",
    "file_path": "matplotlib\\lib\\matplotlib\\mlab.py",
    "ast_data": "FunctionDef name:detrend_none arg:x arg:axis arguments arg arg Return return:yes"
  },
  {
    "library": "scipy",
    "name": "step",
    "source_code": "def step(self):\n    if self.status != 'running':\n        raise RuntimeError('Attempt to step on a failed or finished solver.')\n    if self.n == 0 or self.t == self.t_bound:\n        self.t_old = self.t\n        self.t = self.t_bound\n        message = None\n        self.status = 'finished'\n    else:\n        t = self.t\n        success, message = self._step_impl()\n        if not success:\n            self.status = 'failed'\n        else:\n            self.t_old = t\n            if self.direction * (self.t - self.t_bound) >= 0:\n                self.status = 'finished'\n    return message",
    "docstring": "Perform one integration step. Returns ------- message : string or None Report from the solver. Typically a reason for a failure if is 'failed' after the step was taken or None otherwise.",
    "type": "method",
    "file_path": "scipy\\scipy\\integrate\\_ivp\\base.py",
    "ast_data": "FunctionDef name:step arg:self arguments arg If Compare Raise Call If BoolOp Compare Compare Assign Assign Assign Assign Assign Assign Call If Assign Assign If Compare Assign Return return:yes"
  },
  {
    "library": "kornia",
    "name": "warp_dst_inro_src",
    "source_code": "def warp_dst_inro_src(self, dst_img: Tensor) -> Tensor:\n    _height, _width = dst_img.shape[-2:]\n    warper = self.warper(_height, _width)\n    img_dst_to_src = warper(dst_img, self.model.forward_inverse())\n    return img_dst_to_src",
    "docstring": "Warp src_img with inverted estimated model.",
    "type": "method",
    "file_path": "kornia\\kornia\\geometry\\transform\\image_registrator.py",
    "ast_data": "FunctionDef name:warp_dst_inro_src arg:self arg:dst_img arguments arg arg Assign Assign Call Assign Call Call Return return:yes"
  },
  {
    "library": "pandas",
    "name": "_write_expansion_fields",
    "source_code": "def _write_expansion_fields(self) -> None:\n    self._write(_pad_bytes('', 5))",
    "docstring": "Write 5 zeros for expansion fields",
    "type": "method",
    "file_path": "pandas\\pandas\\io\\stata.py",
    "ast_data": "FunctionDef name:_write_expansion_fields arg:self arguments arg Call Call"
  },
  {
    "library": "tensorflow",
    "name": "CastContext",
    "source_code": "class CastContext:\n    pass",
    "docstring": "Contains context info and rules for casting values to a TypeSpec.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\types\\trace.py",
    "ast_data": "ClassDef name:CastContext"
  },
  {
    "library": "django",
    "name": "_store",
    "source_code": "def _store(self, messages, response, *args, **kwargs):\n    raise NotImplementedError('subclasses of BaseStorage must provide a _store() method')",
    "docstring": "Store a list of messages and return a list of any messages which could not be stored. One type of object must be able to be stored, ``. **This method must be implemented by a subclass.**",
    "type": "method",
    "file_path": "django\\django\\contrib\\messages\\storage\\base.py",
    "ast_data": "FunctionDef name:_store arg:self arg:messages arg:response arguments arg arg arg arg arg Raise Call"
  },
  {
    "library": "tensorflow",
    "name": "_verify_spec_compatible",
    "source_code": "def _verify_spec_compatible(input_name, spec_name, input_, spec):\n    assert isinstance(spec, tensor_spec.TensorSpec)\n    if input is None:\n        raise ValueError('{} cannot be None'.format(input_name))\n    if isinstance(input_, (bool, int, float, str, np.ndarray)):\n        input_ = tensor_conversion.convert_to_tensor_v2(input_)\n    input_dtype = getattr(input_, 'dtype', None)\n    if input_dtype != spec.dtype:\n        input_dtype_str = 'no dtype' if input_dtype is None else str(input_dtype)\n        raise TypeError('{} must have the same dtype as {}. Expected {}, got {}'.format(input_name, spec_name, spec.dtype, input_dtype_str))",
    "docstring": "Verifies that a symbol has a type compatible vith a given spec. Here, compatibility is viewed in the general TensorFlow sense: that the dtypes are the same after implicit conversion, if both are tensors. This verifier ensures consistent treatment of types across AutoGraph. Args: input_name: A name to use for in error messages. spec_name: A name to use for in error messages. input_: Any, value to verify. spec: TypeSpec that must be compatible with. Raises: ValueError if the two types have been determined not to be compatible.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\data\\ops\\iterator_autograph.py",
    "ast_data": "FunctionDef name:_verify_spec_compatible arg:input_name arg:spec_name arg:input_ arg:spec arguments arg arg arg arg Call If Compare Raise Call Call If Call Assign Call Assign Call If Compare Assign Compare Call Raise Call Call"
  },
  {
    "library": "django",
    "name": "__len__",
    "source_code": "def __len__(self):\n    return self.layer_count",
    "docstring": "Return the number of layers within the data source.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\gdal\\datasource.py",
    "ast_data": "FunctionDef name:__len__ arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "check_generator_arguments",
    "source_code": "def check_generator_arguments(y=None, sample_weight=None, validation_split=None):\n    if y is not None:\n        raise ValueError('`y` argument is not supported when data isa generator or Sequence instance. Instead pass targets as the second element of the generator.')\n    if sample_weight is not None:\n        raise ValueError('`sample_weight` argument is not supported when data isa generator or Sequence instance. Instead pass sample weights as the third element of the generator.')\n    if validation_split:\n        raise ValueError('If your data is in the form of a Python generator, you cannot use `validation_split`.')",
    "docstring": "Validates arguments passed when using a generator.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\training_utils_v1.py",
    "ast_data": "FunctionDef name:check_generator_arguments arg:y arg:sample_weight arg:validation_split arguments arg arg arg If Compare Raise Call If Compare Raise Call If Raise Call"
  },
  {
    "library": "pytorch",
    "name": "benchmark_combo_kernel",
    "source_code": "def benchmark_combo_kernel(self, node_list: Sequence[BaseSchedulerNode]) -> tuple[float, float, list[Optional[str]]]:\n    device = node_list[0].get_device()\n    V.graph.scheduler = self\n    self.current_device = device\n    assert device is not None\n    backend = self.get_backend(device)\n    return backend.benchmark_combo_kernel(node_list)",
    "docstring": "Benchmark fused list of nodes and return the execution time in milliseconds on randomly generated inputs.",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\scheduler.py",
    "ast_data": "FunctionDef name:benchmark_combo_kernel arg:self arg:node_list arguments arg arg Assign Call Assign Assign Compare Assign Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "pid",
    "source_code": "@property\ndef pid(self) -> int:\n    return self._pid",
    "docstring": "ID of the process which created this tensor (an integer).",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\client\\timeline.py",
    "ast_data": "FunctionDef name:pid arg:self arguments arg Return return:yes"
  },
  {
    "library": "scipy",
    "name": "getmaxprint",
    "source_code": "def getmaxprint(self):\n    return self._getmaxprint()",
    "docstring": "Maximum number of elements to display when printed.",
    "type": "method",
    "file_path": "scipy\\scipy\\sparse\\_matrix.py",
    "ast_data": "FunctionDef name:getmaxprint arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "divide",
    "source_code": "@tf_export('math.divide', 'divide')\n@dispatch.register_binary_elementwise_api\n@dispatch.add_dispatch_support\ndef divide(x, y, name=None):\n    if name is not None:\n        return DivideDelegateWithName(x, name) / y\n    else:\n        if not tensor_util.is_tf_type(x):\n            dtype = y.dtype.base_dtype if tensor_util.is_tf_type(y) else None\n            x = ops.convert_to_tensor(x, dtype=dtype)\n        return x / y",
    "docstring": "Computes Python style division of by . For example: >>> x = tf.constant([16, 12, 11]) >>> y = tf.constant([4, 6, 2]) >>> tf.divide(x,y) Args: x: A y: A name: A name for the operation (optional). Returns: A with same shape as input",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\math_ops.py",
    "ast_data": "FunctionDef name:divide arg:x arg:y arg:name arguments arg arg arg If Compare Return return:yes Call If Call Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "_get_tensor_summary",
    "source_code": "def _get_tensor_summary(name, display_name, description, tensor, content_type, components, json_config):\n    import torch\n    from tensorboard.plugins.mesh import metadata\n    tensor = torch.as_tensor(tensor)\n    tensor_metadata = metadata.create_summary_metadata(name, display_name, content_type, components, tensor.shape, description, json_config=json_config)\n    tensor = TensorProto(dtype='DT_FLOAT', float_val=tensor.reshape(-1).tolist(), tensor_shape=TensorShapeProto(dim=[TensorShapeProto.Dim(size=tensor.shape[0]), TensorShapeProto.Dim(size=tensor.shape[1]), TensorShapeProto.Dim(size=tensor.shape[2])]))\n    tensor_summary = Summary.Value(tag=metadata.get_instance_name(name, content_type), tensor=tensor, metadata=tensor_metadata)\n    return tensor_summary",
    "docstring": "Create a tensor summary with summary metadata. Args: name: Uniquely identifiable name of the summary op. Could be replaced by combination of name and type to make it unique even outside of this summary. display_name: Will be used as the display name in TensorBoard. Defaults to . description: A longform readable description of the summary data. Markdown is supported. tensor: Tensor to display in summary. content_type: Type of content inside the Tensor. components: Bitmask representing present parts (vertices, colors, etc.) that belong to the summary. json_config: A string, JSON-serialized dictionary of ThreeJS classes configuration. Returns: Tensor summary with metadata.",
    "type": "function",
    "file_path": "pytorch\\torch\\utils\\tensorboard\\summary.py",
    "ast_data": "FunctionDef name:_get_tensor_summary arg:name arg:display_name arg:description arg:tensor arg:content_type arg:components arg:json_config arguments arg arg arg arg arg arg arg Assign Call Assign Call Assign Call Call Call Call Call Call Call Assign Call Call Return return:yes"
  },
  {
    "library": "scrapy",
    "name": "open_spider",
    "source_code": "@deferred_f_from_coro_f\nasync def open_spider(self, spider: Spider) -> None:\n    self.slot = Slot(self.crawler.settings.getint('SCRAPER_SLOT_MAX_ACTIVE_SIZE'))\n    await maybe_deferred_to_future(self.itemproc.open_spider(spider))",
    "docstring": "Open the given spider for scraping and allocate resources for it",
    "type": "method",
    "file_path": "scrapy\\scrapy\\core\\scraper.py",
    "ast_data": "AsyncFunctionDef name:open_spider arg:self arg:spider arguments arg arg Assign Call Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "get_lr",
    "source_code": "@override\ndef get_lr(self) -> list[float]:\n    _warn_get_lr_called_within_step(self)\n    if self.last_epoch not in self.milestones:\n        return [group['lr'] for group in self.optimizer.param_groups]\n    return [group['lr'] * self.gamma ** self.milestones[self.last_epoch] for group in self.optimizer.param_groups]",
    "docstring": "Compute the learning rate of each parameter group.",
    "type": "method",
    "file_path": "pytorch\\torch\\optim\\lr_scheduler.py",
    "ast_data": "FunctionDef name:get_lr arg:self arguments arg Call If Compare Return return:yes Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "grad_func_name",
    "source_code": "@property\ndef grad_func_name(self):\n    return self._grad_func.name if self._grad_func else None",
    "docstring": "Returns the name of the gradient function.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\function.py",
    "ast_data": "FunctionDef name:grad_func_name arg:self arguments arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "from_float",
    "source_code": "@classmethod\ndef from_float(cls, mod, use_precomputed_fake_quant=False):\n    assert type(mod) == cls._FLOAT_MODULE, ' qat.' + cls.__name__ + '.from_float only works for ' + cls._FLOAT_MODULE.__name__\n    assert hasattr(mod, 'qconfig'), 'Input float module must have qconfig defined'\n    assert mod.qconfig, 'Input float module must have a valid qconfig'\n    weight_qscheme = mod.qconfig.weight().qscheme\n    assert weight_qscheme == torch.per_channel_affine_float_qparams, 'Embedding Bag weights requires a qscheme of torch.per_channel_affine_float_qparams Got ' + str(weight_qscheme)\n    qconfig = mod.qconfig\n    qat_embedding_bag = cls(mod.num_embeddings, mod.embedding_dim, mod.max_norm, mod.norm_type, mod.scale_grad_by_freq, mod.mode, mod.sparse, mod.weight, mod.include_last_offset, mod.padding_idx, qconfig=qconfig)\n    return qat_embedding_bag",
    "docstring": "Create a qat module from a float module Args: a float module, either produced by torch.ao.quantization utilities or directly from user",
    "type": "method",
    "file_path": "pytorch\\torch\\ao\\nn\\qat\\modules\\embedding_ops.py",
    "ast_data": "FunctionDef name:from_float arg:cls arg:mod arg:use_precomputed_fake_quant arguments arg arg arg Compare Call Call Assign Call Compare Call Assign Assign Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "Box",
    "source_code": "class Box(Node):\n\n    def __init__(self, width: float, height: float, depth: float) -> None:\n        super().__init__()\n        self.width = width\n        self.height = height\n        self.depth = depth\n\n    def shrink(self) -> None:\n        super().shrink()\n        if self.size < NUM_SIZE_LEVELS:\n            self.width *= SHRINK_FACTOR\n            self.height *= SHRINK_FACTOR\n            self.depth *= SHRINK_FACTOR\n\n    def render(self, output: Output, x1: float, y1: float, x2: float, y2: float) -> None:\n        pass",
    "docstring": "A node with a physical location.",
    "type": "class",
    "file_path": "matplotlib\\lib\\matplotlib\\_mathtext.py",
    "ast_data": "ClassDef name:Box FunctionDef name:__init__ arg:self arg:width arg:height arg:depth arguments arg arg arg arg Call Call Assign Assign Assign FunctionDef name:shrink arg:self arguments arg Call Call If Compare FunctionDef name:render arg:self arg:output arg:x1 arg:y1 arg:x2 arg:y2 arguments arg arg arg arg arg arg"
  },
  {
    "library": "pandas",
    "name": "validate_ordered",
    "source_code": "@staticmethod\ndef validate_ordered(ordered: Ordered) -> None:\n    if not is_bool(ordered):\n        raise TypeError(\"'ordered' must either be 'True' or 'False'\")",
    "docstring": "Validates that we have a valid ordered parameter. If it is not a boolean, a TypeError will be raised. Parameters ---------- ordered : object The parameter to be verified. Raises ------ TypeError If 'ordered' is not a boolean.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\dtypes\\dtypes.py",
    "ast_data": "FunctionDef name:validate_ordered arg:ordered arguments arg If Call Raise Call"
  },
  {
    "library": "matplotlib",
    "name": "FixedLocator",
    "source_code": "class FixedLocator(Locator):\n\n    def __init__(self, locs, nbins=None):\n        self.locs = np.asarray(locs)\n        _api.check_shape((None,), locs=self.locs)\n        self.nbins = max(nbins, 2) if nbins is not None else None\n\n    def set_params(self, nbins=None):\n        if nbins is not None:\n            self.nbins = nbins\n\n    def __call__(self):\n        return self.tick_values(None, None)\n\n    def tick_values(self, vmin, vmax):\n        if self.nbins is None:\n            return self.locs\n        step = max(int(np.ceil(len(self.locs) / self.nbins)), 1)\n        ticks = self.locs[::step]\n        for i in range(1, step):\n            ticks1 = self.locs[i::step]\n            if np.abs(ticks1).min() < np.abs(ticks).min():\n                ticks = ticks1\n        return self.raise_if_exceeds(ticks)",
    "docstring": "Place ticks at a set of fixed values. If *nbins* is None ticks are placed at all values. Otherwise, the *locs* array of possible positions will be subsampled to keep the number of ticks :math:. The subsampling will be done to include the smallest absolute value; for example, if zero is included in the array of possibilities, then it will be included in the chosen ticks.",
    "type": "class",
    "file_path": "matplotlib\\lib\\matplotlib\\ticker.py",
    "ast_data": "ClassDef name:FixedLocator FunctionDef name:__init__ arg:self arg:locs arg:nbins arguments arg arg arg Assign Call Call Assign Compare Call FunctionDef name:set_params arg:self arg:nbins arguments arg arg If Compare Assign FunctionDef name:__call__ arg:self arguments arg Return return:yes Call FunctionDef name:tick_values arg:self arg:vmin arg:vmax arguments arg arg arg If Compare Return return:yes Assign Call Call Call Call Assign For Call Assign If Compare Call Call Call Call Assign Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "VPacker",
    "source_code": "class VPacker(PackerBase):\n\n    def _get_bbox_and_child_offsets(self, renderer):\n        dpicor = renderer.points_to_pixels(1.0)\n        pad = self.pad * dpicor\n        sep = self.sep * dpicor\n        if self.width is not None:\n            for c in self.get_visible_children():\n                if isinstance(c, PackerBase) and c.mode == 'expand':\n                    c.set_width(self.width)\n        bboxes = [c.get_bbox(renderer) for c in self.get_visible_children()]\n        (x0, x1), xoffsets = _get_aligned_offsets([bbox.intervalx for bbox in bboxes], self.width, self.align)\n        height, yoffsets = _get_packed_offsets([bbox.height for bbox in bboxes], self.height, sep, self.mode)\n        yoffsets = height - (yoffsets + [bbox.y1 for bbox in bboxes])\n        ydescent = yoffsets[0]\n        yoffsets = yoffsets - ydescent\n        return (Bbox.from_bounds(x0, -ydescent, x1 - x0, height).padded(pad), [*zip(xoffsets, yoffsets)])",
    "docstring": "VPacker packs its children vertically, automatically adjusting their relative positions at draw time. .. code-block:: none +---------+ | Child 1 | | Child 2 | | Child 3 | +---------+",
    "type": "class",
    "file_path": "matplotlib\\lib\\matplotlib\\offsetbox.py",
    "ast_data": "ClassDef name:VPacker FunctionDef name:_get_bbox_and_child_offsets arg:self arg:renderer arguments arg arg Assign Call Assign Assign If Compare For Call If BoolOp Call Compare Call Assign Call Call Assign Call Assign Call Assign Assign Assign Return return:yes Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "size",
    "source_code": "def size(self, name=None):\n    del name\n    return constant_op.constant(len(self._tensor_array))",
    "docstring": "See TensorArray.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\tensor_array_ops.py",
    "ast_data": "FunctionDef name:size arg:self arg:name arguments arg arg Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "rank",
    "source_code": "@property\ndef rank(self):\n    if self._dims is not None:\n        return len(self._dims)\n    return None",
    "docstring": "Returns the rank of this shape, or None if it is unspecified.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\tensor_shape.py",
    "ast_data": "FunctionDef name:rank arg:self arguments arg If Compare Return return:yes Call Return return:no"
  },
  {
    "library": "numpy",
    "name": "zfill",
    "source_code": "@set_module('numpy.strings')\n@array_function_dispatch(_zfill_dispatcher)\ndef zfill(a, width):\n    width = np.asanyarray(width)\n    if not np.issubdtype(width.dtype, np.integer):\n        raise TypeError(f\"unsupported type {width.dtype} for operand 'width'\")\n    a = np.asanyarray(a)\n    if a.dtype.char == 'T':\n        return _zfill(a, width)\n    width = np.maximum(str_len(a), width)\n    shape = np.broadcast_shapes(a.shape, width.shape)\n    out_dtype = f'{a.dtype.char}{width.max()}'\n    out = np.empty_like(a, shape=shape, dtype=out_dtype)\n    return _zfill(a, width, out=out)",
    "docstring": "Return the numeric string left-filled with zeros. A leading sign prefix (`a` dtype, depending on input type See Also -------- str.zfill Examples -------- >>> import numpy as np >>> np.strings.zfill(['1', '-1', '+1'], 3) array(['001', '-01', '+01'], dtype='<U3')",
    "type": "function",
    "file_path": "numpy\\numpy\\_core\\strings.py",
    "ast_data": "FunctionDef name:zfill arg:a arg:width arguments arg arg Assign Call If Call Raise Call Assign Call If Compare Return return:yes Call Assign Call Call Assign Call Assign Call Assign Call Return return:yes Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "is_lifted_tensor_constant",
    "source_code": "def is_lifted_tensor_constant(program: 'ExportedProgram', node: torch.fx.Node) -> bool:\n    return node.name in program.graph_signature.inputs_to_lifted_tensor_constants",
    "docstring": "Checks if the given node is a lifted tensor constant within the exported program",
    "type": "function",
    "file_path": "pytorch\\torch\\_export\\utils.py",
    "ast_data": "FunctionDef name:is_lifted_tensor_constant arg:program arg:node arguments arg arg Return return:yes Compare"
  },
  {
    "library": "authlib",
    "name": "authenticate_endpoint_client",
    "source_code": "def authenticate_endpoint_client(self, request):\n    client = self.server.authenticate_client(request, self.CLIENT_AUTH_METHODS, self.ENDPOINT_NAME)\n    request.client = client\n    return client",
    "docstring": "Authentication client for endpoint with ``.",
    "type": "method",
    "file_path": "authlib\\authlib\\oauth2\\rfc6749\\token_endpoint.py",
    "ast_data": "FunctionDef name:authenticate_endpoint_client arg:self arg:request arguments arg arg Assign Call Assign Return return:yes"
  },
  {
    "library": "django",
    "name": "field_widths",
    "source_code": "@property\ndef field_widths(self):\n    return [capi.get_field_width(capi.get_field_defn(self._ldefn, i)) for i in range(self.num_fields)]",
    "docstring": "Return a list of the maximum field widths for the features.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\gdal\\layer.py",
    "ast_data": "FunctionDef name:field_widths arg:self arguments arg Return return:yes Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "CallgrindModuleType",
    "source_code": "class CallgrindModuleType(Protocol):\n    __file__: str\n    __name__: str\n\n    def _valgrind_supported_platform(self) -> bool:\n        ...\n\n    def _valgrind_toggle(self) -> None:\n        ...",
    "docstring": "Replicates the valgrind endpoints in . These bindings are used to collect Callgrind profiles on earlier versions of PyTorch and will eventually be removed.",
    "type": "class",
    "file_path": "pytorch\\torch\\utils\\benchmark\\utils\\_stubs.py",
    "ast_data": "ClassDef name:CallgrindModuleType FunctionDef name:_valgrind_supported_platform arg:self arguments arg FunctionDef name:_valgrind_toggle arg:self arguments arg"
  },
  {
    "library": "pytorch",
    "name": "merge_with",
    "source_code": "def merge_with(func, *dicts, **kwargs):\n    if len(dicts) == 1 and (not isinstance(dicts[0], Mapping)):\n        dicts = dicts[0]\n    factory = _get_factory(merge_with, kwargs)\n    result = factory()\n    for d in dicts:\n        for k, v in d.items():\n            if k not in result:\n                result[k] = [v]\n            else:\n                result[k].append(v)\n    return valmap(func, result, factory)",
    "docstring": "Merge dictionaries and apply function to combined values A key may occur in more than one dict, and all values mapped from the key will be passed to the function as a list, such as func([val1, val2, ...]). >>> merge_with(sum, {1: 1, 2: 2}, {1: 10, 2: 20}) {1: 11, 2: 22} >>> merge_with(first, {1: 1, 2: 2}, {2: 20, 3: 30}) # doctest: +SKIP {1: 1, 2: 2, 3: 30} See Also: merge",
    "type": "function",
    "file_path": "pytorch\\torch\\fx\\experimental\\unification\\unification_tools.py",
    "ast_data": "FunctionDef name:merge_with arg:func arguments arg arg arg If BoolOp Compare Call Call Assign Assign Call Assign Call For For Call If Compare Assign Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "serialize_many_sparse_v2",
    "source_code": "@tf_export('io.serialize_many_sparse', v1=[])\n@dispatch.add_dispatch_support\ndef serialize_many_sparse_v2(sp_input, out_type=dtypes.string, name=None):\n    sp_input = _convert_to_sparse_tensor(sp_input)\n    return gen_sparse_ops.serialize_many_sparse(sp_input.indices, sp_input.values, sp_input.dense_shape, name=name, out_type=out_type)",
    "docstring": "Serialize -minibatch into an . The must have rank greater than 1, and the first dimension is treated as the minibatch dimension. Elements of the must be sorted in increasing order of this first dimension. The serialized objects going into each row of the output will have rank . The minibatch size is extracted from . Args: sp_input: The input rank . out_type: The to use for serialization. name: A name prefix for the returned tensors (optional). Returns: A matrix (2-D ) with rows and columns. Each column represents serialized 's indices, values, and shape (respectively). Raises: TypeError: If is not a .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\sparse_ops.py",
    "ast_data": "FunctionDef name:serialize_many_sparse_v2 arg:sp_input arg:out_type arg:name arguments arg arg arg Assign Call Return return:yes Call Call"
  },
  {
    "library": "scipy",
    "name": "nearest_k_p",
    "source_code": "def nearest_k_p(self, k: int, left: bool=True) -> int:\n    p_q, remainder = divmod(k, self.hop)\n    if remainder == 0:\n        return k\n    return p_q * self.hop if left else (p_q + 1) * self.hop",
    "docstring": "Return nearest sample index k_p for which t[k_p] == t[p] holds. The nearest next smaller time sample p (where t[p] is the center position of the window of the p-th slice) is p_k = k // . If is a divisor of then is returned. If is set then p_k * is returned else (p_k+1) * . This method can be used to slice an input signal into chunks for calculating the STFT and iSTFT incrementally. See Also -------- delta_t: Time increment of STFT (`n` samples. ShortTimeFFT: Class this method belongs to.",
    "type": "method",
    "file_path": "scipy\\scipy\\signal\\_short_time_fft.py",
    "ast_data": "FunctionDef name:nearest_k_p arg:self arg:k arg:left arguments arg arg arg Assign Call If Compare Return return:yes Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_register_load_state_dict_pre_hook",
    "source_code": "def _register_load_state_dict_pre_hook(self, hook, with_module=False):\n    handle = RemovableHandle(self._load_state_dict_pre_hooks)\n    self._load_state_dict_pre_hooks[handle.id] = _WrappedHook(hook, self if with_module else None)\n    return handle",
    "docstring": "See :meth: for details. A subtle difference is that if `~torch.nn.Module.register_load_state_dict_pre_hook` as the first argument. Arguments: hook (Callable): Callable hook that will be invoked before loading the state dict. with_module (bool, optional): Whether or not to pass the module instance to the hook as the first parameter.",
    "type": "method",
    "file_path": "pytorch\\torch\\nn\\modules\\module.py",
    "ast_data": "FunctionDef name:_register_load_state_dict_pre_hook arg:self arg:hook arg:with_module arguments arg arg arg Assign Call Assign Call Return return:yes"
  },
  {
    "library": "pandas",
    "name": "drop",
    "source_code": "def drop(self, labels: Index | np.ndarray | Iterable[Hashable], errors: IgnoreRaise='raise') -> Index:\n    if not isinstance(labels, Index):\n        arr_dtype = 'object' if self.dtype == 'object' else None\n        labels = com.index_labels_to_array(labels, dtype=arr_dtype)\n    indexer = self.get_indexer_for(labels)\n    mask = indexer == -1\n    if mask.any():\n        if errors != 'ignore':\n            raise KeyError(f'{labels[mask].tolist()} not found in axis')\n        indexer = indexer[~mask]\n    return self.delete(indexer)",
    "docstring": "Make new Index with passed list of labels deleted. Parameters ---------- labels : array-like or scalar Array-like object or a scalar value, representing the labels to be removed from the Index. errors : {'ignore', 'raise'}, default 'raise' If 'ignore', suppress error and existing labels are dropped. Returns ------- Index Will be same type as self, except for RangeIndex. Raises ------ KeyError If not all of the labels are found in the selected axis See Also -------- Index.dropna : Return Index without NA/NaN values. Index.drop_duplicates : Return Index with duplicate values removed. Examples -------- >>> idx = pd.Index([\"a\", \"b\", \"c\"]) >>> idx.drop([\"a\"]) Index(['b', 'c'], dtype='object')",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\indexes\\base.py",
    "ast_data": "FunctionDef name:drop arg:self arg:labels arg:errors arguments arg arg arg If Call Assign Compare Assign Call Assign Call Assign Compare If Call If Compare Raise Call Call Assign Return return:yes Call"
  },
  {
    "library": "django",
    "name": "_check_m2m_through_same_relationship",
    "source_code": "@classmethod\ndef _check_m2m_through_same_relationship(cls):\n    errors = []\n    seen_intermediary_signatures = []\n    fields = cls._meta.local_many_to_many\n    fields = (f for f in fields if isinstance(f.remote_field.model, ModelBase))\n    fields = (f for f in fields if isinstance(f.remote_field.through, ModelBase))\n    for f in fields:\n        signature = (f.remote_field.model, cls, f.remote_field.through, f.remote_field.through_fields)\n        if signature in seen_intermediary_signatures:\n            errors.append(checks.Error(\"The model has two identical many-to-many relations through the intermediate model '%s'.\" % f.remote_field.through._meta.label, obj=cls, id='models.E003'))\n        else:\n            seen_intermediary_signatures.append(signature)\n    return errors",
    "docstring": "Check if no relationship model is used by more than one m2m field.",
    "type": "method",
    "file_path": "django\\django\\db\\models\\base.py",
    "ast_data": "FunctionDef name:_check_m2m_through_same_relationship arg:cls arguments arg Assign Assign Assign Assign Call Assign Call For Assign If Compare Call Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_graph_module_flat_inputs",
    "source_code": "def _graph_module_flat_inputs(self, args: Any, kwargs: Any) -> Any:\n    in_spec = self.call_spec.in_spec\n    flat_args, received_spec = self._get_flat_args_with_check(args, kwargs)\n    if in_spec is not None and (not is_equivalent(received_spec, in_spec, _fx_collection_equivalence_fn)):\n        raise ValueError(f'Trying to flatten user inputs with exported input tree spec: \\n{in_spec}\\nbut actually got inputs with tree spec of: \\n{received_spec}')\n    additional_inputs = []\n    for input_ in self.graph_signature.input_specs:\n        if input_.kind == InputKind.USER_INPUT:\n            continue\n        elif input_.kind in (InputKind.PARAMETER, InputKind.BUFFER):\n            if input_.persistent is False:\n                additional_inputs.append(self.constants[input_.target])\n            else:\n                additional_inputs.append(self.state_dict[input_.target])\n        elif input_.kind in (InputKind.CONSTANT_TENSOR, InputKind.CUSTOM_OBJ):\n            additional_inputs.append(self.constants[input_.target])\n    additional_inputs = tuple(additional_inputs)\n    return additional_inputs + flat_args",
    "docstring": "Transform args, kwargs of __call__ to args for graph_module. self.graph_module takes stuff from state dict as inputs. The invariant is for ep: ExportedProgram is ep(args, kwargs) == ep.postprocess(ep.graph_module(ep.graph_module_flat_inputs(args, kwargs)))",
    "type": "method",
    "file_path": "pytorch\\torch\\export\\exported_program.py",
    "ast_data": "FunctionDef name:_graph_module_flat_inputs arg:self arg:args arg:kwargs arguments arg arg arg Assign Assign Call If BoolOp Compare Call Raise Call Assign For If Compare If Compare If Compare Call Call If Compare Call Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_values",
    "source_code": "@property\ndef _values(self):\n    raise NotImplementedError('Abstract method')",
    "docstring": "An iterable/sequence which may contain trackable objects.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\trackable\\data_structures.py",
    "ast_data": "FunctionDef name:_values arg:self arguments arg Raise Call"
  },
  {
    "library": "numpy",
    "name": "feature_sorted",
    "source_code": "def feature_sorted(self, names, reverse=False):\n\n    def sort_cb(k):\n        if isinstance(k, str):\n            return self.feature_supported[k]['interest']\n        rank = max([self.feature_supported[f]['interest'] for f in k])\n        rank += len(k) - 1\n        return rank\n    return sorted(names, reverse=reverse, key=sort_cb)",
    "docstring": "Sort a list of CPU features ordered by the lowest interest. Parameters ---------- 'names': sequence sequence of supported feature names in uppercase. 'reverse': bool, optional If true, the sorted features is reversed. (highest interest) Returns ------- list, sorted CPU features",
    "type": "method",
    "file_path": "numpy\\numpy\\distutils\\ccompiler_opt.py",
    "ast_data": "FunctionDef name:feature_sorted arg:self arg:names arg:reverse arguments arg arg arg FunctionDef name:sort_cb arg:k arguments arg If Call Return return:yes Assign Call Call Return return:yes Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "_upsample",
    "source_code": "def _upsample(self, method, limit: int | None=None, fill_value=None):\n    if isinstance(self.ax, DatetimeIndex):\n        return super()._upsample(method, limit=limit, fill_value=fill_value)\n    ax = self.ax\n    obj = self.obj\n    new_index = self.binner\n    memb = ax.asfreq(self.freq, how=self.convention)\n    if method == 'asfreq':\n        method = None\n    indexer = memb.get_indexer(new_index, method=method, limit=limit)\n    new_obj = _take_new_index(obj, indexer, new_index)\n    return self._wrap_result(new_obj)",
    "docstring": "Parameters ---------- method : {'backfill', 'bfill', 'pad', 'ffill'} Method for upsampling. limit : int, default None Maximum size gap to fill when reindexing. fill_value : scalar, default None Value to use for missing values.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\resample.py",
    "ast_data": "FunctionDef name:_upsample arg:self arg:method arg:limit arg:fill_value arguments arg arg arg arg If Call Return return:yes Call Call Assign Assign Assign Assign Call If Compare Assign Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "_params_dunnett",
    "source_code": "def _params_dunnett(samples: list[np.ndarray], control: np.ndarray) -> tuple[np.ndarray, int, int, np.ndarray, int]:\n    n_samples = np.array([sample.size for sample in samples])\n    n_sample = n_samples.sum()\n    n_control = control.size\n    n = n_sample + n_control\n    n_groups = len(samples)\n    df = n - n_groups - 1\n    rho = n_control / n_samples + 1\n    rho = 1 / np.sqrt(rho[:, None] * rho[None, :])\n    np.fill_diagonal(rho, 1)\n    return (rho, df, n_groups, n_samples, n_control)",
    "docstring": "Specific parameters for Dunnett's test. Degree of freedom is the number of observations minus the number of groups including the control.",
    "type": "function",
    "file_path": "scipy\\scipy\\stats\\_multicomp.py",
    "ast_data": "FunctionDef name:_params_dunnett arg:samples arg:control arguments arg arg Assign Call Assign Call Assign Assign Assign Call Assign Assign Assign Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "ssf",
    "source_code": "def ssf(self):\n    ssfs = [n.ssf() if isinstance(n, QN) else n for n in self.qn]\n    ssf_string = ''\n    for i in range(0, len(self.qn) - 1):\n        if self.has_subscript():\n            delimiter = '_sub_'\n        else:\n            delimiter = '_'\n        ssf_string += ssfs[i] + delimiter\n    return ssf_string + ssfs[-1]",
    "docstring": "Simple symbol form.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\autograph\\pyct\\qual_names.py",
    "ast_data": "FunctionDef name:ssf arg:self arguments arg Assign Call Call Assign For Call Call If Call Assign Assign Return return:yes"
  },
  {
    "library": "django",
    "name": "construct_change_message",
    "source_code": "def construct_change_message(form, formsets, add):\n    changed_data = form.changed_data\n    with translation_override(None):\n        changed_field_labels = _get_changed_field_labels_from_form(form, changed_data)\n    change_message = []\n    if add:\n        change_message.append({'added': {}})\n    elif form.changed_data:\n        change_message.append({'changed': {'fields': changed_field_labels}})\n    if formsets:\n        with translation_override(None):\n            for formset in formsets:\n                for added_object in formset.new_objects:\n                    change_message.append({'added': {'name': str(added_object._meta.verbose_name), 'object': str(added_object)}})\n                for changed_object, changed_fields in formset.changed_objects:\n                    change_message.append({'changed': {'name': str(changed_object._meta.verbose_name), 'object': str(changed_object), 'fields': _get_changed_field_labels_from_form(formset.forms[0], changed_fields)}})\n                for deleted_object in formset.deleted_objects:\n                    change_message.append({'deleted': {'name': str(deleted_object._meta.verbose_name), 'object': str(deleted_object)}})\n    return change_message",
    "docstring": "Construct a JSON structure describing changes from a changed object. Translations are deactivated so that strings are stored untranslated. Translation happens later on LogEntry access.",
    "type": "function",
    "file_path": "django\\django\\contrib\\admin\\utils.py",
    "ast_data": "FunctionDef name:construct_change_message arg:form arg:formsets arg:add arguments arg arg arg Assign With Call Assign Call Assign If Call If Call If With Call For For Call Call Call For Call Call Call Call For Call Call Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "get_majorticklabels",
    "source_code": "def get_majorticklabels(self):\n    self._update_ticks()\n    ticks = self.get_major_ticks()\n    labels1 = [tick.label1 for tick in ticks if tick.label1.get_visible()]\n    labels2 = [tick.label2 for tick in ticks if tick.label2.get_visible()]\n    return labels1 + labels2",
    "docstring": "Return this Axis' major tick labels, as a list of .",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\axis.py",
    "ast_data": "FunctionDef name:get_majorticklabels arg:self arguments arg Call Assign Call Assign Call Assign Call Return return:yes"
  },
  {
    "library": "pandas",
    "name": "maybe_upcast_numeric_to_64bit",
    "source_code": "def maybe_upcast_numeric_to_64bit(arr: NumpyIndexT) -> NumpyIndexT:\n    dtype = arr.dtype\n    if dtype.kind == 'i' and dtype != np.int64:\n        return arr.astype(np.int64)\n    elif dtype.kind == 'u' and dtype != np.uint64:\n        return arr.astype(np.uint64)\n    elif dtype.kind == 'f' and dtype != np.float64:\n        return arr.astype(np.float64)\n    else:\n        return arr",
    "docstring": "If array is a int/uint/float bit size lower than 64 bit, upcast it to 64 bit. Parameters ---------- arr : ndarray or ExtensionArray Returns ------- ndarray or ExtensionArray",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\dtypes\\cast.py",
    "ast_data": "FunctionDef name:maybe_upcast_numeric_to_64bit arg:arr arguments arg Assign If BoolOp Compare Compare Return return:yes Call If BoolOp Compare Compare Return return:yes Call If BoolOp Compare Compare Return return:yes Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "set_verbosity",
    "source_code": "@tf_export('autograph.set_verbosity')\ndef set_verbosity(level, alsologtostdout=False):\n    global verbosity_level\n    global echo_log_to_stdout\n    verbosity_level = level\n    echo_log_to_stdout = alsologtostdout",
    "docstring": "Sets the AutoGraph verbosity level. _Debug logging in AutoGraph_ More verbose logging is useful to enable when filing bug reports or doing more in-depth debugging. There are two means to control the logging verbosity: * The function * The environment variable takes precedence over the environment variable. For example: Logs entries are output to [absl]( [default output]( with level. Logs can be mirrored to stdout by using the argument. Mirroring is enabled by default when Python runs in interactive mode. Args: level: int, the verbosity level; larger values specify increased verbosity; 0 means no logging. When reporting bugs, it is recommended to set this value to a larger number, like 10. alsologtostdout: bool, whether to also output log messages to .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\autograph\\utils\\ag_logging.py",
    "ast_data": "FunctionDef name:set_verbosity arg:level arg:alsologtostdout arguments arg arg Assign Assign Call"
  },
  {
    "library": "matplotlib",
    "name": "_wait_cursor_for_draw_cm",
    "source_code": "@contextmanager\ndef _wait_cursor_for_draw_cm(self):\n    self._draw_time, last_draw_time = (time.time(), getattr(self, '_draw_time', -np.inf))\n    if self._draw_time - last_draw_time > 1:\n        try:\n            self.canvas.set_cursor(tools.Cursors.WAIT)\n            yield\n        finally:\n            self.canvas.set_cursor(self._last_cursor)\n    else:\n        yield",
    "docstring": "Set the cursor to a wait cursor when drawing the canvas. In order to avoid constantly changing the cursor when the canvas changes frequently, do nothing if this context was triggered during the last second. (Optimally we'd prefer only setting the wait cursor if the *current* draw takes too long, but the current draw blocks the GUI thread).",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\backend_bases.py",
    "ast_data": "FunctionDef name:_wait_cursor_for_draw_cm arg:self arguments arg Assign Call Call If Compare Try Call Call"
  },
  {
    "library": "numpy",
    "name": "parse_structure",
    "source_code": "def parse_structure(astr, level):\n    if level == 0:\n        loopbeg = '/**begin repeat'\n        loopend = '/**end repeat**/'\n    else:\n        loopbeg = '/**begin repeat%d' % level\n        loopend = '/**end repeat%d**/' % level\n    ind = 0\n    line = 0\n    spanlist = []\n    while True:\n        start = astr.find(loopbeg, ind)\n        if start == -1:\n            break\n        start2 = astr.find('*/', start)\n        start2 = astr.find('\\n', start2)\n        fini1 = astr.find(loopend, start2)\n        fini2 = astr.find('\\n', fini1)\n        line += astr.count('\\n', ind, start2 + 1)\n        spanlist.append((start, start2 + 1, fini1, fini2 + 1, line))\n        line += astr.count('\\n', start2 + 1, fini2)\n        ind = fini2\n    spanlist.sort()\n    return spanlist",
    "docstring": "The returned line number is from the beginning of the string, starting at zero. Returns an empty list if no loops found.",
    "type": "function",
    "file_path": "numpy\\numpy\\distutils\\conv_template.py",
    "ast_data": "FunctionDef name:parse_structure arg:astr arg:level arguments arg arg If Compare Assign Assign Assign Assign Assign Assign Assign While Assign Call If Compare Assign Call Assign Call Assign Call Assign Call Call Call Call Assign Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_calculate_mem_consumed",
    "source_code": "def _calculate_mem_consumed(self) -> int:\n    mem = self.size * self.element_size\n    if self.device.type == 'cuda':\n        return math.ceil(mem / _PYTORCH_MIN_ALLOCATE) * _PYTORCH_MIN_ALLOCATE\n    return mem",
    "docstring": "Calculates the memory consumed by the tensor storage, considering device-specific allocation rules. Returns: int: The memory consumed in bytes.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\_tools\\mem_tracker.py",
    "ast_data": "FunctionDef name:_calculate_mem_consumed arg:self arguments arg Assign If Compare Return return:yes Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, statistics: calib_stats_pb2.CalibrationStatistics, calib_opts: stablehlo_quant_config_pb2.CalibrationOptions):\n    super().__init__(statistics, calib_opts)\n    hist_stats = statistics.histogram_statistics\n    self._bin_width = hist_stats.bin_width\n    self._lower_bound = hist_stats.lower_bound\n    self._hist_freq = np.array(hist_stats.hist_freq)\n    self._num_bins = len(self._hist_freq)\n    self._num_bits = 8\n    first_mid = self._lower_bound + self._bin_width / 2\n    last_mid = first_mid + (self._num_bins - 1) * self._bin_width\n    self._hist_mids = np.linspace(first_mid, last_mid, self._num_bins)",
    "docstring": "Builds histogram using statistics.histogram_statistics. lower_bound hist_mid v v |=========|=========|=========|=========|=========| bin width Args: statistics: Collected calibration statistics. calib_opts: Calibration options used for calculating min and max.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\compiler\\mlir\\quantization\\tensorflow\\calibrator\\calibration_algorithm.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:statistics arg:calib_opts arguments arg arg arg Call Call Assign Assign Assign Assign Call Assign Call Assign Assign Assign Assign Call"
  },
  {
    "library": "kornia",
    "name": "build_laplacian_pyramid",
    "source_code": "def build_laplacian_pyramid(input: Tensor, max_level: int, border_type: str='reflect', align_corners: bool=False) -> list[Tensor]:\n    KORNIA_CHECK_SHAPE(input, ['B', 'C', 'H', 'W'])\n    KORNIA_CHECK(isinstance(max_level, int) or max_level < 0, f'Invalid max_level, it must be a positive integer. Got: {max_level}')\n    h = input.size()[2]\n    w = input.size()[3]\n    require_padding = not (is_powerof_two(w) or is_powerof_two(h))\n    if require_padding:\n        padding = (0, find_next_powerof_two(w) - w, 0, find_next_powerof_two(h) - h)\n        input = pad(input, padding, 'reflect')\n    gaussian_pyramid: list[Tensor] = build_pyramid(input, max_level, border_type, align_corners)\n    laplacian_pyramid: list[Tensor] = []\n    for i in range(max_level - 1):\n        img_expand: Tensor = pyrup(gaussian_pyramid[i + 1], border_type, align_corners)\n        laplacian: Tensor = gaussian_pyramid[i] - img_expand\n        laplacian_pyramid.append(laplacian)\n    laplacian_pyramid.append(gaussian_pyramid[-1])\n    return laplacian_pyramid",
    "docstring": "Construct the Laplacian pyramid for a tensor image. The function constructs a vector of images and builds the Laplacian pyramid by recursively computing the difference after applying pyrUp to the adjacent layer in its Gaussian pyramid. See :cite: for more details. Args: input : the tensor to be used to construct the pyramid with shape :math:. max_level: 0-based index of the last (the smallest) pyramid layer. It must be non-negative. border_type: the padding mode to be applied before convolving. The expected modes are: `[(B, C, H, W), (B, C, H/2, W/2), ...]`",
    "type": "function",
    "file_path": "kornia\\kornia\\geometry\\transform\\pyramid.py",
    "ast_data": "FunctionDef name:build_laplacian_pyramid arg:input arg:max_level arg:border_type arg:align_corners arguments arg arg arg arg Call Call BoolOp Call Compare Assign Call Assign Call Assign BoolOp Call Call If Assign Call Call Assign Call Call For Call Call Call Call Return return:yes"
  },
  {
    "library": "numpy",
    "name": "_get_gamma",
    "source_code": "def _get_gamma(virtual_indexes, previous_indexes, method):\n    gamma = np.asanyarray(virtual_indexes - previous_indexes)\n    gamma = method['fix_gamma'](gamma, virtual_indexes)\n    return np.asanyarray(gamma, dtype=virtual_indexes.dtype)",
    "docstring": "Compute gamma (a.k.a 'm' or 'weight') for the linear interpolation of quantiles. virtual_indexes : array_like The indexes where the percentile is supposed to be found in the sorted sample. previous_indexes : array_like The floor values of virtual_indexes. interpolation : dict The interpolation method chosen, which may have a specific rule modifying gamma. gamma is usually the fractional part of virtual_indexes but can be modified by the interpolation method.",
    "type": "function",
    "file_path": "numpy\\numpy\\lib\\_function_base_impl.py",
    "ast_data": "FunctionDef name:_get_gamma arg:virtual_indexes arg:previous_indexes arg:method arguments arg arg arg Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "get_height",
    "source_code": "def get_height(self):\n    return self._height",
    "docstring": "Return the height of the rectangle.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\patches.py",
    "ast_data": "FunctionDef name:get_height arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, metagraph, ignore_colocation=True, ignore_user_placement=False):\n    self._metagraph = metagraph\n    self._item_graph = meta_graph_pb2.MetaGraphDef()\n    self._item_graph.CopyFrom(metagraph)\n    self._ignore_colocation = ignore_colocation\n    self._ignore_user_placement = ignore_user_placement\n    self._tf_item = None\n    self._BuildTFItem()",
    "docstring": "Creates an Item. Args: metagraph: a TensorFlow metagraph. ignore_colocation: if set, the tool will ignore all the colocation constraints generated by TensorFlow. ignore_user_placement: if set, all the placement annotations annotated in the metagraph will be ignored. Raises: ValueError: the metagraph is incomplete or invalid.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\grappler\\item.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:metagraph arg:ignore_colocation arg:ignore_user_placement arguments arg arg arg arg Assign Assign Call Call Assign Assign Assign Call"
  },
  {
    "library": "matplotlib",
    "name": "LogFormatterExponent",
    "source_code": "class LogFormatterExponent(LogFormatter):\n\n    def _num_to_string(self, x, vmin, vmax):\n        fx = math.log(x) / math.log(self._base)\n        if 1 <= abs(fx) <= 10000:\n            fd = math.log(vmax - vmin) / math.log(self._base)\n            s = self._pprint_val(fx, fd)\n        else:\n            s = f'{fx:1.0g}'\n        return s",
    "docstring": "Format values for log axis using ``.",
    "type": "class",
    "file_path": "matplotlib\\lib\\matplotlib\\ticker.py",
    "ast_data": "ClassDef name:LogFormatterExponent FunctionDef name:_num_to_string arg:self arg:x arg:vmin arg:vmax arguments arg arg arg arg Assign Call Call If Compare Call Assign Call Call Assign Call Assign Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "transform",
    "source_code": "def transform(self, X):\n    check_is_fitted(self)\n    X = validate_data(self, X, accept_sparse='csr', reset=False)\n    kernel_params = self._get_kernel_params()\n    embedded = pairwise_kernels(X, self.components_, metric=self.kernel, filter_params=True, n_jobs=self.n_jobs, **kernel_params)\n    return np.dot(embedded, self.normalization_.T)",
    "docstring": "Apply feature map to X. Computes an approximate feature map using the kernel between some training points and X. Parameters ---------- X : array-like of shape (n_samples, n_features) Data to transform. Returns ------- X_transformed : ndarray of shape (n_samples, n_components) Transformed data.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\kernel_approximation.py",
    "ast_data": "FunctionDef name:transform arg:self arg:X arguments arg arg Call Assign Call Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "numpy",
    "name": "matrix_transpose",
    "source_code": "@array_function_dispatch(_matrix_transpose_dispatcher)\ndef matrix_transpose(x, /):\n    x = asanyarray(x)\n    if x.ndim < 2:\n        raise ValueError(f'Input array must be at least 2-dimensional, but it is {x.ndim}')\n    return swapaxes(x, -1, -2)",
    "docstring": "Transposes a matrix (or a stack of matrices) `` matrices. Returns ------- out : ndarray An array containing the transpose for each matrix and having shape (..., N, M). See Also -------- transpose : Generic transpose method. Examples -------- >>> import numpy as np >>> np.matrix_transpose([[1, 2], [3, 4]]) array([[1, 3], [2, 4]]) >>> np.matrix_transpose([[[1, 2], [3, 4]], [[5, 6], [7, 8]]]) array([[[1, 3], [2, 4]], [[5, 7], [6, 8]]])",
    "type": "function",
    "file_path": "numpy\\numpy\\_core\\fromnumeric.py",
    "ast_data": "FunctionDef name:matrix_transpose arguments arg Assign Call If Compare Raise Call Return return:yes Call Call"
  },
  {
    "library": "numpy",
    "name": "write_array",
    "source_code": "@set_module('numpy.lib.format')\ndef write_array(fp, array, version=None, allow_pickle=True, pickle_kwargs=None):\n    _check_version(version)\n    _write_array_header(fp, header_data_from_array_1_0(array), version)\n    if array.itemsize == 0:\n        buffersize = 0\n    else:\n        buffersize = max(16 * 1024 ** 2 // array.itemsize, 1)\n    dtype_class = type(array.dtype)\n    if array.dtype.hasobject or not dtype_class._legacy:\n        if not allow_pickle:\n            if array.dtype.hasobject:\n                raise ValueError('Object arrays cannot be saved when allow_pickle=False')\n            if not dtype_class._legacy:\n                raise ValueError('User-defined dtypes cannot be saved when allow_pickle=False')\n        if pickle_kwargs is None:\n            pickle_kwargs = {}\n        pickle.dump(array, fp, protocol=4, **pickle_kwargs)\n    elif array.flags.f_contiguous and (not array.flags.c_contiguous):\n        if isfileobj(fp):\n            array.T.tofile(fp)\n        else:\n            for chunk in numpy.nditer(array, flags=['external_loop', 'buffered', 'zerosize_ok'], buffersize=buffersize, order='F'):\n                fp.write(chunk.tobytes('C'))\n    elif isfileobj(fp):\n        array.tofile(fp)\n    else:\n        for chunk in numpy.nditer(array, flags=['external_loop', 'buffered', 'zerosize_ok'], buffersize=buffersize, order='C'):\n            fp.write(chunk.tobytes('C'))",
    "docstring": "Write an array to an NPY file, including a header. If the array is neither C-contiguous nor Fortran-contiguous AND the file_like object is not a real file object, this function will have to copy data in memory. Parameters ---------- fp : file_like object An open, writable file object, or similar object with a `` method. array : ndarray The array to write to disk. version : (int, int) or None, optional The version number of the format. None means use the oldest supported version that is able to store the data. Default: None allow_pickle : bool, optional Whether to allow writing pickled data. Default: True pickle_kwargs : dict, optional Additional keyword arguments to pass to pickle.dump, excluding 'protocol'. These are only useful when pickling objects in object arrays to Python 2 compatible format. Raises ------ ValueError If the array cannot be persisted. This includes the case of allow_pickle=False and array being an object array. Various other errors If the array contains Python objects as part of its dtype, the process of pickling them may raise various errors if the objects are not picklable.",
    "type": "function",
    "file_path": "numpy\\numpy\\lib\\_format_impl.py",
    "ast_data": "FunctionDef name:write_array arg:fp arg:array arg:version arg:allow_pickle arg:pickle_kwargs arguments arg arg arg arg arg Call Call Call If Compare Assign Assign Call Assign Call If BoolOp If If Raise Call If Raise Call If Compare Assign Call If BoolOp If Call Call For Call Call Call If Call Call For Call Call Call Call"
  },
  {
    "library": "kornia",
    "name": "perform_keep_shape_image",
    "source_code": "def perform_keep_shape_image(f: Callable[..., Tensor]) -> Callable[..., Tensor]:\n\n    @wraps(f)\n    def _wrapper(input: Tensor, *args: Any, **kwargs: Any) -> Tensor:\n        if not isinstance(input, Tensor):\n            raise TypeError(f'Input input type is not a Tensor. Got {type(input)}')\n        if input.shape.numel() == 0:\n            raise ValueError('Invalid input tensor, it is empty.')\n        input_shape = input.shape\n        input = _to_bchw(input)\n        output = f(input, *args, **kwargs)\n        if len(input_shape) == 3:\n            output = output[0]\n        if len(input_shape) == 2:\n            output = output[0, 0]\n        if len(input_shape) > 4:\n            output = output.view(*input_shape[:-3] + output.shape[-3:])\n        return output\n    return _wrapper",
    "docstring": "Apply to an image of arbitrary leading dimensions . It works by first viewing the image as , applying the function and re-viewing the image as original shape.",
    "type": "function",
    "file_path": "kornia\\kornia\\utils\\image.py",
    "ast_data": "FunctionDef name:perform_keep_shape_image arg:f arguments arg FunctionDef name:_wrapper arg:input arguments arg arg arg If Call Raise Call Call If Compare Call Raise Call Assign Assign Call Assign Call If Compare Call Assign If Compare Call Assign If Compare Call Assign Call Return return:yes Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "ChainDataset",
    "source_code": "class ChainDataset(IterableDataset):\n\n    def __init__(self, datasets: Iterable[Dataset]) -> None:\n        super().__init__()\n        self.datasets = datasets\n\n    def __iter__(self):\n        for d in self.datasets:\n            assert isinstance(d, IterableDataset), 'ChainDataset only supports IterableDataset'\n            yield from d\n\n    def __len__(self):\n        total = 0\n        for d in self.datasets:\n            assert isinstance(d, IterableDataset), 'ChainDataset only supports IterableDataset'\n            total += len(d)\n        return total",
    "docstring": "Dataset for chaining multiple :class: s. This class is useful to assemble different existing dataset streams. The chaining operation is done on-the-fly, so concatenating large-scale datasets with this class will be efficient. Args: datasets (iterable of IterableDataset): datasets to be chained together",
    "type": "class",
    "file_path": "pytorch\\torch\\utils\\data\\dataset.py",
    "ast_data": "ClassDef name:ChainDataset FunctionDef name:__init__ arg:self arg:datasets arguments arg arg Call Call Assign FunctionDef name:__iter__ arg:self arguments arg For Call FunctionDef name:__len__ arg:self arguments arg Assign For Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "list_outputs",
    "source_code": "def list_outputs(self, args, screen_info=None):\n    _ = screen_info\n    parsed = self._arg_parsers['list_outputs'].parse_args(args)\n    output = self._list_inputs_or_outputs(parsed.recursive, parsed.node_name, parsed.depth, parsed.control, parsed.op_type, do_outputs=True)\n    node_name = debug_graphs.get_node_name(parsed.node_name)\n    _add_main_menu(output, node_name=node_name, enable_list_outputs=False)\n    return output",
    "docstring": "Command handler for inputs. Show inputs to a given node. Args: args: Command-line arguments, excluding the command prefix, as a list of str. screen_info: Optional dict input containing screen information such as cols. Returns: Output text lines as a RichTextLines object.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\debug\\cli\\analyzer_cli.py",
    "ast_data": "FunctionDef name:list_outputs arg:self arg:args arg:screen_info arguments arg arg arg Assign Assign Call Assign Call Assign Call Call Return return:yes"
  },
  {
    "library": "pandas",
    "name": "register",
    "source_code": "def register(self, dtype: type_t[ExtensionDtype]) -> None:\n    if not issubclass(dtype, ExtensionDtype):\n        raise ValueError('can only register pandas extension dtypes')\n    self.dtypes.append(dtype)",
    "docstring": "Parameters ---------- dtype : ExtensionDtype class",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\dtypes\\base.py",
    "ast_data": "FunctionDef name:register arg:self arg:dtype arguments arg arg If Call Raise Call Call"
  },
  {
    "library": "pandas",
    "name": "process_sampling_size",
    "source_code": "def process_sampling_size(n: int | None, frac: float | None, replace: bool) -> int | None:\n    if n is None and frac is None:\n        n = 1\n    elif n is not None and frac is not None:\n        raise ValueError('Please enter a value for `frac` OR `n`, not both')\n    elif n is not None:\n        if n < 0:\n            raise ValueError('A negative number of rows requested. Please provide `n` >= 0.')\n        if n % 1 != 0:\n            raise ValueError('Only integers accepted as `n` values')\n    else:\n        assert frac is not None\n        if frac > 1 and (not replace):\n            raise ValueError('Replace has to be set to `True` when upsampling the population `frac` > 1.')\n        if frac < 0:\n            raise ValueError('A negative number of rows requested. Please provide `frac` >= 0.')\n    return n",
    "docstring": "Process and validate the and arguments to and . Returns None if should be used (variable sampling sizes), otherwise returns the constant sampling size.",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\sample.py",
    "ast_data": "FunctionDef name:process_sampling_size arg:n arg:frac arg:replace arguments arg arg arg If BoolOp Compare Compare Assign If BoolOp Compare Compare Raise Call If Compare If Compare Raise Call If Compare Raise Call Compare If BoolOp Compare Raise Call If Compare Raise Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "restore",
    "source_code": "def restore(self, save_path, options=None):\n    self._checkpoint_options = copy.copy(options) if options else self._checkpoint_options\n    if self._checkpoint_options:\n        self._checkpoint_options.experimental_enable_async_checkpoint = False\n    self._queue.join()\n    status = self.checkpointer().restore(save_path, self._checkpoint_options)\n    return status",
    "docstring": "Restore the checkpointed variables. Args: save_path: The full name of the checkpoint file to be restored. options: CheckpointOption instance. Returns: A load status object, which can be used to make assertions about the status of a checkpoint restoration. See tf.train.Checkpoint.restore() for more details.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\checkpoint\\async_checkpoint_helper.py",
    "ast_data": "FunctionDef name:restore arg:self arg:save_path arg:options arguments arg arg arg Assign Call If Assign Call Assign Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_tf_tensor_list_set_item",
    "source_code": "def _tf_tensor_list_set_item(target, i, x):\n    return list_ops.tensor_list_set_item(target, i, x)",
    "docstring": "Overload of set_item that stages a Tensor list update.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\autograph\\operators\\slices.py",
    "ast_data": "FunctionDef name:_tf_tensor_list_set_item arg:target arg:i arg:x arguments arg arg arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_help_handler",
    "source_code": "def _help_handler(self, args, screen_info=None):\n    _ = screen_info\n    if not args:\n        return self.get_help()\n    elif len(args) == 1:\n        return self.get_help(args[0])\n    else:\n        return RichTextLines(['ERROR: help takes only 0 or 1 input argument.'])",
    "docstring": "Command handler for \"help\". \"help\" is a common command that merits built-in support from this class. Args: args: Command line arguments to \"help\" (not including \"help\" itself). screen_info: (dict) Information regarding the screen, e.g., the screen width in characters: {\"cols\": 80} Returns: (RichTextLines) Screen text output.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\debug\\cli\\debugger_cli_common.py",
    "ast_data": "FunctionDef name:_help_handler arg:self arg:args arg:screen_info arguments arg arg arg Assign If Return return:yes Call If Compare Call Return return:yes Call Return return:yes Call"
  },
  {
    "library": "django",
    "name": "last",
    "source_code": "def last(self):\n    if self.ordered:\n        queryset = self.reverse()\n    else:\n        self._check_ordering_first_last_queryset_aggregation(method='last')\n        queryset = self.order_by('-pk')\n    for obj in queryset[:1]:\n        return obj",
    "docstring": "Return the last object of a query or None if no match is found.",
    "type": "method",
    "file_path": "django\\django\\db\\models\\query.py",
    "ast_data": "FunctionDef name:last arg:self arguments arg If Assign Call Call Assign Call For Return return:yes"
  },
  {
    "library": "cherrypy",
    "name": "ExceptionTrapper",
    "source_code": "class ExceptionTrapper(object):\n\n    def __init__(self, nextapp, throws=(KeyboardInterrupt, SystemExit)):\n        self.nextapp = nextapp\n        self.throws = throws\n\n    def __call__(self, environ, start_response):\n        return _TrappedResponse(self.nextapp, environ, start_response, self.throws)",
    "docstring": "WSGI middleware that traps exceptions.",
    "type": "class",
    "file_path": "cherrypy\\cherrypy\\_cpwsgi.py",
    "ast_data": "ClassDef name:ExceptionTrapper FunctionDef name:__init__ arg:self arg:nextapp arg:throws arguments arg arg arg Assign Assign FunctionDef name:__call__ arg:self arg:environ arg:start_response arguments arg arg arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "Wrapper",
    "source_code": "@function.Defun(*_GetInputDtypes(func), func_name='%s_Wrapper' % func.name)\ndef Wrapper(*args):\n    result = func(*args)\n    extra_args = tuple(function.get_extra_args())\n    if isinstance(result, ops.Operation):\n        return extra_args\n    elif not isinstance(result, (list, tuple)):\n        return (result,) + extra_args\n    else:\n        return result + type(result)(extra_args)",
    "docstring": "A wrapper that handles loop-carried captured inputs.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\functional_ops.py",
    "ast_data": "FunctionDef name:Wrapper arguments arg Assign Call Assign Call Call If Call Return return:yes If Call Return return:yes Return return:yes Call Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_maybe_convert_labels",
    "source_code": "def _maybe_convert_labels(y_true):\n    are_zeros = math_ops.equal(y_true, 0)\n    are_ones = math_ops.equal(y_true, 1)\n    is_binary = math_ops.reduce_all(math_ops.logical_or(are_zeros, are_ones))\n\n    def _convert_binary_labels():\n        return 2.0 * y_true - 1.0\n    updated_y_true = smart_cond.smart_cond(is_binary, _convert_binary_labels, lambda: y_true)\n    return updated_y_true",
    "docstring": "Converts binary labels into -1/1.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\losses.py",
    "ast_data": "FunctionDef name:_maybe_convert_labels arg:y_true arguments arg Assign Call Assign Call Assign Call Call FunctionDef name:_convert_binary_labels arguments Return return:yes Assign Call arguments Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "op",
    "source_code": "def op(self):\n    return cond.cond(math_ops.equal(self._num_remaining, 0), lambda: check_ops.assert_equal(self._cluster_centers_initialized, True), self._initialize)",
    "docstring": "Returns the cluster initializer op.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\clustering_ops.py",
    "ast_data": "FunctionDef name:op arg:self arguments arg Return return:yes Call Call arguments Call"
  },
  {
    "library": "pytorch",
    "name": "replace_offset",
    "source_code": "def replace_offset(self, expr: sympy.Expr, replacement: sympy.Expr, symt: SymT) -> sympy.Expr:\n    roffset = TritonSymbols.block_offsets[symt]\n    return sympy_subs(expr, {roffset: replacement})",
    "docstring": "Replaces instances of {symt}_offset with the new expression.",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\codegen\\triton.py",
    "ast_data": "FunctionDef name:replace_offset arg:self arg:expr arg:replacement arg:symt arguments arg arg arg arg Assign Return return:yes Call"
  },
  {
    "library": "scrapy",
    "name": "has_pending_requests",
    "source_code": "@abstractmethod\ndef has_pending_requests(self) -> bool:\n    raise NotImplementedError",
    "docstring": "`` otherwise",
    "type": "method",
    "file_path": "scrapy\\scrapy\\core\\scheduler.py",
    "ast_data": "FunctionDef name:has_pending_requests arg:self arguments arg Raise"
  },
  {
    "library": "tensorflow",
    "name": "from_tensors",
    "source_code": "@doc_controls.do_not_doc_inheritable\ndef from_tensors(self, tensors):\n    components = nest.map_structure(lambda spec: spec.from_tensors(tensors), self._component_specs)\n    return self._from_components(components)",
    "docstring": "See TraceType base class for details. Do not override.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\type_spec.py",
    "ast_data": "FunctionDef name:from_tensors arg:self arg:tensors arguments arg arg Assign Call arguments arg Call Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "decision_function",
    "source_code": "def decision_function(self, X):\n    return super().decision_function(X)",
    "docstring": "Apply decision function to an array of samples. The decision function is equal (up to a constant factor) to the log-posterior of the model, i.e. . In a binary classification setting this instead corresponds to the difference . See :ref:. Parameters ---------- X : array-like of shape (n_samples, n_features) Array of samples (test vectors). Returns ------- C : ndarray of shape (n_samples,) or (n_samples, n_classes) Decision function values related to each class, per sample. In the two-class case, the shape is , giving the log likelihood ratio of the positive class.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\discriminant_analysis.py",
    "ast_data": "FunctionDef name:decision_function arg:self arg:X arguments arg arg Return return:yes Call Call"
  },
  {
    "library": "kornia",
    "name": "init_weights_vit_timm",
    "source_code": "def init_weights_vit_timm(module: nn.Module, name: str=''):\n    if isinstance(module, nn.Linear):\n        trunc_normal_(module.weight, std=0.02)\n        if module.bias is not None:\n            nn.init.zeros_(module.bias)",
    "docstring": "ViT weight initialization, original timm impl (for reproducibility).",
    "type": "function",
    "file_path": "kornia\\kornia\\feature\\dedode\\transformer\\dinov2.py",
    "ast_data": "FunctionDef name:init_weights_vit_timm arg:module arg:name arguments arg arg If Call Call If Compare Call"
  },
  {
    "library": "pytorch",
    "name": "get_rng_state",
    "source_code": "def get_rng_state(device: Union[int, str, torch.device]='mtia') -> Tensor:\n    warnings.warn('get_rng_state is not implemented in torch.mtia', UserWarning, stacklevel=2)\n    return torch.zeros([1], dtype=torch.uint8, device=device)",
    "docstring": "Returns the random number generator state as a ByteTensor. Args: device (torch.device or int, optional): The device to return the RNG state of. Default: ``, the current mtia device).",
    "type": "function",
    "file_path": "pytorch\\torch\\mtia\\__init__.py",
    "ast_data": "FunctionDef name:get_rng_state arg:device arguments arg Call Return return:yes Call"
  },
  {
    "library": "kornia",
    "name": "vgg13_bn",
    "source_code": "def vgg13_bn(*, weights: Optional[Any]=None, **kwargs: Any) -> VGG:\n    return _vgg('B', True, weights, **kwargs)",
    "docstring": "VGG-13-BN from __. Args: weights (:class:, optional): The pretrained weights to use. See :class: below for more details, and possible values. By default, no pre-trained weights are used. progress (bool, optional): If True, displays a progress bar of the download to stderr. Default is True. **kwargs: parameters passed to the `source code `_ for more details about this class. .. autoclass:: torchvision.models.VGG13_BN_Weights :members:",
    "type": "function",
    "file_path": "kornia\\kornia\\feature\\dedode\\vgg.py",
    "ast_data": "FunctionDef name:vgg13_bn arguments arg arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "fx_node",
    "source_code": "@property\ndef fx_node(self) -> torch.fx.Node:\n    return self._node",
    "docstring": "Returns the fx.Node this instance represents.",
    "type": "method",
    "file_path": "pytorch\\torch\\onnx\\_internal\\fx\\passes\\modularization.py",
    "ast_data": "FunctionDef name:fx_node arg:self arguments arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "recv_object_list",
    "source_code": "@_exception_logger\ndef recv_object_list(object_list: list[Any], src: Optional[int]=None, group: Optional[ProcessGroup]=None, device: Optional[torch.device]=None, group_src: Optional[int]=None):\n    if _rank_not_in_group(group):\n        _warn_not_in_group('recv_object_list')\n        return -1\n    current_device = device or _get_object_coll_device(group)\n    object_sizes_tensor = torch.empty(len(object_list), dtype=torch.long, device=current_device)\n    rank_sizes = recv(object_sizes_tensor, src=src, group=group, group_src=group_src)\n    object_tensor = torch.empty(torch.sum(object_sizes_tensor).item(), dtype=torch.uint8, device=current_device)\n    rank_objects = recv(object_tensor, src=src, group=group, group_src=group_src)\n    assert rank_sizes == rank_objects, 'Mismatch in return ranks for object sizes and objects.'\n    offset = 0\n    for i, obj_size in enumerate(object_sizes_tensor):\n        obj_view = object_tensor[offset:offset + obj_size]\n        obj_view = obj_view.type(torch.uint8)\n        offset += obj_size\n        object_list[i] = _tensor_to_object(obj_view, obj_size, group)\n    return rank_objects",
    "docstring": "Receives picklable objects in `recvobject_collectivesrecv_object_listrecv_object_listrecv` instead. Example:: >>> # xdoctest: +SKIP(\"need process group init\") >>> # Note: Process group initialization omitted on each rank. >>> import torch.distributed as dist >>> # Assumes backend is not NCCL >>> device = torch.device(\"cpu\") >>> if dist.get_rank() == 0: >>> # Assumes world_size of 2. >>> objects = [\"foo\", 12, {1: 2}] # any picklable object >>> dist.send_object_list(objects, dst=1, device=device) >>> else: >>> objects = [None, None, None] >>> dist.recv_object_list(objects, src=0, device=device) >>> objects ['foo', 12, {1: 2}]",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\distributed_c10d.py",
    "ast_data": "FunctionDef name:recv_object_list arg:object_list arg:src arg:group arg:device arg:group_src arguments arg arg arg arg arg If Call Call Return return:yes Assign BoolOp Call Assign Call Call Assign Call Assign Call Call Call Assign Call Compare Assign For Call Assign Assign Call Assign Call Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "_serialize",
    "source_code": "def _serialize(self):\n    return self._requests",
    "docstring": "Serialize the object. Returns ------- obj : dict A serialized version of the instance in the form of a dictionary.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\utils\\_metadata_requests.py",
    "ast_data": "FunctionDef name:_serialize arg:self arguments arg Return return:yes"
  },
  {
    "library": "django",
    "name": "clone",
    "source_code": "def clone(self):\n    _, args, kwargs = self.deconstruct()\n    return self.__class__(*args, **kwargs)",
    "docstring": "Create a copy of this Index.",
    "type": "method",
    "file_path": "django\\django\\db\\models\\indexes.py",
    "ast_data": "FunctionDef name:clone arg:self arguments arg Assign Call Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "yule",
    "source_code": "def yule(u, v, w=None):\n    u = _validate_vector(u)\n    v = _validate_vector(v)\n    if w is not None:\n        w = _validate_weights(w)\n    nff, nft, ntf, ntt = _nbool_correspond_all(u, v, w=w)\n    half_R = ntf * nft\n    if half_R == 0:\n        return 0.0\n    else:\n        return float(2.0 * half_R / (ntt * nff + half_R))",
    "docstring": "Compute the Yule dissimilarity between two boolean 1-D arrays. The Yule dissimilarity is defined as .. math:: \\frac{R}{c_{TT} * c_{FF} + \\frac{R}{2}} where :math: is the number of occurrences of :math: and :math: for :math:`k >> from scipy.spatial import distance >>> distance.yule([1, 0, 0], [0, 1, 0]) 2.0 >>> distance.yule([1, 1, 0], [0, 1, 0]) 0.0",
    "type": "function",
    "file_path": "scipy\\scipy\\spatial\\distance.py",
    "ast_data": "FunctionDef name:yule arg:u arg:v arg:w arguments arg arg arg Assign Call Assign Call If Compare Assign Call Assign Call Assign If Compare Return return:yes Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "triton_compute_type",
    "source_code": "def triton_compute_type(dtype: torch.dtype) -> str:\n    return triton_type(upcast_compute_type(dtype))",
    "docstring": "Convert torch.dtype to triton type and upcast [b]float16 to float32",
    "type": "function",
    "file_path": "pytorch\\torch\\_inductor\\codegen\\triton.py",
    "ast_data": "FunctionDef name:triton_compute_type arg:dtype arguments arg Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "cache_variable_reads",
    "source_code": "@contextlib.contextmanager\ndef cache_variable_reads():\n    try:\n        if caching_scope_local.in_caching_scope():\n            raise ValueError('cache_variable_reads scope cannot be nested')\n        caching_scope_local.enter_scope()\n        yield\n    finally:\n        caching_scope_local.exit_scope()",
    "docstring": "Scope for caching variable reads for AggregatingVariable. The variable reads for AggregatingVariable inside this scope are cached. i.e. the first read of variable reads the value from possibly remote handle, but subsequent reads are returned using local cached value. For example: strategy = ParameterServerStrategy... with strategy.scope(): # Variable v is of AggregatingVariable type with actual variable residing # on PS. v = tf.Variable(1.0) with distribute_utils.cache_variable_reads(): v.read_value() # Reads value 1.0 v.assign(constant_op.constant(5.0)) # v changes to 5.0 t1 = v.read_value() t2 = v.read_value() # Both t1 & t2 return cached value 1.0 from local CPU. Notes about cache_variable_reads scope: 1. Nesting of scope cache_variable_reads() is not supported 2. And when caching scope is enabled, the thread enabling the cache and mirrored_run._MirroredReplicaThread threads spawned from it will have caching enabled. Yields: A context for caching variables.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\distribute_utils.py",
    "ast_data": "FunctionDef name:cache_variable_reads arguments Try If Call Raise Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "output_spec",
    "source_code": "@cached_property\ndef output_spec(self) -> DTensorSpec:\n    if isinstance(self.output_specs, DTensorSpec):\n        return self.output_specs\n    else:\n        raise ValueError(f'function output_spec expects a single DTensorSpec but got: {self.output_specs}')",
    "docstring": "This function requires that the strategy have exactly one DTensorSpec as the output spec. If the output_specs is a tuple, we throw an exception.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\tensor\\_op_schema.py",
    "ast_data": "FunctionDef name:output_spec arg:self arguments arg If Call Return return:yes Raise Call"
  },
  {
    "library": "django",
    "name": "configured",
    "source_code": "@property\ndef configured(self):\n    return self._wrapped is not empty",
    "docstring": "Return True if the settings have already been configured.",
    "type": "method",
    "file_path": "django\\django\\conf\\__init__.py",
    "ast_data": "FunctionDef name:configured arg:self arguments arg Return return:yes Compare"
  },
  {
    "library": "matplotlib",
    "name": "view_limits",
    "source_code": "def view_limits(self, vmin, vmax):\n    if vmax < vmin:\n        vmin, vmax = (vmax, vmin)\n    if vmin == vmax:\n        vmin -= 1\n        vmax += 1\n    if mpl.rcParams['axes.autolimit_mode'] == 'round_numbers':\n        exponent, remainder = divmod(math.log10(vmax - vmin), math.log10(max(self.numticks - 1, 1)))\n        exponent -= remainder < 0.5\n        scale = max(self.numticks - 1, 1) ** (-exponent)\n        vmin = math.floor(scale * vmin) / scale\n        vmax = math.ceil(scale * vmax) / scale\n    return mtransforms.nonsingular(vmin, vmax)",
    "docstring": "Try to choose the view limits intelligently.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\ticker.py",
    "ast_data": "FunctionDef name:view_limits arg:self arg:vmin arg:vmax arguments arg arg arg If Compare Assign If Compare If Compare Assign Call Call Call Call Compare Assign Call Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "retrieve_collected_errors",
    "source_code": "def retrieve_collected_errors():\n    serialized_message_list = wrap_converter.wrapped_retrieve_collected_errors()\n    return list(map(converter_error_data_pb2.ConverterErrorData.FromString, serialized_message_list))",
    "docstring": "Returns and clears the list of collected errors in ErrorCollector. The RetrieveCollectedErrors function in C++ returns a list of serialized proto messages. This function will convert them to ConverterErrorData instances. Returns: A list of ConverterErrorData.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\lite\\python\\metrics\\wrapper\\metrics_wrapper.py",
    "ast_data": "FunctionDef name:retrieve_collected_errors arguments Assign Call Return return:yes Call Call"
  },
  {
    "library": "pandas",
    "name": "read_table",
    "source_code": "def read_table(self, table_name: str, index_col: str | list[str] | None=None, coerce_float: bool=True, parse_dates=None, columns=None, schema: str | None=None, chunksize: int | None=None, dtype_backend: DtypeBackend | Literal['numpy']='numpy') -> DataFrame | Iterator[DataFrame]:\n    self.meta.reflect(bind=self.con, only=[table_name], views=True)\n    table = SQLTable(table_name, self, index=index_col, schema=schema)\n    if chunksize is not None:\n        self.returns_generator = True\n    return table.read(self.exit_stack, coerce_float=coerce_float, parse_dates=parse_dates, columns=columns, chunksize=chunksize, dtype_backend=dtype_backend)",
    "docstring": "Read SQL database table into a DataFrame. Parameters ---------- table_name : str Name of SQL table in database. index_col : string, optional, default: None Column to set as index. coerce_float : bool, default True Attempts to convert values of non-string, non-numeric objects (like decimal.Decimal) to floating point. This can result in loss of precision. parse_dates : list or dict, default: None - List of column names to parse as dates. - Dict of `pandas.to_datetimechunksizeDataFrameDataFrameArrowDtypeDataFrame` .. versionadded:: 2.0 Returns ------- DataFrame See Also -------- pandas.read_sql_table SQLDatabase.read_query",
    "type": "method",
    "file_path": "pandas\\pandas\\io\\sql.py",
    "ast_data": "FunctionDef name:read_table arg:self arg:table_name arg:index_col arg:coerce_float arg:parse_dates arg:columns arg:schema arg:chunksize arg:dtype_backend arguments arg arg arg arg arg arg arg arg arg Call Assign Call If Compare Assign Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "__init__",
    "source_code": "def __init__(self, onnx_registry: OnnxRegistry):\n    self.onnx_registry = onnx_registry",
    "docstring": "Initialize the ONNX Function dispatcher. Args: onnx_registry: The ONNX registry.",
    "type": "method",
    "file_path": "pytorch\\torch\\onnx\\_internal\\fx\\onnxfunction_dispatcher.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:onnx_registry arguments arg arg Assign"
  },
  {
    "library": "scikit-learn",
    "name": "_deprecate_force_all_finite",
    "source_code": "def _deprecate_force_all_finite(force_all_finite, ensure_all_finite):\n    if force_all_finite != 'deprecated':\n        warnings.warn(\"'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.\", FutureWarning)\n        if ensure_all_finite is not None:\n            raise ValueError(\"'force_all_finite' and 'ensure_all_finite' cannot be used together. Pass `ensure_all_finite` only.\")\n        return force_all_finite\n    if ensure_all_finite is None:\n        return True\n    return ensure_all_finite",
    "docstring": "Helper to deprecate force_all_finite in favor of ensure_all_finite.",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\utils\\deprecation.py",
    "ast_data": "FunctionDef name:_deprecate_force_all_finite arg:force_all_finite arg:ensure_all_finite arguments arg arg If Compare Call If Compare Raise Call Return return:yes If Compare Return return:yes Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "SACGreedyOrderMeta",
    "source_code": "@dataclass\nclass SACGreedyOrderMeta:\n    recomputed_ops: set[int]\n    stored_ops: set[int]\n    inplace_op_groups: dict[int, set[int]]\n    random_ops_group: dict[int, set[int]]\n    msps_meta: list[MSPS]",
    "docstring": "Stores metadata for Greedy-order SAC. Attributes: recomputed_ops (set[int]): Set of operator indices to be recomputed. stored_ops (set[int]): Set of operator indices to be stored. inplace_op_groups (dict[int, set[int]]): Dictionary of inplace operator groups from group-head to operators. random_ops_group (dict[int, set[int]]): Dictionary of random op group head to random ops. msps_meta (list[MSPS]): List of Memory and Runtime Statistics for operators.",
    "type": "class",
    "file_path": "pytorch\\torch\\distributed\\_tools\\sac_estimator.py",
    "ast_data": "ClassDef name:SACGreedyOrderMeta"
  },
  {
    "library": "tensorflow",
    "name": "copy_handle_data",
    "source_code": "def copy_handle_data(source_t, target_t):\n    if target_t.dtype == dtypes.resource or target_t.dtype == dtypes.variant:\n        handle_data = get_handle_data(source_t)\n        set_handle_data(target_t, handle_data)",
    "docstring": "Copies HandleData for variant and resource type tensors if available. The CppShapeInferenceResult::HandleData proto contains information about the shapes and types of the element tensors of resource/variant type tensors. We need to copy this across function boundaries, i.e., when capturing a placeholder or when returning a function tensor as output. If we don't do this the element tensors will have unknown shapes, e.g., if a TensorList variant tensor is captured as a placeholder, elements popped from that list would have unknown shape. Args: source_t: The tensor to copy HandleData from. target_t: The tensor to copy HandleData to.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\handle_data_util.py",
    "ast_data": "FunctionDef name:copy_handle_data arg:source_t arg:target_t arguments arg arg If BoolOp Compare Compare Assign Call Call"
  },
  {
    "library": "tensorflow",
    "name": "executing_eagerly",
    "source_code": "def executing_eagerly(self):\n    return self._thread_local_data.is_eager",
    "docstring": "Returns True if current thread has eager executing enabled.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\context.py",
    "ast_data": "FunctionDef name:executing_eagerly arg:self arguments arg Return return:yes"
  },
  {
    "library": "kornia",
    "name": "update_position_encoding_size",
    "source_code": "def update_position_encoding_size(self, max_shape: Tuple[int, int]) -> None:\n    self.pe = self._create_position_encoding(max_shape).to(self.pe.device)",
    "docstring": "Update position encoding to new max_shape. For 1/8 feature map (which is standard): If the input image size is H, W (both divisible by 8), the max_shape should be (H//8, W//8).",
    "type": "method",
    "file_path": "kornia\\kornia\\feature\\loftr\\utils\\position_encoding.py",
    "ast_data": "FunctionDef name:update_position_encoding_size arg:self arg:max_shape arguments arg arg Assign Call Call"
  },
  {
    "library": "kornia",
    "name": "x",
    "source_code": "@property\ndef x(self) -> Tensor:\n    return self.data[..., 1]",
    "docstring": "Return the :math: with shape :math:.",
    "type": "method",
    "file_path": "kornia\\kornia\\geometry\\quaternion.py",
    "ast_data": "FunctionDef name:x arg:self arguments arg Return return:yes"
  },
  {
    "library": "scipy",
    "name": "getrowview",
    "source_code": "def getrowview(self, i):\n    new = self._lil_container((1, self.shape[1]), dtype=self.dtype)\n    new.rows[0] = self.rows[i]\n    new.data[0] = self.data[i]\n    return new",
    "docstring": "Returns a view of the 'i'th row (without copying).",
    "type": "method",
    "file_path": "scipy\\scipy\\sparse\\_lil.py",
    "ast_data": "FunctionDef name:getrowview arg:self arg:i arguments arg arg Assign Call Assign Assign Return return:yes"
  },
  {
    "library": "sphinx",
    "name": "get_target_uri",
    "source_code": "def get_target_uri(self, docname: str, typ: str | None=None) -> str:\n    raise NotImplementedError",
    "docstring": "Return the target URI for a document name. *typ* can be used to qualify the link characteristic for individual builders.",
    "type": "method",
    "file_path": "sphinx\\sphinx\\builders\\__init__.py",
    "ast_data": "FunctionDef name:get_target_uri arg:self arg:docname arg:typ arguments arg arg arg Raise"
  },
  {
    "library": "seaborn",
    "name": "reset_orig",
    "source_code": "def reset_orig():\n    from . import _orig_rc_params\n    mpl.rcParams.update(_orig_rc_params)",
    "docstring": "Restore all RC params to original settings (respects custom rc).",
    "type": "function",
    "file_path": "seaborn\\seaborn\\rcmod.py",
    "ast_data": "FunctionDef name:reset_orig arguments Call"
  },
  {
    "library": "tensorflow",
    "name": "epsilon",
    "source_code": "@dispatch.add_dispatch_support\ndef epsilon():\n    return _EPSILON",
    "docstring": "Returns the value of the fuzz factor used in numeric expressions. Returns: A float. Example: >>> tf.keras.backend.epsilon() 1e-07",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\backend_config.py",
    "ast_data": "FunctionDef name:epsilon arguments Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_saveable_factory",
    "source_code": "def _saveable_factory(name=self.name):\n    saveables = []\n    dims = len(self._variables[0].shape)\n    var_offset = [0 for _ in range(dims)]\n    for v in self._variables:\n        save_slice_info = variables_lib.Variable.SaveSliceInfo(full_name=self.name, full_shape=self.shape.as_list(), var_offset=copy.copy(var_offset), var_shape=v.shape.as_list())\n        saveables.append(saveable_object_util.ResourceVariableSaveable(v, save_slice_info.spec, name))\n        var_offset[0] += int(v.shape[0])\n    return saveables",
    "docstring": "Creates s for this .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\sharded_variable.py",
    "ast_data": "FunctionDef name:_saveable_factory arg:name arguments arg Assign Assign Call Assign Call For Assign Call Call Call Call Call Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "get_module_concrete_type",
    "source_code": "def get_module_concrete_type(nn_module, share_types=True):\n    assert isinstance(nn_module, Module)\n    if isinstance(nn_module, torch.jit.ScriptModule) and hasattr(nn_module, '_concrete_type'):\n        return nn_module._concrete_type\n    if share_types:\n        concrete_type = concrete_type_store.get_or_create_concrete_type(nn_module)\n    else:\n        concrete_type_builder = infer_concrete_type_builder(nn_module, share_types)\n        concrete_type_builder.set_poisoned()\n        concrete_type = concrete_type_builder.build()\n    return concrete_type",
    "docstring": "Get a concrete type for nn_modules. If share_types is True, the concrete type is fetched from concrete_type_store. If it is False, a new concrete type is created without first searching concrete_type_store. Args: nn_module: The original Python nn.Module that we are creating a ScriptModule for. share_types = Whether to share underlying JIT types between modules (if possible). Returns: A concrete type for nn_module.",
    "type": "function",
    "file_path": "pytorch\\torch\\jit\\_recursive.py",
    "ast_data": "FunctionDef name:get_module_concrete_type arg:nn_module arg:share_types arguments arg arg Call If BoolOp Call Call Return return:yes If Assign Call Assign Call Call Assign Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_free_storage",
    "source_code": "def _free_storage(tensor: torch.Tensor):\n    with torch.no_grad():\n        if not torch.distributed._functional_collectives.is_torchdynamo_compiling():\n            already_freed = tensor._typed_storage()._size() == 0\n            if not already_freed:\n                _p_assert(tensor.storage_offset() == 0, f\"Freeing a tensor's storage is unsafe when it is not the sole occupant\\nstorage offset: {tensor.storage_offset()}\\nstorage size: {tensor._typed_storage()._size()}\\ntensor shape: {tensor.shape}\")\n                tensor._typed_storage()._resize_(0)",
    "docstring": "Frees the underlying storage of `` if the storage was already freed.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\utils.py",
    "ast_data": "FunctionDef name:_free_storage arg:tensor arguments arg With Call If Call Assign Compare Call Call If Call Compare Call Call Call Call Call Call"
  },
  {
    "library": "django",
    "name": "chain",
    "source_code": "def chain(self, klass=None):\n    obj = self.clone()\n    if klass and obj.__class__ != klass:\n        obj.__class__ = klass\n    if not obj.filter_is_sticky:\n        obj.used_aliases = set()\n    obj.filter_is_sticky = False\n    if hasattr(obj, '_setup_query'):\n        obj._setup_query()\n    return obj",
    "docstring": "Return a copy of the current Query that's ready for another operation. The klass argument changes the type of the Query, e.g. UpdateQuery.",
    "type": "method",
    "file_path": "django\\django\\db\\models\\sql\\query.py",
    "ast_data": "FunctionDef name:chain arg:self arg:klass arguments arg arg Assign Call If BoolOp Compare Assign If Assign Call Assign If Call Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "reset_position",
    "source_code": "def reset_position(self):\n    for ax in self._twinned_axes.get_siblings(self):\n        pos = ax.get_position(original=True)\n        ax.set_position(pos, which='active')",
    "docstring": "Reset the active position to the original position. This undoes changes to the active position (as defined in ) which may have been performed to satisfy fixed-aspect constraints.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\axes\\_base.py",
    "ast_data": "FunctionDef name:reset_position arg:self arguments arg For Call Assign Call Call"
  },
  {
    "library": "django",
    "name": "MediaDefiningClass",
    "source_code": "class MediaDefiningClass(type):\n\n    def __new__(mcs, name, bases, attrs):\n        new_class = super().__new__(mcs, name, bases, attrs)\n        if 'media' not in attrs:\n            new_class.media = media_property(new_class)\n        return new_class",
    "docstring": "Metaclass for classes that can have media definitions.",
    "type": "class",
    "file_path": "django\\django\\forms\\widgets.py",
    "ast_data": "ClassDef name:MediaDefiningClass FunctionDef name:__new__ arg:mcs arg:name arg:bases arg:attrs arguments arg arg arg arg Assign Call Call If Compare Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_validate_flag_names",
    "source_code": "def _validate_flag_names(self):\n    tensor_tracer_flags = self._env.get(FLAGS_ENV_VAR)\n    if not tensor_tracer_flags:\n        return\n    pos = 0\n    while True:\n        match, _ = TTParameters.match_next_flag(tensor_tracer_flags, pos)\n        if not match:\n            break\n        flag_name = match.group(1)\n        if flag_name not in VALID_FLAG_NAMES:\n            raise ValueError('The flag name \"%s\" passed via the environment variable \"%s\" is invalid. Valid flag names are:\\n%s' % (flag_name, FLAGS_ENV_VAR, VALID_FLAG_NAMES))\n        pos = match.end()",
    "docstring": "Validates if the TensorTrace flags passed are valid.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\tpu\\tensor_tracer_flags.py",
    "ast_data": "FunctionDef name:_validate_flag_names arg:self arguments arg Assign Call If Return return:no Assign While Assign Call If Assign Call If Compare Raise Call Assign Call"
  },
  {
    "library": "pytorch",
    "name": "freeze",
    "source_code": "def freeze(d):\n    if isinstance(d, dict):\n        return frozenset(map(freeze, d.items()))\n    if isinstance(d, set):\n        return frozenset(map(freeze, d))\n    if isinstance(d, (tuple, list)):\n        return tuple(map(freeze, d))\n    return d",
    "docstring": "Freeze container to hashable form >>> freeze(1) 1 >>> freeze([1, 2]) (1, 2) >>> freeze({1: 2}) # doctest: +SKIP frozenset([(1, 2)])",
    "type": "function",
    "file_path": "pytorch\\torch\\fx\\experimental\\unification\\utils.py",
    "ast_data": "FunctionDef name:freeze arg:d arguments arg If Call Return return:yes Call Call Call If Call Return return:yes Call Call If Call Return return:yes Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "hpu",
    "source_code": "def hpu(self, device=None, non_blocking=False) -> Union[_StorageBase, TypedStorage]:\n    device2 = torch.device('hpu', device) if device else torch.device('hpu')\n    return self.to(device=device2, non_blocking=non_blocking)",
    "docstring": "Returns a copy of this object in HPU memory. If this object is already in HPU memory and on the correct device, then no copy is performed and the original object is returned. Args: device (int): The destination HPU id. Defaults to the current device. non_blocking (bool): If `` and the source is in pinned memory, the copy will be asynchronous with respect to the host. Otherwise, the argument has no effect.",
    "type": "method",
    "file_path": "pytorch\\torch\\storage.py",
    "ast_data": "FunctionDef name:hpu arg:self arg:device arg:non_blocking arguments arg arg arg Assign Call Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "set_all_reduce_hook",
    "source_code": "def set_all_reduce_hook(self, hook: Callable[[torch.Tensor], None], *, stream: Optional[torch.cuda.Stream]=None):\n    state = self._get_fsdp_state()\n    if (fsdp_param_group := state._fsdp_param_group) is not None:\n        fsdp_param_group._all_reduce_hook = hook\n        if stream is not None:\n            if fsdp_param_group._is_hsdp:\n                raise ValueError('stream cannot be set when using native HSDP')\n            fsdp_param_group._all_reduce_hook_stream = stream",
    "docstring": "Args: hook (Callable[[torch.Tensor], None]): User-defined all-reduce hook with expected signature `` is the reduce-scatter output if only using FSDP or the all-reduce output if using native HSDP. stream (Optional[torch.cuda.Stream]): Stream to run the all-reduce hook in. This should only be set if not using native HSDP. If using native HSDP, the hook will run in the internally defined all-reduce stream used by the native HSDP all-reduce.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\fsdp\\_fully_shard\\_fully_shard.py",
    "ast_data": "FunctionDef name:set_all_reduce_hook arg:self arg:hook arguments arg arg arg Assign Call If Compare Assign If Compare If Raise Call Assign"
  },
  {
    "library": "tensorflow",
    "name": "zeros_like_v2",
    "source_code": "@dispatch.dispatch_for_types(array_ops.zeros_like_v2, StructuredTensor)\ndef zeros_like_v2(input, dtype=None, name=None, layout=None):\n    if layout is not None and (not layout.is_fully_replicated()):\n        raise ValueError(f'StructuredTensor only allows replicated layout. got {layout}')\n    if dtype is None:\n        dtype = dtypes.float32\n    with ops.name_scope(name, 'zeros_like', [input]) as name:\n        if not input.row_partitions:\n            if input.nrows() is not None:\n                return array_ops.zeros([input.nrows()], dtype, layout=layout)\n            else:\n                return array_ops.zeros([], dtype, layout=layout)\n        last_row_partition = input.row_partitions[-1]\n        result = ragged_tensor.RaggedTensor._from_nested_row_partitions(array_ops.zeros(last_row_partition.nvals(), dtype=dtype), input.row_partitions)\n        return result",
    "docstring": "Replace every object with a zero. Example: >>> st = StructuredTensor.from_pyval([{\"x\":[3]}, {\"x\":[4,5]}]) >>> tf.zeros_like(st) >>> st = StructuredTensor.from_pyval([[{\"x\":[3]}], [{\"x\":[4,5]}, {\"x\":[]}]]) >>> tf.zeros_like(st, dtype=tf.int32) Args: input: a structured tensor. dtype: the dtype of the resulting zeros. (default is tf.float32) name: a name for the op. layout: Optional Layout. Only supports replicated layout. Returns: a tensor of zeros of the same shape.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\structured\\structured_array_ops.py",
    "ast_data": "FunctionDef name:zeros_like_v2 arg:input arg:dtype arg:name arg:layout arguments arg arg arg arg If BoolOp Compare Call Raise Call If Compare Assign With Call If If Compare Call Return return:yes Call Call Return return:yes Call Assign Assign Call Call Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "add_to_tensor",
    "source_code": "def add_to_tensor(self, mat, name='add_to_tensor'):\n    with self._name_scope(name):\n        mat = tensor_conversion.convert_to_tensor_v2_with_dispatch(mat, name='mat')\n        mat_diag = array_ops.matrix_diag_part(mat)\n        new_diag = 1 + mat_diag\n        return array_ops.matrix_set_diag(mat, new_diag)",
    "docstring": "Add matrix represented by this operator to . Equiv to . Args: mat: with same and shape broadcastable to . name: A name to give this . Returns: A with broadcast shape and same as .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\linalg\\linear_operator_identity.py",
    "ast_data": "FunctionDef name:add_to_tensor arg:self arg:mat arg:name arguments arg arg arg With Call Assign Call Assign Call Assign Return return:yes Call"
  },
  {
    "library": "numpy",
    "name": "TooHardError",
    "source_code": "class TooHardError(RuntimeError):\n    pass",
    "docstring": "`` may have caused the operation to fail.",
    "type": "class",
    "file_path": "numpy\\numpy\\exceptions.py",
    "ast_data": "ClassDef name:TooHardError"
  },
  {
    "library": "pandas",
    "name": "apply",
    "source_code": "@Substitution(subset=subset_args)\ndef apply(self, func: Callable, axis: Axis | None=0, subset: Subset | None=None, **kwargs) -> Styler:\n    self._todo.append((lambda instance: instance._apply, (func, axis, subset), kwargs))\n    return self",
    "docstring": "Apply a CSS-styling function column-wise, row-wise, or table-wise. Updates the HTML representation with the result. Parameters ---------- func : function `Table Visualization `_ user guide for more details.",
    "type": "method",
    "file_path": "pandas\\pandas\\io\\formats\\style.py",
    "ast_data": "FunctionDef name:apply arg:self arg:func arg:axis arg:subset arguments arg arg arg arg arg Call arguments arg Return return:yes Call"
  },
  {
    "library": "django",
    "name": "_setup",
    "source_code": "def _setup(self, name=None):\n    settings_module = os.environ.get(ENVIRONMENT_VARIABLE)\n    if not settings_module:\n        desc = 'setting %s' % name if name else 'settings'\n        raise ImproperlyConfigured('Requested %s, but settings are not configured. You must either define the environment variable %s or call settings.configure() before accessing settings.' % (desc, ENVIRONMENT_VARIABLE))\n    self._wrapped = Settings(settings_module)",
    "docstring": "Load the settings module pointed to by the environment variable. This is used the first time settings are needed, if the user hasn't configured settings manually.",
    "type": "method",
    "file_path": "django\\django\\conf\\__init__.py",
    "ast_data": "FunctionDef name:_setup arg:self arg:name arguments arg arg Assign Call If Assign Raise Call Assign Call"
  },
  {
    "library": "tensorflow",
    "name": "_initialize_physical_devices",
    "source_code": "def _initialize_physical_devices(self, reinitialize=False):\n    with self._device_lock:\n        if not reinitialize and self._physical_devices is not None:\n            return\n        devs = pywrap_tfe.TF_ListPhysicalDevices()\n        self._physical_devices = [PhysicalDevice(name=d.decode(), device_type=d.decode().split(':')[1]) for d in devs]\n        self._physical_device_to_index = {p: i for i, p in enumerate(self._physical_devices)}\n        pluggable_devs = pywrap_tfe.TF_ListPluggablePhysicalDevices()\n        self._pluggable_devices = [PhysicalDevice(name=d.decode(), device_type=d.decode().split(':')[1]) for d in pluggable_devs]\n        self._visible_device_list = list(self._physical_devices)\n        self._memory_growth_map = {d: None for d in self._physical_devices if d.device_type == 'GPU' or d in self._pluggable_devices}\n    self._import_config()",
    "docstring": "Gets local devices visible to the system. Args: reinitialize: If True, reinitializes self._physical_devices so that dynamic registered devices will also be visible to the python front-end.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\context.py",
    "ast_data": "FunctionDef name:_initialize_physical_devices arg:self arg:reinitialize arguments arg arg With If BoolOp Compare Return return:no Assign Call Assign Call Call Call Call Assign Call Assign Call Assign Call Call Call Call Assign Call Assign BoolOp Compare Compare Call"
  },
  {
    "library": "pytorch",
    "name": "add_pruning_method",
    "source_code": "def add_pruning_method(self, method):\n    if not isinstance(method, BasePruningMethod) and method is not None:\n        raise TypeError(f'{type(method)} is not a BasePruningMethod subclass')\n    elif method is not None and self._tensor_name != method._tensor_name:\n        raise ValueError(f\"Can only add pruning methods acting on the parameter named '{self._tensor_name}' to PruningContainer {self}.\" + f\" Found '{method._tensor_name}'\")\n    self._pruning_methods += (method,)",
    "docstring": "Add a child pruning `` to the container. Args: method (subclass of BasePruningMethod): child pruning method to be added to the container.",
    "type": "method",
    "file_path": "pytorch\\torch\\nn\\utils\\prune.py",
    "ast_data": "FunctionDef name:add_pruning_method arg:self arg:method arguments arg arg If BoolOp Call Compare Raise Call Call If BoolOp Compare Compare Raise Call"
  },
  {
    "library": "pytorch",
    "name": "is_expandable_to",
    "source_code": "def is_expandable_to(shape: ShapeType, desired: ShapeType) -> bool:\n    if len(shape) > len(desired):\n        return False\n    for i in range(len(shape)):\n        if shape[-i - 1] != desired[-i - 1] and shape[-i - 1] != 1:\n            return False\n    return True",
    "docstring": "Checks if a shape can be expanded to another shape. This is equivalent to checking if the two shapes are broadcastable.",
    "type": "function",
    "file_path": "pytorch\\torch\\_prims_common\\__init__.py",
    "ast_data": "FunctionDef name:is_expandable_to arg:shape arg:desired arguments arg arg If Compare Call Call Return return:yes For Call Call If BoolOp Compare Compare Return return:yes Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_get_on_device_or_primary",
    "source_code": "def _get_on_device_or_primary(self):\n    replica_id = values_util.get_current_replica_id_as_int()\n    if replica_id is None:\n        current_device = device_util.canonicalize(device_util.current())\n        for value in self._values:\n            if device_util.canonicalize(value.device) == current_device:\n                return value\n        return self._primary\n    else:\n        return self._values[replica_id]",
    "docstring": "Returns value in same replica or device if possible, else the _primary.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\values.py",
    "ast_data": "FunctionDef name:_get_on_device_or_primary arg:self arguments arg Assign Call If Compare Assign Call Call For If Compare Call Return return:yes Return return:yes Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "join_hook",
    "source_code": "@abstractmethod\ndef join_hook(self, **kwargs) -> JoinHook:\n    ...",
    "docstring": "Return a :class: instance for the given :class:. Arguments: kwargs (dict): a :class: containing any keyword arguments to modify the behavior of the join hook at run time; all :class: instances sharing the same join context manager are forwarded the same value for ``.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\algorithms\\join.py",
    "ast_data": "FunctionDef name:join_hook arg:self arguments arg arg"
  },
  {
    "library": "scipy",
    "name": "logpmf",
    "source_code": "def logpmf(self, x, m, n):\n    M, m, n, mcond, ncond, mncond = self._process_parameters(m, n)\n    x, M, m, n, xcond, xcond_reduced = self._process_quantiles(x, M, m, n)\n    mxcond = mcond | xcond\n    ncond = ncond | np.zeros(n.shape, dtype=np.bool_)\n    result = self._logpmf(x, M, m, n, mxcond, ncond)\n    xcond_ = xcond_reduced | np.zeros(mncond.shape, dtype=np.bool_)\n    result = self._checkresult(result, xcond_, -np.inf)\n    mncond_ = mncond | np.zeros(xcond_reduced.shape, dtype=np.bool_)\n    return self._checkresult(result, mncond_, np.nan)",
    "docstring": "Log of the multivariate hypergeometric probability mass function. Parameters ---------- x : array_like Quantiles, with the last axis of denoting the components. %(_doc_default_callparams)s Returns ------- logpmf : ndarray or scalar Log of the probability mass function evaluated at Notes ----- %(_doc_callparams_note)s",
    "type": "method",
    "file_path": "scipy\\scipy\\stats\\_multivariate.py",
    "ast_data": "FunctionDef name:logpmf arg:self arg:x arg:m arg:n arguments arg arg arg arg Assign Call Assign Call Assign Assign Call Assign Call Assign Call Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "close",
    "source_code": "def close(self) -> None:\n    if self.is_wrapped:\n        assert isinstance(self.handle, TextIOWrapper)\n        self.handle.flush()\n        self.handle.detach()\n        self.created_handles.remove(self.handle)\n    for handle in self.created_handles:\n        handle.close()\n    self.created_handles = []\n    self.is_wrapped = False",
    "docstring": "Close all created buffers. Note: If a TextIOWrapper was inserted, it is flushed and detached to avoid closing the potentially user-created buffer.",
    "type": "method",
    "file_path": "pandas\\pandas\\io\\common.py",
    "ast_data": "FunctionDef name:close arg:self arguments arg If Call Call Call Call For Call Assign Assign"
  },
  {
    "library": "scipy",
    "name": "_all_partitions_concatenated",
    "source_code": "def _all_partitions_concatenated(ns):\n\n    def all_partitions(z, n):\n        for c in combinations(z, n):\n            x0 = set(c)\n            x1 = z - x0\n            yield [x0, x1]\n\n    def all_partitions_n(z, ns):\n        if len(ns) == 0:\n            yield [z]\n            return\n        for c in all_partitions(z, ns[0]):\n            for d in all_partitions_n(c[1], ns[1:]):\n                yield (c[0:1] + d)\n    z = set(range(np.sum(ns)))\n    for partitioning in all_partitions_n(z, ns[:]):\n        x = np.concatenate([list(partition) for partition in partitioning]).astype(int)\n        yield x",
    "docstring": "Generate all partitions of indices of groups of given sizes, concatenated is an iterable of ints.",
    "type": "function",
    "file_path": "scipy\\scipy\\stats\\_resampling.py",
    "ast_data": "FunctionDef name:_all_partitions_concatenated arg:ns arguments arg FunctionDef name:all_partitions arg:z arg:n arguments arg arg For Call Assign Call Assign FunctionDef name:all_partitions_n arg:z arg:ns arguments arg arg If Compare Call Return return:no For Call For Call Assign Call Call Call For Call Assign Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "_generate_buffer",
    "source_code": "def _generate_buffer(self, node: ir.IRNode) -> Optional[torch.fx.Node]:\n\n    def generate_to_buffer(node: ir.IRNode) -> Optional[BufferLike]:\n        if isinstance(node, (ir.Buffer, WorkspaceArg)):\n            return node\n        elif isinstance(node, ir.NoneAsConstantBuffer):\n            return None\n        elif isinstance(node, ir.StorageBox):\n            return generate_to_buffer(node.data)\n        elif isinstance(node, ir.ReinterpretView):\n            buffer = self._get_buffer(node.data)\n            assert isinstance(buffer, (ir.Buffer, WorkspaceArg))\n            unique_name = self.gm.graph._graph_namespace.create_name(f'{buffer.get_name()}_view', None)\n            device = buffer.get_device()\n            assert device\n            reused_as = WorkspaceArg(count=buffer.get_size(), zero_mode=WorkspaceZeroMode.UNINITIALIZED, device=device, outer_name=unique_name, dtype=buffer.get_dtype())\n            self._generate_reinterpret_helper(buffer, reused_as, node.layout)\n            return reused_as\n        else:\n            raise NotImplementedError(f'Unrecognized buffer/view node: {node}')\n    buffer = generate_to_buffer(node)\n    return self.buffer_to_node[buffer.get_name()] if buffer is not None else None",
    "docstring": "Generates FX IR for transformations on a buffer, such as ReinterpretView. Does nothing if no such transformations are present.",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\codegen\\wrapper_fxir.py",
    "ast_data": "FunctionDef name:_generate_buffer arg:self arg:node arguments arg arg FunctionDef name:generate_to_buffer arg:node arguments arg If Call Return return:yes If Call Return return:no If Call Return return:yes Call If Call Assign Call Call Assign Call Call Assign Call Assign Call Call Call Call Return return:yes Raise Call Assign Call Return return:yes Compare Call"
  },
  {
    "library": "pytorch",
    "name": "exit",
    "source_code": "def exit(self, status: int=0, message: str | None=None) -> Never:\n    argv = sys.argv[1:]\n    if self._epilog and (not status) and ('-h' in argv) or '--help' in argv:\n        print(self._epilog)\n    super().exit(status, message)",
    "docstring": "Overriding this method is a workaround for argparse throwing away all line breaks when printing the section of the help message.",
    "type": "method",
    "file_path": "pytorch\\tools\\linter\\adapters\\_linter.py",
    "ast_data": "FunctionDef name:exit arg:self arg:status arg:message arguments arg arg arg Assign If BoolOp BoolOp Compare Compare Call Call Call"
  },
  {
    "library": "django",
    "name": "sentence",
    "source_code": "def sentence():\n    sections = [' '.join(random.sample(WORDS, random.randint(3, 12))) for i in range(random.randint(1, 5))]\n    s = ', '.join(sections)\n    return '%s%s%s' % (s[0].upper(), s[1:], random.choice('?.'))",
    "docstring": "Return a randomly generated sentence of lorem ipsum text. The first word is capitalized, and the sentence ends in either a period or question mark. Commas are added at random.",
    "type": "function",
    "file_path": "django\\django\\utils\\lorem_ipsum.py",
    "ast_data": "FunctionDef name:sentence arguments Assign Call Call Call Call Call Assign Call Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "local_variables",
    "source_code": "def local_variables(self):\n    return self.get_collection(ops.GraphKeys.LOCAL_VARIABLES)",
    "docstring": "Get this scope's local variables.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\variable_scope.py",
    "ast_data": "FunctionDef name:local_variables arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "nonsingular",
    "source_code": "def nonsingular(self, v0, v1):\n    return mtransforms.nonsingular(v0, v1, expander=0.05)",
    "docstring": "Adjust a range as needed to avoid singularities. This method gets called during autoscaling, with `` is returned without modification.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\ticker.py",
    "ast_data": "FunctionDef name:nonsingular arg:self arg:v0 arg:v1 arguments arg arg arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "normalized_arguments",
    "source_code": "@compatibility(is_backward_compatible=False)\ndef normalized_arguments(self, root: torch.nn.Module, arg_types: Optional[tuple[Any]]=None, kwarg_types: Optional[dict[str, Any]]=None, normalize_to_only_use_kwargs: bool=False) -> Optional[ArgsKwargsPair]:\n    if self.op == 'call_function':\n        assert callable(self.target)\n        return normalize_function(self.target, self.args, self.kwargs, arg_types, kwarg_types, normalize_to_only_use_kwargs=normalize_to_only_use_kwargs)\n    elif self.op == 'call_module':\n        assert isinstance(self.target, str)\n        return normalize_module(root, self.target, self.args, self.kwargs, normalize_to_only_use_kwargs=normalize_to_only_use_kwargs)\n    return None",
    "docstring": "Returns normalized arguments to Python targets. This means that will be matched up to the module/functional's signature and return exclusively kwargs in positional order if is true. Also populates default values. Does not support positional-only parameters or varargs parameters. Supports module calls. May require and in order to disambiguate overloads. Args: root (torch.nn.Module): Module upon which to resolve module targets. arg_types (Optional[Tuple[Any]]): Tuple of arg types for the args kwarg_types (Optional[Dict[str, Any]]): Dict of arg types for the kwargs normalize_to_only_use_kwargs (bool): Whether to normalize to only use kwargs. Returns: Returns NamedTuple ArgsKwargsPair, or if not successful.",
    "type": "method",
    "file_path": "pytorch\\torch\\fx\\node.py",
    "ast_data": "FunctionDef name:normalized_arguments arg:self arg:root arg:arg_types arg:kwarg_types arg:normalize_to_only_use_kwargs arguments arg arg arg arg arg If Compare Call Return return:yes Call If Compare Call Return return:yes Call Return return:no Call"
  },
  {
    "library": "matplotlib",
    "name": "print_label",
    "source_code": "def print_label(self, linecontour, labelwidth):\n    return len(linecontour) > 10 * labelwidth or (len(linecontour) and (np.ptp(linecontour, axis=0) > 1.2 * labelwidth).any())",
    "docstring": "Return whether a contour is long enough to hold a label.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\contour.py",
    "ast_data": "FunctionDef name:print_label arg:self arg:linecontour arg:labelwidth arguments arg arg arg Return return:yes BoolOp Compare Call BoolOp Call Call Compare Call"
  },
  {
    "library": "tensorflow",
    "name": "foldl_v2",
    "source_code": "@tf_export('foldl', v1=[])\n@dispatch.add_dispatch_support\n@deprecation.deprecated_arg_values(None, 'back_prop=False is deprecated. Consider using tf.stop_gradient instead.\\nInstead of:\\nresults = tf.foldl(fn, elems, back_prop=False)\\nUse:\\nresults = tf.nest.map_structure(tf.stop_gradient, tf.foldl(fn, elems))', warn_once=True, back_prop=False)\ndef foldl_v2(fn, elems, initializer=None, parallel_iterations=10, back_prop=True, swap_memory=False, name=None):\n    return foldl(fn=fn, elems=elems, initializer=initializer, parallel_iterations=parallel_iterations, back_prop=back_prop, swap_memory=swap_memory, name=name)",
    "docstring": "foldl on the list of tensors unpacked from on dimension 0. This foldl operator repeatedly applies the callable to a sequence of elements from first to last. The elements are made of the tensors unpacked from on dimension 0. The callable fn takes two tensors as arguments. The first argument is the accumulated value computed from the preceding invocation of fn, and the second is the value at the current position of . If is None, must contain at least one element, and its first element is used as the initializer. Suppose that is unpacked into , a list of tensors. The shape of the result tensor is fn(initializer, values[0]).shapeelemsfnelemsfnelemselems(t1, [t2, t3, [t4, t5]])fnfn = lambda (t1, [t2, t3, [t4, t5]]):fntf.stop_gradientfnelemsfn` is not callable. Example:",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\functional_ops.py",
    "ast_data": "FunctionDef name:foldl_v2 arg:fn arg:elems arg:initializer arg:parallel_iterations arg:back_prop arg:swap_memory arg:name arguments arg arg arg arg arg arg arg Return return:yes Call Call Call"
  },
  {
    "library": "kornia",
    "name": "get_spatial_gradient_kernel2d",
    "source_code": "def get_spatial_gradient_kernel2d(mode: str, order: int, *, device: Optional[Device]=None, dtype: Optional[Dtype]=None) -> Tensor:\n    KORNIA_CHECK(mode.lower() in {'sobel', 'diff'}, f'Mode should be `sobel` or `diff`. Got {mode}')\n    KORNIA_CHECK(order in {1, 2}, f'Order should be 1 or 2. Got {order}')\n    if mode == 'sobel' and order == 1:\n        kernel: Tensor = get_sobel_kernel2d(device=device, dtype=dtype)\n    elif mode == 'sobel' and order == 2:\n        kernel = get_sobel_kernel2d_2nd_order(device=device, dtype=dtype)\n    elif mode == 'diff' and order == 1:\n        kernel = get_diff_kernel2d(device=device, dtype=dtype)\n    elif mode == 'diff' and order == 2:\n        kernel = get_diff_kernel2d_2nd_order(device=device, dtype=dtype)\n    else:\n        raise NotImplementedError(f'Not implemented for order {order} on mode {mode}')\n    return kernel",
    "docstring": "Return kernel for 1st or 2nd order image gradients. Uses one of the following operators: sobel, diff.",
    "type": "function",
    "file_path": "kornia\\kornia\\filters\\kernels.py",
    "ast_data": "FunctionDef name:get_spatial_gradient_kernel2d arg:mode arg:order arguments arg arg arg arg Call Compare Call Call Compare If BoolOp Compare Compare Call If BoolOp Compare Compare Assign Call If BoolOp Compare Compare Assign Call If BoolOp Compare Compare Assign Call Raise Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "get_sequence_length_feature_key_name_from_feature_key_name",
    "source_code": "def get_sequence_length_feature_key_name_from_feature_key_name(feature_name):\n    return feature_name + _SEQUENCE_FEATURE_LENGTH_POSTFIX",
    "docstring": "Gets the name of the sequence length feature from that of the base feature. Args: feature_name: The feature key of a sequence column. Returns: A string which is the feature key for the associated feature length column.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\tpu\\feature_column.py",
    "ast_data": "FunctionDef name:get_sequence_length_feature_key_name_from_feature_key_name arg:feature_name arguments arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "list_dir",
    "source_code": "def list_dir(path: str) -> list[str]:\n    return check_output(['ls', '-1', path]).decode().split('\\n')",
    "docstring": "' Helper for getting paths for Python",
    "type": "function",
    "file_path": "pytorch\\.ci\\aarch64_linux\\aarch64_wheel_ci_build.py",
    "ast_data": "FunctionDef name:list_dir arg:path arguments arg Return return:yes Call Call Call"
  },
  {
    "library": "django",
    "name": "area",
    "source_code": "@property\ndef area(self):\n    return capi.geos_area(self.ptr, byref(c_double()))",
    "docstring": "Return the area of the Geometry.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\geos\\geometry.py",
    "ast_data": "FunctionDef name:area arg:self arguments arg Return return:yes Call Call Call"
  },
  {
    "library": "django",
    "name": "is_multipart",
    "source_code": "def is_multipart(self):\n    return any((field.widget.needs_multipart_form for field in self.fields.values()))",
    "docstring": "Return True if the form needs to be multipart-encoded, i.e. it has FileInput, or False otherwise.",
    "type": "method",
    "file_path": "django\\django\\forms\\forms.py",
    "ast_data": "FunctionDef name:is_multipart arg:self arguments arg Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "reset",
    "source_code": "def reset(self) -> None:\n    pass",
    "docstring": "Reset the to the initial state. By default, no-op. For subclasses of , depending on their functionalities, they may want to override this method with implementations that may clear the buffers and reset pointers of the DataPipe. The method is always called when is called as part of .",
    "type": "method",
    "file_path": "pytorch\\torch\\utils\\data\\datapipes\\datapipe.py",
    "ast_data": "FunctionDef name:reset arg:self arguments arg"
  },
  {
    "library": "pandas",
    "name": "value_counts",
    "source_code": "def value_counts(self, dropna: bool=True) -> Series:\n    from pandas import Index, Series\n    from pandas.arrays import IntegerArray\n    keys, value_counts, na_counter = algos.value_counts_arraylike(self._data, dropna=dropna, mask=self._mask)\n    mask_index = np.zeros((len(value_counts),), dtype=np.bool_)\n    mask = mask_index.copy()\n    if na_counter > 0:\n        mask_index[-1] = True\n    arr = IntegerArray(value_counts, mask)\n    index = Index(self.dtype.construct_array_type()(keys, mask_index))\n    return Series(arr, index=index, name='count', copy=False)",
    "docstring": "Returns a Series containing counts of each unique value. Parameters ---------- dropna : bool, default True Don't include counts of missing values. Returns ------- counts : Series See Also -------- Series.value_counts",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\arrays\\masked.py",
    "ast_data": "FunctionDef name:value_counts arg:self arg:dropna arguments arg arg Assign Call Assign Call Call Assign Call If Compare Assign Assign Call Assign Call Call Call Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "_get_reconciled_name_object",
    "source_code": "def _get_reconciled_name_object(self, other):\n    name = get_op_result_name(self, other)\n    if self.name is not name:\n        return self.rename(name)\n    return self",
    "docstring": "If the result of a set operation will be self, return self, unless the name changes, in which case make a shallow copy of self.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\indexes\\base.py",
    "ast_data": "FunctionDef name:_get_reconciled_name_object arg:self arg:other arguments arg arg Assign Call If Compare Return return:yes Call Return return:yes"
  },
  {
    "library": "authlib",
    "name": "validate_request_object_encryption_enc_values_supported",
    "source_code": "def validate_request_object_encryption_enc_values_supported(self):\n    validate_array_value(self, 'request_object_encryption_enc_values_supported')",
    "docstring": "OPTIONAL. JSON array containing a list of the JWE encryption algorithms (enc values) supported by the OP for Request Objects. These algorithms are used both when the Request Object is passed by value and when it is passed by reference.",
    "type": "method",
    "file_path": "authlib\\authlib\\oidc\\discovery\\models.py",
    "ast_data": "FunctionDef name:validate_request_object_encryption_enc_values_supported arg:self arguments arg Call"
  },
  {
    "library": "tensorflow",
    "name": "default",
    "source_code": "def default(method):\n    method._is_default = True\n    return method",
    "docstring": "Decorates a method to detect overrides in subclasses.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\utils\\generic_utils.py",
    "ast_data": "FunctionDef name:default arg:method arguments arg Assign Return return:yes"
  },
  {
    "library": "pandas",
    "name": "_evaluate_standard",
    "source_code": "def _evaluate_standard(op, op_str, left_op, right_op):\n    if _TEST_MODE:\n        _store_test_result(False)\n    return op(left_op, right_op)",
    "docstring": "Standard evaluation.",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\computation\\expressions.py",
    "ast_data": "FunctionDef name:_evaluate_standard arg:op arg:op_str arg:left_op arg:right_op arguments arg arg arg arg If Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "AOTAutogradCacheEntry",
    "source_code": "class AOTAutogradCacheEntry(GenericAOTAutogradCacheEntry[CompiledForward, CompiledBackward]):\n    pass",
    "docstring": "Regular AOTAutogradCacheEntry: saves the forward/backward FxGraphCache keys and looks them up in FxGraphCache on load",
    "type": "class",
    "file_path": "pytorch\\torch\\_functorch\\_aot_autograd\\autograd_cache.py",
    "ast_data": "ClassDef name:AOTAutogradCacheEntry"
  },
  {
    "library": "pytorch",
    "name": "recursive_add_node",
    "source_code": "def recursive_add_node(self, fusion_group: 'FxNetAccFusionsFinder.FusionGroup', inputs: Union[NodeSet, NodeList], visited: Optional[NodeSet]=None):\n    for arg in inputs:\n        if visited is not None:\n            if arg in visited:\n                continue\n            visited.add(arg)\n        if arg.op not in CALLABLE_NODE_OPS:\n            continue\n        if self.nodes.index(arg) < fusion_group.top_node_idx:\n            continue\n        if arg in fusion_group.nodes:\n            return True\n        if self.recursive_add_node(fusion_group, arg.all_input_nodes, visited):\n            fusion_group.add_node(arg)\n            return True\n    return False",
    "docstring": "Start from inputs and going reverse topological order. If any upstream node is in the fusion group, add all the nodes in this path to fusion group.",
    "type": "method",
    "file_path": "pytorch\\torch\\fx\\passes\\tools_common.py",
    "ast_data": "FunctionDef name:recursive_add_node arg:self arg:fusion_group arg:inputs arg:visited arguments arg arg arg arg For If Compare If Compare Call If Compare If Compare Call If Compare Return return:yes If Call Call Return return:yes Return return:yes"
  },
  {
    "library": "seaborn",
    "name": "set",
    "source_code": "def set(*args, **kwargs):\n    set_theme(*args, **kwargs)",
    "docstring": "Alias for :func:, which is the preferred interface. This function may be removed in the future.",
    "type": "function",
    "file_path": "seaborn\\seaborn\\rcmod.py",
    "ast_data": "FunctionDef name:set arguments arg arg Call"
  },
  {
    "library": "pytorch",
    "name": "traverse",
    "source_code": "def traverse(datapipe: DataPipe, only_datapipe: Optional[bool]=None) -> DataPipeGraph:\n    msg = '`traverse` function and will be removed after 1.13. Please use `traverse_dps` instead.'\n    if not only_datapipe:\n        msg += ' And, the behavior will be changed to the equivalent of `only_datapipe=True`.'\n    warnings.warn(msg, FutureWarning)\n    if only_datapipe is None:\n        only_datapipe = False\n    cache: set[int] = set()\n    return _traverse_helper(datapipe, only_datapipe, cache)",
    "docstring": "Traverse the DataPipes and their attributes to extract the DataPipe graph. [Deprecated] When `traverse_dps` (default), all attributes of each DataPipe are traversed. This argument is deprecating and will be removed after the next release. Returns: A graph represented as a nested dictionary, where keys are ids of DataPipe instances and values are tuples of DataPipe instance and the sub-graph",
    "type": "function",
    "file_path": "pytorch\\torch\\utils\\data\\graph.py",
    "ast_data": "FunctionDef name:traverse arg:datapipe arg:only_datapipe arguments arg arg Assign If Call If Compare Assign Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "streaming_restore",
    "source_code": "@tf_export('__internal__.tracking.streaming_restore', v1=[])\ndef streaming_restore(status, session=None):\n    if context.executing_eagerly():\n        return\n    if session is None:\n        session = get_session()\n    if isinstance(status, NameBasedSaverStatus):\n        raise NotImplementedError('Streaming restore not supported from name-based checkpoints when graph building. File a feature request if this limitation bothers you. As a workaround, consider either using tf.train.Checkpoint to load name-based checkpoints or enabling eager execution.')\n    status.run_restore_ops(session=session)\n    status._checkpoint.new_restore_ops_callback = lambda ops: session.run(ops, feed_dict=status._feed_dict)",
    "docstring": "When graph building, runs restore ops as soon as they come in. Args: status: A _LoadStatus objects from an object-based saver's restore(). Streaming restore from name-based checkpoints is not currently supported. session: A session to run new restore ops in.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\checkpoint\\checkpoint.py",
    "ast_data": "FunctionDef name:streaming_restore arg:status arg:session arguments arg arg If Call Return return:no If Compare Assign Call If Call Raise Call Call Assign arguments arg Call Call"
  },
  {
    "library": "tensorflow",
    "name": "__validate__",
    "source_code": "def __validate__(self):\n    pass",
    "docstring": "Perform post-construction validation.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\extension_type.py",
    "ast_data": "FunctionDef name:__validate__ arg:self arguments arg"
  },
  {
    "library": "pytorch",
    "name": "_get_args_with_constexprs",
    "source_code": "def _get_args_with_constexprs(self, args, launcher):\n    if triton_version_uses_attrs_dict():\n        constexpr_args: list[tuple[int, Any]] = []\n        for arg_name, arg_val in launcher.config.kwargs.items():\n            constexpr_args.append((self.fn.arg_names.index(arg_name), arg_val))\n        constexpr_args.sort()\n        new_args = [*args]\n        for arg_idx, arg_val in constexpr_args:\n            new_args.insert(arg_idx, arg_val)\n        return new_args\n    return args",
    "docstring": "is passed in with only the non-constexpr args (because the constexpr arg values depend on the config). However, in later triton versions, the constexpr args need to be added into the args list.",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\runtime\\triton_heuristics.py",
    "ast_data": "FunctionDef name:_get_args_with_constexprs arg:self arg:args arg:launcher arguments arg arg arg If Call For Call Call Call Call Assign For Call Return return:yes Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "unwrap_tensor_subclass_parameters",
    "source_code": "def unwrap_tensor_subclass_parameters(module: torch.nn.Module) -> torch.nn.Module:\n    for name, tensor in itertools.chain(list(module.named_parameters(recurse=False)), list(module.named_buffers(recurse=False))):\n        if is_traceable_wrapper_subclass(tensor):\n            torch.nn.utils.parametrize.register_parametrization(module, name, UnwrapTensorSubclass())\n    for name, child in module.named_children():\n        unwrap_tensor_subclass_parameters(child)\n    return module",
    "docstring": "Model transformation that replaces all the parameters that are subclasses to plain tensors. This reduces runtime overhead of flattening/unflattening the parameters. This transformation adds parametrization with . The FQNs of the subclass parameters will be changed and state_dict will become incompatible with the original model. E.g. Original model state_dict: {\"p1\": torch.testing._internal.TwoTensor} becomes: {\"parametrizations.p2.original0\": torch.Tensor, \"parametrizations.p2.original1\": torch.Tensor}",
    "type": "function",
    "file_path": "pytorch\\torch\\_functorch\\_aot_autograd\\subclass_parametrization.py",
    "ast_data": "FunctionDef name:unwrap_tensor_subclass_parameters arg:module arguments arg For Call Call Call Call Call If Call Call Call For Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "asset_path",
    "source_code": "@property\ndef asset_path(self):\n    return self._path",
    "docstring": "Fetch the current asset path.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\trackable\\asset.py",
    "ast_data": "FunctionDef name:asset_path arg:self arguments arg Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "get_ticks_position",
    "source_code": "def get_ticks_position(self):\n    return {1: 'bottom', 2: 'top', 'default': 'default', 'unknown': 'unknown'}[self._get_ticks_position()]",
    "docstring": "Return the ticks position (\"top\", \"bottom\", \"default\", or \"unknown\").",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\axis.py",
    "ast_data": "FunctionDef name:get_ticks_position arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "pygame",
    "name": "find_freetype",
    "source_code": "def find_freetype():\n    pkg_config = DependencyProg('FREETYPE', 'FREETYPE_CONFIG', 'pkg-config freetype2', '2.0', ['freetype2'], '--modversion')\n    if pkg_config.found:\n        return pkg_config\n    freetype_config = DependencyProg('FREETYPE', 'FREETYPE_CONFIG', 'freetype-config', '2.0', ['freetype'], '--ftversion')\n    if freetype_config.found:\n        return freetype_config\n    return pkg_config",
    "docstring": "modern freetype uses pkg-config. However, some older systems don't have that.",
    "type": "function",
    "file_path": "pygame\\buildconfig\\config_unix.py",
    "ast_data": "FunctionDef name:find_freetype arguments Assign Call If Return return:yes Assign Call If Return return:yes Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "et_get_selected_kernels",
    "source_code": "def et_get_selected_kernels(self, op_name: str, kernel_key: list[str]) -> list[str]:\n    if op_name not in self.et_kernel_metadata:\n        return kernel_key if self.include_all_operators else []\n    result_set = set()\n    for model_kernel_keys in self.et_kernel_metadata[op_name]:\n        key_found = False\n        for key in kernel_key:\n            if key != 'default' and key.split('/')[1] == model_kernel_keys.split('/')[1]:\n                result_set.add(key)\n                key_found = True\n                break\n        if not key_found:\n            if 'default' not in kernel_key:\n                raise Exception('Missing kernel for the model')\n            else:\n                result_set.add('default')\n    return list(result_set)",
    "docstring": "Return a list of kernel keys that cover the used ops",
    "type": "method",
    "file_path": "pytorch\\torchgen\\selective_build\\selector.py",
    "ast_data": "FunctionDef name:et_get_selected_kernels arg:self arg:op_name arg:kernel_key arguments arg arg arg If Compare Return return:yes Assign Call For Assign For If BoolOp Compare Compare Call Call Call Assign If If Compare Raise Call Call Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "theta",
    "source_code": "@property\ndef theta(self):\n    return np.append(self.k1.theta, self.k2.theta)",
    "docstring": "Returns the (flattened, log-transformed) non-fixed hyperparameters. Note that theta are typically the log-transformed values of the kernel's hyperparameters as this representation of the search space is more amenable for hyperparameter search, as hyperparameters like length-scales naturally live on a log-scale. Returns ------- theta : ndarray of shape (n_dims,) The non-fixed, log-transformed hyperparameters of the kernel",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\gaussian_process\\kernels.py",
    "ast_data": "FunctionDef name:theta arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "get_scale",
    "source_code": "def get_scale(self):\n    return self._scale.name",
    "docstring": "Return this Axis' scale (as a str).",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\axis.py",
    "ast_data": "FunctionDef name:get_scale arg:self arguments arg Return return:yes"
  },
  {
    "library": "cryptography",
    "name": "parse",
    "source_code": "def parse(self) -> Name:\n    if not self._has_data():\n        return Name([])\n    rdns = [self._parse_rdn()]\n    while self._has_data():\n        self._read_char(',')\n        rdns.append(self._parse_rdn())\n    return Name(reversed(rdns))",
    "docstring": "Parses the string and converts it to a Name. According to RFC4514 section 2.1 the RDNSequence must be reversed when converting to string representation. So, when we parse it, we need to reverse again to get the RDNs on the correct order.",
    "type": "method",
    "file_path": "cryptography\\src\\cryptography\\x509\\name.py",
    "ast_data": "FunctionDef name:parse arg:self arguments arg If Call Return return:yes Call Assign Call While Call Call Call Call Return return:yes Call Call"
  },
  {
    "library": "scipy",
    "name": "vancestors",
    "source_code": "def vancestors(*types):\n    check(types)\n    ras = [[] for _ in range(len(dispatch_args))]\n    for types_ in typemap:\n        for t, type_, ra in zip(types, types_, ras):\n            if issubclass(t, type_) and type_ not in t.__mro__:\n                append(type_, ra)\n    return [set(ra) for ra in ras]",
    "docstring": "Get a list of sets of virtual ancestors for the given types",
    "type": "function",
    "file_path": "scipy\\scipy\\_lib\\decorator.py",
    "ast_data": "FunctionDef name:vancestors arguments arg Call Assign Call Call For For Call If BoolOp Call Compare Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "calculate_equalization_scale",
    "source_code": "def calculate_equalization_scale(input_obs: _InputEqualizationObserver, weight_obs: _WeightEqualizationObserver) -> torch.Tensor:\n    min_inputs, max_inputs = input_obs.get_input_minmax()\n    min_weights, max_weights = weight_obs.get_weight_col_minmax()\n    if not (check_min_max_valid(min_inputs, max_inputs) and check_min_max_valid(min_weights, max_weights)):\n        warnings.warn('Must run observer before calling calculate_equalization_scale. ' + 'Returning default equalization scale torch.tensor(1).')\n        return torch.tensor(1)\n    if not min_inputs.shape == min_weights.shape:\n        raise ValueError('Input and Weight must have the same column dimension. ' + f'Found {min_inputs.shape} and {min_weights.shape} shapes instead.')\n    equalization_scale = torch.sqrt((max_weights - min_weights) / (max_inputs - min_inputs))\n    equalization_scale[equalization_scale == 0.0] = 1\n    equalization_scale = torch.nan_to_num(equalization_scale, nan=1, posinf=1, neginf=1)\n    return equalization_scale",
    "docstring": "Calculates the equalization scale and sets the equalization_scale value in the observers. Args: input_obs: Observer that tracks the ranges for the input columns weight_obs: Observer that tracks the ranges for the weight columns",
    "type": "function",
    "file_path": "pytorch\\torch\\ao\\quantization\\fx\\_equalize.py",
    "ast_data": "FunctionDef name:calculate_equalization_scale arg:input_obs arg:weight_obs arguments arg arg Assign Call Assign Call If BoolOp Call Call Call Return return:yes Call If Compare Raise Call Assign Call Assign Compare Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, reduction=losses_utils.ReductionV2.AUTO, name='mean_squared_logarithmic_error'):\n    super().__init__(mean_squared_logarithmic_error, name=name, reduction=reduction)",
    "docstring": "Initializes instance. Args: reduction: Type of to apply to loss. Default value is . indicates that the reduction option will be determined by the usage context. For almost all cases this defaults to . When used with , outside of built-in training loops such as and , using or will raise an error. Please see this custom training [tutorial]( for more details. name: Optional name for the instance. Defaults to 'mean_squared_logarithmic_error'.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\losses.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:reduction arg:name arguments arg arg arg Call Call"
  },
  {
    "library": "django",
    "name": "format_html_join",
    "source_code": "def format_html_join(sep, format_string, args_generator):\n    return mark_safe(conditional_escape(sep).join((format_html(format_string, **args) if isinstance(args, Mapping) else format_html(format_string, *args) for args in args_generator)))",
    "docstring": "A wrapper of format_html, for the common case of a group of arguments that need to be formatted using the same format string, and then joined using 'sep'. 'sep' is also passed through conditional_escape. 'args_generator' should be an iterator that returns the sequence of 'args' that will be passed to format_html. Example: format_html_join(' ', \"{} {}\", ((u.first_name, u.last_name) for u in users))",
    "type": "function",
    "file_path": "django\\django\\utils\\html.py",
    "ast_data": "FunctionDef name:format_html_join arg:sep arg:format_string arg:args_generator arguments arg arg arg Return return:yes Call Call Call Call Call Call"
  },
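
A short usage example of `format_html_join` (requires Django installed; these html utilities do not need configured settings):

```python
from django.utils.html import format_html_join

users = [("Alice", "A."), ("Bob", "B.")]
# Each args tuple is formatted with the same format string, then joined by sep.
html = format_html_join("\n", "<li>{} {}</li>", ((first, last) for first, last in users))
print(html)  # <li>Alice A.</li>\n<li>Bob B.</li>  (a SafeString, escaped per-argument)
```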
  {
    "library": "tensorflow",
    "name": "get_config",
    "source_code": "def get_config(self):\n    if self._residual_fn is not None:\n        function, function_type, function_module = _serialize_function_to_config(self._residual_fn)\n        config = {'residual_fn': function, 'residual_fn_type': function_type, 'residual_fn_module': function_module}\n    else:\n        config = {}\n    base_config = super(ResidualWrapperBase, self).get_config()\n    return dict(list(base_config.items()) + list(config.items()))",
    "docstring": "Returns the config of the residual wrapper.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\layers\\legacy_rnn\\rnn_cell_wrapper_impl.py",
    "ast_data": "FunctionDef name:get_config arg:self arguments arg If Compare Assign Call Assign Assign Assign Call Call Return return:yes Call Call Call Call Call"
  },
  {
    "library": "pandas",
    "name": "_get_distinct_objs",
    "source_code": "def _get_distinct_objs(objs: list[Index]) -> list[Index]:\n    ids: set[int] = set()\n    res = []\n    for obj in objs:\n        if id(obj) not in ids:\n            ids.add(id(obj))\n            res.append(obj)\n    return res",
    "docstring": "Return a list with distinct elements of \"objs\" (different ids). Preserves order.",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\indexes\\api.py",
    "ast_data": "FunctionDef name:_get_distinct_objs arg:objs arguments arg Call Assign For If Compare Call Call Call Call Return return:yes"
  },
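
A small illustration of the identity-based (not equality-based) dedup performed by `_get_distinct_objs`; note this is a private pandas helper, so the import path is an assumption that may change between versions:

```python
import pandas as pd
from pandas.core.indexes.api import _get_distinct_objs  # private helper

a = pd.Index([1, 2, 3])
b = pd.Index([1, 2, 3])  # equal to `a`, but a different object
# The repeated reference to `a` is dropped; `b` survives despite being equal.
print(_get_distinct_objs([a, a, b]))  # [a, b], original order preserved
```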
  {
    "library": "django",
    "name": "language",
    "source_code": "def language(self):\n    return self.__language",
    "docstring": "Return the translation language.",
    "type": "method",
    "file_path": "django\\django\\utils\\translation\\trans_real.py",
    "ast_data": "FunctionDef name:language arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "walk",
    "source_code": "@tf_export(v1=['gfile.Walk'])\ndef walk(top, in_order=True):\n    return walk_v2(top, in_order)",
    "docstring": "Recursive directory tree generator for directories. Args: top: string, a Directory name in_order: bool, Traverse in order if True, post order if False. Errors that happen while listing directories are ignored. Yields: Each yield is a 3-tuple: the pathname of a directory, followed by lists of all its subdirectories and leaf files. That is, each yield looks like: . Each item is a string.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\lib\\io\\file_io.py",
    "ast_data": "FunctionDef name:walk arg:top arg:in_order arguments arg arg Return return:yes Call Call"
  },
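
For reference, the same generator is reachable through the public `tf.io.gfile.walk` API (the v1 `gfile.Walk` above just forwards to `walk_v2`):

```python
import tensorflow as tf

# Yields (dirname, subdirs, files) tuples for the tree rooted at /tmp.
for dirname, subdirs, files in tf.io.gfile.walk("/tmp"):
    print(dirname, subdirs, files)
```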
  {
    "library": "kornia",
    "name": "rgb_to_yuv420",
    "source_code": "def rgb_to_yuv420(image: Tensor) -> tuple[Tensor, Tensor]:\n    if not isinstance(image, Tensor):\n        raise TypeError(f'Input type is not a Tensor. Got {type(image)}')\n    if len(image.shape) < 3 or image.shape[-3] != 3:\n        raise ValueError(f'Input size must have a shape of (*, 3, H, W). Got {image.shape}')\n    if len(image.shape) < 2 or image.shape[-2] % 2 == 1 or image.shape[-1] % 2 == 1:\n        raise ValueError(f'Input H&W must be evenly disible by 2. Got {image.shape}')\n    yuvimage = rgb_to_yuv(image)\n    return (yuvimage[..., :1, :, :], yuvimage[..., 1:3, :, :].unfold(-2, 2, 2).unfold(-2, 2, 2).mean((-1, -2)))",
    "docstring": "Convert an RGB image to YUV 420 (subsampled). Input need to be padded to be evenly divisible by 2 horizontal and vertical. The image data is assumed to be in the range of :math:. The range of the output is of :math: to luma and the ranges of U and V are :math: and :math:, respectively. The YUV model adopted here follows M/PAL values (see _, Table 2, items 2.5 and 2.6). Args: image: RGB Image to be converted to YUV with shape :math:. Returns: A Tensor containing the Y plane with shape :math: A Tensor containing the UV planes with shape :math: Example: >>> input = torch.rand(2, 3, 4, 6) >>> output = rgb_to_yuv420(input) # (2x1x4x6, 2x2x2x3)",
    "type": "function",
    "file_path": "kornia\\kornia\\color\\yuv.py",
    "ast_data": "FunctionDef name:rgb_to_yuv420 arg:image arguments arg If Call Raise Call Call If BoolOp Compare Call Compare Raise Call If BoolOp Compare Call Compare Compare Raise Call Assign Call Return return:yes Call Call Call"
  },
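
A quick shape check mirroring the docstring example (requires kornia and torch installed):

```python
import torch
from kornia.color import rgb_to_yuv420

img = torch.rand(2, 3, 4, 6)   # (B, 3, H, W), H and W must be even
y, uv = rgb_to_yuv420(img)
# The chroma planes are 2x2 average-pooled, hence half the spatial size.
print(y.shape, uv.shape)        # torch.Size([2, 1, 4, 6]) torch.Size([2, 2, 2, 3])
```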
  {
    "library": "kornia",
    "name": "_crop",
    "source_code": "def _crop(img: torch.Tensor, cropping_shape: List[int]) -> torch.Tensor:\n    return torch.nn.functional.pad(img, (-cropping_shape[2], -cropping_shape[3], -cropping_shape[0], -cropping_shape[1]))",
    "docstring": "Crop out the part of \"valid\" convolution area.",
    "type": "function",
    "file_path": "kornia\\kornia\\metrics\\ssim.py",
    "ast_data": "FunctionDef name:_crop arg:img arg:cropping_shape arguments arg arg Return return:yes Call"
  },
  {
    "library": "django",
    "name": "check_predicate",
    "source_code": "def check_predicate(result, func, cargs):\n    if result == 1:\n        return True\n    elif result == 0:\n        return False\n    else:\n        raise GEOSException('Error encountered on GEOS C predicate function \"%s\".' % func.__name__)",
    "docstring": "Error checking for unary/binary predicate functions.",
    "type": "function",
    "file_path": "django\\django\\contrib\\gis\\geos\\prototypes\\errcheck.py",
    "ast_data": "FunctionDef name:check_predicate arg:result arg:func arg:cargs arguments arg arg arg If Compare Return return:yes If Compare Return return:yes Raise Call"
  },
  {
    "library": "pytorch",
    "name": "register_module_buffer_registration_hook",
    "source_code": "def register_module_buffer_registration_hook(hook: Callable[..., None]) -> RemovableHandle:\n    handle = RemovableHandle(_global_buffer_registration_hooks)\n    _global_buffer_registration_hooks[handle.id] = hook\n    return handle",
    "docstring": "Register a buffer registration hook common to all modules. .. warning :: This adds global state to the module The hook will be called every time :func: is invoked. It should have the following signature:: hook(module, name, buffer) -> None or new buffer The hook can modify the input or return a single modified value in the hook. Returns: :class:: a handle that can be used to remove the added hook by calling ``",
    "type": "function",
    "file_path": "pytorch\\torch\\nn\\modules\\module.py",
    "ast_data": "FunctionDef name:register_module_buffer_registration_hook arg:hook arguments arg Assign Call Assign Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "set_edgecolor",
    "source_code": "def set_edgecolor(self, color):\n    self._original_edgecolor = color\n    self._set_edgecolor(color)",
    "docstring": "Set the patch edge color. Parameters ---------- color : :mpltype: or None",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\patches.py",
    "ast_data": "FunctionDef name:set_edgecolor arg:self arg:color arguments arg arg Assign Call"
  },
  {
    "library": "scikit-learn",
    "name": "get_xp",
    "source_code": "def get_xp(xp: ModuleType) -> Callable[[Callable[..., _T]], Callable[..., _T]]:\n\n    def inner(f: Callable[..., _T], /) -> Callable[..., _T]:\n\n        @wraps(f)\n        def wrapped_f(*args: object, **kwargs: object) -> object:\n            return f(*args, xp=xp, **kwargs)\n        sig = signature(f)\n        new_sig = sig.replace(parameters=[par for i, par in sig.parameters.items() if i != 'xp'])\n        if wrapped_f.__doc__ is None:\n            wrapped_f.__doc__ = f'Array API compatibility wrapper for {f.__name__}.\\n\\nSee the corresponding documentation in NumPy/CuPy and/or the array API\\nspecification for more details.\\n\\n'\n        wrapped_f.__signature__ = new_sig\n        return wrapped_f\n    return inner",
    "docstring": "Decorator to automatically replace xp with the corresponding array module. Use like import numpy as np @get_xp(np) def func(x, /, xp, kwarg=None): return xp.func(x, kwarg=kwarg) Note that xp must be a keyword argument and come after all non-keyword arguments.",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\externals\\array_api_compat\\_internal.py",
    "ast_data": "FunctionDef name:get_xp arg:xp arguments arg FunctionDef name:inner arguments arg FunctionDef name:wrapped_f arguments arg arg Return return:yes Call Call Assign Call Assign Call Call Compare If Compare Assign Assign Return return:yes Return return:yes"
  },
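
A sketch of `get_xp` in action, following the docstring's own pattern; the import path below points at scikit-learn's vendored copy and is an assumption that may change:

```python
import numpy as np
from sklearn.externals.array_api_compat._internal import get_xp

@get_xp(np)
def total(x, /, xp, keepdims=False):
    # `xp` is injected by the decorator and hidden from the public signature.
    return xp.sum(x, keepdims=keepdims)

print(total(np.arange(4)))  # 6; numpy was bound as the array module
```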
  {
    "library": "scikit-learn",
    "name": "ndindex",
    "source_code": "def ndindex(*x: int) -> Generator[tuple[int, ...]]:\n    if not x:\n        yield ()\n        return\n    for i in ndindex(*x[:-1]):\n        for j in range(x[-1]):\n            yield (*i, j)",
    "docstring": "Generate all N-dimensional indices for a given array shape. Given the shape of an array, an ndindex instance iterates over the N-dimensional index of the array. At each iteration a tuple of indices is returned, the last dimension is iterated over first. This has an identical API to numpy.ndindex. Parameters ---------- *x : int The shape of the array.",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\externals\\array_api_extra\\_lib\\_utils\\_helpers.py",
    "ast_data": "FunctionDef name:ndindex arguments arg If Return return:no For Call For Call"
  },
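
Since the docstring states the API is identical to `numpy.ndindex`, the iteration order can be checked against NumPy directly:

```python
import numpy as np

# Last dimension varies fastest, matching the vendored ndindex above.
print(list(np.ndindex(2, 3)))
# [(0, 0), (0, 1), (0, 2), (1, 0), (1, 1), (1, 2)]
```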
  {
    "library": "tensorflow",
    "name": "get_compile_flags",
    "source_code": "@tf_export('sysconfig.get_compile_flags')\ndef get_compile_flags():\n    flags = []\n    flags.append('-I%s' % get_include())\n    flags.append('-D_GLIBCXX_USE_CXX11_ABI=%d' % _CXX11_ABI_FLAG)\n    cxx_version_flag = None\n    if _CXX_VERSION == 201103:\n        cxx_version_flag = '--std=c++11'\n    elif _CXX_VERSION == 201402:\n        cxx_version_flag = '--std=c++14'\n    elif _CXX_VERSION == 201703:\n        cxx_version_flag = '--std=c++17'\n    elif _CXX_VERSION == 202002:\n        cxx_version_flag = '--std=c++20'\n    if cxx_version_flag:\n        flags.append(cxx_version_flag)\n    flags.append('-DEIGEN_MAX_ALIGN_BYTES=%d' % pywrap_tf_session.get_eigen_max_align_bytes())\n    return flags",
    "docstring": "Returns the compilation flags for compiling with TensorFlow. The returned list of arguments can be passed to the compiler for compiling against TensorFlow headers. The result is platform dependent. For example, on a typical Linux system with Python 3.7 the following command prints >>> print(tf.sysconfig.get_compile_flags()) Returns: A list of strings for the compiler flags.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\platform\\sysconfig.py",
    "ast_data": "FunctionDef name:get_compile_flags arguments Assign Call Call Call Assign If Compare Assign If Compare Assign If Compare Assign If Compare Assign If Call Call Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_TensorCacheDeleter",
    "source_code": "class _TensorCacheDeleter(object):\n    __slots__ = ['_context_id']\n\n    def __init__(self, context_id):\n        self._context_id = context_id\n\n    def __del__(self):\n        if _tensor_caches_map is None:\n            return\n        if self._context_id in _tensor_caches_map:\n            del _tensor_caches_map[self._context_id]",
    "docstring": "Deletes tensor caches for a given context.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\context.py",
    "ast_data": "ClassDef name:_TensorCacheDeleter Assign FunctionDef name:__init__ arg:self arg:context_id arguments arg arg Assign FunctionDef name:__del__ arg:self arguments arg If Compare Return return:no If Compare"
  },
  {
    "library": "tensorflow",
    "name": "pick_vector",
    "source_code": "def pick_vector(cond, true_vector, false_vector, name='pick_vector'):\n    with ops.name_scope(name, values=(cond, true_vector, false_vector)):\n        cond = ops.convert_to_tensor(cond, name='cond')\n        if cond.dtype != dtypes.bool:\n            raise TypeError('%s.dtype=%s which is not %s' % (cond, cond.dtype, dtypes.bool))\n        cond_value_static = tensor_util.constant_value(cond)\n        if cond_value_static is not None:\n            return true_vector if cond_value_static else false_vector\n        true_vector = ops.convert_to_tensor(true_vector, name='true_vector')\n        false_vector = ops.convert_to_tensor(false_vector, name='false_vector')\n        if true_vector.dtype != false_vector.dtype:\n            raise TypeError('%s.dtype=%s does not match %s.dtype=%s' % (true_vector, true_vector.dtype, false_vector, false_vector.dtype))\n        n = array_ops.shape(true_vector)[0]\n        return array_ops.slice(array_ops.concat([true_vector, false_vector], 0), [array_ops.where_v2(cond, 0, n)], [array_ops.where(cond, n, -1)])",
    "docstring": "Picks possibly different length row s based on condition. Value s should have exactly one dimension. If is a python Boolean or then either or is immediately returned. I.e., no graph nodes are created and no validation happens. Args: cond: . Must have and be scalar. true_vector: of one dimension. Returned when cond is . false_vector: of one dimension. Returned when cond is . name: Python . The name to give this op. Example: Returns: true_or_false_vector: . Raises: TypeError: if TypeError: if is not a constant and",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\distributions\\util.py",
    "ast_data": "FunctionDef name:pick_vector arg:cond arg:true_vector arg:false_vector arg:name arguments arg arg arg arg With Call Assign Call If Compare Raise Call Assign Call If Compare Return return:yes Assign Call Assign Call If Compare Raise Call Assign Call Return return:yes Call Call Call Call"
  },
  {
    "library": "matplotlib",
    "name": "get_connectionstyle",
    "source_code": "def get_connectionstyle(self):\n    return self._connector",
    "docstring": "Return the used.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\patches.py",
    "ast_data": "FunctionDef name:get_connectionstyle arg:self arguments arg Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "_apply_shrinkage",
    "source_code": "def _apply_shrinkage(self):\n    for leaf in self.finalized_leaves:\n        leaf.value *= self.shrinkage",
    "docstring": "Multiply leaves values by shrinkage parameter. This must be done at the very end of the growing process. If this were done during the growing process e.g. in finalize_leaf(), then a leaf would be shrunk but its sibling would potentially not be (if it's a non-leaf), which would lead to a wrong computation of the 'middle' value needed to enforce the monotonic constraints.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\ensemble\\_hist_gradient_boosting\\grower.py",
    "ast_data": "FunctionDef name:_apply_shrinkage arg:self arguments arg For"
  },
  {
    "library": "tensorflow",
    "name": "_process_logs",
    "source_code": "def _process_logs(self, logs, is_batch_hook=False):\n    if logs is None:\n        return {}\n    if self._supports_tf_logs:\n        return logs\n    if is_batch_hook and self._batch_hooks_support_tf_logs:\n        return logs\n    return tf_utils.sync_to_numpy_or_python_type(logs)",
    "docstring": "Turns tensors into numpy arrays or Python scalars if necessary.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\callbacks.py",
    "ast_data": "FunctionDef name:_process_logs arg:self arg:logs arg:is_batch_hook arguments arg arg arg If Compare Return return:no If Return return:yes If BoolOp Return return:yes Return return:yes Call"
  },
  {
    "library": "seaborn",
    "name": "Stroke",
    "source_code": "class Stroke(IntervalProperty):\n    _default_range = (0.25, 2.5)",
    "docstring": "Thickness of lines that define point glyphs.",
    "type": "class",
    "file_path": "seaborn\\seaborn\\_core\\properties.py",
    "ast_data": "ClassDef name:Stroke Assign"
  },
  {
    "library": "tensorflow",
    "name": "broadcast_to_rank",
    "source_code": "def broadcast_to_rank(self, rank):\n    if self.rank is None:\n        raise ValueError('Unable to broadcast: self.rank is unknown')\n    dims_to_add = rank - self.rank\n    if dims_to_add < 0:\n        raise ValueError('Unable to broadcast: rank=%d must be greater than self.rank=%d.' % (rank, self.rank))\n    elif dims_to_add == 0:\n        return self\n    elif self._partitioned_dim_sizes:\n        partitioned_dims = (1,) * dims_to_add + self._partitioned_dim_sizes\n        return RaggedTensorDynamicShape(partitioned_dims, self.inner_dim_sizes, self.dim_size_dtype)\n    else:\n        inner_dims = array_ops.concat([array_ops.ones([dims_to_add], self.dim_size_dtype), self.inner_dim_sizes], axis=0)\n        return RaggedTensorDynamicShape([], inner_dims, self.dim_size_dtype)",
    "docstring": "Adds leading size-1 dimensions to broadcast to the given rank. E.g., if is , then is . Args: rank: The rank for the returned shape. Returns: A RaggedTensorDynamicShape with dimensions, whose inner dimensions have the same size as and whose outer dimensions have size . Raises: ValueError: If is unknown or greater than .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\ragged_tensor_shape.py",
    "ast_data": "FunctionDef name:broadcast_to_rank arg:self arg:rank arguments arg arg If Compare Raise Call Assign If Compare Raise Call If Compare Return return:yes If Assign Return return:yes Call Assign Call Call Return return:yes Call"
  },
  {
    "library": "django",
    "name": "_insert",
    "source_code": "def _insert(self, objs, fields, returning_fields=None, raw=False, using=None, on_conflict=None, update_fields=None, unique_fields=None):\n    self._for_write = True\n    if using is None:\n        using = self.db\n    query = sql.InsertQuery(self.model, on_conflict=on_conflict, update_fields=update_fields, unique_fields=unique_fields)\n    query.insert_values(fields, objs, raw=raw)\n    return query.get_compiler(using=using).execute_sql(returning_fields)",
    "docstring": "Insert a new record for the given model. This provides an interface to the InsertQuery class and is how Model.save() is implemented.",
    "type": "method",
    "file_path": "django\\django\\db\\models\\query.py",
    "ast_data": "FunctionDef name:_insert arg:self arg:objs arg:fields arg:returning_fields arg:raw arg:using arg:on_conflict arg:update_fields arg:unique_fields arguments arg arg arg arg arg arg arg arg arg Assign If Compare Assign Assign Call Call Return return:yes Call Call"
  },
  {
    "library": "django",
    "name": "bump_prefix",
    "source_code": "def bump_prefix(self, other_query, exclude=None):\n\n    def prefix_gen():\n        alphabet = ascii_uppercase\n        prefix = chr(ord(self.alias_prefix) + 1)\n        yield prefix\n        for n in count(1):\n            seq = alphabet[alphabet.index(prefix):] if prefix else alphabet\n            for s in product(seq, repeat=n):\n                yield ''.join(s)\n            prefix = None\n    if self.alias_prefix != other_query.alias_prefix:\n        return\n    local_recursion_limit = sys.getrecursionlimit() // 16\n    for pos, prefix in enumerate(prefix_gen()):\n        if prefix not in self.subq_aliases:\n            self.alias_prefix = prefix\n            break\n        if pos > local_recursion_limit:\n            raise RecursionError('Maximum recursion depth exceeded: too many subqueries.')\n    self.subq_aliases = self.subq_aliases.union([self.alias_prefix])\n    other_query.subq_aliases = other_query.subq_aliases.union(self.subq_aliases)\n    if exclude is None:\n        exclude = {}\n    self.change_aliases({alias: '%s%d' % (self.alias_prefix, pos) for pos, alias in enumerate(self.alias_map) if alias not in exclude})",
    "docstring": "Change the alias prefix to the next letter in the alphabet in a way that the other query's aliases and this query's aliases will not conflict. Even tables that previously had no alias will get an alias after this call. To prevent changing aliases use the exclude parameter.",
    "type": "method",
    "file_path": "django\\django\\db\\models\\sql\\query.py",
    "ast_data": "FunctionDef name:bump_prefix arg:self arg:other_query arg:exclude arguments arg arg arg FunctionDef name:prefix_gen arguments Assign Assign Call Call For Call Assign Call For Call Call Assign If Compare Return return:no Assign Call For Call Call If Compare Assign If Compare Raise Call Assign Call Assign Call If Compare Assign Call Call Compare"
  },
  {
    "library": "tensorflow",
    "name": "_expand_and_tile",
    "source_code": "def _expand_and_tile(tensor, multiple, dim=0, name=None):\n    if multiple < 1:\n        raise ValueError(f'Invalid argument multiple={multiple} for expand_and_tile  call. `multiple` must be an integer > 0')\n    with ops.name_scope(name, 'expand_and_tile', (tensor, multiple, dim)) as scope:\n        tensor = sparse_tensor.convert_to_tensor_or_sparse_tensor(tensor)\n        if isinstance(tensor, sparse_tensor.SparseTensor):\n            if dim < 0:\n                expand_dims = array_ops.reshape(array_ops.size(tensor.dense_shape) + dim, [1])\n            else:\n                expand_dims = [dim]\n            expanded_shape = array_ops.concat((array_ops.slice(tensor.dense_shape, [0], expand_dims), [1], array_ops.slice(tensor.dense_shape, expand_dims, [-1])), 0, name='expanded_shape')\n            expanded = sparse_ops.sparse_reshape(tensor, shape=expanded_shape, name='expand')\n            if multiple == 1:\n                return expanded\n            return sparse_ops.sparse_concat(dim - 1 if dim < 0 else dim, [expanded] * multiple, name=scope)\n        expanded = array_ops.expand_dims(tensor, dim if dim >= 0 else dim - 1, name='expand')\n        if multiple == 1:\n            return expanded\n        ones = array_ops.ones_like(array_ops.shape(tensor))\n        tile_multiples = array_ops.concat((ones[:dim], (multiple,), ones[dim:]), 0, name='multiples')\n        return array_ops.tile(expanded, tile_multiples, name=scope)",
    "docstring": "Slice shape in 2, then tile along the sliced dimension. A new dimension is inserted in shape of before , then values are tiled times along the new dimension. Args: tensor: Input or . multiple: Integer, number of times to tile. dim: Integer, dimension along which to tile. name: Name of operation. Returns: result of expanding and tiling . Raises: ValueError: if is less than 1, or is not in .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\metrics_impl.py",
    "ast_data": "FunctionDef name:_expand_and_tile arg:tensor arg:multiple arg:dim arg:name arguments arg arg arg arg If Compare Raise Call With Call Assign Call If Call If Compare Assign Call Call Assign Assign Call Call Call Assign Call If Compare Return return:yes Return return:yes Call Compare Assign Call Compare If Compare Return return:yes Assign Call Call Assign Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "_restride_src",
    "source_code": "def _restride_src(self):\n    shape = before_shape + replacement_shape + after_shape\n    strides = list(self.stride())\n    strides[len(before_shape):len(self.shape) - len(after_shape)] = [0] * len(replacement_shape)\n    return self.as_strided(shape, strides)",
    "docstring": "This follows restride_src in TensorAdvancedIndexing.cpp",
    "type": "function",
    "file_path": "pytorch\\torch\\_meta_registrations.py",
    "ast_data": "FunctionDef name:_restride_src arg:self arguments arg Assign Assign Call Call Assign Call Call Call Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "take_grad",
    "source_code": "def take_grad(self, num_required, name=None):\n    return gen_data_flow_ops.sparse_accumulator_take_gradient(self._accumulator_ref, num_required, dtype=self._dtype, name=name)",
    "docstring": "Attempts to extract the average gradient from the accumulator. The operation blocks until sufficient number of gradients have been successfully applied to the accumulator. Once successful, the following actions are also triggered: - Counter of accumulated gradients is reset to 0. - Aggregated gradient is reset to 0 tensor. - Accumulator's internal time step is incremented by 1. Args: num_required: Number of gradients that needs to have been aggregated name: Optional name for the operation Returns: A tuple of indices, values, and shape representing the average gradient. Raises: InvalidArgumentError: If < 1",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\data_flow_ops.py",
    "ast_data": "FunctionDef name:take_grad arg:self arg:num_required arg:name arguments arg arg arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "aggregate",
    "source_code": "def aggregate(metric_values: list[float], metric_name: str):\n    if str.endswith(metric_name, '.max'):\n        return max(metric_values)\n    if str.endswith(metric_name, '.min'):\n        return min(metric_values)\n    if str.endswith(metric_name, '.sum'):\n        return sum(metric_values)\n    return metric_values[0]",
    "docstring": "Aggregates metric values using a function based on the metric name. If metric name does not match any of the known patterns, the first value from the input list is returned. Args: metric_values: list of metric values, floats metric_name: metric name Returns: aggregated metric value",
    "type": "function",
    "file_path": "tensorflow\\third_party\\xla\\xla\\backends\\gpu\\codegen\\tools\\ncu_rep_lib.py",
    "ast_data": "FunctionDef name:aggregate arg:metric_values arg:metric_name arguments arg arg If Call Return return:yes Call If Call Return return:yes Call If Call Return return:yes Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "get_pretty_env_info",
    "source_code": "def get_pretty_env_info():\n    return pretty_str(get_env_info())",
    "docstring": "Returns a pretty string of environment information. This function retrieves environment information by calling the function and then formats the information into a human-readable string. The retrieved environment information is listed in the document of . This function is used in that should be executed when reporting a bug. Returns: str: A pretty string of the environment information.",
    "type": "function",
    "file_path": "pytorch\\torch\\utils\\collect_env.py",
    "ast_data": "FunctionDef name:get_pretty_env_info arguments Return return:yes Call Call"
  },
  {
    "library": "django",
    "name": "__getitem__",
    "source_code": "def __getitem__(self, index):\n    self._checkindex(index)\n    return self._point_getter(index)",
    "docstring": "Return the coordinate sequence value at the given index.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\geos\\coordseq.py",
    "ast_data": "FunctionDef name:__getitem__ arg:self arg:index arguments arg arg Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "replaced_attrs",
    "source_code": "@property\ndef replaced_attrs(self) -> tuple[torch.Tensor, ...]:\n    assert self._replaced_attrs is not None, 'Must run ReplaceGetAttrWithPlaceholder first'\n    return self._replaced_attrs",
    "docstring": "The list of replaced weight tensors.",
    "type": "method",
    "file_path": "pytorch\\torch\\onnx\\_internal\\fx\\passes\\virtualization.py",
    "ast_data": "FunctionDef name:replaced_attrs arg:self arguments arg Compare Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_assert_eager",
    "source_code": "def _assert_eager(self):\n    if not context.executing_eagerly():\n        raise NotImplementedError('ParallelDevice is currently not supported inside `tf.function`. It can however run calls to a `tf.function` in parallel:\\n\\nwith ParallelDevice() as p:\\n  f()')",
    "docstring": "Verifies that tracing is not active.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\parallel_device\\parallel_device.py",
    "ast_data": "FunctionDef name:_assert_eager arg:self arguments arg If Call Raise Call"
  },
  {
    "library": "tensorflow",
    "name": "_transform_input_tensor",
    "source_code": "def _transform_input_tensor(self, input_tensor):\n    if not isinstance(input_tensor, sparse_tensor_lib.SparseTensor):\n        raise ValueError('SparseColumn input must be a SparseTensor.')\n    fc_utils.assert_string_or_int(input_tensor.dtype, prefix='column_name: {} input_tensor'.format(self.key))\n    if self.dtype.is_integer != input_tensor.dtype.is_integer:\n        raise ValueError('Column dtype and SparseTensors dtype must be compatible. key: {}, column dtype: {}, tensor dtype: {}'.format(self.key, self.dtype, input_tensor.dtype))\n    if self.dtype == dtypes.string:\n        sparse_values = input_tensor.values\n    else:\n        sparse_values = string_ops.as_string(input_tensor.values)\n    sparse_id_values = string_ops.string_to_hash_bucket_fast(sparse_values, self.hash_bucket_size, name='lookup')\n    return sparse_tensor_lib.SparseTensor(input_tensor.indices, sparse_id_values, input_tensor.dense_shape)",
    "docstring": "Hashes the values in the feature_column.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\feature_column\\feature_column_v2.py",
    "ast_data": "FunctionDef name:_transform_input_tensor arg:self arg:input_tensor arguments arg arg If Call Raise Call Call Call If Compare Raise Call Call If Compare Assign Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "numpy",
    "name": "union1d",
    "source_code": "@array_function_dispatch(_union1d_dispatcher)\ndef union1d(ar1, ar2):\n    return unique(np.concatenate((ar1, ar2), axis=None))",
    "docstring": "Find the union of two arrays. Return the unique, sorted array of values that are in either of the two input arrays. Parameters ---------- ar1, ar2 : array_like Input arrays. They are flattened if they are not already 1D. Returns ------- union1d : ndarray Unique, sorted union of the input arrays. Examples -------- >>> import numpy as np >>> np.union1d([-1, 0, 1], [-2, 0, 2]) array([-2, -1, 0, 1, 2]) To find the union of more than two arrays, use functools.reduce: >>> from functools import reduce >>> reduce(np.union1d, ([1, 3, 4, 3], [3, 1, 2, 1], [6, 3, 4, 2])) array([1, 2, 3, 4, 6])",
    "type": "function",
    "file_path": "numpy\\numpy\\lib\\_arraysetops_impl.py",
    "ast_data": "FunctionDef name:union1d arg:ar1 arg:ar2 arguments arg arg Return return:yes Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "_xy_helper_corrcoef",
    "source_code": "def _xy_helper_corrcoef(x_tensor, y_tensor=None, rowvar=True):\n    if y_tensor is not None:\n        ndim_extra = 2 - x_tensor.ndim\n        if ndim_extra > 0:\n            x_tensor = x_tensor.view((1,) * ndim_extra + x_tensor.shape)\n        if not rowvar and x_tensor.shape[0] != 1:\n            x_tensor = x_tensor.mT\n        x_tensor = x_tensor.clone()\n        ndim_extra = 2 - y_tensor.ndim\n        if ndim_extra > 0:\n            y_tensor = y_tensor.view((1,) * ndim_extra + y_tensor.shape)\n        if not rowvar and y_tensor.shape[0] != 1:\n            y_tensor = y_tensor.mT\n        y_tensor = y_tensor.clone()\n        x_tensor = _concatenate((x_tensor, y_tensor), axis=0)\n    return x_tensor",
    "docstring": "Prepare inputs for cov and corrcoef.",
    "type": "function",
    "file_path": "pytorch\\torch\\_numpy\\_funcs_impl.py",
    "ast_data": "FunctionDef name:_xy_helper_corrcoef arg:x_tensor arg:y_tensor arg:rowvar arguments arg arg arg If Compare Assign If Compare Assign Call If BoolOp Compare Assign Assign Call Assign If Compare Assign Call If BoolOp Compare Assign Assign Call Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "with_dependencies",
    "source_code": "def with_dependencies(dependencies, output_tensor, name=None):\n    if context.executing_eagerly():\n        return output_tensor\n    with ops.name_scope(name, 'control_dependency', list(dependencies) + [output_tensor]) as name:\n        with ops.colocate_with(output_tensor):\n            with ops.control_dependencies(dependencies):\n                output_tensor = ops.convert_to_tensor_or_composite(output_tensor)\n                if isinstance(output_tensor, indexed_slices.IndexedSlices):\n                    return indexed_slices.IndexedSlices(_Identity(output_tensor.values, name=name), output_tensor.indices, output_tensor.dense_shape)\n                else:\n                    return _Identity(output_tensor, name=name)",
    "docstring": "Produces the content of only after . In some cases, a user may want the output of an operation to be consumed externally only after some other dependencies have run first. This function ensures returns , but only after all operations in have run. Note that this means that there is no guarantee that will be evaluated after any have run. See also and . Args: dependencies: Iterable of operations to run before this op finishes. output_tensor: A or that will be returned. name: (Optional) A name for this operation. Returns: Same as . Raises: TypeError: if is not a or .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\control_flow_ops.py",
    "ast_data": "FunctionDef name:with_dependencies arg:dependencies arg:output_tensor arg:name arguments arg arg arg If Call Return return:yes With Call Call With Call With Call Assign Call If Call Return return:yes Call Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "_modify_graph_op_device",
    "source_code": "def _modify_graph_op_device(gm: torch.fx.GraphModule, new_device: torch.device):\n    modified = False\n    for node in gm.graph.nodes:\n        if node.op == 'call_function':\n            if 'device' in node.kwargs and node.kwargs['device'] != new_device:\n                logger.debug(f'Changing device of Node {node.name} from {node.kwargs['device']} to {new_device}')\n                node.update_kwarg('device', new_device)\n                modified = True\n        elif node.op == 'call_module':\n            submod = gm.get_submodule(node.target)\n            if isinstance(submod, torch.fx.GraphModule):\n                _modify_graph_op_device(submod, new_device)\n            elif isinstance(submod, InterpreterModule):\n                _modify_graph_op_device(submod.graph_module, new_device)\n            else:\n                logger.warning(f'Skipping device modification for submodule {node.target} because it is a {type(submod)}')\n    if modified:\n        gm.recompile()",
    "docstring": "Modify the device argument of all \"call_function\" nodes in the graph. This is useful for moving the graph to a different device. In particular for generator ops, like torch.ones.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\pipelining\\_IR.py",
    "ast_data": "FunctionDef name:_modify_graph_op_device arg:gm arg:new_device arguments arg arg Assign For If Compare If BoolOp Compare Compare Call Call Assign If Compare Assign Call If Call Call If Call Call Call Call If Call"
  },
  {
    "library": "pandas",
    "name": "groups",
    "source_code": "@cache_readonly\ndef groups(self) -> dict[Hashable, Index]:\n    if len(self.groupings) == 1:\n        return self.groupings[0].groups\n    result_index, ids = self.result_index_and_ids\n    values = result_index._values\n    categories = Categorical(ids, categories=range(len(result_index)))\n    result = {values[group]: self.axis.take(axis_ilocs) for group, axis_ilocs in categories._reverse_indexer().items()}\n    return result",
    "docstring": "dict {group name -> group labels}",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\groupby\\ops.py",
    "ast_data": "FunctionDef name:groups arg:self arguments arg If Compare Call Return return:yes Assign Assign Assign Call Call Call Assign Call Call Call Return return:yes"
  },
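
The mapping this internal property builds is what surfaces publicly as `DataFrame.groupby(...).groups`:

```python
import pandas as pd

df = pd.DataFrame({"key": ["a", "b", "a"], "val": [1, 2, 3]})
# dict of {group name -> Index of row labels}, as the docstring describes.
print(df.groupby("key").groups)  # {'a': [0, 2], 'b': [1]}
```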
  {
    "library": "tensorflow",
    "name": "_static_check_for_broadcastable_batch_shape",
    "source_code": "def _static_check_for_broadcastable_batch_shape(operators):\n    if len(operators) < 2:\n        return\n    batch_shape = operators[0].batch_shape\n    for op in operators[1:]:\n        batch_shape = array_ops.broadcast_static_shape(batch_shape, op.batch_shape)",
    "docstring": "ValueError if operators determined to have non-broadcastable shapes.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\linalg\\linear_operator_addition.py",
    "ast_data": "FunctionDef name:_static_check_for_broadcastable_batch_shape arg:operators arguments arg If Compare Call Return return:no Assign For Assign Call"
  },
  {
    "library": "django",
    "name": "_add_to_cache",
    "source_code": "def _add_to_cache(self, using, ct):\n    key = (ct.app_label, ct.model)\n    self._cache.setdefault(using, {})[key] = ct\n    self._cache.setdefault(using, {})[ct.id] = ct",
    "docstring": "Insert a ContentType into the cache.",
    "type": "method",
    "file_path": "django\\django\\contrib\\contenttypes\\models.py",
    "ast_data": "FunctionDef name:_add_to_cache arg:self arg:using arg:ct arguments arg arg arg Assign Assign Call Assign Call"
  },
  {
    "library": "tensorflow",
    "name": "_padding_values_or_default",
    "source_code": "def _padding_values_or_default(padding_values, input_dataset):\n\n    def make_zero(t):\n        if t.base_dtype == dtypes.string:\n            return ''\n        elif t.base_dtype == dtypes.variant:\n            raise TypeError(\"Unable to create default padding value for a component of type 'variant'.\")\n        elif t.base_dtype == dtypes.bfloat16:\n            return constant_op.constant(0, dtype=dtypes.bfloat16)\n        else:\n            return np.zeros_like(t.as_numpy_dtype())\n\n    def value_or_default(value, default):\n        return default if value is None else value\n    default_padding = nest.map_structure(make_zero, dataset_ops.get_legacy_output_types(input_dataset))\n    return nest.map_structure_up_to(padding_values, value_or_default, padding_values, default_padding)",
    "docstring": "Returns padding values with None elements replaced with default values.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\data\\ops\\padded_batch_op.py",
    "ast_data": "FunctionDef name:_padding_values_or_default arg:padding_values arg:input_dataset arguments arg arg FunctionDef name:make_zero arg:t arguments arg If Compare Return return:yes If Compare Raise Call If Compare Return return:yes Call Return return:yes Call Call FunctionDef name:value_or_default arg:value arg:default arguments arg arg Return return:yes Compare Assign Call Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_check_async_thread_error",
    "source_code": "def _check_async_thread_error(self):\n    if self._async_error:\n        e = self._async_error\n        self._async_error = None\n        logging.error('Propagating the most recent error from the async thread before joining: %s', str(e))\n        raise e",
    "docstring": "Expose the most recent error from the async saving thread to the caller.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\checkpoint\\async_checkpoint_helper.py",
    "ast_data": "FunctionDef name:_check_async_thread_error arg:self arguments arg If Assign Assign Call Call Raise"
  },
  {
    "library": "tensorflow",
    "name": "build",
    "source_code": "@trackable.no_automatic_dependency_tracking\n@generic_utils.default\ndef build(self, input_shape):\n    if not hasattr(self.build, '_is_default'):\n        self._build_input_shape = input_shape\n    self.built = True",
    "docstring": "Creates the variables of the layer (optional, for subclass implementers). This is a method that implementers of subclasses of or can override if they need a state-creation step in-between layer instantiation and layer call. This is typically used to create the weights of subclasses. Args: input_shape: Instance of , or list of instances of if the layer expects a list of inputs (one instance per input).",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\base_layer_v1.py",
    "ast_data": "FunctionDef name:build arg:self arg:input_shape arguments arg arg If Call Assign Assign"
  },
  {
    "library": "scipy",
    "name": "func",
    "source_code": "def func(data, A0, A1, A2, A3, A4, A5):\n    a = data['a']\n    b = data['b']\n    x = data['x']\n    return A0 * b * np.exp(-0.5 * a) + np.exp(A1 + 1 / (1 + a) * np.log(x) - A2 * np.exp(-A3 * a) + A4 / (1 + np.exp(A5 * a)))",
    "docstring": "Compute parametric function to fit.",
    "type": "function",
    "file_path": "scipy\\scipy\\special\\_precompute\\wright_bessel.py",
    "ast_data": "FunctionDef name:func arg:data arg:A0 arg:A1 arg:A2 arg:A3 arg:A4 arg:A5 arguments arg arg arg arg arg arg arg Assign Assign Assign Return return:yes Call Call Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "shape",
    "source_code": "@property\ndef shape(self):\n    return self._dense_shape_default",
    "docstring": "Get the representing the shape of the dense tensor. Returns: A object.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\sparse_tensor.py",
    "ast_data": "FunctionDef name:shape arg:self arguments arg Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "contains_path",
    "source_code": "def contains_path(self, path, transform=None):\n    if transform is not None:\n        transform = transform.frozen()\n    return _path.path_in_path(self, None, path, transform)",
    "docstring": "Return whether this (closed) path completely contains the given path. If *transform* is not ``, the path will be transformed before checking for containment.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\path.py",
    "ast_data": "FunctionDef name:contains_path arg:self arg:path arg:transform arguments arg arg arg If Compare Assign Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_KroneckerProduct",
    "source_code": "def _KroneckerProduct(b1, b2):\n    b1_shape = array_ops.shape(b1)\n    b2_shape = array_ops.shape(b2)\n    b1_order = b1_shape[-1]\n    b2_order = b2_shape[-1]\n    shape_slice_size = [math_ops.subtract(array_ops.size(b1_shape), 2)]\n    shape_slice = array_ops.slice(b1_shape, [0], shape_slice_size)\n    b1_reshape_shape = array_ops.concat([shape_slice, [b1_order], [1], [b1_order], [1]], 0)\n    b2_reshape_shape = array_ops.concat([shape_slice, [1], [b2_order], [1], [b2_order]], 0)\n    b1_reshape = array_ops.reshape(b1, b1_reshape_shape)\n    b2_reshape = array_ops.reshape(b2, b2_reshape_shape)\n    order_prod = b1_order * b2_order\n    kprod_shape = array_ops.concat([shape_slice, [order_prod], [order_prod]], 0)\n    return array_ops.reshape(b1_reshape * b2_reshape, kprod_shape)",
    "docstring": "Computes the Kronecker product of two batches of square matrices.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\linalg_grad.py",
    "ast_data": "FunctionDef name:_KroneckerProduct arg:b1 arg:b2 arguments arg arg Assign Call Assign Call Assign Assign Assign Call Call Assign Call Assign Call Assign Call Assign Call Assign Call Assign Assign Call Return return:yes Call"
  },
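
A NumPy sketch of the same reshape-and-broadcast trick `_KroneckerProduct` uses for batches of square matrices; the helper name is illustrative, not TensorFlow API:

```python
import numpy as np

def batched_kron(b1, b2):
    # b1: (..., n, n), b2: (..., m, m) with matching batch shapes.
    *batch, n, _ = b1.shape
    *_, m, _ = b2.shape
    left = b1.reshape(*batch, n, 1, n, 1)    # insert size-1 axes for b2's indices
    right = b2.reshape(*batch, 1, m, 1, m)   # insert size-1 axes for b1's indices
    # Broadcasting gives prod[..., i, k, j, l] = b1[..., i, j] * b2[..., k, l].
    return (left * right).reshape(*batch, n * m, n * m)

a, b = np.random.rand(5, 2, 2), np.random.rand(5, 3, 3)
print(np.allclose(batched_kron(a, b)[0], np.kron(a[0], b[0])))  # True
```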
  {
    "library": "pytorch",
    "name": "L1Unstructured",
    "source_code": "class L1Unstructured(BasePruningMethod):\n    PRUNING_TYPE = 'unstructured'\n\n    def __init__(self, amount):\n        _validate_pruning_amount_init(amount)\n        self.amount = amount\n\n    def compute_mask(self, t, default_mask):\n        tensor_size = t.nelement()\n        nparams_toprune = _compute_nparams_toprune(self.amount, tensor_size)\n        _validate_pruning_amount(nparams_toprune, tensor_size)\n        mask = default_mask.clone(memory_format=torch.contiguous_format)\n        if nparams_toprune != 0:\n            topk = torch.topk(torch.abs(t).view(-1), k=nparams_toprune, largest=False)\n            mask.view(-1)[topk.indices] = 0\n        return mask\n\n    @classmethod\n    def apply(cls, module, name, amount, importance_scores=None):\n        return super().apply(module, name, amount=amount, importance_scores=importance_scores)",
    "docstring": "Prune (currently unpruned) units in a tensor by zeroing out the ones with the lowest L1-norm. Args: amount (int or float): quantity of parameters to prune. If ``, it represents the absolute number of parameters to prune.",
    "type": "class",
    "file_path": "pytorch\\torch\\nn\\utils\\prune.py",
    "ast_data": "ClassDef name:L1Unstructured Assign FunctionDef name:__init__ arg:self arg:amount arguments arg arg Call Assign FunctionDef name:compute_mask arg:self arg:t arg:default_mask arguments arg arg arg Assign Call Assign Call Call Assign Call If Compare Assign Call Call Call Assign Call Return return:yes FunctionDef name:apply arg:cls arg:module arg:name arg:amount arg:importance_scores arguments arg arg arg arg arg Return return:yes Call Call"
  },
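
In practice this class is usually reached through the functional wrapper `torch.nn.utils.prune.l1_unstructured`:

```python
import torch
import torch.nn.utils.prune as prune

lin = torch.nn.Linear(4, 2)
# Zero out the 50% of weights with the smallest absolute value.
prune.l1_unstructured(lin, name="weight", amount=0.5)
print(float((lin.weight == 0).float().mean()))  # ~0.5
print(list(dict(lin.named_buffers())))          # ['weight_mask']
```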
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, hooks=None, scaffold=None, master='', config=None, checkpoint_dir=None, stop_grace_period_secs=120, checkpoint_filename_with_path=None):\n    session_creator = ChiefSessionCreator(scaffold=scaffold, master=master, config=config, checkpoint_dir=checkpoint_dir, checkpoint_filename_with_path=checkpoint_filename_with_path)\n    super(SingularMonitoredSession, self).__init__(session_creator, hooks, should_recover=False, stop_grace_period_secs=stop_grace_period_secs)",
    "docstring": "Creates a SingularMonitoredSession. Args: hooks: An iterable of ScaffoldStringConfigProtoclose()` has been called. checkpoint_filename_with_path: A string. Optional path to a checkpoint file from which to restore variables.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\training\\monitored_session.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:hooks arg:scaffold arg:master arg:config arg:checkpoint_dir arg:stop_grace_period_secs arg:checkpoint_filename_with_path arguments arg arg arg arg arg arg arg arg Assign Call Call Call"
  },
  {
    "library": "django",
    "name": "deconstruct",
    "source_code": "def deconstruct(obj):\n    if path and type(obj) is klass:\n        module_name, _, name = path.rpartition('.')\n    else:\n        module_name = obj.__module__\n        name = obj.__class__.__name__\n    module = import_module(module_name)\n    if not hasattr(module, name):\n        raise ValueError('Could not find object %s in %s.\\nPlease note that you cannot serialize things like inner classes. Please move the object into the main module body to use migrations.\\nFor more information, see https://docs.djangoproject.com/en/%s/topics/migrations/#serializing-values' % (name, module_name, get_docs_version()))\n    return (path if path and type(obj) is klass else f'{obj.__class__.__module__}.{name}', obj._constructor_args[0], obj._constructor_args[1])",
    "docstring": "Return a 3-tuple of class import path, positional arguments, and keyword arguments.",
    "type": "function",
    "file_path": "django\\django\\utils\\deconstruct.py",
    "ast_data": "FunctionDef name:deconstruct arg:obj arguments arg If BoolOp Compare Call Assign Call Assign Assign Assign Call If Call Raise Call Call Return return:yes BoolOp Compare Call"
  },
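
This `deconstruct` is normally attached by Django's `@deconstructible` decorator; a minimal sketch (the `Greeter` class is hypothetical):

```python
from django.utils.deconstruct import deconstructible

@deconstructible
class Greeter:
    def __init__(self, greeting="hi"):
        self.greeting = greeting

# deconstruct() returns (import path, positional args, keyword args) so that
# migrations can serialize and later reconstruct the instance.
path, args, kwargs = Greeter(greeting="hello").deconstruct()
print(path, args, kwargs)  # __main__.Greeter () {'greeting': 'hello'}
```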
  {
    "library": "authlib",
    "name": "prepare_request_uri_query",
    "source_code": "def prepare_request_uri_query(oauth_params, uri):\n    sch, net, path, par, query, fra = urlparse.urlparse(uri)\n    query = url_encode(_append_params(oauth_params, extract_params(query) or []))\n    return urlparse.urlunparse((sch, net, path, par, query, fra))",
    "docstring": "Prepare the Request URI Query. Per _ of the spec. .. _:",
    "type": "function",
    "file_path": "authlib\\authlib\\oauth1\\rfc5849\\parameters.py",
    "ast_data": "FunctionDef name:prepare_request_uri_query arg:oauth_params arg:uri arguments arg arg Assign Call Assign Call Call BoolOp Call Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "size",
    "source_code": "@property\ndef size(self) -> int:\n    return self._getnnz()",
    "docstring": "Number of stored values. See also -------- count_nonzero : Number of non-zero values.",
    "type": "method",
    "file_path": "scipy\\scipy\\sparse\\_base.py",
    "ast_data": "FunctionDef name:size arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "get_params",
    "source_code": "def get_params(self, deep=True):\n    return {'estimator': self.estimator}",
    "docstring": "Get parameters for this estimator. Returns a dict. The parameters of the inner estimator are not included. Parameters ---------- deep : bool, default=True Ignored. Returns ------- params : dict Parameter names mapped to their values.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\frozen\\_frozen.py",
    "ast_data": "FunctionDef name:get_params arg:self arg:deep arguments arg arg Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "resolve_backend",
    "source_code": "def resolve_backend(self, backend):\n    if isinstance(backend, str):\n        if not backend.startswith('module://'):\n            backend = backend.lower()\n    else:\n        from matplotlib import get_backend\n        backend = get_backend()\n    gui = self._BUILTIN_BACKEND_TO_GUI_FRAMEWORK.get(backend) or self._backend_to_gui_framework.get(backend)\n    if gui is None and isinstance(backend, str) and backend.startswith('module://'):\n        gui = 'unknown'\n    if gui is None and (not self._loaded_entry_points):\n        self._ensure_entry_points_loaded()\n        gui = self._backend_to_gui_framework.get(backend)\n    if gui == 'unknown':\n        gui = self._get_gui_framework_by_loading(backend)\n        self._backend_to_gui_framework[backend] = gui\n    if gui is None:\n        raise RuntimeError(f\"'{backend}' is not a recognised backend name\")\n    return (backend, gui if gui != 'headless' else None)",
    "docstring": "Return the backend and GUI framework for the specified backend name. If the GUI framework is not yet known then it will be determined by loading the backend module and checking the `` format. Parameters ---------- backend : str or None Name of backend, or None to use the default backend. Returns ------- backend : str The backend name. framework : str or None The GUI framework, which will be None for a backend that is non-interactive.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\backends\\registry.py",
    "ast_data": "FunctionDef name:resolve_backend arg:self arg:backend arguments arg arg If Call If Call Assign Call Assign Call Assign BoolOp Call Call If BoolOp Compare Call Call Assign If BoolOp Compare Call Assign Call If Compare Assign Call Assign If Compare Raise Call Return return:yes Compare"
  },
  {
    "library": "pytorch",
    "name": "isend",
    "source_code": "def isend(tensor: torch.Tensor, dst: Optional[int]=None, group: Optional[ProcessGroup]=None, tag: int=0, group_dst: Optional[int]=None) -> Optional[Work]:\n    group = _group_or_default_group(group)\n    group_dst = _canonicalize_group_rank(group, dst, group_dst)\n    _check_single_tensor(tensor, 'tensor')\n    if _rank_not_in_group(group):\n        _warn_not_in_group('isend')\n        return None\n    if tensor.is_complex():\n        tensor = torch.view_as_real(tensor)\n    return group.send([tensor], group_dst, tag)",
    "docstring": "Send a tensor asynchronously. .. warning:: Modifying `` Returns: A distributed request object. None, if not part of the group",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\distributed_c10d.py",
    "ast_data": "FunctionDef name:isend arg:tensor arg:dst arg:group arg:tag arg:group_dst arguments arg arg arg arg arg Assign Call Assign Call Call If Call Call Return return:no If Call Assign Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "reset",
    "source_code": "def reset(self):\n    self.head = None\n    self.errors = set()\n    self.node_index = {}\n    self.leaves = set()\n    self.active_stmts = set()\n    self.owners = {}\n    self.forward_edges = set()\n    self.finally_sections = {}\n    self.finally_section_subgraphs = {}\n    self.finally_section_has_direct_flow = {}\n    self.pending_finally_sections = set()\n    self.exits = {}\n    self.section_entry = {}\n    self.continues = {}\n    self.raises = {}\n    self.cond_entry = {}\n    self.cond_leaves = {}",
    "docstring": "Resets the state of this factory.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\autograph\\pyct\\cfg.py",
    "ast_data": "FunctionDef name:reset arg:self arguments arg Assign Assign Call Assign Assign Call Assign Call Assign Assign Call Assign Assign Assign Assign Call Assign Assign Assign Assign Assign Assign"
  },
  {
    "library": "scikit-learn",
    "name": "predict_proba",
    "source_code": "def predict_proba(self, raw_prediction):\n    if raw_prediction.ndim == 2 and raw_prediction.shape[1] == 1:\n        raw_prediction = raw_prediction.squeeze(1)\n    proba = np.empty((raw_prediction.shape[0], 2), dtype=raw_prediction.dtype)\n    proba[:, 1] = self.link.inverse(raw_prediction)\n    proba[:, 0] = 1 - proba[:, 1]\n    return proba",
    "docstring": "Predict probabilities. Parameters ---------- raw_prediction : array of shape (n_samples,) or (n_samples, 1) Raw prediction values (in link space). Returns ------- proba : array of shape (n_samples, 2) Element-wise class probabilities.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\_loss\\loss.py",
    "ast_data": "FunctionDef name:predict_proba arg:self arg:raw_prediction arguments arg arg If BoolOp Compare Compare Assign Call Assign Call Assign Call Assign Return return:yes"
  },
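A standalone sketch of the same probability assembly, assuming a sigmoid stands in for self.link.inverse (as it does for binary log loss):

import numpy as np

def predict_proba_from_logits(raw_prediction: np.ndarray) -> np.ndarray:
    if raw_prediction.ndim == 2 and raw_prediction.shape[1] == 1:
        raw_prediction = raw_prediction.squeeze(1)
    proba = np.empty((raw_prediction.shape[0], 2), dtype=raw_prediction.dtype)
    proba[:, 1] = 1.0 / (1.0 + np.exp(-raw_prediction))  # P(y=1) via sigmoid
    proba[:, 0] = 1.0 - proba[:, 1]                      # P(y=0) is the complement
    return proba

print(predict_proba_from_logits(np.array([0.0, 2.0])))  # each row sums to 1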
  {
    "library": "pytorch",
    "name": "variables",
    "source_code": "@contextmanager\ndef variables(*variables):\n    old_global_logic_variables = _global_logic_variables.copy()\n    _global_logic_variables.update(set(variables))\n    try:\n        yield\n    finally:\n        _global_logic_variables.clear()\n        _global_logic_variables.update(old_global_logic_variables)",
    "docstring": "Context manager for logic variables Example: >>> # xdoctest: +SKIP(\"undefined vars\") >>> from __future__ import with_statement >>> with variables(1): ... print(isvar(1)) True >>> print(isvar(1)) False >>> # Normal approach >>> from unification import unify >>> x = var(\"x\") >>> unify(x, 1) {~x: 1} >>> # Context Manager approach >>> with variables(\"x\"): ... print(unify(\"x\", 1)) {'x': 1}",
    "type": "function",
    "file_path": "pytorch\\torch\\fx\\experimental\\unification\\variable.py",
    "ast_data": "FunctionDef name:variables arguments arg Assign Call Call Call Try Call Call"
  },
  {
    "library": "pytorch",
    "name": "power_draw",
    "source_code": "def power_draw(device: Optional[Union[Device, int]]=None) -> int:\n    if not torch.version.hip:\n        handle = _get_pynvml_handler(device)\n        return pynvml.nvmlDeviceGetPowerUsage(handle)\n    else:\n        return _get_amdsmi_power_draw(device)",
    "docstring": "Return the average power draw of the GPU sensor in mW (MilliWatts) over the past sample period as given by for Fermi or newer fully supported devices. Args: device (torch.device or int, optional): selected device. Returns statistic for the current device, given by :func:, if :attr: is `` (default). Warning: Each sample period may be between 1 second and 1/6 second, depending on the product being queried.",
    "type": "function",
    "file_path": "pytorch\\torch\\cuda\\__init__.py",
    "ast_data": "FunctionDef name:power_draw arg:device arguments arg If Assign Call Return return:yes Call Return return:yes Call"
  },
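Usage is a one-liner, assuming an NVIDIA GPU with pynvml available (or a ROCm build, which takes the amdsmi path):

import torch

if torch.cuda.is_available():
    mw = torch.cuda.power_draw(0)            # reported in milliwatts
    print(f'GPU 0 draws ~{mw / 1000:.1f} W')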
  {
    "library": "matplotlib",
    "name": "lookup_extent",
    "source_code": "def lookup_extent(origin):\n    if origin == 'lower':\n        return (-0.5, 6.5, -0.5, 5.5)\n    else:\n        return (-0.5, 6.5, 5.5, -0.5)",
    "docstring": "Return extent for label positioning when not given explicitly.",
    "type": "function",
    "file_path": "matplotlib\\galleries\\users_explain\\artists\\imshow_extent.py",
    "ast_data": "FunctionDef name:lookup_extent arg:origin arguments arg If Compare Return return:yes Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "_AxesStack",
    "source_code": "class _AxesStack:\n\n    def __init__(self):\n        self._axes = {}\n        self._counter = itertools.count()\n\n    def as_list(self):\n        return [*self._axes]\n\n    def remove(self, a):\n        self._axes.pop(a)\n\n    def bubble(self, a):\n        if a not in self._axes:\n            raise ValueError('Axes has not been added yet')\n        self._axes[a] = next(self._counter)\n\n    def add(self, a):\n        if a not in self._axes:\n            self._axes[a] = next(self._counter)\n\n    def current(self):\n        return max(self._axes, key=self._axes.__getitem__, default=None)\n\n    def __getstate__(self):\n        return {**vars(self), '_counter': max(self._axes.values(), default=0)}\n\n    def __setstate__(self, state):\n        next_counter = state.pop('_counter')\n        vars(self).update(state)\n        self._counter = itertools.count(next_counter)",
    "docstring": "Helper class to track Axes in a figure. Axes are tracked both in the order in which they have been added (`` dict).",
    "type": "class",
    "file_path": "matplotlib\\lib\\matplotlib\\figure.py",
    "ast_data": "ClassDef name:_AxesStack FunctionDef name:__init__ arg:self arguments arg Assign Assign Call FunctionDef name:as_list arg:self arguments arg Return return:yes FunctionDef name:remove arg:self arg:a arguments arg arg Call FunctionDef name:bubble arg:self arg:a arguments arg arg If Compare Raise Call Assign Call FunctionDef name:add arg:self arg:a arguments arg arg If Compare Assign Call FunctionDef name:current arg:self arguments arg Return return:yes Call FunctionDef name:__getstate__ arg:self arguments arg Return return:yes Call Call Call FunctionDef name:__setstate__ arg:self arg:state arguments arg arg Assign Call Call Call Assign Call"
  },
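A quick behavioral check of the stack semantics, assuming the _AxesStack class above is in scope (it is private to matplotlib.figure); plain objects stand in for Axes:

stack = _AxesStack()
a, b = object(), object()
stack.add(a)
stack.add(b)
assert stack.current() is b   # most recently added is current
stack.bubble(a)               # re-promote a to the top
assert stack.current() is a
stack.remove(a)
assert stack.current() is b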
  {
    "library": "kornia",
    "name": "get_submodule",
    "source_code": "def get_submodule(self, target: str) -> Module:\n    if len(target) == 0:\n        return self\n    atoms: List[str] = target.split('.')\n    mod = self\n    for item in atoms:\n        if not hasattr(mod, item):\n            raise AttributeError(mod._get_name() + ' has no attribute `' + item + '`')\n        mod = getattr(mod, item)\n        if not isinstance(mod, Module):\n            raise AttributeError('`' + item + '` is not an Module')\n    return mod",
    "docstring": "Get submodule. This code is taken from torch 1.9.0 since it is not introduced back to torch 1.7.1. We included this for maintaining more backward torch versions. Args: target: The fully-qualified string name of the submodule to look for. (See above example for how to specify a fully-qualified string.) Returns: Module: The submodule referenced by ``",
    "type": "method",
    "file_path": "kornia\\kornia\\augmentation\\container\\base.py",
    "ast_data": "FunctionDef name:get_submodule arg:self arg:target arguments arg arg If Compare Call Return return:yes Call Assign For If Call Raise Call Call Assign Call If Call Raise Call Return return:yes"
  },
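The same lookup is available as torch.nn.Module.get_submodule in torch >= 1.9; a small usage sketch:

import torch.nn as nn

model = nn.Sequential(nn.Linear(4, 3), nn.ReLU())
# Children of nn.Sequential are named '0', '1', ... in insertion order.
print(model.get_submodule('0'))   # Linear(in_features=4, out_features=3, bias=True)
print(model.get_submodule(''))    # empty target returns the module itself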
  {
    "library": "pytorch",
    "name": "DynamoFlattenOutputStep",
    "source_code": "class DynamoFlattenOutputStep(io_adapter.FlattenOutputStep):\n\n    def __init__(self, pytree_extension_context: _PyTreeExtensionContext | None=None):\n        super().__init__()\n        self._pytree_extension_context = pytree_extension_context or _PyTreeExtensionContext()\n\n    def apply(self, model_outputs: Any, model: torch.nn.Module | Callable | torch_export.ExportedProgram | None=None) -> Sequence[Any]:\n        with self._pytree_extension_context:\n            return super().apply(model_outputs, model=model)",
    "docstring": "Flatten nested collection and custom python types and return a flat list of elements. Extended from :class: to support flattening arbitrary types via pytree extension. By default this supports many common user defined python types such as :class: from HuggingFace transformers. The pytree extension can be customized by passing in a `_PyTreeExtensionContext.register_pytree_node`.",
    "type": "class",
    "file_path": "pytorch\\torch\\onnx\\_internal\\fx\\dynamo_graph_extractor.py",
    "ast_data": "ClassDef name:DynamoFlattenOutputStep FunctionDef name:__init__ arg:self arg:pytree_extension_context arguments arg arg Call Call Assign BoolOp Call FunctionDef name:apply arg:self arg:model_outputs arg:model arguments arg arg arg With Return return:yes Call Call"
  },
  {
    "library": "pandas",
    "name": "construct_array_type",
    "source_code": "@classmethod\ndef construct_array_type(cls) -> type_t[ArrowExtensionArray]:\n    from pandas.core.arrays.arrow import ArrowExtensionArray\n    return ArrowExtensionArray",
    "docstring": "Return the array type associated with this dtype. Returns ------- type",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\dtypes\\dtypes.py",
    "ast_data": "FunctionDef name:construct_array_type arg:cls arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "training_arg_passed_to_call",
    "source_code": "def training_arg_passed_to_call(argspec, args, kwargs):\n    full_args = dict(zip(argspec.args[2:], args))\n    full_args.update(kwargs)\n    return 'training' in full_args and full_args['training'] is not None",
    "docstring": "Returns whether a user passed the argument in .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\base_layer_utils.py",
    "ast_data": "FunctionDef name:training_arg_passed_to_call arg:argspec arg:args arg:kwargs arguments arg arg arg Assign Call Call Call Return return:yes BoolOp Compare Compare"
  },
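A sketch of how the helper behaves, assuming the function above is in scope and using a hypothetical layer call signature:

import inspect

def call(self, inputs, training=None, mask=None):
    pass

argspec = inspect.getfullargspec(call)
# Positional args after (self, inputs) are matched by name, then kwargs merged.
print(training_arg_passed_to_call(argspec, args=(True,), kwargs={}))             # True
print(training_arg_passed_to_call(argspec, args=(), kwargs={'training': None}))  # False: explicit None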
  {
    "library": "pytorch",
    "name": "can_codegen_without_upcasts",
    "source_code": "def can_codegen_without_upcasts(prologue: 'SchedulerNode', disallow_fp32_ops: bool=False) -> bool:\n    if prologue.get_operation_names() <= V.graph.low_precision_codegen_ops:\n        return True\n    low_prec_analysis = RecordLowPrecisionOps(disallow_fp32_ops)\n    with config.patch('triton.codegen_upcast_to_fp32', False), V.set_ops_handler(low_prec_analysis):\n        prologue._body(*prologue.get_ranges())\n    return not low_prec_analysis.low_precision_numeric_op",
    "docstring": "Can this prologue be run without while preserving numerics. This is only true if the node only contains dtype conversions, indexing, and other non-arithmetic operators. If disallow_fp32_ops is True, then we also disallow ops that are explicitly computed in fp32 or fp64.",
    "type": "function",
    "file_path": "pytorch\\torch\\_inductor\\analyze_preserves_zero_mask.py",
    "ast_data": "FunctionDef name:can_codegen_without_upcasts arg:prologue arg:disallow_fp32_ops arguments arg arg If Compare Call Return return:yes Assign Call With Call Call Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "moments_v2",
    "source_code": "@tf_export('nn.moments', v1=[])\n@dispatch.add_dispatch_support\ndef moments_v2(x, axes, shift=None, keepdims=False, name=None):\n    return moments(x=x, axes=axes, shift=shift, name=name, keep_dims=keepdims)",
    "docstring": "Calculates the mean and variance of . The mean and variance are calculated by aggregating the contents of across . If is 1-D and this is just the mean and variance of a vector. Note: shift is currently not used; the true mean is computed and used. When using these moments for batch normalization (see ): * for so-called \"global normalization\", used with convolutional filters with shape , pass . * for simple batch normalization pass (batch only). Args: x: A . axes: Array of ints. Axes along which to compute mean and variance. shift: Not used in the current implementation. keepdims: produce moments with the same dimensionality as the input. name: Name used to scope the operations that compute the moments. Returns: Two objects: and .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\nn_impl.py",
    "ast_data": "FunctionDef name:moments_v2 arg:x arg:axes arg:shift arg:keepdims arg:name arguments arg arg arg arg arg Return return:yes Call Call"
  },
  {
    "library": "numpy",
    "name": "check_libs2",
    "source_code": "def check_libs2(self, lib_dirs, libs, opt_libs=[]):\n    exts = self.library_extensions()\n    info = self._check_libs(lib_dirs, libs, opt_libs, exts)\n    if not info:\n        log.info('  libraries %s not found in %s', ','.join(libs), lib_dirs)\n    return info",
    "docstring": "If static or shared libraries are available then return their info dictionary. Checks each library for shared or static.",
    "type": "method",
    "file_path": "numpy\\numpy\\distutils\\system_info.py",
    "ast_data": "FunctionDef name:check_libs2 arg:self arg:lib_dirs arg:libs arg:opt_libs arguments arg arg arg arg Assign Call Assign Call If Call Call Return return:yes"
  },
  {
    "library": "django",
    "name": "listen_for_disconnect",
    "source_code": "async def listen_for_disconnect(self, receive):\n    message = await receive()\n    if message['type'] == 'http.disconnect':\n        raise RequestAborted()\n    assert False, 'Invalid ASGI message after request body: %s' % message['type']",
    "docstring": "Listen for disconnect from the client.",
    "type": "method",
    "file_path": "django\\django\\core\\handlers\\asgi.py",
    "ast_data": "AsyncFunctionDef name:listen_for_disconnect arg:self arg:receive arguments arg arg Assign Call If Compare Raise Call"
  },
  {
    "library": "tensorflow",
    "name": "convert_saved_model",
    "source_code": "@tf_export('mlir.experimental.convert_saved_model')\ndef convert_saved_model(saved_model_path, exported_names, show_debug_info=False):\n    return pywrap_mlir.experimental_convert_saved_model_to_mlir(saved_model_path, exported_names, show_debug_info)",
    "docstring": "Converts a SavedModel to MLIR module. Args: saved_model_path: Path to SavedModel. exported_names: Names to export. show_debug_info: Whether to include locations in the emitted textual form. Returns: A textual representation of the MLIR module corresponding to the SavedModel.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\compiler\\mlir\\mlir.py",
    "ast_data": "FunctionDef name:convert_saved_model arg:saved_model_path arg:exported_names arg:show_debug_info arguments arg arg arg Return return:yes Call Call"
  },
  {
    "library": "matplotlib",
    "name": "__init__",
    "source_code": "def __init__(self, axis=None, use_rmin=True, *, apply_theta_transforms=True, scale_transform=None):\n    super().__init__()\n    self._axis = axis\n    self._use_rmin = use_rmin\n    self._apply_theta_transforms = apply_theta_transforms\n    self._scale_transform = scale_transform\n    if apply_theta_transforms:\n        _apply_theta_transforms_warn()",
    "docstring": "Parameters ---------- axis : , optional Axis associated with this transform. This is used to get the minimum radial limit. use_rmin : , optional If ``, subtract the minimum radial axis limit before transforming to Cartesian coordinates. *axis* must also be specified for this to take effect.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\projections\\polar.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:axis arg:use_rmin arguments arg arg arg arg arg Call Call Assign Assign Assign Assign If Call"
  },
  {
    "library": "pytorch",
    "name": "ones",
    "source_code": "def ones(sharding_spec: ShardingSpec, *size, dtype=None, layout=torch.strided, requires_grad=False, pin_memory=False, memory_format=torch.contiguous_format, process_group=None, init_rrefs=False) -> ShardedTensor:\n    return full(sharding_spec, size, fill_value=1, dtype=dtype, layout=layout, requires_grad=requires_grad, pin_memory=pin_memory, memory_format=memory_format, process_group=process_group, init_rrefs=init_rrefs)",
    "docstring": "Returns a :class: with the scalar value 1. Needs to be called on all ranks in an SPMD fashion. Args: sharding_spec (:class:): The specification describing how to shard the Tensor. size (int...): a sequence of integers defining the shape of the output tensor. Can be a variable number of arguments or a collection like a list or tuple. Keyword args: dtype (:class:, optional): the desired data type of returned tensor. Default: if `torch.set_default_dtypetorch.layouttorch.distributed.rpc.RRefShardedTensor` object on each rank",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\_shard\\sharded_tensor\\__init__.py",
    "ast_data": "FunctionDef name:ones arg:sharding_spec arguments arg arg arg arg arg arg arg arg arg Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "get_trifinder",
    "source_code": "def get_trifinder(self):\n    if self._trifinder is None:\n        from matplotlib.tri._trifinder import TrapezoidMapTriFinder\n        self._trifinder = TrapezoidMapTriFinder(self)\n    return self._trifinder",
    "docstring": "Return the default of this triangulation, creating it if necessary. This allows the same TriFinder object to be easily shared.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\tri\\_triangulation.py",
    "ast_data": "FunctionDef name:get_trifinder arg:self arguments arg If Compare Assign Call Return return:yes"
  },
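A short usage sketch with matplotlib's public Triangulation API; the finder maps query points to triangle indices:

import numpy as np
from matplotlib.tri import Triangulation

x = np.array([0.0, 1.0, 0.0, 1.0])
y = np.array([0.0, 0.0, 1.0, 1.0])
tri = Triangulation(x, y)           # Delaunay triangulation of the unit square
finder = tri.get_trifinder()        # built lazily, then cached on the object
print(finder(np.array([0.25]), np.array([0.25])))  # index of containing triangle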
  {
    "library": "pytorch",
    "name": "SignalException",
    "source_code": "class SignalException(Exception):\n\n    def __init__(self, msg: str, sigval: signal.Signals) -> None:\n        super().__init__(msg)\n        self.sigval = sigval",
    "docstring": "Exception is raised inside the torchelastic agent process by the termination handler if the death signal got received by the process.",
    "type": "class",
    "file_path": "pytorch\\torch\\distributed\\elastic\\multiprocessing\\api.py",
    "ast_data": "ClassDef name:SignalException FunctionDef name:__init__ arg:self arg:msg arg:sigval arguments arg arg arg Call Call Assign"
  },
  {
    "library": "pytorch",
    "name": "InplaceFunction",
    "source_code": "class InplaceFunction(Function):\n\n    def __init__(self, inplace=False):\n        super().__init__()\n        self.inplace = inplace",
    "docstring": "This class is here only for backward compatibility reasons. Use :class: instead of this for any new use case.",
    "type": "class",
    "file_path": "pytorch\\torch\\autograd\\function.py",
    "ast_data": "ClassDef name:InplaceFunction FunctionDef name:__init__ arg:self arg:inplace arguments arg arg Call Call Assign"
  },
  {
    "library": "django",
    "name": "voidptr_output",
    "source_code": "def voidptr_output(func, argtypes, errcheck=True):\n    func.argtypes = argtypes\n    func.restype = c_void_p\n    if errcheck:\n        func.errcheck = check_pointer\n    return func",
    "docstring": "For functions that return c_void_p.",
    "type": "function",
    "file_path": "django\\django\\contrib\\gis\\gdal\\prototypes\\generation.py",
    "ast_data": "FunctionDef name:voidptr_output arg:func arg:argtypes arg:errcheck arguments arg arg arg Assign Assign If Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_all_dimensions",
    "source_code": "def _all_dimensions(x):\n    if isinstance(x, tensor_lib.Tensor) and x.get_shape().ndims is not None:\n        return constant_op.constant(np.arange(x.get_shape().ndims), dtype=dtypes.int32)\n    if isinstance(x, sparse_tensor.SparseTensor) and x.dense_shape.get_shape().is_fully_defined():\n        r = x.dense_shape.get_shape().dims[0].value\n        return constant_op.constant(np.arange(r), dtype=dtypes.int32)\n    return gen_math_ops._range(0, rank(x), 1)",
    "docstring": "Returns a 1D-tensor listing all dimensions in x.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\array_ops.py",
    "ast_data": "FunctionDef name:_all_dimensions arg:x arguments arg If BoolOp Call Compare Call Return return:yes Call Call Call If BoolOp Call Call Call Assign Call Return return:yes Call Call Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "inference_fn",
    "source_code": "@property\n@abc.abstractmethod\ndef inference_fn(self) -> AtomicFunction:\n    pass",
    "docstring": "Returns the original owned by this ConcreteFunction.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\types\\core.py",
    "ast_data": "FunctionDef name:inference_fn arg:self arguments arg"
  },
  {
    "library": "scrapy",
    "name": "list",
    "source_code": "def list(self) -> list[str]:\n    return list(self._spiders.keys())",
    "docstring": "Return a list with the names of all spiders available in the project.",
    "type": "method",
    "file_path": "scrapy\\scrapy\\spiderloader.py",
    "ast_data": "FunctionDef name:list arg:self arguments arg Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "clear",
    "source_code": "def clear(self):\n    self._primary.clear()\n    self._dispatch_dict.clear()",
    "docstring": "Removes all functions from the cache.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\core\\function\\polymorphism\\function_cache.py",
    "ast_data": "FunctionDef name:clear arg:self arguments arg Call Call"
  },
  {
    "library": "pandas",
    "name": "interleaved_dtype",
    "source_code": "def interleaved_dtype(dtypes: list[DtypeObj]) -> DtypeObj | None:\n    if not len(dtypes):\n        return None\n    return find_common_type(dtypes)",
    "docstring": "Find the common dtype for . Parameters ---------- blocks : List[DtypeObj] Returns ------- dtype : np.dtype, ExtensionDtype, or None None is returned when is empty.",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\internals\\managers.py",
    "ast_data": "FunctionDef name:interleaved_dtype arg:dtypes arguments arg If Call Return return:no Return return:yes Call"
  },
  {
    "library": "numpy",
    "name": "default_dtypes",
    "source_code": "def default_dtypes(self, *, device=None):\n    if device not in ['cpu', None]:\n        raise ValueError(f'Device not understood. Only \"cpu\" is allowed, but received: {device}')\n    return {'real floating': dtype(float64), 'complex floating': dtype(complex128), 'integral': dtype(intp), 'indexing': dtype(intp)}",
    "docstring": "The default data types used for new NumPy arrays. For NumPy, this always returns the following dictionary: - **\"real floating\"**: `` is allowed. Returns ------- dtypes : dict A dictionary describing the default data types used for new NumPy arrays. See Also -------- __array_namespace_info__.capabilities, __array_namespace_info__.default_device, __array_namespace_info__.dtypes, __array_namespace_info__.devices Examples -------- >>> info = np.__array_namespace_info__() >>> info.default_dtypes() {'real floating': numpy.float64, 'complex floating': numpy.complex128, 'integral': numpy.int64, 'indexing': numpy.int64}",
    "type": "method",
    "file_path": "numpy\\numpy\\_array_api_info.py",
    "ast_data": "FunctionDef name:default_dtypes arg:self arguments arg arg If Compare Raise Call Return return:yes Call Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_build_ragged_tensor_from_value_ranges",
    "source_code": "def _build_ragged_tensor_from_value_ranges(starts, limits, step, values):\n    if step is None:\n        step = 1\n    step = ops.convert_to_tensor(step, name='step')\n    if step.dtype.is_integer:\n        step = math_ops.cast(step, starts.dtype)\n    else:\n        raise TypeError('slice strides must be integers or None')\n    value_indices = ragged_math_ops.range(starts, limits, step, row_splits_dtype=starts.dtype)\n    if isinstance(values, ragged_tensor.RaggedTensor):\n        gathered_values = ragged_gather_ops.gather(params=values, indices=value_indices.values)\n    else:\n        gathered_values = array_ops.gather(params=values, indices=value_indices.values)\n    return value_indices.with_values(gathered_values)",
    "docstring": "Returns a containing the specified sequences of values. Returns a RaggedTensor where: Requires that and . Args: starts: 1D integer Tensor specifying the start indices for the sequences of values to include. limits: 1D integer Tensor specifying the limit indices for the sequences of values to include. step: Integer value specifying the step size for strided slices. values: The set of values to select from. Returns: A . Raises: ValueError: Until the prerequisite ops are checked in.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\ragged_getitem.py",
    "ast_data": "FunctionDef name:_build_ragged_tensor_from_value_ranges arg:starts arg:limits arg:step arg:values arguments arg arg arg arg If Compare Assign Assign Call If Assign Call Raise Call Assign Call If Call Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "should_store",
    "source_code": "@final\ndef should_store(self, value: ArrayLike) -> bool:\n    return value.dtype == self.dtype",
    "docstring": "Should we set self.values[indexer] = value inplace or do we need to cast? Parameters ---------- value : np.ndarray or ExtensionArray Returns ------- bool",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\internals\\blocks.py",
    "ast_data": "FunctionDef name:should_store arg:self arg:value arguments arg arg Return return:yes Compare"
  },
  {
    "library": "matplotlib",
    "name": "is_opentype_cff_font",
    "source_code": "@lru_cache\ndef is_opentype_cff_font(filename):\n    if os.path.splitext(filename)[1].lower() == '.otf':\n        with open(filename, 'rb') as fd:\n            return fd.read(4) == b'OTTO'\n    else:\n        return False",
    "docstring": "Return whether the given font is a Postscript Compact Font Format Font embedded in an OpenType wrapper. Used by the PostScript and PDF backends that cannot subset these fonts.",
    "type": "function",
    "file_path": "matplotlib\\lib\\matplotlib\\font_manager.py",
    "ast_data": "FunctionDef name:is_opentype_cff_font arg:filename arguments arg If Compare Call Call With Call Return return:yes Compare Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_batch_p2p",
    "source_code": "def _batch_p2p(p2p_ops: list[dist.P2POp], desc: Optional[str]=None) -> list[dist.Work]:\n    if len(p2p_ops) == 0:\n        return []\n    desc_str = f'{desc}, ' if desc else ''\n    logger.debug('batch_p2p %s%s', desc_str, p2p_ops)\n    return dist.batch_isend_irecv(p2p_ops)",
    "docstring": "Simple wrapper over batch_isend_irecv from torch.distributed, which just adds a descriptive logger on top.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\pipelining\\schedules.py",
    "ast_data": "FunctionDef name:_batch_p2p arg:p2p_ops arg:desc arguments arg arg If Compare Call Return return:no Assign Call Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "Wolfe",
    "source_code": "class Wolfe(Benchmark):\n\n    def __init__(self, dimensions=3):\n        Benchmark.__init__(self, dimensions)\n        self._bounds = list(zip([0.0] * self.N, [2.0] * self.N))\n        self.global_optimum = [[0.0 for _ in range(self.N)]]\n        self.fglob = 0.0\n\n    def fun(self, x, *args):\n        self.nfev += 1\n        return 4 / 3 * (x[0] ** 2 + x[1] ** 2 - x[0] * x[1]) ** 0.75 + x[2]",
    "docstring": "Wolfe objective function. This class defines the Wolfe [1]_ global optimization problem. This is a multimodal minimization problem defined as follows: .. math:: f_{\\text{Wolfe}}(x) = \\frac{4}{3}(x_1^2 + x_2^2 - x_1x_2)^{0.75} + x_3 with :math: for :math:. *Global optimum*: :math: for :math: .. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions For Global Optimization Problems Int. Journal of Mathematical Modelling and Numerical Optimisation, 2013, 4, 150-194.",
    "type": "class",
    "file_path": "scipy\\benchmarks\\benchmarks\\go_benchmark_functions\\go_funcs_W.py",
    "ast_data": "ClassDef name:Wolfe FunctionDef name:__init__ arg:self arg:dimensions arguments arg arg Call Assign Call Call Assign Call Assign FunctionDef name:fun arg:self arg:x arguments arg arg arg Return return:yes"
  },
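The documented optimum can be checked directly from the formula (a standalone re-statement of fun):

def wolfe(x):
    return 4 / 3 * (x[0] ** 2 + x[1] ** 2 - x[0] * x[1]) ** 0.75 + x[2]

assert wolfe([0.0, 0.0, 0.0]) == 0.0   # matches self.fglob
print(wolfe([1.0, 1.0, 0.0]))          # 4/3 * 1**0.75 = 1.333...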
  {
    "library": "scipy",
    "name": "TestTubeHolder",
    "source_code": "class TestTubeHolder(Benchmark):\n\n    def __init__(self, dimensions=2):\n        Benchmark.__init__(self, dimensions)\n        self._bounds = list(zip([-10.0] * self.N, [10.0] * self.N))\n        self.global_optimum = [[-pi / 2, 0.0]]\n        self.fglob = -10.872299901558\n\n    def fun(self, x, *args):\n        self.nfev += 1\n        u = sin(x[0]) * cos(x[1])\n        v = (x[0] ** 2 + x[1] ** 2) / 200\n        return -4 * abs(u * exp(abs(cos(v))))",
    "docstring": "TestTubeHolder objective function. This class defines the TestTubeHolder [1]_ global optimization problem. This is a multimodal minimization problem defined as follows: .. math:: f_{\\text{TestTubeHolder}}(x) = - 4 \\left | {e^{\\left|{\\cos \\left(\\frac{1}{200} x_{1}^{2} + \\frac{1}{200} x_{2}^{2}\\right)} \\right|}\\sin\\left(x_{1}\\right) \\cos\\left(x_{2}\\right)}\\right| with :math: for :math:. *Global optimum*: :math: for :math: .. [1] Mishra, S. Global Optimization by Differential Evolution and Particle Swarm Methods: Evaluation on Some Benchmark Functions. Munich Personal RePEc Archive, 2006, 1005 TODO Jamil#148 has got incorrect equation, missing an abs around the square brackets",
    "type": "class",
    "file_path": "scipy\\benchmarks\\benchmarks\\go_benchmark_functions\\go_funcs_T.py",
    "ast_data": "ClassDef name:TestTubeHolder FunctionDef name:__init__ arg:self arg:dimensions arguments arg arg Call Assign Call Call Assign Assign FunctionDef name:fun arg:self arg:x arguments arg arg arg Assign Call Call Assign Return return:yes Call Call Call Call"
  },
  {
    "library": "pandas",
    "name": "_na_value",
    "source_code": "@cache_readonly\ndef _na_value(self):\n    dtype = self.dtype\n    if isinstance(dtype, np.dtype):\n        if dtype.kind in 'mM':\n            return NaT\n        return np.nan\n    return dtype.na_value",
    "docstring": "The expected NA value to use with this index.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\indexes\\base.py",
    "ast_data": "FunctionDef name:_na_value arg:self arguments arg Assign If Call If Compare Return return:yes Return return:yes Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "state_size",
    "source_code": "@property\ndef state_size(self):\n    raise NotImplementedError('Abstract method')",
    "docstring": "size(s) of state(s) used by this cell. It can be represented by an Integer, a TensorShape or a tuple of Integers or TensorShapes.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\layers\\legacy_rnn\\rnn_cell_impl.py",
    "ast_data": "FunctionDef name:state_size arg:self arguments arg Raise Call"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, maximum_iterations=None, parallel_iterations=10, back_prop=True, swap_memory=False, name='while_context', grad_state=None, context_def=None, import_scope=None):\n    if context_def:\n        self._init_from_proto(context_def, import_scope=import_scope)\n    else:\n        ControlFlowContext.__init__(self)\n        self._init_from_args(maximum_iterations, parallel_iterations, back_prop, swap_memory, name)\n    self._grad_state = grad_state",
    "docstring": "\"Creates a . Args: maximum_iterations: Optional upper bound on number of loop iterations. parallel_iterations: The number of iterations allowed to run in parallel. back_prop: Whether backprop is enabled for this while loop. swap_memory: Whether GPU-CPU memory swap is enabled for this loop. name: Optional name prefix for the returned tensors. grad_state: The gradient loop state. context_def: Optional protocol buffer to initialize the python object from. import_scope: Optional . Name scope to add. Only used when initialing from protocol buffer.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\control_flow_ops.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:maximum_iterations arg:parallel_iterations arg:back_prop arg:swap_memory arg:name arg:grad_state arg:context_def arg:import_scope arguments arg arg arg arg arg arg arg arg arg If Call Call Call Assign"
  },
  {
    "library": "pandas",
    "name": "_get_codes_for_values",
    "source_code": "def _get_codes_for_values(values: Index | Series | ExtensionArray | np.ndarray, categories: Index) -> np.ndarray:\n    codes = categories.get_indexer_for(values)\n    return coerce_indexer_dtype(codes, categories)",
    "docstring": "utility routine to turn values into codes given the specified categories If is known to be a Categorical, use recode_for_categories instead.",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\arrays\\categorical.py",
    "ast_data": "FunctionDef name:_get_codes_for_values arg:values arg:categories arguments arg arg Assign Call Return return:yes Call"
  },
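The equivalent lookup through pandas' public API, which is what the helper wraps before coercing the dtype:

import pandas as pd

values = pd.Index(['a', 'b', 'a', 'c'])
categories = pd.Index(['a', 'b', 'c'])
codes = categories.get_indexer_for(values)   # same lookup the helper performs
print(codes)   # [0 1 0 2]; values absent from categories would map to -1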
  {
    "library": "django",
    "name": "get_path_from_parent",
    "source_code": "def get_path_from_parent(self, parent):\n    if self.model is parent:\n        return []\n    model = self.concrete_model\n    chain = model._meta.get_base_chain(parent)\n    chain.reverse()\n    chain.append(model)\n    path = []\n    for i, ancestor in enumerate(chain[:-1]):\n        child = chain[i + 1]\n        link = child._meta.get_ancestor_link(ancestor)\n        path.extend(link.reverse_path_infos)\n    return path",
    "docstring": "Return a list of PathInfos containing the path from the parent model to the current model, or an empty list if parent is not a parent of the current model.",
    "type": "method",
    "file_path": "django\\django\\db\\models\\options.py",
    "ast_data": "FunctionDef name:get_path_from_parent arg:self arg:parent arguments arg arg If Compare Return return:no Assign Assign Call Call Call Assign For Call Assign Assign Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_embedding_feature_proto_to_string",
    "source_code": "@classmethod\ndef _embedding_feature_proto_to_string(cls, embedding_feature_proto):\n    embedding_feature_proto_to_string_map = {topology_pb2.TPUHardwareFeature.EmbeddingFeature.UNSUPPORTED: HardwareFeature.EmbeddingFeature.UNSUPPORTED, topology_pb2.TPUHardwareFeature.EmbeddingFeature.V1: HardwareFeature.EmbeddingFeature.V1, topology_pb2.TPUHardwareFeature.EmbeddingFeature.V2: HardwareFeature.EmbeddingFeature.V2}\n    return embedding_feature_proto_to_string_map.get(embedding_feature_proto, HardwareFeature.EmbeddingFeature.UNSUPPORTED)",
    "docstring": "Convert the embedding feature proto to enum string.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\tpu\\tpu_hardware_feature.py",
    "ast_data": "FunctionDef name:_embedding_feature_proto_to_string arg:cls arg:embedding_feature_proto arguments arg arg Assign Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "_safe_rail_checks",
    "source_code": "@staticmethod\ndef _safe_rail_checks(args):\n    features, feature_dim = (args['features'], args['feature_dim'])\n    if features is not None:\n        assert feature_dim is not None, 'need feature dim to select features'\n    fn_keys = ['aggregate_fn', 'reduce_fn', 'mask_fn']\n    for key in fn_keys:\n        fn = args[key]\n        assert callable(fn), 'function should be callable'",
    "docstring": "Makes sure that some of the functions and attributes are not passed incorrectly",
    "type": "method",
    "file_path": "pytorch\\torch\\ao\\pruning\\_experimental\\activation_sparsifier\\activation_sparsifier.py",
    "ast_data": "FunctionDef name:_safe_rail_checks arg:args arguments arg Assign If Compare Compare Assign For Assign Call"
  },
  {
    "library": "seaborn",
    "name": "plot_joint",
    "source_code": "def plot_joint(self, func, **kwargs):\n    kwargs = kwargs.copy()\n    if str(func.__module__).startswith('seaborn'):\n        kwargs['ax'] = self.ax_joint\n    else:\n        plt.sca(self.ax_joint)\n    if self.hue is not None:\n        kwargs['hue'] = self.hue\n        self._inject_kwargs(func, kwargs, self._hue_params)\n    if str(func.__module__).startswith('seaborn'):\n        func(x=self.x, y=self.y, **kwargs)\n    else:\n        func(self.x, self.y, **kwargs)\n    return self",
    "docstring": "Draw a bivariate plot on the joint axes of the grid. Parameters ---------- func : plotting callable If a seaborn function, it should accept `JointGrid` for easy method chaining.",
    "type": "method",
    "file_path": "seaborn\\seaborn\\axisgrid.py",
    "ast_data": "FunctionDef name:plot_joint arg:self arg:func arguments arg arg arg Assign Call If Call Call Assign Call If Compare Assign Call If Call Call Call Call Return return:yes"
  },
  {
    "library": "kornia",
    "name": "load_checkpoint",
    "source_code": "def load_checkpoint(self, checkpoint: str, device: Optional[torch.device]=None) -> None:\n    if os.path.isfile(checkpoint):\n        with open(checkpoint, 'rb') as f:\n            state_dict = torch.load(f, map_location=device)\n    else:\n        state_dict = torch.hub.load_state_dict_from_url(checkpoint, map_location=device)\n    self.load_state_dict(state_dict)",
    "docstring": "Load checkpoint from a given url or file. Args: checkpoint: The url or filepath for the respective checkpoint device: The desired device to load the weights and move the model",
    "type": "method",
    "file_path": "kornia\\kornia\\contrib\\models\\base.py",
    "ast_data": "FunctionDef name:load_checkpoint arg:self arg:checkpoint arg:device arguments arg arg arg If Call With Call Assign Call Assign Call Call"
  },
  {
    "library": "pytorch",
    "name": "sum_over_all_but_batch_and_last_n",
    "source_code": "def sum_over_all_but_batch_and_last_n(tensor: torch.Tensor, n_dims: int) -> torch.Tensor:\n    if tensor.dim() == n_dims + 1:\n        return tensor\n    else:\n        dims = list(range(1, tensor.dim() - n_dims))\n        return tensor.sum(dim=dims)",
    "docstring": "Calculate the sum over all dimensions, except the first (batch dimension), and excluding the last n_dims. This function will ignore the first dimension and it will not aggregate over the last n_dims dimensions. Args: tensor: An input tensor of shape ``",
    "type": "function",
    "file_path": "pytorch\\torch\\nn\\utils\\_expanded_weights\\expanded_weights_utils.py",
    "ast_data": "FunctionDef name:sum_over_all_but_batch_and_last_n arg:tensor arg:n_dims arguments arg arg If Compare Call Return return:yes Assign Call Call Call Return return:yes Call"
  },
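A shape check, assuming the function above is in scope: with a 4-D input and n_dims=2, only dim 1 is summed out:

import torch

t = torch.ones(8, 5, 4, 3)                            # (batch, d1, d2, d3)
out = sum_over_all_but_batch_and_last_n(t, n_dims=2)  # sums over dim 1 only
print(out.shape)            # torch.Size([8, 4, 3])
print(out[0, 0, 0].item())  # 5.0 -- five ones were summed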
  {
    "library": "matplotlib",
    "name": "get_familyname",
    "source_code": "def get_familyname(self):\n    name = self._header.get(b'FamilyName')\n    if name is not None:\n        return name\n    name = self.get_fullname()\n    extras = '(?i)([ -](regular|plain|italic|oblique|bold|semibold|light|ultralight|extra|condensed))+$'\n    return re.sub(extras, '', name)",
    "docstring": "Return the font family name, e.g., 'Times'.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\_afm.py",
    "ast_data": "FunctionDef name:get_familyname arg:self arguments arg Assign Call If Compare Return return:yes Assign Call Assign Return return:yes Call"
  },
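The suffix-stripping regex can be exercised on its own; these example font names are hypothetical:

import re

extras = ('(?i)([ -](regular|plain|italic|oblique|bold|semibold|light|'
          'ultralight|extra|condensed))+$')
print(re.sub(extras, '', 'Times-Bold Italic'))   # -> 'Times'
print(re.sub(extras, '', 'Helvetica Light'))     # -> 'Helvetica'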
  {
    "library": "pytorch",
    "name": "InvokeQuant",
    "source_code": "@dataclasses.dataclass(frozen=True, repr=True)\nclass InvokeQuant:\n    codegen_low_precision: bool = True\n\n    def __call__(self, *args, scheme: Optional[str]=None, **kwargs):\n        if not torch.compiler.is_compiling():\n            return args[0](*args[1:], **kwargs)\n        if scheme is not None:\n            kwargs['scheme'] = scheme\n        return invoke_quant_packed(*args, **kwargs, quant_options=self)",
    "docstring": "Invoke a quantization function that will be preserved as a single operator. Preservation as a single operator aids in pattern matching and custom lowerings. The operation appears as: torch.ops.higher_order.invoke_quant(subgraph, *args, scheme=scheme) Args: codegen_low_precision: Use observed subgraph dtypes for codegen instead of upcasting to fp32. Can improve performance for prologue fusion but requires careful testing of numerics.",
    "type": "class",
    "file_path": "pytorch\\torch\\_higher_order_ops\\_invoke_quant.py",
    "ast_data": "ClassDef name:InvokeQuant FunctionDef name:__call__ arg:self arguments arg arg arg arg If Call Return return:yes Call If Compare Assign Return return:yes Call Call"
  },
  {
    "library": "scikit-learn",
    "name": "transform",
    "source_code": "def transform(self, X):\n    check_is_fitted(self)\n    return self._predict(X)",
    "docstring": "Return predictions for X for each estimator. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) The input samples. Returns ------- predictions : ndarray of shape (n_samples, n_classifiers) Values predicted by each regressor.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\ensemble\\_voting.py",
    "ast_data": "FunctionDef name:transform arg:self arg:X arguments arg arg Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "fuse_linear_bn",
    "source_code": "def fuse_linear_bn(is_qat, linear, bn):\n    assert linear.training == bn.training, 'Linear and BN both must be in the same mode (train or eval).'\n    if is_qat:\n        assert bn.num_features == linear.out_features, 'Output features of Linear must match num_features of BatchNorm1d'\n        assert bn.affine, 'Only support fusing BatchNorm1d with affine set to True'\n        assert bn.track_running_stats, 'Only support fusing BatchNorm1d with tracking_running_stats set to True'\n        return nni.LinearBn1d(linear, bn)\n    else:\n        return nn.utils.fusion.fuse_linear_bn_eval(linear, bn)",
    "docstring": "Return the fused linear and bn modules. Given the linear and bn modules, fuses them and returns the fused module Args: is_qat: a flag for whether we are using quantization aware training fusion or post training quantization fusion linear: Module instance of type Linear bn: BatchNorm1d instance that needs to be fused with the linear layer Examples:: >>> m1 = nn.Linear(20, 10) >>> b1 = nn.BatchNorm1d(10) >>> # xdoctest: +SKIP >>> m2 = fuse_linear_bn(m1, b1)",
    "type": "function",
    "file_path": "pytorch\\torch\\ao\\quantization\\fuser_method_mappings.py",
    "ast_data": "FunctionDef name:fuse_linear_bn arg:is_qat arg:linear arg:bn arguments arg arg arg Compare If Compare Return return:yes Call Return return:yes Call"
  },
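The post-training (eval) branch can be exercised directly through torch's public fusion helper; a sketch verifying the fused module matches the unfused pair (default running stats):

import torch
import torch.nn as nn

linear = nn.Linear(20, 10).eval()
bn = nn.BatchNorm1d(10).eval()
fused = nn.utils.fusion.fuse_linear_bn_eval(linear, bn)

x = torch.randn(4, 20)
with torch.no_grad():
    print(torch.allclose(fused(x), bn(linear(x)), atol=1e-6))  # True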
  {
    "library": "matplotlib",
    "name": "set_extent",
    "source_code": "def set_extent(self, extent, **kwargs):\n    (xmin, xmax), (ymin, ymax) = self.axes._process_unit_info([('x', [extent[0], extent[1]]), ('y', [extent[2], extent[3]])], kwargs)\n    if kwargs:\n        raise _api.kwarg_error('set_extent', kwargs)\n    xmin = self.axes._validate_converted_limits(xmin, self.convert_xunits)\n    xmax = self.axes._validate_converted_limits(xmax, self.convert_xunits)\n    ymin = self.axes._validate_converted_limits(ymin, self.convert_yunits)\n    ymax = self.axes._validate_converted_limits(ymax, self.convert_yunits)\n    extent = [xmin, xmax, ymin, ymax]\n    self._extent = extent\n    corners = ((xmin, ymin), (xmax, ymax))\n    self.axes.update_datalim(corners)\n    self.sticky_edges.x[:] = [xmin, xmax]\n    self.sticky_edges.y[:] = [ymin, ymax]\n    if self.axes.get_autoscalex_on():\n        self.axes.set_xlim((xmin, xmax), auto=None)\n    if self.axes.get_autoscaley_on():\n        self.axes.set_ylim((ymin, ymax), auto=None)\n    self.stale = True",
    "docstring": "Set the image extent. Parameters ---------- extent : 4-tuple of float The position and size of the image as tuple `.Axes.dataLim.Axes.viewLim~.Axes.dataLim.Axes.autoscale_view~.Axes.dataLim`.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\image.py",
    "ast_data": "FunctionDef name:set_extent arg:self arg:extent arguments arg arg arg Assign Call If Raise Call Assign Call Assign Call Assign Call Assign Call Assign Assign Assign Call Assign Assign If Call Call If Call Call Assign"
  },
  {
    "library": "tensorflow",
    "name": "dtypes",
    "source_code": "@property\ndef dtypes(self):\n    return self._dtypes",
    "docstring": "The list of dtypes for each component of a queue element.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\data_flow_ops.py",
    "ast_data": "FunctionDef name:dtypes arg:self arguments arg Return return:yes"
  },
  {
    "library": "pandas",
    "name": "ndim",
    "source_code": "@property\ndef ndim(self) -> int:\n    return 1",
    "docstring": "Extension Arrays are only allowed to be 1-dimensional. See Also -------- ExtensionArray.shape: Return a tuple of the array dimensions. ExtensionArray.size: The number of elements in the array. Examples -------- >>> arr = pd.array([1, 2, 3]) >>> arr.ndim 1",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\arrays\\base.py",
    "ast_data": "FunctionDef name:ndim arg:self arguments arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "TracingConfig",
    "source_code": "@dataclass\nclass TracingConfig:\n    tracer: torch.fx.Tracer = field(default_factory=torch.fx.Tracer)\n    concrete_args: Optional[dict[str, Any]] = None",
    "docstring": "This represents a symbolic tracing configuration. Args: tracer (torch.fx.Tracer): An instance of :class: to use for symbolic tracing. The default value is the native :class: constructed with default arguments. However, the user may want to pass a different value such as the `~torch.fx.Tracer.trace`.",
    "type": "class",
    "file_path": "pytorch\\torch\\distributed\\fsdp\\_trace_utils.py",
    "ast_data": "ClassDef name:TracingConfig Call"
  },
  {
    "library": "pandas",
    "name": "_new_Index",
    "source_code": "def _new_Index(cls, d):\n    if issubclass(cls, ABCPeriodIndex):\n        from pandas.core.indexes.period import _new_PeriodIndex\n        return _new_PeriodIndex(cls, **d)\n    if issubclass(cls, ABCMultiIndex):\n        if 'labels' in d and 'codes' not in d:\n            d['codes'] = d.pop('labels')\n        d['verify_integrity'] = False\n    elif 'dtype' not in d and 'data' in d:\n        d['dtype'] = d['data'].dtype\n    return cls.__new__(cls, **d)",
    "docstring": "This is called upon unpickling, rather than the default which doesn't have arguments and breaks __new__.",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\indexes\\base.py",
    "ast_data": "FunctionDef name:_new_Index arg:cls arg:d arguments arg arg If Call Return return:yes Call If Call If BoolOp Compare Compare Assign Call Assign If BoolOp Compare Compare Assign Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "NumExprClobberingError",
    "source_code": "class NumExprClobberingError(NameError):\n    pass",
    "docstring": "Exception raised when trying to use a built-in numexpr name as a variable name. `` will throw the error if the engine is set to 'numexpr'. 'numexpr' is the default engine value for these methods if the numexpr package is installed. See Also -------- eval : Evaluate a Python expression as a string using various backends. DataFrame.query : Query the columns of a DataFrame with a boolean expression. Examples -------- >>> df = pd.DataFrame({\"abs\": [1, 1, 1]}) >>> df.query(\"abs > 2\") # doctest: +SKIP ... # NumExprClobberingError: Variables in expression \"(abs) > (2)\" overlap... >>> sin, a = 1, 2 >>> pd.eval(\"sin + a\", engine=\"numexpr\") # doctest: +SKIP ... # NumExprClobberingError: Variables in expression \"(sin) + (a)\" overlap...",
    "type": "class",
    "file_path": "pandas\\pandas\\errors\\__init__.py",
    "ast_data": "ClassDef name:NumExprClobberingError"
  },
  {
    "library": "matplotlib",
    "name": "element",
    "source_code": "def element(self, tag, text=None, attrib={}, **extra):\n    self.start(tag, attrib, **extra)\n    if text:\n        self.data(text)\n    self.end(indent=False)",
    "docstring": "Add an entire element. This is the same as calling :meth:, :meth:, and :meth: in sequence. The *text* argument can be omitted.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\backends\\backend_svg.py",
    "ast_data": "FunctionDef name:element arg:self arg:tag arg:text arg:attrib arguments arg arg arg arg arg Call If Call Call"
  },
  {
    "library": "tensorflow",
    "name": "uniform_row_length",
    "source_code": "@property\ndef uniform_row_length(self):\n    return self._row_partition.uniform_row_length()",
    "docstring": "The length of each row in this ragged tensor, or None if rows are ragged. >>> rt1 = tf.ragged.constant([[1, 2, 3], [4], [5, 6], [7, 8, 9, 10]]) >>> print(rt1.uniform_row_length) # rows are ragged. None >>> rt2 = tf.RaggedTensor.from_uniform_row_length( ... values=rt1, uniform_row_length=2) >>> print(rt2) >>> print(rt2.uniform_row_length) # rows are not ragged (all have size 2). tf.Tensor(2, shape=(), dtype=int64) A RaggedTensor's rows are only considered to be uniform (i.e. non-ragged) if it can be determined statically (at graph construction time) that the rows all have the same length. Returns: A scalar integer , specifying the length of every row in this ragged tensor (for ragged tensors whose rows are uniform); or (for ragged tensors whose rows are ragged).",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\ragged_tensor.py",
    "ast_data": "FunctionDef name:uniform_row_length arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "devirtualize_jumps",
    "source_code": "def devirtualize_jumps(instructions):\n    jumps = set(dis.hasjabs).union(set(dis.hasjrel))\n    for inst in instructions:\n        if inst.opcode in jumps:\n            if inst.opcode not in dis.hasjabs:\n                if inst.target.offset < inst.offset:\n                    if sys.version_info < (3, 11):\n                        raise RuntimeError('Got negative jump offset for Python < 3.11')\n                    if 'FORWARD' in inst.opname:\n                        flip_jump_direction(inst)\n                elif sys.version_info >= (3, 11) and 'BACKWARD' in inst.opname:\n                    flip_jump_direction(inst)\n    update_offsets(instructions)\n    indexof = get_indexof(instructions)\n    for inst in instructions:\n        if inst.opcode in jumps:\n            target = _get_instruction_front(instructions, indexof[inst.target])\n            if inst.opcode in dis.hasjabs:\n                if sys.version_info < (3, 10):\n                    inst.arg = target.offset\n                elif sys.version_info < (3, 11):\n                    inst.arg = int(target.offset / 2)\n                else:\n                    raise RuntimeError('Python 3.11+ should not have absolute jumps')\n            else:\n                inst.arg = abs(int(target.offset - inst.offset - instruction_size(inst)))\n                if sys.version_info >= (3, 10):\n                    inst.arg //= 2\n            inst.argval = target.offset\n            inst.argrepr = f'to {target.offset}'",
    "docstring": "Fill in args for virtualized jump target after instructions may have moved",
    "type": "function",
    "file_path": "pytorch\\torch\\_dynamo\\bytecode_transformation.py",
    "ast_data": "FunctionDef name:devirtualize_jumps arg:instructions arguments arg Assign Call Call Call For If Compare If Compare If Compare If Compare Raise Call If Compare Call If BoolOp Compare Compare Call Call Assign Call For If Compare Assign Call If Compare If Compare Assign If Compare Assign Call Raise Call Assign Call Call Call If Compare Assign Assign"
  },
  {
    "library": "kornia",
    "name": "autofill_dim",
    "source_code": "def autofill_dim(self, input: Tensor, dim_range: Tuple[int, int]=(2, 4)) -> Tuple[torch.Size, torch.Size]:\n    ori_shape = input.shape\n    if len(ori_shape) < dim_range[0] or len(ori_shape) > dim_range[1]:\n        raise RuntimeError(f'input shape expected to be in {dim_range} while got {ori_shape}.')\n    while len(input.shape) < dim_range[1]:\n        input = input[None]\n    return (ori_shape, input.shape)",
    "docstring": "Fill tensor dim to the upper bound of dim_range. If input tensor dim is smaller than the lower bound of dim_range, an error will be thrown out.",
    "type": "method",
    "file_path": "kornia\\kornia\\augmentation\\container\\base.py",
    "ast_data": "FunctionDef name:autofill_dim arg:self arg:input arg:dim_range arguments arg arg arg Assign If BoolOp Compare Call Compare Call Raise Call While Compare Call Assign Return return:yes"
  },
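A standalone sketch of the dim-filling loop (the pad_to_dim name is hypothetical):

import torch

def pad_to_dim(t: torch.Tensor, upper: int = 4) -> torch.Tensor:
    # Prepend singleton dims until t.dim() == upper, as autofill_dim does.
    while t.dim() < upper:
        t = t[None]
    return t

img = torch.rand(5, 6)        # HxW
print(pad_to_dim(img).shape)  # torch.Size([1, 1, 5, 6]) -> BxCxHxW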
  {
    "library": "kornia",
    "name": "get_laf_center",
    "source_code": "def get_laf_center(LAF: Tensor) -> Tensor:\n    KORNIA_CHECK_LAF(LAF)\n    out = LAF[..., 2]\n    return out",
    "docstring": "Return a center (keypoint) of the LAFs. The convention is that center of 5-pixel image (coordinates from 0 to 4) is 2, and not 2.5. Args: LAF: :math: Returns: xy :math: Example: >>> input = torch.ones(1, 5, 2, 3) # BxNx2x3 >>> output = get_laf_center(input) # BxNx2",
    "type": "function",
    "file_path": "kornia\\kornia\\feature\\laf.py",
    "ast_data": "FunctionDef name:get_laf_center arg:LAF arguments arg Call Assign Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "dumps",
    "source_code": "def dumps(obj):\n    encoder = ArffEncoder()\n    return encoder.encode(obj)",
    "docstring": "Serialize an object representing the ARFF document, returning a string. :param obj: a dictionary. :return: a string with the ARFF document.",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\externals\\_arff.py",
    "ast_data": "FunctionDef name:dumps arg:obj arguments arg Assign Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "ExponentialFamily",
    "source_code": "class ExponentialFamily(Distribution):\n\n    @property\n    def _natural_params(self) -> tuple[Tensor, ...]:\n        raise NotImplementedError\n\n    def _log_normalizer(self, *natural_params):\n        raise NotImplementedError\n\n    @property\n    def _mean_carrier_measure(self) -> float:\n        raise NotImplementedError\n\n    def entropy(self):\n        result = -self._mean_carrier_measure\n        nparams = [p.detach().requires_grad_() for p in self._natural_params]\n        lg_normal = self._log_normalizer(*nparams)\n        gradients = torch.autograd.grad(lg_normal.sum(), nparams, create_graph=True)\n        result += lg_normal\n        for np, g in zip(nparams, gradients):\n            result -= (np * g).reshape(self._batch_shape + (-1,)).sum(-1)\n        return result",
    "docstring": "ExponentialFamily is the abstract base class for probability distributions belonging to an exponential family, whose probability mass/density function has the form is defined below .. math:: p_{F}(x; \\theta) = \\exp(\\langle t(x), \\theta\\rangle - F(\\theta) + k(x)) where :math: denotes the natural parameters, :math: denotes the sufficient statistic, :math: is the log normalizer function for a given family and :math: is the carrier measure. Note: This class is an intermediary between the class and distributions which belong to an exponential family mainly to check the correctness of the and analytic KL divergence methods. We use this class to compute the entropy and KL divergence using the AD framework and Bregman divergences (courtesy of: Frank Nielsen and Richard Nock, Entropies and Cross-entropies of Exponential Families).",
    "type": "class",
    "file_path": "pytorch\\torch\\distributions\\exp_family.py",
    "ast_data": "ClassDef name:ExponentialFamily FunctionDef name:_natural_params arg:self arguments arg Raise FunctionDef name:_log_normalizer arg:self arguments arg arg Raise FunctionDef name:_mean_carrier_measure arg:self arguments arg Raise FunctionDef name:entropy arg:self arguments arg Assign Assign Call Call Assign Call Assign Call Call For Call Call Call Return return:yes"
  },
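Concrete subclasses inherit the Bregman-divergence entropy machinery; for a Normal (which is in the exponential family) the entropy agrees with the closed form:

import math
import torch
from torch.distributions import Normal  # subclasses ExponentialFamily

d = Normal(loc=torch.tensor(0.0), scale=torch.tensor(2.0))
print(d.entropy().item())                          # 2.1121...
print(0.5 * math.log(2 * math.pi * math.e * 4.0))  # same closed-form value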
  {
    "library": "tensorflow",
    "name": "concatenate_context_input",
    "source_code": "def concatenate_context_input(context_input, sequence_input):\n    seq_rank_check = check_ops.assert_rank(sequence_input, 3, message='sequence_input must have rank 3', data=[array_ops.shape(sequence_input)])\n    seq_type_check = check_ops.assert_type(sequence_input, dtypes.float32, message='sequence_input must have dtype float32; got {}.'.format(sequence_input.dtype))\n    ctx_rank_check = check_ops.assert_rank(context_input, 2, message='context_input must have rank 2', data=[array_ops.shape(context_input)])\n    ctx_type_check = check_ops.assert_type(context_input, dtypes.float32, message='context_input must have dtype float32; got {}.'.format(context_input.dtype))\n    with ops.control_dependencies([seq_rank_check, seq_type_check, ctx_rank_check, ctx_type_check]):\n        padded_length = array_ops.shape(sequence_input)[1]\n        tiled_context_input = array_ops.tile(array_ops.expand_dims(context_input, 1), array_ops.concat([[1], [padded_length], [1]], 0))\n    return array_ops.concat([sequence_input, tiled_context_input], 2)",
    "docstring": "Replicates across all timesteps of . Expands dimension 1 of then tiles it times. This value is appended to on dimension 2 and the result is returned. Args: context_input: A of dtype and shape . sequence_input: A of dtype and shape . Returns: A of dtype and shape . Raises: ValueError: If does not have rank 3 or does not have rank 2.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\feature_column\\sequence_feature_column.py",
    "ast_data": "FunctionDef name:concatenate_context_input arg:context_input arg:sequence_input arguments arg arg Assign Call Call Assign Call Call Assign Call Call Assign Call Call With Call Assign Call Assign Call Call Call Return return:yes Call"
  },
  {
    "library": "kornia",
    "name": "from_pretrained",
    "source_code": "@classmethod\ndef from_pretrained(cls, detector_weights: str='L-C4-v2', descriptor_weights: str='G-upright', amp_dtype: torch.dtype=torch.float16) -> Module:\n    model: DeDoDe = cls(detector_model=detector_weights[0], descriptor_model=descriptor_weights[0], amp_dtype=amp_dtype)\n    model.detector.load_state_dict(torch.hub.load_state_dict_from_url(urls['detector'][detector_weights], map_location=torch.device('cpu')))\n    model.descriptor.load_state_dict(torch.hub.load_state_dict_from_url(urls['descriptor'][descriptor_weights], map_location=torch.device('cpu')))\n    model.eval()\n    return model",
    "docstring": "Load a pretrained model. Args: detector_weights: The weights to load for the detector. One of 'L-upright' (original paper, 'L-C4', 'L-SO2' (from steerers, better for rotations, 'L-C4-v2' (from dedode v2, better at rotations, less clustering, Default is 'L-C4-v2', but perhaps it should be 'L-C4-v2'? descriptor_weights: The weights to load for the descriptor. One of 'B-upright','G-upright' (original paper, 'B-C4', 'B-SO2', 'G-C4', 'G-SO2' (from steerers, better for rotations, Default is 'G-upright'. amp_dtype: the dtype to use for the model. One of torch.float16 or torch.float32. Default is torch.float16, suitable for CUDA. Use torch.float32 for CPU or MPS Returns: The pretrained model.",
    "type": "method",
    "file_path": "kornia\\kornia\\feature\\dedode\\dedode.py",
    "ast_data": "FunctionDef name:from_pretrained arg:cls arg:detector_weights arg:descriptor_weights arg:amp_dtype arguments arg arg arg arg Call Call Call Call Call Call Call Call Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "n_features_in_",
    "source_code": "@property\ndef n_features_in_(self):\n    return self.steps[0][1].n_features_in_",
    "docstring": "Number of features seen during first step method.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\pipeline.py",
    "ast_data": "FunctionDef name:n_features_in_ arg:self arguments arg Return return:yes"
  },
  {
    "library": "numpy",
    "name": "resize",
    "source_code": "def resize(self, newshape, refcheck=True, order=False):\n    errmsg = 'A masked array does not own its data and therefore cannot be resized.\\nUse the numpy.ma.resize function instead.'\n    raise ValueError(errmsg)",
    "docstring": ".. warning:: This method does nothing, except raise a ValueError exception. A masked array does not own its data and therefore cannot safely be resized in place. Use the function instead. This method is difficult to implement safely and may be deprecated in future releases of NumPy.",
    "type": "method",
    "file_path": "numpy\\numpy\\ma\\core.py",
    "ast_data": "FunctionDef name:resize arg:self arg:newshape arg:refcheck arg:order arguments arg arg arg arg Assign Raise Call"
  },
  {
    "library": "django",
    "name": "bool_output",
    "source_code": "def bool_output(func, argtypes, errcheck=None):\n    func.argtypes = argtypes\n    func.restype = c_bool\n    if errcheck:\n        func.errcheck = errcheck\n    return func",
    "docstring": "Generate a ctypes function that returns a boolean value.",
    "type": "function",
    "file_path": "django\\django\\contrib\\gis\\gdal\\prototypes\\generation.py",
    "ast_data": "FunctionDef name:bool_output arg:func arg:argtypes arg:errcheck arguments arg arg arg Assign Assign If Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_convert_graph_def",
    "source_code": "def _convert_graph_def(self):\n    graph = ops.Graph()\n    with graph.as_default():\n        importer.import_graph_def(self._input_graph_def, name='')\n    self._grappler_meta_graph_def = saver.export_meta_graph(graph_def=graph.as_graph_def(add_shapes=True), graph=graph)\n    self._add_nodes_denylist()\n    self._run_conversion()",
    "docstring": "Convert the input GraphDef.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\compiler\\tensorrt\\trt_convert.py",
    "ast_data": "FunctionDef name:_convert_graph_def arg:self arguments arg Assign Call With Call Call Assign Call Call Call Call"
  },
  {
    "library": "numpy",
    "name": "_pow",
    "source_code": "def _pow(mul_f, c, pow, maxpower):\n    [c] = as_series([c])\n    power = int(pow)\n    if power != pow or power < 0:\n        raise ValueError('Power must be a non-negative integer.')\n    elif maxpower is not None and power > maxpower:\n        raise ValueError('Power is too large')\n    elif power == 0:\n        return np.array([1], dtype=c.dtype)\n    elif power == 1:\n        return c\n    else:\n        prd = c\n        for i in range(2, power + 1):\n            prd = mul_f(prd, c)\n        return prd",
    "docstring": "Helper function used to implement the `` functions for more detail",
    "type": "function",
    "file_path": "numpy\\numpy\\polynomial\\polyutils.py",
    "ast_data": "FunctionDef name:_pow arg:mul_f arg:c arg:pow arg:maxpower arguments arg arg arg arg Assign Call Assign Call If BoolOp Compare Compare Raise Call If BoolOp Compare Compare Raise Call If Compare Return return:yes Call If Compare Return return:yes Assign For Call Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_import_submodules",
    "source_code": "def _import_submodules(self):\n    imported_modules = set(self._module_imports.keys())\n    for module in imported_modules:\n        if not module:\n            continue\n        module_split = module.split('.')\n        parent_module = ''\n        for submodule_index in range(len(module_split)):\n            if submodule_index > 0:\n                submodule = module_split[submodule_index - 1]\n                parent_module += '.' + submodule if parent_module else submodule\n            import_from = self._output_package\n            if self._lazy_loading:\n                import_from += '.' + '.'.join(module_split[:submodule_index + 1])\n                self.add_import(symbol=None, source_module_name='', source_name=import_from, dest_module_name=parent_module, dest_name=module_split[submodule_index])\n            else:\n                if self._use_relative_imports:\n                    import_from = '.'\n                elif submodule_index > 0:\n                    import_from += '.' + '.'.join(module_split[:submodule_index])\n                self.add_import(symbol=None, source_module_name=import_from, source_name=module_split[submodule_index], dest_module_name=parent_module, dest_name=module_split[submodule_index])",
    "docstring": "Add imports for all destination modules in self._module_imports.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\tools\\api\\generator\\create_python_api.py",
    "ast_data": "FunctionDef name:_import_submodules arg:self arguments arg Assign Call Call For If Assign Call Assign For Call Call If Compare Assign Assign If Call Call If Assign If Compare Call Call"
  },
  {
    "library": "pytorch",
    "name": "ipc_handle",
    "source_code": "def ipc_handle(self):\n    return super().ipc_handle()",
    "docstring": "Return an IPC handle of this event. If not recorded yet, the event will use the current device.",
    "type": "method",
    "file_path": "pytorch\\torch\\cuda\\streams.py",
    "ast_data": "FunctionDef name:ipc_handle arg:self arguments arg Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_maybe_cast_inputs",
    "source_code": "def _maybe_cast_inputs(self, inputs, input_list=None):\n    if not input_list:\n        input_list = nest.flatten(inputs)\n    compute_dtype_object = self._compute_dtype_object\n    should_autocast = self._autocast and compute_dtype_object and compute_dtype_object.is_floating\n    if should_autocast and any(map(self._should_cast_single_input, input_list)):\n        return nest.map_structure(self._cast_single_input, inputs)\n    else:\n        return inputs",
    "docstring": "Maybe casts the inputs to the compute dtype. If self._compute_dtype is floating-point, and self_autocast is True, floating-point inputs are casted to self._compute_dtype. Args: inputs: Input tensor, or structure of input tensors. input_list: Flat list of input tensors. Returns: , but tensors may have been casted to self._compute_dtype",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\base_layer.py",
    "ast_data": "FunctionDef name:_maybe_cast_inputs arg:self arg:inputs arg:input_list arguments arg arg arg If Assign Call Assign Assign BoolOp If BoolOp Call Call Return return:yes Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "run_eagerly",
    "source_code": "@property\ndef run_eagerly(self):\n    if self.dynamic and self._run_eagerly is False:\n        raise ValueError('Your model contains layers that can only be successfully run in eager execution (layers constructed with `dynamic=True`). You cannot set `run_eagerly=False`.')\n    if self._cluster_coordinator and self._run_eagerly:\n        raise ValueError('When using `Model` with `ParameterServerStrategy`, `run_eagerly` is not supported.')\n    return self.dynamic or self._run_eagerly or (def_function.functions_run_eagerly() and self._run_eagerly is None)",
    "docstring": "Settable attribute indicating whether the model should run eagerly. Running eagerly means that your model will be run step by step, like Python code. Your model might run slower, but it should become easier for you to debug it by stepping into individual layer calls. By default, we will attempt to compile your model to a static graph to deliver the best execution performance. Returns: Boolean, whether the model should run eagerly.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\training.py",
    "ast_data": "FunctionDef name:run_eagerly arg:self arguments arg If BoolOp Compare Raise Call If BoolOp Raise Call Return return:yes BoolOp BoolOp Call Compare"
  },
  {
    "library": "authlib",
    "name": "validate_nonce",
    "source_code": "def validate_nonce(self):\n    nonce_value = self.params.get('nonce')\n    if nonce_value:\n        if 'nonce' not in self:\n            raise MissingClaimError('nonce')\n        if nonce_value != self['nonce']:\n            raise InvalidClaimError('nonce')",
    "docstring": "String value used to associate a Client session with an ID Token, and to mitigate replay attacks. The value is passed through unmodified from the Authentication Request to the ID Token. If present in the ID Token, Clients MUST verify that the nonce Claim Value is equal to the value of the nonce parameter sent in the Authentication Request. If present in the Authentication Request, Authorization Servers MUST include a nonce Claim in the ID Token with the Claim Value being the nonce value sent in the Authentication Request. Authorization Servers SHOULD perform no other processing on nonce values used. The nonce value is a case sensitive string.",
    "type": "method",
    "file_path": "authlib\\authlib\\oidc\\core\\claims.py",
    "ast_data": "FunctionDef name:validate_nonce arg:self arguments arg Assign Call If If Compare Raise Call If Compare Raise Call"
  },
  {
    "library": "tensorflow",
    "name": "_consumers",
    "source_code": "def _consumers(self):\n    consumers = nest.flatten([component.consumers() for component in nest.flatten(self, expand_composites=True) if getattr(component, 'graph', None) is not None])\n    return list(set(consumers))",
    "docstring": "Returns a list of s that consume this . Returns: A list of s. Raises: RuntimeError: If this method is called while executing eagerly.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\composite_tensor.py",
    "ast_data": "FunctionDef name:_consumers arg:self arguments arg Assign Call Call Call Compare Call Return return:yes Call Call"
  },
  {
    "library": "scipy",
    "name": "N",
    "source_code": "@property\ndef N(self):\n    return self._dimensions",
    "docstring": "The dimensionality of the problem. Returns ------- N : int The dimensionality of the problem",
    "type": "method",
    "file_path": "scipy\\benchmarks\\benchmarks\\go_benchmark_functions\\go_benchmark.py",
    "ast_data": "FunctionDef name:N arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_get_typename",
    "source_code": "def _get_typename(obj):\n    objtype = type(obj)\n    name = objtype.__name__\n    module = getattr(objtype, '__module__', None)\n    if module:\n        return '{}.{}'.format(module, name)\n    else:\n        return name",
    "docstring": "Return human readable pretty type name string.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\python_memory_checker.py",
    "ast_data": "FunctionDef name:_get_typename arg:obj arguments arg Assign Call Assign Assign Call If Return return:yes Call Return return:yes"
  },
  {
    "library": "sphinx",
    "name": "desc_sig_literal_char",
    "source_code": "class desc_sig_literal_char(desc_sig_element, _sig_element=True):\n    classes = ['sc']",
    "docstring": "Node for a character literal in a signature.",
    "type": "class",
    "file_path": "sphinx\\sphinx\\addnodes.py",
    "ast_data": "ClassDef name:desc_sig_literal_char Assign"
  },
  {
    "library": "scipy",
    "name": "pdf",
    "source_code": "def pdf(self, x, df, scale):\n    return np.exp(self.logpdf(x, df, scale))",
    "docstring": "Wishart probability density function. Parameters ---------- x : array_like Quantiles, with the last axis of denoting the components. Each quantile must be a symmetric positive definite matrix. %(_doc_default_callparams)s Returns ------- pdf : ndarray Probability density function evaluated at Notes ----- %(_doc_callparams_note)s",
    "type": "method",
    "file_path": "scipy\\scipy\\stats\\_multivariate.py",
    "ast_data": "FunctionDef name:pdf arg:self arg:x arg:df arg:scale arguments arg arg arg arg Return return:yes Call Call"
  },
  {
    "library": "pandas",
    "name": "is_instance_factory",
    "source_code": "def is_instance_factory(_type: type | tuple[type, ...]) -> Callable[[Any], None]:\n    if isinstance(_type, tuple):\n        type_repr = '|'.join(map(str, _type))\n    else:\n        type_repr = f\"'{_type}'\"\n\n    def inner(x) -> None:\n        if not isinstance(x, _type):\n            raise ValueError(f'Value must be an instance of {type_repr}')\n    return inner",
    "docstring": "Parameters ---------- - the type to be checked against Returns ------- validator - a function of a single argument x , which raises ValueError if x is not an instance of",
    "type": "function",
    "file_path": "pandas\\pandas\\_config\\config.py",
    "ast_data": "FunctionDef name:is_instance_factory arg:_type arguments arg If Call Assign Call Call Assign FunctionDef name:inner arg:x arguments arg If Call Raise Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_writeback_tensor",
    "source_code": "def _writeback_tensor(self, src_tensor: Optional[Tensor], dst_tensor: Tensor, tensor_index: int, expected_shape: torch.Size, offset: int, is_param: bool) -> None:\n    _p_assert(len(expected_shape) == 1, f'Expects a 1D expected shape but got {expected_shape}')\n    if self._debug_level == dist.DebugLevel.INFO:\n        rank = self.rank if hasattr(self, 'rank') else dist.get_rank()\n        src_shape = src_tensor.shape if src_tensor is not None else None\n        src_device = src_tensor.device if src_tensor is not None else None\n        warnings.warn(f'[Rank {rank}] {('Parameter' if is_param else 'Gradient')} needs writeback in {self._training_state}\\nexpected shape={expected_shape} shape={src_shape} expected device={dst_tensor.device} device={src_device}')\n    if src_tensor is not None and src_tensor.shape != expected_shape:\n        raise RuntimeError(f'Cannot writeback when the {('parameter' if is_param else 'gradient')} shape changes\\nExpects {expected_shape} but got {src_tensor.shape}')\n    if src_tensor is not None:\n        dst_tensor[offset:offset + expected_shape.numel()].copy_(src_tensor)\n    else:\n        dst_tensor[offset:offset + expected_shape.numel()].zero_()\n        assert self.flat_param._is_grad_none_mask is not None\n        self.flat_param._is_grad_none_mask[tensor_index] = True",
    "docstring": "Write back `` does not have the expected shape.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\fsdp\\_flat_param.py",
    "ast_data": "FunctionDef name:_writeback_tensor arg:self arg:src_tensor arg:dst_tensor arg:tensor_index arg:expected_shape arg:offset arg:is_param arguments arg arg arg arg arg arg arg Call Compare Call If Compare Assign Call Call Assign Compare Assign Compare Call If BoolOp Compare Compare Raise Call If Compare Call Call Call Call Compare Assign"
  },
  {
    "library": "pandas",
    "name": "prod",
    "source_code": "@deprecate_nonkeyword_arguments(version='4.0', allowed_args=['self'], name='prod')\ndef prod(self, axis: Axis | None=0, skipna: bool=True, numeric_only: bool=False, min_count: int=0, **kwargs) -> Series:\n    result = super().prod(axis=axis, skipna=skipna, numeric_only=numeric_only, min_count=min_count, **kwargs)\n    if isinstance(result, Series):\n        result = result.__finalize__(self, method='prod')\n    return result",
    "docstring": "Return the product of the values over the requested axis. Parameters ---------- axis : {index (0), columns (1)} Axis for the function to be applied on. For this parameter is unused and defaults to 0. .. warning:: The behavior of DataFrame.prod with `` handles all-NA and empty series identically. >>> pd.Series([np.nan]).prod() 1.0 >>> pd.Series([np.nan]).prod(min_count=1) nan",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\frame.py",
    "ast_data": "FunctionDef name:prod arg:self arg:axis arg:skipna arg:numeric_only arg:min_count arguments arg arg arg arg arg arg Assign Call Call If Call Assign Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "compute_gradients",
    "source_code": "def compute_gradients(self, loss, var_list=None, gate_gradients=optimizer.Optimizer.GATE_OP, aggregation_method=None, colocate_gradients_with_ops=False, grad_loss=None):\n    loss = self._scale_loss(loss)\n    grads_and_vars = self._optimizer.compute_gradients(loss=loss, var_list=var_list, gate_gradients=gate_gradients, aggregation_method=aggregation_method, colocate_gradients_with_ops=colocate_gradients_with_ops, grad_loss=grad_loss)\n    grads = [g for g, _ in grads_and_vars]\n    variables = [v for _, v in grads_and_vars]\n    unscaled_grads = self._unscale_grads(grads)\n    return list(zip(unscaled_grads, variables))",
    "docstring": "Compute gradients of for the variables in . This adjusts the dynamic range of the gradient evaluation by scaling up the value. The gradient values are then scaled back down by the reciprocal of the loss scale. This is useful in reduced precision training where small gradient values would otherwise underflow the representable range. Args: loss: A Tensor containing the value to minimize or a callable taking no arguments which returns the value to minimize. When eager execution is enabled it must be a callable. var_list: Optional list or tuple of to update to minimize . Defaults to the list of variables collected in the graph under the key . gate_gradients: How to gate the computation of gradients. Can be , , or . aggregation_method: Specifies the method used to combine gradient terms. Valid values are defined in the class . colocate_gradients_with_ops: If True, try colocating gradients with the corresponding op. grad_loss: Optional. A holding the gradient computed for . Returns: A list of (gradient, variable) pairs. Variable is always present, but gradient can be .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\training\\experimental\\loss_scale_optimizer.py",
    "ast_data": "FunctionDef name:compute_gradients arg:self arg:loss arg:var_list arg:gate_gradients arg:aggregation_method arg:colocate_gradients_with_ops arg:grad_loss arguments arg arg arg arg arg arg arg Assign Call Assign Call Assign Assign Assign Call Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "InputAdaptStep",
    "source_code": "@runtime_checkable\nclass InputAdaptStep(Protocol):\n\n    def apply(self, model_args: Sequence[Any], model_kwargs: Mapping[str, Any], model: torch.nn.Module | Callable | torch_export.ExportedProgram | None=None) -> tuple[Sequence[Any], Mapping[str, Any]]:\n        ...",
    "docstring": "A protocol that defines a step in the input adapting process. The input adapting process is a sequence of steps that are applied to the PyTorch model inputs to transform them into the inputs format expected by the exported ONNX model. Each step takes the PyTorch model inputs as arguments and returns the transformed inputs. This serves as a base formalized construct for the transformation done to model input signature by any individual component in the exporter.",
    "type": "class",
    "file_path": "pytorch\\torch\\onnx\\_internal\\io_adapter.py",
    "ast_data": "ClassDef name:InputAdaptStep FunctionDef name:apply arg:self arg:model_args arg:model_kwargs arg:model arguments arg arg arg arg"
  },
  {
    "library": "scipy",
    "name": "__init__",
    "source_code": "def __init__(self, A, A_1_norm=None, ell=2, scale=1):\n    self._A = A\n    self._A_1_norm = A_1_norm\n    self._ell = ell\n    self._d = {}\n    self._scale = scale",
    "docstring": "Provide the operator and some norm-related information. Parameters ---------- A : linear operator The operator of interest. A_1_norm : float, optional The exact 1-norm of A. ell : int, optional A technical parameter controlling norm estimation quality. scale : int, optional If specified, return the norms of scale*A instead of A.",
    "type": "method",
    "file_path": "scipy\\scipy\\sparse\\linalg\\_expm_multiply.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:A arg:A_1_norm arg:ell arg:scale arguments arg arg arg arg arg Assign Assign Assign Assign Assign"
  },
  {
    "library": "scipy",
    "name": "foldcauchy_gen",
    "source_code": "class foldcauchy_gen(rv_continuous):\n\n    def _argcheck(self, c):\n        return c >= 0\n\n    def _shape_info(self):\n        return [_ShapeInfo('c', False, (0, np.inf), (True, False))]\n\n    def _rvs(self, c, size=None, random_state=None):\n        return abs(cauchy.rvs(loc=c, size=size, random_state=random_state))\n\n    def _pdf(self, x, c):\n        return 1.0 / np.pi * (1.0 / (1 + (x - c) ** 2) + 1.0 / (1 + (x + c) ** 2))\n\n    def _cdf(self, x, c):\n        return 1.0 / np.pi * (np.arctan(x - c) + np.arctan(x + c))\n\n    def _sf(self, x, c):\n        return (np.arctan2(1, x - c) + np.arctan2(1, x + c)) / np.pi\n\n    def _stats(self, c):\n        return (np.inf, np.inf, np.nan, np.nan)",
    "docstring": "A folded Cauchy continuous random variable. %(before_notes)s Notes ----- The probability density function for is: .. math:: f(x, c) = \\frac{1}{\\pi (1+(x-c)^2)} + \\frac{1}{\\pi (1+(x+c)^2)} for :math: and :math:. takes `c`. %(example)s",
    "type": "class",
    "file_path": "scipy\\scipy\\stats\\_continuous_distns.py",
    "ast_data": "ClassDef name:foldcauchy_gen FunctionDef name:_argcheck arg:self arg:c arguments arg arg Return return:yes Compare FunctionDef name:_shape_info arg:self arguments arg Return return:yes Call FunctionDef name:_rvs arg:self arg:c arg:size arg:random_state arguments arg arg arg arg Return return:yes Call Call FunctionDef name:_pdf arg:self arg:x arg:c arguments arg arg arg Return return:yes FunctionDef name:_cdf arg:self arg:x arg:c arguments arg arg arg Return return:yes Call Call FunctionDef name:_sf arg:self arg:x arg:c arguments arg arg arg Return return:yes Call Call FunctionDef name:_stats arg:self arg:c arguments arg arg Return return:yes"
  },
  {
    "library": "pandas",
    "name": "is_platform_riscv64",
    "source_code": "def is_platform_riscv64() -> bool:\n    return platform.machine() == 'riscv64'",
    "docstring": "Checking if the running platform use riscv64 architecture. Returns ------- bool True if the running platform uses riscv64 architecture.",
    "type": "function",
    "file_path": "pandas\\pandas\\compat\\__init__.py",
    "ast_data": "FunctionDef name:is_platform_riscv64 arguments Return return:yes Compare Call"
  },
  {
    "library": "matplotlib",
    "name": "_set_cursor",
    "source_code": "def _set_cursor(self, enabled):\n    if enabled:\n        cursor = backend_tools.Cursors.RESIZE_HORIZONTAL if self.direction == 'horizontal' else backend_tools.Cursors.RESIZE_VERTICAL\n    else:\n        cursor = backend_tools.Cursors.POINTER\n    self.ax.get_figure(root=True).canvas.set_cursor(cursor)",
    "docstring": "Update the canvas cursor based on direction of the selector.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\widgets.py",
    "ast_data": "FunctionDef name:_set_cursor arg:self arg:enabled arguments arg arg If Assign Compare Assign Call Call"
  },
  {
    "library": "pandas",
    "name": "__init__",
    "source_code": "def __init__(self, x: np.ndarray, allow_copy: bool=True) -> None:\n    if x.strides[0] and (not x.strides == (x.dtype.itemsize,)):\n        if allow_copy:\n            x = x.copy()\n        else:\n            raise RuntimeError('Exports cannot be zero-copy in the case of a non-contiguous buffer')\n    self._x = x",
    "docstring": "Handle only regular columns (= numpy arrays) for now.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\interchange\\buffer.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:x arg:allow_copy arguments arg arg arg If BoolOp Compare If Assign Call Raise Call Assign"
  },
  {
    "library": "scipy",
    "name": "_line_search_wolfe12",
    "source_code": "def _line_search_wolfe12(f, fprime, xk, pk, gfk, old_fval, old_old_fval, **kwargs):\n    extra_condition = kwargs.pop('extra_condition', None)\n    ret = line_search_wolfe1(f, fprime, xk, pk, gfk, old_fval, old_old_fval, **kwargs)\n    if ret[0] is not None and extra_condition is not None:\n        xp1 = xk + ret[0] * pk\n        if not extra_condition(ret[0], xp1, ret[3], ret[5]):\n            ret = (None,)\n    if ret[0] is None:\n        with warnings.catch_warnings():\n            warnings.simplefilter('ignore', LineSearchWarning)\n            kwargs2 = {}\n            for key in ('c1', 'c2', 'amax'):\n                if key in kwargs:\n                    kwargs2[key] = kwargs[key]\n            ret = line_search_wolfe2(f, fprime, xk, pk, gfk, old_fval, old_old_fval, extra_condition=extra_condition, **kwargs2)\n    if ret[0] is None:\n        raise _LineSearchError()\n    return ret",
    "docstring": "Same as line_search_wolfe1, but fall back to line_search_wolfe2 if suitable step length is not found, and raise an exception if a suitable step length is not found. Raises ------ _LineSearchError If no suitable step size is found",
    "type": "function",
    "file_path": "scipy\\scipy\\optimize\\_optimize.py",
    "ast_data": "FunctionDef name:_line_search_wolfe12 arg:f arg:fprime arg:xk arg:pk arg:gfk arg:old_fval arg:old_old_fval arguments arg arg arg arg arg arg arg arg Assign Call Assign Call If BoolOp Compare Compare Assign If Call Assign If Compare With Call Call Assign For If Compare Assign Assign Call If Compare Raise Call Return return:yes"
  },
  {
    "library": "pygame",
    "name": "close",
    "source_code": "def close(self):\n    _check_init()\n    if self._output is not None:\n        self._output.Close()\n    self._output = None",
    "docstring": "closes a midi stream, flushing any pending buffers. Output.close(): return None PortMidi attempts to close open streams when the application exits -- this is particularly difficult under Windows.",
    "type": "method",
    "file_path": "pygame\\src_py\\midi.py",
    "ast_data": "FunctionDef name:close arg:self arguments arg Call If Compare Call Assign"
  },
  {
    "library": "scikit-learn",
    "name": "predict_proba",
    "source_code": "def predict_proba(self, X):\n    raw_predictions = self.decision_function(X)\n    return self._loss.predict_proba(raw_predictions)",
    "docstring": "Predict class probabilities for X. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) The input samples. Internally, it will be converted to `classes_` does not support probabilities.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\ensemble\\_gb.py",
    "ast_data": "FunctionDef name:predict_proba arg:self arg:X arguments arg arg Assign Call Return return:yes Call"
  },
  {
    "library": "kornia",
    "name": "predict",
    "source_code": "def predict(self, x: Tensor) -> Tensor:\n    KORNIA_CHECK(x.shape[1] == self.cluster_centers.shape[1], f'Dimensions at position 1 of x and cluster_centers do not match.                 {x.shape[1]} != {self.cluster_centers.shape[1]}')\n    distance = self._pairwise_euclidean_distance(x, self.cluster_centers)\n    cluster_assignment = distance.argmin(-1)\n    return cluster_assignment",
    "docstring": "Find the cluster center closest to each point in x. Args: x: 2D tensor Returns: 1D tensor containing cluster id assigned to each data point in x",
    "type": "method",
    "file_path": "kornia\\kornia\\contrib\\kmeans.py",
    "ast_data": "FunctionDef name:predict arg:self arg:x arguments arg arg Call Compare Assign Call Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_process_constant",
    "source_code": "def _process_constant(self, node: ast.Constant) -> None:\n    if isinstance(node.value, str):\n        docstring, modules = self._extract_docstring(node.value)\n        if modules:\n            self._exports.add_doc(exported_api.ExportedDoc.create(file_name=self._current_file, line_no=node.lineno, modules=modules, docstring=docstring))\n        else:\n            self.visit(node)",
    "docstring": "Process top-level constant for a potential API docstring export.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\tools\\api\\generator2\\extractor\\extractor.py",
    "ast_data": "FunctionDef name:_process_constant arg:self arg:node arguments arg arg If Call Assign Call If Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "shard_dimension",
    "source_code": "@property\ndef shard_dimension(self):\n    return self._shard_dimension",
    "docstring": "Returns the shard dimension of the policy or None if unspecified.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\tpu\\tpu_sharding.py",
    "ast_data": "FunctionDef name:shard_dimension arg:self arguments arg Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "_check_is_permutation",
    "source_code": "def _check_is_permutation(indices, n_samples):\n    if len(indices) != n_samples:\n        return False\n    hit = np.zeros(n_samples, dtype=bool)\n    hit[indices] = True\n    if not np.all(hit):\n        return False\n    return True",
    "docstring": "Check whether indices is a reordering of the array np.arange(n_samples) Parameters ---------- indices : ndarray int array to test n_samples : int number of expected elements Returns ------- is_partition : bool True iff sorted(indices) is np.arange(n)",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\model_selection\\_validation.py",
    "ast_data": "FunctionDef name:_check_is_permutation arg:indices arg:n_samples arguments arg arg If Compare Call Return return:yes Assign Call Assign If Call Return return:yes Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "flatten_with_variables",
    "source_code": "def flatten_with_variables(inputs):\n    flat_inputs = []\n    for value in nest.flatten(inputs):\n        if isinstance(value, composite_tensor.CompositeTensor) and (not _pywrap_utils.IsResourceVariable(value)):\n            components = value._type_spec._to_components(value)\n            flat_inputs.extend(flatten_with_variables(components))\n        else:\n            flat_inputs.append(value)\n    return flat_inputs",
    "docstring": "Flattens but don't expand s.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\polymorphic_function\\composite_tensor_utils.py",
    "ast_data": "FunctionDef name:flatten_with_variables arg:inputs arguments arg Assign For Call If BoolOp Call Call Assign Call Call Call Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "get_data",
    "source_code": "def get_data(self, orig=True):\n    return (self.get_xdata(orig=orig), self.get_ydata(orig=orig))",
    "docstring": "Return the line data as an `` pair. If *orig* is *True*, return the original data.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\lines.py",
    "ast_data": "FunctionDef name:get_data arg:self arg:orig arguments arg arg Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "rgb_to_grayscale",
    "source_code": "@tf_export('image.rgb_to_grayscale')\n@dispatch.add_dispatch_support\ndef rgb_to_grayscale(images, name=None):\n    with ops.name_scope(name, 'rgb_to_grayscale', [images]) as name:\n        images = ops.convert_to_tensor(images, name='images')\n        orig_dtype = images.dtype\n        flt_image = convert_image_dtype(images, dtypes.float32)\n        rgb_weights = [0.2989, 0.587, 0.114]\n        gray_float = math_ops.tensordot(flt_image, rgb_weights, [-1, -1])\n        gray_float = array_ops.expand_dims(gray_float, -1)\n        return convert_image_dtype(gray_float, orig_dtype, name=name)",
    "docstring": "Converts one or more images from RGB to Grayscale. Outputs a tensor of the same and rank as . The size of the last dimension of the output is 1, containing the Grayscale value of the pixels. >>> original = tf.constant([[[1.0, 2.0, 3.0]]]) >>> converted = tf.image.rgb_to_grayscale(original) >>> print(converted.numpy()) [[[1.81...]]] Args: images: The RGB tensor to convert. The last dimension must have size 3 and should contain RGB values. name: A name for the operation (optional). Returns: The converted grayscale image(s).",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\image_ops_impl.py",
    "ast_data": "FunctionDef name:rgb_to_grayscale arg:images arg:name arguments arg arg With Call Assign Call Assign Assign Call Assign Assign Call Assign Call Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "reinterpret",
    "source_code": "def reinterpret(value, new_type):\n    if not isinstance(value, ExtensionType):\n        raise ValueError(f'reinterpret expects `value` to be a tf.ExtensionType instance; got {value!r}')\n    if not (isinstance(new_type, type) and issubclass(new_type, ExtensionType)):\n        raise ValueError(f'reinterpret expects `new_type` to be a subclass of tf.ExtensionType; got {new_type!r}')\n    fields = [item for item in value.__dict__.items() if not extension_type_field.ExtensionTypeField.is_reserved_name(item[0])]\n    new_value = _create_object_from_type_and_dict(new_type, fields)\n    new_value._tf_extension_type_convert_fields()\n    new_value.__validate__()\n    return new_value",
    "docstring": "Converts a given to a new type with compatible fields. In particular, this can be used to convert a concrete subclass of to an , or vice versa. When converting to a non-anonymous ExtensionType, field values are type-checked to ensure they are consistent with 's type annotations, and validated with . Args: value: An instance of a subclass of new_type: A subclass of Returns: An instance of , whose fields are copied from .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\extension_type.py",
    "ast_data": "FunctionDef name:reinterpret arg:value arg:new_type arguments arg arg If Call Raise Call If BoolOp Call Call Raise Call Assign Call Call Assign Call Call Call Return return:yes"
  },
  {
    "library": "cherrypy",
    "name": "_extend_pythonpath",
    "source_code": "@staticmethod\ndef _extend_pythonpath(env):\n    path_prefix = '.' + os.pathsep\n    existing_path = env.get('PYTHONPATH', '')\n    needs_patch = sys.path[0] == '' and (not existing_path.startswith(path_prefix))\n    if needs_patch:\n        env['PYTHONPATH'] = path_prefix + existing_path",
    "docstring": "Prepend current working dir to PATH environment variable if needed. If sys.path[0] is an empty string, the interpreter was likely invoked with -m and the effective path is about to change on re- exec. Add the current directory to $PYTHONPATH to ensure that the new process sees the same path. This issue cannot be addressed in the general case because Python cannot reliably reconstruct the original command line ( (This idea filched from tornado.autoreload)",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\process\\wspbus.py",
    "ast_data": "FunctionDef name:_extend_pythonpath arg:env arguments arg Assign Assign Call Assign BoolOp Compare Call If Assign"
  },
  {
    "library": "django",
    "name": "BaseTodayArchiveView",
    "source_code": "class BaseTodayArchiveView(BaseDayArchiveView):\n\n    def get_dated_items(self):\n        return self._get_dated_items(datetime.date.today())",
    "docstring": "Base view for a list of objects published today. This requires subclassing to provide a response mixin.",
    "type": "class",
    "file_path": "django\\django\\views\\generic\\dates.py",
    "ast_data": "ClassDef name:BaseTodayArchiveView FunctionDef name:get_dated_items arg:self arguments arg Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "__iter__",
    "source_code": "def __iter__(self):\n    return iter(self._variables)",
    "docstring": "Return an iterable for accessing the underlying sharded variables.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\sharded_variable.py",
    "ast_data": "FunctionDef name:__iter__ arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "Print",
    "source_code": "@deprecated('2018-08-20', 'Use tf.print instead of tf.Print. Note that tf.print returns a no-output operator that directly prints the output. Outside of defuns or eager mode, this operator will not be executed unless it is directly specified in session.run or used as a control dependency for other operators. This is only a concern in graph mode. Below is an example of how to ensure tf.print executes in graph mode:\\n')\n@tf_export(v1=['Print'])\n@dispatch.add_dispatch_support\ndef Print(input_, data, message=None, first_n=None, summarize=None, name=None):\n    return gen_logging_ops._print(input_, data, message, first_n, summarize, name)",
    "docstring": "Prints a list of tensors. This is an identity op (behaves like ) with the side effect of printing when evaluating. Note: This op prints to the standard error. It is not currently compatible with jupyter notebook (printing to the notebook *server's* output, not into the notebook). @compatibility(TF2) This API is deprecated. Use instead. does not need the argument. works in TF2 when executing eagerly and inside a . In TF1-styled sessions, an explicit control dependency declaration is needed to execute the operation. Refer to the documentation of for more details. @end_compatibility Args: input_: A tensor passed through this op. data: A list of tensors to print out when op is evaluated. message: A string, prefix of the error message. first_n: Only log number of times. Negative numbers log always; this is the default. summarize: Only print this many entries of each tensor. If None, then a maximum of 3 elements are printed per input tensor. name: A name for the operation (optional). Returns: A . Has the same type and contents as .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\logging_ops.py",
    "ast_data": "FunctionDef name:Print arg:input_ arg:data arg:message arg:first_n arg:summarize arg:name arguments arg arg arg arg arg arg Return return:yes Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_TrackableResourceSummaryWriter",
    "source_code": "class _TrackableResourceSummaryWriter(_ResourceSummaryWriter, resource.TrackableResource, metaclass=_MultiMetaclass):\n\n    def __init__(self, create_fn, init_op_fn, mesh=None):\n        resource.TrackableResource.__init__(self, device='/CPU:0')\n        self._create_fn = create_fn\n        self._init_op_fn = init_op_fn\n        _ResourceSummaryWriter.__init__(self, create_fn=lambda: self.resource_handle, init_op_fn=init_op_fn, mesh=mesh)\n\n    def _create_resource(self):\n        return self._create_fn()\n\n    def _initialize(self):\n        return self._init_op_fn(self.resource_handle)\n\n    def _destroy_resource(self):\n        gen_resource_variable_ops.destroy_resource_op(self.resource_handle, ignore_lookup_error=True)\n\n    def _set_up_resource_deleter(self):\n        pass",
    "docstring": "A subclass that implements .",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\summary_ops_v2.py",
    "ast_data": "ClassDef name:_TrackableResourceSummaryWriter FunctionDef name:__init__ arg:self arg:create_fn arg:init_op_fn arg:mesh arguments arg arg arg arg Call Assign Assign Call arguments FunctionDef name:_create_resource arg:self arguments arg Return return:yes Call FunctionDef name:_initialize arg:self arguments arg Return return:yes Call FunctionDef name:_destroy_resource arg:self arguments arg Call FunctionDef name:_set_up_resource_deleter arg:self arguments arg"
  },
  {
    "library": "pytorch",
    "name": "graph_copy",
    "source_code": "@compatibility(is_backward_compatible=True)\ndef graph_copy(self, g: 'Graph', val_map: dict[Node, Node], return_output_node=False) -> 'Optional[Argument]':\n    for node in g.nodes:\n        if node in val_map:\n            continue\n        if node.op == 'output':\n            rv = map_arg(node.args[0], lambda n: val_map[n])\n            return rv if not return_output_node else (rv, node)\n        val_map[node] = self.node_copy(node, lambda n: val_map[n])\n    return None",
    "docstring": "Copy all nodes from a given graph into `` otherwise.",
    "type": "method",
    "file_path": "pytorch\\torch\\fx\\graph.py",
    "ast_data": "FunctionDef name:graph_copy arg:self arg:g arg:val_map arg:return_output_node arguments arg arg arg arg For If Compare If Compare Assign Call arguments arg Return return:yes Assign Call arguments arg Return return:no Call"
  },
  {
    "library": "tensorflow",
    "name": "run_with_all_weight_formats",
    "source_code": "def run_with_all_weight_formats(test_or_class=None, exclude_formats=None):\n    exclude_formats = exclude_formats or []\n    exclude_formats.append('tf_no_traces')\n    return run_with_all_saved_model_formats(test_or_class, exclude_formats)",
    "docstring": "Runs all tests with the supported formats for saving weights.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\keras_parameterized.py",
    "ast_data": "FunctionDef name:run_with_all_weight_formats arg:test_or_class arg:exclude_formats arguments arg arg Assign BoolOp Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_SafeShapeDiv",
    "source_code": "def _SafeShapeDiv(x, y):\n    return x // math_ops.maximum(y, 1)",
    "docstring": "Divides assuming , treating .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\math_grad.py",
    "ast_data": "FunctionDef name:_SafeShapeDiv arg:x arg:y arguments arg arg Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "__call__",
    "source_code": "def __call__(self, direction, factor, values):\n    if self._fallback_formatter:\n        fallback_strings = self._fallback_formatter(direction, factor, values)\n    else:\n        fallback_strings = [''] * len(values)\n    return [self._format_dict.get(k, v) for k, v in zip(values, fallback_strings)]",
    "docstring": "factor is ignored if value is found in the dictionary",
    "type": "method",
    "file_path": "matplotlib\\lib\\mpl_toolkits\\axisartist\\grid_finder.py",
    "ast_data": "FunctionDef name:__call__ arg:self arg:direction arg:factor arg:values arguments arg arg arg arg If Assign Call Assign Call Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "get_backend_config",
    "source_code": "def get_backend_config(group: Optional[ProcessGroup]=None) -> str:\n    pg = group or _get_default_group()\n    if _rank_not_in_group(pg):\n        raise ValueError('Invalid process group specified')\n    backend_config = _world.pg_backend_config.get(pg)\n    return str(not_none(backend_config))",
    "docstring": "Return the backend configuration of the given process group. Args: group (ProcessGroup, optional): The process group to work on. The default is the general main process group. If another specific group is specified, the calling process must be part of :attr:. Returns: The backend configuration of the given process group as a lower case string.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\distributed_c10d.py",
    "ast_data": "FunctionDef name:get_backend_config arg:group arguments arg Assign BoolOp Call If Call Raise Call Assign Call Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "get_dtype_as_int",
    "source_code": "def get_dtype_as_int(tensor):\n    dtype = tensor.dtype\n    if dtype not in _TORCH_DTYPE_TO_ENUM:\n        raise RuntimeError(f'Unsupported dtype {dtype}')\n    return _TORCH_DTYPE_TO_ENUM[dtype]",
    "docstring": "prim::dtype has the signature \"Tensor a) -> int\", where it gets the dtype of the tensor and returns the integer corresponding to this dtype based on the enum in ScalarType.h",
    "type": "function",
    "file_path": "pytorch\\torch\\_export\\converter.py",
    "ast_data": "FunctionDef name:get_dtype_as_int arg:tensor arguments arg Assign If Compare Raise Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "image_boxes",
    "source_code": "def image_boxes(tag, tensor_image, tensor_boxes, rescale=1, dataformats='CHW', labels=None):\n    tensor_image = make_np(tensor_image)\n    tensor_image = convert_to_HWC(tensor_image, dataformats)\n    tensor_boxes = make_np(tensor_boxes)\n    tensor_image = tensor_image.astype(np.float32) * _calc_scale_factor(tensor_image)\n    image = make_image(tensor_image.clip(0, 255).astype(np.uint8), rescale=rescale, rois=tensor_boxes, labels=labels)\n    return Summary(value=[Summary.Value(tag=tag, image=image)])",
    "docstring": "Output a protocol buffer with images.",
    "type": "function",
    "file_path": "pytorch\\torch\\utils\\tensorboard\\summary.py",
    "ast_data": "FunctionDef name:image_boxes arg:tag arg:tensor_image arg:tensor_boxes arg:rescale arg:dataformats arg:labels arguments arg arg arg arg arg arg Assign Call Assign Call Assign Call Assign Call Call Assign Call Call Call Return return:yes Call Call"
  },
  {
    "library": "django",
    "name": "nested",
    "source_code": "def nested(self, format_callback=None):\n    seen = set()\n    roots = []\n    for root in self.edges.get(None, ()):\n        roots.extend(self._nested(root, seen, format_callback))\n    return roots",
    "docstring": "Return the graph as a nested list.",
    "type": "method",
    "file_path": "django\\django\\contrib\\admin\\utils.py",
    "ast_data": "FunctionDef name:nested arg:self arg:format_callback arguments arg arg Assign Call Assign For Call Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_check_self_external_modification",
    "source_code": "def _check_self_external_modification(self):\n    if self._dirty:\n        return\n    if self != self._self_last_wrapped_dict_snapshot:\n        self._self_external_modification = True\n        self._self_last_wrapped_dict_snapshot = None",
    "docstring": "Checks for any changes to the wrapped dict not through the wrapper.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\trackable\\data_structures.py",
    "ast_data": "FunctionDef name:_check_self_external_modification arg:self arguments arg If Return return:no If Compare Assign Assign"
  },
  {
    "library": "tensorflow",
    "name": "wait_for_other_workers",
    "source_code": "def wait_for_other_workers(self):\n    if not self._worker_barrier:\n        return\n    self._worker_barrier.wait()",
    "docstring": "Waits for other workers to reach the same call to this method. Raises: ValueError: if is not passed to the __init__ method.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\distribute\\distribute_coordinator_utils.py",
    "ast_data": "FunctionDef name:wait_for_other_workers arg:self arguments arg If Return return:no Call"
  },
  {
    "library": "pytorch",
    "name": "QuantWrapper",
    "source_code": "class QuantWrapper(nn.Module):\n    quant: QuantStub\n    dequant: DeQuantStub\n    module: nn.Module\n\n    def __init__(self, module):\n        super().__init__()\n        qconfig = getattr(module, 'qconfig', None)\n        self.add_module('quant', QuantStub(qconfig))\n        self.add_module('dequant', DeQuantStub(qconfig))\n        self.add_module('module', module)\n        self.train(module.training)\n\n    def forward(self, X):\n        X = self.quant(X)\n        X = self.module(X)\n        return self.dequant(X)",
    "docstring": "A wrapper class that wraps the input module, adds QuantStub and DeQuantStub and surround the call to module with call to quant and dequant modules. This is used by the utility functions to add the quant and dequant modules, before function will just be observer, it observes the input tensor, after , will be swapped to which does actual quantization. Similarly for .",
    "type": "class",
    "file_path": "pytorch\\torch\\ao\\quantization\\stubs.py",
    "ast_data": "ClassDef name:QuantWrapper FunctionDef name:__init__ arg:self arg:module arguments arg arg Call Call Assign Call Call Call Call Call Call Call FunctionDef name:forward arg:self arg:X arguments arg arg Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "predict",
    "source_code": "def predict(self, X):\n    return self._decision_function(X)",
    "docstring": "Predict using the linear model. Parameters ---------- X : array-like or sparse matrix, shape (n_samples, n_features) Samples. Returns ------- C : array, shape (n_samples,) Returns predicted values.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\linear_model\\_base.py",
    "ast_data": "FunctionDef name:predict arg:self arg:X arguments arg arg Return return:yes Call"
  },
  {
    "library": "django",
    "name": "__deepcopy__",
    "source_code": "def __deepcopy__(self, memo):\n    obj = self.__class__()\n    for k, v in self.__dict__.items():\n        if k == '_result_cache':\n            obj.__dict__[k] = None\n        else:\n            obj.__dict__[k] = copy.deepcopy(v, memo)\n    return obj",
    "docstring": "Don't populate the QuerySet's cache.",
    "type": "method",
    "file_path": "django\\django\\db\\models\\query.py",
    "ast_data": "FunctionDef name:__deepcopy__ arg:self arg:memo arguments arg arg Assign Call For Call If Compare Assign Assign Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_script_module_receiver",
    "source_code": "@classmethod\ndef _script_module_receiver(cls, script_module_serialized):\n    f = io.BytesIO(script_module_serialized)\n    m = torch.jit.load(f)\n    return m",
    "docstring": "Given a serialized representation of a ScriptModule created with torch.jit.save, loads and returns the ScriptModule.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\rpc\\internal.py",
    "ast_data": "FunctionDef name:_script_module_receiver arg:cls arg:script_module_serialized arguments arg arg Assign Call Assign Call Return return:yes"
  },
  {
    "library": "kornia",
    "name": "rgb_to_linear_rgb",
    "source_code": "def rgb_to_linear_rgb(image: Tensor) -> Tensor:\n    if not isinstance(image, Tensor):\n        raise TypeError(f'Input type is not a Tensor. Got {type(image)}')\n    if len(image.shape) < 3 or image.shape[-3] != 3:\n        raise ValueError(f'Input size must have a shape of (*, 3, H, W).Got {image.shape}')\n    lin_rgb: Tensor = torch.where(image > 0.04045, torch.pow((image + 0.055) / 1.055, 2.4), image / 12.92)\n    return lin_rgb",
    "docstring": "Convert an sRGB image to linear RGB. Used in colorspace conversions. .. image:: _static/img/rgb_to_linear_rgb.png Args: image: sRGB Image to be converted to linear RGB of shape :math:. Returns: linear RGB version of the image with shape of :math:. Example: >>> input = torch.rand(2, 3, 4, 5) >>> output = rgb_to_linear_rgb(input) # 2x3x4x5",
    "type": "function",
    "file_path": "kornia\\kornia\\color\\rgb.py",
    "ast_data": "FunctionDef name:rgb_to_linear_rgb arg:image arguments arg If Call Raise Call Call If BoolOp Compare Call Compare Raise Call Call Compare Call Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "diag",
    "source_code": "def diag(self, X):\n    return np.full(_num_samples(X), self.noise_level, dtype=np.array(self.noise_level).dtype)",
    "docstring": "Returns the diagonal of the kernel k(X, X). The result of this method is identical to np.diag(self(X)); however, it can be evaluated more efficiently since only the diagonal is evaluated. Parameters ---------- X : array-like of shape (n_samples_X, n_features) or list of object Argument to the kernel. Returns ------- K_diag : ndarray of shape (n_samples_X,) Diagonal of kernel k(X, X)",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\gaussian_process\\kernels.py",
    "ast_data": "FunctionDef name:diag arg:self arg:X arguments arg arg Return return:yes Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "control_dependencies",
    "source_code": "def control_dependencies(self, control_inputs) -> _ControlDependenciesController:\n    if control_inputs is None:\n        return self._ControlDependenciesController(self, None)\n    control_ops = []\n    current = self._current_control_dependencies()\n    for c in control_inputs:\n        if isinstance(c, internal.IndexedSlices) or (hasattr(c, '_handle') and hasattr(c, 'op')):\n            c = c.op\n        c = self.as_graph_element(c)\n        if isinstance(c, tensor_lib.Tensor):\n            c = c.op\n        elif not isinstance(c, Operation):\n            raise TypeError('Control input must be Operation or Tensor: %s' % c)\n        if c not in current:\n            control_ops.append(c)\n            current.add(c)\n            c._set_attr('_has_manual_control_dependencies', attr_value_pb2.AttrValue(b=True))\n    return self._ControlDependenciesController(self, control_ops)",
    "docstring": "Returns a context manager that specifies control dependencies. Use with the keyword to specify that all operations constructed within the context should have control dependencies on . For example: Multiple calls to can be nested, and in that case a new will have control dependencies on the union of from all active contexts. You can pass None to clear the control dependencies: *N.B.* The control dependencies context applies *only* to ops that are constructed within the context. Merely using an op or tensor in the context does not add a control dependency. The following example illustrates this point: Also note that though execution of ops created under this scope will trigger execution of the dependencies, the ops created under this scope might still be pruned from a normal tensorflow graph. For example, in the following snippet of code the dependencies are never executed: This is because evaluating the gradient graph does not require evaluating the constant(1) op created in the forward pass. Args: control_inputs: A list of or objects which must be executed or computed before running the operations defined in the context. Can also be to clear the control dependencies. Returns: A context manager that specifies control dependencies for all operations constructed within the context. Raises: TypeError: If is not a list of or objects.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\ops.py",
    "ast_data": "FunctionDef name:control_dependencies arg:self arg:control_inputs arguments arg arg If Compare Return return:yes Call Assign Assign Call For If BoolOp Call BoolOp Call Call Assign Assign Call If Call Assign If Call Raise Call If Compare Call Call Call Call Return return:yes Call"
  },
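A minimal graph-mode sketch of the behavior described above (assumes TF 2.x; `control_inputs` here is a single op):

```python
import tensorflow as tf

g = tf.Graph()
with g.as_default():
    a = tf.constant(1.0)
    b = tf.constant(2.0)
    # Ops *created* inside the context depend on a's op; merely using
    # a tensor inside would not add a dependency.
    with g.control_dependencies([a.op]):
        c = b + 1.0
    assert a.op in c.op.control_inputs
```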
  {
    "library": "tensorflow",
    "name": "on_predict_batch_begin",
    "source_code": "@doc_controls.for_subclass_implementers\n@generic_utils.default\ndef on_predict_batch_begin(self, batch, logs=None):\n    pass",
    "docstring": "Called at the beginning of a batch in methods. Subclasses should override for any actions to run. Note that if the argument to in is set to , this method will only be called every batches. Args: batch: Integer, index of batch within the current epoch. logs: Dict, contains the return value of , it typically returns a dict with a key 'outputs' containing the model's outputs.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\callbacks.py",
    "ast_data": "FunctionDef name:on_predict_batch_begin arg:self arg:batch arg:logs arguments arg arg arg"
  },
  {
    "library": "matplotlib",
    "name": "set_ticks_position",
    "source_code": "def set_ticks_position(self, position):\n    _api.check_in_list(['lower', 'upper', 'both', 'default', 'none'], position=position)\n    self._tick_position = position",
    "docstring": "Set the ticks position. Parameters ---------- position : {'lower', 'upper', 'both', 'default', 'none'} The position of the bolded axis lines, ticks, and tick labels.",
    "type": "method",
    "file_path": "matplotlib\\lib\\mpl_toolkits\\mplot3d\\axis3d.py",
    "ast_data": "FunctionDef name:set_ticks_position arg:self arg:position arguments arg arg Call Assign"
  },
  {
    "library": "tensorflow",
    "name": "decorated",
    "source_code": "@Bind.decorator\ndef decorated(wrapped, args, kwargs):\n    if context.executing_eagerly():\n        return _eager_mode_decorator(wrapped, args, kwargs)\n    else:\n        return _graph_mode_decorator(wrapped, args, kwargs)",
    "docstring": "Decorated function with custom gradient.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\custom_gradient.py",
    "ast_data": "FunctionDef name:decorated arg:wrapped arg:args arg:kwargs arguments arg arg arg If Call Return return:yes Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "toco_convert",
    "source_code": "@_tf_export(v1=['lite.toco_convert'])\n@deprecation.deprecated(None, 'Use `lite.TFLiteConverter` instead.')\ndef toco_convert(input_data, input_tensors, output_tensors, *args, **kwargs):\n    return convert_graphdef(input_data, input_tensors, output_tensors, *args, **kwargs)",
    "docstring": "Convert a TensorFlow GraphDef to TFLite. This function is deprecated. Please use API instead. Conversion can be customized by providing arguments that are forwarded to and (see documentation for details). Args: input_data: Input data (i.e. often ). input_tensors: List of input tensors. Type and shape are computed using and . output_tensors: List of output tensors (only .name is used from this). *args: See and . **kwargs: See and . Returns: The converted TensorFlow Lite model in a bytes array. Raises: Defined in .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\lite\\python\\convert.py",
    "ast_data": "FunctionDef name:toco_convert arg:input_data arg:input_tensors arg:output_tensors arguments arg arg arg arg arg Return return:yes Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_collect_input_masks",
    "source_code": "def _collect_input_masks(self, inputs, args, kwargs):\n    if self._call_arg_was_passed('mask', args, kwargs):\n        return self._get_call_arg_value('mask', args, kwargs)\n    if not self._should_compute_mask:\n        return None\n    input_masks = nest.map_structure(lambda t: getattr(t, '_keras_mask', None), inputs)\n    if generic_utils.is_all_none(input_masks):\n        return None\n    return input_masks",
    "docstring": "Checks if argument was passed, else gathers mask from inputs.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\base_layer_v1.py",
    "ast_data": "FunctionDef name:_collect_input_masks arg:self arg:inputs arg:args arg:kwargs arguments arg arg arg arg If Call Return return:yes Call If Return return:no Assign Call arguments arg Call If Call Return return:no Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "__deepcopy__",
    "source_code": "def __deepcopy__(self, memo=None):\n    p = copy.deepcopy(super(), memo)\n    p._readonly = False\n    return p",
    "docstring": "Return a deepcopy of the . The will not be readonly, even if the source is.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\path.py",
    "ast_data": "FunctionDef name:__deepcopy__ arg:self arg:memo arguments arg arg Assign Call Call Assign Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "y1",
    "source_code": "@property\ndef y1(self):\n    return self.get_points()[1, 1]",
    "docstring": "The second of the pair of *y* coordinates that define the bounding box. This is not guaranteed to be greater than :attr: (for that, use :attr:).",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\transforms.py",
    "ast_data": "FunctionDef name:y1 arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "get_device_capability",
    "source_code": "@lru_cache(None)\ndef get_device_capability(device: Optional[_device_t]=None) -> dict[str, Any]:\n    props = get_device_properties(device)\n    return {prop: getattr(props, prop) for prop in dir(props) if not prop.startswith(('__', '_pybind11_'))}",
    "docstring": "Get the xpu capability of a device. Args: device (torch.device or int or str, optional): device for which to return the device capability. This function is a no-op if this argument is a negative integer. It uses the current device, given by :func:, if :attr: is `` (default). Returns: Dict[str, Any]: the xpu capability dictionary of the device",
    "type": "function",
    "file_path": "pytorch\\torch\\xpu\\__init__.py",
    "ast_data": "FunctionDef name:get_device_capability arg:device arguments arg Assign Call Return return:yes Call Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "convert_padding",
    "source_code": "def convert_padding(padding, expected_length=4):\n    explicit_paddings = []\n    if padding == 'EXPLICIT':\n        raise ValueError(\"'EXPLICIT' is not a valid value for `padding`. To use explicit padding, `padding` must be a list.\")\n    if isinstance(padding, (list, tuple)):\n        for i, dim_paddings in enumerate(padding):\n            if not isinstance(dim_paddings, (list, tuple)):\n                raise ValueError(f'When `padding` is a list, each element of `padding` must be a list/tuple of size 2. Received: padding={padding} with element at index {i} of type {type(dim_paddings)}')\n            if len(dim_paddings) != 2:\n                raise ValueError(f'When `padding` is a list, each element of `padding` must be a list/tuple of size 2. Received: padding={padding} with element at index {i} of size {len(dim_paddings)}')\n            explicit_paddings.extend(dim_paddings)\n        if len(padding) != expected_length:\n            raise ValueError(f'When padding is a list, it must be of size {expected_length}. Received: padding={padding} of size {len(padding)}')\n        padding = 'EXPLICIT'\n    return (padding, explicit_paddings)",
    "docstring": "Converts Python padding to C++ padding for ops which take EXPLICIT padding. Args: padding: the argument for a Python op which supports EXPLICIT padding. expected_length: Expected number of entries in the padding list when explicit padding is used. Returns: (padding, explicit_paddings) pair, which should be passed as attributes to a C++ op. Raises: ValueError: If padding is invalid.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\nn_ops.py",
    "ast_data": "FunctionDef name:convert_padding arg:padding arg:expected_length arguments arg arg Assign If Compare Raise Call If Call For Call If Call Raise Call Call If Compare Call Raise Call Call Call If Compare Call Raise Call Call Assign Return return:yes"
  },
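A hypothetical walk-through of `convert_padding`'s contract (assuming the function is importable from `tensorflow.python.ops.nn_ops`, per the file_path above): nested per-dimension pads are flattened and the mode is rewritten to 'EXPLICIT', while plain string modes pass through untouched.

```python
from tensorflow.python.ops.nn_ops import convert_padding

# Nested list: one (before, after) pair per dimension, 4 dims expected.
mode, explicit = convert_padding([[0, 0], [1, 2], [3, 4], [0, 0]])
assert mode == 'EXPLICIT'
assert explicit == [0, 0, 1, 2, 3, 4, 0, 0]

# String modes are returned unchanged with no explicit paddings.
assert convert_padding('SAME') == ('SAME', [])
```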
  {
    "library": "numpy",
    "name": "_get_format_function",
    "source_code": "def _get_format_function(data, **options):\n    dtype_ = data.dtype\n    dtypeobj = dtype_.type\n    formatdict = _get_formatdict(data, **options)\n    if dtypeobj is None:\n        return formatdict['numpystr']()\n    elif issubclass(dtypeobj, _nt.bool):\n        return formatdict['bool']()\n    elif issubclass(dtypeobj, _nt.integer):\n        if issubclass(dtypeobj, _nt.timedelta64):\n            return formatdict['timedelta']()\n        else:\n            return formatdict['int']()\n    elif issubclass(dtypeobj, _nt.floating):\n        if issubclass(dtypeobj, _nt.longdouble):\n            return formatdict['longfloat']()\n        else:\n            return formatdict['float']()\n    elif issubclass(dtypeobj, _nt.complexfloating):\n        if issubclass(dtypeobj, _nt.clongdouble):\n            return formatdict['longcomplexfloat']()\n        else:\n            return formatdict['complexfloat']()\n    elif issubclass(dtypeobj, (_nt.str_, _nt.bytes_)):\n        return formatdict['numpystr']()\n    elif issubclass(dtypeobj, _nt.datetime64):\n        return formatdict['datetime']()\n    elif issubclass(dtypeobj, _nt.object_):\n        return formatdict['object']()\n    elif issubclass(dtypeobj, _nt.void):\n        if dtype_.names is not None:\n            return StructuredVoidFormat.from_data(data, **options)\n        else:\n            return formatdict['void']()\n    else:\n        return formatdict['numpystr']()",
    "docstring": "find the right formatting function for the dtype_",
    "type": "function",
    "file_path": "numpy\\numpy\\_core\\arrayprint.py",
    "ast_data": "FunctionDef name:_get_format_function arg:data arguments arg arg Assign Assign Assign Call If Compare Return return:yes Call If Call Return return:yes Call If Call If Call Return return:yes Call Return return:yes Call If Call If Call Return return:yes Call Return return:yes Call If Call If Call Return return:yes Call Return return:yes Call If Call Return return:yes Call If Call Return return:yes Call If Call Return return:yes Call If Call If Compare Return return:yes Call Return return:yes Call Return return:yes Call"
  },
  {
    "library": "cherrypy",
    "name": "SessionAuthTool",
    "source_code": "class SessionAuthTool(HandlerTool):\n    pass",
    "docstring": "An HTTP session authentication tool.",
    "type": "class",
    "file_path": "cherrypy\\cherrypy\\_cptools.py",
    "ast_data": "ClassDef name:SessionAuthTool"
  },
  {
    "library": "pytorch",
    "name": "DefaultNodeQuantizeHandler",
    "source_code": "class DefaultNodeQuantizeHandler(QuantizeHandler):\n    pass",
    "docstring": "Common quantized op, first input and first output will be quantized",
    "type": "class",
    "file_path": "pytorch\\torch\\ao\\quantization\\fx\\quantize_handler.py",
    "ast_data": "ClassDef name:DefaultNodeQuantizeHandler"
  },
  {
    "library": "tensorflow",
    "name": "get_dense_tensor",
    "source_code": "def get_dense_tensor(self, transformation_cache, state_manager):\n    if isinstance(self.categorical_column, SequenceCategoricalColumn):\n        raise ValueError('In embedding_column: {}. categorical_column must not be of type SequenceCategoricalColumn. Suggested fix A: If you wish to use DenseFeatures, use a non-sequence categorical_column_with_*. Suggested fix B: If you wish to create sequence input, use SequenceFeatures instead of DenseFeatures. Given (type {}): {}'.format(self.name, type(self.categorical_column), self.categorical_column))\n    sparse_tensors = self.categorical_column.get_sparse_tensors(transformation_cache, state_manager)\n    return self._get_dense_tensor_internal(sparse_tensors, state_manager)",
    "docstring": "Returns tensor after doing the embedding lookup. Args: transformation_cache: A object to access features. state_manager: A to create / access resources such as lookup tables. Returns: Embedding lookup tensor. Raises: ValueError: is SequenceCategoricalColumn.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\feature_column\\feature_column_v2.py",
    "ast_data": "FunctionDef name:get_dense_tensor arg:self arg:transformation_cache arg:state_manager arguments arg arg arg If Call Raise Call Call Call Assign Call Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "is_any_real_numeric_dtype",
    "source_code": "def is_any_real_numeric_dtype(arr_or_dtype) -> bool:\n    return is_numeric_dtype(arr_or_dtype) and (not is_complex_dtype(arr_or_dtype)) and (not is_bool_dtype(arr_or_dtype))",
    "docstring": "Check whether the provided array or dtype is of a real number dtype. Parameters ---------- arr_or_dtype : array-like or dtype The array or dtype to check. Returns ------- boolean Whether or not the array or dtype is of a real number dtype. See Also -------- is_numeric_dtype : Check if a dtype is numeric. is_complex_dtype : Check if a dtype is complex. is_bool_dtype : Check if a dtype is boolean. Examples -------- >>> from pandas.api.types import is_any_real_numeric_dtype >>> is_any_real_numeric_dtype(int) True >>> is_any_real_numeric_dtype(float) True >>> is_any_real_numeric_dtype(object) False >>> is_any_real_numeric_dtype(str) False >>> is_any_real_numeric_dtype(complex(1, 2)) False >>> is_any_real_numeric_dtype(bool) False",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\dtypes\\common.py",
    "ast_data": "FunctionDef name:is_any_real_numeric_dtype arg:arr_or_dtype arguments arg Return return:yes BoolOp Call Call Call"
  },
  {
    "library": "matplotlib",
    "name": "get_kern_dist_from_name",
    "source_code": "def get_kern_dist_from_name(self, name1, name2):\n    return self._kern.get((name1, name2), 0)",
    "docstring": "Return the kerning pair distance (possibly 0) for chars *name1* and *name2*.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\_afm.py",
    "ast_data": "FunctionDef name:get_kern_dist_from_name arg:self arg:name1 arg:name2 arguments arg arg arg Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "in_neighborhood",
    "source_code": "def in_neighborhood(candidate: np.ndarray, n: int=2) -> bool:\n    indices = ((candidate - self.l_bounds) / self.cell_size).astype(int)\n    ind_min = np.maximum(indices - n, self.l_bounds.astype(int))\n    ind_max = np.minimum(indices + n + 1, self.grid_size)\n    if not np.isnan(self.sample_grid[tuple(indices)][0]):\n        return True\n    a = [slice(ind_min[i], ind_max[i]) for i in range(self.d)]\n    with np.errstate(invalid='ignore'):\n        if np.any(np.sum(np.square(candidate - self.sample_grid[tuple(a)]), axis=self.d) < self.radius_squared):\n            return True\n    return False",
    "docstring": "Check if there are samples closer than `candidate` sample.",
    "type": "method",
    "file_path": "scipy\\scipy\\stats\\_qmc.py",
    "ast_data": "FunctionDef name:in_neighborhood arg:candidate arg:n arguments arg arg Assign Call Assign Call Call Assign Call If Call Call Return return:yes Assign Call Call With Call If Call Compare Call Call Call Return return:yes Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "load_spmm_dataset",
    "source_code": "def load_spmm_dataset(dataset_path, hidden_size, sparsity, spmm_type, device, n_limit=math.inf):\n    current_folder_path = f'{dataset_path}/{sparsity}'\n    path = Path(current_folder_path)\n    files = path.glob('**/*.smtx')\n    print(dataset_path, hidden_size, sparsity)\n    index = 0\n    x_files, y_files = ([], [])\n    for f in files:\n        if index >= n_limit:\n            break\n        print('.', end='')\n        size, nnz = read_matrix_params(f.as_posix())\n        if size[1] == hidden_size:\n            x_files.append(f.as_posix())\n        if size[0] == hidden_size:\n            y_files.append(f.as_posix())\n        index += 1\n    print()\n    for fx, fy in zip(x_files, y_files):\n        x = load_sparse_matrix(fx, device)\n        y = gen_matrix(fy, device) if spmm_type == 'sparse@dense' else load_sparse_matrix(fy, device)\n        yield (x, y)",
    "docstring": "load_spmm_dataset loads a DLMC dataset for a sparse matrix-matrix multiplication (SPMM) performance test. Args: dataset_path: path of the dataset from DLMC collection. hidden_size This value allows tensors of varying sizes. sparsity: This value allows tensors of varying sparsities. spmm_type: This value allows tensors for or operations. device: Whether to place the Tensor on a GPU or CPU. n_limit: This value allows a dataset with some limit size.",
    "type": "function",
    "file_path": "pytorch\\benchmarks\\sparse\\dlmc\\utils.py",
    "ast_data": "FunctionDef name:load_spmm_dataset arg:dataset_path arg:hidden_size arg:sparsity arg:spmm_type arg:device arg:n_limit arguments arg arg arg arg arg arg Assign Assign Call Assign Call Call Assign Assign For If Compare Call Assign Call Call If Compare Call Call If Compare Call Call Call For Call Assign Call Assign Compare Call Call"
  },
  {
    "library": "pytorch",
    "name": "typecast_tensor",
    "source_code": "def typecast_tensor(t, target_dtype, casting):\n    can_cast = _dtypes_impl.can_cast_impl\n    if not can_cast(t.dtype, target_dtype, casting=casting):\n        raise TypeError(f\"Cannot cast array data from {t.dtype} to {target_dtype} according to the rule '{casting}'\")\n    return cast_if_needed(t, target_dtype)",
    "docstring": "Dtype-cast tensor to target_dtype. Parameters ---------- t : torch.Tensor The tensor to cast target_dtype : torch dtype object The array dtype to cast all tensors to casting : str The casting mode, see Returns ------- of the dtype Raises ------ ValueError if the argument cannot be cast according to the rule",
    "type": "function",
    "file_path": "pytorch\\torch\\_numpy\\_util.py",
    "ast_data": "FunctionDef name:typecast_tensor arg:t arg:target_dtype arg:casting arguments arg arg arg Assign If Call Raise Call Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "in_bounds",
    "source_code": "def in_bounds(x, lb, ub):\n    return np.all((x >= lb) & (x <= ub))",
    "docstring": "Check if a point lies within bounds.",
    "type": "function",
    "file_path": "scipy\\scipy\\optimize\\_lsq\\common.py",
    "ast_data": "FunctionDef name:in_bounds arg:x arg:lb arg:ub arguments arg arg arg Return return:yes Call Compare Compare"
  },
  {
    "library": "tensorflow",
    "name": "apply",
    "source_code": "@doc_controls.do_not_doc_inheritable\ndef apply(self, inputs, *args, **kwargs):\n    warnings.warn('`layer.apply` is deprecated and will be removed in a future version. Please use `layer.__call__` method instead.')\n    return self.__call__(inputs, *args, **kwargs)",
    "docstring": "Deprecated, do NOT use! This is an alias of . Args: inputs: Input tensor(s). *args: additional positional arguments to be passed to . **kwargs: additional keyword arguments to be passed to . Returns: Output tensor(s).",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\base_layer.py",
    "ast_data": "FunctionDef name:apply arg:self arg:inputs arguments arg arg arg arg Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "forward",
    "source_code": "def forward(self, x):\n    x = x + self.pe[:x.size(0), :]\n    return self.dropout(x)",
    "docstring": "Inputs of forward function Args: x: the sequence fed to the positional encoder model (required). Shape: x: [sequence length, batch size, embed dim] output: [sequence length, batch size, embed dim] Examples: >>> output = pos_encoder(x)",
    "type": "method",
    "file_path": "pytorch\\benchmarks\\functional_autograd_benchmark\\torchaudio_models.py",
    "ast_data": "FunctionDef name:forward arg:self arg:x arguments arg arg Assign Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "is_available",
    "source_code": "def is_available() -> bool:\n    return hasattr(torch._C, '_c10d_init')",
    "docstring": "Return `` for MacOS.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\__init__.py",
    "ast_data": "FunctionDef name:is_available arguments Return return:yes Call"
  },
  {
    "library": "django",
    "name": "punycode",
    "source_code": "def punycode(domain):\n    return domain.encode('idna').decode('ascii')",
    "docstring": "Return the Punycode of the given domain if it's non-ASCII.",
    "type": "function",
    "file_path": "django\\django\\utils\\encoding.py",
    "ast_data": "FunctionDef name:punycode arg:domain arguments arg Return return:yes Call Call"
  },
  {
    "library": "pandas",
    "name": "_gen_rows_without_counts",
    "source_code": "def _gen_rows_without_counts(self) -> Iterator[Sequence[str]]:\n    yield from zip(self._gen_line_numbers(), self._gen_columns(), self._gen_dtypes())",
    "docstring": "Iterator with string representation of body data without counts.",
    "type": "method",
    "file_path": "pandas\\pandas\\io\\formats\\info.py",
    "ast_data": "FunctionDef name:_gen_rows_without_counts arg:self arguments arg Call Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "frozen_saveable_objects",
    "source_code": "def frozen_saveable_objects(self, object_map=None, to_graph=None, call_with_mapped_captures=None):\n    return save_util_v1.frozen_saveables_and_savers(self, object_map, to_graph, call_with_mapped_captures)[0]",
    "docstring": "Creates SaveableObjects with the current object graph frozen.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\checkpoint\\graph_view.py",
    "ast_data": "FunctionDef name:frozen_saveable_objects arg:self arg:object_map arg:to_graph arg:call_with_mapped_captures arguments arg arg arg arg Return return:yes Call"
  },
  {
    "library": "django",
    "name": "verify_geom",
    "source_code": "def verify_geom(self, geom, model_field):\n    if geom.is_measured:\n        geom.set_measured(False)\n    if self.coord_dim == 2 and geom.is_3d:\n        geom.set_3d(False)\n    if self.make_multi(geom.geom_type, model_field):\n        multi_type = self.MULTI_TYPES[geom.geom_type.num]\n        g = OGRGeometry(multi_type)\n        g.add(geom)\n    else:\n        g = geom\n    if self.transform:\n        g.transform(self.transform)\n    return g.wkt",
    "docstring": "Verify the geometry -- construct and return a GeometryCollection if necessary (for example if the model field is MultiPolygonField while the mapped shapefile only contains Polygons).",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\utils\\layermapping.py",
    "ast_data": "FunctionDef name:verify_geom arg:self arg:geom arg:model_field arguments arg arg arg If Call If BoolOp Compare Call If Call Assign Assign Call Call Assign If Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "Default",
    "source_code": "class Default:\n    pass",
    "docstring": "Singleton default object that will cause the ConfigFuzzer to always use the default value set in the config.",
    "type": "class",
    "file_path": "pytorch\\torch\\_inductor\\fuzzer.py",
    "ast_data": "ClassDef name:Default"
  },
  {
    "library": "tensorflow",
    "name": "tpu_ordinal",
    "source_code": "def tpu_ordinal(self, replica: int=0, logical_core: int=0) -> int:\n    coordinates = self.coordinates(replica, logical_core)\n    return self._topology.tpu_device_ordinal_at_coordinates(coordinates)",
    "docstring": "Returns the ordinal of the TPU device assigned to a logical core.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\tpu\\device_assignment.py",
    "ast_data": "FunctionDef name:tpu_ordinal arg:self arg:replica arg:logical_core arguments arg arg arg Assign Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "size_args",
    "source_code": "def size_args(self):\n    X = self.input_nodes[0]\n    W = self.input_nodes[1]\n    Y = self.output_node\n    M = X.get_size()[0]\n    K = X.get_size()[1]\n    N = W.get_size()[1]\n    LDA = X.get_stride()[0 if X.get_stride()[1] == 1 else 1]\n    LDB = W.get_stride()[0 if W.get_stride()[1] == 1 else 1]\n    LDC = Y.get_stride()[0 if Y.get_stride()[1] == 1 else 1]\n    return (M, N, K, LDA, LDB, LDC)",
    "docstring": "Sizes and strides to be used for the kernel call",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\codegen\\rocm\\ck_tile_universal_gemm_template.py",
    "ast_data": "FunctionDef name:size_args arg:self arguments arg Assign Assign Assign Assign Call Assign Call Assign Call Assign Call Compare Call Assign Call Compare Call Assign Call Compare Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_set_element",
    "source_code": "def _set_element(root_dict: STATE_DICT_TYPE, path: OBJ_PATH, value: Any) -> None:\n    cur_container = cast(CONTAINER_TYPE, root_dict)\n\n    def extend_list(lst: list[Any], idx: int) -> None:\n        while len(lst) <= idx:\n            lst.append(None)\n    for i in range(1, len(path)):\n        prev_key = path[i - 1]\n        key = path[i]\n        def_val: Union[CONTAINER_TYPE, list[Any]] = {} if type(key) == str else []\n        if isinstance(cur_container, Mapping):\n            cur_container = cast(CONTAINER_TYPE, cur_container.setdefault(prev_key, def_val))\n        else:\n            extend_list(cur_container, prev_key)\n            if cur_container[prev_key] is None:\n                cur_container[prev_key] = def_val\n            cur_container = cur_container[prev_key]\n    key = path[-1]\n    if type(key) == int:\n        extend_list(cast(list[Any], cur_container), key)\n    cur_container[key] = value",
    "docstring": "Set `` object path.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\_state_dict_utils.py",
    "ast_data": "FunctionDef name:_set_element arg:root_dict arg:path arg:value arguments arg arg arg Assign Call FunctionDef name:extend_list arg:lst arg:idx arguments arg arg While Compare Call Call For Call Call Assign Assign Compare Call If Call Assign Call Call Call If Compare Assign Assign Assign If Compare Call Call Call Assign"
  },
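A usage sketch of the traversal contract (assuming `_set_element` is in scope; the keys here are illustrative): string path components create dicts, integer components create and pad lists.

```python
state: dict = {}
_set_element(state, ('root', 'layers', 1, 'weight'), 'tensor-0')
# state == {'root': {'layers': [None, {'weight': 'tensor-0'}]}}
```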
  {
    "library": "scikit-learn",
    "name": "_check_bounds_params",
    "source_code": "def _check_bounds_params(self):\n    list_close = np.isclose(self.bounds, np.atleast_2d(self.theta).T)\n    idx = 0\n    for hyp in self.hyperparameters:\n        if hyp.fixed:\n            continue\n        for dim in range(hyp.n_elements):\n            if list_close[idx, 0]:\n                warnings.warn('The optimal value found for dimension %s of parameter %s is close to the specified lower bound %s. Decreasing the bound and calling fit again may find a better value.' % (dim, hyp.name, hyp.bounds[dim][0]), ConvergenceWarning)\n            elif list_close[idx, 1]:\n                warnings.warn('The optimal value found for dimension %s of parameter %s is close to the specified upper bound %s. Increasing the bound and calling fit again may find a better value.' % (dim, hyp.name, hyp.bounds[dim][1]), ConvergenceWarning)\n            idx += 1",
    "docstring": "Called after fitting to warn if bounds may have been too tight.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\gaussian_process\\kernels.py",
    "ast_data": "FunctionDef name:_check_bounds_params arg:self arguments arg Assign Call Call Assign For If For Call If Call If Call"
  },
  {
    "library": "pytorch",
    "name": "get_qconfig_info",
    "source_code": "def get_qconfig_info(self, model) -> dict[str, DetectorQConfigInfo]:\n    per_channel_info = self._detect_per_channel_helper(model)\n    module_fqn_to_detector_qconfig_info = {}\n    for module_fqn in per_channel_info:\n        detector_qconfig_info = DetectorQConfigInfo(module_fqn)\n        per_chan_supported: bool = per_channel_info[module_fqn][self.PER_CHAN_SUPPORTED_KEY]\n        detector_qconfig_info.is_weight_per_channel = per_chan_supported\n        module_fqn_to_detector_qconfig_info[module_fqn] = detector_qconfig_info\n    return module_fqn_to_detector_qconfig_info",
    "docstring": "Returns the DetectorQConfigInfo for each module_fqn relevant Args model (nn.Module or subclass): model to find observer insertion points Returns a Dict mapping from unique observer fqns (where we want to insert them) to: A DetectorQConfigInfo with the information to generate a QConfig for a specific module",
    "type": "method",
    "file_path": "pytorch\\torch\\ao\\quantization\\fx\\_model_report\\detector.py",
    "ast_data": "FunctionDef name:get_qconfig_info arg:self arg:model arguments arg arg Assign Call Assign For Assign Call Assign Assign Return return:yes"
  },
  {
    "library": "pandas",
    "name": "_ensure_array",
    "source_code": "@classmethod\ndef _ensure_array(cls, data, dtype, copy: bool):\n    if data.ndim > 1:\n        raise ValueError('Index data must be 1-dimensional')\n    elif dtype == np.float16:\n        raise NotImplementedError('float16 indexes are not supported')\n    if copy:\n        data = data.copy()\n    return data",
    "docstring": "Ensure we have a valid array to pass to _simple_new.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\indexes\\base.py",
    "ast_data": "FunctionDef name:_ensure_array arg:cls arg:data arg:dtype arg:copy arguments arg arg arg arg If Compare Raise Call If Compare Raise Call If Assign Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "infer_symbolic_types",
    "source_code": "def infer_symbolic_types(traced):\n    r = Refine(traced)\n    r.refine()\n    mgu = unify_eq(r.constraints)\n    substitute_all_types(traced.graph, mgu)\n    r = Refine(traced)\n    r.refine()\n    mgu = unify_eq(r.constraints)\n    substitute_all_types(traced.graph, mgu)\n    r.symbolic_relations()",
    "docstring": "Calls our symbolic inferencer twice. This is useful when one pass is not enough to infer all the information such as the case for braodcasting.",
    "type": "function",
    "file_path": "pytorch\\torch\\fx\\experimental\\unify_refinements.py",
    "ast_data": "FunctionDef name:infer_symbolic_types arg:traced arguments arg Assign Call Call Assign Call Call Assign Call Call Assign Call Call Call"
  },
  {
    "library": "numpy",
    "name": "strip",
    "source_code": "def strip(self, chars=None):\n    return strip(self, chars)",
    "docstring": "For each element in , return a copy with the leading and trailing characters removed. See Also -------- char.strip",
    "type": "method",
    "file_path": "numpy\\numpy\\_core\\defchararray.py",
    "ast_data": "FunctionDef name:strip arg:self arg:chars arguments arg arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "cluster_spec",
    "source_code": "def cluster_spec(self):\n    tf_config = _load_tf_config(self._port)\n    if 'cluster' not in tf_config:\n        return ClusterSpec({})\n    return ClusterSpec(tf_config['cluster'])",
    "docstring": "Returns a ClusterSpec based on the SageMaker environment variables. Returns: A ClusterSpec with information from the SageMaker environment variables.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\cluster_resolver\\sagemaker_cluster_resolver.py",
    "ast_data": "FunctionDef name:cluster_spec arg:self arguments arg Assign Call If Compare Return return:yes Call Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "get_bad",
    "source_code": "def get_bad(self):\n    return self._rgba_bad",
    "docstring": "Get the color for masked values.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\colors.py",
    "ast_data": "FunctionDef name:get_bad arg:self arguments arg Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "predict",
    "source_code": "@available_if(_search_estimator_has('predict'))\ndef predict(self, X):\n    check_is_fitted(self)\n    return self.best_estimator_.predict(X)",
    "docstring": "Call predict on the estimator with the best found parameters. Only available if `X` based on the estimator with the best found parameters.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\model_selection\\_search.py",
    "ast_data": "FunctionDef name:predict arg:self arg:X arguments arg arg Call Return return:yes Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "get_registered_kernels_for_op",
    "source_code": "def get_registered_kernels_for_op(name):\n    buf = c_api.TF_GetRegisteredKernelsForOp(name)\n    data = c_api.TF_GetBuffer(buf)\n    kernel_list = kernel_def_pb2.KernelList()\n    kernel_list.ParseFromString(compat.as_bytes(data))\n    return kernel_list",
    "docstring": "Returns a KernelList proto of registered kernels for a given op. Args: name: A string representing the name of the op whose kernels to retrieve.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\kernels.py",
    "ast_data": "FunctionDef name:get_registered_kernels_for_op arg:name arguments arg Assign Call Assign Call Assign Call Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "graph_by_id",
    "source_code": "def graph_by_id(self, graph_id):\n    return self._graph_by_id[graph_id]",
    "docstring": "Get a DebuggedGraph object by its ID.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\debug\\lib\\debug_events_reader.py",
    "ast_data": "FunctionDef name:graph_by_id arg:self arg:graph_id arguments arg arg Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "fit",
    "source_code": "@abstractmethod\ndef fit(self, X, y, **fit_params):\n    names, clfs = self._validate_estimators()\n    if self.weights is not None and len(self.weights) != len(self.estimators):\n        raise ValueError(f'Number of `estimators` and weights must be equal; got {len(self.weights)} weights, {len(self.estimators)} estimators')\n    if _routing_enabled():\n        routed_params = process_routing(self, 'fit', **fit_params)\n    else:\n        routed_params = Bunch()\n        for name in names:\n            routed_params[name] = Bunch(fit={})\n            if 'sample_weight' in fit_params:\n                routed_params[name].fit['sample_weight'] = fit_params['sample_weight']\n    self.estimators_ = Parallel(n_jobs=self.n_jobs)((delayed(_fit_single_estimator)(clone(clf), X, y, fit_params=routed_params[name]['fit'], message_clsname='Voting', message=self._log_message(name, idx + 1, len(clfs))) for idx, (name, clf) in enumerate(zip(names, clfs)) if clf != 'drop'))\n    self.named_estimators_ = Bunch()\n    est_iter = iter(self.estimators_)\n    for name, est in self.estimators:\n        current_est = est if est == 'drop' else next(est_iter)\n        self.named_estimators_[name] = current_est\n        if hasattr(current_est, 'feature_names_in_'):\n            self.feature_names_in_ = current_est.feature_names_in_\n    return self",
    "docstring": "Get common fit operations.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\ensemble\\_voting.py",
    "ast_data": "FunctionDef name:fit arg:self arg:X arg:y arguments arg arg arg arg Assign Call If BoolOp Compare Compare Call Call Raise Call Call Call If Call Assign Call Assign Call For Assign Call If Compare Assign Assign Call Call Call Call Call Call Call Call Call Compare Assign Call Assign Call For Assign Compare Call Assign If Call Assign Return return:yes"
  },
  {
    "library": "kornia",
    "name": "translation",
    "source_code": "@property\ndef translation(self) -> Tensor:\n    return self._dst_from_src.translation",
    "docstring": "Translation part of the pose.",
    "type": "method",
    "file_path": "kornia\\kornia\\geometry\\pose.py",
    "ast_data": "FunctionDef name:translation arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "match_staging_level",
    "source_code": "def match_staging_level(value, like_value):\n    if tensor_util.is_tf_type(like_value):\n        return constant_op.constant(value)\n    return value",
    "docstring": "Casts a value to be staged at the same level as another.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\autograph\\lang\\special_functions.py",
    "ast_data": "FunctionDef name:match_staging_level arg:value arg:like_value arguments arg arg If Call Return return:yes Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "last_call",
    "source_code": "@property\ndef last_call(self) -> timedelta:\n    return self._last_call",
    "docstring": "Get the last call timeout.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\elastic\\rendezvous\\dynamic_rendezvous.py",
    "ast_data": "FunctionDef name:last_call arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_partition",
    "source_code": "def _partition(self, dataset):\n    if self._num_local_devices_per_replica == 1 and self._partition_offset == 0:\n        return dataset\n\n    def slice_batch(index, batch):\n        flattened_batch = nest.flatten(batch)\n        flattened_output = []\n        norm_index = math_ops.cast(index % self._num_local_devices_per_replica, dtype=dtypes.int32)\n        norm_index += self._partition_offset\n        coords = self._mesh.coords(norm_index)\n        coords = array_ops.reshape(coords, (1, -1))\n        for element, shard_counts, idx_matrix in zip(flattened_batch, self._all_shard_counts, self._index_matrices):\n            indexes = math_ops.matmul(coords, idx_matrix)\n            start = array_ops.reshape(indexes, (-1,))\n            size = array_ops.shape_v2(element, out_type=dtypes.int32) // shard_counts\n            flattened_output.append(array_ops.slice(element, begin=start, size=size))\n        return nest.pack_sequence_as(batch, flattened_output)\n    enumerated_dataset = dataset.enumerate()\n    partitioned_dataset = enumerated_dataset.map(slice_batch)\n    return partitioned_dataset",
    "docstring": "Slices each dataset element on any sharded non-batch dimension.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\dtensor\\python\\input_util.py",
    "ast_data": "FunctionDef name:_partition arg:self arg:dataset arguments arg arg If BoolOp Compare Compare Return return:yes FunctionDef name:slice_batch arg:index arg:batch arguments arg arg Assign Call Assign Assign Call Assign Call Assign Call For Call Assign Call Assign Call Assign Call Call Call Return return:yes Call Assign Call Assign Call Return return:yes"
  },
  {
    "library": "scipy",
    "name": "log_softmax",
    "source_code": "@xp_capabilities()\ndef log_softmax(x, axis=None):\n    xp = array_namespace(x)\n    x = xp.asarray(x)\n    x_max = xp.max(x, axis=axis, keepdims=True)\n    if x_max.ndim > 0:\n        x_max = xpx.at(x_max, ~xp.isfinite(x_max)).set(0)\n    elif not xp.isfinite(x_max):\n        x_max = 0\n    tmp = x - x_max\n    exp_tmp = xp.exp(tmp)\n    with np.errstate(divide='ignore'):\n        s = xp.sum(exp_tmp, axis=axis, keepdims=True)\n        out = xp.log(s)\n    return tmp - out",
    "docstring": "Compute the logarithm of the softmax function. In principle:: log_softmax(x) = log(softmax(x)) but using a more accurate implementation. Parameters ---------- x : array_like Input array. axis : int or tuple of ints, optional Axis to compute values along. Default is None and softmax will be computed over the entire array . Returns ------- s : ndarray or scalar An array with the same shape as . Exponential of the result will sum to 1 along the specified axis. If is a scalar, a scalar is returned. Notes ----- is more accurate than `softmax` saturate (see examples below). .. versionadded:: 1.5.0 Examples -------- >>> import numpy as np >>> from scipy.special import log_softmax >>> from scipy.special import softmax >>> np.set_printoptions(precision=5) >>> x = np.array([1000.0, 1.0]) >>> y = log_softmax(x) >>> y array([ 0., -999.]) >>> with np.errstate(divide='ignore'): ... y = np.log(softmax(x)) ... >>> y array([ 0., -inf])",
    "type": "function",
    "file_path": "scipy\\scipy\\special\\_logsumexp.py",
    "ast_data": "FunctionDef name:log_softmax arg:x arg:axis arguments arg arg Assign Call Assign Call Assign Call If Compare Assign Call Call Call If Call Assign Assign Assign Call With Call Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "CosineMixture",
    "source_code": "class CosineMixture(Benchmark):\n    change_dimensionality = True\n\n    def __init__(self, dimensions=2):\n        Benchmark.__init__(self, dimensions)\n        self._bounds = [(-1.0, 1.0)] * self.N\n        self.global_optimum = [[0.0 for _ in range(self.N)]]\n        self.fglob = -0.1 * self.N\n\n    def fun(self, x, *args):\n        self.nfev += 1\n        return -0.1 * sum(cos(5.0 * pi * x)) + sum(x ** 2.0)",
    "docstring": "Cosine Mixture objective function. This class defines the Cosine Mixture global optimization problem. This is a multimodal minimization problem defined as follows: .. math:: f_{\\text{CosineMixture}}(x) = -0.1 \\sum_{i=1}^n \\cos(5 \\pi x_i) + \\sum_{i=1}^n x_i^2 Here, :math: represents the number of dimensions and :math: for :math:. *Global optimum*: :math: for :math: for :math: .. [1] Ali, M.M, Khompatraporn, C. , Zabinski, B. A Numerical Evaluation of Several Stochastic Algorithms on Selected Continuous Global Optimization Test Problems, Journal of Global Optimization, 2005, 31, 635",
    "type": "class",
    "file_path": "scipy\\benchmarks\\benchmarks\\go_benchmark_functions\\go_funcs_C.py",
    "ast_data": "ClassDef name:CosineMixture Assign FunctionDef name:__init__ arg:self arg:dimensions arguments arg arg Call Assign Assign Call Assign FunctionDef name:fun arg:self arg:x arguments arg arg arg Return return:yes Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "_get_trace_graph",
    "source_code": "def _get_trace_graph(f, args=(), kwargs=None, strict=True, _force_outplace=False, return_inputs=False, _return_inputs_states=False):\n    if kwargs is None:\n        kwargs = {}\n    if not isinstance(args, tuple):\n        args = (args,)\n    outs = ONNXTracedModule(f, strict, _force_outplace, return_inputs, _return_inputs_states)(*args, **kwargs)\n    return outs",
    "docstring": "Return a tuple on tracing a function or model. .. warning:: This function is internal-only and should only be used by the ONNX exporter. If you are trying to get a graph through tracing, please go through the public API instead:: trace = torch.jit.trace(nn.LSTMCell(), (input, hidden)) trace_graph = trace.graph Trace a function or model, returning a tuple consisting of the both the *trace* of an execution, as well as the original return value. If return_inputs, also returns the trace inputs as part of the tuple Tracing is guaranteed not to change the semantics of the function/module that is traced. Args: f (torch.nn.Module or function): the function or module to be traced. args (tuple or Tensor): the positional arguments to pass to the function/module to be traced. A non-tuple is assumed to be a single positional argument to be passed to the model. kwargs (dict): the keyword arguments to pass to the function/module to be traced. Example (trace a cell): .. testcode:: trace = torch.jit.trace(nn.LSTMCell(), (input, hidden))",
    "type": "function",
    "file_path": "pytorch\\torch\\jit\\_trace.py",
    "ast_data": "FunctionDef name:_get_trace_graph arg:f arg:args arg:kwargs arg:strict arg:_force_outplace arg:return_inputs arg:_return_inputs_states arguments arg arg arg arg arg arg arg If Compare Assign If Call Assign Assign Call Call Return return:yes"
  },
  {
    "library": "kornia",
    "name": "visualize_output",
    "source_code": "def visualize_output(self, semantic_mask: Tensor, colors: Tensor) -> Tensor:\n    if semantic_mask.dim() == 3:\n        channel_dim = 0\n    elif semantic_mask.dim() == 4:\n        channel_dim = 1\n    else:\n        raise ValueError(f'Semantic mask must be of shape (C, H, W) or (B, C, H, W), got {semantic_mask.shape}.')\n    if torch.allclose(semantic_mask.sum(dim=channel_dim), torch.tensor(1, dtype=semantic_mask.dtype, device=semantic_mask.device)):\n        semantic_mask = semantic_mask.argmax(dim=channel_dim, keepdim=True)\n        output = colors[semantic_mask.squeeze(channel_dim)]\n        if semantic_mask.dim() == 3:\n            output = output.permute(2, 0, 1)\n        elif semantic_mask.dim() == 4:\n            output = output.permute(0, 3, 1, 2)\n        else:\n            raise ValueError(f'Semantic mask must be of shape (C, H, W) or (B, C, H, W), got {semantic_mask.shape}.')\n    else:\n        raise ValueError('Only muliclass segmentation is supported. Please ensure a softmax is used, or submit a PR.')\n    return output",
    "docstring": "Visualize the output of the segmentation model. Args: semantic_mask: The output of the segmentation model. Shape should be (C, H, W) or (B, C, H, W). colors: The color map to use for visualizing the output of the segmentation model. Shape should be (num_classes, 3). Returns: A tensor of shape (3, H, W) or (B, 3, H, W) representing the visualized output of the segmentation model. Raises: ValueError: If the shape of the semantic mask is not of shape (C, H, W) or (B, C, H, W). ValueError: If the shape of the colors is not of shape (num_classes, 3). ValueError: If only muliclass segmentation is supported. Please ensure a softmax is used, or submit a PR.",
    "type": "method",
    "file_path": "kornia\\kornia\\models\\segmentation\\base.py",
    "ast_data": "FunctionDef name:visualize_output arg:self arg:semantic_mask arg:colors arguments arg arg arg If Compare Call Assign If Compare Call Assign Raise Call If Call Call Call Assign Call Assign Call If Compare Call Assign Call If Compare Call Assign Call Raise Call Raise Call Return return:yes"
  },
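A standalone sketch of the color-LUT technique the method applies (the batched branch; shapes follow the docstring above):

```python
import torch

num_classes, H, W = 3, 4, 4
logits = torch.randn(1, num_classes, H, W)
mask = logits.softmax(dim=1)                  # (B, C, H, W), sums to 1
colors = torch.tensor([[255, 0, 0], [0, 255, 0], [0, 0, 255]])

labels = mask.argmax(dim=1)                   # (B, H, W) class indices
rgb = colors[labels].permute(0, 3, 1, 2)      # LUT lookup -> (B, 3, H, W)
assert rgb.shape == (1, 3, H, W)
```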
  {
    "library": "tensorflow",
    "name": "update_state",
    "source_code": "def update_state(self, y_true, y_pred, sample_weight=None):\n    return metrics_utils.update_confusion_matrix_variables({metrics_utils.ConfusionMatrix.TRUE_POSITIVES: self.true_positives, metrics_utils.ConfusionMatrix.TRUE_NEGATIVES: self.true_negatives, metrics_utils.ConfusionMatrix.FALSE_POSITIVES: self.false_positives, metrics_utils.ConfusionMatrix.FALSE_NEGATIVES: self.false_negatives}, y_true, y_pred, thresholds=self.thresholds, thresholds_distributed_evenly=self._thresholds_distributed_evenly, class_id=self.class_id, sample_weight=sample_weight)",
    "docstring": "Accumulates confusion matrix statistics. Args: y_true: The ground truth values. y_pred: The predicted values. sample_weight: Optional weighting of each example. Defaults to 1. Can be a whose rank is either 0, or the same rank as , and must be broadcastable to . Returns: Update op.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\metrics.py",
    "ast_data": "FunctionDef name:update_state arg:self arg:y_true arg:y_pred arg:sample_weight arguments arg arg arg arg Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "get_fontstyle",
    "source_code": "def get_fontstyle(self):\n    return self._fontproperties.get_style()",
    "docstring": "Return the font style as a string. See Also -------- .font_manager.FontProperties.get_style",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\text.py",
    "ast_data": "FunctionDef name:get_fontstyle arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "_set_parameters_using_data_flow",
    "source_code": "def _set_parameters_using_data_flow(self) -> None:\n    snapshot = self._category_snapshot()\n    candidate_parameters: set[TensorAndID] = set()\n    candidate_fwd_tensors: set[TensorAndID] = {i for i, category in snapshot.items() if category == Category.INPUT}\n    for node in self._data_flow_graph.flow_nodes:\n        inputs = {(key, value) for key, (_, value) in node.inputs.items()}\n        if RecordScope.BACKWARD_FUNCTION not in get_scopes(node._event) and (not any((self._is_gradient(*i) for i in inputs))) and (not any((self._is_gradient(*i) for i in node.outputs.items()))) and candidate_fwd_tensors.intersection(inputs):\n            candidate_fwd_tensors |= node.outputs.items()\n            candidate_parameters |= inputs.difference(candidate_fwd_tensors)\n    used_for_gradient: set[TensorAndID] = set()\n    for node in reversed(self._data_flow_graph.flow_nodes):\n        if any((self._is_gradient(*i) or i in used_for_gradient for i in node.outputs.items())):\n            used_for_gradient.update(((key, version) for key, (_, version) in node.inputs.items()))\n    candidate_parameters.intersection_update(used_for_gradient)\n    parameter_keys = {key.id for key, _ in candidate_parameters}\n    parameter_keys &= self._any_version_depends_on_gradient()\n    for key, _ in snapshot.keys():\n        if key.id in parameter_keys:\n            self._categories.set_by_id(key, Category.PARAMETER)",
    "docstring": "Deduce which Tensors are parameters. Consider the following code for the step of SGD with momentum (nesterov=False), where is the gradient of and is the momentum buffer. Both and take a gradient and perform an in-place update. The python tracer will inspect calls to and to extract parameter and optimizer state respectively (including parameters), so this is generally a non-issue. However as a fallback we can also exploit several properties of parameters to distinguish them from other model state. First, they are directly used in the forward pass. (At this point we haven't established which parts of the graph correspond to the forward pass but we can deduce enough to suffice.) Some mutable state such as batch norm moving averages also contribute to the forward pass, but optimizer state does not. Second, a parameter is by definition used to compute at least one gradient and depends on at least one gradient.",
    "type": "method",
    "file_path": "pytorch\\torch\\profiler\\_memory_profiler.py",
    "ast_data": "FunctionDef name:_set_parameters_using_data_flow arg:self arguments arg Assign Call Call Call Compare For Assign Call If BoolOp Compare Call Call Call Call Call Call Call Call Call Call For Call If Call BoolOp Call Compare Call Call Call Call Assign Call For Call If Compare Call"
  },
  {
    "library": "tensorflow",
    "name": "set_tpu_topology",
    "source_code": "def set_tpu_topology(self, serialized_tpu_topology):\n    self._tpu_topology = topology_pb2.TopologyProto()\n    self._tpu_topology.ParseFromString(serialized_tpu_topology)",
    "docstring": "Sets the tpu topology info stored in this resolver.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\cluster_resolver\\tpu\\tpu_cluster_resolver.py",
    "ast_data": "FunctionDef name:set_tpu_topology arg:self arg:serialized_tpu_topology arguments arg arg Assign Call Call"
  },
  {
    "library": "scipy",
    "name": "_squeeze_output",
    "source_code": "def _squeeze_output(out):\n    out = out.squeeze()\n    if out.ndim == 0:\n        out = out[()]\n    return out",
    "docstring": "Remove single-dimensional entries from array and convert to scalar, if necessary.",
    "type": "function",
    "file_path": "scipy\\scipy\\stats\\_multivariate.py",
    "ast_data": "FunctionDef name:_squeeze_output arg:out arguments arg Assign Call If Compare Assign Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "__init__",
    "source_code": "def __init__(self, grid_helper, side, nth_coord_ticks=None):\n    lon1, lon2, lat1, lat2 = grid_helper.grid_finder.extreme_finder(*[None] * 5)\n    value, nth_coord = _api.check_getitem(dict(left=(lon1, 0), right=(lon2, 0), bottom=(lat1, 1), top=(lat2, 1)), side=side)\n    super().__init__(grid_helper, nth_coord, value, axis_direction=side)\n    if nth_coord_ticks is None:\n        nth_coord_ticks = nth_coord\n    self.nth_coord_ticks = nth_coord_ticks\n    self.value = value\n    self.grid_helper = grid_helper\n    self._side = side",
    "docstring": "nth_coord = along which coordinate value varies. nth_coord = 0 -> x axis, nth_coord = 1 -> y axis",
    "type": "method",
    "file_path": "matplotlib\\lib\\mpl_toolkits\\axisartist\\floating_axes.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:grid_helper arg:side arg:nth_coord_ticks arguments arg arg arg arg Assign Call Assign Call Call Call Call If Compare Assign Assign Assign Assign Assign"
  },
  {
    "library": "matplotlib",
    "name": "Curve",
    "source_code": "@_register_style(_style_list, name='-')\nclass Curve(_Curve):\n\n    def __init__(self):\n        super().__init__(head_length=0.2, head_width=0.1)",
    "docstring": "A simple curve without any arrow head.",
    "type": "class",
    "file_path": "matplotlib\\lib\\matplotlib\\patches.py",
    "ast_data": "ClassDef name:Curve FunctionDef name:__init__ arg:self arguments arg Call Call Call"
  },
  {
    "library": "scipy",
    "name": "eye_array",
    "source_code": "def eye_array(m, n=None, *, k=0, dtype=float, format=None):\n    return _eye(m, n, k, dtype, format)",
    "docstring": "Sparse array of chosen shape with ones on the kth diagonal and zeros elsewhere. Return a sparse array with ones on diagonal. Specifically a sparse array (m x n) where the kth diagonal is all ones and everything else is zeros. Parameters ---------- m : int Number of rows requested. n : int, optional Number of columns. Default: . k : int, optional Diagonal to place ones on. Default: 0 (main diagonal). dtype : dtype, optional Data type of the array format : str, optional (default: \"dia\") Sparse format of the result, e.g., format=\"csr\", etc. Returns ------- new_array : sparse array Sparse array of chosen shape with ones on the kth diagonal and zeros elsewhere. Examples -------- >>> import numpy as np >>> import scipy as sp >>> sp.sparse.eye_array(3).toarray() array([[ 1., 0., 0.], [ 0., 1., 0.], [ 0., 0., 1.]]) >>> sp.sparse.eye_array(3, dtype=np.int8)",
    "type": "function",
    "file_path": "scipy\\scipy\\sparse\\_construct.py",
    "ast_data": "FunctionDef name:eye_array arg:m arg:n arguments arg arg arg arg arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_labels_is_sparse",
    "source_code": "def _labels_is_sparse():\n    return isinstance(labels, (sparse_tensor.SparseTensor, sparse_tensor.SparseTensorValue))",
    "docstring": "Returns true is is a sparse tensor.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\metrics_impl.py",
    "ast_data": "FunctionDef name:_labels_is_sparse arguments Return return:yes Call"
  },
  {
    "library": "kornia",
    "name": "normalize_points",
    "source_code": "def normalize_points(points: Tensor, eps: float=1e-08) -> Tuple[Tensor, Tensor]:\n    if len(points.shape) != 3:\n        raise AssertionError(points.shape)\n    if points.shape[-1] != 2:\n        raise AssertionError(points.shape)\n    x_mean = torch.mean(points, dim=1, keepdim=True)\n    scale = (points - x_mean).norm(dim=-1, p=2).mean(dim=-1)\n    scale = torch.sqrt(torch.tensor(2.0)) / (scale + eps)\n    ones, zeros = (ones_like(scale), torch.zeros_like(scale))\n    transform = stack([scale, zeros, -scale * x_mean[..., 0, 0], zeros, scale, -scale * x_mean[..., 0, 1], zeros, zeros, ones], dim=-1)\n    transform = transform.view(-1, 3, 3)\n    points_norm = transform_points(transform, points)\n    return (points_norm, transform)",
    "docstring": "Normalize points (isotropic). Computes the transformation matrix such that the two principal moments of the set of points are equal to unity, forming an approximately symmetric circular cloud of points of radius 1 about the origin. Reference: Hartley/Zisserman 4.4.4 pag.107 This operation is an essential step before applying the DLT algorithm in order to consider the result as optimal. Args: points: Tensor containing the points to be normalized with shape :math:. eps: epsilon value to avoid numerical instabilities. Returns: tuple containing the normalized points in the shape :math: and the transformation matrix in the shape :math:.",
    "type": "function",
    "file_path": "kornia\\kornia\\geometry\\epipolar\\fundamental.py",
    "ast_data": "FunctionDef name:normalize_points arg:points arg:eps arguments arg arg If Compare Call Raise Call If Compare Raise Call Assign Call Assign Call Call Assign Call Call Assign Call Call Assign Call Assign Call Assign Call Return return:yes"
  },
  {
    "library": "django",
    "name": "TokenBase",
    "source_code": "class TokenBase:\n    id = None\n    value = None\n    first = second = None\n\n    def nud(self, parser):\n        raise parser.error_class(\"Not expecting '%s' in this position in if tag.\" % self.id)\n\n    def led(self, left, parser):\n        raise parser.error_class(\"Not expecting '%s' as infix operator in if tag.\" % self.id)\n\n    def display(self):\n        return self.id\n\n    def __repr__(self):\n        out = [str(x) for x in [self.id, self.first, self.second] if x is not None]\n        return '(' + ' '.join(out) + ')'",
    "docstring": "Base class for operators and literals, mainly for debugging and for throwing syntax errors.",
    "type": "class",
    "file_path": "django\\django\\template\\smartif.py",
    "ast_data": "ClassDef name:TokenBase Assign Assign Assign FunctionDef name:nud arg:self arg:parser arguments arg arg Raise Call FunctionDef name:led arg:self arg:left arg:parser arguments arg arg arg Raise Call FunctionDef name:display arg:self arguments arg Return return:yes FunctionDef name:__repr__ arg:self arguments arg Assign Call Compare Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "predict",
    "source_code": "def predict(self, X):\n    check_is_fitted(self)\n    X = self._check_test_data(X)\n    x_squared_norms = row_norms(X, squared=True)\n    sample_weight = np.ones_like(x_squared_norms)\n    labels = self._predict_recursive(X, sample_weight, self._bisecting_tree)\n    return labels",
    "docstring": "Predict which cluster each sample in X belongs to. Prediction is made by going down the hierarchical tree in searching of closest leaf cluster. In the vector quantization literature, is called the code book and each value returned by is the index of the closest code in the code book. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) New data to predict. Returns ------- labels : ndarray of shape (n_samples,) Index of the cluster each sample belongs to.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\cluster\\_bisect_k_means.py",
    "ast_data": "FunctionDef name:predict arg:self arg:X arguments arg arg Call Assign Call Assign Call Assign Call Assign Call Return return:yes"
  },
  {
    "library": "numpy",
    "name": "LibraryInfo",
    "source_code": "class LibraryInfo:\n\n    def __init__(self, name, description, version, sections, vars, requires=None):\n        self.name = name\n        self.description = description\n        if requires:\n            self.requires = requires\n        else:\n            self.requires = []\n        self.version = version\n        self._sections = sections\n        self.vars = vars\n\n    def sections(self):\n        return list(self._sections.keys())\n\n    def cflags(self, section='default'):\n        val = self.vars.interpolate(self._sections[section]['cflags'])\n        return _escape_backslash(val)\n\n    def libs(self, section='default'):\n        val = self.vars.interpolate(self._sections[section]['libs'])\n        return _escape_backslash(val)\n\n    def __str__(self):\n        m = ['Name: %s' % self.name, 'Description: %s' % self.description]\n        if self.requires:\n            m.append('Requires:')\n        else:\n            m.append('Requires: %s' % ','.join(self.requires))\n        m.append('Version: %s' % self.version)\n        return '\\n'.join(m)",
    "docstring": "Object containing build information about a library. Parameters ---------- name : str The library name. description : str Description of the library. version : str Version string. sections : dict The sections of the configuration file for the library. The keys are the section headers, the values the text under each header. vars : class instance A instance, which contains `` pairs for variables defined in the configuration file for the library. requires : sequence, optional The required libraries for the library to be installed. Notes ----- All input parameters (except \"sections\" which is a method) are available as attributes of the same name.",
    "type": "class",
    "file_path": "numpy\\numpy\\distutils\\npy_pkg_config.py",
    "ast_data": "ClassDef name:LibraryInfo FunctionDef name:__init__ arg:self arg:name arg:description arg:version arg:sections arg:vars arg:requires arguments arg arg arg arg arg arg arg Assign Assign If Assign Assign Assign Assign Assign FunctionDef name:sections arg:self arguments arg Return return:yes Call Call FunctionDef name:cflags arg:self arg:section arguments arg arg Assign Call Return return:yes Call FunctionDef name:libs arg:self arg:section arguments arg arg Assign Call Return return:yes Call FunctionDef name:__str__ arg:self arguments arg Assign If Call Call Call Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_record_op_seen_by_control_dependencies",
    "source_code": "def _record_op_seen_by_control_dependencies(self, op) -> None:\n    for controller in self._control_dependencies_stack:\n        controller.add_op(op)",
    "docstring": "Record that the given op depends on all registered control dependencies. Args: op: An Operation.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\ops.py",
    "ast_data": "FunctionDef name:_record_op_seen_by_control_dependencies arg:self arg:op arguments arg arg For Call"
  },
  {
    "library": "tensorflow",
    "name": "stop_on_exception",
    "source_code": "@contextlib.contextmanager\ndef stop_on_exception(self):\n    try:\n        yield\n    except:\n        self.request_stop(ex=sys.exc_info())",
    "docstring": "Context manager to request stop when an Exception is raised. Code that uses a coordinator must catch exceptions and pass them to the method to stop the other threads managed by the coordinator. This context handler simplifies the exception handling. Use it as follows: This is completely equivalent to the slightly longer code: Yields: nothing.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\training\\coordinator.py",
    "ast_data": "FunctionDef name:stop_on_exception arg:self arguments arg Try ExceptHandler Call Call"
  },
  {
    "library": "tensorflow",
    "name": "on_predict_batch_end",
    "source_code": "@doc_controls.for_subclass_implementers\n@generic_utils.default\ndef on_predict_batch_end(self, batch, logs=None):\n    pass",
    "docstring": "Called at the end of a batch in methods. Subclasses should override for any actions to run. Note that if the argument to in is set to , this method will only be called every batches. Args: batch: Integer, index of batch within the current epoch. logs: Dict. Aggregated metric results up until this batch.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\callbacks.py",
    "ast_data": "FunctionDef name:on_predict_batch_end arg:self arg:batch arg:logs arguments arg arg arg"
  },
  {
    "library": "scipy",
    "name": "get_shape",
    "source_code": "def get_shape(self):\n    return self._shape",
    "docstring": "Get the shape of the matrix",
    "type": "method",
    "file_path": "scipy\\scipy\\sparse\\_matrix.py",
    "ast_data": "FunctionDef name:get_shape arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "MonitoredTimer",
    "source_code": "class MonitoredTimer(object):\n    __slots__ = ['cell', 't', 'monitored_section_name', '_counting', '_avoid_repetitive_counting']\n\n    def __init__(self, cell, monitored_section_name=None, avoid_repetitive_counting=False):\n        self.cell = cell\n        self.monitored_section_name = monitored_section_name\n        self._avoid_repetitive_counting = avoid_repetitive_counting\n        self._counting = True\n\n    def __enter__(self):\n        if self._avoid_repetitive_counting and self.monitored_section_name and (self.monitored_section_name in MonitoredTimerSections):\n            self._counting = False\n            return self\n        self.t = time.time()\n        if self.monitored_section_name:\n            MonitoredTimerSections.append(self.monitored_section_name)\n        return self\n\n    def __exit__(self, exception_type, exception_value, traceback):\n        del exception_type, exception_value, traceback\n        if self._counting:\n            micro_seconds = (time.time() - self.t) * 1000000\n            self.cell.increase_by(int(micro_seconds))\n            if self.monitored_section_name:\n                MonitoredTimerSections.remove(self.monitored_section_name)",
    "docstring": "A context manager to measure the walltime and increment a Counter cell.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\monitoring.py",
    "ast_data": "ClassDef name:MonitoredTimer Assign FunctionDef name:__init__ arg:self arg:cell arg:monitored_section_name arg:avoid_repetitive_counting arguments arg arg arg arg Assign Assign Assign Assign FunctionDef name:__enter__ arg:self arguments arg If BoolOp Compare Assign Return return:yes Assign Call If Call Return return:yes FunctionDef name:__exit__ arg:self arg:exception_type arg:exception_value arg:traceback arguments arg arg arg arg If Assign Call Call Call If Call"
  },
  {
    "library": "pandas",
    "name": "highlight_between",
    "source_code": "@Substitution(subset=subset_args, color=coloring_args.format(default='yellow'), props=properties_args)\ndef highlight_between(self, subset: Subset | None=None, color: str='yellow', axis: Axis | None=0, left: Scalar | Sequence | None=None, right: Scalar | Sequence | None=None, inclusive: IntervalClosedType='both', props: str | None=None) -> Styler:\n    if props is None:\n        props = f'background-color: {color};'\n    return self.apply(_highlight_between, axis=axis, subset=subset, props=props, left=left, right=right, inclusive=inclusive)",
    "docstring": "Highlight a defined range with a style. .. versionadded:: 1.3.0 Parameters ---------- %(subset)s %(color)s axis : {0 or 'index', 1 or 'columns', None}, default 0 If `` instead of default background coloring >>> df.style.highlight_between( ... left=1.5, right=3.5, props=\"font-weight:bold;color:#e83e8c\" ... ) # doctest: +SKIP .. figure:: ../../_static/style/hbetw_props.png",
    "type": "method",
    "file_path": "pandas\\pandas\\io\\formats\\style.py",
    "ast_data": "FunctionDef name:highlight_between arg:self arg:subset arg:color arg:axis arg:left arg:right arg:inclusive arg:props arguments arg arg arg arg arg arg arg arg If Compare Assign Return return:yes Call Call Call"
  },
  {
    "library": "scipy",
    "name": "Plateau",
    "source_code": "class Plateau(Benchmark):\n    change_dimensionality = True\n\n    def __init__(self, dimensions=2):\n        Benchmark.__init__(self, dimensions)\n        self._bounds = list(zip([-5.12] * self.N, [5.12] * self.N))\n        self.global_optimum = [[0.0 for _ in range(self.N)]]\n        self.fglob = 30.0\n\n    def fun(self, x, *args):\n        self.nfev += 1\n        return 30.0 + sum(floor(abs(x)))",
    "docstring": "Plateau objective function. This class defines the Plateau [1]_ global optimization problem. This is a multimodal minimization problem defined as follows: .. math:: f_{\\text{Plateau}}(x) = 30 + \\sum_{i=1}^n \\lfloor \\lvert x_i \\rvert\\rfloor Here, :math: represents the number of dimensions and :math: for :math:. *Global optimum*: :math: for :math: for :math: .. [1] Gavana, A. Global Optimization Benchmarks and AMPGO retrieved 2015",
    "type": "class",
    "file_path": "scipy\\benchmarks\\benchmarks\\go_benchmark_functions\\go_funcs_P.py",
    "ast_data": "ClassDef name:Plateau Assign FunctionDef name:__init__ arg:self arg:dimensions arguments arg arg Call Assign Call Call Assign Call Assign FunctionDef name:fun arg:self arg:x arguments arg arg arg Return return:yes Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "CosineDecay",
    "source_code": "class CosineDecay(LearningRateSchedule):\n\n    def __init__(self, initial_learning_rate, decay_steps, alpha=0.0, name=None):\n        super(CosineDecay, self).__init__()\n        self.initial_learning_rate = initial_learning_rate\n        self.decay_steps = decay_steps\n        self.alpha = alpha\n        self.name = name\n\n    def __call__(self, step):\n        with ops.name_scope_v2(self.name or 'CosineDecay'):\n            initial_learning_rate = tensor_conversion.convert_to_tensor_v2_with_dispatch(self.initial_learning_rate, name='initial_learning_rate')\n            dtype = initial_learning_rate.dtype\n            decay_steps = math_ops.cast(self.decay_steps, dtype)\n            global_step_recomp = math_ops.cast(step, dtype)\n            global_step_recomp = math_ops.minimum(global_step_recomp, decay_steps)\n            completed_fraction = global_step_recomp / decay_steps\n            cosine_decayed = 0.5 * (1.0 + math_ops.cos(constant_op.constant(math.pi) * completed_fraction))\n            decayed = (1 - self.alpha) * cosine_decayed + self.alpha\n            return math_ops.multiply(initial_learning_rate, decayed)\n\n    def get_config(self):\n        return {'initial_learning_rate': self.initial_learning_rate, 'decay_steps': self.decay_steps, 'alpha': self.alpha, 'name': self.name}",
    "docstring": "A LearningRateSchedule that uses a cosine decay schedule. See [Loshchilov & Hutter, ICLR2016]( SGDR: Stochastic Gradient Descent with Warm Restarts. When training a model, it is often useful to lower the learning rate as the training progresses. This schedule applies a cosine decay function to an optimizer step, given a provided initial learning rate. It requires a value to compute the decayed learning rate. You can just pass a TensorFlow variable that you increment at each training step. The schedule a 1-arg callable that produces a decayed learning rate when passed the current optimizer step. This can be useful for changing the learning rate value across different invocations of optimizer functions. It is computed as: Example usage: You can pass this schedule directly into a as the learning rate. The learning rate schedule is also serializable and deserializable using and . Returns: A 1-arg callable learning rate schedule that takes the current optimizer step and outputs the decayed learning rate, a scalar of the same type as .",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\optimizer_v2\\learning_rate_schedule.py",
    "ast_data": "ClassDef name:CosineDecay FunctionDef name:__init__ arg:self arg:initial_learning_rate arg:decay_steps arg:alpha arg:name arguments arg arg arg arg arg Call Call Assign Assign Assign Assign FunctionDef name:__call__ arg:self arg:step arguments arg arg With Call BoolOp Assign Call Assign Assign Call Assign Call Assign Call Assign Assign Call Call Assign Return return:yes Call FunctionDef name:get_config arg:self arguments arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "TraceWrappedHigherOrderOperatorVariable",
    "source_code": "class TraceWrappedHigherOrderOperatorVariable(TorchHigherOrderOperatorVariable):\n\n    def call_function(self, tx: 'InstructionTranslator', args: 'list[VariableTracker]', kwargs: 'dict[str, VariableTracker]') -> 'VariableTracker':\n        kwargs = dict(kwargs)\n        fn = kwargs.pop('fn')\n        return fn.call_function(tx, args, kwargs)",
    "docstring": "Handles torch._dynamo._trace_wrapped_higher_order_op.inner_trace by unwrapping the higher order op and inlining through it. This op is created by dynamo to survive through AotAutograd, then unwrapped here in the call to dynamo from compiled autograd.",
    "type": "class",
    "file_path": "pytorch\\torch\\_dynamo\\variables\\higher_order_ops.py",
    "ast_data": "ClassDef name:TraceWrappedHigherOrderOperatorVariable FunctionDef name:call_function arg:self arg:tx arg:args arg:kwargs arguments arg arg arg arg Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "_create_skew_matrix",
    "source_code": "def _create_skew_matrix(x):\n    result = np.zeros((len(x), 3, 3))\n    result[:, 0, 1] = -x[:, 2]\n    result[:, 0, 2] = x[:, 1]\n    result[:, 1, 0] = x[:, 2]\n    result[:, 1, 2] = -x[:, 0]\n    result[:, 2, 0] = -x[:, 1]\n    result[:, 2, 1] = x[:, 0]\n    return result",
    "docstring": "Create skew-symmetric matrices corresponding to vectors. Parameters ---------- x : ndarray, shape (n, 3) Set of vectors. Returns ------- ndarray, shape (n, 3, 3)",
    "type": "function",
    "file_path": "scipy\\scipy\\spatial\\transform\\_rotation_spline.py",
    "ast_data": "FunctionDef name:_create_skew_matrix arg:x arguments arg Assign Call Call Assign Assign Assign Assign Assign Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_TensorScatterMinGrad",
    "source_code": "@ops.RegisterGradient('TensorScatterMin')\ndef _TensorScatterMinGrad(op: ops.Operation, grad):\n    return _TensorScatterMinOrMaxGrad(op, grad)",
    "docstring": "Gradient for TensorScatterMin op.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\array_grad.py",
    "ast_data": "FunctionDef name:_TensorScatterMinGrad arg:op arg:grad arguments arg arg Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "spawn",
    "source_code": "def spawn(fn, args=(), nprocs=1, join=True, daemon=False, start_method='spawn'):\n    if start_method != 'spawn':\n        msg = f'This method only supports start_method=spawn (got: {start_method}).\\nTo use a different start_method use:\\n\\t\\t torch.multiprocessing.start_processes(...)'\n        warnings.warn(msg, FutureWarning, stacklevel=2)\n    return start_processes(fn, args, nprocs, join, daemon, start_method='spawn')",
    "docstring": "Spawns `~ProcessContext`",
    "type": "function",
    "file_path": "pytorch\\torch\\multiprocessing\\spawn.py",
    "ast_data": "FunctionDef name:spawn arg:fn arg:args arg:nprocs arg:join arg:daemon arg:start_method arguments arg arg arg arg arg arg If Compare Assign Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "exponential",
    "source_code": "@dispatch.add_dispatch_support\ndef exponential(x):\n    return math_ops.exp(x)",
    "docstring": "Exponential activation function. For example: >>> a = tf.constant([-3.0,-1.0, 0.0,1.0,3.0], dtype = tf.float32) >>> b = tf.keras.activations.exponential(a) >>> b.numpy() array([0.04978707, 0.36787945, 1., 2.7182817 , 20.085537], dtype=float32) Args: x: Input tensor. Returns: Tensor with exponential activation: .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\activations.py",
    "ast_data": "FunctionDef name:exponential arg:x arguments arg Return return:yes Call"
  },
  {
    "library": "kornia",
    "name": "BgrToRgba",
    "source_code": "class BgrToRgba(Module):\n    ONNX_DEFAULT_INPUTSHAPE: ClassVar[list[int]] = [-1, 3, -1, -1]\n    ONNX_DEFAULT_OUTPUTSHAPE: ClassVar[list[int]] = [-1, 4, -1, -1]\n\n    def __init__(self, alpha_val: Union[float, Tensor]) -> None:\n        super().__init__()\n        self.alpha_val = alpha_val\n\n    def forward(self, image: Tensor) -> Tensor:\n        return rgb_to_rgba(image, self.alpha_val)",
    "docstring": "Convert an image from BGR to RGBA. Add an alpha channel to existing RGB image. Args: alpha_val: A float number for the alpha value or a tensor of shape :math:. Returns: RGBA version of the image with shape :math:. Shape: - image: :math: - output: :math: .. note:: The current functionality is NOT supported by Torchscript. Example: >>> input = torch.rand(2, 3, 4, 5) >>> rgba = BgrToRgba(1.) >>> output = rgba(input) # 2x4x4x5",
    "type": "class",
    "file_path": "kornia\\kornia\\color\\rgb.py",
    "ast_data": "ClassDef name:BgrToRgba FunctionDef name:__init__ arg:self arg:alpha_val arguments arg arg Call Call Assign FunctionDef name:forward arg:self arg:image arguments arg arg Return return:yes Call"
  },
  {
    "library": "pygame",
    "name": "get_fonts",
    "source_code": "def get_fonts():\n    initsysfonts()\n    return list(Sysfonts)",
    "docstring": "pygame.font.get_fonts() -> list get a list of system font names Returns the list of all found system fonts. Note that the names of the fonts will be all lowercase with spaces removed. This is how pygame internally stores the font names for matching.",
    "type": "function",
    "file_path": "pygame\\src_py\\sysfont.py",
    "ast_data": "FunctionDef name:get_fonts arguments Call Return return:yes Call"
  },
  {
    "library": "sphinx",
    "name": "download_reference",
    "source_code": "class download_reference(nodes.reference):\n    pass",
    "docstring": "Node for download references, similar to pending_xref.",
    "type": "class",
    "file_path": "sphinx\\sphinx\\addnodes.py",
    "ast_data": "ClassDef name:download_reference"
  },
  {
    "library": "kornia",
    "name": "forward",
    "source_code": "def forward(self, queries: Tensor, keys: Tensor, values: Tensor, q_mask: Optional[Tensor]=None, kv_mask: Optional[Tensor]=None) -> Tensor:\n    QK = torch.einsum('nlhd,nshd->nlsh', queries, keys)\n    if kv_mask is not None and q_mask is not None:\n        QK.masked_fill_(~(q_mask[:, :, None, None] * kv_mask[:, None, :, None]), float('-inf'))\n    softmax_temp = 1.0 / queries.size(3) ** 0.5\n    A = torch.softmax(softmax_temp * QK, dim=2)\n    if self.use_dropout:\n        A = self.dropout(A)\n    queried_values = torch.einsum('nlsh,nshd->nlhd', A, values)\n    return queried_values.contiguous()",
    "docstring": "Multi-head scaled dot-product attention, a.k.a full attention. Args: queries: [N, L, H, D] keys: [N, S, H, D] values: [N, S, H, D] q_mask: [N, L] kv_mask: [N, S] Returns: queried_values: (N, L, H, D)",
    "type": "method",
    "file_path": "kornia\\kornia\\feature\\loftr\\loftr_module\\linear_attention.py",
    "ast_data": "FunctionDef name:forward arg:self arg:queries arg:keys arg:values arg:q_mask arg:kv_mask arguments arg arg arg arg arg arg Assign Call If BoolOp Compare Compare Call Call Assign Call Assign Call If Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "eq_spec",
    "source_code": "def eq_spec(self: pytree.TreeSpec, other: pytree.TreeSpec) -> bool:\n\n    def _normalize_type(t):\n        return str(_pytree_subclasses_that_lose_info.get(t, t))\n\n    def _match_normalized_structure(a, b):\n        if a is b:\n            return True\n        if _normalize_type(a.type) != _normalize_type(b.type):\n            return False\n        if a.context != b.context:\n            return False\n        if len(a.children_specs) != len(b.children_specs):\n            return False\n        return all((_match_normalized_structure(a, b) for a, b in zip(a.children_specs, b.children_specs)))\n    return _match_normalized_structure(self, other)",
    "docstring": "Refinement of TreeSpec.__eq__ where, e.g., torch.Size(...) matches tuple(...). See _pytree_subclasses_that_lose_info in proxy_tensor.py for more details.",
    "type": "function",
    "file_path": "pytorch\\torch\\export\\_unlift.py",
    "ast_data": "FunctionDef name:eq_spec arg:self arg:other arguments arg arg FunctionDef name:_normalize_type arg:t arguments arg Return return:yes Call Call FunctionDef name:_match_normalized_structure arg:a arg:b arguments arg arg If Compare Return return:yes If Compare Call Call Return return:yes If Compare Return return:yes If Compare Call Call Return return:yes Return return:yes Call Call Call Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "indexer_at_time",
    "source_code": "def indexer_at_time(self, time, asof: bool=False) -> npt.NDArray[np.intp]:\n    if asof:\n        raise NotImplementedError(\"'asof' argument is not supported\")\n    if isinstance(time, str):\n        from dateutil.parser import parse\n        time = parse(time).time()\n    if time.tzinfo:\n        if self.tz is None:\n            raise ValueError('Index must be timezone aware.')\n        time_micros = self.tz_convert(time.tzinfo)._get_time_micros()\n    else:\n        time_micros = self._get_time_micros()\n    micros = _time_to_micros(time)\n    return (time_micros == micros).nonzero()[0]",
    "docstring": "Return index locations of values at particular time of day. Parameters ---------- time : datetime.time or str Time passed in either as object (datetime.time) or as string in appropriate format (\"%H:%M\", \"%H%M\", \"%I:%M%p\", \"%I%M%p\", \"%H:%M:%S\", \"%H%M%S\", \"%I:%M:%S%p\", \"%I%M%S%p\"). asof : bool, default False This parameter is currently not supported. Returns ------- np.ndarray[np.intp] Index locations of values at given of day. See Also -------- indexer_between_time : Get index locations of values between particular times of day. DataFrame.at_time : Select values at particular time of day. Examples -------- >>> idx = pd.DatetimeIndex( ... [\"1/1/2020 10:00\", \"2/1/2020 11:00\", \"3/1/2020 10:00\"] ... ) >>> idx.indexer_at_time(\"10:00\") array([0, 2])",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\indexes\\datetimes.py",
    "ast_data": "FunctionDef name:indexer_at_time arg:self arg:time arg:asof arguments arg arg arg If Raise Call If Call Assign Call Call If If Compare Raise Call Assign Call Call Assign Call Assign Call Return return:yes Call Compare"
  },
  {
    "library": "matplotlib",
    "name": "set_major_formatter",
    "source_code": "def set_major_formatter(self, formatter):\n    self._set_formatter(formatter, self.major)",
    "docstring": "Set the formatter of the major ticker. In addition to a instance, this also accepts a `~matplotlib.ticker.StrMethodFormatter~matplotlib.ticker.StrMethodFormatter~matplotlib.ticker.FuncFormatter~matplotlib.ticker.FuncFormatter~matplotlib.ticker.Formatter`, or function",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\axis.py",
    "ast_data": "FunctionDef name:set_major_formatter arg:self arg:formatter arguments arg arg Call"
  },
  {
    "library": "tensorflow",
    "name": "CounterCell",
    "source_code": "class CounterCell(object):\n    __slots__ = ['_cell']\n\n    def __init__(self, cell):\n        self._cell = cell\n\n    def increase_by(self, value):\n        pywrap_tfe.TFE_MonitoringCounterCellIncrementBy(self._cell, value)\n\n    def value(self):\n        return pywrap_tfe.TFE_MonitoringCounterCellValue(self._cell)",
    "docstring": "CounterCell stores each value of a Counter.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\monitoring.py",
    "ast_data": "ClassDef name:CounterCell Assign FunctionDef name:__init__ arg:self arg:cell arguments arg arg Assign FunctionDef name:increase_by arg:self arg:value arguments arg arg Call FunctionDef name:value arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "_pre_state_dict_hook",
    "source_code": "@no_type_check\n@torch.no_grad()\ndef _pre_state_dict_hook(module: nn.Module, *args, **kwargs) -> None:\n    fsdp_state = _get_module_fsdp_state_if_fully_sharded_module(module)\n    if fsdp_state.sharding_strategy == ShardingStrategy.NO_SHARD:\n        context = _replace_with_full_state_dict_type(fsdp_state)\n        warnings.warn('When using ``NO_SHARD`` for ``ShardingStrategy``, full_state_dict will be returned.')\n    else:\n        _set_use_dtensor(fsdp_state)\n        context = contextlib.nullcontext()\n    with context:\n        _pre_state_dict_hook_fn = {StateDictType.FULL_STATE_DICT: _full_pre_state_dict_hook, StateDictType.LOCAL_STATE_DICT: _local_pre_state_dict_hook, StateDictType.SHARDED_STATE_DICT: _sharded_pre_state_dict_hook}\n        _pre_state_dict_hook_fn[fsdp_state._state_dict_type](fsdp_state, module, *args, **kwargs)",
    "docstring": "This is called before the core state dict saving logic of `` is used to decide what postprocessing will be done.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\fsdp\\_state_dict_utils.py",
    "ast_data": "FunctionDef name:_pre_state_dict_hook arg:module arguments arg arg arg Assign Call If Compare Assign Call Call Call Assign Call With Assign Call Call"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, additional_note='', kwargs_dict=None):\n    self._additional_note = additional_note\n    if kwargs_dict:\n        bullets = []\n        for key in sorted(kwargs_dict.keys()):\n            value = kwargs_dict[key]\n            if any((x.isspace() for x in key)):\n                raise ValueError('Parameter name \"%s\" contains whitespace.' % key)\n            value = value.lstrip()\n            if '\\n' in value:\n                raise ValueError('Parameter description for \"%s\" contains newlines.' % key)\n            bullets.append('*  `%s`: %s' % (key, value))\n        self._additional_note += '\\n\\n##### `kwargs`:\\n\\n' + '\\n'.join(bullets)",
    "docstring": "Initializes the AppendDocstring object. Args: additional_note: Python string added as additional docstring to public version of function. kwargs_dict: Python string/string dictionary representing specific kwargs expanded from the **kwargs input. Raises: ValueError: if kwargs_dict.key contains whitespace. ValueError: if kwargs_dict.value contains newlines.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\distributions\\util.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:additional_note arg:kwargs_dict arguments arg arg arg Assign If Assign For Call Call Assign If Call Call Raise Call Assign Call If Compare Raise Call Call Call"
  },
  {
    "library": "django",
    "name": "construct_managers",
    "source_code": "def construct_managers(self):\n    sorted_managers = sorted(self.managers, key=lambda v: v[1].creation_counter)\n    for mgr_name, manager in sorted_managers:\n        as_manager, manager_path, qs_path, args, kwargs = manager.deconstruct()\n        if as_manager:\n            qs_class = import_string(qs_path)\n            yield (mgr_name, qs_class.as_manager())\n        else:\n            manager_class = import_string(manager_path)\n            yield (mgr_name, manager_class(*args, **kwargs))",
    "docstring": "Deep-clone the managers using deconstruction.",
    "type": "method",
    "file_path": "django\\django\\db\\migrations\\state.py",
    "ast_data": "FunctionDef name:construct_managers arg:self arguments arg Assign Call arguments arg For Assign Call If Assign Call Call Assign Call Call"
  },
  {
    "library": "scrapy",
    "name": "GzipPlugin",
    "source_code": "class GzipPlugin:\n\n    def __init__(self, file: BinaryIO, feed_options: dict[str, Any]) -> None:\n        self.file = file\n        self.feed_options = feed_options\n        compress_level = self.feed_options.get('gzip_compresslevel', 9)\n        mtime = self.feed_options.get('gzip_mtime')\n        filename = self.feed_options.get('gzip_filename')\n        self.gzipfile = GzipFile(fileobj=self.file, mode='wb', compresslevel=compress_level, mtime=mtime, filename=filename)\n\n    def write(self, data: bytes) -> int:\n        return self.gzipfile.write(data)\n\n    def close(self) -> None:\n        self.gzipfile.close()",
    "docstring": "Compresses received data using _. Accepted `gzip_compresslevelgzip_mtimegzip_filenamegzip.GzipFile` for more info about parameters.",
    "type": "class",
    "file_path": "scrapy\\scrapy\\extensions\\postprocessing.py",
    "ast_data": "ClassDef name:GzipPlugin FunctionDef name:__init__ arg:self arg:file arg:feed_options arguments arg arg arg Assign Assign Assign Call Assign Call Assign Call Assign Call FunctionDef name:write arg:self arg:data arguments arg arg Return return:yes Call FunctionDef name:close arg:self arguments arg Call"
  },
  {
    "library": "django",
    "name": "get_srid",
    "source_code": "def get_srid(self, obj):\n    srid = obj.srid\n    if srid is None or self.srid == -1 or (srid == -1 and self.srid != -1):\n        return self.srid\n    else:\n        return srid",
    "docstring": "Return the default SRID for the given geometry or raster, taking into account the SRID set for the field. For example, if the input geometry or raster doesn't have an SRID, then the SRID of the field will be returned.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\db\\models\\fields.py",
    "ast_data": "FunctionDef name:get_srid arg:self arg:obj arguments arg arg Assign If BoolOp Compare Compare BoolOp Compare Compare Return return:yes Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_check_value",
    "source_code": "def _check_value(cond, message=None):\n    _check_with(ValueError, cond, message)",
    "docstring": "Throws error containing an optional message if the specified condition is False. Error type: `bool`",
    "type": "function",
    "file_path": "pytorch\\torch\\__init__.py",
    "ast_data": "FunctionDef name:_check_value arg:cond arg:message arguments arg arg Call"
  },
  {
    "library": "pytorch",
    "name": "_is_input_large_scalar",
    "source_code": "def _is_input_large_scalar(node: Node, gm: torch.fx.GraphModule):\n    if node.op == 'get_attr':\n        qualified_name = str(node.target)\n        module_path, _, name = qualified_name.rpartition('.')\n        submod = gm.get_submodule(module_path)\n        tensor = getattr(submod, name)\n        HISTC_UPPER_BOUND = 3402823500000000.0\n        return tensor.numel() == 1 and abs(tensor.item()) > HISTC_UPPER_BOUND\n    return False",
    "docstring": "Check if input is a large scalar value. So that we can skip quantization for the node since histc op (in HistogramObserver) only works for values up to certain upper bound",
    "type": "function",
    "file_path": "pytorch\\torch\\ao\\quantization\\quantizer\\xnnpack_quantizer_utils.py",
    "ast_data": "FunctionDef name:_is_input_large_scalar arg:node arg:gm arguments arg arg If Compare Assign Call Assign Call Assign Call Assign Call Assign Return return:yes BoolOp Compare Call Compare Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "get_name_to_nodes",
    "source_code": "def get_name_to_nodes(self) -> dict[str, Union[ir.IRNode, ir.TorchBindObject, sympy.Expr]]:\n    name_to_node: dict[str, Union[ir.IRNode, ir.TorchBindObject, sympy.Expr]] = {}\n    name_to_node.update(V.graph.graph_inputs)\n    for node in self.nodes:\n        for name, scheduler_buffer in node.outputs_by_name.items():\n            name_to_node[name] = scheduler_buffer.node\n    return name_to_node",
    "docstring": "Return a mapping from name strings to the corresponding graph inputs or base scheduler node outputs.",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\scheduler.py",
    "ast_data": "FunctionDef name:get_name_to_nodes arg:self arguments arg Call For For Call Assign Return return:yes"
  },
  {
    "library": "django",
    "name": "clear_limits",
    "source_code": "def clear_limits(self):\n    self.low_mark, self.high_mark = (0, None)",
    "docstring": "Clear any existing limits.",
    "type": "method",
    "file_path": "django\\django\\db\\models\\sql\\query.py",
    "ast_data": "FunctionDef name:clear_limits arg:self arguments arg Assign"
  },
  {
    "library": "pytorch",
    "name": "propagate",
    "source_code": "def propagate(self, *args):\n    if self.fake_mode is not None:\n        fake_args = [self.fake_mode.from_tensor(t) if isinstance(t, torch.Tensor) else t for t in args]\n    else:\n        fake_args = args\n    return super().run(*fake_args)",
    "docstring": "Run via interpretation and return the result and record the shape and type of each node. Args: *args (Tensor): the sample input. Returns: Any: The value returned from executing the Module",
    "type": "method",
    "file_path": "pytorch\\torch\\fx\\passes\\shape_prop.py",
    "ast_data": "FunctionDef name:propagate arg:self arguments arg arg If Compare Assign Call Call Assign Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_filter_execution_path_operations",
    "source_code": "def _filter_execution_path_operations(self, operations, fetches):\n    if fetches is None:\n        return set(operations)\n    if not isinstance(fetches, (list, tuple)):\n        fetches = [fetches]\n    op_fetches = []\n    for fetch in fetches:\n        if isinstance(fetch, ops.Operation):\n            op_fetches.append(fetch)\n        elif isinstance(fetch, tensor_lib.Tensor):\n            op_fetches.append(fetch.op)\n        else:\n            raise RuntimeError('Given fetch:%s is neither a tensor nor an op.' % fetch)\n    execution_path_operations = set(op_fetches)\n    traverse_stack = list(op_fetches)\n    while True:\n        if not traverse_stack:\n            break\n        head_op = traverse_stack.pop()\n        input_ops = [tensor_input.op for tensor_input in head_op.inputs]\n        input_ops.extend(head_op.control_inputs)\n        for input_op in input_ops:\n            if input_op not in execution_path_operations:\n                if TensorTracer.loop_cond_op(input_op):\n                    continue\n                execution_path_operations.add(input_op)\n                traverse_stack.append(input_op)\n    return execution_path_operations",
    "docstring": "Returns the set of ops in the execution path to compute given fetches.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\tpu\\tensor_tracer.py",
    "ast_data": "FunctionDef name:_filter_execution_path_operations arg:self arg:operations arg:fetches arguments arg arg arg If Compare Return return:yes Call If Call Assign Assign For If Call Call If Call Call Raise Call Assign Call Assign Call While If Assign Call Assign Call For If Compare If Call Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "SparseCoreEmbeddingConfig",
    "source_code": "@tf_export('tpu.experimental.embedding.SparseCoreEmbeddingConfig')\n@dataclasses.dataclass(frozen=True)\nclass SparseCoreEmbeddingConfig:\n    disable_table_stacking: bool = False\n    max_ids_per_chip_per_sample: int = 64\n    max_ids_per_table: Optional[Dict[str, int]] = None\n    max_unique_ids_per_table: Optional[Dict[str, int]] = None\n    allow_id_dropping: bool = False\n    initialize_tables_on_host: bool = True\n    enable_fast_table_initialization: bool = False",
    "docstring": "Config for sparsecore embedding.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\tpu\\tpu_embedding_v3.py",
    "ast_data": "ClassDef name:SparseCoreEmbeddingConfig Call Call"
  },
  {
    "library": "pytorch",
    "name": "fuse_modules_qat",
    "source_code": "def fuse_modules_qat(model, modules_to_fuse, inplace=False, fuser_func=fuse_known_modules, fuse_custom_config_dict=None):\n    return _fuse_modules(model, modules_to_fuse, is_qat=True, inplace=inplace, fuser_func=fuser_func, fuse_custom_config_dict=fuse_custom_config_dict)",
    "docstring": "QAT version for .",
    "type": "function",
    "file_path": "pytorch\\torch\\ao\\quantization\\fuse_modules.py",
    "ast_data": "FunctionDef name:fuse_modules_qat arg:model arg:modules_to_fuse arg:inplace arg:fuser_func arg:fuse_custom_config_dict arguments arg arg arg arg arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "add",
    "source_code": "def add(self, value):\n    pywrap_tfe.TFE_MonitoringSamplerCellAdd(self._cell, value)",
    "docstring": "Atomically add a sample. Args: value: float value.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\monitoring.py",
    "ast_data": "FunctionDef name:add arg:self arg:value arguments arg arg Call"
  },
  {
    "library": "scipy",
    "name": "_entropy",
    "source_code": "def _entropy(self):\n    hpdf = self._hpdf[1:-1]\n    res = xpx.apply_where(hpdf > 0.0, hpdf, np.log, fill_value=0.0)\n    return -np.sum(hpdf * res * self._hbin_widths)",
    "docstring": "Compute entropy of distribution",
    "type": "method",
    "file_path": "scipy\\scipy\\stats\\_continuous_distns.py",
    "ast_data": "FunctionDef name:_entropy arg:self arguments arg Assign Assign Call Compare Return return:yes Call"
  },
  {
    "library": "django",
    "name": "GenericRel",
    "source_code": "class GenericRel(ForeignObjectRel):\n\n    def __init__(self, field, to, related_name=None, related_query_name=None, limit_choices_to=None):\n        super().__init__(field, to, related_name=related_query_name or '+', related_query_name=related_query_name, limit_choices_to=limit_choices_to, on_delete=DO_NOTHING)",
    "docstring": "Used by GenericRelation to store information about the relation.",
    "type": "class",
    "file_path": "django\\django\\contrib\\contenttypes\\fields.py",
    "ast_data": "ClassDef name:GenericRel FunctionDef name:__init__ arg:self arg:field arg:to arg:related_name arg:related_query_name arg:limit_choices_to arguments arg arg arg arg arg arg Call Call BoolOp"
  },
  {
    "library": "matplotlib",
    "name": "_add_twin_axes",
    "source_code": "def _add_twin_axes(self, axes_class, **kwargs):\n    if axes_class is None:\n        axes_class = self._base_axes_class\n    ax = parasite_axes_class_factory(axes_class)(self, **kwargs)\n    self.parasites.append(ax)\n    ax._remove_method = self._remove_any_twin\n    return ax",
    "docstring": "Helper for //. *kwargs* are forwarded to the parasite axes constructor.",
    "type": "method",
    "file_path": "matplotlib\\lib\\mpl_toolkits\\axes_grid1\\parasite_axes.py",
    "ast_data": "FunctionDef name:_add_twin_axes arg:self arg:axes_class arguments arg arg arg If Compare Assign Assign Call Call Call Assign Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "graph_returns_tuple",
    "source_code": "def graph_returns_tuple(gm: GraphModule) -> bool:\n    if not isinstance(gm, GraphModule):\n        return True\n    rv, = output_node(gm).args\n    if isinstance(rv, (list, tuple)):\n        return True\n    if isinstance(rv, torch.fx.node.Node) and hasattr(rv.target, '_schema') and (len(rv.target._schema.returns) > 1) and all((str(ret.type) == 'Tensor' for ret in rv.target._schema.returns)):\n        return True\n    return False",
    "docstring": "True if a FX graph returns a tuple",
    "type": "function",
    "file_path": "pytorch\\torch\\_inductor\\compile_fx.py",
    "ast_data": "FunctionDef name:graph_returns_tuple arg:gm arguments arg If Call Return return:yes Assign Call If Call Return return:yes If BoolOp Call Call Compare Call Call Compare Call Return return:yes Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "predict",
    "source_code": "def predict(self, X):\n    check_is_fitted(self)\n    return self._predict(X)",
    "docstring": "Predict using the multi-layer perceptron model. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) The input data. Returns ------- y : ndarray of shape (n_samples, n_outputs) The predicted values.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\neural_network\\_multilayer_perceptron.py",
    "ast_data": "FunctionDef name:predict arg:self arg:X arguments arg arg Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "graph",
    "source_code": "@property\ndef graph(self):\n    return self._graph",
    "docstring": "The graph that was launched in this session.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\client\\session.py",
    "ast_data": "FunctionDef name:graph arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "window_padding_type_to_pad_values",
    "source_code": "def window_padding_type_to_pad_values(padding_type, lhs_dims, rhs_dims, window_strides):\n    if not isinstance(padding_type, (str, PaddingType)):\n        msg = 'padding_type must be str or PaddingType, got {}.'\n        raise TypeError(msg.format(type(padding_type)))\n    if isinstance(padding_type, str):\n        if padding_type.upper() == 'VALID':\n            padding_type = PaddingType.VALID\n        elif padding_type.upper() == 'SAME':\n            padding_type = PaddingType.SAME\n        else:\n            msg = 'Unknown padding type string: expected \"VALID\" or \"SAME\", got {}.'\n            raise ValueError(msg.format(padding_type))\n    if padding_type == PaddingType.VALID:\n        return [(0, 0)] * len(window_strides)\n    elif padding_type == PaddingType.SAME:\n        out_shape = np.ceil(np.true_divide(lhs_dims, window_strides)).astype(int)\n        pad_sizes = [max((out_size - 1) * stride + filter_size - in_size, 0) for out_size, stride, filter_size, in_size in zip(out_shape, window_strides, rhs_dims, lhs_dims)]\n        return [(pad_size // 2, pad_size - pad_size // 2) for pad_size in pad_sizes]\n    else:\n        msg = 'Unexpected PaddingType value: {}'\n        raise ValueError(msg.format(padding_type))",
    "docstring": "Maps PaddingType or string to pad values (list of pairs of ints).",
    "type": "function",
    "file_path": "tensorflow\\third_party\\xla\\xla\\python\\xla_client.py",
    "ast_data": "FunctionDef name:window_padding_type_to_pad_values arg:padding_type arg:lhs_dims arg:rhs_dims arg:window_strides arguments arg arg arg arg If Call Assign Raise Call Call Call If Call If Compare Call Assign If Compare Call Assign Assign Raise Call Call If Compare Return return:yes Call If Compare Assign Call Call Call Assign Call Call Return return:yes Assign Raise Call Call"
  },
  {
    "library": "pandas",
    "name": "_dir_additions",
    "source_code": "@final\ndef _dir_additions(self) -> set[str]:\n    additions = super()._dir_additions()\n    if self._info_axis._can_hold_strings:\n        additions.update(self._info_axis._dir_additions_for_owner)\n    return additions",
    "docstring": "add the string-like attributes from the info_axis. If info_axis is a MultiIndex, its first level values are used.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\generic.py",
    "ast_data": "FunctionDef name:_dir_additions arg:self arguments arg Assign Call Call If Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "enable_mem_efficient_sdp",
    "source_code": "def enable_mem_efficient_sdp(enabled: bool):\n    torch._C._set_sdp_use_mem_efficient(enabled)",
    "docstring": ".. warning:: This flag is beta and subject to change. Enables or disables memory efficient scaled dot product attention.",
    "type": "function",
    "file_path": "pytorch\\torch\\backends\\cuda\\__init__.py",
    "ast_data": "FunctionDef name:enable_mem_efficient_sdp arg:enabled arguments arg Call"
  },
  {
    "library": "scipy",
    "name": "_eq_10_42",
    "source_code": "def _eq_10_42(lam_1, lam_2, t_12):\n    a = 0.5 * (lam_1 + lam_2)\n    b = 0.5 * (lam_1 - lam_2)\n    return t_12 * _exp_sinch(a, b)",
    "docstring": "Equation (10.42) of Functions of Matrices: Theory and Computation. Notes ----- This is a helper function for _fragment_2_1 of expm_2009. Equation (10.42) is on page 251 in the section on Schur algorithms. In particular, section 10.4.3 explains the Schur-Parlett algorithm. expm([[lam_1, t_12], [0, lam_1]) = [[exp(lam_1), t_12*exp((lam_1 + lam_2)/2)*sinch((lam_1 - lam_2)/2)], [0, exp(lam_2)]",
    "type": "function",
    "file_path": "scipy\\scipy\\sparse\\linalg\\_matfuncs.py",
    "ast_data": "FunctionDef name:_eq_10_42 arg:lam_1 arg:lam_2 arg:t_12 arguments arg arg arg Assign Assign Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "save_op_profiles",
    "source_code": "def save_op_profiles(op_profiles: dict[str, set[OpProfile]], f: FileLike) -> None:\n    yaml_str = generate_yaml_from_profiles(op_profiles)\n    if isinstance(f, (str, os.PathLike)):\n        f = os.fspath(f)\n        with open(f, 'w') as file:\n            file.write(yaml_str)\n    elif isinstance(f, io.BytesIO):\n        f.write(yaml_str.encode('utf-8'))\n    else:\n        raise ValueError(f'Invalid type of file {f}')",
    "docstring": "Serializes the given operator profiles into a yaml format and saves it to the given file. The operator profile can be loaded back using .",
    "type": "function",
    "file_path": "pytorch\\torch\\_library\\fake_profile.py",
    "ast_data": "FunctionDef name:save_op_profiles arg:op_profiles arg:f arguments arg arg Assign Call If Call Assign Call With Call Call If Call Call Call Raise Call"
  },
  {
    "library": "scrapy",
    "name": "walk_modules",
    "source_code": "def walk_modules(path: str) -> list[ModuleType]:\n    mods: list[ModuleType] = []\n    mod = import_module(path)\n    mods.append(mod)\n    if hasattr(mod, '__path__'):\n        for _, subpath, ispkg in iter_modules(mod.__path__):\n            fullpath = path + '.' + subpath\n            if ispkg:\n                mods += walk_modules(fullpath)\n            else:\n                submod = import_module(fullpath)\n                mods.append(submod)\n    return mods",
    "docstring": "Loads a module and all its submodules from the given module path and returns them. If *any* module throws an exception while importing, that exception is thrown back. For example: walk_modules('scrapy.utils')",
    "type": "function",
    "file_path": "scrapy\\scrapy\\utils\\misc.py",
    "ast_data": "FunctionDef name:walk_modules arg:path arguments arg Assign Call Call If Call For Call Assign If Call Assign Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "AddLoss",
    "source_code": "class AddLoss(Layer):\n\n    def __init__(self, unconditional, **kwargs):\n        kwargs['autocast'] = False\n        super(AddLoss, self).__init__(**kwargs)\n        self.unconditional = unconditional\n\n    def call(self, inputs):\n        self.add_loss(inputs, inputs=not self.unconditional)\n        return inputs\n\n    def get_config(self):\n        config = super(AddLoss, self).get_config()\n        config.update({'unconditional': self.unconditional})\n        return config",
    "docstring": "Adds its inputs as a loss. Attributes: unconditional: Whether or not the loss should be conditioned on the inputs.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\base_layer.py",
    "ast_data": "ClassDef name:AddLoss FunctionDef name:__init__ arg:self arg:unconditional arguments arg arg arg Assign Call Call Assign FunctionDef name:call arg:self arg:inputs arguments arg arg Call Return return:yes FunctionDef name:get_config arg:self arguments arg Assign Call Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "export_chrome_trace",
    "source_code": "def export_chrome_trace(self, path):\n    if kineto_available():\n        self.kineto_results.save(path)\n    else:\n        self._ensure_function_events()\n        return self._function_events.export_chrome_trace(path)",
    "docstring": "Exports the collected trace in Chrome JSON format. If kineto is enabled, only last cycle in schedule is exported.",
    "type": "method",
    "file_path": "pytorch\\torch\\autograd\\profiler.py",
    "ast_data": "FunctionDef name:export_chrome_trace arg:self arg:path arguments arg arg If Call Call Call Return return:yes Call"
  },
  {
    "library": "seaborn",
    "name": "_annotate_heatmap",
    "source_code": "def _annotate_heatmap(self, ax, mesh):\n    mesh.update_scalarmappable()\n    height, width = self.annot_data.shape\n    xpos, ypos = np.meshgrid(np.arange(width) + 0.5, np.arange(height) + 0.5)\n    for x, y, m, color, val in zip(xpos.flat, ypos.flat, mesh.get_array().flat, mesh.get_facecolors(), self.annot_data.flat):\n        if m is not np.ma.masked:\n            lum = relative_luminance(color)\n            text_color = '.15' if lum > 0.408 else 'w'\n            annotation = ('{:' + self.fmt + '}').format(val)\n            text_kwargs = dict(color=text_color, ha='center', va='center')\n            text_kwargs.update(self.annot_kws)\n            ax.text(x, y, annotation, **text_kwargs)",
    "docstring": "Add textual labels with the value in each cell.",
    "type": "method",
    "file_path": "seaborn\\seaborn\\matrix.py",
    "ast_data": "FunctionDef name:_annotate_heatmap arg:self arg:ax arg:mesh arguments arg arg arg Call Assign Assign Call Call Call For Call Call Call If Compare Assign Call Assign Compare Assign Call Assign Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "_is_custom_module_lstm",
    "source_code": "def _is_custom_module_lstm(node: Node, named_modules: dict[str, torch.nn.Module], qconfig: QConfigAny=None, qhandler: Optional[Any]=None) -> bool:\n    mod = _get_module(node, named_modules)\n    if qconfig is not None and qhandler is not None:\n        assert isinstance(qhandler, torch.ao.quantization.fx.quantize_handler.QuantizeHandler)\n        return isinstance(mod, torch.nn.LSTM) and activation_is_statically_quantized(qconfig) and qhandler.is_custom_module()\n    else:\n        return isinstance(mod, torch.ao.nn.quantizable.LSTM)",
    "docstring": "Return whether this refers to the custom module LSTM flow.",
    "type": "function",
    "file_path": "pytorch\\torch\\ao\\quantization\\fx\\utils.py",
    "ast_data": "FunctionDef name:_is_custom_module_lstm arg:node arg:named_modules arg:qconfig arg:qhandler arguments arg arg arg arg Assign Call If BoolOp Compare Compare Call Return return:yes BoolOp Call Call Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "memory_snapshot",
    "source_code": "def memory_snapshot():\n    return torch._C._cuda_memorySnapshot()['segments']",
    "docstring": "Return a snapshot of the CUDA memory allocator state across all devices. Interpreting the output of this function requires familiarity with the memory allocator internals. .. note:: See :ref: for more details about GPU memory management.",
    "type": "function",
    "file_path": "pytorch\\torch\\cuda\\memory.py",
    "ast_data": "FunctionDef name:memory_snapshot arguments Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "Combiner",
    "source_code": "class Combiner(object):\n    __metaclass__ = abc.ABCMeta\n\n    def __repr__(self):\n        return '<{}>'.format(self.__class__.__name__)\n\n    @abc.abstractmethod\n    def compute(self, batch_values, accumulator=None):\n        pass\n\n    @abc.abstractmethod\n    def merge(self, accumulators):\n        pass\n\n    @abc.abstractmethod\n    def extract(self, accumulator):\n        pass\n\n    @abc.abstractmethod\n    def restore(self, output):\n        pass\n\n    @abc.abstractmethod\n    def serialize(self, accumulator):\n        pass\n\n    @abc.abstractmethod\n    def deserialize(self, encoded_accumulator):\n        pass",
    "docstring": "Functional object that defines a shardable computation. This object defines functions required to create and manipulate data objects. These data objects, referred to below as 'accumulators', are computation- specific and may be implemented alongside concrete subclasses of Combiner (if necessary - some computations may be simple enough that standard Python types can be used as accumulators). The intent for this class is that by describing computations in this way, we can arbitrarily shard a dataset, perform computations on a subset, and then merge the computation into a final result. This enables distributed computation. The combiner itself does not own any state - all computational state is owned by the accumulator objects. This is so that we can have an arbitrary number of Combiners (thus sharding the computation N ways) without risking any change to the underlying computation. These accumulator objects are uniquely associated with each Combiner; a Combiner defines what the accumulator object should be and will only work with accumulators of that type.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\base_preprocessing_layer.py",
    "ast_data": "ClassDef name:Combiner Assign FunctionDef name:__repr__ arg:self arguments arg Return return:yes Call FunctionDef name:compute arg:self arg:batch_values arg:accumulator arguments arg arg arg FunctionDef name:merge arg:self arg:accumulators arguments arg arg FunctionDef name:extract arg:self arg:accumulator arguments arg arg FunctionDef name:restore arg:self arg:output arguments arg arg FunctionDef name:serialize arg:self arg:accumulator arguments arg arg FunctionDef name:deserialize arg:self arg:encoded_accumulator arguments arg arg"
  },
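Since the class above only defines the interface, a minimal concrete sketch may help. `SumCombiner` and its accumulator layout are hypothetical, not part of the TensorFlow API; the accumulator is just a running float.

```python
import json
import numpy as np

class SumCombiner:
    """Hypothetical Combiner: the accumulator is a plain running sum."""

    def compute(self, batch_values, accumulator=None):
        total = 0.0 if accumulator is None else accumulator
        return total + float(np.sum(batch_values))

    def merge(self, accumulators):
        return float(sum(accumulators))      # fold shard results together

    def extract(self, accumulator):
        return {"sum": accumulator}          # final output format

    def restore(self, output):
        return output["sum"]                 # inverse of extract()

    def serialize(self, accumulator):
        return json.dumps(accumulator).encode()

    def deserialize(self, encoded_accumulator):
        return json.loads(encoded_accumulator.decode())

# Sharded use: compute() on each shard, then merge() the partial results.
parts = [SumCombiner().compute(batch) for batch in ([1, 2], [3, 4])]
print(SumCombiner().merge(parts))  # 10.0
```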
  {
    "library": "matplotlib",
    "name": "compute_dz",
    "source_code": "def compute_dz(self):\n    dz_init = super().compute_dz()\n    Uf0 = np.ravel(dz_init)\n    reference_element = _ReducedHCT_Element()\n    J = CubicTriInterpolator._get_jacobian(self._tris_pts)\n    eccs = self._eccs\n    triangles = self._triangles\n    Uc = self.z[self._triangles]\n    Kff_rows, Kff_cols, Kff_vals, Ff = reference_element.get_Kff_and_Ff(J, eccs, triangles, Uc)\n    tol = 1e-10\n    n_dof = Ff.shape[0]\n    Kff_coo = _Sparse_Matrix_coo(Kff_vals, Kff_rows, Kff_cols, shape=(n_dof, n_dof))\n    Kff_coo.compress_csc()\n    Uf, err = _cg(A=Kff_coo, b=Ff, x0=Uf0, tol=tol)\n    err0 = np.linalg.norm(Kff_coo.dot(Uf0) - Ff)\n    if err0 < err:\n        _api.warn_external('In TriCubicInterpolator initialization, PCG sparse solver did not converge after 1000 iterations. `geom` approximation is used instead of `min_E`')\n        Uf = Uf0\n    dz = np.empty([self._pts.shape[0], 2], dtype=np.float64)\n    dz[:, 0] = Uf[::2]\n    dz[:, 1] = Uf[1::2]\n    return dz",
    "docstring": "Elliptic solver for bending energy minimization. Uses a dedicated 'toy' sparse Jacobi PCG solver.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\tri\\_triinterpolate.py",
    "ast_data": "FunctionDef name:compute_dz arg:self arguments arg Assign Call Call Assign Call Assign Call Assign Call Assign Assign Assign Assign Call Assign Assign Assign Call Call Assign Call Assign Call Call If Compare Call Assign Assign Call Assign Assign Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "draw_path",
    "source_code": "def draw_path(self, renderer, gc, tpath, affine, rgbFace=None):\n    if isinstance(renderer, PathEffectRenderer):\n        renderer = renderer._renderer\n    return renderer.draw_path(gc, tpath, affine, rgbFace)",
    "docstring": "Derived should override this method. The arguments are the same as :meth: except the first argument is a renderer.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\patheffects.py",
    "ast_data": "FunctionDef name:draw_path arg:self arg:renderer arg:gc arg:tpath arg:affine arg:rgbFace arguments arg arg arg arg arg arg If Call Assign Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "get_accumulator_dtype",
    "source_code": "def get_accumulator_dtype(input_torch_dtypes: list[torch.dtype]) -> Optional[torch.dtype]:\n    if len(input_torch_dtypes) != 2:\n        return None\n    torch_dtype = None\n    if input_torch_dtypes[0] == input_torch_dtypes[1]:\n        torch_dtype = input_torch_dtypes[0]\n    else:\n        size0 = torch.tensor([], dtype=input_torch_dtypes[0]).element_size()\n        size1 = torch.tensor([], dtype=input_torch_dtypes[1]).element_size()\n        if size0 > size1:\n            dtype0, dtype1 = input_torch_dtypes\n        else:\n            dtype1, dtype0 = input_torch_dtypes\n        if dtype0 in [torch.half, torch.bfloat16] and dtype1 in [torch.int8, torch.uint8]:\n            torch_dtype = dtype0\n    if torch_dtype in (torch.float16, torch.bfloat16, torch.float, torch.float8_e4m3fn):\n        return torch.float\n    if torch_dtype == torch.int8:\n        return torch.int32\n    raise NotImplementedError(f'Unsupported data types: input_torch_dtypes={input_torch_dtypes!r}')",
    "docstring": "Given a pair of input torch dtypes, returns the inferred accumulator torch dtype.",
    "type": "function",
    "file_path": "pytorch\\torch\\_inductor\\codegen\\cuda\\cutlass_utils.py",
    "ast_data": "FunctionDef name:get_accumulator_dtype arg:input_torch_dtypes arguments arg If Compare Call Return return:no Assign If Compare Assign Assign Call Call Assign Call Call If Compare Assign Assign If BoolOp Compare Compare Assign If Compare Return return:yes If Compare Return return:yes Raise Call"
  },
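The mixed-dtype branch above hinges on comparing element sizes; here is a small probe using only public torch calls, shown as an illustration rather than the internal API.

```python
import torch

# element_size() is the byte-width probe used in the helper above.
half_size = torch.tensor([], dtype=torch.half).element_size()   # 2 bytes
int8_size = torch.tensor([], dtype=torch.int8).element_size()   # 1 byte
# For the (half, int8) pair the wider float dtype wins, and float16
# then accumulates in float32 per the final dtype table.
print(half_size, int8_size)
```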
  {
    "library": "pytorch",
    "name": "_DispatchCacheEntryOutputInfo",
    "source_code": "@dataclass_slots\n@dataclass(frozen=True)\nclass _DispatchCacheEntryOutputInfo:\n    inplace_idx: Optional[int]\n    metadata: Optional[TensorMetadata]\n    view_idx: Optional[int]\n    constant_value: Optional[Any] = SingletonConstant",
    "docstring": "Entry type for the FakeTensor dispatch cache for an output. Accounts for three possibilities: 1) The op is inplace, and a hit means we need to alias the argument at a given index. 2) We need to synthesize a new FakeTensor given tensor metadata. For view ops, we further capture the index of the arg to alias. 3) if the tensor related fields are None, then it is a constant value (e.g. None or integer)",
    "type": "class",
    "file_path": "pytorch\\torch\\_subclasses\\fake_tensor.py",
    "ast_data": "ClassDef name:_DispatchCacheEntryOutputInfo Call"
  },
  {
    "library": "tensorflow",
    "name": "_ObjectIdentityWrapper",
    "source_code": "class _ObjectIdentityWrapper:\n    __slots__ = ['_wrapped', '__weakref__']\n\n    def __init__(self, wrapped):\n        self._wrapped = wrapped\n\n    @property\n    def unwrapped(self):\n        return self._wrapped\n\n    def _assert_type(self, other):\n        if not isinstance(other, _ObjectIdentityWrapper):\n            raise TypeError('Cannot compare wrapped object with unwrapped object')\n\n    def __lt__(self, other):\n        self._assert_type(other)\n        return id(self._wrapped) < id(other._wrapped)\n\n    def __gt__(self, other):\n        self._assert_type(other)\n        return id(self._wrapped) > id(other._wrapped)\n\n    def __eq__(self, other):\n        if other is None:\n            return False\n        self._assert_type(other)\n        return self._wrapped is other._wrapped\n\n    def __ne__(self, other):\n        return not self.__eq__(other)\n\n    def __hash__(self):\n        return id(self._wrapped)\n\n    def __repr__(self):\n        return '<{} wrapping {!r}>'.format(type(self).__name__, self._wrapped)",
    "docstring": "Wraps an object, mapping __eq__ on wrapper to \"is\" on wrapped. Since __eq__ is based on object identity, it's safe to also define __hash__ based on object ids. This lets us add unhashable types like trackable _ListWrapper objects to object-identity collections.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\util\\object_identity.py",
    "ast_data": "ClassDef name:_ObjectIdentityWrapper Assign FunctionDef name:__init__ arg:self arg:wrapped arguments arg arg Assign FunctionDef name:unwrapped arg:self arguments arg Return return:yes FunctionDef name:_assert_type arg:self arg:other arguments arg arg If Call Raise Call FunctionDef name:__lt__ arg:self arg:other arguments arg arg Call Return return:yes Compare Call Call FunctionDef name:__gt__ arg:self arg:other arguments arg arg Call Return return:yes Compare Call Call FunctionDef name:__eq__ arg:self arg:other arguments arg arg If Compare Return return:yes Call Return return:yes Compare FunctionDef name:__ne__ arg:self arg:other arguments arg arg Return return:yes Call FunctionDef name:__hash__ arg:self arguments arg Return return:yes Call FunctionDef name:__repr__ arg:self arguments arg Return return:yes Call Call"
  },
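A compact sketch of the same identity-keying idea with an unhashable object; `IdKey` is illustrative, not the TensorFlow class.

```python
class IdKey:
    """Illustrative identity wrapper: __eq__ is 'is', __hash__ is id()."""
    __slots__ = ("obj",)

    def __init__(self, obj):
        self.obj = obj

    def __eq__(self, other):
        return isinstance(other, IdKey) and self.obj is other.obj

    def __hash__(self):
        return id(self.obj)

unhashable = [1, 2, 3]                  # a list cannot be a dict key
registry = {IdKey(unhashable): "meta"}
print(registry[IdKey(unhashable)])      # 'meta' — same object identity
```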
  {
    "library": "pytorch",
    "name": "_disable_dynamo",
    "source_code": "def _disable_dynamo(fn: Optional[Callable[_P, _T]]=None, recursive: bool=True) -> Union[Callable[_P, _T], Callable[[Callable[_P, _T]], Callable[_P, _T]]]:\n    if fn is not None:\n\n        @functools.wraps(fn)\n        def inner(*args: _P.args, **kwargs: _P.kwargs) -> _T:\n            disable_fn = getattr(fn, '__dynamo_disable', None)\n            if disable_fn is None:\n                import torch._dynamo\n                disable_fn = torch._dynamo.disable(fn, recursive)\n                fn.__dynamo_disable = disable_fn\n            return disable_fn(*args, **kwargs)\n        return inner\n    else:\n        return functools.partial(_disable_dynamo, recursive=recursive)",
    "docstring": "This API should be only used inside torch, external users should still use torch._dynamo.disable. The main goal of this API is to avoid circular imports issues that is common while using _dynamo.disable inside torch itself. This API avoids it by lazily importing torch._dynamo from the import time to the invocation of the decorated function.",
    "type": "function",
    "file_path": "pytorch\\torch\\_compile.py",
    "ast_data": "FunctionDef name:_disable_dynamo arg:fn arg:recursive arguments arg arg If Compare FunctionDef name:inner arguments arg arg Assign Call If Compare Assign Call Assign Return return:yes Call Call Return return:yes Return return:yes Call"
  },
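The decorator above combines two tricks: optional-argument decoration via functools.partial and caching the transformed function as an attribute so the deferred import runs only once. A standalone sketch of that pattern follows (all names are illustrative):

```python
import functools

def lazily_wrapped(fn=None, *, recursive=True):
    # Support both @lazily_wrapped and @lazily_wrapped(recursive=False).
    if fn is None:
        return functools.partial(lazily_wrapped, recursive=recursive)

    @functools.wraps(fn)
    def inner(*args, **kwargs):
        cached = getattr(fn, "__lazy_impl", None)
        if cached is None:
            # A deferred import or heavy transform would run here, once.
            cached = fn
            fn.__lazy_impl = cached
        return cached(*args, **kwargs)
    return inner

@lazily_wrapped(recursive=False)
def add(a, b):
    return a + b

print(add(1, 2))  # 3
```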
  {
    "library": "pandas",
    "name": "validate_col",
    "source_code": "def validate_col(self, itemsize=None):\n    if self.kind == 'string':\n        c = self.col\n        if c is not None:\n            if itemsize is None:\n                itemsize = self.itemsize\n            if c.itemsize < itemsize:\n                raise ValueError(f'Trying to store a string with len [{itemsize}] in [{self.cname}] column but\\nthis column has a limit of [{c.itemsize}]!\\nConsider using min_itemsize to preset the sizes on these columns')\n            return c.itemsize\n    return None",
    "docstring": "validate this column: return the compared against itemsize",
    "type": "method",
    "file_path": "pandas\\pandas\\io\\pytables.py",
    "ast_data": "FunctionDef name:validate_col arg:self arg:itemsize arguments arg arg If Compare Assign If Compare If Compare Assign If Compare Raise Call Return return:yes Return return:no"
  },
  {
    "library": "pytorch",
    "name": "maybe_get_weight_eq_obs_node",
    "source_code": "def maybe_get_weight_eq_obs_node(op_node: Node, modules: dict[str, nn.Module]) -> Optional[Node]:\n    assert op_node.op == 'call_function'\n    for node_arg in op_node.args:\n        if node_arg_is_weight(op_node, node_arg):\n            assert isinstance(node_arg, Node) and node_arg.op == 'call_module' and isinstance(modules[str(node_arg.target)], _WeightEqualizationObserver)\n            return node_arg\n    return None",
    "docstring": "Gets the weight equalization observer node if it exists.",
    "type": "function",
    "file_path": "pytorch\\torch\\ao\\quantization\\fx\\_equalize.py",
    "ast_data": "FunctionDef name:maybe_get_weight_eq_obs_node arg:op_node arg:modules arguments arg arg Compare For If Call BoolOp Call Compare Call Call Return return:yes Return return:no"
  },
  {
    "library": "pytorch",
    "name": "unsupported_input_tensor",
    "source_code": "def unsupported_input_tensor(t: torch.Tensor, parent=None, node=None):\n    if t.is_complex():\n        if parent and parent.target in (torch.ops.aten.view.dtype, torch.ops.prims.convert_element_type.default):\n            return False\n        _warn_complex_not_supported()\n        return True\n    if t.is_meta:\n        return True\n    if t.dtype == torch.float8_e8m0fnu:\n        if not node:\n            return True\n        return not (isinstance(parent.target, torch._ops.OpOverload) and parent.target in (aten.view.dtype, aten.cat.default, aten._scaled_mm.default) or (isinstance(node.target, torch._ops.OpOverload) and is_view(node.target)))\n    return False",
    "docstring": "Do not support reading or writing to this tensor",
    "type": "function",
    "file_path": "pytorch\\torch\\_inductor\\lowering.py",
    "ast_data": "FunctionDef name:unsupported_input_tensor arg:t arg:parent arg:node arguments arg arg arg If Call If BoolOp Compare Return return:yes Call Return return:yes If Return return:yes If Compare If Return return:yes Return return:yes BoolOp BoolOp Call Compare BoolOp Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "assert_no_entries_with_modulus_zero",
    "source_code": "def assert_no_entries_with_modulus_zero(x, message=None, name='assert_no_entries_with_modulus_zero'):\n    with ops.name_scope(name, values=[x]):\n        x = tensor_conversion.convert_to_tensor_v2_with_dispatch(x, name='x')\n        dtype = x.dtype.base_dtype\n        should_be_nonzero = math_ops.abs(x)\n        zero = tensor_conversion.convert_to_tensor_v2_with_dispatch(0, dtype=dtype.real_dtype)\n        return check_ops.assert_less(zero, should_be_nonzero, message=message)",
    "docstring": "Returns that asserts Tensor has no entries with modulus zero. Args: x: Numeric , real, integer, or complex. message: A string message to prepend to failure message. name: A name to give this . Returns: An that asserts has no entries with modulus zero.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\linalg\\linear_operator_util.py",
    "ast_data": "FunctionDef name:assert_no_entries_with_modulus_zero arg:x arg:message arg:name arguments arg arg arg With Call Assign Call Assign Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "cartesian_product",
    "source_code": "def cartesian_product(X: list[np.ndarray]) -> list[np.ndarray]:\n    msg = 'Input must be a list-like of list-likes'\n    if not is_list_like(X):\n        raise TypeError(msg)\n    for x in X:\n        if not is_list_like(x):\n            raise TypeError(msg)\n    if len(X) == 0:\n        return []\n    lenX = np.fromiter((len(x) for x in X), dtype=np.intp)\n    cumprodX = np.cumprod(lenX)\n    if np.any(cumprodX < 0):\n        raise ValueError('Product space too large to allocate arrays!')\n    a = np.roll(cumprodX, 1)\n    a[0] = 1\n    if cumprodX[-1] != 0:\n        b = cumprodX[-1] / cumprodX\n    else:\n        b = np.zeros_like(cumprodX)\n    return [np.tile(np.repeat(x, b[i]), np.prod(a[i])) for i, x in enumerate(X)]",
    "docstring": "Numpy version of itertools.product. Sometimes faster (for large inputs)... Parameters ---------- X : list-like of list-likes Returns ------- product : list of ndarrays Examples -------- >>> cartesian_product([list(\"ABC\"), [1, 2]]) [array(['A', 'A', 'B', 'B', 'C', 'C'], dtype='<U1'), array([1, 2, 1, 2, 1, 2])] See Also -------- itertools.product : Cartesian product of input iterables. Equivalent to nested for-loops.",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\indexes\\multi.py",
    "ast_data": "FunctionDef name:cartesian_product arg:X arguments arg Assign If Call Raise Call For If Call Raise Call If Compare Call Return return:no Assign Call Call Assign Call If Call Compare Raise Call Assign Call Assign If Compare Assign Assign Call Return return:yes Call Call Call Call"
  },
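The docstring's example can be cross-checked against itertools.product; the snippet below reproduces the same element order using only numpy and the standard library.

```python
import numpy as np
from itertools import product

pairs = list(product("ABC", [1, 2]))          # nested-loop order
first = np.array([p[0] for p in pairs])       # ['A' 'A' 'B' 'B' 'C' 'C']
second = np.array([p[1] for p in pairs])      # [1 2 1 2 1 2]
print(first, second)
```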
  {
    "library": "scikit-learn",
    "name": "fit_transform",
    "source_code": "@_fit_context(prefer_skip_nested_validation=True)\ndef fit_transform(self, X, y):\n    from ..model_selection import KFold, StratifiedKFold\n    X_ordinal, X_known_mask, y_encoded, n_categories = self._fit_encodings_all(X, y)\n    if self.target_type_ == 'continuous':\n        cv = KFold(self.cv, shuffle=self.shuffle, random_state=self.random_state)\n    else:\n        cv = StratifiedKFold(self.cv, shuffle=self.shuffle, random_state=self.random_state)\n    if self.target_type_ == 'multiclass':\n        X_out = np.empty((X_ordinal.shape[0], X_ordinal.shape[1] * len(self.classes_)), dtype=np.float64)\n    else:\n        X_out = np.empty_like(X_ordinal, dtype=np.float64)\n    for train_idx, test_idx in cv.split(X, y):\n        X_train, y_train = (X_ordinal[train_idx, :], y_encoded[train_idx])\n        y_train_mean = np.mean(y_train, axis=0)\n        if self.target_type_ == 'multiclass':\n            encodings = self._fit_encoding_multiclass(X_train, y_train, n_categories, y_train_mean)\n        else:\n            encodings = self._fit_encoding_binary_or_continuous(X_train, y_train, n_categories, y_train_mean)\n        self._transform_X_ordinal(X_out, X_ordinal, ~X_known_mask, test_idx, encodings, y_train_mean)\n    return X_out",
    "docstring": "Fit :class: and transform X with the target encoding. .. note:: does not equal because a :term: scheme is used in for encoding. See the :ref:. for details. Parameters ---------- X : array-like of shape (n_samples, n_features) The data to determine the categories of each feature. y : array-like of shape (n_samples,) The target data used to encode the categories. Returns ------- X_trans : ndarray of shape (n_samples, n_features) or (n_samples, (n_features * n_classes)) Transformed input.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\preprocessing\\_target_encoder.py",
    "ast_data": "FunctionDef name:fit_transform arg:self arg:X arg:y arguments arg arg arg Assign Call If Compare Assign Call Assign Call If Compare Assign Call Call Assign Call For Call Assign Assign Call If Compare Assign Call Assign Call Call Return return:yes Call"
  },
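A hedged usage sketch (assumes scikit-learn >= 1.3, where TargetEncoder was added) showing that the cross-fitted fit_transform differs from calling fit followed by transform:

```python
import numpy as np
from sklearn.preprocessing import TargetEncoder

X = np.array([["a"], ["b"]] * 10, dtype=object)  # 20 samples, 1 feature
y = np.array([1, 0] * 10)                        # binary target
enc = TargetEncoder(random_state=0)
X_cross_fitted = enc.fit_transform(X, y)         # K-fold scheme inside
X_plain = enc.fit(X, y).transform(X)             # single full fit
print(X_cross_fitted.shape, X_plain.shape)       # (20, 1) (20, 1)
```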
  {
    "library": "matplotlib",
    "name": "Hbox",
    "source_code": "class Hbox(Box):\n\n    def __init__(self, width: float):\n        super().__init__(width, 0.0, 0.0)",
    "docstring": "A box with only width (zero height and depth).",
    "type": "class",
    "file_path": "matplotlib\\lib\\matplotlib\\_mathtext.py",
    "ast_data": "ClassDef name:Hbox FunctionDef name:__init__ arg:self arg:width arguments arg arg Call Call"
  },
  {
    "library": "tensorflow",
    "name": "scatter_div",
    "source_code": "def scatter_div(self, sparse_delta, use_locking=False, name=None):\n    if not isinstance(sparse_delta, indexed_slices.IndexedSlices):\n        raise TypeError(f'Argument `sparse_delta` must be a `tf.IndexedSlices`. Received arg: {sparse_delta}')\n    return self._lazy_read(gen_resource_variable_ops.resource_scatter_div(self.handle, sparse_delta.indices, ops.convert_to_tensor(sparse_delta.values, self.dtype), name=name))",
    "docstring": "Divide this variable by . Args: sparse_delta: to divide this variable by. use_locking: If , use locking during the operation. name: the name of the operation. Returns: The updated variable. Raises: TypeError: if is not an .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\resource_variable_ops.py",
    "ast_data": "FunctionDef name:scatter_div arg:self arg:sparse_delta arg:use_locking arg:name arguments arg arg arg arg If Call Raise Call Return return:yes Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_large_compatible_negative",
    "source_code": "def _large_compatible_negative(tensor_type):\n    if tensor_type == dtypes.float16:\n        return dtypes.float16.min\n    return -1000000000.0",
    "docstring": "Large negative number as Tensor. This function is necessary because the standard value for epsilon in this module (-1e9) cannot be represented using tf.float16 Args: tensor_type: a dtype to determine the type. Returns: a large negative number.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\layers\\advanced_activations.py",
    "ast_data": "FunctionDef name:_large_compatible_negative arg:tensor_type arguments arg If Compare Return return:yes Return return:yes"
  },
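The float16 limit that motivates the helper above can be checked with numpy (used here because the constant is a property of the IEEE half format, not of TensorFlow):

```python
import numpy as np

print(np.finfo(np.float16).min)  # -65504.0, the most negative finite half
print(np.float16(-1e9))          # -inf: -1e9 overflows the float16 range
```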
  {
    "library": "pytorch",
    "name": "execute_subgraph_from_prim_loop",
    "source_code": "def execute_subgraph_from_prim_loop(subgraph, iter_idx, len_loop_local_arguments, *args, **kwargs):\n    loop_local_args = args[:len_loop_local_arguments]\n    global_args = args[len_loop_local_arguments:]\n    return subgraph(*global_args, iter_idx, *loop_local_args, **kwargs)",
    "docstring": "subgraph: GraphModule from sub-block. iter_idx: The index of interation. len_loop_local_arguments: The number of loop local arguments in args.",
    "type": "function",
    "file_path": "pytorch\\torch\\_export\\converter.py",
    "ast_data": "FunctionDef name:execute_subgraph_from_prim_loop arg:subgraph arg:iter_idx arg:len_loop_local_arguments arguments arg arg arg arg arg Assign Assign Return return:yes Call"
  },
  {
    "library": "scrapy",
    "name": "find_by_request",
    "source_code": "def find_by_request(self, request: Request) -> __builtins__.list[str]:\n    pass",
    "docstring": "Return the list of spiders names that can handle the given request",
    "type": "method",
    "file_path": "scrapy\\scrapy\\spiderloader.py",
    "ast_data": "FunctionDef name:find_by_request arg:self arg:request arguments arg arg"
  },
  {
    "library": "tensorflow",
    "name": "_check_call_args",
    "source_code": "def _check_call_args(self, method_name):\n    fullargspec = self._call_full_argspec\n    if fullargspec.defaults:\n        positional_args = fullargspec.args[:-len(fullargspec.defaults)]\n    else:\n        positional_args = fullargspec.args\n    if 'training' in positional_args:\n        positional_args.remove('training')\n    if len(positional_args) > 2:\n        extra_args = positional_args[2:]\n        raise ValueError('Models passed to `' + method_name + '` can only have `training` and the first argument in `call` as positional arguments, found: ' + str(extra_args) + '.')",
    "docstring": "Check that has only one positional arg.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\training_v1.py",
    "ast_data": "FunctionDef name:_check_call_args arg:self arg:method_name arguments arg arg Assign If Assign Call Assign If Compare Call If Compare Call Assign Raise Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_format",
    "source_code": "def _format(name, arr):\n    title = '### All Compatibility %s ###' % str(name)\n    tlen = len(title)\n    print('-' * tlen)\n    print(title)\n    print('-' * tlen)\n    print(' Total # of %s: %s\\n' % (str(name), str(len(arr))))\n    if arr:\n        for item in arr:\n            detail = ''\n            if isinstance(item[1], list):\n                for itm in item[1]:\n                    detail += str(itm) + ', '\n                detail = detail[:-2]\n            else:\n                detail = str(item[1])\n            print(\"  %s ('%s')\\n\" % (str(item[0]), detail))\n    else:\n        print('  No %s' % name)\n    print('\\n')",
    "docstring": "Prints compatibility check results with a format. Args: name: String that is the title representing list . arr: List of items to be printed in a certain format.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\tools\\tensorflow_builder\\compat_checker\\compat_checker.py",
    "ast_data": "FunctionDef name:_format arg:name arg:arr arguments arg arg Assign Call Assign Call Call Call Call Call Call Call Call If For Assign If Call For Call Assign Assign Call Call Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "convert_variables_to_constants_v2_as_graph",
    "source_code": "def convert_variables_to_constants_v2_as_graph(func, lower_control_flow=True, aggressive_inlining=False):\n    converter_data = _FunctionConverterDataInEager(func=func, lower_control_flow=lower_control_flow, aggressive_inlining=aggressive_inlining)\n    output_graph_def, converted_input_indices = _replace_variables_by_constants(converter_data=converter_data)\n    frozen_func = _construct_concrete_function(func, output_graph_def, converted_input_indices)\n    return (frozen_func, output_graph_def)",
    "docstring": "Replaces all the variables in a graph with constants of the same values. This function works as same as convert_variables_to_constants_v2, but it returns the intermediate as well. This contains all the debug information after all the transformations in the frozen phase. Args: func: ConcreteFunction. lower_control_flow: Boolean indicating whether or not to lower control flow ops such as If and While. (default True) aggressive_inlining: Boolean indicating whether or not to do aggressive function inlining (might be unsafe if function has stateful ops, not properly connected to control outputs). Returns: ConcreteFunction containing a simplified version of the original, and also the intermediate GraphDef containing the node debug information for the transformations in the frozen phase.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\convert_to_constants.py",
    "ast_data": "FunctionDef name:convert_variables_to_constants_v2_as_graph arg:func arg:lower_control_flow arg:aggressive_inlining arguments arg arg arg Assign Call Assign Call Assign Call Return return:yes"
  },
  {
    "library": "pandas",
    "name": "get_chunks",
    "source_code": "@abstractmethod\ndef get_chunks(self, n_chunks: int | None=None) -> Iterable[DataFrame]:\n    pass",
    "docstring": "Return an iterator yielding the chunks. By default (None), yields the chunks that the data is stored as by the producer. If given, ``, meaning the producer must subdivide each chunk before yielding it.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\interchange\\dataframe_protocol.py",
    "ast_data": "FunctionDef name:get_chunks arg:self arg:n_chunks arguments arg arg"
  },
  {
    "library": "pandas",
    "name": "size",
    "source_code": "@property\ndef size(self) -> int:\n    return len(self)",
    "docstring": "Return the number of elements in the underlying data.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\indexes\\multi.py",
    "ast_data": "FunctionDef name:size arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "estimator_checks_generator",
    "source_code": "def estimator_checks_generator(estimator, *, legacy: bool=True, expected_failed_checks: dict[str, str] | None=None, mark: Literal['xfail', 'skip', None]=None):\n    if mark == 'xfail':\n        import pytest\n    else:\n        pytest = None\n    name = type(estimator).__name__\n    yield (estimator, partial(check_estimator_cloneable, name))\n    for check in _yield_all_checks(estimator, legacy=legacy):\n        check_with_name = partial(check, name)\n        for check_instance in _yield_instances_for_check(check, estimator):\n            yield _maybe_mark(check_instance, check_with_name, expected_failed_checks=expected_failed_checks, mark=mark, pytest=pytest)",
    "docstring": "Iteratively yield all check callables for an estimator. .. versionadded:: 1.6 Parameters ---------- estimator : estimator object Estimator instance for which to generate checks. legacy : bool, default=True Whether to include legacy checks. Over time we remove checks from this category and move them into their specific category. expected_failed_checks : dict[str, str], default=None Dictionary of the form {check_name: reason} for checks that are expected to fail. mark : {\"xfail\", \"skip\"} or None, default=None Whether to mark the checks that are expected to fail as xfail() or skip. Marking a test as \"skip\" is done via wrapping the check in a function that raises a :class: exception. Returns ------- estimator_checks_generator : generator Generator that yields (estimator, check) tuples.",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\utils\\estimator_checks.py",
    "ast_data": "FunctionDef name:estimator_checks_generator arg:estimator arguments arg arg arg arg If Compare Assign Assign Call Call For Call Assign Call For Call Call"
  },
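A hedged usage sketch (assumes scikit-learn >= 1.6 per the docstring); each yielded callable is invoked with its paired estimator instance:

```python
from sklearn.linear_model import LogisticRegression
from sklearn.utils.estimator_checks import estimator_checks_generator

for estimator, check in estimator_checks_generator(LogisticRegression()):
    check(estimator)  # raises on the first failing check
```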
  {
    "library": "tensorflow",
    "name": "ParameterizedBenchmark",
    "source_code": "@tf_export('__internal__.test.ParameterizedBenchmark', v1=[])\nclass ParameterizedBenchmark(_BenchmarkRegistrar):\n\n    def __new__(mcs, clsname, base, attrs):\n        param_config_list = attrs['_benchmark_parameters']\n\n        def create_benchmark_function(original_benchmark, params):\n            return lambda self: original_benchmark(self, *params)\n        for name in attrs.copy().keys():\n            if not name.startswith('benchmark'):\n                continue\n            original_benchmark = attrs[name]\n            del attrs[name]\n            for param_config in param_config_list:\n                test_name_suffix = param_config[0]\n                params = param_config[1:]\n                benchmark_name = name + '__' + test_name_suffix\n                if benchmark_name in attrs:\n                    raise Exception('Benchmark named {} already defined.'.format(benchmark_name))\n                benchmark = create_benchmark_function(original_benchmark, params)\n                attrs[benchmark_name] = _rename_function(benchmark, 1, benchmark_name)\n        return super().__new__(mcs, clsname, base, attrs)",
    "docstring": "Metaclass to generate parameterized benchmarks. Use this class as a metaclass and override the to generate multiple benchmark test cases. For example: class FooBenchmark(metaclass=tf.test.ParameterizedBenchmark, tf.test.Benchmark): # The is expected to be a list with test cases. # Each of the test case is a tuple, with the first time to be test case # name, followed by any number of the parameters needed for the test case. _benchmark_parameters = [ ('case_1', Foo, 1, 'one'), ('case_2', Bar, 2, 'two'), ] def benchmark_test(self, target_class, int_param, string_param): # benchmark test body The example above will generate two benchmark test cases: \"benchmark_test__case_1\" and \"benchmark_test__case_2\".",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\platform\\benchmark.py",
    "ast_data": "ClassDef name:ParameterizedBenchmark FunctionDef name:__new__ arg:mcs arg:clsname arg:base arg:attrs arguments arg arg arg arg Assign FunctionDef name:create_benchmark_function arg:original_benchmark arg:params arguments arg arg Return return:yes arguments arg Call For Call Call If Call Assign For Assign Assign Assign If Compare Raise Call Call Assign Call Assign Call Return return:yes Call Call Call"
  },
  {
    "library": "sphinx",
    "name": "isboundmethod",
    "source_code": "def isboundmethod(method: MethodType) -> bool:\n    return safe_getattr(method, '__self__', None) is not None",
    "docstring": "Check if the method is a bound method.",
    "type": "function",
    "file_path": "sphinx\\sphinx\\util\\inspect.py",
    "ast_data": "FunctionDef name:isboundmethod arg:method arguments arg Return return:yes Compare Call"
  },
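The `__self__` probe above is plain Python semantics and easy to verify:

```python
class Greeter:
    def hello(self):
        return "hi"

# Accessed on the class: a plain function, no __self__ attribute.
print(getattr(Greeter.hello, "__self__", None) is not None)    # False
# Accessed on an instance: a bound method carrying __self__.
print(getattr(Greeter().hello, "__self__", None) is not None)  # True
```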
  {
    "library": "pytorch",
    "name": "_wrap_model_with_output_adapter",
    "source_code": "def _wrap_model_with_output_adapter(model: torch.nn.Module | Callable, output_adapter: DynamoFlattenOutputStep) -> Callable:\n    model_func = model.forward if isinstance(model, torch.nn.Module) else model\n\n    @functools.wraps(model_func)\n    def wrapped(*args, **kwargs):\n        return output_adapter.apply(model_func(*args, **kwargs), model=model)\n    return wrapped",
    "docstring": "Wrap model with output adapter. This is a helper function to enable :func: on models that produce custom user defined types outputs. It wraps the model with an output adapter to convert the outputs to :func: compatible types, i.e. :class:. The adapting logic is controlled by ``. Args: model: PyTorch model or function. output_adapter: Output adapter to apply to model output. Returns: Wrapped model.",
    "type": "function",
    "file_path": "pytorch\\torch\\onnx\\_internal\\fx\\dynamo_graph_extractor.py",
    "ast_data": "FunctionDef name:_wrap_model_with_output_adapter arg:model arg:output_adapter arguments arg arg Assign Call FunctionDef name:wrapped arguments arg arg Return return:yes Call Call Call Return return:yes"
  },
  {
    "library": "scipy",
    "name": "_prepare_bounds",
    "source_code": "def _prepare_bounds(bounds, x0):\n    lb, ub = (np.asarray(b, dtype=float) for b in bounds)\n    if lb.ndim == 0:\n        lb = np.resize(lb, x0.shape)\n    if ub.ndim == 0:\n        ub = np.resize(ub, x0.shape)\n    return (lb, ub)",
    "docstring": "Prepares new-style bounds from a two-tuple specifying the lower and upper limits for values in x0. If a value is not bound then the lower/upper bound will be expected to be -np.inf/np.inf. Examples -------- >>> _prepare_bounds([(0, 1, 2), (1, 2, np.inf)], [0.5, 1.5, 2.5]) (array([0., 1., 2.]), array([ 1., 2., inf]))",
    "type": "function",
    "file_path": "scipy\\scipy\\optimize\\_numdiff.py",
    "ast_data": "FunctionDef name:_prepare_bounds arg:bounds arg:x0 arguments arg arg Assign Call If Compare Assign Call If Compare Assign Call Return return:yes"
  },
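The scalar-broadcast behavior above is just np.resize over x0's shape; a minimal check:

```python
import numpy as np

x0 = np.array([0.5, 1.5, 2.5])
lb = np.resize(np.asarray(0.0, dtype=float), x0.shape)     # [0. 0. 0.]
ub = np.resize(np.asarray(np.inf, dtype=float), x0.shape)  # [inf inf inf]
print(lb, ub)
```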
  {
    "library": "pytorch",
    "name": "dequantize_helper",
    "source_code": "def dequantize_helper(g: jit_utils.GraphContext, qtensor: _C.Value, qdtype: _C_onnx.TensorProtoDataType | None=None) -> tuple[_C.Value, _C.Value, _C.Value, _C.Value | None]:\n    unpacked_qtensors = _unpack_quantized_tensor(qtensor)\n    tensor, scale, zero_point = unpacked_qtensors[:3]\n    axis = unpacked_qtensors[3] if len(unpacked_qtensors) >= 4 else None\n    axis_i = _get_const(axis, 'i', 'axis')\n    input_qdtype = _type_utils.JitScalarType.from_value(tensor)\n    if qdtype is None:\n        if input_qdtype is not None:\n            qdtype = input_qdtype.onnx_type()\n        else:\n            qdtype = _C_onnx.TensorProtoDataType.UINT8\n    value = g.op('Cast', tensor, to_i=qdtype)\n    scale = g.op('Cast', scale, to_i=_C_onnx.TensorProtoDataType.FLOAT)\n    zero_point = g.op('Cast', zero_point, to_i=qdtype)\n    if axis_i is not None and GLOBALS.export_onnx_opset_version < 13:\n        _onnx_opset_unsupported_detailed('DequantizeLinear', GLOBALS.export_onnx_opset_version, 13, 'Attribute axis is not supported.', qtensor)\n    return (g.op('DequantizeLinear', value, scale, zero_point, axis_i=axis_i), scale, zero_point, axis)",
    "docstring": "Appends to graph ONNX nodes that dequantizes into . Args: g: Graph, the ONNX IR graph that is under construction. qtensor: torch._C.Value, either a tuple of (quantized_tensor, scale, zero_point) for per tensor quantization, or (quantized_tensor, scale, zero_point, axis) for per channel quantization, representing the quantized tensor. qdtype: torch.onnx.TensorProtoDataType default None, if not None, represents the data type of quantized tensor. It must be either torch.onnx.TensorProtoDataType.UINT8 or torch.onnx.TensorProtoDataType.INT8.",
    "type": "function",
    "file_path": "pytorch\\torch\\onnx\\symbolic_helper.py",
    "ast_data": "FunctionDef name:dequantize_helper arg:g arg:qtensor arg:qdtype arguments arg arg arg Assign Call Assign Assign Compare Call Assign Call Assign Call If Compare If Compare Assign Call Assign Assign Call Assign Call Assign Call If BoolOp Compare Compare Call Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "arr_to_2d",
    "source_code": "def arr_to_2d(arr, oned_as='row'):\n    dims = matdims(arr, oned_as)\n    if len(dims) > 2:\n        raise ValueError('Matlab 4 files cannot save arrays with more than 2 dimensions')\n    return arr.reshape(dims)",
    "docstring": "Make `arr` for more detail Returns ------- arr2d : array 2-D version of the array",
    "type": "function",
    "file_path": "scipy\\scipy\\io\\matlab\\_mio4.py",
    "ast_data": "FunctionDef name:arr_to_2d arg:arr arg:oned_as arguments arg arg Assign Call If Compare Call Raise Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "getmro",
    "source_code": "def getmro(cls):\n    return _inspect.getmro(cls)",
    "docstring": "TFDecorator-aware replacement for inspect.getmro.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\utils\\tf_inspect.py",
    "ast_data": "FunctionDef name:getmro arg:cls arguments arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "__call__",
    "source_code": "@abstractmethod\ndef __call__(self, graph: torch.fx.graph.Graph) -> None:\n    pass",
    "docstring": "Implementation of the custom pass.",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\custom_graph_pass.py",
    "ast_data": "FunctionDef name:__call__ arg:self arg:graph arguments arg arg"
  },
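A minimal sketch of a conforming pass; `DeadCodePass` is hypothetical, but `eliminate_dead_code` is a standard in-place `torch.fx.Graph` method:

```python
import torch

class DeadCodePass:
    """Hypothetical custom pass: mutates the FX graph in place."""

    def __call__(self, graph: torch.fx.Graph) -> None:
        graph.eliminate_dead_code()  # drop nodes with no users
```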
  {
    "library": "tensorflow",
    "name": "set_python_graph",
    "source_code": "def set_python_graph(self, python_graph):\n    self._python_graph = python_graph\n    self._node_traceback = {}\n    if self._python_graph:\n        for op in self._python_graph.get_operations():\n            self._node_traceback[op.name] = tuple(map(tuple, op.traceback))",
    "docstring": "Provide Python object to the wrapper. Unlike the partition graphs, which are protobuf objects, is a Python object and carries additional information such as the traceback of the construction of the nodes in the graph. Args: python_graph: (ops.Graph) The Python Graph object.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\debug\\lib\\debug_data.py",
    "ast_data": "FunctionDef name:set_python_graph arg:self arg:python_graph arguments arg arg Assign Assign If For Call Assign Call Call"
  },
  {
    "library": "matplotlib",
    "name": "onpick",
    "source_code": "def onpick(self, event):\n    if event.artist is not self.line:\n        return\n    self.ind ^= set(event.ind)\n    ind = sorted(self.ind)\n    xdata, ydata = self.line.get_data()\n    self.process_selected(ind, xdata[ind], ydata[ind])",
    "docstring": "When the line is picked, update the set of selected indices.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\lines.py",
    "ast_data": "FunctionDef name:onpick arg:self arg:event arguments arg arg If Compare Return return:no Call Assign Call Assign Call Call"
  },
  {
    "library": "django",
    "name": "parse_raster",
    "source_code": "def parse_raster(self, value):\n    return from_pgraster(value)",
    "docstring": "Convert a PostGIS HEX String into a dict readable by GDALRaster.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\db\\backends\\postgis\\operations.py",
    "ast_data": "FunctionDef name:parse_raster arg:self arg:value arguments arg arg Return return:yes Call"
  },
  {
    "library": "django",
    "name": "save_base",
    "source_code": "def save_base(self, raw=False, force_insert=False, force_update=False, using=None, update_fields=None):\n    using = using or router.db_for_write(self.__class__, instance=self)\n    assert not (force_insert and (force_update or update_fields))\n    assert update_fields is None or update_fields\n    cls = origin = self.__class__\n    if cls._meta.proxy:\n        cls = cls._meta.concrete_model\n    meta = cls._meta\n    if not meta.auto_created:\n        pre_save.send(sender=origin, instance=self, raw=raw, using=using, update_fields=update_fields)\n    if meta.parents:\n        context_manager = transaction.atomic(using=using, savepoint=False)\n    else:\n        context_manager = transaction.mark_for_rollback_on_error(using=using)\n    with context_manager:\n        parent_inserted = False\n        if not raw:\n            force_insert = self._validate_force_insert(force_insert)\n            parent_inserted = self._save_parents(cls, using, update_fields, force_insert)\n        updated = self._save_table(raw, cls, force_insert or parent_inserted, force_update, using, update_fields)\n    self._state.db = using\n    self._state.adding = False\n    if not meta.auto_created:\n        post_save.send(sender=origin, instance=self, created=not updated, update_fields=update_fields, raw=raw, using=using)",
    "docstring": "Handle the parts of saving which should be done only once per save, yet need to be done in raw saves, too. This includes some sanity checks and signal sending. The 'raw' argument is telling save_base not to save any parent models and not to do any changes to the values before save. This is used by fixture loading.",
    "type": "method",
    "file_path": "django\\django\\db\\models\\base.py",
    "ast_data": "FunctionDef name:save_base arg:self arg:raw arg:force_insert arg:force_update arg:using arg:update_fields arguments arg arg arg arg arg arg Assign BoolOp Call BoolOp BoolOp BoolOp Compare Assign If Assign Assign If Call If Assign Call Assign Call With Assign If Assign Call Assign Call Assign Call BoolOp Assign Assign If Call"
  },
  {
    "library": "sphinx",
    "name": "note_object",
    "source_code": "def note_object(self, objtype: str, name: str, labelid: str, location: Any=None) -> None:\n    if (objtype, name) in self.objects:\n        docname = self.objects[objtype, name][0]\n        logger.warning(__('duplicate %s description of %s, other instance in %s'), objtype, name, docname, location=location)\n    self.objects[objtype, name] = (self.env.docname, labelid)",
    "docstring": "Note a generic object for cross reference. .. versionadded:: 3.0",
    "type": "method",
    "file_path": "sphinx\\sphinx\\domains\\std\\__init__.py",
    "ast_data": "FunctionDef name:note_object arg:self arg:objtype arg:name arg:labelid arg:location arguments arg arg arg arg arg If Compare Assign Call Call Assign"
  },
  {
    "library": "scikit-learn",
    "name": "_safe_accumulator_op",
    "source_code": "def _safe_accumulator_op(op, x, *args, **kwargs):\n    if np.issubdtype(x.dtype, np.floating) and x.dtype.itemsize < 8:\n        result = op(x, *args, **kwargs, dtype=np.float64)\n    else:\n        result = op(x, *args, **kwargs)\n    return result",
    "docstring": "This function provides numpy accumulator functions with a float64 dtype when used on a floating point input. This prevents accumulator overflow on smaller floating point dtypes. Parameters ---------- op : function A numpy accumulator function such as np.mean or np.sum. x : ndarray A numpy array to apply the accumulator function. *args : positional arguments Positional arguments passed to the accumulator function after the input x. **kwargs : keyword arguments Keyword arguments passed to the accumulator function. Returns ------- result The output of the accumulator function passed to this function.",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\utils\\extmath.py",
    "ast_data": "FunctionDef name:_safe_accumulator_op arg:op arg:x arguments arg arg arg arg If BoolOp Call Compare Assign Call Assign Call Return return:yes"
  },
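The overflow this helper guards against is easy to reproduce: summing a small float16 array overflows unless the accumulator is widened, which is exactly what passing dtype=np.float64 does:

```python
import numpy as np

x = np.full(4, 60000.0, dtype=np.float16)
print(np.sum(x))                     # inf: float16 accumulator overflows
print(np.sum(x, dtype=np.float64))   # 240000.0: the widened path
```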
  {
    "library": "django",
    "name": "kml",
    "source_code": "@property\ndef kml(self):\n    gtype = self.geom_type\n    return '<%s>%s</%s>' % (gtype, self.coord_seq.kml, gtype)",
    "docstring": "Return the KML representation of this Geometry.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\geos\\geometry.py",
    "ast_data": "FunctionDef name:kml arg:self arguments arg Assign Return return:yes"
  },
  {
    "library": "cryptography",
    "name": "private_numbers",
    "source_code": "@abc.abstractmethod\ndef private_numbers(self) -> DHPrivateNumbers:\n    pass",
    "docstring": "Returns a DHPrivateNumbers.",
    "type": "method",
    "file_path": "cryptography\\src\\cryptography\\hazmat\\primitives\\asymmetric\\dh.py",
    "ast_data": "FunctionDef name:private_numbers arg:self arguments arg"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, replica_id_in_sync_group=0, num_replicas_in_sync=1):\n    self._replica_id_in_sync_group = replica_id_in_sync_group\n    self._num_replicas_in_sync = num_replicas_in_sync",
    "docstring": "Initializes a ValueContext object. Args: replica_id_in_sync_group: the current replica_id, should be an int in [0,). num_replicas_in_sync: the number of replicas that are in sync.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\distribute_lib.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:replica_id_in_sync_group arg:num_replicas_in_sync arguments arg arg arg Assign Assign"
  },
  {
    "library": "pytorch",
    "name": "get_buffer",
    "source_code": "def get_buffer(self, target: str) -> 'Tensor':\n    module_path, _, buffer_name = target.rpartition('.')\n    mod: torch.nn.Module = self.get_submodule(module_path)\n    if not hasattr(mod, buffer_name):\n        raise AttributeError(mod._get_name() + ' has no attribute `' + buffer_name + '`')\n    buffer: torch.Tensor = getattr(mod, buffer_name)\n    if buffer_name not in mod._buffers:\n        raise AttributeError('`' + buffer_name + '` is not a buffer')\n    return buffer",
    "docstring": "Return the buffer given by `` Raises: AttributeError: If the target string references an invalid path or resolves to something that is not a buffer",
    "type": "method",
    "file_path": "pytorch\\torch\\nn\\modules\\module.py",
    "ast_data": "FunctionDef name:get_buffer arg:self arg:target arguments arg arg Assign Call Call If Call Raise Call Call Call If Compare Raise Call Return return:yes"
  },
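A short usage sketch: BatchNorm1d registers running-stat buffers, and a dotted target resolves through submodules first.

```python
import torch

m = torch.nn.BatchNorm1d(3)
print(m.get_buffer("running_mean"))       # tensor([0., 0., 0.])

seq = torch.nn.Sequential(torch.nn.BatchNorm1d(3))
print(seq.get_buffer("0.running_mean"))   # resolved via get_submodule("0")
```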
  {
    "library": "tensorflow",
    "name": "_extract_docstring",
    "source_code": "def _extract_docstring(self, value: str) -> tuple[str, Sequence[str]]:\n    docstring = ''\n    modules = []\n    for line in value.splitlines():\n        match = _DOCSTRING_PATTERN.match(line)\n        if match:\n            module = match.group(1).strip()\n            if module == self._api_name or module.startswith(self._api_name + '.'):\n                modules.append(module)\n        else:\n            docstring += line + '\\n'\n    return (docstring.strip(), modules)",
    "docstring": "Extract docstring and list of modules that it should be applied to.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\tools\\api\\generator2\\extractor\\extractor.py",
    "ast_data": "FunctionDef name:_extract_docstring arg:self arg:value arguments arg arg Assign Assign For Call Assign Call If Assign Call Call If BoolOp Compare Call Call Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "set_unit",
    "source_code": "def set_unit(self, unit):\n    _api.check_in_list(['points', 'pixels'], unit=unit)\n    self._unit = unit",
    "docstring": "Set the unit for input to the transform used by ``. Parameters ---------- unit : {'points', 'pixels'}",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\text.py",
    "ast_data": "FunctionDef name:set_unit arg:self arg:unit arguments arg arg Call Assign"
  },
  {
    "library": "authlib",
    "name": "JWSAlgorithm",
    "source_code": "class JWSAlgorithm:\n    name = None\n    description = None\n    algorithm_type = 'JWS'\n    algorithm_location = 'alg'\n\n    def prepare_key(self, raw_data):\n        raise NotImplementedError()\n\n    def sign(self, msg, key):\n        raise NotImplementedError\n\n    def verify(self, msg, sig, key):\n        raise NotImplementedError",
    "docstring": "Interface for JWS algorithm. JWA specification (RFC7518) SHOULD implement the algorithms for JWS with this base implementation.",
    "type": "class",
    "file_path": "authlib\\authlib\\jose\\rfc7515\\models.py",
    "ast_data": "ClassDef name:JWSAlgorithm Assign Assign Assign Assign FunctionDef name:prepare_key arg:self arg:raw_data arguments arg arg Raise Call FunctionDef name:sign arg:self arg:msg arg:key arguments arg arg arg Raise FunctionDef name:verify arg:self arg:msg arg:sig arg:key arguments arg arg arg arg Raise"
  },
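A self-contained sketch of the interface shape using HMAC-SHA256; `DemoHS256` is illustrative and is not Authlib's registered implementation.

```python
import hashlib
import hmac

class DemoHS256:
    """Illustrative JWSAlgorithm-shaped class (HMAC using SHA-256)."""
    name = "HS256"
    description = "HMAC using SHA-256"
    algorithm_type = "JWS"
    algorithm_location = "alg"

    def prepare_key(self, raw_data):
        return raw_data if isinstance(raw_data, bytes) else raw_data.encode()

    def sign(self, msg, key):
        return hmac.new(key, msg, hashlib.sha256).digest()

    def verify(self, msg, sig, key):
        return hmac.compare_digest(sig, self.sign(msg, key))

alg = DemoHS256()
key = alg.prepare_key("secret")
sig = alg.sign(b"payload", key)
print(alg.verify(b"payload", sig, key))  # True
```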
  {
    "library": "matplotlib",
    "name": "set_3d_properties",
    "source_code": "def set_3d_properties(self, zs=0, zdir='z', axlim_clip=False):\n    xs = self.get_xdata()\n    ys = self.get_ydata()\n    zs = cbook._to_unmasked_float_array(zs).ravel()\n    zs = np.broadcast_to(zs, len(xs))\n    self._verts3d = juggle_axes(xs, ys, zs, zdir)\n    self._axlim_clip = axlim_clip\n    self.stale = True",
    "docstring": "Set the *z* position and direction of the line. Parameters ---------- zs : float or array of floats The location along the *zdir* axis in 3D space to position the line. zdir : {'x', 'y', 'z'} Plane to plot line orthogonal to. Default: 'z'. See for a description of the values. axlim_clip : bool, default: False Whether to hide lines with an endpoint outside the axes view limits. .. versionadded:: 3.10",
    "type": "method",
    "file_path": "matplotlib\\lib\\mpl_toolkits\\mplot3d\\art3d.py",
    "ast_data": "FunctionDef name:set_3d_properties arg:self arg:zs arg:zdir arg:axlim_clip arguments arg arg arg arg Assign Call Assign Call Assign Call Call Assign Call Call Assign Call Assign Assign"
  },
  {
    "library": "pytorch",
    "name": "get_device_arg_index",
    "source_code": "def get_device_arg_index(schema: _C.FunctionSchema) -> Union[int, None]:\n    for index, arg in enumerate(schema.arguments):\n        if arg.type is _C.DeviceObjType.get() and arg.name == 'device':\n            return index\n    return None",
    "docstring": "Given a schema, returns the id of the argument. If it does not exist, returns None.",
    "type": "function",
    "file_path": "pytorch\\torch\\_library\\utils.py",
    "ast_data": "FunctionDef name:get_device_arg_index arg:schema arguments arg For Call If BoolOp Compare Call Compare Return return:yes Return return:no"
  },
  {
    "library": "pytorch",
    "name": "from_dict",
    "source_code": "@classmethod\ndef from_dict(cls, dtype_config_dict: dict[str, Any]) -> DTypeConfig:\n    input_dtype = dtype_config_dict.get(INPUT_DTYPE_DICT_KEY, None)\n    if input_dtype is not None and (not isinstance(input_dtype, (torch.dtype, DTypeWithConstraints))):\n        raise ValueError('Expected input_dtype to be a torch.dtype or DTypeWithConstraints')\n    output_dtype = dtype_config_dict.get(OUTPUT_DTYPE_DICT_KEY, None)\n    if output_dtype is not None and (not isinstance(output_dtype, (torch.dtype, DTypeWithConstraints))):\n        raise ValueError('Expected output_dtype to be a torch.dtype or DTypeWithConstraints')\n    weight_dtype = dtype_config_dict.get(WEIGHT_DTYPE_DICT_KEY, None)\n    if weight_dtype is not None and (not isinstance(weight_dtype, (torch.dtype, DTypeWithConstraints))):\n        raise ValueError('Expected weight_dtype to be a torch.dtype or DTypeWithConstraints')\n    bias_dtype = dtype_config_dict.get(BIAS_DTYPE_DICT_KEY, None)\n    is_dynamic = dtype_config_dict.get(IS_DYNAMIC_DICT_KEY, None)\n    return cls(input_dtype, output_dtype, weight_dtype, bias_dtype, is_dynamic)",
    "docstring": "Create a `` \"bias_type\": torch.dtype \"is_dynamic\": bool",
    "type": "method",
    "file_path": "pytorch\\torch\\ao\\quantization\\backend_config\\backend_config.py",
    "ast_data": "FunctionDef name:from_dict arg:cls arg:dtype_config_dict arguments arg arg Assign Call If BoolOp Compare Call Raise Call Assign Call If BoolOp Compare Call Raise Call Assign Call If BoolOp Compare Call Raise Call Assign Call Assign Call Return return:yes Call"
  },
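A hedged usage sketch; the dict keys mirror the constants read above, and `DTypeConfig` is importable from torch.ao.quantization.backend_config.

```python
import torch
from torch.ao.quantization.backend_config import DTypeConfig

cfg = DTypeConfig.from_dict({
    "input_dtype": torch.quint8,
    "output_dtype": torch.quint8,
    "weight_dtype": torch.qint8,
    "bias_dtype": torch.float,
})
print(cfg.input_dtype, cfg.weight_dtype)  # torch.quint8 torch.qint8
```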
  {
    "library": "kornia",
    "name": "rot_z",
    "source_code": "@classmethod\ndef rot_z(cls, z: Tensor) -> So3:\n    zs = zeros_like(z)\n    return cls.exp(stack((zs, zs, z), -1))",
    "docstring": "Construct a z-axis rotation. Args: z: the z-axis rotation angle.",
    "type": "method",
    "file_path": "kornia\\kornia\\geometry\\liegroup\\so3.py",
    "ast_data": "FunctionDef name:rot_z arg:cls arg:z arguments arg arg Assign Call Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_pop",
    "source_code": "def _pop(self, key, indices=None, name=None):\n    if name is None:\n        name = '%s_get' % self._name\n    indices, dtypes = self._get_indices_and_dtypes(indices)\n    with ops.colocate_with(self._coloc_op):\n        result = self._pop_fn(key, shared_name=self._name, indices=indices, dtypes=dtypes, name=name, capacity=self._capacity, memory_limit=self._memory_limit)\n    return (key, self._get_return_value(result, indices))",
    "docstring": "Remove and return the associated (key, value) is returned from the staging area. If the key is not in the staging area, this method will block until the associated (key, value) is inserted. Args: key: Key associated with the required data indices: Partial list of tensors to retrieve (optional). A list of integer or string indices. String indices are only valid if the Staging Area has names associated with it. name: A name for the operation (optional) Returns: The created op",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\data_flow_ops.py",
    "ast_data": "FunctionDef name:_pop arg:self arg:key arg:indices arg:name arguments arg arg arg arg If Compare Assign Assign Call With Call Assign Call Return return:yes Call"
  },
  {
    "library": "django",
    "name": "annotate",
    "source_code": "def annotate(self, *args, **kwargs):\n    self._not_support_combined_queries('annotate')\n    return self._annotate(args, kwargs, select=True)",
    "docstring": "Return a query set in which the returned objects have been annotated with extra data or aggregations.",
    "type": "method",
    "file_path": "django\\django\\db\\models\\query.py",
    "ast_data": "FunctionDef name:annotate arg:self arguments arg arg arg Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "key_is_id",
    "source_code": "def key_is_id(k):\n    return isinstance(k, (torch.Tensor, torch.nn.Module, MethodWrapperType))",
    "docstring": "Returns whether it indexes dictionaries using its id",
    "type": "function",
    "file_path": "pytorch\\torch\\_dynamo\\utils.py",
    "ast_data": "FunctionDef name:key_is_id arg:k arguments arg Return return:yes Call"
  },
  {
    "library": "django",
    "name": "_alter_column_type_sql",
    "source_code": "def _alter_column_type_sql(self, table, old_field, new_field, new_type, old_collation, new_collation):\n    if not hasattr(old_field, 'dim') or not hasattr(new_field, 'dim'):\n        return super()._alter_column_type_sql(table, old_field, new_field, new_type, old_collation, new_collation)\n    if old_field.dim == 2 and new_field.dim == 3:\n        sql_alter = self.sql_alter_column_to_3d\n    elif old_field.dim == 3 and new_field.dim == 2:\n        sql_alter = self.sql_alter_column_to_2d\n    else:\n        sql_alter = self.sql_alter_column_type\n    return ((sql_alter % {'column': self.quote_name(new_field.column), 'type': new_type, 'collation': ''}, []), [])",
    "docstring": "Special case when dimension changed.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\db\\backends\\postgis\\schema.py",
    "ast_data": "FunctionDef name:_alter_column_type_sql arg:self arg:table arg:old_field arg:new_field arg:new_type arg:old_collation arg:new_collation arguments arg arg arg arg arg arg arg If BoolOp Call Call Return return:yes Call Call If BoolOp Compare Compare Assign If BoolOp Compare Compare Assign Assign Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "__init__",
    "source_code": "def __init__(self, o):\n    if not isinstance(o, Artist):\n        if np.iterable(o):\n            o = list(o)\n            if len(o):\n                o = o[0]\n    self.oorig = o\n    if not isinstance(o, type):\n        o = type(o)\n    self.o = o\n    self.aliasd = self.get_aliases()",
    "docstring": "Initialize the artist inspector with an or an iterable of \\s. If an iterable is used, we assume it is a homogeneous sequence (all \\s are of the same type) and it is your responsibility to make sure this is so.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\artist.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:o arguments arg arg If Call If Call Assign Call If Call Assign Assign If Call Assign Call Assign Assign Call"
  },
  {
    "library": "matplotlib",
    "name": "rc_file",
    "source_code": "def rc_file(fname, *, use_default_template=True):\n    with _api.suppress_matplotlib_deprecation_warning():\n        from .style.core import STYLE_BLACKLIST\n        rc_from_file = rc_params_from_file(fname, use_default_template=use_default_template)\n        rcParams.update({k: rc_from_file[k] for k in rc_from_file if k not in STYLE_BLACKLIST})",
    "docstring": "Update from file. Style-blacklisted (defined in ``) are not updated. Parameters ---------- fname : str or path-like A file with Matplotlib rc settings. use_default_template : bool If True, initialize with default parameters before updating with those in the given file. If False, the current configuration persists and only the parameters specified in the file are updated.",
    "type": "function",
    "file_path": "matplotlib\\lib\\matplotlib\\__init__.py",
    "ast_data": "FunctionDef name:rc_file arg:fname arguments arg arg With Call Assign Call Call Compare"
  },
  {
    "library": "pandas",
    "name": "_array_strptime_with_fallback",
    "source_code": "def _array_strptime_with_fallback(arg, name, utc: bool, fmt: str, exact: bool, errors: str) -> Index:\n    result, tz_out = array_strptime(arg, fmt, exact=exact, errors=errors, utc=utc)\n    if tz_out is not None:\n        unit = np.datetime_data(result.dtype)[0]\n        dtype = DatetimeTZDtype(tz=tz_out, unit=unit)\n        dta = DatetimeArray._simple_new(result, dtype=dtype)\n        if utc:\n            dta = dta.tz_convert('UTC')\n        return Index(dta, name=name)\n    elif result.dtype != object and utc:\n        unit = np.datetime_data(result.dtype)[0]\n        res = Index(result, dtype=f'M8[{unit}, UTC]', name=name)\n        return res\n    return Index(result, dtype=result.dtype, name=name)",
    "docstring": "Call array_strptime, with fallback behavior depending on 'errors'.",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\tools\\datetimes.py",
    "ast_data": "FunctionDef name:_array_strptime_with_fallback arg:arg arg:name arg:utc arg:fmt arg:exact arg:errors arguments arg arg arg arg arg arg Assign Call If Compare Assign Call Assign Call Assign Call If Assign Call Return return:yes Call If BoolOp Compare Assign Call Assign Call Return return:yes Return return:yes Call"
  },
  {
    "library": "sphinx",
    "name": "EnvVarXRefRole",
    "source_code": "class EnvVarXRefRole(XRefRole):\n\n    def result_nodes(self, document: nodes.document, env: BuildEnvironment, node: Element, is_ref: bool) -> tuple[list[Node], list[system_message]]:\n        if not is_ref:\n            return ([node], [])\n        varname = node['reftarget']\n        tgtid = 'index-%s' % env.new_serialno('index')\n        indexnode = addnodes.index()\n        indexnode['entries'] = [('single', varname, tgtid, '', None), ('single', _('environment variable; %s') % varname, tgtid, '', None)]\n        targetnode = nodes.target('', '', ids=[tgtid])\n        document.note_explicit_target(targetnode)\n        return ([indexnode, targetnode, node], [])",
    "docstring": "Cross-referencing role for environment variables (adds an index entry).",
    "type": "class",
    "file_path": "sphinx\\sphinx\\domains\\std\\__init__.py",
    "ast_data": "ClassDef name:EnvVarXRefRole FunctionDef name:result_nodes arg:self arg:document arg:env arg:node arg:is_ref arguments arg arg arg arg arg If Return return:yes Assign Assign Call Assign Call Assign Call Assign Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_create_etcd_client",
    "source_code": "def _create_etcd_client(params: RendezvousParameters) -> etcd.Client:\n    hostname, port = parse_rendezvous_endpoint(params.endpoint, 2379)\n    protocol = params.config.get('protocol')\n    if protocol is None:\n        protocol = 'http'\n    elif protocol != 'http' and protocol != 'https':\n        raise ValueError('The etcd protocol must be HTTP or HTTPS.')\n    ssl_cert = params.config.get('cert')\n    if ssl_cert is not None:\n        cert_key = params.config.get('key')\n        if cert_key is not None:\n            ssl_cert = (ssl_cert, cert_key)\n    ca_cert = params.config.get('cacert')\n    return etcd.Client(hostname, port, protocol=protocol, cert=ssl_cert, ca_cert=ca_cert, allow_reconnect=True)",
    "docstring": "Create a new ``.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\elastic\\rendezvous\\etcd_rendezvous.py",
    "ast_data": "FunctionDef name:_create_etcd_client arg:params arguments arg Assign Call Assign Call If Compare Assign If BoolOp Compare Compare Raise Call Assign Call If Compare Assign Call If Compare Assign Assign Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "error_translator",
    "source_code": "def error_translator(e):\n    error_message = str(e)\n    if 'not found in checkpoint' in error_message or 'Failed to find any matching files for' in error_message:\n        raise errors_impl.NotFoundError(None, None, error_message)\n    elif 'Sliced checkpoints are not supported' in error_message or 'Data type not supported' in error_message:\n        raise errors_impl.UnimplementedError(None, None, error_message)\n    elif 'Failed to get matching files on' in error_message:\n        raise errors_impl.InvalidArgumentError(None, None, error_message)\n    elif 'Unable to open table file' in error_message:\n        raise errors_impl.DataLossError(None, None, error_message)\n    elif 'Failed to find the saved tensor slices' in error_message or 'not convertible to numpy dtype' in error_message:\n        raise errors_impl.InternalError(None, None, error_message)\n    else:\n        raise errors_impl.OpError(None, None, error_message, errors_impl.UNKNOWN)",
    "docstring": "Translate the tensor_slice_reader.cc errors.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\training\\py_checkpoint_reader.py",
    "ast_data": "FunctionDef name:error_translator arg:e arguments arg Assign Call If BoolOp Compare Compare Raise Call If BoolOp Compare Compare Raise Call If Compare Raise Call If Compare Raise Call If BoolOp Compare Compare Raise Call Raise Call"
  },
  {
    "library": "matplotlib",
    "name": "_broadcast_with_masks",
    "source_code": "def _broadcast_with_masks(*args, compress=False):\n    masks = [k.mask for k in args if isinstance(k, np.ma.MaskedArray)]\n    bcast = np.broadcast_arrays(*args, *masks)\n    inputs = bcast[:len(args)]\n    masks = bcast[len(args):]\n    if masks:\n        mask = np.logical_or.reduce(masks)\n        if compress:\n            inputs = [np.ma.array(k, mask=mask).compressed() for k in inputs]\n        else:\n            inputs = [np.ma.array(k, mask=mask, dtype=float).filled(np.nan).ravel() for k in inputs]\n    else:\n        inputs = [np.ravel(k) for k in inputs]\n    return inputs",
    "docstring": "Broadcast inputs, combining all masked arrays. Parameters ---------- *args : array-like The inputs to broadcast. compress : bool, default: False Whether to compress the masked arrays. If False, the masked values are replaced by NaNs. Returns ------- list of array-like The broadcasted and masked inputs.",
    "type": "function",
    "file_path": "matplotlib\\lib\\matplotlib\\cbook.py",
    "ast_data": "FunctionDef name:_broadcast_with_masks arguments arg arg Assign Call Assign Call Assign Call Assign Call If Assign Call If Assign Call Call Assign Call Call Call Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_If",
    "source_code": "class _If(_FunctionCaller):\n\n    def __init__(self, node, function, enclosing_graph):\n        super(_If, self).__init__(node, function, enclosing_graph, first_function_input=1, type_attribute='Tin', function_attributes=['then_branch', 'else_branch'])",
    "docstring": "Specialization of _Node to If-like operations.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\convert_to_constants.py",
    "ast_data": "ClassDef name:_If FunctionDef name:__init__ arg:self arg:node arg:function arg:enclosing_graph arguments arg arg arg arg Call Call"
  },
  {
    "library": "numpy",
    "name": "fix",
    "source_code": "@array_function_dispatch(_dispatcher, verify=False, module='numpy')\ndef fix(x, out=None):\n    res = nx.asanyarray(nx.ceil(x, out=out))\n    res = nx.floor(x, out=res, where=nx.greater_equal(x, 0))\n    if out is None and type(res) is nx.ndarray:\n        res = res[()]\n    return res",
    "docstring": "Round to nearest integer towards zero. Round an array of floats element-wise to nearest integer towards zero. The rounded values have the same data-type as the input. Parameters ---------- x : array_like An array to be rounded out : ndarray, optional A location into which the result is stored. If provided, it must have a shape that the input broadcasts to. If not provided or None, a freshly-allocated array is returned. Returns ------- out : ndarray of floats An array with the same dimensions and data-type as the input. If second argument is not supplied then a new array is returned with the rounded values. If a second argument is supplied the result is stored there. The return value `` is then a reference to that array. See Also -------- rint, trunc, floor, ceil around : Round to given number of decimals Examples -------- >>> import numpy as np >>> np.fix(3.14) 3.0 >>> np.fix(3) 3 >>> np.fix([2.1, 2.9, -2.1, -2.9]) array([ 2., 2., -2., -2.])",
    "type": "function",
    "file_path": "numpy\\numpy\\lib\\_ufunclike_impl.py",
    "ast_data": "FunctionDef name:fix arg:x arg:out arguments arg arg Assign Call Call Assign Call Call If BoolOp Compare Compare Call Assign Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_preprocess_grad",
    "source_code": "def _preprocess_grad(grad, body_graph_output, while_op_input, while_op_output):\n    if not _is_trainable(body_graph_output):\n        return None\n    if while_op_output.dtype in (dtypes.resource, dtypes.variant) and default_gradient.supports_default_grad(while_op_input) and (grad is None):\n        return _zeros_like(while_op_input, while_op_output)\n    if isinstance(grad, indexed_slices.IndexedSlices):\n        return ops.convert_to_tensor(grad)\n    return grad",
    "docstring": "Returns the initial gradient to be used for a given output tensor. Args: grad: the original gradient Tensor passed to the gradient function. body_graph_output: the corresponding Tensor in the body graph. while_op_input: the corresponding Tensor input of the While op. while_op_output: the corresponding Tensor output of the While op. Returns: A Tensor or None.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\while_v2.py",
    "ast_data": "FunctionDef name:_preprocess_grad arg:grad arg:body_graph_output arg:while_op_input arg:while_op_output arguments arg arg arg arg If Call Return return:no If BoolOp Compare Call Compare Return return:yes Call If Call Return return:yes Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "input_mask",
    "source_code": "@property\n@doc_controls.do_not_doc_inheritable\ndef input_mask(self):\n    inputs = self.input\n    if isinstance(inputs, list):\n        return [getattr(x, '_keras_mask', None) for x in inputs]\n    else:\n        return getattr(inputs, '_keras_mask', None)",
    "docstring": "Retrieves the input mask tensor(s) of a layer. Only applicable if the layer has exactly one inbound node, i.e. if it is connected to one incoming layer. Returns: Input mask tensor (potentially None) or list of input mask tensors. Raises: AttributeError: if the layer is connected to more than one incoming layers.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\base_layer.py",
    "ast_data": "FunctionDef name:input_mask arg:self arguments arg Assign If Call Return return:yes Call Return return:yes Call"
  },
  {
    "library": "kornia",
    "name": "histogram_matching",
    "source_code": "def histogram_matching(source: torch.Tensor, template: torch.Tensor) -> torch.Tensor:\n    oldshape = source.shape\n    source = source.ravel()\n    template = template.ravel()\n    _, bin_idx, s_counts = torch.unique(source, return_inverse=True, return_counts=True)\n    t_values, t_counts = torch.unique(template, return_counts=True)\n    s_quantiles = torch.cumsum(s_counts, dim=0, dtype=source.dtype)\n    s_quantiles = s_quantiles / s_quantiles[-1]\n    t_quantiles = torch.cumsum(t_counts, dim=0, dtype=source.dtype)\n    t_quantiles = t_quantiles / t_quantiles[-1]\n    interp_t_values = interp(s_quantiles, t_quantiles, t_values)\n    return interp_t_values[bin_idx].reshape(oldshape)",
    "docstring": "Adjust the pixel values of an image to match its histogram towards a target image. _ is the transformation of an image so that its histogram matches a specified histogram. In this implementation, the histogram is computed over the flattened image array. Code referred to _. Args: source: Image to transform. template: Template image. It can have different dimensions to source. Returns: The transformed output image as the same shape as the source image. Note: This function does not matches histograms element-wisely if input a batched tensor.",
    "type": "function",
    "file_path": "kornia\\kornia\\contrib\\histogram_matching.py",
    "ast_data": "FunctionDef name:histogram_matching arg:source arg:template arguments arg arg Assign Assign Call Assign Call Assign Call Assign Call Assign Call Assign Assign Call Assign Assign Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "flattened_inputs_and_outputs",
    "source_code": "def flattened_inputs_and_outputs(self):\n\n    def _flatten(input_or_output_dict):\n        flattened_items = []\n        for item in input_or_output_dict.values():\n            flattened_items.extend(item.flatten())\n        return flattened_items\n    return (_flatten(self.inputs), _flatten(self.outputs))",
    "docstring": "Return a list of inputs and outputs in a flattened format. Returns: Tuple of (inputs, outputs). where input and output i a list of names.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\lite\\python\\op_hint.py",
    "ast_data": "FunctionDef name:flattened_inputs_and_outputs arg:self arguments arg FunctionDef name:_flatten arg:input_or_output_dict arguments arg Assign For Call Call Call Return return:yes Return return:yes Call Call"
  },
  {
    "library": "django",
    "name": "get_nodes_by_type",
    "source_code": "def get_nodes_by_type(self, nodetype):\n    nodes = []\n    if isinstance(self, nodetype):\n        nodes.append(self)\n    for attr in self.child_nodelists:\n        nodelist = getattr(self, attr, None)\n        if nodelist:\n            nodes.extend(nodelist.get_nodes_by_type(nodetype))\n    return nodes",
    "docstring": "Return a list of all nodes (within this node and its nodelist) of the given type",
    "type": "method",
    "file_path": "django\\django\\template\\base.py",
    "ast_data": "FunctionDef name:get_nodes_by_type arg:self arg:nodetype arguments arg arg Assign If Call Call For Assign Call If Call Call Return return:yes"
  },
  {
    "library": "sphinx",
    "name": "escape_abbr",
    "source_code": "def escape_abbr(text: str) -> str:\n    return re.sub('\\\\.(?=\\\\s|$)', '.\\\\@{}', text)",
    "docstring": "Adjust spacing after abbreviations. Works with @ letter or other.",
    "type": "function",
    "file_path": "sphinx\\sphinx\\util\\texescape.py",
    "ast_data": "FunctionDef name:escape_abbr arg:text arguments arg Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "Bukin02",
    "source_code": "class Bukin02(Benchmark):\n\n    def __init__(self, dimensions=2):\n        Benchmark.__init__(self, dimensions)\n        self._bounds = [(-15.0, -5.0), (-3.0, 3.0)]\n        self.global_optimum = [[-15.0, 0.0]]\n        self.fglob = -124.75\n\n    def fun(self, x, *args):\n        self.nfev += 1\n        return 100 * (x[1] ** 2 - 0.01 * x[0] ** 2 + 1.0) + 0.01 * (x[0] + 10.0) ** 2.0",
    "docstring": "Bukin02 objective function. The Bukin02 [1]_ global optimization problem is a multimodal minimization problem defined as follows: .. math:: f_{\\text{Bukin02}}(x) = 100 (x_2^2 - 0.01x_1^2 + 1) + 0.01(x_1 + 10)^2 with :math: *Global optimum*: :math: for :math: .. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions For Global Optimization Problems Int. Journal of Mathematical Modelling and Numerical Optimisation, 2013, 4, 150-194. TODO: I think that Gavana and Jamil are wrong on this function. In both sources the x[1] term is not squared. As such there will be a minimum at the smallest value of x[1].",
    "type": "class",
    "file_path": "scipy\\benchmarks\\benchmarks\\go_benchmark_functions\\go_funcs_B.py",
    "ast_data": "ClassDef name:Bukin02 FunctionDef name:__init__ arg:self arg:dimensions arguments arg arg Call Assign Assign Assign FunctionDef name:fun arg:self arg:x arguments arg arg arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "is_good_size",
    "source_code": "@staticmethod\ndef is_good_size(s):\n    s = V.graph.sizevars.size_hint(s)\n    return s >= 32 and s % 32 == 0",
    "docstring": "Somewhat arbitrary heuristic used to boost scores for some sizes",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\codegen\\simd.py",
    "ast_data": "FunctionDef name:is_good_size arg:s arguments arg Assign Call Return return:yes BoolOp Compare Compare"
  },
  {
    "library": "scikit-learn",
    "name": "_check_method_params",
    "source_code": "def _check_method_params(X, params, indices=None):\n    from . import _safe_indexing\n    method_params_validated = {}\n    for param_key, param_value in params.items():\n        if not _is_arraylike(param_value) and (not sp.issparse(param_value)) or _num_samples(param_value) != _num_samples(X):\n            method_params_validated[param_key] = param_value\n        else:\n            method_params_validated[param_key] = _make_indexable(param_value)\n            method_params_validated[param_key] = _safe_indexing(method_params_validated[param_key], indices)\n    return method_params_validated",
    "docstring": "Check and validate the parameters passed to a specific method like . Parameters ---------- X : array-like of shape (n_samples, n_features) Data array. params : dict Dictionary containing the parameters passed to the method. indices : array-like of shape (n_samples,), default=None Indices to be selected if the parameter has the same size as . Returns ------- method_params_validated : dict Validated parameters. We ensure that the values support indexing.",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\utils\\validation.py",
    "ast_data": "FunctionDef name:_check_method_params arg:X arg:params arg:indices arguments arg arg arg Assign For Call If BoolOp BoolOp Call Call Compare Call Call Assign Assign Call Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "__le__",
    "source_code": "def __le__(self, other):\n    other = as_dimension(other)\n    if self._value is None or other.value is None:\n        return None\n    else:\n        return self._value <= other.value",
    "docstring": "Returns True if is known to be less than or equal to . Dimensions are compared as follows: Args: other: Another Dimension. Returns: The value of if both are known, otherwise None.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\tensor_shape.py",
    "ast_data": "FunctionDef name:__le__ arg:self arg:other arguments arg arg Assign Call If BoolOp Compare Compare Return return:no Return return:yes Compare"
  },
  {
    "library": "tensorflow",
    "name": "_with_precomputed_nrows",
    "source_code": "def _with_precomputed_nrows(self):\n    return RowPartition(row_splits=self._row_splits, row_lengths=self._row_lengths, value_rowids=self._value_rowids, nrows=self.nrows(), nvals=self._nvals, uniform_row_length=self._uniform_row_length, internal=_row_partition_factory_key)",
    "docstring": "Returns a copy of with precomputed.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\row_partition.py",
    "ast_data": "FunctionDef name:_with_precomputed_nrows arg:self arguments arg Return return:yes Call Call"
  },
  {
    "library": "django",
    "name": "__mul__",
    "source_code": "def __mul__(self, n):\n    return self.__class__(list(self) * n)",
    "docstring": "multiply",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\geos\\mutable_list.py",
    "ast_data": "FunctionDef name:__mul__ arg:self arg:n arguments arg arg Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "is_deterministic_algorithms_warn_only_enabled",
    "source_code": "def is_deterministic_algorithms_warn_only_enabled() -> builtins.bool:\n    return _C._get_deterministic_algorithms_warn_only()",
    "docstring": "Returns True if the global deterministic flag is set to warn only. Refer to :func: documentation for more details.",
    "type": "function",
    "file_path": "pytorch\\torch\\__init__.py",
    "ast_data": "FunctionDef name:is_deterministic_algorithms_warn_only_enabled arguments Return return:yes Call"
  },
  {
    "library": "authlib",
    "name": "get_temporary_credential",
    "source_code": "def get_temporary_credential(self, request):\n    raise NotImplementedError()",
    "docstring": "Get the temporary credential from database or cache. A temporary credential should share the same methods as described in models of ``:: def get_temporary_credential(self, request): key = \"a-key-prefix:{}\".format(request.token) data = cache.get(key) # TemporaryCredential shares methods from TemporaryCredentialMixin return TemporaryCredential(data) :param request: OAuth1Request instance :return: TemporaryCredential instance",
    "type": "method",
    "file_path": "authlib\\authlib\\oauth1\\rfc5849\\authorization_server.py",
    "ast_data": "FunctionDef name:get_temporary_credential arg:self arg:request arguments arg arg Raise Call"
  },
  {
    "library": "tensorflow",
    "name": "_DenseToSparseBatchDataset",
    "source_code": "class _DenseToSparseBatchDataset(dataset_ops.UnaryDataset):\n\n    def __init__(self, input_dataset, batch_size, row_shape):\n        if not isinstance(dataset_ops.get_legacy_output_types(input_dataset), dtypes.DType):\n            raise TypeError(f'`dense_to_sparse_batch` requires an input dataset whose elements have a single component, but the given dataset has the following component types: {dataset_ops.get_legacy_output_types(input_dataset)}.')\n        self._input_dataset = input_dataset\n        self._batch_size = batch_size\n        self._row_shape = row_shape\n        self._element_spec = sparse_tensor.SparseTensorSpec(tensor_shape.TensorShape([None]).concatenate(self._row_shape), dataset_ops.get_legacy_output_types(input_dataset))\n        variant_tensor = ged_ops.dense_to_sparse_batch_dataset(self._input_dataset._variant_tensor, self._batch_size, row_shape=convert.partial_shape_to_tensor(self._row_shape), **self._flat_structure)\n        super(_DenseToSparseBatchDataset, self).__init__(input_dataset, variant_tensor)\n\n    @property\n    def element_spec(self):\n        return self._element_spec",
    "docstring": "A that batches ragged dense elements into s.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\data\\experimental\\ops\\batching.py",
    "ast_data": "ClassDef name:_DenseToSparseBatchDataset FunctionDef name:__init__ arg:self arg:input_dataset arg:batch_size arg:row_shape arguments arg arg arg arg If Call Call Raise Call Call Assign Assign Assign Assign Call Call Call Call Assign Call Call Call Call FunctionDef name:element_spec arg:self arguments arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_get_module_state",
    "source_code": "def _get_module_state(module: nn.Module) -> Optional[_State]:\n    global _module_state_mapping\n    if isinstance(module, _State):\n        return cast(_State, module)\n    elif module in _module_state_mapping:\n        state_ref = _module_state_mapping[module]\n        state = state_ref()\n        if state is None:\n            raise AssertionError('State has already been garbage collected')\n        return state\n    else:\n        return None",
    "docstring": "Return the ` and returned. If it is managed by a composable API, the corresponding `` will be returned.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\_composable_state.py",
    "ast_data": "FunctionDef name:_get_module_state arg:module arguments arg If Call Return return:yes Call If Compare Assign Assign Call If Compare Raise Call Return return:yes Return return:no"
  },
  {
    "library": "matplotlib",
    "name": "get_minorticklabels",
    "source_code": "def get_minorticklabels(self):\n    self._update_ticks()\n    ticks = self.get_minor_ticks()\n    labels1 = [tick.label1 for tick in ticks if tick.label1.get_visible()]\n    labels2 = [tick.label2 for tick in ticks if tick.label2.get_visible()]\n    return labels1 + labels2",
    "docstring": "Return this Axis' minor tick labels, as a list of .",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\axis.py",
    "ast_data": "FunctionDef name:get_minorticklabels arg:self arguments arg Call Assign Call Assign Call Assign Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "_clip",
    "source_code": "def _clip(self, X):\n    if self.shape == 'square':\n        for X_part, mx in zip(X, (self.N, self.M)):\n            X_part[X_part < 0] = 0\n            if X_part.dtype.kind == 'f':\n                X_part[X_part > 1] = 1\n            else:\n                X_part[X_part >= mx] = mx - 1\n    elif self.shape == 'ignore':\n        for X_part, mx in zip(X, (self.N, self.M)):\n            X_part[X_part < 0] = -1\n            if X_part.dtype.kind == 'f':\n                X_part[X_part > 1] = -1\n            else:\n                X_part[X_part >= mx] = -1\n    elif self.shape == 'circle' or self.shape == 'circleignore':\n        for X_part in X:\n            if X_part.dtype.kind != 'f':\n                raise NotImplementedError('Circular bivariate colormaps are only implemented for use with with floats')\n        radii_sqr = (X[0] - 0.5) ** 2 + (X[1] - 0.5) ** 2\n        mask_outside = radii_sqr > 0.25\n        if self.shape == 'circle':\n            overextend = 2 * np.sqrt(radii_sqr[mask_outside])\n            X[0][mask_outside] = (X[0][mask_outside] - 0.5) / overextend + 0.5\n            X[1][mask_outside] = (X[1][mask_outside] - 0.5) / overextend + 0.5\n        else:\n            X[0][mask_outside] = -1\n            X[1][mask_outside] = -1",
    "docstring": "For internal use when applying a BivarColormap to data. i.e. cm.ScalarMappable().to_rgba() Clips X[0] and X[1] according to 'self.shape'. X is modified in-place. Parameters ---------- X: np.array array of floats or ints to be clipped shape : {'square', 'circle', 'ignore', 'circleignore'} - If 'square' each variate is clipped to [0,1] independently - If 'circle' the variates are clipped radially to the center of the colormap. It is assumed that a circular mask is applied when the colormap is displayed - If 'ignore' the variates are not clipped, but instead assigned the 'outside' color - If 'circleignore' a circular mask is applied, but the data is not clipped and instead assigned the 'outside' color",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\colors.py",
    "ast_data": "FunctionDef name:_clip arg:self arg:X arguments arg arg If Compare For Call Assign Compare If Compare Assign Compare Assign Compare If Compare For Call Assign Compare If Compare Assign Compare Assign Compare If BoolOp Compare Compare For If Compare Raise Call Assign Assign Compare If Compare Assign Call Assign Assign Assign Assign"
  },
  {
    "library": "matplotlib",
    "name": "__init__",
    "source_code": "def __init__(self, head_length=0.5, head_width=0.5, tail_width=0.2):\n    self.head_length, self.head_width, self.tail_width = (head_length, head_width, tail_width)\n    super().__init__()",
    "docstring": "Parameters ---------- head_length : float, default: 0.5 Length of the arrow head. head_width : float, default: 0.5 Width of the arrow head. tail_width : float, default: 0.2 Width of the arrow tail.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\patches.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:head_length arg:head_width arg:tail_width arguments arg arg arg arg Assign Call Call"
  },
  {
    "library": "scikit-learn",
    "name": "trigger_stopping",
    "source_code": "def trigger_stopping(self, msg, verbose):\n    if verbose:\n        print(msg + ' Stopping.')\n    return True",
    "docstring": "Decides whether it is time to stop training Parameters ---------- msg : str Message passed in for verbose output verbose : bool Print message to stdin if True Returns ------- is_stopping : bool True if training needs to stop",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\neural_network\\_stochastic_optimizers.py",
    "ast_data": "FunctionDef name:trigger_stopping arg:self arg:msg arg:verbose arguments arg arg arg If Call Return return:yes"
  },
  {
    "library": "pandas",
    "name": "get_values_for_csv",
    "source_code": "@final\ndef get_values_for_csv(self, *, float_format, date_format, decimal, na_rep: str='nan', quoting=None) -> Block:\n    result = get_values_for_csv(self.values, na_rep=na_rep, quoting=quoting, float_format=float_format, date_format=date_format, decimal=decimal)\n    return self.make_block(result)",
    "docstring": "convert to our native types format",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\internals\\blocks.py",
    "ast_data": "FunctionDef name:get_values_for_csv arg:self arguments arg arg arg arg arg arg Assign Call Return return:yes Call"
  },
  {
    "library": "kornia",
    "name": "transform_keypoints_",
    "source_code": "def transform_keypoints_(self, M: Tensor) -> 'Keypoints3D':\n    return self.transform_keypoints(M, inplace=True)",
    "docstring": "Inplace version of :func:.",
    "type": "method",
    "file_path": "kornia\\kornia\\geometry\\keypoints.py",
    "ast_data": "FunctionDef name:transform_keypoints_ arg:self arg:M arguments arg arg Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "ODEintWarning",
    "source_code": "class ODEintWarning(Warning):\n    pass",
    "docstring": "Warning raised during the execution of .",
    "type": "class",
    "file_path": "scipy\\scipy\\integrate\\_odepack_py.py",
    "ast_data": "ClassDef name:ODEintWarning"
  },
  {
    "library": "tensorflow",
    "name": "TextFileIdTableInitializer",
    "source_code": "class TextFileIdTableInitializer(TextFileInitializer):\n\n    def __init__(self, filename, key_column_index=TextFileIndex.WHOLE_LINE, value_column_index=TextFileIndex.LINE_NUMBER, vocab_size=None, delimiter='\\t', name='text_file_id_table_init', key_dtype=dtypes.string):\n        super(TextFileIdTableInitializer, self).__init__(filename, key_dtype, key_column_index, dtypes.int64, value_column_index, vocab_size=vocab_size, delimiter=delimiter, name=name)",
    "docstring": "Table initializer for string to IDs tables from a text file.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\lookup_ops.py",
    "ast_data": "ClassDef name:TextFileIdTableInitializer FunctionDef name:__init__ arg:self arg:filename arg:key_column_index arg:value_column_index arg:vocab_size arg:delimiter arg:name arg:key_dtype arguments arg arg arg arg arg arg arg arg Call Call"
  },
  {
    "library": "matplotlib",
    "name": "ImageMagickWriter",
    "source_code": "@writers.register('imagemagick')\nclass ImageMagickWriter(ImageMagickBase, MovieWriter):\n    input_names = '-'",
    "docstring": "Pipe-based animated gif writer. Frames are streamed directly to ImageMagick via a pipe and written in a single pass.",
    "type": "class",
    "file_path": "matplotlib\\lib\\matplotlib\\animation.py",
    "ast_data": "ClassDef name:ImageMagickWriter Assign Call"
  },
  {
    "library": "tensorflow",
    "name": "leaky_relu",
    "source_code": "@tf_export('nn.leaky_relu')\n@dispatch.register_unary_elementwise_api\n@dispatch.add_dispatch_support\ndef leaky_relu(features, alpha=0.2, name=None):\n    with ops.name_scope(name, 'LeakyRelu', [features, alpha]) as name:\n        features = ops.convert_to_tensor(features, name='features')\n        if features.dtype.is_integer:\n            features = math_ops.cast(features, dtypes.float32)\n        if isinstance(alpha, np.ndarray):\n            alpha = alpha.item()\n        return gen_nn_ops.leaky_relu(features, alpha=alpha, name=name)",
    "docstring": "Compute the Leaky ReLU activation function. Source: [Rectifier Nonlinearities Improve Neural Network Acoustic Models. AL Maas, AY Hannun, AY Ng - Proc. ICML, 2013] ( Args: features: A representing preactivation values. Must be one of the following types: , , , , . alpha: Slope of the activation function at x < 0. name: A name for the operation (optional). Returns: The activation value. References: Rectifier Nonlinearities Improve Neural Network Acoustic Models: [Maas et al., 2013] ( ([pdf] (",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\nn_ops.py",
    "ast_data": "FunctionDef name:leaky_relu arg:features arg:alpha arg:name arguments arg arg arg With Call Assign Call If Assign Call If Call Assign Call Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "tfr_gen_from_module",
    "source_code": "def tfr_gen_from_module(source, method_prefix=None, op_libraries=None, op_defs=OpDefCache()):\n    mlir_funcs = tfr_funcs_gen_from_module(source, op_defs, method_prefix, op_libraries)\n    return '\\n'.join(mlir_funcs + op_defs.mlir_external_funcs())",
    "docstring": "Parse the input source module and emit the TFR and external functions.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\compiler\\mlir\\tfr\\python\\tfr_gen.py",
    "ast_data": "FunctionDef name:tfr_gen_from_module arg:source arg:method_prefix arg:op_libraries arg:op_defs arguments arg arg arg arg Call Assign Call Return return:yes Call Call"
  },
  {
    "library": "scipy",
    "name": "_solve",
    "source_code": "def _solve(func):\n    nc = 1.0\n    value = func(nc)\n    if value == 0:\n        return nc\n    factor = 2.0\n    if value > 0:\n        nc /= factor\n        while func(nc) > 0:\n            nc /= factor\n        lo = nc\n        hi = factor * nc\n    else:\n        nc *= factor\n        while func(nc) < 0:\n            nc *= factor\n        lo = nc / factor\n        hi = nc\n    nc = brentq(func, lo, hi, xtol=1e-13)\n    return nc",
    "docstring": "Solve func(nc) = 0. func must be an increasing function.",
    "type": "function",
    "file_path": "scipy\\scipy\\stats\\_odds_ratio.py",
    "ast_data": "FunctionDef name:_solve arg:func arguments arg Assign Assign Call If Compare Return return:yes Assign If Compare While Compare Call Assign Assign While Compare Call Assign Assign Assign Call Return return:yes"
  },
  {
    "library": "django",
    "name": "select_for_update",
    "source_code": "def select_for_update(self, nowait=False, skip_locked=False, of=(), no_key=False):\n    if nowait and skip_locked:\n        raise ValueError('The nowait option cannot be used with skip_locked.')\n    obj = self._chain()\n    obj._for_write = True\n    obj.query.select_for_update = True\n    obj.query.select_for_update_nowait = nowait\n    obj.query.select_for_update_skip_locked = skip_locked\n    obj.query.select_for_update_of = of\n    obj.query.select_for_no_key_update = no_key\n    return obj",
    "docstring": "Return a new QuerySet instance that will select objects with a FOR UPDATE lock.",
    "type": "method",
    "file_path": "django\\django\\db\\models\\query.py",
    "ast_data": "FunctionDef name:select_for_update arg:self arg:nowait arg:skip_locked arg:of arg:no_key arguments arg arg arg arg arg If BoolOp Raise Call Assign Call Assign Assign Assign Assign Assign Assign Return return:yes"
  },
  {
    "library": "pandas",
    "name": "_format_duplicate_message",
    "source_code": "@final\ndef _format_duplicate_message(self) -> DataFrame:\n    from pandas import Series\n    duplicates = self[self.duplicated(keep='first')].unique()\n    assert len(duplicates)\n    out = Series(np.arange(len(self)), copy=False).groupby(self, observed=False).agg(list)[duplicates]\n    if self._is_multi:\n        out.index = type(self).from_tuples(out.index)\n    if self.nlevels == 1:\n        out = out.rename_axis('label')\n    return out.to_frame(name='positions')",
    "docstring": "Construct the DataFrame for a DuplicateLabelError. This returns a DataFrame indicating the labels and positions of duplicates in an index. This should only be called when it's already known that duplicates are present. Examples -------- >>> idx = pd.Index([\"a\", \"b\", \"a\"]) >>> idx._format_duplicate_message() positions label a [0, 2]",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\indexes\\base.py",
    "ast_data": "FunctionDef name:_format_duplicate_message arg:self arguments arg Assign Call Call Call Assign Call Call Call Call Call If Assign Call Call If Compare Assign Call Return return:yes Call"
  },
  {
    "library": "numpy",
    "name": "_block_check_depths_match",
    "source_code": "def _block_check_depths_match(arrays, parent_index=[]):\n    if isinstance(arrays, tuple):\n        raise TypeError(f'{_block_format_index(parent_index)} is a tuple. Only lists can be used to arrange blocks, and np.block does not allow implicit conversion from tuple to ndarray.')\n    elif isinstance(arrays, list) and len(arrays) > 0:\n        idxs_ndims = (_block_check_depths_match(arr, parent_index + [i]) for i, arr in enumerate(arrays))\n        first_index, max_arr_ndim, final_size = next(idxs_ndims)\n        for index, ndim, size in idxs_ndims:\n            final_size += size\n            if ndim > max_arr_ndim:\n                max_arr_ndim = ndim\n            if len(index) != len(first_index):\n                raise ValueError(f'List depths are mismatched. First element was at depth {len(first_index)}, but there is an element at depth {len(index)} ({_block_format_index(index)})')\n            if index[-1] is None:\n                first_index = index\n        return (first_index, max_arr_ndim, final_size)\n    elif isinstance(arrays, list) and len(arrays) == 0:\n        return (parent_index + [None], 0, 0)\n    else:\n        size = _size(arrays)\n        return (parent_index, _ndim(arrays), size)",
    "docstring": "Recursive function checking that the depths of nested lists in all match. Mismatch raises a ValueError as described in the block docstring below. The entire index (rather than just the depth) needs to be calculated for each innermost list, in case an error needs to be raised, so that the index of the offending list can be printed as part of the error. Parameters ---------- arrays : nested list of arrays The arrays to check parent_index : list of int The full index of within the nested lists passed to at the top of the recursion. Returns ------- first_index : list of int The full index of an element from the bottom of the nesting in . If any element at the bottom is an empty list, this will refer to it, and the last index along the empty axis will be None. max_arr_ndim : int The maximum of the ndims of the arrays nested in . final_size: int The number of elements in the final array. This is used the motivate the choice of algorithm used using benchmarking wisdom.",
    "type": "function",
    "file_path": "numpy\\numpy\\_core\\shape_base.py",
    "ast_data": "FunctionDef name:_block_check_depths_match arg:arrays arg:parent_index arguments arg arg If Call Raise Call Call If BoolOp Call Compare Call Assign Call Call Assign Call For If Compare Assign If Compare Call Call Raise Call Call Call Call If Compare Assign Return return:yes If BoolOp Call Compare Call Return return:yes Assign Call Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "_DataFrameTableBuilderNonVerbose",
    "source_code": "class _DataFrameTableBuilderNonVerbose(_DataFrameTableBuilder):\n\n    def _fill_non_empty_info(self) -> None:\n        self.add_object_type_line()\n        self.add_index_range_line()\n        self.add_columns_summary_line()\n        self.add_dtypes_line()\n        if self.display_memory_usage:\n            self.add_memory_usage_line()\n\n    def add_columns_summary_line(self) -> None:\n        self._lines.append(self.ids._summary(name='Columns'))",
    "docstring": "Dataframe info table builder for non-verbose output.",
    "type": "class",
    "file_path": "pandas\\pandas\\io\\formats\\info.py",
    "ast_data": "ClassDef name:_DataFrameTableBuilderNonVerbose FunctionDef name:_fill_non_empty_info arg:self arguments arg Call Call Call Call If Call FunctionDef name:add_columns_summary_line arg:self arguments arg Call Call"
  },
  {
    "library": "tensorflow",
    "name": "from_value",
    "source_code": "@staticmethod\ndef from_value(value):\n    return DatasetSpec(value.element_spec)",
    "docstring": "Creates a for the given value.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\data\\ops\\dataset_ops.py",
    "ast_data": "FunctionDef name:from_value arg:value arguments arg Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "random_state",
    "source_code": "@property\ndef random_state(self):\n    return self._random_state",
    "docstring": "Get or set the generator object for generating random variates. If is None (or ), the singleton is used. If is an int, a new `random_staterandom_state` instance, that instance is used.",
    "type": "method",
    "file_path": "scipy\\scipy\\stats\\_distn_infrastructure.py",
    "ast_data": "FunctionDef name:random_state arg:self arguments arg Return return:yes"
  },
  {
    "library": "django",
    "name": "_populate_directed_relation_graph",
    "source_code": "def _populate_directed_relation_graph(self):\n    related_objects_graph = defaultdict(list)\n    all_models = self.apps.get_models(include_auto_created=True)\n    for model in all_models:\n        opts = model._meta\n        if opts.abstract:\n            continue\n        fields_with_relations = (f for f in opts._get_fields(reverse=False, include_parents=False) if f.is_relation and f.related_model is not None)\n        for f in fields_with_relations:\n            if not isinstance(f.remote_field.model, str):\n                remote_label = f.remote_field.model._meta.concrete_model._meta.label\n                related_objects_graph[remote_label].append(f)\n    for model in all_models:\n        related_objects = related_objects_graph[model._meta.concrete_model._meta.label]\n        model._meta.__dict__['_relation_tree'] = related_objects\n    return self.__dict__.get('_relation_tree', EMPTY_RELATION_TREE)",
    "docstring": "This method is used by each model to find its reverse objects. As this method is very expensive and is accessed frequently (it looks up every field in a model, in every app), it is computed on first access and then is set as a property on every model.",
    "type": "method",
    "file_path": "django\\django\\db\\models\\options.py",
    "ast_data": "FunctionDef name:_populate_directed_relation_graph arg:self arguments arg Assign Call Assign Call For Assign If Assign Call BoolOp Compare For If Call Assign Call For Assign Assign Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "master",
    "source_code": "def master(self, task_type=None, task_id=None, rpc_layer=None):\n    task_type = task_type if task_type is not None else self.task_type\n    task_id = task_id if task_id is not None else self.task_id\n    if task_type is not None and task_id is not None:\n        return format_master_url(self.cluster_spec().task_address(task_type, task_id), rpc_layer or self.rpc_layer)\n    return ''",
    "docstring": "Returns the master address to use when creating a session. You must have set the task_type and task_id object properties before calling this function, or pass in the and parameters when using this function. If you do both, the function parameters will override the object properties. Note: this is only useful for TensorFlow 1.x. Args: task_type: (Optional) The type of the TensorFlow task of the master. task_id: (Optional) The index of the TensorFlow task of the master. rpc_layer: (Optional) The RPC protocol for the given cluster. Returns: The name or URL of the session master.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\cluster_resolver\\kubernetes_cluster_resolver.py",
    "ast_data": "FunctionDef name:master arg:self arg:task_type arg:task_id arg:rpc_layer arguments arg arg arg arg Assign Compare Assign Compare If BoolOp Compare Compare Return return:yes Call Call Call BoolOp Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "fit_transform",
    "source_code": "def fit_transform(self, X, y=None, **fit_params):\n    return super().fit_transform(X, y, **fit_params)",
    "docstring": "Return class labels or probabilities for each estimator. Return predictions for X for each estimator. Parameters ---------- X : {array-like, sparse matrix, dataframe} of shape (n_samples, n_features) Input samples. y : ndarray of shape (n_samples,), default=None Target values (None for unsupervised transformations). **fit_params : dict Additional fit parameters. Returns ------- X_new : ndarray array of shape (n_samples, n_features_new) Transformed array.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\ensemble\\_voting.py",
    "ast_data": "FunctionDef name:fit_transform arg:self arg:X arg:y arguments arg arg arg arg Return return:yes Call Call"
  },
  {
    "library": "scipy",
    "name": "bei_zeros",
    "source_code": "def bei_zeros(nt):\n    if not isscalar(nt) or floor(nt) != nt or nt <= 0:\n        raise ValueError('nt must be positive integer scalar.')\n    return _specfun.klvnzo(nt, 2)",
    "docstring": "Compute nt zeros of the Kelvin function bei. Parameters ---------- nt : int Number of zeros to compute. Must be positive. Returns ------- ndarray First zeros of the Kelvin function. See Also -------- bei References ---------- .. [1] Zhang, Shanjie and Jin, Jianming. \"Computation of Special Functions\", John Wiley and Sons, 1996.",
    "type": "function",
    "file_path": "scipy\\scipy\\special\\_basic.py",
    "ast_data": "FunctionDef name:bei_zeros arg:nt arguments arg If BoolOp Call Compare Call Compare Raise Call Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "__repr__",
    "source_code": "def __repr__(self):\n    return f'{self.__class__.__name__}(\\n{repr(self.A)},\\n{repr(self.B)},\\n{repr(self.C)},\\n{repr(self.D)},\\ndt: {repr(self.dt)}\\n)'",
    "docstring": "Return representation of the system.",
    "type": "method",
    "file_path": "scipy\\scipy\\signal\\_ltisys.py",
    "ast_data": "FunctionDef name:__repr__ arg:self arguments arg Return return:yes Call Call Call Call Call"
  },
  {
    "library": "scipy",
    "name": "splantider",
    "source_code": "def splantider(tck, n=1):\n    if isinstance(tck, BSpline):\n        return tck.antiderivative(n)\n    else:\n        return _impl.splantider(tck, n)",
    "docstring": "Compute the spline for the antiderivative (integral) of a given spline. .. legacy:: function Specifically, we recommend constructing a object and using its `tckspldertckK(m) = \\int_0^{\\pi/2} [1 - m\\sin^2 x]^{-1/2} dx`: >>> from scipy.special import ellipk >>> ellipk(0.8) 2.2572053268208538",
    "type": "function",
    "file_path": "scipy\\scipy\\interpolate\\_fitpack_py.py",
    "ast_data": "FunctionDef name:splantider arg:tck arg:n arguments arg arg If Call Return return:yes Call Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "generic_gradient_magnitude",
    "source_code": "@_ni_docstrings.docfiller\ndef generic_gradient_magnitude(input, derivative, output=None, mode='reflect', cval=0.0, extra_arguments=(), extra_keywords=None, *, axes=None):\n    if extra_keywords is None:\n        extra_keywords = {}\n    input = np.asarray(input)\n    output = _ni_support._get_output(output, input)\n    axes = _ni_support._check_axes(axes, input.ndim)\n    if len(axes) > 0:\n        modes = _ni_support._normalize_sequence(mode, len(axes))\n        derivative(input, axes[0], output, modes[0], cval, *extra_arguments, **extra_keywords)\n        np.multiply(output, output, output)\n        for ii in range(1, len(axes)):\n            tmp = derivative(input, axes[ii], output.dtype, modes[ii], cval, *extra_arguments, **extra_keywords)\n            np.multiply(tmp, tmp, tmp)\n            output += tmp\n        np.sqrt(output, output, casting='unsafe')\n    else:\n        output[...] = input[...]\n    return output",
    "docstring": "Gradient magnitude using a provided gradient function. Parameters ---------- %(input)s derivative : callable Callable with the following signature:: derivative(input, axis, output, mode, cval, *extra_arguments, **extra_keywords) See , below. can assume that and are ndarrays. Note that the output from is modified inplace; be careful to copy important inputs before returning them. %(output)s %(mode_multiple)s %(cval)s %(extra_keywords)s %(extra_arguments)s axes : tuple of int or None The axes over which to apply the filter. If a tuple is provided, its length must match the number of axes. Returns ------- generic_gradient_magnitude : ndarray Filtered array. Has the same shape as .",
    "type": "function",
    "file_path": "scipy\\scipy\\ndimage\\_filters.py",
    "ast_data": "FunctionDef name:generic_gradient_magnitude arg:input arg:derivative arg:output arg:mode arg:cval arg:extra_arguments arg:extra_keywords arguments arg arg arg arg arg arg arg arg If Compare Assign Assign Call Assign Call Assign Call If Compare Call Assign Call Call Call Call For Call Call Assign Call Call Call Assign Return return:yes"
  },
  {
    "library": "authlib",
    "name": "validate_iat",
    "source_code": "def validate_iat(self, now, leeway):\n    if 'iat' in self:\n        iat = self['iat']\n        if not _validate_numeric_time(iat):\n            raise InvalidClaimError('iat')\n        if iat > now + leeway:\n            raise InvalidTokenError(description='The token is not valid as it was issued in the future')",
    "docstring": "The \"iat\" (issued at) claim identifies the time at which the JWT was issued. This claim can be used to determine the age of the JWT. Implementers MAY provide for some small leeway, usually no more than a few minutes, to account for clock skew. Its value MUST be a number containing a NumericDate value. Use of this claim is OPTIONAL.",
    "type": "method",
    "file_path": "authlib\\authlib\\jose\\rfc7519\\claims.py",
    "ast_data": "FunctionDef name:validate_iat arg:self arg:now arg:leeway arguments arg arg arg If Compare Assign If Call Raise Call If Compare Raise Call"
  },
  {
    "library": "matplotlib",
    "name": "_view_transformation_uvw",
    "source_code": "def _view_transformation_uvw(u, v, w, E):\n    Mr = np.eye(4)\n    Mt = np.eye(4)\n    Mr[:3, :3] = [u, v, w]\n    Mt[:3, -1] = -E\n    M = np.dot(Mr, Mt)\n    return M",
    "docstring": "Return the view transformation matrix. Parameters ---------- u : 3-element numpy array Unit vector pointing towards the right of the screen. v : 3-element numpy array Unit vector pointing towards the top of the screen. w : 3-element numpy array Unit vector pointing out of the screen. E : 3-element numpy array The coordinates of the eye/camera.",
    "type": "function",
    "file_path": "matplotlib\\lib\\mpl_toolkits\\mplot3d\\proj3d.py",
    "ast_data": "FunctionDef name:_view_transformation_uvw arg:u arg:v arg:w arg:E arguments arg arg arg arg Assign Call Assign Call Assign Assign Assign Call Return return:yes"
  },
  {
    "library": "pandas",
    "name": "should_trim",
    "source_code": "def should_trim(values: ArrayLike | list[str]) -> bool:\n    numbers = [x for x in values if is_number_with_decimal(x)]\n    return len(numbers) > 0 and all((x.endswith('0') for x in numbers))",
    "docstring": "Determine if an array of strings should be trimmed. Returns True if all numbers containing decimals (defined by the above regular expression) within the array end in a zero, otherwise returns False.",
    "type": "function",
    "file_path": "pandas\\pandas\\io\\formats\\format.py",
    "ast_data": "FunctionDef name:should_trim arg:values arguments arg Assign Call Return return:yes BoolOp Compare Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "_context_parallel_buffers",
    "source_code": "def _context_parallel_buffers(mesh: DeviceMesh, buffers: list[torch.Tensor], buffer_seq_dims: list[int]) -> list[torch.Tensor]:\n    new_buffers = []\n    sharder = _RoundRobinLoadBalancer if _cp_options.enable_load_balance else _SequentialSharder\n    for buffer, seq_dim in zip(buffers, buffer_seq_dims):\n        new_buffers.append(sharder.shard(buffer, mesh, seq_dim))\n    return new_buffers",
    "docstring": "Shard the buffers along the sequence dimensions according to CP rules.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\tensor\\experimental\\_attention.py",
    "ast_data": "FunctionDef name:_context_parallel_buffers arg:mesh arg:buffers arg:buffer_seq_dims arguments arg arg arg Assign Assign For Call Call Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "set_rgrids",
    "source_code": "def set_rgrids(self, radii, labels=None, angle=None, fmt=None, **kwargs):\n    radii = self.convert_xunits(radii)\n    radii = np.asarray(radii)\n    self.set_yticks(radii)\n    if labels is not None:\n        self.set_yticklabels(labels)\n    elif fmt is not None:\n        self.yaxis.set_major_formatter(mticker.FormatStrFormatter(fmt))\n    if angle is None:\n        angle = self.get_rlabel_position()\n    self.set_rlabel_position(angle)\n    for t in self.yaxis.get_ticklabels():\n        t._internal_update(kwargs)\n    return (self.yaxis.get_gridlines(), self.yaxis.get_ticklabels())",
    "docstring": "Set the radial gridlines on a polar plot. Parameters ---------- radii : tuple with floats The radii for the radial gridlines labels : tuple with strings or None The labels to use at each radial gridline. The will be used if None. angle : float The angular position of the radius labels in degrees. fmt : str or None Format string used in . For example '%f'. Returns ------- lines : list of The radial gridlines. labels : list of The tick labels. Other Parameters ---------------- **kwargs *kwargs* are optional properties for the labels. .. warning:: This only sets the properties of the current ticks. Ticks are not guaranteed to be persistent. Various operations can create, delete and modify the Tick instances. There is an imminent risk that these settings can get lost if you work on the figure further (including also panning/zooming on a displayed figure). Use instead if possible. See Also -------- .PolarAxes.set_thetagrids .Axis.get_gridlines .Axis.get_ticklabels",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\projections\\polar.py",
    "ast_data": "FunctionDef name:set_rgrids arg:self arg:radii arg:labels arg:angle arg:fmt arguments arg arg arg arg arg arg Assign Call Assign Call Call If Compare Call If Compare Call Call If Compare Assign Call Call For Call Call Return return:yes Call Call"
  },
  {
    "library": "kornia",
    "name": "r",
    "source_code": "@property\ndef r(self) -> So3:\n    return self._rotation",
    "docstring": "Return the underlying rotation(So3).",
    "type": "method",
    "file_path": "kornia\\kornia\\geometry\\liegroup\\se3.py",
    "ast_data": "FunctionDef name:r arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "is_extension_type",
    "source_code": "def is_extension_type(tensor):\n    return isinstance(tensor, composite_tensor.CompositeTensor)",
    "docstring": "Returns whether a tensor is of an ExtensionType. github.com/tensorflow/community/pull/269 Currently it works by checking if is a instance, but this will be changed to use an appropriate extensiontype protocol check once ExtensionType is made public. Args: tensor: An object to test Returns: True if the tensor is an extension type object, false if not.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\utils\\tf_utils.py",
    "ast_data": "FunctionDef name:is_extension_type arg:tensor arguments arg Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "_estimator_type",
    "source_code": "@property\ndef _estimator_type(self):\n    if not self.steps:\n        return None\n    return self.steps[-1][1]._estimator_type",
    "docstring": "Return the estimator type of the last step in the pipeline.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\pipeline.py",
    "ast_data": "FunctionDef name:_estimator_type arg:self arguments arg If Return return:no Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "assert_is_fully_defined",
    "source_code": "def assert_is_fully_defined(self):\n    if not self.is_fully_defined():\n        raise ValueError('Shape %s is not fully defined' % self)",
    "docstring": "Raises an exception if is not fully defined in every dimension. Raises: ValueError: If does not have a known value for every dimension.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\tensor_shape.py",
    "ast_data": "FunctionDef name:assert_is_fully_defined arg:self arguments arg If Call Raise Call"
  },
  {
    "library": "tensorflow",
    "name": "get_cuda_compute_capability",
    "source_code": "def get_cuda_compute_capability(source_from_url=False):\n    if not GPU_TYPE:\n        if FLAGS.debug:\n            print('Warning: GPU_TYPE is empty. Make sure to call `get_gpu_type()` first.')\n    elif GPU_TYPE == 'unknown':\n        if FLAGS.debug:\n            print('Warning: Unknown GPU is detected. Skipping CUDA compute capability retrieval.')\n    else:\n        if source_from_url:\n            cuda_compute_capa = cuda_compute_capability.retrieve_from_web()\n        else:\n            cuda_compute_capa = cuda_compute_capability.retrieve_from_golden()\n        return cuda_compute_capa[GPU_TYPE]\n    return",
    "docstring": "Retrieves CUDA compute capability based on the detected GPU type. This function uses the module to retrieve the corresponding CUDA compute capability for the given GPU type. Args: source_from_url: Boolean deciding whether to source compute capability from NVIDIA website or from a local golden file. Returns: List of all supported CUDA compute capabilities for the given GPU type. e.g. ['3.5', '3.7']",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\tools\\tensorflow_builder\\config_detector\\config_detector.py",
    "ast_data": "FunctionDef name:get_cuda_compute_capability arg:source_from_url arguments arg If If Call If Compare If Call If Assign Call Assign Call Return return:yes Return return:no"
  },
  {
    "library": "tensorflow",
    "name": "get_unsharded_shape",
    "source_code": "def get_unsharded_shape(self, shapes):\n    self._fill_default_values()\n    if len(shapes) != self.number_of_shards:\n        raise ValueError(f'Shapes {shapes} is length {len(shapes)} but must be a list of length number_of_shards={self.number_of_shards}')\n    unsharded_shapes = [self._unshard_shape(s) for s in shapes]\n    for i in range(self.number_of_shards - 1):\n        if not unsharded_shapes[i].is_compatible_with(unsharded_shapes[self.number_of_shards - 1]):\n            raise ValueError(f'Sharded shapes {shapes} are not consistent shards of a full shape sharded {self.number_of_shards} ways along dimension {self.shard_dimension}.')\n    return unsharded_shapes[0]",
    "docstring": "Returns the shape of an unsharded Tensor given a list of shards. When given a list of shapes of shards, returns the shape of the unsharded Tensor that would generate the shards. Sets defaults for the policy if number_of_shards or shard_dimension is None. Args: shapes: The shapes of the Tensor shards to be combined. Returns: The shape of the unsharded version of the Tensor. Raises: ValueError: if shapes is not a list of length self.number_of_shards; or any element of shapes is not a valid shape consistent with the sharding policy; or the list of shapes is not a valid sharding of a full shape. TypeError: if an element of shapes is not convertible to a TensorShape",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\tpu\\tpu_sharding.py",
    "ast_data": "FunctionDef name:get_unsharded_shape arg:self arg:shapes arguments arg arg Call If Compare Call Raise Call Call Assign Call For Call If Call Raise Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "CancelledError",
    "source_code": "@tf_export('errors.CancelledError')\nclass CancelledError(OpError):\n\n    def __init__(self, node_def, op, message, *args):\n        super(CancelledError, self).__init__(node_def, op, message, CANCELLED, *args)",
    "docstring": "Raised when an operation is cancelled. For example, a long-running operation e.g., or a call may be cancelled by either running another operation e.g. or a remote worker failure. This long-running operation will fail by raising . Example: >>> q = tf.queue.FIFOQueue(10, tf.float32, ((),)) >>> q.enqueue((10.0,)) >>> q.close() >>> q.enqueue((10.0,)) Traceback (most recent call last): ... CancelledError: ...",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\errors_impl.py",
    "ast_data": "ClassDef name:CancelledError FunctionDef name:__init__ arg:self arg:node_def arg:op arg:message arguments arg arg arg arg arg Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "wrap_cpp_module",
    "source_code": "def wrap_cpp_module(cpp_module):\n\n    def init_fn(script_module):\n        for name, cpp_module in torch._C.ModuleDict(script_module._c).items():\n            setattr(script_module, name, wrap_cpp_module(cpp_module))\n        script_module._concrete_type = torch._C.ConcreteModuleType.from_jit_type(script_module._c._type())\n        for idx, fn in enumerate(script_module._c._get_forward_pre_hooks()):\n            script_module._forward_pre_hooks[idx] = fn\n        for idx, fn in enumerate(script_module._c._get_forward_hooks()):\n            script_module._forward_hooks[idx] = fn\n    return torch.jit.RecursiveScriptModule._construct(cpp_module, init_fn)",
    "docstring": "Wrap this torch._C.ScriptModule in a Python ScriptModule, recursively for all submodules.",
    "type": "function",
    "file_path": "pytorch\\torch\\jit\\_recursive.py",
    "ast_data": "FunctionDef name:wrap_cpp_module arg:cpp_module arguments arg FunctionDef name:init_fn arg:script_module arguments arg For Call Call Call Call Assign Call Call For Call Call Assign For Call Call Assign Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "set_y",
    "source_code": "def set_y(self, y):\n    self._y = y\n    self.stale = True",
    "docstring": "Set the bottom coord of the rectangle. Parameters ---------- y : float",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\patches.py",
    "ast_data": "FunctionDef name:set_y arg:self arg:y arguments arg arg Assign Assign"
  },
  {
    "library": "tensorflow",
    "name": "_do_run",
    "source_code": "def _do_run(self, handle, target_list, fetch_list, feed_dict, options, run_metadata):\n    feeds = dict(((t.deref()._as_tf_output(), v) for t, v in feed_dict.items()))\n    fetches = [t._as_tf_output() for t in fetch_list]\n    targets = [op._c_op for op in target_list]\n\n    def _run_fn(feed_dict, fetch_list, target_list, options, run_metadata):\n        self._extend_graph()\n        return self._call_tf_sessionrun(options, feed_dict, fetch_list, target_list, run_metadata)\n\n    def _prun_fn(handle, feed_dict, fetch_list):\n        if target_list:\n            raise RuntimeError(f'partial_run() requires empty `target_list`. Received: target_list={target_list} (non-empty)')\n        return self._call_tf_sessionprun(handle, feed_dict, fetch_list)\n    if handle is None:\n        return self._do_call(_run_fn, feeds, fetches, targets, options, run_metadata)\n    else:\n        return self._do_call(_prun_fn, handle, feeds, fetches)",
    "docstring": "Runs a step based on the given fetches and feeds. Args: handle: a handle for partial_run. None if this is just a call to run(). target_list: A list of operations to be run, but not fetched. fetch_list: A list of tensors to be fetched. feed_dict: A dictionary that maps tensors to numpy ndarrays. options: A (pointer to a) [] protocol buffer, or None run_metadata: A (pointer to a) [] protocol buffer, or None Returns: A list of numpy ndarrays, corresponding to the elements of . If the ith element of contains the name of an operation, the first Tensor output of that operation will be returned for that element. Raises: tf.errors.OpError: Or one of its subclasses on error.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\client\\session.py",
    "ast_data": "FunctionDef name:_do_run arg:self arg:handle arg:target_list arg:fetch_list arg:feed_dict arg:options arg:run_metadata arguments arg arg arg arg arg arg arg Assign Call Call Call Call Assign Call Assign FunctionDef name:_run_fn arg:feed_dict arg:fetch_list arg:target_list arg:options arg:run_metadata arguments arg arg arg arg arg Call Return return:yes Call FunctionDef name:_prun_fn arg:handle arg:feed_dict arg:fetch_list arguments arg arg arg If Raise Call Return return:yes Call If Compare Return return:yes Call Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "Shekel10",
    "source_code": "class Shekel10(Benchmark):\n\n    def __init__(self, dimensions=4):\n        Benchmark.__init__(self, dimensions)\n        self._bounds = list(zip([0.0] * self.N, [10.0] * self.N))\n        self.global_optimum = [[4.000746537726627, 4.000592923462141, 3.999663394168097, 3.9995098017834123]]\n        self.fglob = -10.536409816692023\n        self.A = asarray([[4.0, 4.0, 4.0, 4.0], [1.0, 1.0, 1.0, 1.0], [8.0, 8.0, 8.0, 8.0], [6.0, 6.0, 6.0, 6.0], [3.0, 7.0, 3.0, 7.0], [2.0, 9.0, 2.0, 9.0], [5.0, 5.0, 3.0, 3.0], [8.0, 1.0, 8.0, 1.0], [6.0, 2.0, 6.0, 2.0], [7.0, 3.6, 7.0, 3.6]])\n        self.C = asarray([0.1, 0.2, 0.2, 0.4, 0.4, 0.6, 0.3, 0.7, 0.5, 0.5])\n\n    def fun(self, x, *args):\n        self.nfev += 1\n        return -sum(1 / (sum((x - self.A) ** 2, axis=1) + self.C))",
    "docstring": "Shekel 10 objective function. This class defines the Shekel 10 [1]_ global optimization problem. This is a multimodal minimization problem defined as follows: .. math:: f_{\\text{Shekel10}}(x) = \\sum_{i=1}^{m} \\frac{1}{c_{i} + \\sum_{j=1}^{n} (x_{j} - a_{ij})^2 }x_i \\in [0, 10]i = 1, ..., 4f(x) = -10.5362837x_i = 4i = 1, ..., 4` .. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions For Global Optimization Problems Int. Journal of Mathematical Modelling and Numerical Optimisation, 2013, 4, 150-194. TODO Found a lower global minimum than Jamil#132... Is this numerical overflow?",
    "type": "class",
    "file_path": "scipy\\benchmarks\\benchmarks\\go_benchmark_functions\\go_funcs_S.py",
    "ast_data": "ClassDef name:Shekel10 FunctionDef name:__init__ arg:self arg:dimensions arguments arg arg Call Assign Call Call Assign Assign Assign Call Assign Call FunctionDef name:fun arg:self arg:x arguments arg arg arg Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "emit_dispatch_case",
    "source_code": "def emit_dispatch_case(overload: PythonSignatureGroup, structseq_typenames: dict[str, str], *, symint: bool=True) -> str:\n    if overload.outplace is not None:\n        return PY_VARIABLE_OUT.substitute(out_idx=overload.signature.output_idx(), call_dispatch=emit_single_dispatch(overload.signature, overload.base, structseq_typenames, symint=symint), call_dispatch_out=emit_single_dispatch(overload.signature, overload.outplace, structseq_typenames, symint=symint))\n    else:\n        return emit_single_dispatch(overload.signature, overload.base, structseq_typenames, symint=symint)",
    "docstring": "Emit dispatch code for a single parsed signature. This corresponds to either a single native function, or a pair that differ only in output params. In the latter case, a single python signature is used for both and dispatching switches on the presence/absence of passed output args.",
    "type": "function",
    "file_path": "pytorch\\tools\\autograd\\gen_python_functions.py",
    "ast_data": "FunctionDef name:emit_dispatch_case arg:overload arg:structseq_typenames arguments arg arg arg If Compare Return return:yes Call Call Call Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "_get_file_checksum",
    "source_code": "def _get_file_checksum(filename: str) -> str:\n    cmd_output = subprocess.run(('openssl', 'sha512', filename), capture_output=True, text=True)\n    return cmd_output.stdout.split()[-1]",
    "docstring": "Reading the whole preprocessed header in for hashing is very expensive, but calling a fast hashing utility in a subprocess is cheap.",
    "type": "function",
    "file_path": "pytorch\\torch\\_inductor\\codecache.py",
    "ast_data": "FunctionDef name:_get_file_checksum arg:filename arguments arg Assign Call Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "get_sum_dtype",
    "source_code": "def get_sum_dtype(dtype: np.dtype) -> np.dtype:\n    if dtype.kind == 'u' and np.can_cast(dtype, np.uint):\n        return np.uint\n    if np.can_cast(dtype, np.int_):\n        return np.int_\n    return dtype",
    "docstring": "Mimic numpy's casting for np.sum",
    "type": "function",
    "file_path": "scipy\\scipy\\sparse\\_sputils.py",
    "ast_data": "FunctionDef name:get_sum_dtype arg:dtype arguments arg If BoolOp Compare Call Return return:yes If Call Return return:yes Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_serialize_to_tensors",
    "source_code": "def _serialize_to_tensors(self):\n    return saveable_object_to_tensor_dict(self.saveables)",
    "docstring": "Returns a dict of tensors to serialize.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\training\\saving\\saveable_object_util.py",
    "ast_data": "FunctionDef name:_serialize_to_tensors arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "@deprecation.deprecated('2019-01-01', 'The TensorFlow Distributions library has moved to TensorFlow Probability (https://github.com/tensorflow/probability). You should update all references to use `tfp.distributions` instead of `tf.distributions`.', warn_once=True)\ndef __init__(self, loc, scale, validate_args=False, allow_nan_stats=True, name='Laplace'):\n    parameters = dict(locals())\n    with ops.name_scope(name, values=[loc, scale]) as name:\n        with ops.control_dependencies([check_ops.assert_positive(scale)] if validate_args else []):\n            self._loc = array_ops.identity(loc, name='loc')\n            self._scale = array_ops.identity(scale, name='scale')\n            check_ops.assert_same_float_dtype([self._loc, self._scale])\n        super(Laplace, self).__init__(dtype=self._loc.dtype, reparameterization_type=distribution.FULLY_REPARAMETERIZED, validate_args=validate_args, allow_nan_stats=allow_nan_stats, parameters=parameters, graph_parents=[self._loc, self._scale], name=name)",
    "docstring": "Construct Laplace distribution with parameters and . The parameters and must be shaped in a way that supports broadcasting (e.g., is a valid operation). Args: loc: Floating point tensor which characterizes the location (center) of the distribution. scale: Positive floating point tensor which characterizes the spread of the distribution. validate_args: Python , default . When distribution parameters are checked for validity despite possibly degrading runtime performance. When invalid inputs may silently render incorrect outputs. allow_nan_stats: Python , default . When , statistics (e.g., mean, mode, variance) use the value \"\" to indicate the result is undefined. When , an exception is raised if one or more of the statistic's batch members are undefined. name: Python name prefixed to Ops created by this class. Raises: TypeError: if and are of different dtype.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\distributions\\laplace.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:loc arg:scale arg:validate_args arg:allow_nan_stats arg:name arguments arg arg arg arg arg arg Assign Call Call With Call With Call Call Assign Call Assign Call Call Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "get_custom_object_name",
    "source_code": "def get_custom_object_name(obj):\n    if hasattr(obj, 'name'):\n        return obj.name\n    elif hasattr(obj, '__name__'):\n        return obj.__name__\n    elif hasattr(obj, '__class__'):\n        return generic_utils.to_snake_case(obj.__class__.__name__)\n    else:\n        return None",
    "docstring": "Returns the name to use for a custom loss or metric callable. Args: obj: Custom loss of metric callable Returns: Name to use, or if the object was not recognized.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\compile_utils.py",
    "ast_data": "FunctionDef name:get_custom_object_name arg:obj arguments arg If Call Return return:yes If Call Return return:yes If Call Return return:yes Call Return return:no"
  },
  {
    "library": "tensorflow",
    "name": "to_list",
    "source_code": "def to_list(x):\n    if isinstance(x, list):\n        return x\n    return [x]",
    "docstring": "Normalizes a list/tensor into a list. If a tensor is passed, we return a list of size 1 containing the tensor. Args: x: target object to be normalized. Returns: A list.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\utils\\generic_utils.py",
    "ast_data": "FunctionDef name:to_list arg:x arguments arg If Call Return return:yes Return return:yes"
  },
  {
    "library": "pandas",
    "name": "_col_idx_to_axis_idx",
    "source_code": "@final\ndef _col_idx_to_axis_idx(self, col_idx: int) -> int:\n    if isinstance(self.subplots, list):\n        return next((group_idx for group_idx, group in enumerate(self.subplots) if col_idx in group))\n    else:\n        return col_idx",
    "docstring": "Return the index of the axis where the column at col_idx should be plotted",
    "type": "method",
    "file_path": "pandas\\pandas\\plotting\\_matplotlib\\core.py",
    "ast_data": "FunctionDef name:_col_idx_to_axis_idx arg:self arg:col_idx arguments arg arg If Call Return return:yes Call Call Compare Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "add_request",
    "source_code": "def add_request(self, *, param, alias):\n    if not request_is_alias(alias) and (not request_is_valid(alias)):\n        raise ValueError(f\"The alias you're setting for `{param}` should be either a valid identifier or one of {{None, True, False}}, but given value is: `{alias}`\")\n    if alias == param:\n        alias = True\n    if alias == UNUSED:\n        if param in self._requests:\n            del self._requests[param]\n        else:\n            raise ValueError(f\"Trying to remove parameter {param} with UNUSED which doesn't exist.\")\n    else:\n        self._requests[param] = alias\n    return self",
    "docstring": "Add request info for a metadata. Parameters ---------- param : str The property for which a request is set. alias : str, or {True, False, None} Specifies which metadata should be routed to - str: the name (or alias) of metadata given to a meta-estimator that should be routed to this parameter. - True: requested - False: not requested - None: error if passed",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\utils\\_metadata_requests.py",
    "ast_data": "FunctionDef name:add_request arg:self arguments arg arg arg If BoolOp Call Call Raise Call If Compare Assign If Compare If Compare Raise Call Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "ThresholdedReLU",
    "source_code": "class ThresholdedReLU(Layer):\n\n    def __init__(self, theta=1.0, **kwargs):\n        super(ThresholdedReLU, self).__init__(**kwargs)\n        if theta is None:\n            raise ValueError('Theta of a Thresholded ReLU layer cannot be None, requires a float. Got %s' % theta)\n        if theta < 0:\n            raise ValueError('The theta value of a Thresholded ReLU layer should be >=0, got %s' % theta)\n        self.supports_masking = True\n        self.theta = backend.cast_to_floatx(theta)\n\n    def call(self, inputs):\n        theta = math_ops.cast(self.theta, inputs.dtype)\n        return inputs * math_ops.cast(math_ops.greater(inputs, theta), inputs.dtype)\n\n    def get_config(self):\n        config = {'theta': float(self.theta)}\n        base_config = super(ThresholdedReLU, self).get_config()\n        return dict(list(base_config.items()) + list(config.items()))\n\n    @tf_utils.shape_type_conversion\n    def compute_output_shape(self, input_shape):\n        return input_shape",
    "docstring": "Thresholded Rectified Linear Unit. It follows: Input shape: Arbitrary. Use the keyword argument (tuple of integers, does not include the samples axis) when using this layer as the first layer in a model. Output shape: Same shape as the input. Args: theta: Float >= 0. Threshold location of activation.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\layers\\advanced_activations.py",
    "ast_data": "ClassDef name:ThresholdedReLU FunctionDef name:__init__ arg:self arg:theta arguments arg arg arg Call Call If Compare Raise Call If Compare Raise Call Assign Assign Call FunctionDef name:call arg:self arg:inputs arguments arg arg Assign Call Return return:yes Call Call FunctionDef name:get_config arg:self arguments arg Assign Call Assign Call Call Return return:yes Call Call Call Call Call FunctionDef name:compute_output_shape arg:self arg:input_shape arguments arg arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "encode_exception_table_varint",
    "source_code": "def encode_exception_table_varint(n: int) -> list[int]:\n    assert n >= 0\n    b = [n & 63]\n    n >>= 6\n    while n > 0:\n        b.append(n & 63)\n        n >>= 6\n    b.reverse()\n    for i in range(len(b) - 1):\n        b[i] |= 64\n    return b",
    "docstring": "Similar to , but the 6-bit chunks are ordered in reverse.",
    "type": "function",
    "file_path": "pytorch\\torch\\_dynamo\\bytecode_transformation.py",
    "ast_data": "FunctionDef name:encode_exception_table_varint arg:n arguments arg Compare Assign While Compare Call Call For Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "make_initializable_iterator",
    "source_code": "def make_initializable_iterator(self):\n    return self._make_initializable_iterator()",
    "docstring": "Get an initializable iterator for DistributedDatasetV1. Note: This API is deprecated. Please use to create an initializable iterator. Returns: A DistributedIteratorV1 instance.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\v1\\input_lib.py",
    "ast_data": "FunctionDef name:make_initializable_iterator arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "get_config",
    "source_code": "def get_config(self):\n    return {}",
    "docstring": "Returns the configuration of the initializer as a JSON-serializable dict. Returns: A JSON-serializable Python dict.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\initializers\\initializers_v2.py",
    "ast_data": "FunctionDef name:get_config arg:self arguments arg Return return:no"
  },
  {
    "library": "tensorflow",
    "name": "_init_metric_attributes",
    "source_code": "def _init_metric_attributes(self):\n    self._compile_metric_functions = []",
    "docstring": "Initialized model metric attributes.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\training_v1.py",
    "ast_data": "FunctionDef name:_init_metric_attributes arg:self arguments arg Assign"
  },
  {
    "library": "scikit-learn",
    "name": "_fit_indicator",
    "source_code": "def _fit_indicator(self, X):\n    if self.add_indicator:\n        self.indicator_ = MissingIndicator(missing_values=self.missing_values, error_on_new=False)\n        self.indicator_._fit(X, precomputed=True)\n    else:\n        self.indicator_ = None",
    "docstring": "Fit a MissingIndicator.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\impute\\_base.py",
    "ast_data": "FunctionDef name:_fit_indicator arg:self arg:X arguments arg arg If Assign Call Call Assign"
  },
  {
    "library": "matplotlib",
    "name": "set_url",
    "source_code": "def set_url(self, url):\n    self._url = url",
    "docstring": "Set the url for the artist. Parameters ---------- url : str",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\artist.py",
    "ast_data": "FunctionDef name:set_url arg:self arg:url arguments arg arg Assign"
  },
  {
    "library": "pytorch",
    "name": "dims",
    "source_code": "def dims(*names: str, min: Optional[int]=None, max: Optional[int]=None) -> tuple[Dim, ...]:\n    return tuple((Dim(name, min=min, max=max) for name in names))",
    "docstring": "Util to create multiple :func: types. Returns: A tuple of :func: types.",
    "type": "function",
    "file_path": "pytorch\\torch\\export\\dynamic_shapes.py",
    "ast_data": "FunctionDef name:dims arguments arg arg arg Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "flip_left_right",
    "source_code": "@tf_export('image.flip_left_right')\n@dispatch.add_dispatch_support\ndef flip_left_right(image):\n    return _flip(image, 1, 'flip_left_right')",
    "docstring": "Flip an image horizontally (left to right). Outputs the contents of flipped along the width dimension. See also . Usage Example: >>> x = [[[1.0, 2.0, 3.0], ... [4.0, 5.0, 6.0]], ... [[7.0, 8.0, 9.0], ... [10.0, 11.0, 12.0]]] >>> tf.image.flip_left_right(x) Args: image: 4-D Tensor of shape or 3-D Tensor of shape . Returns: A tensor of the same type and shape as . Raises: ValueError: if the shape of not supported.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\image_ops_impl.py",
    "ast_data": "FunctionDef name:flip_left_right arg:image arguments arg Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "set_device",
    "source_code": "def set_device(device):\n    return lambda func: device_decorator(torch.device(device), func)",
    "docstring": "Set the default device inside of the wrapped function by decorating it with this function. If you would like to use this as a context manager, use device as a context manager directly, e.g., ``.",
    "type": "function",
    "file_path": "pytorch\\torch\\utils\\_device.py",
    "ast_data": "FunctionDef name:set_device arg:device arguments arg Return return:yes arguments arg Call Call"
  },
  {
    "library": "matplotlib",
    "name": "cleaned",
    "source_code": "def cleaned(self, transform=None, remove_nans=False, clip=None, *, simplify=False, curves=False, stroke_width=1.0, snap=False, sketch=None):\n    vertices, codes = _path.cleanup_path(self, transform, remove_nans, clip, snap, stroke_width, simplify, curves, sketch)\n    pth = Path._fast_from_codes_and_verts(vertices, codes, self)\n    if not simplify:\n        pth._should_simplify = False\n    return pth",
    "docstring": "Return a new with vertices and codes cleaned according to the parameters. See Also -------- Path.iter_segments : for details of the keyword arguments.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\path.py",
    "ast_data": "FunctionDef name:cleaned arg:self arg:transform arg:remove_nans arg:clip arguments arg arg arg arg arg arg arg arg arg Assign Call Assign Call If Assign Return return:yes"
  },
  {
    "library": "scipy",
    "name": "_anderson_ksamp_right",
    "source_code": "def _anderson_ksamp_right(samples, Z, Zstar, k, n, N):\n    A2kN = 0.0\n    lj = Z.searchsorted(Zstar[:-1], 'right') - Z.searchsorted(Zstar[:-1], 'left')\n    Bj = lj.cumsum()\n    for i in arange(0, k):\n        s = np.sort(samples[i])\n        Mij = s.searchsorted(Zstar[:-1], side='right')\n        inner = lj / float(N) * (N * Mij - Bj * n[i]) ** 2 / (Bj * (N - Bj))\n        A2kN += inner.sum() / n[i]\n    return A2kN",
    "docstring": "Compute A2akN equation 6 of Scholz & Stephens. Parameters ---------- samples : sequence of 1-D array_like Array of sample arrays. Z : array_like Sorted array of all observations. Zstar : array_like Sorted array of unique observations. k : int Number of samples. n : array_like Number of observations in each sample. N : int Total number of observations. Returns ------- A2KN : float The A2KN statistics of Scholz and Stephens 1987.",
    "type": "function",
    "file_path": "scipy\\scipy\\stats\\_morestats.py",
    "ast_data": "FunctionDef name:_anderson_ksamp_right arg:samples arg:Z arg:Zstar arg:k arg:n arg:N arguments arg arg arg arg arg arg Assign Assign Call Call Assign Call For Call Assign Call Assign Call Assign Call Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "add_tool",
    "source_code": "def add_tool(self, tool, group, position=-1):\n    tool = self.toolmanager.get_tool(tool)\n    image = self._get_image_filename(tool)\n    toggle = getattr(tool, 'toggled', None) is not None\n    self.add_toolitem(tool.name, group, position, image, tool.description, toggle)\n    if toggle:\n        self.toolmanager.toolmanager_connect('tool_trigger_%s' % tool.name, self._tool_toggled_cbk)\n        if tool.toggled:\n            self.toggle_toolitem(tool.name, True)",
    "docstring": "Add a tool to this container. Parameters ---------- tool : tool_like The tool to add, see . group : str The name of the group to add this tool to. position : int, default: -1 The position within the group to place this tool.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\backend_bases.py",
    "ast_data": "FunctionDef name:add_tool arg:self arg:tool arg:group arg:position arguments arg arg arg arg Assign Call Assign Call Assign Compare Call Call If Call If Call"
  },
  {
    "library": "kornia",
    "name": "forward",
    "source_code": "def forward(self, origins: Tensor, directions: Tensor) -> Tensor:\n    batch_size = origins.shape[0]\n    lengths = sample_lengths(batch_size, self._num_ray_points, device=origins.device, dtype=origins.dtype, irregular=self._irregular_ray_sampling)\n    points_3d = sample_ray_points(origins, directions, lengths)\n    points_3d_encoded = self._pos_encoder(points_3d)\n    directions_encoded = self._dir_encoder(F.normalize(directions, dim=-1))\n    y = self._mlp(points_3d_encoded)\n    y = self._fc1(y)\n    densities_ray_points = self._sigma(y)\n    densities_ray_points = densities_ray_points + torch.randn_like(densities_ray_points) * 0.1\n    densities_ray_points = torch.relu(densities_ray_points)\n    y = torch.cat((y, directions_encoded[..., None, :].expand(-1, self._num_ray_points, -1)), dim=-1)\n    y = self._fc2(y)\n    rgbs_ray_points = self._rgb(y)\n    rgbs = self._renderer(rgbs_ray_points, densities_ray_points, points_3d)\n    return rgbs",
    "docstring": "Forward method. Args: origins: Ray origins with shape :math:. directions: Ray directions with shape :math:. Returns: Rendered image pixels :math:.",
    "type": "method",
    "file_path": "kornia\\kornia\\nerf\\nerf_model.py",
    "ast_data": "FunctionDef name:forward arg:self arg:origins arg:directions arguments arg arg arg Assign Assign Call Assign Call Assign Call Assign Call Call Assign Call Assign Call Assign Call Assign Call Assign Call Assign Call Call Assign Call Assign Call Assign Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "get_local_bwd_output",
    "source_code": "def get_local_bwd_output(self, mb_index):\n    assert self.has_backward, \"can't steal_bwd_input if this stage doesn't have backward\"\n    assert not self.is_first, \"can't get bwd output if this stage is first\"\n    self._check_chunk_id(mb_index)\n    return self.bwd_cache.pop(mb_index)",
    "docstring": "Returns the input grad tensors for this stage, which correspond to the stage inputs during forward.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\pipelining\\stage.py",
    "ast_data": "FunctionDef name:get_local_bwd_output arg:self arg:mb_index arguments arg arg Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_aggregate_grads",
    "source_code": "def _aggregate_grads(gradients):\n    assert gradients, 'No gradients to aggregate'\n    if len(gradients) == 1:\n        return gradients[0]\n    if all((isinstance(g, tensor_lib.Tensor) for g in gradients)):\n        return gen_math_ops.add_n(gradients)\n    else:\n        assert all((isinstance(g, (tensor_lib.Tensor, indexed_slices.IndexedSlices)) for g in gradients))\n        return backprop_util.AggregateIndexedSlicesGradients(gradients)",
    "docstring": "Aggregate gradients from multiple sources. Args: gradients: A list of 'Tensor' or 'IndexedSlices' gradients. Returns: If 'gradients' only has 'Tensor', returns an aggregated 'Tensor'. Otherwise returns an aggregated 'IndexedSlices'.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\backprop.py",
    "ast_data": "FunctionDef name:_aggregate_grads arg:gradients arguments arg If Compare Call Return return:yes If Call Call Return return:yes Call Call Call Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "transform",
    "source_code": "def transform(self, X):\n    check_is_fitted(self)\n    dtype = (np.float64, np.float32) if self.dtype is None else self.dtype\n    Xt = validate_data(self, X, copy=True, dtype=dtype, reset=False)\n    bin_edges = self.bin_edges_\n    for jj in range(Xt.shape[1]):\n        Xt[:, jj] = np.searchsorted(bin_edges[jj][1:-1], Xt[:, jj], side='right')\n    if self.encode == 'ordinal':\n        return Xt\n    dtype_init = None\n    if 'onehot' in self.encode:\n        dtype_init = self._encoder.dtype\n        self._encoder.dtype = Xt.dtype\n    try:\n        Xt_enc = self._encoder.transform(Xt)\n    finally:\n        self._encoder.dtype = dtype_init\n    return Xt_enc",
    "docstring": "Discretize the data. Parameters ---------- X : array-like of shape (n_samples, n_features) Data to be discretized. Returns ------- Xt : {ndarray, sparse matrix}, dtype={np.float32, np.float64} Data in the binned space. Will be a sparse matrix if and ndarray otherwise.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\preprocessing\\_discretization.py",
    "ast_data": "FunctionDef name:transform arg:self arg:X arguments arg arg Call Assign Compare Assign Call Assign For Call Assign Call If Compare Return return:yes Assign If Compare Assign Assign Try Assign Call Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_add_saveable",
    "source_code": "def _add_saveable(saveables, seen_ops, saveable):\n    if saveable.op is not None and saveable.op in seen_ops:\n        raise ValueError(f'The same saveable will be restored with two names: {saveable.name}')\n    saveables.append(saveable)\n    seen_ops.add(saveable.op)",
    "docstring": "Adds the saveable to the saveables list. Args: saveables: List to append the SaveableObject to. seen_ops: Set of the ops of the saveables already processed. Used to check that each saveable is only saved once. saveable: The saveable. Raises: ValueError: If the saveable has already been processed.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\training\\saving\\saveable_object_util.py",
    "ast_data": "FunctionDef name:_add_saveable arg:saveables arg:seen_ops arg:saveable arguments arg arg arg If BoolOp Compare Compare Raise Call Call Call"
  },
  {
    "library": "pandas",
    "name": "agg_list_like",
    "source_code": "def agg_list_like(self) -> DataFrame | Series:\n    return self.agg_or_apply_list_like(op_name='agg')",
    "docstring": "Compute aggregation in the case of a list-like argument. Returns ------- Result of aggregation.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\apply.py",
    "ast_data": "FunctionDef name:agg_list_like arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "kornia",
    "name": "save",
    "source_code": "def save(self, name: Optional[str]=None, n_row: Optional[int]=None) -> None:\n    from kornia.io import write_image\n    from kornia.utils.image import make_grid\n    if name is None:\n        name = f'Kornia-{datetime.datetime.now(tz=datetime.timezone.utc).strftime('%Y%m%d%H%M%S')!s}.jpg'\n    if len(self._output_image.shape) == 3:\n        out_image = self._output_image\n    if len(self._output_image.shape) == 4:\n        if n_row is None:\n            n_row = math.ceil(self._output_image.shape[0] ** 0.5)\n        out_image = make_grid(self._output_image, n_row, padding=2)\n    write_image(name, out_image.mul(255.0).byte())",
    "docstring": "Save the output image(s) to a directory. Args: name: Directory to save the images. n_row: Number of images displayed in each row of the grid.",
    "type": "method",
    "file_path": "kornia\\kornia\\core\\mixin\\image_module.py",
    "ast_data": "FunctionDef name:save arg:self arg:name arg:n_row arguments arg arg arg If Compare Assign Call Call If Compare Call Assign If Compare Call If Compare Assign Call Assign Call Call Call Call"
  },
  {
    "library": "cherrypy",
    "name": "make_file",
    "source_code": "def make_file(self):\n    return tempfile.TemporaryFile()",
    "docstring": "Return a file-like object into which the request body will be read. By default, this will return a TemporaryFile. Override as needed. See also :attr:.",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\_cpreqbody.py",
    "ast_data": "FunctionDef name:make_file arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "deserialize",
    "source_code": "@abc.abstractmethod\ndef deserialize(self, string_value):\n    pass",
    "docstring": "Callback to deserialize the object.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\trackable\\python_state.py",
    "ast_data": "FunctionDef name:deserialize arg:self arg:string_value arguments arg arg"
  },
  {
    "library": "kornia",
    "name": "adjoint",
    "source_code": "def adjoint(self) -> Tensor:\n    R = self.so3.matrix()\n    z = zeros_like(R)\n    row0 = concatenate((R, So3.hat(self.t) @ R), -1)\n    row1 = concatenate((z, R), -1)\n    return concatenate((row0, row1), -2)",
    "docstring": "Return the adjoint matrix of shape :math:. Example: >>> s = Se3.identity() >>> s.adjoint() tensor([[1., 0., 0., 0., 0., 0.], [0., 1., 0., 0., 0., 0.], [0., 0., 1., 0., 0., 0.], [0., 0., 0., 1., 0., 0.], [0., 0., 0., 0., 1., 0.], [0., 0., 0., 0., 0., 1.]], grad_fn=)",
    "type": "method",
    "file_path": "kornia\\kornia\\geometry\\liegroup\\se3.py",
    "ast_data": "FunctionDef name:adjoint arg:self arguments arg Assign Call Assign Call Assign Call Call Assign Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "exported_program_to_ir",
    "source_code": "def exported_program_to_ir(exported_program: torch.export.ExportedProgram, *, registry: _registration.ONNXRegistry | None=None, lower: Literal['at_conversion', 'none']='at_conversion') -> ir.Model:\n    if registry is None:\n        registry = _registration.ONNXRegistry.from_torchlib()\n    if lower != 'none':\n        exported_program = _prepare_exported_program_for_export(exported_program, registry=registry)\n    return _exported_program_to_onnx_program(exported_program, registry=registry, lower=lower).model",
    "docstring": "Convert an exported program to an ONNX IR model. Reference: - ExportedProgram spec: Args: exported_program: The exported program to convert. lower: Whether to lower the graph to core ONNX operators. at_conversion: Lower whe translating the FX graph to ONNX IR. none: Do not lower the graph. registry: The registry of all ONNX Script decomposition.",
    "type": "function",
    "file_path": "pytorch\\torch\\onnx\\_internal\\exporter\\_core.py",
    "ast_data": "FunctionDef name:exported_program_to_ir arg:exported_program arguments arg arg arg If Compare Assign Call If Compare Assign Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "get_meta_graph_def_from_tags",
    "source_code": "def get_meta_graph_def_from_tags(self, tags):\n    found_match = False\n    meta_graph_def_to_load = None\n    available_tags = []\n    for meta_graph_def in self._saved_model.meta_graphs:\n        available_tags.append(set(meta_graph_def.meta_info_def.tags))\n        if set(meta_graph_def.meta_info_def.tags) == set(tags):\n            meta_graph_def_to_load = meta_graph_def\n            found_match = True\n            break\n    if not found_match:\n        raise RuntimeError(f\"MetaGraphDef associated with tags {str(tags).strip('[]')} could not be found in SavedModel, with available tags '{available_tags}'. To inspect available tag-sets in the SavedModel, please use the SavedModel CLI: `saved_model_cli`.\")\n    return meta_graph_def_to_load",
    "docstring": "Return MetaGraphDef with the exact specified tags. Args: tags: A list or set of string tags that identify the MetaGraphDef. Returns: MetaGraphDef with the same tags. Raises: RuntimeError: if no metagraphs were found with the associated tags.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\saved_model\\loader_impl.py",
    "ast_data": "FunctionDef name:get_meta_graph_def_from_tags arg:self arg:tags arguments arg arg Assign Assign Assign For Call Call If Compare Call Call Assign Assign If Raise Call Call Call Return return:yes"
  },
  {
    "library": "pandas",
    "name": "maybe_box_native",
    "source_code": "def maybe_box_native(value: Scalar | None | NAType) -> Scalar | None | NAType:\n    if is_float(value):\n        value = float(value)\n    elif is_integer(value):\n        value = int(value)\n    elif is_bool(value):\n        value = bool(value)\n    elif isinstance(value, (np.datetime64, np.timedelta64)):\n        value = maybe_box_datetimelike(value)\n    elif value is NA:\n        value = None\n    return value",
    "docstring": "If passed a scalar cast the scalar to a python native type. Parameters ---------- value : scalar or Series Returns ------- scalar or Series",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\dtypes\\cast.py",
    "ast_data": "FunctionDef name:maybe_box_native arg:value arguments arg If Call Assign Call If Call Assign Call If Call Assign Call If Call Assign Call If Compare Assign Return return:yes"
  },
  {
    "library": "scipy",
    "name": "covariance",
    "source_code": "@property\ndef covariance(self):\n    return self._covariance",
    "docstring": "Explicit representation of the covariance matrix",
    "type": "method",
    "file_path": "scipy\\scipy\\stats\\_covariance.py",
    "ast_data": "FunctionDef name:covariance arg:self arguments arg Return return:yes"
  },
  {
    "library": "pandas",
    "name": "infer_dtype_from",
    "source_code": "def infer_dtype_from(val) -> tuple[DtypeObj, Any]:\n    if not is_list_like(val):\n        return infer_dtype_from_scalar(val)\n    return infer_dtype_from_array(val)",
    "docstring": "Interpret the dtype from a scalar or array. Parameters ---------- val : object",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\dtypes\\cast.py",
    "ast_data": "FunctionDef name:infer_dtype_from arg:val arguments arg If Call Return return:yes Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "int_to_half",
    "source_code": "def int_to_half(i: int) -> float:\n    buf = struct.pack('i', i)\n    return struct.unpack('f', buf)[0]",
    "docstring": "Casts an integer value to a half-precision float. Converts an integer value obtained from half_to_int back into a floating point value.",
    "type": "function",
    "file_path": "pytorch\\torch\\utils\\tensorboard\\summary.py",
    "ast_data": "FunctionDef name:int_to_half arg:i arguments arg Assign Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "unreachable_codes",
    "source_code": "@staticmethod\ndef unreachable_codes(code_options) -> list[Instruction]:\n    return [create_load_const(None), create_instruction('RAISE_VARARGS', arg=1)]",
    "docstring": "Codegen a to make analysis work for unreachable code",
    "type": "method",
    "file_path": "pytorch\\torch\\_dynamo\\resume_execution.py",
    "ast_data": "FunctionDef name:unreachable_codes arg:code_options arguments arg Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "print_op_coverage_summary",
    "source_code": "def print_op_coverage_summary(model: nn.Module, args, kwargs, *, output_csv=False):\n    import csv\n    from tabulate import tabulate\n    fwd_graph, bwd_graph = get_inductor_decomp_graphs(model, args, kwargs)\n    op_counts = {}\n    for node in fwd_graph.graph.nodes:\n        if node.op == 'call_function' and isinstance(node.target, torch._ops.OpOverload):\n            if node.target not in op_counts:\n                op_counts[node.target] = 0\n            op_counts[node.target] += 1\n    for node in bwd_graph.graph.nodes:\n        if node.op == 'call_function' and isinstance(node.target, torch._ops.OpOverload):\n            if node.target not in op_counts:\n                op_counts[node.target] = 0\n            op_counts[node.target] += 1\n    op_infos = []\n    for op, count in op_counts.items():\n        supported = op in DTensor._op_dispatcher.sharding_propagator.op_to_rules\n        op_infos.append([op, str(op._schema), count, supported])\n    count_idx = 2\n    op_infos.sort(key=itemgetter(count_idx), reverse=True)\n    headers = ['Operator', 'Schema', 'Total Count', 'Supported']\n    print(tabulate(op_infos, headers=headers))\n    if output_csv:\n        with open('op_summary.csv', 'w', newline='') as csv_file:\n            csv_writer = csv.writer(csv_file)\n            csv_writer.writerow(headers)\n            for row in op_infos:\n                csv_writer.writerow(row)",
    "docstring": "Util to print the operator coverage summary of a certain model with tabulute. Must have tabulate module installed.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\tensor\\debug\\_op_coverage.py",
    "ast_data": "FunctionDef name:print_op_coverage_summary arg:model arg:args arg:kwargs arguments arg arg arg arg Assign Call Assign For If BoolOp Compare Call If Compare Assign For If BoolOp Compare Call If Compare Assign Assign For Call Assign Compare Call Call Assign Call Call Assign Call Call If With Call Assign Call Call For Call"
  },
  {
    "library": "kornia",
    "name": "median_blur",
    "source_code": "def median_blur(input: Tensor, kernel_size: tuple[int, int] | int) -> Tensor:\n    KORNIA_CHECK_IS_TENSOR(input)\n    KORNIA_CHECK_SHAPE(input, ['B', 'C', 'H', 'W'])\n    padding = _compute_zero_padding(kernel_size)\n    kernel: Tensor = get_binary_kernel2d(kernel_size, device=input.device, dtype=input.dtype)\n    b, c, h, w = input.shape\n    features: Tensor = F.conv2d(input.reshape(b * c, 1, h, w), kernel, padding=padding, stride=1)\n    features = features.view(b, c, -1, h, w)\n    return features.median(dim=2)[0]",
    "docstring": "Blur an image using the median filter. .. image:: _static/img/median_blur.png Args: input: the input image with shape :math:. kernel_size: the blurring kernel size. Returns: the blurred input tensor with shape :math:. .. note:: See a working example __. Example: >>> input = torch.rand(2, 4, 5, 7) >>> output = median_blur(input, (3, 3)) >>> output.shape torch.Size([2, 4, 5, 7])",
    "type": "function",
    "file_path": "kornia\\kornia\\filters\\median.py",
    "ast_data": "FunctionDef name:median_blur arg:input arg:kernel_size arguments arg arg Call Call Assign Call Call Assign Call Call Assign Call Return return:yes Call"
  },
  {
    "library": "numpy",
    "name": "general_source_directories_files",
    "source_code": "def general_source_directories_files(top_path):\n    pruned_directories = ['CVS', '.svn', 'build']\n    prune_file_pat = re.compile('(?:[~#]|\\\\.py[co]|\\\\.o)$')\n    for dirpath, dirnames, filenames in os.walk(top_path, topdown=True):\n        pruned = [d for d in dirnames if d not in pruned_directories]\n        dirnames[:] = pruned\n        for d in dirnames:\n            dpath = os.path.join(dirpath, d)\n            rpath = rel_path(dpath, top_path)\n            files = []\n            for f in os.listdir(dpath):\n                fn = os.path.join(dpath, f)\n                if os.path.isfile(fn) and (not prune_file_pat.search(fn)):\n                    files.append(fn)\n            yield (rpath, files)\n    dpath = top_path\n    rpath = rel_path(dpath, top_path)\n    filenames = [os.path.join(dpath, f) for f in os.listdir(dpath) if not prune_file_pat.search(f)]\n    files = [f for f in filenames if os.path.isfile(f)]\n    yield (rpath, files)",
    "docstring": "Return a directory name relative to top_path and files contained.",
    "type": "function",
    "file_path": "numpy\\numpy\\distutils\\misc_util.py",
    "ast_data": "FunctionDef name:general_source_directories_files arg:top_path arguments arg Assign Assign Call For Call Assign Compare Assign For Assign Call Assign Call Assign For Call Assign Call If BoolOp Call Call Call Assign Assign Call Assign Call Call Call Assign Call"
  },
  {
    "library": "tensorflow",
    "name": "_get_graph",
    "source_code": "def _get_graph(while_op, func_attr_name, attr_graph_name):\n    func_graph = getattr(while_op, attr_graph_name, None)\n    if func_graph is None:\n        input_shapes = [tensor_shape.TensorShape(s) for s in while_op.get_attr('output_shapes')]\n        func_name = while_op.get_attr(func_attr_name).name\n        func_graph = util.get_func_graph(while_op, input_shapes, func_name)\n    func_graph._while = while_op\n    return func_graph",
    "docstring": "Returns for the given function attribute. Args: while_op: The While Operation. func_attr_name: string attr_graph_name: cached forward graph name Returns:",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\while_v2.py",
    "ast_data": "FunctionDef name:_get_graph arg:while_op arg:func_attr_name arg:attr_graph_name arguments arg arg arg Assign Call If Compare Assign Call Call Assign Call Assign Call Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_maybe_delay",
    "source_code": "def _maybe_delay(self):\n    delay_secs = int(os.environ.get('TF_COORDINATOR_SCHEDULE_START_DELAY', '0'))\n    delay_secs *= self.worker_index\n    delay_cap = int(os.environ.get('TF_COORDINATOR_SCHEDULE_START_DELAY_MAX', '0'))\n    if delay_cap:\n        delay_secs = min(delay_secs, delay_cap)\n    if delay_secs > 0:\n        logging.info(' Worker %d sleeping for %d seconds before running function', self.worker_index, delay_secs)\n    time.sleep(delay_secs)",
    "docstring": "Delay if corresponding env vars are set.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\coordinator\\cluster_coordinator.py",
    "ast_data": "FunctionDef name:_maybe_delay arg:self arguments arg Assign Call Call Assign Call Call If Assign Call If Compare Call Call"
  },
  {
    "library": "pytorch",
    "name": "_get_root_modules",
    "source_code": "def _get_root_modules(modules: set[nn.Module]) -> set[nn.Module]:\n    root_modules: set[nn.Module] = set()\n    module_to_submodules = {module: set(module.modules()) for module in modules}\n    for candidate_module in modules:\n        is_root_module = True\n        for module, submodules in module_to_submodules.items():\n            is_child_module = candidate_module is not module and candidate_module in submodules\n            if is_child_module:\n                is_root_module = False\n                break\n        if is_root_module:\n            root_modules.add(candidate_module)\n    return root_modules",
    "docstring": "Returns: Set[nn.Module]: The subset of ``.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\fsdp\\_common_utils.py",
    "ast_data": "FunctionDef name:_get_root_modules arg:modules arguments arg Call Assign Call Call For Assign For Call Assign BoolOp Compare Compare If Assign If Call Return return:yes"
  },
  {
    "library": "pandas",
    "name": "_apply",
    "source_code": "def _apply(self, func: Callable[[np.ndarray, int, int], np.ndarray], name: str, numeric_only: bool=False, numba_args: tuple[Any, ...]=(), **kwargs):\n    window = self._scipy_weight_generator(self.window, **kwargs)\n    offset = (len(window) - 1) // 2 if self.center else 0\n\n    def homogeneous_func(values: np.ndarray):\n        if values.size == 0:\n            return values.copy()\n\n        def calc(x):\n            additional_nans = np.full(offset, np.nan)\n            x = np.concatenate((x, additional_nans))\n            return func(x, window, self.min_periods if self.min_periods is not None else len(window))\n        with np.errstate(all='ignore'):\n            result = np.asarray(calc(values))\n        if self.center:\n            result = self._center_window(result, offset)\n        return result\n    return self._apply_columnwise(homogeneous_func, name, numeric_only)[::self.step]",
    "docstring": "Rolling with weights statistical measure using supplied function. Designed to be used with passed-in Cython array-based functions. Parameters ---------- func : callable function to apply name : str, numeric_only : bool, default False Whether to only operate on bool, int, and float columns numba_args : tuple unused **kwargs additional arguments for scipy windows if necessary Returns ------- y : type of input",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\window\\rolling.py",
    "ast_data": "FunctionDef name:_apply arg:self arg:func arg:name arg:numeric_only arg:numba_args arguments arg arg arg arg arg arg Assign Call Assign Call FunctionDef name:homogeneous_func arg:values arguments arg If Compare Return return:yes Call FunctionDef name:calc arg:x arguments arg Assign Call Assign Call Return return:yes Call Compare Call With Call Assign Call Call If Assign Call Return return:yes Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "_finalize_sample_weight",
    "source_code": "def _finalize_sample_weight(self, sample_weight, y):\n    return sample_weight",
    "docstring": "Finalize sample weight. Used by subclasses to adjust sample_weights. This is useful for implementing class weights.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\ensemble\\_hist_gradient_boosting\\gradient_boosting.py",
    "ast_data": "FunctionDef name:_finalize_sample_weight arg:self arg:sample_weight arg:y arguments arg arg arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_calc_bias_add_flops",
    "source_code": "@ops.RegisterStatistics('BiasAdd', 'flops')\ndef _calc_bias_add_flops(graph, node):\n    input_shape = graph_util.tensor_shape_from_node_def_name(graph, node.input[0])\n    input_shape.assert_is_fully_defined()\n    input_count = np.prod(input_shape.as_list())\n    return ops.OpStats('flops', input_count)",
    "docstring": "Calculates the computing needed for BiasAdd.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\nn_ops.py",
    "ast_data": "FunctionDef name:_calc_bias_add_flops arg:graph arg:node arguments arg arg Assign Call Call Assign Call Call Return return:yes Call Call"
  },
  {
    "library": "django",
    "name": "response_post_save_change",
    "source_code": "def response_post_save_change(self, request, obj):\n    return self._response_post_save(request, obj)",
    "docstring": "Figure out where to redirect after the 'Save' button has been pressed when editing an existing object.",
    "type": "method",
    "file_path": "django\\django\\contrib\\admin\\options.py",
    "ast_data": "FunctionDef name:response_post_save_change arg:self arg:request arg:obj arguments arg arg arg Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "_get_prefix_uri",
    "source_code": "def _get_prefix_uri(self) -> str:\n    raise AbstractMethodError(self)",
    "docstring": "Get uri of namespace prefix. This method retrieves corresponding URI to prefix in namespaces. Raises ------ KeyError *If prefix is not included in namespace dict.",
    "type": "method",
    "file_path": "pandas\\pandas\\io\\formats\\xml.py",
    "ast_data": "FunctionDef name:_get_prefix_uri arg:self arguments arg Raise Call"
  },
  {
    "library": "matplotlib",
    "name": "__iter__",
    "source_code": "def __iter__(self):\n    unique_groups = {id(group): group for group in self._mapping.values()}\n    for group in unique_groups.values():\n        yield sorted(group, key=self._ordering.__getitem__)",
    "docstring": "Iterate over each of the disjoint sets as a list. The iterator is invalid if interleaved with calls to join().",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\cbook.py",
    "ast_data": "FunctionDef name:__iter__ arg:self arguments arg Assign Call Call For Call Call"
  },
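The `__iter__` entry above belongs to matplotlib's `cbook.Grouper` disjoint-set helper, which is public API. A minimal usage sketch; note that in recent matplotlib versions Grouper tracks elements by weak reference, so the joined objects must be weak-referenceable (plain strings are not, hence the small `Tag` class here, which is ours):

```python
from matplotlib.cbook import Grouper

class Tag:
    def __init__(self, name):
        self.name = name
    def __repr__(self):
        return self.name

a, b, c, d, e = (Tag(n) for n in "abcde")

g = Grouper()
g.join(a, b)   # a and b now share a set
g.join(b, c)   # transitively groups a, b, c
g.join(d, e)

# Iterating yields each disjoint set as a list.
print(list(g))       # [[a, b, c], [d, e]]
print(g.joined(a, c))  # True
```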
  {
    "library": "tensorflow",
    "name": "get_profiles",
    "source_code": "def get_profiles(graph, run_metadata):\n    return PprofProfiler(graph, run_metadata).profile()",
    "docstring": "Generate profiles in pprof format. See for pprof proto format. Args: graph: A object. run_metadata: A proto. Returns: A dictionary mapping from device name to pprof proto for that device.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\profiler\\pprof_profiler.py",
    "ast_data": "FunctionDef name:get_profiles arg:graph arg:run_metadata arguments arg arg Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "build_chunks",
    "source_code": "def build_chunks(self):\n    proto = self._proto\n    if not isinstance(proto, graph_pb2.GraphDef):\n        raise TypeError('Can only split GraphDef type protos.')\n    proto_size = proto.ByteSize()\n    if proto_size < constants.max_size():\n        return\n    node_splitter = RepeatedMessageSplitter(proto, 'node', [ConstantNodeDefSplitter, LargeMessageSplitter], parent_splitter=self, fields_in_parent=[])\n    function_splitter = RepeatedMessageSplitter(proto.library, ['function'], [FunctionDefSplitter], parent_splitter=self, fields_in_parent=['library'])\n    library_size = proto.library.ByteSize()\n    approx_node_size = proto_size - library_size\n    if library_size > approx_node_size:\n        library_size -= function_splitter.build_chunks()\n        if library_size + approx_node_size > constants.max_size():\n            approx_node_size -= node_splitter.build_chunks()\n    else:\n        approx_node_size -= node_splitter.build_chunks()\n        if library_size + approx_node_size > constants.max_size():\n            library_size -= function_splitter.build_chunks()\n    if proto.ByteSize() > constants.max_size():\n        self.add_chunk(proto.library, ['library'], 1)\n        proto.ClearField('library')",
    "docstring": "Splits a GraphDef proto into smaller chunks.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\tools\\proto_splitter\\split_graph_def.py",
    "ast_data": "FunctionDef name:build_chunks arg:self arguments arg Assign If Call Raise Call Assign Call If Compare Call Return return:no Assign Call Assign Call Assign Call Assign If Compare Call If Compare Call Call Call If Compare Call Call If Compare Call Call Call Call"
  },
  {
    "library": "django",
    "name": "touch",
    "source_code": "def touch(self, key, timeout=DEFAULT_TIMEOUT, version=None):\n    raise NotImplementedError('subclasses of BaseCache must provide a touch() method')",
    "docstring": "Update the key's expiry time using timeout. Return True if successful or False if the key does not exist.",
    "type": "method",
    "file_path": "django\\django\\core\\cache\\backends\\base.py",
    "ast_data": "FunctionDef name:touch arg:self arg:key arg:timeout arg:version arguments arg arg arg arg Raise Call"
  },
  {
    "library": "tensorflow",
    "name": "binomial",
    "source_code": "def binomial(self, shape, counts, probs, dtype=dtypes.int32, name=None):\n    dtype = dtypes.as_dtype(dtype)\n    with ops.name_scope(name, 'binomial', [shape, counts, probs]) as name:\n        counts = ops.convert_to_tensor(counts, name='counts')\n        probs = ops.convert_to_tensor(probs, name='probs')\n        shape_tensor = _shape_tensor(shape)\n        return gen_stateful_random_ops.stateful_random_binomial(self.state.handle, self.algorithm, shape=shape_tensor, counts=counts, probs=probs, dtype=dtype, name=name)",
    "docstring": "Outputs random values from a binomial distribution. The generated values follow a binomial distribution with specified count and probability of success parameters. Example: Args: shape: A 1-D integer Tensor or Python array. The shape of the output tensor. counts: Tensor. The counts of the binomial distribution. Must be broadcastable with , and broadcastable with the rightmost dimensions of . probs: Tensor. The probability of success for the binomial distribution. Must be broadcastable with and broadcastable with the rightmost dimensions of . dtype: The type of the output. Default: tf.int32 name: A name for the operation (optional). Returns: samples: A Tensor of the specified shape filled with random binomial values. For each i, each samples[i, ...] is an independent draw from the binomial distribution on counts[i] trials with probability of success probs[i].",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\stateful_random_ops.py",
    "ast_data": "FunctionDef name:binomial arg:self arg:shape arg:counts arg:probs arg:dtype arg:name arguments arg arg arg arg arg arg Assign Call With Call Assign Call Assign Call Assign Call Return return:yes Call"
  },
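This method is reached through the public `tf.random.Generator` API. A short usage sketch; the seed and parameter values are illustrative:

```python
import tensorflow as tf

rng = tf.random.Generator.from_seed(234)

# One draw per (counts, probs) pair: 10 trials at p=0.5 and
# 20 trials at p=0.8; dtype defaults to tf.int32.
samples = rng.binomial(shape=[2], counts=[10.0, 20.0], probs=[0.5, 0.8])
print(samples)  # e.g. tf.Tensor([ 5 17], shape=(2,), dtype=int32)
```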
  {
    "library": "tensorflow",
    "name": "sync",
    "source_code": "def sync(self):\n    self._queue.join()\n    logging.info('Sync on ongoing save/restore.')",
    "docstring": "Sync on any ongoing save or restore events.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\checkpoint\\async_checkpoint_helper.py",
    "ast_data": "FunctionDef name:sync arg:self arguments arg Call Call"
  },
  {
    "library": "scipy",
    "name": "count_blocks",
    "source_code": "def count_blocks(A, blocksize):\n    r, c = blocksize\n    if r < 1 or c < 1:\n        raise ValueError('r and c must be positive')\n    if issparse(A):\n        if A.format == 'csr':\n            M, N = A.shape\n            return csr_count_blocks(M, N, r, c, A.indptr, A.indices)\n        elif A.format == 'csc':\n            return count_blocks(A.T, (c, r))\n    return count_blocks(csr_array(A), blocksize)",
    "docstring": "For a given blocksize=(r,c) count the number of occupied blocks in a sparse matrix A",
    "type": "function",
    "file_path": "scipy\\scipy\\sparse\\_spfuncs.py",
    "ast_data": "FunctionDef name:count_blocks arg:A arg:blocksize arguments arg arg Assign If BoolOp Compare Compare Raise Call If Call If Compare Assign Return return:yes Call If Compare Return return:yes Call Return return:yes Call Call"
  },
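`count_blocks` itself lives in a private scipy module, but the quantity it computes is easy to reproduce on a small example. A minimal NumPy sketch (assumes the matrix shape divides evenly by the blocksize; the reshape trick is ours, not scipy's implementation):

```python
import numpy as np
from scipy.sparse import csr_array

A = csr_array(np.array([[1, 0, 0, 0],
                        [0, 2, 0, 0],
                        [0, 0, 3, 4],
                        [0, 0, 5, 0]]))
r, c = 2, 2

# A block is "occupied" if any entry inside it is nonzero.
mask = A.toarray() != 0
grid = mask.reshape(mask.shape[0] // r, r, mask.shape[1] // c, c)
print(grid.any(axis=(1, 3)).sum())  # 2 occupied 2x2 blocks
```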
  {
    "library": "scikit-learn",
    "name": "max_error",
    "source_code": "@validate_params({'y_true': ['array-like'], 'y_pred': ['array-like']}, prefer_skip_nested_validation=True)\ndef max_error(y_true, y_pred):\n    xp, _ = get_namespace(y_true, y_pred)\n    y_type, y_true, y_pred, _ = _check_reg_targets(y_true, y_pred, None, xp=xp)\n    if y_type == 'continuous-multioutput':\n        raise ValueError('Multioutput not supported in max_error')\n    return float(xp.max(xp.abs(y_true - y_pred)))",
    "docstring": "The max_error metric calculates the maximum residual error. Read more in the :ref:. Parameters ---------- y_true : array-like of shape (n_samples,) Ground truth (correct) target values. y_pred : array-like of shape (n_samples,) Estimated target values. Returns ------- max_error : float A positive floating point value (the best value is 0.0). Examples -------- >>> from sklearn.metrics import max_error >>> y_true = [3, 2, 7, 1] >>> y_pred = [4, 2, 7, 1] >>> max_error(y_true, y_pred) 1.0",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\metrics\\_regression.py",
    "ast_data": "FunctionDef name:max_error arg:y_true arg:y_pred arguments arg arg Assign Call Assign Call If Compare Raise Call Return return:yes Call Call Call Call"
  },
  {
    "library": "django",
    "name": "listdir",
    "source_code": "def listdir(self, path):\n    raise NotImplementedError('subclasses of Storage must provide a listdir() method')",
    "docstring": "List the contents of the specified path. Return a 2-tuple of lists: the first item being directories, the second item being files.",
    "type": "method",
    "file_path": "django\\django\\core\\files\\storage\\base.py",
    "ast_data": "FunctionDef name:listdir arg:self arg:path arguments arg arg Raise Call"
  },
  {
    "library": "django",
    "name": "get_pk_value_on_save",
    "source_code": "def get_pk_value_on_save(self, instance):\n    if self.default:\n        return self.get_default()\n    return None",
    "docstring": "Hook to generate new PK values on save. This method is called when saving instances with no primary key value set. If this method returns something else than None, then the returned value is used when saving the new instance.",
    "type": "method",
    "file_path": "django\\django\\db\\models\\fields\\__init__.py",
    "ast_data": "FunctionDef name:get_pk_value_on_save arg:self arg:instance arguments arg arg If Return return:yes Call Return return:no"
  },
  {
    "library": "pytorch",
    "name": "stack_meta",
    "source_code": "@property\ndef stack_meta(self) -> _ModuleStackMeta:\n    return self._stack_meta",
    "docstring": "Returns the module stack meta data associated with this node.",
    "type": "method",
    "file_path": "pytorch\\torch\\onnx\\_internal\\fx\\passes\\modularization.py",
    "ast_data": "FunctionDef name:stack_meta arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "nrows",
    "source_code": "def nrows(self, out_type=None, name=None):\n    with ops.name_scope(name, 'RaggedNRows', [self]):\n        if out_type is None:\n            return self._row_partition.nrows()\n        else:\n            return math_ops.cast(self._row_partition.nrows(), dtype=out_type)",
    "docstring": "Returns the number of rows in this ragged tensor. I.e., the size of the outermost dimension of the tensor. Args: out_type: for the returned tensor. Defaults to . name: A name prefix for the returned tensor (optional). Returns: A scalar with dtype . #### Example: >>> rt = tf.ragged.constant([[3, 1, 4, 1], [], [5, 9, 2], [6], []]) >>> print(rt.nrows()) # rt has 5 rows. tf.Tensor(5, shape=(), dtype=int64)",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\ragged_tensor.py",
    "ast_data": "FunctionDef name:nrows arg:self arg:out_type arg:name arguments arg arg arg With Call If Compare Return return:yes Call Return return:yes Call Call"
  },
  {
    "library": "django",
    "name": "__init__",
    "source_code": "def __init__(self, subject='', body='', from_email=None, to=None, bcc=None, connection=None, attachments=None, headers=None, alternatives=None, cc=None, reply_to=None):\n    super().__init__(subject, body, from_email, to, bcc, connection, attachments, headers, cc, reply_to)\n    self.alternatives = [EmailAlternative(*alternative) for alternative in alternatives or []]",
    "docstring": "Initialize a single email message (which can be sent to multiple recipients).",
    "type": "method",
    "file_path": "django\\django\\core\\mail\\message.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:subject arg:body arg:from_email arg:to arg:bcc arg:connection arg:attachments arg:headers arg:alternatives arg:cc arg:reply_to arguments arg arg arg arg arg arg arg arg arg arg arg arg Call Call Assign Call BoolOp"
  },
  {
    "library": "tensorflow",
    "name": "canonicalize",
    "source_code": "def canonicalize(d, default=None):\n    if isinstance(d, context.LogicalDevice):\n        d = tf_device.DeviceSpec.from_string(d.name)\n    else:\n        d = tf_device.DeviceSpec.from_string(d)\n    assert d.device_type is None or d.device_type == d.device_type.upper(), \"Device type '%s' must be all-caps.\" % (d.device_type,)\n    result = tf_device.DeviceSpec(replica=0, task=0, device_type='CPU', device_index=0)\n    if ops.executing_eagerly_outside_functions():\n        host_cpu = tf_device.DeviceSpec.from_string(config.list_logical_devices('CPU')[0].name)\n        if host_cpu.job:\n            result = result.make_merged_spec(host_cpu)\n        else:\n            result = result.replace(job='localhost')\n    if default:\n        result = result.make_merged_spec(tf_device.DeviceSpec.from_string(default))\n    result = result.make_merged_spec(d)\n    return result.to_string()",
    "docstring": "Canonicalize device string. If d has missing components, the rest would be deduced from the argument or from '/replica:0/task:0/device:CPU:0'. For example: If d = '/cpu:0', default='/job:worker/task:1', it returns '/job:worker/replica:0/task:1/device:CPU:0'. If d = '/cpu:0', default='/job:worker', it returns '/job:worker/replica:0/task:0/device:CPU:0'. If d = '/gpu:0', default=None, it returns '/replica:0/task:0/device:GPU:0'. Note: This uses \"job:localhost\" as the default if executing eagerly. Args: d: a device string or tf.config.LogicalDevice default: a string for default device if d doesn't have all components. Returns: a canonicalized device string.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\device_util.py",
    "ast_data": "FunctionDef name:canonicalize arg:d arg:default arguments arg arg If Call Assign Call Assign Call BoolOp Compare Compare Call Assign Call If Call Assign Call Call If Assign Call Assign Call If Assign Call Call Assign Call Return return:yes Call"
  },
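`canonicalize` is internal to tf.distribute, but its core step, merging a partial device spec with defaults, uses the public `tf.DeviceSpec` API. A minimal sketch of that mechanism:

```python
import tensorflow as tf

# Components missing from the partial spec are filled in from the
# base spec; components present in the partial spec take precedence.
base = tf.DeviceSpec(replica=0, task=0, device_type="CPU", device_index=0)
partial = tf.DeviceSpec.from_string("/job:worker/device:GPU:0")
print(base.make_merged_spec(partial).to_string())
# /job:worker/replica:0/task:0/device:GPU:0
```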
  {
    "library": "tensorflow",
    "name": "yield_flat_paths",
    "source_code": "@tf_export('__internal__.nest.yield_flat_paths', v1=[])\ndef yield_flat_paths(nest, expand_composites=False):\n    is_nested_fn = _is_nested_or_composite if expand_composites else is_nested\n    for k, _ in nest_util.yield_flat_up_to(nest_util.Modality.CORE, nest, nest, is_nested_fn):\n        yield k",
    "docstring": "Yields paths for some nested structure. Refer to [tf.nest]( for the definition of a structure. Paths are lists of objects which can be str-converted, which may include integers or other types which are used as indices in a dict. The flat list will be in the corresponding order as if you called on the structure. This is handy for naming Tensors such the TF scope structure matches the tuple structure. E.g. if we have a tuple Args: nest: the value to produce a flattened paths list for. expand_composites: If true, then composite tensors such as and are expanded into their component tensors. Yields: Tuples containing index or key values which form the path to a specific leaf value in the nested structure.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\util\\nest.py",
    "ast_data": "FunctionDef name:yield_flat_paths arg:nest arg:expand_composites arguments arg arg Assign For Call Call"
  },
  {
    "library": "tensorflow",
    "name": "FilterBenchmark",
    "source_code": "class FilterBenchmark(benchmark_base.DatasetBenchmarkBase):\n\n    def _benchmark(self, predicate, name, benchmark_id):\n        num_elements = 100000\n        dataset = dataset_ops.Dataset.from_tensors(True)\n        dataset = dataset.repeat().filter(predicate)\n        self.run_and_report_benchmark(dataset, num_elements=num_elements, extras={'model_name': 'filter.benchmark.%d' % benchmark_id, 'parameters': '%d' % num_elements}, name=name)\n\n    def benchmark_simple_function(self):\n        self._benchmark(array_ops.identity, 'simple_function', benchmark_id=1)\n\n    def benchmark_return_component_optimization(self):\n        self._benchmark(lambda x: x, 'return_component', benchmark_id=2)",
    "docstring": "Benchmarks for .",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\data\\benchmarks\\filter_benchmark.py",
    "ast_data": "ClassDef name:FilterBenchmark FunctionDef name:_benchmark arg:self arg:predicate arg:name arg:benchmark_id arguments arg arg arg arg Assign Assign Call Assign Call Call Call FunctionDef name:benchmark_simple_function arg:self arguments arg Call FunctionDef name:benchmark_return_component_optimization arg:self arguments arg Call arguments arg"
  },
  {
    "library": "scipy",
    "name": "_updated_ctor_param",
    "source_code": "def _updated_ctor_param(self):\n    dct = super()._updated_ctor_param()\n    dct['histogram'] = self._histogram\n    dct['density'] = self._density\n    return dct",
    "docstring": "Set the histogram as additional constructor argument",
    "type": "method",
    "file_path": "scipy\\scipy\\stats\\_continuous_distns.py",
    "ast_data": "FunctionDef name:_updated_ctor_param arg:self arguments arg Assign Call Call Assign Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "decorated",
    "source_code": "@parameterized.named_parameters(*params)\n@functools.wraps(f)\ndef decorated(self, saved_format, *args, **kwargs):\n    if saved_format == 'h5':\n        _test_h5_saved_model_format(f, self, *args, **kwargs)\n    elif saved_format == 'tf':\n        _test_tf_saved_model_format(f, self, *args, **kwargs)\n    elif saved_format == 'tf_no_traces':\n        _test_tf_saved_model_format_no_traces(f, self, *args, **kwargs)\n    else:\n        raise ValueError('Unknown model type: %s' % (saved_format,))",
    "docstring": "A run of a single test case w/ the specified model type.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\keras_parameterized.py",
    "ast_data": "FunctionDef name:decorated arg:self arg:saved_format arguments arg arg arg arg If Compare Call If Compare Call If Compare Call Raise Call Call Call"
  },
  {
    "library": "numpy",
    "name": "accumulate",
    "source_code": "def accumulate(self, target, axis=0):\n    tclass = get_masked_subclass(target)\n    t = filled(target, self.filly)\n    result = self.f.accumulate(t, axis)\n    masked_result = result.view(tclass)\n    return masked_result",
    "docstring": "Accumulate along after filling with y fill value.",
    "type": "method",
    "file_path": "numpy\\numpy\\ma\\core.py",
    "ast_data": "FunctionDef name:accumulate arg:self arg:target arg:axis arguments arg arg arg Assign Call Assign Call Assign Call Assign Call Return return:yes"
  },
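This method backs the public masked ufunc wrappers such as `np.ma.add.accumulate`. A short demonstration; per the code above, masked slots are filled with the operation's fill value (0 for addition) before accumulating:

```python
import numpy as np

x = np.ma.array([1, 2, 3, 4], mask=[False, True, False, False])

# The masked 2 is replaced by add's fill value 0, so the running
# sum is computed over [1, 0, 3, 4].
print(np.ma.add.accumulate(x))  # [1 1 4 8]
```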
  {
    "library": "tensorflow",
    "name": "get_resource",
    "source_code": "def get_resource(self, feature_column, resource_name):\n    if feature_column not in self._cols_to_resources_map or resource_name not in self._cols_to_resources_map[feature_column]:\n        raise ValueError('Resource does not exist.')\n    return self._cols_to_resources_map[feature_column][resource_name]",
    "docstring": "Returns an already created resource. Resources can be things such as tables, variables, trackables, etc. Args: feature_column: A object this variable corresponds to. resource_name: Name of the resource.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\feature_column\\feature_column_v2.py",
    "ast_data": "FunctionDef name:get_resource arg:self arg:feature_column arg:resource_name arguments arg arg arg If BoolOp Compare Compare Raise Call Return return:yes"
  },
  {
    "library": "virtualenv",
    "name": "ExePathRef",
    "source_code": "class ExePathRef(PathRef, ABC):\n\n    def __init__(self, src, must=RefMust.NA, when=RefWhen.ANY) -> None:\n        super().__init__(src, must, when)\n        self._can_run = None\n\n    @property\n    def can_symlink(self):\n        if self.FS_SUPPORTS_SYMLINK:\n            return self.can_run\n        return False\n\n    @property\n    def can_run(self):\n        if self._can_run is None:\n            mode = self.src.stat().st_mode\n            for key in [S_IXUSR, S_IXGRP, S_IXOTH]:\n                if mode & key:\n                    self._can_run = True\n                break\n            else:\n                self._can_run = False\n        return self._can_run",
    "docstring": "Base class that checks if a executable can be references via symlink/copy.",
    "type": "class",
    "file_path": "virtualenv\\src\\virtualenv\\create\\via_global_ref\\builtin\\ref.py",
    "ast_data": "ClassDef name:ExePathRef FunctionDef name:__init__ arg:self arg:src arg:must arg:when arguments arg arg arg arg Call Call Assign FunctionDef name:can_symlink arg:self arguments arg If Return return:yes Return return:yes FunctionDef name:can_run arg:self arguments arg If Compare Assign Call For If Assign Assign Return return:yes"
  },
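The executable test in `can_run` is plain stat-bit logic. A self-contained sketch of the same check (the standalone helper name is ours):

```python
import os
from stat import S_IXUSR, S_IXGRP, S_IXOTH

def can_run(path: str) -> bool:
    # Executable if any of the user/group/other execute bits is set,
    # mirroring ExePathRef.can_run.
    mode = os.stat(path).st_mode
    return any(mode & bit for bit in (S_IXUSR, S_IXGRP, S_IXOTH))

print(can_run("/bin/sh"))  # True on typical POSIX systems
```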
  {
    "library": "django",
    "name": "quote_name",
    "source_code": "def quote_name(self, name):\n    raise NotImplementedError('subclasses of BaseDatabaseOperations may require a quote_name() method')",
    "docstring": "Return a quoted version of the given table, index, or column name. Do not quote the given name if it's already been quoted.",
    "type": "method",
    "file_path": "django\\django\\db\\backends\\base\\operations.py",
    "ast_data": "FunctionDef name:quote_name arg:self arg:name arguments arg arg Raise Call"
  },
  {
    "library": "sphinx",
    "name": "resolve_reference",
    "source_code": "def resolve_reference(self, todo: todo_node, docname: str) -> None:\n    for node in todo.findall(addnodes.pending_xref):\n        if 'refdoc' in node:\n            node['refdoc'] = docname\n    self.document += todo\n    self.env.resolve_references(self.document, docname, self.builder)\n    self.document.remove(todo)",
    "docstring": "Resolve references in the todo content.",
    "type": "method",
    "file_path": "sphinx\\sphinx\\ext\\todo.py",
    "ast_data": "FunctionDef name:resolve_reference arg:self arg:todo arg:docname arguments arg arg arg For Call If Compare Assign Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_init_ready_op",
    "source_code": "def _init_ready_op(self, ready_op=USE_DEFAULT, ready_for_local_init_op=USE_DEFAULT):\n    if ready_op is Supervisor.USE_DEFAULT:\n        ready_op = self._get_first_op_from_collection(ops.GraphKeys.READY_OP)\n        if ready_op is None:\n            ready_op = variables.report_uninitialized_variables()\n            ops.add_to_collection(ops.GraphKeys.READY_OP, ready_op)\n    self._ready_op = ready_op\n    if ready_for_local_init_op is Supervisor.USE_DEFAULT:\n        ready_for_local_init_op = self._get_first_op_from_collection(ops.GraphKeys.READY_FOR_LOCAL_INIT_OP)\n    self._ready_for_local_init_op = ready_for_local_init_op",
    "docstring": "Initializes ready_op. Args: ready_op: to check if the model is initialized. If it's set to USE_DEFAULT, creates an op that checks all the variables are initialized. ready_for_local_init_op: to check if the model is ready to run local_init_op. If it's set to USE_DEFAULT, creates an op that checks all the global variables are initialized.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\training\\supervisor.py",
    "ast_data": "FunctionDef name:_init_ready_op arg:self arg:ready_op arg:ready_for_local_init_op arguments arg arg arg If Compare Assign Call If Compare Assign Call Call Assign If Compare Assign Call Assign"
  },
  {
    "library": "cherrypy",
    "name": "__str__",
    "source_code": "def __str__(self):\n    return 'authorization : %s' % self.auth_header",
    "docstring": "Render an HTTP Digest Auth header as a string.",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\lib\\auth_digest.py",
    "ast_data": "FunctionDef name:__str__ arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_get_optimizer",
    "source_code": "def _get_optimizer(self, optimizer):\n    if isinstance(self._dtype_policy, policy.PolicyV1):\n        loss_scale = self._dtype_policy.loss_scale\n    elif self._dtype_policy.name == 'mixed_float16':\n        loss_scale = 'dynamic'\n    else:\n        loss_scale = None\n\n    def _get_single_optimizer(opt):\n        opt = optimizers.get(opt)\n        if loss_scale is not None and (not isinstance(opt, lso.LossScaleOptimizer)):\n            if loss_scale == 'dynamic':\n                opt = lso.LossScaleOptimizer(opt)\n            else:\n                opt = lso.LossScaleOptimizerV1(opt, loss_scale)\n        return opt\n    return nest.map_structure(_get_single_optimizer, optimizer)",
    "docstring": "Wraps in if necessary.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\training.py",
    "ast_data": "FunctionDef name:_get_optimizer arg:self arg:optimizer arguments arg arg If Call Assign If Compare Assign Assign FunctionDef name:_get_single_optimizer arg:opt arguments arg Assign Call If BoolOp Compare Call If Compare Assign Call Assign Call Return return:yes Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "_logpdf",
    "source_code": "def _logpdf(self, dims, X, mean, row_prec_rt, log_det_rowcov, col_prec_rt, log_det_colcov):\n    numrows, numcols = dims\n    roll_dev = np.moveaxis(X - mean, -1, 0)\n    scale_dev = np.tensordot(col_prec_rt.T, np.dot(roll_dev, row_prec_rt), 1)\n    maha = np.sum(np.sum(np.square(scale_dev), axis=-1), axis=0)\n    return -0.5 * (numrows * numcols * _LOG_2PI + numcols * log_det_rowcov + numrows * log_det_colcov + maha)",
    "docstring": "Log of the matrix normal probability density function. Parameters ---------- dims : tuple Dimensions of the matrix variates X : ndarray Points at which to evaluate the log of the probability density function mean : ndarray Mean of the distribution row_prec_rt : ndarray A decomposition such that np.dot(row_prec_rt, row_prec_rt.T) is the inverse of the among-row covariance matrix log_det_rowcov : float Logarithm of the determinant of the among-row covariance matrix col_prec_rt : ndarray A decomposition such that np.dot(col_prec_rt, col_prec_rt.T) is the inverse of the among-column covariance matrix log_det_colcov : float Logarithm of the determinant of the among-column covariance matrix Notes ----- As this function does no argument checking, it should not be called directly; use 'logpdf' instead.",
    "type": "method",
    "file_path": "scipy\\scipy\\stats\\_multivariate.py",
    "ast_data": "FunctionDef name:_logpdf arg:self arg:dims arg:X arg:mean arg:row_prec_rt arg:log_det_rowcov arg:col_prec_rt arg:log_det_colcov arguments arg arg arg arg arg arg arg arg Assign Assign Call Assign Call Call Assign Call Call Call Return return:yes"
  },
  {
    "library": "django",
    "name": "timeuntil",
    "source_code": "def timeuntil(d, now=None, time_strings=None, depth=2):\n    return timesince(d, now, reversed=True, time_strings=time_strings, depth=depth)",
    "docstring": "Like timesince, but return a string measuring the time until the given time.",
    "type": "function",
    "file_path": "django\\django\\utils\\timesince.py",
    "ast_data": "FunctionDef name:timeuntil arg:d arg:now arg:time_strings arg:depth arguments arg arg arg arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_nvals_uniform_row_length",
    "source_code": "def _nvals_uniform_row_length(values, uniform_row_length):\n    const_nvals = tensor_shape.dimension_at_index(values.shape, 0).value\n    if const_nvals is not None:\n        nvals = constant_op.constant(const_nvals, uniform_row_length.dtype)\n    elif isinstance(values, RaggedTensor):\n        nvals = values.nrows(out_type=uniform_row_length.dtype)\n    else:\n        nvals = array_ops.shape(values, out_type=uniform_row_length.dtype)[0]\n    return nvals",
    "docstring": "Get the number of values for uniform row length constructor.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\ragged_tensor.py",
    "ast_data": "FunctionDef name:_nvals_uniform_row_length arg:values arg:uniform_row_length arguments arg arg Assign Call If Compare Assign Call If Call Assign Call Assign Call Return return:yes"
  },
  {
    "library": "cherrypy",
    "name": "_if_filename_register_autoreload",
    "source_code": "def _if_filename_register_autoreload(ob):\n    is_filename = isinstance(ob, text_or_bytes)\n    is_filename and cherrypy.engine.autoreload.files.add(ob)",
    "docstring": "Register for autoreload if ob is a string (presumed filename).",
    "type": "function",
    "file_path": "cherrypy\\cherrypy\\_cpconfig.py",
    "ast_data": "FunctionDef name:_if_filename_register_autoreload arg:ob arguments arg Assign Call BoolOp Call"
  },
  {
    "library": "authlib",
    "name": "add_to_uri",
    "source_code": "def add_to_uri(token, uri):\n    return add_params_to_uri(uri, [('access_token', token)])",
    "docstring": "Add a Bearer Token to the request URI. Not recommended, use only if client can't use authorization header or body.",
    "type": "function",
    "file_path": "authlib\\authlib\\oauth2\\rfc6750\\parameters.py",
    "ast_data": "FunctionDef name:add_to_uri arg:token arg:uri arguments arg arg Return return:yes Call"
  },
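Functionally, `add_to_uri` appends `access_token` to the query string (the RFC 6750 URI query-parameter method, which the docstring rightly discourages). An equivalent standalone sketch using only the standard library; the helper name is ours:

```python
from urllib.parse import parse_qsl, urlencode, urlparse, urlunparse

def add_token_to_uri(uri: str, token: str) -> str:
    parts = urlparse(uri)
    query = parse_qsl(parts.query)
    query.append(("access_token", token))  # RFC 6750 section 2.3
    return urlunparse(parts._replace(query=urlencode(query)))

print(add_token_to_uri("https://api.example.com/user?fields=id", "mF_9.B5f-4.1JqM"))
# https://api.example.com/user?fields=id&access_token=mF_9.B5f-4.1JqM
```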
  {
    "library": "scikit-learn",
    "name": "param_default_value",
    "source_code": "def param_default_value(p):\n    return p.name != 'self' and p.kind != p.VAR_KEYWORD and (p.kind != p.VAR_POSITIONAL) and (p.default != p.empty)",
    "docstring": "Identify hyper parameters of an estimator.",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\utils\\estimator_checks.py",
    "ast_data": "FunctionDef name:param_default_value arg:p arguments arg Return return:yes BoolOp Compare Compare Compare Compare"
  },
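The predicate above filters `inspect.Parameter` objects. Here is a standalone sketch applying the same filter to list a function's default-valued hyper-parameters; the wrapper function and the example `fit` signature are illustrative:

```python
import inspect

def default_valued_params(func):
    # Same filter as param_default_value: skip self, *args, **kwargs,
    # and keep only parameters that declare a default.
    return [
        p.name
        for p in inspect.signature(func).parameters.values()
        if p.name != "self"
        and p.kind not in (p.VAR_POSITIONAL, p.VAR_KEYWORD)
        and p.default is not p.empty
    ]

def fit(self, X, y=None, *, sample_weight=None, copy=True, **kwargs):
    pass

print(default_valued_params(fit))  # ['y', 'sample_weight', 'copy']
```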
  {
    "library": "matplotlib",
    "name": "PathPatch",
    "source_code": "class PathPatch(Patch):\n    _edge_default = True\n\n    def __str__(self):\n        s = 'PathPatch%d((%g, %g) ...)'\n        return s % (len(self._path.vertices), *tuple(self._path.vertices[0]))\n\n    @_docstring.interpd\n    def __init__(self, path, **kwargs):\n        super().__init__(**kwargs)\n        self._path = path\n\n    def get_path(self):\n        return self._path\n\n    def set_path(self, path):\n        self._path = path",
    "docstring": "A general polycurve path patch.",
    "type": "class",
    "file_path": "matplotlib\\lib\\matplotlib\\patches.py",
    "ast_data": "ClassDef name:PathPatch Assign FunctionDef name:__str__ arg:self arguments arg Assign Return return:yes Call Call FunctionDef name:__init__ arg:self arg:path arguments arg arg arg Call Call Assign FunctionDef name:get_path arg:self arguments arg Return return:yes FunctionDef name:set_path arg:self arg:path arguments arg arg Assign"
  },
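A short usage sketch drawing a custom `Path` through `PathPatch`, all public matplotlib API:

```python
import matplotlib.pyplot as plt
from matplotlib.patches import PathPatch
from matplotlib.path import Path

# A closed triangle as an explicit vertex/code path.
triangle = Path([(0, 0), (1, 0), (0.5, 1), (0, 0)],
                [Path.MOVETO, Path.LINETO, Path.LINETO, Path.CLOSEPOLY])

fig, ax = plt.subplots()
ax.add_patch(PathPatch(triangle, facecolor="lightblue", edgecolor="black"))
ax.set_xlim(-0.5, 1.5)
ax.set_ylim(-0.5, 1.5)
plt.show()
```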
  {
    "library": "tensorflow",
    "name": "execute_with_callbacks",
    "source_code": "def execute_with_callbacks(op_name, num_outputs, inputs, attrs, ctx, name=None):\n    tensors = quick_execute(op_name, num_outputs, inputs, attrs, ctx, name)\n    for callback in ctx.op_callbacks:\n        callback(op_name, tuple(inputs), attrs, tensors, name)\n    return tensors",
    "docstring": "Monkey-patch to execute to enable execution callbacks.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\execute.py",
    "ast_data": "FunctionDef name:execute_with_callbacks arg:op_name arg:num_outputs arg:inputs arg:attrs arg:ctx arg:name arguments arg arg arg arg arg arg Assign Call For Call Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "set_params",
    "source_code": "def set_params(self, **kwargs):\n    _api.warn_external(\"'set_params()' not defined for locator of type \" + str(type(self)))",
    "docstring": "Do nothing, and raise a warning. Any locator class not supporting the set_params() function will call this.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\ticker.py",
    "ast_data": "FunctionDef name:set_params arg:self arguments arg arg Call Call Call"
  },
  {
    "library": "scipy",
    "name": "minimise_pool",
    "source_code": "def minimise_pool(self, force_iter=False):\n    lres_f_min = self.minimize(self.X_min[0], ind=self.minimizer_pool[0])\n    self.trim_min_pool(0)\n    while not self.stop_l_iter:\n        self.stopping_criteria()\n        if force_iter:\n            force_iter -= 1\n            if force_iter == 0:\n                self.stop_l_iter = True\n                break\n        if np.shape(self.X_min)[0] == 0:\n            self.stop_l_iter = True\n            break\n        self.g_topograph(lres_f_min.x, self.X_min)\n        ind_xmin_l = self.Z[:, -1]\n        lres_f_min = self.minimize(self.Ss[-1, :], self.minimizer_pool[-1])\n        self.trim_min_pool(ind_xmin_l)\n    self.stop_l_iter = False\n    return",
    "docstring": "This processing method can optionally minimise only the best candidate solutions in the minimiser pool Parameters ---------- force_iter : int Number of starting minimizers to process (can be specified globally or locally)",
    "type": "method",
    "file_path": "scipy\\scipy\\optimize\\_shgo.py",
    "ast_data": "FunctionDef name:minimise_pool arg:self arg:force_iter arguments arg arg Assign Call Call While Call If If Compare Assign If Compare Call Assign Call Assign Assign Call Call Assign Return return:no"
  },
  {
    "library": "tensorflow",
    "name": "_ReadVariable",
    "source_code": "class _ReadVariable(_Node):\n\n    def convert_variable_to_constant(self, incoming_edge, tensor_data):\n        node = self.converted_self().node\n        node.Clear()\n        node.name = self._node.name\n        node.op = 'Identity'\n        node.input.append(self._node.input[0])\n        node.attr['T'].CopyFrom(self._node.attr['dtype'])\n        if '_class' in self._node.attr:\n            node.attr['_class'].CopyFrom(self._node.attr['_class'])\n        if self._function is not None:\n            for edge in self.outgoing_edges:\n                index = edge.destination.index\n                dest = edge.destination.convertible.converted_self()\n                if isinstance(dest, _Node):\n                    input_name_parts = dest.node.input[index].split(':')\n                    if len(input_name_parts) > 1 and input_name_parts[1] == 'value':\n                        input_name_parts[1] = 'output'\n                        dest.node.input[index] = ':'.join(input_name_parts)",
    "docstring": "Specialization of _Node to ReadVariableOp.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\convert_to_constants.py",
    "ast_data": "ClassDef name:_ReadVariable FunctionDef name:convert_variable_to_constant arg:self arg:incoming_edge arg:tensor_data arguments arg arg arg Assign Call Call Assign Assign Call Call If Compare Call If Compare For Assign Assign Call If Call Assign Call If BoolOp Compare Call Compare Assign Assign Call"
  },
  {
    "library": "scipy",
    "name": "_statistic_dunnett",
    "source_code": "def _statistic_dunnett(samples: list[np.ndarray], control: np.ndarray, df: int, n_samples: np.ndarray, n_control: int) -> tuple[np.ndarray, float, np.ndarray, np.ndarray]:\n    mean_control = np.mean(control)\n    mean_samples = np.array([np.mean(sample) for sample in samples])\n    all_samples = [control] + samples\n    all_means = np.concatenate([[mean_control], mean_samples])\n    s2 = np.sum([_var(sample, mean=mean) * sample.size for sample, mean in zip(all_samples, all_means)]) / df\n    std = np.sqrt(s2)\n    z = (mean_samples - mean_control) / np.sqrt(1 / n_samples + 1 / n_control)\n    return (z / std, std, mean_control, mean_samples)",
    "docstring": "Statistic of Dunnett's test. Computation based on the original single-step test from [1].",
    "type": "function",
    "file_path": "scipy\\scipy\\stats\\_multicomp.py",
    "ast_data": "FunctionDef name:_statistic_dunnett arg:samples arg:control arg:df arg:n_samples arg:n_control arguments arg arg arg arg arg Assign Call Assign Call Call Assign Assign Call Assign Call Call Call Assign Call Assign Call Return return:yes"
  },
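This helper computes the statistic behind the public `scipy.stats.dunnett` (available in SciPy 1.11+). A usage sketch with synthetic data; the shift sizes are illustrative:

```python
import numpy as np
from scipy.stats import dunnett

rng = np.random.default_rng(0)
control = rng.normal(0.0, 1.0, size=30)
treat_a = rng.normal(0.7, 1.0, size=30)  # shifted mean
treat_b = rng.normal(0.0, 1.0, size=30)

# Each treatment group is compared against the shared control.
res = dunnett(treat_a, treat_b, control=control)
print(res.statistic, res.pvalue)
```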
  {
    "library": "pytorch",
    "name": "triton_store_type",
    "source_code": "def triton_store_type(dtype: torch.dtype) -> str:\n    if dtype == torch.bool:\n        dtype = torch.int8\n    return triton_type(dtype)",
    "docstring": "Convert torch.dtype to triton type, with fix for storing tl.bool",
    "type": "function",
    "file_path": "pytorch\\torch\\_inductor\\codegen\\triton.py",
    "ast_data": "FunctionDef name:triton_store_type arg:dtype arguments arg If Compare Assign Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "is_saving_non_distributed",
    "source_code": "def is_saving_non_distributed():\n    if not save_context.in_save_context():\n        return False\n    options = save_context.get_save_options()\n    return options.experimental_variable_policy != save_options.VariablePolicy.EXPAND_DISTRIBUTED_VARIABLES",
    "docstring": "Returns whether we're saving a non-distributed version of the model. It returns True iff we are in saving context and are saving a non-distributed version of the model. That is, SaveOptions.experimental_variable_policy is NONE. Returns: A boolean.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\values_util.py",
    "ast_data": "FunctionDef name:is_saving_non_distributed arguments If Call Return return:yes Assign Call Return return:yes Compare"
  },
  {
    "library": "tensorflow",
    "name": "_identify_gradient_grad_ref",
    "source_code": "@ops.RegisterGradient('DebugGradientRefIdentity')\ndef _identify_gradient_grad_ref(op, dy):\n    return _identify_gradient_grad(op, dy)",
    "docstring": "Gradient function for the DebugIdentity op.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\debug\\lib\\debug_gradients.py",
    "ast_data": "FunctionDef name:_identify_gradient_grad_ref arg:op arg:dy arguments arg arg Return return:yes Call Call"
  },
  {
    "library": "pandas",
    "name": "validate_integer",
    "source_code": "def validate_integer(name: str, val: int | float | None, min_val: int=0) -> int | None:\n    if val is None:\n        return val\n    msg = f\"'{name:s}' must be an integer >={min_val:d}\"\n    if is_float(val):\n        if int(val) != val:\n            raise ValueError(msg)\n        val = int(val)\n    elif not (is_integer(val) and val >= min_val):\n        raise ValueError(msg)\n    return int(val)",
    "docstring": "Checks whether the 'name' parameter for parsing is either an integer OR float that can SAFELY be cast to an integer without losing accuracy. Raises a ValueError if that is not the case. Parameters ---------- name : str Parameter name (used for error reporting) val : int or float The value to check min_val : int Minimum allowed value (val < min_val will result in a ValueError)",
    "type": "function",
    "file_path": "pandas\\pandas\\io\\parsers\\readers.py",
    "ast_data": "FunctionDef name:validate_integer arg:name arg:val arg:min_val arguments arg arg arg If Compare Return return:yes Assign If Call If Compare Call Raise Call Assign Call If BoolOp Call Compare Raise Call Return return:yes Call"
  },
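A standalone sketch of the same safe float-to-int validation, substituting plain `isinstance` checks for pandas' internal `is_float`/`is_integer` helpers:

```python
def validate_integer(name, val, min_val=0):
    if val is None:
        return None
    msg = f"'{name}' must be an integer >={min_val}"
    if isinstance(val, float):
        if int(val) != val:  # 3.5 -> reject; 3.0 -> accept losslessly
            raise ValueError(msg)
        val = int(val)
    elif not (isinstance(val, int) and val >= min_val):
        raise ValueError(msg)
    return int(val)

print(validate_integer("nrows", 3.0))  # 3
# validate_integer("nrows", 3.5)       # would raise ValueError
```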
  {
    "library": "tensorflow",
    "name": "num_records_produced",
    "source_code": "def num_records_produced(self, name=None):\n    if self._reader_ref.dtype == dtypes.resource:\n        return gen_io_ops.reader_num_records_produced_v2(self._reader_ref, name=name)\n    else:\n        return gen_io_ops.reader_num_records_produced(self._reader_ref, name=name)",
    "docstring": "Returns the number of records this reader has produced. This is the same as the number of Read executions that have succeeded. Args: name: A name for the operation (optional). Returns: An int64 Tensor.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\io_ops.py",
    "ast_data": "FunctionDef name:num_records_produced arg:self arg:name arguments arg arg If Compare Return return:yes Call Return return:yes Call"
  },
  {
    "library": "numpy",
    "name": "_format_term",
    "source_code": "def _format_term(self, scalar_format: Callable, off: float, scale: float):\n    if off == 0 and scale == 1:\n        term = self.symbol\n        needs_parens = False\n    elif scale == 1:\n        term = f'{scalar_format(off)} + {self.symbol}'\n        needs_parens = True\n    elif off == 0:\n        term = f'{scalar_format(scale)}{self.symbol}'\n        needs_parens = True\n    else:\n        term = f'{scalar_format(off)} + {scalar_format(scale)}{self.symbol}'\n        needs_parens = True\n    return (term, needs_parens)",
    "docstring": "Format a single term in the expansion",
    "type": "method",
    "file_path": "numpy\\numpy\\polynomial\\_polybase.py",
    "ast_data": "FunctionDef name:_format_term arg:self arg:scalar_format arg:off arg:scale arguments arg arg arg arg If BoolOp Compare Compare Assign Assign If Compare Assign Call Assign If Compare Assign Call Assign Assign Call Call Assign Return return:yes"
  },
  {
    "library": "authlib",
    "name": "fetch_request_token",
    "source_code": "def fetch_request_token(self, url, **kwargs):\n    return self._fetch_token(url, **kwargs)",
    "docstring": "Method for fetching an access token from the token endpoint. This is the first step in the OAuth 1 workflow. A request token is obtained by making a signed post request to url. The token is then parsed from the application/x-www-form-urlencoded response and ready to be used to construct an authorization url. :param url: Request Token endpoint. :param kwargs: Extra parameters to include for fetching token. :return: A Request Token dict.",
    "type": "method",
    "file_path": "authlib\\authlib\\oauth1\\client.py",
    "ast_data": "FunctionDef name:fetch_request_token arg:self arg:url arguments arg arg arg Return return:yes Call"
  },
  {
    "library": "sphinx",
    "name": "extract_original_messages",
    "source_code": "def extract_original_messages(self) -> Sequence[str]:\n    raise NotImplementedError",
    "docstring": "Extract translation messages. :returns: list of extracted messages or messages generator",
    "type": "method",
    "file_path": "sphinx\\sphinx\\addnodes.py",
    "ast_data": "FunctionDef name:extract_original_messages arg:self arguments arg Raise"
  },
  {
    "library": "django",
    "name": "U",
    "source_code": "def U(self):\n    value = self.data\n    if not isinstance(value, datetime):\n        value = datetime.combine(value, time.min)\n    return int(value.timestamp())",
    "docstring": "Seconds since the Unix epoch (January 1 1970 00:00:00 GMT)",
    "type": "method",
    "file_path": "django\\django\\utils\\dateformat.py",
    "ast_data": "FunctionDef name:U arg:self arguments arg Assign If Call Assign Call Return return:yes Call Call"
  },
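The formatter promotes a bare date to midnight before taking the POSIX timestamp. The same two steps in isolation (note that `timestamp()` interprets a naive datetime in the local timezone):

```python
from datetime import date, datetime, time

value = date(2024, 1, 1)
if not isinstance(value, datetime):
    value = datetime.combine(value, time.min)  # midnight on that date
print(int(value.timestamp()))  # seconds since the Unix epoch
```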
  {
    "library": "sphinx",
    "name": "desc_signature",
    "source_code": "class desc_signature(_desc_classes_injector, nodes.Part, nodes.Inline, nodes.TextElement):\n    classes = ['sig', 'sig-object']\n\n    @property\n    def child_text_separator(self) -> str:\n        if self.get('is_multiline'):\n            return ' '\n        else:\n            return super().child_text_separator",
    "docstring": "Node for a single object signature. As default the signature is a single-line signature. Set `desc_signature_line`, and the domain it belongs to.",
    "type": "class",
    "file_path": "sphinx\\sphinx\\addnodes.py",
    "ast_data": "ClassDef name:desc_signature Assign FunctionDef name:child_text_separator arg:self arguments arg If Call Return return:yes Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "get_full_name",
    "source_code": "def get_full_name(var):\n    if not (isinstance(var, variables.Variable) or resource_variable_ops.is_resource_variable(var)):\n        return ''\n    if getattr(var, '_save_slice_info', None) is not None:\n        return var._save_slice_info.full_name\n    else:\n        return var._shared_name",
    "docstring": "Gets the full name of variable for name-based checkpoint compatibility.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\checkpoint\\util.py",
    "ast_data": "FunctionDef name:get_full_name arg:var arguments arg If BoolOp Call Call Return return:yes If Compare Call Return return:yes Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, job=None, replica=None, task=None, device_type=None, device_index=None):\n    self._job = _as_str_or_none(job)\n    self._replica = _as_int_or_none(replica)\n    self._task = _as_int_or_none(task)\n    self._device_type = _as_device_str_or_none(device_type)\n    self._device_index = _as_int_or_none(device_index)\n    self._as_string = self._components_to_string(job=self._job, replica=self._replica, task=self._task, device_type=self._device_type, device_index=self._device_index)\n    self._hash = hash(self.to_string())",
    "docstring": "Create a new object. Args: job: string. Optional job name. replica: int. Optional replica index. task: int. Optional task index. device_type: Optional device type string (e.g. \"CPU\" or \"GPU\") device_index: int. Optional device index. If left unspecified, device represents 'any' device_index.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\device_spec.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:job arg:replica arg:task arg:device_type arg:device_index arguments arg arg arg arg arg arg Assign Call Assign Call Assign Call Assign Call Assign Call Assign Call Assign Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_rank",
    "source_code": "def _rank(t):\n    rank = t.get_shape().rank if isinstance(t, tensor_lib.Tensor) else None\n    return array_ops.rank(t) if rank is None else rank",
    "docstring": "Returns rank as an integer (when statically known) or as a tensor.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\parallel_for\\pfor.py",
    "ast_data": "FunctionDef name:_rank arg:t arguments arg Assign Call Call Return return:yes Compare Call"
  },
  {
    "library": "tensorflow",
    "name": "save",
    "source_code": "def save(self, as_text=False, experimental_image_format=False, experimental_image_writer_options=None):\n    metrics.IncrementWriteApi(_SAVE_BUILDER_LABEL)\n    if not file_io.file_exists(self._export_dir):\n        file_io.recursive_create_dir(self._export_dir)\n    if as_text:\n        path = file_io.join(compat.as_bytes(self._export_dir), compat.as_bytes(constants.SAVED_MODEL_FILENAME_PBTXT))\n        file_io.write_string_to_file(path, str(self._saved_model))\n    elif experimental_image_format:\n        path = file_io.join(self._export_dir, constants.SAVED_MODEL_FILENAME_PREFIX)\n        if locals().get('proto_splitter', globals().get('proto_splitter')) is None:\n            raise RuntimeError('No proto_splitter is provided, cannot use experimental_image_format.')\n        path = proto_splitter.SavedModelSplitter(self._saved_model).write(path, experimental_image_writer_options)\n    else:\n        path = file_io.join(compat.as_bytes(self._export_dir), compat.as_bytes(constants.SAVED_MODEL_FILENAME_PB))\n        file_io.write_string_to_file(path, self._saved_model.SerializeToString(deterministic=True))\n    tf_logging.info('SavedModel written to: %s', compat.as_text(path))\n    metrics.IncrementWrite(write_version='1')\n    return path",
    "docstring": "Writes a protocol buffer to disk. The function writes the SavedModel protocol buffer to the export directory in a serialized format. Args: as_text: Writes the SavedModel protocol buffer in text format to disk. Protocol buffers in text format are useful for debugging, but parsing fails when it encounters an unknown field and so is not forward compatible. This means changes to TensorFlow may prevent deployment of new text format SavedModels to existing serving binaries. Do not deploy SavedModels to production. experimental_image_format: Writes the SavedModel protobuf in the experimental image format. See for more details. This allows to save models larger than 2 GiB. experimental_image_writer_options: Optional options for the experimental image writer. See for available options. Raises: RuntimeError: When trying to use but is not imported. This check is here because is not available in OSS at the moment. Returns: The path to which the SavedModel protocol buffer was written.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\saved_model\\builder_impl.py",
    "ast_data": "FunctionDef name:save arg:self arg:as_text arg:experimental_image_format arg:experimental_image_writer_options arguments arg arg arg arg Call If Call Call If Assign Call Call Call Call Call If Assign Call If Compare Call Call Call Call Raise Call Assign Call Call Assign Call Call Call Call Call Call Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "truncdiv",
    "source_code": "def truncdiv(self, x0: T, x1: T) -> T:\n    raise NotImplementedError",
    "docstring": "C-style trunc division between integers only. Computes the true division of two numbers and rounds the result to zero.",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\ops_handler.py",
    "ast_data": "FunctionDef name:truncdiv arg:self arg:x0 arg:x1 arguments arg arg arg Raise"
  },
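The contract here is C-style truncation, which differs from Python's floor division for negative operands. A two-line illustration:

```python
import math

def truncdiv(a: int, b: int) -> int:
    # Round the true quotient toward zero (C semantics),
    # unlike Python's //, which rounds toward negative infinity.
    return math.trunc(a / b)

print(truncdiv(7, 2), 7 // 2)    # 3 3
print(truncdiv(-7, 2), -7 // 2)  # -3 -4
```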
  {
    "library": "tensorflow",
    "name": "_UnaryMapValueDispatcher",
    "source_code": "class _UnaryMapValueDispatcher(dispatch.OpDispatcher):\n\n    def __init__(self, original_func):\n        self._original_func = original_func\n        func_name = get_canonical_name_for_symbol(original_func)\n        arg_names = tf_inspect.getfullargspec(original_func)[0]\n        self._x = arg_names[0]\n        original_func.__doc__ = original_func.__doc__.rstrip() + '\\n\\n' + '    If `{x}` is a `SparseTensor`, returns\\n    `SparseTensor({x}.indices, tf.{func}({x}.values, ...), {x}.dense_shape)`'.format(x=self._x, func=func_name)\n\n    def handle(self, args, kwargs):\n        if args:\n            x, args = (args[0], args[1:])\n        else:\n            kwargs = kwargs.copy()\n            x = kwargs.pop(self._x, None)\n        if isinstance(x, sparse_tensor.SparseTensor):\n            return sparse_tensor.SparseTensor(indices=x.indices, values=self._original_func(x.values, *args, **kwargs), dense_shape=x.dense_shape)\n        else:\n            return self.NOT_SUPPORTED",
    "docstring": "OpDispatcher for unary ops that maps base function across sparse values.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\sparse_ops.py",
    "ast_data": "ClassDef name:_UnaryMapValueDispatcher FunctionDef name:__init__ arg:self arg:original_func arguments arg arg Assign Assign Call Assign Call Assign Assign Call Call FunctionDef name:handle arg:self arg:args arg:kwargs arguments arg arg arg If Assign Assign Call Assign Call If Call Return return:yes Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "adamax",
    "source_code": "@_disable_dynamo_if_unsupported(single_tensor_fn=_single_tensor_adamax)\ndef adamax(params: list[Tensor], grads: list[Tensor], exp_avgs: list[Tensor], exp_infs: list[Tensor], state_steps: list[Tensor], foreach: Optional[bool]=None, maximize: bool=False, differentiable: bool=False, capturable: bool=False, has_complex: bool=False, *, eps: float, beta1: float, beta2: float, lr: float, weight_decay: float):\n    if not torch.compiler.is_compiling() and (not all((isinstance(t, torch.Tensor) for t in state_steps))):\n        raise RuntimeError('API has changed, `state_steps` argument must contain a list of singleton tensors')\n    if foreach is None:\n        _, foreach = _default_to_fused_or_foreach(params, differentiable, use_fused=False)\n    if foreach and torch.jit.is_scripting():\n        raise RuntimeError('torch.jit.script not supported with foreach optimizers')\n    if foreach and (not torch.jit.is_scripting()):\n        func = _multi_tensor_adamax\n    else:\n        func = _single_tensor_adamax\n    func(params, grads, exp_avgs, exp_infs, state_steps, eps=eps, beta1=beta1, beta2=beta2, lr=lr, weight_decay=weight_decay, maximize=maximize, differentiable=differentiable, has_complex=has_complex, capturable=capturable)",
    "docstring": "Functional API that performs adamax algorithm computation. See :class: for details.",
    "type": "function",
    "file_path": "pytorch\\torch\\optim\\adamax.py",
    "ast_data": "FunctionDef name:adamax arg:params arg:grads arg:exp_avgs arg:exp_infs arg:state_steps arg:foreach arg:maximize arg:differentiable arg:capturable arg:has_complex arguments arg arg arg arg arg arg arg arg arg arg arg arg arg arg arg If BoolOp Call Call Call Raise Call If Compare Assign Call If BoolOp Call Raise Call If BoolOp Call Assign Assign Call Call"
  },
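This functional entry point is normally driven by the `torch.optim.Adamax` class in ordinary training code. A minimal end-to-end sketch with synthetic data:

```python
import torch

model = torch.nn.Linear(4, 1)
opt = torch.optim.Adamax(model.parameters(), lr=2e-3)

x, y = torch.randn(8, 4), torch.randn(8, 1)
for _ in range(5):
    opt.zero_grad()
    loss = torch.nn.functional.mse_loss(model(x), y)
    loss.backward()
    opt.step()  # internally dispatches to the adamax() function above
```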
  {
    "library": "django",
    "name": "sql_flush",
    "source_code": "def sql_flush(self, style, tables, *, reset_sequences=False, allow_cascade=False):\n    raise NotImplementedError('subclasses of BaseDatabaseOperations must provide an sql_flush() method')",
    "docstring": "Return a list of SQL statements required to remove all data from the given database tables (without actually removing the tables themselves). The argument is a Style object as returned by either color_style() or no_style() in django.core.management.color. If is True, the list includes SQL statements required to reset the sequences. The argument determines whether truncation may cascade to tables with foreign keys pointing the tables being truncated. PostgreSQL requires a cascade even if these tables are empty.",
    "type": "method",
    "file_path": "django\\django\\db\\backends\\base\\operations.py",
    "ast_data": "FunctionDef name:sql_flush arg:self arg:style arg:tables arguments arg arg arg arg arg Raise Call"
  },
  {
    "library": "tensorflow",
    "name": "on_epoch_begin",
    "source_code": "def on_epoch_begin(self, epoch, logs=None):\n    logs = self._process_logs(logs)\n    for callback in self.callbacks:\n        callback.on_epoch_begin(epoch, logs)",
    "docstring": "Calls the methods of its callbacks. This function should only be called during TRAIN mode. Args: epoch: Integer, index of epoch. logs: Dict. Currently no data is passed to this argument for this method but that may change in the future.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\callbacks.py",
    "ast_data": "FunctionDef name:on_epoch_begin arg:self arg:epoch arg:logs arguments arg arg arg Assign Call For Call"
  },
  {
    "library": "tensorflow",
    "name": "partition_or_replicate_on_host",
    "source_code": "def partition_or_replicate_on_host(tensor, dims):\n    if dims is None:\n        return itertools.repeat(tensor)\n    dims = np.array(dims)\n    output = [tensor]\n    shape_list = np.array(tensor.shape.as_list())\n    quotients, remainders = np.divmod(shape_list, dims)\n    for axis, (quotient, remainder, dim, original_size) in enumerate(zip(quotients, remainders, dims, shape_list)):\n        if dim <= 1:\n            continue\n        if remainder > 0:\n            ceil_ratio = quotient + 1\n            num_full_slots, left_over = np.divmod(original_size, ceil_ratio)\n            num_or_size_splits = [ceil_ratio] * num_full_slots + [left_over]\n            if len(num_or_size_splits) < dim:\n                num_or_size_splits += [0] * (dim - len(num_or_size_splits))\n            new_output = []\n            for x in output:\n                new_output.append(array_ops.split(x, num_or_size_splits=num_or_size_splits, axis=axis))\n            output = new_output\n        else:\n            output = [array_ops.split(x, int(dim), axis=axis) for x in output]\n        output = nest.flatten(output)\n    return output",
    "docstring": "Partitions or replicates the input tensor. The ops inside this function are placed on the host side. Args: tensor: The input tensor which will be partitioned or replicated. dims: A list of integer describes how to partition the input tensor. Returns: An iterator of s or a list of partitioned tensors.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\tpu\\tpu_feed.py",
    "ast_data": "FunctionDef name:partition_or_replicate_on_host arg:tensor arg:dims arguments arg arg If Compare Return return:yes Call Assign Call Assign Assign Call Call Assign Call For Call Call If Compare If Compare Assign Assign Call Assign If Compare Call Call Assign For Call Call Assign Assign Call Call Assign Call Return return:yes"
  },
  {
    "library": "django",
    "name": "modelform_factory",
    "source_code": "def modelform_factory(model, form=ModelForm, fields=None, exclude=None, formfield_callback=None, widgets=None, localized_fields=None, labels=None, help_texts=None, error_messages=None, field_classes=None):\n    attrs = {'model': model}\n    if fields is not None:\n        attrs['fields'] = fields\n    if exclude is not None:\n        attrs['exclude'] = exclude\n    if widgets is not None:\n        attrs['widgets'] = widgets\n    if localized_fields is not None:\n        attrs['localized_fields'] = localized_fields\n    if labels is not None:\n        attrs['labels'] = labels\n    if help_texts is not None:\n        attrs['help_texts'] = help_texts\n    if error_messages is not None:\n        attrs['error_messages'] = error_messages\n    if field_classes is not None:\n        attrs['field_classes'] = field_classes\n    bases = (form.Meta,) if hasattr(form, 'Meta') else ()\n    Meta = type('Meta', bases, attrs)\n    if formfield_callback:\n        Meta.formfield_callback = staticmethod(formfield_callback)\n    class_name = model.__name__ + 'Form'\n    form_class_attrs = {'Meta': Meta}\n    if getattr(Meta, 'fields', None) is None and getattr(Meta, 'exclude', None) is None:\n        raise ImproperlyConfigured(\"Calling modelform_factory without defining 'fields' or 'exclude' explicitly is prohibited.\")\n    return type(form)(class_name, (form,), form_class_attrs)",
    "docstring": "Return a ModelForm containing form fields for the given model. You can optionally pass a argument to use as a starting point for constructing the ModelForm. `` is a dictionary of model field names mapped to a form field class.",
    "type": "function",
    "file_path": "django\\django\\forms\\models.py",
    "ast_data": "FunctionDef name:modelform_factory arg:model arg:form arg:fields arg:exclude arg:formfield_callback arg:widgets arg:localized_fields arg:labels arg:help_texts arg:error_messages arg:field_classes arguments arg arg arg arg arg arg arg arg arg arg arg Assign If Compare Assign If Compare Assign If Compare Assign If Compare Assign If Compare Assign If Compare Assign If Compare Assign If Compare Assign Assign Call Assign Call If Assign Call Assign Assign If BoolOp Compare Call Compare Call Raise Call Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "_get_param_to_fqns",
    "source_code": "def _get_param_to_fqns(model: torch.nn.Module, dedup_shared_params: bool=True) -> dict[nn.Parameter, list[str]]:\n\n    def module_fn(module, prefix, tree_level, param_to_fqns):\n        for param_name, param in _named_parameters_with_duplicates(module, recurse=False):\n            local_fqns = param._fqns if isinstance(param, flat_param_file.FlatParameter) else [param_name]\n            global_fqns = [clean_tensor_name(prefix + name) for name in local_fqns]\n            is_shared_param = param in param_to_fqns\n            if not is_shared_param:\n                param_to_fqns[param] = global_fqns\n            elif isinstance(param, flat_param_file.FlatParameter):\n                warnings.warn('FlatParameter is being traversed more than once. This case should only happen when using DistributedModelParallel with FullyShardedDataParallel.')\n                param_to_fqns[param] = global_fqns\n            elif not dedup_shared_params:\n                param_to_fqns[param].extend(global_fqns)\n\n    def return_fn(param_to_fqns):\n        return param_to_fqns\n    param_to_unflat_param_names: dict[torch.nn.Parameter, list[str]] = {}\n    return _apply_to_modules(model, module_fn, return_fn, [key for key, _ in _named_parameters_with_duplicates(model)], param_to_unflat_param_names)",
    "docstring": "Constructs a mapping from parameter to a list of its \"canonical\" FQNs. Here, we use canonical to mean the fully-qualified name assigned to the parameter based on its position in the original nn.Module hierarchy before any wrapper or parallelism has been applied to it. This is in contrast to FQNs that may be generated after parallelisms or wrappers have been applied to the model. Each normal parameter maps to a singleton list containing its FQN, while each `FullyShardedDataParallel`)",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\fsdp\\_common_utils.py",
    "ast_data": "FunctionDef name:_get_param_to_fqns arg:model arg:dedup_shared_params arguments arg arg FunctionDef name:module_fn arg:module arg:prefix arg:tree_level arg:param_to_fqns arguments arg arg arg arg For Call Assign Call Assign Call Assign Compare If Assign If Call Call Assign If Call FunctionDef name:return_fn arg:param_to_fqns arguments arg Return return:yes Return return:yes Call Call"
  },
  {
    "library": "django",
    "name": "setdefault",
    "source_code": "def setdefault(self, key, value):\n    self.headers.setdefault(key, value)",
    "docstring": "Set a header unless it has already been set.",
    "type": "method",
    "file_path": "django\\django\\http\\response.py",
    "ast_data": "FunctionDef name:setdefault arg:self arg:key arg:value arguments arg arg arg Call"
  },
  {
    "library": "scikit-learn",
    "name": "score",
    "source_code": "def score(self, X, y, sample_weight=None):\n    return accuracy_score(y, self.predict(X), sample_weight=sample_weight)",
    "docstring": "Return the mean accuracy on the given test data and labels. In multi-label classification, this is the subset accuracy which is a harsh metric since you require for each sample that each label set be correctly predicted. Parameters ---------- X : array-like of shape (n_samples, n_features) Test samples. y : array-like of shape (n_samples,) or (n_samples, n_outputs) True labels for X. sample_weight : array-like of shape (n_samples,), default=None Sample weights. Returns ------- score : float Mean accuracy of self.predict(X) w.r.t. y.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\covariance\\_elliptic_envelope.py",
    "ast_data": "FunctionDef name:score arg:self arg:X arg:y arg:sample_weight arguments arg arg arg arg Return return:yes Call Call"
  },
  {
    "library": "django",
    "name": "add_update_fields",
    "source_code": "def add_update_fields(self, values_seq):\n    for field, model, val in values_seq:\n        if field.generated:\n            continue\n        if hasattr(val, 'resolve_expression'):\n            val = val.resolve_expression(self, allow_joins=False, for_save=True)\n        self.values.append((field, model, val))",
    "docstring": "Append a sequence of (field, model, value) triples to the internal list that will be used to generate the UPDATE query. Might be more usefully called add_update_targets() to hint at the extra information here.",
    "type": "method",
    "file_path": "django\\django\\db\\models\\sql\\subqueries.py",
    "ast_data": "FunctionDef name:add_update_fields arg:self arg:values_seq arguments arg arg For If If Call Assign Call Call"
  },
  {
    "library": "tensorflow",
    "name": "convert",
    "source_code": "@_export_metrics\ndef convert(self):\n    saved_model_convert_result = self._convert_as_saved_model()\n    if saved_model_convert_result:\n        return saved_model_convert_result\n    return super(TFLiteKerasModelConverter, self).convert()",
    "docstring": "Converts a Keras model based on instance variables. Returns: The converted data in serialized format, either a TFLite Flatbuffer or a Graphviz graph depending on value in . Raises: ValueError: Input shape is not specified. None value for dimension in input_tensor.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\lite\\python\\lite.py",
    "ast_data": "FunctionDef name:convert arg:self arguments arg Assign Call If Return return:yes Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "delete_submodule",
    "source_code": "@compatibility(is_backward_compatible=True)\ndef delete_submodule(self, target: str) -> bool:\n    atoms = target.split('.')\n    path, target_submod = (atoms[:-1], atoms[-1])\n    mod: torch.nn.Module = self\n    for item in path:\n        if not hasattr(mod, item):\n            return False\n        mod = getattr(mod, item)\n        if not isinstance(mod, torch.nn.Module):\n            return False\n    if not hasattr(mod, target_submod):\n        return False\n    if not isinstance(getattr(mod, target_submod), torch.nn.Module):\n        return False\n    delattr(mod, target_submod)\n    return True",
    "docstring": "Deletes the given submodule from `` was not a valid reference to a submodule.",
    "type": "method",
    "file_path": "pytorch\\torch\\fx\\graph_module.py",
    "ast_data": "FunctionDef name:delete_submodule arg:self arg:target arguments arg arg Assign Call Assign For If Call Return return:yes Assign Call If Call Return return:yes If Call Return return:yes If Call Call Return return:yes Call Return return:yes Call"
  },
  {
    "library": "numpy",
    "name": "_recursive_printoption",
    "source_code": "def _recursive_printoption(result, mask, printopt):\n    names = result.dtype.names\n    if names is not None:\n        for name in names:\n            curdata = result[name]\n            curmask = mask[name]\n            _recursive_printoption(curdata, curmask, printopt)\n    else:\n        np.copyto(result, printopt, where=mask)",
    "docstring": "Puts printoptions in result where mask is True. Private function allowing for recursion",
    "type": "function",
    "file_path": "numpy\\numpy\\ma\\core.py",
    "ast_data": "FunctionDef name:_recursive_printoption arg:result arg:mask arg:printopt arguments arg arg arg Assign If Compare For Assign Assign Call Call"
  },
  {
    "library": "pandas",
    "name": "null_count",
    "source_code": "@cache_readonly\ndef null_count(self) -> int:\n    return self._col.isna().sum().item()",
    "docstring": "Number of null elements. Should always be known.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\interchange\\column.py",
    "ast_data": "FunctionDef name:null_count arg:self arguments arg Return return:yes Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "optimized_execution",
    "source_code": "@contextlib.contextmanager\ndef optimized_execution(should_optimize):\n    stored_flag = torch._C._get_graph_executor_optimize()\n    torch._C._set_graph_executor_optimize(should_optimize)\n    try:\n        yield\n    finally:\n        torch._C._set_graph_executor_optimize(stored_flag)",
    "docstring": "Context manager that controls whether the JIT's executor will run optimizations before executing a function.",
    "type": "function",
    "file_path": "pytorch\\torch\\jit\\_fuser.py",
    "ast_data": "FunctionDef name:optimized_execution arg:should_optimize arguments arg Assign Call Call Try Call"
  },
  {
    "library": "matplotlib",
    "name": "getp",
    "source_code": "def getp(obj, property=None):\n    if property is None:\n        insp = ArtistInspector(obj)\n        ret = insp.pprint_getters()\n        print('\\n'.join(ret))\n        return\n    return getattr(obj, 'get_' + property)()",
    "docstring": "Return the value of an 's *property*, or print all of them. Parameters ---------- obj : The queried artist; e.g., a , a , or an . property : str or None, default: None If *property* is 'somename', this function returns ``. If it's None (or unset), it *prints* all gettable properties from *obj*. Many properties have aliases for shorter typing, e.g. 'lw' is an alias for 'linewidth'. In the output, aliases and full property names will be listed as: property or alias = value e.g.: linewidth or lw = 2 See Also -------- setp",
    "type": "function",
    "file_path": "matplotlib\\lib\\matplotlib\\artist.py",
    "ast_data": "FunctionDef name:getp arg:obj arg:property arguments arg arg If Compare Assign Call Assign Call Call Call Return return:no Return return:yes Call Call"
  },
  {
    "library": "django",
    "name": "never_cache",
    "source_code": "def never_cache(view_func):\n    if iscoroutinefunction(view_func):\n\n        async def _view_wrapper(request, *args, **kwargs):\n            _check_request(request, 'never_cache')\n            response = await view_func(request, *args, **kwargs)\n            add_never_cache_headers(response)\n            return response\n    else:\n\n        def _view_wrapper(request, *args, **kwargs):\n            _check_request(request, 'never_cache')\n            response = view_func(request, *args, **kwargs)\n            add_never_cache_headers(response)\n            return response\n    return wraps(view_func)(_view_wrapper)",
    "docstring": "Decorator that adds headers to a response so that it will never be cached.",
    "type": "function",
    "file_path": "django\\django\\views\\decorators\\cache.py",
    "ast_data": "FunctionDef name:never_cache arg:view_func arguments arg If Call AsyncFunctionDef name:_view_wrapper arg:request arguments arg arg arg Call Assign Call Call Return return:yes FunctionDef name:_view_wrapper arg:request arguments arg arg arg Call Assign Call Call Return return:yes Return return:yes Call Call"
  },
  {
    "library": "pandas",
    "name": "_is_nested_tuple_indexer",
    "source_code": "@final\ndef _is_nested_tuple_indexer(self, tup: tuple) -> bool:\n    if any((isinstance(ax, MultiIndex) for ax in self.obj.axes)):\n        return any((is_nested_tuple(tup, ax) for ax in self.obj.axes))\n    return False",
    "docstring": "Returns ------- bool",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\indexing.py",
    "ast_data": "FunctionDef name:_is_nested_tuple_indexer arg:self arg:tup arguments arg arg If Call Call Return return:yes Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "bracket_pairs",
    "source_code": "def bracket_pairs(tokens: Sequence[TokenInfo]) -> dict[int, int]:\n    braces: dict[int, int] = {}\n    stack: list[int] = []\n    for i, t in enumerate(tokens):\n        if t.type == token.OP:\n            if t.string in BRACKETS:\n                stack.append(i)\n            elif (inv := BRACKETS_INV.get(t.string)):\n                if not stack:\n                    raise ParseError(t, 'Never opened')\n                begin = stack.pop()\n                if not (stack and stack[-1] == FSTRING_START):\n                    braces[begin] = i\n                b = tokens[begin].string\n                if b != inv:\n                    raise ParseError(t, f\"Mismatched braces '{b}' at {begin}\")\n        elif t.type == FSTRING_START:\n            stack.append(FSTRING_START)\n        elif t.type == FSTRING_END:\n            if stack.pop() != FSTRING_START:\n                raise ParseError(t, 'Mismatched FSTRING_START/FSTRING_END')\n    if stack:\n        raise ParseError(t, 'Left open')\n    return braces",
    "docstring": "Returns a dictionary mapping opening to closing brackets",
    "type": "function",
    "file_path": "pytorch\\tools\\linter\\adapters\\_linter.py",
    "ast_data": "FunctionDef name:bracket_pairs arg:tokens arguments arg For Call If Compare If Compare Call If Call If Raise Call Assign Call If BoolOp Compare Assign Assign If Compare Raise Call If Compare Call If Compare If Compare Call Raise Call If Raise Call Return return:yes"
  },
  {
    "library": "scipy",
    "name": "rmatvec",
    "source_code": "def rmatvec(self, x):\n    x = np.asanyarray(x)\n    M, N = self.shape\n    if x.shape != (M,) and x.shape != (M, 1):\n        raise ValueError('dimension mismatch')\n    y = self._rmatvec(x)\n    if isinstance(x, np.matrix):\n        y = asmatrix(y)\n    else:\n        y = np.asarray(y)\n    if x.ndim == 1:\n        y = y.reshape(N)\n    elif x.ndim == 2:\n        y = y.reshape(N, 1)\n    else:\n        raise ValueError('invalid shape returned by user-defined rmatvec()')\n    return y",
    "docstring": "Adjoint matrix-vector multiplication. Performs the operation y = A^H @ x where A is an MxN linear operator and x is a column vector or 1-d array. Parameters ---------- x : {matrix, ndarray} An array with shape (M,) or (M,1). Returns ------- y : {matrix, ndarray} A matrix or ndarray with shape (N,) or (N,1) depending on the type and shape of the x argument. Notes ----- This rmatvec wraps the user-specified rmatvec routine or overridden _rmatvec method to ensure that y has the correct shape and type.",
    "type": "method",
    "file_path": "scipy\\scipy\\sparse\\linalg\\_interface.py",
    "ast_data": "FunctionDef name:rmatvec arg:self arg:x arguments arg arg Assign Call Assign If BoolOp Compare Compare Raise Call Assign Call If Call Assign Call Assign Call If Compare Assign Call If Compare Assign Call Raise Call Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "Hidden",
    "source_code": "class Hidden:\n\n    def __init__(self, constraint):\n        self.constraint = constraint",
    "docstring": "Class encapsulating a constraint not meant to be exposed to the user. Parameters ---------- constraint : str or _Constraint instance The constraint to be used internally.",
    "type": "class",
    "file_path": "scikit-learn\\sklearn\\utils\\_param_validation.py",
    "ast_data": "ClassDef name:Hidden FunctionDef name:__init__ arg:self arg:constraint arguments arg arg Assign"
  },
  {
    "library": "pandas",
    "name": "dropna",
    "source_code": "def dropna(self) -> Self:\n    return type(self)(pc.drop_null(self._pa_array))",
    "docstring": "Return ArrowExtensionArray without NA values. Returns ------- ArrowExtensionArray",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\arrays\\arrow\\array.py",
    "ast_data": "FunctionDef name:dropna arg:self arguments arg Return return:yes Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "get_alignments",
    "source_code": "@functools.lru_cache(32)\ndef get_alignments(torch_dtype: torch.dtype) -> list[int]:\n    if torch_dtype in (torch.half, torch.bfloat16):\n        return [8, 4, 2, 1]\n    elif torch_dtype == torch.float:\n        return [4, 2, 1]\n    elif torch_dtype in (torch.uint8, torch.int8, torch.float8_e4m3fn):\n        return [16, 8, 4, 2]\n    elif torch_dtype == torch.int32:\n        return [4, 2, 1]\n    else:\n        raise NotImplementedError(f'unsupported torch_dtype={torch_dtype!r} for alignments')",
    "docstring": "Returns all possible valid CUTLASS alignments in terms of the number of elements for a given dtype. CUTLASS gemm / conv SM80 APIs support 16 bytes max alignment, and 2 bytes min alignment.",
    "type": "function",
    "file_path": "pytorch\\torch\\_inductor\\codegen\\cuda\\cutlass_utils.py",
    "ast_data": "FunctionDef name:get_alignments arg:torch_dtype arguments arg If Compare Return return:yes If Compare Return return:yes If Compare Return return:yes If Compare Return return:yes Raise Call Call"
  },
  {
    "library": "tensorflow",
    "name": "create_proxy_api_files",
    "source_code": "def create_proxy_api_files(output_files, proxy_module_root, output_dir):\n    for file_path in output_files:\n        module = get_module(os.path.dirname(file_path), output_dir)\n        if not os.path.isdir(os.path.dirname(file_path)):\n            os.makedirs(os.path.dirname(file_path))\n        contents = f'from {proxy_module_root}.{module} import *'\n        with open(file_path, 'w') as fp:\n            fp.write(contents)",
    "docstring": "Creates __init__.py files in proxy format for the Python API. Args: output_files: List of __init__.py file paths to create. proxy_module_root: Module root for proxy-import format. If specified, proxy files with content like will be created to enable import resolution under TensorFlow. output_dir: output API root directory.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\tools\\api\\generator\\create_python_api.py",
    "ast_data": "FunctionDef name:create_proxy_api_files arg:output_files arg:proxy_module_root arg:output_dir arguments arg arg arg For Assign Call Call If Call Call Call Call Assign With Call Call"
  },
  {
    "library": "tensorflow",
    "name": "serialize",
    "source_code": "def serialize(loss):\n    return serialize_keras_object(loss)",
    "docstring": "Serializes loss function or instance. Args: loss: A Keras instance or a loss function. Returns: Loss configuration dictionary.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\losses.py",
    "ast_data": "FunctionDef name:serialize arg:loss arguments arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "value_dtype",
    "source_code": "@property\ndef value_dtype(self):\n    return self._value_dtype",
    "docstring": "The table value dtype.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\lookup_ops.py",
    "ast_data": "FunctionDef name:value_dtype arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_modify_model_output_type",
    "source_code": "def _modify_model_output_type(model, inference_output_type=dtypes.float32):\n    if inference_output_type == dtypes.float32:\n        return\n    if not model.signatureDefs:\n        _modify_model_output_type_per_subgraph(model, 0, -1, inference_output_type)\n        return\n    for signature_index, signature_def in enumerate(model.signatureDefs):\n        _modify_model_output_type_per_subgraph(model, signature_def.subgraphIndex, signature_index, inference_output_type)",
    "docstring": "Modify model output type.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\lite\\python\\util.py",
    "ast_data": "FunctionDef name:_modify_model_output_type arg:model arg:inference_output_type arguments arg arg If Compare Return return:no If Call Return return:no For Call Call"
  },
  {
    "library": "pytorch",
    "name": "_duplicate_dequantize_node",
    "source_code": "def _duplicate_dequantize_node(m: GraphModule):\n    dq_op = torch.ops.quantized_decomposed.dequantize_per_tensor\n    for n in m.graph.nodes:\n        if n.op != 'call_function' or n.target != dq_op or len(n.users) == 1:\n            continue\n        for user in list(n.users):\n            with m.graph.inserting_before(n):\n                new_node = m.graph.create_node('call_function', dq_op, n.args, n.kwargs)\n            user.replace_input_with(n, new_node)\n        m.graph.erase_node(n)\n    m.recompile()",
    "docstring": "Helper function to duplicate all dequantize nodes in the graph if the node has more than one user. For example: Before: quantize -> dequantize -> a \\--> b \\--> c After: quantize -> dequantize_1 -> a \\--> dequantize_2 -> b \\--> dequantize_3 -> c This is useful for subgraph rewriting. E.g. if we wish to match the pattern [dequantize - a] above, subgraph matching would fail because the dequantize node has users outside the matched portion of the graph. Instead, we match [dequantize_1 - a], which is safe.",
    "type": "function",
    "file_path": "pytorch\\torch\\ao\\quantization\\pt2e\\qat_utils.py",
    "ast_data": "FunctionDef name:_duplicate_dequantize_node arg:m arguments arg Assign For If BoolOp Compare Compare Compare Call For Call With Call Assign Call Call Call Call"
  },
  {
    "library": "kornia",
    "name": "to_numpy",
    "source_code": "def to_numpy(self, x: Any) -> np.array:\n    if isinstance(x, (Tensor,)):\n        return x.cpu().detach().numpy()\n    if isinstance(x, (np.ndarray,)):\n        return x\n    if isinstance(x, (Image.Image,)):\n        return np.array(x)\n    raise TypeError('Input type not supported')",
    "docstring": "Convert input to numpy array. Args: x: The input to convert. Returns: np.array: The converted numpy array.",
    "type": "method",
    "file_path": "kornia\\kornia\\core\\module.py",
    "ast_data": "FunctionDef name:to_numpy arg:self arg:x arguments arg arg If Call Return return:yes Call Call Call If Call Return return:yes If Call Return return:yes Call Raise Call"
  },
  {
    "library": "kornia",
    "name": "KORNIA_CHECK_IS_LIST_OF_TENSOR",
    "source_code": "def KORNIA_CHECK_IS_LIST_OF_TENSOR(x: Optional[Sequence[object]], raises: bool=True) -> TypeGuard[list[Tensor]]:\n    are_tensors = isinstance(x, list) and all((isinstance(d, Tensor) for d in x))\n    if not are_tensors:\n        if raises:\n            raise TypeError(f'Provided container of type {type(x)} is not a list of tensors')\n        return False\n    return True",
    "docstring": "Check the input variable is a List of Tensors. Args: x: Any sequence of objects raises: bool indicating whether an exception should be raised upon failure. Raises: TypeException: if the input variable does not match with the expected and raises is True. Return: True if the input is a list of Tensors, otherwise return False. Example: >>> x = torch.rand(2, 3, 3) >>> KORNIA_CHECK_IS_LIST_OF_TENSOR(x, raises=False) False >>> KORNIA_CHECK_IS_LIST_OF_TENSOR([x]) True",
    "type": "function",
    "file_path": "kornia\\kornia\\core\\check.py",
    "ast_data": "FunctionDef name:KORNIA_CHECK_IS_LIST_OF_TENSOR arg:x arg:raises arguments arg arg Assign BoolOp Call Call Call If If Raise Call Call Return return:yes Return return:yes"
  },
  {
    "library": "kornia",
    "name": "warp_perspective3d",
    "source_code": "def warp_perspective3d(src: Tensor, M: Tensor, dsize: tuple[int, int, int], flags: str='bilinear', border_mode: str='zeros', align_corners: bool=False) -> Tensor:\n    if not isinstance(src, Tensor):\n        raise TypeError(f'Input src type is not a Tensor. Got {type(src)}')\n    if not isinstance(M, Tensor):\n        raise TypeError(f'Input M type is not a Tensor. Got {type(M)}')\n    if not len(src.shape) == 5:\n        raise ValueError(f'Input src must be a BxCxDxHxW tensor. Got {src.shape}')\n    if not (len(M.shape) == 3 or M.shape[-2:] == (4, 4)):\n        raise ValueError(f'Input M must be a Bx4x4 tensor. Got {M.shape}')\n    d, h, w = src.shape[-3:]\n    return _transform_warp_impl3d(src, M, (d, h, w), dsize, flags, border_mode, align_corners)",
    "docstring": "Apply a perspective transformation to an image. The function warp_perspective transforms the source image using the specified matrix: .. math:: \\text{dst} (x, y) = \\text{src} \\left( \\frac{M_{11} x + M_{12} y + M_{13}}{M_{31} x + M_{32} y + M_{33}} , \\frac{M_{21} x + M_{22} y + M_{23}}{M_{31} x + M_{32} y + M_{33}} \\right ) Args: src: input image with shape :math:. M: transformation matrix with shape :math:. dsize: size of the output image (height, width). flags: interpolation mode to calculate output values `(B, C, D, H, W)get_perspective_transform3d`.",
    "type": "function",
    "file_path": "kornia\\kornia\\geometry\\transform\\imgwarp.py",
    "ast_data": "FunctionDef name:warp_perspective3d arg:src arg:M arg:dsize arg:flags arg:border_mode arg:align_corners arguments arg arg arg arg arg arg If Call Raise Call Call If Call Raise Call Call If Compare Call Raise Call If BoolOp Compare Call Compare Raise Call Assign Return return:yes Call"
  },
  {
    "library": "numpy",
    "name": "_multi_svd_norm",
    "source_code": "def _multi_svd_norm(x, row_axis, col_axis, op, initial=None):\n    y = moveaxis(x, (row_axis, col_axis), (-2, -1))\n    result = op(svd(y, compute_uv=False), axis=-1, initial=initial)\n    return result",
    "docstring": "Compute a function of the singular values of the 2-D matrices in . This is a private utility function used by . Parameters ---------- x : ndarray row_axis, col_axis : int The axes of that hold the 2-D matrices. op : callable This should be either numpy.amin or or . Returns ------- result : float or ndarray If is 2-D, the return values is a float. Otherwise, it is an array with `opnumpy.aminnumpy.amaxnumpy.sum`.",
    "type": "function",
    "file_path": "numpy\\numpy\\linalg\\_linalg.py",
    "ast_data": "FunctionDef name:_multi_svd_norm arg:x arg:row_axis arg:col_axis arg:op arg:initial arguments arg arg arg arg arg Assign Call Assign Call Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "colorbar_gridspec",
    "source_code": "@property\ndef colorbar_gridspec(self):\n    if self._colorbar_gridspec is None:\n        raise NotImplementedError\n    return self._colorbar_gridspec",
    "docstring": "Return a boolean if the layout engine creates colorbars using a gridspec.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\layout_engine.py",
    "ast_data": "FunctionDef name:colorbar_gridspec arg:self arguments arg If Compare Raise Return return:yes"
  },
  {
    "library": "sphinx",
    "name": "config",
    "source_code": "@property\ndef config(self) -> Config:\n    return self.env.config",
    "docstring": "Reference to the :class: object.",
    "type": "method",
    "file_path": "sphinx\\sphinx\\transforms\\__init__.py",
    "ast_data": "FunctionDef name:config arg:self arguments arg Return return:yes"
  },
  {
    "library": "seaborn",
    "name": "__contains__",
    "source_code": "def __contains__(self, key: str) -> bool:\n    if self.frame is None:\n        return any((key in df for df in self.frames.values()))\n    return key in self.frame",
    "docstring": "Boolean check on whether a variable is defined in this dataset.",
    "type": "method",
    "file_path": "seaborn\\seaborn\\_core\\data.py",
    "ast_data": "FunctionDef name:__contains__ arg:self arg:key arguments arg arg If Compare Return return:yes Call Compare Call Return return:yes Compare"
  },
  {
    "library": "scipy",
    "name": "__str__",
    "source_code": "def __str__(self):\n    return f'`{self.name}` for :math:`{self.symbol} \\\\in {str(self.domain)}`'",
    "docstring": "String representation of the parameter for use in documentation.",
    "type": "method",
    "file_path": "scipy\\scipy\\stats\\_distribution_infrastructure.py",
    "ast_data": "FunctionDef name:__str__ arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "_infer_selection",
    "source_code": "@final\ndef _infer_selection(self, key, subset: Series | DataFrame):\n    selection = None\n    if subset.ndim == 2 and (lib.is_scalar(key) and key in subset or lib.is_list_like(key)):\n        selection = key\n    elif subset.ndim == 1 and lib.is_scalar(key) and (key == subset.name):\n        selection = key\n    return selection",
    "docstring": "Infer the to pass to our constructor in _gotitem.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\base.py",
    "ast_data": "FunctionDef name:_infer_selection arg:self arg:key arg:subset arguments arg arg arg Assign If BoolOp Compare BoolOp BoolOp Call Compare Call Assign If BoolOp Compare Call Compare Assign Return return:yes"
  },
  {
    "library": "django",
    "name": "boundary",
    "source_code": "@property\ndef boundary(self):\n    return self._topology(capi.geos_boundary(self.ptr))",
    "docstring": "Return the boundary as a newly allocated Geometry object.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\geos\\geometry.py",
    "ast_data": "FunctionDef name:boundary arg:self arguments arg Return return:yes Call Call"
  },
  {
    "library": "scipy",
    "name": "hypsecant_gen",
    "source_code": "class hypsecant_gen(rv_continuous):\n\n    def _shape_info(self):\n        return []\n\n    def _pdf(self, x):\n        return 1.0 / (np.pi * np.cosh(x))\n\n    def _cdf(self, x):\n        return 2.0 / np.pi * np.arctan(np.exp(x))\n\n    def _ppf(self, q):\n        return np.log(np.tan(np.pi * q / 2.0))\n\n    def _sf(self, x):\n        return 2.0 / np.pi * np.arctan(np.exp(-x))\n\n    def _isf(self, q):\n        return -np.log(np.tan(np.pi * q / 2.0))\n\n    def _stats(self):\n        return (0, np.pi * np.pi / 4, 0, 2)\n\n    def _entropy(self):\n        return np.log(2 * np.pi)",
    "docstring": "A hyperbolic secant continuous random variable. %(before_notes)s Notes ----- The probability density function for is: .. math:: f(x) = \\frac{1}{\\pi} \\text{sech}(x) for a real number :math:. %(after_notes)s %(example)s",
    "type": "class",
    "file_path": "scipy\\scipy\\stats\\_continuous_distns.py",
    "ast_data": "ClassDef name:hypsecant_gen FunctionDef name:_shape_info arg:self arguments arg Return return:no FunctionDef name:_pdf arg:self arg:x arguments arg arg Return return:yes Call FunctionDef name:_cdf arg:self arg:x arguments arg arg Return return:yes Call Call FunctionDef name:_ppf arg:self arg:q arguments arg arg Return return:yes Call Call FunctionDef name:_sf arg:self arg:x arguments arg arg Return return:yes Call Call FunctionDef name:_isf arg:self arg:q arguments arg arg Return return:yes Call Call FunctionDef name:_stats arg:self arguments arg Return return:yes FunctionDef name:_entropy arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "codegen_boilerplate",
    "source_code": "def codegen_boilerplate(self, heuristic_name, opt_name, threshold, shared_memory, device_capa, classes):\n    boiler_plate = f\"# flake8: noqa: B950\\n# fmt: off\\n# This file was generated by AutoHeuristic. Do not modify it manually!\\n# To regenerate this file, take a look at the steps in the README.md file inside torchgen/_autoheuristic/{opt_name}/\\nfrom torch._inductor.autoheuristic.autoheuristic_utils import AHContext, AHMetadata, Choice, CHOICE_COL\\nfrom torch._inductor.autoheuristic.learnedheuristic_interface import (\\n    LearnedHeuristicRegression,\\n)\\n\\n\\nclass {heuristic_name}(LearnedHeuristicRegression):\\n\\n    def __init__(self) -> None:\\n        pass\\n\\n{self.gen_precondition(opt_name, shared_memory, device_capa)}\\n\\n    def get_feedback(self, context: AHContext, choice: Choice) -> float:\\n        context.context_dict[CHOICE_COL] = choice\\n        return self.predict(context)\\n\\n    def get_confidence_threshold(self) -> float:\\n        return {threshold}\\n\\n    def get_name(self) -> str:\\n        return '{opt_name}'\"\n    return boiler_plate",
    "docstring": "Generates the boilerplate code for the generated heuristic. This includes things like imports, class definition, etc.",
    "type": "method",
    "file_path": "pytorch\\torchgen\\_autoheuristic\\train_regression.py",
    "ast_data": "FunctionDef name:codegen_boilerplate arg:self arg:heuristic_name arg:opt_name arg:threshold arg:shared_memory arg:device_capa arg:classes arguments arg arg arg arg arg arg arg Assign Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "Event",
    "source_code": "@dataclass\nclass Event:\n    name: str\n    source: EventSource\n    timestamp: int = 0\n    metadata: dict[str, EventMetadataValue] = field(default_factory=dict)\n\n    def __str__(self):\n        return self.serialize()\n\n    @staticmethod\n    def deserialize(data: Union[str, 'Event']) -> 'Event':\n        if isinstance(data, Event):\n            return data\n        if isinstance(data, str):\n            data_dict = json.loads(data)\n        data_dict['source'] = EventSource[data_dict['source']]\n        return Event(**data_dict)\n\n    def serialize(self) -> str:\n        return json.dumps(asdict(self))",
    "docstring": "The class represents the generic event that occurs during the torchelastic job execution. The event can be any kind of meaningful action. Args: name: event name. source: the event producer, e.g. agent or worker timestamp: timestamp in milliseconds when event occurred. metadata: additional data that is associated with the event.",
    "type": "class",
    "file_path": "pytorch\\torch\\distributed\\elastic\\events\\api.py",
    "ast_data": "ClassDef name:Event Call FunctionDef name:__str__ arg:self arguments arg Return return:yes Call FunctionDef name:deserialize arg:data arguments arg If Call Return return:yes If Call Assign Call Assign Return return:yes Call FunctionDef name:serialize arg:self arguments arg Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "parse_example_spec",
    "source_code": "@property\ndef parse_example_spec(self):\n    return {self.key: parsing_ops.VarLenFeature(dtypes.int64)}",
    "docstring": "See base class.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\feature_column\\feature_column_v2.py",
    "ast_data": "FunctionDef name:parse_example_spec arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "visit_Import",
    "source_code": "def visit_Import(self, node):\n    for import_alias in node.names:\n        full_import = (import_alias.name, import_alias.asname)\n        detection = self._api_analysis_spec.imports_to_detect.get(full_import, None)\n        if detection:\n            self.add_result(detection)\n            self.add_log(detection.log_level, node.lineno, node.col_offset, detection.log_message)\n    self.generic_visit(node)",
    "docstring": "Handle visiting an import node in the AST. Args: node: Current Node",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\tools\\compatibility\\ast_edits.py",
    "ast_data": "FunctionDef name:visit_Import arg:self arg:node arguments arg arg For Assign Assign Call If Call Call Call"
  },
  {
    "library": "django",
    "name": "srid",
    "source_code": "@srid.setter\ndef srid(self, srid):\n    capi.geos_set_srid(self.ptr, 0 if srid is None else srid)",
    "docstring": "Set the SRID for the geometry.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\geos\\geometry.py",
    "ast_data": "FunctionDef name:srid arg:self arg:srid arguments arg arg Call Compare"
  },
  {
    "library": "pytorch",
    "name": "close",
    "source_code": "def close(self):\n    self.event_writer.close()",
    "docstring": "Flushes the event file to disk and close the file. Call this method when you do not need the summary writer anymore.",
    "type": "method",
    "file_path": "pytorch\\torch\\utils\\tensorboard\\writer.py",
    "ast_data": "FunctionDef name:close arg:self arguments arg Call"
  },
  {
    "library": "tensorflow",
    "name": "unpack",
    "source_code": "@tf_export('experimental.dtensor.unpack', v1=[])\ndef unpack(tensor: Any) -> Sequence[Any]:\n    return _dtensor_device().unpack(tensor)",
    "docstring": "Unpacks a DTensor into components. Packing and unpacking are inverse operations: 1. For any DTensor on the mesh, returns the raw components placed on each underlying device. 2. Packing these raw components in the same order using returns a DTensor which should be identical to the original DTensor--both the content value and the layout. See the documentation for for more information about how packing and unpacking works. Args: tensor: The DTensor to unpack. Returns: The individual component tensors of the DTensor. This will include only the client-local components, i.e. the components placed on the local devices. Raises: RuntimeError: When is not called eagerly.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\dtensor\\python\\api.py",
    "ast_data": "FunctionDef name:unpack arg:tensor arguments arg Return return:yes Call Call Call"
  },
  {
    "library": "scipy",
    "name": "_log_gauss_mass",
    "source_code": "def _log_gauss_mass(a, b):\n    a, b = np.broadcast_arrays(a, b)\n    case_left = b <= 0\n    case_right = a > 0\n    case_central = ~(case_left | case_right)\n\n    def mass_case_left(a, b):\n        return _log_diff(_norm_logcdf(b), _norm_logcdf(a))\n\n    def mass_case_right(a, b):\n        return mass_case_left(-b, -a)\n\n    def mass_case_central(a, b):\n        return sc.log1p(-_norm_cdf(a) - _norm_cdf(-b))\n    out = np.full_like(a, fill_value=np.nan, dtype=np.complex128)\n    if a[case_left].size:\n        out[case_left] = mass_case_left(a[case_left], b[case_left])\n    if a[case_right].size:\n        out[case_right] = mass_case_right(a[case_right], b[case_right])\n    if a[case_central].size:\n        out[case_central] = mass_case_central(a[case_central], b[case_central])\n    return np.real(out)",
    "docstring": "Log of Gaussian probability mass within an interval",
    "type": "function",
    "file_path": "scipy\\scipy\\stats\\_continuous_distns.py",
    "ast_data": "FunctionDef name:_log_gauss_mass arg:a arg:b arguments arg arg Assign Call Assign Compare Assign Compare Assign FunctionDef name:mass_case_left arg:a arg:b arguments arg arg Return return:yes Call Call Call FunctionDef name:mass_case_right arg:a arg:b arguments arg arg Return return:yes Call FunctionDef name:mass_case_central arg:a arg:b arguments arg arg Return return:yes Call Call Call Assign Call If Assign Call If Assign Call If Assign Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "StrategyV1",
    "source_code": "@tf_export(v1=['distribute.Strategy'])\nclass StrategyV1(StrategyBase):\n\n    def make_dataset_iterator(self, dataset):\n        return self._extended._make_dataset_iterator(dataset)\n\n    def make_input_fn_iterator(self, input_fn, replication_mode=InputReplicationMode.PER_WORKER):\n        return super(StrategyV1, self).make_input_fn_iterator(input_fn, replication_mode)\n\n    def experimental_make_numpy_dataset(self, numpy_input, session=None):\n        return self.extended.experimental_make_numpy_dataset(numpy_input, session=session)\n\n    @deprecated(None, 'This method is not available in TF 2.x. Please switch to using `run` instead.')\n    def experimental_run(self, fn, input_iterator=None):\n        return super(StrategyV1, self).experimental_run(fn, input_iterator)\n\n    def reduce(self, reduce_op, value, axis=None):\n        return super(StrategyV1, self).reduce(reduce_op, value, axis)\n    reduce.__doc__ = StrategyBase.reduce.__doc__\n\n    def update_config_proto(self, config_proto):\n        return self._extended._update_config_proto(config_proto)",
    "docstring": "A list of devices with a state & compute distribution policy. See [the guide]( for overview and examples. Note: Not all implementations currently support TensorFlow's partitioned variables (where a single variable is split across multiple devices) at this time.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\distribute_lib.py",
    "ast_data": "ClassDef name:StrategyV1 FunctionDef name:make_dataset_iterator arg:self arg:dataset arguments arg arg Return return:yes Call FunctionDef name:make_input_fn_iterator arg:self arg:input_fn arg:replication_mode arguments arg arg arg Return return:yes Call Call FunctionDef name:experimental_make_numpy_dataset arg:self arg:numpy_input arg:session arguments arg arg arg Return return:yes Call FunctionDef name:experimental_run arg:self arg:fn arg:input_iterator arguments arg arg arg Return return:yes Call Call Call FunctionDef name:reduce arg:self arg:reduce_op arg:value arg:axis arguments arg arg arg arg Return return:yes Call Call Assign FunctionDef name:update_config_proto arg:self arg:config_proto arguments arg arg Return return:yes Call Call"
  },
  {
    "library": "django",
    "name": "get_default_redirect_url",
    "source_code": "def get_default_redirect_url(self):\n    if self.next_page:\n        return resolve_url(self.next_page)\n    else:\n        return resolve_url(settings.LOGIN_REDIRECT_URL)",
    "docstring": "Return the default redirect URL.",
    "type": "method",
    "file_path": "django\\django\\contrib\\auth\\views.py",
    "ast_data": "FunctionDef name:get_default_redirect_url arg:self arguments arg If Return return:yes Call Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "LinAlgWarning",
    "source_code": "class LinAlgWarning(RuntimeWarning):\n    pass",
    "docstring": "The warning emitted when a linear algebra related operation is close to fail conditions of the algorithm or loss of accuracy is expected.",
    "type": "class",
    "file_path": "scipy\\scipy\\linalg\\_misc.py",
    "ast_data": "ClassDef name:LinAlgWarning"
  },
  {
    "library": "matplotlib",
    "name": "get_position",
    "source_code": "def get_position(self):\n    return self._pos",
    "docstring": "Return the position of the rectangle.",
    "type": "method",
    "file_path": "matplotlib\\lib\\mpl_toolkits\\axes_grid1\\axes_divider.py",
    "ast_data": "FunctionDef name:get_position arg:self arguments arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_DispatchCacheValidEntry",
    "source_code": "@dataclass_slots\n@dataclass(frozen=True)\nclass _DispatchCacheValidEntry:\n    output_infos: tuple[_DispatchCacheEntryOutputInfo]\n    is_output_tuple: bool = False",
    "docstring": "Entry type for the FakeTensor dispatch cache. It supports two types of outputs 1) tensor 2) tuple of tensors is_output_tuple flag helps in differentiating the return type",
    "type": "class",
    "file_path": "pytorch\\torch\\_subclasses\\fake_tensor.py",
    "ast_data": "ClassDef name:_DispatchCacheValidEntry Call"
  },
  {
    "library": "pytorch",
    "name": "_inject_property",
    "source_code": "def _inject_property(module: Module, tensor_name: str) -> None:\n    assert not hasattr(module, tensor_name)\n\n    @torch.jit.unused\n    def get_cached_parametrization(parametrization) -> Tensor:\n        global _cache\n        key = (id(module), tensor_name)\n        tensor = _cache.get(key)\n        if tensor is None:\n            tensor = parametrization()\n            _cache[key] = tensor\n        return tensor\n\n    def get_parametrized(self) -> Tensor:\n        if torch.jit.is_scripting():\n            raise RuntimeError('Parametrization is not working with scripting.')\n        parametrization = self.parametrizations[tensor_name]\n        if _cache_enabled:\n            if torch.jit.is_scripting():\n                raise RuntimeError('Caching is not implemented for scripting. Either disable caching or avoid scripting.')\n            elif torch._C._get_tracing_state() is not None:\n                raise RuntimeError('Cannot trace a model while caching parametrizations.')\n            else:\n                return get_cached_parametrization(parametrization)\n        else:\n            return parametrization()\n\n    def set_original(self, value: Tensor) -> None:\n        if torch.jit.is_scripting():\n            raise RuntimeError('Parametrization is not working with scripting.')\n        self.parametrizations[tensor_name].right_inverse(value)\n    setattr(module.__class__, tensor_name, property(get_parametrized, set_original))",
    "docstring": "Injects a property into module[tensor_name]. It assumes that the class in the module has already been modified from its original one using _inject_new_class and that the tensor under :attr: has already been moved out Args: module (nn.Module): module into which to inject the property tensor_name (str): name of the name of the property to create",
    "type": "function",
    "file_path": "pytorch\\torch\\nn\\utils\\parametrize.py",
    "ast_data": "FunctionDef name:_inject_property arg:module arg:tensor_name arguments arg arg Call FunctionDef name:get_cached_parametrization arg:parametrization arguments arg Assign Call Assign Call If Compare Assign Call Assign Return return:yes FunctionDef name:get_parametrized arg:self arguments arg If Call Raise Call Assign If If Call Raise Call If Compare Call Raise Call Return return:yes Call Return return:yes Call FunctionDef name:set_original arg:self arg:value arguments arg arg If Call Raise Call Call Call Call"
  },
  {
    "library": "scipy",
    "name": "check_random_state",
    "source_code": "def check_random_state(seed):\n    if seed is None or seed is np.random:\n        return np.random.mtrand._rand\n    if isinstance(seed, numbers.Integral | np.integer):\n        return np.random.RandomState(seed)\n    if isinstance(seed, np.random.RandomState | np.random.Generator):\n        return seed\n    raise ValueError(f\"'{seed}' cannot be used to seed a numpy.random.RandomState instance\")",
    "docstring": "Turn into a instance. Parameters ---------- seed : {None, int, , }, optional If is None (or ), the singleton is used. If is an int, a new `seedseednumpy.random.Generatornumpy.random.RandomState`} Random number generator.",
    "type": "function",
    "file_path": "scipy\\scipy\\_lib\\_util.py",
    "ast_data": "FunctionDef name:check_random_state arg:seed arguments arg If BoolOp Compare Compare Return return:yes If Call Return return:yes Call If Call Return return:yes Raise Call"
  },
  {
    "library": "pytorch",
    "name": "add_video",
    "source_code": "def add_video(self, tag, vid_tensor, global_step=None, fps=4, walltime=None):\n    torch._C._log_api_usage_once('tensorboard.logging.add_video')\n    self._get_file_writer().add_summary(video(tag, vid_tensor, fps), global_step, walltime)",
    "docstring": "Add video data to summary. Note that this requires the `(N, T, C, H, W)uint8float`.",
    "type": "method",
    "file_path": "pytorch\\torch\\utils\\tensorboard\\writer.py",
    "ast_data": "FunctionDef name:add_video arg:self arg:tag arg:vid_tensor arg:global_step arg:fps arg:walltime arguments arg arg arg arg arg arg Call Call Call Call"
  },
  {
    "library": "numpy",
    "name": "MethodsV1IntOnly",
    "source_code": "class MethodsV1IntOnly(Benchmark):\n    params = [['__and__', '__or__', '__xor__'], ['int16', 'int32', 'int64']]\n    param_names = ['methods', 'npdtypes']\n    timeout = 10\n\n    def setup(self, methname, npdtypes):\n        values = get_squares_().get(npdtypes)\n        self.xargs = [values[0], values[1]]\n\n    def time_ndarray_meth(self, methname, npdtypes):\n        getattr(operator, methname)(*self.xargs)",
    "docstring": "Benchmark for the methods which take an argument",
    "type": "class",
    "file_path": "numpy\\benchmarks\\benchmarks\\bench_ufunc.py",
    "ast_data": "ClassDef name:MethodsV1IntOnly Assign Assign Assign FunctionDef name:setup arg:self arg:methname arg:npdtypes arguments arg arg arg Assign Call Call Assign FunctionDef name:time_ndarray_meth arg:self arg:methname arg:npdtypes arguments arg arg arg Call Call"
  },
  {
    "library": "scikit-learn",
    "name": "score",
    "source_code": "def score(self, X=None, Y=None):\n    if self.methods_to_check == 'all' or 'score' in self.methods_to_check:\n        self._check_X_y(X, Y)\n    if self.foo_param > 1:\n        score = 1.0\n    else:\n        score = 0.0\n    return score",
    "docstring": "Fake score. Parameters ---------- X : array-like of shape (n_samples, n_features) Input data, where is the number of samples and is the number of features. Y : array-like of shape (n_samples, n_output) or (n_samples,) Target relative to X for classification or regression; None for unsupervised learning. Returns ------- score : float Either 0 or 1 depending of (i.e. otherwise ).",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\utils\\_mocking.py",
    "ast_data": "FunctionDef name:score arg:self arg:X arg:Y arguments arg arg arg If BoolOp Compare Compare Call If Compare Assign Assign Return return:yes"
  },
  {
    "library": "kornia",
    "name": "get_kernel",
    "source_code": "def get_kernel(self) -> Tensor:\n    cross = tensor([[[0, 1, 0], [1, 1, 1], [0, 1, 0]]])\n    kernel = cross * 0.2\n    return kernel[None]",
    "docstring": "Get kernel for image morphology convolution.",
    "type": "method",
    "file_path": "kornia\\kornia\\losses\\hausdorff.py",
    "ast_data": "FunctionDef name:get_kernel arg:self arguments arg Assign Call Assign Return return:yes"
  },
  {
    "library": "pandas",
    "name": "_consolidate",
    "source_code": "@final\ndef _consolidate(self):\n    cons_data = self._mgr.consolidate()\n    return self._constructor_from_mgr(cons_data, axes=cons_data.axes).__finalize__(self)",
    "docstring": "Compute NDFrame with \"consolidated\" internals (data of each dtype grouped together in a single ndarray). Returns ------- consolidated : same type as caller",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\generic.py",
    "ast_data": "FunctionDef name:_consolidate arg:self arguments arg Assign Call Return return:yes Call Call"
  },
  {
    "library": "scrapy",
    "name": "close",
    "source_code": "def close(self) -> Deferred[None]:\n    if self.running:\n        return self.stop()\n    if self.spider is not None:\n        return self.close_spider(self.spider, reason='shutdown')\n    self.downloader.close()\n    return succeed(None)",
    "docstring": "Gracefully close the execution engine. If it has already been started, stop it. In all cases, close the spider and the downloader.",
    "type": "method",
    "file_path": "scrapy\\scrapy\\core\\engine.py",
    "ast_data": "FunctionDef name:close arg:self arguments arg If Return return:yes Call If Compare Return return:yes Call Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_get_input_shape_for_ragged_tensor",
    "source_code": "def _get_input_shape_for_ragged_tensor(self, tensor, feature, per_replica, path) -> TensorShape:\n    del per_replica\n    shape = tensor.shape.as_list()\n    if len(shape) != 2:\n        raise ValueError('Only rank 2 ragged tensor is supported, find rank {} ragged tensor for input {}'.format(len(shape), path))\n    if not feature.output_shape and feature.max_sequence_length > 0:\n        shape.insert(len(shape) - 1, feature.max_sequence_length)\n    return TensorShape(shape)",
    "docstring": "Get the input shape for the ragged tensor.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\tpu\\tpu_embedding_v2.py",
    "ast_data": "FunctionDef name:_get_input_shape_for_ragged_tensor arg:self arg:tensor arg:feature arg:per_replica arg:path arguments arg arg arg arg arg Assign Call If Compare Call Raise Call Call Call If BoolOp Compare Call Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "cast_to_floatx",
    "source_code": "@dispatch.add_dispatch_support\n@doc_controls.do_not_generate_docs\ndef cast_to_floatx(x):\n    if isinstance(x, (tensor_lib.Tensor, variables_module.Variable, sparse_tensor.SparseTensor)):\n        return math_ops.cast(x, dtype=floatx())\n    return numpy_compat.np_asarray(x, dtype=floatx())",
    "docstring": "Cast a Numpy array to the default Keras float type. Args: x: Numpy array or TensorFlow tensor. Returns: The same array (Numpy array if was a Numpy array, or TensorFlow tensor if was a tensor), cast to its new type. Example: >>> tf.keras.backend.floatx() 'float32' >>> arr = np.array([1.0, 2.0], dtype='float64') >>> arr.dtype dtype('float64') >>> new_arr = cast_to_floatx(arr) >>> new_arr array([1., 2.], dtype=float32) >>> new_arr.dtype dtype('float32')",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\backend.py",
    "ast_data": "FunctionDef name:cast_to_floatx arg:x arguments arg If Call Return return:yes Call Call Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "_postprocess_serialized_shapes",
    "source_code": "def _postprocess_serialized_shapes(dynamic_shapes: Union[dict[str, Any], tuple[Any], list[Any], None], dims: dict[str, dict[str, Union[int, list[str], None]]], to_dict: Optional[bool]=False) -> Union[DynamicShapesSpec, dict[str, Any]]:\n    from torch.utils._sympy.numbers import int_oo\n    dims = {k: RootDim(min=v['min'], max=None if v['max'] is int_oo else v['max'], derived=sorted(v['derived'])) for k, v in sorted(dims.items())}\n    spec = DynamicShapesSpec(dynamic_shapes=dynamic_shapes, dims=dims)\n    if to_dict:\n        return _dataclass_to_dict(spec)\n    else:\n        return spec",
    "docstring": "Sorts dims and dumps to dictionary format.",
    "type": "function",
    "file_path": "pytorch\\torch\\_export\\serde\\dynamic_shapes.py",
    "ast_data": "FunctionDef name:_postprocess_serialized_shapes arg:dynamic_shapes arg:dims arg:to_dict arguments arg arg arg Assign Call Compare Call Call Call Assign Call If Return return:yes Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "isconstructor",
    "source_code": "def isconstructor(cls):\n    return inspect.isclass(cls) and (not (issubclass(cls.__class__, type) and hasattr(cls.__class__, '__call__') and (cls.__class__.__call__ is not type.__call__)))",
    "docstring": "Returns True if the argument is an object constructor. In general, any object of type class is a constructor, with the exception of classes created using a callable metaclass. See below for why a callable metaclass is not a trivial combination: Args: cls: Any Returns: Bool",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\autograph\\pyct\\inspect_utils.py",
    "ast_data": "FunctionDef name:isconstructor arg:cls arguments arg Return return:yes BoolOp Call BoolOp Call Call Compare"
  },
  {
    "library": "pandas",
    "name": "kind",
    "source_code": "@property\ndef kind(self):\n    return getattr(self.queryables.get(self.lhs), 'kind', None)",
    "docstring": "the kind of my field",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\computation\\pytables.py",
    "ast_data": "FunctionDef name:kind arg:self arguments arg Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "kill",
    "source_code": "def kill(self) -> None:\n    if self.alive():\n        autotuning_log.error('Sending SIGKILL to autotune subprocess %d', self.process.pid)\n        self.process.kill()\n    self.close()",
    "docstring": "Send a SIGKILL to the child process.",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\autotune_process.py",
    "ast_data": "FunctionDef name:kill arg:self arguments arg If Call Call Call Call"
  },
  {
    "library": "pandas",
    "name": "any",
    "source_code": "@final\n@Substitution(name='groupby')\n@Substitution(see_also=_common_see_also)\ndef any(self, skipna: bool=True) -> NDFrameT:\n    return self._cython_agg_general('any', alt=lambda x: Series(x, copy=False).any(skipna=skipna), skipna=skipna)",
    "docstring": "Return True if any value in the group is truthful, else False. Parameters ---------- skipna : bool, default True Flag to ignore nan values during truth testing. Returns ------- Series or DataFrame DataFrame or Series of boolean values, where a value is True if any element is True within its respective group, False otherwise. %(see_also)s Examples -------- For SeriesGroupBy: >>> lst = [\"a\", \"a\", \"b\"] >>> ser = pd.Series([1, 2, 0], index=lst) >>> ser a 1 a 2 b 0 dtype: int64 >>> ser.groupby(level=0).any() a True b False dtype: bool For DataFrameGroupBy: >>> data = [[1, 0, 3], [1, 0, 6], [7, 1, 9]] >>> df = pd.DataFrame( ... data, columns=[\"a\", \"b\", \"c\"], index=[\"ostrich\", \"penguin\", \"parrot\"] ... ) >>> df a b c ostrich 1 0 3 penguin 1 0 6 parrot 7 1 9 >>> df.groupby(by=[\"a\"]).any() b c a 1 False True 7 True True",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\groupby\\groupby.py",
    "ast_data": "FunctionDef name:any arg:self arg:skipna arguments arg arg Return return:yes Call arguments arg Call Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "LocalFeedback",
    "source_code": "class LocalFeedback:\n\n    def __init__(self, feedback_fn: Callable[[Choice], Feedback]) -> None:\n        self.feedback_fn = feedback_fn\n\n    def __call__(self, choice: Choice) -> Feedback:\n        return self.feedback_fn(choice)",
    "docstring": "To be able to collect data for a choice, a function providing feedback given a choice has to be provided. LocalFeedback can be used when AutoHeuristic should immediately run the function to collect feedback for each choice (see pad_mm.py, where the autotuning happens locally, for an example).",
    "type": "class",
    "file_path": "pytorch\\torch\\_inductor\\autoheuristic\\autoheuristic.py",
    "ast_data": "ClassDef name:LocalFeedback FunctionDef name:__init__ arg:self arg:feedback_fn arguments arg arg Assign FunctionDef name:__call__ arg:self arg:choice arguments arg arg Return return:yes Call"
  },
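A minimal sketch of the LocalFeedback pattern: `Choice`, `Feedback`, and `benchmark_choice` are stand-ins here (in AutoHeuristic they are autotuning types and a real timing routine).

```python
from typing import Callable

Choice = str     # illustrative alias
Feedback = float # illustrative alias

class LocalFeedback:
    def __init__(self, feedback_fn: Callable[[Choice], Feedback]) -> None:
        self.feedback_fn = feedback_fn

    def __call__(self, choice: Choice) -> Feedback:
        return self.feedback_fn(choice)

# Hypothetical timing function standing in for a local autotune run.
def benchmark_choice(choice: Choice) -> Feedback:
    return {"pad": 1.3, "no_pad": 2.1}[choice]

feedback = LocalFeedback(benchmark_choice)
print(feedback("pad"))  # 1.3 - feedback is collected immediately per choice
```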
  {
    "library": "numpy",
    "name": "__init__",
    "source_code": "def __init__(self, destpath=os.curdir):\n    if destpath:\n        self._destpath = os.path.abspath(destpath)\n        self._istmpdest = False\n    else:\n        import tempfile\n        self._destpath = tempfile.mkdtemp()\n        self._istmpdest = True",
    "docstring": "Create a DataSource with a local path at destpath.",
    "type": "method",
    "file_path": "numpy\\numpy\\lib\\_datasource.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:destpath arguments arg arg If Assign Call Assign Assign Call Assign"
  },
  {
    "library": "pandas",
    "name": "_validate_columns",
    "source_code": "@final\ndef _validate_columns(self) -> None:\n    if self.attr_cols and (not is_list_like(self.attr_cols)):\n        raise TypeError(f'{type(self.attr_cols).__name__} is not a valid type for attr_cols')\n    if self.elem_cols and (not is_list_like(self.elem_cols)):\n        raise TypeError(f'{type(self.elem_cols).__name__} is not a valid type for elem_cols')",
    "docstring": "Validate elems_cols and attrs_cols. This method will check if columns is list-like. Raises ------ ValueError * If value is not a list and less then length of nodes.",
    "type": "method",
    "file_path": "pandas\\pandas\\io\\formats\\xml.py",
    "ast_data": "FunctionDef name:_validate_columns arg:self arguments arg If BoolOp Call Raise Call Call If BoolOp Call Raise Call Call"
  },
  {
    "library": "tensorflow",
    "name": "write_to_file",
    "source_code": "def write_to_file(self, file_path):\n    with gfile.Open(file_path, 'w') as f:\n        for line in self._lines:\n            f.write(line + '\\n')",
    "docstring": "Write the object itself to file, in a plain format. The font_attr_segs and annotations are ignored. Args: file_path: (str) path of the file to write to.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\debug\\cli\\debugger_cli_common.py",
    "ast_data": "FunctionDef name:write_to_file arg:self arg:file_path arguments arg arg With Call For Call"
  },
  {
    "library": "tensorflow",
    "name": "_watchdog_function",
    "source_code": "def _watchdog_function(self):\n    logging.info('Starting watchdog thread with timeout %r', self._timeout)\n    while not self._stopped:\n        time.sleep(self._timeout / 10.0)\n        current_time = time.time()\n        if current_time - self._last_activity_time >= self._timeout:\n            logging.warning('No activity for ClusterCoordinator for %r seconds. Dumping stack traces.', self._timeout)\n            if self._on_triggered:\n                self._on_triggered()\n            faulthandler.dump_traceback(file=self._traceback_file)\n            self._traceback_file.write('==== End of stack traces ====\\n')\n            self._last_activity_time = current_time",
    "docstring": "The watchdog thread.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\coordinator\\watchdog.py",
    "ast_data": "FunctionDef name:_watchdog_function arg:self arguments arg Call While Call Assign Call If Compare Call If Call Call Call Assign"
  },
  {
    "library": "tensorflow",
    "name": "convert_bytearray_to_object",
    "source_code": "def convert_bytearray_to_object(model_bytearray):\n    model_object = schema_fb.Model.GetRootAsModel(model_bytearray, 0)\n    return schema_fb.ModelT.InitFromObj(model_object)",
    "docstring": "Converts a tflite model from a bytearray to an object for parsing.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\lite\\tools\\flatbuffer_utils.py",
    "ast_data": "FunctionDef name:convert_bytearray_to_object arg:model_bytearray arguments arg Assign Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "is_output_of_multi_outputs_template",
    "source_code": "def is_output_of_multi_outputs_template(input_buf: Optional[Union[Buffer, Operation]]) -> bool:\n    from . import ir\n    return isinstance(input_buf, ir.MultiOutput) and len(input_buf.inputs) == 1 and is_multi_outputs_template(input_buf.inputs[0])",
    "docstring": "Check if input buffer is a output of multi-outputs template buffer",
    "type": "function",
    "file_path": "pytorch\\torch\\_inductor\\utils.py",
    "ast_data": "FunctionDef name:is_output_of_multi_outputs_template arg:input_buf arguments arg Return return:yes BoolOp Call Compare Call Call"
  },
  {
    "library": "django",
    "name": "geom_col_name",
    "source_code": "@classmethod\ndef geom_col_name(cls):\n    return 'column_name'",
    "docstring": "Return the name of the metadata column used to store the feature geometry column.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\db\\backends\\oracle\\models.py",
    "ast_data": "FunctionDef name:geom_col_name arg:cls arguments arg Return return:yes"
  },
  {
    "library": "django",
    "name": "__len__",
    "source_code": "def __len__(self):\n    return self.num_interior_rings + 1",
    "docstring": "Return the number of rings in this Polygon.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\geos\\polygon.py",
    "ast_data": "FunctionDef name:__len__ arg:self arguments arg Return return:yes"
  },
  {
    "library": "scipy",
    "name": "_matmat",
    "source_code": "def _matmat(self, x):\n    return self._matvec(x)",
    "docstring": "Construct matrix-free callable matrix-matrix multiplication by the Sakurai matrix without constructing or storing the matrix itself by reusing the ``.",
    "type": "method",
    "file_path": "scipy\\scipy\\sparse\\linalg\\_special_sparse_arrays.py",
    "ast_data": "FunctionDef name:_matmat arg:self arg:x arguments arg arg Return return:yes Call"
  },
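A hedged sketch of the matrix-free pattern this reuses: when `_matvec` already broadcasts over the columns of a 2-D block, it can serve directly as `_matmat`. The `Scaled` operator below is illustrative, not part of SciPy.

```python
import numpy as np
from scipy.sparse.linalg import LinearOperator

class Scaled(LinearOperator):
    """Matrix-free operator for alpha * I, never materialized."""

    def __init__(self, n, alpha=2.0):
        super().__init__(dtype=np.float64, shape=(n, n))
        self.alpha = alpha

    def _matvec(self, x):
        # Elementwise scaling works for (n,) vectors and (n, k) blocks alike.
        return self.alpha * x

    def _matmat(self, X):
        # Same trick as the Sakurai operator: reuse the vector kernel.
        return self._matvec(X)

A = Scaled(3)
print(A @ np.ones(3))  # [2. 2. 2.]
print(A @ np.eye(3))   # 2 * I, computed without ever storing A
```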
  {
    "library": "matplotlib",
    "name": "set_visible",
    "source_code": "def set_visible(self, visible):\n    self._visible = visible\n    for artist in self.artists:\n        artist.set_visible(visible)",
    "docstring": "Set the visibility of the selector artists.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\widgets.py",
    "ast_data": "FunctionDef name:set_visible arg:self arg:visible arguments arg arg Assign For Call"
  },
  {
    "library": "scikit-learn",
    "name": "_validate_parameters",
    "source_code": "def _validate_parameters(self):\n    if self.monotonic_cst is not None and self.n_trees_per_iteration_ != 1:\n        raise ValueError('monotonic constraints are not supported for multiclass classification.')",
    "docstring": "Validate parameters passed to __init__. The parameters that are directly passed to the grower are checked in TreeGrower.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\ensemble\\_hist_gradient_boosting\\gradient_boosting.py",
    "ast_data": "FunctionDef name:_validate_parameters arg:self arguments arg If BoolOp Compare Compare Raise Call"
  },
  {
    "library": "pandas",
    "name": "GroupByCythonAggEaDtypes",
    "source_code": "class GroupByCythonAggEaDtypes:\n    param_names = ['dtype', 'method']\n    params = [['Float64', 'Int64', 'Int32'], ['sum', 'prod', 'min', 'max', 'mean', 'median', 'var', 'first', 'last', 'any', 'all']]\n\n    def setup(self, dtype, method):\n        N = 1000000\n        df = DataFrame(np.random.randint(0, high=100, size=(N, 10)), columns=list('abcdefghij'), dtype=dtype)\n        df.loc[list(range(1, N, 5)), list('abcdefghij')] = NA\n        df['key'] = np.random.randint(0, 100, size=N)\n        self.df = df\n\n    def time_frame_agg(self, dtype, method):\n        self.df.groupby('key').agg(method)",
    "docstring": "Benchmarks specifically targeting our cython aggregation algorithms (using a big enough dataframe with simple key, so a large part of the time is actually spent in the grouped aggregation).",
    "type": "class",
    "file_path": "pandas\\asv_bench\\benchmarks\\groupby.py",
    "ast_data": "ClassDef name:GroupByCythonAggEaDtypes Assign Assign FunctionDef name:setup arg:self arg:dtype arg:method arguments arg arg arg Assign Assign Call Call Call Assign Call Call Call Assign Call Assign FunctionDef name:time_frame_agg arg:self arg:dtype arg:method arguments arg arg arg Call Call"
  },
  {
    "library": "pytorch",
    "name": "ConvBn2d",
    "source_code": "class ConvBn2d(_ConvBnNd, nn.Conv2d):\n    _FLOAT_MODULE: ClassVar[type[nni.ConvBn2d]] = nni.ConvBn2d\n    _FLOAT_CONV_MODULE: ClassVar[type[nn.Conv2d]] = nn.Conv2d\n    _FLOAT_BN_MODULE: ClassVar[Optional[type[nn.Module]]] = nn.BatchNorm2d\n    _FLOAT_RELU_MODULE: ClassVar[Optional[type[nn.Module]]] = None\n\n    def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, groups=1, bias=None, padding_mode='zeros', eps=1e-05, momentum=0.1, freeze_bn=False, qconfig=None):\n        kernel_size = _pair(kernel_size)\n        stride = _pair(stride)\n        padding = _pair(padding)\n        dilation = _pair(dilation)\n        _ConvBnNd.__init__(self, in_channels, out_channels, kernel_size, stride, padding, dilation, False, _pair(0), groups, bias, padding_mode, eps, momentum, freeze_bn, qconfig, dim=2)",
    "docstring": "A ConvBn2d module is a module fused from Conv2d and BatchNorm2d, attached with FakeQuantize modules for weight, used in quantization aware training. We combined the interface of :class: and :class:. Similar to :class:, with FakeQuantize modules initialized to default. Attributes: freeze_bn: weight_fake_quant: fake quant module for weight",
    "type": "class",
    "file_path": "pytorch\\torch\\ao\\nn\\intrinsic\\qat\\modules\\conv_fused.py",
    "ast_data": "ClassDef name:ConvBn2d FunctionDef name:__init__ arg:self arg:in_channels arg:out_channels arg:kernel_size arg:stride arg:padding arg:dilation arg:groups arg:bias arg:padding_mode arg:eps arg:momentum arg:freeze_bn arg:qconfig arguments arg arg arg arg arg arg arg arg arg arg arg arg arg arg Assign Call Assign Call Assign Call Assign Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "load_from_saved_model",
    "source_code": "def load_from_saved_model(saved_model_path, custom_objects=None):\n    warnings.warn('`tf.keras.experimental.load_from_saved_model` is deprecatedand will be removed in a future version. Please switch to `tf.keras.models.load_model`.')\n    model_json_filepath = os.path.join(compat.as_bytes(saved_model_path), compat.as_bytes(constants.ASSETS_DIRECTORY), compat.as_bytes(SAVED_MODEL_FILENAME_JSON))\n    with gfile.Open(model_json_filepath, 'r') as f:\n        model_json = f.read()\n    model = model_config.model_from_json(model_json, custom_objects=custom_objects)\n    checkpoint_prefix = os.path.join(compat.as_text(saved_model_path), compat.as_text(constants.VARIABLES_DIRECTORY), compat.as_text(constants.VARIABLES_FILENAME))\n    model.load_weights(checkpoint_prefix)\n    return model",
    "docstring": "Loads a keras Model from a SavedModel created by . This function reinstantiates model state by: 1) loading model topology from json (this will eventually come from metagraph). 2) loading model weights from checkpoint. Example: Args: saved_model_path: a string specifying the path to an existing SavedModel. custom_objects: Optional dictionary mapping names (strings) to custom classes or functions to be considered during deserialization. Returns: a keras.Model instance.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\saving\\saved_model_experimental.py",
    "ast_data": "FunctionDef name:load_from_saved_model arg:saved_model_path arg:custom_objects arguments arg arg Call Assign Call Call Call Call With Call Assign Call Assign Call Assign Call Call Call Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "AttributeMutationNew",
    "source_code": "class AttributeMutationNew(AttributeMutation):\n\n    def __init__(self, cls_source: Optional[Source]=None):\n        super().__init__(SourceType.New)\n        self.cls_source = cls_source",
    "docstring": "This case of VariableTracker.mutation_type marker indicates 1. Dynamo allows mutation on the value's attributes. 2. The value is created by the bytecode Dynamo is tracing through. For instance, Dynamo could model a newly created object with this marker, indicating that while we need to model mutations to this object, we don't have to emit bytecode for these mutations if the object doesn't escape into the Python world.",
    "type": "class",
    "file_path": "pytorch\\torch\\_dynamo\\variables\\base.py",
    "ast_data": "ClassDef name:AttributeMutationNew FunctionDef name:__init__ arg:self arg:cls_source arguments arg arg Call Call Assign"
  },
  {
    "library": "scikit-learn",
    "name": "_precompute_metric_params",
    "source_code": "def _precompute_metric_params(X, Y, metric=None, **kwds):\n    if metric == 'seuclidean' and 'V' not in kwds:\n        if X is Y:\n            V = np.var(X, axis=0, ddof=1)\n        else:\n            raise ValueError(\"The 'V' parameter is required for the seuclidean metric when Y is passed.\")\n        return {'V': V}\n    if metric == 'mahalanobis' and 'VI' not in kwds:\n        if X is Y:\n            VI = np.linalg.inv(np.cov(X.T)).T\n        else:\n            raise ValueError(\"The 'VI' parameter is required for the mahalanobis metric when Y is passed.\")\n        return {'VI': VI}\n    return {}",
    "docstring": "Precompute data-derived metric parameters if not provided.",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\metrics\\pairwise.py",
    "ast_data": "FunctionDef name:_precompute_metric_params arg:X arg:Y arg:metric arguments arg arg arg arg If BoolOp Compare Compare If Compare Assign Call Raise Call Return return:yes If BoolOp Compare Compare If Compare Assign Call Call Raise Call Return return:yes Return return:no"
  },
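A hedged usage sketch of the rule above: with `X is Y` sklearn derives `V` itself, but for a distinct `Y` the variance vector must be supplied. Pooling the variance over both arrays is an illustrative choice here, not sklearn's prescription.

```python
import numpy as np
from sklearn.metrics import pairwise_distances

rng = np.random.default_rng(0)
X = rng.normal(size=(5, 3))
Y = rng.normal(size=(4, 3))

# When X is Y, sklearn computes V = np.var(X, axis=0, ddof=1) itself;
# with a separate Y, the caller must pass V explicitly.
V = np.var(np.vstack([X, Y]), axis=0, ddof=1)
D = pairwise_distances(X, Y, metric="seuclidean", V=V)
print(D.shape)  # (5, 4)
```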
  {
    "library": "matplotlib",
    "name": "get_text",
    "source_code": "def get_text(self):\n    return self._text",
    "docstring": "Return the text string.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\text.py",
    "ast_data": "FunctionDef name:get_text arg:self arguments arg Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "get_children",
    "source_code": "def get_children(self):\n    return []",
    "docstring": "Return a list of the child \\s of this .",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\artist.py",
    "ast_data": "FunctionDef name:get_children arg:self arguments arg Return return:no"
  },
  {
    "library": "pytorch",
    "name": "reduce",
    "source_code": "def reduce(tensor, dst, op=ReduceOp.SUM, group=group.WORLD):\n    return _Reduce.apply(dst, op, group, tensor)",
    "docstring": "Reduces the tensor data across all machines. Only the process with rank `` enum. Specifies an operation used for element-wise reductions. group (ProcessGroup, optional): The process group to work on. Returns: Tensor: Output of the collective.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\nn\\functional.py",
    "ast_data": "FunctionDef name:reduce arg:tensor arg:dst arg:op arg:group arguments arg arg arg arg Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "union_with_duplicates",
    "source_code": "def union_with_duplicates(lvals: ArrayLike | Index, rvals: ArrayLike | Index) -> ArrayLike | Index:\n    from pandas import Series\n    l_count = value_counts_internal(lvals, dropna=False)\n    r_count = value_counts_internal(rvals, dropna=False)\n    l_count, r_count = l_count.align(r_count, fill_value=0)\n    final_count = np.maximum(l_count.values, r_count.values)\n    final_count = Series(final_count, index=l_count.index, dtype='int', copy=False)\n    if isinstance(lvals, ABCMultiIndex) and isinstance(rvals, ABCMultiIndex):\n        unique_vals = lvals.append(rvals).unique()\n    else:\n        if isinstance(lvals, ABCIndex):\n            lvals = lvals._values\n        if isinstance(rvals, ABCIndex):\n            rvals = rvals._values\n        combined = concat_compat([lvals, rvals])\n        unique_vals = unique(combined)\n        unique_vals = ensure_wrapped_if_datetimelike(unique_vals)\n    repeats = final_count.reindex(unique_vals).values\n    return np.repeat(unique_vals, repeats)",
    "docstring": "Extracts the union from lvals and rvals with respect to duplicates and nans in both arrays. Parameters ---------- lvals: np.ndarray or ExtensionArray left values which is ordered in front. rvals: np.ndarray or ExtensionArray right values ordered after lvals. Returns ------- np.ndarray or ExtensionArray Containing the unsorted union of both arrays. Notes ----- Caller is responsible for ensuring lvals.dtype == rvals.dtype.",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\algorithms.py",
    "ast_data": "FunctionDef name:union_with_duplicates arg:lvals arg:rvals arguments arg arg Assign Call Assign Call Assign Call Assign Call Assign Call If BoolOp Call Call Assign Call Call If Call Assign If Call Assign Assign Call Assign Call Assign Call Assign Call Return return:yes Call"
  },
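A public-API sketch of the counting idea behind `union_with_duplicates`: every distinct value appears `max(left_count, right_count)` times in the union. Unlike the real function, this sketch returns values in sorted-index order rather than order of first appearance.

```python
import numpy as np
import pandas as pd

lvals = pd.Series([1, 1, 2, np.nan])
rvals = pd.Series([1, 2, 2, 2])

l_count = lvals.value_counts(dropna=False)
r_count = rvals.value_counts(dropna=False)
l_count, r_count = l_count.align(r_count, fill_value=0)

# Each distinct value is kept max(left, right) times.
final_count = np.maximum(l_count.values, r_count.values)
unique_vals = l_count.index.values
print(np.repeat(unique_vals, final_count))  # [1. 1. 2. 2. 2. nan]
```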
  {
    "library": "tensorflow",
    "name": "_make_iterator",
    "source_code": "def _make_iterator(self):\n    with ops.device(self._worker):\n        if self._options is not None:\n            self._iterator = multi_device_iterator_ops.MultiDeviceIterator(self._dataset, self._devices, max_buffer_size=self._options.experimental_per_replica_buffer_size, prefetch_buffer_size=self._options.experimental_per_replica_buffer_size)\n        else:\n            self._iterator = multi_device_iterator_ops.MultiDeviceIterator(self._dataset, self._devices)",
    "docstring": "Make appropriate iterator on the dataset.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\v1\\input_lib.py",
    "ast_data": "FunctionDef name:_make_iterator arg:self arguments arg With Call If Compare Assign Call Assign Call"
  },
  {
    "library": "pytorch",
    "name": "get_compute_time",
    "source_code": "def get_compute_time(func_packet, args, kwargs, out, out_dtypes) -> float:\n    if func_packet in flop_registry:\n        assert len(out_dtypes) == 1, f'Only support single out dtype got {out_dtypes} for {func_packet}'\n        dtype = out_dtypes.pop()\n        peak_gpu_flops = get_device_tflops(dtype) * 1000000000000000.0\n        factor = 0.75\n        peak_empirical_flops = factor * peak_gpu_flops\n        flop_count_func = flop_registry[func_packet]\n        flop_count = flop_count_func(*args, **kwargs, out_val=out) / 2\n        compute_time = flop_count / peak_empirical_flops * 1000000000.0\n        return compute_time\n    return 0.0",
    "docstring": "Estimates the compute time of an aten operator. Args: func_packet: The operator overload packet. args: The arguments to the operator. kwargs: The keyword arguments to the operator. out: The output of the operator. out_dtypes: The output data types. Returns: float: The estimated compute time in nanoseconds.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\_tools\\runtime_estimator.py",
    "ast_data": "FunctionDef name:get_compute_time arg:func_packet arg:args arg:kwargs arg:out arg:out_dtypes arguments arg arg arg arg arg If Compare Compare Call Assign Call Assign Call Assign Assign Assign Assign Call Assign Return return:yes Return return:yes"
  },
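A worked instance of the estimator's arithmetic, with illustrative numbers (the 100 TFLOP/s peak is assumed, not queried from a device):

```python
# Registry count for a (1024 x 1024) @ (1024 x 1024) matmul, followed by
# the estimator's "/ 2" adjustment and 0.75 derating of peak throughput.
flop_count = (2 * 1024**3) / 2            # after the "/ 2" in the code above
peak_gpu_flops = 100e12                   # assumed 100 TFLOP/s device peak
peak_empirical_flops = 0.75 * peak_gpu_flops
compute_time_ns = flop_count / peak_empirical_flops * 1e9
print(f"{compute_time_ns:.0f} ns")        # ~14317 ns, i.e. about 14.3 us
```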
  {
    "library": "pytorch",
    "name": "record_artifact",
    "source_code": "@classmethod\ndef record_artifact(cls, artifact_type: str, key: str, content: Any) -> None:\n    artifact = CacheArtifactFactory.encode_create(artifact_type, key, content)\n    if artifact in cls._seen_artifacts:\n        return\n    log.debug('Recording %s', str(artifact))\n    cls._new_cache_artifacts[artifact_type].append(artifact)\n    cls._seen_artifacts.add(artifact)",
    "docstring": "Called from each caching operation to record the artifact in this \"mega\" list",
    "type": "method",
    "file_path": "pytorch\\torch\\compiler\\_cache.py",
    "ast_data": "FunctionDef name:record_artifact arg:cls arg:artifact_type arg:key arg:content arguments arg arg arg arg Assign Call If Compare Return return:no Call Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "set_multi_thread_and_allocator",
    "source_code": "def set_multi_thread_and_allocator(self, ncores_per_instance, disable_iomp=False, set_kmp_affinity=True, enable_tcmalloc=True, enable_jemalloc=False, use_default_allocator=False):\n    self.set_memory_allocator(enable_tcmalloc, enable_jemalloc, use_default_allocator)\n    self.set_env('OMP_NUM_THREADS', str(ncores_per_instance))\n    if not disable_iomp:\n        find_iomp = self.add_lib_preload(lib_type='iomp5')\n        if not find_iomp:\n            msg = f'{self.msg_lib_notfound} you can use \"conda install mkl\" to install {{0}}'\n            logger.warning(msg.format('iomp', 'iomp5'))\n        else:\n            logger.info('Using Intel OpenMP')\n            if set_kmp_affinity:\n                self.set_env('KMP_AFFINITY', 'granularity=fine,compact,1,0')\n            self.set_env('KMP_BLOCKTIME', '1')\n    self.log_env_var('LD_PRELOAD')",
    "docstring": "Set multi-thread configuration and enable Intel openMP and TCMalloc/JeMalloc. By default, GNU openMP and PTMalloc are used in PyTorch. but Intel openMP and TCMalloc/JeMalloc are better alternatives to get performance benefit.",
    "type": "method",
    "file_path": "pytorch\\torch\\backends\\xeon\\run_cpu.py",
    "ast_data": "FunctionDef name:set_multi_thread_and_allocator arg:self arg:ncores_per_instance arg:disable_iomp arg:set_kmp_affinity arg:enable_tcmalloc arg:enable_jemalloc arg:use_default_allocator arguments arg arg arg arg arg arg arg Call Call Call If Assign Call If Assign Call Call Call If Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "_partial_wrapper_equals",
    "source_code": "def _partial_wrapper_equals(obs_or_fq1: _PartialWrapper, obs_or_fq2: _PartialWrapper):\n    obs_or_fq1_keywords = copy.copy(obs_or_fq1.p.keywords)\n    obs_or_fq2_keywords = copy.copy(obs_or_fq2.p.keywords)\n    keywords_equal = True\n    if 'observer' in obs_or_fq1_keywords and 'observer' in obs_or_fq2_keywords:\n        keywords_equal = keywords_equal and _obs_or_fq_ctr_equals(obs_or_fq1_keywords['observer'], obs_or_fq2_keywords['observer'])\n        obs_or_fq1_keywords.pop('observer')\n        obs_or_fq2_keywords.pop('observer')\n    keywords_equal = keywords_equal and obs_or_fq1_keywords == obs_or_fq2_keywords\n    return obs_or_fq1.p.func == obs_or_fq2.p.func and obs_or_fq1.p.args == obs_or_fq2.p.args and keywords_equal",
    "docstring": "Return whether the two partial wrappers are equal,",
    "type": "function",
    "file_path": "pytorch\\torch\\ao\\quantization\\qconfig.py",
    "ast_data": "FunctionDef name:_partial_wrapper_equals arg:obs_or_fq1 arg:obs_or_fq2 arguments arg arg Assign Call Assign Call Assign If BoolOp Compare Compare Assign BoolOp Call Call Call Assign BoolOp Compare Return return:yes BoolOp Compare Compare"
  },
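A small demonstration of why this helper exists: `functools.partial` objects compare by identity, so structurally identical qconfig constructors would otherwise never compare equal.

```python
import functools

def observer(a, b=0):
    return a + b

p1 = functools.partial(observer, 1, b=2)
p2 = functools.partial(observer, 1, b=2)

print(p1 == p2)  # False: partial has no structural __eq__, only identity
print(
    p1.func == p2.func
    and p1.args == p2.args
    and p1.keywords == p2.keywords
)  # True: the field-wise comparison _partial_wrapper_equals builds on
```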
  {
    "library": "django",
    "name": "BadRequest",
    "source_code": "class BadRequest(Exception):\n    pass",
    "docstring": "The request is malformed and cannot be processed.",
    "type": "class",
    "file_path": "django\\django\\core\\exceptions.py",
    "ast_data": "ClassDef name:BadRequest"
  },
  {
    "library": "django",
    "name": "close",
    "source_code": "def close(self):\n    pass",
    "docstring": "Close a network connection.",
    "type": "method",
    "file_path": "django\\django\\core\\mail\\backends\\base.py",
    "ast_data": "FunctionDef name:close arg:self arguments arg"
  },
  {
    "library": "tensorflow",
    "name": "add_loss",
    "source_code": "@tf_export(v1=['losses.add_loss'])\ndef add_loss(loss, loss_collection=ops.GraphKeys.LOSSES):\n    if loss_collection and (not context.executing_eagerly()):\n        ops.add_to_collection(loss_collection, loss)",
    "docstring": "Adds a externally defined loss to the collection of losses. Args: loss: A loss . loss_collection: Optional collection to add the loss to.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\losses\\util.py",
    "ast_data": "FunctionDef name:add_loss arg:loss arg:loss_collection arguments arg arg If BoolOp Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "_generate_wrapped_number",
    "source_code": "def _generate_wrapped_number(g: jit_utils.GraphContext, scalar):\n    assert not isinstance(scalar, torch.Tensor)\n    if isinstance(scalar, float):\n        return g.op('Constant', value_t=torch.tensor(scalar, dtype=torch.double))\n    return g.op('Constant', value_t=torch.tensor(scalar))",
    "docstring": "Creates a wrapped number based on A Tensor is a considered a \"wrapped number\" if it is auto-wrapped from a C++ or Python number type. Integer types are wrapped as 0-dim int64 tensors and floating-point types are wrapped as 0-dim double tensors. The input to this function is constant value. If the data type is a floating point type, it is converted to a 0-dim double tensor, else it is converted to a 0-dim tensor of its original type",
    "type": "function",
    "file_path": "pytorch\\torch\\onnx\\symbolic_helper.py",
    "ast_data": "FunctionDef name:_generate_wrapped_number arg:g arg:scalar arguments arg arg Call If Call Return return:yes Call Call Return return:yes Call Call"
  },
  {
    "library": "kornia",
    "name": "binary_masks",
    "source_code": "@property\ndef binary_masks(self) -> Tensor:\n    if self._original_res_logits is not None:\n        x = self._original_res_logits\n    else:\n        x = self.logits\n    return x > self.mask_threshold",
    "docstring": "Binary mask generated from logits considering the mask_threshold. Shape will be the same of logits :math: where :math: is the number masks predicted. .. note:: If you run , this will generate the masks based on the original resolution logits. Otherwise, this will use the low resolution logits (self.logits).",
    "type": "method",
    "file_path": "kornia\\kornia\\contrib\\models\\structures.py",
    "ast_data": "FunctionDef name:binary_masks arg:self arguments arg If Compare Assign Assign Return return:yes Compare"
  },
  {
    "library": "sphinx",
    "name": "get_js_stemmer_rawcodes",
    "source_code": "def get_js_stemmer_rawcodes(self) -> list[_StrPath]:\n    if self.lang.js_stemmer_rawcode:\n        return [_StrPath(_NON_MINIFIED_JS_PATH / 'base-stemmer.js'), _StrPath(_NON_MINIFIED_JS_PATH / self.lang.js_stemmer_rawcode)]\n    else:\n        return []",
    "docstring": "Returns a list of non-minified stemmer JS files to copy.",
    "type": "method",
    "file_path": "sphinx\\sphinx\\search\\__init__.py",
    "ast_data": "FunctionDef name:get_js_stemmer_rawcodes arg:self arguments arg If Return return:yes Call Call Return return:no"
  },
  {
    "library": "numpy",
    "name": "hermeline",
    "source_code": "def hermeline(off, scl):\n    if scl != 0:\n        return np.array([off, scl])\n    else:\n        return np.array([off])",
    "docstring": "Hermite series whose graph is a straight line. Parameters ---------- off, scl : scalars The specified line is given by ``. See Also -------- numpy.polynomial.polynomial.polyline numpy.polynomial.chebyshev.chebline numpy.polynomial.legendre.legline numpy.polynomial.laguerre.lagline numpy.polynomial.hermite.hermline Examples -------- >>> from numpy.polynomial.hermite_e import hermeline >>> from numpy.polynomial.hermite_e import hermeline, hermeval >>> hermeval(0,hermeline(3, 2)) 3.0 >>> hermeval(1,hermeline(3, 2)) 5.0",
    "type": "function",
    "file_path": "numpy\\numpy\\polynomial\\hermite_e.py",
    "ast_data": "FunctionDef name:hermeline arg:off arg:scl arguments arg arg If Compare Return return:yes Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "pretty_print_mismatch",
    "source_code": "def pretty_print_mismatch(self, graph: bool=False):\n    print(f' Mismatch info for graph partition {self.id}: '.center(80, '='))\n    if graph:\n        print(' ATen JIT graph '.center(80, '='))\n        print(self.graph)\n        if self._onnx_graph is not None:\n            print(' ONNX graph '.center(80, '='))\n            print(self._onnx_graph)\n    if self.has_mismatch():\n        print(' Mismatch error '.center(80, '='))\n        print(self.mismatch_error)\n    else:\n        print(' No mismatch '.center(80, '='))",
    "docstring": "Pretty print details of the mismatch between torch and ONNX. Args: graph: If True, print the ATen JIT graph and ONNX graph.",
    "type": "method",
    "file_path": "pytorch\\torch\\onnx\\verification.py",
    "ast_data": "FunctionDef name:pretty_print_mismatch arg:self arg:graph arguments arg arg Call Call If Call Call Call If Compare Call Call Call If Call Call Call Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "get_backend",
    "source_code": "@classmethod\ndef get_backend(cls) -> Optional[str]:\n    if (val := get_env_val('TORCH_BISECT_BACKEND')):\n        return val\n    file_path = os.path.join(cls.get_dir(), 'bisect_status.txt')\n    lines = cls.read_lines_from_file(file_path)\n    for line in lines:\n        if line.startswith('backend='):\n            return line.strip().split('=')[1]\n    return None",
    "docstring": "Returns the active backend, if any",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\compiler_bisector.py",
    "ast_data": "FunctionDef name:get_backend arg:cls arguments arg If Call Return return:yes Assign Call Call Assign Call For If Call Return return:yes Call Call Return return:no"
  },
  {
    "library": "scikit-learn",
    "name": "_check_rows_and_columns",
    "source_code": "def _check_rows_and_columns(a, b):\n    check_consistent_length(*a)\n    check_consistent_length(*b)\n    checks = lambda x: check_array(x, ensure_2d=False)\n    a_rows, a_cols = map(checks, a)\n    b_rows, b_cols = map(checks, b)\n    return (a_rows, a_cols, b_rows, b_cols)",
    "docstring": "Unpacks the row and column arrays and checks their shape.",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\metrics\\cluster\\_bicluster.py",
    "ast_data": "FunctionDef name:_check_rows_and_columns arg:a arg:b arguments arg arg Call Call Assign arguments arg Call Assign Call Assign Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "get_strides_of_load",
    "source_code": "def get_strides_of_load(self, index: sympy.Expr) -> dict[sympy.Symbol, sympy.Expr]:\n    index_to_tile_indexes = {k: v.expr for k, v in self.range_tree_nodes.items()}\n    index_in_tile_vars = sympy_subs(index, index_to_tile_indexes)\n    strides = {}\n    for range_tree in self.range_trees:\n        s = sympy_index_symbol(range_tree.name)\n        strides[s] = sympy_subs(index_in_tile_vars, {s: 1}) - sympy_subs(index_in_tile_vars, {s: 0})\n    return strides",
    "docstring": "This gets the stride of the index for each of the tiling variables (technically, it does it at index 0) For example, if xindex = x0 + 512*x1 + 1024*r0 x0 = (xindex//512) x1 = (xindex % 512) r0 = rindex // 1024 this function would return {xindex: 512, rindex: 1024}",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\codegen\\simd.py",
    "ast_data": "FunctionDef name:get_strides_of_load arg:self arg:index arguments arg arg Assign Call Assign Call Assign For Assign Call Assign Call Call Return return:yes"
  },
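A standalone sympy sketch of the substitution trick from the docstring (plain `subs` stands in for Inductor's `sympy_subs`):

```python
import sympy

x0, x1, r0 = sympy.symbols("x0 x1 r0")
index = x0 + 512 * x1 + 1024 * r0

# Stride w.r.t. each tiling variable: substitute 1 and 0 for that variable
# and subtract; all the other terms cancel in the difference.
strides = {s: index.subs(s, 1) - index.subs(s, 0) for s in (x0, x1, r0)}
print(strides)  # {x0: 1, x1: 512, r0: 1024}
```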
  {
    "library": "tensorflow",
    "name": "connect_ancillary_layers",
    "source_code": "def connect_ancillary_layers(model, created_layers):\n    ancillary_layers = [layer for layer in created_layers.values() if layer not in model.layers]\n    if ancillary_layers:\n        relevant_nodes = nest.flatten([layer.inbound_nodes[1:] if _should_skip_first_node(layer) else layer.inbound_nodes for layer in created_layers.values()])\n        model._insert_layers(ancillary_layers, relevant_nodes)\n    return model",
    "docstring": "Adds layers that are not connected to the outputs to the model.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\functional.py",
    "ast_data": "FunctionDef name:connect_ancillary_layers arg:model arg:created_layers arguments arg arg Assign Call Compare If Assign Call Call Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "assets",
    "source_code": "@abc.abstractmethod\ndef assets(self):\n    raise NotImplementedError()",
    "docstring": "Provide all of the assets contained by the PluginAsset instance. The assets method should return a dictionary structured as {asset_name: asset_contents}. asset_contents is a string. This method will be called by the tf.compat.v1.summary.FileWriter when it is time to write the assets out to disk.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\summary\\plugin_asset.py",
    "ast_data": "FunctionDef name:assets arg:self arguments arg Raise Call"
  },
  {
    "library": "kornia",
    "name": "ImageLayout",
    "source_code": "@dataclass(frozen=True)\nclass ImageLayout:\n    image_size: ImageSize\n    channels: int\n    channels_order: ChannelsOrder",
    "docstring": "Data class to represent the layout of an image. Args: image_size: image size. channels: number of channels. channels_order: channels order. Example: >>> layout = ImageLayout(ImageSize(3, 4), 3, ChannelsOrder.CHANNELS_LAST) >>> layout.image_size ImageSize(height=3, width=4) >>> layout.channels 3 >>> layout.channels_order",
    "type": "class",
    "file_path": "kornia\\kornia\\image\\base.py",
    "ast_data": "ClassDef name:ImageLayout Call"
  },
  {
    "library": "django",
    "name": "__init__",
    "source_code": "def __init__(self, language, domain=None, localedirs=None):\n    gettext_module.GNUTranslations.__init__(self)\n    if domain is not None:\n        self.domain = domain\n    self.__language = language\n    self.__to_language = to_language(language)\n    self.__locale = to_locale(language)\n    self._catalog = None\n    self.plural = lambda n: int(n != 1)\n    if self.domain == 'django':\n        if localedirs is not None:\n            warnings.warn(\"localedirs is ignored when domain is 'django'.\", RuntimeWarning)\n            localedirs = None\n        self._init_translation_catalog()\n    if localedirs:\n        for localedir in localedirs:\n            translation = self._new_gnu_trans(localedir)\n            self.merge(translation)\n    else:\n        self._add_installed_apps_translations()\n    self._add_local_translations()\n    if self.__language == settings.LANGUAGE_CODE and self.domain == 'django' and (self._catalog is None):\n        raise OSError('No translation files found for default language %s.' % settings.LANGUAGE_CODE)\n    self._add_fallback(localedirs)\n    if self._catalog is None:\n        self._catalog = TranslationCatalog()",
    "docstring": "Create a GNUTranslations() using many locale directories",
    "type": "method",
    "file_path": "django\\django\\utils\\translation\\trans_real.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:language arg:domain arg:localedirs arguments arg arg arg arg Call If Compare Assign Assign Assign Call Assign Call Assign Assign arguments arg Call Compare If Compare If Compare Call Assign Call If For Assign Call Call Call Call If BoolOp Compare Compare Compare Raise Call Call If Compare Assign Call"
  },
  {
    "library": "sphinx",
    "name": "fetch_until",
    "source_code": "def fetch_until(self, condition: Any) -> list[Token]:\n    tokens = []\n    while (current := self.fetch_token()):\n        tokens.append(current)\n        if current == condition:\n            break\n        if current == [OP, '(']:\n            tokens += self.fetch_until([OP, ')'])\n        elif current == [OP, '{']:\n            tokens += self.fetch_until([OP, '}'])\n        elif current == [OP, '[']:\n            tokens += self.fetch_until([OP, ']'])\n    return tokens",
    "docstring": "Fetch tokens until specified token appeared. .. note:: This also handles parenthesis well.",
    "type": "method",
    "file_path": "sphinx\\sphinx\\pycode\\parser.py",
    "ast_data": "FunctionDef name:fetch_until arg:self arg:condition arguments arg arg Assign While Call Call If Compare If Compare Call If Compare Call If Compare Call Return return:yes"
  },
  {
    "library": "kornia",
    "name": "normalize_keypoints",
    "source_code": "def normalize_keypoints(kpts: Tensor, size: Tensor) -> Tensor:\n    if isinstance(size, torch.Size):\n        size = tensor(size)[None]\n    shift = size.float().to(kpts) / 2\n    scale = size.max(1).values.float().to(kpts) / 2\n    kpts = (kpts - shift[:, None]) / scale[:, None, None]\n    return kpts",
    "docstring": "Normalize tensor of keypoints.",
    "type": "function",
    "file_path": "kornia\\kornia\\feature\\lightglue_onnx\\utils\\keypoints.py",
    "ast_data": "FunctionDef name:normalize_keypoints arg:kpts arg:size arguments arg arg If Call Assign Call Assign Call Call Assign Call Call Call Assign Return return:yes"
  },
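A runnable sketch of the normalization math, assuming `size` carries `(width, height)` per batch element:

```python
import torch

kpts = torch.tensor([[[0.0, 0.0], [640.0, 480.0]]])  # (B=1, N=2, xy)
size = torch.tensor([[640.0, 480.0]])                # (B=1, [w, h])

shift = size / 2                    # image center
scale = size.max(1).values / 2      # half the longer side
normed = (kpts - shift[:, None]) / scale[:, None, None]
print(normed)  # tensor([[[-1.0000, -0.7500], [ 1.0000,  0.7500]]])
```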
  {
    "library": "scipy",
    "name": "Shell",
    "source_code": "@cli.cls_cmd('shell')\nclass Shell(Python):\n    ctx = CONTEXT\n    pythonpath = Python.pythonpath\n    extra_argv = Python.extra_argv\n\n    @classmethod\n    def run(cls, pythonpath, extra_argv, **kwargs):\n        cls._setup(pythonpath, **kwargs)\n        shell = os.environ.get('SHELL', 'sh')\n        click.echo(f\"Spawning a Unix shell '{shell}' ...\")\n        os.execv(shell, [shell] + list(extra_argv))\n        sys.exit(1)",
    "docstring": ":wrench: Start Unix shell with PYTHONPATH set. Running is equivalent to: 1. Execute build command (skip by passing the global option). 2. Open a new shell. 3. Set the PYTHONPATH environment variable in shell (query with ).",
    "type": "class",
    "file_path": "scipy\\dev.py",
    "ast_data": "ClassDef name:Shell Assign Assign Assign FunctionDef name:run arg:cls arg:pythonpath arg:extra_argv arguments arg arg arg arg Call Assign Call Call Call Call Call Call"
  },
  {
    "library": "numpy",
    "name": "nonzero",
    "source_code": "@array_function_dispatch(_nonzero_dispatcher)\ndef nonzero(a):\n    return _wrapfunc(a, 'nonzero')",
    "docstring": "Return the indices of the elements that are non-zero. Returns a tuple of arrays, one for each dimension of , containing the indices of the non-zero elements in that dimension. The values in are always tested and returned in row-major, C-style order. To group the indices by element, rather than dimension, use , which returns a row for each non-zero element. .. note:: When called on a zero-d array or scalar, `atleast_1daaaa` can also be called as a method of the array. >>> (a > 3).nonzero() (array([1, 1, 1, 2, 2, 2]), array([0, 1, 2, 0, 1, 2]))",
    "type": "function",
    "file_path": "numpy\\numpy\\_core\\fromnumeric.py",
    "ast_data": "FunctionDef name:nonzero arg:a arguments arg Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_pool_flops",
    "source_code": "def _pool_flops(graph, node):\n    _verify_conv_data_format(node)\n    out_shape = graph_util.tensor_shape_from_node_def_name(graph, node.name)\n    out_shape.assert_is_fully_defined()\n    kernel_shape = list(node.attr['ksize'].list.i)\n    kernel_area = _list_product(kernel_shape)\n    return ops.OpStats('flops', kernel_area * out_shape.num_elements())",
    "docstring": "Common code which compute flops for pooling operations.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\profiler\\internal\\flops_registry.py",
    "ast_data": "FunctionDef name:_pool_flops arg:graph arg:node arguments arg arg Call Assign Call Call Assign Call Assign Call Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_getfullargspec",
    "source_code": "def _getfullargspec(target):\n    return _convert_maybe_argspec_to_fullargspec(getargspec(target))",
    "docstring": "A python2 version of getfullargspec. Args: target: the target object to inspect. Returns: A FullArgSpec with empty kwonlyargs, kwonlydefaults and annotations.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\utils\\tf_inspect.py",
    "ast_data": "FunctionDef name:_getfullargspec arg:target arguments arg Return return:yes Call Call"
  },
  {
    "library": "scipy",
    "name": "plotting_positions",
    "source_code": "def plotting_positions(data, alpha=0.4, beta=0.4):\n    data = ma.array(data, copy=False).reshape(1, -1)\n    n = data.count()\n    plpos = np.empty(data.size, dtype=float)\n    plpos[n:] = 0\n    plpos[data.argsort(axis=None)[:n]] = (np.arange(1, n + 1) - alpha) / (n + 1.0 - alpha - beta)\n    return ma.array(plpos, mask=data._mask)",
    "docstring": "Returns plotting positions (or empirical percentile points) for the data. Plotting positions are defined as `alphabetaalphabeta`, Blom. The resulting quantile estimates are approximately unbiased if x is normally distributed (R type 9) - (.4,.4) : approximately quantile unbiased (Cunnane) - (.35,.35): APL, used with PWM - (.3175, .3175): used in scipy.stats.probplot Parameters ---------- data : array_like Input data, as a sequence or array of dimension at most 2. alpha : float, optional Plotting positions parameter. Default is 0.4. beta : float, optional Plotting positions parameter. Default is 0.4. Returns ------- positions : MaskedArray The calculated plotting positions.",
    "type": "function",
    "file_path": "scipy\\scipy\\stats\\_mstats_basic.py",
    "ast_data": "FunctionDef name:plotting_positions arg:data arg:alpha arg:beta arguments arg arg arg Assign Call Call Assign Call Assign Call Assign Assign Call Call Return return:yes Call"
  },
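A worked instance of the formula for the default parameters:

```python
import numpy as np

n, alpha, beta = 5, 0.4, 0.4
i = np.arange(1, n + 1)
# (i - alpha) / (n + 1 - alpha - beta), the formula used above
positions = (i - alpha) / (n + 1.0 - alpha - beta)
print(positions)  # [0.11538462 0.30769231 0.5 0.69230769 0.88461538]
```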
  {
    "library": "scikit-learn",
    "name": "_estimators_has",
    "source_code": "def _estimators_has(attr):\n\n    def check(self):\n        if hasattr(self, 'estimators_'):\n            getattr(self.estimators_[0], attr)\n        else:\n            getattr(self.estimator, attr)\n        return True\n    return check",
    "docstring": "Check if self.estimator or self.estimators_[0] has attr. If has the attr, then its safe to assume that other estimators have it too. We raise the original if does not exist. This function is used together with .",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\multiclass.py",
    "ast_data": "FunctionDef name:_estimators_has arg:attr arguments arg FunctionDef name:check arg:self arguments arg If Call Call Call Return return:yes Return return:yes"
  },
  {
    "library": "authlib",
    "name": "query_client",
    "source_code": "def query_client(self, client_id):\n    raise NotImplementedError()",
    "docstring": "Query OAuth client by client_id. The client model class MUST implement the methods described by :class:.",
    "type": "method",
    "file_path": "authlib\\authlib\\oauth2\\rfc6749\\authorization_server.py",
    "ast_data": "FunctionDef name:query_client arg:self arg:client_id arguments arg arg Raise Call"
  },
  {
    "library": "pygame",
    "name": "get_height",
    "source_code": "def get_height(self):\n    return self.get_sized_ascender() - self.get_sized_descender() + 1",
    "docstring": "get_height() -> int get the height of the font",
    "type": "method",
    "file_path": "pygame\\src_py\\ftfont.py",
    "ast_data": "FunctionDef name:get_height arg:self arguments arg Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "map_arguments",
    "source_code": "def map_arguments(self, tensor_dict):\n    if self._single_positional_tensor_passed:\n        kt_id, _ = self._keras_inputs_ids_and_indices[0]\n        return ((tensor_dict[kt_id].pop(),), {})\n    else:\n        flat_arguments = copy.copy(self._flat_arguments)\n        for kt_id, kt_index in self._keras_inputs_ids_and_indices:\n            flat_arguments[kt_index] = tensor_dict[kt_id].pop()\n        args, kwargs = nest.pack_sequence_as((self.call_args, self.call_kwargs), flat_arguments)\n        return (args, kwargs)",
    "docstring": "Maps Keras Tensors to computed Tensors using .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\node.py",
    "ast_data": "FunctionDef name:map_arguments arg:self arg:tensor_dict arguments arg arg If Assign Return return:yes Call Assign Call For Assign Call Assign Call Return return:yes"
  },
  {
    "library": "scipy",
    "name": "nth_moment",
    "source_code": "def nth_moment(n_k, a_k, b_k):\n    num = (a_k + b_k) ** (0.5 * n_k)\n    denom = 2 ** n_k * sc.beta(a_k, b_k)\n    indices = np.arange(n_k + 1)\n    sgn = np.where(indices % 2 > 0, -1, 1)\n    d = sc.beta(a_k + 0.5 * n_k - indices, b_k - 0.5 * n_k + indices)\n    sum_terms = sc.comb(n_k, indices) * sgn * d\n    return num / denom * sum_terms.sum()",
    "docstring": "Computes E[T^(n_k)] where T is skew-t distributed with parameters a_k and b_k.",
    "type": "method",
    "file_path": "scipy\\scipy\\stats\\_continuous_distns.py",
    "ast_data": "FunctionDef name:nth_moment arg:n_k arg:a_k arg:b_k arguments arg arg arg Assign Assign Call Assign Call Assign Call Compare Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "wrap_method",
    "source_code": "def wrap_method(method):\n    assert ismethod(method), f'Expected {method} to be a method'\n    return _WrappedMethod(method)",
    "docstring": "Wrap a method as a module so that it can be exported. The wrapped module's forward points to the method, and the method's original module state is shared.",
    "type": "function",
    "file_path": "pytorch\\torch\\_export\\utils.py",
    "ast_data": "FunctionDef name:wrap_method arg:method arguments arg Call Return return:yes Call"
  },
  {
    "library": "django",
    "name": "bulk_batch_size",
    "source_code": "def bulk_batch_size(self, fields, objs):\n    fields = list(chain.from_iterable((field.fields if isinstance(field, CompositePrimaryKey) else [field] for field in fields)))\n    if fields:\n        return self.connection.features.max_query_params // len(fields)\n    return len(objs)",
    "docstring": "Oracle restricts the number of parameters in a query.",
    "type": "method",
    "file_path": "django\\django\\db\\backends\\oracle\\operations.py",
    "ast_data": "FunctionDef name:bulk_batch_size arg:self arg:fields arg:objs arguments arg arg arg Assign Call Call Call If Return return:yes Call Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "_RoutingNotSupportedMixin",
    "source_code": "class _RoutingNotSupportedMixin:\n\n    def get_metadata_routing(self):\n        raise NotImplementedError(f'{self.__class__.__name__} has not implemented metadata routing yet.')",
    "docstring": "A mixin to be used to remove the default . This is used in meta-estimators where metadata routing is not yet implemented. This also makes it clear in our rendered documentation that this method cannot be used.",
    "type": "class",
    "file_path": "scikit-learn\\sklearn\\utils\\_metadata_requests.py",
    "ast_data": "ClassDef name:_RoutingNotSupportedMixin FunctionDef name:get_metadata_routing arg:self arguments arg Raise Call"
  },
  {
    "library": "pytorch",
    "name": "_get_prepare_custom_config",
    "source_code": "def _get_prepare_custom_config(obj: Any, dict_key: str) -> Optional[PrepareCustomConfig]:\n    if isinstance(obj, PrepareCustomConfig) or obj is None:\n        return obj\n    if isinstance(obj, dict):\n        return PrepareCustomConfig.from_dict(obj)\n    raise ValueError(f\"\"\"Expected PrepareCustomConfig in prepare_custom_config_dict[\"{dict_key}\"], got '{type(obj)}'\"\"\")",
    "docstring": "Convert the given object into a PrepareCustomConfig if possible, else throw an exception.",
    "type": "method",
    "file_path": "pytorch\\torch\\ao\\quantization\\fx\\custom_config.py",
    "ast_data": "FunctionDef name:_get_prepare_custom_config arg:obj arg:dict_key arguments arg arg If BoolOp Call Compare Return return:yes If Call Return return:yes Call Raise Call Call"
  },
  {
    "library": "pytorch",
    "name": "load",
    "source_code": "def load(self, name: str, index: sympy.Expr):\n    if name not in self.fixed_inputs:\n        index_str = self._process_indexing(index)\n        var = self._add_kernel_input(name)\n        var_dtype = V.graph.get_buffer(name).dtype\n        line = f'tl.load({var} + {index_str})'\n        if var_dtype in (torch.float16, torch.bfloat16) and config.triton.codegen_upcast_to_fp32:\n            line += '.to(tl.float32)'\n            var_dtype = torch.float32\n        out = self.kernel.cse.generate(self.kernel.compute, line, dtype=var_dtype)\n        return out\n    return self.kernel.cse.generate(self.kernel.compute, f'({self.fixed_inputs[name]})', dtype=torch.float32)",
    "docstring": "Handle loading from tensor or fixed input.",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\select_algorithm.py",
    "ast_data": "FunctionDef name:load arg:self arg:name arg:index arguments arg arg arg If Compare Assign Call Assign Call Assign Call Assign If BoolOp Compare Assign Assign Call Return return:yes Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "inner_solve",
    "source_code": "@abstractmethod\ndef inner_solve(self, X, y, sample_weight):\n    pass",
    "docstring": "Compute Newton step. Sets: - self.coef_newton - self.gradient_times_newton",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\linear_model\\_glm\\_newton_solver.py",
    "ast_data": "FunctionDef name:inner_solve arg:self arg:X arg:y arg:sample_weight arguments arg arg arg arg"
  },
  {
    "library": "matplotlib",
    "name": "set_navigate",
    "source_code": "def set_navigate(self, b):\n    self._navigate = b",
    "docstring": "Set whether the Axes responds to navigation toolbar commands. Parameters ---------- b : bool See Also -------- matplotlib.axes.Axes.set_forward_navigation_events",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\axes\\_base.py",
    "ast_data": "FunctionDef name:set_navigate arg:self arg:b arguments arg arg Assign"
  },
  {
    "library": "numpy",
    "name": "dos2unix",
    "source_code": "def dos2unix(file):\n    if os.path.isdir(file):\n        print(file, 'Directory!')\n        return\n    with open(file, 'rb') as fp:\n        data = fp.read()\n    if '\\x00' in data:\n        print(file, 'Binary!')\n        return\n    newdata = re.sub('\\r\\n', '\\n', data)\n    if newdata != data:\n        print('dos2unix:', file)\n        with open(file, 'wb') as f:\n            f.write(newdata)\n        return file\n    else:\n        print(file, 'ok')",
    "docstring": "Replace CRLF with LF in argument files. Print names of changed files.",
    "type": "function",
    "file_path": "numpy\\numpy\\distutils\\line_endings.py",
    "ast_data": "FunctionDef name:dos2unix arg:file arguments arg If Call Call Return return:no With Call Assign Call If Compare Call Return return:no Assign Call If Compare Call With Call Call Return return:yes Call"
  },
  {
    "library": "django",
    "name": "get_default",
    "source_code": "def get_default(self):\n    field_default = super().get_default()\n    if isinstance(field_default, self.remote_field.model):\n        return getattr(field_default, self.target_field.attname)\n    return field_default",
    "docstring": "Return the to_field if the default value is an object.",
    "type": "method",
    "file_path": "django\\django\\db\\models\\fields\\related.py",
    "ast_data": "FunctionDef name:get_default arg:self arguments arg Assign Call Call If Call Return return:yes Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "compute_mask",
    "source_code": "def compute_mask(self, t, default_mask):\n    _validate_structured_pruning(t)\n    _validate_pruning_dim(t, self.dim)\n    tensor_size = t.shape[self.dim]\n    nparams_toprune = _compute_nparams_toprune(self.amount, tensor_size)\n    _validate_pruning_amount(nparams_toprune, tensor_size)\n\n    def make_mask(t, dim, nchannels, nchannels_toprune):\n        prob = torch.rand(nchannels)\n        threshold = torch.kthvalue(prob, k=nchannels_toprune).values\n        channel_mask = prob > threshold\n        mask = torch.zeros_like(t)\n        slc = [slice(None)] * len(t.shape)\n        slc[dim] = channel_mask\n        mask[slc] = 1\n        return mask\n    if nparams_toprune == 0:\n        mask = default_mask\n    else:\n        mask = make_mask(t, self.dim, tensor_size, nparams_toprune)\n        mask *= default_mask.to(dtype=mask.dtype)\n    return mask",
    "docstring": "Compute and returns a mask for the input tensor ``",
    "type": "method",
    "file_path": "pytorch\\torch\\nn\\utils\\prune.py",
    "ast_data": "FunctionDef name:compute_mask arg:self arg:t arg:default_mask arguments arg arg arg Call Call Assign Assign Call Call FunctionDef name:make_mask arg:t arg:dim arg:nchannels arg:nchannels_toprune arguments arg arg arg arg Assign Call Assign Call Assign Compare Assign Call Assign Call Call Assign Assign Return return:yes If Compare Assign Assign Call Call Return return:yes"
  },
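A usage sketch via the public pruning API, which drives this `compute_mask` under the hood:

```python
import torch
import torch.nn.utils.prune as prune

lin = torch.nn.Linear(4, 6)
# Randomly prune 2 of the 6 output channels (rows of weight, dim=0).
prune.random_structured(lin, name="weight", amount=2, dim=0)

# Exactly two rows of the mask (and thus of the weight) are zeroed out.
print(lin.weight_mask.sum(dim=1))
print((lin.weight_mask.sum(dim=1) == 0).sum().item())  # 2
```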
  {
    "library": "tensorflow",
    "name": "_transform_input_tensor",
    "source_code": "def _transform_input_tensor(self, input_tensor, state_manager=None):\n    if self.dtype.is_integer != input_tensor.dtype.is_integer:\n        raise ValueError('Column dtype and SparseTensors dtype must be compatible. key: {}, column dtype: {}, tensor dtype: {}'.format(self.key, self.dtype, input_tensor.dtype))\n    fc_utils.assert_string_or_int(input_tensor.dtype, prefix='column_name: {} input_tensor'.format(self.key))\n    key_dtype = self.dtype\n    if input_tensor.dtype.is_integer:\n        key_dtype = dtypes.int64\n        input_tensor = math_ops.cast(input_tensor, dtypes.int64)\n    return self._make_table(key_dtype, state_manager).lookup(input_tensor)",
    "docstring": "Creates a lookup table for the vocabulary.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\feature_column\\feature_column_v2.py",
    "ast_data": "FunctionDef name:_transform_input_tensor arg:self arg:input_tensor arg:state_manager arguments arg arg arg If Compare Raise Call Call Call Call Assign If Assign Assign Call Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "initialize_tpu_system",
    "source_code": "def initialize_tpu_system(cluster_resolver=None):\n    return tpu_strategy_util.initialize_tpu_system_impl(cluster_resolver, TPUClusterResolver)",
    "docstring": "Initialize the TPU devices. Args: cluster_resolver: A tf.distribute.cluster_resolver.TPUClusterResolver, which provides information about the TPU cluster. Returns: The tf.tpu.Topology object for the topology of the TPU cluster. If called inside tf.function, it returns the serialized topology object instead. Raises: RuntimeError: If running inside a tf.function. NotFoundError: If no TPU devices found in eager mode.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\cluster_resolver\\tpu\\tpu_cluster_resolver.py",
    "ast_data": "FunctionDef name:initialize_tpu_system arg:cluster_resolver arguments arg Return return:yes Call"
  },
  {
    "library": "cryptography",
    "name": "__copy__",
    "source_code": "@abc.abstractmethod\ndef __copy__(self) -> DHPrivateKey:\n    pass",
    "docstring": "Returns a copy.",
    "type": "method",
    "file_path": "cryptography\\src\\cryptography\\hazmat\\primitives\\asymmetric\\dh.py",
    "ast_data": "FunctionDef name:__copy__ arg:self arguments arg"
  },
  {
    "library": "pytorch",
    "name": "get_all_call_args",
    "source_code": "def get_all_call_args(call_args_list, arg_types_list):\n    return _get_all_args(call_args_list, arg_types_list)",
    "docstring": "Passed in the call_args for each subkernel and return the call_args for the combined multi-kernel. Note an algorithm as follows does not always work: It will fail if any kernel has the same argument passed in multiple times. Check test_pass_same_arg_multi_times in test_multi_kernel.py Instead, we pick the longest call args and assert that other call args are a subset of it.",
    "type": "function",
    "file_path": "pytorch\\torch\\_inductor\\codegen\\multi_kernel.py",
    "ast_data": "FunctionDef name:get_all_call_args arg:call_args_list arg:arg_types_list arguments arg arg Return return:yes Call"
  },
  {
    "library": "django",
    "name": "Least",
    "source_code": "class Least(Func):\n    function = 'LEAST'\n\n    def __init__(self, *expressions, **extra):\n        if len(expressions) < 2:\n            raise ValueError('Least must take at least two expressions')\n        super().__init__(*expressions, **extra)\n\n    def as_sqlite(self, compiler, connection, **extra_context):\n        return super().as_sqlite(compiler, connection, function='MIN', **extra_context)",
    "docstring": "Return the minimum expression. If any expression is null the return value is database-specific: On PostgreSQL, return the minimum not-null expression. On MySQL, Oracle, and SQLite, if any expression is null, return null.",
    "type": "class",
    "file_path": "django\\django\\db\\models\\functions\\comparison.py",
    "ast_data": "ClassDef name:Least Assign FunctionDef name:__init__ arg:self arguments arg arg arg If Compare Call Raise Call Call Call FunctionDef name:as_sqlite arg:self arg:compiler arg:connection arguments arg arg arg arg Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "detect_platform",
    "source_code": "def detect_platform():\n    if on_gcp():\n        if context.context().list_logical_devices('GPU'):\n            return PlatformDevice.GCE_GPU\n        elif context.context().list_logical_devices('TPU'):\n            return PlatformDevice.GCE_TPU\n        else:\n            return PlatformDevice.GCE_CPU\n    elif context.context().list_logical_devices('GPU'):\n        return PlatformDevice.INTERNAL_GPU\n    elif context.context().list_logical_devices('TPU'):\n        return PlatformDevice.INTERNAL_TPU\n    else:\n        return PlatformDevice.INTERNAL_CPU",
    "docstring": "Returns the platform and device information.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\failure_handling\\failure_handling_util.py",
    "ast_data": "FunctionDef name:detect_platform arguments If Call If Call Call Return return:yes If Call Call Return return:yes Return return:yes If Call Call Return return:yes If Call Call Return return:yes Return return:yes"
  },
  {
    "library": "pandas",
    "name": "preprocess_data",
    "source_code": "def preprocess_data(data) -> io.StringIO | io.BytesIO:\n    if isinstance(data, str):\n        data = io.StringIO(data)\n    elif isinstance(data, bytes):\n        data = io.BytesIO(data)\n    return data",
    "docstring": "Convert extracted raw data. This method will return underlying data of extracted XML content. The data either has a attribute (e.g. a file object or a StringIO/BytesIO) or is a string or bytes that is an XML document.",
    "type": "function",
    "file_path": "pandas\\pandas\\io\\xml.py",
    "ast_data": "FunctionDef name:preprocess_data arg:data arguments arg If Call Assign Call If Call Assign Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "activate_command",
    "source_code": "@property\ndef activate_command(self) -> str:\n    if WINDOWS:\n        return f'& \"{self.activate_script}\"'\n    return f'source {shlex.quote(str(self.activate_script))}'",
    "docstring": "Get the command to activate the virtual environment.",
    "type": "method",
    "file_path": "pytorch\\tools\\nightly.py",
    "ast_data": "FunctionDef name:activate_command arg:self arguments arg If Return return:yes Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "emit_itt",
    "source_code": "class emit_itt:\n\n    def __init__(self, enabled=True, record_shapes=False):\n        self.enabled = enabled\n        self.entered = False\n        self.record_shapes = record_shapes\n\n    def __enter__(self):\n        if not self.enabled:\n            return\n        if self.entered:\n            raise RuntimeError('ITT annotation context manager is not reentrant')\n        self.entered = True\n        _run_on_profiler_start()\n        _enable_profiler(ProfilerConfig(ProfilerState.ITT, self.record_shapes, False, False, False, False, _ExperimentalConfig()), set())\n        return self\n\n    def __exit__(self, exc_type, exc_val, exc_tb):\n        if not self.enabled:\n            return\n        _disable_profiler()\n        _run_on_profiler_stop()\n        return False",
    "docstring": "Context manager that makes every autograd operation emit an ITT range. It is useful when running the program under Intel(R) VTune Profiler:: vtune The Instrumentation and Tracing Technology (ITT) API enables your application to generate and control the collection of trace data during its execution across different Intel tools. This context manager is to annotate Intel(R) VTune Profiling trace. With help of this context manager, you will be able to see labled ranges in Intel(R) VTune Profiler GUI. .. warning: This context manager should not be called recursively, i.e. at most one instance should be enabled at any given time. Args: enabled (bool, optional): Setting `` Example: >>> # xdoctest: +SKIP(\"Undefined variables\") >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_AUTOGRAD_PROFILER) >>> with torch.autograd.profiler.emit_itt(): ... model(x)",
    "type": "class",
    "file_path": "pytorch\\torch\\autograd\\profiler.py",
    "ast_data": "ClassDef name:emit_itt FunctionDef name:__init__ arg:self arg:enabled arg:record_shapes arguments arg arg arg Assign Assign Assign FunctionDef name:__enter__ arg:self arguments arg If Return return:no If Raise Call Assign Call Call Call Call Call Return return:yes FunctionDef name:__exit__ arg:self arg:exc_type arg:exc_val arg:exc_tb arguments arg arg arg arg If Return return:no Call Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "cohere",
    "source_code": "@_api.make_keyword_only('3.10', 'NFFT')\n@_preprocess_data(replace_names=['x', 'y'])\n@_docstring.interpd\ndef cohere(self, x, y, NFFT=256, Fs=2, Fc=0, detrend=mlab.detrend_none, window=mlab.window_hanning, noverlap=0, pad_to=None, sides='default', scale_by_freq=None, **kwargs):\n    cxy, freqs = mlab.cohere(x=x, y=y, NFFT=NFFT, Fs=Fs, detrend=detrend, window=window, noverlap=noverlap, scale_by_freq=scale_by_freq, sides=sides, pad_to=pad_to)\n    freqs += Fc\n    self.plot(freqs, cxy, **kwargs)\n    self.set_xlabel('Frequency')\n    self.set_ylabel('Coherence')\n    self.grid(True)\n    return (cxy, freqs)",
    "docstring": "Plot the coherence between *x* and *y*. Coherence is the normalized cross spectral density: .. math:: C_{xy} = \\frac{|P_{xy}|^2}{P_{xx}P_{yy}} Parameters ---------- %(Spectral)s %(PSD)s noverlap : int, default: 0 (no overlap) The number of points of overlap between blocks. Fc : int, default: 0 The center frequency of *x*, which offsets the x extents of the plot to reflect the frequency range used when a signal is acquired and then filtered and downsampled to baseband. Returns ------- Cxy : 1-D array The coherence vector. freqs : 1-D array The frequencies for the elements in *Cxy*. Other Parameters ---------------- data : indexable object, optional DATA_PARAMETER_PLACEHOLDER **kwargs Keyword arguments control the properties: %(Line2D:kwdoc)s References ---------- Bendat & Piersol -- Random Data: Analysis and Measurement Procedures, John Wiley & Sons (1986)",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\axes\\_axes.py",
    "ast_data": "FunctionDef name:cohere arg:self arg:x arg:y arg:NFFT arg:Fs arg:Fc arg:detrend arg:window arg:noverlap arg:pad_to arg:sides arg:scale_by_freq arguments arg arg arg arg arg arg arg arg arg arg arg arg arg Assign Call Call Call Call Call Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "get_dense_tensor",
    "source_code": "@abc.abstractmethod\ndef get_dense_tensor(self, transformation_cache, state_manager):\n    pass",
    "docstring": "Returns a . The output of this function will be used by model-builder-functions. For example the pseudo code of will be like: Args: transformation_cache: A object to access features. state_manager: A to create / access resources such as lookup tables. Returns: of shape [batch_size] + .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\feature_column\\feature_column_v2.py",
    "ast_data": "FunctionDef name:get_dense_tensor arg:self arg:transformation_cache arg:state_manager arguments arg arg arg"
  },
  {
    "library": "kornia",
    "name": "get_same_padding",
    "source_code": "def get_same_padding(kernel_size: Union[int, tuple[int, ...]]) -> Union[int, tuple[int, ...]]:\n    if isinstance(kernel_size, (tuple,)):\n        return tuple([get_same_padding(ks) for ks in kernel_size])\n    return kernel_size // 2",
    "docstring": "Return padding values.",
    "type": "function",
    "file_path": "kornia\\kornia\\contrib\\models\\efficient_vit\\utils\\network.py",
    "ast_data": "FunctionDef name:get_same_padding arg:kernel_size arguments arg If Call Return return:yes Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "MutationOutput",
    "source_code": "class MutationOutput(Buffer):\n\n    def __init__(self, layout, mutated_node, mutating_node: Operation) -> None:\n        super().__init__(name=None, layout=layout)\n        mutated_node_name = mutated_node.get_name()\n        V.graph.mark_buffer_mutated(mutated_node_name)\n        self.mutation_names = [mutated_node_name]\n        self.mutating_node: Operation = mutating_node\n        self.name = V.graph.register_buffer(self)\n\n    def get_defining_op(self) -> Operation:\n        return self.mutating_node\n\n    def get_mutation_names(self):\n        return self.mutation_names\n\n    def should_allocate(self) -> bool:\n        return False",
    "docstring": "An output buffer that represents the mutation of a pre-existing buffer",
    "type": "class",
    "file_path": "pytorch\\torch\\_inductor\\ir.py",
    "ast_data": "ClassDef name:MutationOutput FunctionDef name:__init__ arg:self arg:layout arg:mutated_node arg:mutating_node arguments arg arg arg arg Call Call Assign Call Call Assign Assign Call FunctionDef name:get_defining_op arg:self arguments arg Return return:yes FunctionDef name:get_mutation_names arg:self arguments arg Return return:yes FunctionDef name:should_allocate arg:self arguments arg Return return:yes"
  },
  {
    "library": "sphinx",
    "name": "fix_fragment",
    "source_code": "def fix_fragment(self, prefix: str, fragment: str) -> str:\n    return prefix + fragment.replace(':', '-')",
    "docstring": "Return a href/id attribute with colons replaced by hyphens.",
    "type": "method",
    "file_path": "sphinx\\sphinx\\builders\\_epub_base.py",
    "ast_data": "FunctionDef name:fix_fragment arg:self arg:prefix arg:fragment arguments arg arg arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "profile_to_file",
    "source_code": "def profile_to_file(filename: str) -> Callable[[T], T]:\n    prof = cProfile.Profile()\n    filename = os.path.abspath(os.path.expanduser(filename))\n\n    def decorator(fn):\n\n        @functools.wraps(fn)\n        def wrapper(*args, **kwargs):\n            prof.enable()\n            try:\n                return fn(*args, **kwargs)\n            finally:\n                prof.disable()\n        return wrapper\n\n    def save_it():\n        prof.dump_stats(filename)\n        sys.stderr.write(textwrap.dedent(f'                Wrote profile to {filename}, view with:\\n\\n                    snakeviz {filename}\\n\\n                '))\n    atexit.register(save_it)\n    return decorator",
    "docstring": "Decorator to cProfile a given function and save the result to disk on process exit. Args: filename: filename to save profile to",
    "type": "function",
    "file_path": "pytorch\\torch\\_dynamo\\debug_utils.py",
    "ast_data": "FunctionDef name:profile_to_file arg:filename arguments arg Assign Call Assign Call Call FunctionDef name:decorator arg:fn arguments arg FunctionDef name:wrapper arguments arg arg Call Try Return return:yes Call Call Call Return return:yes FunctionDef name:save_it arguments Call Call Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "evaluate",
    "source_code": "def evaluate(self, model, x=None, y=None, batch_size=None, verbose=1, sample_weight=None, steps=None, callbacks=None, **kwargs):\n    dist_utils.validate_inputs(x, y)\n    batch_size, steps = dist_utils.process_batch_and_step_size(model._distribution_strategy, x, batch_size, steps, ModeKeys.TEST)\n    batch_size = model._validate_or_infer_batch_size(batch_size, steps, x)\n    dataset = model._distribution_standardize_user_data(x, y, sample_weight=sample_weight, batch_size=batch_size, allow_partial_batch=True)\n    if backend.is_tpu_strategy(model._distribution_strategy):\n        steps = training_utils_v1.infer_steps_for_dataset(model, dataset, steps, steps_name='steps')\n        if steps is None:\n            raise ValueError('Number of steps could not be inferred from the data, please pass the steps argument.')\n        if not context.executing_eagerly():\n            return experimental_tpu_test_loop(model, dataset, verbose=verbose, steps=steps, callbacks=callbacks)\n    return training_arrays_v1.test_loop(model, inputs=dataset, batch_size=batch_size, verbose=verbose, steps=steps, callbacks=callbacks)",
    "docstring": "Evaluate loop for Distribution Strategies.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\training_distributed_v1.py",
    "ast_data": "FunctionDef name:evaluate arg:self arg:model arg:x arg:y arg:batch_size arg:verbose arg:sample_weight arg:steps arg:callbacks arguments arg arg arg arg arg arg arg arg arg arg Call Assign Call Assign Call Assign Call If Call Assign Call If Compare Raise Call If Call Return return:yes Call Return return:yes Call"
  },
  {
    "library": "kornia",
    "name": "generate_transformation_matrix",
    "source_code": "def generate_transformation_matrix(self, input: Tensor, params: Dict[str, Tensor], flags: Dict[str, Any]) -> Tensor:\n    batch_prob = params['batch_prob']\n    to_apply = batch_prob > 0.5\n    in_tensor = self.transform_tensor(input)\n    if not to_apply.any():\n        trans_matrix = self.identity_matrix(in_tensor)\n    elif to_apply.all():\n        trans_matrix = self.compute_transformation(in_tensor, params=params, flags=flags)\n    else:\n        trans_matrix = self.identity_matrix(in_tensor)\n        trans_matrix = trans_matrix.index_put((to_apply,), self.compute_transformation(in_tensor[to_apply], params=params, flags=flags))\n    return trans_matrix",
    "docstring": "Generate transformation matrices with the given input and param settings.",
    "type": "method",
    "file_path": "kornia\\kornia\\augmentation\\_3d\\base.py",
    "ast_data": "FunctionDef name:generate_transformation_matrix arg:self arg:input arg:params arg:flags arguments arg arg arg arg Assign Assign Compare Assign Call If Call Assign Call If Call Assign Call Assign Call Assign Call Call Return return:yes"
  },
  {
    "library": "scipy",
    "name": "RegularGridInterpolator",
    "source_code": "class RegularGridInterpolator(Benchmark):\n    param_names = ['ndim', 'max_coord_size', 'n_samples', 'flipped']\n    params = [[2, 3, 4], [10, 40, 200], [10, 100, 1000, 10000], [1, -1]]\n\n    def setup(self, ndim, max_coord_size, n_samples, flipped):\n        rng = np.random.default_rng(314159)\n        coord_sizes = [max_coord_size // 2 ** i for i in range(ndim)]\n        self.points = [np.sort(rng.random(size=s))[::flipped] for s in coord_sizes]\n        self.values = rng.random(size=coord_sizes)\n        bounds = [(p.min(), p.max()) for p in self.points]\n        xi = [rng.uniform(low, high, size=n_samples) for low, high in bounds]\n        self.xi = np.array(xi).T\n        self.interp = interpolate.RegularGridInterpolator(self.points, self.values)\n\n    def time_rgi_setup_interpolator(self, ndim, max_coord_size, n_samples, flipped):\n        self.interp = interpolate.RegularGridInterpolator(self.points, self.values)\n\n    def time_rgi(self, ndim, max_coord_size, n_samples, flipped):\n        self.interp(self.xi)",
    "docstring": "Benchmark RegularGridInterpolator with method=\"linear\".",
    "type": "class",
    "file_path": "scipy\\benchmarks\\benchmarks\\interpolate.py",
    "ast_data": "ClassDef name:RegularGridInterpolator Assign Assign FunctionDef name:setup arg:self arg:ndim arg:max_coord_size arg:n_samples arg:flipped arguments arg arg arg arg arg Assign Call Assign Call Assign Call Call Assign Call Assign Call Call Assign Call Assign Call Assign Call FunctionDef name:time_rgi_setup_interpolator arg:self arg:ndim arg:max_coord_size arg:n_samples arg:flipped arguments arg arg arg arg arg Assign Call FunctionDef name:time_rgi arg:self arg:ndim arg:max_coord_size arg:n_samples arg:flipped arguments arg arg arg arg arg Call"
  },
  {
    "library": "tensorflow",
    "name": "_update_sample_weight_modes",
    "source_code": "def _update_sample_weight_modes(model, mode, sample_weights):\n    if is_distributing_by_cloning(model):\n        distributed_model = get_distributed_model(model, mode)\n        if not distributed_model:\n            _make_replicated_models_with_cloning(model, mode)\n            distributed_model = get_distributed_model(model, mode)\n        distributed_model._recompile_exec_function = any([e.sample_weights_mismatch() for e in model._training_endpoints])\n        if sample_weights:\n            distributed_models = flatten_per_replica_values(model._distribution_strategy, distributed_model)\n            sample_weights = sample_weights[0]\n            if sample_weights and None not in sample_weights:\n                for m, sw in zip(distributed_models, sample_weights):\n                    m._update_sample_weight_modes(sample_weights=[sw])",
    "docstring": "Update sample_weight_mode of the distributed model.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\distribute\\distributed_training_utils_v1.py",
    "ast_data": "FunctionDef name:_update_sample_weight_modes arg:model arg:mode arg:sample_weights arguments arg arg arg If Call Assign Call If Call Assign Call Assign Call Call If Assign Call Assign If BoolOp Compare For Call Call"
  },
  {
    "library": "django",
    "name": "check_unique",
    "source_code": "def check_unique(self, unique):\n    if isinstance(unique, (list, tuple)):\n        for attr in unique:\n            if attr not in self.mapping:\n                raise ValueError\n    elif isinstance(unique, str):\n        if unique not in self.mapping:\n            raise ValueError\n    else:\n        raise TypeError('Unique keyword argument must be set with a tuple, list, or string.')",
    "docstring": "Check the keyword parameter -- may be a sequence or string.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\utils\\layermapping.py",
    "ast_data": "FunctionDef name:check_unique arg:self arg:unique arguments arg arg If Call For If Compare Raise If Call If Compare Raise Raise Call"
  },
  {
    "library": "tensorflow",
    "name": "_run_graph",
    "source_code": "def _run_graph(self, device, input_shape, perm, num_iters, datatype):\n    graph = ops.Graph()\n    with graph.as_default():\n        outputs = build_graph(device, input_shape, perm, datatype, num_iters)\n        with session_lib.Session(graph=graph) as session:\n            variables.global_variables_initializer().run()\n            session.run(outputs)\n            start_time = time.time()\n            session.run(outputs)\n            duration = (time.time() - start_time) / num_iters\n            throughput = np.prod(np.array(input_shape)) * datatype().itemsize * 2 / duration / 1000000000.0\n            print('%s %s inputshape:%s perm:%s %d %.6fsec, %.4fGB/s.' % (device, str(datatype), str(input_shape).replace(' ', ''), str(perm).replace(' ', ''), num_iters, duration, throughput))\n    name_template = 'transpose_{device}_{dtype}_input_shape_{inputshape}_perm_{perm}'\n    self.report_benchmark(name=name_template.format(device=device, dtype=str(datatype).replace(' ', ''), inputshape=str(input_shape).replace(' ', ''), perm=str(perm).replace(' ', '')).replace(' ', ''), iters=num_iters, wall_time=duration)\n    return duration",
    "docstring": "runs the graph and print its execution time. Args: device: String, the device to run on. input_shape: Shape of the input tensor. perm: A list of ints with the same length as input tensor's dimension. num_iters: Number of iterations to run the benchmark. datatype: numpy data type of the input tensor. Returns: The duration of the run in seconds.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\transpose_benchmark.py",
    "ast_data": "FunctionDef name:_run_graph arg:self arg:device arg:input_shape arg:perm arg:num_iters arg:datatype arguments arg arg arg arg arg arg Assign Call With Call Assign Call With Call Call Call Call Assign Call Call Assign Call Assign Call Call Call Call Call Call Call Call Call Assign Call Call Call Call Call Call Call Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "backward",
    "source_code": "def backward(self, *gradients: Any) -> Any:\n    nested_gradients = _unflatten(gradients, self._nested_output)\n    result = self.backward_extended(*nested_gradients)\n    return tuple(_iter_None_tensors(result))",
    "docstring": "Shared backward utility.",
    "type": "method",
    "file_path": "pytorch\\torch\\autograd\\function.py",
    "ast_data": "FunctionDef name:backward arg:self arguments arg arg Assign Call Assign Call Return return:yes Call Call"
  },
  {
    "library": "pandas",
    "name": "_formatter_func",
    "source_code": "def _formatter_func(self, tup):\n    formatter_funcs = (level._formatter_func for level in self.levels)\n    return tuple((func(val) for func, val in zip(formatter_funcs, tup)))",
    "docstring": "Formats each item in tup according to its level's formatter function.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\indexes\\multi.py",
    "ast_data": "FunctionDef name:_formatter_func arg:self arg:tup arguments arg arg Assign Return return:yes Call Call Call"
  },
  {
    "library": "pandas",
    "name": "value_counts",
    "source_code": "def value_counts(self, dropna: bool=True) -> Series:\n    if self.ndim != 1:\n        raise NotImplementedError\n    from pandas import Index, Series\n    if dropna:\n        values = self[~self.isna()]._ndarray\n    else:\n        values = self._ndarray\n    result = value_counts(values, sort=False, dropna=dropna)\n    index_arr = self._from_backing_data(np.asarray(result.index._data))\n    index = Index(index_arr, name=result.index.name)\n    return Series(result._values, index=index, name=result.name, copy=False)",
    "docstring": "Return a Series containing counts of unique values. Parameters ---------- dropna : bool, default True Don't include counts of NA values. Returns ------- Series",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\arrays\\_mixins.py",
    "ast_data": "FunctionDef name:value_counts arg:self arg:dropna arguments arg arg If Compare Raise If Assign Call Assign Assign Call Assign Call Call Assign Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "_is_observer_script_module",
    "source_code": "def _is_observer_script_module(mod, obs_type_name):\n    if isinstance(mod, torch.jit.RecursiveScriptModule):\n        suffix = mod._c.qualified_name.split('.', 1)[1]\n        name = re.sub('\\\\.___torch_mangle_\\\\d+', '', suffix)\n        return obs_type_name in name\n    return False",
    "docstring": "Returns true if given mod is an instance of Observer script module.",
    "type": "function",
    "file_path": "pytorch\\torch\\ao\\quantization\\observer.py",
    "ast_data": "FunctionDef name:_is_observer_script_module arg:mod arg:obs_type_name arguments arg arg If Call Assign Call Assign Call Return return:yes Compare Return return:yes"
  },
  {
    "library": "scipy",
    "name": "lqn",
    "source_code": "def lqn(n, z):\n    n = _nonneg_int_or_fail(n, 'n', strict=False)\n    if n < 1:\n        n1 = 1\n    else:\n        n1 = n\n    z = np.asarray(z)\n    if not np.issubdtype(z.dtype, np.inexact):\n        z = z.astype(float)\n    if np.iscomplexobj(z):\n        qn = np.empty((n1 + 1,) + z.shape, dtype=np.complex128)\n    else:\n        qn = np.empty((n1 + 1,) + z.shape, dtype=np.float64)\n    qd = np.empty_like(qn)\n    if z.ndim == 0:\n        _lqn(z, out=(qn, qd))\n    else:\n        _lqn(z, out=(np.moveaxis(qn, 0, -1), np.moveaxis(qd, 0, -1)))\n    return (qn[:n + 1], qd[:n + 1])",
    "docstring": "Legendre function of the second kind. Compute sequence of Legendre functions of the second kind, Qn(z) and derivatives for all degrees from 0 to n (inclusive). References ---------- .. [1] Zhang, Shanjie and Jin, Jianming. \"Computation of Special Functions\", John Wiley and Sons, 1996.",
    "type": "function",
    "file_path": "scipy\\scipy\\special\\_basic.py",
    "ast_data": "FunctionDef name:lqn arg:n arg:z arguments arg arg Assign Call If Compare Assign Assign Assign Call If Call Assign Call If Call Assign Call Assign Call Assign Call If Compare Call Call Call Call Return return:yes"
  },
  {
    "library": "seaborn",
    "name": "_dodge_needed",
    "source_code": "def _dodge_needed(self):\n    groupers = list({self.orient, 'col', 'row'} & set(self.variables))\n    if 'hue' in self.variables:\n        orient = self.plot_data[groupers].value_counts()\n        paired = self.plot_data[[*groupers, 'hue']].value_counts()\n        return orient.size != paired.size\n    return False",
    "docstring": "Return True when use of would cause overlaps.",
    "type": "method",
    "file_path": "seaborn\\seaborn\\categorical.py",
    "ast_data": "FunctionDef name:_dodge_needed arg:self arguments arg Assign Call Call If Compare Assign Call Assign Call Return return:yes Compare Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "matmul_shape_inference",
    "source_code": "def matmul_shape_inference(a, b, c, transpose_a, transpose_b, adjoint_a, adjoint_b):\n    c_handle = getattr(c, '_handle_data', None)\n    a_shape_and_type = get_shape_and_type(a)\n    b_shape_and_type = get_shape_and_type(b)\n    if c_handle is None and a_shape_and_type is not None and (b_shape_and_type is not None):\n        transpose_a = transpose_a or adjoint_a\n        transpose_b = transpose_b or adjoint_b\n        a_shape = a_shape_and_type.shape\n        b_shape = b_shape_and_type.shape\n        rank = len(a_shape.dim)\n        c_rows = a_shape.dim[rank - (1 if transpose_a else 2)].size\n        c_cols = b_shape.dim[rank - (2 if transpose_b else 1)].size\n        c_shape = tensor_shape.TensorShape(a_shape)\n        c_shape = tensor_shape.TensorShape(c_shape[:rank - 2] + [c_rows, c_cols])\n        c_handle = _create_handle_data_proto(c_shape.as_proto(), a_shape_and_type.dtype)\n    return c_handle",
    "docstring": "Helper function for matmul to set the result matrix's handle data.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\linalg\\sparse\\sparse_csr_matrix_ops.py",
    "ast_data": "FunctionDef name:matmul_shape_inference arg:a arg:b arg:c arg:transpose_a arg:transpose_b arg:adjoint_a arg:adjoint_b arguments arg arg arg arg arg arg arg Assign Call Assign Call Assign Call If BoolOp Compare Compare Compare Assign BoolOp Assign BoolOp Assign Assign Assign Call Assign Assign Assign Call Assign Call Assign Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, cell):\n    self._cell = cell",
    "docstring": "Creates a new IntGaugeCell. Args: cell: A c pointer of TFE_MonitoringIntGaugeCell.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\monitoring.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:cell arguments arg arg Assign"
  },
  {
    "library": "scikit-learn",
    "name": "inverse_transform",
    "source_code": "def inverse_transform(self, Y, threshold=None):\n    check_is_fitted(self)\n    if threshold is None:\n        threshold = (self.pos_label + self.neg_label) / 2.0\n    if self.y_type_ == 'multiclass':\n        y_inv = _inverse_binarize_multiclass(Y, self.classes_)\n    else:\n        y_inv = _inverse_binarize_thresholding(Y, self.y_type_, self.classes_, threshold)\n    if self.sparse_input_:\n        y_inv = sp.csr_matrix(y_inv)\n    elif sp.issparse(y_inv):\n        y_inv = y_inv.toarray()\n    return y_inv",
    "docstring": "Transform binary labels back to multi-class labels. Parameters ---------- Y : {ndarray, sparse matrix} of shape (n_samples, n_classes) Target values. All sparse matrices are converted to CSR before inverse transformation. threshold : float, default=None Threshold used in the binary and multi-label cases. Use 0 when `decision_functionpredict_probainverse_transformdecision_functioninverse_transform`.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\preprocessing\\_label.py",
    "ast_data": "FunctionDef name:inverse_transform arg:self arg:Y arg:threshold arguments arg arg arg Call If Compare Assign If Compare Assign Call Assign Call If Assign Call If Call Assign Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "prepare_planning_info",
    "source_code": "def prepare_planning_info(nodes: list[BaseSchedulerNode], name_to_buf: dict[str, SchedulerBuffer], name_to_fused_node: dict[str, BaseSchedulerNode], graph_inputs: OrderedSet[str], graph_outputs: OrderedSet[str]) -> tuple[int, dict[str, FreeableInputBuffer]]:\n    name_to_freeable_input_buf = get_freeable_input_buf(nodes, graph_inputs)\n    assign_memory_planning_info_for_scheduler_buffers(nodes, name_to_buf)\n    assign_memory_planning_info_for_scheduler_nodes(nodes, name_to_fused_node, name_to_buf, name_to_freeable_input_buf)\n    estimated_peak_memory, _ = estimate_peak_memory(nodes, name_to_freeable_input_buf, graph_outputs)\n    return (estimated_peak_memory, name_to_freeable_input_buf)",
    "docstring": "Prepare planning info. As nodes are scheduled one at a time, these help keep track of when a buffer can be freed, and when a node can be scheduled Returns: int: peak memory estimation dict[str, FreeableInputBuffer]: name to freeable input buffer",
    "type": "function",
    "file_path": "pytorch\\torch\\_inductor\\memory.py",
    "ast_data": "FunctionDef name:prepare_planning_info arg:nodes arg:name_to_buf arg:name_to_fused_node arg:graph_inputs arg:graph_outputs arguments arg arg arg arg arg Assign Call Call Call Assign Call Return return:yes"
  },
  {
    "library": "kornia",
    "name": "_modified_bessel_1",
    "source_code": "def _modified_bessel_1(x: Tensor) -> Tensor:\n    ax = torch.abs(x)\n    out = zeros_like(x)\n    idx_a = ax < 3.75\n    if idx_a.any():\n        y = x[idx_a] / 3.75 * (x[idx_a] / 3.75)\n        ans = 0.51498869 + y * (0.15084934 + y * (0.02658733 + y * (0.00301532 + y * 0.00032411)))\n        out[idx_a] = ax[idx_a] * (0.5 + y * (0.87890594 + y * ans))\n    idx_b = ~idx_a\n    if idx_b.any():\n        y = 3.75 / ax[idx_b]\n        ans = 0.02282967 + y * (-0.02895312 + y * (0.01787654 - y * 0.00420059))\n        ans = 0.39894228 + y * (-0.03988024 + y * (-0.00362018 + y * (0.00163801 + y * (-0.01031555 + y * ans))))\n        ans = ans * ax[idx_b].exp() / ax[idx_b].sqrt()\n        out[idx_b] = where(x[idx_b] < 0, -ans, ans)\n    return out",
    "docstring": "Adapted from:",
    "type": "function",
    "file_path": "kornia\\kornia\\filters\\kernels.py",
    "ast_data": "FunctionDef name:_modified_bessel_1 arg:x arguments arg Assign Call Assign Call Assign Compare If Call Assign Assign Assign Assign If Call Assign Assign Assign Assign Call Call Assign Call Compare Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "get_op_creation_digest",
    "source_code": "def get_op_creation_digest(self, op_name):\n    return self._op_by_name[op_name]",
    "docstring": "Get the GraphOpCreationDigest for a op in the graph.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\debug\\lib\\debug_events_reader.py",
    "ast_data": "FunctionDef name:get_op_creation_digest arg:self arg:op_name arguments arg arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "coordinatewise_monotone_map",
    "source_code": "@classmethod\ndef coordinatewise_monotone_map(cls, x, y, fn):\n    x, y = (cls.wrap(x), cls.wrap(y))\n    products = [fn(a, b) for a, b in itertools.product([x.lower, x.upper], [y.lower, y.upper])]\n    return ValueRanges(min(products), max(products))",
    "docstring": "It's increasing or decreasing on each coordinate.",
    "type": "method",
    "file_path": "pytorch\\torch\\utils\\_sympy\\value_ranges.py",
    "ast_data": "FunctionDef name:coordinatewise_monotone_map arg:cls arg:x arg:y arg:fn arguments arg arg arg arg Assign Call Call Assign Call Call Return return:yes Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "res_list_literal",
    "source_code": "def res_list_literal(self, ns, elt_types):\n    raise NotImplementedError('subclasses must implement')",
    "docstring": "Resolves the type of a list literal from its elements.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\autograph\\pyct\\static_analysis\\type_inference.py",
    "ast_data": "FunctionDef name:res_list_literal arg:self arg:ns arg:elt_types arguments arg arg arg Raise Call"
  },
  {
    "library": "tensorflow",
    "name": "HandleData",
    "source_code": "@dataclasses.dataclass(frozen=True)\nclass HandleData:\n    shape_inference: Optional[cpp_shape_inference_pb2.CppShapeInferenceResult.HandleData] = None\n    alias_id: Optional[int] = None",
    "docstring": "Holds resource/variant tensor specific data.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\dtypes.py",
    "ast_data": "ClassDef name:HandleData Call"
  },
  {
    "library": "tensorflow",
    "name": "_convert_object_or_list",
    "source_code": "def _convert_object_or_list(nested):\n    if wrap:\n        if isinstance(nested, ListWrapper):\n            return nested\n        if _is_serialized_node_data(nested):\n            return ListWrapper(nested)\n        return nested\n    else:\n        if isinstance(nested, ListWrapper):\n            return nested.as_list()\n        return nested",
    "docstring": "Convert b/t object and list representations.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\utils\\tf_utils.py",
    "ast_data": "FunctionDef name:_convert_object_or_list arg:nested arguments arg If If Call Return return:yes If Call Return return:yes Call Return return:yes If Call Return return:yes Call Return return:yes"
  },
  {
    "library": "sphinx",
    "name": "content_metadata",
    "source_code": "def content_metadata(self) -> dict[str, Any]:\n    if (source_date_epoch := os.getenv('SOURCE_DATE_EPOCH')) is not None:\n        time_tuple = time.gmtime(int(source_date_epoch))\n    else:\n        time_tuple = time.gmtime()\n    metadata: dict[str, Any] = {'title': html.escape(self.config.epub_title), 'author': html.escape(self.config.epub_author), 'uid': html.escape(self.config.epub_uid), 'lang': html.escape(self.config.epub_language), 'publisher': html.escape(self.config.epub_publisher), 'copyright': html.escape(self.config.epub_copyright), 'scheme': html.escape(self.config.epub_scheme), 'id': html.escape(self.config.epub_identifier), 'date': html.escape(time.strftime('%Y-%m-%d', time_tuple)), 'manifest_items': [], 'spines': [], 'guides': []}\n    return metadata",
    "docstring": "Create a dictionary with all metadata for the content.opf file properly escaped.",
    "type": "method",
    "file_path": "sphinx\\sphinx\\builders\\_epub_base.py",
    "ast_data": "FunctionDef name:content_metadata arg:self arguments arg If Compare Call Assign Call Call Assign Call Call Call Call Call Call Call Call Call Call Call Return return:yes"
  },
  {
    "library": "kornia",
    "name": "scale",
    "source_code": "def scale(self, scale_factor: Tensor) -> PinholeModel:\n    fx = self.fx * scale_factor\n    fy = self.fy * scale_factor\n    cx = self.cx * scale_factor\n    cy = self.cy * scale_factor\n    params = stack((fx, fy, cx, cy), -1)\n    image_size = ImageSize(self.image_size.height * scale_factor, self.image_size.width * scale_factor)\n    return PinholeModel(image_size, params)",
    "docstring": "Scales the camera model by a scale factor. Args: scale_factor: Scale factor to scale the camera model. Returns: Scaled camera model. Example: >>> cam = CameraModel(ImageSize(480, 640), CameraModelType.PINHOLE, torch.Tensor([328., 328., 320., 240.])) >>> cam_scaled = cam.scale(2) >>> cam_scaled.params tensor([656., 656., 640., 480.])",
    "type": "method",
    "file_path": "kornia\\kornia\\sensors\\camera\\camera_model.py",
    "ast_data": "FunctionDef name:scale arg:self arg:scale_factor arguments arg arg Assign Assign Assign Assign Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "kornia",
    "name": "squeeze",
    "source_code": "def squeeze(self, dim: int=0) -> SegmentationResults:\n    self.logits = self.logits.squeeze(dim)\n    self.scores = self.scores.squeeze(dim)\n    if isinstance(self._original_res_logits, Tensor):\n        self._original_res_logits = self._original_res_logits.squeeze(dim)\n    return self",
    "docstring": "Realize a squeeze for the dim given for all properties.",
    "type": "method",
    "file_path": "kornia\\kornia\\contrib\\models\\structures.py",
    "ast_data": "FunctionDef name:squeeze arg:self arg:dim arguments arg arg Assign Call Assign Call If Call Assign Call Return return:yes"
  },
  {
    "library": "cherrypy",
    "name": "hooks_namespace",
    "source_code": "def hooks_namespace(k, v):\n    hookpoint = k.split('.', 1)[0]\n    if isinstance(v, str):\n        v = cherrypy.lib.reprconf.attributes(v)\n    if not isinstance(v, Hook):\n        v = Hook(v)\n    cherrypy.serving.request.hooks[hookpoint].append(v)",
    "docstring": "Attach bare hooks declared in config.",
    "type": "function",
    "file_path": "cherrypy\\cherrypy\\_cprequest.py",
    "ast_data": "FunctionDef name:hooks_namespace arg:k arg:v arguments arg arg Assign Call If Call Assign Call If Call Assign Call Call"
  },
  {
    "library": "tensorflow",
    "name": "compute_dtype",
    "source_code": "@property\ndef compute_dtype(self):\n    return self._dtype_policy.compute_dtype",
    "docstring": "The dtype of the layer's computations. This is equivalent to . Unless mixed precision is used, this is the same as , the dtype of the weights. Layers automatically cast their inputs to the compute dtype, which causes computations and the output to be in the compute dtype as well. This is done by the base Layer class in , so you do not have to insert these casts if implementing your own layer. Layers often perform certain internal computations in higher precision when is float16 or bfloat16 for numeric stability. The output will still typically be float16 or bfloat16 in such cases. Returns: The layer's compute dtype.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\base_layer.py",
    "ast_data": "FunctionDef name:compute_dtype arg:self arguments arg Return return:yes"
  },
  {
    "library": "authlib",
    "name": "query_user_grant",
    "source_code": "def query_user_grant(self, user_code):\n    raise NotImplementedError()",
    "docstring": "Get user and grant via the given user code. Developers MUST implement it in subclass:: def query_user_grant(self, user_code): # e.g. we saved user grant info in redis data = redis.get(\"oauth_user_grant:\" + user_code) if not data: return None user_id, allowed = data.split() user = User.get(user_id) return user, bool(allowed) Note, user grant information is saved by verification endpoint.",
    "type": "method",
    "file_path": "authlib\\authlib\\oauth2\\rfc8628\\device_code.py",
    "ast_data": "FunctionDef name:query_user_grant arg:self arg:user_code arguments arg arg Raise Call"
  },
  {
    "library": "tensorflow",
    "name": "_update",
    "source_code": "def _update(self, update_fn, value, **kwargs):\n    input_tensor = ops.convert_to_tensor(value, name='value_in_tensor', dtype=self.dtype)\n    return control_flow_ops.group(*tuple((_on_device_update(update_fn, v, input_tensor, **kwargs) for v in self.variables)))",
    "docstring": "Converts the value to tensor and updates the variable list.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\tpu_replicated_variable.py",
    "ast_data": "FunctionDef name:_update arg:self arg:update_fn arg:value arguments arg arg arg arg Assign Call Return return:yes Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "floordiv",
    "source_code": "def floordiv(self, x0: T, x1: T) -> T:\n    raise NotImplementedError",
    "docstring": "Python-style floor division between integers only. Computes the true division of two numbers and floors the result. If you want floor division for floats, do regular truediv and floor the result.",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\ops_handler.py",
    "ast_data": "FunctionDef name:floordiv arg:self arg:x0 arg:x1 arguments arg arg arg Raise"
  },
  {
    "library": "django",
    "name": "get_form",
    "source_code": "def get_form(self, form_class=None):\n    if form_class is None:\n        form_class = self.get_form_class()\n    return form_class(**self.get_form_kwargs())",
    "docstring": "Return an instance of the form to be used in this view.",
    "type": "method",
    "file_path": "django\\django\\views\\generic\\edit.py",
    "ast_data": "FunctionDef name:get_form arg:self arg:form_class arguments arg arg If Compare Assign Call Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "_check_attribute_picklability",
    "source_code": "def _check_attribute_picklability(self):\n    for k in self.__dict__.keys():\n        if k not in _REMOTE_MODULE_PICKLED_ATTRIBUTES and k not in _REMOTE_MODULE_ATTRIBUTES_IGNORE_FOR_PICKLING:\n            raise AttributeError(f'Attribute {k} must be either in ``_REMOTE_MODULE_PICKLED_ATTRIBUTES`` or ``_REMOTE_MODULE_ATTRIBUTES_IGNORE_FOR_PICKLING``.')",
    "docstring": "Check if all the attribute has explicitly defined whether to be pickled (i.e., picklability).",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\nn\\api\\remote_module.py",
    "ast_data": "FunctionDef name:_check_attribute_picklability arg:self arguments arg For Call If BoolOp Compare Compare Raise Call"
  },
  {
    "library": "authlib",
    "name": "validate_require_signed_request_object",
    "source_code": "def validate_require_signed_request_object(self):\n    _validate_boolean_value(self, 'require_signed_request_object')",
    "docstring": "Indicates where authorization request needs to be protected as Request Object and provided through either request or request_uri parameter.",
    "type": "method",
    "file_path": "authlib\\authlib\\oauth2\\rfc9101\\discovery.py",
    "ast_data": "FunctionDef name:validate_require_signed_request_object arg:self arguments arg Call"
  },
  {
    "library": "pytorch",
    "name": "TypeConstraintParam",
    "source_code": "@dataclasses.dataclass(frozen=True)\nclass TypeConstraintParam:\n    name: str\n    allowed_types: set[ir.TypeProtocol]\n    description: str = ''\n\n    def __hash__(self) -> int:\n        return hash((self.name, tuple(self.allowed_types)))\n\n    def __str__(self) -> str:\n        allowed_types_str = ' | '.join((str(t) for t in self.allowed_types))\n        return f'{self.name}={allowed_types_str}'\n\n    @classmethod\n    def any_tensor(cls, name: str, description: str='') -> TypeConstraintParam:\n        return cls(name, {ir.TensorType(dtype) for dtype in ir.DataType}, description)\n\n    @classmethod\n    def any_value(cls, name: str, description: str='') -> TypeConstraintParam:\n        return cls(name, _ALL_VALUE_TYPES, description)",
    "docstring": "Type constraint for a parameter. Attributes: name: Name of the parameter. E.g. \"TFloat\" allowed_types: Allowed types for the parameter.",
    "type": "class",
    "file_path": "pytorch\\torch\\onnx\\_internal\\exporter\\_schemas.py",
    "ast_data": "ClassDef name:TypeConstraintParam FunctionDef name:__hash__ arg:self arguments arg Return return:yes Call Call FunctionDef name:__str__ arg:self arguments arg Assign Call Call Return return:yes FunctionDef name:any_tensor arg:cls arg:name arg:description arguments arg arg arg Return return:yes Call Call FunctionDef name:any_value arg:cls arg:name arg:description arguments arg arg arg Return return:yes Call Call"
  },
  {
    "library": "matplotlib",
    "name": "set_minor_number",
    "source_code": "def set_minor_number(self, minor_number):\n    self._minor_number = minor_number",
    "docstring": "Set the number of minor ticks to label when some minor ticks are labelled. Parameters ---------- minor_number : int Number of ticks which are labelled when the number of ticks is below the threshold.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\ticker.py",
    "ast_data": "FunctionDef name:set_minor_number arg:self arg:minor_number arguments arg arg Assign"
  },
  {
    "library": "tensorflow",
    "name": "add_variable",
    "source_code": "def add_variable(self, feature_column, var):\n    del feature_column, var\n    raise NotImplementedError('StateManager.add_variable')",
    "docstring": "Adds an existing variable to the state. Args: feature_column: A object to associate this variable with. var: The variable.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\feature_column\\feature_column_v2.py",
    "ast_data": "FunctionDef name:add_variable arg:self arg:feature_column arg:var arguments arg arg arg Raise Call"
  },
  {
    "library": "pytorch",
    "name": "module_class",
    "source_code": "@property\ndef module_class(self) -> type | str | None:\n    return self.top()._module_class",
    "docstring": "Returns the module class of the top module.",
    "type": "method",
    "file_path": "pytorch\\torch\\onnx\\_internal\\fx\\passes\\modularization.py",
    "ast_data": "FunctionDef name:module_class arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "names_compat",
    "source_code": "def names_compat(meth: F) -> F:\n\n    @wraps(meth)\n    def new_meth(self_or_cls, *args, **kwargs):\n        if 'name' in kwargs and 'names' in kwargs:\n            raise TypeError('Can only provide one of `names` and `name`')\n        if 'name' in kwargs:\n            kwargs['names'] = kwargs.pop('name')\n        return meth(self_or_cls, *args, **kwargs)\n    return cast(F, new_meth)",
    "docstring": "A decorator to allow either or keyword but not both. This makes it easier to share code with base class.",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\indexes\\multi.py",
    "ast_data": "FunctionDef name:names_compat arg:meth arguments arg FunctionDef name:new_meth arg:self_or_cls arguments arg arg arg If BoolOp Compare Compare Raise Call If Compare Assign Call Return return:yes Call Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "run",
    "source_code": "def run(self, fetches, feed_dict=None, options=None, run_metadata=None):\n    if self.should_stop():\n        raise RuntimeError('Run called even after should_stop requested.')\n    actual_fetches = {'caller': fetches}\n    run_context = session_run_hook.SessionRunContext(original_args=session_run_hook.SessionRunArgs(fetches, feed_dict), session=self._sess)\n    options = options or config_pb2.RunOptions()\n    feed_dict = self._call_hook_before_run(run_context, actual_fetches, feed_dict, options)\n    run_metadata = run_metadata or config_pb2.RunMetadata()\n    outputs = _WrappedSession.run(self, fetches=actual_fetches, feed_dict=feed_dict, options=options, run_metadata=run_metadata)\n    for hook in self._hooks:\n        hook.after_run(run_context, session_run_hook.SessionRunValues(results=outputs[hook] if hook in outputs else None, options=options, run_metadata=run_metadata))\n    self._should_stop = self._should_stop or run_context.stop_requested\n    return outputs['caller']",
    "docstring": "See base class.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\training\\monitored_session.py",
    "ast_data": "FunctionDef name:run arg:self arg:fetches arg:feed_dict arg:options arg:run_metadata arguments arg arg arg arg arg If Call Raise Call Assign Assign Call Call Assign BoolOp Call Assign Call Assign BoolOp Call Assign Call For Call Call Compare Assign BoolOp Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "rt_is_equiv_dense",
    "source_code": "def rt_is_equiv_dense(rt):\n    return math_ops.reduce_all([math_ops.equal(math_ops.reduce_variance(math_ops.cast(row_lens, backend.floatx())), constant_op.constant([0.0])) for row_lens in rt.nested_row_lengths()])",
    "docstring": "Returns true if this RaggedTensor has the same row_lengths across all ragged dimensions and thus can be converted to a dense tensor without loss of information. Args: rt: RaggedTensor.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\losses.py",
    "ast_data": "FunctionDef name:rt_is_equiv_dense arg:rt arguments arg Return return:yes Call Call Call Call Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "is_feature_layer",
    "source_code": "def is_feature_layer(layer):\n    return getattr(layer, '_is_feature_layer', False)",
    "docstring": "Returns whether is a FeatureLayer or not.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\training_utils_v1.py",
    "ast_data": "FunctionDef name:is_feature_layer arg:layer arguments arg Return return:yes Call"
  },
  {
    "library": "cryptography",
    "name": "public_bytes",
    "source_code": "@abc.abstractmethod\ndef public_bytes(self, encoding: _serialization.Encoding, format: _serialization.PublicFormat) -> bytes:\n    pass",
    "docstring": "Returns the key serialized as bytes.",
    "type": "method",
    "file_path": "cryptography\\src\\cryptography\\hazmat\\primitives\\asymmetric\\dsa.py",
    "ast_data": "FunctionDef name:public_bytes arg:self arg:encoding arg:format arguments arg arg arg"
  },
  {
    "library": "pytorch",
    "name": "finish",
    "source_code": "@abc.abstractmethod\ndef finish(self, metadata: Metadata, results: list[list[WriteResult]]) -> None:\n    pass",
    "docstring": "Write the metadata and marks the current checkpoint as successful. The actual format/schema used for serializing is an implementation detail. The only requirement is that it's recoverable in to the same object graph. Args: metadata (Metadata): metadata for the new checkpoint results: A list of WriteResults from all ranks. Returns: None",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\checkpoint\\storage.py",
    "ast_data": "FunctionDef name:finish arg:self arg:metadata arg:results arguments arg arg arg"
  },
  {
    "library": "pytorch",
    "name": "_time_estimator",
    "source_code": "@contextlib.contextmanager\ndef _time_estimator(group: Optional[ProcessGroup]=None, device: Optional[torch.device]=None):\n    group = group or _get_default_group()\n    device = device or _get_pg_default_device(group)\n    backend = group._get_backend(device)\n    if not backend.supports_time_estimate:\n        raise NotImplementedError(f'collective time estimator is not supported in the curent version of backend {backend}')\n    backend._start_time_estimate()\n    cm = _TimeEstimator()\n    yield cm\n    cm.estimated_time = backend._end_time_estimate()",
    "docstring": "Context manager used to estimate time of collectives. Within the context manager, nothing is actually run and the backend just simulates the collective time only. Args: group (, optional): The process group to work on. If None, the default process group will be used. device (, optional): Default is None, set to a device if there isn't a implementation by the backend. Examples: >>> # xdoctest: +SKIP(\"no rank\") >>> # Synchronous ops >>> with _time_estimator() as cm: >>> for i in range(num_colls): >>> dist.all_reduce(tensors[i]) >>> # estimate time is stored in cm.estimated_time .. warning:: :func: currently only support NCCL backend but it can easily be extended to other backends. Also a NCCL communicator needs to be created because only with a real communicator can we do accurate estimation. The communicator internally has knowledge about the links it runs on (e.g. intra-node or inter-node, whether the links are NVLink or PCI-e or IB).",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\distributed_c10d.py",
    "ast_data": "FunctionDef name:_time_estimator arg:group arg:device arguments arg arg Assign BoolOp Call Assign BoolOp Call Assign Call If Raise Call Call Assign Call Assign Call"
  },
  {
    "library": "matplotlib",
    "name": "__init__",
    "source_code": "def __init__(self, line):\n    if line.axes is None:\n        raise RuntimeError('You must first add the line to the Axes')\n    if line.get_picker() is None:\n        raise RuntimeError('You must first set the picker property of the line')\n    self.axes = line.axes\n    self.line = line\n    self.cid = self.canvas.callbacks._connect_picklable('pick_event', self.onpick)\n    self.ind = set()",
    "docstring": "Parameters ---------- line : The line must already have been added to an and must have its picker property set.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\lines.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:line arguments arg arg If Compare Raise Call If Compare Call Raise Call Assign Assign Assign Call Assign Call"
  },
  {
    "library": "pandas",
    "name": "_check_engine",
    "source_code": "def _check_engine(engine: str | None) -> str:\n    from pandas.core.computation.check import NUMEXPR_INSTALLED\n    from pandas.core.computation.expressions import USE_NUMEXPR\n    if engine is None:\n        engine = 'numexpr' if USE_NUMEXPR else 'python'\n    if engine not in ENGINES:\n        valid_engines = list(ENGINES.keys())\n        raise KeyError(f\"Invalid engine '{engine}' passed, valid engines are {valid_engines}\")\n    if engine == 'numexpr' and (not NUMEXPR_INSTALLED):\n        raise ImportError(\"'numexpr' is not installed or an unsupported version. Cannot use engine='numexpr' for query/eval if 'numexpr' is not installed\")\n    return engine",
    "docstring": "Make sure a valid engine is passed. Parameters ---------- engine : str String to validate. Raises ------ KeyError * If an invalid engine is passed. ImportError * If numexpr was requested but doesn't exist. Returns ------- str Engine name.",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\computation\\eval.py",
    "ast_data": "FunctionDef name:_check_engine arg:engine arguments arg If Compare Assign If Compare Assign Call Call Raise Call If BoolOp Compare Raise Call Return return:yes"
  },
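A minimal usage sketch: `_check_engine` is private, so this exercises it through the public `pd.eval`, whose `engine` argument is validated by this function (assuming pandas is installed; numexpr may or may not be present):

```python
import pandas as pd

# engine=None resolves to 'numexpr' when the optional numexpr package is
# installed, otherwise to 'python'; here we pin 'python' explicitly.
print(pd.eval("2 + 3 * 4", engine="python"))  # 14

# An unknown engine name raises KeyError listing the valid engines.
try:
    pd.eval("1 + 1", engine="not-an-engine")
except KeyError as exc:
    print(exc)
```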
  {
    "library": "pytorch",
    "name": "is_equivalent",
    "source_code": "def is_equivalent(spec1: TreeSpec, spec2: TreeSpec, equivalence_fn: Callable[[Optional[type], Context, Optional[type], Context], bool]) -> bool:\n    if not equivalence_fn(spec1.type, spec1.context, spec2.type, spec2.context):\n        return False\n    if len(spec1.children_specs) != len(spec2.children_specs):\n        return False\n    for child_spec1, child_spec2 in zip(spec1.children_specs, spec2.children_specs):\n        if not is_equivalent(child_spec1, child_spec2, equivalence_fn):\n            return False\n    return True",
    "docstring": "Customizable equivalence check for two TreeSpecs. Arguments: spec1: The first TreeSpec to compare spec2: The second TreeSpec to compare equivalence_fn: A function to determine the equivalence of two TreeSpecs by examining their types and contexts. It will be called like: equivalence_fn(spec1.type, spec1.context, spec2.type, spec2.context) This function will be applied recursively to all children. Returns: True if the two TreeSpecs are equivalent, False otherwise.",
    "type": "function",
    "file_path": "pytorch\\torch\\export\\_tree_utils.py",
    "ast_data": "FunctionDef name:is_equivalent arg:spec1 arg:spec2 arg:equivalence_fn arguments arg arg arg If Call Return return:yes If Compare Call Call Return return:yes For Call If Call Return return:yes Return return:yes"
  },
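A small sketch of `is_equivalent` on specs produced by `torch.utils._pytree` (both modules are private, so the import paths may shift between releases):

```python
import torch.utils._pytree as pytree
from torch.export._tree_utils import is_equivalent

# Two pytrees with the same container structure but different leaf values.
_, spec1 = pytree.tree_flatten({"a": 1, "b": [2, 3]})
_, spec2 = pytree.tree_flatten({"a": 9, "b": [8, 7]})

# Equivalence function comparing node types and contexts (dict keys, etc.).
def same_node(type1, ctx1, type2, ctx2):
    return type1 == type2 and ctx1 == ctx2

print(is_equivalent(spec1, spec2, same_node))  # True: same structure
```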
  {
    "library": "tensorflow",
    "name": "_less_flops",
    "source_code": "@ops.RegisterStatistics('Less', 'flops')\ndef _less_flops(graph, node):\n    return _binary_per_element_op_flops(graph, node)",
    "docstring": "Compute flops for Less operation.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\profiler\\internal\\flops_registry.py",
    "ast_data": "FunctionDef name:_less_flops arg:graph arg:node arguments arg arg Return return:yes Call Call"
  },
  {
    "library": "numpy",
    "name": "unreplace_parenthesis",
    "source_code": "def unreplace_parenthesis(s, d):\n    for k, v in d.items():\n        p = _get_parenthesis_kind(k)\n        left = {'ROUND': '(', 'SQUARE': '[', 'CURLY': '{', 'ROUNDDIV': '(/'}[p]\n        right = {'ROUND': ')', 'SQUARE': ']', 'CURLY': '}', 'ROUNDDIV': '/)'}[p]\n        s = s.replace(k, left + v + right)\n    return s",
    "docstring": "Inverse of replace_parenthesis.",
    "type": "function",
    "file_path": "numpy\\numpy\\f2py\\symbolic.py",
    "ast_data": "FunctionDef name:unreplace_parenthesis arg:s arg:d arguments arg arg For Call Assign Call Assign Assign Assign Call Return return:yes"
  },
  {
    "library": "pandas",
    "name": "combine_first",
    "source_code": "def combine_first(self, other) -> Series:\n    from pandas.core.reshape.concat import concat\n    if self.dtype == other.dtype:\n        if self.index.equals(other.index):\n            return self.mask(self.isna(), other)\n        elif self._can_hold_na and (not isinstance(self.dtype, SparseDtype)):\n            this, other = self.align(other, join='outer')\n            return this.mask(this.isna(), other)\n    new_index = self.index.union(other.index)\n    this = self\n    keep_other = other.index.difference(this.index[notna(this)])\n    keep_this = this.index.difference(keep_other)\n    this = this.reindex(keep_this)\n    other = other.reindex(keep_other)\n    if this.dtype.kind == 'M' and other.dtype.kind != 'M':\n        other = to_datetime(other)\n    combined = concat([this, other])\n    combined = combined.reindex(new_index)\n    return combined.__finalize__(self, method='combine_first')",
    "docstring": "Update null elements with value in the same location in 'other'. Combine two Series objects by filling null values in one Series with non-null values from the other Series. Result index will be the union of the two indexes. Parameters ---------- other : Series The value(s) to be used for filling null values. Returns ------- Series The result of combining the provided Series with the other object. See Also -------- Series.combine : Perform element-wise operation on two Series using a given function. Examples -------- >>> s1 = pd.Series([1, np.nan]) >>> s2 = pd.Series([3, 4, 5]) >>> s1.combine_first(s2) 0 1.0 1 4.0 2 5.0 dtype: float64 Null values still persist if the location of that null value does not exist in >>> s1 = pd.Series({\"falcon\": np.nan, \"eagle\": 160.0}) >>> s2 = pd.Series({\"eagle\": 200.0, \"duck\": 30.0}) >>> s1.combine_first(s2) duck 30.0 eagle 160.0 falcon NaN dtype: float64",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\series.py",
    "ast_data": "FunctionDef name:combine_first arg:self arg:other arguments arg arg If Compare If Call Return return:yes Call Call If BoolOp Call Assign Call Return return:yes Call Call Assign Call Assign Assign Call Call Assign Call Assign Call Assign Call If BoolOp Compare Compare Assign Call Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "numpy",
    "name": "_fromnxfunction_seq",
    "source_code": "class _fromnxfunction_seq(_fromnxfunction):\n\n    def __call__(self, x, *args, **params):\n        func = getattr(np, self.__name__)\n        _d = func(tuple((np.asarray(a) for a in x)), *args, **params)\n        _m = func(tuple((getmaskarray(a) for a in x)), *args, **params)\n        return masked_array(_d, mask=_m)",
    "docstring": "A version of that is called with a single sequence of arrays followed by auxiliary args that are passed verbatim for both the data and mask calls.",
    "type": "class",
    "file_path": "numpy\\numpy\\ma\\extras.py",
    "ast_data": "ClassDef name:_fromnxfunction_seq FunctionDef name:__call__ arg:self arg:x arguments arg arg arg arg Assign Call Assign Call Call Call Assign Call Call Call Return return:yes Call"
  },
  {
    "library": "django",
    "name": "delete",
    "source_code": "def delete(self, session_key=None):\n    self._session_key = ''\n    self._session_cache = {}\n    self.modified = True",
    "docstring": "To delete, clear the session key and the underlying data structure and set the modified flag so that the cookie is set on the client for the current request.",
    "type": "method",
    "file_path": "django\\django\\contrib\\sessions\\backends\\signed_cookies.py",
    "ast_data": "FunctionDef name:delete arg:self arg:session_key arguments arg arg Assign Assign Assign"
  },
  {
    "library": "pytorch",
    "name": "register_lowering_pattern",
    "source_code": "def register_lowering_pattern(pattern: PatternExpr, extra_check: Callable[[Match], bool]=_return_true, *, pass_dict: _PassDictsType, prepend: bool=False) -> Callable[[Callable[..., Any]], Callable[..., Any]]:\n\n    def decorator(handler: Callable[..., Any]) -> Callable[..., Any]:\n        assert callable(handler)\n        LoweringPatternEntry(pattern=pattern, extra_check=extra_check, handler=handler).register(pass_dict, prepend=prepend)\n        handler._inductor_lowering_function = True\n        return handler\n    return decorator",
    "docstring": "Register an aten to inductor IR replacement pattern. The decorated function is saved and then called a lowering time allowing direct pattern to inductor IR conversion.",
    "type": "function",
    "file_path": "pytorch\\torch\\_inductor\\pattern_matcher.py",
    "ast_data": "FunctionDef name:register_lowering_pattern arg:pattern arg:extra_check arguments arg arg arg arg FunctionDef name:decorator arg:handler arguments arg Call Call Call Assign Return return:yes Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "chain",
    "source_code": "@compatibility(is_backward_compatible=False)\ndef chain(*op_support: OperatorSupportBase) -> OperatorSupportBase:\n\n    def _chain(submods, node) -> bool:\n        return all((x.is_node_supported(submods, node) for x in op_support))\n    return create_op_support(_chain)",
    "docstring": "Combines a sequence of instances to form a single instance by evaluating each input instance, and returns False if any of it reports False.",
    "type": "function",
    "file_path": "pytorch\\torch\\fx\\passes\\operator_support.py",
    "ast_data": "FunctionDef name:chain arguments arg FunctionDef name:_chain arg:submods arg:node arguments arg arg Return return:yes Call Call Return return:yes Call Call"
  },
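A toy demonstration of `chain` using `create_op_support` (referenced in the source above) to wrap plain predicates; the `submodules`/`node` arguments are ignored by these stubs:

```python
from torch.fx.passes.operator_support import chain, create_op_support

always = create_op_support(lambda submodules, node: True)
never = create_op_support(lambda submodules, node: False)

# chain() AND-combines the checks: any False makes the result False.
combined = chain(always, never)
print(combined.is_node_supported({}, None))  # False
```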
  {
    "library": "matplotlib",
    "name": "empty_with_docstring",
    "source_code": "def empty_with_docstring():\n    pass",
    "docstring": "doc",
    "type": "function",
    "file_path": "matplotlib\\lib\\matplotlib\\_api\\deprecation.py",
    "ast_data": "FunctionDef name:empty_with_docstring arguments"
  },
  {
    "library": "kornia",
    "name": "_extract_device_dtype",
    "source_code": "def _extract_device_dtype(tensor_list: List[Optional[Any]]) -> Tuple[torch.device, torch.dtype]:\n    device, dtype = (None, None)\n    for tensor in tensor_list:\n        if tensor is not None:\n            if not isinstance(tensor, (Tensor,)):\n                continue\n            _device = tensor.device\n            _dtype = tensor.dtype\n            if device is None and dtype is None:\n                device = _device\n                dtype = _dtype\n            elif device != _device or dtype != _dtype:\n                raise ValueError(f'Passed values are not in the same device and dtype.Got ({device}, {dtype}) and ({_device}, {_dtype}).')\n    if device is None:\n        device = torch.device('cpu')\n    if dtype is None:\n        dtype = torch.get_default_dtype()\n    return (device, dtype)",
    "docstring": "Check if all the input are in the same device (only if when they are Tensor). If so, it would return a tuple of (device, dtype). Default: (cpu, ``). Returns: [torch.device, torch.dtype]",
    "type": "function",
    "file_path": "kornia\\kornia\\utils\\helpers.py",
    "ast_data": "FunctionDef name:_extract_device_dtype arg:tensor_list arguments arg Assign For If Compare If Call Assign Assign If BoolOp Compare Compare Assign Assign If BoolOp Compare Compare Raise Call If Compare Assign Call If Compare Assign Call Return return:yes"
  },
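A quick check of the documented behavior (`_extract_device_dtype` is a private kornia helper, so the import path may change):

```python
import torch
from kornia.utils.helpers import _extract_device_dtype

# Non-tensor entries are skipped; all tensors must agree on device/dtype.
t1 = torch.zeros(2, dtype=torch.float32)
t2 = torch.ones(3, dtype=torch.float32)
print(_extract_device_dtype([t1, None, 2.5, t2]))  # (cpu, torch.float32)

# With no tensors at all, the defaults kick in: CPU and the default dtype.
print(_extract_device_dtype([None, 1.0]))
```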
  {
    "library": "matplotlib",
    "name": "format_coord",
    "source_code": "def format_coord(self, lon, lat):\n    lon, lat = np.rad2deg([lon, lat])\n    ns = 'N' if lat >= 0.0 else 'S'\n    ew = 'E' if lon >= 0.0 else 'W'\n    return '%f°%s, %f°%s' % (abs(lat), ns, abs(lon), ew)",
    "docstring": "Return a format string formatting the coordinate.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\projections\\geo.py",
    "ast_data": "FunctionDef name:format_coord arg:self arg:lon arg:lat arguments arg arg arg Assign Call Assign Compare Assign Compare Return return:yes Call Call"
  },
  {
    "library": "sphinx",
    "name": "disconnect",
    "source_code": "def disconnect(self, listener_id: int) -> None:\n    for listeners in self.listeners.values():\n        for listener in listeners.copy():\n            if listener.id == listener_id:\n                listeners.remove(listener)",
    "docstring": "Disconnect a handler.",
    "type": "method",
    "file_path": "sphinx\\sphinx\\events.py",
    "ast_data": "FunctionDef name:disconnect arg:self arg:listener_id arguments arg arg For Call For Call If Compare Call"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, cell, residual_fn=None, **kwargs):\n    super(ResidualWrapperBase, self).__init__(cell, **kwargs)\n    self._residual_fn = residual_fn",
    "docstring": "Constructs a for . Args: cell: An instance of . residual_fn: (Optional) The function to map raw cell inputs and raw cell outputs to the actual cell outputs of the residual network. Defaults to calling nest.map_structure on (lambda i, o: i + o), inputs and outputs. **kwargs: dict of keyword arguments for base layer.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\layers\\legacy_rnn\\rnn_cell_wrapper_impl.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:cell arg:residual_fn arguments arg arg arg arg Call Call Assign"
  },
  {
    "library": "tensorflow",
    "name": "TypeSpec",
    "source_code": "class TypeSpec(object):\n    pass",
    "docstring": "Interface for internal isinstance checks to framework/type_spec.py. This helps to avoid circular dependencies.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\types\\internal.py",
    "ast_data": "ClassDef name:TypeSpec"
  },
  {
    "library": "numpy",
    "name": "hermegrid3d",
    "source_code": "def hermegrid3d(x, y, z, c):\n    return pu._gridnd(hermeval, c, x, y, z)",
    "docstring": "Evaluate a 3-D HermiteE series on the Cartesian product of x, y, and z. This function returns the values: .. math:: p(a,b,c) = \\sum_{i,j,k} c_{i,j,k} * He_i(a) * He_j(b) * He_k(c) where the points `axbyczxyzxyzxyzccxyzxyzcxy`. See Also -------- hermeval, hermeval2d, hermegrid2d, hermeval3d",
    "type": "function",
    "file_path": "numpy\\numpy\\polynomial\\hermite_e.py",
    "ast_data": "FunctionDef name:hermegrid3d arg:x arg:y arg:z arg:c arguments arg arg arg arg Return return:yes Call"
  },
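A worked example of the formula: with `c[1, 1, 1] = 0.5` and `He_1(t) = t`, the value at the grid point `(1, 2, 3)` is `1 + 0.5 * 1 * 2 * 3 = 4`.

```python
import numpy as np
from numpy.polynomial.hermite_e import hermegrid3d

c = np.zeros((2, 2, 2))
c[0, 0, 0] = 1.0  # constant term, He_0 = 1
c[1, 1, 1] = 0.5  # 0.5 * He_1(a) * He_1(b) * He_1(c) = 0.5 * a*b*c

x, y, z = [0.0, 1.0], [0.0, 2.0], [0.0, 3.0]
vals = hermegrid3d(x, y, z, c)  # shape (2, 2, 2): Cartesian product grid
print(vals[1, 1, 1])            # 4.0
```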
  {
    "library": "pytorch",
    "name": "_get_flattened_qconfig_dict",
    "source_code": "def _get_flattened_qconfig_dict(qconfig_mapping: QConfigMapping) -> dict[Union[Callable, str], QConfigAny]:\n    flattened: dict[Union[Callable, str], QConfigAny] = {'': qconfig_mapping.global_qconfig}\n    flattened.update(qconfig_mapping.object_type_qconfigs)\n    flattened.update(qconfig_mapping.module_name_qconfigs)\n    return flattened",
    "docstring": "flatten the global, object_type and module_name qconfig to the same qconfig_dict so that it can be used by propagate_qconfig_ function. \"module_name_regex\" is ignored for now since it's not supported in propagate_qconfig_, but it can be fixed later. For example: Input: { \"\": qconfig, \"object_type\": [ (torch.add, qconfig) ], \"module_name\": [ (\"conv\", qconfig) ] } Output: { \"\": qconfig, torch.add: qconfig, \"conv\": qconfig }",
    "type": "function",
    "file_path": "pytorch\\torch\\ao\\quantization\\fx\\qconfig_mapping_utils.py",
    "ast_data": "FunctionDef name:_get_flattened_qconfig_dict arg:qconfig_mapping arguments arg Call Call Return return:yes"
  },
  {
    "library": "scipy",
    "name": "_KNV0",
    "source_code": "def _KNV0(B, ker_pole, transfer_matrix, j, poles):\n    transfer_matrix_not_j = np.delete(transfer_matrix, j, axis=1)\n    Q, R = s_qr(transfer_matrix_not_j, mode='full')\n    mat_ker_pj = np.dot(ker_pole[j], ker_pole[j].T)\n    yj = np.dot(mat_ker_pj, Q[:, -1])\n    if not np.allclose(yj, 0):\n        xj = yj / np.linalg.norm(yj)\n        transfer_matrix[:, j] = xj",
    "docstring": "Algorithm \"KNV0\" Kautsky et Al. Robust pole assignment in linear state feedback, Int journal of Control 1985, vol 41 p 1129->1155 users/105941/public/KautskyNicholsDooren",
    "type": "function",
    "file_path": "scipy\\scipy\\signal\\_ltisys.py",
    "ast_data": "FunctionDef name:_KNV0 arg:B arg:ker_pole arg:transfer_matrix arg:j arg:poles arguments arg arg arg arg arg Assign Call Assign Call Assign Call Assign Call If Call Assign Call Assign"
  },
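`_KNV0` is driven by the public `scipy.signal.place_poles`; a multi-input example (KNV0 needs `rank(B) > 1`), with matrices borrowed from the scipy documentation:

```python
import numpy as np
from scipy.signal import place_poles

A = np.array([[1.380, -0.2077, 6.715, -5.676],
              [-0.5814, -4.290, 0, 0.6750],
              [1.067, 4.273, -6.654, 5.893],
              [0.0480, 4.273, 1.343, -2.104]])
B = np.array([[0, 5.679],
              [1.136, 1.136],
              [0, 0],
              [-3.146, 0]])
poles = np.array([-0.2, -0.5, -5.0566, -8.6659])

res = place_poles(A, B, poles, method="KNV0")
print(res.gain_matrix)  # K such that eig(A - B @ K) matches `poles`
```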
  {
    "library": "kornia",
    "name": "visualize",
    "source_code": "def visualize(self, images: Tensor, depth_maps: Optional[Union[Tensor, list[Tensor]]]=None, output_type: str='torch', depth_type: str='relative', max_depth: int=80) -> Union[Tensor, list[Tensor], list[Image.Image]]:\n    if depth_maps is None:\n        depth_maps = self(images)\n    output = []\n    for depth_map in depth_maps:\n        if depth_type == 'metric':\n            depth_map = depth_map / max_depth\n        elif depth_type == 'relative':\n            depth_map = depth_map / depth_map.max()\n        else:\n            raise ValueError(f'Unsupported depth type `{depth_type}`.')\n        output.append(grayscale_to_rgb(depth_map))\n    return self._tensor_to_type(output, output_type, is_batch=isinstance(images, Tensor))",
    "docstring": "Draw the segmentation results. Args: images: input tensor. depth_maps: estimated depths. output_type: type of the output. depth_type: 'metric' or 'relative' depth. max_depth: maximum depth value. Only valid for metric depth. Returns: output tensor.",
    "type": "method",
    "file_path": "kornia\\kornia\\models\\depth_estimation\\base.py",
    "ast_data": "FunctionDef name:visualize arg:self arg:images arg:depth_maps arg:output_type arg:depth_type arg:max_depth arguments arg arg arg arg arg arg If Compare Assign Call Assign For If Compare Assign If Compare Assign Call Raise Call Call Call Return return:yes Call Call"
  },
  {
    "library": "django",
    "name": "set",
    "source_code": "def set(self, key, value, timeout=DEFAULT_TIMEOUT, version=None):\n    raise NotImplementedError('subclasses of BaseCache must provide a set() method')",
    "docstring": "Set a value in the cache. If timeout is given, use that timeout for the key; otherwise use the default cache timeout.",
    "type": "method",
    "file_path": "django\\django\\core\\cache\\backends\\base.py",
    "ast_data": "FunctionDef name:set arg:self arg:key arg:value arg:timeout arg:version arguments arg arg arg arg arg Raise Call"
  },
  {
    "library": "django",
    "name": "rel_db_type",
    "source_code": "def rel_db_type(self, connection):\n    return self.db_type(connection)",
    "docstring": "Return the data type that a related field pointing to this field should use. For example, this method is called by ForeignKey and OneToOneField to determine its data type.",
    "type": "method",
    "file_path": "django\\django\\db\\models\\fields\\__init__.py",
    "ast_data": "FunctionDef name:rel_db_type arg:self arg:connection arguments arg arg Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "_norm_text_angle",
    "source_code": "def _norm_text_angle(a):\n    a = (a + 180) % 180\n    if a > 90:\n        a = a - 180\n    return a",
    "docstring": "Return the given angle normalized to -90 < *a* <= 90 degrees.",
    "type": "function",
    "file_path": "matplotlib\\lib\\mpl_toolkits\\mplot3d\\art3d.py",
    "ast_data": "FunctionDef name:_norm_text_angle arg:a arguments arg Assign If Compare Assign Return return:yes"
  },
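A pure-Python mirror of the normalization, mapping any angle into (-90, 90]:

```python
def norm_text_angle(a):
    a = (a + 180) % 180
    if a > 90:
        a -= 180
    return a

for angle in (0, 90, 91, 180, 270, -45):
    print(angle, "->", norm_text_angle(angle))
# 0 -> 0, 90 -> 90, 91 -> -89, 180 -> 0, 270 -> 90, -45 -> -45
```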
  {
    "library": "scipy",
    "name": "_mgc_stat",
    "source_code": "def _mgc_stat(distx, disty):\n    stat_mgc_map = _local_correlations(distx, disty, global_corr='mgc')\n    n, m = stat_mgc_map.shape\n    if m == 1 or n == 1:\n        stat = stat_mgc_map[m - 1][n - 1]\n        opt_scale = m * n\n    else:\n        samp_size = len(distx) - 1\n        sig_connect = _threshold_mgc_map(stat_mgc_map, samp_size)\n        stat, opt_scale = _smooth_mgc_map(sig_connect, stat_mgc_map)\n    stat_dict = {'stat_mgc_map': stat_mgc_map, 'opt_scale': opt_scale}\n    return (stat, stat_dict)",
    "docstring": "Helper function that calculates the MGC stat. See above for use. Parameters ---------- distx, disty : ndarray and have shapes `` pair.",
    "type": "function",
    "file_path": "scipy\\scipy\\stats\\_mgc.py",
    "ast_data": "FunctionDef name:_mgc_stat arg:distx arg:disty arguments arg arg Assign Call Assign If BoolOp Compare Compare Assign Assign Assign Call Assign Call Assign Call Assign Return return:yes"
  },
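`_mgc_stat` is exercised through the public `scipy.stats.multiscale_graphcorr`; a sketch (the `mgc_dict` key names follow the scipy docs; `reps` is reduced for a quick demo, so scipy may warn that it is low):

```python
import numpy as np
from scipy.stats import multiscale_graphcorr

rng = np.random.default_rng(0)
x = rng.standard_normal(50)
y = x + 0.5 * rng.standard_normal(50)

res = multiscale_graphcorr(x, y, reps=100, random_state=0)
print(res.stat)                   # MGC statistic in [-1, 1]
print(res.mgc_dict["opt_scale"])  # optimal scale chosen in _mgc_stat
```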
  {
    "library": "pytorch",
    "name": "BundledAOTAutogradCacheEntry",
    "source_code": "class BundledAOTAutogradCacheEntry(GenericAOTAutogradCacheEntry[BundledCompiledForward, BundledCompiledBackward]):\n    pass",
    "docstring": "AOTAutogradCacheEntry where we save the entire CompiledFxGraph instead of relying on cache keys from FxGraphCache",
    "type": "class",
    "file_path": "pytorch\\torch\\_functorch\\_aot_autograd\\autograd_cache.py",
    "ast_data": "ClassDef name:BundledAOTAutogradCacheEntry"
  },
  {
    "library": "pytorch",
    "name": "is_tracing",
    "source_code": "def is_tracing():\n    if is_scripting():\n        return False\n    return torch._C._is_tracing()",
    "docstring": "Return a boolean value. Returns `` otherwise.",
    "type": "function",
    "file_path": "pytorch\\torch\\jit\\_trace.py",
    "ast_data": "FunctionDef name:is_tracing arguments If Call Return return:yes Return return:yes Call"
  },
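A quick demonstration of the tracing/eager split (the branch is on a Python bool, so tracing simply bakes in the path that was taken while recording):

```python
import torch

def f(x):
    # True only while torch.jit.trace is recording this function.
    if torch.jit.is_tracing():
        return x + 1
    return x - 1

print(f(torch.zeros(1)))                     # eager: tensor([-1.])
traced = torch.jit.trace(f, torch.zeros(1))
print(traced(torch.zeros(1)))                # traced: tensor([1.])
```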
  {
    "library": "django",
    "name": "CursorMixin",
    "source_code": "class CursorMixin:\n\n    def callproc(self, name, args=None):\n        if not isinstance(name, sql.Identifier):\n            name = sql.Identifier(name)\n        qparts = [sql.SQL('SELECT * FROM '), name, sql.SQL('(')]\n        if args:\n            for item in args:\n                qparts.append(sql.Literal(item))\n                qparts.append(sql.SQL(','))\n            del qparts[-1]\n        qparts.append(sql.SQL(')'))\n        stmt = sql.Composed(qparts)\n        self.execute(stmt)\n        return args",
    "docstring": "A subclass of psycopg cursor implementing callproc.",
    "type": "class",
    "file_path": "django\\django\\db\\backends\\postgresql\\base.py",
    "ast_data": "ClassDef name:CursorMixin FunctionDef name:callproc arg:self arg:name arg:args arguments arg arg arg If Call Assign Call Assign Call Call If For Call Call Call Call Call Call Assign Call Call Return return:yes"
  },
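A pure-Python sketch of the statement `callproc` composes, with `callproc_sql` as a hypothetical stand-in (real quoting is delegated to psycopg's `sql.Identifier`/`sql.Literal`):

```python
def callproc_sql(name, args=None):
    # Mirrors the composition above: SELECT * FROM name(arg, arg, ...)
    parts = [f"SELECT * FROM {name}("]
    if args:
        parts.append(", ".join(repr(a) for a in args))
    parts.append(")")
    return "".join(parts)

print(callproc_sql("my_proc", [1, "x"]))  # SELECT * FROM my_proc(1, 'x')
```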
  {
    "library": "tensorflow",
    "name": "reset_recurrent_dropout_mask",
    "source_code": "def reset_recurrent_dropout_mask(self):\n    self._recurrent_dropout_mask_cache.clear()",
    "docstring": "Reset the cached recurrent dropout masks if any. This is important for the RNN layer to invoke this in it call() method so that the cached mask is cleared before calling the cell.call(). The mask should be cached across the timestep within the same batch, but shouldn't be cached between batches. Otherwise it will introduce unreasonable bias against certain index of data within the batch.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\layers\\recurrent.py",
    "ast_data": "FunctionDef name:reset_recurrent_dropout_mask arg:self arguments arg Call"
  },
  {
    "library": "tensorflow",
    "name": "NotFoundError",
    "source_code": "@tf_export('errors.NotFoundError')\nclass NotFoundError(OpError):\n\n    def __init__(self, node_def, op, message, *args):\n        super(NotFoundError, self).__init__(node_def, op, message, NOT_FOUND, *args)",
    "docstring": "Raised when a requested entity (e.g., a file or directory) was not found. For example, running the operation could raise if it receives the name of a file that does not exist.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\errors_impl.py",
    "ast_data": "ClassDef name:NotFoundError FunctionDef name:__init__ arg:self arg:node_def arg:op arg:message arguments arg arg arg arg arg Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "_reduce_shard_tensor",
    "source_code": "def _reduce_shard_tensor(self, tensor: torch.Tensor, mesh: DeviceMesh, reduce_op: str, mesh_dim: int) -> torch.Tensor:\n    my_coordinate = mesh.get_coordinate()\n    num_chunks = mesh.size(mesh_dim=mesh_dim)\n    if my_coordinate is None:\n        return tensor\n    is_padded = tensor.size(self.dim) % num_chunks != 0\n    if is_padded:\n        scattered_list, pad_sizes = self._split_tensor(tensor, num_chunks, with_padding=True, contiguous=True)\n        tensor = torch.cat(scattered_list, dim=self.dim)\n    elif not tensor.is_contiguous():\n        tensor = tensor.contiguous()\n    output = funcol.reduce_scatter_tensor(tensor, reduce_op, scatter_dim=self.dim, group=(mesh, mesh_dim))\n    if is_padded:\n        output = unpad_tensor(output, self.dim, pad_sizes[my_coordinate[mesh_dim]])\n    return output",
    "docstring": "reduce and scatter a tensor on a mesh dimension",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\tensor\\placement_types.py",
    "ast_data": "FunctionDef name:_reduce_shard_tensor arg:self arg:tensor arg:mesh arg:reduce_op arg:mesh_dim arguments arg arg arg arg arg Assign Call Assign Call If Compare Return return:yes Assign Compare Call If Assign Call Assign Call If Call Assign Call Assign Call If Assign Call Return return:yes"
  },
  {
    "library": "pandas",
    "name": "_SeriesTableBuilderNonVerbose",
    "source_code": "class _SeriesTableBuilderNonVerbose(_SeriesTableBuilder):\n\n    def _fill_non_empty_info(self) -> None:\n        self.add_object_type_line()\n        self.add_index_range_line()\n        self.add_dtypes_line()\n        if self.display_memory_usage:\n            self.add_memory_usage_line()",
    "docstring": "Series info table builder for non-verbose output.",
    "type": "class",
    "file_path": "pandas\\pandas\\io\\formats\\info.py",
    "ast_data": "ClassDef name:_SeriesTableBuilderNonVerbose FunctionDef name:_fill_non_empty_info arg:self arguments arg Call Call Call If Call"
  },
  {
    "library": "pytorch",
    "name": "_check_inputs",
    "source_code": "def _check_inputs(self, arg_mbs: Optional[list]=None, kwarg_mbs: Optional[list]=None, target_mbs: Optional[list]=None, losses: Optional[list]=None):\n\n    def check_type_and_len(mbs, name: str):\n        if not isinstance(mbs, list):\n            raise TypeError(f'{name} must be a list but got a {type(mbs)}')\n        if len(mbs) != self._n_microbatches:\n            raise ValueError(f'Expecting {self._n_microbatches} {name} but got {len(mbs)}')\n    if arg_mbs is not None:\n        check_type_and_len(arg_mbs, 'arg_mbs')\n    else:\n        arg_mbs = [()] * self._n_microbatches\n    if kwarg_mbs is not None:\n        check_type_and_len(kwarg_mbs, 'kwarg_mbs')\n    else:\n        kwarg_mbs = [{}] * self._n_microbatches\n    if target_mbs is not None:\n        check_type_and_len(target_mbs, 'target_mbs')\n    if losses is not None:\n        if not isinstance(losses, list):\n            raise TypeError(f'losses must be a list but got a {type(losses)}')\n    return (arg_mbs, kwarg_mbs)",
    "docstring": "Pre-process/check inputs",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\pipelining\\schedules.py",
    "ast_data": "FunctionDef name:_check_inputs arg:self arg:arg_mbs arg:kwarg_mbs arg:target_mbs arg:losses arguments arg arg arg arg arg FunctionDef name:check_type_and_len arg:mbs arg:name arguments arg arg If Call Raise Call Call If Compare Call Raise Call Call If Compare Call Assign If Compare Call Assign If Compare Call If Compare If Call Raise Call Call Return return:yes"
  },
  {
    "library": "django",
    "name": "M",
    "source_code": "def M(self):\n    return MONTHS_3[self.data.month].title()",
    "docstring": "Month, textual, 3 letters; e.g. 'Jan'",
    "type": "method",
    "file_path": "django\\django\\utils\\dateformat.py",
    "ast_data": "FunctionDef name:M arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "django",
    "name": "__iter__",
    "source_code": "def __iter__(self):\n    for i in range(len(self)):\n        yield self[i]",
    "docstring": "Iterate over each ring in the polygon.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\geos\\polygon.py",
    "ast_data": "FunctionDef name:__iter__ arg:self arguments arg For Call Call"
  },
  {
    "library": "pytorch",
    "name": "_lookup_and_create_type",
    "source_code": "def _lookup_and_create_type(base: type[_T], qname: str) -> _T:\n    pkg, name = qname.rsplit('.', 1)\n    mod = importlib.import_module(pkg)\n    ty = getattr(mod, name)\n    if not issubclass(ty, base):\n        raise TypeError(f'Type {ty} is not a subtype of {base}')\n    return ty()",
    "docstring": "Given a base type and qualified name: import & lookup that name, check that it's of the given type and then instantiate it.",
    "type": "function",
    "file_path": "pytorch\\torch\\_inductor\\compile_worker\\__main__.py",
    "ast_data": "FunctionDef name:_lookup_and_create_type arg:base arg:qname arguments arg arg Assign Call Assign Call Assign Call If Call Raise Call Return return:yes Call"
  },
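A standalone sketch of the same import-then-instantiate pattern using only the stdlib; `lookup_and_create` and the `collections.OrderedDict` target are illustrative stand-ins:

```python
import importlib

def lookup_and_create(base, qname):
    pkg, name = qname.rsplit(".", 1)
    ty = getattr(importlib.import_module(pkg), name)
    if not issubclass(ty, base):
        raise TypeError(f"{ty} is not a subtype of {base}")
    return ty()

d = lookup_and_create(dict, "collections.OrderedDict")
print(type(d).__name__)  # OrderedDict
```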
  {
    "library": "authlib",
    "name": "decrypt",
    "source_code": "def decrypt(self, ciphertext, aad, iv, tag, key):\n    self.check_iv(iv)\n    hkey = key[:self.key_len]\n    dkey = key[self.key_len:]\n    _tag = self._hmac(ciphertext, aad, iv, hkey)\n    if not hmac.compare_digest(_tag, tag):\n        raise InvalidTag()\n    cipher = Cipher(AES(dkey), CBC(iv), backend=default_backend())\n    d = cipher.decryptor()\n    data = d.update(ciphertext) + d.finalize()\n    unpad = PKCS7(AES.block_size).unpadder()\n    return unpad.update(data) + unpad.finalize()",
    "docstring": "Key Decryption with AES AES_CBC_HMAC_SHA2. :param ciphertext: ciphertext in bytes :param aad: additional authenticated data in bytes :param iv: initialization vector in bytes :param tag: authentication tag in bytes :param key: encrypted key in bytes :return: message",
    "type": "method",
    "file_path": "authlib\\authlib\\jose\\rfc7518\\jwe_encs.py",
    "ast_data": "FunctionDef name:decrypt arg:self arg:ciphertext arg:aad arg:iv arg:tag arg:key arguments arg arg arg arg arg arg Call Assign Assign Assign Call If Call Raise Call Assign Call Call Call Call Assign Call Assign Call Call Assign Call Call Return return:yes Call Call"
  },
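The encrypt/decrypt pair above is normally reached through authlib's public JWE API; a round-trip sketch, assuming the default algorithm registry includes 'dir' and A128CBC-HS256:

```python
import os
from authlib.jose import JsonWebEncryption

jwe = JsonWebEncryption()
key = os.urandom(32)  # A128CBC-HS256 needs 32 bytes: 16 HMAC + 16 AES
protected = {"alg": "dir", "enc": "A128CBC-HS256"}

token = jwe.serialize_compact(protected, b"hello", key)
data = jwe.deserialize_compact(token, key)
print(data["payload"])  # b'hello'
```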
  {
    "library": "pytorch",
    "name": "append",
    "source_code": "@compatibility(is_backward_compatible=True)\ndef append(self, x: 'Node') -> None:\n    self._next.prepend(x)",
    "docstring": "Insert `` Args: x (Node): The node to put after this node. Must be a member of the same graph.",
    "type": "method",
    "file_path": "pytorch\\torch\\fx\\node.py",
    "ast_data": "FunctionDef name:append arg:self arg:x arguments arg arg Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_convert_row_partition",
    "source_code": "@classmethod\ndef _convert_row_partition(cls, partition, name, dtype=None, dtype_hint=None):\n    if dtype_hint is None:\n        dtype_hint = dtypes.int64\n    if isinstance(partition, np.ndarray) and partition.dtype == np.int32 and (dtype is None):\n        partition = ops.convert_to_tensor(partition, name=name)\n    else:\n        partition = tensor_conversion.convert_to_tensor_v2(partition, dtype_hint=dtype_hint, dtype=dtype, name=name)\n    if partition.dtype not in (dtypes.int32, dtypes.int64):\n        raise ValueError('%s must have dtype int32 or int64' % name)\n    return partition",
    "docstring": "Converts to Tensors. Args: partition: A row-partitioning tensor for the being constructed. I.e., one of: row_splits, row_lengths, row_starts, row_limits, value_rowids, uniform_row_length. name: The name of the row-partitioning tensor. dtype: Optional dtype for the RowPartition. If missing, the type is inferred from the type of , dtype_hint, or tf.int64. dtype_hint: Optional dtype for the RowPartition, used when dtype is None. In some cases, a caller may not have a dtype in mind when converting to a tensor, so dtype_hint can be used as a soft preference. If the conversion to is not possible, this argument has no effect. Returns: A tensor equivalent to partition. Raises: ValueError: if dtype is not int32 or int64.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\row_partition.py",
    "ast_data": "FunctionDef name:_convert_row_partition arg:cls arg:partition arg:name arg:dtype arg:dtype_hint arguments arg arg arg arg arg If Compare Assign If BoolOp Call Compare Compare Assign Call Assign Call If Compare Raise Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_get_string_to_hash_bucket_fn",
    "source_code": "def _get_string_to_hash_bucket_fn(self, hasher_spec):\n    if not isinstance(hasher_spec, HasherSpec):\n        raise TypeError(f'`hasher_spec` must be of type HasherSpec, got {type(hasher_spec)}.')\n    if hasher_spec.hasher == 'fasthash':\n        return string_ops.string_to_hash_bucket_fast\n    if hasher_spec.hasher == 'legacy':\n        return string_ops.string_to_hash_bucket\n    if hasher_spec.hasher == 'stronghash':\n        return functools.partial(string_ops.string_to_hash_bucket_strong, key=hasher_spec.key)\n    raise ValueError(f'Found unknown hasher {hasher_spec.hasher} in `hasher_spec`')",
    "docstring": "Returns the string_to_hash_bucket op to use based on .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\lookup_ops.py",
    "ast_data": "FunctionDef name:_get_string_to_hash_bucket_fn arg:self arg:hasher_spec arguments arg arg If Call Raise Call Call If Compare Return return:yes If Compare Return return:yes If Compare Return return:yes Call Raise Call"
  },
  {
    "library": "django",
    "name": "json",
    "source_code": "@property\ndef json(self):\n    return capi.to_json(self.ptr)",
    "docstring": "Return the GeoJSON representation of this Geometry.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\gdal\\geometries.py",
    "ast_data": "FunctionDef name:json arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "num_chunks",
    "source_code": "@abstractmethod\ndef num_chunks(self) -> int:\n    pass",
    "docstring": "Return the number of chunks the DataFrame consists of.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\interchange\\dataframe_protocol.py",
    "ast_data": "FunctionDef name:num_chunks arg:self arguments arg"
  },
  {
    "library": "scikit-learn",
    "name": "_compute_inverse_components",
    "source_code": "def _compute_inverse_components(self):\n    components = self.components_\n    if sp.issparse(components):\n        components = components.toarray()\n    return linalg.pinv(components, check_finite=False)",
    "docstring": "Compute the pseudo-inverse of the (densified) components.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\random_projection.py",
    "ast_data": "FunctionDef name:_compute_inverse_components arg:self arguments arg Assign If Call Assign Call Return return:yes Call"
  },
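The pseudo-inverse computed here backs `inverse_transform`; a usage sketch via the public estimator (the `compute_inverse_components` parameter exists in scikit-learn 1.1+):

```python
import numpy as np
from sklearn.random_projection import GaussianRandomProjection

X = np.random.RandomState(0).randn(20, 100)
grp = GaussianRandomProjection(
    n_components=10, compute_inverse_components=True, random_state=0
)
Xt = grp.fit_transform(X)

print(grp.inverse_components_.shape)    # (100, 10): pinv of components_
print(grp.inverse_transform(Xt).shape)  # (20, 100): approximate recovery
```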
  {
    "library": "django",
    "name": "info",
    "source_code": "def info(request, message, extra_tags='', fail_silently=False):\n    add_message(request, constants.INFO, message, extra_tags=extra_tags, fail_silently=fail_silently)",
    "docstring": "Add a message with the `` level.",
    "type": "function",
    "file_path": "django\\django\\contrib\\messages\\api.py",
    "ast_data": "FunctionDef name:info arg:request arg:message arg:extra_tags arg:fail_silently arguments arg arg arg arg Call"
  },
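Typical usage inside a view; `update_profile` and the 'profile' URL name are hypothetical:

```python
from django.contrib import messages
from django.shortcuts import redirect

def update_profile(request):
    # Attach an INFO-level flash message to the current request.
    messages.info(request, "Profile updated.", extra_tags="toast")
    return redirect("profile")
```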
  {
    "library": "pytorch",
    "name": "display_snapshot",
    "source_code": "def display_snapshot(self, type: str='current', units: str='B', tabulate: bool=False) -> None:\n    snapshot = self.get_tracker_snapshot(type)\n    if tabulate:\n        _print_snapshot_tabular(snapshot, units)\n    else:\n        _print_snapshot(snapshot, units)",
    "docstring": "Display the memory usage breakdown snapshot of the tracker based on the specified type and units. Keyword args: type (str): The type of snapshot to display. Can be \"current\" for the current memory usage or \"peak\" for the peak memory usage. Defaults to \"current\". units (str): The units to use for displaying memory usage. Defaults to \"B\". Supports [\"B\", \"KiB\", \"MiB\", \"GiB\"]. tabulate (bool): Whether to display the snapshot in a tabular format. Defaults to False.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\_tools\\mem_tracker.py",
    "ast_data": "FunctionDef name:display_snapshot arg:self arg:type arg:units arg:tabulate arguments arg arg arg arg Assign Call If Call Call"
  },
  {
    "library": "tensorflow",
    "name": "matrix",
    "source_code": "@property\ndef matrix(self):\n    return self._matrix",
    "docstring": "The matrix defining this operator.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\linalg\\linear_operator_full_matrix.py",
    "ast_data": "FunctionDef name:matrix arg:self arguments arg Return return:yes"
  },
  {
    "library": "pandas",
    "name": "_get_arithmetic_result_freq",
    "source_code": "@final\ndef _get_arithmetic_result_freq(self, other) -> BaseOffset | None:\n    if isinstance(self.dtype, PeriodDtype):\n        return self.freq\n    elif not lib.is_scalar(other):\n        return None\n    elif isinstance(self.freq, Tick):\n        return self.freq\n    return None",
    "docstring": "Check if we can preserve self.freq in addition or subtraction.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\arrays\\datetimelike.py",
    "ast_data": "FunctionDef name:_get_arithmetic_result_freq arg:self arg:other arguments arg arg If Call Return return:yes If Call Return return:no If Call Return return:yes Return return:no"
  },
  {
    "library": "tensorflow",
    "name": "SendTracebacks",
    "source_code": "def SendTracebacks(self, request, context):\n    context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n    context.set_details('Method not implemented!')\n    raise NotImplementedError('Method not implemented!')",
    "docstring": "Send the tracebacks of ops in a Python graph definition.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\debug\\lib\\debug_service_pb2_grpc.py",
    "ast_data": "FunctionDef name:SendTracebacks arg:self arg:request arg:context arguments arg arg arg Call Call Raise Call"
  },
  {
    "library": "seaborn",
    "name": "_parse_cubehelix_args",
    "source_code": "def _parse_cubehelix_args(argstr):\n    if argstr.startswith('ch:'):\n        argstr = argstr[3:]\n    if argstr.endswith('_r'):\n        reverse = True\n        argstr = argstr[:-2]\n    else:\n        reverse = False\n    if not argstr:\n        return ([], {'reverse': reverse})\n    all_args = argstr.split(',')\n    args = [float(a.strip(' ')) for a in all_args if '=' not in a]\n    kwargs = [a.split('=') for a in all_args if '=' in a]\n    kwargs = {k.strip(' '): float(v.strip(' ')) for k, v in kwargs}\n    kwarg_map = dict(s='start', r='rot', g='gamma', h='hue', l='light', d='dark')\n    kwargs = {kwarg_map.get(k, k): v for k, v in kwargs.items()}\n    if reverse:\n        kwargs['reverse'] = True\n    return (args, kwargs)",
    "docstring": "Turn stringified cubehelix params into args/kwargs.",
    "type": "function",
    "file_path": "seaborn\\seaborn\\palettes.py",
    "ast_data": "FunctionDef name:_parse_cubehelix_args arg:argstr arguments arg If Call Assign If Call Assign Assign Assign If Return return:yes Assign Call Assign Call Call Compare Assign Call Compare Assign Call Call Call Assign Call Assign Call Call If Assign Return return:yes"
  },
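The string format parsed above is what `seaborn.color_palette` accepts for cubehelix palettes: positional numbers become args, `key=value` pairs map through the abbreviation table (`s`->`start`, `r`->`rot`, `d`->`dark`, ...), and a trailing `_r` reverses the palette:

```python
import seaborn as sns

pal = sns.color_palette("ch:s=.25,r=-.25,d=.3_r", n_colors=6)
print(len(pal))  # 6 RGB tuples from a reversed cubehelix palette
```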
  {
    "library": "sphinx",
    "name": "nested_parse_with_titles",
    "source_code": "def nested_parse_with_titles(state: RSTState, content: StringList, node: Node, content_offset: int=0) -> str:\n    with _fresh_title_style_context(state):\n        ret = state.nested_parse(content, content_offset, node, match_titles=True)\n    return ret",
    "docstring": "Version of state.nested_parse() that allows titles and does not require titles to have the same decoration as the calling document. This is useful when the parsed content comes from a completely different context, such as docstrings. This function is retained for compatibility and will be deprecated in Sphinx 8. Prefer ``.",
    "type": "function",
    "file_path": "sphinx\\sphinx\\util\\nodes.py",
    "ast_data": "FunctionDef name:nested_parse_with_titles arg:state arg:content arg:node arg:content_offset arguments arg arg arg arg With Call Assign Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "_find_nearest_contour",
    "source_code": "def _find_nearest_contour(self, xy, indices=None):\n    if self.filled:\n        raise ValueError('Method does not support filled contours')\n    if indices is None:\n        indices = range(len(self._paths))\n    d2min = np.inf\n    idx_level_min = idx_vtx_min = proj_min = None\n    for idx_level in indices:\n        path = self._paths[idx_level]\n        idx_vtx_start = 0\n        for subpath in path._iter_connected_components():\n            if not len(subpath.vertices):\n                continue\n            lc = self.get_transform().transform(subpath.vertices)\n            d2, proj, leg = _find_closest_point_on_path(lc, xy)\n            if d2 < d2min:\n                d2min = d2\n                idx_level_min = idx_level\n                idx_vtx_min = leg[1] + idx_vtx_start\n                proj_min = proj\n            idx_vtx_start += len(subpath)\n    return (idx_level_min, idx_vtx_min, proj_min)",
    "docstring": "Find the point in the unfilled contour plot that is closest (in screen space) to point *xy*. Parameters ---------- xy : tuple[float, float] The reference point (in screen space). indices : list of int or None, default: None Indices of contour levels to consider. If None (the default), all levels are considered. Returns ------- idx_level_min : int The index of the contour level closest to *xy*. idx_vtx_min : int The index of the segment closest to *xy* (at that level). proj : (float, float) The point in the contour plot closest to *xy*.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\contour.py",
    "ast_data": "FunctionDef name:_find_nearest_contour arg:self arg:xy arg:indices arguments arg arg arg If Raise Call If Compare Assign Call Call Assign Assign For Assign Assign For Call If Call Assign Call Call Assign Call If Compare Assign Assign Assign Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "from_state",
    "source_code": "@classmethod\ndef from_state(cls, state, alg):\n    return cls(alg=alg, state=state)",
    "docstring": "Creates a generator from a state. See for description of and . Args: state: the new state. alg: the RNG algorithm. Returns: The new generator.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\stateful_random_ops.py",
    "ast_data": "FunctionDef name:from_state arg:cls arg:state arg:alg arguments arg arg arg Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "set_joinstyle",
    "source_code": "@_docstring.interpd\ndef set_joinstyle(self, js):\n    self._joinstyle = JoinStyle(js)",
    "docstring": "Set how to draw connections between line segments. Parameters ---------- js : or %(JoinStyle)s",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\backend_bases.py",
    "ast_data": "FunctionDef name:set_joinstyle arg:self arg:js arguments arg arg Assign Call"
  },
  {
    "library": "tensorflow",
    "name": "StructuredTensorSpec",
    "source_code": "def StructuredTensorSpec(shape, field_specs):\n    if not isinstance(field_specs, dict):\n        raise TypeError('field_specs must be a dictionary.')\n    for k in field_specs.keys():\n        if not isinstance(k, str):\n            raise TypeError('field_specs must be a dictionary with string keys.')\n    for v in field_specs.values():\n        if not isinstance(v, type_spec.TypeSpec):\n            raise TypeError('field_specs must be a dictionary with TypeSpec values.')\n    shape = dynamic_ragged_shape.DynamicRaggedShape.Spec._from_tensor_shape(tensor_shape.as_shape(shape), 0, dtypes.int32)\n    rank = shape.rank\n    if rank is None:\n        raise TypeError(\"StructuredTensor's shape must have known rank.\")\n    for k, v in field_specs.items():\n        field_shape_untruncated = _dynamic_ragged_shape_spec_from_spec(v)\n        if field_shape_untruncated is None:\n            raise ValueError(f'Cannot convert spec of {k}.')\n        untruncated_rank = field_shape_untruncated.rank\n        if untruncated_rank is not None and untruncated_rank < rank:\n            raise ValueError(f'Rank of field {k} is {untruncated_rank}, but must be at least {rank}.')\n        field_shape = field_shape_untruncated._truncate(rank)\n        shape = shape._merge_with(field_shape)\n    return StructuredTensor.Spec(_ragged_shape=shape, _fields=field_specs)",
    "docstring": "A placeholder for the old StructuredTensorSpec.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\structured\\structured_tensor.py",
    "ast_data": "FunctionDef name:StructuredTensorSpec arg:shape arg:field_specs arguments arg arg If Call Raise Call For Call If Call Raise Call For Call If Call Raise Call Assign Call Call Assign If Compare Raise Call For Call Assign Call If Compare Raise Call Assign If BoolOp Compare Compare Raise Call Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "_input_requires_grad",
    "source_code": "def _input_requires_grad(*tensors: torch.Tensor) -> bool:\n    return any((t.requires_grad for t in tensors))",
    "docstring": "Returns True if any of the tensors requires grad",
    "type": "function",
    "file_path": "pytorch\\torch\\nn\\attention\\_utils.py",
    "ast_data": "FunctionDef name:_input_requires_grad arguments arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_OverloadOperator",
    "source_code": "@classmethod\ndef _OverloadOperator(cls, operator):\n    if operator == '__eq__' or operator == '__ne__':\n        return\n    tensor_oper = getattr(tensor_lib.Tensor, operator)\n\n    def _run_op(a, *args, **kwargs):\n        return tensor_oper(a.value(), *args, **kwargs)\n    functools.update_wrapper(_run_op, tensor_oper)\n    setattr(cls, operator, _run_op)",
    "docstring": "Defer an operator overload to . We pull the operator out of tensor_lib.Tensor dynamically to avoid ordering issues. Args: operator: string. The operator name.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\variables.py",
    "ast_data": "FunctionDef name:_OverloadOperator arg:cls arg:operator arguments arg arg If BoolOp Compare Compare Return return:no Assign Call FunctionDef name:_run_op arg:a arguments arg arg arg Return return:yes Call Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_process_single_batch",
    "source_code": "def _process_single_batch(model, inputs, targets, output_loss_metrics=None, sample_weights=None, training=False):\n    with backend.eager_learning_phase_scope(1 if training else 0), training_utils.RespectCompiledTrainableState(model):\n        with GradientTape() as tape:\n            outs, total_loss, output_losses, masks = _model_loss(model, inputs, targets, output_loss_metrics=output_loss_metrics, sample_weights=sample_weights, training=training)\n            if isinstance(model.optimizer, loss_scale_optimizer.LossScaleOptimizer):\n                scaled_total_loss = model.optimizer.get_scaled_loss(total_loss)\n            else:\n                scaled_total_loss = total_loss\n        if training:\n            trainable_weights = model.trainable_weights\n            if trainable_weights:\n                if hasattr(model, '_backwards'):\n                    model._backwards(tape, scaled_total_loss)\n                else:\n                    grads = tape.gradient(scaled_total_loss, trainable_weights)\n                    if isinstance(model.optimizer, loss_scale_optimizer.LossScaleOptimizer):\n                        grads = model.optimizer.get_unscaled_gradients(grads)\n                    model.optimizer.apply_gradients(zip(grads, trainable_weights))\n            else:\n                logging.warning('The list of trainable weights is empty. Make sure that you are not setting model.trainable to False before compiling the model.')\n        return (outs, total_loss, output_losses, masks)",
    "docstring": "Calculate the loss and gradient for one input batch. The model weights are updated if training is set to True. Args: model: Model whose loss has to be calculated. inputs: List of input arrays. targets: List of target arrays. output_loss_metrics: List of metrics that are used to aggregated output loss values. sample_weights: Optional list of sample weight arrays. training: The boolean represents if the weights of the model are updated. 'fit' methods will set this to True while 'evaluate' methods will set this to False. Returns: output of the model, total loss, the loss and the mask associated with each output. Raises: ValueError: If the model has no loss to optimize.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\training_eager_v1.py",
    "ast_data": "FunctionDef name:_process_single_batch arg:model arg:inputs arg:targets arg:output_loss_metrics arg:sample_weights arg:training arguments arg arg arg arg arg arg With Call Call With Call Assign Call If Call Assign Call Assign If Assign If If Call Call Assign Call If Call Assign Call Call Call Call Return return:yes"
  },
  {
    "library": "numpy",
    "name": "diag",
    "source_code": "@array_function_dispatch(_diag_dispatcher)\ndef diag(v, k=0):\n    v = asanyarray(v)\n    s = v.shape\n    if len(s) == 1:\n        n = s[0] + abs(k)\n        res = zeros((n, n), v.dtype)\n        if k >= 0:\n            i = k\n        else:\n            i = -k * n\n        res[:n - k].flat[i::n + 1] = v\n        return res\n    elif len(s) == 2:\n        return diagonal(v, k)\n    else:\n        raise ValueError('Input must be 1- or 2-d.')",
    "docstring": "Extract a diagonal or construct a diagonal array. See the more detailed documentation for `vkvvkk>0k>> import numpy as np >>> x = np.arange(9).reshape((3,3)) >>> x array([[0, 1, 2], [3, 4, 5], [6, 7, 8]]) >>> np.diag(x) array([0, 4, 8]) >>> np.diag(x, k=1) array([1, 5]) >>> np.diag(x, k=-1) array([3, 7]) >>> np.diag(np.diag(x)) array([[0, 0, 0], [0, 4, 0], [0, 0, 8]])",
    "type": "function",
    "file_path": "numpy\\numpy\\lib\\_twodim_base_impl.py",
    "ast_data": "FunctionDef name:diag arg:v arg:k arguments arg arg Assign Call Assign If Compare Call Assign Call Assign Call If Compare Assign Assign Assign Return return:yes If Compare Call Return return:yes Call Raise Call Call"
  },
  {
    "library": "tensorflow",
    "name": "ProfilerOptions",
    "source_code": "@tf_export('profiler.experimental.ProfilerOptions', v1=[])\nclass ProfilerOptions(collections.namedtuple('ProfilerOptions', ['host_tracer_level', 'python_tracer_level', 'device_tracer_level', 'delay_ms'])):\n\n    def __new__(cls, host_tracer_level=2, python_tracer_level=0, device_tracer_level=1, delay_ms=None):\n        return super(ProfilerOptions, cls).__new__(cls, host_tracer_level, python_tracer_level, device_tracer_level, delay_ms)",
    "docstring": "Options for finer control over the profiler. Use to control behavior. Fields: host_tracer_level: Adjust CPU tracing level. Values are: - critical info only, - info, - verbose. [default value is ] python_tracer_level: Toggle tracing of Python function calls. Values are: - enabled, - disabled [default value is ] device_tracer_level: Adjust device (TPU/GPU) tracing level. Values are: - enabled, - disabled [default value is ] delay_ms: Requests for all hosts to start profiling at a timestamp that is away from the current time. is in milliseconds. If zero, each host will start profiling immediately upon receiving the request. Default value is , allowing the profiler guess the best value.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\profiler\\profiler_v2.py",
    "ast_data": "ClassDef name:ProfilerOptions Call FunctionDef name:__new__ arg:cls arg:host_tracer_level arg:python_tracer_level arg:device_tracer_level arg:delay_ms arguments arg arg arg arg arg Return return:yes Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, from_logits=False, label_smoothing=0, axis=-1, reduction=losses_utils.ReductionV2.AUTO, name='binary_crossentropy'):\n    super().__init__(binary_crossentropy, name=name, reduction=reduction, from_logits=from_logits, label_smoothing=label_smoothing, axis=axis)\n    self.from_logits = from_logits",
    "docstring": "Initializes instance. Args: from_logits: Whether to interpret as a tensor of [logit]( values. By default, we assume that contains probabilities (i.e., values in [0, 1]). label_smoothing: Float in [0, 1]. When 0, no smoothing occurs. When > 0, we compute the loss between the predicted labels and a smoothed version of the true labels, where the smoothing squeezes the labels towards 0.5. Larger values of correspond to heavier smoothing. axis: The axis along which to compute crossentropy (the features axis). Defaults to -1. reduction: Type of to apply to loss. Default value is . indicates that the reduction option will be determined by the usage context. For almost all cases this defaults to . When used with , outside of built-in training loops such as and , using or will raise an error. Please see this custom training [tutorial]( for more details. name: Name for the op. Defaults to 'binary_crossentropy'.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\losses.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:from_logits arg:label_smoothing arg:axis arg:reduction arg:name arguments arg arg arg arg arg arg Call Call Assign"
  },
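A short usage sketch through the public Keras API (this legacy tensorflow.python.keras class mirrors `tf.keras.losses.BinaryCrossentropy`):

```python
import tensorflow as tf

# from_logits=True: y_pred holds raw scores, sigmoid is applied internally;
# label_smoothing pulls the targets toward 0.5.
bce = tf.keras.losses.BinaryCrossentropy(from_logits=True, label_smoothing=0.1)
y_true = [[0.0], [1.0]]
y_pred = [[-2.0], [3.0]]  # logits
print(float(bce(y_true, y_pred)))
```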
  {
    "library": "tensorflow",
    "name": "dimension_size",
    "source_code": "def dimension_size(x, axis):\n    s = tensor_shape.dimension_value(x.shape.with_rank_at_least(np.abs(axis))[axis])\n    if s is not None:\n        return s\n    return array_ops.shape(x)[axis]",
    "docstring": "Returns the size of a specific dimension.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\distributions\\util.py",
    "ast_data": "FunctionDef name:dimension_size arg:x arg:axis arguments arg arg Assign Call Call Call If Compare Return return:yes Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "_flatten",
    "source_code": "def _flatten(self, mesh_dim_name: Optional[str]=None) -> 'DeviceMesh':\n    if not self.mesh_dim_names:\n        raise RuntimeError('Cannot flatten a DeviceMesh without mesh_dim_names!')\n    return _mesh_resources.create_flatten_mesh(self, mesh_dim_name)",
    "docstring": "Returns a 1D DeviceMesh by flattening the current DeviceMesh. If no mesh_dim_name is provided, the default is a string concatentaing the mesh_dim_names of the given submesh with each mesh_dim_name separated by \"_\". For example, if we have a 3D mesh DeviceMesh([[[0, 1], [2, 3]], [[4, 5], [6, 7]]], mesh_dim_names=(\"dp\", \"cp\", \"tp\")), calling mesh_3d[\"dp\", \"cp\"]._flatten() will create a 1D submesh DeviceMesh([0, 1, 2, 3], mesh_dim_names=(\"dp_cp\",)) on rank 0, 1, 2, 3 and a 1D submesh DeviceMesh([4, 5, 6, 7], mesh_dim_names=(\"dp_cp\",)) on rank 4, 5, 6, 7. After the flattened dimension is created, to access the flattened dimesnion in mesh_3d, one can use the existing slicing method to obtain the flattened mesh through calling mesh_3d[\"dp_cp\"].",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\device_mesh.py",
    "ast_data": "FunctionDef name:_flatten arg:self arg:mesh_dim_name arguments arg arg If Raise Call Return return:yes Call"
  },
  {
    "library": "authlib",
    "name": "authenticate_user",
    "source_code": "def authenticate_user(self, refresh_token):\n    raise NotImplementedError()",
    "docstring": "Authenticate the user related to this credential. Developers MUST implement this method in subclass:: def authenticate_user(self, credential): return User.get(credential.user_id) :param refresh_token: Token object :return: user",
    "type": "method",
    "file_path": "authlib\\authlib\\oauth2\\rfc6749\\grants\\refresh_token.py",
    "ast_data": "FunctionDef name:authenticate_user arg:self arg:refresh_token arguments arg arg Raise Call"
  },
  {
    "library": "matplotlib",
    "name": "grab_mouse",
    "source_code": "def grab_mouse(self, ax):\n    if self.mouse_grabber not in (None, ax):\n        raise RuntimeError('Another Axes already grabs mouse input')\n    self.mouse_grabber = ax",
    "docstring": "Set the child which is grabbing the mouse events. Usually called by the widgets themselves. It is an error to call this if the mouse is already grabbed by another Axes.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\backend_bases.py",
    "ast_data": "FunctionDef name:grab_mouse arg:self arg:ax arguments arg arg If Compare Raise Call Assign"
  },
  {
    "library": "kornia",
    "name": "to_tensor",
    "source_code": "def to_tensor(self, as_padded_sequence: bool=False) -> Union[Tensor, List[Tensor]]:\n    if as_padded_sequence:\n        raise NotImplementedError\n    return self._data",
    "docstring": "Cast :class: to a tensor. `(B, N, 2)`",
    "type": "method",
    "file_path": "kornia\\kornia\\geometry\\keypoints.py",
    "ast_data": "FunctionDef name:to_tensor arg:self arg:as_padded_sequence arguments arg arg If Raise Return return:yes"
  },
  {
    "library": "pandas",
    "name": "_interleave",
    "source_code": "def _interleave(self, dtype: np.dtype | None=None, na_value: object=lib.no_default) -> np.ndarray:\n    if not dtype:\n        dtype = interleaved_dtype([blk.dtype for blk in self.blocks])\n    dtype = ensure_np_dtype(dtype)\n    result = np.empty(self.shape, dtype=dtype)\n    itemmask = np.zeros(self.shape[0])\n    if dtype == np.dtype('object') and na_value is lib.no_default:\n        for blk in self.blocks:\n            rl = blk.mgr_locs\n            arr = blk.get_values(dtype)\n            result[rl.indexer] = arr\n            itemmask[rl.indexer] = 1\n        return result\n    for blk in self.blocks:\n        rl = blk.mgr_locs\n        if blk.is_extension:\n            arr = blk.values.to_numpy(dtype=dtype, na_value=na_value)\n        else:\n            arr = blk.get_values(dtype)\n        result[rl.indexer] = arr\n        if na_value is not lib.no_default and blk.dtype.kind in 'mM':\n            result[rl.indexer][isna(arr)] = na_value\n        itemmask[rl.indexer] = 1\n    if not itemmask.all():\n        raise AssertionError('Some items were not contained in blocks')\n    return result",
    "docstring": "Return ndarray from blocks with specified item order Items must be contained in the blocks",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\internals\\managers.py",
    "ast_data": "FunctionDef name:_interleave arg:self arg:dtype arg:na_value arguments arg arg arg If Assign Call Assign Call Assign Call Assign Call If BoolOp Compare Call Compare For Assign Assign Call Assign Assign Return return:yes For Assign If Assign Call Assign Call Assign If BoolOp Compare Compare Assign Call Assign If Call Raise Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "enqueue_tpu_embedding_integer_batch",
    "source_code": "def enqueue_tpu_embedding_integer_batch(batch, device_ordinal, mode_override=None, name=None):\n    if mode_override is None:\n        mode_override = 'unspecified'\n    return gen_tpu_ops.enqueue_tpu_embedding_integer_batch(batch=batch, device_ordinal=device_ordinal, mode_override=mode_override, name=name)",
    "docstring": "A placeholder op for enqueueing embedding IDs to the TPU. Args: batch: A list of 1D tensors, one for each embedding table, containing the indices into the tables. device_ordinal: The TPU device to use. Should be >= 0 and less than the number of TPU cores in the task on which the node is placed. mode_override: A string input that overrides the mode specified in the TPUEmbeddingConfiguration. Supported values are {'unspecified', 'inference', 'train', 'backward_pass_only'}. When set to 'unspecified', the mode set in TPUEmbeddingConfiguration is used, otherwise mode_override is used (optional). name: A name for the operation (optional). Returns: An EnqueueTPUEmbeddingIntegerBatch operation.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\tpu\\ops\\tpu_ops.py",
    "ast_data": "FunctionDef name:enqueue_tpu_embedding_integer_batch arg:batch arg:device_ordinal arg:mode_override arg:name arguments arg arg arg arg If Compare Assign Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "device_name_by_id",
    "source_code": "def device_name_by_id(self, device_id):\n    return self._device_by_id[device_id].device_name",
    "docstring": "Get the name of a device by the debugger-generated ID of the device.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\debug\\lib\\debug_events_reader.py",
    "ast_data": "FunctionDef name:device_name_by_id arg:self arg:device_id arguments arg arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_validate",
    "source_code": "def _validate(configs):\n    if 'device' in configs:\n        for v in configs['device']:\n            assert v in _supported_devices, 'Device needs to be a string.'",
    "docstring": "Validate inputs from users.",
    "type": "function",
    "file_path": "pytorch\\benchmarks\\operator_benchmark\\benchmark_utils.py",
    "ast_data": "FunctionDef name:_validate arg:configs arguments arg If Compare For Compare"
  },
  {
    "library": "tensorflow",
    "name": "deprecated_internal_learning_phase_scope",
    "source_code": "@tf_contextlib.contextmanager\ndef deprecated_internal_learning_phase_scope(value):\n    global _GRAPH_LEARNING_PHASES\n    if value not in {0, 1}:\n        raise ValueError('Expected learning phase to be 0 or 1.')\n    with ops.init_scope():\n        if context.executing_eagerly():\n            previous_eager_value = _GRAPH_LEARNING_PHASES.get(_DUMMY_EAGER_GRAPH.key, None)\n        previous_graph_value = _GRAPH_LEARNING_PHASES.get(get_graph(), None)\n    learning_phase_previously_set = _DUMMY_EAGER_GRAPH.learning_phase_is_set\n    try:\n        deprecated_internal_set_learning_phase(value)\n        yield\n    finally:\n        if not learning_phase_previously_set:\n            _DUMMY_EAGER_GRAPH.learning_phase_is_set = False\n        with ops.init_scope():\n            if context.executing_eagerly():\n                if previous_eager_value is not None:\n                    _GRAPH_LEARNING_PHASES[_DUMMY_EAGER_GRAPH.key] = previous_eager_value\n                elif _DUMMY_EAGER_GRAPH.key in _GRAPH_LEARNING_PHASES:\n                    del _GRAPH_LEARNING_PHASES[_DUMMY_EAGER_GRAPH.key]\n            graph = get_graph()\n            if previous_graph_value is not None:\n                _GRAPH_LEARNING_PHASES[graph] = previous_graph_value\n            elif graph in _GRAPH_LEARNING_PHASES:\n                del _GRAPH_LEARNING_PHASES[graph]",
    "docstring": "An internal-only version of . Unlike the public method, this method does not raise a deprecation warning. This is needed because saved model saving needs to set learning phase to maintain compatibility with code that sets/gets the learning phase, but saved model saving itself shouldn't raise a deprecation warning. We can get rid of this method and its usages when the public API is removed. Args: value: Learning phase value, either 0 or 1 (integers). 0 = test, 1 = train Yields: None. Raises: ValueError: if is neither nor .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\backend.py",
    "ast_data": "FunctionDef name:deprecated_internal_learning_phase_scope arg:value arguments arg If Compare Raise Call With Call If Call Assign Call Assign Call Call Assign Try Call If Assign With Call If Call If Compare Assign If Compare Assign Call If Compare Assign If Compare"
  },
  {
    "library": "django",
    "name": "parse_duration",
    "source_code": "def parse_duration(value):\n    match = standard_duration_re.match(value) or iso8601_duration_re.match(value) or postgres_interval_re.match(value)\n    if match:\n        kw = match.groupdict()\n        sign = -1 if kw.pop('sign', '+') == '-' else 1\n        if kw.get('microseconds'):\n            kw['microseconds'] = kw['microseconds'].ljust(6, '0')\n        kw = {k: float(v.replace(',', '.')) for k, v in kw.items() if v is not None}\n        days = datetime.timedelta(kw.pop('days', 0.0) or 0.0)\n        if match.re == iso8601_duration_re:\n            days *= sign\n        return days + sign * datetime.timedelta(**kw)",
    "docstring": "Parse a duration string and return a datetime.timedelta. The preferred format for durations in Django is '%d %H:%M:%S.%f'. Also supports ISO 8601 representation and PostgreSQL's day-time interval format.",
    "type": "function",
    "file_path": "django\\django\\utils\\dateparse.py",
    "ast_data": "FunctionDef name:parse_duration arg:value arguments arg Assign BoolOp Call Call Call If Assign Call Assign Compare Call If Call Assign Call Assign Call Call Call Compare Assign Call BoolOp Call If Compare Return return:yes Call"
  },
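A short illustration of the three accepted formats described in the parse_duration entry above; the input strings are invented examples.

```python
from datetime import timedelta
from django.utils.dateparse import parse_duration

# Django's preferred '%d %H:%M:%S.%f' format:
assert parse_duration("3 0:00:10.5") == timedelta(days=3, seconds=10.5)
# ISO 8601 representation:
assert parse_duration("P3DT00H00M10S") == timedelta(days=3, seconds=10)
# PostgreSQL day-time interval format:
assert parse_duration("3 days 0:00:10") == timedelta(days=3, seconds=10)
# Strings matching none of the formats yield None:
assert parse_duration("not a duration") is None
```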
  {
    "library": "tensorflow",
    "name": "softsign",
    "source_code": "@dispatch.add_dispatch_support\n@doc_controls.do_not_generate_docs\ndef softsign(x):\n    return nn.softsign(x)",
    "docstring": "Softsign of a tensor. Args: x: A tensor or variable. Returns: A tensor.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\backend.py",
    "ast_data": "FunctionDef name:softsign arg:x arguments arg Return return:yes Call"
  },
  {
    "library": "kornia",
    "name": "_create_position_encoding",
    "source_code": "def _create_position_encoding(self, max_shape: Tuple[int, int]) -> Tensor:\n    pe = zeros((self.d_model, *max_shape))\n    y_position = torch.ones(max_shape).cumsum(0).float().unsqueeze(0)\n    x_position = torch.ones(max_shape).cumsum(1).float().unsqueeze(0)\n    if self.temp_bug_fix:\n        div_term = torch.exp(torch.arange(0, self.d_model // 2, 2).float() * (-math.log(10000.0) / (self.d_model // 2)))\n    else:\n        div_term = torch.exp(torch.arange(0, self.d_model // 2, 2).float() * (-math.log(10000.0) / self.d_model // 2))\n    div_term = div_term[:, None, None]\n    pe[0::4, :, :] = sin(x_position * div_term)\n    pe[1::4, :, :] = cos(x_position * div_term)\n    pe[2::4, :, :] = sin(y_position * div_term)\n    pe[3::4, :, :] = cos(y_position * div_term)\n    return pe.unsqueeze(0)",
    "docstring": "Create a position encoding from scratch. For 1/8 feature map (which is standard): If the input image size is H, W (both divisible by 8), the max_shape should be (H//8, W//8).",
    "type": "method",
    "file_path": "kornia\\kornia\\feature\\loftr\\utils\\position_encoding.py",
    "ast_data": "FunctionDef name:_create_position_encoding arg:self arg:max_shape arguments arg arg Assign Call Assign Call Call Call Call Assign Call Call Call Call If Assign Call Call Call Call Assign Call Call Call Call Assign Assign Call Assign Call Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_export_to_saved_model_graph",
    "source_code": "def _export_to_saved_model_graph(self, object_map, tensor_map, options, **kwargs):\n    resource_list = self._v._export_to_saved_model_graph(object_map, tensor_map, options, **kwargs)\n    object_map[self] = object_map[self._v]\n    return resource_list",
    "docstring": "For implementing .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\ps_values.py",
    "ast_data": "FunctionDef name:_export_to_saved_model_graph arg:self arg:object_map arg:tensor_map arg:options arguments arg arg arg arg arg Assign Call Assign Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "DataConversionWarning",
    "source_code": "class DataConversionWarning(UserWarning):\n    pass",
    "docstring": "Warning used to notify implicit data conversions happening in the code. This warning occurs when some input data needs to be converted or interpreted in a way that may not match the user's expectations. For example, this warning may occur when the user - passes an integer array to a function which expects float input and will convert the input - requests a non-copying operation, but a copy is required to meet the implementation's data-type expectations; - passes an input whose shape can be interpreted ambiguously. .. versionchanged:: 0.18 Moved from sklearn.utils.validation.",
    "type": "class",
    "file_path": "scikit-learn\\sklearn\\exceptions.py",
    "ast_data": "ClassDef name:DataConversionWarning"
  },
  {
    "library": "pandas",
    "name": "sheets",
    "source_code": "@property\ndef sheets(self) -> dict[str, Any]:\n    raise NotImplementedError",
    "docstring": "Mapping of sheet names to sheet objects.",
    "type": "method",
    "file_path": "pandas\\pandas\\io\\excel\\_base.py",
    "ast_data": "FunctionDef name:sheets arg:self arguments arg Raise"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, name=None):\n    self._name = name\n    self._items = []",
    "docstring": "Menu constructor. Args: name: (str or None) name of this menu.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\debug\\cli\\debugger_cli_common.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:name arguments arg arg Assign Assign"
  },
  {
    "library": "tensorflow",
    "name": "_GetBcastSubshape",
    "source_code": "def _GetBcastSubshape(subscripts):\n    start = subscripts.find(ellipsis)\n    if start == -1:\n        return (0, 0)\n    remaining = len(subscripts) - (start + len(ellipsis))\n    end = -remaining if remaining > 0 else None\n    return (start, end)",
    "docstring": "Returns a tuple denoting the slice mapping to ellipsis. For a given subscript, returns a tuple (start, end) denoting the start axis index and the (negative) end axis index respectively. For any input Tensor described by the subscript, would be the slice represented by the ellipsis. E.g. For returns . If ellipsis is not present in , returns . Args: subscripts: A string denoting the einsum subscript.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\linalg_grad.py",
    "ast_data": "FunctionDef name:_GetBcastSubshape arg:subscripts arguments arg Assign Call If Compare Return return:yes Assign Call Call Assign Compare Return return:yes"
  },
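Since `_GetBcastSubshape` is private to linalg_grad.py, here is a standalone copy of its logic to make the (start, end) convention concrete; the subscripts are invented.

```python
# Standalone copy for illustration; ellipsis is the literal string '...'.
ellipsis = '...'

def get_bcast_subshape(subscripts):
    start = subscripts.find(ellipsis)
    if start == -1:
        return (0, 0)
    remaining = len(subscripts) - (start + len(ellipsis))
    end = -remaining if remaining > 0 else None
    return (start, end)

assert get_bcast_subshape('ab...cd') == (2, -2)   # x[2:-2] is the ellipsis slice
assert get_bcast_subshape('abc...') == (3, None)  # x[3:] when ellipsis is trailing
assert get_bcast_subshape('abcd') == (0, 0)       # no ellipsis present
```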
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, root):\n    self._root_ref = root if isinstance(root, weakref.ref) else weakref.ref(root)",
    "docstring": "Configure the trackable view. Args: root: A object whose variables (including the variables of dependencies, recursively) should be saved. May be a weak reference.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\checkpoint\\trackable_view.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:root arguments arg arg Assign Call Call"
  },
  {
    "library": "numpy",
    "name": "as_strided",
    "source_code": "@set_module('numpy.lib.stride_tricks')\ndef as_strided(x, shape=None, strides=None, subok=False, writeable=True):\n    x = np.array(x, copy=None, subok=subok)\n    interface = dict(x.__array_interface__)\n    if shape is not None:\n        interface['shape'] = tuple(shape)\n    if strides is not None:\n        interface['strides'] = tuple(strides)\n    array = np.asarray(DummyArray(interface, base=x))\n    array.dtype = x.dtype\n    view = _maybe_view_as_subclass(x, array)\n    if view.flags.writeable and (not writeable):\n        view.flags.writeable = False\n    return view",
    "docstring": "Create a view into the array with the given shape and strides. .. warning:: This function has to be used with extreme care, see notes. Parameters ---------- x : ndarray Array to create a new. shape : sequence of int, optional The shape of the new array. Defaults to `` when possible.",
    "type": "function",
    "file_path": "numpy\\numpy\\lib\\_stride_tricks_impl.py",
    "ast_data": "FunctionDef name:as_strided arg:x arg:shape arg:strides arg:subok arg:writeable arguments arg arg arg arg arg Assign Call Assign Call If Compare Assign Call If Compare Assign Call Assign Call Call Assign Assign Call If BoolOp Assign Return return:yes Call"
  },
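A sketch of the classic overlapping-window use of as_strided, with `writeable=False` per the warning above; the array contents are arbitrary.

```python
import numpy as np
from numpy.lib.stride_tricks import as_strided

a = np.arange(6)
# Overlapping windows of length 3: shape (4, 3), both axes step one element.
windows = as_strided(a, shape=(4, 3), strides=(a.itemsize, a.itemsize),
                     writeable=False)  # read-only view, as advised
print(windows)
# [[0 1 2]
#  [1 2 3]
#  [2 3 4]
#  [3 4 5]]
```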
  {
    "library": "matplotlib",
    "name": "_usepackage_if_not_loaded",
    "source_code": "def _usepackage_if_not_loaded(package, *, option=None):\n    option = f'[{option}]' if option is not None else ''\n    return '\\\\makeatletter\\\\@ifpackageloaded{%(package)s}{}{\\\\usepackage%(option)s{%(package)s}}\\\\makeatother' % {'package': package, 'option': option}",
    "docstring": "Output LaTeX code that loads a package (possibly with an option) if it hasn't been loaded yet. LaTeX cannot load twice a package with different options, so this helper can be used to protect against users loading arbitrary packages/options in their custom preamble.",
    "type": "function",
    "file_path": "matplotlib\\lib\\matplotlib\\texmanager.py",
    "ast_data": "FunctionDef name:_usepackage_if_not_loaded arg:package arguments arg arg Assign Compare Return return:yes"
  },
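What the generated guard looks like for hypothetical arguments (the helper is private to texmanager.py, so this is illustration only):

```python
from matplotlib.texmanager import _usepackage_if_not_loaded

# Hypothetical package/option pair chosen for illustration.
print(_usepackage_if_not_loaded('underscore', option='strings'))
# \makeatletter\@ifpackageloaded{underscore}{}{\usepackage[strings]{underscore}}\makeatother
```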
  {
    "library": "pytorch",
    "name": "_FreeEventQueue",
    "source_code": "class _FreeEventQueue:\n\n    def __init__(self) -> None:\n        self._queue: collections.deque[torch.Event] = collections.deque()\n        self._max_num_inflight_all_gathers = 2\n\n    def enqueue(self, free_event: torch.Event) -> None:\n        self._queue.append(free_event)\n\n    def dequeue_if_needed(self) -> Optional[torch.Event]:\n        if len(self._queue) >= self._max_num_inflight_all_gathers:\n            return self._dequeue()\n        return None\n\n    def _dequeue(self) -> Optional[torch.Event]:\n        if self._queue:\n            event = self._queue.popleft()\n            return event\n        return None",
    "docstring": "This tracks all pending frees corresponding to inflight all-gathers. The queueing pattern is iterative enqueues with a single dequeue per iteration once the limit `` is reached.",
    "type": "class",
    "file_path": "pytorch\\torch\\distributed\\fsdp\\_limiter_utils.py",
    "ast_data": "ClassDef name:_FreeEventQueue FunctionDef name:__init__ arg:self arguments arg Call Assign FunctionDef name:enqueue arg:self arg:free_event arguments arg arg Call FunctionDef name:dequeue_if_needed arg:self arguments arg If Compare Call Return return:yes Call Return return:no FunctionDef name:_dequeue arg:self arguments arg If Assign Call Return return:yes Return return:no"
  },
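A sketch of the enqueue/dequeue pattern the docstring describes, assuming a CUDA device is available; the loop body stands in for issuing an all-gather.

```python
import torch
from torch.distributed.fsdp._limiter_utils import _FreeEventQueue

queue = _FreeEventQueue()
for _ in range(5):
    # Once the limit of 2 inflight all-gathers is reached, block on the
    # oldest recorded free event before issuing a new one.
    sync_event = queue.dequeue_if_needed()
    if sync_event is not None:
        sync_event.synchronize()
    # ... issue the all-gather here ...
    free_event = torch.cuda.Event()
    free_event.record()
    queue.enqueue(free_event)
```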
  {
    "library": "kornia",
    "name": "gaussian_blur2d",
    "source_code": "def gaussian_blur2d(input: Tensor, kernel_size: tuple[int, int] | int, sigma: tuple[float, float] | Tensor, border_type: str='reflect', separable: bool=True) -> Tensor:\n    KORNIA_CHECK_IS_TENSOR(input)\n    if isinstance(sigma, tuple):\n        sigma = tensor([sigma], device=input.device, dtype=input.dtype)\n    else:\n        KORNIA_CHECK_IS_TENSOR(sigma)\n        sigma = sigma.to(device=input.device, dtype=input.dtype)\n    if separable:\n        ky, kx = _unpack_2d_ks(kernel_size)\n        bs = sigma.shape[0]\n        kernel_x = get_gaussian_kernel1d(kx, sigma[:, 1].view(bs, 1))\n        kernel_y = get_gaussian_kernel1d(ky, sigma[:, 0].view(bs, 1))\n        out = filter2d_separable(input, kernel_x, kernel_y, border_type)\n    else:\n        kernel = get_gaussian_kernel2d(kernel_size, sigma)\n        out = filter2d(input, kernel, border_type)\n    return out",
    "docstring": "Create an operator that blurs a tensor using a Gaussian filter. .. image:: _static/img/gaussian_blur2d.png The operator smooths the given tensor with a gaussian kernel by convolving it to each channel. It supports batched operation. Arguments: input: the input tensor with shape :math:. kernel_size: the size of the kernel. sigma: the standard deviation of the kernel. border_type: the padding mode to be applied before convolving. The expected modes are: `(B, C, H, W)here `__. Examples: >>> input = torch.rand(2, 4, 5, 5) >>> output = gaussian_blur2d(input, (3, 3), (1.5, 1.5)) >>> output.shape torch.Size([2, 4, 5, 5]) >>> output = gaussian_blur2d(input, (3, 3), torch.tensor([[1.5, 1.5]])) >>> output.shape torch.Size([2, 4, 5, 5])",
    "type": "function",
    "file_path": "kornia\\kornia\\filters\\gaussian.py",
    "ast_data": "FunctionDef name:gaussian_blur2d arg:input arg:kernel_size arg:sigma arg:border_type arg:separable arguments arg arg arg arg arg Call If Call Assign Call Call Assign Call If Assign Call Assign Assign Call Call Assign Call Call Assign Call Assign Call Assign Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "set_aspect",
    "source_code": "def set_aspect(self, aspect=False):\n    self._aspect = aspect",
    "docstring": "Parameters ---------- aspect : bool",
    "type": "method",
    "file_path": "matplotlib\\lib\\mpl_toolkits\\axes_grid1\\axes_divider.py",
    "ast_data": "FunctionDef name:set_aspect arg:self arg:aspect arguments arg arg Assign"
  },
  {
    "library": "pytorch",
    "name": "coordinatewise_increasing_map",
    "source_code": "@staticmethod\ndef coordinatewise_increasing_map(x: Union[AllIn, AllVR], y: Union[AllIn, AllVR], fn: AllFn2) -> AllVR:\n    x, y = (ValueRanges.wrap(x), ValueRanges.wrap(y))\n    return ValueRanges(fn(x.lower, y.lower), fn(x.upper, y.upper))",
    "docstring": "It's increasing on each coordinate. Mathematically: For every 1 <= i <= n and x_i <= y_i we have that f(x1, .., xn) <= f(x1, , yi, ..., xn)",
    "type": "method",
    "file_path": "pytorch\\torch\\utils\\_sympy\\value_ranges.py",
    "ast_data": "FunctionDef name:coordinatewise_increasing_map arg:x arg:y arg:fn arguments arg arg arg Assign Call Call Return return:yes Call Call Call"
  },
  {
    "library": "kornia",
    "name": "pyrdown",
    "source_code": "def pyrdown(input: Tensor, border_type: str='reflect', align_corners: bool=False, factor: float=2.0) -> Tensor:\n    KORNIA_CHECK_SHAPE(input, ['B', 'C', 'H', 'W'])\n    kernel: Tensor = _get_pyramid_gaussian_kernel()\n    _, _, height, width = input.shape\n    x_blur: Tensor = filter2d(input, kernel, border_type)\n    out: Tensor = F.interpolate(x_blur, size=(int(float(height) / factor), int(float(width) // factor)), mode='bilinear', align_corners=align_corners)\n    return out",
    "docstring": "Blur a tensor and downsamples it. .. image:: _static/img/pyrdown.png Args: input: the tensor to be downsampled. border_type: the padding mode to be applied before convolving. The expected modes are: ``. align_corners: interpolation flag. factor: the downsampling factor Return: the downsampled tensor. Examples: >>> input = torch.arange(16, dtype=torch.float32).reshape(1, 1, 4, 4) >>> pyrdown(input, align_corners=True) tensor([[[[ 3.7500, 5.2500], [ 9.7500, 11.2500]]]])",
    "type": "function",
    "file_path": "kornia\\kornia\\geometry\\transform\\pyramid.py",
    "ast_data": "FunctionDef name:pyrdown arg:input arg:border_type arg:align_corners arg:factor arguments arg arg arg arg Call Call Assign Call Call Call Call Call Call Return return:yes"
  },
  {
    "library": "authlib",
    "name": "create_token_response",
    "source_code": "@hooked\ndef create_token_response(self):\n    client = self.request.client\n    authorization_code = self.request.authorization_code\n    user = self.authenticate_user(authorization_code)\n    if not user:\n        raise InvalidGrantError(\"There is no 'user' for this code.\")\n    self.request.user = user\n    scope = authorization_code.get_scope()\n    token = self.generate_token(user=user, scope=scope, include_refresh_token=client.check_grant_type('refresh_token'))\n    log.debug('Issue token %r to %r', token, client)\n    self.save_token(token)\n    self.delete_authorization_code(authorization_code)\n    return (200, token, self.TOKEN_RESPONSE_HEADER)",
    "docstring": "If the access token request is valid and authorized, the authorization server issues an access token and optional refresh token as described in Section 5.1. If the request client authentication failed or is invalid, the authorization server returns an error response as described in Section 5.2. Per _. An example successful response: .. code-block:: http HTTP/1.1 200 OK Content-Type: application/json Cache-Control: no-store Pragma: no-cache { \"access_token\":\"2YotnFZFEjr1zCsicMWpAA\", \"token_type\":\"example\", \"expires_in\":3600, \"refresh_token\":\"tGzv3JOkF0XG5Qx2TlKWIA\", \"example_parameter\":\"example_value\" } :returns: (status_code, body, headers) .. _:",
    "type": "method",
    "file_path": "authlib\\authlib\\oauth2\\rfc6749\\grants\\authorization_code.py",
    "ast_data": "FunctionDef name:create_token_response arg:self arguments arg Assign Assign Assign Call If Raise Call Assign Assign Call Assign Call Call Call Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "create_large_matmul_savedmodel",
    "source_code": "def create_large_matmul_savedmodel(out_dir):\n    root = autotrackable.AutoTrackable()\n    root.f = def_function.function(lambda x, y: math_ops.matmul(x, y), input_signature=[tensor_spec.TensorSpec([3000, 5000], dtypes.float32), tensor_spec.TensorSpec([5000, 4000], dtypes.float32)])\n    root.f(x=array_ops.zeros((3000, 5000)), y=array_ops.zeros((5000, 4000)))\n    save_dir = os.path.join(out_dir, 'x_matmul_y_large')\n    save.save(root, save_dir, root.f)\n    with open(os.path.join(save_dir, 'variables', 'variables.index'), 'w'):\n        pass",
    "docstring": "Create a SavedModel that performs a large matmul.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\tools\\make_aot_compile_models.py",
    "ast_data": "FunctionDef name:create_large_matmul_savedmodel arg:out_dir arguments arg Assign Call Assign Call arguments arg arg Call Call Call Call Call Call Assign Call Call With Call Call"
  },
  {
    "library": "pandas",
    "name": "to_html",
    "source_code": "def to_html(self, buf: FilePath | WriteBuffer[str] | None=None, encoding: str | None=None, classes: str | list | tuple | None=None, notebook: bool=False, border: int | bool | None=None, table_id: str | None=None, render_links: bool=False) -> str | None:\n    from pandas.io.formats.html import HTMLFormatter, NotebookFormatter\n    Klass = NotebookFormatter if notebook else HTMLFormatter\n    html_formatter = Klass(self.fmt, classes=classes, border=border, table_id=table_id, render_links=render_links)\n    string = html_formatter.to_string()\n    return save_to_buffer(string, buf=buf, encoding=encoding)",
    "docstring": "Render a DataFrame to a html table. Parameters ---------- buf : str, path object, file-like object, or None, default None String, path object (implementing `class tag. The default value for this parameter is governed by `` tag if specified. render_links : bool, default False Convert URLs to HTML links.",
    "type": "method",
    "file_path": "pandas\\pandas\\io\\formats\\format.py",
    "ast_data": "FunctionDef name:to_html arg:self arg:buf arg:encoding arg:classes arg:notebook arg:border arg:table_id arg:render_links arguments arg arg arg arg arg arg arg arg Assign Assign Call Assign Call Return return:yes Call"
  },
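The public `DataFrame.to_html` mirrors this formatter method; a small sketch with invented data:

```python
import pandas as pd

df = pd.DataFrame({"name": ["a", "b"], "url": ["https://x.test", "https://y.test"]})
html = df.to_html(classes="table table-striped", table_id="t1", render_links=True)
# 'html' is an HTML <table> string carrying the given CSS classes and id,
# with the URL column rendered as <a href=...> links.
```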
  {
    "library": "tensorflow",
    "name": "_apply_dense",
    "source_code": "def _apply_dense(self, grad, var):\n    raise RuntimeError('This function should never be called')",
    "docstring": "This function should never be called.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\training\\experimental\\loss_scale_optimizer.py",
    "ast_data": "FunctionDef name:_apply_dense arg:self arg:grad arg:var arguments arg arg arg Raise Call"
  },
  {
    "library": "scipy",
    "name": "proc_minimisers",
    "source_code": "def proc_minimisers(self):\n    for v in self:\n        v.minimiser()\n        v.maximiser()",
    "docstring": "Check for minimisers.",
    "type": "method",
    "file_path": "scipy\\scipy\\optimize\\_shgo_lib\\_vertex.py",
    "ast_data": "FunctionDef name:proc_minimisers arg:self arguments arg For Call Call"
  },
  {
    "library": "scrapy",
    "name": "without_none_values",
    "source_code": "def without_none_values(iterable: Mapping[_KT, _VT] | Iterable[_KT]) -> dict[_KT, _VT] | Iterable[_KT]:\n    if isinstance(iterable, Mapping):\n        return {k: v for k, v in iterable.items() if v is not None}\n    return type(iterable)((v for v in iterable if v is not None))",
    "docstring": "Return a copy of `` have been removed.",
    "type": "function",
    "file_path": "scrapy\\scrapy\\utils\\python.py",
    "ast_data": "FunctionDef name:without_none_values arg:iterable arguments arg If Call Return return:yes Call Compare Return return:yes Call Call Compare"
  },
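A quick illustration of both branches of without_none_values; note the original container type is preserved for non-mappings.

```python
from scrapy.utils.python import without_none_values

assert without_none_values({"a": 1, "b": None}) == {"a": 1}
assert without_none_values([1, None, 2]) == [1, 2]
assert without_none_values((1, None, 2)) == (1, 2)  # type(iterable) is reused
```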
  {
    "library": "pytorch",
    "name": "TritonBundleEntry",
    "source_code": "@dataclasses.dataclass(frozen=True)\nclass TritonBundleEntry:\n    kernel_hash: str\n    device: int\n    directory: str",
    "docstring": "When we have compiled a triton kernel, we take note of that kernel by its triton generated hash, its device, and where this kernel is located. This is the minimum information we can use to later retrieve this kernel from file system.",
    "type": "class",
    "file_path": "pytorch\\torch\\_inductor\\triton_bundler.py",
    "ast_data": "ClassDef name:TritonBundleEntry Call"
  },
  {
    "library": "numpy",
    "name": "trace",
    "source_code": "def trace(self, offset=0, axis1=0, axis2=1, dtype=None, out=None):\n    m = self._mask\n    if m is nomask:\n        result = super().trace(offset=offset, axis1=axis1, axis2=axis2, out=out)\n        return result.astype(dtype)\n    else:\n        D = self.diagonal(offset=offset, axis1=axis1, axis2=axis2)\n        return D.astype(dtype).filled(0).sum(axis=-1, out=out)",
    "docstring": "(this docstring should be overwritten)",
    "type": "method",
    "file_path": "numpy\\numpy\\ma\\core.py",
    "ast_data": "FunctionDef name:trace arg:self arg:offset arg:axis1 arg:axis2 arg:dtype arg:out arguments arg arg arg arg arg arg Assign If Compare Assign Call Call Return return:yes Call Assign Call Return return:yes Call Call Call"
  },
  {
    "library": "django",
    "name": "__getitem__",
    "source_code": "def __getitem__(self, index):\n    if isinstance(index, int):\n        if index < 0:\n            raise IndexError('Negative indices are not allowed on OGR Layers.')\n        return self._make_feature(index)\n    elif isinstance(index, slice):\n        start, stop, stride = index.indices(self.num_feat)\n        return [self._make_feature(fid) for fid in range(start, stop, stride)]\n    else:\n        raise TypeError('Integers and slices may only be used when indexing OGR Layers.')",
    "docstring": "Get the Feature at the specified index.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\gdal\\layer.py",
    "ast_data": "FunctionDef name:__getitem__ arg:self arg:index arguments arg arg If Call If Compare Raise Call Return return:yes Call If Call Assign Call Return return:yes Call Call Raise Call"
  },
  {
    "library": "scipy",
    "name": "SemiInfiniteFunc",
    "source_code": "class SemiInfiniteFunc:\n\n    def __init__(self, func, start, infty):\n        self._func = func\n        self._start = start\n        self._sgn = -1 if infty < 0 else 1\n        self._tmin = sys.float_info.min ** 0.5\n\n    def get_t(self, x):\n        z = self._sgn * (x - self._start) + 1\n        if z == 0:\n            return np.inf\n        return 1 / z\n\n    def __call__(self, t):\n        if t < self._tmin:\n            return 0.0\n        else:\n            x = self._start + self._sgn * (1 - t) / t\n            f = self._func(x)\n            return self._sgn * (f / t) / t",
    "docstring": "Argument transform from (start, +-oo) to (0, 1)",
    "type": "class",
    "file_path": "scipy\\scipy\\integrate\\_quad_vec.py",
    "ast_data": "ClassDef name:SemiInfiniteFunc FunctionDef name:__init__ arg:self arg:func arg:start arg:infty arguments arg arg arg arg Assign Assign Assign Compare Assign FunctionDef name:get_t arg:self arg:x arguments arg arg Assign If Compare Return return:yes Return return:yes FunctionDef name:__call__ arg:self arg:t arguments arg arg If Compare Return return:yes Assign Assign Call Return return:yes"
  },
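A sketch of the substitution at work: integrating exp(-x) over (0, +oo) by integrating the transformed integrand over (0, 1). The class is private to _quad_vec.py, so this is illustration only.

```python
import numpy as np
from scipy.integrate import quad
from scipy.integrate._quad_vec import SemiInfiniteFunc

# Maps f on (0, +oo) to g on (0, 1) via x = start + (1 - t)/t, so that
# the integral of g over (0, 1) equals the integral of f over (0, +oo).
g = SemiInfiniteFunc(lambda x: np.exp(-x), start=0.0, infty=+1)
val, _ = quad(g, 0, 1)
print(val)  # ~1.0, the exact value of the original integral
```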
  {
    "library": "tensorflow",
    "name": "make_merged_spec",
    "source_code": "def make_merged_spec(self, dev):\n    return self.__class__(*self._get_combined_properties(dev))",
    "docstring": "Returns a new DeviceSpec which incorporates . When combining specs, will take precedence over the current spec. So for instance: is equivalent to: Args: dev: a Returns: A new which combines and",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\device_spec.py",
    "ast_data": "FunctionDef name:make_merged_spec arg:self arg:dev arguments arg arg Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "binary_cross_entropy_with_logits",
    "source_code": "def binary_cross_entropy_with_logits(input: Tensor, target: Tensor, weight: Optional[Tensor]=None, size_average: Optional[bool]=None, reduce: Optional[bool]=None, reduction: str='mean', pos_weight: Optional[Tensor]=None) -> Tensor:\n    if has_torch_function_variadic(input, target, weight, pos_weight):\n        return handle_torch_function(binary_cross_entropy_with_logits, (input, target, weight, pos_weight), input, target, weight=weight, size_average=size_average, reduce=reduce, reduction=reduction, pos_weight=pos_weight)\n    if size_average is not None or reduce is not None:\n        reduction_enum = _Reduction.legacy_get_enum(size_average, reduce)\n    else:\n        reduction_enum = _Reduction.get_enum(reduction)\n    if not target.size() == input.size():\n        raise ValueError(f'Target size ({target.size()}) must be the same as input size ({input.size()})')\n    return torch.binary_cross_entropy_with_logits(input, target, weight, pos_weight, reduction_enum)",
    "docstring": "Compute Binary Cross Entropy between target and input logits. See :class: for details. Args: input: Tensor of arbitrary shape as unnormalized scores (often referred to as logits). target: Tensor of the same shape as input with values between 0 and 1 weight (Tensor, optional): a manual rescaling weight if provided it's repeated to match input tensor shape size_average (bool, optional): Deprecated (see :attr:). reduce (bool, optional): Deprecated (see :attr:). reduction (str, optional): Specifies the reduction to apply to the output: `size_averagereducereduction` Examples:: >>> input = torch.randn(3, requires_grad=True) >>> target = torch.empty(3).random_(2) >>> loss = F.binary_cross_entropy_with_logits(input, target) >>> loss.backward()",
    "type": "function",
    "file_path": "pytorch\\torch\\nn\\functional.py",
    "ast_data": "FunctionDef name:binary_cross_entropy_with_logits arg:input arg:target arg:weight arg:size_average arg:reduce arg:reduction arg:pos_weight arguments arg arg arg arg arg arg arg If Call Return return:yes Call If BoolOp Compare Compare Assign Call Assign Call If Compare Call Call Raise Call Call Call Return return:yes Call"
  },
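A quick sanity check that the fused call matches the naive sigmoid + binary_cross_entropy composition (the fused form is the numerically stable one):

```python
import torch
import torch.nn.functional as F

input = torch.randn(4)
target = torch.empty(4).random_(2)

fused = F.binary_cross_entropy_with_logits(input, target)
naive = F.binary_cross_entropy(torch.sigmoid(input), target)
assert torch.allclose(fused, naive, atol=1e-6)
```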
  {
    "library": "tensorflow",
    "name": "_run_static_range_ptq",
    "source_code": "def _run_static_range_ptq(src_saved_model_path: str, dst_saved_model_path: str, quant_opts: _QuantizationOptions, representative_dataset: Mapping[str, _RepresentativeDatasetFile], signature_def_map: _SignatureDefMap) -> None:\n    logging.info('Running static-range post-training quantization.')\n    signature_def_map_serialized = _serialize_signature_def_map(signature_def_map)\n    dataset_file_map_serialized = {signature_key: dataset_file.SerializeToString() for signature_key, dataset_file in representative_dataset.items()}\n    pywrap_quantize_model.quantize_ptq_static_range(src_saved_model_path, dst_saved_model_path, quantization_options_serialized=quant_opts.SerializeToString(), signature_keys=list(quant_opts.signature_keys), signature_def_map_serialized=signature_def_map_serialized, py_function_library=py_function_lib.PyFunctionLibrary(), representative_dataset_file_map_serialized=dataset_file_map_serialized)",
    "docstring": "Runs static-range Post-Training Quantization. Runs static-range PTQ for the model. Runs the calibration step with to collect statistics required for quantization. This produces the quantized GraphDef along with the SignatureDefs which might have been modified according to the changes in the graph. Args: src_saved_model_path: Path to the source SavedModel directory. dst_saved_model_path: Path to the destination SavedModel directory. quant_opts: Quantization options. representative_dataset: A map from signature key to the saved representative dataset file. signature_def_map: Signature def key -> SignatureDef mapping. Raises: ValueError if the graph doesn't contain a valid signature.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\compiler\\mlir\\quantization\\tensorflow\\python\\quantize_model.py",
    "ast_data": "FunctionDef name:_run_static_range_ptq arg:src_saved_model_path arg:dst_saved_model_path arg:quant_opts arg:representative_dataset arg:signature_def_map arguments arg arg arg arg arg Call Assign Call Assign Call Call Call Call Call Call"
  },
  {
    "library": "kornia",
    "name": "forward",
    "source_code": "def forward(self, x: Tensor) -> Tensor:\n    x = self.patch_embed(x)\n    x = self.layers(x)\n    if self.mobile_sam:\n        x = x.unflatten(1, (self.feat_size, self.feat_size)).permute(0, 3, 1, 2)\n        x = self.neck(x)\n    else:\n        x = x.mean(1)\n        x = self.head(self.norm_head(x))\n    return x",
    "docstring": "Classify images if ``.",
    "type": "method",
    "file_path": "kornia\\kornia\\contrib\\models\\tiny_vit.py",
    "ast_data": "FunctionDef name:forward arg:self arg:x arguments arg arg Assign Call Assign Call If Assign Call Call Assign Call Assign Call Assign Call Call Return return:yes"
  },
  {
    "library": "authlib",
    "name": "validate_userinfo_signed_response_alg",
    "source_code": "def validate_userinfo_signed_response_alg(self):\n    self._validate_claim_value('userinfo_signed_response_alg')",
    "docstring": "JWS alg algorithm [JWA] REQUIRED for signing UserInfo Responses. If this is specified, the response will be JWT [JWT] serialized, and signed using JWS. The default, if omitted, is for the UserInfo Response to return the Claims as a UTF-8 [RFC3629] encoded JSON object using the application/json content-type.",
    "type": "method",
    "file_path": "authlib\\authlib\\oidc\\registration\\claims.py",
    "ast_data": "FunctionDef name:validate_userinfo_signed_response_alg arg:self arguments arg Call"
  },
  {
    "library": "matplotlib",
    "name": "x1",
    "source_code": "@property\ndef x1(self):\n    return self.get_points()[1, 0]",
    "docstring": "The second of the pair of *x* coordinates that define the bounding box. This is not guaranteed to be greater than :attr: (for that, use :attr:).",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\transforms.py",
    "ast_data": "FunctionDef name:x1 arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "freq",
    "source_code": "@property\ndef freq(self) -> BaseOffset:\n    return self.dtype.freq",
    "docstring": "Return the frequency object for this PeriodArray.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\arrays\\period.py",
    "ast_data": "FunctionDef name:freq arg:self arguments arg Return return:yes"
  },
  {
    "library": "pygame",
    "name": "move_to_front",
    "source_code": "def move_to_front(self, sprite):\n    self.change_layer(sprite, self.get_top_layer())",
    "docstring": "bring the sprite to front layer LayeredUpdates.move_to_front(sprite): return None Brings the sprite to front by changing the sprite layer to the top-most layer. The sprite is added at the end of the list of sprites in that top-most layer.",
    "type": "method",
    "file_path": "pygame\\src_py\\sprite.py",
    "ast_data": "FunctionDef name:move_to_front arg:self arg:sprite arguments arg arg Call Call"
  },
  {
    "library": "numpy",
    "name": "laggrid2d",
    "source_code": "def laggrid2d(x, y, c):\n    return pu._gridnd(lagval, c, x, y)",
    "docstring": "Evaluate a 2-D Laguerre series on the Cartesian product of x and y. This function returns the values: .. math:: p(a,b) = \\sum_{i,j} c_{i,j} * L_i(a) * L_j(b) where the points `axbyxyxyxyccxyxycxy`. See Also -------- lagval, lagval2d, lagval3d, laggrid3d Examples -------- >>> from numpy.polynomial.laguerre import laggrid2d >>> c = [[1, 2], [3, 4]] >>> laggrid2d([0, 1], [0, 1], c) array([[10., 4.], [ 3., 1.]])",
    "type": "function",
    "file_path": "numpy\\numpy\\polynomial\\laguerre.py",
    "ast_data": "FunctionDef name:laggrid2d arg:x arg:y arg:c arguments arg arg arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "get_annotation_str",
    "source_code": "def get_annotation_str(annotation):\n    if isinstance(annotation, ast.Name):\n        return annotation.id\n    elif isinstance(annotation, ast.Attribute):\n        return '.'.join([get_annotation_str(annotation.value), annotation.attr])\n    elif isinstance(annotation, ast.Subscript):\n        subscript_slice = annotation.slice\n        return f'{get_annotation_str(annotation.value)}[{get_annotation_str(subscript_slice)}]'\n    elif isinstance(annotation, ast.Tuple):\n        return ','.join([get_annotation_str(elt) for elt in annotation.elts])\n    elif isinstance(annotation, ast.Constant):\n        return f'{annotation.value}'\n    return None",
    "docstring": "Convert an AST node containing a type annotation to the string present in the source that represents the same annotation.",
    "type": "function",
    "file_path": "pytorch\\torch\\_jit_internal.py",
    "ast_data": "FunctionDef name:get_annotation_str arg:annotation arguments arg If Call Return return:yes If Call Return return:yes Call Call If Call Assign Return return:yes Call Call If Call Return return:yes Call Call If Call Return return:yes Return return:no"
  },
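A sketch of round-tripping an annotation through the AST with get_annotation_str; the source string is invented.

```python
import ast
from torch._jit_internal import get_annotation_str

source = "def f(x: typing.Optional[torch.Tensor]) -> 'MyClass': ..."
tree = ast.parse(source)
arg_annotation = tree.body[0].args.args[0].annotation
print(get_annotation_str(arg_annotation))  # typing.Optional[torch.Tensor]
```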
  {
    "library": "scikit-learn",
    "name": "get_metadata_routing",
    "source_code": "def get_metadata_routing(self):\n    router = MetadataRouter(owner=self.__class__.__name__)\n    router.add(estimator=self.estimator, method_mapping=MethodMapping().add(caller='fit', callee='fit'))\n    scorer, _ = self._get_scorers()\n    router.add(scorer=scorer, method_mapping=MethodMapping().add(caller='score', callee='score').add(caller='fit', callee='score'))\n    router.add(splitter=self.cv, method_mapping=MethodMapping().add(caller='fit', callee='split'))\n    return router",
    "docstring": "Get metadata routing of this object. Please check :ref: on how the routing mechanism works. .. versionadded:: 1.4 Returns ------- routing : MetadataRouter A :class: encapsulating routing information.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\model_selection\\_search.py",
    "ast_data": "FunctionDef name:get_metadata_routing arg:self arguments arg Assign Call Call Call Call Assign Call Call Call Call Call Call Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "eventTreeTraversal",
    "source_code": "def eventTreeTraversal(self):\n    yield from traverse_bfs(self.event_tree)",
    "docstring": "We need to use BFS traversal order to avoid duplicate match.",
    "type": "method",
    "file_path": "pytorch\\torch\\profiler\\_pattern_matcher.py",
    "ast_data": "FunctionDef name:eventTreeTraversal arg:self arguments arg Call"
  },
  {
    "library": "tensorflow",
    "name": "serialize_graph_view",
    "source_code": "def serialize_graph_view(graph_view: graph_view_lib.ObjectGraphView, object_map: Optional[Mapping[base.Trackable, base.Trackable]]=None, call_with_mapped_captures: Optional[Callable[..., Any]]=None, cache: Optional[Dict[base.Trackable, Any]]=None) -> ...:\n    trackable_data, node_ids = _gather_trackable_data(graph_view, object_map)\n    tensor_trackables, pystate_trackables, registered_trackables = _split_trackables(trackable_data)\n    object_graph_proto = _fill_object_graph_proto(trackable_data)\n    serialized_tensors = _get_and_write_tensors_to_serialize(tensor_trackables, node_ids, call_with_mapped_captures, cache, object_graph_proto)\n    registered_savers = _get_and_write_registered_savers(registered_trackables, object_graph_proto)\n    if cache is None:\n        feed_additions = None\n        serialized_tensors.update(_get_and_write_tensors_to_serialize(pystate_trackables, node_ids, call_with_mapped_captures, cache, object_graph_proto))\n    else:\n        new_serialized_tensors, feed_additions = _get_and_write_pystate_feed_additions(pystate_trackables, cache, object_graph_proto)\n        serialized_tensors.update(new_serialized_tensors)\n    util.add_checkpoint_values_check(object_graph_proto)\n    return (serialized_tensors, feed_additions, registered_savers, object_graph_proto)",
    "docstring": "Gathers serialization objects, and creates a TrackableObjectGraph proto.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\checkpoint\\save_util.py",
    "ast_data": "FunctionDef name:serialize_graph_view arg:graph_view arg:object_map arg:call_with_mapped_captures arg:cache arguments arg arg arg arg Assign Call Assign Call Assign Call Assign Call Assign Call If Compare Assign Call Call Assign Call Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_is_fully_sharded",
    "source_code": "def _is_fully_sharded(module: nn.Module) -> bool:\n    registry = _get_registry(module)\n    if registry is None:\n        return False\n    return 'fully_shard' in registry",
    "docstring": "Check if module is marked with fully_shard.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\_composable\\replicate.py",
    "ast_data": "FunctionDef name:_is_fully_sharded arg:module arguments arg Assign Call If Compare Return return:yes Return return:yes Compare"
  },
  {
    "library": "pytorch",
    "name": "is_pointwise_use",
    "source_code": "def is_pointwise_use(use: Node, is_pointwise_fn: Callable[[torch._ops.OpOverload], bool]=lambda _: False) -> bool:\n    if not use.op == 'call_function':\n        return False\n    if not (isinstance(use.target, torch._ops.OpOverload) or use.target is operator.getitem):\n        return False\n    target = cast(torch._ops.OpOverload, use.target)\n    if target is operator.getitem or is_view(target):\n        return all((is_pointwise_use(u, is_pointwise_fn) for u in use.users))\n    return torch.Tag.pointwise in target.tags or is_pointwise_fn(target)",
    "docstring": "Do all uses of this op have torch.Tag.pointwise or return True for optional Uses in views ops will follow the views uses",
    "type": "function",
    "file_path": "pytorch\\torch\\_inductor\\utils.py",
    "ast_data": "FunctionDef name:is_pointwise_use arg:use arg:is_pointwise_fn arguments arg arg arguments arg If Compare Return return:yes If BoolOp Call Compare Return return:yes Assign Call If BoolOp Compare Call Return return:yes Call Call Return return:yes BoolOp Compare Call"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, name, default_name=None, values=None) -> None:\n    self._name_scope = name_scope(name, default_name, values, skip_on_eager=False)\n    self._name = default_name if name is None else name",
    "docstring": "Initialize the context manager. Args: name: The name argument that is passed to the op function. default_name: The default name to use if the argument is . values: The list of arguments that are passed to the op function. Raises: TypeError: if is passed in but not a string.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\ops.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:name arg:default_name arg:values arguments arg arg arg arg Assign Call Assign Compare"
  },
  {
    "library": "django",
    "name": "search_form",
    "source_code": "def search_form(cl):\n    return {'cl': cl, 'show_result_count': cl.result_count != cl.full_result_count, 'search_var': SEARCH_VAR, 'is_popup_var': IS_POPUP_VAR, 'is_facets_var': IS_FACETS_VAR}",
    "docstring": "Display a search form for searching the list.",
    "type": "function",
    "file_path": "django\\django\\contrib\\admin\\templatetags\\admin_list.py",
    "ast_data": "FunctionDef name:search_form arg:cl arguments arg Return return:yes Compare"
  },
  {
    "library": "pytorch",
    "name": "deco_unary_ufunc",
    "source_code": "def deco_unary_ufunc(torch_func):\n\n    @normalizer\n    def wrapped(x: ArrayLike, /, out: Optional[OutArray]=None, *, where=True, casting: Optional[CastingModes]='same_kind', order='K', dtype: Optional[DTypeLike]=None, subok: NotImplementedType=False, signature=None, extobj=None):\n        if dtype is not None:\n            x = _util.typecast_tensor(x, dtype, casting)\n        if torch_func.__name__ in _fp_unary:\n            x = _util.cast_int_to_float(x)\n        result = torch_func(x)\n        result = _ufunc_postprocess(result, out, casting)\n        return result\n    wrapped.__qualname__ = torch_func.__name__\n    wrapped.__name__ = torch_func.__name__\n    return wrapped",
    "docstring": "Common infra for unary ufuncs. Normalize arguments, sort out type casting, broadcasting and delegate to the pytorch functions for the actual work.",
    "type": "function",
    "file_path": "pytorch\\torch\\_numpy\\_ufuncs.py",
    "ast_data": "FunctionDef name:deco_unary_ufunc arg:torch_func arguments arg FunctionDef name:wrapped arg:out arguments arg arg arg arg arg arg arg arg arg If Compare Assign Call If Compare Assign Call Assign Call Assign Call Return return:yes Assign Assign Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "get_children",
    "source_code": "def get_children(self):\n    return [self._child]",
    "docstring": "Return the list of children.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\offsetbox.py",
    "ast_data": "FunctionDef name:get_children arg:self arguments arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "reset_max_memory_allocated",
    "source_code": "def reset_max_memory_allocated(device: 'Device'=None) -> None:\n    warnings.warn('torch.cuda.reset_max_memory_allocated now calls torch.cuda.reset_peak_memory_stats, which resets /all/ peak memory stats.', FutureWarning)\n    return reset_peak_memory_stats(device=device)",
    "docstring": "Reset the starting point in tracking maximum GPU memory occupied by tensors for a given device. See :func: for details. Args: device (torch.device or int, optional): selected device. Returns statistic for the current device, given by :func:, if :attr: is `~torch.cuda.reset_peak_memory_statscuda-memory-management` for more details about GPU memory management.",
    "type": "function",
    "file_path": "pytorch\\torch\\cuda\\memory.py",
    "ast_data": "FunctionDef name:reset_max_memory_allocated arg:device arguments arg Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "_sequential_split_and_maybe_inline_subgraphs",
    "source_code": "def _sequential_split_and_maybe_inline_subgraphs(gm: torch.fx.GraphModule, graph_signature: Optional[ExportGraphSignature]) -> tuple[torch.fx.GraphModule, Optional[ExportGraphSignature]]:\n    need_replacing = any((_is_autocast_node(node) for node in gm.graph.nodes))\n    if not need_replacing:\n        return (gm, graph_signature)\n    new_gm = _split_autocast(gm)\n\n    def _maybe_inline_or_replace_with_hop(node: torch.fx.Node) -> None:\n        if _is_autocast_sub_mod(node):\n            _replace_with_hop(node)\n        else:\n            assert node.op == 'call_module'\n            assert isinstance(node.target, str)\n            node_inline_(node)\n    return _sequential_split_and_maybe_inline_subgraphs_helper(new_gm, graph_signature, _maybe_inline_or_replace_with_hop)",
    "docstring": "Helper function for replace_autocast_with_hop_pass(). Split the graph module into multiple subgraphs based on the autocast nodes. For each subgraph, decides whether to construct a HOO subgraph, or inline the calls back into the parent graph module. Nodes between and are considered as a subgraph.",
    "type": "function",
    "file_path": "pytorch\\torch\\_export\\passes\\replace_autocast_with_hop_pass.py",
    "ast_data": "FunctionDef name:_sequential_split_and_maybe_inline_subgraphs arg:gm arg:graph_signature arguments arg arg Assign Call Call If Return return:yes Assign Call FunctionDef name:_maybe_inline_or_replace_with_hop arg:node arguments arg If Call Call Compare Call Call Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "__call__",
    "source_code": "def __call__(self, df=None, scale=None, seed=None):\n    return invwishart_frozen(df, scale, seed)",
    "docstring": "Create a frozen inverse Wishart distribution. See for more information.",
    "type": "method",
    "file_path": "scipy\\scipy\\stats\\_multivariate.py",
    "ast_data": "FunctionDef name:__call__ arg:self arg:df arg:scale arg:seed arguments arg arg arg arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "GetColocationGroups",
    "source_code": "def GetColocationGroups(self):\n    return tf_item.TF_GetColocationGroups(self.tf_item)",
    "docstring": "Return a list of hard colocation constraints. All the nodes in a colocation tuple must be placed on the same device for the model to work. Returns: A list of colocation tuples.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\grappler\\item.py",
    "ast_data": "FunctionDef name:GetColocationGroups arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "set_anchor",
    "source_code": "def set_anchor(self, anchor):\n    if isinstance(anchor, str):\n        _api.check_in_list(mtransforms.Bbox.coefs, anchor=anchor)\n    elif not isinstance(anchor, (tuple, list)) or len(anchor) != 2:\n        raise TypeError('anchor must be str or 2-tuple')\n    self._anchor = anchor",
    "docstring": "Parameters ---------- anchor : (float, float) or {'C', 'SW', 'S', 'SE', 'E', 'NE', 'N', 'NW', 'W'} Either an (*x*, *y*) pair of relative coordinates (0 is left or bottom, 1 is right or top), 'C' (center), or a cardinal direction ('SW', southwest, is bottom left, etc.). See Also -------- .Axes.set_anchor",
    "type": "method",
    "file_path": "matplotlib\\lib\\mpl_toolkits\\axes_grid1\\axes_divider.py",
    "ast_data": "FunctionDef name:set_anchor arg:self arg:anchor arguments arg arg If Call Call If BoolOp Call Compare Call Raise Call Assign"
  },
  {
    "library": "pytorch",
    "name": "decode_exception_table_varint",
    "source_code": "def decode_exception_table_varint(bytes_iter: Iterator[int]) -> int:\n    b = next(bytes_iter)\n    val = b & 63\n    while b & 64:\n        val <<= 6\n        b = next(bytes_iter)\n        val |= b & 63\n    return val",
    "docstring": "Inverse of .",
    "type": "function",
    "file_path": "pytorch\\torch\\_dynamo\\bytecode_transformation.py",
    "ast_data": "FunctionDef name:decode_exception_table_varint arg:bytes_iter arguments arg Assign Call Assign While Assign Call Return return:yes"
  },
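Assuming the module's matching `encode_exception_table_varint` helper (named as the inverse in the docstring above), a round-trip sketch of the 6-bits-per-byte encoding, where bit 6 marks every byte except the last:

```python
from torch._dynamo.bytecode_transformation import (
    decode_exception_table_varint,
    encode_exception_table_varint,  # assumed counterpart of the decoder
)

encoded = encode_exception_table_varint(1234)  # e.g. [83, 18]: (19|64), 18
assert decode_exception_table_varint(iter(encoded)) == 1234
```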
  {
    "library": "pytorch",
    "name": "TypePromotionSnapshot",
    "source_code": "@dataclasses.dataclass\nclass TypePromotionSnapshot:\n    args_dtypes: Mapping[int, torch.dtype]\n    'Mapping from arg position to dtype to promote to.'\n    kwargs_dtypes: Mapping[str, torch.dtype]\n    'Mapping from kwarg name to dtype to promote to.'\n    out_dtype: torch.dtype\n    'Expected output dtype of the node.'",
    "docstring": "Type promotion snapshot for a fx node and its inputs. Contains the promoted dtype for args and kwargs that needs promoting. Contains the expected node output dtype.",
    "type": "class",
    "file_path": "pytorch\\torch\\onnx\\_internal\\fx\\passes\\type_promotion.py",
    "ast_data": "ClassDef name:TypePromotionSnapshot"
  },
  {
    "library": "pytorch",
    "name": "get_native_backend_config_dict",
    "source_code": "def get_native_backend_config_dict():\n    return get_native_backend_config().to_dict()",
    "docstring": "Return the for PyTorch Native backend (fbgemm/qnnpack) in dictionary form.",
    "type": "function",
    "file_path": "pytorch\\torch\\ao\\quantization\\backend_config\\native.py",
    "ast_data": "FunctionDef name:get_native_backend_config_dict arguments Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_load",
    "source_code": "def _load(self):\n    module = _importlib.import_module(self.__name__)\n    self._parent_module_globals[self._local_name] = module\n    self.__dict__.update(module.__dict__)\n    return module",
    "docstring": "Import the target module and insert it into the parent's namespace.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\virtual_root_template_v1.__init__.py",
    "ast_data": "FunctionDef name:_load arg:self arguments arg Assign Call Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_export_debug_info",
    "source_code": "def _export_debug_info(exported_graph: ops.Graph, export_dir: str):\n    debug_builder = tf_stack.GraphDebugInfoBuilder()\n    for fn_name in exported_graph._functions:\n        fn = exported_graph._get_function(fn_name)\n        if not isinstance(fn, defun.AtomicFunction):\n            continue\n        debug_builder.AppendGraphDebugInfo(fn_name, fn.graph_debug_info)\n    graph_debug_info = debug_builder.Build()\n    file_io.atomic_write_string_to_file(file_io.join(path_helpers.get_or_create_debug_dir(export_dir), constants.DEBUG_INFO_FILENAME_PB), graph_debug_info.SerializeToString(deterministic=True))",
    "docstring": "Exports debug information from graph to file. Creates and writes GraphDebugInfo with traces for ops in all functions of the exported_graph. Args: exported_graph: A Graph that has been created by tracing a saveable view. export_dir: SavedModel directory in which to write the debug info.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\saved_model\\save.py",
    "ast_data": "FunctionDef name:_export_debug_info arg:exported_graph arg:export_dir arguments arg arg Assign Call For Assign Call If Call Call Assign Call Call Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_normalise_fspath",
    "source_code": "def _normalise_fspath(path):\n    return os.fspath(path) if isinstance(path, os.PathLike) else path",
    "docstring": "Convert pathlib-like objects to str (__fspath__ compatibility, PEP 519).",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\data\\ops\\readers.py",
    "ast_data": "FunctionDef name:_normalise_fspath arg:path arguments arg Return return:yes Call Call"
  },
  {
    "library": "numpy",
    "name": "roots",
    "source_code": "@property\ndef roots(self):\n    return roots(self._coeffs)",
    "docstring": "The roots of the polynomial, where self(x) == 0",
    "type": "method",
    "file_path": "numpy\\numpy\\lib\\_polynomial_impl.py",
    "ast_data": "FunctionDef name:roots arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "export_opnames",
    "source_code": "def export_opnames(m):\n    return torch._C._export_opnames(m._c)",
    "docstring": "Generate new bytecode for a Script module. Returns what the op list would be for a Script Module based off the current code base. If you have a LiteScriptModule and want to get the currently present list of ops call _export_operator_list instead.",
    "type": "function",
    "file_path": "pytorch\\torch\\jit\\__init__.py",
    "ast_data": "FunctionDef name:export_opnames arg:m arguments arg Return return:yes Call"
  },
  {
    "library": "kornia",
    "name": "normals_to_rgb255",
    "source_code": "def normals_to_rgb255(image: Tensor) -> Tensor:\n    KORNIA_CHECK_IS_COLOR(image)\n    rgb255 = (0.5 * (image + 1.0)).clip(0.0, 1.0) * 255\n    return rgb255",
    "docstring": "Convert surface normals to RGB [0, 255] for visualization purposes. Args: image: surface normals to be converted to RGB with quantization of shape :math:. Returns: RGB version of the image with shape of shape :math:. Example: >>> input = torch.rand(2, 3, 4, 5) >>> output = normals_to_rgb255(input) # 2x3x4x5",
    "type": "function",
    "file_path": "kornia\\kornia\\color\\rgb.py",
    "ast_data": "FunctionDef name:normals_to_rgb255 arg:image arguments arg Call Assign Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "is_multi_outputs_template",
    "source_code": "def is_multi_outputs_template(input_buf: Optional[Union[Buffer, Operation]]) -> bool:\n    from . import ir\n    return isinstance(input_buf, ir.CppTemplateBuffer) and isinstance(input_buf.layout, ir.MultiOutputLayout)",
    "docstring": "Check if input buffer is a multi-outputs template buffer",
    "type": "function",
    "file_path": "pytorch\\torch\\_inductor\\utils.py",
    "ast_data": "FunctionDef name:is_multi_outputs_template arg:input_buf arguments arg Return return:yes BoolOp Call Call"
  },
  {
    "library": "pandas",
    "name": "_convert_datetime_to_stata_type",
    "source_code": "def _convert_datetime_to_stata_type(fmt: str) -> np.dtype:\n    if fmt in ['tc', '%tc', 'td', '%td', 'tw', '%tw', 'tm', '%tm', 'tq', '%tq', 'th', '%th', 'ty', '%ty']:\n        return np.dtype(np.float64)\n    else:\n        raise NotImplementedError(f'Format {fmt} not implemented')",
    "docstring": "Convert from one of the stata date formats to a type in TYPE_MAP.",
    "type": "function",
    "file_path": "pandas\\pandas\\io\\stata.py",
    "ast_data": "FunctionDef name:_convert_datetime_to_stata_type arg:fmt arguments arg If Compare Return return:yes Call Raise Call"
  },
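A quick check of the helper above. It is a private pandas function (import path per this entry's file_path), so the import is illustrative only.

```python
import numpy as np
from pandas.io.stata import _convert_datetime_to_stata_type

# All supported Stata date formats map to float64.
assert _convert_datetime_to_stata_type("%tc") == np.dtype(np.float64)
assert _convert_datetime_to_stata_type("td") == np.dtype(np.float64)
```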
  {
    "library": "numpy",
    "name": "polygrid3d",
    "source_code": "def polygrid3d(x, y, z, c):\n    return pu._gridnd(polyval, c, x, y, z)",
    "docstring": "Evaluate a 3-D polynomial on the Cartesian product of x, y and z. This function returns the values: .. math:: p(a,b,c) = \\sum_{i,j,k} c_{i,j,k} * a^i * b^j * c^k where the points `axbyczxyzxyzxyzccxyzxyzcxy`. See Also -------- polyval, polyval2d, polygrid2d, polyval3d Examples -------- >>> from numpy.polynomial import polynomial as P >>> c = ((1, 2, 3), (4, 5, 6), (7, 8, 9)) >>> P.polygrid3d([0, 1], [0, 1], [0, 1], c) array([[ 1., 13.], [ 6., 51.]])",
    "type": "function",
    "file_path": "numpy\\numpy\\polynomial\\polynomial.py",
    "ast_data": "FunctionDef name:polygrid3d arg:x arg:y arg:z arg:c arguments arg arg arg arg Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "can_zoom",
    "source_code": "def can_zoom(self):\n    return False",
    "docstring": "Return whether this Axes supports the zoom box button functionality. This Axes object does not support interactive zoom box.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\projections\\geo.py",
    "ast_data": "FunctionDef name:can_zoom arg:self arguments arg Return return:yes"
  },
  {
    "library": "scipy",
    "name": "kolmognp",
    "source_code": "def kolmognp(n, x):\n    it = np.nditer([n, x, None])\n    for _n, _x, z in it:\n        if np.isnan(_n):\n            z[...] = _n\n            continue\n        if int(_n) != _n:\n            raise ValueError(f'n is not integral: {_n}')\n        z[...] = _kolmogn_p(int(_n), _x)\n    result = it.operands[-1]\n    return result",
    "docstring": "Computes the PDF for the two-sided Kolmogorov-Smirnov distribution. Parameters ---------- n : integer, array_like the number of samples x : float, array_like The K-S statistic, float between 0 and 1 Returns ------- pdf : ndarray The PDF at the specified locations The return value has shape the result of numpy broadcasting n and x.",
    "type": "function",
    "file_path": "scipy\\scipy\\stats\\_ksstats.py",
    "ast_data": "FunctionDef name:kolmognp arg:n arg:x arguments arg arg Assign Call For If Call Assign If Compare Call Raise Call Assign Call Call Assign Return return:yes"
  },
  {
    "library": "pygame",
    "name": "set_instrument",
    "source_code": "def set_instrument(self, instrument_id, channel=0):\n    if not 0 <= instrument_id <= 127:\n        raise ValueError(f'Undefined instrument id: {instrument_id}')\n    if not 0 <= channel <= 15:\n        raise ValueError('Channel not between 0 and 15.')\n    self.write_short(192 + channel, instrument_id)",
    "docstring": "select an instrument for a channel, with a value between 0 and 127 Output.set_instrument(instrument_id, channel=0) Also called \"patch change\" or \"program change\".",
    "type": "method",
    "file_path": "pygame\\src_py\\midi.py",
    "ast_data": "FunctionDef name:set_instrument arg:self arg:instrument_id arg:channel arguments arg arg arg If Compare Raise Call If Compare Raise Call Call"
  },
  {
    "library": "pygame",
    "name": "frac",
    "source_code": "def frac(value):\n    return value - floor(value)",
    "docstring": "return fractional part of x",
    "type": "function",
    "file_path": "pygame\\src_py\\draw_py.py",
    "ast_data": "FunctionDef name:frac arg:value arguments arg Return return:yes Call"
  },
  {
    "library": "sphinx",
    "name": "DoctestTransform",
    "source_code": "class DoctestTransform(SphinxTransform):\n    default_priority = 500\n\n    def apply(self, **kwargs: Any) -> None:\n        for node in self.document.findall(nodes.doctest_block):\n            node['classes'].append('doctest')",
    "docstring": "Set \"doctest\" style to each doctest_block node",
    "type": "class",
    "file_path": "sphinx\\sphinx\\transforms\\__init__.py",
    "ast_data": "ClassDef name:DoctestTransform Assign FunctionDef name:apply arg:self arguments arg arg For Call Call"
  },
  {
    "library": "matplotlib",
    "name": "format_data_short",
    "source_code": "def format_data_short(self, value):\n    return self.format_data(value)",
    "docstring": "Return a short string version of the tick value. Defaults to the position-independent long value.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\ticker.py",
    "ast_data": "FunctionDef name:format_data_short arg:self arg:value arguments arg arg Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "_isnan",
    "source_code": "@cache_readonly\ndef _isnan(self) -> npt.NDArray[np.bool_]:\n    if self._can_hold_na:\n        return isna(self)\n    else:\n        values = np.empty(len(self), dtype=np.bool_)\n        values.fill(False)\n        return values",
    "docstring": "Return if each value is NaN.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\indexes\\base.py",
    "ast_data": "FunctionDef name:_isnan arg:self arguments arg If Return return:yes Call Assign Call Call Call Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "_CVIterableWrapper",
    "source_code": "class _CVIterableWrapper(BaseCrossValidator):\n\n    def __init__(self, cv):\n        self.cv = list(cv)\n\n    def get_n_splits(self, X=None, y=None, groups=None):\n        return len(self.cv)\n\n    def split(self, X=None, y=None, groups=None):\n        for train, test in self.cv:\n            yield (train, test)",
    "docstring": "Wrapper class for old style cv objects and iterables.",
    "type": "class",
    "file_path": "scikit-learn\\sklearn\\model_selection\\_split.py",
    "ast_data": "ClassDef name:_CVIterableWrapper FunctionDef name:__init__ arg:self arg:cv arguments arg arg Assign Call FunctionDef name:get_n_splits arg:self arg:X arg:y arg:groups arguments arg arg arg arg Return return:yes Call FunctionDef name:split arg:self arg:X arg:y arg:groups arguments arg arg arg arg For"
  },
  {
    "library": "matplotlib",
    "name": "set_orientation",
    "source_code": "def set_orientation(self, orientation):\n    is_horizontal = _api.check_getitem({'horizontal': True, 'vertical': False}, orientation=orientation)\n    if is_horizontal == self.is_horizontal():\n        return\n    self.switch_orientation()",
    "docstring": "Set the orientation of the event line. Parameters ---------- orientation : {'horizontal', 'vertical'}",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\collections.py",
    "ast_data": "FunctionDef name:set_orientation arg:self arg:orientation arguments arg arg Assign Call If Compare Call Return return:no Call"
  },
  {
    "library": "django",
    "name": "get_field_type",
    "source_code": "def get_field_type(self, data_type, description):\n    return self.data_types_reverse[data_type]",
    "docstring": "Hook for a database backend to use the cursor description to match a Django field type to a database column. For Oracle, the column data_type on its own is insufficient to distinguish between a FloatField and IntegerField, for example.",
    "type": "method",
    "file_path": "django\\django\\db\\backends\\base\\introspection.py",
    "ast_data": "FunctionDef name:get_field_type arg:self arg:data_type arg:description arguments arg arg arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "upsample_bilinear",
    "source_code": "def upsample_bilinear(input, size=None, scale_factor=None):\n    warnings.warn('nn.quantized.functional.upsample_bilinear is deprecated. Use nn.quantized.functional.interpolate instead.')\n    return interpolate(input, size, scale_factor, mode='bilinear', align_corners=True)",
    "docstring": "Upsamples the input, using bilinear upsampling. .. warning:: This function is deprecated in favor of :func:. This is equivalent with ``. .. note:: The input quantization parameters propagate to the output. .. note:: Only 2D inputs are supported Args: input (Tensor): quantized input size (int or Tuple[int, int]): output spatial size. scale_factor (int or Tuple[int, int]): multiplier for spatial size",
    "type": "function",
    "file_path": "pytorch\\torch\\ao\\nn\\quantized\\functional.py",
    "ast_data": "FunctionDef name:upsample_bilinear arg:input arg:size arg:scale_factor arguments arg arg arg Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "predict",
    "source_code": "def predict(self, input: Tensor) -> Tensor:\n    head_output = self.head(input)\n    output = torch.argmax(head_output, dim=1)\n    not_in_shortlist = output >= self.shortlist_size\n    all_in_shortlist = not not_in_shortlist.any()\n    if all_in_shortlist:\n        return output\n    elif not_in_shortlist.all():\n        log_prob = self._get_full_log_prob(input, head_output)\n        return torch.argmax(log_prob, dim=1)\n    else:\n        log_prob = self._get_full_log_prob(input[not_in_shortlist], head_output[not_in_shortlist])\n        output[not_in_shortlist] = torch.argmax(log_prob, dim=1)\n        return output",
    "docstring": "Return the class with the highest probability for each example in the input minibatch. This is equivalent to `(N, \\texttt{in\\_features})(N)`",
    "type": "method",
    "file_path": "pytorch\\torch\\nn\\modules\\adaptive.py",
    "ast_data": "FunctionDef name:predict arg:self arg:input arguments arg arg Assign Call Assign Call Assign Compare Assign Call If Return return:yes If Call Assign Call Return return:yes Call Assign Call Assign Call Return return:yes"
  },
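A short usage sketch for the method above, which lives on `torch.nn.AdaptiveLogSoftmaxWithLoss` (per this entry's file_path); the sizes and cutoffs are illustrative.

```python
import torch

asm = torch.nn.AdaptiveLogSoftmaxWithLoss(
    in_features=16, n_classes=100, cutoffs=[10, 50]
)
x = torch.randn(8, 16)
classes = asm.predict(x)  # shape (8,): one class index per example
```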
  {
    "library": "tensorflow",
    "name": "shape",
    "source_code": "@property\ndef shape(self) -> tensor_shape.TensorShape:\n    if self._shape_val is None:\n        dims, unknown_shape = self._shape\n        if unknown_shape:\n            self._shape_val = tensor_shape.unknown_shape()\n        else:\n            self._shape_val = tensor_shape.TensorShape(dims)\n    return self._shape_val",
    "docstring": "Returns a that represents the shape of this tensor. >>> t = tf.constant([1,2,3,4,5]) >>> t.shape TensorShape([5]) is equivalent to . In a or when building a model using , they return the build-time shape of the tensor, which may be partially unknown. A is not a tensor. Use to get a tensor containing the shape, calculated at runtime. See , and for details and examples.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\tensor.py",
    "ast_data": "FunctionDef name:shape arg:self arguments arg If Compare Assign If Assign Call Assign Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "get_lr",
    "source_code": "@override\ndef get_lr(self) -> list[float]:\n    _warn_get_lr_called_within_step(self)\n    if self._is_initial or self.last_epoch > self.total_iters:\n        return [group['lr'] for group in self.optimizer.param_groups]\n    decay_factor = ((1.0 - self.last_epoch / self.total_iters) / (1.0 - (self.last_epoch - 1) / self.total_iters)) ** self.power\n    return [group['lr'] * decay_factor for group in self.optimizer.param_groups]",
    "docstring": "Compute the learning rate.",
    "type": "method",
    "file_path": "pytorch\\torch\\optim\\lr_scheduler.py",
    "ast_data": "FunctionDef name:get_lr arg:self arguments arg Call If BoolOp Compare Return return:yes Assign Return return:yes"
  },
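The decay-factor formula in the get_lr() above matches `torch.optim.lr_scheduler.PolynomialLR`; a minimal sketch of driving it, with illustrative hyperparameters.

```python
import torch

model = torch.nn.Linear(4, 2)
opt = torch.optim.SGD(model.parameters(), lr=0.1)
# With power=1.0 the learning rate decays linearly to 0 over total_iters steps.
sched = torch.optim.lr_scheduler.PolynomialLR(opt, total_iters=5, power=1.0)
for _ in range(5):
    opt.step()
    sched.step()
```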
  {
    "library": "tensorflow",
    "name": "_write_config_section",
    "source_code": "def _write_config_section(self, tt_config, tt_parameters):\n    self._write_report('%s %s\\n' % (_MARKER_SECTION_BEGIN, _SECTION_NAME_CONFIG))\n    self._write_report('%s %s\\n' % (_FIELD_NAME_VERSION, tt_config.version))\n    self._write_report('%s %s\\n' % (_FIELD_NAME_DEVICE, tt_config.device_type))\n    self._write_report('%s %s\\n' % (_FIELD_NAME_TRACE_MODE, tt_parameters.trace_mode))\n    self._write_report('%s %s\\n' % (_FIELD_NAME_SUBMODE, tt_parameters.submode))\n    self._write_report('%s %s\\n' % (_FIELD_NAME_NUM_REPLICAS, tt_config.num_replicas))\n    self._write_report('%s %s\\n' % (_FIELD_NAME_NUM_REPLICAS_PER_HOST, tt_config.num_replicas_per_host))\n    self._write_report('%s %s\\n' % (_FIELD_NAME_NUM_HOSTS, tt_config.num_hosts))\n    self._write_report('%s %s\\n' % (_MARKER_SECTION_END, _SECTION_NAME_CONFIG))",
    "docstring": "Writes the config section of the report.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\tpu\\tensor_tracer_report.py",
    "ast_data": "FunctionDef name:_write_config_section arg:self arg:tt_config arg:tt_parameters arguments arg arg arg Call Call Call Call Call Call Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "MemoryProfileDispatchMode",
    "source_code": "class MemoryProfileDispatchMode(TorchDispatchMode):\n\n    def __init__(self, memory_tracker) -> None:\n        self.memory_tracker = memory_tracker\n\n    def __torch_dispatch__(self, func, types, args=..., kwargs=None):\n        rs = func(*args, **kwargs)\n        if func == torch.ops.aten.detach.default:\n            return rs\n        func_name: str = self.memory_tracker._cur_module_name + '.' + func.__name__ + '_' + str(self.memory_tracker._operator_names[func.__name__])\n        self.memory_tracker._operator_names[func.__name__] = self.memory_tracker._operator_names[func.__name__] + 1\n        self.memory_tracker._record_memory_stats(func_name)\n        return rs",
    "docstring": "Run in `` to get memory stats at operator level.",
    "type": "class",
    "file_path": "pytorch\\torch\\distributed\\_tools\\memory_tracker.py",
    "ast_data": "ClassDef name:MemoryProfileDispatchMode FunctionDef name:__init__ arg:self arg:memory_tracker arguments arg arg Assign FunctionDef name:__torch_dispatch__ arg:self arg:func arg:types arg:args arg:kwargs arguments arg arg arg arg arg Assign Call If Compare Return return:yes Call Assign Call Return return:yes"
  },
  {
    "library": "scipy",
    "name": "points",
    "source_code": "def points(self):\n    return czt_points(self.m, self.w, self.a)",
    "docstring": "Return the points at which the chirp z-transform is computed.",
    "type": "method",
    "file_path": "scipy\\scipy\\signal\\_czt.py",
    "ast_data": "FunctionDef name:points arg:self arguments arg Return return:yes Call"
  },
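A usage sketch for the points() method through `scipy.signal.CZT`, the chirp z-transform class that exposes it; with the default `w` and `a` the points lie on the unit circle.

```python
import numpy as np
from scipy.signal import CZT

czt16 = CZT(n=16, m=16)   # defaults trace the full unit circle
pts = czt16.points()       # 16 complex evaluation points
assert np.allclose(np.abs(pts), 1.0)
```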
  {
    "library": "django",
    "name": "SuccessMessageMixin",
    "source_code": "class SuccessMessageMixin:\n    success_message = ''\n\n    def form_valid(self, form):\n        response = super().form_valid(form)\n        success_message = self.get_success_message(form.cleaned_data)\n        if success_message:\n            messages.success(self.request, success_message)\n        return response\n\n    def get_success_message(self, cleaned_data):\n        return self.success_message % cleaned_data",
    "docstring": "Add a success message on successful form submission.",
    "type": "class",
    "file_path": "django\\django\\contrib\\messages\\views.py",
    "ast_data": "ClassDef name:SuccessMessageMixin Assign FunctionDef name:form_valid arg:self arg:form arguments arg arg Assign Call Call Assign Call If Call Return return:yes FunctionDef name:get_success_message arg:self arg:cleaned_data arguments arg arg Return return:yes"
  },
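Typical usage of the mixin above with a class-based view, as in the Django docs pattern; `Author`, `myapp`, and the URL name are placeholder names, not part of the source above.

```python
from django.contrib.messages.views import SuccessMessageMixin
from django.urls import reverse_lazy
from django.views.generic.edit import CreateView

from myapp.models import Author  # hypothetical model


class AuthorCreateView(SuccessMessageMixin, CreateView):
    model = Author
    fields = ["name"]
    success_url = reverse_lazy("author-list")  # hypothetical URL name
    # cleaned_data is interpolated into the message via get_success_message()
    success_message = "%(name)s was created successfully"
```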
  {
    "library": "pandas",
    "name": "__add__",
    "source_code": "@unpack_zerodim_and_defer('__add__')\ndef __add__(self, other):\n    return self._arith_method(other, operator.add)",
    "docstring": "Get Addition of DataFrame and other, column-wise. Equivalent to `otherSeriesotherotherSeriesDataFrame.addaxis='index'otherDataFrame`, both columns names and the index are aligned. >>> other = pd.DataFrame( ... {\"height\": [0.2, 0.4, 0.6]}, index=[\"elk\", \"moose\", \"deer\"] ... ) >>> df[[\"height\", \"weight\"]] + other height weight deer NaN NaN elk 1.7 NaN moose 3.0 NaN",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\arraylike.py",
    "ast_data": "FunctionDef name:__add__ arg:self arg:other arguments arg arg Return return:yes Call Call"
  },
  {
    "library": "django",
    "name": "parse_etags",
    "source_code": "def parse_etags(etag_str):\n    if etag_str.strip() == '*':\n        return ['*']\n    else:\n        etag_matches = (ETAG_MATCH.match(etag.strip()) for etag in etag_str.split(','))\n        return [match[1] for match in etag_matches if match]",
    "docstring": "Parse a string of ETags given in an If-None-Match or If-Match header as defined by RFC 9110. Return a list of quoted ETags, or ['*'] if all ETags should be matched.",
    "type": "function",
    "file_path": "django\\django\\utils\\http.py",
    "ast_data": "FunctionDef name:parse_etags arg:etag_str arguments arg If Compare Call Return return:yes Assign Call Call Call Return return:yes"
  },
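Illustrative calls against the parser above; behavior inferred from the code (quoted ETags, including any weak `W/` prefix captured by ETAG_MATCH, are returned verbatim, and `*` short-circuits).

```python
from django.utils.http import parse_etags

assert parse_etags('"abc", W/"def"') == ['"abc"', 'W/"def"']
assert parse_etags(" * ") == ["*"]
```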
  {
    "library": "pandas",
    "name": "size",
    "source_code": "@property\ndef size(self) -> int:\n    return np.prod(self.shape)",
    "docstring": "The number of elements in the array.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\arrays\\base.py",
    "ast_data": "FunctionDef name:size arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "node_names",
    "source_code": "@property\ndef node_names(self):\n    if self._node_name_cache is not None:\n        return self._node_name_cache\n    path_to_root = {}\n    path_to_root[0] = ('(root)',)\n    to_visit = collections.deque([0])\n    while to_visit:\n        node_id = to_visit.popleft()\n        obj = self._object_graph_proto.nodes[node_id]\n        for child in obj.children:\n            if child.node_id not in path_to_root:\n                path_to_root[child.node_id] = path_to_root[node_id] + (child.local_name,)\n                to_visit.append(child.node_id)\n    node_names = {}\n    for node_id, path_to_root in path_to_root.items():\n        node_names[node_id] = '.'.join(path_to_root)\n    for node_id, node in enumerate(self._object_graph_proto.nodes):\n        for slot_reference in node.slot_variables:\n            node_names[slot_reference.slot_variable_node_id] = f\"{node_names[node_id]}'s state '{slot_reference.slot_name}' for {node_names[slot_reference.original_variable_node_id]}\"\n    self._node_name_cache = node_names\n    return node_names",
    "docstring": "Lazily creates a mapping from node id to (\"path\", \"to\", \"root\").",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\checkpoint\\checkpoint.py",
    "ast_data": "FunctionDef name:node_names arg:self arguments arg If Compare Return return:yes Assign Assign Assign Call While Assign Call Assign For If Compare Assign Call Assign For Call Assign Call For Call For Assign Assign Return return:yes"
  },
  {
    "library": "sphinx",
    "name": "unpartial",
    "source_code": "def unpartial(obj: Any) -> Any:\n    while ispartial(obj):\n        obj = obj.func\n    return obj",
    "docstring": "Get an original object from a partial-like object. If *obj* is not a partial object, it is returned as is. .. seealso:: :func:",
    "type": "function",
    "file_path": "sphinx\\sphinx\\util\\inspect.py",
    "ast_data": "FunctionDef name:unpartial arg:obj arguments arg While Call Assign Return return:yes"
  },
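A small demonstration of unwrapping nested `functools.partial` objects with the helper above (`ispartial` recognizes `partial` and `partialmethod`).

```python
from functools import partial

from sphinx.util.inspect import unpartial


def add(a, b):
    return a + b


# Two layers of partial application unwrap back to the original function.
wrapped = partial(partial(add, 1), 2)
assert unpartial(wrapped) is add
```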
  {
    "library": "pytorch",
    "name": "CompiledFxGraphConstantsWithGm",
    "source_code": "class CompiledFxGraphConstantsWithGm(CompiledFxGraphConstants):\n\n    def __init__(self, gm: torch.fx.GraphModule) -> None:\n        self.gm = gm\n\n    def unwrap(self, g: CompiledFxGraph) -> dict[str, torch.Tensor]:\n        frozen_params = {name: getattr(self.gm, orig_name) for name, orig_name in g.frozen_param_names.items()}\n        constants = g.constants or {}\n        return {**constants, **frozen_params}",
    "docstring": "This version of CompiledFxGraphConstants, instead of grabbing constants directly saved on CompiledFxGraphs, will just grab their names. Then, it takes a second GraphModule to grab the corresponding constant values out of. This is necessary for supporting freezing in FxGraphCache.",
    "type": "class",
    "file_path": "pytorch\\torch\\_inductor\\output_code.py",
    "ast_data": "ClassDef name:CompiledFxGraphConstantsWithGm FunctionDef name:__init__ arg:self arg:gm arguments arg arg Assign FunctionDef name:unwrap arg:self arg:g arguments arg arg Assign Call Call Assign BoolOp Return return:yes"
  },
  {
    "library": "kornia",
    "name": "sift_describe",
    "source_code": "def sift_describe(input: Tensor, patch_size: int=41, num_ang_bins: int=8, num_spatial_bins: int=4, rootsift: bool=True, clipval: float=0.2) -> Tensor:\n    return SIFTDescriptor(patch_size, num_ang_bins, num_spatial_bins, rootsift, clipval)(input)",
    "docstring": "Compute the sift descriptor. See :class: for details.",
    "type": "function",
    "file_path": "kornia\\kornia\\feature\\siftdesc.py",
    "ast_data": "FunctionDef name:sift_describe arg:input arg:patch_size arg:num_ang_bins arg:num_spatial_bins arg:rootsift arg:clipval arguments arg arg arg arg arg arg Return return:yes Call Call"
  },
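A hedged usage sketch: SIFT descriptors expect single-channel patches of the configured patch_size (41 by default), and the default bin settings give 8 * 4 * 4 = 128 dimensions. The import follows this entry's file_path; the public re-export location may differ.

```python
import torch
from kornia.feature.siftdesc import sift_describe

patches = torch.rand(8, 1, 41, 41)   # (B, 1, patch_size, patch_size)
descs = sift_describe(patches)       # (8, 128): num_ang_bins * num_spatial_bins**2
```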
  {
    "library": "pytorch",
    "name": "parse_arguments",
    "source_code": "def parse_arguments():\n    from argparse import ArgumentParser\n    parser = ArgumentParser('AARCH64 wheels python CD')\n    parser.add_argument('--debug', action='store_true')\n    parser.add_argument('--build-only', action='store_true')\n    parser.add_argument('--test-only', type=str)\n    parser.add_argument('--enable-mkldnn', action='store_true')\n    parser.add_argument('--enable-cuda', action='store_true')\n    return parser.parse_args()",
    "docstring": "Parse inline arguments",
    "type": "function",
    "file_path": "pytorch\\.ci\\aarch64_linux\\aarch64_wheel_ci_build.py",
    "ast_data": "FunctionDef name:parse_arguments arguments Assign Call Call Call Call Call Call Return return:yes Call"
  },
  {
    "library": "kornia",
    "name": "dog_response_single",
    "source_code": "def dog_response_single(input: Tensor, sigma1: float=1.0, sigma2: float=1.6) -> Tensor:\n    KORNIA_CHECK_SHAPE(input, ['B', 'C', 'H', 'W'])\n    ks1 = _get_kernel_size(sigma1)\n    ks2 = _get_kernel_size(sigma2)\n    g1 = gaussian_blur2d(input, (ks1, ks1), (sigma1, sigma1))\n    g2 = gaussian_blur2d(input, (ks2, ks2), (sigma2, sigma2))\n    return g2 - g1",
    "docstring": "Compute the Difference-of-Gaussian response. .. image:: _static/img/dog_response_single.png Args: input: a given the gaussian 4d tensor :math:. sigma1: lower gaussian sigma sigma2: bigger gaussian sigma Return: the response map per channel with shape :math:.",
    "type": "function",
    "file_path": "kornia\\kornia\\feature\\responses.py",
    "ast_data": "FunctionDef name:dog_response_single arg:input arg:sigma1 arg:sigma2 arguments arg arg arg Call Assign Call Assign Call Assign Call Assign Call Return return:yes"
  },
  {
    "library": "scrapy",
    "name": "string_camelcase",
    "source_code": "def string_camelcase(string: str) -> str:\n    return CAMELCASE_INVALID_CHARS.sub('', string.title())",
    "docstring": "Convert a word to its CamelCase version and remove invalid chars >>> string_camelcase('lost-pound') 'LostPound' >>> string_camelcase('missing_images') 'MissingImages'",
    "type": "function",
    "file_path": "scrapy\\scrapy\\utils\\template.py",
    "ast_data": "FunctionDef name:string_camelcase arg:string arguments arg Return return:yes Call Call"
  },
  {
    "library": "numpy",
    "name": "keys",
    "source_code": "def keys(self):\n    self._load()\n    return list(self._file_openers.keys())",
    "docstring": "Return the keys of currently supported file openers. Parameters ---------- None Returns ------- keys : list The keys are None for uncompressed files and the file extension strings (i.e. ``) for supported compression methods.",
    "type": "method",
    "file_path": "numpy\\numpy\\lib\\_datasource.py",
    "ast_data": "FunctionDef name:keys arg:self arguments arg Call Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_result_type",
    "source_code": "def _result_type(*arrays_and_dtypes):\n\n    def preprocess_float(x):\n        if is_prefer_float32():\n            if isinstance(x, float):\n                return np.float32(x)\n            elif isinstance(x, complex):\n                return np.complex64(x)\n        return x\n    arrays_and_dtypes = [preprocess_float(x) for x in arrays_and_dtypes]\n    dtype = np.result_type(*arrays_and_dtypes)\n    return dtypes.as_dtype(canonicalize_dtype(dtype))",
    "docstring": "Returns the resulting type given a set of arrays.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\numpy_ops\\np_dtypes.py",
    "ast_data": "FunctionDef name:_result_type arguments arg FunctionDef name:preprocess_float arg:x arguments arg If Call If Call Return return:yes Call If Call Return return:yes Call Return return:yes Assign Call Assign Call Return return:yes Call Call"
  },
  {
    "library": "scikit-learn",
    "name": "_add_prefix_for_feature_names_out",
    "source_code": "def _add_prefix_for_feature_names_out(self, transformer_with_feature_names_out):\n    if self.verbose_feature_names_out:\n        names = list(chain.from_iterable(((f'{name}__{i}' for i in feature_names_out) for name, feature_names_out in transformer_with_feature_names_out)))\n        return np.asarray(names, dtype=object)\n    feature_names_count = Counter(chain.from_iterable((s for _, s in transformer_with_feature_names_out)))\n    top_6_overlap = [name for name, count in feature_names_count.most_common(6) if count > 1]\n    top_6_overlap.sort()\n    if top_6_overlap:\n        if len(top_6_overlap) == 6:\n            names_repr = str(top_6_overlap[:5])[:-1] + ', ...]'\n        else:\n            names_repr = str(top_6_overlap)\n        raise ValueError(f'Output feature names: {names_repr} are not unique. Please set verbose_feature_names_out=True to add prefixes to feature names')\n    return np.concatenate([name for _, name in transformer_with_feature_names_out])",
    "docstring": "Add prefix for feature names out that includes the transformer names. Parameters ---------- transformer_with_feature_names_out : list of tuples of (str, array-like of str) The tuple consistent of the transformer's name and its feature names out. Returns ------- feature_names_out : ndarray of shape (n_features,), dtype=str Transformed feature names.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\pipeline.py",
    "ast_data": "FunctionDef name:_add_prefix_for_feature_names_out arg:self arg:transformer_with_feature_names_out arguments arg arg If Assign Call Call Return return:yes Call Assign Call Call Assign Call Compare Call If If Compare Call Assign Call Assign Call Raise Call Return return:yes Call"
  },
  {
    "library": "kornia",
    "name": "hat",
    "source_code": "@staticmethod\ndef hat(theta: Tensor) -> Tensor:\n    check_so2_theta_shape(theta)\n    z = zeros_like(theta)\n    row0 = stack((z, theta), -1)\n    row1 = stack((theta, z), -1)\n    return stack((row0, row1), -1)",
    "docstring": "Convert elements from vector space to lie algebra. Returns matrix of shape :math:. Args: theta: angle in radians of shape :math:. Example: >>> theta = torch.tensor(3.1415/2) >>> So2.hat(theta) tensor([[0.0000, 1.5707], [1.5707, 0.0000]])",
    "type": "method",
    "file_path": "kornia\\kornia\\geometry\\liegroup\\so2.py",
    "ast_data": "FunctionDef name:hat arg:theta arguments arg Call Assign Call Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "watch_key_to_data",
    "source_code": "def watch_key_to_data(self, debug_watch_key, device_name=None):\n    if device_name is None:\n        matching_device_names = [name for name in self._watch_key_to_datum if debug_watch_key in self._watch_key_to_datum[name]]\n        if not matching_device_names:\n            return []\n        elif len(matching_device_names) == 1:\n            device_name = matching_device_names[0]\n        else:\n            raise ValueError(\"The debug watch key '%s' exists on multiple (%d) devices, but device name is not specified.\" % (debug_watch_key, len(matching_device_names)))\n    elif device_name not in self._debug_key_to_datum:\n        raise ValueError(\"There is no device named '%s' consisting of debug watch keys.\" % device_name)\n    return self._watch_key_to_datum[device_name].get(debug_watch_key, [])",
    "docstring": "Get all instances corresponding to a debug watch key. Args: debug_watch_key: () debug watch key. device_name: () name of the device. If there is only one device or if the specified debug_watch_key exists on only one device, this argument is optional. Returns: A list of instances that correspond to the debug watch key. If the watch key does not exist, returns an empty list. Raises: ValueError: If there are multiple devices that have the debug_watch_key, but device_name is not specified.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\debug\\lib\\debug_data.py",
    "ast_data": "FunctionDef name:watch_key_to_data arg:self arg:debug_watch_key arg:device_name arguments arg arg arg If Compare Assign Compare If Return return:no If Compare Call Assign Raise Call Call If Compare Raise Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "call_with_layout",
    "source_code": "@tf_export('experimental.dtensor.call_with_layout', v1=[])\ndef call_with_layout(fn: Callable[..., Any], layout: Optional[layout_lib.Layout], *args, **kwargs) -> Any:\n    if layout is not None:\n        if context.executing_eagerly():\n            with default_mesh(layout.mesh):\n                with _dtensor_device()._default_layout(layout):\n                    return fn(*args, **kwargs)\n        else:\n            return relayout(fn(*args, **kwargs), layout)\n    return fn(*args, **kwargs)",
    "docstring": "Calls a function in the DTensor device scope if is not None. If is not None, consumes DTensor(s) as input and produces a DTensor as output; a DTensor is a tf.Tensor with layout-related attributes. If is None, consumes and produces regular tf.Tensors. Args: fn: A supported TF API function such as tf.zeros. layout: Optional, the layout of the output DTensor. *args: Arguments given to . **kwargs: Keyword arguments given to . Returns: The return value of transformed to a DTensor if requested.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\dtensor\\python\\api.py",
    "ast_data": "FunctionDef name:call_with_layout arg:fn arg:layout arguments arg arg arg arg If Compare If Call With Call With Call Call Return return:yes Call Return return:yes Call Call Return return:yes Call Call"
  },
  {
    "library": "django",
    "name": "get_token",
    "source_code": "def get_token(request):\n    if 'CSRF_COOKIE' in request.META:\n        csrf_secret = request.META['CSRF_COOKIE']\n        request.META['CSRF_COOKIE_NEEDS_UPDATE'] = True\n    else:\n        csrf_secret = _add_new_csrf_cookie(request)\n    return _mask_cipher_secret(csrf_secret)",
    "docstring": "Return the CSRF token required for a POST form. The token is an alphanumeric value. A new token is created if one is not already set. A side effect of calling this function is to make the csrf_protect decorator and the CsrfViewMiddleware add a CSRF cookie and a 'Vary: Cookie' header to the outgoing response. For this reason, you may need to use this function lazily, as is done by the csrf context processor.",
    "type": "function",
    "file_path": "django\\django\\middleware\\csrf.py",
    "ast_data": "FunctionDef name:get_token arg:request arguments arg If Compare Assign Assign Assign Call Return return:yes Call"
  },
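Typical use of get_token() in a view that hands the CSRF token to client-side JavaScript; the view name is illustrative.

```python
from django.http import JsonResponse
from django.middleware.csrf import get_token


def csrf_token_view(request):
    # Calling get_token() also makes the middleware set the CSRF cookie
    # and a 'Vary: Cookie' header on the response.
    return JsonResponse({"csrfToken": get_token(request)})
```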
  {
    "library": "matplotlib",
    "name": "_set_active_handle",
    "source_code": "def _set_active_handle(self, event):\n    c_idx, c_dist = self._corner_handles.closest(event.x, event.y)\n    e_idx, e_dist = self._edge_handles.closest(event.x, event.y)\n    m_idx, m_dist = self._center_handle.closest(event.x, event.y)\n    if 'move' in self._state:\n        self._active_handle = 'C'\n    elif m_dist < self.grab_range * 2:\n        self._active_handle = 'C'\n    elif c_dist > self.grab_range and e_dist > self.grab_range:\n        if self.drag_from_anywhere and self._contains(event):\n            self._active_handle = 'C'\n        else:\n            self._active_handle = None\n            return\n    elif c_dist < e_dist:\n        self._active_handle = self._corner_order[c_idx]\n    else:\n        self._active_handle = self._edge_order[e_idx]",
    "docstring": "Set active handle based on the location of the mouse event.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\widgets.py",
    "ast_data": "FunctionDef name:_set_active_handle arg:self arg:event arguments arg arg Assign Call Assign Call Assign Call If Compare Assign If Compare Assign If BoolOp Compare Compare If BoolOp Call Assign Assign Return return:no If Compare Assign Assign"
  },
  {
    "library": "django",
    "name": "should_redirect_with_slash",
    "source_code": "def should_redirect_with_slash(self, request):\n    if settings.APPEND_SLASH and (not request.path_info.endswith('/')):\n        urlconf = getattr(request, 'urlconf', None)\n        if not is_valid_path(request.path_info, urlconf):\n            match = is_valid_path('%s/' % request.path_info, urlconf)\n            if match:\n                view = match.func\n                return getattr(view, 'should_append_slash', True)\n    return False",
    "docstring": "Return True if settings.APPEND_SLASH is True and appending a slash to the request path turns an invalid path into a valid one.",
    "type": "method",
    "file_path": "django\\django\\middleware\\common.py",
    "ast_data": "FunctionDef name:should_redirect_with_slash arg:self arg:request arguments arg arg If BoolOp Call Assign Call If Call Assign Call If Assign Return return:yes Call Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "get_headers",
    "source_code": "def get_headers(token):\n    return {'Accept': 'application/vnd.github+json', 'Authorization': f'Bearer {token}', 'X-GitHub-Api-Version': '2022-11-28'}",
    "docstring": "Get the headers for the GitHub API.",
    "type": "function",
    "file_path": "scikit-learn\\build_tools\\get_comment.py",
    "ast_data": "FunctionDef name:get_headers arg:token arguments arg Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "__str__",
    "source_code": "@abstractmethod\ndef __str__(self):\n    pass",
    "docstring": "A human readable representational string of the constraint.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\utils\\_param_validation.py",
    "ast_data": "FunctionDef name:__str__ arg:self arguments arg"
  },
  {
    "library": "tensorflow",
    "name": "internal_convert_n_to_tensor",
    "source_code": "def internal_convert_n_to_tensor(values, dtype=None, name=None, as_ref=False, preferred_dtype=None, ctx=None) -> list[Union[EagerTensor, SymbolicTensor]]:\n    if not isinstance(values, collections_abc.Sequence):\n        raise TypeError('values must be a sequence.')\n    ret = []\n    for i, value in enumerate(values):\n        n = None if name is None else '%s_%d' % (name, i)\n        ret.append(convert_to_tensor(value, dtype=dtype, name=n, as_ref=as_ref, preferred_dtype=preferred_dtype))\n    return ret",
    "docstring": "Converts to a list of objects. Args: values: A list of objects that can be consumed by . dtype: (Optional.) The required of the returned objects. name: (Optional.) A name prefix to used when a new is created, in which case element will be given the name . as_ref: True if the caller wants the results as ref tensors. preferred_dtype: Optional element type for the returned tensors, used when dtype is None. In some cases, a caller may not have a dtype in mind when converting to a tensor, so preferred_dtype can be used as a soft preference. If the conversion to is not possible, this argument has no effect. ctx: Unused. Present for API backwards compatibility. Returns: A list of and/or objects. Raises: TypeError: If no conversion function is registered for an element in . RuntimeError: If a registered conversion function returns an invalid value.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\ops.py",
    "ast_data": "FunctionDef name:internal_convert_n_to_tensor arg:values arg:dtype arg:name arg:as_ref arg:preferred_dtype arg:ctx arguments arg arg arg arg arg arg If Call Raise Call Assign For Call Assign Compare Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_get_name",
    "source_code": "def _get_name(self, overwrite_name=None):\n    stack = tf_inspect.stack()\n    calling_class = None\n    name = None\n    for frame in stack[::-1]:\n        f_locals = frame[0].f_locals\n        f_self = f_locals.get('self', None)\n        if isinstance(f_self, Benchmark):\n            calling_class = f_self\n            name = frame[3]\n            break\n    if calling_class is None:\n        raise ValueError('Unable to determine calling Benchmark class.')\n    name = overwrite_name or name\n    class_name = type(calling_class).__name__\n    name = '%s.%s' % (class_name, name)\n    return name",
    "docstring": "Returns full name of class and method calling report_benchmark.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\platform\\benchmark.py",
    "ast_data": "FunctionDef name:_get_name arg:self arg:overwrite_name arguments arg arg Assign Call Assign Assign For Assign Assign Call If Call Assign Assign If Compare Raise Call Assign BoolOp Assign Call Assign Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "FSDPManagedNNModuleVariable",
    "source_code": "class FSDPManagedNNModuleVariable(UnspecializedNNModuleVariable):\n\n    def __init__(self, value, **kwargs) -> None:\n        source = kwargs.get('source', None)\n        assert source is not None, 'FSDPManagedNNModule depends on having an accurate source to control guarding.'\n        super().__init__(value=value, **kwargs)\n        self.source = source\n\n    def _wrap_source(self, attr_source):\n        if not isinstance(attr_source, (FSDPNNModuleSource, UnspecializedNNModuleSource)):\n            if torch._dynamo.config.skip_fsdp_guards:\n                return FSDPNNModuleSource(attr_source)\n            else:\n                return UnspecializedNNModuleSource(attr_source)\n        return attr_source",
    "docstring": "Tracing behavior: trace into submodules and treat them as Unspecialized, do not register parameters to the top-level, treat them as function inputs. Guards behavior: if 'skip_fsdp_guards', many guards that would be installed by a vanilla UnspecializedNNModuleVariable are simply dropped, on the basis that a user wrapping their model in FSDP(model) is already opting into a requirement to not modify internal model state, which would already break FSDP without compilation.",
    "type": "class",
    "file_path": "pytorch\\torch\\_dynamo\\variables\\nn_module.py",
    "ast_data": "ClassDef name:FSDPManagedNNModuleVariable FunctionDef name:__init__ arg:self arg:value arguments arg arg arg Assign Call Compare Call Call Assign FunctionDef name:_wrap_source arg:self arg:attr_source arguments arg arg If Call If Return return:yes Call Return return:yes Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_normalize_outputs",
    "source_code": "def _normalize_outputs(outputs, function_name, signature_key):\n    if not isinstance(outputs, collections_abc.Mapping):\n        if hasattr(outputs, '_asdict'):\n            outputs = outputs._asdict()\n        else:\n            if not isinstance(outputs, collections_abc.Sequence):\n                outputs = [outputs]\n            outputs = {'output_{}'.format(output_index): output for output_index, output in enumerate(outputs)}\n    for key, value in outputs.items():\n        if not isinstance(key, compat.bytes_or_text_types):\n            raise ValueError(f'Got a dictionary with a non-string key {key!r} in the output of the function {compat.as_str_any(function_name)} used to generate the SavedModel signature {signature_key!r}.')\n        if not isinstance(value, (tensor.Tensor, composite_tensor.CompositeTensor)):\n            raise ValueError(f'Got a non-Tensor value {value!r} for key {key!r} in the output of the function {compat.as_str_any(function_name)} used to generate the SavedModel signature {signature_key!r}. Outputs for functions used as signatures must be a single Tensor, a sequence of Tensors, or a dictionary from string to Tensor.')\n    return outputs",
    "docstring": "Normalize outputs if necessary and check that they are tensors.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\saved_model\\signature_serialization.py",
    "ast_data": "FunctionDef name:_normalize_outputs arg:outputs arg:function_name arg:signature_key arguments arg arg arg If Call If Call Assign Call If Call Assign Assign Call Call For Call If Call Raise Call Call If Call Raise Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "GetContainingCondContext",
    "source_code": "def GetContainingCondContext(ctxt):\n    while ctxt:\n        if ctxt.IsCondContext():\n            return ctxt\n        ctxt = ctxt.outer_context\n    return None",
    "docstring": "Returns the first ancestor CondContext of . Returns if is a CondContext, or None if is not in a cond. Args: ctxt: ControlFlowContext Returns: if is a CondContext, the most nested CondContext containing , or None if is not in a cond.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\control_flow_util.py",
    "ast_data": "FunctionDef name:GetContainingCondContext arg:ctxt arguments arg While If Call Return return:yes Assign Return return:no"
  },
  {
    "library": "tensorflow",
    "name": "registered_identifiers",
    "source_code": "@tf_export('__internal__.saved_model.load.registered_identifiers', v1=[])\ndef registered_identifiers():\n    return _REVIVED_TYPE_REGISTRY.keys()",
    "docstring": "Return all the current registered revived object identifiers. Returns: A set of strings.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\saved_model\\revived_types.py",
    "ast_data": "FunctionDef name:registered_identifiers arguments Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "__ne__",
    "source_code": "def __ne__(self, other):\n    return not self.__eq__(other)",
    "docstring": "Returns True iff self != other.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\dtypes.py",
    "ast_data": "FunctionDef name:__ne__ arg:self arg:other arguments arg arg Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "find_minima",
    "source_code": "def find_minima(self):\n    if self.disp:\n        logging.info('Searching for minimizer pool...')\n    self.minimizers()\n    if len(self.X_min) != 0:\n        self.minimise_pool(self.local_iter)\n        self.sort_result()\n        self.f_lowest = self.res.fun\n        self.x_lowest = self.res.x\n    else:\n        self.find_lowest_vertex()\n    if self.disp:\n        logging.info(f'Minimiser pool = SHGO.X_min = {self.X_min}')",
    "docstring": "Construct the minimizer pool, map the minimizers to local minima and sort the results into a global return object.",
    "type": "method",
    "file_path": "scipy\\scipy\\optimize\\_shgo.py",
    "ast_data": "FunctionDef name:find_minima arg:self arguments arg If Call Call If Compare Call Call Call Assign Assign Call If Call"
  },
  {
    "library": "django",
    "name": "__getitem__",
    "source_code": "def __getitem__(self, key):\n    use_func = key.startswith(self.prefix)\n    key = key.removeprefix(self.prefix)\n    value = super().__getitem__(key)\n    if use_func:\n        return self.func(value)\n    return value",
    "docstring": "Retrieve the real value after stripping the prefix string (if present). If the prefix is present, pass the value through self.func before returning, otherwise return the raw value.",
    "type": "method",
    "file_path": "django\\django\\utils\\datastructures.py",
    "ast_data": "FunctionDef name:__getitem__ arg:self arg:key arguments arg arg Assign Call Assign Call Assign Call Call If Return return:yes Call Return return:yes"
  },
  {
    "library": "numpy",
    "name": "array_repr",
    "source_code": "@array_function_dispatch(_array_repr_dispatcher, module='numpy')\ndef array_repr(arr, max_line_width=None, precision=None, suppress_small=None):\n    return _array_repr_implementation(arr, max_line_width, precision, suppress_small)",
    "docstring": "Return the string representation of an array. Parameters ---------- arr : ndarray Input array. max_line_width : int, optional Inserts newlines if text is longer than . Defaults to ``. Returns ------- string : str The string representation of an array. See Also -------- array_str, array2string, set_printoptions Examples -------- >>> import numpy as np >>> np.array_repr(np.array([1,2])) 'array([1, 2])' >>> np.array_repr(np.ma.array([0.])) 'MaskedArray([0.])' >>> np.array_repr(np.array([], np.int32)) 'array([], dtype=int32)' >>> x = np.array([1e-6, 4e-7, 2, 3]) >>> np.array_repr(x, precision=6, suppress_small=True) 'array([0.000001, 0. , 2. , 3. ])'",
    "type": "function",
    "file_path": "numpy\\numpy\\_core\\arrayprint.py",
    "ast_data": "FunctionDef name:array_repr arg:arr arg:max_line_width arg:precision arg:suppress_small arguments arg arg arg arg Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "write_op_log",
    "source_code": "@tf_export(v1=['profiler.write_op_log'])\ndef write_op_log(graph, log_dir, op_log=None, run_meta=None, add_trace=True):\n    if not graph and (not context.executing_eagerly()):\n        graph = ops.get_default_graph()\n    op_log = merge_default_with_oplog(graph, op_log, run_meta, add_trace)\n    with gfile.Open(os.path.join(log_dir, 'tfprof_log'), 'w') as log:\n        log.write(op_log.SerializeToString())",
    "docstring": "Log provided 'op_log', and add additional model information below. The API also assigns ops in tf.compat.v1.trainable_variables() an op type called '_trainable_variables'. The API also logs 'flops' statistics for ops with op.RegisterStatistics() defined. flops calculation depends on Tensor shapes defined in 'graph', which might not be complete. 'run_meta', if provided, completes the shape information with best effort. Args: graph: tf.Graph. If None and eager execution is not enabled, use default graph. log_dir: directory to write the log file. op_log: (Optional) OpLogProto proto to be written. If not provided, an new one is created. run_meta: (Optional) RunMetadata proto that helps flops computation using run time shape information. add_trace: Whether to add python code trace information. Used to support \"code\" view.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\profiler\\tfprof_logger.py",
    "ast_data": "FunctionDef name:write_op_log arg:graph arg:log_dir arg:op_log arg:run_meta arg:add_trace arguments arg arg arg arg arg If BoolOp Call Assign Call Assign Call With Call Call Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "random_brightness",
    "source_code": "@tf_export('image.random_brightness')\n@dispatch.register_unary_elementwise_api\n@dispatch.add_dispatch_support\ndef random_brightness(image, max_delta, seed=None):\n    if max_delta < 0:\n        raise ValueError('max_delta must be non-negative.')\n    delta = random_ops.random_uniform([], -max_delta, max_delta, seed=seed)\n    return adjust_brightness(image, delta)",
    "docstring": "Adjust the brightness of images by a random factor. Equivalent to using a randomly picked in the interval . For producing deterministic results given a value, use . Unlike using the param with ops, ops guarantee the same results given the same seed independent of how many times the function is called, and independent of global seed settings (e.g. tf.random.set_seed). Args: image: An image or images to adjust. max_delta: float, must be non-negative. This parameter controls the maximum relative change in brightness. seed: A Python integer. Used to create a random seed. See for behavior. Usage Example: >>> x = [[[1.0, 2.0, 3.0], ... [4.0, 5.0, 6.0]], ... [[7.0, 8.0, 9.0], ... [10.0, 11.0, 12.0]]] >>> tf.image.random_brightness(x, 0.2) Returns: The brightness-adjusted image(s). Raises: ValueError: if is negative.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\image_ops_impl.py",
    "ast_data": "FunctionDef name:random_brightness arg:image arg:max_delta arg:seed arguments arg arg arg If Compare Raise Call Assign Call Return return:yes Call Call"
  },
  {
    "library": "matplotlib",
    "name": "get_subplot_params",
    "source_code": "def get_subplot_params(self, figure=None):\n    if figure is None:\n        kw = {k: mpl.rcParams['figure.subplot.' + k] for k in self._AllowedKeys}\n        subplotpars = SubplotParams(**kw)\n    else:\n        subplotpars = copy.copy(figure.subplotpars)\n    subplotpars.update(**{k: getattr(self, k) for k in self._AllowedKeys})\n    return subplotpars",
    "docstring": "Return the for the GridSpec. In order of precedence the values are taken from - non-*None* attributes of the GridSpec - the provided *figure* - :rc: Note that the `` attribute of the GridSpec is always ignored.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\gridspec.py",
    "ast_data": "FunctionDef name:get_subplot_params arg:self arg:figure arguments arg arg If Compare Assign Assign Call Assign Call Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_maybe_wrap_custom_getter",
    "source_code": "def _maybe_wrap_custom_getter(custom_getter, old_getter):\n    if old_getter is None:\n        return custom_getter\n\n    def wrapped_custom_getter(getter, *args, **kwargs):\n        return custom_getter(functools.partial(old_getter, getter), *args, **kwargs)\n    return wrapped_custom_getter",
    "docstring": "Wrap a call to a custom_getter to use the old_getter internally.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\variable_scope.py",
    "ast_data": "FunctionDef name:_maybe_wrap_custom_getter arg:custom_getter arg:old_getter arguments arg arg If Compare Return return:yes FunctionDef name:wrapped_custom_getter arg:getter arguments arg arg arg Return return:yes Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "compute_mask",
    "source_code": "@abstractmethod\ndef compute_mask(self, t, default_mask):\n    pass",
    "docstring": "Compute and returns a mask for the input tensor ``",
    "type": "method",
    "file_path": "pytorch\\torch\\nn\\utils\\prune.py",
    "ast_data": "FunctionDef name:compute_mask arg:self arg:t arg:default_mask arguments arg arg arg"
  },
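`compute_mask` is the abstract hook of `torch.nn.utils.prune.BasePruningMethod`; a minimal sketch of a custom pruning method overriding it (the `EveryOtherPruning` class and its policy are hypothetical, for illustration only):

```python
import torch
from torch.nn.utils import prune

class EveryOtherPruning(prune.BasePruningMethod):
    """Hypothetical method: zero out every other entry of the tensor."""
    PRUNING_TYPE = "unstructured"

    def compute_mask(self, t, default_mask):
        # Start from the inherited mask so stacked pruning methods compose.
        mask = default_mask.clone()
        mask.view(-1)[::2] = 0
        return mask

lin = torch.nn.Linear(4, 3)
EveryOtherPruning.apply(lin, name="weight")  # adds weight_orig / weight_mask
print(lin.weight)  # every other weight entry is zeroed
```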
  {
    "library": "pytorch",
    "name": "__new__",
    "source_code": "def __new__(cls, *args, **kwargs):\n    orig_cls = cls.__mro__[2]\n    return orig_cls.__new__(orig_cls, *args, **kwargs)",
    "docstring": "Override `` to remove the DDP class and directly construct the original class for cases like indexing into a container module.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\_composable\\replicate.py",
    "ast_data": "FunctionDef name:__new__ arg:cls arguments arg arg arg Assign Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_get_sparse_tensors",
    "source_code": "@deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE, _FEATURE_COLUMN_DEPRECATION)\ndef _get_sparse_tensors(self, inputs, weight_collections=None, trainable=None):\n    del weight_collections\n    del trainable\n    return CategoricalColumn.IdWeightPair(inputs.get(self), None)",
    "docstring": "See base class.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\feature_column\\feature_column_v2.py",
    "ast_data": "FunctionDef name:_get_sparse_tensors arg:self arg:inputs arg:weight_collections arg:trainable arguments arg arg arg arg Return return:yes Call Call Call"
  },
  {
    "library": "scikit-learn",
    "name": "fit_transform",
    "source_code": "@_fit_context(prefer_skip_nested_validation=True)\ndef fit_transform(self, X, y=None):\n    return self._fit_transform(X, compute_sources=True)",
    "docstring": "Fit the model and recover the sources from X. Parameters ---------- X : array-like of shape (n_samples, n_features) Training data, where is the number of samples and is the number of features. y : Ignored Not used, present for API consistency by convention. Returns ------- X_new : ndarray of shape (n_samples, n_components) Estimated sources obtained by transforming the data with the estimated unmixing matrix.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\decomposition\\_fastica.py",
    "ast_data": "FunctionDef name:fit_transform arg:self arg:X arg:y arguments arg arg arg Return return:yes Call Call"
  },
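A short sketch of the `fit_transform` call on scikit-learn's `FastICA`, to which this method belongs; the random mixing matrix is made up for illustration, and `whiten="unit-variance"` assumes scikit-learn >= 1.1:

```python
import numpy as np
from sklearn.decomposition import FastICA

rng = np.random.default_rng(0)
S_true = rng.standard_normal((200, 3))       # hypothetical source signals
X = S_true @ rng.standard_normal((3, 3))     # observed mixed signals

ica = FastICA(n_components=3, whiten="unit-variance", random_state=0)
S_est = ica.fit_transform(X)                 # estimated sources
print(S_est.shape)                           # (200, 3)
```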
  {
    "library": "pytorch",
    "name": "wait",
    "source_code": "def wait(self, stream=None) -> None:\n    if stream is None:\n        stream = torch.xpu.current_stream()\n    super().wait(stream)",
    "docstring": "Make all future work submitted to the given stream wait for this event. Use `` if no stream is specified.",
    "type": "method",
    "file_path": "pytorch\\torch\\xpu\\streams.py",
    "ast_data": "FunctionDef name:wait arg:self arg:stream arguments arg arg If Compare Assign Call Call Call"
  },
  {
    "library": "django",
    "name": "get_table_description",
    "source_code": "def get_table_description(self, cursor, table_name):\n    raise NotImplementedError('subclasses of BaseDatabaseIntrospection may require a get_table_description() method.')",
    "docstring": "Return a description of the table with the DB-API cursor.description interface.",
    "type": "method",
    "file_path": "django\\django\\db\\backends\\base\\introspection.py",
    "ast_data": "FunctionDef name:get_table_description arg:self arg:cursor arg:table_name arguments arg arg arg Raise Call"
  },
  {
    "library": "matplotlib",
    "name": "_draw_polygon_without_update",
    "source_code": "def _draw_polygon_without_update(self):\n    xs, ys = zip(*self._xys) if self._xys else ([], [])\n    self._selection_artist.set_data(xs, ys)\n    self._update_box()\n    if self._selection_completed or (len(self._xys) > 3 and self._xys[-1] == self._xys[0]):\n        self._polygon_handles.set_data(xs[:-1], ys[:-1])\n    else:\n        self._polygon_handles.set_data(xs, ys)",
    "docstring": "Redraw the polygon based on new vertex positions, no update().",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\widgets.py",
    "ast_data": "FunctionDef name:_draw_polygon_without_update arg:self arguments arg Assign Call Call Call If BoolOp BoolOp Compare Call Compare Call Call"
  },
  {
    "library": "cherrypy",
    "name": "error",
    "source_code": "@cherrypy.expose\ndef error(self, code):\n    raise cherrypy.HTTPError(status=code)",
    "docstring": "Respond with a given HTTP error.",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\tutorial\\tut10_http_errors.py",
    "ast_data": "FunctionDef name:error arg:self arg:code arguments arg arg Raise Call"
  },
  {
    "library": "tensorflow",
    "name": "context",
    "source_code": "def context():\n    if _context is None:\n        _create_context()\n    return _context",
    "docstring": "Returns a singleton context object.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\context.py",
    "ast_data": "FunctionDef name:context arguments If Compare Call Return return:yes"
  },
  {
    "library": "pandas",
    "name": "string",
    "source_code": "@property\ndef string(self) -> str:\n    return self._str",
    "docstring": "The Stata representation of the missing value: '.', '.a'..'.z' Returns ------- str The representation of the missing value.",
    "type": "method",
    "file_path": "pandas\\pandas\\io\\stata.py",
    "ast_data": "FunctionDef name:string arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "wait_for_other_workers",
    "source_code": "def wait_for_other_workers(self):\n    if not self._worker_barrier:\n        return\n    self._worker_barrier.wait()",
    "docstring": "Waits for other workers to reach the same call to this method. Raises: ValueError: if is not passed to the __init__ method.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\distribute_coordinator.py",
    "ast_data": "FunctionDef name:wait_for_other_workers arg:self arguments arg If Return return:no Call"
  },
  {
    "library": "tensorflow",
    "name": "_check_multiple_access_to_resources",
    "source_code": "def _check_multiple_access_to_resources(self, captured_resources, exclusive_resource_access):\n    for sg in ops.get_collection(CRITICAL_SECTION_EXECUTIONS):\n        if self._is_self_handle(sg.handle):\n            continue\n        if not (exclusive_resource_access or sg.exclusive_resource_access):\n            continue\n        resource_intersection = captured_resources.intersection(sg.resources)\n        if resource_intersection:\n            raise ValueError(f\"This execution would access resources: {list(resource_intersection)}. Either this lock (CriticalSection: {self._handle}) or lock '{sg}' (CriticalSection: {sg.handle}) requested exclusive resource access of this resource. Did you mean to call execute with keyword argument exclusive_resource_access=False?\")",
    "docstring": "Raise if captured_resources are accessed by another CriticalSection. Args: captured_resources: Set of tensors of type resource. exclusive_resource_access: Whether this execution requires exclusive resource access. Raises: ValueError: If any tensors in are also accessed by another , and at least one of them requires exclusive resource access.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\critical_section_ops.py",
    "ast_data": "FunctionDef name:_check_multiple_access_to_resources arg:self arg:captured_resources arg:exclusive_resource_access arguments arg arg arg For Call If Call If BoolOp Assign Call If Raise Call Call"
  },
  {
    "library": "django",
    "name": "rendered_content",
    "source_code": "@property\ndef rendered_content(self):\n    template = self.resolve_template(self.template_name)\n    context = self.resolve_context(self.context_data)\n    return template.render(context, self._request)",
    "docstring": "Return the freshly rendered content for the template and context described by the TemplateResponse. This *does not* set the final content of the response. To set the response content, you must either call render(), or set the content explicitly using the value of this property.",
    "type": "method",
    "file_path": "django\\django\\template\\response.py",
    "ast_data": "FunctionDef name:rendered_content arg:self arguments arg Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "transpose",
    "source_code": "def transpose(self):\n    return self._transpose()",
    "docstring": "Transpose this linear operator. Returns a LinearOperator that represents the transpose of this one. Can be abbreviated self.T instead of self.transpose().",
    "type": "method",
    "file_path": "scipy\\scipy\\sparse\\linalg\\_interface.py",
    "ast_data": "FunctionDef name:transpose arg:self arguments arg Return return:yes Call"
  },
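A small sketch showing `transpose` (and its `.T` shorthand) on a `LinearOperator` built with `aslinearoperator`; the dense matrix is illustrative:

```python
import numpy as np
from scipy.sparse.linalg import aslinearoperator

A = aslinearoperator(np.array([[1., 2., 3.],
                               [4., 5., 6.]]))   # shape (2, 3)

At = A.transpose()             # equivalent to A.T
print(At.shape)                # (3, 2)
print(At.matvec(np.ones(2)))   # apply the transposed operator to a vector
```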
  {
    "library": "tensorflow",
    "name": "get_flat_tensors_for_gradients",
    "source_code": "def get_flat_tensors_for_gradients(xs):\n    return nest.flatten([_get_tensors_for_gradient(x) for x in xs])",
    "docstring": "Returns a flat list of Tensors that should be differentiated for . Args: xs: A list of s or s. Returns: A flat list of s constructed from , where values are left as-is, and s are replaced with .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\composite_tensor_gradient.py",
    "ast_data": "FunctionDef name:get_flat_tensors_for_gradients arg:xs arguments arg Return return:yes Call Call"
  },
  {
    "library": "django",
    "name": "_flush",
    "source_code": "def _flush(self):\n    if not self._write:\n        raise GDALException('Raster needs to be opened in write mode to change values.')\n    capi.flush_ds(self._ptr)",
    "docstring": "Flush all data from memory into the source file if it exists. The data that needs flushing are geotransforms, coordinate systems, nodata_values and pixel values. This function will be called automatically wherever it is needed.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\gdal\\raster\\source.py",
    "ast_data": "FunctionDef name:_flush arg:self arguments arg If Raise Call Call"
  },
  {
    "library": "matplotlib",
    "name": "autoscale_None",
    "source_code": "def autoscale_None(self, A):\n    A = np.asanyarray(A)\n    if isinstance(A, np.ma.MaskedArray):\n        if A.mask is False or not A.mask.shape:\n            A = A.data\n    if self.vmin is None and A.size:\n        self.vmin = A.min()\n    if self.vmax is None and A.size:\n        self.vmax = A.max()",
    "docstring": "If *vmin* or *vmax* are not set, use the min/max of *A* to set them.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\colors.py",
    "ast_data": "FunctionDef name:autoscale_None arg:self arg:A arguments arg arg Assign Call If Call If BoolOp Compare Assign If BoolOp Compare Assign Call If BoolOp Compare Assign Call"
  },
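A sketch of `autoscale_None` on a matplotlib `Normalize` instance: only the unset limit is filled from the data, the fixed one is kept:

```python
import numpy as np
import matplotlib.colors as mcolors

data = np.array([0.2, 0.5, 0.9])
norm = mcolors.Normalize(vmin=None, vmax=0.8)  # vmax fixed, vmin unset

norm.autoscale_None(data)    # fills vmin from data.min(); vmax is kept
print(norm.vmin, norm.vmax)  # 0.2 0.8
```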
  {
    "library": "scikit-learn",
    "name": "predict",
    "source_code": "def predict(self, X, known_cat_bitsets, f_idx_map, n_threads):\n    out = np.empty(X.shape[0], dtype=Y_DTYPE)\n    _predict_from_raw_data(self.nodes, X, self.raw_left_cat_bitsets, known_cat_bitsets, f_idx_map, n_threads, out)\n    return out",
    "docstring": "Predict raw values for non-binned data. Parameters ---------- X : ndarray, shape (n_samples, n_features) The input samples. known_cat_bitsets : ndarray of shape (n_categorical_features, 8) Array of bitsets of known categories, for each categorical feature. f_idx_map : ndarray of shape (n_features,) Map from original feature index to the corresponding index in the known_cat_bitsets array. n_threads : int Number of OpenMP threads to use. Returns ------- y : ndarray, shape (n_samples,) The raw predicted values.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\ensemble\\_hist_gradient_boosting\\predictor.py",
    "ast_data": "FunctionDef name:predict arg:self arg:X arg:known_cat_bitsets arg:f_idx_map arg:n_threads arguments arg arg arg arg arg Assign Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "barrier",
    "source_code": "@tf_export('experimental.dtensor.barrier', v1=[])\ndef barrier(mesh: layout.Mesh, barrier_name: Optional[str]=None, timeout_in_ms: Optional[int]=None):\n    if barrier_name is None:\n        barrier_name = '(barrier)'\n    logging.info('entering barrier before op: %s', barrier_name)\n    context.async_wait()\n    component = array_ops.reshape(1.0, [1] * len(mesh.shape()))\n    ones = api.pack([component] * mesh.num_local_devices(), layout.Layout(mesh.dim_names, mesh))\n    mesh_size = math_ops.reduce_sum(ones)\n    if mesh_size != mesh.size:\n        raise ValueError('Global barrier produced wrong mesh size : {0} while mesh has actualsize : {1}'.format(mesh_size, mesh.size))\n    context.async_wait()\n    if context.context().coordination_service:\n        if timeout_in_ms is None:\n            timeout_in_ms = 24 * 60 * 60 * 1000\n        num_calls = _BARRIER_DICT.setdefault(barrier_name, 0)\n        _BARRIER_DICT[barrier_name] = num_calls + 1\n        barrier_id = f'{barrier_name}:{num_calls}'\n        context.context().wait_at_barrier(barrier_id, timeout_in_ms)\n    logging.info('finished running barrier across all clients after op: %s', barrier_name)",
    "docstring": "Runs a barrier on the mesh. Upon returning from the barrier, all operations run before the barrier would have completed across all clients. Currently we allocate a fully sharded tensor with mesh shape and run an all_reduce on it. Example: A barrier can be used before application exit to ensure completion of pending ops. Args: mesh: The mesh to run the barrier on. barrier_name: The name of the barrier. Mainly used for logging purpose. timeout_in_ms: The timeout of the barrier in ms. If omitted, blocks indefinitely till the barrier is reached from all clients.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\dtensor\\python\\mesh_util.py",
    "ast_data": "FunctionDef name:barrier arg:mesh arg:barrier_name arg:timeout_in_ms arguments arg arg arg If Compare Assign Call Call Assign Call Call Call Assign Call Call Call Assign Call If Compare Raise Call Call Call If Call If Compare Assign Assign Call Assign Assign Call Call Call Call"
  },
  {
    "library": "matplotlib",
    "name": "__init__",
    "source_code": "def __init__(self, child, pad=0.0, *, draw_frame=False, patch_attrs=None):\n    super().__init__()\n    self.pad = pad\n    self._children = [child]\n    self.patch = FancyBboxPatch(xy=(0.0, 0.0), width=1.0, height=1.0, facecolor='w', edgecolor='k', mutation_scale=1, snap=True, visible=draw_frame, boxstyle='square,pad=0')\n    if patch_attrs is not None:\n        self.patch.update(patch_attrs)",
    "docstring": "Parameters ---------- child : The contained . pad : float, default: 0.0 The padding in points. This will be scaled with the renderer dpi. In contrast, *width* and *height* are in *pixels* and thus not scaled. draw_frame : bool Whether to draw the contained . patch_attrs : dict or None Additional parameters passed to the contained .",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\offsetbox.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:child arg:pad arguments arg arg arg arg arg Call Call Assign Assign Assign Call If Compare Call"
  },
  {
    "library": "pandas",
    "name": "putmask",
    "source_code": "def putmask(self, mask, value: MultiIndex) -> MultiIndex:\n    mask, noop = validate_putmask(self, mask)\n    if noop:\n        return self.copy()\n    if len(mask) == len(value):\n        subset = value[mask].remove_unused_levels()\n    else:\n        subset = value.remove_unused_levels()\n    new_levels = []\n    new_codes = []\n    for i, (value_level, level, level_codes) in enumerate(zip(subset.levels, self.levels, self.codes)):\n        new_level = level.union(value_level, sort=False)\n        value_codes = new_level.get_indexer_for(subset.get_level_values(i))\n        new_code = ensure_int64(level_codes)\n        new_code[mask] = value_codes\n        new_levels.append(new_level)\n        new_codes.append(new_code)\n    return MultiIndex(levels=new_levels, codes=new_codes, names=self.names, verify_integrity=False)",
    "docstring": "Return a new MultiIndex of the values set with the mask. Parameters ---------- mask : array like value : MultiIndex Must either be the same length as self or length one Returns ------- MultiIndex",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\indexes\\multi.py",
    "ast_data": "FunctionDef name:putmask arg:self arg:mask arg:value arguments arg arg arg Assign Call If Return return:yes Call If Compare Call Call Assign Call Assign Call Assign Assign For Call Call Assign Call Assign Call Call Assign Call Assign Call Call Return return:yes Call"
  },
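A sketch of `MultiIndex.putmask`: positions where the mask is True are taken from the passed MultiIndex; the tuples are illustrative:

```python
import numpy as np
import pandas as pd

left = pd.MultiIndex.from_tuples([("a", 1), ("a", 2), ("b", 1)])
right = pd.MultiIndex.from_tuples([("x", 9), ("y", 8), ("z", 7)])
mask = np.array([True, False, True])

# Masked positions come from `right`; the rest stay from `left`.
print(left.putmask(mask, right))
# MultiIndex([('x', 9), ('a', 2), ('z', 7)], ...)
```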
  {
    "library": "django",
    "name": "_migrate_all_backwards",
    "source_code": "def _migrate_all_backwards(self, plan, full_plan, fake):\n    migrations_to_run = {m[0] for m in plan}\n    states = {}\n    state = self._create_project_state()\n    applied_migrations = {self.loader.graph.nodes[key] for key in self.loader.applied_migrations if key in self.loader.graph.nodes}\n    if self.progress_callback:\n        self.progress_callback('render_start')\n    for migration, _ in full_plan:\n        if not migrations_to_run:\n            break\n        if migration in migrations_to_run:\n            if 'apps' not in state.__dict__:\n                state.apps\n            states[migration] = state\n            state = migration.mutate_state(state, preserve=True)\n            migrations_to_run.remove(migration)\n        elif migration in applied_migrations:\n            migration.mutate_state(state, preserve=False)\n    if self.progress_callback:\n        self.progress_callback('render_success')\n    for migration, _ in plan:\n        self.unapply_migration(states[migration], migration, fake=fake)\n        applied_migrations.remove(migration)\n    last_unapplied_migration = plan[-1][0]\n    state = states[last_unapplied_migration]\n    del state.apps\n    for index, (migration, _) in enumerate(full_plan):\n        if migration == last_unapplied_migration:\n            for migration, _ in full_plan[index:]:\n                if migration in applied_migrations:\n                    migration.mutate_state(state, preserve=False)\n            break\n    return state",
    "docstring": "Take a list of 2-tuples of the form (migration instance, True) and unapply them in reverse order they occur in the full_plan. Since unapplying a migration requires the project state prior to that migration, Django will compute the migration states before each of them in a first run over the plan and then unapply them in a second run over the plan.",
    "type": "method",
    "file_path": "django\\django\\db\\migrations\\executor.py",
    "ast_data": "FunctionDef name:_migrate_all_backwards arg:self arg:plan arg:full_plan arg:fake arguments arg arg arg arg Assign Assign Assign Call Assign Compare If Call For If If Compare If Compare Assign Assign Call Call If Compare Call If Call For Call Call Assign Assign For Call If Compare For If Compare Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "pad_packed_sequence",
    "source_code": "def pad_packed_sequence(sequence: PackedSequence, batch_first: bool=False, padding_value: float=0.0, total_length: Optional[int]=None) -> tuple[Tensor, Tensor]:\n    max_seq_length = sequence.batch_sizes.size(0)\n    if total_length is not None:\n        if total_length < max_seq_length:\n            raise ValueError(f'Expected total_length to be at least the length of the longest sequence in input, but got total_length={total_length} and max sequence length being {max_seq_length}')\n        max_seq_length = total_length\n    padded_output, lengths = _VF._pad_packed_sequence(sequence.data, sequence.batch_sizes, batch_first, padding_value, max_seq_length)\n    unsorted_indices = sequence.unsorted_indices\n    if unsorted_indices is not None:\n        batch_dim = 0 if batch_first else 1\n        return (padded_output.index_select(batch_dim, unsorted_indices), lengths[unsorted_indices.cpu()])\n    return (padded_output, lengths)",
    "docstring": "Pad a packed batch of variable length sequences. It is an inverse operation to :func:. The returned Tensor's data will be of size `batch_firstbatch_firsttotal_length~torch.nn.Module~torch.nn.DataParallelthis FAQ section total_lengthValueErrortotal_lengthsequence`.",
    "type": "function",
    "file_path": "pytorch\\torch\\nn\\utils\\rnn.py",
    "ast_data": "FunctionDef name:pad_packed_sequence arg:sequence arg:batch_first arg:padding_value arg:total_length arguments arg arg arg arg Assign Call If Compare If Compare Raise Call Assign Assign Call Assign If Compare Assign Return return:yes Call Call Return return:yes"
  },
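A round-trip sketch for `pad_packed_sequence`, packing two variable-length sequences and unpacking them back into a padded batch:

```python
import torch
from torch.nn.utils.rnn import pack_sequence, pad_packed_sequence

seqs = [torch.tensor([1, 2, 3]), torch.tensor([4, 5])]  # lengths 3, 2
packed = pack_sequence(seqs)                            # already length-sorted

padded, lengths = pad_packed_sequence(packed, batch_first=True,
                                      padding_value=0)
print(padded)    # tensor([[1, 2, 3], [4, 5, 0]])
print(lengths)   # tensor([3, 2])
```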
  {
    "library": "matplotlib",
    "name": "formatoddticks",
    "source_code": "def formatoddticks(x, pos):\n    if x % 2:\n        return f'{x:1.2f}'\n    else:\n        return ''",
    "docstring": "Format odd tick positions.",
    "type": "function",
    "file_path": "matplotlib\\galleries\\users_explain\\text\\text_intro.py",
    "ast_data": "FunctionDef name:formatoddticks arg:x arg:pos arguments arg arg If Return return:yes Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "normal_",
    "source_code": "def normal_(tensor: Tensor, mean: float=0.0, std: float=1.0, generator: _Optional[torch.Generator]=None) -> Tensor:\n    if torch.overrides.has_torch_function_variadic(tensor):\n        return torch.overrides.handle_torch_function(normal_, (tensor,), tensor=tensor, mean=mean, std=std, generator=generator)\n    return _no_grad_normal_(tensor, mean, std, generator)",
    "docstring": "Fill the input Tensor with values drawn from the normal distribution. :math:. Args: tensor: an n-dimensional mean: the mean of the normal distribution std: the standard deviation of the normal distribution generator: the torch Generator to sample from (default: None) Examples: >>> w = torch.empty(3, 5) >>> nn.init.normal_(w)",
    "type": "function",
    "file_path": "pytorch\\torch\\nn\\init.py",
    "ast_data": "FunctionDef name:normal_ arg:tensor arg:mean arg:std arg:generator arguments arg arg arg arg If Call Return return:yes Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "compute_output_signature",
    "source_code": "@doc_controls.for_subclass_implementers\ndef compute_output_signature(self, input_signature):\n\n    def check_type_return_shape(s):\n        if not isinstance(s, tensor_lib.TensorSpec):\n            raise TypeError('Only TensorSpec signature types are supported, but saw signature entry: {}.'.format(s))\n        return s.shape\n    input_shape = nest.map_structure(check_type_return_shape, input_signature)\n    output_shape = self.compute_output_shape(input_shape)\n    dtype = self._compute_dtype\n    if dtype is None:\n        input_dtypes = [s.dtype for s in nest.flatten(input_signature)]\n        dtype = input_dtypes[0]\n    return nest.map_structure(lambda s: tensor_lib.TensorSpec(dtype=dtype, shape=s), output_shape)",
    "docstring": "Compute the output tensor signature of the layer based on the inputs. Unlike a TensorShape object, a TensorSpec object contains both shape and dtype information for a tensor. This method allows layers to provide output dtype information if it is different from the input dtype. For any layer that doesn't implement this function, the framework will fall back to use , and will assume that the output dtype matches the input dtype. Args: input_signature: Single TensorSpec or nested structure of TensorSpec objects, describing a candidate input for the layer. Returns: Single TensorSpec or nested structure of TensorSpec objects, describing how the layer would transform the provided input. Raises: TypeError: If input_signature contains a non-TensorSpec object.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\base_layer.py",
    "ast_data": "FunctionDef name:compute_output_signature arg:self arg:input_signature arguments arg arg FunctionDef name:check_type_return_shape arg:s arguments arg If Call Raise Call Call Return return:yes Assign Call Assign Call Assign If Compare Assign Call Assign Return return:yes Call arguments arg Call"
  },
  {
    "library": "scipy",
    "name": "_discordant_pairs",
    "source_code": "def _discordant_pairs(A):\n    m, n = A.shape\n    count = 0\n    for i in range(m):\n        for j in range(n):\n            count += A[i, j] * _Dij(A, i, j)\n    return count",
    "docstring": "Twice the number of discordant pairs, excluding ties.",
    "type": "function",
    "file_path": "scipy\\scipy\\stats\\_stats_pythran.py",
    "ast_data": "FunctionDef name:_discordant_pairs arg:A arguments arg Assign Assign For Call For Call Call Return return:yes"
  },
  {
    "library": "numpy",
    "name": "is_local_src_dir",
    "source_code": "def is_local_src_dir(directory):\n    if not is_string(directory):\n        return False\n    abs_dir = os.path.abspath(directory)\n    c = os.path.commonprefix([os.getcwd(), abs_dir])\n    new_dir = abs_dir[len(c):].split(os.sep)\n    if new_dir and (not new_dir[0]):\n        new_dir = new_dir[1:]\n    if new_dir and new_dir[0] == 'build':\n        return False\n    new_dir = os.sep.join(new_dir)\n    return os.path.isdir(new_dir)",
    "docstring": "Return true if directory is local directory.",
    "type": "function",
    "file_path": "numpy\\numpy\\distutils\\misc_util.py",
    "ast_data": "FunctionDef name:is_local_src_dir arg:directory arguments arg If Call Return return:yes Assign Call Assign Call Call Assign Call Call If BoolOp Assign If BoolOp Compare Return return:yes Assign Call Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "report",
    "source_code": "def report(self, accept, **kwargs):\n    if accept:\n        self.naccept += 1",
    "docstring": "called by basinhopping to report the result of the step",
    "type": "method",
    "file_path": "scipy\\scipy\\optimize\\_basinhopping.py",
    "ast_data": "FunctionDef name:report arg:self arg:accept arguments arg arg arg If"
  },
  {
    "library": "numpy",
    "name": "CClass",
    "source_code": "class CClass(AxisConcatenator):\n    __slots__ = ()\n\n    def __init__(self):\n        AxisConcatenator.__init__(self, -1, ndmin=2, trans1d=0)",
    "docstring": "Translates slice objects to concatenation along the second axis. This is short-hand for ``, which is useful because of its common occurrence. In particular, arrays will be stacked along their last axis after being upgraded to at least 2-D with 1's post-pended to the shape (column vectors made out of 1-D arrays). See Also -------- column_stack : Stack 1-D arrays as columns into a 2-D array. r_ : For more detailed documentation. Examples -------- >>> import numpy as np >>> np.c_[np.array([1,2,3]), np.array([4,5,6])] array([[1, 4], [2, 5], [3, 6]]) >>> np.c_[np.array([[1,2,3]]), 0, 0, np.array([[4,5,6]])] array([[1, 2, 3, ..., 4, 5, 6]])",
    "type": "class",
    "file_path": "numpy\\numpy\\lib\\_index_tricks_impl.py",
    "ast_data": "ClassDef name:CClass Assign FunctionDef name:__init__ arg:self arguments arg Call"
  },
  {
    "library": "matplotlib",
    "name": "_repr_png_",
    "source_code": "def _repr_png_(self):\n    X = np.tile(np.linspace(0, 1, _REPR_PNG_SIZE[0]), (_REPR_PNG_SIZE[1], 1))\n    pixels = np.zeros((_REPR_PNG_SIZE[1] * len(self), _REPR_PNG_SIZE[0], 4), dtype=np.uint8)\n    for i, c in enumerate(self):\n        pixels[i * _REPR_PNG_SIZE[1]:(i + 1) * _REPR_PNG_SIZE[1], :] = c(X, bytes=True)\n    png_bytes = io.BytesIO()\n    title = self.name + ' multivariate colormap'\n    author = f'Matplotlib v{mpl.__version__}, https://matplotlib.org'\n    pnginfo = PngInfo()\n    pnginfo.add_text('Title', title)\n    pnginfo.add_text('Description', title)\n    pnginfo.add_text('Author', author)\n    pnginfo.add_text('Software', author)\n    Image.fromarray(pixels).save(png_bytes, format='png', pnginfo=pnginfo)\n    return png_bytes.getvalue()",
    "docstring": "Generate a PNG representation of the Colormap.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\colors.py",
    "ast_data": "FunctionDef name:_repr_png_ arg:self arguments arg Assign Call Call Assign Call Call For Call Assign Call Assign Call Assign Assign Assign Call Call Call Call Call Call Call Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "_compute_loss_grad",
    "source_code": "def _compute_loss_grad(self, layer, sw_sum, activations, deltas, coef_grads, intercept_grads):\n    coef_grads[layer] = safe_sparse_dot(activations[layer].T, deltas[layer])\n    coef_grads[layer] += self.alpha * self.coefs_[layer]\n    coef_grads[layer] /= sw_sum\n    intercept_grads[layer] = np.sum(deltas[layer], axis=0) / sw_sum",
    "docstring": "Compute the gradient of loss with respect to coefs and intercept for specified layer. This function does backpropagation for the specified one layer.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\neural_network\\_multilayer_perceptron.py",
    "ast_data": "FunctionDef name:_compute_loss_grad arg:self arg:layer arg:sw_sum arg:activations arg:deltas arg:coef_grads arg:intercept_grads arguments arg arg arg arg arg arg arg Assign Call Assign Call"
  },
  {
    "library": "tensorflow",
    "name": "gradient_tensors",
    "source_code": "def gradient_tensors(self):\n    return self._gradient_tensors",
    "docstring": "Get the gradient tensors that this object is aware of. Returns: A dict mapping x-tensor names to gradient tensor objects. x-tensor refers to the tensors on the denominator of the differentation.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\debug\\lib\\debug_gradients.py",
    "ast_data": "FunctionDef name:gradient_tensors arg:self arguments arg Return return:yes"
  },
  {
    "library": "pandas",
    "name": "_get_fill_indexer_searchsorted",
    "source_code": "@final\ndef _get_fill_indexer_searchsorted(self, target: Index, method: str_t, limit: int | None=None) -> npt.NDArray[np.intp]:\n    if limit is not None:\n        raise ValueError(f'limit argument for {method!r} method only well-defined if index and target are monotonic')\n    side: Literal['left', 'right'] = 'left' if method == 'pad' else 'right'\n    indexer = self.get_indexer(target)\n    nonexact = indexer == -1\n    indexer[nonexact] = self._searchsorted_monotonic(target[nonexact], side)\n    if side == 'left':\n        indexer[nonexact] -= 1\n    else:\n        indexer[indexer == len(self)] = -1\n    return indexer",
    "docstring": "Fallback pad/backfill get_indexer that works for monotonic decreasing indexes and non-monotonic targets.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\indexes\\base.py",
    "ast_data": "FunctionDef name:_get_fill_indexer_searchsorted arg:self arg:target arg:method arg:limit arguments arg arg arg arg If Compare Raise Call Compare Assign Call Assign Compare Assign Call If Compare Assign Compare Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "calculate_range",
    "source_code": "def calculate_range(dtype: torch.dtype) -> tuple:\n    info = torch.finfo(dtype)\n    return (info.min, info.max)",
    "docstring": "Calculate the range of values for a given torch.dtype. Args: dtype (torch.dtype): The input dtype. Returns: tuple: A tuple containing the minimum and maximum values.",
    "type": "function",
    "file_path": "pytorch\\torch\\_functorch\\partitioners.py",
    "ast_data": "FunctionDef name:calculate_range arg:dtype arguments arg Assign Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_register",
    "source_code": "def _register(self, internal_qualified_name: registration.OpName, symbolic_function: registration.ONNXFunction) -> None:\n    self._registry[internal_qualified_name].append(symbolic_function)",
    "docstring": "Registers a ONNXFunction to an operator. Args: internal_qualified_name: The qualified name of the operator to register: OpName. symbolic_function: The ONNXFunction to register.",
    "type": "method",
    "file_path": "pytorch\\torch\\onnx\\_internal\\_exporter_legacy.py",
    "ast_data": "FunctionDef name:_register arg:self arg:internal_qualified_name arg:symbolic_function arguments arg arg arg Call"
  },
  {
    "library": "scikit-learn",
    "name": "get_n_splits",
    "source_code": "def get_n_splits(self, X=None, y=None, groups=None):\n    if groups is None:\n        raise ValueError(\"The 'groups' parameter should not be None.\")\n    groups = check_array(groups, input_name='groups', ensure_2d=False, dtype=None)\n    return len(np.unique(groups))",
    "docstring": "Returns the number of splitting iterations in the cross-validator. Parameters ---------- X : object Always ignored, exists for compatibility. y : object Always ignored, exists for compatibility. groups : array-like of shape (n_samples,) Group labels for the samples used while splitting the dataset into train/test set. This 'groups' parameter must always be specified to calculate the number of splits, though the other parameters can be omitted. Returns ------- n_splits : int Returns the number of splitting iterations in the cross-validator.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\model_selection\\_split.py",
    "ast_data": "FunctionDef name:get_n_splits arg:self arg:X arg:y arg:groups arguments arg arg arg arg If Compare Raise Call Assign Call Return return:yes Call Call"
  },
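The signature above matches scikit-learn's `LeaveOneGroupOut` splitter; a sketch of calling `get_n_splits`, assuming that splitter:

```python
import numpy as np
from sklearn.model_selection import LeaveOneGroupOut

X = np.zeros((6, 2))
groups = np.array([1, 1, 2, 2, 3, 3])

logo = LeaveOneGroupOut()
print(logo.get_n_splits(X, groups=groups))  # 3: one split per unique group
```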
  {
    "library": "pytorch",
    "name": "has_tensor",
    "source_code": "def has_tensor(obj: object) -> bool:\n    obj_id = id(obj)\n    if obj_id in seen_ids:\n        return seen_ids[obj_id]\n    seen_ids[obj_id] = False\n    if isinstance(obj, (torch.Tensor, torch.nn.Module)) or (istype(obj, type) and issubclass(obj, torch.nn.Module)):\n        seen_ids[obj_id] = True\n        return seen_ids[obj_id]\n    elif config.trace_numpy and np and (istype(obj, np.ndarray) or isinstance(obj, np.generic)):\n        seen_ids[obj_id] = True\n        return seen_ids[obj_id]\n    elif istype(obj, (list, tuple)):\n        seen_ids[obj_id] = any((has_tensor(v) for v in obj))\n        return seen_ids[obj_id]\n    elif istype(obj, dict):\n        values = list(obj.values())\n        seen_ids[obj_id] = any((has_tensor(v) for v in values))\n        return seen_ids[obj_id]\n    elif istype(obj, (str, int, float, type(None), bool)):\n        seen_ids[obj_id] = False\n        return seen_ids[obj_id]\n    elif is_namedtuple(obj) and hasattr(obj, '_fields'):\n        seen_ids[obj_id] = any((has_tensor(getattr(obj, v)) for v in obj._fields))\n        return seen_ids[obj_id]\n    else:\n        return False",
    "docstring": "Recursively check if the obj has a tensor",
    "type": "function",
    "file_path": "pytorch\\torch\\_dynamo\\convert_frame.py",
    "ast_data": "FunctionDef name:has_tensor arg:obj arguments arg Assign Call If Compare Return return:yes Assign If BoolOp Call BoolOp Call Call Assign Return return:yes If BoolOp BoolOp Call Call Assign Return return:yes If Call Assign Call Call Return return:yes If Call Assign Call Call Assign Call Call Return return:yes If Call Call Assign Return return:yes If BoolOp Call Call Assign Call Call Call Return return:yes Return return:yes"
  },
  {
    "library": "django",
    "name": "ellipsoid",
    "source_code": "@property\ndef ellipsoid(self):\n    return self.srs.ellipsoid",
    "docstring": "Return a tuple of the ellipsoid parameters: (semimajor axis, semiminor axis, and inverse flattening).",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\db\\backends\\base\\models.py",
    "ast_data": "FunctionDef name:ellipsoid arg:self arguments arg Return return:yes"
  },
  {
    "library": "numpy",
    "name": "_construction_repr",
    "source_code": "def _construction_repr(dtype, include_align=False, short=False):\n    if dtype.fields is not None:\n        return _struct_str(dtype, include_align=include_align)\n    elif dtype.subdtype:\n        return _subarray_str(dtype)\n    else:\n        return _scalar_str(dtype, short=short)",
    "docstring": "Creates a string repr of the dtype, excluding the 'dtype()' part surrounding the object. This object may be a string, a list, or a dict depending on the nature of the dtype. This is the object passed as the first parameter to the dtype constructor, and if no additional constructor parameters are given, will reproduce the exact memory layout. Parameters ---------- short : bool If true, this creates a shorter repr using 'kind' and 'itemsize', instead of the longer type name. include_align : bool If true, this includes the 'align=True' parameter inside the struct dtype construction dict when needed. Use this flag if you want a proper repr string without the 'dtype()' part around it. If false, this does not preserve the 'align=True' parameter or sticky NPY_ALIGNED_STRUCT flag for struct arrays like the regular repr does, because the 'align' flag is not part of first dtype constructor parameter. This mode is intended for a full 'repr', where the 'align=True' is provided as the second parameter.",
    "type": "function",
    "file_path": "numpy\\numpy\\_core\\_dtype.py",
    "ast_data": "FunctionDef name:_construction_repr arg:dtype arg:include_align arg:short arguments arg arg arg If Compare Return return:yes Call If Return return:yes Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "create_keras_history",
    "source_code": "def create_keras_history(tensors):\n    _, created_layers = _create_keras_history_helper(tensors, set(), [])\n    return created_layers",
    "docstring": "Wraps TensorFlow Operations for compatibility with the Functional API. This method checks to see if a Tensor in is missing Keras metadata and has its origin in a Keras Layer. If so, this method will replace the raw TensorFlow Operations that created this tensor with instances that create identical operations. Any Tensors not originating from a Keras Layer will be treated as constants when constructing instances. Args: tensors: A structure of Tensors, some of which come from raw TensorFlow operations and need to have Keras metadata assigned to them. Returns: created_layers: List. The instances created to wrap the raw Tensorflow operations.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\base_layer_utils.py",
    "ast_data": "FunctionDef name:create_keras_history arg:tensors arguments arg Assign Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "control_inputs",
    "source_code": "@property\ndef control_inputs(self):\n    control_inputs = []\n    for x in self._enters + self._direct_enters:\n        control_inputs.extend(x.op.control_inputs)\n    return control_inputs",
    "docstring": "Control input to all the Enter nodes.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\parallel_for\\pfor.py",
    "ast_data": "FunctionDef name:control_inputs arg:self arguments arg Assign For Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "convert_n_to_tensor_or_indexed_slices",
    "source_code": "def convert_n_to_tensor_or_indexed_slices(values, dtype=None, name=None):\n    return internal_convert_n_to_tensor_or_indexed_slices(values=values, dtype=dtype, name=name, as_ref=False)",
    "docstring": "Converts to a list of or objects. Any or objects in are returned unmodified. Args: values: A list of , , , or objects that can be consumed by . dtype: (Optional.) The required of the returned . name: (Optional.) A name prefix to used when a new is created, in which case element will be given the name . Returns: A list of , , and/or objects. Raises: TypeError: If no conversion function is registered for an element in . RuntimeError: If a registered conversion function returns an invalid value.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\indexed_slices.py",
    "ast_data": "FunctionDef name:convert_n_to_tensor_or_indexed_slices arg:values arg:dtype arg:name arguments arg arg arg Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "set_markevery",
    "source_code": "def set_markevery(self, every):\n    self._markevery = every\n    self.stale = True",
    "docstring": "Set the markevery property to subsample the plot when using markers. e.g., if `/gallery/lines_bars_and_markers/markevery_demo`. Notes ----- Setting *markevery* will still only draw markers at actual data points. While the float argument form aims for uniform visual spacing, it has to coerce from the ideal spacing to the nearest available data point. Depending on the number and distribution of data points, the result may still not look evenly spaced. When using a start offset to specify the first marker, the offset will be from the first data point which may be different from the first the visible data point if the plot is zoomed in. If zooming in on a plot when using float arguments then the actual data points that have markers will change because the distance between markers is always determined from the display-coordinates axes-bounding-box-diagonal regardless of the actual axes data limits.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\lines.py",
    "ast_data": "FunctionDef name:set_markevery arg:self arg:every arguments arg arg Assign Assign"
  },
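A sketch of `set_markevery` on a `Line2D`, showing both the integer and float forms described above:

```python
import numpy as np
import matplotlib.pyplot as plt

x = np.linspace(0, 2 * np.pi, 200)
fig, ax = plt.subplots()
line, = ax.plot(x, np.sin(x), "-o")

line.set_markevery(10)     # a marker at every 10th data point
# line.set_markevery(0.1)  # or: markers spaced ~10% of the axes diagonal
plt.show()
```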
  {
    "library": "pytorch",
    "name": "_State",
    "source_code": "class _State(str, Enum):\n    pass",
    "docstring": "Base Class for defining module state to capture snapshots .",
    "type": "class",
    "file_path": "pytorch\\torch\\distributed\\_tools\\mem_tracker.py",
    "ast_data": "ClassDef name:_State"
  },
  {
    "library": "tensorflow",
    "name": "get_session_tensor",
    "source_code": "@tf_export(v1=['get_session_tensor'])\ndef get_session_tensor(handle, dtype, name=None):\n    handle_device = TensorHandle._get_device_name(handle)\n    with ops.device(handle_device):\n        holder = array_ops.placeholder(dtypes.string)\n        _register_handle_feeder(holder.graph, holder, dtype)\n        tensor = gen_data_flow_ops.get_session_tensor(holder, dtype, name=name)\n    return (holder, tensor)",
    "docstring": "Get the tensor of type by feeding a tensor handle. This is EXPERIMENTAL and subject to change. Get the value of the tensor from a tensor handle. The tensor is produced in a previous run() and stored in the state of the session. Args: handle: The string representation of a persistent tensor handle. dtype: The type of the output tensor. name: Optional name prefix for the return tensor. Returns: A pair of tensors. The first is a placeholder for feeding a tensor handle and the second is the tensor in the session state keyed by the tensor handle. Example:",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\session_ops.py",
    "ast_data": "FunctionDef name:get_session_tensor arg:handle arg:dtype arg:name arguments arg arg arg Assign Call With Call Assign Call Call Assign Call Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "has_sorted_indices",
    "source_code": "@property\ndef has_sorted_indices(self) -> bool:\n    if not hasattr(self, '_has_sorted_indices'):\n        M = len(self.indptr) - 1\n        self._has_sorted_indices = bool(csr_has_sorted_indices(M, self.indptr, self.indices))\n    return self._has_sorted_indices",
    "docstring": "Whether the indices are sorted Returns - True: if the indices of the array/matrix are in sorted order - False: otherwise",
    "type": "method",
    "file_path": "scipy\\scipy\\sparse\\_compressed.py",
    "ast_data": "FunctionDef name:has_sorted_indices arg:self arguments arg If Call Assign Call Assign Call Call Return return:yes"
  },
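A sketch of `has_sorted_indices` on a CSR array whose row 0 stores its column indices out of order (assumes scipy >= 1.8 for `csr_array`):

```python
import numpy as np
from scipy.sparse import csr_array

data = np.array([1.0, 2.0])
indices = np.array([2, 0])     # row 0 columns stored as [2, 0]
indptr = np.array([0, 2, 2])
A = csr_array((data, indices, indptr), shape=(2, 3))

print(A.has_sorted_indices)  # False
A.sort_indices()
print(A.has_sorted_indices)  # True
```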
  {
    "library": "matplotlib",
    "name": "_repr_html_",
    "source_code": "def _repr_html_(self):\n    png_bytes = self._repr_png_()\n    png_base64 = base64.b64encode(png_bytes).decode('ascii')\n\n    def color_block(color):\n        hex_color = to_hex(color, keep_alpha=True)\n        return f'<div title=\"{hex_color}\" style=\"display: inline-block; width: 1em; height: 1em; margin: 0; vertical-align: middle; border: 1px solid #555; background-color: {hex_color};\"></div>'\n    return f'<div style=\"vertical-align: middle;\"><strong>{self.name}</strong> </div><div class=\"cmap\"><img alt=\"{self.name} BivarColormap\" title=\"{self.name}\" style=\"border: 1px solid #555;\" src=\"data:image/png;base64,{png_base64}\"></div><div style=\"vertical-align: middle; max-width: {_BIVAR_REPR_PNG_SIZE + 2}px; display: flex; justify-content: space-between;\"><div style=\"float: left;\">{color_block(self.get_outside())} outside</div><div style=\"float: right;\">bad {color_block(self.get_bad())}</div></div>'",
    "docstring": "Generate an HTML representation of the Colormap.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\colors.py",
    "ast_data": "FunctionDef name:_repr_html_ arg:self arguments arg Assign Call Assign Call Call FunctionDef name:color_block arg:color arguments arg Assign Call Return return:yes Return return:yes Call Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "prune_unconnected_ops_from_xla",
    "source_code": "def prune_unconnected_ops_from_xla(prune_graph: ops.Graph):\n    for graph in [prune_graph] + [f for f in prune_graph._functions.values()]:\n        if not isinstance(graph, ops.Graph):\n            continue\n        for op in graph.get_operations():\n            if op.type not in _UNCONNECTED_OPS_TO_PRUNE:\n                continue\n            outputs_consumed = False\n            for output in op.outputs:\n                if output.consumers():\n                    outputs_consumed = True\n                    break\n            if not outputs_consumed:\n                logging.info('Pruning OP %s of type %s from XLA Compile due to it being disconnected.', op.name, op.type)\n                op._clear_attr(tpu_replication._TPU_REPLICATE_ATTR)",
    "docstring": "Prunes unconnected ops as listed in _UNCONNECTED_OPS_TO_PRUNE. Args: prune_graph: A tensorflow graph from which we wish to prune unconnected ops as listed in _UNCONNECTED_OPS_TO_PRUNE. In general, these ops should have no inputs and no consumers. These can often be left behind due to graph construction rewiring (for instance TF-Hub). While they never execute, they will cause XLA compile to fail so we strip them from XLA compile by removing the tpu_replicate attribute.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\tpu\\tpu.py",
    "ast_data": "FunctionDef name:prune_unconnected_ops_from_xla arg:prune_graph arguments arg For Call If Call For Call If Compare Assign For If Call Assign If Call Call"
  },
  {
    "library": "tensorflow",
    "name": "make_gradient_clipvalue_fn",
    "source_code": "def make_gradient_clipvalue_fn(clipvalue):\n    if clipvalue is None:\n        return lambda grads_and_vars: grads_and_vars\n\n    def gradient_clipvalue_fn(grads_and_vars):\n        if isinstance(distribute_lib.get_strategy(), (central_storage_strategy.CentralStorageStrategy, central_storage_strategy.CentralStorageStrategyV1)):\n            raise ValueError('`clipvalue` is not supported with `CenteralStorageStrategy`')\n        clipped_grads_and_vars = [(clip_ops.clip_by_value(g, -clipvalue, clipvalue), v) for g, v in grads_and_vars]\n        return clipped_grads_and_vars\n    return gradient_clipvalue_fn",
    "docstring": "Creates a gradient transformation function for clipping by value.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\optimizer_v2\\utils.py",
    "ast_data": "FunctionDef name:make_gradient_clipvalue_fn arg:clipvalue arguments arg If Compare Return return:yes arguments arg FunctionDef name:gradient_clipvalue_fn arg:grads_and_vars arguments arg If Call Call Raise Call Assign Call Return return:yes Return return:yes"
  },
  {
    "library": "sphinx",
    "name": "on_build_finished",
    "source_code": "def on_build_finished(app: Sphinx, error: Exception) -> None:\n    domain = app.env.domains['duration']\n    if not domain.reading_durations:\n        return\n    durations = sorted(domain.reading_durations.items(), key=itemgetter(1), reverse=True)\n    logger.info('')\n    logger.info(__('====================== slowest reading durations ======================='))\n    for docname, d in islice(durations, 5):\n        logger.info(f'{d:.3f} {docname}')",
    "docstring": "Display duration ranking on the current build.",
    "type": "function",
    "file_path": "sphinx\\sphinx\\ext\\duration.py",
    "ast_data": "FunctionDef name:on_build_finished arg:app arg:error arguments arg arg Assign If Return return:no Assign Call Call Call Call Call Call For Call Call"
  },
  {
    "library": "authlib",
    "name": "import_key",
    "source_code": "@classmethod\ndef import_key(cls, raw, options=None):\n    kty = None\n    if options is not None:\n        kty = options.get('kty')\n    if kty is None and isinstance(raw, dict):\n        kty = raw.get('kty')\n    if kty is None:\n        raw_key = load_pem_key(raw)\n        for _kty in cls.JWK_KEY_CLS:\n            key_cls = cls.JWK_KEY_CLS[_kty]\n            if key_cls.validate_raw_key(raw_key):\n                return key_cls.import_key(raw_key, options)\n    key_cls = cls.JWK_KEY_CLS[kty]\n    return key_cls.import_key(raw, options)",
    "docstring": "Import a Key from bytes, string, PEM or dict. :return: Key instance",
    "type": "method",
    "file_path": "authlib\\authlib\\jose\\rfc7517\\jwk.py",
    "ast_data": "FunctionDef name:import_key arg:cls arg:raw arg:options arguments arg arg arg Assign If Compare Assign Call If BoolOp Compare Call Assign Call If Compare Assign Call For Assign If Call Return return:yes Call Assign Return return:yes Call"
  },
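A sketch of `import_key` via authlib's `JsonWebKey`; the symmetric key material is illustrative only:

```python
from authlib.jose import JsonWebKey

# From a JWK dict: `kty` is read from the dict itself.
key = JsonWebKey.import_key({"kty": "oct", "k": "c2VjcmV0"})

# From raw bytes, pass `kty` through options instead.
key2 = JsonWebKey.import_key(b"illustrative-secret", {"kty": "oct"})
```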
  {
    "library": "pygame",
    "name": "groupcollide",
    "source_code": "def groupcollide(groupa, groupb, dokilla, dokillb, collided=None):\n    crashed = {}\n    sprite_collide_func = spritecollide\n    if dokilla:\n        for group_a_sprite in groupa.sprites():\n            collision = sprite_collide_func(group_a_sprite, groupb, dokillb, collided)\n            if collision:\n                crashed[group_a_sprite] = collision\n                group_a_sprite.kill()\n    else:\n        for group_a_sprite in groupa:\n            collision = sprite_collide_func(group_a_sprite, groupb, dokillb, collided)\n            if collision:\n                crashed[group_a_sprite] = collision\n    return crashed",
    "docstring": "detect collision between a group and another group pygame.sprite.groupcollide(groupa, groupb, dokilla, dokillb): return dict Given two groups, this will find the intersections between all sprites in each group. It returns a dictionary of all sprites in the first group that collide. The value for each item in the dictionary is a list of the sprites in the second group it collides with. The two dokill arguments control if the sprites from either group will be automatically removed from all groups. Collided is a callback function used to calculate if two sprites are colliding. it should take two sprites as values, and return a bool value indicating if they are colliding. If collided is not passed, all sprites must have a \"rect\" value, which is a rectangle of the sprite area that will be used to calculate the collision.",
    "type": "function",
    "file_path": "pygame\\src_py\\sprite.py",
    "ast_data": "FunctionDef name:groupcollide arg:groupa arg:groupb arg:dokilla arg:dokillb arg:collided arguments arg arg arg arg arg Assign Assign If For Call Assign Call If Assign Call For Assign Call If Assign Return return:yes"
  },
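A sketch of `groupcollide` with two one-sprite groups; the rects are placed so they overlap:

```python
import pygame

a, b = pygame.sprite.Sprite(), pygame.sprite.Sprite()
a.rect = pygame.Rect(0, 0, 10, 10)
b.rect = pygame.Rect(5, 5, 10, 10)   # overlaps `a`

group_a = pygame.sprite.Group(a)
group_b = pygame.sprite.Group(b)

# dokilla=dokillb=False: nothing is removed; the dict maps each sprite in
# group_a to the list of sprites in group_b it collides with.
hits = pygame.sprite.groupcollide(group_a, group_b, False, False)
print(hits)  # {<Sprite ...>: [<Sprite ...>]}
```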
  {
    "library": "scikit-learn",
    "name": "_prepare_fit_binary",
    "source_code": "def _prepare_fit_binary(est, y, i, input_dtype, label_encode=True):\n    y_i = np.ones(y.shape, dtype=input_dtype, order='C')\n    if label_encode:\n        y_i[y != est.classes_[i]] = 0.0\n    else:\n        y_i[y != est.classes_[i]] = -1.0\n    average_intercept = 0\n    average_coef = None\n    if len(est.classes_) == 2:\n        if not est.average:\n            coef = est.coef_.ravel()\n            intercept = est.intercept_[0]\n        else:\n            coef = est._standard_coef.ravel()\n            intercept = est._standard_intercept[0]\n            average_coef = est._average_coef.ravel()\n            average_intercept = est._average_intercept[0]\n    elif not est.average:\n        coef = est.coef_[i]\n        intercept = est.intercept_[i]\n    else:\n        coef = est._standard_coef[i]\n        intercept = est._standard_intercept[i]\n        average_coef = est._average_coef[i]\n        average_intercept = est._average_intercept[i]\n    return (y_i, coef, intercept, average_coef, average_intercept)",
    "docstring": "Initialization for fit_binary. Returns y, coef, intercept, average_coef, average_intercept.",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\linear_model\\_stochastic_gradient.py",
    "ast_data": "FunctionDef name:_prepare_fit_binary arg:est arg:y arg:i arg:input_dtype arg:label_encode arguments arg arg arg arg arg Assign Call If Assign Compare Assign Compare Assign Assign If Compare Call If Assign Call Assign Assign Call Assign Assign Call Assign If Assign Assign Assign Assign Assign Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_identify_gradient_grad",
    "source_code": "@ops.RegisterGradient('DebugGradientIdentity')\ndef _identify_gradient_grad(op, dy):\n    grad_debugger_uuid, orig_tensor_name = _parse_grad_debug_op_name(op.name)\n    grad_debugger = _gradient_debuggers[grad_debugger_uuid]\n    grad_debugger.register_gradient_tensor(orig_tensor_name, dy)\n    return dy",
    "docstring": "Gradient function for the DebugIdentity op.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\debug\\lib\\debug_gradients.py",
    "ast_data": "FunctionDef name:_identify_gradient_grad arg:op arg:dy arguments arg arg Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_assert_sparse_indices_are_ragged_right",
    "source_code": "def _assert_sparse_indices_are_ragged_right(indices):\n    index_prefix = indices[:, :-1]\n    index_suffix = indices[:, -1]\n    index_prefix_changed = math_ops.reduce_any(math_ops.not_equal(index_prefix[1:], index_prefix[:-1]), axis=1)\n    index_ok = array_ops.where(index_prefix_changed, math_ops.equal(index_suffix[1:], 0), math_ops.equal(index_suffix[1:], index_suffix[:-1] + 1))\n    sparse_indices_are_ragged_right = math_ops.logical_and(math_ops.reduce_all(math_ops.equal(index_suffix[:1], 0)), math_ops.reduce_all(index_ok))\n    message = ['SparseTensor is not right-ragged', 'SparseTensor.indices =', indices]\n    return [control_flow_assert.Assert(sparse_indices_are_ragged_right, message)]",
    "docstring": "Checks that the given SparseTensor.indices tensor is ragged-right. Example: is not ragged right because the entry skips a cell. Args: indices: The SparseTensor indices to check. Returns: A list of control dependency op tensors.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\ragged_tensor.py",
    "ast_data": "FunctionDef name:_assert_sparse_indices_are_ragged_right arg:indices arguments arg Assign Assign Assign Call Call Assign Call Call Call Assign Call Call Call Call Assign Return return:yes Call"
  },
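To make the ragged-right predicate above concrete, here is a NumPy re-implementation of the same checks (a sketch, not TensorFlow's API):

```python
# Within each row-prefix run the last index must count 0, 1, 2, ... and
# restart at 0 whenever the prefix changes.
import numpy as np

def is_ragged_right(indices):
    indices = np.asarray(indices)
    prefix, suffix = indices[:, :-1], indices[:, -1]
    prefix_changed = np.any(prefix[1:] != prefix[:-1], axis=1)
    ok = np.where(prefix_changed, suffix[1:] == 0, suffix[1:] == suffix[:-1] + 1)
    return bool(np.all(suffix[:1] == 0) and np.all(ok))

print(is_ragged_right([[0, 0], [0, 1], [2, 0]]))  # True
print(is_ragged_right([[0, 0], [0, 1], [2, 1]]))  # False: [2, 1] skips [2, 0]
```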
  {
    "library": "kornia",
    "name": "AutoAugment",
    "source_code": "class AutoAugment(PolicyAugmentBase):\n\n    def __init__(self, policy: Union[str, List[SUBPOLICY_CONFIG]]='imagenet', transformation_matrix_mode: str='silent') -> None:\n        if policy == 'imagenet':\n            _policy = imagenet_policy\n        elif policy == 'cifar10':\n            _policy = cifar10_policy\n        elif policy == 'svhn':\n            _policy = svhn_policy\n        elif isinstance(policy, (list, tuple)):\n            _policy = policy\n        else:\n            raise NotImplementedError(f'Invalid policy `{policy}`.')\n        super().__init__(_policy, transformation_matrix_mode=transformation_matrix_mode)\n        selection_weights = tensor([1.0 / len(self)] * len(self))\n        self.rand_selector = Categorical(selection_weights)\n\n    def compose_subpolicy_sequential(self, subpolicy: SUBPOLICY_CONFIG) -> PolicySequential:\n        return PolicySequential(*[getattr(ops, name)(prob, mag) for name, prob, mag in subpolicy])\n\n    def get_forward_sequence(self, params: Optional[List[ParamItem]]=None) -> Iterator[Tuple[str, Module]]:\n        if params is None:\n            idx = self.rand_selector.sample((1,))\n            return self.get_children_by_indices(idx)\n        return self.get_children_by_params(params)",
    "docstring": "Apply AutoAugment :cite: searched strategies. Args: policy: a customized policy config or presets of \"imagenet\", \"cifar10\", and \"svhn\". transformation_matrix_mode: computation mode for the chained transformation matrix, via attribute. If , transformation matrix will be computed silently and the non-rigid modules will be ignored as identity transformations. If , transformation matrix will be computed silently and the non-rigid modules will trigger errors. If , transformation matrix will be totally ignored. Examples: >>> import torch >>> import kornia.augmentation as K >>> in_tensor = torch.rand(5, 3, 30, 30) >>> aug = K.AugmentationSequential(AutoAugment()) >>> aug(in_tensor).shape torch.Size([5, 3, 30, 30])",
    "type": "class",
    "file_path": "kornia\\kornia\\augmentation\\auto\\autoaugment\\autoaugment.py",
    "ast_data": "ClassDef name:AutoAugment FunctionDef name:__init__ arg:self arg:policy arg:transformation_matrix_mode arguments arg arg arg If Compare Assign If Compare Assign If Compare Assign If Call Assign Raise Call Call Call Assign Call Call Call Assign Call FunctionDef name:compose_subpolicy_sequential arg:self arg:subpolicy arguments arg arg Return return:yes Call Call Call FunctionDef name:get_forward_sequence arg:self arg:params arguments arg arg If Compare Assign Call Return return:yes Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "from_config",
    "source_code": "@classmethod\ndef from_config(cls, config, custom_objects=None, columns_by_name=None):\n    from tensorflow.python.feature_column.serialization import deserialize_feature_column\n    _check_config_keys(config, cls._fields)\n    kwargs = _standardize_and_copy_config(config)\n    kwargs['keys'] = tuple([deserialize_feature_column(c, custom_objects, columns_by_name) for c in config['keys']])\n    return cls(**kwargs)",
    "docstring": "See 'FeatureColumn` base class.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\feature_column\\feature_column_v2.py",
    "ast_data": "FunctionDef name:from_config arg:cls arg:config arg:custom_objects arg:columns_by_name arguments arg arg arg arg Call Assign Call Assign Call Call Return return:yes Call"
  },
  {
    "library": "kornia",
    "name": "Keypoints",
    "source_code": "@dataclass\nclass Keypoints:\n    xys: Tensor\n    detection_logp: Tensor\n\n    def merge_with_descriptors(self, descriptors: Tensor) -> DISKFeatures:\n        dtype = descriptors.dtype\n        x, y = self.xys.T\n        desc = descriptors[:, y, x].T\n        desc = F.normalize(desc, dim=-1)\n        return DISKFeatures(self.xys.to(dtype), desc, self.detection_logp)",
    "docstring": "A temporary struct used to store keypoint detections and their log-probabilities. After construction, merge_with_descriptors is used to select corresponding descriptors from unet output.",
    "type": "class",
    "file_path": "kornia\\kornia\\feature\\disk\\structs.py",
    "ast_data": "ClassDef name:Keypoints FunctionDef name:merge_with_descriptors arg:self arg:descriptors arguments arg arg Assign Assign Assign Assign Call Return return:yes Call Call"
  },
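The descriptor gathering in `merge_with_descriptors` is just advanced indexing. A plain-PyTorch sketch with hypothetical shapes:

```python
# Use integer (x, y) keypoint locations to gather per-pixel descriptors from
# a dense (C, H, W) map, then L2-normalize each row.
import torch
import torch.nn.functional as F

C, H, W = 8, 16, 16
descriptors = torch.randn(C, H, W)
xys = torch.tensor([[3, 5], [10, 2]])   # N x 2, columns are (x, y)

x, y = xys.T
desc = descriptors[:, y, x].T           # N x C: one descriptor per keypoint
desc = F.normalize(desc, dim=-1)        # unit-norm rows
print(desc.shape, desc.norm(dim=-1))    # torch.Size([2, 8]), ~[1., 1.]
```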
  {
    "library": "kornia",
    "name": "_crop",
    "source_code": "def _crop(img: Tensor, cropping_shape: List[int]) -> Tensor:\n    return pad(img, (-cropping_shape[4], -cropping_shape[5], -cropping_shape[2], -cropping_shape[3], -cropping_shape[0], -cropping_shape[1]))",
    "docstring": "Crop out the part of \"valid\" convolution area.",
    "type": "function",
    "file_path": "kornia\\kornia\\metrics\\ssim3d.py",
    "ast_data": "FunctionDef name:_crop arg:img arg:cropping_shape arguments arg arg Return return:yes Call"
  },
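`_crop` relies on `torch.nn.functional.pad` accepting negative padding, which removes rows/columns instead of adding them. A quick 2-D check of that trick:

```python
# Padding by -k on a side crops k elements from that side, so the "valid"
# region can be extracted without slicing arithmetic at the call site.
import torch
import torch.nn.functional as F

x = torch.arange(5 * 6, dtype=torch.float32).reshape(1, 1, 5, 6)
cropped = F.pad(x, (-1, -1, -2, -1))  # (left, right, top, bottom)
print(cropped.shape)  # torch.Size([1, 1, 2, 4]): 5-2-1 rows, 6-1-1 cols
```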
  {
    "library": "tensorflow",
    "name": "register_tf_serializable",
    "source_code": "def register_tf_serializable(name=None, predicate=None):\n    return register_serializable(package='tf', name=name, predicate=predicate)",
    "docstring": "See the docstring for .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\saved_model\\registration\\__init__.py",
    "ast_data": "FunctionDef name:register_tf_serializable arg:name arg:predicate arguments arg arg Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "set_default_handler_map",
    "source_code": "@classmethod\ndef set_default_handler_map(cls, handler_map):\n    cls._default_handler_map = handler_map",
    "docstring": "Set the global default handler map, shared by all legends.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\legend.py",
    "ast_data": "FunctionDef name:set_default_handler_map arg:cls arg:handler_map arguments arg arg Assign"
  },
  {
    "library": "tensorflow",
    "name": "task_ordinal_at_coordinates",
    "source_code": "def task_ordinal_at_coordinates(self, device_coordinates):\n    return self._topology_tasks[tuple(device_coordinates)]",
    "docstring": "Returns the TensorFlow task number attached to . Args: device_coordinates: An integer sequence describing a device's physical coordinates in the TPU fabric. Returns: Returns the TensorFlow task number that contains the TPU device with those physical coordinates.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\tpu\\topology.py",
    "ast_data": "FunctionDef name:task_ordinal_at_coordinates arg:self arg:device_coordinates arguments arg arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "on_graph_def",
    "source_code": "def on_graph_def(self, graph_def, device_name, wall_time):\n    raise NotImplementedError('on_graph_def() is not implemented in the base servicer class')",
    "docstring": "Callback for Event proto received through the gRPC stream. This Event proto carries a GraphDef, encoded as bytes, in its graph_def field. Args: graph_def: A GraphDef object. device_name: Name of the device on which the graph was created. wall_time: An epoch timestamp (in microseconds) for the graph. Returns: or an proto to be sent back to the client. If , an proto construct with the default no-arg constructor will be sent back to the client.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\debug\\lib\\grpc_debug_server.py",
    "ast_data": "FunctionDef name:on_graph_def arg:self arg:graph_def arg:device_name arg:wall_time arguments arg arg arg arg Raise Call"
  },
  {
    "library": "pytorch",
    "name": "from_reference",
    "source_code": "@classmethod\ndef from_reference(cls, ref_qconv, output_scale, output_zero_point):\n    qconv = cls(ref_qconv.in_channels, ref_qconv.out_channels, ref_qconv.kernel_size, ref_qconv.stride, ref_qconv.padding, ref_qconv.dilation, ref_qconv.groups, ref_qconv.bias is not None, ref_qconv.padding_mode, device=ref_qconv.weight.device, dtype=ref_qconv.weight.dtype)\n    qweight = ref_qconv.get_quantized_weight()\n    qconv.set_weight_bias(qweight, ref_qconv.bias)\n    qconv.scale = float(output_scale)\n    qconv.zero_point = int(output_zero_point)\n    return qconv",
    "docstring": "Create a (fbgemm/qnnpack) quantized module from a reference quantized module Args: ref_qconv (Module): a reference quantized module, either produced by torch.ao.quantization utilities or provided by the user output_scale (float): scale for output Tensor output_zero_point (int): zero point for output Tensor",
    "type": "method",
    "file_path": "pytorch\\torch\\ao\\nn\\quantized\\modules\\conv.py",
    "ast_data": "FunctionDef name:from_reference arg:cls arg:ref_qconv arg:output_scale arg:output_zero_point arguments arg arg arg arg Assign Call Compare Assign Call Call Assign Call Assign Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "get_epoch",
    "source_code": "def get_epoch():\n    global _epoch\n    _epoch = mpl._val_or_rc(_epoch, 'date.epoch')\n    return _epoch",
    "docstring": "Get the epoch used by . Returns ------- epoch : str String for the epoch (parsable by ).",
    "type": "function",
    "file_path": "matplotlib\\lib\\matplotlib\\dates.py",
    "ast_data": "FunctionDef name:get_epoch arguments Assign Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "ZeroPad3d",
    "source_code": "class ZeroPad3d(ConstantPad3d):\n    padding: tuple[int, int, int, int, int, int]\n\n    def __init__(self, padding: _size_6_t) -> None:\n        super().__init__(padding, 0.0)\n\n    def extra_repr(self) -> str:\n        return f'{self.padding}'",
    "docstring": "Pads the input tensor boundaries with zero. For -dimensional padding, use :func:. Args: padding (int, tuple): the size of the padding. If is , uses the same padding in all boundaries. If a 6-, uses (:math:, :math:, :math:, :math:, :math:, :math:) Shape: - Input: :math: or :math:. - Output: :math: or :math:, where :math: :math: :math: Examples:: >>> m = nn.ZeroPad3d(3) >>> input = torch.randn(16, 3, 10, 20, 30) >>> output = m(input) >>> # using different paddings for different sides >>> m = nn.ZeroPad3d((3, 3, 6, 6, 0, 1)) >>> output = m(input)",
    "type": "class",
    "file_path": "pytorch\\torch\\nn\\modules\\padding.py",
    "ast_data": "ClassDef name:ZeroPad3d FunctionDef name:__init__ arg:self arg:padding arguments arg arg Call Call FunctionDef name:extra_repr arg:self arguments arg Return return:yes"
  },
  {
    "library": "authlib",
    "name": "register_client_auth_method",
    "source_code": "def register_client_auth_method(self, method, func):\n    if self._client_auth is None and self.query_client:\n        self._client_auth = ClientAuthentication(self.query_client)\n    self._client_auth.register(method, func)",
    "docstring": "Add more client auth method. The default methods are: * none: The client is a public client and does not have a client secret * client_secret_post: The client uses the HTTP POST parameters * client_secret_basic: The client uses HTTP Basic :param method: Name of the Auth method :param func: Function to authenticate the client The auth method accept two parameters: ``, an example for this method:: def authenticate_client_via_custom(query_client, request): client_id = request.headers[\"X-Client-Id\"] client = query_client(client_id) do_some_validation(client) return client authorization_server.register_client_auth_method( \"custom\", authenticate_client_via_custom )",
    "type": "method",
    "file_path": "authlib\\authlib\\oauth2\\rfc6749\\authorization_server.py",
    "ast_data": "FunctionDef name:register_client_auth_method arg:self arg:method arg:func arguments arg arg arg If BoolOp Compare Assign Call Call"
  },
  {
    "library": "matplotlib",
    "name": "get_thetamax",
    "source_code": "def get_thetamax(self):\n    return np.rad2deg(self.viewLim.xmax)",
    "docstring": "Return the maximum theta limit in degrees.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\projections\\polar.py",
    "ast_data": "FunctionDef name:get_thetamax arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_check_invalid_cases",
    "source_code": "def _check_invalid_cases(embedding_lookup_device):\n    if tpu.under_tpu_inference_context() and embedding_lookup_device == EmbeddingDevice.TPU_EMBEDDING_CORE:\n        raise ValueError('Using embedding_lookup_device=tpu_embedding_core during inference is not supported.')\n    if embedding_lookup_device == EmbeddingDevice.CPU:\n        if not tpu.under_tpu_inference_context():\n            raise ValueError('Using TPUEmbeddingColumn with embedding_lookup_device=\"cpu\" during training is not supported.')",
    "docstring": "Checks for invalid embedding_lookup_device configurations.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\tpu\\feature_column_v2.py",
    "ast_data": "FunctionDef name:_check_invalid_cases arg:embedding_lookup_device arguments arg If BoolOp Call Compare Raise Call If Compare If Call Raise Call"
  },
  {
    "library": "django",
    "name": "get_response_async",
    "source_code": "async def get_response_async(self, request):\n    set_urlconf(settings.ROOT_URLCONF)\n    response = await self._middleware_chain(request)\n    response._resource_closers.append(request.close)\n    if response.status_code >= 400:\n        await sync_to_async(log_response, thread_sensitive=False)('%s: %s', response.reason_phrase, request.path, response=response, request=request)\n    return response",
    "docstring": "Asynchronous version of get_response. Funneling everything, including WSGI, into a single async get_response() is too slow. Avoid the context switch by using a separate async response path.",
    "type": "method",
    "file_path": "django\\django\\core\\handlers\\base.py",
    "ast_data": "AsyncFunctionDef name:get_response_async arg:self arg:request arguments arg arg Call Assign Call Call If Compare Call Call Return return:yes"
  },
  {
    "library": "pandas",
    "name": "_apply_str",
    "source_code": "def _apply_str(self, obj, func: str, *args, **kwargs):\n    assert isinstance(func, str)\n    if hasattr(obj, func):\n        f = getattr(obj, func)\n        if callable(f):\n            return f(*args, **kwargs)\n        assert len(args) == 0\n        assert not any((kwarg == 'axis' for kwarg in kwargs))\n        return f\n    elif hasattr(np, func) and hasattr(obj, '__array__'):\n        f = getattr(np, func)\n        return f(obj, *args, **kwargs)\n    else:\n        msg = f\"'{func}' is not a valid function for '{type(obj).__name__}' object\"\n        raise AttributeError(msg)",
    "docstring": "if arg is a string, then try to operate on it: - try to find a function (or attribute) on obj - try to find a numpy function - raise",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\apply.py",
    "ast_data": "FunctionDef name:_apply_str arg:self arg:obj arg:func arguments arg arg arg arg arg Call If Call Assign Call If Call Return return:yes Call Compare Call Call Compare Return return:yes If BoolOp Call Call Assign Call Return return:yes Call Assign Call Raise Call"
  },
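A condensed illustration of the dispatch order in `_apply_str` (object method or attribute first, then a same-named NumPy function, then raise); this is a sketch, not pandas' internal code path:

```python
import numpy as np

def apply_str(obj, func, *args, **kwargs):
    # 1) prefer a method/attribute on the object itself
    if hasattr(obj, func):
        f = getattr(obj, func)
        return f(*args, **kwargs) if callable(f) else f
    # 2) fall back to a NumPy function of the same name for array-likes
    if hasattr(np, func) and hasattr(obj, "__array__"):
        return getattr(np, func)(obj, *args, **kwargs)
    # 3) otherwise the name is not valid for this object
    raise AttributeError(f"'{func}' is not valid for {type(obj).__name__}")

arr = np.array([3, 1, 2])
print(apply_str(arr, "max"))     # 3, via ndarray.max
print(apply_str(arr, "median"))  # 2.0, via the np.median fallback
```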
  {
    "library": "tensorflow",
    "name": "get_min_max_value",
    "source_code": "def get_min_max_value(self) -> tuple[float, float]:\n    if self._num_bins > 512:\n        logging.warning('num_bins=%d is too large. The HISTOGRAM_MSE_BRUTEFORCE method tests all histogram mid value pairs, so it may take a long time.', self._num_bins)\n    mse_min = (float('inf'), float('inf'), float('inf'))\n    for left, right in itertools.combinations(range(self._num_bins), 2):\n        quant_min, quant_max = (self._hist_mids[left], self._hist_mids[right])\n        mse_tuple = self._get_weighted_mean_squared_error(quant_min, quant_max)\n        mse_min = min(mse_tuple, mse_min)\n    min_value, max_value = (mse_min[1], mse_min[2])\n    return (min_value, max_value)",
    "docstring": "Finds the optimal quant_min and quant_max by testing all possible cases. It guarantees optimal quant_min and quant_max for the representative dataset, but not for the test dataset. Returns: (min_value, max_value): Min and max calculated using HistogramMseBruteforce.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\compiler\\mlir\\quantization\\tensorflow\\calibrator\\calibration_algorithm.py",
    "ast_data": "FunctionDef name:get_min_max_value arg:self arguments arg If Compare Call Assign Call Call Call For Call Call Assign Assign Call Assign Call Assign Return return:yes"
  },
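The brute-force search in `get_min_max_value` tries every pair of histogram mid-points and keeps the pair with the lowest error. A toy NumPy version of that loop, with a deliberately simplified (clip-based) error metric instead of the real weighted quantization MSE:

```python
import itertools
import numpy as np

values = np.random.default_rng(0).normal(size=10_000)
counts, edges = np.histogram(values, bins=32)
mids = (edges[:-1] + edges[1:]) / 2

def error_for(lo, hi):
    # crude stand-in metric: mass-weighted squared clipping error
    q = np.clip(mids, lo, hi)
    return float(np.sum(counts * (mids - q) ** 2))

best = min(
    (error_for(mids[l], mids[r]), mids[l], mids[r])
    for l, r in itertools.combinations(range(len(mids)), 2)
)
print("min/max:", best[1], best[2])
```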
  {
    "library": "scikit-learn",
    "name": "perplexity",
    "source_code": "def perplexity(self, X, sub_sampling=False):\n    check_is_fitted(self)\n    X = self._check_non_neg_array(X, reset_n_features=True, whom='LatentDirichletAllocation.perplexity')\n    return self._perplexity_precomp_distr(X, sub_sampling=sub_sampling)",
    "docstring": "Calculate approximate perplexity for data X. Perplexity is defined as exp(-1. * log-likelihood per word) .. versionchanged:: 0.19 *doc_topic_distr* argument has been deprecated and is ignored because user no longer has access to unnormalized distribution Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) Document word matrix. sub_sampling : bool Do sub-sampling or not. Returns ------- score : float Perplexity score.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\decomposition\\_lda.py",
    "ast_data": "FunctionDef name:perplexity arg:self arg:X arg:sub_sampling arguments arg arg arg Call Assign Call Return return:yes Call"
  },
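A short usage sketch for `perplexity` on a random document-word count matrix (shapes and hyperparameters are arbitrary):

```python
import numpy as np
from sklearn.decomposition import LatentDirichletAllocation

rng = np.random.RandomState(0)
X = rng.poisson(1.0, size=(50, 20))   # 50 docs, 20-word vocabulary
lda = LatentDirichletAllocation(n_components=5, random_state=0).fit(X)
print(lda.perplexity(X))              # lower is better on held-in data
```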
  {
    "library": "django",
    "name": "_build_kml_sources",
    "source_code": "def _build_kml_sources(self, sources):\n    kml_sources = []\n    if sources is None:\n        sources = apps.get_models()\n    for source in sources:\n        if isinstance(source, models.base.ModelBase):\n            for field in source._meta.fields:\n                if isinstance(field, GeometryField):\n                    kml_sources.append((source._meta.app_label, source._meta.model_name, field.name))\n        elif isinstance(source, (list, tuple)):\n            if len(source) != 3:\n                raise ValueError('Must specify a 3-tuple of (app_label, module_name, field_name).')\n            kml_sources.append(source)\n        else:\n            raise TypeError('KML Sources must be a model or a 3-tuple.')\n    return kml_sources",
    "docstring": "Go through the given sources and return a 3-tuple of the application label, module name, and field name of every GeometryField encountered in the sources. If no sources are provided, then all models.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\sitemaps\\kml.py",
    "ast_data": "FunctionDef name:_build_kml_sources arg:self arg:sources arguments arg arg Assign If Compare Assign Call For If Call For If Call Call If Call If Compare Call Raise Call Call Raise Call Return return:yes"
  },
  {
    "library": "django",
    "name": "localtime",
    "source_code": "@register.filter\ndef localtime(value):\n    return do_timezone(value, timezone.get_current_timezone())",
    "docstring": "Convert a datetime to local time in the active time zone. This only makes sense within a {% localtime off %} block.",
    "type": "function",
    "file_path": "django\\django\\templatetags\\tz.py",
    "ast_data": "FunctionDef name:localtime arg:value arguments arg Return return:yes Call Call"
  },
  {
    "library": "scrapy",
    "name": "_embed_bpython_shell",
    "source_code": "def _embed_bpython_shell(namespace: dict[str, Any]={}, banner: str='') -> EmbedFuncT:\n    import bpython\n\n    @wraps(_embed_bpython_shell)\n    def wrapper(namespace: dict[str, Any]=namespace, banner: str='') -> None:\n        bpython.embed(locals_=namespace, banner=banner)\n    return wrapper",
    "docstring": "Start a bpython shell",
    "type": "function",
    "file_path": "scrapy\\scrapy\\utils\\console.py",
    "ast_data": "FunctionDef name:_embed_bpython_shell arg:namespace arg:banner arguments arg arg FunctionDef name:wrapper arg:namespace arg:banner arguments arg arg Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, tril, is_non_singular=None, is_self_adjoint=None, is_positive_definite=None, is_square=None, name='LinearOperatorLowerTriangular'):\n    parameters = dict(tril=tril, is_non_singular=is_non_singular, is_self_adjoint=is_self_adjoint, is_positive_definite=is_positive_definite, is_square=is_square, name=name)\n    if is_square is False:\n        raise ValueError('Only square lower triangular operators supported at this time.')\n    is_square = True\n    with ops.name_scope(name, values=[tril]):\n        self._tril = linear_operator_util.convert_nonref_to_tensor(tril, name='tril')\n        self._check_tril(self._tril)\n        super(LinearOperatorLowerTriangular, self).__init__(dtype=self._tril.dtype, is_non_singular=is_non_singular, is_self_adjoint=is_self_adjoint, is_positive_definite=is_positive_definite, is_square=is_square, parameters=parameters, name=name)",
    "docstring": "Initialize a . Args: tril: Shape with , . The lower triangular part of defines this operator. The strictly upper triangle is ignored. is_non_singular: Expect that this operator is non-singular. This operator is non-singular if and only if its diagonal elements are all non-zero. is_self_adjoint: Expect that this operator is equal to its hermitian transpose. This operator is self-adjoint only if it is diagonal with real-valued diagonal entries. In this case it is advised to use . is_positive_definite: Expect that this operator is positive definite, meaning the quadratic form has positive real part for all nonzero . Note that we do not require the operator to be self-adjoint to be positive-definite. See: is_square: Expect that this operator acts like square [batch] matrices. name: A name for this . Raises: ValueError: If is .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\linalg\\linear_operator_lower_triangular.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:tril arg:is_non_singular arg:is_self_adjoint arg:is_positive_definite arg:is_square arg:name arguments arg arg arg arg arg arg arg Assign Call If Compare Raise Call Assign With Call Assign Call Call Call Call"
  },
  {
    "library": "scikit-learn",
    "name": "predict",
    "source_code": "@available_if(_final_estimator_has('predict'))\ndef predict(self, X, **params):\n    with _raise_or_warn_if_not_fitted(self):\n        Xt = X\n        if not _routing_enabled():\n            for _, name, transform in self._iter(with_final=False):\n                Xt = transform.transform(Xt)\n            return self.steps[-1][1].predict(Xt, **params)\n        routed_params = process_routing(self, 'predict', **params)\n        for _, name, transform in self._iter(with_final=False):\n            Xt = transform.transform(Xt, **routed_params[name].transform)\n        return self.steps[-1][1].predict(Xt, **routed_params[self.steps[-1][0]].predict)",
    "docstring": "Transform the data, and apply with the final estimator. Call of each transformer in the pipeline. The transformed data are finally passed to the final estimator that calls method. Only valid if the final estimator implements . Parameters ---------- X : iterable Data to predict on. Must fulfill input requirements of first step of the pipeline. **params : dict of str -> object - If (default): Parameters to the `enable_metadata_routing=Trueenable_metadata_routing=True~sklearn.set_configMetadata Routing User Guide predict` on the final estimator.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\pipeline.py",
    "ast_data": "FunctionDef name:predict arg:self arg:X arguments arg arg arg With Call Assign If Call For Call Assign Call Return return:yes Call Assign Call For Call Assign Call Return return:yes Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, pfor: 'PFor', op: ops.Operation, inputs):\n    self.pfor = pfor\n    self._op = op\n    self._inputs = inputs",
    "docstring": "Creates a _PforInput object. Args: pfor: PFor converter object. op: the Operation object that is being converted. inputs: list of WrappedTensor objects representing converted values of the inputs of .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\parallel_for\\pfor.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:pfor arg:op arg:inputs arguments arg arg arg arg Assign Assign Assign"
  },
  {
    "library": "tensorflow",
    "name": "obtain_all_variant_tensor_ops",
    "source_code": "def obtain_all_variant_tensor_ops(dataset):\n    return _traverse(dataset, lambda op: op.outputs[0].dtype == dtypes.variant)",
    "docstring": "Given an input dataset, finds all dataset ops used for construction. A series of transformations would have created this dataset with each transformation including zero or more Dataset ops, each producing a dataset variant tensor. This method outputs all of them. Args: dataset: Dataset to find variant tensors for. Returns: A list of variant_tensor producing dataset ops used to construct this dataset.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\data\\util\\traverse.py",
    "ast_data": "FunctionDef name:obtain_all_variant_tensor_ops arg:dataset arguments arg Return return:yes Call arguments arg Compare"
  },
  {
    "library": "matplotlib",
    "name": "update_ticks",
    "source_code": "def update_ticks(self):\n    self._get_ticker_locator_formatter()\n    self.long_axis.set_major_locator(self._locator)\n    self.long_axis.set_minor_locator(self._minorlocator)\n    self.long_axis.set_major_formatter(self._formatter)",
    "docstring": "Set up the ticks and ticklabels. This should not be needed by users.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\colorbar.py",
    "ast_data": "FunctionDef name:update_ticks arg:self arguments arg Call Call Call Call"
  },
  {
    "library": "django",
    "name": "DetailView",
    "source_code": "class DetailView(SingleObjectTemplateResponseMixin, BaseDetailView):\n    pass",
    "docstring": "Render a \"detail\" view of an object. By default this is a model instance looked up from , but the view will support display of *any* object by overriding .",
    "type": "class",
    "file_path": "django\\django\\views\\generic\\detail.py",
    "ast_data": "ClassDef name:DetailView"
  },
  {
    "library": "matplotlib",
    "name": "_extend_upper",
    "source_code": "def _extend_upper(self):\n    minmax = 'min' if self.long_axis.get_inverted() else 'max'\n    return self.extend in ('both', minmax)",
    "docstring": "Return whether the upper limit is open ended.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\colorbar.py",
    "ast_data": "FunctionDef name:_extend_upper arg:self arguments arg Assign Call Return return:yes Compare"
  },
  {
    "library": "django",
    "name": "_get_permissions",
    "source_code": "def _get_permissions(self, user_obj, obj, from_name):\n    if not user_obj.is_active or user_obj.is_anonymous or obj is not None:\n        return set()\n    perm_cache_name = '_%s_perm_cache' % from_name\n    if not hasattr(user_obj, perm_cache_name):\n        if user_obj.is_superuser:\n            perms = Permission.objects.all()\n        else:\n            perms = getattr(self, '_get_%s_permissions' % from_name)(user_obj)\n        perms = perms.values_list('content_type__app_label', 'codename').order_by()\n        setattr(user_obj, perm_cache_name, {'%s.%s' % (ct, name) for ct, name in perms})\n    return getattr(user_obj, perm_cache_name)",
    "docstring": "Return the permissions of from . can be either \"group\" or \"user\" to return permissions from or respectively.",
    "type": "method",
    "file_path": "django\\django\\contrib\\auth\\backends.py",
    "ast_data": "FunctionDef name:_get_permissions arg:self arg:user_obj arg:obj arg:from_name arguments arg arg arg arg If BoolOp Compare Return return:yes Call Assign If Call If Assign Call Assign Call Call Assign Call Call Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "update_tf_version_bzl",
    "source_code": "def update_tf_version_bzl(old_version, new_version):\n    old_mmp = _get_mmp(old_version)\n    new_mmp = _get_mmp(new_version)\n    replace_string_in_line('TF_VERSION = \"%s\"' % old_mmp, 'TF_VERSION = \"%s\"' % new_mmp, TF_VERSION_BZL)",
    "docstring": "Update tf_version.bzl.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\tools\\ci_build\\update_version.py",
    "ast_data": "FunctionDef name:update_tf_version_bzl arg:old_version arg:new_version arguments arg arg Assign Call Assign Call Call"
  },
  {
    "library": "kornia",
    "name": "equalize3d",
    "source_code": "@perform_keep_shape_video\ndef equalize3d(input: Tensor) -> Tensor:\n    res = []\n    for volume in input:\n        scaled_input = torch.stack([_scale_channel(volume[i, :, :, :]) for i in range(len(volume))])\n        res.append(scaled_input)\n    return torch.stack(res)",
    "docstring": "Equalize the values for a 3D volumetric tensor. Implements Equalize function for a sequence of images using PyTorch ops based on uint8 format: Args: input: image tensor with shape :math: to equalize. Returns: Equalized volume with shape :math:.",
    "type": "function",
    "file_path": "kornia\\kornia\\enhance\\adjust.py",
    "ast_data": "FunctionDef name:equalize3d arg:input arguments arg Assign For Assign Call Call Call Call Call Return return:yes Call"
  },
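Usage sketch for `equalize3d`, assuming the public `kornia.enhance.equalize3d` export; the shape-keeping decorator returns a volume with the input's shape:

```python
import torch
import kornia

volume = torch.rand(2, 3, 4, 16, 16)   # B, C, D, H, W in [0, 1]
out = kornia.enhance.equalize3d(volume)
print(out.shape)                       # torch.Size([2, 3, 4, 16, 16])
```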
  {
    "library": "pytorch",
    "name": "maybe_disable_comprehensive_padding",
    "source_code": "def maybe_disable_comprehensive_padding(example_inputs: Sequence[InputType]) -> AbstractContextManager[None, None]:\n    has_gpu = any((is_gpu(t.device.type) for t in example_inputs if isinstance(t, torch.Tensor)))\n    if config.disable_padding_cpu and config.comprehensive_padding and (not has_gpu):\n        perf_hint_log.info('Skip comprehensive padding on CPU')\n        return config.patch(comprehensive_padding=False)\n    elif config.aot_inductor.use_runtime_constant_folding:\n        perf_hint_log.info('Skip comprehensive padding for use_runtime_constant_folding')\n        return config.patch(comprehensive_padding=False)\n    else:\n        return contextlib.nullcontext()",
    "docstring": "For CPU backend, enable comprehensive padding causes some unit tests fail due to changing number of generated kernels. Skip for now.",
    "type": "function",
    "file_path": "pytorch\\torch\\_inductor\\compile_fx.py",
    "ast_data": "FunctionDef name:maybe_disable_comprehensive_padding arg:example_inputs arguments arg Assign Call Call Call If BoolOp Call Return return:yes Call If Call Return return:yes Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_ExtractDefaultTypesAndAllowedTypes",
    "source_code": "def _ExtractDefaultTypesAndAllowedTypes(op_def, default_type_attr_map, allowed_list_attr_map):\n    for attr_def in op_def.attr:\n        if attr_def.type != 'type':\n            continue\n        key = attr_def.name\n        if attr_def.HasField('default_value'):\n            default_type_attr_map[key] = dtypes.as_dtype(attr_def.default_value.type)\n        if attr_def.HasField('allowed_values'):\n            allowed_list_attr_map[key] = attr_def.allowed_values.list.type",
    "docstring": "Extracts the and .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\op_def_library.py",
    "ast_data": "FunctionDef name:_ExtractDefaultTypesAndAllowedTypes arg:op_def arg:default_type_attr_map arg:allowed_list_attr_map arguments arg arg arg For If Compare Assign If Call Assign Call If Call Assign"
  },
  {
    "library": "matplotlib",
    "name": "_update_offset_text_position",
    "source_code": "def _update_offset_text_position(self, bboxes, bboxes2):\n    raise NotImplementedError('Derived must override')",
    "docstring": "Update the offset text position based on the sequence of bounding boxes of all the ticklabels.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\axis.py",
    "ast_data": "FunctionDef name:_update_offset_text_position arg:self arg:bboxes arg:bboxes2 arguments arg arg arg Raise Call"
  },
  {
    "library": "kornia",
    "name": "transform_boxes",
    "source_code": "def transform_boxes(self, M: torch.Tensor, inplace: bool=False) -> Boxes:\n    if not 2 <= M.ndim <= 3 or M.shape[-2:] != (3, 3):\n        raise ValueError(f'The transformation matrix shape must be (3, 3) or (B, 3, 3). Got {M.shape}.')\n    transformed_boxes = _transform_boxes(self._data, M)\n    if inplace:\n        self._data = transformed_boxes\n        return self\n    obj = self.clone()\n    obj._data = transformed_boxes\n    return obj",
    "docstring": "Apply a transformation matrix to the 2D boxes. Args: M: The transformation matrix to be applied, shape of :math: or :math:. inplace: do transform in-place and return self. Returns: The transformed boxes.",
    "type": "method",
    "file_path": "kornia\\kornia\\geometry\\boxes.py",
    "ast_data": "FunctionDef name:transform_boxes arg:self arg:M arg:inplace arguments arg arg arg If BoolOp Compare Compare Raise Call Assign Call If Assign Return return:yes Assign Call Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "resolve",
    "source_code": "def resolve(node, source_info, graphs, include_annotations=True):\n    node = TreeAnnotator(source_info, graphs, include_annotations).visit(node)\n    return node",
    "docstring": "Resolves the live symbols at the exit of control flow statements. Args: node: ast.AST source_info: transformer.SourceInfo graphs: Dict[ast.FunctionDef, cfg.Graph] include_annotations: Bool, whether type annotations should be included in the analysis. Returns: ast.AST",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\autograph\\pyct\\static_analysis\\liveness.py",
    "ast_data": "FunctionDef name:resolve arg:node arg:source_info arg:graphs arg:include_annotations arguments arg arg arg arg Assign Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "trace_save_restore_function_map",
    "source_code": "def trace_save_restore_function_map(obj, factory_data_list):\n    saveable_fns = {}\n    for factory_data in factory_data_list:\n        saveable_factory = factory_data.factory\n        attribute_name = factory_data.name\n        if resource_variable_ops.is_resource_variable(obj) or resource_variable_ops.is_resource_variable(saveable_factory) or (not callable(saveable_factory)):\n            continue\n        concrete_save, concrete_restore = _trace_save_restore_functions(saveable_factory, obj)\n        if not concrete_save:\n            continue\n        saveable_fns[attribute_name] = (concrete_save, concrete_restore)\n    return saveable_fns",
    "docstring": "Traces all save and restore functions in the provided factory list. Args: obj: object. factory_data_list: List of . Returns: Dict mapping atttribute names to tuples of concrete save/restore functions.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\training\\saving\\trace_saveable_util.py",
    "ast_data": "FunctionDef name:trace_save_restore_function_map arg:obj arg:factory_data_list arguments arg arg Assign For Assign Assign If BoolOp Call Call Call Assign Call If Assign Return return:yes"
  },
  {
    "library": "scipy",
    "name": "__call__",
    "source_code": "def __call__(self, df=None, scale=None, seed=None):\n    return wishart_frozen(df, scale, seed)",
    "docstring": "Create a frozen Wishart distribution. See for more information.",
    "type": "method",
    "file_path": "scipy\\scipy\\stats\\_multivariate.py",
    "ast_data": "FunctionDef name:__call__ arg:self arg:df arg:scale arg:seed arguments arg arg arg arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "process_joint_outputs",
    "source_code": "def process_joint_outputs(all_joint_outputs: SubgraphResults, num_placeholders: int) -> JointOutputResult:\n    assert isinstance(all_joint_outputs, list)\n    assert all_joint_outputs[0] is not None, 'joint_subgraph_buffer is None - this is a bug!'\n    joint_buffer = all_joint_outputs[0]\n    other_grads = all_joint_outputs[num_placeholders - 1:]\n    grads_compute = [buf for buf in other_grads if buf is not None]\n\n    def get_out(buf):\n        if buf is None:\n            return None\n        assert isinstance(buf, ComputedBuffer)\n        assert buf.name is not None\n        return TensorBox.create(V.graph.get_buffer(buf.name))\n    grads_out = [get_out(x) for x in other_grads]\n    mutated_grads = [buf for buf in grads_out if buf is not None]\n    return JointOutputResult(grad_input=joint_buffer, captured_grads_compute=grads_compute, captured_grads=grads_out, mutated_grads=mutated_grads)",
    "docstring": "Process joint outputs and extract various buffers needed for lowering Args: all_joint_outputs: List of all the outputs from build_subgraphs num_placeholders: The number of placeholder inputs, used to skip over unused backward compute buffers Returns: JointOutputResult containing processed buffers and gradients",
    "type": "function",
    "file_path": "pytorch\\torch\\_inductor\\kernel\\flex_attention.py",
    "ast_data": "FunctionDef name:process_joint_outputs arg:all_joint_outputs arg:num_placeholders arguments arg arg Call Compare Assign Assign Assign Compare FunctionDef name:get_out arg:buf arguments arg If Compare Return return:no Call Compare Return return:yes Call Call Assign Call Assign Compare Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "delaunay_plot_2d",
    "source_code": "@_held_figure\ndef delaunay_plot_2d(tri, ax=None):\n    if tri.points.shape[1] != 2:\n        raise ValueError('Delaunay triangulation is not 2-D')\n    x, y = tri.points.T\n    ax.plot(x, y, 'o')\n    ax.triplot(x, y, tri.simplices.copy())\n    _adjust_bounds(ax, tri.points)\n    return ax.figure",
    "docstring": "Plot the given Delaunay triangulation in 2-D Parameters ---------- tri : scipy.spatial.Delaunay instance Triangulation to plot ax : matplotlib.axes.Axes instance, optional Axes to plot on Returns ------- fig : matplotlib.figure.Figure instance Figure for the plot See Also -------- Delaunay matplotlib.pyplot.triplot Notes ----- Requires Matplotlib. Examples -------- >>> import numpy as np >>> import matplotlib.pyplot as plt >>> from scipy.spatial import Delaunay, delaunay_plot_2d The Delaunay triangulation of a set of random points: >>> rng = np.random.default_rng() >>> points = rng.random((30, 2)) >>> tri = Delaunay(points) Plot it: >>> _ = delaunay_plot_2d(tri) >>> plt.show()",
    "type": "function",
    "file_path": "scipy\\scipy\\spatial\\_plotutils.py",
    "ast_data": "FunctionDef name:delaunay_plot_2d arg:tri arg:ax arguments arg arg If Compare Raise Call Assign Call Call Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "__len__",
    "source_code": "@abstractmethod\ndef __len__(self):\n    raise NotImplementedError",
    "docstring": "Number of batch in the Sequence. Returns: The number of batches in the Sequence.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\utils\\data_utils.py",
    "ast_data": "FunctionDef name:__len__ arg:self arguments arg Raise"
  },
  {
    "library": "scikit-learn",
    "name": "_inclusive_low_high",
    "source_code": "def _inclusive_low_high(interval, dtype=np.float64):\n    eps = 10 * np.finfo(dtype).eps\n    if interval.low == -np.inf:\n        low = -10000000000.0\n    elif interval.low < 0:\n        low = interval.low * (1 - eps) + eps\n    else:\n        low = interval.low * (1 + eps) + eps\n    if interval.high == np.inf:\n        high = 10000000000.0\n    elif interval.high < 0:\n        high = interval.high * (1 + eps) - eps\n    else:\n        high = interval.high * (1 - eps) - eps\n    return (low, high)",
    "docstring": "Generate values low and high to be within the interval range. This is used in tests only. Returns ------- low, high : tuple The returned values low and high lie within the interval.",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\_loss\\link.py",
    "ast_data": "FunctionDef name:_inclusive_low_high arg:interval arg:dtype arguments arg arg Assign Call If Compare Assign If Compare Assign Assign If Compare Assign If Compare Assign Assign Return return:yes"
  },
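Numerically, `_inclusive_low_high` nudges finite endpoints inward by a few ULPs and caps infinities at ±1e10. A standalone re-implementation taking explicit bounds (the real helper takes an interval object):

```python
import numpy as np

def inclusive_low_high(low, high, dtype=np.float64):
    eps = 10 * np.finfo(dtype).eps
    if low == -np.inf:
        low = -1e10                       # cap -inf
    elif low < 0:
        low = low * (1 - eps) + eps       # shrink magnitude, shift up
    else:
        low = low * (1 + eps) + eps       # grow magnitude, shift up
    if high == np.inf:
        high = 1e10                       # cap +inf
    elif high < 0:
        high = high * (1 + eps) - eps     # grow magnitude, shift down
    else:
        high = high * (1 - eps) - eps     # shrink magnitude, shift down
    return low, high

print(inclusive_low_high(0.0, 1.0))         # (~2.2e-15, ~1 - 4.4e-15)
print(inclusive_low_high(-np.inf, np.inf))  # (-1e10, 1e10)
```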
  {
    "library": "scikit-learn",
    "name": "fit_transform",
    "source_code": "def fit_transform(self, X, y=None, **params):\n    self.fit(X, **params)\n    X_transformed = self.eigenvectors_ * np.sqrt(self.eigenvalues_)\n    if self.fit_inverse_transform:\n        self._fit_inverse_transform(X_transformed, X)\n    return X_transformed",
    "docstring": "Fit the model from data in X and transform X. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) Training vector, where is the number of samples and is the number of features. y : Ignored Not used, present for API consistency by convention. **params : kwargs Parameters (keyword arguments) and values passed to the fit_transform instance. Returns ------- X_new : ndarray of shape (n_samples, n_components) Returns the instance itself.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\decomposition\\_kernel_pca.py",
    "ast_data": "FunctionDef name:fit_transform arg:self arg:X arg:y arguments arg arg arg arg Call Assign Call If Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "inverse_event_shape",
    "source_code": "def inverse_event_shape(self, output_shape):\n    return self._inverse_event_shape(output_shape)",
    "docstring": "Shape of a single sample from a single batch as a . Same meaning as . May be only partially defined. Args: output_shape: indicating event-portion shape passed into function. Returns: inverse_event_shape_tensor: indicating event-portion shape after applying . Possibly unknown.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\distributions\\bijector_impl.py",
    "ast_data": "FunctionDef name:inverse_event_shape arg:self arg:output_shape arguments arg arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_register_and_schedule_resource_closure",
    "source_code": "def _register_and_schedule_resource_closure(self, closure):\n    resource_remote_value = closure.build_output_remote_value()\n    with self._resource_tracking_lock:\n        self._register_resource(resource_remote_value)\n        if self._is_dead_with_error:\n            resource_remote_value._set_aborted(ClosureAbortedError(self._is_dead_with_error))\n        else:\n            self._schedule_resource(closure)\n    return resource_remote_value",
    "docstring": "Build remote value for, register for reconstruction, and schedule.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\coordinator\\cluster_coordinator.py",
    "ast_data": "FunctionDef name:_register_and_schedule_resource_closure arg:self arg:closure arguments arg arg Assign Call With Call If Call Call Call Return return:yes"
  },
  {
    "library": "pandas",
    "name": "idxmax",
    "source_code": "def idxmax(self, axis: Axis=0, skipna: bool=True, *args, **kwargs) -> Hashable:\n    axis = self._get_axis_number(axis)\n    iloc = self.argmax(axis, skipna, *args, **kwargs)\n    return self.index[iloc]",
    "docstring": "Return the row label of the maximum value. If multiple values equal the maximum, the first row label with that value is returned. Parameters ---------- axis : {0 or 'index'} Unused. Parameter needed for compatibility with DataFrame. skipna : bool, default True Exclude NA/null values. If the entire Series is NA, or if ``. Examples -------- >>> s = pd.Series(data=[1, None, 4, 3, 4], index=[\"A\", \"B\", \"C\", \"D\", \"E\"]) >>> s A 1.0 B NaN C 4.0 D 3.0 E 4.0 dtype: float64 >>> s.idxmax() 'C'",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\series.py",
    "ast_data": "FunctionDef name:idxmax arg:self arg:axis arg:skipna arguments arg arg arg arg arg Assign Call Assign Call Return return:yes"
  },
  {
    "library": "cherrypy",
    "name": "dict_from_file",
    "source_code": "def dict_from_file(self, file):\n    if hasattr(file, 'read'):\n        self.read_file(file)\n    else:\n        self.read(file)\n    return self.as_dict()",
    "docstring": "Generate a dict from a file.",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\lib\\reprconf.py",
    "ast_data": "FunctionDef name:dict_from_file arg:self arg:file arguments arg arg If Call Call Call Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "_distance_tranform_arg_check",
    "source_code": "def _distance_tranform_arg_check(distances_out, indices_out, return_distances, return_indices):\n    error_msgs = []\n    if not return_distances and (not return_indices):\n        error_msgs.append('at least one of return_distances/return_indices must be True')\n    if distances_out and (not return_distances):\n        error_msgs.append('return_distances must be True if distances is supplied')\n    if indices_out and (not return_indices):\n        error_msgs.append('return_indices must be True if indices is supplied')\n    if error_msgs:\n        raise RuntimeError(', '.join(error_msgs))",
    "docstring": "Raise a RuntimeError if the arguments are invalid",
    "type": "function",
    "file_path": "scipy\\scipy\\ndimage\\_morphology.py",
    "ast_data": "FunctionDef name:_distance_tranform_arg_check arg:distances_out arg:indices_out arg:return_distances arg:return_indices arguments arg arg arg arg Assign If BoolOp Call If BoolOp Call If BoolOp Call If Raise Call Call"
  },
  {
    "library": "pytorch",
    "name": "batch_shape",
    "source_code": "@property\ndef batch_shape(self) -> torch.Size:\n    return self._batch_shape",
    "docstring": "Returns the shape over which parameters are batched.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributions\\distribution.py",
    "ast_data": "FunctionDef name:batch_shape arg:self arguments arg Return return:yes"
  },
  {
    "library": "scipy",
    "name": "unit",
    "source_code": "def unit(key: str) -> str:\n    _check_obsolete(key)\n    return physical_constants[key][1]",
    "docstring": "Unit in physical_constants indexed by key Parameters ---------- key : Python string Key in dictionary Returns ------- unit : Python string Unit in corresponding to Examples -------- >>> from scipy import constants >>> constants.unit('proton mass') 'kg'",
    "type": "function",
    "file_path": "scipy\\scipy\\constants\\_codata.py",
    "ast_data": "FunctionDef name:unit arg:key arguments arg Call Return return:yes"
  },
  {
    "library": "scipy",
    "name": "lsmr_operator",
    "source_code": "def lsmr_operator(Jop, d, active_set):\n    m, n = Jop.shape\n\n    def matvec(x):\n        x_free = x.ravel().copy()\n        x_free[active_set] = 0\n        return Jop.matvec(x * d)\n\n    def rmatvec(x):\n        r = d * Jop.rmatvec(x)\n        r[active_set] = 0\n        return r\n    return LinearOperator((m, n), matvec=matvec, rmatvec=rmatvec, dtype=float)",
    "docstring": "Compute LinearOperator to use in LSMR by dogbox algorithm. mask is used to excluded active variables from computations of matrix-vector products.",
    "type": "function",
    "file_path": "scipy\\scipy\\optimize\\_lsq\\dogbox.py",
    "ast_data": "FunctionDef name:lsmr_operator arg:Jop arg:d arg:active_set arguments arg arg arg Assign FunctionDef name:matvec arg:x arguments arg Assign Call Call Assign Return return:yes Call FunctionDef name:rmatvec arg:x arguments arg Assign Call Assign Return return:yes Return return:yes Call"
  },
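A self-contained demonstration of the masked operator built by `lsmr_operator`, using toy dense data through `aslinearoperator` (variable 1 is treated as active and therefore excluded from both products):

```python
import numpy as np
from scipy.sparse.linalg import LinearOperator, aslinearoperator

J = np.arange(12, dtype=float).reshape(4, 3)
d = np.array([1.0, 0.5, 2.0])
active = np.array([False, True, False])   # variable 1 is held fixed
Jop = aslinearoperator(J)

def matvec(x):
    x_free = x.ravel().copy()
    x_free[active] = 0                    # zero out active columns
    return Jop.matvec(x_free * d)

def rmatvec(x):
    r = d * Jop.rmatvec(x)
    r[active] = 0                         # zero out active rows of J^T x
    return r

op = LinearOperator(J.shape, matvec=matvec, rmatvec=rmatvec, dtype=float)
print(op.matvec(np.ones(3)))              # [ 4. 13. 22. 31.]
```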
  {
    "library": "pandas",
    "name": "UnsortedIndexError",
    "source_code": "class UnsortedIndexError(KeyError):\n    pass",
    "docstring": "Error raised when slicing a MultiIndex which has not been lexsorted. Subclass of . See Also -------- DataFrame.sort_index : Sort a DataFrame by its index. DataFrame.set_index : Set the DataFrame index using existing columns. Examples -------- >>> df = pd.DataFrame( ... { ... \"cat\": [0, 0, 1, 1], ... \"color\": [\"white\", \"white\", \"brown\", \"black\"], ... \"lives\": [4, 4, 3, 7], ... }, ... ) >>> df = df.set_index([\"cat\", \"color\"]) >>> df lives cat color 0 white 4 white 4 1 brown 3 black 7 >>> df.loc[(0, \"black\") : (1, \"white\")] Traceback (most recent call last): UnsortedIndexError: 'Key length (2) was greater than MultiIndex lexsort depth (1)'",
    "type": "class",
    "file_path": "pandas\\pandas\\errors\\__init__.py",
    "ast_data": "ClassDef name:UnsortedIndexError"
  },
  {
    "library": "pandas",
    "name": "insert",
    "source_code": "def insert(self, loc: int, item: Interval) -> Self:\n    left_insert, right_insert = self._validate_scalar(item)\n    new_left = self.left.insert(loc, left_insert)\n    new_right = self.right.insert(loc, right_insert)\n    return self._shallow_copy(new_left, new_right)",
    "docstring": "Return a new IntervalArray inserting new item at location. Follows Python numpy.insert semantics for negative values. Only Interval objects and NA can be inserted into an IntervalIndex Parameters ---------- loc : int item : Interval Returns ------- IntervalArray",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\arrays\\interval.py",
    "ast_data": "FunctionDef name:insert arg:self arg:loc arg:item arguments arg arg arg Assign Call Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "_get_rescaled_operator",
    "source_code": "def _get_rescaled_operator(X, X_offset, sample_weight_sqrt):\n\n    def matvec(b):\n        return X.dot(b) - sample_weight_sqrt * b.dot(X_offset)\n\n    def rmatvec(b):\n        return X.T.dot(b) - X_offset * b.dot(sample_weight_sqrt)\n    X1 = sparse.linalg.LinearOperator(shape=X.shape, matvec=matvec, rmatvec=rmatvec)\n    return X1",
    "docstring": "Create LinearOperator for matrix products with implicit centering. Matrix product returns .",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\linear_model\\_ridge.py",
    "ast_data": "FunctionDef name:_get_rescaled_operator arg:X arg:X_offset arg:sample_weight_sqrt arguments arg arg arg FunctionDef name:matvec arg:b arguments arg Return return:yes Call Call FunctionDef name:rmatvec arg:b arguments arg Return return:yes Call Call Assign Call Return return:yes"
  },
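A numerical check (dense toy data, unit sample weights) that the implicitly centered product matches explicit centering without ever forming `X - X_offset`:

```python
import numpy as np
from scipy import sparse

rng = np.random.default_rng(0)
X = sparse.random(6, 4, density=0.5, random_state=0, format="csr")
X_offset = np.asarray(X.mean(axis=0)).ravel()
sw = np.ones(6)                           # sample_weight_sqrt, all ones here

def matvec(b):
    # X @ b minus the centering correction, keeping X sparse
    return X.dot(b) - sw * b.dot(X_offset)

centered = X.toarray() - X_offset         # explicit (dense) centering
b = rng.normal(size=4)
print(np.allclose(matvec(b), centered @ b))   # True
```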
  {
    "library": "scikit-learn",
    "name": "default_device",
    "source_code": "def default_device(self) -> L['cpu']:\n    return 'cpu'",
    "docstring": "The default device used for new Dask arrays. For Dask, this always returns ``. See Also -------- __array_namespace_info__.capabilities, __array_namespace_info__.default_dtypes, __array_namespace_info__.dtypes, __array_namespace_info__.devices Returns ------- device : Device The default device used for new Dask arrays. Examples -------- >>> info = xp.__array_namespace_info__() >>> info.default_device() 'cpu'",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\externals\\array_api_compat\\dask\\array\\_info.py",
    "ast_data": "FunctionDef name:default_device arg:self arguments arg Return return:yes"
  },
  {
    "library": "kornia",
    "name": "__init__",
    "source_code": "def __init__(self, backbone: ResNetD | PPHGNetV2, encoder: HybridEncoder, decoder: RTDETRHead):\n    super().__init__()\n    self.backbone = backbone\n    self.encoder = encoder\n    self.decoder = decoder",
    "docstring": "Construct RT-DETR Object Detection model. Args: backbone: backbone network for feature extraction. encoder: neck network for feature fusion. decoder: head network to decode features into detection results.",
    "type": "method",
    "file_path": "kornia\\kornia\\contrib\\models\\rt_detr\\model.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:backbone arg:encoder arg:decoder arguments arg arg arg arg Call Call Assign Assign Assign"
  },
  {
    "library": "tensorflow",
    "name": "load",
    "source_code": "def load(self, sess, tags, import_scope=None, **saver_kwargs):\n    saved_model_proto = parse_saved_model(self._export_dir)\n    metrics.IncrementReadApi(_LOADER_LABEL)\n    with sess.graph.as_default():\n        saver, _ = self.load_graph(sess.graph, tags, import_scope, **saver_kwargs)\n        self.restore_variables(sess, saver, import_scope)\n        self.run_init_ops(sess, tags, import_scope)\n    meta_graph_def = self.get_meta_graph_def_from_tags(tags)\n    if len(saved_model_proto.meta_graphs) == 1 and saved_model_proto.meta_graphs[0].HasField('object_graph_def'):\n        metrics.IncrementRead(write_version='2')\n    else:\n        metrics.IncrementRead(write_version='1')\n    return meta_graph_def",
    "docstring": "Load the MetaGraphDef graph and restore variable values into the session. Args: sess: tf.compat.v1.Session to restore variable values. tags: a set of string tags identifying a MetaGraphDef. import_scope: Optional -- if specified, prepend this string followed by '/' to all loaded tensor names. This scope is applied to tensor instances loaded into the passed session, but it is *not* written through to the static protocol buffer that is returned. **saver_kwargs: keyword arguments to pass to tf.train.import_meta_graph. Returns: proto of the graph that was loaded.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\saved_model\\loader_impl.py",
    "ast_data": "FunctionDef name:load arg:self arg:sess arg:tags arg:import_scope arguments arg arg arg arg arg Assign Call Call With Call Assign Call Call Call Assign Call If BoolOp Compare Call Call Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_global_batch_size",
    "source_code": "@property\ndef _global_batch_size(self):\n    return True",
    "docstring": "Global and per-replica batching are equivalent for this strategy.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\distribute_lib.py",
    "ast_data": "FunctionDef name:_global_batch_size arg:self arguments arg Return return:yes"
  },
  {
    "library": "kornia",
    "name": "motion_from_essential",
    "source_code": "def motion_from_essential(E_mat: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:\n    KORNIA_CHECK_SHAPE(E_mat, ['*', '3', '3'])\n    R1, R2, t = decompose_essential_matrix(E_mat)\n    Rs = stack([R1, R1, R2, R2], dim=-3)\n    Ts = stack([t, -t, t, -t], dim=-3)\n    return (Rs, Ts)",
    "docstring": "Get Motion (R's and t's ) from Essential matrix. Computes and return four possible poses exist for the decomposition of the Essential matrix. The possible solutions are :math:. Args: E_mat: The essential matrix in the form of :math:. Returns: The rotation and translation containing the four possible combination for the retrieved motion. The tuple is as following :math:.",
    "type": "function",
    "file_path": "kornia\\kornia\\geometry\\epipolar\\essential.py",
    "ast_data": "FunctionDef name:motion_from_essential arg:E_mat arguments arg Call Assign Call Assign Call Assign Call Return return:yes"
  },
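Usage sketch, assuming `motion_from_essential` is importable from `kornia.geometry.epipolar` as the file path suggests; the essential matrix below is `[t]_x R` for `t = e_x` and `R = I`:

```python
import torch
from kornia.geometry.epipolar import motion_from_essential

# skew-symmetric matrix of t = (1, 0, 0), i.e. a valid essential matrix
E = torch.tensor([[0., 0., 0.],
                  [0., 0., -1.],
                  [0., 1., 0.]])[None]      # batch of one: (1, 3, 3)
Rs, Ts = motion_from_essential(E)
print(Rs.shape, Ts.shape)  # torch.Size([1, 4, 3, 3]) torch.Size([1, 4, 3, 1])
```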
  {
    "library": "scikit-learn",
    "name": "decision_function",
    "source_code": "def decision_function(self, X):\n    y_scores = self._decision_function(X)\n    if len(self.classes_) == 2:\n        return y_scores[:, 1] - y_scores[:, 0]\n    return y_scores",
    "docstring": "Apply decision function to an array of samples. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) Array of samples (test vectors). Returns ------- y_scores : ndarray of shape (n_samples,) or (n_samples, n_classes) Decision function values related to each class, per sample. In the two-class case, the shape is , giving the log likelihood ratio of the positive class.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\discriminant_analysis.py",
    "ast_data": "FunctionDef name:decision_function arg:self arg:X arguments arg arg Assign Call If Compare Call Return return:yes Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "merge",
    "source_code": "def merge(self, options):\n    return options_lib.merge_options(self, options)",
    "docstring": "Merges itself with the given . If this object and the to merge set an option differently, a warning is generated and this object's value is updated with the object's value. Args: options: The to merge with. Returns: New object which is the result of merging self with the input .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\data\\ops\\options.py",
    "ast_data": "FunctionDef name:merge arg:self arg:options arguments arg arg Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "codes",
    "source_code": "@property\ndef codes(self) -> Series:\n    from pandas import Series\n    return Series(self._parent.codes, index=self._index)",
    "docstring": "Return Series of codes as well as the index. See Also -------- Series.cat.categories : Return the categories of this categorical. Series.cat.as_ordered : Set the Categorical to be ordered. Series.cat.as_unordered : Set the Categorical to be unordered. Examples -------- >>> raw_cate = pd.Categorical([\"a\", \"b\", \"c\", \"a\"], categories=[\"a\", \"b\"]) >>> ser = pd.Series(raw_cate) >>> ser.cat.codes 0 0 1 1 2 -1 3 0 dtype: int8",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\arrays\\categorical.py",
    "ast_data": "FunctionDef name:codes arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "get_index",
    "source_code": "def get_index(uid, i):\n    return _SHARED_SEQUENCES[uid][i]",
    "docstring": "Get the value from the Sequence at index . To allow multiple Sequences to be used at the same time, we use to get a specific one. A single Sequence would cause the validation to overwrite the training Sequence. Args: uid: int, Sequence identifier i: index Returns: The value at index .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\utils\\data_utils.py",
    "ast_data": "FunctionDef name:get_index arg:uid arg:i arguments arg arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "is_layer",
    "source_code": "def is_layer(obj):\n    return hasattr(obj, '_is_layer') and (not isinstance(obj, type))",
    "docstring": "Implicit check for Layer-like objects.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\trackable\\layer_utils.py",
    "ast_data": "FunctionDef name:is_layer arg:obj arguments arg Return return:yes BoolOp Call Call"
  },
  {
    "library": "scrapy",
    "name": "next_request",
    "source_code": "def next_request(self) -> Request | None:\n    request: Request | None = self.mqs.pop()\n    assert self.stats is not None\n    if request is not None:\n        self.stats.inc_value('scheduler/dequeued/memory', spider=self.spider)\n    else:\n        request = self._dqpop()\n        if request is not None:\n            self.stats.inc_value('scheduler/dequeued/disk', spider=self.spider)\n    if request is not None:\n        self.stats.inc_value('scheduler/dequeued', spider=self.spider)\n    return request",
    "docstring": "Return a :class: object from the memory queue, falling back to the disk queue if the memory queue is empty. Return ``.",
    "type": "method",
    "file_path": "scrapy\\scrapy\\core\\scheduler.py",
    "ast_data": "FunctionDef name:next_request arg:self arguments arg Call Compare If Compare Call Assign Call If Compare Call If Compare Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_create_categorical_column_weighted_sum",
    "source_code": "def _create_categorical_column_weighted_sum(column, transformation_cache, state_manager, sparse_combiner, weight_var):\n    sparse_tensors = column.get_sparse_tensors(transformation_cache, state_manager)\n    id_tensor = sparse_ops.sparse_reshape(sparse_tensors.id_tensor, [array_ops.shape(sparse_tensors.id_tensor)[0], -1])\n    weight_tensor = sparse_tensors.weight_tensor\n    if weight_tensor is not None:\n        weight_tensor = sparse_ops.sparse_reshape(weight_tensor, [array_ops.shape(weight_tensor)[0], -1])\n    return embedding_ops.safe_embedding_lookup_sparse(weight_var, id_tensor, sparse_weights=weight_tensor, combiner=sparse_combiner, name='weighted_sum')",
    "docstring": "Create a weighted sum of a categorical column for linear_model. Note to maintainer: As implementation details, the weighted sum is implemented via embedding_lookup_sparse toward efficiency. Mathematically, they are the same. To be specific, conceptually, categorical column can be treated as multi-hot vector. Say: The weighted sum is in this case, which is same as . Another example is The weighted sum is in this case, which is same as . For both cases, we can implement weighted sum via embedding_lookup with sparse_combiner = \"sum\".",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\feature_column\\feature_column_v2.py",
    "ast_data": "FunctionDef name:_create_categorical_column_weighted_sum arg:column arg:transformation_cache arg:state_manager arg:sparse_combiner arg:weight_var arguments arg arg arg arg arg Assign Call Assign Call Call Assign If Compare Assign Call Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "_from_python_type_to_onnx_tensor_element_type",
    "source_code": "def _from_python_type_to_onnx_tensor_element_type(type: type):\n    import onnx\n    _PYTHON_TYPE_TO_ONNX_TENSOR_ELEMENT_TYPE = {float: onnx.TensorProto.FLOAT, int: onnx.TensorProto.INT64, bool: onnx.TensorProto.BOOL}\n    return _PYTHON_TYPE_TO_ONNX_TENSOR_ELEMENT_TYPE.get(type)",
    "docstring": "Converts a Python type to the corresponding ONNX tensor element type. For example, returns . Args: type (type): The Python type to convert. Returns: int: The corresponding ONNX tensor element type.",
    "type": "function",
    "file_path": "pytorch\\torch\\onnx\\_internal\\onnxruntime.py",
    "ast_data": "FunctionDef name:_from_python_type_to_onnx_tensor_element_type arg:type arguments arg Assign Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "get_loss_function",
    "source_code": "def get_loss_function(loss):\n    if loss is None or isinstance(loss, losses.Loss):\n        return loss\n    if tf_inspect.isclass(loss) and issubclass(loss, losses.Loss):\n        raise ValueError('Received uninstantiated Loss class: {}\\nPlease call loss \"\"classes before passing them to Model.compile.'.format(loss))\n    if isinstance(loss, collections.abc.Mapping):\n        loss = losses.get(loss)\n    if callable(loss) and (not hasattr(loss, '__name__')):\n        return loss\n    loss_fn = losses.get(loss)\n    return losses.LossFunctionWrapper(loss_fn, name=loss_fn.__name__, reduction=losses_utils.ReductionV2.SUM_OVER_BATCH_SIZE)",
    "docstring": "Returns the loss corresponding to the loss input in API.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\training_utils_v1.py",
    "ast_data": "FunctionDef name:get_loss_function arg:loss arguments arg If BoolOp Compare Call Return return:yes If BoolOp Call Call Raise Call Call If Call Assign Call If BoolOp Call Call Return return:yes Assign Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "get_shape",
    "source_code": "def get_shape(self) -> tensor_shape.TensorShape:\n    return self.shape",
    "docstring": "Alias of .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\variables.py",
    "ast_data": "FunctionDef name:get_shape arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "replica_local_fn",
    "source_code": "def replica_local_fn(*args, **kwargs):\n    if any((isinstance(arg, keras_tensor.KerasTensor) for arg in nest.flatten((args, kwargs)))):\n        update_op = None\n    else:\n        update_op = self.update_state(*args, **kwargs)\n    update_ops = []\n    if update_op is not None:\n        update_ops.append(update_op)\n    with ops.control_dependencies(update_ops):\n        result_t = self.result()\n        result_t._metric_obj = self\n        return result_t",
    "docstring": "Updates the state of the metric in a replica-local context.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\metrics.py",
    "ast_data": "FunctionDef name:replica_local_fn arguments arg arg If Call Call Call Assign Assign Call Assign If Compare Call With Call Assign Call Assign Return return:yes"
  },
  {
    "library": "pandas",
    "name": "get_buffers",
    "source_code": "@abstractmethod\ndef get_buffers(self) -> ColumnBuffers:\n    pass",
    "docstring": "Return a dictionary containing the underlying buffers. The returned dictionary has the following contents: - \"data\": a two-element tuple whose first element is a buffer containing the data and whose second element is the data buffer's associated dtype. - \"validity\": a two-element tuple whose first element is a buffer containing mask values indicating missing data and whose second element is the mask value buffer's associated dtype. None if the null representation is not a bit or byte mask. - \"offsets\": a two-element tuple whose first element is a buffer containing the offset values for variable-size binary data (e.g., variable-length strings) and whose second element is the offsets buffer's associated dtype. None if the data buffer does not have an associated offsets buffer.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\interchange\\dataframe_protocol.py",
    "ast_data": "FunctionDef name:get_buffers arg:self arguments arg"
  },
  {
    "library": "pytorch",
    "name": "benchmark_codegened_module",
    "source_code": "def benchmark_codegened_module(self, module: ModuleType) -> tuple[float, str]:\n    raise NotImplementedError",
    "docstring": "Benchmark a compiled module and return the execution time in milliseconds on randomly generated inputs.",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\scheduler.py",
    "ast_data": "FunctionDef name:benchmark_codegened_module arg:self arg:module arguments arg arg Raise"
  },
  {
    "library": "tensorflow",
    "name": "get_v2_constants",
    "source_code": "def get_v2_constants(module: Any) -> Sequence[str]:\n    constants_v2 = []\n    tensorflow_constants_attr = API_ATTRS[TENSORFLOW_API_NAME].constants\n    if hasattr(module, tensorflow_constants_attr):\n        constants_v2.extend(getattr(module, tensorflow_constants_attr))\n    return constants_v2",
    "docstring": "Get a list of TF 2.0 constants in this module. Args: module: TensorFlow module. Returns: List of all API constants under the given module.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\util\\tf_export.py",
    "ast_data": "FunctionDef name:get_v2_constants arg:module arguments arg Assign Assign If Call Call Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "__enter__",
    "source_code": "def __enter__(self):\n    return self",
    "docstring": "Context manager enter method, does nothing.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\dviread.py",
    "ast_data": "FunctionDef name:__enter__ arg:self arguments arg Return return:yes"
  },
  {
    "library": "kornia",
    "name": "ShearY",
    "source_code": "class ShearY(OperationBase):\n\n    @staticmethod\n    def _process_magnitude(magnitude: Tensor) -> Tensor:\n        return magnitude * 180\n\n    def __init__(self, initial_magnitude: Optional[float]=0.1, initial_probability: float=0.5, magnitude_range: Tuple[float, float]=(0.0, 0.3), temperature: float=0.1, symmetric_megnitude: bool=True) -> None:\n        if symmetric_megnitude and magnitude_range[0] < 0:\n            raise ValueError(f'Lower bound of {self.__class__.__name__} is a symmetric operation. The lower bound must above 0. Got {magnitude_range[0]}.')\n        super().__init__(K.RandomShear((0.0, 0.0, magnitude_range[0], magnitude_range[1]), same_on_batch=False, p=initial_probability, align_corners=True), initial_magnitude=[('shear_y', initial_magnitude)], temperature=temperature, symmetric_megnitude=symmetric_megnitude, magnitude_fn=ShearY._process_magnitude)",
    "docstring": "Apply shear operation along y-axis. Args: initial_magnitude: the initial magnitude. initial_probability: the initial probability. If None, the augmentation will be randomly applied according to he augmentation sampling range. magnitude_range: the sampling range for random sampling and clamping the optimized magnitude. temperature: temperature for RelaxedBernoulli distribution used during training. symmetric_megnitude: if to randomly assign the magnitude as negative or not.",
    "type": "class",
    "file_path": "kornia\\kornia\\augmentation\\auto\\operations\\ops.py",
    "ast_data": "ClassDef name:ShearY FunctionDef name:_process_magnitude arg:magnitude arguments arg Return return:yes FunctionDef name:__init__ arg:self arg:initial_magnitude arg:initial_probability arg:magnitude_range arg:temperature arg:symmetric_megnitude arguments arg arg arg arg arg arg If BoolOp Compare Raise Call Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "stop_recording",
    "source_code": "@contextlib.contextmanager\ndef stop_recording():\n    is_stopped = pywrap_tfe.TFE_Py_TapeSetIsStopped()\n    try:\n        if not is_stopped:\n            pywrap_tfe.TFE_Py_TapeSetStopOnThread()\n        yield\n    finally:\n        if not is_stopped:\n            pywrap_tfe.TFE_Py_TapeSetRestartOnThread()",
    "docstring": "Stop all gradient recording (backprop and forwardprop).",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\record.py",
    "ast_data": "FunctionDef name:stop_recording arguments Assign Call Try If Call If Call"
  },
  {
    "library": "matplotlib",
    "name": "set_clip_path",
    "source_code": "def set_clip_path(self, path):\n    _api.check_isinstance((transforms.TransformedPath, None), path=path)\n    self._clippath = path",
    "docstring": "Set the clip path to a or None.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\backend_bases.py",
    "ast_data": "FunctionDef name:set_clip_path arg:self arg:path arguments arg arg Call Assign"
  },
  {
    "library": "matplotlib",
    "name": "pause",
    "source_code": "def pause(interval: float) -> None:\n    manager = _pylab_helpers.Gcf.get_active()\n    if manager is not None:\n        canvas = manager.canvas\n        if canvas.figure.stale:\n            canvas.draw_idle()\n        show(block=False)\n        canvas.start_event_loop(interval)\n    else:\n        time.sleep(interval)",
    "docstring": "Run the GUI event loop for *interval* seconds. If there is an active figure, it will be updated and displayed before the pause, and the GUI event loop (if any) will run during the pause. This can be used for crude animation. For more complex animation use :mod:. If there is no active figure, sleep for *interval* seconds instead. See Also -------- matplotlib.animation : Proper animations show : Show all figures and optional block until all figures are closed.",
    "type": "function",
    "file_path": "matplotlib\\lib\\matplotlib\\pyplot.py",
    "ast_data": "FunctionDef name:pause arg:interval arguments arg Assign Call If Compare Assign If Call Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_setup_countdown_if_has_grace_period_and_not_already_counting_down",
    "source_code": "def _setup_countdown_if_has_grace_period_and_not_already_counting_down(self):\n    if self._grace_period > 0 and (not self._final_checkpoint_countdown):\n        buffer_factor = 3\n        self._target_time_for_termination = self._received_own_sigterm_time + self._grace_period - buffer_factor * self._estimated_run_time * 2",
    "docstring": "Set up at the beginning of a countdown period for long grace period.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\failure_handling\\failure_handling.py",
    "ast_data": "FunctionDef name:_setup_countdown_if_has_grace_period_and_not_already_counting_down arg:self arguments arg If BoolOp Compare Assign Assign"
  },
  {
    "library": "tensorflow",
    "name": "unknown_shape",
    "source_code": "def unknown_shape(rank=None, **kwargs) -> 'TensorShape':\n    if rank is None and 'ndims' in kwargs:\n        rank = kwargs.pop('ndims')\n    if kwargs:\n        raise TypeError('Unknown argument: %s' % kwargs)\n    if rank is None:\n        return TensorShape(None)\n    else:\n        return TensorShape([Dimension(None)] * rank)",
    "docstring": "Returns an unknown TensorShape, optionally with a known rank. Args: rank: (Optional) If specified, the number of dimensions in the shape. **kwargs: For backwards compatibility. Returns: An unknown TensorShape. Raises: TypeError: In case of invalid arguments.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\tensor_shape.py",
    "ast_data": "FunctionDef name:unknown_shape arg:rank arguments arg arg If BoolOp Compare Compare Assign Call If Raise Call If Compare Return return:yes Call Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "mtia",
    "source_code": "def mtia(self, device: Optional[Union[int, device]]=None) -> Self:\n    return self._apply(lambda t: t.mtia(device))",
    "docstring": "Move all model parameters and buffers to the MTIA. This also makes associated parameters and buffers different objects. So it should be called before constructing the optimizer if the module will live on MTIA while being optimized. .. note:: This method modifies the module in-place. Arguments: device (int, optional): if specified, all parameters will be copied to that device Returns: Module: self",
    "type": "method",
    "file_path": "pytorch\\torch\\nn\\modules\\module.py",
    "ast_data": "FunctionDef name:mtia arg:self arg:device arguments arg arg Return return:yes Call arguments arg Call"
  },
  {
    "library": "kornia",
    "name": "rgb_to_grayscale",
    "source_code": "def rgb_to_grayscale(image: Tensor, rgb_weights: Optional[Tensor]=None) -> Tensor:\n    KORNIA_CHECK_IS_TENSOR(image)\n    if len(image.shape) < 3 or image.shape[-3] != 3:\n        raise ValueError(f'Input size must have a shape of (*, 3, H, W). Got {image.shape}')\n    if rgb_weights is None:\n        if image.dtype == torch.uint8:\n            rgb_weights = torch.tensor([76, 150, 29], device=image.device, dtype=torch.uint8)\n        elif image.dtype in (torch.float16, torch.float32, torch.float64):\n            rgb_weights = torch.tensor([0.299, 0.587, 0.114], device=image.device, dtype=image.dtype)\n        else:\n            raise TypeError(f'Unknown data type: {image.dtype}')\n    else:\n        rgb_weights = rgb_weights.to(image)\n    r: Tensor = image[..., 0:1, :, :]\n    g: Tensor = image[..., 1:2, :, :]\n    b: Tensor = image[..., 2:3, :, :]\n    w_r, w_g, w_b = rgb_weights.unbind()\n    return w_r * r + w_g * g + w_b * b",
    "docstring": "Convert a RGB image to grayscale version of image. .. image:: _static/img/rgb_to_grayscale.png The image data is assumed to be in the range of (0, 1). Args: image: RGB image to be converted to grayscale with shape :math:. rgb_weights: Weights that will be applied on each channel (RGB). The sum of the weights should add up to one. Returns: grayscale version of the image with shape :math:. .. note:: See a working example __. Example: >>> input = torch.rand(2, 3, 4, 5) >>> gray = rgb_to_grayscale(input) # 2x1x4x5",
    "type": "function",
    "file_path": "kornia\\kornia\\color\\gray.py",
    "ast_data": "FunctionDef name:rgb_to_grayscale arg:image arg:rgb_weights arguments arg arg Call If BoolOp Compare Call Compare Raise Call If Compare If Compare Assign Call If Compare Assign Call Raise Call Assign Call Assign Call Return return:yes"
  },
  {
    "library": "django",
    "name": "select_related_descend",
    "source_code": "def select_related_descend(field, restricted, requested, select_mask):\n    if not field.remote_field:\n        return False\n    if getattr(field.remote_field, 'parent_link', False):\n        return False\n    if not restricted:\n        return not field.null\n    if field.name not in requested:\n        return False\n    if select_mask and field not in select_mask:\n        raise FieldError(f'Field {field.model._meta.object_name}.{field.name} cannot be both deferred and traversed using select_related at the same time.')\n    return True",
    "docstring": "Return whether should be used to descend deeper for purposes. Arguments: * - the field to be checked. Can be either a or instance. * - a boolean field, indicating if the field list has been manually restricted using a select_related() clause. * - the select_related() dictionary. * - the dictionary of selected fields.",
    "type": "function",
    "file_path": "django\\django\\db\\models\\query_utils.py",
    "ast_data": "FunctionDef name:select_related_descend arg:field arg:restricted arg:requested arg:select_mask arguments arg arg arg arg If Return return:yes If Call Return return:yes If Return return:yes If Compare Return return:yes If BoolOp Compare Raise Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "get_draggable",
    "source_code": "def get_draggable(self):\n    return self._draggable is not None",
    "docstring": "Return `` otherwise.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\legend.py",
    "ast_data": "FunctionDef name:get_draggable arg:self arguments arg Return return:yes Compare"
  },
  {
    "library": "authlib",
    "name": "validate_userinfo_encryption_alg_values_supported",
    "source_code": "def validate_userinfo_encryption_alg_values_supported(self):\n    validate_array_value(self, 'userinfo_encryption_alg_values_supported')",
    "docstring": "OPTIONAL. JSON array containing a list of the JWE encryption algorithms (alg values) [JWA] supported by the UserInfo Endpoint to encode the Claims in a JWT.",
    "type": "method",
    "file_path": "authlib\\authlib\\oidc\\discovery\\models.py",
    "ast_data": "FunctionDef name:validate_userinfo_encryption_alg_values_supported arg:self arguments arg Call"
  },
  {
    "library": "matplotlib",
    "name": "splitx",
    "source_code": "def splitx(self, *args):\n    xf = [0, *args, 1]\n    x0, y0, x1, y1 = self.extents\n    w = x1 - x0\n    return [Bbox([[x0 + xf0 * w, y0], [x0 + xf1 * w, y1]]) for xf0, xf1 in itertools.pairwise(xf)]",
    "docstring": "Return a list of new objects formed by splitting the original one with vertical lines at fractional positions given by *args*.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\transforms.py",
    "ast_data": "FunctionDef name:splitx arg:self arguments arg arg Assign Assign Assign Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "num_buckets",
    "source_code": "@property\ndef num_buckets(self):\n    return self.categorical_column.num_buckets",
    "docstring": "See base class.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\feature_column\\feature_column_v2.py",
    "ast_data": "FunctionDef name:num_buckets arg:self arguments arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "full",
    "source_code": "def full(size, fill_value, *, dtype: Optional[torch.dtype]=None, layout: torch.layout=torch.strided, requires_grad: bool=False, device_mesh: Optional[DeviceMesh]=None, placements: Optional[Sequence[Placement]]=None) -> DTensor:\n    torch_size = normalize_to_torch_size(size)\n    return _dtensor_init_helper(torch.full, torch_size, fill_value=fill_value, dtype=dtype, layout=layout, requires_grad=requires_grad, device_mesh=device_mesh, placements=placements)",
    "docstring": "Returns a :class: filled with `DTensortorch.dtypeDTensortorch.set_default_dtypetorch.layoutDTensorDeviceMeshPlacementDTensor` object on each rank",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\tensor\\_api.py",
    "ast_data": "FunctionDef name:full arg:size arg:fill_value arguments arg arg arg arg arg arg arg Assign Call Return return:yes Call"
  },
  {
    "library": "authlib",
    "name": "authenticate_none",
    "source_code": "def authenticate_none(query_client, request):\n    client_id = request.payload.client_id\n    if client_id and (not request.payload.data.get('client_secret')):\n        client = _validate_client(query_client, client_id)\n        log.debug(f'Authenticate {client_id} via \"none\" success')\n        return client\n    log.debug(f'Authenticate {client_id} via \"none\" failed')",
    "docstring": "Authenticate public client by `` method. The client does not have a client secret.",
    "type": "function",
    "file_path": "authlib\\authlib\\oauth2\\rfc6749\\authenticate_client.py",
    "ast_data": "FunctionDef name:authenticate_none arg:query_client arg:request arguments arg arg Assign If BoolOp Call Assign Call Call Return return:yes Call"
  },
  {
    "library": "seaborn",
    "name": "estimate_data",
    "source_code": "@property\ndef estimate_data(self):\n    x, y = (self.x_discrete, self.y)\n    vals = sorted(np.unique(x))\n    points, cis = ([], [])\n    for val in vals:\n        _y = y[x == val]\n        est = self.x_estimator(_y)\n        points.append(est)\n        if self.x_ci is None:\n            cis.append(None)\n        else:\n            units = None\n            if self.x_ci == 'sd':\n                sd = np.std(_y)\n                _ci = (est - sd, est + sd)\n            else:\n                if self.units is not None:\n                    units = self.units[x == val]\n                boots = algo.bootstrap(_y, func=self.x_estimator, n_boot=self.n_boot, units=units, seed=self.seed)\n                _ci = utils.ci(boots, self.x_ci)\n            cis.append(_ci)\n    return (vals, points, cis)",
    "docstring": "Data with a point estimate and CI for each discrete x value.",
    "type": "method",
    "file_path": "seaborn\\seaborn\\regression.py",
    "ast_data": "FunctionDef name:estimate_data arg:self arguments arg Assign Assign Call Call Assign For Assign Compare Assign Call Call If Compare Call Assign If Compare Assign Call Assign If Compare Assign Compare Assign Call Assign Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "tree_map",
    "source_code": "def tree_map(func: Callable[..., Any], tree: PyTree, *rests: PyTree, is_leaf: Optional[Callable[[PyTree], bool]]=None) -> PyTree:\n    leaves, treespec = tree_flatten(tree, is_leaf=is_leaf)\n    flat_args = [leaves] + [treespec.flatten_up_to(r) for r in rests]\n    return treespec.unflatten(map(func, *flat_args))",
    "docstring": "Map a multi-input function over pytree args to produce a new pytree. See also :func:. >>> tree_map(lambda x: x + 1, {'x': 7, 'y': (42, 64)}) {'x': 8, 'y': (43, 65)} >>> tree_map(lambda x: x is None, {'x': 7, 'y': (42, 64), 'z': None}) {'x': False, 'y': (False, False), 'z': True} If multiple inputs are given, the structure of the tree is taken from the first input; subsequent inputs need only have `True`.",
    "type": "function",
    "file_path": "pytorch\\torch\\utils\\_pytree.py",
    "ast_data": "FunctionDef name:tree_map arg:func arg:tree arguments arg arg arg arg Assign Call Assign Call Return return:yes Call Call"
  },
  {
    "library": "authlib",
    "name": "validate_logo_uri",
    "source_code": "def validate_logo_uri(self):\n    self._validate_uri('logo_uri')",
    "docstring": "URL string that references a logo for the client. If present, the server SHOULD display this image to the end-user during approval. The value of this field MUST point to a valid image file. The value of this field MAY be internationalized, as described in Section 2.2.",
    "type": "method",
    "file_path": "authlib\\authlib\\oauth2\\rfc7591\\claims.py",
    "ast_data": "FunctionDef name:validate_logo_uri arg:self arguments arg Call"
  },
  {
    "library": "kornia",
    "name": "max_blur_pool2d",
    "source_code": "def max_blur_pool2d(input: Tensor, kernel_size: tuple[int, int] | int, stride: int=2, max_pool_size: int=2, ceil_mode: bool=False) -> Tensor:\n    KORNIA_CHECK_SHAPE(input, ['B', 'C', 'H', 'W'])\n    kernel = get_pascal_kernel_2d(kernel_size, norm=True, device=input.device, dtype=input.dtype).repeat((input.shape[1], 1, 1, 1))\n    return _max_blur_pool_by_kernel2d(input, kernel, stride, max_pool_size, ceil_mode)",
    "docstring": "Compute pools and blurs and downsample a given feature map. .. image:: _static/img/max_blur_pool2d.png See :class: for details. Args: input: tensor to apply operation to. kernel_size: the kernel size for max pooling. stride: stride for pooling. max_pool_size: the kernel size for max pooling. ceil_mode: should be true to match output size of conv2d with same kernel size. .. note:: This function is tested against .. note:: See a working example __. Examples: >>> input = torch.eye(5)[None, None] >>> max_blur_pool2d(input, 3) tensor([[[[0.5625, 0.3125], [0.3125, 0.8750]]]])",
    "type": "function",
    "file_path": "kornia\\kornia\\filters\\blur_pool.py",
    "ast_data": "FunctionDef name:max_blur_pool2d arg:input arg:kernel_size arg:stride arg:max_pool_size arg:ceil_mode arguments arg arg arg arg arg Call Assign Call Call Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "_get_data_mask",
    "source_code": "def _get_data_mask(self, t, f1, f2, where):\n    if where is None:\n        where = True\n    else:\n        where = np.asarray(where, dtype=bool)\n        if where.size != t.size:\n            msg = 'where size ({}) does not match {!r} size ({})'.format(where.size, self.t_direction, t.size)\n            raise ValueError(msg)\n    return where & ~functools.reduce(np.logical_or, map(np.ma.getmaskarray, [t, f1, f2]))",
    "docstring": "Return a bool array, with True at all points that should eventually be rendered. The array is True at a point if none of the data inputs *t*, *f1*, *f2* is masked and if the input *where* is true at that point.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\collections.py",
    "ast_data": "FunctionDef name:_get_data_mask arg:self arg:t arg:f1 arg:f2 arg:where arguments arg arg arg arg arg If Compare Assign Assign Call If Compare Assign Call Raise Call Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "check_inst_exn_tab_entries_valid",
    "source_code": "def check_inst_exn_tab_entries_valid(instructions: list[Instruction]):\n    indexof = get_indexof(instructions)\n    exn_tab_entry_set = set()\n    for i, inst in enumerate(instructions):\n        if inst.exn_tab_entry:\n            assert sys.version_info >= (3, 11)\n            assert id(inst.exn_tab_entry) not in exn_tab_entry_set\n            exn_tab_entry_set.add(id(inst.exn_tab_entry))\n            entry = inst.exn_tab_entry\n            assert entry.start in indexof\n            assert entry.end in indexof\n            assert entry.target in indexof\n            assert indexof[entry.start] <= i <= indexof[entry.end]",
    "docstring": "Checks that exn_tab_entries of instructions are valid. An entry's start, end, and target must be in instructions. Instructions with an exn_tab_entry are located within the entry's start and end instructions. Instructions do not share exn_tab_entries. Implicitly checks for no duplicate instructions.",
    "type": "function",
    "file_path": "pytorch\\torch\\_dynamo\\bytecode_transformation.py",
    "ast_data": "FunctionDef name:check_inst_exn_tab_entries_valid arg:instructions arguments arg Assign Call Assign Call For Call If Compare Compare Call Call Call Assign Compare Compare Compare Compare"
  },
  {
    "library": "matplotlib",
    "name": "get_linestyle",
    "source_code": "def get_linestyle(self):\n    return self._linestyle",
    "docstring": "Return the linestyle. See also .",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\lines.py",
    "ast_data": "FunctionDef name:get_linestyle arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "get_num_gpus",
    "source_code": "def get_num_gpus():\n    return _get_num_nvidia_gpus()",
    "docstring": "Returns the number of GPUs visible on the current node. Currently only implemented for NVIDIA GPUs.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\cluster_resolver\\slurm_cluster_resolver.py",
    "ast_data": "FunctionDef name:get_num_gpus arguments Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "__init__",
    "source_code": "def __init__(self, maxes, mins):\n    self.maxes = np.maximum(maxes, mins).astype(float)\n    self.mins = np.minimum(maxes, mins).astype(float)\n    self.m, = self.maxes.shape",
    "docstring": "Construct a hyperrectangle.",
    "type": "method",
    "file_path": "scipy\\scipy\\spatial\\_kdtree.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:maxes arg:mins arguments arg arg arg Assign Call Call Assign Call Call Assign"
  },
  {
    "library": "cherrypy",
    "name": "encode_header_items",
    "source_code": "@classmethod\ndef encode_header_items(cls, header_items):\n    for k, v in header_items:\n        if not isinstance(v, str) and (not isinstance(v, bytes)):\n            v = str(v)\n        yield tuple(map(cls.encode_header_item, (k, v)))",
    "docstring": "Emit tuples of wire-ready HTTP headers. Prepare the sequence of name, value tuples into a form suitable for transmitting on the wire for HTTP.",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\lib\\httputil.py",
    "ast_data": "FunctionDef name:encode_header_items arg:cls arg:header_items arguments arg arg For If BoolOp Call Call Assign Call Call Call"
  },
  {
    "library": "pandas",
    "name": "__from_arrow__",
    "source_code": "def __from_arrow__(self, array: pyarrow.Array | pyarrow.ChunkedArray) -> BaseStringArray:\n    if self.storage == 'pyarrow':\n        if self._na_value is libmissing.NA:\n            from pandas.core.arrays.string_arrow import ArrowStringArray\n            return ArrowStringArray(array)\n        else:\n            from pandas.core.arrays.string_arrow import ArrowStringArrayNumpySemantics\n            return ArrowStringArrayNumpySemantics(array)\n    else:\n        import pyarrow\n        if isinstance(array, pyarrow.Array):\n            chunks = [array]\n        else:\n            chunks = array.chunks\n        results = []\n        for arr in chunks:\n            arr = arr.to_numpy(zero_copy_only=False)\n            arr = ensure_string_array(arr, na_value=self.na_value)\n            results.append(arr)\n    if len(chunks) == 0:\n        arr = np.array([], dtype=object)\n    else:\n        arr = np.concatenate(results)\n    new_string_array = StringArray.__new__(StringArray)\n    NDArrayBacked.__init__(new_string_array, arr, self)\n    return new_string_array",
    "docstring": "Construct StringArray from pyarrow Array/ChunkedArray.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\arrays\\string_.py",
    "ast_data": "FunctionDef name:__from_arrow__ arg:self arg:array arguments arg arg If Compare If Compare Return return:yes Call Return return:yes Call If Call Assign Assign Assign For Assign Call Assign Call Call If Compare Call Assign Call Assign Call Assign Call Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "get_markerfacecolor",
    "source_code": "def get_markerfacecolor(self):\n    return self._get_markerfacecolor(alt=False)",
    "docstring": "Return the marker face color. See also .",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\lines.py",
    "ast_data": "FunctionDef name:get_markerfacecolor arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "kornia",
    "name": "draw_convex_polygon",
    "source_code": "def draw_convex_polygon(images: Tensor, polygons: Union[Tensor, List[Tensor]], colors: Tensor) -> Tensor:\n    KORNIA_CHECK_SHAPE(images, ['B', 'C', 'H', 'W'])\n    b_i, c_i, h_i, w_i, device = (*images.shape, images.device)\n    if isinstance(polygons, List):\n        polygons = _batch_polygons(polygons)\n    b_p, _, xy, device_p, dtype_p = (*polygons.shape, polygons.device, polygons.dtype)\n    if len(colors.shape) == 1:\n        colors = colors.expand(b_i, c_i)\n    b_c, _, device_c = (*colors.shape, colors.device)\n    KORNIA_CHECK(xy == 2, 'Polygon vertices must be xy, i.e. 2-dimensional')\n    KORNIA_CHECK(b_i == b_p == b_c, 'Image, polygon, and color must have same batch dimension')\n    KORNIA_CHECK(device == device_p == device_c, 'Image, polygon, and color must have same device')\n    x_left, x_right = _get_convex_edges(polygons, h_i, w_i)\n    ws = torch.arange(w_i, device=device, dtype=dtype_p)[None, None, :]\n    fill_region = (ws >= x_left[..., :, None]) & (ws <= x_right[..., :, None])\n    images.mul_(~fill_region[:, None]).add_(fill_region[:, None] * colors[..., None, None])\n    return images",
    "docstring": "Draws convex polygons on a batch of image tensors. Args: images: is tensor of BxCxHxW. polygons: represents polygons as points, either BxNx2 or List of variable length polygons. N is the number of points. 2 is (x, y). colors: a B x 3 tensor or 3 tensor with color to fill in. Returns: This operation modifies image inplace but also returns the drawn tensor for convenience with same shape the of the input BxCxHxW. Note: This function assumes a coordinate system (0, h - 1), (0, w - 1) in the image, with (0, 0) being the center of the top-left pixel and (w - 1, h - 1) being the center of the bottom-right coordinate. Example: >>> img = torch.rand(1, 3, 12, 16) >>> poly = torch.tensor([[[4, 4], [12, 4], [12, 8], [4, 8]]]) >>> color = torch.tensor([[0.5, 0.5, 0.5]]) >>> out = draw_convex_polygon(img, poly, color)",
    "type": "function",
    "file_path": "kornia\\kornia\\utils\\draw.py",
    "ast_data": "FunctionDef name:draw_convex_polygon arg:images arg:polygons arg:colors arguments arg arg arg Call Assign If Call Assign Call Assign If Compare Call Assign Call Assign Call Compare Call Compare Call Compare Assign Call Assign Call Assign Compare Compare Call Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "get_fontconfig_pattern",
    "source_code": "def get_fontconfig_pattern(self):\n    return generate_fontconfig_pattern(self)",
    "docstring": "Get a fontconfig_ pattern_ suitable for looking up the font as specified with fontconfig's `` utility. This support does not depend on fontconfig; we are merely borrowing its pattern syntax for use here.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\font_manager.py",
    "ast_data": "FunctionDef name:get_fontconfig_pattern arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "all_v2_summary_ops",
    "source_code": "@tf_export(v1=['summary.all_v2_summary_ops'])\ndef all_v2_summary_ops():\n    if context.executing_eagerly():\n        return None\n    return ops.get_collection(ops.GraphKeys._SUMMARY_COLLECTION)",
    "docstring": "Returns all V2-style summary ops defined in the current default graph. This includes ops from TF 2.0 tf.summary and TF 1.x tf.contrib.summary (except for and ), but does *not* include TF 1.x tf.summary ops. Returns: List of summary ops, or None if called under eager execution.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\summary_ops_v2.py",
    "ast_data": "FunctionDef name:all_v2_summary_ops arguments If Call Return return:no Return return:yes Call Call"
  },
  {
    "library": "matplotlib",
    "name": "_decrypt",
    "source_code": "@staticmethod\ndef _decrypt(ciphertext, key, ndiscard=4):\n    key = _api.check_getitem({'eexec': 55665, 'charstring': 4330}, key=key)\n    plaintext = []\n    for byte in ciphertext:\n        plaintext.append(byte ^ key >> 8)\n        key = (key + byte) * 52845 + 22719 & 65535\n    return bytes(plaintext[ndiscard:])",
    "docstring": "Decrypt ciphertext using the Type-1 font algorithm. The algorithm is described in Adobe's \"Adobe Type 1 Font Format\". The key argument can be an integer, or one of the strings 'eexec' and 'charstring', which map to the key specified for the corresponding part of Type-1 fonts. The ndiscard argument should be an integer, usually 4. That number of bytes is discarded from the beginning of plaintext.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\_type1font.py",
    "ast_data": "FunctionDef name:_decrypt arg:ciphertext arg:key arg:ndiscard arguments arg arg arg Assign Call Assign For Call Assign Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "indexing_dtype_strength_reduction",
    "source_code": "def indexing_dtype_strength_reduction(loop_body: LoopBody) -> None:\n    bv = loop_body.bounds()\n    int64_dtype_nodes = [node for node in loop_body.get_nodes() if node.target == 'to_dtype' and node.args[2] == torch.int64 and (node not in bv.unbounded_vars)]\n    if not int64_dtype_nodes:\n        return\n    bounds = bv.get_bounds()\n    for node in int64_dtype_nodes:\n        try_to_reduce_precision(node, bounds, loop_body.indirect_vars, loop_body.indexing_exprs, bv.replacement_vals)",
    "docstring": "Performs Value Range Analysis on LoopBody's fx graph to reduce precision of intermediaries from int64 to int32",
    "type": "function",
    "file_path": "pytorch\\torch\\_inductor\\optimize_indexing.py",
    "ast_data": "FunctionDef name:indexing_dtype_strength_reduction arg:loop_body arguments arg Assign Call Assign Call BoolOp Compare Compare Compare If Return return:no Assign Call For Call"
  },
  {
    "library": "django",
    "name": "delete",
    "source_code": "def delete(self):\n    self._not_support_combined_queries('delete')\n    if self.query.is_sliced:\n        raise TypeError(\"Cannot use 'limit' or 'offset' with delete().\")\n    if self.query.distinct_fields:\n        raise TypeError('Cannot call delete() after .distinct(*fields).')\n    if self._fields is not None:\n        raise TypeError('Cannot call delete() after .values() or .values_list()')\n    del_query = self._chain()\n    del_query._for_write = True\n    del_query.query.select_for_update = False\n    del_query.query.select_related = False\n    del_query.query.clear_ordering(force=True)\n    collector = Collector(using=del_query.db, origin=self)\n    collector.collect(del_query)\n    num_deleted, num_deleted_per_model = collector.delete()\n    self._result_cache = None\n    return (num_deleted, num_deleted_per_model)",
    "docstring": "Delete the records in the current QuerySet.",
    "type": "method",
    "file_path": "django\\django\\db\\models\\query.py",
    "ast_data": "FunctionDef name:delete arg:self arguments arg Call If Raise Call If Raise Call If Compare Raise Call Assign Call Assign Assign Assign Call Assign Call Call Assign Call Assign Return return:yes"
  },
  {
    "library": "scipy",
    "name": "c2rn",
    "source_code": "def c2rn(forward, x, s=None, axes=None, norm=None, overwrite_x=False, workers=None, *, plan=None):\n    if plan is not None:\n        raise NotImplementedError('Passing a precomputed plan is not yet supported by scipy.fft functions')\n    tmp = _asfarray(x)\n    if np.isrealobj(tmp):\n        tmp = tmp + 0j\n    noshape = s is None\n    shape, axes = _init_nd_shape_and_axes(tmp, s, axes)\n    if len(axes) == 0:\n        raise ValueError('at least 1 axis must be transformed')\n    shape = list(shape)\n    if noshape:\n        shape[-1] = (x.shape[axes[-1]] - 1) * 2\n    norm = _normalization(norm, forward)\n    workers = _workers(workers)\n    lastsize = shape[-1]\n    shape[-1] = shape[-1] // 2 + 1\n    tmp, _ = tuple(_fix_shape(tmp, shape, axes))\n    return pfft.c2r(tmp, axes, lastsize, forward, norm, None, workers)",
    "docstring": "Multidimensional inverse discrete fourier transform with real output",
    "type": "function",
    "file_path": "scipy\\scipy\\fft\\_pocketfft\\basic.py",
    "ast_data": "FunctionDef name:c2rn arg:forward arg:x arg:s arg:axes arg:norm arg:overwrite_x arg:workers arguments arg arg arg arg arg arg arg arg If Compare Raise Call Assign Call If Call Assign Assign Compare Assign Call If Compare Call Raise Call Assign Call If Assign Assign Call Assign Call Assign Assign Assign Call Call Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "phase_shift",
    "source_code": "@phase_shift.setter\ndef phase_shift(self, v: int | None):\n    if v is None:\n        self._phase_shift = v\n        return\n    if not isinstance(v, int | np.integer):\n        raise ValueError(f'phase_shift={v} has the unit samples. Hence ' + 'it needs to be an int or it may be None!')\n    if not -self.mfft < v < self.mfft:\n        raise ValueError('-mfft < phase_shift < mfft does not hold ' + f'for mfft={self.mfft}, phase_shift={v}!')\n    self._phase_shift = v",
    "docstring": "The absolute value of the phase shift needs to be less than mfft samples. See the getter method for more details.",
    "type": "method",
    "file_path": "scipy\\scipy\\signal\\_short_time_fft.py",
    "ast_data": "FunctionDef name:phase_shift arg:self arg:v arguments arg arg If Compare Assign Return return:no If Call Raise Call If Compare Raise Call Assign"
  },
  {
    "library": "matplotlib",
    "name": "get_path",
    "source_code": "def get_path(self):\n    return self._path",
    "docstring": "Return the path of the ellipse.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\patches.py",
    "ast_data": "FunctionDef name:get_path arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "dtype",
    "source_code": "@property\ndef dtype(self):\n    return self._dtype",
    "docstring": "dtype of s transformable by this distribution.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\distributions\\bijector_impl.py",
    "ast_data": "FunctionDef name:dtype arg:self arguments arg Return return:yes"
  },
  {
    "library": "cryptography",
    "name": "__copy__",
    "source_code": "@abc.abstractmethod\ndef __copy__(self) -> RSAPublicKey:\n    pass",
    "docstring": "Returns a copy.",
    "type": "method",
    "file_path": "cryptography\\src\\cryptography\\hazmat\\primitives\\asymmetric\\rsa.py",
    "ast_data": "FunctionDef name:__copy__ arg:self arguments arg"
  },
  {
    "library": "tensorflow",
    "name": "decorated",
    "source_code": "def decorated(*args, **kwds):\n    parameter_positions = _get_arg_spec(f, params, args)\n    assert not kwds, \"The gradient function can't take keyword arguments.\"\n    this_tape = tape.push_new_tape(persistent=persistent)\n    try:\n        sources = []\n        args = [ops.convert_to_tensor(arg) if i in parameter_positions else arg for i, arg in enumerate(args)]\n        args = _ensure_unique_tensor_objects(parameter_positions, args)\n        for i in parameter_positions:\n            if getattr(args[i], 'is_packed', False):\n                raise ValueError('GradientTape.gradient is not supported on packed EagerTensorsyet.')\n            sources.append(args[i])\n            tape.watch(this_tape, args[i])\n        result = f(*args)\n        if result is None:\n            raise ValueError('Cannot differentiate a function that returns None; did you forget to return a value from {}?'.format(f.__name__))\n        flat_result = nest.flatten(result)\n        flat_result = [gen_array_ops.identity(x) for x in flat_result]\n        result = nest.pack_sequence_as(result, flat_result)\n    finally:\n        tape.pop_tape(this_tape)\n\n    def vjp(dy=None):\n        if dy is not None:\n            dy = [ops.convert_to_tensor(x) for x in nest.flatten(dy)]\n        return imperative_grad.imperative_grad(this_tape, nest.flatten(result), sources, output_gradients=dy)\n    return (result, vjp)",
    "docstring": "Computes the value and gradient of the decorated function.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\backprop.py",
    "ast_data": "FunctionDef name:decorated arguments arg arg Assign Call Assign Call Try Assign Assign Compare Call Call Assign Call For If Call Raise Call Call Call Assign Call If Compare Raise Call Call Assign Call Assign Call Assign Call Call FunctionDef name:vjp arg:dy arguments arg If Compare Assign Call Call Return return:yes Call Call Return return:yes"
  },
  {
    "library": "kornia",
    "name": "ClassSequentialOps",
    "source_code": "class ClassSequentialOps(SequentialOpsInterface[Tensor]):\n\n    @classmethod\n    def transform(cls, input: Tensor, module: Module, param: ParamItem, extra_args: Optional[Dict[str, Any]]=None) -> Tensor:\n        if isinstance(module, K.MixAugmentationBaseV2):\n            raise NotImplementedError('The support for class labels for mix augmentations that change the class label is not yet supported.')\n        return input\n\n    @classmethod\n    def inverse(cls, input: Tensor, module: Module, param: ParamItem, extra_args: Optional[Dict[str, Any]]=None) -> Tensor:\n        return input",
    "docstring": "Apply and inverse transformations for class labels if needed.",
    "type": "class",
    "file_path": "kornia\\kornia\\augmentation\\container\\ops.py",
    "ast_data": "ClassDef name:ClassSequentialOps FunctionDef name:transform arg:cls arg:input arg:module arg:param arg:extra_args arguments arg arg arg arg arg If Call Raise Call Return return:yes FunctionDef name:inverse arg:cls arg:input arg:module arg:param arg:extra_args arguments arg arg arg arg arg Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "set_alignment",
    "source_code": "def set_alignment(self, align):\n    _api.check_in_list(self._locstrings, align=align)\n    if align == self._locstrings[1]:\n        self._locstrings = self._locstrings[::-1]\n    self.spines[self._locstrings[0]].set_visible(True)\n    self.spines[self._locstrings[1]].set_visible(False)\n    self._axis.set_ticks_position(align)\n    self._axis.set_label_position(align)",
    "docstring": "Set if axes spine and labels are drawn at top or bottom (or left/right) of the Axes. Parameters ---------- align : {'top', 'bottom', 'left', 'right'} Either 'top' or 'bottom' for orientation='x' or 'left' or 'right' for orientation='y' axis.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\axes\\_secondary_axes.py",
    "ast_data": "FunctionDef name:set_alignment arg:self arg:align arguments arg arg Call If Compare Assign Call Call Call Call"
  },
  {
    "library": "django",
    "name": "linear_units",
    "source_code": "@property\ndef linear_units(self):\n    return self.srs.linear_units",
    "docstring": "Return the linear units.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\db\\backends\\base\\models.py",
    "ast_data": "FunctionDef name:linear_units arg:self arguments arg Return return:yes"
  },
  {
    "library": "kornia",
    "name": "get_kernel",
    "source_code": "def get_kernel(self) -> Tensor:\n    raise NotImplementedError",
    "docstring": "Get kernel for image morphology convolution.",
    "type": "method",
    "file_path": "kornia\\kornia\\losses\\hausdorff.py",
    "ast_data": "FunctionDef name:get_kernel arg:self arguments arg Raise"
  },
  {
    "library": "pytorch",
    "name": "numpy_operator_wrapper",
    "source_code": "class numpy_operator_wrapper:\n\n    def __init__(self, op: Callable[..., Any]):\n        self.op = op\n        self.__name__ = f'wrapped_{op.__name__}'\n\n    def __repr__(self) -> str:\n        return f'<Wrapped operator <original {self.__name__}>>'\n\n    def __call__(self, *args, **kwargs):\n        assert not kwargs\n        args = (tnp.ndarray(arg) if isinstance(arg, torch.Tensor) else arg for arg in args)\n        out = self.op(*args)\n        return numpy_to_tensor(out)",
    "docstring": "Implements dunder methods for tnp.ndarray via functions from the operator library",
    "type": "class",
    "file_path": "pytorch\\torch\\_dynamo\\utils.py",
    "ast_data": "ClassDef name:numpy_operator_wrapper FunctionDef name:__init__ arg:self arg:op arguments arg arg Assign Assign FunctionDef name:__repr__ arg:self arguments arg Return return:yes FunctionDef name:__call__ arg:self arguments arg arg arg Assign Call Call Assign Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "get_meta_graph_def",
    "source_code": "def get_meta_graph_def(saved_model_dir, tag_set):\n    with session.Session(graph=ops.Graph()) as sess:\n        return loader.load(sess, tag_set, saved_model_dir)",
    "docstring": "Validate saved_model and extract MetaGraphDef. Args: saved_model_dir: saved_model path to convert. tag_set: Set of tag(s) of the MetaGraphDef to load. Returns: The meta_graph_def used for tflite conversion. Raises: ValueError: No valid MetaGraphDef for given tag_set.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\lite\\python\\convert_saved_model.py",
    "ast_data": "FunctionDef name:get_meta_graph_def arg:saved_model_dir arg:tag_set arguments arg arg With Call Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "_create_directory_from_file_list",
    "source_code": "def _create_directory_from_file_list(filename: str, file_list: list[str], include: 'GlobPattern'='**', exclude: 'GlobPattern'=()) -> Directory:\n    glob_pattern = GlobGroup(include, exclude=exclude, separator='/')\n    top_dir = Directory(filename, True)\n    for file in file_list:\n        if glob_pattern.matches(file):\n            top_dir._add_file(file)\n    return top_dir",
    "docstring": "Return a :class: file structure representation created from a list of files. Args: filename (str): The name given to the top-level directory that will be the relative root for all file paths found in the file_list. file_list (List[str]): List of files to add to the top-level directory. include (Union[List[str], str]): An optional pattern that limits what is included from the file_list to files whose name matches the pattern. exclude (Union[List[str], str]): An optional pattern that excludes files whose name match the pattern. Returns: :class:: a :class: file structure representation created from a list of files.",
    "type": "function",
    "file_path": "pytorch\\torch\\package\\file_structure_representation.py",
    "ast_data": "FunctionDef name:_create_directory_from_file_list arg:filename arg:file_list arg:include arg:exclude arguments arg arg arg arg Assign Call Assign Call For If Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "ConvolutionOrthogonal",
    "source_code": "class ConvolutionOrthogonal(Initializer):\n\n    def __init__(self, gain=1.0, seed=None, dtype=dtypes.float32):\n        self.gain = gain\n        self.dtype = _assert_float_dtype(dtypes.as_dtype(dtype))\n        self.seed = seed\n\n    def __call__(self, shape, dtype=None, partition_info=None):\n        raise NotImplementedError\n\n    def get_config(self):\n        return {'gain': self.gain, 'seed': self.seed, 'dtype': self.dtype.name}\n\n    def _orthogonal_matrix(self, n):\n        a = random_ops.random_normal([n, n], dtype=self.dtype, seed=self.seed)\n        if self.seed:\n            self.seed += 1\n        q, r = gen_linalg_ops.qr(a)\n        d = array_ops.diag_part(r)\n        q *= math_ops.sign(d)\n        return q\n\n    def _symmetric_projection(self, n):\n        q = self._orthogonal_matrix(n)\n        mask = math_ops.cast(random_ops.random_normal([n], seed=self.seed) > 0, self.dtype)\n        if self.seed:\n            self.seed += 1\n        c = math_ops.multiply(q, mask)\n        return math_ops.matmul(c, array_ops.matrix_transpose(c))",
    "docstring": "Initializer that generates orthogonal kernel for ConvNets. Base class used to construct 1D, 2D and 3D orthogonal kernels for convolution. Args: gain: multiplicative factor to apply to the orthogonal matrix. Default is 1. The 2-norm of an input is multiplied by a factor of after applying this convolution. seed: A Python integer. Used to create random seeds. See for behavior. dtype: Default data type, used if no argument is provided when calling the initializer. Only floating point types are supported. References: [Xiao et al., 2018]( ([pdf](",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\init_ops.py",
    "ast_data": "ClassDef name:ConvolutionOrthogonal FunctionDef name:__init__ arg:self arg:gain arg:seed arg:dtype arguments arg arg arg arg Assign Assign Call Call Assign FunctionDef name:__call__ arg:self arg:shape arg:dtype arg:partition_info arguments arg arg arg arg Raise FunctionDef name:get_config arg:self arguments arg Return return:yes FunctionDef name:_orthogonal_matrix arg:self arg:n arguments arg arg Assign Call If Assign Call Assign Call Call Return return:yes FunctionDef name:_symmetric_projection arg:self arg:n arguments arg arg Assign Call Assign Call Compare Call If Assign Call Return return:yes Call Call"
  },
  {
    "library": "pandas",
    "name": "_dtype_can_hold_range",
    "source_code": "def _dtype_can_hold_range(rng: range, dtype: np.dtype) -> bool:\n    if not len(rng):\n        return True\n    return np_can_cast_scalar(rng.start, dtype) and np_can_cast_scalar(rng.stop, dtype)",
    "docstring": "_maybe_infer_dtype_type infers to int64 (and float64 for very large endpoints), but in many cases a range can be held by a smaller integer dtype. Check if this is one of those cases.",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\dtypes\\cast.py",
    "ast_data": "FunctionDef name:_dtype_can_hold_range arg:rng arg:dtype arguments arg arg If Call Return return:yes Return return:yes BoolOp Call Call"
  },
  {
    "library": "pytorch",
    "name": "pin_memory",
    "source_code": "def pin_memory(self, device: Union[str, torch.device]='cuda'):\n    if self.device.type != 'cpu':\n        raise TypeError(f\"cannot pin '{self.type()}' only CPU memory can be pinned\")\n    pinned_tensor = torch.tensor([], dtype=torch.uint8, device=self.device).set_(cast(Storage, self)).pin_memory(device)\n    return pinned_tensor.untyped_storage()",
    "docstring": "Copy the CPU storage to pinned memory, if it's not already pinned. Args: device (str or torch.device): The device to pin memory on (default: ``). This argument is discouraged and subject to deprecated. Returns: A pinned CPU storage.",
    "type": "method",
    "file_path": "pytorch\\torch\\storage.py",
    "ast_data": "FunctionDef name:pin_memory arg:self arg:device arguments arg arg If Compare Raise Call Call Assign Call Call Call Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "CatchWarningsCtxManagerVariable",
    "source_code": "class CatchWarningsCtxManagerVariable(ContextWrappingVariable):\n\n    @staticmethod\n    def create(tx: 'InstructionTranslator', catch_warnings_args):\n        return CatchWarningsCtxManagerVariable(catch_warnings_args=catch_warnings_args, target_values=None, initial_values=None)\n\n    def __init__(self, catch_warnings_args, **kwargs) -> None:\n        assert isinstance(catch_warnings_args, dict), catch_warnings_args\n        super().__init__(**kwargs)\n        self.catch_warnings_args = catch_warnings_args\n\n    def enter(self, tx):\n        kwargs = {k: v.as_python_constant() for k, v in self.catch_warnings_args.items()}\n        ctx_val = warnings.catch_warnings(**kwargs)\n        self.set_cleanup_hook(tx, lambda: ctx_val.__exit__(None, None, None))\n        return variables.ConstantVariable.create(ctx_val.__enter__())\n\n    def reconstruct(self, cg):\n        cg.add_push_null(lambda: cg.load_import_from('warnings', 'catch_warnings'))\n        cg.foreach(self.catch_warnings_args.values())\n        keys = tuple(self.catch_warnings_args.keys())\n        cg.extend_output(cg.create_call_function_kw(len(keys), keys, False))",
    "docstring": "Delay a call to warnings.catch_warnings",
    "type": "class",
    "file_path": "pytorch\\torch\\_dynamo\\variables\\ctx_manager.py",
    "ast_data": "ClassDef name:CatchWarningsCtxManagerVariable FunctionDef name:create arg:tx arg:catch_warnings_args arguments arg arg Return return:yes Call FunctionDef name:__init__ arg:self arg:catch_warnings_args arguments arg arg arg Call Call Call Assign FunctionDef name:enter arg:self arg:tx arguments arg arg Assign Call Call Assign Call Call arguments Call Return return:yes Call Call FunctionDef name:reconstruct arg:self arg:cg arguments arg arg Call arguments Call Call Call Assign Call Call Call Call Call"
  },
  {
    "library": "numpy",
    "name": "write_file",
    "source_code": "def write_file(filename, data):\n    if os.path.exists(filename):\n        with open(filename) as f:\n            if data == f.read():\n                return\n    with open(filename, 'w') as fid:\n        fid.write(data)",
    "docstring": "Write data to filename Only write changed data to avoid updating timestamps unnecessarily",
    "type": "function",
    "file_path": "numpy\\numpy\\_core\\code_generators\\genapi.py",
    "ast_data": "FunctionDef name:write_file arg:filename arg:data arguments arg arg If Call With Call If Compare Call Return return:no With Call Call"
  },
  {
    "library": "scikit-learn",
    "name": "_check_precisions_full",
    "source_code": "def _check_precisions_full(precisions, covariance_type):\n    for prec in precisions:\n        _check_precision_matrix(prec, covariance_type)",
    "docstring": "Check the precision matrices are symmetric and positive-definite.",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\mixture\\_gaussian_mixture.py",
    "ast_data": "FunctionDef name:_check_precisions_full arg:precisions arg:covariance_type arguments arg arg For Call"
  },
  {
    "library": "scipy",
    "name": "_as_inexact",
    "source_code": "def _as_inexact(x):\n    x = asarray(x)\n    if not np.issubdtype(x.dtype, np.inexact):\n        return asarray(x, dtype=np.float64)\n    return x",
    "docstring": "Return as an array, of either floats or complex floats",
    "type": "function",
    "file_path": "scipy\\scipy\\optimize\\_nonlin.py",
    "ast_data": "FunctionDef name:_as_inexact arg:x arguments arg Assign Call If Call Return return:yes Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "get_unscaled_gradients",
    "source_code": "def get_unscaled_gradients(self, grads):\n    loss_scale_reciprocal = 1.0 / self.loss_scale\n    return [_multiply_gradient(g, loss_scale_reciprocal) if g is not None else None for g in grads]",
    "docstring": "Unscales the gradients by the loss scale. This method is only needed if you compute gradients manually, e.g. with . In that case, call this method to unscale the gradients after computing them with . If you use or , loss scaling is automatically applied and this method is unneeded. If this method is called, should also be called. See the doc for an example. Args: grads: A list of tensors, each which will be divided by the loss scale. Can have None values, which are ignored. Returns: A new list the same size as , where every non-None value in is divided by .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\mixed_precision\\loss_scale_optimizer.py",
    "ast_data": "FunctionDef name:get_unscaled_gradients arg:self arg:grads arguments arg arg Assign Return return:yes Compare Call"
  },
  {
    "library": "cryptography",
    "name": "__eq__",
    "source_code": "@abc.abstractmethod\ndef __eq__(self, other: object) -> bool:\n    pass",
    "docstring": "Checks equality.",
    "type": "method",
    "file_path": "cryptography\\src\\cryptography\\hazmat\\primitives\\asymmetric\\x25519.py",
    "ast_data": "FunctionDef name:__eq__ arg:self arg:other arguments arg arg"
  },
  {
    "library": "tensorflow",
    "name": "_vectorize_then_blockify",
    "source_code": "def _vectorize_then_blockify(self, matrix):\n    vec = distribution_util.rotate_transpose(matrix, shift=1)\n    if vec.shape.is_fully_defined() and self.block_shape.is_fully_defined():\n        vec_leading_shape = vec.shape[:-1]\n        final_shape = vec_leading_shape.concatenate(self.block_shape)\n    else:\n        vec_leading_shape = array_ops.shape(vec)[:-1]\n        final_shape = array_ops.concat((vec_leading_shape, self.block_shape_tensor()), 0)\n    return array_ops.reshape(vec, final_shape)",
    "docstring": "Shape batch matrix to batch vector, then blockify trailing dimensions.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\linalg\\linear_operator_circulant.py",
    "ast_data": "FunctionDef name:_vectorize_then_blockify arg:self arg:matrix arguments arg arg Assign Call If BoolOp Call Call Assign Assign Call Assign Call Assign Call Call Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "is_callable",
    "source_code": "def is_callable(obj: object) -> bool:\n    if not callable(obj):\n        raise ValueError('Value must be a callable')\n    return True",
    "docstring": "Parameters ---------- - the object to be checked Returns ------- validator - returns True if object is callable raises ValueError otherwise.",
    "type": "function",
    "file_path": "pandas\\pandas\\_config\\config.py",
    "ast_data": "FunctionDef name:is_callable arg:obj arguments arg If Call Raise Call Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "check_increasing",
    "source_code": "@validate_params({'x': ['array-like'], 'y': ['array-like']}, prefer_skip_nested_validation=True)\ndef check_increasing(x, y):\n    rho, _ = spearmanr(x, y)\n    increasing_bool = rho >= 0\n    if rho not in [-1.0, 1.0] and len(x) > 3:\n        F = 0.5 * math.log((1.0 + rho) / (1.0 - rho))\n        F_se = 1 / math.sqrt(len(x) - 3)\n        rho_0 = math.tanh(F - 1.96 * F_se)\n        rho_1 = math.tanh(F + 1.96 * F_se)\n        if np.sign(rho_0) != np.sign(rho_1):\n            warnings.warn('Confidence interval of the Spearman correlation coefficient spans zero. Determination of ``increasing`` may be suspect.')\n    return increasing_bool",
    "docstring": "Determine whether y is monotonically correlated with x. y is found increasing or decreasing with respect to x based on a Spearman correlation test. Parameters ---------- x : array-like of shape (n_samples,) Training data. y : array-like of shape (n_samples,) Training target. Returns ------- increasing_bool : boolean Whether the relationship is increasing or decreasing. Notes ----- The Spearman correlation coefficient is estimated from the data, and the sign of the resulting estimate is used as the result. In the event that the 95% confidence interval based on Fisher transform spans zero, a warning is raised. References ---------- Fisher transformation. Wikipedia. Examples -------- >>> from sklearn.isotonic import check_increasing >>> x, y = [1, 2, 3, 4, 5], [2, 4, 6, 8, 10] >>> check_increasing(x, y) np.True_ >>> y = [10, 8, 6, 4, 2] >>> check_increasing(x, y) np.False_",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\isotonic.py",
    "ast_data": "FunctionDef name:check_increasing arg:x arg:y arguments arg arg Assign Call Assign Compare If BoolOp Compare Compare Call Assign Call Assign Call Call Assign Call Assign Call If Compare Call Call Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_build_extension_type_constructor",
    "source_code": "def _build_extension_type_constructor(cls):\n    fields = cls._tf_extension_type_fields()\n    got_default = False\n    keyword_only_start = len(fields)\n    for i in range(len(fields)):\n        if got_default:\n            if fields[i].default is _NO_DEFAULT:\n                keyword_only_start = i\n                break\n        elif fields[i].default is not _NO_DEFAULT:\n            got_default = True\n    params = []\n    for i, field in enumerate(fields):\n        if i < keyword_only_start:\n            kind = tf_inspect.Parameter.POSITIONAL_OR_KEYWORD\n        else:\n            kind = tf_inspect.Parameter.KEYWORD_ONLY\n        if field.default is _NO_DEFAULT:\n            default = tf_inspect.Parameter.empty\n        else:\n            default = field.default\n        params.append(tf_inspect.Parameter(field.name, kind, default=default, annotation=field.value_type))\n    signature = tf_inspect.Signature(params, return_annotation=cls.__name__)\n\n    def __init__(self, *args, **kwargs):\n        bound_args = signature.bind(*args, **kwargs)\n        bound_args.apply_defaults()\n        self.__dict__.update(bound_args.arguments)\n        self._tf_extension_type_convert_fields()\n        self.__validate__()\n    __init__.__signature__ = tf_inspect.Signature([tf_inspect.Parameter('self', tf_inspect.Parameter.POSITIONAL_OR_KEYWORD)] + params, return_annotation=cls)\n    cls.__init__ = __init__",
    "docstring": "Builds a constructor for tf.ExtensionType subclass .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\extension_type.py",
    "ast_data": "FunctionDef name:_build_extension_type_constructor arg:cls arguments arg Assign Call Assign Assign Call For Call Call If If Compare Assign If Compare Assign Assign For Call If Compare Assign Assign If Compare Assign Assign Call Call Assign Call FunctionDef name:__init__ arg:self arguments arg arg arg Assign Call Call Call Call Call Assign Call Call Assign"
  },
  {
    "library": "scipy",
    "name": "_penalized_nnlf",
    "source_code": "def _penalized_nnlf(self, theta, x):\n    loc, scale, args = self._unpack_loc_scale(theta)\n    if not self._argcheck(*args) or scale <= 0:\n        return inf\n    x = asarray((x - loc) / scale)\n    n_log_scale = len(x) * log(scale)\n    return self._nlff_and_penalty(x, args, self._logpxf) + n_log_scale",
    "docstring": "Penalized negative loglikelihood function. i.e., - sum (log pdf(x, theta), axis=0) + penalty where theta are the parameters (including loc and scale)",
    "type": "method",
    "file_path": "scipy\\scipy\\stats\\_distn_infrastructure.py",
    "ast_data": "FunctionDef name:_penalized_nnlf arg:self arg:theta arg:x arguments arg arg arg Assign Call If BoolOp Call Compare Return return:yes Assign Call Assign Call Call Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "__delitem__",
    "source_code": "def __delitem__(self, key: _KT) -> None:\n    for mapping in self.maps:\n        if key in mapping:\n            del mapping[key]\n            return\n    raise KeyError(key)",
    "docstring": "Raises ------ KeyError If doesn't exist.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\computation\\scope.py",
    "ast_data": "FunctionDef name:__delitem__ arg:self arg:key arguments arg arg For If Compare Return return:no Raise Call"
  },
  {
    "library": "numpy",
    "name": "soften_mask",
    "source_code": "def soften_mask(self):\n    self._hardmask = False",
    "docstring": "Forces the mask to soft",
    "type": "method",
    "file_path": "numpy\\numpy\\ma\\mrecords.py",
    "ast_data": "FunctionDef name:soften_mask arg:self arguments arg Assign"
  },
  {
    "library": "pytorch",
    "name": "get_compile_threads",
    "source_code": "def get_compile_threads() -> int:\n    if config.compile_threads is None:\n        config.compile_threads = config.decide_compile_threads()\n    return config.compile_threads",
    "docstring": "Temporary for internal rollout. Assign config.compile_threads lazily and return it. TODO: remove after rollout.",
    "type": "function",
    "file_path": "pytorch\\torch\\_inductor\\async_compile.py",
    "ast_data": "FunctionDef name:get_compile_threads arguments If Compare Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "TensorMapper",
    "source_code": "class TensorMapper:\n\n    def __init__(self, subgraph_data):\n        self.data = subgraph_data\n\n    def __call__(self, x):\n        html = ''\n        if x is None:\n            return html\n        html += \"<span class='tooltip'><span class='tooltipcontent'>\"\n        for i in x:\n            tensor = self.data['tensors'][i]\n            html += str(i) + ' '\n            html += NameListToString(tensor['name']) + ' '\n            html += TensorTypeToName(tensor['type']) + ' '\n            html += repr(tensor['shape']) if 'shape' in tensor else '[]'\n            html += (repr(tensor['shape_signature']) if 'shape_signature' in tensor else '[]') + '<br>'\n        html += '</span>'\n        html += repr(x)\n        html += '</span>'\n        return html",
    "docstring": "Maps a list of tensor indices to a tooltip hoverable indicator of more.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\lite\\tools\\visualize.py",
    "ast_data": "ClassDef name:TensorMapper FunctionDef name:__init__ arg:self arg:subgraph_data arguments arg arg Assign FunctionDef name:__call__ arg:self arg:x arguments arg arg Assign If Compare Return return:yes For Assign Call Call Call Compare Call Compare Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "stateful_ops",
    "source_code": "@property\ndef stateful_ops(self):\n    self._create_definition_if_needed()\n    return self._stateful_ops",
    "docstring": "Returns the list of stateful ops in function definition. Returns: A list of (op.name, op.type) pairs.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\function.py",
    "ast_data": "FunctionDef name:stateful_ops arg:self arguments arg Call Return return:yes"
  },
  {
    "library": "seaborn",
    "name": "PointSize",
    "source_code": "class PointSize(IntervalProperty):\n    _default_range = (2, 8)\n\n    def _forward(self, values):\n        return np.square(values)\n\n    def _inverse(self, values):\n        return np.sqrt(values)",
    "docstring": "Size (diameter) of a point mark, in points, with scaling by area.",
    "type": "class",
    "file_path": "seaborn\\seaborn\\_core\\properties.py",
    "ast_data": "ClassDef name:PointSize Assign FunctionDef name:_forward arg:self arg:values arguments arg arg Return return:yes Call FunctionDef name:_inverse arg:self arg:values arguments arg arg Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "__init__",
    "source_code": "def __init__(self, path_effects, renderer):\n    self._path_effects = path_effects\n    self._renderer = renderer",
    "docstring": "Parameters ---------- path_effects : iterable of :class: The path effects which this renderer represents. renderer : subclass",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\patheffects.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:path_effects arg:renderer arguments arg arg arg Assign Assign"
  },
  {
    "library": "tensorflow",
    "name": "_add_while_inputs",
    "source_code": "def _add_while_inputs(self, tensors) -> None:\n    with self.graph._c_graph.get() as c_graph:\n        for tensor in tensors:\n            if not isinstance(tensor, tensor_lib.Tensor):\n                raise TypeError('tensor must be a Tensor: %s' % tensor)\n            _assert_same_graph(self, tensor)\n            self._inputs_val = None\n            pywrap_tf_session.AddWhileInputHack(c_graph, tensor._as_tf_output(), self._c_op)",
    "docstring": "See AddWhileInputHack in python_api.h. NOTE: This is for TF internal use only. Please don't use it. Args: tensors: list of Tensors Raises: TypeError: if tensor is not a Tensor, or if input tensor type is not convertible to dtype. ValueError: if the Tensor is from a different graph.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\ops.py",
    "ast_data": "FunctionDef name:_add_while_inputs arg:self arg:tensors arguments arg arg With Call For If Call Raise Call Call Assign Call Call"
  },
  {
    "library": "tensorflow",
    "name": "all_max",
    "source_code": "def all_max(tensors):\n    return _apply_all_reduce('max', tensors)",
    "docstring": "Returns a list of tensors with the all-reduce max across . The computation is done with an all-reduce operation, so if only some of the returned tensors are evaluated then the computation will hang. Args: tensors: The input tensors across which to reduce; must be assigned to GPU devices. Returns: List of tensors, each with the maximum of the input tensors, where tensor i has the same device as .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\nccl_ops.py",
    "ast_data": "FunctionDef name:all_max arg:tensors arguments arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "NoValue",
    "source_code": "class NoValue(enum.Enum):\n\n    def of(self, node, default=None):\n        return getanno(node, self, default=default)\n\n    def add_to(self, node, value):\n        setanno(node, self, value)\n\n    def exists(self, node):\n        return hasanno(node, self)\n\n    def __repr__(self):\n        return str(self.name)",
    "docstring": "Base class for different types of AST annotations.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\autograph\\pyct\\anno.py",
    "ast_data": "ClassDef name:NoValue FunctionDef name:of arg:self arg:node arg:default arguments arg arg arg Return return:yes Call FunctionDef name:add_to arg:self arg:node arg:value arguments arg arg arg Call FunctionDef name:exists arg:self arg:node arguments arg arg Return return:yes Call FunctionDef name:__repr__ arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "numpy",
    "name": "get_flags",
    "source_code": "def get_flags(self):\n    return [] + self.pic_flags",
    "docstring": "List of flags common to all compiler types.",
    "type": "method",
    "file_path": "numpy\\numpy\\distutils\\fcompiler\\__init__.py",
    "ast_data": "FunctionDef name:get_flags arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "tensor_list_set_item",
    "source_code": "def tensor_list_set_item(input_handle, index, item, resize_if_index_out_of_bounds=False, name=None):\n    output_handle = gen_list_ops.tensor_list_set_item(input_handle=input_handle, index=index, item=item, name=name, resize_if_index_out_of_bounds=resize_if_index_out_of_bounds)\n    handle_data_util.copy_handle_data(input_handle, output_handle)\n    return output_handle",
    "docstring": "Sets at in input list.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\list_ops.py",
    "ast_data": "FunctionDef name:tensor_list_set_item arg:input_handle arg:index arg:item arg:resize_if_index_out_of_bounds arg:name arguments arg arg arg arg arg Assign Call Call Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "get_params",
    "source_code": "def get_params(self, deep=True):\n    return self._get_params('steps', deep=deep)",
    "docstring": "Get parameters for this estimator. Returns the parameters given in the constructor as well as the estimators contained within the of the . Parameters ---------- deep : bool, default=True If True, will return the parameters for this estimator and contained subobjects that are estimators. Returns ------- params : mapping of string to any Parameter names mapped to their values.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\pipeline.py",
    "ast_data": "FunctionDef name:get_params arg:self arg:deep arguments arg arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "context_parallel",
    "source_code": "@contextlib.contextmanager\n@torch.no_grad()\ndef context_parallel(mesh: DeviceMesh, *, buffers: Optional[list[torch.Tensor]]=None, buffer_seq_dims: Optional[list[int]]=None, no_restore_buffers: Optional[set[torch.Tensor]]=None) -> Generator[None, None, None]:\n    buffers = [] if buffers is None else buffers\n    buffer_seq_dims = [] if buffer_seq_dims is None else buffer_seq_dims\n    no_restore_buffers = set() if no_restore_buffers is None else no_restore_buffers\n    if len(buffers) != len(buffer_seq_dims):\n        raise ValueError('`seq_dims` must have the same number of elements as `buffers`.')\n    for buffer in no_restore_buffers:\n        if not any((b is buffer for b in buffers)):\n            raise ValueError('`no_restore_buffers` must be a subset of `buffers`.')\n    original_buffers = [None if b in no_restore_buffers else b.clone() for b in buffers]\n    chunks = _context_parallel_buffers(mesh, buffers, buffer_seq_dims)\n    for buffer, chunk in zip(buffers, chunks):\n        chunk = chunk.clone()\n        buffer.resize_(chunk.shape)\n        buffer.copy_(chunk)\n    with _context_parallel(seq_dim=2, mesh=mesh):\n        yield\n    for buffer, original_buffer in zip(buffers, original_buffers):\n        if original_buffer is not None:\n            buffer.resize_(original_buffer.shape)\n            buffer.copy_(original_buffer)",
    "docstring": "`DeviceMeshtorch.distributed.tensor.experimental.context_parallel` is a prototype feature in PyTorch. The API is subject to change.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\tensor\\experimental\\_attention.py",
    "ast_data": "FunctionDef name:context_parallel arg:mesh arguments arg arg arg arg Assign Compare Assign Compare Assign Compare Call If Compare Call Call Raise Call For If Call Compare Raise Call Assign Compare Call Assign Call For Call Assign Call Call Call With Call For Call If Compare Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "get_lr",
    "source_code": "@override\ndef get_lr(self) -> list[float]:\n    _warn_get_lr_called_within_step(self)\n    return [self.eta_min + (base_lr - self.eta_min) * (1 + math.cos(math.pi * self.T_cur / self.T_i)) / 2 for base_lr in self.base_lrs]",
    "docstring": "Compute the initial learning rate.",
    "type": "method",
    "file_path": "pytorch\\torch\\optim\\lr_scheduler.py",
    "ast_data": "FunctionDef name:get_lr arg:self arguments arg Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "advise",
    "source_code": "@tf_export(v1=['profiler.advise'])\ndef advise(graph=None, run_meta=None, options=_DEFAULT_ADVISE_OPTIONS):\n    if not graph and (not context.executing_eagerly()):\n        graph = ops.get_default_graph()\n    if options == _DEFAULT_ADVISE_OPTIONS:\n        options = ALL_ADVICE.copy()\n    op_log = tfprof_logger.merge_default_with_oplog(graph, None, run_meta, add_trace=True)\n    run_meta_str = run_meta.SerializeToString() if run_meta else b''\n    opts = _build_advisor_options(options)\n    ret = tfprof_output_pb2.AdviceProto()\n    ret.ParseFromString(print_mdl.PrintModelAnalysis(_graph_string(graph), run_meta_str, op_log.SerializeToString(), 'advise'.encode('utf-8'), opts.SerializeToString()))\n    return ret",
    "docstring": "Auto profile and advise. Builds profiles and automatically check anomalies of various aspects. For more details: Args: graph: tf.Graph. If None and eager execution is not enabled, use default graph. run_meta: optional tensorflow.RunMetadata proto. It is necessary to support run time information profiling, such as time and memory. options: see ALL_ADVICE example above. Default checks everything. Returns: Returns AdviceProto proto",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\profiler\\model_analyzer.py",
    "ast_data": "FunctionDef name:advise arg:graph arg:run_meta arg:options arguments arg arg arg If BoolOp Call Assign Call If Compare Assign Call Assign Call Assign Call Assign Call Assign Call Call Call Call Call Call Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "capture_dependencies",
    "source_code": "@tf_contextlib.contextmanager\ndef capture_dependencies(template):\n    name_prefix = template.variable_scope.name\n\n    def _trackable_custom_creator(next_creator, name, initial_value, trackable_parent=None, **kwargs):\n\n        def _call_next_creator_renaming_initializer(initializer, **inner_kwargs):\n            inner_kwargs.pop('name')\n            return next_creator(initial_value=initializer, name=name, **inner_kwargs)\n        if name is not None and name.startswith(name_prefix):\n            scope_stripped_name = name[len(name_prefix) + 1:]\n            if not trackable_parent:\n                return template._add_variable_with_custom_getter(initializer=initial_value, name=scope_stripped_name, getter=_call_next_creator_renaming_initializer, overwrite=True, trackable_parent=(template, name_prefix), **kwargs)\n            else:\n                parent_object, parent_name_prefix = trackable_parent\n                template._track_trackable(parent_object, name=parent_name_prefix[len(name_prefix) + 1:], overwrite=True)\n        return next_creator(name=name, initial_value=initial_value, trackable_parent=(template, name_prefix), **kwargs)\n    with variable_scope.variable_creator_scope(_trackable_custom_creator):\n        yield",
    "docstring": "Capture variables created within this scope as dependencies. Requires that is active. This scope is intended as a compatibility measure, allowing a trackable object to add dependencies on variables created in a block of code which is not aware of object-based saving (and instead uses variable names heavily). This is how objects add dependencies on variables and sub-s. Where possible, use directly. Args: template: The object to register dependencies with. Yields: None (when used as a context manager).",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\checkpoint\\checkpoint.py",
    "ast_data": "FunctionDef name:capture_dependencies arg:template arguments arg Assign FunctionDef name:_trackable_custom_creator arg:next_creator arg:name arg:initial_value arg:trackable_parent arguments arg arg arg arg arg FunctionDef name:_call_next_creator_renaming_initializer arg:initializer arguments arg arg Call Return return:yes Call If BoolOp Compare Call Assign Call If Return return:yes Call Assign Call Call Return return:yes Call With Call"
  },
  {
    "library": "tensorflow",
    "name": "TensorList",
    "source_code": "class TensorList(object):\n\n    def __init__(self, shape, dtype):\n        self.dtype = dtype\n        self.shape = shape\n        self.clear()\n\n    def append(self, value):\n        self.list_ = list_ops.tensor_list_push_back(self.list_, value)\n\n    def pop(self):\n        self.list_, value = list_ops.tensor_list_pop_back(self.list_, self.dtype)\n        return value\n\n    def clear(self):\n        self.list_ = list_ops.empty_tensor_list(self.shape, self.dtype)\n\n    def count(self):\n        return list_ops.tensor_list_length(self.list_)\n\n    def __getitem__(self, key):\n        return list_ops.tensor_list_get_item(self.list_, key, self.dtype)\n\n    def __setitem__(self, key, value):\n        self.list_ = list_ops.tensor_list_set_item(self.list_, key, value)",
    "docstring": "Tensor list wrapper API-compatible with Python built-in list.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\autograph\\utils\\tensor_list.py",
    "ast_data": "ClassDef name:TensorList FunctionDef name:__init__ arg:self arg:shape arg:dtype arguments arg arg arg Assign Assign Call FunctionDef name:append arg:self arg:value arguments arg arg Assign Call FunctionDef name:pop arg:self arguments arg Assign Call Return return:yes FunctionDef name:clear arg:self arguments arg Assign Call FunctionDef name:count arg:self arguments arg Return return:yes Call FunctionDef name:__getitem__ arg:self arg:key arguments arg arg Return return:yes Call FunctionDef name:__setitem__ arg:self arg:key arg:value arguments arg arg arg Assign Call"
  },
  {
    "library": "tensorflow",
    "name": "EagerWeakTensor",
    "source_code": "class EagerWeakTensor(core.Value, WeakTensor):\n    __name__ = 'tf.EagerWeakTensor'\n\n    def numpy(self):\n        if not isinstance(self.tensor, ops.EagerTensor):\n            raise ValueError('WeakTensor.numpy() is only supported in eager mode.')\n        return self.tensor.numpy()\n\n    def __complex__(self):\n        return self.tensor.__complex__()\n\n    def __int__(self):\n        return self.tensor.__int__()\n\n    def __float__(self):\n        return self.tensor.__float__()\n\n    def __index__(self):\n        return self.tensor.__index__()\n\n    def __format__(self, format_spec):\n        return f'{self.tensor.__format__(format_spec)} weakly typed'\n\n    def __array__(self, dtype=None):\n        return np.array(self.tensor.__array__(dtype))",
    "docstring": "A weakly typed Eager Tensor.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\weak_tensor.py",
    "ast_data": "ClassDef name:EagerWeakTensor Assign FunctionDef name:numpy arg:self arguments arg If Call Raise Call Return return:yes Call FunctionDef name:__complex__ arg:self arguments arg Return return:yes Call FunctionDef name:__int__ arg:self arguments arg Return return:yes Call FunctionDef name:__float__ arg:self arguments arg Return return:yes Call FunctionDef name:__index__ arg:self arguments arg Return return:yes Call FunctionDef name:__format__ arg:self arg:format_spec arguments arg arg Return return:yes Call FunctionDef name:__array__ arg:self arg:dtype arguments arg arg Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_get_output_shapes_from_input_shapes",
    "source_code": "def _get_output_shapes_from_input_shapes(self, input_shapes: List[TensorShape]) -> List[TensorShape]:\n    output_shapes = []\n    for input_shape, feature in zip(input_shapes, nest.flatten(self._feature_config)):\n        if input_shape.rank is None or input_shape.rank < 1:\n            raise ValueError('Received input tensor of shape {}. Rank must be 1 and above'.format(input_shape))\n        if len(input_shape) == 2 and input_shape[-1] != 1 and (not feature.output_shape) and (feature.max_sequence_length > 0):\n            input_shape_list = input_shape.as_list()\n            input_shape_list.insert(len(input_shape_list) - 1, feature.max_sequence_length)\n            input_shape = TensorShape(input_shape_list)\n        if input_shape.rank == 1:\n            output_shapes.append(input_shape)\n        else:\n            output_shapes.append(input_shape[:-1])\n    return output_shapes",
    "docstring": "Get output shapes from the flattened input shapes list.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\tpu\\tpu_embedding_v2.py",
    "ast_data": "FunctionDef name:_get_output_shapes_from_input_shapes arg:self arg:input_shapes arguments arg arg Assign For Call Call If BoolOp Compare Compare Raise Call Call If BoolOp Compare Call Compare Compare Assign Call Call Call Assign Call If Compare Call Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "intervaly",
    "source_code": "@property\ndef intervaly(self):\n    return self.get_points()[:, 1]",
    "docstring": "The pair of *y* coordinates that define the bounding box. This is not guaranteed to be sorted from bottom to top.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\transforms.py",
    "ast_data": "FunctionDef name:intervaly arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "django",
    "name": "Topology",
    "source_code": "class Topology(GEOSFuncFactory):\n    argtypes = [GEOM_PTR]\n    restype = GEOM_PTR\n    errcheck = staticmethod(check_geom)",
    "docstring": "For GEOS unary topology functions.",
    "type": "class",
    "file_path": "django\\django\\contrib\\gis\\geos\\prototypes\\topology.py",
    "ast_data": "ClassDef name:Topology Assign Assign Assign Call"
  },
  {
    "library": "tensorflow",
    "name": "_validate_full_integer_quantization_bias_type",
    "source_code": "def _validate_full_integer_quantization_bias_type(self):\n    bias_type = self._full_integer_quantization_bias_type\n    if not bias_type:\n        return\n    if self.activations_type() == _dtypes.float32:\n        raise ValueError('`full_integer_quantization_bias_type` is only supported for full integer quantization.')\n    if self.activations_type() == _dtypes.int8 and bias_type != _dtypes.int32:\n        raise ValueError(f'Expected bias type to be `dtypes.int32` for Int8Quant. Current setting bias type: {bias_type}')\n    if self.activations_type() == _dtypes.int16 and bias_type != _dtypes.int32 and (bias_type != _dtypes.int64):\n        raise ValueError(f'Expected bias type to be `dtypes.int32` or `dtypes.int64` for Int16Quant. Current setting bias type: {bias_type}')",
    "docstring": "Validates bias type for full interger quantization.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\lite\\python\\lite.py",
    "ast_data": "FunctionDef name:_validate_full_integer_quantization_bias_type arg:self arguments arg Assign If Return return:no If Compare Call Raise Call If BoolOp Compare Call Compare Raise Call If BoolOp Compare Call Compare Compare Raise Call"
  },
  {
    "library": "matplotlib",
    "name": "get_hatch_color",
    "source_code": "def get_hatch_color(self):\n    return self._hatch_color",
    "docstring": "Get the hatch color.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\backend_bases.py",
    "ast_data": "FunctionDef name:get_hatch_color arg:self arguments arg Return return:yes"
  },
  {
    "library": "numpy",
    "name": "cumulative_prod",
    "source_code": "@array_function_dispatch(_cumulative_prod_dispatcher)\ndef cumulative_prod(x, /, *, axis=None, dtype=None, out=None, include_initial=False):\n    return _cumulative_func(x, um.multiply, axis, dtype, out, include_initial)",
    "docstring": "Return the cumulative product of elements along a given axis. This function is an Array API compatible alternative to . Parameters ---------- x : array_like Input array. axis : int, optional Axis along which the cumulative product is computed. The default (None) is only allowed for one-dimensional arrays. For arrays with more than one dimension `ufuncs-output-type`: >>> np.cumulative_prod(b, axis=1) array([[ 1, 2, 6], [ 4, 20, 120]])",
    "type": "function",
    "file_path": "numpy\\numpy\\_core\\fromnumeric.py",
    "ast_data": "FunctionDef name:cumulative_prod arguments arg arg arg arg arg Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "_maybe_get_observer_for_node",
    "source_code": "def _maybe_get_observer_for_node(node: Node, modules: dict[str, torch.nn.Module]) -> Optional[torch.nn.Module]:\n    for maybe_obs_node in node.users.keys():\n        if maybe_obs_node.op == 'call_module':\n            maybe_obs = modules[str(maybe_obs_node.target)]\n            if _is_activation_post_process(maybe_obs):\n                return maybe_obs\n    return None",
    "docstring": "If the node is observed, return the observer instance. Otherwise, return None.",
    "type": "function",
    "file_path": "pytorch\\torch\\ao\\quantization\\fx\\convert.py",
    "ast_data": "FunctionDef name:_maybe_get_observer_for_node arg:node arg:modules arguments arg arg For Call If Compare Assign Call If Call Return return:yes Return return:no"
  },
  {
    "library": "pandas",
    "name": "iget_values",
    "source_code": "def iget_values(self, i: int) -> ArrayLike:\n    block = self.blocks[self.blknos[i]]\n    values = block.iget(self.blklocs[i])\n    return values",
    "docstring": "Return the data for column i as the values (ndarray or ExtensionArray). Warning! The returned array is a view but doesn't handle Copy-on-Write, so this should be used with caution.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\internals\\managers.py",
    "ast_data": "FunctionDef name:iget_values arg:self arg:i arguments arg arg Assign Assign Call Return return:yes"
  },
  {
    "library": "django",
    "name": "slugify",
    "source_code": "@register.filter(is_safe=True)\n@stringfilter\ndef slugify(value):\n    return _slugify(value)",
    "docstring": "Convert to ASCII. Convert spaces to hyphens. Remove characters that aren't alphanumerics, underscores, or hyphens. Convert to lowercase. Also strip leading and trailing whitespace.",
    "type": "function",
    "file_path": "django\\django\\template\\defaultfilters.py",
    "ast_data": "FunctionDef name:slugify arg:value arguments arg Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_VariantTracker",
    "source_code": "class _VariantTracker(resource_lib.CapturableResource):\n\n    def __init__(self, variant_tensor, resource_creator):\n        super(_VariantTracker, self).__init__(device='CPU')\n        self._resource_handle = variant_tensor\n        if not isinstance(resource_creator, def_function.Function):\n            raise TypeError('Resource creator should already be a tf.function.')\n        self._create_resource = resource_creator\n\n    def _trackable_children(self, save_type=tracking_base.SaveType.CHECKPOINT, **kwargs):\n        if save_type != tracking_base.SaveType.SAVEDMODEL:\n            return {}\n        children = super(_VariantTracker, self)._trackable_children(save_type, **kwargs)\n        children['_create_resource'] = self._create_resource\n        return children",
    "docstring": "Allows export of functions capturing a Dataset in SavedModels. When saving a SavedModel, traverses the object graph. Since Datasets reference _VariantTracker objects, that traversal will find a _VariantTracker for each Dataset and so know how to save and restore functions which reference the Dataset's variant Tensor.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\data\\ops\\dataset_ops.py",
    "ast_data": "ClassDef name:_VariantTracker FunctionDef name:__init__ arg:self arg:variant_tensor arg:resource_creator arguments arg arg arg Call Call Assign If Call Raise Call Assign FunctionDef name:_trackable_children arg:self arg:save_type arguments arg arg arg If Compare Return return:no Assign Call Call Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "u",
    "source_code": "@property\ndef u(self):\n    return self._u",
    "docstring": "If this operator is , this is the .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\linalg\\linear_operator_low_rank_update.py",
    "ast_data": "FunctionDef name:u arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "to_control_flow_context_def",
    "source_code": "@abc.abstractmethod\ndef to_control_flow_context_def(self, context_def, export_scope=None):\n    raise NotImplementedError('Abstract method')",
    "docstring": "Serializes this into . Args: context_def: a protocol buffer. export_scope: Optional . Name scope to remove.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\control_flow_ops.py",
    "ast_data": "FunctionDef name:to_control_flow_context_def arg:self arg:context_def arg:export_scope arguments arg arg arg Raise Call"
  },
  {
    "library": "cryptography",
    "name": "parameters",
    "source_code": "@abc.abstractmethod\ndef parameters(self) -> DSAParameters:\n    pass",
    "docstring": "The DSAParameters object associated with this private key.",
    "type": "method",
    "file_path": "cryptography\\src\\cryptography\\hazmat\\primitives\\asymmetric\\dsa.py",
    "ast_data": "FunctionDef name:parameters arg:self arguments arg"
  },
  {
    "library": "matplotlib",
    "name": "box",
    "source_code": "def box(on: bool | None=None) -> None:\n    ax = gca()\n    if on is None:\n        on = not ax.get_frame_on()\n    ax.set_frame_on(on)",
    "docstring": "Turn the Axes box on or off on the current Axes. Parameters ---------- on : bool or None The new box state. If `matplotlib.axes.Axes.set_frame_onmatplotlib.axes.Axes.get_frame_on`",
    "type": "function",
    "file_path": "matplotlib\\lib\\matplotlib\\pyplot.py",
    "ast_data": "FunctionDef name:box arg:on arguments arg Assign Call If Compare Assign Call Call"
  },
  {
    "library": "scrapy",
    "name": "item_completed",
    "source_code": "def item_completed(self, results: list[FileInfoOrError], item: Any, info: SpiderInfo) -> Any:\n    if self.LOG_FAILED_RESULTS:\n        for ok, value in results:\n            if not ok:\n                assert isinstance(value, Failure)\n                logger.error('%(class)s found errors processing %(item)s', {'class': self.__class__.__name__, 'item': item}, exc_info=failure_to_exc_info(value), extra={'spider': info.spider})\n    return item",
    "docstring": "Called per item when all media requests has been processed",
    "type": "method",
    "file_path": "scrapy\\scrapy\\pipelines\\media.py",
    "ast_data": "FunctionDef name:item_completed arg:self arg:results arg:item arg:info arguments arg arg arg arg If For If Call Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_ResizeBilinearGrad",
    "source_code": "@ops.RegisterGradient('ResizeBilinear')\ndef _ResizeBilinearGrad(op: ops.Operation, grad):\n    grad0 = gen_image_ops.resize_bilinear_grad(grad, op.inputs[0], align_corners=op.get_attr('align_corners'), half_pixel_centers=op.get_attr('half_pixel_centers'))\n    return [grad0, None]",
    "docstring": "The derivatives for bilinear resizing. Args: op: The ResizeBilinear op. grad: The tensor representing the gradient w.r.t. the output. Returns: The gradients w.r.t. the input.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\image_grad.py",
    "ast_data": "FunctionDef name:_ResizeBilinearGrad arg:op arg:grad arguments arg arg Assign Call Call Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "train_step",
    "source_code": "def train_step(self, data):\n    data = data_adapter.expand_1d(data)\n    x, y, sample_weight = data_adapter.unpack_x_y_sample_weight(data)\n    with backprop.GradientTape() as tape:\n        y_pred = self(x, training=True)\n        loss = self.compiled_loss(y, y_pred, sample_weight, regularization_losses=self.losses)\n    self.optimizer.minimize(loss, self.trainable_variables, tape=tape)\n    self.compiled_metrics.update_state(y, y_pred, sample_weight)\n    return_metrics = {}\n    for metric in self.metrics:\n        result = metric.result()\n        if isinstance(result, dict):\n            return_metrics.update(result)\n        else:\n            return_metrics[metric.name] = result\n    return return_metrics",
    "docstring": "The logic for one training step. This method can be overridden to support custom training logic. For concrete examples of how to override this method see [Customizing what happens in fit]( This method is called by . This method should contain the mathematical logic for one step of training. This typically includes the forward pass, loss calculation, backpropagation, and metric updates. Configuration details for *how* this logic is run (e.g. and settings), should be left to , which can also be overridden. Args: data: A nested structure of s. Returns: A containing values that will be passed to . Typically, the values of the 's metrics are returned. Example: .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\training.py",
    "ast_data": "FunctionDef name:train_step arg:self arg:data arguments arg arg Assign Call Assign Call With Call Assign Call Assign Call Call Call Assign For Assign Call If Call Call Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "get_temp_export_dir",
    "source_code": "def get_temp_export_dir(timestamped_export_dir):\n    dirname, basename = os.path.split(timestamped_export_dir)\n    if isinstance(basename, bytes):\n        str_name = basename.decode('utf-8')\n    else:\n        str_name = str(basename)\n    temp_export_dir = os.path.join(compat.as_bytes(dirname), compat.as_bytes('temp-{}'.format(str_name)))\n    return temp_export_dir",
    "docstring": "Builds a directory name based on the argument but starting with 'temp-'. This relies on the fact that TensorFlow Serving ignores subdirectories of the base directory that can't be parsed as integers. Args: timestamped_export_dir: the name of the eventual export directory, e.g. /foo/bar/ Returns: A sister directory prefixed with 'temp-', e.g. /foo/bar/temp-.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\saving\\utils_v1\\export_utils.py",
    "ast_data": "FunctionDef name:get_temp_export_dir arg:timestamped_export_dir arguments arg Assign Call If Call Assign Call Assign Call Assign Call Call Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "make_list_of_op",
    "source_code": "def make_list_of_op(tops, check_graph=True, allow_graph=True, ignore_ts=False):\n    if isinstance(tops, ops.Graph):\n        if allow_graph:\n            return tops.get_operations()\n        else:\n            raise TypeError('allow_graph is False: cannot convert a tf.Graph.')\n    else:\n        if not is_iterable(tops):\n            tops = [tops]\n        if not tops:\n            return []\n        if check_graph:\n            check_types = None if ignore_ts else ops.Operation\n            get_unique_graph(tops, check_types=check_types)\n        return [op for op in tops if isinstance(op, ops.Operation)]",
    "docstring": "Convert ops to a list of . Args: tops: can be an iterable of , a or a single operation. check_graph: if check if all the operations belong to the same graph. allow_graph: if a cannot be converted. ignore_ts: if True, silently ignore . Returns: A newly created list of . Raises: TypeError: if tops cannot be converted to a list of or, if is , if all the ops do not belong to the same graph.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\op_selector.py",
    "ast_data": "FunctionDef name:make_list_of_op arg:tops arg:check_graph arg:allow_graph arg:ignore_ts arguments arg arg arg arg If Call If Return return:yes Call Raise Call If Call Assign If Return return:no If Assign Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "gen_nat_constraints",
    "source_code": "def gen_nat_constraints(list_of_dims):\n    return [BinConstraintD(0, d, op_leq) for d in list_of_dims]",
    "docstring": "Generate natural number constraints for dimensions",
    "type": "function",
    "file_path": "pytorch\\torch\\fx\\experimental\\migrate_gradual_types\\util.py",
    "ast_data": "FunctionDef name:gen_nat_constraints arg:list_of_dims arguments arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "dtrace_structured",
    "source_code": "def dtrace_structured(name: str, metadata_fn: Callable[[], Union[dict[str, Any], tuple[str, int]]]=dict, *, payload_fn: Callable[[], Optional[Union[str, object]]]=lambda: None, suppress_context: bool=False, expect_trace_id: bool=False, record_logging_overhead: bool=True):\n    if GET_DTRACE_STRUCTURED:\n        trace_structured(name, metadata_fn, payload_fn=payload_fn, suppress_context=suppress_context, expect_trace_id=expect_trace_id, record_logging_overhead=record_logging_overhead)",
    "docstring": "For logging more detailed information used for debugging. This may result in the program becoming slow.",
    "type": "function",
    "file_path": "pytorch\\torch\\_logging\\_internal.py",
    "ast_data": "FunctionDef name:dtrace_structured arg:name arg:metadata_fn arguments arg arg arg arg arg arg arguments If Call"
  },
  {
    "library": "kornia",
    "name": "UniformDistribution",
    "source_code": "class UniformDistribution(MultiprocessWrapper, Uniform):\n    pass",
    "docstring": "Wrapper around torch Uniform distribution which makes it work with the 'spawn' multiprocessing context.",
    "type": "class",
    "file_path": "kornia\\kornia\\augmentation\\random_generator\\base.py",
    "ast_data": "ClassDef name:UniformDistribution"
  },
  {
    "library": "pytorch",
    "name": "scalar_name",
    "source_code": "def scalar_name(self) -> ScalarName:\n    return _SCALAR_TYPE_TO_NAME[self]",
    "docstring": "Convert a JitScalarType to a JIT scalar type name.",
    "type": "method",
    "file_path": "pytorch\\torch\\onnx\\_type_utils.py",
    "ast_data": "FunctionDef name:scalar_name arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "get_config",
    "source_code": "def get_config(self):\n    raise NotImplementedError(str(self) + ' does not implement get_config()')",
    "docstring": "Returns the config of the regularizer. An regularizer config is a Python dictionary (serializable) containing all configuration parameters of the regularizer. The same regularizer can be reinstantiated later (without any saved state) from this configuration. This method is optional if you are just training and executing models, exporting to and from SavedModels, or using weight checkpoints. This method is required for saving and loading models to HDF5 formats, Keras model cloning, some visualization utilities, and exporting models to and from JSON. Returns: Python dictionary.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\regularizers.py",
    "ast_data": "FunctionDef name:get_config arg:self arguments arg Raise Call Call"
  },
  {
    "library": "tensorflow",
    "name": "name",
    "source_code": "@property\ndef name(self):\n    raise NotImplementedError",
    "docstring": "The name of this variable.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\variables.py",
    "ast_data": "FunctionDef name:name arg:self arguments arg Raise"
  },
  {
    "library": "matplotlib",
    "name": "invalidate",
    "source_code": "def invalidate(self):\n    return self._invalidate_internal(level=self._INVALID_AFFINE_ONLY if self.is_affine else self._INVALID_FULL, invalidating_node=self)",
    "docstring": "Invalidate this and triggers an invalidation of its ancestors. Should be called any time the transform changes.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\transforms.py",
    "ast_data": "FunctionDef name:invalidate arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "linetable_310_writer",
    "source_code": "def linetable_310_writer(first_lineno):\n    assert sys.version_info >= (3, 10) and sys.version_info < (3, 11)\n    linetable: list[int] = []\n    lineno = first_lineno\n    lineno_delta = 0\n    byteno = 0\n\n    def _update(byteno_delta, lineno_delta):\n        while byteno_delta != 0 or lineno_delta != 0:\n            byte_offset = max(0, min(byteno_delta, 254))\n            line_offset = max(-127, min(lineno_delta, 127))\n            assert byte_offset != 0 or line_offset != 0\n            byteno_delta -= byte_offset\n            lineno_delta -= line_offset\n            linetable.extend((byte_offset, line_offset & 255))\n\n    def update(lineno_new, byteno_new):\n        nonlocal lineno, lineno_delta, byteno\n        byteno_delta = byteno_new - byteno\n        byteno = byteno_new\n        _update(byteno_delta, lineno_delta)\n        lineno_delta = lineno_new - lineno\n        lineno = lineno_new\n\n    def end(total_bytes):\n        _update(total_bytes - byteno, lineno_delta)\n    return (linetable, update, end)",
    "docstring": "Used to create typing.CodeType.co_linetable See This is the internal format of the line number table for Python 3.10",
    "type": "function",
    "file_path": "pytorch\\torch\\_dynamo\\bytecode_transformation.py",
    "ast_data": "FunctionDef name:linetable_310_writer arg:first_lineno arguments arg BoolOp Compare Compare Assign Assign Assign FunctionDef name:_update arg:byteno_delta arg:lineno_delta arguments arg arg While BoolOp Compare Compare Assign Call Call Assign Call Call BoolOp Compare Compare Call FunctionDef name:update arg:lineno_new arg:byteno_new arguments arg arg Assign Assign Call Assign Assign FunctionDef name:end arg:total_bytes arguments arg Call Return return:yes"
  },
  {
    "library": "pandas",
    "name": "header_column_widths",
    "source_code": "@property\ndef header_column_widths(self) -> Sequence[int]:\n    return [len(col) for col in self.headers]",
    "docstring": "Widths of header columns (only titles).",
    "type": "method",
    "file_path": "pandas\\pandas\\io\\formats\\info.py",
    "ast_data": "FunctionDef name:header_column_widths arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "is_lowp_fp_sink",
    "source_code": "def is_lowp_fp_sink(node: torch.fx.Node, dt: torch.dtype):\n    assert dt in DTYPE_LOWP_FP\n    if (input_dtype := get_input_dtype(node)):\n        return input_dtype == dt\n    elif node.target == 'to_dtype':\n        return True\n    else:\n        return False",
    "docstring": "Check if the given node accept input with expected low precision floating point data type.",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\codegen\\cpp.py",
    "ast_data": "FunctionDef name:is_lowp_fp_sink arg:node arg:dt arguments arg arg Compare If Call Return return:yes Compare If Compare Return return:yes Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_convert_as_saved_model",
    "source_code": "def _convert_as_saved_model(self):\n    temp_dir = tempfile.mkdtemp()\n    try:\n        graph_def, input_tensors, output_tensors = self._convert_keras_to_saved_model(temp_dir)\n        if self.saved_model_dir:\n            return super(TFLiteKerasModelConverterV2, self).convert(graph_def, input_tensors, output_tensors)\n    finally:\n        shutil.rmtree(temp_dir, True)",
    "docstring": "Converts a Keras model as a saved model. Returns: The converted data in serialized format.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\lite\\python\\lite.py",
    "ast_data": "FunctionDef name:_convert_as_saved_model arg:self arguments arg Assign Call Try Assign Call If Return return:yes Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "Algorithm",
    "source_code": "@tf_export('random.Algorithm', 'random.experimental.Algorithm')\nclass Algorithm(enum.Enum):\n    PHILOX = 1\n    THREEFRY = 2\n    AUTO_SELECT = 3",
    "docstring": "A random-number-generation (RNG) algorithm. Many random-number generators (e.g. the argument of and ) in TF allow you to choose the algorithm used to generate the (pseudo-)random numbers. You can set the algorithm to be one of the options below. * : The Philox algorithm introduced in the paper [\"Parallel Random Numbers: As Easy as 1, 2, 3\"]( * : The ThreeFry algorithm introduced in the paper [\"Parallel Random Numbers: As Easy as 1, 2, 3\"]( * : Allow TF to automatically select the algorithm depending on the accelerator device. Note that with this option, running the same TF program on different devices may result in different random numbers. Also note that TF may select an algorithm that is different from and .",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\random_ops_util.py",
    "ast_data": "ClassDef name:Algorithm Assign Assign Assign Call"
  },
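A short usage sketch: the enum is accepted through the `alg` argument of `tf.random.Generator.from_seed`; the printed values depend on the chosen algorithm.

```python
import tensorflow as tf

# Pin the RNG algorithm explicitly; AUTO_SELECT would let TF choose
# per accelerator device, at the cost of cross-device reproducibility.
g = tf.random.Generator.from_seed(42, alg=tf.random.Algorithm.PHILOX)
print(g.normal(shape=(2,)))
```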
  {
    "library": "numpy",
    "name": "fit",
    "source_code": "@classmethod\ndef fit(cls, x, y, deg, domain=None, rcond=None, full=False, w=None, window=None, symbol='x'):\n    if domain is None:\n        domain = pu.getdomain(x)\n        if domain[0] == domain[1]:\n            domain[0] -= 1\n            domain[1] += 1\n    elif isinstance(domain, list) and len(domain) == 0:\n        domain = cls.domain\n    if window is None:\n        window = cls.window\n    xnew = pu.mapdomain(x, domain, window)\n    res = cls._fit(xnew, y, deg, w=w, rcond=rcond, full=full)\n    if full:\n        [coef, status] = res\n        return (cls(coef, domain=domain, window=window, symbol=symbol), status)\n    else:\n        coef = res\n        return cls(coef, domain=domain, window=window, symbol=symbol)",
    "docstring": "Least squares fit to data. Return a series instance that is the least squares fit to the data sampled at . The domain of the returned instance can be specified and this will often result in a superior fit with less chance of ill conditioning. Parameters ---------- x : array_like, shape (M,) x-coordinates of the M sample points `degdegxrcondlinalg.lstsq`.",
    "type": "method",
    "file_path": "numpy\\numpy\\polynomial\\_polybase.py",
    "ast_data": "FunctionDef name:fit arg:cls arg:x arg:y arg:deg arg:domain arg:rcond arg:full arg:w arg:window arg:symbol arguments arg arg arg arg arg arg arg arg arg arg If Compare Assign Call If Compare If BoolOp Call Compare Call Assign If Compare Assign Assign Call Assign Call If Assign Return return:yes Call Assign Return return:yes Call"
  },
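A usage sketch through the public `numpy.polynomial.Polynomial` subclass, which inherits this classmethod; the sample data is arbitrary:

```python
import numpy as np
from numpy.polynomial import Polynomial

rng = np.random.default_rng(0)
x = np.linspace(0, 2 * np.pi, 50)
y = np.sin(x) + 0.1 * rng.normal(size=x.size)

# fit() maps x into the class window before solving the least-squares
# problem, which is what improves conditioning for high degrees.
p = Polynomial.fit(x, y, deg=5)
print(p.domain, p.window)  # domain inferred from x; window is [-1, 1]
print(p(np.pi / 2))        # evaluate the fitted series near sin's maximum
```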
  {
    "library": "tensorflow",
    "name": "get_common_shape",
    "source_code": "def get_common_shape(x, y):\n    if x is None != y is None:\n        raise RuntimeError('Cannot find a common shape when LHS shape is None but RHS shape is not (or vice versa): %s vs. %s' % (x, y))\n    if x is None:\n        return None\n    if not isinstance(x, tensor_shape.TensorShape):\n        raise TypeError('Expected x to be a TensorShape but saw %s' % (x,))\n    if not isinstance(y, tensor_shape.TensorShape):\n        raise TypeError('Expected y to be a TensorShape but saw %s' % (y,))\n    if x.rank != y.rank or x.rank is None:\n        return tensor_shape.TensorShape(None)\n    dims = []\n    for dim_x, dim_y in zip(x.dims, y.dims):\n        if dim_x != dim_y or tensor_shape.dimension_value(dim_x) is None or tensor_shape.dimension_value(dim_y) is None:\n            dims.append(None)\n        else:\n            dims.append(tensor_shape.dimension_value(dim_x))\n    return tensor_shape.TensorShape(dims)",
    "docstring": "Find a that is compatible with both and .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\saving\\saved_model\\load.py",
    "ast_data": "FunctionDef name:get_common_shape arg:x arg:y arguments arg arg If Compare Raise Call If Compare Return return:no If Call Raise Call If Call Raise Call If BoolOp Compare Compare Return return:yes Call Assign For Call If BoolOp Compare Compare Call Compare Call Call Call Call Return return:yes Call"
  },
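A small demonstration of the merging rule (a dimension survives only if both shapes agree on it and it is fully known). The import path is the private one given in the record's file_path and may move between TF versions:

```python
import tensorflow as tf
# Private Keras-saving helper; path as recorded above.
from tensorflow.python.keras.saving.saved_model.load import get_common_shape

a = tf.TensorShape([32, None, 128])
b = tf.TensorShape([32, 10, 64])
print(get_common_shape(a, b))  # (32, None, None): only dim 0 is kept
# Differing ranks collapse to a completely unknown shape:
print(get_common_shape(tf.TensorShape([2, 3]), tf.TensorShape([2, 3, 4])))
```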
  {
    "library": "tensorflow",
    "name": "resolve",
    "source_code": "def resolve(node, source_info, graphs, definition_factory=Definition):\n    visitor = TreeAnnotator(source_info, graphs, definition_factory)\n    node = visitor.visit(node)\n    return node",
    "docstring": "Resolves reaching definitions for each symbol. Args: node: ast.AST source_info: transformer.SourceInfo graphs: Dict[ast.FunctionDef, cfg.Graph] definition_factory: Callable[[], Definition] Returns: ast.AST",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\autograph\\pyct\\static_analysis\\reaching_definitions.py",
    "ast_data": "FunctionDef name:resolve arg:node arg:source_info arg:graphs arg:definition_factory arguments arg arg arg arg Assign Call Assign Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "generate_conj",
    "source_code": "@register_transformation_rule(Conj)\ndef generate_conj(constraint, counter):\n    new = []\n    for c in constraint.conjucts:\n        new_c, counter = transform_constraint(c, counter)\n        new.append(new_c)\n    return (Conj(new), counter)",
    "docstring": "Transform conjunctions",
    "type": "function",
    "file_path": "pytorch\\torch\\fx\\experimental\\migrate_gradual_types\\constraint_transformation.py",
    "ast_data": "FunctionDef name:generate_conj arg:constraint arg:counter arguments arg arg Assign For Assign Call Call Return return:yes Call Call"
  },
  {
    "library": "scipy",
    "name": "set_gc_state",
    "source_code": "def set_gc_state(state):\n    if gc.isenabled() == state:\n        return\n    if state:\n        gc.enable()\n    else:\n        gc.disable()",
    "docstring": "Set status of garbage collector",
    "type": "function",
    "file_path": "scipy\\scipy\\_lib\\_gcutils.py",
    "ast_data": "FunctionDef name:set_gc_state arg:state arguments arg If Compare Call Return return:no If Call Call"
  },
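A usage sketch; the helper is private to SciPy (path as recorded above), and the same two stdlib `gc` calls would work anywhere:

```python
import gc
from scipy._lib._gcutils import set_gc_state

set_gc_state(False)  # disable collection around a timing-sensitive block
try:
    assert not gc.isenabled()
    # ... run the measurement ...
finally:
    set_gc_state(True)  # restore the collector
```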
  {
    "library": "authlib",
    "name": "add_params_to_uri",
    "source_code": "def add_params_to_uri(uri, params, fragment=False):\n    sch, net, path, par, query, fra = urlparse.urlparse(uri)\n    if fragment:\n        fra = add_params_to_qs(fra, params)\n    else:\n        query = add_params_to_qs(query, params)\n    return urlparse.urlunparse((sch, net, path, par, query, fra))",
    "docstring": "Add a list of two-tuples to the uri query components.",
    "type": "function",
    "file_path": "authlib\\authlib\\common\\urls.py",
    "ast_data": "FunctionDef name:add_params_to_uri arg:uri arg:params arg:fragment arguments arg arg arg Assign Call If Assign Call Assign Call Return return:yes Call"
  },
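A usage sketch with arbitrary example URLs; existing query parameters are preserved and the new ones appended:

```python
from authlib.common.urls import add_params_to_uri

print(add_params_to_uri('https://example.com/cb?state=xyz',
                        [('code', 'abc')]))
# -> https://example.com/cb?state=xyz&code=abc

# fragment=True targets the fragment instead, as in the OAuth implicit flow:
print(add_params_to_uri('https://example.com/cb',
                        [('access_token', 't')], fragment=True))
# -> https://example.com/cb#access_token=t
```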
  {
    "library": "numpy",
    "name": "_c_string_literal",
    "source_code": "def _c_string_literal(s):\n    s = s.replace('\\\\', '\\\\\\\\')\n    s = s.replace('\"', '\\\\\"')\n    s = s.replace('\\n', '\\\\n')\n    return '\"{}\"'.format(s)",
    "docstring": "Convert a python string into a literal suitable for inclusion into C code",
    "type": "function",
    "file_path": "numpy\\numpy\\distutils\\system_info.py",
    "ast_data": "FunctionDef name:_c_string_literal arg:s arguments arg Assign Call Assign Call Assign Call Return return:yes Call"
  },
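The escaping order matters: backslashes must be doubled first, otherwise the backslashes introduced by quote- and newline-escaping would themselves get doubled. A self-contained re-statement (the original lives in the deprecated `numpy.distutils` package, so it is redefined locally here):

```python
def c_string_literal(s: str) -> str:
    # Order matters: escape backslashes before adding new ones.
    s = s.replace('\\', '\\\\')
    s = s.replace('"', '\\"')
    s = s.replace('\n', '\\n')
    return '"{}"'.format(s)

print(c_string_literal('path\\to "dir"\nnext'))
# "path\\to \"dir\"\nnext"  -- safe to paste into C source
```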
  {
    "library": "tensorflow",
    "name": "embedding_tables",
    "source_code": "@property\ndef embedding_tables(self) -> Dict[tpu_embedding_v2_utils.TableConfig, tf_variables.Variable]:\n    self._maybe_build()\n    return {table: self._variables[table.name]['parameters'] for table in self._table_config}",
    "docstring": "Returns a dict of embedding tables, keyed by .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\tpu\\tpu_embedding_for_serving.py",
    "ast_data": "FunctionDef name:embedding_tables arg:self arguments arg Call Return return:yes"
  },
  {
    "library": "django",
    "name": "__delitem__",
    "source_code": "def __delitem__(self, index):\n    if not isinstance(index, (int, slice)):\n        raise TypeError('%s is not a legal index' % index)\n    origLen = len(self)\n    if isinstance(index, int):\n        index = self._checkindex(index)\n        indexRange = [index]\n    else:\n        indexRange = range(*index.indices(origLen))\n    newLen = origLen - len(indexRange)\n    newItems = (self._get_single_internal(i) for i in range(origLen) if i not in indexRange)\n    self._rebuild(newLen, newItems)",
    "docstring": "Delete the item(s) at the specified index/slice.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\geos\\mutable_list.py",
    "ast_data": "FunctionDef name:__delitem__ arg:self arg:index arguments arg arg If Call Raise Call Assign Call If Call Assign Call Assign Assign Call Call Assign Call Assign Call Call Compare Call"
  },
  {
    "library": "tensorflow",
    "name": "EventListenerServicer",
    "source_code": "class EventListenerServicer(object):\n\n    def SendEvents(self, request_iterator, context):\n        context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n        context.set_details('Method not implemented!')\n        raise NotImplementedError('Method not implemented!')\n\n    def SendTracebacks(self, request, context):\n        context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n        context.set_details('Method not implemented!')\n        raise NotImplementedError('Method not implemented!')\n\n    def SendSourceFiles(self, request, context):\n        context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n        context.set_details('Method not implemented!')\n        raise NotImplementedError('Method not implemented!')",
    "docstring": "EventListener: Receives Event protos, e.g., from debugged TensorFlow runtime(s).",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\debug\\lib\\debug_service_pb2_grpc.py",
    "ast_data": "ClassDef name:EventListenerServicer FunctionDef name:SendEvents arg:self arg:request_iterator arg:context arguments arg arg arg Call Call Raise Call FunctionDef name:SendTracebacks arg:self arg:request arg:context arguments arg arg arg Call Call Raise Call FunctionDef name:SendSourceFiles arg:self arg:request arg:context arguments arg arg arg Call Call Raise Call"
  },
  {
    "library": "pytorch",
    "name": "nn_module_supports_equalization",
    "source_code": "def nn_module_supports_equalization(module) -> bool:\n    return type(module) in [nn.Linear, nn.Conv1d, nn.Conv2d, nn.Conv3d]",
    "docstring": "Checks if the torch.nn node supports equalization.",
    "type": "function",
    "file_path": "pytorch\\torch\\ao\\quantization\\fx\\_equalize.py",
    "ast_data": "FunctionDef name:nn_module_supports_equalization arg:module arguments arg Return return:yes Compare Call"
  },
  {
    "library": "django",
    "name": "convert_value",
    "source_code": "@cached_property\ndef convert_value(self):\n    field = self.output_field\n    internal_type = field.get_internal_type()\n    if internal_type == 'FloatField':\n        return lambda value, expression, connection: None if value is None else float(value)\n    elif internal_type.endswith('IntegerField'):\n        return lambda value, expression, connection: None if value is None else int(value)\n    elif internal_type == 'DecimalField':\n        return lambda value, expression, connection: None if value is None else Decimal(value)\n    return self._convert_value_noop",
    "docstring": "Expressions provide their own converters because users have the option of manually specifying the output_field which may be a different type from the one the database returns.",
    "type": "method",
    "file_path": "django\\django\\db\\models\\expressions.py",
    "ast_data": "FunctionDef name:convert_value arg:self arguments arg Assign Assign Call If Compare Return return:yes arguments arg arg arg Compare Call If Call Return return:yes arguments arg arg arg Compare Call If Compare Return return:yes arguments arg arg arg Compare Call Return return:yes"
  },
  {
    "library": "pandas",
    "name": "data_orientation",
    "source_code": "@property\ndef data_orientation(self) -> tuple[int, ...]:\n    return tuple(itertools.chain([int(a[0]) for a in self.non_index_axes], [int(a.axis) for a in self.index_axes]))",
    "docstring": "return a tuple of my permutated axes, non_indexable at the front",
    "type": "method",
    "file_path": "pandas\\pandas\\io\\pytables.py",
    "ast_data": "FunctionDef name:data_orientation arg:self arguments arg Return return:yes Call Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_UpdatePendingAndEnqueueReady",
    "source_code": "def _UpdatePendingAndEnqueueReady(grads, op: ops.Operation, queue, pending_count, loop_state, xs_set):\n    for x in _NonEagerInputs(op, xs_set):\n        pending_count[x.op] -= 1\n        ready = pending_count[x.op] == 0\n        if loop_state and (not ready):\n            ready = pending_count[x.op] > 0 and control_flow_util.IsLoopSwitch(x.op)\n        if ready:\n            if control_flow_util.IsLoopExit(x.op):\n                grad_state = loop_state.GetGradState(x.op, before=False)\n                grad_state.deferred_exits.append(x)\n                grad_state.pending_exits_count -= 1\n                if grad_state.pending_exits_count == 0:\n                    has_not_none_grad = False\n                    for y in grad_state.deferred_exits:\n                        if _HasAnyNotNoneGrads(grads, y.op):\n                            has_not_none_grad = True\n                            queue.append(y.op)\n                        else:\n                            grad_state.unused_exits.append(y)\n                    if has_not_none_grad:\n                        for y in grad_state.unused_exits:\n                            if backprop_util.IsTrainable(y):\n                                _SetGrad(grads, y, loop_state.ZerosLikeForExit(y))\n                            queue.append(y.op)\n                    else:\n                        for y in grad_state.unused_exits:\n                            queue.append(y.op)\n            else:\n                queue.append(x.op)",
    "docstring": "Update pending count for the inputs of op and enqueue ready ops.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\gradients_util.py",
    "ast_data": "FunctionDef name:_UpdatePendingAndEnqueueReady arg:grads arg:op arg:queue arg:pending_count arg:loop_state arg:xs_set arguments arg arg arg arg arg arg For Call Assign Compare If BoolOp Assign BoolOp Compare Call If If Call Assign Call Call If Compare Assign For If Call Assign Call Call If For If Call Call Call Call For Call Call"
  },
  {
    "library": "tensorflow",
    "name": "close",
    "source_code": "def close(self):\n    self._read_buf = None\n    if self._writable_file:\n        self._writable_file.close()\n        self._writable_file = None",
    "docstring": "Closes the file. Should be called for the WritableFile to be flushed. In general, if you use the context manager pattern, you don't need to call this directly. >>> with tf.io.gfile.GFile(\"/tmp/x\", \"w\") as f: ... f.write(\"asdf\\n\") ... f.write(\"qwer\\n\") >>> # implicit f.close() at the end of the block For cloud filesystems, forgetting to call might result in data loss as last write might not have been replicated.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\lib\\io\\file_io.py",
    "ast_data": "FunctionDef name:close arg:self arguments arg Assign If Call Assign"
  },
  {
    "library": "pandas",
    "name": "GroupByCythonAgg",
    "source_code": "class GroupByCythonAgg:\n    param_names = ['dtype', 'method']\n    params = [['float64'], ['sum', 'prod', 'min', 'max', 'idxmin', 'idxmax', 'mean', 'median', 'var', 'first', 'last', 'any', 'all']]\n\n    def setup(self, dtype, method):\n        N = 1000000\n        df = DataFrame(np.random.randn(N, 10), columns=list('abcdefghij'))\n        df['key'] = np.random.randint(0, 100, size=N)\n        self.df = df\n\n    def time_frame_agg(self, dtype, method):\n        self.df.groupby('key').agg(method)",
    "docstring": "Benchmarks specifically targeting our cython aggregation algorithms (using a big enough dataframe with simple key, so a large part of the time is actually spent in the grouped aggregation).",
    "type": "class",
    "file_path": "pandas\\asv_bench\\benchmarks\\groupby.py",
    "ast_data": "ClassDef name:GroupByCythonAgg Assign Assign FunctionDef name:setup arg:self arg:dtype arg:method arguments arg arg arg Assign Assign Call Call Call Assign Call Assign FunctionDef name:time_frame_agg arg:self arg:dtype arg:method arguments arg arg arg Call Call"
  },
  {
    "library": "scikit-learn",
    "name": "_run_search",
    "source_code": "def _run_search(self, evaluate_candidates):\n    evaluate_candidates(ParameterSampler(self.param_distributions, self.n_iter, random_state=self.random_state))",
    "docstring": "Search n_iter candidates from param_distributions",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\model_selection\\_search.py",
    "ast_data": "FunctionDef name:_run_search arg:self arg:evaluate_candidates arguments arg arg Call Call"
  },
  {
    "library": "pytorch",
    "name": "init_once_fakemode",
    "source_code": "def init_once_fakemode(fn: Callable[..., Any]) -> Callable[[], Any]:\n\n    @functools.lru_cache(None)\n    @functools.wraps(fn)\n    def lazy_init() -> Any:\n        counters_ref = counters['inductor'].copy()\n        with torch._guards.tracing(None), unset_fake_temporarily(), FakeTensorMode():\n            result = fn()\n        counters['inductor'] = counters_ref\n        return result\n    return lazy_init",
    "docstring": "Wrapper around lazy init functions in fx_passes/",
    "type": "function",
    "file_path": "pytorch\\torch\\_inductor\\pattern_matcher.py",
    "ast_data": "FunctionDef name:init_once_fakemode arg:fn arguments arg FunctionDef name:lazy_init arguments Assign Call With Call Call Call Assign Call Assign Return return:yes Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "getsource",
    "source_code": "def getsource(object):\n    return _inspect.getsource(tf_decorator.unwrap(object)[1])",
    "docstring": "TFDecorator-aware replacement for inspect.getsource.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\utils\\tf_inspect.py",
    "ast_data": "FunctionDef name:getsource arg:object arguments arg Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "reset",
    "source_code": "def reset(self):\n    self._pop_tape()\n    self._tape = None\n    self._push_tape()",
    "docstring": "Clears all information stored in this tape. Equivalent to exiting and reentering the tape context manager with a new tape. For example, the two following code blocks are equivalent: This is useful if you don't want to exit the context manager for the tape, or can't because the desired reset point is inside a control flow construct:",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\backprop.py",
    "ast_data": "FunctionDef name:reset arg:self arguments arg Call Assign Call"
  },
  {
    "library": "pandas",
    "name": "_to_latex_via_styler",
    "source_code": "@final\ndef _to_latex_via_styler(self, buf=None, *, hide: dict | list[dict] | None=None, relabel_index: dict | list[dict] | None=None, format: dict | list[dict] | None=None, format_index: dict | list[dict] | None=None, format_index_names: dict | list[dict] | None=None, render_kwargs: dict | None=None):\n    from pandas.io.formats.style import Styler\n    self = cast('DataFrame', self)\n    styler = Styler(self, uuid='')\n    for kw_name in ['hide', 'relabel_index', 'format', 'format_index', 'format_index_names']:\n        kw = vars()[kw_name]\n        if isinstance(kw, dict):\n            getattr(styler, kw_name)(**kw)\n        elif isinstance(kw, list):\n            for sub_kw in kw:\n                getattr(styler, kw_name)(**sub_kw)\n    render_kwargs = {} if render_kwargs is None else render_kwargs\n    if render_kwargs.pop('bold_rows'):\n        styler.map_index(lambda v: 'textbf:--rwrap;')\n    return styler.to_latex(buf=buf, **render_kwargs)",
    "docstring": "Render object to a LaTeX tabular, longtable, or nested table. Uses the ``. Returns ------- str or None If buf is None, returns the result as a string. Otherwise returns None.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\generic.py",
    "ast_data": "FunctionDef name:_to_latex_via_styler arg:self arg:buf arguments arg arg arg arg arg arg arg arg Assign Call Assign Call For Assign Call If Call Call Call If Call For Call Call Assign Compare If Call Call arguments arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "run_calibration",
    "source_code": "def run_calibration(self, saved_model_path: str, signature_keys: list[str], tags: set[str], force_graph_mode_calibration: bool, representative_dataset_file_map_serialized: dict[str, bytes]) -> Optional[bool]:\n    dataset_file_map = {}\n    for signature_key, dataset_file_serialized in representative_dataset_file_map_serialized.items():\n        dataset_file_map[signature_key] = quantization_options_pb2.RepresentativeDatasetFile.FromString(dataset_file_serialized)\n    return _call_and_return_none_on_error(func=functools.partial(_run_calibration, saved_model_path, signature_keys, tags, force_graph_mode_calibration, dataset_file_map), error_msg=f'Failed to run calibration on model \"{saved_model_path}\", signature_keys: {signature_keys}, tags: {tags}.')",
    "docstring": "Runs calibration and adds calibration statistics to exported model. Args: saved_model_path: Path to the SavedModel to run calibration. signature_keys: List of signature keys corresponding to SignatureDefs to run calibration on. tags: A set of tags that identify the MetaGraphDef. force_graph_mode_calibration: If True, runs the calibration in graph mode. representative_dataset_file_map_serialized: Signature key -> mapping for running the calibration step. Each dataset file stores the representative dataset for the function matching the signature key. Returns: The error message if the function raises and exception. otherwise.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\compiler\\mlir\\quantization\\tensorflow\\python\\py_function_lib.py",
    "ast_data": "FunctionDef name:run_calibration arg:self arg:saved_model_path arg:signature_keys arg:tags arg:force_graph_mode_calibration arg:representative_dataset_file_map_serialized arguments arg arg arg arg arg arg Assign For Call Assign Call Return return:yes Call Call"
  },
  {
    "library": "numpy",
    "name": "putmask",
    "source_code": "@array_function_from_c_func_and_dispatcher(_multiarray_umath.putmask)\ndef putmask(a, /, mask, values):\n    return (a, mask, values)",
    "docstring": "putmask(a, mask, values) Changes elements of an array based on conditional and input values. Sets `valuesamaskaamaskvaluesavaluesa` it is repeated: >>> x = np.arange(5) >>> np.putmask(x, x>1, [-33, -44]) >>> x array([ 0, 1, -33, -44, -33])",
    "type": "function",
    "file_path": "numpy\\numpy\\_core\\multiarray.py",
    "ast_data": "FunctionDef name:putmask arg:mask arg:values arguments arg arg arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "zero_grad",
    "source_code": "def zero_grad(self, set_to_none: bool=True) -> None:\n    if getattr(self, '_is_replica', False):\n        warnings.warn(\"Calling .zero_grad() from a module created with nn.DataParallel() has no effect. The parameters are copied (in a differentiable manner) from the original module. This means they are not leaf nodes in autograd and so don't accumulate gradients. If you need gradients in your forward method, consider using autograd.grad instead.\")\n    for p in self.parameters():\n        if p.grad is not None:\n            if set_to_none:\n                p.grad = None\n            else:\n                if p.grad.grad_fn is not None:\n                    p.grad.detach_()\n                else:\n                    p.grad.requires_grad_(False)\n                p.grad.zero_()",
    "docstring": "Reset gradients of all model parameters. See similar function under :class: for more context. Args: set_to_none (bool): instead of setting to zero, set the grads to None. See :meth: for details.",
    "type": "method",
    "file_path": "pytorch\\torch\\nn\\modules\\module.py",
    "ast_data": "FunctionDef name:zero_grad arg:self arg:set_to_none arguments arg arg If Call Call For Call If Compare If Assign If Compare Call Call Call"
  },
  {
    "library": "pandas",
    "name": "_maybe_wrap_formatter",
    "source_code": "def _maybe_wrap_formatter(formatter: BaseFormatter | None=None, na_rep: str | None=None, precision: int | None=None, decimal: str='.', thousands: str | None=None, escape: str | None=None, hyperlinks: str | None=None) -> Callable:\n    if isinstance(formatter, str):\n        func_0 = lambda x: formatter.format(x)\n    elif callable(formatter):\n        func_0 = formatter\n    elif formatter is None:\n        precision = get_option('styler.format.precision') if precision is None else precision\n        func_0 = partial(_default_formatter, precision=precision, thousands=thousands is not None)\n    else:\n        raise TypeError(f\"'formatter' expected str or callable, got {type(formatter)}\")\n    if escape is not None:\n        func_1 = lambda x: func_0(_str_escape(x, escape=escape))\n    else:\n        func_1 = func_0\n    if decimal != '.' or (thousands is not None and thousands != ','):\n        func_2 = _wrap_decimal_thousands(func_1, decimal=decimal, thousands=thousands)\n    else:\n        func_2 = func_1\n    if hyperlinks is not None:\n        func_3 = lambda x: func_2(_render_href(x, format=hyperlinks))\n    else:\n        func_3 = func_2\n    if na_rep is None:\n        return func_3\n    else:\n        return lambda x: na_rep if isna(x) is True else func_3(x)",
    "docstring": "Allows formatters to be expressed as str, callable or None, where None returns a default formatting function. wraps with na_rep, and precision where they are available.",
    "type": "function",
    "file_path": "pandas\\pandas\\io\\formats\\style_render.py",
    "ast_data": "FunctionDef name:_maybe_wrap_formatter arg:formatter arg:na_rep arg:precision arg:decimal arg:thousands arg:escape arg:hyperlinks arguments arg arg arg arg arg arg arg If Call Assign arguments arg Call If Call Assign If Compare Assign Compare Call Assign Call Compare Raise Call Call If Compare Assign arguments arg Call Call Assign If BoolOp Compare BoolOp Compare Compare Assign Call Assign If Compare Assign arguments arg Call Call Assign If Compare Return return:yes Return return:yes arguments arg Compare Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_is_legacy_layer",
    "source_code": "@property\ndef _is_legacy_layer(self):\n    return True",
    "docstring": "Used by keras to check compatibility. This should not be overridden.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\legacy_tf_layers\\base.py",
    "ast_data": "FunctionDef name:_is_legacy_layer arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_set_table_descriptor",
    "source_code": "def _set_table_descriptor(self, table_descriptor: tpu_embedding_configuration_pb2.TPUEmbeddingConfiguration.TableDescriptor, num_hosts: int, learning_rate_index: Dict[Callable[[], Any], int]):\n    table_descriptor.name = self.name\n    table_descriptor.vocabulary_size = max(self.vocabulary_size, num_hosts)\n    table_descriptor.dimension = self.dim\n    parameters = table_descriptor.optimization_parameters\n    if self.optimizer:\n        if callable(self.optimizer.learning_rate):\n            parameters.learning_rate.dynamic.tag = learning_rate_index[self.optimizer.learning_rate]\n        else:\n            parameters.learning_rate.constant = self.optimizer.learning_rate\n        if self.optimizer.low_dimensional_packing_status:\n            parameters.low_dimensional_packing_status = optimization_parameters_pb2.LowDimensionalPackingStatus.Status.ENABLED\n        self.optimizer._set_optimization_parameters(parameters)\n    if self.quantization_config:\n        self.quantization_config._set_optimization_parameters(parameters)",
    "docstring": "Set the table descriptor from the table data.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\tpu\\tpu_embedding_v2_utils.py",
    "ast_data": "FunctionDef name:_set_table_descriptor arg:self arg:table_descriptor arg:num_hosts arg:learning_rate_index arguments arg arg arg arg Assign Assign Call Assign Assign If If Call Assign Assign If Assign Call If Call"
  },
  {
    "library": "scipy",
    "name": "Problem22",
    "source_code": "class Problem22(Benchmark):\n\n    def __init__(self, dimensions=1):\n        Benchmark.__init__(self, dimensions)\n        self._bounds = [(0, 20)]\n        self.global_optimum = 9.0 * pi / 2.0\n        self.fglob = exp(-27.0 * pi / 2.0) - 1.0\n\n    def fun(self, x, *args):\n        self.nfev += 1\n        x = x[0]\n        return exp(-3.0 * x) - sin(x) ** 3.0",
    "docstring": "Univariate Problem22 objective function. This class defines the Univariate Problem22 global optimization problem. This is a multimodal minimization problem defined as follows: .. math:: f_{\\text{Problem22}}(x) = e^{-3x} - \\sin^3(x) Bound constraints: :math: .. figure:: figures/Problem22.png :alt: Univariate Problem22 function :align: center **Univariate Problem22 function** *Global optimum*: :math: for :math:",
    "type": "class",
    "file_path": "scipy\\benchmarks\\benchmarks\\go_benchmark_functions\\go_funcs_univariate.py",
    "ast_data": "ClassDef name:Problem22 FunctionDef name:__init__ arg:self arg:dimensions arguments arg arg Call Assign Assign Assign Call FunctionDef name:fun arg:self arg:x arguments arg arg arg Assign Return return:yes Call Call"
  },
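The reported optimum can be checked by hand: sin(9π/2) = sin(π/2) = 1, so the objective at x* collapses to exp(-27π/2) - 1, which is exactly the `fglob` set in `__init__`. A self-contained numeric confirmation:

```python
import numpy as np

x_star = 9.0 * np.pi / 2.0
f_star = np.exp(-3.0 * x_star) - np.sin(x_star) ** 3.0
print(np.isclose(f_star, np.exp(-27.0 * np.pi / 2.0) - 1.0))  # True
```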
  {
    "library": "scikit-learn",
    "name": "probB_",
    "source_code": "@property\ndef probB_(self):\n    return self._probB",
    "docstring": "Parameter learned in Platt scaling when . Returns ------- ndarray of shape (n_classes * (n_classes - 1) / 2)",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\svm\\_base.py",
    "ast_data": "FunctionDef name:probB_ arg:self arguments arg Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "OneToOneFeatureMixin",
    "source_code": "class OneToOneFeatureMixin:\n\n    def get_feature_names_out(self, input_features=None):\n        check_is_fitted(self, attributes='n_features_in_')\n        return _check_feature_names_in(self, input_features)",
    "docstring": "Provides for simple transformers. This mixin assumes there's a 1-to-1 correspondence between input features and output features, such as :class:. Examples -------- >>> import numpy as np >>> from sklearn.base import OneToOneFeatureMixin, BaseEstimator >>> class MyEstimator(OneToOneFeatureMixin, BaseEstimator): ... def fit(self, X, y=None): ... self.n_features_in_ = X.shape[1] ... return self >>> X = np.array([[1, 2], [3, 4]]) >>> MyEstimator().fit(X).get_feature_names_out() array(['x0', 'x1'], dtype=object)",
    "type": "class",
    "file_path": "scikit-learn\\sklearn\\base.py",
    "ast_data": "ClassDef name:OneToOneFeatureMixin FunctionDef name:get_feature_names_out arg:self arg:input_features arguments arg arg Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "trace_on",
    "source_code": "@tf_export('summary.trace_on', v1=[])\ndef trace_on(graph=True, profiler=False, profiler_outdir=None):\n    if ops.inside_function():\n        logging.warn('Cannot enable trace inside a tf.function.')\n        return\n    if not context.executing_eagerly():\n        logging.warn('Must enable trace in eager mode.')\n        return\n    global _current_trace_context\n    with _current_trace_context_lock:\n        if _current_trace_context:\n            logging.warn('Trace already enabled')\n            return\n        if graph and (not profiler):\n            context.context().enable_graph_collection()\n        if profiler:\n            if profiler_outdir is None:\n                logging.warn(\"No `profiler_outdir` passed to trace_on(). Profiler won't be enabled.\")\n            else:\n                context.context().enable_run_metadata()\n                _profiler.start(profiler_outdir)\n        _current_trace_context = _TraceContext(graph=graph, profiler=profiler)",
    "docstring": "Starts a trace to record computation graphs and profiling information. Must be invoked in eager mode. When enabled, TensorFlow runtime will collect information that can later be exported and consumed by TensorBoard. The trace is activated across the entire TensorFlow runtime and affects all threads of execution. To stop the trace and export the collected information, use . To stop the trace without exporting, use . Args: graph: If True, enables collection of executed graphs. It includes ones from tf.function invocation and ones from the legacy graph mode. The default is True. profiler: If True, enables the advanced profiler. Enabling profiler implicitly enables the graph collection. The profiler may incur a high memory overhead. The default is False. profiler_outdir: Output directory for profiler. It is required when profiler is enabled when trace was started. Otherwise, it is ignored.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\summary_ops_v2.py",
    "ast_data": "FunctionDef name:trace_on arg:graph arg:profiler arg:profiler_outdir arguments arg arg arg If Call Call Return return:no If Call Call Return return:no With If Call Return return:no If BoolOp Call Call If If Compare Call Call Call Call Assign Call Call"
  },
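A minimal end-to-end sketch pairing `trace_on` with `trace_export`; the log directory and function are arbitrary:

```python
import tensorflow as tf

tf.summary.trace_on(graph=True, profiler=False)

@tf.function
def square(x):
    return x * x

square(tf.constant(2.0))  # run once while tracing is active

writer = tf.summary.create_file_writer('/tmp/trace_demo')  # arbitrary path
with writer.as_default():
    tf.summary.trace_export(name='square_trace', step=0)
```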
  {
    "library": "scipy",
    "name": "EmptyStructMarker",
    "source_code": "class EmptyStructMarker:\n    pass",
    "docstring": "Class to indicate presence of empty matlab struct on output",
    "type": "class",
    "file_path": "scipy\\scipy\\io\\matlab\\_mio5.py",
    "ast_data": "ClassDef name:EmptyStructMarker"
  },
  {
    "library": "scipy",
    "name": "_compute_angular_acceleration",
    "source_code": "def _compute_angular_acceleration(rotvecs, rotvecs_dot, rotvecs_dot_dot):\n    return _compute_angular_rate(rotvecs, rotvecs_dot_dot) + _angular_acceleration_nonlinear_term(rotvecs, rotvecs_dot)",
    "docstring": "Compute angular acceleration given rotation vector and its derivatives. Parameters ---------- rotvecs : ndarray, shape (n, 3) Set of rotation vectors. rotvecs_dot : ndarray, shape (n, 3) Set of rotation vector derivatives. rotvecs_dot_dot : ndarray, shape (n, 3) Set of rotation vector second derivatives. Returns ------- ndarray, shape (n, 3)",
    "type": "function",
    "file_path": "scipy\\scipy\\spatial\\transform\\_rotation_spline.py",
    "ast_data": "FunctionDef name:_compute_angular_acceleration arg:rotvecs arg:rotvecs_dot arg:rotvecs_dot_dot arguments arg arg arg Return return:yes Call Call"
  },
  {
    "library": "numpy",
    "name": "zeros",
    "source_code": "def zeros(shape, dtype=None, order='C'):\n    a = ndarray.__new__(matrix, shape, dtype, order=order)\n    a.fill(0)\n    return a",
    "docstring": "Return a matrix of given shape and type, filled with zeros. Parameters ---------- shape : int or sequence of ints Shape of the matrix dtype : data-type, optional The desired data-type for the matrix, default is float. order : {'C', 'F'}, optional Whether to store the result in C- or Fortran-contiguous order, default is 'C'. Returns ------- out : matrix Zero matrix of given shape, dtype, and order. See Also -------- numpy.zeros : Equivalent array function. matlib.ones : Return a matrix of ones. Notes ----- If has length one i.e. `out`. Examples -------- >>> import numpy.matlib >>> np.matlib.zeros((2, 3)) matrix([[0., 0., 0.], [0., 0., 0.]]) >>> np.matlib.zeros(2) matrix([[0., 0.]])",
    "type": "function",
    "file_path": "numpy\\numpy\\matlib.py",
    "ast_data": "FunctionDef name:zeros arg:shape arg:dtype arg:order arguments arg arg arg Assign Call Call Return return:yes"
  },
  {
    "library": "pandas",
    "name": "is_datetime64_dtype",
    "source_code": "def is_datetime64_dtype(arr_or_dtype) -> bool:\n    if isinstance(arr_or_dtype, np.dtype):\n        return arr_or_dtype.kind == 'M'\n    return _is_dtype_type(arr_or_dtype, classes(np.datetime64))",
    "docstring": "Check whether an array-like or dtype is of the datetime64 dtype. Parameters ---------- arr_or_dtype : array-like or dtype The array-like or dtype to check. Returns ------- boolean Whether or not the array-like or dtype is of the datetime64 dtype. See Also -------- api.types.is_datetime64_ns_dtype: Check whether the provided array or dtype is of the datetime64[ns] dtype. api.types.is_datetime64_any_dtype: Check whether the provided array or dtype is of the datetime64 dtype. Examples -------- >>> from pandas.api.types import is_datetime64_dtype >>> is_datetime64_dtype(object) False >>> is_datetime64_dtype(np.datetime64) True >>> is_datetime64_dtype(np.array([], dtype=int)) False >>> is_datetime64_dtype(np.array([], dtype=np.datetime64)) True >>> is_datetime64_dtype([1, 2, 3]) False",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\dtypes\\common.py",
    "ast_data": "FunctionDef name:is_datetime64_dtype arg:arr_or_dtype arguments arg If Call Return return:yes Compare Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "write_metrics_csv",
    "source_code": "def write_metrics_csv(out: TextIO, metrics: list[list[str]]):\n    writer = csv.writer(out, lineterminator='\\n')\n    writer.writerow(['metric', 'value', 'unit'])\n    writer.writerows(metrics)",
    "docstring": "Formats metrics in csv.",
    "type": "function",
    "file_path": "tensorflow\\third_party\\xla\\xla\\backends\\gpu\\codegen\\tools\\ncu_rep_lib.py",
    "ast_data": "FunctionDef name:write_metrics_csv arg:out arg:metrics arguments arg arg Assign Call Call Call"
  },
  {
    "library": "seaborn",
    "name": "_default_values",
    "source_code": "def _default_values(self, n: int) -> list[MarkerStyle]:\n    markers = ['o', 'X', (4, 0, 45), 'P', (4, 0, 0), (4, 1, 0), '^', (4, 1, 45), 'v']\n    s = 5\n    while len(markers) < n:\n        a = 360 / (s + 1) / 2\n        markers.extend([(s + 1, 1, a), (s + 1, 0, a), (s, 1, 0), (s, 0, 0)])\n        s += 1\n    markers = [MarkerStyle(m) for m in markers[:n]]\n    return markers",
    "docstring": "Build an arbitrarily long list of unique marker styles. Parameters ---------- n : int Number of unique marker specs to generate. Returns ------- markers : list of string or tuples Values for defining :class: objects. All markers will be filled.",
    "type": "method",
    "file_path": "seaborn\\seaborn\\_core\\properties.py",
    "ast_data": "FunctionDef name:_default_values arg:self arg:n arguments arg arg Assign Assign While Compare Call Assign Call Assign Call Return return:yes"
  },
  {
    "library": "pandas",
    "name": "_read_ujson",
    "source_code": "def _read_ujson(self) -> DataFrame | Series:\n    obj: DataFrame | Series\n    if self.lines:\n        if self.chunksize:\n            obj = concat(self)\n        elif self.nrows:\n            lines = list(islice(self.data, self.nrows))\n            lines_json = self._combine_lines(lines)\n            obj = self._get_object_parser(lines_json)\n        else:\n            data = ensure_str(self.data)\n            data_lines = data.split('\\n')\n            obj = self._get_object_parser(self._combine_lines(data_lines))\n    else:\n        obj = self._get_object_parser(self.data)\n    if self.dtype_backend is not lib.no_default:\n        return obj.convert_dtypes(infer_objects=False, dtype_backend=self.dtype_backend)\n    else:\n        return obj",
    "docstring": "Read JSON using the ujson engine.",
    "type": "method",
    "file_path": "pandas\\pandas\\io\\json\\_json.py",
    "ast_data": "FunctionDef name:_read_ujson arg:self arguments arg If If Assign Call If Assign Call Call Assign Call Assign Call Assign Call Assign Call Assign Call Call Assign Call If Compare Return return:yes Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "get_module_rref",
    "source_code": "def get_module_rref(self) -> rpc.RRef[nn.Module]:\n    return self.module_rref",
    "docstring": "Return an :class: (``) pointing to the remote module.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\nn\\api\\remote_module.py",
    "ast_data": "FunctionDef name:get_module_rref arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_slice_helper_var",
    "source_code": "def _slice_helper_var(var, slice_spec):\n    return _slice_helper(var.value(), slice_spec, var)",
    "docstring": "Creates a slice helper object given a variable. This allows creating a sub-tensor from part of the current contents of a variable. See for detailed examples of slicing. This function in addition also allows assignment to a sliced range. This is similar to functionality in Python. However, the syntax is different so that the user can capture the assignment operation for grouping or passing to in TF1. For example, Note that assignments currently do not support NumPy broadcasting semantics. Args: var: An object. slice_spec: The arguments to . Returns: The appropriate slice of \"tensor\", based on \"slice_spec\". As an operator. The operator also has a method that can be used to generate an assignment operator. Raises: ValueError: If a slice range is negative size. TypeError: TypeError: If the slice indices aren't int, slice, ellipsis, tf.newaxis or int32/int64 tensors.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\tensor_getitem_override.py",
    "ast_data": "FunctionDef name:_slice_helper_var arg:var arg:slice_spec arguments arg arg Return return:yes Call Call"
  },
  {
    "library": "pygame",
    "name": "_ft_init_check",
    "source_code": "def _ft_init_check():\n    if not _ft_init:\n        raise error('fastevent system not initialized')",
    "docstring": "Raises error if module is not init",
    "type": "function",
    "file_path": "pygame\\src_py\\fastevent.py",
    "ast_data": "FunctionDef name:_ft_init_check arguments If Raise Call"
  },
  {
    "library": "tensorflow",
    "name": "get_graph_element_name",
    "source_code": "def get_graph_element_name(elem):\n    return elem.name if hasattr(elem, 'name') else str(elem)",
    "docstring": "Obtain the name or string representation of a graph element. If the graph element has the attribute \"name\", return name. Otherwise, return a __str__ representation of the graph element. Certain graph elements, such as s, do not have the attribute \"name\". Args: elem: The graph element in question. Returns: If the attribute 'name' is available, return the name. Otherwise, return str(fetch).",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\debug\\lib\\common.py",
    "ast_data": "FunctionDef name:get_graph_element_name arg:elem arguments arg Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "restride_A_shard_for_fused_all_gather_matmul",
    "source_code": "def restride_A_shard_for_fused_all_gather_matmul(t: torch.Tensor, gather_dim: int) -> torch.Tensor:\n    perm = list(range(len(t.shape)))\n    perm.insert(0, perm.pop(gather_dim))\n    return make_contiguous_for_perm(t, perm)",
    "docstring": "Restride the arg of for optimal perf. See the doc for for detail.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\_symmetric_memory\\__init__.py",
    "ast_data": "FunctionDef name:restride_A_shard_for_fused_all_gather_matmul arg:t arg:gather_dim arguments arg arg Assign Call Call Call Call Call Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "_estimate_gaussian_covariances_tied",
    "source_code": "def _estimate_gaussian_covariances_tied(resp, X, nk, means, reg_covar):\n    avg_X2 = np.dot(X.T, X)\n    avg_means2 = np.dot(nk * means.T, means)\n    covariance = avg_X2 - avg_means2\n    covariance /= nk.sum()\n    covariance.flat[::len(covariance) + 1] += reg_covar\n    return covariance",
    "docstring": "Estimate the tied covariance matrix. Parameters ---------- resp : array-like of shape (n_samples, n_components) X : array-like of shape (n_samples, n_features) nk : array-like of shape (n_components,) means : array-like of shape (n_components, n_features) reg_covar : float Returns ------- covariance : array, shape (n_features, n_features) The tied covariance matrix of the components.",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\mixture\\_gaussian_mixture.py",
    "ast_data": "FunctionDef name:_estimate_gaussian_covariances_tied arg:resp arg:X arg:nk arg:means arg:reg_covar arguments arg arg arg arg arg Assign Call Assign Call Assign Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "MinSizePartitioner",
    "source_code": "@tf_export('distribute.experimental.partitioners.MinSizePartitioner', v1=[])\nclass MinSizePartitioner(Partitioner):\n\n    def __init__(self, min_shard_bytes=256 << 10, max_shards=1, bytes_per_string=16):\n        if min_shard_bytes < 1:\n            raise ValueError(f'Argument `min_shard_bytes` must be positive. Received: {min_shard_bytes}')\n        if max_shards < 1:\n            raise ValueError(f'Argument `max_shards` must be positive. Received: {max_shards}')\n        if bytes_per_string < 1:\n            raise ValueError(f'Argument `bytes_per_string` must be positive. Received: {bytes_per_string}')\n        self._min_shard_bytes = min_shard_bytes\n        self._max_shards = max_shards\n        self._bytes_per_string = bytes_per_string\n\n    def __call__(self, shape, dtype, axis=0):\n        return partitioned_variables.min_max_variable_partitioner(max_partitions=self._max_shards, axis=axis, min_slice_size=self._min_shard_bytes, bytes_per_string_element=self._bytes_per_string)(shape, dtype)",
    "docstring": "Partitioner that allocates a minimum size per shard. This partitioner ensures each shard has at least , and tries to allocate as many shards as possible, i.e., keeping shard size as small as possible. The maximum number of such shards (upper bound) is given by . Examples: >>> partitioner = MinSizePartitioner(min_shard_bytes=4, max_shards=2) >>> partitions = partitioner(tf.TensorShape([6, 1]), tf.float32) >>> [2, 1] >>> partitioner = MinSizePartitioner(min_shard_bytes=4, max_shards=10) >>> partitions = partitioner(tf.TensorShape([6, 1]), tf.float32) >>> [6, 1] >>> >>> # use in ParameterServerStrategy >>> # strategy = tf.distribute.experimental.ParameterServerStrategy( >>> # cluster_resolver=cluster_resolver, variable_partitioner=partitioner)",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\sharded_variable.py",
    "ast_data": "ClassDef name:MinSizePartitioner FunctionDef name:__init__ arg:self arg:min_shard_bytes arg:max_shards arg:bytes_per_string arguments arg arg arg arg If Compare Raise Call If Compare Raise Call If Compare Raise Call Assign Assign Assign FunctionDef name:__call__ arg:self arg:shape arg:dtype arg:axis arguments arg arg arg arg Return return:yes Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, reuse, name='', initializer=None, regularizer=None, caching_device=None, partitioner=None, custom_getter=None, name_scope='', dtype=dtypes.float32, use_resource=None, constraint=None):\n    self._name = name\n    self._initializer = initializer\n    self._regularizer = regularizer\n    self._reuse = reuse\n    self._caching_device = caching_device\n    self._partitioner = partitioner\n    self._custom_getter = custom_getter\n    self._name_scope = name_scope\n    self._dtype = dtype\n    self._use_resource = use_resource\n    self._constraint = constraint\n    if context.executing_eagerly():\n        if self._caching_device is not None:\n            raise NotImplementedError('Caching devices is not yet supported when eager execution is enabled.')\n        self._reuse = AUTO_REUSE\n        self._use_resource = True",
    "docstring": "Creates a new VariableScope with the given properties.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\variable_scope.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:reuse arg:name arg:initializer arg:regularizer arg:caching_device arg:partitioner arg:custom_getter arg:name_scope arg:dtype arg:use_resource arg:constraint arguments arg arg arg arg arg arg arg arg arg arg arg arg Assign Assign Assign Assign Assign Assign Assign Assign Assign Assign Assign If Call If Compare Raise Call Assign Assign"
  },
  {
    "library": "django",
    "name": "InconsistentMigrationHistory",
    "source_code": "class InconsistentMigrationHistory(Exception):\n    pass",
    "docstring": "An applied migration has some of its dependencies not applied.",
    "type": "class",
    "file_path": "django\\django\\db\\migrations\\exceptions.py",
    "ast_data": "ClassDef name:InconsistentMigrationHistory"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, channel):\n    self.NewSession = channel.unary_unary('/tensorflow.ProfileAnalysis/NewSession', request_serializer=third__party_dot_tensorflow_dot_core_dot_profiler_dot_profiler__analysis__pb2.NewProfileSessionRequest.SerializeToString, response_deserializer=third__party_dot_tensorflow_dot_core_dot_profiler_dot_profiler__analysis__pb2.NewProfileSessionResponse.FromString)\n    self.EnumSessions = channel.unary_unary('/tensorflow.ProfileAnalysis/EnumSessions', request_serializer=third__party_dot_tensorflow_dot_core_dot_profiler_dot_profiler__analysis__pb2.EnumProfileSessionsAndToolsRequest.SerializeToString, response_deserializer=third__party_dot_tensorflow_dot_core_dot_profiler_dot_profiler__analysis__pb2.EnumProfileSessionsAndToolsResponse.FromString)\n    self.GetSessionToolData = channel.unary_unary('/tensorflow.ProfileAnalysis/GetSessionToolData', request_serializer=third__party_dot_tensorflow_dot_core_dot_profiler_dot_profiler__analysis__pb2.ProfileSessionDataRequest.SerializeToString, response_deserializer=third__party_dot_tensorflow_dot_core_dot_profiler_dot_profiler__analysis__pb2.ProfileSessionDataResponse.FromString)",
    "docstring": "Constructor. Args: channel: A grpc.Channel.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\tpu\\profiler\\profiler_analysis_pb2_grpc.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:channel arguments arg arg Assign Call Assign Call Assign Call"
  },
  {
    "library": "scipy",
    "name": "RefguideCheck",
    "source_code": "@cli.cls_cmd('refguide-check')\nclass RefguideCheck(Task):\n    ctx = CONTEXT\n    submodule = Option(['--submodule', '-s'], default=None, metavar='SUBMODULE', help='Submodule whose tests to run (cluster, constants, ...)')\n    verbose = Option(['--verbose', '-v'], default=False, is_flag=True, help='verbosity')\n\n    @classmethod\n    def task_meta(cls, **kwargs):\n        kwargs.update(cls.ctx.get())\n        Args = namedtuple('Args', [k for k in kwargs.keys()])\n        args = Args(**kwargs)\n        dirs = Dirs(args)\n        cmd = [f'{sys.executable}', str(dirs.root / 'tools' / 'refguide_check.py')]\n        if args.verbose:\n            cmd += ['-vvv']\n        if args.submodule:\n            cmd += [args.submodule]\n        cmd_str = ' '.join(cmd)\n        return {'actions': [f'env PYTHONPATH={dirs.site} {cmd_str}'], 'task_dep': ['build'], 'io': {'capture': False}}",
    "docstring": ":wrench: Run refguide check.",
    "type": "class",
    "file_path": "scipy\\dev.py",
    "ast_data": "ClassDef name:RefguideCheck Assign Assign Call Assign Call FunctionDef name:task_meta arg:cls arguments arg arg Call Call Assign Call Call Assign Call Assign Call Assign Call If If Assign Call Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "putmask_without_repeat",
    "source_code": "def putmask_without_repeat(values: np.ndarray, mask: npt.NDArray[np.bool_], new: Any) -> None:\n    if getattr(new, 'ndim', 0) >= 1:\n        new = new.astype(values.dtype, copy=False)\n    nlocs = mask.sum()\n    if nlocs > 0 and is_list_like(new) and (getattr(new, 'ndim', 1) == 1):\n        shape = np.shape(new)\n        if nlocs == shape[-1]:\n            np.place(values, mask, new)\n        elif mask.shape[-1] == shape[-1] or shape[-1] == 1:\n            np.putmask(values, mask, new)\n        else:\n            raise ValueError('cannot assign mismatch length to masked array')\n    else:\n        np.putmask(values, mask, new)",
    "docstring": "np.putmask will truncate or repeat if is a listlike with len(new) != len(values). We require an exact match. Parameters ---------- values : np.ndarray mask : np.ndarray[bool] new : Any",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\array_algos\\putmask.py",
    "ast_data": "FunctionDef name:putmask_without_repeat arg:values arg:mask arg:new arguments arg arg arg If Compare Call Assign Call Assign Call If BoolOp Compare Call Compare Call Assign Call If Compare Call If BoolOp Compare Compare Call Raise Call Call"
  },
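A behavior sketch; the import path is the private pandas one given in the record's file_path:

```python
import numpy as np
from pandas.core.array_algos.putmask import putmask_without_repeat

values = np.arange(5.0)
mask = values > 2                      # selects exactly two positions
putmask_without_repeat(values, mask, np.array([10.0, 11.0]))
print(values)                          # [ 0.  1.  2. 10. 11.]

# np.putmask would silently recycle a too-short `new`; this helper raises:
# putmask_without_repeat(values, mask, np.array([7.0, 8.0, 9.0]))
# ValueError: cannot assign mismatch length to masked array
```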
  {
    "library": "tensorflow",
    "name": "_deserialize_metric",
    "source_code": "def _deserialize_metric(metric_config):\n    from tensorflow.python.keras import metrics as metrics_module\n    if metric_config in ['accuracy', 'acc', 'crossentropy', 'ce']:\n        return metric_config\n    return metrics_module.deserialize(metric_config)",
    "docstring": "Deserialize metrics, leaving special strings untouched.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\saving\\saving_utils.py",
    "ast_data": "FunctionDef name:_deserialize_metric arg:metric_config arguments arg If Compare Return return:yes Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "merge",
    "source_code": "def merge(self, x, y):\n    xr = self[x]\n    yr = self[y]\n    if self._indices[xr] == self._indices[yr]:\n        return False\n    sizes = self._sizes\n    if (sizes[xr], self._indices[yr]) < (sizes[yr], self._indices[xr]):\n        xr, yr = (yr, xr)\n    self._parents[yr] = xr\n    self._sizes[xr] += self._sizes[yr]\n    self._nbrs[xr], self._nbrs[yr] = (self._nbrs[yr], self._nbrs[xr])\n    self.n_subsets -= 1\n    return True",
    "docstring": "Merge the subsets of and . The smaller subset (the child) is merged into the larger subset (the parent). If the subsets are of equal size, the root element which was first inserted into the disjoint set is selected as the parent. Parameters ---------- x, y : hashable object Elements to merge. Returns ------- merged : bool True if and were in disjoint sets, False otherwise.",
    "type": "method",
    "file_path": "scipy\\scipy\\_lib\\_disjoint_set.py",
    "ast_data": "FunctionDef name:merge arg:self arg:x arg:y arguments arg arg arg Assign Assign If Compare Return return:yes Assign If Compare Assign Assign Assign Return return:yes"
  },
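The same union-by-size behaviour is observable through SciPy's public DisjointSet in scipy.cluster.hierarchy; a short usage sketch (the element values are invented):

```python
from scipy.cluster.hierarchy import DisjointSet

ds = DisjointSet([1, 2, 3, 'a', 'b'])
print(ds.merge(1, 2))        # True: 1 and 2 were in different subsets
print(ds.merge(2, 1))        # False: already connected, nothing to do
ds.merge('a', 'b')
print(ds.connected(1, 'b'))  # False: {1, 2} and {'a', 'b'} are disjoint
print(ds.n_subsets)          # 3 -> {1, 2}, {3}, {'a', 'b'}
```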
  {
    "library": "django",
    "name": "_clean_credentials",
    "source_code": "@sensitive_variables('credentials')\ndef _clean_credentials(credentials):\n    SENSITIVE_CREDENTIALS = re.compile('api|token|key|secret|password|signature', re.I)\n    CLEANSED_SUBSTITUTE = '********************'\n    for key in credentials:\n        if SENSITIVE_CREDENTIALS.search(key):\n            credentials[key] = CLEANSED_SUBSTITUTE\n    return credentials",
    "docstring": "Clean a dictionary of credentials of potentially sensitive info before sending to less secure functions. Not comprehensive - intended for user_login_failed signal",
    "type": "function",
    "file_path": "django\\django\\contrib\\auth\\__init__.py",
    "ast_data": "FunctionDef name:_clean_credentials arg:credentials arguments arg Assign Call Assign For If Call Assign Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "get_metadata_routing",
    "source_code": "def get_metadata_routing(self):\n    router = MetadataRouter(owner=self.__class__.__name__).add_self_request(self).add(estimator=self._get_estimator(), method_mapping=MethodMapping().add(caller='fit', callee='fit')).add(splitter=self.cv, method_mapping=MethodMapping().add(caller='fit', callee='split'))\n    return router",
    "docstring": "Get metadata routing of this object. Please check :ref: on how the routing mechanism works. Returns ------- routing : MetadataRouter A :class: encapsulating routing information.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\calibration.py",
    "ast_data": "FunctionDef name:get_metadata_routing arg:self arguments arg Assign Call Call Call Call Call Call Call Call Call Return return:yes"
  },
  {
    "library": "kornia",
    "name": "from_wxyz",
    "source_code": "@classmethod\ndef from_wxyz(cls, wxyz: Tensor) -> So3:\n    return cls(Quaternion(wxyz))",
    "docstring": "Create So3 from a tensor representing a quaternion. Args: wxyz: the quaternion to convert of shape :math:. Example: >>> q = torch.tensor([1., 0., 0., 0.]) >>> s = So3.from_wxyz(q) >>> s Parameter containing: tensor([1., 0., 0., 0.], requires_grad=True)",
    "type": "method",
    "file_path": "kornia\\kornia\\geometry\\liegroup\\so3.py",
    "ast_data": "FunctionDef name:from_wxyz arg:cls arg:wxyz arguments arg arg Return return:yes Call Call"
  },
  {
    "library": "django",
    "name": "to_python",
    "source_code": "def to_python(self, value):\n    if isinstance(value, str) and value.lower() in ('false', '0'):\n        value = False\n    else:\n        value = bool(value)\n    return super().to_python(value)",
    "docstring": "Return a Python boolean object.",
    "type": "method",
    "file_path": "django\\django\\forms\\fields.py",
    "ast_data": "FunctionDef name:to_python arg:self arg:value arguments arg arg If BoolOp Call Compare Call Assign Assign Call Return return:yes Call Call"
  },
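The wrinkle here is that non-empty strings are truthy in Python, so bool('false') would be True; the explicit string check is what makes form input behave sensibly. A standalone sketch of just that coercion, without the Django superclass call:

```python
def to_python(value):
    # 'false' and '0' (case-insensitive) are explicitly mapped to False;
    # anything else falls through to ordinary truthiness.
    if isinstance(value, str) and value.lower() in ('false', '0'):
        return False
    return bool(value)

assert to_python('False') is False   # bool('False') alone would be True
assert to_python('0') is False
assert to_python('on') is True
assert to_python('') is False
```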
  {
    "library": "tensorflow",
    "name": "convert_n_to_tensor_or_composite",
    "source_code": "def convert_n_to_tensor_or_composite(values, dtype=None, name=None) -> list[Union[EagerTensor, SymbolicTensor, composite_tensor.CompositeTensor, type(None)]]:\n    return internal_convert_n_to_tensor_or_composite(values=values, dtype=dtype, name=name, as_ref=False)",
    "docstring": "Converts to a list of or objects. Any objects in are returned unmodified. Args: values: A list of , convert_to_tensor()DTypeTensorCompositeTensorTensoriname + '_' + iTensorCompositeTensorvalues`. RuntimeError: If a registered conversion function returns an invalid value.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\ops.py",
    "ast_data": "FunctionDef name:convert_n_to_tensor_or_composite arg:values arg:dtype arg:name arguments arg arg arg Return return:yes Call Call"
  },
  {
    "library": "matplotlib",
    "name": "_init",
    "source_code": "def _init(self):\n    self.label.set(x=0, y=0.5, verticalalignment='bottom', horizontalalignment='center', rotation='vertical', rotation_mode='anchor', transform=mtransforms.blended_transform_factory(mtransforms.IdentityTransform(), self.axes.transAxes))\n    self.label_position = 'left'\n    if mpl.rcParams['ytick.labelcolor'] == 'inherit':\n        tick_color = mpl.rcParams['ytick.color']\n    else:\n        tick_color = mpl.rcParams['ytick.labelcolor']\n    self.offsetText.set(x=0, y=0.5, verticalalignment='baseline', horizontalalignment='left', transform=mtransforms.blended_transform_factory(self.axes.transAxes, mtransforms.IdentityTransform()), fontsize=mpl.rcParams['ytick.labelsize'], color=tick_color)\n    self.offset_text_position = 'left'",
    "docstring": "Initialize the label and offsetText instance values and / .",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\axis.py",
    "ast_data": "FunctionDef name:_init arg:self arguments arg Call Call Call Assign If Compare Assign Assign Call Call Call Assign"
  },
  {
    "library": "tensorflow",
    "name": "args_to_matching_eager",
    "source_code": "def args_to_matching_eager(l, ctx, allowed_dtypes, default_dtype=None):\n    del ctx\n    if not l and default_dtype is not None:\n        return (default_dtype, [])\n    for x in l:\n        if not isinstance(x, core_types.Value):\n            break\n    else:\n        return (l[0]._datatype_enum(), l)\n    dtype = None\n    for t in l:\n        if isinstance(t, core_types.Value):\n            dtype = t.dtype\n            break\n    if dtype is None:\n        ret = []\n        for t in l:\n            tensor = None\n            if dtype is None and allowed_dtypes:\n                tensor = tensor_conversion_registry.convert(t)\n                if tensor.dtype not in allowed_dtypes:\n                    tensor = None\n            if tensor is None:\n                tensor = tensor_conversion_registry.convert(t, dtype, preferred_dtype=default_dtype)\n            ret.append(tensor)\n            if dtype is None:\n                dtype = tensor.dtype\n    else:\n        ret = [tensor_conversion_registry.convert(t, dtype) for t in l]\n    keras_symbolic_tensors = [x for x in ret if _is_keras_symbolic_tensor(x)]\n    if keras_symbolic_tensors:\n        raise core._SymbolicException('Using symbolic output of a Keras layer during eager execution {}'.format(keras_symbolic_tensors))\n    return (dtype.as_datatype_enum, ret)",
    "docstring": "Convert sequence to eager same-type Tensors.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\execute.py",
    "ast_data": "FunctionDef name:args_to_matching_eager arg:l arg:ctx arg:allowed_dtypes arg:default_dtype arguments arg arg arg arg If BoolOp Compare Return return:yes For If Call Return return:yes Call Assign For If Call Assign If Compare Assign For Assign If BoolOp Compare Assign Call If Compare Assign If Compare Assign Call Call If Compare Assign Assign Call Assign Call If Raise Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_wrap_and_check_outputs",
    "source_code": "def _wrap_and_check_outputs(self, outputs, single_output_default_name, error_label=None):\n    if not isinstance(outputs, dict):\n        outputs = {single_output_default_name: outputs}\n    output_dict = {}\n    for key, value in outputs.items():\n        error_name = error_label or single_output_default_name\n        key = self._check_output_key(key, error_name)\n        if not isinstance(value, tensor.Tensor):\n            raise ValueError('{} output value must be a Tensor; got {}.'.format(error_name, value))\n        output_dict[key] = value\n    return output_dict",
    "docstring": "Wraps raw tensors as dicts and checks type. Note that we create a new dict here so that we can overwrite the keys if necessary. Args: outputs: A or a dict of string to . single_output_default_name: A string key for use in the output dict if the provided is a raw tensor. error_label: descriptive string for use in error messages. If none, single_output_default_name will be used. Returns: A dict of tensors Raises: ValueError: if the outputs dict keys are not strings or tuples of strings or the values are not Tensors.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\saved_model\\model_utils\\export_output.py",
    "ast_data": "FunctionDef name:_wrap_and_check_outputs arg:self arg:outputs arg:single_output_default_name arg:error_label arguments arg arg arg arg If Call Assign Assign For Call Assign BoolOp Assign Call If Call Raise Call Call Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "any",
    "source_code": "@dispatch.add_dispatch_support\n@doc_controls.do_not_generate_docs\ndef any(x, axis=None, keepdims=False):\n    x = math_ops.cast(x, dtypes_module.bool)\n    return math_ops.reduce_any(x, axis, keepdims)",
    "docstring": "Bitwise reduction (logical OR). Args: x: Tensor or variable. axis: axis along which to perform the reduction. keepdims: whether the drop or broadcast the reduction axes. Returns: A uint8 tensor (0s and 1s).",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\backend.py",
    "ast_data": "FunctionDef name:any arg:x arg:axis arg:keepdims arguments arg arg arg Assign Call Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "_poly1d",
    "source_code": "def _poly1d(c_or_r, *, xp):\n    c_or_r = xpx.atleast_nd(c_or_r, ndim=1, xp=xp)\n    if c_or_r.ndim > 1:\n        raise ValueError('Polynomial must be 1d only.')\n    c_or_r = _trim_zeros(c_or_r, trim='f')\n    if c_or_r.shape[0] == 0:\n        c_or_r = xp.asarray([0], dtype=c_or_r.dtype)\n    return c_or_r",
    "docstring": "Constructor of np.poly1d object from an array of coefficients (r=False)",
    "type": "function",
    "file_path": "scipy\\scipy\\signal\\_polyutils.py",
    "ast_data": "FunctionDef name:_poly1d arg:c_or_r arguments arg arg Assign Call If Compare Raise Call Assign Call If Compare Assign Call Return return:yes"
  },
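The front-trimming step has a direct NumPy analogue; a small sketch of the same normalization using public NumPy functions (the coefficient vector is made up):

```python
import numpy as np

# 0*x^2 + 2x + 3 is the same polynomial as 2x + 3, so leading zeros
# in the coefficient vector are redundant and get front-trimmed.
c = np.array([0.0, 2.0, 3.0])
c = np.trim_zeros(c, trim='f')
if c.size == 0:
    # An all-zero input collapses to the zero polynomial [0].
    c = np.array([0.0])
print(np.poly1d(c))  # prints "2 x + 3"
```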
  {
    "library": "matplotlib",
    "name": "draw_without_rendering",
    "source_code": "def draw_without_rendering(self):\n    renderer = _get_renderer(self)\n    with renderer._draw_disabled():\n        self.draw(renderer)",
    "docstring": "Draw the figure with no output. Useful to get the final size of artists that require a draw before their size is known (e.g. text).",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\figure.py",
    "ast_data": "FunctionDef name:draw_without_rendering arg:self arguments arg Assign Call With Call Call"
  },
  {
    "library": "pandas",
    "name": "nanvar",
    "source_code": "@disallow('M8', 'm8')\n@bottleneck_switch(ddof=1)\ndef nanvar(values: np.ndarray, *, axis: AxisInt | None=None, skipna: bool=True, ddof: int=1, mask=None):\n    dtype = values.dtype\n    mask = _maybe_get_mask(values, skipna, mask)\n    if dtype.kind in 'iu':\n        values = values.astype('f8')\n        if mask is not None:\n            values[mask] = np.nan\n    if values.dtype.kind == 'f':\n        count, d = _get_counts_nanvar(values.shape, mask, axis, ddof, values.dtype)\n    else:\n        count, d = _get_counts_nanvar(values.shape, mask, axis, ddof)\n    if skipna and mask is not None:\n        values = values.copy()\n        np.putmask(values, mask, 0)\n    avg = _ensure_numeric(values.sum(axis=axis, dtype=np.float64)) / count\n    if axis is not None:\n        avg = np.expand_dims(avg, axis)\n    sqr = _ensure_numeric((avg - values) ** 2)\n    if mask is not None:\n        np.putmask(sqr, mask, 0)\n    result = sqr.sum(axis=axis, dtype=np.float64) / d\n    if dtype.kind == 'f':\n        result = result.astype(dtype, copy=False)\n    return result",
    "docstring": "Compute the variance along given axis while ignoring NaNs Parameters ---------- values : ndarray axis : int, optional skipna : bool, default True ddof : int, default 1 Delta Degrees of Freedom. The divisor used in calculations is N - ddof, where N represents the number of elements. mask : ndarray[bool], optional nan-mask if known Returns ------- result : float Unless input is a float array, in which case use the same precision as the input array. Examples -------- >>> from pandas.core import nanops >>> s = pd.Series([1, np.nan, 2, 3]) >>> nanops.nanvar(s.values) 1.0",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\nanops.py",
    "ast_data": "FunctionDef name:nanvar arg:values arguments arg arg arg arg arg Assign Assign Call If Compare Assign Call If Compare Assign If Compare Assign Call Assign Call If BoolOp Compare Assign Call Call Assign Call Call If Compare Assign Call Assign Call If Compare Call Assign Call If Compare Assign Call Return return:yes Call Call"
  },
  {
    "library": "django",
    "name": "generate_filename",
    "source_code": "def generate_filename(self, instance, filename):\n    if callable(self.upload_to):\n        filename = self.upload_to(instance, filename)\n    else:\n        dirname = datetime.datetime.now().strftime(str(self.upload_to))\n        filename = posixpath.join(dirname, filename)\n    filename = validate_file_name(filename, allow_relative_path=True)\n    return self.storage.generate_filename(filename)",
    "docstring": "Apply (if callable) or prepend (if a string) upload_to to the filename, then delegate further processing of the name to the storage backend. Until the storage layer, all file paths are expected to be Unix style (with forward slashes).",
    "type": "method",
    "file_path": "django\\django\\db\\models\\fields\\files.py",
    "ast_data": "FunctionDef name:generate_filename arg:self arg:instance arg:filename arguments arg arg arg If Call Assign Call Assign Call Call Call Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_update_snapshot",
    "source_code": "def _update_snapshot(self):\n    self._attribute_sentinel.invalidate_all()\n    if self._dirty:\n        return\n    self._self_last_wrapped_dict_snapshot = dict(self)",
    "docstring": "Acknowledges tracked changes to the wrapped dict.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\trackable\\data_structures.py",
    "ast_data": "FunctionDef name:_update_snapshot arg:self arguments arg Call If Return return:no Assign Call"
  },
  {
    "library": "kornia",
    "name": "bbox_to_mask",
    "source_code": "def bbox_to_mask(boxes: torch.Tensor, width: int, height: int) -> torch.Tensor:\n    validate_bbox(boxes)\n    mask = zeros((len(boxes), height + 2, width + 2), dtype=boxes.dtype, device=boxes.device)\n    box_i = (boxes + 1).long()\n    for msk, bx in zip(mask, box_i):\n        msk[bx[0, 1]:bx[2, 1] + 1, bx[0, 0]:bx[1, 0] + 1] = 1.0\n    return mask[:, 1:-1, 1:-1]",
    "docstring": "Convert 2D bounding boxes to masks. Covered area is 1. and the remaining is 0. Args: boxes: a tensor containing the coordinates of the bounding boxes to be extracted. The tensor must have the shape of Bx4x2, where each box is defined in the following `` order: top-left, top-right, bottom-right and bottom-left. The coordinates must be in the x, y order. width: width of the masked image. height: height of the masked image. Returns: the output mask tensor. Note: It is currently non-differentiable. Examples: >>> boxes = torch.tensor([[ ... [1., 1.], ... [3., 1.], ... [3., 2.], ... [1., 2.], ... ]]) # 1x4x2 >>> bbox_to_mask(boxes, 5, 5) tensor([[[0., 0., 0., 0., 0.], [0., 1., 1., 1., 0.], [0., 1., 1., 1., 0.], [0., 0., 0., 0., 0.], [0., 0., 0., 0., 0.]]])",
    "type": "function",
    "file_path": "kornia\\kornia\\geometry\\bbox.py",
    "ast_data": "FunctionDef name:bbox_to_mask arg:boxes arg:width arg:height arguments arg arg arg Call Assign Call Call Assign Call For Call Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "UpdateContext",
    "source_code": "class UpdateContext(object):\n    __slots__ = ['_replica_id', '_old_replica_id']\n\n    def __init__(self, replica_id):\n        self._replica_id = replica_id\n        self._old_replica_id = None\n\n    def __enter__(self):\n        self._old_replica_id = get_update_replica_id()\n        _update_replica_id.current = self._replica_id\n\n    def __exit__(self, exception_type, exception_value, traceback):\n        del exception_type, exception_value, traceback\n        _update_replica_id.current = self._old_replica_id",
    "docstring": "Context manager when you are in or .",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\distribute_lib.py",
    "ast_data": "ClassDef name:UpdateContext Assign FunctionDef name:__init__ arg:self arg:replica_id arguments arg arg Assign Assign FunctionDef name:__enter__ arg:self arguments arg Assign Call Assign FunctionDef name:__exit__ arg:self arg:exception_type arg:exception_value arg:traceback arguments arg arg arg arg Assign"
  },
  {
    "library": "scikit-learn",
    "name": "in_y_true_range",
    "source_code": "def in_y_true_range(self, y):\n    return self.interval_y_true.includes(y) and np.all(y.astype(int) == y)",
    "docstring": "Return True if y is in the valid range of y_true. Parameters ---------- y : ndarray",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\_loss\\loss.py",
    "ast_data": "FunctionDef name:in_y_true_range arg:self arg:y arguments arg arg Return return:yes BoolOp Call Call Compare Call"
  },
  {
    "library": "cherrypy",
    "name": "read_process",
    "source_code": "def read_process(cmd, args=''):\n    fullcmd = '%s %s' % (cmd, args)\n    pipeout = popen(fullcmd)\n    try:\n        firstline = pipeout.readline()\n        cmd_not_found = re.search(b'(not recognized|No such file|not found)', firstline, re.IGNORECASE)\n        if cmd_not_found:\n            raise IOError('%s must be on your system path.' % cmd)\n        output = firstline + pipeout.read()\n    finally:\n        pipeout.close()\n    return output",
    "docstring": "Return a subprocess standard output.",
    "type": "function",
    "file_path": "cherrypy\\cherrypy\\_cpmodpy.py",
    "ast_data": "FunctionDef name:read_process arg:cmd arg:args arguments arg arg Assign Assign Call Try Assign Call Assign Call If Raise Call Assign Call Call Return return:yes"
  },
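A rough modern equivalent built on subprocess rather than os.popen, keeping the same heuristic scan of the output for a missing command; a sketch, not CherryPy's actual code:

```python
import re
import subprocess

def read_process(cmd, args=''):
    # Run the command through the shell and capture both streams.
    result = subprocess.run(f'{cmd} {args}', shell=True, capture_output=True)
    # Same heuristic as the original: look for a "command not found"
    # style message anywhere in the combined output.
    combined = result.stdout + result.stderr
    if re.search(b'(not recognized|No such file|not found)', combined, re.IGNORECASE):
        raise IOError('%s must be on your system path.' % cmd)
    return result.stdout
```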
  {
    "library": "pytorch",
    "name": "Hardsigmoid",
    "source_code": "class Hardsigmoid(Module):\n    __constants__ = ['inplace']\n    inplace: bool\n\n    def __init__(self, inplace: bool=False) -> None:\n        super().__init__()\n        self.inplace = inplace\n\n    def forward(self, input: Tensor) -> Tensor:\n        return F.hardsigmoid(input, self.inplace)",
    "docstring": "Applies the Hardsigmoid function element-wise. Hardsigmoid is defined as: .. math:: \\text{Hardsigmoid}(x) = \\begin{cases} 0 & \\text{if~} x \\le -3, \\\\ 1 & \\text{if~} x \\ge +3, \\\\ x / 6 + 1 / 2 & \\text{otherwise} \\end{cases} Args: inplace: can optionally do the operation in-place. Default: `(*)*(*)`, same shape as the input. .. image:: ../scripts/activation_images/Hardsigmoid.png Examples:: >>> m = nn.Hardsigmoid() >>> input = torch.randn(2) >>> output = m(input)",
    "type": "class",
    "file_path": "pytorch\\torch\\nn\\modules\\activation.py",
    "ast_data": "ClassDef name:Hardsigmoid Assign FunctionDef name:__init__ arg:self arg:inplace arguments arg arg Call Call Assign FunctionDef name:forward arg:self arg:input arguments arg arg Return return:yes Call"
  },
  {
    "library": "django",
    "name": "reset_refcounts",
    "source_code": "def reset_refcounts(self, to_counts):\n    for alias, cur_refcount in self.alias_refcount.copy().items():\n        unref_amount = cur_refcount - to_counts.get(alias, 0)\n        self.unref_alias(alias, unref_amount)",
    "docstring": "Reset reference counts for aliases so that they match the value passed in .",
    "type": "method",
    "file_path": "django\\django\\db\\models\\sql\\query.py",
    "ast_data": "FunctionDef name:reset_refcounts arg:self arg:to_counts arguments arg arg For Call Call Assign Call Call"
  },
  {
    "library": "pytorch",
    "name": "__init__",
    "source_code": "def __init__(self, c):\n    self.c = c",
    "docstring": ":param c: character or number",
    "type": "method",
    "file_path": "pytorch\\torch\\fx\\experimental\\migrate_gradual_types\\constraint.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:c arguments arg arg Assign"
  },
  {
    "library": "cherrypy",
    "name": "encode_stream",
    "source_code": "def encode_stream(self, encoding):\n    if encoding in self.attempted_charsets:\n        return False\n    self.attempted_charsets.add(encoding)\n\n    def encoder(body):\n        for chunk in body:\n            if isinstance(chunk, str):\n                chunk = chunk.encode(encoding, self.errors)\n            yield chunk\n    self.body = encoder(self.body)\n    return True",
    "docstring": "Encode a streaming response body. Use a generator wrapper, and just pray it works as the stream is being written out.",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\lib\\encoding.py",
    "ast_data": "FunctionDef name:encode_stream arg:self arg:encoding arguments arg arg If Compare Return return:yes Call FunctionDef name:encoder arg:body arguments arg For If Call Assign Call Assign Call Return return:yes"
  },
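The core trick is that the wrapper defers encoding until the chunks are actually iterated, so nothing is buffered up front. A self-contained sketch of the generator (the names and sample chunks are mine):

```python
def encoder(body, encoding='utf-8', errors='strict'):
    # Lazily encode a streaming body: text chunks are encoded as they
    # are yielded; byte chunks pass through untouched.
    for chunk in body:
        if isinstance(chunk, str):
            chunk = chunk.encode(encoding, errors)
        yield chunk

stream = encoder(iter(['héllo ', b'raw-bytes ', 'wörld']))
print(b''.join(stream))  # b'h\xc3\xa9llo raw-bytes w\xc3\xb6rld'
```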
  {
    "library": "tensorflow",
    "name": "sparse_precision_at_k",
    "source_code": "@tf_export(v1=['metrics.sparse_precision_at_k'])\n@deprecated(None, 'Use precision_at_k instead')\ndef sparse_precision_at_k(labels, predictions, k, class_id=None, weights=None, metrics_collections=None, updates_collections=None, name=None):\n    return precision_at_k(labels=labels, predictions=predictions, k=k, class_id=class_id, weights=weights, metrics_collections=metrics_collections, updates_collections=updates_collections, name=name)",
    "docstring": "Renamed to , please use that method instead.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\metrics_impl.py",
    "ast_data": "FunctionDef name:sparse_precision_at_k arg:labels arg:predictions arg:k arg:class_id arg:weights arg:metrics_collections arg:updates_collections arg:name arguments arg arg arg arg arg arg arg arg Return return:yes Call Call Call"
  },
  {
    "library": "scipy",
    "name": "accept_reject",
    "source_code": "def accept_reject(self, res_new, res_old):\n    with np.errstate(invalid='ignore'):\n        prod = -(res_new.fun - res_old.fun) * self.beta\n        w = math.exp(min(0, prod))\n    rand = self.rng.uniform()\n    return w >= rand and (res_new.success or not res_old.success)",
    "docstring": "Assuming the local search underlying res_new was successful: If new energy is lower than old, it will always be accepted. If new is higher than old, there is a chance it will be accepted, less likely for larger differences.",
    "type": "method",
    "file_path": "scipy\\scipy\\optimize\\_basinhopping.py",
    "ast_data": "FunctionDef name:accept_reject arg:self arg:res_new arg:res_old arguments arg arg arg With Call Assign Assign Call Call Assign Call Return return:yes BoolOp Compare BoolOp"
  },
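This is the Metropolis acceptance criterion; a stripped-down sketch over raw energies, with beta and the RNG as placeholder assumptions:

```python
import math
import numpy as np

rng = np.random.default_rng(0)
beta = 1.0  # inverse "temperature"

def accept(e_new, e_old):
    # Downhill moves (e_new < e_old) give w == 1 and are always taken;
    # uphill moves are taken with probability exp(-beta * (e_new - e_old)).
    w = math.exp(min(0.0, -(e_new - e_old) * beta))
    return w >= rng.uniform()

print(accept(1.0, 2.0))  # always True: the energy decreased
print(accept(2.0, 1.0))  # True with probability exp(-1) ~= 0.37
```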
  {
    "library": "pandas",
    "name": "GroupByNumbaAgg",
    "source_code": "class GroupByNumbaAgg(GroupByCythonAgg):\n\n    def setup(self, dtype, method):\n        if method in _numba_unsupported_methods:\n            raise NotImplementedError\n        super().setup(dtype, method)\n\n    def time_frame_agg(self, dtype, method):\n        self.df.groupby('key').agg(method, engine='numba')",
    "docstring": "Benchmarks specifically targeting our numba aggregation algorithms (using a big enough dataframe with simple key, so a large part of the time is actually spent in the grouped aggregation).",
    "type": "class",
    "file_path": "pandas\\asv_bench\\benchmarks\\groupby.py",
    "ast_data": "ClassDef name:GroupByNumbaAgg FunctionDef name:setup arg:self arg:dtype arg:method arguments arg arg arg If Compare Raise Call Call FunctionDef name:time_frame_agg arg:self arg:dtype arg:method arguments arg arg arg Call Call"
  },
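For reference, the call shape being benchmarked; this requires numba to be installed, the first call pays a JIT compilation cost, and the data here is synthetic:

```python
import numpy as np
import pandas as pd

df = pd.DataFrame({
    'key': np.random.randint(0, 100, 1_000_000),
    'val': np.random.rand(1_000_000),
})

# engine='numba' routes supported string aliases ('mean', 'sum', ...)
# through numba-compiled grouped kernels; later calls reuse the cache.
out = df.groupby('key').agg('mean', engine='numba')
```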
  {
    "library": "matplotlib",
    "name": "ge",
    "source_code": "def ge(self, x):\n    d, m = divmod(x, self.step)\n    if self.closeto(m / self.step, 0):\n        return d\n    return d + 1",
    "docstring": "Return the smallest n: n*step >= x.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\ticker.py",
    "ast_data": "FunctionDef name:ge arg:self arg:x arguments arg arg Assign Call If Call Return return:yes Return return:yes"
  },
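A standalone sketch of the same ceiling-to-step arithmetic, with a made-up tolerance standing in for the class's closeto helper:

```python
def ge(x, step, tol=1e-10):
    # Smallest n with n * step >= x; the tolerance absorbs floating-point
    # residue when x already sits exactly on a step boundary.
    d, m = divmod(x, step)
    if abs(m / step) < tol:
        return d
    return d + 1

assert ge(10.0, 2.5) == 4  # 4 * 2.5 == 10.0 exactly
assert ge(10.1, 2.5) == 5  # next multiple at or above 10.1 is 12.5
```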
  {
    "library": "tensorflow",
    "name": "do_encode",
    "source_code": "def do_encode(self, numpy_value, encode_fn):\n    del encode_fn\n    encoded_numpy = struct_pb2.StructuredValue()\n    encoded_numpy.numpy_value.CopyFrom(tensor_util.make_tensor_proto(numpy_value))\n    return encoded_numpy",
    "docstring": "Returns an encoded for .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\constant_op.py",
    "ast_data": "FunctionDef name:do_encode arg:self arg:numpy_value arg:encode_fn arguments arg arg arg Assign Call Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "edges",
    "source_code": "@property\ndef edges(self):\n    for n, successors in self._succ.items():\n        for succ in successors:\n            yield (n, succ)",
    "docstring": "Returns an iterator over all edges (u, v) in the graph",
    "type": "method",
    "file_path": "pytorch\\torch\\package\\_digraph.py",
    "ast_data": "FunctionDef name:edges arg:self arguments arg For Call For"
  },
  {
    "library": "django",
    "name": "CircularDependencyError",
    "source_code": "class CircularDependencyError(Exception):\n    pass",
    "docstring": "There's an impossible-to-resolve circular dependency.",
    "type": "class",
    "file_path": "django\\django\\db\\migrations\\exceptions.py",
    "ast_data": "ClassDef name:CircularDependencyError"
  },
  {
    "library": "pytorch",
    "name": "add_scalars",
    "source_code": "def add_scalars(self, main_tag, tag_scalar_dict, global_step=None, walltime=None):\n    torch._C._log_api_usage_once('tensorboard.logging.add_scalars')\n    walltime = time.time() if walltime is None else walltime\n    fw_logdir = self._get_file_writer().get_logdir()\n    for tag, scalar_value in tag_scalar_dict.items():\n        fw_tag = fw_logdir + '/' + main_tag.replace('/', '_') + '_' + tag\n        assert self.all_writers is not None\n        if fw_tag in self.all_writers.keys():\n            fw = self.all_writers[fw_tag]\n        else:\n            fw = FileWriter(fw_tag, self.max_queue, self.flush_secs, self.filename_suffix)\n            self.all_writers[fw_tag] = fw\n        fw.add_summary(scalar(main_tag, scalar_value), global_step, walltime)",
    "docstring": "Add many scalar data to summary. Args: main_tag (str): The parent name for the tags tag_scalar_dict (dict): Key-value pair storing the tag and corresponding values global_step (int): Global step value to record walltime (float): Optional override default walltime (time.time()) seconds after epoch of event Examples:: from torch.utils.tensorboard import SummaryWriter writer = SummaryWriter() r = 5 for i in range(100): writer.add_scalars('run_14h', {'xsinx':i*np.sin(i/r), 'xcosx':i*np.cos(i/r), 'tanx': np.tan(i/r)}, i) writer.close() # This call adds three values to the same scalar plot with the tag # 'run_14h' in TensorBoard's scalar section. Expected result: .. image:: _static/img/tensorboard/add_scalars.png :scale: 50 %",
    "type": "method",
    "file_path": "pytorch\\torch\\utils\\tensorboard\\writer.py",
    "ast_data": "FunctionDef name:add_scalars arg:self arg:main_tag arg:tag_scalar_dict arg:global_step arg:walltime arguments arg arg arg arg arg Call Assign Compare Call Assign Call Call For Call Assign Call Compare If Compare Call Assign Assign Call Assign Call Call"
  },
  {
    "library": "pandas",
    "name": "nanprod",
    "source_code": "@disallow('M8', 'm8')\n@maybe_operate_rowwise\ndef nanprod(values: np.ndarray, *, axis: AxisInt | None=None, skipna: bool=True, min_count: int=0, mask: npt.NDArray[np.bool_] | None=None) -> float:\n    mask = _maybe_get_mask(values, skipna, mask)\n    if skipna and mask is not None:\n        values = values.copy()\n        values[mask] = 1\n    result = values.prod(axis)\n    return _maybe_null_out(result, axis, mask, values.shape, min_count=min_count)",
    "docstring": "Parameters ---------- values : ndarray[dtype] axis : int, optional skipna : bool, default True min_count: int, default 0 mask : ndarray[bool], optional nan-mask if known Returns ------- Dtype The product of all elements on a given axis. ( NaNs are treated as 1) Examples -------- >>> from pandas.core import nanops >>> s = pd.Series([1, 2, 3, np.nan]) >>> nanops.nanprod(s.values) np.float64(6.0)",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\nanops.py",
    "ast_data": "FunctionDef name:nanprod arg:values arguments arg arg arg arg arg Assign Call If BoolOp Compare Assign Call Assign Assign Call Return return:yes Call Call"
  },
  {
    "library": "scipy",
    "name": "_coo_to_compressed",
    "source_code": "def _coo_to_compressed(self, swap, copy=False):\n    M, N = swap(self._shape_as_2d)\n    idx_dtype = self._get_index_dtype(self.coords, maxval=max(self.nnz, N))\n    if self.ndim == 1:\n        indices = self.coords[0].copy() if copy else self.coords[0]\n        nnz = len(indices)\n        indptr = np.array([0, nnz], dtype=idx_dtype)\n        data = self.data.copy() if copy else self.data\n        return (indptr, indices, data, self.shape)\n    major, minor = swap(self.coords)\n    nnz = len(major)\n    major = major.astype(idx_dtype, copy=False)\n    minor = minor.astype(idx_dtype, copy=False)\n    indptr = np.empty(M + 1, dtype=idx_dtype)\n    indices = np.empty_like(minor, dtype=idx_dtype)\n    data = np.empty_like(self.data, dtype=self.dtype)\n    coo_tocsr(M, N, nnz, major, minor, self.data, indptr, indices, data)\n    return (indptr, indices, data, self.shape)",
    "docstring": "convert (shape, coords, data) to (indptr, indices, data, shape)",
    "type": "method",
    "file_path": "scipy\\scipy\\sparse\\_coo.py",
    "ast_data": "FunctionDef name:_coo_to_compressed arg:self arg:swap arg:copy arguments arg arg arg Assign Call Assign Call Call If Compare Assign Call Assign Call Assign Call Assign Call Return return:yes Assign Call Assign Call Assign Call Assign Call Assign Call Assign Call Assign Call Call Return return:yes"
  },
  {
    "library": "django",
    "name": "get_prepopulated_fields",
    "source_code": "def get_prepopulated_fields(self, request, obj=None):\n    return self.prepopulated_fields",
    "docstring": "Hook for specifying custom prepopulated fields.",
    "type": "method",
    "file_path": "django\\django\\contrib\\admin\\options.py",
    "ast_data": "FunctionDef name:get_prepopulated_fields arg:self arg:request arg:obj arguments arg arg arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_DistributedVariableSaveable",
    "source_code": "class _DistributedVariableSaveable(saveable_object.SaveableObject):\n\n    def __init__(self, distributed_variable, primary_variable, name):\n        self._distributed_variable = distributed_variable\n        if not self._distributed_variable._policy:\n            raise ValueError('The VariablePolicy of the argument `distributed_variable` must be set to create a _DistributedVariableSaveable. Please set it via the `var_policy` argument in the constructor of DistributedVariable.')\n        tensor, spec = distributed_variable._policy.get_saveable(distributed_variable, primary_variable, name)\n        super(_DistributedVariableSaveable, self).__init__(tensor, spec, name)\n\n    def restore(self, restored_tensors, restored_shapes):\n        tensor, = restored_tensors\n        return self._distributed_variable._policy.get_restore_ops(self._distributed_variable, tensor)",
    "docstring": "Class for defining how to restore a DistributedVariable.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\values.py",
    "ast_data": "ClassDef name:_DistributedVariableSaveable FunctionDef name:__init__ arg:self arg:distributed_variable arg:primary_variable arg:name arguments arg arg arg arg Assign If Raise Call Assign Call Call Call FunctionDef name:restore arg:self arg:restored_tensors arg:restored_shapes arguments arg arg arg Assign Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "_fill",
    "source_code": "def _fill(strings, linelen=75):\n    currpos = 0\n    lasti = 0\n    result = []\n    for i, s in enumerate(strings):\n        length = len(s)\n        if currpos + length < linelen:\n            currpos += length + 1\n        else:\n            result.append(b' '.join(strings[lasti:i]))\n            lasti = i\n            currpos = length\n    result.append(b' '.join(strings[lasti:]))\n    return b'\\n'.join(result)",
    "docstring": "Make one string from sequence of strings, with whitespace in between. The whitespace is chosen to form lines of at most *linelen* characters, if possible.",
    "type": "function",
    "file_path": "matplotlib\\lib\\matplotlib\\backends\\backend_pdf.py",
    "ast_data": "FunctionDef name:_fill arg:strings arg:linelen arguments arg arg Assign Assign Assign For Call Assign Call If Compare Call Call Assign Assign Call Call Return return:yes Call"
  },
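A usage sketch, assuming the _fill helper above is in scope; it reflows nine byte tokens into lines of at most 20 characters:

```python
words = [w.encode() for w in 'the quick brown fox jumps over the lazy dog'.split()]
print(_fill(words, linelen=20).decode())
# the quick brown fox
# jumps over the lazy
# dog
```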
  {
    "library": "scipy",
    "name": "comb",
    "source_code": "def comb(N, k, *, exact=False, repetition=False):\n    if repetition:\n        return comb(N + k - 1, k, exact=exact)\n    if exact:\n        if int(N) == N and int(k) == k:\n            return _comb_int(N, k)\n        else:\n            raise ValueError('Non-integer `N` and `k` with `exact=True` is not supported.')\n    else:\n        k, N = (asarray(k), asarray(N))\n        cond = (k <= N) & (N >= 0) & (k >= 0)\n        vals = binom(N, k)\n        if isinstance(vals, np.ndarray):\n            vals[~cond] = 0\n        elif not cond:\n            vals = np.float64(0)\n        return vals",
    "docstring": "The number of combinations of N things taken k at a time. This is often expressed as \"N choose k\". Parameters ---------- N : int, ndarray Number of things. k : int, ndarray Number of elements taken. exact : bool, optional For integers, if is False, then floating point precision is used, otherwise the result is computed exactly. repetition : bool, optional If is True, then the number of combinations with repetition is computed. Returns ------- val : int, float, ndarray The total number of combinations. See Also -------- binom : Binomial coefficient considered as a function of two real variables. Notes ----- - Array arguments accepted only for exact=False case. - If N N and repetition=False, then 0 is returned. Examples -------- >>> import numpy as np >>> from scipy.special import comb >>> k = np.array([3, 4]) >>> n = np.array([10, 10]) >>> comb(n, k, exact=False) array([ 120., 210.]) >>> comb(10, 3, exact=True) 120 >>> comb(10, 3, exact=True, repetition=True) 220",
    "type": "function",
    "file_path": "scipy\\scipy\\special\\_basic.py",
    "ast_data": "FunctionDef name:comb arg:N arg:k arguments arg arg arg arg If Return return:yes Call If If BoolOp Compare Call Compare Call Return return:yes Call Raise Call Assign Call Call Assign Compare Compare Compare Assign Call If Call Assign If Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "diag_operator",
    "source_code": "@property\ndef diag_operator(self):\n    return self._diag_operator",
    "docstring": "If this operator is , this is .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\linalg\\linear_operator_low_rank_update.py",
    "ast_data": "FunctionDef name:diag_operator arg:self arguments arg Return return:yes"
  },
  {
    "library": "pandas",
    "name": "sheets",
    "source_code": "@property\ndef sheets(self) -> dict[str, Any]:\n    from odf.table import Table\n    result = {sheet.getAttribute('name'): sheet for sheet in self.book.getElementsByType(Table)}\n    return result",
    "docstring": "Mapping of sheet names to sheet objects.",
    "type": "method",
    "file_path": "pandas\\pandas\\io\\excel\\_odswriter.py",
    "ast_data": "FunctionDef name:sheets arg:self arguments arg Assign Call Call Return return:yes"
  },
  {
    "library": "django",
    "name": "__len__",
    "source_code": "def __len__(self):\n    if self.empty:\n        return 0\n    if self.hasz:\n        return 3\n    else:\n        return 2",
    "docstring": "Return the number of dimensions for this Point (either 0, 2 or 3).",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\geos\\point.py",
    "ast_data": "FunctionDef name:__len__ arg:self arguments arg If Return return:yes If Return return:yes Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "_axes_pos",
    "source_code": "def _axes_pos(self, ax):\n    return (ax.get_position(True).frozen(), ax.get_position().frozen())",
    "docstring": "Return the original and modified positions for the specified Axes. Parameters ---------- ax : matplotlib.axes.Axes The to get the positions for. Returns ------- original_position, modified_position A tuple of the original and modified positions.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\backend_tools.py",
    "ast_data": "FunctionDef name:_axes_pos arg:self arg:ax arguments arg arg Return return:yes Call Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "_get_dict",
    "source_code": "def _get_dict(self, ignored_keys: Optional[list[str]]=None, ignored_prefixes: Optional[list[str]]=None, skip_default: bool=False) -> dict[str, Any]:\n    config: dict[str, Any] = {}\n    for key in self._config:\n        if ignored_keys and key in ignored_keys:\n            continue\n        if ignored_prefixes:\n            if any((key.startswith(prefix) for prefix in ignored_prefixes)):\n                continue\n        if skip_default and self._is_default(key):\n            continue\n        if self._config[key].alias is not None:\n            continue\n        config[key] = copy.deepcopy(getattr(self, key))\n    return config",
    "docstring": "Export a dictionary of current configuration keys and values. This function is design to provide a single point which handles accessing config options and exporting them into a dictionary. This is used by a number of different user facing export methods which all have slightly different semantics re: how and what to skip. If a config is aliased, it skips this config. Arguments: ignored_keys are keys that should not be exported. ignored_prefixes are prefixes that if a key matches should not be exported skip_default does two things. One if a key has not been modified it skips it.",
    "type": "method",
    "file_path": "pytorch\\torch\\utils\\_config_module.py",
    "ast_data": "FunctionDef name:_get_dict arg:self arg:ignored_keys arg:ignored_prefixes arg:skip_default arguments arg arg arg arg For If BoolOp Compare If If Call Call If BoolOp Call If Compare Assign Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "QConfig",
    "source_code": "class QConfig(namedtuple('QConfig', ['activation', 'weight'])):\n    __slots__ = ()\n\n    def __new__(cls, activation, weight):\n        if isinstance(activation, nn.Module) or isinstance(weight, nn.Module):\n            raise ValueError('QConfig received observer instance, please pass observer class instead. ' + 'Use MyObserver.with_args(x=1) to override arguments to constructor if needed')\n        return super().__new__(cls, activation, weight)",
    "docstring": "Describes how to quantize a layer or a part of the network by providing settings (observer classes) for activations and weights respectively. Note that QConfig needs to contain observer **classes** (like MinMaxObserver) or a callable that returns instances on invocation, not the concrete observer instances themselves. Quantization preparation function will instantiate observers multiple times for each of the layers. Observer classes have usually reasonable default arguments, but they can be overwritten with method (that behaves like functools.partial):: my_qconfig = QConfig( activation=MinMaxObserver.with_args(dtype=torch.qint8), weight=default_observer.with_args(dtype=torch.qint8))",
    "type": "class",
    "file_path": "pytorch\\torch\\ao\\quantization\\qconfig.py",
    "ast_data": "ClassDef name:QConfig Call Assign FunctionDef name:__new__ arg:cls arg:activation arg:weight arguments arg arg arg If BoolOp Call Call Raise Call Return return:yes Call Call"
  },
  {
    "library": "pandas",
    "name": "unstack",
    "source_code": "def unstack(self, level: IndexLabel=-1, fill_value: Hashable | None=None, sort: bool=True) -> DataFrame:\n    from pandas.core.reshape.reshape import unstack\n    return unstack(self, level, fill_value, sort)",
    "docstring": "Unstack, also known as pivot, Series with MultiIndex to produce DataFrame. Parameters ---------- level : int, str, or list of these, default last level Level(s) to unstack, can pass level name. fill_value : scalar value, default None Value to use when replacing NaN values. sort : bool, default True Sort the level(s) in the resulting MultiIndex columns. Returns ------- DataFrame Unstacked Series. See Also -------- DataFrame.unstack : Pivot the MultiIndex of a DataFrame. Notes ----- Reference :ref: for more examples. Examples -------- >>> s = pd.Series( ... [1, 2, 3, 4], ... index=pd.MultiIndex.from_product([[\"one\", \"two\"], [\"a\", \"b\"]]), ... ) >>> s one a 1 b 2 two a 3 b 4 dtype: int64 >>> s.unstack(level=-1) a b one 1 2 two 3 4 >>> s.unstack(level=0) one two a 1 3 b 2 4",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\series.py",
    "ast_data": "FunctionDef name:unstack arg:self arg:level arg:fill_value arg:sort arguments arg arg arg arg Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "_wrap_aggregated_output",
    "source_code": "@final\ndef _wrap_aggregated_output(self, result: Series | DataFrame, qs: npt.NDArray[np.float64] | None=None):\n    if not self.as_index:\n        result = self._insert_inaxis_grouper(result, qs=qs)\n        result = result._consolidate()\n        result.index = default_index(len(result))\n    else:\n        index = self._grouper.result_index\n        if qs is not None:\n            index = _insert_quantile_level(index, qs)\n        result.index = index\n    return result",
    "docstring": "Wraps the output of GroupBy aggregations into the expected result. Parameters ---------- result : Series, DataFrame Returns ------- Series or DataFrame",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\groupby\\groupby.py",
    "ast_data": "FunctionDef name:_wrap_aggregated_output arg:self arg:result arg:qs arguments arg arg arg If Assign Call Assign Call Assign Call Call Assign If Compare Assign Call Assign Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "hook_with_zero_interleaved_fn",
    "source_code": "def hook_with_zero_interleaved_fn(state, bucket: dist.GradBucket) -> torch.futures.Future[torch.Tensor]:\n    fut = hook(state, bucket)\n    _hook_with_zero_step_setup(ddp_ref, zero, bucket)\n    if zero._overlap_info.status != _OverlapStatus.INITIALIZED:\n        return fut\n\n    def zero_step(fut: torch.futures.Future) -> torch.Tensor:\n        overlap_info = zero._overlap_info\n        bucket_index = bucket.index()\n        rank = zero.global_rank\n        assigned_ranks = overlap_info.assigned_ranks_per_bucket[bucket_index]\n        overlap_info.bucket_indices_seen.append(bucket_index)\n        if rank in assigned_ranks:\n            _perform_local_step(bucket, zero, rank)\n        _broadcast_bucket(bucket_index, zero)\n        num_buckets = len(overlap_info.params_per_bucket)\n        if len(overlap_info.bucket_indices_seen) == num_buckets:\n            overlap_info.wait_for_broadcasts()\n            overlap_info.clear_per_iter_info()\n        return bucket.buffer()\n    return fut.then(zero_step)",
    "docstring": "Return :class: that gives gradient bucket tensor and performs partial :class: :meth:. This function uses the gradients in gradient in given bucket to perform a partial :class: :meth: Arguments: state: any state for the hook. bucket (dist.GradBucket): the :class: gradient bucket.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\algorithms\\ddp_comm_hooks\\ddp_zero_hook.py",
    "ast_data": "FunctionDef name:hook_with_zero_interleaved_fn arg:state arg:bucket arguments arg arg Assign Call Call If Compare Return return:yes FunctionDef name:zero_step arg:fut arguments arg Assign Assign Call Assign Assign Call If Compare Call Call Assign Call If Compare Call Call Call Return return:yes Call Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "ptr",
    "source_code": "@property\n@abstractmethod\ndef ptr(self) -> int:\n    pass",
    "docstring": "Pointer to start of the buffer as an integer.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\interchange\\dataframe_protocol.py",
    "ast_data": "FunctionDef name:ptr arg:self arguments arg"
  },
  {
    "library": "tensorflow",
    "name": "_clusters_l2_normalized",
    "source_code": "def _clusters_l2_normalized(self):\n    return self._distance_metric == COSINE_DISTANCE and (not self._use_mini_batch or self._mini_batch_steps_per_iteration > 1)",
    "docstring": "Returns True if clusters centers are kept normalized.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\clustering_ops.py",
    "ast_data": "FunctionDef name:_clusters_l2_normalized arg:self arguments arg Return return:yes BoolOp Compare BoolOp Compare"
  },
  {
    "library": "tensorflow",
    "name": "_binary_op",
    "source_code": "def _binary_op(fn):\n\n    def binary_op_wrapper(x, y, name=None):\n        return fn(x, y, name=name)\n    return binary_op_wrapper",
    "docstring": "Wrapper that restricts to have the correct signature.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\compiler\\tf2xla\\python\\xla.py",
    "ast_data": "FunctionDef name:_binary_op arg:fn arguments arg FunctionDef name:binary_op_wrapper arg:x arg:y arg:name arguments arg arg arg Return return:yes Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "transform_from",
    "source_code": "@abc.abstractmethod\ndef transform_from(self, input: IO[bytes]) -> IO[bytes]:\n    pass",
    "docstring": "Takes a readable input stream, and generates a new stream which implements the input transform. When the returned stream is read, data will be read from the 'input' stream, transformed, and returned.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\checkpoint\\_extension.py",
    "ast_data": "FunctionDef name:transform_from arg:self arg:input arguments arg arg"
  },
  {
    "library": "pytorch",
    "name": "__call__",
    "source_code": "def __call__(self, estimate_mode_type: str) -> Self:\n    if estimate_mode_type == 'operator-level-benchmark':\n        self._estimate_runtime = RuntimeEstimator._benchmark_estimate\n    elif estimate_mode_type == 'operator-level-cost-model':\n        self._estimate_runtime = RuntimeEstimator._roofline_estimate\n    else:\n        raise NotImplementedError(f'estimate_mode_type {estimate_mode_type} not supported')\n    return self",
    "docstring": "Sets the estimate mode type. Currently supported modes: - \"operator-level-benchmark\": Estimates runtime using operator benchmarking. - \"operator-level-cost-model\": Estimates runtime using roofline cost model. Args: estimate_mode_type (str): The type of estimate mode to use. Returns: SACEstimator: The SAC estimator instance. Raises: NotImplementedError: If the estimate mode type is not supported.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\_tools\\sac_estimator.py",
    "ast_data": "FunctionDef name:__call__ arg:self arg:estimate_mode_type arguments arg arg If Compare Assign If Compare Assign Raise Call Return return:yes"
  },
  {
    "library": "django",
    "name": "as_sqlite",
    "source_code": "def as_sqlite(self, compiler, connection, **extra_context):\n    return super().as_sqlite(compiler, connection, function='MAX', **extra_context)",
    "docstring": "Use the MAX function on SQLite.",
    "type": "method",
    "file_path": "django\\django\\db\\models\\functions\\comparison.py",
    "ast_data": "FunctionDef name:as_sqlite arg:self arg:compiler arg:connection arguments arg arg arg arg Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_Conv2DGrad",
    "source_code": "@ops.RegisterGradient('Conv2D')\ndef _Conv2DGrad(op: ops.Operation, grad):\n    dilations = op.get_attr('dilations')\n    strides = op.get_attr('strides')\n    padding = op.get_attr('padding')\n    explicit_paddings = op.get_attr('explicit_paddings')\n    use_cudnn_on_gpu = op.get_attr('use_cudnn_on_gpu')\n    data_format = op.get_attr('data_format')\n    shape_0, shape_1 = array_ops.shape_n([op.inputs[0], op.inputs[1]])\n    return [gen_nn_ops.conv2d_backprop_input(shape_0, op.inputs[1], grad, dilations=dilations, strides=strides, padding=padding, explicit_paddings=explicit_paddings, use_cudnn_on_gpu=use_cudnn_on_gpu, data_format=data_format), gen_nn_ops.conv2d_backprop_filter(op.inputs[0], shape_1, grad, dilations=dilations, strides=strides, padding=padding, explicit_paddings=explicit_paddings, use_cudnn_on_gpu=use_cudnn_on_gpu, data_format=data_format)]",
    "docstring": "Gradient function for Conv2D.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\nn_grad.py",
    "ast_data": "FunctionDef name:_Conv2DGrad arg:op arg:grad arguments arg arg Assign Call Assign Call Assign Call Assign Call Assign Call Assign Call Assign Call Return return:yes Call Call Call"
  },
  {
    "library": "pandas",
    "name": "maybe_convert_ix",
    "source_code": "def maybe_convert_ix(*args):\n    for arg in args:\n        if not isinstance(arg, (np.ndarray, list, ABCSeries, Index)):\n            return args\n    return np.ix_(*args)",
    "docstring": "We likely want to take the cross-product.",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\indexing.py",
    "ast_data": "FunctionDef name:maybe_convert_ix arguments arg For If Call Return return:yes Return return:yes Call"
  },
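What "take the cross-product" means in practice: np.ix_ turns two 1-D index lists into an open mesh, so indexing selects a rows-by-columns block. A small NumPy-only sketch:

```python
import numpy as np

a = np.arange(16).reshape(4, 4)
rows, cols = [0, 2], [1, 3]

# Without np.ix_, a[rows, cols] pairs indices elementwise and returns
# just [a[0, 1], a[2, 3]]; with it, we get the full 2x2 block.
print(a[np.ix_(rows, cols)])
# [[ 1  3]
#  [ 9 11]]
```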
  {
    "library": "pytorch",
    "name": "FunctionModifiers",
    "source_code": "class FunctionModifiers:\n    UNUSED = 'unused (ignored and replaced with raising of an exception)'\n    IGNORE = \"ignore (leave as a call to Python, cannot be torch.jit.save'd)\"\n    EXPORT = 'export (compile this function even if nothing calls it)'\n    DEFAULT = 'default (compile if called from a exported function / forward)'\n    COPY_TO_SCRIPT_WRAPPER = 'if this method is not scripted, copy the python method onto the scripted model'\n    _DROP = '_drop (function is fully ignored, declaration can be unscriptable)'",
    "docstring": "Used to denote the behavior of a function in TorchScript. See export() and ignore() for details.",
    "type": "class",
    "file_path": "pytorch\\torch\\_jit_internal.py",
    "ast_data": "ClassDef name:FunctionModifiers Assign Assign Assign Assign Assign Assign"
  },
  {
    "library": "tensorflow",
    "name": "new_list",
    "source_code": "def new_list(iterable=None):\n    if iterable:\n        elements = tuple(iterable)\n    else:\n        elements = ()\n    if elements:\n        return _py_list_new(elements)\n    return tf_tensor_list_new(elements)",
    "docstring": "The list constructor. Args: iterable: Optional elements to fill the list with. Returns: A list-like object. The exact return value depends on the initial elements.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\autograph\\operators\\data_structures.py",
    "ast_data": "FunctionDef name:new_list arg:iterable arguments arg If Assign Call Assign If Return return:yes Call Return return:yes Call"
  },
  {
    "library": "pygame",
    "name": "clear",
    "source_code": "def clear(self, surface, bgd):\n    self._bgd = bgd",
    "docstring": "use to set background Group.clear(surface, bgd): return None",
    "type": "method",
    "file_path": "pygame\\src_py\\sprite.py",
    "ast_data": "FunctionDef name:clear arg:self arg:surface arg:bgd arguments arg arg arg Assign"
  },
  {
    "library": "seaborn",
    "name": "default_range",
    "source_code": "@property\ndef default_range(self) -> tuple[float, float]:\n    return self._default_range",
    "docstring": "Min and max values used by default for semantic mapping.",
    "type": "method",
    "file_path": "seaborn\\seaborn\\_core\\properties.py",
    "ast_data": "FunctionDef name:default_range arg:self arguments arg Return return:yes"
  },
  {
    "library": "scipy",
    "name": "_logpdf",
    "source_code": "def _logpdf(self, x, dim, df, log_det_scale, C):\n    log_det_x = np.empty(x.shape[-1])\n    tr_scale_x_inv = np.empty(x.shape[-1])\n    trsm = get_blas_funcs('trsm', (x,))\n    if dim > 1:\n        for i in range(x.shape[-1]):\n            Cx, log_det_x[i] = self._cholesky_logdet(x[:, :, i])\n            A = trsm(1.0, Cx, C, side=0, lower=True)\n            tr_scale_x_inv[i] = np.linalg.norm(A) ** 2\n    else:\n        log_det_x[:] = np.log(x[0, 0])\n        tr_scale_x_inv[:] = C[0, 0] ** 2 / x[0, 0]\n    out = 0.5 * df * log_det_scale - 0.5 * tr_scale_x_inv - (0.5 * df * dim * _LOG_2 + 0.5 * (df + dim + 1) * log_det_x) - multigammaln(0.5 * df, dim)\n    return out",
    "docstring": "Log of the inverse Wishart probability density function. Parameters ---------- x : ndarray Points at which to evaluate the log of the probability density function. dim : int Dimension of the scale matrix df : int Degrees of freedom log_det_scale : float Logarithm of the determinant of the scale matrix C : ndarray Cholesky factorization of the scale matrix, lower triangular. Notes ----- As this function does no argument checking, it should not be called directly; use 'logpdf' instead.",
    "type": "method",
    "file_path": "scipy\\scipy\\stats\\_multivariate.py",
    "ast_data": "FunctionDef name:_logpdf arg:self arg:x arg:dim arg:df arg:log_det_scale arg:C arguments arg arg arg arg arg arg Assign Call Assign Call Assign Call If Compare For Call Assign Call Assign Call Assign Call Assign Call Assign Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "get_sparse_tensors",
    "source_code": "def get_sparse_tensors(self, transformation_cache, state_manager):\n    sparse_tensors = self.categorical_column.get_sparse_tensors(transformation_cache, state_manager)\n    return self._get_sparse_tensors_helper(sparse_tensors)",
    "docstring": "Returns an IdWeightPair. is a pair of s which represents ids and weights. is typically a x of . is either a of or to indicate all weights should be taken to be 1. If specified, must have exactly the same shape and indices as . Expected is same as parsing output of a which is a ragged matrix. Args: transformation_cache: A object to access features. state_manager: A to create / access resources such as lookup tables.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\feature_column\\feature_column_v2.py",
    "ast_data": "FunctionDef name:get_sparse_tensors arg:self arg:transformation_cache arg:state_manager arguments arg arg arg Assign Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "_init_intra_and_inter_node_groups",
    "source_code": "def _init_intra_and_inter_node_groups(global_process_group: dist.ProcessGroup, num_devices_per_node: int) -> tuple[dist.ProcessGroup, dist.ProcessGroup]:\n    return (_init_intra_node_process_group(num_devices_per_node), _init_inter_node_process_group(global_process_group, num_devices_per_node))",
    "docstring": "Initialize intra and inter-node process groups and return the ones corresponding to this process's rank. This function can be used to initialize process groups for `` in FSDP. This function assumes each node has an equal number of CUDA-enabled devices. Returns: Tuple[dist.ProcessGroup, dist.ProcessGroup]: Intra and inter-node process group.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\fsdp\\_init_utils.py",
    "ast_data": "FunctionDef name:_init_intra_and_inter_node_groups arg:global_process_group arg:num_devices_per_node arguments arg arg Return return:yes Call Call"
  },
  {
    "library": "django",
    "name": "_new_gnu_trans",
    "source_code": "def _new_gnu_trans(self, localedir, use_null_fallback=True):\n    return gettext_module.translation(domain=self.domain, localedir=localedir, languages=[self.__locale], fallback=use_null_fallback)",
    "docstring": "Return a mergeable gettext.GNUTranslations instance. A convenience wrapper. By default gettext uses 'fallback=False'. Using param to avoid confusion with any other references to 'fallback'.",
    "type": "method",
    "file_path": "django\\django\\utils\\translation\\trans_real.py",
    "ast_data": "FunctionDef name:_new_gnu_trans arg:self arg:localedir arg:use_null_fallback arguments arg arg arg Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "_compute_mi_cc",
    "source_code": "def _compute_mi_cc(x, y, n_neighbors):\n    n_samples = x.size\n    x = x.reshape((-1, 1))\n    y = y.reshape((-1, 1))\n    xy = np.hstack((x, y))\n    nn = NearestNeighbors(metric='chebyshev', n_neighbors=n_neighbors)\n    nn.fit(xy)\n    radius = nn.kneighbors()[0]\n    radius = np.nextafter(radius[:, -1], 0)\n    kd = KDTree(x, metric='chebyshev')\n    nx = kd.query_radius(x, radius, count_only=True, return_distance=False)\n    nx = np.array(nx) - 1.0\n    kd = KDTree(y, metric='chebyshev')\n    ny = kd.query_radius(y, radius, count_only=True, return_distance=False)\n    ny = np.array(ny) - 1.0\n    mi = digamma(n_samples) + digamma(n_neighbors) - np.mean(digamma(nx + 1)) - np.mean(digamma(ny + 1))\n    return max(0, mi)",
    "docstring": "Compute mutual information between two continuous variables. Parameters ---------- x, y : ndarray, shape (n_samples,) Samples of two continuous random variables, must have an identical shape. n_neighbors : int Number of nearest neighbors to search for each point, see [1]_. Returns ------- mi : float Estimated mutual information in nat units. If it turned out to be negative it is replaced by 0. Notes ----- True mutual information can't be negative. If its estimate by a numerical method is negative, it means (providing the method is adequate) that the mutual information is close to 0 and replacing it by 0 is a reasonable strategy. References ---------- .. [1] A. Kraskov, H. Stogbauer and P. Grassberger, \"Estimating mutual information\". Phys. Rev. E 69, 2004.",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\feature_selection\\_mutual_info.py",
    "ast_data": "FunctionDef name:_compute_mi_cc arg:x arg:y arg:n_neighbors arguments arg arg arg Assign Assign Call Assign Call Assign Call Assign Call Call Assign Call Assign Call Assign Call Assign Call Assign Call Assign Call Assign Call Assign Call Assign Call Call Call Call Call Call Return return:yes Call"
  },
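`_compute_mi_cc` implements the Kraskov-Stögbauer-Grassberger k-NN estimator backing scikit-learn's public `mutual_info_regression`/`mutual_info_classif` APIs. A short sketch of the public entry point; the data values are illustrative:

```python
import numpy as np
from sklearn.feature_selection import mutual_info_regression

rng = np.random.RandomState(0)
t = rng.normal(size=1000)
dependent = t + 0.1 * rng.normal(size=1000)    # strongly related to t
independent = rng.normal(size=1000)            # unrelated to t
X = np.column_stack([dependent, independent])
# First estimate is large, second is (close to) 0 nats.
print(mutual_info_regression(X, t, n_neighbors=3, random_state=0))
```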
  {
    "library": "pandas",
    "name": "is_multi_agg_with_relabel",
    "source_code": "def is_multi_agg_with_relabel(**kwargs) -> bool:\n    return all((isinstance(v, tuple) and len(v) == 2 for v in kwargs.values())) and len(kwargs) > 0",
    "docstring": "Check whether kwargs passed to .agg look like multi-agg with relabeling. Parameters ---------- **kwargs : dict Returns ------- bool Examples -------- >>> is_multi_agg_with_relabel(a=\"max\") False >>> is_multi_agg_with_relabel(a_max=(\"a\", \"max\"), a_min=(\"a\", \"min\")) True >>> is_multi_agg_with_relabel() False",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\apply.py",
    "ast_data": "FunctionDef name:is_multi_agg_with_relabel arguments arg Return return:yes BoolOp Call BoolOp Call Compare Call Call Compare Call"
  },
  {
    "library": "django",
    "name": "do_get_current_language_bidi",
    "source_code": "@register.tag('get_current_language_bidi')\ndef do_get_current_language_bidi(parser, token):\n    args = token.contents.split()\n    if len(args) != 3 or args[1] != 'as':\n        raise TemplateSyntaxError(\"'get_current_language_bidi' requires 'as variable' (got %r)\" % args)\n    return GetCurrentLanguageBidiNode(args[2])",
    "docstring": "Store the current language layout in the context. Usage:: {% get_current_language_bidi as bidi %} This fetches the currently active language's layout and puts its value into the `` context variable. True indicates right-to-left layout, otherwise left-to-right.",
    "type": "function",
    "file_path": "django\\django\\templatetags\\i18n.py",
    "ast_data": "FunctionDef name:do_get_current_language_bidi arg:parser arg:token arguments arg arg Assign Call If BoolOp Compare Call Compare Raise Call Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "Assert",
    "source_code": "@tf_export('debugging.Assert', 'Assert')\n@dispatch.add_dispatch_support\n@tf_should_use.should_use_result\ndef Assert(condition, data, summarize=None, name=None):\n    if context.executing_eagerly():\n        if not condition:\n            xs = ops.convert_n_to_tensor(data)\n            data_str = [_summarize_eager(x, summarize) for x in xs]\n            raise errors.InvalidArgumentError(node_def=None, op=None, message=\"Expected '%s' to be true. Summarized data: %s\" % (condition, '\\n'.join(data_str)))\n        return\n    with ops.name_scope(name, 'Assert', [condition, data]) as name:\n        xs = ops.convert_n_to_tensor(data)\n        if all((x.dtype in {dtypes.string, dtypes.int32} for x in xs)):\n            return gen_logging_ops._assert(condition, data, summarize, name='Assert')\n        else:\n            condition = ops.convert_to_tensor(condition, name='Condition')\n\n            def true_assert():\n                return gen_logging_ops._assert(condition, data, summarize, name='Assert')\n            guarded_assert = cond.cond(condition, gen_control_flow_ops.no_op, true_assert, name='AssertGuard')\n            if context.executing_eagerly():\n                return\n            return guarded_assert.op",
    "docstring": "Asserts that the given condition is true. If evaluates to false, print the list of tensors in . determines how many entries of the tensors to print. Args: condition: The condition to evaluate. data: The tensors to print out when condition is false. summarize: Print this many entries of each tensor. name: A name for this operation (optional). Returns: assert_op: An that, when executed, raises a if is not true. @compatibility(eager) returns None @end_compatibility Raises: @compatibility(TF1) When in TF V1 mode (that is, outside ) Assert needs a control dependency on the output to ensure the assertion executes: @end_compatibility",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\control_flow_assert.py",
    "ast_data": "FunctionDef name:Assert arg:condition arg:data arg:summarize arg:name arguments arg arg arg arg If Call If Assign Call Assign Call Raise Call Call Return return:no With Call Assign Call If Call Compare Return return:yes Call Assign Call FunctionDef name:true_assert arguments Return return:yes Call Assign Call If Call Return return:no Return return:yes Call"
  },
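A brief eager-mode usage sketch of `tf.debugging.Assert`; the commented-out line is an assumed failing case:

```python
import tensorflow as tf

x = tf.constant([1.0, 2.0, 3.0])
# Passes silently: the condition holds.
tf.debugging.Assert(tf.reduce_max(x) <= 3.0, [x])
# Would raise tf.errors.InvalidArgumentError and summarize x:
# tf.debugging.Assert(tf.reduce_max(x) <= 2.0, [x])
```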
  {
    "library": "tensorflow",
    "name": "_process_tensor_fetches",
    "source_code": "def _process_tensor_fetches(self, tensor_fetches):\n    if tensor_fetches is None:\n        raise RuntimeError('tensor_fetches provided to tensor_tracer cannot be None.')\n    if not isinstance(tensor_fetches, (list, tuple)):\n        tensor_fetches = [tensor_fetches]\n    elif not tensor_fetches:\n        raise RuntimeError('tensor_fetches provided to tensor_tracer cannot be empty list.')\n    fetches = []\n    for fetch in tensor_fetches:\n        if isinstance(fetch, tensor_lib.Tensor):\n            fetches.append(fetch)\n        else:\n            raise RuntimeError('Given tensor_fetch:%s is not a tensor.' % fetch)\n    return fetches",
    "docstring": "Check that tensor_fetches is not empty and have valid tensors.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\tpu\\tensor_tracer.py",
    "ast_data": "FunctionDef name:_process_tensor_fetches arg:self arg:tensor_fetches arguments arg arg If Compare Raise Call If Call Assign If Raise Call Assign For If Call Call Raise Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_random_sample",
    "source_code": "def _random_sample(self, key, values, weights):\n    assert len(values) == len(weights)\n    _distribution_func_vals = self._distribution_func(key, weights)\n    x = random.random()\n    idx = bisect.bisect(_distribution_func_vals, x)\n    assert idx <= len(values), 'Wrong index value is returned'\n    if idx == len(values):\n        idx -= 1\n    return values[idx]",
    "docstring": "given values and weights, this function randomly sample values based their weights",
    "type": "method",
    "file_path": "pytorch\\benchmarks\\operator_benchmark\\benchmark_utils.py",
    "ast_data": "FunctionDef name:_random_sample arg:self arg:key arg:values arg:weights arguments arg arg arg arg Compare Call Call Assign Call Assign Call Assign Call Compare Call If Compare Call Return return:yes"
  },
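`_random_sample` relies on a helper `_distribution_func` not shown here; a self-contained sketch of the same CDF-plus-bisect technique, with hypothetical names, looks like this:

```python
import bisect
import itertools
import random

def weighted_choice(values, weights):
    # Normalized cumulative distribution over the weights.
    total = sum(weights)
    cdf = list(itertools.accumulate(w / total for w in weights))
    idx = bisect.bisect(cdf, random.random())
    return values[min(idx, len(values) - 1)]  # guard the idx == len edge case

print(weighted_choice(["a", "b", "c"], [0.1, 0.1, 0.8]))  # mostly "c"
```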
  {
    "library": "tensorflow",
    "name": "add_command",
    "source_code": "def add_command(self, command):\n    if self._commands and command == self._commands[-1]:\n        return\n    if not isinstance(command, str):\n        raise TypeError('Attempt to enter non-str entry to command history')\n    self._commands.append(command)\n    if len(self._commands) > self._limit:\n        self._commands = self._commands[-self._limit:]\n    self._add_command_to_history_file(command)",
    "docstring": "Add a command to the command history. Args: command: The history command, as a str. Raises: TypeError: if command is not a str.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\debug\\cli\\debugger_cli_common.py",
    "ast_data": "FunctionDef name:add_command arg:self arg:command arguments arg arg If BoolOp Compare Return return:no If Call Raise Call Call If Compare Call Assign Call"
  },
  {
    "library": "authlib",
    "name": "validate_scope",
    "source_code": "def validate_scope(self):\n    self._validate_claim_value('scope')",
    "docstring": "String containing a space-separated list of scope values (as described in Section 3.3 of OAuth 2.0 [RFC6749]) that the client can use when requesting access tokens. The semantics of values in this list are service specific. If omitted, an authorization server MAY register a client with a default set of scopes.",
    "type": "method",
    "file_path": "authlib\\authlib\\oauth2\\rfc7591\\claims.py",
    "ast_data": "FunctionDef name:validate_scope arg:self arguments arg Call"
  },
  {
    "library": "numpy",
    "name": "todict",
    "source_code": "def todict(self):\n    self._optimize_data_files()\n    d = {}\n    known_keys = self.list_keys + self.dict_keys + self.extra_keys\n    for n in known_keys:\n        a = getattr(self, n)\n        if a:\n            d[n] = a\n    return d",
    "docstring": "Return a dictionary compatible with the keyword arguments of distutils setup function. Examples -------- >>> setup(**config.todict()) #doctest: +SKIP",
    "type": "method",
    "file_path": "numpy\\numpy\\distutils\\misc_util.py",
    "ast_data": "FunctionDef name:todict arg:self arguments arg Call Assign Assign For Assign Call If Assign Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_create_empty_block_mask",
    "source_code": "def _create_empty_block_mask(query: Tensor, key: Tensor) -> BlockMask:\n    device = query.device\n    return BlockMask.from_kv_blocks(kv_num_blocks=torch.ones([1, 1, 1], dtype=torch.int32, device=device), kv_indices=torch.zeros([1, 1, 1, 1], dtype=torch.int32, device=device), BLOCK_SIZE=_LARGE_SPARSE_BLOCK_SIZE, seq_lengths=(1, 1))",
    "docstring": "Default block mask for flex attention. If users don't specify any block sparse mask info, we create this empty block sparse mask. Which creates a BlockMask with 1 block that is the full length of the query and key tensors.",
    "type": "function",
    "file_path": "pytorch\\torch\\nn\\attention\\flex_attention.py",
    "ast_data": "FunctionDef name:_create_empty_block_mask arg:query arg:key arguments arg arg Assign Return return:yes Call Call Call"
  },
  {
    "library": "matplotlib",
    "name": "prism",
    "source_code": "def prism() -> None:\n    set_cmap('prism')",
    "docstring": "Set the colormap to 'prism'. This changes the default colormap as well as the colormap of the current image if there is one. See `` for more information.",
    "type": "function",
    "file_path": "matplotlib\\lib\\matplotlib\\pyplot.py",
    "ast_data": "FunctionDef name:prism arguments Call"
  },
  {
    "library": "tensorflow",
    "name": "embedding_lookup",
    "source_code": "@dispatch.dispatch_for_api(embedding_ops.embedding_lookup)\ndef embedding_lookup(params, ids: ragged_tensor.Ragged, partition_strategy='mod', name=None, validate_indices=True, max_norm=None):\n    if params is None:\n        raise ValueError('params must be specified.')\n    if isinstance(params, (list, tuple)) and (not params):\n        raise ValueError('params should not be empty.')\n    if ids.dtype != dtypes.int32 and ids.dtype != dtypes.int64:\n        raise ValueError(f'The values contained by the inputs have type {str(ids.dtype)} and cannot be processed. All values should be indices, either of type `int32` or `int64`.')\n    with ops.name_scope(name, 'embedding_lookup_ragged') as name:\n        looked_up_ragged = ragged_functional_ops.map_flat_values(embedding_ops.embedding_lookup, params=params, ids=ids, partition_strategy=partition_strategy, max_norm=max_norm)\n        return looked_up_ragged",
    "docstring": "Look up the ragged ids in a list of embedding tensors. Args: params: A tensor representing the complete embedding tensor having the shape [e1, ...eM] ragged_ids: A 'RaggedTensor' with type 'int32' or 'int64' containing the ids to be looked up in 'params' of shape [r0, ..rN]. Values must be in the range '[0, params.shape[0]]'. partition_strategy: A string specifying the partitioning strategy. max_norm: If not , each embedding is clipped if its l2-norm is larger than this value. name: A name for the operation (optional) Returns: A ragged tensor of shape [r0, r1, ...rN, e1, ...eM]. Raises: ValueError: When params is empty or the type of the ids is not int32 or int64.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\ragged_embedding_ops.py",
    "ast_data": "FunctionDef name:embedding_lookup arg:params arg:ids arg:partition_strategy arg:name arg:validate_indices arg:max_norm arguments arg arg arg arg arg arg If Compare Raise Call If BoolOp Call Raise Call If BoolOp Compare Compare Raise Call Call With Call Assign Call Return return:yes Call"
  },
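Because the overload is registered via `dispatch_for_api`, plain `tf.nn.embedding_lookup` dispatches here when `ids` is ragged. A small sketch; the shapes are illustrative:

```python
import tensorflow as tf

params = tf.random.normal([10, 4])         # 10 embeddings of width 4
ids = tf.ragged.constant([[0, 1], [2]])    # ragged batch of ids
out = tf.nn.embedding_lookup(params, ids)  # dispatches to the ragged overload
print(out.shape)                           # (2, None, 4)
```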
  {
    "library": "tensorflow",
    "name": "_update_cross_replica",
    "source_code": "def _update_cross_replica(self, update_fn, value, **kwargs):\n    values_util.mark_as_unsaveable()\n    return self.distribute_strategy.extended.update(self, update_fn, args=(value,), kwargs=kwargs, group=True)",
    "docstring": "Applies updates across replicas. Args: update_fn: A callable to pass to to update the variable. It should has the same signature as . value: value to be passed to . **kwargs: remaining arguments to . Returns: Updated variable or .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\values.py",
    "ast_data": "FunctionDef name:_update_cross_replica arg:self arg:update_fn arg:value arguments arg arg arg arg Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, allow_soft_placement=True, disable_detailed_stats=True, disable_timeline=True, devices=None):\n    self._tf_cluster = None\n    self._generate_timeline = not disable_timeline\n    if devices is None:\n        self._tf_cluster = tf_cluster.TF_NewCluster(allow_soft_placement, disable_detailed_stats)\n    else:\n        devices_serialized = [device.SerializeToString() for device in devices]\n        self._tf_cluster = tf_cluster.TF_NewVirtualCluster(devices_serialized)",
    "docstring": "Creates a Cluster. Args: allow_soft_placement: If True, TF will automatically fix illegal placements instead of erroring out if the placement isn't legal. disable_detailed_stats: If True, detailed statistics will not be available. disable_timeline: If True, the timeline information will not be reported. devices: A list of devices of type device_properties_pb2.NamedDevice. If None, a device list will be created based on the spec of the local machine.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\grappler\\cluster.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:allow_soft_placement arg:disable_detailed_stats arg:disable_timeline arg:devices arguments arg arg arg arg arg Assign Assign If Compare Assign Call Assign Call Assign Call"
  },
  {
    "library": "tensorflow",
    "name": "WeakTrackableReference",
    "source_code": "class WeakTrackableReference(TrackableReference):\n    __slots__ = ()\n\n    def __init__(self, name, ref):\n        if not isinstance(ref, weakref.ref):\n            ref = weakref.ref(ref)\n        super(WeakTrackableReference, self).__init__(name=name, ref=ref)\n\n    @property\n    def ref(self):\n        return self._ref()",
    "docstring": "TrackableReference that stores weak references.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\trackable\\base.py",
    "ast_data": "ClassDef name:WeakTrackableReference Assign FunctionDef name:__init__ arg:self arg:name arg:ref arguments arg arg arg If Call Assign Call Call Call FunctionDef name:ref arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "terminate",
    "source_code": "def terminate(self, task_type, task_id):\n    with self._process_lock:\n        p = self._processes.get((task_type, task_id), None)\n        if p is None:\n            raise ValueError('{}-{} does not exist'.format(task_type, task_id))\n        self._terminated.add((task_type, task_id))\n        self._parent_to_sub_queue.put('terminate {} {}'.format(task_type, task_id))\n        p.join()",
    "docstring": "Terminates the process with and . If auto_retart=True, the terminated task will be restarted unless the chief has already exited with zero exit code. Args: task_type: the task type. task_id: the task id.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\multi_process_runner.py",
    "ast_data": "FunctionDef name:terminate arg:self arg:task_type arg:task_id arguments arg arg arg With Assign Call If Compare Raise Call Call Call Call Call Call"
  },
  {
    "library": "scikit-learn",
    "name": "inverse",
    "source_code": "@abstractmethod\ndef inverse(self, raw_prediction, out=None):\n    pass",
    "docstring": "Compute the inverse link function h(raw_prediction). The inverse link function maps raw predictions to predicted target values, i.e. . Parameters ---------- raw_prediction : array Raw prediction values (in link space). out : array A location into which the result is stored. If provided, it must have a shape that the inputs broadcast to. If not provided or None, a freshly-allocated array is returned. Returns ------- out : array Output array, element-wise inverse link function.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\_loss\\link.py",
    "ast_data": "FunctionDef name:inverse arg:self arg:raw_prediction arg:out arguments arg arg arg"
  },
  {
    "library": "tensorflow",
    "name": "arange",
    "source_code": "@dispatch.add_dispatch_support\n@doc_controls.do_not_generate_docs\ndef arange(start, stop=None, step=1, dtype='int32'):\n    if stop is None and start < 0:\n        start = 0\n    result = math_ops.range(start, limit=stop, delta=step, name='arange')\n    if dtype != 'int32':\n        result = cast(result, dtype)\n    return result",
    "docstring": "Creates a 1D tensor containing a sequence of integers. The function arguments use the same convention as Theano's arange: if only one argument is provided, it is in fact the \"stop\" argument and \"start\" is 0. The default type of the returned tensor is to match TensorFlow's default. Args: start: Start value. stop: Stop value. step: Difference between two successive values. dtype: Integer dtype to use. Returns: An integer tensor. Example: >>> tf.keras.backend.arange(start=0, stop=10, step=1.5)",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\backend.py",
    "ast_data": "FunctionDef name:arange arg:start arg:stop arg:step arg:dtype arguments arg arg arg arg If BoolOp Compare Compare Assign Assign Call If Compare Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "calc_control_outputs",
    "source_code": "def calc_control_outputs(self, graph):\n    control_outputs = {}\n    for op in graph.get_operations():\n        for control_input in op.control_inputs:\n            if control_input not in control_outputs:\n                control_outputs[control_input] = set()\n            control_outputs[control_input].add(op)\n    return control_outputs",
    "docstring": "Returns the map of control_outputs for a given graph. Args: graph: The graph to parse. Returns: A map of the control outputs.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\subscribe.py",
    "ast_data": "FunctionDef name:calc_control_outputs arg:self arg:graph arguments arg arg Assign For Call For If Compare Assign Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "normalize_batch_in_training",
    "source_code": "@doc_controls.do_not_generate_docs\ndef normalize_batch_in_training(x, gamma, beta, reduction_axes, epsilon=0.001):\n    if ndim(x) == 4 and list(reduction_axes) in [[0, 1, 2], [0, 2, 3]]:\n        if not _has_nchw_support() and list(reduction_axes) == [0, 2, 3]:\n            return _broadcast_normalize_batch_in_training(x, gamma, beta, reduction_axes, epsilon=epsilon)\n        return _fused_normalize_batch_in_training(x, gamma, beta, reduction_axes, epsilon=epsilon)\n    elif sorted(reduction_axes) == list(range(ndim(x)))[:-1]:\n        return _regular_normalize_batch_in_training(x, gamma, beta, reduction_axes, epsilon=epsilon)\n    else:\n        return _broadcast_normalize_batch_in_training(x, gamma, beta, reduction_axes, epsilon=epsilon)",
    "docstring": "Computes mean and std for batch then apply batch_normalization on batch. Args: x: Input tensor or variable. gamma: Tensor by which to scale the input. beta: Tensor with which to center the input. reduction_axes: iterable of integers, axes over which to normalize. epsilon: Fuzz factor. Returns: A tuple length of 3, .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\backend.py",
    "ast_data": "FunctionDef name:normalize_batch_in_training arg:x arg:gamma arg:beta arg:reduction_axes arg:epsilon arguments arg arg arg arg arg If BoolOp Compare Call Compare Call If BoolOp Call Compare Call Return return:yes Call Return return:yes Call If Compare Call Call Call Call Return return:yes Call Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "ExpandingIndexer",
    "source_code": "class ExpandingIndexer(BaseIndexer):\n\n    @Appender(get_window_bounds_doc)\n    def get_window_bounds(self, num_values: int=0, min_periods: int | None=None, center: bool | None=None, closed: str | None=None, step: int | None=None) -> tuple[np.ndarray, np.ndarray]:\n        return (np.zeros(num_values, dtype=np.int64), np.arange(1, num_values + 1, dtype=np.int64))",
    "docstring": "Calculate expanding window bounds, mimicking df.expanding()",
    "type": "class",
    "file_path": "pandas\\pandas\\core\\indexers\\objects.py",
    "ast_data": "ClassDef name:ExpandingIndexer FunctionDef name:get_window_bounds arg:self arg:num_values arg:min_periods arg:center arg:closed arg:step arguments arg arg arg arg arg arg Return return:yes Call Call Call"
  },
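The class above lives in a private pandas module; the supported pattern is to subclass the public `pandas.api.indexers.BaseIndexer` with the same bounds logic. A sketch with a hypothetical class name:

```python
import numpy as np
import pandas as pd
from pandas.api.indexers import BaseIndexer

class ExpandingBounds(BaseIndexer):  # hypothetical name
    def get_window_bounds(self, num_values=0, min_periods=None,
                          center=None, closed=None, step=None):
        # Every window starts at 0 and ends just past position i.
        return (np.zeros(num_values, dtype=np.int64),
                np.arange(1, num_values + 1, dtype=np.int64))

s = pd.Series([1.0, 2.0, 3.0, 4.0])
# Matches s.expanding().sum(): 1, 3, 6, 10
print(s.rolling(ExpandingBounds(), min_periods=1).sum())
```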
  {
    "library": "sphinx",
    "name": "_import_classes",
    "source_code": "def _import_classes(self, class_names: list[str], currmodule: str) -> Sequence[type[Any]]:\n    classes: list[type[Any]] = []\n    for name in class_names:\n        classes.extend(import_classes(name, currmodule))\n    return classes",
    "docstring": "Import a list of classes.",
    "type": "method",
    "file_path": "sphinx\\sphinx\\ext\\inheritance_diagram.py",
    "ast_data": "FunctionDef name:_import_classes arg:self arg:class_names arg:currmodule arguments arg arg arg For Call Call Return return:yes"
  },
  {
    "library": "kornia",
    "name": "_write_float32_image",
    "source_code": "def _write_float32_image(path_file: Path, img_np: Any) -> None:\n    mode = 'mono' if img_np.ndim == 2 or (img_np.ndim == 3 and img_np.shape[-1] == 1) else 'rgb'\n    if path_file.suffix.lower() == '.tiff':\n        kornia_rs.write_image_tiff_f32(str(path_file), img_np, mode=mode)\n    else:\n        raise NotImplementedError(f'Unsupported file extension: {path_file.suffix} for float32 image')",
    "docstring": "Write float32 image to file.",
    "type": "function",
    "file_path": "kornia\\kornia\\io\\io.py",
    "ast_data": "FunctionDef name:_write_float32_image arg:path_file arg:img_np arguments arg arg Assign BoolOp Compare BoolOp Compare Compare If Compare Call Call Call Raise Call"
  },
  {
    "library": "scipy",
    "name": "_with_data",
    "source_code": "def _with_data(self, data, copy=True):\n    if copy:\n        coords = tuple((idx.copy() for idx in self.coords))\n    else:\n        coords = self.coords\n    return self.__class__((data, coords), shape=self.shape, dtype=data.dtype)",
    "docstring": "Returns a matrix with the same sparsity structure as self, but with different data. By default the index arrays are copied.",
    "type": "method",
    "file_path": "scipy\\scipy\\sparse\\_coo.py",
    "ast_data": "FunctionDef name:_with_data arg:self arg:data arg:copy arguments arg arg arg If Assign Call Call Assign Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "_brute_mst",
    "source_code": "def _brute_mst(mutual_reachability, min_samples):\n    if not issparse(mutual_reachability):\n        return mst_from_mutual_reachability(mutual_reachability)\n    indptr = mutual_reachability.indptr\n    num_points = mutual_reachability.shape[0]\n    if any((indptr[i + 1] - indptr[i] < min_samples for i in range(num_points))):\n        raise ValueError(f'There exists points with fewer than {min_samples} neighbors. Ensure your distance matrix has non-zero values for at least `min_sample`={min_samples} neighbors for each points (i.e. K-nn graph), or specify a `max_distance` in `metric_params` to use when distances are missing.')\n    n_components = csgraph.connected_components(mutual_reachability, directed=False, return_labels=False)\n    if n_components > 1:\n        raise ValueError(f'Sparse mutual reachability matrix has {n_components} connected components. HDBSCAN cannot be performed on a disconnected graph. Ensure that the sparse distance matrix has only one connected component.')\n    sparse_min_spanning_tree = csgraph.minimum_spanning_tree(mutual_reachability)\n    rows, cols = sparse_min_spanning_tree.nonzero()\n    mst = np.rec.fromarrays([rows, cols, sparse_min_spanning_tree.data], dtype=MST_edge_dtype)\n    return mst",
    "docstring": "Builds a minimum spanning tree (MST) from the provided mutual-reachability values. This function dispatches to a custom Cython implementation for dense arrays, and for sparse arrays/matrices. Parameters ---------- mututal_reachability_graph: {ndarray, sparse matrix} of shape (n_samples, n_samples) Weighted adjacency matrix of the mutual reachability graph. min_samples : int, default=None The number of samples in a neighborhood for a point to be considered as a core point. This includes the point itself. Returns ------- mst : ndarray of shape (n_samples - 1,), dtype=MST_edge_dtype The MST representation of the mutual-reachability graph. The MST is represented as a collection of edges.",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\cluster\\_hdbscan\\hdbscan.py",
    "ast_data": "FunctionDef name:_brute_mst arg:mutual_reachability arg:min_samples arguments arg arg If Call Return return:yes Call Assign Assign If Call Compare Call Raise Call Assign Call If Compare Raise Call Assign Call Assign Call Assign Call Return return:yes"
  },
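For the sparse branch, the heavy lifting is `scipy.sparse.csgraph.minimum_spanning_tree` followed by edge extraction from the nonzeros, roughly:

```python
import numpy as np
from scipy.sparse import csr_matrix
from scipy.sparse.csgraph import minimum_spanning_tree

dist = csr_matrix(np.array([[0., 1., 3.],
                            [1., 0., 2.],
                            [3., 2., 0.]]))
mst = minimum_spanning_tree(dist)
rows, cols = mst.nonzero()
# Edges (0, 1) with weight 1.0 and (1, 2) with weight 2.0
print(list(zip(rows, cols, mst.data)))
```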
  {
    "library": "pytorch",
    "name": "_set_graph_outputs",
    "source_code": "@contextlib.contextmanager\ndef _set_graph_outputs(graph: ir.Graph, outputs: list[ir.Value]):\n    original_outputs = list(graph.outputs)\n    graph.outputs.clear()\n    graph.outputs.extend(outputs)\n    try:\n        yield\n    finally:\n        graph.outputs.clear()\n        graph.outputs.extend(original_outputs)",
    "docstring": "Temporarily set the outputs of the graph. Args: graph: The graph to set the outputs for. outputs: The outputs to set.",
    "type": "function",
    "file_path": "pytorch\\torch\\onnx\\_internal\\exporter\\_onnx_program.py",
    "ast_data": "FunctionDef name:_set_graph_outputs arg:graph arg:outputs arguments arg arg Assign Call Call Call Try Call Call"
  },
  {
    "library": "scipy",
    "name": "_Brute_Wrapper",
    "source_code": "class _Brute_Wrapper:\n\n    def __init__(self, f, args):\n        self.f = f\n        self.args = [] if args is None else args\n\n    def __call__(self, x):\n        return self.f(np.asarray(x).flatten(), *self.args)",
    "docstring": "Object to wrap user cost function for optimize.brute, allowing picklability",
    "type": "class",
    "file_path": "scipy\\scipy\\optimize\\_optimize.py",
    "ast_data": "ClassDef name:_Brute_Wrapper FunctionDef name:__init__ arg:self arg:f arg:args arguments arg arg arg Assign Assign Compare FunctionDef name:__call__ arg:self arg:x arguments arg arg Return return:yes Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_verify_conv_data_format",
    "source_code": "def _verify_conv_data_format(node):\n    if node.attr['data_format'].s != b'NHWC':\n        raise ValueError('Only NHWC format is supported in flops computations')",
    "docstring": "Verifies data format for pooling and convolutional operations.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\profiler\\internal\\flops_registry.py",
    "ast_data": "FunctionDef name:_verify_conv_data_format arg:node arguments arg If Compare Raise Call"
  },
  {
    "library": "scipy",
    "name": "mean",
    "source_code": "def mean(self, axis=None, dtype=None, out=None):\n    axis = validateaxis(axis, ndim=self.ndim)\n    integral = np.issubdtype(self.dtype, np.integer) or np.issubdtype(self.dtype, np.bool_)\n    inter_dtype = np.float64 if integral else self.dtype\n    inter_self = self.astype(inter_dtype)\n    if axis is None:\n        denom = math.prod(self.shape)\n    else:\n        denom = math.prod((self.shape[ax] for ax in axis))\n    res = (inter_self * (1.0 / denom)).sum(axis=axis, dtype=inter_dtype, out=out)\n    if dtype is not None and out is None:\n        return res.astype(dtype, copy=False)\n    return res",
    "docstring": "Compute the arithmetic mean along the specified axis. Returns the average of the array/matrix elements. The average is taken over all elements in the array/matrix by default, otherwise over the specified axis. intermediate and return values are used for integer inputs. Parameters ---------- axis : {-2, -1, 0, 1, None} optional Axis along which the mean is computed. The default is to compute the mean of all elements in the array/matrix (i.e., = ). dtype : data-type, optional Type to use in computing the mean. For integer inputs, the default is ; for floating point inputs, it is the same as the input dtype. .. versionadded:: 0.18.0 out : np.matrix, optional Alternative output matrix in which to place the result. It must have the same shape as the expected output, but the type of the output values will be cast if necessary. .. versionadded:: 0.18.0 Returns ------- m : np.matrix See Also -------- numpy.matrix.mean : NumPy's implementation of 'mean' for matrices",
    "type": "method",
    "file_path": "scipy\\scipy\\sparse\\_base.py",
    "ast_data": "FunctionDef name:mean arg:self arg:axis arg:dtype arg:out arguments arg arg arg arg Assign Call Assign BoolOp Call Call Assign Assign Call If Compare Assign Call Assign Call Assign Call If BoolOp Compare Compare Return return:yes Call Return return:yes"
  },
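A usage sketch on the public sparse interface, illustrating the integer-to-float64 promotion described above:

```python
import numpy as np
from scipy.sparse import csr_array

a = csr_array(np.array([[1, 0, 2],
                        [0, 3, 0]]))
print(a.mean())        # 1.0 (sum 6 over 6 elements, computed in float64)
print(a.mean(axis=1))  # per-row means: [1. 1.]
```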
  {
    "library": "sphinx",
    "name": "levenshtein_distance",
    "source_code": "def levenshtein_distance(a: str, b: str) -> int:\n    if a == b:\n        return 0\n    if len(a) < len(b):\n        a, b = (b, a)\n    if not a:\n        return len(b)\n    previous_row = list(range(len(b) + 1))\n    for i, column1 in enumerate(a):\n        current_row = [i + 1]\n        for j, column2 in enumerate(b):\n            insertions = previous_row[j + 1] + 1\n            deletions = current_row[j] + 1\n            substitutions = previous_row[j] + (column1 != column2)\n            current_row.append(min(insertions, deletions, substitutions))\n        previous_row = current_row\n    return previous_row[-1]",
    "docstring": "Return the Levenshtein edit distance between two strings *a* and *b*.",
    "type": "function",
    "file_path": "sphinx\\sphinx\\versioning.py",
    "ast_data": "FunctionDef name:levenshtein_distance arg:a arg:b arguments arg arg If Compare Return return:yes If Compare Call Call Assign If Return return:yes Call Assign Call Call Call For Call Assign For Call Assign Assign Assign Compare Call Call Assign Return return:yes"
  },
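A quick sanity check of the edit distance, assuming `levenshtein_distance` above is in scope:

```python
assert levenshtein_distance("kitten", "sitten") == 1   # one substitution
assert levenshtein_distance("kitten", "sittin") == 2   # two substitutions
assert levenshtein_distance("kitten", "sitting") == 3  # plus one insertion
```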
  {
    "library": "scipy",
    "name": "Problem21",
    "source_code": "class Problem21(Benchmark):\n\n    def __init__(self, dimensions=1):\n        Benchmark.__init__(self, dimensions)\n        self._bounds = [(0, 10)]\n        self.global_optimum = 4.79507\n        self.fglob = -9.50835\n\n    def fun(self, x, *args):\n        self.nfev += 1\n        x = x[0]\n        return x * sin(x) + x * cos(2.0 * x)",
    "docstring": "Univariate Problem21 objective function. This class defines the Univariate Problem21 global optimization problem. This is a multimodal minimization problem defined as follows: .. math:: f_{\\text{Problem21}}(x) = x \\sin(x) + x \\cos(2x) Bound constraints: :math: .. figure:: figures/Problem21.png :alt: Univariate Problem21 function :align: center **Univariate Problem21 function** *Global optimum*: :math: for :math:",
    "type": "class",
    "file_path": "scipy\\benchmarks\\benchmarks\\go_benchmark_functions\\go_funcs_univariate.py",
    "ast_data": "ClassDef name:Problem21 FunctionDef name:__init__ arg:self arg:dimensions arguments arg arg Call Assign Assign Assign FunctionDef name:fun arg:self arg:x arguments arg arg arg Assign Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "state_dict",
    "source_code": "def state_dict(self):\n    optim_state_dict = self.optim.state_dict()\n    optim_state_dict['step'] = self.averager.step\n    return optim_state_dict",
    "docstring": "This is the same as :class: :meth:, but adds an extra entry to record model averager's step to the checkpoint to ensure reload does not cause unnecessary warm up again.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\optim\\post_localSGD_optimizer.py",
    "ast_data": "FunctionDef name:state_dict arg:self arguments arg Assign Call Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "name",
    "source_code": "@property\ndef name(self):\n    return self._name",
    "docstring": "Returns the name of this module as passed or determined in the ctor. NOTE: This is not the same as the which includes parent module names.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\module\\module.py",
    "ast_data": "FunctionDef name:name arg:self arguments arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "backward",
    "source_code": "def backward(self, gradient=None, retain_graph=None, create_graph=False, inputs=None):\n    if has_torch_function_unary(self):\n        return handle_torch_function(Tensor.backward, (self,), self, gradient=gradient, retain_graph=retain_graph, create_graph=create_graph, inputs=inputs)\n    torch.autograd.backward(self, gradient, retain_graph, create_graph, inputs=inputs)",
    "docstring": "Computes the gradient of current tensor wrt graph leaves. The graph is differentiated using the chain rule. If the tensor is non-scalar (i.e. its data has more than one element) and requires gradient, the function additionally requires specifying a `Default gradient layoutsStream semantics of backward passestensors`.",
    "type": "method",
    "file_path": "pytorch\\torch\\_tensor.py",
    "ast_data": "FunctionDef name:backward arg:self arg:gradient arg:retain_graph arg:create_graph arg:inputs arguments arg arg arg arg arg If Call Return return:yes Call Call"
  },
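Typical `backward` usage: a scalar loss needs no explicit `gradient`, while a non-scalar output requires one of matching shape:

```python
import torch

x = torch.ones(3, requires_grad=True)
loss = (x ** 2).sum()      # scalar: no gradient argument needed
loss.backward()
print(x.grad)              # tensor([2., 2., 2.])

y = torch.ones(3, requires_grad=True)
out = y * 2                # non-scalar: pass an explicit gradient
out.backward(gradient=torch.ones_like(out))
print(y.grad)              # tensor([2., 2., 2.])
```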
  {
    "library": "pandas",
    "name": "_render_html",
    "source_code": "def _render_html(self, sparse_index: bool, sparse_columns: bool, max_rows: int | None=None, max_cols: int | None=None, **kwargs) -> str:\n    d = self._render(sparse_index, sparse_columns, max_rows, max_cols, '&nbsp;')\n    d.update(kwargs)\n    return self.template_html.render(**d, html_table_tpl=self.template_html_table, html_style_tpl=self.template_html_style)",
    "docstring": "Renders the `` including all applied styles to HTML. Generates a dict with necessary kwargs passed to jinja2 template.",
    "type": "method",
    "file_path": "pandas\\pandas\\io\\formats\\style_render.py",
    "ast_data": "FunctionDef name:_render_html arg:self arg:sparse_index arg:sparse_columns arg:max_rows arg:max_cols arguments arg arg arg arg arg arg Assign Call Call Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "fit",
    "source_code": "@_fit_context(prefer_skip_nested_validation=False)\ndef fit(self, X, y, **fit_params):\n    _raise_for_params(fit_params, self, 'fit')\n    routed_params = process_routing(self, 'fit', **fit_params)\n    self.label_binarizer_ = LabelBinarizer(sparse_output=True)\n    Y = self.label_binarizer_.fit_transform(y)\n    Y = Y.tocsc()\n    self.classes_ = self.label_binarizer_.classes_\n    columns = (col.toarray().ravel() for col in Y.T)\n    self.estimators_ = Parallel(n_jobs=self.n_jobs, verbose=self.verbose)((delayed(_fit_binary)(self.estimator, X, column, fit_params=routed_params.estimator.fit, classes=['not %s' % self.label_binarizer_.classes_[i], self.label_binarizer_.classes_[i]]) for i, column in enumerate(columns)))\n    if hasattr(self.estimators_[0], 'n_features_in_'):\n        self.n_features_in_ = self.estimators_[0].n_features_in_\n    if hasattr(self.estimators_[0], 'feature_names_in_'):\n        self.feature_names_in_ = self.estimators_[0].feature_names_in_\n    return self",
    "docstring": "Fit underlying estimators. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) Data. y : {array-like, sparse matrix} of shape (n_samples,) or (n_samples, n_classes) Multi-class targets. An indicator matrix turns on multilabel classification. **fit_params : dict Parameters passed to the `enable_metadata_routing=TrueMetadata Routing User Guide ` for more details. Returns ------- self : object Instance of fitted estimator.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\multiclass.py",
    "ast_data": "FunctionDef name:fit arg:self arg:X arg:y arguments arg arg arg arg Call Assign Call Assign Call Assign Call Assign Call Assign Assign Call Call Assign Call Call Call Call Call If Call Assign If Call Assign Return return:yes Call"
  },
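This `fit` corresponds to scikit-learn's `OneVsRestClassifier`, which binarizes the labels and fits one binary estimator per class in parallel. A usage sketch:

```python
from sklearn.datasets import load_iris
from sklearn.linear_model import LogisticRegression
from sklearn.multiclass import OneVsRestClassifier

X, y = load_iris(return_X_y=True)
clf = OneVsRestClassifier(LogisticRegression(max_iter=1000), n_jobs=2)
clf.fit(X, y)
print(len(clf.estimators_), clf.classes_)  # 3 binary estimators, classes [0 1 2]
```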
  {
    "library": "django",
    "name": "TimezoneNode",
    "source_code": "class TimezoneNode(Node):\n\n    def __init__(self, nodelist, tz):\n        self.nodelist = nodelist\n        self.tz = tz\n\n    def render(self, context):\n        with timezone.override(self.tz.resolve(context)):\n            output = self.nodelist.render(context)\n        return output",
    "docstring": "Template node class used by ``.",
    "type": "class",
    "file_path": "django\\django\\templatetags\\tz.py",
    "ast_data": "ClassDef name:TimezoneNode FunctionDef name:__init__ arg:self arg:nodelist arg:tz arguments arg arg arg Assign Assign FunctionDef name:render arg:self arg:context arguments arg arg With Call Call Assign Call Return return:yes"
  },
  {
    "library": "scipy",
    "name": "dispatch_info",
    "source_code": "def dispatch_info(*types):\n    check(types)\n    lst = [tuple((a.__name__ for a in anc)) for anc in itertools.product(*ancestors(*types))]\n    return lst",
    "docstring": "An utility to introspect the dispatch algorithm",
    "type": "function",
    "file_path": "scipy\\scipy\\_lib\\decorator.py",
    "ast_data": "FunctionDef name:dispatch_info arguments arg Call Assign Call Call Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "score_weight",
    "source_code": "def score_weight(self, weight1, weight2):\n    if cbook._str_equal(weight1, weight2):\n        return 0.0\n    w1 = weight1 if isinstance(weight1, Number) else weight_dict[weight1]\n    w2 = weight2 if isinstance(weight2, Number) else weight_dict[weight2]\n    return 0.95 * (abs(w1 - w2) / 1000) + 0.05",
    "docstring": "Return a match score between *weight1* and *weight2*. The result is 0.0 if both weight1 and weight 2 are given as strings and have the same value. Otherwise, the result is the absolute value of the difference between the CSS numeric values of *weight1* and *weight2*, normalized between 0.05 and 1.0.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\font_manager.py",
    "ast_data": "FunctionDef name:score_weight arg:self arg:weight1 arg:weight2 arguments arg arg arg If Call Return return:yes Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "django",
    "name": "sync_and_async_middleware",
    "source_code": "def sync_and_async_middleware(func):\n    func.sync_capable = True\n    func.async_capable = True\n    return func",
    "docstring": "Mark a middleware factory as returning a hybrid middleware supporting both types of request.",
    "type": "function",
    "file_path": "django\\django\\utils\\decorators.py",
    "ast_data": "FunctionDef name:sync_and_async_middleware arg:func arguments arg Assign Assign Return return:yes"
  },
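Typical use of the decorator, following the pattern from the Django docs: the factory inspects `get_response` and returns a sync or async middleware accordingly:

```python
import asyncio
from django.utils.decorators import sync_and_async_middleware

@sync_and_async_middleware
def simple_middleware(get_response):
    if asyncio.iscoroutinefunction(get_response):
        async def middleware(request):
            return await get_response(request)
    else:
        def middleware(request):
            return get_response(request)
    return middleware
```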
  {
    "library": "pandas",
    "name": "_reference_dates",
    "source_code": "def _reference_dates(self, start_date: Timestamp, end_date: Timestamp) -> DatetimeIndex:\n    if self.start_date is not None:\n        start_date = self.start_date.tz_localize(start_date.tz)\n    if self.end_date is not None:\n        end_date = self.end_date.tz_localize(start_date.tz)\n    year_offset = DateOffset(years=1)\n    reference_start_date = Timestamp(datetime(start_date.year - 1, self.month, self.day))\n    reference_end_date = Timestamp(datetime(end_date.year + 1, self.month, self.day))\n    dates = date_range(start=reference_start_date, end=reference_end_date, freq=year_offset, tz=start_date.tz)\n    return dates",
    "docstring": "Get reference dates for the holiday. Return reference dates for the holiday also returning the year prior to the start_date and year following the end_date. This ensures that any offsets to be applied will yield the holidays within the passed in dates.",
    "type": "method",
    "file_path": "pandas\\pandas\\tseries\\holiday.py",
    "ast_data": "FunctionDef name:_reference_dates arg:self arg:start_date arg:end_date arguments arg arg arg If Compare Assign Call If Compare Assign Call Assign Call Assign Call Call Assign Call Call Assign Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "requires_set_python_module",
    "source_code": "def requires_set_python_module() -> bool:\n    return getattr(_utils_internal, 'REQUIRES_SET_PYTHON_MODULE', True)",
    "docstring": "If an op was defined in C++ and extended from Python using the torch.library APIs, returns if we require that there have been a m.set_python_module(\"mylib.ops\") call from C++ that associates the C++ op with a python module.",
    "type": "function",
    "file_path": "pytorch\\torch\\_library\\utils.py",
    "ast_data": "FunctionDef name:requires_set_python_module arguments Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_ScaleAndTranslateGrad",
    "source_code": "@ops.RegisterGradient('ScaleAndTranslate')\ndef _ScaleAndTranslateGrad(op, grad):\n    grad0 = gen_image_ops.scale_and_translate_grad(grad, op.inputs[0], op.inputs[2], op.inputs[3], kernel_type=op.get_attr('kernel_type'), antialias=op.get_attr('antialias'))\n    return [grad0, None, None, None]",
    "docstring": "The derivatives for ScaleAndTranslate transformation op. Args: op: The ScaleAndTranslate op. grad: The tensor representing the gradient w.r.t. the output. Returns: The gradients w.r.t. the input.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\image_grad.py",
    "ast_data": "FunctionDef name:_ScaleAndTranslateGrad arg:op arg:grad arguments arg arg Assign Call Call Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "is_nested",
    "source_code": "@tf_export('nest.is_nested')\ndef is_nested(seq):\n    return nest_util.is_nested(nest_util.Modality.CORE, seq)",
    "docstring": "Returns true if its input is a nested structure. Refer to [tf.nest]( for the definition of a nested structure. Args: seq: the value to test. Returns: True if the input is a nested structure.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\util\\nest.py",
    "ast_data": "FunctionDef name:is_nested arg:seq arguments arg Return return:yes Call Call"
  },
  {
    "library": "scikit-learn",
    "name": "_char_wb_ngrams",
    "source_code": "def _char_wb_ngrams(self, text_document):\n    text_document = self._white_spaces.sub(' ', text_document)\n    min_n, max_n = self.ngram_range\n    ngrams = []\n    ngrams_append = ngrams.append\n    for w in text_document.split():\n        w = ' ' + w + ' '\n        w_len = len(w)\n        for n in range(min_n, max_n + 1):\n            offset = 0\n            ngrams_append(w[offset:offset + n])\n            while offset + n < w_len:\n                offset += 1\n                ngrams_append(w[offset:offset + n])\n            if offset == 0:\n                break\n    return ngrams",
    "docstring": "Whitespace sensitive char-n-gram tokenization. Tokenize text_document into a sequence of character n-grams operating only inside word boundaries. n-grams at the edges of words are padded with space.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\feature_extraction\\text.py",
    "ast_data": "FunctionDef name:_char_wb_ngrams arg:self arg:text_document arguments arg arg Assign Call Assign Assign Assign For Call Assign Assign Call For Call Assign Call While Compare Call If Compare Return return:yes"
  },
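The public entry point for this tokenizer is `CountVectorizer(analyzer='char_wb')`. A small sketch showing the space-padded, word-boundary-respecting n-grams:

```python
from sklearn.feature_extraction.text import CountVectorizer

vec = CountVectorizer(analyzer="char_wb", ngram_range=(2, 2))
vec.fit(["word boundaries"])
# Bigrams never cross words and include the padding spaces, e.g. ' w', 'd '.
print(vec.get_feature_names_out())
```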
  {
    "library": "scipy",
    "name": "rev_list",
    "source_code": "def rev_list(branch, num_commits):\n    res = subprocess.run(['git', 'rev-list', '--max-count', f'{num_commits}', '--first-parent', branch], stdout=subprocess.PIPE, encoding='utf-8')\n    res.check_returncode()\n    return res.stdout.rstrip('\\n').split('\\n')",
    "docstring": "List commits in reverse chronological order. Only the first are shown.",
    "type": "function",
    "file_path": "scipy\\tools\\lint.py",
    "ast_data": "FunctionDef name:rev_list arg:branch arg:num_commits arguments arg arg Assign Call Call Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "flatten",
    "source_code": "def flatten(schema: Definition) -> FlatIntermediateDefinition:\n    result: FlatIntermediateDefinition = {}\n    _flatten(key_prefix=(), sub_schema=schema, result=result)\n    for k, v in result.items():\n        assert isinstance(k, tuple)\n        assert all((isinstance(ki, str) for ki in k))\n        assert isinstance(v, (TimerArgs, GroupedBenchmark))\n    return result",
    "docstring": "See types.py for an explanation of nested vs. flat definitions.",
    "type": "function",
    "file_path": "pytorch\\benchmarks\\instruction_counts\\core\\utils.py",
    "ast_data": "FunctionDef name:flatten arg:schema arguments arg Call For Call Call Call Call Call Return return:yes"
  },
  {
    "library": "scipy",
    "name": "_arg_wlen_as_expected",
    "source_code": "def _arg_wlen_as_expected(value):\n    if value is None:\n        value = -1\n    elif 1 < value:\n        if isinstance(value, float):\n            value = math.ceil(value)\n        value = np.intp(value)\n    else:\n        raise ValueError(f'`wlen` must be larger than 1, was {value}')\n    return value",
    "docstring": "Ensure argument is of type and larger than 1. Used in and . Returns ------- value : np.intp The original rounded up to an integer or -1 if was None.",
    "type": "function",
    "file_path": "scipy\\scipy\\signal\\_peak_finding.py",
    "ast_data": "FunctionDef name:_arg_wlen_as_expected arg:value arguments arg If Compare Assign If Compare If Call Assign Call Assign Call Raise Call Return return:yes"
  },
  {
    "library": "scipy",
    "name": "fixed_point",
    "source_code": "def fixed_point(func, x0, args=(), xtol=1e-08, maxiter=500, method='del2'):\n    use_accel = {'del2': True, 'iteration': False}[method]\n    x0 = _asarray_validated(x0, as_inexact=True)\n    return _fixed_point_helper(func, x0, args, xtol, maxiter, use_accel)",
    "docstring": "Find a fixed point of the function. Given a function of one or more variables and a starting point, find a fixed point of the function: i.e., where `func` convergence acceleration [1]_. The \"iteration\" method simply iterates the function until convergence is detected, without attempting to accelerate the convergence. References ---------- .. [1] Burden, Faires, \"Numerical Analysis\", 5th edition, pg. 80 Examples -------- >>> import numpy as np >>> from scipy import optimize >>> def func(x, c1, c2): ... return np.sqrt(c1/(x+c2)) >>> c1 = np.array([10,12.]) >>> c2 = np.array([3, 5.]) >>> optimize.fixed_point(func, [1.2, 1.3], args=(c1,c2)) array([ 1.4920333 , 1.37228132])",
    "type": "function",
    "file_path": "scipy\\scipy\\optimize\\_minpack_py.py",
    "ast_data": "FunctionDef name:fixed_point arg:func arg:x0 arg:args arg:xtol arg:maxiter arg:method arguments arg arg arg arg arg arg Assign Assign Call Return return:yes Call"
  },
  {
    "library": "numpy",
    "name": "forward_bytes_to_stdout",
    "source_code": "def forward_bytes_to_stdout(val):\n    if hasattr(sys.stdout, 'buffer'):\n        sys.stdout.buffer.write(val)\n    elif hasattr(sys.stdout, 'encoding'):\n        sys.stdout.write(val.decode(sys.stdout.encoding))\n    else:\n        sys.stdout.write(val.decode('utf8', errors='replace'))",
    "docstring": "Forward bytes from a subprocess call to the console, without attempting to decode them. The assumption is that the subprocess call already returned bytes in a suitable encoding.",
    "type": "function",
    "file_path": "numpy\\numpy\\distutils\\exec_command.py",
    "ast_data": "FunctionDef name:forward_bytes_to_stdout arg:val arguments arg If Call Call If Call Call Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "normal_",
    "source_code": "@_sharded_op_impl(torch.nn.init.normal_)\ndef normal_(types, args=(), kwargs=None, pg=None):\n    validate_param(kwargs, 'kwargs')\n    sharded_tensor = kwargs['tensor']\n    validate_param(sharded_tensor, 'tensor')\n    mean = kwargs['mean']\n    validate_param(mean, 'mean')\n    std = kwargs['std']\n    validate_param(std, 'std')\n    for shard in sharded_tensor.local_shards():\n        torch.nn.init.normal_(shard.tensor, mean=mean, std=std)\n    return sharded_tensor",
    "docstring": "Fills the Tensors in tensor.local_shards with values drawn from the normal distribution :math:. Args: tensor: tensor sharded across devices mean: the mean of the normal distribution std: the standard deviation of the normal distribution",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\_shard\\sharded_tensor\\_ops\\init.py",
    "ast_data": "FunctionDef name:normal_ arg:types arg:args arg:kwargs arg:pg arguments arg arg arg arg Call Assign Call Assign Call Assign Call For Call Call Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "pts_to_midstep",
    "source_code": "def pts_to_midstep(x, *args):\n    steps = np.zeros((1 + len(args), 2 * len(x)))\n    x = np.asanyarray(x)\n    steps[0, 1:-1:2] = steps[0, 2::2] = (x[:-1] + x[1:]) / 2\n    steps[0, :1] = x[:1]\n    steps[0, -1:] = x[-1:]\n    steps[1:, 0::2] = args\n    steps[1:, 1::2] = steps[1:, 0::2]\n    return steps",
    "docstring": "Convert continuous line to mid-steps. Given a set of ``. Examples -------- >>> x_s, y1_s, y2_s = pts_to_midstep(x, y1, y2)",
    "type": "function",
    "file_path": "matplotlib\\lib\\matplotlib\\cbook.py",
    "ast_data": "FunctionDef name:pts_to_midstep arg:x arguments arg arg Assign Call Call Call Assign Call Assign Assign Assign Assign Assign Return return:yes"
  },
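The same point expansion drives `drawstyle='steps-mid'`; calling the helper directly shows the doubled point set. A sketch, assuming `pts_to_midstep` is importable from `matplotlib.cbook`:

```python
import numpy as np
from matplotlib.cbook import pts_to_midstep

x = np.array([0.0, 1.0, 2.0])
y = np.array([0.0, 1.0, 0.0])
x_s, y_s = pts_to_midstep(x, y)
print(x_s)  # [0.  0.5 0.5 1.5 1.5 2. ]  (values change at interval midpoints)
print(y_s)  # [0.  0.  1.  1.  0.  0. ]
```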
  {
    "library": "numpy",
    "name": "lagpow",
    "source_code": "def lagpow(c, pow, maxpower=16):\n    return pu._pow(lagmul, c, pow, maxpower)",
    "docstring": "Raise a Laguerre series to a power. Returns the Laguerre series raised to the power . The argument is a sequence of coefficients ordered from low to high. i.e., [1,2,3] is the series `` Parameters ---------- c : array_like 1-D array of Laguerre series coefficients ordered from low to high. pow : integer Power to which the series will be raised maxpower : integer, optional Maximum power allowed. This is mainly to limit growth of the series to unmanageable size. Default is 16 Returns ------- coef : ndarray Laguerre series of power. See Also -------- lagadd, lagsub, lagmulx, lagmul, lagdiv Examples -------- >>> from numpy.polynomial.laguerre import lagpow >>> lagpow([1, 2, 3], 2) array([ 14., -16., 56., -72., 54.])",
    "type": "function",
    "file_path": "numpy\\numpy\\polynomial\\laguerre.py",
    "ast_data": "FunctionDef name:lagpow arg:c arg:pow arg:maxpower arguments arg arg arg Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "get_transform",
    "source_code": "def get_transform(self):\n    return self._transform",
    "docstring": "Return the associated with this scale.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\scale.py",
    "ast_data": "FunctionDef name:get_transform arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "min",
    "source_code": "@dispatch.add_dispatch_support\n@doc_controls.do_not_generate_docs\ndef min(x, axis=None, keepdims=False):\n    return math_ops.reduce_min(x, axis, keepdims)",
    "docstring": "Minimum value in a tensor. Args: x: A tensor or variable. axis: An integer, the axis to find minimum values. keepdims: A boolean, whether to keep the dimensions or not. If is , the rank of the tensor is reduced by 1. If is , the reduced dimension is retained with length 1. Returns: A tensor with minimum values of .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\backend.py",
    "ast_data": "FunctionDef name:min arg:x arg:axis arg:keepdims arguments arg arg arg Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "erf_zeros",
    "source_code": "def erf_zeros(nt):\n    if floor(nt) != nt or nt <= 0 or (not isscalar(nt)):\n        raise ValueError('Argument must be positive scalar integer.')\n    return _specfun.cerzo(nt)",
    "docstring": "Compute the first nt zero in the first quadrant, ordered by absolute value. Zeros in the other quadrants can be obtained by using the symmetries erf(-z) = erf(z) and erf(conj(z)) = conj(erf(z)). Parameters ---------- nt : int The number of zeros to compute Returns ------- The locations of the zeros of erf : ndarray (complex) Complex values at which zeros of erf(z) References ---------- .. [1] Zhang, Shanjie and Jin, Jianming. \"Computation of Special Functions\", John Wiley and Sons, 1996. Examples -------- >>> from scipy import special >>> special.erf_zeros(1) array([1.45061616+1.880943j]) Check that erf is (close to) zero for the value returned by erf_zeros >>> special.erf(special.erf_zeros(1)) array([4.95159469e-14-1.16407394e-16j])",
    "type": "function",
    "file_path": "scipy\\scipy\\special\\_basic.py",
    "ast_data": "FunctionDef name:erf_zeros arg:nt arguments arg If BoolOp Compare Call Compare Call Raise Call Return return:yes Call"
  },
  {
    "library": "numpy",
    "name": "size",
    "source_code": "def size(obj, axis=None):\n    return np.size(getdata(obj), axis)",
    "docstring": "maskedarray version of the numpy function.",
    "type": "function",
    "file_path": "numpy\\numpy\\ma\\core.py",
    "ast_data": "FunctionDef name:size arg:obj arg:axis arguments arg arg Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "is_supertype_of",
    "source_code": "def is_supertype_of(self, other: 'FunctionType') -> bool:\n    if len(self.parameters) != len(other.parameters):\n        return False\n    for self_param, other_param in zip(self.parameters.values(), other.parameters.values()):\n        if not self_param.is_subtype_of(other_param):\n            return False\n    if not all((name in other.captures for name in self.captures)):\n        return False\n    return all((capture_type.is_subtype_of(other.captures[name]) for name, capture_type in self.captures.items()))",
    "docstring": "Returns True if self is a supertype of other FunctionType.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\core\\function\\polymorphism\\function_type.py",
    "ast_data": "FunctionDef name:is_supertype_of arg:self arg:other arguments arg arg If Compare Call Call Return return:yes For Call Call Call If Call Return return:yes If Call Compare Return return:yes Return return:yes Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "aot_compile",
    "source_code": "def aot_compile(gm: torch.fx.GraphModule, args: tuple[Any], kwargs: Optional[dict[str, Any]]=None, *, options: Optional[dict[str, Any]]=None) -> Union[str, list[str]]:\n    from .compile_fx import _aoti_flatten_inputs, compile_fx_aot\n    flat_example_inputs, options = _aoti_flatten_inputs(gm, args, kwargs, options=options)\n    from torch._export.utils import _compiling_state_context\n    with _compiling_state_context():\n        return compile_fx_aot(gm, flat_example_inputs, config_patches=options)",
    "docstring": "Ahead-of-time compile a given FX graph with TorchInductor into a shared library. Args: gm: The FX graph to compile. args: Example arguments kwargs: Example keyword arguments options: Optional dict of config options. See . Returns: Path to the generated shared library, or a list of files generated by AOTI if aot_inductor.package=True. TODO: make it return a list by default",
    "type": "function",
    "file_path": "pytorch\\torch\\_inductor\\__init__.py",
    "ast_data": "FunctionDef name:aot_compile arg:gm arg:args arg:kwargs arguments arg arg arg arg Assign Call With Call Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "_asfreq_compat",
    "source_code": "def _asfreq_compat(index: FreqIndexT, freq) -> FreqIndexT:\n    if len(index) != 0:\n        raise ValueError('Can only set arbitrary freq for empty DatetimeIndex or TimedeltaIndex')\n    if isinstance(index, PeriodIndex):\n        new_index = index.asfreq(freq=freq)\n    elif isinstance(index, DatetimeIndex):\n        new_index = DatetimeIndex([], dtype=index.dtype, freq=freq, name=index.name)\n    elif isinstance(index, TimedeltaIndex):\n        new_index = TimedeltaIndex([], dtype=index.dtype, freq=freq, name=index.name)\n    else:\n        raise TypeError(type(index))\n    return new_index",
    "docstring": "Helper to mimic asfreq on (empty) DatetimeIndex and TimedeltaIndex. Parameters ---------- index : PeriodIndex, DatetimeIndex, or TimedeltaIndex freq : DateOffset Returns ------- same type as index",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\resample.py",
    "ast_data": "FunctionDef name:_asfreq_compat arg:index arg:freq arguments arg arg If Compare Call Raise Call If Call Assign Call If Call Assign Call If Call Assign Call Raise Call Call Return return:yes"
  },
  {
    "library": "django",
    "name": "__setattr__",
    "source_code": "def __setattr__(self, name, value):\n    if name == '_wrapped':\n        self.__dict__.clear()\n    else:\n        self.__dict__.pop(name, None)\n    super().__setattr__(name, value)",
    "docstring": "Set the value of setting. Clear all cached values if _wrapped changes (@override_settings does this) or clear single values when set.",
    "type": "method",
    "file_path": "django\\django\\conf\\__init__.py",
    "ast_data": "FunctionDef name:__setattr__ arg:self arg:name arg:value arguments arg arg arg If Compare Call Call Call Call"
  },
  {
    "library": "django",
    "name": "srid",
    "source_code": "@property\ndef srid(self):\n    return self.srs.srid",
    "docstring": "Shortcut to access the srid of this GDALRaster.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\gdal\\raster\\source.py",
    "ast_data": "FunctionDef name:srid arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_reshape_tensors",
    "source_code": "def _reshape_tensors(tensors, shape):\n    reshaped = []\n    for t in tensors:\n        with ops.colocate_with(t):\n            reshaped.append(array_ops.reshape(t, shape))\n    return reshaped",
    "docstring": "Reshape tensors flattened by _flatten_tensors. Args: tensors: list of of identical length 1D tensors. shape: list of integers describing the desired shape. Product of the elements must equal the length of each tensor. Returns: list of which are the reshaped inputs.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\v1\\all_reduce.py",
    "ast_data": "FunctionDef name:_reshape_tensors arg:tensors arg:shape arguments arg arg Assign For With Call Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "transitive_inputs",
    "source_code": "def transitive_inputs(self, node_name, include_control=True, include_reversed_ref=False, device_name=None):\n    if not self._debug_graphs:\n        raise LookupError('Node inputs are not loaded from partition graphs yet.')\n    device_name = self._infer_device_name(device_name, node_name)\n    input_lists = [self._debug_graphs[device_name].node_inputs]\n    if include_control:\n        input_lists.append(self._debug_graphs[device_name].node_ctrl_inputs)\n    if include_reversed_ref:\n        input_lists.append(self._debug_graphs[device_name].node_reversed_ref_inputs)\n    tracer = debug_graphs.DFSGraphTracer(input_lists, skip_node_names=self._get_merge_node_names(device_name))\n    tracer.trace(node_name)\n    return tracer.inputs()",
    "docstring": "Get the transitive inputs of given node according to partition graphs. Args: node_name: Name of the node. include_control: Include control inputs (True by default). include_reversed_ref: Whether a ref input, say from A to B, is to be also considered as an input from B to A. The rationale is that ref inputs generally let the recipient (e.g., B in this case) mutate the value of the source (e.g., A in this case). So the reverse direction of the ref edge reflects the direction of information flow. device_name: () name of the device. If there is only one device or if node_name exists on only one device, this argument is optional. Returns: ( of ) all transitive inputs to the node, as a list of node names. Raises: LookupError: If node inputs and control inputs have not been loaded from partition graphs yet.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\debug\\lib\\debug_data.py",
    "ast_data": "FunctionDef name:transitive_inputs arg:self arg:node_name arg:include_control arg:include_reversed_ref arg:device_name arguments arg arg arg arg arg If Raise Call Assign Call Assign If Call If Call Assign Call Call Call Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "dtype",
    "source_code": "@property\ndef dtype(self) -> np.dtype[np.datetime64] | DatetimeTZDtype:\n    return self._dtype",
    "docstring": "The dtype for the DatetimeArray. .. warning:: A future version of pandas will change dtype to never be a `DatetimeArray.dtype` is returned.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\arrays\\datetimes.py",
    "ast_data": "FunctionDef name:dtype arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "to_proto",
    "source_code": "def to_proto(self) -> Any:\n    return function_type_pb2.FunctionType(parameters=[p.to_proto() for p in self.parameters.values()], captures=[function_type_pb2.Capture(name=n, type_constraint=serialization.serialize(t)) for n, t in self.captures.items()])",
    "docstring": "Generate a proto representation from the FunctionType.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\core\\function\\polymorphism\\function_type.py",
    "ast_data": "FunctionDef name:to_proto arg:self arguments arg Return return:yes Call Call Call Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_LazyLoader",
    "source_code": "class _LazyLoader(_types.ModuleType):\n\n    def __init__(self, local_name, parent_module_globals, name):\n        self._local_name = local_name\n        self._parent_module_globals = parent_module_globals\n        super(_LazyLoader, self).__init__(name)\n\n    def _load(self):\n        module = _importlib.import_module(self.__name__)\n        self._parent_module_globals[self._local_name] = module\n        self.__dict__.update(module.__dict__)\n        return module\n\n    def __getattr__(self, item):\n        module = self._load()\n        return getattr(module, item)\n\n    def __dir__(self):\n        module = self._load()\n        return dir(module)\n\n    def __reduce__(self):\n        return (__import__, (self.__name__,))",
    "docstring": "Lazily import a module so that we can forward it.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\virtual_root_template_v2.__init__.py",
    "ast_data": "ClassDef name:_LazyLoader FunctionDef name:__init__ arg:self arg:local_name arg:parent_module_globals arg:name arguments arg arg arg arg Assign Assign Call Call FunctionDef name:_load arg:self arguments arg Assign Call Assign Call Return return:yes FunctionDef name:__getattr__ arg:self arg:item arguments arg arg Assign Call Return return:yes Call FunctionDef name:__dir__ arg:self arguments arg Assign Call Return return:yes Call FunctionDef name:__reduce__ arg:self arguments arg Return return:yes"
  },
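The class above implements a standard lazy-import pattern: the real module is only imported on first attribute access, after which the parent module's global is swapped for the real module. Below is a self-contained sketch of the same pattern, adapted from the record and using stdlib `json` purely for illustration.

```python
import importlib
import types

class LazyLoader(types.ModuleType):
    """Import a module only when one of its attributes is first touched."""

    def __init__(self, local_name, parent_module_globals, name):
        self._local_name = local_name
        self._parent_module_globals = parent_module_globals
        super().__init__(name)

    def _load(self):
        module = importlib.import_module(self.__name__)
        # Replace the placeholder so later lookups hit the real module directly.
        self._parent_module_globals[self._local_name] = module
        self.__dict__.update(module.__dict__)
        return module

    def __getattr__(self, item):
        return getattr(self._load(), item)

json = LazyLoader("json", globals(), "json")  # no import has happened yet
print(json.dumps({"a": 1}))                   # first access triggers the real import
```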
  {
    "library": "pytorch",
    "name": "parse_args",
    "source_code": "def parse_args() -> argparse.Namespace:\n    parser = argparse.ArgumentParser(description='Upload test stats to s3')\n    parser.add_argument('--workflow-run-id', type=int, required=True, help='id of the workflow to get artifacts from')\n    parser.add_argument('--workflow-run-attempt', type=int, required=True, help='which retry of the workflow this is')\n    parser.add_argument('--workflow-name', type=str, required=True, help='id of the workflow to get artifacts from')\n    parser.add_argument('--job-id', type=int, required=True, help='id of the workflow to get artifacts from')\n    parser.add_argument('--job-name', type=str, required=True, help='id of the workflow to get artifacts from')\n    parser.add_argument('--repo', type=str, required=False, help='which GitHub repo this workflow run belongs to')\n    parser.add_argument('--debug', action='store_true', help='Enable debug mode')\n    parser.add_argument('--dry-run', action='store_true', help='Enable dry-run mode')\n    parser.add_argument('--artifact-prefix', type=str, required=False, help='artifact prefix to download raw utilizarion data from s3')\n    parser.add_argument('--local-path', type=str, required=False, help='path of the raw utilizarion data from local location')\n    return parser.parse_args()",
    "docstring": "Parse command line arguments. Returns: argparse.Namespace: Parsed arguments.",
    "type": "function",
    "file_path": "pytorch\\tools\\stats\\upload_utilization_stats\\upload_utilization_stats.py",
    "ast_data": "FunctionDef name:parse_args arguments Assign Call Call Call Call Call Call Call Call Call Call Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "convert_inputs_to_dtensor",
    "source_code": "def convert_inputs_to_dtensor(inputs, mesh):\n    if isinstance(inputs, DTensorDistributedValue):\n        return inputs.get_dtensor()\n    elif isinstance(inputs, values_lib.DistributedValues):\n        return convert_per_replica_to_dtensor(inputs, mesh)\n    elif isinstance(inputs, input_util._DTensorIterator):\n        return inputs\n    elif tensor_util.is_tensor(inputs):\n        if context.executing_eagerly():\n            if d_api.is_dtensor(inputs):\n                return inputs\n            else:\n                _raise_unsupported_input_type_error(inputs)\n        else:\n            return inputs\n    else:\n        _raise_unsupported_input_type_error(inputs)",
    "docstring": "Convert any input types to DTensor instance.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\experimental\\dtensor_util.py",
    "ast_data": "FunctionDef name:convert_inputs_to_dtensor arg:inputs arg:mesh arguments arg arg If Call Return return:yes Call If Call Return return:yes Call If Call Return return:yes If Call If Call If Call Return return:yes Call Return return:yes Call"
  },
  {
    "library": "scrapy",
    "name": "send_catch_log_deferred",
    "source_code": "def send_catch_log_deferred(self, signal: Any, **kwargs: Any) -> Deferred[list[tuple[Any, Any]]]:\n    kwargs.setdefault('sender', self.sender)\n    return _signal.send_catch_log_deferred(signal, **kwargs)",
    "docstring": "Like :meth: but supports :ref:. Returns a Deferred that gets fired once all signal handlers have finished. Send a signal, catch exceptions and log them. The keyword arguments are passed to the signal handlers (connected through the :meth: method).",
    "type": "method",
    "file_path": "scrapy\\scrapy\\signalmanager.py",
    "ast_data": "FunctionDef name:send_catch_log_deferred arg:self arg:signal arguments arg arg arg Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "get_default_nowrap_functions",
    "source_code": "@functools.lru_cache(None)\ndef get_default_nowrap_functions() -> set[Callable]:\n    Tensor = torch.Tensor\n    return {Tensor._base.__get__, Tensor.grad.__get__, Tensor._grad.__get__}",
    "docstring": "Return public functions that do not wrap in a subclass when invoked by the default `` actually just creates a new transposed tensor on the fly, and so we SHOULD interpose on these calls (you need to check the implementation of the function to see if this is the case or not). Additionally, if a property accessor doesn't return a Tensor, it doesn't have to be on this list (though it is harmless if it is).",
    "type": "function",
    "file_path": "pytorch\\torch\\overrides.py",
    "ast_data": "FunctionDef name:get_default_nowrap_functions arguments Assign Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "_ntuple_from_first",
    "source_code": "def _ntuple_from_first(n):\n\n    def parse(x):\n        while isinstance(x, collections.abc.Sequence):\n            if len(x) == n:\n                break\n            x = x[0]\n        return tuple(itertools.repeat(x, n))\n    return parse",
    "docstring": "Converts the argument to a tuple of size n with the first element repeated.",
    "type": "function",
    "file_path": "pytorch\\torch\\ao\\nn\\quantized\\modules\\utils.py",
    "ast_data": "FunctionDef name:_ntuple_from_first arg:n arguments arg FunctionDef name:parse arg:x arguments arg While Call If Compare Call Assign Return return:yes Call Call Return return:yes"
  },
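A self-contained demo of the helper above (copied verbatim so it runs without torch): scalars are repeated n times, and nested sequences are unwrapped down to their first leaf before repeating.

```python
import collections.abc
import itertools

def _ntuple_from_first(n):
    # copied from the record above for a self-contained demo
    def parse(x):
        while isinstance(x, collections.abc.Sequence):
            if len(x) == n:
                break
            x = x[0]
        return tuple(itertools.repeat(x, n))
    return parse

parse3 = _ntuple_from_first(3)
print(parse3(5))         # (5, 5, 5) -- scalar repeated
print(parse3([(7, 8)]))  # (7, 7, 7) -- descends to the first leaf, then repeats
```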
  {
    "library": "tensorflow",
    "name": "serialize_feature_column",
    "source_code": "@doc_controls.header(_FEATURE_COLUMN_DEPRECATION_WARNING)\n@tf_export('__internal__.feature_column.serialize_feature_column', v1=[])\n@deprecation.deprecated(None, _FEATURE_COLUMN_DEPRECATION_RUNTIME_WARNING)\ndef serialize_feature_column(fc):\n    if isinstance(fc, six.string_types):\n        return fc\n    elif isinstance(fc, fc_types.FeatureColumn):\n        return {'class_name': fc.__class__.__name__, 'config': fc.get_config()}\n    else:\n        raise ValueError('Instance: {} is not a FeatureColumn'.format(fc))",
    "docstring": "Serializes a FeatureColumn or a raw string key. This method should only be used to serialize parent FeatureColumns when implementing FeatureColumn.get_config(), else serialize_feature_columns() is preferable. This serialization also keeps information of the FeatureColumn class, so deserialization is possible without knowing the class type. For example: a = numeric_column('x') a.get_config() gives: { 'key': 'price', 'shape': (1,), 'default_value': None, 'dtype': 'float32', 'normalizer_fn': None } While serialize_feature_column(a) gives: { 'class_name': 'NumericColumn', 'config': { 'key': 'price', 'shape': (1,), 'default_value': None, 'dtype': 'float32', 'normalizer_fn': None } } Args: fc: A FeatureColumn or raw feature key string. Returns: Keras serialization for FeatureColumns, leaves string keys unaffected. Raises: ValueError if called with input that is not string or FeatureColumn.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\feature_column\\serialization.py",
    "ast_data": "FunctionDef name:serialize_feature_column arg:fc arguments arg If Call Return return:yes If Call Return return:yes Call Raise Call Call Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_render_normalized_cost_bar",
    "source_code": "def _render_normalized_cost_bar(self, cost, max_cost, length):\n    num_ticks = int(np.ceil(float(cost) / max_cost * length))\n    num_ticks = num_ticks or 1\n    output = RL('[', font_attr=self._LINE_COST_ATTR)\n    output += RL('|' * num_ticks + ' ' * (length - num_ticks), font_attr=['bold', self._LINE_COST_ATTR])\n    output += RL(']', font_attr=self._LINE_COST_ATTR)\n    return output",
    "docstring": "Render a text bar representing a normalized cost. Args: cost: the absolute value of the cost. max_cost: the maximum cost value to normalize the absolute cost with. length: (int) length of the cost bar, in number of characters, excluding the brackets on the two ends. Returns: An instance of debugger_cli_common.RichTextLine.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\debug\\cli\\profile_analyzer_cli.py",
    "ast_data": "FunctionDef name:_render_normalized_cost_bar arg:self arg:cost arg:max_cost arg:length arguments arg arg arg arg Assign Call Call Call Assign BoolOp Assign Call Call Call Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "partial_fit",
    "source_code": "@available_if(lambda est: est._check_solver())\n@_fit_context(prefer_skip_nested_validation=True)\ndef partial_fit(self, X, y, sample_weight=None, classes=None):\n    if _check_partial_fit_first_call(self, classes):\n        self._label_binarizer = LabelBinarizer()\n        if type_of_target(y).startswith('multilabel'):\n            self._label_binarizer.fit(y)\n        else:\n            self._label_binarizer.fit(classes)\n    return self._fit(X, y, sample_weight=sample_weight, incremental=True)",
    "docstring": "Update the model with a single iteration over the given data. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) The input data. y : array-like of shape (n_samples,) The target values. sample_weight : array-like of shape (n_samples,), default=None Sample weights. .. versionadded:: 1.7 classes : array of shape (n_classes,), default=None Classes across all calls to partial_fit. Can be obtained via , where y_all is the target vector of the entire dataset. This argument is required for the first call to partial_fit and can be omitted in the subsequent calls. Note that y doesn't need to contain all labels in . Returns ------- self : object Trained MLP model.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\neural_network\\_multilayer_perceptron.py",
    "ast_data": "FunctionDef name:partial_fit arg:self arg:X arg:y arg:sample_weight arg:classes arguments arg arg arg arg arg If Call Assign Call If Call Call Call Call Return return:yes Call Call arguments arg Call Call"
  },
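A minimal usage sketch for the method above (assuming scikit-learn is installed; the data here is synthetic since the record gives no dataset): `classes` is mandatory on the first `partial_fit` call and optional afterwards.

```python
import numpy as np
from sklearn.neural_network import MLPClassifier

rng = np.random.RandomState(0)
X, y = rng.rand(20, 4), rng.randint(0, 3, size=20)

clf = MLPClassifier(hidden_layer_sizes=(8,), random_state=0)
clf.partial_fit(X, y, classes=np.array([0, 1, 2]))  # first call: classes required
clf.partial_fit(X, y)                               # later calls: classes omitted
print(clf.predict(X[:3]))
```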
  {
    "library": "pytorch",
    "name": "serialize",
    "source_code": "def serialize(self) -> _WireProtocolPickledInput:\n    from torch.fx._graph_pickler import GraphPickler\n    return _WireProtocolPickledInput(GraphPickler.dumps(self))",
    "docstring": "Turns this object into a _WireProtocolPickledInput which can be directly transferred across a stream.",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\compile_fx_ext.py",
    "ast_data": "FunctionDef name:serialize arg:self arguments arg Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "run_eagerly",
    "source_code": "@property\ndef run_eagerly(self):\n    if self._run_eagerly is True and (not context.executing_eagerly()):\n        raise ValueError('You can only set `run_eagerly=True` if eager execution is enabled.')\n    if not self.dynamic:\n        if self._run_eagerly is None:\n            return def_function.functions_run_eagerly()\n        else:\n            return self._run_eagerly\n    else:\n        if not context.executing_eagerly():\n            raise ValueError('Your model contains layers that can only be successfully run in eager execution (layers constructed with `dynamic=True`). You must enable eager execution with `tf.enable_eager_execution()`.')\n        if self._run_eagerly is False:\n            raise ValueError('Your model contains layers that can only be successfully run in eager execution (layers constructed with `dynamic=True`). You cannot set `run_eagerly=False`.')\n        return context.executing_eagerly()",
    "docstring": "Settable attribute indicating whether the model should run eagerly. Running eagerly means that your model will be run step by step, like Python code. Your model might run slower, but it should become easier for you to debug it by stepping into individual layer calls. By default, we will attempt to compile your model to a static graph to deliver the best execution performance. Returns: Boolean, whether the model should run eagerly.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\training_v1.py",
    "ast_data": "FunctionDef name:run_eagerly arg:self arguments arg If BoolOp Compare Call Raise Call If If Compare Return return:yes Call Return return:yes If Call Raise Call If Compare Raise Call Return return:yes Call"
  },
  {
    "library": "django",
    "name": "country_name",
    "source_code": "def country_name(self, query):\n    return self.country(query)['country_name']",
    "docstring": "Return the country name for the given IP Address or FQDN.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\geoip2.py",
    "ast_data": "FunctionDef name:country_name arg:self arg:query arguments arg arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "UnpackedDualTensor",
    "source_code": "class UnpackedDualTensor(NamedTuple):\n    primal: torch.Tensor\n    tangent: Optional[torch.Tensor]",
    "docstring": "Namedtuple returned by :func: containing the primal and tangent components of the dual tensor. See :func: for more details.",
    "type": "class",
    "file_path": "pytorch\\torch\\autograd\\forward_ad.py",
    "ast_data": "ClassDef name:UnpackedDualTensor"
  },
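A short forward-mode AD example showing where this namedtuple appears (assuming a recent PyTorch): `unpack_dual` returns an `UnpackedDualTensor` whose `tangent` carries the directional derivative.

```python
import torch
import torch.autograd.forward_ad as fwAD

with fwAD.dual_level():
    primal = torch.tensor([1.0, 2.0])
    tangent = torch.tensor([1.0, 0.0])
    out = fwAD.make_dual(primal, tangent) * 3.0
    unpacked = fwAD.unpack_dual(out)  # UnpackedDualTensor(primal=..., tangent=...)
    print(unpacked.primal)            # tensor([3., 6.])
    print(unpacked.tangent)           # tensor([3., 0.])
```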
  {
    "library": "tensorflow",
    "name": "_maybe_get_dtype",
    "source_code": "def _maybe_get_dtype(x):\n    if isinstance(x, numbers.Real):\n        return x\n    if isinstance(x, tensor_lib.Tensor):\n        return x.dtype.as_numpy_dtype\n    if isinstance(x, dtypes.DType):\n        return x.as_numpy_dtype\n    if isinstance(x, tensor_shape.TensorShape):\n        return np.int32\n    if isinstance(x, (list, tuple)):\n        raise ValueError(f'Cannot determine dtype.  Got sequence {x}.')\n    return x",
    "docstring": "Returns a numpy type if available from x. Skips if x is numpy.ndarray.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\override_binary_operator.py",
    "ast_data": "FunctionDef name:_maybe_get_dtype arg:x arguments arg If Call Return return:yes If Call Return return:yes If Call Return return:yes If Call Return return:yes If Call Raise Call Return return:yes"
  },
  {
    "library": "cherrypy",
    "name": "expose",
    "source_code": "def expose(func=None, alias=None):\n\n    def expose_(func):\n        func.exposed = True\n        if alias is not None:\n            if isinstance(alias, text_or_bytes):\n                parents[alias.replace('.', '_')] = func\n            else:\n                for a in alias:\n                    parents[a.replace('.', '_')] = func\n        return func\n    import sys\n    import types\n    decoratable_types = (types.FunctionType, types.MethodType, type)\n    if isinstance(func, decoratable_types):\n        if alias is None:\n            func.exposed = True\n            return func\n        else:\n            parents = sys._getframe(1).f_locals\n            return expose_(func)\n    elif func is None:\n        if alias is None:\n            parents = sys._getframe(1).f_locals\n            return expose_\n        else:\n            parents = sys._getframe(1).f_locals\n            return expose_\n    else:\n        parents = sys._getframe(1).f_locals\n        alias = func\n        return expose_",
    "docstring": "Expose the function or class. Optionally provide an alias or set of aliases.",
    "type": "function",
    "file_path": "cherrypy\\cherrypy\\_helper.py",
    "ast_data": "FunctionDef name:expose arg:func arg:alias arguments arg arg FunctionDef name:expose_ arg:func arguments arg Assign If Compare If Call Assign Call For Assign Call Return return:yes Assign If Call If Compare Assign Return return:yes Assign Call Return return:yes Call If Compare If Compare Assign Call Return return:yes Assign Call Return return:yes Assign Call Assign Return return:yes"
  },
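A minimal sketch of the common ways the decorator above is used (assuming cherrypy is installed): bare decoration, and decoration with an alias, which writes the aliased name into the class namespace via the caller's frame locals.

```python
import cherrypy

class Root:
    @cherrypy.expose                # plain: just sets index.exposed = True
    def index(self):
        return "Hello"

    @cherrypy.expose(alias="show")  # also reachable at /show via the frame-local alias
    def display(self):
        return "Aliased"

if __name__ == "__main__":
    cherrypy.quickstart(Root())     # serves on http://127.0.0.1:8080/
```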
  {
    "library": "tensorflow",
    "name": "StudentTWithAbsDfSoftplusScale",
    "source_code": "class StudentTWithAbsDfSoftplusScale(StudentT):\n\n    @deprecation.deprecated('2019-01-01', 'Use `tfd.StudentT(tf.floor(tf.abs(df)), loc, tf.nn.softplus(scale)) instead.', warn_once=True)\n    def __init__(self, df, loc, scale, validate_args=False, allow_nan_stats=True, name='StudentTWithAbsDfSoftplusScale'):\n        parameters = dict(locals())\n        with ops.name_scope(name, values=[df, scale]) as name:\n            super(StudentTWithAbsDfSoftplusScale, self).__init__(df=math_ops.floor(math_ops.abs(df)), loc=loc, scale=nn.softplus(scale, name='softplus_scale'), validate_args=validate_args, allow_nan_stats=allow_nan_stats, name=name)\n        self._parameters = parameters",
    "docstring": "StudentT with and .",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\distributions\\student_t.py",
    "ast_data": "ClassDef name:StudentTWithAbsDfSoftplusScale FunctionDef name:__init__ arg:self arg:df arg:loc arg:scale arg:validate_args arg:allow_nan_stats arg:name arguments arg arg arg arg arg arg arg Assign Call Call With Call Call Call Call Call Call Assign Call"
  },
  {
    "library": "matplotlib",
    "name": "mouseEventCoords",
    "source_code": "def mouseEventCoords(self, pos=None):\n    if pos is None:\n        pos = self.mapFromGlobal(QtGui.QCursor.pos())\n    elif hasattr(pos, 'position'):\n        pos = pos.position()\n    elif hasattr(pos, 'pos'):\n        pos = pos.pos()\n    x = pos.x()\n    y = self.figure.bbox.height / self.device_pixel_ratio - pos.y()\n    return (x * self.device_pixel_ratio, y * self.device_pixel_ratio)",
    "docstring": "Calculate mouse coordinates in physical pixels. Qt uses logical pixels, but the figure is scaled to physical pixels for rendering. Transform to physical pixels so that all of the down-stream transforms work as expected. Also, the origin is different and needs to be corrected.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\backends\\backend_qt.py",
    "ast_data": "FunctionDef name:mouseEventCoords arg:self arg:pos arguments arg arg If Compare Assign Call Call If Call Assign Call If Call Assign Call Assign Call Assign Call Return return:yes"
  },
  {
    "library": "cryptography",
    "name": "rsa_crt_iqmp",
    "source_code": "def rsa_crt_iqmp(p: int, q: int) -> int:\n    return _modinv(q, p)",
    "docstring": "Compute the CRT (q ** -1) % p value from RSA primes p and q.",
    "type": "function",
    "file_path": "cryptography\\src\\cryptography\\hazmat\\primitives\\asymmetric\\rsa.py",
    "ast_data": "FunctionDef name:rsa_crt_iqmp arg:p arg:q arguments arg arg Return return:yes Call"
  },
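A tiny check of the CRT coefficient helper above (assuming the cryptography package is installed; toy primes for illustration, not secure sizes): the result is the modular inverse of q modulo p.

```python
from cryptography.hazmat.primitives.asymmetric.rsa import rsa_crt_iqmp

p, q = 61, 53          # toy primes for illustration only
iqmp = rsa_crt_iqmp(p, q)
print(iqmp)            # 38
print((iqmp * q) % p)  # 1 -- confirms iqmp == q**-1 mod p
```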
  {
    "library": "cryptography",
    "name": "reset_nonce",
    "source_code": "@abc.abstractmethod\ndef reset_nonce(self, nonce: bytes) -> None:\n    pass",
    "docstring": "Resets the nonce for the cipher context to the provided value. Raises an exception if it does not support reset or if the provided nonce does not have a valid length.",
    "type": "method",
    "file_path": "cryptography\\src\\cryptography\\hazmat\\primitives\\ciphers\\base.py",
    "ast_data": "FunctionDef name:reset_nonce arg:self arg:nonce arguments arg arg"
  },
  {
    "library": "matplotlib",
    "name": "press_pan",
    "source_code": "def press_pan(self, event):\n    if event.button not in [MouseButton.LEFT, MouseButton.RIGHT] or event.x is None or event.y is None:\n        return\n    axes = self._start_event_axes_interaction(event, method='pan')\n    if not axes:\n        return\n    for ax in axes:\n        ax.start_pan(event.x, event.y, event.button)\n    self.canvas.mpl_disconnect(self._id_drag)\n    id_drag = self.canvas.mpl_connect('motion_notify_event', self.drag_pan)\n    self._pan_info = self._PanInfo(button=event.button, axes=axes, cid=id_drag)",
    "docstring": "Callback for mouse button press in pan/zoom mode.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\backend_bases.py",
    "ast_data": "FunctionDef name:press_pan arg:self arg:event arguments arg arg If BoolOp Compare Compare Compare Return return:no Assign Call If Return return:no For Call Call Assign Call Assign Call"
  },
  {
    "library": "tensorflow",
    "name": "_stop",
    "source_code": "def _stop(self) -> None:\n    self._server.stop()",
    "docstring": "Stops the server. Raises: tf.errors.OpError: Or one of its subclasses if an error occurs while stopping the server.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\data\\experimental\\service\\server_lib.py",
    "ast_data": "FunctionDef name:_stop arg:self arguments arg Call"
  },
  {
    "library": "scikit-learn",
    "name": "PositiveSpectrumWarning",
    "source_code": "class PositiveSpectrumWarning(UserWarning):\n    pass",
    "docstring": "Warning raised when the eigenvalues of a PSD matrix have issues This warning is typically raised by `` when the eigenvalues of a positive semidefinite (PSD) matrix such as a gram matrix (kernel) present significant negative eigenvalues, or bad conditioning i.e. very small non-zero eigenvalues compared to the largest eigenvalue. .. versionadded:: 0.22",
    "type": "class",
    "file_path": "scikit-learn\\sklearn\\exceptions.py",
    "ast_data": "ClassDef name:PositiveSpectrumWarning"
  },
  {
    "library": "numpy",
    "name": "get_shared_lib_extension",
    "source_code": "def get_shared_lib_extension(is_python_ext=False):\n    confvars = distutils.sysconfig.get_config_vars()\n    so_ext = confvars.get('EXT_SUFFIX', '')\n    if not is_python_ext:\n        if sys.platform.startswith('linux') or sys.platform.startswith('gnukfreebsd'):\n            so_ext = '.so'\n        elif sys.platform.startswith('darwin'):\n            so_ext = '.dylib'\n        elif sys.platform.startswith('win'):\n            so_ext = '.dll'\n        elif 'SOABI' in confvars:\n            so_ext = so_ext.replace('.' + confvars.get('SOABI'), '', 1)\n    return so_ext",
    "docstring": "Return the correct file extension for shared libraries. Parameters ---------- is_python_ext : bool, optional Whether the shared library is a Python extension. Default is False. Returns ------- so_ext : str The shared library extension. Notes ----- For Python shared libs, will typically be '.so' on Linux and OS X, and '.pyd' on Windows. For Python >= 3.2 has a tag prepended on POSIX systems according to PEP 3149.",
    "type": "function",
    "file_path": "numpy\\numpy\\distutils\\misc_util.py",
    "ast_data": "FunctionDef name:get_shared_lib_extension arg:is_python_ext arguments arg Assign Call Assign Call If If BoolOp Call Call Assign If Call Assign If Call Assign If Compare Assign Call Call Return return:yes"
  },
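Both modes of the helper above, for reference (note that `numpy.distutils` is deprecated and absent on Python >= 3.12, so this assumes an older environment where it still imports):

```python
from numpy.distutils.misc_util import get_shared_lib_extension

print(get_shared_lib_extension())                    # e.g. '.so' on Linux, '.dylib' on macOS
print(get_shared_lib_extension(is_python_ext=True))  # full EXT_SUFFIX, e.g. '.cpython-311-x86_64-linux-gnu.so'
```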
  {
    "library": "django",
    "name": "mapping",
    "source_code": "def mapping(data_source, geom_name='geom', layer_key=0, multi_geom=False):\n    if isinstance(data_source, str):\n        data_source = DataSource(data_source)\n    elif isinstance(data_source, DataSource):\n        pass\n    else:\n        raise TypeError('Data source parameter must be a string or a DataSource object.')\n    _mapping = {}\n    for field in data_source[layer_key].fields:\n        mfield = field.lower()\n        if mfield[-1:] == '_':\n            mfield += 'field'\n        _mapping[mfield] = field\n    gtype = data_source[layer_key].geom_type\n    if multi_geom:\n        gtype.to_multi()\n    _mapping[geom_name] = str(gtype).upper()\n    return _mapping",
    "docstring": "Given a DataSource, generate a dictionary that may be used for invoking the LayerMapping utility. Keyword Arguments: => The name of the geometry field to use for the model. => The key for specifying which layer in the DataSource to use; defaults to 0 (the first layer). May be an integer index or a string identifier for the layer. => Boolean (default: False) - specify as multigeometry.",
    "type": "function",
    "file_path": "django\\django\\contrib\\gis\\utils\\ogrinspect.py",
    "ast_data": "FunctionDef name:mapping arg:data_source arg:geom_name arg:layer_key arg:multi_geom arguments arg arg arg arg If Call Assign Call If Call Raise Call Assign For Assign Call If Compare Assign Assign If Call Assign Call Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "_apply_tickdir",
    "source_code": "def _apply_tickdir(self, tickdir):\n    tickdir = mpl._val_or_rc(tickdir, f'{self.__name__}.direction')\n    _api.check_in_list(['in', 'out', 'inout'], tickdir=tickdir)\n    self._tickdir = tickdir",
    "docstring": "Set tick direction. Valid values are 'out', 'in', 'inout'.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\axis.py",
    "ast_data": "FunctionDef name:_apply_tickdir arg:self arg:tickdir arguments arg arg Assign Call Call Assign"
  },
  {
    "library": "tensorflow",
    "name": "tfr_funcs_gen_from_module",
    "source_code": "def tfr_funcs_gen_from_module(source, op_defs, method_prefix=None, op_libraries=None):\n    if op_libraries:\n        prefix_len = len('gen_')\n        for m in op_libraries:\n            lib_dir = os.path.dirname(m.__file__)\n            lib_name = os.path.basename(m.__file__)[prefix_len:].replace('.py', '.so')\n            lib_path = os.path.join(lib_dir, lib_name)\n            if os.path.exists(lib_path):\n                logging.info('load file: ' + lib_path)\n                load_library.load_op_library(lib_path)\n    else:\n        lib_dir = os.path.dirname(source.__file__)\n        for lib_name in os.listdir(lib_dir):\n            if lib_name.endswith('.so'):\n                lib_path = os.path.join(lib_dir, lib_name)\n                logging.info('load file: ' + lib_path)\n                load_library.load_op_library(lib_path)\n    py_funcs = [func for name, func in tf_inspect.getmembers(source, tf_inspect.isfunction) if not method_prefix or name.startswith(method_prefix)]\n    py_funcs = sorted(py_funcs, key=lambda x: x.__code__.co_firstlineno)\n    mlir_funcs = [tfr_gen(func, op_defs) for func in py_funcs]\n    return mlir_funcs",
    "docstring": "Parse the input source module and emit the TFR functions.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\compiler\\mlir\\tfr\\python\\tfr_gen.py",
    "ast_data": "FunctionDef name:tfr_funcs_gen_from_module arg:source arg:op_defs arg:method_prefix arg:op_libraries arguments arg arg arg arg If Assign Call For Assign Call Assign Call Call Assign Call If Call Call Call Assign Call For Call If Call Assign Call Call Call Assign Call BoolOp Call Assign Call arguments arg Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "FixedLenFeature",
    "source_code": "@tf_export('io.FixedLenFeature', v1=['io.FixedLenFeature', 'FixedLenFeature'])\nclass FixedLenFeature(collections.namedtuple('FixedLenFeature', ['shape', 'dtype', 'default_value'])):\n\n    def __new__(cls, shape, dtype, default_value=None):\n        return super(FixedLenFeature, cls).__new__(cls, shape, dtype, default_value)",
    "docstring": "Configuration for parsing a fixed-length input feature. To treat sparse input as dense, provide a ; otherwise, the parse functions will fail on any examples missing this feature. Fields: shape: Shape of input data. dtype: Data type of input. default_value: Value to be used if an example is missing this feature. It must be compatible with and of the specified .",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\parsing_config.py",
    "ast_data": "ClassDef name:FixedLenFeature Call FunctionDef name:__new__ arg:cls arg:shape arg:dtype arg:default_value arguments arg arg arg arg Return return:yes Call Call Call"
  },
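A small end-to-end sketch of the default-value behavior described above (assuming TensorFlow 2.x): the 'age' feature is absent from the serialized example, so parsing fills in the declared default instead of failing.

```python
import tensorflow as tf

features = {
    "age": tf.io.FixedLenFeature(shape=[], dtype=tf.int64, default_value=0),
    "weights": tf.io.FixedLenFeature(shape=[2], dtype=tf.float32),
}
example = tf.train.Example(features=tf.train.Features(feature={
    "weights": tf.train.Feature(float_list=tf.train.FloatList(value=[1.0, 2.0])),
}))
parsed = tf.io.parse_single_example(example.SerializeToString(), features)
print(parsed["age"].numpy())      # 0 -- default used for the missing feature
print(parsed["weights"].numpy())  # [1. 2.]
```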
  {
    "library": "matplotlib",
    "name": "render_rect_filled",
    "source_code": "def render_rect_filled(self, output: Output, x1: float, y1: float, x2: float, y2: float) -> None:\n    output.rects.append((x1, y1, x2, y2))",
    "docstring": "Draw a filled rectangle from (*x1*, *y1*) to (*x2*, *y2*).",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\_mathtext.py",
    "ast_data": "FunctionDef name:render_rect_filled arg:self arg:output arg:x1 arg:y1 arg:x2 arg:y2 arguments arg arg arg arg arg arg Call"
  },
  {
    "library": "matplotlib",
    "name": "clip_cmd",
    "source_code": "def clip_cmd(self, cliprect, clippath):\n    cmds = []\n    while (self._cliprect, self._clippath) != (cliprect, clippath) and self.parent is not None:\n        cmds.extend(self.pop())\n    if (self._cliprect, self._clippath) != (cliprect, clippath) or self.parent is None:\n        cmds.extend(self.push())\n        if self._cliprect != cliprect:\n            cmds.extend([cliprect, Op.rectangle, Op.clip, Op.endpath])\n        if self._clippath != clippath:\n            path, affine = clippath.get_transformed_path_and_affine()\n            cmds.extend(PdfFile.pathOperations(path, affine, simplify=False) + [Op.clip, Op.endpath])\n    return cmds",
    "docstring": "Set clip rectangle. Calls and .",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\backends\\backend_pdf.py",
    "ast_data": "FunctionDef name:clip_cmd arg:self arg:cliprect arg:clippath arguments arg arg arg Assign While BoolOp Compare Compare Call Call If BoolOp Compare Compare Call Call If Compare Call If Compare Assign Call Call Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "VertexSelector",
    "source_code": "class VertexSelector:\n\n    def __init__(self, line):\n        if line.axes is None:\n            raise RuntimeError('You must first add the line to the Axes')\n        if line.get_picker() is None:\n            raise RuntimeError('You must first set the picker property of the line')\n        self.axes = line.axes\n        self.line = line\n        self.cid = self.canvas.callbacks._connect_picklable('pick_event', self.onpick)\n        self.ind = set()\n    canvas = property(lambda self: self.axes.get_figure(root=True).canvas)\n\n    def process_selected(self, ind, xs, ys):\n        pass\n\n    def onpick(self, event):\n        if event.artist is not self.line:\n            return\n        self.ind ^= set(event.ind)\n        ind = sorted(self.ind)\n        xdata, ydata = self.line.get_data()\n        self.process_selected(ind, xdata[ind], ydata[ind])",
    "docstring": "Manage the callbacks to maintain a list of selected vertices for . Derived classes should override the method to do something with the picks. Here is an example which highlights the selected verts with red circles:: import numpy as np import matplotlib.pyplot as plt import matplotlib.lines as lines class HighlightSelected(lines.VertexSelector): def __init__(self, line, fmt='ro', **kwargs): super().__init__(line) self.markers, = self.axes.plot([], [], fmt, **kwargs) def process_selected(self, ind, xs, ys): self.markers.set_data(xs, ys) self.canvas.draw() fig, ax = plt.subplots() x, y = np.random.rand(2, 30) line, = ax.plot(x, y, 'bs-', picker=5) selector = HighlightSelected(line) plt.show()",
    "type": "class",
    "file_path": "matplotlib\\lib\\matplotlib\\lines.py",
    "ast_data": "ClassDef name:VertexSelector FunctionDef name:__init__ arg:self arg:line arguments arg arg If Compare Raise Call If Compare Call Raise Call Assign Assign Assign Call Assign Call Assign Call arguments arg Call FunctionDef name:process_selected arg:self arg:ind arg:xs arg:ys arguments arg arg arg arg FunctionDef name:onpick arg:self arg:event arguments arg arg If Compare Return return:no Call Assign Call Assign Call Call"
  },
  {
    "library": "tensorflow",
    "name": "current_source_info_metadata",
    "source_code": "def current_source_info_metadata(op_type=None, op_name=None, skip_frames=1):\n    full_filename, lineno = inspect.stack()[skip_frames][1:3]\n    filename = os.path.basename(full_filename)\n    return OpMetadata(op_type=op_type, op_name=op_name, source_file=filename, source_line=lineno)",
    "docstring": "Helper for use in source mapping that returns an OpMetadata object.",
    "type": "function",
    "file_path": "tensorflow\\third_party\\xla\\xla\\python\\xla_client.py",
    "ast_data": "FunctionDef name:current_source_info_metadata arg:op_type arg:op_name arg:skip_frames arguments arg arg arg Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "serialize",
    "source_code": "@dispatch.add_dispatch_support\ndef serialize(activation):\n    if hasattr(activation, '__name__') and activation.__name__ in _TF_ACTIVATIONS_V2:\n        return _TF_ACTIVATIONS_V2[activation.__name__]\n    return serialize_keras_object(activation)",
    "docstring": "Returns the string identifier of an activation function. Args: activation : Function object. Returns: String denoting the name attribute of the input function For example: >>> tf.keras.activations.serialize(tf.keras.activations.tanh) 'tanh' >>> tf.keras.activations.serialize(tf.keras.activations.sigmoid) 'sigmoid' >>> tf.keras.activations.serialize('abcd') Traceback (most recent call last): ... ValueError: ('Cannot serialize', 'abcd') Raises: ValueError: The input function is not a valid one.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\activations.py",
    "ast_data": "FunctionDef name:serialize arg:activation arguments arg If BoolOp Call Compare Return return:yes Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "get_graph_provenance_json",
    "source_code": "@compatibility(is_backward_compatible=False)\ndef get_graph_provenance_json(graph: Graph) -> dict[str, Any]:\n    provenance_tracking_json = {}\n    for node in graph.nodes:\n        if node.op == 'call_function':\n            provenance_tracking_json[node.name] = [source.to_dict() for source in node.meta['from_node']] if 'from_node' in node.meta else []\n    return provenance_tracking_json",
    "docstring": "Given an fx.Graph, return a json that contains the provenance information of each node.",
    "type": "function",
    "file_path": "pytorch\\torch\\fx\\traceback.py",
    "ast_data": "FunctionDef name:get_graph_provenance_json arg:graph arguments arg Assign For If Compare Assign Compare Call Return return:yes Call"
  },
  {
    "library": "sphinx",
    "name": "missing_reference",
    "source_code": "def missing_reference(app: Sphinx, env: BuildEnvironment, node: pending_xref, contnode: TextElement) -> nodes.reference | None:\n    return resolve_reference_detect_inventory(env, node, contnode)",
    "docstring": "Attempt to resolve a missing reference via intersphinx references.",
    "type": "function",
    "file_path": "sphinx\\sphinx\\ext\\intersphinx\\_resolve.py",
    "ast_data": "FunctionDef name:missing_reference arg:app arg:env arg:node arg:contnode arguments arg arg arg arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "SubgraphMatcherWithNameNodeMap",
    "source_code": "@compatibility(is_backward_compatible=False)\nclass SubgraphMatcherWithNameNodeMap(SubgraphMatcher):\n\n    def __init__(self, pattern_gm: GraphModule, match_output: bool=False, match_placeholder: bool=False, remove_overlapping_matches: bool=True, ignore_literals: bool=False) -> None:\n        pattern_gm, name_node_map = _split_to_graph_and_name_node_map(pattern_gm)\n        self.name_node_map = name_node_map\n        super().__init__(pattern_gm.graph, match_output, match_placeholder, remove_overlapping_matches, ignore_literals)\n\n    def match(self, graph: Graph) -> list[InternalMatch]:\n        internal_matches = super().match(graph)\n        for internal_match in internal_matches:\n            for k, n in self.name_node_map.items():\n                internal_match.name_node_map[k] = internal_match.nodes_map[n]\n        return internal_matches",
    "docstring": "Extends SubgraphMatcher to support querying the matched subgraph nodes through node name, this requires pattern to have specific format (returning and additional dictionary at the output, that has node name as key, and the node in the pattern graph as value, see Example for more details) Difference with SubgraphMatcher is that it takes a GraphModule as input during initialization since we need to modify the graph (which requires the GraphModule) Example:: def pattern(x, weight): conv = F.conv2d(x, weight) relu = F.relu(conv) return relu, {\"conv\": conv, \"relu\": relu} def target_graph(x, weight): conv = F.conv2d(x, weight) relu = F.relu(conv) relu *= 2 return relu pattern_gm = export_for_training(pattern, example_inputs).module() target_gm = export_for_training(target_graph, example_inputs).module() matcher = SubgraphMatcherWithNameNodeMap(pattern_gm) matches = matcher.match(target_gm) for match in matches: match.name_node_map[\"conv\"].meta[\"annotation\"] = ...",
    "type": "class",
    "file_path": "pytorch\\torch\\fx\\passes\\utils\\matcher_with_name_node_map_utils.py",
    "ast_data": "ClassDef name:SubgraphMatcherWithNameNodeMap FunctionDef name:__init__ arg:self arg:pattern_gm arg:match_output arg:match_placeholder arg:remove_overlapping_matches arg:ignore_literals arguments arg arg arg arg arg arg Assign Call Assign Call Call FunctionDef name:match arg:self arg:graph arguments arg arg Assign Call Call For For Call Assign Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "visit_node",
    "source_code": "def visit_node(self, node):\n    raise NotImplementedError('Subclasses must implement this.')",
    "docstring": "Visitor function. Args: node: Node Returns: bool, whether the node should be revisited; subclasses can visit every reachable node exactly once by always returning False",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\autograph\\pyct\\cfg.py",
    "ast_data": "FunctionDef name:visit_node arg:self arg:node arguments arg arg Raise Call"
  },
  {
    "library": "tensorflow",
    "name": "CreateShapeFromNumpy",
    "source_code": "def CreateShapeFromNumpy(value):\n    if isinstance(value, tuple):\n        return Shape(xla_data_pb2.TUPLE, [CreateShapeFromNumpy(component) for component in value])\n    else:\n        return _CreateShapeFromNumpy(value)",
    "docstring": "Create a Shape from a Numpy array or a nested tuple structure thereof. Args: value: Numpy array or (possibly nested) tuple structure that bottoms out in Numpy arrays. Returns: A Shape object.",
    "type": "function",
    "file_path": "tensorflow\\third_party\\xla\\xla\\python_api\\xla_shape.py",
    "ast_data": "FunctionDef name:CreateShapeFromNumpy arg:value arguments arg If Call Return return:yes Call Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "EnumerableShardingSpec",
    "source_code": "@dataclass\nclass EnumerableShardingSpec(ShardingSpec):\n    shards: list[ShardMetadata]\n\n    def __post_init__(self):\n        if len(self.shards) == 0:\n            raise ValueError(f'Empty shard list provided: {self.shards}')\n        rank = -1\n        for shard in self.shards:\n            if rank != -1 and rank != len(shard.shard_offsets):\n                raise ValueError(f'Found inconsistent ranks for shards: {rank} and {len(shard.shard_offsets)}')\n            rank = len(shard.shard_offsets)\n        validate_non_overlapping_shards_metadata(self.shards)\n\n    def build_metadata(self, tensor_sizes: torch.Size, tensor_properties: sharded_tensor_meta.TensorProperties) -> sharded_tensor_meta.ShardedTensorMetadata:\n        check_tensor(self.shards, tensor_sizes)\n        return sharded_tensor_meta.ShardedTensorMetadata(self.shards, tensor_sizes, tensor_properties)\n\n    def shard(self, tensor: torch.Tensor, src_rank: int=0, process_group=None) -> 'ShardedTensor':\n        raise NotImplementedError('EnumerableShardingSpec.shard not implemented yet!')",
    "docstring": "This is a type of PlacementSpec that allows users to specify a generic sharding scheme by enumerating exactly how each shard is laid out. Args: shards(List[ShardMetadata]): List of :class: objects representing each shard. Note that none of the shards should overlap.",
    "type": "class",
    "file_path": "pytorch\\torch\\distributed\\_shard\\sharding_spec\\api.py",
    "ast_data": "ClassDef name:EnumerableShardingSpec FunctionDef name:__post_init__ arg:self arguments arg If Compare Call Raise Call Assign For If BoolOp Compare Compare Call Raise Call Call Assign Call Call FunctionDef name:build_metadata arg:self arg:tensor_sizes arg:tensor_properties arguments arg arg arg Call Return return:yes Call FunctionDef name:shard arg:self arg:tensor arg:src_rank arg:process_group arguments arg arg arg arg Raise Call"
  },
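A construction-only sketch for the spec above (this is a private, underscore-prefixed PyTorch API, so the import path follows the record's file_path but may shift between releases): `__post_init__` validates rank consistency and overlap at construction time, and no process group is needed for that.

```python
from torch.distributed._shard.sharding_spec import EnumerableShardingSpec, ShardMetadata

# Two non-overlapping row shards of a (4, 4) tensor, pinned to two ranks.
spec = EnumerableShardingSpec([
    ShardMetadata(shard_offsets=[0, 0], shard_sizes=[2, 4], placement="rank:0/cpu"),
    ShardMetadata(shard_offsets=[2, 0], shard_sizes=[2, 4], placement="rank:1/cpu"),
])
print(spec.shards[0])  # validation already ran in __post_init__
```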
  {
    "library": "tensorflow",
    "name": "_maybe_assert_valid_concentration",
    "source_code": "def _maybe_assert_valid_concentration(self, concentration, validate_args):\n    if not validate_args:\n        return concentration\n    return control_flow_ops.with_dependencies([check_ops.assert_positive(concentration, message='Concentration parameter must be positive.')], concentration)",
    "docstring": "Checks the validity of a concentration parameter.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\distributions\\beta.py",
    "ast_data": "FunctionDef name:_maybe_assert_valid_concentration arg:self arg:concentration arg:validate_args arguments arg arg arg If Return return:yes Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "fix_static_global_kernels",
    "source_code": "def fix_static_global_kernels(in_txt):\n    in_txt = in_txt.replace(' __global__ static', '__global__')\n    return in_txt",
    "docstring": "Static global kernels in HIP results in a compilation error.",
    "type": "function",
    "file_path": "pytorch\\torch\\utils\\hipify\\hipify_python.py",
    "ast_data": "FunctionDef name:fix_static_global_kernels arg:in_txt arguments arg Assign Call Return return:yes"
  },
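A self-contained demo of the rewrite above (the helper is copied verbatim): note the pattern requires a leading space, and the replacement deliberately drops it.

```python
def fix_static_global_kernels(in_txt):
    # copied from the record above for a self-contained demo
    return in_txt.replace(' __global__ static', '__global__')

cuda_src = "template <typename T> __global__ static void kern(T* p) {}"
print(fix_static_global_kernels(cuda_src))
# template <typename T>__global__ void kern(T* p) {}
```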
  {
    "library": "pandas",
    "name": "__init__",
    "source_code": "@doc(storage_options=_shared_docs['storage_options'])\ndef __init__(self, filepath_or_buffer: FilePath | ReadBuffer[bytes], storage_options: StorageOptions | None=None, engine_kwargs: dict | None=None) -> None:\n    import_optional_dependency('openpyxl')\n    super().__init__(filepath_or_buffer, storage_options=storage_options, engine_kwargs=engine_kwargs)",
    "docstring": "Reader using openpyxl engine. Parameters ---------- filepath_or_buffer : str, path object or Workbook Object to be parsed. {storage_options} engine_kwargs : dict, optional Arbitrary keyword arguments passed to excel engine.",
    "type": "method",
    "file_path": "pandas\\pandas\\io\\excel\\_openpyxl.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:filepath_or_buffer arg:storage_options arg:engine_kwargs arguments arg arg arg arg Call Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "create_training_target",
    "source_code": "def create_training_target(self, target, run_eagerly=False):\n    if self.has_training_target():\n        raise ValueError('The training_target field for the _TrainingEndpoint instance has already been populated')\n    if run_eagerly:\n        self.training_target = _TrainingTarget(None, feedable=True, skip_target_weights=False)\n        return\n    if self.should_skip_target():\n        self.training_target = _TrainingTarget(None)\n    else:\n        if target is not None and (not backend.is_placeholder(target)):\n            feedable = False\n            skip_target_weights = True\n        else:\n            feedable = True\n            skip_target_weights = False\n        if target is None:\n            target_dtype = losses.LABEL_DTYPES_FOR_LOSSES.get(self.loss_fn, backend.dtype(self.output))\n            target = backend.placeholder(ndim=len(self.shape), name=self.output_name + '_target', sparse=backend.is_sparse(self.output), dtype=target_dtype)\n        self.training_target = _TrainingTarget(target, feedable=feedable, skip_target_weights=skip_target_weights)",
    "docstring": "Create training_target instance and update the self.training_target. Note that the input target should just be a tensor or None, and corresponding training target will be created based on the output and loss_fn. Args: target: the target tensor for the current output. Could be None. run_eagerly: boolean, whether the model is in run_eagerly mode. Raises: ValueError if the training_target field for the current instance has already been populated.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\training_v1.py",
    "ast_data": "FunctionDef name:create_training_target arg:self arg:target arg:run_eagerly arguments arg arg arg If Call Raise Call If Assign Call Return return:no If Call Assign Call If BoolOp Compare Call Assign Assign Assign Assign If Compare Assign Call Call Assign Call Call Call Assign Call"
  },
  {
    "library": "tensorflow",
    "name": "updates",
    "source_code": "@property\ndef updates(self):\n    aggregated = []\n    for layer in self.layers:\n        if hasattr(layer, 'updates'):\n            aggregated += layer.updates\n    return aggregated",
    "docstring": "Aggregate updates from any instances.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\trackable\\data_structures.py",
    "ast_data": "FunctionDef name:updates arg:self arguments arg Assign For If Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "_upcast_err",
    "source_code": "def _upcast_err(err):\n    if np.iterable(err) and len(err) > 0 and isinstance(cbook._safe_first_finite(err), np.ndarray):\n        atype = type(cbook._safe_first_finite(err))\n        if atype is np.ndarray:\n            return np.asarray(err, dtype=object)\n        return atype(err)\n    return np.asarray(err, dtype=object)",
    "docstring": "Safely handle tuple of containers that carry units. This function covers the case where the input to the xerr/yerr is a length 2 tuple of equal length ndarray-subclasses that carry the unit information in the container. If we have a tuple of nested numpy array (subclasses), we defer coercing the units to be consistent to the underlying unit library (and implicitly the broadcasting). Otherwise, fallback to casting to an object array.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\axes\\_axes.py",
    "ast_data": "FunctionDef name:_upcast_err arg:err arguments arg If BoolOp Call Compare Call Call Call Assign Call Call If Compare Return return:yes Call Return return:yes Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "fp16_compress_wrapper",
    "source_code": "def fp16_compress_wrapper(hook: Callable[[Any, dist.GradBucket], torch.futures.Future[torch.Tensor]]) -> Callable[[Any, dist.GradBucket], torch.futures.Future[torch.Tensor]]:\n\n    def fp16_compress_wrapper_hook(hook_state, bucket: dist.GradBucket) -> torch.futures.Future[torch.Tensor]:\n        bucket.set_buffer(bucket.buffer().to(torch.float16))\n        fut = hook(hook_state, bucket)\n\n        def decompress(fut):\n            decompressed_tensor = bucket.buffer()\n            decompressed_tensor.copy_(fut.value())\n            return decompressed_tensor\n        return fut.then(decompress)\n    return fp16_compress_wrapper_hook",
    "docstring": "Cast input tensor to ``. Example:: >>> # xdoctest: +SKIP >>> state = PowerSGDState(process_group=process_group, matrix_approximation_rank=1, start_powerSGD_iter=10) >>> ddp_model.register_comm_hook(state, fp16_compress_wrapper(powerSGD_hook))",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\algorithms\\ddp_comm_hooks\\default_hooks.py",
    "ast_data": "FunctionDef name:fp16_compress_wrapper arg:hook arguments arg FunctionDef name:fp16_compress_wrapper_hook arg:hook_state arg:bucket arguments arg arg Call Call Call Assign Call FunctionDef name:decompress arg:fut arguments arg Assign Call Call Call Return return:yes Return return:yes Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "from_pretrained",
    "source_code": "@classmethod\ndef from_pretrained(cls, embeddings: Tensor, freeze: bool=True, max_norm: Optional[float]=None, norm_type: float=2.0, scale_grad_by_freq: bool=False, mode: str='mean', sparse: bool=False, include_last_offset: bool=False, padding_idx: Optional[int]=None) -> 'EmbeddingBag':\n    assert embeddings.dim() == 2, 'Embeddings parameter is expected to be 2-dimensional'\n    rows, cols = embeddings.shape\n    embeddingbag = cls(num_embeddings=rows, embedding_dim=cols, _weight=embeddings, max_norm=max_norm, norm_type=norm_type, scale_grad_by_freq=scale_grad_by_freq, mode=mode, sparse=sparse, include_last_offset=include_last_offset, padding_idx=padding_idx)\n    embeddingbag.weight.requires_grad = not freeze\n    return embeddingbag",
    "docstring": "Create EmbeddingBag instance from given 2-dimensional FloatTensor. Args: embeddings (Tensor): FloatTensor containing weights for the EmbeddingBag. First dimension is being passed to EmbeddingBag as 'num_embeddings', second as 'embedding_dim'. freeze (bool, optional): If ``. Examples:: >>> # FloatTensor containing pretrained weights >>> weight = torch.FloatTensor([[1, 2.3, 3], [4, 5.1, 6.3]]) >>> embeddingbag = nn.EmbeddingBag.from_pretrained(weight) >>> # Get embeddings for index 1 >>> input = torch.LongTensor([[1, 0]]) >>> # xdoctest: +IGNORE_WANT(\"non-deterministic\") >>> embeddingbag(input) tensor([[ 2.5000, 3.7000, 4.6500]])",
    "type": "method",
    "file_path": "pytorch\\torch\\nn\\modules\\sparse.py",
    "ast_data": "FunctionDef name:from_pretrained arg:cls arg:embeddings arg:freeze arg:max_norm arg:norm_type arg:scale_grad_by_freq arg:mode arg:sparse arg:include_last_offset arg:padding_idx arguments arg arg arg arg arg arg arg arg arg arg Compare Call Assign Assign Call Assign Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "filter_incompatible_and_dtype_convert_kwargs",
    "source_code": "def filter_incompatible_and_dtype_convert_kwargs(kwargs):\n    filtered = {}\n    for key, value in kwargs.items():\n        if key in {'layout', 'device', 'requires_grad', 'pin_memory', 'memory_format', 'implicit'}:\n            continue\n        if key == 'dtype':\n            if value is None:\n                continue\n            else:\n                value = int(jit_type_utils.JitScalarType.from_dtype(value).onnx_type())\n        filtered[key] = value\n    return filtered",
    "docstring": "Filter out kwargs that are not supported by onnxscript.",
    "type": "function",
    "file_path": "pytorch\\torch\\onnx\\_internal\\fx\\fx_onnx_interpreter.py",
    "ast_data": "FunctionDef name:filter_incompatible_and_dtype_convert_kwargs arg:kwargs arguments arg Assign For Call If Compare If Compare If Compare Assign Call Call Call Assign Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_register_group_forward_hooks",
    "source_code": "def _register_group_forward_hooks(modules: Sequence[nn.Module], pre_hook: Callable, post_hook: Callable, modules_to_run: set[nn.Module]):\n    modules_set = set(modules)\n\n    @disable_if_config_true\n    @functools.wraps(pre_hook)\n    def wrapped_pre_hook(*args: Any, **kwargs: Any):\n        if len(modules_to_run) == 0:\n            modules_to_run.update(modules_set)\n            return pre_hook(*args, **kwargs)\n\n    @disable_if_config_true\n    def get_wrapped_post_hook(module: nn.Module):\n\n        @functools.wraps(post_hook)\n        def wrapped_post_hook(*args: Any, **kwargs: Any):\n            modules_to_run.discard(module)\n            if len(modules_to_run) == 0:\n                return post_hook(*args, **kwargs)\n        return wrapped_post_hook\n    pre_handles = [module.register_forward_pre_hook(wrapped_pre_hook, prepend=True, with_kwargs=True) for module in modules]\n    post_handles = [module.register_forward_hook(get_wrapped_post_hook(module), prepend=False, always_call=True) for module in modules]\n    return _MultiHandle(tuple(pre_handles + post_handles))",
    "docstring": "Registers group forward pre and post-hooks. The pre-hook runs upon the first module pre-forward, and the post-hook runs upon the last. If at least one module does not run forward, then the post-hook does not run.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\fsdp\\_fully_shard\\_fsdp_state.py",
    "ast_data": "FunctionDef name:_register_group_forward_hooks arg:modules arg:pre_hook arg:post_hook arg:modules_to_run arguments arg arg arg arg Assign Call FunctionDef name:wrapped_pre_hook arguments arg arg If Compare Call Call Return return:yes Call Call FunctionDef name:get_wrapped_post_hook arg:module arguments arg FunctionDef name:wrapped_post_hook arguments arg arg Call If Compare Call Return return:yes Call Call Return return:yes Assign Call Assign Call Call Return return:yes Call Call"
  },
  {
    "library": "matplotlib",
    "name": "y0",
    "source_code": "@property\ndef y0(self):\n    return self.get_points()[0, 1]",
    "docstring": "The first of the pair of *y* coordinates that define the bounding box. This is not guaranteed to be less than :attr: (for that, use :attr:).",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\transforms.py",
    "ast_data": "FunctionDef name:y0 arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "cpu",
    "source_code": "@deprecation.deprecated(None, 'Use tf.identity with explicit device placement instead.')\ndef cpu(self: EagerTensorType) -> EagerTensorType:\n    return self._copy(context.context(), 'CPU:0')",
    "docstring": "A copy of this Tensor with contents backed by host memory.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\ops.py",
    "ast_data": "FunctionDef name:cpu arg:self arguments arg Return return:yes Call Call Call"
  },
  {
    "library": "django",
    "name": "load",
    "source_code": "@register.tag\ndef load(parser, token):\n    bits = token.contents.split()\n    if len(bits) >= 4 and bits[-2] == 'from':\n        name = bits[-1]\n        lib = find_library(parser, name)\n        subset = load_from_library(lib, name, bits[1:-2])\n        parser.add_library(subset)\n    else:\n        for name in bits[1:]:\n            lib = find_library(parser, name)\n            parser.add_library(lib)\n    return LoadNode()",
    "docstring": "Load a custom template tag library into the parser. For example, to load the template tags in ``:: {% load news.photos %} Can also be used to load an individual tag/filter from a library:: {% load byline from news %}",
    "type": "function",
    "file_path": "django\\django\\template\\defaulttags.py",
    "ast_data": "FunctionDef name:load arg:parser arg:token arguments arg arg Assign Call If BoolOp Compare Call Compare Assign Assign Call Assign Call Call For Assign Call Call Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "NotThisMethod",
    "source_code": "class NotThisMethod(Exception):\n    pass",
    "docstring": "Exception raised if a method is not valid for the current scenario.",
    "type": "class",
    "file_path": "pandas\\pandas\\_version.py",
    "ast_data": "ClassDef name:NotThisMethod"
  },
  {
    "library": "numpy",
    "name": "__str__",
    "source_code": "def __str__(self):\n    fmt = 'Machine parameters for %(dtype)s\\n---------------------------------------------------------------\\nmin = %(min)s\\nmax = %(max)s\\n---------------------------------------------------------------\\n'\n    return fmt % {'dtype': self.dtype, 'min': self.min, 'max': self.max}",
    "docstring": "String representation.",
    "type": "method",
    "file_path": "numpy\\numpy\\_core\\getlimits.py",
    "ast_data": "FunctionDef name:__str__ arg:self arguments arg Assign Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "ColorFormatter",
    "source_code": "class ColorFormatter(logging.Formatter):\n    COLORS = {'WARNING': '\\x1b[33m', 'ERROR': '\\x1b[31m', 'CRITICAL': '\\x1b[31m', 'INFO': '\\x1b[0m', 'DEBUG': '\\x1b[0m'}\n\n    def format(self, record: LogRecord) -> str:\n        log_color = self.COLORS.get(record.levelname, '\\x1b[0m')\n        record.msg = f'{log_color}{record.msg}\\x1b[0m'\n        return super().format(record)",
    "docstring": "Color codes the log messages based on the log level",
    "type": "class",
    "file_path": "pytorch\\.github\\scripts\\runner_determinator.py",
    "ast_data": "ClassDef name:ColorFormatter Assign FunctionDef name:format arg:self arg:record arguments arg arg Assign Call Assign Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "CallFunction",
    "source_code": "class CallFunction(_TargetArgsExpr):\n    op = 'call_function'",
    "docstring": "Matches a call_function node in the FX graphs:",
    "type": "class",
    "file_path": "pytorch\\torch\\_inductor\\pattern_matcher.py",
    "ast_data": "ClassDef name:CallFunction Assign"
  },
  {
    "library": "django",
    "name": "parse_cookie",
    "source_code": "def parse_cookie(cookie):\n    cookiedict = {}\n    for chunk in cookie.split(';'):\n        if '=' in chunk:\n            key, val = chunk.split('=', 1)\n        else:\n            key, val = ('', chunk)\n        key, val = (key.strip(), val.strip())\n        if key or val:\n            cookiedict[key] = cookies._unquote(val)\n    return cookiedict",
    "docstring": "Return a dictionary parsed from a header string.",
    "type": "function",
    "file_path": "django\\django\\http\\cookie.py",
    "ast_data": "FunctionDef name:parse_cookie arg:cookie arguments arg Assign For Call If Compare Assign Call Assign Assign Call Call If BoolOp Assign Call Return return:yes"
  },
  {
    "library": "django",
    "name": "_clone",
    "source_code": "def _clone(self):\n    c = self.__class__(model=self.model, query=self.query.chain(), using=self._db, hints=self._hints)\n    c._sticky_filter = self._sticky_filter\n    c._for_write = self._for_write\n    c._prefetch_related_lookups = self._prefetch_related_lookups[:]\n    c._known_related_objects = self._known_related_objects\n    c._iterable_class = self._iterable_class\n    c._fields = self._fields\n    return c",
    "docstring": "Return a copy of the current QuerySet. A lightweight alternative to deepcopy().",
    "type": "method",
    "file_path": "django\\django\\db\\models\\query.py",
    "ast_data": "FunctionDef name:_clone arg:self arguments arg Assign Call Call Assign Assign Assign Assign Assign Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "is_control_input",
    "source_code": "def is_control_input(name: str) -> str:\n    return name and name[0] == '^'",
    "docstring": "Returns whether or not the input is a control input.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\saved_model\\save.py",
    "ast_data": "FunctionDef name:is_control_input arg:name arguments arg Return return:yes BoolOp Compare"
  },
  {
    "library": "seaborn",
    "name": "_structured_bootstrap",
    "source_code": "def _structured_bootstrap(args, n_boot, units, func, func_kwargs, integers):\n    unique_units = np.unique(units)\n    n_units = len(unique_units)\n    args = [[a[units == unit] for unit in unique_units] for a in args]\n    boot_dist = []\n    for i in range(int(n_boot)):\n        resampler = integers(0, n_units, n_units, dtype=np.intp)\n        sample = [[a[i] for i in resampler] for a in args]\n        lengths = map(len, sample[0])\n        resampler = [integers(0, n, n, dtype=np.intp) for n in lengths]\n        sample = [[c.take(r, axis=0) for c, r in zip(a, resampler)] for a in sample]\n        sample = list(map(np.concatenate, sample))\n        boot_dist.append(func(*sample, **func_kwargs))\n    return np.array(boot_dist)",
    "docstring": "Resample units instead of datapoints.",
    "type": "function",
    "file_path": "seaborn\\seaborn\\algorithms.py",
    "ast_data": "FunctionDef name:_structured_bootstrap arg:args arg:n_boot arg:units arg:func arg:func_kwargs arg:integers arguments arg arg arg arg arg arg Assign Call Assign Call Assign Compare Assign For Call Call Assign Call Assign Assign Call Assign Call Assign Call Call Assign Call Call Call Call Return return:yes Call"
  },
  {
    "library": "django",
    "name": "get_object_for_this_type",
    "source_code": "def get_object_for_this_type(self, using=None, **kwargs):\n    return self.model_class()._base_manager.using(using).get(**kwargs)",
    "docstring": "Return an object of this type for the keyword arguments given. Basically, this is a proxy around this object_type's get_object() model method. The ObjectNotExist exception, if thrown, will not be caught, so code that calls this method should catch it.",
    "type": "method",
    "file_path": "django\\django\\contrib\\contenttypes\\models.py",
    "ast_data": "FunctionDef name:get_object_for_this_type arg:self arg:using arguments arg arg arg Return return:yes Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_supervised_signature_def",
    "source_code": "def _supervised_signature_def(method_name, inputs, loss=None, predictions=None, metrics=None):\n    if inputs is None or not inputs:\n        raise ValueError(f'{method_name} `inputs` cannot be None or empty.')\n    signature_inputs = {key: utils.build_tensor_info(tensor) for key, tensor in inputs.items()}\n    signature_outputs = {}\n    for output_set in (loss, predictions, metrics):\n        if output_set is not None:\n            sig_out = {key: utils.build_tensor_info(tensor) for key, tensor in output_set.items()}\n            signature_outputs.update(sig_out)\n    signature_def = build_signature_def(signature_inputs, signature_outputs, method_name)\n    return signature_def",
    "docstring": "Creates a signature for training and eval data. This function produces signatures that describe the inputs and outputs of a supervised process, such as training or evaluation, that results in loss, metrics, and the like. Note that this function only requires inputs to be not None. Args: method_name: Method name of the SignatureDef as a string. inputs: dict of string to . loss: dict of string to representing computed loss. predictions: dict of string to representing the output predictions. metrics: dict of string to representing metric ops. Returns: A train- or eval-flavored signature_def. Raises: ValueError: If inputs or outputs is .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\saved_model\\signature_def_utils_impl.py",
    "ast_data": "FunctionDef name:_supervised_signature_def arg:method_name arg:inputs arg:loss arg:predictions arg:metrics arguments arg arg arg arg arg If BoolOp Compare Raise Call Assign Call Call Assign For If Compare Assign Call Call Call Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "context_safe",
    "source_code": "def context_safe():\n    return _context",
    "docstring": "Returns current context (or None if one hasn't been initialized).",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\context.py",
    "ast_data": "FunctionDef name:context_safe arguments Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "pprint_styles",
    "source_code": "@classmethod\ndef pprint_styles(cls):\n    table = [('Class', 'Name', 'Parameters'), *[(cls.__name__, f'``{name}``', str(inspect.signature(cls))[1:-1] or 'None') for name, cls in cls._style_list.items()]]\n    col_len = [max((len(cell) for cell in column)) for column in zip(*table)]\n    table_formatstr = '  '.join(('=' * cl for cl in col_len))\n    rst_table = '\\n'.join(['', table_formatstr, '  '.join((cell.ljust(cl) for cell, cl in zip(table[0], col_len))), table_formatstr, *['  '.join((cell.ljust(cl) for cell, cl in zip(row, col_len))) for row in table[1:]], table_formatstr])\n    return textwrap.indent(rst_table, prefix=' ' * 4)",
    "docstring": "Return the available styles as pretty-printed string.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\patches.py",
    "ast_data": "FunctionDef name:pprint_styles arg:cls arguments arg Assign BoolOp Call Call Call Assign Call Call Call Assign Call Assign Call Call Call Call Call Call Call Return return:yes Call"
  },
  {
    "library": "numpy",
    "name": "shares_memory",
    "source_code": "@array_function_from_c_func_and_dispatcher(_multiarray_umath.shares_memory)\ndef shares_memory(a, b, max_work=None):\n    return (a, b)",
    "docstring": "shares_memory(a, b, /, max_work=None) Determine if two arrays share memory. .. warning:: This function can be exponentially slow for some inputs, unless is set to zero or a positive integer. If in doubt, use instead. Parameters ---------- a, b : ndarray Input arrays max_work : int, optional Effort to spend on solving the overlap problem (maximum number of candidate solutions to consider). The following special values are recognized: max_work=-1 (default) The problem is solved exactly. In this case, the function returns True only if there is an element shared between the arrays. Finding the exact solution may take extremely long in some cases. max_work=0 Only the memory bounds of a and b are checked. This is equivalent to using `max_workmax_work` set takes around 1 minute for this case. It is possible to find problems that take still significantly longer.",
    "type": "function",
    "file_path": "numpy\\numpy\\_core\\multiarray.py",
    "ast_data": "FunctionDef name:shares_memory arg:a arg:b arg:max_work arguments arg arg arg Return return:yes Call"
  },
  {
    "library": "kornia",
    "name": "identity",
    "source_code": "@classmethod\ndef identity(cls, batch_size: Optional[int]=None, device: Optional[Device]=None, dtype: Dtype=None) -> Se3:\n    t = tensor([0.0, 0.0, 0.0], device=device, dtype=dtype)\n    if batch_size is not None:\n        t = t.repeat(batch_size, 1)\n    return cls(So3.identity(batch_size, device, dtype), Vector3(t))",
    "docstring": "Create a Se3 group representing an identity rotation and zero translation. Args: batch_size: the batch size of the underlying data. device: device to place the result on. dtype: dtype of the result. Example: >>> s = Se3.identity() >>> s.r Parameter containing: tensor([1., 0., 0., 0.], requires_grad=True) >>> s.t x: 0.0 y: 0.0 z: 0.0",
    "type": "method",
    "file_path": "kornia\\kornia\\geometry\\liegroup\\se3.py",
    "ast_data": "FunctionDef name:identity arg:cls arg:batch_size arg:device arg:dtype arguments arg arg arg arg Assign Call If Compare Assign Call Return return:yes Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "augment_key",
    "source_code": "def augment_key(self, cache_key: str) -> AugmentedKeyT:\n    return cast(AugmentedKeyT, cache_key)",
    "docstring": "Override this method to augment cache key with backend specifics",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\codegen\\common.py",
    "ast_data": "FunctionDef name:augment_key arg:self arg:cache_key arguments arg arg Return return:yes Call"
  },
  {
    "library": "django",
    "name": "record_unapplied",
    "source_code": "def record_unapplied(self, app, name):\n    self.ensure_schema()\n    self.migration_qs.filter(app=app, name=name).delete()",
    "docstring": "Record that a migration was unapplied.",
    "type": "method",
    "file_path": "django\\django\\db\\migrations\\recorder.py",
    "ast_data": "FunctionDef name:record_unapplied arg:self arg:app arg:name arguments arg arg arg Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_pick_scalar_condition",
    "source_code": "def _pick_scalar_condition(pred, cond_true, cond_false):\n    pred_ = _static_value(pred)\n    if pred_ is None:\n        return array_ops.where_v2(pred, cond_true, cond_false)\n    return cond_true if pred_ else cond_false",
    "docstring": "Convenience function which chooses the condition based on the predicate.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\distributions\\transformed_distribution.py",
    "ast_data": "FunctionDef name:_pick_scalar_condition arg:pred arg:cond_true arg:cond_false arguments arg arg arg Assign Call If Compare Return return:yes Call Return return:yes"
  },
  {
    "library": "authlib",
    "name": "get_amr",
    "source_code": "def get_amr(self, user) -> Optional[list[str]]:\n    return None",
    "docstring": "Authentication Methods References. Defined by :ref: as an option list of user-defined case-sensitive strings indication which authentication methods have been used to authenticate the user. Developers MAY re-implement this method:: def get_amr(self, user): return [\"2FA\"] if user.has_2fa_enabled() else []",
    "type": "method",
    "file_path": "authlib\\authlib\\oauth2\\rfc9068\\token.py",
    "ast_data": "FunctionDef name:get_amr arg:self arg:user arguments arg arg Return return:no"
  },
  {
    "library": "cherrypy",
    "name": "__init__",
    "source_code": "def __init__(self, iterator):\n    self._iterator = iterator",
    "docstring": "Initialize a UTF-8 stream encoder instance.",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\lib\\encoding.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:iterator arguments arg arg Assign"
  },
  {
    "library": "tensorflow",
    "name": "done",
    "source_code": "def done(self):\n    return self.closure_queue.done()",
    "docstring": "Returns true if all scheduled functions are executed.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\coordinator\\cluster_coordinator.py",
    "ast_data": "FunctionDef name:done arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "_getrow",
    "source_code": "def _getrow(self, i):\n    M, N = self.shape\n    i = int(i)\n    if i < -M or i >= M:\n        raise IndexError(f'index ({i}) out of range')\n    if i < 0:\n        i += M\n    return self._get_intXslice(i, slice(None))",
    "docstring": "Return a copy of row i of the matrix, as a (1 x n) row vector.",
    "type": "method",
    "file_path": "scipy\\scipy\\sparse\\_index.py",
    "ast_data": "FunctionDef name:_getrow arg:self arg:i arguments arg arg Assign Assign Call If BoolOp Compare Compare Raise Call If Compare Return return:yes Call Call"
  },
  {
    "library": "sphinx",
    "name": "visit_Import",
    "source_code": "def visit_Import(self, node: ast.Import) -> None:\n    for name in node.names:\n        self.add_entry(name.asname or name.name)\n        if name.name in {'typing', 'typing_extensions'}:\n            self.typing_mods.add(name.asname or name.name)\n        elif name.name in {'typing.final', 'typing_extensions.final'}:\n            self.typing_final_names.add(name.asname or name.name)\n        elif name.name in {'typing.overload', 'typing_extensions.overload'}:\n            self.typing_overload_names.add(name.asname or name.name)",
    "docstring": "Handles Import node and record the order of definitions.",
    "type": "method",
    "file_path": "sphinx\\sphinx\\pycode\\parser.py",
    "ast_data": "FunctionDef name:visit_Import arg:self arg:node arguments arg arg For Call BoolOp If Compare Call BoolOp If Compare Call BoolOp If Compare Call BoolOp"
  },
  {
    "library": "tensorflow",
    "name": "dropout_v1",
    "source_code": "@dispatch.dispatch_for_api(nn_ops.dropout)\ndef dropout_v1(x: ragged_tensor.Ragged, keep_prob=None, noise_shape=None, seed=None, name=None, rate=None):\n    if noise_shape is not None:\n        raise ValueError('noise_shape is not supported yet for RaggedTensor x')\n    with ops.name_scope(name, 'RaggedNNDropout', [x, rate]):\n        x = ragged_tensor.convert_to_tensor_or_ragged_tensor(x, name='x')\n        return x.with_flat_values(nn_ops.dropout(x.flat_values, keep_prob=keep_prob, seed=seed, rate=rate))",
    "docstring": "Ragged dispatch target for tf.nn.dropout.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\ragged_math_ops.py",
    "ast_data": "FunctionDef name:dropout_v1 arg:x arg:keep_prob arg:noise_shape arg:seed arg:name arg:rate arguments arg arg arg arg arg arg If Compare Raise Call With Call Assign Call Return return:yes Call Call Call"
  },
  {
    "library": "matplotlib",
    "name": "draw_idle",
    "source_code": "def draw_idle(self, *args, **kwargs):\n    if not self._is_idle_drawing:\n        with self._idle_draw_cntx():\n            self.draw(*args, **kwargs)",
    "docstring": "Request a widget redraw once control returns to the GUI event loop. Even if multiple calls to occur before control returns to the GUI event loop, the figure will only be rendered once. Notes ----- Backends may choose to override the method and implement their own strategy to prevent multiple renderings.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\backend_bases.py",
    "ast_data": "FunctionDef name:draw_idle arg:self arguments arg arg arg If With Call Call"
  },
  {
    "library": "pytorch",
    "name": "CudagraphMetadata",
    "source_code": "@dataclasses.dataclass(frozen=True)\nclass CudagraphMetadata:\n    placeholders: Sequence[PlaceholderInfo]\n    static_input_idxs: OrderedSet[int]\n    mutated_input_idxs: OrderedSet[int]\n    stack_traces: list[Optional[str]]\n    constants: dict[str, torch.Tensor]",
    "docstring": "Metadata for recording a CUDA graph.",
    "type": "class",
    "file_path": "pytorch\\torch\\_inductor\\cudagraph_utils.py",
    "ast_data": "ClassDef name:CudagraphMetadata Call"
  },
  {
    "library": "pytorch",
    "name": "_NodeDesc",
    "source_code": "@dataclass(eq=True, order=True, frozen=True)\nclass _NodeDesc:\n    addr: str\n    pid: int\n    local_id: int\n\n    def __repr__(self) -> str:\n        return f'{self.addr}_{self.pid}_{self.local_id}'",
    "docstring": "Describe a node in the rendezvous. Attributes: addr: The FQDN of the node or user specified local node address. pid: The id of the process in which the rendezvous handler runs. local_id: A process-wide unique id.",
    "type": "class",
    "file_path": "pytorch\\torch\\distributed\\elastic\\rendezvous\\dynamic_rendezvous.py",
    "ast_data": "ClassDef name:_NodeDesc FunctionDef name:__repr__ arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "__repr__",
    "source_code": "@final\ndef __repr__(self) -> str_t:\n    klass_name = type(self).__name__\n    data = self._format_data()\n    attrs = self._format_attrs()\n    attrs_str = [f'{k}={v}' for k, v in attrs]\n    prepr = ', '.join(attrs_str)\n    return f'{klass_name}({data}{prepr})'",
    "docstring": "Return a string representation for this object.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\indexes\\base.py",
    "ast_data": "FunctionDef name:__repr__ arg:self arguments arg Assign Call Assign Call Assign Call Assign Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "randn",
    "source_code": "@tf_export.tf_export('experimental.numpy.random.randn', v1=[])\n@np_utils.np_doc('random.randn')\ndef randn(*args):\n    return standard_normal(size=args)",
    "docstring": "Returns samples from a normal distribution. Uses . Args: *args: The shape of the output array. Returns: An ndarray with shape and dtype .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\numpy_ops\\np_random.py",
    "ast_data": "FunctionDef name:randn arguments arg Return return:yes Call Call Call"
  },
  {
    "library": "scikit-learn",
    "name": "score",
    "source_code": "def score(self, X, y):\n    check_is_fitted(self)\n    n_outputs_ = len(self.estimators_)\n    if y.ndim == 1:\n        raise ValueError('y must have at least two dimensions for multi target classification but has only one')\n    if y.shape[1] != n_outputs_:\n        raise ValueError('The number of outputs of Y for fit {0} and score {1} should be same'.format(n_outputs_, y.shape[1]))\n    y_pred = self.predict(X)\n    return np.mean(np.all(y == y_pred, axis=1))",
    "docstring": "Return the mean accuracy on the given test data and labels. Parameters ---------- X : array-like of shape (n_samples, n_features) Test samples. y : array-like of shape (n_samples, n_outputs) True values for X. Returns ------- scores : float Mean accuracy of predicted target versus true target.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\multioutput.py",
    "ast_data": "FunctionDef name:score arg:self arg:X arg:y arguments arg arg arg Call Assign Call If Compare Raise Call If Compare Raise Call Call Assign Call Return return:yes Call Call Compare"
  },
  {
    "library": "scrapy",
    "name": "global_object_name",
    "source_code": "def global_object_name(obj: Any) -> str:\n    return f'{obj.__module__}.{obj.__qualname__}'",
    "docstring": "Return the full import path of the given object. >>> from scrapy import Request >>> global_object_name(Request) 'scrapy.http.request.Request' >>> global_object_name(Request.replace) 'scrapy.http.request.Request.replace'",
    "type": "function",
    "file_path": "scrapy\\scrapy\\utils\\python.py",
    "ast_data": "FunctionDef name:global_object_name arg:obj arguments arg Return return:yes"
  },
  {
    "library": "scipy",
    "name": "_prune_array",
    "source_code": "def _prune_array(array):\n    if array.base is not None and array.size < array.base.size // 2:\n        return array.copy()\n    return array",
    "docstring": "Return an array equivalent to the input array. If the input array is a view of a much larger array, copy its contents to a newly allocated array. Otherwise, return the input unchanged.",
    "type": "function",
    "file_path": "scipy\\scipy\\_lib\\_util.py",
    "ast_data": "FunctionDef name:_prune_array arg:array arguments arg If BoolOp Compare Compare Return return:yes Call Return return:yes"
  },
  {
    "library": "django",
    "name": "wkt",
    "source_code": "@property\ndef wkt(self):\n    to_wkt = capi.to_iso_wkt if self.is_measured else capi.to_wkt\n    return to_wkt(self.ptr, byref(c_char_p()))",
    "docstring": "Return the WKT representation of the Geometry.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\gdal\\geometries.py",
    "ast_data": "FunctionDef name:wkt arg:self arguments arg Assign Return return:yes Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "match_affine_block_expr",
    "source_code": "@classmethod\ndef match_affine_block_expr(cls, index: Expr, index_var: Symbol) -> Optional[Expr]:\n    index = cls._preprocess(index)\n    stride = sympy.Wild('stride', exclude=[index_var])\n    m = index.match(index_var * stride)\n    if m is None:\n        return None\n    return m[stride]",
    "docstring": "Matches simple expressions of the form stride * index, returning the stride.",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\codegen\\block_analysis.py",
    "ast_data": "FunctionDef name:match_affine_block_expr arg:cls arg:index arg:index_var arguments arg arg arg Assign Call Assign Call Assign Call If Compare Return return:no Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "XLAControlFlowContext",
    "source_code": "class XLAControlFlowContext(ControlFlowContext):\n\n    def __init__(self):\n        super(XLAControlFlowContext, self).__init__()\n        self._name = 'XLAControlFlowContext'\n\n    def to_control_flow_context_def(self, context_def, export_scope=None):\n        super(XLAControlFlowContext, self).to_control_flow_context_def(context_def, export_scope)\n\n    def IsXLAContext(self):\n        return True\n\n    def AddOp(self, _):\n        pass\n\n    def AddValue(self, x):\n        return x\n\n    def RequiresUniqueFunctionRetracing(self):\n        return False",
    "docstring": "Base class for XLA and TPU control flow contexts.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\control_flow_ops.py",
    "ast_data": "ClassDef name:XLAControlFlowContext FunctionDef name:__init__ arg:self arguments arg Call Call Assign FunctionDef name:to_control_flow_context_def arg:self arg:context_def arg:export_scope arguments arg arg arg Call Call FunctionDef name:IsXLAContext arg:self arguments arg Return return:yes FunctionDef name:AddOp arg:self arg:_ arguments arg arg FunctionDef name:AddValue arg:self arg:x arguments arg arg Return return:yes FunctionDef name:RequiresUniqueFunctionRetracing arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "get_quantize_opcode_idx",
    "source_code": "def get_quantize_opcode_idx(model):\n    quant_opcode_idxs = []\n    for idx, opcode in enumerate(model.operatorCodes):\n        builtin_code = schema_util.get_builtin_code_from_operator_code(opcode)\n        if builtin_code == schema_fb.BuiltinOperator.QUANTIZE:\n            quant_opcode_idxs.append(idx)\n    return quant_opcode_idxs",
    "docstring": "Returns the quantize op idx.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\lite\\python\\util.py",
    "ast_data": "FunctionDef name:get_quantize_opcode_idx arg:model arguments arg Assign For Call Assign Call If Compare Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "inner_recompute_grad",
    "source_code": "@custom_gradient\ndef inner_recompute_grad(*dresult):\n    with backprop.GradientTape() as t:\n        id_args = nest.map_structure(gen_array_ops.identity, args)\n        assert len(dresult) >= 1\n        if not context.executing_eagerly():\n            elem = math_ops.reduce_max(array_ops.reshape(dresult[0], [-1])[:1])\n            elem_bool = math_ops.cast(elem, dtypes.bool)\n            dresult_dep = array_ops.where_v2(elem_bool == elem_bool, 0.0, float('nan'))\n            id_args = nest.map_structure(lambda x: x + math_ops.cast(dresult_dep, x.dtype), id_args)\n        t.watch(id_args)\n        if variables is not None:\n            t.watch(variables)\n        with variable_scope.variable_scope(current_var_scope):\n            recomputed_result = f(*id_args, **kwargs)\n    kw_vars = []\n    if variables is not None:\n        kw_vars = list(variables)\n    grads = t.gradient(recomputed_result, list(id_args) + kw_vars, output_gradients=dresult, unconnected_gradients=UnconnectedGradients.ZERO)\n\n    def transpose(*t_args, **t_kwargs):\n        raise NotImplementedError('recompute_grad tried to transpose grad of {}. Consider not using recompute_grad in forward modeautodiff'.format(f.__name__))\n    return ((grads[:len(id_args)], grads[len(id_args):]), transpose)",
    "docstring": "Nested custom gradient function for computing grads in reverse and forward mode autodiff.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\custom_gradient.py",
    "ast_data": "FunctionDef name:inner_recompute_grad arguments arg With Call Assign Call Compare Call If Call Assign Call Call Assign Call Assign Call Compare Call Assign Call arguments arg Call Call If Compare Call With Call Assign Call Assign If Compare Assign Call Assign Call Call FunctionDef name:transpose arguments arg arg Raise Call Call Return return:yes Call Call"
  },
  {
    "library": "scipy",
    "name": "xp_result_device",
    "source_code": "def xp_result_device(*args):\n    for arg in args:\n        if is_array_api_obj(arg):\n            return xp_device(arg)\n    return None",
    "docstring": "Return the device of an array in , for the purpose of input-output device propagation. If there are multiple devices, return an arbitrary one. If there are no arrays, return None (this typically happens only on NumPy).",
    "type": "function",
    "file_path": "scipy\\scipy\\_lib\\_array_api.py",
    "ast_data": "FunctionDef name:xp_result_device arguments arg For If Call Return return:yes Call Return return:no"
  },
  {
    "library": "matplotlib",
    "name": "ColorLayout",
    "source_code": "class ColorLayout(QtWidgets.QHBoxLayout):\n\n    def __init__(self, color, parent=None):\n        super().__init__()\n        assert isinstance(color, QtGui.QColor)\n        self.lineedit = QtWidgets.QLineEdit(mcolors.to_hex(color.getRgbF(), keep_alpha=True), parent)\n        self.lineedit.editingFinished.connect(self.update_color)\n        self.addWidget(self.lineedit)\n        self.colorbtn = ColorButton(parent)\n        self.colorbtn.color = color\n        self.colorbtn.colorChanged.connect(self.update_text)\n        self.addWidget(self.colorbtn)\n\n    def update_color(self):\n        color = self.text()\n        qcolor = to_qcolor(color)\n        self.colorbtn.color = qcolor\n\n    def update_text(self, color):\n        self.lineedit.setText(mcolors.to_hex(color.getRgbF(), keep_alpha=True))\n\n    def text(self):\n        return self.lineedit.text()",
    "docstring": "Color-specialized QLineEdit layout",
    "type": "class",
    "file_path": "matplotlib\\lib\\matplotlib\\backends\\qt_editor\\_formlayout.py",
    "ast_data": "ClassDef name:ColorLayout FunctionDef name:__init__ arg:self arg:color arg:parent arguments arg arg arg Call Call Call Assign Call Call Call Call Call Assign Call Assign Call Call FunctionDef name:update_color arg:self arguments arg Assign Call Assign Call Assign FunctionDef name:update_text arg:self arg:color arguments arg arg Call Call Call FunctionDef name:text arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "numpy",
    "name": "hermevander",
    "source_code": "def hermevander(x, deg):\n    ideg = pu._as_int(deg, 'deg')\n    if ideg < 0:\n        raise ValueError('deg must be non-negative')\n    x = np.array(x, copy=None, ndmin=1) + 0.0\n    dims = (ideg + 1,) + x.shape\n    dtyp = x.dtype\n    v = np.empty(dims, dtype=dtyp)\n    v[0] = x * 0 + 1\n    if ideg > 0:\n        v[1] = x\n        for i in range(2, ideg + 1):\n            v[i] = v[i - 1] * x - v[i - 2] * (i - 1)\n    return np.moveaxis(v, 0, -1)",
    "docstring": "Pseudo-Vandermonde matrix of given degree. Returns the pseudo-Vandermonde matrix of degree and sample points . The pseudo-Vandermonde matrix is defined by .. math:: V[..., i] = He_i(x), where ``0 >> import numpy as np >>> from numpy.polynomial.hermite_e import hermevander >>> x = np.array([-1, 0, 1]) >>> hermevander(x, 3) array([[ 1., -1., 0., 2.], [ 1., 0., -1., -0.], [ 1., 1., 0., -2.]])",
    "type": "function",
    "file_path": "numpy\\numpy\\polynomial\\hermite_e.py",
    "ast_data": "FunctionDef name:hermevander arg:x arg:deg arguments arg arg Assign Call If Compare Raise Call Assign Call Assign Assign Assign Call Assign If Compare Assign For Call Assign Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "set_backgroundcolor",
    "source_code": "def set_backgroundcolor(self, color):\n    if self._bbox_patch is None:\n        self.set_bbox(dict(facecolor=color, edgecolor=color))\n    else:\n        self._bbox_patch.update(dict(facecolor=color))\n    self._update_clip_properties()\n    self.stale = True",
    "docstring": "Set the background color of the text. This is realized through the bbox (see ), creating the bbox patch if needed. Parameters ---------- color : :mpltype: See Also -------- .set_bbox : To change the position of the bounding box",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\text.py",
    "ast_data": "FunctionDef name:set_backgroundcolor arg:self arg:color arguments arg arg If Compare Call Call Call Call Call Assign"
  },
  {
    "library": "tensorflow",
    "name": "is_categorical_column_weighted",
    "source_code": "def is_categorical_column_weighted(self):\n    raise NotImplementedError('not impl')",
    "docstring": "Check if the categorical column of the embedding column is weighted.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\tpu\\feature_column.py",
    "ast_data": "FunctionDef name:is_categorical_column_weighted arg:self arguments arg Raise Call"
  },
  {
    "library": "pytorch",
    "name": "extend_acc_subgraph",
    "source_code": "def extend_acc_subgraph(self, tag: str):\n    deps = self.find_reverse_deps(tag_id=int(tag.split('_')[-1]))\n    self.update_reverse_deps_for_fusions(deps)\n    parent_nodes = self.find_parent_nodes_of_subgraph(tag)\n    visited_nodes: NodeSet = set()\n    while parent_nodes:\n        node = None\n        for n in parent_nodes:\n            if deps[n] <= visited_nodes and n in self.acc_nodes:\n                node = n\n                break\n        if node is None:\n            break\n        node.tag = tag\n        parent_nodes.remove(node)\n        visited_nodes.add(node)\n        if node in self.fusions:\n            for fusion_node in self.fusions[node]:\n                if fusion_node not in visited_nodes:\n                    parent_nodes.add(fusion_node)\n        for arg in node.all_input_nodes:\n            if arg.op in CALLABLE_NODE_OPS and arg not in visited_nodes:\n                parent_nodes.add(arg)",
    "docstring": "Extend the acc subgraph with going the reversed topological direction.",
    "type": "method",
    "file_path": "pytorch\\torch\\fx\\passes\\splitter_base.py",
    "ast_data": "FunctionDef name:extend_acc_subgraph arg:self arg:tag arguments arg arg Assign Call Call Call Call Assign Call Call While Assign For If BoolOp Compare Compare Assign If Compare Assign Call Call If Compare For If Compare Call For If BoolOp Compare Compare Call"
  },
  {
    "library": "pandas",
    "name": "get_console_size",
    "source_code": "def get_console_size() -> tuple[int | None, int | None]:\n    from pandas import get_option\n    display_width = get_option('display.width')\n    display_height = get_option('display.max_rows')\n    if in_interactive_session():\n        if in_ipython_frontend():\n            from pandas._config.config import get_default_val\n            terminal_width = get_default_val('display.width')\n            terminal_height = get_default_val('display.max_rows')\n        else:\n            terminal_width, terminal_height = get_terminal_size()\n    else:\n        terminal_width, terminal_height = (None, None)\n    return (display_width or terminal_width, display_height or terminal_height)",
    "docstring": "Return console size as tuple = (width, height). Returns (None,None) in non-interactive session.",
    "type": "function",
    "file_path": "pandas\\pandas\\io\\formats\\console.py",
    "ast_data": "FunctionDef name:get_console_size arguments Assign Call Assign Call If Call If Call Assign Call Assign Call Assign Call Assign Return return:yes BoolOp BoolOp"
  },
  {
    "library": "django",
    "name": "_check_if_value_fixed",
    "source_code": "def _check_if_value_fixed(self, value, now=None):\n    if now is None:\n        now = _get_naive_now()\n    offset = datetime.timedelta(seconds=10)\n    lower = now - offset\n    upper = now + offset\n    if isinstance(value, datetime.datetime):\n        value = _to_naive(value)\n    else:\n        assert isinstance(value, datetime.date)\n        lower = lower.date()\n        upper = upper.date()\n    if lower <= value <= upper:\n        return [checks.Warning('Fixed default value provided.', hint='It seems you set a fixed date / time / datetime value as default for this field. This may not be what you want. If you want to have the current date as default, use `django.utils.timezone.now`', obj=self, id='fields.W161')]\n    return []",
    "docstring": "Check if the given value appears to have been provided as a \"fixed\" time value, and include a warning in the returned list if it does. The value argument must be a date object or aware/naive datetime object. If now is provided, it must be a naive datetime object.",
    "type": "method",
    "file_path": "django\\django\\db\\models\\fields\\__init__.py",
    "ast_data": "FunctionDef name:_check_if_value_fixed arg:self arg:value arg:now arguments arg arg arg If Compare Assign Call Assign Call Assign Assign If Call Assign Call Call Assign Call Assign Call If Compare Return return:yes Call Return return:no"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, dump_root, tfdbg_run_id, circular_buffer_size=DEFAULT_CIRCULAR_BUFFER_SIZE):\n    if not dump_root:\n        raise ValueError('Empty or None dump root')\n    self._dump_root = dump_root\n    self._tfdbg_run_id = tfdbg_run_id\n    _pywrap_debug_events_writer.Init(self._dump_root, self._tfdbg_run_id, circular_buffer_size)",
    "docstring": "Construct a DebugEventsWriter object. NOTE: Given the same , all objects from this constructor will point to the same underlying set of writers. In other words, they will write to the same set of debug events files in the folder. Args: dump_root: The root directory for dumping debug data. If does not exist as a directory, it will be created. tfdbg_run_id: Debugger Run ID. circular_buffer_size: Size of the circular buffer for each of the two execution-related debug events files: with the following suffixes: - .execution - .graph_execution_traces If <= 0, the circular-buffer behavior will be abolished in the constructed object.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\debug\\lib\\debug_events_writer.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:dump_root arg:tfdbg_run_id arg:circular_buffer_size arguments arg arg arg arg If Raise Call Assign Assign Call"
  },
  {
    "library": "django",
    "name": "_get_no_autofield_sequence_name",
    "source_code": "def _get_no_autofield_sequence_name(self, table):\n    name_length = self.max_name_length() - 3\n    return '%s_SQ' % truncate_name(strip_quotes(table), name_length).upper()",
    "docstring": "Manually created sequence name to keep backward compatibility for AutoFields that aren't Oracle identity columns.",
    "type": "method",
    "file_path": "django\\django\\db\\backends\\oracle\\operations.py",
    "ast_data": "FunctionDef name:_get_no_autofield_sequence_name arg:self arg:table arguments arg arg Assign Call Return return:yes Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "thunkify",
    "source_code": "def thunkify(tracer: _ProxyTracer, f: Callable[_P, R], *args: _P.args, **kwargs: _P.kwargs) -> Thunk[R]:\n    if tracer.enable_thunkify:\n        return Thunk(functools.partial(f, *args, **kwargs))\n    else:\n        r = f(*args, **kwargs)\n        return Thunk(lambda: r)",
    "docstring": "Delays computation of f until it's called again Also caches the result",
    "type": "function",
    "file_path": "pytorch\\torch\\fx\\experimental\\proxy_tensor.py",
    "ast_data": "FunctionDef name:thunkify arg:tracer arg:f arguments arg arg arg arg If Return return:yes Call Call Assign Call Return return:yes Call arguments"
  },
  {
    "library": "pytorch",
    "name": "chunk_dtensor",
    "source_code": "@abstractmethod\ndef chunk_dtensor(self, tensor: torch.Tensor, rank: int, device_mesh: DeviceMesh) -> torch.Tensor:\n    ...",
    "docstring": "Shards a tensor/DTensor to DTensor and returns the local DTensor.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\fsdp\\_fsdp_extensions.py",
    "ast_data": "FunctionDef name:chunk_dtensor arg:self arg:tensor arg:rank arg:device_mesh arguments arg arg arg arg"
  },
  {
    "library": "pandas",
    "name": "fullmatch",
    "source_code": "@forbid_nonstring_types(['bytes'])\ndef fullmatch(self, pat, case: bool=True, flags: int=0, na=lib.no_default):\n    result = self._data.array._str_fullmatch(pat, case=case, flags=flags, na=na)\n    return self._wrap_result(result, fill_value=na, returns_string=False)",
    "docstring": "Determine if each string entirely matches a regular expression. Checks if each string in the Series or Index fully matches the specified regular expression pattern. This function is useful when the requirement is for an entire string to conform to a pattern, such as validating formats like phone numbers or email addresses. Parameters ---------- pat : str Character sequence or regular expression. case : bool, default True If True, case sensitive. flags : int, default 0 (no flags) Regex module flags, e.g. re.IGNORECASE. na : scalar, optional Fill value for missing values. The default depends on dtype of the array. For object-dtype, `True` when only a *prefix* of the string matches the regular expression. extract : Extract matched groups. Examples -------- >>> ser = pd.Series([\"cat\", \"duck\", \"dove\"]) >>> ser.str.fullmatch(r\"d.+\") 0 False 1 True 2 True dtype: bool",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\strings\\accessor.py",
    "ast_data": "FunctionDef name:fullmatch arg:self arg:pat arg:case arg:flags arg:na arguments arg arg arg arg arg Assign Call Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "flatten",
    "source_code": "@doc_controls.do_not_doc_inheritable\ndef flatten(self):\n    flat = []\n    nest.map_structure(lambda spec: flat.extend(spec.flatten()), self._component_specs)\n    return flat",
    "docstring": "See TraceType base class for details. Do not override.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\type_spec.py",
    "ast_data": "FunctionDef name:flatten arg:self arguments arg Assign Call arguments arg Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_RegularizedGramianCholesky",
    "source_code": "def _RegularizedGramianCholesky(matrix, l2_regularizer, first_kind):\n    gramian = math_ops.matmul(matrix, matrix, adjoint_a=first_kind, adjoint_b=not first_kind)\n    if isinstance(l2_regularizer, tensor_lib.Tensor) or l2_regularizer != 0:\n        matrix_shape = array_ops.shape(matrix)\n        batch_shape = matrix_shape[:-2]\n        if first_kind:\n            small_dim = matrix_shape[-1]\n        else:\n            small_dim = matrix_shape[-2]\n        identity = eye(small_dim, batch_shape=batch_shape, dtype=matrix.dtype)\n        small_dim_static = matrix.shape[-1 if first_kind else -2]\n        identity.set_shape(matrix.shape[:-2].concatenate([small_dim_static, small_dim_static]))\n        gramian += l2_regularizer * identity\n    return gen_linalg_ops.cholesky(gramian)",
    "docstring": "Computes Cholesky factorization of regularized gramian matrix. Below we will use the following notation for each pair of matrix and right-hand sides in the batch: =\\\\(A \\in \\Re^{m \\times n}\\\\), =\\\\(C \\in \\Re^{\\min(m, n) \\times \\min(m,n)}\\\\), =\\\\(\\lambda\\\\). If is True, returns the Cholesky factorization \\\\(L\\\\) such that \\\\(L L^H = A^H A + \\lambda I\\\\). If is False, returns the Cholesky factorization \\\\(L\\\\) such that \\\\(L L^H = A A^H + \\lambda I\\\\). Args: matrix: of shape . l2_regularizer: 0-D . Ignored if . first_kind: bool. Controls what gramian matrix to factor. Returns: output: of shape whose inner-most 2 dimensions contain the Cholesky factors \\\\(L\\\\) described above.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\linalg_ops.py",
    "ast_data": "FunctionDef name:_RegularizedGramianCholesky arg:matrix arg:l2_regularizer arg:first_kind arguments arg arg arg Assign Call If BoolOp Call Compare Assign Call Assign If Assign Assign Assign Call Assign Call Call Return return:yes Call"
  },
  {
    "library": "kornia",
    "name": "normalize_pixel_coordinates3d",
    "source_code": "def normalize_pixel_coordinates3d(pixel_coordinates: Tensor, depth: int, height: int, width: int, eps: float=1e-08) -> Tensor:\n    if pixel_coordinates.shape[-1] != 3:\n        raise ValueError(f'Input pixel_coordinates must be of shape (*, 3). Got {pixel_coordinates.shape}')\n    dhw: Tensor = stack([tensor(depth), tensor(width), tensor(height)]).to(pixel_coordinates.device).to(pixel_coordinates.dtype)\n    factor: Tensor = tensor(2.0) / (dhw - 1).clamp(eps)\n    return factor * pixel_coordinates - 1",
    "docstring": "Normalize pixel coordinates between -1 and 1. Normalized, -1 if on extreme left, 1 if on extreme right (x = w-1). Args: pixel_coordinates: the grid with pixel coordinates. Shape can be :math:. depth: the maximum depth in the z-axis. height: the maximum height in the y-axis. width: the maximum width in the x-axis. eps: safe division by zero. Return: the normalized pixel coordinates.",
    "type": "function",
    "file_path": "kornia\\kornia\\geometry\\conversions.py",
    "ast_data": "FunctionDef name:normalize_pixel_coordinates3d arg:pixel_coordinates arg:depth arg:height arg:width arg:eps arguments arg arg arg arg arg If Compare Raise Call Call Call Call Call Call Call Call Call Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "fit",
    "source_code": "def fit(self, X, y=None, sample_weight=None):\n    self._reset()\n    return self.partial_fit(X, y, sample_weight)",
    "docstring": "Compute the mean and std to be used for later scaling. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) The data used to compute the mean and standard deviation used for later scaling along the features axis. y : None Ignored. sample_weight : array-like of shape (n_samples,), default=None Individual weights for each sample. .. versionadded:: 0.24 parameter *sample_weight* support to StandardScaler. Returns ------- self : object Fitted scaler.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\preprocessing\\_data.py",
    "ast_data": "FunctionDef name:fit arg:self arg:X arg:y arg:sample_weight arguments arg arg arg arg Call Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "_slice_or_none",
    "source_code": "def _slice_or_none(in_v, slc):\n    if in_v is None:\n        return None\n    return in_v[slc]",
    "docstring": "Helper function to cope with being an ndarray or .",
    "type": "function",
    "file_path": "matplotlib\\lib\\matplotlib\\lines.py",
    "ast_data": "FunctionDef name:_slice_or_none arg:in_v arg:slc arguments arg arg If Compare Return return:no Return return:yes"
  },
  {
    "library": "scipy",
    "name": "_rmatvec",
    "source_code": "def _rmatvec(self, x):\n    if type(self)._adjoint == LinearOperator._adjoint:\n        if hasattr(self, '_rmatmat') and type(self)._rmatmat != LinearOperator._rmatmat:\n            return self._rmatmat(x.reshape(-1, 1)).reshape(-1)\n        raise NotImplementedError\n    else:\n        return self.H.matvec(x)",
    "docstring": "Default implementation of _rmatvec; defers to adjoint.",
    "type": "method",
    "file_path": "scipy\\scipy\\sparse\\linalg\\_interface.py",
    "ast_data": "FunctionDef name:_rmatvec arg:self arg:x arguments arg arg If Compare Call If BoolOp Call Compare Call Return return:yes Call Call Call Raise Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "is_unsafe_leaf",
    "source_code": "def is_unsafe_leaf(self, row, predicted_config, choice2time):\n    return False",
    "docstring": "Can be overridden by subclasses to define their own logic for deciding when a leaf is unsafe. Returns a sample that landed in the leaf, the choice predicted by the tree, and a dictionary that maps each choice to the execution time. One can for example decide to mark a leaf as unsafe if the predicted choice is 2x slower than the fastest choice. If a leaf is unsafe, the learned heuristic will always return 'unsure' if an input lands in that leaf.",
    "type": "method",
    "file_path": "pytorch\\torchgen\\_autoheuristic\\train_decision.py",
    "ast_data": "FunctionDef name:is_unsafe_leaf arg:self arg:row arg:predicted_config arg:choice2time arguments arg arg arg arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "LazyConvTranspose3d",
    "source_code": "class LazyConvTranspose3d(_LazyConvXdMixin, ConvTranspose3d):\n    cls_to_become = ConvTranspose3d\n\n    def __init__(self, out_channels: int, kernel_size: _size_3_t, stride: _size_3_t=1, padding: _size_3_t=0, output_padding: _size_3_t=0, groups: int=1, bias: bool=True, dilation: _size_3_t=1, padding_mode: str='zeros', device=None, dtype=None) -> None:\n        factory_kwargs = {'device': device, 'dtype': dtype}\n        super().__init__(0, 0, kernel_size, stride, padding, output_padding, groups, False, dilation, padding_mode, **factory_kwargs)\n        self.weight = UninitializedParameter(**factory_kwargs)\n        self.out_channels = out_channels\n        if bias:\n            self.bias = UninitializedParameter(**factory_kwargs)\n\n    def _get_num_spatial_dims(self) -> int:\n        return 3",
    "docstring": "A :class: module with lazy initialization of the `ConvTranspose3dweightbiastorch.nn.modules.lazy.LazyModuleMixintorch.nn.ConvTranspose3dtorch.nn.modules.lazy.LazyModuleMixin`",
    "type": "class",
    "file_path": "pytorch\\torch\\nn\\modules\\conv.py",
    "ast_data": "ClassDef name:LazyConvTranspose3d Assign FunctionDef name:__init__ arg:self arg:out_channels arg:kernel_size arg:stride arg:padding arg:output_padding arg:groups arg:bias arg:dilation arg:padding_mode arg:device arg:dtype arguments arg arg arg arg arg arg arg arg arg arg arg arg Assign Call Call Assign Call Assign If Assign Call FunctionDef name:_get_num_spatial_dims arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "KerasModeKeys",
    "source_code": "class KerasModeKeys:\n    TRAIN = 'train'\n    TEST = 'test'\n    PREDICT = 'predict'",
    "docstring": "Standard names for model modes. The following standard keys are defined: * : training/fitting mode. * : testing/evaluation mode. * : prediction/inference mode.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\saving\\utils_v1\\mode_keys.py",
    "ast_data": "ClassDef name:KerasModeKeys Assign Assign Assign"
  },
  {
    "library": "numpy",
    "name": "LinAlgError",
    "source_code": "@set_module('numpy.linalg')\nclass LinAlgError(ValueError):\n    pass",
    "docstring": "Generic Python-exception-derived object raised by linalg functions. General purpose exception class, derived from Python's ValueError class, programmatically raised in linalg functions when a Linear Algebra-related condition would prevent further correct execution of the function. Parameters ---------- None Examples -------- >>> from numpy import linalg as LA >>> LA.inv(np.zeros((2,2))) Traceback (most recent call last): File \"\", line 1, in File \"...linalg.py\", line 350, in inv return wrap(solve(a, identity(a.shape[0], dtype=a.dtype))) File \"...linalg.py\", line 249, in solve raise LinAlgError('Singular matrix') numpy.linalg.LinAlgError: Singular matrix",
    "type": "class",
    "file_path": "numpy\\numpy\\linalg\\_linalg.py",
    "ast_data": "ClassDef name:LinAlgError Call"
  },
  {
    "library": "django",
    "name": "get_curve_geometry",
    "source_code": "def get_curve_geometry(self):\n    return OGRGeometry(capi.get_curve_geom(self.ptr, None))",
    "docstring": "Return a curve version of this geometry.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\gdal\\geometries.py",
    "ast_data": "FunctionDef name:get_curve_geometry arg:self arguments arg Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "gen_fake_args",
    "source_code": "def gen_fake_args(self) -> ArgsType:\n    return tree_map_only(DTensorSpec, _rebuild_tensor_from_dtensor_meta, self.args_schema, is_leaf=lambda x: isinstance(x, DTensorSpec))",
    "docstring": "gen_fake_args: generate fake args for the operator, this is mainly used by sharding propagation rules to generate fake args for the operator to run the local tensor operator and get the output spec.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\tensor\\_op_schema.py",
    "ast_data": "FunctionDef name:gen_fake_args arg:self arguments arg Return return:yes Call arguments arg Call"
  },
  {
    "library": "tensorflow",
    "name": "_use_tensor_buffer",
    "source_code": "def _use_tensor_buffer(self):\n    return self._parameters.trace_mode == tensor_tracer_flags.TRACE_MODE_FULL_TENSOR_SUMMARY",
    "docstring": "Returns true if the whole tensor needs to be cached/buffered in memory.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\tpu\\tensor_tracer.py",
    "ast_data": "FunctionDef name:_use_tensor_buffer arg:self arguments arg Return return:yes Compare"
  },
  {
    "library": "numpy",
    "name": "has_samecoef",
    "source_code": "def has_samecoef(self, other):\n    return len(self.coef) == len(other.coef) and np.all(self.coef == other.coef)",
    "docstring": "Check if coefficients match. Parameters ---------- other : class instance The other class must have the `` attribute. Returns ------- bool : boolean True if the coefficients are the same, False otherwise.",
    "type": "method",
    "file_path": "numpy\\numpy\\polynomial\\_polybase.py",
    "ast_data": "FunctionDef name:has_samecoef arg:self arg:other arguments arg arg Return return:yes BoolOp Compare Call Call Call Compare"
  },
  {
    "library": "pytorch",
    "name": "from_float",
    "source_code": "@classmethod\ndef from_float(cls, mod, use_precomputed_fake_quant=False):\n    assert type(mod) == cls._FLOAT_MODULE, 'qat.' + cls.__name__ + '.from_float only works for ' + cls._FLOAT_MODULE.__name__\n    assert hasattr(mod, 'qconfig'), 'Input float module must have qconfig defined'\n    assert mod.qconfig, 'Input float module must have a valid qconfig'\n    qconfig = mod.qconfig\n    conv, bn = (mod[0], mod[1])\n    qat_convbn = cls(conv.in_channels, conv.out_channels, conv.kernel_size, conv.stride, conv.padding, conv.dilation, conv.groups, conv.bias is not None, conv.padding_mode, bn.eps, bn.momentum, False, qconfig)\n    qat_convbn.weight = conv.weight\n    qat_convbn.bias = conv.bias\n    qat_convbn.bn.weight = bn.weight\n    qat_convbn.bn.bias = bn.bias\n    qat_convbn.bn.running_mean = bn.running_mean\n    qat_convbn.bn.running_var = bn.running_var\n    qat_convbn.bn.num_batches_tracked = bn.num_batches_tracked\n    return qat_convbn",
    "docstring": "Create a qat module from a float module or qparams_dict Args: a float module, either produced by torch.ao.quantization utilities or directly from user",
    "type": "method",
    "file_path": "pytorch\\torch\\ao\\nn\\intrinsic\\qat\\modules\\conv_fused.py",
    "ast_data": "FunctionDef name:from_float arg:cls arg:mod arg:use_precomputed_fake_quant arguments arg arg arg Compare Call Call Assign Assign Assign Call Compare Assign Assign Assign Assign Assign Assign Assign Return return:yes"
  },
  {
    "library": "scipy",
    "name": "logpdf",
    "source_code": "def logpdf(self, x, df, scale):\n    dim, df, scale = self._process_parameters(df, scale)\n    x = self._process_quantiles(x, dim)\n    C, log_det_scale = self._cholesky_logdet(scale)\n    out = self._logpdf(x, dim, df, scale, log_det_scale, C)\n    return _squeeze_output(out)",
    "docstring": "Log of the Wishart probability density function. Parameters ---------- x : array_like Quantiles, with the last axis of denoting the components. Each quantile must be a symmetric positive definite matrix. %(_doc_default_callparams)s Returns ------- pdf : ndarray Log of the probability density function evaluated at Notes ----- %(_doc_callparams_note)s",
    "type": "method",
    "file_path": "scipy\\scipy\\stats\\_multivariate.py",
    "ast_data": "FunctionDef name:logpdf arg:self arg:x arg:df arg:scale arguments arg arg arg arg Assign Call Assign Call Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "scrapy",
    "name": "DefaultReferrerPolicy",
    "source_code": "class DefaultReferrerPolicy(NoReferrerWhenDowngradePolicy):\n    NOREFERRER_SCHEMES: tuple[str, ...] = (*LOCAL_SCHEMES, 'file', 's3')\n    name: str = POLICY_SCRAPY_DEFAULT",
    "docstring": "A variant of \"no-referrer-when-downgrade\", with the addition that \"Referer\" is not sent if the parent request was using `` scheme.",
    "type": "class",
    "file_path": "scrapy\\scrapy\\spidermiddlewares\\referer.py",
    "ast_data": "ClassDef name:DefaultReferrerPolicy"
  },
  {
    "library": "pandas",
    "name": "_SeriesTableBuilder",
    "source_code": "class _SeriesTableBuilder(_TableBuilderAbstract):\n\n    def __init__(self, *, info: SeriesInfo) -> None:\n        self.info: SeriesInfo = info\n\n    def get_lines(self) -> list[str]:\n        self._lines = []\n        self._fill_non_empty_info()\n        return self._lines\n\n    @property\n    def data(self) -> Series:\n        return self.info.data\n\n    def add_memory_usage_line(self) -> None:\n        self._lines.append(f'memory usage: {self.memory_usage_string}')\n\n    @abstractmethod\n    def _fill_non_empty_info(self) -> None:\n        pass",
    "docstring": "Abstract builder for series info table. Parameters ---------- info : SeriesInfo. Instance of SeriesInfo.",
    "type": "class",
    "file_path": "pandas\\pandas\\io\\formats\\info.py",
    "ast_data": "ClassDef name:_SeriesTableBuilder FunctionDef name:__init__ arg:self arguments arg arg FunctionDef name:get_lines arg:self arguments arg Assign Call Return return:yes FunctionDef name:data arg:self arguments arg Return return:yes FunctionDef name:add_memory_usage_line arg:self arguments arg Call FunctionDef name:_fill_non_empty_info arg:self arguments arg"
  },
  {
    "library": "tensorflow",
    "name": "_CanExtractAttrsFastPath",
    "source_code": "def _CanExtractAttrsFastPath(op_def, keywords):\n    for input_arg in op_def.input_arg:\n        value = keywords.get(input_arg.name, None)\n        if not isinstance(value, tensor.Tensor):\n            return False\n    for attr_def in op_def.attr:\n        if attr_def.type == 'func' or attr_def.type == 'list(func)':\n            return False\n    return True",
    "docstring": "Check if the fast path for _apply_op_helper is applicable.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\op_def_library.py",
    "ast_data": "FunctionDef name:_CanExtractAttrsFastPath arg:op_def arg:keywords arguments arg arg For Assign Call If Call Return return:yes For If BoolOp Compare Compare Return return:yes Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "locked_y1",
    "source_code": "@property\ndef locked_y1(self):\n    if self._locked_points.mask[1, 1]:\n        return None\n    else:\n        return self._locked_points[1, 1]",
    "docstring": "float or None: The value used for the locked y1.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\transforms.py",
    "ast_data": "FunctionDef name:locked_y1 arg:self arguments arg If Return return:no Return return:yes"
  },
  {
    "library": "pandas",
    "name": "_binop",
    "source_code": "def _binop(self, other: Series, func, level=None, fill_value=None) -> Series:\n    this = self\n    if not self.index.equals(other.index):\n        this, other = self.align(other, level=level, join='outer')\n    this_vals, other_vals = ops.fill_binop(this._values, other._values, fill_value)\n    with np.errstate(all='ignore'):\n        result = func(this_vals, other_vals)\n    name = ops.get_op_result_name(self, other)\n    out = this._construct_result(result, name, other)\n    return cast(Series, out)",
    "docstring": "Perform generic binary operation with optional fill value. Parameters ---------- other : Series func : binary operator fill_value : float or object Value to substitute for NA/null values. If both Series are NA in a location, the result will be NA regardless of the passed fill value. level : int or level name, default None Broadcast across a level, matching Index values on the passed MultiIndex level. Returns ------- Series",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\series.py",
    "ast_data": "FunctionDef name:_binop arg:self arg:other arg:func arg:level arg:fill_value arguments arg arg arg arg arg Assign If Call Assign Call Assign Call With Call Assign Call Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "build_graph",
    "source_code": "def build_graph(device, n, m, k, transpose_a, transpose_b, dtype):\n    with ops.device('%s' % device):\n        if not transpose_a:\n            x = variable_v1.VariableV1(random_ops.random_uniform([n, m], dtype=dtype), use_resource=False)\n        else:\n            x = variable_v1.VariableV1(random_ops.random_uniform([m, n], dtype=dtype), use_resource=False)\n        if not transpose_b:\n            y = variable_v1.VariableV1(random_ops.random_uniform([m, k], dtype=dtype), use_resource=False)\n        else:\n            y = variable_v1.VariableV1(random_ops.random_uniform([k, m], dtype=dtype), use_resource=False)\n        z = math_ops.matmul(x, y, transpose_a=transpose_a, transpose_b=transpose_b)\n        return control_flow_ops.group(z)",
    "docstring": "Build a graph containing a sequence of matmul operations. Args: device: String, the device to run on. n: tensor A's first dimension size. m: tensor A's second dimension size. k: tensor B's second dimension size. transpose_a: boolean value to show if tensor A is transposed. transpose_b: boolean value to show if tensor B is transposed. dtype: numpy data type of the input tensor. Returns: A matmul operation to run()",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\matmul_benchmark.py",
    "ast_data": "FunctionDef name:build_graph arg:device arg:n arg:m arg:k arg:transpose_a arg:transpose_b arg:dtype arguments arg arg arg arg arg arg arg With Call If Assign Call Call Assign Call Call If Assign Call Call Assign Call Call Assign Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, functions):\n    self._functions = functions\n    self._location_key_to_location = {}",
    "docstring": "Constructor. Args: functions: A object.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\profiler\\pprof_profiler.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:functions arguments arg arg Assign Assign"
  },
  {
    "library": "tensorflow",
    "name": "add_to_collection",
    "source_code": "@tf_export(v1=['add_to_collection'])\ndef add_to_collection(name, value) -> None:\n    get_default_graph().add_to_collection(name, value)",
    "docstring": "Wrapper for using the default graph. See for more details. Args: name: The key for the collection. For example, the class contains many standard names for collections. value: The value to add to the collection. @compatibility(eager) Collections are only supported in eager when variables are created inside an EagerVariableStore (e.g. as part of a layer or template). @end_compatibility",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\ops.py",
    "ast_data": "FunctionDef name:add_to_collection arg:name arg:value arguments arg arg Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "flatten_sharded_optim_state_dict",
    "source_code": "@staticmethod\ndef flatten_sharded_optim_state_dict(sharded_optim_state_dict: dict[str, Any], model: torch.nn.Module, optim: torch.optim.Optimizer) -> dict[str, Any]:\n    FullyShardedDataParallel._warn_legacy_optim_state_dict('flatten_sharded_optim_state_dict', 'optim_state_dict_to_load', stacklevel=2)\n    return FullyShardedDataParallel._optim_state_dict_to_load_impl(optim_state_dict=sharded_optim_state_dict, model=model, optim_input=None, optim=optim, full_state_dict=False, is_named_optimizer=False)",
    "docstring": "Flatten a sharded optimizer state-dict. The API is similar to :meth:. The only difference is that the input `sharded_optim_state_dictshard_full_optim_state_dictshard_full_optim_state_dict`.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\fsdp\\fully_sharded_data_parallel.py",
    "ast_data": "FunctionDef name:flatten_sharded_optim_state_dict arg:sharded_optim_state_dict arg:model arg:optim arguments arg arg arg Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "set_server_def_retries",
    "source_code": "def set_server_def_retries(self, retries):\n    self._set_server_def_retries = retries",
    "docstring": "Set the number of retries to use when calling SetServerDef. In cases where many servers run in high-preemption environments, jobs could be preempted during startup and initial connection via SetServerDef. Retries allow for more robust connection in these environments. Args: retries: int specifying the number of connection retries before failing. Retries follow an exponential backoff waiting period with min value 1ms, max value 10s, and exponent 1.3.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\context.py",
    "ast_data": "FunctionDef name:set_server_def_retries arg:self arg:retries arguments arg arg Assign"
  },
  {
    "library": "tensorflow",
    "name": "_BesselK1eGrad",
    "source_code": "@ops.RegisterGradient('BesselK1e')\ndef _BesselK1eGrad(op: ops.Operation, grad):\n    x = op.inputs[0]\n    y = op.outputs[0]\n    with ops.control_dependencies([grad]):\n        partial_x = y * (1.0 - math_ops.reciprocal(x)) - special_math_ops.bessel_k0e(x)\n        return grad * partial_x",
    "docstring": "Compute gradient of bessel_k1e(x) with respect to its argument.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\math_grad.py",
    "ast_data": "FunctionDef name:_BesselK1eGrad arg:op arg:grad arguments arg arg Assign Assign With Call Assign Call Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_report_field_mismatches",
    "source_code": "def _report_field_mismatches(fields, field_values):\n    expected = set((f.name for f in fields))\n    actual = set(field_values)\n    extra = actual - expected\n    if extra:\n        raise ValueError(f'Got unexpected fields: {extra}')\n    missing = expected - actual\n    if missing:\n        raise ValueError(f'Missing required fields: {missing}')",
    "docstring": "Raises an exception with mismatches between fields and field_values.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\extension_type_field.py",
    "ast_data": "FunctionDef name:_report_field_mismatches arg:fields arg:field_values arguments arg arg Assign Call Assign Call Assign If Raise Call Assign If Raise Call"
  },
  {
    "library": "kornia",
    "name": "forward",
    "source_code": "def forward(self, x: Tensor, source: Tensor, x_mask: Optional[Tensor]=None, source_mask: Optional[Tensor]=None) -> Tensor:\n    bs = x.size(0)\n    query, key, value = (x, source, source)\n    query = self.q_proj(query).view(bs, -1, self.nhead, self.dim)\n    key = self.k_proj(key).view(bs, -1, self.nhead, self.dim)\n    value = self.v_proj(value).view(bs, -1, self.nhead, self.dim)\n    message = self.attention(query, key, value, q_mask=x_mask, kv_mask=source_mask)\n    message = self.merge(message.view(bs, -1, self.nhead * self.dim))\n    message = self.norm1(message)\n    message = self.mlp(torch.cat([x, message], dim=2))\n    message = self.norm2(message)\n    return x + message",
    "docstring": "Run forward. Args: x: [N, L, C] source: [N, S, C] x_mask: [N, L] (optional) source_mask: [N, S] (optional)",
    "type": "method",
    "file_path": "kornia\\kornia\\feature\\loftr\\loftr_module\\transformer.py",
    "ast_data": "FunctionDef name:forward arg:self arg:x arg:source arg:x_mask arg:source_mask arguments arg arg arg arg arg Assign Call Assign Assign Call Call Assign Call Call Assign Call Call Assign Call Assign Call Call Assign Call Assign Call Call Assign Call Return return:yes"
  },
  {
    "library": "scipy",
    "name": "_two_sample_transform",
    "source_code": "def _two_sample_transform(u, v):\n    nx = u.shape[0]\n    ny = v.shape[0]\n    x = np.concatenate([u, v], axis=0)\n    y = np.concatenate([np.zeros(nx), np.ones(ny)], axis=0).reshape(-1, 1)\n    return (x, y)",
    "docstring": "Helper function that concatenates x and y for two sample MGC stat. See above for use. Parameters ---------- u, v : ndarray and have shapes `uvxxuvy`.",
    "type": "function",
    "file_path": "scipy\\scipy\\stats\\_mgc.py",
    "ast_data": "FunctionDef name:_two_sample_transform arg:u arg:v arguments arg arg Assign Assign Assign Call Assign Call Call Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_from_compatible_tensor_list",
    "source_code": "def _from_compatible_tensor_list(self, tensor_list: List['core_types.Symbol']) -> composite_tensor.CompositeTensor:\n    flat_specs = nest.map_structure(functools.partial(get_batchable_flat_tensor_specs, context_spec=self), self._component_specs)\n    nested_tensor_list = nest.pack_sequence_as(flat_specs, tensor_list)\n    components = nest.map_structure_up_to(self._component_specs, batchable_from_tensor_list, self._component_specs, nested_tensor_list)\n    return self._from_components(components)",
    "docstring": "Reconstructs a value from a compatible flat list of .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\type_spec.py",
    "ast_data": "FunctionDef name:_from_compatible_tensor_list arg:self arg:tensor_list arguments arg arg Assign Call Call Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "get_operation_by_name",
    "source_code": "def get_operation_by_name(self, name) -> 'Operation':\n    if not isinstance(name, str):\n        raise TypeError('Operation names are strings (or similar), not %s.' % type(name).__name__)\n    op = cast(Operation, self.as_graph_element(name, allow_tensor=False, allow_operation=True))\n    return op",
    "docstring": "Returns the with the given . This method may be called concurrently from multiple threads. Args: name: The name of the to return. Returns: The with the given . Raises: TypeError: If is not a string. KeyError: If does not correspond to an operation in this graph.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\ops.py",
    "ast_data": "FunctionDef name:get_operation_by_name arg:self arg:name arguments arg arg If Call Raise Call Call Assign Call Call Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "get_params",
    "source_code": "def get_params(self, deep=True):\n    out = dict()\n    for key in self._get_param_names():\n        value = getattr(self, key)\n        if deep and hasattr(value, 'get_params') and (not isinstance(value, type)):\n            deep_items = value.get_params().items()\n            out.update(((key + '__' + k, val) for k, val in deep_items))\n        out[key] = value\n    return out",
    "docstring": "Get parameters for this estimator. Parameters ---------- deep : bool, default=True If True, will return the parameters for this estimator and contained subobjects that are estimators. Returns ------- params : dict Parameter names mapped to their values.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\base.py",
    "ast_data": "FunctionDef name:get_params arg:self arg:deep arguments arg arg Assign Call For Call Assign Call If BoolOp Call Call Assign Call Call Call Assign Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "from_tensor_and_offsets",
    "source_code": "@classmethod\ndef from_tensor_and_offsets(cls, tensor: torch.Tensor, shard_offsets: list[int], rank: int) -> 'Shard':\n    shard_sizes = list(tensor.size())\n    placement = _remote_device(f'rank:{rank}/{str(tensor.device)}')\n    shard_meta = ShardMetadata(shard_offsets=shard_offsets, shard_sizes=shard_sizes, placement=placement)\n    return Shard(tensor, shard_meta)",
    "docstring": "Creates a Shard of a ShardedTensor from a local torch.Tensor, shard_offsets and rank. Args: tensor(torch.Tensor): Local tensor for the shard. shard_offsets(List[int]): List of integers specify the offset of the shard on each dimension. rank(int): Specify the rank for the shard.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\_shard\\sharded_tensor\\shard.py",
    "ast_data": "FunctionDef name:from_tensor_and_offsets arg:cls arg:tensor arg:shard_offsets arg:rank arguments arg arg arg arg Assign Call Call Assign Call Call Assign Call Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "indexables",
    "source_code": "@cache_readonly\ndef indexables(self):\n    d = self.description\n    md = self.read_metadata('index')\n    meta = 'category' if md is not None else None\n    index_col = GenericIndexCol(name='index', axis=0, table=self.table, meta=meta, metadata=md)\n    _indexables: list[GenericIndexCol | GenericDataIndexableCol] = [index_col]\n    for i, n in enumerate(d._v_names):\n        assert isinstance(n, str)\n        atom = getattr(d, n)\n        md = self.read_metadata(n)\n        meta = 'category' if md is not None else None\n        dc = GenericDataIndexableCol(name=n, pos=i, values=[n], typ=atom, table=self.table, meta=meta, metadata=md)\n        _indexables.append(dc)\n    return _indexables",
    "docstring": "create the indexables from the table description",
    "type": "method",
    "file_path": "pandas\\pandas\\io\\pytables.py",
    "ast_data": "FunctionDef name:indexables arg:self arguments arg Assign Assign Call Assign Compare Assign Call For Call Call Assign Call Assign Call Assign Compare Assign Call Call Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "asarray",
    "source_code": "def asarray(obj: Array | complex | NestedSequence[complex] | SupportsBufferProtocol, /, *, dtype: DType | None=None, device: Device | None=None, copy: _Copy | None=None, **kwargs: Any) -> Array:\n    _helpers._check_device(np, device)\n    if copy is None:\n        copy = np._CopyMode.IF_NEEDED\n    elif copy is False:\n        copy = np._CopyMode.NEVER\n    elif copy is True:\n        copy = np._CopyMode.ALWAYS\n    return np.array(obj, copy=copy, dtype=dtype, **kwargs)",
    "docstring": "Array API compatibility wrapper for asarray(). See the corresponding documentation in the array library and/or the array API specification for more details.",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\externals\\array_api_compat\\numpy\\_aliases.py",
    "ast_data": "FunctionDef name:asarray arguments arg arg arg arg arg Call If Compare Assign If Compare Assign If Compare Assign Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "get_arg_value",
    "source_code": "def get_arg_value(node, arg_name, arg_pos=None):\n    if arg_name is not None:\n        for kw in node.keywords:\n            if kw.arg == arg_name:\n                return (True, kw.value)\n    if arg_pos is not None:\n        idx = 0\n        for arg in node.args:\n            if sys.version_info[:2] >= (3, 5) and isinstance(arg, ast.Starred):\n                continue\n            if idx == arg_pos:\n                return (True, arg)\n            idx += 1\n    return (False, None)",
    "docstring": "Get the value of an argument from a ast.Call node. This function goes through the positional and keyword arguments to check whether a given argument was used, and if so, returns its value (the node representing its value). This cannot introspect *args or **args, but it safely handles *args in Python3.5+. Args: node: The ast.Call node to extract arg values from. arg_name: The name of the argument to extract. arg_pos: The position of the argument (in case it's passed as a positional argument). Returns: A tuple (arg_present, arg_value) containing a boolean indicating whether the argument is present, and its value in case it is.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\tools\\compatibility\\ast_edits.py",
    "ast_data": "FunctionDef name:get_arg_value arg:node arg:arg_name arg:arg_pos arguments arg arg arg If Compare For If Compare Return return:yes If Compare Assign For If BoolOp Compare Call If Compare Return return:yes Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "process_tree_inplace",
    "source_code": "def process_tree_inplace(self, root_directory):\n    files_to_process = []\n    for dir_name, _, file_list in os.walk(root_directory):\n        py_files = [os.path.join(dir_name, f) for f in file_list if f.endswith('.py')]\n        files_to_process += py_files\n    file_count = 0\n    tree_errors = {}\n    report = ''\n    report += '=' * 80 + '\\n'\n    report += 'Input tree: %r\\n' % root_directory\n    report += '=' * 80 + '\\n'\n    for path in files_to_process:\n        if os.path.islink(path):\n            report += 'Skipping symlink %s.\\n' % path\n            continue\n        file_count += 1\n        _, l_report, l_errors = self.process_file(path, path)\n        tree_errors[path] = l_errors\n        report += l_report\n    return (file_count, report, tree_errors)",
    "docstring": "Process a directory of python files in place.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\tools\\compatibility\\ast_edits.py",
    "ast_data": "FunctionDef name:process_tree_inplace arg:self arg:root_directory arguments arg arg Assign For Call Assign Call Call Assign Assign Assign For If Call Assign Call Assign Return return:yes"
  },
  {
    "library": "pygame",
    "name": "draw_aaline",
    "source_code": "def draw_aaline(surf, color, from_point, to_point, blend=True):\n    line = [from_point[0], from_point[1], to_point[0], to_point[1]]\n    return _clip_and_draw_aaline(surf, surf.get_clip(), color, line, blend)",
    "docstring": "draw anti-aliased line between two endpoints.",
    "type": "function",
    "file_path": "pygame\\src_py\\draw_py.py",
    "ast_data": "FunctionDef name:draw_aaline arg:surf arg:color arg:from_point arg:to_point arg:blend arguments arg arg arg arg arg Assign Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "get_rollout_state_from_issue",
    "source_code": "def get_rollout_state_from_issue(github_token: str, repo: str, issue_num: int) -> str:\n    gh = get_gh_client(github_token)\n    issue = get_issue(gh, repo, issue_num)\n    return str(issue.get_comments()[0].body.strip('\\n\\t '))",
    "docstring": "Gets the first comment of the issue, which contains the desired rollout state. The default issue we use -",
    "type": "function",
    "file_path": "pytorch\\.github\\scripts\\runner_determinator.py",
    "ast_data": "FunctionDef name:get_rollout_state_from_issue arg:github_token arg:repo arg:issue_num arguments arg arg arg Assign Call Assign Call Return return:yes Call Call Call"
  },
  {
    "library": "kornia",
    "name": "validate_bbox",
    "source_code": "def validate_bbox(boxes: torch.Tensor) -> bool:\n    if not (len(boxes.shape) in [3, 4] and boxes.shape[-2:] == torch.Size([4, 2])):\n        return False\n    if len(boxes.shape) == 4:\n        boxes = boxes.view(-1, 4, 2)\n    x_tl, y_tl = (boxes[..., 0, 0], boxes[..., 0, 1])\n    x_tr, y_tr = (boxes[..., 1, 0], boxes[..., 1, 1])\n    x_br, y_br = (boxes[..., 2, 0], boxes[..., 2, 1])\n    x_bl, y_bl = (boxes[..., 3, 0], boxes[..., 3, 1])\n    width_t, width_b = (x_tr - x_tl + 1, x_br - x_bl + 1)\n    height_t, height_b = (y_tr - y_tl + 1, y_br - y_bl + 1)\n    if not torch.allclose(width_t, width_b, atol=0.0001):\n        return False\n    if not torch.allclose(height_t, height_b, atol=0.0001):\n        return False\n    return True",
    "docstring": "Validate if a 2D bounding box usable or not. This function checks if the boxes are rectangular or not. Args: boxes: a tensor containing the coordinates of the bounding boxes to be extracted. The tensor must have the shape of Bx4x2, where each box is defined in the following `` order: top-left, top-right, bottom-right, bottom-left. The coordinates must be in the x, y order.",
    "type": "function",
    "file_path": "kornia\\kornia\\geometry\\bbox.py",
    "ast_data": "FunctionDef name:validate_bbox arg:boxes arguments arg If BoolOp Compare Call Compare Call Return return:yes If Compare Call Assign Call Assign Assign Assign Assign Assign Assign If Call Return return:yes If Call Return return:yes Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "get_arch_list",
    "source_code": "def get_arch_list() -> list[str]:\n    if not is_available():\n        return []\n    arch_flags = torch._C._cuda_getArchFlags()\n    if arch_flags is None:\n        return []\n    return arch_flags.split()",
    "docstring": "Return list CUDA architectures this library was compiled for.",
    "type": "function",
    "file_path": "pytorch\\torch\\cuda\\__init__.py",
    "ast_data": "FunctionDef name:get_arch_list arguments If Call Return return:no Assign Call If Compare Return return:no Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_pad",
    "source_code": "def _pad(batch):\n    padded_dict_batch = {}\n    if isinstance(batch, dict):\n        for key, value in batch.items():\n            padded_dict_batch[key] = _pad(value)\n        return padded_dict_batch\n    rank = len(batch.shape)\n    assert rank > 0\n    missing_count = self.padded_batch_size - self.get_real_batch_size(batch)\n    padding = backend.stack([[0, missing_count]] + [[0, 0]] * (rank - 1))\n    return array_ops.pad(batch, padding, 'constant')",
    "docstring": "Helper function to pad nested data within each batch elements.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\partial_batch_padding_handler.py",
    "ast_data": "FunctionDef name:_pad arg:batch arguments arg Assign If Call For Call Assign Call Return return:yes Assign Call Compare Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "print_readable",
    "source_code": "@compatibility(is_backward_compatible=False)\ndef print_readable(self, print_output=True, include_stride=False, include_device=False, colored=False, *, fast_sympy_print: bool=False):\n    ctx_mgr = contextlib.ExitStack()\n    with ctx_mgr:\n        if fast_sympy_print:\n            from torch._inductor.utils import sympy_str\n\n            def fast_repr(expr: torch.types.PySymType) -> str:\n                return sympy_str(expr.node.expr)\n            ctx_mgr.enter_context(_override_sym_repr(fast_repr))\n        r = _print_readable(self, self._get_name(), print_output, include_stride, include_device, colored)\n        return r",
    "docstring": "Return the Python code generated for current GraphModule and its children GraphModules",
    "type": "method",
    "file_path": "pytorch\\torch\\fx\\graph_module.py",
    "ast_data": "FunctionDef name:print_readable arg:self arg:print_output arg:include_stride arg:include_device arg:colored arguments arg arg arg arg arg arg Assign Call With If FunctionDef name:fast_repr arg:expr arguments arg Return return:yes Call Call Call Assign Call Call Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "ParamsDict",
    "source_code": "class ParamsDict(ReprHTMLMixin, UserDict):\n    _html_repr = _params_html_repr\n\n    def __init__(self, params=None, non_default=tuple()):\n        super().__init__(params or {})\n        self.non_default = non_default",
    "docstring": "Dictionary-like class to store and provide an HTML representation. It builds an HTML structure to be used with Jupyter notebooks or similar environments. It allows storing metadata to track non-default parameters. Parameters ---------- params : dict, default=None The original dictionary of parameters and their values. non_default : tuple The list of non-default parameters.",
    "type": "class",
    "file_path": "scikit-learn\\sklearn\\utils\\_repr_html\\params.py",
    "ast_data": "ClassDef name:ParamsDict Assign FunctionDef name:__init__ arg:self arg:params arg:non_default arguments arg arg arg Call Call Call BoolOp Assign"
  },
  {
    "library": "pytorch",
    "name": "device_of",
    "source_code": "class device_of(device):\n\n    def __init__(self, obj):\n        idx = obj.get_device() if obj.is_xpu else -1\n        super().__init__(idx)",
    "docstring": "Context-manager that changes the current device to that of given object. You can use both tensors and storages as arguments. If a given object is not allocated on a XPU, this is a no-op. Args: obj (Tensor or Storage): object allocated on the selected device.",
    "type": "class",
    "file_path": "pytorch\\torch\\xpu\\__init__.py",
    "ast_data": "ClassDef name:device_of FunctionDef name:__init__ arg:self arg:obj arguments arg arg Assign Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "CompiledFxGraphConstants",
    "source_code": "class CompiledFxGraphConstants:\n\n    def unwrap(self, g: CompiledFxGraph) -> dict[str, torch.Tensor]:\n        assert g.constants is not None\n        return g.constants",
    "docstring": "Wrapper class that unwraps constants from a compiled fx graph. This version of the class only supports directly grabbing the saved constants off of a CompiledFxGraph. With freezing, FxGraphCache doesn't store the constants of the input GraphModule it gets from AOTAutograd. Instead, it saves just the **names** of those constants, and grabs the constant values directly from the graph module passed in at runtime. Thing is, we don't always *have* the graph module available at runtime, hence the existence of this class and its CompiledFxGraphConstantsWithGm counterpart. To support freezing, FXGraphCache gets passed a CompiledFxGraphConstantsWithGm during post compile. Otherwise, CompiledFxGraphConstants supports the basic case of loading the value of constants directly off of the original saved object.",
    "type": "class",
    "file_path": "pytorch\\torch\\_inductor\\output_code.py",
    "ast_data": "ClassDef name:CompiledFxGraphConstants FunctionDef name:unwrap arg:self arg:g arguments arg arg Compare Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "should_run_validation",
    "source_code": "def should_run_validation(validation_freq, epoch):\n    one_indexed_epoch = epoch + 1\n    if isinstance(validation_freq, int):\n        if validation_freq < 1:\n            raise ValueError('`validation_freq` can not be less than 1.')\n        return one_indexed_epoch % validation_freq == 0\n    if not isinstance(validation_freq, collections.abc.Container):\n        raise ValueError('`validation_freq` must be an Integer or `collections.abc.Container` (e.g. list, tuple, etc.)')\n    return one_indexed_epoch in validation_freq",
    "docstring": "Checks if validation should be run this epoch. Args: validation_freq: Integer or list. If an integer, specifies how many training epochs to run before a new validation run is performed. If a list, specifies the epochs on which to run validation. epoch: Integer, the number of the training epoch just completed. Returns: Bool, True if validation should be run. Raises: ValueError: if is an Integer and less than 1, or if it is neither an Integer nor a Sequence.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\training_utils_v1.py",
    "ast_data": "FunctionDef name:should_run_validation arg:validation_freq arg:epoch arguments arg arg Assign If Call If Compare Raise Call Return return:yes Compare If Call Raise Call Return return:yes Compare"
  },
  {
    "library": "scipy",
    "name": "regularized_lsq_operator",
    "source_code": "def regularized_lsq_operator(J, diag):\n    J = aslinearoperator(J)\n    m, n = J.shape\n\n    def matvec(x):\n        return np.hstack((J.matvec(x), diag * x))\n\n    def rmatvec(x):\n        x1 = x[:m]\n        x2 = x[m:]\n        return J.rmatvec(x1) + diag * x2\n    return LinearOperator((m + n, n), matvec=matvec, rmatvec=rmatvec)",
    "docstring": "Return a matrix arising in regularized least squares as LinearOperator. The matrix is [ J ] [ D ] where D is diagonal matrix with elements from .",
    "type": "function",
    "file_path": "scipy\\scipy\\optimize\\_lsq\\common.py",
    "ast_data": "FunctionDef name:regularized_lsq_operator arg:J arg:diag arguments arg arg Assign Call Assign FunctionDef name:matvec arg:x arguments arg Return return:yes Call Call FunctionDef name:rmatvec arg:x arguments arg Assign Assign Return return:yes Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "from_structure",
    "source_code": "@staticmethod\ndef from_structure(output_types, output_shapes=None, shared_name=None, output_classes=None):\n    output_types = nest.map_structure(dtypes.as_dtype, output_types)\n    if output_shapes is None:\n        output_shapes = nest.map_structure(lambda _: tensor_shape.TensorShape(None), output_types)\n    else:\n        output_shapes = nest.map_structure_up_to(output_types, tensor_shape.as_shape, output_shapes)\n    if output_classes is None:\n        output_classes = nest.map_structure(lambda _: tensor.Tensor, output_types)\n    nest.assert_same_structure(output_types, output_shapes)\n    output_structure = structure.convert_legacy_structure(output_types, output_shapes, output_classes)\n    if shared_name is None:\n        shared_name = ''\n    iterator_resource = gen_dataset_ops.iterator_v2(container='', shared_name=shared_name, output_types=structure.get_flat_tensor_types(output_structure), output_shapes=structure.get_flat_tensor_shapes(output_structure))\n    return Iterator(iterator_resource, None, output_types, output_shapes, output_classes)",
    "docstring": "Creates a new, uninitialized with the given structure. This iterator-constructing method can be used to create an iterator that is reusable with many different datasets. The returned iterator is not bound to a particular dataset, and it has no . To initialize the iterator, run the operation returned by . The following is an example Args: output_types: A (nested) structure of objects corresponding to each component of an element of this dataset. output_shapes: (Optional.) A (nested) structure of objects corresponding to each component of an element of this dataset. If omitted, each component will have an unconstrainted shape. shared_name: (Optional.) If non-empty, this iterator will be shared under the given name across multiple sessions that share the same devices (e.g. when using a remote server). output_classes: (Optional.) A (nested) structure of Python objects corresponding to each component of an element of this iterator. If omitted, each component is assumed to be of type . Returns: An . Raises: TypeError: If the structures of and are not the same.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\data\\ops\\iterator_ops.py",
    "ast_data": "FunctionDef name:from_structure arg:output_types arg:output_shapes arg:shared_name arg:output_classes arguments arg arg arg arg Assign Call If Compare Assign Call arguments arg Call Assign Call If Compare Assign Call arguments arg Call Assign Call If Compare Assign Assign Call Call Call Return return:yes Call"
  },
  {
    "library": "django",
    "name": "dimension",
    "source_code": "@property\ndef dimension(self):\n    return capi.get_dims(self.ptr)",
    "docstring": "Return 0 for points, 1 for lines, and 2 for surfaces.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\gdal\\geometries.py",
    "ast_data": "FunctionDef name:dimension arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_assert_scalar",
    "source_code": "def _assert_scalar(value, name):\n    value_rank = value.shape.rank\n    if value_rank is None:\n        check = control_flow_assert.Assert(math_ops.equal(array_ops.rank(value), 0), ['Input %s must be a scalar' % name], name='%sIsScalar' % name.capitalize())\n        result = control_flow_ops.with_dependencies([check], value, name='%sDependencies' % name)\n        result.set_shape([])\n        return result\n    elif value_rank == 0:\n        return value\n    else:\n        raise ValueError('Input %s must be a scalar' % name)",
    "docstring": "Asserts that is scalar, and returns .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\parsing_ops.py",
    "ast_data": "FunctionDef name:_assert_scalar arg:value arg:name arguments arg arg Assign If Compare Assign Call Call Call Call Assign Call Call Return return:yes If Compare Return return:yes Raise Call"
  },
  {
    "library": "scikit-learn",
    "name": "fit",
    "source_code": "@_fit_context(prefer_skip_nested_validation=True)\ndef fit(self, X, y, sample_weight=None, **params):\n    super().fit(X, y, sample_weight=sample_weight, **params)\n    return self",
    "docstring": "Fit Ridge regression model with cv. Parameters ---------- X : ndarray of shape (n_samples, n_features) Training data. If using GCV, will be cast to float64 if necessary. y : ndarray of shape (n_samples,) or (n_samples, n_targets) Target values. Will be cast to X's dtype if necessary. sample_weight : float or ndarray of shape (n_samples,), default=None Individual weights for each sample. If given a float, every sample will have the same weight. **params : dict, default=None Parameters to be passed to the underlying scorer. .. versionadded:: 1.5 Only available if , which can be set by using `Metadata Routing User Guide ` for more details. Returns ------- self : object Fitted estimator. Notes ----- When sample_weight is provided, the selected hyperparameter may depend on whether we use leave-one-out cross-validation (cv=None) or another form of cross-validation, because only leave-one-out cross-validation takes the sample weights into account when computing the validation score.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\linear_model\\_ridge.py",
    "ast_data": "FunctionDef name:fit arg:self arg:X arg:y arg:sample_weight arguments arg arg arg arg arg Call Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "get_local",
    "source_code": "def get_local(self, name: str, *, stacklevel=0) -> ComptimeVar:\n    tx = self.__get_tx(stacklevel)\n    var = tx.symbolic_locals[name]\n    if isinstance(var, CellVariable):\n        return ComptimeVar(tx.output.side_effects.load_cell(var))\n    return ComptimeVar(var)",
    "docstring": "Retrieve the compile-time known information about a local.",
    "type": "method",
    "file_path": "pytorch\\torch\\_dynamo\\comptime.py",
    "ast_data": "FunctionDef name:get_local arg:self arg:name arguments arg arg arg Assign Call Assign If Call Return return:yes Call Call Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "_dual_gap",
    "source_code": "def _dual_gap(emp_cov, precision_, alpha):\n    gap = np.sum(emp_cov * precision_)\n    gap -= precision_.shape[0]\n    gap += alpha * (np.abs(precision_).sum() - np.abs(np.diag(precision_)).sum())\n    return gap",
    "docstring": "Expression of the dual gap convergence criterion The specific definition is given in Duchi \"Projected Subgradient Methods for Learning Sparse Gaussians\".",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\covariance\\_graph_lasso.py",
    "ast_data": "FunctionDef name:_dual_gap arg:emp_cov arg:precision_ arg:alpha arguments arg arg arg Assign Call Call Call Call Call Call Return return:yes"
  },
  {
    "library": "django",
    "name": "equals",
    "source_code": "def equals(self, other):\n    return self._topology(capi.ogr_equals, other)",
    "docstring": "Return True if this geometry is equivalent to the other.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\gdal\\geometries.py",
    "ast_data": "FunctionDef name:equals arg:self arg:other arguments arg arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_disallow_batch_hooks_in_ps_strategy",
    "source_code": "def _disallow_batch_hooks_in_ps_strategy(self):\n    strategy = distribute_lib.get_strategy()\n    if strategy._should_use_with_coordinator:\n        unsupported_callbacks = []\n        for cb in self.callbacks:\n            if getattr(cb, '_supports_tf_logs', False):\n                continue\n            if cb._implements_train_batch_hooks() or cb._implements_test_batch_hooks() or cb._implements_predict_batch_hooks():\n                unsupported_callbacks.append(cb)\n        if unsupported_callbacks:\n            raise ValueError('Batch-level `Callback`s are not supported with `ParameterServerStrategy`. Found unsupported callbacks: {}'.format(unsupported_callbacks))",
    "docstring": "Error out if batch-level callbacks are passed with PSStrategy.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\callbacks.py",
    "ast_data": "FunctionDef name:_disallow_batch_hooks_in_ps_strategy arg:self arguments arg Assign Call If Assign For If Call If BoolOp Call Call Call Call If Raise Call Call"
  },
  {
    "library": "tensorflow",
    "name": "read32",
    "source_code": "def read32(bytestream):\n    dt = np.dtype(np.uint32).newbyteorder('>')\n    return np.frombuffer(bytestream.read(4), dtype=dt)[0]",
    "docstring": "Read 4 bytes from bytestream as an unsigned 32-bit integer.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\lite\\tutorials\\dataset.py",
    "ast_data": "FunctionDef name:read32 arg:bytestream arguments arg Assign Call Call Return return:yes Call Call"
  },
  {
    "library": "pandas",
    "name": "_op_via_apply",
    "source_code": "@final\ndef _op_via_apply(self, name: str, *args, **kwargs):\n    f = getattr(type(self._obj_with_exclusions), name)\n\n    def curried(x):\n        return f(x, *args, **kwargs)\n    curried.__name__ = name\n    if name in base.plotting_methods:\n        return self._python_apply_general(curried, self._selected_obj)\n    is_transform = name in base.transformation_kernels\n    result = self._python_apply_general(curried, self._obj_with_exclusions, is_transform=is_transform, not_indexed_same=not is_transform)\n    if self._grouper.has_dropped_na and is_transform:\n        result = self._set_result_index_ordered(result)\n    return result",
    "docstring": "Compute the result of an operation by using GroupBy's apply.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\groupby\\groupby.py",
    "ast_data": "FunctionDef name:_op_via_apply arg:self arg:name arguments arg arg arg arg Assign Call Call FunctionDef name:curried arg:x arguments arg Return return:yes Call Assign If Compare Return return:yes Call Assign Compare Assign Call If BoolOp Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_make_1d_state",
    "source_code": "def _make_1d_state(state_size, seed):\n    if isinstance(seed, int):\n        ls = []\n        for _ in range(state_size):\n            ls.append(seed & SEED_BIT_MASK)\n            seed >>= SEED_TYPE_BITS\n        seed = ls\n    seed = nest.map_structure(_uint_to_int, seed)\n    seed = math_ops.cast(seed, STATE_TYPE)\n    seed = array_ops.reshape(seed, [-1])\n    seed = seed[0:state_size]\n    seed_size = seed.shape[0]\n    if seed_size is None:\n        seed_size = array_ops.shape(seed)[0]\n    padding_size = math_ops.maximum(state_size - seed_size, 0)\n    padding = array_ops.zeros([padding_size], seed.dtype)\n    seed = array_ops.concat([padding, seed], axis=0)\n    seed.set_shape([state_size])\n    return seed",
    "docstring": "Makes a 1-D RNG state. Args: state_size: an integer. seed: an integer or 1-D tensor. Returns: a 1-D tensor of shape [state_size] and dtype STATE_TYPE.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\stateful_random_ops.py",
    "ast_data": "FunctionDef name:_make_1d_state arg:state_size arg:seed arguments arg arg If Call Assign For Call Call Assign Assign Call Assign Call Assign Call Assign Assign If Compare Assign Call Assign Call Assign Call Assign Call Call Return return:yes"
  },
  {
    "library": "numpy",
    "name": "__rmul__",
    "source_code": "def __rmul__(self, i):\n    return asarray(multiply(self, i))",
    "docstring": "Return (self * i), that is string multiple concatenation, element-wise. See Also -------- multiply",
    "type": "method",
    "file_path": "numpy\\numpy\\_core\\defchararray.py",
    "ast_data": "FunctionDef name:__rmul__ arg:self arg:i arguments arg arg Return return:yes Call Call"
  },
  {
    "library": "django",
    "name": "__str__",
    "source_code": "def __str__(self):\n    return 'Feature FID %d in Layer<%s>' % (self.fid, self.layer_name)",
    "docstring": "The string name of the feature.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\gdal\\feature.py",
    "ast_data": "FunctionDef name:__str__ arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_get_func_graph_for_branch",
    "source_code": "def _get_func_graph_for_branch(name_attr_list, cached_attr_name=None):\n    func_graph = None\n    if cached_attr_name is not None:\n        func_graph = getattr(op, cached_attr_name, None)\n    inputs = op.inputs[1:]\n    if func_graph is None:\n        input_shapes = [t.shape for t in inputs]\n        func_graph = util.get_func_graph(op, input_shapes, name_attr_list.name)\n    for external_t, internal_t in zip(inputs, func_graph.inputs):\n        handle_data_util.copy_handle_data(external_t, internal_t)\n    func_graph.function_captures.reset_captures(inputs, func_graph.inputs)\n    func_graph._forward_cond = op\n    return func_graph",
    "docstring": "Generates and returns a FuncGraph for the given branch.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\cond_v2.py",
    "ast_data": "FunctionDef name:_get_func_graph_for_branch arg:name_attr_list arg:cached_attr_name arguments arg arg Assign If Compare Assign Call Assign If Compare Assign Assign Call For Call Call Call Assign Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "in_base",
    "source_code": "def in_base(self, key: _K) -> bool:\n    return key in self._base",
    "docstring": "Checks if a key is in the base dictionary.",
    "type": "method",
    "file_path": "pytorch\\torch\\onnx\\_internal\\registration.py",
    "ast_data": "FunctionDef name:in_base arg:self arg:key arguments arg arg Return return:yes Compare"
  },
  {
    "library": "matplotlib",
    "name": "pause",
    "source_code": "def pause(self):\n    self.event_source.stop()\n    if self._blit:\n        for artist in self._drawn_artists:\n            artist.set_animated(False)",
    "docstring": "Pause the animation.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\animation.py",
    "ast_data": "FunctionDef name:pause arg:self arguments arg Call If For Call"
  },
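  {
    "library": "matplotlib",
    "name": "pause (usage sketch, editor addition)",
    "source_code": "import matplotlib.pyplot as plt\nfrom matplotlib.animation import FuncAnimation\n\nfig, ax = plt.subplots()\nline, = ax.plot([], [])\nax.set(xlim=(0, 10), ylim=(0, 10))\n\ndef update(frame):\n    # Grow a diagonal line frame by frame.\n    line.set_data([0, frame], [0, frame])\n    return (line,)\n\nanim = FuncAnimation(fig, update, frames=10, interval=200)\nplt.pause(1)   # let the event loop run briefly\nanim.pause()   # stops the event source; with blit=True, artists are un-animated\nanim.resume()  # counterpart call restarts the timer",
    "docstring": "Editor-added, hedged usage sketch (not part of the matplotlib sources): pausing and resuming a FuncAnimation via Animation.pause() and Animation.resume(); the update function and data are illustrative.",
    "type": "usage_example"
  },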
  {
    "library": "pytorch",
    "name": "start",
    "source_code": "def start(mode: str='interval', wait_until_completed: bool=False) -> None:\n    mode_normalized = mode.lower().replace(' ', '')\n    torch._C._mps_profilerStartTrace(mode_normalized, wait_until_completed)",
    "docstring": "Start OS Signpost tracing from MPS backend. The generated OS Signposts could be recorded and viewed in XCode Instruments Logging tool. Args: mode(str): OS Signpost tracing mode could be \"interval\", \"event\", or both \"interval,event\". The interval mode traces the duration of execution of the operations, whereas event mode marks the completion of executions. See document _ for more info. wait_until_completed(bool): Waits until the MPS Stream complete executing each encoded GPU operation. This helps generating single dispatches on the trace's timeline. Note that enabling this option would affect the performance negatively. .. _Recording Performance Data:",
    "type": "function",
    "file_path": "pytorch\\torch\\mps\\profiler.py",
    "ast_data": "FunctionDef name:start arg:mode arg:wait_until_completed arguments arg arg Assign Call Call Call"
  },
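  {
    "library": "pytorch",
    "name": "start (usage sketch, editor addition)",
    "source_code": "import torch\n\n# Only meaningful on Apple-silicon builds where the MPS backend is available.\nif torch.backends.mps.is_available():\n    torch.mps.profiler.start(mode='interval', wait_until_completed=False)\n    x = torch.ones(1024, device='mps')\n    y = x * 2  # traced GPU work\n    torch.mps.profiler.stop()  # ends the OS Signpost trace",
    "docstring": "Editor-added, hedged usage sketch (not part of the pytorch sources): starting and stopping an MPS signpost trace around a small workload, guarded so it only runs where the MPS backend exists. The workload tensors are illustrative.",
    "type": "usage_example"
  },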
  {
    "library": "numpy",
    "name": "exists",
    "source_code": "def exists(self, path):\n    return DataSource.exists(self, self._fullpath(path))",
    "docstring": "Test if path exists prepending Repository base URL to path. Test if exists as (and in this order): - a local file. - a remote URL that has been downloaded and stored locally in the directory. - a remote URL that has not been downloaded, but is valid and accessible. Parameters ---------- path : str or pathlib.Path Can be a local file or a remote URL. This may, but does not have to, include the with which the was initialized. Returns ------- out : bool True if exists. Notes ----- When is an URL, will return True if it's either stored locally in the directory, or is a valid remote URL. does not discriminate between the two, the file is accessible if it exists in either location.",
    "type": "method",
    "file_path": "numpy\\numpy\\lib\\_datasource.py",
    "ast_data": "FunctionDef name:exists arg:self arg:path arguments arg arg Return return:yes Call Call"
  },
  {
    "library": "scipy",
    "name": "rvs",
    "source_code": "def rvs(self, size=1, random_state=None):\n    random_state = self._dist._get_random_state(random_state)\n    return self._dist._rvs(self.dim, self.mu, self.kappa, size, random_state)",
    "docstring": "Draw random variates from the Von Mises-Fisher distribution. Parameters ---------- size : int or tuple of ints, optional Given a shape of, for example, (m,n,k), m*n*k samples are generated, and packed in an m-by-n-by-k arrangement. Because each sample is N-dimensional, the output shape is (m,n,k,N). If no shape is specified, a single (N-D) sample is returned. random_state : {None, int, , }, optional If is None (or ), the singleton is used. If is an int, a new `seedseedsizeNN` is the dimension of the distribution.",
    "type": "method",
    "file_path": "scipy\\scipy\\stats\\_multivariate.py",
    "ast_data": "FunctionDef name:rvs arg:self arg:size arg:random_state arguments arg arg arg Assign Call Return return:yes Call"
  },
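  {
    "library": "scipy",
    "name": "rvs (usage sketch, editor addition)",
    "source_code": "import numpy as np\nfrom scipy.stats import vonmises_fisher  # available since SciPy 1.11\n\n# Frozen 3-D distribution: mean direction mu (a unit vector), concentration kappa.\nvmf = vonmises_fisher(mu=np.array([0.0, 0.0, 1.0]), kappa=5.0)\nsamples = vmf.rvs(size=100, random_state=42)\nprint(samples.shape)  # (100, 3)\n# Every sample lies on the unit sphere.\nprint(np.allclose(np.linalg.norm(samples, axis=1), 1.0))  # True",
    "docstring": "Editor-added, hedged usage sketch (not part of the scipy sources): drawing samples with rvs from a frozen von Mises-Fisher distribution. The mu/kappa values are illustrative.",
    "type": "usage_example"
  },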
  {
    "library": "pandas",
    "name": "DatetimeIndexResamplerGroupby",
    "source_code": "class DatetimeIndexResamplerGroupby(_GroupByMixin, DatetimeIndexResampler):\n\n    @property\n    def _resampler_cls(self):\n        return DatetimeIndexResampler",
    "docstring": "Provides a resample of a groupby implementation",
    "type": "class",
    "file_path": "pandas\\pandas\\core\\resample.py",
    "ast_data": "ClassDef name:DatetimeIndexResamplerGroupby FunctionDef name:_resampler_cls arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_calculate_scores",
    "source_code": "def _calculate_scores(self, query, key):\n    q_reshaped = array_ops.expand_dims(query, axis=-2)\n    k_reshaped = array_ops.expand_dims(key, axis=-3)\n    if self.use_scale:\n        scale = self.scale\n    else:\n        scale = 1.0\n    return math_ops.reduce_sum(scale * math_ops.tanh(q_reshaped + k_reshaped), axis=-1)",
    "docstring": "Calculates attention scores as a nonlinear sum of query and key. Args: query: Query tensor of shape . key: Key tensor of shape . Returns: Tensor of shape .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\layers\\dense_attention.py",
    "ast_data": "FunctionDef name:_calculate_scores arg:self arg:query arg:key arguments arg arg arg Assign Call Assign Call If Assign Assign Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_locate_elements_in_line",
    "source_code": "def _locate_elements_in_line(line, indices_list, ref_indices):\n    batch_size = len(indices_list)\n    offsets = [indices[-1] - ref_indices[-1] for indices in indices_list]\n    start_columns = [None] * batch_size\n    end_columns = [None] * batch_size\n    if _NUMPY_OMISSION in line:\n        ellipsis_index = line.find(_NUMPY_OMISSION)\n    else:\n        ellipsis_index = len(line)\n    matches_iter = re.finditer(_NUMBER_REGEX, line)\n    batch_pos = 0\n    offset_counter = 0\n    for match in matches_iter:\n        if match.start() > ellipsis_index:\n            break\n        if offset_counter == offsets[batch_pos]:\n            start_columns[batch_pos] = match.start()\n            end_columns[batch_pos] = match.end() - 1\n            batch_pos += 1\n            if batch_pos >= batch_size:\n                break\n        offset_counter += 1\n    return (start_columns, end_columns)",
    "docstring": "Determine the start and end indices of an element in a line. Args: line: (str) the line in which the element is to be sought. indices_list: (list of list of int) list of indices of the element to search for. Assumes that the indices in the batch are unique and sorted in ascending order. ref_indices: (list of int) reference indices, i.e., the indices of the first element represented in the line. Returns: start_columns: (list of int) start column indices, if found. If not found, None. end_columns: (list of int) end column indices, if found. If not found, None. If found, the element is represented in the left-closed-right-open interval [start_column, end_column].",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\debug\\cli\\tensor_format.py",
    "ast_data": "FunctionDef name:_locate_elements_in_line arg:line arg:indices_list arg:ref_indices arguments arg arg arg Assign Call Assign Assign Assign If Compare Assign Call Assign Call Assign Call Assign Assign For If Compare Call If Compare Assign Call Assign Call If Compare Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "container",
    "source_code": "@tf_contextlib.contextmanager\ndef container(self, container_name) -> Iterator[str]:\n    original_container = self._container\n    self._container = container_name\n    try:\n        yield self._container\n    finally:\n        self._container = original_container",
    "docstring": "Returns a context manager that specifies the resource container to use. Stateful operations, such as variables and queues, can maintain their states on devices so that they can be shared by multiple processes. A resource container is a string name under which these stateful operations are tracked. These resources can be released or cleared with . For example: Args: container_name: container name string. Returns: A context manager for defining resource containers for stateful ops, yields the container name.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\ops.py",
    "ast_data": "FunctionDef name:container arg:self arg:container_name arguments arg arg Assign Assign Try Assign"
  },
  {
    "library": "kornia",
    "name": "apply_transform_box",
    "source_code": "def apply_transform_box(self, input: Boxes, params: Dict[str, Tensor], flags: Dict[str, Any], transform: Optional[Tensor]=None) -> Boxes:\n    padding_size = params['padding_size']\n    input = input.pad(padding_size)\n    return super().apply_transform_box(input=input, params=params, flags=flags, transform=transform)",
    "docstring": "Process keypoints corresponding to the inputs that are no transformation applied.",
    "type": "method",
    "file_path": "kornia\\kornia\\augmentation\\_2d\\geometric\\crop.py",
    "ast_data": "FunctionDef name:apply_transform_box arg:self arg:input arg:params arg:flags arg:transform arguments arg arg arg arg arg Assign Assign Call Return return:yes Call Call"
  },
  {
    "library": "pandas",
    "name": "_can_fast_transpose",
    "source_code": "@property\ndef _can_fast_transpose(self) -> bool:\n    blocks = self._mgr.blocks\n    if len(blocks) != 1:\n        return False\n    dtype = blocks[0].dtype\n    return not is_1d_only_ea_dtype(dtype)",
    "docstring": "Can we transpose this DataFrame without creating any new array objects.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\frame.py",
    "ast_data": "FunctionDef name:_can_fast_transpose arg:self arguments arg Assign If Compare Call Return return:yes Assign Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "notnull",
    "source_code": "@doc(NDFrame.notna, klass=_shared_doc_kwargs['klass'])\ndef notnull(self) -> Series:\n    return super().notnull()",
    "docstring": "Series.notnull is an alias for Series.notna.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\series.py",
    "ast_data": "FunctionDef name:notnull arg:self arguments arg Return return:yes Call Call Call"
  },
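  {
    "library": "pandas",
    "name": "notnull (usage sketch, editor addition)",
    "source_code": "import pandas as pd\n\ns = pd.Series([1.0, None, 3.0])\nprint(s.notnull().tolist())  # [True, False, True]\n# Identical to the canonical spelling:\nprint(s.notna().equals(s.notnull()))  # True",
    "docstring": "Editor-added, hedged usage sketch (not part of the pandas sources): Series.notnull behaving as an alias of Series.notna. The sample values are illustrative.",
    "type": "usage_example"
  },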
  {
    "library": "scikit-learn",
    "name": "score",
    "source_code": "def score(self, X_test, y=None):\n    X_test = validate_data(self, X_test, reset=False)\n    test_cov = empirical_covariance(X_test - self.location_, assume_centered=True)\n    res = log_likelihood(test_cov, self.get_precision())\n    return res",
    "docstring": "Compute the log-likelihood of under the estimated Gaussian model. The Gaussian model is defined by its mean and covariance matrix which are represented respectively by and . Parameters ---------- X_test : array-like of shape (n_samples, n_features) Test data of which we compute the likelihood, where is the number of samples and is the number of features. is assumed to be drawn from the same distribution than the data used in fit (including centering). y : Ignored Not used, present for API consistency by convention. Returns ------- res : float The log-likelihood of with and as estimators of the Gaussian model mean and covariance matrix respectively.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\covariance\\_empirical_covariance.py",
    "ast_data": "FunctionDef name:score arg:self arg:X_test arg:y arguments arg arg arg Assign Call Assign Call Assign Call Call Return return:yes"
  },
  {
    "library": "seaborn",
    "name": "_check_param_one_of",
    "source_code": "def _check_param_one_of(self, param: str, options: Iterable[Any]) -> None:\n    value = getattr(self, param)\n    if value not in options:\n        *most, last = options\n        option_str = ', '.join((f'{x!r}' for x in most[:-1])) + f' or {last!r}'\n        err = ' '.join([f'The `{param}` parameter for `{self.__class__.__name__}` must be', f'one of {option_str}; not {value!r}.'])\n        raise ValueError(err)",
    "docstring": "Raise when parameter value is not one of a specified set.",
    "type": "method",
    "file_path": "seaborn\\seaborn\\_stats\\base.py",
    "ast_data": "FunctionDef name:_check_param_one_of arg:self arg:param arg:options arguments arg arg arg Assign Call If Compare Assign Assign Call Assign Call Raise Call"
  },
  {
    "library": "pytorch",
    "name": "_notify_procs_to_terminate",
    "source_code": "def _notify_procs_to_terminate(self):\n    ones = torch.ones(1, device=self._device)\n    dist.all_reduce(ones, group=self._process_group)\n    raise RuntimeError(f'Rank {self._rank} exhausted all inputs.')",
    "docstring": "Schedule an all-reduce to notify non-joined processes to terminate. Also raise a `` indicating that the current process has exhausted its inputs.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\algorithms\\join.py",
    "ast_data": "FunctionDef name:_notify_procs_to_terminate arg:self arguments arg Assign Call Call Raise Call"
  },
  {
    "library": "kornia",
    "name": "cx",
    "source_code": "@property\ndef cx(self) -> Tensor:\n    return self.intrinsics[..., 0, 2]",
    "docstring": "Return the x-coordinate of the principal point. Returns: tensor of shape :math:.",
    "type": "method",
    "file_path": "kornia\\kornia\\geometry\\camera\\pinhole.py",
    "ast_data": "FunctionDef name:cx arg:self arguments arg Return return:yes"
  },
  {
    "library": "django",
    "name": "__getitem__",
    "source_code": "def __getitem__(self, index):\n    if isinstance(index, str):\n        i = self.index(index)\n    elif 0 <= index < self.num_fields:\n        i = index\n    else:\n        raise IndexError('Index out of range when accessing field in a feature: %s.' % index)\n    return Field(self, i)",
    "docstring": "Get the Field object at the specified index, which may be either an integer or the Field's string label. Note that the Field object is not the field's _value_ -- use the method instead to retrieve the value (e.g. an integer) instead of a Field instance.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\gdal\\feature.py",
    "ast_data": "FunctionDef name:__getitem__ arg:self arg:index arguments arg arg If Call Assign Call If Compare Assign Raise Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "BCELoss",
    "source_code": "class BCELoss(_WeightedLoss):\n    __constants__ = ['reduction']\n\n    def __init__(self, weight: Optional[Tensor]=None, size_average=None, reduce=None, reduction: str='mean') -> None:\n        super().__init__(weight, size_average, reduce, reduction)\n\n    def forward(self, input: Tensor, target: Tensor) -> Tensor:\n        return F.binary_cross_entropy(input, target, weight=self.weight, reduction=self.reduction)",
    "docstring": "Creates a criterion that measures the Binary Cross Entropy between the target and the input probabilities: The unreduced (i.e. with :attr: set to `Nreductionmean';}\\\\ \\operatorname{sum}(L), & \\text{if reduction} = \\text{yx_n\\log (0) = -\\infty\\lim_{x\\to 0} \\log (x) = -\\inftyy_n = 0(1 - y_n) = 0\\lim_{x\\to 0} \\frac{d}{dx} \\log (x) = \\inftyx_nnbatchreductionsize_averagereducereductionsize_averagereducesize_averagesize_averagereducereduction(*)*(*)reduction(*)`, same shape as input. Examples: >>> m = nn.Sigmoid() >>> loss = nn.BCELoss() >>> input = torch.randn(3, 2, requires_grad=True) >>> target = torch.rand(3, 2, requires_grad=False) >>> output = loss(m(input), target) >>> output.backward()",
    "type": "class",
    "file_path": "pytorch\\torch\\nn\\modules\\loss.py",
    "ast_data": "ClassDef name:BCELoss Assign FunctionDef name:__init__ arg:self arg:weight arg:size_average arg:reduce arg:reduction arguments arg arg arg arg arg Call Call FunctionDef name:forward arg:self arg:input arg:target arguments arg arg arg Return return:yes Call"
  },
  {
    "library": "cherrypy",
    "name": "prepare_iter",
    "source_code": "def prepare_iter(value):\n    if isinstance(value, text_or_bytes):\n        if value:\n            value = [value]\n        else:\n            value = []\n    elif hasattr(value, 'read'):\n        value = file_generator(value)\n    elif value is None:\n        value = []\n    return value",
    "docstring": "Ensure response body is iterable and resolves to False when empty.",
    "type": "function",
    "file_path": "cherrypy\\cherrypy\\lib\\encoding.py",
    "ast_data": "FunctionDef name:prepare_iter arg:value arguments arg If Call If Assign Assign If Call Assign Call If Compare Assign Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "replace_with",
    "source_code": "def replace_with(self, new_node: torch.fx.Node) -> None:\n    graph = new_node.graph\n    if len(self.nodes) == 1:\n        mm_node = self.nodes[0]\n        assert mm_node.target in (aten.mm.default, aten._scaled_mm.default)\n        mm_node.replace_all_uses_with(new_node)\n        graph.erase_node(mm_node)\n        return\n    graph = new_node.graph\n    assert len(self.nodes) == 3\n    mm_node = self.nodes[1]\n    output_reshape_node = self.nodes[2]\n    assert mm_node.target in (aten.mm.default, aten._scaled_mm.default)\n    assert output_reshape_node.target == aten.reshape.default\n    output_reshape_node.replace_all_uses_with(new_node)\n    if len(mm_node.users) > 1:\n        with graph.inserting_after(new_node):\n            new_mm_node = graph.call_function(aten.reshape.default, args=(new_node, list(_get_tensor(mm_node).shape)))\n        mm_node.replace_all_uses_with(new_mm_node)",
    "docstring": "Replace the matmul with the new node.",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\fx_passes\\micro_pipeline_tp.py",
    "ast_data": "FunctionDef name:replace_with arg:self arg:new_node arguments arg arg Assign If Compare Call Assign Compare Call Call Return return:no Assign Compare Call Assign Assign Compare Compare Call If Compare Call With Call Assign Call Call Call Call"
  },
  {
    "library": "kornia",
    "name": "TLU",
    "source_code": "class TLU(Module):\n\n    def __init__(self, num_features: int) -> None:\n        super().__init__()\n        self.num_features = num_features\n        self.tau = Parameter(-torch.ones(1, num_features, 1, 1), requires_grad=True)\n        self.reset_parameters()\n\n    def reset_parameters(self) -> None:\n        nn.init.constant_(self.tau, -1)\n\n    def extra_repr(self) -> str:\n        return 'num_features={num_features}'.format(**self.__dict__)\n\n    def forward(self, x: Tensor) -> Tensor:\n        return torch.max(x, self.tau)",
    "docstring": "TLU layer from 'Filter Response Normalization Layer: Eliminating Batch Dependence in the Training of Deep Neural Networks, see :cite: for more details. :math: is learnable per channel. .. math:: y = \\max(x, {\\tau}) Args: num_features: number of channels Returns: torch.Tensor Shape: - Input: :math: - Output: :math:",
    "type": "class",
    "file_path": "kornia\\kornia\\feature\\hynet.py",
    "ast_data": "ClassDef name:TLU FunctionDef name:__init__ arg:self arg:num_features arguments arg arg Call Call Assign Assign Call Call Call FunctionDef name:reset_parameters arg:self arguments arg Call FunctionDef name:extra_repr arg:self arguments arg Return return:yes Call FunctionDef name:forward arg:self arg:x arguments arg arg Return return:yes Call"
  },
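  {
    "library": "kornia",
    "name": "TLU (usage sketch, editor addition)",
    "source_code": "import torch\nfrom kornia.feature.hynet import TLU  # import path follows the entry's file_path\n\ntlu = TLU(num_features=8)   # one learnable tau per channel\nx = torch.randn(2, 8, 16, 16)\ny = tlu(x)                  # elementwise max(x, tau)\nassert y.shape == x.shape\nprint(tlu.tau.shape)        # torch.Size([1, 8, 1, 1])",
    "docstring": "Editor-added, hedged usage sketch (not part of the kornia sources): applying the TLU layer to a 4-D feature map. The tensor sizes are illustrative.",
    "type": "usage_example"
  },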
  {
    "library": "tensorflow",
    "name": "Composite",
    "source_code": "class Composite(object):\n\n    def __init__(self, op_name, inputs=None, attrs=None, derived_attrs=None, outputs=None):\n        self._op_name = op_name\n        self._inputs = inputs\n        self._attrs = attrs\n        self._derived_attrs = derived_attrs\n        self._outputs = outputs\n\n    def __call__(self, compose_fn):\n        setattr(compose_fn, '_tfr_op_name', self._op_name)\n        return compose_fn",
    "docstring": "A decorator to register a function as a composition for an TF operator. The argument to the decorator must be the name of a TF raw operator the function composites for. Decorated function must take positional arguments which corresponds to the input and attributes in OpDef of the TF operation. # TODO(fengliuai): more documents here. Example: @composite.Composite('AddN') def _compose_add_n(inputs, N): if N == 1: ....",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\compiler\\mlir\\tfr\\python\\composite.py",
    "ast_data": "ClassDef name:Composite FunctionDef name:__init__ arg:self arg:op_name arg:inputs arg:attrs arg:derived_attrs arg:outputs arguments arg arg arg arg arg arg Assign Assign Assign Assign Assign FunctionDef name:__call__ arg:self arg:compose_fn arguments arg arg Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_forward_and_backward_functions",
    "source_code": "def _forward_and_backward_functions(self, inference_args, input_tangents):\n    outputs = self._func_graph.outputs[:self._num_inference_outputs]\n    return self._build_functions_for_outputs(outputs, inference_args, input_tangents)",
    "docstring": "Shortcut for when only first-order gradients are required. The returned backward function does not accept gradients with respect to side output of forward_function. This is fine as long as the user can't possibly request second order tape gradients, as when they've used a single non-persistent GradientTape. Since we don't need the backward function to take gradients with respect to side outputs, we can skip some potentially slow graph building. Args: inference_args: A flat list of Tensors, arguments to the inference function. input_tangents: A flat list of Tensors, jvps associated with . Returns: A tuple of (forward_function, backward_function): forward_function: Takes the same inputs as the inference function, but returns side outputs used by backward_function in addition to the inference function's outputs. backward_function: Takes side outputs from forward_function and gradients with respect to the \"real\" outputs of forward_function and returns gradients with respect to the inputs.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\polymorphic_function\\concrete_function.py",
    "ast_data": "FunctionDef name:_forward_and_backward_functions arg:self arg:inference_args arg:input_tangents arguments arg arg arg Assign Return return:yes Call"
  },
  {
    "library": "numpy",
    "name": "sharedmask",
    "source_code": "@property\ndef sharedmask(self):\n    return self._sharedmask",
    "docstring": "Share status of the mask (read-only).",
    "type": "method",
    "file_path": "numpy\\numpy\\ma\\core.py",
    "ast_data": "FunctionDef name:sharedmask arg:self arguments arg Return return:yes"
  },
  {
    "library": "scipy",
    "name": "pdf",
    "source_code": "def pdf(self, X, mean=None, rowcov=1, colcov=1):\n    return np.exp(self.logpdf(X, mean, rowcov, colcov))",
    "docstring": "Matrix normal probability density function. Parameters ---------- X : array_like Quantiles, with the last two axes of denoting the components. %(_matnorm_doc_default_callparams)s Returns ------- pdf : ndarray Probability density function evaluated at Notes ----- %(_matnorm_doc_callparams_note)s",
    "type": "method",
    "file_path": "scipy\\scipy\\stats\\_multivariate.py",
    "ast_data": "FunctionDef name:pdf arg:self arg:X arg:mean arg:rowcov arg:colcov arguments arg arg arg arg arg Return return:yes Call Call"
  },
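  {
    "library": "scipy",
    "name": "pdf (usage sketch, editor addition)",
    "source_code": "import numpy as np\nfrom scipy.stats import matrix_normal\n\nM = np.zeros((2, 3))   # mean matrix\nU = np.eye(2)          # among-row covariance\nV = np.eye(3)          # among-column covariance\nX = np.random.default_rng(0).standard_normal((2, 3))\nprint(matrix_normal.pdf(X, mean=M, rowcov=U, colcov=V))",
    "docstring": "Editor-added, hedged usage sketch (not part of the scipy sources): evaluating the matrix normal pdf at a quantile matrix. The mean and covariance values are illustrative.",
    "type": "usage_example"
  },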
  {
    "library": "numpy",
    "name": "lib_opts_if_msvc",
    "source_code": "def lib_opts_if_msvc(build_cmd):\n    if build_cmd.compiler.compiler_type != 'msvc':\n        return []\n    flags = ['/GL-']\n    if build_cmd.compiler_opt.cc_test_flags(['-d2VolatileMetadata-']):\n        flags.append('-d2VolatileMetadata-')\n    return flags",
    "docstring": "Add flags if we are using MSVC compiler We can't see in our scope, because we have not initialized the distutils build command, so use this deferred calculation to run when we are building the library.",
    "type": "function",
    "file_path": "numpy\\numpy\\distutils\\msvccompiler.py",
    "ast_data": "FunctionDef name:lib_opts_if_msvc arg:build_cmd arguments arg If Compare Return return:no Assign If Call Call Return return:yes"
  },
  {
    "library": "scipy",
    "name": "__init__",
    "source_code": "@docfiller\ndef __init__(self, mat_stream, *args, **kwargs):\n    super().__init__(mat_stream, *args, **kwargs)\n    self._matrix_reader = None",
    "docstring": "Initialize matlab 4 file reader %(matstream_arg)s %(load_args)s",
    "type": "method",
    "file_path": "scipy\\scipy\\io\\matlab\\_mio4.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:mat_stream arguments arg arg arg arg Call Call Assign"
  },
  {
    "library": "pytorch",
    "name": "cat_slice_cat",
    "source_code": "@register_lowering_pattern(CallFunction(aten.cat, [_cat_1, CallFunction(aten.slice, _cat_1, 1, 0, KeywordArg('size'))], 1))\ndef cat_slice_cat(match, cat_input, size, dim=1):\n    first, *rest = cat_input\n    if size >= 0 and V.graph.sizevars.statically_known_leq(size, first.get_size()[dim]):\n        return L[aten.cat]([first, *rest, L[aten.slice](first, dim, 0, size)], dim)\n    else:\n        tmp = L[aten.cat](cat_input, dim)\n        return L[aten.cat]([tmp, L[aten.slice](tmp, dim, 0, size)], dim)",
    "docstring": "This is an example of a more complex pattern where cat_1 is used multiple times inside the pattern. We fold 2 calls to cat into one. Matches: cat_1: f32[1024, 4077] = torch.ops.aten.cat.default([add_26, primals_217], 1) slice_1: f32[1024, 4077] = torch.ops.aten.slice.Tensor(cat_1, 0, 0, 9223372036854775807) slice_2: f32[1024, 19] = torch.ops.aten.slice.Tensor(slice_1, 1, 0, 19) cat_2: f32[1024, 4096] = torch.ops.aten.cat.default([cat_1, slice_2], 1) Rewrite to: slice_2 = torch.ops.aten.slice.Tensor(add_26, 1, 0, 19) cat_2 = torch.ops.aten.cat.default([add_26, primals_217, slice2], 1)",
    "type": "function",
    "file_path": "pytorch\\torch\\_inductor\\fx_passes\\post_grad.py",
    "ast_data": "FunctionDef name:cat_slice_cat arg:match arg:cat_input arg:size arg:dim arguments arg arg arg arg Assign If BoolOp Compare Call Call Return return:yes Call Call Assign Call Return return:yes Call Call Call Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "remove",
    "source_code": "def remove(module, name):\n    for k, hook in module._forward_pre_hooks.items():\n        if isinstance(hook, BasePruningMethod) and hook._tensor_name == name:\n            hook.remove(module)\n            del module._forward_pre_hooks[k]\n            return module\n    raise ValueError(f\"Parameter '{name}' of module {module} has to be pruned before pruning can be removed\")",
    "docstring": "Remove the pruning reparameterization from a module and the pruning method from the forward hook. The pruned parameter named `` on which pruning will act. Examples: >>> m = random_unstructured(nn.Linear(5, 7), name='weight', amount=0.2) >>> m = remove(m, name='weight')",
    "type": "function",
    "file_path": "pytorch\\torch\\nn\\utils\\prune.py",
    "ast_data": "FunctionDef name:remove arg:module arg:name arguments arg arg For Call If BoolOp Call Compare Call Return return:yes Raise Call"
  },
  {
    "library": "pytorch",
    "name": "generate_constraints",
    "source_code": "def generate_constraints(self, counter=0):\n    graph = self.graph\n    all_constraints = []\n    for n in graph.nodes:\n        constraints, counter = self.generate_constraints_node(n, counter)\n        all_constraints += constraints\n    return (Conj(all_constraints), counter)",
    "docstring": "Iterate through every node and generate constraints Effect: self.constraints will be populated with the final constraints",
    "type": "method",
    "file_path": "pytorch\\torch\\fx\\experimental\\migrate_gradual_types\\constraint_generator.py",
    "ast_data": "FunctionDef name:generate_constraints arg:self arg:counter arguments arg arg Assign Assign For Assign Call Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "transformed",
    "source_code": "def transformed(self, transform):\n    new_marker = MarkerStyle(self)\n    if new_marker._user_transform is not None:\n        new_marker._user_transform += transform\n    else:\n        new_marker._user_transform = transform\n    return new_marker",
    "docstring": "Return a new version of this marker with the transform applied. Parameters ---------- transform : Transform will be combined with current user supplied transform.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\markers.py",
    "ast_data": "FunctionDef name:transformed arg:self arg:transform arguments arg arg Assign Call If Compare Assign Return return:yes"
  },
  {
    "library": "django",
    "name": "check_resolver",
    "source_code": "def check_resolver(resolver):\n    check_method = getattr(resolver, 'check', None)\n    if check_method is not None:\n        return check_method()\n    elif not hasattr(resolver, 'resolve'):\n        return get_warning_for_invalid_pattern(resolver)\n    else:\n        return []",
    "docstring": "Recursively check the resolver.",
    "type": "function",
    "file_path": "django\\django\\core\\checks\\urls.py",
    "ast_data": "FunctionDef name:check_resolver arg:resolver arguments arg Assign Call If Compare Return return:yes Call If Call Return return:yes Call Return return:no"
  },
  {
    "library": "tensorflow",
    "name": "_measure_list_profile_column_widths",
    "source_code": "def _measure_list_profile_column_widths(self, profile_data):\n    num_columns = len(profile_data.column_names())\n    widths = [len(column_name) for column_name in profile_data.column_names()]\n    for row in range(profile_data.row_count()):\n        for col in range(num_columns):\n            widths[col] = max(widths[col], len(str(profile_data.row_values(row)[col])) + 2)\n    return widths",
    "docstring": "Determine the maximum column widths for each data list. Args: profile_data: list of ProfileDatum objects. Returns: List of column widths in the same order as columns in data.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\debug\\cli\\profile_analyzer_cli.py",
    "ast_data": "FunctionDef name:_measure_list_profile_column_widths arg:self arg:profile_data arguments arg arg Assign Call Call Assign Call Call For Call Call For Call Assign Call Call Call Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "update_view",
    "source_code": "def update_view(self):\n    views = self.views[self.figure]()\n    if views is None:\n        return\n    pos = self.positions[self.figure]()\n    if pos is None:\n        return\n    home_views = self.home_views[self.figure]\n    all_axes = self.figure.get_axes()\n    for a in all_axes:\n        if a in views:\n            cur_view = views[a]\n        else:\n            cur_view = home_views[a]\n        a._set_view(cur_view)\n    if set(all_axes).issubset(pos):\n        for a in all_axes:\n            a._set_position(pos[a][0], 'original')\n            a._set_position(pos[a][1], 'active')\n    self.figure.canvas.draw_idle()",
    "docstring": "Update the view limits and position for each Axes from the current stack position. If any Axes are present in the figure that aren't in the current stack position, use the home view limits for those Axes and don't update *any* positions.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\backend_tools.py",
    "ast_data": "FunctionDef name:update_view arg:self arguments arg Assign Call If Compare Return return:no Assign Call If Compare Return return:no Assign Assign Call For If Compare Assign Assign Call If Call Call For Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "Feature",
    "source_code": "@tf_export('autograph.experimental.Feature')\nclass Feature(enum.Enum):\n    ALL = 'ALL'\n    AUTO_CONTROL_DEPS = 'AUTO_CONTROL_DEPS'\n    ASSERT_STATEMENTS = 'ASSERT_STATEMENTS'\n    BUILTIN_FUNCTIONS = 'BUILTIN_FUNCTIONS'\n    EQUALITY_OPERATORS = 'EQUALITY_OPERATORS'\n    LISTS = 'LISTS'\n    NAME_SCOPES = 'NAME_SCOPES'\n\n    @classmethod\n    def all(cls):\n        return tuple(cls.__members__.values())\n\n    @classmethod\n    def all_but(cls, exclude):\n        if not isinstance(exclude, (list, tuple, set)):\n            exclude = (exclude,)\n        return tuple(set(cls.all()) - set(exclude) - {cls.ALL})",
    "docstring": "This enumeration represents optional conversion options. These conversion options are experimental. They are subject to change without notice and offer no guarantees. _Example Usage_ Attributes: ALL: Enable all features. AUTO_CONTROL_DEPS: Insert of control dependencies in the generated code. ASSERT_STATEMENTS: Convert Tensor-dependent assert statements to tf.Assert. BUILTIN_FUNCTIONS: Convert builtin functions applied to Tensors to their TF counterparts. EQUALITY_OPERATORS: Whether to convert the equality operator ('==') to tf.math.equal. LISTS: Convert list idioms, like initializers, slices, append, etc. NAME_SCOPES: Insert name scopes that name ops according to context, like the function they were defined in.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\autograph\\core\\converter.py",
    "ast_data": "ClassDef name:Feature Assign Assign Assign Assign Assign Assign Assign FunctionDef name:all arg:cls arguments arg Return return:yes Call Call FunctionDef name:all_but arg:cls arg:exclude arguments arg arg If Call Assign Return return:yes Call Call Call Call Call"
  },
  {
    "library": "scipy",
    "name": "d",
    "source_code": "def d(self, p):\n    if p not in self._d:\n        est = _onenormest_matrix_power(self._A, p, self._ell)\n        self._d[p] = est ** (1.0 / p)\n    return self._scale * self._d[p]",
    "docstring": "Lazily estimate :math: where :math: is the 1-norm.",
    "type": "method",
    "file_path": "scipy\\scipy\\sparse\\linalg\\_expm_multiply.py",
    "ast_data": "FunctionDef name:d arg:self arg:p arguments arg arg If Compare Assign Call Assign Return return:yes"
  },
  {
    "library": "django",
    "name": "unification_cast_sql",
    "source_code": "def unification_cast_sql(self, output_field):\n    return '%s'",
    "docstring": "Given a field instance, return the SQL that casts the result of a union to that type. The resulting string should contain a '%s' placeholder for the expression being cast.",
    "type": "method",
    "file_path": "django\\django\\db\\backends\\base\\operations.py",
    "ast_data": "FunctionDef name:unification_cast_sql arg:self arg:output_field arguments arg arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "print_readable",
    "source_code": "def print_readable(self):\n    self.split_gm.print_readable()",
    "docstring": "Print the pipe in a human-readable format. This will print both the root pipe and each stage module.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\pipelining\\_IR.py",
    "ast_data": "FunctionDef name:print_readable arg:self arguments arg Call"
  },
  {
    "library": "tensorflow",
    "name": "static_nrows",
    "source_code": "@property\ndef static_nrows(self):\n    if self._row_splits is not None:\n        nrows_plus_one = tensor_shape.dimension_value(self._row_splits.shape[0])\n        if nrows_plus_one is not None:\n            return nrows_plus_one - 1\n    if self._row_lengths is not None:\n        nrows = tensor_shape.dimension_value(self._row_lengths.shape[0])\n        if nrows is not None:\n            return nrows\n    if self._nrows is not None:\n        return tensor_util.constant_value(self._nrows)\n    return None",
    "docstring": "The number of rows in this partition, if statically known. Returns: The number of rows in this partition as an (if statically known); or (otherwise).",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\row_partition.py",
    "ast_data": "FunctionDef name:static_nrows arg:self arguments arg If Compare Assign Call If Compare Return return:yes If Compare Assign Call If Compare Return return:yes If Compare Return return:yes Call Return return:no"
  },
  {
    "library": "numpy",
    "name": "cutdeg",
    "source_code": "def cutdeg(self, deg):\n    return self.truncate(deg + 1)",
    "docstring": "Truncate series to the given degree. Reduce the degree of the series to by discarding the high order terms. If is greater than the current degree a copy of the current series is returned. This can be useful in least squares where the coefficients of the high degree terms may be very small. Parameters ---------- deg : non-negative int The series is reduced to degree by discarding the high order terms. The value of must be a non-negative integer. Returns ------- new_series : series New instance of series with reduced degree.",
    "type": "method",
    "file_path": "numpy\\numpy\\polynomial\\_polybase.py",
    "ast_data": "FunctionDef name:cutdeg arg:self arg:deg arguments arg arg Return return:yes Call"
  },
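  {
    "library": "numpy",
    "name": "cutdeg (usage sketch, editor addition)",
    "source_code": "from numpy.polynomial import Polynomial\n\np = Polynomial([1.0, 2.0, 3.0, 4.0])  # 1 + 2x + 3x**2 + 4x**3\nq = p.cutdeg(1)                       # discard terms above degree 1\nprint(q.coef)                         # [1. 2.]\nprint(p.cutdeg(5).coef)               # deg > current degree -> unchanged copy",
    "docstring": "Editor-added, hedged usage sketch (not part of the numpy sources): truncating a polynomial series with cutdeg. The coefficients are illustrative.",
    "type": "usage_example"
  },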
  {
    "library": "scikit-learn",
    "name": "bic",
    "source_code": "def bic(self, X):\n    return -2 * self.score(X) * X.shape[0] + self._n_parameters() * np.log(X.shape[0])",
    "docstring": "Bayesian information criterion for the current model on the input X. You can refer to this :ref: for more details regarding the formulation of the BIC used. For an example of GMM selection using information criterion, refer to :ref:. Parameters ---------- X : array of shape (n_samples, n_dimensions) The input samples. Returns ------- bic : float The lower the better.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\mixture\\_gaussian_mixture.py",
    "ast_data": "FunctionDef name:bic arg:self arg:X arguments arg arg Return return:yes Call Call Call"
  },
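  {
    "library": "scikit-learn",
    "name": "bic (usage sketch, editor addition)",
    "source_code": "import numpy as np\nfrom sklearn.mixture import GaussianMixture\n\nrng = np.random.default_rng(0)\nX = rng.standard_normal((200, 2))\n# Fit candidate mixtures and compare their BIC; the lower, the better.\nbics = {k: GaussianMixture(n_components=k, random_state=0).fit(X).bic(X)\n        for k in (1, 2, 3)}\nprint(min(bics, key=bics.get))",
    "docstring": "Editor-added, hedged usage sketch (not part of the scikit-learn sources): using bic to pick the number of mixture components. The data and candidate counts are illustrative.",
    "type": "usage_example"
  },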
  {
    "library": "tensorflow",
    "name": "cardinality",
    "source_code": "def cardinality(self):\n    return gen_dataset_ops.dataset_cardinality(self._variant_tensor)",
    "docstring": "Returns the cardinality of the dataset, if known. may return if the dataset contains an infinite number of elements or if the analysis fails to determine the number of elements in the dataset (e.g. when the dataset source is a file). >>> dataset = tf.data.Dataset.range(42) >>> print(dataset.cardinality().numpy()) 42 >>> dataset = dataset.repeat() >>> cardinality = dataset.cardinality() >>> print((cardinality == tf.data.INFINITE_CARDINALITY).numpy()) True >>> dataset = dataset.filter(lambda x: True) >>> cardinality = dataset.cardinality() >>> print((cardinality == tf.data.UNKNOWN_CARDINALITY).numpy()) True Returns: A scalar representing the cardinality of the dataset. If the cardinality is infinite or unknown, returns the named constants and respectively.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\data\\ops\\dataset_ops.py",
    "ast_data": "FunctionDef name:cardinality arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "refine_node",
    "source_code": "def refine_node(self, n: Node):\n    if n.type is None:\n        n.type = Dyn\n    n.type = self.replace_dyn_with_fresh_var(n.type)\n    if n.op == 'call_function':\n        if n.target in _REFINEMENT_RULES:\n            self.constraints += _REFINEMENT_RULES[n.target](n)\n        else:\n            pass\n    if n.op == 'call_module':\n        module_instance = self.traced.get_submodule(n.target)\n        if type(module_instance) in _REFINEMENT_RULES:\n            self.constraints += _REFINEMENT_RULES[type(module_instance)](n)\n        else:\n            pass\n    if n.op == 'output':\n\n        def get_node_type(a):\n            return a.type\n        n.type = torch.fx.node.map_arg(n.args[0], get_node_type)\n        return n.type\n    else:\n        pass",
    "docstring": "Returns a list of equality constraints for call_module and call_function nodes. Models the relation between input and output dimensions using constraints in case they are both tensors. All operations used in resnet50 are defined.",
    "type": "method",
    "file_path": "pytorch\\torch\\fx\\experimental\\graph_gradual_typechecker.py",
    "ast_data": "FunctionDef name:refine_node arg:self arg:n arguments arg arg If Compare Assign Assign Call If Compare If Compare Call If Compare Assign Call If Compare Call Call Call If Compare FunctionDef name:get_node_type arg:a arguments arg Return return:yes Assign Call Return return:yes"
  },
  {
    "library": "django",
    "name": "_check_radio_fields",
    "source_code": "def _check_radio_fields(self, obj):\n    if not isinstance(obj.radio_fields, dict):\n        return must_be('a dictionary', option='radio_fields', obj=obj, id='admin.E021')\n    else:\n        return list(chain.from_iterable((self._check_radio_fields_key(obj, field_name, 'radio_fields') + self._check_radio_fields_value(obj, val, 'radio_fields[\"%s\"]' % field_name) for field_name, val in obj.radio_fields.items())))",
    "docstring": "Check that is a dictionary.",
    "type": "method",
    "file_path": "django\\django\\contrib\\admin\\checks.py",
    "ast_data": "FunctionDef name:_check_radio_fields arg:self arg:obj arguments arg arg If Call Return return:yes Call Return return:yes Call Call Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "update_outer",
    "source_code": "def update_outer(self, values: dict[str, Any]) -> None:\n    if self._level == 0:\n        raise RuntimeError('Cannot update metrics outside of a MetricsContext')\n    if self._level == 1:\n        self.update(values)",
    "docstring": "Update, but only when at the outermost context.",
    "type": "method",
    "file_path": "pytorch\\torch\\_dynamo\\metrics_context.py",
    "ast_data": "FunctionDef name:update_outer arg:self arg:values arguments arg arg If Compare Raise Call If Compare Call"
  },
  {
    "library": "matplotlib",
    "name": "set_path_effects",
    "source_code": "def set_path_effects(self, path_effects):\n    self._path_effects = path_effects\n    self.stale = True",
    "docstring": "Set the path effects. Parameters ---------- path_effects : list of",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\artist.py",
    "ast_data": "FunctionDef name:set_path_effects arg:self arg:path_effects arguments arg arg Assign Assign"
  },
  {
    "library": "tensorflow",
    "name": "_create_or_get_tensor_history_values_cache",
    "source_code": "def _create_or_get_tensor_history_values_cache(self, cache_name, graph, shape=None, dtype=dtypes.float32):\n    if graph is None:\n        raise ValueError('Invalid graph.')\n    if graph not in self._history_value_cache:\n        self._history_value_cache[graph] = {}\n    if cache_name not in self._history_value_cache[graph]:\n        if shape is None:\n            raise ValueError('shape must be provided at cache creation.')\n        if dtype.is_integer:\n            init_val = int(_COMPACT_TRACE_ENTRY_INIT_VALUE)\n        else:\n            init_val = _COMPACT_TRACE_ENTRY_INIT_VALUE\n        with graph.as_default() as g, g.name_scope(None):\n            self._history_value_cache[graph][cache_name] = variable_scope.get_variable('tt_history' + '_' + self._escape_namescopes(cache_name), shape=shape, dtype=dtype, initializer=init_ops.constant_initializer(init_val), trainable=False, use_resource=True, collections=[_TENSOR_TRACER_STORAGE, ops.GraphKeys.LOCAL_VARIABLES])\n    return self._history_value_cache[graph][cache_name]",
    "docstring": "Creates a variable as the cache to store historic intermediate tensor values. Args: cache_name: Name to be given to the cache (an instance of tf.variable). graph: Tensorflow graph. shape: A list of dimensions. dtype: Data type of created cache. Returns: A ref to newly created or existing cache with the given dimensions. Raises: ValueError: (1) If graph is None, or (2) shape is None when a new cache needs to be created.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\tpu\\tensor_tracer.py",
    "ast_data": "FunctionDef name:_create_or_get_tensor_history_values_cache arg:self arg:cache_name arg:graph arg:shape arg:dtype arguments arg arg arg arg arg If Compare Raise Call If Compare Assign If Compare If Compare Raise Call If Assign Call Assign With Call Call Assign Call Call Call Return return:yes"
  },
  {
    "library": "django",
    "name": "get_meta",
    "source_code": "def get_meta(self):\n    if self.model:\n        return self.model._meta",
    "docstring": "Return the Options instance (the model._meta) from which to start processing. Normally, this is self.model._meta, but it can be changed by subclasses.",
    "type": "method",
    "file_path": "django\\django\\db\\models\\sql\\query.py",
    "ast_data": "FunctionDef name:get_meta arg:self arguments arg If Return return:yes"
  },
  {
    "library": "pandas",
    "name": "_combine_lines",
    "source_code": "def _combine_lines(self, lines) -> str:\n    return f'[{','.join([line for line in (line.strip() for line in lines) if line])}]'",
    "docstring": "Combines a list of JSON objects into one JSON object.",
    "type": "method",
    "file_path": "pandas\\pandas\\io\\json\\_json.py",
    "ast_data": "FunctionDef name:_combine_lines arg:self arg:lines arguments arg arg Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_create_dense_column_weighted_sum",
    "source_code": "def _create_dense_column_weighted_sum(column, builder, units, weight_collections, trainable, weight_var=None):\n    tensor = column._get_dense_tensor(builder, weight_collections=weight_collections, trainable=trainable)\n    num_elements = column._variable_shape.num_elements()\n    batch_size = array_ops.shape(tensor)[0]\n    tensor = array_ops.reshape(tensor, shape=(batch_size, num_elements))\n    if weight_var is not None:\n        weight = weight_var\n    else:\n        weight = variable_scope.get_variable(name='weights', shape=[num_elements, units], initializer=init_ops.zeros_initializer(), trainable=trainable, collections=weight_collections)\n    return math_ops.matmul(tensor, weight, name='weighted_sum')",
    "docstring": "Create a weighted sum of a dense column for linear_model.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\feature_column\\feature_column.py",
    "ast_data": "FunctionDef name:_create_dense_column_weighted_sum arg:column arg:builder arg:units arg:weight_collections arg:trainable arg:weight_var arguments arg arg arg arg arg arg Assign Call Assign Call Assign Call Assign Call If Compare Assign Assign Call Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "save_for_backward",
    "source_code": "def save_for_backward(self, *args: Any) -> None:\n    self.to_save = tuple(_iter_tensors(args))\n    self._to_save_nested = args",
    "docstring": "See :meth:.",
    "type": "method",
    "file_path": "pytorch\\torch\\autograd\\function.py",
    "ast_data": "FunctionDef name:save_for_backward arg:self arguments arg arg Assign Call Call Assign"
  },
  {
    "library": "django",
    "name": "datetimes",
    "source_code": "def datetimes(self, field_name, kind, order='ASC', tzinfo=None):\n    if kind not in ('year', 'month', 'week', 'day', 'hour', 'minute', 'second'):\n        raise ValueError(\"'kind' must be one of 'year', 'month', 'week', 'day', 'hour', 'minute', or 'second'.\")\n    if order not in ('ASC', 'DESC'):\n        raise ValueError(\"'order' must be either 'ASC' or 'DESC'.\")\n    if settings.USE_TZ:\n        if tzinfo is None:\n            tzinfo = timezone.get_current_timezone()\n    else:\n        tzinfo = None\n    return self.annotate(datetimefield=Trunc(field_name, kind, output_field=DateTimeField(), tzinfo=tzinfo), plain_field=F(field_name)).values_list('datetimefield', flat=True).distinct().filter(plain_field__isnull=False).order_by(('-' if order == 'DESC' else '') + 'datetimefield')",
    "docstring": "Return a list of datetime objects representing all available datetimes for the given field_name, scoped to 'kind'.",
    "type": "method",
    "file_path": "django\\django\\db\\models\\query.py",
    "ast_data": "FunctionDef name:datetimes arg:self arg:field_name arg:kind arg:order arg:tzinfo arguments arg arg arg arg arg If Compare Raise Call If Compare Raise Call If If Compare Assign Call Assign Return return:yes Call Call Call Call Call Call Call Call Compare"
  },
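  {
    "library": "django",
    "name": "datetimes (usage sketch, editor addition)",
    "source_code": "# Assumes a hypothetical model, for illustration only:\n#   class Entry(models.Model):\n#       pub_date = models.DateTimeField()\nfrom myapp.models import Entry  # hypothetical app/model\n\n# One datetime per distinct year that has at least one Entry, newest first.\nyears = Entry.objects.datetimes('pub_date', 'year', order='DESC')\nfor dt in years:\n    print(dt.year)",
    "docstring": "Editor-added, hedged usage sketch (not part of the django sources): calling datetimes on a queryset. The Entry model and its pub_date field are hypothetical.",
    "type": "usage_example"
  },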
  {
    "library": "tensorflow",
    "name": "reset_tf_configure_bazelrc",
    "source_code": "def reset_tf_configure_bazelrc():\n    open(_TF_BAZELRC, 'w').close()",
    "docstring": "Reset file that contains customized config settings.",
    "type": "function",
    "file_path": "tensorflow\\configure.py",
    "ast_data": "FunctionDef name:reset_tf_configure_bazelrc arguments Call Call"
  },
  {
    "library": "tensorflow",
    "name": "MeanSquaredLogarithmicError",
    "source_code": "class MeanSquaredLogarithmicError(MeanMetricWrapper):\n\n    def __init__(self, name='mean_squared_logarithmic_error', dtype=None):\n        super(MeanSquaredLogarithmicError, self).__init__(mean_squared_logarithmic_error, name, dtype=dtype)",
    "docstring": "Computes the mean squared logarithmic error between and . Args: name: (Optional) string name of the metric instance. dtype: (Optional) data type of the metric result. Standalone usage: >>> m = tf.keras.metrics.MeanSquaredLogarithmicError() >>> m.update_state([[0, 1], [0, 0]], [[1, 1], [0, 0]]) >>> m.result().numpy() 0.12011322 >>> m.reset_state() >>> m.update_state([[0, 1], [0, 0]], [[1, 1], [0, 0]], ... sample_weight=[1, 0]) >>> m.result().numpy() 0.24022643 Usage with API:",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\metrics.py",
    "ast_data": "ClassDef name:MeanSquaredLogarithmicError FunctionDef name:__init__ arg:self arg:name arg:dtype arguments arg arg arg Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_generate_placeholder_string",
    "source_code": "def _generate_placeholder_string(x, default_placeholder='{}'):\n    placeholder = default_placeholder\n    rng = random.Random(5)\n    while placeholder in x:\n        placeholder = placeholder + str(rng.randint(0, 9))\n    return placeholder",
    "docstring": "Generate and return a string that does not appear in .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\logging_ops.py",
    "ast_data": "FunctionDef name:_generate_placeholder_string arg:x arg:default_placeholder arguments arg arg Assign Assign Call While Compare Assign Call Call Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "_set_deprecated",
    "source_code": "def _set_deprecated(self, value, *, new_key, deprecated_key, warning_message):\n    self.__dict__['_deprecated_key_to_warnings'][deprecated_key] = warning_message\n    self[new_key] = self[deprecated_key] = value",
    "docstring": "Set key in dictionary to be deprecated with its warning message.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\utils\\_bunch.py",
    "ast_data": "FunctionDef name:_set_deprecated arg:self arg:value arguments arg arg arg arg arg Assign Assign"
  },
  {
    "library": "tensorflow",
    "name": "get_on_write_saveable",
    "source_code": "def get_on_write_saveable(var, primary_var, name):\n\n    def tensor():\n        if context.executing_eagerly() and (not primary_var.is_initialized()):\n            return None\n        strategy = var.distribute_strategy\n        return strategy.extended.read_var(var)\n    spec = saveable_object.SaveSpec(tensor=tensor, slice_spec='', name=name, dtype=var.dtype, device=primary_var.device)\n    return (tensor, [spec])",
    "docstring": "Return saveable spec for AUTO and ON_WRITE variables.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\values_util.py",
    "ast_data": "FunctionDef name:get_on_write_saveable arg:var arg:primary_var arg:name arguments arg arg arg FunctionDef name:tensor arguments If BoolOp Call Call Return return:no Assign Return return:yes Call Assign Call Return return:yes"
  },
  {
    "library": "sphinx",
    "name": "docname_to_domain",
    "source_code": "def docname_to_domain(docname: str, compaction: bool | str) -> str:\n    if isinstance(compaction, str):\n        return compaction\n    if compaction:\n        return docname.split(SEP, 1)[0]\n    else:\n        return docname",
    "docstring": "Convert docname to domain for catalogs.",
    "type": "function",
    "file_path": "sphinx\\sphinx\\util\\i18n.py",
    "ast_data": "FunctionDef name:docname_to_domain arg:docname arg:compaction arguments arg arg If Call Return return:yes If Return return:yes Call Return return:yes"
  },
  {
    "library": "django",
    "name": "errors",
    "source_code": "@property\ndef errors(self):\n    if self._errors is None:\n        self.full_clean()\n    return self._errors",
    "docstring": "Return a list of form.errors for every form in self.forms.",
    "type": "method",
    "file_path": "django\\django\\forms\\formsets.py",
    "ast_data": "FunctionDef name:errors arg:self arguments arg If Compare Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "reset_peak_memory_stats",
    "source_code": "def reset_peak_memory_stats(device: 'Device'=None) -> None:\n    device = _get_device_index(device, optional=True)\n    return torch._C._cuda_resetPeakMemoryStats(device)",
    "docstring": "Reset the \"peak\" stats tracked by the CUDA memory allocator. See :func: for details. Peak stats correspond to the key in each individual stat dict. Args: device (torch.device or int, optional): selected device. Returns statistic for the current device, given by :func:, if :attr: is `cuda-memory-management` for more details about GPU memory management.",
    "type": "function",
    "file_path": "pytorch\\torch\\cuda\\memory.py",
    "ast_data": "FunctionDef name:reset_peak_memory_stats arg:device arguments arg Assign Call Return return:yes Call"
  },
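  {
    "library": "pytorch",
    "name": "reset_peak_memory_stats (usage sketch, editor addition)",
    "source_code": "import torch\n\nif torch.cuda.is_available():\n    a = torch.empty(1024, 1024, device='cuda')\n    torch.cuda.reset_peak_memory_stats()      # zero the \"peak\" counters\n    b = torch.empty(4096, 4096, device='cuda')\n    # Peak allocated bytes observed since the reset (dominated by `b`).\n    print(torch.cuda.max_memory_allocated())",
    "docstring": "Editor-added, hedged usage sketch (not part of the pytorch sources): resetting peak-memory counters between two allocations so the second peak can be measured in isolation. The tensor sizes are illustrative.",
    "type": "usage_example"
  },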
  {
    "library": "virtualenv",
    "name": "cli_run",
    "source_code": "def cli_run(args, options=None, setup_logging=True, env=None):\n    env = os.environ if env is None else env\n    of_session = session_via_cli(args, options, setup_logging, env)\n    with of_session:\n        of_session.run()\n    return of_session",
    "docstring": "Create a virtual environment given some command line interface arguments. :param args: the command line arguments :param options: passing in a `` to use handlers already registered :param env: environment variables to use :return: the session object of the creation (its structure for now is experimental and might change on short notice)",
    "type": "function",
    "file_path": "virtualenv\\src\\virtualenv\\run\\__init__.py",
    "ast_data": "FunctionDef name:cli_run arg:args arg:options arg:setup_logging arg:env arguments arg arg arg arg Assign Compare Assign Call With Call Return return:yes"
  },
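  {
    "library": "virtualenv",
    "name": "cli_run (usage sketch, editor addition)",
    "source_code": "from virtualenv import cli_run\n\n# Same positional arguments as the `virtualenv` CLI.\nsession = cli_run(['./my-venv'])\nprint(session.creator.dest)  # filesystem path of the created environment",
    "docstring": "Editor-added, hedged usage sketch (not part of the virtualenv sources): creating an environment programmatically with cli_run. The destination path is illustrative.",
    "type": "usage_example"
  },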
  {
    "library": "scikit-learn",
    "name": "BadStringValue",
    "source_code": "class BadStringValue(ArffException):\n    message = 'Invalid string value at line %d.'",
    "docstring": "Error raise when a string contains space but is not quoted.",
    "type": "class",
    "file_path": "scikit-learn\\sklearn\\externals\\_arff.py",
    "ast_data": "ClassDef name:BadStringValue Assign"
  },
  {
    "library": "scikit-learn",
    "name": "transform",
    "source_code": "def transform(self, X):\n    check_is_fitted(self)\n    X = validate_data(self, X, copy=True, dtype=[np.float64, np.float32], reset=False)\n    if (X <= -self.skewedness).any():\n        raise ValueError('X may not contain entries smaller than -skewedness.')\n    X += self.skewedness\n    np.log(X, X)\n    projection = safe_sparse_dot(X, self.random_weights_)\n    projection += self.random_offset_\n    np.cos(projection, projection)\n    projection *= np.sqrt(2.0) / np.sqrt(self.n_components)\n    return projection",
    "docstring": "Apply the approximate feature map to X. Parameters ---------- X : array-like, shape (n_samples, n_features) New data, where is the number of samples and is the number of features. All values of X must be strictly greater than \"-skewedness\". Returns ------- X_new : array-like, shape (n_samples, n_components) Returns the instance itself.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\kernel_approximation.py",
    "ast_data": "FunctionDef name:transform arg:self arg:X arguments arg arg Call Assign Call If Call Compare Raise Call Call Assign Call Call Call Call Return return:yes"
  },
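To make the pipeline in `transform` concrete, here is a NumPy re-statement of the skewed chi-squared feature map. `W` and `b` are hypothetical stand-ins for the fitted `random_weights_` and `random_offset_` attributes and are drawn arbitrarily here; this is a sketch, not the library implementation.

```python
import numpy as np

def skewed_chi2_map(X, W, b, skewedness, n_components):
    # shift, log, random projection, cosine nonlinearity, then scale
    X = np.asarray(X, dtype=float)
    if (X <= -skewedness).any():
        raise ValueError("X may not contain entries smaller than -skewedness.")
    projection = np.log(X + skewedness) @ W + b
    return np.cos(projection) * np.sqrt(2.0 / n_components)

rng = np.random.default_rng(0)
X = rng.random((5, 3))
W = rng.standard_normal((3, 4))        # stand-in for random_weights_
b = rng.uniform(0, 2 * np.pi, size=4)  # stand-in for random_offset_
print(skewed_chi2_map(X, W, b, skewedness=1.0, n_components=4).shape)  # (5, 4)
```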
  {
    "library": "tensorflow",
    "name": "get_sequence_dense_tensor",
    "source_code": "@abc.abstractmethod\ndef get_sequence_dense_tensor(self, transformation_cache, state_manager):\n    pass",
    "docstring": "Returns a . Args: transformation_cache: A object to access features. state_manager: A to create / access resources such as lookup tables.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\feature_column\\feature_column_v2.py",
    "ast_data": "FunctionDef name:get_sequence_dense_tensor arg:self arg:transformation_cache arg:state_manager arguments arg arg arg"
  },
  {
    "library": "scrapy",
    "name": "OriginWhenCrossOriginPolicy",
    "source_code": "class OriginWhenCrossOriginPolicy(ReferrerPolicy):\n    name: str = POLICY_ORIGIN_WHEN_CROSS_ORIGIN\n\n    def referrer(self, response_url: str, request_url: str) -> str | None:\n        origin = self.origin(response_url)\n        if origin == self.origin(request_url):\n            return self.stripped_referrer(response_url)\n        return origin",
    "docstring": "The \"origin-when-cross-origin\" policy specifies that a full URL, stripped for use as a referrer, is sent as referrer information when making same-origin requests from a particular request client, and only the ASCII serialization of the origin of the request client is sent as referrer information when making cross-origin requests from a particular request client.",
    "type": "class",
    "file_path": "scrapy\\scrapy\\spidermiddlewares\\referer.py",
    "ast_data": "ClassDef name:OriginWhenCrossOriginPolicy FunctionDef name:referrer arg:self arg:response_url arg:request_url arguments arg arg arg Assign Call If Compare Call Return return:yes Call Return return:yes"
  },
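A standalone sketch of the decision rule described above, using only `urllib.parse`. Scrapy's `stripped_referrer` additionally removes credentials and default ports, which this simplification glosses over.

```python
from urllib.parse import urlsplit, urlunsplit

def origin(url: str) -> str:
    parts = urlsplit(url)
    return f"{parts.scheme}://{parts.netloc}"

def referrer(response_url: str, request_url: str) -> str:
    if origin(response_url) == origin(request_url):
        p = urlsplit(response_url)  # same-origin: full URL minus fragment
        return urlunsplit((p.scheme, p.netloc, p.path, p.query, ""))
    return origin(response_url)     # cross-origin: origin only

print(referrer("https://example.com/a?x=1", "https://example.com/b"))
print(referrer("https://example.com/a?x=1", "https://other.org/"))
```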
  {
    "library": "pytorch",
    "name": "__init__",
    "source_code": "def __init__(self, tensor_size, index_tuple, res, input_var):\n    assert isinstance(res, TVar)\n    self.res = res\n    self.tensor_size = tensor_size\n    self.index_tuple = index_tuple\n    self.input_var = input_var",
    "docstring": "Constraint for getting item given a tensor size However, when the argument is a tuple, we will expect a tensor :param tensor_size: actual number representing the rank :param index_tuple: tuple for indexing :param res: tensor variable to carry the item we get :param input_var: a tensor variable from which we will get item",
    "type": "method",
    "file_path": "pytorch\\torch\\fx\\experimental\\migrate_gradual_types\\constraint.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:tensor_size arg:index_tuple arg:res arg:input_var arguments arg arg arg arg arg Call Assign Assign Assign Assign"
  },
  {
    "library": "kornia",
    "name": "quaternion_from_euler",
    "source_code": "def quaternion_from_euler(roll: Tensor, pitch: Tensor, yaw: Tensor) -> tuple[Tensor, Tensor, Tensor, Tensor]:\n    KORNIA_CHECK(roll.shape == pitch.shape)\n    KORNIA_CHECK(pitch.shape == yaw.shape)\n    roll_half = roll * 0.5\n    pitch_half = pitch * 0.5\n    yaw_half = yaw * 0.5\n    cy = yaw_half.cos()\n    sy = yaw_half.sin()\n    cp = pitch_half.cos()\n    sp = pitch_half.sin()\n    cr = roll_half.cos()\n    sr = roll_half.sin()\n    qw = cy * cp * cr + sy * sp * sr\n    qx = cy * cp * sr - sy * sp * cr\n    qy = sy * cp * sr + cy * sp * cr\n    qz = sy * cp * cr - cy * sp * sr\n    return (qw, qx, qy, qz)",
    "docstring": "Convert Euler angles to quaternion coefficients. Euler angles are assumed to be in radians in XYZ convention. Args: roll: the roll euler angle. pitch: the pitch euler angle. yaw: the yaw euler angle. Return: A tuple with quaternion coefficients in order of .",
    "type": "function",
    "file_path": "kornia\\kornia\\geometry\\conversions.py",
    "ast_data": "FunctionDef name:quaternion_from_euler arg:roll arg:pitch arg:yaw arguments arg arg arg Call Compare Call Compare Assign Assign Assign Assign Call Assign Call Assign Call Assign Call Assign Call Assign Call Assign Assign Assign Assign Return return:yes"
  },
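A quick sanity check of the conversion above: a pure 90-degree yaw should map to roughly `(w, x, y, z) = (0.7071, 0, 0, 0.7071)`.

```python
import math
import torch
from kornia.geometry.conversions import quaternion_from_euler

roll = pitch = torch.tensor(0.0)
yaw = torch.tensor(math.pi / 2)  # 90-degree yaw, radians
qw, qx, qy, qz = quaternion_from_euler(roll, pitch, yaw)
print(qw.item(), qx.item(), qy.item(), qz.item())  # ~0.7071 0.0 0.0 ~0.7071
```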
  {
    "library": "pytorch",
    "name": "stack_meta",
    "source_code": "@property\n@abc.abstractmethod\ndef stack_meta(self) -> _ModuleStackMeta:\n    ...",
    "docstring": "The module stack meta data associated with this node.",
    "type": "method",
    "file_path": "pytorch\\torch\\onnx\\_internal\\fx\\passes\\modularization.py",
    "ast_data": "FunctionDef name:stack_meta arg:self arguments arg"
  },
  {
    "library": "tensorflow",
    "name": "_enable_get_next_as_optional",
    "source_code": "def _enable_get_next_as_optional(strategy, dataset, cardinality):\n    if not getattr(strategy.extended, 'enable_partial_batch_handling', getattr(strategy.extended, 'experimental_enable_get_next_as_optional', False)):\n        return False\n    if cardinality == cardinality_lib.INFINITE:\n        return False\n    return not _is_statically_shaped(dataset.element_spec) or strategy.extended._in_multi_worker_mode()",
    "docstring": "Returns whether to enable using partial batch handling.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\input_lib.py",
    "ast_data": "FunctionDef name:_enable_get_next_as_optional arg:strategy arg:dataset arg:cardinality arguments arg arg arg If Call Call Return return:yes If Compare Return return:yes Return return:yes BoolOp Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_count_total_params",
    "source_code": "def _count_total_params(reader, count_exclude_pattern=''):\n    var_to_shape_map = reader.get_variable_to_shape_map()\n    if count_exclude_pattern:\n        regex_pattern = re.compile(count_exclude_pattern)\n        new_var_to_shape_map = {}\n        exclude_num_tensors = 0\n        exclude_num_params = 0\n        for v in var_to_shape_map:\n            if regex_pattern.search(v):\n                exclude_num_tensors += 1\n                exclude_num_params += np.prod(var_to_shape_map[v])\n            else:\n                new_var_to_shape_map[v] = var_to_shape_map[v]\n        var_to_shape_map = new_var_to_shape_map\n        print('# Excluding %d tensors (%d params) that match %s when counting.' % (exclude_num_tensors, exclude_num_params, count_exclude_pattern))\n    var_sizes = [np.prod(var_to_shape_map[v]) for v in var_to_shape_map]\n    return np.sum(var_sizes, dtype=int)",
    "docstring": "Count total number of variables.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\tools\\inspect_checkpoint.py",
    "ast_data": "FunctionDef name:_count_total_params arg:reader arg:count_exclude_pattern arguments arg arg Assign Call If Assign Call Assign Assign Assign For If Call Call Assign Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "set_deterministic",
    "source_code": "def set_deterministic() -> None:\n    torch.manual_seed(MAIN_RANDOM_SEED)\n    random.seed(MAIN_RANDOM_SEED)\n    numpy.random.seed(MAIN_RANDOM_SEED)\n    torch.use_deterministic_algorithms(True)",
    "docstring": "Make torch manual seed deterministic.",
    "type": "function",
    "file_path": "pytorch\\torch\\_inductor\\fx_passes\\numeric_utils.py",
    "ast_data": "FunctionDef name:set_deterministic arguments Call Call Call Call"
  },
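The same recipe in user code, with a hypothetical seed value in place of `MAIN_RANDOM_SEED`. Note that `torch.use_deterministic_algorithms(True)` makes ops without deterministic implementations raise rather than silently fall back.

```python
import random
import numpy as np
import torch

SEED = 1234  # hypothetical; the helper above uses MAIN_RANDOM_SEED

torch.manual_seed(SEED)
random.seed(SEED)
np.random.seed(SEED)
torch.use_deterministic_algorithms(True)

# Repeated runs now produce identical draws from all three RNGs.
print(torch.rand(2), np.random.rand(2), random.random())
```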
  {
    "library": "tensorflow",
    "name": "extract_object_files",
    "source_code": "def extract_object_files(archive_file: io.BufferedIOBase, dest_dir: str) -> None:\n    if not os.path.exists(dest_dir):\n        os.makedirs(dest_dir)\n    _check_archive_signature(archive_file)\n    extracted_files = dict()\n    for name, file_content in _extract_next_file(archive_file):\n        digest = hashlib.md5(file_content).digest()\n        for final_name in _generate_modified_filenames(name):\n            if final_name not in extracted_files:\n                extracted_files[final_name] = digest\n                with open(os.path.join(dest_dir, final_name), 'wb') as object_file:\n                    object_file.write(file_content)\n                break\n            elif extracted_files[final_name] == digest:\n                break",
    "docstring": "Extracts object files from the archive path to the destination directory. Extracts object files from the given BSD variant archive file. The extracted files are written to the destination directory, which will be created if the directory does not exist. Colliding object file names are automatically renamed upon extraction in order to avoid unintended overwriting. Args: archive_file: The archive file object pointing at its beginning. dest_dir: The destination directory path in which the extracted object files will be written. The directory will be created if it does not exist.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\lite\\ios\\extract_object_files.py",
    "ast_data": "FunctionDef name:extract_object_files arg:archive_file arg:dest_dir arguments arg arg If Call Call Call Assign Call For Call Assign Call Call For Call If Compare Assign With Call Call Call If Compare"
  },
  {
    "library": "matplotlib",
    "name": "set_data",
    "source_code": "def set_data(self, *, x=None, y=None, dx=None, dy=None, width=None, head_width=None, head_length=None):\n    if x is not None:\n        self._x = x\n    if y is not None:\n        self._y = y\n    if dx is not None:\n        self._dx = dx\n    if dy is not None:\n        self._dy = dy\n    if width is not None:\n        self._width = width\n    if head_width is not None:\n        self._head_width = head_width\n    if head_length is not None:\n        self._head_length = head_length\n    self._make_verts()\n    self.set_xy(self.verts)",
    "docstring": "Set x, y, dx, dy, width, head_with, and head_length. Values left as None will not be updated. Parameters ---------- x, y : float or None, default: None The x and y coordinates of the arrow base. dx, dy : float or None, default: None The length of the arrow along x and y direction. width : float or None, default: None Width of full arrow tail. head_width : float or None, default: None Total width of the full arrow head. head_length : float or None, default: None Length of arrow head.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\patches.py",
    "ast_data": "FunctionDef name:set_data arg:self arguments arg arg arg arg arg arg arg arg If Compare Assign If Compare Assign If Compare Assign If Compare Assign If Compare Assign If Compare Assign If Compare Assign Call Call"
  },
  {
    "library": "numpy",
    "name": "_scalar_type_key",
    "source_code": "def _scalar_type_key(typ):\n    dt = dtype(typ)\n    return (dt.kind.lower(), dt.itemsize)",
    "docstring": "A `sorted`.",
    "type": "function",
    "file_path": "numpy\\numpy\\_core\\numerictypes.py",
    "ast_data": "FunctionDef name:_scalar_type_key arg:typ arguments arg Assign Call Return return:yes Call"
  },
  {
    "library": "cherrypy",
    "name": "report",
    "source_code": "@cherrypy.expose\ndef report(self, filename):\n    cherrypy.response.headers['Content-Type'] = 'text/plain'\n    return self.stats(filename)",
    "docstring": "Render a statistics report.",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\lib\\profiler.py",
    "ast_data": "FunctionDef name:report arg:self arg:filename arguments arg arg Assign Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_get_unique_variable_scope",
    "source_code": "def _get_unique_variable_scope(prefix):\n    var_scope_store = get_variable_scope_store()\n    current_scope = get_variable_scope()\n    name = current_scope.name + '/' + prefix if current_scope.name else prefix\n    if var_scope_store.variable_scope_count(name) == 0:\n        return prefix\n    idx = 1\n    while var_scope_store.variable_scope_count(name + '_%d' % idx) > 0:\n        idx += 1\n    return prefix + '_%d' % idx",
    "docstring": "Get a name with the given prefix unique in the current variable scope.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\variable_scope.py",
    "ast_data": "FunctionDef name:_get_unique_variable_scope arg:prefix arguments arg Assign Call Assign Call Assign If Compare Call Return return:yes Assign While Compare Call Return return:yes"
  },
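The suffixing scheme above, restated as a self-contained helper over a plain set of taken names (an illustrative stand-in for the variable-scope store):

```python
def unique_name(prefix: str, taken: set) -> str:
    # Try `prefix` first, then `prefix_1`, `prefix_2`, ... until free.
    if prefix not in taken:
        return prefix
    idx = 1
    while f"{prefix}_{idx}" in taken:
        idx += 1
    return f"{prefix}_{idx}"

print(unique_name("conv", set()))               # conv
print(unique_name("conv", {"conv", "conv_1"}))  # conv_2
```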
  {
    "library": "pandas",
    "name": "_from_combined",
    "source_code": "def _from_combined(self, combined: np.ndarray) -> IntervalArray:\n    nc = combined.view('i8').reshape(-1, 2)\n    dtype = self._left.dtype\n    if needs_i8_conversion(dtype):\n        assert isinstance(self._left, (DatetimeArray, TimedeltaArray))\n        new_left: DatetimeArray | TimedeltaArray | np.ndarray = type(self._left)._from_sequence(nc[:, 0], dtype=dtype)\n        assert isinstance(self._right, (DatetimeArray, TimedeltaArray))\n        new_right: DatetimeArray | TimedeltaArray | np.ndarray = type(self._right)._from_sequence(nc[:, 1], dtype=dtype)\n    else:\n        assert isinstance(dtype, np.dtype)\n        new_left = nc[:, 0].view(dtype)\n        new_right = nc[:, 1].view(dtype)\n    return self._shallow_copy(left=new_left, right=new_right)",
    "docstring": "Create a new IntervalArray with our dtype from a 1D complex128 ndarray.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\arrays\\interval.py",
    "ast_data": "FunctionDef name:_from_combined arg:self arg:combined arguments arg arg Assign Call Call Assign If Call Call Call Call Call Call Call Call Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "get_tensor_by_name",
    "source_code": "def get_tensor_by_name(self, name) -> tensor_lib.Tensor:\n    if not isinstance(name, str):\n        raise TypeError('Tensor names are strings (or similar), not %s.' % type(name).__name__)\n    tensor = cast(tensor_lib.Tensor, self.as_graph_element(name, allow_tensor=True, allow_operation=False))\n    return tensor",
    "docstring": "Returns the with the given . This method may be called concurrently from multiple threads. Args: name: The name of the to return. Returns: The with the given . Raises: TypeError: If is not a string. KeyError: If does not correspond to a tensor in this graph.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\ops.py",
    "ast_data": "FunctionDef name:get_tensor_by_name arg:self arg:name arguments arg arg If Call Raise Call Call Assign Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_add_replica_id_to_graph",
    "source_code": "def _add_replica_id_to_graph(self):\n    if self._tt_config.num_replicas:\n        with ops.control_dependencies(None):\n            self._replica_id = tpu_ops.tpu_replicated_input(list(range(self._tt_config.num_replicas)), name='tt_replica_id')\n    else:\n        self._replica_id = 'unknown'",
    "docstring": "Adds nodes for computing the replica ID to the graph.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\tpu\\tensor_tracer.py",
    "ast_data": "FunctionDef name:_add_replica_id_to_graph arg:self arguments arg If With Call Assign Call Call Call Assign"
  },
  {
    "library": "tensorflow",
    "name": "reset_metrics",
    "source_code": "def reset_metrics(self):\n    for m in self.metrics:\n        m.reset_state()",
    "docstring": "Resets the state of all the metrics in the model. Examples: >>> inputs = tf.keras.layers.Input(shape=(3,)) >>> outputs = tf.keras.layers.Dense(2)(inputs) >>> model = tf.keras.models.Model(inputs=inputs, outputs=outputs) >>> model.compile(optimizer=\"Adam\", loss=\"mse\", metrics=[\"mae\"]) >>> x = np.random.random((2, 3)) >>> y = np.random.randint(0, 2, (2, 2)) >>> _ = model.fit(x, y, verbose=0) >>> assert all(float(m.result()) for m in model.metrics) >>> model.reset_metrics() >>> assert all(float(m.result()) == 0 for m in model.metrics)",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\training.py",
    "ast_data": "FunctionDef name:reset_metrics arg:self arguments arg For Call"
  },
  {
    "library": "scikit-learn",
    "name": "_iterate_columns",
    "source_code": "def _iterate_columns(X, columns=None):\n    if columns is None:\n        columns = range(X.shape[1])\n    if issparse(X):\n        for i in columns:\n            x = np.zeros(X.shape[0])\n            start_ptr, end_ptr = (X.indptr[i], X.indptr[i + 1])\n            x[X.indices[start_ptr:end_ptr]] = X.data[start_ptr:end_ptr]\n            yield x\n    else:\n        for i in columns:\n            yield X[:, i]",
    "docstring": "Iterate over columns of a matrix. Parameters ---------- X : ndarray or csc_matrix, shape (n_samples, n_features) Matrix over which to iterate. columns : iterable or None, default=None Indices of columns to iterate over. If None, iterate over all columns. Yields ------ x : ndarray, shape (n_samples,) Columns of in dense format.",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\feature_selection\\_mutual_info.py",
    "ast_data": "FunctionDef name:_iterate_columns arg:X arg:columns arguments arg arg If Compare Assign Call If Call For Assign Call Assign Assign For"
  },
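The CSC branch above in isolation: densifying one column at a time via `indptr`/`indices` slices, without materializing the whole matrix.

```python
import numpy as np
from scipy.sparse import csc_matrix

X = csc_matrix(np.array([[1.0, 0.0],
                         [0.0, 2.0],
                         [3.0, 0.0]]))
for i in range(X.shape[1]):
    col = np.zeros(X.shape[0])
    start, end = X.indptr[i], X.indptr[i + 1]
    col[X.indices[start:end]] = X.data[start:end]
    print(i, col)  # 0 [1. 0. 3.]  then  1 [0. 2. 0.]
```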
  {
    "library": "tensorflow",
    "name": "_process_assign",
    "source_code": "def _process_assign(self, node: ast.Assign) -> None:\n    if isinstance(node.value, ast.Call) and self._is_export_call(node.value.func):\n        if len(node.targets) != 1:\n            raise BadExportError(f'{self._current_file}:{node.lineno} export must be assigned to a single value: {ast.dump(node)}')\n        symbol = self._name(node.targets[0])\n        if not symbol:\n            raise BadExportError(f'{self._current_file}:{node.lineno} export must be assigned to a single value: {ast.dump(node)}')\n        self._add_exported_symbol(node.value.func, symbol)\n    else:\n        self.visit(node)",
    "docstring": "Process top-level assign for potential symbol export.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\tools\\api\\generator2\\extractor\\extractor.py",
    "ast_data": "FunctionDef name:_process_assign arg:self arg:node arguments arg arg If BoolOp Call Call If Compare Call Raise Call Call Assign Call If Raise Call Call Call Call"
  },
  {
    "library": "cherrypy",
    "name": "validate_translator",
    "source_code": "def validate_translator(t):\n    if not isinstance(t, dict):\n        raise ValueError('The translate argument must be a dict.')",
    "docstring": "Ensure the translator is of the correct length and size.",
    "type": "function",
    "file_path": "cherrypy\\cherrypy\\_cpdispatch.py",
    "ast_data": "FunctionDef name:validate_translator arg:t arguments arg If Call Raise Call"
  },
  {
    "library": "scipy",
    "name": "VertexCacheBase",
    "source_code": "class VertexCacheBase:\n\n    def __init__(self):\n        self.cache = collections.OrderedDict()\n        self.nfev = 0\n        self.index = -1\n\n    def __iter__(self):\n        for v in self.cache:\n            yield self.cache[v]\n        return\n\n    def size(self):\n        return self.index + 1\n\n    def print_out(self):\n        headlen = len(f'Vertex cache of size: {len(self.cache)}:')\n        print('=' * headlen)\n        print(f'Vertex cache of size: {len(self.cache)}:')\n        print('=' * headlen)\n        for v in self.cache:\n            self.cache[v].print_out()",
    "docstring": "Base class for a vertex cache for a simplicial complex.",
    "type": "class",
    "file_path": "scipy\\scipy\\optimize\\_shgo_lib\\_vertex.py",
    "ast_data": "ClassDef name:VertexCacheBase FunctionDef name:__init__ arg:self arguments arg Assign Call Assign Assign FunctionDef name:__iter__ arg:self arguments arg For Return return:no FunctionDef name:size arg:self arguments arg Return return:yes FunctionDef name:print_out arg:self arguments arg Assign Call Call Call Call Call Call For Call"
  },
  {
    "library": "pytorch",
    "name": "post_hook",
    "source_code": "def post_hook(self, is_last_joiner: bool):\n    self.ddp._sync_final_model(is_last_joiner)",
    "docstring": "Sync the final model to ensure that the model is the same across all processes.",
    "type": "method",
    "file_path": "pytorch\\torch\\nn\\parallel\\distributed.py",
    "ast_data": "FunctionDef name:post_hook arg:self arg:is_last_joiner arguments arg arg Call"
  },
  {
    "library": "tensorflow",
    "name": "_deep_tuple",
    "source_code": "def _deep_tuple(self, x):\n    return tuple(map(self._deep_tuple, x)) if isinstance(x, (list, tuple)) else x",
    "docstring": "Converts lists of lists to tuples of tuples.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\distributions\\bijector_impl.py",
    "ast_data": "FunctionDef name:_deep_tuple arg:self arg:x arguments arg arg Return return:yes Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_flat_shapes",
    "source_code": "@property\ndef _flat_shapes(self):\n    return structure.get_flat_tensor_shapes(self.element_spec)",
    "docstring": "Returns a list s for the element tensor representation. Returns: A list s for the element tensor representation.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\data\\ops\\dataset_ops.py",
    "ast_data": "FunctionDef name:_flat_shapes arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "ordering",
    "source_code": "def ordering(signatures):\n    signatures = list(map(tuple, signatures))\n    edges = [(a, b) for a in signatures for b in signatures if edge(a, b)]\n    edges = groupby(operator.itemgetter(0), edges)\n    for s in signatures:\n        if s not in edges:\n            edges[s] = []\n    edges = {k: [b for a, b in v] for k, v in edges.items()}\n    return _toposort(edges)",
    "docstring": "A sane ordering of signatures to check, first to last Topological sort of edges as given by ``",
    "type": "function",
    "file_path": "pytorch\\torch\\fx\\experimental\\unification\\multipledispatch\\conflict.py",
    "ast_data": "FunctionDef name:ordering arg:signatures arguments arg Assign Call Call Assign Call Assign Call Call For If Compare Assign Assign Call Return return:yes Call"
  },
  {
    "library": "sphinx",
    "name": "is_parallel_allowed",
    "source_code": "def is_parallel_allowed(self, typ: str) -> bool:\n    if typ == 'read':\n        attrname = 'parallel_read_safe'\n        message_not_declared = __(\"the %s extension does not declare if it is safe for parallel reading, assuming it isn't - please ask the extension author to check and make it explicit\")\n        message_not_safe = __('the %s extension is not safe for parallel reading')\n    elif typ == 'write':\n        attrname = 'parallel_write_safe'\n        message_not_declared = __(\"the %s extension does not declare if it is safe for parallel writing, assuming it isn't - please ask the extension author to check and make it explicit\")\n        message_not_safe = __('the %s extension is not safe for parallel writing')\n    else:\n        raise ValueError('parallel type %s is not supported' % typ)\n    for ext in self.extensions.values():\n        allowed = getattr(ext, attrname, None)\n        if allowed is None:\n            logger.warning(message_not_declared, ext.name)\n            logger.warning(__('doing serial %s'), typ)\n            return False\n        elif not allowed:\n            logger.warning(message_not_safe, ext.name)\n            logger.warning(__('doing serial %s'), typ)\n            return False\n    return True",
    "docstring": "Check whether parallel processing is allowed or not. :param typ: A type of processing; ``.",
    "type": "method",
    "file_path": "sphinx\\sphinx\\application.py",
    "ast_data": "FunctionDef name:is_parallel_allowed arg:self arg:typ arguments arg arg If Compare Assign Assign Call Assign Call If Compare Assign Assign Call Assign Call Raise Call For Call Assign Call If Compare Call Call Call Return return:yes If Call Call Call Return return:yes Return return:yes"
  },
  {
    "library": "django",
    "name": "__init__",
    "source_code": "def __init__(self, verbose_name=None, dim=2, geography=False, *, extent=(-180.0, -90.0, 180.0, 90.0), tolerance=0.05, **kwargs):\n    self.dim = dim\n    self.geography = geography\n    self._extent = extent\n    self._tolerance = tolerance\n    super().__init__(verbose_name=verbose_name, **kwargs)",
    "docstring": "The initialization function for geometry fields. In addition to the parameters from BaseSpatialField, it takes the following as keyword arguments: dim: The number of dimensions for this geometry. Defaults to 2. extent: Customize the extent, in a 4-tuple of WGS 84 coordinates, for the geometry field entry in the table. Defaults to (-180.0, -90.0, 180.0, 90.0). tolerance: Define the tolerance, in meters, to use for the geometry field entry in the table. Defaults to 0.05.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\db\\models\\fields.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:verbose_name arg:dim arg:geography arguments arg arg arg arg arg arg arg Assign Assign Assign Assign Call Call"
  },
  {
    "library": "pytorch",
    "name": "activation_is_int8_quantized",
    "source_code": "def activation_is_int8_quantized(qconfig):\n    return activation_dtype(qconfig) in [torch.quint8, torch.qint8, torch.uint8, torch.int8]",
    "docstring": "Given a qconfig, decide if the activation needs to be quantized to int8 or not, this includes quantizing to quint8, qint8",
    "type": "function",
    "file_path": "pytorch\\torch\\ao\\quantization\\utils.py",
    "ast_data": "FunctionDef name:activation_is_int8_quantized arg:qconfig arguments arg Return return:yes Compare Call"
  },
  {
    "library": "django",
    "name": "identifier_converter",
    "source_code": "def identifier_converter(self, name):\n    return name.lower()",
    "docstring": "Identifier comparison is case insensitive under Oracle.",
    "type": "method",
    "file_path": "django\\django\\db\\backends\\oracle\\introspection.py",
    "ast_data": "FunctionDef name:identifier_converter arg:self arg:name arguments arg arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "context",
    "source_code": "class context:\n\n    def __enter__(self):\n        self.autograd_context = _new_context()\n        return self.autograd_context._context_id()\n\n    def __exit__(self, type, value, traceback):\n        _release_context(self.autograd_context._context_id())",
    "docstring": "Context object to wrap forward and backward passes when using distributed autograd. The ``, which is required to correctly execute a distributed autograd pass. Example:: >>> # xdoctest: +SKIP >>> import torch.distributed.autograd as dist_autograd >>> with dist_autograd.context() as context_id: >>> t1 = torch.rand((3, 3), requires_grad=True) >>> t2 = torch.rand((3, 3), requires_grad=True) >>> loss = rpc.rpc_sync(\"worker1\", torch.add, args=(t1, t2)).sum() >>> dist_autograd.backward(context_id, [loss])",
    "type": "class",
    "file_path": "pytorch\\torch\\distributed\\autograd\\__init__.py",
    "ast_data": "ClassDef name:context FunctionDef name:__enter__ arg:self arguments arg Assign Call Return return:yes Call FunctionDef name:__exit__ arg:self arg:type arg:value arg:traceback arguments arg arg arg arg Call Call"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, req, config, section):\n    self.req = req\n    self.exclude = None\n    self.include = None\n    self.range = [None, None]\n    self.config = config\n    self._req_type = ''\n    self._section = section\n    self._initialized = None\n    self._error_message = []\n    self.parse_single_req()",
    "docstring": "Initializes a version or dependency requirement object. Args: req: List that contains individual supported versions or a single string that contains definition. e.g. [] e.g. [, , ] config: String that is the configuration name. e.g. section: String that is the section name from the config file under which the requirement is defined. e.g. , , ,",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\tools\\tensorflow_builder\\compat_checker\\compat_checker.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:req arg:config arg:section arguments arg arg arg arg Assign Assign Assign Assign Assign Assign Assign Assign Assign Call"
  },
  {
    "library": "scipy",
    "name": "read_header",
    "source_code": "def read_header(self):\n    pass",
    "docstring": "Returns header",
    "type": "method",
    "file_path": "scipy\\scipy\\io\\matlab\\_miobase.py",
    "ast_data": "FunctionDef name:read_header arg:self arguments arg"
  },
  {
    "library": "pygame",
    "name": "_simplename",
    "source_code": "def _simplename(name):\n    return ''.join((c.lower() for c in name if c.isalnum()))",
    "docstring": "create simple version of the font name",
    "type": "function",
    "file_path": "pygame\\src_py\\sysfont.py",
    "ast_data": "FunctionDef name:_simplename arg:name arguments arg Return return:yes Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "class_method_to_instance_method",
    "source_code": "def class_method_to_instance_method(original_function, instance):\n    weak_instance = weakref.ref(instance)\n    bound_method = types_lib.MethodType(original_function.python_function, tf_method_target.TfMethodTarget(weak_instance, original_function.python_function))\n    assert hasattr(original_function, '_name')\n    assert hasattr(original_function, '_autograph')\n    assert hasattr(original_function, '_function_type')\n    assert hasattr(original_function, 'python_function')\n    weak_bound_method_wrapper = None\n\n    def bound_method_wrapper(*args, **kwargs):\n        strong_bound_method_wrapper = weak_bound_method_wrapper()\n        wrapped_fn = strong_bound_method_wrapper.__wrapped__\n        if wrapped_fn is strong_bound_method_wrapper.__original_wrapped__:\n            wrapped_fn = original_function.python_function\n            return wrapped_fn(weak_instance(), *args, **kwargs)\n        return wrapped_fn(*args, **kwargs)\n    weak_bound_method_wrapper = weakref.ref(bound_method_wrapper)\n    instance_func = type(original_function)(tf_decorator.make_decorator(bound_method, bound_method_wrapper), name=original_function._name, autograph=original_function._autograph, input_signature=original_function.input_signature, reduce_retracing=original_function._reduce_retracing, jit_compile=original_function._jit_compile, experimental_attributes=original_function._attributes)\n    wrapped_instance_func = tf_decorator.make_decorator(bound_method, instance_func)\n    return wrapped_instance_func",
    "docstring": "Constructs a new with bound.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\polymorphic_function\\polymorphic_function.py",
    "ast_data": "FunctionDef name:class_method_to_instance_method arg:original_function arg:instance arguments arg arg Assign Call Assign Call Call Call Call Call Call Assign FunctionDef name:bound_method_wrapper arguments arg arg Assign Call Assign If Compare Assign Return return:yes Call Call Return return:yes Call Assign Call Assign Call Call Call Assign Call Return return:yes"
  },
  {
    "library": "numpy",
    "name": "__isub__",
    "source_code": "def __isub__(self, other):\n    m = getmask(other)\n    if self._mask is nomask:\n        if m is not nomask and m.any():\n            self._mask = make_mask_none(self.shape, self.dtype)\n            self._mask += m\n    elif m is not nomask:\n        self._mask += m\n    other_data = getdata(other)\n    other_data = np.where(self._mask, other_data.dtype.type(0), other_data)\n    self._data.__isub__(other_data)\n    return self",
    "docstring": "Subtract other from self in-place.",
    "type": "method",
    "file_path": "numpy\\numpy\\ma\\core.py",
    "ast_data": "FunctionDef name:__isub__ arg:self arg:other arguments arg arg Assign Call If Compare If BoolOp Compare Call Assign Call If Compare Assign Call Assign Call Call Call Return return:yes"
  },
  {
    "library": "kornia",
    "name": "laf_is_inside_image",
    "source_code": "def laf_is_inside_image(laf: Tensor, images: Tensor, border: int=0) -> Tensor:\n    KORNIA_CHECK_LAF(laf)\n    _, _, h, w = images.size()\n    pts = laf_to_boundary_points(laf, 12)\n    good_lafs_mask = (pts[..., 0] >= border) * (pts[..., 0] <= w - border) * (pts[..., 1] >= border) * (pts[..., 1] <= h - border)\n    good_lafs_mask = good_lafs_mask.min(dim=2)[0]\n    return good_lafs_mask",
    "docstring": "Check if the LAF is touching or partly outside the image boundary. Returns the mask of LAFs, which are fully inside the image, i.e. valid. Args: laf: :math:. images: images, lafs are detected in :math:. border: additional border. Returns: mask with shape :math:.",
    "type": "function",
    "file_path": "kornia\\kornia\\feature\\laf.py",
    "ast_data": "FunctionDef name:laf_is_inside_image arg:laf arg:images arg:border arguments arg arg arg Call Assign Call Assign Call Assign Compare Compare Compare Compare Assign Call Return return:yes"
  },
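A small usage sketch, assuming kornia's documented LAF layout `(B, N, 2, 3)`: one LAF well inside a 32x32 image and one centered off the canvas.

```python
import torch
from kornia.feature.laf import laf_is_inside_image

images = torch.zeros(1, 1, 32, 32)            # (B, CH, H, W)
laf = torch.tensor([[[[2.0, 0.0, 16.0],       # scale 2, centered at (16, 16)
                      [0.0, 2.0, 16.0]],
                     [[2.0, 0.0, 40.0],       # centered at x=40: off-canvas
                      [0.0, 2.0, 16.0]]]])
print(laf_is_inside_image(laf, images))       # expected: [[True, False]]
```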
  {
    "library": "scikit-learn",
    "name": "get_git_grep_info",
    "source_code": "def get_git_grep_info():\n    git_grep_filenames = subprocess.check_output(['git', 'grep', '-lP', 'cython.*parallel|_openmp_helpers'], text=True).splitlines()\n    git_grep_filenames = [f for f in git_grep_filenames if '.pyx' in f]\n    return [get_canonical_name_git_grep(each) for each in git_grep_filenames]",
    "docstring": "Return names of extensions that use OpenMP based on git grep regex.",
    "type": "function",
    "file_path": "scikit-learn\\build_tools\\check-meson-openmp-dependencies.py",
    "ast_data": "FunctionDef name:get_git_grep_info arguments Assign Call Call Assign Compare Return return:yes Call"
  },
  {
    "library": "django",
    "name": "db",
    "source_code": "@property\ndef db(self):\n    return self._db or router.db_for_read(self.model, **self._hints)",
    "docstring": "Return the database used if this query is executed now.",
    "type": "method",
    "file_path": "django\\django\\db\\models\\query.py",
    "ast_data": "FunctionDef name:db arg:self arguments arg Return return:yes BoolOp Call"
  },
  {
    "library": "kornia",
    "name": "extract_features",
    "source_code": "def extract_features(self, image: Tensor, mask: Optional[Tensor]=None) -> Dict[str, Tensor]:\n    lafs0, resps0, descs0 = self.local_feature(image, mask)\n    return {'lafs': lafs0, 'responses': resps0, 'descriptors': descs0}",
    "docstring": "Extract features from simple image.",
    "type": "method",
    "file_path": "kornia\\kornia\\feature\\integrated.py",
    "ast_data": "FunctionDef name:extract_features arg:self arg:image arg:mask arguments arg arg arg Assign Call Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "cached_unique",
    "source_code": "def cached_unique(*ys, xp=None):\n    res = tuple((_cached_unique(y, xp=xp) for y in ys))\n    if len(res) == 1:\n        return res[0]\n    return res",
    "docstring": "Return the unique values of ys. Use the cached values from dtype.metadata if present. This function does NOT cache the values in y, i.e. it doesn't change y. Call to attach the unique values to y. Parameters ---------- *ys : sequence of array-like Input data arrays. xp : module, default=None Precomputed array namespace module. When passed, typically from a caller that has already performed inspection of its own inputs, skips array namespace inspection. Returns ------- res : tuple of array-like or array-like Unique values of ys.",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\utils\\_unique.py",
    "ast_data": "FunctionDef name:cached_unique arguments arg arg Assign Call Call If Compare Call Return return:yes Return return:yes"
  },
  {
    "library": "cherrypy",
    "name": "_clean_exit",
    "source_code": "def _clean_exit(self):\n    if self.state != states.EXITING:\n        warnings.warn('The main thread is exiting, but the Bus is in the %r state; shutting it down automatically now. You must either call bus.block() after start(), or call bus.exit() before the main thread exits.' % self.state, RuntimeWarning)\n        self.exit()",
    "docstring": "Assert that the Bus is not running in atexit handler callback.",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\process\\wspbus.py",
    "ast_data": "FunctionDef name:_clean_exit arg:self arguments arg If Compare Call Call"
  },
  {
    "library": "sphinx",
    "name": "setup_resource_paths",
    "source_code": "def setup_resource_paths(app: Sphinx, pagename: str, templatename: str, context: dict[str, Any], doctree: Node) -> None:\n    pathto = context['pathto']\n    favicon_url = context.get('favicon_url')\n    if favicon_url and (not is_url(favicon_url)):\n        context['favicon_url'] = pathto('_static/' + favicon_url, resource=True)\n    logo_url = context.get('logo_url')\n    if logo_url and (not is_url(logo_url)):\n        context['logo_url'] = pathto('_static/' + logo_url, resource=True)",
    "docstring": "Set up relative resource paths.",
    "type": "function",
    "file_path": "sphinx\\sphinx\\builders\\html\\__init__.py",
    "ast_data": "FunctionDef name:setup_resource_paths arg:app arg:pagename arg:templatename arg:context arg:doctree arguments arg arg arg arg arg Assign Assign Call If BoolOp Call Assign Call Assign Call If BoolOp Call Assign Call"
  },
  {
    "library": "django",
    "name": "type",
    "source_code": "@property\ndef type(self):\n    return 0",
    "docstring": "GDAL uses OFTReals to represent OFTIntegers in created shapefiles -- forcing the type here since the underlying field type may actually be OFTReal.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\gdal\\field.py",
    "ast_data": "FunctionDef name:type arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "sharded_save",
    "source_code": "@tf_export('experimental.dtensor.sharded_save', v1=[])\ndef sharded_save(mesh: layout_lib.Mesh, file_prefix: Union[str, tensor_lib.Tensor], tensor_names: Union[List[str], tensor_lib.Tensor], shape_and_slices: Union[List[str], tensor_lib.Tensor], tensors: List[Union[tensor_lib.Tensor, tf_variables.Variable]]):\n    with ops.device(api.device_name()):\n        io_ops.save_v2(file_prefix, tensor_names, shape_and_slices, tensors)\n    mesh_util.barrier(mesh.host_mesh(), 'SaveV2')\n    with api.default_mesh(mesh.host_mesh()):\n        merge_op = io_ops.MergeV2Checkpoints(checkpoint_prefixes=[file_prefix], destination_prefix=file_prefix, delete_old_dirs=True)\n    mesh_util.barrier(mesh.host_mesh(), 'MergeV2Checkpoints')\n    return merge_op",
    "docstring": "Saves given named tensor slices in a sharded, multi-client safe fashion. The method makes sure the checkpoint directory state is correct in a sharded mutli-client saving. Namely, we place a barrier after SaveV2 to make sure every client has done writing the files. And another one after MergeV2Checkpoints to make sure all Metadata is properly merged. Upon existing, the checkpoint is completed and the all directory operations are done. Args: mesh: The Mesh that contains the Tensors to save. file_prefix: The prefix of checkpoint. tensor_names: a list of tensor names used in save op. shape_and_slices: a list of shape and slice specification used in save op. The only supported value is \"\" as we don't support distributed saving with slices yet. tensors: a list of tensors used in save op. The order should match tensor_names. Returns: A MergeV2Checkpoints op that merged all Metadata.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\dtensor\\python\\save_restore.py",
    "ast_data": "FunctionDef name:sharded_save arg:mesh arg:file_prefix arg:tensor_names arg:shape_and_slices arg:tensors arguments arg arg arg arg arg With Call Call Call Call Call With Call Call Assign Call Call Call Return return:yes Call"
  },
  {
    "library": "django",
    "name": "spatialite_version",
    "source_code": "def spatialite_version(self):\n    return self._get_spatialite_func('spatialite_version()')",
    "docstring": "Return the SpatiaLite library version as a string.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\db\\backends\\spatialite\\operations.py",
    "ast_data": "FunctionDef name:spatialite_version arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "kornia",
    "name": "map_location_to_cpu",
    "source_code": "def map_location_to_cpu(storage: Union[str, Tensor], *args: Any, **kwargs: Any) -> Union[str, Tensor]:\n    return storage",
    "docstring": "Map location of device to CPU, util for loading things from HUB.",
    "type": "function",
    "file_path": "kornia\\kornia\\utils\\helpers.py",
    "ast_data": "FunctionDef name:map_location_to_cpu arg:storage arguments arg arg arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "get_tensor_id",
    "source_code": "def get_tensor_id(self, op_name, output_slot):\n    return self._op_by_name[op_name].output_tensor_ids[output_slot]",
    "docstring": "Get the ID of a symbolic tensor in this graph.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\debug\\lib\\debug_events_reader.py",
    "ast_data": "FunctionDef name:get_tensor_id arg:self arg:op_name arg:output_slot arguments arg arg arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "PlaceholderInfo",
    "source_code": "@dataclasses.dataclass(frozen=True)\nclass PlaceholderInfo:\n    name: str\n    stack_trace: Optional[str]\n    users: list[PlaceholderInfo]\n    mutating_use_stack_trace: Optional[str]",
    "docstring": "A serializable version of torch.fx.Node that contains information pertinent to placeholder stack traces. We use these in logging and error messages related to cudagraphs, and will cache these results.",
    "type": "class",
    "file_path": "pytorch\\torch\\_inductor\\cudagraph_utils.py",
    "ast_data": "ClassDef name:PlaceholderInfo Call"
  },
  {
    "library": "pytorch",
    "name": "CUDAPluggableAllocator",
    "source_code": "class CUDAPluggableAllocator(_CUDAAllocator):\n\n    def __init__(self, path_to_so_file: str, alloc_fn_name: str, free_fn_name: str):\n        allocator = ctypes.CDLL(path_to_so_file)\n        alloc_fn = ctypes.cast(getattr(allocator, alloc_fn_name), ctypes.c_void_p).value\n        free_fn = ctypes.cast(getattr(allocator, free_fn_name), ctypes.c_void_p).value\n        assert alloc_fn is not None\n        assert free_fn is not None\n        self._allocator = torch._C._cuda_customAllocator(alloc_fn, free_fn)",
    "docstring": "CUDA memory allocator loaded from a so file.",
    "type": "class",
    "file_path": "pytorch\\torch\\cuda\\memory.py",
    "ast_data": "ClassDef name:CUDAPluggableAllocator FunctionDef name:__init__ arg:self arg:path_to_so_file arg:alloc_fn_name arg:free_fn_name arguments arg arg arg arg Assign Call Assign Call Call Assign Call Call Compare Compare Assign Call"
  },
  {
    "library": "scikit-learn",
    "name": "fit",
    "source_code": "@_fit_context(prefer_skip_nested_validation=False)\ndef fit(self, X, y=None, **params):\n    self._checked_cv_orig = check_cv(self.cv, y, classifier=is_classifier(self.estimator))\n    routed_params = self._get_routed_params_for_fit(params)\n    self._check_input_parameters(X=X, y=y, split_params=routed_params.splitter.split)\n    self._n_samples_orig = _num_samples(X)\n    super().fit(X, y=y, **params)\n    self.best_score_ = self.cv_results_['mean_test_score'][self.best_index_]\n    return self",
    "docstring": "Run fit with all sets of parameters. Parameters ---------- X : array-like, shape (n_samples, n_features) Training vector, where is the number of samples and is the number of features. y : array-like, shape (n_samples,) or (n_samples, n_output), optional Target relative to X for classification or regression; None for unsupervised learning. **params : dict of string -> object Parameters passed to the `` method of the estimator. Returns ------- self : object Instance of fitted estimator.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\model_selection\\_search_successive_halving.py",
    "ast_data": "FunctionDef name:fit arg:self arg:X arg:y arguments arg arg arg arg Assign Call Call Assign Call Call Assign Call Call Call Assign Return return:yes Call"
  },
  {
    "library": "seaborn",
    "name": "_index_to_ticklabels",
    "source_code": "def _index_to_ticklabels(index):\n    if isinstance(index, pd.MultiIndex):\n        return ['-'.join(map(to_utf8, i)) for i in index.values]\n    else:\n        return index.values",
    "docstring": "Convert a pandas index or multiindex into ticklabels.",
    "type": "function",
    "file_path": "seaborn\\seaborn\\matrix.py",
    "ast_data": "FunctionDef name:_index_to_ticklabels arg:index arguments arg If Call Return return:yes Call Call Return return:yes"
  },
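The MultiIndex branch above, shown with plain `str` in place of seaborn's `to_utf8` helper:

```python
import pandas as pd

idx = pd.MultiIndex.from_tuples([("a", 1), ("b", 2)], names=["grp", "n"])
labels = ["-".join(map(str, i)) for i in idx.values]
print(labels)  # ['a-1', 'b-2']

flat = pd.Index([10, 20, 30])
print(flat.values)  # a flat index is returned as-is
```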
  {
    "library": "scipy",
    "name": "_mode",
    "source_code": "def _mode(self, dim, df, scale):\n    if df >= dim + 1:\n        out = (df - dim - 1) * scale\n    else:\n        out = None\n    return out",
    "docstring": "Mode of the Wishart distribution. Parameters ---------- dim : int Dimension of the scale matrix %(_doc_default_callparams)s Notes ----- As this function does no argument checking, it should not be called directly; use 'mode' instead.",
    "type": "method",
    "file_path": "scipy\\scipy\\stats\\_multivariate.py",
    "ast_data": "FunctionDef name:_mode arg:self arg:dim arg:df arg:scale arguments arg arg arg arg If Compare Assign Assign Return return:yes"
  },
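Cross-checking the closed form above against the public API: for `df >= dim + 1` the mode is `(df - dim - 1) * scale`.

```python
import numpy as np
from scipy.stats import wishart

dim, df = 3, 6
scale = np.eye(dim)
print(wishart.mode(df=df, scale=scale))  # 2 * I
print((df - dim - 1) * scale)            # matches the closed form
```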
  {
    "library": "pandas",
    "name": "_gen_rows_without_counts",
    "source_code": "@abstractmethod\ndef _gen_rows_without_counts(self) -> Iterator[Sequence[str]]:\n    pass",
    "docstring": "Iterator with string representation of body data without counts.",
    "type": "method",
    "file_path": "pandas\\pandas\\io\\formats\\info.py",
    "ast_data": "FunctionDef name:_gen_rows_without_counts arg:self arguments arg"
  },
  {
    "library": "matplotlib",
    "name": "set_hatch_linewidth",
    "source_code": "def set_hatch_linewidth(self, hatch_linewidth):\n    self._hatch_linewidth = hatch_linewidth",
    "docstring": "Set the hatch linewidth.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\backend_bases.py",
    "ast_data": "FunctionDef name:set_hatch_linewidth arg:self arg:hatch_linewidth arguments arg arg Assign"
  },
  {
    "library": "scrapy",
    "name": "close_connections",
    "source_code": "def close_connections(self) -> None:\n    for conn in self._connections.values():\n        assert conn.transport is not None\n        conn.transport.abortConnection()",
    "docstring": "Close all the HTTP/2 connections and remove them from pool Returns: Deferred that fires when all connections have been closed",
    "type": "method",
    "file_path": "scrapy\\scrapy\\core\\http2\\agent.py",
    "ast_data": "FunctionDef name:close_connections arg:self arguments arg For Call Compare Call"
  },
  {
    "library": "django",
    "name": "get_static_prefix",
    "source_code": "@register.tag\ndef get_static_prefix(parser, token):\n    return PrefixNode.handle_token(parser, token, 'STATIC_URL')",
    "docstring": "Populate a template variable with the static prefix, ``. Usage:: {% get_static_prefix [as varname] %} Examples:: {% get_static_prefix %} {% get_static_prefix as static_prefix %}",
    "type": "function",
    "file_path": "django\\django\\templatetags\\static.py",
    "ast_data": "FunctionDef name:get_static_prefix arg:parser arg:token arguments arg arg Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "_factorialx_array_approx",
    "source_code": "def _factorialx_array_approx(n, k, extend):\n    if extend == 'complex':\n        return _factorialx_approx_core(n, k=k, extend=extend)\n    result = zeros(n.shape)\n    place(result, np.isnan(n), np.nan)\n    cond = n >= 0\n    n_to_compute = extract(cond, n)\n    place(result, cond, _factorialx_approx_core(n_to_compute, k=k, extend=extend))\n    return result",
    "docstring": "Calculate approximation to multifactorial for array n and integer k. Ensure that values aren't calculated unnecessarily.",
    "type": "function",
    "file_path": "scipy\\scipy\\special\\_basic.py",
    "ast_data": "FunctionDef name:_factorialx_array_approx arg:n arg:k arg:extend arguments arg arg arg If Compare Return return:yes Call Assign Call Call Call Assign Compare Assign Call Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "call_module",
    "source_code": "@compatibility(is_backward_compatible=True)\ndef call_module(self, target: 'Target', args: tuple[Argument, ...], kwargs: dict[str, Any]) -> Any:\n    assert isinstance(target, str)\n    submod = self.fetch_attr(target)\n    return submod(*args, **kwargs)",
    "docstring": "Execute a `Node `__ for details on semantics args (Tuple): Tuple of positional args for this invocation kwargs (Dict): Dict of keyword arguments for this invocation Return Any: The value returned by the module invocation",
    "type": "method",
    "file_path": "pytorch\\torch\\fx\\interpreter.py",
    "ast_data": "FunctionDef name:call_module arg:self arg:target arg:args arg:kwargs arguments arg arg arg arg Call Assign Call Return return:yes Call Call"
  },
  {
    "library": "scipy",
    "name": "_process_quantiles",
    "source_code": "def _process_quantiles(self, x, dim):\n    x = np.asarray(x, dtype=float)\n    if x.ndim == 0:\n        x = x[np.newaxis]\n    elif x.ndim == 1:\n        if dim == 1:\n            x = x[:, np.newaxis]\n        else:\n            x = x[np.newaxis, :]\n    return x",
    "docstring": "Adjust quantiles array so that last axis labels the components of each data point.",
    "type": "method",
    "file_path": "scipy\\scipy\\stats\\_multivariate.py",
    "ast_data": "FunctionDef name:_process_quantiles arg:self arg:x arg:dim arguments arg arg arg Assign Call If Compare Assign If Compare If Compare Assign Assign Return return:yes"
  },
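The same normalization as a standalone function, with a few shape checks showing how scalars and 1-D inputs are promoted so the last axis always indexes the `dim` components:

```python
import numpy as np

def process_quantiles(x, dim):
    x = np.asarray(x, dtype=float)
    if x.ndim == 0:
        x = x[np.newaxis]                 # scalar -> (1,)
    elif x.ndim == 1:
        x = x[:, np.newaxis] if dim == 1 else x[np.newaxis, :]
    return x

print(process_quantiles(1.0, dim=1).shape)         # (1,)
print(process_quantiles([1.0, 2.0], dim=1).shape)  # (2, 1)
print(process_quantiles([1.0, 2.0], dim=2).shape)  # (1, 2)
```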
  {
    "library": "pytorch",
    "name": "prepare_for_caching",
    "source_code": "def prepare_for_caching(self) -> None:\n    for result in self.compile_results:\n        if isinstance(result, StaticTritonCompileResult):\n            result.kernel.cubin_raw = None",
    "docstring": "Statically Launched CUDA Kernels have a raw cubin on them that we don't need to store in the cache(since TritonBundler handles the collection for us)",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\runtime\\triton_heuristics.py",
    "ast_data": "FunctionDef name:prepare_for_caching arg:self arguments arg For If Call Assign"
  },
  {
    "library": "scrapy",
    "name": "guess_scheme",
    "source_code": "def guess_scheme(url: str) -> str:\n    if _is_filesystem_path(url):\n        return _any_to_uri(url)\n    return add_http_if_no_scheme(url)",
    "docstring": "Add an URL scheme if missing: file:// for filepath-like input or http:// otherwise.",
    "type": "function",
    "file_path": "scrapy\\scrapy\\utils\\url.py",
    "ast_data": "FunctionDef name:guess_scheme arg:url arguments arg If Call Return return:yes Call Return return:yes Call"
  },
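Expected behaviour of the helper above on the two input classes; the exact file-URI form may vary by platform.

```python
from scrapy.utils.url import guess_scheme

print(guess_scheme("example.com/index.html"))  # http://example.com/index.html
print(guess_scheme("/tmp/data.json"))          # file:///tmp/data.json
print(guess_scheme("https://example.com/"))    # unchanged
```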
  {
    "library": "scipy",
    "name": "integral",
    "source_code": "def integral(self, xa, xb, ya, yb):\n    tx, ty, c = self.tck[:3]\n    kx, ky = self.degrees\n    with FITPACK_LOCK:\n        return dfitpack.dblint(tx, ty, c, kx, ky, xa, xb, ya, yb)",
    "docstring": "Evaluate the integral of the spline over area [xa,xb] x [ya,yb]. Parameters ---------- xa, xb : float The end-points of the x integration interval. ya, yb : float The end-points of the y integration interval. Returns ------- integ : float The value of the resulting integral.",
    "type": "method",
    "file_path": "scipy\\scipy\\interpolate\\_fitpack2.py",
    "ast_data": "FunctionDef name:integral arg:self arg:xa arg:xb arg:ya arg:yb arguments arg arg arg arg arg Assign Assign With Return return:yes Call"
  },
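A worked check of `integral` using the public `RectBivariateSpline` wrapper, which exposes the same method: fitting f(x, y) = x*y on a grid and integrating over the unit square should give about 1/4.

```python
import numpy as np
from scipy.interpolate import RectBivariateSpline

x = y = np.linspace(0.0, 1.0, 20)
Z = np.outer(x, y)                  # f(x, y) = x * y sampled on the grid
spline = RectBivariateSpline(x, y, Z)
print(spline.integral(0, 1, 0, 1))  # ~0.25
```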
  {
    "library": "scipy",
    "name": "time_dist",
    "source_code": "def time_dist(self, metric):\n    getattr(distance, self.metric)(self.points[0], self.points[1], **self.kwargs)",
    "docstring": "Time distance metrics individually (without batching with cdist or pdist).",
    "type": "method",
    "file_path": "scipy\\benchmarks\\benchmarks\\spatial.py",
    "ast_data": "FunctionDef name:time_dist arg:self arg:metric arguments arg arg Call Call"
  },
  {
    "library": "pygame",
    "name": "sprites",
    "source_code": "def sprites(self):\n    return self._spritelist.copy()",
    "docstring": "return a ordered list of sprites (first back, last top). LayeredUpdates.sprites(): return sprites",
    "type": "method",
    "file_path": "pygame\\src_py\\sprite.py",
    "ast_data": "FunctionDef name:sprites arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "kornia",
    "name": "visualize",
    "source_code": "def visualize(self, images: Union[Tensor, List[Tensor]], edge_maps: Optional[Union[Tensor, List[Tensor]]]=None, output_type: str='torch') -> Union[Tensor, List[Tensor], List['Image.Image']]:\n    if edge_maps is None:\n        edge_maps = self.forward(images)\n    output = []\n    for edge_map in edge_maps:\n        output.append(edge_map)\n    return self._tensor_to_type(output, output_type, is_batch=isinstance(images, Tensor))",
    "docstring": "Draw the super resolution results. Args: images: input tensor. edge_maps: detected edges. output_type: type of the output. Returns: output tensor.",
    "type": "method",
    "file_path": "kornia\\kornia\\models\\super_resolution\\base.py",
    "ast_data": "FunctionDef name:visualize arg:self arg:images arg:edge_maps arg:output_type arguments arg arg arg arg If Compare Assign Call Assign For Call Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "_find_the_perfect_or_nearest_match_onnxfunction",
    "source_code": "def _find_the_perfect_or_nearest_match_onnxfunction(self, node: torch.fx.Node, default_and_custom_functions: list[registration.ONNXFunction], onnx_args: Sequence[fx_type_utils.TensorLike | str | int | float | bool | list | complex | None], onnx_kwargs: dict[str, fx_type_utils.Argument]):\n    overload_match_ranking: dict[registration.ONNXFunction, int | None] = {}\n    for symbolic_function in reversed(default_and_custom_functions):\n        function_opschema = _OnnxSchemaChecker(symbolic_function.onnx_function)\n        if function_opschema.perfect_match_inputs(onnx_args, onnx_kwargs):\n            return symbolic_function.onnx_function\n        overload_match_ranking[symbolic_function] = function_opschema.match_score\n    overload_match_ranking = {k: v for k, v in overload_match_ranking.items() if v is not None}\n    if not overload_match_ranking:\n        op_full_name = self._get_aten_name(node).qualified_name()\n        raise RuntimeError(f'Cannot find any perfect/nearest match of symbolic function for {op_full_name},which should be registered under {node.target}.')\n    symbolic_function_list: list[registration.ONNXFunction] = sorted(overload_match_ranking, key=lambda k: (overload_match_ranking[k], k.is_custom, default_and_custom_functions.index(k)), reverse=True)\n    return symbolic_function_list[0].onnx_function",
    "docstring": "Find the perfect/nearest matched OnnxFunction for the given FX node, arguments, and keyword arguments. Args: default_and_custom_functions: The list includes overloaded functions, with custom ones appearing after the default ones. onnx_args: Arguments organized in PyTorch inputs way. onnx_kwargs: Keyword arguments organized in PyTorch inputs way. Returns: Either an or instance based on the dispatch algorithm. Raises: RuntimeError: If there are no overloaded functions available for the given FX node.",
    "type": "method",
    "file_path": "pytorch\\torch\\onnx\\_internal\\fx\\onnxfunction_dispatcher.py",
    "ast_data": "FunctionDef name:_find_the_perfect_or_nearest_match_onnxfunction arg:self arg:node arg:default_and_custom_functions arg:onnx_args arg:onnx_kwargs arguments arg arg arg arg arg For Call Assign Call If Call Return return:yes Assign Assign Call Compare If Assign Call Call Raise Call Call arguments arg Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "list",
    "source_code": "def list(self):\n    return [*self]",
    "docstring": "Get a list of available MovieWriters.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\animation.py",
    "ast_data": "FunctionDef name:list arg:self arguments arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "torch_key",
    "source_code": "@torch_key_cache\ndef torch_key() -> bytes:\n    with dynamo_timed('inductor_codecache_torch_key', log_pt2_compile_event=False):\n        if not config.is_fbcode():\n\n            def get_code_hash(root: str) -> bytes:\n                extra_files = ('codegen/aoti_runtime/interface.cpp', 'script.ld')\n                inductor_root = os.path.dirname(__file__)\n                extra_files = [os.path.join(inductor_root, x) for x in extra_files]\n                hasher = hashlib.sha256()\n                hasher.update(torch.__version__.encode('utf-8'))\n                build_code_hash([root], '', hasher)\n                for path in extra_files:\n                    if os.path.exists(path):\n                        with open(path, 'rb') as f:\n                            hasher.update(f.read())\n                return hasher.digest()\n            return get_code_hash(_TORCH_PATH)\n        from libfb.py import parutil\n        return parutil.get_file_contents('torch/src_hash.txt').rstrip().encode('ascii')",
    "docstring": "Compute a key that contains relevant information about torch source files",
    "type": "function",
    "file_path": "pytorch\\torch\\_inductor\\codecache.py",
    "ast_data": "FunctionDef name:torch_key arguments With Call If Call FunctionDef name:get_code_hash arg:root arguments arg Assign Assign Call Assign Call Assign Call Call Call Call For If Call With Call Call Call Return return:yes Call Return return:yes Call Return return:yes Call Call Call"
  },
  {
    "library": "scikit-learn",
    "name": "loss",
    "source_code": "def loss(self, y_true, raw_prediction, sample_weight=None, loss_out=None, n_threads=1):\n    if loss_out is None:\n        loss_out = np.empty_like(y_true)\n    if raw_prediction.ndim == 2 and raw_prediction.shape[1] == 1:\n        raw_prediction = raw_prediction.squeeze(1)\n    self.closs.loss(y_true=y_true, raw_prediction=raw_prediction, sample_weight=sample_weight, loss_out=loss_out, n_threads=n_threads)\n    return loss_out",
    "docstring": "Compute the pointwise loss value for each input. Parameters ---------- y_true : C-contiguous array of shape (n_samples,) Observed, true target values. raw_prediction : C-contiguous array of shape (n_samples,) or array of shape (n_samples, n_classes) Raw prediction values (in link space). sample_weight : None or C-contiguous array of shape (n_samples,) Sample weights. loss_out : None or C-contiguous array of shape (n_samples,) A location into which the result is stored. If None, a new array might be created. n_threads : int, default=1 Might use openmp thread parallelism. Returns ------- loss : array of shape (n_samples,) Element-wise loss function.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\_loss\\loss.py",
    "ast_data": "FunctionDef name:loss arg:self arg:y_true arg:raw_prediction arg:sample_weight arg:loss_out arg:n_threads arguments arg arg arg arg arg arg If Compare Assign Call If BoolOp Compare Compare Assign Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "dense_shape",
    "source_code": "@property\ndef dense_shape(self):\n    return self._dense_shape",
    "docstring": "A 1-D Tensor of int64 representing the shape of the dense tensor.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\sparse_tensor.py",
    "ast_data": "FunctionDef name:dense_shape arg:self arguments arg Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "get_dof_vec",
    "source_code": "@staticmethod\ndef get_dof_vec(tri_z, tri_dz, J):\n    npt = tri_z.shape[0]\n    dof = np.zeros([npt, 9], dtype=np.float64)\n    J1 = _ReducedHCT_Element.J0_to_J1 @ J\n    J2 = _ReducedHCT_Element.J0_to_J2 @ J\n    col0 = J @ np.expand_dims(tri_dz[:, 0, :], axis=2)\n    col1 = J1 @ np.expand_dims(tri_dz[:, 1, :], axis=2)\n    col2 = J2 @ np.expand_dims(tri_dz[:, 2, :], axis=2)\n    dfdksi = _to_matrix_vectorized([[col0[:, 0, 0], col1[:, 0, 0], col2[:, 0, 0]], [col0[:, 1, 0], col1[:, 1, 0], col2[:, 1, 0]]])\n    dof[:, 0:7:3] = tri_z\n    dof[:, 1:8:3] = dfdksi[:, 0]\n    dof[:, 2:9:3] = dfdksi[:, 1]\n    return dof",
    "docstring": "Compute the dof vector of a triangle, from the value of f, df and of the local Jacobian at each node. Parameters ---------- tri_z : shape (3,) array f nodal values. tri_dz : shape (3, 2) array df/dx, df/dy nodal values. J Jacobian matrix in local basis of apex 0. Returns ------- dof : shape (9,) array For each apex ``:: dof[iapex*3+0] = f(Ai) dof[iapex*3+1] = df(Ai).(AiAi+) dof[iapex*3+2] = df(Ai).(AiAi-)",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\tri\\_triinterpolate.py",
    "ast_data": "FunctionDef name:get_dof_vec arg:tri_z arg:tri_dz arg:J arguments arg arg arg Assign Assign Call Assign Assign Assign Call Assign Call Assign Call Assign Call Assign Assign Assign Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "masked",
    "source_code": "def masked(self, mask_proxy, masked_body: Callable[..., Any], other_proxy):\n    name = self.body.add_submodule(None, 'masked_subblock')\n    self.body.submodules[name] = self.body.bind_masked_shim(name)\n    self.body.subblocks[name] = LoopBodyBlock(self.body, masked_body, [])\n    return self.tracer.create_proxy('call_module', name, (mask_proxy, other_proxy), {})",
    "docstring": "Recursively capture the masked out body in another LoopBodyBlock",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\loop_body.py",
    "ast_data": "FunctionDef name:masked arg:self arg:mask_proxy arg:masked_body arg:other_proxy arguments arg arg arg arg Assign Call Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_NodeDef",
    "source_code": "def _NodeDef(op_type, name, attrs=None) -> node_def_pb2.NodeDef:\n    node_def = node_def_pb2.NodeDef(op=compat.as_bytes(op_type), name=compat.as_bytes(name))\n    if attrs:\n        for k, v in attrs.items():\n            node_def.attr[k].CopyFrom(v)\n    return node_def",
    "docstring": "Create a NodeDef proto. Args: op_type: Value for the \"op\" attribute of the NodeDef proto. name: Value for the \"name\" attribute of the NodeDef proto. attrs: Dictionary where the key is the attribute name (a string) and the value is the respective \"attr\" attribute of the NodeDef proto (an AttrValue). Returns: A node_def_pb2.NodeDef protocol buffer.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\ops.py",
    "ast_data": "FunctionDef name:_NodeDef arg:op_type arg:name arg:attrs arguments arg arg arg Assign Call Call Call If For Call Call Return return:yes"
  },
  {
    "library": "seaborn",
    "name": "map_lower",
    "source_code": "def map_lower(self, func, **kwargs):\n    indices = zip(*np.tril_indices_from(self.axes, -1))\n    self._map_bivariate(func, indices, **kwargs)\n    return self",
    "docstring": "Plot with a bivariate function on the lower diagonal subplots. Parameters ---------- func : callable plotting function Must take x, y arrays as positional arguments and draw onto the \"currently active\" matplotlib Axes. Also needs to accept kwargs called ``.",
    "type": "method",
    "file_path": "seaborn\\seaborn\\axisgrid.py",
    "ast_data": "FunctionDef name:map_lower arg:self arg:func arguments arg arg arg Assign Call Call Call Return return:yes"
  },
  {
    "library": "sphinx",
    "name": "_translate_pattern",
    "source_code": "def _translate_pattern(pat: str) -> str:\n    i, n = (0, len(pat))\n    res = ''\n    while i < n:\n        c = pat[i]\n        i += 1\n        if c == '*':\n            if i < n and pat[i] == '*':\n                i += 1\n                res = res + '.*'\n            else:\n                res = res + '[^/]*'\n        elif c == '?':\n            res = res + '[^/]'\n        elif c == '[':\n            j = i\n            if j < n and pat[j] == '!':\n                j += 1\n            if j < n and pat[j] == ']':\n                j += 1\n            while j < n and pat[j] != ']':\n                j += 1\n            if j >= n:\n                res = res + '\\\\['\n            else:\n                stuff = pat[i:j].replace('\\\\', '\\\\\\\\')\n                i = j + 1\n                if stuff[0] == '!':\n                    stuff = '^/' + stuff[1:]\n                elif stuff[0] == '^':\n                    stuff = '\\\\' + stuff\n                res = f'{res}[{stuff}]'\n        else:\n            res += re.escape(c)\n    return res + '$'",
    "docstring": "Translate a shell-style glob pattern to a regular expression. Adapted from the fnmatch module, but enhanced so that single stars don't match slashes.",
    "type": "function",
    "file_path": "sphinx\\sphinx\\util\\matching.py",
    "ast_data": "FunctionDef name:_translate_pattern arg:pat arguments arg Assign Call Assign While Compare Assign If Compare If BoolOp Compare Compare Assign Assign If Compare Assign If Compare Assign If BoolOp Compare Compare If BoolOp Compare Compare While BoolOp Compare Compare If Compare Assign Assign Call Assign If Compare Assign If Compare Assign Assign Call Return return:yes"
  },
  {
    "library": "scipy",
    "name": "set_workers",
    "source_code": "@contextlib.contextmanager\ndef set_workers(workers):\n    old_workers = get_workers()\n    _config.default_workers = _workers(operator.index(workers))\n    try:\n        yield\n    finally:\n        _config.default_workers = old_workers",
    "docstring": "Context manager for the default number of workers used in Parameters ---------- workers : int The default number of workers to use Examples -------- >>> import numpy as np >>> from scipy import fft, signal >>> rng = np.random.default_rng() >>> x = rng.standard_normal((128, 64)) >>> with fft.set_workers(4): ... y = signal.fftconvolve(x, x)",
    "type": "function",
    "file_path": "scipy\\scipy\\fft\\_pocketfft\\helper.py",
    "ast_data": "FunctionDef name:set_workers arg:workers arguments arg Assign Call Assign Call Call Try Assign"
  },
  {
    "library": "tensorflow",
    "name": "_to_sparse_input_and_drop_ignore_values",
    "source_code": "def _to_sparse_input_and_drop_ignore_values(input_tensor, ignore_value=None):\n    input_tensor = sparse_tensor_lib.convert_to_tensor_or_sparse_tensor(input_tensor)\n    if isinstance(input_tensor, sparse_tensor_lib.SparseTensor):\n        return input_tensor\n    with ops.name_scope(None, 'to_sparse_input', (input_tensor, ignore_value)):\n        if ignore_value is None:\n            if input_tensor.dtype == dtypes.string:\n                ignore_value = ''\n            elif input_tensor.dtype.is_integer:\n                ignore_value = -1\n            else:\n                ignore_value = input_tensor.dtype.as_numpy_dtype()\n        ignore_value = math_ops.cast(ignore_value, input_tensor.dtype, name='ignore_value')\n        indices = array_ops.where(math_ops.not_equal(input_tensor, ignore_value), name='indices')\n        return sparse_tensor_lib.SparseTensor(indices=indices, values=array_ops.gather_nd(input_tensor, indices, name='values'), dense_shape=array_ops.shape(input_tensor, out_type=dtypes.int64, name='dense_shape'))",
    "docstring": "Converts a to a , dropping ignore_value cells. If is already a , just return it. Args: input_tensor: A string or integer . ignore_value: Entries in equal to this value will be absent from the resulting . If , default value of 's dtype will be used ('' for , -1 for ). Returns: A with the same shape as . Raises: ValueError: when 's rank is .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\feature_column\\feature_column.py",
    "ast_data": "FunctionDef name:_to_sparse_input_and_drop_ignore_values arg:input_tensor arg:ignore_value arguments arg arg Assign Call If Call Return return:yes With Call If Compare If Compare Assign If Assign Assign Call Assign Call Assign Call Call Return return:yes Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "reset_peak_memory_stats",
    "source_code": "def reset_peak_memory_stats(device: _device_t=None) -> None:\n    device = _get_device_index(device, optional=True)\n    return torch._C._xpu_resetPeakMemoryStats(device)",
    "docstring": "Reset the \"peak\" stats tracked by the XPU memory allocator. See :func: for details. Peak stats correspond to the key in each individual stat dict. Args: device (torch.device or int or str, optional): selected device. Returns statistic for the current device, given by :func:, if :attr: is `` (default).",
    "type": "function",
    "file_path": "pytorch\\torch\\xpu\\memory.py",
    "ast_data": "FunctionDef name:reset_peak_memory_stats arg:device arguments arg Assign Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "outbound_nodes",
    "source_code": "@property\n@doc_controls.do_not_doc_inheritable\ndef outbound_nodes(self):\n    return self._outbound_nodes",
    "docstring": "Deprecated, do NOT use! Only for compatibility with external Keras.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\base_layer.py",
    "ast_data": "FunctionDef name:outbound_nodes arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "activity_regularizer",
    "source_code": "@property\ndef activity_regularizer(self):\n    return self._activity_regularizer",
    "docstring": "Optional regularizer function for the output of this layer.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\base_layer_v1.py",
    "ast_data": "FunctionDef name:activity_regularizer arg:self arguments arg Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "SubplotDivider",
    "source_code": "class SubplotDivider(Divider):\n\n    def __init__(self, fig, *args, horizontal=None, vertical=None, aspect=None, anchor='C'):\n        self.figure = fig\n        super().__init__(fig, [0, 0, 1, 1], horizontal=horizontal or [], vertical=vertical or [], aspect=aspect, anchor=anchor)\n        self.set_subplotspec(SubplotSpec._from_subplot_args(fig, args))\n\n    def get_position(self):\n        return self.get_subplotspec().get_position(self.figure).bounds\n\n    def get_subplotspec(self):\n        return self._subplotspec\n\n    def set_subplotspec(self, subplotspec):\n        self._subplotspec = subplotspec\n        self.set_position(subplotspec.get_position(self.figure))",
    "docstring": "The Divider class whose rectangle area is specified as a subplot geometry.",
    "type": "class",
    "file_path": "matplotlib\\lib\\mpl_toolkits\\axes_grid1\\axes_divider.py",
    "ast_data": "ClassDef name:SubplotDivider FunctionDef name:__init__ arg:self arg:fig arguments arg arg arg arg arg arg arg Assign Call Call BoolOp BoolOp Call Call FunctionDef name:get_position arg:self arguments arg Return return:yes Call Call FunctionDef name:get_subplotspec arg:self arguments arg Return return:yes FunctionDef name:set_subplotspec arg:self arg:subplotspec arguments arg arg Assign Call Call"
  },
  {
    "library": "pytorch",
    "name": "_check_tensor_list",
    "source_code": "def _check_tensor_list(param, param_name) -> None:\n    if not isinstance(param, list):\n        raise TypeError(f'Invalid function argument. Expected parameter `{param_name}` of type List[torch.Tensor]\\n             but got {type(param)} instead.')\n    elif not all((isinstance(p, torch.Tensor) for p in param)):\n        raise TypeError(f'Invalid function argument. Expected parameter `{param_name}` of type List[torch.Tensor]\\n             but got {type(param)} with elements of type {[type(p) for p in param]}.')",
    "docstring": "Check that the parameter `` is a list of tensors.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\distributed_c10d.py",
    "ast_data": "FunctionDef name:_check_tensor_list arg:param arg:param_name arguments arg arg If Call Raise Call Call If Call Call Raise Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_run_function_for_calibration_eager_mode",
    "source_code": "def _run_function_for_calibration_eager_mode(func: wrap_function.WrappedFunction, representative_dataset: rd.RepresentativeDataset) -> None:\n    _, keyword_args = func.structured_input_signature\n    sample_validator = _create_sample_validator(expected_input_keys=keyword_args.keys())\n    for sample in map(sample_validator, _log_sample_num_for_calibration(representative_dataset)):\n        func_kwargs = _convert_values_to_tf_tensors(sample)\n        func(**func_kwargs)",
    "docstring": "Runs the representative dataset through a function for calibration. NOTE: This is intended to be run in eager mode (TF2). Args: func: The function to run the representative samples through. representative_dataset: Representative dataset used for calibration. The input keys and input values of the representative samples should match the keyword arguments of .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\compiler\\mlir\\quantization\\tensorflow\\python\\py_function_lib.py",
    "ast_data": "FunctionDef name:_run_function_for_calibration_eager_mode arg:func arg:representative_dataset arguments arg arg Assign Assign Call Call For Call Call Assign Call Call"
  },
  {
    "library": "pytorch",
    "name": "dequantize_per_token",
    "source_code": "@impl(quantized_decomposed_lib, 'dequantize_per_token', 'CompositeExplicitAutograd')\ndef dequantize_per_token(input: torch.Tensor, scales: torch.Tensor, zero_points: torch.Tensor, quant_min: int, quant_max: int, dtype: torch.dtype, output_dtype: torch.dtype=torch.float32):\n    input = input - zero_points\n    input = input * scales\n    return input.to(output_dtype)",
    "docstring": "Per token dequantization for the Tensor using the quantization parameters to map from floating point to quantized values. This means for a N dimension Tensor (M1, M2, ...Mn, N), we calculate scales/zero_points for each N elements and quantize every N elements with the same quantization parameter. The dimension for scales/zero_points will be (M1 * M2 ... * Mn) Args: input (torch.Tensor): quantized Tensor (uint8, int8 etc.) scales (float64 torch.Tensor): quantization parameter for per token affine quantization zero_points (int64 torch.Tensor): quantization parameter for per token affine quantization quant_min (int): minimum quantized value for input Tensor quant_max (int): maximum quantized value for input Tensor dtype (torch.dtype): dtype (e.g. torch.uint8) for input Tensor output_dtype (torch.dtype): dtype (e.g. torch.float32) for output Tensor Returns: dequantized Tensor with dtype",
    "type": "function",
    "file_path": "pytorch\\torch\\ao\\quantization\\fx\\_decomposed.py",
    "ast_data": "FunctionDef name:dequantize_per_token arg:input arg:scales arg:zero_points arg:quant_min arg:quant_max arg:dtype arg:output_dtype arguments arg arg arg arg arg arg arg Assign Assign Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "execution_mode",
    "source_code": "@tf_contextlib.contextmanager\ndef execution_mode(mode):\n    if mode is None:\n        yield\n    else:\n        ctx = context()\n        executor_new = executor.new_executor(mode == ASYNC)\n        executor_old = ctx.executor\n        try:\n            executor_old.wait()\n            ctx.executor = executor_new\n            yield\n        finally:\n            ctx.executor = executor_old\n            executor_new.wait()",
    "docstring": "Context manager for setting execution mode for current thread.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\context.py",
    "ast_data": "FunctionDef name:execution_mode arg:mode arguments arg If Compare Assign Call Assign Call Compare Assign Try Call Assign Assign Call"
  },
  {
    "library": "tensorflow",
    "name": "name",
    "source_code": "@property\ndef name(self):\n    return '{}_embedding'.format(self.categorical_column.name)",
    "docstring": "See base class.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\feature_column\\feature_column_v2.py",
    "ast_data": "FunctionDef name:name arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "numpy",
    "name": "partition",
    "source_code": "@set_module('numpy.char')\ndef partition(a, sep):\n    return np.stack(strings_partition(a, sep), axis=-1)",
    "docstring": "Partition each element in around . Calls :meth: element-wise. For each element in , split the element as the first occurrence of , and return 3 strings containing the part before the separator, the separator itself, and the part after the separator. If the separator is not found, return 3 strings containing the string itself, followed by two empty strings. Parameters ---------- a : array-like, with `a` dtype, depending on input types. The output array will have an extra dimension with 3 elements per input element. Examples -------- >>> import numpy as np >>> x = np.array([\"Numpy is nice!\"]) >>> np.char.partition(x, \" \") array([['Numpy', ' ', 'is nice!']], dtype='<U8') See Also -------- str.partition",
    "type": "function",
    "file_path": "numpy\\numpy\\_core\\defchararray.py",
    "ast_data": "FunctionDef name:partition arg:a arg:sep arguments arg arg Return return:yes Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "__getstate__",
    "source_code": "def __getstate__(self):\n    result = self.__dict__.copy()\n    del result['_lock']\n    del result['_descriptor_cache']\n    del result['_key_for_call_stats']\n    return result",
    "docstring": "Custom pickling, to omit unpickleable objects.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\polymorphic_function\\polymorphic_function.py",
    "ast_data": "FunctionDef name:__getstate__ arg:self arguments arg Assign Call Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "setup_module",
    "source_code": "def setup_module(module):\n    import numpy as np\n    _random_seed = os.environ.get('SKLEARN_SEED', None)\n    if _random_seed is None:\n        _random_seed = np.random.uniform() * np.iinfo(np.int32).max\n    _random_seed = int(_random_seed)\n    print('I: Seeding RNGs with %r' % _random_seed)\n    np.random.seed(_random_seed)\n    random.seed(_random_seed)",
    "docstring": "Fixture for the tests to assure globally controllable seeding of RNGs",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\__init__.py",
    "ast_data": "FunctionDef name:setup_module arg:module arguments arg Assign Call If Compare Assign Call Call Assign Call Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_revive_graph_network",
    "source_code": "def _revive_graph_network(self, identifier, metadata, node_id):\n    config = metadata.get('config')\n    if not generic_utils.validate_config(config):\n        return None\n    class_name = compat.as_str(metadata['class_name'])\n    if generic_utils.get_registered_object(class_name) is not None:\n        return None\n    model_is_functional_or_sequential = metadata.get('is_graph_network', False) or class_name == 'Sequential' or class_name == 'Functional'\n    if not model_is_functional_or_sequential:\n        return None\n    if class_name == 'Sequential':\n        model = models_lib.Sequential(name=config['name'])\n    elif identifier == constants.SEQUENTIAL_IDENTIFIER:\n        model = models_lib.Sequential(name=class_name)\n    else:\n        model = models_lib.Functional(inputs=[], outputs=[], name=config['name'])\n    layers = self._get_child_layer_node_ids(node_id)\n    self.model_layer_dependencies[node_id] = (model, layers)\n    if not layers:\n        self._models_to_reconstruct.append(node_id)\n    return model",
    "docstring": "Revives a graph network from config.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\saving\\saved_model\\load.py",
    "ast_data": "FunctionDef name:_revive_graph_network arg:self arg:identifier arg:metadata arg:node_id arguments arg arg arg arg Assign Call If Call Return return:no Assign Call If Compare Call Return return:no Assign BoolOp Call Compare Compare If Return return:no If Compare Assign Call If Compare Assign Call Assign Call Assign Call Assign If Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "sample",
    "source_code": "def sample(self, sample_shape: _size=torch.Size()) -> Tensor:\n    with torch.no_grad():\n        return self.rsample(sample_shape)",
    "docstring": "Generates a sample_shape shaped sample or sample_shape shaped batch of samples if the distribution parameters are batched.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributions\\distribution.py",
    "ast_data": "FunctionDef name:sample arg:self arg:sample_shape arguments arg arg Call With Call Return return:yes Call"
  },
  {
    "library": "kornia",
    "name": "find_essential",
    "source_code": "def find_essential(points1: torch.Tensor, points2: torch.Tensor, weights: Optional[torch.Tensor]=None) -> torch.Tensor:\n    E = run_5point(points1, points2, weights).to(points1.dtype)\n    return E",
    "docstring": "Find essential matrices. Args: points1: A set of points in the first image with a tensor shape :math:. points2: A set of points in the second image with a tensor shape :math:. weights: Tensor containing the weights per point correspondence with a shape of :math:. Returns: the computed essential matrices with shape :math:. Note that all possible solutions are returned, i.e., 10 essential matrices for each image pair. To choose the best one out of 10, try to check the one with the lowest Sampson distance.",
    "type": "function",
    "file_path": "kornia\\kornia\\geometry\\epipolar\\essential.py",
    "ast_data": "FunctionDef name:find_essential arg:points1 arg:points2 arg:weights arguments arg arg arg Assign Call Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "_get_rendered_text_width",
    "source_code": "def _get_rendered_text_width(self, text):\n    w, h, d = _get_text_metrics_with_cache(self._renderer, text, self.get_fontproperties(), cbook.is_math_text(text), self.get_figure(root=True).dpi)\n    return math.ceil(w)",
    "docstring": "Return the width of a given text string, in pixels.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\text.py",
    "ast_data": "FunctionDef name:_get_rendered_text_width arg:self arg:text arguments arg arg Assign Call Call Call Call Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "_is_comparable_dtype",
    "source_code": "def _is_comparable_dtype(self, dtype: DtypeObj) -> bool:\n    return self.dtype == dtype",
    "docstring": "Can we compare values of the given dtype to our own?",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\indexes\\period.py",
    "ast_data": "FunctionDef name:_is_comparable_dtype arg:self arg:dtype arguments arg arg Return return:yes Compare"
  },
  {
    "library": "pytorch",
    "name": "render_ir_graph",
    "source_code": "def render_ir_graph(tensors):\n    return torch._C._lazy._get_tensors_dot(tensors)",
    "docstring": "Return a text dump of the LTC IR graph in dot format for the tensors. The text can be processed by tools like dot to be rendered in pdf,png etc.",
    "type": "function",
    "file_path": "pytorch\\torch\\_lazy\\debug.py",
    "ast_data": "FunctionDef name:render_ir_graph arg:tensors arguments arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_dedup_weights",
    "source_code": "def _dedup_weights(self, weights):\n    output, seen_ids = ([], set())\n    for w in weights:\n        if id(w) not in seen_ids:\n            output.append(w)\n            seen_ids.add(id(w))\n    return output",
    "docstring": "Dedupe weights while maintaining order as much as possible.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\base_layer.py",
    "ast_data": "FunctionDef name:_dedup_weights arg:self arg:weights arguments arg arg Assign Call For If Compare Call Call Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "tfr_gen",
    "source_code": "def tfr_gen(func, op_defs):\n    mlir_code, _ = TfrGen(op_defs).transform(func, None)\n    assert tfr.verify(mlir_code), 'mlir code not verified: {}'.format(mlir_code)\n    return mlir_code",
    "docstring": "Parse a function and emit the TFR functions.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\compiler\\mlir\\tfr\\python\\tfr_gen.py",
    "ast_data": "FunctionDef name:tfr_gen arg:func arg:op_defs arguments arg arg Assign Call Call Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "MixtureSameFamilyConstraint",
    "source_code": "class MixtureSameFamilyConstraint(Constraint):\n\n    def __init__(self, base_constraint):\n        assert isinstance(base_constraint, Constraint)\n        self.base_constraint = base_constraint\n        super().__init__()\n\n    @property\n    def is_discrete(self) -> bool:\n        return self.base_constraint.is_discrete\n\n    @property\n    def event_dim(self) -> int:\n        return self.base_constraint.event_dim\n\n    def check(self, value):\n        unsqueezed_value = value.unsqueeze(-1 - self.event_dim)\n        result = self.base_constraint.check(unsqueezed_value)\n        if value.dim() < self.event_dim:\n            raise ValueError(f'Expected value.dim() >= {self.event_dim} but got {value.dim()}')\n        num_dim_to_keep = value.dim() - self.event_dim\n        result = result.reshape(result.shape[:num_dim_to_keep] + (-1,))\n        result = result.all(-1)\n        return result\n\n    def __repr__(self):\n        return f'{self.__class__.__name__}({repr(self.base_constraint)})'",
    "docstring": "Constraint for the :class: distribution that adds back the rightmost batch dimension before performing the validity check with the component distribution constraint. Args: base_constraint: The `~torch.distribution.MixtureSameFamily` distribution.",
    "type": "class",
    "file_path": "pytorch\\torch\\distributions\\constraints.py",
    "ast_data": "ClassDef name:MixtureSameFamilyConstraint FunctionDef name:__init__ arg:self arg:base_constraint arguments arg arg Call Assign Call Call FunctionDef name:is_discrete arg:self arguments arg Return return:yes FunctionDef name:event_dim arg:self arguments arg Return return:yes FunctionDef name:check arg:self arg:value arguments arg arg Assign Call Assign Call If Compare Call Raise Call Call Assign Call Assign Call Assign Call Return return:yes FunctionDef name:__repr__ arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "authlib",
    "name": "get_rsa_public_key",
    "source_code": "def get_rsa_public_key(self):\n    raise NotImplementedError()",
    "docstring": "A method to get the RSA public key for RSA-SHA1 signature method. For instance, the value is saved on column ``:: def get_rsa_public_key(self): return self.rsa_public_key",
    "type": "method",
    "file_path": "authlib\\authlib\\oauth1\\rfc5849\\models.py",
    "ast_data": "FunctionDef name:get_rsa_public_key arg:self arguments arg Raise Call"
  },
  {
    "library": "tensorflow",
    "name": "_structured_tensor_like",
    "source_code": "def _structured_tensor_like(t):\n    if isinstance(t, tensor_lib.Tensor):\n        return _structured_tensor_from_dense_tensor(t)\n    if ragged_tensor.is_ragged(t):\n        return StructuredTensor.from_fields({}, shape=t.get_shape(), row_partitions=_all_nested_row_partitions(t))\n    return StructuredTensor.from_fields({}, shape=t.shape, row_partitions=t.row_partitions, nrows=t.nrows())",
    "docstring": "Create a StructuredTensor with the shape of a (composite) tensor.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\structured\\structured_array_ops.py",
    "ast_data": "FunctionDef name:_structured_tensor_like arg:t arguments arg If Call Return return:yes Call If Call Return return:yes Call Call Call Return return:yes Call Call"
  },
  {
    "library": "django",
    "name": "serve",
    "source_code": "def serve(request, path, insecure=False, **kwargs):\n    if not settings.DEBUG and (not insecure):\n        raise Http404\n    normalized_path = posixpath.normpath(path).lstrip('/')\n    absolute_path = finders.find(normalized_path)\n    if not absolute_path:\n        if path.endswith('/') or path == '':\n            raise Http404('Directory indexes are not allowed here.')\n        raise Http404(\"'%s' could not be found\" % path)\n    document_root, path = os.path.split(absolute_path)\n    return static.serve(request, path, document_root=document_root, **kwargs)",
    "docstring": "Serve static files below a given point in the directory structure or from locations inferred from the staticfiles finders. To use, put a URL pattern such as:: from django.contrib.staticfiles import views path('', views.serve) in your URLconf. It uses the django.views.static.serve() view to serve the found files.",
    "type": "function",
    "file_path": "django\\django\\contrib\\staticfiles\\views.py",
    "ast_data": "FunctionDef name:serve arg:request arg:path arg:insecure arguments arg arg arg arg If BoolOp Raise Assign Call Call Assign Call If If BoolOp Call Compare Raise Call Raise Call Assign Call Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "direction",
    "source_code": "@property\ndef direction(self):\n    az = np.radians(90 - self.azdeg)\n    alt = np.radians(self.altdeg)\n    return np.array([np.cos(az) * np.cos(alt), np.sin(az) * np.cos(alt), np.sin(alt)])",
    "docstring": "The unit vector direction towards the light source.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\colors.py",
    "ast_data": "FunctionDef name:direction arg:self arguments arg Assign Call Assign Call Return return:yes Call Call Call Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "get_summary_description",
    "source_code": "@tf_export(v1=['summary.get_summary_description'])\ndef get_summary_description(node_def):\n    if node_def.op != 'TensorSummary':\n        raise ValueError(\"Can't get_summary_description on %s\" % node_def.op)\n    description_str = _compat.as_str_any(node_def.attr['description'].s)\n    summary_description = SummaryDescription()\n    _json_format.Parse(description_str, summary_description)\n    return summary_description",
    "docstring": "Given a TensorSummary node_def, retrieve its SummaryDescription. When a Summary op is instantiated, a SummaryDescription of associated metadata is stored in its NodeDef. This method retrieves the description. Args: node_def: the node_def_pb2.NodeDef of a TensorSummary op Returns: a summary_pb2.SummaryDescription Raises: ValueError: if the node is not a summary op. @compatibility(eager) Not compatible with eager execution. To write TensorBoard summaries under eager execution, use instead. @end_compatibility",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\summary\\summary.py",
    "ast_data": "FunctionDef name:get_summary_description arg:node_def arguments arg If Compare Raise Call Assign Call Assign Call Call Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "nearest_workday",
    "source_code": "def nearest_workday(dt: datetime) -> datetime:\n    if dt.weekday() == 5:\n        return dt - timedelta(1)\n    elif dt.weekday() == 6:\n        return dt + timedelta(1)\n    return dt",
    "docstring": "If holiday falls on Saturday, use day before (Friday) instead; if holiday falls on Sunday, use day thereafter (Monday) instead.",
    "type": "function",
    "file_path": "pandas\\pandas\\tseries\\holiday.py",
    "ast_data": "FunctionDef name:nearest_workday arg:dt arguments arg If Compare Call Return return:yes Call If Compare Call Return return:yes Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_getattr_by_name",
    "source_code": "@staticmethod\ndef _getattr_by_name(root: object, name: str) -> object:\n    while '.' in name:\n        mod, name = name.split('.', 1)\n        root = getattr(root, mod)\n    return getattr(root, name)",
    "docstring": "Like but supports dotted names.",
    "type": "method",
    "file_path": "pytorch\\torch\\fx\\_graph_pickler.py",
    "ast_data": "FunctionDef name:_getattr_by_name arg:root arg:name arguments arg arg While Compare Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "django",
    "name": "make_cursor",
    "source_code": "def make_cursor(self, cursor):\n    return utils.CursorWrapper(cursor, self)",
    "docstring": "Create a cursor without debug logging.",
    "type": "method",
    "file_path": "django\\django\\db\\backends\\base\\base.py",
    "ast_data": "FunctionDef name:make_cursor arg:self arg:cursor arguments arg arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, layouts: Optional[sparse_core_layout_pb2.SparseCoreTableLayouts]=None):\n    self._checkpoint_layouts = {}\n    self._checkpoint_to_reshard_callback = {}\n    if layouts:\n        for layout in layouts.tables:\n            self._checkpoint_layouts[layout.table_name] = layout",
    "docstring": "An adapter for TPUEmbeddingV3 checkpoints. Constructs an adapter for TPUEmbeddingV3 to handle layout changes. between checkpoint values and embedding object being restored. Args: layouts: The target layouts required.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\tpu\\tpu_embedding_v3_checkpoint_adapter.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:layouts arguments arg arg Assign Assign If For Assign"
  },
  {
    "library": "tensorflow",
    "name": "init_var_from_numpy",
    "source_code": "def init_var_from_numpy(input_var, numpy_input, session):\n    with ops.init_scope():\n        if context.executing_eagerly():\n            input_var.assign(numpy_input)\n            return\n        assert session is not None\n        session.run(input_var.initializer)\n        start_placeholder = array_ops.placeholder(dtypes.int64, ())\n        end_placeholder = array_ops.placeholder(dtypes.int64, ())\n        slice_placeholder = array_ops.placeholder(input_var.dtype)\n        assign_slice_op = input_var[start_placeholder:end_placeholder].assign(slice_placeholder)\n        byte_size_per_batch_element = np.prod(numpy_input.shape[1:]) * input_var.dtype.size\n        batch_size_per_slice = int(np.ceil((64 << 20) / byte_size_per_batch_element))\n        start = 0\n        limit = numpy_input.shape[0]\n        while start < limit:\n            end = min(start + batch_size_per_slice, limit)\n            session.run(assign_slice_op, feed_dict={start_placeholder: start, end_placeholder: end, slice_placeholder: numpy_input[start:end]})\n            start = end",
    "docstring": "Initialize to using in graph mode.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\numpy_dataset.py",
    "ast_data": "FunctionDef name:init_var_from_numpy arg:input_var arg:numpy_input arg:session arguments arg arg arg With Call If Call Call Return return:no Compare Call Assign Call Assign Call Assign Call Assign Call Assign Call Assign Call Call Assign Assign While Compare Assign Call Call Assign"
  },
  {
    "library": "tensorflow",
    "name": "_num_elements",
    "source_code": "def _num_elements(self):\n    return math_ops.reduce_prod(self.inner_shape)",
    "docstring": "Number of elements in a shape. Returns: The number of elements in the shape.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\dynamic_ragged_shape.py",
    "ast_data": "FunctionDef name:_num_elements arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "numpy",
    "name": "getbufsize",
    "source_code": "@set_module('numpy')\ndef getbufsize():\n    return _get_extobj_dict()['bufsize']",
    "docstring": "Return the size of the buffer used in ufuncs. Returns ------- getbufsize : int Size of ufunc buffer in bytes. Examples -------- >>> import numpy as np >>> np.getbufsize() 8192",
    "type": "function",
    "file_path": "numpy\\numpy\\_core\\_ufunc_config.py",
    "ast_data": "FunctionDef name:getbufsize arguments Return return:yes Call Call"
  },
  {
    "library": "matplotlib",
    "name": "get_box_aspect",
    "source_code": "def get_box_aspect(self):\n    return self._box_aspect",
    "docstring": "Return the Axes box aspect, i.e. the ratio of height to width. The box aspect is `` (i.e. chosen depending on the available figure space) unless explicitly specified. See Also -------- matplotlib.axes.Axes.set_box_aspect for a description of box aspect. matplotlib.axes.Axes.set_aspect for a description of aspect handling.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\axes\\_base.py",
    "ast_data": "FunctionDef name:get_box_aspect arg:self arguments arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "BlockParameters",
    "source_code": "@dataclasses.dataclass\nclass BlockParameters:\n    shape: list[sympy.Expr] = dataclasses.field(default_factory=list)\n    block_shape: list[sympy.Expr] = dataclasses.field(default_factory=list)\n    strides: list[sympy.Expr] = dataclasses.field(default_factory=list)\n    offsets: list[sympy.Expr] = dataclasses.field(default_factory=list)\n\n    def __add__(self, other: BlockParameters) -> BlockParameters:\n        cls = type(self)\n        a, b = tuple((dataclasses.asdict(x) for x in (self, other)))\n        return cls(**{key: a[key] + b[key] for key in a})",
    "docstring": "Class representing ND block dimensions, for block pointer analysis.",
    "type": "class",
    "file_path": "pytorch\\torch\\_inductor\\codegen\\triton.py",
    "ast_data": "ClassDef name:BlockParameters Call Call Call Call FunctionDef name:__add__ arg:self arg:other arguments arg arg Assign Call Assign Call Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "train_function",
    "source_code": "def train_function(iterator):\n    for _ in math_ops.range(self._steps_per_execution):\n        outputs = step_function(self, iterator)\n    return outputs",
    "docstring": "Runs a training execution with multiple steps.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\training.py",
    "ast_data": "FunctionDef name:train_function arg:iterator arguments arg For Call Assign Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "format",
    "source_code": "def format(self):\n    return traceback.format_list(self.summary())",
    "docstring": "Formats a single torch._C._profiler.CapturedTraceback into a list of strings equivalent to the output of traceback.format_list. Note that if pass it CapturedTraceback with C++ traces, it is better not to use this function and use the batch formatting API format_captured_tbs to amortize the cost of symbolization",
    "type": "method",
    "file_path": "pytorch\\torch\\utils\\_traceback.py",
    "ast_data": "FunctionDef name:format arg:self arguments arg Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "pre_flatten_transform",
    "source_code": "@abstractmethod\ndef pre_flatten_transform(self, tensor: torch.Tensor) -> tuple[torch.Tensor, Optional[Any]]:\n    ...",
    "docstring": "E.g. converting `` to local tensor.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\fsdp\\_fsdp_extensions.py",
    "ast_data": "FunctionDef name:pre_flatten_transform arg:self arg:tensor arguments arg arg"
  },
  {
    "library": "numpy",
    "name": "LinalgSmallArrays",
    "source_code": "class LinalgSmallArrays(Benchmark):\n\n    def setup(self):\n        self.array_3_3 = np.eye(3) + np.arange(9.0).reshape((3, 3))\n        self.array_3 = np.arange(3.0)\n        self.array_5 = np.arange(5.0)\n        self.array_5_5 = np.reshape(np.arange(25.0), (5, 5))\n\n    def time_norm_small_array(self):\n        np.linalg.norm(self.array_5)\n\n    def time_det_small_array(self):\n        np.linalg.det(self.array_5_5)\n\n    def time_det_3x3(self):\n        np.linalg.det(self.array_3_3)\n\n    def time_solve_3x3(self):\n        np.linalg.solve(self.array_3_3, self.array_3)\n\n    def time_eig_3x3(self):\n        np.linalg.eig(self.array_3_3)",
    "docstring": "Test overhead of linalg methods for small arrays",
    "type": "class",
    "file_path": "numpy\\benchmarks\\benchmarks\\bench_linalg.py",
    "ast_data": "ClassDef name:LinalgSmallArrays FunctionDef name:setup arg:self arguments arg Assign Call Call Call Assign Call Assign Call Assign Call Call FunctionDef name:time_norm_small_array arg:self arguments arg Call FunctionDef name:time_det_small_array arg:self arguments arg Call FunctionDef name:time_det_3x3 arg:self arguments arg Call FunctionDef name:time_solve_3x3 arg:self arguments arg Call FunctionDef name:time_eig_3x3 arg:self arguments arg Call"
  },
  {
    "library": "kornia",
    "name": "vgg16_bn",
    "source_code": "def vgg16_bn(*, weights: Optional[Any]=None, **kwargs: Any) -> VGG:\n    return _vgg('D', True, weights, **kwargs)",
    "docstring": "VGG-16-BN from __. Args: weights (:class:, optional): The pretrained weights to use. See :class: below for more details, and possible values. By default, no pre-trained weights are used. progress (bool, optional): If True, displays a progress bar of the download to stderr. Default is True. **kwargs: parameters passed to the `source code `_ for more details about this class. .. autoclass:: torchvision.models.VGG16_BN_Weights :members:",
    "type": "function",
    "file_path": "kornia\\kornia\\feature\\dedode\\vgg.py",
    "ast_data": "FunctionDef name:vgg16_bn arguments arg arg Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "_translate_header",
    "source_code": "def _translate_header(self, sparsify_cols: bool, max_cols: int):\n    col_lengths = _get_level_lengths(self.columns, sparsify_cols, max_cols, self.hidden_columns)\n    clabels = self.data.columns.tolist()\n    if self.data.columns.nlevels == 1:\n        clabels = [[x] for x in clabels]\n    clabels = list(zip(*clabels))\n    head = []\n    for r, hide in enumerate(self.hide_columns_):\n        if hide or not clabels:\n            continue\n        header_row = self._generate_col_header_row((r, clabels), max_cols, col_lengths)\n        head.append(header_row)\n    if self.data.index.names and com.any_not_none(*self.data.index.names) and (not all(self.hide_index_)) and (not self.hide_index_names):\n        index_names_row = self._generate_index_names_row(clabels, max_cols, col_lengths)\n        head.append(index_names_row)\n    return head",
    "docstring": "Build each within table as a list Using the structure: +----------------------------+---------------+---------------------------+ | index_blanks ... | column_name_0 | column_headers (level_0) | 1) | .. | .. | .. | | index_blanks ... | column_name_n | column_headers (level_n) | +----------------------------+---------------+---------------------------+ 2) | index_names (level_0 to level_n) ... | column_blanks ... | +----------------------------+---------------+---------------------------+ Parameters ---------- sparsify_cols : bool Whether column_headers section will add colspan attributes (>1) to elements. max_cols : int Maximum number of columns to render. If exceeded will contain filler. Returns ------- head : list The associated HTML elements needed for template rendering.",
    "type": "method",
    "file_path": "pandas\\pandas\\io\\formats\\style_render.py",
    "ast_data": "FunctionDef name:_translate_header arg:self arg:sparsify_cols arg:max_cols arguments arg arg arg Assign Call Assign Call If Compare Assign Assign Call Call Assign For Call If BoolOp Assign Call Call If BoolOp Call Call Assign Call Call Return return:yes"
  },
  {
    "library": "django",
    "name": "all_parents",
    "source_code": "@cached_property\ndef all_parents(self):\n    result = OrderedSet(self.parents)\n    for parent in self.parents:\n        for ancestor in parent._meta.all_parents:\n            result.add(ancestor)\n    return tuple(result)",
    "docstring": "Return all the ancestors of this model as a tuple ordered by MRO. Useful for determining if something is an ancestor, regardless of lineage.",
    "type": "method",
    "file_path": "django\\django\\db\\models\\options.py",
    "ast_data": "FunctionDef name:all_parents arg:self arguments arg Assign Call For For Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "is_flag_on",
    "source_code": "def is_flag_on(self, flag_name):\n    found, flag_value = self.get_flag_value(flag_name)\n    if not found:\n        return False\n    if flag_value is None:\n        return True\n    flag_value = flag_value.lower()\n    enabled = flag_value in ['1', 't', 'true', 'y', 'yes']\n    return enabled",
    "docstring": "Returns True if the given flag is on.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\tpu\\tensor_tracer_flags.py",
    "ast_data": "FunctionDef name:is_flag_on arg:self arg:flag_name arguments arg arg Assign Call If Return return:yes If Compare Return return:yes Assign Call Assign Compare Return return:yes"
  },
  {
    "library": "sphinx",
    "name": "isbuiltin",
    "source_code": "def isbuiltin(obj: Any) -> TypeIs[types.BuiltinFunctionType]:\n    return inspect.isbuiltin(unpartial(obj))",
    "docstring": "Check if the object is a built-in function or method. Partial objects are unwrapped before checking them. .. seealso:: :external+python:func:",
    "type": "function",
    "file_path": "sphinx\\sphinx\\util\\inspect.py",
    "ast_data": "FunctionDef name:isbuiltin arg:obj arguments arg Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "ListFilesBenchmark",
    "source_code": "class ListFilesBenchmark(benchmark_base.DatasetBenchmarkBase):\n\n    def benchmark_nested_directories(self):\n        tmp_dir = tempfile.mkdtemp()\n        width = 1024\n        depth = 16\n        for i in range(width):\n            for j in range(depth):\n                new_base = os.path.join(tmp_dir, str(i), *[str(dir_name) for dir_name in range(j)])\n                os.makedirs(new_base)\n                child_files = ['a.py', 'b.pyc'] if j < depth - 1 else ['c.txt', 'd.log']\n                for f in child_files:\n                    filename = os.path.join(new_base, f)\n                    open(filename, 'w').close()\n        patterns = [os.path.join(tmp_dir, os.path.join(*['**' for _ in range(depth)]), suffix) for suffix in ['*.txt', '*.log']]\n        num_elements = width * 2\n        dataset = dataset_ops.Dataset.list_files(patterns)\n        self.run_and_report_benchmark(dataset=dataset, iters=3, num_elements=num_elements, extras={'model_name': 'list_files.benchmark.1', 'parameters': '%d.%d' % (width, depth)}, name='nested_directory(%d*%d)' % (width, depth))\n        shutil.rmtree(tmp_dir, ignore_errors=True)",
    "docstring": "Benchmarks for .",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\data\\benchmarks\\list_files_benchmark.py",
    "ast_data": "ClassDef name:ListFilesBenchmark FunctionDef name:benchmark_nested_directories arg:self arguments arg Assign Call Assign Assign For Call For Call Assign Call Call Call Call Call Assign Compare For Assign Call Call Call Assign Call Call Call Assign Assign Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "TritonBundlerMetadata",
    "source_code": "@dataclasses.dataclass(frozen=True)\nclass TritonBundlerMetadata:\n    cached_kernel_names: list[str]\n    statically_launched_kernel_names: list[str]",
    "docstring": "Metadata used for instrumentation",
    "type": "class",
    "file_path": "pytorch\\torch\\_inductor\\triton_bundler.py",
    "ast_data": "ClassDef name:TritonBundlerMetadata Call"
  },
  {
    "library": "pytorch",
    "name": "annotate",
    "source_code": "def annotate(self, model: torch.fx.GraphModule) -> torch.fx.GraphModule:\n    for quantizer in self.quantizers:\n        quantizer.annotate(model)\n        self._record_and_validate_annotations(model, quantizer)\n    return model",
    "docstring": "just handling global spec for now",
    "type": "method",
    "file_path": "pytorch\\torch\\ao\\quantization\\quantizer\\composable_quantizer.py",
    "ast_data": "FunctionDef name:annotate arg:self arg:model arguments arg arg For Call Call Return return:yes"
  },
  {
    "library": "scipy",
    "name": "idstn",
    "source_code": "def idstn(x, type=2, shape=None, axes=None, norm=None, overwrite_x=False):\n    type = _inverse_typemap[type]\n    shape = _good_shape(x, shape, axes)\n    return _pocketfft.dstn(x, type, shape, axes, norm, overwrite_x)",
    "docstring": "Return multidimensional Discrete Sine Transform along the specified axes. Parameters ---------- x : array_like The input array. type : {1, 2, 3, 4}, optional Type of the DST (see Notes). Default type is 2. shape : int or array_like of ints or None, optional The shape of the result. If both and (see below) are None, is `shapeaxesshape`shape[i] >> import numpy as np >>> from scipy.fftpack import dstn, idstn >>> rng = np.random.default_rng() >>> y = rng.standard_normal((16, 16)) >>> np.allclose(y, idstn(dstn(y, norm='ortho'), norm='ortho')) True",
    "type": "function",
    "file_path": "scipy\\scipy\\fftpack\\_realtransforms.py",
    "ast_data": "FunctionDef name:idstn arg:x arg:type arg:shape arg:axes arg:norm arg:overwrite_x arguments arg arg arg arg arg arg Assign Assign Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, param_specs, non_tensor_params, prefer_static_fields):\n    self._param_specs = param_specs\n    self._non_tensor_params = non_tensor_params\n    self._prefer_static_fields = prefer_static_fields",
    "docstring": "Initializes a new . Args: param_specs: Python of instances that describe kwargs to the 's constructor that are -like or subclasses. non_tensor_params: Python containing non- and non- kwargs to the 's constructor. prefer_static_fields: Python of strings corresponding to the names of -like args to the s constructor that may be stored as static values, if known. These are typically shapes, indices, or axis values.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\linalg\\linear_operator.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:param_specs arg:non_tensor_params arg:prefer_static_fields arguments arg arg arg arg Assign Assign Assign"
  },
  {
    "library": "django",
    "name": "get_xframe_options_value",
    "source_code": "def get_xframe_options_value(self, request, response):\n    return getattr(settings, 'X_FRAME_OPTIONS', 'DENY').upper()",
    "docstring": "Get the value to set for the X_FRAME_OPTIONS header. Use the value from the X_FRAME_OPTIONS setting, or 'DENY' if not set. This method can be overridden if needed, allowing it to vary based on the request or response.",
    "type": "method",
    "file_path": "django\\django\\middleware\\clickjacking.py",
    "ast_data": "FunctionDef name:get_xframe_options_value arg:self arg:request arg:response arguments arg arg arg Return return:yes Call Call"
  },
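The entry above describes the hook Django exposes for varying the X-Frame-Options header. A minimal sketch of overriding it per request; the `/embeddable/` URL prefix is a hypothetical example, not part of Django:

```python
# Sketch only: subclass the real middleware and vary the header per request.
from django.middleware.clickjacking import XFrameOptionsMiddleware

class PerPathXFrameOptionsMiddleware(XFrameOptionsMiddleware):
    def get_xframe_options_value(self, request, response):
        # Allow same-origin framing only under a hypothetical prefix.
        if request.path.startswith("/embeddable/"):
            return "SAMEORIGIN"
        return super().get_xframe_options_value(request, response)
```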
  {
    "library": "pandas",
    "name": "add_suffix",
    "source_code": "@final\ndef add_suffix(self, suffix: str, axis: Axis | None=None) -> Self:\n    f = lambda x: f'{x}{suffix}'\n    axis_name = self._info_axis_name\n    if axis is not None:\n        axis_name = self._get_axis_name(axis)\n    mapper = {axis_name: f}\n    return self._rename(**mapper)",
    "docstring": "Suffix labels with string . For Series, the row labels are suffixed. For DataFrame, the column labels are suffixed. Parameters ---------- suffix : str The string to add after each label. axis : {0 or 'index', 1 or 'columns', None}, default None Axis to add suffix on .. versionadded:: 2.0.0 Returns ------- Series or DataFrame New Series or DataFrame with updated labels. See Also -------- Series.add_prefix: Prefix row labels with string . DataFrame.add_prefix: Prefix column labels with string . Examples -------- >>> s = pd.Series([1, 2, 3, 4]) >>> s 0 1 1 2 2 3 3 4 dtype: int64 >>> s.add_suffix(\"_item\") 0_item 1 1_item 2 2_item 3 3_item 4 dtype: int64 >>> df = pd.DataFrame({\"A\": [1, 2, 3, 4], \"B\": [3, 4, 5, 6]}) >>> df A B 0 1 3 1 2 4 2 3 5 3 4 6 >>> df.add_suffix(\"_col\") A_col B_col 0 1 3 1 2 4 2 3 5 3 4 6",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\generic.py",
    "ast_data": "FunctionDef name:add_suffix arg:self arg:suffix arg:axis arguments arg arg arg Assign arguments arg Assign If Compare Assign Call Assign Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_create_device_dataset",
    "source_code": "def _create_device_dataset(prototype_ds, incarnation_id, prefetch_buffer_size, experimental_slack):\n    ds = _ReincarnatedPerDeviceGenerator(prototype_ds, incarnation_id)\n    if prefetch_buffer_size > 0:\n        if experimental_slack:\n            ds = prefetch_op._PrefetchDataset(ds, prefetch_buffer_size, slack_period=1)\n        else:\n            ds = ds.prefetch(prefetch_buffer_size, name='device_prefetch')\n    return ds",
    "docstring": "Uses _prototype_device_datasets[i] to build a dataset for the device.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\data\\ops\\multi_device_iterator_ops.py",
    "ast_data": "FunctionDef name:_create_device_dataset arg:prototype_ds arg:incarnation_id arg:prefetch_buffer_size arg:experimental_slack arguments arg arg arg arg Assign Call If Compare If Assign Call Assign Call Return return:yes"
  },
  {
    "library": "numpy",
    "name": "_parse_policy_autovec",
    "source_code": "def _parse_policy_autovec(self, has_baseline, final_targets, extra_flags):\n    skipped = []\n    for tar in final_targets[:]:\n        if isinstance(tar, str):\n            can = self.feature_can_autovec(tar)\n        else:\n            can = all([self.feature_can_autovec(t) for t in tar])\n        if not can:\n            final_targets.remove(tar)\n            skipped.append(tar)\n    if skipped:\n        self.dist_log('skip non auto-vectorized features', skipped)\n    return (has_baseline, final_targets, extra_flags)",
    "docstring": "skip features that has no auto-vectorized support by compiler",
    "type": "method",
    "file_path": "numpy\\numpy\\distutils\\ccompiler_opt.py",
    "ast_data": "FunctionDef name:_parse_policy_autovec arg:self arg:has_baseline arg:final_targets arg:extra_flags arguments arg arg arg arg Assign For If Call Assign Call Assign Call Call If Call Call If Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "get_positive_axis",
    "source_code": "def get_positive_axis(axis, ndims, axis_name='axis', ndims_name='ndims'):\n    if not isinstance(axis, int):\n        raise TypeError(f'{axis_name} must be an int; got {type(axis).__name__}')\n    if ndims is not None:\n        if 0 <= axis < ndims:\n            return axis\n        elif -ndims <= axis < 0:\n            return axis + ndims\n        else:\n            raise ValueError(f'{axis_name}={axis} out of bounds: expected {-ndims}<={axis_name}<{ndims}')\n    elif axis < 0:\n        raise ValueError(f'{axis_name}={axis} may only be negative if {ndims_name} is statically known.')\n    return axis",
    "docstring": "Validate an parameter, and normalize it to be positive. If is known (i.e., not ), then check that is in the range ) or (otherwise). If is not known, and is positive, then return it as-is. If is not known, and is negative, then report an error. Args: axis: An integer constant ndims: An integer constant, or axis_name: The name of (for error messages). ndims_name: The name of (for error messages). Returns: The normalized value. Raises: ValueError: If is out-of-bounds, or if is negative and .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\array_ops.py",
    "ast_data": "FunctionDef name:get_positive_axis arg:axis arg:ndims arg:axis_name arg:ndims_name arguments arg arg arg arg If Call Raise Call Call If Compare If Compare Return return:yes If Compare Return return:yes Raise Call If Compare Raise Call Return return:yes"
  },
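The normalization rule documented above is easy to state outside TensorFlow. A standalone sketch (hypothetical helper, not the library function):

```python
# Hypothetical re-implementation of the axis-normalization rule described
# above: negative axes count from the end, out-of-range axes raise.
def normalize_axis(axis: int, ndims: int) -> int:
    if not -ndims <= axis < ndims:
        raise ValueError(f"axis={axis} out of bounds: expected {-ndims}<=axis<{ndims}")
    return axis + ndims if axis < 0 else axis

assert normalize_axis(2, 4) == 2    # already positive: returned as-is
assert normalize_axis(-1, 4) == 3   # negative axis counts from the end
```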
  {
    "library": "tensorflow",
    "name": "DebuggedDevice",
    "source_code": "class DebuggedDevice:\n\n    def __init__(self, device_name, device_id):\n        self._device_name = device_name\n        self._device_id = device_id\n\n    @property\n    def device_name(self):\n        return self._device_name\n\n    @property\n    def device_id(self):\n        return self._device_id\n\n    def to_json(self):\n        return {'device_name': self._device_name, 'device_id': self._device_id}",
    "docstring": "Debugger data regarding a device involved in the debugged program. Properties: device_name: Name of the device, as a str. device_id: An integer ID for the device, unique for each device within the scope of the debugged TensorFlow program.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\debug\\lib\\debug_events_reader.py",
    "ast_data": "ClassDef name:DebuggedDevice FunctionDef name:__init__ arg:self arg:device_name arg:device_id arguments arg arg arg Assign Assign FunctionDef name:device_name arg:self arguments arg Return return:yes FunctionDef name:device_id arg:self arguments arg Return return:yes FunctionDef name:to_json arg:self arguments arg Return return:yes"
  },
  {
    "library": "scipy",
    "name": "deprecate_cython_api",
    "source_code": "def deprecate_cython_api(module, routine_name, new_name=None, message=None):\n    old_name = f'{module.__name__}.{routine_name}'\n    if new_name is None:\n        depdoc = f'`{old_name}` is deprecated!'\n    else:\n        depdoc = f'`{old_name}` is deprecated, use `{new_name}` instead!'\n    if message is not None:\n        depdoc += '\\n' + message\n    d = module.__pyx_capi__\n    j = 0\n    has_fused = False\n    while True:\n        fused_name = f'__pyx_fuse_{j}{routine_name}'\n        if fused_name in d:\n            has_fused = True\n            d[_DeprecationHelperStr(fused_name, depdoc)] = d.pop(fused_name)\n            j += 1\n        else:\n            break\n    if not has_fused:\n        d[_DeprecationHelperStr(routine_name, depdoc)] = d.pop(routine_name)",
    "docstring": "Deprecate an exported cdef function in a public Cython API module. Only functions can be deprecated; typedefs etc. cannot. Parameters ---------- module : module Public Cython API module (e.g. scipy.linalg.cython_blas). routine_name : str Name of the routine to deprecate. May also be a fused-type routine (in which case its all specializations are deprecated). new_name : str New name to include in the deprecation warning message message : str Additional text in the deprecation warning message Examples -------- Usually, this function would be used in the top-level of the module `` file: >>> from scipy._lib.deprecation import deprecate_cython_api >>> import scipy.linalg.cython_blas as mod >>> deprecate_cython_api(mod, \"dgemm\", \"dgemm_new\", ... message=\"Deprecated in Scipy 1.5.0\") >>> del deprecate_cython_api, mod After this, Cython modules that use the deprecated function emit a deprecation warning when they are imported.",
    "type": "function",
    "file_path": "scipy\\scipy\\_lib\\deprecation.py",
    "ast_data": "FunctionDef name:deprecate_cython_api arg:module arg:routine_name arg:new_name arg:message arguments arg arg arg arg Assign If Compare Assign Assign If Compare Assign Assign Assign While Assign If Compare Assign Assign Call Call If Assign Call Call"
  },
  {
    "library": "pytorch",
    "name": "_gcd_import",
    "source_code": "def _gcd_import(self, name, package=None, level=0):\n    _sanity_check(name, package, level)\n    if level > 0:\n        name = _resolve_name(name, package, level)\n    return self._find_and_load(name)",
    "docstring": "Import and return the module based on its name, the package the call is being made from, and the level adjustment. This function represents the greatest common denominator of functionality between import_module and __import__. This includes setting __package__ if the loader did not.",
    "type": "method",
    "file_path": "pytorch\\torch\\package\\package_importer.py",
    "ast_data": "FunctionDef name:_gcd_import arg:self arg:name arg:package arg:level arguments arg arg arg arg Call If Compare Assign Call Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "is_numeric_v_string_like",
    "source_code": "def is_numeric_v_string_like(a: ArrayLike, b) -> bool:\n    is_a_array = isinstance(a, np.ndarray)\n    is_b_array = isinstance(b, np.ndarray)\n    is_a_numeric_array = is_a_array and a.dtype.kind in ('u', 'i', 'f', 'c', 'b')\n    is_b_numeric_array = is_b_array and b.dtype.kind in ('u', 'i', 'f', 'c', 'b')\n    is_a_string_array = is_a_array and a.dtype.kind in ('S', 'U')\n    is_b_string_array = is_b_array and b.dtype.kind in ('S', 'U')\n    is_b_scalar_string_like = not is_b_array and isinstance(b, str)\n    return is_a_numeric_array and is_b_scalar_string_like or (is_a_numeric_array and is_b_string_array) or (is_b_numeric_array and is_a_string_array)",
    "docstring": "Check if we are comparing a string-like object to a numeric ndarray. NumPy doesn't like to compare such objects, especially numeric arrays and scalar string-likes. Parameters ---------- a : array-like, scalar The first object to check. b : array-like, scalar The second object to check. Returns ------- boolean Whether we return a comparing a string-like object to a numeric array. Examples -------- >>> is_numeric_v_string_like(np.array([1]), \"foo\") True >>> is_numeric_v_string_like(np.array([1, 2]), np.array([\"foo\"])) True >>> is_numeric_v_string_like(np.array([\"foo\"]), np.array([1, 2])) True >>> is_numeric_v_string_like(np.array([1]), np.array([2])) False >>> is_numeric_v_string_like(np.array([\"foo\"]), np.array([\"foo\"])) False",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\dtypes\\common.py",
    "ast_data": "FunctionDef name:is_numeric_v_string_like arg:a arg:b arguments arg arg Assign Call Assign Call Assign BoolOp Compare Assign BoolOp Compare Assign BoolOp Compare Assign BoolOp Compare Assign BoolOp Call Return return:yes BoolOp BoolOp BoolOp BoolOp"
  },
  {
    "library": "matplotlib",
    "name": "_DOF_estimator_user",
    "source_code": "class _DOF_estimator_user(_DOF_estimator):\n\n    def compute_dz(self, dz):\n        dzdx, dzdy = dz\n        dzdx = dzdx * self._unit_x\n        dzdy = dzdy * self._unit_y\n        return np.vstack([dzdx, dzdy]).T",
    "docstring": "dz is imposed by user; accounts for scaling if any.",
    "type": "class",
    "file_path": "matplotlib\\lib\\matplotlib\\tri\\_triinterpolate.py",
    "ast_data": "ClassDef name:_DOF_estimator_user FunctionDef name:compute_dz arg:self arg:dz arguments arg arg Assign Assign Assign Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "_get_join_target",
    "source_code": "@final\ndef _get_join_target(self) -> np.ndarray:\n    if isinstance(self._values, BaseMaskedArray):\n        return self._values._data\n    elif isinstance(self._values, ArrowExtensionArray):\n        return self._values.to_numpy()\n    target = self._get_engine_target()\n    if not isinstance(target, np.ndarray):\n        raise ValueError('_can_use_libjoin should return False.')\n    return target",
    "docstring": "Get the ndarray or ExtensionArray that we can pass to the join functions.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\indexes\\base.py",
    "ast_data": "FunctionDef name:_get_join_target arg:self arguments arg If Call Return return:yes If Call Return return:yes Call Assign Call If Call Raise Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "close",
    "source_code": "@property\ndef close(self) -> timedelta:\n    return self._close",
    "docstring": "Get the close timeout.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\elastic\\rendezvous\\dynamic_rendezvous.py",
    "ast_data": "FunctionDef name:close arg:self arguments arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_issubtype_with_constraints",
    "source_code": "def _issubtype_with_constraints(variant, constraints, recursive=True):\n    if variant in constraints:\n        return True\n    vs = _decompose_type(variant, to_list=False)\n    if vs is not None:\n        return all((_issubtype_with_constraints(v, constraints, recursive) for v in vs))\n    if hasattr(variant, '__origin__') and variant.__origin__ is not None:\n        v_origin = variant.__origin__\n        v_args = getattr(variant, '__args__', None)\n    else:\n        v_origin = variant\n        v_args = None\n    for constraint in constraints:\n        cs = _decompose_type(constraint, to_list=False)\n        if cs is not None:\n            if _issubtype_with_constraints(variant, cs, recursive):\n                return True\n        elif hasattr(constraint, '__origin__') and constraint.__origin__ is not None:\n            c_origin = constraint.__origin__\n            if v_origin == c_origin:\n                if not recursive:\n                    return True\n                c_args = getattr(constraint, '__args__', None)\n                if c_args is None or len(c_args) == 0:\n                    return True\n                if v_args is not None and len(v_args) == len(c_args) and all((issubtype(v_arg, c_arg) for v_arg, c_arg in zip(v_args, c_args))):\n                    return True\n        elif v_origin == constraint:\n            return True\n    return False",
    "docstring": "Check if the variant is a subtype of either one from constraints. For composite types like and with bounds, they would be expanded for testing.",
    "type": "function",
    "file_path": "pytorch\\torch\\utils\\data\\datapipes\\_typing.py",
    "ast_data": "FunctionDef name:_issubtype_with_constraints arg:variant arg:constraints arg:recursive arguments arg arg arg If Compare Return return:yes Assign Call If Compare Return return:yes Call Call If BoolOp Call Compare Assign Assign Call Assign Assign For Assign Call If Compare If Call Return return:yes If BoolOp Call Compare Assign If Compare If Return return:yes Assign Call If BoolOp Compare Compare Call Return return:yes If BoolOp Compare Compare Call Call Call Call Call Return return:yes If Compare Return return:yes Return return:yes"
  },
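The checker above leans on `__origin__`/`__args__` introspection of typing constructs. A small demonstration using the standard `typing` helpers (not PyTorch-specific):

```python
# The generic-type introspection the subtype check relies on: origin is the
# runtime container class, args are the type parameters.
from typing import List, Union, get_args, get_origin

print(get_origin(List[int]))         # <class 'list'>
print(get_args(List[int]))           # (<class 'int'>,)
print(get_args(Union[int, str]))     # (<class 'int'>, <class 'str'>)
```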
  {
    "library": "sphinx",
    "name": "cell_width",
    "source_code": "def cell_width(self, cell: Cell, source: list[int]) -> int:\n    if cell.row is None or cell.col is None:\n        msg = 'Cell co-ordinates have not been set'\n        raise ValueError(msg)\n    width = 0\n    for i in range(self[cell.row, cell.col].colspan):\n        width += source[cell.col + i]\n    return width + (cell.colspan - 1) * 3",
    "docstring": "Give the cell width, according to the given source (either ``). This takes into account cells spanning multiple columns.",
    "type": "method",
    "file_path": "sphinx\\sphinx\\writers\\text.py",
    "ast_data": "FunctionDef name:cell_width arg:self arg:cell arg:source arguments arg arg arg If BoolOp Compare Compare Assign Raise Call Assign For Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_move_states_to_device",
    "source_code": "def _move_states_to_device(params: list[nn.Parameter], buffers: list[torch.Tensor], device: torch.device) -> None:\n    for tensor in itertools.chain(params, buffers):\n        if tensor.device == device or tensor.device.type == 'meta':\n            continue\n        if isinstance(tensor, DTensor):\n            if (dtensor_mesh_type := tensor.device_mesh.device_type) != device.type:\n                raise ValueError(f'Requires DTensor to have mesh of the same type as the FSDP mesh but got {dtensor_mesh_type} for DTensor and {device.type} for FSDP')\n            raise AssertionError(f'Expects DTensor to be moved to {dtensor_mesh_type} but got {tensor.device}')\n        tensor_ = tensor\n        if is_traceable_wrapper_subclass(tensor_):\n            with torch.no_grad():\n                tensor_on_device = nn.Parameter(tensor.to(device))\n            torch.utils.swap_tensors(tensor, tensor_on_device)\n        else:\n            tensor.data = tensor.to(device)",
    "docstring": "We have FSDP move states to device for simpler and faster initialization since FSDP almost always uses CUDA for training. We move parameters/buffers rather than modules since modules to support ignoring parameters/buffers in the future.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\fsdp\\_fully_shard\\_fsdp_init.py",
    "ast_data": "FunctionDef name:_move_states_to_device arg:params arg:buffers arg:device arguments arg arg arg For Call If BoolOp Compare Compare If Call If Compare Raise Call Raise Call Assign If Call With Call Assign Call Call Call Assign Call"
  },
  {
    "library": "tensorflow",
    "name": "MaxPooling3D",
    "source_code": "class MaxPooling3D(keras_layers.MaxPooling3D, base.Layer):\n\n    def __init__(self, pool_size, strides, padding='valid', data_format='channels_last', name=None, **kwargs):\n        if strides is None:\n            raise ValueError('Argument `strides` must not be None.')\n        super(MaxPooling3D, self).__init__(pool_size=pool_size, strides=strides, padding=padding, data_format=data_format, name=name, **kwargs)",
    "docstring": "Max pooling layer for 3D inputs (e.g. volumes). Args: pool_size: An integer or tuple/list of 3 integers: (pool_depth, pool_height, pool_width) specifying the size of the pooling window. Can be a single integer to specify the same value for all spatial dimensions. strides: An integer or tuple/list of 3 integers, specifying the strides of the pooling operation. Can be a single integer to specify the same value for all spatial dimensions. padding: A string. The padding method, either 'valid' or 'same'. Case-insensitive. data_format: A string. The ordering of the dimensions in the inputs. (default) and are supported. corresponds to inputs with shape while corresponds to inputs with shape . name: A string, the name of the layer.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\legacy_tf_layers\\pooling.py",
    "ast_data": "ClassDef name:MaxPooling3D FunctionDef name:__init__ arg:self arg:pool_size arg:strides arg:padding arg:data_format arg:name arguments arg arg arg arg arg arg arg If Compare Raise Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "reproduce",
    "source_code": "def reproduce(self, configs: Sequence[ConfigType]) -> ResultType:\n    results = ResultType()\n    for conf in configs:\n        self._reproduce_single_helper(conf, results)\n    return results",
    "docstring": "entrypoint to reproduce any failure",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\fuzzer.py",
    "ast_data": "FunctionDef name:reproduce arg:self arg:configs arguments arg arg Assign Call For Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_check_param_device",
    "source_code": "def _check_param_device(param: torch.Tensor, old_param_device: Optional[int]) -> int:\n    support_device_types = ['cuda', torch._C._get_privateuse1_backend_name()]\n    if old_param_device is None:\n        old_param_device = param.get_device() if param.device.type in support_device_types else -1\n    else:\n        warn = False\n        if param.device.type in support_device_types:\n            warn = param.get_device() != old_param_device\n        else:\n            warn = old_param_device != -1\n        if warn:\n            raise TypeError('Found two parameters on different devices, this is currently not supported.')\n    return old_param_device",
    "docstring": "Check if the parameters are located on the same device. Currently, the conversion between model parameters and single vector form is not supported for multiple allocations, e.g. parameters in different GPUs/PrivateUse1s, or mixture of CPU/GPU/PrivateUse1. Args: param ([Tensor]): a Tensor of a parameter of a model old_param_device (int): the device where the first parameter of a model is allocated. Returns: old_param_device (int): report device for the first time",
    "type": "function",
    "file_path": "pytorch\\torch\\nn\\utils\\convert_parameters.py",
    "ast_data": "FunctionDef name:_check_param_device arg:param arg:old_param_device arguments arg arg Assign Call If Compare Assign Compare Call Assign If Compare Assign Compare Call Assign Compare If Raise Call Return return:yes"
  },
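This device check backs the public parameter/vector conversion utilities. A usage sketch with a toy module (the model and its sizes are arbitrary):

```python
# parameters_to_vector flattens all parameters into one 1-D tensor; the
# device check above rejects parameters spread across multiple devices.
import torch.nn as nn
from torch.nn.utils import parameters_to_vector

model = nn.Linear(4, 2)                         # all parameters on CPU
vec = parameters_to_vector(model.parameters())
print(vec.shape)                                # torch.Size([10]): 8 weights + 2 biases
```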
  {
    "library": "pytorch",
    "name": "_from_name",
    "source_code": "@classmethod\ndef _from_name(cls, name: ScalarName | TorchName | str | None) -> JitScalarType:\n    if name is None:\n        raise errors.OnnxExporterError('Scalar type name cannot be None')\n    if valid_scalar_name(name):\n        return _SCALAR_NAME_TO_TYPE[name]\n    if valid_torch_name(name):\n        return _TORCH_NAME_TO_SCALAR_TYPE[name]\n    raise errors.OnnxExporterError(f\"Unknown torch or scalar type: '{name}'\")",
    "docstring": "Convert a JIT scalar type or torch type name to ScalarType. Note: DO NOT USE this API when comes from a calls. A \"RuntimeError: INTERNAL ASSERT FAILED at \"../aten/src/ATen/core/jit_type_base.h\" can be raised in several scenarios where shape info is not present. Instead use API which is safer. Args: name: JIT scalar type name (Byte) or torch type name (uint8_t). Returns: JitScalarType Raises: OnnxExporterError: if name is not a valid scalar type name or if it is None.",
    "type": "method",
    "file_path": "pytorch\\torch\\onnx\\_type_utils.py",
    "ast_data": "FunctionDef name:_from_name arg:cls arg:name arguments arg arg If Compare Raise Call If Call Return return:yes If Call Return return:yes Raise Call"
  },
  {
    "library": "uvicorn",
    "name": "resume_writing",
    "source_code": "def resume_writing(self) -> None:\n    self.flow.resume_writing()",
    "docstring": "Called by the transport when the write buffer drops below the low water mark.",
    "type": "method",
    "file_path": "uvicorn\\uvicorn\\protocols\\http\\httptools_impl.py",
    "ast_data": "FunctionDef name:resume_writing arg:self arguments arg Call"
  },
  {
    "library": "tensorflow",
    "name": "graph",
    "source_code": "@property\ndef graph(self):\n    return self._func_graph",
    "docstring": "Returns the graph from which this function was constructed.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\polymorphic_function\\concrete_function.py",
    "ast_data": "FunctionDef name:graph arg:self arguments arg Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "_partial_fit_binary",
    "source_code": "def _partial_fit_binary(estimator, X, y, partial_fit_params):\n    estimator.partial_fit(X, y, classes=np.array((0, 1)), **partial_fit_params)\n    return estimator",
    "docstring": "Partially fit a single binary estimator.",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\multiclass.py",
    "ast_data": "FunctionDef name:_partial_fit_binary arg:estimator arg:X arg:y arg:partial_fit_params arguments arg arg arg arg Call Call Return return:yes"
  },
  {
    "library": "sphinx",
    "name": "find_attr_docs",
    "source_code": "def find_attr_docs(self) -> dict[tuple[str, str], list[str]]:\n    self.analyze()\n    return self.attr_docs",
    "docstring": "Find class and module-level attributes and their documentation.",
    "type": "method",
    "file_path": "sphinx\\sphinx\\pycode\\__init__.py",
    "ast_data": "FunctionDef name:find_attr_docs arg:self arguments arg Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "get_or_create_concrete_type",
    "source_code": "def get_or_create_concrete_type(self, nn_module):\n    concrete_type_builder = infer_concrete_type_builder(nn_module)\n    nn_module_type = type(nn_module)\n    if nn_module_type not in self.type_store:\n        self.type_store[nn_module_type] = []\n    known_types = self.type_store[nn_module_type]\n    for known_type in known_types:\n        if known_type.equals(concrete_type_builder):\n            return known_type\n    concrete_type = concrete_type_builder.build()\n    self.type_store[nn_module_type].append(concrete_type)\n    return concrete_type",
    "docstring": "Infer a ConcreteType from this instance. Underlying JIT types are re-used if possible.",
    "type": "method",
    "file_path": "pytorch\\torch\\jit\\_recursive.py",
    "ast_data": "FunctionDef name:get_or_create_concrete_type arg:self arg:nn_module arguments arg arg Assign Call Assign Call If Compare Assign Assign For If Call Return return:yes Assign Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "getdoc",
    "source_code": "def getdoc(object):\n    return _inspect.getdoc(object)",
    "docstring": "TFDecorator-aware replacement for inspect.getdoc. Args: object: An object, possibly decorated. Returns: The docstring associated with the object. The outermost-decorated object is intended to have the most complete documentation, so the decorated parameter is not unwrapped.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\util\\tf_inspect.py",
    "ast_data": "FunctionDef name:getdoc arg:object arguments arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "unused_exits",
    "source_code": "@property\ndef unused_exits(self):\n    return self._unused_exits",
    "docstring": "The list of \"unused\" exits.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\control_flow_state.py",
    "ast_data": "FunctionDef name:unused_exits arg:self arguments arg Return return:yes"
  },
  {
    "library": "pygame",
    "name": "__repr__",
    "source_code": "def __repr__(self):\n    return '<{klass} @{id:x} {attrs}>'.format(klass=self.__class__.__name__, id=id(self) & 16777215, attrs=' '.join((f'{k}={v!r}' for k, v in self.__dict__.items())))",
    "docstring": "Turn the class into a string.",
    "type": "method",
    "file_path": "pygame\\src_py\\sprite.py",
    "ast_data": "FunctionDef name:__repr__ arg:self arguments arg Return return:yes Call Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "restore",
    "source_code": "def restore(self, restored_tensors, unused_restored_shapes):\n    with ops.control_dependencies([self._create_op]):\n        return gen_boosted_trees_ops.boosted_trees_deserialize_ensemble(self.resource_handle, stamp_token=restored_tensors[0], tree_ensemble_serialized=restored_tensors[1])",
    "docstring": "Restores the associated tree ensemble from 'restored_tensors'. Args: restored_tensors: the tensors that were loaded from a checkpoint. unused_restored_shapes: the shapes this object should conform to after restore. Not meaningful for trees. Returns: The operation that restores the state of the tree ensemble variable.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\boosted_trees_ops.py",
    "ast_data": "FunctionDef name:restore arg:self arg:restored_tensors arg:unused_restored_shapes arguments arg arg arg With Call Return return:yes Call"
  },
  {
    "library": "django",
    "name": "get_template_names",
    "source_code": "def get_template_names(self):\n    if self.template_name is None:\n        raise ImproperlyConfigured(\"TemplateResponseMixin requires either a definition of 'template_name' or an implementation of 'get_template_names()'\")\n    else:\n        return [self.template_name]",
    "docstring": "Return a list of template names to be used for the request. Must return a list. May not be called if render_to_response() is overridden.",
    "type": "method",
    "file_path": "django\\django\\views\\generic\\base.py",
    "ast_data": "FunctionDef name:get_template_names arg:self arguments arg If Compare Raise Call Return return:yes"
  },
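A minimal usage sketch of the mixin contract above; the template names are hypothetical:

```python
# Either set template_name or override get_template_names(); both satisfy
# TemplateResponseMixin. Django resolves the returned list with
# select_template, which uses the first template it can load.
from django.views.generic import TemplateView

class HomeView(TemplateView):
    template_name = "home.html"

class SeasonalHomeView(TemplateView):
    def get_template_names(self):
        return ["home_holiday.html", "home.html"]
```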
  {
    "library": "matplotlib",
    "name": "_is_nth_color",
    "source_code": "def _is_nth_color(c):\n    return isinstance(c, str) and _nth_color_re.match(c)",
    "docstring": "Return whether *c* can be interpreted as an item in the color cycle.",
    "type": "function",
    "file_path": "matplotlib\\lib\\matplotlib\\colors.py",
    "ast_data": "FunctionDef name:_is_nth_color arg:c arguments arg Return return:yes BoolOp Call Call"
  },
  {
    "library": "django",
    "name": "_get_database_display_str",
    "source_code": "def _get_database_display_str(self, verbosity, database_name):\n    return \"'%s'%s\" % (self.connection.alias, \" ('%s')\" % database_name if verbosity >= 2 else '')",
    "docstring": "Return display string for a database for use in various actions.",
    "type": "method",
    "file_path": "django\\django\\db\\backends\\base\\creation.py",
    "ast_data": "FunctionDef name:_get_database_display_str arg:self arg:verbosity arg:database_name arguments arg arg arg Return return:yes Compare"
  },
  {
    "library": "django",
    "name": "modify_insert_params",
    "source_code": "def modify_insert_params(self, placeholder, params):\n    if placeholder == 'NULL':\n        return []\n    return super().modify_insert_params(placeholder, params)",
    "docstring": "Drop out insert parameters for NULL placeholder. Needed for Oracle Spatial backend due to #10888.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\db\\backends\\oracle\\operations.py",
    "ast_data": "FunctionDef name:modify_insert_params arg:self arg:placeholder arg:params arguments arg arg arg If Compare Return return:no Return return:yes Call Call"
  },
  {
    "library": "scipy",
    "name": "halfgennorm_gen",
    "source_code": "class halfgennorm_gen(rv_continuous):\n\n    def _shape_info(self):\n        return [_ShapeInfo('beta', False, (0, np.inf), (False, False))]\n\n    def _pdf(self, x, beta):\n        return np.exp(self._logpdf(x, beta))\n\n    def _logpdf(self, x, beta):\n        return np.log(beta) - sc.gammaln(1.0 / beta) - x ** beta\n\n    def _cdf(self, x, beta):\n        return sc.gammainc(1.0 / beta, x ** beta)\n\n    def _ppf(self, x, beta):\n        return sc.gammaincinv(1.0 / beta, x) ** (1.0 / beta)\n\n    def _sf(self, x, beta):\n        return sc.gammaincc(1.0 / beta, x ** beta)\n\n    def _isf(self, x, beta):\n        return sc.gammainccinv(1.0 / beta, x) ** (1.0 / beta)\n\n    def _entropy(self, beta):\n        return 1.0 / beta - np.log(beta) + sc.gammaln(1.0 / beta)",
    "docstring": "The upper half of a generalized normal continuous random variable. %(before_notes)s See Also -------- gennorm : generalized normal distribution expon : exponential distribution halfnorm : half normal distribution Notes ----- The probability density function for is: .. math:: f(x, \\beta) = \\frac{\\beta}{\\Gamma(1/\\beta)} \\exp(-|x|^\\beta) for :math:. :math: is the gamma function (). takes `\\beta\\beta = 1\\beta = 2`). References ---------- .. [1] \"Generalized normal distribution, Version 1\", %(example)s",
    "type": "class",
    "file_path": "scipy\\scipy\\stats\\_continuous_distns.py",
    "ast_data": "ClassDef name:halfgennorm_gen FunctionDef name:_shape_info arg:self arguments arg Return return:yes Call FunctionDef name:_pdf arg:self arg:x arg:beta arguments arg arg arg Return return:yes Call Call FunctionDef name:_logpdf arg:self arg:x arg:beta arguments arg arg arg Return return:yes Call Call FunctionDef name:_cdf arg:self arg:x arg:beta arguments arg arg arg Return return:yes Call FunctionDef name:_ppf arg:self arg:x arg:beta arguments arg arg arg Return return:yes Call FunctionDef name:_sf arg:self arg:x arg:beta arguments arg arg arg Return return:yes Call FunctionDef name:_isf arg:self arg:x arg:beta arguments arg arg arg Return return:yes Call FunctionDef name:_entropy arg:self arg:beta arguments arg arg Return return:yes Call Call"
  },
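This generator backs the public `scipy.stats.halfgennorm` distribution. A quick numerical check of the `beta = 1` special case noted in the docstring:

```python
# For beta=1 the density is exp(-x), matching the exponential distribution.
import numpy as np
from scipy.stats import expon, halfgennorm

x = np.linspace(0.1, 3.0, 5)
print(np.allclose(halfgennorm.pdf(x, 1), expon.pdf(x)))  # True
```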
  {
    "library": "kornia",
    "name": "_fspecial_gauss_2d",
    "source_code": "def _fspecial_gauss_2d(self, size: int, sigma: float, device: Optional[torch.device]=None, dtype: Optional[torch.dtype]=None) -> torch.Tensor:\n    gaussian_vec = self._fspecial_gauss_1d(size, sigma, device, dtype)\n    return torch.outer(gaussian_vec, gaussian_vec)",
    "docstring": "Create 2-D gauss kernel. Args: size: the size of gauss kernel. sigma: sigma of normal distribution. device: device to store the result on. dtype: dtype of the result. Returns: 2D kernel (size x size).",
    "type": "method",
    "file_path": "kornia\\kornia\\losses\\ms_ssim.py",
    "ast_data": "FunctionDef name:_fspecial_gauss_2d arg:self arg:size arg:sigma arg:device arg:dtype arguments arg arg arg arg arg Assign Call Return return:yes Call"
  },
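The separable construction above (outer product of a 1-D Gaussian with itself) can be sketched without Kornia; `gauss_1d` here is a hypothetical stand-in for the private `_fspecial_gauss_1d`:

```python
import torch

def gauss_1d(size: int, sigma: float) -> torch.Tensor:
    # Normalized 1-D Gaussian centered on the kernel window.
    coords = torch.arange(size, dtype=torch.float32) - (size - 1) / 2
    g = torch.exp(-(coords ** 2) / (2 * sigma ** 2))
    return g / g.sum()

vec = gauss_1d(5, 1.5)
kernel_2d = torch.outer(vec, vec)  # (5, 5) separable Gaussian kernel
print(float(kernel_2d.sum()))      # ~1.0, since the 1-D vector is normalized
```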
  {
    "library": "tensorflow",
    "name": "process_file",
    "source_code": "def process_file(in_filename, out_filename, upgrader):\n    print('Extracting code lines from original notebook')\n    raw_code, notebook = _get_code(in_filename)\n    raw_lines = [cl.code for cl in raw_code]\n    with tempfile.NamedTemporaryFile('w', delete=False) as temp_file:\n        processed_file, new_file_content, log, process_errors = upgrader.update_string_pasta('\\n'.join(raw_lines), in_filename)\n        if temp_file and processed_file:\n            new_notebook = _update_notebook(notebook, raw_code, new_file_content.split('\\n'))\n            json.dump(new_notebook, temp_file)\n        else:\n            raise SyntaxError('Was not able to process the file: \\n%s\\n' % ''.join(log))\n        files_processed = processed_file\n        report_text = upgrader._format_log(log, in_filename, out_filename)\n        errors = process_errors\n    shutil.move(temp_file.name, out_filename)\n    return (files_processed, report_text, errors)",
    "docstring": "The function where we inject the support for ipynb upgrade.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\tools\\compatibility\\ipynb.py",
    "ast_data": "FunctionDef name:process_file arg:in_filename arg:out_filename arg:upgrader arguments arg arg arg Call Assign Call Assign With Call Assign Call Call If BoolOp Assign Call Call Call Raise Call Call Assign Assign Call Assign Call Return return:yes"
  },
  {
    "library": "scrapy",
    "name": "enqueue_request",
    "source_code": "@abstractmethod\ndef enqueue_request(self, request: Request) -> bool:\n    raise NotImplementedError",
    "docstring": "Process a request received by the engine. Return `` when the request is rejected by the dupefilter.",
    "type": "method",
    "file_path": "scrapy\\scrapy\\core\\scheduler.py",
    "ast_data": "FunctionDef name:enqueue_request arg:self arg:request arguments arg arg Raise"
  },
  {
    "library": "tensorflow",
    "name": "after_iteration",
    "source_code": "def after_iteration(self):\n    self.iterations += 1\n    self._check_unroll_limits()\n    if self.check_op_count_after_iteration:\n        did_warn = self._verify_inefficient_unroll()\n        if did_warn:\n            self._stop_checking_inefficient_unroll()\n        elif self.iterations > INEFFICIENT_UNROLL_MIN_ITERATIONS + 3:\n            self._stop_checking_inefficient_unroll()",
    "docstring": "Called after each iteration in a Python loop.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\autograph\\operators\\control_flow.py",
    "ast_data": "FunctionDef name:after_iteration arg:self arguments arg Call If Assign Call If Call If Compare Call"
  },
  {
    "library": "tensorflow",
    "name": "remove_op_callback",
    "source_code": "def remove_op_callback(self, callback):\n    if callback not in self._thread_local_data.op_callbacks:\n        raise KeyError('The specified op callback has not been registered, and hence cannot be removed.')\n    del self._thread_local_data.op_callbacks[self._thread_local_data.op_callbacks.index(callback)]",
    "docstring": "Remove an already-registered op callback. Args: callback: The op callback to be removed. Raises: KeyError: If is not already registered.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\context.py",
    "ast_data": "FunctionDef name:remove_op_callback arg:self arg:callback arguments arg arg If Compare Raise Call Call"
  },
  {
    "library": "pytorch",
    "name": "get_device_list",
    "source_code": "@staticmethod\ndef get_device_list() -> Sequence[Optional[int]]:\n    if not config.autotune_multi_device:\n        return [None]\n    gpu_type = get_gpu_type()\n    device_interface = get_interface_for_device(gpu_type)\n    count = device_interface.device_count()\n    if CUDA_VISIBLE_DEVICES in os.environ:\n        devices = [int(d) for d in os.environ[CUDA_VISIBLE_DEVICES].split(',')]\n        assert len(devices) <= count\n        return devices\n    return list(range(count))",
    "docstring": "Gather the list of devices to be used in the pool.",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\autotune_process.py",
    "ast_data": "FunctionDef name:get_device_list arguments If Return return:no Assign Call Assign Call Assign Call If Compare Assign Call Call Compare Call Return return:yes Return return:yes Call Call"
  },
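The device-selection logic above is a small, self-contained pattern. A hypothetical standalone version:

```python
# Honor CUDA_VISIBLE_DEVICES when present (integer indices, as in the code
# above), otherwise use all device indices up to `count`.
import os

def device_list(count: int) -> list:
    visible = os.environ.get("CUDA_VISIBLE_DEVICES")
    if visible:
        devices = [int(d) for d in visible.split(",")]
        assert len(devices) <= count
        return devices
    return list(range(count))

os.environ["CUDA_VISIBLE_DEVICES"] = "0,2"
print(device_list(4))  # [0, 2]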
  {
    "library": "tensorflow",
    "name": "get_embedding_table_size",
    "source_code": "def get_embedding_table_size(self):\n    return (self.categorical_column._num_buckets, self.shared_embedding_column_creator.dimension)",
    "docstring": "Returns num_ids and width.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\tpu\\feature_column_v2.py",
    "ast_data": "FunctionDef name:get_embedding_table_size arg:self arguments arg Return return:yes"
  },
  {
    "library": "cherrypy",
    "name": "JokePage",
    "source_code": "class JokePage:\n\n    @cherrypy.expose\n    def index(self):\n        return '\\n            <p>\"In Python, how do you create a string of random\\n            characters?\" -- \"Read a Perl file!\"</p>\\n            <p>[<a href=\"../\">Return</a>]</p>'",
    "docstring": "Joke app.",
    "type": "class",
    "file_path": "cherrypy\\cherrypy\\tutorial\\tut04_complex_site.py",
    "ast_data": "ClassDef name:JokePage FunctionDef name:index arg:self arguments arg Return return:yes"
  },
  {
    "library": "django",
    "name": "register",
    "source_code": "def register(self, check=None, *tags, **kwargs):\n\n    def inner(check):\n        if not func_accepts_kwargs(check):\n            raise TypeError('Check functions must accept keyword arguments (**kwargs).')\n        check.tags = tags\n        checks = self.deployment_checks if kwargs.get('deploy') else self.registered_checks\n        checks.add(check)\n        return check\n    if callable(check):\n        return inner(check)\n    else:\n        if check:\n            tags += (check,)\n        return inner",
    "docstring": "Can be used as a function or a decorator. Register given function labeled with given . The function should receive **kwargs and return list of Errors and Warnings. Example:: registry = CheckRegistry() @registry.register('mytag', 'anothertag') def my_check(app_configs, **kwargs): # ... perform checks and collect ... return errors # or registry.register(my_check, 'mytag', 'anothertag')",
    "type": "method",
    "file_path": "django\\django\\core\\checks\\registry.py",
    "ast_data": "FunctionDef name:register arg:self arg:check arguments arg arg arg arg FunctionDef name:inner arg:check arguments arg If Call Raise Call Assign Assign Call Call Return return:yes If Call Return return:yes Call If Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_get_target_dtype",
    "source_code": "def _get_target_dtype(values, dtype=None, dtype_hint=None):\n    if dtype is not None:\n        return dtype\n    for value in values:\n        if isinstance(value, tensor_lib.Tensor):\n            return value.dtype\n    for value in values:\n        if isinstance(value, np.ndarray):\n            return dtypes.as_dtype(value.dtype)\n    if dtype_hint is not None:\n        return dtype_hint\n    return dtypes.int64",
    "docstring": "Gets the target dtype of a family of values.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\row_partition.py",
    "ast_data": "FunctionDef name:_get_target_dtype arg:values arg:dtype arg:dtype_hint arguments arg arg arg If Compare Return return:yes For If Call Return return:yes For If Call Return return:yes Call If Compare Return return:yes Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "set_help_intro",
    "source_code": "def set_help_intro(self, help_intro):\n    self._command_handler_registry.set_help_intro(help_intro=help_intro)",
    "docstring": "Set an introductory message to the help output of the command registry. Args: help_intro: (RichTextLines) Rich text lines appended to the beginning of the output of the command \"help\", as introductory information.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\debug\\cli\\base_ui.py",
    "ast_data": "FunctionDef name:set_help_intro arg:self arg:help_intro arguments arg arg Call"
  },
  {
    "library": "pandas",
    "name": "_handle_hidden_tables",
    "source_code": "def _handle_hidden_tables(self, tbl_list, attr_name: str):\n    if not self.displayed_only:\n        return tbl_list\n    return [x for x in tbl_list if 'display:none' not in getattr(x, attr_name).get('style', '').replace(' ', '')]",
    "docstring": "Return list of tables, potentially removing hidden elements Parameters ---------- tbl_list : list of node-like Type of list elements will vary depending upon parser used attr_name : str Name of the accessor for retrieving HTML attributes Returns ------- list of node-like Return type matches",
    "type": "method",
    "file_path": "pandas\\pandas\\io\\html.py",
    "ast_data": "FunctionDef name:_handle_hidden_tables arg:self arg:tbl_list arg:attr_name arguments arg arg arg If Return return:yes Return return:yes Compare Call Call Call"
  },
  {
    "library": "pandas",
    "name": "_gen_rows_with_counts",
    "source_code": "def _gen_rows_with_counts(self) -> Iterator[Sequence[str]]:\n    yield from zip(self._gen_line_numbers(), self._gen_columns(), self._gen_non_null_counts(), self._gen_dtypes())",
    "docstring": "Iterator with string representation of body data with counts.",
    "type": "method",
    "file_path": "pandas\\pandas\\io\\formats\\info.py",
    "ast_data": "FunctionDef name:_gen_rows_with_counts arg:self arguments arg Call Call Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "GraphMatchingException",
    "source_code": "class GraphMatchingException(Exception):\n    pass",
    "docstring": "Exception raised when two graphs cannot be matched.",
    "type": "class",
    "file_path": "pytorch\\torch\\ao\\ns\\fx\\graph_matcher.py",
    "ast_data": "ClassDef name:GraphMatchingException"
  },
  {
    "library": "matplotlib",
    "name": "set_cmap",
    "source_code": "def set_cmap(self, cmap):\n    self.cmap = cmap",
    "docstring": "Set the colormap for luminance data. Parameters ---------- cmap : or str or None",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\colorizer.py",
    "ast_data": "FunctionDef name:set_cmap arg:self arg:cmap arguments arg arg Assign"
  },
  {
    "library": "sphinx",
    "name": "newest_template_mtime",
    "source_code": "def newest_template_mtime(self) -> float:\n    return 0",
    "docstring": "Called by the builder to determine if output files are outdated because of template changes. Return the mtime of the newest template file that was changed. The default implementation returns ``.",
    "type": "method",
    "file_path": "sphinx\\sphinx\\application.py",
    "ast_data": "FunctionDef name:newest_template_mtime arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "replace_with_unreplicated_resources",
    "source_code": "def replace_with_unreplicated_resources(resource_inputs):\n    to_remove = []\n    to_add = []\n    for resource in resource_inputs:\n        if resource.op.type == 'TPUReplicatedInput':\n            to_remove.append(resource)\n            to_add.extend(resource.op.inputs)\n    for t in to_remove:\n        resource_inputs.discard(t)\n    resource_inputs.update(to_add)\n    return to_add or to_remove",
    "docstring": "Replaces handles in with their unreplicated inputs.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\tpu\\tpu.py",
    "ast_data": "FunctionDef name:replace_with_unreplicated_resources arg:resource_inputs arguments arg Assign Assign For If Compare Call Call For Call Call Return return:yes BoolOp"
  },
  {
    "library": "pytorch",
    "name": "TVar",
    "source_code": "class TVar:\n\n    def __init__(self, tvar):\n        self.tvar = tvar\n\n    def __repr__(self):\n        return f'TV({self.tvar})'\n\n    def __eq__(self, other):\n        if isinstance(other, TVar):\n            return self.tvar == other.tvar\n        else:\n            return False",
    "docstring": "Tensor variable with no tensor constructor",
    "type": "class",
    "file_path": "pytorch\\torch\\fx\\experimental\\migrate_gradual_types\\constraint.py",
    "ast_data": "ClassDef name:TVar FunctionDef name:__init__ arg:self arg:tvar arguments arg arg Assign FunctionDef name:__repr__ arg:self arguments arg Return return:yes FunctionDef name:__eq__ arg:self arg:other arguments arg arg If Call Return return:yes Compare Return return:yes"
  },
  {
    "library": "pandas",
    "name": "render_git_describe_long",
    "source_code": "def render_git_describe_long(pieces):\n    if pieces['closest-tag']:\n        rendered = pieces['closest-tag']\n        rendered += f'-{pieces['distance']}-g{pieces['short']}'\n    else:\n        rendered = pieces['short']\n    if pieces['dirty']:\n        rendered += '-dirty'\n    return rendered",
    "docstring": "TAG-DISTANCE-gHEX[-dirty]. Like 'git describe --tags --dirty --always -long'. The distance/hash is unconditional. Exceptions: 1: no tags. HEX[-dirty] (note: no 'g' prefix)",
    "type": "function",
    "file_path": "pandas\\pandas\\_version.py",
    "ast_data": "FunctionDef name:render_git_describe_long arg:pieces arguments arg If Assign Assign If Return return:yes"
  },
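A worked example of the rendering rule above, assuming the function from this entry is in scope; the `pieces` dicts are hypothetical inputs in the shape the function expects:

```python
pieces = {"closest-tag": "v2.1.0", "distance": 13, "short": "abc1234", "dirty": True}
print(render_git_describe_long(pieces))  # v2.1.0-13-gabc1234-dirty

pieces = {"closest-tag": None, "distance": 13, "short": "abc1234", "dirty": False}
print(render_git_describe_long(pieces))  # abc1234  (no tags: bare HEX, no 'g' prefix)
```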
  {
    "library": "pytorch",
    "name": "const_getattr",
    "source_code": "def const_getattr(self, tx: 'InstructionTranslator', name: str) -> Any:\n    raise NotImplementedError",
    "docstring": "getattr(self, name) returning a python constant",
    "type": "method",
    "file_path": "pytorch\\torch\\_dynamo\\variables\\base.py",
    "ast_data": "FunctionDef name:const_getattr arg:self arg:tx arg:name arguments arg arg arg Raise"
  },
  {
    "library": "scipy",
    "name": "maxdists",
    "source_code": "@lazy_cython\ndef maxdists(Z):\n    xp = array_namespace(Z)\n    Z = _asarray(Z, order='C', dtype=xp.float64, xp=xp)\n    _is_valid_linkage(Z, throw=True, name='Z', xp=xp)\n\n    def cy_maxdists(Z, validate):\n        if validate:\n            _is_valid_linkage(Z, throw=True, name='Z', xp=np)\n        MD = np.zeros((Z.shape[0],))\n        _hierarchy.get_max_dist_for_each_cluster(Z, MD, Z.shape[0] + 1)\n        return MD\n    return xpx.lazy_apply(cy_maxdists, Z, validate=is_lazy_array(Z), shape=(Z.shape[0],), dtype=xp.float64, as_numpy=True, xp=xp)",
    "docstring": "Return the maximum distance between any non-singleton cluster. Parameters ---------- Z : ndarray The hierarchical clustering encoded as a matrix. See `scipy.cluster.hierarchy.maxdistsscipy.cluster.hierarchy.medianscipy.cluster.hierarchy.maxdists` returns 3.5 in this case.",
    "type": "function",
    "file_path": "scipy\\scipy\\cluster\\hierarchy.py",
    "ast_data": "FunctionDef name:maxdists arg:Z arguments arg Assign Call Assign Call Call FunctionDef name:cy_maxdists arg:Z arg:validate arguments arg arg If Call Assign Call Call Return return:yes Return return:yes Call Call"
  },
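A usage sketch via the public API, on arbitrary toy data:

```python
import numpy as np
from scipy.cluster.hierarchy import maxdists, single
from scipy.spatial.distance import pdist

X = np.array([[0.0, 0.0], [0.0, 1.0], [1.0, 0.0], [5.0, 5.0]])
Z = single(pdist(X))   # linkage matrix with n-1 = 3 merge rows
print(maxdists(Z))     # max distance at or below each non-singleton cluster
```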
  {
    "library": "pytorch",
    "name": "print_guards",
    "source_code": "def print_guards(self, *, file=None):\n    print('\\n'.join((f'{repr(guard)}' for guard in sorted(self.__tx.output.guards))), file=file)",
    "docstring": "Print the currently installed guards for the Dynamo context. This does NOT include guards associated with variables that may or may not be installed in the future if those variables are used.",
    "type": "method",
    "file_path": "pytorch\\torch\\_dynamo\\comptime.py",
    "ast_data": "FunctionDef name:print_guards arg:self arguments arg arg Call Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "wrapped_register_custom_opdefs",
    "source_code": "def wrapped_register_custom_opdefs(custom_opdefs_list):\n    return _pywrap_converter_api.RegisterCustomOpdefs(custom_opdefs_list)",
    "docstring": "Wraps RegisterCustomOpdefs with lazy loader.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\compiler\\mlir\\lite\\python\\wrap_converter.py",
    "ast_data": "FunctionDef name:wrapped_register_custom_opdefs arg:custom_opdefs_list arguments arg Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "_lloyd_centroidal_voronoi_tessellation",
    "source_code": "def _lloyd_centroidal_voronoi_tessellation(sample: 'npt.ArrayLike', *, tol: DecimalNumber=1e-05, maxiter: IntNumber=10, qhull_options: str | None=None, **kwargs: dict) -> np.ndarray:\n    del kwargs\n    sample = np.asarray(sample).copy()\n    if not sample.ndim == 2:\n        raise ValueError('`sample` is not a 2D array')\n    if not sample.shape[1] >= 2:\n        raise ValueError('`sample` dimension is not >= 2')\n    if sample.max() > 1.0 or sample.min() < 0.0:\n        raise ValueError('`sample` is not in unit hypercube')\n    if qhull_options is None:\n        qhull_options = 'Qbb Qc Qz QJ'\n        if sample.shape[1] >= 5:\n            qhull_options += ' Qx'\n    root = -maxiter / np.log(0.1)\n    decay = [np.exp(-x / root) + 0.9 for x in range(maxiter)]\n    l1_old = _l1_norm(sample=sample)\n    for i in range(maxiter):\n        sample = _lloyd_iteration(sample=sample, decay=decay[i], qhull_options=qhull_options)\n        l1_new = _l1_norm(sample=sample)\n        if abs(l1_new - l1_old) < tol:\n            break\n        else:\n            l1_old = l1_new\n    return sample",
    "docstring": "Approximate Centroidal Voronoi Tessellation. Perturb samples in N-dimensions using Lloyd-Max algorithm. Parameters ---------- sample : array_like (n, d) The sample to iterate on. With `[0, 1]^dtoltolmaxiter[0, 1]^dscipy.stats.qmc.scale[0, 1]^d`. And back to their original bounds. Compute the quality of the sample using the L1 criterion. >>> def l1_norm(sample): ... return distance.pdist(sample, 'cityblock').min() >>> l1_norm(sample) 0.00161... # random Now process the sample using Lloyd's algorithm and check the improvement on the L1. The value should increase. >>> sample = _lloyd_centroidal_voronoi_tessellation(sample) >>> l1_norm(sample) 0.0278... # random",
    "type": "function",
    "file_path": "scipy\\scipy\\stats\\_qmc.py",
    "ast_data": "FunctionDef name:_lloyd_centroidal_voronoi_tessellation arg:sample arguments arg arg arg arg arg Assign Call Call If Compare Raise Call If Compare Raise Call If BoolOp Compare Call Compare Call Raise Call If Compare Assign If Compare Assign Call Assign Call Call Assign Call For Call Assign Call Assign Call If Compare Call Assign Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_verify_managed_param",
    "source_code": "def _verify_managed_param(name: str, param: nn.Parameter) -> None:\n    if len(param.shape) == 0:\n        raise ValueError(f\"fully_shard doesn't support scalar parameters. Change {name} to a 1D tensor with numel equal to 1.\")",
    "docstring": "Verify if the parameter is accepted by fully_shard. The only restriction now is that the parameter cannot be a scalar tensor (param.numel == 0) since we need at least one dim to shard.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\fsdp\\_fully_shard\\_fsdp_init.py",
    "ast_data": "FunctionDef name:_verify_managed_param arg:name arg:param arguments arg arg If Compare Call Raise Call"
  },
  {
    "library": "pandas",
    "name": "is_interval_dtype",
    "source_code": "def is_interval_dtype(arr_or_dtype) -> bool:\n    warnings.warn('is_interval_dtype is deprecated and will be removed in a future version. Use `isinstance(dtype, pd.IntervalDtype)` instead', DeprecationWarning, stacklevel=2)\n    if isinstance(arr_or_dtype, ExtensionDtype):\n        return arr_or_dtype.type is Interval\n    if arr_or_dtype is None:\n        return False\n    return IntervalDtype.is_dtype(arr_or_dtype)",
    "docstring": "Check whether an array-like or dtype is of the Interval dtype. .. deprecated:: 2.2.0 Use isinstance(dtype, pd.IntervalDtype) instead. Parameters ---------- arr_or_dtype : array-like or dtype The array-like or dtype to check. Returns ------- boolean Whether or not the array-like or dtype is of the Interval dtype. See Also -------- api.types.is_object_dtype : Check whether an array-like or dtype is of the object dtype. api.types.is_numeric_dtype : Check whether the provided array or dtype is of a numeric dtype. api.types.is_categorical_dtype : Check whether an array-like or dtype is of the Categorical dtype. Examples -------- >>> from pandas.core.dtypes.common import is_interval_dtype >>> is_interval_dtype(object) False >>> is_interval_dtype(pd.IntervalDtype()) True >>> is_interval_dtype([1, 2, 3]) False >>> >>> interval = pd.Interval(1, 2, closed=\"right\") >>> is_interval_dtype(interval) False >>> is_interval_dtype(pd.IntervalIndex([interval])) True",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\dtypes\\common.py",
    "ast_data": "FunctionDef name:is_interval_dtype arg:arr_or_dtype arguments arg Call If Call Return return:yes Compare If Compare Return return:yes Return return:yes Call"
  },
  {
    "library": "django",
    "name": "make_writeable",
    "source_code": "def make_writeable(self, filename):\n    if not os.access(filename, os.W_OK):\n        st = os.stat(filename)\n        new_permissions = stat.S_IMODE(st.st_mode) | stat.S_IWUSR\n        os.chmod(filename, new_permissions)",
    "docstring": "Make sure that the file is writeable. Useful if our source is read-only.",
    "type": "method",
    "file_path": "django\\django\\core\\management\\templates.py",
    "ast_data": "FunctionDef name:make_writeable arg:self arg:filename arguments arg arg If Call Assign Call Assign Call Call"
  },
  {
    "library": "kornia",
    "name": "from_matrix",
    "source_code": "@classmethod\ndef from_matrix(cls, matrix: Tensor) -> Se3:\n    r = So3.from_matrix(matrix[..., :3, :3])\n    t = matrix[..., :3, -1]\n    return cls(r, t)",
    "docstring": "Create a Se3 group from a matrix. Args: matrix: tensor of shape :math:. Example: >>> s = Se3.from_matrix(torch.eye(4)) >>> s.r Parameter containing: tensor([1., 0., 0., 0.], requires_grad=True) >>> s.t Parameter containing: tensor([0., 0., 0.], requires_grad=True)",
    "type": "method",
    "file_path": "kornia\\kornia\\geometry\\liegroup\\se3.py",
    "ast_data": "FunctionDef name:from_matrix arg:cls arg:matrix arguments arg arg Assign Call Assign Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "proxy_call",
    "source_code": "def proxy_call(self, fn, args, output_metadata):\n    flat_args, _ = pytree.tree_flatten(args)\n    proxy_args = pytree.tree_map(lambda e: self.to_proxy(e), args)\n    proxy_out = self.fx_tracer.create_proxy('call_function', fn, args=proxy_args, kwargs={})\n    result = [self.allocate_dummy() for _ in output_metadata]\n    self.bind_objects_to_proxies(result, [proxy_out[i] for i in range(len(result))])\n    return result",
    "docstring": "Proxies a call to fn(*args) into the graph",
    "type": "method",
    "file_path": "pytorch\\torch\\_dynamo\\compiled_autograd.py",
    "ast_data": "FunctionDef name:proxy_call arg:self arg:fn arg:args arg:output_metadata arguments arg arg arg arg Assign Call Assign Call arguments arg Call Assign Call Assign Call Call Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "restore_state",
    "source_code": "def restore_state(self, state, name=None):\n    if self._reader_ref.dtype == dtypes.resource:\n        return gen_io_ops.reader_restore_state_v2(self._reader_ref, state, name=name)\n    else:\n        return gen_io_ops.reader_restore_state(self._reader_ref, state, name=name)",
    "docstring": "Restore a reader to a previously saved state. Not all Readers support being restored, so this can produce an Unimplemented error. Args: state: A string Tensor. Result of a SerializeState of a Reader with matching type. name: A name for the operation (optional). Returns: The created Operation.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\io_ops.py",
    "ast_data": "FunctionDef name:restore_state arg:self arg:state arg:name arguments arg arg arg If Compare Return return:yes Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "make_global_gradient_clipnorm_fn",
    "source_code": "def make_global_gradient_clipnorm_fn(clipnorm):\n    if clipnorm is None:\n        return lambda grads_and_vars: grads_and_vars\n\n    def gradient_clipnorm_fn(grads_and_vars):\n        if isinstance(distribute_lib.get_strategy(), (central_storage_strategy.CentralStorageStrategy, central_storage_strategy.CentralStorageStrategyV1)):\n            raise ValueError('`global_clipnorm` is not supported with `CenteralStorageStrategy`')\n        grads, variables = zip(*grads_and_vars)\n        clipped_grads, _ = clip_ops.clip_by_global_norm(grads, clipnorm)\n        clipped_grads_and_vars = list(zip(clipped_grads, variables))\n        return clipped_grads_and_vars\n    return gradient_clipnorm_fn",
    "docstring": "Creates a gradient transformation function for clipping by norm.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\optimizer_v2\\utils.py",
    "ast_data": "FunctionDef name:make_global_gradient_clipnorm_fn arg:clipnorm arguments arg If Compare Return return:yes arguments arg FunctionDef name:gradient_clipnorm_fn arg:grads_and_vars arguments arg If Call Call Raise Call Assign Call Assign Call Assign Call Call Return return:yes Return return:yes"
  },
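The closure above delegates the actual clipping to clip_ops.clip_by_global_norm. A sketch of that semantics via the public tf.clip_by_global_norm; the gradient values are made up for illustration:

```python
# Illustrative only: the public tf.clip_by_global_norm call that the
# returned closure wraps.
import tensorflow as tf

grads = [tf.constant([3.0, 4.0]), tf.constant([0.0])]  # global norm = 5.0
clipped, global_norm = tf.clip_by_global_norm(grads, clip_norm=1.0)
print(global_norm.numpy())   # 5.0
print(clipped[0].numpy())    # [0.6 0.8] -- every gradient scaled by 1/5
```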
  {
    "library": "tensorflow",
    "name": "in_while_loop_defun",
    "source_code": "def in_while_loop_defun(graph):\n    if context.executing_eagerly():\n        return False\n    return isinstance(graph, WhileCondFuncGraph) or isinstance(graph, WhileBodyFuncGraph)",
    "docstring": "Returns if the graph is a while loop FuncGraph.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\control_flow_util_v2.py",
    "ast_data": "FunctionDef name:in_while_loop_defun arg:graph arguments arg If Call Return return:yes Return return:yes BoolOp Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_obj_reference_counts",
    "source_code": "@property\ndef _obj_reference_counts(self):\n    self._maybe_create_attribute('_obj_reference_counts_dict', object_identity.ObjectIdentityDictionary())\n    return self._obj_reference_counts_dict",
    "docstring": "A dictionary counting the number of attributes referencing an object.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\base_layer.py",
    "ast_data": "FunctionDef name:_obj_reference_counts arg:self arguments arg Call Call Return return:yes"
  },
  {
    "library": "kornia",
    "name": "rot180",
    "source_code": "def rot180(input: Tensor) -> Tensor:\n    return torch.flip(input, [-2, -1])",
    "docstring": "Rotate a tensor image or a batch of tensor images 180 degrees. .. image:: _static/img/rot180.png Input must be a tensor of shape (C, H, W) or a batch of tensors :math:. Args: input: input tensor. Returns: The rotated image tensor.",
    "type": "function",
    "file_path": "kornia\\kornia\\geometry\\transform\\flips.py",
    "ast_data": "FunctionDef name:rot180 arg:input arguments arg Return return:yes Call"
  },
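Since rot180 is just a flip over the last two (spatial) dimensions, a quick equivalence check with plain torch; the shape is arbitrary:

```python
# rot180 twice is the identity, because each call flips both spatial dims.
import torch

img = torch.arange(6.0).reshape(1, 2, 3)   # (C, H, W)
rotated = torch.flip(img, [-2, -1])
assert torch.equal(torch.flip(rotated, [-2, -1]), img)  # 180 + 180 = identity
```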
  {
    "library": "scikit-learn",
    "name": "_split",
    "source_code": "def _split(self, X):\n    X, = indexable(X)\n    n_samples = _num_samples(X)\n    n_splits = self.n_splits\n    n_folds = n_splits + 1\n    gap = self.gap\n    test_size = self.test_size if self.test_size is not None else n_samples // n_folds\n    if n_folds > n_samples:\n        raise ValueError(f'Cannot have number of folds={n_folds} greater than the number of samples={n_samples}.')\n    if n_samples - gap - test_size * n_splits <= 0:\n        raise ValueError(f'Too many splits={n_splits} for number of samples={n_samples} with test_size={test_size} and gap={gap}.')\n    indices = np.arange(n_samples)\n    test_starts = range(n_samples - n_splits * test_size, n_samples, test_size)\n    for test_start in test_starts:\n        train_end = test_start - gap\n        if self.max_train_size and self.max_train_size < train_end:\n            yield (indices[train_end - self.max_train_size:train_end], indices[test_start:test_start + test_size])\n        else:\n            yield (indices[:train_end], indices[test_start:test_start + test_size])",
    "docstring": "Generate indices to split data into training and test set. Parameters ---------- X : array-like of shape (n_samples, n_features) Training data, where is the number of samples and is the number of features. Yields ------ train : ndarray The training set indices for that split. test : ndarray The testing set indices for that split.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\model_selection\\_split.py",
    "ast_data": "FunctionDef name:_split arg:self arg:X arguments arg arg Assign Call Assign Call Assign Assign Assign Assign Compare If Compare Raise Call If Compare Raise Call Assign Call Assign Call For Assign If BoolOp Compare"
  },
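This private generator backs the public TimeSeriesSplit.split. A hedged usage sketch showing how gap and test_size shape the folds; the numbers are chosen for illustration:

```python
# Usage sketch of the public API that _split implements.
import numpy as np
from sklearn.model_selection import TimeSeriesSplit

X = np.arange(10).reshape(-1, 1)
tscv = TimeSeriesSplit(n_splits=3, test_size=2, gap=1)
for train_idx, test_idx in tscv.split(X):
    print(train_idx, test_idx)
# [0 1 2] [4 5]            <- gap=1 leaves index 3 unused
# [0 1 2 3 4] [6 7]
# [0 1 2 3 4 5 6] [8 9]
```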
  {
    "library": "cherrypy",
    "name": "__init__",
    "source_code": "def __init__(self, path=None):\n    Profiler.__init__(self, path)\n    global _count\n    self.count = _count = _count + 1\n    self.profiler = profile.Profile()",
    "docstring": "Prepare the profiling aggregator app resources.",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\lib\\profiler.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:path arguments arg arg Call Assign Assign Call"
  },
  {
    "library": "scipy",
    "name": "__init__",
    "source_code": "def __init__(self):\n    super().__init__()\n    self.inputs = None\n    self.outputs = None\n    self._dt = None",
    "docstring": "Initialize the baseclass. The heavy lifting is done by the subclasses.",
    "type": "method",
    "file_path": "scipy\\scipy\\signal\\_ltisys.py",
    "ast_data": "FunctionDef name:__init__ arg:self arguments arg Call Call Assign Assign Assign"
  },
  {
    "library": "pytorch",
    "name": "all_eq",
    "source_code": "@register_refinement_rule(BatchNorm2d)\n@register_refinement_rule(torch.nn.ReLU)\ndef all_eq(n: Node):\n    res = []\n    assert isinstance(n.args[0], Node)\n    arg_type = n.args[0].type\n    if isinstance(arg_type, TensorType) and isinstance(n.type, TensorType):\n        args1 = arg_type.__args__\n        args2 = n.type.__args__\n        res = [Equality(args1[i], args2[i]) for i in range(len(args1))]\n    return res",
    "docstring": "For operations where the input shape is equal to the output shape",
    "type": "function",
    "file_path": "pytorch\\torch\\fx\\experimental\\graph_gradual_typechecker.py",
    "ast_data": "FunctionDef name:all_eq arg:n arguments arg Assign Call Assign If BoolOp Call Call Assign Assign Assign Call Call Call Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "on_train_begin",
    "source_code": "def on_train_begin(self, logs=None):\n    logs = self._process_logs(logs)\n    for callback in self.callbacks:\n        callback.on_train_begin(logs)",
    "docstring": "Calls the methods of its callbacks. Args: logs: Dict. Currently no data is passed to this argument for this method but that may change in the future.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\callbacks.py",
    "ast_data": "FunctionDef name:on_train_begin arg:self arg:logs arguments arg arg Assign Call For Call"
  },
  {
    "library": "cherrypy",
    "name": "get_list_collection",
    "source_code": "def get_list_collection(self, v, formatting):\n    headers = []\n    for record in v:\n        for k3 in record:\n            format = formatting.get(k3, missing)\n            if format is None:\n                continue\n            if k3 not in headers:\n                headers.append(k3)\n    headers.sort()\n    subrows = []\n    for record in v:\n        subrow = []\n        for k3 in headers:\n            v3 = record.get(k3, '')\n            format = formatting.get(k3, missing)\n            if format is None:\n                continue\n            if hasattr(format, '__call__'):\n                v3 = format(v3)\n            elif format is not missing:\n                v3 = format % v3\n            subrow.append(v3)\n        subrows.append(subrow)\n    return (headers, subrows)",
    "docstring": "Return ([headers], [subrows]) for the given collection.",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\lib\\cpstats.py",
    "ast_data": "FunctionDef name:get_list_collection arg:self arg:v arg:formatting arguments arg arg arg Assign For For Assign Call If Compare If Compare Call Call Assign For Assign For Assign Call Assign Call If Compare If Call Assign Call If Compare Assign Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "is_experimental_feature_activated",
    "source_code": "def is_experimental_feature_activated(feature_name):\n    return feature_name in os.environ.get('TF_TRT_EXPERIMENTAL_FEATURES', default='').split(',')",
    "docstring": "Determines if a TF-TRT experimental feature is enabled. This helper function checks if an experimental feature was enabled using the environment variable . Args: feature_name: Name of the feature being tested for activation.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\compiler\\tensorrt\\utils.py",
    "ast_data": "FunctionDef name:is_experimental_feature_activated arg:feature_name arguments arg Return return:yes Compare Call Call"
  },
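The comma-separated environment-variable convention is easy to reproduce standalone; the feature names feat_a/feat_c below are hypothetical:

```python
# Standalone sketch of the env-var convention the helper checks.
import os

os.environ["TF_TRT_EXPERIMENTAL_FEATURES"] = "feat_a,feat_b"

def is_enabled(name):
    return name in os.environ.get("TF_TRT_EXPERIMENTAL_FEATURES", "").split(",")

print(is_enabled("feat_a"))  # True
print(is_enabled("feat_c"))  # False
```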
  {
    "library": "tensorflow",
    "name": "_get_paddings_constant",
    "source_code": "def _get_paddings_constant(paddings):\n    if isinstance(paddings, tensor_lib.Tensor):\n        return tensor_util.constant_value(paddings, partial=True)\n    elif isinstance(paddings, (list, tuple)):\n        return [_get_paddings_constant(x) for x in paddings]\n    else:\n        return paddings",
    "docstring": "Helper to get the constant values of the paddings arg to pad(). Used under V1 graph mode to facilitate computation of the shape of the output tensor of . Args: paddings: The same paddings arg as passed to pad(). Can be a Tensor, or a nested list or tuple of Tensor and/or numbers. Returns: A nested list or numbers or , in which indicates unknown padding size.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\array_ops.py",
    "ast_data": "FunctionDef name:_get_paddings_constant arg:paddings arguments arg If Call Return return:yes Call If Call Return return:yes Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "caching_allocator_enable",
    "source_code": "def caching_allocator_enable(value: bool=True) -> None:\n    if is_initialized():\n        torch._C._cuda_cudaCachingAllocator_enable(value)",
    "docstring": "Enable or disable the CUDA memory allocator. On by default.",
    "type": "function",
    "file_path": "pytorch\\torch\\cuda\\memory.py",
    "ast_data": "FunctionDef name:caching_allocator_enable arg:value arguments arg If Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_copy_fn",
    "source_code": "def _copy_fn(fn):\n    if not callable(fn):\n        raise TypeError('fn is not callable: %s' % fn)\n    return types.FunctionType(code=fn.__code__, globals=fn.__globals__, name=fn.__name__, argdefs=fn.__defaults__, closure=fn.__closure__)",
    "docstring": "Create a deep copy of fn. Args: fn: a callable Returns: A : a deep copy of fn. Raises: TypeError: if is not a callable.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\distributions\\distribution.py",
    "ast_data": "FunctionDef name:_copy_fn arg:fn arguments arg If Call Raise Call Return return:yes Call"
  },
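The same types.FunctionType construction, sketched standalone; the add function is a placeholder:

```python
# Copying a Python function by rebuilding it from its components,
# mirroring the attributes the helper forwards.
import types

def add(a, b=1):
    return a + b

copy = types.FunctionType(add.__code__, add.__globals__, add.__name__,
                          add.__defaults__, add.__closure__)
print(copy(2))      # 3
print(copy is add)  # False -- a distinct function object
```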
  {
    "library": "matplotlib",
    "name": "_allow_interrupt_macos",
    "source_code": "def _allow_interrupt_macos():\n    return _allow_interrupt(lambda rsock: _macosx.wake_on_fd_write(rsock.fileno()), _macosx.stop)",
    "docstring": "A context manager that allows terminating a plot by sending a SIGINT.",
    "type": "function",
    "file_path": "matplotlib\\lib\\matplotlib\\backends\\backend_macosx.py",
    "ast_data": "FunctionDef name:_allow_interrupt_macos arguments Return return:yes Call arguments arg Call Call"
  },
  {
    "library": "pandas",
    "name": "read",
    "source_code": "def read(self) -> DataFrame | Series:\n    obj: DataFrame | Series\n    with self:\n        if self.engine == 'pyarrow':\n            obj = self._read_pyarrow()\n        elif self.engine == 'ujson':\n            obj = self._read_ujson()\n    return obj",
    "docstring": "Read the whole JSON input into a pandas object.",
    "type": "method",
    "file_path": "pandas\\pandas\\io\\json\\_json.py",
    "ast_data": "FunctionDef name:read arg:self arguments arg With If Compare Assign Call If Compare Assign Call Return return:yes"
  },
  {
    "library": "django",
    "name": "references_field",
    "source_code": "def references_field(self, model_name, name, app_label):\n    return self.references_model(model_name, app_label)",
    "docstring": "Return True if there is a chance this operation references the given field name, with an app label for accuracy. Used for optimization. If in doubt, return True.",
    "type": "method",
    "file_path": "django\\django\\db\\migrations\\operations\\base.py",
    "ast_data": "FunctionDef name:references_field arg:self arg:model_name arg:name arg:app_label arguments arg arg arg arg Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "fit",
    "source_code": "@_fit_context(prefer_skip_nested_validation=True)\ndef fit(self, X, y=None):\n    self._fit(X, handle_unknown=self.handle_unknown, ensure_all_finite='allow-nan')\n    self._set_drop_idx()\n    self._n_features_outs = self._compute_n_features_outs()\n    return self",
    "docstring": "Fit OneHotEncoder to X. Parameters ---------- X : array-like of shape (n_samples, n_features) The data to determine the categories of each feature. y : None Ignored. This parameter exists only for compatibility with :class:. Returns ------- self Fitted encoder.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\preprocessing\\_encoders.py",
    "ast_data": "FunctionDef name:fit arg:self arg:X arg:y arguments arg arg arg Call Call Assign Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_FakeQuantWithMinMaxArgsGradient",
    "source_code": "@ops.RegisterGradient('FakeQuantWithMinMaxArgs')\ndef _FakeQuantWithMinMaxArgsGradient(op: ops.Operation, grad):\n    return fake_quant_with_min_max_args_gradient(grad, op.inputs[0], min=op.get_attr('min'), max=op.get_attr('max'), num_bits=op.get_attr('num_bits'), narrow_range=op.get_attr('narrow_range'))",
    "docstring": "Gradient for FakeQuantWithMinMaxArgs op.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\array_ops.py",
    "ast_data": "FunctionDef name:_FakeQuantWithMinMaxArgsGradient arg:op arg:grad arguments arg arg Return return:yes Call Call Call Call Call Call"
  },
  {
    "library": "matplotlib",
    "name": "get_forward_navigation_events",
    "source_code": "def get_forward_navigation_events(self):\n    return self._forward_navigation_events",
    "docstring": "Get how pan/zoom events are forwarded to Axes below this one.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\axes\\_base.py",
    "ast_data": "FunctionDef name:get_forward_navigation_events arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "most_specific_common_supertype",
    "source_code": "def most_specific_common_supertype(self, types: Sequence[trace.TraceType]) -> Optional['Dict']:\n    if not all((self._has_same_structure(other) for other in types)):\n        return None\n    new_mapping = {}\n    for key in self.mapping.keys():\n        common = self.mapping[key].most_specific_common_supertype([other.mapping[key] for other in types])\n        if common is None:\n            return None\n        else:\n            new_mapping[key] = common\n    return Dict(new_mapping, self._placeholder_type)",
    "docstring": "See base class.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\core\\function\\trace_type\\default_types.py",
    "ast_data": "FunctionDef name:most_specific_common_supertype arg:self arg:types arguments arg arg If Call Call Return return:no Assign For Call Assign Call If Compare Return return:no Assign Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "validate_args",
    "source_code": "def validate_args(fname, args, max_fname_arg_count, compat_args) -> None:\n    _check_arg_length(fname, args, max_fname_arg_count, compat_args)\n    kwargs = dict(zip(compat_args, args))\n    _check_for_default_values(fname, kwargs, compat_args)",
    "docstring": "Checks whether the length of the argument passed into a function has at most arguments and whether or not all of these elements in are set to their default values. Parameters ---------- fname : str The name of the function being passed the parameter args : tuple The parameter passed into a function max_fname_arg_count : int The maximum number of arguments that the function can accept, excluding those in . Used for displaying appropriate error messages. Must be non-negative. compat_args : dict A dictionary of keys and their associated default values. In order to accommodate buggy behaviour in some versions of , where a signature displayed keyword arguments but then passed those arguments **positionally** internally when calling downstream implementations, a dict ensures that the original order of the keyword arguments is enforced. Raises ------ TypeError If contains more values than there are ValueError If contains values that do not correspond to those of the default values specified in",
    "type": "function",
    "file_path": "pandas\\pandas\\util\\_validators.py",
    "ast_data": "FunctionDef name:validate_args arg:fname arg:args arg:max_fname_arg_count arg:compat_args arguments arg arg arg arg Call Assign Call Call Call"
  },
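The dict(zip(compat_args, args)) step is what lets positional values be checked by keyword name. A standalone sketch of that idea; the names sep/index are made up:

```python
# Zip positional values onto their keyword names (in compat_args order),
# then require each supplied value to equal its default.
compat_args = {"sep": ",", "index": True}  # name -> required default
args = (",", True)                         # values passed positionally

kwargs = dict(zip(compat_args, args))      # {'sep': ',', 'index': True}
print(all(kwargs[k] == compat_args[k] for k in kwargs))  # True
```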
  {
    "library": "django",
    "name": "migration_name_fragment",
    "source_code": "@property\ndef migration_name_fragment(self):\n    return None",
    "docstring": "A filename part suitable for automatically naming a migration containing this operation, or None if not applicable.",
    "type": "method",
    "file_path": "django\\django\\db\\migrations\\operations\\base.py",
    "ast_data": "FunctionDef name:migration_name_fragment arg:self arguments arg Return return:no"
  },
  {
    "library": "scipy",
    "name": "find_branch_point",
    "source_code": "def find_branch_point(branch):\n    branch_commits = rev_list('HEAD', 1000)\n    main_commits = set(rev_list(branch, 1000))\n    for branch_commit in branch_commits:\n        if branch_commit in main_commits:\n            return branch_commit\n    raise RuntimeError('Failed to find a common ancestor in the last 1000 commits')",
    "docstring": "Find when the current branch split off from the given branch. It is based off of this Stackoverflow post:",
    "type": "function",
    "file_path": "scipy\\tools\\lint.py",
    "ast_data": "FunctionDef name:find_branch_point arg:branch arguments arg Assign Call Assign Call Call For If Compare Return return:yes Raise Call"
  },
  {
    "library": "matplotlib",
    "name": "minorticks_on",
    "source_code": "def minorticks_on(self):\n    scale = self.get_scale()\n    if scale == 'log':\n        s = self._scale\n        self.set_minor_locator(mticker.LogLocator(s.base, s.subs))\n    elif scale == 'symlog':\n        s = self._scale\n        self.set_minor_locator(mticker.SymmetricalLogLocator(s._transform, s.subs))\n    elif scale == 'asinh':\n        s = self._scale\n        self.set_minor_locator(mticker.AsinhLocator(s.linear_width, base=s._base, subs=s._subs))\n    elif scale == 'logit':\n        self.set_minor_locator(mticker.LogitLocator(minor=True))\n    else:\n        self.set_minor_locator(mticker.AutoMinorLocator())",
    "docstring": "Display default minor ticks on the Axis, depending on the scale (). Scales use specific minor locators: - log: - symlog: - asinh: - logit: - default: Displaying minor ticks may reduce performance; you may turn them off using if drawing speed is a problem.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\axis.py",
    "ast_data": "FunctionDef name:minorticks_on arg:self arguments arg Assign Call If Compare Assign Call Call If Compare Assign Call Call If Compare Assign Call Call If Compare Call Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "qualify_name",
    "source_code": "def qualify_name(self, name: str) -> str:\n    if self.name is not None:\n        return f'{self.name}_{name}'\n    return name",
    "docstring": "Prepend the given name with the graph name if any.",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\graph.py",
    "ast_data": "FunctionDef name:qualify_name arg:self arg:name arguments arg arg If Compare Return return:yes Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "_get_xdg_cache_dir",
    "source_code": "def _get_xdg_cache_dir():\n    return os.environ.get('XDG_CACHE_HOME') or str(Path.home() / '.cache')",
    "docstring": "Return the XDG cache directory, according to the XDG base directory spec:",
    "type": "function",
    "file_path": "matplotlib\\lib\\matplotlib\\__init__.py",
    "ast_data": "FunctionDef name:_get_xdg_cache_dir arguments Return return:yes BoolOp Call Call Call"
  },
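The same fallback restated standalone: prefer $XDG_CACHE_HOME and fall back to ~/.cache:

```python
# XDG cache-dir lookup per the base directory spec: env var first,
# then the ~/.cache default.
import os
from pathlib import Path

cache_dir = os.environ.get("XDG_CACHE_HOME") or str(Path.home() / ".cache")
print(cache_dir)
```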
  {
    "library": "django",
    "name": "AdminErrorList",
    "source_code": "class AdminErrorList(forms.utils.ErrorList):\n\n    def __init__(self, form, inline_formsets):\n        super().__init__()\n        if form.is_bound:\n            self.extend(form.errors.values())\n            for inline_formset in inline_formsets:\n                self.extend(inline_formset.non_form_errors())\n                for errors_in_inline_form in inline_formset.errors:\n                    self.extend(errors_in_inline_form.values())",
    "docstring": "Store errors for the form/formsets in an add/change view.",
    "type": "class",
    "file_path": "django\\django\\contrib\\admin\\helpers.py",
    "ast_data": "ClassDef name:AdminErrorList FunctionDef name:__init__ arg:self arg:form arg:inline_formsets arguments arg arg arg Call Call If Call Call For Call Call For Call Call"
  },
  {
    "library": "pytorch",
    "name": "_flatten_dense_tensors",
    "source_code": "def _flatten_dense_tensors(tensors):\n    return torch._C._nn.flatten_dense_tensors(tensors)",
    "docstring": "Flatten dense tensors into a contiguous 1D buffer. Assume tensors are of same dense type. Since inputs are dense, the resulting tensor will be a concatenated 1D buffer. Element-wise operation on this buffer will be equivalent to operating individually. Args: tensors (Iterable[Tensor]): dense tensors to flatten. Returns: A contiguous 1D buffer containing input tensors.",
    "type": "function",
    "file_path": "pytorch\\torch\\_utils.py",
    "ast_data": "FunctionDef name:_flatten_dense_tensors arg:tensors arguments arg Return return:yes Call"
  },
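A sketch of what the flattening amounts to, using only public torch ops rather than the private torch._C._nn binding:

```python
# Concatenate flattened views into one contiguous 1D buffer.
import torch

tensors = [torch.ones(2, 2), torch.zeros(3)]
flat = torch.cat([t.reshape(-1) for t in tensors])  # shape (7,)
print(flat)  # tensor([1., 1., 1., 1., 0., 0., 0.])
```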
  {
    "library": "django",
    "name": "_alter_column_default_sql",
    "source_code": "def _alter_column_default_sql(self, model, old_field, new_field, drop=False):\n    new_default = self.effective_default(new_field)\n    default = self._column_default_sql(new_field)\n    params = [new_default]\n    if drop:\n        params = []\n    elif self.connection.features.requires_literal_defaults:\n        default = self.prepare_default(new_default)\n        params = []\n    new_db_params = new_field.db_parameters(connection=self.connection)\n    if drop:\n        if new_field.null:\n            sql = self.sql_alter_column_no_default_null\n        else:\n            sql = self.sql_alter_column_no_default\n    else:\n        sql = self.sql_alter_column_default\n    return (sql % {'column': self.quote_name(new_field.column), 'type': new_db_params['type'], 'default': default}, params)",
    "docstring": "Hook to specialize column default alteration. Return a (sql, params) fragment to add or drop (depending on the drop argument) a default to new_field's column.",
    "type": "method",
    "file_path": "django\\django\\db\\backends\\base\\schema.py",
    "ast_data": "FunctionDef name:_alter_column_default_sql arg:self arg:model arg:old_field arg:new_field arg:drop arguments arg arg arg arg arg Assign Call Assign Call Assign If Assign If Assign Call Assign Assign Call If If Assign Assign Assign Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "polygamma",
    "source_code": "def polygamma(n, x):\n    n, x = (asarray(n), asarray(x))\n    fac2 = (-1.0) ** (n + 1) * gamma(n + 1.0) * zeta(n + 1, x)\n    return where(n == 0, psi(x), fac2)",
    "docstring": "Polygamma functions. Defined as :math: where :math: is the function. See [dlmf]_ for details. Parameters ---------- n : array_like The order of the derivative of the digamma function; must be integral x : array_like Real valued input Returns ------- ndarray Function results See Also -------- digamma References ---------- .. [dlmf] NIST, Digital Library of Mathematical Functions, Examples -------- >>> from scipy import special >>> x = [2, 3, 25.5] >>> special.polygamma(1, x) array([ 0.64493407, 0.39493407, 0.03999467]) >>> special.polygamma(0, x) == special.psi(x) array([ True, True, True], dtype=bool)",
    "type": "function",
    "file_path": "scipy\\scipy\\special\\_basic.py",
    "ast_data": "FunctionDef name:polygamma arg:n arg:x arguments arg arg Assign Call Call Assign Call Call Return return:yes Call Compare Call"
  },
  {
    "library": "numpy",
    "name": "IntelCCompiler",
    "source_code": "class IntelCCompiler(UnixCCompiler):\n    compiler_type = 'intel'\n    cc_exe = 'icc'\n    cc_args = 'fPIC'\n\n    def __init__(self, verbose=0, dry_run=0, force=0):\n        UnixCCompiler.__init__(self, verbose, dry_run, force)\n        v = self.get_version()\n        mpopt = 'openmp' if v and v < '15' else 'qopenmp'\n        self.cc_exe = 'icc -fPIC -fp-model strict -O3 -fomit-frame-pointer -{}'.format(mpopt)\n        compiler = self.cc_exe\n        if platform.system() == 'Darwin':\n            shared_flag = '-Wl,-undefined,dynamic_lookup'\n        else:\n            shared_flag = '-shared'\n        self.set_executables(compiler=compiler, compiler_so=compiler, compiler_cxx=compiler, archiver='xiar' + ' cru', linker_exe=compiler + ' -shared-intel', linker_so=compiler + ' ' + shared_flag + ' -shared-intel')",
    "docstring": "A modified Intel compiler compatible with a GCC-built Python.",
    "type": "class",
    "file_path": "numpy\\numpy\\distutils\\intelccompiler.py",
    "ast_data": "ClassDef name:IntelCCompiler Assign Assign Assign FunctionDef name:__init__ arg:self arg:verbose arg:dry_run arg:force arguments arg arg arg arg Call Assign Call Assign BoolOp Compare Assign Call Assign If Compare Call Assign Assign Call"
  },
  {
    "library": "pytorch",
    "name": "size",
    "source_code": "def size(self, node: IRNode, start_index: int, end_index: Optional[int]=None, default_value: int=0) -> str:\n    if node is None:\n        return str(default_value)\n    start_index = _normalize_idx(start_index, len(node.get_size()))\n    if end_index is None:\n        end_index = start_index\n    end_index = _normalize_idx(end_index, len(node.get_size()))\n    sizes = [self.find_symbol(node, 'size', dim=i) or node.get_size()[i] for i in range(start_index, end_index + 1)]\n    if len(sizes) == 0:\n        return str(default_value)\n    sizes = [symbols(v) if isinstance(v, str) else v for v in sizes]\n    val = sympy_product(sizes)\n    return val",
    "docstring": "Hook called from template code to get the size of an arg. Generates code which represents size of a given node in [start_index, end_index). If node is None, returns default_value. TODO: Will add needed args to pass it in if it is dynamic.",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\codegen\\cuda\\cuda_kernel.py",
    "ast_data": "FunctionDef name:size arg:self arg:node arg:start_index arg:end_index arg:default_value arguments arg arg arg arg arg If Compare Return return:yes Call Assign Call Call Call If Compare Assign Assign Call Call Call Assign BoolOp Call Call Call If Compare Call Return return:yes Call Assign Call Call Assign Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "ROCmGemmConfig",
    "source_code": "@dataclasses.dataclass\nclass ROCmGemmConfig(GemmConfig):\n    matrix_instr_nonkdim: int = 16\n    waves_per_eu: int = 0\n    kpack: int = 2",
    "docstring": "ROCm subclass for GEMMs, with AMD backend specific tuneable kernargs",
    "type": "class",
    "file_path": "pytorch\\torch\\_inductor\\template_heuristics.py",
    "ast_data": "ClassDef name:ROCmGemmConfig"
  },
  {
    "library": "scikit-learn",
    "name": "ensure_common_namespace_device",
    "source_code": "def ensure_common_namespace_device(reference, *arrays):\n    xp, is_array_api = get_namespace(reference)\n    if is_array_api:\n        device_ = device(reference)\n        return [xp.asarray(a, device=device_) for a in arrays]\n    else:\n        return arrays",
    "docstring": "Ensure that all arrays use the same namespace and device as reference. If necessary the arrays are moved to the same namespace and device as the reference array. Parameters ---------- reference : array Reference array. *arrays : array Arrays to check. Returns ------- arrays : list Arrays with the same namespace and device as reference.",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\utils\\_array_api.py",
    "ast_data": "FunctionDef name:ensure_common_namespace_device arg:reference arguments arg arg Assign Call If Assign Call Return return:yes Call Return return:yes"
  },
  {
    "library": "django",
    "name": "__init__",
    "source_code": "def __init__(self, children=None, connector=None, negated=False):\n    self.children = children[:] if children else []\n    self.connector = connector or self.default\n    self.negated = negated",
    "docstring": "Construct a new Node. If no connector is given, use the default.",
    "type": "method",
    "file_path": "django\\django\\utils\\tree.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:children arg:connector arg:negated arguments arg arg arg arg Assign Assign BoolOp Assign"
  },
  {
    "library": "tensorflow",
    "name": "_extract_graph_summary",
    "source_code": "def _extract_graph_summary(graph_def):\n    name_to_input_name = {}\n    name_to_node = {}\n    name_to_seq_num = {}\n    seq = 0\n    for node in graph_def.node:\n        n = _node_name(node.name)\n        name_to_node[n] = node\n        name_to_input_name[n] = [_node_name(x) for x in node.input]\n        if '_class' in node.attr:\n            for colocated_node_name in node.attr['_class'].list.s:\n                name_to_input_name[n].append(_get_colocated_node_name(colocated_node_name))\n        name_to_seq_num[n] = seq\n        seq += 1\n    return (name_to_input_name, name_to_node, name_to_seq_num)",
    "docstring": "Extracts useful information from the graph and returns them.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\graph_util_impl.py",
    "ast_data": "FunctionDef name:_extract_graph_summary arg:graph_def arguments arg Assign Assign Assign Assign For Assign Call Assign Assign Call If Compare For Call Call Assign Return return:yes"
  },
  {
    "library": "kornia",
    "name": "to",
    "source_code": "def to(self, device: Optional[torch.device]=None, dtype: Optional[torch.dtype]=None) -> Boxes3D:\n    if dtype is not None and (not _is_floating_point_dtype(dtype)):\n        raise ValueError('Boxes must be in floating point')\n    self._data = self._data.to(device=device, dtype=dtype)\n    return self",
    "docstring": "Like :func: method.",
    "type": "method",
    "file_path": "kornia\\kornia\\geometry\\boxes.py",
    "ast_data": "FunctionDef name:to arg:self arg:device arg:dtype arguments arg arg arg If BoolOp Compare Call Raise Call Assign Call Return return:yes"
  },
  {
    "library": "scipy",
    "name": "_init_freq_conv_axes",
    "source_code": "def _init_freq_conv_axes(in1, in2, mode, axes, sorted_axes=False):\n    s1 = in1.shape\n    s2 = in2.shape\n    noaxes = axes is None\n    _, axes = _init_nd_shape_and_axes(in1, shape=None, axes=axes)\n    if not noaxes and (not len(axes)):\n        raise ValueError('when provided, axes cannot be empty')\n    axes = [a for a in axes if s1[a] != 1 and s2[a] != 1]\n    if sorted_axes:\n        axes.sort()\n    if not all((s1[a] == s2[a] or s1[a] == 1 or s2[a] == 1 for a in range(in1.ndim) if a not in axes)):\n        raise ValueError(f'incompatible shapes for in1 and in2: {s1} and {s2}')\n    if _inputs_swap_needed(mode, s1, s2, axes=axes):\n        in1, in2 = (in2, in1)\n    return (in1, in2, axes)",
    "docstring": "Handle the axes argument for frequency-domain convolution. Returns the inputs and axes in a standard form, eliminating redundant axes, swapping the inputs if necessary, and checking for various potential errors. Parameters ---------- in1 : array First input. in2 : array Second input. mode : str {'full', 'valid', 'same'}, optional A string indicating the size of the output. See the documentation for more information. axes : list of ints Axes over which to compute the FFTs. sorted_axes : bool, optional If , sort the axes. Default is , do not sort. Returns ------- in1 : array The first input, possible swapped with the second input. in2 : array The second input, possible swapped with the first input. axes : list of ints Axes over which to compute the FFTs.",
    "type": "function",
    "file_path": "scipy\\scipy\\signal\\_signaltools.py",
    "ast_data": "FunctionDef name:_init_freq_conv_axes arg:in1 arg:in2 arg:mode arg:axes arg:sorted_axes arguments arg arg arg arg arg Assign Assign Assign Compare Assign Call If BoolOp Call Raise Call Assign BoolOp Compare Compare If Call If Call BoolOp Compare Compare Compare Call Compare Raise Call If Call Assign Return return:yes"
  },
  {
    "library": "scipy",
    "name": "_pow1pm1",
    "source_code": "def _pow1pm1(x, y):\n    return np.expm1(sc.xlog1py(y, x))",
    "docstring": "Compute (1 + x)**y - 1. Uses expm1 and xlog1py to avoid loss of precision when (1 + x)**y is close to 1. Note that the inverse of this function with respect to x is ``. That is, if t = _pow1pm1(x, y) then x = _pow1pm1(t, 1/y)",
    "type": "function",
    "file_path": "scipy\\scipy\\stats\\_continuous_distns.py",
    "ast_data": "FunctionDef name:_pow1pm1 arg:x arg:y arguments arg arg Return return:yes Call Call"
  },
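A numerical sketch of the identity the docstring relies on, written in the equivalent expm1/log1p form (scipy's xlog1py(y, x) equals y * log1p(x)):

```python
# (1 + x)**y - 1 computed stably as expm1(y * log1p(x)),
# plus a check of the inverse property from the docstring.
import numpy as np

def pow1pm1(x, y):
    return np.expm1(y * np.log1p(x))

x, y = 1e-12, 3.0
t = pow1pm1(x, y)
print(t)                                   # ~3e-12, no cancellation
print(np.isclose(pow1pm1(t, 1.0 / y), x))  # True: inverse w.r.t. x
```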
  {
    "library": "pytorch",
    "name": "__reduce__",
    "source_code": "def __reduce__(self):\n    dict_without_graph = self.__dict__.copy()\n    python_code = self.recompile()\n    import_block = _format_import_block(python_code.globals, sys_importer)\n    del dict_without_graph['_graph']\n    return (reduce_graph_module, (dict_without_graph, import_block))",
    "docstring": "Serialization of GraphModule. We serialize only the generated code, not the underlying ``",
    "type": "method",
    "file_path": "pytorch\\torch\\fx\\graph_module.py",
    "ast_data": "FunctionDef name:__reduce__ arg:self arguments arg Assign Call Assign Call Assign Call Return return:yes"
  },
  {
    "library": "django",
    "name": "_get_current_year",
    "source_code": "def _get_current_year(self, date):\n    return date.replace(month=1, day=1)",
    "docstring": "Return the start date of the current interval.",
    "type": "method",
    "file_path": "django\\django\\views\\generic\\dates.py",
    "ast_data": "FunctionDef name:_get_current_year arg:self arg:date arguments arg arg Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "get_angle",
    "source_code": "def get_angle(self):\n    return self._header[b'ItalicAngle']",
    "docstring": "Return the fontangle as float.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\_afm.py",
    "ast_data": "FunctionDef name:get_angle arg:self arguments arg Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "__call__",
    "source_code": "def __call__(self):\n    return list(self)",
    "docstring": "Return a list of the registered colormap names. This exists only for backward-compatibility in which had a ``.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\cm.py",
    "ast_data": "FunctionDef name:__call__ arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "NullFormatter",
    "source_code": "class NullFormatter(Formatter):\n\n    def __call__(self, x, pos=None):\n        return ''",
    "docstring": "Always return the empty string.",
    "type": "class",
    "file_path": "matplotlib\\lib\\matplotlib\\ticker.py",
    "ast_data": "ClassDef name:NullFormatter FunctionDef name:__call__ arg:self arg:x arg:pos arguments arg arg arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "triplet_margin_loss",
    "source_code": "def triplet_margin_loss(anchor: Tensor, positive: Tensor, negative: Tensor, margin: float=1.0, p: float=2, eps: float=1e-06, swap: bool=False, size_average: Optional[bool]=None, reduce: Optional[bool]=None, reduction: str='mean') -> Tensor:\n    if has_torch_function_variadic(anchor, positive, negative):\n        return handle_torch_function(triplet_margin_loss, (anchor, positive, negative), anchor, positive, negative, margin=margin, p=p, eps=eps, swap=swap, size_average=size_average, reduce=reduce, reduction=reduction)\n    if size_average is not None or reduce is not None:\n        reduction_enum = _Reduction.legacy_get_enum(size_average, reduce)\n    else:\n        reduction_enum = _Reduction.get_enum(reduction)\n    if margin <= 0:\n        raise ValueError(f'margin must be greater than 0, got {margin}')\n    return torch.triplet_margin_loss(anchor, positive, negative, margin, p, eps, swap, reduction_enum)",
    "docstring": "Compute the triplet loss between given input tensors and a margin greater than 0. See :class: for details.",
    "type": "function",
    "file_path": "pytorch\\torch\\nn\\functional.py",
    "ast_data": "FunctionDef name:triplet_margin_loss arg:anchor arg:positive arg:negative arg:margin arg:p arg:eps arg:swap arg:size_average arg:reduce arg:reduction arguments arg arg arg arg arg arg arg arg arg arg If Call Return return:yes Call If BoolOp Compare Compare Assign Call Assign Call If Compare Raise Call Return return:yes Call"
  },
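Minimal usage sketch of the functional form; the tensor sizes are arbitrary:

```python
# Calling the functional triplet margin loss and backpropagating through it.
import torch
import torch.nn.functional as F

anchor = torch.randn(8, 16, requires_grad=True)
positive = torch.randn(8, 16)
negative = torch.randn(8, 16)

loss = F.triplet_margin_loss(anchor, positive, negative, margin=1.0, p=2)
loss.backward()
print(loss.item())
```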
  {
    "library": "pandas",
    "name": "_validate_dtype",
    "source_code": "@final\n@classmethod\ndef _validate_dtype(cls, dtype) -> DtypeObj | None:\n    if dtype is not None:\n        dtype = pandas_dtype(dtype)\n        if dtype.kind == 'V':\n            raise NotImplementedError(f'compound dtypes are not implemented in the {cls.__name__} constructor')\n    return dtype",
    "docstring": "validate the passed dtype",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\generic.py",
    "ast_data": "FunctionDef name:_validate_dtype arg:cls arg:dtype arguments arg arg If Compare Assign Call If Compare Raise Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_GetInputDtypes",
    "source_code": "def _GetInputDtypes(func):\n    if isinstance(func, function._DefinedFunction):\n        return func.declared_input_types\n    num_non_captured_inputs = len(func.inputs) - len(func.captured_inputs)\n    inputs_without_captured = func.inputs[:num_non_captured_inputs]\n    return [t.dtype for t in inputs_without_captured]",
    "docstring": "Returns the input dtypes of func, excluding dtypes for captured inputs.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\functional_ops.py",
    "ast_data": "FunctionDef name:_GetInputDtypes arg:func arguments arg If Call Return return:yes Assign Call Call Assign Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "has_tensor_arg",
    "source_code": "def has_tensor_arg(schema: _C.FunctionSchema) -> bool:\n    return any((is_tensor_like_type(a.type) or is_tensorlist_like_type(a.type) for a in schema.arguments))",
    "docstring": "Given a schema, returns True if the schema has a Tensor arg. A Tensor arg is any arg with a type annotation that might involve Tensor.",
    "type": "function",
    "file_path": "pytorch\\torch\\_library\\utils.py",
    "ast_data": "FunctionDef name:has_tensor_arg arg:schema arguments arg Return return:yes Call BoolOp Call Call"
  },
  {
    "library": "pytorch",
    "name": "_get_model_bytecode_version",
    "source_code": "def _get_model_bytecode_version(f_input) -> int:\n    if isinstance(f_input, (str, os.PathLike)):\n        if not os.path.exists(f_input):\n            raise ValueError(f'The provided filename {f_input} does not exist')\n        if os.path.isdir(f_input):\n            raise ValueError(f'The provided filename {f_input} is a directory')\n    if isinstance(f_input, (str, os.PathLike)):\n        return torch._C._get_model_bytecode_version(os.fspath(f_input))\n    else:\n        return torch._C._get_model_bytecode_version_from_buffer(f_input.read())",
    "docstring": "Take a file-like object to return an integer. Args: f_input: a file-like object (has to implement read, readline, tell, and seek), or a string containing a file name Returns: version: An integer. If the integer is -1, the version is invalid. A warning will show in the log. Example: .. testcode:: from torch.jit.mobile import _get_model_bytecode_version # Get bytecode version from a saved file path version = _get_model_bytecode_version(\"path/to/model.ptl\")",
    "type": "function",
    "file_path": "pytorch\\torch\\jit\\mobile\\__init__.py",
    "ast_data": "FunctionDef name:_get_model_bytecode_version arg:f_input arguments arg If Call If Call Raise Call If Call Raise Call If Call Return return:yes Call Call Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "concentration0",
    "source_code": "@property\ndef concentration0(self):\n    return self._concentration0",
    "docstring": "Concentration parameter associated with a outcome.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\distributions\\beta.py",
    "ast_data": "FunctionDef name:concentration0 arg:self arguments arg Return return:yes"
  },
  {
    "library": "numpy",
    "name": "_getsubdtype",
    "source_code": "@classmethod\ndef _getsubdtype(cls, val):\n    return np.array(val).dtype.type",
    "docstring": "Returns the type of the dtype of the input variable.",
    "type": "method",
    "file_path": "numpy\\numpy\\lib\\_iotools.py",
    "ast_data": "FunctionDef name:_getsubdtype arg:cls arg:val arguments arg arg Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "__iter__",
    "source_code": "def __iter__(self):\n    while self._read():\n        yield self._output()",
    "docstring": "Iterate through the pages of the file. Yields ------ Page Details of all the text and box objects on the page. The Page tuple contains lists of Text and Box tuples and the page dimensions, and the Text and Box tuples contain coordinates transformed into a standard Cartesian coordinate system at the dpi value given when initializing. The coordinates are floating point numbers, but otherwise precision is not lost and coordinate values are not clipped to integers.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\dviread.py",
    "ast_data": "FunctionDef name:__iter__ arg:self arguments arg While Call Call"
  },
  {
    "library": "matplotlib",
    "name": "get_weight",
    "source_code": "def get_weight(self):\n    return self._header[b'Weight']",
    "docstring": "Return the font weight, e.g., 'Bold' or 'Roman'.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\_afm.py",
    "ast_data": "FunctionDef name:get_weight arg:self arguments arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "run_b",
    "source_code": "def run_b(self, mod: torch.fx.GraphModule, inputs: Tensors, report_idx: int=-1) -> TensorOrTensors:\n    raise RuntimeError('run_b() is not implemented.')",
    "docstring": "Run with and generate output. The output will be compared with output of run_a().",
    "type": "method",
    "file_path": "pytorch\\torch\\fx\\passes\\net_min_base.py",
    "ast_data": "FunctionDef name:run_b arg:self arg:mod arg:inputs arg:report_idx arguments arg arg arg arg Raise Call"
  },
  {
    "library": "tensorflow",
    "name": "verify_tensor_all_finite",
    "source_code": "@tf_export(v1=['debugging.assert_all_finite', 'verify_tensor_all_finite'])\n@dispatch.add_dispatch_support\n@deprecation.deprecated_endpoints('verify_tensor_all_finite')\ndef verify_tensor_all_finite(t=None, msg=None, name=None, x=None, message=None):\n    x = deprecation.deprecated_argument_lookup('x', x, 't', t)\n    message = deprecation.deprecated_argument_lookup('message', message, 'msg', msg)\n    return verify_tensor_all_finite_v2(x, message, name)",
    "docstring": "Assert that the tensor does not contain any NaN's or Inf's. Args: t: Tensor to check. msg: Message to log on failure. name: A name for this operation (optional). x: Alias for t. message: Alias for msg. Returns: Same tensor as .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\numerics.py",
    "ast_data": "FunctionDef name:verify_tensor_all_finite arg:t arg:msg arg:name arg:x arg:message arguments arg arg arg arg arg Assign Call Assign Call Return return:yes Call Call Call"
  },
  {
    "library": "matplotlib",
    "name": "_parse_kern_pairs",
    "source_code": "def _parse_kern_pairs(fh):\n    line = next(fh)\n    if not line.startswith(b'StartKernPairs'):\n        raise RuntimeError('Bad start of kern pairs data: %s' % line)\n    d = {}\n    for line in fh:\n        line = line.rstrip()\n        if not line:\n            continue\n        if line.startswith(b'EndKernPairs'):\n            next(fh)\n            return d\n        vals = line.split()\n        if len(vals) != 4 or vals[0] != b'KPX':\n            raise RuntimeError('Bad kern pairs line: %s' % line)\n        c1, c2, val = (_to_str(vals[1]), _to_str(vals[2]), _to_float(vals[3]))\n        d[c1, c2] = val\n    raise RuntimeError('Bad kern pairs parse')",
    "docstring": "Return a kern pairs dictionary; keys are (*char1*, *char2*) tuples and values are the kern pair value. For example, a kern pairs line like `` will be represented as:: d[ ('A', 'y') ] = -50",
    "type": "function",
    "file_path": "matplotlib\\lib\\matplotlib\\_afm.py",
    "ast_data": "FunctionDef name:_parse_kern_pairs arg:fh arguments arg Assign Call If Call Raise Call Assign For Assign Call If If Call Call Return return:yes Assign Call If BoolOp Compare Call Compare Raise Call Assign Call Call Call Assign Raise Call"
  },
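One KPX record parsed standalone, mirroring the split/convert steps above; the ('A', 'y') pair is the docstring's own example:

```python
# Parse a single AFM kern-pair line the way the helper does.
line = b"KPX A y -50"
vals = line.split()
assert len(vals) == 4 and vals[0] == b"KPX"
c1, c2, val = vals[1].decode(), vals[2].decode(), float(vals[3].decode())
print({(c1, c2): val})  # {('A', 'y'): -50.0}
```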
  {
    "library": "tensorflow",
    "name": "_reset_config",
    "source_code": "def _reset_config(self):\n    pass",
    "docstring": "Resets the configuration in the column. Some feature columns e.g. embedding or shared embedding columns might have some state that is needed to be reset sometimes. Use this method in that scenario.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\feature_column\\feature_column.py",
    "ast_data": "FunctionDef name:_reset_config arg:self arguments arg"
  },
  {
    "library": "pytorch",
    "name": "ObservationType",
    "source_code": "class ObservationType(Enum):\n    OUTPUT_USE_DIFFERENT_OBSERVER_AS_INPUT = 0\n    'this means input and output are observed with different observers, based\\n    on qconfig.activation\\n    example: conv, linear, softmax\\n    '\n    OUTPUT_SHARE_OBSERVER_WITH_INPUT = 1\n    'this means the output will use the same observer instance as input, based\\n    on qconfig.activation\\n    example: torch.cat, maxpool\\n    '\n    INPUT_OUTPUT_NOT_OBSERVED = 2\n    'this means the input and output are never observed\\n    example: x.shape, x.size\\n    '",
    "docstring": "An enum that represents different ways of how an operator/operator pattern should be observed",
    "type": "class",
    "file_path": "pytorch\\torch\\ao\\quantization\\backend_config\\backend_config.py",
    "ast_data": "ClassDef name:ObservationType Assign Assign Assign"
  },
  {
    "library": "matplotlib",
    "name": "set_fontfamily",
    "source_code": "def set_fontfamily(self, fontname):\n    self._fontproperties.set_family(fontname)\n    self.stale = True",
    "docstring": "Set the font family. Can be either a single string, or a list of strings in decreasing priority. Each string may be either a real font name or a generic font class name. If the latter, the specific font names will be looked up in the corresponding rcParams. If a instance is constructed with `font.familyset_fontfamily()Text` instance. Parameters ---------- fontname : {FONTNAME, 'serif', 'sans-serif', 'cursive', 'fantasy', 'monospace'} See Also -------- .font_manager.FontProperties.set_family",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\text.py",
    "ast_data": "FunctionDef name:set_fontfamily arg:self arg:fontname arguments arg arg Call Assign"
  },
  {
    "library": "pytorch",
    "name": "_backport_for_mobile_to_buffer",
    "source_code": "def _backport_for_mobile_to_buffer(f_input, to_version):\n    if isinstance(f_input, (str, os.PathLike)):\n        if not os.path.exists(f_input):\n            raise ValueError(f'The provided filename {f_input} does not exist')\n        if os.path.isdir(f_input):\n            raise ValueError(f'The provided filename {f_input} is a directory')\n    if isinstance(f_input, (str, os.PathLike)):\n        return torch._C._backport_for_mobile_to_buffer(os.fspath(f_input), to_version)\n    else:\n        return torch._C._backport_for_mobile_from_buffer_to_buffer(f_input.read(), to_version)",
    "docstring": "Take a string containing a file name (file-like object). Args: f_input: a file-like object (has to implement read, readline, tell, and seek), or a string containing a file name",
    "type": "function",
    "file_path": "pytorch\\torch\\jit\\mobile\\__init__.py",
    "ast_data": "FunctionDef name:_backport_for_mobile_to_buffer arg:f_input arg:to_version arguments arg arg If Call If Call Raise Call If Call Raise Call If Call Return return:yes Call Call Return return:yes Call Call"
  },
  {
    "library": "authlib",
    "name": "OAuth1Auth",
    "source_code": "class OAuth1Auth(AuthBase, ClientAuth):\n\n    def __call__(self, req):\n        url, headers, body = self.prepare(req.method, req.url, req.headers, req.body)\n        req.url = to_native(url)\n        req.prepare_headers(headers)\n        if body:\n            req.body = body\n        return req",
    "docstring": "Signs the request using OAuth 1 (RFC5849).",
    "type": "class",
    "file_path": "authlib\\authlib\\integrations\\requests_client\\oauth1_session.py",
    "ast_data": "ClassDef name:OAuth1Auth FunctionDef name:__call__ arg:self arg:req arguments arg arg Assign Call Assign Call Call If Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "GlobalStepWaiterHook",
    "source_code": "@tf_export(v1=['train.GlobalStepWaiterHook'])\nclass GlobalStepWaiterHook(session_run_hook.SessionRunHook):\n\n    def __init__(self, wait_until_step):\n        self._wait_until_step = wait_until_step\n\n    def begin(self):\n        self._worker_is_started = False\n        self._global_step_tensor = training_util._get_or_create_global_step_read()\n        if self._global_step_tensor is None:\n            raise RuntimeError('Global step should be created to use _GlobalStepWaiterHook.')\n\n    def before_run(self, run_context):\n        if self._worker_is_started:\n            return None\n        if self._wait_until_step <= 0:\n            self._worker_is_started = True\n            return None\n        logging.info('Waiting for global step %d before starting training.', self._wait_until_step)\n        last_logged_step = 0\n        while True:\n            current_step = run_context.session.run(self._global_step_tensor)\n            if current_step >= self._wait_until_step:\n                self._worker_is_started = True\n                return None\n            if current_step - last_logged_step > 1000:\n                logging.info('Waiting for global step %d before starting training. Current step is %d.', self._wait_until_step, current_step)\n                last_logged_step = current_step\n            time.sleep(0.5)",
    "docstring": "Delays execution until global step reaches . This hook delays execution until global step reaches to . It is used to gradually start workers in distributed settings. One example usage would be setting assuming that task_id=0 is the chief.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\training\\basic_session_run_hooks.py",
    "ast_data": "ClassDef name:GlobalStepWaiterHook FunctionDef name:__init__ arg:self arg:wait_until_step arguments arg arg Assign FunctionDef name:begin arg:self arguments arg Assign Assign Call If Compare Raise Call FunctionDef name:before_run arg:self arg:run_context arguments arg arg If Return return:no If Compare Assign Return return:no Call Assign While Assign Call If Compare Assign Return return:no If Compare Call Assign Call Call"
  },
  {
    "library": "matplotlib",
    "name": "long_axis",
    "source_code": "@property\ndef long_axis(self):\n    if self.orientation == 'vertical':\n        return self.ax.yaxis\n    return self.ax.xaxis",
    "docstring": "Axis that has decorations (ticks, etc) on it.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\colorbar.py",
    "ast_data": "FunctionDef name:long_axis arg:self arguments arg If Compare Return return:yes Return return:yes"
  },
  {
    "library": "django",
    "name": "kml",
    "source_code": "@property\ndef kml(self):\n    return '<MultiGeometry>%s</MultiGeometry>' % ''.join((g.kml for g in self))",
    "docstring": "Return the KML for this Geometry Collection.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\geos\\collections.py",
    "ast_data": "FunctionDef name:kml arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_should_act_as_resource_variable",
    "source_code": "def _should_act_as_resource_variable(self):\n    pass",
    "docstring": "Pass resource_variable_ops.is_resource_variable check.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\ps_values.py",
    "ast_data": "FunctionDef name:_should_act_as_resource_variable arg:self arguments arg"
  },
  {
    "library": "pytorch",
    "name": "get_top_partitions",
    "source_code": "def get_top_partitions(partitions: list[Partition]) -> list[Partition]:\n    top_partitions = [partition for partition in partitions if len(partition.parents) == 0]\n    return top_partitions",
    "docstring": "This function is to return all the partitions without parents as the starting points of all the paths",
    "type": "function",
    "file_path": "pytorch\\torch\\fx\\experimental\\partitioner_utils.py",
    "ast_data": "FunctionDef name:get_top_partitions arg:partitions arguments arg Assign Compare Call Return return:yes"
  },
  {
    "library": "django",
    "name": "spatialite_version_tuple",
    "source_code": "def spatialite_version_tuple(self):\n    version = self.spatialite_version()\n    return (version, *get_version_tuple(version))",
    "docstring": "Return the SpatiaLite version as a tuple (version string, major, minor, subminor).",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\db\\backends\\spatialite\\operations.py",
    "ast_data": "FunctionDef name:spatialite_version_tuple arg:self arguments arg Assign Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "read",
    "source_code": "def read(self, index, name=None):\n    return self._implementation.read(index, name=name)",
    "docstring": "Read the value at location in the TensorArray. Args: index: 0-D. int32 tensor with the index to read from. name: A name for the operation (optional). Returns: The tensor at index .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\tensor_array_ops.py",
    "ast_data": "FunctionDef name:read arg:self arg:index arg:name arguments arg arg arg Return return:yes Call"
  },
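A short illustrative round trip with the public `tf.TensorArray` API, writing two rows and reading one back by index:

```python
# TensorArray round trip: write, then read a row back by its index.
import tensorflow as tf

ta = tf.TensorArray(dtype=tf.float32, size=2)
ta = ta.write(0, tf.constant([1.0, 2.0]))  # write returns a new TensorArray
ta = ta.write(1, tf.constant([3.0, 4.0]))
print(ta.read(1))  # tf.Tensor([3. 4.], shape=(2,), dtype=float32)
```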
  {
    "library": "pytorch",
    "name": "_unflatten_as_params",
    "source_code": "@contextlib.contextmanager\ndef _unflatten_as_params(state: _FSDPState, module: nn.Module) -> Generator:\n    handle = _module_handle(state, module)\n    if not handle:\n        yield\n    else:\n        _deregister_flat_param(state, module)\n        try:\n            with handle.unflatten_as_params():\n                yield\n        finally:\n            if not handle._use_orig_params:\n                _register_flat_param(state, module)",
    "docstring": "Assumes that the flattened parameter is unsharded. When in the context, de-registers the flattened parameter and unflattens the original parameters as `` views into the flattened parameter.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\fsdp\\_unshard_param_utils.py",
    "ast_data": "FunctionDef name:_unflatten_as_params arg:state arg:module arguments arg arg Assign Call If Call Try With Call If Call"
  },
  {
    "library": "scipy",
    "name": "_matmul_sparse",
    "source_code": "def _matmul_sparse(self, other):\n    if self.ndim < 3 and other.ndim < 3:\n        return _spbase._matmul_sparse(self, other)\n    self_shape = self.shape\n    other_shape = other.shape\n    broadcast_shape = np.broadcast_shapes(self_shape[:-2], other_shape[:-2])\n    self_new_shape = tuple(broadcast_shape) + self_shape[-2:]\n    other_new_shape = tuple(broadcast_shape) + other_shape[-2:]\n    self_broadcasted = self._broadcast_to(self_new_shape)\n    other_broadcasted = other._broadcast_to(other_new_shape)\n    self_block_diag = _block_diag(self_broadcasted)\n    other_block_diag = _block_diag(other_broadcasted)\n    prod_block_diag = (self_block_diag @ other_block_diag).tocoo()\n    return _extract_block_diag(prod_block_diag, shape=(*broadcast_shape, self.shape[-2], other.shape[-1]))",
    "docstring": "Perform sparse-sparse matrix multiplication for two n-D COO arrays. The method converts input n-D arrays to 2-D block array format, uses csr_matmat to multiply them, and then converts the result back to n-D COO array. Parameters: self (COO): The first n-D sparse array in COO format. other (COO): The second n-D sparse array in COO format. Returns: prod (COO): The resulting n-D sparse array after multiplication.",
    "type": "method",
    "file_path": "scipy\\scipy\\sparse\\_coo.py",
    "ast_data": "FunctionDef name:_matmul_sparse arg:self arg:other arguments arg arg If BoolOp Compare Compare Return return:yes Call Assign Assign Assign Call Assign Call Assign Call Assign Call Assign Call Assign Call Assign Call Assign Call Return return:yes Call"
  },
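A sketch of the block-diagonal trick the method above relies on, using only stable public scipy.sparse calls: a batch of 2-D sparse products can be computed as one product of block-diagonal matrices, whose diagonal blocks are the per-batch results.

```python
# Batched sparse matmul via block-diagonal matrices (conceptual sketch).
import numpy as np
from scipy import sparse

rng = np.random.default_rng(0)
A = [sparse.random(3, 4, density=0.5, random_state=rng) for _ in range(2)]
B = [sparse.random(4, 5, density=0.5, random_state=rng) for _ in range(2)]

prod = (sparse.block_diag(A) @ sparse.block_diag(B)).tocsr()
for i in range(2):  # each diagonal block equals the batched product A[i] @ B[i]
    block = prod[3 * i:3 * (i + 1), 5 * i:5 * (i + 1)]
    assert np.allclose(block.toarray(), (A[i] @ B[i]).toarray())
```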
  {
    "library": "scipy",
    "name": "FixedRule",
    "source_code": "class FixedRule(Rule):\n\n    def __init__(self):\n        self.xp = None\n\n    @property\n    def nodes_and_weights(self):\n        raise NotImplementedError\n\n    def estimate(self, f, a, b, args=()):\n        nodes, weights = self.nodes_and_weights\n        if self.xp is None:\n            self.xp = array_namespace(nodes)\n        return _apply_fixed_rule(f, a, b, nodes, weights, args, self.xp)",
    "docstring": "A rule implemented as the weighted sum of function evaluations at fixed nodes. Attributes ---------- nodes_and_weights : (ndarray, ndarray) A tuple `[-1, 1]^n`. See Also -------- GaussLegendreQuadrature, GaussKronrodQuadrature, GenzMalikCubature Examples -------- Implementing Simpson's 1/3 rule: >>> import numpy as np >>> from scipy.integrate._rules import FixedRule >>> class SimpsonsQuad(FixedRule): ... @property ... def nodes_and_weights(self): ... nodes = np.array([-1, 0, 1]) ... weights = np.array([1/3, 4/3, 1/3]) ... return (nodes, weights) >>> rule = SimpsonsQuad() >>> rule.estimate( ... f=lambda x: x**2, ... a=np.array([0]), ... b=np.array([1]), ... ) [0.3333333]",
    "type": "class",
    "file_path": "scipy\\scipy\\integrate\\_rules\\_base.py",
    "ast_data": "ClassDef name:FixedRule FunctionDef name:__init__ arg:self arguments arg Assign FunctionDef name:nodes_and_weights arg:self arguments arg Raise FunctionDef name:estimate arg:self arg:f arg:a arg:b arg:args arguments arg arg arg arg arg Assign If Compare Assign Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "create_variable",
    "source_code": "def create_variable(self, feature_column, name, shape, dtype=None, trainable=True, use_resource=True, initializer=None):\n    del feature_column, name, shape, dtype, trainable, use_resource, initializer\n    raise NotImplementedError('StateManager.create_variable')",
    "docstring": "Creates a new variable. Args: feature_column: A object this variable corresponds to. name: variable name. shape: variable shape. dtype: The type of the variable. Defaults to or . trainable: Whether this variable is trainable or not. use_resource: If true, we use resource variables. Otherwise we use RefVariable. initializer: initializer instance (callable). Returns: The created variable.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\feature_column\\feature_column_v2.py",
    "ast_data": "FunctionDef name:create_variable arg:self arg:feature_column arg:name arg:shape arg:dtype arg:trainable arg:use_resource arg:initializer arguments arg arg arg arg arg arg arg arg Raise Call"
  },
  {
    "library": "pytorch",
    "name": "_propagate_tensor_meta_non_cached",
    "source_code": "def _propagate_tensor_meta_non_cached(self, op_schema: OpSchema) -> Union[None, TensorMeta, Sequence[Optional[TensorMeta]]]:\n    if op_schema.op == aten.equal.default:\n        return None\n    with FakeTensorMode():\n        fake_args = op_schema.gen_fake_args()\n        fake_kwargs = op_schema.gen_fake_kwargs()\n        fake_out = op_schema.op(*fake_args, **fake_kwargs)\n    if isinstance(fake_out, torch.Tensor):\n        return TensorMeta(shape=fake_out.shape, stride=fake_out.stride(), dtype=fake_out.dtype)\n    elif isinstance(fake_out, (tuple, list)):\n        tensor_meta_list: list[Optional[TensorMeta]] = []\n        for fake_out_item in fake_out:\n            if isinstance(fake_out_item, torch.Tensor):\n                tensor_meta_list.append(TensorMeta(shape=fake_out_item.shape, stride=fake_out_item.stride(), dtype=fake_out_item.dtype))\n            else:\n                tensor_meta_list.append(None)\n        return tuple(tensor_meta_list) if isinstance(fake_out, tuple) else tensor_meta_list\n    else:\n        return None",
    "docstring": "Propagate the tensor metadata, it could either return a TensorMeta or a list/tuple of TensorMetas",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\tensor\\_sharding_prop.py",
    "ast_data": "FunctionDef name:_propagate_tensor_meta_non_cached arg:self arg:op_schema arguments arg arg If Compare Return return:no With Call Assign Call Assign Call Assign Call If Call Return return:yes Call Call If Call For If Call Call Call Call Call Return return:yes Call Call Return return:no"
  },
  {
    "library": "cherrypy",
    "name": "read",
    "source_code": "def read(self, filenames):\n    if isinstance(filenames, text_or_bytes):\n        filenames = [filenames]\n    for filename in filenames:\n        with open(filename) as fp:\n            self._read(fp, filename)",
    "docstring": "Read the config from files on disk.",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\lib\\reprconf.py",
    "ast_data": "FunctionDef name:read arg:self arg:filenames arguments arg arg If Call Assign For With Call Call"
  },
  {
    "library": "matplotlib",
    "name": "__call__",
    "source_code": "def __call__(self, x, pos=None):\n    return _UnicodeMinusFormat().format(self.fmt, x=x, pos=pos)",
    "docstring": "Return the formatted label string. *x* and *pos* are passed to as keyword arguments with those exact names.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\ticker.py",
    "ast_data": "FunctionDef name:__call__ arg:self arg:x arg:pos arguments arg arg arg Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "_is_connected",
    "source_code": "def _is_connected(source: torch.fx.Node, dest: torch.fx.Node) -> bool:\n    quant_workflow_ops = _QUANTIZE_OPS + _DEQUANTIZE_OPS\n    quant_workflow_ops.append(torch.ops.quantized_decomposed.choose_qparams.tensor)\n    while dest.target in quant_workflow_ops:\n        if not isinstance(dest.args[0], torch.fx.Node):\n            raise ValueError(f'expected arg[0] of quant workflow ops to be a node but found {dest.args[0]}')\n        dest = dest.args[0]\n    return dest == source",
    "docstring": "Assuming dest is one of the ops inserted by quant workflow, this function finds if source and dest are connected. Assumption is that only quant workflow inserted ops exist between source and dest",
    "type": "function",
    "file_path": "pytorch\\torch\\ao\\quantization\\pt2e\\utils.py",
    "ast_data": "FunctionDef name:_is_connected arg:source arg:dest arguments arg arg Assign Call While Compare If Call Raise Call Assign Return return:yes Compare"
  },
  {
    "library": "scikit-learn",
    "name": "correct_covariance",
    "source_code": "def correct_covariance(self, data):\n    n_samples = len(self.dist_)\n    n_support = np.sum(self.support_)\n    if n_support < n_samples and np.allclose(self.raw_covariance_, 0):\n        raise ValueError('The covariance matrix of the support data is equal to 0, try to increase support_fraction')\n    correction = np.median(self.dist_) / chi2(data.shape[1]).isf(0.5)\n    covariance_corrected = self.raw_covariance_ * correction\n    self.dist_ /= correction\n    return covariance_corrected",
    "docstring": "Apply a correction to raw Minimum Covariance Determinant estimates. Correction using the empirical correction factor suggested by Rousseeuw and Van Driessen in [RVD]_. Parameters ---------- data : array-like of shape (n_samples, n_features) The data matrix, with p features and n samples. The data set must be the one which was used to compute the raw estimates. Returns ------- covariance_corrected : ndarray of shape (n_features, n_features) Corrected robust covariance estimate. References ---------- .. [RVD] A Fast Algorithm for the Minimum Covariance Determinant Estimator, 1999, American Statistical Association and the American Society for Quality, TECHNOMETRICS",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\covariance\\_robust_covariance.py",
    "ast_data": "FunctionDef name:correct_covariance arg:self arg:data arguments arg arg Assign Call Assign Call If BoolOp Compare Call Raise Call Assign Call Call Call Assign Return return:yes"
  },
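A hypothetical end-to-end use of the method above through the public scikit-learn estimator: fit `MinCovDet`, then apply the empirical consistency correction on the same data set.

```python
# Fit the robust covariance estimator, then apply correct_covariance.
import numpy as np
from sklearn.covariance import MinCovDet

X = np.random.default_rng(42).normal(size=(200, 3))
mcd = MinCovDet(random_state=0).fit(X)
cov_corrected = mcd.correct_covariance(X)  # rescaled robust covariance
print(cov_corrected.shape)  # (3, 3)
```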
  {
    "library": "scipy",
    "name": "read",
    "source_code": "def read(self, source, *, spmatrix=True):\n    stream, close_it = self._open(source)\n    try:\n        self._parse_header(stream)\n        data = self._parse_body(stream)\n    finally:\n        if close_it:\n            stream.close()\n    if spmatrix and isinstance(data, coo_array):\n        data = coo_matrix(data)\n    return data",
    "docstring": "Reads the contents of a Matrix Market file-like 'source' into a matrix. Parameters ---------- source : str or file-like Matrix Market filename (extensions .mtx, .mtz.gz) or open file object. spmatrix : bool, optional (default: True) If ``. Returns ------- a : ndarray or coo_array or coo_matrix Dense or sparse array depending on the matrix format in the Matrix Market file.",
    "type": "method",
    "file_path": "scipy\\scipy\\io\\_mmio.py",
    "ast_data": "FunctionDef name:read arg:self arg:source arguments arg arg arg Assign Call Try Call Assign Call If Call If BoolOp Call Assign Call Return return:yes"
  },
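This reader backs the public `scipy.io.mmread` entry point; a small file round trip as a sketch:

```python
# Matrix Market round trip: write a sparse matrix, read it back.
import os, tempfile
from scipy.io import mmread, mmwrite
from scipy.sparse import coo_matrix

with tempfile.TemporaryDirectory() as d:
    path = os.path.join(d, "m.mtx")
    mmwrite(path, coo_matrix([[1, 0], [0, 2]]))
    m = mmread(path)  # sparse input comes back as COO (spmatrix by default)
    print(m.toarray())
```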
  {
    "library": "numpy",
    "name": "_get_fieldspec",
    "source_code": "def _get_fieldspec(dtype):\n    if dtype.names is None:\n        return [('', dtype)]\n    else:\n        fields = ((name, dtype.fields[name]) for name in dtype.names)\n        return [(name if len(f) == 2 else (f[2], name), f[0]) for name, f in fields]",
    "docstring": "Produce a list of name/dtype pairs corresponding to the dtype fields Similar to dtype.descr, but the second item of each tuple is a dtype, not a string. As a result, this handles subarray dtypes Can be passed to the dtype constructor to reconstruct the dtype, noting that this (deliberately) discards field offsets. Examples -------- >>> import numpy as np >>> dt = np.dtype([(('a', 'A'), np.int64), ('b', np.double, 3)]) >>> dt.descr [(('a', 'A'), '>> _get_fieldspec(dt) [(('a', 'A'), dtype('int64')), ('b', dtype(('<f8', (3,))))]",
    "type": "function",
    "file_path": "numpy\\numpy\\lib\\recfunctions.py",
    "ast_data": "FunctionDef name:_get_fieldspec arg:dtype arguments arg If Compare Return return:yes Assign Return return:yes Compare Call"
  },
  {
    "library": "matplotlib",
    "name": "push_state",
    "source_code": "def push_state(self) -> None:\n    self._state_stack.append(self.get_state().copy())",
    "docstring": "Push a new onto the stack, copying the current state.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\_mathtext.py",
    "ast_data": "FunctionDef name:push_state arg:self arguments arg Call Call Call"
  },
  {
    "library": "kornia",
    "name": "sample_keypoints",
    "source_code": "@torch.no_grad()\ndef sample_keypoints(scoremap: Tensor, num_samples: Optional[int]=10000, return_scoremap: bool=True, increase_coverage: bool=True) -> Union[Tensor, Tuple[Tensor, Tensor]]:\n    device = scoremap.device\n    dtype = scoremap.dtype\n    B, H, W = scoremap.shape\n    if increase_coverage:\n        weights = (-torch.linspace(-2, 2, steps=51, device=device, dtype=dtype) ** 2).exp()[None, None]\n        local_density_x = F.conv2d((scoremap[:, None] + 1e-06) * 10000, weights[..., None, :], padding=(0, 51 // 2))\n        local_density = F.conv2d(local_density_x, weights[..., None], padding=(51 // 2, 0))[:, 0]\n        scoremap = scoremap * (local_density + 1e-08) ** (-1 / 2)\n    grid = get_grid(B, H, W, device=device).reshape(B, H * W, 2)\n    inds = torch.topk(scoremap.reshape(B, H * W), k=num_samples).indices\n    kps = torch.gather(grid, dim=1, index=inds[..., None].expand(B, num_samples, 2))\n    if return_scoremap:\n        return (kps, torch.gather(scoremap.reshape(B, H * W), dim=1, index=inds))\n    return kps",
    "docstring": "Sample keypoints from provided candidates.",
    "type": "function",
    "file_path": "kornia\\kornia\\feature\\dedode\\utils.py",
    "ast_data": "FunctionDef name:sample_keypoints arg:scoremap arg:num_samples arg:return_scoremap arg:increase_coverage arguments arg arg arg arg Assign Assign Assign If Assign Call Call Assign Call Assign Call Assign Assign Call Call Assign Call Call Assign Call Call If Return return:yes Call Call Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "get_extent",
    "source_code": "def get_extent(self):\n    numrows, numcols = self.get_size()\n    return (-0.5 + self.ox, numcols - 0.5 + self.ox, -0.5 + self.oy, numrows - 0.5 + self.oy)",
    "docstring": "Return the image extent as tuple (left, right, bottom, top).",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\image.py",
    "ast_data": "FunctionDef name:get_extent arg:self arguments arg Assign Call Return return:yes"
  },
  {
    "library": "pandas",
    "name": "_get_vars",
    "source_code": "def _get_vars(self, stack, scopes: list[str]) -> None:\n    variables = itertools.product(scopes, stack)\n    for scope, (frame, _, _, _, _, _) in variables:\n        try:\n            d = getattr(frame, f'f_{scope}')\n            self.scope = DeepChainMap(self.scope.new_child(d))\n        finally:\n            del frame",
    "docstring": "Get specifically scoped variables from a list of stack frames. Parameters ---------- stack : list A list of stack frames as returned by `` scopes : sequence of strings A sequence containing valid stack frame attribute names that evaluate to a dictionary. For example, ('locals', 'globals')",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\computation\\scope.py",
    "ast_data": "FunctionDef name:_get_vars arg:self arg:stack arg:scopes arguments arg arg arg Assign Call For Try Assign Call Assign Call Call"
  },
  {
    "library": "matplotlib",
    "name": "get_xy",
    "source_code": "def get_xy(self):\n    return self._path.vertices",
    "docstring": "Get the vertices of the path. Returns ------- (N, 2) array The coordinates of the vertices.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\patches.py",
    "ast_data": "FunctionDef name:get_xy arg:self arguments arg Return return:yes"
  },
  {
    "library": "scipy",
    "name": "_eigenvalue_ordering",
    "source_code": "def _eigenvalue_ordering(self, m):\n    grid_shape = self.grid_shape\n    if m is None:\n        indices = np.indices(grid_shape)\n        Leig = np.zeros(grid_shape)\n    else:\n        grid_shape_min = min(grid_shape, tuple(np.ones_like(grid_shape) * m))\n        indices = np.indices(grid_shape_min)\n        Leig = np.zeros(grid_shape_min)\n    for j, n in zip(indices, grid_shape):\n        if self.boundary_conditions == 'dirichlet':\n            Leig += -4 * np.sin(np.pi * (j + 1) / (2 * (n + 1))) ** 2\n        elif self.boundary_conditions == 'neumann':\n            Leig += -4 * np.sin(np.pi * j / (2 * n)) ** 2\n        else:\n            Leig += -4 * np.sin(np.pi * np.floor((j + 1) / 2) / n) ** 2\n    Leig_ravel = Leig.ravel()\n    ind = np.argsort(Leig_ravel)\n    eigenvalues = Leig_ravel[ind]\n    if m is not None:\n        eigenvalues = eigenvalues[-m:]\n        ind = ind[-m:]\n    return (eigenvalues, ind)",
    "docstring": "Compute largest eigenvalues in each of the `m` largest.",
    "type": "method",
    "file_path": "scipy\\scipy\\sparse\\linalg\\_special_sparse_arrays.py",
    "ast_data": "FunctionDef name:_eigenvalue_ordering arg:self arg:m arguments arg arg Assign If Compare Assign Call Assign Call Assign Call Call Call Assign Call Assign Call For Call If Compare Call If Compare Call Call Call Assign Call Assign Call Assign If Compare Assign Assign Return return:yes"
  },
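The Dirichlet branch of the formula above can be checked numerically on a 1-D grid: the eigenvalues of the second-difference (Laplacian) matrix of size n are exactly -4 sin^2(pi (j+1) / (2 (n+1))) for j = 0..n-1.

```python
# Verify the Dirichlet eigenvalue formula against a dense 1-D Laplacian.
import numpy as np

n = 6
j = np.arange(n)
analytic = np.sort(-4 * np.sin(np.pi * (j + 1) / (2 * (n + 1))) ** 2)

# Dense 1-D Dirichlet Laplacian: tridiagonal with -2 on the diagonal.
L = -2 * np.eye(n) + np.eye(n, k=1) + np.eye(n, k=-1)
numeric = np.sort(np.linalg.eigvalsh(L))
assert np.allclose(analytic, numeric)
```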
  {
    "library": "tensorflow",
    "name": "graph_context_for_symbolic_tensors",
    "source_code": "@tf_contextlib.contextmanager\ndef graph_context_for_symbolic_tensors(*args, **kwargs):\n    if any((is_symbolic_tensor(v) for v in list(args) + list(kwargs.values()))):\n        with K.get_graph().as_default():\n            yield\n    else:\n        yield",
    "docstring": "Returns graph context manager if any of the inputs is a symbolic tensor.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\utils\\tf_utils.py",
    "ast_data": "FunctionDef name:graph_context_for_symbolic_tensors arguments arg arg If Call Call Call Call Call With Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_output_types",
    "source_code": "@property\ndef _output_types(self) -> list[int]:\n    num_outputs = pywrap_tf_session.TF_OperationNumOutputs(self._c_op)\n    output_types = [int(pywrap_tf_session.TF_OperationOutputType(self._tf_output(i))) for i in range(num_outputs)]\n    return output_types",
    "docstring": "List this operation's output types. Returns: List of the types of the Tensors computed by this operation. Each element in the list is an integer whose value is one of the TF_DataType enums defined in pywrap_tf_session.h The length of this list indicates the number of output endpoints of the operation.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\ops.py",
    "ast_data": "FunctionDef name:_output_types arg:self arguments arg Assign Call Assign Call Call Call Call Return return:yes"
  },
  {
    "library": "cryptography",
    "name": "private_numbers",
    "source_code": "@abc.abstractmethod\ndef private_numbers(self) -> EllipticCurvePrivateNumbers:\n    pass",
    "docstring": "Returns an EllipticCurvePrivateNumbers.",
    "type": "method",
    "file_path": "cryptography\\src\\cryptography\\hazmat\\primitives\\asymmetric\\ec.py",
    "ast_data": "FunctionDef name:private_numbers arg:self arguments arg"
  },
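A short sketch of how this abstract method is reached via the public `cryptography` API, using a freshly generated EC key:

```python
# Obtain the scalar private value from a generated EC private key.
from cryptography.hazmat.primitives.asymmetric import ec

key = ec.generate_private_key(ec.SECP256R1())
numbers = key.private_numbers()           # EllipticCurvePrivateNumbers
print(numbers.private_value.bit_length())  # roughly 256 bits for P-256
```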
  {
    "library": "kornia",
    "name": "accuracy",
    "source_code": "def accuracy(pred: Tensor, target: Tensor, topk: Tuple[int, ...]=(1,)) -> List[Tensor]:\n    maxk = min(max(topk), pred.size()[1])\n    batch_size = target.size(0)\n    _, pred = pred.topk(maxk, 1, True, True)\n    pred = pred.t()\n    correct = pred.eq(target.reshape(1, -1).expand_as(pred))\n    return [correct[:min(k, maxk)].reshape(-1).float().sum(0) * 100.0 / batch_size for k in topk]",
    "docstring": "Compute the accuracy over the k top predictions for the specified values of k. Args: pred: the input tensor with the logits to evaluate. target: the tensor containing the ground truth. topk: the expected topk ranking. Example: >>> logits = torch.tensor([[0, 1, 0]]) >>> target = torch.tensor([[1]]) >>> accuracy(logits, target) [tensor(100.)]",
    "type": "function",
    "file_path": "kornia\\kornia\\metrics\\accuracy.py",
    "ast_data": "FunctionDef name:accuracy arg:pred arg:target arg:topk arguments arg arg arg Assign Call Call Call Assign Call Assign Call Assign Call Assign Call Call Call Return return:yes Call Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "FixedLengthRecordReader",
    "source_code": "@tf_export(v1=['FixedLengthRecordReader'])\nclass FixedLengthRecordReader(ReaderBase):\n\n    @deprecation.deprecated(None, 'Queue-based input pipelines have been replaced by `tf.data`. Use `tf.data.FixedLengthRecordDataset`.')\n    def __init__(self, record_bytes, header_bytes=None, footer_bytes=None, hop_bytes=None, name=None, encoding=None):\n        rr = gen_io_ops.fixed_length_record_reader_v2(record_bytes=record_bytes, header_bytes=header_bytes, footer_bytes=footer_bytes, hop_bytes=hop_bytes, encoding=encoding, name=name)\n        super(FixedLengthRecordReader, self).__init__(rr)",
    "docstring": "A Reader that outputs fixed-length records from a file. See ReaderBase for supported methods. @compatibility(eager) Readers are not compatible with eager execution. Instead, please use to get data into your model. @end_compatibility",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\io_ops.py",
    "ast_data": "ClassDef name:FixedLengthRecordReader FunctionDef name:__init__ arg:self arg:record_bytes arg:header_bytes arg:footer_bytes arg:hop_bytes arg:name arg:encoding arguments arg arg arg arg arg arg arg Assign Call Call Call Call Call"
  },
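The deprecation message points to `tf.data.FixedLengthRecordDataset`; a sketch of the replacement, assuming a hypothetical binary file "records.bin" made of 4-byte records:

```python
# tf.data replacement for the deprecated queue-based reader.
import tensorflow as tf

dataset = tf.data.FixedLengthRecordDataset("records.bin", record_bytes=4)
for record in dataset.take(2):
    print(record)  # a scalar tf.string tensor holding 4 raw bytes
```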
  {
    "library": "scipy",
    "name": "to_tf",
    "source_code": "def to_tf(self):\n    return TransferFunction(*zpk2tf(self.zeros, self.poles, self.gain), **self._dt_dict)",
    "docstring": "Convert system representation to . Returns ------- sys : instance of Transfer function of the current system",
    "type": "method",
    "file_path": "scipy\\scipy\\signal\\_ltisys.py",
    "ast_data": "FunctionDef name:to_tf arg:self arguments arg Return return:yes Call Call"
  },
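A worked conversion through the public `scipy.signal` API: the system 4(s+1) / ((s+2)(s+3)) in ZPK form becomes numerator [4, 4] and denominator [1, 5, 6].

```python
# Convert a zeros-poles-gain system to a transfer function.
from scipy.signal import ZerosPolesGain

sys_zpk = ZerosPolesGain([-1.0], [-2.0, -3.0], 4.0)
sys_tf = sys_zpk.to_tf()
print(sys_tf.num, sys_tf.den)  # [4. 4.] [1. 5. 6.]
```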
  {
    "library": "scikit-learn",
    "name": "decision_function",
    "source_code": "def decision_function(self, X):\n    dec = self._decision_function(X)\n    if self.decision_function_shape == 'ovr' and len(self.classes_) > 2:\n        return _ovr_decision_function(dec < 0, -dec, len(self.classes_))\n    return dec",
    "docstring": "Evaluate the decision function for the samples in X. Parameters ---------- X : array-like of shape (n_samples, n_features) The input samples. Returns ------- X : ndarray of shape (n_samples, n_classes * (n_classes-1) / 2) Returns the decision function of the sample for each class in the model. If decision_function_shape='ovr', the shape is (n_samples, n_classes). Notes ----- If decision_function_shape='ovo', the function values are proportional to the distance of the samples X to the separating hyperplane. If the exact distances are required, divide the function values by the norm of the weight vector (`this question `_ for further details. If decision_function_shape='ovr', the decision function is a monotonic transformation of ovo decision function.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\svm\\_base.py",
    "ast_data": "FunctionDef name:decision_function arg:self arg:X arguments arg arg Assign Call If BoolOp Compare Compare Call Return return:yes Call Compare Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "merge_by_ref_with",
    "source_code": "def merge_by_ref_with(self, other: 'FunctionCaptures') -> None:\n    assert isinstance(other, FunctionCaptures)\n    for key in other.by_ref_external:\n        if key not in self._by_ref_external:\n            self._by_ref_external[key] = other.by_ref_external[key]\n            self._by_ref_tracetype[key] = other.by_ref_tracetype[key]",
    "docstring": "Add by-ref captures from to if not exist.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\core\\function\\capture\\capture_container.py",
    "ast_data": "FunctionDef name:merge_by_ref_with arg:self arg:other arguments arg arg Call For If Compare Assign Assign"
  },
  {
    "library": "scipy",
    "name": "_inverse_poly_zero",
    "source_code": "def _inverse_poly_zero(a, b, c, d, fa, fb, fc, fd):\n    return _interpolated_poly([fa, fb, fc, fd], [a, b, c, d], 0)",
    "docstring": "Inverse cubic interpolation f-values -> x-values Given four points (fa, a), (fb, b), (fc, c), (fd, d) with fa, fb, fc, fd all distinct, find poly IP(y) through the 4 points and compute x=IP(0).",
    "type": "function",
    "file_path": "scipy\\scipy\\optimize\\_zeros_py.py",
    "ast_data": "FunctionDef name:_inverse_poly_zero arg:a arg:b arg:c arg:d arg:fa arg:fb arg:fc arg:fd arguments arg arg arg arg arg arg arg arg Return return:yes Call"
  },
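The same inverse-interpolation idea can be sketched with scipy's public Lagrange helper: fit a cubic through the (f(x), x) pairs and evaluate it at f = 0 to estimate a root. The test function here is illustrative.

```python
# Inverse cubic interpolation as a root estimate, via scipy.interpolate.lagrange.
import numpy as np
from scipy.interpolate import lagrange

f = lambda x: x**3 - 2.0           # true root at 2**(1/3)
xs = np.array([1.0, 1.2, 1.3, 1.5])
estimate = lagrange(f(xs), xs)(0.0)  # interpolate x as a function of f
print(estimate, 2.0 ** (1.0 / 3.0))
```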
  {
    "library": "tensorflow",
    "name": "force_checkpoint_conversion",
    "source_code": "def force_checkpoint_conversion(value=True):\n    global _FORCE_CHECKPOINT_CONVERSION\n    _FORCE_CHECKPOINT_CONVERSION = value",
    "docstring": "Forces checkpoint to use the new implementation. The new checkpoint implementation is changing the saved metadata slightly, and therefore may break forward compatibility in newly saved checkpoints. This means: - Previous versions of TensorFlow may not be able to load new checkpoints. - Backwards compatibility is unchanged: Old checkpoints can still be loaded. TensorFlow guarantees 3 weeks of forward compatibility, so this flag will be removed in the future weeks, after which checkpoint conversion will happen by default. **What happens when this flag is enabled?** The checkpoint will be saved with different metadata, meaning that previous versions of TensorFlow (<=2.10) will not be able to load this checkpoint. Args: value: Boolean value, whether or not to force checkpoint conversion to the new implementation.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\checkpoint\\saveable_compat.py",
    "ast_data": "FunctionDef name:force_checkpoint_conversion arg:value arguments arg Assign"
  },
  {
    "library": "django",
    "name": "width",
    "source_code": "@property\ndef width(self):\n    return capi.get_field_width(self.ptr)",
    "docstring": "Return the width of this Field.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\gdal\\field.py",
    "ast_data": "FunctionDef name:width arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "cherrypy",
    "name": "_cherrypy_pydoc_resolve",
    "source_code": "def _cherrypy_pydoc_resolve(thing, forceload=0):\n    if isinstance(thing, _ThreadLocalProxy):\n        thing = getattr(serving, thing.__attrname__)\n    return _pydoc._builtin_resolve(thing, forceload)",
    "docstring": "Given an object or a path to an object, get the object and its name.",
    "type": "function",
    "file_path": "cherrypy\\cherrypy\\__init__.py",
    "ast_data": "FunctionDef name:_cherrypy_pydoc_resolve arg:thing arg:forceload arguments arg arg If Call Assign Call Return return:yes Call"
  },
  {
    "library": "django",
    "name": "__init__",
    "source_code": "def __init__(self, feat, layer):\n    if not feat:\n        raise GDALException('Cannot create OGR Feature, invalid pointer given.')\n    self.ptr = feat\n    self._layer = layer",
    "docstring": "Initialize Feature from a pointer and its Layer object.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\gdal\\feature.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:feat arg:layer arguments arg arg arg If Raise Call Assign Assign"
  },
  {
    "library": "tensorflow",
    "name": "_find_max_under_constraint",
    "source_code": "def _find_max_under_constraint(self, constrained, dependent, predicate):\n    feasible = array_ops.where_v2(predicate(constrained, self.value))\n    feasible_exists = math_ops.greater(array_ops.size(feasible), 0)\n    max_dependent = math_ops.reduce_max(array_ops.gather(dependent, feasible))\n    return array_ops.where_v2(feasible_exists, max_dependent, 0.0)",
    "docstring": "Returns the maximum of dependent_statistic that satisfies the constraint. Args: constrained: Over these values the constraint is specified. A rank-1 tensor. dependent: From these values the maximum that satiesfies the constraint is selected. Values in this tensor and in are linked by having the same threshold at each position, hence this tensor must have the same shape. predicate: A binary boolean functor to be applied to arguments and , e.g. . Returns maximal dependent value, if no value satiesfies the constraint 0.0.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\metrics.py",
    "ast_data": "FunctionDef name:_find_max_under_constraint arg:self arg:constrained arg:dependent arg:predicate arguments arg arg arg arg Assign Call Call Assign Call Call Assign Call Call Return return:yes Call"
  },
  {
    "library": "numpy",
    "name": "irfftn",
    "source_code": "@array_function_dispatch(_fftn_dispatcher)\ndef irfftn(a, s=None, axes=None, norm=None, out=None):\n    a = asarray(a)\n    s, axes = _cook_nd_args(a, s, axes, invreal=1)\n    for ii in range(len(axes) - 1):\n        a = ifft(a, s[ii], axes[ii], norm)\n    a = irfft(a, s[-1], axes[-1], norm, out=out)\n    return a",
    "docstring": "Computes the inverse of . This function computes the inverse of the N-dimensional discrete Fourier Transform for real input over any number of axes in an M-dimensional array by means of the Fast Fourier Transform (FFT). In other words, `irfftrfftnirfftifftnssssaxesslen(s)saxessaxesnumpy.fftaxessasssssaxesaxesaifftnfftrfftsirfftn` assumes an even output length which puts the last entry at the Nyquist frequency; aliasing with its symmetric counterpart. When performing the final complex to real transform, the last value is thus treated as purely real. To avoid losing information, the correct shape of the real input **must** be given. Examples -------- >>> import numpy as np >>> a = np.zeros((3, 2, 2)) >>> a[0, 0, 0] = 3 * 2 * 2 >>> np.fft.irfftn(a) array([[[1., 1.], [1., 1.]], [[1., 1.], [1., 1.]], [[1., 1.], [1., 1.]]])",
    "type": "function",
    "file_path": "numpy\\numpy\\fft\\_pocketfft.py",
    "ast_data": "FunctionDef name:irfftn arg:a arg:s arg:axes arg:norm arg:out arguments arg arg arg arg arg Assign Call Assign Call For Call Call Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "import_submodule",
    "source_code": "def import_submodule(mod: types.ModuleType):\n    for filename in sorted(os.listdir(os.path.dirname(cast(str, mod.__file__)))):\n        if filename.endswith('.py') and filename[0] != '_':\n            importlib.import_module(f'{mod.__name__}.{filename[:-3]}')",
    "docstring": "Ensure all the files in a given submodule are imported",
    "type": "function",
    "file_path": "pytorch\\torch\\_dynamo\\utils.py",
    "ast_data": "FunctionDef name:import_submodule arg:mod arguments arg For Call Call Call Call If BoolOp Call Compare Call"
  },
  {
    "library": "scipy",
    "name": "interp2d",
    "source_code": "class interp2d:\n\n    def __init__(self, x, y, z, kind='linear', copy=True, bounds_error=False, fill_value=None):\n        raise NotImplementedError(err_mesg)",
    "docstring": "interp2d(x, y, z, kind='linear', copy=True, bounds_error=False, fill_value=None) Class for 2D interpolation (deprecated and removed) .. versionremoved:: 1.14.0 has been removed in SciPy 1.14.0. For legacy code, nearly bug-for-bug compatible replacements are on regular grids, and / for scattered 2D data. In new code, for regular grids use instead. For scattered data, prefer or . For more details see :ref:.",
    "type": "class",
    "file_path": "scipy\\scipy\\interpolate\\_interpolate.py",
    "ast_data": "ClassDef name:interp2d FunctionDef name:__init__ arg:self arg:x arg:y arg:z arg:kind arg:copy arg:bounds_error arg:fill_value arguments arg arg arg arg arg arg arg arg Raise Call"
  },
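A short sketch of the regular-grid replacement named in the removal notice, `RegularGridInterpolator`:

```python
# Regular-grid interpolation, the suggested replacement for interp2d.
import numpy as np
from scipy.interpolate import RegularGridInterpolator

x = np.linspace(0, 1, 5)
y = np.linspace(0, 2, 7)
z = np.add.outer(x**2, y)                  # z[i, j] = x[i]**2 + y[j]
interp = RegularGridInterpolator((x, y), z)
print(interp([[0.5, 1.0]]))                # [1.25], i.e. 0.25 + 1.0
```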
  {
    "library": "tensorflow",
    "name": "is_oss",
    "source_code": "def is_oss():\n    return len(sys.argv) >= 1 and 'bazel' in sys.argv[0]",
    "docstring": "Returns whether the test is run under OSS.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\multi_process_lib.py",
    "ast_data": "FunctionDef name:is_oss arguments Return return:yes BoolOp Compare Call Compare"
  },
  {
    "library": "pytorch",
    "name": "scale_input_observer",
    "source_code": "def scale_input_observer(node: Node, modules: dict[str, nn.Module]) -> None:\n    input_eq_obs = modules[str(node.target)]\n    assert isinstance(input_eq_obs, _InputEqualizationObserver)\n    input_quant_obs_node = node.args[0]\n    assert isinstance(input_quant_obs_node, Node)\n    input_quant_obs = modules[str(input_quant_obs_node.target)]\n    if not isinstance(input_quant_obs, ObserverBase):\n        return\n    min_input_scaled, max_input_scaled = input_eq_obs.calculate_scaled_minmax()\n    if min_input_scaled is None and max_input_scaled is None:\n        return\n    input_quant_obs.min_val = min_input_scaled\n    input_quant_obs.max_val = max_input_scaled",
    "docstring": "Scales the following input quantization observer's min/max values by updating the values with the scaled min/max values calculated by the input equalization observer",
    "type": "function",
    "file_path": "pytorch\\torch\\ao\\quantization\\fx\\_equalize.py",
    "ast_data": "FunctionDef name:scale_input_observer arg:node arg:modules arguments arg arg Assign Call Call Assign Call Assign Call If Call Return return:no Assign Call If BoolOp Compare Compare Return return:no Assign Assign"
  },
  {
    "library": "django",
    "name": "allow_migrate_model",
    "source_code": "def allow_migrate_model(self, connection_alias, model):\n    if not model._meta.can_migrate(connection_alias):\n        return False\n    return router.allow_migrate_model(connection_alias, model)",
    "docstring": "Return whether or not a model may be migrated. This is a thin wrapper around router.allow_migrate_model() that preemptively rejects any proxy, swapped out, or unmanaged model.",
    "type": "method",
    "file_path": "django\\django\\db\\migrations\\operations\\base.py",
    "ast_data": "FunctionDef name:allow_migrate_model arg:self arg:connection_alias arg:model arguments arg arg arg If Call Return return:yes Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "_decision_function",
    "source_code": "def _decision_function(self, X):\n    check_is_fitted(self)\n    X = validate_data(self, X, accept_sparse='csr', reset=False)\n    scores = safe_sparse_dot(X, self.coef_.T, dense_output=True) + self.intercept_\n    return scores.ravel()",
    "docstring": "Predict using the linear model Parameters ---------- X : {array-like, sparse matrix}, shape (n_samples, n_features) Returns ------- ndarray of shape (n_samples,) Predicted target values per element in X.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\linear_model\\_stochastic_gradient.py",
    "ast_data": "FunctionDef name:_decision_function arg:self arg:X arguments arg arg Call Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "_get_device_index",
    "source_code": "def _get_device_index(device: Any, optional: bool=False, allow_cpu: bool=False) -> int:\n    if isinstance(device, int):\n        return device\n    if isinstance(device, str):\n        device = torch.device(device)\n    if isinstance(device, torch.device):\n        if allow_cpu:\n            if device.type not in ['cuda', 'cpu']:\n                raise ValueError(f'Expected a cuda or cpu device, but got: {device}')\n        elif device.type != 'cuda':\n            raise ValueError(f'Expected a cuda device, but got: {device}')\n    if not torch.jit.is_scripting():\n        if isinstance(device, torch.cuda.device):\n            return device.idx\n    return _torch_get_device_index(device, optional, allow_cpu)",
    "docstring": "Get the device index from :attr:, which can be a torch.device object, a Python integer, or `deviceoptionalallow_cpudevicedeviceoptional`.",
    "type": "function",
    "file_path": "pytorch\\torch\\cuda\\_utils.py",
    "ast_data": "FunctionDef name:_get_device_index arg:device arg:optional arg:allow_cpu arguments arg arg arg If Call Return return:yes If Call Assign Call If Call If If Compare Raise Call If Compare Raise Call If Call If Call Return return:yes Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "always_wrap_policy",
    "source_code": "def always_wrap_policy(*args, **kwargs) -> bool:\n    return True",
    "docstring": "A simple recursive wrap policy that always returns `_recursive_wrap`.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\fsdp\\wrap.py",
    "ast_data": "FunctionDef name:always_wrap_policy arguments arg arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "export",
    "source_code": "def export(self, name=None):\n    with ops.name_scope(name, '%s_Export' % self.name, [self.resource_handle]):\n        exported_keys, exported_values = gen_lookup_ops.lookup_table_export_v2(self.resource_handle, self._key_dtype, self._value_dtype)\n    exported_values.set_shape(exported_keys.get_shape().concatenate(self._value_shape))\n    return (exported_keys, exported_values)",
    "docstring": "Returns tensors of all keys and values in the table. Args: name: A name for the operation (optional). Returns: A pair of tensors with the first tensor containing all keys and the second tensors containing all values in the table.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\lookup_ops.py",
    "ast_data": "FunctionDef name:export arg:self arg:name arguments arg arg With Call Assign Call Call Call Call Return return:yes"
  },
  {
    "library": "pandas",
    "name": "var",
    "source_code": "@deprecate_nonkeyword_arguments(version='4.0', allowed_args=['self'], name='var')\ndef var(self, axis: Axis | None=None, skipna: bool=True, ddof: int=1, numeric_only: bool=False, **kwargs):\n    return NDFrame.var(self, axis=axis, skipna=skipna, ddof=ddof, numeric_only=numeric_only, **kwargs)",
    "docstring": "Return unbiased variance over requested axis. Normalized by N-1 by default. This can be changed using the ddof argument. Parameters ---------- axis : {index (0)} For this parameter is unused and defaults to 0. .. warning:: The behavior of DataFrame.var with `` can be set to normalize by N instead of N-1: >>> df.var(ddof=0) age 264.687500 height 0.042275 dtype: float64",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\series.py",
    "ast_data": "FunctionDef name:var arg:self arg:axis arg:skipna arg:ddof arg:numeric_only arguments arg arg arg arg arg arg Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "load_op_profiles",
    "source_code": "def load_op_profiles(f: FileLike) -> dict[str, set[OpProfile]]:\n    if isinstance(f, (str, os.PathLike)):\n        f = os.fspath(f)\n        with open(f) as file:\n            yaml_str = file.read()\n    elif isinstance(f, io.BytesIO):\n        yaml_str = f.read().decode('utf-8')\n    else:\n        raise ValueError(f'Invalid type of file {f}')\n    return read_profiles_from_yaml(yaml_str)",
    "docstring": "Loads the saved operator profiles from .",
    "type": "function",
    "file_path": "pytorch\\torch\\_library\\fake_profile.py",
    "ast_data": "FunctionDef name:load_op_profiles arg:f arguments arg If Call Assign Call With Call Assign Call If Call Assign Call Call Raise Call Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "collapse",
    "source_code": "def collapse(self):\n    self.collapsed = np.array(self, copy=copy_if_needed)\n    self.cs = None\n    self.ds = None\n    self.alpha = None",
    "docstring": "Collapse the low-rank matrix to a full-rank one.",
    "type": "method",
    "file_path": "scipy\\scipy\\optimize\\_nonlin.py",
    "ast_data": "FunctionDef name:collapse arg:self arguments arg Assign Call Assign Assign Assign"
  },
  {
    "library": "matplotlib",
    "name": "set_active",
    "source_code": "def set_active(self, active):\n    self._active = active",
    "docstring": "Set whether the widget is active.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\widgets.py",
    "ast_data": "FunctionDef name:set_active arg:self arg:active arguments arg arg Assign"
  },
  {
    "library": "numpy",
    "name": "devices",
    "source_code": "def devices(self):\n    return ['cpu']",
    "docstring": "The devices supported by NumPy. For NumPy, this always returns ``. Returns ------- devices : list of str The devices supported by NumPy. See Also -------- __array_namespace_info__.capabilities, __array_namespace_info__.default_device, __array_namespace_info__.default_dtypes, __array_namespace_info__.dtypes Examples -------- >>> info = np.__array_namespace_info__() >>> info.devices() ['cpu']",
    "type": "method",
    "file_path": "numpy\\numpy\\_array_api_info.py",
    "ast_data": "FunctionDef name:devices arg:self arguments arg Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "_kneighbors_from_graph",
    "source_code": "def _kneighbors_from_graph(graph, n_neighbors, return_distance):\n    n_samples = graph.shape[0]\n    assert graph.format == 'csr'\n    row_nnz = np.diff(graph.indptr)\n    row_nnz_min = row_nnz.min()\n    if n_neighbors is not None and row_nnz_min < n_neighbors:\n        raise ValueError('%d neighbors per samples are required, but some samples have only %d neighbors in precomputed graph matrix. Decrease number of neighbors used or recompute the graph with more neighbors.' % (n_neighbors, row_nnz_min))\n\n    def extract(a):\n        if row_nnz.max() == row_nnz_min:\n            return a.reshape(n_samples, -1)[:, :n_neighbors]\n        else:\n            idx = np.tile(np.arange(n_neighbors), (n_samples, 1))\n            idx += graph.indptr[:-1, None]\n            return a.take(idx, mode='clip').reshape(n_samples, n_neighbors)\n    if return_distance:\n        return (extract(graph.data), extract(graph.indices))\n    else:\n        return extract(graph.indices)",
    "docstring": "Decompose a nearest neighbors sparse graph into distances and indices. Parameters ---------- graph : sparse matrix of shape (n_samples, n_samples) Neighbors graph as given by or . Matrix should be of format CSR format. n_neighbors : int Number of neighbors required for each sample. return_distance : bool Whether or not to return the distances. Returns ------- neigh_dist : ndarray of shape (n_samples, n_neighbors) Distances to nearest neighbors. Only present if . neigh_ind : ndarray of shape (n_samples, n_neighbors) Indices of nearest neighbors.",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\neighbors\\_base.py",
    "ast_data": "FunctionDef name:_kneighbors_from_graph arg:graph arg:n_neighbors arg:return_distance arguments arg arg arg Assign Compare Assign Call Assign Call If BoolOp Compare Compare Raise Call FunctionDef name:extract arg:a arguments arg If Compare Call Return return:yes Call Assign Call Call Return return:yes Call Call If Return return:yes Call Call Return return:yes Call"
  },
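A sketch of the user-facing path that reaches this helper (assuming scikit-learn >= 1.2 for `sort_graph_by_row_values`): pass a precomputed sparse neighbor graph to a nearest-neighbors estimator, and the query is decomposed from the graph rather than recomputed.

```python
# Querying neighbors from a precomputed sparse graph.
import numpy as np
from sklearn.neighbors import (NearestNeighbors, kneighbors_graph,
                               sort_graph_by_row_values)

X = np.random.RandomState(0).rand(10, 2)
G = sort_graph_by_row_values(kneighbors_graph(X, n_neighbors=4, mode="distance"))
nn = NearestNeighbors(n_neighbors=3, metric="precomputed").fit(G)
dist, ind = nn.kneighbors()   # extracted from the graph, not recomputed
print(dist.shape, ind.shape)  # (10, 3) (10, 3)
```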
  {
    "library": "tensorflow",
    "name": "UnconnectedGradients",
    "source_code": "@tf_export('UnconnectedGradients')\nclass UnconnectedGradients(enum.Enum):\n    NONE = 'none'\n    ZERO = 'zero'",
    "docstring": "Controls how gradient computation behaves when y does not depend on x. The gradient of y with respect to x can be zero in two different ways: there could be no differentiable path in the graph connecting x to y (and so we can statically prove that the gradient is zero) or it could be that runtime values of tensors in a particular execution lead to a gradient of zero (say, if a relu unit happens to not be activated). To allow you to distinguish between these two cases you can choose what value gets returned for the gradient when there is no path in the graph from x to y: * : Indicates that [None] will be returned if there is no path from x to y * : Indicates that a zero tensor will be returned in the shape of x.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\unconnected_gradients.py",
    "ast_data": "ClassDef name:UnconnectedGradients Assign Assign Call"
  },
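The distinction is visible through `tf.GradientTape`: with the default `NONE` an unconnected gradient comes back as `None`, while `ZERO` yields a zero tensor shaped like x.

```python
# Unconnected gradients: None by default, zeros with UnconnectedGradients.ZERO.
import tensorflow as tf

x = tf.Variable(2.0)
y = tf.constant(3.0)  # y does not depend on x
with tf.GradientTape() as tape:
    z = y * y
print(tape.gradient(z, x))  # None

with tf.GradientTape() as tape:
    z = y * y
print(tape.gradient(z, x, unconnected_gradients=tf.UnconnectedGradients.ZERO))
# tf.Tensor(0.0, shape=(), dtype=float32)
```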
  {
    "library": "pytorch",
    "name": "Identity",
    "source_code": "class Identity(Module):\n\n    def __init__(self, *args: Any, **kwargs: Any) -> None:\n        super().__init__()\n\n    def forward(self, input: Tensor) -> Tensor:\n        return input",
    "docstring": "A placeholder identity operator that is argument-insensitive. Args: args: any argument (unused) kwargs: any keyword argument (unused) Shape: - Input: :math:, where :math: means any number of dimensions. - Output: :math:, same shape as the input. Examples:: >>> m = nn.Identity(54, unused_argument1=0.1, unused_argument2=False) >>> input = torch.randn(128, 20) >>> output = m(input) >>> print(output.size()) torch.Size([128, 20])",
    "type": "class",
    "file_path": "pytorch\\torch\\nn\\modules\\linear.py",
    "ast_data": "ClassDef name:Identity FunctionDef name:__init__ arg:self arguments arg arg arg Call Call FunctionDef name:forward arg:self arg:input arguments arg arg Return return:yes"
  },
  {
    "library": "pandas",
    "name": "isin",
    "source_code": "def isin(self, values: Series | DataFrame | Sequence | Mapping) -> DataFrame:\n    if isinstance(values, dict):\n        from pandas.core.reshape.concat import concat\n        values = collections.defaultdict(list, values)\n        result = concat((self.iloc[:, [i]].isin(values[col]) for i, col in enumerate(self.columns)), axis=1)\n    elif isinstance(values, Series):\n        if not values.index.is_unique:\n            raise ValueError('cannot compute isin with a duplicate axis.')\n        result = self.eq(values.reindex_like(self), axis='index')\n    elif isinstance(values, DataFrame):\n        if not (values.columns.is_unique and values.index.is_unique):\n            raise ValueError('cannot compute isin with a duplicate axis.')\n        result = self.eq(values.reindex_like(self))\n    else:\n        if not is_list_like(values):\n            raise TypeError(f\"only list-like or dict-like objects are allowed to be passed to DataFrame.isin(), you passed a '{type(values).__name__}'\")\n\n        def isin_(x):\n            result = algorithms.isin(x.ravel(), values)\n            return result.reshape(x.shape)\n        res_mgr = self._mgr.apply(isin_)\n        result = self._constructor_from_mgr(res_mgr, axes=res_mgr.axes)\n    return result.__finalize__(self, method='isin')",
    "docstring": "Whether each element in the DataFrame is contained in values. Parameters ---------- values : iterable, Series, DataFrame or dict The result will only be true at a location if all the labels match. If is a Series, that's the index. If is a dict, the keys must be the column names, which must match. If is a DataFrame, then both the index and column labels must match. Returns ------- DataFrame DataFrame of booleans showing whether each element in the DataFrame is contained in values. See Also -------- DataFrame.eq: Equality test for DataFrame. Series.isin: Equivalent method on Series. Series.str.contains: Test if pattern or regex is contained within a string of a Series or Index. Notes ----- `` is a Series or DataFrame the index and column must match. Note that 'falcon' does not match based on the number of legs in other. >>> other = pd.DataFrame( ... {\"num_legs\": [8, 3], \"num_wings\": [0, 2]}, index=[\"spider\", \"falcon\"] ... ) >>> df.isin(other) num_legs num_wings falcon False True dog False False",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\frame.py",
    "ast_data": "FunctionDef name:isin arg:self arg:values arguments arg arg If Call Assign Call Assign Call Call Call If Call If Raise Call Assign Call Call If Call If BoolOp Raise Call Assign Call Call If Call Raise Call Call FunctionDef name:isin_ arg:x arguments arg Assign Call Call Return return:yes Call Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "device_count",
    "source_code": "def device_count() -> int:\n    return int(torch._C._has_mps and torch._C._mps_is_available())",
    "docstring": "Returns the number of available MPS devices.",
    "type": "function",
    "file_path": "pytorch\\torch\\mps\\__init__.py",
    "ast_data": "FunctionDef name:device_count arguments Return return:yes Call BoolOp Call"
  },
  {
    "library": "matplotlib",
    "name": "process_value",
    "source_code": "@staticmethod\ndef process_value(value):\n    is_scalar = not np.iterable(value)\n    if is_scalar:\n        value = [value]\n    dtype = np.min_scalar_type(value)\n    if np.issubdtype(dtype, np.integer) or dtype.type is np.bool_:\n        dtype = np.promote_types(dtype, np.float32)\n    mask = np.ma.getmask(value)\n    data = np.asarray(value)\n    result = np.ma.array(data, mask=mask, dtype=dtype, copy=True)\n    return (result, is_scalar)",
    "docstring": "Homogenize the input *value* for easy and efficient normalization. *value* can be a scalar or sequence. Parameters ---------- value Data to normalize. Returns ------- result : masked array Masked array with the same shape as *value*. is_scalar : bool Whether *value* is a scalar. Notes ----- Float dtypes are preserved; integer types with two bytes or smaller are converted to np.float32, and larger types are converted to np.float64. Preserving float32 when possible, and using in-place operations, greatly improves speed for large arrays.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\colors.py",
    "ast_data": "FunctionDef name:process_value arg:value arguments arg Assign Call If Assign Assign Call If BoolOp Call Compare Assign Call Assign Call Assign Call Assign Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "set_fillstyle",
    "source_code": "def set_fillstyle(self, fs):\n    self.set_marker(MarkerStyle(self._marker.get_marker(), fs))\n    self.stale = True",
    "docstring": "Set the marker fill style. Parameters ---------- fs : {'full', 'left', 'right', 'bottom', 'top', 'none'} Possible values: - 'full': Fill the whole marker with the *markerfacecolor*. - 'left', 'right', 'bottom', 'top': Fill the marker half at the given side with the *markerfacecolor*. The other half of the marker is filled with *markerfacecoloralt*. - 'none': No filling. For examples see :ref:.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\lines.py",
    "ast_data": "FunctionDef name:set_fillstyle arg:self arg:fs arguments arg arg Call Call Call Assign"
  },
  {
    "library": "kornia",
    "name": "RTDETRModelType",
    "source_code": "class RTDETRModelType(Enum):\n    resnet18d = 0\n    resnet34d = 1\n    resnet50d = 2\n    resnet101d = 3\n    hgnetv2_l = 4\n    hgnetv2_x = 5\n    resnet50d_m = 6",
    "docstring": "Enum class that maps RT-DETR model type.",
    "type": "class",
    "file_path": "kornia\\kornia\\contrib\\models\\rt_detr\\model.py",
    "ast_data": "ClassDef name:RTDETRModelType Assign Assign Assign Assign Assign Assign Assign"
  },
  {
    "library": "pandas",
    "name": "versions_from_parentdir",
    "source_code": "def versions_from_parentdir(parentdir_prefix, root, verbose):\n    rootdirs = []\n    for _ in range(3):\n        dirname = os.path.basename(root)\n        if dirname.startswith(parentdir_prefix):\n            return {'version': dirname[len(parentdir_prefix):], 'full-revisionid': None, 'dirty': False, 'error': None, 'date': None}\n        rootdirs.append(root)\n        root = os.path.dirname(root)\n    if verbose:\n        print(f'Tried directories {rootdirs!s}             but none started with prefix {parentdir_prefix}')\n    raise NotThisMethod(\"rootdir doesn't start with parentdir_prefix\")",
    "docstring": "Try to determine the version from the parent directory name. Source tarballs conventionally unpack into a directory that includes both the project name and a version string. We will also support searching up two directory levels for an appropriately named parent directory",
    "type": "function",
    "file_path": "pandas\\pandas\\_version.py",
    "ast_data": "FunctionDef name:versions_from_parentdir arg:parentdir_prefix arg:root arg:verbose arguments arg arg arg Assign For Call Assign Call If Call Return return:yes Call Call Assign Call If Call Raise Call"
  },
  {
    "library": "kornia",
    "name": "fit",
    "source_code": "def fit(self, x: Tensor) -> 'ZCAWhitening':\n    T, mean, T_inv = zca_mean(x, self.dim, self.unbiased, self.eps, self.compute_inv)\n    self.mean_vector = mean\n    self.transform_matrix = T\n    if T_inv is None:\n        self.transform_inv = torch.empty([0])\n    else:\n        self.transform_inv = T_inv\n    if self.detach_transforms:\n        self.mean_vector = self.mean_vector.detach()\n        self.transform_matrix = self.transform_matrix.detach()\n        self.transform_inv = self.transform_inv.detach()\n    self.fitted = True\n    return self",
    "docstring": "Fit ZCA whitening matrices to the data. Args: x: Input data. Returns: Returns a fitted ZCAWhiten object instance.",
    "type": "method",
    "file_path": "kornia\\kornia\\enhance\\zca.py",
    "ast_data": "FunctionDef name:fit arg:self arg:x arguments arg arg Assign Call Assign Assign If Compare Assign Call Assign If Assign Call Assign Call Assign Call Assign Return return:yes"
  },
  {
    "library": "kornia",
    "name": "vee",
    "source_code": "@staticmethod\ndef vee(omega: Tensor) -> Tensor:\n    a, b, c = (omega[..., 2, 1], omega[..., 0, 2], omega[..., 1, 0])\n    return stack((a, b, c), -1)",
    "docstring": "Convert elements from lie algebra to vector space. Returns vector of shape :math:. .. math:: omega = \\begin{bmatrix} 0 & -c & b \\\\ c & 0 & -a \\\\ -b & a & 0\\end{bmatrix} Args: omega: 3x3-matrix representing lie algebra. Example: >>> v = torch.ones((1,3)) >>> omega = So3.hat(v) >>> So3.vee(omega) tensor([[1., 1., 1.]])",
    "type": "method",
    "file_path": "kornia\\kornia\\geometry\\liegroup\\so3.py",
    "ast_data": "FunctionDef name:vee arg:omega arguments arg Assign Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "__init__",
    "source_code": "def __init__(self, axis, *, linear_width=1.0, base=10, subs='auto', **kwargs):\n    super().__init__(axis)\n    self._transform = AsinhTransform(linear_width)\n    self._base = int(base)\n    if subs == 'auto':\n        self._subs = self.auto_tick_multipliers.get(self._base)\n    else:\n        self._subs = subs",
    "docstring": "Parameters ---------- linear_width : float, default: 1 The scale parameter (elsewhere referred to as :math:) defining the extent of the quasi-linear region, and the coordinate values beyond which the transformation becomes asymptotically logarithmic. base : int, default: 10 The number base used for rounding tick locations on a logarithmic scale. If this is less than one, then rounding is to the nearest integer multiple of powers of ten. subs : sequence of int Multiples of the number base used for minor ticks. If set to 'auto', this will use built-in defaults, e.g. (2, 5) for base=10.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\scale.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:axis arguments arg arg arg arg arg arg Call Call Assign Call Assign Call If Compare Assign Call Assign"
  },
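A minimal sketch of selecting this scale through the set_yscale API; assumes matplotlib >= 3.5, where the 'asinh' scale is available.

```python
import numpy as np
import matplotlib.pyplot as plt

x = np.linspace(-100, 100, 500)
fig, ax = plt.subplots()
ax.plot(x, x)
# linear_width widens the quasi-linear region around zero.
ax.set_yscale('asinh', linear_width=5, base=10)
plt.show()
```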
  {
    "library": "matplotlib",
    "name": "execute",
    "source_code": "def execute(self, fig):\n    width, height = fig.get_size_inches()\n    w_pad = self._params['w_pad'] / width\n    h_pad = self._params['h_pad'] / height\n    return do_constrained_layout(fig, w_pad=w_pad, h_pad=h_pad, wspace=self._params['wspace'], hspace=self._params['hspace'], rect=self._params['rect'], compress=self._compress)",
    "docstring": "Perform constrained_layout and move and resize Axes accordingly. Parameters ---------- fig : to perform layout on.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\layout_engine.py",
    "ast_data": "FunctionDef name:execute arg:self arg:fig arguments arg arg Assign Call Assign Assign Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "dtype",
    "source_code": "@property\ndef dtype(self):\n    return self._dtype",
    "docstring": "The datatype of the gradients accumulated by this accumulator.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\data_flow_ops.py",
    "ast_data": "FunctionDef name:dtype arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, path_map: Mapping[str, os.PathLike[str]], expected_input_key_map: Optional[Mapping[str, Collection[str]]]=None):\n    self.path_map: Mapping[str, os.PathLike[str]] = path_map\n    self.expected_input_key_map: Mapping[str, Collection[str]] = {}\n    if expected_input_key_map is not None:\n        if set(path_map.keys()) != set(expected_input_key_map.keys()):\n            raise KeyError('The `path_map` and `expected_input_key_map` should have the same set of keys.')\n        self.expected_input_key_map = expected_input_key_map",
    "docstring": "Initializes TFRecord represenatative dataset saver. Args: path_map: Signature def key -> path mapping. Each path is a TFRecord file to which a is saved. The signature def keys should be a subset of the keys of the argument of the call. expected_input_key_map: Signature def key -> expected input keys. If set, validate that the sample has same set of input keys before saving. Raises: KeyError: If path_map and expected_input_key_map have different keys.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\compiler\\mlir\\quantization\\tensorflow\\python\\representative_dataset.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:path_map arg:expected_input_key_map arguments arg arg arg If Compare If Compare Call Call Call Call Raise Call Assign"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, type_spec, inferred_value=None, name=None):\n    if not isinstance(type_spec, type_spec_module.TypeSpec):\n        raise ValueError('KerasTensors must be constructed with a `tf.TypeSpec`.')\n    self._type_spec = type_spec\n    self._inferred_value = inferred_value\n    self._name = name",
    "docstring": "Constructs a KerasTensor.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\keras_tensor.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:type_spec arg:inferred_value arg:name arguments arg arg arg arg If Call Raise Call Assign Assign Assign"
  },
  {
    "library": "tensorflow",
    "name": "_initialize_local",
    "source_code": "def _initialize_local(self, compute_devices, parameter_device, cluster_resolver=None):\n    self._worker_device = device_util.canonicalize('/device:CPU:0')\n    self._input_host_device = numpy_dataset.SingleDevice(self._worker_device)\n    if compute_devices is None:\n        if not cluster_resolver:\n            num_gpus = context.num_gpus()\n        else:\n            num_gpus = cluster_resolver.num_accelerators().get('GPU', 0)\n        self._num_gpus_per_worker = num_gpus\n        compute_devices = device_util.local_devices_from_num_gpus(num_gpus)\n    compute_devices = [device_util.canonicalize(d) for d in compute_devices]\n    if parameter_device is None:\n        if len(compute_devices) == 1:\n            parameter_device = compute_devices[0]\n        else:\n            parameter_device = _LOCAL_CPU\n    self._variable_device = parameter_device\n    self._compute_devices = compute_devices\n    self._parameter_devices = (parameter_device,)\n    self._is_chief = True\n    self._cluster_spec = None\n    self._task_type = None\n    self._task_id = None\n    logging.info('ParameterServerStrategy (CentralStorageStrategy if you are using a single machine) with compute_devices = %r, variable_device = %r', compute_devices, self._variable_device)",
    "docstring": "Initialize local devices for training.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\parameter_server_strategy.py",
    "ast_data": "FunctionDef name:_initialize_local arg:self arg:compute_devices arg:parameter_device arg:cluster_resolver arguments arg arg arg arg Assign Call Assign Call If Compare If Assign Call Assign Call Call Assign Assign Call Assign Call If Compare If Compare Call Assign Assign Assign Assign Assign Assign Assign Assign Assign Call"
  },
  {
    "library": "tensorflow",
    "name": "update_sub",
    "source_code": "@doc_controls.do_not_generate_docs\ndef update_sub(x, decrement):\n    return state_ops.assign_sub(x, decrement)",
    "docstring": "Update the value of by subtracting . Args: x: A Variable. decrement: A tensor of same shape as . Returns: The variable updated.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\backend.py",
    "ast_data": "FunctionDef name:update_sub arg:x arg:decrement arguments arg arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "sparse_split",
    "source_code": "@tf_export(v1=['sparse.split', 'sparse_split'])\n@deprecation.deprecated_endpoints('sparse_split')\n@deprecation.deprecated_args(None, 'split_dim is deprecated, use axis instead', 'split_dim')\ndef sparse_split(keyword_required=KeywordRequired(), sp_input=None, num_split=None, axis=None, name=None, split_dim=None):\n    if not isinstance(keyword_required, KeywordRequired):\n        raise ValueError('Keyword arguments are required for this function.')\n    if sp_input is None:\n        raise ValueError('sp_input is required')\n    if num_split is None:\n        raise ValueError('num_split is required')\n    if axis is None:\n        raise ValueError('axis is required')\n    axis = deprecation.deprecated_argument_lookup('axis', axis, 'split_dim', split_dim)\n    sp_input = _convert_to_sparse_tensor(sp_input)\n    output_inds, output_vals, output_shapes = gen_sparse_ops.sparse_split(axis, sp_input.indices, sp_input.values, sp_input.dense_shape, num_split, name=name)\n    sparse_tensors = []\n    for i in range(0, num_split):\n        sparse_tensors.append(sparse_tensor.SparseTensor(output_inds[i], output_vals[i], output_shapes[i]))\n    return sparse_tensors",
    "docstring": "Split a into tensors along . If the is not an integer multiple of each slice starting from 0: gets extra one dimension. For example, if and and the input is: input_tensor = shape = [2, 7] [ a d e ] [b c ] Graphically the output tensors are: output_tensor[0] = [ a ] [b c ] output_tensor[1] = [ d e ] [ ] Args: keyword_required: Python 2 standin for * (temporary for argument reorder) sp_input: The to split. num_split: A Python integer. The number of ways to split. axis: A 0-D . The dimension along which to split. Must be in range [-rank, rank), where rank is the number of dimensions in the input . name: A name for the operation (optional). split_dim: Deprecated old name for axis. Returns: objects resulting from splitting . Raises: TypeError: If is not a . ValueError: If the deprecated and are both non None.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\sparse_ops.py",
    "ast_data": "FunctionDef name:sparse_split arg:keyword_required arg:sp_input arg:num_split arg:axis arg:name arg:split_dim arguments arg arg arg arg arg arg Call If Call Raise Call If Compare Raise Call If Compare Raise Call If Compare Raise Call Assign Call Assign Call Assign Call Assign For Call Call Call Return return:yes Call Call Call"
  },
  {
    "library": "matplotlib",
    "name": "__init__",
    "source_code": "def __init__(self, n=None):\n    self.ndivs = n",
    "docstring": "Parameters ---------- n : int or 'auto', default: :rc: or :rc: The number of subdivisions of the interval between major ticks; e.g., n=2 will place a single minor tick midway between major ticks. If *n* is 'auto', it will be set to 4 or 5: if the distance between the major ticks equals 1, 2.5, 5 or 10 it can be perfectly divided in 5 equidistant sub-intervals with a length multiple of 0.05; otherwise, it is divided in 4 sub-intervals.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\ticker.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:n arguments arg arg Assign"
  },
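A sketch of the n=2 case from the docstring above: one minor tick midway between major ticks; assumes matplotlib.

```python
import matplotlib.pyplot as plt
from matplotlib.ticker import AutoMinorLocator

fig, ax = plt.subplots()
ax.plot(range(10))
# One subdivision boundary between each pair of major ticks.
ax.xaxis.set_minor_locator(AutoMinorLocator(2))
plt.show()
```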
  {
    "library": "matplotlib",
    "name": "_remove_axes",
    "source_code": "def _remove_axes(self, ax, owners):\n    for owner in owners:\n        owner.remove(ax)\n    self._axobservers.process('_axes_change_event', self)\n    self.stale = True\n    self._root_figure.canvas.release_mouse(ax)\n    for name in ax._axis_names:\n        grouper = ax._shared_axes[name]\n        siblings = [other for other in grouper.get_siblings(ax) if other is not ax]\n        if not siblings:\n            continue\n        grouper.remove(ax)\n        remaining_axis = siblings[0]._axis_map[name]\n        remaining_axis.get_major_formatter().set_axis(remaining_axis)\n        remaining_axis.get_major_locator().set_axis(remaining_axis)\n        remaining_axis.get_minor_formatter().set_axis(remaining_axis)\n        remaining_axis.get_minor_locator().set_axis(remaining_axis)\n    ax._twinned_axes.remove(ax)",
    "docstring": "Common helper for removal of standard Axes (via delaxes) and of child Axes. Parameters ---------- ax : The Axes to remove. owners List of objects (list or _AxesStack) \"owning\" the Axes, from which the Axes will be remove()d.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\figure.py",
    "ast_data": "FunctionDef name:_remove_axes arg:self arg:ax arg:owners arguments arg arg arg For Call Call Assign Call For Assign Assign Call Compare If Call Assign Call Call Call Call Call Call Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "enable_collective_ops",
    "source_code": "def enable_collective_ops(self, server_def):\n    if not server_def:\n        raise ValueError('server_def is None.')\n    self._collective_ops_server_def = server_def\n    if self._context_handle is not None:\n        logging.warning('Enabling collective ops after program startup may cause error when accessing previously created tensors.')\n        with self._initialize_lock:\n            assert self._initialized\n            server_def_str = self._collective_ops_server_def.SerializeToString()\n            pywrap_tfe.TFE_EnableCollectiveOps(self._context_handle, server_def_str)\n            self._initialize_logical_devices()\n            self._clear_caches()",
    "docstring": "Enable distributed collective ops with an appropriate server_def. Args: server_def: A tensorflow::ServerDef proto. Enables execution on remote devices. Raises: ValueError: if server_def is None. RuntimeError: if this method is not called at program startup.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\context.py",
    "ast_data": "FunctionDef name:enable_collective_ops arg:self arg:server_def arguments arg arg If Raise Call Assign If Compare Call With Assign Call Call Call Call"
  },
  {
    "library": "matplotlib",
    "name": "get_window_extent",
    "source_code": "def get_window_extent(self, renderer=None):\n    return Bbox([[0, 0], [0, 0]])",
    "docstring": "Get the artist's bounding box in display space. The bounding box's width and height are non-negative. Subclasses should override for inclusion in the bounding box \"tight\" calculation. Default is to return an empty bounding box at 0, 0. .. warning:: The extent can change due to any changes in the transform stack, such as changing the Axes limits, the figure size, the canvas used (as is done when saving a figure), or the DPI. Relying on a once-retrieved window extent can lead to unexpected behavior in various cases such as interactive figures being resized or moved to a screen with different dpi, or figures that look fine on screen render incorrectly when saved to file. To get accurate results you may need to manually call or to have Matplotlib compute the rendered size.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\artist.py",
    "ast_data": "FunctionDef name:get_window_extent arg:self arg:renderer arguments arg arg Return return:yes Call"
  },
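A sketch of the draw-before-measuring caveat from the warning above; assumes matplotlib with the Agg backend.

```python
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt

fig, ax = plt.subplots()
text = ax.text(0.5, 0.5, 'hello')
fig.canvas.draw()                 # render so extents reflect current state
print(text.get_window_extent())  # Bbox in display (pixel) coordinates
```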
  {
    "library": "pytorch",
    "name": "_get_tmp_dir",
    "source_code": "@staticmethod\ndef _get_tmp_dir() -> str:\n    return os.path.join(cache_dir(), 'fxgraph')",
    "docstring": "Get the toplevel temporary directory for storing compiled graphs.",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\codecache.py",
    "ast_data": "FunctionDef name:_get_tmp_dir arguments Return return:yes Call Call"
  },
  {
    "library": "matplotlib",
    "name": "__init__",
    "source_code": "def __init__(self, N=256, M=256, shape='square', origin=(0, 0), name='bivariate colormap'):\n    self.name = name\n    self.N = int(N)\n    self.M = int(M)\n    _api.check_in_list(['square', 'circle', 'ignore', 'circleignore'], shape=shape)\n    self._shape = shape\n    self._rgba_bad = (0.0, 0.0, 0.0, 0.0)\n    self._rgba_outside = (1.0, 0.0, 1.0, 1.0)\n    self._isinit = False\n    self.n_variates = 2\n    self._origin = (float(origin[0]), float(origin[1]))\n    '#: When this colormap exists on a scalar mappable and colorbar_extend\\n        #: is not False, colorbar creation will pick up ``colorbar_extend`` as\\n        #: the default value for the ``extend`` keyword in the\\n        #: `matplotlib.colorbar.Colorbar` constructor.\\n        self.colorbar_extend = False'",
    "docstring": "Parameters ---------- N : int, default: 256 The number of RGB quantization levels along the first axis. M : int, default: 256 The number of RGB quantization levels along the second axis. shape : {'square', 'circle', 'ignore', 'circleignore'} - 'square' each variate is clipped to [0,1] independently - 'circle' the variates are clipped radially to the center of the colormap, and a circular mask is applied when the colormap is displayed - 'ignore' the variates are not clipped, but instead assigned the 'outside' color - 'circleignore' a circular mask is applied, but the data is not clipped and instead assigned the 'outside' color origin : (float, float), default: (0,0) The relative origin of the colormap. Typically (0, 0), for colormaps that are linear on both axis, and (.5, .5) for circular colormaps. Used when getting 1D colormaps from 2D colormaps. name : str, optional The name of the colormap.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\colors.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:N arg:M arg:shape arg:origin arg:name arguments arg arg arg arg arg arg Assign Assign Call Assign Call Call Assign Assign Assign Assign Assign Assign Call Call"
  },
  {
    "library": "numpy",
    "name": "me",
    "source_code": "@staticmethod\ndef me(cb):\n\n    def cache_wrap_me(self, *args, **kwargs):\n        cache_key = str((cb.__name__, *args, *kwargs.keys(), *kwargs.values()))\n        if cache_key in self.cache_me:\n            return self.cache_me[cache_key]\n        ccb = cb(self, *args, **kwargs)\n        self.cache_me[cache_key] = ccb\n        return ccb\n    return cache_wrap_me",
    "docstring": "A static method that can be treated as a decorator to dynamically cache certain methods.",
    "type": "method",
    "file_path": "numpy\\numpy\\distutils\\ccompiler_opt.py",
    "ast_data": "FunctionDef name:me arg:cb arguments arg FunctionDef name:cache_wrap_me arg:self arguments arg arg arg Assign Call Call Call If Compare Return return:yes Assign Call Assign Return return:yes Return return:yes"
  },
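A self-contained sketch of the keyed memoisation pattern `me` implements; the Analyzer/Probe names are illustrative, and only the decorator logic mirrors the record above.

```python
class Analyzer:
    def __init__(self):
        self.cache_me = {}  # per-instance cache keyed by method name + args

    @staticmethod
    def me(cb):
        def cache_wrap_me(self, *args, **kwargs):
            key = str((cb.__name__, *args, *kwargs.keys(), *kwargs.values()))
            if key in self.cache_me:
                return self.cache_me[key]
            result = cb(self, *args, **kwargs)
            self.cache_me[key] = result
            return result
        return cache_wrap_me

class Probe(Analyzer):
    @Analyzer.me
    def slow(self, n):
        print('computing', n)
        return n * n

p = Probe()
p.slow(3)  # prints 'computing 3' and caches 9
p.slow(3)  # served from cache, no print
```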
  {
    "library": "tensorflow",
    "name": "register_custom_opdefs",
    "source_code": "def register_custom_opdefs(custom_opdefs_list):\n    return wrap_converter.wrapped_register_custom_opdefs(custom_opdefs_list)",
    "docstring": "Register the given custom opdefs to the TensorFlow global op registry. Args: custom_opdefs_list: String representing the custom ops OpDefs that are included in the GraphDef. Returns: True if the registration is successfully completed.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\lite\\python\\convert.py",
    "ast_data": "FunctionDef name:register_custom_opdefs arg:custom_opdefs_list arguments arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "reduce_euclidean_norm",
    "source_code": "@tf_export('math.reduce_euclidean_norm')\n@dispatch.add_dispatch_support\ndef reduce_euclidean_norm(input_tensor, axis=None, keepdims=False, name=None):\n    keepdims = bool(keepdims)\n    return _may_reduce_to_scalar(keepdims, axis, gen_math_ops.euclidean_norm(input_tensor, _ReductionDims(input_tensor, axis), keepdims, name=name))",
    "docstring": "Computes the Euclidean norm of elements across dimensions of a tensor. Reduces along the dimensions given in . Unless is true, the rank of the tensor is reduced by 1 for each of the entries in , which must be unique. If is true, the reduced dimensions are retained with length 1. If is None, all dimensions are reduced, and a tensor with a single element is returned. For example: Args: input_tensor: The tensor to reduce. Should have numeric type. axis: The dimensions to reduce. If (the default), reduces all dimensions. Must be in the range . keepdims: If true, retains reduced dimensions with length 1. name: A name for the operation (optional). Returns: The reduced tensor, of the same dtype as the input_tensor.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\math_ops.py",
    "ast_data": "FunctionDef name:reduce_euclidean_norm arg:input_tensor arg:axis arg:keepdims arg:name arguments arg arg arg arg Assign Call Return return:yes Call Call Call Call"
  },
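A usage sketch, assuming TensorFlow is installed; the values are illustrative.

```python
import tensorflow as tf

x = tf.constant([[3.0, 4.0], [1.0, 0.0]])
print(tf.math.reduce_euclidean_norm(x))          # sqrt(9+16+1) ~= 5.099
print(tf.math.reduce_euclidean_norm(x, axis=1))  # [5.0, 1.0], per-row norms
```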
  {
    "library": "matplotlib",
    "name": "DejaVuSerifFonts",
    "source_code": "class DejaVuSerifFonts(DejaVuFonts):\n    _fontmap = {'rm': 'DejaVu Serif', 'it': 'DejaVu Serif:italic', 'bf': 'DejaVu Serif:weight=bold', 'bfit': 'DejaVu Serif:italic:bold', 'sf': 'DejaVu Sans', 'tt': 'DejaVu Sans Mono', 'ex': 'DejaVu Serif Display', 0: 'DejaVu Serif'}",
    "docstring": "A font handling class for the DejaVu Serif fonts If a glyph is not found it will fallback to Stix Serif",
    "type": "class",
    "file_path": "matplotlib\\lib\\matplotlib\\_mathtext.py",
    "ast_data": "ClassDef name:DejaVuSerifFonts Assign"
  },
  {
    "library": "tensorflow",
    "name": "_parse_command",
    "source_code": "def _parse_command(self, command):\n    command = command.strip()\n    if not command:\n        return ('', [], None)\n    command_items = command_parser.parse_command(command)\n    command_items, output_file_path = command_parser.extract_output_file_path(command_items)\n    return (command_items[0], command_items[1:], output_file_path)",
    "docstring": "Parse a command string into prefix and arguments. Args: command: (str) Command string to be parsed. Returns: prefix: (str) The command prefix. args: (list of str) The command arguments (i.e., not including the prefix). output_file_path: (str or None) The path to save the screen output to (if any).",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\debug\\cli\\base_ui.py",
    "ast_data": "FunctionDef name:_parse_command arg:self arg:command arguments arg arg Assign Call If Return return:yes Assign Call Assign Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_CollOp",
    "source_code": "class _CollOp:\n\n    def __init__(self, op: Callable, tensor: torch.Tensor, dst_tensor: Optional[torch.Tensor]=None, redop: Optional[ReduceOp]=None, root: Optional[int]=None):\n        self.op = op\n        self.tensor = tensor\n        self.dst_tensor = dst_tensor\n        self.redop = redop\n        self.root = root",
    "docstring": "A class to capture collective operations. Args: op (Callable): A collective function, e.g. ``. tensor (Tensor): Tensor to operate on. dst_tensor (Tensor, optional): Provided when source and destinaton tensors are not the same. redop (ReduceOp, optional): reduce operation. root (int, optional): root of broadcast or reduce.",
    "type": "class",
    "file_path": "pytorch\\torch\\distributed\\distributed_c10d.py",
    "ast_data": "ClassDef name:_CollOp FunctionDef name:__init__ arg:self arg:op arg:tensor arg:dst_tensor arg:redop arg:root arguments arg arg arg arg arg arg Assign Assign Assign Assign Assign"
  },
  {
    "library": "numpy",
    "name": "cc_test_cexpr",
    "source_code": "@_Cache.me\ndef cc_test_cexpr(self, cexpr, flags=[]):\n    self.dist_log('testing compiler expression', cexpr)\n    test_path = os.path.join(self.conf_tmp_path, 'npy_dist_test_cexpr.c')\n    with open(test_path, 'w') as fd:\n        fd.write(textwrap.dedent(f'               #if !({cexpr})\\n                   #error \"unsupported expression\"\\n               #endif\\n               int dummy;\\n            '))\n    test = self.dist_test(test_path, flags)\n    if not test:\n        self.dist_log('testing failed', stderr=True)\n    return test",
    "docstring": "Same as the above but supports compile-time expressions.",
    "type": "method",
    "file_path": "numpy\\numpy\\distutils\\ccompiler_opt.py",
    "ast_data": "FunctionDef name:cc_test_cexpr arg:self arg:cexpr arg:flags arguments arg arg arg Call Assign Call With Call Call Call Assign Call If Call Return return:yes"
  },
  {
    "library": "numpy",
    "name": "__rsub__",
    "source_code": "def __rsub__(self, other):\n    return subtract(other, self)",
    "docstring": "Subtract self from other, and return a new masked array.",
    "type": "method",
    "file_path": "numpy\\numpy\\ma\\core.py",
    "ast_data": "FunctionDef name:__rsub__ arg:self arg:other arguments arg arg Return return:yes Call"
  },
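A sketch of the reflected subtraction path: a scalar minus a masked array dispatches to __rsub__ and preserves the mask; assumes numpy.

```python
import numpy.ma as ma

a = ma.array([1.0, 2.0, 3.0], mask=[False, True, False])
print(10 - a)  # [9.0 -- 7.0]: masked entries stay masked
```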
  {
    "library": "tensorflow",
    "name": "reshard",
    "source_code": "def reshard(self, checkpoint_values: tensor.Tensor, shape_and_slice: str) -> tensor.Tensor:\n    return _shard_from_cpu_to_sc(checkpoint_values, shape_and_slice, self._to_shard_layout)",
    "docstring": "Reshards the checkpoint values according to the resharding plan. Args: checkpoint_values: The checkpoint values to be resharded. shape_and_slice: The shape and slice spec to be returned after resharding. Returns: The resharded tensor slice.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\tpu\\tpu_embedding_v3_checkpoint_adapter.py",
    "ast_data": "FunctionDef name:reshard arg:self arg:checkpoint_values arg:shape_and_slice arguments arg arg arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "join_hook",
    "source_code": "def join_hook(self, **kwargs):\n    return _ZeROJoinHook(self)",
    "docstring": "Return the ZeRO join hook. It enables training on uneven inputs by shadowing the collective communications in the optimizer step. Gradients must be properly set before this hook is called. Arguments: kwargs (dict): a :class: containing any keyword arguments to modify the behavior of the join hook at run time; all :class: instances sharing the same join context manager are forwarded the same value for `` is unused.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\optim\\zero_redundancy_optimizer.py",
    "ast_data": "FunctionDef name:join_hook arg:self arguments arg arg Return return:yes Call"
  },
  {
    "library": "django",
    "name": "get_bound_field",
    "source_code": "def get_bound_field(self, form, field_name):\n    bound_field_class = self.bound_field_class or form.bound_field_class or BoundField\n    return bound_field_class(form, self, field_name)",
    "docstring": "Return a BoundField instance that will be used when accessing the form field in a template.",
    "type": "method",
    "file_path": "django\\django\\forms\\fields.py",
    "ast_data": "FunctionDef name:get_bound_field arg:self arg:form arg:field_name arguments arg arg arg Assign BoolOp Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "CurveFilledAB",
    "source_code": "@_register_style(_style_list, name='<|-|>')\nclass CurveFilledAB(_Curve):\n    arrow = '<|-|>'",
    "docstring": "An arrow with filled triangle heads at both ends.",
    "type": "class",
    "file_path": "matplotlib\\lib\\matplotlib\\patches.py",
    "ast_data": "ClassDef name:CurveFilledAB Assign Call"
  },
  {
    "library": "kornia",
    "name": "mean_iou_bbox",
    "source_code": "def mean_iou_bbox(boxes_1: torch.Tensor, boxes_2: torch.Tensor) -> torch.Tensor:\n    if not ((boxes_1[:, 2] - boxes_1[:, 0] > 0).all() or (boxes_1[:, 3] - boxes_1[:, 1] > 0).all()):\n        raise AssertionError('Boxes_1 does not follow (x1, y1, x2, y2) format.')\n    if not ((boxes_2[:, 2] - boxes_2[:, 0] > 0).all() or (boxes_2[:, 3] - boxes_2[:, 1] > 0).all()):\n        raise AssertionError('Boxes_2 does not follow (x1, y1, x2, y2) format.')\n    lower_bounds = torch.max(boxes_1[:, :2].unsqueeze(1), boxes_2[:, :2].unsqueeze(0))\n    upper_bounds = torch.min(boxes_1[:, 2:].unsqueeze(1), boxes_2[:, 2:].unsqueeze(0))\n    intersection_dims = torch.clamp(upper_bounds - lower_bounds, min=0)\n    intersection = intersection_dims[:, :, 0] * intersection_dims[:, :, 1]\n    areas_set_1 = (boxes_1[:, 2] - boxes_1[:, 0]) * (boxes_1[:, 3] - boxes_1[:, 1])\n    areas_set_2 = (boxes_2[:, 2] - boxes_2[:, 0]) * (boxes_2[:, 3] - boxes_2[:, 1])\n    union = areas_set_1.unsqueeze(1) + areas_set_2.unsqueeze(0) - intersection\n    return intersection / union",
    "docstring": "Compute the IoU of the cartesian product of two sets of boxes. Each box in each set shall be (x1, y1, x2, y2). Args: boxes_1: a tensor of bounding boxes in :math:. boxes_2: a tensor of bounding boxes in :math:. Returns: a tensor in dimensions :math:, representing the intersection of each of the boxes in set 1 with respect to each of the boxes in set 2. Example: >>> boxes_1 = torch.tensor([[40, 40, 60, 60], [30, 40, 50, 60]]) >>> boxes_2 = torch.tensor([[40, 50, 60, 70], [30, 40, 40, 50]]) >>> mean_iou_bbox(boxes_1, boxes_2) tensor([[0.3333, 0.0000], [0.1429, 0.2500]])",
    "type": "function",
    "file_path": "kornia\\kornia\\metrics\\mean_iou.py",
    "ast_data": "FunctionDef name:mean_iou_bbox arg:boxes_1 arg:boxes_2 arguments arg arg If BoolOp Call Compare Call Compare Raise Call If BoolOp Call Compare Call Compare Raise Call Assign Call Call Call Assign Call Call Call Assign Call Assign Assign Assign Assign Call Call Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "GammaRegressor",
    "source_code": "class GammaRegressor(_GeneralizedLinearRegressor):\n    _parameter_constraints: dict = {**_GeneralizedLinearRegressor._parameter_constraints}\n\n    def __init__(self, *, alpha=1.0, fit_intercept=True, solver='lbfgs', max_iter=100, tol=0.0001, warm_start=False, verbose=0):\n        super().__init__(alpha=alpha, fit_intercept=fit_intercept, solver=solver, max_iter=max_iter, tol=tol, warm_start=warm_start, verbose=verbose)\n\n    def _get_loss(self):\n        return HalfGammaLoss()",
    "docstring": "Generalized Linear Model with a Gamma distribution. This regressor uses the 'log' link function. Read more in the :ref:. .. versionadded:: 0.23 Parameters ---------- alpha : float, default=1 Constant that multiplies the L2 penalty term and determines the regularization strength. `Xalpha[0.0, inf)X @ coef_ + intercept_n_samplesn_featuresn_features[1, inf)`max{|g_j|, j = 1, ..., d} >> from sklearn import linear_model >>> clf = linear_model.GammaRegressor() >>> X = [[1, 2], [2, 3], [3, 4], [4, 3]] >>> y = [19, 26, 33, 30] >>> clf.fit(X, y) GammaRegressor() >>> clf.score(X, y) np.float64(0.773) >>> clf.coef_ array([0.073, 0.067]) >>> clf.intercept_ np.float64(2.896) >>> clf.predict([[1, 0], [2, 8]]) array([19.483, 35.795])",
    "type": "class",
    "file_path": "scikit-learn\\sklearn\\linear_model\\_glm\\glm.py",
    "ast_data": "ClassDef name:GammaRegressor FunctionDef name:__init__ arg:self arguments arg arg arg arg arg arg arg arg Call Call FunctionDef name:_get_loss arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "sphinx",
    "name": "import_classes",
    "source_code": "def import_classes(name: str, currmodule: str) -> list[type[Any]]:\n    target = None\n    if currmodule:\n        target = try_import(currmodule + '.' + name)\n    if target is None:\n        target = try_import(name)\n    if target is None:\n        raise InheritanceException('Could not import class or module %r specified for inheritance diagram' % name)\n    if inspect.isclass(target):\n        return [target]\n    elif inspect.ismodule(target):\n        return [cls for cls in target.__dict__.values() if inspect.isclass(cls) and cls.__module__ == target.__name__]\n    msg = f'{name!r} specified for inheritance diagram is not a class or module'\n    raise InheritanceException(msg)",
    "docstring": "Import a class using its fully-qualified *name*.",
    "type": "function",
    "file_path": "sphinx\\sphinx\\ext\\inheritance_diagram.py",
    "ast_data": "FunctionDef name:import_classes arg:name arg:currmodule arguments arg arg Assign If Assign Call If Compare Assign Call If Compare Raise Call If Call Return return:yes If Call Return return:yes Call BoolOp Call Compare Assign Raise Call"
  },
  {
    "library": "tensorflow",
    "name": "_make_request",
    "source_code": "def _make_request(self, verb: str, endpoint: str, **kwargs: dict[str, Any]) -> requests.Response:\n    res = self._session.request(verb, urllib.parse.urljoin('https://api.github.com', endpoint), json=kwargs)\n    res.raise_for_status()\n    return res.json()",
    "docstring": "Helper method to make a request and raise an HTTPError if one occurred. Arguments: verb: The HTTP verb to use endpoint: The endpoint to make the request to **kwargs: The json that will be sent as the body of the request. Returns: a requests.Response object containing the response from the API. Raises: requests.exceptions.HTTPError",
    "type": "method",
    "file_path": "tensorflow\\third_party\\xla\\.github\\workflows\\github_api.py",
    "ast_data": "FunctionDef name:_make_request arg:self arg:verb arg:endpoint arguments arg arg arg arg Assign Call Call Call Return return:yes Call"
  },
  {
    "library": "django",
    "name": "start_index",
    "source_code": "def start_index(self):\n    if self.paginator.count == 0:\n        return 0\n    return self.paginator.per_page * (self.number - 1) + 1",
    "docstring": "Return the 1-based index of the first object on this page, relative to total objects in the paginator.",
    "type": "method",
    "file_path": "django\\django\\core\\paginator.py",
    "ast_data": "FunctionDef name:start_index arg:self arguments arg If Compare Return return:yes Return return:yes"
  },
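A usage sketch of the 1-based page indexing, assuming Django is installed; the object list is illustrative.

```python
from django.core.paginator import Paginator

p = Paginator(list(range(25)), 10)  # 25 objects, 10 per page
page2 = p.page(2)
print(page2.start_index())  # 11: 1-based index of the first object on page 2
print(page2.end_index())    # 20
```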
  {
    "library": "tensorflow",
    "name": "_load_saved_model",
    "source_code": "@convert_phase(Component.PREPARE_TF_MODEL, SubComponent.LOAD_SAVED_MODEL)\ndef _load_saved_model(self, saved_model_dir, saved_model_tags):\n    graph = _ops.Graph()\n    saved_model = _loader_impl.SavedModelLoader(saved_model_dir)\n    saved_model.load_graph(graph, tags=saved_model_tags)\n    meta_graph = saved_model.get_meta_graph_def_from_tags(saved_model_tags)\n    graph_def = meta_graph.graph_def\n    signature_def = meta_graph.signature_def[_signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY]\n    input_tensors = [graph.get_tensor_by_name(signature_def.inputs[key].name) for key in signature_def.inputs]\n    output_tensors = [graph.get_tensor_by_name(signature_def.outputs[key].name) for key in signature_def.outputs]\n    return (graph_def, input_tensors, output_tensors)",
    "docstring": "Load graph_def from saved model with the default serving signature key. Args: saved_model_dir: Directory of the SavedModel. saved_model_tags: Set of tags identifying the MetaGraphDef within the SavedModel to analyze. Returns: graph_def: The loaded GraphDef. input_tensors: List of input tensors. output_tensors: List of output tensors.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\lite\\python\\lite.py",
    "ast_data": "FunctionDef name:_load_saved_model arg:self arg:saved_model_dir arg:saved_model_tags arguments arg arg arg Assign Call Assign Call Call Assign Call Assign Assign Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "register_scale",
    "source_code": "def register_scale(scale_class):\n    _scale_mapping[scale_class.name] = scale_class",
    "docstring": "Register a new kind of scale. Parameters ---------- scale_class : subclass of The scale to register.",
    "type": "function",
    "file_path": "matplotlib\\lib\\matplotlib\\scale.py",
    "ast_data": "FunctionDef name:register_scale arg:scale_class arguments arg Assign"
  },
  {
    "library": "pytorch",
    "name": "_to_ort_value",
    "source_code": "def _to_ort_value(tensor: torch.Tensor) -> ort.OrtValue:\n    import onnxruntime as ort\n    from torch.onnx._internal.exporter import _core\n    if tensor.dtype == torch.bfloat16 or tensor.dtype in _NP_UNSUPPORTED_DTYPES_8BIT:\n        if hasattr(ort.OrtValue, 'ortvalue_from_numpy_with_onnx_type'):\n            if tensor.dtype == torch.bfloat16:\n                uint_type = torch.uint16\n            else:\n                uint_type = torch.uint8\n            onnx_type = _core.torch_dtype_to_onnx_dtype(tensor.dtype)\n            tensor = tensor.contiguous()\n            return ort.OrtValue.ortvalue_from_numpy_with_onnx_type(tensor.view(uint_type).numpy(force=True), onnx_element_type=onnx_type)\n        raise RuntimeError(f\"Failed to convert tensor of type '{tensor.dtype}' to OrtValue. Please ensure that ONNX Runtime is built with DLPack support or is the latest version\")\n    return ort.OrtValue.ortvalue_from_numpy(tensor.numpy(force=True))",
    "docstring": "Convert a PyTorch tensor to an ONNX Runtime OrtValue.",
    "type": "function",
    "file_path": "pytorch\\torch\\onnx\\_internal\\exporter\\_onnx_program.py",
    "ast_data": "FunctionDef name:_to_ort_value arg:tensor arguments arg If BoolOp Compare Compare If Call If Compare Assign Assign Assign Call Assign Call Return return:yes Call Call Call Raise Call Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_run_graph",
    "source_code": "def _run_graph(self, device, output_shape, variable, num_outputs, axis):\n    graph = ops.Graph()\n    with graph.as_default():\n        if not variable:\n            if axis == 0:\n                input_shape = [output_shape[0] * num_outputs, output_shape[1]]\n                sizes = [output_shape[0] for _ in range(num_outputs)]\n            else:\n                input_shape = [output_shape[0], output_shape[1] * num_outputs]\n                sizes = [output_shape[1] for _ in range(num_outputs)]\n        else:\n            sizes = np.random.randint(low=max(1, output_shape[axis] - 2), high=output_shape[axis] + 2, size=num_outputs)\n            total_size = np.sum(sizes)\n            if axis == 0:\n                input_shape = [total_size, output_shape[1]]\n            else:\n                input_shape = [output_shape[0], total_size]\n        outputs = build_graph(device, input_shape, sizes, axis)\n    config = config_pb2.ConfigProto(graph_options=config_pb2.GraphOptions(optimizer_options=config_pb2.OptimizerOptions(opt_level=config_pb2.OptimizerOptions.L0)))\n    with session_lib.Session(graph=graph, config=config) as session:\n        logging.set_verbosity('info')\n        variables.global_variables_initializer().run()\n        bench = benchmark.TensorFlowBenchmark()\n        bench.run_op_benchmark(session, outputs, mbs=input_shape[0] * input_shape[1] * 4 * 2 * 100 / 1000000.0, extras={'input_shape': input_shape, 'variable': variable, 'axis': axis})",
    "docstring": "Run the graph and print its execution time. Args: device: string, the device to run on. output_shape: shape of each output tensors. variable: whether or not the output shape should be fixed num_outputs: the number of outputs to split the input into axis: axis to be split Returns: The duration of the run in seconds.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\split_benchmark.py",
    "ast_data": "FunctionDef name:_run_graph arg:self arg:device arg:output_shape arg:variable arg:num_outputs arg:axis arguments arg arg arg arg arg arg Assign Call With Call If If Compare Assign Assign Call Assign Assign Call Assign Call Call Assign Call If Compare Assign Assign Assign Call Assign Call Call Call With Call Call Call Call Assign Call Call"
  },
  {
    "library": "tensorflow",
    "name": "strategy_supports_no_merge_call",
    "source_code": "def strategy_supports_no_merge_call():\n    if not distribute_lib.has_strategy():\n        return True\n    strategy = distribute_lib.get_strategy()\n    return not strategy.extended._use_merge_call()",
    "docstring": "Returns if the current Strategy can operate in pure replica context.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\optimizer_v2\\utils.py",
    "ast_data": "FunctionDef name:strategy_supports_no_merge_call arguments If Call Return return:yes Assign Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "_deserialize_graph_module",
    "source_code": "def _deserialize_graph_module(forward, body: dict[Any, Any], graph_module_cls=None) -> torch.nn.Module:\n    _CodeOnlyModule.forward = forward\n    tracer_cls = body.get('_tracer_cls')\n    if tracer_cls is None:\n        from ._symbolic_trace import Tracer\n        tracer_cls = Tracer\n    graphmodule_cls_name = body.get('_graphmodule_cls_name', 'GraphModule')\n    cls_tracer: Any = tracer_cls\n\n    class KeepModules(cls_tracer):\n\n        def is_leaf_module(self, _: torch.nn.Module, __: str) -> bool:\n            return True\n    com = _CodeOnlyModule(body)\n    tracer_extras = body.get('_tracer_extras', {})\n    graph = KeepModules().trace(com, **tracer_extras)\n    graph._tracer_cls = tracer_cls\n    from ._lazy_graph_module import _make_graph_module\n    gm = _make_graph_module(com, graph, class_name=graphmodule_cls_name, graph_module_cls=graph_module_cls)\n    for k, v in body.items():\n        if not hasattr(gm, k):\n            setattr(gm, k, v)\n    return gm",
    "docstring": "Deserialize a GraphModule given the dictionary of the original module, using the code to reconstruct the graph. We delete the actual graph before saving the dictionary so that changes to the in-memory graph format do not get serialized.",
    "type": "function",
    "file_path": "pytorch\\torch\\fx\\graph_module.py",
    "ast_data": "FunctionDef name:_deserialize_graph_module arg:forward arg:body arg:graph_module_cls arguments arg arg arg Assign Assign Call If Compare Assign Assign Call ClassDef name:KeepModules FunctionDef name:is_leaf_module arg:self arg:_ arg:__ arguments arg arg arg Return return:yes Assign Call Assign Call Assign Call Call Assign Assign Call For Call If Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_convert_file_name_tensor_to_string",
    "source_code": "def _convert_file_name_tensor_to_string(tensor):\n    output = tensor\n    if tensor_util.is_tf_type(output):\n        if context.executing_eagerly():\n            output = compat.as_str(output.numpy())\n    else:\n        output = compat.as_str(output)\n    return output",
    "docstring": "Convert file name tensor to string.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\checkpoint\\checkpoint.py",
    "ast_data": "FunctionDef name:_convert_file_name_tensor_to_string arg:tensor arguments arg Assign If Call If Call Assign Call Call Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "benchmark_generation_time",
    "source_code": "def benchmark_generation_time(output_token_len):\n    timestamp_start = datetime.datetime.now()\n    model_ids = model.generate(prompt, device=device, output_len=output_token_len)\n    timestamp_end = datetime.datetime.now()\n    timer_delta = timestamp_end - timestamp_start\n    if output_token_len == OUTPUT_TOKEN_LEN:\n        print(model_ids)\n    return timer_delta.total_seconds() * 1000",
    "docstring": "Benchmark generation time given output token length.",
    "type": "function",
    "file_path": "tensorflow\\third_party\\xla\\xla\\backends\\cpu\\benchmarks\\e2e\\gemma2\\pytorch_2b\\benchmark.py",
    "ast_data": "FunctionDef name:benchmark_generation_time arg:output_token_len arguments arg Assign Call Assign Call Assign Call Assign If Compare Call Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "Alpine01",
    "source_code": "class Alpine01(Benchmark):\n    change_dimensionality = True\n\n    def __init__(self, dimensions=2):\n        Benchmark.__init__(self, dimensions)\n        self._bounds = list(zip([-10.0] * self.N, [10.0] * self.N))\n        self.global_optimum = [[0 for _ in range(self.N)]]\n        self.fglob = 0.0\n\n    def fun(self, x, *args):\n        self.nfev += 1\n        return sum(abs(x * sin(x) + 0.1 * x))",
    "docstring": "Alpine01 objective function. The Alpine01 [1]_ global optimization problem is a multimodal minimization problem defined as follows: .. math:: f_{\\text{Alpine01}}(x) = \\sum_{i=1}^{n} \\lvert {x_i \\sin \\left( x_i \\right) + 0.1 x_i} \\rvert Here, :math: represents the number of dimensions and :math: for :math:. *Global optimum*: :math: for :math: for :math: .. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions For Global Optimization Problems Int. Journal of Mathematical Modelling and Numerical Optimisation, 2013, 4, 150-194.",
    "type": "class",
    "file_path": "scipy\\benchmarks\\benchmarks\\go_benchmark_functions\\go_funcs_A.py",
    "ast_data": "ClassDef name:Alpine01 Assign FunctionDef name:__init__ arg:self arg:dimensions arguments arg arg Call Assign Call Call Assign Call Assign FunctionDef name:fun arg:self arg:x arguments arg arg arg Return return:yes Call Call Call"
  },
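A worked evaluation of the Alpine01 objective matching `fun` above; numpy only, with illustrative inputs.

```python
import numpy as np

def alpine01(x):
    return np.sum(np.abs(x * np.sin(x) + 0.1 * x))

print(alpine01(np.zeros(2)))            # 0.0, the global optimum
print(alpine01(np.array([1.0, -2.0])))  # ~2.56, a non-optimal point
```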
  {
    "library": "django",
    "name": "__eq__",
    "source_code": "def __eq__(self, other):\n    if isinstance(other, Envelope):\n        return self.min_x == other.min_x and self.min_y == other.min_y and (self.max_x == other.max_x) and (self.max_y == other.max_y)\n    elif isinstance(other, tuple) and len(other) == 4:\n        return self.min_x == other[0] and self.min_y == other[1] and (self.max_x == other[2]) and (self.max_y == other[3])\n    else:\n        raise GDALException('Equivalence testing only works with other Envelopes.')",
    "docstring": "Return True if the envelopes are equivalent; can compare against other Envelopes and 4-tuples.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\gdal\\envelope.py",
    "ast_data": "FunctionDef name:__eq__ arg:self arg:other arguments arg arg If Call Return return:yes BoolOp Compare Compare Compare Compare If BoolOp Call Compare Call Return return:yes BoolOp Compare Compare Compare Compare Raise Call"
  },
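A sketch of both comparison branches; assumes Django with the GDAL library available, since django.contrib.gis.gdal requires it.

```python
from django.contrib.gis.gdal import Envelope

env = Envelope(0, 0, 5, 5)
print(env == (0, 0, 5, 5))          # True: compared against a 4-tuple
print(env == Envelope(0, 0, 5, 5))  # True: compared against an Envelope
```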
  {
    "library": "scikit-learn",
    "name": "default_device",
    "source_code": "def default_device(self):\n    return cuda.Device(0)",
    "docstring": "The default device used for new CuPy arrays. See Also -------- __array_namespace_info__.capabilities, __array_namespace_info__.default_dtypes, __array_namespace_info__.dtypes, __array_namespace_info__.devices Returns ------- device : Device The default device used for new CuPy arrays. Examples -------- >>> info = xp.__array_namespace_info__() >>> info.default_device() Device(0) Notes ----- This method returns the static default device when CuPy is initialized. However, the *current* device used by creation functions (`` etc.) can be changed globally or with a context manager. See Also --------",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\externals\\array_api_compat\\cupy\\_info.py",
    "ast_data": "FunctionDef name:default_device arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "kornia",
    "name": "from_numpy",
    "source_code": "@classmethod\ndef from_numpy(cls, data: np_ndarray, color_space: ColorSpace=ColorSpace.RGB, channels_order: ChannelsOrder=ChannelsOrder.CHANNELS_LAST) -> Image:\n    if channels_order == ChannelsOrder.CHANNELS_LAST:\n        image_size = ImageSize(height=data.shape[0], width=data.shape[1])\n        channels = data.shape[2]\n    elif channels_order == ChannelsOrder.CHANNELS_FIRST:\n        image_size = ImageSize(height=data.shape[1], width=data.shape[2])\n        channels = data.shape[0]\n    else:\n        raise ValueError('channels_order must be either `CHANNELS_LAST` or `CHANNELS_FIRST`')\n    pixel_format = PixelFormat(color_space=color_space, bit_depth=data.itemsize * 8)\n    layout = ImageLayout(image_size=image_size, channels=channels, channels_order=channels_order)\n    return cls(torch.from_numpy(data), pixel_format, layout)",
    "docstring": "Construct an image tensor from a numpy array. Args: data: a numpy array containing the image data. color_space: the color space of the image. pixel_format: the pixel format of the image. channels_order: what dimension the channels are in the image tensor. Example: >>> data = np.ones((4, 5, 3), dtype=np.uint8) # HxWxC >>> img = Image.from_numpy(data, color_space=ColorSpace.RGB) >>> assert img.channels == 3 >>> assert img.width == 5 >>> assert img.height == 4",
    "type": "method",
    "file_path": "kornia\\kornia\\image\\image.py",
    "ast_data": "FunctionDef name:from_numpy arg:cls arg:data arg:color_space arg:channels_order arguments arg arg arg arg If Compare Assign Call Assign If Compare Assign Call Assign Raise Call Assign Call Assign Call Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "fingerprint",
    "source_code": "def fingerprint(self):\n    return gen_dataset_ops.dataset_fingerprint(self._variant_tensor)",
    "docstring": "Computes the fingerprint of this . If two datasets have the same fingerprint, it is guaranteed that they would produce identical elements as long as the content of the upstream input files does not change and they produce data deterministically. However, two datasets producing identical values does not always mean they would have the same fingerprint due to different graph constructs. In other words, if two datasets have different fingerprints, they could still produce identical values. Returns: A scalar of type .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\data\\ops\\dataset_ops.py",
    "ast_data": "FunctionDef name:fingerprint arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "assert_and_get_unique_device",
    "source_code": "def assert_and_get_unique_device(module: torch.nn.Module) -> Any:\n    return _assert_and_get_unique_device(module)",
    "docstring": "Returns the unique device for a module, or None if no device is found. Throws an error if multiple devices are detected.",
    "type": "function",
    "file_path": "pytorch\\torch\\ao\\quantization\\fx\\utils.py",
    "ast_data": "FunctionDef name:assert_and_get_unique_device arg:module arguments arg Return return:yes Call"
  },
  {
    "library": "cherrypy",
    "name": "Config",
    "source_code": "class Config(dict):\n    defaults = {}\n    environments = {}\n    namespaces = NamespaceSet()\n\n    def __init__(self, file=None, **kwargs):\n        self.reset()\n        if file is not None:\n            self.update(file)\n        if kwargs:\n            self.update(kwargs)\n\n    def reset(self):\n        self.clear()\n        dict.update(self, self.defaults)\n\n    def update(self, config):\n        self._apply(Parser.load(config))\n\n    def _apply(self, config):\n        which_env = config.get('environment')\n        if which_env:\n            env = self.environments[which_env]\n            for k in env:\n                if k not in config:\n                    config[k] = env[k]\n        dict.update(self, config)\n        self.namespaces(config)\n\n    def __setitem__(self, k, v):\n        dict.__setitem__(self, k, v)\n        self.namespaces({k: v})",
    "docstring": "A dict-like set of configuration data, with defaults and namespaces. May take a file, filename, or dict.",
    "type": "class",
    "file_path": "cherrypy\\cherrypy\\lib\\reprconf.py",
    "ast_data": "ClassDef name:Config Assign Assign Assign Call FunctionDef name:__init__ arg:self arg:file arguments arg arg arg Call If Compare Call If Call FunctionDef name:reset arg:self arguments arg Call Call FunctionDef name:update arg:self arg:config arguments arg arg Call Call FunctionDef name:_apply arg:self arg:config arguments arg arg Assign Call If Assign For If Compare Assign Call Call FunctionDef name:__setitem__ arg:self arg:k arg:v arguments arg arg arg Call Call"
  },
  {
    "library": "tensorflow",
    "name": "GlorotNormal",
    "source_code": "class GlorotNormal(VarianceScaling):\n\n    def __init__(self, seed=None):\n        super(GlorotNormal, self).__init__(scale=1.0, mode='fan_avg', distribution='truncated_normal', seed=seed)\n\n    def get_config(self):\n        return {'seed': self.seed}",
    "docstring": "The Glorot normal initializer, also called Xavier normal initializer. Initializers allow you to pre-specify an initialization strategy, encoded in the Initializer object, without knowing the shape and dtype of the variable being initialized. Draws samples from a truncated normal distribution centered on 0 with where is the number of input units in the weight tensor and is the number of output units in the weight tensor. Examples: >>> def make_variables(k, initializer): ... return (tf.Variable(initializer(shape=[k, k], dtype=tf.float32)), ... tf.Variable(initializer(shape=[k, k, k], dtype=tf.float32))) >>> v1, v2 = make_variables(3, tf.initializers.GlorotNormal()) >>> v1 >> v2 >> make_variables(4, tf.initializers.RandomNormal()) (<tf.Variable ... shape=(4, 4) dtype=float32... <tf.Variable ... shape=(4, 4, 4) dtype=float32... Args: seed: A Python integer. Used to create random seeds. See for behavior. References: [Glorot et al., 2010]( ([pdf](",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\init_ops_v2.py",
    "ast_data": "ClassDef name:GlorotNormal FunctionDef name:__init__ arg:self arg:seed arguments arg arg Call Call FunctionDef name:get_config arg:self arguments arg Return return:yes"
  },
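A usage sketch, assuming TensorFlow is installed.

```python
import tensorflow as tf

initializer = tf.keras.initializers.GlorotNormal(seed=42)
w = initializer(shape=(3, 3), dtype=tf.float32)
# Values are drawn with stddev = sqrt(2 / (fan_in + fan_out)).
print(w.shape)  # (3, 3)
```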
  {
    "library": "pytorch",
    "name": "ModelAverager",
    "source_code": "class ModelAverager(ABC):\n\n    def __init__(self, process_group: Optional[dist.ProcessGroup]=None):\n        self.process_group = process_group if process_group is not None else _not_none(dist.group.WORLD)\n        self.step = 0\n\n    @abstractmethod\n    def average_parameters(self, params):\n        raise NotImplementedError",
    "docstring": "Base class for all model averagers. Args: process_group: The process group to be used for all-reduce. If `torch.distributed.init_process_group`)",
    "type": "class",
    "file_path": "pytorch\\torch\\distributed\\algorithms\\model_averaging\\averagers.py",
    "ast_data": "ClassDef name:ModelAverager FunctionDef name:__init__ arg:self arg:process_group arguments arg arg Assign Compare Call Assign FunctionDef name:average_parameters arg:self arg:params arguments arg arg Raise"
  },
  {
    "library": "pytorch",
    "name": "double",
    "source_code": "def double(self):\n    return self._to(torch.double)",
    "docstring": "Casts this storage to double type.",
    "type": "method",
    "file_path": "pytorch\\torch\\storage.py",
    "ast_data": "FunctionDef name:double arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "local_variables",
    "source_code": "@property\ndef local_variables(self):\n    return []",
    "docstring": "Returns the list of global variables created by the Template.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\template.py",
    "ast_data": "FunctionDef name:local_variables arg:self arguments arg Return return:no"
  },
  {
    "library": "pandas",
    "name": "dtype",
    "source_code": "@property\n@abstractmethod\ndef dtype(self) -> tuple[DtypeKind, int, str, str]:\n    pass",
    "docstring": "Dtype description as a tuple ``. - Data types not included: complex, Arrow-style null, binary, decimal, and nested (list, struct, map, union) dtypes.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\interchange\\dataframe_protocol.py",
    "ast_data": "FunctionDef name:dtype arg:self arguments arg"
  },
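A sketch of reading the dtype tuple through the interchange protocol; assumes pandas >= 1.5.

```python
import pandas as pd

df = pd.DataFrame({'a': [1, 2, 3]})
col = df.__dataframe__().get_column(0)
print(col.dtype)  # e.g. (<DtypeKind.INT: 0>, 64, 'l', '=')
```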
  {
    "library": "pytorch",
    "name": "step",
    "source_code": "@_use_grad_for_differentiable\ndef step(self, closure=None):\n    self._cuda_graph_capture_health_check()\n    loss = None\n    if closure is not None:\n        with torch.enable_grad():\n            loss = closure()\n    for group in self.param_groups:\n        params_with_grad: list[Tensor] = []\n        grads: list[Tensor] = []\n        exp_avgs: list[Tensor] = []\n        exp_avg_sqs: list[Tensor] = []\n        mu_products: list[Tensor] = []\n        state_steps: list[Tensor] = []\n        beta1, beta2 = cast(tuple[float, float], group['betas'])\n        has_complex = self._init_group(group, params_with_grad, grads, exp_avgs, exp_avg_sqs, mu_products, state_steps)\n        nadam(params_with_grad, grads, exp_avgs, exp_avg_sqs, mu_products, state_steps, beta1=beta1, beta2=beta2, lr=group['lr'], weight_decay=group['weight_decay'], momentum_decay=group['momentum_decay'], eps=group['eps'], maximize=group['maximize'], decoupled_weight_decay=group['decoupled_weight_decay'], foreach=group['foreach'], capturable=group['capturable'], differentiable=group['differentiable'], has_complex=has_complex)\n    return loss",
    "docstring": "Perform a single optimization step. Args: closure (Callable, optional): A closure that reevaluates the model and returns the loss.",
    "type": "method",
    "file_path": "pytorch\\torch\\optim\\nadam.py",
    "ast_data": "FunctionDef name:step arg:self arg:closure arguments arg arg Call Assign If Compare With Call Assign Call For Assign Call Assign Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "HeUniform",
    "source_code": "class HeUniform(VarianceScaling):\n\n    def __init__(self, seed=None):\n        super(HeUniform, self).__init__(scale=2.0, mode='fan_in', distribution='uniform', seed=seed)\n\n    def get_config(self):\n        return {'seed': self.seed}",
    "docstring": "He uniform variance scaling initializer. Also available via the shortcut function . Draws samples from a uniform distribution within , where ( is the number of input units in the weight tensor). Examples: >>> # Standalone usage: >>> initializer = tf.keras.initializers.HeUniform() >>> values = initializer(shape=(2, 2)) >>> # Usage in a Keras layer: >>> initializer = tf.keras.initializers.HeUniform() >>> layer = tf.keras.layers.Dense(3, kernel_initializer=initializer) Args: seed: A Python integer. An initializer created with a given seed will always produce the same random tensor for a given shape and dtype. References: [He et al., 2015]( # pylint: disable=line-too-long ([pdf](",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\initializers\\initializers_v2.py",
    "ast_data": "ClassDef name:HeUniform FunctionDef name:__init__ arg:self arg:seed arguments arg arg Call Call FunctionDef name:get_config arg:self arguments arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "relu_inference_rule",
    "source_code": "@register_inference_rule(torch.nn.ReLU)\ndef relu_inference_rule(n: Node, module_instance):\n    assert isinstance(n.args[0], Node)\n    if n.args[0].type == Dyn and isinstance(n.type, TensorType):\n        n.args[0].type = expand_to_tensor_dim(n.args[0].type, len(n.type.__args__))\n    if isinstance(n.args[0].type, TensorType):\n        n.type = get_greatest_upper_bound(n.args[0].type, n.type)\n    return n.type",
    "docstring": "Input and output shapes should be equal.",
    "type": "function",
    "file_path": "pytorch\\torch\\fx\\experimental\\graph_gradual_typechecker.py",
    "ast_data": "FunctionDef name:relu_inference_rule arg:n arg:module_instance arguments arg arg Call If BoolOp Compare Call Assign Call Call If Call Assign Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "shutdown",
    "source_code": "def shutdown(self) -> None:\n    nodes = []\n    for roots in self.roots.values():\n        nodes.extend(roots)\n    while nodes:\n        node = nodes.pop()\n        for children in node.children.values():\n            nodes.extend(children)\n        node.remove_node_cached_tensors()\n        node.graph = None\n    self.graph = None\n    self.roots = None\n    self.current_node = None",
    "docstring": "Remove all cached tensors in all nodes. Because cached tensors can hold gradients which in turn might reference a backward which invokes a CUDA Graph Node, we have to manually clear them on shutdown to avoid a reference cycle.",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\cudagraph_trees.py",
    "ast_data": "FunctionDef name:shutdown arg:self arguments arg Assign For Call Call While Assign Call For Call Call Call Assign Assign Assign Assign"
  },
  {
    "library": "pandas",
    "name": "_derive_colors",
    "source_code": "def _derive_colors(*, color: Color | Collection[Color] | None, colormap: str | Colormap | None, color_type: str, num_colors: int) -> list[Color]:\n    if color is None and colormap is not None:\n        return _get_colors_from_colormap(colormap, num_colors=num_colors)\n    elif color is not None:\n        if colormap is not None:\n            warnings.warn(\"'color' and 'colormap' cannot be used simultaneously. Using 'color'\", stacklevel=find_stack_level())\n        return _get_colors_from_color(color)\n    else:\n        return _get_colors_from_color_type(color_type, num_colors=num_colors)",
    "docstring": "Derive colors from either , or inputs. Get a list of colors either from , or from , or from (if both and are None). Parameters ---------- color : str or sequence, optional Color(s) to be used for deriving sequence of colors. Can be either be a single color (single color string, or sequence of floats representing a single color), or a sequence of colors. colormap : :py:class:, optional Matplotlib colormap. When provided, the resulting colors will be derived from the colormap. color_type : {\"default\", \"random\"}, optional Type of colors to derive. Used if provided and are None. Ignored if either or colormapcolorcolor` will override.",
    "type": "function",
    "file_path": "pandas\\pandas\\plotting\\_matplotlib\\style.py",
    "ast_data": "FunctionDef name:_derive_colors arguments arg arg arg arg If BoolOp Compare Compare Return return:yes Call If Compare If Compare Call Call Return return:yes Call Return return:yes Call"
  },
  {
    "library": "django",
    "name": "read_body",
    "source_code": "async def read_body(self, receive):\n    body_file = tempfile.SpooledTemporaryFile(max_size=settings.FILE_UPLOAD_MAX_MEMORY_SIZE, mode='w+b')\n    while True:\n        message = await receive()\n        if message['type'] == 'http.disconnect':\n            body_file.close()\n            raise RequestAborted()\n        if 'body' in message:\n            on_disk = getattr(body_file, '_rolled', False)\n            if on_disk:\n                async_write = sync_to_async(body_file.write, thread_sensitive=False)\n                await async_write(message['body'])\n            else:\n                body_file.write(message['body'])\n        if not message.get('more_body', False):\n            break\n    body_file.seek(0)\n    return body_file",
    "docstring": "Reads an HTTP body from an ASGI connection.",
    "type": "method",
    "file_path": "django\\django\\core\\handlers\\asgi.py",
    "ast_data": "AsyncFunctionDef name:read_body arg:self arg:receive arguments arg arg Assign Call While Assign Call If Compare Call Raise Call If Compare Assign Call If Assign Call Call Call If Call Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "sharey",
    "source_code": "def sharey(self, other):\n    _api.check_isinstance(_AxesBase, other=other)\n    if self._sharey is not None and other is not self._sharey:\n        raise ValueError('y-axis is already shared')\n    self._shared_axes['y'].join(self, other)\n    self._sharey = other\n    self.yaxis.major = other.yaxis.major\n    self.yaxis.minor = other.yaxis.minor\n    y0, y1 = other.get_ylim()\n    self.set_ylim(y0, y1, emit=False, auto=other.get_autoscaley_on())\n    self.yaxis._scale = other.yaxis._scale",
    "docstring": "Share the y-axis with *other*. This is equivalent to passing `` when constructing the Axes, and cannot be used if the y-axis is already being shared with another Axes. Note that it is not possible to unshare axes.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\axes\\_base.py",
    "ast_data": "FunctionDef name:sharey arg:self arg:other arguments arg arg Call If BoolOp Compare Compare Raise Call Call Assign Assign Assign Assign Call Call Call Assign"
  },
  {
    "library": "uvicorn",
    "name": "timeout_keep_alive_handler",
    "source_code": "def timeout_keep_alive_handler(self) -> None:\n    if not self.transport.is_closing():\n        self.transport.close()",
    "docstring": "Called on a keep-alive connection if no new data is received after a short delay.",
    "type": "method",
    "file_path": "uvicorn\\uvicorn\\protocols\\http\\httptools_impl.py",
    "ast_data": "FunctionDef name:timeout_keep_alive_handler arg:self arguments arg If Call Call"
  },
  {
    "library": "tensorflow",
    "name": "KerasHistory",
    "source_code": "class KerasHistory(collections.namedtuple('KerasHistory', ['layer', 'node_index', 'tensor_index'])):\n    __slots__ = ()",
    "docstring": "Tracks the Layer call that created a Tensor, for Keras Graph Networks. During construction of Keras Graph Networks, this metadata is added to each Tensor produced as the output of a Layer, starting with an . This allows Keras to track how each Tensor was produced, and this information is later retraced by the class to reconstruct the Keras Graph Network. Attributes: layer: The Layer that produced the Tensor. node_index: The specific call to the Layer that produced this Tensor. Layers can be called multiple times in order to share weights. A new node is created every time a Tensor is called. tensor_index: The output index for this Tensor. Always zero if the Layer that produced this Tensor only has one output. Nested structures of Tensors are deterministically assigned an index via .",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\base_layer_v1.py",
    "ast_data": "ClassDef name:KerasHistory Call Assign"
  },
  {
    "library": "tensorflow",
    "name": "smart_constant_value",
    "source_code": "def smart_constant_value(pred):\n    if isinstance(pred, tensor.Tensor):\n        pred_value = tensor_util.constant_value(pred)\n        if pred_value is None:\n            pred_value = tensor_util.try_evaluate_constant(pred)\n    elif pred in {0, 1}:\n        pred_value = bool(pred)\n    elif isinstance(pred, bool):\n        pred_value = pred\n    else:\n        raise TypeError(f'Argument `pred` must be a Tensor, or a Python bool, or 1 or 0. Received: pred={pred} of type {type(pred).__name__}')\n    return pred_value",
    "docstring": "Return the bool value for , or None if had a dynamic value. Args: pred: A scalar, either a Python bool or tensor. Returns: True or False if has a constant boolean value, None otherwise. Raises: TypeError: If is not a Tensor or bool.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\smart_cond.py",
    "ast_data": "FunctionDef name:smart_constant_value arg:pred arguments arg If Call Assign Call If Compare Assign Call If Compare Assign Call If Call Assign Raise Call Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "__init__",
    "source_code": "def __init__(self, pad=0.3):\n    self.pad = pad\n    super().__init__()",
    "docstring": "The arguments must be floats and have default values. Parameters ---------- pad : float amount of padding",
    "type": "method",
    "file_path": "matplotlib\\galleries\\users_explain\\text\\annotations.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:pad arguments arg arg Assign Call Call"
  },
  {
    "library": "tensorflow",
    "name": "l1_l2",
    "source_code": "def l1_l2(l1=0.01, l2=0.01):\n    return L1L2(l1=l1, l2=l2)",
    "docstring": "Create a regularizer that applies both L1 and L2 penalties. The L1 regularization penalty is computed as: The L2 regularization penalty is computed as: Args: l1: Float; L1 regularization factor. l2: Float; L2 regularization factor. Returns: An L1L2 Regularizer with the given regularization factors.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\regularizers.py",
    "ast_data": "FunctionDef name:l1_l2 arg:l1 arg:l2 arguments arg arg Return return:yes Call"
  },
  {
    "library": "numpy",
    "name": "hermepow",
    "source_code": "def hermepow(c, pow, maxpower=16):\n    return pu._pow(hermemul, c, pow, maxpower)",
    "docstring": "Raise a Hermite series to a power. Returns the Hermite series raised to the power . The argument is a sequence of coefficients ordered from low to high. i.e., [1,2,3] is the series `` Parameters ---------- c : array_like 1-D array of Hermite series coefficients ordered from low to high. pow : integer Power to which the series will be raised maxpower : integer, optional Maximum power allowed. This is mainly to limit growth of the series to unmanageable size. Default is 16 Returns ------- coef : ndarray Hermite series of power. See Also -------- hermeadd, hermesub, hermemulx, hermemul, hermediv Examples -------- >>> from numpy.polynomial.hermite_e import hermepow >>> hermepow([1, 2, 3], 2) array([23., 28., 46., 12., 9.])",
    "type": "function",
    "file_path": "numpy\\numpy\\polynomial\\hermite_e.py",
    "ast_data": "FunctionDef name:hermepow arg:c arg:pow arg:maxpower arguments arg arg arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_do_batch_all_reduce",
    "source_code": "def _do_batch_all_reduce(self, reduce_op, dense_values):\n    logging.log_first_n(logging.INFO, 'batch_all_reduce: %d all-reduces with algorithm = %s, num_packs = %d' % (len(dense_values), self._all_reduce_alg, self._num_packs), 10)\n    destinations = dense_values[0]._devices\n    grouped = _group_value_by_device(dense_values)\n    device_grad_packs, tensor_packer = _pack_tensors(grouped, self._num_packs)\n    if self._all_reduce_alg == 'nccl':\n        reduced = cross_device_utils.aggregate_gradients_using_nccl(device_grad_packs)\n    else:\n        reduced = cross_device_utils.aggregate_gradients_using_hierarchical_copy(destinations, device_grad_packs)\n    reduced = _unpack_tensors(reduced, tensor_packer)\n    return _ungroup_and_make_mirrored(reduced, dense_values[0], reduce_op)",
    "docstring": "Run batch all-reduces.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\cross_device_ops.py",
    "ast_data": "FunctionDef name:_do_batch_all_reduce arg:self arg:reduce_op arg:dense_values arguments arg arg arg Call Call Assign Assign Call Assign Call If Compare Assign Call Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "ScatterDimensionNumbers",
    "source_code": "class ScatterDimensionNumbers:\n    __slots__ = ('update_window_dims', 'inserted_window_dims', 'scatter_dims_to_operand_dims', 'index_vector_dim')\n\n    def __init__(self):\n        self.update_window_dims = []\n        self.inserted_window_dims = []\n        self.scatter_dims_to_operand_dims = []\n        self.index_vector_dim = 0",
    "docstring": "Python representation of a xla.ScatterDimensionNumbers protobuf.",
    "type": "class",
    "file_path": "tensorflow\\third_party\\xla\\xla\\python\\xla_client.py",
    "ast_data": "ClassDef name:ScatterDimensionNumbers Assign FunctionDef name:__init__ arg:self arguments arg Assign Assign Assign Assign"
  },
  {
    "library": "tensorflow",
    "name": "clear",
    "source_code": "def clear(self, name=None):\n    if name is None:\n        name = '%s_clear' % self._name\n    return self._clear_fn(shared_name=self._name, name=name, dtypes=self._dtypes, capacity=self._capacity, memory_limit=self._memory_limit)",
    "docstring": "Clears the staging area. Args: name: A name for the operation (optional) Returns: The created op",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\data_flow_ops.py",
    "ast_data": "FunctionDef name:clear arg:self arg:name arguments arg arg If Compare Assign Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "rcdefaults",
    "source_code": "def rcdefaults():\n    with _api.suppress_matplotlib_deprecation_warning():\n        from .style.core import STYLE_BLACKLIST\n        rcParams.clear()\n        rcParams.update({k: v for k, v in rcParamsDefault.items() if k not in STYLE_BLACKLIST})",
    "docstring": "Restore the from Matplotlib's internal default style. Style-blacklisted (defined in `.rcParams` to restore the default style.",
    "type": "function",
    "file_path": "matplotlib\\lib\\matplotlib\\__init__.py",
    "ast_data": "FunctionDef name:rcdefaults arguments With Call Call Call Call Compare"
  },
  {
    "library": "django",
    "name": "ConnectionProxy",
    "source_code": "class ConnectionProxy:\n\n    def __init__(self, connections, alias):\n        self.__dict__['_connections'] = connections\n        self.__dict__['_alias'] = alias\n\n    def __getattr__(self, item):\n        return getattr(self._connections[self._alias], item)\n\n    def __setattr__(self, name, value):\n        return setattr(self._connections[self._alias], name, value)\n\n    def __delattr__(self, name):\n        return delattr(self._connections[self._alias], name)\n\n    def __contains__(self, key):\n        return key in self._connections[self._alias]\n\n    def __eq__(self, other):\n        return self._connections[self._alias] == other",
    "docstring": "Proxy for accessing a connection object's attributes.",
    "type": "class",
    "file_path": "django\\django\\utils\\connection.py",
    "ast_data": "ClassDef name:ConnectionProxy FunctionDef name:__init__ arg:self arg:connections arg:alias arguments arg arg arg Assign Assign FunctionDef name:__getattr__ arg:self arg:item arguments arg arg Return return:yes Call FunctionDef name:__setattr__ arg:self arg:name arg:value arguments arg arg arg Return return:yes Call FunctionDef name:__delattr__ arg:self arg:name arguments arg arg Return return:yes Call FunctionDef name:__contains__ arg:self arg:key arguments arg arg Return return:yes Compare FunctionDef name:__eq__ arg:self arg:other arguments arg arg Return return:yes Compare"
  },
  {
    "library": "tensorflow",
    "name": "_infer_hints_allowing_override",
    "source_code": "def _infer_hints_allowing_override(op1, op2, hints):\n    hints = hints or _Hints()\n    if hints.is_self_adjoint is None:\n        is_self_adjoint = op1.is_self_adjoint and op2.is_self_adjoint\n    else:\n        is_self_adjoint = hints.is_self_adjoint\n    if hints.is_positive_definite is None:\n        is_positive_definite = op1.is_positive_definite and op2.is_positive_definite\n    else:\n        is_positive_definite = hints.is_positive_definite\n    if is_positive_definite and hints.is_positive_definite is None:\n        is_non_singular = True\n    else:\n        is_non_singular = hints.is_non_singular\n    return _Hints(is_non_singular=is_non_singular, is_self_adjoint=is_self_adjoint, is_positive_definite=is_positive_definite)",
    "docstring": "Infer hints from op1 and op2. hints argument is an override. Args: op1: LinearOperator op2: LinearOperator hints: _Hints object holding \"is_X\" boolean hints to use for returned operator. If some hint is None, try to set using op1 and op2. If the hint is provided, ignore op1 and op2 hints. This allows an override of previous hints, but does not allow forbidden hints (e.g. you still cannot say a real diagonal operator is not self-adjoint. Returns: _Hints object.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\linalg\\linear_operator_addition.py",
    "ast_data": "FunctionDef name:_infer_hints_allowing_override arg:op1 arg:op2 arg:hints arguments arg arg arg Assign BoolOp Call If Compare Assign BoolOp Assign If Compare Assign BoolOp Assign If BoolOp Compare Assign Assign Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "make_train_function",
    "source_code": "def make_train_function(self):\n    if self.train_function is not None:\n        return self.train_function\n\n    def step_function(model, iterator):\n\n        def run_step(data):\n            outputs = model.train_step(data)\n            with ops.control_dependencies(_minimum_control_deps(outputs)):\n                model._train_counter.assign_add(1)\n            return outputs\n        data = next(iterator)\n        outputs = model.distribute_strategy.run(run_step, args=(data,))\n        outputs = reduce_per_replica(outputs, self.distribute_strategy, reduction='first')\n        write_scalar_summaries(outputs, step=model._train_counter)\n        return outputs\n    if self._steps_per_execution.numpy().item() == 1:\n\n        def train_function(iterator):\n            return step_function(self, iterator)\n    else:\n\n        def train_function(iterator):\n            for _ in math_ops.range(self._steps_per_execution):\n                outputs = step_function(self, iterator)\n            return outputs\n    if not self.run_eagerly:\n        train_function = def_function.function(train_function, experimental_relax_shapes=True)\n        self.train_tf_function = train_function\n    self.train_function = train_function\n    if self._cluster_coordinator:\n        self.train_function = lambda iterator: self._cluster_coordinator.schedule(train_function, args=(iterator,))\n    return self.train_function",
    "docstring": "Creates a function that executes one step of training. This method can be overridden to support custom training logic. This method is called by and . Typically, this method directly controls and settings, and delegates the actual training logic to . This function is cached the first time or is called. The cache is cleared whenever is called. Returns: Function. The function created by this method should accept a , and return a containing values that will be passed to , such as .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\training.py",
    "ast_data": "FunctionDef name:make_train_function arg:self arguments arg If Compare Return return:yes FunctionDef name:step_function arg:model arg:iterator arguments arg arg FunctionDef name:run_step arg:data arguments arg Assign Call With Call Call Call Return return:yes Assign Call Assign Call Assign Call Call Return return:yes If Compare Call Call FunctionDef name:train_function arg:iterator arguments arg Return return:yes Call FunctionDef name:train_function arg:iterator arguments arg For Call Assign Call Return return:yes If Assign Call Assign Assign If Assign arguments arg Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "advance_roffset",
    "source_code": "def advance_roffset(self, symt: SymT) -> sympy.Expr:\n    rblock = TritonSymbols.block_sizes[symt]\n    advance = [self.replace_offset(offset, rblock, symt) - self.replace_offset(offset, sympy.S.Zero, symt) for offset in self.offsets]\n    return advance",
    "docstring": "Codegen string to pass to tl.advance(name, ...). Advance is the difference between offsets in each loop iteration. To compute it, we replace rN_offset with multiples of RN_BLOCK. Since we expect rN_offset to vary in range(0, rN_numel, RN_BLOCK), the first iteration has rN_offset=0, while the second has rN_offset=RN_BLOCK.",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\codegen\\triton.py",
    "ast_data": "FunctionDef name:advance_roffset arg:self arg:symt arguments arg arg Assign Assign Call Call Return return:yes"
  },
  {
    "library": "cryptography",
    "name": "__copy__",
    "source_code": "@abc.abstractmethod\ndef __copy__(self) -> Ed25519PrivateKey:\n    pass",
    "docstring": "Returns a copy.",
    "type": "method",
    "file_path": "cryptography\\src\\cryptography\\hazmat\\primitives\\asymmetric\\ed25519.py",
    "ast_data": "FunctionDef name:__copy__ arg:self arguments arg"
  },
  {
    "library": "pytorch",
    "name": "_assert_and_get_unique_device",
    "source_code": "def _assert_and_get_unique_device(module: torch.nn.Module) -> Any:\n    devices = {p.device for p in module.parameters()} | {p.device for p in module.buffers()}\n    '\\n    As a temp workaround for AIMP HHC publish we added CPU check.remove it later. T163614564\\n    '\n    if {torch.device('cpu'), torch.device('meta')} == devices:\n        warnings.warn(\"Both 'meta' and 'cpu' are present in the list of devices. Module can have one device. We Select 'cpu'.\")\n        devices = {torch.device('cpu')}\n    ''\n    assert len(devices) <= 1, f'prepare only works with cpu or single-device CUDA modules, but got devices {devices}'\n    device = next(iter(devices)) if len(devices) > 0 else None\n    return device",
    "docstring": "Returns the unique device for a module, or None if no device is found. Throws an error if multiple devices are detected.",
    "type": "function",
    "file_path": "pytorch\\torch\\ao\\quantization\\utils.py",
    "ast_data": "FunctionDef name:_assert_and_get_unique_device arg:module arguments arg Assign Call Call If Compare Call Call Call Assign Call Compare Call Assign Compare Call Call Call Return return:yes"
  },
  {
    "library": "cryptography",
    "name": "add_extension",
    "source_code": "def add_extension(self, extval: ExtensionType, critical: bool) -> CertificateBuilder:\n    if not isinstance(extval, ExtensionType):\n        raise TypeError('extension must be an ExtensionType')\n    extension = Extension(extval.oid, critical, extval)\n    _reject_duplicate_extension(extension, self._extensions)\n    return CertificateBuilder(self._issuer_name, self._subject_name, self._public_key, self._serial_number, self._not_valid_before, self._not_valid_after, [*self._extensions, extension])",
    "docstring": "Adds an X.509 extension to the certificate.",
    "type": "method",
    "file_path": "cryptography\\src\\cryptography\\x509\\base.py",
    "ast_data": "FunctionDef name:add_extension arg:self arg:extval arg:critical arguments arg arg arg If Call Raise Call Assign Call Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "graph",
    "source_code": "@property\ndef graph(self) -> Graph:\n    return self._graph",
    "docstring": "Return the ``",
    "type": "method",
    "file_path": "pytorch\\torch\\fx\\graph_module.py",
    "ast_data": "FunctionDef name:graph arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "add_unref",
    "source_code": "def add_unref(self, timestamp: int) -> None:\n    self._unref_times.append(timestamp)",
    "docstring": "Adds an unref to this tensor with the specified timestamp. Args: timestamp: Timestamp of object unreference as an integer.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\client\\timeline.py",
    "ast_data": "FunctionDef name:add_unref arg:self arg:timestamp arguments arg arg Call"
  },
  {
    "library": "pytorch",
    "name": "__str__",
    "source_code": "def __str__(self) -> str:\n    return f'__torch__.torch.classes.{self.class_name}'",
    "docstring": "Return the class name will prefix __torch__.torch.classes",
    "type": "method",
    "file_path": "pytorch\\torchgen\\model.py",
    "ast_data": "FunctionDef name:__str__ arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_validate",
    "source_code": "def _validate(self):\n    if self.tuple_shapes is not None:\n        for policy, shape in zip(self._sharding_policies, self._tuple_shapes):\n            _ = policy.get_sharded_shape(shape)",
    "docstring": "Checks that the configuration is self-consistent. Raises: ValueError: if the shapes and sharding policies don't match.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\tpu\\tpu_feed.py",
    "ast_data": "FunctionDef name:_validate arg:self arguments arg If Compare For Call Assign Call"
  },
  {
    "library": "tensorflow",
    "name": "CondBranchFuncGraph",
    "source_code": "class CondBranchFuncGraph(ControlFlowFuncGraph):\n    pass",
    "docstring": "FuncGraph for branches of tf.cond(). This is used to distinguish cond branches from other functions.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\control_flow_v2_func_graphs.py",
    "ast_data": "ClassDef name:CondBranchFuncGraph"
  },
  {
    "library": "pytorch",
    "name": "_new_line",
    "source_code": "def _new_line(self, line: str) -> Self:\n    raise NotImplementedError",
    "docstring": "Returns a new deferred line with the same condition",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\utils.py",
    "ast_data": "FunctionDef name:_new_line arg:self arg:line arguments arg arg Raise"
  },
  {
    "library": "tensorflow",
    "name": "unwrap",
    "source_code": "def unwrap(maybe_tf_decorator):\n    decorators = []\n    cur = maybe_tf_decorator\n    while True:\n        if isinstance(cur, TFDecorator):\n            decorators.append(cur)\n        elif _has_tf_decorator_attr(cur):\n            decorators.append(getattr(cur, '_tf_decorator'))\n        else:\n            break\n        if not hasattr(decorators[-1], 'decorated_target'):\n            break\n        cur = decorators[-1].decorated_target\n    return (decorators, cur)",
    "docstring": "Unwraps an object into a list of TFDecorators and a final target. Args: maybe_tf_decorator: Any callable object. Returns: A tuple whose first element is an list of TFDecorator-derived objects that were applied to the final callable target, and whose second element is the final undecorated callable target. If the parameter is not decorated by any TFDecorators, the first tuple element will be an empty list. The list is ordered from outermost to innermost decorators.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\util\\tf_decorator.py",
    "ast_data": "FunctionDef name:unwrap arg:maybe_tf_decorator arguments arg Assign Assign While If Call Call If Call Call Call If Call Assign Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "_tabulate",
    "source_code": "def _tabulate(results, metrics, formats):\n    column_width = max(max((len(k) for k in formats)) + 1, 8)\n    first_width = max((len(k) for k in metrics))\n    head_fmt = '{:<{fw}s}' + '{:>{cw}s}' * len(formats)\n    row_fmt = '{:<{fw}s}' + '{:>{cw}.3f}' * len(formats)\n    print(head_fmt.format('Metric', *formats, cw=column_width, fw=first_width))\n    for metric, row in zip(metrics, results[:, :, -1, -1, -1]):\n        print(row_fmt.format(metric, *row, cw=column_width, fw=first_width))",
    "docstring": "Prints results by metric and format Uses the last ([-1]) value of other fields",
    "type": "function",
    "file_path": "scikit-learn\\benchmarks\\bench_multilabel_metrics.py",
    "ast_data": "FunctionDef name:_tabulate arg:results arg:metrics arg:formats arguments arg arg arg Assign Call Call Call Assign Call Call Assign Call Assign Call Call Call For Call Call Call"
  },
  {
    "library": "kornia",
    "name": "__init__",
    "source_code": "def __init__(self, dim: int, num_heads: int=8, qkv_bias: bool=True, use_rel_pos: bool=False, rel_pos_zero_init: bool=True, input_size: Optional[tuple[int, int]]=None) -> None:\n    super().__init__()\n    self.num_heads = num_heads\n    head_dim = dim // num_heads\n    self.scale = head_dim ** (-0.5)\n    self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)\n    self.proj = nn.Linear(dim, dim)\n    self.use_rel_pos = use_rel_pos\n    if self.use_rel_pos and isinstance(input_size, tuple):\n        self.rel_pos_h = nn.Parameter(zeros(2 * input_size[0] - 1, head_dim))\n        self.rel_pos_w = nn.Parameter(zeros(2 * input_size[1] - 1, head_dim))",
    "docstring": "Construct attention block. Args: dim: Number of input channels. num_heads: Number of attention heads. qkv_bias: If True, add a learnable bias to query, key, value. use_rel_pos: If True, add relative positional embeddings to the attention map. rel_pos_zero_init: If True, zero initialize relative positional parameters. input_size: Input resolution for calculating the relative positional parameter size.",
    "type": "method",
    "file_path": "kornia\\kornia\\contrib\\models\\sam\\architecture\\image_encoder.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:dim arg:num_heads arg:qkv_bias arg:use_rel_pos arg:rel_pos_zero_init arg:input_size arguments arg arg arg arg arg arg arg Call Call Assign Assign Assign Assign Call Assign Call Assign If BoolOp Call Assign Call Call Assign Call Call"
  },
  {
    "library": "scipy",
    "name": "_munp",
    "source_code": "def _munp(self, n):\n    if n == 0:\n        return 1.0\n    if n % 2 == 0:\n        return sc.factorial2(int(n) - 1)\n    else:\n        return 0.0",
    "docstring": "@returns Moments of standard normal distribution for integer n >= 0 See eq. 16 of",
    "type": "method",
    "file_path": "scipy\\scipy\\stats\\_continuous_distns.py",
    "ast_data": "FunctionDef name:_munp arg:self arg:n arguments arg arg If Compare Return return:yes If Compare Return return:yes Call Call Return return:yes"
  },
  {
    "library": "numpy",
    "name": "_add",
    "source_code": "def _add(c1, c2):\n    [c1, c2] = as_series([c1, c2])\n    if len(c1) > len(c2):\n        c1[:c2.size] += c2\n        ret = c1\n    else:\n        c2[:c1.size] += c1\n        ret = c2\n    return trimseq(ret)",
    "docstring": "Helper function used to implement the `` functions.",
    "type": "function",
    "file_path": "numpy\\numpy\\polynomial\\polyutils.py",
    "ast_data": "FunctionDef name:_add arg:c1 arg:c2 arguments arg arg Assign Call If Compare Call Call Assign Assign Return return:yes Call"
  },
  {
    "library": "kornia",
    "name": "Vflip",
    "source_code": "class Vflip(Module):\n\n    def forward(self, input: Tensor) -> Tensor:\n        return vflip(input)\n\n    def __repr__(self) -> str:\n        return self.__class__.__name__",
    "docstring": "Vertically flip a tensor image or a batch of tensor images. Input must be a tensor of shape (C, H, W) or a batch of tensors :math:. Args: input: input tensor. Returns: The vertically flipped image tensor. Examples: >>> vflip = Vflip() >>> input = torch.tensor([[[ ... [0., 0., 0.], ... [0., 0., 0.], ... [0., 1., 1.] ... ]]]) >>> vflip(input) tensor([[[[0., 1., 1.], [0., 0., 0.], [0., 0., 0.]]]])",
    "type": "class",
    "file_path": "kornia\\kornia\\geometry\\transform\\flips.py",
    "ast_data": "ClassDef name:Vflip FunctionDef name:forward arg:self arg:input arguments arg arg Return return:yes Call FunctionDef name:__repr__ arg:self arguments arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "num_descendants",
    "source_code": "def num_descendants(self) -> int:\n    num_desc = 0\n    for children in self.children.values():\n        for child in children:\n            num_desc += 1\n            num_desc += child.num_descendants()\n    return num_desc",
    "docstring": "Total number of descendents of this node",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\cudagraph_trees.py",
    "ast_data": "FunctionDef name:num_descendants arg:self arguments arg Assign For Call For Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "TraceableObject",
    "source_code": "class TraceableObject(Generic[T]):\n    SUCCESS, HEURISTIC_USED, FAILURE = (0, 1, 2)\n\n    def __init__(self, obj: T, filename: Optional[str]=None, lineno: Optional[int]=None):\n        self.obj = obj\n        self.filename = filename\n        self.lineno = lineno\n\n    def set_filename_and_line_from_caller(self, offset: int=0) -> int:\n        retcode = self.SUCCESS\n        frame = inspect.currentframe()\n        if not frame:\n            return self.FAILURE\n        frame = cast(types.FrameType, frame)\n        for _ in range(offset + 1):\n            parent = frame.f_back\n            if parent is None:\n                retcode = self.HEURISTIC_USED\n                break\n            parent = cast(types.FrameType, parent)\n            frame = parent\n        self.filename = frame.f_code.co_filename\n        self.lineno = cast(int, frame.f_lineno)\n        return retcode\n\n    def copy_metadata(self):\n        return self.__class__(None, filename=self.filename, lineno=self.lineno)",
    "docstring": "Wrap an object together with its the code definition location.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\traceable_stack.py",
    "ast_data": "ClassDef name:TraceableObject Assign FunctionDef name:__init__ arg:self arg:obj arg:filename arg:lineno arguments arg arg arg arg Assign Assign Assign FunctionDef name:set_filename_and_line_from_caller arg:self arg:offset arguments arg arg Assign Assign Call If Return return:yes Assign Call For Call Assign If Compare Assign Assign Call Assign Assign Assign Call Return return:yes FunctionDef name:copy_metadata arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "FunctionalizeCtxWrapper",
    "source_code": "class FunctionalizeCtxWrapper:\n\n    @torch._disable_dynamo\n    def __init__(self, ctx, subgraph):\n        self.ctx = ctx\n        self.subgraph = subgraph\n\n    def __hash__(self):\n        return id(self.subgraph)\n\n    def __repr__(self):\n        return f'FunctionalizeCtxWrapper on subgraph {self.subgraph})'\n\n    def __call__(self, *args, **kwargs):\n        if isinstance(self.subgraph, torch.fx.GraphModule):\n            with fx_traceback.preserve_node_meta():\n                return self.ctx.functionalize(torch.fx.Interpreter(self.subgraph).run)(*args, **kwargs)\n        return self.ctx.functionalize(self.subgraph)(*args, **kwargs)",
    "docstring": "This is a dummy wrapper to facilitate fake tensor caching. For AOT Dispatcher metadata collection pass, HOPs go from functionalization key to fake tensor key. The functionalization key wraps the subgraphs in a function, which changes from call to call even though the subgraph might still be same. To enable fake tensor caching, we just wrap the ctx and subgraph in this class and then use the subgraph as the hash.",
    "type": "class",
    "file_path": "pytorch\\torch\\_higher_order_ops\\utils.py",
    "ast_data": "ClassDef name:FunctionalizeCtxWrapper FunctionDef name:__init__ arg:self arg:ctx arg:subgraph arguments arg arg arg Assign Assign FunctionDef name:__hash__ arg:self arguments arg Return return:yes Call FunctionDef name:__repr__ arg:self arguments arg Return return:yes FunctionDef name:__call__ arg:self arguments arg arg arg If Call With Call Return return:yes Call Call Call Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "device",
    "source_code": "def device(name):\n    ensure_initialized()\n    return context().device(name)",
    "docstring": "Context-manager to force placement of operations and Tensors on a device. Example: will ensure that the Tensor is on CPU but the operation runs on GPU 0. Args: name: Name of the device (see context().devices()), or None to perform automatic placement. Returns: Context manager for setting the device.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\context.py",
    "ast_data": "FunctionDef name:device arg:name arguments arg Call Return return:yes Call Call"
  },
  {
    "library": "pandas",
    "name": "_bool_arith_fallback",
    "source_code": "def _bool_arith_fallback(op_str, left_op, right_op) -> bool:\n    if _has_bool_dtype(left_op) and _has_bool_dtype(right_op):\n        if op_str in _BOOL_OP_UNSUPPORTED:\n            warnings.warn(f'evaluating in Python space because the {op_str!r} operator is not supported by numexpr for the bool dtype, use {_BOOL_OP_UNSUPPORTED[op_str]!r} instead.', stacklevel=find_stack_level())\n            return True\n    return False",
    "docstring": "Check if we should fallback to the python in case of an unsupported operation by numexpr, which is the case for some boolean ops.",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\computation\\expressions.py",
    "ast_data": "FunctionDef name:_bool_arith_fallback arg:op_str arg:left_op arg:right_op arguments arg arg arg If BoolOp Call Call If Compare Call Call Return return:yes Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "__init__",
    "source_code": "def __init__(self, xpad=0.0, ypad=0.0, update_func=None):\n    self._xpad, self._ypad = (xpad, ypad)\n    self._update_prop_func = update_func",
    "docstring": "Parameters ---------- xpad : float, optional Padding in x-direction. ypad : float, optional Padding in y-direction. update_func : callable, optional Function for updating the legend handler properties from another legend handler, used by .",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\legend_handler.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:xpad arg:ypad arg:update_func arguments arg arg arg arg Assign Assign"
  },
  {
    "library": "numpy",
    "name": "_check_mask_axis",
    "source_code": "def _check_mask_axis(mask, axis, keepdims=np._NoValue):\n    kwargs = {} if keepdims is np._NoValue else {'keepdims': keepdims}\n    if mask is not nomask:\n        return mask.all(axis=axis, **kwargs)\n    return nomask",
    "docstring": "Check whether there are masked values along the given axis",
    "type": "function",
    "file_path": "numpy\\numpy\\ma\\core.py",
    "ast_data": "FunctionDef name:_check_mask_axis arg:mask arg:axis arg:keepdims arguments arg arg arg Assign Compare If Compare Return return:yes Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "foldr",
    "source_code": "@doc_controls.do_not_generate_docs\ndef foldr(fn, elems, initializer=None, name=None):\n    return functional_ops.foldr(fn, elems, initializer=initializer, name=name)",
    "docstring": "Reduce elems using fn to combine them from right to left. Args: fn: Callable that will be called upon each element in elems and an accumulator, for instance elems: tensor initializer: The first value used ( in case of None) name: A string name for the foldr node in the graph Returns: Same type and shape as initializer",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\backend.py",
    "ast_data": "FunctionDef name:foldr arg:fn arg:elems arg:initializer arg:name arguments arg arg arg arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "constrain_unify",
    "source_code": "def constrain_unify(a: torch.SymInt, b: torch.SymInt) -> None:\n    if not isinstance(a, SymInt):\n        if not isinstance(b, SymInt):\n            assert a == b\n            return\n        else:\n            shape_env = b.node.shape_env\n    else:\n        shape_env = a.node.shape_env\n    shape_env._constrain_unify(a, b)",
    "docstring": "Given two SymInts, constrain them so that they must be equal. NB: this will not work with SymInts that represent nontrivial expressions (yet!)",
    "type": "function",
    "file_path": "pytorch\\torch\\fx\\experimental\\symbolic_shapes.py",
    "ast_data": "FunctionDef name:constrain_unify arg:a arg:b arguments arg arg If Call If Call Compare Return return:no Assign Assign Call"
  },
  {
    "library": "tensorflow",
    "name": "objects_to_serialize",
    "source_code": "@abc.abstractmethod\ndef objects_to_serialize(self, serialization_cache):\n    raise NotImplementedError",
    "docstring": "Returns dictionary of extra checkpointable objects to serialize. See for an explanation of this function's effects. Args: serialization_cache: Dictionary passed to all objects in the same object graph during serialization. Returns: A dictionary mapping attribute names to checkpointable objects.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\saving\\saved_model\\base_serialization.py",
    "ast_data": "FunctionDef name:objects_to_serialize arg:self arg:serialization_cache arguments arg arg Raise"
  },
  {
    "library": "tensorflow",
    "name": "__str__",
    "source_code": "def __str__(self) -> str:\n    return 'while_loop(%s)' % self.name",
    "docstring": "String representation.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\parallel_for\\pfor.py",
    "ast_data": "FunctionDef name:__str__ arg:self arguments arg Return return:yes"
  },
  {
    "library": "cryptography",
    "name": "private_bytes_raw",
    "source_code": "@abc.abstractmethod\ndef private_bytes_raw(self) -> bytes:\n    pass",
    "docstring": "The raw bytes of the private key. Equivalent to private_bytes(Raw, Raw, NoEncryption()).",
    "type": "method",
    "file_path": "cryptography\\src\\cryptography\\hazmat\\primitives\\asymmetric\\ed25519.py",
    "ast_data": "FunctionDef name:private_bytes_raw arg:self arguments arg"
  },
  {
    "library": "pytorch",
    "name": "PackageMangler",
    "source_code": "class PackageMangler:\n\n    def __init__(self) -> None:\n        global _mangle_index\n        self._mangle_index = _mangle_index\n        _mangle_index += 1\n        self._mangle_parent = f'<torch_package_{self._mangle_index}>'\n\n    def mangle(self, name) -> str:\n        assert len(name) != 0\n        return self._mangle_parent + '.' + name\n\n    def demangle(self, mangled: str) -> str:\n        if mangled.startswith(self._mangle_parent + '.'):\n            return mangled.partition('.')[2]\n        return mangled\n\n    def parent_name(self):\n        return self._mangle_parent",
    "docstring": "Used on import, to ensure that all modules imported have a shared mangle parent.",
    "type": "class",
    "file_path": "pytorch\\torch\\package\\_mangling.py",
    "ast_data": "ClassDef name:PackageMangler FunctionDef name:__init__ arg:self arguments arg Assign Assign FunctionDef name:mangle arg:self arg:name arguments arg arg Compare Call Return return:yes FunctionDef name:demangle arg:self arg:mangled arguments arg arg If Call Return return:yes Call Return return:yes FunctionDef name:parent_name arg:self arguments arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_patch_difflib_sequence_matcher_init",
    "source_code": "@contextlib.contextmanager\ndef _patch_difflib_sequence_matcher_init():\n    original_init = difflib.SequenceMatcher.__init__\n\n    def patched_init(self, isjunk=None, a='', b='', autojunk=True):\n        original_init(self, isjunk, a, b, autojunk=False)\n    difflib.SequenceMatcher.__init__ = patched_init\n    try:\n        yield\n    finally:\n        difflib.SequenceMatcher.__init__ = original_init",
    "docstring": "Context patching for fx readable graph. Under this context, the argument of will always be considered as . This is to prevent recognizing stacktrace messages in fx readable graph as junk, as these messages tend to be long (>200) and repeat multiple times, which falls under the junk filter criteria. is used underneath by all sorts of diffing functions in , including , , . Unfortunately, there is no way to pass argument to these functions, and they all default to . This context patching will affect all of them. _",
    "type": "function",
    "file_path": "pytorch\\torch\\onnx\\_internal\\fx\\_pass.py",
    "ast_data": "FunctionDef name:_patch_difflib_sequence_matcher_init arguments Assign FunctionDef name:patched_init arg:self arg:isjunk arg:a arg:b arg:autojunk arguments arg arg arg arg arg Call Assign Try Assign"
  },
  {
    "library": "pytorch",
    "name": "set_device_states",
    "source_code": "def set_device_states(devices, states, *, device_type=None) -> None:\n    if device_type is None:\n        device_type = DefaultDeviceType.get_device_type()\n    if device_type == 'meta':\n        return\n    device_module = _get_device_module(device_type)\n    for device, state in zip(devices, states):\n        with device_module.device(device):\n            device_module.set_rng_state(state)",
    "docstring": "Sets random number generator states for the specified devices. Args: devices: Device ids to set states for. states: States to set. device_type: ``.",
    "type": "function",
    "file_path": "pytorch\\torch\\utils\\checkpoint.py",
    "ast_data": "FunctionDef name:set_device_states arg:devices arg:states arguments arg arg arg If Compare Assign Call If Compare Return return:no Assign Call For Call With Call Call"
  },
  {
    "library": "pytorch",
    "name": "_msg_dict_from_dcp_method_args",
    "source_code": "def _msg_dict_from_dcp_method_args(*args, **kwargs) -> dict[str, Any]:\n    msg_dict = {}\n    storage_writer = kwargs.get('storage_writer', None)\n    storage_reader = kwargs.get('storage_reader', None)\n    planner = kwargs.get('planner', None)\n    checkpoint_id = kwargs.get('checkpoint_id', None)\n    if not checkpoint_id and (serializer := (storage_writer or storage_reader)):\n        checkpoint_id = getattr(serializer, 'checkpoint_id', None)\n    msg_dict['checkpoint_id'] = str(checkpoint_id) if checkpoint_id is not None else checkpoint_id\n    msg_dict['uuid'] = str(uuid4().int)\n    if storage_writer:\n        msg_dict['storage_writer'] = storage_writer.__class__.__name__\n    if storage_reader:\n        msg_dict['storage_reader'] = storage_reader.__class__.__name__\n    if planner:\n        msg_dict['planner'] = planner.__class__.__name__\n    return msg_dict",
    "docstring": "Extracts log data from dcp method args",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\checkpoint\\logger.py",
    "ast_data": "FunctionDef name:_msg_dict_from_dcp_method_args arguments arg arg Assign Assign Call Assign Call Assign Call Assign Call If BoolOp BoolOp Assign Call Assign Compare Call Assign Call Call If Assign If Assign If Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, graph, run_metadata):\n    self._graph = graph\n    self._run_metadata = run_metadata\n    self._string_table = StringTable()\n    self._functions = Functions(self._string_table)\n    self._locations = Locations(self._functions)",
    "docstring": "Constructor. Args: graph: A instance. run_metadata: A list of objects.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\profiler\\pprof_profiler.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:graph arg:run_metadata arguments arg arg arg Assign Assign Assign Call Assign Call Assign Call"
  },
  {
    "library": "tensorflow",
    "name": "sum",
    "source_code": "@dispatch.add_dispatch_support\n@doc_controls.do_not_generate_docs\ndef sum(x, axis=None, keepdims=False):\n    return math_ops.reduce_sum(x, axis, keepdims)",
    "docstring": "Sum of the values in a tensor, alongside the specified axis. Args: x: A tensor or variable. axis: An integer, the axis to sum over. keepdims: A boolean, whether to keep the dimensions or not. If is , the rank of the tensor is reduced by 1. If is , the reduced dimension is retained with length 1. Returns: A tensor with sum of .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\backend.py",
    "ast_data": "FunctionDef name:sum arg:x arg:axis arg:keepdims arguments arg arg arg Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "clear",
    "source_code": "def clear(self):\n    self.set_active(self._initial_active)",
    "docstring": "Reset the active button to the initially active one.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\widgets.py",
    "ast_data": "FunctionDef name:clear arg:self arguments arg Call"
  },
  {
    "library": "authlib",
    "name": "validate_request",
    "source_code": "def validate_request(self, scopes, request, **kwargs):\n    validator, token_string = self.parse_request_authorization(request)\n    validator.validate_request(request)\n    token = validator.authenticate_token(token_string)\n    validator.validate_token(token, scopes, request, **kwargs)\n    return token",
    "docstring": "Validate the request and return a token.",
    "type": "method",
    "file_path": "authlib\\authlib\\oauth2\\rfc6749\\resource_protector.py",
    "ast_data": "FunctionDef name:validate_request arg:self arg:scopes arg:request arguments arg arg arg arg Assign Call Call Assign Call Call Return return:yes"
  },
  {
    "library": "kornia",
    "name": "from_pretrained",
    "source_code": "@classmethod\ndef from_pretrained(cls, model_type: Literal['b1', 'b2', 'b3'], resolution: Literal[224, 256, 288]) -> EfficientViTConfig:\n    return cls(checkpoint=_get_base_url(model_type=model_type, resolution=resolution))",
    "docstring": "Return a configuration object from a pre-trained model. Args: model_type: model type, one of :obj:, :obj:, :obj:. resolution: input resolution, one of :obj:, :obj:, :obj:.",
    "type": "method",
    "file_path": "kornia\\kornia\\contrib\\models\\efficient_vit\\model.py",
    "ast_data": "FunctionDef name:from_pretrained arg:cls arg:model_type arg:resolution arguments arg arg arg Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "frozen_saver",
    "source_code": "def frozen_saver(root_trackable):\n    named_saveable_objects, registered_savers = save_util_v1.frozen_saveables_and_savers(graph_view_lib.ObjectGraphView(root_trackable))\n    return functional_saver.MultiDeviceSaver.from_saveables(named_saveable_objects, registered_savers)",
    "docstring": "Creates a static from a trackable object. The returned saves object-based checkpoints, but these checkpoints will no longer reflect structural changes to the object graph, only changes to the values of s added as dependencies of the root object before was called. works on the returned , but requires that the object graph of the checkpoint being loaded exactly matches the object graph when was called. This is in contrast the object-based restore performed by which attempts a fuzzy matching between a checkpoint's object graph and the current Python object graph. Args: root_trackable: A trackable object to save. Returns: A saver which saves object-based checkpoints for the object graph frozen at the time was called.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\checkpoint\\checkpoint.py",
    "ast_data": "FunctionDef name:frozen_saver arg:root_trackable arguments arg Assign Call Call Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "endswith",
    "source_code": "@forbid_nonstring_types(['bytes'])\ndef endswith(self, pat: str | tuple[str, ...], na: Scalar | lib.NoDefault=lib.no_default) -> Series | Index:\n    if not isinstance(pat, (str, tuple)):\n        msg = f'expected a string or tuple, not {type(pat).__name__}'\n        raise TypeError(msg)\n    result = self._data.array._str_endswith(pat, na=na)\n    return self._wrap_result(result, returns_string=False)",
    "docstring": "Test if the end of each string element matches a pattern. Equivalent to :meth:. Parameters ---------- pat : str or tuple[str, ...] Character sequence or tuple of strings. Regular expressions are not accepted. na : scalar, optional Object shown if element tested is not a string. The default depends on dtype of the array. For object-dtype, `naFalseNaN`. >>> s.str.endswith(\"t\", na=False) 0 True 1 False 2 False 3 False dtype: bool",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\strings\\accessor.py",
    "ast_data": "FunctionDef name:endswith arg:self arg:pat arg:na arguments arg arg arg If Call Assign Call Raise Call Assign Call Return return:yes Call Call"
  },
  {
    "library": "pandas",
    "name": "_fill_non_empty_info",
    "source_code": "@abstractmethod\ndef _fill_non_empty_info(self) -> None:\n    pass",
    "docstring": "Add lines to the info table, pertaining to non-empty series.",
    "type": "method",
    "file_path": "pandas\\pandas\\io\\formats\\info.py",
    "ast_data": "FunctionDef name:_fill_non_empty_info arg:self arguments arg"
  },
  {
    "library": "tensorflow",
    "name": "lookup_prefix",
    "source_code": "def lookup_prefix(self, prefix, n):\n    commands = [cmd for cmd in self._commands if cmd.startswith(prefix)]\n    return commands[-n:]",
    "docstring": "Look up the n most recent commands that starts with prefix. Args: prefix: The prefix to lookup. n: Number of most recent commands to look up. Returns: A list of n most recent commands that have the specified prefix, or all available most recent commands that have the prefix, if n exceeds the number of history commands with the prefix.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\debug\\cli\\debugger_cli_common.py",
    "ast_data": "FunctionDef name:lookup_prefix arg:self arg:prefix arg:n arguments arg arg arg Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_MatrixSetDiagGradV3",
    "source_code": "@ops.RegisterGradient('MatrixSetDiagV3')\ndef _MatrixSetDiagGradV3(op: ops.Operation, grad):\n    diag_shape = op.inputs[1].get_shape()\n    align = op.get_attr('align')\n    if not diag_shape.is_fully_defined():\n        grad_shape = array_ops.shape(grad)\n        batch_shape = grad_shape[:-2]\n        matrix_shape = grad_shape[-2:]\n        diag_index = array_ops.reshape(op.inputs[2], [-1])\n        d_lower = diag_index[0]\n        d_upper = diag_index[-1]\n        y_offset = cond.cond(math_ops.less(d_upper, 0), lambda: d_upper, lambda: 0)\n        x_offset = cond.cond(math_ops.greater(d_lower, 0), lambda: -d_lower, lambda: 0)\n        max_diag_len = math_ops.minimum(matrix_shape[0] + y_offset, matrix_shape[1] + x_offset)\n        postfix = cond.cond(math_ops.equal(d_lower, d_upper), lambda: ops.convert_to_tensor([max_diag_len]), lambda: ops.convert_to_tensor([d_upper - d_lower + 1, max_diag_len]))\n        diag_shape = array_ops.concat([batch_shape, postfix], 0)\n    grad_input = array_ops.matrix_set_diag(grad, array_ops.zeros(diag_shape, dtype=grad.dtype), k=op.inputs[2], align=align)\n    grad_diag = array_ops.matrix_diag_part(grad, k=op.inputs[2], align=align)\n    return (grad_input, grad_diag, None)",
    "docstring": "Gradient for MatrixSetDiagV3.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\array_grad.py",
    "ast_data": "FunctionDef name:_MatrixSetDiagGradV3 arg:op arg:grad arguments arg arg Assign Call Assign Call If Call Assign Call Assign Assign Assign Call Assign Assign Assign Call Call arguments arguments Assign Call Call arguments arguments Assign Call Assign Call Call arguments Call arguments Call Assign Call Assign Call Call Assign Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "list_to_tuple",
    "source_code": "def list_to_tuple(maybe_list):\n    if isinstance(maybe_list, list):\n        return tuple(maybe_list)\n    return maybe_list",
    "docstring": "Datasets will stack the list of tensor, so switch them to tuples.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\training_utils.py",
    "ast_data": "FunctionDef name:list_to_tuple arg:maybe_list arguments arg If Call Return return:yes Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "get_size_inches",
    "source_code": "def get_size_inches(self):\n    return np.array(self.bbox_inches.p1)",
    "docstring": "Return the current size of the figure in inches. Returns ------- ndarray The size (width, height) of the figure in inches. See Also -------- matplotlib.figure.Figure.set_size_inches matplotlib.figure.Figure.get_figwidth matplotlib.figure.Figure.get_figheight Notes ----- The size in pixels can be obtained by multiplying with .",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\figure.py",
    "ast_data": "FunctionDef name:get_size_inches arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "_display_summary",
    "source_code": "def _display_summary(message, status, fun, iteration):\n    print(message)\n    if status in (0, 1):\n        print(f'         Current function value: {fun: <12.6f}')\n    print(f'         Iterations: {iteration:d}')",
    "docstring": "Print the termination summary of the linear program Parameters ---------- message : str A string descriptor of the exit status of the optimization. status : int An integer representing the exit status of the optimization:: 0 : Optimization terminated successfully 1 : Iteration limit reached 2 : Problem appears to be infeasible 3 : Problem appears to be unbounded 4 : Serious numerical difficulties encountered fun : float Value of the objective function. iteration : iteration The number of iterations performed.",
    "type": "function",
    "file_path": "scipy\\scipy\\optimize\\_linprog_util.py",
    "ast_data": "FunctionDef name:_display_summary arg:message arg:status arg:fun arg:iteration arguments arg arg arg arg Call If Compare Call Call"
  },
  {
    "library": "sphinx",
    "name": "FootnoteDocnameUpdater",
    "source_code": "class FootnoteDocnameUpdater(SphinxTransform):\n    default_priority = 700\n    TARGET_NODES = (nodes.footnote, nodes.footnote_reference)\n\n    def apply(self, **kwargs: Any) -> None:\n        matcher = NodeMatcher(*self.TARGET_NODES)\n        for node in matcher.findall(self.document):\n            node['docname'] = self.env.docname",
    "docstring": "Add docname to footnote and footnote_reference nodes.",
    "type": "class",
    "file_path": "sphinx\\sphinx\\builders\\latex\\transforms.py",
    "ast_data": "ClassDef name:FootnoteDocnameUpdater Assign Assign FunctionDef name:apply arg:self arguments arg arg Assign Call For Call Assign"
  },
  {
    "library": "pandas",
    "name": "__finalize__",
    "source_code": "@final\ndef __finalize__(self, other, method: str | None=None, **kwargs) -> Self:\n    if isinstance(other, NDFrame):\n        if other.attrs:\n            self.attrs = deepcopy(other.attrs)\n        self.flags.allows_duplicate_labels = self.flags.allows_duplicate_labels and other.flags.allows_duplicate_labels\n        for name in set(self._metadata) & set(other._metadata):\n            assert isinstance(name, str)\n            object.__setattr__(self, name, getattr(other, name, None))\n    if method == 'concat':\n        objs = other.objs\n        if all((bool(obj.attrs) for obj in objs)):\n            attrs = objs[0].attrs\n            have_same_attrs = all((obj.attrs == attrs for obj in objs[1:]))\n            if have_same_attrs:\n                self.attrs = deepcopy(attrs)\n        allows_duplicate_labels = all((x.flags.allows_duplicate_labels for x in objs))\n        self.flags.allows_duplicate_labels = allows_duplicate_labels\n    return self",
    "docstring": "Propagate metadata from other to self. Parameters ---------- other : the object from which to get the attributes that we are going to propagate method : str, optional A passed method name providing context on where `method` are not currently considered stable across pandas releases.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\generic.py",
    "ast_data": "FunctionDef name:__finalize__ arg:self arg:other arg:method arguments arg arg arg arg If Call If Assign Call Assign BoolOp For Call Call Call Call Call If Compare Assign If Call Call Assign Assign Call Compare If Assign Call Assign Call Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "body",
    "source_code": "def body(i, *args):\n    del args\n    fn_result = fn(ctx, iterator.get_next())\n    for name, output in ctx.last_step_outputs.items():\n        ctx.last_step_outputs[name] = self._local_results(output)\n    flat_last_step_outputs = nest.flatten(ctx.last_step_outputs)\n    with ops.control_dependencies([fn_result]):\n        return [i + 1] + flat_last_step_outputs",
    "docstring": "A wrapper around to create the while loop body.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\mirrored_strategy.py",
    "ast_data": "FunctionDef name:body arg:i arguments arg arg Assign Call Call For Call Assign Call Assign Call With Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_pyval_empty_list_depth",
    "source_code": "def _pyval_empty_list_depth(pyval):\n    if isinstance(pyval, list):\n        if not pyval:\n            return 1\n        depths = [_pyval_empty_list_depth(v) for v in pyval]\n        if any((depth is None for depth in depths)):\n            return None\n        else:\n            return max(depths) + 1\n    else:\n        return None",
    "docstring": "Find the max depth for nested empty lists. Args: pyval: A nested python list. Returns: The maximum depth of empty lists in , or None if contains anything other than nested empty lists.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\structured\\structured_tensor.py",
    "ast_data": "FunctionDef name:_pyval_empty_list_depth arg:pyval arguments arg If Call If Return return:yes Assign Call If Call Compare Return return:no Return return:yes Call Return return:no"
  },
  {
    "library": "tensorflow",
    "name": "Tape",
    "source_code": "class Tape(object):\n    __slots__ = ['_tape']\n\n    def __init__(self, tape):\n        self._tape = tape\n\n    def watched_variables(self):\n        return pywrap_tfe.TFE_Py_TapeWatchedVariables(self._tape)",
    "docstring": "Represents a gradient propagation trace.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\tape.py",
    "ast_data": "ClassDef name:Tape Assign FunctionDef name:__init__ arg:self arg:tape arguments arg arg Assign FunctionDef name:watched_variables arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "_init_cls",
    "source_code": "@classmethod\ndef _init_cls(cls):\n    code = StringIO()\n    for target in OP_NAMES:\n        sig = inspect.signature(getattr(OpsHandler, target))\n        if all((p.kind == inspect.Parameter.POSITIONAL_OR_KEYWORD and p.default is inspect.Parameter.empty for p in sig.parameters.values())):\n            self_arg, *args = sig.parameters.keys()\n            assert self_arg == 'self'\n            code.write(f'\\n                    def {target}(self, {', '.join(args)}):\\n                        return self._default({target!r}, ({', '.join(args)}, ), {{}})\\n                    '.strip())\n            code.write('\\n\\n')\n        else:\n            setattr(cls, target, cls._call_default(target))\n    ctx: dict[str, Any] = {}\n    exec(code.getvalue(), ctx)\n    for target, impl in ctx.items():\n        if target in OP_NAMES:\n            setattr(cls, target, impl)",
    "docstring": "Here we codegen many functions of the form: def add(self, a, b): return self._default('add', (a, b), {}) and install them in cls. This is the same as _call_default above, but is about 1.2x faster since CPython varargs parsing is slow.",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\ops_handler.py",
    "ast_data": "FunctionDef name:_init_cls arg:cls arguments arg Assign Call For Assign Call Call If Call BoolOp Compare Compare Call Assign Call Compare Call Call Call Call Call Call Call Call Call For Call If Compare Call"
  },
  {
    "library": "tensorflow",
    "name": "write",
    "source_code": "def write(self, index, value, name=None):\n    with ops.name_scope(name, 'TensorArrayWrite', [self._handle, index, value]):\n        value = ops.convert_to_tensor(value, preferred_dtype=self._dtype, name='value')\n        _check_dtypes(value, self._dtype)\n        self._check_element_shape(value.shape)\n        with self._maybe_colocate_with(value):\n            flow_out = gen_data_flow_ops.tensor_array_write_v3(handle=self._handle, index=index, value=value, flow_in=self._flow, name=name)\n        return build_ta_with_new_flow(self, flow_out)",
    "docstring": "See TensorArray.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\tensor_array_ops.py",
    "ast_data": "FunctionDef name:write arg:self arg:index arg:value arg:name arguments arg arg arg arg With Call Assign Call Call Call With Call Assign Call Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "set_radio_props",
    "source_code": "def set_radio_props(self, props):\n    _api.check_isinstance(dict, props=props)\n    if 's' in props:\n        props['sizes'] = np.broadcast_to(props.pop('s'), len(self.labels))\n    self._buttons.update(props)\n    self._active_colors = self._buttons.get_facecolor()\n    if len(self._active_colors) == 1:\n        self._active_colors = np.repeat(self._active_colors, len(self.labels), axis=0)\n    self._buttons.set_facecolor([activecolor if text.get_text() == self.value_selected else 'none' for text, activecolor in zip(self.labels, self._active_colors)])",
    "docstring": "Set properties of the labels. .. versionadded:: 3.7 Parameters ---------- props : dict Dictionary of properties to be used for the radio buttons.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\widgets.py",
    "ast_data": "FunctionDef name:set_radio_props arg:self arg:props arguments arg arg Call If Compare Assign Call Call Call Call Assign Call If Compare Call Assign Call Call Call Compare Call Call"
  },
  {
    "library": "authlib",
    "name": "create_claims_options",
    "source_code": "def create_claims_options(self):\n    options = {'iss': {'essential': True, 'validate': _validate_iss}, 'sub': {'essential': True}, 'aud': {'essential': True, 'value': self.token_url}, 'exp': {'essential': True}}\n    if self._validate_jti:\n        options['jti'] = {'essential': True, 'validate': self.validate_jti}\n    return options",
    "docstring": "Create a claims_options for verify JWT payload claims. Developers MAY overwrite this method to create a more strict options.",
    "type": "method",
    "file_path": "authlib\\authlib\\oauth2\\rfc7523\\client.py",
    "ast_data": "FunctionDef name:create_claims_options arg:self arguments arg Assign If Assign Return return:yes"
  },
  {
    "library": "pandas",
    "name": "_href_getter",
    "source_code": "def _href_getter(self, obj) -> str | None:\n    raise AbstractMethodError(self)",
    "docstring": "Return a href if the DOM node contains a child or None. Parameters ---------- obj : node-like A DOM node. Returns ------- href : str or unicode The href from the child of the DOM node.",
    "type": "method",
    "file_path": "pandas\\pandas\\io\\html.py",
    "ast_data": "FunctionDef name:_href_getter arg:self arg:obj arguments arg arg Raise Call"
  },
  {
    "library": "pytorch",
    "name": "snapshot",
    "source_code": "def snapshot(self):\n    try:\n        ctx = MemPoolContext(self)\n        snapshot = torch.cuda.memory_snapshot()\n    finally:\n        del ctx\n    return snapshot",
    "docstring": "Return a snapshot of the CUDA memory allocator pool state across all devices. Interpreting the output of this function requires familiarity with the memory allocator internals. .. note:: See :ref: for more details about GPU memory management.",
    "type": "method",
    "file_path": "pytorch\\torch\\cuda\\memory.py",
    "ast_data": "FunctionDef name:snapshot arg:self arguments arg Try Assign Call Assign Call Return return:yes"
  },
  {
    "library": "pandas",
    "name": "len",
    "source_code": "def len(self, text: str) -> int:\n    if not isinstance(text, str):\n        return len(text)\n    return sum((self._EAW_MAP.get(east_asian_width(c), self.ambiguous_width) for c in text))",
    "docstring": "Calculate display width considering unicode East Asian Width",
    "type": "method",
    "file_path": "pandas\\pandas\\io\\formats\\printing.py",
    "ast_data": "FunctionDef name:len arg:self arg:text arguments arg arg If Call Return return:yes Call Return return:yes Call Call Call"
  },
  {
    "library": "sphinx",
    "name": "read_doc",
    "source_code": "@final\ndef read_doc(self, docname: str, *, _cache: bool=True) -> None:\n    env = self.env\n    env.prepare_settings(docname)\n    docutils_conf = self.confdir / 'docutils.conf'\n    if docutils_conf.is_file():\n        env.note_dependency(docutils_conf)\n    filename = str(env.doc2path(docname))\n    filetype = get_filetype(self.app.config.source_suffix, filename)\n    publisher = self.env._registry.get_publisher(self.app, filetype)\n    self.env.current_document._parser = publisher.parser\n    publisher.settings.record_dependencies = DependencyList()\n    with sphinx_domains(env), rst.default_role(docname, self.config.default_role):\n        error_handler = _UnicodeDecodeErrorHandler(docname)\n        codecs.register_error('sphinx', error_handler)\n        publisher.set_source(source_path=filename)\n        publisher.publish()\n        doctree = publisher.document\n    env.all_docs[docname] = time.time_ns() // 1000\n    env.current_document = _CurrentDocument()\n    env.ref_context.clear()\n    self.write_doctree(docname, doctree, _cache=_cache)",
    "docstring": "Parse a file and add/update inventory entries for the doctree.",
    "type": "method",
    "file_path": "sphinx\\sphinx\\builders\\__init__.py",
    "ast_data": "FunctionDef name:read_doc arg:self arg:docname arguments arg arg arg Assign Call Assign If Call Call Assign Call Call Assign Call Assign Call Assign Assign Call With Call Call Assign Call Call Call Call Assign Assign Call Assign Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "to_sparse",
    "source_code": "def to_sparse(self, name=None):\n    with ops.name_scope(name, 'RaggedToSparse', [self]):\n        result = gen_ragged_conversion_ops.ragged_tensor_to_sparse(self.nested_row_splits, self.flat_values, name=name)\n        return sparse_tensor.SparseTensor(result.sparse_indices, result.sparse_values, result.sparse_dense_shape)",
    "docstring": "Converts this into a . Example: >>> rt = tf.ragged.constant([[1, 2, 3], [4], [], [5, 6]]) >>> print(rt.to_sparse()) SparseTensor(indices=tf.Tensor( [[0 0] [0 1] [0 2] [1 0] [3 0] [3 1]], shape=(6, 2), dtype=int64), values=tf.Tensor([1 2 3 4 5 6], shape=(6,), dtype=int32), dense_shape=tf.Tensor([4 3], shape=(2,), dtype=int64)) Args: name: A name prefix for the returned tensors (optional). Returns: A SparseTensor with the same values as .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\ragged_tensor.py",
    "ast_data": "FunctionDef name:to_sparse arg:self arg:name arguments arg arg With Call Assign Call Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "davies_bouldin_score",
    "source_code": "@validate_params({'X': ['array-like'], 'labels': ['array-like']}, prefer_skip_nested_validation=True)\ndef davies_bouldin_score(X, labels):\n    X, labels = check_X_y(X, labels)\n    le = LabelEncoder()\n    labels = le.fit_transform(labels)\n    n_samples, _ = X.shape\n    n_labels = len(le.classes_)\n    check_number_of_labels(n_labels, n_samples)\n    intra_dists = np.zeros(n_labels)\n    centroids = np.zeros((n_labels, len(X[0])), dtype=float)\n    for k in range(n_labels):\n        cluster_k = _safe_indexing(X, labels == k)\n        centroid = cluster_k.mean(axis=0)\n        centroids[k] = centroid\n        intra_dists[k] = np.average(pairwise_distances(cluster_k, [centroid]))\n    centroid_distances = pairwise_distances(centroids)\n    if np.allclose(intra_dists, 0) or np.allclose(centroid_distances, 0):\n        return 0.0\n    centroid_distances[centroid_distances == 0] = np.inf\n    combined_intra_dists = intra_dists[:, None] + intra_dists\n    scores = np.max(combined_intra_dists / centroid_distances, axis=1)\n    return float(np.mean(scores))",
    "docstring": "Compute the Davies-Bouldin score. The score is defined as the average similarity measure of each cluster with its most similar cluster, where similarity is the ratio of within-cluster distances to between-cluster distances. Thus, clusters which are farther apart and less dispersed will result in a better score. The minimum score is zero, with lower values indicating better clustering. Read more in the :ref:. .. versionadded:: 0.20 Parameters ---------- X : array-like of shape (n_samples, n_features) A list of `\"A Cluster Separation Measure\" `__. IEEE Transactions on Pattern Analysis and Machine Intelligence. PAMI-1 (2): 224-227 Examples -------- >>> from sklearn.metrics import davies_bouldin_score >>> X = [[0, 1], [1, 1], [3, 4]] >>> labels = [0, 0, 1] >>> davies_bouldin_score(X, labels) 0.12...",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\metrics\\cluster\\_unsupervised.py",
    "ast_data": "FunctionDef name:davies_bouldin_score arg:X arg:labels arguments arg arg Assign Call Assign Call Assign Call Assign Assign Call Call Assign Call Assign Call Call For Call Assign Call Compare Assign Call Assign Assign Call Call Assign Call If BoolOp Call Call Return return:yes Assign Compare Assign Assign Call Return return:yes Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "match_row_splits_dtypes",
    "source_code": "def match_row_splits_dtypes(*tensors, **kwargs):\n    return_dtype = kwargs.pop('return_dtype', False)\n    if kwargs:\n        raise ValueError(f'Unexpected keyword args {kwargs}.')\n    has_int32 = False\n    has_int64 = False\n    for tensor in tensors:\n        if isinstance(tensor, RaggedTensor):\n            if tensor.row_splits.dtype == dtypes.int32:\n                has_int32 = True\n            else:\n                has_int64 = True\n    if has_int32 and has_int64:\n        if not ragged_config.auto_cast_partition_dtype():\n            raise ValueError('Input RaggedTensors have mismatched row_splits dtypes; use RaggedTensor.with_row_splits_dtype() to convert them to compatible dtypes.')\n        dtype = dtypes.int64\n        tensors = tuple((t.with_row_splits_dtype(dtypes.int64) if isinstance(t, RaggedTensor) else t for t in tensors))\n    elif has_int32:\n        dtype = dtypes.int32\n    else:\n        dtype = dtypes.int64\n    if return_dtype:\n        return (dtype, tensors)\n    else:\n        return tensors",
    "docstring": "Return a copy of with row_splits all having the same dtype. Args: *tensors: A list of Tensors or RaggedTensors. **kwargs: If 'return_dtype=True', then return a tuple (dtype, tensors), where is the data type used by row-splits, and is the converted list of and . Returns: The converted list of and .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\ragged_tensor.py",
    "ast_data": "FunctionDef name:match_row_splits_dtypes arguments arg arg Assign Call If Raise Call Assign Assign For If Call If Compare Assign Assign If BoolOp If Call Raise Call Assign Assign Call Call Call If Assign Assign If Return return:yes Return return:yes"
  },
  {
    "library": "numpy",
    "name": "atleast_1d",
    "source_code": "@array_function_dispatch(_atleast_1d_dispatcher)\ndef atleast_1d(*arys):\n    if len(arys) == 1:\n        result = asanyarray(arys[0])\n        if result.ndim == 0:\n            result = result.reshape(1)\n        return result\n    res = []\n    for ary in arys:\n        result = asanyarray(ary)\n        if result.ndim == 0:\n            result = result.reshape(1)\n        res.append(result)\n    return tuple(res)",
    "docstring": "Convert inputs to arrays with at least one dimension. Scalar inputs are converted to 1-dimensional arrays, whilst higher-dimensional inputs are preserved. Parameters ---------- arys1, arys2, ... : array_like One or more input arrays. Returns ------- ret : ndarray An array, or tuple of arrays, each with ``. Copies are made only if necessary. See Also -------- atleast_2d, atleast_3d Examples -------- >>> import numpy as np >>> np.atleast_1d(1.0) array([1.]) >>> x = np.arange(9.0).reshape(3,3) >>> np.atleast_1d(x) array([[0., 1., 2.], [3., 4., 5.], [6., 7., 8.]]) >>> np.atleast_1d(x) is x True >>> np.atleast_1d(1, [3, 4]) (array([1]), array([3, 4]))",
    "type": "function",
    "file_path": "numpy\\numpy\\_core\\shape_base.py",
    "ast_data": "FunctionDef name:atleast_1d arguments arg If Compare Call Assign Call If Compare Assign Call Return return:yes Assign For Assign Call If Compare Assign Call Call Return return:yes Call Call"
  },
  {
    "library": "django",
    "name": "debug",
    "source_code": "def debug(request, message, extra_tags='', fail_silently=False):\n    add_message(request, constants.DEBUG, message, extra_tags=extra_tags, fail_silently=fail_silently)",
    "docstring": "Add a message with the `` level.",
    "type": "function",
    "file_path": "django\\django\\contrib\\messages\\api.py",
    "ast_data": "FunctionDef name:debug arg:request arg:message arg:extra_tags arg:fail_silently arguments arg arg arg arg Call"
  },
  {
    "library": "pytorch",
    "name": "_extract_tensors",
    "source_code": "def _extract_tensors(obj):\n    tensors: list[torch.Tensor] = []\n    extractor = _TensorExtractor(io.BytesIO(), protocol=-1, tensors=tensors)\n    extractor.dump(obj)\n    return tensors",
    "docstring": "This function is exclusively called from C++. See ``. It extracts the tensors contained in the given object, through pickling.",
    "type": "function",
    "file_path": "pytorch\\torch\\_jit_internal.py",
    "ast_data": "FunctionDef name:_extract_tensors arg:obj arguments arg Assign Call Call Call Return return:yes"
  },
  {
    "library": "scipy",
    "name": "hfft2",
    "source_code": "@_dispatch\ndef hfft2(x, s=None, axes=(-2, -1), norm=None, overwrite_x=False, workers=None, *, plan=None):\n    return (Dispatchable(x, np.ndarray),)",
    "docstring": "Compute the 2-D FFT of a Hermitian complex array. Parameters ---------- x : array Input array, taken to be Hermitian complex. s : sequence of ints, optional Shape of the real output. axes : sequence of ints, optional Axes over which to compute the FFT. norm : {\"backward\", \"ortho\", \"forward\"}, optional Normalization mode (see ). Default is \"backward\". overwrite_x : bool, optional If True, the contents of can be destroyed; the default is False. See for more details. workers : int, optional Maximum number of workers to use for parallel computation. If negative, the value wraps around from `~scipy.fft.ffthfftnhfftn`. Examples -------- >>> import scipy.fft >>> import numpy as np >>> x = np.array([[1+0j, 2+0j], [2+0j, 1+0j]]) # Hermitian-symmetric input >>> scipy.fft.hfft2(x, s=(2, 2)) array([[ 6., 0.], [ 0., -2.]])",
    "type": "function",
    "file_path": "scipy\\scipy\\fft\\_basic.py",
    "ast_data": "FunctionDef name:hfft2 arg:x arg:s arg:axes arg:norm arg:overwrite_x arg:workers arguments arg arg arg arg arg arg arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "_quantize_affine",
    "source_code": "@register_custom_op\ndef _quantize_affine(input: torch.Tensor, block_size: list[int], scale: torch.Tensor, zero_point: Optional[torch.Tensor], output_dtype: torch.dtype, quant_min: Optional[Union[int, float, bool]]=None, quant_max: Optional[Union[int, float, bool]]=None, zero_point_domain: Optional[str]=ZeroPointDomain.INT.name) -> torch.Tensor:\n    quant_min, quant_max = _get_and_check_qmin_qmax(output_dtype, quant_min, quant_max)\n    if output_dtype in _SUB_BYTE_UINT_BOUNDS:\n        output_dtype = torch.uint8\n    return _quantize_affine_no_dtype_cast(input, block_size, scale, zero_point, quant_min, quant_max, zero_point_domain).to(output_dtype)",
    "docstring": "op definition that has compatible signatures with custom op library Note: zero_point_domain is optional specifies how we quantize the floating point to quantized data: INT: quantized_val = (float_val / scale) (integer) + zero_point (integer) FLOAT: quantized_val = (float_val - (zero_point (float) - scale * mid_point)) / scale None: quantized_val = (float_val / scale) | this is primarily used for floatx quantization Where we do not want to round values to nearest integer and instead scale and cast.",
    "type": "function",
    "file_path": "pytorch\\torch\\ao\\quantization\\pt2e\\_affine_quantization.py",
    "ast_data": "FunctionDef name:_quantize_affine arg:input arg:block_size arg:scale arg:zero_point arg:output_dtype arg:quant_min arg:quant_max arg:zero_point_domain arguments arg arg arg arg arg arg arg arg Assign Call If Compare Assign Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "_lower_to_native_backend",
    "source_code": "def _lower_to_native_backend(model: GraphModule, qconfig_map: dict[str, QConfigAny], node_name_to_scope: dict[str, tuple[str, type]], keep_original_weights: bool=False) -> GraphModule:\n    _lower_static_weighted_ref_module(model, qconfig_map)\n    _lower_static_weighted_ref_module_with_two_inputs(model, qconfig_map)\n    _lower_dynamic_weighted_ref_module(model)\n    _lower_weight_only_weighted_ref_module(model)\n    _lower_static_weighted_ref_functional(model, qconfig_map)\n    _lower_dynamic_weighted_ref_functional(model, qconfig_map)\n    _lower_quantized_binary_op(model, qconfig_map)\n    _lower_getattr_tensor_metadta_op(model)\n    _lower_get_tensor_info_op(model)\n    special_pattern_replacement(model)\n    model.graph.eliminate_dead_code()\n    model = fold_weight(model, node_name_to_scope, keep_original_weights)\n    model.graph.eliminate_dead_code()\n    model.recompile()\n    model.graph.lint()\n    return model",
    "docstring": "Lower a quantized reference model (with reference quantized operator patterns) to the native backend in PyTorch (fbgemm/qnnpack), both backends shares the same operator signature so they can be lowered with the same function",
    "type": "function",
    "file_path": "pytorch\\torch\\ao\\quantization\\fx\\_lower_to_native_backend.py",
    "ast_data": "FunctionDef name:_lower_to_native_backend arg:model arg:qconfig_map arg:node_name_to_scope arg:keep_original_weights arguments arg arg arg arg Call Call Call Call Call Call Call Call Call Call Call Assign Call Call Call Call Return return:yes"
  },
  {
    "library": "django",
    "name": "_get_dated_items",
    "source_code": "def _get_dated_items(self, date):\n    lookup_kwargs = self._make_single_date_lookup(date)\n    qs = self.get_dated_queryset(**lookup_kwargs)\n    return (None, qs, {'day': date, 'previous_day': self.get_previous_day(date), 'next_day': self.get_next_day(date), 'previous_month': self.get_previous_month(date), 'next_month': self.get_next_month(date)})",
    "docstring": "Do the actual heavy lifting of getting the dated items; this accepts a date object so that TodayArchiveView can be trivial.",
    "type": "method",
    "file_path": "django\\django\\views\\generic\\dates.py",
    "ast_data": "FunctionDef name:_get_dated_items arg:self arg:date arguments arg arg Assign Call Assign Call Return return:yes Call Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "_Real",
    "source_code": "class _Real(Constraint):\n\n    def check(self, value):\n        return value == value",
    "docstring": "Trivially constrain to the extended real line .",
    "type": "class",
    "file_path": "pytorch\\torch\\distributions\\constraints.py",
    "ast_data": "ClassDef name:_Real FunctionDef name:check arg:self arg:value arguments arg arg Return return:yes Compare"
  },
  {
    "library": "scipy",
    "name": "getH",
    "source_code": "def getH(self):\n    return self.conjugate().transpose()",
    "docstring": "Return the Hermitian transpose of this matrix. See Also -------- numpy.matrix.getH : NumPy's implementation of for matrices",
    "type": "method",
    "file_path": "scipy\\scipy\\sparse\\_matrix.py",
    "ast_data": "FunctionDef name:getH arg:self arguments arg Return return:yes Call Call"
  },
  {
    "library": "scipy",
    "name": "eigenvectors",
    "source_code": "def eigenvectors(self, m=None):\n    _, ind = self._eigenvalue_ordering(m)\n    if m is None:\n        grid_shape_min = self.grid_shape\n    else:\n        grid_shape_min = min(self.grid_shape, tuple(np.ones_like(self.grid_shape) * m))\n    N_indices = np.unravel_index(ind, grid_shape_min)\n    N_indices = [tuple(x) for x in zip(*N_indices)]\n    eigenvectors_list = [self._one_eve(k) for k in N_indices]\n    return np.column_stack(eigenvectors_list)",
    "docstring": "Return the requested number of eigenvectors for ordered eigenvalues. Parameters ---------- m : int, optional The positive number of eigenvectors to return. If not provided, then all eigenvectors will be returned. Returns ------- eigenvectors : float array An array with columns made of the requested or all eigenvectors. The columns are ordered according to the ordered eigenvalues.",
    "type": "method",
    "file_path": "scipy\\scipy\\sparse\\linalg\\_special_sparse_arrays.py",
    "ast_data": "FunctionDef name:eigenvectors arg:self arg:m arguments arg arg Assign Call If Compare Assign Assign Call Call Call Assign Call Assign Call Call Assign Call Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "_draw_list_compositing_images",
    "source_code": "def _draw_list_compositing_images(renderer, parent, artists, suppress_composite=None):\n    has_images = any((isinstance(x, _ImageBase) for x in artists))\n    not_composite = suppress_composite if suppress_composite is not None else renderer.option_image_nocomposite()\n    if not_composite or not has_images:\n        for a in artists:\n            a.draw(renderer)\n    else:\n        image_group = []\n        mag = renderer.get_image_magnification()\n\n        def flush_images():\n            if len(image_group) == 1:\n                image_group[0].draw(renderer)\n            elif len(image_group) > 1:\n                data, l, b = composite_images(image_group, renderer, mag)\n                if data.size != 0:\n                    gc = renderer.new_gc()\n                    gc.set_clip_rectangle(parent.bbox)\n                    gc.set_clip_path(parent.get_clip_path())\n                    renderer.draw_image(gc, round(l), round(b), data)\n                    gc.restore()\n            del image_group[:]\n        for a in artists:\n            if isinstance(a, _ImageBase) and a.can_composite() and a.get_clip_on() and (not a.get_clip_path()):\n                image_group.append(a)\n            else:\n                flush_images()\n                a.draw(renderer)\n        flush_images()",
    "docstring": "Draw a sorted list of artists, compositing images into a single image where possible. For internal Matplotlib use only: It is here to reduce duplication between and , but otherwise should not be generally useful.",
    "type": "function",
    "file_path": "matplotlib\\lib\\matplotlib\\image.py",
    "ast_data": "FunctionDef name:_draw_list_compositing_images arg:renderer arg:parent arg:artists arg:suppress_composite arguments arg arg arg arg Assign Call Call Assign Compare Call If BoolOp For Call Assign Assign Call FunctionDef name:flush_images arguments If Compare Call Call If Compare Call Assign Call If Compare Assign Call Call Call Call Call Call Call Call For If BoolOp Call Call Call Call Call Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "record_status",
    "source_code": "def record_status(accuracy_status, dynamo_start_stats):\n    if current_name in self.non_deterministic_models:\n        if accuracy_status in ('pass', 'eager_two_runs_differ', 'fail_accuracy'):\n            accuracy_status = 'pass'\n    headers = ['dev', 'name', 'batch_size', 'accuracy']\n    fields = [current_device, current_name, current_batch_size, accuracy_status]\n    if tag is not None:\n        headers.insert(3, 'tag')\n        fields.insert(3, tag)\n    o_headers = list(headers)\n    o_fields = list(fields)\n    dynamo_stats = get_dynamo_stats()\n    dynamo_stats.subtract(dynamo_start_stats)\n    for k, v in dynamo_stats.items():\n        headers.append(k)\n        fields.append(v)\n    total_wall_time = output_signpost(dict(zip(o_headers, o_fields)), self.args, self.suite_name)\n    headers.append('compilation_latency')\n    fields.append(total_wall_time)\n    write_outputs(output_filename, headers, fields)\n    if self.args.print_compilation_time:\n        print(f'Compilation time (from dynamo_timed): {total_wall_time}')\n    return accuracy_status",
    "docstring": "Records the status in the csv file",
    "type": "method",
    "file_path": "pytorch\\benchmarks\\dynamo\\common.py",
    "ast_data": "FunctionDef name:record_status arg:accuracy_status arg:dynamo_start_stats arguments arg arg If Compare If Compare Assign Assign Assign If Compare Call Call Assign Call Assign Call Assign Call Call For Call Call Call Assign Call Call Call Call Call Call If Call Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "transform",
    "source_code": "def transform(self, X):\n    output_config_dense = _get_output_config('transform', estimator=self)['dense']\n    preserve_X = output_config_dense != 'default' and _is_pandas_df(X)\n    X = validate_data(self, X, dtype=None, accept_sparse='csr', ensure_all_finite=not get_tags(self).input_tags.allow_nan, skip_check_array=preserve_X, reset=False)\n    return self._transform(X)",
    "docstring": "Reduce X to the selected features. Parameters ---------- X : array of shape [n_samples, n_features] The input samples. Returns ------- X_r : array of shape [n_samples, n_selected_features] The input samples with only the selected features.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\feature_selection\\_base.py",
    "ast_data": "FunctionDef name:transform arg:self arg:X arguments arg arg Assign Call Assign BoolOp Compare Call Assign Call Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "argmax",
    "source_code": "@_apply_docstring_templates\ndef argmax(input: Union[Tensor, MaskedTensor], dim: Optional[int]=None, *, keepdim: Optional[bool]=False, dtype: Optional[DType]=None, mask: Optional[Tensor]=None) -> Tensor:\n    if dtype is None:\n        dtype = input.dtype\n    mask_input = _combine_input_and_mask(argmax, input, mask)\n    if mask_input.layout == torch.strided:\n        return torch.argmax(mask_input, dim, bool(keepdim)).to(dtype=dtype)\n    else:\n        raise ValueError(f'masked argmax expects strided tensor (got {mask_input.layout} tensor)')",
    "docstring": "{reduction_signature} {reduction_descr} {reduction_identity_dtype} {reduction_args} {reduction_example}",
    "type": "function",
    "file_path": "pytorch\\torch\\masked\\_ops.py",
    "ast_data": "FunctionDef name:argmax arg:input arg:dim arguments arg arg arg arg arg If Compare Assign Assign Call If Compare Return return:yes Call Call Call Raise Call"
  },
  {
    "library": "matplotlib",
    "name": "set_mask",
    "source_code": "def set_mask(self, mask):\n    if mask is None:\n        self.mask = None\n    else:\n        self.mask = np.asarray(mask, dtype=bool)\n        if self.mask.shape != (self.triangles.shape[0],):\n            raise ValueError('mask array must have same length as triangles array')\n    if self._cpp_triangulation is not None:\n        self._cpp_triangulation.set_mask(self.mask if self.mask is not None else ())\n    self._edges = None\n    self._neighbors = None\n    if self._trifinder is not None:\n        self._trifinder._initialize()",
    "docstring": "Set or clear the mask array. Parameters ---------- mask : None or bool array of length ntri",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\tri\\_triangulation.py",
    "ast_data": "FunctionDef name:set_mask arg:self arg:mask arguments arg arg If Compare Assign Assign Call If Compare Raise Call If Compare Call Compare Assign Assign If Compare Call"
  },
  {
    "library": "matplotlib",
    "name": "get_bbox_for_cb",
    "source_code": "def get_bbox_for_cb(self, rows=0, cols=0):\n    rows = np.atleast_1d(rows)\n    cols = np.atleast_1d(cols)\n    bbox = Bbox.from_extents(self.lefts[cols[0]].value() + self.margins['leftcb'][cols[0]].value(), self.bottoms[rows[-1]].value() + self.margins['bottomcb'][rows[-1]].value(), self.rights[cols[-1]].value() - self.margins['rightcb'][cols[-1]].value(), self.tops[rows[0]].value() - self.margins['topcb'][rows[0]].value())\n    return bbox",
    "docstring": "Return the bounding box that includes the decorations but, *not* the colorbar...",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\_layoutgrid.py",
    "ast_data": "FunctionDef name:get_bbox_for_cb arg:self arg:rows arg:cols arguments arg arg arg Assign Call Assign Call Assign Call Call Call Call Call Call Call Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "api_available",
    "source_code": "def api_available(self):\n    return self._use_api",
    "docstring": "Return if the Cloud TPU API is available, if not certain features will not work.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\tpu\\client\\client.py",
    "ast_data": "FunctionDef name:api_available arg:self arguments arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "constant_to_device",
    "source_code": "def constant_to_device(self, device: torch.device) -> IRNode:\n    loader = self.make_loader()\n    loader = patch.object(ConstantBuffer, 'override_device', device)(loader)\n    return Scatter(device=device, dtype=self.dtype, inner_fn=loader, ranges=self.ranges, output_indexer=self.output_indexer, scatter_mode=self.scatter_mode)",
    "docstring": "Move this to a given device. Requires that all reads are to constants.",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\ir.py",
    "ast_data": "FunctionDef name:constant_to_device arg:self arg:device arguments arg arg Assign Call Assign Call Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "parse_example_spec",
    "source_code": "@property\ndef parse_example_spec(self):\n    config = {}\n    for key in self.keys:\n        if isinstance(key, fc_types.FeatureColumn):\n            config.update(key.parse_example_spec)\n        elif isinstance(key, fc_old._FeatureColumn):\n            config.update(key._parse_example_spec)\n        else:\n            config.update({key: parsing_ops.VarLenFeature(dtypes.string)})\n    return config",
    "docstring": "See base class.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\feature_column\\feature_column_v2.py",
    "ast_data": "FunctionDef name:parse_example_spec arg:self arguments arg Assign For If Call Call If Call Call Call Call Return return:yes"
  },
  {
    "library": "cherrypy",
    "name": "current_ids",
    "source_code": "def current_ids():\n    name, group = (None, None)\n    if pwd:\n        name = pwd.getpwuid(os.getuid())[0]\n    if grp:\n        group = grp.getgrgid(os.getgid())[0]\n    return (name, group)",
    "docstring": "Return the current (uid, gid) if available.",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\process\\plugins.py",
    "ast_data": "FunctionDef name:current_ids arguments Assign If Assign Call Call If Assign Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_as_shape_list",
    "source_code": "def _as_shape_list(shapes, dtypes, unknown_dim_allowed=False, unknown_rank_allowed=False):\n    del dtypes\n    if unknown_dim_allowed:\n        if not isinstance(shapes, collections_abc.Sequence) or not shapes or any((shape is None or isinstance(shape, int) for shape in shapes)):\n            raise ValueError('When providing partial shapes, a list of shapes must be provided.')\n    if shapes is None:\n        return None\n    if isinstance(shapes, tensor_shape.TensorShape):\n        shapes = [shapes]\n    if not isinstance(shapes, (tuple, list)):\n        raise TypeError(f'Shapes must be a TensorShape or a list or tuple of TensorShapes, got {type(shapes)} instead.')\n    if all((shape is None or isinstance(shape, int) for shape in shapes)):\n        shapes = [shapes]\n    shapes = [tensor_shape.as_shape(shape) for shape in shapes]\n    if not unknown_dim_allowed:\n        if any((not shape.is_fully_defined() for shape in shapes)):\n            raise ValueError(f'All shapes must be fully defined: {shapes}')\n    if not unknown_rank_allowed:\n        if any((shape.dims is None for shape in shapes)):\n            raise ValueError(f'All shapes must have a defined rank: {shapes}')\n    return shapes",
    "docstring": "Convert shapes to a list of tuples of int (or None).",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\data_flow_ops.py",
    "ast_data": "FunctionDef name:_as_shape_list arg:shapes arg:dtypes arg:unknown_dim_allowed arg:unknown_rank_allowed arguments arg arg arg arg If If BoolOp Call Call BoolOp Compare Call Raise Call If Compare Return return:no If Call Assign If Call Raise Call Call If Call BoolOp Compare Call Assign Assign Call If If Call Call Raise Call If If Call Compare Raise Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "maybe_get_remote_value",
    "source_code": "def maybe_get_remote_value(val):\n    if isinstance(val, remote_value.RemoteValue):\n        error = val._get_error()\n        if error:\n            raise AssertionError(\"RemoteValue doesn't have a value because it has error %r:%s\" % (error, error))\n        elif val._status is not remote_value.RemoteValueStatus.READY:\n            raise AssertionError('The input RemoteValue has not been executed.')\n        else:\n            return val._get_values()\n    else:\n        return val",
    "docstring": "Gets the value of if it is a .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\coordinator\\coordinator_context.py",
    "ast_data": "FunctionDef name:maybe_get_remote_value arg:val arguments arg If Call Assign Call If Raise Call If Compare Raise Call Return return:yes Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_should_record_summaries_internal",
    "source_code": "def _should_record_summaries_internal(default_state):\n    if _summary_state.writer is None:\n        return constant_op.constant(False)\n    if not callable(_summary_state.is_recording):\n        static_cond = tensor_util.constant_value(_summary_state.is_recording)\n        if static_cond is not None and (not static_cond):\n            return constant_op.constant(False)\n    resolve = lambda x: x() if callable(x) else x\n    cond_distributed = resolve(_summary_state.is_recording_distribution_strategy)\n    cond = resolve(_summary_state.is_recording)\n    if cond is None:\n        cond = default_state\n    return math_ops.logical_and(cond_distributed, cond)",
    "docstring": "Returns boolean Tensor if summaries should/shouldn't be recorded. Now the summary condition is decided by logical \"and\" of below conditions: First, summary writer must be set. Given this constraint is met, ctx.summary_recording and ctx.summary_recording_distribution_strategy. The former one is usually set by user, and the latter one is controlled by DistributionStrategy (tf.distribute.ReplicaContext). Args: default_state: can be True or False. The default summary behavior when summary writer is set and the user does not specify ctx.summary_recording and ctx.summary_recording_distribution_strategy is True.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\summary_ops_v2.py",
    "ast_data": "FunctionDef name:_should_record_summaries_internal arg:default_state arguments arg If Compare Return return:yes Call If Call Assign Call If BoolOp Compare Return return:yes Call Assign arguments arg Call Call Assign Call Assign Call If Compare Assign Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "Problem15",
    "source_code": "class Problem15(Benchmark):\n\n    def __init__(self, dimensions=1):\n        Benchmark.__init__(self, dimensions)\n        self._bounds = [(-5.0, 5.0)]\n        self.global_optimum = 2.41422\n        self.fglob = -0.03553\n\n    def fun(self, x, *args):\n        self.nfev += 1\n        x = x[0]\n        return -(-x ** 2.0 + 5 * x - 6) / (x ** 2 + 1)",
    "docstring": "Univariate Problem15 objective function. This class defines the Univariate Problem15 global optimization problem. This is a multimodal minimization problem defined as follows: .. math:: f_{\\text{Problem15}}(x) = \\frac{x^{2} - 5 x + 6}{x^{2} + 1} Bound constraints: :math: .. figure:: figures/Problem15.png :alt: Univariate Problem15 function :align: center **Univariate Problem15 function** *Global optimum*: :math: for :math:",
    "type": "class",
    "file_path": "scipy\\benchmarks\\benchmarks\\go_benchmark_functions\\go_funcs_univariate.py",
    "ast_data": "ClassDef name:Problem15 FunctionDef name:__init__ arg:self arg:dimensions arguments arg arg Call Assign Assign Assign FunctionDef name:fun arg:self arg:x arguments arg arg arg Assign Return return:yes"
  },
  {
    "library": "scipy",
    "name": "Bohachevsky2",
    "source_code": "class Bohachevsky2(Benchmark):\n\n    def __init__(self, dimensions=2):\n        Benchmark.__init__(self, dimensions)\n        self._bounds = list(zip([-100.0] * self.N, [100.0] * self.N))\n        self.global_optimum = [[0 for _ in range(self.N)]]\n        self.fglob = 0.0\n\n    def fun(self, x, *args):\n        self.nfev += 1\n        return x[0] ** 2 + 2 * x[1] ** 2 - 0.3 * cos(3 * pi * x[0]) * cos(4 * pi * x[1]) + 0.3",
    "docstring": "Bohachevsky 2 objective function. The Bohachevsky 2 [1]_ global optimization problem is a multimodal minimization problem defined as follows .. math:: f_{\\text{Bohachevsky}}(x) = \\sum_{i=1}^{n-1}\\left[x_i^2 + 2 x_{i+1}^2 - 0.3 \\cos(3 \\pi x_i) - 0.4 \\cos(4 \\pi x_{i + 1}) + 0.7 \\right] Here, :math: represents the number of dimensions and :math: for :math:. *Global optimum*: :math: for :math: for :math: .. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions For Global Optimization Problems Int. Journal of Mathematical Modelling and Numerical Optimisation, 2013, 4, 150-194. TODO: equation needs to be fixed up in the docstring. Jamil is also wrong. There should be no 0.4 factor in front of the cos term",
    "type": "class",
    "file_path": "scipy\\benchmarks\\benchmarks\\go_benchmark_functions\\go_funcs_B.py",
    "ast_data": "ClassDef name:Bohachevsky2 FunctionDef name:__init__ arg:self arg:dimensions arguments arg arg Call Assign Call Call Assign Call Assign FunctionDef name:fun arg:self arg:x arguments arg arg arg Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "_unqualified_variable_name",
    "source_code": "def _unqualified_variable_name(qualified_name: str) -> str:\n    name_atoms = qualified_name.split('.')\n    for i, atom in reversed(list(enumerate(name_atoms))):\n        if not atom.isnumeric():\n            return '.'.join(name_atoms[i:])\n    return qualified_name",
    "docstring": "Parse qualified variable name and return the unqualified version. Pure numeric atoms are considered inadequate, so this function will look past them, and start from the first non-numeric atom. Example: >>> _unqualified_variable_name(\"__main__.Foo.bar\") 'bar' >>> _unqualified_variable_name(\"__main__.Foo.bar.0\") 'bar.0'",
    "type": "function",
    "file_path": "pytorch\\torch\\onnx\\utils.py",
    "ast_data": "FunctionDef name:_unqualified_variable_name arg:qualified_name arguments arg Assign Call For Call Call Call If Call Return return:yes Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "is_available",
    "source_code": "def is_available() -> bool:\n    return True",
    "docstring": "Returns a bool indicating if CPU is currently available. N.B. This function only exists to facilitate device-agnostic code",
    "type": "function",
    "file_path": "pytorch\\torch\\cpu\\__init__.py",
    "ast_data": "FunctionDef name:is_available arguments Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "get_next_as_optional",
    "source_code": "@abc.abstractmethod\ndef get_next_as_optional(self):\n    raise NotImplementedError('Iterator.get_next_as_optional()')",
    "docstring": "Returns the next element wrapped in . If the iterator has reached the end of the sequence, the returned will have no value. >>> dataset = tf.data.Dataset.from_tensors(42) >>> iterator = iter(dataset) >>> optional = iterator.get_next_as_optional() >>> print(optional.has_value()) tf.Tensor(True, shape=(), dtype=bool) >>> print(optional.get_value()) tf.Tensor(42, shape=(), dtype=int32) >>> optional = iterator.get_next_as_optional() >>> print(optional.has_value()) tf.Tensor(False, shape=(), dtype=bool) Returns: A object representing the next element.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\data\\ops\\iterator_ops.py",
    "ast_data": "FunctionDef name:get_next_as_optional arg:self arguments arg Raise Call"
  },
  {
    "library": "matplotlib",
    "name": "release",
    "source_code": "def release(self, event):\n    if not self.ignore(event) and self._eventpress:\n        event = self._clean_event(event)\n        self._eventrelease = event\n        self._release(event)\n        self._eventpress = None\n        self._eventrelease = None\n        self._state.discard('move')\n        return True\n    return False",
    "docstring": "Button release event handler and validator.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\widgets.py",
    "ast_data": "FunctionDef name:release arg:self arg:event arguments arg arg If BoolOp Call Assign Call Assign Call Assign Assign Call Return return:yes Return return:yes"
  },
  {
    "library": "cherrypy",
    "name": "__init__",
    "source_code": "def __init__(self):\n    self.status = None\n    self.header_list = None\n    self._body = []\n    self.time = time.time()\n    self.headers = httputil.HeaderMap()\n    dict.update(self.headers, {'Content-Type': 'text/html', 'Server': 'CherryPy/' + cherrypy.__version__, 'Date': httputil.HTTPDate(self.time)})\n    self.cookie = SimpleCookie()",
    "docstring": "Intialize the HTTP response instance.",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\_cprequest.py",
    "ast_data": "FunctionDef name:__init__ arg:self arguments arg Assign Assign Assign Assign Call Assign Call Call Call Assign Call"
  },
  {
    "library": "pytorch",
    "name": "ConvBn3d",
    "source_code": "class ConvBn3d(_ConvBnNd, nn.Conv3d):\n    _FLOAT_MODULE: ClassVar[type[nni.ConvBn3d]] = nni.ConvBn3d\n    _FLOAT_CONV_MODULE: ClassVar[type[nn.Conv3d]] = nn.Conv3d\n    _FLOAT_BN_MODULE: ClassVar[Optional[type[nn.Module]]] = nn.BatchNorm3d\n    _FLOAT_RELU_MODULE: ClassVar[Optional[type[nn.Module]]] = None\n\n    def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, groups=1, bias=None, padding_mode='zeros', eps=1e-05, momentum=0.1, freeze_bn=False, qconfig=None):\n        kernel_size = _triple(kernel_size)\n        stride = _triple(stride)\n        padding = _triple(padding)\n        dilation = _triple(dilation)\n        _ConvBnNd.__init__(self, in_channels, out_channels, kernel_size, stride, padding, dilation, False, _triple(0), groups, bias, padding_mode, eps, momentum, freeze_bn, qconfig, dim=3)",
    "docstring": "A ConvBn3d module is a module fused from Conv3d and BatchNorm3d, attached with FakeQuantize modules for weight, used in quantization aware training. We combined the interface of :class: and :class:. Similar to :class:, with FakeQuantize modules initialized to default. Attributes: freeze_bn: weight_fake_quant: fake quant module for weight",
    "type": "class",
    "file_path": "pytorch\\torch\\ao\\nn\\intrinsic\\qat\\modules\\conv_fused.py",
    "ast_data": "ClassDef name:ConvBn3d FunctionDef name:__init__ arg:self arg:in_channels arg:out_channels arg:kernel_size arg:stride arg:padding arg:dilation arg:groups arg:bias arg:padding_mode arg:eps arg:momentum arg:freeze_bn arg:qconfig arguments arg arg arg arg arg arg arg arg arg arg arg arg arg arg Assign Call Assign Call Assign Call Assign Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "StrobelightCLIProfilerError",
    "source_code": "class StrobelightCLIProfilerError(Exception):\n    pass",
    "docstring": "Raised when an error happens during strobelight profiling",
    "type": "class",
    "file_path": "pytorch\\torch\\_strobelight\\cli_function_profiler.py",
    "ast_data": "ClassDef name:StrobelightCLIProfilerError"
  },
  {
    "library": "seaborn",
    "name": "evaluate",
    "source_code": "def evaluate(self, points):\n    points = atleast_2d(asarray(points))\n    d, m = points.shape\n    if d != self.d:\n        if d == 1 and m == self.d:\n            points = reshape(points, (self.d, 1))\n            m = 1\n        else:\n            msg = f'points have dimension {d}, dataset has dimension {self.d}'\n            raise ValueError(msg)\n    output_dtype = np.common_type(self.covariance, points)\n    result = zeros((m,), dtype=output_dtype)\n    whitening = linalg.cholesky(self.inv_cov)\n    scaled_dataset = dot(whitening, self.dataset)\n    scaled_points = dot(whitening, points)\n    if m >= self.n:\n        for i in range(self.n):\n            diff = scaled_dataset[:, i, newaxis] - scaled_points\n            energy = sum(diff * diff, axis=0) / 2.0\n            result += self.weights[i] * exp(-energy)\n    else:\n        for i in range(m):\n            diff = scaled_dataset - scaled_points[:, i, newaxis]\n            energy = sum(diff * diff, axis=0) / 2.0\n            result[i] = sum(exp(-energy) * self.weights, axis=0)\n    result = result / self._norm_factor\n    return result",
    "docstring": "Evaluate the estimated pdf on a set of points. Parameters ---------- points : (# of dimensions, # of points)-array Alternatively, a (# of dimensions,) vector can be passed in and treated as a single point. Returns ------- values : (# of points,)-array The values at each point. Raises ------ ValueError : if the dimensionality of the input points is different than the dimensionality of the KDE.",
    "type": "method",
    "file_path": "seaborn\\seaborn\\external\\kde.py",
    "ast_data": "FunctionDef name:evaluate arg:self arg:points arguments arg arg Assign Call Call Assign If Compare If BoolOp Compare Compare Assign Call Assign Assign Raise Call Assign Call Assign Call Assign Call Assign Call Assign Call If Compare For Call Assign Assign Call Call For Call Assign Assign Call Assign Call Call Assign Return return:yes"
  },
  {
    "library": "numpy",
    "name": "_SupportsArrayFunc",
    "source_code": "@runtime_checkable\nclass _SupportsArrayFunc(Protocol):\n\n    def __array_function__(self, func: Callable[..., Any], types: Collection[type[Any]], args: tuple[Any, ...], kwargs: dict[str, Any]) -> object:\n        ...",
    "docstring": "A protocol class representing .",
    "type": "class",
    "file_path": "numpy\\numpy\\_typing\\_array_like.py",
    "ast_data": "ClassDef name:_SupportsArrayFunc FunctionDef name:__array_function__ arg:self arg:func arg:types arg:args arg:kwargs arguments arg arg arg arg arg"
  },
  {
    "library": "pytorch",
    "name": "is_postrelease",
    "source_code": "@property\ndef is_postrelease(self) -> bool:\n    return self.post is not None",
    "docstring": "Whether this version is a post-release. >>> Version(\"1.2.3\").is_postrelease False >>> Version(\"1.2.3.post1\").is_postrelease True",
    "type": "method",
    "file_path": "pytorch\\torch\\_vendor\\packaging\\version.py",
    "ast_data": "FunctionDef name:is_postrelease arg:self arguments arg Return return:yes Compare"
  },
  {
    "library": "sphinx",
    "name": "stem",
    "source_code": "def stem(self, word: str) -> str:\n    return word",
    "docstring": "This method implements stemming algorithm of the Python version. Default implementation does nothing. You should implement this if the language has any stemming rules. This class is used to preprocess search words before registering them in the search index. The stemming of the Python version and the JS version (given in the js_stemmer_code attribute) must be compatible.",
    "type": "method",
    "file_path": "sphinx\\sphinx\\search\\__init__.py",
    "ast_data": "FunctionDef name:stem arg:self arg:word arguments arg arg Return return:yes"
  },
  {
    "library": "pandas",
    "name": "all",
    "source_code": "def all(self, axis=None, *args, **kwargs):\n    nv.validate_all(args, kwargs)\n    values = self.sp_values\n    if len(values) != len(self) and (not np.all(self.fill_value)):\n        return False\n    return values.all()",
    "docstring": "Tests whether all elements evaluate True Returns ------- all : bool See Also -------- numpy.all",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\arrays\\sparse\\array.py",
    "ast_data": "FunctionDef name:all arg:self arg:axis arguments arg arg arg arg Call Assign If BoolOp Compare Call Call Call Return return:yes Return return:yes Call"
  },
  {
    "library": "django",
    "name": "get_context_object_name",
    "source_code": "def get_context_object_name(self, object_list):\n    if self.context_object_name:\n        return self.context_object_name\n    elif hasattr(object_list, 'model'):\n        return '%s_list' % object_list.model._meta.model_name\n    else:\n        return None",
    "docstring": "Get the name of the item to be used in the context.",
    "type": "method",
    "file_path": "django\\django\\views\\generic\\list.py",
    "ast_data": "FunctionDef name:get_context_object_name arg:self arg:object_list arguments arg arg If Return return:yes If Call Return return:yes Return return:no"
  },
  {
    "library": "tensorflow",
    "name": "activity_regularizer",
    "source_code": "@activity_regularizer.setter\ndef activity_regularizer(self, regularizer):\n    self._activity_regularizer = regularizer",
    "docstring": "Optional regularizer function for the output of this layer.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\base_layer_v1.py",
    "ast_data": "FunctionDef name:activity_regularizer arg:self arg:regularizer arguments arg arg Assign"
  },
  {
    "library": "pytorch",
    "name": "set_filename",
    "source_code": "def set_filename(filename: str, insert_device_ordinal: bool=False) -> None:\n    torch._C._cuda_tunableop_set_filename(filename, insert_device_ordinal)",
    "docstring": "Set the filename to use for input/output of tuning results. If :attr: is `` then the current device ordinal will be added to the given filename automatically. This can be used in a 1-process-per-gpu cenario to ensure all processes write to a separate file.",
    "type": "function",
    "file_path": "pytorch\\torch\\cuda\\tunable.py",
    "ast_data": "FunctionDef name:set_filename arg:filename arg:insert_device_ordinal arguments arg arg Call"
  },
  {
    "library": "scikit-learn",
    "name": "_concatenate_indicator",
    "source_code": "def _concatenate_indicator(self, X_imputed, X_indicator):\n    if not self.add_indicator:\n        return X_imputed\n    if sp.issparse(X_imputed):\n        hstack = partial(sp.hstack, format=X_imputed.format)\n    else:\n        hstack = np.hstack\n    if X_indicator is None:\n        raise ValueError('Data from the missing indicator are not provided. Call _fit_indicator and _transform_indicator in the imputer implementation.')\n    return hstack((X_imputed, X_indicator))",
    "docstring": "Concatenate indicator mask with the imputed data.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\impute\\_base.py",
    "ast_data": "FunctionDef name:_concatenate_indicator arg:self arg:X_imputed arg:X_indicator arguments arg arg arg If Return return:yes If Call Assign Call Assign If Compare Raise Call Return return:yes Call"
  },
  {
    "library": "django",
    "name": "unpack",
    "source_code": "def unpack(structure, data):\n    return struct.unpack('<' + structure, bytes.fromhex(data))",
    "docstring": "Unpack little endian hexlified binary string into a list.",
    "type": "function",
    "file_path": "django\\django\\contrib\\gis\\db\\backends\\postgis\\pgraster.py",
    "ast_data": "FunctionDef name:unpack arg:structure arg:data arguments arg arg Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "set_rng_state",
    "source_code": "def set_rng_state(new_state: Tensor, device: Union[int, str, torch.device]='mtia') -> None:\n    warnings.warn('set_rng_state is not implemented in torch.mtia', UserWarning, stacklevel=2)",
    "docstring": "Sets the random number generator state. Args: new_state (torch.ByteTensor): The desired state device (torch.device or int, optional): The device to set the RNG state. Default: ``, the current mtia device).",
    "type": "function",
    "file_path": "pytorch\\torch\\mtia\\__init__.py",
    "ast_data": "FunctionDef name:set_rng_state arg:new_state arg:device arguments arg arg Call"
  },
  {
    "library": "seaborn",
    "name": "_AxesStyle",
    "source_code": "class _AxesStyle(_RCAesthetics):\n    _keys = _style_keys\n    _set = staticmethod(set_style)",
    "docstring": "Light wrapper on a dict to set style temporarily.",
    "type": "class",
    "file_path": "seaborn\\seaborn\\rcmod.py",
    "ast_data": "ClassDef name:_AxesStyle Assign Assign Call"
  },
  {
    "library": "tensorflow",
    "name": "check_graphs",
    "source_code": "def check_graphs(*args):\n    graph = None\n    for i, sgv in enumerate(args):\n        if graph is None and sgv.graph is not None:\n            graph = sgv.graph\n        elif sgv.graph is not None and sgv.graph is not graph:\n            raise ValueError(f'args[{i}] does not belong to the same graph as other arguments.')",
    "docstring": "Check that all the element in args belong to the same graph. Args: *args: a list of object with a obj.graph property. Raises: ValueError: if all the elements do not belong to the same graph.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\op_selector.py",
    "ast_data": "FunctionDef name:check_graphs arguments arg Assign For Call If BoolOp Compare Compare Assign If BoolOp Compare Compare Raise Call"
  },
  {
    "library": "tensorflow",
    "name": "_prune_invalid_weights",
    "source_code": "def _prune_invalid_weights(sparse_ids, sparse_weights):\n    if sparse_weights is not None:\n        is_weights_valid = math_ops.greater(sparse_weights.values, 0)\n        sparse_ids = sparse_ops.sparse_retain(sparse_ids, is_weights_valid)\n        sparse_weights = sparse_ops.sparse_retain(sparse_weights, is_weights_valid)\n    return (sparse_ids, sparse_weights)",
    "docstring": "Prune invalid weights (< 0) from the input ids and weights.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\embedding_ops.py",
    "ast_data": "FunctionDef name:_prune_invalid_weights arg:sparse_ids arg:sparse_weights arguments arg arg If Compare Assign Call Assign Call Assign Call Return return:yes"
  },
  {
    "library": "pandas",
    "name": "get_objs_combined_axis",
    "source_code": "def get_objs_combined_axis(objs, intersect: bool=False, axis: Axis=0, sort: bool=True) -> Index:\n    obs_idxes = [obj._get_axis(axis) for obj in objs]\n    return _get_combined_index(obs_idxes, intersect=intersect, sort=sort)",
    "docstring": "Extract combined index: return intersection or union (depending on the value of \"intersect\") of indexes on given axis, or None if all objects lack indexes (e.g. they are numpy arrays). Parameters ---------- objs : list Series or DataFrame objects, may be mix of the two. intersect : bool, default False If True, calculate the intersection between indexes. Otherwise, calculate the union. axis : {0 or 'index', 1 or 'outer'}, default 0 The axis to extract indexes from. sort : bool, default True Whether the result index should come out sorted or not. Returns ------- Index",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\indexes\\api.py",
    "ast_data": "FunctionDef name:get_objs_combined_axis arg:objs arg:intersect arg:axis arg:sort arguments arg arg arg arg Assign Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_load",
    "source_code": "def _load(self):\n    module = _importlib.import_module(self.__name__)\n    self._parent_module_globals[self._local_name] = module\n    self.__dict__.update(module.__dict__)\n    return module",
    "docstring": "Import the target module and insert it into the parent's namespace.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\virtual_root_template_v2.__init__.py",
    "ast_data": "FunctionDef name:_load arg:self arguments arg Assign Call Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "reset",
    "source_code": "def reset(self):\n    pass",
    "docstring": "Resets the timer.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\training\\basic_session_run_hooks.py",
    "ast_data": "FunctionDef name:reset arg:self arguments arg"
  },
  {
    "library": "django",
    "name": "is_naive",
    "source_code": "def is_naive(value):\n    return value.utcoffset() is None",
    "docstring": "Determine if a given datetime.datetime is naive. The concept is defined in Python's docs: Assuming value.tzinfo is either None or a proper datetime.tzinfo, value.utcoffset() implements the appropriate logic.",
    "type": "function",
    "file_path": "django\\django\\utils\\timezone.py",
    "ast_data": "FunctionDef name:is_naive arg:value arguments arg Return return:yes Compare Call"
  },
  {
    "library": "pytorch",
    "name": "_scalar",
    "source_code": "def _scalar(x: torch.Tensor):\n    assert x.numel() == 1\n    return x[0]",
    "docstring": "Convert a scalar tensor into a Python value.",
    "type": "function",
    "file_path": "pytorch\\torch\\onnx\\_internal\\jit_utils.py",
    "ast_data": "FunctionDef name:_scalar arg:x arguments arg Compare Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "get_runtime_arg_values",
    "source_code": "@override\ndef get_runtime_arg_values(self, **kwargs: Any) -> list[Any]:\n    return [kwargs[arg.name] for arg in self.get_runtime_arg_info()]",
    "docstring": "Helper method to retrieve runtime args from generate kwargs",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\codegen\\rocm\\ck_template.py",
    "ast_data": "FunctionDef name:get_runtime_arg_values arg:self arguments arg arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_select_which_to_enqueue",
    "source_code": "def _select_which_to_enqueue(tensor_list, keep_input):\n    select_i = math_ops.cast(keep_input, dtypes.int32)\n    tensor_list = [data_flow_ops.dynamic_partition(x, select_i, num_partitions=2)[1] for x in tensor_list]\n    return tensor_list",
    "docstring": "Select which examples to enqueue based on vector .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\training\\input.py",
    "ast_data": "FunctionDef name:_select_which_to_enqueue arg:tensor_list arg:keep_input arguments arg arg Assign Call Assign Call Return return:yes"
  },
  {
    "library": "django",
    "name": "clear_cache",
    "source_code": "def clear_cache(self):\n    self._cache.clear()",
    "docstring": "Clear out the content-type cache.",
    "type": "method",
    "file_path": "django\\django\\contrib\\contenttypes\\models.py",
    "ast_data": "FunctionDef name:clear_cache arg:self arguments arg Call"
  },
  {
    "library": "tensorflow",
    "name": "_pyval_field_major_to_node_major",
    "source_code": "def _pyval_field_major_to_node_major(keys, values, depth):\n    assert keys\n    if depth == 0:\n        return dict(zip(keys, values))\n    nvals = len(values[0])\n    assert all((nvals == len(values[i]) for i in range(1, len(values))))\n    return [_pyval_field_major_to_node_major(keys, value_slice, depth - 1) for value_slice in zip(*values)]",
    "docstring": "Regroup each field (k, v) from dict-of-list to list-of-dict. Given a \"field-major\" encoding of the StructuredTensor (which maps each key to a single nested list containing the values for all structs), return a corresponding \"node-major\" encoding, consisting of a nested list of dicts. Args: keys: The field names (list of string). Must not be empty. values: The field values (list of python values). Must have the same length as . depth: The list depth at which dictionaries should be created. Returns: A nested list of dict, with depth .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\structured\\structured_tensor.py",
    "ast_data": "FunctionDef name:_pyval_field_major_to_node_major arg:keys arg:values arg:depth arguments arg arg arg If Compare Return return:yes Call Call Assign Call Call Compare Call Call Call Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "set_checkpoint_debug_enabled",
    "source_code": "@contextlib.contextmanager\ndef set_checkpoint_debug_enabled(enabled: Optional[bool]):\n    global _checkpoint_debug_enabled\n    try:\n        prev = _checkpoint_debug_enabled\n        _checkpoint_debug_enabled = enabled\n        yield\n    finally:\n        _checkpoint_debug_enabled = prev",
    "docstring": "Context manager that sets whether checkpoint should print additional debug information when running. See the `~torch.utils.checkpoint.checkpoint` to this context. Args: enabled (bool): Whether checkpoint should print debug information. Default is 'None'.",
    "type": "function",
    "file_path": "pytorch\\torch\\utils\\checkpoint.py",
    "ast_data": "FunctionDef name:set_checkpoint_debug_enabled arg:enabled arguments arg Try Assign Assign Assign"
  },
  {
    "library": "tensorflow",
    "name": "less_equal",
    "source_code": "@dispatch.add_dispatch_support\n@doc_controls.do_not_generate_docs\ndef less_equal(x, y):\n    return math_ops.less_equal(x, y)",
    "docstring": "Element-wise truth value of (x <= y). Args: x: Tensor or variable. y: Tensor or variable. Returns: A bool tensor.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\backend.py",
    "ast_data": "FunctionDef name:less_equal arg:x arg:y arguments arg arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_transform_feature",
    "source_code": "@deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE, _FEATURE_COLUMN_DEPRECATION)\ndef _transform_feature(self, inputs):\n    source_tensor = inputs.get(self.source_column)\n    return math_ops._bucketize(source_tensor, boundaries=self.boundaries)",
    "docstring": "Returns bucketized categorical tensor.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\feature_column\\feature_column_v2.py",
    "ast_data": "FunctionDef name:_transform_feature arg:self arg:inputs arguments arg arg Assign Call Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "call",
    "source_code": "@doc_controls.doc_in_current_and_subclasses\ndef call(self, inputs, training=None, mask=None):\n    raise NotImplementedError('When subclassing the `Model` class, you should implement a `call` method.')",
    "docstring": "Calls the model on new inputs. In this case just reapplies all ops in the graph to the new inputs (e.g. build a new computational graph from the provided inputs). Note: This method should not be called directly. It is only meant to be overridden when subclassing . To call a model on an input, always use the method, i.e. , which relies on the underlying method. Args: inputs: Input tensor, or dict/list/tuple of input tensors. training: Boolean or boolean scalar tensor, indicating whether to run the in training mode or inference mode. mask: A mask or list of masks. A mask can be either a tensor or None (no mask). Returns: A tensor if there is a single output, or a list of tensors if there are more than one outputs.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\training.py",
    "ast_data": "FunctionDef name:call arg:self arg:inputs arg:training arg:mask arguments arg arg arg arg Raise Call"
  },
  {
    "library": "tensorflow",
    "name": "losses",
    "source_code": "@property\ndef losses(self):\n    collected_losses = []\n    all_layers = self._flatten_layers()\n    for layer in all_layers:\n        collected_losses.extend(layer._losses)\n        for regularizer in layer._callable_losses:\n            loss_tensor = regularizer()\n            if loss_tensor is not None:\n                collected_losses.append(loss_tensor)\n    return collected_losses",
    "docstring": "Losses which are associated with this . Variable regularization tensors are created when this property is accessed, so it is eager safe: accessing under a will propagate gradients back to the corresponding variables. Returns: A list of tensors.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\base_layer_v1.py",
    "ast_data": "FunctionDef name:losses arg:self arguments arg Assign Assign Call For Call For Assign Call If Compare Call Return return:yes"
  },
  {
    "library": "numpy",
    "name": "__ipow__",
    "source_code": "def __ipow__(self, other):\n    other_data = getdata(other)\n    other_data = np.where(self._mask, other_data.dtype.type(1), other_data)\n    other_mask = getmask(other)\n    with np.errstate(divide='ignore', invalid='ignore'):\n        self._data.__ipow__(other_data)\n    invalid = np.logical_not(np.isfinite(self._data))\n    if invalid.any():\n        if self._mask is not nomask:\n            self._mask |= invalid\n        else:\n            self._mask = invalid\n        np.copyto(self._data, self.fill_value, where=invalid)\n    new_mask = mask_or(other_mask, invalid)\n    self._mask = mask_or(self._mask, new_mask)\n    return self",
    "docstring": "Raise self to the power other, in place.",
    "type": "method",
    "file_path": "numpy\\numpy\\ma\\core.py",
    "ast_data": "FunctionDef name:__ipow__ arg:self arg:other arguments arg arg Assign Call Assign Call Call Assign Call With Call Call Assign Call Call If Call If Compare Assign Call Assign Call Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_BesselI0Grad",
    "source_code": "@ops.RegisterGradient('BesselI0')\ndef _BesselI0Grad(op: ops.Operation, grad):\n    x = op.inputs[0]\n    with ops.control_dependencies([grad]):\n        partial_x = special_math_ops.bessel_i1(x)\n        return grad * partial_x",
    "docstring": "Compute gradient of bessel_i0(x) with respect to its argument.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\math_grad.py",
    "ast_data": "FunctionDef name:_BesselI0Grad arg:op arg:grad arguments arg arg Assign With Call Assign Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_args_to_tuple",
    "source_code": "def _args_to_tuple(self, node):\n    builder = _ArgTemplateBuilder()\n    for a in node.args:\n        if isinstance(a, gast.Starred):\n            builder.add_stararg(a.value)\n        else:\n            builder.add_arg(a)\n    builder.finalize()\n    return builder.to_ast()",
    "docstring": "Ties together all positional and *arg arguments in a single tuple.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\autograph\\converters\\call_trees.py",
    "ast_data": "FunctionDef name:_args_to_tuple arg:self arg:node arguments arg arg Assign Call For If Call Call Call Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "is_type",
    "source_code": "def is_type(type_hint, comp_type) -> bool:\n    return type_hint is comp_type or get_origin(type_hint) is comp_type",
    "docstring": "Determines if type_hint is comp_type. There are some type annotations that this doesn't work for. I think it's because some Type annotations are Type Objects and some are Special Forms, but not sure. There's definite room for improvement to make this more general for someone who deeply understands Python types.",
    "type": "function",
    "file_path": "pytorch\\torch\\_inductor\\fuzzer.py",
    "ast_data": "FunctionDef name:is_type arg:type_hint arg:comp_type arguments arg arg Return return:yes BoolOp Compare Compare Call"
  },
  {
    "library": "tensorflow",
    "name": "make_canonicalized_monomorphic_type",
    "source_code": "def make_canonicalized_monomorphic_type(args: Any, kwargs: Any, capture_types: Any, polymorphic_type) -> Tuple[function_type_lib.FunctionType, trace_type.InternalTracingContext]:\n    kwargs = {function_type_lib.sanitize_arg_name(name): value for name, value in kwargs.items()}\n    function_type, type_context = function_type_lib.canonicalize_to_monomorphic(args, kwargs, {}, capture_types, polymorphic_type)\n    return (function_type, type_context)",
    "docstring": "Generates function type given the function arguments.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\polymorphic_function\\function_type_utils.py",
    "ast_data": "FunctionDef name:make_canonicalized_monomorphic_type arg:args arg:kwargs arg:capture_types arg:polymorphic_type arguments arg arg arg arg Assign Call Call Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_get",
    "source_code": "def _get(self):\n    replica_id = values_util.get_current_replica_id_as_int()\n    if replica_id is None:\n        return self._get_cross_replica()\n    else:\n        return self._values[replica_id]",
    "docstring": "Returns the value for the current device or raises a ValueError.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\values.py",
    "ast_data": "FunctionDef name:_get arg:self arguments arg Assign Call If Compare Return return:yes Call Return return:yes"
  },
  {
    "library": "scipy",
    "name": "_fftautocorr",
    "source_code": "def _fftautocorr(x):\n    N = x.shape[-1]\n    use_N = sp_fft.next_fast_len(2 * N - 1)\n    x_fft = sp_fft.rfft(x, use_N, axis=-1)\n    cxy = sp_fft.irfft(x_fft * x_fft.conj(), n=use_N)[:, :N]\n    return cxy",
    "docstring": "Compute the autocorrelation of a real array and crop the result.",
    "type": "function",
    "file_path": "scipy\\scipy\\signal\\windows\\_windows.py",
    "ast_data": "FunctionDef name:_fftautocorr arg:x arguments arg Assign Assign Call Assign Call Assign Call Call Return return:yes"
  },
  {
    "library": "django",
    "name": "get_order_dir",
    "source_code": "def get_order_dir(field, default='ASC'):\n    dirn = ORDER_DIR[default]\n    if field[0] == '-':\n        return (field[1:], dirn[1])\n    return (field, dirn[0])",
    "docstring": "Return the field name and direction for an order specification. For example, '-foo' is returned as ('foo', 'DESC'). The 'default' param is used to indicate which way no prefix (or a '+' prefix) should sort. The '-' prefix always sorts the opposite way.",
    "type": "function",
    "file_path": "django\\django\\db\\models\\sql\\query.py",
    "ast_data": "FunctionDef name:get_order_dir arg:field arg:default arguments arg arg Assign If Compare Return return:yes Return return:yes"
  },
  {
    "library": "pandas",
    "name": "_set_categories",
    "source_code": "def _set_categories(self, categories, fastpath: bool=False) -> None:\n    if fastpath:\n        new_dtype = CategoricalDtype._from_fastpath(categories, self.ordered)\n    else:\n        new_dtype = CategoricalDtype(categories, ordered=self.ordered)\n    if not fastpath and self.dtype.categories is not None and (len(new_dtype.categories) != len(self.dtype.categories)):\n        raise ValueError('new categories need to have the same number of items as the old categories!')\n    super().__init__(self._ndarray, new_dtype)",
    "docstring": "Sets new categories inplace Parameters ---------- fastpath : bool, default False Don't perform validation of the categories for uniqueness or nulls Examples -------- >>> c = pd.Categorical([\"a\", \"b\"]) >>> c ['a', 'b'] Categories (2, object): ['a', 'b'] >>> c._set_categories(pd.Index([\"a\", \"c\"])) >>> c ['a', 'c'] Categories (2, object): ['a', 'c']",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\arrays\\categorical.py",
    "ast_data": "FunctionDef name:_set_categories arg:self arg:categories arg:fastpath arguments arg arg arg If Assign Call Assign Call If BoolOp Compare Compare Call Call Raise Call Call Call"
  },
  {
    "library": "authlib",
    "name": "hmac_sha1_signature",
    "source_code": "def hmac_sha1_signature(base_string, client_secret, token_secret):\n    text = base_string\n    key = escape(client_secret or '')\n    key += '&'\n    key += escape(token_secret or '')\n    signature = hmac.new(to_bytes(key), to_bytes(text), hashlib.sha1)\n    sig = binascii.b2a_base64(signature.digest())[:-1]\n    return to_unicode(sig)",
    "docstring": "Generate signature via HMAC-SHA1 method, per _. The \"HMAC-SHA1\" signature method uses the HMAC-SHA1 signature algorithm as defined in _:: digest = HMAC - SHA1(key, text) .. _: .. _:",
    "type": "function",
    "file_path": "authlib\\authlib\\oauth1\\rfc5849\\signature.py",
    "ast_data": "FunctionDef name:hmac_sha1_signature arg:base_string arg:client_secret arg:token_secret arguments arg arg arg Assign Assign Call BoolOp Call BoolOp Assign Call Call Call Assign Call Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_select_single_value",
    "source_code": "def _select_single_value(self, structured):\n\n    def _select_fn(x):\n        if isinstance(x, values.Mirrored) or isinstance(x, values.PerReplica):\n            return x._primary\n        else:\n            return x\n    return nest.map_structure(_select_fn, structured)",
    "docstring": "Select any single value in .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\parameter_server_strategy.py",
    "ast_data": "FunctionDef name:_select_single_value arg:self arg:structured arguments arg arg FunctionDef name:_select_fn arg:x arguments arg If BoolOp Call Call Return return:yes Return return:yes Return return:yes Call"
  },
  {
    "library": "cherrypy",
    "name": "digest_auth",
    "source_code": "def digest_auth(realm, get_ha1, key, debug=False, accept_charset='utf-8'):\n    request = cherrypy.serving.request\n    auth_header = request.headers.get('authorization')\n    respond_401 = functools.partial(_respond_401, realm, key, accept_charset, debug)\n    if not HttpDigestAuthorization.matches(auth_header or ''):\n        respond_401()\n    msg = 'The Authorization header could not be parsed.'\n    with cherrypy.HTTPError.handle(ValueError, 400, msg):\n        auth = HttpDigestAuthorization(auth_header, request.method, debug=debug, accept_charset=accept_charset)\n    if debug:\n        TRACE(str(auth))\n    if not auth.validate_nonce(realm, key):\n        respond_401()\n    ha1 = get_ha1(realm, auth.username)\n    if ha1 is None:\n        respond_401()\n    digest = auth.request_digest(ha1, entity_body=request.body)\n    if digest != auth.response:\n        respond_401()\n    if debug:\n        TRACE('digest matches auth.response')\n    if auth.is_nonce_stale(max_age_seconds=600):\n        respond_401(stale=True)\n    request.login = auth.username\n    if debug:\n        TRACE('authentication of %s successful' % auth.username)",
    "docstring": "Perform HTTP Digest Access Authentication. A CherryPy tool that hooks at `2617` where username is obtained from the request's 'authorization' header. If username is not found in the credentials store, get_ha1() returns None. key A secret string known only to the server, used in the synthesis of nonces.",
    "type": "function",
    "file_path": "cherrypy\\cherrypy\\lib\\auth_digest.py",
    "ast_data": "FunctionDef name:digest_auth arg:realm arg:get_ha1 arg:key arg:debug arg:accept_charset arguments arg arg arg arg arg Assign Assign Call Assign Call If Call BoolOp Call Assign With Call Assign Call If Call Call If Call Call Assign Call If Compare Call Assign Call If Compare Call If Call If Call Call Assign If Call"
  },
  {
    "library": "scikit-learn",
    "name": "_warn_mkl_vcomp",
    "source_code": "def _warn_mkl_vcomp(self, n_active_threads):\n    warnings.warn(f'BisectingKMeans is known to have a memory leak on Windows with MKL, when there are less chunks than available threads. You can avoid it by setting the environment variable OMP_NUM_THREADS={n_active_threads}.')",
    "docstring": "Warn when vcomp and mkl are both present",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\cluster\\_bisect_k_means.py",
    "ast_data": "FunctionDef name:_warn_mkl_vcomp arg:self arg:n_active_threads arguments arg arg Call"
  },
  {
    "library": "django",
    "name": "BaseMonthArchiveView",
    "source_code": "class BaseMonthArchiveView(YearMixin, MonthMixin, BaseDateListView):\n    date_list_period = 'day'\n\n    def get_dated_items(self):\n        year = self.get_year()\n        month = self.get_month()\n        date_field = self.get_date_field()\n        date = _date_from_string(year, self.get_year_format(), month, self.get_month_format())\n        since = self._make_date_lookup_arg(date)\n        until = self._make_date_lookup_arg(self._get_next_month(date))\n        lookup_kwargs = {'%s__gte' % date_field: since, '%s__lt' % date_field: until}\n        qs = self.get_dated_queryset(**lookup_kwargs)\n        date_list = self.get_date_list(qs)\n        return (date_list, qs, {'month': date, 'next_month': self.get_next_month(date), 'previous_month': self.get_previous_month(date)})",
    "docstring": "Base view for a list of objects published in a given month. This requires subclassing to provide a response mixin.",
    "type": "class",
    "file_path": "django\\django\\views\\generic\\dates.py",
    "ast_data": "ClassDef name:BaseMonthArchiveView Assign FunctionDef name:get_dated_items arg:self arguments arg Assign Call Assign Call Assign Call Assign Call Call Call Assign Call Assign Call Call Assign Assign Call Assign Call Return return:yes Call Call"
  },
  {
    "library": "matplotlib",
    "name": "make_image",
    "source_code": "def make_image(self, renderer, magnification=1.0, unsampled=False):\n    raise NotImplementedError('The make_image method must be overridden')",
    "docstring": "Normalize, rescale, and colormap this image's data for rendering using *renderer*, with the given *magnification*. If *unsampled* is True, the image will not be scaled, but an appropriate affine transformation will be returned instead. Returns ------- image : (M, N, 4) array The RGBA image, resampled unless *unsampled* is True. x, y : float The upper left corner where the image should be drawn, in pixel space. trans : The affine transformation from image to pixel space.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\image.py",
    "ast_data": "FunctionDef name:make_image arg:self arg:renderer arg:magnification arg:unsampled arguments arg arg arg arg Raise Call"
  },
  {
    "library": "tensorflow",
    "name": "_run_single_worker",
    "source_code": "def _run_single_worker(worker_fn, strategy, cluster_spec, task_type, task_id, session_config, rpc_layer='', worker_barrier=None, coord=None):\n    session_config = copy.deepcopy(session_config)\n    strategy = copy.deepcopy(strategy)\n    if task_type == _TaskType.EVALUATOR:\n        if strategy:\n            strategy.configure(session_config)\n    else:\n        assert strategy\n        strategy.configure(session_config, cluster_spec, task_type, task_id)\n    context = _WorkerContext(strategy, cluster_spec, task_type, task_id, session_config=session_config, rpc_layer=rpc_layer, worker_barrier=worker_barrier)\n    with context:\n        if coord:\n            with coord.stop_on_exception():\n                return worker_fn(strategy)\n        else:\n            return worker_fn(strategy)",
    "docstring": "Runs a single worker by calling under context.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\distribute\\distribute_coordinator_utils.py",
    "ast_data": "FunctionDef name:_run_single_worker arg:worker_fn arg:strategy arg:cluster_spec arg:task_type arg:task_id arg:session_config arg:rpc_layer arg:worker_barrier arg:coord arguments arg arg arg arg arg arg arg arg arg Assign Call Assign Call If Compare If Call Call Assign Call With If With Call Return return:yes Call Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "_maybe_reindex_columns_na_proxy",
    "source_code": "def _maybe_reindex_columns_na_proxy(axes: list[Index], mgrs_indexers: list[tuple[BlockManager, dict[int, np.ndarray]]], needs_copy: bool) -> list[BlockManager]:\n    new_mgrs = []\n    for mgr, indexers in mgrs_indexers:\n        for i, indexer in indexers.items():\n            mgr = mgr.reindex_indexer(axes[i], indexers[i], axis=i, only_slice=True, allow_dups=True, use_na_proxy=True)\n        if needs_copy and (not indexers):\n            mgr = mgr.copy()\n        new_mgrs.append(mgr)\n    return new_mgrs",
    "docstring": "Reindex along columns so that all of the BlockManagers being concatenated have matching columns. Columns added in this reindexing have dtype=np.void, indicating they should be ignored when choosing a column's final dtype.",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\internals\\concat.py",
    "ast_data": "FunctionDef name:_maybe_reindex_columns_na_proxy arg:axes arg:mgrs_indexers arg:needs_copy arguments arg arg arg Assign For For Call Assign Call If BoolOp Assign Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "delete_recursively_v2",
    "source_code": "@tf_export('io.gfile.rmtree')\ndef delete_recursively_v2(path):\n    _pywrap_file_io.DeleteRecursively(compat.path_to_bytes(path))",
    "docstring": "Deletes everything under path recursively. Args: path: string, a path Raises: errors.OpError: If the operation fails.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\lib\\io\\file_io.py",
    "ast_data": "FunctionDef name:delete_recursively_v2 arg:path arguments arg Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "_impl_with_aoti_compile",
    "source_code": "def _impl_with_aoti_compile(self, op_name, dispatch_key=''):\n    if torch._running_with_deploy():\n        _library.utils.warn_deploy()\n        return\n    if dispatch_key == '':\n        dispatch_key = self.dispatch_key\n    assert torch.DispatchKeySet(dispatch_key).has(torch._C.DispatchKey.Dense)\n    if isinstance(op_name, str):\n        name = op_name\n    elif isinstance(op_name, OpOverload):\n        name = op_name._schema.name\n        overload_name = op_name._schema.overload_name\n        if overload_name != '':\n            name = name + '.' + overload_name\n    else:\n        raise RuntimeError('_impl_with_aoti_compile should be passed either a name or an OpOverload object as the first argument')\n    key = self.ns + '/' + name.split('::')[-1] + '/' + dispatch_key\n    if key in _impls:\n        raise RuntimeError(\"This is not allowed since there's already a kernel registered from python overriding {}'s behavior for {} dispatch key and {} namespace.\".format(name.split('::')[-1], dispatch_key, self.ns))\n    assert self.m is not None\n    impl_fn: Callable = self.m.impl_with_aoti_compile\n    impl_fn(self.ns, name.split('::')[-1], dispatch_key)\n    _impls.add(key)\n    self._op_impls.add(key)",
    "docstring": "Register the operator to use the AOTI-compiled implementation. Args: op_name: operator name (along with the overload) or OpOverload object. dispatch_key: dispatch key that the input function should be registered for. By default, it uses the dispatch key that the library was created with. Example:: >>> my_lib = Library(\"aten\", \"IMPL\") >>> my_lib._impl_with_aoti_compile(\"div.Tensor\", \"CPU\")",
    "type": "method",
    "file_path": "pytorch\\torch\\library.py",
    "ast_data": "FunctionDef name:_impl_with_aoti_compile arg:self arg:op_name arg:dispatch_key arguments arg arg arg If Call Call Return return:no If Compare Assign Call Call If Call Assign If Call Assign Assign If Compare Assign Raise Call Assign Call If Compare Raise Call Call Call Compare Call Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "real",
    "source_code": "@tf_export('math.real', v1=['math.real', 'real'])\n@dispatch.register_unary_elementwise_api\n@dispatch.add_dispatch_support\n@deprecation.deprecated_endpoints('real')\ndef real(input, name=None):\n    with ops.name_scope(name, 'Real', [input]) as name:\n        input = ops.convert_to_tensor(input, name='input')\n        if input.dtype.is_complex:\n            real_dtype = input.dtype.real_dtype\n            return gen_math_ops.real(input, Tout=real_dtype, name=name)\n        elif input.dtype.is_numeric:\n            return input\n        else:\n            raise TypeError('input must be a numeric tensor, but got tensor with dtype {}'.format(input.dtype))",
    "docstring": "Returns the real part of a complex (or real) tensor. Given a tensor , this operation returns a tensor of type that is the real part of each element in considered as a complex number. For example: If is already real, it is returned unchanged. Args: input: A . Must have numeric type. name: A name for the operation (optional). Returns: A of type or .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\math_ops.py",
    "ast_data": "FunctionDef name:real arg:input arg:name arguments arg arg With Call Assign Call If Assign Return return:yes Call If Return return:yes Raise Call Call Call Call"
  },
  {
    "library": "numpy",
    "name": "var",
    "source_code": "def var(self, axis=None, dtype=None, out=None, ddof=0):\n    return N.ndarray.var(self, axis, dtype, out, ddof, keepdims=True)._collapse(axis)",
    "docstring": "Returns the variance of the matrix elements, along the given axis. Refer to for full documentation. See Also -------- numpy.var Notes ----- This is the same as , except that where an would be returned, a object is returned instead. Examples -------- >>> x = np.matrix(np.arange(12).reshape((3, 4))) >>> x matrix([[ 0, 1, 2, 3], [ 4, 5, 6, 7], [ 8, 9, 10, 11]]) >>> x.var() 11.916666666666666 >>> x.var(0) matrix([[ 10.66666667, 10.66666667, 10.66666667, 10.66666667]]) # may vary >>> x.var(1) matrix([[1.25], [1.25], [1.25]])",
    "type": "method",
    "file_path": "numpy\\numpy\\matrixlib\\defmatrix.py",
    "ast_data": "FunctionDef name:var arg:self arg:axis arg:dtype arg:out arg:ddof arguments arg arg arg arg arg Return return:yes Call Call"
  },
  {
    "library": "kornia",
    "name": "get_affine_matrix3d",
    "source_code": "def get_affine_matrix3d(translations: Tensor, center: Tensor, scale: Tensor, angles: Tensor, sxy: Optional[Tensor]=None, sxz: Optional[Tensor]=None, syx: Optional[Tensor]=None, syz: Optional[Tensor]=None, szx: Optional[Tensor]=None, szy: Optional[Tensor]=None) -> Tensor:\n    transform: Tensor = get_projective_transform(center, -angles, scale)\n    transform[..., 3] += translations\n    transform_h = convert_affinematrix_to_homography3d(transform)\n    if any((s is not None for s in [sxy, sxz, syx, syz, szx, szy])):\n        shear_mat = get_shear_matrix3d(center, sxy, sxz, syx, syz, szx, szy)\n        transform_h = transform_h @ shear_mat\n    return transform_h",
    "docstring": "Compose 3d affine matrix from the components. Args: translations: tensor containing the translation vector (dx,dy,dz) with shape :math:. center: tensor containing the center vector (x,y,z) with shape :math:. scale: tensor containing the scale factor with shape :math:. angles: axis angle vector containing the rotation angles in degrees in the form of (rx, ry, rz) with shape :math:. Internally it calls Rodrigues to compute the rotation matrix from axis-angle. sxy: tensor containing the shear factor in the xy-direction with shape :math:. sxz: tensor containing the shear factor in the xz-direction with shape :math:. syx: tensor containing the shear factor in the yx-direction with shape :math:. syz: tensor containing the shear factor in the yz-direction with shape :math:. szx: tensor containing the shear factor in the zx-direction with shape :math:. szy: tensor containing the shear factor in the zy-direction with shape :math:. Returns: the 3d affine transformation matrix :math:. .. note:: This function is often used in conjunction with :func:.",
    "type": "function",
    "file_path": "kornia\\kornia\\geometry\\transform\\imgwarp.py",
    "ast_data": "FunctionDef name:get_affine_matrix3d arg:translations arg:center arg:scale arg:angles arg:sxy arg:sxz arg:syx arg:syz arg:szx arg:szy arguments arg arg arg arg arg arg arg arg arg arg Call Assign Call If Call Compare Assign Call Assign Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "get_interpolation",
    "source_code": "def get_interpolation(self):\n    return self._interpolation",
    "docstring": "Return the interpolation method the image uses when resizing. One of 'auto', 'antialiased', 'nearest', 'bilinear', 'bicubic', 'spline16', 'spline36', 'hanning', 'hamming', 'hermite', 'kaiser', 'quadric', 'catrom', 'gaussian', 'bessel', 'mitchell', 'sinc', 'lanczos', or 'none'.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\image.py",
    "ast_data": "FunctionDef name:get_interpolation arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "peek",
    "source_code": "def peek(self, index, name=None):\n    if name is None:\n        name = '%s_peek' % self._name\n    fn = lambda: gen_data_flow_ops.stage_peek(index, dtypes=self._dtypes, shared_name=self._name, name=name, capacity=self._capacity, memory_limit=self._memory_limit)\n    return self.__internal_get(fn, name)",
    "docstring": "Peeks at an element in the staging area. If the staging area is too small to contain the element at the specified index, it will block until enough elements are inserted to complete the operation. The placement of the returned tensor will be determined by the current device scope when this function is called. Args: index: The index of the tensor within the staging area to look up. name: A name for the operation (optional). Returns: The tuple of tensors that was gotten.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\data_flow_ops.py",
    "ast_data": "FunctionDef name:peek arg:self arg:index arg:name arguments arg arg arg If Compare Assign Assign arguments Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "transform_feature",
    "source_code": "def transform_feature(self, transformation_cache, state_manager):\n    input_tensor = _to_sparse_input_and_drop_ignore_values(transformation_cache.get(self.key, state_manager))\n    return self._transform_input_tensor(input_tensor)",
    "docstring": "Returns a SparseTensor with identity values.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\feature_column\\feature_column_v2.py",
    "ast_data": "FunctionDef name:transform_feature arg:self arg:transformation_cache arg:state_manager arguments arg arg arg Assign Call Call Return return:yes Call"
  },
  {
    "library": "numpy",
    "name": "_parseFormats",
    "source_code": "def _parseFormats(self, formats, aligned=False):\n    if formats is None:\n        raise ValueError('Need formats argument')\n    if isinstance(formats, list):\n        dtype = sb.dtype([(f'f{i}', format_) for i, format_ in enumerate(formats)], aligned)\n    else:\n        dtype = sb.dtype(formats, aligned)\n    fields = dtype.fields\n    if fields is None:\n        dtype = sb.dtype([('f1', dtype)], aligned)\n        fields = dtype.fields\n    keys = dtype.names\n    self._f_formats = [fields[key][0] for key in keys]\n    self._offsets = [fields[key][1] for key in keys]\n    self._nfields = len(keys)",
    "docstring": "Parse the field formats",
    "type": "method",
    "file_path": "numpy\\numpy\\_core\\records.py",
    "ast_data": "FunctionDef name:_parseFormats arg:self arg:formats arg:aligned arguments arg arg arg If Compare Raise Call If Call Assign Call Call Assign Call Assign If Compare Assign Call Assign Assign Assign Assign Assign Call"
  },
  {
    "library": "scipy",
    "name": "_minimize_scalar_brent",
    "source_code": "def _minimize_scalar_brent(func, brack=None, args=(), xtol=1.48e-08, maxiter=500, disp=0, **unknown_options):\n    _check_unknown_options(unknown_options)\n    tol = xtol\n    if tol < 0:\n        raise ValueError(f'tolerance should be >= 0, got {tol!r}')\n    brent = Brent(func=func, args=args, tol=tol, full_output=True, maxiter=maxiter, disp=disp)\n    brent.set_bracket(brack)\n    brent.optimize()\n    x, fval, nit, nfev = brent.get_result(full_output=True)\n    success = nit < maxiter and (not (np.isnan(x) or np.isnan(fval)))\n    if success:\n        message = f'\\nOptimization terminated successfully;\\nThe returned value satisfies the termination criteria\\n(using xtol = {xtol} )'\n    else:\n        if nit >= maxiter:\n            message = '\\nMaximum number of iterations exceeded'\n        if np.isnan(x) or np.isnan(fval):\n            message = f'{_status_message['nan']}'\n    if disp:\n        _print_success_message_or_warn(not success, message)\n    return OptimizeResult(fun=fval, x=x, nit=nit, nfev=nfev, success=success, message=message)",
    "docstring": "Options ------- maxiter : int Maximum number of iterations to perform. xtol : float Relative error in solution acceptable for convergence. disp : int, optional If non-zero, print messages. `` : print iteration results. Notes ----- Uses inverse parabolic interpolation when possible to speed up convergence of golden section method.",
    "type": "function",
    "file_path": "scipy\\scipy\\optimize\\_optimize.py",
    "ast_data": "FunctionDef name:_minimize_scalar_brent arg:func arg:brack arg:args arg:xtol arg:maxiter arg:disp arguments arg arg arg arg arg arg arg Call Assign If Compare Raise Call Assign Call Call Call Assign Call Assign BoolOp Compare BoolOp Call Call If Assign If Compare Assign If BoolOp Call Call Assign If Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "_optimizer_step_code",
    "source_code": "def _optimizer_step_code(self) -> None:\n    pass",
    "docstring": "Entry point for . When python tracing is enabled the profiler will hook into this function at the CPython level to inspect the optimizer's parameters and param groups. It is called it after since many optimizers lazily initialize state. This is a workaround due to lack of a proper step hook on the optimizer, and will be removed if it exists.",
    "type": "method",
    "file_path": "pytorch\\torch\\optim\\optimizer.py",
    "ast_data": "FunctionDef name:_optimizer_step_code arg:self arguments arg"
  },
  {
    "library": "tensorflow",
    "name": "pad_to_bounding_box",
    "source_code": "@tf_export('image.pad_to_bounding_box')\n@dispatch.add_dispatch_support\ndef pad_to_bounding_box(image, offset_height, offset_width, target_height, target_width):\n    return pad_to_bounding_box_internal(image, offset_height, offset_width, target_height, target_width, check_dims=True)",
    "docstring": "Pad with zeros to the specified and . Adds rows of zeros on top, columns of zeros on the left, and then pads the image on the bottom and right with zeros until it has dimensions , . This op does nothing if is zero and the image already has size by . Usage Example: >>> x = [[[1., 2., 3.], ... [4., 5., 6.]], ... [[7., 8., 9.], ... [10., 11., 12.]]] >>> padded_image = tf.image.pad_to_bounding_box(x, 1, 1, 4, 4) >>> padded_image Args: image: 4-D Tensor of shape or 3-D Tensor of shape . offset_height: Number of rows of zeros to add on top. offset_width: Number of columns of zeros to add on the left. target_height: Height of output image. target_width: Width of output image. Returns: If was 4-D, a 4-D float Tensor of shape If was 3-D, a 3-D float Tensor of shape Raises: ValueError: If the shape of is incompatible with the or arguments, or either or is negative.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\image_ops_impl.py",
    "ast_data": "FunctionDef name:pad_to_bounding_box arg:image arg:offset_height arg:offset_width arg:target_height arg:target_width arguments arg arg arg arg arg Return return:yes Call Call"
  },
  {
    "library": "matplotlib",
    "name": "check_if_parallel",
    "source_code": "def check_if_parallel(dx1, dy1, dx2, dy2, tolerance=1e-05):\n    theta1 = np.arctan2(dx1, dy1)\n    theta2 = np.arctan2(dx2, dy2)\n    dtheta = abs(theta1 - theta2)\n    if dtheta < tolerance:\n        return 1\n    elif abs(dtheta - np.pi) < tolerance:\n        return -1\n    else:\n        return False",
    "docstring": "Check if two lines are parallel. Parameters ---------- dx1, dy1, dx2, dy2 : float The gradients *dy*/*dx* of the two lines. tolerance : float The angular tolerance in radians up to which the lines are considered parallel. Returns ------- is_parallel - 1 if two lines are parallel in same direction. - -1 if two lines are parallel in opposite direction. - False otherwise.",
    "type": "function",
    "file_path": "matplotlib\\lib\\matplotlib\\bezier.py",
    "ast_data": "FunctionDef name:check_if_parallel arg:dx1 arg:dy1 arg:dx2 arg:dy2 arg:tolerance arguments arg arg arg arg arg Assign Call Assign Call Assign Call If Compare Return return:yes If Compare Call Return return:yes Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_populate_param_buffer_metadata_to_new_gm",
    "source_code": "def _populate_param_buffer_metadata_to_new_gm(params_buffers_to_node_meta: dict[str, Any], gm: torch.fx.GraphModule, new_sig: 'ExportGraphSignature') -> None:\n    for metadata in params_buffers_to_node_meta.values():\n        metadata.pop('nn_module_stack', None)\n        metadata.pop('stack_trace', None)\n    for node in gm.graph.nodes:\n        if node.op == 'placeholder':\n            if node.target in new_sig.inputs_to_parameters:\n                param_name = new_sig.inputs_to_parameters[node.target]\n                if param_name in params_buffers_to_node_meta:\n                    for k, v in params_buffers_to_node_meta[param_name].items():\n                        node.meta[k] = v\n            if node.target in new_sig.inputs_to_buffers:\n                buffer_name = new_sig.inputs_to_buffers[node.target]\n                if buffer_name in params_buffers_to_node_meta:\n                    for k, v in params_buffers_to_node_meta[buffer_name].items():\n                        node.meta[k] = v",
    "docstring": "Given that we collected param'buffer metadata before, we put them back in newly traced graph module",
    "type": "function",
    "file_path": "pytorch\\torch\\_export\\utils.py",
    "ast_data": "FunctionDef name:_populate_param_buffer_metadata_to_new_gm arg:params_buffers_to_node_meta arg:gm arg:new_sig arguments arg arg arg For Call Call Call For If Compare If Compare Assign If Compare For Call Assign If Compare Assign If Compare For Call Assign"
  },
  {
    "library": "django",
    "name": "HiddenRangeWidget",
    "source_code": "class HiddenRangeWidget(RangeWidget):\n\n    def __init__(self, attrs=None):\n        super().__init__(HiddenInput, attrs)",
    "docstring": "A widget that splits input into two inputs.",
    "type": "class",
    "file_path": "django\\django\\contrib\\postgres\\forms\\ranges.py",
    "ast_data": "ClassDef name:HiddenRangeWidget FunctionDef name:__init__ arg:self arg:attrs arguments arg arg Call Call"
  },
  {
    "library": "tensorflow",
    "name": "check_alive",
    "source_code": "def check_alive(self, worker_name):\n    if self._context_handle:\n        return pywrap_tfe.TFE_ContextCheckAlive(self._context_handle, worker_name)\n    else:\n        raise ValueError('Context is not initialized.')",
    "docstring": "Checks whether a remote worker is alive or not. Args: worker_name: a string representing the remote worker. It must be a fully specified name like \"/job:worker/replica:0/task:0\". Returns: a boolean indicating whether the remote worker is alive or not. Raises: ValueError: if context is not initialized.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\context.py",
    "ast_data": "FunctionDef name:check_alive arg:self arg:worker_name arguments arg arg If Return return:yes Call Raise Call"
  },
  {
    "library": "matplotlib",
    "name": "align_titles",
    "source_code": "def align_titles(self, axs=None):\n    if axs is None:\n        axs = self.axes\n    axs = [ax for ax in np.ravel(axs) if ax.get_subplotspec() is not None]\n    for ax in axs:\n        _log.debug(' Working on: %s', ax.get_title())\n        rowspan = ax.get_subplotspec().rowspan\n        for axc in axs:\n            rowspanc = axc.get_subplotspec().rowspan\n            if rowspan.start == rowspanc.start:\n                self._align_label_groups['title'].join(ax, axc)",
    "docstring": "Align the titles of subplots in the same subplot row if title alignment is being done automatically (i.e. the title position is not manually set). Alignment persists for draw events after this is called. Parameters ---------- axs : list of Optional list of (or ndarray) to align the titles. Default is to align all Axes on the figure. See Also -------- matplotlib.figure.Figure.align_xlabels matplotlib.figure.Figure.align_ylabels matplotlib.figure.Figure.align_labels Notes ----- This assumes that all Axes in `.GridSpec.SubplotSpec` positions correspond to figure positions. Examples -------- Example with titles:: fig, axs = plt.subplots(1, 2) axs[0].set_aspect('equal') axs[0].set_title('Title 0') axs[1].set_title('Title 1') fig.align_titles()",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\figure.py",
    "ast_data": "FunctionDef name:align_titles arg:self arg:axs arguments arg arg If Compare Assign Assign Call Compare Call For Call Call Assign Call For Assign Call If Compare Call"
  },
  {
    "library": "django",
    "name": "_field_indexes_sql",
    "source_code": "def _field_indexes_sql(self, model, field):\n    output = []\n    if self._field_should_be_indexed(model, field):\n        output.append(self._create_index_sql(model, fields=[field]))\n    return output",
    "docstring": "Return a list of all index SQL statements for the specified field.",
    "type": "method",
    "file_path": "django\\django\\db\\backends\\base\\schema.py",
    "ast_data": "FunctionDef name:_field_indexes_sql arg:self arg:model arg:field arguments arg arg arg Assign If Call Call Call Return return:yes"
  },
  {
    "library": "scipy",
    "name": "to_zpk",
    "source_code": "def to_zpk(self, **kwargs):\n    return ZerosPolesGain(*ss2zpk(self._A, self._B, self._C, self._D, **kwargs), **self._dt_dict)",
    "docstring": "Convert system representation to . Parameters ---------- kwargs : dict, optional Additional keywords passed to Returns ------- sys : instance of Zeros, poles, gain representation of the current system",
    "type": "method",
    "file_path": "scipy\\scipy\\signal\\_ltisys.py",
    "ast_data": "FunctionDef name:to_zpk arg:self arguments arg arg Return return:yes Call Call"
  },
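  {
    "library": "scipy",
    "name": "to_zpk_usage_sketch",
    "note": "Editor's addition, not part of the scipy sources: a minimal sketch of calling `to_zpk` through the public scipy.signal API, assuming a simple SISO system H(s) = 1 / (s**2 + 3s + 2).",
    "example_code": "from scipy.signal import StateSpace\n\n# H(s) = 1 / (s**2 + 3*s + 2), so the poles should come out near -1 and -2\nsys = StateSpace([[0.0, 1.0], [-2.0, -3.0]], [[0.0], [1.0]], [[1.0, 0.0]], [[0.0]])\nzpk = sys.to_zpk()\nprint(zpk.poles)  # approximately [-2., -1.]\nprint(zpk.zeros)  # empty: the transfer function has no finite zeros",
    "type": "usage_example"
  },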
  {
    "library": "tensorflow",
    "name": "_AddShardedSaveOpsForV2",
    "source_code": "def _AddShardedSaveOpsForV2(self, checkpoint_prefix, per_device):\n    with ops.device('CPU'):\n        _SHARDED_SUFFIX = array_ops.where(string_ops.regex_full_match(checkpoint_prefix, '^s3://.*'), constant_op.constant('.part'), constant_op.constant(os.path.normpath('_temp/part')))\n        tmp_checkpoint_prefix = string_ops.string_join([checkpoint_prefix, _SHARDED_SUFFIX])\n    num_shards = len(per_device)\n    sharded_saves = []\n    sharded_prefixes = []\n    num_shards_tensor = constant_op.constant(num_shards, name='num_shards')\n    last_device = None\n    for shard, (device, saveables) in enumerate(per_device):\n        last_device = device\n        with ops.device(saveable_object_util.set_cpu0(device)):\n            sharded_filename = self.sharded_filename(tmp_checkpoint_prefix, shard, num_shards_tensor)\n            sharded_prefixes.append(sharded_filename)\n            sharded_saves.append(self._AddSaveOps(sharded_filename, saveables))\n    with ops.control_dependencies([x.op for x in sharded_saves]):\n        with ops.device(saveable_object_util.set_cpu0(last_device)):\n            merge_step = gen_io_ops.merge_v2_checkpoints(sharded_prefixes, checkpoint_prefix, delete_old_dirs=True)\n            with ops.control_dependencies([merge_step]):\n                return array_ops.identity(checkpoint_prefix)",
    "docstring": "Add ops to save the params per shard, for the V2 format. Note that the sharded save procedure for the V2 format is different from V1: there is a special \"merge\" step that merges the small metadata produced from each device. Args: checkpoint_prefix: scalar String Tensor. Interpreted *NOT AS A FILENAME*, but as a prefix of a V2 checkpoint; per_device: A list of (device, BaseSaverBuilder.VarToSave) pairs, as returned by _GroupByDevices(). Returns: An op to save the variables, which, when evaluated, returns the prefix \"\" only and does not include the sharded spec suffix.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\training\\saver.py",
    "ast_data": "FunctionDef name:_AddShardedSaveOpsForV2 arg:self arg:checkpoint_prefix arg:per_device arguments arg arg arg With Call Assign Call Call Call Call Call Assign Call Assign Call Assign Assign Assign Call Assign For Call Assign With Call Call Assign Call Call Call Call With Call With Call Call Assign Call With Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "shard_metadata",
    "source_code": "@no_type_check\ndef shard_metadata(self) -> FlatParamShardMetadata:\n    fqns_list = []\n    shapes_list = []\n    strides_list = []\n    contiguities_list = []\n    numels_list = []\n    shard_param_offsets = []\n    for fqn, shape, stride, contiguous, numel, shard_param_info in zip(self.flat_param._fqns, self.flat_param._shapes, self.flat_param._strides, self.flat_param._contiguities, self.flat_param._numels, self.flat_param._shard_param_infos):\n        if not shard_param_info.in_shard:\n            continue\n        fqns_list.append(fqn)\n        shapes_list.append(shape)\n        strides_list.append(stride)\n        contiguities_list.append(contiguous)\n        numels_list.append(numel)\n        shard_param_offsets.append((shard_param_info.intra_param_start_idx, shard_param_info.intra_param_end_idx))\n    return FlatParamShardMetadata(tuple(fqns_list), tuple(shapes_list), tuple(strides_list), tuple(contiguities_list), tuple(numels_list), tuple(shard_param_offsets))",
    "docstring": "Return the shard-related metadata specific to this rank's shard of the flat parameter. NOTE: The returned tuple does not include elements for alignment padding but does account for the padding.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\fsdp\\_flat_param.py",
    "ast_data": "FunctionDef name:shard_metadata arg:self arguments arg Assign Assign Assign Assign Assign Assign For Call If Call Call Call Call Call Call Return return:yes Call Call Call Call Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "conv_refinement_rule",
    "source_code": "@register_refinement_rule(Conv2d)\ndef conv_refinement_rule(n: Node):\n    res = []\n    assert isinstance(n.args[0], Node)\n    arg_type = n.args[0].type\n    if isinstance(arg_type, TensorType) and isinstance(n.type, TensorType):\n        res = [Equality(arg_type.__args__[0], n.type.__args__[0])]\n        return res",
    "docstring": "The equality constraints are between the first dimension of the input and output",
    "type": "function",
    "file_path": "pytorch\\torch\\fx\\experimental\\graph_gradual_typechecker.py",
    "ast_data": "FunctionDef name:conv_refinement_rule arg:n arguments arg Assign Call Assign If BoolOp Call Call Assign Call Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "Verbatim",
    "source_code": "class Verbatim:\n\n    def __init__(self, x):\n        self._x = x\n\n    def pdfRepr(self):\n        return self._x",
    "docstring": "Store verbatim PDF command content for later inclusion in the stream.",
    "type": "class",
    "file_path": "matplotlib\\lib\\matplotlib\\backends\\backend_pdf.py",
    "ast_data": "ClassDef name:Verbatim FunctionDef name:__init__ arg:self arg:x arguments arg arg Assign FunctionDef name:pdfRepr arg:self arguments arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "enable_onednn_fusion",
    "source_code": "def enable_onednn_fusion(enabled: bool):\n    torch._C._jit_set_llga_enabled(enabled)",
    "docstring": "Enable or disables onednn JIT fusion based on the parameter .",
    "type": "function",
    "file_path": "pytorch\\torch\\jit\\__init__.py",
    "ast_data": "FunctionDef name:enable_onednn_fusion arg:enabled arguments arg Call"
  },
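  {
    "library": "pytorch",
    "name": "enable_onednn_fusion_usage_sketch",
    "note": "Editor's addition, not part of the pytorch sources: a short sketch of toggling oneDNN (LLGA) fusion around TorchScript execution via the public torch.jit wrapper shown above.",
    "example_code": "import torch\n\n# opt in to oneDNN graph fusion for subsequently executed TorchScript code\ntorch.jit.enable_onednn_fusion(True)\n\nscripted = torch.jit.script(torch.nn.Linear(8, 8).eval())\nout = scripted(torch.randn(2, 8))\n\n# opt back out when fusion is no longer wanted\ntorch.jit.enable_onednn_fusion(False)",
    "type": "usage_example"
  },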
  {
    "library": "scipy",
    "name": "BiggsExp03",
    "source_code": "class BiggsExp03(Benchmark):\n\n    def __init__(self, dimensions=3):\n        Benchmark.__init__(self, dimensions)\n        self._bounds = list(zip([0] * 3, [20] * 3))\n        self.global_optimum = [[1.0, 10.0, 5.0]]\n        self.fglob = 0\n\n    def fun(self, x, *args):\n        self.nfev += 1\n        t = arange(1.0, 11.0) * 0.1\n        y = exp(-t) - 5 * exp(-10 * t)\n        vec = (exp(-t * x[0]) - x[2] * exp(-t * x[1]) - y) ** 2\n        return sum(vec)",
    "docstring": "BiggsExp03 objective function. The BiggsExp03 [1]_ global optimization problem is a multimodal minimization problem defined as follows .. math:: \\begin{matrix}\\ f_{\\text{BiggsExp03}}(x) = \\sum_{i=1}^{10} (e^{-t_i x_1} - x_3e^{-t_i x_2} - y_i)^2\\\\ t_i = 0.1i\\\\ y_i = e^{-t_i} - 5e^{-10 t_i}\\\\ \\end{matrix} with :math: for :math:. *Global optimum*: :math: for :math: .. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions For Global Optimization Problems Int. Journal of Mathematical Modelling and Numerical Optimisation, 2013, 4, 150-194.",
    "type": "class",
    "file_path": "scipy\\benchmarks\\benchmarks\\go_benchmark_functions\\go_funcs_B.py",
    "ast_data": "ClassDef name:BiggsExp03 FunctionDef name:__init__ arg:self arg:dimensions arguments arg arg Call Assign Call Call Assign Assign FunctionDef name:fun arg:self arg:x arguments arg arg arg Assign Call Assign Call Call Assign Call Call Return return:yes Call"
  },
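  {
    "library": "scipy",
    "name": "biggsexp03_optimum_check",
    "note": "Editor's addition, not part of the scipy sources: a small numpy check, mirroring the `fun` body above, that the stated global optimum [1, 10, 5] drives the BiggsExp03 residuals to fglob = 0.",
    "example_code": "from numpy import arange, exp\n\nx = [1.0, 10.0, 5.0]  # the recorded global optimum\nt = arange(1.0, 11.0) * 0.1\ny = exp(-t) - 5 * exp(-10 * t)\nvec = (exp(-t * x[0]) - x[2] * exp(-t * x[1]) - y) ** 2\nprint(sum(vec))  # 0.0, matching self.fglob",
    "type": "usage_example"
  },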
  {
    "library": "tensorflow",
    "name": "_SummaryIterator",
    "source_code": "class _SummaryIterator(object):\n\n    def __init__(self, path):\n        self._tf_record_iterator = tf_record.tf_record_iterator(path)\n\n    def __iter__(self):\n        return self\n\n    def __next__(self):\n        r = next(self._tf_record_iterator)\n        return event_pb2.Event.FromString(r)\n    next = __next__",
    "docstring": "Yields protocol buffers from a given path.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\summary\\summary_iterator.py",
    "ast_data": "ClassDef name:_SummaryIterator FunctionDef name:__init__ arg:self arg:path arguments arg arg Assign Call FunctionDef name:__iter__ arg:self arguments arg Return return:yes FunctionDef name:__next__ arg:self arguments arg Assign Call Return return:yes Call Assign"
  },
  {
    "library": "tensorflow",
    "name": "set_configuration_from_input_tensors",
    "source_code": "def set_configuration_from_input_tensors(self, input_tensors):\n    if len(input_tensors) != self.number_of_tuple_elements:\n        raise ValueError(f'input_tensors is {str(input_tensors)}, but should be a list of {self.number_of_tuple_elements} Tensors')\n    self.set_tuple_shapes([t.shape for t in input_tensors])\n    self.set_tuple_types([t.dtype for t in input_tensors])",
    "docstring": "Sets the shapes and types of the queue tuple elements. input_tensors is a list of Tensors whose types and shapes are used to set the queue configuration. Args: input_tensors: list of Tensors of the same types and shapes as the desired queue Tuple. Raises: ValueError: if input_tensors is not a list of length self.number_of_tuple_elements",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\tpu\\tpu_feed.py",
    "ast_data": "FunctionDef name:set_configuration_from_input_tensors arg:self arg:input_tensors arguments arg arg If Compare Call Raise Call Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "MutatingFirstArgExternKernel",
    "source_code": "class MutatingFirstArgExternKernel(ExternKernel):\n\n    def codegen(self, wrapper) -> None:\n        argrefs = [*(t.codegen_reference() for t in self.inputs), *map(repr, self.constant_args)]\n        wrapper.writeline(f'{self.get_kernel_name()}({', '.join(argrefs)}){wrapper.ending}')\n\n    def should_allocate(self) -> bool:\n        return False\n\n    def get_mutation_names(self):\n        return [self.inputs[0].get_name()]\n\n    def get_unbacked_symbol_defs(self) -> OrderedSet[sympy.Symbol]:\n        return OrderedSet()\n\n    def has_side_effects(self) -> bool:\n        return True",
    "docstring": "This needs to be a custom class to handle mutation properly",
    "type": "class",
    "file_path": "pytorch\\torch\\_inductor\\ir.py",
    "ast_data": "ClassDef name:MutatingFirstArgExternKernel FunctionDef name:codegen arg:self arg:wrapper arguments arg arg Assign Call Call Call Call Call FunctionDef name:should_allocate arg:self arguments arg Return return:yes FunctionDef name:get_mutation_names arg:self arguments arg Return return:yes Call FunctionDef name:get_unbacked_symbol_defs arg:self arguments arg Return return:yes Call FunctionDef name:has_side_effects arg:self arguments arg Return return:yes"
  },
  {
    "library": "pandas",
    "name": "SeriesType",
    "source_code": "class SeriesType(types.Type):\n\n    def __init__(self, dtype, index, namety) -> None:\n        assert isinstance(index, IndexType)\n        self.dtype = dtype\n        self.index = index\n        self.values = types.Array(self.dtype, 1, 'C')\n        self.namety = namety\n        name = f'series({dtype}, {index}, {namety})'\n        super().__init__(name)\n\n    @property\n    def key(self):\n        return (self.dtype, self.index, self.namety)\n\n    @property\n    def as_array(self):\n        return self.values\n\n    def copy(self, dtype=None, ndim: int=1, layout: str='C') -> Self:\n        assert ndim == 1\n        assert layout == 'C'\n        if dtype is None:\n            dtype = self.dtype\n        return type(self)(dtype, self.index, self.namety)",
    "docstring": "The type class for Series objects.",
    "type": "class",
    "file_path": "pandas\\pandas\\core\\_numba\\extensions.py",
    "ast_data": "ClassDef name:SeriesType FunctionDef name:__init__ arg:self arg:dtype arg:index arg:namety arguments arg arg arg arg Call Assign Assign Assign Call Assign Assign Call Call FunctionDef name:key arg:self arguments arg Return return:yes FunctionDef name:as_array arg:self arguments arg Return return:yes FunctionDef name:copy arg:self arg:dtype arg:ndim arg:layout arguments arg arg arg arg Compare Compare If Compare Assign Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "register_decomposition",
    "source_code": "def register_decomposition(aten_op, registry=None, *, type='post_autograd', unsafe=False) -> Callable[[Callable[_P, _T]], Callable[_P, _T]]:\n    assert type in {'post_autograd', 'pre_autograd', 'meta'}\n\n    def decomposition_decorator(fn: Callable[_P, _T]) -> Callable[_P, _T]:\n        orig_fn = fn\n        if not unsafe:\n            fn = _convert_out_params(fn)\n        nonlocal registry\n        if registry is None:\n            registry = global_decomposition_table[type]\n\n        def register(op):\n            _add_op_to_registry(registry, op, fn)\n        pytree.tree_map_(register, aten_op)\n        return orig_fn\n    return decomposition_decorator",
    "docstring": "A decorator to register a function as a decomposition to the Python decomposition table. Use it like this:: @register_decomposition(torch.ops.aten.clamp_min) def clamp_min(x): return torch.clamp(self, min=min) If you are writing a new decomposition, consider contributing it directly to PyTorch in torch._decomp.decompositions. This API is experimental; we are almost certainly going to extend the API when we make decompositions eligible for use in transforms (e.g., autograd) and not just backend tracing, where we then need to know if a decomposition can be used to simulate a transform. By default, we also will register it to the Meta key of dispatcher, and replace the c++ Meta implementation if there is already one. unsafe kwarg is for reuse of this function for registering non-function things",
    "type": "function",
    "file_path": "pytorch\\torch\\_decomp\\__init__.py",
    "ast_data": "FunctionDef name:register_decomposition arg:aten_op arg:registry arguments arg arg arg arg Compare FunctionDef name:decomposition_decorator arg:fn arguments arg Assign If Assign Call If Compare Assign FunctionDef name:register arg:op arguments arg Call Call Return return:yes Return return:yes"
  },
  {
    "library": "django",
    "name": "get_choices",
    "source_code": "def get_choices(self, include_blank=True, blank_choice=BLANK_CHOICE_DASH, limit_choices_to=None, ordering=()):\n    limit_choices_to = limit_choices_to or self.limit_choices_to\n    qs = self.related_model._default_manager.complex_filter(limit_choices_to)\n    if ordering:\n        qs = qs.order_by(*ordering)\n    return (blank_choice if include_blank else []) + [(x.pk, str(x)) for x in qs]",
    "docstring": "Return choices with a default blank choices included, for use as choices for this field. Analog of django.db.models.fields.Field.get_choices(), provided initially for utilization by RelatedFieldListFilter.",
    "type": "method",
    "file_path": "django\\django\\db\\models\\fields\\reverse_related.py",
    "ast_data": "FunctionDef name:get_choices arg:self arg:include_blank arg:blank_choice arg:limit_choices_to arg:ordering arguments arg arg arg arg arg Assign BoolOp Assign Call If Assign Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, reduction=losses_utils.ReductionV2.AUTO, name='poisson'):\n    super().__init__(poisson, name=name, reduction=reduction)",
    "docstring": "Initializes instance. Args: reduction: Type of to apply to loss. Default value is . indicates that the reduction option will be determined by the usage context. For almost all cases this defaults to . When used with , outside of built-in training loops such as and , using or will raise an error. Please see this custom training [tutorial]( for more details. name: Optional name for the instance. Defaults to 'poisson'.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\losses.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:reduction arg:name arguments arg arg arg Call Call"
  },
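  {
    "library": "tensorflow",
    "name": "poisson_loss_usage_sketch",
    "note": "Editor's addition, not part of the tensorflow sources: a usage sketch via the public tf.keras.losses.Poisson mirror of this class; the loss is mean(y_pred - y_true * log(y_pred)), so these inputs give roughly 0.5.",
    "example_code": "import tensorflow as tf\n\ny_true = [[0.0, 1.0], [0.0, 0.0]]\ny_pred = [[1.0, 1.0], [0.0, 0.0]]\nloss = tf.keras.losses.Poisson()  # reduction defaults to AUTO\nprint(loss(y_true, y_pred).numpy())  # ~0.5",
    "type": "usage_example"
  },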
  {
    "library": "tensorflow",
    "name": "_copy_trackable_to_cpu",
    "source_code": "def _copy_trackable_to_cpu(self, object_map):\n    if self not in object_map:\n        if self._dataset is None:\n            object_map[self] = OwnedIterator(components=self._components, element_spec=self._element_spec)\n        else:\n            object_map[self] = OwnedIterator(dataset=self._dataset)\n    serialized = self._serialize_to_tensors()\n    object_map[self]._restore_from_tensors(serialized)",
    "docstring": "Implements checkpointing protocols for .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\data\\ops\\iterator_ops.py",
    "ast_data": "FunctionDef name:_copy_trackable_to_cpu arg:self arg:object_map arguments arg arg If Compare If Compare Assign Call Assign Call Assign Call Call"
  },
  {
    "library": "scipy",
    "name": "adjoint",
    "source_code": "def adjoint(self):\n    return self._adjoint()",
    "docstring": "Hermitian adjoint. Returns the Hermitian adjoint of self, aka the Hermitian conjugate or Hermitian transpose. For a complex matrix, the Hermitian adjoint is equal to the conjugate transpose. Can be abbreviated self.H instead of self.adjoint(). Returns ------- A_H : LinearOperator Hermitian adjoint of self.",
    "type": "method",
    "file_path": "scipy\\scipy\\sparse\\linalg\\_interface.py",
    "ast_data": "FunctionDef name:adjoint arg:self arguments arg Return return:yes Call"
  },
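  {
    "library": "scipy",
    "name": "adjoint_usage_sketch",
    "note": "Editor's addition, not part of the scipy sources: for a matrix-backed LinearOperator the adjoint is the conjugate transpose, so applying A.adjoint() (alias A.H) to a basis vector reads off a conjugated row of A.",
    "example_code": "import numpy as np\nfrom scipy.sparse.linalg import aslinearoperator\n\nA = aslinearoperator(np.array([[1.0, 2.0j], [3.0, 4.0]]))\nA_H = A.adjoint()  # equivalently A.H\nprint(A_H.matvec(np.array([1.0, 0.0])))  # [1, -2j]: the conjugate of A's first row",
    "type": "usage_example"
  },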
  {
    "library": "matplotlib",
    "name": "attach_note",
    "source_code": "def attach_note(self, text, positionRect=[-100, -100, 0, 0]):\n    self._ensure_file().newTextnote(text, positionRect)",
    "docstring": "Add a new text note to the page to be saved next. The optional positionRect specifies the position of the new note on the page. It is outside the page per default to make sure it is invisible on printouts.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\backends\\backend_pdf.py",
    "ast_data": "FunctionDef name:attach_note arg:self arg:text arg:positionRect arguments arg arg arg Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_set_device",
    "source_code": "def _set_device(self, device):\n    if isinstance(device, device_spec.DeviceSpecV2):\n        device = device.to_string()\n    self.device = device",
    "docstring": "This method captures TF's explicit device scope setting.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\backend.py",
    "ast_data": "FunctionDef name:_set_device arg:self arg:device arguments arg arg If Call Assign Call Assign"
  },
  {
    "library": "scipy",
    "name": "_mutate",
    "source_code": "def _mutate(self, candidate):\n    rng = self.random_number_generator\n    if callable(self.strategy):\n        return self._mutate_custom(candidate)\n    fill_point = rng_integers(rng, self.parameter_count)\n    samples = self._select_samples(candidate, 5)\n    trial = np.copy(self.population[candidate])\n    if self.strategy in ['currenttobest1exp', 'currenttobest1bin']:\n        bprime = self.mutation_func(candidate, samples)\n    else:\n        bprime = self.mutation_func(samples)\n    crossovers = rng.uniform(size=self.parameter_count)\n    crossovers = crossovers < self.cross_over_probability\n    if self.strategy in self._binomial:\n        crossovers[fill_point] = True\n        trial = np.where(crossovers, bprime, trial)\n        return trial\n    elif self.strategy in self._exponential:\n        i = 0\n        crossovers[0] = True\n        while i < self.parameter_count and crossovers[i]:\n            trial[fill_point] = bprime[fill_point]\n            fill_point = (fill_point + 1) % self.parameter_count\n            i += 1\n        return trial",
    "docstring": "Create a trial vector based on a mutation strategy.",
    "type": "method",
    "file_path": "scipy\\scipy\\optimize\\_differentialevolution.py",
    "ast_data": "FunctionDef name:_mutate arg:self arg:candidate arguments arg arg Assign If Call Return return:yes Call Assign Call Assign Call Assign Call If Compare Assign Call Assign Call Assign Call Assign Compare If Compare Assign Assign Call Return return:yes If Compare Assign Assign While BoolOp Compare Assign Assign Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "prologue_preserves_zero_mask",
    "source_code": "def prologue_preserves_zero_mask(prologue: 'SchedulerNode') -> bool:\n    preserves_zeros = PreservesZeros()\n    with V.set_ops_handler(preserves_zeros):\n        prologue._body(*prologue.get_ranges())\n    store_preserves_zeros = preserves_zeros.store_preserves_zeros\n    assert isinstance(store_preserves_zeros, bool)\n    return store_preserves_zeros",
    "docstring": "Does this prologue preserve zero masks",
    "type": "function",
    "file_path": "pytorch\\torch\\_inductor\\analyze_preserves_zero_mask.py",
    "ast_data": "FunctionDef name:prologue_preserves_zero_mask arg:prologue arguments arg Assign Call With Call Call Call Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "read",
    "source_code": "def read(self, index, name=None):\n    with ops.name_scope(name, 'TensorArrayV2Read', [self._flow, index]):\n        value = list_ops.tensor_list_get_item(input_handle=self._flow, index=index, element_dtype=self._dtype, element_shape=self.element_shape, name=name)\n        return value",
    "docstring": "See TensorArray.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\tensor_array_ops.py",
    "ast_data": "FunctionDef name:read arg:self arg:index arg:name arguments arg arg arg With Call Assign Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_set_replace_hook",
    "source_code": "@contextlib.contextmanager\ndef _set_replace_hook(self, f):\n    assert callable(f), 'Replace hook must be a callable.'\n    self._register_replace_node_hook(f)\n    try:\n        yield\n    finally:\n        self._unregister_replace_node_hook(f)",
    "docstring": "Takes a callable which will be called everytime when we replace a node to a new node, or change the node's name. Callable takes three arguments: the old node we're changing, and NAME of the new node, followed by the user node which consumes the old node to be replaced.",
    "type": "method",
    "file_path": "pytorch\\torch\\fx\\graph_module.py",
    "ast_data": "FunctionDef name:_set_replace_hook arg:self arg:f arguments arg arg Call Call Try Call"
  },
  {
    "library": "pandas",
    "name": "delete",
    "source_code": "def delete(self, loc) -> list[Block]:\n    if not is_list_like(loc):\n        loc = [loc]\n    if self.ndim == 1:\n        values = cast(np.ndarray, self.values)\n        values = np.delete(values, loc)\n        mgr_locs = self._mgr_locs.delete(loc)\n        return [type(self)(values, placement=mgr_locs, ndim=self.ndim)]\n    if np.max(loc) >= self.values.shape[0]:\n        raise IndexError\n    loc = np.concatenate([loc, [self.values.shape[0]]])\n    mgr_locs_arr = self._mgr_locs.as_array\n    new_blocks: list[Block] = []\n    previous_loc = -1\n    refs = self.refs if self.refs.has_reference() else None\n    for idx in loc:\n        if idx == previous_loc + 1:\n            pass\n        else:\n            values = self.values[previous_loc + 1:idx, :]\n            locs = mgr_locs_arr[previous_loc + 1:idx]\n            nb = type(self)(values, placement=BlockPlacement(locs), ndim=self.ndim, refs=refs)\n            new_blocks.append(nb)\n        previous_loc = idx\n    return new_blocks",
    "docstring": "Deletes the locs from the block. We split the block to avoid copying the underlying data. We create new blocks for every connected segment of the initial block that is not deleted. The new blocks point to the initial array.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\internals\\blocks.py",
    "ast_data": "FunctionDef name:delete arg:self arg:loc arguments arg arg If Call Assign If Compare Assign Call Assign Call Assign Call Return return:yes Call Call If Compare Call Raise Assign Call Assign Assign Assign Call For If Compare Assign Assign Assign Call Call Call Call Assign Return return:yes"
  },
  {
    "library": "pandas",
    "name": "_check_for_bom",
    "source_code": "def _check_for_bom(self, first_row: list[Scalar]) -> list[Scalar]:\n    if not first_row:\n        return first_row\n    if not isinstance(first_row[0], str):\n        return first_row\n    if not first_row[0]:\n        return first_row\n    first_elt = first_row[0][0]\n    if first_elt != _BOM:\n        return first_row\n    first_row_bom = first_row[0]\n    new_row: str\n    if len(first_row_bom) > 1 and first_row_bom[1] == self.quotechar:\n        start = 2\n        quote = first_row_bom[1]\n        end = first_row_bom[2:].index(quote) + 2\n        new_row = first_row_bom[start:end]\n        if len(first_row_bom) > end + 1:\n            new_row += first_row_bom[end + 1:]\n    else:\n        new_row = first_row_bom[1:]\n    new_row_list: list[Scalar] = [new_row]\n    return new_row_list + first_row[1:]",
    "docstring": "Checks whether the file begins with the BOM character. If it does, remove it. In addition, if there is quoting in the field subsequent to the BOM, remove it as well because it technically takes place at the beginning of the name, not the middle of it.",
    "type": "method",
    "file_path": "pandas\\pandas\\io\\parsers\\python_parser.py",
    "ast_data": "FunctionDef name:_check_for_bom arg:self arg:first_row arguments arg arg If Return return:yes If Call Return return:yes If Return return:yes Assign If Compare Return return:yes Assign If BoolOp Compare Call Compare Assign Assign Assign Call Assign If Compare Call Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "get_input_mask_at",
    "source_code": "def get_input_mask_at(self, node_index):\n    inputs = self.get_input_at(node_index)\n    if isinstance(inputs, list):\n        return [getattr(x, '_keras_mask', None) for x in inputs]\n    else:\n        return getattr(inputs, '_keras_mask', None)",
    "docstring": "Retrieves the input mask tensor(s) of a layer at a given node. Args: node_index: Integer, index of the node from which to retrieve the attribute. E.g. will correspond to the first time the layer was called. Returns: A mask tensor (or list of tensors if the layer has multiple inputs).",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\base_layer_v1.py",
    "ast_data": "FunctionDef name:get_input_mask_at arg:self arg:node_index arguments arg arg Assign Call If Call Return return:yes Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "ExportOutput",
    "source_code": "class ExportOutput:\n    __metaclass__ = abc.ABCMeta\n    _SEPARATOR_CHAR = '/'\n\n    @abc.abstractmethod\n    def as_signature_def(self, receiver_tensors):\n        pass\n\n    def _check_output_key(self, key, error_label):\n        if isinstance(key, tuple):\n            key = self._SEPARATOR_CHAR.join(key)\n        if not isinstance(key, str):\n            raise ValueError('{} output key must be a string; got {}.'.format(error_label, key))\n        return key\n\n    def _wrap_and_check_outputs(self, outputs, single_output_default_name, error_label=None):\n        if not isinstance(outputs, dict):\n            outputs = {single_output_default_name: outputs}\n        output_dict = {}\n        for key, value in outputs.items():\n            error_name = error_label or single_output_default_name\n            key = self._check_output_key(key, error_name)\n            if not isinstance(value, tensor.Tensor):\n                raise ValueError('{} output value must be a Tensor; got {}.'.format(error_name, value))\n            output_dict[key] = value\n        return output_dict",
    "docstring": "Represents an output of a model that can be served. These typically correspond to model heads.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\saved_model\\model_utils\\export_output.py",
    "ast_data": "ClassDef name:ExportOutput Assign Assign FunctionDef name:as_signature_def arg:self arg:receiver_tensors arguments arg arg FunctionDef name:_check_output_key arg:self arg:key arg:error_label arguments arg arg arg If Call Assign Call If Call Raise Call Call Return return:yes FunctionDef name:_wrap_and_check_outputs arg:self arg:outputs arg:single_output_default_name arg:error_label arguments arg arg arg arg If Call Assign Assign For Call Assign BoolOp Assign Call If Call Raise Call Call Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "CompatibilityError",
    "source_code": "class CompatibilityError(Exception):\n    pass",
    "docstring": "Raised when an error occurs with TFLite compatibility.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\lite\\python\\authoring\\authoring.py",
    "ast_data": "ClassDef name:CompatibilityError"
  },
  {
    "library": "pandas",
    "name": "notna",
    "source_code": "def notna(self) -> npt.NDArray[np.bool_]:\n    return ~self.isna()",
    "docstring": "Inverse of isna Both missing values (-1 in .codes) and NA as a category are detected as null. Returns ------- np.ndarray[bool] of whether my values are not null See Also -------- notna : Top-level notna. notnull : Alias of notna. Categorical.isna : Boolean inverse of Categorical.notna.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\arrays\\categorical.py",
    "ast_data": "FunctionDef name:notna arg:self arguments arg Return return:yes Call"
  },
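  {
    "library": "pandas",
    "name": "categorical_notna_usage_sketch",
    "note": "Editor's addition, not part of the pandas sources: a minimal demonstration that Categorical.notna flags missing values (code -1) as False.",
    "example_code": "import numpy as np\nimport pandas as pd\n\ncat = pd.Categorical(['a', 'b', np.nan])\nprint(cat.codes)    # [ 0  1 -1]; -1 marks the missing value\nprint(cat.notna())  # [ True  True False]",
    "type": "usage_example"
  },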
  {
    "library": "tensorflow",
    "name": "cluster_spec",
    "source_code": "def cluster_spec(self):\n    if self._override_client:\n        client = self._override_client\n    else:\n        from kubernetes import config as k8sconfig\n        from kubernetes import client as k8sclient\n        k8sconfig.load_kube_config()\n        client = k8sclient.CoreV1Api()\n    cluster_map = {}\n    for tf_job in self._job_to_label_mapping:\n        all_pods = []\n        for selector in self._job_to_label_mapping[tf_job]:\n            ret = client.list_pod_for_all_namespaces(label_selector=selector)\n            selected_pods = []\n            for pod in sorted(ret.items, key=lambda x: x.metadata.name):\n                if pod.status.phase == 'Running':\n                    selected_pods.append('%s:%s' % (pod.status.host_ip, self._tf_server_port))\n                else:\n                    raise RuntimeError('Pod \"%s\" is not running; phase: \"%s\"' % (pod.metadata.name, pod.status.phase))\n            all_pods.extend(selected_pods)\n        cluster_map[tf_job] = all_pods\n    return server_lib.ClusterSpec(cluster_map)",
    "docstring": "Returns a ClusterSpec object based on the latest info from Kubernetes. We retrieve the information from the Kubernetes master every time this method is called. Returns: A ClusterSpec containing host information returned from Kubernetes. Raises: RuntimeError: If any of the pods returned by the master is not in the phase.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\cluster_resolver\\kubernetes_cluster_resolver.py",
    "ast_data": "FunctionDef name:cluster_spec arg:self arguments arg If Assign Call Assign Call Assign For Assign For Assign Call Assign For Call arguments arg If Compare Call Raise Call Call Assign Return return:yes Call"
  },
  {
    "library": "django",
    "name": "password_validators_help_texts",
    "source_code": "def password_validators_help_texts(password_validators=None):\n    help_texts = []\n    if password_validators is None:\n        password_validators = get_default_password_validators()\n    for validator in password_validators:\n        help_texts.append(validator.get_help_text())\n    return help_texts",
    "docstring": "Return a list of all help texts of all configured validators.",
    "type": "function",
    "file_path": "django\\django\\contrib\\auth\\password_validation.py",
    "ast_data": "FunctionDef name:password_validators_help_texts arg:password_validators arguments arg Assign If Compare Assign Call For Call Call Return return:yes"
  },
  {
    "library": "django",
    "name": "redirect",
    "source_code": "def redirect(to, *args, permanent=False, preserve_request=False, **kwargs):\n    redirect_class = HttpResponsePermanentRedirect if permanent else HttpResponseRedirect\n    return redirect_class(resolve_url(to, *args, **kwargs), preserve_request=preserve_request)",
    "docstring": "Return an HttpResponseRedirect to the appropriate URL for the arguments passed. The arguments could be: * A model: the model's function will be called. * A view name, possibly with arguments: will be used to reverse-resolve the name. * A URL, which will be used as-is for the redirect location. Issues a temporary redirect by default. Set permanent=True to issue a permanent redirect. Set preserve_request=True to instruct the user agent to preserve the original HTTP method and body when following the redirect.",
    "type": "function",
    "file_path": "django\\django\\shortcuts.py",
    "ast_data": "FunctionDef name:redirect arg:to arguments arg arg arg arg arg Assign Return return:yes Call Call"
  },
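  {
    "library": "django",
    "name": "redirect_usage_sketch",
    "note": "Editor's addition, not part of the django sources: view functions sketching the three argument forms; the URL name 'post-detail' and the post model instance are hypothetical.",
    "example_code": "from django.shortcuts import redirect\n\ndef by_view_name(request):\n    # reverse-resolves 'post-detail' with pk=42, returns a 302 response\n    return redirect('post-detail', pk=42)\n\ndef by_url(request):\n    # a raw URL is used as-is; permanent=True issues a 301 instead\n    return redirect('/archive/', permanent=True)\n\ndef by_model(request, post):\n    # a model instance: its get_absolute_url() supplies the location\n    return redirect(post)",
    "type": "usage_example"
  },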
  {
    "library": "tensorflow",
    "name": "assert_no_legacy_layers",
    "source_code": "def assert_no_legacy_layers(layers):\n    legacy_layers = [l for l in layers if getattr(l, '_is_legacy_layer', None)]\n    if legacy_layers:\n        layer_str = '\\n'.join(('  ' + str(l) for l in legacy_layers))\n        raise TypeError('The following are legacy tf.layers.Layers:\\n{}\\nTo use keras as a framework (for instance using the Network, Model, or Sequential classes), please use the tf.keras.layers implementation instead. (Or, if writing custom layers, subclass from tf.keras.layers rather than tf.layers)'.format(layer_str))",
    "docstring": "Prevent tf.layers.Layers from being used with Keras. Certain legacy layers inherit from their keras analogs; however they are not supported with keras and can lead to subtle and hard to diagnose bugs. Args: layers: A list of layers to check Raises: TypeError: If any elements of layers are tf.layers.Layers",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\utils\\tf_utils.py",
    "ast_data": "FunctionDef name:assert_no_legacy_layers arg:layers arguments arg Assign Call If Assign Call Call Raise Call Call"
  },
  {
    "library": "tensorflow",
    "name": "get",
    "source_code": "@abstractmethod\ndef get(self):\n    raise NotImplementedError",
    "docstring": "Creates a generator to extract data from the queue. Skip the data if it is . # Returns Generator yielding tuples or .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\utils\\data_utils.py",
    "ast_data": "FunctionDef name:get arg:self arguments arg Raise"
  },
  {
    "library": "django",
    "name": "_path_from_module",
    "source_code": "def _path_from_module(self, module):\n    paths = list(getattr(module, '__path__', []))\n    if len(paths) != 1:\n        filename = getattr(module, '__file__', None)\n        if filename is not None:\n            paths = [os.path.dirname(filename)]\n        else:\n            paths = list(set(paths))\n    if len(paths) > 1:\n        raise ImproperlyConfigured(\"The app module %r has multiple filesystem locations (%r); you must configure this app with an AppConfig subclass with a 'path' class attribute.\" % (module, paths))\n    elif not paths:\n        raise ImproperlyConfigured(\"The app module %r has no filesystem location, you must configure this app with an AppConfig subclass with a 'path' class attribute.\" % module)\n    return paths[0]",
    "docstring": "Attempt to determine app's filesystem path from its module.",
    "type": "method",
    "file_path": "django\\django\\apps\\config.py",
    "ast_data": "FunctionDef name:_path_from_module arg:self arg:module arguments arg arg Assign Call Call If Compare Call Assign Call If Compare Assign Call Assign Call Call If Compare Call Raise Call If Raise Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "dispatch",
    "source_code": "def dispatch(node: torch.fx.Node, registry: _registration.ONNXRegistry) -> tuple[Callable | None, str]:\n    decomp_metas = registry.get_decomps(node.target)\n    is_complex = any((_arg_has_complex_dtype(arg) for arg in node.args)) or any((_arg_has_complex_dtype(arg) for arg in node.kwargs.values()))\n    if is_complex:\n        decomp_metas = [decomp for decomp in decomp_metas if decomp.is_complex]\n        if not decomp_metas:\n            return (None, 'No decompositions registered for the complex-valued input')\n    else:\n        decomp_metas = [decomp for decomp in decomp_metas if not decomp.is_complex]\n        if not decomp_metas:\n            return (None, 'No decompositions registered for the real-valued input')\n    if len(decomp_metas) == 1:\n        return (decomp_metas[0].onnx_function, 'Fast path: Only one decomposition is defined')\n    overload, message = get_matching_overload(node, decomp_metas)\n    return (overload, message)",
    "docstring": "Dispatch a node to an ONNX function based on the node's target and the ONNX registry. Args: node: The node to dispatch. registry: The ONNX registry to use for dispatching. Returns: A tuple containing the matched ONNX function and a string describing the reason for failure or success.",
    "type": "function",
    "file_path": "pytorch\\torch\\onnx\\_internal\\exporter\\_dispatching.py",
    "ast_data": "FunctionDef name:dispatch arg:node arg:registry arguments arg arg Assign Call Assign BoolOp Call Call Call Call Call If Assign If Return return:yes Assign If Return return:yes If Compare Call Return return:yes Assign Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "refine",
    "source_code": "def refine(self):\n    graph = self.traced.graph\n    for n in graph.nodes:\n        self.refine_node(n)\n    return True",
    "docstring": "Generates constraints for every node in the graph based on the operation.",
    "type": "method",
    "file_path": "pytorch\\torch\\fx\\experimental\\graph_gradual_typechecker.py",
    "ast_data": "FunctionDef name:refine arg:self arguments arg Assign For Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "set_position",
    "source_code": "def set_position(self, xy):\n    self.set_x(xy[0])\n    self.set_y(xy[1])",
    "docstring": "Set the (*x*, *y*) position of the text. Parameters ---------- xy : (float, float)",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\text.py",
    "ast_data": "FunctionDef name:set_position arg:self arg:xy arguments arg arg Call Call"
  },
  {
    "library": "tensorflow",
    "name": "record",
    "source_code": "def record():\n    if step is None:\n        raise ValueError('No step set. Please specify one either through the `step` argument or through tf.summary.experimental.set_step()')\n    with ops.device('cpu:0'):\n        summary_tensor = tensor() if callable(tensor) else array_ops.identity(tensor)\n        writer = _summary_state.writer\n        summary_value = _maybe_convert_tensor_to_dtensor(writer, summary_tensor)\n        step_value = _maybe_convert_tensor_to_dtensor(writer, step)\n        write_summary_op = gen_summary_ops.write_summary(writer._resource, step_value, summary_value, tag, serialized_metadata, name=scope)\n        with ops.control_dependencies([write_summary_op]):\n            return constant_op.constant(True)",
    "docstring": "Record the actual summary and return True.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\summary_ops_v2.py",
    "ast_data": "FunctionDef name:record arguments If Compare Raise Call With Call Assign Call Call Call Assign Assign Call Assign Call Assign Call With Call Return return:yes Call"
  },
  {
    "library": "numpy",
    "name": "poly2leg",
    "source_code": "def poly2leg(pol):\n    [pol] = pu.as_series([pol])\n    deg = len(pol) - 1\n    res = 0\n    for i in range(deg, -1, -1):\n        res = legadd(legmulx(res), pol[i])\n    return res",
    "docstring": "Convert a polynomial to a Legendre series. Convert an array representing the coefficients of a polynomial (relative to the \"standard\" basis) ordered from lowest degree to highest, to an array of the coefficients of the equivalent Legendre series, ordered from lowest to highest degree. Parameters ---------- pol : array_like 1-D array containing the polynomial coefficients Returns ------- c : ndarray 1-D array containing the coefficients of the equivalent Legendre series. See Also -------- leg2poly Notes ----- The easy way to do conversions between polynomial basis sets is to use the convert method of a class instance. Examples -------- >>> import numpy as np >>> from numpy import polynomial as P >>> p = P.Polynomial(np.arange(4)) >>> p Polynomial([0., 1., 2., 3.], domain=[-1., 1.], window=[-1., 1.], ... >>> c = P.Legendre(P.legendre.poly2leg(p.coef)) >>> c Legendre([ 1. , 3.25, 1. , 0.75], domain=[-1, 1], window=[-1, 1]) # may vary",
    "type": "function",
    "file_path": "numpy\\numpy\\polynomial\\legendre.py",
    "ast_data": "FunctionDef name:poly2leg arg:pol arguments arg Assign Call Assign Call Assign For Call Assign Call Call Return return:yes"
  },
  {
    "library": "django",
    "name": "start_object",
    "source_code": "def start_object(self, obj):\n    raise NotImplementedError('subclasses of Serializer must provide a start_object() method')",
    "docstring": "Called when serializing of an object starts.",
    "type": "method",
    "file_path": "django\\django\\core\\serializers\\base.py",
    "ast_data": "FunctionDef name:start_object arg:self arg:obj arguments arg arg Raise Call"
  },
  {
    "library": "matplotlib",
    "name": "__init__",
    "source_code": "def __init__(self, segments, *, zorder=2, **kwargs):\n    kwargs.setdefault('facecolors', 'none')\n    super().__init__(zorder=zorder, **kwargs)\n    self.set_segments(segments)",
    "docstring": "Parameters ---------- segments : list of (N, 2) array-like A sequence `lines.linewidthcolorlines.colorlines.antialiasedcolorcolor.PathCollection~.path.Path.CLOSEPOLY.Collection`.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\collections.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:segments arguments arg arg arg arg Call Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_undeduplicated_weights",
    "source_code": "@property\ndef _undeduplicated_weights(self):\n    self._assert_weights_created()\n    weights = []\n    for layer in self._self_tracked_trackables:\n        weights += layer.variables\n    weights += self._trainable_weights + self._non_trainable_weights\n    return weights",
    "docstring": "Returns the undeduplicated list of all layer variables/weights.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\training.py",
    "ast_data": "FunctionDef name:_undeduplicated_weights arg:self arguments arg Call Assign For Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "make_estimator",
    "source_code": "@abstractmethod\ndef make_estimator(self, params):\n    pass",
    "docstring": "Return an instance of the estimator for a combination of parameters",
    "type": "method",
    "file_path": "scikit-learn\\asv_benchmarks\\benchmarks\\common.py",
    "ast_data": "FunctionDef name:make_estimator arg:self arg:params arguments arg arg"
  },
  {
    "library": "django",
    "name": "get_related_field",
    "source_code": "def get_related_field(self):\n    field = self.model._meta.get_field(self.field_name)\n    if not field.concrete:\n        raise exceptions.FieldDoesNotExist(\"No related field named '%s'\" % self.field_name)\n    return field",
    "docstring": "Return the Field in the 'to' object to which this relationship is tied.",
    "type": "method",
    "file_path": "django\\django\\db\\models\\fields\\reverse_related.py",
    "ast_data": "FunctionDef name:get_related_field arg:self arguments arg Assign Call If Raise Call Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "get_metadata_routing",
    "source_code": "def get_metadata_routing(self):\n    router = MetadataRouter(owner=self.__class__.__name__)\n    transformers = chain(self.transformers, [('remainder', self.remainder, None)])\n    for name, step, _ in transformers:\n        method_mapping = MethodMapping()\n        if hasattr(step, 'fit_transform'):\n            method_mapping.add(caller='fit', callee='fit_transform').add(caller='fit_transform', callee='fit_transform')\n        else:\n            method_mapping.add(caller='fit', callee='fit').add(caller='fit', callee='transform').add(caller='fit_transform', callee='fit').add(caller='fit_transform', callee='transform')\n        method_mapping.add(caller='transform', callee='transform')\n        router.add(method_mapping=method_mapping, **{name: step})\n    return router",
    "docstring": "Get metadata routing of this object. Please check :ref: on how the routing mechanism works. .. versionadded:: 1.4 Returns ------- routing : MetadataRouter A :class: encapsulating routing information.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\compose\\_column_transformer.py",
    "ast_data": "FunctionDef name:get_metadata_routing arg:self arguments arg Assign Call Assign Call For Assign Call If Call Call Call Call Call Call Call Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_Process",
    "source_code": "class _Process(multi_process_lib.Process):\n\n    def __init__(self, test_env, **kwargs):\n        super(_Process, self).__init__(**kwargs)\n        self._test_env = test_env\n        self._actual_run = getattr(self, 'run')\n        self.run = self._run_with_setenv\n\n    def _run_with_setenv(self):\n        test_env = self._test_env\n        if test_env.grpc_fail_fast is not None:\n            os.environ['GRPC_FAIL_FAST'] = str(test_env.grpc_fail_fast)\n        if test_env.visible_gpus:\n            os.environ['CUDA_VISIBLE_DEVICES'] = ','.join([str(i) for i in test_env.visible_gpus])\n        _set_tf_config(test_env.task_type, test_env.task_id, test_env.cluster_spec, test_env.rpc_layer)\n        return self._actual_run()",
    "docstring": "A modified that can set up environment variables.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\multi_process_runner.py",
    "ast_data": "ClassDef name:_Process FunctionDef name:__init__ arg:self arg:test_env arguments arg arg arg Call Call Assign Assign Call Assign FunctionDef name:_run_with_setenv arg:self arguments arg Assign If Compare Assign Call If Assign Call Call Call Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "engine",
    "source_code": "@property\ndef engine(self) -> str:\n    return self._engine",
    "docstring": "Name of engine.",
    "type": "method",
    "file_path": "pandas\\pandas\\io\\excel\\_base.py",
    "ast_data": "FunctionDef name:engine arg:self arguments arg Return return:yes"
  },
  {
    "library": "django",
    "name": "index",
    "source_code": "def index(self, val):\n    for i in range(0, len(self)):\n        if self[i] == val:\n            return i\n    raise ValueError('%s not found in object' % val)",
    "docstring": "Standard list index method",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\geos\\mutable_list.py",
    "ast_data": "FunctionDef name:index arg:self arg:val arguments arg arg For Call Call If Compare Return return:yes Raise Call"
  },
  {
    "library": "tensorflow",
    "name": "monitored_timer",
    "source_code": "def monitored_timer(cell):\n\n    def actual_decorator(func):\n\n        @functools.wraps(func)\n        def wrapper(*args, **kwargs):\n            with MonitoredTimer(cell):\n                return func(*args, **kwargs)\n        return wrapper\n    return actual_decorator",
    "docstring": "A function decorator for adding MonitoredTimer support. Args: cell: the cell associated with the time metric that will be inremented. Returns: A decorator that measure the function runtime and increment the specified counter cell.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\monitoring.py",
    "ast_data": "FunctionDef name:monitored_timer arg:cell arguments arg FunctionDef name:actual_decorator arg:func arguments arg FunctionDef name:wrapper arguments arg arg With Call Return return:yes Call Call Return return:yes Return return:yes"
  },
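  {
    "library": "tensorflow",
    "name": "monitored_timer_pattern_sketch",
    "note": "Editor's addition, not part of the tensorflow sources: a pure-Python analogue of the decorator pattern above, with a stand-in cell object in place of a real monitoring cell, showing how each call's runtime flows into the cell.",
    "example_code": "import functools\nimport time\n\nclass StubCell:\n    # stand-in for a monitoring cell: just accumulates elapsed seconds\n    def __init__(self):\n        self.total = 0.0\n\n    def increase_by(self, value):\n        self.total += value\n\ndef stub_monitored_timer(cell):\n    def actual_decorator(func):\n        @functools.wraps(func)\n        def wrapper(*args, **kwargs):\n            start = time.monotonic()\n            try:\n                return func(*args, **kwargs)\n            finally:\n                cell.increase_by(time.monotonic() - start)\n        return wrapper\n    return actual_decorator\n\ncell = StubCell()\n\n@stub_monitored_timer(cell)\ndef work():\n    time.sleep(0.01)\n\nwork()\nprint(cell.total > 0)  # True",
    "type": "usage_example"
  },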
  {
    "library": "pandas",
    "name": "to_frame",
    "source_code": "def to_frame(self, index: bool=True, name: Hashable=lib.no_default) -> DataFrame:\n    from pandas import DataFrame\n    if name is lib.no_default:\n        result_name = self._get_level_names()\n    else:\n        result_name = Index([name])\n    result = DataFrame(self, copy=False)\n    result.columns = result_name\n    if index:\n        result.index = self\n    return result",
    "docstring": "Create a DataFrame with a column containing the Index. Parameters ---------- index : bool, default True Set the index of the returned DataFrame as the original Index. name : object, defaults to index.name The passed name should substitute for the index name (if it has one). Returns ------- DataFrame DataFrame containing the original Index data. See Also -------- Index.to_series : Convert an Index to a Series. Series.to_frame : Convert Series to DataFrame. Examples -------- >>> idx = pd.Index([\"Ant\", \"Bear\", \"Cow\"], name=\"animal\") >>> idx.to_frame() animal animal Ant Ant Bear Bear Cow Cow By default, the original Index is reused. To enforce a new Index: >>> idx.to_frame(index=False) animal 0 Ant 1 Bear 2 Cow To override the name of the resulting column, specify : >>> idx.to_frame(index=False, name=\"zoo\") zoo 0 Ant 1 Bear 2 Cow",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\indexes\\base.py",
    "ast_data": "FunctionDef name:to_frame arg:self arg:index arg:name arguments arg arg arg If Compare Assign Call Assign Call Assign Call Assign If Assign Return return:yes"
  },
  {
    "library": "scipy",
    "name": "_x_replacer",
    "source_code": "def _x_replacer(args, kwargs, dispatchables):\n    if len(args) > 0:\n        return ((dispatchables[0],) + args[1:], kwargs)\n    kw = kwargs.copy()\n    kw['x'] = dispatchables[0]\n    return (args, kw)",
    "docstring": "uarray argument replacer to replace the transform input array (``)",
    "type": "function",
    "file_path": "scipy\\scipy\\fft\\_basic.py",
    "ast_data": "FunctionDef name:_x_replacer arg:args arg:kwargs arg:dispatchables arguments arg arg arg If Compare Call Return return:yes Assign Call Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_broadcast_cat_event_and_params",
    "source_code": "def _broadcast_cat_event_and_params(event, params, base_dtype):\n    if event.dtype.is_integer:\n        pass\n    elif event.dtype.is_floating:\n        event = math_ops.cast(event, dtype=dtypes.int32)\n    else:\n        raise TypeError('`value` should have integer `dtype` or `self.dtype` ({})'.format(base_dtype))\n    shape_known_statically = params.shape.ndims is not None and params.shape[:-1].is_fully_defined() and event.shape.is_fully_defined()\n    if not shape_known_statically or params.shape[:-1] != event.shape:\n        params *= array_ops.ones_like(event[..., array_ops.newaxis], dtype=params.dtype)\n        params_shape = array_ops.shape(params)[:-1]\n        event *= array_ops.ones(params_shape, dtype=event.dtype)\n        if params.shape.ndims is not None:\n            event.set_shape(tensor_shape.TensorShape(params.shape[:-1]))\n    return (event, params)",
    "docstring": "Broadcasts the event or distribution parameters.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\distributions\\categorical.py",
    "ast_data": "FunctionDef name:_broadcast_cat_event_and_params arg:event arg:params arg:base_dtype arguments arg arg arg If If Assign Call Raise Call Call Assign BoolOp Compare Call Call If BoolOp Compare Call Assign Call Call If Compare Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_matrix_exp_pade5",
    "source_code": "def _matrix_exp_pade5(matrix):\n    b = [30240.0, 15120.0, 3360.0, 420.0, 30.0]\n    b = [constant_op.constant(x, matrix.dtype) for x in b]\n    ident = linalg_ops.eye(array_ops.shape(matrix)[-2], batch_shape=array_ops.shape(matrix)[:-2], dtype=matrix.dtype)\n    matrix_2 = math_ops.matmul(matrix, matrix)\n    matrix_4 = math_ops.matmul(matrix_2, matrix_2)\n    tmp = matrix_4 + b[3] * matrix_2 + b[1] * ident\n    matrix_u = math_ops.matmul(matrix, tmp)\n    matrix_v = b[4] * matrix_4 + b[2] * matrix_2 + b[0] * ident\n    return (matrix_u, matrix_v)",
    "docstring": "5th-order Pade approximant for matrix exponential.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\linalg\\linalg_impl.py",
    "ast_data": "FunctionDef name:_matrix_exp_pade5 arg:matrix arguments arg Assign Assign Call Assign Call Call Call Assign Call Assign Call Assign Assign Call Assign Return return:yes"
  },
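  {
    "library": "tensorflow",
    "name": "pade5_expm_numpy_check",
    "note": "Editor's addition, not part of the tensorflow sources: a numpy sketch of the same [5/5] Pade construction, recombined as expm(A) ~= solve(V - U, V + U) (the step _matrix_exp_pade5 leaves to its caller), checked against scipy for a small-norm matrix.",
    "example_code": "import numpy as np\nfrom scipy.linalg import expm\n\ndef pade5_expm(a):\n    b = [30240.0, 15120.0, 3360.0, 420.0, 30.0]\n    ident = np.eye(a.shape[0])\n    a2 = a @ a\n    a4 = a2 @ a2\n    u = a @ (a4 + b[3] * a2 + b[1] * ident)   # odd part of the approximant\n    v = b[4] * a4 + b[2] * a2 + b[0] * ident  # even part\n    return np.linalg.solve(v - u, v + u)\n\na = np.array([[0.0, 0.1], [-0.1, 0.0]])\nprint(np.allclose(pade5_expm(a), expm(a)))  # True for small-norm input",
    "type": "usage_example"
  },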
  {
    "library": "pytorch",
    "name": "dropout1d",
    "source_code": "def dropout1d(input: Tensor, p: float=0.5, training: bool=True, inplace: bool=False) -> Tensor:\n    if has_torch_function_unary(input):\n        return handle_torch_function(dropout1d, (input,), input, p=p, training=training, inplace=inplace)\n    if p < 0.0 or p > 1.0:\n        raise ValueError(f'dropout probability has to be between 0 and 1, but got {p}')\n    inp_dim = input.dim()\n    if inp_dim not in (2, 3):\n        raise RuntimeError(f'dropout1d: Expected 2D or 3D input, but received a {inp_dim}D input. Note that dropout1d exists to provide channel-wise dropout on inputs with 1 spatial dimension, a channel dimension, and an optional batch dimension (i.e. 2D or 3D inputs).')\n    is_batched = inp_dim == 3\n    if not is_batched:\n        input = input.unsqueeze_(0) if inplace else input.unsqueeze(0)\n    result = _VF.feature_dropout_(input, p, training) if inplace else _VF.feature_dropout(input, p, training)\n    if not is_batched:\n        result = result.squeeze_(0) if inplace else result.squeeze(0)\n    return result",
    "docstring": "Randomly zero out entire channels (a channel is a 1D feature map). For example, the :math:-th channel of the :math:-th sample in the batched input is a 1D tensor :math: of the input tensor. Each channel will be zeroed out independently on every forward call with probability :attr: using samples from a Bernoulli distribution. See :class: for details. Args: p: probability of a channel to be zeroed. Default: 0.5 training: apply dropout if is ``",
    "type": "function",
    "file_path": "pytorch\\torch\\nn\\functional.py",
    "ast_data": "FunctionDef name:dropout1d arg:input arg:p arg:training arg:inplace arguments arg arg arg arg If Call Return return:yes Call If BoolOp Compare Compare Raise Call Assign Call If Compare Raise Call Assign Compare If Assign Call Call Assign Call Call If Assign Call Call Return return:yes"
  },
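  {
    "library": "pytorch",
    "name": "dropout1d_usage_sketch",
    "note": "Editor's addition, not part of the pytorch sources: a short demonstration that dropout1d zeroes whole channels and rescales survivors by 1 / (1 - p).",
    "example_code": "import torch\nimport torch.nn.functional as F\n\nx = torch.ones(4, 3, 8)  # (batch, channels, length)\nout = F.dropout1d(x, p=0.5, training=True)\n# every value within a channel shares the same fate:\n# the channel is all zeros, or all entries are scaled to 2.0\nprint(out[0])",
    "type": "usage_example"
  },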
  {
    "library": "tensorflow",
    "name": "tpu_device_name_at_coordinates",
    "source_code": "def tpu_device_name_at_coordinates(self, device_coordinates, job=None):\n    return _tpu_device_name(job, self._topology_tasks[tuple(device_coordinates)], self._topology_devices[tuple(device_coordinates)])",
    "docstring": "Returns the name of the TPU device assigned to a logical core.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\tpu\\topology.py",
    "ast_data": "FunctionDef name:tpu_device_name_at_coordinates arg:self arg:device_coordinates arg:job arguments arg arg arg Return return:yes Call Call Call"
  },
  {
    "library": "matplotlib",
    "name": "_find_tool_class",
    "source_code": "def _find_tool_class(canvas_cls, tool_cls):\n    for canvas_parent in canvas_cls.__mro__:\n        for tool_child in _api.recursive_subclasses(tool_cls):\n            if (canvas_parent, tool_child) in _tool_registry:\n                return tool_child\n    return tool_cls",
    "docstring": "Find a subclass of *tool_cls* registered for *canvas_cls*.",
    "type": "function",
    "file_path": "matplotlib\\lib\\matplotlib\\backend_tools.py",
    "ast_data": "FunctionDef name:_find_tool_class arg:canvas_cls arg:tool_cls arguments arg arg For For Call If Compare Return return:yes Return return:yes"
  },
  {
    "library": "scipy",
    "name": "to_ss",
    "source_code": "def to_ss(self):\n    return StateSpace(*zpk2ss(self.zeros, self.poles, self.gain), **self._dt_dict)",
    "docstring": "Convert system representation to . Returns ------- sys : instance of State space model of the current system",
    "type": "method",
    "file_path": "scipy\\scipy\\signal\\_ltisys.py",
    "ast_data": "FunctionDef name:to_ss arg:self arguments arg Return return:yes Call Call"
  },
  {
    "library": "matplotlib",
    "name": "rc_params",
    "source_code": "def rc_params(fail_on_error=False):\n    return rc_params_from_file(matplotlib_fname(), fail_on_error)",
    "docstring": "Construct a instance from the default Matplotlib rc file.",
    "type": "function",
    "file_path": "matplotlib\\lib\\matplotlib\\__init__.py",
    "ast_data": "FunctionDef name:rc_params arg:fail_on_error arguments arg Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "_load_local",
    "source_code": "def _load_local(hubconf_dir, model, *args, **kwargs):\n    with _add_to_sys_path(hubconf_dir):\n        hubconf_path = os.path.join(hubconf_dir, MODULE_HUBCONF)\n        hub_module = _import_module(MODULE_HUBCONF, hubconf_path)\n        entry = _load_entry_from_hubconf(hub_module, model)\n        model = entry(*args, **kwargs)\n    return model",
    "docstring": "Load a model from a local directory with a ``. Returns: a single model with corresponding pretrained weights. Example: >>> # xdoctest: +SKIP(\"stub local path\") >>> path = \"/some/local/path/pytorch/vision\" >>> model = _load_local( ... path, ... \"resnet50\", ... weights=\"ResNet50_Weights.IMAGENET1K_V1\", ... )",
    "type": "function",
    "file_path": "pytorch\\torch\\hub.py",
    "ast_data": "FunctionDef name:_load_local arg:hubconf_dir arg:model arguments arg arg arg arg With Call Assign Call Assign Call Assign Call Assign Call Return return:yes"
  },
  {
    "library": "scipy",
    "name": "_solve_discrete_lyapunov_bilinear",
    "source_code": "def _solve_discrete_lyapunov_bilinear(a, q):\n    eye = np.eye(a.shape[0])\n    aH = a.conj().transpose()\n    aHI_inv = inv(aH + eye)\n    b = np.dot(aH - eye, aHI_inv)\n    c = 2 * np.dot(np.dot(inv(a + eye), q), aHI_inv)\n    return solve_lyapunov(b.conj().transpose(), -c)",
    "docstring": "Solves the discrete Lyapunov equation using a bilinear transformation. This function is called by the function with . It is not supposed to be called directly.",
    "type": "function",
    "file_path": "scipy\\scipy\\linalg\\_solvers.py",
    "ast_data": "FunctionDef name:_solve_discrete_lyapunov_bilinear arg:a arg:q arguments arg arg Assign Call Assign Call Call Assign Call Assign Call Assign Call Call Call Return return:yes Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "OutputAdaptStep",
    "source_code": "@runtime_checkable\nclass OutputAdaptStep(Protocol):\n\n    def apply(self, model_outputs: Any, model: torch.nn.Module | Callable | torch_export.ExportedProgram | None=None) -> Any:\n        ...",
    "docstring": "A protocol that defines a step in the output adapting process. The output adapting process is a sequence of steps that are applied to the PyTorch model outputs to transform them into the outputs format produced by the exported ONNX model. Each step takes the PyTorch model outputs as arguments and returns the transformed outputs. This serves as a base formalized construct for the transformation done to model output signature by any individual component in the exporter.",
    "type": "class",
    "file_path": "pytorch\\torch\\onnx\\_internal\\io_adapter.py",
    "ast_data": "ClassDef name:OutputAdaptStep FunctionDef name:apply arg:self arg:model_outputs arg:model arguments arg arg arg"
  },
  {
    "library": "scipy",
    "name": "_anderson_ksamp_midrank",
    "source_code": "def _anderson_ksamp_midrank(samples, Z, Zstar, k, n, N):\n    A2akN = 0.0\n    Z_ssorted_left = Z.searchsorted(Zstar, 'left')\n    if N == Zstar.size:\n        lj = 1.0\n    else:\n        lj = Z.searchsorted(Zstar, 'right') - Z_ssorted_left\n    Bj = Z_ssorted_left + lj / 2.0\n    for i in arange(0, k):\n        s = np.sort(samples[i])\n        s_ssorted_right = s.searchsorted(Zstar, side='right')\n        Mij = s_ssorted_right.astype(float)\n        fij = s_ssorted_right - s.searchsorted(Zstar, 'left')\n        Mij -= fij / 2.0\n        inner = lj / float(N) * (N * Mij - Bj * n[i]) ** 2 / (Bj * (N - Bj) - N * lj / 4.0)\n        A2akN += inner.sum() / n[i]\n    A2akN *= (N - 1.0) / N\n    return A2akN",
    "docstring": "Compute A2akN equation 7 of Scholz and Stephens. Parameters ---------- samples : sequence of 1-D array_like Array of sample arrays. Z : array_like Sorted array of all observations. Zstar : array_like Sorted array of unique observations. k : int Number of samples. n : array_like Number of observations in each sample. N : int Total number of observations. Returns ------- A2aKN : float The A2aKN statistics of Scholz and Stephens 1987.",
    "type": "function",
    "file_path": "scipy\\scipy\\stats\\_morestats.py",
    "ast_data": "FunctionDef name:_anderson_ksamp_midrank arg:samples arg:Z arg:Zstar arg:k arg:n arg:N arguments arg arg arg arg arg arg Assign Assign Call If Compare Assign Assign Call Assign For Call Assign Call Assign Call Assign Call Assign Call Assign Call Call Return return:yes"
  },
  {
    "library": "kornia",
    "name": "AdjustContrastWithMeanSubtraction",
    "source_code": "class AdjustContrastWithMeanSubtraction(Module):\n\n    def __init__(self, contrast_factor: Union[float, Tensor]) -> None:\n        super().__init__()\n        self.contrast_factor: Union[float, Tensor] = contrast_factor\n\n    def forward(self, input: Tensor) -> Tensor:\n        return adjust_contrast_with_mean_subtraction(input, self.contrast_factor)",
    "docstring": "Adjust Contrast of an image. This implementation aligns PIL. Hence, the output is close to TorchVision. The input image is expected to be in the range of [0, 1]. Args: contrast_factor: Contrast adjust factor per element in the batch by subtracting its mean grayscaled version. 0 generates a completely black image, 1 does not modify the input image while any other non-negative number modify the brightness by this factor. Shape: - Input: Image/Input to be adjusted in the shape of :math:. - Output: Adjusted image in the shape of :math:. Example: >>> x = torch.ones(1, 1, 3, 3) >>> AdjustContrastWithMeanSubtraction(0.5)(x) tensor([[[[1., 1., 1.], [1., 1., 1.], [1., 1., 1.]]]]) >>> x = torch.ones(2, 5, 3, 3) >>> y = torch.ones(2) >>> AdjustContrastWithMeanSubtraction(y)(x).shape torch.Size([2, 5, 3, 3])",
    "type": "class",
    "file_path": "kornia\\kornia\\enhance\\adjust.py",
    "ast_data": "ClassDef name:AdjustContrastWithMeanSubtraction FunctionDef name:__init__ arg:self arg:contrast_factor arguments arg arg Call Call FunctionDef name:forward arg:self arg:input arguments arg arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "function_from_graph_def",
    "source_code": "def function_from_graph_def(graph_def, inputs, outputs, captures=None):\n\n    def _imports_graph_def():\n        importer.import_graph_def(graph_def, name='')\n        graph = ops.get_default_graph()\n        if captures is not None:\n            for c in captures:\n                graph.add_capture(captures[c], graph.get_tensor_by_name(str(c) + ':0'))\n    wrapped_import = wrap_function(_imports_graph_def, [])\n    import_graph = wrapped_import.graph\n    return wrapped_import.prune(nest.map_structure(import_graph.as_graph_element, inputs), nest.map_structure(import_graph.as_graph_element, outputs))",
    "docstring": "Creates a ConcreteFunction from a GraphDef. Args: graph_def: A GraphDef to make a function out of. inputs: A Tensor name or nested structure of names in which should be inputs to the function. outputs: A Tensor name or nested structure of names in which should be outputs of the function. captures: (Optional) A dictionary mapping node names in that should be captured as inputs to tensors containing the value of the captured inputs. Returns: A ConcreteFunction.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\wrap_function.py",
    "ast_data": "FunctionDef name:function_from_graph_def arg:graph_def arg:inputs arg:outputs arg:captures arguments arg arg arg arg FunctionDef name:_imports_graph_def arguments Call Assign Call If Compare For Call Call Call Assign Call Assign Return return:yes Call Call Call"
  },
  {
    "library": "cryptography",
    "name": "load_application",
    "source_code": "def load_application(data) -> tuple[memoryview, memoryview]:\n    application, data = _get_sshstr(data)\n    if not application.tobytes().startswith(b'ssh:'):\n        raise ValueError(f\"U2F application string does not start with b'ssh:' ({application})\")\n    return (application, data)",
    "docstring": "U2F application strings",
    "type": "function",
    "file_path": "cryptography\\src\\cryptography\\hazmat\\primitives\\serialization\\ssh.py",
    "ast_data": "FunctionDef name:load_application arg:data arguments arg Assign Call If Call Call Raise Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "get_texmanager",
    "source_code": "def get_texmanager(self):\n    if self._texmanager is None:\n        self._texmanager = TexManager()\n    return self._texmanager",
    "docstring": "Return the instance.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\backend_bases.py",
    "ast_data": "FunctionDef name:get_texmanager arg:self arguments arg If Compare Assign Call Return return:yes"
  },
  {
    "library": "scrapy",
    "name": "OriginPolicy",
    "source_code": "class OriginPolicy(ReferrerPolicy):\n    name: str = POLICY_ORIGIN\n\n    def referrer(self, response_url: str, request_url: str) -> str | None:\n        return self.origin_referrer(response_url)",
    "docstring": "The \"origin\" policy specifies that only the ASCII serialization of the origin of the request client is sent as referrer information when making both same-origin requests and cross-origin requests from a particular request client.",
    "type": "class",
    "file_path": "scrapy\\scrapy\\spidermiddlewares\\referer.py",
    "ast_data": "ClassDef name:OriginPolicy FunctionDef name:referrer arg:self arg:response_url arg:request_url arguments arg arg arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "conj",
    "source_code": "@tf_export('math.conj', v1=['math.conj', 'conj'])\n@dispatch.register_unary_elementwise_api\n@dispatch.add_dispatch_support\n@deprecation.deprecated_endpoints('conj')\ndef conj(x, name=None):\n    if isinstance(x, tensor_lib.Tensor):\n        dt = x.dtype\n        if dt.is_floating or dt.is_integer:\n            return x\n    with ops.name_scope(name, 'Conj', [x]) as name:\n        x = ops.convert_to_tensor(x, name='x')\n        if x.dtype.is_complex or x.dtype == dtypes.variant:\n            return gen_math_ops.conj(x, name=name)\n        elif x.dtype.is_floating or x.dtype.is_integer:\n            return x\n        else:\n            raise TypeError(f'Expected numeric or variant tensor, got dtype {x.dtype!r}.')",
    "docstring": "Returns the complex conjugate of a complex number. Given a tensor of complex numbers, this operation returns a tensor of complex numbers that are the complex conjugate of each element in . The complex numbers in must be of the form \\\\(a + bj\\\\), where is the real part and is the imaginary part. The complex conjugate returned by this operation is of the form \\\\(a - bj\\\\). For example: >>> x = tf.constant([-2.25 + 4.75j, 3.25 + 5.75j]) >>> tf.math.conj(x) If is real, it is returned unchanged. For example: >>> x = tf.constant([-2.25, 3.25]) >>> tf.math.conj(x) Args: x: to conjugate. Must have numeric or variant type. name: A name for the operation (optional). Returns: A that is the conjugate of (with the same type). Raises: TypeError: If is not a numeric tensor. @compatibility(numpy) Equivalent to numpy.conj. @end_compatibility",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\math_ops.py",
    "ast_data": "FunctionDef name:conj arg:x arg:name arguments arg arg If Call Assign If BoolOp Return return:yes With Call Assign Call If BoolOp Compare Return return:yes Call If BoolOp Return return:yes Raise Call Call Call"
  },
  {
    "library": "django",
    "name": "InvalidBasesError",
    "source_code": "class InvalidBasesError(ValueError):\n    pass",
    "docstring": "A model's base classes can't be resolved.",
    "type": "class",
    "file_path": "django\\django\\db\\migrations\\exceptions.py",
    "ast_data": "ClassDef name:InvalidBasesError"
  },
  {
    "library": "pytorch",
    "name": "_DispatchCacheBypassEntry",
    "source_code": "@dataclass_slots\n@dataclass(frozen=True)\nclass _DispatchCacheBypassEntry:\n    reason: str",
    "docstring": "Entry type for a negative cache entry.",
    "type": "class",
    "file_path": "pytorch\\torch\\_subclasses\\fake_tensor.py",
    "ast_data": "ClassDef name:_DispatchCacheBypassEntry Call"
  },
  {
    "library": "authlib",
    "name": "is_secure_transport",
    "source_code": "def is_secure_transport(uri):\n    if os.getenv('AUTHLIB_INSECURE_TRANSPORT'):\n        return True\n    uri = uri.lower()\n    return uri.startswith(('https://', 'http://localhost:'))",
    "docstring": "Check if the uri is over ssl.",
    "type": "function",
    "file_path": "authlib\\authlib\\common\\security.py",
    "ast_data": "FunctionDef name:is_secure_transport arg:uri arguments arg If Call Return return:yes Assign Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "average_parameters_or_parameter_groups",
    "source_code": "def average_parameters_or_parameter_groups(params: Union[Iterable[torch.nn.Parameter], Iterable[dict[str, torch.nn.Parameter]]], process_group: ProcessGroup):\n    average_parameters(iter(get_params_to_average(params)), process_group)",
    "docstring": "Averages parameters of a model or parameter groups of an optimizer.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\algorithms\\model_averaging\\utils.py",
    "ast_data": "FunctionDef name:average_parameters_or_parameter_groups arg:params arg:process_group arguments arg arg Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "get_dense_tensor",
    "source_code": "def get_dense_tensor(self, transformation_cache, state_manager):\n    if isinstance(self.categorical_column, SequenceCategoricalColumn):\n        raise ValueError('In embedding_column: {}. categorical_column must not be of type SequenceCategoricalColumn. Suggested fix A: If you wish to use DenseFeatures, use a non-sequence categorical_column_with_*. Suggested fix B: If you wish to create sequence input, use SequenceFeatures instead of DenseFeatures. Given (type {}): {}'.format(self.name, type(self.categorical_column), self.categorical_column))\n    return self._get_dense_tensor_internal(transformation_cache, state_manager)",
    "docstring": "Returns the embedding lookup result.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\feature_column\\feature_column_v2.py",
    "ast_data": "FunctionDef name:get_dense_tensor arg:self arg:transformation_cache arg:state_manager arguments arg arg arg If Call Raise Call Call Call Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "write",
    "source_code": "def write(self, obj, **kwargs) -> None:\n    name = obj.name or 'values'\n    newobj, self.levels = self.validate_multiindex(obj)\n    assert isinstance(self.levels, list)\n    cols = list(self.levels)\n    cols.append(name)\n    newobj.columns = Index(cols)\n    super().write(obj=newobj, **kwargs)",
    "docstring": "we are going to write this as a frame table",
    "type": "method",
    "file_path": "pandas\\pandas\\io\\pytables.py",
    "ast_data": "FunctionDef name:write arg:self arg:obj arguments arg arg arg Assign BoolOp Assign Call Call Assign Call Call Assign Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "remove_unaligned_input_idxs",
    "source_code": "def remove_unaligned_input_idxs(inputs: Sequence[InputType], static_input_idxs: Sequence[int]) -> Sequence[int]:\n    aligned_static_input_idxs = []\n    for idx in static_input_idxs:\n        input = inputs[idx]\n        if isinstance(input, torch.Tensor) and input.data_ptr() % ALIGNMENT == 0:\n            aligned_static_input_idxs.append(idx)\n    if len(aligned_static_input_idxs) != len(static_input_idxs):\n        return aligned_static_input_idxs\n    return static_input_idxs",
    "docstring": "We require all inputs to be aligned, so introduce a copy for any that aren't.",
    "type": "function",
    "file_path": "pytorch\\torch\\_inductor\\utils.py",
    "ast_data": "FunctionDef name:remove_unaligned_input_idxs arg:inputs arg:static_input_idxs arguments arg arg Assign For Assign If BoolOp Call Compare Call Call If Compare Call Call Return return:yes Return return:yes"
  },
  {
    "library": "pandas",
    "name": "is_platform_mac",
    "source_code": "def is_platform_mac() -> bool:\n    return sys.platform == 'darwin'",
    "docstring": "Checking if the running platform is mac. Returns ------- bool True if the running platform is mac.",
    "type": "function",
    "file_path": "pandas\\pandas\\compat\\__init__.py",
    "ast_data": "FunctionDef name:is_platform_mac arguments Return return:yes Compare"
  },
  {
    "library": "matplotlib",
    "name": "auto_set_column_width",
    "source_code": "def auto_set_column_width(self, col):\n    col1d = np.atleast_1d(col)\n    if not np.issubdtype(col1d.dtype, np.integer):\n        raise TypeError('col must be an int or sequence of ints.')\n    for cell in col1d:\n        self._autoColumns.append(cell)\n    self.stale = True",
    "docstring": "Automatically set the widths of given columns to optimal sizes. Parameters ---------- col : int or sequence of ints The indices of the columns to auto-scale.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\table.py",
    "ast_data": "FunctionDef name:auto_set_column_width arg:self arg:col arguments arg arg Assign Call If Call Raise Call For Call Assign"
  },
  {
    "library": "tensorflow",
    "name": "get_all_collection_keys",
    "source_code": "def get_all_collection_keys(self) -> list[str]:\n    with self._lock:\n        return [x for x in self._collections if isinstance(x, str)]",
    "docstring": "Returns a list of collections used in this graph.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\ops.py",
    "ast_data": "FunctionDef name:get_all_collection_keys arg:self arguments arg With Return return:yes Call"
  },
  {
    "library": "seaborn",
    "name": "Jitter",
    "source_code": "@dataclass\nclass Jitter(Move):\n    width: float | Default = default\n    x: float = 0\n    y: float = 0\n    seed: int | None = None\n\n    def __call__(self, data: DataFrame, groupby: GroupBy, orient: str, scales: dict[str, Scale]) -> DataFrame:\n        data = data.copy()\n        rng = np.random.default_rng(self.seed)\n\n        def jitter(data, col, scale):\n            noise = rng.uniform(-0.5, +0.5, len(data))\n            offsets = noise * scale\n            return data[col] + offsets\n        if self.width is default:\n            width = 0.0 if self.x or self.y else 0.2\n        else:\n            width = cast(float, self.width)\n        if self.width:\n            data[orient] = jitter(data, orient, width * data['width'])\n        if self.x:\n            data['x'] = jitter(data, 'x', self.x)\n        if self.y:\n            data['y'] = jitter(data, 'y', self.y)\n        return data",
    "docstring": "Random displacement along one or both axes to reduce overplotting. Parameters ---------- width : float Magnitude of jitter, relative to mark width, along the orientation axis. If not provided, the default value will be 0 when or are set, otherwise there will be a small amount of jitter applied by default. x : float Magnitude of jitter, in data units, along the x axis. y : float Magnitude of jitter, in data units, along the y axis. Examples -------- .. include:: ../docstrings/objects.Jitter.rst",
    "type": "class",
    "file_path": "seaborn\\seaborn\\_core\\moves.py",
    "ast_data": "ClassDef name:Jitter FunctionDef name:__call__ arg:self arg:data arg:groupby arg:orient arg:scales arguments arg arg arg arg arg Assign Call Assign Call FunctionDef name:jitter arg:data arg:col arg:scale arguments arg arg arg Assign Call Call Assign Return return:yes If Compare Assign BoolOp Assign Call If Assign Call If Assign Call If Assign Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "get_min_supported",
    "source_code": "def get_min_supported(self) -> OpsetVersion:\n    return min(self._functions)",
    "docstring": "Returns the lowest built-in opset version supported by the function.",
    "type": "method",
    "file_path": "pytorch\\torch\\onnx\\_internal\\registration.py",
    "ast_data": "FunctionDef name:get_min_supported arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_GatherReturnElements",
    "source_code": "def _GatherReturnElements(requested_return_elements, graph, results):\n    return_outputs = c_api.TF_ImportGraphDefResultsReturnOutputs(results)\n    return_opers = c_api.TF_ImportGraphDefResultsReturnOperations(results)\n    combined_return_elements = []\n    outputs_idx = 0\n    opers_idx = 0\n    for name in requested_return_elements:\n        if ':' in name:\n            combined_return_elements.append(graph._get_tensor_by_tf_output(return_outputs[outputs_idx]))\n            outputs_idx += 1\n        else:\n            combined_return_elements.append(graph._get_operation_by_tf_operation(return_opers[opers_idx]))\n            opers_idx += 1\n    return combined_return_elements",
    "docstring": "Returns the requested return elements from results. Args: requested_return_elements: list of strings of operation and tensor names graph: Graph results: wrapped TF_ImportGraphDefResults Returns: list of and/or objects",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\importer.py",
    "ast_data": "FunctionDef name:_GatherReturnElements arg:requested_return_elements arg:graph arg:results arguments arg arg arg Assign Call Assign Call Assign Assign Assign For If Compare Call Call Call Call Return return:yes"
  },
  {
    "library": "pandas",
    "name": "_is_visible",
    "source_code": "def _is_visible(idx_row, idx_col, lengths) -> bool:\n    return (idx_col, idx_row) in lengths",
    "docstring": "Index -> {(idx_row, idx_col): bool}).",
    "type": "function",
    "file_path": "pandas\\pandas\\io\\formats\\style_render.py",
    "ast_data": "FunctionDef name:_is_visible arg:idx_row arg:idx_col arg:lengths arguments arg arg arg Return return:yes Compare"
  },
  {
    "library": "tensorflow",
    "name": "internal_convert_n_to_tensor_or_indexed_slices",
    "source_code": "def internal_convert_n_to_tensor_or_indexed_slices(values, dtype=None, name=None, as_ref=False):\n    if not isinstance(values, collections_abc.Iterable):\n        raise TypeError('Argument `values` must be iterable.')\n    ret = []\n    for i, value in enumerate(values):\n        if value is None:\n            ret.append(value)\n        else:\n            n = None if name is None else '%s_%d' % (name, i)\n            ret.append(internal_convert_to_tensor_or_indexed_slices(value, dtype=dtype, name=n, as_ref=as_ref))\n    return ret",
    "docstring": "Converts to a list of or objects. Any or objects in are returned unmodified. Args: values: An iterable of , , , or objects that can be consumed by . dtype: (Optional.) The required of the returned or . name: (Optional.) A name prefix to used when a new is created, in which case element will be given the name . as_ref: True if the caller wants the results as ref tensors. Returns: A list of , , and/or objects. Raises: TypeError: If no conversion function is registered for an element in . RuntimeError: If a registered conversion function returns an invalid value.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\indexed_slices.py",
    "ast_data": "FunctionDef name:internal_convert_n_to_tensor_or_indexed_slices arg:values arg:dtype arg:name arg:as_ref arguments arg arg arg arg If Call Raise Call Assign For Call If Compare Call Assign Compare Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "replace_target_nodes_with",
    "source_code": "@compatibility(is_backward_compatible=False)\ndef replace_target_nodes_with(fx_module: GraphModule, old_op: str, old_target: Target, new_op: str, new_target: Target):\n    new_graph = Graph()\n    val_map: dict[Node, Node] = {}\n    for node in fx_module.graph.nodes:\n        if node.op == old_op and node.target == old_target:\n            args = map_arg(node.args, lambda n: val_map[n])\n            kwargs = map_arg(node.kwargs, lambda n: val_map[n])\n            assert isinstance(args, tuple)\n            assert isinstance(kwargs, dict)\n            val_map[node] = new_graph.create_node(new_op, new_target, args, kwargs, node.name)\n        else:\n            val_map[node] = new_graph.node_copy(node, lambda n: val_map[n])\n    fx_module.graph = new_graph",
    "docstring": "Modifies all nodes in fx_module.graph.nodes which match the specified op code and target, and updates them to match the new op code and target",
    "type": "function",
    "file_path": "pytorch\\torch\\fx\\passes\\graph_manipulation.py",
    "ast_data": "FunctionDef name:replace_target_nodes_with arg:fx_module arg:old_op arg:old_target arg:new_op arg:new_target arguments arg arg arg arg arg Assign Call For If BoolOp Compare Compare Assign Call arguments arg Assign Call arguments arg Call Call Assign Call Assign Call arguments arg Assign Call"
  },
  {
    "library": "cherrypy",
    "name": "__init__",
    "source_code": "def __init__(self, nextapp, recursive=False):\n    self.nextapp = nextapp\n    self.recursive = recursive",
    "docstring": "Initialize an internal redirector.",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\_cpwsgi.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:nextapp arg:recursive arguments arg arg arg Assign Assign"
  },
  {
    "library": "matplotlib",
    "name": "transform_angles",
    "source_code": "def transform_angles(self, angles, pts, radians=False, pushoff=1e-05):\n    if self.input_dims != 2 or self.output_dims != 2:\n        raise NotImplementedError('Only defined in 2D')\n    angles = np.asarray(angles)\n    pts = np.asarray(pts)\n    _api.check_shape((None, 2), pts=pts)\n    _api.check_shape((None,), angles=angles)\n    if len(angles) != len(pts):\n        raise ValueError(\"There must be as many 'angles' as 'pts'\")\n    if not radians:\n        angles = np.deg2rad(angles)\n    pts2 = pts + pushoff * np.column_stack([np.cos(angles), np.sin(angles)])\n    tpts = self.transform(pts)\n    tpts2 = self.transform(pts2)\n    d = tpts2 - tpts\n    a = np.arctan2(d[:, 1], d[:, 0])\n    if not radians:\n        a = np.rad2deg(a)\n    return a",
    "docstring": "Transform a set of angles anchored at specific locations. Parameters ---------- angles : (N,) array-like The angles to transform. pts : (N, 2) array-like The points where the angles are anchored. radians : bool, default: False Whether *angles* are radians or degrees. pushoff : float For each point in *pts* and angle in *angles*, the transformed angle is computed by transforming a segment of length *pushoff* starting at that point and making that angle relative to the horizontal axis, and measuring the angle between the horizontal axis and the transformed segment. Returns ------- (N,) array",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\transforms.py",
    "ast_data": "FunctionDef name:transform_angles arg:self arg:angles arg:pts arg:radians arg:pushoff arguments arg arg arg arg arg If BoolOp Compare Compare Raise Call Assign Call Assign Call Call Call If Compare Call Call Raise Call If Assign Call Assign Call Call Call Assign Call Assign Call Assign Assign Call If Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "compute_dtype",
    "source_code": "@property\ndef compute_dtype(self):\n    return self._compute_dtype",
    "docstring": "The compute dtype of this policy. This is the dtype layers will do their computations in. Typically layers output tensors with the compute dtype as well. Note that even if the compute dtype is float16 or bfloat16, hardware devices may not do individual adds, multiplies, and other fundamental operations in float16 or bfloat16, but instead may do some of them in float32 for numeric stability. The compute dtype is the dtype of the inputs and outputs of the TensorFlow ops that the layer executes. Internally, many TensorFlow ops will do certain internal calculations in float32 or some other device-internal intermediate format with higher precision than float16/bfloat16, to increase numeric stability. For example, a layer, when run on a GPU with a float16 compute dtype, will pass float16 inputs to . But, will do use float32 intermediate math. The performance benefit of float16 is still apparent, due to increased memory bandwidth and the fact modern GPUs have specialized hardware for computing matmuls on float16 inputs while still keeping intermediate computations in float32. Returns: The compute dtype of this policy, as a string.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\mixed_precision\\policy.py",
    "ast_data": "FunctionDef name:compute_dtype arg:self arguments arg Return return:yes"
  },
  {
    "library": "scipy",
    "name": "rvs",
    "source_code": "def rvs(self, df, scale, size=1, random_state=None):\n    n, shape = self._process_size(size)\n    dim, df, scale = self._process_parameters(df, scale)\n    C = scipy.linalg.cholesky(scale, lower=True)\n    out = self._rvs(n, shape, dim, df, C, random_state)\n    return _squeeze_output(out)",
    "docstring": "Draw random samples from a Wishart distribution. Parameters ---------- %(_doc_default_callparams)s size : integer or iterable of integers, optional Number of samples to draw (default 1). %(_doc_random_state)s Returns ------- rvs : ndarray Random variates of shape () + (`` is the dimension of the scale matrix. Notes ----- %(_doc_callparams_note)s",
    "type": "method",
    "file_path": "scipy\\scipy\\stats\\_multivariate.py",
    "ast_data": "FunctionDef name:rvs arg:self arg:df arg:scale arg:size arg:random_state arguments arg arg arg arg arg Assign Call Assign Call Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "semicircular_gen",
    "source_code": "class semicircular_gen(rv_continuous):\n\n    def _shape_info(self):\n        return []\n\n    def _pdf(self, x):\n        return 2.0 / np.pi * np.sqrt(1 - x * x)\n\n    def _logpdf(self, x):\n        return np.log(2 / np.pi) + 0.5 * sc.log1p(-x * x)\n\n    def _cdf(self, x):\n        return 0.5 + 1.0 / np.pi * (x * np.sqrt(1 - x * x) + np.arcsin(x))\n\n    def _ppf(self, q):\n        return rdist._ppf(q, 3)\n\n    def _rvs(self, size=None, random_state=None):\n        r = np.sqrt(random_state.uniform(size=size))\n        a = np.cos(np.pi * random_state.uniform(size=size))\n        return r * a\n\n    def _stats(self):\n        return (0, 0.25, 0, -1.0)\n\n    def _entropy(self):\n        return 0.6447298858494002",
    "docstring": "A semicircular continuous random variable. %(before_notes)s See Also -------- rdist Notes ----- The probability density function for is: .. math:: f(x) = \\frac{2}{\\pi} \\sqrt{1-x^2} for :math:. The distribution is a special case of with ``. %(after_notes)s References ---------- .. [1] \"Wigner semicircle distribution\", %(example)s",
    "type": "class",
    "file_path": "scipy\\scipy\\stats\\_continuous_distns.py",
    "ast_data": "ClassDef name:semicircular_gen FunctionDef name:_shape_info arg:self arguments arg Return return:no FunctionDef name:_pdf arg:self arg:x arguments arg arg Return return:yes Call FunctionDef name:_logpdf arg:self arg:x arguments arg arg Return return:yes Call Call FunctionDef name:_cdf arg:self arg:x arguments arg arg Return return:yes Call Call FunctionDef name:_ppf arg:self arg:q arguments arg arg Return return:yes Call FunctionDef name:_rvs arg:self arg:size arg:random_state arguments arg arg arg Assign Call Call Assign Call Call Return return:yes FunctionDef name:_stats arg:self arguments arg Return return:yes FunctionDef name:_entropy arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_config_command_handler",
    "source_code": "def _config_command_handler(self, args, screen_info=None):\n    del screen_info\n    parsed = self._config_argparser.parse_args(args)\n    if hasattr(parsed, 'property_name') and hasattr(parsed, 'property_value'):\n        self._config.set(parsed.property_name, parsed.property_value)\n        return self._config.summarize(highlight=parsed.property_name)\n    else:\n        return self._config.summarize()",
    "docstring": "Command handler for the \"config\" command.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\debug\\cli\\base_ui.py",
    "ast_data": "FunctionDef name:_config_command_handler arg:self arg:args arg:screen_info arguments arg arg arg Assign Call If BoolOp Call Call Call Return return:yes Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "AppendingByteSerializer",
    "source_code": "class AppendingByteSerializer(Generic[T]):\n    _serialize_fn: Callable[[BytesWriter, T], None]\n    _writer: BytesWriter\n\n    def __init__(self, *, serialize_fn: Callable[[BytesWriter, T], None]) -> None:\n        self._serialize_fn = serialize_fn\n        self.clear()\n\n    def clear(self) -> None:\n        self._writer = BytesWriter()\n        self._writer.write_uint64(_ENCODING_VERSION)\n\n    def append(self, data: T) -> None:\n        self._serialize_fn(self._writer, data)\n\n    def extend(self, elems: Iterable[T]) -> None:\n        for elem in elems:\n            self.append(elem)\n\n    def to_bytes(self) -> bytes:\n        return self._writer.to_bytes()\n\n    @staticmethod\n    def to_list(data: bytes, *, deserialize_fn: Callable[[BytesReader], T]) -> list[T]:\n        reader = BytesReader(data)\n        assert reader.read_uint64() == _ENCODING_VERSION\n        result: list[T] = []\n        while not reader.is_finished():\n            result.append(deserialize_fn(reader))\n        return result",
    "docstring": "Provides efficient serialization and deserialization of list of bytes Note that this does not provide any guarantees around byte order",
    "type": "class",
    "file_path": "pytorch\\torch\\utils\\_appending_byte_serializer.py",
    "ast_data": "ClassDef name:AppendingByteSerializer FunctionDef name:__init__ arg:self arguments arg arg Assign Call FunctionDef name:clear arg:self arguments arg Assign Call Call FunctionDef name:append arg:self arg:data arguments arg arg Call FunctionDef name:extend arg:self arg:elems arguments arg arg For Call FunctionDef name:to_bytes arg:self arguments arg Return return:yes Call FunctionDef name:to_list arg:data arguments arg arg Assign Call Compare Call While Call Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "BatchNorm3d",
    "source_code": "class BatchNorm3d(_BatchNorm):\n    _NNI_BN_RELU_MODULE = nni.BNReLU3d\n\n    def __init__(self, num_features, eps=1e-05, momentum=0.1, device=None, dtype=None):\n        factory_kwargs = {'device': device, 'dtype': dtype}\n        super().__init__(num_features, eps, momentum, **factory_kwargs)\n\n    def _get_name(self):\n        return 'QuantizedBatchNorm3d'\n\n    def _check_input_dim(self, input):\n        if len(input.shape) != 5:\n            raise ValueError('Input shape must be `(N, C, H, W)`!')\n\n    def forward(self, input: torch.Tensor) -> torch.Tensor:\n        return torch.ops.quantized.batch_norm3d(input, self.weight, self.bias, self.running_mean, self.running_var, self.eps, self.scale, self.zero_point)\n\n    @classmethod\n    def from_float(cls, mod, use_precomputed_fake_quant=False):\n        return _BatchNorm.from_float(cls, mod, use_precomputed_fake_quant=use_precomputed_fake_quant)",
    "docstring": "This is the quantized version of :class:.",
    "type": "class",
    "file_path": "pytorch\\torch\\ao\\nn\\quantized\\modules\\batchnorm.py",
    "ast_data": "ClassDef name:BatchNorm3d Assign FunctionDef name:__init__ arg:self arg:num_features arg:eps arg:momentum arg:device arg:dtype arguments arg arg arg arg arg arg Assign Call Call FunctionDef name:_get_name arg:self arguments arg Return return:yes FunctionDef name:_check_input_dim arg:self arg:input arguments arg arg If Compare Call Raise Call FunctionDef name:forward arg:self arg:input arguments arg arg Return return:yes Call FunctionDef name:from_float arg:cls arg:mod arg:use_precomputed_fake_quant arguments arg arg arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_process_stack_frames",
    "source_code": "def _process_stack_frames(self):\n    stack_frames = tf_stack.extract_stack()\n    stack_frame_ids = []\n    writer = None\n    for file_path, lineno, func, _ in stack_frames:\n        abs_path = os.path.abspath(file_path)\n        if (abs_path, lineno, func) in self._stack_frame_to_id:\n            stack_frame_ids.append(self._stack_frame_to_id[abs_path, lineno, func])\n            continue\n        with self._stack_frame_to_id_lock:\n            if (abs_path, lineno, func) not in self._stack_frame_to_id:\n                stack_frame_id = _get_id()\n                self._stack_frame_to_id[abs_path, lineno, func] = stack_frame_id\n                file_index = self._write_source_file_content(abs_path)\n                file_line_col = graph_debug_info_pb2.GraphDebugInfo.FileLineCol(file_index=file_index, line=lineno, func=func)\n                stack_frame_with_id = debug_event_pb2.StackFrameWithId(id=stack_frame_id, file_line_col=file_line_col)\n                writer = self.get_writer()\n                writer.WriteStackFrameWithId(stack_frame_with_id)\n            stack_frame_ids.append(self._stack_frame_to_id[abs_path, lineno, func])\n    code_location = debug_event_pb2.CodeLocation(host_name=self._hostname, stack_frame_ids=stack_frame_ids)\n    return code_location",
    "docstring": "Process stack frames. Send the content of source-files, on a best-effort basis. Returns: A list of stack frame IDs.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\debug\\lib\\dumping_callback.py",
    "ast_data": "FunctionDef name:_process_stack_frames arg:self arguments arg Assign Call Assign Assign For Assign Call If Compare Call With If Compare Assign Call Assign Assign Call Assign Call Assign Call Assign Call Call Call Assign Call Return return:yes"
  },
  {
    "library": "scipy",
    "name": "_ifft_func",
    "source_code": "def _ifft_func(self, X: np.ndarray) -> np.ndarray:\n    if self.fft_mode == 'twosided':\n        x = fft_lib.ifft(X, n=self.mfft, axis=-1)\n    elif self.fft_mode == 'centered':\n        x = fft_lib.ifft(fft_lib.ifftshift(X, axes=-1), n=self.mfft, axis=-1)\n    elif self.fft_mode == 'onesided':\n        x = fft_lib.irfft(X, n=self.mfft, axis=-1)\n    elif self.fft_mode == 'onesided2X':\n        Xc = X.copy()\n        fac = np.sqrt(2) if self.scaling == 'psd' else 2\n        q1 = -1 if self.mfft % 2 == 0 else None\n        Xc[..., 1:q1] /= fac\n        x = fft_lib.irfft(Xc, n=self.mfft, axis=-1)\n    else:\n        error_str = f'self.fft_mode={self.fft_mode!r} not in {get_args(FFT_MODE_TYPE)}!'\n        raise RuntimeError(error_str)\n    if self.phase_shift is None:\n        return x[..., :self.m_num]\n    p_s = (self.phase_shift + self.m_num_mid) % self.m_num\n    return np.roll(x, p_s, axis=-1)[..., :self.m_num]",
    "docstring": "Inverse to . Returned is an array of length . If the FFT is then a float array is returned else a complex array is returned. For multidimensional arrays the transformation is carried out on the last axis.",
    "type": "method",
    "file_path": "scipy\\scipy\\signal\\_short_time_fft.py",
    "ast_data": "FunctionDef name:_ifft_func arg:self arg:X arguments arg arg If Compare Assign Call If Compare Assign Call Call If Compare Assign Call If Compare Assign Call Assign Compare Call Assign Compare Assign Call Assign Call Raise Call If Compare Return return:yes Assign Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "TopN",
    "source_code": "class TopN:\n\n    def __init__(self, at_most: int=25):\n        self.at_most = at_most\n        self.heap: list[tuple[int, Any]] = []\n\n    def add(self, key: Any, val: int) -> None:\n        fn = heapq.heappush if len(self.heap) < self.at_most else heapq.heappushpop\n        fn(self.heap, (val, key))\n\n    def __len__(self) -> int:\n        return len(self.heap)\n\n    def __iter__(self) -> Iterator[tuple[Any, int]]:\n        return ((key, val) for val, key in sorted(self.heap, reverse=True))",
    "docstring": "Helper to record a list of metrics, keeping only the top N \"most expensive\" elements.",
    "type": "class",
    "file_path": "pytorch\\torch\\_dynamo\\metrics_context.py",
    "ast_data": "ClassDef name:TopN FunctionDef name:__init__ arg:self arg:at_most arguments arg arg Assign FunctionDef name:add arg:self arg:key arg:val arguments arg arg arg Assign Compare Call Call FunctionDef name:__len__ arg:self arguments arg Return return:yes Call FunctionDef name:__iter__ arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_collect_leaf_level_keys",
    "source_code": "def _collect_leaf_level_keys(cross):\n    leaf_level_keys = []\n    for k in cross.keys:\n        if isinstance(k, _CrossedColumn):\n            leaf_level_keys.extend(_collect_leaf_level_keys(k))\n        else:\n            leaf_level_keys.append(k)\n    return leaf_level_keys",
    "docstring": "Collects base keys by expanding all nested crosses. Args: cross: A . Returns: A list of strings or instances.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\feature_column\\feature_column.py",
    "ast_data": "FunctionDef name:_collect_leaf_level_keys arg:cross arguments arg Assign For If Call Call Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "normalize_moments",
    "source_code": "@tf_export('nn.normalize_moments')\n@dispatch.add_dispatch_support\ndef normalize_moments(counts, mean_ss, variance_ss, shift, name=None):\n    with ops.name_scope(name, 'normalize', [counts, mean_ss, variance_ss, shift]):\n        divisor = math_ops.reciprocal(counts, name='divisor')\n        if shift is not None:\n            shifted_mean = math_ops.multiply(mean_ss, divisor, name='shifted_mean')\n            mean = math_ops.add(shifted_mean, shift, name='mean')\n        else:\n            shifted_mean = math_ops.multiply(mean_ss, divisor, name='mean')\n            mean = shifted_mean\n        variance = math_ops.subtract(math_ops.multiply(variance_ss, divisor), math_ops.square(shifted_mean), name='variance')\n    return (mean, variance)",
    "docstring": "Calculate the mean and variance of based on the sufficient statistics. Args: counts: A containing the total count of the data (one value). mean_ss: A containing the mean sufficient statistics: the (possibly shifted) sum of the elements to average over. variance_ss: A containing the variance sufficient statistics: the (possibly shifted) squared sum of the data to compute the variance over. shift: A containing the value by which the data is shifted for numerical stability, or if no shift was performed. name: Name used to scope the operations that compute the moments. Returns: Two objects: and .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\nn_impl.py",
    "ast_data": "FunctionDef name:normalize_moments arg:counts arg:mean_ss arg:variance_ss arg:shift arg:name arguments arg arg arg arg arg With Call Assign Call If Compare Assign Call Assign Call Assign Call Assign Assign Call Call Call Return return:yes Call"
  },
  {
    "library": "django",
    "name": "references_table",
    "source_code": "def references_table(self, table):\n    return False",
    "docstring": "Return whether or not this instance references the specified table.",
    "type": "method",
    "file_path": "django\\django\\db\\backends\\ddl_references.py",
    "ast_data": "FunctionDef name:references_table arg:self arg:table arguments arg arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_unused_handle",
    "source_code": "def _unused_handle():\n    error_message = 'Trying to access a placeholder that is not supposed to be executed. This means you are executing a graph generated from the cross-replica context in an in-replica context.'\n    save_error_message = \"It seems that you are trying to save a tf.types.experimental.ConcreteFunction that involves a distributed model, and the model contains parts that are loaded form a SavedModel. It's not supported to save such tf.types.experimental.ConcreteFunction. Try saving a tf.function with input_signature instead, and file a bug if there are still issues.\"\n    assert_op = control_flow_assert.Assert(array_ops.placeholder_with_default(False, shape=()), [error_message])\n    if not context.executing_eagerly() and ops.get_default_graph().building_function:\n        ops.get_default_graph().mark_as_unsaveable(save_error_message)\n    with ops.control_dependencies([assert_op]):\n        return array_ops.placeholder(dtype=dtypes.resource)",
    "docstring": "Returns a placeholder as a handle that is not supposed to be accessed.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\saved_model\\load.py",
    "ast_data": "FunctionDef name:_unused_handle arguments Assign Assign Assign Call Call If BoolOp Call Call Call Call With Call Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "_calc_coord",
    "source_code": "def _calc_coord(self, xv, yv, renderer=None):\n    if self._focal_length == np.inf:\n        zv = 1\n    else:\n        zv = -1 / self._focal_length\n    p1 = np.array(proj3d.inv_transform(xv, yv, zv, self.invM)).ravel()\n    vec = self._get_camera_loc() - p1\n    pane_locs = []\n    for axis in self._axis_map.values():\n        xys, loc = axis.active_pane()\n        pane_locs.append(loc)\n    scales = np.zeros(3)\n    for i in range(3):\n        if vec[i] == 0:\n            scales[i] = np.inf\n        else:\n            scales[i] = (p1[i] - pane_locs[i]) / vec[i]\n    pane_idx = np.argmin(abs(scales))\n    scale = scales[pane_idx]\n    p2 = p1 - scale * vec\n    return (p2, pane_idx)",
    "docstring": "Given the 2D view coordinates, find the point on the nearest axis pane that lies directly below those coordinates. Returns a 3D point in data coordinates.",
    "type": "method",
    "file_path": "matplotlib\\lib\\mpl_toolkits\\mplot3d\\axes3d.py",
    "ast_data": "FunctionDef name:_calc_coord arg:self arg:xv arg:yv arg:renderer arguments arg arg arg arg If Compare Assign Assign Assign Call Call Call Assign Call Assign For Call Assign Call Call Assign Call For Call If Compare Assign Assign Assign Call Call Assign Assign Return return:yes"
  },
  {
    "library": "django",
    "name": "__sub__",
    "source_code": "def __sub__(self, other):\n    return self.difference(other)",
    "docstring": "Return the difference this Geometry and the other.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\gdal\\geometries.py",
    "ast_data": "FunctionDef name:__sub__ arg:self arg:other arguments arg arg Return return:yes Call"
  },
  {
    "library": "numpy",
    "name": "rfft2",
    "source_code": "@array_function_dispatch(_fftn_dispatcher)\ndef rfft2(a, s=None, axes=(-2, -1), norm=None, out=None):\n    return rfftn(a, s, axes, norm, out=out)",
    "docstring": "Compute the 2-dimensional FFT of a real array. Parameters ---------- a : array Input array, taken to be real. s : sequence of ints, optional Shape of the FFT. .. versionchanged:: 2.0 If it is `saxesssaxesnumpy.fftrfftnrfftn`. Examples -------- >>> import numpy as np >>> a = np.mgrid[:5, :5][0] >>> np.fft.rfft2(a) array([[ 50. +0.j , 0. +0.j , 0. +0.j ], [-12.5+17.20477401j, 0. +0.j , 0. +0.j ], [-12.5 +4.0614962j , 0. +0.j , 0. +0.j ], [-12.5 -4.0614962j , 0. +0.j , 0. +0.j ], [-12.5-17.20477401j, 0. +0.j , 0. +0.j ]])",
    "type": "function",
    "file_path": "numpy\\numpy\\fft\\_pocketfft.py",
    "ast_data": "FunctionDef name:rfft2 arg:a arg:s arg:axes arg:norm arg:out arguments arg arg arg arg arg Return return:yes Call Call"
  },
  {
    "library": "seaborn",
    "name": "__call__",
    "source_code": "def __call__(self, key, *args, **kwargs):\n    if isinstance(key, (list, np.ndarray, pd.Series)):\n        return [self._lookup_single(k, *args, **kwargs) for k in key]\n    else:\n        return self._lookup_single(key, *args, **kwargs)",
    "docstring": "Get the attribute(s) values for the data key.",
    "type": "method",
    "file_path": "seaborn\\seaborn\\_base.py",
    "ast_data": "FunctionDef name:__call__ arg:self arg:key arguments arg arg arg arg If Call Return return:yes Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "EnabledProxy",
    "source_code": "class EnabledProxy:\n\n    def __init__(self) -> None:\n        self.enabled = self.parse_env('PYTORCH_JIT', True, '> Using PyTorch JIT', '> PyTorch JIT DISABLED')\n\n    def parse_env(self, name, default, true_message, false_message):\n        value = os.environ.get(name)\n        if value is None:\n            return default\n        if value.lower() in {'1', 'true', 'yes'}:\n            return True\n        elif value.lower() in {'0', 'false', 'no'}:\n            return False\n        if value == '1v':\n            print(true_message)\n            return True\n        elif value == '0v':\n            print(false_message)\n            return False\n        raise ValueError(f'Unknown setting of {name}. Try using 0 or 1.')\n\n    def __bool__(self):\n        return self.enabled",
    "docstring": "Stores whether the JIT is enabled or not. This is just a wrapper for a bool, so that we get reference semantics",
    "type": "class",
    "file_path": "pytorch\\torch\\jit\\_state.py",
    "ast_data": "ClassDef name:EnabledProxy FunctionDef name:__init__ arg:self arguments arg Assign Call FunctionDef name:parse_env arg:self arg:name arg:default arg:true_message arg:false_message arguments arg arg arg arg arg Assign Call If Compare Return return:yes If Compare Call Return return:yes If Compare Call Return return:yes If Compare Call Return return:yes If Compare Call Return return:yes Raise Call FunctionDef name:__bool__ arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "num_accumulated",
    "source_code": "def num_accumulated(self, name=None):\n    if name is None:\n        name = '%s_NumAccumulated' % self._name\n    return gen_data_flow_ops.resource_accumulator_num_accumulated(self._accumulator_ref, name=name)",
    "docstring": "Number of gradients that have currently been aggregated in accumulator. Args: name: Optional name for the operation. Returns: Number of accumulated gradients currently in accumulator.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\data_flow_ops.py",
    "ast_data": "FunctionDef name:num_accumulated arg:self arg:name arguments arg arg If Compare Assign Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "set_data",
    "source_code": "def set_data(self, x, y, A):\n    A = self._normalize_image_array(A)\n    x = np.arange(0.0, A.shape[1] + 1) if x is None else np.array(x, float).ravel()\n    y = np.arange(0.0, A.shape[0] + 1) if y is None else np.array(y, float).ravel()\n    if A.shape[:2] != (y.size - 1, x.size - 1):\n        raise ValueError(\"Axes don't match array shape. Got %s, expected %s.\" % (A.shape[:2], (y.size - 1, x.size - 1)))\n    if x[-1] < x[0]:\n        x = x[::-1]\n        A = A[:, ::-1]\n    if y[-1] < y[0]:\n        y = y[::-1]\n        A = A[::-1]\n    self._A = A\n    self._Ax = x\n    self._Ay = y\n    self._imcache = None\n    self.stale = True",
    "docstring": "Set the grid for the rectangle boundaries, and the data values. Parameters ---------- x, y : 1D array-like, optional Monotonic arrays of length N+1 and M+1, respectively, specifying rectangle boundaries. If not given, will default to `~numpy.ndarray` or masked array: values to be colormapped - (M, N, 3): RGB array - (M, N, 4): RGBA array",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\image.py",
    "ast_data": "FunctionDef name:set_data arg:self arg:x arg:y arg:A arguments arg arg arg arg Assign Call Assign Compare Call Call Call Assign Compare Call Call Call If Compare Raise Call If Compare Assign Assign If Compare Assign Assign Assign Assign Assign Assign Assign"
  },
  {
    "library": "scipy",
    "name": "time_spherical_voronoi_calculation",
    "source_code": "def time_spherical_voronoi_calculation(self, num_points):\n    SphericalVoronoi(self.points, radius=1, center=np.zeros(3))",
    "docstring": "Perform spherical Voronoi calculation, but not the sorting of vertices in the Voronoi polygons.",
    "type": "method",
    "file_path": "scipy\\benchmarks\\benchmarks\\spatial.py",
    "ast_data": "FunctionDef name:time_spherical_voronoi_calculation arg:self arg:num_points arguments arg arg Call Call"
  },
  {
    "library": "scipy",
    "name": "stacked_matmul",
    "source_code": "def stacked_matmul(a, b):\n    if a.shape[1] > 50:\n        out = np.empty((a.shape[0], a.shape[1], b.shape[2]))\n        for i in range(a.shape[0]):\n            out[i] = np.dot(a[i], b[i])\n        return out\n    else:\n        return np.einsum('...ij,...jk->...ik', a, b)",
    "docstring": "Stacked matrix multiply: out[i,:,:] = np.dot(a[i,:,:], b[i,:,:]). Empirical optimization. Use outer Python loop and BLAS for large matrices, otherwise use a single einsum call.",
    "type": "function",
    "file_path": "scipy\\scipy\\integrate\\_bvp.py",
    "ast_data": "FunctionDef name:stacked_matmul arg:a arg:b arguments arg arg If Compare Assign Call For Call Assign Call Return return:yes Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "maximum_filter1d",
    "source_code": "@_ni_docstrings.docfiller\ndef maximum_filter1d(input, size, axis=-1, output=None, mode='reflect', cval=0.0, origin=0):\n    input = np.asarray(input)\n    if np.iscomplexobj(input):\n        raise TypeError('Complex type not supported')\n    axis = normalize_axis_index(axis, input.ndim)\n    if size < 1:\n        raise RuntimeError('incorrect filter size')\n    output = _ni_support._get_output(output, input)\n    if size // 2 + origin < 0 or size // 2 + origin >= size:\n        raise ValueError('invalid origin')\n    mode = _ni_support._extend_mode_to_code(mode)\n    _nd_image.min_or_max_filter1d(input, size, axis, output, mode, cval, origin, 0)\n    return output",
    "docstring": "Calculate a 1-D maximum filter along the given axis. The lines of the array along the given axis are filtered with a maximum filter of given size. Parameters ---------- %(input)s size : int Length along which to calculate the 1-D maximum. %(axis)s %(output)s %(mode_reflect)s %(cval)s %(origin)s Returns ------- maximum1d : ndarray, None Maximum-filtered array with same shape as input. None if is not None Notes ----- This function implements the MAXLIST algorithm [1]_, as described by Richard Harter [2]_, and has a guaranteed O(n) performance, being the length, regardless of filter size. References ---------- .. [1] .. [2] Examples -------- >>> from scipy.ndimage import maximum_filter1d >>> maximum_filter1d([2, 8, 0, 4, 1, 9, 9, 0], size=3) array([8, 8, 8, 4, 9, 9, 9, 9])",
    "type": "function",
    "file_path": "scipy\\scipy\\ndimage\\_filters.py",
    "ast_data": "FunctionDef name:maximum_filter1d arg:input arg:size arg:axis arg:output arg:mode arg:cval arg:origin arguments arg arg arg arg arg arg arg Assign Call If Call Raise Call Assign Call If Compare Raise Call Assign Call If BoolOp Compare Compare Raise Call Assign Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "num_inner_dimensions",
    "source_code": "@property\ndef num_inner_dimensions(self):\n    return tensor_shape.dimension_value(self._inner_dim_sizes.shape[0])",
    "docstring": "The number of inner dimensions, or if not statically known.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\ragged_tensor_shape.py",
    "ast_data": "FunctionDef name:num_inner_dimensions arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "seaborn",
    "name": "_disable_autolayout",
    "source_code": "@contextmanager\ndef _disable_autolayout():\n    orig_val = mpl.rcParams['figure.autolayout']\n    try:\n        mpl.rcParams['figure.autolayout'] = False\n        yield\n    finally:\n        mpl.rcParams['figure.autolayout'] = orig_val",
    "docstring": "Context manager for preventing rc-controlled auto-layout behavior.",
    "type": "function",
    "file_path": "seaborn\\seaborn\\utils.py",
    "ast_data": "FunctionDef name:_disable_autolayout arguments Assign Try Assign Assign"
  },
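The same save/restore idiom generalizes to any rcParams key; a minimal sketch of the pattern (the `_disable_rc` helper is hypothetical, not part of seaborn):

```python
from contextlib import contextmanager
import matplotlib as mpl

@contextmanager
def _disable_rc(key):
    # Works for boolean rc keys such as 'figure.autolayout'.
    orig_val = mpl.rcParams[key]
    try:
        mpl.rcParams[key] = False
        yield
    finally:
        # Restore even if the body raised, so global state never leaks.
        mpl.rcParams[key] = orig_val

with _disable_rc('figure.autolayout'):
    pass  # build figures here without rc-driven auto-layout
```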
  {
    "library": "pytorch",
    "name": "wrap_module_to",
    "source_code": "def wrap_module_to(self: torch.nn.modules.module.T, device: Optional[Union[int, torch.device]]=None) -> torch.nn.modules.module.T:\n    return self._apply(lambda t: getattr(t, custom_backend_name)(device))",
    "docstring": "Move all model parameters and buffers to the custom device. This also makes associated parameters and buffers different objects. So it should be called before constructing optimizer if the module will live on device while being optimized. .. note:: This method modifies the module in-place. Args: device (int, optional): if specified, all parameters will be copied to that device",
    "type": "function",
    "file_path": "pytorch\\torch\\utils\\backend_registration.py",
    "ast_data": "FunctionDef name:wrap_module_to arg:self arg:device arguments arg arg Return return:yes Call arguments arg Call Call"
  },
  {
    "library": "cherrypy",
    "name": "default_proc",
    "source_code": "def default_proc(self):\n    if self.filename:\n        self.file = self.read_into_file()\n    else:\n        result = self.read_lines_to_boundary()\n        if isinstance(result, bytes):\n            self.value = result\n        else:\n            self.file = result",
    "docstring": "Process unknown data as a fallback. Called if a more-specific processor is not found for the ``.",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\_cpreqbody.py",
    "ast_data": "FunctionDef name:default_proc arg:self arguments arg If Assign Call Assign Call If Call Assign Assign"
  },
  {
    "library": "kornia",
    "name": "rgb_to_raw",
    "source_code": "def rgb_to_raw(image: torch.Tensor, cfa: CFA) -> torch.Tensor:\n    if not isinstance(image, torch.Tensor):\n        raise TypeError(f'Input type is not a torch.Tensor. Got {type(image)}')\n    if len(image.shape) < 3 or image.shape[-3] != 3:\n        raise ValueError(f'Input size must have a shape of (*, 3, H, W). Got {image.shape}')\n    output: torch.Tensor = image[..., 1:2, :, :].clone()\n    if cfa == CFA.BG:\n        output[..., :, ::2, ::2] = image[..., 0:1, ::2, ::2]\n        output[..., :, 1::2, 1::2] = image[..., 2:3, 1::2, 1::2]\n    elif cfa == CFA.GB:\n        output[..., :, ::2, 1::2] = image[..., 0:1, ::2, 1::2]\n        output[..., :, 1::2, ::2] = image[..., 2:3, 1::2, ::2]\n    elif cfa == CFA.RG:\n        output[..., :, 1::2, 1::2] = image[..., 0:1, 1::2, 1::2]\n        output[..., :, ::2, ::2] = image[..., 2:3, ::2, ::2]\n    elif cfa == CFA.GR:\n        output[..., :, 1::2, ::2] = image[..., 0:1, 1::2, ::2]\n        output[..., :, ::2, 1::2] = image[..., 2:3, ::2, 1::2]\n    return output",
    "docstring": "Convert a RGB image to RAW version of image with the specified color filter array. The image data is assumed to be in the range of (0, 1). Args: image: RGB image to be converted to bayer raw with shape :math:. cfa: Which color filter array do we want the output to mimic. I.e. which pixels are red/green/blue. Returns: raw version of the image with shape :math:. Example: >>> rgbinput = torch.rand(2, 3, 4, 6) >>> raw = rgb_to_raw(rgbinput, CFA.BG) # 2x1x4x6",
    "type": "function",
    "file_path": "kornia\\kornia\\color\\raw.py",
    "ast_data": "FunctionDef name:rgb_to_raw arg:image arg:cfa arguments arg arg If Call Raise Call Call If BoolOp Compare Call Compare Raise Call Call If Compare Assign Assign If Compare Assign Assign If Compare Assign Assign If Compare Assign Assign Return return:yes"
  },
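A usage sketch, assuming kornia's public exports of `rgb_to_raw` and `CFA`: for `CFA.BG`, even-row/even-column mosaic sites should come from the red channel and odd/odd sites from the blue channel:

```python
import torch
from kornia.color import CFA, rgb_to_raw  # public exports (assumed)

rgb = torch.rand(2, 3, 4, 6)
raw = rgb_to_raw(rgb, CFA.BG)   # shape (2, 1, 4, 6)

assert raw.shape == (2, 1, 4, 6)
# Red values land on even/even sites, blue on odd/odd; the rest is green.
assert torch.equal(raw[..., 0, ::2, ::2], rgb[..., 0, ::2, ::2])
assert torch.equal(raw[..., 0, 1::2, 1::2], rgb[..., 2, 1::2, 1::2])
```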
  {
    "library": "tensorflow",
    "name": "location_protos",
    "source_code": "def location_protos(self):\n    return self._location_key_to_location.values()",
    "docstring": "Returns list of protos.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\profiler\\pprof_profiler.py",
    "ast_data": "FunctionDef name:location_protos arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_transform_unaggregated_gradients",
    "source_code": "def _transform_unaggregated_gradients(self, grads_and_vars):\n    return grads_and_vars",
    "docstring": "Called in before gradient aggregation.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\optimizer_v2\\optimizer_v2.py",
    "ast_data": "FunctionDef name:_transform_unaggregated_gradients arg:self arg:grads_and_vars arguments arg arg Return return:yes"
  },
  {
    "library": "scrapy",
    "name": "init_env",
    "source_code": "def init_env(project: str='default', set_syspath: bool=True) -> None:\n    cfg = get_config()\n    if cfg.has_option('settings', project):\n        os.environ['SCRAPY_SETTINGS_MODULE'] = cfg.get('settings', project)\n    closest = closest_scrapy_cfg()\n    if closest:\n        projdir = str(Path(closest).parent)\n        if set_syspath and projdir not in sys.path:\n            sys.path.append(projdir)",
    "docstring": "Initialize environment to use command-line tool from inside a project dir. This sets the Scrapy settings module and modifies the Python path to be able to locate the project module.",
    "type": "function",
    "file_path": "scrapy\\scrapy\\utils\\conf.py",
    "ast_data": "FunctionDef name:init_env arg:project arg:set_syspath arguments arg arg Assign Call If Call Assign Call Assign Call If Assign Call Call If BoolOp Compare Call"
  },
  {
    "library": "pygame",
    "name": "add",
    "source_code": "def add(self, *groups):\n    has = self.__g.__contains__\n    for group in groups:\n        if hasattr(group, '_spritegroup'):\n            if not has(group):\n                group.add_internal(self)\n                self.add_internal(group)\n        else:\n            self.add(*group)",
    "docstring": "add the sprite to groups Sprite.add(*groups): return None Any number of Group instances can be passed as arguments. The Sprite will be added to the Groups it is not already a member of.",
    "type": "method",
    "file_path": "pygame\\src_py\\sprite.py",
    "ast_data": "FunctionDef name:add arg:self arguments arg arg Assign For If Call If Call Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "_batch_capacitance_tril",
    "source_code": "def _batch_capacitance_tril(W, D):\n    m = W.size(-1)\n    Wt_Dinv = W.mT / D.unsqueeze(-2)\n    K = torch.matmul(Wt_Dinv, W).contiguous()\n    K.view(-1, m * m)[:, ::m + 1] += 1\n    return torch.linalg.cholesky(K)",
    "docstring": "Computes Cholesky of :math: for a batch of matrices :math: and a batch of vectors :math:.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributions\\lowrank_multivariate_normal.py",
    "ast_data": "FunctionDef name:_batch_capacitance_tril arg:W arg:D arguments arg arg Assign Call Assign Call Assign Call Call Call Return return:yes Call"
  },
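A numerical sketch of the property the returned factor satisfies: `L @ L.mT` should equal the capacitance matrix `I + W.T @ inv(D) @ W`. The helper is private to `torch.distributions`, so its body is re-implemented inline:

```python
import torch

def batch_capacitance_tril(W, D):
    m = W.size(-1)
    Wt_Dinv = W.mT / D.unsqueeze(-2)
    K = torch.matmul(Wt_Dinv, W).contiguous()
    K.view(-1, m * m)[:, ::m + 1] += 1   # add the identity in-place
    return torch.linalg.cholesky(K)

W = torch.randn(4, 5, 2, dtype=torch.float64)         # batch of 5x2 factors
D = torch.rand(4, 5, dtype=torch.float64) + 0.5       # positive diagonals
L = batch_capacitance_tril(W, D)

# Dense reference: I + Wᵀ D⁻¹ W, built with an explicit diagonal matrix.
K_ref = torch.eye(2, dtype=torch.float64) + W.mT @ torch.diag_embed(1.0 / D) @ W
assert torch.allclose(L @ L.mT, K_ref)
```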
  {
    "library": "tensorflow",
    "name": "_check_sated",
    "source_code": "def _check_sated(self, raise_error):\n    if self._sated:\n        return\n    creation_stack = ''.join([line.rstrip() for line in traceback.format_stack(self._stack_frame, limit=5)])\n    if raise_error:\n        try:\n            raise RuntimeError('Object was never used (type {}): {}.  If you want to mark it as used call its \"mark_used()\" method.  It was originally created here:\\n{}'.format(self._type, self._repr, creation_stack))\n        finally:\n            self.sate()\n    else:\n        tf_logging.error('==================================\\nObject was never used (type {}):\\n{}\\nIf you want to mark it as used call its \"mark_used()\" method.\\nIt was originally created here:\\n{}\\n=================================='.format(self._type, self._repr, creation_stack))",
    "docstring": "Check if the object has been sated.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\util\\tf_should_use.py",
    "ast_data": "FunctionDef name:_check_sated arg:self arg:raise_error arguments arg arg If Return return:no Assign Call Call Call If Try Raise Call Call Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "round",
    "source_code": "@dispatch.add_dispatch_support\n@doc_controls.do_not_generate_docs\ndef round(x):\n    return math_ops.round(x)",
    "docstring": "Element-wise rounding to the closest integer. In case of tie, the rounding mode used is \"half to even\". Args: x: Tensor or variable. Returns: A tensor.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\backend.py",
    "ast_data": "FunctionDef name:round arg:x arguments arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "Mean",
    "source_code": "class Mean(Reduce):\n\n    def __init__(self, name='mean', dtype=None):\n        super(Mean, self).__init__(reduction=metrics_utils.Reduction.WEIGHTED_MEAN, name=name, dtype=dtype)",
    "docstring": "Computes the (weighted) mean of the given values. For example, if values is [1, 3, 5, 7] then the mean is 4. If the weights were specified as [1, 1, 0, 0] then the mean would be 2. This metric creates two variables, and that are used to compute the average of . This average is ultimately returned as which is an idempotent operation that simply divides by . If is , weights default to 1. Use of 0 to mask values. Args: name: (Optional) string name of the metric instance. dtype: (Optional) data type of the metric result. Standalone usage: >>> m = tf.keras.metrics.Mean() >>> m.update_state([1, 3, 5, 7]) >>> m.result().numpy() 4.0 >>> m.reset_state() >>> m.update_state([1, 3, 5, 7], sample_weight=[1, 1, 0, 0]) >>> m.result().numpy() 2.0 Usage with API:",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\metrics.py",
    "ast_data": "ClassDef name:Mean FunctionDef name:__init__ arg:self arg:name arg:dtype arguments arg arg arg Call Call"
  },
  {
    "library": "scikit-learn",
    "name": "pad",
    "source_code": "def pad(x: Array, pad_width: int | tuple[int, int] | Sequence[tuple[int, int]], mode: Literal['constant']='constant', *, constant_values: complex=0, xp: ModuleType | None=None) -> Array:\n    xp = array_namespace(x) if xp is None else xp\n    if mode != 'constant':\n        msg = \"Only `'constant'` mode is currently supported\"\n        raise NotImplementedError(msg)\n    if _delegate(xp, Backend.TORCH):\n        pad_width = xp.asarray(pad_width)\n        pad_width = xp.broadcast_to(pad_width, (x.ndim, 2))\n        pad_width = xp.flip(pad_width, axis=(0,)).flatten()\n        return xp.nn.functional.pad(x, tuple(pad_width), value=constant_values)\n    if _delegate(xp, Backend.NUMPY, Backend.JAX, Backend.CUPY, Backend.SPARSE):\n        return xp.pad(x, pad_width, mode, constant_values=constant_values)\n    return _funcs.pad(x, pad_width, constant_values=constant_values, xp=xp)",
    "docstring": "Pad the input array. Parameters ---------- x : array Input array. pad_width : int or tuple of ints or sequence of pairs of ints Pad the input array with this many elements from each side. If a sequence of tuples, `constant_valuesx`.",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\externals\\array_api_extra\\_delegation.py",
    "ast_data": "FunctionDef name:pad arg:x arg:pad_width arg:mode arguments arg arg arg arg arg Assign Compare Call If Compare Assign Raise Call If Call Assign Call Assign Call Assign Call Call Return return:yes Call Call If Call Return return:yes Call Return return:yes Call"
  },
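The torch branch's `flip`/`flatten` dance exists because NumPy's `pad_width` lists axes first-to-last while `torch.nn.functional.pad` takes a flat tuple starting from the *last* axis; a small sketch of that conversion:

```python
import numpy as np
import torch

x = torch.zeros(2, 3)
pad_width = [(1, 0), (0, 2)]   # NumPy order: (before, after) per axis

# Reverse the axis order and flatten: [(1,0),(0,2)] -> (0, 2, 1, 0).
flat = np.flip(np.asarray(pad_width), axis=0).flatten()
y = torch.nn.functional.pad(x, tuple(int(v) for v in flat), value=7.0)

assert y.shape == (3, 5)
assert np.array_equal(y.numpy(), np.pad(x.numpy(), pad_width, constant_values=7.0))
```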
  {
    "library": "tensorflow",
    "name": "_lookup_reduction",
    "source_code": "def _lookup_reduction(self, t):\n    assert isinstance(t, tensor_lib.Tensor), t\n    return self._reduce_map.get(t.op)",
    "docstring": "Lookups Tensor in the reduction maps.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\parallel_for\\pfor.py",
    "ast_data": "FunctionDef name:_lookup_reduction arg:self arg:t arguments arg arg Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_RandomGammaGrad",
    "source_code": "@ops.RegisterGradient('RandomGamma')\ndef _RandomGammaGrad(op: ops.Operation, grad):\n    shape = op.inputs[0]\n    alpha = op.inputs[1]\n    sample = op.outputs[0]\n    with ops.control_dependencies([grad]):\n        num_sample_dimensions = array_ops.shape(shape)[0]\n        alpha_broadcastable = add_leading_unit_dimensions(alpha, num_sample_dimensions)\n        partial_a = gen_random_ops.random_gamma_grad(alpha_broadcastable, sample)\n        return (None, math_ops.reduce_sum(grad * partial_a, axis=math_ops.range(num_sample_dimensions)))",
    "docstring": "Returns the gradient of a Gamma sample w.r.t. alpha. The gradient is computed using implicit differentiation (Figurnov et al., 2018). Args: op: A operation. We assume that the inputs to the operation are and tensors, and the output is the tensor. grad: The incoming gradient of the same shape as . Returns: A with derivatives . References: Implicit Reparameterization Gradients: [Figurnov et al., 2018] ( ([pdf] (",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\random_grad.py",
    "ast_data": "FunctionDef name:_RandomGammaGrad arg:op arg:grad arguments arg arg Assign Assign Assign With Call Assign Call Assign Call Assign Call Return return:yes Call Call Call"
  },
  {
    "library": "scikit-learn",
    "name": "RegressorTags",
    "source_code": "@dataclass(slots=True)\nclass RegressorTags:\n    poor_score: bool = False",
    "docstring": "Tags for the regressor. Parameters ---------- poor_score : bool, default=False Whether the estimator fails to provide a \"reasonable\" test-set score, which currently for regression is an R2 of 0.5 on ``. The dataset and values are based on current estimators in scikit-learn and might be replaced by something more systematic.",
    "type": "class",
    "file_path": "scikit-learn\\sklearn\\utils\\_tags.py",
    "ast_data": "ClassDef name:RegressorTags Call"
  },
  {
    "library": "tensorflow",
    "name": "set_weights",
    "source_code": "def set_weights(distribution_strategy, dist_model, weights):\n    assign_ops = []\n    for layer in dist_model.layers:\n        num_param = len(layer.weights)\n        layer_weights = weights[:num_param]\n        for sw, w in zip(layer.weights, layer_weights):\n            if ops.executing_eagerly_outside_functions():\n                sw.assign(w)\n            else:\n                assign_ops.append(distribution_strategy.unwrap(sw.assign(w)))\n        weights = weights[num_param:]\n    if not ops.executing_eagerly_outside_functions():\n        backend.get_session(assign_ops).run(assign_ops)",
    "docstring": "Sets the weights of the replicated models. The weights of the replicated models are set to the weights of the original model. The weights of the replicated model are Mirrored variables and hence we need to use the call within a DistributionStrategy scope. Args: distribution_strategy: DistributionStrategy used to distribute training and validation. dist_model: The replicated models on the different devices. weights: The weights of the original model.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\distribute\\distributed_training_utils_v1.py",
    "ast_data": "FunctionDef name:set_weights arg:distribution_strategy arg:dist_model arg:weights arguments arg arg arg Assign For Assign Call Assign For Call If Call Call Call Call Call Assign If Call Call Call"
  },
  {
    "library": "django",
    "name": "dumps",
    "source_code": "def dumps(obj, key=None, salt='django.core.signing', serializer=JSONSerializer, compress=False):\n    return TimestampSigner(key=key, salt=salt).sign_object(obj, serializer=serializer, compress=compress)",
    "docstring": "Return URL-safe, hmac signed base64 compressed JSON string. If key is None, use settings.SECRET_KEY instead. The hmac algorithm is the default Signer algorithm. If compress is True (not the default), check if compressing using zlib can save some space. Prepend a '.' to signify compression. This is included in the signature, to protect against zip bombs. Salt can be used to namespace the hash, so that a signed string is only valid for a given namespace. Leaving this at the default value or re-using a salt value across different parts of your application without good cause is a security risk. The serializer is expected to return a bytestring.",
    "type": "function",
    "file_path": "django\\django\\core\\signing.py",
    "ast_data": "FunctionDef name:dumps arg:obj arg:key arg:salt arg:serializer arg:compress arguments arg arg arg arg arg Return return:yes Call Call"
  },
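A round-trip usage sketch; the `SECRET_KEY` and salt values are placeholders:

```python
from django.conf import settings

# Minimal standalone configuration; a real project sets this in settings.py.
settings.configure(SECRET_KEY='placeholder-secret-key')

from django.core import signing

token = signing.dumps({'user': 42}, salt='example.app', compress=True)
data = signing.loads(token, salt='example.app')  # raises BadSignature if tampered
assert data == {'user': 42}
```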
  {
    "library": "matplotlib",
    "name": "_update_pcolor_lims",
    "source_code": "def _update_pcolor_lims(self, collection, coords):\n    t = collection._transform\n    if not isinstance(t, mtransforms.Transform) and hasattr(t, '_as_mpl_transform'):\n        t = t._as_mpl_transform(self.axes)\n    if t and any(t.contains_branch_seperately(self.transData)):\n        trans_to_data = t - self.transData\n        coords = trans_to_data.transform(coords)\n    self.add_collection(collection, autolim=False)\n    minx, miny = np.min(coords, axis=0)\n    maxx, maxy = np.max(coords, axis=0)\n    collection.sticky_edges.x[:] = [minx, maxx]\n    collection.sticky_edges.y[:] = [miny, maxy]\n    self.update_datalim(coords)\n    self._request_autoscale_view()",
    "docstring": "Common code for updating lims in pcolor() and pcolormesh() methods.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\axes\\_axes.py",
    "ast_data": "FunctionDef name:_update_pcolor_lims arg:self arg:collection arg:coords arguments arg arg arg Assign If BoolOp Call Call Assign Call If BoolOp Call Call Assign Assign Call Call Assign Call Assign Call Assign Assign Call Call"
  },
  {
    "library": "tensorflow",
    "name": "GlobalPooling2D",
    "source_code": "class GlobalPooling2D(Layer):\n\n    def __init__(self, data_format=None, keepdims=False, **kwargs):\n        super(GlobalPooling2D, self).__init__(**kwargs)\n        self.data_format = conv_utils.normalize_data_format(data_format)\n        self.input_spec = InputSpec(ndim=4)\n        self.keepdims = keepdims\n\n    def compute_output_shape(self, input_shape):\n        input_shape = tensor_shape.TensorShape(input_shape).as_list()\n        if self.data_format == 'channels_last':\n            if self.keepdims:\n                return tensor_shape.TensorShape([input_shape[0], 1, 1, input_shape[3]])\n            else:\n                return tensor_shape.TensorShape([input_shape[0], input_shape[3]])\n        elif self.keepdims:\n            return tensor_shape.TensorShape([input_shape[0], input_shape[1], 1, 1])\n        else:\n            return tensor_shape.TensorShape([input_shape[0], input_shape[1]])\n\n    def call(self, inputs):\n        raise NotImplementedError\n\n    def get_config(self):\n        config = {'data_format': self.data_format, 'keepdims': self.keepdims}\n        base_config = super(GlobalPooling2D, self).get_config()\n        return dict(list(base_config.items()) + list(config.items()))",
    "docstring": "Abstract class for different global pooling 2D layers.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\layers\\pooling.py",
    "ast_data": "ClassDef name:GlobalPooling2D FunctionDef name:__init__ arg:self arg:data_format arg:keepdims arguments arg arg arg arg Call Call Assign Call Assign Call Assign FunctionDef name:compute_output_shape arg:self arg:input_shape arguments arg arg Assign Call Call If Compare If Return return:yes Call Return return:yes Call If Return return:yes Call Return return:yes Call FunctionDef name:call arg:self arg:inputs arguments arg arg Raise FunctionDef name:get_config arg:self arguments arg Assign Assign Call Call Return return:yes Call Call Call Call Call"
  },
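A minimal concrete-subclass sketch of the same idea against the public `tf.keras.layers.Layer` API (an illustration, not the library's `GlobalAveragePooling2D`): only `call` needs to be supplied, since the reduction axes follow directly from `data_format`:

```python
import tensorflow as tf

class GlobalAvgPool2DSketch(tf.keras.layers.Layer):
    def __init__(self, data_format='channels_last', keepdims=False, **kwargs):
        super().__init__(**kwargs)
        # Spatial axes are (1, 2) for NHWC and (2, 3) for NCHW.
        self.axis = [1, 2] if data_format == 'channels_last' else [2, 3]
        self.keepdims = keepdims

    def call(self, inputs):
        return tf.reduce_mean(inputs, axis=self.axis, keepdims=self.keepdims)

x = tf.zeros([8, 5, 5, 16])
assert GlobalAvgPool2DSketch()(x).shape == (8, 16)
```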
  {
    "library": "tensorflow",
    "name": "_validate_call_args",
    "source_code": "def _validate_call_args(self, inputs, mask):\n    class_name = self.__class__.__name__\n    if not isinstance(inputs, list):\n        raise ValueError('{} layer must be called on a list of inputs, namely [query, value] or [query, value, key].'.format(class_name))\n    if len(inputs) < 2 or len(inputs) > 3:\n        raise ValueError('{} layer accepts inputs list of length 2 or 3, namely [query, value] or [query, value, key]. Given length: {}'.format(class_name, len(inputs)))\n    if mask:\n        if not isinstance(mask, list):\n            raise ValueError('{} layer mask must be a list, namely [query_mask, value_mask].'.format(class_name))\n        if len(mask) < 2 or len(mask) > len(inputs):\n            raise ValueError('{} layer mask must be a list of length 2, namely [query_mask, value_mask]. Given length: {}'.format(class_name, len(mask)))",
    "docstring": "Validates arguments of the call method.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\layers\\dense_attention.py",
    "ast_data": "FunctionDef name:_validate_call_args arg:self arg:inputs arg:mask arguments arg arg arg Assign If Call Raise Call Call If BoolOp Compare Call Compare Call Raise Call Call Call If If Call Raise Call Call If BoolOp Compare Call Compare Call Call Raise Call Call Call"
  },
  {
    "library": "sphinx",
    "name": "SphinxTransformer",
    "source_code": "class SphinxTransformer(Transformer):\n    document: nodes.document\n    env: BuildEnvironment | None = None\n\n    def set_environment(self, env: BuildEnvironment) -> None:\n        self.env = env\n\n    def apply_transforms(self) -> None:\n        if isinstance(self.document, nodes.document):\n            if not hasattr(self.document.settings, 'env') and self.env:\n                self.document.settings.env = self.env\n            super().apply_transforms()\n        else:\n            try:\n                document = new_document('')\n                if self.env:\n                    document.settings.env = self.env\n                document += self.document\n                self.document = document\n                super().apply_transforms()\n            finally:\n                self.document = self.document[0]",
    "docstring": "A transformer for Sphinx.",
    "type": "class",
    "file_path": "sphinx\\sphinx\\transforms\\__init__.py",
    "ast_data": "ClassDef name:SphinxTransformer FunctionDef name:set_environment arg:self arg:env arguments arg arg Assign FunctionDef name:apply_transforms arg:self arguments arg If Call If BoolOp Call Assign Call Call Try Assign Call If Assign Assign Call Call Assign"
  },
  {
    "library": "cherrypy",
    "name": "PyWebService",
    "source_code": "class PyWebService(win32serviceutil.ServiceFramework):\n    _svc_name_ = 'Python Web Service'\n    _svc_display_name_ = 'Python Web Service'\n    _svc_deps_ = None\n    _exe_name_ = 'pywebsvc'\n    _exe_args_ = None\n    _svc_description_ = 'Python Web Service'\n\n    def SvcDoRun(self):\n        from cherrypy import process\n        process.bus.start()\n        process.bus.block()\n\n    def SvcStop(self):\n        from cherrypy import process\n        self.ReportServiceStatus(win32service.SERVICE_STOP_PENDING)\n        process.bus.exit()\n\n    def SvcOther(self, control):\n        from cherrypy import process\n        process.bus.publish(control_codes.key_for(control))",
    "docstring": "Python Web Service.",
    "type": "class",
    "file_path": "cherrypy\\cherrypy\\process\\win32.py",
    "ast_data": "ClassDef name:PyWebService Assign Assign Assign Assign Assign Assign FunctionDef name:SvcDoRun arg:self arguments arg Call Call FunctionDef name:SvcStop arg:self arguments arg Call Call FunctionDef name:SvcOther arg:self arg:control arguments arg arg Call Call"
  },
  {
    "library": "pandas",
    "name": "_safe_cast",
    "source_code": "@classmethod\ndef _safe_cast(cls, values: np.ndarray, dtype: np.dtype, copy: bool) -> np.ndarray:\n    raise AbstractMethodError(cls)",
    "docstring": "Safely cast the values to the given dtype. \"safe\" in this context means the casting is lossless.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\arrays\\numeric.py",
    "ast_data": "FunctionDef name:_safe_cast arg:cls arg:values arg:dtype arg:copy arguments arg arg arg arg Raise Call"
  },
  {
    "library": "pandas",
    "name": "name",
    "source_code": "@property\ndef name(self) -> Hashable:\n    return self._name",
    "docstring": "Return Index or MultiIndex name. Returns ------- label (hashable object) The name of the Index. See Also -------- Index.set_names: Able to set new names partially and by level. Index.rename: Able to set new names partially and by level. Series.name: Corresponding Series property. Examples -------- >>> idx = pd.Index([1, 2, 3], name=\"x\") >>> idx Index([1, 2, 3], dtype='int64', name='x') >>> idx.name 'x'",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\indexes\\base.py",
    "ast_data": "FunctionDef name:name arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_deferred_dependencies",
    "source_code": "@property\ndef _deferred_dependencies(self):\n    return self._self_unconditional_deferred_dependencies",
    "docstring": "A dictionary with deferred dependencies. Stores restorations for other Trackable objects on which this object may eventually depend. May be overridden by sub-classes (e.g. Optimizers use conditional dependencies based the current graph, and so need separate management of deferred dependencies too). Returns: A dictionary mapping from local name to a list of CheckpointPosition objects.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\trackable\\base.py",
    "ast_data": "FunctionDef name:_deferred_dependencies arg:self arguments arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "build_stage",
    "source_code": "def build_stage(stage_module: torch.nn.Module, stage_index: int, pipe_info: PipeInfo, device: torch.device, group: Optional[dist.ProcessGroup]=None) -> _PipelineStage:\n    return _PipelineStage(stage_module, stage_index, pipe_info, device, group)",
    "docstring": "Create a pipeline stage given a stage_module to be wrapped by this stage and pipeline information. Args: stage_module (torch.nn.Module): the module to be wrapped by this stage stage_index (int): the index of this stage in the pipeline pipe_info (PipeInfo): information about the pipeline, can be retrieved by device (torch.device): the device to be used by this stage group (Optional[dist.ProcessGroup]): the process group to be used by this stage Returns: _PipelineStage: a pipeline stage that can run with .",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\pipelining\\stage.py",
    "ast_data": "FunctionDef name:build_stage arg:stage_module arg:stage_index arg:pipe_info arg:device arg:group arguments arg arg arg arg arg Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "empty",
    "source_code": "def empty(self, shape: Shape) -> ExtensionArray:\n    cls = self.construct_array_type()\n    return cls._empty(shape, dtype=self)",
    "docstring": "Construct an ExtensionArray of this dtype with the given shape. Analogous to numpy.empty. Parameters ---------- shape : int or tuple[int] Returns ------- ExtensionArray",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\dtypes\\base.py",
    "ast_data": "FunctionDef name:empty arg:self arg:shape arguments arg arg Assign Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "PyCTError",
    "source_code": "class PyCTError(Exception):\n    pass",
    "docstring": "Base class for all exceptions.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\autograph\\pyct\\errors.py",
    "ast_data": "ClassDef name:PyCTError"
  },
  {
    "library": "numpy",
    "name": "get_config_cmd",
    "source_code": "def get_config_cmd(self):\n    cmd = get_cmd('config')\n    cmd.ensure_finalized()\n    cmd.dump_source = 0\n    cmd.noisy = 0\n    old_path = os.environ.get('PATH')\n    if old_path:\n        path = os.pathsep.join(['.', old_path])\n        os.environ['PATH'] = path\n    return cmd",
    "docstring": "Returns the numpy.distutils config command instance.",
    "type": "method",
    "file_path": "numpy\\numpy\\distutils\\misc_util.py",
    "ast_data": "FunctionDef name:get_config_cmd arg:self arguments arg Assign Call Call Assign Assign Assign Call If Assign Call Assign Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "allocate_groups",
    "source_code": "def allocate_groups(self):\n    assert config.memory_pool in ('none', 'intermediates', 'outputs', 'combined')\n    assert self.buffer_groups is not None\n    for group in self.buffer_groups:\n        group.make_allocation()\n    outputs: list[Allocation] = []\n    intermediates: list[Allocation] = []\n    for group in self.buffer_groups:\n        assert group.allocation\n        if group.is_output and config.memory_pool != 'combined':\n            outputs.append(group.allocation)\n        else:\n            intermediates.append(group.allocation)\n    for block in sorted(outputs, key=lambda x: (x.size_hint, -len(x.live_range))):\n        self.pools.allocate_output(block)\n    for block in sorted(intermediates, key=lambda x: (-x.size_hint, -len(x.live_range))):\n        self.pools.allocate(block)\n    self.pools.finalize()",
    "docstring": "Assign every allocation to a specific location in a specific AllocationPool.",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\codegen\\memory_planning.py",
    "ast_data": "FunctionDef name:allocate_groups arg:self arguments arg Compare Compare For Call For If BoolOp Compare Call Call For Call arguments arg Call Call For Call arguments arg Call Call Call"
  },
  {
    "library": "django",
    "name": "check_geom_offset",
    "source_code": "def check_geom_offset(result, func, cargs, offset=-1):\n    check_err(result)\n    geom = ptr_byref(cargs, offset=offset)\n    return check_geom(geom, func, cargs)",
    "docstring": "Check the geometry at the given offset in the C parameter list.",
    "type": "function",
    "file_path": "django\\django\\contrib\\gis\\gdal\\prototypes\\errcheck.py",
    "ast_data": "FunctionDef name:check_geom_offset arg:result arg:func arg:cargs arg:offset arguments arg arg arg arg Call Assign Call Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "create_with_canvas",
    "source_code": "@classmethod\ndef create_with_canvas(cls, canvas_class, figure, num):\n    return cls(canvas_class(figure), num)",
    "docstring": "Create a manager for a given *figure* using a specific *canvas_class*. Backends should override this method if they have specific needs for setting up the canvas or the manager.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\backend_bases.py",
    "ast_data": "FunctionDef name:create_with_canvas arg:cls arg:canvas_class arg:figure arg:num arguments arg arg arg arg Return return:yes Call Call"
  },
  {
    "library": "scipy",
    "name": "_compute_prob_outside_square",
    "source_code": "def _compute_prob_outside_square(n, h):\n    P = 0.0\n    k = int(np.floor(n / h))\n    while k >= 0:\n        p1 = 1.0\n        for j in range(h):\n            p1 = (n - k * h - j) * p1 / (n + k * h + j + 1)\n        P = p1 * (1.0 - P)\n        k -= 1\n    return 2 * P",
    "docstring": "Compute the proportion of paths that pass outside the two diagonal lines. Parameters ---------- n : integer n > 0 h : integer 0 <= h <= n Returns ------- p : float The proportion of paths that pass outside the lines x-y = +/-h.",
    "type": "function",
    "file_path": "scipy\\scipy\\stats\\_stats_py.py",
    "ast_data": "FunctionDef name:_compute_prob_outside_square arg:n arg:h arguments arg arg Assign Assign Call Call While Compare Assign For Call Assign Assign Return return:yes"
  },
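For small `n` the closed-form recursion can be cross-checked by enumerating all C(2n, n) monotone lattice paths from (0, 0) to (n, n) and counting those that ever reach `|x - y| >= h`; the recursion is copied inline below since the original is private to `scipy.stats`:

```python
from itertools import combinations
import numpy as np

def prob_outside_square(n, h):
    # Copy of the reflection-based recursion above.
    P, k = 0.0, int(np.floor(n / h))
    while k >= 0:
        p1 = 1.0
        for j in range(h):
            p1 = (n - k * h - j) * p1 / (n + k * h + j + 1)
        P = p1 * (1.0 - P)
        k -= 1
    return 2 * P

def brute_force(n, h):
    # Enumerate positions of the n x-steps among 2n steps.
    total = hits = 0
    for rights in combinations(range(2 * n), n):
        rights = set(rights)
        x = y = 0
        outside = False
        for step in range(2 * n):
            if step in rights:
                x += 1
            else:
                y += 1
            if abs(x - y) >= h:
                outside = True
        total += 1
        hits += outside
    return hits / total

assert np.isclose(prob_outside_square(3, 2), brute_force(3, 2))  # 12/20
assert np.isclose(prob_outside_square(4, 2), brute_force(4, 2))  # 54/70
```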
  {
    "library": "django",
    "name": "unary_union",
    "source_code": "@property\ndef unary_union(self):\n    return self._topology(capi.geos_unary_union(self.ptr))",
    "docstring": "Return the union of all the elements of this geometry.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\geos\\geometry.py",
    "ast_data": "FunctionDef name:unary_union arg:self arguments arg Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_assert_float_dtype",
    "source_code": "def _assert_float_dtype(dtype):\n    dtype = dtypes.as_dtype(dtype)\n    if not dtype.is_floating:\n        raise ValueError('Expected floating point type, got %s.' % dtype)\n    return dtype",
    "docstring": "Validate and return floating point type based on . must be a floating point type. Args: dtype: The data type to validate. Returns: Validated type. Raises: ValueError: if is not a floating point type.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\initializers\\initializers_v2.py",
    "ast_data": "FunctionDef name:_assert_float_dtype arg:dtype arguments arg Assign Call If Raise Call Return return:yes"
  },
  {
    "library": "scipy",
    "name": "convert_dtypes",
    "source_code": "def convert_dtypes(dtype_template, order_code):\n    dtypes = dtype_template.copy()\n    for k in dtypes:\n        dtypes[k] = np.dtype(dtypes[k]).newbyteorder(order_code)\n    return dtypes",
    "docstring": "Convert dtypes in mapping to given order Parameters ---------- dtype_template : mapping mapping with values returning numpy dtype from ``",
    "type": "function",
    "file_path": "scipy\\scipy\\io\\matlab\\_miobase.py",
    "ast_data": "FunctionDef name:convert_dtypes arg:dtype_template arg:order_code arguments arg arg Assign Call For Assign Call Call Return return:yes"
  },
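A usage sketch of the same byte-order re-keying with a hypothetical two-entry template:

```python
import numpy as np

template = {'miINT32': 'i4', 'miDOUBLE': 'f8'}

# Rebuild every value as a big-endian ('>') dtype, as the MATLAB reader
# does for byte-order-dependent files.
dtypes = {k: np.dtype(v).newbyteorder('>') for k, v in template.items()}

assert dtypes['miINT32'].itemsize == 4
assert dtypes['miDOUBLE'].byteorder in ('>', '=')  # '>' on little-endian hosts
```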
  {
    "library": "pytorch",
    "name": "ModuleWrapper",
    "source_code": "class ModuleWrapper(nn.Module):\n\n    def __init__(self, cpp_module):\n        self.cpp_module = cpp_module\n        super().__init__()\n        self._parameters = OrderedDictWrapper(cpp_module, '_parameters')\n        self._buffers: OrderedDictWrapper = OrderedDictWrapper(cpp_module, '_buffers')\n        self._modules: OrderedDictWrapper = OrderedDictWrapper(cpp_module, '_modules')\n        for attr in dir(cpp_module):\n            if not attr.startswith('_'):\n                setattr(self, attr, getattr(self.cpp_module, attr))\n\n    def _apply(self, fn, recurse=True):\n        for param in self.parameters():\n            param.data = fn(param.data)\n            if param._grad is not None:\n                param._grad.data = fn(param._grad.data)\n        for buf in self.buffers():\n            buf.data = fn(buf.data)\n        return self\n\n    @property\n    def training(self):\n        return self.cpp_module.training\n\n    @training.setter\n    def training(self, mode):\n        self.cpp_module.train(mode)\n\n    def __repr__(self):\n        return self.cpp_module.__repr__()",
    "docstring": "A subclass of `` that wraps a C++ frontend module and delegates all access.",
    "type": "class",
    "file_path": "pytorch\\torch\\nn\\cpp.py",
    "ast_data": "ClassDef name:ModuleWrapper FunctionDef name:__init__ arg:self arg:cpp_module arguments arg arg Assign Call Call Assign Call Call Call For Call If Call Call Call FunctionDef name:_apply arg:self arg:fn arg:recurse arguments arg arg arg For Call Assign Call If Compare Assign Call For Call Assign Call Return return:yes FunctionDef name:training arg:self arguments arg Return return:yes FunctionDef name:training arg:self arg:mode arguments arg arg Call FunctionDef name:__repr__ arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "_hook_with_zero_step_setup",
    "source_code": "def _hook_with_zero_step_setup(ddp_ref: weakref.ReferenceType, zero: ZeroRedundancyOptimizer, bucket: dist.GradBucket):\n    if not ddp_ref()._has_rebuilt_buckets:\n        assert zero._overlap_info.status == _OverlapStatus.UNINITIALIZED\n        return\n    bucket_index = bucket.index()\n    overlap_info = zero._overlap_info\n    if overlap_info.status == _OverlapStatus.UNINITIALIZED:\n        overlap_info.status = _OverlapStatus.DDP_HAS_REBUILT_BUCKETS\n    if overlap_info.status == _OverlapStatus.DDP_HAS_REBUILT_BUCKETS:\n        if bucket_index == 0 and len(overlap_info.params_per_bucket) > 0:\n            zero._init_zero_for_overlap()\n        else:\n            _save_ddp_bucket_info(bucket, zero)",
    "docstring": "Encapsulate the setup logic for :func: and :func:. This means the logic to run in the hook before the backward pass and optimizer step can actually be overlapped. This is factored out since it is common to both :func: and :func:. Arguments: ddp_ref (weakref.ReferenceType): weak reference to the process's :class: instance. zero (ZeroRedundancyOptimizer): the calling process's :class: instance. bucket (dist.GradBucket): the current gradient bucket.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\algorithms\\ddp_comm_hooks\\ddp_zero_hook.py",
    "ast_data": "FunctionDef name:_hook_with_zero_step_setup arg:ddp_ref arg:zero arg:bucket arguments arg arg arg If Call Compare Return return:no Assign Call Assign If Compare Assign If Compare If BoolOp Compare Compare Call Call Call"
  },
  {
    "library": "numpy",
    "name": "capitalize",
    "source_code": "def capitalize(self):\n    return asarray(capitalize(self))",
    "docstring": "Return a copy of with only the first character of each element capitalized. See Also -------- char.capitalize",
    "type": "method",
    "file_path": "numpy\\numpy\\_core\\defchararray.py",
    "ast_data": "FunctionDef name:capitalize arg:self arguments arg Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "children",
    "source_code": "@property\ndef children(self) -> List['AtomicFunction']:\n    return self._children",
    "docstring": "AtomicFunctions needed as dependencies for this one.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\polymorphic_function\\atomic_function.py",
    "ast_data": "FunctionDef name:children arg:self arguments arg Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "get_offset_text",
    "source_code": "def get_offset_text(self):\n    return self.offsetText",
    "docstring": "Return the axis offsetText as a Text instance.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\axis.py",
    "ast_data": "FunctionDef name:get_offset_text arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "get_slot",
    "source_code": "def get_slot(self, *args, **kwargs):\n    return self._opt.get_slot(*args, **kwargs)",
    "docstring": "Return a slot named \"name\" created for \"var\" by the Optimizer. This simply wraps the get_slot() from the actual optimizer. Args: *args: Arguments for get_slot(). **kwargs: Keyword arguments for get_slot(). Returns: The for the slot if it was created, otherwise.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\tpu\\tpu_optimizer.py",
    "ast_data": "FunctionDef name:get_slot arg:self arguments arg arg arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "add_step",
    "source_code": "def add_step(self, step, run_meta):\n    op_log = tfprof_logger.merge_default_with_oplog(self._graph, run_meta=run_meta)\n    self._coverage = print_mdl.AddStep(step, _graph_string(self._graph), run_meta.SerializeToString(), op_log.SerializeToString())",
    "docstring": "Add statistics of a step. Args: step: int, An id used to group one or more different together. When profiling with the profile_xxx APIs, user can use the id in the to profile these together. run_meta: RunMetadata proto that contains statistics of a session run.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\profiler\\model_analyzer.py",
    "ast_data": "FunctionDef name:add_step arg:self arg:step arg:run_meta arguments arg arg arg Assign Call Assign Call Call Call Call"
  },
  {
    "library": "sphinx",
    "name": "SphinxLogRecord",
    "source_code": "class SphinxLogRecord(logging.LogRecord):\n    prefix = ''\n    location: Any = None\n\n    def getMessage(self) -> str:\n        message = super().getMessage()\n        location = getattr(self, 'location', None)\n        if location:\n            message = f'{location}: {self.prefix}{message}'\n        elif self.prefix not in message:\n            message = self.prefix + message\n        return message",
    "docstring": "Log record class supporting location",
    "type": "class",
    "file_path": "sphinx\\sphinx\\util\\logging.py",
    "ast_data": "ClassDef name:SphinxLogRecord Assign FunctionDef name:getMessage arg:self arguments arg Assign Call Call Assign Call If Assign If Compare Assign Return return:yes"
  },
  {
    "library": "authlib",
    "name": "resolve_public_key",
    "source_code": "def resolve_public_key(self, request):\n    raise NotImplementedError()",
    "docstring": "Resolve a public key for decoding ``, developers MUST implement this method in subclass:: def resolve_public_key(self, request): return get_public_key_from_user(request.credential) :return: JWK or Key string",
    "type": "method",
    "file_path": "authlib\\authlib\\oauth2\\rfc7591\\endpoint.py",
    "ast_data": "FunctionDef name:resolve_public_key arg:self arg:request arguments arg arg Raise Call"
  },
  {
    "library": "tensorflow",
    "name": "_initialize_stats",
    "source_code": "def _initialize_stats(self):\n    self._defining_op = dict()\n    for op_info in self._quant_interpreter._get_ops_details():\n        self._defining_op.update({tensor_idx: op_info['index'] for tensor_idx in op_info['outputs']})\n    self._numeric_verify_tensor_details = None\n    self._numeric_verify_op_details = None\n    if not self._get_numeric_verify_tensor_details():\n        raise ValueError('Please check if the quantized model is in debug mode')\n    self._layer_debug_metrics = _DEFAULT_LAYER_DEBUG_METRICS.copy()\n    if self._debug_options.layer_debug_metrics:\n        self._layer_debug_metrics.update(self._debug_options.layer_debug_metrics)\n    self.layer_statistics = None\n    self.model_statistics = None\n    self._metrics = metrics_stub.TFLiteMetrics()\n    self._metrics.increase_counter_debugger_creation()",
    "docstring": "Helper function initializes stats.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\lite\\tools\\optimize\\debugging\\python\\debugger.py",
    "ast_data": "FunctionDef name:_initialize_stats arg:self arguments arg Assign Call For Call Call Assign Assign If Call Raise Call Assign Call If Call Assign Assign Assign Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_clean_function_name",
    "source_code": "def _clean_function_name(name):\n    match = re.search(_FUNCTION_WRAPPER_NAME_REGEX, name)\n    if match:\n        return match.group(1)\n    else:\n        return name",
    "docstring": "Vanity function to keep the function names comprehensible.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\saved_model\\function_deserialization.py",
    "ast_data": "FunctionDef name:_clean_function_name arg:name arguments arg Assign Call If Return return:yes Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "name",
    "source_code": "@property\ndef name(self):\n    return self.key",
    "docstring": "See base class.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\feature_column\\feature_column_v2.py",
    "ast_data": "FunctionDef name:name arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "cygpath",
    "source_code": "def cygpath(path):\n    return os.path.abspath(path).replace('\\\\', '/')",
    "docstring": "Convert path from posix to windows.",
    "type": "function",
    "file_path": "tensorflow\\configure.py",
    "ast_data": "FunctionDef name:cygpath arg:path arguments arg Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "execute_save",
    "source_code": "def execute_save(self, staged_state_dict: STATE_DICT_TYPE, *, checkpoint_id: Union[str, os.PathLike, None]=None, storage_writer: Optional[StorageWriter]=None, planner: Optional[SavePlanner]=None, process_group: Optional[dist.ProcessGroup]=None) -> Future:\n    global _CHECKPOINT_PROCESS\n    pg_init_info: Optional[_ProcessGroupInitInfo] = None\n    if _CHECKPOINT_PROCESS is None:\n        pg_init_info = _ProcessGroupInitInfo(process_group)\n    f: Future = self._executor.submit(self._execute_save_impl, pg_init_info=pg_init_info, staged_state_dict=staged_state_dict, checkpoint_id=checkpoint_id, storage_writer=storage_writer, planner=planner)\n    f.add_done_callback(lambda f: self._executor.shutdown(wait=False))\n    return f",
    "docstring": "NOTE: - Checkpoint process is implemented as a daemon process. The AsyncCheckpointProcess' lifetime is tied to the lifetime of the main process (e.g. trainer process). - The first call to execute_save_in_process() will initialize the checkpoint daemon process. Subsequent async checkpoint requests will not need process initialization. Therefore, the first async checkpoint request will take longer to complete. - Process initialization can have significant overhead, dominated by latency for all ranks to spawn a background process + process group initialization in the background process.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\checkpoint\\_async_process_executor.py",
    "ast_data": "FunctionDef name:execute_save arg:self arg:staged_state_dict arguments arg arg arg arg arg arg If Compare Assign Call Call Call arguments arg Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_get_device",
    "source_code": "def _get_device(device: Union[int, str, torch.device]) -> torch.device:\n    if isinstance(device, str):\n        device = torch.device(device)\n    elif isinstance(device, int):\n        device = torch.device('cuda', device)\n    return device",
    "docstring": "Return the torch.device type object from the passed in device. Args: device (torch.device or int): selected device.",
    "type": "function",
    "file_path": "pytorch\\torch\\cuda\\__init__.py",
    "ast_data": "FunctionDef name:_get_device arg:device arguments arg If Call Assign Call If Call Assign Call Return return:yes"
  },
  {
    "library": "django",
    "name": "migration_plan",
    "source_code": "def migration_plan(self, targets, clean_start=False):\n    plan = []\n    if clean_start:\n        applied = {}\n    else:\n        applied = dict(self.loader.applied_migrations)\n    for target in targets:\n        if target[1] is None:\n            for root in self.loader.graph.root_nodes():\n                if root[0] == target[0]:\n                    for migration in self.loader.graph.backwards_plan(root):\n                        if migration in applied:\n                            plan.append((self.loader.graph.nodes[migration], True))\n                            applied.pop(migration)\n        elif self.loader.replace_migrations and target not in self.loader.graph.node_map:\n            self.loader.replace_migrations = False\n            self.loader.build_graph()\n            return self.migration_plan(targets, clean_start=clean_start)\n        elif target in applied:\n            next_in_app = sorted((n for n in self.loader.graph.node_map[target].children if n[0] == target[0]))\n            for node in next_in_app:\n                for migration in self.loader.graph.backwards_plan(node):\n                    if migration in applied:\n                        plan.append((self.loader.graph.nodes[migration], True))\n                        applied.pop(migration)\n        else:\n            for migration in self.loader.graph.forwards_plan(target):\n                if migration not in applied:\n                    plan.append((self.loader.graph.nodes[migration], False))\n                    applied[migration] = self.loader.graph.nodes[migration]\n    return plan",
    "docstring": "Given a set of targets, return a list of (Migration instance, backwards?).",
    "type": "method",
    "file_path": "django\\django\\db\\migrations\\executor.py",
    "ast_data": "FunctionDef name:migration_plan arg:self arg:targets arg:clean_start arguments arg arg arg Assign If Assign Assign Call For If Compare For Call If Compare For Call If Compare Call Call If BoolOp Compare Assign Call Return return:yes Call If Compare Assign Call Compare For For Call If Compare Call Call For Call If Compare Call Assign Return return:yes"
  },
  {
    "library": "numpy",
    "name": "_recursive_fill_value",
    "source_code": "def _recursive_fill_value(dtype, f):\n    if dtype.names is not None:\n        vals = tuple((np.array(_recursive_fill_value(dtype[name], f)) for name in dtype.names))\n        return np.array(vals, dtype=dtype)[()]\n    elif dtype.subdtype:\n        subtype, shape = dtype.subdtype\n        subval = _recursive_fill_value(subtype, f)\n        return np.full(shape, subval)\n    else:\n        return f(dtype)",
    "docstring": "Recursively produce a fill value for , calling f on scalar dtypes",
    "type": "function",
    "file_path": "numpy\\numpy\\ma\\core.py",
    "ast_data": "FunctionDef name:_recursive_fill_value arg:dtype arg:f arguments arg arg If Compare Assign Call Call Call Return return:yes Call If Assign Assign Call Return return:yes Call Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "_square_of_sums",
    "source_code": "def _square_of_sums(a, axis=0):\n    a, axis = _chk_asarray(a, axis)\n    s = np.sum(a, axis)\n    if not np.isscalar(s):\n        return s.astype(float) * s\n    else:\n        return float(s) * s",
    "docstring": "Sum elements of the input array, and return the square(s) of that sum. Parameters ---------- a : array_like Input array. axis : int or None, optional Axis along which to calculate. Default is 0. If None, compute over the whole array . Returns ------- square_of_sums : float or ndarray The square of the sum over . See Also -------- _sum_of_squares : The sum of squares (the opposite of ).",
    "type": "function",
    "file_path": "scipy\\scipy\\stats\\_stats_py.py",
    "ast_data": "FunctionDef name:_square_of_sums arg:a arg:axis arguments arg arg Assign Call Assign Call If Call Return return:yes Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "path_live_weakrefs",
    "source_code": "def path_live_weakrefs(self) -> Iterator[StorageWeakRefWrapper]:\n    for node in self._path_from_root:\n        for output in node.outputs_weakrefs:\n            if is_live(output):\n                yield output",
    "docstring": "Returns all live storages weakrefs that created by nodes in this path",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\cudagraph_trees.py",
    "ast_data": "FunctionDef name:path_live_weakrefs arg:self arguments arg For For If Call"
  },
  {
    "library": "pandas",
    "name": "_can_hold_element",
    "source_code": "@final\ndef _can_hold_element(self, element: Any) -> bool:\n    element = extract_array(element, extract_numpy=True)\n    return can_hold_element(self.values, element)",
    "docstring": "require the same dtype as ourselves",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\internals\\blocks.py",
    "ast_data": "FunctionDef name:_can_hold_element arg:self arg:element arguments arg arg Assign Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_async_checkpointer",
    "source_code": "def _async_checkpointer(self):\n    if self._async_checkpointer_impl is None:\n        self._async_checkpointer_impl = async_checkpoint_helper.AsyncCheckpointHelper(Checkpoint, **self._kwargs)\n    return self._async_checkpointer_impl",
    "docstring": "Returns an instantiated AsyncCheckpointHelper.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\checkpoint\\checkpoint.py",
    "ast_data": "FunctionDef name:_async_checkpointer arg:self arguments arg If Compare Assign Call Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "_check_device",
    "source_code": "def _check_device(bare_xp: Namespace, device: Device) -> None:\n    if bare_xp is sys.modules.get('numpy'):\n        if device not in ('cpu', None):\n            raise ValueError(f'Unsupported device for NumPy: {device!r}')\n    elif bare_xp is sys.modules.get('dask.array'):\n        if device not in ('cpu', _DASK_DEVICE, None):\n            raise ValueError(f'Unsupported device for Dask: {device!r}')",
    "docstring": "Validate dummy device on device-less array backends. Notes ----- This function is also invoked by CuPy, which does have multiple devices if there are multiple GPUs available. However, CuPy multi-device support is currently impossible without using the global device or a context manager:",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\externals\\array_api_compat\\common\\_helpers.py",
    "ast_data": "FunctionDef name:_check_device arg:bare_xp arg:device arguments arg arg If Compare Call If Compare Raise Call If Compare Call If Compare Raise Call"
  },
  {
    "library": "pytorch",
    "name": "calculate_tensor_size",
    "source_code": "def calculate_tensor_size(tensor: torch.Tensor) -> float:\n    num_elements = tensor.numel()\n    element_size = tensor.element_size()\n    return num_elements * element_size / (1024 * 1024)",
    "docstring": "Calculate the size of a PyTorch tensor in megabytes (MB). Args: tensor (torch.Tensor): Input tensor Returns: float: Memory size in MB",
    "type": "function",
    "file_path": "pytorch\\torch\\_functorch\\partitioners.py",
    "ast_data": "FunctionDef name:calculate_tensor_size arg:tensor arguments arg Assign Call Assign Call Return return:yes"
  },
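A worked check of the arithmetic: a 1024x1024 float32 tensor holds 1024*1024 elements at 4 bytes each, i.e. exactly 4 MiB:

```python
import torch

t = torch.zeros(1024, 1024, dtype=torch.float32)
size_mb = t.numel() * t.element_size() / (1024 * 1024)
assert size_mb == 4.0
```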
  {
    "library": "matplotlib",
    "name": "on_key_press",
    "source_code": "def on_key_press(self, event):\n    if self.active:\n        key = event.key or ''\n        key = key.replace('ctrl', 'control')\n        if key == self._state_modifier_keys['clear']:\n            self.clear()\n            return\n        for state, modifier in self._state_modifier_keys.items():\n            if modifier in key.split('+'):\n                if state == 'rotate':\n                    if state in self._state:\n                        self._state.discard(state)\n                    else:\n                        self._state.add(state)\n                else:\n                    self._state.add(state)\n        self._on_key_press(event)",
    "docstring": "Key press event handler and validator for all selection widgets.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\widgets.py",
    "ast_data": "FunctionDef name:on_key_press arg:self arg:event arguments arg arg If Assign BoolOp Assign Call If Compare Call Return return:no For Call If Compare Call If Compare If Compare Call Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "OpInfo",
    "source_code": "@dataclass\nclass OpInfo:\n    compute_mesh: DeviceMesh\n    schema: OpSchema\n    flat_args_schema: list[object]\n    local_args: Sequence[object]\n    local_kwargs: dict[str, object]\n    args_tree_spec: Optional[TreeSpec] = None\n    output_sharding: Optional[OutputSharding] = None",
    "docstring": "All Runtime Op execution info are packed here",
    "type": "class",
    "file_path": "pytorch\\torch\\distributed\\tensor\\_op_schema.py",
    "ast_data": "ClassDef name:OpInfo"
  },
  {
    "library": "numpy",
    "name": "get_fill_value",
    "source_code": "def get_fill_value(a):\n    if isinstance(a, MaskedArray):\n        result = a.fill_value\n    else:\n        result = default_fill_value(a)\n    return result",
    "docstring": "Return the filling value of a, if any. Otherwise, returns the default filling value for that type.",
    "type": "function",
    "file_path": "numpy\\numpy\\ma\\core.py",
    "ast_data": "FunctionDef name:get_fill_value arg:a arguments arg If Call Assign Assign Call Return return:yes"
  },
  {
    "library": "django",
    "name": "words",
    "source_code": "def words(count, common=True):\n    word_list = list(COMMON_WORDS) if common else []\n    c = len(word_list)\n    if count > c:\n        count -= c\n        while count > 0:\n            c = min(count, len(WORDS))\n            count -= c\n            word_list += random.sample(WORDS, c)\n    else:\n        word_list = word_list[:count]\n    return ' '.join(word_list)",
    "docstring": "Return a string of lorem ipsum words separated by a single space. If is True, then the first 19 words will be the standard 'lorem ipsum' words. Otherwise, all words will be selected randomly.",
    "type": "function",
    "file_path": "django\\django\\utils\\lorem_ipsum.py",
    "ast_data": "FunctionDef name:words arg:count arg:common arguments arg arg Assign Call Assign Call If Compare While Compare Assign Call Call Call Assign Return return:yes Call"
  },
  {
    "library": "django",
    "name": "savepoint_create_sql",
    "source_code": "def savepoint_create_sql(self, sid):\n    return 'SAVEPOINT %s' % self.quote_name(sid)",
    "docstring": "Return the SQL for starting a new savepoint. Only required if the \"uses_savepoints\" feature is True. The \"sid\" parameter is a string for the savepoint id.",
    "type": "method",
    "file_path": "django\\django\\db\\backends\\base\\operations.py",
    "ast_data": "FunctionDef name:savepoint_create_sql arg:self arg:sid arguments arg arg Return return:yes Call"
  },
  {
    "library": "django",
    "name": "applied_migrations",
    "source_code": "def applied_migrations(self):\n    if self.has_table():\n        return {(migration.app, migration.name): migration for migration in self.migration_qs}\n    else:\n        return {}",
    "docstring": "Return a dict mapping (app_name, migration_name) to Migration instances for all applied migrations.",
    "type": "method",
    "file_path": "django\\django\\db\\migrations\\recorder.py",
    "ast_data": "FunctionDef name:applied_migrations arg:self arguments arg If Call Return return:yes Return return:no"
  },
  {
    "library": "kornia",
    "name": "preprocess_boxes",
    "source_code": "def preprocess_boxes(input: Union[Tensor, Boxes], mode: str='vertices_plus') -> Boxes:\n    if isinstance(input, Tensor):\n        if not (len(input.shape) == 4 and input.shape[2:] == torch.Size([4, 2])):\n            raise RuntimeError(f'Only BxNx4x2 tensor is supported. Got {input.shape}.')\n        input = Boxes.from_tensor(input, mode=mode)\n    if not isinstance(input, Boxes):\n        raise RuntimeError(f'Expect `Boxes` type. Got {type(input)}.')\n    return input",
    "docstring": "Preprocess input boxes. Args: input: 2D boxes, shape of :math:, :math: or a list of :math:. See below for more details. mode: The format in which the boxes are provided. * 'xyxy': boxes are assumed to be in the format `(N, 4)(B, N, 4)(N, 4)(B, N, 4)(N, 4)(B, N, 4)(N, 4, 2)(B, N, 4, 2)(N, 4, 2)(B, N, 4, 2)quadrilateral quadrilaterals `_ are rectangles, rhombus and trapezoids.",
    "type": "function",
    "file_path": "kornia\\kornia\\augmentation\\utils\\helpers.py",
    "ast_data": "FunctionDef name:preprocess_boxes arg:input arg:mode arguments arg arg If Call If BoolOp Compare Call Compare Call Raise Call Assign Call If Call Raise Call Call Return return:yes"
  },
  {
    "library": "sphinx",
    "name": "is_classmethod_descriptor",
    "source_code": "def is_classmethod_descriptor(obj: Any, cls: Any=None, name: str | None=None) -> TypeIs[types.ClassMethodDescriptorType]:\n    if isinstance(obj, types.ClassMethodDescriptorType):\n        return True\n    if cls and name:\n        sentinel = object()\n        for basecls in getmro(cls):\n            meth = basecls.__dict__.get(name, sentinel)\n            if meth is not sentinel:\n                return isinstance(meth, types.ClassMethodDescriptorType)\n    return False",
    "docstring": "Check if the object is a :class:. This check is stricter than :func: as a classmethod descriptor does not have a `` attribute.",
    "type": "function",
    "file_path": "sphinx\\sphinx\\util\\inspect.py",
    "ast_data": "FunctionDef name:is_classmethod_descriptor arg:obj arg:cls arg:name arguments arg arg arg If Call Return return:yes If BoolOp Assign Call For Call Assign Call If Compare Return return:yes Call Return return:yes"
  },
  {
    "library": "scrapy",
    "name": "policy",
    "source_code": "def policy(self, resp_or_url: Response | str, request: Request) -> ReferrerPolicy:\n    policy_name = request.meta.get('referrer_policy')\n    if policy_name is None and isinstance(resp_or_url, Response):\n        policy_header = resp_or_url.headers.get('Referrer-Policy')\n        if policy_header is not None:\n            policy_name = to_unicode(policy_header.decode('latin1'))\n    if policy_name is None:\n        return self.default_policy()\n    cls = _load_policy_class(policy_name, warning_only=True)\n    return cls() if cls else self.default_policy()",
    "docstring": "Determine Referrer-Policy to use from a parent Response (or URL), and a Request to be sent. - if a valid policy is set in Request meta, it is used. - if the policy is set in meta but is wrong (e.g. a typo error), the policy from settings is used - if the policy is not set in Request meta, but there is a Referrer-policy header in the parent response, it is used if valid - otherwise, the policy from settings is used.",
    "type": "method",
    "file_path": "scrapy\\scrapy\\spidermiddlewares\\referer.py",
    "ast_data": "FunctionDef name:policy arg:self arg:resp_or_url arg:request arguments arg arg arg Assign Call If BoolOp Compare Call Assign Call If Compare Assign Call Call If Compare Return return:yes Call Assign Call Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "ones",
    "source_code": "def ones(*size, dtype: Optional[torch.dtype]=None, layout: torch.layout=torch.strided, requires_grad: bool=False, device_mesh: Optional[DeviceMesh]=None, placements: Optional[Sequence[Placement]]=None) -> DTensor:\n    torch_size = normalize_to_torch_size(size)\n    return _dtensor_init_helper(torch.ones, torch_size, dtype=dtype, layout=layout, requires_grad=requires_grad, device_mesh=device_mesh, placements=placements)",
    "docstring": "Returns a :class: filled with the scalar value 1, with the shape defined by the variable argument `DTensortorch.dtypeDTensortorch.set_default_dtypetorch.layoutDTensorDeviceMeshPlacementDTensor` object on each rank",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\tensor\\_api.py",
    "ast_data": "FunctionDef name:ones arguments arg arg arg arg arg arg Assign Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "false_negatives",
    "source_code": "@tf_export(v1=['metrics.false_negatives'])\ndef false_negatives(labels, predictions, weights=None, metrics_collections=None, updates_collections=None, name=None):\n    if context.executing_eagerly():\n        raise RuntimeError('tf.metrics.false_negatives is not supported when eager execution is enabled.')\n    with variable_scope.variable_scope(name, 'false_negatives', (predictions, labels, weights)):\n        predictions, labels, weights = _remove_squeezable_dimensions(predictions=math_ops.cast(predictions, dtype=dtypes.bool), labels=math_ops.cast(labels, dtype=dtypes.bool), weights=weights)\n        is_false_negative = math_ops.logical_and(math_ops.equal(labels, True), math_ops.equal(predictions, False))\n        return _count_condition(is_false_negative, weights, metrics_collections, updates_collections)",
    "docstring": "Computes the total number of false negatives. If is , weights default to 1. Use weights of 0 to mask values. Args: labels: The ground truth values, a whose dimensions must match . Will be cast to . predictions: The predicted values, a of arbitrary dimensions. Will be cast to . weights: Optional whose rank is either 0, or the same rank as , and must be broadcastable to (i.e., all dimensions must be either , or the same as the corresponding dimension). metrics_collections: An optional list of collections that the metric value variable should be added to. updates_collections: An optional list of collections that the metric update ops should be added to. name: An optional variable_scope name. Returns: value_tensor: A representing the current value of the metric. update_op: An operation that accumulates the error from a batch of data. Raises: ValueError: If is not and its shape doesn't match , or if either or are not a list or tuple. RuntimeError: If eager execution is enabled.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\metrics_impl.py",
    "ast_data": "FunctionDef name:false_negatives arg:labels arg:predictions arg:weights arg:metrics_collections arg:updates_collections arg:name arguments arg arg arg arg arg arg If Call Raise Call With Call Assign Call Call Call Assign Call Call Call Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "boolean_mask_v2",
    "source_code": "@tf_export('boolean_mask', v1=[])\n@dispatch.add_dispatch_support\ndef boolean_mask_v2(tensor, mask, axis=None, name='boolean_mask'):\n    return boolean_mask(tensor, mask, name, axis)",
    "docstring": "Apply boolean mask to tensor. Numpy equivalent is . In general, tensortensorTruemask`. Raises: ValueError: If shapes do not conform. Examples:",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\array_ops.py",
    "ast_data": "FunctionDef name:boolean_mask_v2 arg:tensor arg:mask arg:axis arg:name arguments arg arg arg arg Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_overload_operator",
    "source_code": "@classmethod\ndef _overload_operator(cls, operator):\n    tensor_operator = getattr(tensor_lib.Tensor, operator)\n\n    def _operator(v, *args, **kwargs):\n        return tensor_operator(_var_to_tensor(v), *args, **kwargs)\n    setattr(cls, operator, _operator)",
    "docstring": "Delegate an operator overload to .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\sharded_variable.py",
    "ast_data": "FunctionDef name:_overload_operator arg:cls arg:operator arguments arg arg Assign Call FunctionDef name:_operator arg:v arguments arg arg arg Return return:yes Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "slice_inputs",
    "source_code": "def slice_inputs(self, indices_dataset, inputs):\n    flat_inputs = nest.flatten(inputs)\n\n    def dynamic_shape_like(t):\n        shape = list(t.shape)\n        shape[0] = None\n        return tuple(shape)\n    flat_dtypes = [inp.dtype for inp in flat_inputs]\n    contiguous = True\n    if self._shuffle and self._shuffle != 'batch':\n        contiguous = False\n\n    def grab_batch(indices):\n\n        def py_method(ind):\n\n            def slice_array(data):\n                return training_utils.slice_arrays(data, ind.numpy(), contiguous=contiguous)\n            return [slice_array(inp) for inp in flat_inputs]\n        flat_out = script_ops.eager_py_func(py_method, [indices], flat_dtypes)\n        for v, original_inp in zip(flat_out, flat_inputs):\n            v.set_shape(dynamic_shape_like(original_inp))\n        return nest.pack_sequence_as(inputs, flat_out)\n    dataset = indices_dataset.map(grab_batch, num_parallel_calls=dataset_ops.AUTOTUNE)\n    return dataset",
    "docstring": "Slice inputs into a Dataset of batches. Given a Dataset of batch indices and the unsliced inputs, this step slices the inputs in a parallelized fashion and produces a dataset of input batches. Args: indices_dataset: A Dataset of batched indices inputs: A python data structure that contains the inputs, targets, and possibly sample weights. Returns: A Dataset of input batches matching the batch indices.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\data_adapter.py",
    "ast_data": "FunctionDef name:slice_inputs arg:self arg:indices_dataset arg:inputs arguments arg arg arg Assign Call FunctionDef name:dynamic_shape_like arg:t arguments arg Assign Call Assign Return return:yes Call Assign Assign If BoolOp Compare Assign FunctionDef name:grab_batch arg:indices arguments arg FunctionDef name:py_method arg:ind arguments arg FunctionDef name:slice_array arg:data arguments arg Return return:yes Call Call Return return:yes Call Assign Call For Call Call Call Return return:yes Call Assign Call Return return:yes"
  },
  {
    "library": "pygame",
    "name": "frequency_to_midi",
    "source_code": "def frequency_to_midi(frequency):\n    return int(round(69 + 12 * math.log(frequency / 440.0) / math.log(2)))",
    "docstring": "converts a frequency into a MIDI note. Rounds to the closest midi note. ::Examples:: >>> frequency_to_midi(27.5) 21 >>> frequency_to_midi(36.7) 26 >>> frequency_to_midi(4186.0) 108",
    "type": "function",
    "file_path": "pygame\\src_py\\midi.py",
    "ast_data": "FunctionDef name:frequency_to_midi arg:frequency arguments arg Return return:yes Call Call Call Call"
  },
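The conversion above inverts cleanly: solving midi = 69 + 12 * log2(f / 440) for f gives the frequency of a note number. Below is a minimal sketch of that inverse, written from the formula rather than copied from pygame (pygame ships its own `midi_to_frequency` counterpart; treat this body as illustrative):

```python
import math

def midi_to_frequency(midi_note: int) -> float:
    # Invert midi = 69 + 12 * log2(f / 440): f = 440 * 2 ** ((midi - 69) / 12)
    return 440.0 * 2.0 ** ((midi_note - 69) / 12.0)

# Round-tripping recovers the note number for an in-tune pitch:
assert round(69 + 12 * math.log(midi_to_frequency(21) / 440.0) / math.log(2)) == 21
print(midi_to_frequency(69))  # 440.0, concert A
```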
  {
    "library": "pytorch",
    "name": "_check_shard_metadata_pair_overlap",
    "source_code": "def _check_shard_metadata_pair_overlap(shard1: ChunkStorageMetadata, shard2: ChunkStorageMetadata):\n    ndims = len(shard1.offsets)\n    for i in range(ndims):\n        if shard1.offsets[i] >= shard2.offsets[i] + shard2.sizes[i]:\n            return False\n        if shard2.offsets[i] >= shard1.offsets[i] + shard1.sizes[i]:\n            return False\n    return True",
    "docstring": "Check if two shards overlap.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\checkpoint\\resharding.py",
    "ast_data": "FunctionDef name:_check_shard_metadata_pair_overlap arg:shard1 arg:shard2 arguments arg arg Assign Call For Call If Compare Return return:yes If Compare Return return:yes Return return:yes"
  },
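The early returns above encode the standard separating-interval test: two N-dimensional boxes overlap iff their half-open ranges overlap in every dimension, so a single disjoint dimension suffices to answer False. A self-contained sketch, with a hypothetical `Chunk` type standing in for `ChunkStorageMetadata`:

```python
from dataclasses import dataclass

@dataclass
class Chunk:
    # Stand-in for ChunkStorageMetadata: per-dimension start and extent.
    offsets: tuple
    sizes: tuple

def overlaps(a: Chunk, b: Chunk) -> bool:
    # Boxes intersect iff [offset, offset + size) ranges intersect in every dim.
    return all(
        a.offsets[i] < b.offsets[i] + b.sizes[i]
        and b.offsets[i] < a.offsets[i] + a.sizes[i]
        for i in range(len(a.offsets))
    )

print(overlaps(Chunk((0, 0), (4, 4)), Chunk((2, 2), (4, 4))))  # True
print(overlaps(Chunk((0, 0), (4, 4)), Chunk((4, 0), (4, 4))))  # False: touching edges only
```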
  {
    "library": "cherrypy",
    "name": "__setitem__",
    "source_code": "def __setitem__(self, key, value):\n    if not self.loaded:\n        self.load()\n    self._data[key] = value",
    "docstring": "Store an object in the session.",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\lib\\sessions.py",
    "ast_data": "FunctionDef name:__setitem__ arg:self arg:key arg:value arguments arg arg arg If Call Assign"
  },
  {
    "library": "matplotlib",
    "name": "set_connectionstyle",
    "source_code": "@_docstring.interpd\ndef set_connectionstyle(self, connectionstyle=None, **kwargs):\n    if connectionstyle is None:\n        return ConnectionStyle.pprint_styles()\n    self._connector = ConnectionStyle(connectionstyle, **kwargs) if isinstance(connectionstyle, str) else connectionstyle\n    self.stale = True",
    "docstring": "Set the connection style, possibly with further attributes. Attributes from the previous connection style are not reused. Without argument (or with `~matplotlib.patches.ConnectionStyle.ConnectionStyle.ConnectionStyle` object, as documented in that class. The following connection styles are available: %(ConnectionStyle:table_and_accepts)s **kwargs Additional attributes for the connection style. See the table above for supported parameters. Examples -------- :: set_connectionstyle(\"Arc,armA=30,rad=10\") set_connectionstyle(\"arc\", armA=30, rad=10)",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\patches.py",
    "ast_data": "FunctionDef name:set_connectionstyle arg:self arg:connectionstyle arguments arg arg arg If Compare Return return:yes Call Assign Call Call Assign"
  },
  {
    "library": "pytorch",
    "name": "_set_params_and_buffers_to_ignore_for_model",
    "source_code": "@staticmethod\ndef _set_params_and_buffers_to_ignore_for_model(module, params_and_buffers_to_ignore):\n    module._ddp_params_and_buffers_to_ignore = params_and_buffers_to_ignore\n    for name, param in module.named_parameters():\n        if name in params_and_buffers_to_ignore:\n            param._ddp_ignored = True\n    for name, buffer in module.named_buffers():\n        if name in params_and_buffers_to_ignore:\n            buffer._ddp_ignored = True",
    "docstring": "Set parameters and buffers to be ignored by DDP. Expected format for parameters is the fully qualified name: {module_name}.{param_name}, and similarly, {module_name}.{buffer_name} for buffers. For example: params_to_ignore = [] # NB: model here is vanilla PyTorch module, not yet wrapped with DDP. for module_name, module in model.named_modules(): for param_name, param in module.named_parameters(recurse=False): if should_ignore(param): # Create expected format fqn = f\"{module_name}.{param_name}\" params_to_ignore.append(fqn) torch.nn.parallel.DistributedDataParallel._set_params_and_buffers_to_ignore_for_model( model, params_to_ignore )",
    "type": "method",
    "file_path": "pytorch\\torch\\nn\\parallel\\distributed.py",
    "ast_data": "FunctionDef name:_set_params_and_buffers_to_ignore_for_model arg:module arg:params_and_buffers_to_ignore arguments arg arg Assign For Call If Compare Assign For Call If Compare Assign"
  },
  {
    "library": "pytorch",
    "name": "is_namedtuple",
    "source_code": "def is_namedtuple(obj):\n    return is_namedtuple_cls(type(obj))",
    "docstring": "Test if an object is a namedtuple or a torch.return_types.* quasi-namedtuple",
    "type": "function",
    "file_path": "pytorch\\torch\\_dynamo\\utils.py",
    "ast_data": "FunctionDef name:is_namedtuple arg:obj arguments arg Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_find_op",
    "source_code": "def _find_op(graph: ops.Graph, op_name: Optional[str]) -> Optional[ops.Operation]:\n    if not op_name:\n        return None\n    init_op = graph.get_operation_by_name(op_name)\n    logging.debug('Op found in the graph: %s', op_name)\n    return init_op",
    "docstring": "Finds the operation with . Args: graph: The graph to find from. op_name: Name of the node. Returns: The operation that corresponds to . Returns None iff op_name is an empty string or None. Raises: ValueError: is malformed.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\compiler\\mlir\\quantization\\tensorflow\\python\\save_model.py",
    "ast_data": "FunctionDef name:_find_op arg:graph arg:op_name arguments arg arg If Return return:no Assign Call Call Return return:yes"
  },
  {
    "library": "scipy",
    "name": "_aligned_zeros",
    "source_code": "def _aligned_zeros(shape, dtype=float, order='C', align=None):\n    dtype = np.dtype(dtype)\n    if align is None:\n        align = dtype.alignment\n    if not hasattr(shape, '__len__'):\n        shape = (shape,)\n    size = functools.reduce(operator.mul, shape) * dtype.itemsize\n    buf = np.empty(size + align + 1, np.uint8)\n    offset = buf.__array_interface__['data'][0] % align\n    if offset != 0:\n        offset = align - offset\n    buf = buf[offset:offset + size + 1][:-1]\n    data = np.ndarray(shape, dtype, buf, order=order)\n    data.fill(0)\n    return data",
    "docstring": "Allocate a new ndarray with aligned memory. Primary use case for this currently is working around a f2py issue in NumPy 1.9.1, where dtype.alignment is such that np.zeros() does not necessarily create arrays aligned up to it.",
    "type": "function",
    "file_path": "scipy\\scipy\\_lib\\_util.py",
    "ast_data": "FunctionDef name:_aligned_zeros arg:shape arg:dtype arg:order arg:align arguments arg arg arg arg Assign Call If Compare Assign If Call Assign Assign Call Assign Call Assign If Compare Assign Assign Assign Call Call Return return:yes"
  },
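The allocation trick is generic: over-allocate a raw uint8 buffer, then start the view at whatever offset makes the data pointer a multiple of the requested alignment. A minimal sketch of the same idea (names are illustrative, not scipy API):

```python
import numpy as np

def is_aligned(arr: np.ndarray, align: int) -> bool:
    # The base address is the first entry of __array_interface__['data'].
    return arr.__array_interface__['data'][0] % align == 0

align = 64
buf = np.empty(1024 + align, np.uint8)          # over-allocate by `align` bytes
offset = (-buf.__array_interface__['data'][0]) % align
view = buf[offset:offset + 1024]                # aligned 1024-byte window
print(is_aligned(view, align))                  # True
```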
  {
    "library": "pytorch",
    "name": "load_state_dict",
    "source_code": "def load_state_dict(self, state_dict, strict=True):\n    states = copy.deepcopy(state_dict['state'])\n    data_groups = copy.deepcopy(state_dict['data_groups'])\n    container_state_dict = copy.deepcopy(state_dict['_container'])\n    states = self._convert_mask(states, sparse_coo=False)\n    if strict:\n        self._container = _Container()\n    self._load_container_from_state(states, data_groups, container_state_dict)\n    if not strict:\n        states.update(self.state)\n        data_groups.update(self.data_groups)\n    self.__setstate__({'state': states, 'data_groups': data_groups})",
    "docstring": "The load_state_dict() restores the state of the sparsifier based on the state_dict Args: * state_dict - the dictionary that to which the current sparsifier needs to be restored to * strict - If True - the sparsifier is reset and is restored exactly to the state in state_dict. If False - the current sparsifier is not reset before loading the state_dict i.e. data added before loading the state_dict is not erased.",
    "type": "method",
    "file_path": "pytorch\\torch\\ao\\pruning\\_experimental\\data_sparsifier\\base_data_sparsifier.py",
    "ast_data": "FunctionDef name:load_state_dict arg:self arg:state_dict arg:strict arguments arg arg arg Assign Call Assign Call Assign Call Assign Call If Assign Call Call If Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "get_next",
    "source_code": "def get_next(self):\n    raise NotImplementedError('DistributedIterator.get_next() must be implemented in descendants.')",
    "docstring": "Returns the next input from the iterator for all replicas. Example use: >>> strategy = tf.distribute.MirroredStrategy([\"GPU:0\", \"GPU:1\"]) >>> dataset = tf.data.Dataset.range(100).batch(2) >>> dist_dataset = strategy.experimental_distribute_dataset(dataset) >>> dist_dataset_iterator = iter(dist_dataset) >>> @tf.function ... def one_step(input): ... return input >>> step_num = 5 >>> for _ in range(step_num): ... strategy.run(one_step, args=(dist_dataset_iterator.get_next(),)) >>> strategy.experimental_local_results(dist_dataset_iterator.get_next()) (, ) Returns: A single or a which contains the next input for all replicas. Raises: : If the end of the iterator has been reached.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\types\\distribute.py",
    "ast_data": "FunctionDef name:get_next arg:self arguments arg Raise Call"
  },
  {
    "library": "matplotlib",
    "name": "__init__",
    "source_code": "def __init__(self, locator, tz=None, defaultfmt='%Y-%m-%d', *, usetex=None):\n    self._locator = locator\n    self._tz = tz\n    self.defaultfmt = defaultfmt\n    self._formatter = DateFormatter(self.defaultfmt, tz)\n    rcParams = mpl.rcParams\n    self._usetex = mpl._val_or_rc(usetex, 'text.usetex')\n    self.scaled = {DAYS_PER_YEAR: rcParams['date.autoformatter.year'], DAYS_PER_MONTH: rcParams['date.autoformatter.month'], 1: rcParams['date.autoformatter.day'], 1 / HOURS_PER_DAY: rcParams['date.autoformatter.hour'], 1 / MINUTES_PER_DAY: rcParams['date.autoformatter.minute'], 1 / SEC_PER_DAY: rcParams['date.autoformatter.second'], 1 / MUSECONDS_PER_DAY: rcParams['date.autoformatter.microsecond']}",
    "docstring": "Autoformat the date labels. Parameters ---------- locator : Locator that this axis is using. tz : str or , default: :rc: Ticks timezone. If a string, *tz* is passed to . defaultfmt : str The default format to use if none of the values in `text.usetex` are set as functions, then it is up to the customized function to enable or disable TeX's math mode itself.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\dates.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:locator arg:tz arg:defaultfmt arguments arg arg arg arg arg Assign Assign Assign Assign Call Assign Assign Call Assign"
  },
  {
    "library": "tensorflow",
    "name": "can_add",
    "source_code": "@abc.abstractmethod\ndef can_add(self, op1, op2):\n    pass",
    "docstring": "Returns if this can add and . Else .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\linalg\\linear_operator_addition.py",
    "ast_data": "FunctionDef name:can_add arg:self arg:op1 arg:op2 arguments arg arg arg"
  },
  {
    "library": "tensorflow",
    "name": "_GetAxisFromLabel",
    "source_code": "def _GetAxisFromLabel(subscripts, label):\n    splits = subscripts.split(ellipsis)\n    index = splits[0].find(label)\n    if index != -1:\n        return index\n    if len(splits) < 2:\n        return None\n    index = splits[1].find(label)\n    if index != -1:\n        return index - len(splits[1])\n    return None",
    "docstring": "Returns the axis (possibly negative) corresponding to a label. Returns the axis index of the axis label if it is before an ellipsis (or if the ellipsis is not present), and the negative index if it occurs after the ellipsis. E.g. index of in , is , but that of is . For multiple occurrences, returns the leftmost one. If not found, returns None. Args: subscripts: A string denoting the einsum subscript (e.g. ) label: The single character axis label.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\linalg_grad.py",
    "ast_data": "FunctionDef name:_GetAxisFromLabel arg:subscripts arg:label arguments arg arg Assign Call Assign Call If Compare Return return:yes If Compare Call Return return:no Assign Call If Compare Return return:yes Call Return return:no"
  },
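Counting positions from the left before the ellipsis and from the right after it keeps the index valid no matter how many dimensions the ellipsis absorbs. A pure-Python re-implementation for illustration only (the TensorFlow-internal version uses its own ellipsis constant):

```python
def axis_from_label(subscripts: str, label: str):
    # Positive index before '...', negative (from the end) after it, else None.
    before, _, after = subscripts.partition('...')
    i = before.find(label)
    if i != -1:
        return i
    j = after.find(label)
    return j - len(after) if j != -1 else None

print(axis_from_label('ab...cd', 'b'))  # 1
print(axis_from_label('ab...cd', 'c'))  # -2
print(axis_from_label('ab...cd', 'z'))  # None
```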
  {
    "library": "pytorch",
    "name": "install_global_unsafe",
    "source_code": "def install_global_unsafe(self, name, value) -> None:\n    assert name not in self.installed_globals\n    self.installed_globals.add(name)\n    self.cleanups.append(CleanupHook.create(self.global_scope, name, value))",
    "docstring": "WARNING: prefer the safer . torch.compile instances should be independent of each other; one footgun is to have one instance depend on the existence of a global installed by another instance. This can happen if we mangle a global the same way across both instances.",
    "type": "method",
    "file_path": "pytorch\\torch\\_dynamo\\output_graph.py",
    "ast_data": "FunctionDef name:install_global_unsafe arg:self arg:name arg:value arguments arg arg arg Compare Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "argmin_v2",
    "source_code": "@tf_export('math.argmin', 'argmin', v1=[])\n@dispatch.add_dispatch_support\ndef argmin_v2(input, axis=None, output_type=dtypes.int64, name=None):\n    if axis is None:\n        axis = 0\n    return gen_math_ops.arg_min(input, axis, name=name, output_type=output_type)",
    "docstring": "Returns the index with the smallest value across axes of a tensor. Returns the smallest index in case of ties. Args: input: A . Must be one of the following types: , , , , , , , , , , , , , , , , . axis: A . Must be one of the following types: , . int32 or int64, must be in the range . Describes which axis of the input Tensor to reduce across. For vectors, use axis = 0. output_type: An optional from: . Defaults to . name: A name for the operation (optional). Returns: A of type . Usage:",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\math_ops.py",
    "ast_data": "FunctionDef name:argmin_v2 arg:input arg:axis arg:output_type arg:name arguments arg arg arg arg If Compare Assign Return return:yes Call Call"
  },
  {
    "library": "scikit-learn",
    "name": "predict",
    "source_code": "def predict(self, X):\n    pred = self.decision_function(X)\n    if self.n_classes_ == 2:\n        return self.classes_.take(pred > 0, axis=0)\n    return self.classes_.take(np.argmax(pred, axis=1), axis=0)",
    "docstring": "Predict classes for X. The predicted class of an input sample is computed as the weighted mean prediction of the classifiers in the ensemble. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) The training input samples. Sparse matrix can be CSC, CSR, COO, DOK, or LIL. COO, DOK, and LIL are converted to CSR. Returns ------- y : ndarray of shape (n_samples,) The predicted classes.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\ensemble\\_weight_boosting.py",
    "ast_data": "FunctionDef name:predict arg:self arg:X arguments arg arg Assign Call If Compare Return return:yes Call Compare Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "device",
    "source_code": "@property\ndef device(self):\n    return self.specs[0].device",
    "docstring": "The device for SaveSpec Tensors.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\training\\saving\\saveable_object.py",
    "ast_data": "FunctionDef name:device arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "batch_all_reduce",
    "source_code": "def batch_all_reduce(self, input_tensor_packs: List[List[core.TensorLike]], options: Optional[collective_util.Options]=None) -> core.Tensor:\n    options = self._options.merge(options)\n    outputs = []\n    for pack in input_tensor_packs:\n        if context.executing_eagerly():\n            for input_tensor in pack:\n                outputs.append(self.all_reduce(input_tensor, None, options))\n        else:\n            with ops.device(self._device):\n                flat_tensors = [array_ops.reshape(t, [-1]) for t in pack]\n                shapes = [array_ops.shape(t) for t in pack]\n                if options.implementation == collective_util.CommunicationImplementation.NCCL and outputs:\n                    control_input = outputs[-1]\n                else:\n                    control_input = None\n                reduced = self.all_reduce(array_ops.concat(flat_tensors, axis=0), control_input, options)\n                num_elements = [math_ops.reduce_prod(s) for s in shapes]\n                flat_outputs = array_ops.split(reduced, num_elements, axis=0)\n                for shape, flat_output in zip(shapes, flat_outputs):\n                    outputs.append(array_ops.reshape(flat_output, shape))\n    return outputs",
    "docstring": "Batch all-reduce dense tensors. This takes a list of batches of tensors. Using multiple batches have the benefit that it doesn't need to wait for all inputs to be ready to start the all-reduce. Args: input_tensor_packs: a list of lists of dense tensors. options: an optional tf.distribute.experimental.CommunicationOptions. If provided, it overrides the default options. Returns: A flat list of reduced tensors.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\cross_device_utils.py",
    "ast_data": "FunctionDef name:batch_all_reduce arg:self arg:input_tensor_packs arg:options arguments arg arg arg Assign Call Assign For If Call For Call Call With Call Assign Call Assign Call If BoolOp Compare Assign Assign Assign Call Call Assign Call Assign Call For Call Call Call Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "_smallest_admissible_index_dtype",
    "source_code": "def _smallest_admissible_index_dtype(arrays=(), maxval=None, check_contents=False):\n    int32min = np.int32(np.iinfo(np.int32).min)\n    int32max = np.int32(np.iinfo(np.int32).max)\n    if maxval is not None:\n        if maxval > np.iinfo(np.int64).max:\n            raise ValueError(f'maxval={maxval} is to large to be represented as np.int64.')\n        if maxval > int32max:\n            return np.int64\n    if isinstance(arrays, np.ndarray):\n        arrays = (arrays,)\n    for arr in arrays:\n        if not isinstance(arr, np.ndarray):\n            raise TypeError(f'Arrays should be of type np.ndarray, got {type(arr)} instead.')\n        if not np.issubdtype(arr.dtype, np.integer):\n            raise ValueError(f'Array dtype {arr.dtype} is not supported for index dtype. We expect integral values.')\n        if not np.can_cast(arr.dtype, np.int32):\n            if not check_contents:\n                return np.int64\n            if arr.size == 0:\n                continue\n            else:\n                maxval = arr.max()\n                minval = arr.min()\n                if minval < int32min or maxval > int32max:\n                    return np.int64\n    return np.int32",
    "docstring": "Based on input (integer) arrays , determine a suitable index data type that can hold the data in the arrays. This function returns if it either required by or based on the largest precision of the dtype of the arrays passed as argument, or by their contents (when ). If none of the condition requires then this function returns . Parameters ---------- arrays : ndarray or tuple of ndarrays, default=() Input arrays whose types/contents to check. maxval : float, default=None Maximum value needed. check_contents : bool, default=False Whether to check the values in the arrays and not just their types. By default, check only the types. Returns ------- dtype : {np.int32, np.int64} Suitable index data type (int32 or int64).",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\utils\\fixes.py",
    "ast_data": "FunctionDef name:_smallest_admissible_index_dtype arg:arrays arg:maxval arg:check_contents arguments arg arg arg Assign Call Call Assign Call Call If Compare If Compare Call Raise Call If Compare Return return:yes If Call Assign For If Call Raise Call Call If Call Raise Call If Call If Return return:yes If Compare Assign Call Assign Call If BoolOp Compare Compare Return return:yes Return return:yes"
  },
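The decision rule boils down to one question: can every index value be represented in int32? A short sketch of the contents-based branch in plain NumPy, without calling the private sklearn helper (the array below is an assumption for illustration):

```python
import numpy as np

int32_info = np.iinfo(np.int32)

# An int64 array whose *contents* still fit in int32:
indices = np.array([0, 5, 123], dtype=np.int64)

needs_int64 = indices.max() > int32_info.max or indices.min() < int32_info.min
print(np.int64 if needs_int64 else np.int32)  # <class 'numpy.int32'>
```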
  {
    "library": "kornia",
    "name": "BilateralBlur",
    "source_code": "class BilateralBlur(_BilateralBlur):\n\n    def forward(self, input: Tensor) -> Tensor:\n        return bilateral_blur(input, self.kernel_size, self.sigma_color, self.sigma_space, self.border_type, self.color_distance_type)",
    "docstring": "Blur a tensor using a Bilateral filter. The operator is an edge-preserving image smoothing filter. The weight for each pixel in a neighborhood is determined not only by its distance to the center pixel, but also the difference in intensity or color. Arguments: kernel_size: the size of the kernel. sigma_color: the standard deviation for intensity/color Gaussian kernel. Smaller values preserve more edges. sigma_space: the standard deviation for spatial Gaussian kernel. This is similar to `gaussian_blur2d()(B, C, H, W)(B, C, H, W)` Examples: >>> input = torch.rand(2, 4, 5, 5) >>> blur = BilateralBlur((3, 3), 0.1, (1.5, 1.5)) >>> output = blur(input) >>> output.shape torch.Size([2, 4, 5, 5])",
    "type": "class",
    "file_path": "kornia\\kornia\\filters\\bilateral.py",
    "ast_data": "ClassDef name:BilateralBlur FunctionDef name:forward arg:self arg:input arguments arg arg Return return:yes Call"
  },
  {
    "library": "kornia",
    "name": "AEPE",
    "source_code": "class AEPE(nn.Module):\n\n    def __init__(self, reduction: str='mean') -> None:\n        super().__init__()\n        self.reduction: str = reduction\n\n    def forward(self, input: torch.Tensor, target: torch.Tensor) -> torch.Tensor:\n        return aepe(input, target, self.reduction)",
    "docstring": "Computes the average endpoint error (AEPE) between 2 flow maps. EPE is the endpoint error between two 2D vectors (e.g., optical flow). Given a h x w x 2 optical flow map, the AEPE is: .. math:: \\text{AEPE}=\\frac{1}{hw}\\sum_{i=1, j=1}^{h, w}\\sqrt{(I_{i,j,1}-T_{i,j,1})^{2}+(I_{i,j,2}-T_{i,j,2})^{2}} Args: reduction : Specifies the reduction to apply to the output: `(*, 2)(*, 2)(1)`. Examples: >>> input1 = torch.rand(1, 4, 5, 2) >>> input2 = torch.rand(1, 4, 5, 2) >>> epe = AEPE(reduction=\"mean\") >>> epe = epe(input1, input2)",
    "type": "class",
    "file_path": "kornia\\kornia\\metrics\\endpoint_error.py",
    "ast_data": "ClassDef name:AEPE FunctionDef name:__init__ arg:self arg:reduction arguments arg arg Call Call FunctionDef name:forward arg:self arg:input arg:target arguments arg arg arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "clear",
    "source_code": "def clear(self) -> None:\n    for k in self._keys.copy():\n        del self[k]",
    "docstring": "Remove all items from the ParameterDict.",
    "type": "method",
    "file_path": "pytorch\\torch\\nn\\modules\\container.py",
    "ast_data": "FunctionDef name:clear arg:self arguments arg For Call"
  },
  {
    "library": "sphinx",
    "name": "create_documenter",
    "source_code": "def create_documenter(self, obj: Any, parent: Any, full_name: str, *, registry: SphinxComponentRegistry) -> Documenter:\n    doccls = _get_documenter(obj, parent, registry=registry)\n    return doccls(self.bridge, full_name)",
    "docstring": "Get an autodoc.Documenter class suitable for documenting the given object. Wraps _get_documenter and is meant as a hook for extensions.",
    "type": "method",
    "file_path": "sphinx\\sphinx\\ext\\autosummary\\__init__.py",
    "ast_data": "FunctionDef name:create_documenter arg:self arg:obj arg:parent arg:full_name arguments arg arg arg arg arg Assign Call Return return:yes Call"
  },
  {
    "library": "django",
    "name": "page",
    "source_code": "def page(self, number):\n    number = self.validate_number(number)\n    bottom = (number - 1) * self.per_page\n    top = bottom + self.per_page\n    if top + self.orphans >= self.count:\n        top = self.count\n    return self._get_page(self.object_list[bottom:top], number, self)",
    "docstring": "Return a Page object for the given 1-based page number.",
    "type": "method",
    "file_path": "django\\django\\core\\paginator.py",
    "ast_data": "FunctionDef name:page arg:self arg:number arguments arg arg Assign Call Assign Assign If Compare Assign Return return:yes Call"
  },
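The orphans clamp is easiest to see with numbers: with 23 items, 10 per page and orphans=3, the 3 items that would form an undersized page 3 are folded into page 2 instead. A standalone sketch of the same bounds computation (illustrative, not Django API):

```python
count, per_page, orphans = 23, 10, 3  # assumed example values

def page_bounds(number: int) -> tuple:
    bottom = (number - 1) * per_page
    top = bottom + per_page
    if top + orphans >= count:  # absorb a too-small trailing page
        top = count
    return bottom, top

print(page_bounds(1))  # (0, 10)
print(page_bounds(2))  # (10, 23): the 3 leftover items join page 2
```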
  {
    "library": "pytorch",
    "name": "ConvertComplexToRealRepresentationInputStep",
    "source_code": "class ConvertComplexToRealRepresentationInputStep(InputAdaptStep):\n\n    def apply(self, model_args: Sequence[Any], model_kwargs: Mapping[str, Any], model: torch.nn.Module | Callable | torch_export.ExportedProgram | None=None) -> tuple[Sequence[Any], Mapping[str, Any]]:\n        return (tuple((torch.view_as_real(arg.resolve_conj()) if isinstance(arg, torch.Tensor) and arg.is_complex() else arg for arg in model_args)), model_kwargs)",
    "docstring": "Convert complex dtype tensors to real representation tensors. ONNX does not support complex dtype tensors. Thus, we convert complex dtype tensors to real representation tensors (i.e., float dtype tensors with an extra dimension representing the real and imaginary parts of the complex number).",
    "type": "class",
    "file_path": "pytorch\\torch\\onnx\\_internal\\io_adapter.py",
    "ast_data": "ClassDef name:ConvertComplexToRealRepresentationInputStep FunctionDef name:apply arg:self arg:model_args arg:model_kwargs arg:model arguments arg arg arg arg Return return:yes Call BoolOp Call Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "reconstruction_loss",
    "source_code": "def reconstruction_loss(self, soft_quantized_output: torch.Tensor, original_output: torch.Tensor) -> torch.Tensor:\n    return F.mse_loss(soft_quantized_output, original_output, reduction='none').mean()",
    "docstring": "Compute the reconstruction loss between the soft quantized output and the original output.",
    "type": "method",
    "file_path": "pytorch\\torch\\ao\\quantization\\experimental\\adaround_loss.py",
    "ast_data": "FunctionDef name:reconstruction_loss arg:self arg:soft_quantized_output arg:original_output arguments arg arg arg Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "operator",
    "source_code": "@property\ndef operator(self):\n    return self._operator",
    "docstring": "The operator before taking the adjoint.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\linalg\\linear_operator_adjoint.py",
    "ast_data": "FunctionDef name:operator arg:self arguments arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "forward",
    "source_code": "def forward(self, x: torch.Tensor) -> torch.Tensor:\n    weight_quant_dequant = self.get_weight()\n    result = F.conv2d(x, weight_quant_dequant, self.bias, self.stride, self.padding, self.dilation, self.groups)\n    return result",
    "docstring": "we have: w(float) -- quant - dequant x(float) ------------- F.conv2d --- In the full model, we will see w(float) -- quant - *dequant x -- quant --- *dequant -- *F.conv2d --- *quant - dequant and the backend should be able to fuse the ops with into a quantized conv2d",
    "type": "method",
    "file_path": "pytorch\\torch\\ao\\nn\\quantized\\reference\\modules\\conv.py",
    "ast_data": "FunctionDef name:forward arg:self arg:x arguments arg arg Assign Call Assign Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_scatter_dim_after_reshape",
    "source_code": "def _scatter_dim_after_reshape(reshape_node: torch.fx.Node, orig_scatter_dim: int) -> int:\n    if not reshape_node:\n        return orig_scatter_dim\n    reshape_op_output_tensor = _get_tensor(reshape_node)\n    assert reshape_op_output_tensor.ndim == 2, 'reshape must produce 2D tensor for scaled_mm'\n    assert len(reshape_node.args) >= 1, 'reshape node must have at least 1 arg'\n    input_tensor_node = cast(torch.fx.Node, reshape_node.args[0])\n    reshape_op_input_tensor = _get_tensor(input_tensor_node)\n    assert reshape_op_input_tensor.ndim > reshape_op_output_tensor.ndim, 'reshape must be from 3D+ to 2D'\n    input_shape = reshape_op_input_tensor.shape\n    output_shape = reshape_op_output_tensor.shape\n    leading_dims_collapsed = output_shape[0] == prod(input_shape[:-1])\n    if orig_scatter_dim == 0:\n        return 0\n    if orig_scatter_dim == reshape_op_input_tensor.ndim - 1:\n        return 1\n    return 0 if leading_dims_collapsed else 1",
    "docstring": "Given a reshape node and the original scatter dim for the target tensor, returns the new scatter dim for the reshaped tensor.",
    "type": "function",
    "file_path": "pytorch\\torch\\_inductor\\fx_passes\\micro_pipeline_tp.py",
    "ast_data": "FunctionDef name:_scatter_dim_after_reshape arg:reshape_node arg:orig_scatter_dim arguments arg arg If Return return:yes Assign Call Compare Compare Call Assign Call Assign Call Compare Assign Assign Assign Compare Call If Compare Return return:yes If Compare Return return:yes Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "replace_autocast_with_hop_pass",
    "source_code": "def replace_autocast_with_hop_pass(gm: torch.fx.GraphModule, graph_signature: Optional[ExportGraphSignature]) -> tuple[torch.fx.GraphModule, Optional[ExportGraphSignature]]:\n    return _replace_with_hop_pass_helper(gm, graph_signature, _sequential_split_and_maybe_inline_subgraphs)",
    "docstring": "Split gm into sub-graph-modules using , and then recursively call itself on each of the submodules.",
    "type": "function",
    "file_path": "pytorch\\torch\\_export\\passes\\replace_autocast_with_hop_pass.py",
    "ast_data": "FunctionDef name:replace_autocast_with_hop_pass arg:gm arg:graph_signature arguments arg arg Return return:yes Call"
  },
  {
    "library": "numpy",
    "name": "rstrip",
    "source_code": "def rstrip(self, chars=None):\n    return rstrip(self, chars)",
    "docstring": "For each element in , return a copy with the trailing characters removed. See Also -------- char.rstrip",
    "type": "method",
    "file_path": "numpy\\numpy\\_core\\defchararray.py",
    "ast_data": "FunctionDef name:rstrip arg:self arg:chars arguments arg arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_has_valid_tensors",
    "source_code": "def _has_valid_tensors(self):\n    return self._input_tensors is not None and self._output_tensors",
    "docstring": "Checks if the input and output tensors have been initialized. Returns: Bool.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\lite\\python\\lite.py",
    "ast_data": "FunctionDef name:_has_valid_tensors arg:self arguments arg Return return:yes BoolOp Compare"
  },
  {
    "library": "sphinx",
    "name": "_UnicodeDecodeErrorHandler",
    "source_code": "class _UnicodeDecodeErrorHandler:\n\n    def __init__(self, docname: str, /) -> None:\n        self.docname = docname\n\n    def __call__(self, error: UnicodeDecodeError) -> tuple[str, int]:\n        line_start = error.object.rfind(b'\\n', 0, error.start)\n        line_end = error.object.find(b'\\n', error.start)\n        if line_end == -1:\n            line_end = len(error.object)\n        line_num = error.object.count(b'\\n', 0, error.start) + 1\n        logger.warning(__('undecodable source characters, replacing with \"?\": %r'), error.object[line_start + 1:error.start] + b'>>>' + error.object[error.start:error.end] + b'<<<' + error.object[error.end:line_end], location=(self.docname, line_num))\n        return ('?', error.end)",
    "docstring": "Custom error handler for open() that warns and replaces.",
    "type": "class",
    "file_path": "sphinx\\sphinx\\builders\\__init__.py",
    "ast_data": "ClassDef name:_UnicodeDecodeErrorHandler FunctionDef name:__init__ arguments arg arg Assign FunctionDef name:__call__ arg:self arg:error arguments arg arg Assign Call Assign Call If Compare Assign Call Assign Call Call Call Return return:yes"
  },
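Handlers with this (str, int)-returning contract plug into Python's codec machinery: register the callable under a name with codecs.register_error, then pass that name as errors= to decode() or open(). A minimal sketch with a simpler handler than Sphinx's (the name 'warn-replace' is made up for the example):

```python
import codecs

def warn_and_replace(error: UnicodeDecodeError):
    # Report the offending bytes, substitute '?', resume after them.
    bad = error.object[error.start:error.end]
    print(f'undecodable bytes {bad!r} at offset {error.start}')
    return ('?', error.end)

codecs.register_error('warn-replace', warn_and_replace)
print(b'ok \xff\xfe end'.decode('utf-8', errors='warn-replace'))  # ok ?? end
```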
  {
    "library": "seaborn",
    "name": "_get_transform_functions",
    "source_code": "def _get_transform_functions(ax, axis):\n    axis_obj = getattr(ax, f'{axis}axis')\n    transform = axis_obj.get_transform()\n    return (transform.transform, transform.inverted().transform)",
    "docstring": "Return the forward and inverse transforms for a given axis.",
    "type": "function",
    "file_path": "seaborn\\seaborn\\utils.py",
    "ast_data": "FunctionDef name:_get_transform_functions arg:ax arg:axis arguments arg arg Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "theta",
    "source_code": "@theta.setter\ndef theta(self, theta):\n    self.kernel.theta = theta",
    "docstring": "Sets the (flattened, log-transformed) non-fixed hyperparameters. Parameters ---------- theta : ndarray of shape (n_dims,) The non-fixed, log-transformed hyperparameters of the kernel",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\gaussian_process\\kernels.py",
    "ast_data": "FunctionDef name:theta arg:self arg:theta arguments arg arg Assign"
  },
  {
    "library": "pytorch",
    "name": "FakeQuantizeBase",
    "source_code": "class FakeQuantizeBase(ABC, Module):\n    fake_quant_enabled: torch.Tensor\n    observer_enabled: torch.Tensor\n\n    def __init__(self) -> None:\n        super().__init__()\n        self.register_buffer('fake_quant_enabled', torch.tensor([1], dtype=torch.uint8))\n        self.register_buffer('observer_enabled', torch.tensor([1], dtype=torch.uint8))\n\n    @abstractmethod\n    def forward(self, x):\n        pass\n\n    @abstractmethod\n    def calculate_qparams(self, **kwargs):\n        pass\n\n    @torch.jit.export\n    def enable_fake_quant(self, enabled: bool=True) -> None:\n        self.fake_quant_enabled[0] = 1 if enabled else 0\n\n    @torch.jit.export\n    def disable_fake_quant(self):\n        self.enable_fake_quant(False)\n\n    @torch.jit.export\n    def enable_observer(self, enabled: bool=True) -> None:\n        self.observer_enabled[0] = 1 if enabled else 0\n\n    @torch.jit.export\n    def disable_observer(self):\n        self.enable_observer(False)\n\n    @classmethod\n    def with_args(cls, **kwargs):\n        fake_quant_constructor = _with_args(cls, **kwargs)\n        fake_quant_constructor.__module__ = 'torch.ao.quantization.fake_quantize'\n        return fake_quant_constructor",
    "docstring": "Base fake quantize module. Base fake quantize module Any fake quantize implementation should derive from this class. Concrete fake quantize module should follow the same API. In forward, they will update the statistics of the observed Tensor and fake quantize the input. They should also provide a function that computes the quantization parameters given the collected statistics.",
    "type": "class",
    "file_path": "pytorch\\torch\\ao\\quantization\\fake_quantize.py",
    "ast_data": "ClassDef name:FakeQuantizeBase FunctionDef name:__init__ arg:self arguments arg Call Call Call Call Call Call FunctionDef name:forward arg:self arg:x arguments arg arg FunctionDef name:calculate_qparams arg:self arguments arg arg FunctionDef name:enable_fake_quant arg:self arg:enabled arguments arg arg Assign FunctionDef name:disable_fake_quant arg:self arguments arg Call FunctionDef name:enable_observer arg:self arg:enabled arguments arg arg Assign FunctionDef name:disable_observer arg:self arguments arg Call FunctionDef name:with_args arg:cls arguments arg arg Assign Call Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "shape",
    "source_code": "@property\ndef shape(self):\n    return self._ragged_shape._to_tensor_shape()",
    "docstring": "The static shape of this StructuredTensor. The returned is guaranteed to have a known rank, but the individual dimension sizes may be unknown. Returns:",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\structured\\structured_tensor.py",
    "ast_data": "FunctionDef name:shape arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "stateless_random_saturation",
    "source_code": "@tf_export('image.stateless_random_saturation', v1=[])\n@dispatch.add_dispatch_support\ndef stateless_random_saturation(image, lower, upper, seed=None):\n    if upper <= lower:\n        raise ValueError('upper must be > lower.')\n    if lower < 0:\n        raise ValueError('lower must be non-negative.')\n    saturation_factor = stateless_random_ops.stateless_random_uniform(shape=[], minval=lower, maxval=upper, seed=seed)\n    return adjust_saturation(image, saturation_factor)",
    "docstring": "Adjust the saturation of RGB images by a random factor deterministically. Equivalent to but uses a randomly picked in the interval . Guarantees the same results given the same independent of how many times the function is called, and independent of global seed settings (e.g. ). Usage Example: >>> x = [[[1.0, 2.0, 3.0], ... [4.0, 5.0, 6.0]], ... [[7.0, 8.0, 9.0], ... [10.0, 11.0, 12.0]]] >>> seed = (1, 2) >>> tf.image.stateless_random_saturation(x, 0.5, 1.0, seed) Args: image: RGB image or images. The size of the last dimension must be 3. lower: float. Lower bound for the random saturation factor. upper: float. Upper bound for the random saturation factor. seed: A shape [2] Tensor, the seed to the random number generator. Must have dtype or . (When using XLA, only is allowed.) Returns: Adjusted image(s), same shape and DType as . Raises: ValueError: if or if .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\image_ops_impl.py",
    "ast_data": "FunctionDef name:stateless_random_saturation arg:image arg:lower arg:upper arg:seed arguments arg arg arg arg If Compare Raise Call If Compare Raise Call Assign Call Return return:yes Call Call"
  },
  {
    "library": "django",
    "name": "geos_version",
    "source_code": "def geos_version(self):\n    return self._get_spatialite_func('geos_version()')",
    "docstring": "Return the version of GEOS used by SpatiaLite as a string.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\db\\backends\\spatialite\\operations.py",
    "ast_data": "FunctionDef name:geos_version arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "Salomon",
    "source_code": "class Salomon(Benchmark):\n    change_dimensionality = True\n\n    def __init__(self, dimensions=2):\n        Benchmark.__init__(self, dimensions)\n        self._bounds = list(zip([-100.0] * self.N, [100.0] * self.N))\n        self.custom_bounds = [(-50, 50), (-50, 50)]\n        self.global_optimum = [[0.0 for _ in range(self.N)]]\n        self.fglob = 0.0\n\n    def fun(self, x, *args):\n        self.nfev += 1\n        u = sqrt(sum(x ** 2))\n        return 1 - cos(2 * pi * u) + 0.1 * u",
    "docstring": "Salomon objective function. This class defines the Salomon [1]_ global optimization problem. This is a multimodal minimization problem defined as follows: .. math:: f_{\\text{Salomon}}(x) = 1 - \\cos \\left (2 \\pi \\sqrt{\\sum_{i=1}^{n} x_i^2} \\right) + 0.1 \\sqrt{\\sum_{i=1}^n x_i^2} Here, :math: represents the number of dimensions and :math: for :math:. *Global optimum*: :math: for :math: for :math: .. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions For Global Optimization Problems Int. Journal of Mathematical Modelling and Numerical Optimisation, 2013, 4, 150-194.",
    "type": "class",
    "file_path": "scipy\\benchmarks\\benchmarks\\go_benchmark_functions\\go_funcs_S.py",
    "ast_data": "ClassDef name:Salomon Assign FunctionDef name:__init__ arg:self arg:dimensions arguments arg arg Call Assign Call Call Assign Assign Call Assign FunctionDef name:fun arg:self arg:x arguments arg arg arg Assign Call Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "_is_compiled",
    "source_code": "def _is_compiled() -> bool:\n    return torch._C._has_xpu",
    "docstring": "Return true if compile with XPU support.",
    "type": "function",
    "file_path": "pytorch\\torch\\xpu\\__init__.py",
    "ast_data": "FunctionDef name:_is_compiled arguments Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "decision_function",
    "source_code": "@available_if(_estimator_has('decision_function', delegates=('final_estimator_', 'final_estimator')))\ndef decision_function(self, X):\n    check_is_fitted(self)\n    return self.final_estimator_.decision_function(self.transform(X))",
    "docstring": "Decision function for samples in using the final estimator. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) Training vectors, where is the number of samples and is the number of features. Returns ------- decisions : ndarray of shape (n_samples,), (n_samples, n_classes), or (n_samples, n_classes * (n_classes-1) / 2) The decision function computed the final estimator.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\ensemble\\_stacking.py",
    "ast_data": "FunctionDef name:decision_function arg:self arg:X arguments arg arg Call Return return:yes Call Call Call Call"
  },
  {
    "library": "scikit-learn",
    "name": "_silhouette_reduce",
    "source_code": "def _silhouette_reduce(D_chunk, start, labels, label_freqs):\n    n_chunk_samples = D_chunk.shape[0]\n    cluster_distances = np.zeros((n_chunk_samples, len(label_freqs)), dtype=D_chunk.dtype)\n    if issparse(D_chunk):\n        if D_chunk.format != 'csr':\n            raise TypeError('Expected CSR matrix. Please pass sparse matrix in CSR format.')\n        for i in range(n_chunk_samples):\n            indptr = D_chunk.indptr\n            indices = D_chunk.indices[indptr[i]:indptr[i + 1]]\n            sample_weights = D_chunk.data[indptr[i]:indptr[i + 1]]\n            sample_labels = np.take(labels, indices)\n            cluster_distances[i] += np.bincount(sample_labels, weights=sample_weights, minlength=len(label_freqs))\n    else:\n        for i in range(n_chunk_samples):\n            sample_weights = D_chunk[i]\n            sample_labels = labels\n            cluster_distances[i] += np.bincount(sample_labels, weights=sample_weights, minlength=len(label_freqs))\n    end = start + n_chunk_samples\n    intra_index = (np.arange(n_chunk_samples), labels[start:end])\n    intra_cluster_distances = cluster_distances[intra_index]\n    cluster_distances[intra_index] = np.inf\n    cluster_distances /= label_freqs\n    inter_cluster_distances = cluster_distances.min(axis=1)\n    return (intra_cluster_distances, inter_cluster_distances)",
    "docstring": "Accumulate silhouette statistics for vertical chunk of X. Parameters ---------- D_chunk : {array-like, sparse matrix} of shape (n_chunk_samples, n_samples) Precomputed distances for a chunk. If a sparse matrix is provided, only CSR format is accepted. start : int First index in the chunk. labels : array-like of shape (n_samples,) Corresponding cluster labels, encoded as {0, ..., n_clusters-1}. label_freqs : array-like Distribution of cluster labels in ``.",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\metrics\\cluster\\_unsupervised.py",
    "ast_data": "FunctionDef name:_silhouette_reduce arg:D_chunk arg:start arg:labels arg:label_freqs arguments arg arg arg arg Assign Assign Call Call If Call If Compare Raise Call For Call Assign Assign Assign Assign Call Call Call For Call Assign Assign Call Call Assign Assign Call Assign Assign Assign Call Return return:yes"
  },
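`_silhouette_reduce` is an internal chunk-wise accumulator; its results surface through the public silhouette API. A small sketch of that public entry point (the data and cluster count here are arbitrary):

```python
from sklearn.cluster import KMeans
from sklearn.datasets import make_blobs
from sklearn.metrics import silhouette_samples, silhouette_score

X, _ = make_blobs(n_samples=200, centers=3, random_state=0)
labels = KMeans(n_clusters=3, n_init=10, random_state=0).fit_predict(X)

# Internally, pairwise distances are processed in chunks and reduced to
# per-sample intra-/inter-cluster distances, as in _silhouette_reduce.
per_sample = silhouette_samples(X, labels)
print(per_sample.shape)                       # (200,)
print(round(silhouette_score(X, labels), 3))  # mean over samples
```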
  {
    "library": "tensorflow",
    "name": "_signature_from_annotations",
    "source_code": "def _signature_from_annotations(func):\n    func_signature = tf_inspect.signature(func)\n    signature = dict([(name, param.annotation) for name, param in func_signature.parameters.items() if param.annotation != tf_inspect.Parameter.empty])\n    if not signature:\n        raise ValueError('The dispatch_for_api decorator must be called with at least one signature, or applied to a function that has type annotations on its parameters.')\n    return signature",
    "docstring": "Builds a dict mapping from parameter names to type annotations.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\util\\dispatch.py",
    "ast_data": "FunctionDef name:_signature_from_annotations arg:func arguments arg Assign Call Assign Call Call Compare If Raise Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_set_handle_data",
    "source_code": "def _set_handle_data(func_graph, fdef):\n    for tensor, arg_def in itertools.chain(zip(func_graph.inputs, fdef.signature.input_arg), zip(func_graph.outputs, fdef.signature.output_arg)):\n        if arg_def.handle_data:\n            tensor.set_shape([])\n            shape_and_dtype = arg_def.handle_data[0]\n            handle_data = cpp_shape_inference_pb2.CppShapeInferenceResult.HandleData()\n            handle_data.is_set = True\n            handle_data.shape_and_type.append(cpp_shape_inference_pb2.CppShapeInferenceResult.HandleShapeAndType(shape=shape_and_dtype.shape, dtype=shape_and_dtype.dtype))\n            resource_variable_ops._set_handle_shapes_and_types(tensor, handle_data, True)",
    "docstring": "Adds handle data for resource type inputs and outputs.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\function_def_to_graph.py",
    "ast_data": "FunctionDef name:_set_handle_data arg:func_graph arg:fdef arguments arg arg For Call Call Call If Call Assign Assign Call Assign Call Call Call"
  },
  {
    "library": "matplotlib",
    "name": "set_marker",
    "source_code": "@_docstring.interpd\ndef set_marker(self, marker):\n    self._marker = MarkerStyle(marker, self._marker.get_fillstyle())\n    self.stale = True",
    "docstring": "Set the line marker. Parameters ---------- marker : marker style string, or See for full description of possible arguments.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\lines.py",
    "ast_data": "FunctionDef name:set_marker arg:self arg:marker arguments arg arg Assign Call Call Assign"
  },
  {
    "library": "scipy",
    "name": "abcd_normalize",
    "source_code": "def abcd_normalize(A=None, B=None, C=None, D=None):\n    A, B, C, D = map(_atleast_2d_or_none, (A, B, C, D))\n    MA, NA = _shape_or_none(A)\n    MB, NB = _shape_or_none(B)\n    MC, NC = _shape_or_none(C)\n    MD, ND = _shape_or_none(D)\n    p = _choice_not_none(MA, MB, NC)\n    q = _choice_not_none(NB, ND)\n    r = _choice_not_none(MC, MD)\n    if p is None or q is None or r is None:\n        raise ValueError('Not enough information on the system.')\n    A, B, C, D = map(_none_to_empty_2d, (A, B, C, D))\n    A = _restore(A, (p, p))\n    B = _restore(B, (p, q))\n    C = _restore(C, (r, p))\n    D = _restore(D, (r, q))\n    return (A, B, C, D)",
    "docstring": "Check state-space matrices and ensure they are 2-D. If enough information on the system is provided, that is, enough properly-shaped arrays are passed to the function, the missing ones are built from this information, ensuring the correct number of rows and columns. Otherwise a ValueError is raised. Parameters ---------- A, B, C, D : array_like, optional State-space matrices. All of them are None (missing) by default. See for format. Returns ------- A, B, C, D : array Properly shaped state-space matrices. Raises ------ ValueError If not enough information on the system was provided.",
    "type": "function",
    "file_path": "scipy\\scipy\\signal\\_lti_conversion.py",
    "ast_data": "FunctionDef name:abcd_normalize arg:A arg:B arg:C arg:D arguments arg arg arg arg Assign Call Assign Call Assign Call Assign Call Assign Call Assign Call Assign Call Assign Call If BoolOp Compare Compare Compare Raise Call Assign Call Assign Call Assign Call Assign Call Assign Call Return return:yes"
  },
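A short usage sketch: given `A`, `B`, and `C`, `abcd_normalize` infers the missing `D` as a properly shaped zero matrix (the system below is arbitrary).

```python
import numpy as np
from scipy.signal import abcd_normalize

A = np.array([[0.0, 1.0], [-2.0, -3.0]])  # p = 2 states
B = np.array([[0.0], [1.0]])              # q = 1 input
C = np.array([[1.0, 0.0]])                # r = 1 output

# D is missing; it is rebuilt as an (r, q) zero matrix.
A2, B2, C2, D2 = abcd_normalize(A=A, B=B, C=C)
print(D2)        # [[0.]]
print(D2.shape)  # (1, 1)
```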
  {
    "library": "seaborn",
    "name": "infer_map_type",
    "source_code": "def infer_map_type(self, palette, norm, input_format, var_type):\n    if palette in QUAL_PALETTES:\n        map_type = 'categorical'\n    elif norm is not None:\n        map_type = 'numeric'\n    elif isinstance(palette, (dict, list)):\n        map_type = 'categorical'\n    elif input_format == 'wide':\n        map_type = 'categorical'\n    else:\n        map_type = var_type\n    return map_type",
    "docstring": "Determine how to implement the mapping.",
    "type": "method",
    "file_path": "seaborn\\seaborn\\_base.py",
    "ast_data": "FunctionDef name:infer_map_type arg:self arg:palette arg:norm arg:input_format arg:var_type arguments arg arg arg arg arg If Compare Assign If Compare Assign If Call Assign If Compare Assign Assign Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_join_sycl_home",
    "source_code": "def _join_sycl_home(*paths) -> str:\n    if SYCL_HOME is None:\n        raise OSError('SYCL runtime is not dected. Please setup the pytorch prerequisites for Intel GPU following the instruction in https://github.com/pytorch/pytorch?tab=readme-ov-file#intel-gpu-support or install intel-sycl-rt via pip.')\n    return os.path.join(SYCL_HOME, *paths)",
    "docstring": "Join paths with SYCL_HOME, or raises an error if it SYCL_HOME is not found. This is basically a lazy way of raising an error for missing SYCL_HOME only once we need to get any SYCL-specific path.",
    "type": "function",
    "file_path": "pytorch\\torch\\utils\\cpp_extension.py",
    "ast_data": "FunctionDef name:_join_sycl_home arguments arg If Compare Raise Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "__call__",
    "source_code": "def __call__(self, shape, dtype=dtypes.float32, **kwargs):\n    self._validate_kwargs(kwargs)\n    dtype = dtypes.as_dtype(dtype)\n    if not dtype.is_floating and (not dtype.is_integer):\n        raise ValueError(f'Argument `dtype` expected to be numeric or boolean. Received {dtype}.')\n    if _PARTITION_SHAPE in kwargs:\n        shape = kwargs[_PARTITION_SHAPE]\n    return self._random_generator.random_uniform(shape, self.minval, self.maxval, dtype)",
    "docstring": "Returns a tensor object initialized as specified by the initializer. Args: shape: Shape of the tensor. dtype: Optional dtype of the tensor. Only floating point and integer types are supported. **kwargs: Additional keyword arguments. Raises: ValueError: If the dtype is not numeric.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\init_ops_v2.py",
    "ast_data": "FunctionDef name:__call__ arg:self arg:shape arg:dtype arguments arg arg arg arg Call Assign Call If BoolOp Raise Call If Compare Assign Return return:yes Call"
  },
  {
    "library": "numpy",
    "name": "rindex",
    "source_code": "@set_module('numpy.strings')\ndef rindex(a, sub, start=0, end=None):\n    end = end if end is not None else MAX\n    return _rindex_ufunc(a, sub, start, end)",
    "docstring": "Like , but raises :exc: when the substring is not found. Parameters ---------- a : array-like, with or dtype sub : array-like, with or dtype start, end : array-like, with any integer dtype, optional Returns ------- out : ndarray Output array of ints. See Also -------- rfind, str.rindex Examples -------- >>> a = np.array([\"Computer Science\"]) >>> np.strings.rindex(a, \"Science\", start=0, end=None) array([9])",
    "type": "function",
    "file_path": "numpy\\numpy\\_core\\strings.py",
    "ast_data": "FunctionDef name:rindex arg:a arg:sub arg:start arg:end arguments arg arg arg arg Assign Compare Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "row_splits",
    "source_code": "def row_splits(self):\n    return self._row_splits",
    "docstring": "Returns the row-split indices for this row partition. specifies where the values for each row begin and end. In particular, the values for row are stored in the slice . Returns: A 1-D integer with shape . The returned tensor is non-empty, and is sorted in ascending order. . .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\row_partition.py",
    "ast_data": "FunctionDef name:row_splits arg:self arguments arg Return return:yes"
  },
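A sketch of the row-splits contract using the public `tf.RaggedTensor` mirror of this accessor (the sample values are arbitrary):

```python
import tensorflow as tf

rt = tf.ragged.constant([[3, 1, 4], [], [5, 9]])
splits = rt.row_splits   # [0, 3, 3, 5]
values = rt.flat_values  # [3, 1, 4, 5, 9]

# Values for row i live in values[splits[i]:splits[i+1]].
print(values[splits[0]:splits[1]].numpy())  # [3 1 4]
print(values[splits[1]:splits[2]].numpy())  # [] (row 1 is empty)
```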
  {
    "library": "pytorch",
    "name": "_run_prepare_fx_on_standalone_modules",
    "source_code": "def _run_prepare_fx_on_standalone_modules(model: torch.nn.Module, is_qat: bool, named_modules: dict[str, torch.nn.Module], node_name_to_match_result_with_qconfig: Any, prepare_custom_config: PrepareCustomConfig, backend_config: BackendConfig) -> None:\n    for root_node, _, _pattern, qhandler, qconfig in node_name_to_match_result_with_qconfig.values():\n        if qhandler is None:\n            continue\n        elif not qhandler.is_standalone_module():\n            continue\n        sm_qconfig_mapping, sm_example_inputs, sm_prepare_custom_config, sm_backend_config = _get_standalone_module_configs(root_node, named_modules, prepare_custom_config, qconfig, backend_config)\n        standalone_module = named_modules[root_node.target]\n        prepare = torch.ao.quantization.quantize_fx._prepare_standalone_module_fx\n        observed_standalone_module = prepare(standalone_module, sm_qconfig_mapping, is_qat, example_inputs=sm_example_inputs, prepare_custom_config=sm_prepare_custom_config, backend_config=sm_backend_config)\n        parent_name, name = _parent_name(root_node.target)\n        setattr(named_modules[parent_name], name, observed_standalone_module)\n        named_modules[root_node.target] = observed_standalone_module",
    "docstring": "Runs prepare_fx on each standalone module. Note: this does not modify the graph, it just replaces the unobserved modules with their observed versions.",
    "type": "function",
    "file_path": "pytorch\\torch\\ao\\quantization\\fx\\prepare.py",
    "ast_data": "FunctionDef name:_run_prepare_fx_on_standalone_modules arg:model arg:is_qat arg:named_modules arg:node_name_to_match_result_with_qconfig arg:prepare_custom_config arg:backend_config arguments arg arg arg arg arg arg For Call If Compare If Call Assign Call Assign Assign Assign Call Assign Call Call Assign"
  },
  {
    "library": "matplotlib",
    "name": "_location_coords",
    "source_code": "def _location_coords(self, xv, yv, renderer):\n    p1, pane_idx = self._calc_coord(xv, yv, renderer)\n    xs = self.format_xdata(p1[0])\n    ys = self.format_ydata(p1[1])\n    zs = self.format_zdata(p1[2])\n    if pane_idx == 0:\n        coords = f'x pane={xs}, y={ys}, z={zs}'\n    elif pane_idx == 1:\n        coords = f'x={xs}, y pane={ys}, z={zs}'\n    elif pane_idx == 2:\n        coords = f'x={xs}, y={ys}, z pane={zs}'\n    return coords",
    "docstring": "Return the location on the axis pane underneath the cursor as a string.",
    "type": "method",
    "file_path": "matplotlib\\lib\\mpl_toolkits\\mplot3d\\axes3d.py",
    "ast_data": "FunctionDef name:_location_coords arg:self arg:xv arg:yv arg:renderer arguments arg arg arg arg Assign Call Assign Call Assign Call Assign Call If Compare Assign If Compare Assign If Compare Assign Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "try_find_mesh_from_args",
    "source_code": "def try_find_mesh_from_args(op_call: torch._ops.OpOverload, args: Sequence[object]) -> DeviceMesh:\n    for arg in args:\n        if isinstance(arg, (dtensor.DTensor, DTensorSpec)):\n            return arg.device_mesh\n        elif isinstance(arg, (list, tuple)) and len(arg) > 0 and isinstance(arg[0], (dtensor.DTensor, DTensorSpec)):\n            return arg[0].device_mesh\n    raise ValueError(f'Cannot find device mesh from args for op : {op_call}.')",
    "docstring": "Find the device mesh object from args. It returns None if no mesh is found. NOTE: we can optimize this search if needed",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\tensor\\_utils.py",
    "ast_data": "FunctionDef name:try_find_mesh_from_args arg:op_call arg:args arguments arg arg For If Call Return return:yes If BoolOp Call Compare Call Call Return return:yes Raise Call"
  },
  {
    "library": "tensorflow",
    "name": "_native_handle",
    "source_code": "def _native_handle(self):\n    return self._interpreter.interpreter()",
    "docstring": "Returns a pointer to the underlying tflite::Interpreter instance. This allows extending tflite.Interpreter's functionality in a custom C++ function. Consider how that may work in a custom pybind wrapper: m.def(\"SomeNewFeature\", ( { auto* interpreter = reinterpret_cast(handle.cast()); ... })) and corresponding Python call: SomeNewFeature(interpreter.native_handle()) Note: This approach is fragile. Users must guarantee the C++ extension build is consistent with the tflite.Interpreter's underlying C++ build.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\lite\\python\\interpreter.py",
    "ast_data": "FunctionDef name:_native_handle arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "deserialize_hoo_inputs",
    "source_code": "def deserialize_hoo_inputs(self, inputs: list[NamedArgument]):\n    args = []\n    kwargs = {}\n    for input_ in inputs:\n        if input_.name != '':\n            kwargs[input_.name] = self.deserialize_input(input_.arg)\n        else:\n            args.append(self.deserialize_input(input_.arg))\n    return (tuple(args), kwargs)",
    "docstring": "For deserializing HOO inputs since HOOs do not have a schema.",
    "type": "method",
    "file_path": "pytorch\\torch\\_export\\serde\\serialize.py",
    "ast_data": "FunctionDef name:deserialize_hoo_inputs arg:self arg:inputs arguments arg arg Assign Assign For If Compare Assign Call Call Call Return return:yes Call"
  },
  {
    "library": "django",
    "name": "__reduce__",
    "source_code": "def __reduce__(self):\n    if not hasattr(self, 'model'):\n        state = self.__dict__.copy()\n        state.pop('_get_default', None)\n        return (_empty, (self.__class__,), state)\n    return (_load_field, (self.model._meta.app_label, self.model._meta.object_name, self.name))",
    "docstring": "Pickling should return the model._meta.fields instance of the field, not a new copy of that field. So, use the app registry to load the model and then the field back.",
    "type": "method",
    "file_path": "django\\django\\db\\models\\fields\\__init__.py",
    "ast_data": "FunctionDef name:__reduce__ arg:self arguments arg If Call Assign Call Call Return return:yes Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "get_dpi",
    "source_code": "def get_dpi(self):\n    return self.dpi",
    "docstring": "Return the resolution in dots per inch as a float.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\figure.py",
    "ast_data": "FunctionDef name:get_dpi arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "reduce_min",
    "source_code": "@dispatch.dispatch_for_api(math_ops.reduce_min)\ndef reduce_min(input_tensor: ragged_tensor.Ragged, axis=None, keepdims=None, name=None):\n    return ragged_reduce_aggregate(reduce_op=math_ops.reduce_min, unsorted_segment_op=math_ops.unsorted_segment_min, rt_input=input_tensor, axis=axis, keepdims=keepdims, name=name or 'RaggedReduceMin')",
    "docstring": "For docs, see: _RAGGED_REDUCE_DOCSTRING.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\ragged_math_ops.py",
    "ast_data": "FunctionDef name:reduce_min arg:input_tensor arg:axis arg:keepdims arg:name arguments arg arg arg arg Return return:yes Call BoolOp Call"
  },
  {
    "library": "tensorflow",
    "name": "_distributed_apply",
    "source_code": "def _distributed_apply(self, distribution, grads_and_vars, global_step=None, name=None):\n    name = name if name is not None else self.get_name()\n    grads = [g for g, _ in grads_and_vars]\n    loss_scale_update_op, should_apply_grads = self._loss_scale.update(grads)\n\n    def apply_fn():\n        return self._apply_gradients(distribution, grads_and_vars, global_step, name + '-wrapped')\n    maybe_apply_op = smart_cond.smart_cond(should_apply_grads, apply_fn, control_flow_ops.no_op)\n    return control_flow_ops.group(maybe_apply_op, loss_scale_update_op, name=name)",
    "docstring": "A version of for cross replica context. When users are in a cross replica strategy, they must call this rather than . Args: distribution: a object. grads_and_vars: List of (gradient, variable) pairs as returned by and then aggregated across replicas. global_step: Optional (mirrored) to increment by one after the variables have been updated. name: Optional name for the returned operation. Default to the name passed to the constructor. Returns: An that applies the specified gradients across all replicas. If was not None, that operation also increments",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\training\\experimental\\loss_scale_optimizer.py",
    "ast_data": "FunctionDef name:_distributed_apply arg:self arg:distribution arg:grads_and_vars arg:global_step arg:name arguments arg arg arg arg arg Assign Compare Call Assign Assign Call FunctionDef name:apply_fn arguments Return return:yes Call Assign Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "interrupt",
    "source_code": "def interrupt(self) -> None:\n    self._proc.send_signal(signal.SIGINT)",
    "docstring": "Soft interrupt. Allows subprocess to cleanup.",
    "type": "method",
    "file_path": "pytorch\\benchmarks\\instruction_counts\\execution\\work.py",
    "ast_data": "FunctionDef name:interrupt arg:self arguments arg Call"
  },
  {
    "library": "pytorch",
    "name": "library_paths",
    "source_code": "def library_paths(device_type: str='cpu') -> list[str]:\n    paths = [TORCH_LIB_PATH]\n    if device_type == 'cuda' and IS_HIP_EXTENSION:\n        lib_dir = 'lib'\n        paths.append(_join_rocm_home(lib_dir))\n        if HIP_HOME is not None:\n            paths.append(os.path.join(HIP_HOME, 'lib'))\n    elif device_type == 'cuda':\n        if IS_WINDOWS:\n            lib_dir = os.path.join('lib', 'x64')\n        else:\n            lib_dir = 'lib64'\n            if not os.path.exists(_join_cuda_home(lib_dir)) and os.path.exists(_join_cuda_home('lib')):\n                lib_dir = 'lib'\n        paths.append(_join_cuda_home(lib_dir))\n        if CUDNN_HOME is not None:\n            paths.append(os.path.join(CUDNN_HOME, lib_dir))\n    elif device_type == 'xpu':\n        if IS_WINDOWS:\n            lib_dir = os.path.join('lib', 'x64')\n        else:\n            lib_dir = 'lib64'\n            if not os.path.exists(_join_sycl_home(lib_dir)) and os.path.exists(_join_sycl_home('lib')):\n                lib_dir = 'lib'\n        paths.append(_join_sycl_home(lib_dir))\n    return paths",
    "docstring": "Get the library paths required to build a C++ or CUDA extension. Args: device_type: Defaults to \"cpu\". Returns: A list of library path strings.",
    "type": "function",
    "file_path": "pytorch\\torch\\utils\\cpp_extension.py",
    "ast_data": "FunctionDef name:library_paths arg:device_type arguments arg Assign If BoolOp Compare Assign Call Call If Compare Call Call If Compare If Assign Call Assign If BoolOp Call Call Call Call Assign Call Call If Compare Call Call If Compare If Assign Call Assign If BoolOp Call Call Call Call Assign Call Call Return return:yes"
  },
  {
    "library": "pandas",
    "name": "as_unit",
    "source_code": "def as_unit(self, unit: str) -> Self:\n    arr = self._data.as_unit(unit)\n    return type(self)._simple_new(arr, name=self.name)",
    "docstring": "Convert to a dtype with the given unit resolution. This method is for converting the dtype of a `pandas.DatetimeIndexpandas.TimedeltaIndex`: >>> tdelta_idx = pd.to_timedelta([\"1 day 3 min 2 us 42 ns\"]) >>> tdelta_idx TimedeltaIndex(['1 days 00:03:00.000002042'], dtype='timedelta64[ns]', freq=None) >>> tdelta_idx.as_unit(\"s\") TimedeltaIndex(['1 days 00:03:00'], dtype='timedelta64[s]', freq=None)",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\indexes\\datetimelike.py",
    "ast_data": "FunctionDef name:as_unit arg:self arg:unit arguments arg arg Assign Call Return return:yes Call Call"
  },
  {
    "library": "django",
    "name": "flush",
    "source_code": "def flush(self):\n    self.migration_qs.all().delete()",
    "docstring": "Delete all migration records. Useful for testing migrations.",
    "type": "method",
    "file_path": "django\\django\\db\\migrations\\recorder.py",
    "ast_data": "FunctionDef name:flush arg:self arguments arg Call Call"
  },
  {
    "library": "pytorch",
    "name": "bform",
    "source_code": "def bform(X: Tensor, A: Optional[Tensor], Y: Tensor) -> Tensor:\n    return matmul(X.mT, matmul(A, Y))",
    "docstring": "Return bilinear form of matrices: :math:.",
    "type": "function",
    "file_path": "pytorch\\torch\\_linalg_utils.py",
    "ast_data": "FunctionDef name:bform arg:X arg:A arg:Y arguments arg arg arg Return return:yes Call Call"
  },
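A quick check that `bform` computes the bilinear form :math:`X^T A Y`, written with plain tensor ops to match the `matmul(X.mT, matmul(A, Y))` body above:

```python
import torch

X = torch.randn(3, 2)
A = torch.randn(3, 3)
Y = torch.randn(3, 4)

out = X.mT @ (A @ Y)               # what bform computes
ref = X.transpose(-2, -1) @ A @ Y  # X^T A Y written out
print(out.shape)                   # torch.Size([2, 4])
print(torch.allclose(out, ref))    # True
```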
  {
    "library": "pytorch",
    "name": "_cast_buffers",
    "source_code": "def _cast_buffers(mixed_precision_config, root_module):\n    for buf in root_module.buffers():\n        if hasattr(buf, '_ddp_ignored') and buf._ddp_ignored:\n            continue\n        buf.data = buf.to(dtype=mixed_precision_config.buffer_dtype)",
    "docstring": "Casts buffers to the given ``.",
    "type": "function",
    "file_path": "pytorch\\torch\\nn\\parallel\\distributed.py",
    "ast_data": "FunctionDef name:_cast_buffers arg:mixed_precision_config arg:root_module arguments arg arg For Call If BoolOp Call Assign Call"
  },
  {
    "library": "django",
    "name": "to_multi",
    "source_code": "def to_multi(self):\n    if self.name.startswith(('Point', 'LineString', 'Polygon')):\n        self.num += 3",
    "docstring": "Transform Point, LineString, Polygon, and their 25D equivalents to their Multi... counterpart.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\gdal\\geomtype.py",
    "ast_data": "FunctionDef name:to_multi arg:self arguments arg If Call"
  },
  {
    "library": "pytorch",
    "name": "DistributedVariable",
    "source_code": "class DistributedVariable(VariableTracker):\n\n    def __init__(self, value, **kwargs) -> None:\n        super().__init__(**kwargs)\n        if not DistributedVariable.is_available():\n            unimplemented_v2(gb_type='torch.distributed package is not available!', context='', explanation=\"The PyTorch package doesn't include torch.distributed when builing from source.\", hints=['Set USE_DISTRIBUTED=1 to enable it when building PyTorch from source.'])\n        self.value = value\n\n    def python_type(self):\n        return type(self.value)\n\n    @staticmethod\n    def is_available():\n        return torch.distributed.is_available()",
    "docstring": "The base distributed variable that encapsulates common methods for the distributed objects (i.e. ProcessGroup, DeviceMesh, etc.). Concrete distributed objects could inherit this class and add object specific logic. i.e. It provides the check on the distributed package existance and hold the tracking value for the corresponding distributed object.",
    "type": "class",
    "file_path": "pytorch\\torch\\_dynamo\\variables\\distributed.py",
    "ast_data": "ClassDef name:DistributedVariable FunctionDef name:__init__ arg:self arg:value arguments arg arg arg Call Call If Call Call Assign FunctionDef name:python_type arg:self arguments arg Return return:yes Call FunctionDef name:is_available arguments Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "dup",
    "source_code": "def dup(node, copy_map, field_name='___pyct_anno'):\n    for n in gast.walk(node):\n        for k in copy_map:\n            if hasanno(n, k, field_name):\n                setanno(n, copy_map[k], getanno(n, k, field_name), field_name)",
    "docstring": "Recursively copies annotations in an AST tree. Args: node: ast.AST copy_map: Dict[Hashable, Hashable], maps a source anno key to a destination key. All annotations with the source key will be copied to identical annotations with the destination key. field_name: str",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\autograph\\pyct\\anno.py",
    "ast_data": "FunctionDef name:dup arg:node arg:copy_map arg:field_name arguments arg arg arg For Call For If Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "wrap_numpy",
    "source_code": "def wrap_numpy(f: Callable[_P, _R]) -> Callable[_P, _R]:\n    if not np:\n        return f\n\n    @functools.wraps(f)\n    def wrap(*args: _P.args, **kwargs: _P.kwargs) -> pytree.PyTree:\n        args, kwargs = pytree.tree_map_only(torch.Tensor, lambda x: x.numpy(), (args, kwargs))\n        out = f(*args, **kwargs)\n        return pytree.tree_map_only(np.ndarray, lambda x: torch.as_tensor(x), out)\n    return wrap",
    "docstring": "Decorator that turns a function from ``s.",
    "type": "function",
    "file_path": "pytorch\\torch\\_dynamo\\external_utils.py",
    "ast_data": "FunctionDef name:wrap_numpy arg:f arguments arg If Return return:yes FunctionDef name:wrap arguments arg arg Assign Call arguments arg Call Assign Call Return return:yes Call arguments arg Call Call Return return:yes"
  },
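A minimal re-implementation sketch of what the decorator does for the simple positional-args case (the real version uses `pytree` to handle arbitrary nesting; `wrap_numpy_sketch` and `np_double` are our own names):

```python
import numpy as np
import torch

def wrap_numpy_sketch(f):
    # Tensors in -> ndarrays for f, then ndarrays out -> Tensors.
    def wrapped(*args):
        np_args = [a.numpy() if isinstance(a, torch.Tensor) else a
                   for a in args]
        out = f(*np_args)
        return torch.as_tensor(out) if isinstance(out, np.ndarray) else out
    return wrapped

@wrap_numpy_sketch
def np_double(x):
    return x * 2  # pure NumPy code

print(np_double(torch.arange(3)))  # tensor([0, 2, 4])
```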
  {
    "library": "pytorch",
    "name": "is_node_realized",
    "source_code": "def is_node_realized(node: torch.fx.Node) -> bool:\n    from torch._inductor.lowering import fallbacks, needs_realized_inputs\n\n    def is_buffer(node: torch.fx.Node) -> bool:\n        if node.op == 'call_function' and node.target is operator.getitem:\n            return is_buffer(node.args[0])\n        return node.op in ('placeholder', 'output') or node.target in fallbacks\n    if is_buffer(node):\n        return True\n\n    def realizes_inputs(node: torch.fx.Node) -> bool:\n        return node.op == 'output' or node.target in needs_realized_inputs\n    if any((realizes_inputs(user) for user in node.users)):\n        return True\n    return False",
    "docstring": "Returns true if a node is always realized when lowered to inductor IR. NOTE: This may return some false negatives. e.g. it doesn't handle buffers realized heuristically during lowering, or buffers realized indirectly through view ops.",
    "type": "function",
    "file_path": "pytorch\\torch\\_inductor\\fx_utils.py",
    "ast_data": "FunctionDef name:is_node_realized arg:node arguments arg FunctionDef name:is_buffer arg:node arguments arg If BoolOp Compare Compare Return return:yes Call Return return:yes BoolOp Compare Compare If Call Return return:yes FunctionDef name:realizes_inputs arg:node arguments arg Return return:yes BoolOp Compare Compare If Call Call Return return:yes Return return:yes"
  },
  {
    "library": "kornia",
    "name": "reset_model",
    "source_code": "def reset_model(self) -> None:\n    torch.nn.init.zeros_(self.rot)\n    torch.nn.init.zeros_(self.shift)\n    torch.nn.init.ones_(self.scale)",
    "docstring": "Initialize the model with identity transform.",
    "type": "method",
    "file_path": "kornia\\kornia\\geometry\\transform\\image_registrator.py",
    "ast_data": "FunctionDef name:reset_model arg:self arguments arg Call Call Call"
  },
  {
    "library": "django",
    "name": "delete_model",
    "source_code": "def delete_model(self, model):\n    for field in model._meta.local_many_to_many:\n        if field.remote_field.through._meta.auto_created:\n            self.delete_model(field.remote_field.through)\n    self.execute(self.sql_delete_table % {'table': self.quote_name(model._meta.db_table)})\n    for sql in list(self.deferred_sql):\n        if isinstance(sql, Statement) and sql.references_table(model._meta.db_table):\n            self.deferred_sql.remove(sql)",
    "docstring": "Delete a model from the database.",
    "type": "method",
    "file_path": "django\\django\\db\\backends\\base\\schema.py",
    "ast_data": "FunctionDef name:delete_model arg:self arg:model arguments arg arg For If Call Call Call For Call If BoolOp Call Call Call"
  },
  {
    "library": "matplotlib",
    "name": "get_registered_canvas_class",
    "source_code": "def get_registered_canvas_class(format):\n    if format not in _default_backends:\n        return None\n    backend_class = _default_backends[format]\n    if isinstance(backend_class, str):\n        backend_class = importlib.import_module(backend_class).FigureCanvas\n        _default_backends[format] = backend_class\n    return backend_class",
    "docstring": "Return the registered default canvas for given file format. Handles deferred import of required backend.",
    "type": "function",
    "file_path": "matplotlib\\lib\\matplotlib\\backend_bases.py",
    "ast_data": "FunctionDef name:get_registered_canvas_class arg:format arguments arg If Compare Return return:no Assign If Call Assign Call Assign Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "__getitem__",
    "source_code": "def __getitem__(self, name):\n    if self.is_available(name):\n        return self._registered[name]\n    raise RuntimeError(f'Requested MovieWriter ({name}) not available')",
    "docstring": "Get an available writer class from its name.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\animation.py",
    "ast_data": "FunctionDef name:__getitem__ arg:self arg:name arguments arg arg If Call Return return:yes Raise Call"
  },
  {
    "library": "pandas",
    "name": "_inplace_method",
    "source_code": "@final\ndef _inplace_method(self, other, op) -> Self:\n    result = op(self, other)\n    self._update_inplace(result.reindex_like(self))\n    return self",
    "docstring": "Wrap arithmetic method to operate inplace.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\generic.py",
    "ast_data": "FunctionDef name:_inplace_method arg:self arg:other arg:op arguments arg arg arg Assign Call Call Call Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "loss_gradient",
    "source_code": "def loss_gradient(self, y_true, raw_prediction, sample_weight=None, loss_out=None, gradient_out=None, n_threads=1):\n    if loss_out is None:\n        if gradient_out is None:\n            loss_out = np.empty_like(y_true)\n            gradient_out = np.empty_like(raw_prediction)\n        else:\n            loss_out = np.empty_like(y_true, dtype=gradient_out.dtype)\n    elif gradient_out is None:\n        gradient_out = np.empty_like(raw_prediction, dtype=loss_out.dtype)\n    if raw_prediction.ndim == 2 and raw_prediction.shape[1] == 1:\n        raw_prediction = raw_prediction.squeeze(1)\n    if gradient_out.ndim == 2 and gradient_out.shape[1] == 1:\n        gradient_out = gradient_out.squeeze(1)\n    self.closs.loss_gradient(y_true=y_true, raw_prediction=raw_prediction, sample_weight=sample_weight, loss_out=loss_out, gradient_out=gradient_out, n_threads=n_threads)\n    return (loss_out, gradient_out)",
    "docstring": "Compute loss and gradient w.r.t. raw_prediction for each input. Parameters ---------- y_true : C-contiguous array of shape (n_samples,) Observed, true target values. raw_prediction : C-contiguous array of shape (n_samples,) or array of shape (n_samples, n_classes) Raw prediction values (in link space). sample_weight : None or C-contiguous array of shape (n_samples,) Sample weights. loss_out : None or C-contiguous array of shape (n_samples,) A location into which the loss is stored. If None, a new array might be created. gradient_out : None or C-contiguous array of shape (n_samples,) or array of shape (n_samples, n_classes) A location into which the gradient is stored. If None, a new array might be created. n_threads : int, default=1 Might use openmp thread parallelism. Returns ------- loss : array of shape (n_samples,) Element-wise loss function. gradient : array of shape (n_samples,) or (n_samples, n_classes) Element-wise gradients.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\_loss\\loss.py",
    "ast_data": "FunctionDef name:loss_gradient arg:self arg:y_true arg:raw_prediction arg:sample_weight arg:loss_out arg:gradient_out arg:n_threads arguments arg arg arg arg arg arg arg If Compare If Compare Assign Call Assign Call Assign Call If Compare Assign Call If BoolOp Compare Compare Assign Call If BoolOp Compare Compare Assign Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "add_summary",
    "source_code": "def add_summary(self, summ, current_global_step):\n    if isinstance(summ, bytes):\n        summary_proto = summary_pb2.Summary()\n        summary_proto.ParseFromString(summ)\n        summ = summary_proto\n    if current_global_step in self._summaries:\n        step_summaries = self._summaries[current_global_step]\n    else:\n        step_summaries = []\n        self._summaries[current_global_step] = step_summaries\n    step_summaries.append(summ)",
    "docstring": "Add summary.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\summary\\writer\\fake_summary_writer.py",
    "ast_data": "FunctionDef name:add_summary arg:self arg:summ arg:current_global_step arguments arg arg arg If Call Assign Call Call Assign If Compare Assign Assign Assign Call"
  },
  {
    "library": "tensorflow",
    "name": "gather",
    "source_code": "@dispatch.dispatch_for_types(array_ops.gather, StructuredTensor)\ndef gather(params, indices, validate_indices=None, name=None, axis=None, batch_dims=0):\n    if name is None:\n        name = 'gather'\n    with ops.name_scope(name):\n        if axis is None:\n            axis = batch_dims\n        axis = array_ops.get_positive_axis(axis, params.shape.rank, ndims_name='params.shape.rank')\n        indices = ragged_tensor.convert_to_tensor_or_ragged_tensor(indices, name='indices')\n\n        def leaf_op(p):\n            return array_ops.gather(p, indices, validate_indices=validate_indices, axis=axis, batch_dims=batch_dims, name=None)\n        return _extend_op_single(params, leaf_op)",
    "docstring": "tf.gather for structured tensors. Does not support (yet) checks on illegal axis values, et cetera. Indices must be a ragged or dense tensor. Args: params: a structured tensor to be gathered indices: a ragged tensor or tensor to gather by. validate_indices: whether to validate the indices name: the name of the op(s). axis: the axis in params to gather on. batch_dims: the number of batch dimensions. Returns: the params reorganized according to indices.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\structured\\structured_array_ops.py",
    "ast_data": "FunctionDef name:gather arg:params arg:indices arg:validate_indices arg:name arg:axis arg:batch_dims arguments arg arg arg arg arg arg If Compare Assign With Call If Compare Assign Assign Call Assign Call FunctionDef name:leaf_op arg:p arguments arg Return return:yes Call Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_is_v2_column",
    "source_code": "@abc.abstractproperty\ndef _is_v2_column(self):\n    pass",
    "docstring": "Returns whether this FeatureColumn is fully conformant to the new API. This is needed for composition type cases where an EmbeddingColumn etc. might take in old categorical columns as input and then we want to use the old API.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\feature_column\\feature_column_v2_types.py",
    "ast_data": "FunctionDef name:_is_v2_column arg:self arguments arg"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, node_def, op, message, *args):\n    super(UnknownError, self).__init__(node_def, op, message, UNKNOWN, *args)",
    "docstring": "Creates an .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\errors_impl.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:node_def arg:op arg:message arguments arg arg arg arg arg Call Call"
  },
  {
    "library": "pytorch",
    "name": "get_unique_name_wrt",
    "source_code": "def get_unique_name_wrt(prefix: str, *containers, requires_suffix=False) -> str:\n    if not requires_suffix and (not is_in(prefix, *containers)):\n        return prefix\n    for i in itertools.count():\n        candidate = f'{prefix}_{i}'\n        if not is_in(candidate, *containers):\n            return candidate\n    raise AssertionError('unreachable')",
    "docstring": "Return a name that starts with and is not in any of the (e.g., map, set).",
    "type": "function",
    "file_path": "pytorch\\torch\\_dynamo\\utils.py",
    "ast_data": "FunctionDef name:get_unique_name_wrt arg:prefix arguments arg arg arg If BoolOp Call Return return:yes For Call Assign If Call Return return:yes Raise Call"
  },
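A standalone sketch of the naming strategy above: try the bare prefix first, then append increasing integer suffixes until the name is free (`unique_name` is our own name for the sketch):

```python
import itertools

def unique_name(prefix, *containers, requires_suffix=False):
    def taken(name):
        return any(name in c for c in containers)
    # Bare prefix wins if it is free and no suffix is required.
    if not requires_suffix and not taken(prefix):
        return prefix
    # Otherwise try prefix_0, prefix_1, ... until one is free.
    for i in itertools.count():
        candidate = f"{prefix}_{i}"
        if not taken(candidate):
            return candidate

print(unique_name("x", {"x", "x_0"}))  # 'x_1'
```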
  {
    "library": "tensorflow",
    "name": "_gather_saveables_for_checkpoint",
    "source_code": "def _gather_saveables_for_checkpoint(self) -> Dict[str, Callable[..., Any]]:\n\n    def _saveable_factory(name=self._common_name):\n        saveables = []\n        num_shards = len(self.values)\n        for shard_id in range(num_shards):\n            saveables.append(TPUEmbeddingShardedSaveable(self.values[shard_id], shard_id, num_shards, self.shard_dim, name))\n        return saveables\n    return {base.VARIABLE_VALUE_KEY: _saveable_factory}",
    "docstring": "Overrides Trackable method. Returns: A dictionary mapping attribute names to factories.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\tpu\\tpu_embedding_v3.py",
    "ast_data": "FunctionDef name:_gather_saveables_for_checkpoint arg:self arguments arg FunctionDef name:_saveable_factory arg:name arguments arg Assign Assign Call For Call Call Call Return return:yes Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "codegen_reduction_indices",
    "source_code": "def codegen_reduction_indices(self, buffer: IndentedBuffer) -> None:\n    rn_offsets = self._get_reduction_symbols('offset', integer=True, nonnegative=True)\n    rn_inds = self._get_reduction_symbols('index', integer=True, nonnegative=True)\n    roffset = self._flatten_reduction_indices(rn_offsets)\n    buffer.splice(f'roffset = {self.index_to_str(roffset)}')\n    rindex = self._flatten_reduction_indices(rn_inds)\n    buffer.splice(f'rindex = {self.index_to_str(rindex)}')",
    "docstring": "Generates code that converts ND reduction indices into linear indices.",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\codegen\\triton.py",
    "ast_data": "FunctionDef name:codegen_reduction_indices arg:self arg:buffer arguments arg arg Assign Call Assign Call Assign Call Call Call Assign Call Call Call"
  },
  {
    "library": "matplotlib",
    "name": "get_dash_capstyle",
    "source_code": "def get_dash_capstyle(self):\n    return self._dashcapstyle.name",
    "docstring": "Return the for dashed lines. See also .",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\lines.py",
    "ast_data": "FunctionDef name:get_dash_capstyle arg:self arguments arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_StorageInfo",
    "source_code": "@dataclass\nclass _StorageInfo:\n    relative_path: str\n    offset: int\n    length: int\n    transform_descriptors: Optional[Sequence[str]] = None\n\n    def __getstate__(self):\n        return {k: v for k, v in self.__dict__.items() if v is not None}",
    "docstring": "This is the per entry storage info.",
    "type": "class",
    "file_path": "pytorch\\torch\\distributed\\checkpoint\\filesystem.py",
    "ast_data": "ClassDef name:_StorageInfo FunctionDef name:__getstate__ arg:self arguments arg Return return:yes Call Compare"
  },
  {
    "library": "django",
    "name": "postgis_geos_version",
    "source_code": "def postgis_geos_version(self):\n    return self._get_postgis_func('postgis_geos_version')",
    "docstring": "Return the version of the GEOS library used with PostGIS.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\db\\backends\\postgis\\operations.py",
    "ast_data": "FunctionDef name:postgis_geos_version arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "compute_alpha",
    "source_code": "def compute_alpha(n):\n    coeffs = mp.taylor(eta, 0, n - 1)\n    return lagrange_inversion(coeffs)",
    "docstring": "alpha_n from DLMF 8.12.13",
    "type": "function",
    "file_path": "scipy\\scipy\\special\\_precompute\\gammainc_asy.py",
    "ast_data": "FunctionDef name:compute_alpha arg:n arguments arg Assign Call Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "_update_inverse_hessian",
    "source_code": "def _update_inverse_hessian(self, ys, Hy, yHy, s):\n    self.H = self._syr2(-1.0 / ys, s, Hy, a=self.H)\n    self.H = self._syr((ys + yHy) / ys ** 2, s, a=self.H)",
    "docstring": "Update the inverse Hessian matrix. BFGS update using the formula: ``. This formula is equivalent to (6.17) in [1]_ written in a more efficient way for implementation. References ---------- .. [1] Nocedal, Jorge, and Stephen J. Wright. \"Numerical optimization\" Second Edition (2006).",
    "type": "method",
    "file_path": "scipy\\scipy\\optimize\\_hessian_update_strategy.py",
    "ast_data": "FunctionDef name:_update_inverse_hessian arg:self arg:ys arg:Hy arg:yHy arg:s arguments arg arg arg arg arg Assign Call Assign Call"
  },
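A NumPy check that the two symmetric rank updates above reproduce the textbook BFGS inverse-Hessian update (6.17). The names `ys`, `Hy`, `yHy` follow the method's arguments; the random vectors are arbitrary:

```python
import numpy as np

rng = np.random.default_rng(0)
n = 4
H = np.eye(n)
s = rng.standard_normal(n)
y = rng.standard_normal(n)

ys = y @ s
Hy = H @ y
yHy = y @ Hy

# The code's formulation: one syr2 and one syr update.
H_new = H - (np.outer(s, Hy) + np.outer(Hy, s)) / ys \
          + ((ys + yHy) / ys**2) * np.outer(s, s)

# Textbook form: H <- V H V^T + rho s s^T with rho = 1 / (y^T s).
rho = 1.0 / ys
V = np.eye(n) - rho * np.outer(s, y)
H_ref = V @ H @ V.T + rho * np.outer(s, s)

print(np.allclose(H_new, H_ref))  # True
```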
  {
    "library": "tensorflow",
    "name": "local_variables_initializer",
    "source_code": "@tf_export(v1=['initializers.local_variables', 'local_variables_initializer'])\ndef local_variables_initializer():\n    if context.executing_eagerly():\n        return control_flow_ops.no_op(name='local_variables_initializer')\n    return variables_initializer(local_variables())",
    "docstring": "Returns an Op that initializes all local variables. This is just a shortcut for @compatibility(TF2) In TF2, variables are initialized immediately when they are created. There is no longer a need to run variable initializers before using them. @end_compatibility Returns: An Op that initializes all local variables in the graph.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\variables.py",
    "ast_data": "FunctionDef name:local_variables_initializer arguments If Call Return return:yes Call Return return:yes Call Call Call"
  },
  {
    "library": "scipy",
    "name": "redrho",
    "source_code": "def redrho(rho_in, rhoend):\n    if DEBUGGING:\n        assert rho_in > rhoend > 0\n    rho_ratio = rho_in / rhoend\n    if rho_ratio > 250:\n        rho = 0.1 * rho_in\n    elif rho_ratio <= 16:\n        rho = rhoend\n    else:\n        rho = np.sqrt(rho_ratio) * rhoend\n    if DEBUGGING:\n        assert rho_in > rho >= rhoend\n    return rho",
    "docstring": "This function calculates RHO when it needs to be reduced. The scheme is shared by UOBYQA, NEWUOA, BOBYQA, LINCOA. For COBYLA, Powell's code reduces RHO by 'RHO *= 0.5; if RHO <= 1.5 * RHOEND: RHO = RHOEND' as specified in (11) of the COBYLA paper. However, this scheme seems to work better, especially after we introduce DELTA.",
    "type": "function",
    "file_path": "scipy\\scipy\\_lib\\pyprima\\pyprima\\src\\pyprima\\common\\redrho.py",
    "ast_data": "FunctionDef name:redrho arg:rho_in arg:rhoend arguments arg arg If Compare Assign If Compare Assign If Compare Assign Assign Call If Compare Return return:yes"
  },
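A numeric walk-through of the three reduction regimes with `rhoend = 1e-6` (the plain-function form below drops the DEBUGGING asserts):

```python
import numpy as np

def redrho(rho_in, rhoend):
    ratio = rho_in / rhoend
    if ratio > 250:
        return 0.1 * rho_in         # still far from rhoend: shrink by 10x
    elif ratio <= 16:
        return rhoend               # close enough: snap to rhoend
    return np.sqrt(ratio) * rhoend  # in between: geometric mean

print(redrho(1e-2, 1e-6))  # 1e-3  (ratio 1e4 > 250)
print(redrho(1e-4, 1e-6))  # 1e-5  (sqrt(rho_in * rhoend), geometric mean)
print(redrho(1e-5, 1e-6))  # 1e-6  (ratio 10 <= 16)
```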
  {
    "library": "matplotlib",
    "name": "transform_non_affine",
    "source_code": "def transform_non_affine(self, values):\n    return 1.0 / (1 + 10 ** (-values))",
    "docstring": "logistic transform (base 10)",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\scale.py",
    "ast_data": "FunctionDef name:transform_non_affine arg:self arg:values arguments arg arg Return return:yes"
  },
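The transform above is the base-10 logistic; a round-trip with its inverse (the base-10 logit) makes the mapping concrete:

```python
import numpy as np

def logistic10(values):
    return 1.0 / (1 + 10 ** (-values))  # the transform above

def logit10(p):
    return np.log10(p / (1 - p))        # its inverse

v = np.array([-1.0, 0.0, 2.0])
print(logistic10(v))           # [0.0909... 0.5 0.9901...]
print(logit10(logistic10(v)))  # [-1.  0.  2.]  (round trip)
```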
  {
    "library": "tensorflow",
    "name": "_NonEagerInputs",
    "source_code": "def _NonEagerInputs(op: ops.Operation, xs_set):\n    return [t for t in _Inputs(op, xs_set) if not isinstance(t, ops.EagerTensor)]",
    "docstring": "Returns the inputs of op, crossing closure boundaries where necessary. Does not return any captured EagerTensors, i.e., the number of tensors returned may be less than the actual number of inputs. Args: op: Operation xs_set: ObjectIdentitySet of Tensors we are differentiating w.r.t. Returns: A list of tensors. The tensors may be from multiple Graph/FuncGraphs if op is in a FuncGraph and has captured inputs.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\gradients_util.py",
    "ast_data": "FunctionDef name:_NonEagerInputs arg:op arg:xs_set arguments arg arg Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "_get_dtype_and_is_dynamic",
    "source_code": "def _get_dtype_and_is_dynamic(obs_or_fq: Optional[ObserverOrFakeQuantize]) -> tuple[Optional[torch.dtype], bool]:\n    if obs_or_fq is None:\n        return (None, False)\n    else:\n        return (obs_or_fq.dtype, getattr(obs_or_fq, 'is_dynamic', False))",
    "docstring": "Given a constructor for observer or fake quant module, returns a Tuple of dtype and is_dynamic",
    "type": "function",
    "file_path": "pytorch\\torch\\ao\\quantization\\fx\\prepare.py",
    "ast_data": "FunctionDef name:_get_dtype_and_is_dynamic arg:obs_or_fq arguments arg If Compare Return return:yes Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, op_type, statistic_type) -> None:\n    if not isinstance(op_type, str):\n        raise TypeError('op_type must be a string.')\n    if ',' in op_type:\n        raise TypeError('op_type must not contain a comma.')\n    self._op_type = op_type\n    if not isinstance(statistic_type, str):\n        raise TypeError('statistic_type must be a string.')\n    if ',' in statistic_type:\n        raise TypeError('statistic_type must not contain a comma.')\n    self._statistic_type = statistic_type",
    "docstring": "Saves the as the type.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\ops.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:op_type arg:statistic_type arguments arg arg arg If Call Raise Call If Compare Raise Call Assign If Call Raise Call If Compare Raise Call Assign"
  },
  {
    "library": "tensorflow",
    "name": "_build_element_shape",
    "source_code": "def _build_element_shape(shape):\n    if isinstance(shape, tensor_lib.Tensor):\n        return shape\n    if isinstance(shape, tensor_shape.TensorShape):\n        shape = shape.as_list() if shape else None\n    if shape is None:\n        return -1\n    if isinstance(shape, (np.ndarray, np.generic)) or not shape:\n        return ops.convert_to_tensor(shape, dtype=dtypes.int32)\n\n    def convert(val):\n        if val is None:\n            return -1\n        if isinstance(val, tensor_lib.Tensor):\n            return val\n        if isinstance(val, tensor_shape.Dimension):\n            return val.value if val.value is not None else -1\n        return val\n    return [convert(d) for d in shape]",
    "docstring": "Converts shape to a format understood by list_ops for element_shape. If is already a it is returned as-is. We do not perform a type check here. If shape is None or a TensorShape with unknown rank, -1 is returned. If shape is a scalar, an int32 tensor with empty list is returned. Note we do directly return an empty list since ops.convert_to_tensor would conver it to a float32 which is not a valid type for element_shape. If shape is a sequence of dims, None's in the list are replaced with -1. We do not check the dtype of the other dims. Args: shape: Could be None, Tensor, TensorShape or a list of dims (each dim could be a None, scalar or Tensor). Returns: A None-free shape that can be converted to a tensor.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\list_ops.py",
    "ast_data": "FunctionDef name:_build_element_shape arg:shape arguments arg If Call Return return:yes If Call Assign Call If Compare Return return:yes If BoolOp Call Return return:yes Call FunctionDef name:convert arg:val arguments arg If Compare Return return:yes If Call Return return:yes If Call Return return:yes Compare Return return:yes Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_prepare_skip_target_masks",
    "source_code": "def _prepare_skip_target_masks(self):\n    return [l is None for l in self.loss_functions]",
    "docstring": "Boolean mask for whether the target in the output list should be skipped. If the loss function corresponding to a model output is None, then this output will be skipped during total loss calculation and feed targets preparation. Returns: A boolean list for whether the corresponding target in the output list should be skipped during loss calculation.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\training_v1.py",
    "ast_data": "FunctionDef name:_prepare_skip_target_masks arg:self arguments arg Return return:yes Compare"
  },
  {
    "library": "pandas",
    "name": "_getitem_slice",
    "source_code": "def _getitem_slice(self: MultiIndex, slobj: slice) -> MultiIndex:\n    sortorder = None\n    if slobj.step is None or slobj.step > 0:\n        sortorder = self.sortorder\n    new_codes = [level_codes[slobj] for level_codes in self.codes]\n    return type(self)(levels=self.levels, codes=new_codes, names=self._names, sortorder=sortorder, verify_integrity=False)",
    "docstring": "Fastpath for __getitem__ when we know we have a slice.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\indexes\\multi.py",
    "ast_data": "FunctionDef name:_getitem_slice arg:self arg:slobj arguments arg arg Assign If BoolOp Compare Compare Assign Assign Return return:yes Call Call"
  },
  {
    "library": "scipy",
    "name": "_matvec",
    "source_code": "def _matvec(self, x):\n    x = x.reshape(self.shape[0], -1)\n    return self._diag()[:, np.newaxis] * x",
    "docstring": "Construct matrix-free callable banded-matrix-vector multiplication by the Mikota mass matrix without constructing or storing the matrix itself using the knowledge of its entries and the diagonal format.",
    "type": "method",
    "file_path": "scipy\\scipy\\sparse\\linalg\\_special_sparse_arrays.py",
    "ast_data": "FunctionDef name:_matvec arg:self arg:x arguments arg arg Assign Call Return return:yes Call"
  },
  {
    "library": "cryptography",
    "name": "parameter_numbers",
    "source_code": "@abc.abstractmethod\ndef parameter_numbers(self) -> DHParameterNumbers:\n    pass",
    "docstring": "Returns a DHParameterNumbers.",
    "type": "method",
    "file_path": "cryptography\\src\\cryptography\\hazmat\\primitives\\asymmetric\\dh.py",
    "ast_data": "FunctionDef name:parameter_numbers arg:self arguments arg"
  },
  {
    "library": "tensorflow",
    "name": "_list_node_attributes",
    "source_code": "def _list_node_attributes(self, node_name):\n    lines = []\n    lines.append('')\n    lines.append('Node attributes:')\n    attrs = self._debug_dump.node_attributes(node_name)\n    for attr_key in attrs:\n        lines.append('  %s:' % attr_key)\n        attr_val_str = repr(attrs[attr_key]).strip().replace('\\n', ' ')\n        lines.append('    %s' % attr_val_str)\n        lines.append('')\n    return debugger_cli_common.RichTextLines(lines)",
    "docstring": "List neighbors (inputs or recipients) of a node. Args: node_name: Name of the node of which the attributes are to be listed. Returns: A RichTextLines object.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\debug\\cli\\analyzer_cli.py",
    "ast_data": "FunctionDef name:_list_node_attributes arg:self arg:node_name arguments arg arg Assign Call Call Assign Call For Call Assign Call Call Call Call Call Return return:yes Call"
  },
  {
    "library": "sphinx",
    "name": "process_doc",
    "source_code": "def process_doc(self, env: BuildEnvironment, docname: str, document: nodes.document) -> None:\n    pass",
    "docstring": "Process a document after it is read by the environment.",
    "type": "method",
    "file_path": "sphinx\\sphinx\\domains\\__init__.py",
    "ast_data": "FunctionDef name:process_doc arg:self arg:env arg:docname arg:document arguments arg arg arg arg"
  },
  {
    "library": "tensorflow",
    "name": "_transform_feature",
    "source_code": "@deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE, _FEATURE_COLUMN_DEPRECATION)\ndef _transform_feature(self, inputs):\n    feature_tensors = []\n    for key in _collect_leaf_level_keys(self):\n        if isinstance(key, six.string_types):\n            feature_tensors.append(inputs.get(key))\n        elif isinstance(key, (CategoricalColumn, fc_old._CategoricalColumn)):\n            ids_and_weights = key._get_sparse_tensors(inputs)\n            if ids_and_weights.weight_tensor is not None:\n                raise ValueError('crossed_column does not support weight_tensor, but the given column populates weight_tensor. Given column: {}'.format(key.name))\n            feature_tensors.append(ids_and_weights.id_tensor)\n        else:\n            raise ValueError('Unsupported column type. Given: {}'.format(key))\n    return sparse_ops.sparse_cross_hashed(inputs=feature_tensors, num_buckets=self.hash_bucket_size, hash_key=self.hash_key)",
    "docstring": "Generates a hashed sparse cross from the input tensors.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\feature_column\\feature_column_v2.py",
    "ast_data": "FunctionDef name:_transform_feature arg:self arg:inputs arguments arg arg Assign For Call If Call Call Call If Call Assign Call If Compare Raise Call Call Call Raise Call Call Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "quantize_dynamic_jit",
    "source_code": "def quantize_dynamic_jit(model, qconfig_dict, inplace=False, debug=False):\n    torch._C._log_api_usage_once('quantization_api.quantize_jit.quantize_dynamic_jit')\n    return _quantize_jit(model, qconfig_dict, inplace=inplace, debug=debug, quant_type=QuantType.DYNAMIC)",
    "docstring": "Quantize the input float TorchScript model with post training dynamic quantization. Currently only qint8 quantization of torch.nn.Linear is supported. Args: : input float TorchScript model : qconfig_dict is a dictionary with names of sub modules as key and qconfig for that module as value, please see detailed descriptions in :func: : carry out model transformations in-place, the original module is mutated : flag for producing a debug friendly model (preserve weight attribute) Return: Quantized TorchSciprt model. Example:",
    "type": "function",
    "file_path": "pytorch\\torch\\ao\\quantization\\quantize_jit.py",
    "ast_data": "FunctionDef name:quantize_dynamic_jit arg:model arg:qconfig_dict arg:inplace arg:debug arguments arg arg arg arg Call Return return:yes Call"
  },
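A minimal usage sketch for `quantize_dynamic_jit`; the `TinyNet` module and the choice of `default_dynamic_qconfig` are illustrative assumptions, not taken from the record above. The empty-string key in the qconfig dict applies the qconfig to the whole model.

```python
import torch
from torch.ao.quantization import default_dynamic_qconfig
from torch.ao.quantization.quantize_jit import quantize_dynamic_jit

class TinyNet(torch.nn.Module):  # hypothetical model for illustration
    def __init__(self):
        super().__init__()
        self.fc = torch.nn.Linear(8, 4)  # only nn.Linear is supported here

    def forward(self, x):
        return self.fc(x)

scripted = torch.jit.script(TinyNet())
# '' as the key applies the qconfig to the whole scripted model.
quantized = quantize_dynamic_jit(scripted, {'': default_dynamic_qconfig})
print(quantized(torch.randn(2, 8)).shape)  # torch.Size([2, 4])
```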
  {
    "library": "cherrypy",
    "name": "kwargs",
    "source_code": "@kwargs.setter\ndef kwargs(self, kwargs):\n    cherrypy.serving.request.kwargs = kwargs\n    self._kwargs = kwargs",
    "docstring": "Set the named request keyword arguments as :class:.",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\_cpdispatch.py",
    "ast_data": "FunctionDef name:kwargs arg:self arg:kwargs arguments arg arg Assign Assign"
  },
  {
    "library": "django",
    "name": "unapply_migration",
    "source_code": "def unapply_migration(self, state, migration, fake=False):\n    if self.progress_callback:\n        self.progress_callback('unapply_start', migration, fake)\n    if not fake:\n        with self.connection.schema_editor(atomic=migration.atomic) as schema_editor:\n            state = migration.unapply(state, schema_editor)\n    self.record_migration(migration.app_label, migration.name, forward=False)\n    if self.progress_callback:\n        self.progress_callback('unapply_success', migration, fake)\n    return state",
    "docstring": "Run a migration backwards.",
    "type": "method",
    "file_path": "django\\django\\db\\migrations\\executor.py",
    "ast_data": "FunctionDef name:unapply_migration arg:self arg:state arg:migration arg:fake arguments arg arg arg arg If Call If With Call Assign Call Call If Call Return return:yes"
  },
  {
    "library": "django",
    "name": "get_version",
    "source_code": "def get_version(self):\n    return django.get_version()",
    "docstring": "Return the Django version, which should be correct for all built-in Django commands. User-supplied commands can override this method to return their own version.",
    "type": "method",
    "file_path": "django\\django\\core\\management\\base.py",
    "ast_data": "FunctionDef name:get_version arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, dataset=None, components=None, element_spec=None):\n    super(OwnedIterator, self).__init__()\n    if dataset is None:\n        if components is None or element_spec is None:\n            raise ValueError('When `dataset` is not provided, both `components` and `element_spec` must be specified.')\n        self._element_spec = element_spec\n        self._flat_output_types = structure.get_flat_tensor_types(self._element_spec)\n        self._flat_output_shapes = structure.get_flat_tensor_shapes(self._element_spec)\n        self._components = components\n        self._iterator_resource, = components\n    else:\n        if components is not None or element_spec is not None:\n            raise ValueError('When `dataset` is provided, `element_spec` and `components` must not be specified.')\n        self._create_iterator(dataset)\n    self._get_next_call_count = 0",
    "docstring": "Creates a new iterator from the given dataset. If is not specified, the iterator will be created from the given tensor components and element structure. In particular, the alternative for constructing the iterator is used when the iterator is reconstructed from it representation. Args: dataset: A object. components: Tensor components to construct the iterator from. element_spec: A (nested) structure of objects that represents the type specification of elements of the iterator. Raises: ValueError: If is not provided and either or is not provided. Or is provided and either and is provided.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\data\\ops\\iterator_ops.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:dataset arg:components arg:element_spec arguments arg arg arg arg Call Call If Compare If BoolOp Compare Compare Raise Call Assign Assign Call Assign Call Assign Assign If BoolOp Compare Compare Raise Call Call Assign"
  },
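For context, the `dataset` branch of this constructor is what runs when a `tf.data.Dataset` is iterated eagerly; a minimal sketch:

```python
import tensorflow as tf

# iter() on a dataset builds an OwnedIterator through the `dataset` path
# of __init__ above; next() then drives the iterator resource.
ds = tf.data.Dataset.range(3)
it = iter(ds)
print(next(it).numpy())  # 0
print(next(it).numpy())  # 1
```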
  {
    "library": "pandas",
    "name": "dtypes",
    "source_code": "@property\ndef dtypes(self) -> DtypeObj:\n    return self.dtype",
    "docstring": "Return the dtype object of the underlying data. See Also -------- DataFrame.dtypes : Return the dtypes in the DataFrame. Examples -------- >>> s = pd.Series([1, 2, 3]) >>> s.dtypes dtype('int64')",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\series.py",
    "ast_data": "FunctionDef name:dtypes arg:self arguments arg Return return:yes"
  },
  {
    "library": "pandas",
    "name": "__setstate__",
    "source_code": "def __setstate__(self, state) -> None:\n    if not isinstance(state, dict):\n        return super().__setstate__(state)\n    if '_dtype' not in state:\n        state['_dtype'] = CategoricalDtype(state['_categories'], state['_ordered'])\n    if '_codes' in state and '_ndarray' not in state:\n        state['_ndarray'] = state.pop('_codes')\n    super().__setstate__(state)",
    "docstring": "Necessary for making this object picklable",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\arrays\\categorical.py",
    "ast_data": "FunctionDef name:__setstate__ arg:self arg:state arguments arg arg If Call Return return:yes Call Call If Compare Assign Call If BoolOp Compare Compare Assign Call Call Call"
  },
  {
    "library": "scipy",
    "name": "_order_complex_poles",
    "source_code": "def _order_complex_poles(poles):\n    ordered_poles = np.sort(poles[np.isreal(poles)])\n    im_poles = []\n    for p in np.sort(poles[np.imag(poles) < 0]):\n        if np.conj(p) in poles:\n            im_poles.extend((p, np.conj(p)))\n    ordered_poles = np.hstack((ordered_poles, im_poles))\n    if poles.shape[0] != len(ordered_poles):\n        raise ValueError('Complex poles must come with their conjugates')\n    return ordered_poles",
    "docstring": "Check we have complex conjugates pairs and reorder P according to YT, ie real_poles, complex_i, conjugate complex_i, .... The lexicographic sort on the complex poles is added to help the user to compare sets of poles.",
    "type": "function",
    "file_path": "scipy\\scipy\\signal\\_ltisys.py",
    "ast_data": "FunctionDef name:_order_complex_poles arg:poles arguments arg Assign Call Call Assign For Call Compare Call If Compare Call Call Call Assign Call If Compare Call Raise Call Return return:yes"
  },
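A quick demonstration of the ordering this helper enforces, re-implemented from the body above for illustration (real poles first, then each complex pole followed by its conjugate):

```python
import numpy as np

def order_complex_poles(poles):
    # Same steps as scipy's private helper above.
    ordered = np.sort(poles[np.isreal(poles)])
    im_poles = []
    for p in np.sort(poles[np.imag(poles) < 0]):
        if np.conj(p) in poles:
            im_poles.extend((p, np.conj(p)))
    ordered = np.hstack((ordered, im_poles))
    if poles.shape[0] != len(ordered):
        raise ValueError('Complex poles must come with their conjugates')
    return ordered

poles = np.array([-2.0, -1 + 1j, -3.0, -1 - 1j])
print(order_complex_poles(poles))
# [-3.+0.j -2.+0.j -1.-1.j -1.+1.j]
```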
  {
    "library": "tensorflow",
    "name": "_safe_scalar_div",
    "source_code": "def _safe_scalar_div(numerator, denominator, name):\n    numerator.get_shape().with_rank_at_most(1)\n    denominator.get_shape().with_rank_at_most(1)\n    return math_ops.div_no_nan(numerator, denominator, name=name)",
    "docstring": "Divides two values, returning 0 if the denominator is 0. Args: numerator: A scalar . denominator: A scalar . name: Name for the returned op. Returns: 0 if == 0, else /",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\metrics_impl.py",
    "ast_data": "FunctionDef name:_safe_scalar_div arg:numerator arg:denominator arg:name arguments arg arg arg Call Call Call Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_if_ge_zero",
    "source_code": "def _if_ge_zero(value, true_fn, false_fn):\n    if isinstance(value, tensor_lib.Tensor):\n        const_value = tensor_util.constant_value(value)\n        if const_value is None:\n            return cond.cond(value >= 0, true_fn, false_fn)\n        else:\n            value = const_value\n    if value >= 0:\n        return true_fn()\n    else:\n        return false_fn()",
    "docstring": "Returns .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\ragged_getitem.py",
    "ast_data": "FunctionDef name:_if_ge_zero arg:value arg:true_fn arg:false_fn arguments arg arg arg If Call Assign Call If Compare Return return:yes Call Compare Assign If Compare Return return:yes Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "TensorBoardDebugHook",
    "source_code": "class TensorBoardDebugHook(GrpcDebugHook):\n\n    def __init__(self, grpc_debug_server_addresses, thread_name_filter=None, send_traceback_and_source_code=True):\n\n        def _gated_grpc_watch_fn(fetches, feeds):\n            del fetches, feeds\n            return framework.WatchOptions(debug_ops=['DebugIdentity(gated_grpc=true)'])\n        super(TensorBoardDebugHook, self).__init__(grpc_debug_server_addresses, watch_fn=_gated_grpc_watch_fn, thread_name_filter=thread_name_filter)\n        self._grpc_debug_server_addresses = grpc_debug_server_addresses\n        self._send_traceback_and_source_code = send_traceback_and_source_code\n        self._sent_graph_version = -1\n        grpc_wrapper.register_signal_handler()\n\n    def before_run(self, run_context):\n        if self._send_traceback_and_source_code:\n            self._sent_graph_version = grpc_wrapper.publish_traceback(self._grpc_debug_server_addresses, run_context.session.graph, run_context.original_args.feed_dict, run_context.original_args.fetches, self._sent_graph_version)\n        return super(TensorBoardDebugHook, self).before_run(run_context)",
    "docstring": "A tfdbg hook that can be used with TensorBoard Debugger Plugin. This hook is the same as , except that it uses a predefined that 1) uses debug ops with the attribute set to , to allow the interactive enabling and disabling of tensor breakpoints. 2) watches all tensors in the graph. This saves the need for the user to define a .",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\debug\\wrappers\\hooks.py",
    "ast_data": "ClassDef name:TensorBoardDebugHook FunctionDef name:__init__ arg:self arg:grpc_debug_server_addresses arg:thread_name_filter arg:send_traceback_and_source_code arguments arg arg arg arg FunctionDef name:_gated_grpc_watch_fn arg:fetches arg:feeds arguments arg arg Return return:yes Call Call Call Assign Assign Assign Call FunctionDef name:before_run arg:self arg:run_context arguments arg arg If Assign Call Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "_assign_bucket_subset_to_rank",
    "source_code": "def _assign_bucket_subset_to_rank(self, bucket_index: int, bucket_params: list[torch.Tensor], bucket_offset: int, assigned_rank: int, assigned_ranks_per_bucket: list[set[int]]) -> None:\n    overlap_info = self._overlap_info\n    if len(bucket_params) == 0:\n        raise ValueError('Empty bucket assignment')\n    params_per_rank = overlap_info.params_per_rank\n    offsets = overlap_info.offsets\n    self._bucket_assignments_per_rank_cache[assigned_rank][bucket_index] = _DDPBucketAssignment(bucket_index, bucket_params, bucket_offset)\n    if self.global_rank == assigned_rank:\n        offsets[bucket_index] = len(params_per_rank[assigned_rank])\n    params_per_rank[assigned_rank].extend(bucket_params)\n    assigned_ranks_per_bucket[bucket_index].add(assigned_rank)\n    self._overlap_info.num_bucket_assignments += 1",
    "docstring": "Assign `DistributedDataParallelDistributedDataParallelset` of group ranks assigned to each bucket.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\optim\\zero_redundancy_optimizer.py",
    "ast_data": "FunctionDef name:_assign_bucket_subset_to_rank arg:self arg:bucket_index arg:bucket_params arg:bucket_offset arg:assigned_rank arg:assigned_ranks_per_bucket arguments arg arg arg arg arg arg Assign If Compare Call Raise Call Assign Assign Assign Call If Compare Assign Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "TensorRepr",
    "source_code": "class TensorRepr(gdb.Command):\n    __doc__ = textwrap.dedent(__doc__).strip()\n\n    def __init__(self) -> None:\n        gdb.Command.__init__(self, 'torch-tensor-repr', gdb.COMMAND_USER, gdb.COMPLETE_EXPRESSION)\n\n    def invoke(self, args: str, from_tty: bool) -> None:\n        args = gdb.string_to_argv(args)\n        if len(args) != 1:\n            print('Usage: torch-tensor-repr EXP')\n            return\n        name = args[0]\n        with DisableBreakpoints():\n            res = gdb.parse_and_eval(f'torch::gdb::tensor_repr({name})')\n            print(f'Python-level repr of {name}:')\n            print(res.string())\n            gdb.parse_and_eval(f'(void)free({int(res)})')",
    "docstring": "Print a human readable representation of the given at::Tensor. Usage: torch-tensor-repr EXP at::Tensor instances do not have a C++ implementation of a repr method: in pytorch, this is done by pure-Python code. As such, torch-tensor-repr internally creates a Python wrapper for the given tensor and call repr() on it.",
    "type": "class",
    "file_path": "pytorch\\tools\\gdb\\pytorch-gdb.py",
    "ast_data": "ClassDef name:TensorRepr Assign Call Call FunctionDef name:__init__ arg:self arguments arg Call FunctionDef name:invoke arg:self arg:args arg:from_tty arguments arg arg arg Assign Call If Compare Call Call Return return:no Assign With Call Assign Call Call Call Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "print_comparisons_n_shadows_model",
    "source_code": "def print_comparisons_n_shadows_model(results: NSResultsType) -> None:\n    results_grouped = group_results_by_subgraph(results)\n    results_comparison = create_results_comparison(results_grouped)\n    print_n_shadows_summary(results_comparison)",
    "docstring": "Prints a summary of extracted .",
    "type": "function",
    "file_path": "pytorch\\torch\\ao\\ns\\_numeric_suite_fx.py",
    "ast_data": "FunctionDef name:print_comparisons_n_shadows_model arg:results arguments arg Assign Call Assign Call Call"
  },
  {
    "library": "pytorch",
    "name": "_pid_namespace",
    "source_code": "def _pid_namespace(pid: Optional[int]=None) -> int:\n    pid = pid or os.getpid()\n    link = _pid_namespace_link(pid)\n    return int(link[link.find('[') + 1:-1])",
    "docstring": "Returns the process's namespace id",
    "type": "function",
    "file_path": "pytorch\\torch\\_strobelight\\cli_function_profiler.py",
    "ast_data": "FunctionDef name:_pid_namespace arg:pid arguments arg Assign BoolOp Call Assign Call Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_parse_args",
    "source_code": "def _parse_args(self, *args, **kwargs):\n    if len(args) == 1:\n        self._start = self._build_tensor(0, 'start')\n        self._stop = self._build_tensor(args[0], 'stop')\n        self._step = self._build_tensor(1, 'step')\n    elif len(args) == 2:\n        self._start = self._build_tensor(args[0], 'start')\n        self._stop = self._build_tensor(args[1], 'stop')\n        self._step = self._build_tensor(1, 'step')\n    elif len(args) == 3:\n        self._start = self._build_tensor(args[0], 'start')\n        self._stop = self._build_tensor(args[1], 'stop')\n        self._step = self._build_tensor(args[2], 'step')\n    else:\n        raise ValueError(f'Invalid `args`. The length of `args` should be between 1 and 3 but was {len(args)}.')\n    if 'output_type' in kwargs:\n        self._output_type = kwargs['output_type']\n    else:\n        self._output_type = dtypes.int64\n    self._name = kwargs['name'] if 'name' in kwargs else None",
    "docstring": "Parses arguments according to the same rules as the builtin.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\data\\ops\\range_op.py",
    "ast_data": "FunctionDef name:_parse_args arg:self arguments arg arg arg If Compare Call Assign Call Assign Call Assign Call If Compare Call Assign Call Assign Call Assign Call If Compare Call Assign Call Assign Call Assign Call Raise Call Call If Compare Assign Assign Assign Compare"
  },
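These parsing rules back `tf.data.Dataset.range`, so the builtin-`range` semantics can be checked directly; the `output_type` keyword corresponds to the branch above:

```python
import tensorflow as tf

print(list(tf.data.Dataset.range(5).as_numpy_iterator()))        # [0, 1, 2, 3, 4]
print(list(tf.data.Dataset.range(2, 5).as_numpy_iterator()))     # [2, 3, 4]
print(list(tf.data.Dataset.range(1, 8, 3).as_numpy_iterator()))  # [1, 4, 7]
# output_type overrides the default tf.int64 element dtype.
ds = tf.data.Dataset.range(3, output_type=tf.int32)
print(ds.element_spec)  # TensorSpec(shape=(), dtype=tf.int32, name=None)
```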
  {
    "library": "pytorch",
    "name": "named_parameters",
    "source_code": "def named_parameters(self, *args, **kwargs) -> Iterator[tuple[str, torch.nn.Parameter]]:\n    for param_name, param in super().named_parameters(*args, **kwargs):\n        yield (param_name.replace(_CHECKPOINT_PREFIX, ''), param)",
    "docstring": "Override :meth: to intercept parameter names. remove all occurrences of ``.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\algorithms\\_checkpoint\\checkpoint_wrapper.py",
    "ast_data": "FunctionDef name:named_parameters arg:self arguments arg arg arg For Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "eager_safe_variable_handle",
    "source_code": "def eager_safe_variable_handle(initial_value, shape, shared_name, name, graph_mode):\n    dtype = initial_value.dtype.base_dtype\n    return _variable_handle_from_shape_and_dtype(shape, dtype, shared_name, name, graph_mode, initial_value)",
    "docstring": "Creates a variable handle with information to do shape inference. The dtype is read from and stored in the returned resource tensor's handle data. If , we additionally extract the handle data (if any) from and append it to the . In this case, the returned tensor's handle data is in the form Ops that read from this tensor, such as and , know that correspond to the handle data of the variant(s) stored in the Variable. Args: initial_value: A . shape: The shape of the handle data. Can be (i.e. unknown shape). shared_name: A string. name: A string. graph_mode: A python bool. Returns: The handle, a of type .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\resource_variable_ops.py",
    "ast_data": "FunctionDef name:eager_safe_variable_handle arg:initial_value arg:shape arg:shared_name arg:name arg:graph_mode arguments arg arg arg arg arg Assign Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "cov",
    "source_code": "def cov(self, alpha):\n    alpha = _dirichlet_check_parameters(alpha)\n    alpha0 = np.sum(alpha)\n    a = alpha / alpha0\n    cov = (np.diag(a) - np.outer(a, a)) / (alpha0 + 1)\n    return _squeeze_output(cov)",
    "docstring": "Covariance matrix of the Dirichlet distribution. Parameters ---------- %(_dirichlet_doc_default_callparams)s Returns ------- cov : ndarray The covariance matrix of the distribution.",
    "type": "method",
    "file_path": "scipy\\scipy\\stats\\_multivariate.py",
    "ast_data": "FunctionDef name:cov arg:self arg:alpha arguments arg arg Assign Call Assign Call Assign Assign Call Call Return return:yes Call"
  },
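A worked check of the covariance formula used in the body above, in plain NumPy; the `alpha` value is an arbitrary example:

```python
import numpy as np

# cov = (diag(a) - outer(a, a)) / (alpha0 + 1), with a = alpha / alpha0
alpha = np.array([1.0, 2.0, 3.0])
alpha0 = alpha.sum()
a = alpha / alpha0
cov = (np.diag(a) - np.outer(a, a)) / (alpha0 + 1)
print(cov)
# The diagonal reproduces the marginal variances a_i * (1 - a_i) / (alpha0 + 1).
print(a * (1 - a) / (alpha0 + 1))
```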
  {
    "library": "scikit-learn",
    "name": "theta",
    "source_code": "@theta.setter\ndef theta(self, theta):\n    k_dims = self.k1.n_dims\n    for i, kernel in enumerate(self.kernels):\n        kernel.theta = theta[i * k_dims:(i + 1) * k_dims]",
    "docstring": "Sets the (flattened, log-transformed) non-fixed hyperparameters. Parameters ---------- theta : array of shape (n_dims,) The non-fixed, log-transformed hyperparameters of the kernel",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\gaussian_process\\kernels.py",
    "ast_data": "FunctionDef name:theta arg:self arg:theta arguments arg arg Assign For Call Assign"
  },
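The setter shown here belongs to a compound kernel, but the same log-space round-trip applies to any composed kernel; a small sketch using scikit-learn's public kernel API:

```python
import numpy as np
from sklearn.gaussian_process.kernels import RBF, ConstantKernel

# theta concatenates the log-transformed hyperparameters of the sub-kernels.
kernel = ConstantKernel(2.0) * RBF(length_scale=1.5)
print(kernel.theta)                # [log(2.0), log(1.5)]
kernel.theta = np.log([4.0, 0.5])  # the setter splits this back per sub-kernel
print(kernel.get_params()['k1__constant_value'])  # 4.0
print(kernel.get_params()['k2__length_scale'])    # 0.5
```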
  {
    "library": "pytorch",
    "name": "clock_rate",
    "source_code": "def clock_rate(device: Optional[Union[Device, int]]=None) -> int:\n    if not torch.version.hip:\n        handle = _get_pynvml_handler(device)\n        return pynvml.nvmlDeviceGetClockInfo(handle, 1)\n    else:\n        return _get_amdsmi_clock_rate(device)",
    "docstring": "Return the clock speed of the GPU SM in MHz (megahertz) over the past sample period as given by . Args: device (torch.device or int, optional): selected device. Returns statistic for the current device, given by :func:, if :attr: is `` (default). Warning: Each sample period may be between 1 second and 1/6 second, depending on the product being queried.",
    "type": "function",
    "file_path": "pytorch\\torch\\cuda\\__init__.py",
    "ast_data": "FunctionDef name:clock_rate arg:device arguments arg If Assign Call Return return:yes Call Return return:yes Call"
  },
  {
    "library": "cherrypy",
    "name": "staticfile",
    "source_code": "def staticfile(filename, root=None, match='', content_types=None, debug=False):\n    request = cherrypy.serving.request\n    if request.method not in ('GET', 'HEAD'):\n        if debug:\n            cherrypy.log('request.method not GET or HEAD', 'TOOLS.STATICFILE')\n        return False\n    if match and (not re.search(match, request.path_info)):\n        if debug:\n            cherrypy.log('request.path_info %r does not match pattern %r' % (request.path_info, match), 'TOOLS.STATICFILE')\n        return False\n    if not os.path.isabs(filename):\n        if not root:\n            msg = \"Static tool requires an absolute filename (got '%s').\" % (filename,)\n            if debug:\n                cherrypy.log(msg, 'TOOLS.STATICFILE')\n            raise ValueError(msg)\n        filename = os.path.join(root, filename)\n    return _attempt(filename, content_types, debug=debug)",
    "docstring": "Serve a static resource from the given (root +) filename. match If given, request.path_info will be searched for the given regular expression before attempting to serve static content. content_types If given, it should be a Python dictionary of {file-extension: content-type} pairs, where 'file-extension' is a string (e.g. \"gif\") and 'content-type' is the value to write out in the Content-Type response header (e.g. \"image/gif\").",
    "type": "function",
    "file_path": "cherrypy\\cherrypy\\lib\\static.py",
    "ast_data": "FunctionDef name:staticfile arg:filename arg:root arg:match arg:content_types arg:debug arguments arg arg arg arg arg Assign If Compare If Call Return return:yes If BoolOp Call If Call Return return:yes If Call If Assign If Call Raise Call Assign Call Return return:yes Call"
  },
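A minimal sketch of how this tool is typically wired up through CherryPy config; the `Root` app and the file path are hypothetical:

```python
import os
import cherrypy

class Root:
    @cherrypy.expose
    def index(self):
        return 'See /robots.txt'

# tools.staticfile requires an absolute filename (or a root to join a
# relative one against), exactly as the checks above enforce.
config = {
    '/robots.txt': {
        'tools.staticfile.on': True,
        'tools.staticfile.filename': os.path.abspath('robots.txt'),
    }
}

if __name__ == '__main__':
    cherrypy.quickstart(Root(), '/', config)
```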
  {
    "library": "tensorflow",
    "name": "_construct_function_from_graph_def",
    "source_code": "def _construct_function_from_graph_def(func, graph_def, frozen_func=None):\n    if frozen_func is None:\n        frozen_func = func\n    for f in graph_def.library.function:\n        while context.context().has_function(f.signature.name):\n            context.context().remove_function(f.signature.name)\n    captures = {c[1].name.split(':')[0]: c[0] for c in frozen_func.graph.captures}\n    new_func = wrap_function.function_from_graph_def(graph_def, [tensor.name for tensor in frozen_func.inputs], [tensor.name for tensor in frozen_func.outputs], captures)\n    new_func.graph.structured_outputs = nest.pack_sequence_as(func.graph.structured_outputs, new_func.graph.structured_outputs)\n    new_func._function_type = func.function_type\n    new_func.graph.structured_input_signature = func.structured_input_signature\n    return new_func",
    "docstring": "Rebuild function from graph_def.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\compiler\\tensorrt\\trt_convert.py",
    "ast_data": "FunctionDef name:_construct_function_from_graph_def arg:func arg:graph_def arg:frozen_func arguments arg arg arg If Compare Assign For While Call Call Call Call Assign Call Assign Call Assign Call Assign Assign Return return:yes"
  },
  {
    "library": "scrapy",
    "name": "ItemMeta",
    "source_code": "class ItemMeta(ABCMeta):\n\n    def __new__(mcs, class_name: str, bases: tuple[type, ...], attrs: dict[str, Any]) -> ItemMeta:\n        classcell = attrs.pop('__classcell__', None)\n        new_bases = tuple((base._class for base in bases if hasattr(base, '_class')))\n        _class = super().__new__(mcs, 'x_' + class_name, new_bases, attrs)\n        fields = getattr(_class, 'fields', {})\n        new_attrs = {}\n        for n in dir(_class):\n            v = getattr(_class, n)\n            if isinstance(v, Field):\n                fields[n] = v\n            elif n in attrs:\n                new_attrs[n] = attrs[n]\n        new_attrs['fields'] = fields\n        new_attrs['_class'] = _class\n        if classcell is not None:\n            new_attrs['__classcell__'] = classcell\n        return super().__new__(mcs, class_name, bases, new_attrs)",
    "docstring": "Metaclass_ of :class: that handles field definitions. .. _metaclass:",
    "type": "class",
    "file_path": "scrapy\\scrapy\\item.py",
    "ast_data": "ClassDef name:ItemMeta FunctionDef name:__new__ arg:mcs arg:class_name arg:bases arg:attrs arguments arg arg arg arg Assign Call Assign Call Call Assign Call Call Assign Call Assign For Call Assign Call If Call Assign If Compare Assign Assign Assign If Compare Assign Return return:yes Call Call"
  },
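A short sketch of what the metaclass does for user code: `Field()` class attributes are collected into the class-level `fields` dict (the `Product` item is an arbitrary example):

```python
import scrapy

class Product(scrapy.Item):
    name = scrapy.Field()
    price = scrapy.Field(serializer=float)

print(Product.fields)  # {'name': {}, 'price': {'serializer': <class 'float'>}}
item = Product(name='widget', price='9.99')
print(item['name'])    # 'widget'
```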
  {
    "library": "matplotlib",
    "name": "Collection3D",
    "source_code": "class Collection3D(Collection):\n\n    def do_3d_projection(self):\n        vs_list = [vs for vs, _ in self._3dverts_codes]\n        if self._axlim_clip:\n            vs_list = [np.ma.array(vs, mask=np.broadcast_to(_viewlim_mask(*vs.T, self.axes), vs.shape)) for vs in vs_list]\n        xyzs_list = [proj3d.proj_transform(*vs.T, self.axes.M) for vs in vs_list]\n        self._paths = [mpath.Path(np.ma.column_stack([xs, ys]), cs) for (xs, ys, _), (_, cs) in zip(xyzs_list, self._3dverts_codes)]\n        zs = np.concatenate([zs for _, _, zs in xyzs_list])\n        return zs.min() if len(zs) else 1000000000.0",
    "docstring": "A collection of 3D paths.",
    "type": "class",
    "file_path": "matplotlib\\lib\\mpl_toolkits\\mplot3d\\art3d.py",
    "ast_data": "ClassDef name:Collection3D FunctionDef name:do_3d_projection arg:self arguments arg Assign If Assign Call Call Call Assign Call Assign Call Call Call Assign Call Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_TakeWhileDataset",
    "source_code": "class _TakeWhileDataset(dataset_ops.UnaryUnchangedStructureDataset):\n\n    def __init__(self, input_dataset, predicate, name=None):\n        self._input_dataset = input_dataset\n        wrapped_func = structured_function.StructuredFunctionWrapper(predicate, self._transformation_name(), dataset=self._input_dataset)\n        if not wrapped_func.output_structure.is_compatible_with(tensor_spec.TensorSpec([], dtypes.bool)):\n            raise ValueError(f'Invalid `predicate`. `predicate` must return a `tf.bool` scalar tensor but its return type is{wrapped_func.output_structure}.')\n        self._predicate = wrapped_func\n        self._name = name\n        variant_tensor = ged_ops.take_while_dataset(self._input_dataset._variant_tensor, other_arguments=self._predicate.function.captured_inputs, predicate=self._predicate.function, **self._common_args)\n        super().__init__(input_dataset, variant_tensor)\n\n    def _functions(self):\n        return [self._predicate]\n\n    def _transformation_name(self):\n        return 'Dataset.take_while()'",
    "docstring": "A dataset that stops iteration when returns false.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\data\\ops\\take_while_op.py",
    "ast_data": "ClassDef name:_TakeWhileDataset FunctionDef name:__init__ arg:self arg:input_dataset arg:predicate arg:name arguments arg arg arg arg Assign Assign Call Call If Call Call Raise Call Assign Assign Assign Call Call Call FunctionDef name:_functions arg:self arguments arg Return return:yes FunctionDef name:_transformation_name arg:self arguments arg Return return:yes"
  },
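This dataset backs the `take_while` transformation; a minimal sketch, assuming the method form available in recent TensorFlow releases:

```python
import tensorflow as tf

# Iteration stops at the first element where the predicate returns False.
ds = tf.data.Dataset.range(10).take_while(lambda x: x < 4)
print(list(ds.as_numpy_iterator()))  # [0, 1, 2, 3]
```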
  {
    "library": "matplotlib",
    "name": "get_linewidth",
    "source_code": "def get_linewidth(self):\n    return self._linewidth",
    "docstring": "Return the line width in points.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\patches.py",
    "ast_data": "FunctionDef name:get_linewidth arg:self arguments arg Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "_get_backend_mod",
    "source_code": "def _get_backend_mod() -> type[matplotlib.backend_bases._Backend]:\n    if _backend_mod is None:\n        switch_backend(rcParams._get('backend'))\n    return cast(type[matplotlib.backend_bases._Backend], _backend_mod)",
    "docstring": "Ensure that a backend is selected and return it. This is currently private, but may be made public in the future.",
    "type": "function",
    "file_path": "matplotlib\\lib\\matplotlib\\pyplot.py",
    "ast_data": "FunctionDef name:_get_backend_mod arguments If Compare Call Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "merge",
    "source_code": "@abc.abstractmethod\ndef merge(self, accumulators):\n    pass",
    "docstring": "Merge several accumulators to a single accumulator. This method takes the partial values in several accumulators and combines them into a single accumulator. This computation must not be order-specific (that is, merge([a, b]) must return the same result as merge([b, a]). Args: accumulators: the accumulators to merge, as a list. Returns: A merged accumulator.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\base_preprocessing_layer.py",
    "ast_data": "FunctionDef name:merge arg:self arg:accumulators arguments arg arg"
  },
  {
    "library": "pytorch",
    "name": "check_csv",
    "source_code": "def check_csv(filename):\n    df = pd.read_csv(filename)\n    failed = []\n    for _, row in df.iterrows():\n        model_name = row['name']\n        status = row['accuracy']\n        if 'pass' not in status:\n            failed.append(model_name)\n        print(f'{model_name:34} {status}')\n    if failed:\n        print(textwrap.dedent(f'\\n                Error {len(failed)} models failed\\n                    {' '.join(failed)}\\n                '))\n        sys.exit(1)",
    "docstring": "Basic accuracy checking.",
    "type": "function",
    "file_path": "pytorch\\benchmarks\\dynamo\\check_csv.py",
    "ast_data": "FunctionDef name:check_csv arg:filename arguments arg Assign Call Assign For Call Assign Assign If Compare Call Call If Call Call Call Call Call"
  },
  {
    "library": "scikit-learn",
    "name": "_initialize_parameters",
    "source_code": "def _initialize_parameters(self, X, random_state):\n    n_samples, _ = X.shape\n    if self.init_params == 'kmeans':\n        resp = np.zeros((n_samples, self.n_components), dtype=X.dtype)\n        label = cluster.KMeans(n_clusters=self.n_components, n_init=1, random_state=random_state).fit(X).labels_\n        resp[np.arange(n_samples), label] = 1\n    elif self.init_params == 'random':\n        resp = np.asarray(random_state.uniform(size=(n_samples, self.n_components)), dtype=X.dtype)\n        resp /= resp.sum(axis=1)[:, np.newaxis]\n    elif self.init_params == 'random_from_data':\n        resp = np.zeros((n_samples, self.n_components), dtype=X.dtype)\n        indices = random_state.choice(n_samples, size=self.n_components, replace=False)\n        resp[indices, np.arange(self.n_components)] = 1\n    elif self.init_params == 'k-means++':\n        resp = np.zeros((n_samples, self.n_components), dtype=X.dtype)\n        _, indices = kmeans_plusplus(X, self.n_components, random_state=random_state)\n        resp[indices, np.arange(self.n_components)] = 1\n    self._initialize(X, resp)",
    "docstring": "Initialize the model parameters. Parameters ---------- X : array-like of shape (n_samples, n_features) random_state : RandomState A random number generator instance that controls the random seed used for the method chosen to initialize the parameters.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\mixture\\_base.py",
    "ast_data": "FunctionDef name:_initialize_parameters arg:self arg:X arg:random_state arguments arg arg arg Assign If Compare Assign Call Assign Call Call Assign Call If Compare Assign Call Call Call If Compare Assign Call Assign Call Assign Call If Compare Assign Call Assign Call Assign Call Call"
  },
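Each branch above corresponds to one value of the public `init_params` option; a small sketch on synthetic data (the two-blob dataset is an arbitrary example):

```python
import numpy as np
from sklearn.mixture import GaussianMixture

rng = np.random.RandomState(0)
X = np.vstack([rng.normal(0, 1, (100, 2)), rng.normal(5, 1, (100, 2))])

for init in ('kmeans', 'random', 'random_from_data', 'k-means++'):
    gm = GaussianMixture(n_components=2, init_params=init, random_state=0).fit(X)
    means = gm.means_[np.argsort(gm.means_[:, 0])]  # sort for stable printing
    print(init, np.round(means, 1))
```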
  {
    "library": "scikit-learn",
    "name": "_transform",
    "source_code": "def _transform(self, X, inverse=False):\n    if sparse.issparse(X):\n        for feature_idx in range(X.shape[1]):\n            column_slice = slice(X.indptr[feature_idx], X.indptr[feature_idx + 1])\n            X.data[column_slice] = self._transform_col(X.data[column_slice], self.quantiles_[:, feature_idx], inverse)\n    else:\n        for feature_idx in range(X.shape[1]):\n            X[:, feature_idx] = self._transform_col(X[:, feature_idx], self.quantiles_[:, feature_idx], inverse)\n    return X",
    "docstring": "Forward and inverse transform. Parameters ---------- X : ndarray of shape (n_samples, n_features) The data used to scale along the features axis. inverse : bool, default=False If False, apply forward transform. If True, apply inverse transform. Returns ------- X : ndarray of shape (n_samples, n_features) Projected data.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\preprocessing\\_data.py",
    "ast_data": "FunctionDef name:_transform arg:self arg:X arg:inverse arguments arg arg arg If Call For Call Assign Call Assign Call For Call Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "PList",
    "source_code": "class PList(StructurePattern):\n\n    def __init__(self, *components):\n        self.components = list(components)\n\n    def __eq__(self, other):\n        return isinstance(other, PList) and self.components == other.components",
    "docstring": "Represents a list of StructurePatterns.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\resource_variable_ops.py",
    "ast_data": "ClassDef name:PList FunctionDef name:__init__ arg:self arguments arg arg Assign Call FunctionDef name:__eq__ arg:self arg:other arguments arg arg Return return:yes BoolOp Call Compare"
  },
  {
    "library": "numpy",
    "name": "concatenate",
    "source_code": "def concatenate(arrays, axis=0):\n    d = np.concatenate([getdata(a) for a in arrays], axis)\n    rcls = get_masked_subclass(*arrays)\n    data = d.view(rcls)\n    for x in arrays:\n        if getmask(x) is not nomask:\n            break\n    else:\n        return data\n    dm = np.concatenate([getmaskarray(a) for a in arrays], axis)\n    dm = dm.reshape(d.shape)\n    data._mask = _shrink_mask(dm)\n    return data",
    "docstring": "Concatenate a sequence of arrays along the given axis. Parameters ---------- arrays : sequence of array_like The arrays must have the same shape, except in the dimension corresponding to (the first, by default). axis : int, optional The axis along which the arrays will be joined. Default is 0. Returns ------- result : MaskedArray The concatenated array with any masked entries preserved. See Also -------- numpy.concatenate : Equivalent function in the top-level NumPy module. Examples -------- >>> import numpy as np >>> import numpy.ma as ma >>> a = ma.arange(3) >>> a[1] = ma.masked >>> b = ma.arange(2, 5) >>> a masked_array(data=[0, --, 2], mask=[False, True, False], fill_value=999999) >>> b masked_array(data=[2, 3, 4], mask=False, fill_value=999999) >>> ma.concatenate([a, b]) masked_array(data=[0, --, 2, 2, 3, 4], mask=[False, True, False, False, False, False], fill_value=999999)",
    "type": "function",
    "file_path": "numpy\\numpy\\ma\\core.py",
    "ast_data": "FunctionDef name:concatenate arg:arrays arg:axis arguments arg arg Assign Call Call Assign Call Assign Call For If Compare Call Return return:yes Assign Call Call Assign Call Assign Call Return return:yes"
  },
  {
    "library": "kornia",
    "name": "_compute_tensor_center3d",
    "source_code": "def _compute_tensor_center3d(tensor: Tensor) -> Tensor:\n    if not 3 <= len(tensor.shape) <= 5:\n        raise AssertionError(f'Must be a 3D tensor as DHW, CDHW and BCDHW. Got {tensor.shape}.')\n    depth, height, width = tensor.shape[-3:]\n    center_x: float = float(width - 1) / 2\n    center_y: float = float(height - 1) / 2\n    center_z: float = float(depth - 1) / 2\n    center: Tensor = torch.tensor([center_x, center_y, center_z], device=tensor.device, dtype=tensor.dtype)\n    return center",
    "docstring": "Compute the center of tensor plane for (D, H, W), (C, D, H, W) and (B, C, D, H, W).",
    "type": "function",
    "file_path": "kornia\\kornia\\geometry\\transform\\affwarp.py",
    "ast_data": "FunctionDef name:_compute_tensor_center3d arg:tensor arguments arg If Compare Call Raise Call Assign Call Call Call Call Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "isdtype",
    "source_code": "def isdtype(dtype: DType, kind: DType | str | tuple[DType | str, ...], xp: Namespace, *, _tuple: bool=True) -> bool:\n    if isinstance(kind, tuple) and _tuple:\n        return any((isdtype(dtype, k, xp, _tuple=False) for k in cast('tuple[DType | str, ...]', kind)))\n    elif isinstance(kind, str):\n        if kind == 'bool':\n            return dtype == xp.bool_\n        elif kind == 'signed integer':\n            return xp.issubdtype(dtype, xp.signedinteger)\n        elif kind == 'unsigned integer':\n            return xp.issubdtype(dtype, xp.unsignedinteger)\n        elif kind == 'integral':\n            return xp.issubdtype(dtype, xp.integer)\n        elif kind == 'real floating':\n            return xp.issubdtype(dtype, xp.floating)\n        elif kind == 'complex floating':\n            return xp.issubdtype(dtype, xp.complexfloating)\n        elif kind == 'numeric':\n            return xp.issubdtype(dtype, xp.number)\n        else:\n            raise ValueError(f'Unrecognized data type kind: {kind!r}')\n    else:\n        return dtype == kind",
    "docstring": "Returns a boolean indicating whether a provided dtype is of a specified data type ``. Note that outside of this function, this compat library does not yet fully support complex numbers. See for more details",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\externals\\array_api_compat\\common\\_aliases.py",
    "ast_data": "FunctionDef name:isdtype arg:dtype arg:kind arg:xp arguments arg arg arg arg If BoolOp Call Return return:yes Call Call Call If Call If Compare Return return:yes Compare If Compare Return return:yes Call If Compare Return return:yes Call If Compare Return return:yes Call If Compare Return return:yes Call If Compare Return return:yes Call If Compare Return return:yes Call Raise Call Return return:yes Compare"
  },
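A usage sketch with NumPy as the array namespace; note the import follows the vendored `file_path` above and is not a public scikit-learn API:

```python
import numpy as np
from sklearn.externals.array_api_compat.common._aliases import isdtype

# kind may be a dtype, a named category, or a tuple of either.
print(isdtype(np.dtype(np.int32), 'signed integer', xp=np))   # True
print(isdtype(np.dtype(np.float64), 'integral', xp=np))       # False
print(isdtype(np.dtype(np.float64), ('integral', 'real floating'), xp=np))  # True
```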
  {
    "library": "pytorch",
    "name": "_cast_grad_to_param_dtype",
    "source_code": "@no_type_check\ndef _cast_grad_to_param_dtype(state: _FSDPState, sharded_grad: torch.Tensor, param: FlatParameter):\n    _assert_in_training_states(state, [TrainingState.FORWARD_BACKWARD])\n    if not _low_precision_hook_enabled(state) and sharded_grad.dtype != param.dtype:\n        low_prec_grad_data = sharded_grad.data\n        sharded_grad.data = sharded_grad.data.to(dtype=param.dtype)\n        _no_dispatch_record_stream(low_prec_grad_data, state._device_handle.current_stream())",
    "docstring": "Casts `` back to the full parameter dtype so that the optimizer step runs with that dtype. This performs an actual cast if 1. parameters were in reduced precision during the forward since then gradients would be in that reduced precision, or 2. parameters were not in reduced precision but gradients were in reduced precision for communication. However, if a low precision communication hook is registered, then this dtype cast happens in the hook instead.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\fsdp\\_runtime_utils.py",
    "ast_data": "FunctionDef name:_cast_grad_to_param_dtype arg:state arg:sharded_grad arg:param arguments arg arg arg Call If BoolOp Call Compare Assign Assign Call Call Call"
  },
  {
    "library": "django",
    "name": "_start_transaction_under_autocommit",
    "source_code": "def _start_transaction_under_autocommit(self):\n    if self.transaction_mode is None:\n        self.cursor().execute('BEGIN')\n    else:\n        self.cursor().execute(f'BEGIN {self.transaction_mode}')",
    "docstring": "Start a transaction explicitly in autocommit mode. Staying in autocommit mode works around a bug of sqlite3 that breaks savepoints when autocommit is disabled.",
    "type": "method",
    "file_path": "django\\django\\db\\backends\\sqlite3\\base.py",
    "ast_data": "FunctionDef name:_start_transaction_under_autocommit arg:self arguments arg If Compare Call Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "query",
    "source_code": "def query(self) -> bool:\n    return torch._C._mps_queryEvent(self.__eventId)",
    "docstring": "Returns True if all work currently captured by event has completed.",
    "type": "method",
    "file_path": "pytorch\\torch\\mps\\event.py",
    "ast_data": "FunctionDef name:query arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "KeyValueTensorInitializer",
    "source_code": "@tf_export('lookup.KeyValueTensorInitializer')\nclass KeyValueTensorInitializer(TableInitializerBase):\n\n    def __init__(self, keys, values, key_dtype=None, value_dtype=None, name=None):\n        if not context.executing_eagerly() and ops.get_default_graph()._get_control_flow_context() is not None:\n            with ops.init_scope():\n                self._keys = ops.convert_to_tensor(keys, dtype=key_dtype, name='keys')\n                self._values = ops.convert_to_tensor(values, dtype=value_dtype, name='values')\n        else:\n            self._keys = ops.convert_to_tensor(keys, dtype=key_dtype, name='keys')\n            self._values = ops.convert_to_tensor(values, dtype=value_dtype, name='values')\n        self._name = name if name is not None else 'key_value_init'\n        if context.executing_eagerly():\n            self._name += str(ops.uid())\n        super(KeyValueTensorInitializer, self).__init__(self._keys.dtype, self._values.dtype)\n\n    def initialize(self, table):\n        check_table_dtypes(table, self._keys.dtype, self._values.dtype)\n        with ops.name_scope(self._name, values=(table.resource_handle, self._keys, self._values)):\n            init_op = gen_lookup_ops.lookup_table_import_v2(table.resource_handle, self._keys, self._values)\n        ops.add_to_collection(ops.GraphKeys.TABLE_INITIALIZERS, init_op)\n        return init_op",
    "docstring": "Table initializers given and tensors. >>> keys_tensor = tf.constant(['a', 'b', 'c']) >>> vals_tensor = tf.constant([7, 8, 9]) >>> input_tensor = tf.constant(['a', 'f']) >>> init = tf.lookup.KeyValueTensorInitializer(keys_tensor, vals_tensor) >>> table = tf.lookup.StaticHashTable( ... init, ... default_value=-1) >>> table.lookup(input_tensor).numpy() array([ 7, -1], dtype=int32)",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\lookup_ops.py",
    "ast_data": "ClassDef name:KeyValueTensorInitializer FunctionDef name:__init__ arg:self arg:keys arg:values arg:key_dtype arg:value_dtype arg:name arguments arg arg arg arg arg arg If BoolOp Call Compare Call Call With Call Assign Call Assign Call Assign Call Assign Call Assign Compare If Call Call Call Call Call FunctionDef name:initialize arg:self arg:table arguments arg arg Call With Call Assign Call Call Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "_fit",
    "source_code": "def _fit(self, X, y=None, precomputed=False):\n    if precomputed:\n        if not (hasattr(X, 'dtype') and X.dtype.kind == 'b'):\n            raise ValueError('precomputed is True but the input data is not a mask')\n        self._precomputed = True\n    else:\n        self._precomputed = False\n    if not self._precomputed:\n        X = self._validate_input(X, in_fit=True)\n    else:\n        _check_n_features(self, X, reset=True)\n    self._n_features = X.shape[1]\n    missing_features_info = self._get_missing_features_info(X)\n    self.features_ = missing_features_info[1]\n    return missing_features_info[0]",
    "docstring": "Fit the transformer on . Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) Input data, where is the number of samples and is the number of features. If , then is a mask of the input data. precomputed : bool Whether the input data is a mask. Returns ------- imputer_mask : {ndarray, sparse matrix} of shape (n_samples, n_features) The imputer mask of the original data.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\impute\\_base.py",
    "ast_data": "FunctionDef name:_fit arg:self arg:X arg:y arg:precomputed arguments arg arg arg arg If If BoolOp Call Compare Raise Call Assign Assign If Assign Call Call Assign Assign Call Assign Return return:yes"
  },
  {
    "library": "pandas",
    "name": "_from_inferred_categories",
    "source_code": "@classmethod\ndef _from_inferred_categories(cls, inferred_categories, inferred_codes, dtype, true_values=None) -> Self:\n    from pandas import Index, to_datetime, to_numeric, to_timedelta\n    cats = Index(inferred_categories)\n    known_categories = isinstance(dtype, CategoricalDtype) and dtype.categories is not None\n    if known_categories:\n        if is_any_real_numeric_dtype(dtype.categories.dtype):\n            cats = to_numeric(inferred_categories, errors='coerce')\n        elif lib.is_np_dtype(dtype.categories.dtype, 'M'):\n            cats = to_datetime(inferred_categories, errors='coerce')\n        elif lib.is_np_dtype(dtype.categories.dtype, 'm'):\n            cats = to_timedelta(inferred_categories, errors='coerce')\n        elif is_bool_dtype(dtype.categories.dtype):\n            if true_values is None:\n                true_values = ['True', 'TRUE', 'true']\n            cats = cats.isin(true_values)\n    if known_categories:\n        categories = dtype.categories\n        codes = recode_for_categories(inferred_codes, cats, categories)\n    elif not cats.is_monotonic_increasing:\n        unsorted = cats.copy()\n        categories = cats.sort_values()\n        codes = recode_for_categories(inferred_codes, unsorted, categories)\n        dtype = CategoricalDtype(categories, ordered=False)\n    else:\n        dtype = CategoricalDtype(cats, ordered=False)\n        codes = inferred_codes\n    return cls._simple_new(codes, dtype=dtype)",
    "docstring": "Construct a Categorical from inferred values. For inferred categories ( is None) the categories are sorted. For explicit , the are cast to the appropriate type. Parameters ---------- inferred_categories : Index inferred_codes : Index dtype : CategoricalDtype or 'category' true_values : list, optional If none are provided, the default ones are \"True\", \"TRUE\", and \"true.\" Returns ------- Categorical",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\arrays\\categorical.py",
    "ast_data": "FunctionDef name:_from_inferred_categories arg:cls arg:inferred_categories arg:inferred_codes arg:dtype arg:true_values arguments arg arg arg arg arg Assign Call Assign BoolOp Call Compare If If Call Assign Call If Call Assign Call If Call Assign Call If Call If Compare Assign Assign Call If Assign Assign Call If Assign Call Assign Call Assign Call Assign Call Assign Call Assign Return return:yes Call"
  },
  {
    "library": "kornia",
    "name": "triangulate_points",
    "source_code": "def triangulate_points(P1: torch.Tensor, P2: torch.Tensor, points1: torch.Tensor, points2: torch.Tensor) -> torch.Tensor:\n    KORNIA_CHECK_SHAPE(P1, ['*', '3', '4'])\n    KORNIA_CHECK_SHAPE(P2, ['*', '3', '4'])\n    KORNIA_CHECK_SHAPE(points1, ['*', 'N', '2'])\n    KORNIA_CHECK_SHAPE(points2, ['*', 'N', '2'])\n    points_shape = max(points1.shape, points2.shape)\n    X = zeros(points_shape[:-1] + (4, 4)).type_as(points1)\n    for i in range(4):\n        X[..., 0, i] = points1[..., 0] * P1[..., 2:3, i] - P1[..., 0:1, i]\n        X[..., 1, i] = points1[..., 1] * P1[..., 2:3, i] - P1[..., 1:2, i]\n        X[..., 2, i] = points2[..., 0] * P2[..., 2:3, i] - P2[..., 0:1, i]\n        X[..., 3, i] = points2[..., 1] * P2[..., 2:3, i] - P2[..., 1:2, i]\n    _, _, V = _torch_svd_cast(X)\n    points3d_h = V[..., -1]\n    points3d: torch.Tensor = convert_points_from_homogeneous(points3d_h)\n    return points3d",
    "docstring": "Reconstructs a bunch of points by triangulation. Triangulates the 3d position of 2d correspondences between several images. Reference: Internally it uses DLT method from Hartley/Zisserman 12.2 pag.312 The input points are assumed to be in homogeneous coordinate system and being inliers correspondences. The method does not perform any robust estimation. Args: P1: The projection matrix for the first camera with shape :math:. P2: The projection matrix for the second camera with shape :math:. points1: The set of points seen from the first camera frame in the camera plane coordinates with shape :math:. points2: The set of points seen from the second camera frame in the camera plane coordinates with shape :math:. Returns: The reconstructed 3d points in the world frame with shape :math:.",
    "type": "function",
    "file_path": "kornia\\kornia\\geometry\\epipolar\\triangulation.py",
    "ast_data": "FunctionDef name:triangulate_points arg:P1 arg:P2 arg:points1 arg:points2 arguments arg arg arg arg Call Call Call Call Assign Call Assign Call Call For Call Assign Assign Assign Assign Assign Call Assign Call Return return:yes"
  },
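A minimal usage sketch with toy cameras; the projection matrices and random correspondences are illustrative only:

```python
import torch
import kornia

P1 = torch.eye(3, 4)[None]   # (1, 3, 4), reference camera at the origin
P2 = torch.eye(3, 4)[None].clone()
P2[..., 0, 3] = 1.0          # second camera translated along x
points1 = torch.rand(1, 10, 2)   # (B, N, 2) image-plane correspondences
points2 = torch.rand(1, 10, 2)
points3d = kornia.geometry.epipolar.triangulate_points(P1, P2, points1, points2)
print(points3d.shape)  # torch.Size([1, 10, 3])
```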
  {
    "library": "django",
    "name": "_get_path_info",
    "source_code": "def _get_path_info(self, direct=False, filtered_relation=None):\n    int_model = self.remote_field.through\n    linkfield1 = int_model._meta.get_field(self.m2m_field_name())\n    linkfield2 = int_model._meta.get_field(self.m2m_reverse_field_name())\n    if direct:\n        join1infos = linkfield1.reverse_path_infos\n        if filtered_relation:\n            join2infos = linkfield2.get_path_info(filtered_relation)\n        else:\n            join2infos = linkfield2.path_infos\n    else:\n        join1infos = linkfield2.reverse_path_infos\n        if filtered_relation:\n            join2infos = linkfield1.get_path_info(filtered_relation)\n        else:\n            join2infos = linkfield1.path_infos\n    join1_final = join1infos[-1].to_opts\n    join2_initial = join2infos[0].from_opts\n    if join1_final is join2_initial:\n        intermediate_infos = []\n    elif issubclass(join1_final.model, join2_initial.model):\n        intermediate_infos = join1_final.get_path_to_parent(join2_initial.model)\n    else:\n        intermediate_infos = join2_initial.get_path_from_parent(join1_final.model)\n    return [*join1infos, *intermediate_infos, *join2infos]",
    "docstring": "Called by both direct and indirect m2m traversal.",
    "type": "method",
    "file_path": "django\\django\\db\\models\\fields\\related.py",
    "ast_data": "FunctionDef name:_get_path_info arg:self arg:direct arg:filtered_relation arguments arg arg arg Assign Assign Call Call Assign Call Call If Assign If Assign Call Assign Assign If Assign Call Assign Assign Assign If Compare Assign If Call Assign Call Assign Call Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "_check_refit_for_multimetric",
    "source_code": "def _check_refit_for_multimetric(self, scores):\n    multimetric_refit_msg = f'For multi-metric scoring, the parameter refit must be set to a scorer key or a callable to refit an estimator with the best parameter setting on the whole data and make the best_* attributes available for that metric. If this is not needed, refit should be set to False explicitly. {self.refit!r} was passed.'\n    valid_refit_dict = isinstance(self.refit, str) and self.refit in scores\n    if self.refit is not False and (not valid_refit_dict) and (not callable(self.refit)):\n        raise ValueError(multimetric_refit_msg)",
    "docstring": "Check is compatible with is valid",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\model_selection\\_search.py",
    "ast_data": "FunctionDef name:_check_refit_for_multimetric arg:self arg:scores arguments arg arg Assign Assign BoolOp Call Compare If BoolOp Compare Call Raise Call"
  },
  {
    "library": "tensorflow",
    "name": "num_workers",
    "source_code": "@property\ndef num_workers(self):\n    return self._num_workers",
    "docstring": "Returns number of workers in the cluster, including chief.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\distribute\\distribute_coordinator_utils.py",
    "ast_data": "FunctionDef name:num_workers arg:self arguments arg Return return:yes"
  },
  {
    "library": "pandas",
    "name": "itemsize",
    "source_code": "@cache_readonly\ndef itemsize(self) -> int:\n    return self.numpy_dtype.itemsize",
    "docstring": "Return the number of bytes in this dtype",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\dtypes\\dtypes.py",
    "ast_data": "FunctionDef name:itemsize arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_wrapped_model",
    "source_code": "@def_function.function(input_signature=input_signature)\ndef _wrapped_model(*args):\n    inputs = args[0] if len(input_signature) == 1 else list(args)\n    with base_layer_utils.call_context().enter(model, inputs=inputs, build_graph=False, training=False, saving=True):\n        outputs = model(inputs, training=False)\n    output_names = model.output_names\n    if output_names is None:\n        from tensorflow.python.keras.engine import compile_utils\n        output_names = compile_utils.create_pseudo_output_names(outputs)\n    outputs = nest.flatten(outputs)\n    return {name: output for name, output in zip(output_names, outputs)}",
    "docstring": "A concrete tf.function that wraps the model's call function.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\saving\\saving_utils.py",
    "ast_data": "FunctionDef name:_wrapped_model arguments arg Assign Compare Call Call With Call Call Assign Call Assign If Compare Assign Call Assign Call Return return:yes Call Call"
  },
  {
    "library": "matplotlib",
    "name": "get_label",
    "source_code": "def get_label(self):\n    return self._label",
    "docstring": "Return the label used for this artist in the legend.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\artist.py",
    "ast_data": "FunctionDef name:get_label arg:self arguments arg Return return:yes"
  },
  {
    "library": "django",
    "name": "deactivate_all",
    "source_code": "def deactivate_all():\n    _active.value = gettext_module.NullTranslations()\n    _active.value.to_language = lambda *args: None",
    "docstring": "Make the active translation object a NullTranslations() instance. This is useful when we want delayed translations to appear as the original string for some reason.",
    "type": "function",
    "file_path": "django\\django\\utils\\translation\\trans_real.py",
    "ast_data": "FunctionDef name:deactivate_all arguments Assign Call Assign arguments arg"
  },
  {
    "library": "tensorflow",
    "name": "_broadcast_grad",
    "source_code": "@ops.RegisterGradient('NcclBroadcast')\ndef _broadcast_grad(op, accumulated_grad):\n    grads = [t for t in accumulated_grad.op.inputs]\n    for t in grads:\n        _check_device(t)\n    with ops.device(op.device):\n        return gen_nccl_ops.nccl_reduce(input=grads, reduction='sum')",
    "docstring": "The gradients for input of . Args: op: The that we are differentiating. accumulated_grad: Accumulated gradients with respect to the output of the op. Returns: Gradients with respect to the input of .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\nccl_ops.py",
    "ast_data": "FunctionDef name:_broadcast_grad arg:op arg:accumulated_grad arguments arg arg Assign For Call With Call Return return:yes Call Call"
  },
  {
    "library": "scipy",
    "name": "compare",
    "source_code": "def compare(all_dict, others, names, module_name):\n    only_all = set()\n    for name in all_dict:\n        if name not in names:\n            for pat in REFGUIDE_AUTOSUMMARY_SKIPLIST:\n                if re.match(pat, module_name + '.' + name):\n                    break\n            else:\n                only_all.add(name)\n    only_ref = set()\n    missing = set()\n    for name in names:\n        if name not in all_dict:\n            for pat in REFGUIDE_ALL_SKIPLIST:\n                if re.match(pat, module_name + '.' + name):\n                    if name not in others:\n                        missing.add(name)\n                    break\n            else:\n                only_ref.add(name)\n    return (only_all, only_ref, missing)",
    "docstring": "Return sets of objects only in __all__, refguide, or completely missing.",
    "type": "function",
    "file_path": "scipy\\tools\\refguide_check.py",
    "ast_data": "FunctionDef name:compare arg:all_dict arg:others arg:names arg:module_name arguments arg arg arg arg Assign Call For If Compare For If Call Call Assign Call Assign Call For If Compare For If Call If Compare Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_serialize_object_graph",
    "source_code": "def _serialize_object_graph(saveable_view: _SaveableView, asset_file_def_index):\n    proto = saved_object_graph_pb2.SavedObjectGraph()\n    saveable_view.fill_object_graph_proto(proto)\n    for concrete_function in saveable_view.concrete_and_gradient_functions:\n        name = compat.as_text(concrete_function.name)\n        serialized = function_serialization.serialize_concrete_function(concrete_function, saveable_view.captured_tensor_node_ids)\n        if serialized is not None:\n            proto.concrete_functions[name].CopyFrom(serialized)\n    for obj, obj_proto in zip(saveable_view.nodes, proto.nodes):\n        _write_object_proto(obj, obj_proto, asset_file_def_index, saveable_view.augmented_graph_view.list_children)\n    return proto",
    "docstring": "Save a SavedObjectGraph proto for .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\saved_model\\save.py",
    "ast_data": "FunctionDef name:_serialize_object_graph arg:saveable_view arg:asset_file_def_index arguments arg arg Assign Call Call For Assign Call Assign Call If Compare Call For Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_fft_r2c",
    "source_code": "def _fft_r2c(func_name: str, input: TensorLikeType, n: Optional[int], dim: int, norm: NormType, forward: bool, onesided: bool) -> TensorLikeType:\n    torch._check(not input.dtype.is_complex, lambda: f'{func_name} expects a floating point input tensor, but got {input.dtype}')\n    input = _maybe_promote_tensor_fft(input)\n    dims = (utils.canonicalize_dim(input.ndim, dim, wrap_scalar=False),)\n    dim_size = n if n is not None else input.shape[dim]\n    torch._check(dim_size >= 1, lambda: f'Invalid number of data points ({dim_size}) specified')\n    if n is not None:\n        input = _resize_fft_input(input, dims, (n,))\n    ret = prims.fft_r2c(input, dim=dims, onesided=onesided)\n    ret = _apply_norm(ret, norm, dim_size, forward)\n    return ret if forward else torch.conj(ret)",
    "docstring": "Common code for performing any real to complex FFT (rfft or ihfft)",
    "type": "function",
    "file_path": "pytorch\\torch\\_refs\\fft.py",
    "ast_data": "FunctionDef name:_fft_r2c arg:func_name arg:input arg:n arg:dim arg:norm arg:forward arg:onesided arguments arg arg arg arg arg arg arg Call arguments Assign Call Assign Call Assign Compare Call Compare arguments If Compare Assign Call Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "fit",
    "source_code": "@_fit_context(prefer_skip_nested_validation=True)\ndef fit(self, raw_documents, y=None):\n    self._check_params()\n    self._warn_for_unused_params()\n    self._tfidf = TfidfTransformer(norm=self.norm, use_idf=self.use_idf, smooth_idf=self.smooth_idf, sublinear_tf=self.sublinear_tf)\n    X = super().fit_transform(raw_documents)\n    self._tfidf.fit(X)\n    return self",
    "docstring": "Learn vocabulary and idf from training set. Parameters ---------- raw_documents : iterable An iterable which generates either str, unicode or file objects. y : None This parameter is not needed to compute tfidf. Returns ------- self : object Fitted vectorizer.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\feature_extraction\\text.py",
    "ast_data": "FunctionDef name:fit arg:self arg:raw_documents arg:y arguments arg arg arg Call Call Assign Call Assign Call Call Call Return return:yes Call"
  },
  {
    "library": "numpy",
    "name": "poly2herm",
    "source_code": "def poly2herm(pol):\n    [pol] = pu.as_series([pol])\n    deg = len(pol) - 1\n    res = 0\n    for i in range(deg, -1, -1):\n        res = hermadd(hermmulx(res), pol[i])\n    return res",
    "docstring": "poly2herm(pol) Convert a polynomial to a Hermite series. Convert an array representing the coefficients of a polynomial (relative to the \"standard\" basis) ordered from lowest degree to highest, to an array of the coefficients of the equivalent Hermite series, ordered from lowest to highest degree. Parameters ---------- pol : array_like 1-D array containing the polynomial coefficients Returns ------- c : ndarray 1-D array containing the coefficients of the equivalent Hermite series. See Also -------- herm2poly Notes ----- The easy way to do conversions between polynomial basis sets is to use the convert method of a class instance. Examples -------- >>> from numpy.polynomial.hermite import poly2herm >>> poly2herm(np.arange(4)) array([1. , 2.75 , 0.5 , 0.375])",
    "type": "function",
    "file_path": "numpy\\numpy\\polynomial\\hermite.py",
    "ast_data": "FunctionDef name:poly2herm arg:pol arguments arg Assign Call Assign Call Assign For Call Assign Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "script_qconfig",
    "source_code": "def script_qconfig(qconfig):\n    return QConfig(activation=torch.jit.script(qconfig.activation())._c, weight=torch.jit.script(qconfig.weight())._c)",
    "docstring": "Instantiate the activation and weight observer modules and script them, these observer module instances will be deepcopied during prepare_jit step.",
    "type": "function",
    "file_path": "pytorch\\torch\\ao\\quantization\\quantize_jit.py",
    "ast_data": "FunctionDef name:script_qconfig arg:qconfig arguments arg Return return:yes Call Call Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "async_scope",
    "source_code": "@tf_export('experimental.async_scope')\n@tf_contextlib.contextmanager\ndef async_scope():\n    remote_async_env_var = 'TF_ENABLE_EAGER_CLIENT_STREAMING_ENQUEUE'\n    old_policy = os.environ.get(remote_async_env_var)\n    try:\n        os.environ[remote_async_env_var] = str(True)\n        yield\n        context().sync_executors()\n    finally:\n        if old_policy is None:\n            del os.environ[remote_async_env_var]\n        else:\n            os.environ[remote_async_env_var] = old_policy",
    "docstring": "Context manager for grouping async operations. Ops/function calls inside the scope can return before finishing the actual execution. When exiting the async scope, a synchronization barrier will be automatically added to ensure the completion of all async op and function execution, potentially raising exceptions if async execution results in an error state. Users may write the following code to asynchronously invoke and log the metric for every steps in a training loop. internally consumes data using , and may throw OutOfRangeError when running out of data. In the case: Yields: Context manager for grouping async operations.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\context.py",
    "ast_data": "FunctionDef name:async_scope arguments Assign Assign Call Try Assign Call Call Call If Compare Assign Call"
  },
  {
    "library": "matplotlib",
    "name": "option_scale_image",
    "source_code": "def option_scale_image(self):\n    return False",
    "docstring": "Return whether arbitrary affine transformations in are supported (True for most vector backends).",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\backend_bases.py",
    "ast_data": "FunctionDef name:option_scale_image arg:self arguments arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "RoundRobinDispatch",
    "source_code": "class RoundRobinDispatch:\n    grid_expr = RoundRobinComboKernelGrid\n\n    @classmethod\n    def codegen_pid_range(cls, kernel: 'ComboKernel', num: int, code: IndentedBuffer) -> None:\n        num_kernels = len(kernel.sub_kernels)\n        if num == 0:\n            cond = 'if'\n        else:\n            cond = 'elif'\n        code.splice(f'{cond} pid % {num_kernels} == {num}:')\n        with code.indent():\n            code.splice(f'pid_offset = pid // {num_kernels}')",
    "docstring": "The dispatcher which dispatches the subkernels in a round robin manner: the blocks are interleavedly dispatched to each subkernel to execute them in parallel. The class defines the methods specific to the dispatch algorithm. Methods: codegen_pid_range(...): codegen the pid range for each subkernel. grid(...): codegen the grid size for launching the combo kernel.",
    "type": "class",
    "file_path": "pytorch\\torch\\_inductor\\codegen\\triton_combo_kernel.py",
    "ast_data": "ClassDef name:RoundRobinDispatch Assign FunctionDef name:codegen_pid_range arg:cls arg:kernel arg:num arg:code arguments arg arg arg arg Assign Call If Compare Assign Assign Call With Call Call"
  },
  {
    "library": "django",
    "name": "get_spheroid",
    "source_code": "@classmethod\ndef get_spheroid(cls, wkt, string=True):\n    srs = gdal.SpatialReference(wkt)\n    sphere_params = srs.ellipsoid\n    sphere_name = srs['spheroid']\n    if not string:\n        return (sphere_name, sphere_params)\n    else:\n        if len(sphere_params) == 3:\n            radius, flattening = (sphere_params[0], sphere_params[2])\n        else:\n            radius, flattening = sphere_params\n        return 'SPHEROID[\"%s\",%s,%s]' % (sphere_name, radius, flattening)",
    "docstring": "Class method used by GeometryField on initialization to retrieve the parameters from the given WKT.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\db\\backends\\base\\models.py",
    "ast_data": "FunctionDef name:get_spheroid arg:cls arg:wkt arg:string arguments arg arg arg Assign Call Assign Assign If Return return:yes If Compare Call Assign Assign Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "__init__",
    "source_code": "def __init__(self, num_classes, matcher, weight_dict, eos_coef, losses):\n    super().__init__()\n    self.num_classes = num_classes\n    self.matcher = matcher\n    self.weight_dict = weight_dict\n    self.eos_coef = eos_coef\n    self.losses = losses\n    empty_weight = torch.ones(self.num_classes + 1)\n    empty_weight[-1] = self.eos_coef\n    self.register_buffer('empty_weight', empty_weight)",
    "docstring": "Create the criterion. Parameters: num_classes: number of object categories, omitting the special no-object category matcher: module able to compute a matching between targets and proposals weight_dict: dict containing as key the names of the losses and as values their relative weight. eos_coef: relative classification weight applied to the no-object category losses: list of all the losses to be applied. See get_loss for list of available losses.",
    "type": "method",
    "file_path": "pytorch\\benchmarks\\functional_autograd_benchmark\\torchvision_models.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:num_classes arg:matcher arg:weight_dict arg:eos_coef arg:losses arguments arg arg arg arg arg arg Call Call Assign Assign Assign Assign Assign Assign Call Assign Call"
  },
  {
    "library": "pytorch",
    "name": "load",
    "source_code": "def load(self, state_dict: dict[str, Any]) -> None:\n    loader.load(state_dict, storage_reader=self.storage_reader, process_group=self.process_group, planner=self.load_planner)",
    "docstring": "Calls :py:meth: . Utilizing values passed during initialization.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\checkpoint\\_checkpointer.py",
    "ast_data": "FunctionDef name:load arg:self arg:state_dict arguments arg arg Call"
  },
  {
    "library": "tensorflow",
    "name": "true_positives",
    "source_code": "@tf_export(v1=['metrics.true_positives'])\ndef true_positives(labels, predictions, weights=None, metrics_collections=None, updates_collections=None, name=None):\n    if context.executing_eagerly():\n        raise RuntimeError('tf.metrics.true_positives is not supported when eager execution is enabled.')\n    with variable_scope.variable_scope(name, 'true_positives', (predictions, labels, weights)):\n        predictions, labels, weights = _remove_squeezable_dimensions(predictions=math_ops.cast(predictions, dtype=dtypes.bool), labels=math_ops.cast(labels, dtype=dtypes.bool), weights=weights)\n        is_true_positive = math_ops.logical_and(math_ops.equal(labels, True), math_ops.equal(predictions, True))\n        return _count_condition(is_true_positive, weights, metrics_collections, updates_collections)",
    "docstring": "Sum the weights of true_positives. If is , weights default to 1. Use weights of 0 to mask values. Args: labels: The ground truth values, a whose dimensions must match . Will be cast to . predictions: The predicted values, a of arbitrary dimensions. Will be cast to . weights: Optional whose rank is either 0, or the same rank as , and must be broadcastable to (i.e., all dimensions must be either , or the same as the corresponding dimension). metrics_collections: An optional list of collections that the metric value variable should be added to. updates_collections: An optional list of collections that the metric update ops should be added to. name: An optional variable_scope name. Returns: value_tensor: A representing the current value of the metric. update_op: An operation that accumulates the error from a batch of data. Raises: ValueError: If and have mismatched shapes, or if is not and its shape doesn't match , or if either or are not a list or tuple. RuntimeError: If eager execution is enabled.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\metrics_impl.py",
    "ast_data": "FunctionDef name:true_positives arg:labels arg:predictions arg:weights arg:metrics_collections arg:updates_collections arg:name arguments arg arg arg arg arg arg If Call Raise Call With Call Assign Call Call Call Assign Call Call Call Return return:yes Call Call"
  },
  {
    "library": "pandas",
    "name": "set_attr",
    "source_code": "def set_attr(self) -> None:\n    setattr(self.attrs, self.kind_attr, self.kind)",
    "docstring": "set the kind for this column",
    "type": "method",
    "file_path": "pandas\\pandas\\io\\pytables.py",
    "ast_data": "FunctionDef name:set_attr arg:self arguments arg Call"
  },
  {
    "library": "pytorch",
    "name": "_pre_load_state_dict_hook",
    "source_code": "@no_type_check\n@torch.no_grad()\ndef _pre_load_state_dict_hook(module: nn.Module, state_dict: dict[str, Any], prefix: str, *args: Any) -> None:\n    fsdp_state = _get_module_fsdp_state_if_fully_sharded_module(module)\n    if fsdp_state.sharding_strategy == ShardingStrategy.NO_SHARD:\n        context = _replace_with_full_state_dict_type(fsdp_state)\n        warnings.warn('When using ``NO_SHARD`` for ``ShardingStrategy``, full_state_dict willbe returned.')\n    else:\n        _set_use_dtensor(fsdp_state)\n        context = contextlib.nullcontext()\n    _lazy_init(fsdp_state, module)\n    if fsdp_state._is_root:\n        SimpleProfiler.reset()\n    with context:\n        _pre_load_state_dict_hook_fn = {StateDictType.FULL_STATE_DICT: _full_pre_load_state_dict_hook, StateDictType.LOCAL_STATE_DICT: _local_pre_load_state_dict_hook, StateDictType.SHARDED_STATE_DICT: _sharded_pre_load_state_dict_hook}\n        if fsdp_state._device_handle.is_available():\n            fsdp_state._device_handle.synchronize()\n        _pre_load_state_dict_hook_fn[fsdp_state._state_dict_type](module, fsdp_state, state_dict, prefix)",
    "docstring": "This is called before `` is used to decide what preprocessing will be done.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\fsdp\\_state_dict_utils.py",
    "ast_data": "FunctionDef name:_pre_load_state_dict_hook arg:module arg:state_dict arg:prefix arguments arg arg arg arg Assign Call If Compare Assign Call Call Call Assign Call Call If Call With Assign If Call Call Call Call"
  },
  {
    "library": "django",
    "name": "get",
    "source_code": "def get(self, request, *args, **kwargs):\n    return self.render_to_response(self.get_context_data())",
    "docstring": "Handle GET requests: instantiate a blank version of the form.",
    "type": "method",
    "file_path": "django\\django\\views\\generic\\edit.py",
    "ast_data": "FunctionDef name:get arg:self arg:request arguments arg arg arg arg Return return:yes Call Call"
  },
  {
    "library": "pandas",
    "name": "is_ea_or_datetimelike_dtype",
    "source_code": "def is_ea_or_datetimelike_dtype(dtype: DtypeObj | None) -> bool:\n    return isinstance(dtype, ExtensionDtype) or lib.is_np_dtype(dtype, 'mM')",
    "docstring": "Check for ExtensionDtype, datetime64 dtype, or timedelta64 dtype. Notes ----- Checks only for dtype objects, not dtype-castable strings or types.",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\dtypes\\common.py",
    "ast_data": "FunctionDef name:is_ea_or_datetimelike_dtype arg:dtype arguments arg Return return:yes BoolOp Call Call"
  },
  {
    "library": "tensorflow",
    "name": "converted_self",
    "source_code": "def converted_self(self):\n    if self._converted_self is None:\n        source = self._function or self._enclosing_graph\n        self._converted_self = source.converted_self().nodes[self._node.name]\n    return self._converted_self",
    "docstring": "The NodeDef to be converted. Returns: The NodeDef to be converted, which can come from either a graph for a function. Derived classes should call this (via 'super') to make sure the node is retrieved from the right place.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\convert_to_constants.py",
    "ast_data": "FunctionDef name:converted_self arg:self arguments arg If Compare Assign BoolOp Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, values, indices, dense_shape=None):\n    self._values = values\n    self._indices = indices\n    self._dense_shape = dense_shape",
    "docstring": "Creates an .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\indexed_slices.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:values arg:indices arg:dense_shape arguments arg arg arg arg Assign Assign Assign"
  },
  {
    "library": "django",
    "name": "_post_clean",
    "source_code": "def _post_clean(self):\n    pass",
    "docstring": "An internal hook for performing additional cleaning after form cleaning is complete. Used for model validation in model forms.",
    "type": "method",
    "file_path": "django\\django\\forms\\forms.py",
    "ast_data": "FunctionDef name:_post_clean arg:self arguments arg"
  },
  {
    "library": "pytorch",
    "name": "CleanDiv",
    "source_code": "class CleanDiv(FloorDiv):\n    pass",
    "docstring": "Div where we can assume no rounding. This is to enable future optimizations.",
    "type": "class",
    "file_path": "pytorch\\torch\\utils\\_sympy\\functions.py",
    "ast_data": "ClassDef name:CleanDiv"
  },
  {
    "library": "django",
    "name": "get_safe_settings",
    "source_code": "def get_safe_settings(self):\n    settings_dict = {}\n    for k in dir(settings):\n        if k.isupper():\n            settings_dict[k] = self.cleanse_setting(k, getattr(settings, k))\n    return settings_dict",
    "docstring": "Return a dictionary of the settings module with values of sensitive settings replaced with stars (*********).",
    "type": "method",
    "file_path": "django\\django\\views\\debug.py",
    "ast_data": "FunctionDef name:get_safe_settings arg:self arguments arg Assign For Call If Call Assign Call Call Return return:yes"
  },
  {
    "library": "scipy",
    "name": "integrate_box",
    "source_code": "def integrate_box(self, low_bounds, high_bounds, maxpts=None, *, rng=None):\n    low, high = (low_bounds - self.dataset.T, high_bounds - self.dataset.T)\n    values = multivariate_normal.cdf(high, lower_limit=low, cov=self.covariance, maxpts=maxpts, rng=rng)\n    return np_vecdot(values, self.weights, axis=-1)",
    "docstring": "Computes the integral of a pdf over a rectangular interval. Parameters ---------- low_bounds : array_like A 1-D array containing the lower bounds of integration. high_bounds : array_like A 1-D array containing the upper bounds of integration. maxpts : int, optional The maximum number of points to use for integration. rng : , optional Pseudorandom number generator state. When is None, a new generator is created using entropy from the operating system. Types other than are passed to to instantiate a ``. Returns ------- value : scalar The result of the integral.",
    "type": "method",
    "file_path": "scipy\\scipy\\stats\\_kde.py",
    "ast_data": "FunctionDef name:integrate_box arg:self arg:low_bounds arg:high_bounds arg:maxpts arguments arg arg arg arg arg Assign Assign Call Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "_ensure_open",
    "source_code": "def _ensure_open(self) -> None:\n    if not hasattr(self, '_path_or_buf'):\n        self._open_file()",
    "docstring": "Ensure the file has been opened and its header data read.",
    "type": "method",
    "file_path": "pandas\\pandas\\io\\stata.py",
    "ast_data": "FunctionDef name:_ensure_open arg:self arguments arg If Call Call"
  },
  {
    "library": "pytorch",
    "name": "statically_known_true",
    "source_code": "def statically_known_true(x: BoolLikeType) -> bool:\n    if not isinstance(x, SymBool):\n        assert isinstance(x, bool)\n        return x\n    result = _static_eval_sym_bool(x)\n    if result is None:\n        return False\n    return result",
    "docstring": "Returns True if x can be simplified to a constant and is true. .. note:: This function doesn't introduce new guards, so the expression may end up evaluating to true at runtime even if this function returns False. Args: x (bool, SymBool): The expression to try statically evaluating",
    "type": "function",
    "file_path": "pytorch\\torch\\fx\\experimental\\symbolic_shapes.py",
    "ast_data": "FunctionDef name:statically_known_true arg:x arguments arg If Call Call Return return:yes Assign Call If Compare Return return:yes Return return:yes"
  },
  {
    "library": "pandas",
    "name": "plus_or_dot",
    "source_code": "def plus_or_dot(pieces) -> str:\n    if '+' in pieces.get('closest-tag', ''):\n        return '.'\n    return '+'",
    "docstring": "Return a + if we don't already have one, else return a .",
    "type": "function",
    "file_path": "pandas\\pandas\\_version.py",
    "ast_data": "FunctionDef name:plus_or_dot arg:pieces arguments arg If Compare Call Return return:yes Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "get_lr",
    "source_code": "@override\ndef get_lr(self) -> list[float]:\n    _warn_get_lr_called_within_step(self)\n    if self.last_epoch == 0 or self.last_epoch % self.step_size != 0:\n        return [group['lr'] for group in self.optimizer.param_groups]\n    return [group['lr'] * self.gamma for group in self.optimizer.param_groups]",
    "docstring": "Compute the learning rate of each parameter group.",
    "type": "method",
    "file_path": "pytorch\\torch\\optim\\lr_scheduler.py",
    "ast_data": "FunctionDef name:get_lr arg:self arguments arg Call If BoolOp Compare Compare Return return:yes Return return:yes"
  },
  {
    "library": "django",
    "name": "get_redirect_url",
    "source_code": "def get_redirect_url(self):\n    redirect_to = self.request.POST.get(self.redirect_field_name, self.request.GET.get(self.redirect_field_name))\n    url_is_safe = url_has_allowed_host_and_scheme(url=redirect_to, allowed_hosts=self.get_success_url_allowed_hosts(), require_https=self.request.is_secure())\n    return redirect_to if url_is_safe else ''",
    "docstring": "Return the user-originating redirect URL if it's safe.",
    "type": "method",
    "file_path": "django\\django\\contrib\\auth\\views.py",
    "ast_data": "FunctionDef name:get_redirect_url arg:self arguments arg Assign Call Call Assign Call Call Call Return return:yes"
  },
  {
    "library": "scrapy",
    "name": "_InvalidOutput",
    "source_code": "class _InvalidOutput(TypeError):\n    pass",
    "docstring": "Indicates an invalid value has been returned by a middleware's processing method. Internal and undocumented, it should not be raised or caught by user code.",
    "type": "class",
    "file_path": "scrapy\\scrapy\\exceptions.py",
    "ast_data": "ClassDef name:_InvalidOutput"
  },
  {
    "library": "scipy",
    "name": "choose_ncv",
    "source_code": "def choose_ncv(k):\n    return max(2 * k + 1, 20)",
    "docstring": "Choose number of lanczos vectors based on target number of singular/eigen values and vectors to compute, k.",
    "type": "function",
    "file_path": "scipy\\scipy\\sparse\\linalg\\_eigen\\arpack\\arpack.py",
    "ast_data": "FunctionDef name:choose_ncv arg:k arguments arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_ragged_tensor_to_variant_grad",
    "source_code": "@ops.RegisterGradient('RaggedTensorToVariant')\ndef _ragged_tensor_to_variant_grad(op, encoded_ragged_grad):\n    dense_values = op.inputs[-1]\n    ragged_rank = len(op.inputs) - 1\n    row_splits = 0 if ragged_rank == 0 else op.inputs[0]\n    values_grad = gen_ragged_conversion_ops.ragged_tensor_to_variant_gradient(encoded_ragged_grad=encoded_ragged_grad, row_splits=row_splits, dense_values_shape=array_ops.shape(dense_values), Tvalues=op.inputs[-1].dtype)\n    result = [None] * ragged_rank + [values_grad]\n    return result",
    "docstring": "Gradient for RaggedTensorToVariant op.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\ragged_conversion_ops.py",
    "ast_data": "FunctionDef name:_ragged_tensor_to_variant_grad arg:op arg:encoded_ragged_grad arguments arg arg Assign Assign Call Assign Compare Assign Call Call Assign Return return:yes Call"
  },
  {
    "library": "kornia",
    "name": "camera_matrix",
    "source_code": "@property\ndef camera_matrix(self) -> Tensor:\n    return self.intrinsics[..., :3, :3]",
    "docstring": "Return the 3x3 camera matrix containing the intrinsics. Returns: tensor of shape :math:.",
    "type": "method",
    "file_path": "kornia\\kornia\\geometry\\camera\\pinhole.py",
    "ast_data": "FunctionDef name:camera_matrix arg:self arguments arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_param_to_index",
    "source_code": "@property\ndef _param_to_index(self) -> dict[torch.Tensor, int]:\n    if len(self._param_to_index_cache) == 0:\n        self._param_to_index_cache = {p: i for i, p in enumerate(chain.from_iterable((g['params'] for g in self.param_groups)))}\n    return self._param_to_index_cache",
    "docstring": ":class: mapping parameters to their indices in the global optimizer state. NOTE: This assumes that the global optimizer state's indexing (in ``) follows a linear ordering over the parameter groups.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\optim\\zero_redundancy_optimizer.py",
    "ast_data": "FunctionDef name:_param_to_index arg:self arguments arg If Compare Call Assign Call Call Return return:yes"
  },
  {
    "library": "scipy",
    "name": "expected_freq",
    "source_code": "def expected_freq(observed):\n    observed = np.asarray(observed, dtype=np.float64)\n    margsums = margins(observed)\n    d = observed.ndim\n    expected = reduce(np.multiply, margsums) / observed.sum() ** (d - 1)\n    return expected",
    "docstring": "Compute the expected frequencies from a contingency table. Given an n-dimensional contingency table of observed frequencies, compute the expected frequencies for the table based on the marginal sums under the assumption that the groups associated with each dimension are independent. Parameters ---------- observed : array_like The table of observed frequencies. (While this function can handle a 1-D array, that case is trivial. Generally is at least 2-D.) Returns ------- expected : ndarray of float64 The expected frequencies, based on the marginal sums of the table. Same shape as . Examples -------- >>> import numpy as np >>> from scipy.stats.contingency import expected_freq >>> observed = np.array([[10, 10, 20],[20, 20, 20]]) >>> expected_freq(observed) array([[ 12., 12., 16.], [ 18., 18., 24.]])",
    "type": "function",
    "file_path": "scipy\\scipy\\stats\\contingency.py",
    "ast_data": "FunctionDef name:expected_freq arg:observed arguments arg Assign Call Assign Call Assign Assign Call Call Return return:yes"
  },
  {
    "library": "django",
    "name": "within",
    "source_code": "def within(self, other):\n    return capi.geos_within(self.ptr, other.ptr)",
    "docstring": "Return true if the DE-9IM intersection matrix for the two Geometries is T*F**F***.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\geos\\geometry.py",
    "ast_data": "FunctionDef name:within arg:self arg:other arguments arg arg Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "skew_deg",
    "source_code": "def skew_deg(self, xShear, yShear):\n    return self.skew(math.radians(xShear), math.radians(yShear))",
    "docstring": "Add a skew in place. *xShear* and *yShear* are the shear angles along the *x*- and *y*-axes, respectively, in degrees. Returns *self*, so this method can easily be chained with more calls to :meth:, :meth:, :meth: and :meth:.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\transforms.py",
    "ast_data": "FunctionDef name:skew_deg arg:self arg:xShear arg:yShear arguments arg arg arg Return return:yes Call Call Call"
  },
  {
    "library": "sphinx",
    "name": "get_outdated_docs",
    "source_code": "def get_outdated_docs(self) -> str | Iterable[str]:\n    raise NotImplementedError",
    "docstring": "Return an iterable of output files that are outdated, or a string describing what an update build will build. If the builder does not output individual files corresponding to source files, return a string here. If it does, return an iterable of those files that need to be written.",
    "type": "method",
    "file_path": "sphinx\\sphinx\\builders\\__init__.py",
    "ast_data": "FunctionDef name:get_outdated_docs arg:self arguments arg Raise"
  },
  {
    "library": "pandas",
    "name": "contains",
    "source_code": "def contains(self, other):\n    if isinstance(other, Interval):\n        raise NotImplementedError('contains not implemented for two intervals')\n    return (self._left < other if self.open_left else self._left <= other) & (other < self._right if self.open_right else other <= self._right)",
    "docstring": "Check elementwise if the Intervals contain the value. Return a boolean mask whether the value is contained in the Intervals of the IntervalArray. Parameters ---------- other : scalar The value to check whether it is contained in the Intervals. Returns ------- boolean array A boolean mask whether the value is contained in the Intervals. See Also -------- Interval.contains : Check whether Interval object contains value. IntervalArray.overlaps : Check if an Interval overlaps the values in the IntervalArray. Examples -------- >>> intervals = pd.arrays.IntervalArray.from_tuples([(0, 1), (1, 3), (2, 4)]) >>> intervals [(0, 1], (1, 3], (2, 4]] Length: 3, dtype: interval[int64, right] >>> intervals.contains(0.5) array([ True, False, False])",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\arrays\\interval.py",
    "ast_data": "FunctionDef name:contains arg:self arg:other arguments arg arg If Call Raise Call Return return:yes Compare Compare Compare Compare"
  },
  {
    "library": "authlib",
    "name": "query_token",
    "source_code": "def query_token(self, token_string, token_type_hint):\n    raise NotImplementedError()",
    "docstring": "Get the token from database/storage by the given token string. Developers should implement this method:: def query_token(self, token_string, token_type_hint): if token_type_hint == \"access_token\": tok = Token.query_by_access_token(token_string) elif token_type_hint == \"refresh_token\": tok = Token.query_by_refresh_token(token_string) else: tok = Token.query_by_access_token(token_string) if not tok: tok = Token.query_by_refresh_token(token_string) return tok",
    "type": "method",
    "file_path": "authlib\\authlib\\oauth2\\rfc7662\\introspection.py",
    "ast_data": "FunctionDef name:query_token arg:self arg:token_string arg:token_type_hint arguments arg arg arg Raise Call"
  },
  {
    "library": "matplotlib",
    "name": "set_markersize",
    "source_code": "def set_markersize(self, sz):\n    sz = float(sz)\n    if self._markersize != sz:\n        self.stale = True\n    self._markersize = sz",
    "docstring": "Set the marker size in points. Parameters ---------- sz : float Marker size, in points.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\lines.py",
    "ast_data": "FunctionDef name:set_markersize arg:self arg:sz arguments arg arg Assign Call If Compare Assign Assign"
  },
  {
    "library": "scikit-learn",
    "name": "requires_vector_input",
    "source_code": "@property\ndef requires_vector_input(self):\n    return False",
    "docstring": "Whether the kernel works only on fixed-length feature vectors.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\gaussian_process\\kernels.py",
    "ast_data": "FunctionDef name:requires_vector_input arg:self arguments arg Return return:yes"
  },
  {
    "library": "pandas",
    "name": "_prettify_tree",
    "source_code": "def _prettify_tree(self) -> bytes:\n    from xml.dom.minidom import parseString\n    dom = parseString(self.out_xml)\n    return dom.toprettyxml(indent='  ', encoding=self.encoding)",
    "docstring": "Output tree for pretty print format. This method will pretty print xml with line breaks and indentation.",
    "type": "method",
    "file_path": "pandas\\pandas\\io\\formats\\xml.py",
    "ast_data": "FunctionDef name:_prettify_tree arg:self arguments arg Assign Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, node_def, op, message, *args):\n    super(ResourceExhaustedError, self).__init__(node_def, op, message, RESOURCE_EXHAUSTED, *args)",
    "docstring": "Creates a .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\errors_impl.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:node_def arg:op arg:message arguments arg arg arg arg arg Call Call"
  },
  {
    "library": "numpy",
    "name": "__setstate__",
    "source_code": "def __setstate__(self, state):\n    ver, shp, typ, isf, raw, msk, flv = state\n    np.ndarray.__setstate__(self, (shp, typ, isf, raw))\n    mdtype = np.dtype([(k, np.bool) for k, _ in self.dtype.descr])\n    self.__dict__['_mask'].__setstate__((shp, mdtype, isf, msk))\n    self.fill_value = flv",
    "docstring": "Restore the internal state of the masked array. This is for pickling. `` output, and is a 5-tuple: - class name - a tuple giving the shape of the data - a typecode for the data - a binary string for the data - a binary string for the mask.",
    "type": "method",
    "file_path": "numpy\\numpy\\ma\\mrecords.py",
    "ast_data": "FunctionDef name:__setstate__ arg:self arg:state arguments arg arg Assign Call Assign Call Call Assign"
  },
  {
    "library": "pytorch",
    "name": "reset",
    "source_code": "@abc.abstractmethod\ndef reset(self, checkpoint_id: Union[str, os.PathLike, None]=None) -> None:\n    ...",
    "docstring": "Calls to indicates a brand new checkpoint write is going to happen. A checkpoint_id may be present if users set the checkpoint_id for this checkpoint write. The meaning of the checkpiont_id is storage-dependent. It can be a path to a folder/file or a key for a key-value storage. Args: checkpoint_id (Union[str, os.PathLike, None]): The ID of this checkpoint instance. The meaning of the checkpoint_id depends on the storage. It can be a path to a folder or to a file. It can also be a key if the storage is a key-value store. (Default: ``)",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\checkpoint\\storage.py",
    "ast_data": "FunctionDef name:reset arg:self arg:checkpoint_id arguments arg arg"
  },
  {
    "library": "pytorch",
    "name": "batch_isend_irecv",
    "source_code": "def batch_isend_irecv(p2p_op_list: list[P2POp]) -> list[Work]:\n    _check_p2p_op_list(p2p_op_list)\n    group = p2p_op_list[0].group\n    if group is None:\n        group = _get_default_group()\n    device = p2p_op_list[0].tensor.device\n\n    def peer_kwarg(op: P2POp) -> dict[str, int]:\n        key = 'group_dst' if op.op == isend else 'group_src'\n        return {key: op.group_peer}\n    if type(group) == ProcessGroup and group._get_backend(device).supports_coalescing:\n        with _coalescing_manager(group, device, async_ops=True) as cm:\n            for p2p_op in p2p_op_list:\n                p2p_op.op(p2p_op.tensor, group=p2p_op.group, tag=p2p_op.tag, **peer_kwarg(p2p_op))\n        return cm.works\n    else:\n        reqs = []\n        for p2p_op in p2p_op_list:\n            work = p2p_op.op(p2p_op.tensor, group=p2p_op.group, tag=p2p_op.tag, **peer_kwarg(p2p_op))\n            if work:\n                reqs.append(work)\n        return reqs",
    "docstring": "Send or Receive a batch of tensors asynchronously and return a list of requests. Process each of the operations in `torch.cuda.set_device` are allowed.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\distributed_c10d.py",
    "ast_data": "FunctionDef name:batch_isend_irecv arg:p2p_op_list arguments arg Call Assign If Compare Assign Call Assign FunctionDef name:peer_kwarg arg:op arguments arg Assign Compare Return return:yes If BoolOp Compare Call Call With Call For Call Call Return return:yes Assign For Assign Call Call If Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_get_async_or_non_blocking",
    "source_code": "def _get_async_or_non_blocking(function_name, non_blocking, kwargs):\n    if not kwargs:\n        return non_blocking\n    if len(kwargs) != 1 or 'async' not in kwargs:\n        message = \"{}() got an unexpected keyword argument '{}'\"\n        argument = list(kwargs.keys()).pop()\n        raise TypeError(message.format(function_name, argument))\n    warnings.warn(\"'async' is deprecated; use 'non_blocking'\")\n    return kwargs['async']",
    "docstring": "Return the non-blocking flag given the function name and kwargs. Args: function_name (str): the name of the function being used. non_blocking (bool): the default value. **kwargs (dict): the kwargs passed to the function.",
    "type": "function",
    "file_path": "pytorch\\torch\\_utils.py",
    "ast_data": "FunctionDef name:_get_async_or_non_blocking arg:function_name arg:non_blocking arg:kwargs arguments arg arg arg If Return return:yes If BoolOp Compare Call Compare Assign Assign Call Call Call Raise Call Call Call Return return:yes"
  },
  {
    "library": "seaborn",
    "name": "on",
    "source_code": "def on(self, target: Axes | SubFigure | Figure) -> Plot:\n    accepted_types: tuple\n    accepted_types = (mpl.axes.Axes, mpl.figure.SubFigure, mpl.figure.Figure)\n    accepted_types_str = f'{mpl.axes.Axes}, {mpl.figure.SubFigure}, or {mpl.figure.Figure}'\n    if not isinstance(target, accepted_types):\n        err = f'The `Plot.on` target must be an instance of {accepted_types_str}. You passed an instance of {target.__class__} instead.'\n        raise TypeError(err)\n    new = self._clone()\n    new._target = target\n    return new",
    "docstring": "Provide existing Matplotlib figure or axes for drawing the plot. When using this method, you will also need to explicitly call a method that triggers compilation, such as :meth: or :meth:. If you want to postprocess using matplotlib, you'd need to call :meth: first to compile the plot without rendering it. Parameters ---------- target : Axes, SubFigure, or Figure Matplotlib object to use. Passing :class: will add artists without otherwise modifying the figure. Otherwise, subplots will be created within the space of the given :class: or :class:. Examples -------- .. include:: ../docstrings/objects.Plot.on.rst",
    "type": "method",
    "file_path": "seaborn\\seaborn\\_core\\plot.py",
    "ast_data": "FunctionDef name:on arg:self arg:target arguments arg arg Assign Assign If Call Assign Raise Call Assign Call Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "get_static_batch_size",
    "source_code": "def get_static_batch_size(layer):\n    batch_input_shape, _ = get_input_shape_and_dtype(layer)\n    if batch_input_shape is not None:\n        return tensor_shape.Dimension(batch_input_shape[0]).value\n    return None",
    "docstring": "Gets the static batch size of a Layer. Args: layer: a instance. Returns: The static batch size of a Layer.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\training_utils.py",
    "ast_data": "FunctionDef name:get_static_batch_size arg:layer arguments arg Assign Call If Compare Return return:yes Call Return return:no"
  },
  {
    "library": "scrapy",
    "name": "scraped",
    "source_code": "def scraped(self, item: Any, response: Response | Failure | None, spider: Spider) -> LogFormatterResult:\n    src: Any\n    if response is None:\n        src = f'{global_object_name(spider.__class__)}.start'\n    elif isinstance(response, Failure):\n        src = response.getErrorMessage()\n    else:\n        src = response\n    return {'level': logging.DEBUG, 'msg': SCRAPEDMSG, 'args': {'src': src, 'item': item}}",
    "docstring": "Logs a message when an item is scraped by a spider.",
    "type": "method",
    "file_path": "scrapy\\scrapy\\logformatter.py",
    "ast_data": "FunctionDef name:scraped arg:self arg:item arg:response arg:spider arguments arg arg arg arg If Compare Assign Call If Call Assign Call Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, device_name, node_exec_stats, file_path, line_number, func_name, op_type):\n    self.device_name = device_name\n    self.node_exec_stats = node_exec_stats\n    self.file_path = file_path\n    self.line_number = line_number\n    self.func_name = func_name\n    if self.file_path:\n        self.file_line_func = '%s:%d(%s)' % (os.path.basename(self.file_path), self.line_number, self.func_name)\n    else:\n        self.file_line_func = ''\n    self.op_type = op_type\n    self.start_time = self.node_exec_stats.all_start_micros\n    self.op_time = self.node_exec_stats.op_end_rel_micros - self.node_exec_stats.op_start_rel_micros",
    "docstring": "Constructor. Args: device_name: (string) name of the device. node_exec_stats: proto. file_path: path to the source file involved in creating the op. line_number: line number in the file involved in creating the op. func_name: name of the function that the line belongs to. op_type: (string) Operation type.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\debug\\lib\\profiling.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:device_name arg:node_exec_stats arg:file_path arg:line_number arg:func_name arg:op_type arguments arg arg arg arg arg arg arg Assign Assign Assign Assign Assign If Assign Call Assign Assign Assign Assign"
  },
  {
    "library": "scikit-learn",
    "name": "_BaseNB",
    "source_code": "class _BaseNB(ClassifierMixin, BaseEstimator, metaclass=ABCMeta):\n\n    @abstractmethod\n    def _joint_log_likelihood(self, X):\n        pass\n\n    @abstractmethod\n    def _check_X(self, X):\n        pass\n\n    def predict_joint_log_proba(self, X):\n        check_is_fitted(self)\n        X = self._check_X(X)\n        return self._joint_log_likelihood(X)\n\n    def predict(self, X):\n        check_is_fitted(self)\n        X = self._check_X(X)\n        jll = self._joint_log_likelihood(X)\n        return self.classes_[np.argmax(jll, axis=1)]\n\n    def predict_log_proba(self, X):\n        check_is_fitted(self)\n        X = self._check_X(X)\n        jll = self._joint_log_likelihood(X)\n        log_prob_x = logsumexp(jll, axis=1)\n        return jll - np.atleast_2d(log_prob_x).T\n\n    def predict_proba(self, X):\n        return np.exp(self.predict_log_proba(X))",
    "docstring": "Abstract base class for naive Bayes estimators",
    "type": "class",
    "file_path": "scikit-learn\\sklearn\\naive_bayes.py",
    "ast_data": "ClassDef name:_BaseNB FunctionDef name:_joint_log_likelihood arg:self arg:X arguments arg arg FunctionDef name:_check_X arg:self arg:X arguments arg arg FunctionDef name:predict_joint_log_proba arg:self arg:X arguments arg arg Call Assign Call Return return:yes Call FunctionDef name:predict arg:self arg:X arguments arg arg Call Assign Call Assign Call Return return:yes Call FunctionDef name:predict_log_proba arg:self arg:X arguments arg arg Call Assign Call Assign Call Assign Call Return return:yes Call FunctionDef name:predict_proba arg:self arg:X arguments arg arg Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "mark_static",
    "source_code": "def mark_static(t, index=None):\n    if is_compiling():\n        if index is None:\n            for s in t.size():\n                comptime.force_static(s)\n        else:\n            comptime.force_static(t.size(index))\n        return\n    if is_traceable_wrapper_subclass(t):\n        _apply_func_to_inner_tensors_of_same_dim(mark_static, t, index)\n    if not isinstance(t, torch.Tensor) and issubclass(t, torch.nn.Module):\n        t._dynamo_marked_static = True\n        return t\n    if not isinstance(t, torch.Tensor):\n        raise TypeError(f'mark_static expects a tensor/nn.Module class but recieved {type(t)}')\n    if isinstance(index, int):\n        if not hasattr(t, '_dynamo_static_indices'):\n            t._dynamo_static_indices = set()\n        t._dynamo_static_indices.add(index)\n    elif index is None:\n        for i in range(t.dim()):\n            mark_static(t, i)\n    else:\n        assert isinstance(index, (list, tuple))\n        for i in index:\n            mark_static(t, i)",
    "docstring": "Mark a tensor as having a static dim or mark a nn module class as static. For tensors =========== This will prevent us from attempting to compile it dynamically when dynamic=True; this can improve trace-time performance. This has lower precedence than mark_dynamic. Unlike mark_dynamic, this can be done inside a graph, in which case it induces specialization on the tensor. For nn.Module classes ===================== For static nn.Module classes, TorchDynamo assumes that the module instance attributes will not be modified after compilation. This will ensure that TorchDynamo keeps integer attributes CONSTANT and not symints. From TorchDynamo implementation side, the instances of static-marked nn.Module class will be converted to UnspecializedBuiltinNNModuleVariable, which have the same properties. Note that we still have to guard on the attributes, because different instances of the nn.Module can have different values of the attributes. The key point here is that the attributes are static.",
    "type": "function",
    "file_path": "pytorch\\torch\\_dynamo\\decorators.py",
    "ast_data": "FunctionDef name:mark_static arg:t arg:index arguments arg arg If Call If Compare For Call Call Call Call Return return:no If Call Call If BoolOp Call Call Assign Return return:yes If Call Raise Call Call If Call If Call Assign Call Call If Compare For Call Call Call Call For Call"
  },
  {
    "library": "pytorch",
    "name": "get_lr",
    "source_code": "def get_lr(self):\n    if not self._get_lr_called_within_step:\n        warnings.warn('To get the last learning rate computed by the scheduler, please use `get_last_lr()`.', UserWarning)\n    step = self._step_count - 1\n    if self.anneal_epochs == 0:\n        step = max(1, step)\n    prev_t = max(0, min(1, (step - 1) / max(1, self.anneal_epochs)))\n    prev_alpha = self.anneal_func(prev_t)\n    prev_lrs = [self._get_initial_lr(group['lr'], group['swa_lr'], prev_alpha) for group in self.optimizer.param_groups]\n    t = max(0, min(1, step / max(1, self.anneal_epochs)))\n    alpha = self.anneal_func(t)\n    return [group['swa_lr'] * alpha + lr * (1 - alpha) for group, lr in zip(self.optimizer.param_groups, prev_lrs)]",
    "docstring": "Get learning rate.",
    "type": "method",
    "file_path": "pytorch\\torch\\optim\\swa_utils.py",
    "ast_data": "FunctionDef name:get_lr arg:self arguments arg If Call Assign If Compare Assign Call Assign Call Call Call Assign Call Assign Call Assign Call Call Call Assign Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "activation_is_dynamically_quantized",
    "source_code": "def activation_is_dynamically_quantized(qconfig):\n    _activation_dtype, _, activation_is_dynamic = get_qconfig_dtypes(qconfig)\n    return activation_is_dynamic",
    "docstring": "Given a qconfig, decide if the activation needs to be dynamically quantized or not, this includes dynamically quantizing to quint8, qint8 and float16",
    "type": "function",
    "file_path": "pytorch\\torch\\ao\\quantization\\utils.py",
    "ast_data": "FunctionDef name:activation_is_dynamically_quantized arg:qconfig arguments arg Assign Call Return return:yes"
  },
  {
    "library": "pandas",
    "name": "_convert_to_side",
    "source_code": "@classmethod\ndef _convert_to_side(cls, side_spec):\n    from openpyxl.styles import Side\n    _side_key_map = {'border_style': 'style'}\n    if isinstance(side_spec, str):\n        return Side(style=side_spec)\n    side_kwargs = {}\n    for k, v in side_spec.items():\n        k = _side_key_map.get(k, k)\n        if k == 'color':\n            v = cls._convert_to_color(v)\n        side_kwargs[k] = v\n    return Side(**side_kwargs)",
    "docstring": "Convert `` to an openpyxl v2 Side object. Parameters ---------- side_spec : str, dict A string specifying the border style, or a dict with zero or more of the following keys (or their synonyms). 'style' ('border_style') 'color' Returns ------- side : openpyxl.styles.Side",
    "type": "method",
    "file_path": "pandas\\pandas\\io\\excel\\_openpyxl.py",
    "ast_data": "FunctionDef name:_convert_to_side arg:cls arg:side_spec arguments arg arg Assign If Call Return return:yes Call Assign For Call Assign Call If Compare Assign Call Assign Return return:yes Call"
  },
  {
    "library": "seaborn",
    "name": "agg",
    "source_code": "def agg(self, data: DataFrame, *args, **kwargs) -> DataFrame:\n    grouper, groups = self._get_groups(data)\n    if not grouper:\n        raise ValueError('No grouping variables are present in dataframe')\n    res = data.groupby(grouper, sort=False, observed=False).agg(*args, **kwargs).reindex(groups).reset_index().pipe(self._reorder_columns, data)\n    return res",
    "docstring": "Reduce each group to a single row in the output. The output will have a row for each unique combination of the grouping variable levels with null values for the aggregated variable(s) where those combinations do not appear in the dataset.",
    "type": "method",
    "file_path": "seaborn\\seaborn\\_core\\groupby.py",
    "ast_data": "FunctionDef name:agg arg:self arg:data arguments arg arg arg arg Assign Call If Raise Call Assign Call Call Call Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "dispatch",
    "source_code": "def dispatch(self, node: torch.fx.Node, onnx_args: Sequence[fx_type_utils.TensorLike | str | int | float | bool | list | complex | None], onnx_kwargs: dict[str, fx_type_utils.Argument]) -> onnxscript.OnnxFunction | onnxscript.TracedOnnxFunction:\n    default_and_custom_functions = self.get_function_overloads(node)\n    return self._find_the_perfect_or_nearest_match_onnxfunction(node, default_and_custom_functions, onnx_args, onnx_kwargs)",
    "docstring": "Dispatches an ONNX function based on the given FX node, arguments, and keyword arguments. Args: node: The TorchFX node to dispatch the function for. onnx_args: The arguments of the ONNX function. onnx_kwargs: The keyword arguments of the ONNX function. Returns: Either an or instance based on the dispatch algorithm. Raises: RuntimeError: If there are no overloaded functions available for the given FX node.",
    "type": "method",
    "file_path": "pytorch\\torch\\onnx\\_internal\\fx\\onnxfunction_dispatcher.py",
    "ast_data": "FunctionDef name:dispatch arg:self arg:node arg:onnx_args arg:onnx_kwargs arguments arg arg arg arg Assign Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "get_int_list",
    "source_code": "def get_int_list(self, min_length=_MIN_LENGTH, max_length=_MAX_LENGTH, min_int=_MIN_INT, max_int=_MAX_INT):\n    length = self.get_int(min_length, max_length)\n    return self.fdp.ConsumeIntListInRange(length, min_int, max_int)",
    "docstring": "Consume a signed integer list with given constraints. Args: min_length: The minimum length of the list. max_length: The maximum length of the list. min_int: Minimum allowed integer. max_int: Maximum allowed integer. Returns: Consumed integer list based on input bytes and constraints.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\security\\fuzzing\\python_fuzzing.py",
    "ast_data": "FunctionDef name:get_int_list arg:self arg:min_length arg:max_length arg:min_int arg:max_int arguments arg arg arg arg arg Assign Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "call_module",
    "source_code": "@compatibility(is_backward_compatible=True)\ndef call_module(self, module_name: str, args: Optional[tuple['Argument', ...]]=None, kwargs: Optional[dict[str, 'Argument']]=None, type_expr: Optional[Any]=None) -> Node:\n    if self.owning_module and self.owning_module.get_submodule(module_name) is None:\n        warnings.warn('Attempted to insert a call_module Node with no underlying reference in the owning GraphModule! Call GraphModule.add_submodule to add the necessary submodule')\n    return self.create_node('call_module', module_name, args, kwargs, type_expr=type_expr)",
    "docstring": "Insert a `Graph.create_node`.",
    "type": "method",
    "file_path": "pytorch\\torch\\fx\\graph.py",
    "ast_data": "FunctionDef name:call_module arg:self arg:module_name arg:args arg:kwargs arg:type_expr arguments arg arg arg arg arg If BoolOp Compare Call Call Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "distribute_strategy",
    "source_code": "@property\ndef distribute_strategy(self):\n    return self._distribution_strategy or distribute_lib.get_strategy()",
    "docstring": "The this model was created under.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\training.py",
    "ast_data": "FunctionDef name:distribute_strategy arg:self arguments arg Return return:yes BoolOp Call"
  },
  {
    "library": "scikit-learn",
    "name": "_parallel_decision_function",
    "source_code": "def _parallel_decision_function(estimators, estimators_features, X, params):\n    return sum((estimator.decision_function(X[:, features], **params) for estimator, features in zip(estimators, estimators_features)))",
    "docstring": "Private function used to compute decisions within a job.",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\ensemble\\_bagging.py",
    "ast_data": "FunctionDef name:_parallel_decision_function arg:estimators arg:estimators_features arg:X arg:params arguments arg arg arg arg Return return:yes Call Call Call"
  },
  {
    "library": "scipy",
    "name": "_support_mask",
    "source_code": "def _support_mask(self, x):\n    return ~np.any(_dot_diag(x, self._i_zero), axis=-1)",
    "docstring": "Check whether x lies in the support of the distribution.",
    "type": "method",
    "file_path": "scipy\\scipy\\stats\\_covariance.py",
    "ast_data": "FunctionDef name:_support_mask arg:self arg:x arguments arg arg Return return:yes Call Call"
  },
  {
    "library": "matplotlib",
    "name": "_Stack",
    "source_code": "class _Stack:\n\n    def __init__(self):\n        self._pos = -1\n        self._elements = []\n\n    def clear(self):\n        self._pos = -1\n        self._elements = []\n\n    def __call__(self):\n        return self._elements[self._pos] if self._elements else None\n\n    def __len__(self):\n        return len(self._elements)\n\n    def __getitem__(self, ind):\n        return self._elements[ind]\n\n    def forward(self):\n        self._pos = min(self._pos + 1, len(self._elements) - 1)\n        return self()\n\n    def back(self):\n        self._pos = max(self._pos - 1, 0)\n        return self()\n\n    def push(self, o):\n        self._elements[self._pos + 1:] = [o]\n        self._pos = len(self._elements) - 1\n        return o\n\n    def home(self):\n        return self.push(self._elements[0]) if self._elements else None",
    "docstring": "Stack of elements with a movable cursor. Mimics home/back/forward in a web browser.",
    "type": "class",
    "file_path": "matplotlib\\lib\\matplotlib\\cbook.py",
    "ast_data": "ClassDef name:_Stack FunctionDef name:__init__ arg:self arguments arg Assign Assign FunctionDef name:clear arg:self arguments arg Assign Assign FunctionDef name:__call__ arg:self arguments arg Return return:yes FunctionDef name:__len__ arg:self arguments arg Return return:yes Call FunctionDef name:__getitem__ arg:self arg:ind arguments arg arg Return return:yes FunctionDef name:forward arg:self arguments arg Assign Call Call Return return:yes Call FunctionDef name:back arg:self arguments arg Assign Call Return return:yes Call FunctionDef name:push arg:self arg:o arguments arg arg Assign Assign Call Return return:yes FunctionDef name:home arg:self arguments arg Return return:yes Call"
  },
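A quick demonstration of the browser-style cursor semantics described in the `_Stack` docstring above. `_Stack` is a private matplotlib helper, imported here purely for illustration (internal API, subject to change):

```python
# Exercise the home/back/forward semantics of the private matplotlib helper.
from matplotlib.cbook import _Stack

s = _Stack()
for view in ("home", "zoom1", "zoom2"):
    s.push(view)
print(s())           # 'zoom2' -- the cursor sits on the newest element
print(s.back())      # 'zoom1'
print(s.forward())   # 'zoom2'
print(s.home())      # 'home'  -- re-pushes the first element on top
```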
  {
    "library": "pandas",
    "name": "register_extension_dtype",
    "source_code": "def register_extension_dtype(cls: type_t[ExtensionDtypeT]) -> type_t[ExtensionDtypeT]:\n    _registry.register(cls)\n    return cls",
    "docstring": "Register an ExtensionType with pandas as class decorator. This enables operations like `` for the name of the ExtensionDtype. Returns ------- callable A class decorator. See Also -------- api.extensions.ExtensionDtype : The base class for creating custom pandas data types. Series : One-dimensional array with axis labels. DataFrame : Two-dimensional, size-mutable, potentially heterogeneous tabular data. Examples -------- >>> from pandas.api.extensions import register_extension_dtype, ExtensionDtype >>> @register_extension_dtype ... class MyExtensionDtype(ExtensionDtype): ... name = \"myextension\"",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\dtypes\\base.py",
    "ast_data": "FunctionDef name:register_extension_dtype arg:cls arguments arg Call Return return:yes"
  },
  {
    "library": "pandas",
    "name": "_hasna",
    "source_code": "@property\ndef _hasna(self) -> bool:\n    return bool(self.isna().any())",
    "docstring": "Equivalent to . Some ExtensionArray subclasses may be able to optimize this check.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\arrays\\base.py",
    "ast_data": "FunctionDef name:_hasna arg:self arguments arg Return return:yes Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "impl_backward",
    "source_code": "def impl_backward(qualname, output_differentiability=None, *, func=None):\n\n    def inner(func):\n        custom_op = _find_custom_op(qualname, also_check_torch_library=True)\n        custom_op.impl_backward(output_differentiability, _stacklevel=3)(func)\n        return func\n    if func is None:\n        return inner\n    return inner(func)",
    "docstring": "Registers a backward formula for an operator. In order for an operator to work with autograd, you need to register a backward formula. There are two pieces to this: 1. You must give us a function to specify what to save for backward. Call this the \"save for backward\" function. 2. You must give us a function that computes gradients. Call this the \"backward\" function. Use to define a \"save for backward\" function that specifies what gets saved for backward. The function should accept two arguments `impl_backward` is one or more gradients. The number of gradients matches the number of outputs of the operator. The backward function must return a dict that maps the name of an input to the operator to its corresponding gradient. All inputs that were declared to be Tensors in the operator definition must be accounted for in the dict. The gradient may be a Tensor or None. For a detailed guide on custom ops, please see",
    "type": "function",
    "file_path": "pytorch\\torch\\_custom_ops.py",
    "ast_data": "FunctionDef name:impl_backward arg:qualname arg:output_differentiability arguments arg arg arg FunctionDef name:inner arg:func arguments arg Assign Call Call Call Return return:yes If Compare Return return:yes Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "DefaultState",
    "source_code": "class DefaultState:\n    __slots__ = ['process_group', 'world_size', 'gradient_predivide_factor', 'gradient_postdivide_factor']\n\n    def __init__(self, process_group: dist.ProcessGroup):\n        if process_group is None:\n            raise ValueError(f'Expected to pass in an explicit ProcessGroup to {self}.')\n        self.process_group = process_group\n        self.world_size = dist.get_world_size(process_group)\n        self.gradient_predivide_factor = self._get_gradient_predivide_factor(self.world_size)\n        self.gradient_postdivide_factor = self.world_size / self.gradient_predivide_factor\n\n    @staticmethod\n    def _get_gradient_predivide_factor(world_size: int) -> float:\n        factor: int = 1\n        while world_size % factor == 0 and world_size / factor > factor:\n            factor *= 2\n        return float(factor)",
    "docstring": "Stores state needed to perform the default communication algorithm within a communication hook. Args: process_group (ProcessGroup): The process group to be used.",
    "type": "class",
    "file_path": "pytorch\\torch\\distributed\\algorithms\\_comm_hooks\\default_hooks.py",
    "ast_data": "ClassDef name:DefaultState Assign FunctionDef name:__init__ arg:self arg:process_group arguments arg arg If Compare Raise Call Assign Assign Call Assign Call Assign FunctionDef name:_get_gradient_predivide_factor arg:world_size arguments arg While BoolOp Compare Compare Return return:yes Call"
  },
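The `_get_gradient_predivide_factor` loop above doubles a factor while it still evenly divides `world_size` and the quotient remains larger than the factor, keeping the pre- and post-division divisors roughly balanced so gradient values stay in range during reduction. A minimal standalone sketch of that rule (the function name here is illustrative, not the torch API):

```python
# Standalone sketch of the predivide-factor rule from DefaultState above.
def gradient_predivide_factor(world_size: int) -> float:
    factor = 1
    # Double while factor divides world_size and the quotient still
    # exceeds the factor, balancing the pre/post divisors.
    while world_size % factor == 0 and world_size / factor > factor:
        factor *= 2
    return float(factor)

for ws in (1, 2, 8, 64, 96):
    pre = gradient_predivide_factor(ws)
    post = ws / pre
    print(ws, pre, post)  # pre * post == world_size in every case
```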
  {
    "library": "matplotlib",
    "name": "get_converter",
    "source_code": "def get_converter(self):\n    return self._converter",
    "docstring": "Get the unit converter for axis. Returns ------- or None",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\axis.py",
    "ast_data": "FunctionDef name:get_converter arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "assert_type_v2",
    "source_code": "@tf_export('debugging.assert_type', v1=[])\n@dispatch.add_dispatch_support\ndef assert_type_v2(tensor, tf_type, message=None, name=None):\n    assert_type(tensor=tensor, tf_type=tf_type, message=message, name=name)",
    "docstring": "Asserts that the given is of the specified type. This can always be checked statically, so this method returns nothing. Example: >>> a = tf.Variable(1.0) >>> tf.debugging.assert_type(a, tf_type= tf.float32) >>> b = tf.constant(21) >>> tf.debugging.assert_type(b, tf_type=tf.bool) Traceback (most recent call last): ... TypeError: ... >>> c = tf.SparseTensor(indices=[[0, 0], [1, 2]], values=[1, 2], ... dense_shape=[3, 4]) >>> tf.debugging.assert_type(c, tf_type= tf.int32) Args: tensor: A , or . tf_type: A tensorflow type (, , , etc). message: A string to prefix to the default message. name: A name for this operation. Defaults to \"assert_type\" Raises: TypeError: If the tensor's data type doesn't match .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\check_ops.py",
    "ast_data": "FunctionDef name:assert_type_v2 arg:tensor arg:tf_type arg:message arg:name arguments arg arg arg arg Call Call"
  },
  {
    "library": "pytorch",
    "name": "reset_mod_stats",
    "source_code": "def reset_mod_stats(self) -> None:\n    self.memory_tracking.clear()",
    "docstring": "Reset all the module memory stats. Clears `` dictionary.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\_tools\\mem_tracker.py",
    "ast_data": "FunctionDef name:reset_mod_stats arg:self arguments arg Call"
  },
  {
    "library": "tensorflow",
    "name": "_ragged_split",
    "source_code": "def _ragged_split(tensor, pieces):\n    shape = tensor.shape\n    if 1 != len(shape):\n        raise ValueError('input tensor must be 1D')\n    tensor_len = shape.dims[0].value\n    chunk_size = tensor_len // pieces\n    with ops.colocate_with(tensor):\n        if tensor_len != pieces * chunk_size:\n            assert pieces > 1\n            last_chunk_size = tensor_len - (pieces - 1) * chunk_size\n            assert last_chunk_size > 0\n            piece_lens = [chunk_size for _ in range(pieces - 1)] + [last_chunk_size]\n            return array_ops.split(tensor, piece_lens)\n        else:\n            return array_ops.split(tensor, pieces)",
    "docstring": "Like split for 1D tensors but allows case where len % pieces != 0. Args: tensor: that must be 1D. pieces: a positive integer specifying the number of pieces into which tensor should be split. Returns: list of of length pieces, which hold the values of the input tensor, in order. The final tensor may be shorter than the others, which will all be of equal length. Raises: ValueError: input tensor must be 1D.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\v1\\all_reduce.py",
    "ast_data": "FunctionDef name:_ragged_split arg:tensor arg:pieces arguments arg arg Assign If Compare Call Raise Call Assign Assign With Call If Compare Compare Assign Compare Assign Call Return return:yes Call Return return:yes Call"
  },
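The splitting rule above is easy to see outside TensorFlow. A pure-NumPy sketch of the same piece-length computation (not the TF API; `ragged_split` here is a hypothetical helper):

```python
import numpy as np

# All pieces share chunk_size = len // pieces except the last, which
# absorbs the remainder and may therefore be longer.
def ragged_split(vec, pieces):
    n = len(vec)
    chunk = n // pieces
    lens = [chunk] * (pieces - 1) + [n - (pieces - 1) * chunk]
    return np.split(vec, np.cumsum(lens)[:-1])

print([p.tolist() for p in ragged_split(np.arange(10), 4)])
# [[0, 1], [2, 3], [4, 5], [6, 7, 8, 9]]
```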
  {
    "library": "matplotlib",
    "name": "transform_path_affine",
    "source_code": "def transform_path_affine(self, path):\n    return self.get_affine().transform_path_affine(path)",
    "docstring": "Apply the affine part of this transform to *path*, returning a new . ``.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\transforms.py",
    "ast_data": "FunctionDef name:transform_path_affine arg:self arg:path arguments arg arg Return return:yes Call Call"
  },
  {
    "library": "kornia",
    "name": "AdjustBrightnessAccumulative",
    "source_code": "class AdjustBrightnessAccumulative(Module):\n\n    def __init__(self, brightness_factor: Union[float, Tensor]) -> None:\n        super().__init__()\n        self.brightness_factor: Union[float, Tensor] = brightness_factor\n\n    def forward(self, input: Tensor) -> Tensor:\n        return adjust_brightness_accumulative(input, self.brightness_factor)",
    "docstring": "Adjust Brightness of an image accumulatively. This implementation aligns PIL. Hence, the output is close to TorchVision. The input image is expected to be in the range of [0, 1]. Args: brightness_factor: Brightness adjust factor per element in the batch. 0 does not modify the input image while any other number modify the brightness. Shape: - Input: Image/Input to be adjusted in the shape of :math:. - Output: Adjusted image in the shape of :math:. Example: >>> x = torch.ones(1, 1, 3, 3) >>> AdjustBrightnessAccumulative(1.)(x) tensor([[[[1., 1., 1.], [1., 1., 1.], [1., 1., 1.]]]]) >>> x = torch.ones(2, 5, 3, 3) >>> y = torch.ones(2) >>> AdjustBrightnessAccumulative(y)(x).shape torch.Size([2, 5, 3, 3])",
    "type": "class",
    "file_path": "kornia\\kornia\\enhance\\adjust.py",
    "ast_data": "ClassDef name:AdjustBrightnessAccumulative FunctionDef name:__init__ arg:self arg:brightness_factor arguments arg arg Call Call FunctionDef name:forward arg:self arg:input arguments arg arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "set_backend_pattern_configs",
    "source_code": "def set_backend_pattern_configs(self, configs: list[BackendPatternConfig]) -> BackendConfig:\n    for conf in configs:\n        self.set_backend_pattern_config(conf)\n    return self",
    "docstring": "Set the configs for patterns that can be run on the target backend. This overrides any existing config for a given pattern if it was previously registered already.",
    "type": "method",
    "file_path": "pytorch\\torch\\ao\\quantization\\backend_config\\backend_config.py",
    "ast_data": "FunctionDef name:set_backend_pattern_configs arg:self arg:configs arguments arg arg For Call Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "gradient",
    "source_code": "def gradient(self, y_true, raw_prediction, sample_weight=None, gradient_out=None, n_threads=1):\n    if gradient_out is None:\n        gradient_out = np.empty_like(raw_prediction)\n    if raw_prediction.ndim == 2 and raw_prediction.shape[1] == 1:\n        raw_prediction = raw_prediction.squeeze(1)\n    if gradient_out.ndim == 2 and gradient_out.shape[1] == 1:\n        gradient_out = gradient_out.squeeze(1)\n    self.closs.gradient(y_true=y_true, raw_prediction=raw_prediction, sample_weight=sample_weight, gradient_out=gradient_out, n_threads=n_threads)\n    return gradient_out",
    "docstring": "Compute gradient of loss w.r.t raw_prediction for each input. Parameters ---------- y_true : C-contiguous array of shape (n_samples,) Observed, true target values. raw_prediction : C-contiguous array of shape (n_samples,) or array of shape (n_samples, n_classes) Raw prediction values (in link space). sample_weight : None or C-contiguous array of shape (n_samples,) Sample weights. gradient_out : None or C-contiguous array of shape (n_samples,) or array of shape (n_samples, n_classes) A location into which the result is stored. If None, a new array might be created. n_threads : int, default=1 Might use openmp thread parallelism. Returns ------- gradient : array of shape (n_samples,) or (n_samples, n_classes) Element-wise gradients.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\_loss\\loss.py",
    "ast_data": "FunctionDef name:gradient arg:self arg:y_true arg:raw_prediction arg:sample_weight arg:gradient_out arg:n_threads arguments arg arg arg arg arg arg If Compare Assign Call If BoolOp Compare Compare Assign Call If BoolOp Compare Compare Assign Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "cleanup_makefile",
    "source_code": "def cleanup_makefile():\n    makefile_download_dir = os.path.join(_TF_WORKSPACE_ROOT, 'tensorflow', 'contrib', 'makefile', 'downloads')\n    if os.path.isdir(makefile_download_dir):\n        for root, _, filenames in os.walk(makefile_download_dir):\n            for f in filenames:\n                if f.endswith('BUILD'):\n                    os.remove(os.path.join(root, f))",
    "docstring": "Delete any leftover BUILD files from the Makefile build. These files could interfere with Bazel parsing.",
    "type": "function",
    "file_path": "tensorflow\\configure.py",
    "ast_data": "FunctionDef name:cleanup_makefile arguments Assign Call If Call For Call For If Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "LargeMessageSplitter",
    "source_code": "class LargeMessageSplitter(SplitBasedOnSize):\n    __slots__ = ('size_check',)\n\n    def __init__(self, proto, proto_size, size_check=_GREEDY_SPLIT, **kwargs):\n        self.size_check = size_check\n        super().__init__(proto, proto_size, **kwargs)\n\n    def build_chunks(self) -> int:\n        if self.size_check(self.proto_size):\n            new_proto = type(self._proto)()\n            new_proto.MergeFrom(self._proto)\n            self._proto.Clear()\n            self.add_chunk(new_proto, [])\n            return self.proto_size\n        return 0",
    "docstring": "Splits a message into a separaet chunk if its over a certain size.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\tools\\proto_splitter\\split_graph_def.py",
    "ast_data": "ClassDef name:LargeMessageSplitter Assign FunctionDef name:__init__ arg:self arg:proto arg:proto_size arg:size_check arguments arg arg arg arg arg Assign Call Call FunctionDef name:build_chunks arg:self arguments arg If Call Assign Call Call Call Call Call Return return:yes Return return:yes"
  },
  {
    "library": "django",
    "name": "get_database_version",
    "source_code": "def get_database_version(self):\n    raise NotImplementedError('subclasses of BaseDatabaseWrapper may require a get_database_version() method.')",
    "docstring": "Return a tuple of the database's version.",
    "type": "method",
    "file_path": "django\\django\\db\\backends\\base\\base.py",
    "ast_data": "FunctionDef name:get_database_version arg:self arguments arg Raise Call"
  },
  {
    "library": "numpy",
    "name": "_fix_real_abs_gt_1",
    "source_code": "def _fix_real_abs_gt_1(x):\n    x = asarray(x)\n    if any(isreal(x) & (abs(x) > 1)):\n        x = _tocomplex(x)\n    return x",
    "docstring": "Convert to complex if it has real components x_i with abs(x_i)>1. Otherwise, output is just the array version of the input (via asarray). Parameters ---------- x : array_like Returns ------- array Examples -------- >>> import numpy as np >>> np.lib.scimath._fix_real_abs_gt_1([0,1]) array([0, 1]) >>> np.lib.scimath._fix_real_abs_gt_1([0,2]) array([0.+0.j, 2.+0.j])",
    "type": "function",
    "file_path": "numpy\\numpy\\lib\\_scimath_impl.py",
    "ast_data": "FunctionDef name:_fix_real_abs_gt_1 arg:x arguments arg Assign Call If Call Call Compare Call Assign Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "set_x",
    "source_code": "def set_x(self, x):\n    self._x = x\n    self.stale = True",
    "docstring": "Set the *x* position of the text. Parameters ---------- x : float",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\text.py",
    "ast_data": "FunctionDef name:set_x arg:self arg:x arguments arg arg Assign Assign"
  },
  {
    "library": "tensorflow",
    "name": "assert_nontrivial_match",
    "source_code": "def assert_nontrivial_match(self):\n    raise AssertionError('No checkpoint specified (save_path=None); nothing is being restored.')",
    "docstring": "Assertion for consistency with . Always fails.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\checkpoint\\checkpoint.py",
    "ast_data": "FunctionDef name:assert_nontrivial_match arg:self arguments arg Raise Call"
  },
  {
    "library": "scikit-learn",
    "name": "fit",
    "source_code": "def fit(self, X, y, sample_weight=None, **params):\n    return super().fit(X, y, sample_weight=sample_weight, **params)",
    "docstring": "Fit ElasticNet model with coordinate descent. Fit is on grid of alphas and best alpha estimated by cross-validation. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) Training data. Pass directly as Fortran-contiguous data to avoid unnecessary memory duplication. If y is mono-output, X can be sparse. Note that large sparse matrices and arrays requiring indices are not accepted. y : array-like of shape (n_samples,) Target values. sample_weight : float or array-like of shape (n_samples,), default=None Sample weights used for fitting and evaluation of the weighted mean squared error of each cv-fold. Note that the cross validated MSE that is finally used to find the best model is the unweighted mean over the (weighted) MSEs of each test fold. **params : dict, default=None Parameters to be passed to the CV splitter. .. versionadded:: 1.4 Only available if , which can be set by using `Metadata Routing User Guide ` for more details. Returns ------- self : object Returns an instance of fitted model.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\linear_model\\_coordinate_descent.py",
    "ast_data": "FunctionDef name:fit arg:self arg:X arg:y arg:sample_weight arguments arg arg arg arg arg Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "log_abs_determinant",
    "source_code": "def log_abs_determinant(self, name='log_abs_det'):\n    if self.is_square is False:\n        raise NotImplementedError('Determinant not implemented for an operator that is expected to not be square.')\n    with self._name_scope(name):\n        return self._log_abs_determinant()",
    "docstring": "Log absolute value of determinant for every batch member. Args: name: A name for this . Returns: with shape and same as . Raises: NotImplementedError: If is .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\linalg\\linear_operator.py",
    "ast_data": "FunctionDef name:log_abs_determinant arg:self arg:name arguments arg arg If Compare Raise Call With Call Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "f5",
    "source_code": "def f5(x):\n    if x != 1:\n        return 1.0 / (1.0 - x)\n    return 0",
    "docstring": "Hyperbola with a pole at x=1, but pole replaced with 0. Not continuous at root.",
    "type": "function",
    "file_path": "scipy\\scipy\\optimize\\_tstutils.py",
    "ast_data": "FunctionDef name:f5 arg:x arguments arg If Compare Return return:yes Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "random_normal",
    "source_code": "@dispatch.add_dispatch_support\n@doc_controls.do_not_generate_docs\ndef random_normal(shape, mean=0.0, stddev=1.0, dtype=None, seed=None):\n    if dtype is None:\n        dtype = floatx()\n    if seed is None:\n        seed = np.random.randint(10000000.0)\n    return random_ops.random_normal(shape, mean=mean, stddev=stddev, dtype=dtype, seed=seed)",
    "docstring": "Returns a tensor with normal distribution of values. It is an alias to . Args: shape: A tuple of integers, the shape of tensor to create. mean: A float, the mean value of the normal distribution to draw samples. Default to 0.0. stddev: A float, the standard deviation of the normal distribution to draw samples. Default to 1.0. dtype: , dtype of returned tensor. Default to use Keras backend dtype which is float32. seed: Integer, random seed. Will use a random numpy integer when not specified. Returns: A tensor with normal distribution of values. Example: >>> random_normal_tensor = tf.keras.backend.random_normal(shape=(2,3), ... mean=0.0, stddev=1.0) >>> random_normal_tensor",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\backend.py",
    "ast_data": "FunctionDef name:random_normal arg:shape arg:mean arg:stddev arg:dtype arg:seed arguments arg arg arg arg arg If Compare Assign Call If Compare Assign Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "_find_tensors",
    "source_code": "def _find_tensors(obj):\n    if RPC_AVAILABLE and isinstance(obj, RRef):\n        if obj.is_owner():\n            return _find_tensors(obj.local_value())\n    if isinstance(obj, torch.Tensor):\n        return [obj]\n    if isinstance(obj, (list, tuple)):\n        return itertools.chain.from_iterable(map(_find_tensors, obj))\n    if isinstance(obj, dict):\n        return itertools.chain.from_iterable(map(_find_tensors, obj.values()))\n    if is_dataclass(obj):\n        return itertools.chain.from_iterable(map(_find_tensors, (getattr(obj, f.name) for f in fields(obj))))\n    return []",
    "docstring": "Recursively find all tensors contained in the specified object.",
    "type": "function",
    "file_path": "pytorch\\torch\\nn\\parallel\\distributed.py",
    "ast_data": "FunctionDef name:_find_tensors arg:obj arguments arg If BoolOp Call If Call Return return:yes Call Call If Call Return return:yes If Call Return return:yes Call Call If Call Return return:yes Call Call Call If Call Return return:yes Call Call Call Call Return return:no"
  },
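A trimmed, hedged sketch of the recursive traversal above, covering the tensor/list/tuple/dict cases (the RRef and dataclass branches are omitted; `find_tensors` here is an illustrative name, not the torch API):

```python
import torch

def find_tensors(obj):
    # Base case: a tensor is returned as a singleton list.
    if isinstance(obj, torch.Tensor):
        return [obj]
    # Recurse into sequence and mapping containers.
    if isinstance(obj, (list, tuple)):
        return [t for item in obj for t in find_tensors(item)]
    if isinstance(obj, dict):
        return [t for v in obj.values() for t in find_tensors(v)]
    return []

nested = {"a": [torch.zeros(1), (torch.ones(2), 3)], "b": "text"}
print(len(find_tensors(nested)))  # 2
```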
  {
    "library": "pygame",
    "name": "get_all",
    "source_code": "def get_all(mod: Any):\n    if hasattr(mod, '__all__') and isinstance(mod.__all__, list):\n        return sorted({str(i) for i in mod.__all__})\n    return [i for i in dir(mod) if not i.startswith('_')]",
    "docstring": "Get the attributes that are imported from 'mod' when 'from mod import *' First try to use '__all__' if it is defined, else fallback to 'dir'",
    "type": "function",
    "file_path": "pygame\\buildconfig\\stubs\\gen_stubs.py",
    "ast_data": "FunctionDef name:get_all arg:mod arguments arg If BoolOp Call Call Return return:yes Call Call Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "squash_mask",
    "source_code": "def squash_mask(self, *args, leave_parametrized=True, names=None, **kwargs):\n    if names is None:\n        names = list(self.data_groups.keys())\n    for name in names:\n        parametrize.remove_parametrizations(self._container, name, leave_parametrized=leave_parametrized)",
    "docstring": "Squashes the sparse masks into the appropriate tensors. Also, accepts list of strings to squash mask for. If none, squashes mask for all the keys kwargs: * names: list of strings to squash mask for * sparsified: if true - applies the mask before squashing if false - does not apply the mask before squashing",
    "type": "method",
    "file_path": "pytorch\\torch\\ao\\pruning\\_experimental\\data_sparsifier\\base_data_sparsifier.py",
    "ast_data": "FunctionDef name:squash_mask arg:self arguments arg arg arg arg arg If Compare Assign Call Call For Call"
  },
  {
    "library": "cherrypy",
    "name": "from_str",
    "source_code": "@classmethod\ndef from_str(cls, elementstr):\n    ival, params = cls.parse(elementstr)\n    return cls(ival, params)",
    "docstring": "Construct an instance from a string of the form 'token;key=val'.",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\lib\\httputil.py",
    "ast_data": "FunctionDef name:from_str arg:cls arg:elementstr arguments arg arg Assign Call Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "get_transform",
    "source_code": "def get_transform(self):\n    return self._transform",
    "docstring": "Return the associated with this scale.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\scale.py",
    "ast_data": "FunctionDef name:get_transform arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_eager_value",
    "source_code": "def _eager_value(self):\n    value = self.flat_values.numpy()\n    for row_splits in reversed(self.nested_row_splits):\n        value = ragged_tensor_value.RaggedTensorValue(value, row_splits.numpy())\n    return value",
    "docstring": "Returns a RaggedTensorValue for self. Requires self._is_eager()=true.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\ragged_tensor.py",
    "ast_data": "FunctionDef name:_eager_value arg:self arguments arg Assign Call For Call Assign Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_TensorIterator",
    "source_code": "class _TensorIterator(object):\n    __slots__ = ['_tensor', '_index', '_limit']\n\n    def __init__(self, tensor, dim0):\n        self._tensor = tensor\n        self._index = 0\n        self._limit = dim0\n\n    def __iter__(self):\n        return self\n\n    def __next__(self):\n        if self._index == self._limit:\n            raise StopIteration\n        result = self._tensor[self._index]\n        self._index += 1\n        return result\n    next = __next__",
    "docstring": "Iterates over the leading dim of a Tensor. Performs no error checks.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\tensor.py",
    "ast_data": "ClassDef name:_TensorIterator Assign FunctionDef name:__init__ arg:self arg:tensor arg:dim0 arguments arg arg arg Assign Assign Assign FunctionDef name:__iter__ arg:self arguments arg Return return:yes FunctionDef name:__next__ arg:self arguments arg If Compare Raise Assign Return return:yes Assign"
  },
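The same leading-dimension iterator protocol, sketched over a NumPy array to show the `__iter__`/`__next__` mechanics without TensorFlow (the class name is illustrative, not a library API):

```python
import numpy as np

class LeadingDimIterator:
    """Iterate over the leading dimension of an array; no error checks."""
    def __init__(self, arr):
        self._arr, self._i, self._limit = arr, 0, arr.shape[0]

    def __iter__(self):
        return self

    def __next__(self):
        if self._i == self._limit:
            raise StopIteration
        row = self._arr[self._i]
        self._i += 1
        return row

for row in LeadingDimIterator(np.arange(6).reshape(3, 2)):
    print(row)  # [0 1], then [2 3], then [4 5]
```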
  {
    "library": "scipy",
    "name": "time_convex_hull",
    "source_code": "def time_convex_hull(self, num_points, incremental):\n    ConvexHull(self.points, incremental)",
    "docstring": "Time scipy.spatial.ConvexHull over a range of input data sizes and settings.",
    "type": "method",
    "file_path": "scipy\\benchmarks\\benchmarks\\spatial.py",
    "ast_data": "FunctionDef name:time_convex_hull arg:self arg:num_points arg:incremental arguments arg arg arg Call"
  },
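For reference, a minimal use of the public API this benchmark exercises:

```python
import numpy as np
from scipy.spatial import ConvexHull

rng = np.random.default_rng(0)
points = rng.random((30, 2))
hull = ConvexHull(points, incremental=False)
print(hull.vertices)   # indices of the points forming the hull boundary
print(hull.simplices)  # hull facets, as index pairs in 2-D
```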
  {
    "library": "pytorch",
    "name": "checkpoint_wrapper",
    "source_code": "def checkpoint_wrapper(module: torch.nn.Module, checkpoint_impl: CheckpointImpl=CheckpointImpl.NO_REENTRANT, checkpoint_fn=None, **checkpoint_fn_kwargs) -> torch.nn.Module:\n    if checkpoint_impl == CheckpointImpl.REENTRANT:\n        warnings.warn(f'Please specify {CheckpointImpl.NO_REENTRANT} as {CheckpointImpl.REENTRANT} will soon be removed as the default and eventually deprecated.', FutureWarning, stacklevel=2)\n    return CheckpointWrapper(module, checkpoint_impl, checkpoint_fn, **checkpoint_fn_kwargs)",
    "docstring": "Wrap a module for activation checkpointing. If the module is wrapped with this function, all subsequent calls to the module will, automatically perform checkpointing without the user having to explicitly call `. checkpoint_fn (Optional[Callable]): Functional checkpoint implementation to use. If this is specified, it will be used over the default `checkpoint_implcheckpoint_fn`. Returns: (nn.Module): Wrapped module",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\algorithms\\_checkpoint\\checkpoint_wrapper.py",
    "ast_data": "FunctionDef name:checkpoint_wrapper arg:module arg:checkpoint_impl arg:checkpoint_fn arguments arg arg arg arg If Compare Call Return return:yes Call"
  },
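A hedged usage sketch of the wrapper above; `checkpoint_wrapper` lives under a private `torch.distributed` path, so the import is subject to change:

```python
import torch
from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import (
    checkpoint_wrapper,
)

# Wrap a block so its activations are recomputed during backward rather
# than stored during forward.
block = torch.nn.Sequential(torch.nn.Linear(8, 8), torch.nn.ReLU())
wrapped = checkpoint_wrapper(block)

x = torch.randn(2, 8, requires_grad=True)
wrapped(x).sum().backward()
print(x.grad.shape)  # torch.Size([2, 8])
```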
  {
    "library": "django",
    "name": "GenericViewError",
    "source_code": "class GenericViewError(Exception):\n    pass",
    "docstring": "A problem in a generic view.",
    "type": "class",
    "file_path": "django\\django\\views\\generic\\__init__.py",
    "ast_data": "ClassDef name:GenericViewError"
  },
  {
    "library": "pytorch",
    "name": "register_pytree_node",
    "source_code": "def register_pytree_node(cls: type[Any], flatten_fn: FlattenFunc, unflatten_fn: UnflattenFunc, *, serialized_type_name: Optional[str]=None, to_dumpable_context: Optional[ToDumpableContextFn]=None, from_dumpable_context: Optional[FromDumpableContextFn]=None, flatten_with_keys_fn: Optional[FlattenWithKeysFunc]=None) -> None:\n    if flatten_with_keys_fn is not None:\n        raise NotImplementedError('KeyPaths are not yet supported in cxx_pytree.')\n    _private_register_pytree_node(cls, flatten_fn, unflatten_fn, serialized_type_name=serialized_type_name, to_dumpable_context=to_dumpable_context, from_dumpable_context=from_dumpable_context)\n    python_pytree._private_register_pytree_node(cls, flatten_fn, unflatten_fn, serialized_type_name=serialized_type_name, to_dumpable_context=to_dumpable_context, from_dumpable_context=from_dumpable_context)",
    "docstring": "Register a container-like type as pytree node. Args: cls (type): A Python type to treat as an internal pytree node. flatten_fn (callable): A function to be used during flattening, taking an instance of `torch.exporttorch.export` right now. Example:: >>> # xdoctest: +SKIP >>> # Registry a Python type with lambda functions >>> register_pytree_node( ... set, ... lambda s: (sorted(s), None, None), ... lambda children, _: set(children), ... )",
    "type": "function",
    "file_path": "pytorch\\torch\\utils\\_cxx_pytree.py",
    "ast_data": "FunctionDef name:register_pytree_node arg:cls arg:flatten_fn arg:unflatten_fn arguments arg arg arg arg arg arg arg If Compare Raise Call Call Call"
  },
  {
    "library": "scipy",
    "name": "Alpine02",
    "source_code": "class Alpine02(Benchmark):\n    change_dimensionality = True\n\n    def __init__(self, dimensions=2):\n        Benchmark.__init__(self, dimensions)\n        self._bounds = list(zip([0.0] * self.N, [10.0] * self.N))\n        self.global_optimum = [[7.91705268, 4.81584232]]\n        self.fglob = -6.1295\n\n    def fun(self, x, *args):\n        self.nfev += 1\n        return prod(sqrt(x) * sin(x))",
    "docstring": "Alpine02 objective function. The Alpine02 [1]_ global optimization problem is a multimodal minimization problem defined as follows: .. math:: f_{\\text{Alpine02}(x) = \\prod_{i=1}^{n} \\sqrt{x_i} \\sin(x_i) Here, :math: represents the number of dimensions and :math: for :math:. *Global optimum*: :math: for :math: for :math: .. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions For Global Optimization Problems Int. Journal of Mathematical Modelling and Numerical Optimisation, 2013, 4, 150-194. TODO: eqn 7 in [1]_ has the wrong global minimum value.",
    "type": "class",
    "file_path": "scipy\\benchmarks\\benchmarks\\go_benchmark_functions\\go_funcs_A.py",
    "ast_data": "ClassDef name:Alpine02 Assign FunctionDef name:__init__ arg:self arg:dimensions arguments arg arg Call Assign Call Call Assign Assign FunctionDef name:fun arg:self arg:x arguments arg arg arg Return return:yes Call Call Call"
  },
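Evaluating the Alpine02 formula from the docstring at the reported 2-D optimum reproduces `fglob`; a short self-contained check:

```python
import numpy as np

def alpine02(x):
    # f(x) = prod_i sqrt(x_i) * sin(x_i), per the docstring above.
    x = np.asarray(x)
    return np.prod(np.sqrt(x) * np.sin(x))

x_star = np.array([7.91705268, 4.81584232])
print(alpine02(x_star))  # ~ -6.1295, matching self.fglob
```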
  {
    "library": "kornia",
    "name": "psnr_loss",
    "source_code": "def psnr_loss(image: torch.Tensor, target: torch.Tensor, max_val: float) -> torch.Tensor:\n    return -1.0 * metrics.psnr(image, target, max_val)",
    "docstring": "Compute the PSNR loss. The loss is computed as follows: .. math:: \\text{loss} = -\\text{psnr(x, y)} See :meth: for details abut PSNR. Args: image: the input image with shape :math:. target : the labels image with shape :math:. max_val: The maximum value in the image tensor. Return: the computed loss as a scalar. Examples: >>> ones = torch.ones(1) >>> psnr_loss(ones, 1.2 * ones, 2.) # 10 * log(4/((1.2-1)**2)) / log(10) tensor(-20.0000)",
    "type": "function",
    "file_path": "kornia\\kornia\\losses\\psnr.py",
    "ast_data": "FunctionDef name:psnr_loss arg:image arg:target arg:max_val arguments arg arg arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_handle_or_self",
    "source_code": "def _handle_or_self(x):\n    if resource_variable_ops.is_resource_variable(x):\n        return x.handle\n    return x",
    "docstring": "Unwrap resource variable/ndarray to return tensors.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\backprop.py",
    "ast_data": "FunctionDef name:_handle_or_self arg:x arguments arg If Call Return return:yes Return return:yes"
  },
  {
    "library": "scipy",
    "name": "get_distribution_names",
    "source_code": "def get_distribution_names(namespace_pairs, rv_base_class):\n    distn_names = []\n    distn_gen_names = []\n    for name, value in namespace_pairs:\n        if name.startswith('_'):\n            continue\n        if name.endswith('_gen') and issubclass(value, rv_base_class):\n            distn_gen_names.append(name)\n        if isinstance(value, rv_base_class):\n            distn_names.append(name)\n    return (distn_names, distn_gen_names)",
    "docstring": "Collect names of statistical distributions and their generators. Parameters ---------- namespace_pairs : sequence A snapshot of (name, value) pairs in the namespace of a module. rv_base_class : class The base class of random variable generator classes in a module. Returns ------- distn_names : list of strings Names of the statistical distributions. distn_gen_names : list of strings Names of the generators of the statistical distributions. Note that these are not simply the names of the statistical distributions, with a _gen suffix added.",
    "type": "function",
    "file_path": "scipy\\scipy\\stats\\_distn_infrastructure.py",
    "ast_data": "FunctionDef name:get_distribution_names arg:namespace_pairs arg:rv_base_class arguments arg arg Assign Assign For If Call If BoolOp Call Call Call If Call Call Return return:yes"
  },
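The collection rule above (skip underscore-prefixed names, `*_gen` subclasses go in one list, instances of the base class in the other) shown on a toy namespace; all names here are made up for illustration:

```python
class rv_base:            # stand-in for a module's rv base class
    pass

class norm_gen(rv_base):  # generator class: name ends in '_gen'
    pass

norm = norm_gen()         # distribution instance

pairs = [("norm_gen", norm_gen), ("norm", norm), ("_hidden", object())]
distn_names, distn_gen_names = [], []
for name, value in pairs:
    if name.startswith("_"):
        continue
    if name.endswith("_gen") and isinstance(value, type) and issubclass(value, rv_base):
        distn_gen_names.append(name)
    if isinstance(value, rv_base):
        distn_names.append(name)
print(distn_names, distn_gen_names)  # ['norm'] ['norm_gen']
```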
  {
    "library": "tensorflow",
    "name": "count_up_to",
    "source_code": "@deprecated(None, 'Prefer Dataset.range instead.')\ndef count_up_to(self, limit):\n    raise NotImplementedError",
    "docstring": "Increments this variable until it reaches . When that Op is run it tries to increment the variable by . If incrementing the variable would bring it above then the Op raises the exception . If no error is raised, the Op outputs the value of the variable before the increment. This is essentially a shortcut for . Args: limit: value at which incrementing the variable raises an error. Returns: A that will hold the variable value before the increment. If no other Op modifies this variable, the values produced will all be distinct.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\variables.py",
    "ast_data": "FunctionDef name:count_up_to arg:self arg:limit arguments arg arg Raise Call"
  },
  {
    "library": "django",
    "name": "mark_expected_failures_and_skips",
    "source_code": "def mark_expected_failures_and_skips(self):\n    from unittest import expectedFailure, skip\n    for test_name in self.connection.features.django_test_expected_failures:\n        test_case_name, _, test_method_name = test_name.rpartition('.')\n        test_app = test_name.split('.')[0]\n        if test_app in settings.INSTALLED_APPS:\n            test_case = import_string(test_case_name)\n            test_method = getattr(test_case, test_method_name)\n            setattr(test_case, test_method_name, expectedFailure(test_method))\n    for reason, tests in self.connection.features.django_test_skips.items():\n        for test_name in tests:\n            test_case_name, _, test_method_name = test_name.rpartition('.')\n            test_app = test_name.split('.')[0]\n            if test_app in settings.INSTALLED_APPS:\n                test_case = import_string(test_case_name)\n                test_method = getattr(test_case, test_method_name)\n                setattr(test_case, test_method_name, skip(reason)(test_method))",
    "docstring": "Mark tests in Django's test suite which are expected failures on this database and test which should be skipped on this database.",
    "type": "method",
    "file_path": "django\\django\\db\\backends\\base\\creation.py",
    "ast_data": "FunctionDef name:mark_expected_failures_and_skips arg:self arguments arg For Assign Call Assign Call If Compare Assign Call Assign Call Call Call For Call For Assign Call Assign Call If Compare Assign Call Assign Call Call Call Call"
  },
  {
    "library": "matplotlib",
    "name": "polynomial_coefficients",
    "source_code": "@property\ndef polynomial_coefficients(self):\n    n = self.degree\n    if n > 10:\n        warnings.warn('Polynomial coefficients formula unstable for high order Bezier curves!', RuntimeWarning)\n    P = self.control_points\n    j = np.arange(n + 1)[:, None]\n    i = np.arange(n + 1)[None, :]\n    prefactor = (-1) ** (i + j) * _comb(j, i)\n    return _comb(n, j) * prefactor @ P",
    "docstring": "The polynomial coefficients of the Bézier curve. .. warning:: Follows opposite convention from . Returns ------- (n+1, d) array Coefficients after expanding in polynomial basis, where :math: is the degree of the Bézier curve and :math: its dimension. These are the numbers (:math:) such that the curve can be written :math:. Notes ----- The coefficients are calculated as .. math:: {n \\choose j} \\sum_{i=0}^j (-1)^{i+j} {j \\choose i} P_i where :math: are the control points of the curve.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\bezier.py",
    "ast_data": "FunctionDef name:polynomial_coefficients arg:self arguments arg Assign If Compare Call Assign Assign Call Assign Call Assign Call Return return:yes Call"
  },
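The coefficient formula in the docstring can be checked with a few lines of NumPy; this hypothetical `bezier_poly_coeffs` mirrors the property above, using `math.comb` in place of matplotlib's private `_comb`:

```python
import numpy as np
from math import comb

def bezier_poly_coeffs(P):
    # C_j = comb(n, j) * sum_i (-1)**(i+j) * comb(j, i) * P_i
    P = np.asarray(P, dtype=float)
    n = len(P) - 1
    j = np.arange(n + 1)[:, None]
    i = np.arange(n + 1)[None, :]
    comb_ji = np.vectorize(comb)(j, i)  # comb(j, i) == 0 when i > j
    prefactor = (-1.0) ** (i + j) * comb_ji
    return np.vectorize(comb)(n, j) * prefactor @ P

# Quadratic Bezier through (0, 1, 0): B(t) = 2t - 2t**2.
print(bezier_poly_coeffs([[0.0], [1.0], [0.0]]))  # [[0.], [2.], [-2.]]
```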
  {
    "library": "tensorflow",
    "name": "outgoing_edges",
    "source_code": "@property\ndef outgoing_edges(self):\n    return self._outgoing_edges",
    "docstring": "The list of edges starting at this Convertible.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\convert_to_constants.py",
    "ast_data": "FunctionDef name:outgoing_edges arg:self arguments arg Return return:yes"
  },
  {
    "library": "scrapy",
    "name": "send_catch_log_async",
    "source_code": "async def send_catch_log_async(signal: TypingAny=Any, sender: TypingAny=Anonymous, *arguments: TypingAny, **named: TypingAny) -> list[tuple[TypingAny, TypingAny]]:\n    return await maybe_deferred_to_future(send_catch_log_deferred(signal, sender, *arguments, **named))",
    "docstring": "Like :func: but supports :ref:. Returns a coroutine that completes once all signal handlers have finished.",
    "type": "function",
    "file_path": "scrapy\\scrapy\\utils\\signal.py",
    "ast_data": "AsyncFunctionDef name:send_catch_log_async arg:signal arg:sender arguments arg arg arg arg Return return:yes Call Call"
  },
  {
    "library": "scikit-learn",
    "name": "make_union",
    "source_code": "def make_union(*transformers, n_jobs=None, verbose=False, verbose_feature_names_out=True):\n    return FeatureUnion(_name_estimators(transformers), n_jobs=n_jobs, verbose=verbose, verbose_feature_names_out=verbose_feature_names_out)",
    "docstring": "Construct a :class: from the given transformers. This is a shorthand for the :class: constructor; it does not require, and does not permit, naming the transformers. Instead, they will be given names automatically based on their types. It also does not allow weighting. Parameters ---------- *transformers : list of estimators One or more estimators. n_jobs : int, default=None Number of jobs to run in parallel. `joblib.parallel_backendGlossary n_jobsget_feature_names_outFeatureUnion` object for concatenating the results of multiple transformer objects. See Also -------- FeatureUnion : Class for concatenating the results of multiple transformer objects. Examples -------- >>> from sklearn.decomposition import PCA, TruncatedSVD >>> from sklearn.pipeline import make_union >>> make_union(PCA(), TruncatedSVD()) FeatureUnion(transformer_list=[('pca', PCA()), ('truncatedsvd', TruncatedSVD())])",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\pipeline.py",
    "ast_data": "FunctionDef name:make_union arguments arg arg arg arg Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, matrix, is_non_singular=None, is_self_adjoint=None, is_positive_definite=None, is_square=None, name='LinearOperatorFullMatrix'):\n    parameters = dict(matrix=matrix, is_non_singular=is_non_singular, is_self_adjoint=is_self_adjoint, is_positive_definite=is_positive_definite, is_square=is_square, name=name)\n    with ops.name_scope(name, values=[matrix]):\n        self._matrix = linear_operator_util.convert_nonref_to_tensor(matrix, name='matrix')\n        self._check_matrix(self._matrix)\n        super(LinearOperatorFullMatrix, self).__init__(dtype=self._matrix.dtype, is_non_singular=is_non_singular, is_self_adjoint=is_self_adjoint, is_positive_definite=is_positive_definite, is_square=is_square, parameters=parameters, name=name)",
    "docstring": "Initialize a . Args: matrix: Shape with , . Allowed dtypes: , , , , . is_non_singular: Expect that this operator is non-singular. is_self_adjoint: Expect that this operator is equal to its hermitian transpose. is_positive_definite: Expect that this operator is positive definite, meaning the quadratic form has positive real part for all nonzero . Note that we do not require the operator to be self-adjoint to be positive-definite. See: is_square: Expect that this operator acts like square [batch] matrices. name: A name for this . Raises: TypeError: If is not an allowed type.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\linalg\\linear_operator_full_matrix.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:matrix arg:is_non_singular arg:is_self_adjoint arg:is_positive_definite arg:is_square arg:name arguments arg arg arg arg arg arg arg Assign Call With Call Assign Call Call Call Call"
  },
  {
    "library": "scipy",
    "name": "to_tf",
    "source_code": "def to_tf(self):\n    return copy.deepcopy(self)",
    "docstring": "Return a copy of the current system. Returns ------- sys : instance of The current system (copy)",
    "type": "method",
    "file_path": "scipy\\scipy\\signal\\_ltisys.py",
    "ast_data": "FunctionDef name:to_tf arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "django",
    "name": "ifchanged",
    "source_code": "@register.tag\ndef ifchanged(parser, token):\n    bits = token.split_contents()\n    nodelist_true = parser.parse(('else', 'endifchanged'))\n    token = parser.next_token()\n    if token.contents == 'else':\n        nodelist_false = parser.parse(('endifchanged',))\n        parser.delete_first_token()\n    else:\n        nodelist_false = NodeList()\n    values = [parser.compile_filter(bit) for bit in bits[1:]]\n    return IfChangedNode(nodelist_true, nodelist_false, *values)",
    "docstring": "Check if a value has changed from the last iteration of a loop. The `` block tag is used within a loop. It has two possible uses. 1. Check its own rendered contents against its previous state and only displays the content if it has changed. For example, this displays a list of days, only displaying the month if it changes:: Archive for {{ year }} {% for date in days %} {% ifchanged %}{{ date|date:\"F\" }}{% endifchanged %} {{ date|date:\"j\" }} {% endfor %} 2. If given one or more variables, check whether any variable has changed. For example, the following shows the date every time it changes, while showing the hour if either the hour or the date has changed:: {% for date in days %} {% ifchanged date.date %} {{ date.date }} {% endifchanged %} {% ifchanged date.hour date.date %} {{ date.hour }} {% endifchanged %} {% endfor %}",
    "type": "function",
    "file_path": "django\\django\\template\\defaulttags.py",
    "ast_data": "FunctionDef name:ifchanged arg:parser arg:token arguments arg arg Assign Call Assign Call Assign Call If Compare Assign Call Call Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "predict",
    "source_code": "def predict(self, X):\n    check_is_fitted(self)\n    if np.isclose(self.class_prior_, 1 / len(self.classes_)).all():\n        ensure_all_finite = 'allow-nan' if get_tags(self).input_tags.allow_nan else True\n        X = validate_data(self, X, ensure_all_finite=ensure_all_finite, accept_sparse='csr', reset=False)\n        return self.classes_[pairwise_distances_argmin(X, self.centroids_, metric=self.metric)]\n    else:\n        return super().predict(X)",
    "docstring": "Perform classification on an array of test vectors . The predicted class for each sample in is returned. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) Input data. Returns ------- y_pred : ndarray of shape (n_samples,) The predicted classes.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\neighbors\\_nearest_centroid.py",
    "ast_data": "FunctionDef name:predict arg:self arg:X arguments arg arg Call If Call Call Call Assign Call Assign Call Return return:yes Call Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "parents",
    "source_code": "@property\ndef parents(self):\n    return [self.categorical_column, self.weight_feature_key]",
    "docstring": "See 'FeatureColumn` base class.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\feature_column\\feature_column_v2.py",
    "ast_data": "FunctionDef name:parents arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "is_generic_tuple",
    "source_code": "def is_generic_tuple(tp):\n    return tp not in (tuple, typing.Tuple) and getattr(tp, '__origin__', None) in (tuple, typing.Tuple)",
    "docstring": "Returns true if is a parameterized typing.Tuple value.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\util\\type_annotations.py",
    "ast_data": "FunctionDef name:is_generic_tuple arg:tp arguments arg Return return:yes BoolOp Compare Compare Call"
  },
  {
    "library": "pytorch",
    "name": "is_compiling",
    "source_code": "def is_compiling() -> bool:\n    warnings.warn('`torch._utils.is_compiling` is deprecated. Use `torch.compiler.is_compiling` instead.', stacklevel=2)\n    return torch.compiler.is_compiling()",
    "docstring": "Indicates whether we are tracing/compiling with torch.compile() or torch.export().",
    "type": "function",
    "file_path": "pytorch\\torch\\_utils.py",
    "ast_data": "FunctionDef name:is_compiling arguments Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_to_tensor_shape",
    "source_code": "def _to_tensor_shape(self) -> tensor_shape.TensorShape:\n    lengths = self.static_lengths(ragged_lengths=False)\n    if not lengths:\n        return tensor_shape.TensorShape(())\n    if lengths[-1] == Ellipsis:\n        return tensor_shape.TensorShape(None)\n    return tensor_shape.TensorShape(lengths)",
    "docstring": "Returns a TensorShape representation of the shape.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\dynamic_ragged_shape.py",
    "ast_data": "FunctionDef name:_to_tensor_shape arg:self arguments arg Assign Call If Return return:yes Call If Compare Return return:yes Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "is_distributed_variable",
    "source_code": "def is_distributed_variable(v):\n    return isinstance(v, values_lib.DistributedValues) and isinstance(v, variables.Variable)",
    "docstring": "Returns whether is a distributed variable.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\distribute\\distributed_training_utils.py",
    "ast_data": "FunctionDef name:is_distributed_variable arg:v arguments arg Return return:yes BoolOp Call Call"
  },
  {
    "library": "tensorflow",
    "name": "last_step_outputs",
    "source_code": "@property\ndef last_step_outputs(self):\n    return self._last_step_outputs",
    "docstring": "A dictionary consisting of outputs to be captured on last step. Keys in the dictionary are names of tensors to be captured, as specified when is called. Values in the dictionary are the tensors themselves. If was called with a for this output, then the value is the reduced value. Returns: A dictionary with last step outputs.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\input_lib.py",
    "ast_data": "FunctionDef name:last_step_outputs arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "assign",
    "source_code": "def assign(self, value, use_locking=False, name=None, read_value=True):\n    assign = state_ops.assign(self._variable, value, use_locking=use_locking, name=name)\n    if read_value:\n        return assign\n    return assign.op",
    "docstring": "Assigns a new value to the variable. This is essentially a shortcut for . Args: value: A . The new value for this variable. use_locking: If , use locking during the assignment. name: The name of the operation to be created read_value: if True, will return something which evaluates to the new value of the variable; if False will return the assign op. Returns: A that will hold the new value of this variable after the assignment has completed.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\ref_variable.py",
    "ast_data": "FunctionDef name:assign arg:self arg:value arg:use_locking arg:name arg:read_value arguments arg arg arg arg arg Assign Call If Return return:yes Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "get_wrap",
    "source_code": "def get_wrap(self):\n    return self._wrap",
    "docstring": "Return whether the text can be wrapped.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\text.py",
    "ast_data": "FunctionDef name:get_wrap arg:self arguments arg Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "_text",
    "source_code": "@staticmethod\ndef _text(value):\n    if isinstance(value, bytes):\n        value = value.decode(encoding='utf-8')\n    elif not isinstance(value, str):\n        value = str(value)\n    return value",
    "docstring": "Convert text values into utf-8 or ascii strings.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\category.py",
    "ast_data": "FunctionDef name:_text arg:value arguments arg If Call Assign Call If Call Assign Call Return return:yes"
  },
  {
    "library": "kornia",
    "name": "load_pointcloud_ply",
    "source_code": "def load_pointcloud_ply(filename: str, header_size: int=8) -> torch.Tensor:\n    if not isinstance(filename, str) and filename[-3:] == '.ply':\n        raise TypeError(f'Input filename must be a string in with the .ply  extension. Got {filename}')\n    if not os.path.isfile(filename):\n        raise ValueError('Input filename is not an existing file.')\n    if not (isinstance(header_size, int) and header_size > 0):\n        raise TypeError(f'Input header_size must be a positive integer. Got {header_size}.')\n    with open(filename) as f:\n        points = []\n        lines = f.readlines()[header_size:]\n        for line in lines:\n            x_str, y_str, z_str = line.split()\n            points.append((torch.tensor(float(x_str)), torch.tensor(float(y_str)), torch.tensor(float(z_str))))\n        pointcloud: torch.Tensor = torch.tensor(points)\n        return pointcloud",
    "docstring": "Load from disk a pointcloud in PLY format. Args: filename: the path to the pointcloud. header_size: the size of the ply file header that will be skipped during loading. Return: tensor containing the loaded point with shape :math: where :math: represents the number of points.",
    "type": "function",
    "file_path": "kornia\\kornia\\utils\\pointcloud_io.py",
    "ast_data": "FunctionDef name:load_pointcloud_ply arg:filename arg:header_size arguments arg arg If BoolOp Call Compare Raise Call If Call Raise Call If BoolOp Call Compare Raise Call With Call Assign Assign Call For Assign Call Call Call Call Call Call Call Call Call Return return:yes"
  },
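The loader above simply skips a fixed-size header and parses whitespace-separated x/y/z rows. A dependency-free sketch of the same round trip, using the default 8-line header:

```python
import os
import tempfile

# Build a minimal ASCII .ply with the 8-line header the loader skips.
header = [
    "ply", "format ascii 1.0", "comment demo", "element vertex 2",
    "property float x", "property float y", "property float z",
    "end_header",
]
rows = ["0.0 0.0 0.0", "1.0 2.0 3.0"]
with tempfile.NamedTemporaryFile("w", suffix=".ply", delete=False) as f:
    f.write("\n".join(header + rows) + "\n")
    path = f.name

with open(path) as f:
    lines = f.readlines()[8:]  # skip the header, as in the loader above
points = [tuple(map(float, ln.split())) for ln in lines]
print(points)  # [(0.0, 0.0, 0.0), (1.0, 2.0, 3.0)]
os.remove(path)
```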
  {
    "library": "pytorch",
    "name": "_create_grad_send_info",
    "source_code": "def _create_grad_send_info(self, args_recv_info: tuple) -> list[Optional[int]]:\n    grad_send_info: list[Optional[int]] = []\n\n    def map_recv_to_send(a):\n        if isinstance(a, _RecvInfo):\n            grad_send_info.append(a.source)\n            return a.source\n        else:\n            grad_send_info.append(None)\n            return None\n    map_aggregate(args_recv_info, map_recv_to_send)\n    logger.debug('%s Grad send info: %s', self.log_prefix, grad_send_info)\n    return grad_send_info",
    "docstring": "Create a list of stage indices to send gradients to.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\pipelining\\stage.py",
    "ast_data": "FunctionDef name:_create_grad_send_info arg:self arg:args_recv_info arguments arg arg FunctionDef name:map_recv_to_send arg:a arguments arg If Call Call Return return:yes Call Return return:no Call Call Return return:yes"
  },
  {
    "library": "cryptography",
    "name": "private_bytes_raw",
    "source_code": "@abc.abstractmethod\ndef private_bytes_raw(self) -> bytes:\n    pass",
    "docstring": "The raw bytes of the private key. Equivalent to private_bytes(Raw, Raw, NoEncryption()).",
    "type": "method",
    "file_path": "cryptography\\src\\cryptography\\hazmat\\primitives\\asymmetric\\x25519.py",
    "ast_data": "FunctionDef name:private_bytes_raw arg:self arguments arg"
  },
  {
    "library": "tensorflow",
    "name": "_min_matrix_dim",
    "source_code": "def _min_matrix_dim(self):\n    domain_dim = self.domain_dimension.value\n    range_dim = self.range_dimension.value\n    if domain_dim is None or range_dim is None:\n        return None\n    return min(domain_dim, range_dim)",
    "docstring": "Minimum of domain/range dimension, if statically available, else None.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\linalg\\linear_operator_zeros.py",
    "ast_data": "FunctionDef name:_min_matrix_dim arg:self arguments arg Assign Assign If BoolOp Compare Compare Return return:no Return return:yes Call"
  },
  {
    "library": "django",
    "name": "proj_version_tuple",
    "source_code": "def proj_version_tuple(self):\n    proj_regex = re.compile('(\\\\d+)\\\\.(\\\\d+)\\\\.(\\\\d+)')\n    proj_ver_str = self.postgis_proj_version()\n    m = proj_regex.search(proj_ver_str)\n    if m:\n        return tuple(map(int, m.groups()))\n    else:\n        raise Exception('Could not determine PROJ version from PostGIS.')",
    "docstring": "Return the version of PROJ used by PostGIS as a tuple of the major, minor, and subminor release numbers.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\db\\backends\\postgis\\operations.py",
    "ast_data": "FunctionDef name:proj_version_tuple arg:self arguments arg Assign Call Assign Call Assign Call If Return return:yes Call Call Call Raise Call"
  },
  {
    "library": "tensorflow",
    "name": "get_all_v2_names",
    "source_code": "def get_all_v2_names():\n    v2_names = set()\n\n    def visit(unused_path, unused_parent, children):\n        for child in children:\n            _, attr = tf_decorator.unwrap(child[1])\n            api_names_v2 = tf_export.get_v2_names(attr)\n            for name in api_names_v2:\n                v2_names.add(name)\n    visitor = public_api.PublicAPIVisitor(visit)\n    visitor.do_not_descend_map['tf'].append('contrib')\n    visitor.private_map['tf.compat'] = ['v1', 'v2']\n    traverse.traverse(tf.compat.v2, visitor)\n    return v2_names",
    "docstring": "Get a set of function/class names available in TensorFlow 2.0.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\tools\\compatibility\\update\\generate_v2_renames_map.py",
    "ast_data": "FunctionDef name:get_all_v2_names arguments Assign Call FunctionDef name:visit arg:unused_path arg:unused_parent arg:children arguments arg arg arg For Assign Call Assign Call For Call Assign Call Call Assign Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "max_num_shards",
    "source_code": "def max_num_shards(self) -> int:\n    return max((strategy.output_spec.num_shards for strategy in self.strategies))",
    "docstring": "Returns the max number of shards across all placement strategies",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\tensor\\_op_schema.py",
    "ast_data": "FunctionDef name:max_num_shards arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "__init__",
    "source_code": "def __init__(self, data=None):\n    self._mapping = OrderedDict()\n    self._counter = itertools.count()\n    if data is not None:\n        self.update(data)",
    "docstring": "Create mapping between unique categorical values and integer ids. Parameters ---------- data : iterable sequence of string values",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\category.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:data arguments arg arg Assign Call Assign Call If Compare Call"
  },
  {
    "library": "pytorch",
    "name": "merge",
    "source_code": "@staticmethod\ndef merge(measurements: Iterable['Measurement']) -> list['Measurement']:\n    grouped_measurements: collections.defaultdict[TaskSpec, list[Measurement]] = collections.defaultdict(list)\n    for m in measurements:\n        grouped_measurements[m.task_spec].append(m)\n\n    def merge_group(task_spec: TaskSpec, group: list['Measurement']) -> 'Measurement':\n        times: list[float] = []\n        for m in group:\n            times.extend(m.times)\n        return Measurement(number_per_run=1, raw_times=times, task_spec=task_spec, metadata=None)\n    return [merge_group(t, g) for t, g in grouped_measurements.items()]",
    "docstring": "Convenience method for merging replicates. Merge will extrapolate times to and will not transfer any metadata. (Since it might differ between replicates)",
    "type": "method",
    "file_path": "pytorch\\torch\\utils\\benchmark\\utils\\common.py",
    "ast_data": "FunctionDef name:merge arg:measurements arguments arg Call For Call FunctionDef name:merge_group arg:task_spec arg:group arguments arg arg For Call Return return:yes Call Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "Hardswish",
    "source_code": "class Hardswish(torch.nn.Hardswish):\n\n    def __init__(self, scale, zero_point, device=None, dtype=None):\n        factory_kwargs = {'device': device, 'dtype': dtype}\n        super().__init__()\n        self.register_buffer('scale', torch.tensor(scale, **factory_kwargs))\n        self.register_buffer('zero_point', torch.tensor(zero_point, **factory_kwargs))\n\n    def forward(self, input):\n        return torch.ops.quantized.hardswish(input, self.scale, self.zero_point)\n\n    def _get_name(self):\n        return 'QuantizedHardswish'\n\n    @staticmethod\n    def from_float(mod, use_precomputed_fake_quant=False):\n        scale, zero_point = mod.activation_post_process.calculate_qparams()\n        return Hardswish(float(scale), int(zero_point))\n\n    @classmethod\n    def from_reference(cls, mod, scale, zero_point):\n        return cls(float(scale), int(zero_point))",
    "docstring": "This is the quantized version of :class:. Args: scale: quantization scale of the output tensor zero_point: quantization zero point of the output tensor",
    "type": "class",
    "file_path": "pytorch\\torch\\ao\\nn\\quantized\\modules\\activation.py",
    "ast_data": "ClassDef name:Hardswish FunctionDef name:__init__ arg:self arg:scale arg:zero_point arg:device arg:dtype arguments arg arg arg arg arg Assign Call Call Call Call Call Call FunctionDef name:forward arg:self arg:input arguments arg arg Return return:yes Call FunctionDef name:_get_name arg:self arguments arg Return return:yes FunctionDef name:from_float arg:mod arg:use_precomputed_fake_quant arguments arg arg Assign Call Return return:yes Call Call Call FunctionDef name:from_reference arg:cls arg:mod arg:scale arg:zero_point arguments arg arg arg arg Return return:yes Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "parse_example_spec",
    "source_code": "@property\ndef parse_example_spec(self):\n    return {self.key: parsing_ops.VarLenFeature(self.dtype)}",
    "docstring": "See base class.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\feature_column\\feature_column_v2.py",
    "ast_data": "FunctionDef name:parse_example_spec arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "cherrypy",
    "name": "__init__",
    "source_code": "def __init__(self, bus, umask=None, uid=None, gid=None):\n    SimplePlugin.__init__(self, bus)\n    self.finalized = False\n    self.uid = uid\n    self.gid = gid\n    self.umask = umask",
    "docstring": "Initialize the privilege dropping plugin.",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\process\\plugins.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:bus arg:umask arg:uid arg:gid arguments arg arg arg arg arg Call Assign Assign Assign Assign"
  },
  {
    "library": "virtualenv",
    "name": "PyPy3Posix",
    "source_code": "class PyPy3Posix(PyPy3, PosixSupports):\n\n    @classmethod\n    def _shared_libs(cls, python_dir):\n        return python_dir.glob('libpypy3*.*')\n\n    def to_lib(self, src):\n        return self.dest / 'lib' / src.name\n\n    @classmethod\n    def sources(cls, interpreter):\n        yield from super().sources(interpreter)\n        if interpreter.system_prefix == '/usr':\n            return\n        host_lib = Path(interpreter.system_prefix) / 'lib'\n        stdlib = Path(interpreter.system_stdlib)\n        if host_lib.exists() and host_lib.is_dir():\n            for path in host_lib.iterdir():\n                if stdlib == path:\n                    continue\n                yield PathRefToDest(path, dest=cls.to_lib)",
    "docstring": "PyPy 3 on POSIX.",
    "type": "class",
    "file_path": "virtualenv\\src\\virtualenv\\create\\via_global_ref\\builtin\\pypy\\pypy3.py",
    "ast_data": "ClassDef name:PyPy3Posix FunctionDef name:_shared_libs arg:cls arg:python_dir arguments arg arg Return return:yes Call FunctionDef name:to_lib arg:self arg:src arguments arg arg Return return:yes FunctionDef name:sources arg:cls arg:interpreter arguments arg arg Call Call If Compare Return return:no Assign Call Assign Call If BoolOp Call Call For Call If Compare Call"
  },
  {
    "library": "pytorch",
    "name": "check_cacheable",
    "source_code": "def check_cacheable(gm: torch.fx.GraphModule):\n    nodes = gm.graph.nodes\n    if torch._inductor.config.freezing:\n        raise BypassAOTAutogradCache('Cannot cache a graph with freezing enabled')\n    if not (torch._inductor.config.fx_graph_cache or should_use_remote_fx_graph_cache()):\n        raise BypassAOTAutogradCache('FX graph cache is not enabled')\n    tracing_context = torch._guards.TracingContext.try_get()\n    if tracing_context and tracing_context.fakify_first_call:\n        raise BypassAOTAutogradCache(\"Won't cache a graph with fakify_first_call enabled\")\n    for node in nodes:\n        check_node_safe(node)\n    if hasattr(gm, 'saved_tensors_hooks_pack_0'):\n        check_cacheable(gm.saved_tensors_hooks_pack_0)\n        check_cacheable(gm.saved_tensors_hooks_unpack_0)",
    "docstring": "Checks that the graph module only uses supported operators",
    "type": "function",
    "file_path": "pytorch\\torch\\_functorch\\_aot_autograd\\autograd_cache.py",
    "ast_data": "FunctionDef name:check_cacheable arg:gm arguments arg Assign If Raise Call If BoolOp Call Raise Call Assign Call If BoolOp Raise Call For Call If Call Call Call"
  },
  {
    "library": "kornia",
    "name": "save",
    "source_code": "def save(self, images: Union[Tensor, list[Tensor]], semantic_masks: Optional[Union[Tensor, list[Tensor]]]=None, directory: Optional[str]=None, output_type: str='torch', colormap: str='random', manual_seed: int=2147) -> None:\n    colored_masks = self.visualize(images, semantic_masks, output_type, colormap=colormap, manual_seed=manual_seed)\n    overlaid: Union[Tensor, list[Tensor]]\n    if isinstance(images, Tensor) and isinstance(colored_masks, Tensor):\n        overlaid = kornia.enhance.add_weighted(images, 0.5, colored_masks, 0.5, 1.0)\n    elif isinstance(images, (list, tuple)) and isinstance(colored_masks, (list, tuple)):\n        overlaid = []\n        for i in range(len(images)):\n            overlaid.append(kornia.enhance.add_weighted(images[i][None], 0.5, colored_masks[i][None], 0.5, 1.0)[0])\n    else:\n        raise ValueError(f'`images` should be a Tensor or a list of Tensors. Got {type(images)}')\n    self._save_outputs(images, directory, suffix='_src')\n    self._save_outputs(colored_masks, directory, suffix='_mask')\n    self._save_outputs(overlaid, directory, suffix='_overlay')",
    "docstring": "Save the segmentation results. Args: images: If list of RGB images. Each image is a Tensor with shape :math:. If Tensor, a Tensor with shape :math:. semantic_masks: If list of segmentation masks. Each mask is a Tensor with shape :math:. If Tensor, a Tensor with shape :math:. directory: The directory to save the results. output_type: The type of output, can be \"torch\" or \"PIL\". colormap: The colormap to use, can be \"random\" or a custom color map. manual_seed: The manual seed to use for the colormap.",
    "type": "method",
    "file_path": "kornia\\kornia\\models\\segmentation\\base.py",
    "ast_data": "FunctionDef name:save arg:self arg:images arg:semantic_masks arg:directory arg:output_type arg:colormap arg:manual_seed arguments arg arg arg arg arg arg arg Assign Call If BoolOp Call Call Assign Call If BoolOp Call Call Assign For Call Call Call Call Raise Call Call Call Call Call"
  },
  {
    "library": "django",
    "name": "__init__",
    "source_code": "def __init__(self, token_type, contents, position=None, lineno=None):\n    self.token_type = token_type\n    self.contents = contents\n    self.lineno = lineno\n    self.position = position",
    "docstring": "A token representing a string from the template. token_type A TokenType, either .TEXT, .VAR, .BLOCK, or .COMMENT. contents The token source string. position An optional tuple containing the start and end index of the token in the template source. This is used for traceback information when debug is on. lineno The line number the token appears on in the template source. This is used for traceback information and gettext files.",
    "type": "method",
    "file_path": "django\\django\\template\\base.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:token_type arg:contents arg:position arg:lineno arguments arg arg arg arg arg Assign Assign Assign Assign"
  },
  {
    "library": "kornia",
    "name": "transform_tensor",
    "source_code": "def transform_tensor(self, input: Tensor, *, shape: Optional[Tensor]=None, match_channel: bool=True) -> Tensor:\n    raise NotImplementedError",
    "docstring": "Standardize input tensors.",
    "type": "method",
    "file_path": "kornia\\kornia\\augmentation\\base.py",
    "ast_data": "FunctionDef name:transform_tensor arg:self arg:input arguments arg arg arg arg Raise"
  },
  {
    "library": "pytorch",
    "name": "__init__",
    "source_code": "def __init__(self, path: Union[str, os.PathLike], single_file_per_rank: bool=True, sync_files: bool=True, thread_count: int=1, per_thread_copy_ahead: int=10000000, overwrite: bool=True, _extensions: Optional[Sequence[StreamTransformExtension]]=None, serialization_format: SerializationFormat=SerializationFormat.TORCH_SAVE, *args: Any, **kwargs: Any) -> None:\n    super().__init__()\n    self.fs = FileSystem()\n    self.path = self.fs.init_path(path)\n    self.single_file_per_rank = single_file_per_rank\n    self.sync_files = sync_files\n    self.thread_count = thread_count\n    self.per_thread_copy_ahead = per_thread_copy_ahead\n    self.save_id = _generate_uuid()\n    self.overwrite = overwrite\n    self.transforms = _StorageWriterTransforms(_extensions)\n    self.serialization_format = serialization_format",
    "docstring": "Initialize the writer pointing to . Args: path: directory where the checkpoint will be written to. single_file_per_rank: Produce one file per rank instead of one file per tensor/blob. Default to True. sync_files : force files to be synced to permanent storage. Default to True. thread_count: Number of IO threads to use to write. Default to 1. per_thread_copy_ahead: How many bytes to copy from the GPU ahead of saving then. Default 10Mb. overwrite: Whether to allow overwriting existing checkpoints. Defaults to True. _extensions: Extensions to apply to output streams (EXPERIMENTAL) N. B. If sync_files is disabled, there's no guarantee that the checkpoint will be consistent in the case of a failure.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\checkpoint\\filesystem.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:path arg:single_file_per_rank arg:sync_files arg:thread_count arg:per_thread_copy_ahead arg:overwrite arg:_extensions arg:serialization_format arguments arg arg arg arg arg arg arg arg arg arg arg Call Call Assign Call Assign Call Assign Assign Assign Assign Assign Call Assign Assign Call Assign"
  },
  {
    "library": "pytorch",
    "name": "to_dot",
    "source_code": "def to_dot(self) -> str:\n    edges = '\\n'.join((f'\"{f}\" -> \"{t}\";' for f, t in self.edges))\n    return f'digraph G {{\\nrankdir = LR;\\nnode [shape=box];\\n{edges}\\n}}\\n'",
    "docstring": "Returns the dot representation of the graph. Returns: A dot representation of the graph.",
    "type": "method",
    "file_path": "pytorch\\torch\\package\\_digraph.py",
    "ast_data": "FunctionDef name:to_dot arg:self arguments arg Assign Call Return return:yes"
  },
  {
    "library": "cryptography",
    "name": "_get_sshstr",
    "source_code": "def _get_sshstr(data: memoryview) -> tuple[memoryview, memoryview]:\n    n, data = _get_u32(data)\n    if n > len(data):\n        raise ValueError('Invalid data')\n    return (data[:n], data[n:])",
    "docstring": "Bytes with u32 length prefix",
    "type": "function",
    "file_path": "cryptography\\src\\cryptography\\hazmat\\primitives\\serialization\\ssh.py",
    "ast_data": "FunctionDef name:_get_sshstr arg:data arguments arg Assign Call If Compare Call Raise Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "get_unpartitioned_shape",
    "source_code": "def get_unpartitioned_shape(self, shape):\n    shape = tensor_shape.as_shape(shape)\n    dims = shape.as_list()\n    if self._shard_dimension is None or self._number_of_partitions is None or (not dims):\n        return None\n    if dims[self._shard_dimension] is None:\n        raise ValueError(f'Shape {shape.as_list()} must have a fixed size for dimension {self._shard_dimension} that is known. ')\n    if self._number_of_partitions > 1:\n        dims[self._shard_dimension] *= self._number_of_partitions\n    return tensor_shape.as_shape(dims)",
    "docstring": "Returns the shape of an unpartitioned Tensor. When given the shape of a 'sharded-size' Tensor, returns the shape of the full shape of its unpartitioned Tensor. Args: shape: The shape of the sharded Tensor. Returns: The shape of the unpartitioned version of the Tensor. Raises: ValueError: if shape has unknown sharded dimension",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\tpu\\tpu_sharding.py",
    "ast_data": "FunctionDef name:get_unpartitioned_shape arg:self arg:shape arguments arg arg Assign Call Assign Call If BoolOp Compare Compare Return return:no If Compare Raise Call Call If Compare Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "__check_tensor_list",
    "source_code": "def __check_tensor_list(self, tensor_list):\n    expected = self._flat_tensor_specs\n    specs = [type_spec_from_value(t) for t in tensor_list]\n    if len(specs) != len(expected):\n        raise ValueError(f'Cannot create a {self.value_type.__name__} from the tensor list because the TypeSpec expects {len(expected)} items, but the provided tensor list has {len(specs)} items.')\n    for i, (s1, s2) in enumerate(zip(specs, expected)):\n        if not s1.is_compatible_with(s2):\n            raise ValueError(f'Cannot create a {self.value_type.__name__} from the tensor list because item {i} ({tensor_list[i]!r}) is incompatible with the expected TypeSpec {s2}.')",
    "docstring": "Raises an exception if tensor_list incompatible w/ flat_tensor_specs.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\type_spec.py",
    "ast_data": "FunctionDef name:__check_tensor_list arg:self arg:tensor_list arguments arg arg Assign Assign Call If Compare Call Call Raise Call Call Call For Call Call If Call Raise Call"
  },
  {
    "library": "sphinx",
    "name": "DummyApplication",
    "source_code": "class DummyApplication:\n\n    def __init__(self, translator: NullTranslations) -> None:\n        self.config = Config()\n        self.events = _DummyEvents()\n        self.registry = SphinxComponentRegistry()\n        self.messagelog: list[str] = []\n        self.srcdir = _StrPath('/')\n        self.translator = translator\n        self.verbosity = 0\n        self._warncount = 0\n        self._exception_on_warning = False\n        self.config.add('autosummary_context', {}, 'env', ())\n        self.config.add('autosummary_filename_map', {}, 'env', ())\n        self.config.add('autosummary_ignore_module_all', True, 'env', bool)\n\n    def emit_firstresult(self, *args: Any) -> None:\n        pass",
    "docstring": "Dummy Application class for sphinx-autogen command.",
    "type": "class",
    "file_path": "sphinx\\sphinx\\ext\\autosummary\\generate.py",
    "ast_data": "ClassDef name:DummyApplication FunctionDef name:__init__ arg:self arg:translator arguments arg arg Assign Call Assign Call Assign Call Assign Call Assign Assign Assign Assign Call Call Call FunctionDef name:emit_firstresult arg:self arguments arg arg"
  },
  {
    "library": "django",
    "name": "tokenize",
    "source_code": "def tokenize(self):\n    in_tag = False\n    lineno = 1\n    result = []\n    for token_string, position in self._tag_re_split():\n        if token_string:\n            result.append(self.create_token(token_string, position, lineno, in_tag))\n            lineno += token_string.count('\\n')\n        in_tag = not in_tag\n    return result",
    "docstring": "Split a template string into tokens and annotates each token with its start and end position in the source. This is slower than the default lexer so only use it when debug is True.",
    "type": "method",
    "file_path": "django\\django\\template\\base.py",
    "ast_data": "FunctionDef name:tokenize arg:self arguments arg Assign Assign Assign For Call If Call Call Call Assign Return return:yes"
  },
  {
    "library": "django",
    "name": "IntFromGeom",
    "source_code": "class IntFromGeom(GEOSFuncFactory):\n    argtypes = [GEOM_PTR]\n    restype = c_int\n    errcheck = staticmethod(check_minus_one)",
    "docstring": "Argument is a geometry, return type is an integer.",
    "type": "class",
    "file_path": "django\\django\\contrib\\gis\\geos\\prototypes\\geom.py",
    "ast_data": "ClassDef name:IntFromGeom Assign Assign Assign Call"
  },
  {
    "library": "scrapy",
    "name": "create_crawler",
    "source_code": "def create_crawler(self, crawler_or_spidercls: type[Spider] | str | Crawler) -> Crawler:\n    if isinstance(crawler_or_spidercls, Spider):\n        raise ValueError('The crawler_or_spidercls argument cannot be a spider object, it must be a spider class (or a Crawler object)')\n    if isinstance(crawler_or_spidercls, Crawler):\n        return crawler_or_spidercls\n    return self._create_crawler(crawler_or_spidercls)",
    "docstring": "Return a :class: object. * If `` is a string, this function finds a spider with this name in a Scrapy project (using spider loader), then creates a Crawler instance for it.",
    "type": "method",
    "file_path": "scrapy\\scrapy\\crawler.py",
    "ast_data": "FunctionDef name:create_crawler arg:self arg:crawler_or_spidercls arguments arg arg If Call Raise Call If Call Return return:yes Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "fit",
    "source_code": "@_fit_context(prefer_skip_nested_validation=True)\ndef fit(self, X, y=None):\n    X = validate_data(self, X, accept_sparse='csr', ensure_min_samples=2)\n    random_state = check_random_state(self.random_state)\n    affinity_matrix = self._get_affinity_matrix(X)\n    self.embedding_ = _spectral_embedding(affinity_matrix, n_components=self.n_components, eigen_solver=self.eigen_solver, eigen_tol=self.eigen_tol, random_state=random_state)\n    return self",
    "docstring": "Fit the model from data in X. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) Training vector, where is the number of samples and is the number of features. If affinity is \"precomputed\" X : {array-like, sparse matrix}, shape (n_samples, n_samples), Interpret X as precomputed adjacency graph computed from samples. y : Ignored Not used, present for API consistency by convention. Returns ------- self : object Returns the instance itself.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\manifold\\_spectral_embedding.py",
    "ast_data": "FunctionDef name:fit arg:self arg:X arg:y arguments arg arg arg Assign Call Assign Call Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "get_intra_op_parallelism_threads",
    "source_code": "@tf_export('config.threading.get_intra_op_parallelism_threads')\ndef get_intra_op_parallelism_threads():\n    return context.context().intra_op_parallelism_threads",
    "docstring": "Get number of threads used within an individual op for parallelism. Certain operations like matrix multiplication and reductions can utilize parallel threads for speed ups. A value of 0 means the system picks an appropriate number. Returns: Number of parallel threads",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\config.py",
    "ast_data": "FunctionDef name:get_intra_op_parallelism_threads arguments Return return:yes Call Call"
  },
  {
    "library": "django",
    "name": "lorem",
    "source_code": "@register.tag\ndef lorem(parser, token):\n    bits = list(token.split_contents())\n    tagname = bits[0]\n    common = bits[-1] != 'random'\n    if not common:\n        bits.pop()\n    if bits[-1] in ('w', 'p', 'b'):\n        method = bits.pop()\n    else:\n        method = 'b'\n    if len(bits) > 1:\n        count = bits.pop()\n    else:\n        count = '1'\n    count = parser.compile_filter(count)\n    if len(bits) != 1:\n        raise TemplateSyntaxError('Incorrect format for %r tag' % tagname)\n    return LoremNode(count, method, common)",
    "docstring": "Create random Latin text useful for providing test data in templates. Usage format:: {% lorem [count] [method] [random] %} ```` outputs two random latin words",
    "type": "function",
    "file_path": "django\\django\\template\\defaulttags.py",
    "ast_data": "FunctionDef name:lorem arg:parser arg:token arguments arg arg Assign Call Call Assign Assign Compare If Call If Compare Assign Call Assign If Compare Call Assign Call Assign Assign Call If Compare Call Raise Call Return return:yes Call"
  },
  {
    "library": "django",
    "name": "clean_username",
    "source_code": "def clean_username(self, username):\n    return username",
    "docstring": "Perform any cleaning on the \"username\" prior to using it to get or create the user object. Return the cleaned username. By default, return the username unchanged.",
    "type": "method",
    "file_path": "django\\django\\contrib\\auth\\backends.py",
    "ast_data": "FunctionDef name:clean_username arg:self arg:username arguments arg arg Return return:yes"
  },
  {
    "library": "kornia",
    "name": "forward",
    "source_code": "def forward(self, img: Tensor, mask: Optional[Tensor]=None) -> Tuple[Tensor, Tensor]:\n    KORNIA_CHECK_SHAPE(img, ['1', 'C', 'H', 'W'])\n    responses, lafs = self.detect(img, mask)\n    lafs = self.aff(lafs, img)\n    lafs = self.ori(lafs, img)\n    return (lafs, responses)",
    "docstring": "Three stage local feature detection. First the location and scale of interest points are determined by detect function. Then affine shape and orientation. Args: img: image to extract features with shape [1xCxHxW]. KeyNetDetector does not support batch processing, because the number of detections is different on each image. mask: a mask with weights where to apply the response function. The shape must be the same as the input image. Returns: lafs: shape [1xNx2x3]. Detected local affine frames. responses: shape [1xNx1]. Response function values for corresponding lafs",
    "type": "method",
    "file_path": "kornia\\kornia\\feature\\scale_space_detector.py",
    "ast_data": "FunctionDef name:forward arg:self arg:img arg:mask arguments arg arg arg Call Assign Call Assign Call Assign Call Return return:yes"
  },
  {
    "library": "pandas",
    "name": "_split_by_backtick",
    "source_code": "def _split_by_backtick(s: str) -> list[tuple[bool, str]]:\n    substrings = []\n    substr: list[str] = []\n    i = 0\n    parse_state = ParseState.DEFAULT\n    while i < len(s):\n        char = s[i]\n        match char:\n            case '`':\n                if parse_state == ParseState.DEFAULT:\n                    if substr:\n                        substrings.append((False, ''.join(substr)))\n                    substr = [char]\n                    i += 1\n                    parse_state = ParseState.IN_BACKTICK\n                    continue\n                elif parse_state == ParseState.IN_BACKTICK:\n                    next_char = s[i + 1] if i != len(s) - 1 else None\n                    if next_char == '`':\n                        substr.append(char)\n                        substr.append(next_char)\n                        i += 2\n                        continue\n                    else:\n                        substr.append(char)\n                        substrings.append((True, ''.join(substr)))\n                        substr = []\n                        i += 1\n                        parse_state = ParseState.DEFAULT\n                        continue\n            case \"'\":\n                if parse_state == ParseState.DEFAULT:\n                    parse_state = ParseState.IN_SINGLE_QUOTE\n                elif parse_state == ParseState.IN_SINGLE_QUOTE and s[i - 1] != '\\\\':\n                    parse_state = ParseState.DEFAULT\n            case '\"':\n                if parse_state == ParseState.DEFAULT:\n                    parse_state = ParseState.IN_DOUBLE_QUOTE\n                elif parse_state == ParseState.IN_DOUBLE_QUOTE and s[i - 1] != '\\\\':\n                    parse_state = ParseState.DEFAULT\n        substr.append(char)\n        i += 1\n    if substr:\n        substrings.append((False, ''.join(substr)))\n    return substrings",
    "docstring": "Splits a str into substrings along backtick characters (`). Disregards backticks inside quotes. Parameters ---------- s : str The Python source code string. Returns ------- substrings: list[tuple[bool, str]] List of tuples, where each tuple has two elements: The first is a boolean indicating if the substring is backtick-quoted. The second is the actual substring.",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\computation\\parsing.py",
    "ast_data": "FunctionDef name:_split_by_backtick arg:s arguments arg Assign Assign Assign While Compare Call Assign If Compare If Call Call Assign Assign If Compare Assign Compare Call If Compare Call Call Call Call Call Assign Assign If Compare Assign If BoolOp Compare Compare Assign If Compare Assign If BoolOp Compare Compare Assign Call If Call Call Return return:yes"
  },
  {
    "library": "django",
    "name": "incr_version",
    "source_code": "def incr_version(self, key, delta=1, version=None):\n    if version is None:\n        version = self.version\n    value = self.get(key, self._missing_key, version=version)\n    if value is self._missing_key:\n        raise ValueError(\"Key '%s' not found\" % key)\n    self.set(key, value, version=version + delta)\n    self.delete(key, version=version)\n    return version + delta",
    "docstring": "Add delta to the cache version for the supplied key. Return the new version.",
    "type": "method",
    "file_path": "django\\django\\core\\cache\\backends\\base.py",
    "ast_data": "FunctionDef name:incr_version arg:self arg:key arg:delta arg:version arguments arg arg arg arg If Compare Assign Assign Call If Compare Raise Call Call Call Return return:yes"
  },
  {
    "library": "seaborn",
    "name": "get_mapping",
    "source_code": "def get_mapping(self, scale: Scale, data: Series) -> Mapping:\n\n    def identity(x):\n        return x\n    return identity",
    "docstring": "Return a function that maps from data domain to property range.",
    "type": "method",
    "file_path": "seaborn\\seaborn\\_core\\properties.py",
    "ast_data": "FunctionDef name:get_mapping arg:self arg:scale arg:data arguments arg arg arg FunctionDef name:identity arg:x arguments arg Return return:yes Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "CommunicationImplementation",
    "source_code": "@tf_export('distribute.experimental.CommunicationImplementation', 'distribute.experimental.CollectiveCommunication')\nclass CommunicationImplementation(enum.Enum):\n    AUTO = 'AUTO'\n    RING = 'RING'\n    NCCL = 'NCCL'",
    "docstring": "Cross device communication implementation. Warning: The alias is deprecated and will be removed in a future version. Use instead. * : Automatically chosen by Tensorflow. * : TensorFlow's ring algorithms for all-reduce and all-gather. * : NVIDIA®'s NCCL library. This is now only used for all-reduce on GPUs; all-reduce on CPU, all-gather and broadcast fallbacks to RING.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\collective_util.py",
    "ast_data": "ClassDef name:CommunicationImplementation Assign Assign Assign Call"
  },
  {
    "library": "cherrypy",
    "name": "__init__",
    "source_code": "def __init__(self, file=None, **kwargs):\n    self.reset()\n    if file is not None:\n        self.update(file)\n    if kwargs:\n        self.update(kwargs)",
    "docstring": "Initialize a CherryPy :class:.",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\lib\\reprconf.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:file arguments arg arg arg Call If Compare Call If Call"
  },
  {
    "library": "pytorch",
    "name": "get_embedding_static_quant_module_mappings",
    "source_code": "def get_embedding_static_quant_module_mappings() -> dict[Callable, Any]:\n    mapping = copy.deepcopy(DEFAULT_STATIC_QUANT_MODULE_MAPPINGS)\n    mapping[nnqat.EmbeddingBag] = nnq.EmbeddingBag\n    mapping[nnqat.Embedding] = nnq.Embedding\n    return mapping",
    "docstring": "Get module mapping, including mapping for embedding QAT",
    "type": "function",
    "file_path": "pytorch\\torch\\ao\\quantization\\quantization_mappings.py",
    "ast_data": "FunctionDef name:get_embedding_static_quant_module_mappings arguments Assign Call Assign Assign Return return:yes"
  },
  {
    "library": "scipy",
    "name": "entropy",
    "source_code": "def entropy(self, df, scale):\n    dim, df, scale = self._process_parameters(df, scale)\n    _, log_det_scale = self._cholesky_logdet(scale)\n    return self._entropy(dim, df, log_det_scale)",
    "docstring": "Compute the differential entropy of the Wishart. Parameters ---------- %(_doc_default_callparams)s Returns ------- h : scalar Entropy of the Wishart distribution Notes ----- %(_doc_callparams_note)s",
    "type": "method",
    "file_path": "scipy\\scipy\\stats\\_multivariate.py",
    "ast_data": "FunctionDef name:entropy arg:self arg:df arg:scale arguments arg arg arg Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "freezing_passes",
    "source_code": "def freezing_passes(gm: torch.fx.GraphModule, aot_example_inputs):\n    from ..freezing import constant_fold\n    lazy_init()\n    binary_folding = counters['inductor']['binary_folding']\n    fake_tensor_prop(gm, aot_example_inputs, True)\n    torch._inductor.fx_passes.binary_folding.mark_mixed_dtype_allowed_computation_ops(gm)\n    for _ in range(4):\n        constant_fold(gm)\n        fake_tensor_prop(gm, aot_example_inputs, True)\n        binary_folding_pass.apply(gm.graph)\n        if counters['inductor']['binary_folding'] == binary_folding:\n            break\n        binary_folding = counters['inductor']['binary_folding']\n    torch._inductor.fx_passes.binary_folding.recover_original_precision_folded_computation_ops(gm)\n    constant_fold(gm)\n    fake_tensor_prop(gm, aot_example_inputs, True)\n    for pattern in pass_patterns:\n        pattern.apply(gm.graph)\n    if torch._C._has_mkldnn and config.cpp.weight_prepack and config.layout_optimization:\n        from .mkldnn_fusion import _eliminate_duplicate_packed_nodes\n        _eliminate_duplicate_packed_nodes(gm)\n    stable_topological_sort(gm.graph)\n    gm.recompile()\n    gm.graph.lint()",
    "docstring": "Passes that are applied to the graph to freeze pass.",
    "type": "function",
    "file_path": "pytorch\\torch\\_inductor\\fx_passes\\freezing_patterns.py",
    "ast_data": "FunctionDef name:freezing_passes arg:gm arg:aot_example_inputs arguments arg arg Call Assign Call Call For Call Call Call Call If Compare Assign Call Call Call For Call If BoolOp Call Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "create_default_global_load_plan",
    "source_code": "def create_default_global_load_plan(all_plans: list[LoadPlan]) -> list[LoadPlan]:\n    return all_plans",
    "docstring": "Create global load plan used by DefaultLoadPlanner. The default load behavior involved no global coordination and this function currently doesn't change the local plans.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\checkpoint\\default_planner.py",
    "ast_data": "FunctionDef name:create_default_global_load_plan arg:all_plans arguments arg Return return:yes"
  },
  {
    "library": "kornia",
    "name": "exp",
    "source_code": "@staticmethod\ndef exp(v: Tensor) -> Se3:\n    upsilon = v[..., :3]\n    omega = v[..., 3:]\n    omega_hat = So3.hat(omega)\n    omega_hat_sq = omega_hat @ omega_hat\n    theta = batched_dot_product(omega, omega).sqrt()\n    R = So3.exp(omega)\n    V = eye(3, device=v.device, dtype=v.dtype) + ((1 - theta.cos()) / theta ** 2)[..., None, None] * omega_hat + ((theta - theta.sin()) / theta ** 3)[..., None, None] * omega_hat_sq\n    U = where(theta[..., None] != 0.0, (upsilon[..., None, :] * V).sum(-1), upsilon)\n    return Se3(R, U)",
    "docstring": "Convert elements of lie algebra to elements of lie group. Args: v: vector of shape :math:. Example: >>> v = torch.zeros((1, 6)) >>> s = Se3.exp(v) >>> s.r Parameter containing: tensor([[1., 0., 0., 0.]], requires_grad=True) >>> s.t Parameter containing: tensor([[0., 0., 0.]], requires_grad=True)",
    "type": "method",
    "file_path": "kornia\\kornia\\geometry\\liegroup\\se3.py",
    "ast_data": "FunctionDef name:exp arg:v arguments arg Assign Assign Assign Call Assign Assign Call Call Assign Call Assign Call Call Call Assign Call Compare Call Return return:yes Call"
  },
  {
    "library": "django",
    "name": "InclusionAdminNode",
    "source_code": "class InclusionAdminNode(InclusionNode):\n\n    def __init__(self, parser, token, func, template_name, takes_context=True):\n        self.template_name = template_name\n        params, varargs, varkw, defaults, kwonly, kwonly_defaults, _ = getfullargspec(func)\n        bits = token.split_contents()\n        args, kwargs = parse_bits(parser, bits[1:], params, varargs, varkw, defaults, kwonly, kwonly_defaults, takes_context, bits[0])\n        super().__init__(func, takes_context, args, kwargs, filename=None)\n\n    def render(self, context):\n        opts = context['opts']\n        app_label = opts.app_label.lower()\n        object_name = opts.model_name\n        context.render_context[self] = context.template.engine.select_template(['admin/%s/%s/%s' % (app_label, object_name, self.template_name), 'admin/%s/%s' % (app_label, self.template_name), 'admin/%s' % self.template_name])\n        return super().render(context)",
    "docstring": "Template tag that allows its template to be overridden per model, per app, or globally.",
    "type": "class",
    "file_path": "django\\django\\contrib\\admin\\templatetags\\base.py",
    "ast_data": "ClassDef name:InclusionAdminNode FunctionDef name:__init__ arg:self arg:parser arg:token arg:func arg:template_name arg:takes_context arguments arg arg arg arg arg arg Assign Assign Call Assign Call Assign Call Call Call FunctionDef name:render arg:self arg:context arguments arg arg Assign Assign Call Assign Assign Call Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_clear_attr",
    "source_code": "def _clear_attr(self, attr_name) -> None:\n    with self.graph._c_graph.get() as c_graph:\n        pywrap_tf_session.ClearAttr(c_graph, self._c_op, attr_name)",
    "docstring": "Private method used to clear an attribute in the node_def.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\ops.py",
    "ast_data": "FunctionDef name:_clear_attr arg:self arg:attr_name arguments arg arg With Call Call"
  },
  {
    "library": "pandas",
    "name": "all",
    "source_code": "def all(self, *args, **kwargs):\n    nv.validate_all(args, kwargs)\n    self._maybe_disable_logical_methods('all')\n    vals = self._values\n    if not isinstance(vals, np.ndarray):\n        return vals._reduce('all')\n    return np.all(vals)",
    "docstring": "Return whether all elements are Truthy. Parameters ---------- *args Required for compatibility with numpy. **kwargs Required for compatibility with numpy. Returns ------- bool or array-like (if axis is specified) A single element array-like may be converted to bool. See Also -------- Index.any : Return whether any element in an Index is True. Series.any : Return whether any element in a Series is True. Series.all : Return whether all elements in a Series are True. Notes ----- Not a Number (NaN), positive infinity and negative infinity evaluate to True because these are not equal to zero. Examples -------- True, because nonzero integers are considered True. >>> pd.Index([1, 2, 3]).all() True False, because `` is considered False. >>> pd.Index([0, 1, 2]).all() False",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\indexes\\base.py",
    "ast_data": "FunctionDef name:all arg:self arguments arg arg arg Call Call Assign If Call Return return:yes Call Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "maybe_operate_rowwise",
    "source_code": "def maybe_operate_rowwise(func: F) -> F:\n\n    @functools.wraps(func)\n    def newfunc(values: np.ndarray, *, axis: AxisInt | None=None, **kwargs):\n        if axis == 1 and values.ndim == 2 and values.flags['C_CONTIGUOUS'] and (values.shape[1] / 1000 > values.shape[0]) and (values.dtype != object) and (values.dtype != bool):\n            arrs = list(values)\n            if kwargs.get('mask') is not None:\n                mask = kwargs.pop('mask')\n                results = [func(arrs[i], mask=mask[i], **kwargs) for i in range(len(arrs))]\n            else:\n                results = [func(x, **kwargs) for x in arrs]\n            return np.array(results)\n        return func(values, axis=axis, **kwargs)\n    return cast(F, newfunc)",
    "docstring": "NumPy operations on C-contiguous ndarrays with axis=1 can be very slow if axis 1 >> axis 0. Operate row-by-row and concatenate the results.",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\nanops.py",
    "ast_data": "FunctionDef name:maybe_operate_rowwise arg:func arguments arg FunctionDef name:newfunc arg:values arguments arg arg arg If BoolOp Compare Compare Compare Compare Compare Assign Call If Compare Call Assign Call Assign Call Call Call Assign Call Return return:yes Call Return return:yes Call Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "serialized_tensors_to_saveable_cache",
    "source_code": "def serialized_tensors_to_saveable_cache(serialized_tensors):\n    saveables_cache = object_identity.ObjectIdentityWeakKeyDictionary()\n    for obj, tensor_dict in serialized_tensors.items():\n        if not tensor_dict:\n            continue\n        if isinstance(obj, SaveableCompatibilityConverter):\n            trackable_obj = obj.obj\n            saveables_cache[trackable_obj] = {}\n            for saveable in obj.saveables:\n                local_name = trackable_utils.extract_local_name(saveable.name)\n                saveables_cache[trackable_obj][local_name] = [saveable]\n            continue\n        specs = []\n        local_names = []\n        prefix = saveable_compat.get_saveable_name(obj) or ''\n        for checkpoint_key, maybe_tensor in tensor_dict.items():\n            if not isinstance(maybe_tensor, dict):\n                maybe_tensor = {'': maybe_tensor}\n            for slice_spec, tensor in maybe_tensor.items():\n                if isinstance(tensor, saveable_object.SaveSpec):\n                    specs.append(tensor)\n                else:\n                    specs.append(saveable_object.SaveSpec(tensor, slice_spec, checkpoint_key))\n            local_names.append(trackable_utils.extract_local_name(checkpoint_key, prefix))\n        object_name = trackable_utils.extract_object_name(next(iter(tensor_dict.keys())))\n        saveables_cache[obj] = {trackable_utils.SERIALIZE_TO_TENSORS_NAME: [TrackableSaveable(obj, specs, object_name, local_names=local_names, prefix=prefix)]}\n    return saveables_cache",
    "docstring": "Converts a tensor dict to a SaveableObject cache. Args: serialized_tensors: Map from Trackable to a tensor dict. The tensor dict maps checkpoint key (-> slice_spec) -> Tensor Returns: A dict mapping Trackable objects to a map from local savable name to SaveableObject.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\training\\saving\\saveable_object_util.py",
    "ast_data": "FunctionDef name:serialized_tensors_to_saveable_cache arg:serialized_tensors arguments arg Assign Call For Call If If Call Assign Assign For Assign Call Assign Assign Assign Assign BoolOp Call For Call If Call Assign For Call If Call Call Call Call Call Call Assign Call Call Call Call Assign Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "get_zmargin",
    "source_code": "def get_zmargin(self):\n    return self._zmargin",
    "docstring": "Retrieve autoscaling margin of the z-axis. .. versionadded:: 3.9 Returns ------- zmargin : float See Also -------- mpl_toolkits.mplot3d.axes3d.Axes3D.set_zmargin",
    "type": "method",
    "file_path": "matplotlib\\lib\\mpl_toolkits\\mplot3d\\axes3d.py",
    "ast_data": "FunctionDef name:get_zmargin arg:self arguments arg Return return:yes"
  },
  {
    "library": "django",
    "name": "sql_with_params",
    "source_code": "def sql_with_params(self):\n    return self.get_compiler(DEFAULT_DB_ALIAS).as_sql()",
    "docstring": "Return the query as an SQL string and the parameters that will be substituted into the query.",
    "type": "method",
    "file_path": "django\\django\\db\\models\\sql\\query.py",
    "ast_data": "FunctionDef name:sql_with_params arg:self arguments arg Return return:yes Call Call"
  },
  {
    "library": "matplotlib",
    "name": "format_coord",
    "source_code": "def format_coord(self, xv, yv, renderer=None):\n    coords = ''\n    if self.button_pressed in self._rotate_btn:\n        coords = self._rotation_coords()\n    elif self.M is not None:\n        coords = self._location_coords(xv, yv, renderer)\n    return coords",
    "docstring": "Return a string giving the current view rotation angles, or the x, y, z coordinates of the point on the nearest axis pane underneath the mouse cursor, depending on the mouse button pressed.",
    "type": "method",
    "file_path": "matplotlib\\lib\\mpl_toolkits\\mplot3d\\axes3d.py",
    "ast_data": "FunctionDef name:format_coord arg:self arg:xv arg:yv arg:renderer arguments arg arg arg arg Assign If Compare Assign Call If Compare Assign Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "LambdaSL",
    "source_code": "class LambdaSL(BaseScheduler):\n\n    def __init__(self, sparsifier, sl_lambda, last_epoch=-1, verbose=False):\n        self.sparsifier = sparsifier\n        if not isinstance(sl_lambda, list) and (not isinstance(sl_lambda, tuple)):\n            self.sl_lambdas = [sl_lambda] * len(sparsifier.groups)\n        else:\n            if len(sl_lambda) != len(sparsifier.groups):\n                raise ValueError(f'Expected {len(sparsifier.groups)} lr_lambdas, but got {len(sl_lambda)}')\n            self.sl_lambdas = list(sl_lambda)\n        super().__init__(sparsifier, last_epoch, verbose)\n\n    def get_sl(self):\n        if not self._get_sl_called_within_step:\n            warnings.warn('To get the last sparsity level computed by the scheduler, please use `get_last_sl()`.')\n        return [base_sl * lmbda(self.last_epoch) for lmbda, base_sl in zip(self.sl_lambdas, self.base_sl)]",
    "docstring": "Sets the sparsity level of each parameter group to the final sl times a given function. When last_epoch=-1, sets initial sl as zero. Args: sparsifier (BaseSparsifier): Wrapped sparsifier. sl_lambda (function or list): A function which computes a multiplicative factor given an integer parameter epoch, or a list of such functions, one for each group in sparsifier.param_groups. last_epoch (int): The index of last epoch. Default: -1. verbose (bool): If ``. Example: >>> # Assuming sparsifier has two groups. >>> lambda1 = lambda epoch: epoch // 30 >>> lambda2 = lambda epoch: 0.95 ** epoch >>> # xdoctest: +SKIP >>> scheduler = LambdaSL(sparsifier, sl_lambda=[lambda1, lambda2]) >>> for epoch in range(100): >>> train(...) >>> validate(...) >>> scheduler.step()",
    "type": "class",
    "file_path": "pytorch\\torch\\ao\\pruning\\scheduler\\lambda_scheduler.py",
    "ast_data": "ClassDef name:LambdaSL FunctionDef name:__init__ arg:self arg:sparsifier arg:sl_lambda arg:last_epoch arg:verbose arguments arg arg arg arg arg Assign If BoolOp Call Call Assign Call If Compare Call Call Raise Call Call Call Assign Call Call Call FunctionDef name:get_sl arg:self arguments arg If Call Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "_promote_type_fft",
    "source_code": "def _promote_type_fft(dtype: torch.dtype, require_complex: bool, device: torch.device) -> torch.dtype:\n    if dtype.is_complex:\n        return dtype\n    if not dtype.is_floating_point:\n        dtype = torch.get_default_dtype()\n    allowed_types = [torch.float32, torch.float64]\n    maybe_support_half = device.type in ['cuda', 'meta']\n    if maybe_support_half:\n        allowed_types.append(torch.float16)\n    torch._check(dtype in allowed_types, lambda: f'Unsupported dtype {dtype}')\n    if require_complex:\n        dtype = utils.corresponding_complex_dtype(dtype)\n    return dtype",
    "docstring": "Helper to promote a dtype to one supported by the FFT primitives",
    "type": "function",
    "file_path": "pytorch\\torch\\_refs\\fft.py",
    "ast_data": "FunctionDef name:_promote_type_fft arg:dtype arg:require_complex arg:device arguments arg arg arg If Return return:yes If Assign Call Assign Assign Compare If Call Call Compare arguments If Assign Call Return return:yes"
  },
  {
    "library": "scipy",
    "name": "_check_sparse_inputs",
    "source_code": "def _check_sparse_inputs(options, meth, A_ub, A_eq):\n    _sparse_presolve = options.pop('_sparse_presolve', False)\n    if _sparse_presolve and A_eq is not None:\n        A_eq = sps.coo_array(A_eq)\n    if _sparse_presolve and A_ub is not None:\n        A_ub = sps.coo_array(A_ub)\n    sparse_constraint = sps.issparse(A_eq) or sps.issparse(A_ub)\n    preferred_methods = {'highs', 'highs-ds', 'highs-ipm'}\n    dense_methods = {'simplex', 'revised simplex'}\n    if meth in dense_methods and sparse_constraint:\n        raise ValueError(f\"Method '{meth}' does not support sparse constraint matrices. Please consider using one of {preferred_methods}.\")\n    sparse = options.get('sparse', False)\n    if not sparse and sparse_constraint and (meth == 'interior-point'):\n        options['sparse'] = True\n        warn(\"Sparse constraint matrix detected; setting 'sparse':True.\", OptimizeWarning, stacklevel=4)\n    return (options, A_ub, A_eq)",
    "docstring": "Check the provided `show_options('linprog')show_options('linprog')`.",
    "type": "function",
    "file_path": "scipy\\scipy\\optimize\\_linprog_util.py",
    "ast_data": "FunctionDef name:_check_sparse_inputs arg:options arg:meth arg:A_ub arg:A_eq arguments arg arg arg arg Assign Call If BoolOp Compare Assign Call If BoolOp Compare Assign Call Assign BoolOp Call Call Assign Assign If BoolOp Compare Raise Call Assign Call If BoolOp Compare Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_GatherDropNegatives",
    "source_code": "def _GatherDropNegatives(params, ids, zero_clipped_indices=None, is_positive=None):\n    if zero_clipped_indices is None:\n        zero_clipped_indices = math_ops.maximum(ids, array_ops.zeros_like(ids))\n    gathered = array_ops.gather(params, zero_clipped_indices)\n    if is_positive is None:\n        is_positive = math_ops.greater_equal(ids, 0)\n        is_positive_shape = array_ops.shape(is_positive)\n        broadcastable_shape = array_ops.concat([is_positive_shape, array_ops.ones([array_ops.rank(gathered) - array_ops.rank(is_positive)], dtype=is_positive_shape.dtype)], axis=0)\n        is_positive = array_ops.reshape(is_positive, broadcastable_shape)\n        is_positive = is_positive & array_ops.ones_like(gathered, dtype=dtypes.bool)\n    zero_slice = array_ops.zeros_like(gathered)\n    return (array_ops.where_v2(is_positive, gathered, zero_slice), zero_clipped_indices, is_positive)",
    "docstring": "Helper function for unsorted segment ops. Gathers params for positive segment ids and gathers 0 for inputs with negative segment id. Also returns the clipped indices and a boolean mask with the same shape as ids where a positive id is masked as true. With this, the latter two can be passed as arguments to this function to reuse them.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\math_grad.py",
    "ast_data": "FunctionDef name:_GatherDropNegatives arg:params arg:ids arg:zero_clipped_indices arg:is_positive arguments arg arg arg arg If Compare Assign Call Call Assign Call If Compare Assign Call Assign Call Assign Call Call Call Call Assign Call Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "django",
    "name": "_prepare",
    "source_code": "def _prepare(cls):\n    opts = cls._meta\n    opts._prepare(cls)\n    if opts.order_with_respect_to:\n        cls.get_next_in_order = partialmethod(cls._get_next_or_previous_in_order, is_next=True)\n        cls.get_previous_in_order = partialmethod(cls._get_next_or_previous_in_order, is_next=False)\n        if opts.order_with_respect_to.remote_field:\n            wrt = opts.order_with_respect_to\n            remote = wrt.remote_field.model\n            lazy_related_operation(make_foreign_order_accessors, cls, remote)\n    if cls.__doc__ is None:\n        cls.__doc__ = '%s(%s)' % (cls.__name__, ', '.join((f.name for f in opts.fields)))\n    get_absolute_url_override = settings.ABSOLUTE_URL_OVERRIDES.get(opts.label_lower)\n    if get_absolute_url_override:\n        setattr(cls, 'get_absolute_url', get_absolute_url_override)\n    if not opts.managers:\n        if any((f.name == 'objects' for f in opts.fields)):\n            raise ValueError(\"Model %s must specify a custom Manager, because it has a field named 'objects'.\" % cls.__name__)\n        manager = Manager()\n        manager.auto_created = True\n        cls.add_to_class('objects', manager)\n    for index in cls._meta.indexes:\n        if not index.name:\n            index.set_name_with_model(cls)\n    class_prepared.send(sender=cls)",
    "docstring": "Create some methods once self._meta has been populated.",
    "type": "method",
    "file_path": "django\\django\\db\\models\\base.py",
    "ast_data": "FunctionDef name:_prepare arg:cls arguments arg Assign Call If Assign Call Assign Call If Assign Assign Call If Compare Assign Call Assign Call If Call If If Call Compare Raise Call Assign Call Assign Call For If Call Call"
  },
  {
    "library": "kornia",
    "name": "__init__",
    "source_code": "def __init__(self, *, transformer_dim: int, transformer: Module, num_multimask_outputs: int=3, activation: type[Module]=nn.GELU, iou_head_depth: int=3, iou_head_hidden_dim: int=256) -> None:\n    super().__init__()\n    self.transformer_dim = transformer_dim\n    self.transformer = transformer\n    self.num_multimask_outputs = num_multimask_outputs\n    self.iou_token = nn.Embedding(1, transformer_dim)\n    self.num_mask_tokens = num_multimask_outputs + 1\n    self.mask_tokens = nn.Embedding(self.num_mask_tokens, transformer_dim)\n    self.output_upscaling = nn.Sequential(nn.ConvTranspose2d(transformer_dim, transformer_dim // 4, kernel_size=2, stride=2), LayerNorm2d(transformer_dim // 4), activation(), nn.ConvTranspose2d(transformer_dim // 4, transformer_dim // 8, kernel_size=2, stride=2), activation())\n    self.output_hypernetworks_mlps = nn.ModuleList([MLP(transformer_dim, transformer_dim, transformer_dim // 8, 3) for i in range(self.num_mask_tokens)])\n    self.iou_prediction_head = MLP(transformer_dim, iou_head_hidden_dim, self.num_mask_tokens, iou_head_depth)",
    "docstring": "Predicts masks given an image and prompt embeddings, using a transformer architecture. Args: transformer_dim: the channel dimension of the transformer transformer: the transformer used to predict masks num_multimask_outputs: the number of masks to predict when disambiguating masks activation: the type of activation to use when upscaling masks iou_head_depth: the depth of the MLP used to predict mask quality iou_head_hidden_dim: the hidden dimension of the MLP used to predict mask quality",
    "type": "method",
    "file_path": "kornia\\kornia\\contrib\\models\\sam\\architecture\\mask_decoder.py",
    "ast_data": "FunctionDef name:__init__ arg:self arguments arg arg arg arg arg arg arg Call Call Assign Assign Assign Assign Call Assign Assign Call Assign Call Call Call Call Call Call Assign Call Call Call Assign Call"
  },
  {
    "library": "pandas",
    "name": "_maybe_return_indexers",
    "source_code": "def _maybe_return_indexers(meth: F) -> F:\n\n    @functools.wraps(meth)\n    def join(self, other: Index, *, how: JoinHow='left', level=None, return_indexers: bool=False, sort: bool=False):\n        join_index, lidx, ridx = meth(self, other, how=how, level=level, sort=sort)\n        if not return_indexers:\n            return join_index\n        if lidx is not None:\n            lidx = ensure_platform_int(lidx)\n        if ridx is not None:\n            ridx = ensure_platform_int(ridx)\n        return (join_index, lidx, ridx)\n    return cast(F, join)",
    "docstring": "Decorator to simplify 'return_indexers' checks in Index.join.",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\indexes\\base.py",
    "ast_data": "FunctionDef name:_maybe_return_indexers arg:meth arguments arg FunctionDef name:join arg:self arg:other arguments arg arg arg arg arg arg Assign Call If Return return:yes If Compare Assign Call If Compare Assign Call Return return:yes Call Return return:yes Call"
  },
  {
    "library": "django",
    "name": "get_inlines",
    "source_code": "def get_inlines(self, request, obj):\n    return self.inlines",
    "docstring": "Hook for specifying custom inlines.",
    "type": "method",
    "file_path": "django\\django\\contrib\\admin\\options.py",
    "ast_data": "FunctionDef name:get_inlines arg:self arg:request arg:obj arguments arg arg arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "Operand",
    "source_code": "class Operand(NamedTuple):\n    op_type: int\n    shape: tuple[int, ...]\n    dim_order: DimOrder\n    scale: float\n    zero_point: int\n\n    def use_nchw(self):\n        if self.dim_order is DimOrder.PRESUMED_CONTIGUOUS:\n            return True\n        if self.dim_order is DimOrder.CHANNELS_LAST:\n            return False\n        raise Exception('Unknown dim order')",
    "docstring": "Represenation of an NNAPI operand.",
    "type": "class",
    "file_path": "pytorch\\torch\\backends\\_nnapi\\serializer.py",
    "ast_data": "ClassDef name:Operand FunctionDef name:use_nchw arg:self arguments arg If Compare Return return:yes If Compare Return return:yes Raise Call"
  },
  {
    "library": "kornia",
    "name": "JointBilateralBlur",
    "source_code": "class JointBilateralBlur(_BilateralBlur):\n\n    def forward(self, input: Tensor, guidance: Tensor) -> Tensor:\n        return joint_bilateral_blur(input, guidance, self.kernel_size, self.sigma_color, self.sigma_space, self.border_type, self.color_distance_type)",
    "docstring": "Blur a tensor using a Joint Bilateral filter. This operator is almost identical to a Bilateral filter. The only difference is that the color Gaussian kernel is computed based on another image called a guidance image. See :class: for more information. Arguments: kernel_size: the size of the kernel. sigma_color: the standard deviation for intensity/color Gaussian kernel. Smaller values preserve more edges. sigma_space: the standard deviation for spatial Gaussian kernel. This is similar to `gaussian_blur2d()(B, C, H, W)(B, C, H, W)(B, C, H, W)` Examples: >>> input = torch.rand(2, 4, 5, 5) >>> guidance = torch.rand(2, 4, 5, 5) >>> blur = JointBilateralBlur((3, 3), 0.1, (1.5, 1.5)) >>> output = blur(input, guidance) >>> output.shape torch.Size([2, 4, 5, 5])",
    "type": "class",
    "file_path": "kornia\\kornia\\filters\\bilateral.py",
    "ast_data": "ClassDef name:JointBilateralBlur FunctionDef name:forward arg:self arg:input arg:guidance arguments arg arg arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "empty",
    "source_code": "def empty(*size, dtype: Optional[torch.dtype]=None, layout: torch.layout=torch.strided, requires_grad: bool=False, device_mesh: Optional[DeviceMesh]=None, placements: Optional[Sequence[Placement]]=None) -> DTensor:\n    torch_size = normalize_to_torch_size(size)\n    return _dtensor_init_helper(torch.empty, torch_size, dtype=dtype, layout=layout, requires_grad=requires_grad, device_mesh=device_mesh, placements=placements)",
    "docstring": "Returns a :class: filled with uninitialized data. The shape of the :class: is defined by the variable argument `DTensortorch.dtypeDTensortorch.set_default_dtypetorch.layoutDTensorDTensorDeviceMeshPlacementDTensor` object on each rank",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\tensor\\_api.py",
    "ast_data": "FunctionDef name:empty arguments arg arg arg arg arg arg Assign Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "_move_module_to_device",
    "source_code": "def _move_module_to_device(module: nn.Module, ignored_params: set[nn.Parameter], ignored_buffers: set[torch.Tensor], device_from_device_id: Optional[torch.device]) -> None:\n    cpu_device = torch.device('cpu')\n    if device_from_device_id is not None:\n        queue: collections.deque[nn.Module] = collections.deque()\n        queue.append(module)\n        params: list[nn.Parameter] = []\n        buffers: list[torch.Tensor] = []\n        while queue:\n            curr_module = queue.popleft()\n            params.extend((param for param in curr_module.parameters(recurse=False) if param.device == cpu_device))\n            buffers.extend((buffer for buffer in curr_module.buffers(recurse=False) if buffer.device == cpu_device))\n            for submodule in curr_module.children():\n                if not isinstance(submodule, fsdp_file.FullyShardedDataParallel):\n                    queue.append(submodule)\n        params_to_move = [p for p in params if p not in ignored_params]\n        bufs_to_move = [p for p in buffers if p not in ignored_buffers]\n        _move_states_to_device(params_to_move, bufs_to_move, device_from_device_id)\n        return\n    param = next(_get_orig_params(module, ignored_params), None)\n    if param is not None and param.device == cpu_device:\n        _warn_cpu_init()",
    "docstring": "Move ``.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\fsdp\\_init_utils.py",
    "ast_data": "FunctionDef name:_move_module_to_device arg:module arg:ignored_params arg:ignored_buffers arg:device_from_device_id arguments arg arg arg arg Assign Call If Compare Call Call While Assign Call Call Call Compare Call Call Compare For Call If Call Call Assign Compare Assign Compare Call Return return:no Assign Call Call If BoolOp Compare Compare Call"
  },
  {
    "library": "matplotlib",
    "name": "contains",
    "source_code": "def contains(self, x, y):\n    return self.containsx(x) and self.containsy(y)",
    "docstring": "Return whether `` is in the bounding box or on its edge.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\transforms.py",
    "ast_data": "FunctionDef name:contains arg:self arg:x arg:y arguments arg arg arg Return return:yes BoolOp Call Call"
  },
  {
    "library": "scipy",
    "name": "_calc_vertices_regions",
    "source_code": "def _calc_vertices_regions(self):\n    conv = scipy.spatial.ConvexHull(self.points)\n    self.vertices = self.radius * conv.equations[:, :-1] + self.center\n    self._simplices = conv.simplices\n    simplex_indices = np.arange(len(self._simplices))\n    tri_indices = np.column_stack([simplex_indices] * self._dim).ravel()\n    point_indices = self._simplices.ravel()\n    indices = np.argsort(point_indices, kind='mergesort')\n    flattened_groups = tri_indices[indices].astype(np.intp)\n    intervals = np.cumsum(np.bincount(point_indices + 1))\n    groups = [list(flattened_groups[intervals[i]:intervals[i + 1]]) for i in range(len(intervals) - 1)]\n    self.regions = groups",
    "docstring": "Calculates the Voronoi vertices and regions of the generators stored in self.points. The vertices will be stored in self.vertices and the regions in self.regions. This algorithm was discussed at PyData London 2015 by Tyler Reddy, Ross Hemsley and Nikolai Nowaczyk",
    "type": "method",
    "file_path": "scipy\\scipy\\spatial\\_spherical_voronoi.py",
    "ast_data": "FunctionDef name:_calc_vertices_regions arg:self arguments arg Assign Call Assign Assign Assign Call Call Assign Call Call Assign Call Assign Call Assign Call Assign Call Call Assign Call Call Call Assign"
  },
  {
    "library": "pytorch",
    "name": "on_compile_start",
    "source_code": "def on_compile_start(callback: Callable[[], None]) -> Callable[[], None]:\n    callback_handler.register_start_callback(callback)\n    return callback",
    "docstring": "Decorator to register a callback function for the start of the compilation.",
    "type": "function",
    "file_path": "pytorch\\torch\\_dynamo\\callback.py",
    "ast_data": "FunctionDef name:on_compile_start arg:callback arguments arg Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "build",
    "source_code": "@staticmethod\ndef build(rank: int, store: Store, local_addr: Optional[str], server_port: Optional[int]=None) -> 'RendezvousStoreInfo':\n    if rank == 0:\n        addr = local_addr or socket.getfqdn()\n        port = server_port or get_free_port()\n        store.set(RendezvousStoreInfo.MASTER_ADDR_KEY, addr.encode(encoding='UTF-8'))\n        store.set(RendezvousStoreInfo.MASTER_PORT_KEY, str(port).encode(encoding='UTF-8'))\n    addr = store.get(RendezvousStoreInfo.MASTER_ADDR_KEY).decode(encoding='UTF-8')\n    port = int(store.get(RendezvousStoreInfo.MASTER_PORT_KEY).decode(encoding='UTF-8'))\n    return RendezvousStoreInfo(master_addr=addr, master_port=port)",
    "docstring": "Factory method, finds unused new port on rank0 host and addr/port info with all ranks. If master_addr/master_port is knowns (useful when sharing existing tcp store server) use the constructor. Args: rank: rank of the current node store: store to use for rendezvous local_addr: address of the current node, if not provided will be resolved from hostname server_port: port of the TCPStore server, when the TCPStore is shared.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\elastic\\rendezvous\\api.py",
    "ast_data": "FunctionDef name:build arg:rank arg:store arg:local_addr arg:server_port arguments arg arg arg arg If Compare Assign BoolOp Call Assign BoolOp Call Call Call Call Call Call Assign Call Call Assign Call Call Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "unshuffle_from_sc_to_cpu",
    "source_code": "def unshuffle_from_sc_to_cpu(t: tensor.Tensor, num_sparse_cores: int, offset_in_shard: int, size_in_shard: int, shard_rotation: int=0) -> tensor.Tensor:\n    old_shape = t.shape\n    if t.shape[0] % num_sparse_cores != 0:\n        raise ValueError('The dim of table ({}) should be multiple of number of sparse cores ({})'.format(t.shape[1], num_sparse_cores))\n    shards_t = array_ops.reshape(t, (num_sparse_cores, t.shape[0] // num_sparse_cores, t.shape[1]))\n    shards = shards_t[:, offset_in_shard:offset_in_shard + size_in_shard, :]\n    if shard_rotation:\n        shards = manip_ops.roll(shards, -shard_rotation, axis=0)\n    intermediate_tensor = array_ops.transpose(shards, (1, 0, 2))\n    new_shape = (size_in_shard * num_sparse_cores, old_shape[1])\n    return array_ops.reshape(intermediate_tensor, new_shape)",
    "docstring": "Unshuffles the sparse core sharded embedding tables to unsharded. This converts an input tensor respresenting stacked and sharded embedding table into a specific embedding table variable by using the provided metadata about the said table within the stacked, sharded embedding table. Args: t: The input stacked and sharded embedding table from sparsecore. num_sparse_cores: The number of sparsecores, this determines the number of shards that are present in the input t. offset_in_shard: Offset within a shard where the queried table starts. size_in_shard: size (number of rows) of this queried table within each shard of the input t. shard_rotation: The rotation of this table's shards. Returns: An embedding table which is part of the stacked embedding table t.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\tpu\\tpu_embedding_v3_utils.py",
    "ast_data": "FunctionDef name:unshuffle_from_sc_to_cpu arg:t arg:num_sparse_cores arg:offset_in_shard arg:size_in_shard arg:shard_rotation arguments arg arg arg arg arg Assign If Compare Raise Call Call Assign Call Assign If Assign Call Assign Call Assign Return return:yes Call"
  },
  {
    "library": "cherrypy",
    "name": "read_headers",
    "source_code": "@classmethod\ndef read_headers(cls, fp):\n    headers = httputil.HeaderMap()\n    while True:\n        line = fp.readline()\n        if not line:\n            raise EOFError('Illegal end of headers.')\n        if line == b'\\r\\n':\n            break\n        if not line.endswith(b'\\r\\n'):\n            raise ValueError('MIME requires CRLF terminators: %r' % line)\n        if line[0] in b' \\t':\n            v = line.strip().decode('ISO-8859-1')\n        else:\n            k, v = line.split(b':', 1)\n            k = k.strip().decode('ISO-8859-1')\n            v = v.strip().decode('ISO-8859-1')\n        existing = headers.get(k)\n        if existing:\n            v = ', '.join((existing, v))\n        headers[k] = v\n    return headers",
    "docstring": "Read HTTP headers from a file handle.",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\_cpreqbody.py",
    "ast_data": "FunctionDef name:read_headers arg:cls arg:fp arguments arg arg Assign Call While Assign Call If Raise Call If Compare If Call Raise Call If Compare Assign Call Call Assign Call Assign Call Call Assign Call Call Assign Call If Assign Call Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "cosine_similarity",
    "source_code": "@dispatch.add_dispatch_support\ndef cosine_similarity(y_true, y_pred, axis=-1):\n    y_true = nn.l2_normalize(y_true, axis=axis)\n    y_pred = nn.l2_normalize(y_pred, axis=axis)\n    return -math_ops.reduce_sum(y_true * y_pred, axis=axis)",
    "docstring": "Computes the cosine similarity between labels and predictions. Note that it is a number between -1 and 1. When it is a negative number between -1 and 0, 0 indicates orthogonality and values closer to -1 indicate greater similarity. The values closer to 1 indicate greater dissimilarity. This makes it usable as a loss function in a setting where you try to maximize the proximity between predictions and targets. If either or is a zero vector, cosine similarity will be 0 regardless of the proximity between predictions and targets. Standalone usage: >>> y_true = [[0., 1.], [1., 1.], [1., 1.]] >>> y_pred = [[1., 0.], [1., 1.], [-1., -1.]] >>> loss = tf.keras.losses.cosine_similarity(y_true, y_pred, axis=1) >>> loss.numpy() array([-0., -0.999, 0.999], dtype=float32) Args: y_true: Tensor of true targets. y_pred: Tensor of predicted targets. axis: Axis along which to determine similarity. Returns: Cosine similarity tensor.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\losses.py",
    "ast_data": "FunctionDef name:cosine_similarity arg:y_true arg:y_pred arg:axis arguments arg arg arg Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "django",
    "name": "xframe_options_sameorigin",
    "source_code": "def xframe_options_sameorigin(view_func):\n    if iscoroutinefunction(view_func):\n\n        async def _view_wrapper(*args, **kwargs):\n            response = await view_func(*args, **kwargs)\n            if response.get('X-Frame-Options') is None:\n                response['X-Frame-Options'] = 'SAMEORIGIN'\n            return response\n    else:\n\n        def _view_wrapper(*args, **kwargs):\n            response = view_func(*args, **kwargs)\n            if response.get('X-Frame-Options') is None:\n                response['X-Frame-Options'] = 'SAMEORIGIN'\n            return response\n    return wraps(view_func)(_view_wrapper)",
    "docstring": "Modify a view function so its response has the X-Frame-Options HTTP header set to 'SAMEORIGIN' as long as the response doesn't already have that header set. Usage: @xframe_options_sameorigin def some_view(request): ...",
    "type": "function",
    "file_path": "django\\django\\views\\decorators\\clickjacking.py",
    "ast_data": "FunctionDef name:xframe_options_sameorigin arg:view_func arguments arg If Call AsyncFunctionDef name:_view_wrapper arguments arg arg Assign Call If Compare Call Assign Return return:yes FunctionDef name:_view_wrapper arguments arg arg Assign Call If Compare Call Assign Return return:yes Return return:yes Call Call"
  },
  {
    "library": "kornia",
    "name": "tensor_to_image",
    "source_code": "def tensor_to_image(tensor: Tensor, keepdim: bool=False, force_contiguous: bool=False) -> Any:\n    if not isinstance(tensor, Tensor):\n        raise TypeError(f'Input type is not a Tensor. Got {type(tensor)}')\n    if len(tensor.shape) > 4 or len(tensor.shape) < 2:\n        raise ValueError('Input size must be a two, three or four dimensional tensor')\n    input_shape = tensor.shape\n    image = tensor.cpu().detach()\n    if len(input_shape) == 2:\n        pass\n    elif len(input_shape) == 3:\n        if input_shape[0] == 1:\n            image = image.squeeze()\n        else:\n            image = image.permute(1, 2, 0)\n    elif len(input_shape) == 4:\n        image = image.permute(0, 2, 3, 1)\n        if input_shape[0] == 1 and (not keepdim):\n            image = image.squeeze(0)\n        if input_shape[1] == 1:\n            image = image.squeeze(-1)\n    else:\n        raise ValueError(f'Cannot process tensor with shape {input_shape}')\n    if force_contiguous:\n        image = image.contiguous()\n    return image.numpy()",
    "docstring": "Convert a PyTorch tensor image to a numpy image. In case the tensor is in the GPU, it will be copied back to CPU. Args: tensor: image of the form :math:, :math: or :math:. keepdim: If `(H, W, C)(H, W)contiguous(H, W)(H, W, C)(B, H, W, C)`. Example: >>> img = torch.ones(1, 3, 3) >>> tensor_to_image(img).shape (3, 3) >>> img = torch.ones(3, 4, 4) >>> tensor_to_image(img).shape (4, 4, 3)",
    "type": "function",
    "file_path": "kornia\\kornia\\utils\\image.py",
    "ast_data": "FunctionDef name:tensor_to_image arg:tensor arg:keepdim arg:force_contiguous arguments arg arg arg If Call Raise Call Call If BoolOp Compare Call Compare Call Raise Call Assign Assign Call Call If Compare Call If Compare Call If Compare Assign Call Assign Call If Compare Call Assign Call If BoolOp Compare Assign Call If Compare Assign Call Raise Call If Assign Call Return return:yes Call"
  },
  {
    "library": "django",
    "name": "get_context_data",
    "source_code": "def get_context_data(self, **kwargs):\n    context = {}\n    if self.object:\n        context['object'] = self.object\n        context_object_name = self.get_context_object_name(self.object)\n        if context_object_name:\n            context[context_object_name] = self.object\n    context.update(kwargs)\n    return super().get_context_data(**context)",
    "docstring": "Insert the single object into the context dict.",
    "type": "method",
    "file_path": "django\\django\\views\\generic\\detail.py",
    "ast_data": "FunctionDef name:get_context_data arg:self arguments arg arg Assign If Assign Assign Call If Assign Call Return return:yes Call Call"
  },
  {
    "library": "scipy",
    "name": "restart",
    "source_code": "def restart(self, iter=None):\n    if self.output is None:\n        raise OdrError('cannot restart: run() has not been called before')\n    self.set_job(restart=1)\n    self.work = self.output.work\n    self.iwork = self.output.iwork\n    self.maxit = iter\n    return self.run()",
    "docstring": "Restarts the run with iter more iterations. Parameters ---------- iter : int, optional ODRPACK's default for the number of new iterations is 10. Returns ------- output : Output instance This object is also assigned to the attribute .output .",
    "type": "method",
    "file_path": "scipy\\scipy\\odr\\_odrpack.py",
    "ast_data": "FunctionDef name:restart arg:self arg:iter arguments arg arg If Compare Raise Call Call Assign Assign Assign Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "_low_precision_hook_enabled",
    "source_code": "def _low_precision_hook_enabled(self) -> bool:\n    return self._comm_hook is not None and self._comm_hook in LOW_PRECISION_HOOKS",
    "docstring": "Whether a low precision hook is registered or not.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\fsdp\\fully_sharded_data_parallel.py",
    "ast_data": "FunctionDef name:_low_precision_hook_enabled arg:self arguments arg Return return:yes BoolOp Compare Compare"
  },
  {
    "library": "scikit-learn",
    "name": "__call__",
    "source_code": "def __call__(self, X, Y=None, eval_gradient=False):\n    if eval_gradient:\n        K, K_gradient = self.kernel(X, Y, eval_gradient=True)\n        K_gradient *= self.exponent * K[:, :, np.newaxis] ** (self.exponent - 1)\n        return (K ** self.exponent, K_gradient)\n    else:\n        K = self.kernel(X, Y, eval_gradient=False)\n        return K ** self.exponent",
    "docstring": "Return the kernel k(X, Y) and optionally its gradient. Parameters ---------- X : array-like of shape (n_samples_X, n_features) or list of object Left argument of the returned kernel k(X, Y) Y : array-like of shape (n_samples_Y, n_features) or list of object, default=None Right argument of the returned kernel k(X, Y). If None, k(X, X) is evaluated instead. eval_gradient : bool, default=False Determines whether the gradient with respect to the log of the kernel hyperparameter is computed. Returns ------- K : ndarray of shape (n_samples_X, n_samples_Y) Kernel k(X, Y) K_gradient : ndarray of shape (n_samples_X, n_samples_X, n_dims), optional The gradient of the kernel k(X, X) with respect to the log of the hyperparameter of the kernel. Only returned when is True.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\gaussian_process\\kernels.py",
    "ast_data": "FunctionDef name:__call__ arg:self arg:X arg:Y arg:eval_gradient arguments arg arg arg arg If Assign Call Return return:yes Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "yuv_to_rgb",
    "source_code": "@tf_export('image.yuv_to_rgb')\n@dispatch.add_dispatch_support\ndef yuv_to_rgb(images):\n    images = ops.convert_to_tensor(images, name='images')\n    kernel = ops.convert_to_tensor(_yuv_to_rgb_kernel, dtype=images.dtype, name='kernel')\n    ndims = images.get_shape().ndims\n    return math_ops.tensordot(images, kernel, axes=[[ndims - 1], [0]])",
    "docstring": "Converts one or more images from YUV to RGB. Outputs a tensor of the same shape as the tensor, containing the RGB value of the pixels. The output is only well defined if the Y value in images are in [0,1], U and V value are in [-0.5,0.5]. As per the above description, you need to scale your YUV images if their pixel values are not in the required range. Below given example illustrates preprocessing of each channel of images before feeding them to . Args: images: 2-D or higher rank. Image data to convert. Last dimension must be size 3. Returns: images: tensor with the same shape as .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\image_ops_impl.py",
    "ast_data": "FunctionDef name:yuv_to_rgb arg:images arguments arg Assign Call Assign Call Assign Call Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "_rebuild_sparse_tensor",
    "source_code": "def _rebuild_sparse_tensor(layout, data):\n    if layout == torch.sparse_coo:\n        if len(data) == 3:\n            indices, values, size = data\n            is_coalesced = None\n        else:\n            indices, values, size, is_coalesced = data\n        result = torch.sparse_coo_tensor(indices, values, size, check_invariants=False, is_coalesced=is_coalesced)\n        _sparse_tensors_to_validate.append(result)\n        return result\n    elif layout in {torch.sparse_csr, torch.sparse_csc, torch.sparse_bsr, torch.sparse_bsc}:\n        compressed_indices, plain_indices, values, size = data\n        result = torch.sparse_compressed_tensor(compressed_indices, plain_indices, values, size, layout=layout, check_invariants=False)\n        _sparse_tensors_to_validate.append(result)\n        return result\n    raise NotImplementedError(f'rebuilding sparse tensor for layout {layout}')",
    "docstring": "Rebuilds a sparse tensor from its sparse storage representation. Args: layout (str): The sparse storage layout of the tensor. data (tuple): The tensor's sparse storage representation.",
    "type": "function",
    "file_path": "pytorch\\torch\\_utils.py",
    "ast_data": "FunctionDef name:_rebuild_sparse_tensor arg:layout arg:data arguments arg arg If Compare If Compare Call Assign Assign Assign Assign Call Call Return return:yes If Compare Assign Assign Call Call Return return:yes Raise Call"
  },
  {
    "library": "tensorflow",
    "name": "initialize_all_tables",
    "source_code": "@tf_export(v1=['initialize_all_tables'])\n@deprecated(None, 'Use `tf.tables_initializer` instead.')\ndef initialize_all_tables(name='init_all_tables'):\n    return tables_initializer(name)",
    "docstring": "Returns an Op that initializes all tables of the default graph. Args: name: Optional name for the initialization op. Returns: An Op that initializes all tables. Note that if there are not tables the returned Op is a NoOp.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\lookup_ops.py",
    "ast_data": "FunctionDef name:initialize_all_tables arg:name arguments arg Return return:yes Call Call Call"
  },
  {
    "library": "django",
    "name": "has_perm",
    "source_code": "def has_perm(self, request, obj=None):\n    return self.model_admin.has_view_permission(request, obj=obj)",
    "docstring": "Check if user has permission to access the related model.",
    "type": "method",
    "file_path": "django\\django\\contrib\\admin\\views\\autocomplete.py",
    "ast_data": "FunctionDef name:has_perm arg:self arg:request arg:obj arguments arg arg arg Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "import_module_from_path",
    "source_code": "def import_module_from_path(mod_name, mod_path):\n    spec = importlib.util.spec_from_file_location(mod_name, mod_path)\n    mod = importlib.util.module_from_spec(spec)\n    spec.loader.exec_module(mod)\n    return mod",
    "docstring": "Import module with name from file path",
    "type": "function",
    "file_path": "scipy\\dev.py",
    "ast_data": "FunctionDef name:import_module_from_path arg:mod_name arg:mod_path arguments arg arg Assign Call Assign Call Call Return return:yes"
  },
  {
    "library": "django",
    "name": "hidden_fields",
    "source_code": "def hidden_fields(self):\n    return [field for field in self if field.is_hidden]",
    "docstring": "Return a list of all the BoundField objects that are hidden fields. Useful for manual form layout in templates.",
    "type": "method",
    "file_path": "django\\django\\forms\\forms.py",
    "ast_data": "FunctionDef name:hidden_fields arg:self arguments arg Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "_yeo_johnson_optimize",
    "source_code": "def _yeo_johnson_optimize(self, x):\n    x_tiny = np.finfo(np.float64).tiny\n\n    def _neg_log_likelihood(lmbda):\n        x_trans = self._yeo_johnson_transform(x, lmbda)\n        n_samples = x.shape[0]\n        x_trans_var = x_trans.var()\n        if x_trans_var < x_tiny:\n            return np.inf\n        log_var = np.log(x_trans_var)\n        loglike = -n_samples / 2 * log_var\n        loglike += (lmbda - 1) * (np.sign(x) * np.log1p(np.abs(x))).sum()\n        return -loglike\n    x = x[~np.isnan(x)]\n    return _yeojohnson_lambda(_neg_log_likelihood, x)",
    "docstring": "Find and return optimal lambda parameter of the Yeo-Johnson transform by MLE, for observed data x. Like for Box-Cox, MLE is done via the brent optimizer.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\preprocessing\\_data.py",
    "ast_data": "FunctionDef name:_yeo_johnson_optimize arg:self arg:x arguments arg arg Assign Call FunctionDef name:_neg_log_likelihood arg:lmbda arguments arg Assign Call Assign Assign Call If Compare Return return:yes Assign Call Assign Call Call Call Call Return return:yes Assign Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "get_task_states",
    "source_code": "def get_task_states(self, job_configs):\n    if self._context_handle:\n        job_names, task_nums = zip(*job_configs)\n        return pywrap_tfe.TFE_GetTaskStates(self._context_handle, job_names, task_nums)\n    else:\n        raise ValueError('Context is not initialized.')",
    "docstring": "Get task states from the Coordination Service. Args: job_configs: A list of tuples of job name and task number. Returns: A list of TF_Status.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\context.py",
    "ast_data": "FunctionDef name:get_task_states arg:self arg:job_configs arguments arg arg If Assign Call Return return:yes Call Raise Call"
  },
  {
    "library": "tensorflow",
    "name": "IntGaugeCell",
    "source_code": "class IntGaugeCell(object):\n    __slots__ = ['_cell']\n\n    def __init__(self, cell):\n        self._cell = cell\n\n    def set(self, value):\n        pywrap_tfe.TFE_MonitoringIntGaugeCellSet(self._cell, value)\n\n    def value(self):\n        return pywrap_tfe.TFE_MonitoringIntGaugeCellValue(self._cell)",
    "docstring": "A single integer value stored in an .",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\monitoring.py",
    "ast_data": "ClassDef name:IntGaugeCell Assign FunctionDef name:__init__ arg:self arg:cell arguments arg arg Assign FunctionDef name:set arg:self arg:value arguments arg arg Call FunctionDef name:value arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "essential_node_kinds",
    "source_code": "def essential_node_kinds(self) -> set[str]:\n    return {n.kind() for n in self.graph.nodes() if n.kind() not in self._EXCLUDED_NODE_KINDS}",
    "docstring": "Return the set of node kinds in the subgraph excluding those in .",
    "type": "method",
    "file_path": "pytorch\\torch\\onnx\\verification.py",
    "ast_data": "FunctionDef name:essential_node_kinds arg:self arguments arg Return return:yes Call Call Compare Call"
  },
  {
    "library": "tensorflow",
    "name": "solve",
    "source_code": "def solve(self, rhs, adjoint=False, adjoint_arg=False, name='solve'):\n    if self.is_non_singular is False:\n        raise NotImplementedError('Exact solve not implemented for an operator that is expected to be singular.')\n    if self.is_square is False:\n        raise NotImplementedError('Exact solve not implemented for an operator that is expected to not be square.')\n    if isinstance(rhs, LinearOperator):\n        left_operator = self.adjoint() if adjoint else self\n        right_operator = rhs.adjoint() if adjoint_arg else rhs\n        if right_operator.range_dimension is not None and left_operator.domain_dimension is not None and (right_operator.range_dimension != left_operator.domain_dimension):\n            raise ValueError('Operators are incompatible. Expected `rhs` to have dimension {} but got {}.'.format(left_operator.domain_dimension, right_operator.range_dimension))\n        with self._name_scope(name):\n            return self._linop_solve(left_operator, right_operator)\n    with self._name_scope(name):\n        rhs = tensor_conversion.convert_to_tensor_v2_with_dispatch(rhs, name='rhs')\n        self._check_input_dtype(rhs)\n        self_dim = -1 if adjoint else -2\n        arg_dim = -1 if adjoint_arg else -2\n        tensor_shape.dimension_at_index(self.shape, self_dim).assert_is_compatible_with(rhs.shape[arg_dim])\n        return self._solve(rhs, adjoint=adjoint, adjoint_arg=adjoint_arg)",
    "docstring": "Solve (exact or approx) (batch) systems of equations: . The returned will be close to an exact solution if is well conditioned. Otherwise closeness will vary. See class docstring for details. Examples: Args: rhs: with same as this operator and compatible shape. is treated like a [batch] matrix meaning for every set of leading dimensions, the last two dimensions defines a matrix. See class docstring for definition of compatibility. adjoint: Python . If , solve the system involving the adjoint of this : . adjoint_arg: Python . If , solve where is the hermitian transpose (transposition and complex conjugation). name: A name scope to use for ops added by this method. Returns: with shape and same as . Raises: NotImplementedError: If or is False.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\linalg\\linear_operator.py",
    "ast_data": "FunctionDef name:solve arg:self arg:rhs arg:adjoint arg:adjoint_arg arg:name arguments arg arg arg arg arg If Compare Raise Call If Compare Raise Call If Call Assign Call Assign Call If BoolOp Compare Compare Compare Raise Call Call With Call Return return:yes Call With Call Assign Call Call Assign Assign Call Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "reset_cudagraph_trees",
    "source_code": "def reset_cudagraph_trees() -> None:\n    container_dict = get_obj(local, 'tree_manager_containers')\n    locks_dict = get_obj(local, 'tree_manager_locks')\n    for device, lock in locks_dict.items():\n        with lock:\n            container = container_dict.get(device)\n            if not container or not container.tree_manager:\n                continue\n            container.tree_manager.shutdown()\n    _set_cached_tensors_enabled(False)\n    container_dict.clear()\n    MarkStepBox.mark_step_counter = 0",
    "docstring": "Clear all cudagraph trees",
    "type": "function",
    "file_path": "pytorch\\torch\\_inductor\\cudagraph_trees.py",
    "ast_data": "FunctionDef name:reset_cudagraph_trees arguments Assign Call Assign Call For Call With Assign Call If BoolOp Call Call Call Assign"
  },
  {
    "library": "tensorflow",
    "name": "_merge_dims",
    "source_code": "def _merge_dims(value, outer_axis, inner_axis):\n    assert outer_axis < inner_axis\n    if isinstance(value, (tensor.Tensor, ragged_tensor.RaggedTensor)):\n        return ragged_tensor.merge_dims(value, outer_axis, inner_axis)\n    else:\n        assert isinstance(value, StructuredTensor)\n        fields = dict(((k, _merge_dims(v, outer_axis, inner_axis)) for k, v in value._fields.items()))\n        ragged_shape = value._ragged_shape._merge_dims(outer_axis, inner_axis)\n        return StructuredTensor(fields, ragged_shape)",
    "docstring": "Merges of into a single dimension.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\structured\\structured_tensor.py",
    "ast_data": "FunctionDef name:_merge_dims arg:value arg:outer_axis arg:inner_axis arguments arg arg arg Compare If Call Return return:yes Call Call Assign Call Call Call Assign Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "_get_output_spec_from_output_sharding",
    "source_code": "def _get_output_spec_from_output_sharding(output_sharding: OutputSharding) -> DTensorSpec:\n    if isinstance(output_sharding.output_spec, DTensorSpec):\n        return output_sharding.output_spec\n    else:\n        assert isinstance(output_sharding.output_spec, Sequence)\n        assert output_sharding.output_spec[0] is not None\n        output_sharding.output_spec[0].tensor_meta = None\n        return output_sharding.output_spec[0]",
    "docstring": "Util function to extract output spec from output sharding.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\tensor\\experimental\\_tp_transform.py",
    "ast_data": "FunctionDef name:_get_output_spec_from_output_sharding arg:output_sharding arguments arg If Call Return return:yes Call Compare Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_should_invoke_v2_op",
    "source_code": "def _should_invoke_v2_op():\n    if not _ops.executing_eagerly_outside_functions():\n        return False\n    if not _summary_ops_v2.has_default_writer():\n        warnings.warn('Cannot activate TF2 compatibility support for TF1 summary ops: default summary writer not found.')\n        return False\n    if _get_step_for_v2() is None:\n        warnings.warn('Cannot activate TF2 compatibility support for TF1 summary ops: global step not set. To set step for summary writer, use `tf.summary.SummaryWriter.as_default(step=_)`, `tf.summary.experimental.set_step()` or `tf.compat.v1.train.create_global_step()`.')\n        return False\n    return True",
    "docstring": "Check if v2 op can be invoked. When calling TF1 summary op in eager mode, if the following conditions are met, v2 op will be invoked: - The outermost context is eager mode. - A default TF2 summary writer is present. - A step is set for the writer (using , or ). Returns: A boolean indicating whether v2 summary op should be invoked.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\summary\\summary.py",
    "ast_data": "FunctionDef name:_should_invoke_v2_op arguments If Call Return return:yes If Call Call Return return:yes If Compare Call Call Return return:yes Return return:yes"
  },
  {
    "library": "pandas",
    "name": "equals",
    "source_code": "def equals(self, other: object) -> bool:\n    if not isinstance(other, Categorical):\n        return False\n    elif self._categories_match_up_to_permutation(other):\n        other = self._encode_with_my_categories(other)\n        return np.array_equal(self._codes, other._codes)\n    return False",
    "docstring": "Returns True if categorical arrays are equal. Parameters ---------- other : Returns ------- bool",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\arrays\\categorical.py",
    "ast_data": "FunctionDef name:equals arg:self arg:other arguments arg arg If Call Return return:yes If Call Assign Call Return return:yes Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "set_check_props",
    "source_code": "def set_check_props(self, props):\n    _api.check_isinstance(dict, props=props)\n    if 's' in props:\n        props['sizes'] = np.broadcast_to(props.pop('s'), len(self.labels))\n    actives = self.get_status()\n    self._checks.update(props)\n    self._init_status(actives)",
    "docstring": "Set properties of the check button checks. .. versionadded:: 3.7 Parameters ---------- props : dict Dictionary of properties to be used for the check button check.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\widgets.py",
    "ast_data": "FunctionDef name:set_check_props arg:self arg:props arguments arg arg Call If Compare Assign Call Call Call Assign Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "broadcast_to",
    "source_code": "@dispatch.dispatch_for_api(array_ops.broadcast_to)\ndef broadcast_to(input: ragged_tensor.RaggedOrDense, shape: dynamic_ragged_shape.DynamicRaggedShape) -> Union[ragged_tensor.RaggedTensor, tensor_lib.Tensor]:\n    return dynamic_ragged_shape.broadcast_to(input, shape)",
    "docstring": "Broadcasts a potentially ragged tensor to a ragged shape. Tiles as necessary to match the given shape. Behavior is undefined if is not broadcast-compatible with . Args: input: The potentially ragged tensor to broadcast. shape: A Returns: A potentially ragged tensor whose values are taken from , and whose shape matches .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\ragged_array_ops.py",
    "ast_data": "FunctionDef name:broadcast_to arg:input arg:shape arguments arg arg Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "consolidate_state_dict",
    "source_code": "def consolidate_state_dict(self, to: int=0) -> None:\n    self._check_overlap_initialized()\n    self._sync_param_groups(self.param_groups, self.optim.param_groups)\n    empty_messenger = torch.tensor([0], dtype=torch.uint8, device=self._default_device)\n    self._all_state_dicts = []\n    for rank in range(self.world_size):\n        global_rank = dist.distributed_c10d.get_global_rank(self.process_group, rank)\n        if self.rank == to:\n            if rank == self.rank:\n                self._all_state_dicts.append(_recursive_copy_to_device(self.optim.state_dict(), non_blocking=True, device=torch.device('cpu')))\n            else:\n                local_state_dict = _broadcast_object(empty_messenger, src_rank=global_rank, group=self.process_group, device=self._default_device)\n                self._all_state_dicts.append(_recursive_copy_to_device(local_state_dict, non_blocking=True, device=torch.device('cpu')))\n        elif rank == self.rank:\n            _ = _broadcast_object(self.optim.state_dict(), src_rank=self.global_rank, group=self.process_group, device=self._default_device)\n        elif rank != to:\n            _ = _broadcast_object(empty_messenger, src_rank=global_rank, group=self.process_group, device=self._default_device)",
    "docstring": "Consolidate a list of `ZeroRedundancyOptimizerDistributedDataParallel` gradient buckets have been rebuilt. .. warning:: This needs to be called on all ranks.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\optim\\zero_redundancy_optimizer.py",
    "ast_data": "FunctionDef name:consolidate_state_dict arg:self arg:to arguments arg arg Call Call Assign Call Assign For Call Assign Call If Compare If Compare Call Call Call Call Assign Call Call Call Call If Compare Assign Call Call If Compare Assign Call"
  },
  {
    "library": "scipy",
    "name": "_rvs",
    "source_code": "def _rvs(self, n, shape, dim, df, C, random_state):\n    random_state = self._get_random_state(random_state)\n    A = self._standard_rvs(n, shape, dim, df, random_state)\n    for index in np.ndindex(shape):\n        CA = np.dot(C, A[index])\n        A[index] = np.dot(CA, CA.T)\n    return A",
    "docstring": "Draw random samples from a Wishart distribution. Parameters ---------- n : integer Number of variates to generate shape : iterable Shape of the variates to generate dim : int Dimension of the scale matrix df : int Degrees of freedom C : ndarray Cholesky factorization of the scale matrix, lower triangular. %(_doc_random_state)s Notes ----- As this function does no argument checking, it should not be called directly; use 'rvs' instead.",
    "type": "method",
    "file_path": "scipy\\scipy\\stats\\_multivariate.py",
    "ast_data": "FunctionDef name:_rvs arg:self arg:n arg:shape arg:dim arg:df arg:C arg:random_state arguments arg arg arg arg arg arg arg Assign Call Assign Call For Call Assign Call Assign Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "median",
    "source_code": "@_apply_docstring_templates\ndef median(input: Union[Tensor, MaskedTensor], dim: int=-1, *, keepdim: bool=False, dtype: Optional[DType]=None, mask: Optional[Tensor]=None) -> Tensor:\n    if dtype is None:\n        dtype = input.dtype\n    dim_ = _canonical_dim(dim, input.ndim)[0]\n    is_float = torch.is_floating_point(input)\n    if not is_float:\n        input = input.to(dtype=torch.float)\n    mask_input = _combine_input_and_mask(median, input, mask)\n    if mask_input.layout == torch.strided:\n        output = torch.nanmedian(mask_input, dim_, keepdim).values\n        if is_float:\n            return output\n        elif not is_float and (not torch.isnan(output).any()):\n            return output.to(dtype=dtype)\n        else:\n            raise ValueError('masked median expects no fully masked out rows if dtype is not floating point')\n    else:\n        raise ValueError(f'masked median expects strided tensor (got {mask_input.layout} tensor)')",
    "docstring": "{reduction_signature} {reduction_descr} By definition, the identity value of a median operation is the median value of the tensor. If all elements of the input tensor along given dimension(s) :attr: are masked-out, the identity value of the median is undefined. Due to this ambiguity, the elements of output tensor with strided layout, that correspond to fully masked-out elements, have `` values. {reduction_args} {reduction_example}",
    "type": "function",
    "file_path": "pytorch\\torch\\masked\\_ops.py",
    "ast_data": "FunctionDef name:median arg:input arg:dim arguments arg arg arg arg arg If Compare Assign Assign Call Assign Call If Assign Call Assign Call If Compare Assign Call If Return return:yes If BoolOp Call Call Return return:yes Call Raise Call Raise Call"
  },
  {
    "library": "scipy",
    "name": "assoc_laguerre",
    "source_code": "def assoc_laguerre(x, n, k=0.0):\n    return _ufuncs.eval_genlaguerre(n, k, x)",
    "docstring": "Compute the generalized (associated) Laguerre polynomial of degree n and order k. The polynomial :math: is orthogonal over `assoc_laguerreeval_genlaguerre`.",
    "type": "function",
    "file_path": "scipy\\scipy\\special\\_basic.py",
    "ast_data": "FunctionDef name:assoc_laguerre arg:x arg:n arg:k arguments arg arg arg Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "determine_backend_multi",
    "source_code": "def determine_backend_multi(dispatchables, *, domain, only=True, coerce=False, **kwargs):\n    if 'dispatch_type' in kwargs:\n        disp_type = kwargs.pop('dispatch_type')\n        dispatchables = tuple((d if isinstance(d, Dispatchable) else Dispatchable(d, disp_type) for d in dispatchables))\n    else:\n        dispatchables = tuple(dispatchables)\n        if not all((isinstance(d, Dispatchable) for d in dispatchables)):\n            raise TypeError('dispatchables must be instances of uarray.Dispatchable')\n    if len(kwargs) != 0:\n        raise TypeError(f'Received unexpected keyword arguments: {kwargs}')\n    backend = _uarray.determine_backend(domain, dispatchables, coerce)\n    return set_backend(backend, coerce=coerce, only=only)",
    "docstring": "Set a backend supporting all `determine_backend_multimarking determine_backenddetermine_backend_multi` argument. >>> with ua.set_backend(ex.BackendAB), ua.set_backend(ex.BackendBC): ... a, b = ex.TypeA(), ex.TypeB() ... with ua.determine_backend_multi( ... [a, b], dispatch_type=\"mark\", domain=\"ua_examples\" ... ): ... res = ex.creation_multimethod() ... ex.call_multimethod(res, a, b) TypeA",
    "type": "function",
    "file_path": "scipy\\scipy\\_lib\\_uarray\\_backend.py",
    "ast_data": "FunctionDef name:determine_backend_multi arg:dispatchables arguments arg arg arg arg arg If Compare Assign Call Assign Call Call Call Assign Call If Call Call Raise Call If Compare Call Raise Call Assign Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "manually_trace_nn_module_getattr",
    "source_code": "def manually_trace_nn_module_getattr(self, tx: 'InstructionTranslator', name):\n    name_vt = variables.ConstantVariable(name)\n    out = self.getattr_helper(tx, '_parameters', name_vt)\n    if out is None:\n        out = self.getattr_helper(tx, '_modules', name_vt)\n    if out is None:\n        out = self.getattr_helper(tx, '_buffers', name_vt)\n    if out is None:\n        raise_observed_exception(AttributeError, tx)\n    return out",
    "docstring": "Dynamo tracing of nn.Module __getattr__ can be expensive if the model has deep submodule hierarchy. Since the __getattr__ is stable, we can directly look into the underlying datastructures. This saves a lot of compilation time.",
    "type": "method",
    "file_path": "pytorch\\torch\\_dynamo\\variables\\nn_module.py",
    "ast_data": "FunctionDef name:manually_trace_nn_module_getattr arg:self arg:tx arg:name arguments arg arg arg Assign Call Assign Call If Compare Assign Call If Compare Assign Call If Compare Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "resampled",
    "source_code": "def resampled(self, lutsize):\n    new_cmap = LinearSegmentedColormap(self.name, self._segmentdata, lutsize)\n    new_cmap._rgba_over = self._rgba_over\n    new_cmap._rgba_under = self._rgba_under\n    new_cmap._rgba_bad = self._rgba_bad\n    return new_cmap",
    "docstring": "Return a new colormap with *lutsize* entries.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\colors.py",
    "ast_data": "FunctionDef name:resampled arg:self arg:lutsize arguments arg arg Assign Call Assign Assign Assign Return return:yes"
  },
  {
    "library": "authlib",
    "name": "exists_nonce",
    "source_code": "def exists_nonce(self, nonce, request):\n    raise NotImplementedError()",
    "docstring": "Check if the given nonce is existing in your database. Developers should implement this method in subclass, e.g.:: def exists_nonce(self, nonce, request): exists = AuthorizationCode.query.filter_by( client_id=request.payload.client_id, nonce=nonce ).first() return bool(exists) :param nonce: A string of \"nonce\" parameter in request :param request: OAuth2Request instance :return: Boolean",
    "type": "method",
    "file_path": "authlib\\authlib\\oidc\\core\\grants\\implicit.py",
    "ast_data": "FunctionDef name:exists_nonce arg:self arg:nonce arg:request arguments arg arg arg Raise Call"
  },
  {
    "library": "pytorch",
    "name": "_OneHot",
    "source_code": "class _OneHot(Constraint):\n    is_discrete = True\n    event_dim = 1\n\n    def check(self, value):\n        is_boolean = (value == 0) | (value == 1)\n        is_normalized = value.sum(-1).eq(1)\n        return is_boolean.all(-1) & is_normalized",
    "docstring": "Constrain to one-hot vectors.",
    "type": "class",
    "file_path": "pytorch\\torch\\distributions\\constraints.py",
    "ast_data": "ClassDef name:_OneHot Assign Assign FunctionDef name:check arg:self arg:value arguments arg arg Assign Compare Compare Assign Call Call Return return:yes Call"
  },
  {
    "library": "seaborn",
    "name": "plot",
    "source_code": "def plot(self, joint_func, marginal_func, **kwargs):\n    self.plot_marginals(marginal_func, **kwargs)\n    self.plot_joint(joint_func, **kwargs)\n    return self",
    "docstring": "Draw the plot by passing functions for joint and marginal axes. This method passes the `JointGrid.plot_jointJointGrid.plot_marginalsJointGrid` for easy method chaining.",
    "type": "method",
    "file_path": "seaborn\\seaborn\\axisgrid.py",
    "ast_data": "FunctionDef name:plot arg:self arg:joint_func arg:marginal_func arguments arg arg arg arg Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "PairwiseDistance",
    "source_code": "class PairwiseDistance(Module):\n    __constants__ = ['norm', 'eps', 'keepdim']\n    norm: float\n    eps: float\n    keepdim: bool\n\n    def __init__(self, p: float=2.0, eps: float=1e-06, keepdim: bool=False) -> None:\n        super().__init__()\n        self.norm = p\n        self.eps = eps\n        self.keepdim = keepdim\n\n    def forward(self, x1: Tensor, x2: Tensor) -> Tensor:\n        return F.pairwise_distance(x1, x2, self.norm, self.eps, self.keepdim)",
    "docstring": "Computes the pairwise distance between input vectors, or between columns of input matrices. Distances are computed using `e(N, D)(D)N = batch dimensionD = vector dimension(N, D)(D)(N)()keepdim(N, 1)(1)` based on input dimension. Examples: >>> pdist = nn.PairwiseDistance(p=2) >>> input1 = torch.randn(100, 128) >>> input2 = torch.randn(100, 128) >>> output = pdist(input1, input2)",
    "type": "class",
    "file_path": "pytorch\\torch\\nn\\modules\\distance.py",
    "ast_data": "ClassDef name:PairwiseDistance Assign FunctionDef name:__init__ arg:self arg:p arg:eps arg:keepdim arguments arg arg arg arg Call Call Assign Assign Assign FunctionDef name:forward arg:self arg:x1 arg:x2 arguments arg arg arg Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "__call__",
    "source_code": "def __call__(self, X, Y=None, eval_gradient=False):\n    if eval_gradient:\n        K1, K1_gradient = self.k1(X, Y, eval_gradient=True)\n        K2, K2_gradient = self.k2(X, Y, eval_gradient=True)\n        return (K1 + K2, np.dstack((K1_gradient, K2_gradient)))\n    else:\n        return self.k1(X, Y) + self.k2(X, Y)",
    "docstring": "Return the kernel k(X, Y) and optionally its gradient. Parameters ---------- X : array-like of shape (n_samples_X, n_features) or list of object Left argument of the returned kernel k(X, Y) Y : array-like of shape (n_samples_X, n_features) or list of object, default=None Right argument of the returned kernel k(X, Y). If None, k(X, X) is evaluated instead. eval_gradient : bool, default=False Determines whether the gradient with respect to the log of the kernel hyperparameter is computed. Returns ------- K : ndarray of shape (n_samples_X, n_samples_Y) Kernel k(X, Y) K_gradient : ndarray of shape (n_samples_X, n_samples_X, n_dims), optional The gradient of the kernel k(X, X) with respect to the log of the hyperparameter of the kernel. Only returned when is True.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\gaussian_process\\kernels.py",
    "ast_data": "FunctionDef name:__call__ arg:self arg:X arg:Y arg:eval_gradient arguments arg arg arg arg If Assign Call Assign Call Return return:yes Call Return return:yes Call Call"
  },
  {
    "library": "scipy",
    "name": "svd_factorization_projections",
    "source_code": "def svd_factorization_projections(A, m, n, orth_tol, max_refin, tol):\n    U, s, Vt = scipy.linalg.svd(A, full_matrices=False)\n    U = U[:, s > tol]\n    Vt = Vt[s > tol, :]\n    s = s[s > tol]\n\n    def null_space(x):\n        aux1 = Vt.dot(x)\n        aux2 = 1 / s * aux1\n        v = U.dot(aux2)\n        z = x - A.T.dot(v)\n        k = 0\n        while orthogonality(A, z) > orth_tol:\n            if k >= max_refin:\n                break\n            aux1 = Vt.dot(z)\n            aux2 = 1 / s * aux1\n            v = U.dot(aux2)\n            z = z - A.T.dot(v)\n            k += 1\n        return z\n\n    def least_squares(x):\n        aux1 = Vt.dot(x)\n        aux2 = 1 / s * aux1\n        z = U.dot(aux2)\n        return z\n\n    def row_space(x):\n        aux1 = U.T.dot(x)\n        aux2 = 1 / s * aux1\n        z = Vt.T.dot(aux2)\n        return z\n    return (null_space, least_squares, row_space)",
    "docstring": "Return linear operators for matrix A using `` approach.",
    "type": "function",
    "file_path": "scipy\\scipy\\optimize\\_trustregion_constr\\projections.py",
    "ast_data": "FunctionDef name:svd_factorization_projections arg:A arg:m arg:n arg:orth_tol arg:max_refin arg:tol arguments arg arg arg arg arg arg Assign Call Assign Compare Assign Compare Assign Compare FunctionDef name:null_space arg:x arguments arg Assign Call Assign Assign Call Assign Call Assign While Compare Call If Compare Assign Call Assign Assign Call Assign Call Return return:yes FunctionDef name:least_squares arg:x arguments arg Assign Call Assign Assign Call Return return:yes FunctionDef name:row_space arg:x arguments arg Assign Call Assign Assign Call Return return:yes Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_replicate_to_shard",
    "source_code": "def _replicate_to_shard(self, local_tensor: torch.Tensor, mesh: DeviceMesh, mesh_dim: int, shard_index: int) -> torch.Tensor:\n    num_chunks = mesh.size(mesh_dim=mesh_dim)\n    shards, _ = self._split_tensor(local_tensor, num_chunks, with_padding=False, contiguous=False)\n    return shards[shard_index].clone()",
    "docstring": "transform from replicated tensor to a sharded tensor on the current rank, which would perform a local chunk",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\tensor\\placement_types.py",
    "ast_data": "FunctionDef name:_replicate_to_shard arg:self arg:local_tensor arg:mesh arg:mesh_dim arg:shard_index arguments arg arg arg arg arg Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "isvariadic",
    "source_code": "def isvariadic(obj):\n    return isinstance(obj, VariadicSignatureType)",
    "docstring": "Check whether the type is variadic. Parameters ---------- obj : type The type to check Returns ------- bool Whether or not is variadic Examples -------- >>> # xdoctest: +SKIP >>> isvariadic(int) False >>> isvariadic(Variadic[int]) True",
    "type": "function",
    "file_path": "pytorch\\torch\\fx\\experimental\\unification\\multipledispatch\\variadic.py",
    "ast_data": "FunctionDef name:isvariadic arg:obj arguments arg Return return:yes Call"
  },
  {
    "library": "numpy",
    "name": "get_include",
    "source_code": "@set_module('numpy')\ndef get_include():\n    import numpy\n    if numpy.show_config is None:\n        d = os.path.join(os.path.dirname(numpy.__file__), '_core', 'include')\n    else:\n        import numpy._core as _core\n        d = os.path.join(os.path.dirname(_core.__file__), 'include')\n    return d",
    "docstring": "Return the directory that contains the NumPy \\*.h header files. Extension modules that need to compile against NumPy may need to use this function to locate the appropriate include directory. Notes ----- When using ``:: $ numpy-config --cflags -I/path/to/site-packages/numpy/_core/include # Or rely on pkg-config: $ export PKG_CONFIG_PATH=$(numpy-config --pkgconfigdir) $ pkg-config --cflags -I/path/to/site-packages/numpy/_core/include Examples -------- >>> np.get_include() '.../site-packages/numpy/core/include' # may vary",
    "type": "function",
    "file_path": "numpy\\numpy\\lib\\_utils_impl.py",
    "ast_data": "FunctionDef name:get_include arguments If Compare Assign Call Call Assign Call Call Return return:yes Call"
  },
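A short usage sketch for `numpy.get_include()` in a build script; the package, module, and source names are illustrative only:

```python
import numpy as np
from setuptools import Extension, setup

setup(
    name="myext",  # hypothetical package name
    ext_modules=[
        Extension(
            "myext._core",              # hypothetical extension module
            sources=["src/myext.c"],    # hypothetical C source
            include_dirs=[np.get_include()],  # where numpy/*.h lives
        )
    ],
)
```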
  {
    "library": "cherrypy",
    "name": "__init__",
    "source_code": "def __init__(self):\n    self.apps = {}",
    "docstring": "Initialize registry Tree.",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\_cptree.py",
    "ast_data": "FunctionDef name:__init__ arg:self arguments arg Assign"
  },
  {
    "library": "django",
    "name": "value_from_datadict",
    "source_code": "def value_from_datadict(self, data, files, name):\n    return data.get(name)",
    "docstring": "Given a dictionary of data and this widget's name, return the value of this widget or None if it's not provided.",
    "type": "method",
    "file_path": "django\\django\\forms\\widgets.py",
    "ast_data": "FunctionDef name:value_from_datadict arg:self arg:data arg:files arg:name arguments arg arg arg arg Return return:yes Call"
  },
  {
    "library": "django",
    "name": "errors",
    "source_code": "@property\ndef errors(self):\n    if self._errors is None:\n        self.full_clean()\n    return self._errors",
    "docstring": "Return an ErrorDict for the data provided for the form.",
    "type": "method",
    "file_path": "django\\django\\forms\\forms.py",
    "ast_data": "FunctionDef name:errors arg:self arguments arg If Compare Call Return return:yes"
  },
  {
    "library": "django",
    "name": "_check_save_on_top",
    "source_code": "def _check_save_on_top(self, obj):\n    if not isinstance(obj.save_on_top, bool):\n        return must_be('a boolean', option='save_on_top', obj=obj, id='admin.E102')\n    else:\n        return []",
    "docstring": "Check save_on_top is a boolean.",
    "type": "method",
    "file_path": "django\\django\\contrib\\admin\\checks.py",
    "ast_data": "FunctionDef name:_check_save_on_top arg:self arg:obj arguments arg arg If Call Return return:yes Call Return return:no"
  },
  {
    "library": "tensorflow",
    "name": "_create_distributed_tensor_spec",
    "source_code": "def _create_distributed_tensor_spec(strategy, tensor_spec):\n    num_replicas = len(strategy.extended.worker_devices)\n    if not _always_wrap(strategy):\n        return tensor_spec\n\n    def _get_value_per_replica(tensor_spec_per_input):\n        value_specs = [tensor_spec_per_input for _ in range(num_replicas)]\n        return values.PerReplicaSpec(*value_specs)\n    return nest.map_structure(_get_value_per_replica, tensor_spec)",
    "docstring": "Create a for a given strategy and input . Args: strategy: The given strategy. tensor_spec: of a given value. The batch dimension of the shape should be None if you have partial batches. Returns: A that matches the values produced by a given strategy. This can be a or a .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\input_lib.py",
    "ast_data": "FunctionDef name:_create_distributed_tensor_spec arg:strategy arg:tensor_spec arguments arg arg Assign Call If Call Return return:yes FunctionDef name:_get_value_per_replica arg:tensor_spec_per_input arguments arg Assign Call Return return:yes Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_convert_to_tensor",
    "source_code": "def _convert_to_tensor(value, name=None, preferred_dtype=None):\n    if context.executing_eagerly() and preferred_dtype is not None and (preferred_dtype.is_integer or preferred_dtype.is_bool):\n        v = ops.convert_to_tensor(value, name=name)\n        if v.dtype.is_floating:\n            return v\n    return ops.convert_to_tensor(value, name=name, preferred_dtype=preferred_dtype)",
    "docstring": "Converts to tensor avoiding an eager bug that loses float precision.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\distributions\\distribution.py",
    "ast_data": "FunctionDef name:_convert_to_tensor arg:value arg:name arg:preferred_dtype arguments arg arg arg If BoolOp Call Compare BoolOp Assign Call If Return return:yes Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "start",
    "source_code": "def start(self):\n    c_api.TF_ServerStart(self._server)",
    "docstring": "Starts this server. Raises: tf.errors.OpError: Or one of its subclasses if an error occurs while starting the TensorFlow server.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\training\\server_lib.py",
    "ast_data": "FunctionDef name:start arg:self arguments arg Call"
  },
  {
    "library": "pytorch",
    "name": "addmm_flop",
    "source_code": "@register_flop_formula(aten.addmm)\ndef addmm_flop(self_shape, a_shape, b_shape, out_shape=None, **kwargs) -> int:\n    return mm_flop(a_shape, b_shape)",
    "docstring": "Count flops for addmm.",
    "type": "function",
    "file_path": "pytorch\\torch\\utils\\flop_counter.py",
    "ast_data": "FunctionDef name:addmm_flop arg:self_shape arg:a_shape arg:b_shape arg:out_shape arguments arg arg arg arg arg Return return:yes Call Call"
  },
  {
    "library": "pandas",
    "name": "maybe_convert_indices",
    "source_code": "def maybe_convert_indices(indices, n: int, verify: bool=True) -> np.ndarray:\n    if isinstance(indices, list):\n        indices = np.array(indices)\n        if len(indices) == 0:\n            return np.empty(0, dtype=np.intp)\n    mask = indices < 0\n    if mask.any():\n        indices = indices.copy()\n        indices[mask] += n\n    if verify:\n        mask = (indices >= n) | (indices < 0)\n        if mask.any():\n            raise IndexError('indices are out-of-bounds')\n    return indices",
    "docstring": "Attempt to convert indices into valid, positive indices. If we have negative indices, translate to positive here. If we have indices that are out-of-bounds, raise an IndexError. Parameters ---------- indices : array-like Array of indices that we are to convert. n : int Number of elements in the array that we are indexing. verify : bool, default True Check that all entries are between 0 and n - 1, inclusive. Returns ------- array-like An array-like of positive indices that correspond to the ones that were passed in initially to this function. Raises ------ IndexError One of the converted indices either exceeded the number of, elements (specified by ), or was still negative.",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\indexers\\utils.py",
    "ast_data": "FunctionDef name:maybe_convert_indices arg:indices arg:n arg:verify arguments arg arg arg If Call Assign Call If Compare Call Return return:yes Call Assign Compare If Call Assign Call If Assign Compare Compare If Call Raise Call Return return:yes"
  },
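Since `maybe_convert_indices` is pandas-internal, here is a standalone sketch of the same logic, assuming plain NumPy: negative indices are translated by `n` and the result is bounds-checked:

```python
import numpy as np

def convert_indices(indices, n, verify=True):
    indices = np.asarray(indices, dtype=np.intp)
    mask = indices < 0
    if mask.any():
        indices = indices.copy()
        indices[mask] += n  # e.g. -1 -> n - 1
    if verify and ((indices >= n) | (indices < 0)).any():
        raise IndexError("indices are out-of-bounds")
    return indices

print(convert_indices([0, -1, -2], n=5))  # [0 4 3]
```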
  {
    "library": "tensorflow",
    "name": "RandomUniform",
    "source_code": "@tf_export('random_uniform_initializer', v1=[])\nclass RandomUniform(Initializer):\n\n    def __init__(self, minval=-0.05, maxval=0.05, seed=None):\n        self.minval = minval\n        self.maxval = maxval\n        self.seed = seed\n        self._random_generator = _RandomGenerator(seed)\n\n    def __call__(self, shape, dtype=dtypes.float32, **kwargs):\n        self._validate_kwargs(kwargs)\n        dtype = dtypes.as_dtype(dtype)\n        if not dtype.is_floating and (not dtype.is_integer):\n            raise ValueError(f'Argument `dtype` expected to be numeric or boolean. Received {dtype}.')\n        if _PARTITION_SHAPE in kwargs:\n            shape = kwargs[_PARTITION_SHAPE]\n        return self._random_generator.random_uniform(shape, self.minval, self.maxval, dtype)\n\n    def get_config(self):\n        return {'minval': self.minval, 'maxval': self.maxval, 'seed': self.seed}",
    "docstring": "Initializer that generates tensors with a uniform distribution. Initializers allow you to pre-specify an initialization strategy, encoded in the Initializer object, without knowing the shape and dtype of the variable being initialized. Examples: >>> def make_variables(k, initializer): ... return (tf.Variable(initializer(shape=[k], dtype=tf.float32)), ... tf.Variable(initializer(shape=[k, k], dtype=tf.float32))) >>> v1, v2 = make_variables(3, tf.ones_initializer()) >>> v1 >>> v2 >>> make_variables(4, tf.random_uniform_initializer(minval=-1., maxval=1.)) (, <tf.Variable...shape=(4, 4) ... Args: minval: A python scalar or a scalar tensor. Lower bound of the range of random values to generate (inclusive). maxval: A python scalar or a scalar tensor. Upper bound of the range of random values to generate (exclusive). seed: A Python integer. Used to create random seeds. See for behavior.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\init_ops_v2.py",
    "ast_data": "ClassDef name:RandomUniform FunctionDef name:__init__ arg:self arg:minval arg:maxval arg:seed arguments arg arg arg arg Assign Assign Assign Assign Call FunctionDef name:__call__ arg:self arg:shape arg:dtype arguments arg arg arg arg Call Assign Call If BoolOp Raise Call If Compare Assign Return return:yes Call FunctionDef name:get_config arg:self arguments arg Return return:yes Call"
  },
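A quick usage sketch via the exported name `tf.random_uniform_initializer`:

```python
import tensorflow as tf

init = tf.random_uniform_initializer(minval=-1.0, maxval=1.0, seed=42)
v = tf.Variable(init(shape=(2, 3), dtype=tf.float32))
print(v.shape, bool(tf.reduce_min(v) >= -1.0))  # (2, 3) True
```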
  {
    "library": "tensorflow",
    "name": "is_scalar_event",
    "source_code": "def is_scalar_event(self, name='is_scalar_event'):\n    with self._name_scope(name):\n        return ops.convert_to_tensor(self._is_scalar_helper(self.event_shape, self.event_shape_tensor), name='is_scalar_event')",
    "docstring": "Indicates that . Args: name: Python prepended to names of ops created by this function. Returns: is_scalar_event: scalar .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\distributions\\distribution.py",
    "ast_data": "FunctionDef name:is_scalar_event arg:self arg:name arguments arg arg With Call Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "register_functional_optim",
    "source_code": "def register_functional_optim(key, optim):\n    if key not in functional_optim_map:\n        functional_optim_map[key] = optim",
    "docstring": "Interface to insert a new functional optimizer to functional_optim_map `torch.optim.Optimizer` (e.g. for custom optimizers) Example:: >>> # import the new functional optimizer >>> # xdoctest: +SKIP >>> from xyz import fn_optimizer >>> from torch.distributed.optim.utils import register_functional_optim >>> fn_optim_key = \"XYZ_optim\" >>> register_functional_optim(fn_optim_key, fn_optimizer)",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\optim\\utils.py",
    "ast_data": "FunctionDef name:register_functional_optim arg:key arg:optim arguments arg arg If Compare Assign"
  },
  {
    "library": "tensorflow",
    "name": "make_seeds",
    "source_code": "def make_seeds(self, count=1):\n    alg = self.algorithm\n    if alg in (a.value for a in random_ops_util.Algorithm):\n        keys = self._make_int64_keys(shape=[count])\n        zeros = array_ops.zeros_like(keys)\n        return array_ops_stack.stack([keys, zeros])\n    else:\n        raise ValueError(stateless_random_ops.unsupported_alg_error_msg(alg))",
    "docstring": "Generates seeds for stateless random ops. For example: Args: count: the number of seed pairs (note that stateless random ops need a pair of seeds to invoke). Returns: A tensor of shape [2, count] and dtype int64.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\stateful_random_ops.py",
    "ast_data": "FunctionDef name:make_seeds arg:self arg:count arguments arg arg Assign If Compare Assign Call Assign Call Return return:yes Call Raise Call Call"
  },
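A usage sketch: `make_seeds` yields a `[2, count]` tensor, and each column is the seed pair a stateless op expects:

```python
import tensorflow as tf

g = tf.random.Generator.from_seed(1234)
seeds = g.make_seeds(count=2)  # shape [2, 2], dtype int64
x = tf.random.stateless_normal(shape=[3], seed=seeds[:, 0])
y = tf.random.stateless_normal(shape=[3], seed=seeds[:, 1])
print(x.numpy(), y.numpy())  # two independent draws
```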
  {
    "library": "matplotlib",
    "name": "get_ticksize",
    "source_code": "def get_ticksize(self):\n    return self._ticksize",
    "docstring": "Return length of the ticks in points.",
    "type": "method",
    "file_path": "matplotlib\\lib\\mpl_toolkits\\axisartist\\axis_artist.py",
    "ast_data": "FunctionDef name:get_ticksize arg:self arguments arg Return return:yes"
  },
  {
    "library": "authlib",
    "name": "prepare_token_request",
    "source_code": "def prepare_token_request(grant_type, body='', redirect_uri=None, **kwargs):\n    params = [('grant_type', grant_type)]\n    if redirect_uri:\n        params.append(('redirect_uri', redirect_uri))\n    if 'scope' in kwargs:\n        kwargs['scope'] = list_to_scope(kwargs['scope'])\n    if grant_type == 'authorization_code' and 'code' not in kwargs:\n        raise MissingCodeException()\n    for k in kwargs:\n        if kwargs[k]:\n            params.append((to_unicode(k), kwargs[k]))\n    return add_params_to_qs(body, params)",
    "docstring": "Prepare the access token request. Per _. The client makes a request to the token endpoint by adding the following parameters using the `Section 4.1.1Section 4.1.1Section 4.1.3`:",
    "type": "function",
    "file_path": "authlib\\authlib\\oauth2\\rfc6749\\parameters.py",
    "ast_data": "FunctionDef name:prepare_token_request arg:grant_type arg:body arg:redirect_uri arguments arg arg arg arg Assign If Call If Compare Assign Call If BoolOp Compare Compare Raise Call For If Call Call Return return:yes Call"
  },
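A quick demonstration of the helper; the code value is illustrative, and the exact ordering of parameters in the encoded body is an implementation detail:

```python
from authlib.oauth2.rfc6749.parameters import prepare_token_request

body = prepare_token_request(
    "authorization_code",
    code="SplxlOBeZQQYbYS6WxSbIA",  # illustrative authorization code
    redirect_uri="https://client.example.com/cb",
)
print(body)
# e.g. grant_type=authorization_code&redirect_uri=...&code=SplxlOBe...
```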
  {
    "library": "numpy",
    "name": "get_array_wrap",
    "source_code": "def get_array_wrap(*args):\n    warnings.warn('`get_array_wrap` is deprecated. (deprecated in NumPy 2.0)', DeprecationWarning, stacklevel=2)\n    wrappers = sorted(((getattr(x, '__array_priority__', 0), -i, x.__array_wrap__) for i, x in enumerate(args) if hasattr(x, '__array_wrap__')))\n    if wrappers:\n        return wrappers[-1][-1]\n    return None",
    "docstring": "Find the wrapper for the array with the highest priority. In case of ties, leftmost wins. If no wrapper is found, return None. .. deprecated:: 2.0",
    "type": "function",
    "file_path": "numpy\\numpy\\lib\\_shape_base_impl.py",
    "ast_data": "FunctionDef name:get_array_wrap arguments arg Call Assign Call Call Call Call If Return return:yes Return return:no"
  },
  {
    "library": "pandas",
    "name": "_concatenate_chunks",
    "source_code": "def _concatenate_chunks(chunks: list[dict[int, ArrayLike]], column_names: list[str]) -> dict:\n    names = list(chunks[0].keys())\n    warning_columns = []\n    result: dict = {}\n    for name in names:\n        arrs = [chunk.pop(name) for chunk in chunks]\n        dtypes = {a.dtype for a in arrs}\n        non_cat_dtypes = {x for x in dtypes if not isinstance(x, CategoricalDtype)}\n        dtype = dtypes.pop()\n        if isinstance(dtype, CategoricalDtype):\n            result[name] = union_categoricals(arrs, sort_categories=False)\n        else:\n            result[name] = concat_compat(arrs)\n            if len(non_cat_dtypes) > 1 and result[name].dtype == np.dtype(object):\n                warning_columns.append(column_names[name])\n    if warning_columns:\n        warning_names = ', '.join([f'{index}: {name}' for index, name in enumerate(warning_columns)])\n        warning_message = ' '.join([f'Columns ({warning_names}) have mixed types. Specify dtype option on import or set low_memory=False.'])\n        warnings.warn(warning_message, DtypeWarning, stacklevel=find_stack_level())\n    return result",
    "docstring": "Concatenate chunks of data read with low_memory=True. The tricky part is handling Categoricals, where different chunks may have different inferred categories.",
    "type": "function",
    "file_path": "pandas\\pandas\\io\\parsers\\c_parser_wrapper.py",
    "ast_data": "FunctionDef name:_concatenate_chunks arg:chunks arg:column_names arguments arg arg Assign Call Call Assign For Assign Call Assign Assign Call Assign Call If Call Assign Call Assign Call If BoolOp Compare Call Compare Call Call If Assign Call Call Assign Call Call Call Return return:yes"
  },
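A standalone look at the tricky part the docstring mentions, assuming plain pandas: categorical chunks with different inferred categories must be merged with `union_categoricals` rather than naive concatenation:

```python
import pandas as pd
from pandas.api.types import union_categoricals

chunk1 = pd.Categorical(["a", "b"])  # inferred categories: ['a', 'b']
chunk2 = pd.Categorical(["b", "c"])  # inferred categories: ['b', 'c']

combined = union_categoricals([chunk1, chunk2], sort_categories=False)
print(combined.categories.tolist())  # ['a', 'b', 'c']
```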
  {
    "library": "django",
    "name": "RawModelIterable",
    "source_code": "class RawModelIterable(BaseIterable):\n\n    def __iter__(self):\n        db = self.queryset.db\n        query = self.queryset.query\n        connection = connections[db]\n        compiler = connection.ops.compiler('SQLCompiler')(query, connection, db)\n        query_iterator = iter(query)\n        try:\n            model_init_names, model_init_pos, annotation_fields = self.queryset.resolve_model_init_order()\n            model_cls = self.queryset.model\n            if any((f.attname not in model_init_names for f in model_cls._meta.pk_fields)):\n                raise exceptions.FieldDoesNotExist('Raw query must include the primary key')\n            fields = [self.queryset.model_fields.get(c) for c in self.queryset.columns]\n            cols = [f.get_col(f.model._meta.db_table) if f else None for f in fields]\n            converters = compiler.get_converters(cols)\n            if converters:\n                query_iterator = compiler.apply_converters(query_iterator, converters)\n            if compiler.has_composite_fields(cols):\n                query_iterator = compiler.composite_fields_to_tuples(query_iterator, cols)\n            for values in query_iterator:\n                model_init_values = [values[pos] for pos in model_init_pos]\n                instance = model_cls.from_db(db, model_init_names, model_init_values)\n                if annotation_fields:\n                    for column, pos in annotation_fields:\n                        setattr(instance, column, values[pos])\n                yield instance\n        finally:\n            if hasattr(query, 'cursor') and query.cursor:\n                query.cursor.close()",
    "docstring": "Iterable that yields a model instance for each row from a raw queryset.",
    "type": "class",
    "file_path": "django\\django\\db\\models\\query.py",
    "ast_data": "ClassDef name:RawModelIterable FunctionDef name:__iter__ arg:self arguments arg Assign Assign Assign Assign Call Call Assign Call Try Assign Call Assign If Call Compare Raise Call Assign Call Assign Call Assign Call If Assign Call If Call Assign Call For Assign Assign Call If For Call If BoolOp Call Call"
  },
  {
    "library": "tensorflow",
    "name": "remove_checkpoint",
    "source_code": "@deprecation.deprecated(date=None, instructions='Use standard file APIs to delete files with this prefix.')\n@tf_export(v1=['train.remove_checkpoint'])\ndef remove_checkpoint(checkpoint_prefix, checkpoint_format_version=saver_pb2.SaverDef.V2, meta_graph_suffix='meta'):\n    _delete_file_if_exists(meta_graph_filename(checkpoint_prefix, meta_graph_suffix))\n    if checkpoint_format_version == saver_pb2.SaverDef.V2:\n        _delete_file_if_exists(checkpoint_prefix + '.index')\n        _delete_file_if_exists(checkpoint_prefix + '.data-?????-of-?????')\n    else:\n        _delete_file_if_exists(checkpoint_prefix)",
    "docstring": "Removes a checkpoint given by . Args: checkpoint_prefix: The prefix of a V1 or V2 checkpoint. Typically the result of or that of , regardless of sharded/non-sharded or V1/V2. checkpoint_format_version: , defaults to . meta_graph_suffix: Suffix for file. Defaults to 'meta'.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\checkpoint\\checkpoint_management.py",
    "ast_data": "FunctionDef name:remove_checkpoint arg:checkpoint_prefix arg:checkpoint_format_version arg:meta_graph_suffix arguments arg arg arg Call Call If Compare Call Call Call Call Call"
  },
  {
    "library": "pandas",
    "name": "numpy_dtype",
    "source_code": "@property\ndef numpy_dtype(self) -> np.dtype:\n    return self._dtype",
    "docstring": "The NumPy dtype this NumpyEADtype wraps.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\dtypes\\dtypes.py",
    "ast_data": "FunctionDef name:numpy_dtype arg:self arguments arg Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "predict",
    "source_code": "def predict(self, X, return_std=False):\n    y_mean = self._decision_function(X)\n    if not return_std:\n        return y_mean\n    else:\n        sigmas_squared_data = (np.dot(X, self.sigma_) * X).sum(axis=1)\n        y_std = np.sqrt(sigmas_squared_data + 1.0 / self.alpha_)\n        return (y_mean, y_std)",
    "docstring": "Predict using the linear model. In addition to the mean of the predictive distribution, also its standard deviation can be returned. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) Samples. return_std : bool, default=False Whether to return the standard deviation of posterior prediction. Returns ------- y_mean : array-like of shape (n_samples,) Mean of predictive distribution of query points. y_std : array-like of shape (n_samples,) Standard deviation of predictive distribution of query points.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\linear_model\\_bayes.py",
    "ast_data": "FunctionDef name:predict arg:self arg:X arg:return_std arguments arg arg arg Assign Call If Return return:yes Assign Call Call Assign Call Return return:yes"
  },
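A usage sketch with `BayesianRidge`, one of the estimators exposing this `predict(..., return_std=True)` signature:

```python
import numpy as np
from sklearn.linear_model import BayesianRidge

rng = np.random.RandomState(0)
X = rng.randn(50, 3)
y = X @ np.array([1.0, -2.0, 0.5]) + 0.1 * rng.randn(50)

model = BayesianRidge().fit(X, y)
y_mean, y_std = model.predict(X[:5], return_std=True)
print(y_mean.shape, y_std.shape)  # (5,) (5,)
```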
  {
    "library": "numpy",
    "name": "get_api_functions",
    "source_code": "def get_api_functions(tagname, api_dict):\n    functions = []\n    for f in API_FILES:\n        functions.extend(find_functions(f, tagname))\n    dfunctions = [(api_dict[func.name][0], func) for func in functions]\n    dfunctions.sort()\n    return [a[1] for a in dfunctions]",
    "docstring": "Parse source files to get functions tagged by the given tag.",
    "type": "function",
    "file_path": "numpy\\numpy\\_core\\code_generators\\genapi.py",
    "ast_data": "FunctionDef name:get_api_functions arg:tagname arg:api_dict arguments arg arg Assign For Call Call Assign Call Return return:yes"
  },
  {
    "library": "authlib",
    "name": "client_id",
    "source_code": "@property\ndef client_id(self) -> str:\n    return self.data.get('client_id')",
    "docstring": "The authorization server issues the registered client a client identifier -- a unique string representing the registration information provided by the client. The value is extracted from request. :return: string",
    "type": "method",
    "file_path": "authlib\\authlib\\oauth2\\rfc6749\\requests.py",
    "ast_data": "FunctionDef name:client_id arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_to_tensor_list",
    "source_code": "def _to_tensor_list(self, value: composite_tensor.CompositeTensor) -> List['core_types.Symbol']:\n    component_tensor_lists = nest.map_structure(batchable_to_tensor_list, self._component_specs, self._to_components(value))\n    return nest.flatten(component_tensor_lists)",
    "docstring": "Encodes as a flat list of .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\type_spec.py",
    "ast_data": "FunctionDef name:_to_tensor_list arg:self arg:value arguments arg arg Assign Call Call Return return:yes Call"
  },
  {
    "library": "django",
    "name": "WSGIServer",
    "source_code": "class WSGIServer(simple_server.WSGIServer):\n    request_queue_size = 10\n\n    def __init__(self, *args, ipv6=False, allow_reuse_address=True, **kwargs):\n        if ipv6:\n            self.address_family = socket.AF_INET6\n        self.allow_reuse_address = allow_reuse_address\n        super().__init__(*args, **kwargs)\n\n    def handle_error(self, request, client_address):\n        if is_broken_pipe_error():\n            logger.info('- Broken pipe from %s', client_address)\n        else:\n            super().handle_error(request, client_address)",
    "docstring": "BaseHTTPServer that implements the Python WSGI protocol",
    "type": "class",
    "file_path": "django\\django\\core\\servers\\basehttp.py",
    "ast_data": "ClassDef name:WSGIServer Assign FunctionDef name:__init__ arg:self arguments arg arg arg arg arg If Assign Assign Call Call FunctionDef name:handle_error arg:self arg:request arg:client_address arguments arg arg arg If Call Call Call Call"
  },
  {
    "library": "pandas",
    "name": "_write_characteristics",
    "source_code": "def _write_characteristics(self) -> None:\n    pass",
    "docstring": "No-op, future compatibility",
    "type": "method",
    "file_path": "pandas\\pandas\\io\\stata.py",
    "ast_data": "FunctionDef name:_write_characteristics arg:self arguments arg"
  },
  {
    "library": "tensorflow",
    "name": "get_concrete_function",
    "source_code": "def get_concrete_function(self, *args, **kwargs):\n    return self._get_func().get_concrete_function(*args, **kwargs)",
    "docstring": "Returns a concrete function of the decorated function.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\lite\\python\\authoring\\authoring.py",
    "ast_data": "FunctionDef name:get_concrete_function arg:self arguments arg arg arg Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "summary_iterator",
    "source_code": "@tf_export(v1=['train.summary_iterator'])\ndef summary_iterator(path):\n    return _SummaryIterator(path)",
    "docstring": "Returns a iterator for reading protocol buffers from an event file. You can use this function to read events written to an event file. It returns a Python iterator that yields protocol buffers. Example: Print the contents of an events file. Example: Print selected summary values. Example: Continuously check for new summary values. See the protocol buffer definitions of [Event]( and [Summary]( for more information about their attributes. Args: path: The path to an event file created by a . Returns: A iterator that yields protocol buffers",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\summary\\summary_iterator.py",
    "ast_data": "FunctionDef name:summary_iterator arg:path arguments arg Return return:yes Call Call"
  },
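A round-trip sketch: write one scalar summary with the TF2 writer, then read the resulting events file back through the v1 iterator above:

```python
import glob
import tempfile
import tensorflow as tf

logdir = tempfile.mkdtemp()
writer = tf.summary.create_file_writer(logdir)
with writer.as_default():
    tf.summary.scalar("loss", 0.25, step=0)
writer.flush()

path = glob.glob(logdir + "/events.out.tfevents.*")[0]
for event in tf.compat.v1.train.summary_iterator(path):
    for value in event.summary.value:
        print(value.tag)  # -> loss
```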
  {
    "library": "tensorflow",
    "name": "_prune_non_control_edges_of_debug_ops",
    "source_code": "def _prune_non_control_edges_of_debug_ops(self):\n    for node in self._node_inputs:\n        inputs = self._node_inputs[node]\n        for i, inp in enumerate(inputs):\n            if is_copy_node(inp):\n                orig_inp = self._node_inputs[inp][0]\n                inputs[i] = orig_inp",
    "docstring": "Prune (non-control) edges related to debug ops. Prune the Copy ops and associated _Send ops inserted by the debugger out from the non-control inputs and output recipients map. Replace the inputs and recipients with original ones.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\debug\\lib\\debug_graphs.py",
    "ast_data": "FunctionDef name:_prune_non_control_edges_of_debug_ops arg:self arguments arg For Assign For Call If Call Assign Assign"
  },
  {
    "library": "scipy",
    "name": "mean",
    "source_code": "def mean(self, df, scale):\n    dim, df, scale = self._process_parameters(df, scale)\n    out = self._mean(dim, df, scale)\n    return _squeeze_output(out)",
    "docstring": "Mean of the Wishart distribution. Parameters ---------- %(_doc_default_callparams)s Returns ------- mean : float The mean of the distribution",
    "type": "method",
    "file_path": "scipy\\scipy\\stats\\_multivariate.py",
    "ast_data": "FunctionDef name:mean arg:self arg:df arg:scale arguments arg arg arg Assign Call Assign Call Return return:yes Call"
  },
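A usage sketch through the public `scipy.stats.wishart` API, which dispatches to this method; the Wishart mean is `df * scale`:

```python
import numpy as np
from scipy.stats import wishart

print(wishart.mean(df=5, scale=np.eye(2)))
# [[5. 0.]
#  [0. 5.]]
```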
  {
    "library": "tensorflow",
    "name": "get_on_write_restore_ops",
    "source_code": "def get_on_write_restore_ops(var, tensor):\n    packed_var = var._packed_variable\n    if packed_var is not None:\n        return control_flow_ops.group(tuple((assign_on_device(d, packed_var, tensor) for d in packed_var.devices)))\n    return control_flow_ops.group(tuple((assign_on_device(v.device, v, tensor) for v in var.values)))",
    "docstring": "Return restore ops for AUTO and ON_WRITE variables.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\values_util.py",
    "ast_data": "FunctionDef name:get_on_write_restore_ops arg:var arg:tensor arguments arg arg Assign If Compare Return return:yes Call Call Call Return return:yes Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "all_gather_object_enforce_type",
    "source_code": "def all_gather_object_enforce_type(pg: dist.ProcessGroup, object_list: list[Any], obj: Any, type_checker: Callable[[Any, Any], bool]=lambda x, y: type(x) == type(y)) -> None:\n    dist.all_gather_object(object_list, obj, group=pg)\n    list_len = len(object_list)\n    if list_len == 0:\n        return\n    first_obj = object_list[0]\n    for i in range(1, list_len):\n        if not type_checker(first_obj, object_list[i]):\n            raise TypeError(f'Object type at index {i} is {type(object_list[i])}, while first object type is {type(first_obj)}')",
    "docstring": "Similar to plain all_gather_object but with additional type checking AFTER gather is done to ensure basic consistency. If check does not pass, all ranks will fail with exception. This is generally to prevent conditional logic leading to unexpected messages being received. This is considered fatal code error, but due to logic stacks this might happen implicitly in practice. The default check does not check sub type (considered different) or covariance (considered same) but users can pass in custom checker if more complicated check is needed.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\collective_utils.py",
    "ast_data": "FunctionDef name:all_gather_object_enforce_type arg:pg arg:object_list arg:obj arg:type_checker arguments arg arg arg arg arguments arg arg Compare Call Call Call Assign Call If Compare Return return:no Assign For Call If Call Raise Call Call Call"
  },
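A local look at the default `type_checker` semantics described above, no process group required: strict type equality, so subtypes (e.g. `bool` vs `int`) count as mismatches:

```python
def default_checker(x, y):
    # Mirrors the default: exact type equality, no subtype allowance.
    return type(x) == type(y)

print(default_checker(1, 2))       # True  - both int
print(default_checker(1, True))    # False - bool is a subtype of int
print(default_checker([1], (1,)))  # False - list vs tuple
```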
  {
    "library": "pytorch",
    "name": "__eq__",
    "source_code": "def __eq__(cls, other):\n    return isvariadic(other) and set(cls.variadic_type) == set(other.variadic_type)",
    "docstring": "Return True if other has the same variadic type Parameters ---------- other : object (type) The object (type) to check Returns ------- bool Whether or not is equal to",
    "type": "method",
    "file_path": "pytorch\\torch\\fx\\experimental\\unification\\multipledispatch\\variadic.py",
    "ast_data": "FunctionDef name:__eq__ arg:cls arg:other arguments arg arg Return return:yes BoolOp Call Compare Call Call"
  },
  {
    "library": "django",
    "name": "format_subject",
    "source_code": "def format_subject(self, subject):\n    return subject.replace('\\n', '\\\\n').replace('\\r', '\\\\r')",
    "docstring": "Escape CR and LF characters.",
    "type": "method",
    "file_path": "django\\django\\utils\\log.py",
    "ast_data": "FunctionDef name:format_subject arg:self arg:subject arguments arg arg Return return:yes Call Call"
  },
  {
    "library": "pandas",
    "name": "validate_argmin_with_skipna",
    "source_code": "def validate_argmin_with_skipna(skipna: bool | ndarray | None, args, kwargs) -> bool:\n    skipna, args = process_skipna(skipna, args)\n    validate_argmin(args, kwargs)\n    return skipna",
    "docstring": "If 'Series.argmin' is called via the 'numpy' library, the third parameter in its signature is 'out', which takes either an ndarray or 'None', so check if the 'skipna' parameter is either an instance of ndarray or is None, since 'skipna' itself should be a boolean",
    "type": "function",
    "file_path": "pandas\\pandas\\compat\\numpy\\function.py",
    "ast_data": "FunctionDef name:validate_argmin_with_skipna arg:skipna arg:args arg:kwargs arguments arg arg arg Assign Call Call Return return:yes"
  },
  {
    "library": "seaborn",
    "name": "InvalidVersion",
    "source_code": "class InvalidVersion(ValueError):\n    pass",
    "docstring": "An invalid version was found, users should refer to PEP 440.",
    "type": "class",
    "file_path": "seaborn\\seaborn\\external\\version.py",
    "ast_data": "ClassDef name:InvalidVersion"
  },
  {
    "library": "pytorch",
    "name": "_RendezvousCloseOp",
    "source_code": "class _RendezvousCloseOp:\n\n    def __call__(self, ctx: _RendezvousContext, deadline: float) -> _Action:\n        if ctx.state.closed:\n            return _Action.FINISH\n        if time.monotonic() > deadline:\n            return _Action.ERROR_TIMEOUT\n        return _Action.MARK_RENDEZVOUS_CLOSED",
    "docstring": "Represent a rendezvous close operation.",
    "type": "class",
    "file_path": "pytorch\\torch\\distributed\\elastic\\rendezvous\\dynamic_rendezvous.py",
    "ast_data": "ClassDef name:_RendezvousCloseOp FunctionDef name:__call__ arg:self arg:ctx arg:deadline arguments arg arg arg If Return return:yes If Compare Call Return return:yes Return return:yes"
  },
  {
    "library": "virtualenv",
    "name": "python_zip",
    "source_code": "@classmethod\ndef python_zip(cls, interpreter):\n    pattern = f'*python{interpreter.version_nodot}.zip'\n    matches = fnmatch.filter(interpreter.path, pattern)\n    matched_paths = map(Path, matches)\n    existing_paths = filter(method('exists'), matched_paths)\n    path = next(existing_paths, None)\n    if path is not None:\n        yield PathRefToDest(path, cls.to_bin)",
    "docstring": "\"python{VERSION}.zip\" contains compiled *.pyc std lib packages, where \"VERSION\" is var from the module. :see: :see: class (interpreter). :see: output. :note: The embeddable Python distribution for Windows includes \"python{VERSION}.zip\" and \"python{VERSION}._pth\" files. User can move/rename *zip* file and edit by editing *_pth* file. Here the is used only for the default *zip* file name!",
    "type": "method",
    "file_path": "virtualenv\\src\\virtualenv\\create\\via_global_ref\\builtin\\cpython\\cpython3.py",
    "ast_data": "FunctionDef name:python_zip arg:cls arg:interpreter arguments arg arg Assign Assign Call Assign Call Assign Call Call Assign Call If Compare Call"
  },
  {
    "library": "tensorflow",
    "name": "_assert_float_dtype",
    "source_code": "def _assert_float_dtype(dtype):\n    dtype = dtypes.as_dtype(dtype)\n    if not dtype.is_floating:\n        raise ValueError(f'Argument `dtype` is expected to be floating point. Received: {dtype}.')\n    return dtype",
    "docstring": "Validate and return floating point type based on . must be a floating point type. Args: dtype: The data type to validate. Returns: Validated type. Raises: ValueError: if is not a floating point type.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\init_ops_v2.py",
    "ast_data": "FunctionDef name:_assert_float_dtype arg:dtype arguments arg Assign Call If Raise Call Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "empirical_covariance",
    "source_code": "@validate_params({'X': ['array-like'], 'assume_centered': ['boolean']}, prefer_skip_nested_validation=True)\ndef empirical_covariance(X, *, assume_centered=False):\n    X = check_array(X, ensure_2d=False, ensure_all_finite=False)\n    if X.ndim == 1:\n        X = np.reshape(X, (1, -1))\n    if X.shape[0] == 1:\n        warnings.warn('Only one sample available. You may want to reshape your data array')\n    if assume_centered:\n        covariance = np.dot(X.T, X) / X.shape[0]\n    else:\n        covariance = np.cov(X.T, bias=1)\n    if covariance.ndim == 0:\n        covariance = np.array([[covariance]])\n    return covariance",
    "docstring": "Compute the Maximum likelihood covariance estimator. Parameters ---------- X : ndarray of shape (n_samples, n_features) Data from which to compute the covariance estimate. assume_centered : bool, default=False If , data will not be centered before computation. Useful when working with data whose mean is almost, but not exactly zero. If , data will be centered before computation. Returns ------- covariance : ndarray of shape (n_features, n_features) Empirical covariance (Maximum Likelihood Estimator). Examples -------- >>> from sklearn.covariance import empirical_covariance >>> X = [[1,1,1],[1,1,1],[1,1,1], ... [0,0,0],[0,0,0],[0,0,0]] >>> empirical_covariance(X) array([[0.25, 0.25, 0.25], [0.25, 0.25, 0.25], [0.25, 0.25, 0.25]])",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\covariance\\_empirical_covariance.py",
    "ast_data": "FunctionDef name:empirical_covariance arg:X arguments arg arg Assign Call If Compare Assign Call If Compare Call If Assign Call Assign Call If Compare Assign Call Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "predict",
    "source_code": "@available_if(_estimator_has('predict', delegates=('final_estimator_', 'final_estimator')))\ndef predict(self, X, **predict_params):\n    check_is_fitted(self)\n    return self.final_estimator_.predict(self.transform(X), **predict_params)",
    "docstring": "Predict target for X. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) Training vectors, where is the number of samples and is the number of features. **predict_params : dict of str -> obj Parameters to the called by the . Note that this may be used to return uncertainties from some estimators with or . Be aware that it will only account for uncertainty in the final estimator. Returns ------- y_pred : ndarray of shape (n_samples,) or (n_samples, n_output) Predicted targets.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\ensemble\\_stacking.py",
    "ast_data": "FunctionDef name:predict arg:self arg:X arguments arg arg arg Call Return return:yes Call Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_map_key_proto",
    "source_code": "def _map_key_proto(key_type, key):\n    return _MAP_KEY[key_type](key)",
    "docstring": "Returns MapKey proto for a key of key_type.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\tools\\proto_splitter\\util.py",
    "ast_data": "FunctionDef name:_map_key_proto arg:key_type arg:key arguments arg arg Return return:yes Call"
  },
  {
    "library": "seaborn",
    "name": "facet_axis",
    "source_code": "def facet_axis(self, row_i, col_j, modify_state=True):\n    if self._col_wrap is not None:\n        ax = self.axes.flat[col_j]\n    else:\n        ax = self.axes[row_i, col_j]\n    if modify_state:\n        plt.sca(ax)\n    return ax",
    "docstring": "Make the axis identified by these indices active and return it.",
    "type": "method",
    "file_path": "seaborn\\seaborn\\axisgrid.py",
    "ast_data": "FunctionDef name:facet_axis arg:self arg:row_i arg:col_j arg:modify_state arguments arg arg arg arg If Compare Assign Assign If Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "__iter__",
    "source_code": "def __iter__(self):\n    return iter(self._vars)",
    "docstring": "Return an iterable for accessing the underlying sharded variables.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\tpu_replicated_variable.py",
    "ast_data": "FunctionDef name:__iter__ arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "getcallargs",
    "source_code": "def getcallargs(*func_and_positional, **named):\n    func = func_and_positional[0]\n    positional = func_and_positional[1:]\n    argspec = getfullargspec(func)\n    call_args = named.copy()\n    this = getattr(func, 'im_self', None) or getattr(func, '__self__', None)\n    if ismethod(func) and this:\n        positional = (this,) + positional\n    remaining_positionals = [arg for arg in argspec.args if arg not in call_args]\n    call_args.update(dict(zip(remaining_positionals, positional)))\n    default_count = 0 if not argspec.defaults else len(argspec.defaults)\n    if default_count:\n        for arg, value in zip(argspec.args[-default_count:], argspec.defaults):\n            if arg not in call_args:\n                call_args[arg] = value\n    if argspec.kwonlydefaults is not None:\n        for k, v in argspec.kwonlydefaults.items():\n            if k not in call_args:\n                call_args[k] = v\n    return call_args",
    "docstring": "TFDecorator-aware replacement for inspect.getcallargs. Args: *func_and_positional: A callable, possibly decorated, followed by any positional arguments that would be passed to . **named: The named argument dictionary that would be passed to . Returns: A dictionary mapping 's named arguments to the values they would receive if were called. will use the argspec from the outermost decorator that provides it. If no attached decorators modify argspec, the final unwrapped target's argspec will be used.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\utils\\tf_inspect.py",
    "ast_data": "FunctionDef name:getcallargs arguments arg arg Assign Assign Assign Call Assign Call Assign BoolOp Call Call If BoolOp Call Assign Assign Compare Call Call Call Assign Call If For Call If Compare Assign If Compare For Call If Compare Assign Return return:yes"
  },
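The stdlib analogue makes the mapping semantics concrete; TF's version adds decorator awareness on top of the same behavior:

```python
import inspect

def f(a, b=2, *args, c=3, **kwargs):
    pass

print(inspect.getcallargs(f, 1, 10, 20, d=4))
# {'a': 1, 'b': 10, 'args': (20,), 'c': 3, 'kwargs': {'d': 4}}
```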
  {
    "library": "tensorflow",
    "name": "broadcast_to",
    "source_code": "def broadcast_to(self, tensor, destinations):\n    assert destinations is not None\n    _require_cross_replica_or_default_context_extended(self)\n    assert not isinstance(destinations, (list, tuple))\n    return self._broadcast_to(tensor, destinations)",
    "docstring": "Mirror a tensor on one device to all worker devices. Args: tensor: A Tensor value to broadcast. destinations: A mirrored variable or device string specifying the destination devices to copy to. Returns: A value mirrored to devices.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\distribute_lib.py",
    "ast_data": "FunctionDef name:broadcast_to arg:self arg:tensor arg:destinations arguments arg arg arg Compare Call Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "IndexedSlices",
    "source_code": "class IndexedSlices(object):\n    pass",
    "docstring": "Interface for internal isinstance checks to framework/indexed_slices.py. This helps to avoid circular dependencies.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\types\\internal.py",
    "ast_data": "ClassDef name:IndexedSlices"
  },
  {
    "library": "kornia",
    "name": "apply_non_transform_class",
    "source_code": "def apply_non_transform_class(self, input: Tensor, params: Dict[str, Tensor], flags: Dict[str, Any], transform: Optional[Tensor]=None) -> Tensor:\n    return input",
    "docstring": "Process class tags corresponding to the inputs that are no transformation applied.",
    "type": "method",
    "file_path": "kornia\\kornia\\augmentation\\_2d\\geometric\\base.py",
    "ast_data": "FunctionDef name:apply_non_transform_class arg:self arg:input arg:params arg:flags arg:transform arguments arg arg arg arg arg Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "_cov",
    "source_code": "def _cov(X, shrinkage=None, covariance_estimator=None):\n    if covariance_estimator is None:\n        shrinkage = 'empirical' if shrinkage is None else shrinkage\n        if isinstance(shrinkage, str):\n            if shrinkage == 'auto':\n                sc = StandardScaler()\n                X = sc.fit_transform(X)\n                s = ledoit_wolf(X)[0]\n                s = sc.scale_[:, np.newaxis] * s * sc.scale_[np.newaxis, :]\n            elif shrinkage == 'empirical':\n                s = empirical_covariance(X)\n        elif isinstance(shrinkage, Real):\n            s = shrunk_covariance(empirical_covariance(X), shrinkage)\n    else:\n        if shrinkage is not None and shrinkage != 0:\n            raise ValueError('covariance_estimator and shrinkage parameters are not None. Only one of the two can be set.')\n        covariance_estimator.fit(X)\n        if not hasattr(covariance_estimator, 'covariance_'):\n            raise ValueError('%s does not have a covariance_ attribute' % covariance_estimator.__class__.__name__)\n        s = covariance_estimator.covariance_\n    return s",
    "docstring": "Estimate covariance matrix (using optional covariance_estimator). Parameters ---------- X : array-like of shape (n_samples, n_features) Input data. shrinkage : {'empirical', 'auto'} or float, default=None Shrinkage parameter, possible values: - None or 'empirical': no shrinkage (default). - 'auto': automatic shrinkage using the Ledoit-Wolf lemma. - float between 0 and 1: fixed shrinkage parameter. Shrinkage parameter is ignored if is not None. covariance_estimator : estimator, default=None If not None, is used to estimate the covariance matrices instead of relying on the empirical covariance estimator (with potential shrinkage). The object should have a fit method and a `sklearn.covariance``. if None the shrinkage parameter drives the estimate. .. versionadded:: 0.24 Returns ------- s : ndarray of shape (n_features, n_features) Estimated covariance matrix.",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\discriminant_analysis.py",
    "ast_data": "FunctionDef name:_cov arg:X arg:shrinkage arg:covariance_estimator arguments arg arg arg If Compare Assign Compare If Call If Compare Assign Call Assign Call Assign Call Assign If Compare Assign Call If Call Assign Call Call If BoolOp Compare Compare Raise Call Call If Call Raise Call Assign Return return:yes"
  },
  {
    "library": "pandas",
    "name": "_add_timedelta_arraylike",
    "source_code": "def _add_timedelta_arraylike(self, other: TimedeltaArray) -> Self:\n    if len(self) != len(other):\n        raise ValueError('cannot add indices of unequal length')\n    self, other = cast('DatetimeArray | TimedeltaArray', self)._ensure_matching_resos(other)\n    return self._add_timedeltalike(other)",
    "docstring": "Add a delta of a TimedeltaIndex Returns ------- Same type as self",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\arrays\\datetimelike.py",
    "ast_data": "FunctionDef name:_add_timedelta_arraylike arg:self arg:other arguments arg arg If Compare Call Call Raise Call Assign Call Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "get_all_sharing_strategies",
    "source_code": "def get_all_sharing_strategies():\n    return _all_sharing_strategies",
    "docstring": "Return a set of sharing strategies supported on a current system.",
    "type": "function",
    "file_path": "pytorch\\torch\\multiprocessing\\__init__.py",
    "ast_data": "FunctionDef name:get_all_sharing_strategies arguments Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "variable_op_scope",
    "source_code": "@tf_export(v1=['variable_op_scope'])\n@tf_contextlib.contextmanager\ndef variable_op_scope(values, name_or_scope, default_name=None, initializer=None, regularizer=None, caching_device=None, partitioner=None, custom_getter=None, reuse=None, dtype=None, use_resource=None, constraint=None):\n    logging.warn('tf.variable_op_scope(values, name, default_name) is deprecated, use tf.variable_scope(name, default_name, values)')\n    with variable_scope(name_or_scope, default_name=default_name, values=values, initializer=initializer, regularizer=regularizer, caching_device=caching_device, partitioner=partitioner, custom_getter=custom_getter, reuse=reuse, dtype=dtype, use_resource=use_resource, constraint=constraint) as scope:\n        yield scope",
    "docstring": "Deprecated: context manager for defining an op that creates variables.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\variable_scope.py",
    "ast_data": "FunctionDef name:variable_op_scope arg:values arg:name_or_scope arg:default_name arg:initializer arg:regularizer arg:caching_device arg:partitioner arg:custom_getter arg:reuse arg:dtype arg:use_resource arg:constraint arguments arg arg arg arg arg arg arg arg arg arg arg arg Call With Call Call"
  },
  {
    "library": "pytorch",
    "name": "all_to_all",
    "source_code": "def all_to_all(output_tensor_list, input_tensor_list, group=group.WORLD):\n    return _AlltoAll.apply(group, output_tensor_list, *input_tensor_list)",
    "docstring": "Each process scatters list of input tensors to all processes in a group and return gathered list of tensors in output list. Arguments: output_tensor_list (list[Tensor]): list of tensors to gather one per rank. input_tensor_list (list[Tensor]): List of tensors to scatter one per rank. group (ProcessGroup, optional): The process group to work on. Returns: tuple([Tensor]): Output of the collective.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\nn\\functional.py",
    "ast_data": "FunctionDef name:all_to_all arg:output_tensor_list arg:input_tensor_list arg:group arguments arg arg arg Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "UndefinedVariableError",
    "source_code": "class UndefinedVariableError(NameError):\n\n    def __init__(self, name: str, is_local: bool | None=None) -> None:\n        base_msg = f'{name!r} is not defined'\n        if is_local:\n            msg = f'local variable {base_msg}'\n        else:\n            msg = f'name {base_msg}'\n        super().__init__(msg)",
    "docstring": "Exception raised by ``, the variable is treated as a non-local name. See Also -------- DataFrame.query : Query the columns of a DataFrame with a boolean expression. DataFrame.eval : Evaluate a string describing operations on DataFrame columns. Examples -------- >>> df = pd.DataFrame({\"A\": [1, 1, 1]}) >>> df.query(\"A > x\") # doctest: +SKIP ... # UndefinedVariableError: name 'x' is not defined >>> df.query(\"A > @y\") # doctest: +SKIP ... # UndefinedVariableError: local variable 'y' is not defined >>> pd.eval(\"x + 1\") # doctest: +SKIP ... # UndefinedVariableError: name 'x' is not defined",
    "type": "class",
    "file_path": "pandas\\pandas\\errors\\__init__.py",
    "ast_data": "ClassDef name:UndefinedVariableError FunctionDef name:__init__ arg:self arg:name arg:is_local arguments arg arg arg Assign If Assign Assign Call Call"
  },
  {
    "library": "tensorflow",
    "name": "nodes",
    "source_code": "def nodes(self, device_name=None):\n    if not self._debug_graphs:\n        raise LookupError('No partition graphs have been loaded.')\n    if device_name is None:\n        nodes = []\n        for device_name in self._debug_graphs:\n            nodes.extend(self._debug_graphs[device_name].node_inputs.keys())\n        return nodes\n    else:\n        if device_name not in self._debug_graphs:\n            raise ValueError('Invalid device name: %s' % device_name)\n        return self._debug_graphs[device_name].node_inputs.keys()",
    "docstring": "Get a list of all nodes from the partition graphs. Args: device_name: () name of device. If None, all nodes from all available devices will be included. Returns: All nodes' names, as a list of str. Raises: LookupError: If no partition graphs have been loaded. ValueError: If specified node name does not exist.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\debug\\lib\\debug_data.py",
    "ast_data": "FunctionDef name:nodes arg:self arg:device_name arguments arg arg If Raise Call If Compare Assign For Call Call Return return:yes If Compare Raise Call Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "T",
    "source_code": "@T.setter\ndef T(self, v: float):\n    if not v > 0:\n        raise ValueError(f'Sampling interval T={v} must be positive!')\n    self._fs = 1 / v",
    "docstring": "Sampling interval of input signal and of the window. A `` is raised if it is set to a non-positive value.",
    "type": "method",
    "file_path": "scipy\\scipy\\signal\\_short_time_fft.py",
    "ast_data": "FunctionDef name:T arg:self arg:v arguments arg arg If Compare Raise Call Assign"
  },
  {
    "library": "scipy",
    "name": "van_der_corput",
    "source_code": "def van_der_corput(n: IntNumber, base: IntNumber=2, *, start_index: IntNumber=0, scramble: bool=False, permutations: 'npt.ArrayLike | None'=None, rng: SeedType=None, workers: IntNumber=1) -> np.ndarray:\n    if base < 2:\n        raise ValueError(\"'base' must be at least 2\")\n    if scramble:\n        if permutations is None:\n            permutations = _van_der_corput_permutations(base=base, rng=rng)\n        else:\n            permutations = np.asarray(permutations)\n        permutations = permutations.astype(np.int64)\n        return _cy_van_der_corput_scrambled(n, base, start_index, permutations, workers)\n    else:\n        return _cy_van_der_corput(n, base, start_index, workers)",
    "docstring": "Van der Corput sequence. Pseudo-random number generator based on a b-adic expansion. Scrambling uses permutations of the remainders (see [1]_). Multiple permutations are applied to construct a point. The sequence of permutations has to be the same for all points of the sequence. Parameters ---------- n : int Number of element of the sequence. base : int, optional Base of the sequence. Default is 2. start_index : int, optional Index to start the sequence from. Default is 0. scramble : bool, optional If True, use Owen scrambling. Otherwise no scrambling is done. Default is True. permutations : array_like, optional Permutations used for scrambling. rng : , optional Pseudorandom number generator state. When is None, a new is created using entropy from the operating system. Types other than are passed to to instantiate a `1706.02808`, 2017.",
    "type": "function",
    "file_path": "scipy\\scipy\\stats\\_qmc.py",
    "ast_data": "FunctionDef name:van_der_corput arg:n arg:base arguments arg arg arg arg arg arg arg If Compare Raise Call If If Compare Assign Call Assign Call Assign Call Return return:yes Call Return return:yes Call"
  },
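A standalone sketch of the unscrambled b-adic radical inverse the sequence is built from (the SciPy function itself lives in the private `_qmc` module):

```python
def radical_inverse(i, base=2):
    # Reflect the base-b digits of i across the radix point.
    x, denom = 0.0, 1.0
    while i > 0:
        denom *= base
        i, digit = divmod(i, base)
        x += digit / denom
    return x

print([radical_inverse(i, base=2) for i in range(1, 5)])
# [0.5, 0.25, 0.75, 0.125]
```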
  {
    "library": "matplotlib",
    "name": "FontEntry",
    "source_code": "@dataclasses.dataclass(frozen=True)\nclass FontEntry:\n    fname: str = ''\n    name: str = ''\n    style: str = 'normal'\n    variant: str = 'normal'\n    weight: str | int = 'normal'\n    stretch: str = 'normal'\n    size: str = 'medium'\n\n    def _repr_html_(self) -> str:\n        png_stream = self._repr_png_()\n        png_b64 = b64encode(png_stream).decode()\n        return f'<img src=\"data:image/png;base64, {png_b64}\" />'\n\n    def _repr_png_(self) -> bytes:\n        from matplotlib.figure import Figure\n        fig = Figure()\n        font_path = Path(self.fname) if self.fname != '' else None\n        fig.text(0, 0, self.name, font=font_path)\n        with BytesIO() as buf:\n            fig.savefig(buf, bbox_inches='tight', transparent=True)\n            return buf.getvalue()",
    "docstring": "A class for storing Font properties. It is used when populating the font lookup dictionary.",
    "type": "class",
    "file_path": "matplotlib\\lib\\matplotlib\\font_manager.py",
    "ast_data": "ClassDef name:FontEntry FunctionDef name:_repr_html_ arg:self arguments arg Assign Call Assign Call Call Return return:yes FunctionDef name:_repr_png_ arg:self arguments arg Assign Call Assign Compare Call Call With Call Call Return return:yes Call Call"
  },
  {
    "library": "cherrypy",
    "name": "__init__",
    "source_code": "def __init__(self, nextapp, path=None, aggregate=False):\n    if profile is None or pstats is None:\n        msg = \"Your installation of Python does not have a profile module. If you're on Debian, try `sudo apt-get install python-profiler`. See http://www.cherrypy.org/wiki/ProfilingOnDebian for details.\"\n        warnings.warn(msg)\n    self.nextapp = nextapp\n    self.aggregate = aggregate\n    if aggregate:\n        self.profiler = ProfileAggregator(path)\n    else:\n        self.profiler = Profiler(path)",
    "docstring": "Make a WSGI middleware app which wraps 'nextapp' with profiling. nextapp the WSGI application to wrap, usually an instance of cherrypy.Application. path where to dump the profiling output. aggregate if True, profile data for all HTTP requests will go in a single file. If False (the default), each HTTP request will dump its profile data into a separate file.",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\lib\\profiler.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:nextapp arg:path arg:aggregate arguments arg arg arg arg If BoolOp Compare Compare Assign Call Assign Assign If Assign Call Assign Call"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, graph_view):\n    self._graph_view = graph_view\n    if context.executing_eagerly():\n        self._cache = None\n        self._saveables_cache = None\n    else:\n        self._cache = object_identity.ObjectIdentityWeakKeyDictionary()\n        self._saveables_cache = object_identity.ObjectIdentityWeakKeyDictionary()\n    self._file_prefix_placeholder = None\n    self._object_graph_feed_tensor = None\n    self._last_save_object_graph = None\n    self._file_prefix_feed_tensor = None\n    self._cached_save_operation = None\n    self._restore_op_cache = {}\n    self._object_map = None",
    "docstring": "Configure saving. Args: graph_view: An object containing a description of the object graph to save.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\checkpoint\\checkpoint.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:graph_view arguments arg arg Assign If Call Assign Assign Assign Call Assign Call Assign Assign Assign Assign Assign Assign Assign"
  },
  {
    "library": "django",
    "name": "_guess_stylesheet_mimetype",
    "source_code": "def _guess_stylesheet_mimetype(url):\n    mimetypedb = mimetypes.MimeTypes()\n    mimetypedb.readfp(StringIO('text/xsl\\txsl\\ntext/xsl\\txslt'))\n    return mimetypedb.guess_type(url)",
    "docstring": "Return the given stylesheet's mimetype tuple, using a slightly custom version of Python's mimetypes.guess_type().",
    "type": "function",
    "file_path": "django\\django\\utils\\feedgenerator.py",
    "ast_data": "FunctionDef name:_guess_stylesheet_mimetype arg:url arguments arg Assign Call Call Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_parse_args",
    "source_code": "def _parse_args():\n    parser = argparse.ArgumentParser(allow_abbrev=False)\n    group = parser.add_mutually_exclusive_group(required=True)\n    group.add_argument('--build', type=BuildType.from_str, choices=list(BuildType))\n    group.add_argument('--dump_commands', action='store_true')\n    return parser.parse_args()",
    "docstring": "Defines flags and parses args.",
    "type": "function",
    "file_path": "tensorflow\\third_party\\xla\\build_tools\\ci\\build.py",
    "ast_data": "FunctionDef name:_parse_args arguments Assign Call Assign Call Call Call Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "SubgraphLowering",
    "source_code": "class SubgraphLowering(GraphLowering):\n\n    def __init__(self, parent: GraphLowering, *args: Any, **kwargs: Any) -> None:\n        self.parent = parent\n        super().__init__(*args, **kwargs)\n\n    def init_wrapper_code(self, is_subgraph: bool=False, subgraph_name: Optional[str]=None, parent_wrapper_code: Optional[PythonWrapperCodegen]=None, partition_signatures: Optional[GraphPartitionSignature]=None) -> None:\n        super().init_wrapper_code(is_subgraph=True, subgraph_name=self.name, parent_wrapper_code=self.parent.wrapper_code)",
    "docstring": "Mostly a helper class for the subgraph lowering. The main goal is to call init_wrapper_code with the subgraph related arguments.",
    "type": "class",
    "file_path": "pytorch\\torch\\_inductor\\graph.py",
    "ast_data": "ClassDef name:SubgraphLowering FunctionDef name:__init__ arg:self arg:parent arguments arg arg arg arg Assign Call Call FunctionDef name:init_wrapper_code arg:self arg:is_subgraph arg:subgraph_name arg:parent_wrapper_code arg:partition_signatures arguments arg arg arg arg arg Call Call"
  },
  {
    "library": "pytorch",
    "name": "_get_default_store",
    "source_code": "def _get_default_store() -> Store:\n    if not is_initialized():\n        raise ValueError('Default process group has not been initialized, please make sure to call init_process_group.')\n    default_pg = _get_default_group()\n    _, default_store = _world.pg_map[default_pg]\n    return default_store",
    "docstring": "Get the default store created by init_process_group.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\distributed_c10d.py",
    "ast_data": "FunctionDef name:_get_default_store arguments If Call Raise Call Assign Call Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "value_text",
    "source_code": "def value_text(tensor, is_repr=False) -> AnyStr:\n    if tensor._prefer_custom_summarizer():\n        text = tensor._summarize_value()\n        if is_repr:\n            text = 'value=' + text\n    else:\n        text = numpy_text(tensor, is_repr=is_repr)\n        if is_repr:\n            text = 'numpy=' + text\n    return text",
    "docstring": "Either the NumPy value or a custom TensorFlow formatting of . Custom formatting is used for custom device tensors, e.g. parallel tensors with multiple components on different devices. Args: tensor: The tensor to format. is_repr: Controls the style/verbosity of formatting. Returns: The formatted tensor.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\ops.py",
    "ast_data": "FunctionDef name:value_text arg:tensor arg:is_repr arguments arg arg If Call Assign Call If Assign Assign Call If Assign Return return:yes"
  },
  {
    "library": "numpy",
    "name": "data",
    "source_code": "@property\ndef data(self):\n    return self._data.value",
    "docstring": "A pointer to the memory area of the array as a Python integer. This memory area may contain data that is not aligned, or not in correct byte-order. The memory area may not even be writeable. The array flags and data-type of this array should be respected when passing this attribute to arbitrary C-code to avoid trouble that can include Python crashing. User Beware! The value of this attribute is exactly the same as: ``",
    "type": "method",
    "file_path": "numpy\\numpy\\_core\\_internal.py",
    "ast_data": "FunctionDef name:data arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "grad_context",
    "source_code": "@property\ndef grad_context(self):\n    return self._grad_context",
    "docstring": "The corresponding WhileContext for gradient.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\control_flow_state.py",
    "ast_data": "FunctionDef name:grad_context arg:self arguments arg Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "fit",
    "source_code": "def fit(self, X, y, sample_weight=None):\n    return super().fit(X, y, sample_weight=sample_weight)",
    "docstring": "Fit Naive Bayes classifier according to X, y. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) Training vectors, where is the number of samples and is the number of features. Here, each feature of X is assumed to be from a different categorical distribution. It is further assumed that all categories of each feature are represented by the numbers 0, ..., n - 1, where n refers to the total number of categories for the given feature. This can, for instance, be achieved with the help of OrdinalEncoder. y : array-like of shape (n_samples,) Target values. sample_weight : array-like of shape (n_samples,), default=None Weights applied to individual samples (1. for unweighted). Returns ------- self : object Returns the instance itself.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\naive_bayes.py",
    "ast_data": "FunctionDef name:fit arg:self arg:X arg:y arg:sample_weight arguments arg arg arg arg Return return:yes Call Call"
  },
  {
    "library": "scipy",
    "name": "bench_run_global",
    "source_code": "def bench_run_global(self, numtrials=50, methods=None):\n    if methods is None:\n        methods = ['DE', 'basinh.', 'DA', 'DIRECT', 'SHGO']\n    stochastic_methods = ['DE', 'basinh.', 'DA']\n    method_fun = {'DE': self.run_differentialevolution, 'basinh.': self.run_basinhopping, 'DA': self.run_dualannealing, 'DIRECT': self.run_direct, 'SHGO': self.run_shgo}\n    for m in methods:\n        if m in stochastic_methods:\n            for i in range(numtrials):\n                method_fun[m]()\n        else:\n            method_fun[m]()",
    "docstring": "Run the optimization tests for the required minimizers.",
    "type": "method",
    "file_path": "scipy\\benchmarks\\benchmarks\\optimize.py",
    "ast_data": "FunctionDef name:bench_run_global arg:self arg:numtrials arg:methods arguments arg arg arg If Compare Assign Assign Assign For If Compare For Call Call Call"
  },
  {
    "library": "django",
    "name": "_create_formsets",
    "source_code": "def _create_formsets(self, request, obj, change):\n    formsets = []\n    inline_instances = []\n    prefixes = {}\n    get_formsets_args = [request]\n    if change:\n        get_formsets_args.append(obj)\n    for FormSet, inline in self.get_formsets_with_inlines(*get_formsets_args):\n        prefix = FormSet.get_default_prefix()\n        prefixes[prefix] = prefixes.get(prefix, 0) + 1\n        if prefixes[prefix] != 1 or not prefix:\n            prefix = '%s-%s' % (prefix, prefixes[prefix])\n        formset_params = self.get_formset_kwargs(request, obj, inline, prefix)\n        formset = FormSet(**formset_params)\n\n        def user_deleted_form(request, obj, formset, index, inline):\n            return inline.has_delete_permission(request, obj) and '{}-{}-DELETE'.format(formset.prefix, index) in request.POST\n        if not inline.has_change_permission(request, obj if change else None):\n            for index, form in enumerate(formset.initial_forms):\n                if user_deleted_form(request, obj, formset, index, inline):\n                    continue\n                form._errors = {}\n                form.cleaned_data = form.initial\n        formsets.append(formset)\n        inline_instances.append(inline)\n    return (formsets, inline_instances)",
    "docstring": "Helper function to generate formsets for add/change_view.",
    "type": "method",
    "file_path": "django\\django\\contrib\\admin\\options.py",
    "ast_data": "FunctionDef name:_create_formsets arg:self arg:request arg:obj arg:change arguments arg arg arg arg Assign Assign Assign Assign If Call For Call Assign Call Assign Call If BoolOp Compare Assign Assign Call Assign Call FunctionDef name:user_deleted_form arg:request arg:obj arg:formset arg:index arg:inline arguments arg arg arg arg arg Return return:yes BoolOp Call Compare Call If Call For Call If Call Assign Assign Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "wrap_tensor_to",
    "source_code": "def wrap_tensor_to(self: torch.Tensor, device: Optional[Union[int, torch.device]]=None, non_blocking=False, **kwargs) -> torch.Tensor:\n    if has_torch_function_unary(self):\n        return handle_torch_function(wrap_tensor_to, (self,), self, device=device, non_blocking=False, **kwargs)\n    device_idx = _normalization_device(custom_backend_name, device)\n    return self.to(device=torch.device(f'{custom_backend_name}:{device_idx}'), non_blocking=non_blocking, **kwargs)",
    "docstring": "Perform Tensor device conversion. Call the to operator implementation. .. note:: If the `torch.devicetorch.device` argument.",
    "type": "function",
    "file_path": "pytorch\\torch\\utils\\backend_registration.py",
    "ast_data": "FunctionDef name:wrap_tensor_to arg:self arg:device arg:non_blocking arguments arg arg arg arg If Call Return return:yes Call Assign Call Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "rename_privateuse1_backend",
    "source_code": "def rename_privateuse1_backend(backend_name: str) -> None:\n    _rename_privateuse1_backend(backend_name)\n    global _privateuse1_backend_name\n    _privateuse1_backend_name = backend_name",
    "docstring": "Rename the privateuse1 backend device to make it more convenient to use as a device name within PyTorch APIs. The steps are: (1) (In C++) implement kernels for various torch operations, and register them to the PrivateUse1 dispatch key. (2) (In python) call torch.utils.rename_privateuse1_backend(\"foo\") You can now use \"foo\" as an ordinary device string in python. Note: this API can only be called once per process. Attempting to change the external backend after it's already been set will result in an error. Note(AMP): If you want to support AMP on your device, you can register a custom backend module. The backend must register a custom backend module with `` Returns the index of a currently selected device. For more details, see For an existing example, see Example:: >>> # xdoctest: +SKIP(\"failing\") >>> torch.utils.rename_privateuse1_backend(\"foo\") # This will work, assuming that you've implemented the right C++ kernels # to implement torch.ones. >>> a = torch.ones(2, device=\"foo\")",
    "type": "function",
    "file_path": "pytorch\\torch\\utils\\backend_registration.py",
    "ast_data": "FunctionDef name:rename_privateuse1_backend arg:backend_name arguments arg Call Assign"
  },
  {
    "library": "tensorflow",
    "name": "value_rowids",
    "source_code": "def value_rowids(self, name=None):\n    with ops.name_scope(name, 'RaggedValueRowIds', [self]):\n        return self._row_partition.value_rowids()",
    "docstring": "Returns the row indices for the in this ragged tensor. corresponds one-to-one with the outermost dimension of , and specifies the row containing each value. In particular, the row consists of the values where . Args: name: A name prefix for the returned tensor (optional). Returns: A 1-D integer with shape . The returned tensor is nonnegative, and is sorted in ascending order. #### Example: >>> rt = tf.ragged.constant([[3, 1, 4, 1], [], [5, 9, 2], [6], []]) >>> print(rt.values) tf.Tensor([3 1 4 1 5 9 2 6], shape=(8,), dtype=int32) >>> print(rt.value_rowids()) # corresponds 1:1 with rt.values tf.Tensor([0 0 0 0 2 2 2 3], shape=(8,), dtype=int64)",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\ragged_tensor.py",
    "ast_data": "FunctionDef name:value_rowids arg:self arg:name arguments arg arg With Call Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "TimerGTK",
    "source_code": "class TimerGTK(TimerBase):\n\n    def __init__(self, *args, **kwargs):\n        self._timer = None\n        super().__init__(*args, **kwargs)\n\n    def _timer_start(self):\n        self._timer_stop()\n        self._timer = GLib.timeout_add(self._interval, self._on_timer)\n\n    def _timer_stop(self):\n        if self._timer is not None:\n            GLib.source_remove(self._timer)\n            self._timer = None\n\n    def _timer_set_interval(self):\n        if self._timer is not None:\n            self._timer_stop()\n            self._timer_start()\n\n    def _on_timer(self):\n        super()._on_timer()\n        if self.callbacks and (not self._single):\n            return True\n        else:\n            self._timer = None\n            return False",
    "docstring": "Subclass of using GTK timer events.",
    "type": "class",
    "file_path": "matplotlib\\lib\\matplotlib\\backends\\_backend_gtk.py",
    "ast_data": "ClassDef name:TimerGTK FunctionDef name:__init__ arg:self arguments arg arg arg Assign Call Call FunctionDef name:_timer_start arg:self arguments arg Call Assign Call FunctionDef name:_timer_stop arg:self arguments arg If Compare Call Assign FunctionDef name:_timer_set_interval arg:self arguments arg If Compare Call Call FunctionDef name:_on_timer arg:self arguments arg Call Call If BoolOp Return return:yes Assign Return return:yes"
  },
  {
    "library": "scipy",
    "name": "__init__",
    "source_code": "def __init__(self, eigs, seed=None, tol=1e-13, diag_tol=1e-07):\n    self._dist = random_correlation_gen(seed)\n    self.tol = tol\n    self.diag_tol = diag_tol\n    _, self.eigs = self._dist._process_parameters(eigs, tol=self.tol)",
    "docstring": "Create a frozen random correlation matrix distribution. Parameters ---------- eigs : 1d ndarray Eigenvalues of correlation matrix seed : {None, int, , }, optional If is None (or ), the singleton is used. If is an int, a new `seedseed` instance then that instance is used. tol : float, optional Tolerance for input parameter checks diag_tol : float, optional Tolerance for deviation of the diagonal of the resulting matrix. Default: 1e-7 Raises ------ RuntimeError Floating point error prevented generating a valid correlation matrix. Returns ------- rvs : ndarray or scalar Random size N-dimensional matrices, dimension (size, dim, dim), each having eigenvalues eigs.",
    "type": "method",
    "file_path": "scipy\\scipy\\stats\\_multivariate.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:eigs arg:seed arg:tol arg:diag_tol arguments arg arg arg arg arg Assign Call Assign Assign Assign Call"
  },
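  {
    "library": "scipy",
    "name": "random_correlation_usage_sketch",
    "source_code": "import numpy as np\nfrom scipy.stats import random_correlation\n\nrng = np.random.default_rng(514)  # arbitrary seed for reproducibility\n# Eigenvalues must be non-negative and sum to the dimension (here 4).\nx = random_correlation.rvs((0.5, 0.8, 1.2, 1.5), random_state=rng)\n# x is a 4x4 correlation matrix: unit diagonal, requested eigenvalues.",
    "docstring": "Usage sketch for the frozen distribution the __init__ above backs, via the public scipy.stats.random_correlation object; the seed and eigenvalues are arbitrary illustrative choices.",
    "type": "example"
  },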
  {
    "library": "tensorflow",
    "name": "apply_indexed_slices_grad",
    "source_code": "def apply_indexed_slices_grad(self, grad, local_step=0, name=None):\n    return self.apply_grad(grad_indices=grad.indices, grad_values=grad.values, grad_shape=grad.dense_shape, local_step=local_step, name=name)",
    "docstring": "Attempts to apply a gradient to the accumulator. The attempt is silently dropped if the gradient is stale, i.e., is less than the accumulator's global time step. Args: grad: The gradient to be applied. local_step: Time step at which the gradient was computed. name: Optional name for the operation. Returns: The operation that (conditionally) applies a gradient to the accumulator. Raises: InvalidArgumentError: If grad is of the wrong shape",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\data_flow_ops.py",
    "ast_data": "FunctionDef name:apply_indexed_slices_grad arg:self arg:grad arg:local_step arg:name arguments arg arg arg arg Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "check_do_not_raise_errors_in_init_or_set_params",
    "source_code": "def check_do_not_raise_errors_in_init_or_set_params(name, estimator_orig):\n    Estimator = type(estimator_orig)\n    params = signature(Estimator).parameters\n    smoke_test_values = [-1, 3.0, 'helloworld', np.array([1.0, 4.0]), [1], {}, []]\n    for value in smoke_test_values:\n        new_params = {key: value for key in params}\n        est = Estimator(**new_params)\n        est.set_params(**new_params)",
    "docstring": "Check that init or set_param does not raise errors.",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\utils\\estimator_checks.py",
    "ast_data": "FunctionDef name:check_do_not_raise_errors_in_init_or_set_params arg:name arg:estimator_orig arguments arg arg Assign Call Assign Call Assign Call For Assign Assign Call Call"
  },
  {
    "library": "scipy",
    "name": "ttest_rel",
    "source_code": "def ttest_rel(a, b, axis=0, alternative='two-sided'):\n    a, b, axis = _chk2_asarray(a, b, axis)\n    if len(a) != len(b):\n        raise ValueError('unequal length arrays')\n    if a.size == 0 or b.size == 0:\n        return Ttest_relResult(np.nan, np.nan)\n    n = a.count(axis)\n    df = ma.asanyarray(n - 1.0)\n    d = (a - b).astype('d')\n    dm = d.mean(axis)\n    v = d.var(axis=axis, ddof=1)\n    denom = ma.sqrt(v / n)\n    with np.errstate(divide='ignore', invalid='ignore'):\n        t = dm / denom\n    t, prob = _ttest_finish(df, t, alternative)\n    return Ttest_relResult(t, prob)",
    "docstring": "Calculates the T-test on TWO RELATED samples of scores, a and b. Parameters ---------- a, b : array_like The arrays must have the same shape. axis : int or None, optional Axis along which to compute test. If None, compute over the whole arrays, , and . alternative : {'two-sided', 'less', 'greater'}, optional Defines the alternative hypothesis. The following options are available (default is 'two-sided'): * 'two-sided': the means of the distributions underlying the samples are unequal. * 'less': the mean of the distribution underlying the first sample is less than the mean of the distribution underlying the second sample. * 'greater': the mean of the distribution underlying the first sample is greater than the mean of the distribution underlying the second sample. .. versionadded:: 1.7.0 Returns ------- statistic : float or array t-statistic pvalue : float or array two-tailed p-value Notes ----- For more details on , see .",
    "type": "function",
    "file_path": "scipy\\scipy\\stats\\_mstats_basic.py",
    "ast_data": "FunctionDef name:ttest_rel arg:a arg:b arg:axis arg:alternative arguments arg arg arg arg Assign Call If Compare Call Call Raise Call If BoolOp Compare Compare Return return:yes Call Assign Call Assign Call Assign Call Assign Call Assign Call Assign Call With Call Assign Assign Call Return return:yes Call"
  },
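  {
    "library": "scipy",
    "name": "mstats_ttest_rel_usage_sketch",
    "source_code": "import numpy as np\nfrom scipy.stats import mstats\n\n# Made-up paired measurements for illustration.\nbefore = np.array([10.1, 9.8, 10.2, 10.5, 9.9])\nafter = np.array([9.9, 9.5, 10.0, 10.1, 9.7])\n# Paired t-test; the result unpacks as (statistic, pvalue).\nstat, p = mstats.ttest_rel(before, after)",
    "docstring": "Usage sketch for the masked-array t-test above via its public entry point scipy.stats.mstats.ttest_rel; the sample data are invented for illustration.",
    "type": "example"
  },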
  {
    "library": "cherrypy",
    "name": "__del__",
    "source_code": "def __del__(self):\n    if hasattr(self.input, 'close'):\n        self.input.close()",
    "docstring": "Close input on descturct.",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\lib\\__init__.py",
    "ast_data": "FunctionDef name:__del__ arg:self arguments arg If Call Call"
  },
  {
    "library": "matplotlib",
    "name": "reload_library",
    "source_code": "def reload_library():\n    library.clear()\n    library.update(update_user_library(_base_library))\n    available[:] = sorted(library.keys())",
    "docstring": "Reload the style library.",
    "type": "function",
    "file_path": "matplotlib\\lib\\matplotlib\\style\\core.py",
    "ast_data": "FunctionDef name:reload_library arguments Call Call Call Assign Call Call"
  },
  {
    "library": "scipy",
    "name": "_Aij",
    "source_code": "def _Aij(A, i, j):\n    return A[:i, :j].sum() + A[i + 1:, j + 1:].sum()",
    "docstring": "Sum of upper-left and lower right blocks of contingency table.",
    "type": "function",
    "file_path": "scipy\\scipy\\stats\\_stats_pythran.py",
    "ast_data": "FunctionDef name:_Aij arg:A arg:i arg:j arguments arg arg arg Return return:yes Call Call"
  },
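  {
    "library": "scipy",
    "name": "aij_block_sum_sketch",
    "source_code": "import numpy as np\n\ndef Aij(A, i, j):\n    # Restatement of the private helper above (the name Aij is illustrative):\n    # sum of the upper-left block (rows < i, cols < j) and the\n    # lower-right block (rows > i, cols > j) of a contingency table.\n    return A[:i, :j].sum() + A[i + 1:, j + 1:].sum()\n\nA = np.array([[3, 1, 0],\n              [1, 4, 2],\n              [0, 2, 5]])\nassert Aij(A, 1, 1) == 3 + 5  # blocks around the (1, 1) cell",
    "docstring": "NumPy restatement of the private helper above; this kind of block sum is what rank statistics use when counting pairs concordant with a given cell.",
    "type": "example"
  },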
  {
    "library": "sphinx",
    "name": "thebibliography",
    "source_code": "class thebibliography(nodes.container):\n    pass",
    "docstring": "A node for wrapping bibliographies.",
    "type": "class",
    "file_path": "sphinx\\sphinx\\builders\\latex\\nodes.py",
    "ast_data": "ClassDef name:thebibliography"
  },
  {
    "library": "scikit-learn",
    "name": "_transform",
    "source_code": "def _transform(self, X):\n    mask = self.get_support()\n    if not mask.any():\n        warnings.warn('No features were selected: either the data is too noisy or the selection test too strict.', UserWarning)\n        if hasattr(X, 'iloc'):\n            return X.iloc[:, :0]\n        return np.empty(0, dtype=X.dtype).reshape((X.shape[0], 0))\n    return _safe_indexing(X, mask, axis=1)",
    "docstring": "Reduce X to the selected features.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\feature_selection\\_base.py",
    "ast_data": "FunctionDef name:_transform arg:self arg:X arguments arg arg Assign Call If Call Call If Call Return return:yes Return return:yes Call Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "StrategyType",
    "source_code": "class StrategyType:\n    pass",
    "docstring": "Base class type for op strategy, We have two StrategyType: OpStrategy and TupleStrategy",
    "type": "class",
    "file_path": "pytorch\\torch\\distributed\\tensor\\_op_schema.py",
    "ast_data": "ClassDef name:StrategyType"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, proto, *, proto_as_initial_chunk: bool=True, parent_splitter: Optional['ComposableSplitter']=None, fields_in_parent: Optional[util.FieldTypes]=None):\n    self._proto = proto\n    self._parent_splitter = parent_splitter\n    self._fields_in_parent = fields_in_parent\n    self._built = False\n    self._add_chunk_order = []\n    self._fix_chunk_order = False\n    if parent_splitter is not None:\n        self._chunks = None\n        self._chunked_message = None\n    elif proto_as_initial_chunk:\n        self._chunks = [self._proto]\n        self._chunked_message = chunk_pb2.ChunkedMessage(chunk_index=0)\n        self._add_chunk_order.append(id(self._proto))\n    else:\n        self._chunks = []\n        self._chunked_message = chunk_pb2.ChunkedMessage()",
    "docstring": "Initializes ComposableSplitter. Args: proto: Proto message to split. proto_as_initial_chunk: Whether to initialize chunks with the user-provided proto as the initial chunk. parent_splitter: The parent object. fields_in_parent: Fields to access from the parent splitter's proto.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\tools\\proto_splitter\\split.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:proto arguments arg arg arg arg arg Assign Assign Assign Assign Assign Assign If Compare Assign Assign If Assign Assign Call Call Call Assign Assign Call"
  },
  {
    "library": "scikit-learn",
    "name": "power",
    "source_code": "def power(self, y: Array | complex, /, copy: bool | None=None, xp: ModuleType | None=None) -> Array:\n    return self._op(_AtOp.POWER, operator.ipow, operator.pow, y, copy=copy, xp=xp)",
    "docstring": "Apply `` and return the updated array.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\externals\\array_api_extra\\_lib\\_at.py",
    "ast_data": "FunctionDef name:power arg:copy arg:xp arguments arg arg arg arg Return return:yes Call"
  },
  {
    "library": "cryptography",
    "name": "subject_name",
    "source_code": "def subject_name(self, name: Name) -> CertificateBuilder:\n    if not isinstance(name, Name):\n        raise TypeError('Expecting x509.Name object.')\n    if self._subject_name is not None:\n        raise ValueError('The subject name may only be set once.')\n    return CertificateBuilder(self._issuer_name, name, self._public_key, self._serial_number, self._not_valid_before, self._not_valid_after, self._extensions)",
    "docstring": "Sets the requestor's distinguished name.",
    "type": "method",
    "file_path": "cryptography\\src\\cryptography\\x509\\base.py",
    "ast_data": "FunctionDef name:subject_name arg:self arg:name arguments arg arg If Call Raise Call If Compare Raise Call Return return:yes Call"
  },
  {
    "library": "kornia",
    "name": "transform_keypoints_",
    "source_code": "def transform_keypoints_(self, M: Tensor) -> 'Keypoints':\n    return self.transform_keypoints(M, inplace=True)",
    "docstring": "Inplace version of :func:.",
    "type": "method",
    "file_path": "kornia\\kornia\\geometry\\keypoints.py",
    "ast_data": "FunctionDef name:transform_keypoints_ arg:self arg:M arguments arg arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "set_dtype",
    "source_code": "def set_dtype(self, dtype):\n    self._dtype = dtype",
    "docstring": "Set data type for this scope.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\variable_scope.py",
    "ast_data": "FunctionDef name:set_dtype arg:self arg:dtype arguments arg arg Assign"
  },
  {
    "library": "kornia",
    "name": "inverse_transform",
    "source_code": "def inverse_transform(self, x: Tensor) -> Tensor:\n    if not self.fitted:\n        raise RuntimeError('Needs to be fitted first before running. Please call fit or set include_fit to True.')\n    if not self.compute_inv:\n        raise RuntimeError('Did not compute inverse ZCA. Please set compute_inv to True')\n    if self.transform_inv is None:\n        raise TypeError('The transform inverse should be a Tensor. Gotcha None.')\n    mean_inv: Tensor = -self.mean_vector.mm(self.transform_matrix)\n    y = linear_transform(x, self.transform_inv, mean_inv)\n    return y",
    "docstring": "Apply the inverse transform to the whitened data. Args: x: Whitened data. Returns: Original data.",
    "type": "method",
    "file_path": "kornia\\kornia\\enhance\\zca.py",
    "ast_data": "FunctionDef name:inverse_transform arg:self arg:x arguments arg arg If Raise Call If Raise Call If Compare Raise Call Call Assign Call Return return:yes"
  },
  {
    "library": "numpy",
    "name": "CCompiler_customize_cmd",
    "source_code": "def CCompiler_customize_cmd(self, cmd, ignore=()):\n    log.info('customize %s using %s' % (self.__class__.__name__, cmd.__class__.__name__))\n    if hasattr(self, 'compiler') and 'clang' in self.compiler[0] and (not (platform.machine() == 'arm64' and sys.platform == 'darwin')):\n        self.compiler.append('-ftrapping-math')\n        self.compiler_so.append('-ftrapping-math')\n\n    def allow(attr):\n        return getattr(cmd, attr, None) is not None and attr not in ignore\n    if allow('include_dirs'):\n        self.set_include_dirs(cmd.include_dirs)\n    if allow('define'):\n        for name, value in cmd.define:\n            self.define_macro(name, value)\n    if allow('undef'):\n        for macro in cmd.undef:\n            self.undefine_macro(macro)\n    if allow('libraries'):\n        self.set_libraries(self.libraries + cmd.libraries)\n    if allow('library_dirs'):\n        self.set_library_dirs(self.library_dirs + cmd.library_dirs)\n    if allow('rpath'):\n        self.set_runtime_library_dirs(cmd.rpath)\n    if allow('link_objects'):\n        self.set_link_objects(cmd.link_objects)",
    "docstring": "Customize compiler using distutils command. Parameters ---------- cmd : class instance An instance inheriting from ``. Returns ------- None",
    "type": "function",
    "file_path": "numpy\\numpy\\distutils\\ccompiler.py",
    "ast_data": "FunctionDef name:CCompiler_customize_cmd arg:self arg:cmd arg:ignore arguments arg arg arg Call If BoolOp Call Compare BoolOp Compare Call Compare Call Call FunctionDef name:allow arg:attr arguments arg Return return:yes BoolOp Compare Call Compare If Call Call If Call For Call If Call For Call If Call Call If Call Call If Call Call If Call Call"
  },
  {
    "library": "cherrypy",
    "name": "__call__",
    "source_code": "def __call__(self, path_info):\n    request = cherrypy.serving.request\n    func, vpath = self.find_handler(path_info)\n    if func:\n        vpath = [x.replace('%2F', '/') for x in vpath]\n        request.handler = LateParamPageHandler(func, *vpath)\n    else:\n        request.handler = cherrypy.NotFound()",
    "docstring": "Set handler and config for the current request.",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\_cpdispatch.py",
    "ast_data": "FunctionDef name:__call__ arg:self arg:path_info arguments arg arg Assign Assign Call If Assign Call Assign Call Assign Call"
  },
  {
    "library": "scikit-learn",
    "name": "reconstruction_error",
    "source_code": "def reconstruction_error(self):\n    G = -0.5 * self.dist_matrix_ ** 2\n    G_center = KernelCenterer().fit_transform(G)\n    evals = self.kernel_pca_.eigenvalues_\n    return np.sqrt(np.sum(G_center ** 2) - np.sum(evals ** 2)) / G.shape[0]",
    "docstring": "Compute the reconstruction error for the embedding. Returns ------- reconstruction_error : float Reconstruction error. Notes ----- The cost function of an isomap embedding is ``",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\manifold\\_isomap.py",
    "ast_data": "FunctionDef name:reconstruction_error arg:self arguments arg Assign Assign Call Call Assign Return return:yes Call Call Call"
  },
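  {
    "library": "scikit-learn",
    "name": "isomap_reconstruction_error_usage_sketch",
    "source_code": "from sklearn.datasets import make_swiss_roll\nfrom sklearn.manifold import Isomap\n\n# Dataset and hyperparameters are arbitrary illustrative choices.\nX, _ = make_swiss_roll(n_samples=300, random_state=0)\nembedding = Isomap(n_neighbors=10, n_components=2).fit(X)\n# Lower values indicate the embedding preserves geodesic distances better.\nerr = embedding.reconstruction_error()",
    "docstring": "Usage sketch for reconstruction_error on a fitted Isomap; the swiss-roll dataset and hyperparameters are arbitrary choices, not taken from the library source.",
    "type": "example"
  },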
  {
    "library": "cherrypy",
    "name": "__init__",
    "source_code": "def __init__(self, *args, **kwargs):\n    if kwargs.get('bindAddress', None) is None:\n        import socket\n        if not hasattr(socket, 'fromfd'):\n            raise ValueError('Dynamic FCGI server not available on this platform. You must use a static or external one by providing a legal bindAddress.')\n    self.args = args\n    self.kwargs = kwargs\n    self.ready = False",
    "docstring": "Initialize the FCGI server parameters.",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\process\\servers.py",
    "ast_data": "FunctionDef name:__init__ arg:self arguments arg arg arg If Compare Call If Call Raise Call Assign Assign Assign"
  },
  {
    "library": "tensorflow",
    "name": "_ResizeNearestNeighborGrad",
    "source_code": "@ops.RegisterGradient('ResizeNearestNeighbor')\ndef _ResizeNearestNeighborGrad(op: ops.Operation, grad):\n    image = op.inputs[0]\n    if image.get_shape()[1:3].is_fully_defined():\n        image_shape = image.get_shape()[1:3]\n    else:\n        image_shape = array_ops.shape(image)[1:3]\n    grads = gen_image_ops.resize_nearest_neighbor_grad(grad, image_shape, align_corners=op.get_attr('align_corners'), half_pixel_centers=op.get_attr('half_pixel_centers'))\n    return [grads, None]",
    "docstring": "The derivatives for nearest neighbor resizing. Args: op: The ResizeNearestNeighbor op. grad: The tensor representing the gradient w.r.t. the output. Returns: The gradients w.r.t. the input and the output.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\image_grad.py",
    "ast_data": "FunctionDef name:_ResizeNearestNeighborGrad arg:op arg:grad arguments arg arg Assign If Call Call Assign Call Assign Call Assign Call Call Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_decorate_run_options_for_profile",
    "source_code": "def _decorate_run_options_for_profile(self, run_options):\n    run_options.trace_level = config_pb2.RunOptions.FULL_TRACE",
    "docstring": "Modify a RunOptions object for profiling TensorFlow graph execution. Args: run_options: (RunOptions) the modified RunOptions object.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\debug\\wrappers\\framework.py",
    "ast_data": "FunctionDef name:_decorate_run_options_for_profile arg:self arg:run_options arguments arg arg Assign"
  },
  {
    "library": "scipy",
    "name": "step",
    "source_code": "def step(self, X0=None, T=None, N=None):\n    return step(self, X0=X0, T=T, N=N)",
    "docstring": "Return the step response of a continuous-time system. See for details.",
    "type": "method",
    "file_path": "scipy\\scipy\\signal\\_ltisys.py",
    "ast_data": "FunctionDef name:step arg:self arg:X0 arg:T arg:N arguments arg arg arg arg Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "_root_scalar_secant_doc",
    "source_code": "def _root_scalar_secant_doc():\n    pass",
    "docstring": "Options ------- args : tuple, optional Extra arguments passed to the objective function. xtol : float, optional Tolerance (absolute) for termination. rtol : float, optional Tolerance (relative) for termination. maxiter : int, optional Maximum number of iterations. x0 : float, required Initial guess. x1 : float, optional A second guess. Must be different from . If not specified, a value near will be chosen. options: dict, optional Specifies any method-specific options not covered above.",
    "type": "function",
    "file_path": "scipy\\scipy\\optimize\\_root_scalar.py",
    "ast_data": "FunctionDef name:_root_scalar_secant_doc arguments"
  },
  {
    "library": "django",
    "name": "get_list_filter",
    "source_code": "def get_list_filter(self, request):\n    return self.list_filter",
    "docstring": "Return a sequence containing the fields to be displayed as filters in the right sidebar of the changelist page.",
    "type": "method",
    "file_path": "django\\django\\contrib\\admin\\options.py",
    "ast_data": "FunctionDef name:get_list_filter arg:self arg:request arguments arg arg Return return:yes"
  },
  {
    "library": "scipy",
    "name": "_draw",
    "source_code": "@classmethod\ndef _draw(cls, sizes=None, rng=None, i_parameterization=None, proportions=None):\n    rng = np.random.default_rng(rng)\n    if len(cls._parameterizations) == 0:\n        return cls()\n    if i_parameterization is None:\n        n = cls._num_parameterizations()\n        i_parameterization = rng.integers(0, max(0, n - 1), endpoint=True)\n    parameterization = cls._parameterizations[i_parameterization]\n    parameters = parameterization.draw(sizes, rng, proportions=proportions, region='typical')\n    return cls(**parameters)",
    "docstring": "Draw a specific (fully-defined) distribution from the family. See _Parameterization.draw for documentation details.",
    "type": "method",
    "file_path": "scipy\\scipy\\stats\\_distribution_infrastructure.py",
    "ast_data": "FunctionDef name:_draw arg:cls arg:sizes arg:rng arg:i_parameterization arg:proportions arguments arg arg arg arg arg Assign Call If Compare Call Return return:yes Call If Compare Assign Call Assign Call Call Assign Assign Call Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "_get_abs_corr_mat",
    "source_code": "def _get_abs_corr_mat(self, X_filled, tolerance=1e-06):\n    n_features = X_filled.shape[1]\n    if self.n_nearest_features is None or self.n_nearest_features >= n_features:\n        return None\n    with np.errstate(invalid='ignore'):\n        abs_corr_mat = np.abs(np.corrcoef(X_filled.T))\n    abs_corr_mat[np.isnan(abs_corr_mat)] = tolerance\n    np.clip(abs_corr_mat, tolerance, None, out=abs_corr_mat)\n    np.fill_diagonal(abs_corr_mat, 0)\n    abs_corr_mat = normalize(abs_corr_mat, norm='l1', axis=0, copy=False)\n    return abs_corr_mat",
    "docstring": "Get absolute correlation matrix between features. Parameters ---------- X_filled : ndarray, shape (n_samples, n_features) Input data with the most recent imputations. tolerance : float, default=1e-6 can have nans, which will be replaced with . Returns ------- abs_corr_mat : ndarray, shape (n_features, n_features) Absolute correlation matrix of at the beginning of the current round. The diagonal has been zeroed out and each feature's absolute correlations with all others have been normalized to sum to 1.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\impute\\_iterative.py",
    "ast_data": "FunctionDef name:_get_abs_corr_mat arg:self arg:X_filled arg:tolerance arguments arg arg arg Assign If BoolOp Compare Compare Return return:no With Call Assign Call Call Assign Call Call Call Assign Call Return return:yes"
  },
  {
    "library": "django",
    "name": "TooManyFilesSent",
    "source_code": "class TooManyFilesSent(SuspiciousOperation):\n    pass",
    "docstring": "The number of fields in a GET or POST request exceeded settings.DATA_UPLOAD_MAX_NUMBER_FILES.",
    "type": "class",
    "file_path": "django\\django\\core\\exceptions.py",
    "ast_data": "ClassDef name:TooManyFilesSent"
  },
  {
    "library": "cherrypy",
    "name": "__init__",
    "source_code": "def __init__(self, bus):\n    self.is_set = False\n    plugins.SimplePlugin.__init__(self, bus)",
    "docstring": "Initialize the console control handler.",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\process\\win32.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:bus arguments arg arg Assign Call"
  },
  {
    "library": "pytorch",
    "name": "current_stream",
    "source_code": "def current_stream(device: _device_t=None, /) -> torch.Stream:\n    device_index = _get_device_index(device, True)\n    return torch._C._accelerator_getStream(device_index)",
    "docstring": "Return the currently selected stream for a given device. Args: device (:class:, str, int, optional): a given device that must match the current :ref: device type. If not given, use :func: by default. Returns: torch.Stream: the currently selected stream for a given device.",
    "type": "function",
    "file_path": "pytorch\\torch\\accelerator\\__init__.py",
    "ast_data": "FunctionDef name:current_stream arguments arg Assign Call Return return:yes Call"
  },
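  {
    "library": "pytorch",
    "name": "current_stream_usage_sketch",
    "source_code": "import torch\n\n# Guarded usage: torch.accelerator ships only in recent PyTorch releases,\n# hence the hasattr/is_available checks.\nif hasattr(torch, 'accelerator') and torch.accelerator.is_available():\n    stream = torch.accelerator.current_stream()    # current device\n    stream0 = torch.accelerator.current_stream(0)  # explicit device index\n    print(stream.device, stream0)",
    "docstring": "Usage sketch, assuming a PyTorch build new enough to expose torch.accelerator; the guards make the snippet a no-op elsewhere.",
    "type": "example"
  },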
  {
    "library": "scikit-learn",
    "name": "encode",
    "source_code": "def encode(self, obj):\n    data = [row for row in self.iter_encode(obj)]\n    return '\\n'.join(data)",
    "docstring": "Encodes a given object to an ARFF file. :param obj: the object containing the ARFF information. :return: the ARFF file as an string.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\externals\\_arff.py",
    "ast_data": "FunctionDef name:encode arg:self arg:obj arguments arg arg Assign Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "AddValue",
    "source_code": "def AddValue(self, val):\n    result = val\n    new_value = val.name not in self._values\n    new_value &= val.op._control_flow_context is not self\n    if new_value:\n        self._values.add(val.name)\n        grad_ctxt = ops.get_default_graph()._get_control_flow_context()\n        if grad_ctxt:\n            grad_ctxt = grad_ctxt.GetWhileContext()\n            if grad_ctxt.grad_state:\n                forward_ctxt = util.GetWhileContext(val.op)\n                if util.IsLoopExit(val.op):\n                    forward_ctxt = forward_ctxt.outer_context\n                    if forward_ctxt:\n                        forward_ctxt = forward_ctxt.GetWhileContext()\n                if forward_ctxt == grad_ctxt.grad_state.forward_context:\n                    real_val = grad_ctxt.grad_state.GetRealValue(val)\n                    self._external_values[val.name] = real_val\n                    return real_val\n        if self._outer_context is not None:\n            result = self._outer_context.AddValue(val)\n        with ops.control_dependencies(None):\n            enter = _Enter(result, self._name, is_constant=True, parallel_iterations=self._parallel_iterations)\n            enter.graph.prevent_feeding(enter)\n            if self._outer_context:\n                self._outer_context.AddInnerOp(enter.op)\n        self._FixControlInputsAndContext([enter])\n        self._values.add(enter.name)\n        self._external_values[val.name] = enter\n        result = enter\n    else:\n        actual_val = self._external_values.get(val.name)\n        if actual_val is not None:\n            result = actual_val\n    return result",
    "docstring": "Add to the current context and its outer context recursively.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\control_flow_ops.py",
    "ast_data": "FunctionDef name:AddValue arg:self arg:val arguments arg arg Assign Assign Compare Compare If Call Assign Call Call If Assign Call If Assign Call If Call Assign If Assign Call If Compare Assign Call Assign Return return:yes If Compare Assign Call With Call Assign Call Call If Call Call Call Assign Assign Assign Call If Compare Assign Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "get_pad",
    "source_code": "def get_pad(self):\n    return self._pad",
    "docstring": "Return the internal pad in points. See for more details.",
    "type": "method",
    "file_path": "matplotlib\\lib\\mpl_toolkits\\axisartist\\axis_artist.py",
    "ast_data": "FunctionDef name:get_pad arg:self arguments arg Return return:yes"
  },
  {
    "library": "django",
    "name": "handle_file_complete",
    "source_code": "def handle_file_complete(self, old_field_name, counters):\n    for i, handler in enumerate(self._upload_handlers):\n        file_obj = handler.file_complete(counters[i])\n        if file_obj:\n            self._files.appendlist(force_str(old_field_name, self._encoding, errors='replace'), file_obj)\n            break",
    "docstring": "Handle all the signaling that takes place when a file is complete.",
    "type": "method",
    "file_path": "django\\django\\http\\multipartparser.py",
    "ast_data": "FunctionDef name:handle_file_complete arg:self arg:old_field_name arg:counters arguments arg arg arg For Call Assign Call If Call Call"
  },
  {
    "library": "matplotlib",
    "name": "get_navigate_mode",
    "source_code": "def get_navigate_mode(self):\n    return self._navigate_mode",
    "docstring": "Get the navigation toolbar button status: 'PAN', 'ZOOM', or None.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\axes\\_base.py",
    "ast_data": "FunctionDef name:get_navigate_mode arg:self arguments arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_set_fsdp_flattened",
    "source_code": "def _set_fsdp_flattened(tensor: torch.Tensor) -> None:\n    setattr(tensor, FSDP_FLATTENED, True)",
    "docstring": "Sets an attribute on `` to mark it as flattened by FSDP. This is to avoid re-flattening it during nested construction.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\fsdp\\_common_utils.py",
    "ast_data": "FunctionDef name:_set_fsdp_flattened arg:tensor arguments arg Call"
  },
  {
    "library": "matplotlib",
    "name": "locked",
    "source_code": "def locked(self):\n    return self._owner is not None",
    "docstring": "Return whether the lock is currently held by an owner.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\widgets.py",
    "ast_data": "FunctionDef name:locked arg:self arguments arg Return return:yes Compare"
  },
  {
    "library": "tensorflow",
    "name": "_deduplicate_indexed_slices",
    "source_code": "def _deduplicate_indexed_slices(values, indices):\n    unique_indices, new_index_positions = array_ops.unique(indices)\n    summed_values = math_ops.unsorted_segment_sum(values, new_index_positions, array_ops.shape(unique_indices)[0])\n    return (summed_values, unique_indices)",
    "docstring": "Sums associated with any non-unique . Args: values: A with rank >= 1. indices: A one-dimensional integer , indexing into the first dimension of (as in an IndexedSlices object). Returns: A tuple of (, ) where is a de-duplicated version of and contains the sum of slices associated with each unique index.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\training\\optimizer.py",
    "ast_data": "FunctionDef name:_deduplicate_indexed_slices arg:values arg:indices arguments arg arg Assign Call Assign Call Call Return return:yes"
  },
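  {
    "library": "tensorflow",
    "name": "deduplicate_indexed_slices_numpy_sketch",
    "source_code": "import numpy as np\n\ndef deduplicate_indexed_slices(values, indices):\n    # NumPy sketch (not TensorFlow code) of the same reduction: sum the rows\n    # of `values` that share an index; return (summed_values, unique_indices).\n    unique_indices, positions = np.unique(indices, return_inverse=True)\n    summed = np.zeros((len(unique_indices),) + values.shape[1:], values.dtype)\n    np.add.at(summed, positions, values)\n    return summed, unique_indices\n\nvals = np.array([[1.0, 1.0], [2.0, 2.0], [3.0, 3.0]])\nidx = np.array([0, 2, 0])\n# -> rows [[4., 4.], [2., 2.]] for unique indices [0, 2]\nsummed, uniq = deduplicate_indexed_slices(vals, idx)",
    "docstring": "NumPy restatement of the reduction performed above, using np.unique and np.add.at in place of TensorFlow's unsorted_segment_sum; names are illustrative.",
    "type": "example"
  },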
  {
    "library": "tensorflow",
    "name": "tensor_list_scatter",
    "source_code": "def tensor_list_scatter(tensor, indices, element_shape=None, input_handle=None, name=None):\n    tensor = ops.convert_to_tensor(tensor)\n    if input_handle is not None:\n        output_handle = gen_list_ops.tensor_list_scatter_into_existing_list(input_handle=input_handle, tensor=tensor, indices=indices, name=name)\n        handle_data_util.copy_handle_data(input_handle, output_handle)\n        return output_handle\n    else:\n        output_handle = gen_list_ops.tensor_list_scatter_v2(tensor=tensor, indices=indices, element_shape=_build_element_shape(element_shape), num_elements=-1, name=name)\n        _set_handle_data(output_handle, element_shape, tensor.dtype)\n        return output_handle",
    "docstring": "Returns a TensorList created or updated by scattering .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\list_ops.py",
    "ast_data": "FunctionDef name:tensor_list_scatter arg:tensor arg:indices arg:element_shape arg:input_handle arg:name arguments arg arg arg arg arg Assign Call If Compare Assign Call Call Return return:yes Assign Call Call Call Return return:yes"
  },
  {
    "library": "numpy",
    "name": "first_group",
    "source_code": "def first_group(self, getter=None):\n    if self.first:\n        return True\n    return self._compare_group(self.item, self.previous, getter)",
    "docstring": "Returns true if this item is the start of a new group, where groups mean that some attribute has changed. The getter can be None (the item itself changes), an attribute name like ``, a function, or a dict key or list index.",
    "type": "method",
    "file_path": "numpy\\numpy\\_build_utils\\tempita\\_looper.py",
    "ast_data": "FunctionDef name:first_group arg:self arg:getter arguments arg arg If Return return:yes Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_py_assert_stmt",
    "source_code": "def _py_assert_stmt(expression1, expression2):\n    assert expression1, expression2()\n    return None",
    "docstring": "Overload of assert_stmt that executes a Python assert statement.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\autograph\\operators\\exceptions.py",
    "ast_data": "FunctionDef name:_py_assert_stmt arg:expression1 arg:expression2 arguments arg arg Call Return return:no"
  },
  {
    "library": "tensorflow",
    "name": "restore",
    "source_code": "def restore(self, restored_tensors, restored_shapes):\n    tensor, = restored_tensors\n    return values_util.get_on_read_restore_ops(self._sync_on_read_variable, tensor, self._sync_on_read_variable.aggregation)",
    "docstring": "Restore the same value into all variables.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\values.py",
    "ast_data": "FunctionDef name:restore arg:self arg:restored_tensors arg:restored_shapes arguments arg arg arg Assign Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "__init__",
    "source_code": "def __init__(self):\n    super().__init__()\n    self.Vertex = VertexCube",
    "docstring": "Class for a vertex cache for a simplicial complex without an associated field. Useful only for building and visualising a domain complex. Parameters ----------",
    "type": "method",
    "file_path": "scipy\\scipy\\optimize\\_shgo_lib\\_vertex.py",
    "ast_data": "FunctionDef name:__init__ arg:self arguments arg Call Call Assign"
  },
  {
    "library": "scrapy",
    "name": "_BenchSpider",
    "source_code": "class _BenchSpider(scrapy.Spider):\n    name = 'follow'\n    total = 10000\n    show = 20\n    baseurl = 'http://localhost:8998'\n    link_extractor = LinkExtractor()\n\n    async def start(self) -> AsyncIterator[Any]:\n        qargs = {'total': self.total, 'show': self.show}\n        url = f'{self.baseurl}?{urlencode(qargs, doseq=True)}'\n        yield scrapy.Request(url, dont_filter=True)\n\n    def parse(self, response: Response) -> Any:\n        assert isinstance(response, TextResponse)\n        for link in self.link_extractor.extract_links(response):\n            yield scrapy.Request(link.url, callback=self.parse)",
    "docstring": "A spider that follows all links",
    "type": "class",
    "file_path": "scrapy\\scrapy\\commands\\bench.py",
    "ast_data": "ClassDef name:_BenchSpider Assign Assign Assign Assign Assign Call AsyncFunctionDef name:start arg:self arguments arg Assign Assign Call Call FunctionDef name:parse arg:self arg:response arguments arg arg Call For Call Call"
  },
  {
    "library": "pytorch",
    "name": "cumsum_inference_rule",
    "source_code": "@register_inference_rule(torch.cumsum)\ndef cumsum_inference_rule(n: Node, symbols, constraints, counter):\n    assert isinstance(n.args[0], Node)\n    arg_1 = n.args[1] if len(n.args) > 1 else n.kwargs['dim']\n    assert isinstance(arg_1, int)\n    output, counter = gen_tvar(counter)\n    symbols[n] = output\n    input = symbols[n.args[0]]\n    input_dyn = BinConstraintT(input, Dyn, op_eq)\n    output_dyn = BinConstraintT(output, Dyn, op_eq)\n    c1 = Conj([input_dyn, output_dyn])\n    c2 = []\n    for i in range(1, MAX_TENSOR_RANK + 1):\n        new_dims, counter = gen_tensor_dims(i, counter)\n        nat_constraints = gen_nat_constraints(new_dims)\n        c_tensor_i = Conj([BinConstraintT(input, TensorType(new_dims), op_eq), BinConstraintT(output, TensorType(new_dims), op_eq)] + [range_check(arg_1, i)] + nat_constraints)\n        c2.append(c_tensor_i)\n    dyn_or_tensor = Disj([c1, Disj(c2)])\n    return ([dyn_or_tensor], counter)",
    "docstring": "Input and output shapes should be equal We should verify that the index is valid",
    "type": "function",
    "file_path": "pytorch\\torch\\fx\\experimental\\migrate_gradual_types\\constraint_generator.py",
    "ast_data": "FunctionDef name:cumsum_inference_rule arg:n arg:symbols arg:constraints arg:counter arguments arg arg arg arg Call Assign Compare Call Call Assign Call Assign Assign Assign Call Assign Call Assign Call Assign For Call Assign Call Assign Call Assign Call Call Call Call Call Call Call Assign Call Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "quantize_per_channel_group_meta",
    "source_code": "@impl(quantized_decomposed_lib, 'quantize_per_channel_group', 'Meta')\ndef quantize_per_channel_group_meta(input: torch.Tensor, scales: torch.Tensor, zero_points: torch.Tensor, quant_min: int, quant_max: int, dtype: torch.dtype, group_size=128):\n    assert group_size > 1\n    if group_size > input.shape[-1] and scales.shape[-1] == 1:\n        group_size = input.shape[-1]\n    assert input.shape[-1] % group_size == 0\n    assert input.dim() == 2\n    return torch.empty_like(input, dtype=dtype)",
    "docstring": "Groupwise quantization within each channel for an 2-d Tensor using the quantization parameters to map from floating point to quantized values. This means for each row of a 2-d Tensor (M, N), we calculate scales/zero_points for each elements and quantize every elements with the same quantization parameter. The dimension for scales/zero_points will be (M * ceil(N, group_size),) Args: input (torch.Tensor): original float32 or bfloat16 Tensor scales (float32 torch.Tensor): quantization parameter for per channel group affine quantization zero_points (int32 torch.Tensor): quantization parameter for per channel group affine quantization quant_min (int): minimum quantized value for output Tensor quant_max (int): maximum quantized value for output Tensor dtype (torch.dtype): requested dtype (e.g. torch.uint8) for output Tensor Returns: Tensor with requested dtype (e.g. torch.uint8), note the quantization parameters are not stored in the Tensor, we are storing them in function arguments instead",
    "type": "function",
    "file_path": "pytorch\\torch\\ao\\quantization\\fx\\_decomposed.py",
    "ast_data": "FunctionDef name:quantize_per_channel_group_meta arg:input arg:scales arg:zero_points arg:quant_min arg:quant_max arg:dtype arg:group_size arguments arg arg arg arg arg arg arg Compare If BoolOp Compare Compare Assign Compare Compare Call Return return:yes Call Call"
  },
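  {
    "library": "pytorch",
    "name": "groupwise_quantization_sketch",
    "source_code": "import torch\n\ndef quantize_per_group_symmetric(x, group_size=4, quant_min=-8, quant_max=7):\n    # Hypothetical simplified sketch of the groupwise scheme described above:\n    # symmetric (zero_point == 0) rather than affine, one scale per\n    # `group_size` contiguous elements in each row. Not the library's op.\n    M, N = x.shape\n    assert N % group_size == 0\n    g = x.reshape(M, N // group_size, group_size)\n    scales = g.abs().amax(dim=-1, keepdim=True).clamp(min=1e-8) / quant_max\n    q = torch.clamp(torch.round(g / scales), quant_min, quant_max)\n    return q.reshape(M, N).to(torch.int8), scales.reshape(M, -1)\n\nq, s = quantize_per_group_symmetric(torch.randn(2, 8))",
    "docstring": "Hypothetical simplified sketch of groupwise quantization as described by the meta kernel above: symmetric rather than affine (no zero_points), with illustrative names; not the library's decomposed op.",
    "type": "example"
  },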
  {
    "library": "tensorflow",
    "name": "switch",
    "source_code": "def switch(data, pred, dtype=None, name=None):\n    with ops.name_scope(name, 'Switch', [data, pred]) as name:\n        data = ops.internal_convert_to_tensor_or_composite(data, dtype=dtype, name='data', as_ref=True)\n        pred = ops.convert_to_tensor(pred, name='pred')\n        if isinstance(data, tensor_lib.Tensor):\n            return gen_control_flow_ops.switch(data, pred, name=name)\n        else:\n            if not isinstance(data, composite_tensor.CompositeTensor):\n                raise TypeError(f\"'data' must be a Tensor or CompositeTensor. Received: {type(data)}.\")\n            tensors = nest.flatten(data, expand_composites=True)\n            mapped = [gen_control_flow_ops.switch(tensor, pred) for tensor in tensors]\n            mapped_f, mapped_t = zip(*mapped)\n            return (nest.pack_sequence_as(data, mapped_f, expand_composites=True), nest.pack_sequence_as(data, mapped_t, expand_composites=True))",
    "docstring": "Forwards to an output determined by . If is false, the input is forwarded to the first output. Otherwise, the data goes to the second output. This op handles s and . Args: data: The tensor to be forwarded to the appropriate output. pred: A scalar that specifies which output port will receive data. dtype: Optional element type for the returned tensor. If missing, the type is inferred from the type of . name: A name for this operation (optional). Returns: : If is true, data will be forwarded to , otherwise it goes to .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\control_flow_ops.py",
    "ast_data": "FunctionDef name:switch arg:data arg:pred arg:dtype arg:name arguments arg arg arg arg With Call Assign Call Assign Call If Call Return return:yes Call If Call Raise Call Call Assign Call Assign Call Assign Call Return return:yes Call Call"
  },
  {
    "library": "matplotlib",
    "name": "unit_circle_righthalf",
    "source_code": "@classmethod\ndef unit_circle_righthalf(cls):\n    if cls._unit_circle_righthalf is None:\n        MAGIC = 0.2652031\n        SQRTHALF = np.sqrt(0.5)\n        MAGIC45 = SQRTHALF * MAGIC\n        vertices = np.array([[0.0, -1.0], [MAGIC, -1.0], [SQRTHALF - MAGIC45, -SQRTHALF - MAGIC45], [SQRTHALF, -SQRTHALF], [SQRTHALF + MAGIC45, -SQRTHALF + MAGIC45], [1.0, -MAGIC], [1.0, 0.0], [1.0, MAGIC], [SQRTHALF + MAGIC45, SQRTHALF - MAGIC45], [SQRTHALF, SQRTHALF], [SQRTHALF - MAGIC45, SQRTHALF + MAGIC45], [MAGIC, 1.0], [0.0, 1.0], [0.0, -1.0]], float)\n        codes = np.full(14, cls.CURVE4, dtype=cls.code_type)\n        codes[0] = cls.MOVETO\n        codes[-1] = cls.CLOSEPOLY\n        cls._unit_circle_righthalf = cls(vertices, codes, readonly=True)\n    return cls._unit_circle_righthalf",
    "docstring": "Return a of the right half of a unit circle. See for the reference on the approximation used.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\path.py",
    "ast_data": "FunctionDef name:unit_circle_righthalf arg:cls arguments arg If Compare Assign Assign Call Assign Assign Call Assign Call Assign Assign Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "configure_callbacks",
    "source_code": "def configure_callbacks(callbacks, model, do_validation=False, batch_size=None, epochs=None, steps_per_epoch=None, samples=None, verbose=1, count_mode='steps', mode=ModeKeys.TRAIN):\n    if isinstance(callbacks, CallbackList):\n        return callbacks\n    if not callbacks:\n        callbacks = []\n    if mode == ModeKeys.TRAIN:\n        model.history = History()\n        callbacks = [BaseLogger()] + (callbacks or []) + [model.history]\n        if verbose:\n            callbacks.append(ProgbarLogger(count_mode))\n    callback_list = CallbackList(callbacks)\n    callback_model = model._get_callback_model()\n    callback_list.set_model(callback_model)\n    set_callback_parameters(callback_list, model, do_validation=do_validation, batch_size=batch_size, epochs=epochs, steps_per_epoch=steps_per_epoch, samples=samples, verbose=verbose, mode=mode)\n    callback_list.model.stop_training = False\n    return callback_list",
    "docstring": "Configures callbacks for use in various training loops. Args: callbacks: List of Callbacks. model: Model being trained. do_validation: Whether or not validation loop will be run. batch_size: Number of samples per batch. epochs: Number of epoch to train. steps_per_epoch: Number of batches to run per training epoch. samples: Number of training samples. verbose: int, 0 or 1. Keras logging verbosity to pass to ProgbarLogger. count_mode: One of 'steps' or 'samples'. Per-batch or per-sample count. mode: String. One of ModeKeys.TRAIN, ModeKeys.TEST, or ModeKeys.PREDICT. Which loop mode to configure callbacks for. Returns: Instance of CallbackList used to control all Callbacks.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\callbacks.py",
    "ast_data": "FunctionDef name:configure_callbacks arg:callbacks arg:model arg:do_validation arg:batch_size arg:epochs arg:steps_per_epoch arg:samples arg:verbose arg:count_mode arg:mode arguments arg arg arg arg arg arg arg arg arg arg If Call Return return:yes If Assign If Compare Assign Call Assign Call BoolOp If Call Call Assign Call Assign Call Call Call Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "zeros_like",
    "source_code": "@dispatch.dispatch_for_types(array_ops.zeros_like, StructuredTensor)\ndef zeros_like(tensor, dtype=None, name=None, optimize=True):\n    del optimize\n    return zeros_like_v2(tensor, dtype=dtype, name=name)",
    "docstring": "Implementation of zeros_like for StructuredTensor for TF v1.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\structured\\structured_array_ops.py",
    "ast_data": "FunctionDef name:zeros_like arg:tensor arg:dtype arg:name arg:optimize arguments arg arg arg arg Return return:yes Call Call"
  },
  {
    "library": "numpy",
    "name": "ptp",
    "source_code": "@array_function_dispatch(_ptp_dispatcher)\ndef ptp(a, axis=None, out=None, keepdims=np._NoValue):\n    kwargs = {}\n    if keepdims is not np._NoValue:\n        kwargs['keepdims'] = keepdims\n    return _methods._ptp(a, axis=axis, out=out, **kwargs)",
    "docstring": "Range of values (maximum - minimum) along an axis. The name of the function comes from the acronym for 'peak to peak'. .. warning:: preserves the data type of the array. This means the return value for an input of signed integers with n bits (e.g. , , etc) is also a signed integer with n bits. In that case, peak-to-peak values greater than `axiskeepdimsptpndarraykeepdimsscalarview()` method to view the result as unsigned integers with the same bit width: >>> np.ptp(y, axis=1).view(np.uint8) array([126, 127, 128, 129], dtype=uint8)",
    "type": "function",
    "file_path": "numpy\\numpy\\_core\\fromnumeric.py",
    "ast_data": "FunctionDef name:ptp arg:a arg:axis arg:out arg:keepdims arguments arg arg arg arg Assign If Compare Assign Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "register_backward_hook",
    "source_code": "def register_backward_hook(self, hook: Callable[['Module', _grad_t, _grad_t], Union[None, _grad_t]]) -> RemovableHandle:\n    if self._is_full_backward_hook is True:\n        raise RuntimeError('Cannot use both regular backward hooks and full backward hooks on a single Module. Please use only one of them.')\n    self._is_full_backward_hook = False\n    handle = RemovableHandle(self._backward_hooks)\n    self._backward_hooks[handle.id] = hook\n    return handle",
    "docstring": "Register a backward hook on the module. This function is deprecated in favor of :meth: and the behavior of this function will change in future versions. Returns: :class:: a handle that can be used to remove the added hook by calling ``",
    "type": "method",
    "file_path": "pytorch\\torch\\nn\\modules\\module.py",
    "ast_data": "FunctionDef name:register_backward_hook arg:self arg:hook arguments arg arg If Compare Raise Call Assign Assign Call Assign Return return:yes"
  },
  {
    "library": "numpy",
    "name": "_findfile",
    "source_code": "def _findfile(self, path):\n    return DataSource._findfile(self, self._fullpath(path))",
    "docstring": "Extend DataSource method to prepend baseurl to ``.",
    "type": "method",
    "file_path": "numpy\\numpy\\lib\\_datasource.py",
    "ast_data": "FunctionDef name:_findfile arg:self arg:path arguments arg arg Return return:yes Call Call"
  },
  {
    "library": "matplotlib",
    "name": "grid",
    "source_code": "def grid(self, visible=True, **kwargs):\n    if len(kwargs):\n        visible = True\n    self._draw_grid = visible\n    self.stale = True",
    "docstring": "Set / unset 3D grid. .. note:: Currently, this function does not behave the same as , but it is intended to eventually support that behavior.",
    "type": "method",
    "file_path": "matplotlib\\lib\\mpl_toolkits\\mplot3d\\axes3d.py",
    "ast_data": "FunctionDef name:grid arg:self arg:visible arguments arg arg arg If Call Assign Assign Assign"
  },
  {
    "library": "pytorch",
    "name": "relu",
    "source_code": "def relu(input: Tensor, inplace: bool=False) -> Tensor:\n    if has_torch_function_unary(input):\n        return handle_torch_function(relu, (input,), input, inplace=inplace)\n    if inplace:\n        result = torch.relu_(input)\n    else:\n        result = torch.relu(input)\n    return result",
    "docstring": "relu(input, inplace=False) -> Tensor Applies the rectified linear unit function element-wise. See :class: for more details.",
    "type": "function",
    "file_path": "pytorch\\torch\\nn\\functional.py",
    "ast_data": "FunctionDef name:relu arg:input arg:inplace arguments arg arg If Call Return return:yes Call If Assign Call Assign Call Return return:yes"
  },
  {
    "library": "kornia",
    "name": "_scale_index_to_scale",
    "source_code": "def _scale_index_to_scale(max_coords: Tensor, sigmas: Tensor, num_levels: int) -> Tensor:\n    B, N, _ = max_coords.shape\n    scale_coords = max_coords[:, :, 0].contiguous().view(-1, 1, 1, 1)\n    out = concatenate([sigmas[0, 0] * torch.pow(2.0, scale_coords / float(num_levels)).view(B, N, 1), max_coords[:, :, 1:]], 2)\n    return out",
    "docstring": "Auxiliary function for ScaleSpaceDetector. Converts scale level index from ConvSoftArgmax3d to the actual scale, using the sigmas from the ScalePyramid output. Args: max_coords: tensor [BxNx3]. sigmas: tensor [BxNxD], D >= 1 num_levels: number of levels in the scale index. Returns: tensor [BxNx3].",
    "type": "function",
    "file_path": "kornia\\kornia\\feature\\scale_space_detector.py",
    "ast_data": "FunctionDef name:_scale_index_to_scale arg:max_coords arg:sigmas arg:num_levels arguments arg arg arg Assign Assign Call Call Assign Call Call Call Call Return return:yes"
  },
  {
    "library": "scipy",
    "name": "_get_dtype",
    "source_code": "def _get_dtype(dtype):\n    if np.issubdtype(dtype, np.complexfloating):\n        return np.complex128\n    else:\n        return np.float64",
    "docstring": "Return np.complex128 for complex dtypes, np.float64 otherwise.",
    "type": "function",
    "file_path": "scipy\\scipy\\interpolate\\_ndbspline.py",
    "ast_data": "FunctionDef name:_get_dtype arg:dtype arguments arg If Call Return return:yes Return return:yes"
  },
  {
    "library": "numpy",
    "name": "_UFuncBinaryResolutionError",
    "source_code": "@_display_as_base\nclass _UFuncBinaryResolutionError(_UFuncNoLoopError):\n\n    def __init__(self, ufunc, dtypes):\n        super().__init__(ufunc, dtypes)\n        assert len(self.dtypes) == 2\n\n    def __str__(self):\n        return 'ufunc {!r} cannot use operands with types {!r} and {!r}'.format(self.ufunc.__name__, *self.dtypes)",
    "docstring": "Thrown when a binary resolution fails",
    "type": "class",
    "file_path": "numpy\\numpy\\_core\\_exceptions.py",
    "ast_data": "ClassDef name:_UFuncBinaryResolutionError FunctionDef name:__init__ arg:self arg:ufunc arg:dtypes arguments arg arg arg Call Call Compare Call FunctionDef name:__str__ arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "sphinx",
    "name": "note_object",
    "source_code": "def note_object(self, name: str, objtype: str, node_id: str, aliased: bool=False, location: Any=None) -> None:\n    if name in self.objects:\n        other = self.objects[name]\n        if other.aliased and aliased is False:\n            pass\n        elif other.aliased is False and aliased:\n            return\n        else:\n            logger.warning(__('duplicate object description of %s, other instance in %s, use :no-index: for one of them'), name, other.docname, location=location)\n    self.objects[name] = ObjectEntry(self.env.docname, node_id, objtype, aliased)",
    "docstring": "Note a python object for cross reference. .. versionadded:: 2.1",
    "type": "method",
    "file_path": "sphinx\\sphinx\\domains\\python\\__init__.py",
    "ast_data": "FunctionDef name:note_object arg:self arg:name arg:objtype arg:node_id arg:aliased arg:location arguments arg arg arg arg arg arg If Compare Assign If BoolOp Compare If BoolOp Compare Return return:no Call Call Assign Call"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, container_strategy, cluster_resolver: base_cluster_resolver.ClusterResolver, variable_partitioner):\n    super(ParameterServerStrategyV2Extended, self).__init__(container_strategy)\n    self._num_ps = len(cluster_resolver.cluster_spec().as_dict().get('ps', []))\n    self._num_workers = len(cluster_resolver.cluster_spec().as_dict().get('worker', []))\n    self._variable_count = 0\n    self._variable_partitioner = variable_partitioner\n    self._used_with_coordinator = False\n    self._being_scheduled = False\n    self._set_num_gpus()\n    distribute_lib.distribution_strategy_replica_gauge.get_cell('num_gpus_per_worker').set(self._num_gpus_per_worker)\n    self._cross_device_ops = cross_device_ops_lib.ReductionToOneDevice(reduce_to_device='/device:CPU:0')\n    self._cross_device_ops._canonicalize_devices = False\n    self._allow_run_without_coordinator = False\n    self._coordinator_creation_lock = threading.Lock()",
    "docstring": "Initialization of ParameterServerStrategyV2Extended.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\parameter_server_strategy_v2.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:container_strategy arg:cluster_resolver arg:variable_partitioner arguments arg arg arg arg Call Call Assign Call Call Call Call Assign Call Call Call Call Assign Assign Assign Assign Call Call Call Assign Call Assign Assign Assign Call"
  },
  {
    "library": "pytorch",
    "name": "try_combining_partitions",
    "source_code": "def try_combining_partitions(p0_index, p1_index, partitions) -> float:\n    p0 = partitions[p0_index]\n    p1 = partitions[p1_index]\n    \"If two partitions' bfs level are less than 2 or two partitions are connected to each other,\\n               then they can be combined\\n            \"\n    if abs(p0.bfs_level - p1.bfs_level) <= 1 or p0 in p1.parents or p0 in p1.children:\n        combine_two_partitions(p0, p1, partitions)\n        if check_dependency(partitions[-1]):\n            return float('inf')\n        reset_partition_device(partitions)\n        found_deivce = get_device_to_partitions_mapping(partitions, self.devices)\n        if not found_deivce:\n            return float('inf')\n        partition_to_latency_mapping = get_partition_to_latency_mapping(partitions, node_to_latency_mapping)\n        cost = get_latency_of_partitioned_graph(partitions, partition_to_latency_mapping, transfer_rate_bytes_per_sec)\n        return cost\n    return float('inf')",
    "docstring": "Given two partitions and a list of partitions, combine these two partitions and see what is the cost of the modified partition list",
    "type": "method",
    "file_path": "pytorch\\torch\\fx\\experimental\\accelerator_partitioner.py",
    "ast_data": "FunctionDef name:try_combining_partitions arg:p0_index arg:p1_index arg:partitions arguments arg arg arg Assign Assign If BoolOp Compare Call Compare Compare Call If Call Return return:yes Call Call Assign Call If Return return:yes Call Assign Call Assign Call Return return:yes Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "_disable_user_warnings",
    "source_code": "def _disable_user_warnings(func: Callable[_P, _R], regex: str='.*is deprecated, please use.*', module: str='torch') -> Callable[_P, _R]:\n\n    @wraps(func)\n    def wrapper(*args: _P.args, **kwargs: _P.kwargs) -> _R:\n        with warnings.catch_warnings():\n            warnings.filterwarnings('ignore', category=UserWarning, message=regex, module=module)\n            return func(*args, **kwargs)\n    return wrapper",
    "docstring": "Decorator that temporarily disables `` message. module : str The python module to which the filtering should be restricted. Returns ------- function The wrapped function.",
    "type": "function",
    "file_path": "pytorch\\torch\\overrides.py",
    "ast_data": "FunctionDef name:_disable_user_warnings arg:func arg:regex arg:module arguments arg arg arg FunctionDef name:wrapper arguments arg arg With Call Call Return return:yes Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "get_force_fallback",
    "source_code": "def get_force_fallback() -> str:\n    return torch._C._lazy._get_force_fallback()",
    "docstring": "Get the config used to force LTC fallback",
    "type": "function",
    "file_path": "pytorch\\torch\\_lazy\\config.py",
    "ast_data": "FunctionDef name:get_force_fallback arguments Return return:yes Call"
  },
  {
    "library": "authlib",
    "name": "query_authorization_code",
    "source_code": "def query_authorization_code(self, code, client):\n    raise NotImplementedError()",
    "docstring": "Get authorization_code from previously savings. Developers MUST implement it in subclass:: def query_authorization_code(self, code, client): return Authorization.get(code=code, client_id=client.client_id) :param code: a string represent the code. :param client: client related to this code. :return: authorization_code object",
    "type": "method",
    "file_path": "authlib\\authlib\\oauth2\\rfc6749\\grants\\authorization_code.py",
    "ast_data": "FunctionDef name:query_authorization_code arg:self arg:code arg:client arguments arg arg arg Raise Call"
  },
  {
    "library": "tensorflow",
    "name": "_emit_tensor_snapshot",
    "source_code": "def _emit_tensor_snapshot(self, tensor: _TensorTracker, timestamp: int, pid: int, tid: int, value: step_stats_pb2.NodeOutput) -> None:\n    desc = str(value.tensor_description).replace('\"', '')\n    snapshot = {'tensor_description': desc}\n    self._chrome_trace.emit_obj_snapshot('Tensor', tensor.name, timestamp, pid, tid, tensor.object_id, snapshot)",
    "docstring": "Generate Chrome Trace snapshot event for a computed Tensor. Args: tensor: A 'TensorTracker' object. timestamp: The timestamp of this snapshot as a long integer. pid: The pid assigned for showing the device where this op ran. tid: The tid of the thread computing the tensor snapshot. value: A JSON-compliant snapshot of the object.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\client\\timeline.py",
    "ast_data": "FunctionDef name:_emit_tensor_snapshot arg:self arg:tensor arg:timestamp arg:pid arg:tid arg:value arguments arg arg arg arg arg arg Assign Call Call Assign Call"
  },
  {
    "library": "tensorflow",
    "name": "_shape_common",
    "source_code": "def _shape_common(s1, s2):\n    s1 = tensor_shape.TensorShape(s1)\n    s2 = tensor_shape.TensorShape(s2)\n    if s1.ndims is None or s2.ndims is None or s1.ndims != s2.ndims:\n        return tensor_shape.unknown_shape()\n    d = [d1 if d1 is not None and d1 == d2 else None for d1, d2 in zip(s1.as_list(), s2.as_list())]\n    return tensor_shape.TensorShape(d)",
    "docstring": "The greatest lower bound (ordered by specificity) TensorShape.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\data_flow_ops.py",
    "ast_data": "FunctionDef name:_shape_common arg:s1 arg:s2 arguments arg arg Assign Call Assign Call If BoolOp Compare Compare Compare Return return:yes Call Assign BoolOp Compare Compare Call Call Call Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "p1",
    "source_code": "@property\ndef p1(self):\n    return self.get_points()[1]",
    "docstring": "The second pair of (*x*, *y*) coordinates that define the bounding box. This is not guaranteed to be the top-right corner (for that, use :attr:).",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\transforms.py",
    "ast_data": "FunctionDef name:p1 arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "authlib",
    "name": "validate_default_max_age",
    "source_code": "def validate_default_max_age(self):\n    if self.get('default_max_age') is not None and (not isinstance(self['default_max_age'], (int, float))):\n        raise InvalidClaimError('default_max_age')\n    self._validate_claim_value('default_max_age')",
    "docstring": "Default Maximum Authentication Age. Specifies that the End-User MUST be actively authenticated if the End-User was authenticated longer ago than the specified number of seconds. The max_age request parameter overrides this default value. If omitted, no default Maximum Authentication Age is specified.",
    "type": "method",
    "file_path": "authlib\\authlib\\oidc\\registration\\claims.py",
    "ast_data": "FunctionDef name:validate_default_max_age arg:self arguments arg If BoolOp Compare Call Call Raise Call Call"
  },
  {
    "library": "matplotlib",
    "name": "set_linestyle",
    "source_code": "def set_linestyle(self, ls):\n    if ls is None:\n        ls = 'solid'\n    if ls in [' ', '', 'none']:\n        ls = 'None'\n    self._linestyle = ls\n    self._unscaled_dash_pattern = mlines._get_dash_pattern(ls)\n    self._dash_pattern = mlines._scale_dashes(*self._unscaled_dash_pattern, self._linewidth)\n    self.stale = True",
    "docstring": "Set the patch linestyle. ======================================================= ================ linestyle description ======================================================= ================ `` is an even length tuple of on and off ink in points. Parameters ---------- ls : {'-', '--', '-.', ':', '', (offset, on-off-seq), ...} The line style.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\patches.py",
    "ast_data": "FunctionDef name:set_linestyle arg:self arg:ls arguments arg arg If Compare Assign If Compare Assign Assign Assign Call Assign Call Assign"
  },
  {
    "library": "scipy",
    "name": "_moment",
    "source_code": "def _moment(a, order, axis, *, mean=None, xp=None):\n    xp = array_namespace(a) if xp is None else xp\n    a = xp_promote(a, force_floating=True, xp=xp)\n    dtype = a.dtype\n    if xp_size(a) == 0:\n        return xp.mean(a, axis=axis)\n    if order == 0 or (order == 1 and mean is None):\n        shape = list(a.shape)\n        del shape[axis]\n        temp = xp.ones(shape, dtype=dtype) if order == 0 else xp.zeros(shape, dtype=dtype)\n        return temp[()] if temp.ndim == 0 else temp\n    n_list = [order]\n    current_n = order\n    while current_n > 2:\n        if current_n % 2:\n            current_n = (current_n - 1) / 2\n        else:\n            current_n /= 2\n        n_list.append(current_n)\n    mean = xp.mean(a, axis=axis, keepdims=True) if mean is None else xp.asarray(mean, dtype=dtype)\n    mean = mean[()] if mean.ndim == 0 else mean\n    a_zero_mean = _demean(a, mean, axis, xp=xp)\n    if n_list[-1] == 1:\n        s = xp.asarray(a_zero_mean, copy=True)\n    else:\n        s = a_zero_mean ** 2\n    for n in n_list[-2::-1]:\n        s = s ** 2\n        if n % 2:\n            s *= a_zero_mean\n    return xp.mean(s, axis=axis)",
    "docstring": "Vectorized calculation of raw moment about specified center When is None, the mean is computed and used as the center; otherwise, the provided value is used as the center.",
    "type": "function",
    "file_path": "scipy\\scipy\\stats\\_stats_py.py",
    "ast_data": "FunctionDef name:_moment arg:a arg:order arg:axis arguments arg arg arg arg arg Assign Compare Call Assign Call Assign If Compare Call Return return:yes Call If BoolOp Compare BoolOp Compare Compare Assign Call Assign Compare Call Call Return return:yes Compare Assign Assign While Compare If Assign Call Assign Compare Call Call Assign Compare Assign Call If Compare Assign Call Assign For Assign If Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "get_seed",
    "source_code": "def get_seed(seed):\n    seed, seed2 = random_seed.get_seed(seed)\n    if seed is None:\n        seed = constant_op.constant(0, dtype=dtypes.int64, name='seed')\n    else:\n        seed = ops.convert_to_tensor(seed, dtype=dtypes.int64, name='seed')\n    if seed2 is None:\n        seed2 = constant_op.constant(0, dtype=dtypes.int64, name='seed2')\n    else:\n        with ops.name_scope('seed2') as scope:\n            seed2 = ops.convert_to_tensor(seed2, dtype=dtypes.int64)\n            seed2 = array_ops.where_v2(math_ops.logical_and(math_ops.equal(seed, 0), math_ops.equal(seed2, 0)), constant_op.constant(2 ** 31 - 1, dtype=dtypes.int64), seed2, name=scope)\n    return (seed, seed2)",
    "docstring": "Returns the local seeds an operation should use given an op-specific seed. See for more details. This wrapper adds support for the case where may be a tensor. Args: seed: An integer or a scalar tensor. Returns: A tuple of two scalar tensors that should be used for the local seed of the calling dataset.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\data\\util\\random_seed.py",
    "ast_data": "FunctionDef name:get_seed arg:seed arguments arg Assign Call If Compare Assign Call Assign Call If Compare Assign Call With Call Assign Call Assign Call Call Call Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "broadcast_to",
    "source_code": "def broadcast_to(rt_input, shape, broadcast_inner_dimensions=True):\n    if not isinstance(shape, RaggedTensorDynamicShape):\n        raise TypeError('shape must be a RaggedTensorDynamicShape')\n    rt_input = ragged_tensor.convert_to_tensor_or_ragged_tensor(rt_input)\n    if shape.num_partitioned_dimensions == 0:\n        return _broadcast_to_uniform_shape(rt_input, shape, broadcast_inner_dimensions)\n    else:\n        return _broadcast_to_ragged_shape(rt_input, shape, broadcast_inner_dimensions)",
    "docstring": "Broadcasts a potentially ragged tensor to a ragged shape. Tiles as necessary to match the given shape. Behavior is undefined if is not broadcast-compatible with . Args: rt_input: The potentially ragged tensor to broadcast. shape: A broadcast_inner_dimensions: If false, then inner dimensions will not be tiled. Returns: A potentially ragged tensor whose values are taken from , and whose shape matches .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\ragged_tensor_shape.py",
    "ast_data": "FunctionDef name:broadcast_to arg:rt_input arg:shape arg:broadcast_inner_dimensions arguments arg arg arg If Call Raise Call Assign Call If Compare Return return:yes Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_arg_max_flops",
    "source_code": "@ops.RegisterStatistics('ArgMax', 'flops')\ndef _arg_max_flops(graph, node):\n    return _reduction_op_flops(graph, node, reduce_flops=1, finalize_flops=0)",
    "docstring": "Compute flops for ArgMax operation.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\profiler\\internal\\flops_registry.py",
    "ast_data": "FunctionDef name:_arg_max_flops arg:graph arg:node arguments arg arg Return return:yes Call Call"
  },
  {
    "library": "scrapy",
    "name": "persist_file",
    "source_code": "def persist_file(self, path: str, buf: BytesIO, info: MediaPipeline.SpiderInfo, meta: dict[str, Any] | None=None, headers: dict[str, str] | None=None) -> Deferred[Any]:\n    key_name = f'{self.prefix}{path}'\n    buf.seek(0)\n    extra = self._headers_to_botocore_kwargs(self.HEADERS)\n    if headers:\n        extra.update(self._headers_to_botocore_kwargs(headers))\n    return deferToThread(self.s3_client.put_object, Bucket=self.bucket, Key=key_name, Body=buf, Metadata={k: str(v) for k, v in (meta or {}).items()}, ACL=self.POLICY, **extra)",
    "docstring": "Upload file to S3 storage",
    "type": "method",
    "file_path": "scrapy\\scrapy\\pipelines\\files.py",
    "ast_data": "FunctionDef name:persist_file arg:self arg:path arg:buf arg:info arg:meta arg:headers arguments arg arg arg arg arg arg Assign Call Assign Call If Call Call Return return:yes Call Call Call BoolOp"
  },
  {
    "library": "pytorch",
    "name": "_match_static_pattern_with_two_inputs",
    "source_code": "def _match_static_pattern_with_two_inputs(node: Node, modules: dict[str, nn.Module], qconfig_map: dict[str, QConfigAny], matching_modules_or_ops: list[Callable]) -> Union[tuple[Node, Node], tuple[None, None]]:\n    SKIP_LOWERING_VALUE = (None, None)\n    if node.op != 'call_function' or node.target != torch.quantize_per_tensor:\n        return SKIP_LOWERING_VALUE\n    q_node = node\n    ref_node = q_node.args[0]\n    assert isinstance(ref_node, Node)\n    if should_skip_lowering(ref_node, qconfig_map):\n        return SKIP_LOWERING_VALUE\n    if isinstance(matching_modules_or_ops[0], type) and issubclass(matching_modules_or_ops[0], nn.Module):\n        expected_op = 'call_module'\n        match_key = type(_get_module(ref_node, modules))\n    else:\n        return SKIP_LOWERING_VALUE\n    if ref_node.op != expected_op or match_key not in matching_modules_or_ops:\n        return SKIP_LOWERING_VALUE\n    if len(ref_node.args) != 2:\n        return SKIP_LOWERING_VALUE\n    for i in range(len(ref_node.args)):\n        arg = ref_node.args[i]\n        if not is_dequantize_node(arg):\n            return SKIP_LOWERING_VALUE\n    return (q_node, ref_node)",
    "docstring": "(dequantize Match the pattern (dequantize - ref node - quantize) against the node provided. If there is a match, return a 2-tuple of: 1) q_node: the quantize node, 2) ref_node: a reference module or functional node to replace with its quantized counterpart Otherwise, if there is no match, return a 2-tuple of (None, None). Parameters: node: The to match against. modules: A mapping from node names to modules in the model graph, used for module lookup. qconfig_map: A mapping from node names to the qconfigs associated with the nodes. If the corresponding qconfig for the reference node is None, then return no match. matching_modules_or_ops: Either a list of functions or a list of s. If the reference node is not in this list, then return no match.",
    "type": "function",
    "file_path": "pytorch\\torch\\ao\\quantization\\fx\\_lower_to_native_backend.py",
    "ast_data": "FunctionDef name:_match_static_pattern_with_two_inputs arg:node arg:modules arg:qconfig_map arg:matching_modules_or_ops arguments arg arg arg arg Assign If BoolOp Compare Compare Return return:yes Assign Assign Call If Call Return return:yes If BoolOp Call Call Assign Assign Call Call Return return:yes If BoolOp Compare Compare Return return:yes If Compare Call Return return:yes For Call Call Assign If Call Return return:yes Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "get_transfer_time",
    "source_code": "def get_transfer_time(flat_args_kwargs, flat_outs) -> float:\n    gpu_memory_bandwidth = get_gpu_dram_gbps()\n    read_bytes = sum((get_num_bytes(t) for t in flat_args_kwargs if isinstance(t, torch.Tensor)))\n    write_bytes = sum((get_num_bytes(t) for t in flat_outs if isinstance(t, torch.Tensor)))\n    counted_bytes = read_bytes + write_bytes\n    transfer_time = counted_bytes / gpu_memory_bandwidth\n    return transfer_time",
    "docstring": "Estimates the memory transfer time of input and output tensors. Args: flat_args_kwargs (List[torch.Tensor]): The flat list of arguments and keyword arguments. flat_outs (List[torch.Tensor]): The flat list of outputs. Returns: float: The estimated memory transfer time in nanoseconds.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\_tools\\runtime_estimator.py",
    "ast_data": "FunctionDef name:get_transfer_time arg:flat_args_kwargs arg:flat_outs arguments arg arg Assign Call Assign Call Call Call Assign Call Call Call Assign Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "activity_regularizer",
    "source_code": "@activity_regularizer.setter\ndef activity_regularizer(self, regularizer):\n    self._activity_regularizer = regularizer",
    "docstring": "Optional regularizer function for the output of this layer.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\base_layer.py",
    "ast_data": "FunctionDef name:activity_regularizer arg:self arg:regularizer arguments arg arg Assign"
  },
  {
    "library": "pytorch",
    "name": "get_axioms",
    "source_code": "@_lru_cache\ndef get_axioms(self, symbols: Optional[tuple[sympy.Symbol]]=None, compute_hint: bool=False) -> tuple[SympyBoolean, ...]:\n    if symbols is None:\n        runtime_asserts = (r.expr for rs in self.deferred_runtime_asserts.values() for r in rs)\n    else:\n        runtime_asserts = (r.expr for s in symbols if s not in self.var_to_val for r in self.deferred_runtime_asserts.get(s, ()))\n    guards: Iterator[SympyBoolean] = (g.expr for g in self.guards)\n    axioms: Iterator[SympyBoolean] = itertools.chain(guards, runtime_asserts)\n    if compute_hint:\n        axioms = (canonicalize_bool_expr(a.xreplace(self.var_to_val)) for a in axioms)\n    return tuple(dict.fromkeys(axioms).keys())",
    "docstring": "Given the symbols in an expression, it returns all the runtime asserts that have those symbols concatenated with all the guards. If symbols is None, it returns all the runtime asserts (and all the guards)",
    "type": "method",
    "file_path": "pytorch\\torch\\fx\\experimental\\symbolic_shapes.py",
    "ast_data": "FunctionDef name:get_axioms arg:self arg:symbols arg:compute_hint arguments arg arg arg If Compare Assign Call Assign Compare Call Call If Assign Call Call Return return:yes Call Call Call"
  },
  {
    "library": "numpy",
    "name": "_replace_dtype_fields_recursive",
    "source_code": "def _replace_dtype_fields_recursive(dtype, primitive_dtype):\n    _recurse = _replace_dtype_fields_recursive\n    if dtype.names is not None:\n        descr = []\n        for name in dtype.names:\n            field = dtype.fields[name]\n            if len(field) == 3:\n                name = (field[-1], name)\n            descr.append((name, _recurse(field[0], primitive_dtype)))\n        new_dtype = np.dtype(descr)\n    elif dtype.subdtype:\n        descr = list(dtype.subdtype)\n        descr[0] = _recurse(dtype.subdtype[0], primitive_dtype)\n        new_dtype = np.dtype(tuple(descr))\n    else:\n        new_dtype = primitive_dtype\n    if new_dtype == dtype:\n        new_dtype = dtype\n    return new_dtype",
    "docstring": "Private function allowing recursion in _replace_dtype_fields.",
    "type": "function",
    "file_path": "numpy\\numpy\\ma\\core.py",
    "ast_data": "FunctionDef name:_replace_dtype_fields_recursive arg:dtype arg:primitive_dtype arguments arg arg Assign If Compare Assign For Assign If Compare Call Assign Call Call Assign Call If Assign Call Assign Call Assign Call Call Assign If Compare Assign Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "draw",
    "source_code": "def draw(self, drawDC=None):\n    _log.debug('%s - draw()', type(self))\n    self.renderer = RendererWx(self.bitmap, self.figure.dpi)\n    self.figure.draw(self.renderer)\n    self._isDrawn = True\n    self.gui_repaint(drawDC=drawDC)",
    "docstring": "Render the figure using RendererWx instance renderer, or using a previously defined renderer if none is specified.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\backends\\backend_wx.py",
    "ast_data": "FunctionDef name:draw arg:self arg:drawDC arguments arg arg Call Call Assign Call Call Assign Call"
  },
  {
    "library": "kornia",
    "name": "merge_with_descriptors",
    "source_code": "def merge_with_descriptors(self, descriptors: Tensor) -> DISKFeatures:\n    dtype = descriptors.dtype\n    x, y = self.xys.T\n    desc = descriptors[:, y, x].T\n    desc = F.normalize(desc, dim=-1)\n    return DISKFeatures(self.xys.to(dtype), desc, self.detection_logp)",
    "docstring": "Select descriptors from a dense tensor, at locations given by .",
    "type": "method",
    "file_path": "kornia\\kornia\\feature\\disk\\structs.py",
    "ast_data": "FunctionDef name:merge_with_descriptors arg:self arg:descriptors arguments arg arg Assign Assign Assign Assign Call Return return:yes Call Call"
  },
  {
    "library": "scipy",
    "name": "_mode",
    "source_code": "def _mode(self, dim, df, scale):\n    return scale / (df + dim + 1)",
    "docstring": "Mode of the inverse Wishart distribution. Parameters ---------- dim : int Dimension of the scale matrix %(_doc_default_callparams)s Notes ----- As this function does no argument checking, it should not be called directly; use 'mode' instead.",
    "type": "method",
    "file_path": "scipy\\scipy\\stats\\_multivariate.py",
    "ast_data": "FunctionDef name:_mode arg:self arg:dim arg:df arg:scale arguments arg arg arg arg Return return:yes"
  },
  {
    "library": "authlib",
    "name": "OAuth1Auth",
    "source_code": "class OAuth1Auth(Auth, ClientAuth):\n    requires_request_body = True\n\n    def auth_flow(self, request: Request) -> typing.Generator[Request, Response, None]:\n        url, headers, body = self.prepare(request.method, str(request.url), request.headers, request.content)\n        headers['Content-Length'] = str(len(body))\n        yield build_request(url=url, headers=headers, body=body, initial_request=request)",
    "docstring": "Signs the httpx request using OAuth 1 (RFC5849).",
    "type": "class",
    "file_path": "authlib\\authlib\\integrations\\httpx_client\\oauth1_client.py",
    "ast_data": "ClassDef name:OAuth1Auth Assign FunctionDef name:auth_flow arg:self arg:request arguments arg arg Assign Call Call Assign Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "master",
    "source_code": "def master(self, task_type=None, task_id=None, rpc_layer=None):\n    session_master = _get_value_in_tfconfig(_SESSION_MASTER_KEY)\n    if session_master is not None:\n        return session_master\n    cluster_spec = self.cluster_spec()\n    if not cluster_spec.jobs or (len(cluster_spec.jobs) == 1 and len(cluster_spec.job_tasks(cluster_spec.jobs[0])) == 1):\n        return ''\n    task_type = task_type if task_type is not None else self.task_type\n    task_id = task_id if task_id is not None else self.task_id\n    rpc_layer = rpc_layer if rpc_layer is not None else self.rpc_layer\n    return format_master_url(cluster_spec.task_address(task_type, task_id), rpc_layer)",
    "docstring": "Returns the master address to use when creating a TensorFlow session. Note: this is only useful for TensorFlow 1.x. Args: task_type: (String, optional) Overrides and sets the task_type of the master. task_id: (Integer, optional) Overrides and sets the task id of the master. rpc_layer: (String, optional) Overrides and sets the protocol over which TensorFlow nodes communicate with each other. Returns: The address of the master. Raises: RuntimeError: If the task_type or task_id is not specified and the environment variable does not contain a task section.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\cluster_resolver\\tfconfig_cluster_resolver.py",
    "ast_data": "FunctionDef name:master arg:self arg:task_type arg:task_id arg:rpc_layer arguments arg arg arg arg Assign Call If Compare Return return:yes Assign Call If BoolOp BoolOp Compare Call Compare Call Call Return return:yes Assign Compare Assign Compare Assign Compare Return return:yes Call Call"
  },
  {
    "library": "django",
    "name": "StrIndex",
    "source_code": "class StrIndex(Func):\n    function = 'INSTR'\n    arity = 2\n    output_field = IntegerField()\n\n    def as_postgresql(self, compiler, connection, **extra_context):\n        return super().as_sql(compiler, connection, function='STRPOS', **extra_context)",
    "docstring": "Return a positive integer corresponding to the 1-indexed position of the first occurrence of a substring inside another string, or 0 if the substring is not found.",
    "type": "class",
    "file_path": "django\\django\\db\\models\\functions\\text.py",
    "ast_data": "ClassDef name:StrIndex Assign Assign Assign Call FunctionDef name:as_postgresql arg:self arg:compiler arg:connection arguments arg arg arg arg Return return:yes Call Call"
  },
  {
    "library": "matplotlib",
    "name": "is_scalar_or_string",
    "source_code": "def is_scalar_or_string(val):\n    return isinstance(val, str) or not np.iterable(val)",
    "docstring": "Return whether the given object is a scalar or string like.",
    "type": "function",
    "file_path": "matplotlib\\lib\\matplotlib\\cbook.py",
    "ast_data": "FunctionDef name:is_scalar_or_string arg:val arguments arg Return return:yes BoolOp Call Call"
  },
  {
    "library": "django",
    "name": "content",
    "source_code": "@content.setter\ndef content(self, value):\n    HttpResponse.content.fset(self, value)\n    self._is_rendered = True",
    "docstring": "Set the content for the response.",
    "type": "method",
    "file_path": "django\\django\\template\\response.py",
    "ast_data": "FunctionDef name:content arg:self arg:value arguments arg arg Call Assign"
  },
  {
    "library": "tensorflow",
    "name": "_inference_name",
    "source_code": "def _inference_name(n):\n    return '%s%s_%s' % (_INFERENCE_PREFIX, n, ops.uid())",
    "docstring": "The name of a forward-but-no-gradient defun named n.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\polymorphic_function\\concrete_function.py",
    "ast_data": "FunctionDef name:_inference_name arg:n arguments arg Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "_logm_superdiag_entry",
    "source_code": "def _logm_superdiag_entry(l1, l2, t12):\n    if l1 == l2:\n        f12 = t12 / l1\n    elif abs(l2 - l1) > abs(l1 + l2) / 2:\n        f12 = t12 * (np.log(l2) - np.log(l1)) / (l2 - l1)\n    else:\n        z = (l2 - l1) / (l2 + l1)\n        u = _unwindk(np.log(l2) - np.log(l1))\n        if u:\n            f12 = t12 * 2 * (np.arctanh(z) + np.pi * 1j * u) / (l2 - l1)\n        else:\n            f12 = t12 * 2 * np.arctanh(z) / (l2 - l1)\n    return f12",
    "docstring": "Compute a superdiagonal entry of a matrix logarithm. This is like Eq. (11.28) in [1]_, except the determination of whether l1 and l2 are sufficiently far apart has been modified. Parameters ---------- l1 : complex A diagonal entry of the matrix. l2 : complex A diagonal entry of the matrix. t12 : complex A superdiagonal entry of the matrix. Returns ------- f12 : complex A superdiagonal entry of the matrix logarithm. Notes ----- Care has been taken to return a real number if possible when all of the inputs are real numbers. References ---------- .. [1] Nicholas J. Higham (2008) \"Functions of Matrices: Theory and Computation\" ISBN 978-0-898716-46-7",
    "type": "function",
    "file_path": "scipy\\scipy\\linalg\\_matfuncs_inv_ssq.py",
    "ast_data": "FunctionDef name:_logm_superdiag_entry arg:l1 arg:l2 arg:t12 arguments arg arg arg If Compare Assign If Compare Call Call Assign Call Call Assign Assign Call Call Call If Assign Call Assign Call Return return:yes"
  },
  {
    "library": "kornia",
    "name": "transform_tensor",
    "source_code": "def transform_tensor(self, input: Tensor, *, shape: Optional[Tensor]=None, match_channel: bool=True) -> Tensor:\n    _validate_input_dtype(input, accepted_dtypes=[torch.float16, torch.float32, torch.float64])\n    if shape is None:\n        return _transform_input(input)\n    else:\n        return _transform_input_by_shape(input, reference_shape=shape, match_channel=match_channel)",
    "docstring": "Convert any incoming (H, W), (C, H, W) and (B, C, H, W) into (B, C, H, W).",
    "type": "method",
    "file_path": "kornia\\kornia\\augmentation\\_2d\\mix\\base.py",
    "ast_data": "FunctionDef name:transform_tensor arg:self arg:input arguments arg arg arg arg Call If Compare Return return:yes Call Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "_is_valid_endpoint",
    "source_code": "def _is_valid_endpoint(endpoint) -> bool:\n    return any([is_number(endpoint), isinstance(endpoint, Timestamp), isinstance(endpoint, Timedelta), endpoint is None])",
    "docstring": "Helper for interval_range to check if start/end are valid types.",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\indexes\\interval.py",
    "ast_data": "FunctionDef name:_is_valid_endpoint arg:endpoint arguments arg Return return:yes Call Call Call Call Compare"
  },
  {
    "library": "matplotlib",
    "name": "format_pct",
    "source_code": "def format_pct(self, x, display_range):\n    x = self.convert_to_pct(x)\n    if self.decimals is None:\n        scaled_range = self.convert_to_pct(display_range)\n        if scaled_range <= 0:\n            decimals = 0\n        else:\n            decimals = math.ceil(2.0 - math.log10(2.0 * scaled_range))\n            if decimals > 5:\n                decimals = 5\n            elif decimals < 0:\n                decimals = 0\n    else:\n        decimals = self.decimals\n    s = f'{x:0.{int(decimals)}f}'\n    return s + self.symbol",
    "docstring": "Format the number as a percentage number with the correct number of decimals and adds the percent symbol, if any. If `None` => 34.50% ... ... ... ============= ======== ======================= This method will not be very good for tiny axis ranges or extremely large ones. It assumes that the values on the chart are percentages displayed on a reasonable scale.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\ticker.py",
    "ast_data": "FunctionDef name:format_pct arg:self arg:x arg:display_range arguments arg arg arg Assign Call If Compare Assign Call If Compare Assign Assign Call Call If Compare Assign If Compare Assign Assign Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_serialize_to_tensors",
    "source_code": "def _serialize_to_tensors(self):\n    tensors = self.export()\n    return {'-keys': tensors[0], '-values': tensors[1]}",
    "docstring": "Implements checkpointing interface in .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\lookup_ops.py",
    "ast_data": "FunctionDef name:_serialize_to_tensors arg:self arguments arg Assign Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "index_to_str",
    "source_code": "def index_to_str(self, index: sympy.Expr) -> str:\n    return cexpr(self.rename_indexing(index))",
    "docstring": "Convert an index expr to a string that can be used in cpp code. e.g. a sympy expression \"s2\" may actually appear as \"ks1\" in the cpp kernel.",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\codegen\\cpp.py",
    "ast_data": "FunctionDef name:index_to_str arg:self arg:index arguments arg arg Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "gather_nd",
    "source_code": "def gather_nd(self, indices, name=None):\n    with ops.name_scope('GatherNd' if name is None else name) as name:\n        if self.trainable:\n            variable_accessed(self)\n        value = gen_resource_variable_ops.resource_gather_nd(self.handle, indices, dtype=self._dtype, name=name)\n    return array_ops.identity(value)",
    "docstring": "Reads the value of this variable sparsely, using .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\resource_variable_ops.py",
    "ast_data": "FunctionDef name:gather_nd arg:self arg:indices arg:name arguments arg arg arg With Call Compare If Call Assign Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_is_valid_regression_signature",
    "source_code": "def _is_valid_regression_signature(signature_def):\n    if signature_def.method_name != signature_constants.REGRESS_METHOD_NAME:\n        return False\n    if set(signature_def.inputs.keys()) != set([signature_constants.REGRESS_INPUTS]):\n        return False\n    if signature_def.inputs[signature_constants.REGRESS_INPUTS].dtype != types_pb2.DT_STRING:\n        return False\n    if set(signature_def.outputs.keys()) != set([signature_constants.REGRESS_OUTPUTS]):\n        return False\n    if signature_def.outputs[signature_constants.REGRESS_OUTPUTS].dtype != types_pb2.DT_FLOAT:\n        return False\n    return True",
    "docstring": "Determine whether the argument is a servable 'regress' SignatureDef.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\saved_model\\signature_def_utils_impl.py",
    "ast_data": "FunctionDef name:_is_valid_regression_signature arg:signature_def arguments arg If Compare Return return:yes If Compare Call Call Call Return return:yes If Compare Return return:yes If Compare Call Call Call Return return:yes If Compare Return return:yes Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "get_signatures_from_saved_model",
    "source_code": "def get_signatures_from_saved_model(saved_model_path: str, signature_keys: Optional[Sequence[str]]=None, tags: Optional[Collection[str]]=None) -> Dict[str, meta_graph_pb2.SignatureDef]:\n    if tags is None:\n        tags = {tag_constants.SERVING}\n    loader = saved_model_loader.SavedModelLoader(saved_model_path)\n    meta_graphdef = loader.get_meta_graph_def_from_tags(tags)\n    signatures = {}\n    for key, signature_def in meta_graphdef.signature_def.items():\n        if key == saved_model_constants.INIT_OP_SIGNATURE_KEY:\n            continue\n        if signature_keys is not None and key not in signature_keys:\n            continue\n        signatures[key] = signature_def\n    return signatures",
    "docstring": "Gets a map from signature keys to their SignatureDef. Args: saved_model_path: Path to the saved model. signature_keys: List of keys identifying SignatureDef to retrieve. If None, retrieve all except the init signature. tags: Set of tags identifying the MetaGraphDef within the SavedModel. Returns: A map from signature_key to its SignatureDef.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\compiler\\mlir\\quantization\\tensorflow\\python\\save_model.py",
    "ast_data": "FunctionDef name:get_signatures_from_saved_model arg:saved_model_path arg:signature_keys arg:tags arguments arg arg arg If Compare Assign Assign Call Assign Call Assign For Call If Compare If BoolOp Compare Compare Assign Return return:yes"
  },
  {
    "library": "pandas",
    "name": "_validate_names",
    "source_code": "@final\ndef _validate_names(self, name=None, names=None, deep: bool=False) -> list[Hashable]:\n    from copy import deepcopy\n    if names is not None and name is not None:\n        raise TypeError('Can only provide one of `names` and `name`')\n    if names is None and name is None:\n        new_names = deepcopy(self.names) if deep else self.names\n    elif names is not None:\n        if not is_list_like(names):\n            raise TypeError('Must pass list-like as `names`.')\n        new_names = names\n    elif not is_list_like(name):\n        new_names = [name]\n    else:\n        new_names = name\n    if len(new_names) != len(self.names):\n        raise ValueError(f'Length of new names must be {len(self.names)}, got {len(new_names)}')\n    validate_all_hashable(*new_names, error_name=f'{type(self).__name__}.name')\n    return new_names",
    "docstring": "Handles the quirks of having a singular 'name' parameter for general Index and plural 'names' parameter for MultiIndex.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\indexes\\base.py",
    "ast_data": "FunctionDef name:_validate_names arg:self arg:name arg:names arg:deep arguments arg arg arg arg If BoolOp Compare Compare Raise Call If BoolOp Compare Compare Assign Call If Compare If Call Raise Call Assign If Call Assign Assign If Compare Call Call Raise Call Call Call Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "contiguous_last_dim",
    "source_code": "def contiguous_last_dim(x):\n    strides = x.maybe_get_stride()\n    if strides and strides[-1] != 1:\n        contiguous_stride_order = list(reversed(range(len(x.get_size()))))\n        return ExternKernel.require_stride_order(x, contiguous_stride_order)\n    return x",
    "docstring": "Ensure that realized IR node has a contigous stride in the last dimension.",
    "type": "function",
    "file_path": "pytorch\\torch\\_inductor\\kernel\\flex_attention.py",
    "ast_data": "FunctionDef name:contiguous_last_dim arg:x arguments arg Assign Call If BoolOp Compare Assign Call Call Call Call Call Return return:yes Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "serialize_object_graph",
    "source_code": "def serialize_object_graph(self, saveables_cache=None):\n    named_saveable_objects, object_graph_proto, feed_additions, _ = save_util_v1.serialize_object_graph_with_registered_savers(self, saveables_cache)\n    return (named_saveable_objects, object_graph_proto, feed_additions)",
    "docstring": "Determine checkpoint keys for variables and build a serialized graph. Non-slot variables are keyed based on a shortest path from the root saveable to the object which owns the variable (i.e. the one which called to create it). Slot variables are keyed based on a shortest path to the variable being slotted for, a shortest path to their optimizer, and the slot name. Args: saveables_cache: An optional cache storing previously created SaveableObjects created for each Trackable. Maps Trackables to a dictionary of attribute names to Trackable. Returns: A tuple of (named_variables, object_graph_proto, feed_additions): named_variables: A dictionary mapping names to variable objects. object_graph_proto: A TrackableObjectGraph protocol buffer containing the serialized object graph and variable references. feed_additions: A dictionary mapping from Tensors to values which should be fed when saving. Raises: ValueError: If there are invalid characters in an optimizer's slot names.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\checkpoint\\graph_view.py",
    "ast_data": "FunctionDef name:serialize_object_graph arg:self arg:saveables_cache arguments arg arg Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_MatrixInverseGrad",
    "source_code": "@ops.RegisterGradient('MatrixInverse')\ndef _MatrixInverseGrad(op: ops.Operation, grad):\n    ainv = op.outputs[0]\n    op_adjoint = op.get_attr('adjoint')\n    return -math_ops.matmul(ainv, math_ops.matmul(grad, ainv, adjoint_a=op_adjoint, adjoint_b=not op_adjoint), adjoint_a=not op_adjoint)",
    "docstring": "Gradient for MatrixInverse.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\linalg_grad.py",
    "ast_data": "FunctionDef name:_MatrixInverseGrad arg:op arg:grad arguments arg arg Assign Assign Call Return return:yes Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_CountDownTimer",
    "source_code": "class _CountDownTimer:\n    __slots__ = ['_start_time_secs', '_duration_secs']\n\n    def __init__(self, duration_secs):\n        self._start_time_secs = time.time()\n        self._duration_secs = duration_secs\n\n    def secs_remaining(self):\n        diff = self._duration_secs - (time.time() - self._start_time_secs)\n        return max(0, diff)",
    "docstring": "A timer that tracks a duration since creation.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\training\\session_manager.py",
    "ast_data": "ClassDef name:_CountDownTimer Assign FunctionDef name:__init__ arg:self arg:duration_secs arguments arg arg Assign Call Assign FunctionDef name:secs_remaining arg:self arguments arg Assign Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "FixedQParamsFakeQuantize",
    "source_code": "class FixedQParamsFakeQuantize(FakeQuantize):\n\n    def __init__(self, observer):\n        super().__init__(observer=observer)\n        assert type(self.activation_post_process) == FixedQParamsObserver, f\"{self.__class__.__name__}'s observer must be a {FixedQParamsObserver.__name__}\"\n        self._observer_ctr = observer\n        self.scale = self.activation_post_process.scale\n        self.zero_point = self.activation_post_process.zero_point\n        assert _is_per_tensor(self.qscheme), 'Only per tensor quantization is supported' + ' FixedQParamsFakeQuantize module, got qscheme:' + str(self.qscheme)\n\n    @torch.jit.export\n    def calculate_qparams(self):\n        return (self.scale, self.zero_point)\n\n    @torch.jit.export\n    def extra_repr(self):\n        return f'fake_quant_enabled={self.fake_quant_enabled}, observer_enabled={self.observer_enabled}, scale={self.scale}, zero_point={self.zero_point}, dtype={self.dtype}, quant_min={self.activation_post_process.quant_min}, quant_max={self.activation_post_process.quant_max}, qscheme={self.qscheme}'",
    "docstring": "Simulate quantize and dequantize in training time. Simulate quantize and dequantize with fixed quantization parameters in training time. Only per tensor quantization is supported.",
    "type": "class",
    "file_path": "pytorch\\torch\\ao\\quantization\\fake_quantize.py",
    "ast_data": "ClassDef name:FixedQParamsFakeQuantize FunctionDef name:__init__ arg:self arg:observer arguments arg arg Call Call Compare Call Assign Assign Assign Call Call FunctionDef name:calculate_qparams arg:self arguments arg Return return:yes FunctionDef name:extra_repr arg:self arguments arg Return return:yes"
  },
  {
    "library": "scipy",
    "name": "vec",
    "source_code": "def vec(M):\n    return M.T.ravel()",
    "docstring": "Stack columns of M to construct a single vector. This is somewhat standard notation in linear algebra. Parameters ---------- M : 2-D array_like Input matrix Returns ------- v : 1-D ndarray Output vector",
    "type": "function",
    "file_path": "scipy\\scipy\\linalg\\_expm_frechet.py",
    "ast_data": "FunctionDef name:vec arg:M arguments arg Return return:yes Call"
  },
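For reference, a tiny NumPy example of the column-stacking convention used by vec() above:

```python
import numpy as np

# vec stacks columns, so vec(M)[i + j*m] == M[i, j] for an m-by-n M.
M = np.array([[1, 2],
              [3, 4]])
v = M.T.ravel()  # same operation as vec(M)
print(v)         # [1 3 2 4] -- column [1, 3] first, then [2, 4]
```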
  {
    "library": "tensorflow",
    "name": "truncated_normal",
    "source_code": "@tf_export('random.truncated_normal', v1=['random.truncated_normal', 'truncated_normal'])\n@dispatch.add_dispatch_support\n@deprecation.deprecated_endpoints('truncated_normal')\ndef truncated_normal(shape, mean=0.0, stddev=1.0, dtype=dtypes.float32, seed=None, name=None):\n    with ops.name_scope(name, 'truncated_normal', [shape, mean, stddev]) as name:\n        shape_tensor = shape_util.shape_tensor(shape)\n        mean_tensor = ops.convert_to_tensor(mean, dtype=dtype, name='mean')\n        stddev_tensor = ops.convert_to_tensor(stddev, dtype=dtype, name='stddev')\n        seed1, seed2 = random_seed.get_seed(seed)\n        rnd = gen_random_ops.truncated_normal(shape_tensor, dtype, seed=seed1, seed2=seed2)\n        mul = rnd * stddev_tensor\n        value = math_ops.add(mul, mean_tensor, name=name)\n        shape_util.maybe_set_static_shape(value, shape)\n        return value",
    "docstring": "Outputs random values from a truncated normal distribution. The values are drawn from a normal distribution with specified mean and standard deviation, discarding and re-drawing any samples that are more than two standard deviations from the mean. Examples: >>> tf.random.truncated_normal(shape=[2]) >>> tf.random.truncated_normal(shape=[2], mean=3, stddev=1, dtype=tf.float32) Args: shape: A 1-D integer Tensor or Python array. The shape of the output tensor. mean: A 0-D Tensor or Python value of type . The mean of the truncated normal distribution. stddev: A 0-D Tensor or Python value of type . The standard deviation of the normal distribution, before truncation. dtype: The type of the output. Restricted to floating-point types: , , , etc. seed: A Python integer. Used to create a random seed for the distribution. See for more information. name: A name for the operation (optional). Returns: A tensor of the specified shape filled with random truncated normal values.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\random_ops.py",
    "ast_data": "FunctionDef name:truncated_normal arg:shape arg:mean arg:stddev arg:dtype arg:seed arg:name arguments arg arg arg arg arg arg With Call Assign Call Assign Call Assign Call Assign Call Assign Call Assign Assign Call Call Return return:yes Call Call"
  },
  {
    "library": "authlib",
    "name": "add_to_headers",
    "source_code": "def add_to_headers(token, headers=None):\n    headers = headers or {}\n    headers['Authorization'] = f'Bearer {token}'\n    return headers",
    "docstring": "Add a Bearer Token to the request URI. Recommended method of passing bearer tokens. Authorization: Bearer h480djs93hd8",
    "type": "function",
    "file_path": "authlib\\authlib\\oauth2\\rfc6750\\parameters.py",
    "ast_data": "FunctionDef name:add_to_headers arg:token arg:headers arguments arg arg Assign BoolOp Assign Return return:yes"
  },
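A short usage sketch of the header-based bearer scheme above (the helper is re-declared here only to keep the example self-contained):

```python
# Usage sketch: merge a bearer token into existing request headers.
def add_to_headers(token, headers=None):
    headers = headers or {}
    headers["Authorization"] = f"Bearer {token}"
    return headers

headers = add_to_headers("h480djs93hd8", {"Accept": "application/json"})
print(headers)
# {'Accept': 'application/json', 'Authorization': 'Bearer h480djs93hd8'}
```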
  {
    "library": "kornia",
    "name": "WelschLoss",
    "source_code": "class WelschLoss(Module):\n\n    def __init__(self, reduction: str='none') -> None:\n        super().__init__()\n        self.reduction = reduction\n\n    def forward(self, img1: Tensor, img2: Tensor) -> Tensor:\n        return welsch_loss(img1=img1, img2=img2, reduction=self.reduction)",
    "docstring": "Criterion that computes the Welsch [2] (aka. Leclerc [3]) loss. According to [1], we compute the Welsch loss as follows: .. math:: \\text{WL}(x, y) = 1 - exp(-\\frac{1}{2} (x - y)^{2}) Where: - :math: is the prediction. - :math: is the target to be regressed to. Reference: [1] [2] [3] Args: reduction: Specifies the reduction to apply to the output: `(*)`. - img2: the target tensor with the same shape as img1. Example: >>> criterion = WelschLoss(reduction=\"mean\") >>> img1 = torch.randn(2, 3, 32, 1904, requires_grad=True) >>> img2 = torch.randn(2, 3, 32, 1904) >>> output = criterion(img1, img2) >>> output.backward()",
    "type": "class",
    "file_path": "kornia\\kornia\\losses\\welsch.py",
    "ast_data": "ClassDef name:WelschLoss FunctionDef name:__init__ arg:self arg:reduction arguments arg arg Call Call Assign FunctionDef name:forward arg:self arg:img1 arg:img2 arguments arg arg arg Return return:yes Call"
  },
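A minimal PyTorch sketch of the formula in the docstring, WL = 1 - exp(-0.5 (x - y)^2), independent of kornia's actual implementation; welsch_loss_sketch is a hypothetical stand-in:

```python
import torch

# Hypothetical stand-in implementing the docstring formula directly.
def welsch_loss_sketch(pred: torch.Tensor, target: torch.Tensor) -> torch.Tensor:
    return 1.0 - torch.exp(-0.5 * (pred - target) ** 2)

img1 = torch.randn(2, 3, 8, 8, requires_grad=True)
img2 = torch.randn(2, 3, 8, 8)
loss = welsch_loss_sketch(img1, img2).mean()  # 'mean' reduction
loss.backward()  # gradients flow through the elementwise loss
```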
  {
    "library": "tensorflow",
    "name": "value_container",
    "source_code": "def value_container(self, value):\n    raise NotImplementedError('must be implemented in descendants')",
    "docstring": "Returns the container that this per-replica belongs to. Args: value: A value returned by or a variable created in . Returns: A container that belongs to. If value does not belong to any container (including the case of container having been destroyed), returns the value itself. will always be true.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\distribute_lib.py",
    "ast_data": "FunctionDef name:value_container arg:self arg:value arguments arg arg Raise Call"
  },
  {
    "library": "matplotlib",
    "name": "set_alpha",
    "source_code": "def set_alpha(self, alpha):\n    if alpha is not None and (not isinstance(alpha, Real)):\n        raise TypeError(f'alpha must be numeric or None, not {type(alpha)}')\n    if alpha is not None and (not 0 <= alpha <= 1):\n        raise ValueError(f'alpha ({alpha}) is outside 0-1 range')\n    if alpha != self._alpha:\n        self._alpha = alpha\n        self.pchanged()\n        self.stale = True",
    "docstring": "Set the alpha value used for blending - not supported on all backends. Parameters ---------- alpha : float or None *alpha* must be within the 0-1 range, inclusive.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\artist.py",
    "ast_data": "FunctionDef name:set_alpha arg:self arg:alpha arguments arg arg If BoolOp Compare Call Raise Call Call If BoolOp Compare Compare Raise Call If Compare Assign Call Assign"
  },
  {
    "library": "tensorflow",
    "name": "_FloorDivGrad",
    "source_code": "@ops.RegisterGradient('FloorDiv')\ndef _FloorDivGrad(_, unused_grad):\n    return (None, None)",
    "docstring": "The gradient for the FloorDiv operator.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\math_grad.py",
    "ast_data": "FunctionDef name:_FloorDivGrad arg:_ arg:unused_grad arguments arg arg Return return:no Call"
  },
  {
    "library": "cherrypy",
    "name": "serve_download",
    "source_code": "def serve_download(path, name=None):\n    return serve_file(path, 'application/x-download', 'attachment', name)",
    "docstring": "Serve 'path' as an application/x-download attachment.",
    "type": "function",
    "file_path": "cherrypy\\cherrypy\\lib\\static.py",
    "ast_data": "FunctionDef name:serve_download arg:path arg:name arguments arg arg Return return:yes Call"
  },
  {
    "library": "numpy",
    "name": "__radd__",
    "source_code": "def __radd__(self, other):\n    return add(other, self)",
    "docstring": "Return (other + self), that is string concatenation, element-wise for a pair of array_likes of or . See Also -------- add",
    "type": "method",
    "file_path": "numpy\\numpy\\_core\\defchararray.py",
    "ast_data": "FunctionDef name:__radd__ arg:self arg:other arguments arg arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "show",
    "source_code": "def show():\n    if _SMCLI_ALL.value:\n        _show_all(_SMCLI_DIR.value)\n    elif _SMCLI_TAG_SET.value is None:\n        if _SMCLI_LIST_OPS.value:\n            print('--list_ops must be paired with a tag-set or with --all.')\n        _show_tag_sets(_SMCLI_DIR.value)\n    else:\n        if _SMCLI_LIST_OPS.value:\n            _show_ops_in_metagraph(_SMCLI_DIR.value, _SMCLI_TAG_SET.value)\n        if _SMCLI_SIGNATURE_DEF.value is None:\n            _show_signature_def_map_keys(_SMCLI_DIR.value, _SMCLI_TAG_SET.value)\n        else:\n            _show_inputs_outputs(_SMCLI_DIR.value, _SMCLI_TAG_SET.value, _SMCLI_SIGNATURE_DEF.value)",
    "docstring": "Function triggered by show command.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\tools\\saved_model_cli.py",
    "ast_data": "FunctionDef name:show arguments If Call If Compare If Call Call If Call If Compare Call Call"
  },
  {
    "library": "django",
    "name": "has_perms",
    "source_code": "def has_perms(self, perm_list, obj=None):\n    if not isinstance(perm_list, Iterable) or isinstance(perm_list, str):\n        raise ValueError('perm_list must be an iterable of permissions.')\n    return all((self.has_perm(perm, obj) for perm in perm_list))",
    "docstring": "Return True if the user has each of the specified permissions. If object is passed, check if the user has all required perms for it.",
    "type": "method",
    "file_path": "django\\django\\contrib\\auth\\models.py",
    "ast_data": "FunctionDef name:has_perms arg:self arg:perm_list arg:obj arguments arg arg arg If BoolOp Call Call Raise Call Return return:yes Call Call"
  },
  {
    "library": "matplotlib",
    "name": "_scale_polygon",
    "source_code": "def _scale_polygon(self, event):\n    if not self._selection_completed:\n        return\n    if self._old_box_extents == self._box.extents:\n        return\n    x1, y1, w1, h1 = self._box._rect_bbox\n    old_bbox = self._get_bbox()\n    t = transforms.Affine2D().translate(-old_bbox.x0, -old_bbox.y0).scale(1 / old_bbox.width, 1 / old_bbox.height).scale(w1, h1).translate(x1, y1)\n    new_verts = [(x, y) for x, y in t.transform(np.array(self.verts))]\n    self._xys = [*new_verts, new_verts[0]]\n    self._draw_polygon()\n    self._old_box_extents = self._box.extents",
    "docstring": "Scale the polygon selector points when the bounding box is moved or scaled. This is set as a callback on the bounding box RectangleSelector.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\widgets.py",
    "ast_data": "FunctionDef name:_scale_polygon arg:self arg:event arguments arg arg If Return return:no If Compare Return return:no Assign Assign Call Assign Call Call Call Call Call Assign Call Call Assign Call Assign"
  },
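The transform chain above normalizes points into the old bbox's unit square and maps them into the new box; a standalone check with made-up box values:

```python
import numpy as np
from matplotlib import transforms

# Made-up boxes: old bbox (x0=1, y0=1, w=2, h=4) -> new 1x1 bbox at the origin.
t = (transforms.Affine2D()
     .translate(-1.0, -1.0)    # shift old bbox corner to the origin
     .scale(1 / 2.0, 1 / 4.0)  # normalize to the unit square
     .scale(1.0, 1.0)          # scale to the new bbox size
     .translate(0.0, 0.0))     # shift to the new bbox corner
print(t.transform(np.array([[3.0, 5.0]])))  # far corner maps to [[1. 1.]]
```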
  {
    "library": "tensorflow",
    "name": "ragged_bool",
    "source_code": "def ragged_bool(self):\n    raise TypeError('RaggedTensor may not be used as a boolean.')",
    "docstring": "Raises TypeError when a RaggedTensor is used as a Python bool. To prevent RaggedTensor from being used as a bool, this function always raise TypeError when being called. For example: >>> x = tf.ragged.constant([[1, 2], [3]]) >>> result = True if x else False # Evaluate x as a bool value. Traceback (most recent call last): ... TypeError: RaggedTensor may not be used as a boolean. >>> x = tf.ragged.constant([[1]]) >>> r = (x == 1) # tf.RaggedTensor [[True]] >>> if r: # Evaluate r as a bool value. ... pass Traceback (most recent call last): ... TypeError: RaggedTensor may not be used as a boolean.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\ragged_operators.py",
    "ast_data": "FunctionDef name:ragged_bool arg:self arguments arg Raise Call"
  },
  {
    "library": "sphinx",
    "name": "create",
    "source_code": "def create(self, name: str) -> Theme:\n    if name in self._entry_point_themes:\n        entry_point_loader = self._entry_point_themes[name]\n        entry_point_loader()\n    if name not in self._themes:\n        raise ThemeError(__('no theme named %r found (missing theme.toml?)') % name)\n    themes, theme_dirs, tmp_dirs = _load_theme_with_ancestors(name, self._themes, self._entry_point_themes)\n    return Theme(name, configs=themes, paths=theme_dirs, tmp_dirs=tmp_dirs)",
    "docstring": "Create an instance of theme.",
    "type": "method",
    "file_path": "sphinx\\sphinx\\theming.py",
    "ast_data": "FunctionDef name:create arg:self arg:name arguments arg arg If Compare Assign Call If Compare Raise Call Call Assign Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "process_opened_file",
    "source_code": "def process_opened_file(self, in_filename, in_file, out_filename, out_file):\n    lines = in_file.readlines()\n    processed_file, new_file_content, log, process_errors = self.update_string_pasta(''.join(lines), in_filename)\n    if out_file and processed_file:\n        out_file.write(new_file_content)\n    return (processed_file, self._format_log(log, in_filename, out_filename), process_errors)",
    "docstring": "Process the given python file for incompatible changes. This function is split out to facilitate StringIO testing from tf_upgrade_test.py. Args: in_filename: filename to parse in_file: opened file (or StringIO) out_filename: output file to write to out_file: opened file (or StringIO) Returns: A tuple representing number of files processed, log of actions, errors",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\tools\\compatibility\\ast_edits.py",
    "ast_data": "FunctionDef name:process_opened_file arg:self arg:in_filename arg:in_file arg:out_filename arg:out_file arguments arg arg arg arg arg Assign Call Assign Call Call If BoolOp Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "NewDim",
    "source_code": "@dataclass\nclass NewDim(DimSpec):\n    size: int\n\n    @classmethod\n    def new(cls, size: int) -> DimSpec:\n        return Singleton() if size == 1 else NewDim(size)",
    "docstring": "This is a new dimension created by the op.",
    "type": "class",
    "file_path": "pytorch\\torch\\distributed\\tensor\\_ops\\_view_ops.py",
    "ast_data": "ClassDef name:NewDim FunctionDef name:new arg:cls arg:size arguments arg arg Return return:yes Compare Call Call"
  },
  {
    "library": "pytorch",
    "name": "step",
    "source_code": "@_use_grad_for_differentiable\ndef step(self, closure=None):\n    loss = None\n    if closure is not None:\n        with torch.enable_grad():\n            loss = closure()\n    for group in self.param_groups:\n        params_with_grad: list[Tensor] = []\n        grads: list[Tensor] = []\n        state_sums: list[Tensor] = []\n        state_steps: list[Tensor] = []\n        has_sparse_grad, has_complex = self._init_group(group, params_with_grad, grads, state_sums, state_steps)\n        adagrad(params_with_grad, grads, state_sums, state_steps, lr=group['lr'], weight_decay=group['weight_decay'], lr_decay=group['lr_decay'], eps=group['eps'], has_sparse_grad=has_sparse_grad, foreach=group['foreach'], maximize=group['maximize'], differentiable=group['differentiable'], has_complex=has_complex, fused=group['fused'], grad_scale=getattr(self, 'grad_scale', None), found_inf=getattr(self, 'found_inf', None))\n    return loss",
    "docstring": "Perform a single optimization step. Args: closure (Callable, optional): A closure that reevaluates the model and returns the loss.",
    "type": "method",
    "file_path": "pytorch\\torch\\optim\\adagrad.py",
    "ast_data": "FunctionDef name:step arg:self arg:closure arguments arg arg Assign If Compare With Call Assign Call For Assign Call Call Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "replicated_fn",
    "source_code": "def replicated_fn(replica_id, replica_args, replica_kwargs):\n    with _TPUReplicaContext(strategy, replica_id_in_sync_group=replica_id):\n        result[0] = fn(*replica_args, **replica_kwargs)\n    return result[0]",
    "docstring": "Wraps user function to provide replica ID and inputs.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\tpu_strategy.py",
    "ast_data": "FunctionDef name:replicated_fn arg:replica_id arg:replica_args arg:replica_kwargs arguments arg arg arg With Call Assign Call Return return:yes"
  },
  {
    "library": "numpy",
    "name": "_setfieldnames",
    "source_code": "def _setfieldnames(self, names, titles):\n    if names:\n        if type(names) in [list, tuple]:\n            pass\n        elif isinstance(names, str):\n            names = names.split(',')\n        else:\n            raise NameError(f'illegal input names {repr(names)}')\n        self._names = [n.strip() for n in names[:self._nfields]]\n    else:\n        self._names = []\n    self._names += ['f%d' % i for i in range(len(self._names), self._nfields)]\n    _dup = find_duplicate(self._names)\n    if _dup:\n        raise ValueError(f'Duplicate field names: {_dup}')\n    if titles:\n        self._titles = [n.strip() for n in titles[:self._nfields]]\n    else:\n        self._titles = []\n        titles = []\n    if self._nfields > len(titles):\n        self._titles += [None] * (self._nfields - len(titles))",
    "docstring": "convert input field names into a list and assign to the _names attribute",
    "type": "method",
    "file_path": "numpy\\numpy\\_core\\records.py",
    "ast_data": "FunctionDef name:_setfieldnames arg:self arg:names arg:titles arguments arg arg arg If If Compare Call If Call Assign Call Raise Call Call Assign Call Assign Call Call Assign Call If Raise Call If Assign Call Assign Assign If Compare Call Call"
  },
  {
    "library": "pytorch",
    "name": "get_swa_avg_fn",
    "source_code": "def get_swa_avg_fn():\n\n    @torch.no_grad()\n    def swa_update(averaged_param: Tensor, current_param: Tensor, num_averaged: Union[Tensor, int]):\n        return averaged_param + (current_param - averaged_param) / (num_averaged + 1)\n    return swa_update",
    "docstring": "Get the function applying stochastic weight average (SWA) across a single param.",
    "type": "function",
    "file_path": "pytorch\\torch\\optim\\swa_utils.py",
    "ast_data": "FunctionDef name:get_swa_avg_fn arguments FunctionDef name:swa_update arg:averaged_param arg:current_param arg:num_averaged arguments arg arg arg Return return:yes Call Return return:yes"
  },
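The update avg + (cur - avg) / (n + 1) is the incremental form of an arithmetic mean; a quick standalone check:

```python
import torch

# Running the incremental update over a sequence reproduces the plain mean.
params = [torch.randn(3) for _ in range(5)]
avg = params[0].clone()
for num_averaged, p in enumerate(params[1:], start=1):
    avg = avg + (p - avg) / (num_averaged + 1)
assert torch.allclose(avg, torch.stack(params).mean(dim=0))
```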
  {
    "library": "pytorch",
    "name": "ParameterProxy",
    "source_code": "@compatibility(is_backward_compatible=False)\nclass ParameterProxy(Proxy):\n\n    def __init__(self, tracer: TracerBase, node: Node, name, param):\n        super().__init__(node, tracer)\n        assert isinstance(param, torch.nn.Parameter)\n        self.param = param\n        self.name = name\n\n    def __repr__(self) -> str:\n        return f'ParameterProxy({self.name})'\n\n    @property\n    def shape(self):\n        return self.param.shape\n\n    def size(self):\n        return self.param.size()\n\n    def dim(self):\n        return self.param.dim()\n\n    @property\n    def ndim(self):\n        return self.param.ndim\n\n    def numel(self):\n        return self.param.numel()\n\n    def nelement(self):\n        return self.param.nelement()",
    "docstring": "A special proxy which lets \"shape\", \"size\", \"dim\", and a few other attribute accesses pass through to the underlying module parameter object, so that conditional tests on these attributes will not throw exception during tracing",
    "type": "class",
    "file_path": "pytorch\\torch\\fx\\proxy.py",
    "ast_data": "ClassDef name:ParameterProxy FunctionDef name:__init__ arg:self arg:tracer arg:node arg:name arg:param arguments arg arg arg arg arg Call Call Call Assign Assign FunctionDef name:__repr__ arg:self arguments arg Return return:yes FunctionDef name:shape arg:self arguments arg Return return:yes FunctionDef name:size arg:self arguments arg Return return:yes Call FunctionDef name:dim arg:self arguments arg Return return:yes Call FunctionDef name:ndim arg:self arguments arg Return return:yes FunctionDef name:numel arg:self arguments arg Return return:yes Call FunctionDef name:nelement arg:self arguments arg Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "autotune_to_one_config",
    "source_code": "def autotune_to_one_config(self, *args, **kwargs):\n    start_time = time.time_ns()\n    timings = self.benchmark_all_configs(*args, **kwargs)\n    benchmark_time_taken_ns = time.time_ns() - start_time\n    self.launchers = [builtins.min(timings, key=timings.get)]\n    self.autotune_time_taken_ns = self.precompile_time_taken_ns + benchmark_time_taken_ns\n    launcher = self.launchers[0]\n    log.debug('Best config for %s: %s: %f, nreg %d, nspill %d, #shared-mem %s', self.fn.__name__, launcher.config, timings[launcher], launcher.n_regs, launcher.n_spills, launcher.shared)\n    if self.save_cache_hook:\n        self.save_cache_hook(launcher.config, self.autotune_time_taken_ns)",
    "docstring": "Do the actual autotuning",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\runtime\\triton_heuristics.py",
    "ast_data": "FunctionDef name:autotune_to_one_config arg:self arguments arg arg arg Assign Call Assign Call Assign Call Assign Call Assign Assign Call If Call"
  },
  {
    "library": "tensorflow",
    "name": "structured_outputs",
    "source_code": "@property\ndef structured_outputs(self):\n    return self._func_graph.structured_outputs",
    "docstring": "Returns outputs in as returned by the original function.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\polymorphic_function\\concrete_function.py",
    "ast_data": "FunctionDef name:structured_outputs arg:self arguments arg Return return:yes"
  },
  {
    "library": "sphinx",
    "name": "ReorderConsecutiveTargetAndIndexNodes",
    "source_code": "class ReorderConsecutiveTargetAndIndexNodes(SphinxTransform):\n    default_priority = 220\n\n    def apply(self, **kwargs: Any) -> None:\n        for target in self.document.findall(nodes.target):\n            _reorder_index_target_nodes(target)",
    "docstring": "Index nodes interspersed between target nodes prevent other Transformations from combining those target nodes, e.g. `` as input:: The transformed result will be::",
    "type": "class",
    "file_path": "sphinx\\sphinx\\transforms\\__init__.py",
    "ast_data": "ClassDef name:ReorderConsecutiveTargetAndIndexNodes Assign FunctionDef name:apply arg:self arguments arg arg For Call Call"
  },
  {
    "library": "tensorflow",
    "name": "dtype",
    "source_code": "@property\ndef dtype(self):\n    return self.values.dtype",
    "docstring": "The of elements in this tensor.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\indexed_slices.py",
    "ast_data": "FunctionDef name:dtype arg:self arguments arg Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "_update",
    "source_code": "def _update(self, event):\n    if self.ignore(event) or event.button != 1:\n        return\n    if event.name == 'button_press_event' and self.ax.contains(event)[0]:\n        self.drag_active = True\n        event.canvas.grab_mouse(self.ax)\n    if not self.drag_active:\n        return\n    if event.name == 'button_release_event' or (event.name == 'button_press_event' and (not self.ax.contains(event)[0])):\n        self.drag_active = False\n        event.canvas.release_mouse(self.ax)\n        return\n    xdata, ydata = self._get_data_coords(event)\n    val = self._value_in_bounds(xdata if self.orientation == 'horizontal' else ydata)\n    if val not in [None, self.val]:\n        self.set_val(val)",
    "docstring": "Update the slider position.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\widgets.py",
    "ast_data": "FunctionDef name:_update arg:self arg:event arguments arg arg If BoolOp Call Compare Return return:no If BoolOp Compare Call Assign Call If Return return:no If BoolOp Compare BoolOp Compare Call Assign Call Return return:no Assign Call Assign Call Compare If Compare Call"
  },
  {
    "library": "matplotlib",
    "name": "get_clim",
    "source_code": "def get_clim(self):\n    return self._colorizer.get_clim()",
    "docstring": "Return the values (min, max) that are mapped to the colormap limits.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\colorizer.py",
    "ast_data": "FunctionDef name:get_clim arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "cast_to_model_input_dtypes",
    "source_code": "def cast_to_model_input_dtypes(x, model):\n    input_dtypes = nest.map_structure(lambda t: t.dtype, model.inputs)\n    return nest.map_structure(math_ops.cast, x, input_dtypes)",
    "docstring": "Casts the given data tensors to the dtypes of the model inputs. Args: x: tensor or list/tuple of tensors. model: The model. Returns: Converted input. Each tensor is casted to the corresponding input in .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\training_utils_v1.py",
    "ast_data": "FunctionDef name:cast_to_model_input_dtypes arg:x arg:model arguments arg arg Assign Call arguments arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "disable",
    "source_code": "def disable(fn=None, recursive=True, *, reason=None):\n    if recursive:\n        if fn is not None:\n            fn = innermost_fn(fn)\n            assert callable(fn)\n            return DisableContext(msg=reason)(fn)\n        return DisableContext(msg=reason)\n    else:\n\n        def wrap(fn):\n            fn = innermost_fn(fn)\n            assert callable(fn)\n            nonrecursive_disable_wrapper = get_nonrecursive_disable_wrapper(fn)\n            nonrecursive_disable_wrapper._torchdynamo_disable = True\n            nonrecursive_disable_wrapper._torchdynamo_disable_msg = reason\n            nonrecursive_disable_wrapper._torchdynamo_orig_callable = fn\n            return nonrecursive_disable_wrapper\n        if fn is None:\n            return wrap\n        return wrap(fn)",
    "docstring": "Decorator to disable TorchDynamo If recursive=True, Dynamo is completely skipped on the decorated function frame as well as the recursively invoked functions. If recursive=False, Dynamo skips frames associated with the function code, but still process recursively invoked frames. If reason is provided, it will be printed when Dynamo attempts to trace the disabled function.",
    "type": "function",
    "file_path": "pytorch\\torch\\_dynamo\\decorators.py",
    "ast_data": "FunctionDef name:disable arg:fn arg:recursive arguments arg arg arg If If Compare Assign Call Call Return return:yes Call Call Return return:yes Call FunctionDef name:wrap arg:fn arguments arg Assign Call Call Assign Call Assign Assign Assign Return return:yes If Compare Return return:yes Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "idst",
    "source_code": "def idst(x, type=2, n=None, axis=-1, norm=None, overwrite_x=False):\n    type = _inverse_typemap[type]\n    return _pocketfft.dst(x, type, n, axis, norm, overwrite_x)",
    "docstring": "Return the Inverse Discrete Sine Transform of an arbitrary type sequence. Parameters ---------- x : array_like The input array. type : {1, 2, 3, 4}, optional Type of the DST (see Notes). Default type is 2. n : int, optional Length of the transform. If `xxdst`. .. versionadded:: 0.11.0",
    "type": "function",
    "file_path": "scipy\\scipy\\fftpack\\_realtransforms.py",
    "ast_data": "FunctionDef name:idst arg:x arg:type arg:n arg:axis arg:norm arg:overwrite_x arguments arg arg arg arg arg arg Assign Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "map_debug_info",
    "source_code": "def map_debug_info(a: Argument) -> Argument:\n    return torch.fx.node.map_aggregate(a, friendly_debug_info)",
    "docstring": "Helper function to apply to items in . may be a list, tuple, or dict.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\pipelining\\_debug.py",
    "ast_data": "FunctionDef name:map_debug_info arg:a arguments arg Return return:yes Call"
  },
  {
    "library": "seaborn",
    "name": "comp_data",
    "source_code": "@property\ndef comp_data(self):\n    if not hasattr(self, 'ax'):\n        return self.plot_data\n    if not hasattr(self, '_comp_data'):\n        comp_data = self.plot_data.copy(deep=False).drop(['x', 'y'], axis=1, errors='ignore')\n        for var in 'yx':\n            if var not in self.variables:\n                continue\n            parts = []\n            grouped = self.plot_data[var].groupby(self.converters[var], sort=False)\n            for converter, orig in grouped:\n                orig = orig.mask(orig.isin([np.inf, -np.inf]), np.nan)\n                orig = orig.dropna()\n                if var in self.var_levels:\n                    orig = orig[orig.isin(self.var_levels[var])]\n                comp = pd.to_numeric(converter.convert_units(orig)).astype(float)\n                transform = converter.get_transform().transform\n                parts.append(pd.Series(transform(comp), orig.index, name=orig.name))\n            if parts:\n                comp_col = pd.concat(parts)\n            else:\n                comp_col = pd.Series(dtype=float, name=var)\n            comp_data.insert(0, var, comp_col)\n        self._comp_data = comp_data\n    return self._comp_data",
    "docstring": "Dataframe with numeric x and y, after unit conversion and log scaling.",
    "type": "method",
    "file_path": "seaborn\\seaborn\\_base.py",
    "ast_data": "FunctionDef name:comp_data arg:self arguments arg If Call Return return:yes If Call Assign Call Call For If Compare Assign Assign Call For Assign Call Call Assign Call If Compare Assign Call Assign Call Call Call Assign Call Call Call Call If Assign Call Assign Call Call Assign Return return:yes"
  },
  {
    "library": "numpy",
    "name": "_zseries_div",
    "source_code": "def _zseries_div(z1, z2):\n    z1 = z1.copy()\n    z2 = z2.copy()\n    lc1 = len(z1)\n    lc2 = len(z2)\n    if lc2 == 1:\n        z1 /= z2\n        return (z1, z1[:1] * 0)\n    elif lc1 < lc2:\n        return (z1[:1] * 0, z1)\n    else:\n        dlen = lc1 - lc2\n        scl = z2[0]\n        z2 /= scl\n        quo = np.empty(dlen + 1, dtype=z1.dtype)\n        i = 0\n        j = dlen\n        while i < j:\n            r = z1[i]\n            quo[i] = z1[i]\n            quo[dlen - i] = r\n            tmp = r * z2\n            z1[i:i + lc2] -= tmp\n            z1[j:j + lc2] -= tmp\n            i += 1\n            j -= 1\n        r = z1[i]\n        quo[i] = r\n        tmp = r * z2\n        z1[i:i + lc2] -= tmp\n        quo /= scl\n        rem = z1[i + 1:i - 1 + lc2].copy()\n        return (quo, rem)",
    "docstring": "Divide the first z-series by the second. Divide by and return the quotient and remainder as z-series. Warning: this implementation only applies when both z1 and z2 have the same symmetry, which is sufficient for present purposes. Parameters ---------- z1, z2 : 1-D ndarray The arrays must be 1-D and have the same symmetry, but this is not checked. Returns ------- (quotient, remainder) : 1-D ndarrays Quotient and remainder as z-series. Notes ----- This is not the same as polynomial division on account of the desired form of the remainder. If symmetric/anti-symmetric z-series are denoted by S/A then the following rules apply: S/S -> S,S A/A -> S,A The restriction to types of the same symmetry could be fixed but seems like unneeded generality. There is no natural form for the remainder in the case where there is no symmetry.",
    "type": "function",
    "file_path": "numpy\\numpy\\polynomial\\chebyshev.py",
    "ast_data": "FunctionDef name:_zseries_div arg:z1 arg:z2 arguments arg arg Assign Call Assign Call Assign Call Assign Call If Compare Return return:yes If Compare Return return:yes Assign Assign Assign Call Assign Assign While Compare Assign Assign Assign Assign Assign Assign Assign Assign Call Return return:yes"
  },
  {
    "library": "scipy",
    "name": "sc_diff",
    "source_code": "def sc_diff(x, a, b, period=None, _cache=_cache):\n    if isinstance(_cache, threading.local):\n        if not hasattr(_cache, 'sc_diff_cache'):\n            _cache.sc_diff_cache = {}\n        _cache = _cache.sc_diff_cache\n    tmp = asarray(x)\n    if iscomplexobj(tmp):\n        return sc_diff(tmp.real, a, b, period, _cache) + 1j * sc_diff(tmp.imag, a, b, period, _cache)\n    if period is not None:\n        a = a * 2 * pi / period\n        b = b * 2 * pi / period\n    n = len(x)\n    omega = _cache.get((n, a, b))\n    if omega is None:\n        if len(_cache) > 20:\n            while _cache:\n                _cache.popitem()\n\n        def kernel(k, a=a, b=b):\n            if k:\n                return sinh(a * k) / cosh(b * k)\n            return 0\n        omega = convolve.init_convolution_kernel(n, kernel, d=1)\n        _cache[n, a, b] = omega\n    overwrite_x = _datacopied(tmp, x)\n    return convolve.convolve(tmp, omega, swap_real_imag=1, overwrite_x=overwrite_x)",
    "docstring": "Return (a,b)-sinh/cosh pseudo-derivative of a periodic sequence x. If x_j and y_j are Fourier coefficients of periodic functions x and y, respectively, then:: y_j = sqrt(-1)*sinh(j*a*2*pi/period)/cosh(j*b*2*pi/period) * x_j y_0 = 0 Parameters ---------- x : array_like Input array. a,b : float Defines the parameters of the sinh/cosh pseudo-differential operator. period : float, optional The period of the sequence x. Default is 2*pi. Notes ----- ``, the Nyquist mode of x is taken as zero.",
    "type": "function",
    "file_path": "scipy\\scipy\\fftpack\\_pseudo_diffs.py",
    "ast_data": "FunctionDef name:sc_diff arg:x arg:a arg:b arg:period arg:_cache arguments arg arg arg arg arg If Call If Call Assign Assign Assign Call If Call Return return:yes Call Call If Compare Assign Assign Assign Call Assign Call If Compare If Compare Call While Call FunctionDef name:kernel arg:k arg:a arg:b arguments arg arg arg If Return return:yes Call Call Return return:yes Assign Call Assign Assign Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "GetContainingXLAContext",
    "source_code": "def GetContainingXLAContext(ctxt):\n    while ctxt:\n        if ctxt.IsXLAContext():\n            return ctxt\n        ctxt = ctxt.outer_context\n    return None",
    "docstring": "Returns the first ancestor XLAContext of . Returns if is a XLAContext, or None if is not in a while loop. Args: ctxt: ControlFlowContext Returns: if is a XLAContext, the most nested XLAContext containing , or None if is not in a while loop.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\utils\\control_flow_util.py",
    "ast_data": "FunctionDef name:GetContainingXLAContext arg:ctxt arguments arg While If Call Return return:yes Assign Return return:no"
  },
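The same ancestor-walk pattern in a generic, self-contained form (Ctx is a made-up stand-in for ControlFlowContext):

```python
# Generic sketch: climb a chain of outer contexts until a predicate matches.
class Ctx:
    def __init__(self, kind, outer=None):
        self.kind = kind
        self.outer = outer

def first_ancestor(ctxt, kind):
    while ctxt:
        if ctxt.kind == kind:
            return ctxt
        ctxt = ctxt.outer
    return None  # not inside a matching context

root = Ctx("xla")
leaf = Ctx("while", outer=Ctx("cond", outer=root))
assert first_ancestor(leaf, "xla") is root
assert first_ancestor(leaf, "missing") is None
```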
  {
    "library": "pytorch",
    "name": "deserialize",
    "source_code": "def deserialize(self) -> _WireProtocolInput:\n    from torch.fx._graph_pickler import GraphPickler\n    fake_mode = _current_fake_mode()\n    result = GraphPickler.loads(self.value, fake_mode)\n    assert isinstance(result, _WireProtocolInput)\n    return result",
    "docstring": "Turn this streamable object back into a _WireProtocolInput.",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\compile_fx_ext.py",
    "ast_data": "FunctionDef name:deserialize arg:self arguments arg Assign Call Assign Call Call Return return:yes"
  },
  {
    "library": "kornia",
    "name": "build",
    "source_code": "@staticmethod\ndef build(model_name: str='depth-anything-v2-small', model_type: str='model', cache_dir: Optional[str]=None) -> DepthEstimation:\n    if model_name not in ['depth-anything-v2-small', 'depth-anything-v2-base', 'depth-anything-v2-large']:\n        raise ValueError(f'{model_name} is not a valid model name.')\n    loader = HFONNXComunnityModelLoader(model_name, model_type=model_type, cache_dir=cache_dir)\n    onnx_model = loader.load_model(download=True, io_name_mapping={'pixel_values': 'input', 'predicted_depth': 'output'})\n    preproc = loader.load_preprocessing().to_onnx(save=False)\n    return DepthEstimation(onnx_model, pre_processor=preproc, name=f'{model_name}_{model_type}')",
    "docstring": "Export a DepthAnything model to an ONNX model file. Args: model_name: The name of the model to be loaded. Valid model names include: - - - model_type: The type of the model to be loaded. Valid model types include: - - - - - - - cache_dir: The directory where the model should be cached. Returns: str: The name of the output ONNX file. .. code-block:: python images = kornia.utils.sample.get_sample_images() model = DepthAnythingONNXBuilder.build() model.save(images)",
    "type": "method",
    "file_path": "kornia\\kornia\\models\\depth_estimation\\depth_anything.py",
    "ast_data": "FunctionDef name:build arg:model_name arg:model_type arg:cache_dir arguments arg arg arg If Compare Raise Call Assign Call Assign Call Assign Call Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "get_schedule_class",
    "source_code": "def get_schedule_class(schedule_name: str):\n    schedule_map = {'1F1B': Schedule1F1B, 'Interleaved1F1B': ScheduleInterleaved1F1B, 'GPipe': ScheduleGPipe, 'LoopedBFS': ScheduleLoopedBFS, 'InterleavedZeroBubble': ScheduleInterleavedZeroBubble, 'PipelineScheduleSingle': PipelineScheduleSingle, 'PipelineScheduleMulti': PipelineScheduleMulti, 'ZBVZeroBubble': ScheduleZBVZeroBubble}\n    lowercase_keys = {k.lower(): k for k in schedule_map.keys()}\n    lowercase_schedule_name = schedule_name.lower()\n    if lowercase_schedule_name not in lowercase_keys:\n        raise ValueError(f\"Unknown schedule name '{schedule_name}'. The valid options are {list(schedule_map.keys())}\")\n    return schedule_map[lowercase_keys[lowercase_schedule_name]]",
    "docstring": "Maps a schedule name (case insensitive) to its corresponding class object. Args: schedule_name (str): The name of the schedule.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\pipelining\\schedules.py",
    "ast_data": "FunctionDef name:get_schedule_class arg:schedule_name arguments arg Assign Assign Call Call Assign Call If Compare Raise Call Call Call Return return:yes"
  },
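The case-insensitive resolution boils down to indexing the registry through a lowercase-key map; a standalone sketch of that technique:

```python
# Build the lowercase index once, then resolve user input through it.
registry = {"GPipe": "gpipe-class", "LoopedBFS": "looped-bfs-class"}
lowercase_keys = {k.lower(): k for k in registry}

def resolve(name: str):
    key = lowercase_keys.get(name.lower())
    if key is None:
        raise ValueError(f"Unknown name {name!r}; options: {list(registry)}")
    return registry[key]

assert resolve("gpipe") == "gpipe-class"  # case-insensitive hit
assert resolve("GPIPE") == "gpipe-class"
```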
  {
    "library": "kornia",
    "name": "forward",
    "source_code": "def forward(self, image_embeddings: Tensor, image_pe: Tensor, sparse_prompt_embeddings: Tensor, dense_prompt_embeddings: Tensor, multimask_output: bool) -> tuple[Tensor, Tensor]:\n    masks, iou_pred = self.predict_masks(image_embeddings=image_embeddings, image_pe=image_pe, sparse_prompt_embeddings=sparse_prompt_embeddings, dense_prompt_embeddings=dense_prompt_embeddings)\n    if multimask_output:\n        mask_slice = slice(1, None)\n    else:\n        mask_slice = slice(0, 1)\n    masks = masks[:, mask_slice, :, :]\n    iou_pred = iou_pred[:, mask_slice]\n    return (masks, iou_pred)",
    "docstring": "Predict masks given image and prompt embeddings. Args: image_embeddings: the embeddings from the image encoder image_pe: positional encoding with the shape of image_embeddings sparse_prompt_embeddings: the embeddings of the points and boxes dense_prompt_embeddings: the embeddings of the mask inputs multimask_output: Whether to return multiple masks or a single mask. Returns: batched predicted masks batched predictions of mask quality",
    "type": "method",
    "file_path": "kornia\\kornia\\contrib\\models\\sam\\architecture\\mask_decoder.py",
    "ast_data": "FunctionDef name:forward arg:self arg:image_embeddings arg:image_pe arg:sparse_prompt_embeddings arg:dense_prompt_embeddings arg:multimask_output arguments arg arg arg arg arg arg Assign Call If Assign Call Assign Call Assign Assign Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "offset_var",
    "source_code": "def offset_var(self, index: Expr, vars: list[sympy.Symbol]) -> Expr:\n    index = self.simplify(index)\n    return sympy_subs(index, {v: sympy.S.Zero for v in vars if v != 0})",
    "docstring": "Extract offset part of an indexing expression",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\sizevars.py",
    "ast_data": "FunctionDef name:offset_var arg:self arg:index arg:vars arguments arg arg arg Assign Call Return return:yes Call Compare"
  },
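Substituting zero for every index variable leaves only the constant offset; a small SymPy example of the same idea:

```python
import sympy

# Zeroing the index variables isolates the constant offset term.
x, y = sympy.symbols("x y")
index = 5 * x + 3 * y + 7
offset = index.subs({x: 0, y: 0})
print(offset)  # 7
```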
  {
    "library": "matplotlib",
    "name": "_clear",
    "source_code": "def _clear(self, event):\n    if self.ignore(event) or self.canvas.is_saving():\n        return\n    self._background = self.canvas.copy_from_bbox(self.ax.bbox)\n    self.ax.draw_artist(self._buttons)",
    "docstring": "Internal event handler to clear the buttons.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\widgets.py",
    "ast_data": "FunctionDef name:_clear arg:self arg:event arguments arg arg If BoolOp Call Call Return return:no Assign Call Call"
  },
  {
    "library": "django",
    "name": "lookup_cast",
    "source_code": "def lookup_cast(self, lookup_type, internal_type=None):\n    return '%s'",
    "docstring": "Return the string to use in a query when performing lookups (\"contains\", \"like\", etc.). It should contain a '%s' placeholder for the column being searched against.",
    "type": "method",
    "file_path": "django\\django\\db\\backends\\base\\operations.py",
    "ast_data": "FunctionDef name:lookup_cast arg:self arg:lookup_type arg:internal_type arguments arg arg arg Return return:yes"
  },
  {
    "library": "cherrypy",
    "name": "LockTimeout",
    "source_code": "class LockTimeout(Exception):\n    pass",
    "docstring": "Exception when a lock could not be acquired before a timeout period.",
    "type": "class",
    "file_path": "cherrypy\\cherrypy\\lib\\locking.py",
    "ast_data": "ClassDef name:LockTimeout"
  },
  {
    "library": "tensorflow",
    "name": "fn_args",
    "source_code": "def fn_args(fn):\n    if isinstance(fn, functools.partial):\n        args = fn_args(fn.func)\n        args = [a for a in args[len(fn.args):] if a not in (fn.keywords or [])]\n    else:\n        if hasattr(fn, '__call__') and tf_inspect.ismethod(fn.__call__):\n            fn = fn.__call__\n        args = tf_inspect.getfullargspec(fn).args\n        if _is_bound_method(fn) and args:\n            args.pop(0)\n    return tuple(args)",
    "docstring": "Get argument names for function-like object. Args: fn: Function, or function-like object (e.g., result of ). Returns: of string argument names. Raises: ValueError: if partial function has positionally bound arguments",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\legacy_tf_layers\\variable_scope_shim.py",
    "ast_data": "FunctionDef name:fn_args arg:fn arguments arg If Call Assign Call Assign Call Compare BoolOp If BoolOp Call Call Assign Assign Call If BoolOp Call Call Return return:yes Call"
  },
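The functools.partial branch above drops parameters already bound positionally or by keyword; the same bookkeeping in a standalone sketch:

```python
import functools
import inspect

def f(a, b, c, d=1):
    pass

p = functools.partial(f, 1, c=2)  # binds a positionally and c by keyword
spec = inspect.getfullargspec(p.func)
remaining = [name for name in spec.args[len(p.args):] if name not in p.keywords]
print(remaining)  # ['b', 'd']
```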
  {
    "library": "matplotlib",
    "name": "__init__",
    "source_code": "def __init__(self, widthA=1.0, lengthA=0.2, angleA=0, widthB=1.0, lengthB=0.2, angleB=0):\n    super().__init__(widthA=widthA, lengthA=lengthA, angleA=angleA, widthB=widthB, lengthB=lengthB, angleB=angleB)",
    "docstring": "Parameters ---------- widthA, widthB : float, default: 1.0 Width of the bracket. lengthA, lengthB : float, default: 0.2 Length of the bracket. angleA, angleB : float, default: 0 degrees Orientation of the bracket, as a counterclockwise angle. 0 degrees means perpendicular to the line.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\patches.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:widthA arg:lengthA arg:angleA arg:widthB arg:lengthB arg:angleB arguments arg arg arg arg arg arg arg Call Call"
  },
  {
    "library": "numpy",
    "name": "broadcast_shapes",
    "source_code": "@set_module('numpy')\ndef broadcast_shapes(*args):\n    arrays = [np.empty(x, dtype=_size0_dtype) for x in args]\n    return _broadcast_shape(*arrays)",
    "docstring": "Broadcast the input shapes into a single shape. :ref:. .. versionadded:: 1.20.0 Parameters ---------- *args : tuples of ints, or ints The shapes to be broadcast against each other. Returns ------- tuple Broadcasted shape. Raises ------ ValueError If the shapes are not compatible and cannot be broadcast according to NumPy's broadcasting rules. See Also -------- broadcast broadcast_arrays broadcast_to Examples -------- >>> import numpy as np >>> np.broadcast_shapes((1, 2), (3, 1), (3, 2)) (3, 2) >>> np.broadcast_shapes((6, 7), (5, 6, 1), (7,), (5, 1, 7)) (5, 6, 7)",
    "type": "function",
    "file_path": "numpy\\numpy\\lib\\_stride_tricks_impl.py",
    "ast_data": "FunctionDef name:broadcast_shapes arguments arg Assign Call Return return:yes Call Call"
  },
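The implementation trick is to materialize zero-byte arrays of each shape and let NumPy's broadcasting machinery do the work; a standalone reproduction:

```python
import numpy as np

# An empty structured dtype has itemsize 0, so these arrays cost no memory.
shapes = [(1, 2), (3, 1), (3, 2)]
arrays = [np.empty(shape, dtype=[]) for shape in shapes]
print(np.broadcast(*arrays).shape)  # (3, 2)
```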
  {
    "library": "django",
    "name": "time_trunc_sql",
    "source_code": "def time_trunc_sql(self, lookup_type, sql, params, tzname=None):\n    raise NotImplementedError('subclasses of BaseDatabaseOperations may require a time_trunc_sql() method')",
    "docstring": "Given a lookup_type of 'hour', 'minute' or 'second', return the SQL that truncates the given time or datetime field field_name to a time object with only the given specificity. If is provided, the given value is truncated in a specific timezone.",
    "type": "method",
    "file_path": "django\\django\\db\\backends\\base\\operations.py",
    "ast_data": "FunctionDef name:time_trunc_sql arg:self arg:lookup_type arg:sql arg:params arg:tzname arguments arg arg arg arg arg Raise Call"
  },
  {
    "library": "scrapy",
    "name": "long_desc",
    "source_code": "def long_desc(self) -> str:\n    return self.short_desc()",
    "docstring": "A long description of the command. Return short description when not available. It cannot contain newlines since contents will be formatted by optparser which removes newlines and wraps text.",
    "type": "method",
    "file_path": "scrapy\\scrapy\\commands\\__init__.py",
    "ast_data": "FunctionDef name:long_desc arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "sphinx",
    "name": "render_string",
    "source_code": "def render_string(self, template: str, context: dict[str, Any]) -> str:\n    msg = 'must be implemented in subclasses'\n    raise NotImplementedError(msg)",
    "docstring": "Called by the builder to render a template given as a string with a specified context (a Python dictionary).",
    "type": "method",
    "file_path": "sphinx\\sphinx\\application.py",
    "ast_data": "FunctionDef name:render_string arg:self arg:template arg:context arguments arg arg arg Assign Raise Call"
  },
  {
    "library": "scipy",
    "name": "irfft",
    "source_code": "@_dispatch\ndef irfft(x, n=None, axis=-1, norm=None, overwrite_x=False, workers=None, *, plan=None):\n    return (Dispatchable(x, np.ndarray),)",
    "docstring": "Computes the inverse of . This function computes the inverse of the 1-D *n*-point discrete Fourier Transform of real input computed by . In other words, `rfftnnaxisfftxfft~scipy.fft.fftaxisaxisnnnaxisxirfftnxxnnamnifftirfft`, the negative frequencies are not specified, and the output array is purely real.",
    "type": "function",
    "file_path": "scipy\\scipy\\fft\\_basic.py",
    "ast_data": "FunctionDef name:irfft arg:x arg:n arg:axis arg:norm arg:overwrite_x arg:workers arguments arg arg arg arg arg arg arg Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "describe_option",
    "source_code": "def describe_option(pat: str='', _print_desc: bool=True) -> str | None:\n    keys = _select_options(pat)\n    if len(keys) == 0:\n        raise OptionError(f'No such keys(s) for pat={pat!r}')\n    s = '\\n'.join([_build_option_description(k) for k in keys])\n    if _print_desc:\n        print(s)\n        return None\n    return s",
    "docstring": "Print the description for one or more registered options. Call with no arguments to get a listing for all registered options. Parameters ---------- pat : str, default \"\" String or string regexp pattern. Empty string will return all options. For regexp strings, all matching keys will have their description displayed. _print_desc : bool, default True If True (default) the description(s) will be printed to stdout. Otherwise, the description(s) will be returned as a string (for testing). Returns ------- None If `User Guide `. Examples -------- >>> pd.describe_option(\"display.max_columns\") # doctest: +SKIP display.max_columns : int If max_cols is exceeded, switch to truncate view...",
    "type": "function",
    "file_path": "pandas\\pandas\\_config\\config.py",
    "ast_data": "FunctionDef name:describe_option arg:pat arg:_print_desc arguments arg arg Assign Call If Compare Call Raise Call Assign Call Call If Call Return return:no Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_scaled_mm_flop",
    "source_code": "@register_flop_formula(aten._scaled_mm)\ndef _scaled_mm_flop(a_shape, b_shape, scale_a_shape, scale_b_shape, bias_shape=None, scale_result_shape=None, out_dtype=None, use_fast_accum=False, out_shape=None, **kwargs) -> int:\n    return mm_flop(a_shape, b_shape)",
    "docstring": "Count flops for _scaled_mm.",
    "type": "function",
    "file_path": "pytorch\\torch\\utils\\flop_counter.py",
    "ast_data": "FunctionDef name:_scaled_mm_flop arg:a_shape arg:b_shape arg:scale_a_shape arg:scale_b_shape arg:bias_shape arg:scale_result_shape arg:out_dtype arg:use_fast_accum arg:out_shape arguments arg arg arg arg arg arg arg arg arg arg Return return:yes Call Call"
  },
  {
    "library": "pygame",
    "name": "SysFont",
    "source_code": "def SysFont(name, size, bold=0, italic=0, constructor=None):\n    if constructor is None:\n\n        def constructor(fontpath, size, bold, italic):\n            font = Font(fontpath, size)\n            font.set_bold(bold)\n            font.set_italic(italic)\n            return font\n    return _SysFont(name, size, bold, italic, constructor)",
    "docstring": "pygame.ftfont.SysFont(name, size, bold=False, italic=False, constructor=None) -> Font Create a pygame Font from system font resources. This will search the system fonts for the given font name. You can also enable bold or italic styles, and the appropriate system font will be selected if available. This will always return a valid Font object, and will fallback on the builtin pygame font if the given font is not found. Name can also be an iterable of font names, a string of comma-separated font names, or a bytes of comma-separated font names, in which case the set of names will be searched in order. Pygame uses a small set of common font aliases. If the specific font you ask for is not available, a reasonable alternative may be used. If optional constructor is provided, it must be a function with signature constructor(fontpath, size, bold, italic) which returns a Font instance. If None, a pygame.ftfont.Font object is created.",
    "type": "function",
    "file_path": "pygame\\src_py\\ftfont.py",
    "ast_data": "FunctionDef name:SysFont arg:name arg:size arg:bold arg:italic arg:constructor arguments arg arg arg arg arg If Compare FunctionDef name:constructor arg:fontpath arg:size arg:bold arg:italic arguments arg arg arg arg Assign Call Call Call Return return:yes Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "set_weights",
    "source_code": "def set_weights(self, weights):\n    params = self.weights\n    if len(params) != len(weights):\n        raise ValueError('Length of the specified weight list (' + str(len(weights)) + ') does not match the number of weights of the optimizer (' + str(len(params)) + ')')\n    weight_value_tuples = []\n    param_values = backend.batch_get_value(params)\n    for pv, p, w in zip(param_values, params, weights):\n        if pv.shape != w.shape:\n            raise ValueError('Optimizer weight shape ' + str(pv.shape) + ' not compatible with provided weight shape ' + str(w.shape))\n        weight_value_tuples.append((p, w))\n    backend.batch_set_value(weight_value_tuples)",
    "docstring": "Sets the weights of the optimizer, from Numpy arrays. Should only be called after computing the gradients (otherwise the optimizer has no weights). Args: weights: a list of Numpy arrays. The number of arrays and their shape must match number of the dimensions of the weights of the optimizer (i.e. it should match the output of ). Raises: ValueError: in case of incompatible weight shapes.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\optimizer_v1.py",
    "ast_data": "FunctionDef name:set_weights arg:self arg:weights arguments arg arg Assign If Compare Call Call Raise Call Call Call Call Call Assign Assign Call For Call If Compare Raise Call Call Call Call Call"
  },
  {
    "library": "pandas",
    "name": "read_column",
    "source_code": "def read_column(self, column: str, where=None, start: int | None=None, stop: int | None=None):\n    self.validate_version()\n    if not self.infer_axes():\n        return False\n    if where is not None:\n        raise TypeError('read_column does not currently accept a where clause')\n    for a in self.axes:\n        if column == a.name:\n            if not a.is_data_indexable:\n                raise ValueError(f'column [{column}] can not be extracted individually; it is not data indexable')\n            c = getattr(self.table.cols, column)\n            a.set_info(self.info)\n            col_values = a.convert(c[start:stop], nan_rep=self.nan_rep, encoding=self.encoding, errors=self.errors)\n            cvs = col_values[1]\n            dtype = getattr(self.table.attrs, f'{column}_meta', None)\n            return Series(cvs, name=column, copy=False, dtype=dtype)\n    raise KeyError(f'column [{column}] not found in the table')",
    "docstring": "return a single column from the table, generally only indexables are interesting",
    "type": "method",
    "file_path": "pandas\\pandas\\io\\pytables.py",
    "ast_data": "FunctionDef name:read_column arg:self arg:column arg:where arg:start arg:stop arguments arg arg arg arg arg Call If Call Return return:yes If Compare Raise Call For If Compare If Raise Call Assign Call Call Assign Call Assign Assign Call Return return:yes Call Raise Call"
  },
  {
    "library": "django",
    "name": "check_err",
    "source_code": "def check_err(code, cpl=False):\n    err_dict = CPLERR_DICT if cpl else OGRERR_DICT\n    if code == ERR_NONE:\n        return\n    elif code in err_dict:\n        e, msg = err_dict[code]\n        raise e(msg)\n    else:\n        raise GDALException('Unknown error code: \"%s\"' % code)",
    "docstring": "Check the given CPL/OGRERR and raise an exception where appropriate.",
    "type": "function",
    "file_path": "django\\django\\contrib\\gis\\gdal\\error.py",
    "ast_data": "FunctionDef name:check_err arg:code arg:cpl arguments arg arg Assign If Compare Return return:no If Compare Assign Raise Call Raise Call"
  },
  {
    "library": "cherrypy",
    "name": "_add_MSIE_max_age_workaround",
    "source_code": "def _add_MSIE_max_age_workaround(cookie, timeout):\n    expires = time.time() + timeout * 60\n    cookie['expires'] = httputil.HTTPDate(expires)",
    "docstring": "Inject a Microsoft Internet Explorer `` workaround. We'd like to use the \"max-age\" param as indicated in but IE doesn't save it to disk and the session is lost if people close the browser. So we have to use the old \"expires\" ... sigh ...",
    "type": "function",
    "file_path": "cherrypy\\cherrypy\\lib\\sessions.py",
    "ast_data": "FunctionDef name:_add_MSIE_max_age_workaround arg:cookie arg:timeout arguments arg arg Assign Call Assign Call"
  },
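The workaround just computes an absolute expiry timestamp timeout minutes ahead and formats it as an HTTP date; a stdlib-only sketch (email.utils.formatdate stands in for httputil.HTTPDate):

```python
import time
from email.utils import formatdate

timeout_minutes = 60
expires = time.time() + timeout_minutes * 60  # absolute expiry, in seconds
print(formatdate(expires, usegmt=True))       # e.g. 'Sat, 01 Jan 2028 00:00:00 GMT'
```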
  {
    "library": "tensorflow",
    "name": "kl_divergence",
    "source_code": "def kl_divergence(self, other, name='kl_divergence'):\n    with self._name_scope(name):\n        return self._kl_divergence(other)",
    "docstring": "Computes the Kullback--Leibler divergence. Denote this distribution () by and the distribution by . Assuming are absolutely continuous with respect to reference measure , the KL divergence is defined as: where denotes the support of the random variable , denotes (Shanon) cross entropy, and denotes (Shanon) entropy. Args: other: instance. name: Python prepended to names of ops created by this function. Returns: kl_divergence: with shape representing different calculations of the Kullback-Leibler divergence.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\distributions\\distribution.py",
    "ast_data": "FunctionDef name:kl_divergence arg:self arg:other arg:name arguments arg arg arg With Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "stack",
    "source_code": "def stack(list_or_tensor, element_dtype=None, strict=True):\n    if strict:\n\n        def raise_error(x):\n            raise ValueError('%s must be stackable when strict=True' % x)\n        original_call = raise_error\n    else:\n        original_call = lambda x: x\n    return data_structures.list_stack(list_or_tensor, data_structures.ListStackOpts(element_dtype=element_dtype, original_call=original_call))",
    "docstring": "Stacks the input, if it admits the notion of stacking. For example, a list of tensors can be stacked into a larger tensor. This function is similar to tf.stack, but it accepts non-lists and lists of non-tensors as arguments. In the latter case, the function does nothing. Args: list_or_tensor: Any element_dtype: tf.DType, optional dtypedtype for the elements in the list. Required if the input is stackable, and the list is untyped. strict: bool, if True an error is raised if the input is not stackable. Otherwise the function is a no-op. Returns: Any, if the input is stackable, the result will be a tf.Tensor. Otherwise, if strict=False, the result will be list_or_tensor. Raises: ValueError: if strict=True and the input is not stackable.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\autograph\\lang\\special_functions.py",
    "ast_data": "FunctionDef name:stack arg:list_or_tensor arg:element_dtype arg:strict arguments arg arg arg If FunctionDef name:raise_error arg:x arguments arg Raise Call Assign Assign arguments arg Return return:yes Call Call"
  },
  {
    "library": "django",
    "name": "get_storage_engine",
    "source_code": "def get_storage_engine(self, cursor, table_name):\n    cursor.execute('\\n            SELECT engine\\n            FROM information_schema.tables\\n            WHERE\\n                table_name = %s AND\\n                table_schema = DATABASE()\\n            ', [table_name])\n    result = cursor.fetchone()\n    if not result:\n        return self.connection.features._mysql_storage_engine\n    return result[0]",
    "docstring": "Retrieve the storage engine for a given table. Return the default storage engine if the table doesn't exist.",
    "type": "method",
    "file_path": "django\\django\\db\\backends\\mysql\\introspection.py",
    "ast_data": "FunctionDef name:get_storage_engine arg:self arg:cursor arg:table_name arguments arg arg arg Call Assign Call If Return return:yes Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_write_keras_model_train_graph",
    "source_code": "def _write_keras_model_train_graph(self):\n    with self._train_writer.as_default():\n        with summary_ops_v2.record_if(True):\n            train_fn = self.model.train_tf_function\n            if hasattr(train_fn, 'function_spec'):\n                summary_ops_v2.graph(train_fn._concrete_stateful_fn.graph)",
    "docstring": "Writes Keras model train_function graph to TensorBoard.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\callbacks.py",
    "ast_data": "FunctionDef name:_write_keras_model_train_graph arg:self arguments arg With Call With Call Assign If Call Call"
  },
  {
    "library": "matplotlib",
    "name": "get_gridspec",
    "source_code": "def get_gridspec(self):\n    return self._subplotspec.get_gridspec() if self._subplotspec else None",
    "docstring": "Return the associated with the subplot, or None.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\axes\\_base.py",
    "ast_data": "FunctionDef name:get_gridspec arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "cryptography",
    "name": "not_valid_before",
    "source_code": "def not_valid_before(self, time: datetime.datetime) -> CertificateBuilder:\n    if not isinstance(time, datetime.datetime):\n        raise TypeError('Expecting datetime object.')\n    if self._not_valid_before is not None:\n        raise ValueError('The not valid before may only be set once.')\n    time = _convert_to_naive_utc_time(time)\n    if time < _EARLIEST_UTC_TIME:\n        raise ValueError('The not valid before date must be on or after 1950 January 1).')\n    if self._not_valid_after is not None and time > self._not_valid_after:\n        raise ValueError('The not valid before date must be before the not valid after date.')\n    return CertificateBuilder(self._issuer_name, self._subject_name, self._public_key, self._serial_number, time, self._not_valid_after, self._extensions)",
    "docstring": "Sets the certificate activation time.",
    "type": "method",
    "file_path": "cryptography\\src\\cryptography\\x509\\base.py",
    "ast_data": "FunctionDef name:not_valid_before arg:self arg:time arguments arg arg If Call Raise Call If Compare Raise Call Assign Call If Compare Raise Call If BoolOp Compare Compare Raise Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "MetricAttributes",
    "source_code": "class MetricAttributes(SerializedAttributes.with_attributes('MetricAttributes', checkpointable_objects=['variables'], functions=[])):\n    pass",
    "docstring": "Attributes that are added to Metric objects when saved to SavedModel. List of all attributes: variables: list of all variables",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\saving\\saved_model\\serialized_attributes.py",
    "ast_data": "ClassDef name:MetricAttributes Call"
  },
  {
    "library": "kornia",
    "name": "save",
    "source_code": "def save(self, images: Tensor, depth_maps: Optional[Union[Tensor, list[Tensor]]]=None, directory: Optional[str]=None, output_type: str='torch', depth_type: str='relative', max_depth: int=80) -> None:\n    outputs = self.visualize(images, depth_maps, output_type, depth_type=depth_type, max_depth=max_depth)\n    self._save_outputs(images, directory, suffix='_src')\n    self._save_outputs(outputs, directory, suffix='_depth')",
    "docstring": "Save the segmentation results. Args: images: input tensor. depth_maps: estimated depths. output_type: type of the output. depth_type: 'metric' or 'relative' depth. max_depth: maximum depth value. Only valid for metric depth. directory: where to store outputs. Returns: output tensor.",
    "type": "method",
    "file_path": "kornia\\kornia\\models\\depth_estimation\\base.py",
    "ast_data": "FunctionDef name:save arg:self arg:images arg:depth_maps arg:directory arg:output_type arg:depth_type arg:max_depth arguments arg arg arg arg arg arg arg Assign Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "get_model_proto",
    "source_code": "@tf_export('data.experimental.get_model_proto')\ndef get_model_proto(iterator) -> model_pb2.ModelProto:\n    if isinstance(iterator, iterator_ops.OwnedIterator):\n        iterator_resource = iterator._iterator_resource\n    elif isinstance(iterator, dataset_ops.NumpyIterator):\n        iterator_resource = iterator._iterator._iterator_resource\n    else:\n        raise ValueError('Only supports `tf.data.Iterator`-typed `iterator`.')\n    if not context.executing_eagerly():\n        raise ValueError(f'{get_model_proto.__name__} is not supported in graph mode.')\n    model_proto_string_tensor = ged_ops.iterator_get_model_proto(iterator_resource)\n    model_proto_bytes = model_proto_string_tensor.numpy()\n    return model_pb2.ModelProto.FromString(model_proto_bytes)",
    "docstring": "Gets the analytical model inside of as . Args: iterator: An or Returns: The model inside of this iterator as a model proto. Raises: NotFoundError: If this iterator's autotune is not enabled.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\data\\experimental\\ops\\iterator_model_ops.py",
    "ast_data": "FunctionDef name:get_model_proto arg:iterator arguments arg If Call Assign If Call Assign Raise Call If Call Raise Call Assign Call Assign Call Return return:yes Call Call"
  },
  {
    "library": "scikit-learn",
    "name": "squared_loss",
    "source_code": "def squared_loss(y_true, y_pred, sample_weight=None):\n    return 0.5 * np.average((y_true - y_pred) ** 2, weights=sample_weight, axis=0).mean()",
    "docstring": "Compute the squared loss for regression. Parameters ---------- y_true : array-like or label indicator matrix Ground truth (correct) values. y_pred : array-like or label indicator matrix Predicted values, as returned by a regression estimator. sample_weight : array-like of shape (n_samples,), default=None Sample weights. Returns ------- loss : float The degree to which the samples are correctly predicted.",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\neural_network\\_base.py",
    "ast_data": "FunctionDef name:squared_loss arg:y_true arg:y_pred arg:sample_weight arguments arg arg arg Return return:yes Call Call"
  },
  {
    "library": "matplotlib",
    "name": "blend_soft_light",
    "source_code": "def blend_soft_light(self, rgb, intensity):\n    return 2 * intensity * rgb + (1 - 2 * intensity) * rgb ** 2",
    "docstring": "Combine an RGB image with an intensity map using \"soft light\" blending, using the \"pegtop\" formula. Parameters ---------- rgb : An (M, N, 3) RGB array of floats ranging from 0 to 1 (color image). intensity : An (M, N, 1) array of floats ranging from 0 to 1 (grayscale image). Returns ------- An (M, N, 3) RGB array representing the combined images.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\colors.py",
    "ast_data": "FunctionDef name:blend_soft_light arg:self arg:rgb arg:intensity arguments arg arg arg Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "release_pan",
    "source_code": "def release_pan(self, event):\n    if self._pan_info is None:\n        return\n    self.canvas.mpl_disconnect(self._pan_info.cid)\n    self._id_drag = self.canvas.mpl_connect('motion_notify_event', self.mouse_move)\n    for ax in self._pan_info.axes:\n        ax.end_pan()\n    self.canvas.draw_idle()\n    self._pan_info = None\n    self.push_current()",
    "docstring": "Callback for mouse button release in pan/zoom mode.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\backend_bases.py",
    "ast_data": "FunctionDef name:release_pan arg:self arg:event arguments arg arg If Compare Return return:no Call Assign Call For Call Call Assign Call"
  },
  {
    "library": "django",
    "name": "language",
    "source_code": "@register.tag\ndef language(parser, token):\n    bits = token.split_contents()\n    if len(bits) != 2:\n        raise TemplateSyntaxError(\"'%s' takes one argument (language)\" % bits[0])\n    language = parser.compile_filter(bits[1])\n    nodelist = parser.parse(('endlanguage',))\n    parser.delete_first_token()\n    return LanguageNode(nodelist, language)",
    "docstring": "Enable the given language just for this block. Usage:: {% language \"de\" %} This is {{ bar }} and {{ boo }}. {% endlanguage %}",
    "type": "function",
    "file_path": "django\\django\\templatetags\\i18n.py",
    "ast_data": "FunctionDef name:language arg:parser arg:token arguments arg arg Assign Call If Compare Call Raise Call Assign Call Assign Call Call Return return:yes Call"
  },
  {
    "library": "virtualenv",
    "name": "py_info_clear",
    "source_code": "def py_info_clear(self):\n    pass",
    "docstring": "Nothing to clear.",
    "type": "method",
    "file_path": "virtualenv\\src\\virtualenv\\app_data\\na.py",
    "ast_data": "FunctionDef name:py_info_clear arg:self arguments arg"
  },
  {
    "library": "matplotlib",
    "name": "locked_y0",
    "source_code": "@property\ndef locked_y0(self):\n    if self._locked_points.mask[0, 1]:\n        return None\n    else:\n        return self._locked_points[0, 1]",
    "docstring": "float or None: The value used for the locked y0.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\transforms.py",
    "ast_data": "FunctionDef name:locked_y0 arg:self arguments arg If Return return:no Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "evaluate_metrics",
    "source_code": "def evaluate_metrics(test_dataloader, sparse_model_metadata):\n    metadata = pd.read_csv(sparse_model_metadata)\n    device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')\n    metrics_dict: dict[str, list] = {'norm': [], 'sparse_block_shape': [], 'sparsity_level': [], 'precision': [], 'recall': [], 'f1': [], 'roc_auc': [], 'accuracy': [], 'log_loss': []}\n    for _, row in metadata.iterrows():\n        norm, sbs, sl = (row['norm'], row['sparse_block_shape'], row['sparsity_level'])\n        model_path = row['path']\n        model = fetch_model(model_path, device)\n        model_metrics = inference_and_evaluation(model, test_dataloader, device)\n        key = f'{norm}_{sbs}_{sl}'\n        print(key, '=', model_metrics)\n        metrics_dict['norm'].append(norm)\n        metrics_dict['sparse_block_shape'].append(sbs)\n        metrics_dict['sparsity_level'].append(sl)\n        for key, value in model_metrics.items():\n            if key in metrics_dict:\n                metrics_dict[key].append(value)\n    sparse_model_metrics = pd.DataFrame(metrics_dict)\n    print(sparse_model_metrics)\n    filename = 'sparse_model_metrics.csv'\n    sparse_model_metrics.to_csv(filename, index=False)\n    print(f'Model metrics file saved to {filename}')",
    "docstring": "Evaluates the metrics the sparsified metrics for the dlrm model on various sparsity levels, block shapes and norms. This function evaluates the model on the test dataset and dumps evaluation metrics in a csv file [model_performance.csv]",
    "type": "function",
    "file_path": "pytorch\\torch\\ao\\pruning\\_experimental\\data_sparsifier\\benchmarks\\evaluate_model_metrics.py",
    "ast_data": "FunctionDef name:evaluate_metrics arg:test_dataloader arg:sparse_model_metadata arguments arg arg Assign Call Assign Call Call Call For Call Assign Assign Assign Call Assign Call Assign Call Call Call Call For Call If Compare Call Assign Call Call Assign Call Call"
  },
  {
    "library": "numpy",
    "name": "data_as",
    "source_code": "def data_as(self, obj):\n    ptr = self._ctypes.cast(self._data, obj)\n    ptr._arr = self._arr\n    return ptr",
    "docstring": "Return the data pointer cast to a particular c-types object. For example, calling ``. The returned pointer will keep a reference to the array.",
    "type": "method",
    "file_path": "numpy\\numpy\\_core\\_internal.py",
    "ast_data": "FunctionDef name:data_as arg:self arg:obj arguments arg arg Assign Call Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "state_updates",
    "source_code": "@property\n@doc_controls.do_not_generate_docs\ndef state_updates(self):\n    warnings.warn('`Model.state_updates` will be removed in a future version. This property should not be used in TensorFlow 2.0, as `updates` are applied automatically.')\n    state_updates = []\n    for layer in self.layers:\n        if getattr(layer, 'stateful', False):\n            if hasattr(layer, 'updates'):\n                state_updates += layer.updates\n    return state_updates",
    "docstring": "Deprecated, do NOT use! Returns the from all layers that are stateful. This is useful for separating training updates and state updates, e.g. when we need to update a layer's internal state during prediction. Returns: A list of update ops.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\training.py",
    "ast_data": "FunctionDef name:state_updates arg:self arguments arg Call Assign For If Call If Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_broadcast_uniform_partitioned_dimension",
    "source_code": "def _broadcast_uniform_partitioned_dimension(self, axis, lengths):\n    axis_dim_size = self.dimension_size(axis)\n    partitioned_sizes = list(self._partitioned_dim_sizes[:axis])\n    if lengths.shape.ndims == 0:\n        lengths = array_ops.where(math_ops.equal(axis_dim_size, 1), lengths, axis_dim_size)\n        repeats = array_ops.where(math_ops.equal(axis_dim_size, 1), lengths, 1)\n        splits = array_ops_stack.stack([0, self.num_slices_in_dimension(axis)])\n    else:\n        splits = math_ops.range(array_ops.size(lengths, out_type=self.dim_size_dtype) + 1)\n        repeats = lengths\n    partitioned_sizes.append(lengths)\n    for dim_size in self._partitioned_dim_sizes[axis + 1:]:\n        if dim_size.shape.ndims == 0:\n            partitioned_sizes.append(dim_size)\n            splits *= dim_size\n        else:\n            partitioned_sizes.append(ragged_util.repeat_ranges(dim_size, splits, repeats))\n            splits = array_ops.gather(ragged_util.lengths_to_splits(dim_size), splits)\n    inner_sizes = self._inner_dim_sizes\n    return RaggedTensorDynamicShape(partitioned_sizes, inner_sizes, self.dim_size_dtype)",
    "docstring": "Broadcasts the partitioned dimension to match .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\ragged_tensor_shape.py",
    "ast_data": "FunctionDef name:_broadcast_uniform_partitioned_dimension arg:self arg:axis arg:lengths arguments arg arg arg Assign Call Assign Call If Compare Assign Call Call Assign Call Call Assign Call Call Assign Call Call Assign Call For If Compare Call Call Call Assign Call Call Assign Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "stop_gradient",
    "source_code": "@tf_export('stop_gradient')\n@dispatch.add_dispatch_support\ndef stop_gradient(input, name=None):\n    if isinstance(input, composite_tensor.CompositeTensor) and (not _pywrap_utils.IsResourceVariable(input)):\n        return nest.map_structure(stop_gradient, input, expand_composites=True)\n    with record.stop_recording():\n        return gen_array_ops.stop_gradient(input, name=name)",
    "docstring": "Stops gradient computation. NOTE: This docstring is patched out below. See tensorflow/core/api_def/base_api/api_def_StopGradient.pbtxt for the full docstring. That file determines the public documentation page. Args: input: A . name: A name for this operation. Returns: A . Has the same dtype as .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\array_ops.py",
    "ast_data": "FunctionDef name:stop_gradient arg:input arg:name arguments arg arg If BoolOp Call Call Return return:yes Call With Call Return return:yes Call Call"
  },
  {
    "library": "matplotlib",
    "name": "get_over",
    "source_code": "def get_over(self):\n    if not self._isinit:\n        self._init()\n    return np.array(self._lut[self._i_over])",
    "docstring": "Get the color for high out-of-range values.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\colors.py",
    "ast_data": "FunctionDef name:get_over arg:self arguments arg If Call Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "direction",
    "source_code": "@property\ndef direction(self):\n    return self._direction",
    "docstring": "Direction of the span selector: 'vertical' or 'horizontal'.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\widgets.py",
    "ast_data": "FunctionDef name:direction arg:self arguments arg Return return:yes"
  },
  {
    "library": "kornia",
    "name": "__init__",
    "source_code": "def __init__(self, shift: int=1) -> None:\n    super().__init__()\n    self._shift = shift",
    "docstring": "Initialize the renderer. Args: shift: Size of far-field layer: int",
    "type": "method",
    "file_path": "kornia\\kornia\\nerf\\volume_renderer.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:shift arguments arg arg Call Call Assign"
  },
  {
    "library": "pytorch",
    "name": "infer_size",
    "source_code": "def infer_size(total_size: int, sizes: Shape) -> Shape:\n    infers = [i for i, s in enumerate(sizes) if s == -1]\n    size = prod(sizes)\n    assert len(infers) <= 1, 'can only infer one size'\n    if infers:\n        size = -size\n        missing_size = total_size // size\n        assert total_size % size == 0, f'size inferred for -1 is not integral {sizes} should have {total_size} elements.'\n        return tuple((s if s != -1 else missing_size for s in sizes))\n    assert size == total_size, f'sizes do not match {total_size} vs {size}'\n    return sizes",
    "docstring": "One dimension input to view may be \"-1\". Infer the size of this dimension given the total_size.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\tensor\\_ops\\_view_ops.py",
    "ast_data": "FunctionDef name:infer_size arg:total_size arg:sizes arguments arg arg Assign Call Compare Assign Call Compare Call If Assign Assign Compare Return return:yes Call Compare Compare Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "_create_bitmap",
    "source_code": "def _create_bitmap(self):\n    rgba = self.get_renderer().buffer_rgba()\n    h, w, _ = rgba.shape\n    bitmap = wx.Bitmap.FromBufferRGBA(w, h, rgba)\n    bitmap.SetScaleFactor(self.GetDPIScaleFactor())\n    return bitmap",
    "docstring": "Create a wx.Bitmap from the renderer RGBA buffer",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\backends\\backend_wxagg.py",
    "ast_data": "FunctionDef name:_create_bitmap arg:self arguments arg Assign Call Call Assign Assign Call Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "copy_tensor_or_indexed_slices_to_device",
    "source_code": "def copy_tensor_or_indexed_slices_to_device(value, device):\n    with ops.device(device):\n        if isinstance(value, indexed_slices.IndexedSlices):\n            copied_values = array_ops.identity(value.values)\n            copied_indices = array_ops.identity(value.indices)\n            if value.dense_shape is not None:\n                copied_shape = array_ops.identity(value.dense_shape)\n            else:\n                copied_shape = None\n            result = indexed_slices.IndexedSlices(copied_values, copied_indices, copied_shape)\n        else:\n            result = array_ops.identity(value)\n    return result",
    "docstring": "Copies a tensor or IndexedSlices to a device.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\cross_device_utils.py",
    "ast_data": "FunctionDef name:copy_tensor_or_indexed_slices_to_device arg:value arg:device arguments arg arg With Call If Call Assign Call Assign Call If Compare Assign Call Assign Assign Call Assign Call Return return:yes"
  },
  {
    "library": "django",
    "name": "get_new_connection",
    "source_code": "def get_new_connection(self, conn_params):\n    raise NotImplementedError('subclasses of BaseDatabaseWrapper may require a get_new_connection() method')",
    "docstring": "Open a connection to the database.",
    "type": "method",
    "file_path": "django\\django\\db\\backends\\base\\base.py",
    "ast_data": "FunctionDef name:get_new_connection arg:self arg:conn_params arguments arg arg Raise Call"
  },
  {
    "library": "pytorch",
    "name": "set_module_name_object_type_order",
    "source_code": "def set_module_name_object_type_order(self, module_name: str, object_type: Callable, index: int, qconfig: QConfigAny) -> QConfigMapping:\n    self.module_name_object_type_order_qconfigs[module_name, object_type, index] = qconfig\n    return self",
    "docstring": "Set the QConfig for modules matching a combination of the given module name, object type, and the index at which the module appears. If the QConfig for an existing (module name, object type, index) was already set, the new QConfig will override the old one.",
    "type": "method",
    "file_path": "pytorch\\torch\\ao\\quantization\\qconfig_mapping.py",
    "ast_data": "FunctionDef name:set_module_name_object_type_order arg:self arg:module_name arg:object_type arg:index arg:qconfig arguments arg arg arg arg arg Assign Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "detect_fake_mode",
    "source_code": "def detect_fake_mode(inputs: Any=None):\n    from torch._subclasses.fake_tensor import FakeTensor, FakeTensorMode\n    fake_modes = []\n    if (context := TracingContext.try_get()):\n        fake_mode = context.fake_mode\n        if fake_mode is not None:\n            fake_modes.append((fake_mode, 'tracing context', 0))\n    from torch.utils._python_dispatch import _get_current_dispatch_mode_stack\n    for i, m in enumerate(reversed(_get_current_dispatch_mode_stack())):\n        if isinstance(m, FakeTensorMode):\n            fake_modes.append((m, 'active fake mode', i))\n    flat_inputs = pytree.tree_leaves(inputs)\n    for i, flat_input in enumerate(flat_inputs):\n        if isinstance(flat_input, FakeTensor):\n            fake_modes.append((flat_input.fake_mode, 'fake tensor input', i))\n    if fake_modes:\n        fake_mode, desc1, i1 = fake_modes[0]\n        for m, desc2, i2 in fake_modes[1:]:\n            assert fake_mode is m, f\"fake mode ({fake_mode}) from {desc1} {i1} doesn't match mode ({m}) from {desc2} {i2}\\n\\nfake mode from {desc1} {i1} allocated at:\\n{fake_mode.stack}\\nfake mode from {desc2} {i2} allocated at:\\n{m.stack}\"\n        return fake_mode\n    else:\n        return None",
    "docstring": "Attempts to \"detect\" what the current fake mode is. If there is one ambiently available from TracingContext, we preferentially use that. Otherwise, we heuristically detect the fake mode via the following sources, in order of priority: - Currently active fake mode on stack - Fake mode associated with passed in tensors (inputs does not have to be flattened)",
    "type": "function",
    "file_path": "pytorch\\torch\\_guards.py",
    "ast_data": "FunctionDef name:detect_fake_mode arg:inputs arguments arg Assign If Call Assign If Compare Call For Call Call Call If Call Call Assign Call For Call If Call Call If Assign For Compare Return return:yes Return return:no"
  },
  {
    "library": "tensorflow",
    "name": "build_nccl_then_recursive_hd",
    "source_code": "def build_nccl_then_recursive_hd(input_tensors, red_op, un_op=None):\n    upper_level_f = lambda x: build_recursive_hd_all_reduce(x, red_op, un_op)\n    return _build_nccl_hybrid(input_tensors, red_op, upper_level_f)",
    "docstring": "Construct hybrid of NCCL within workers, Recursive-HD across workers.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\v1\\all_reduce.py",
    "ast_data": "FunctionDef name:build_nccl_then_recursive_hd arg:input_tensors arg:red_op arg:un_op arguments arg arg arg Assign arguments arg Call Return return:yes Call"
  },
  {
    "library": "sphinx",
    "name": "relative_uri",
    "source_code": "def relative_uri(base: str, to: str) -> str:\n    if to.startswith(SEP):\n        return to\n    b2 = base.split('#')[0].split(SEP)\n    t2 = to.split('#')[0].split(SEP)\n    for x, y in zip(b2[:-1], t2[:-1], strict=False):\n        if x != y:\n            break\n        b2.pop(0)\n        t2.pop(0)\n    if b2 == t2:\n        return ''\n    if len(b2) == 1 and t2 == ['']:\n        return '.' + SEP\n    return ('..' + SEP) * (len(b2) - 1) + SEP.join(t2)",
    "docstring": "Return a relative URL from ``.",
    "type": "function",
    "file_path": "sphinx\\sphinx\\util\\osutil.py",
    "ast_data": "FunctionDef name:relative_uri arg:base arg:to arguments arg arg If Call Return return:yes Assign Call Call Assign Call Call For Call If Compare Call Call If Compare Return return:yes If BoolOp Compare Call Compare Return return:yes Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "normal",
    "source_code": "def normal(self, shape, mean=0.0, stddev=1.0, dtype=dtypes.float32, name=None):\n    with ops.name_scope(name, 'stateful_normal', [shape, mean, stddev]) as name:\n        shape = _shape_tensor(shape)\n        mean = ops.convert_to_tensor(mean, dtype=dtype, name='mean')\n        stddev = ops.convert_to_tensor(stddev, dtype=dtype, name='stddev')\n        rnd = self._standard_normal(shape, dtype=dtype)\n        return math_ops.add(rnd * stddev, mean, name=name)",
    "docstring": "Outputs random values from a normal distribution. Args: shape: A 1-D integer Tensor or Python array. The shape of the output tensor. mean: A 0-D Tensor or Python value of type . The mean of the normal distribution. stddev: A 0-D Tensor or Python value of type . The standard deviation of the normal distribution. dtype: The type of the output. name: A name for the operation (optional). Returns: A tensor of the specified shape filled with random normal values.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\stateful_random_ops.py",
    "ast_data": "FunctionDef name:normal arg:self arg:shape arg:mean arg:stddev arg:dtype arg:name arguments arg arg arg arg arg arg With Call Assign Call Assign Call Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "with_min_execution_time",
    "source_code": "def with_min_execution_time(self, min_micros=0, min_accelerator_micros=0, min_cpu_micros=0):\n    self._options['min_micros'] = min_micros\n    self._options['min_accelerator_micros'] = min_accelerator_micros\n    self._options['min_cpu_micros'] = min_cpu_micros\n    return self",
    "docstring": "Only show profiler nodes consuming no less than 'min_micros'. Args: min_micros: Only show profiler nodes with execution time no less than this. It sums accelerator and cpu times. min_accelerator_micros: Only show profiler nodes spend no less than this time on accelerator (e.g. GPU). min_cpu_micros: Only show profiler nodes spend no less than this time on cpu. Returns: self",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\profiler\\option_builder.py",
    "ast_data": "FunctionDef name:with_min_execution_time arg:self arg:min_micros arg:min_accelerator_micros arg:min_cpu_micros arguments arg arg arg arg Assign Assign Assign Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_ParamUsageInfo",
    "source_code": "class _ParamUsageInfo(NamedTuple):\n    module: nn.Module\n    named_params: list[tuple[str, nn.Parameter]]",
    "docstring": "This is used for `` follow the execution order.",
    "type": "class",
    "file_path": "pytorch\\torch\\distributed\\fsdp\\_trace_utils.py",
    "ast_data": "ClassDef name:_ParamUsageInfo"
  },
  {
    "library": "tensorflow",
    "name": "remove",
    "source_code": "def remove(self, token):\n    self._funcs.pop(token, None)",
    "docstring": "Removes the registered function corresponding to .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\script_ops.py",
    "ast_data": "FunctionDef name:remove arg:self arg:token arguments arg arg Call"
  },
  {
    "library": "pytorch",
    "name": "is_ucc_available",
    "source_code": "def is_ucc_available() -> bool:\n    return _UCC_AVAILABLE",
    "docstring": "Check if the UCC backend is available.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\distributed_c10d.py",
    "ast_data": "FunctionDef name:is_ucc_available arguments Return return:yes"
  },
  {
    "library": "django",
    "name": "receive_data_chunk",
    "source_code": "def receive_data_chunk(self, raw_data, start):\n    raise NotImplementedError('subclasses of FileUploadHandler must provide a receive_data_chunk() method')",
    "docstring": "Receive data from the streamed upload parser. `` is the position in the file of the chunk.",
    "type": "method",
    "file_path": "django\\django\\core\\files\\uploadhandler.py",
    "ast_data": "FunctionDef name:receive_data_chunk arg:self arg:raw_data arg:start arguments arg arg arg Raise Call"
  },
  {
    "library": "django",
    "name": "get_citext_oids",
    "source_code": "@functools.lru_cache\ndef get_citext_oids(connection_alias):\n    return get_type_oids(connection_alias, 'citext')",
    "docstring": "Return citext and citext array OIDs.",
    "type": "function",
    "file_path": "django\\django\\contrib\\postgres\\signals.py",
    "ast_data": "FunctionDef name:get_citext_oids arg:connection_alias arguments arg Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "write_top",
    "source_code": "def write_top(self, arr, name, is_global):\n    self._var_is_global = is_global\n    self._var_name = name\n    self.write(arr)",
    "docstring": "Write variable at top level of mat file Parameters ---------- arr : array_like array-like object to create writer for name : str, optional name as it will appear in matlab workspace default is empty string is_global : {False, True}, optional whether variable will be global on load into matlab",
    "type": "method",
    "file_path": "scipy\\scipy\\io\\matlab\\_mio5.py",
    "ast_data": "FunctionDef name:write_top arg:self arg:arr arg:name arg:is_global arguments arg arg arg arg Assign Assign Call"
  },
  {
    "library": "kornia",
    "name": "adjust_saturation",
    "source_code": "def adjust_saturation(image: Tensor, factor: Union[float, Tensor]) -> Tensor:\n    x_hsv: Tensor = rgb_to_hsv(image)\n    x_adjusted: Tensor = adjust_saturation_raw(x_hsv, factor)\n    out: Tensor = hsv_to_rgb(x_adjusted)\n    return out",
    "docstring": "Adjust color saturation of an image. .. image:: _static/img/adjust_saturation.png The image is expected to be an RGB image in the range of [0, 1]. Args: image: Image/Tensor to be adjusted in the shape of :math:. factor: How much to adjust the saturation. 0 will give a black and white image, 1 will give the original image while 2 will enhance the saturation by a factor of 2. saturation_mode: The mode to adjust saturation. Return: Adjusted image in the shape of :math:. .. note:: See a working example __. Example: >>> x = torch.ones(1, 3, 3, 3) >>> adjust_saturation(x, 2.).shape torch.Size([1, 3, 3, 3]) >>> x = torch.ones(2, 3, 3, 3) >>> y = torch.tensor([1., 2.]) >>> adjust_saturation(x, y).shape torch.Size([2, 3, 3, 3])",
    "type": "function",
    "file_path": "kornia\\kornia\\enhance\\adjust.py",
    "ast_data": "FunctionDef name:adjust_saturation arg:image arg:factor arguments arg arg Call Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_tf_core_map_structure_with_tuple_paths_up_to",
    "source_code": "def _tf_core_map_structure_with_tuple_paths_up_to(shallow_tree, func, *inputs, **kwargs):\n    if not inputs:\n        raise ValueError('Cannot map over no sequences')\n    check_types = kwargs.pop('check_types', True)\n    expand_composites = kwargs.pop('expand_composites', False)\n    is_nested_fn = _is_nested_or_composite if expand_composites else _tf_core_is_nested\n    for input_tree in inputs:\n        _tf_core_assert_shallow_structure(shallow_tree, input_tree, check_types=check_types, expand_composites=expand_composites)\n    flat_value_gen = (_tf_core_flatten_up_to(shallow_tree, input_tree, check_types, expand_composites=expand_composites) for input_tree in inputs)\n    flat_path_gen = (path for path, _ in _tf_core_yield_flat_up_to(shallow_tree, inputs[0], is_nested_fn))\n    results = [func(*args, **kwargs) for args in zip(flat_path_gen, *flat_value_gen)]\n    return _tf_core_pack_sequence_as(structure=shallow_tree, flat_sequence=results, expand_composites=expand_composites)",
    "docstring": "See comments for map_structure_with_tuple_paths_up_to() in tensorflow/python/util/nest.py.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\util\\nest_util.py",
    "ast_data": "FunctionDef name:_tf_core_map_structure_with_tuple_paths_up_to arg:shallow_tree arg:func arguments arg arg arg arg If Raise Call Assign Call Assign Call Assign For Call Assign Call Assign Call Assign Call Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_create_mirrored_tpu_replicated_variables",
    "source_code": "def _create_mirrored_tpu_replicated_variables(**kwargs):\n    initial_value = kwargs['initial_value']\n    with maybe_init_scope():\n        initial_value = initial_value() if callable(initial_value) else initial_value\n    mirrored_replicated_var_list = []\n    for replica_id in range(num_replicas):\n        replicated_var_list = []\n        for logic_core_id in range(num_cores_per_replica):\n            with ops.device(self._tpu_devices[replica_id][logic_core_id]):\n                kwargs['initial_value'] = initial_value\n                v = next_creator(**kwargs)\n            replicated_var_list.append(v)\n        replica_name = '{}/r:{}'.format(kwargs['name'], replica_id)\n        tpu_replicated_var = tpu_replicated_variable.TPUReplicatedVariable(variables=replicated_var_list, name=replica_name)\n        mirrored_replicated_var_list.append(tpu_replicated_var)\n    return mirrored_replicated_var_list",
    "docstring": "Returns a list of s. The list consists of s and can be used to initialize a . Each contains a list of s which are replicated to logical cores to enable XLA SPMD compilation. Args: **kwargs: the keyword arguments for creating a variable",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\tpu_strategy.py",
    "ast_data": "FunctionDef name:_create_mirrored_tpu_replicated_variables arguments arg Assign With Call Assign Call Call Assign For Call Assign For Call With Call Assign Assign Call Call Assign Call Assign Call Call Return return:yes"
  },
  {
    "library": "cherrypy",
    "name": "__init__",
    "source_code": "def __init__(self):\n    Tool.__init__(self, 'before_request_body', _sessions.init)",
    "docstring": "Initialize a session tool.",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\_cptools.py",
    "ast_data": "FunctionDef name:__init__ arg:self arguments arg Call"
  },
  {
    "library": "pytorch",
    "name": "make_subgraph",
    "source_code": "def make_subgraph(self, gm: torch.fx.GraphModule, example_inputs: list[torch.Tensor], subgraph_name: str) -> SubgraphLowering:\n    return SubgraphLowering(parent=self, gm=gm, example_inputs=example_inputs, shape_env=self._shape_env, cpp_wrapper=self.cpp_wrapper, aot_mode=self.aot_mode, extern_node_serializer=self.extern_node_serializer, is_inference=self.is_inference, is_backward=self.is_backward, name=self.qualify_name(subgraph_name))",
    "docstring": "Make a subgraph of the current graph with all inherited parts, except the graph module () and . The subgraphs are lowered separately and lifted into a separate function in the parent output wrapper code. The subgraph name is qualified by the parent graph's name. Note that the lifting of subgraph is supported for python wrapper only. For cpp wrapper, we inline the subgraphs in the parent wrapper.",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\graph.py",
    "ast_data": "FunctionDef name:make_subgraph arg:self arg:gm arg:example_inputs arg:subgraph_name arguments arg arg arg arg Return return:yes Call Call"
  },
  {
    "library": "pandas",
    "name": "select_as_coordinates",
    "source_code": "def select_as_coordinates(self, key: str, where=None, start: int | None=None, stop: int | None=None):\n    where = _ensure_term(where, scope_level=1)\n    tbl = self.get_storer(key)\n    if not isinstance(tbl, Table):\n        raise TypeError('can only read_coordinates with a table')\n    return tbl.read_coordinates(where=where, start=start, stop=stop)",
    "docstring": "return the selection as an Index .. warning:: Pandas uses PyTables for reading and writing HDF5 files, which allows serializing object-dtype data with pickle when using the \"fixed\" format. Loading pickled data received from untrusted sources can be unsafe. See: for more. Parameters ---------- key : str where : list of Term (or convertible) objects, optional start : integer (defaults to None), row number to start selection stop : integer (defaults to None), row number to stop selection",
    "type": "method",
    "file_path": "pandas\\pandas\\io\\pytables.py",
    "ast_data": "FunctionDef name:select_as_coordinates arg:self arg:key arg:where arg:start arg:stop arguments arg arg arg arg arg Assign Call Assign Call If Call Raise Call Return return:yes Call"
  },
  {
    "library": "cherrypy",
    "name": "__init__",
    "source_code": "def __init__(self, id=None, **kwargs):\n    kwargs['storage_path'] = os.path.abspath(kwargs['storage_path'])\n    kwargs.setdefault('lock_timeout', None)\n    Session.__init__(self, id=id, **kwargs)\n    if isinstance(self.lock_timeout, (int, float)):\n        self.lock_timeout = datetime.timedelta(seconds=self.lock_timeout)\n    if not isinstance(self.lock_timeout, (datetime.timedelta, type(None))):\n        raise ValueError('Lock timeout must be numeric seconds or a timedelta instance.')",
    "docstring": "Prepare the file session store.",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\lib\\sessions.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:id arguments arg arg arg Assign Call Call Call If Call Assign Call If Call Call Raise Call"
  },
  {
    "library": "scipy",
    "name": "generate_decl_c",
    "source_code": "def generate_decl_c(name, return_type, argnames, argtypes, accelerate):\n    c_return_type = C_TYPES[return_type]\n    c_argtypes = [C_TYPES[t] for t in argtypes]\n    if name in WRAPPED_FUNCS:\n        argnames = ['out'] + argnames\n        c_argtypes = [c_return_type] + c_argtypes\n        c_return_type = 'void'\n    blas_macro, blas_name = get_blas_macro_and_name(name, accelerate)\n    c_args = ', '.join((f'{t} *{n}' for t, n in zip(c_argtypes, argnames)))\n    return f'{c_return_type} {blas_macro}({blas_name})({c_args});\\n'",
    "docstring": "Create C header declarations for Cython to import.",
    "type": "function",
    "file_path": "scipy\\scipy\\linalg\\_generate_pyx.py",
    "ast_data": "FunctionDef name:generate_decl_c arg:name arg:return_type arg:argnames arg:argtypes arg:accelerate arguments arg arg arg arg arg Assign Assign If Compare Assign Assign Assign Assign Call Assign Call Call Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "split",
    "source_code": "def split(self, X=None, y=None, groups=None):\n    for train, test in self.cv:\n        yield (train, test)",
    "docstring": "Generate indices to split data into training and test set. Parameters ---------- X : object Always ignored, exists for compatibility. y : object Always ignored, exists for compatibility. groups : object Always ignored, exists for compatibility. Yields ------ train : ndarray The training set indices for that split. test : ndarray The testing set indices for that split.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\model_selection\\_split.py",
    "ast_data": "FunctionDef name:split arg:self arg:X arg:y arg:groups arguments arg arg arg arg For"
  },
  {
    "library": "pytorch",
    "name": "Repeat",
    "source_code": "@dataclass\nclass Repeat(DimSpec):\n    input_dim: DimSpec\n    times: int\n\n    @classmethod\n    def new(cls, dim: DimSpec, times: int) -> DimSpec:\n        if times == 1:\n            return dim\n        elif isinstance(dim, Singleton):\n            return Broadcast(dim, times)\n        else:\n            return Repeat(dim, times)\n\n    def inputs(self) -> Iterable[DimSpec]:\n        return (self.input_dim,)",
    "docstring": "Output dimension is the input dimension repeated n-times.",
    "type": "class",
    "file_path": "pytorch\\torch\\distributed\\tensor\\_ops\\_view_ops.py",
    "ast_data": "ClassDef name:Repeat FunctionDef name:new arg:cls arg:dim arg:times arguments arg arg arg If Compare Return return:yes If Call Return return:yes Call Return return:yes Call FunctionDef name:inputs arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, learning_rate, l1_regularization_strength=0.0, l2_regularization_strength=0.0, use_locking=False, name='ProximalGradientDescent'):\n    super(ProximalGradientDescentOptimizer, self).__init__(use_locking, name)\n    self._learning_rate = learning_rate\n    self._l1_regularization_strength = l1_regularization_strength\n    self._l2_regularization_strength = l2_regularization_strength\n    self._l1_regularization_strength_tensor = None\n    self._l2_regularization_strength_tensor = None",
    "docstring": "Construct a new proximal gradient descent optimizer. Args: learning_rate: A Tensor or a floating point value. The learning rate to use. l1_regularization_strength: A float value, must be greater than or equal to zero. l2_regularization_strength: A float value, must be greater than or equal to zero. use_locking: If True use locks for update operations. name: Optional name prefix for the operations created when applying gradients. Defaults to \"GradientDescent\".",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\training\\proximal_gradient_descent.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:learning_rate arg:l1_regularization_strength arg:l2_regularization_strength arg:use_locking arg:name arguments arg arg arg arg arg arg Call Call Assign Assign Assign Assign Assign"
  },
  {
    "library": "scikit-learn",
    "name": "_log_normalize",
    "source_code": "def _log_normalize(X):\n    X = make_nonnegative(X, min_value=1)\n    if issparse(X):\n        raise ValueError('Cannot compute log of a sparse matrix, because log(x) diverges to -infinity as x goes to 0.')\n    L = np.log(X)\n    row_avg = L.mean(axis=1)[:, np.newaxis]\n    col_avg = L.mean(axis=0)\n    avg = L.mean()\n    return L - row_avg - col_avg + avg",
    "docstring": "Normalize `` according to Kluger's log-interactions scheme.",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\cluster\\_bicluster.py",
    "ast_data": "FunctionDef name:_log_normalize arg:X arguments arg Assign Call If Call Raise Call Assign Call Assign Call Assign Call Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, name: Union[str, bytes], bound_context: context.Context, function_type: function_type_lib.FunctionType, children: Optional[List['AtomicFunction']]=None, call_options: CallOptions=CallOptions(), cached_graph: Optional[func_graph_module.FuncGraph]=None):\n    self._name = compat.as_bytes(name)\n    self._bound_context = bound_context\n    self._function_type = function_type\n    self._children = children if children else []\n    self._call_options = call_options\n    self._cached_definition = None\n    self._cached_graph = cached_graph\n    self._generated_graph = None\n    ref_key = (self._bound_context.function_scope_id, self.name)\n    if ref_key not in RUNTIME_FUNCTION_REFS:\n        RUNTIME_FUNCTION_REFS[ref_key] = 1\n    else:\n        RUNTIME_FUNCTION_REFS[ref_key] += 1",
    "docstring": "Construct a new AtomicFunction. Args: name: str/bytes name of the runtime function in the bound context. bound_context: interface to the runtime for the AtomicFunction. function_type: input/output contract for the AtomicFunction children: list of AtomicFunctions that are needed to call this one. call_options: extra configuration options for the call. cached_graph: FuncGraph that this AtomicFunction was generated from (if known). Otherwise it will lazily construct a new corresponding FuncGraph if ever needed.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\polymorphic_function\\atomic_function.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:name arg:bound_context arg:function_type arg:children arg:call_options arg:cached_graph arguments arg arg arg arg arg arg arg Call Assign Call Assign Assign Assign Assign Assign Assign Assign Assign If Compare Assign"
  },
  {
    "library": "pytorch",
    "name": "LocalGeneratorFunctionVariable",
    "source_code": "class LocalGeneratorFunctionVariable(BaseUserFunctionVariable):\n\n    def __init__(self, vt: VariableTracker, *, generator_cls=LocalGeneratorObjectVariable, **kwargs):\n        super().__init__(**kwargs)\n        self.vt = vt\n        self.generator_cls = generator_cls\n\n    def __getattr__(self, name):\n        if name in self.__class__.__dict__.keys():\n            return getattr(self, name)\n        return getattr(self.vt, name)\n\n    def _build_inline_tracer(self, tx, args, kwargs):\n        from torch._dynamo.symbolic_convert import InliningInstructionTranslator\n        return InliningInstructionTranslator.build_inline_tracer(tx, self, args, kwargs)\n\n    def call_function(self, tx: 'InstructionTranslator', args: 'list[VariableTracker]', kwargs: 'dict[str, VariableTracker]') -> 'VariableTracker':\n        assert is_generator(self.vt.get_code())\n        inline_tracer = self._build_inline_tracer(tx, args, kwargs)\n        code = self.vt.get_code()\n        f_globals = self.vt.get_globals()\n        return self.generator_cls(code, f_globals, inline_tracer, source=self.source)",
    "docstring": "functions that behaves like iterators .. note:: This is a wrapper around (Nested)UserFunctionVariable",
    "type": "class",
    "file_path": "pytorch\\torch\\_dynamo\\variables\\functions.py",
    "ast_data": "ClassDef name:LocalGeneratorFunctionVariable FunctionDef name:__init__ arg:self arg:vt arguments arg arg arg arg Call Call Assign Assign FunctionDef name:__getattr__ arg:self arg:name arguments arg arg If Compare Call Return return:yes Call Return return:yes Call FunctionDef name:_build_inline_tracer arg:self arg:tx arg:args arg:kwargs arguments arg arg arg arg Return return:yes Call FunctionDef name:call_function arg:self arg:tx arg:args arg:kwargs arguments arg arg arg arg Call Call Assign Call Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "all_candidate_sampler",
    "source_code": "@tf_export('random.all_candidate_sampler', 'nn.all_candidate_sampler')\ndef all_candidate_sampler(true_classes, num_true, num_sampled, unique, seed=None, name=None):\n    seed1, seed2 = random_seed.get_seed(seed)\n    return gen_candidate_sampling_ops.all_candidate_sampler(true_classes, num_true, num_sampled, unique, seed=seed1, seed2=seed2, name=name)",
    "docstring": "Generate the set of all classes. Deterministically generates and returns the set of all possible classes. For testing purposes. There is no need to use this, since you might as well use full softmax or full logistic regression. Args: true_classes: A of type and shape . The target classes. num_true: An . The number of target classes per training example. num_sampled: An . The number of possible classes. unique: A . Ignored. unique. seed: An . An operation-specific seed. Default is 0. name: A name for the operation (optional). Returns: sampled_candidates: A tensor of type and shape . This operation deterministically returns the entire range . true_expected_count: A tensor of type . Same shape as . The expected counts under the sampling distribution of each of . All returned values are 1.0. sampled_expected_count: A tensor of type . Same shape as . The expected counts under the sampling distribution of each of . All returned values are 1.0.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\candidate_sampling_ops.py",
    "ast_data": "FunctionDef name:all_candidate_sampler arg:true_classes arg:num_true arg:num_sampled arg:unique arg:seed arg:name arguments arg arg arg arg arg arg Assign Call Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "emit_obj_snapshot",
    "source_code": "def emit_obj_snapshot(self, category: str, name: str, timestamp: int, pid: int, tid: int, object_id: int, snapshot: Dict[str, Any]) -> None:\n    event = self._create_event('O', category, name, pid, tid, timestamp)\n    event['id'] = object_id\n    event['args'] = {'snapshot': snapshot}\n    self._events.append(event)",
    "docstring": "Adds an object snapshot event to the trace. Args: category: The event category as a string. name: The event name as a string. timestamp: The timestamp of this event as a long integer. pid: Identifier of the process generating this event as an integer. tid: Identifier of the thread generating this event as an integer. object_id: Identifier of the object as an integer. snapshot: A JSON-compatible representation of the object.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\client\\timeline.py",
    "ast_data": "FunctionDef name:emit_obj_snapshot arg:self arg:category arg:name arg:timestamp arg:pid arg:tid arg:object_id arg:snapshot arguments arg arg arg arg arg arg arg arg Assign Call Assign Assign Call"
  },
  {
    "library": "cherrypy",
    "name": "find_config",
    "source_code": "def find_config(self, path, key, default=None):\n    trail = path or '/'\n    while trail:\n        nodeconf = self.config.get(trail, {})\n        if key in nodeconf:\n            return nodeconf[key]\n        lastslash = trail.rfind('/')\n        if lastslash == -1:\n            break\n        elif lastslash == 0 and trail != '/':\n            trail = '/'\n        else:\n            trail = trail[:lastslash]\n    return default",
    "docstring": "Return the most-specific value for key along path, or default.",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\_cptree.py",
    "ast_data": "FunctionDef name:find_config arg:self arg:path arg:key arg:default arguments arg arg arg arg Assign BoolOp While Assign Call If Compare Return return:yes Assign Call If Compare If BoolOp Compare Compare Assign Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_is_all_finite",
    "source_code": "def _is_all_finite(grads):\n\n    def raw_values(g):\n        return g.values if isinstance(g, indexed_slices.IndexedSlices) else g\n    is_finite_per_grad = [math_ops.reduce_all(math_ops.is_finite(raw_values(g))) for g in grads if g is not None]\n    return math_ops.reduce_all(is_finite_per_grad)",
    "docstring": "Returns a scalar boolean tensor indicating if all gradients are finite.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\mixed_precision\\loss_scale_optimizer.py",
    "ast_data": "FunctionDef name:_is_all_finite arg:grads arguments arg FunctionDef name:raw_values arg:g arguments arg Return return:yes Call Assign Call Call Call Compare Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "_iter_break_from_left_to_right",
    "source_code": "def _iter_break_from_left_to_right(self):\n    yield (IdentityTransform(), self)",
    "docstring": "Return an iterator breaking down this transform stack from left to right recursively. If self == ((A, N), A) then the result will be an iterator which yields I : ((A, N), A), followed by A : (N, A), followed by (A, N) : (A), but not ((A, N), A) : I. This is equivalent to flattening the stack then yielding `` where i=0..(n-1).",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\transforms.py",
    "ast_data": "FunctionDef name:_iter_break_from_left_to_right arg:self arguments arg Call"
  },
  {
    "library": "numpy",
    "name": "_to_bytes_or_str_array",
    "source_code": "def _to_bytes_or_str_array(result, output_dtype_like):\n    output_dtype_like = np.asarray(output_dtype_like)\n    if result.size == 0:\n        return result.astype(output_dtype_like.dtype)\n    ret = np.asarray(result.tolist())\n    if isinstance(output_dtype_like.dtype, np.dtypes.StringDType):\n        return ret.astype(type(output_dtype_like.dtype))\n    return ret.astype(type(output_dtype_like.dtype)(_get_num_chars(ret)))",
    "docstring": "Helper function to cast a result back into an array with the appropriate dtype if an object array must be used as an intermediary.",
    "type": "function",
    "file_path": "numpy\\numpy\\_core\\strings.py",
    "ast_data": "FunctionDef name:_to_bytes_or_str_array arg:result arg:output_dtype_like arguments arg arg Assign Call If Compare Return return:yes Call Assign Call Call If Call Return return:yes Call Call Return return:yes Call Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "get_gradient_edge",
    "source_code": "def get_gradient_edge(tensor: torch.Tensor) -> GradientEdge:\n    if not tensor.requires_grad:\n        raise RuntimeError('It is not possible to get the gradient edge for a Tensor that does not require gradients')\n    grad_fn = _get_grad_fn_or_grad_acc(tensor)\n    return GradientEdge(grad_fn, tensor.output_nr)",
    "docstring": "Get the gradient edge for computing the gradient of the given Tensor. In particular, it is equivalent to call ``.",
    "type": "function",
    "file_path": "pytorch\\torch\\autograd\\graph.py",
    "ast_data": "FunctionDef name:get_gradient_edge arg:tensor arguments arg If Raise Call Assign Call Return return:yes Call"
  },
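A small usage sketch (assumes a recent PyTorch; `x` and `y` are made-up values, and `GradientEdge` is a named tuple of `node` and `output_nr`):

```python
import torch
from torch.autograd.graph import get_gradient_edge

x = torch.tensor([2.0], requires_grad=True)
y = (x * 3).sum()
edge = get_gradient_edge(y)      # wraps y.grad_fn and y's output index
print(edge.node, edge.output_nr) # e.g. <SumBackward0 ...> 0
# Recent PyTorch versions also accept GradientEdge objects in
# torch.autograd.grad where tensors would otherwise be passed.
```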
  {
    "library": "tensorflow",
    "name": "_build_spec_constructor",
    "source_code": "def _build_spec_constructor(cls):\n    params = []\n    kind = tf_inspect.Parameter.POSITIONAL_OR_KEYWORD\n    for field in cls._tf_extension_type_fields():\n        params.append(tf_inspect.Parameter(field.name, kind))\n    signature = tf_inspect.Signature(params, return_annotation=cls.__name__)\n\n    def __init__(self, *args, **kwargs):\n        bound_args = signature.bind(*args, **kwargs)\n        bound_args.apply_defaults()\n        self.__dict__.update(bound_args.arguments)\n        self._tf_extension_type_convert_fields()\n        self.__validate__()\n    __init__.__signature__ = tf_inspect.Signature([tf_inspect.Parameter('self', tf_inspect.Parameter.POSITIONAL_OR_KEYWORD)] + params, return_annotation=cls)\n    cls.__init__ = __init__",
    "docstring": "Builds a constructor for ExtensionTypeSpec subclass .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\extension_type.py",
    "ast_data": "FunctionDef name:_build_spec_constructor arg:cls arguments arg Assign Assign For Call Call Call Assign Call FunctionDef name:__init__ arg:self arguments arg arg arg Assign Call Call Call Call Call Assign Call Call Assign"
  },
  {
    "library": "pandas",
    "name": "_is_numeric",
    "source_code": "@property\ndef _is_numeric(self) -> bool:\n    return pa.types.is_integer(self.pyarrow_dtype) or pa.types.is_floating(self.pyarrow_dtype) or pa.types.is_decimal(self.pyarrow_dtype)",
    "docstring": "Whether columns with this dtype should be considered numeric.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\dtypes\\dtypes.py",
    "ast_data": "FunctionDef name:_is_numeric arg:self arguments arg Return return:yes BoolOp Call Call Call"
  },
  {
    "library": "numpy",
    "name": "_preprocess_dtype",
    "source_code": "def _preprocess_dtype(dtype):\n    if isinstance(dtype, ma.dtype):\n        dtype = dtype.type\n    if isinstance(dtype, ndarray) or dtype not in allTypes.values():\n        raise _PreprocessDTypeError\n    return dtype",
    "docstring": "Preprocess dtype argument by: 1. fetching type from a data type 2. verifying that types are built-in NumPy dtypes",
    "type": "function",
    "file_path": "numpy\\numpy\\_core\\numerictypes.py",
    "ast_data": "FunctionDef name:_preprocess_dtype arg:dtype arguments arg If Call Assign If BoolOp Call Compare Call Raise Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "want_no_x_dim",
    "source_code": "@staticmethod\ndef want_no_x_dim(features: SIMDKernelFeatures) -> bool:\n    return features.get_reduction_hint() == ReductionHint.INNER and V.graph.sizevars.statically_known_geq(features.reduction_numel, 256)",
    "docstring": "Heuristic to decide if we should drop the X dimension from a persistent reduction kernel. So the [XBLOCK, RBLOCK] block becomes a [RBLOCK] block and XBLOCK is forced to be always 1. Strangely this is faster than a [1, RBLOCK] block in some cases.",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\choices.py",
    "ast_data": "FunctionDef name:want_no_x_dim arg:features arguments arg Return return:yes BoolOp Compare Call Call"
  },
  {
    "library": "pytorch",
    "name": "update",
    "source_code": "def update(self, new_scale: Optional[Union[float, torch.Tensor]]=None) -> None:\n    if not self._enabled:\n        return\n    _scale, _growth_tracker = self._check_scale_growth_tracker('update')\n    if new_scale is not None:\n        if isinstance(new_scale, float):\n            self._scale.fill_(new_scale)\n        else:\n            reason = 'new_scale should be a float or a 1-element torch.cuda.FloatTensor or                     torch.FloatTensor with requires_grad=False.'\n            assert new_scale.device.type == self._device, reason\n            assert new_scale.numel() == 1, reason\n            assert new_scale.requires_grad is False, reason\n            self._scale.copy_(new_scale)\n    else:\n        found_infs = [found_inf.to(device=_scale.device, non_blocking=True) for state in self._per_optimizer_states.values() for found_inf in state['found_inf_per_device'].values()]\n        assert len(found_infs) > 0, 'No inf checks were recorded prior to update.'\n        found_inf_combined = found_infs[0]\n        if len(found_infs) > 1:\n            for i in range(1, len(found_infs)):\n                found_inf_combined += found_infs[i]\n        if _scale.device.type == 'cpu':\n            self._amp_update_scale_cpu_(found_inf_combined)\n        else:\n            torch._amp_update_scale_(self._scale, self._growth_tracker, found_inf_combined, self._growth_factor, self._backoff_factor, self._growth_interval)\n    self._per_optimizer_states = defaultdict(_refresh_per_optimizer_state)",
    "docstring": "Updates the scale factor. If any optimizer steps were skipped the scale is multiplied by `torch.Tensorupdate` has been invoked for all optimizers used this iteration.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\fsdp\\sharded_grad_scaler.py",
    "ast_data": "FunctionDef name:update arg:self arg:new_scale arguments arg arg If Return return:no Assign Call If Compare If Call Call Assign Compare Compare Call Compare Call Assign Call Call Call Compare Call Assign If Compare Call For Call Call If Compare Call Call Assign Call"
  },
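A sketch of the scale/step/update protocol this method participates in, using the non-sharded `torch.cuda.amp.GradScaler` as a stand-in (the model, data, and `enabled=False` CPU fallback are illustrative assumptions):

```python
import torch

model = torch.nn.Linear(4, 1)
opt = torch.optim.SGD(model.parameters(), lr=0.1)
# enabled=False lets the sketch run without a GPU; on CUDA use enabled=True.
scaler = torch.cuda.amp.GradScaler(enabled=False)

for _ in range(2):
    opt.zero_grad()
    loss = model(torch.randn(8, 4)).pow(2).mean()
    scaler.scale(loss).backward()  # backward on the scaled loss
    scaler.step(opt)               # skipped if infs/NaNs were found in grads
    scaler.update()                # grow the scale, or back off after a skip
```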
  {
    "library": "pytorch",
    "name": "set_device",
    "source_code": "def set_device(device: _device_t) -> None:\n    pass",
    "docstring": "Sets the current device, in CPU we do nothing. N.B. This function only exists to facilitate device-agnostic code",
    "type": "function",
    "file_path": "pytorch\\torch\\cpu\\__init__.py",
    "ast_data": "FunctionDef name:set_device arg:device arguments arg"
  },
  {
    "library": "tensorflow",
    "name": "covariance",
    "source_code": "def covariance(self, name='covariance'):\n    with self._name_scope(name):\n        return self._covariance()",
    "docstring": "Covariance. Covariance is (possibly) defined only for non-scalar-event distributions. For example, for a length-, vector-valued distribution, it is calculated as, where is a (batch of) matrix, , and denotes expectation. Alternatively, for non-vector, multivariate distributions (e.g., matrix-valued, Wishart), shall return a (batch of) matrices under some vectorization of the events, i.e., where is a (batch of) matrices, , and is some function mapping indices of this distribution's event dimensions to indices of a length- vector. Args: name: Python prepended to names of ops created by this function. Returns: covariance: Floating-point with shape where the first dimensions are batch coordinates and .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\distributions\\distribution.py",
    "ast_data": "FunctionDef name:covariance arg:self arg:name arguments arg arg With Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "get_max_tuning_iterations",
    "source_code": "def get_max_tuning_iterations() -> int:\n    return torch._C._cuda_tunableop_get_max_tuning_iterations()",
    "docstring": "Get max iterations to spend tuning a given solution.",
    "type": "function",
    "file_path": "pytorch\\torch\\cuda\\tunable.py",
    "ast_data": "FunctionDef name:get_max_tuning_iterations arguments Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "find_result_type",
    "source_code": "def find_result_type(left_dtype: DtypeObj, right: Any) -> DtypeObj:\n    new_dtype: DtypeObj\n    if isinstance(left_dtype, np.dtype) and left_dtype.kind in 'iuc' and (lib.is_integer(right) or lib.is_float(right)):\n        if lib.is_float(right) and right.is_integer() and (left_dtype.kind != 'f'):\n            right = int(right)\n        if isinstance(right, int) and (not isinstance(right, np.integer)):\n            right_dtype = np.min_scalar_type(right)\n            if right == 0:\n                right = left_dtype\n            elif not np.issubdtype(left_dtype, np.unsignedinteger) and 0 < right <= np.iinfo(right_dtype).max:\n                right = np.dtype(f'i{right_dtype.itemsize}')\n            else:\n                right = right_dtype\n        new_dtype = np.result_type(left_dtype, right)\n    elif is_valid_na_for_dtype(right, left_dtype):\n        new_dtype = ensure_dtype_can_hold_na(left_dtype)\n    else:\n        dtype, _ = infer_dtype_from(right)\n        new_dtype = find_common_type([left_dtype, dtype])\n    return new_dtype",
    "docstring": "Find the type/dtype for the result of an operation between objects. This is similar to find_common_type, but looks at the right object instead of just its dtype. This can be useful in particular when the right object does not have a . Parameters ---------- left_dtype : np.dtype or ExtensionDtype right : Any Returns ------- np.dtype or ExtensionDtype See also -------- find_common_type numpy.result_type",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\dtypes\\cast.py",
    "ast_data": "FunctionDef name:find_result_type arg:left_dtype arg:right arguments arg arg If BoolOp Call Compare BoolOp Call Call If BoolOp Call Call Compare Assign Call If BoolOp Call Call Assign Call If Compare Assign If BoolOp Call Compare Call Assign Call Assign Assign Call If Call Assign Call Assign Call Assign Call Return return:yes"
  },
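A short demonstration of the promotion behavior this helper drives, observable through public pandas arithmetic (the exact wider dtype can vary by platform and pandas version):

```python
import pandas as pd

s = pd.Series([1, 2, 3], dtype="int8")
# A scalar that fits keeps the small dtype; a large one forces promotion,
# mirroring the min_scalar_type/result_type logic above.
print((s + 1).dtype)          # int8
print((s + 1_000_000).dtype)  # a wider signed integer dtype (int32 here)
```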
  {
    "library": "django",
    "name": "paginator_number",
    "source_code": "@register.simple_tag\ndef paginator_number(cl, i):\n    if i == cl.paginator.ELLIPSIS:\n        return format_html('{} ', cl.paginator.ELLIPSIS)\n    elif i == cl.page_num:\n        return format_html('<span class=\"this-page\">{}</span> ', i)\n    else:\n        return format_html('<a role=\"button\" href=\"{}\"{}>{}</a> ', cl.get_query_string({PAGE_VAR: i}), mark_safe(' class=\"end\"' if i == cl.paginator.num_pages else ''), i)",
    "docstring": "Generate an individual page index link in a paginated list.",
    "type": "function",
    "file_path": "django\\django\\contrib\\admin\\templatetags\\admin_list.py",
    "ast_data": "FunctionDef name:paginator_number arg:cl arg:i arguments arg arg If Compare Return return:yes Call If Compare Return return:yes Call Return return:yes Call Call Call Compare"
  },
  {
    "library": "matplotlib",
    "name": "_get_interpolating_points",
    "source_code": "@classmethod\ndef _get_interpolating_points(cls, t, f1, f2, idx):\n    im1 = max(idx - 1, 0)\n    t_values = t[im1:idx + 1]\n    diff_values = f1[im1:idx + 1] - f2[im1:idx + 1]\n    f1_values = f1[im1:idx + 1]\n    if len(diff_values) == 2:\n        if np.ma.is_masked(diff_values[1]):\n            return (t[im1], f1[im1])\n        elif np.ma.is_masked(diff_values[0]):\n            return (t[idx], f1[idx])\n    diff_root_t = cls._get_diff_root(0, diff_values, t_values)\n    diff_root_f = cls._get_diff_root(diff_root_t, t_values, f1_values)\n    return (diff_root_t, diff_root_f)",
    "docstring": "Calculate interpolating points.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\collections.py",
    "ast_data": "FunctionDef name:_get_interpolating_points arg:cls arg:t arg:f1 arg:f2 arg:idx arguments arg arg arg arg arg Assign Call Assign Assign Assign If Compare Call If Call Return return:yes If Call Return return:yes Assign Call Assign Call Return return:yes"
  },
  {
    "library": "pandas",
    "name": "inspect_excel_format",
    "source_code": "@doc(storage_options=_shared_docs['storage_options'])\ndef inspect_excel_format(content_or_path: FilePath | ReadBuffer[bytes], storage_options: StorageOptions | None=None) -> str | None:\n    with get_handle(content_or_path, 'rb', storage_options=storage_options, is_text=False) as handle:\n        stream = handle.handle\n        stream.seek(0)\n        buf = stream.read(PEEK_SIZE)\n        if buf is None:\n            raise ValueError('stream is empty')\n        assert isinstance(buf, bytes)\n        peek = buf\n        stream.seek(0)\n        if any((peek.startswith(sig) for sig in XLS_SIGNATURES)):\n            return 'xls'\n        elif not peek.startswith(ZIP_SIGNATURE):\n            return None\n        with zipfile.ZipFile(stream) as zf:\n            component_names = {name.replace('\\\\', '/').lower() for name in zf.namelist()}\n        if 'xl/workbook.xml' in component_names:\n            return 'xlsx'\n        if 'xl/workbook.bin' in component_names:\n            return 'xlsb'\n        if 'content.xml' in component_names:\n            return 'ods'\n        return 'zip'",
    "docstring": "Inspect the path or content of an excel file and get its format. Adopted from xlrd: Parameters ---------- content_or_path : str or file-like object Path to file or content of file to inspect. May be a URL. {storage_options} Returns ------- str or None Format of file if it can be determined. Raises ------ ValueError If resulting stream is empty. BadZipFile If resulting stream does not have an XLS signature and is not a valid zipfile.",
    "type": "function",
    "file_path": "pandas\\pandas\\io\\excel\\_base.py",
    "ast_data": "FunctionDef name:inspect_excel_format arg:content_or_path arg:storage_options arguments arg arg With Call Assign Call Assign Call If Compare Raise Call Call Assign Call If Call Call Return return:yes If Call Return return:no With Call Assign Call Call Call If Compare Return return:yes If Compare Return return:yes If Compare Return return:yes Return return:yes Call"
  },
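A sketch of the signature sniffing on a synthetic in-memory container (note this imports a private pandas helper, per the file_path above, so the import path is not a stable public API):

```python
import io
import zipfile
from pandas.io.excel._base import inspect_excel_format  # private helper

# Build an in-memory zip with the xlsx marker entry; only the container
# structure matters for format sniffing, not real workbook content.
buf = io.BytesIO()
with zipfile.ZipFile(buf, "w") as zf:
    zf.writestr("xl/workbook.xml", "<workbook/>")
buf.seek(0)
print(inspect_excel_format(buf))  # 'xlsx'
```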
  {
    "library": "tensorflow",
    "name": "__iadd__",
    "source_code": "def __iadd__(self, y):\n    return self.__wrapped__ + y",
    "docstring": "Avoid running self.__wrapped__ += y, which mutates .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\trackable\\data_structures.py",
    "ast_data": "FunctionDef name:__iadd__ arg:self arg:y arguments arg arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "concat",
    "source_code": "def concat(*combined):\n    result = []\n    for one in combined:\n        result += one\n    return result",
    "docstring": "Concats combinations.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\combinations.py",
    "ast_data": "FunctionDef name:concat arguments arg Assign For Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "get_lines",
    "source_code": "def get_lines(self):\n    return cbook.silent_list('Line2D', self.lines)",
    "docstring": "Return a list of lines contained by the Axes.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\axes\\_base.py",
    "ast_data": "FunctionDef name:get_lines arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "_get_fsdp_root_states",
    "source_code": "def _get_fsdp_root_states(module: nn.Module) -> list[_FSDPState]:\n    fsdp_root_states, _ = _get_fsdp_root_states_with_modules(module)\n    return fsdp_root_states",
    "docstring": "See :func:.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\fsdp\\_runtime_utils.py",
    "ast_data": "FunctionDef name:_get_fsdp_root_states arg:module arguments arg Assign Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "get_rendezvous_handler",
    "source_code": "def get_rendezvous_handler(params: RendezvousParameters) -> RendezvousHandler:\n    return handler_registry.create_handler(params)",
    "docstring": "Obtain a reference to a :py:class. Custom rendezvous handlers can be registered by :: from torch.distributed.elastic.rendezvous import rendezvous_handler_registry from torch.distributed.elastic.rendezvous.registry import get_rendezvous_handler def create_my_rdzv(params: RendezvousParameters): return MyCustomRdzv(params) rendezvous_handler_registry.register(\"my_rdzv_backend_name\", create_my_rdzv) my_rdzv_handler = get_rendezvous_handler( \"my_rdzv_backend_name\", RendezvousParameters )",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\elastic\\rendezvous\\registry.py",
    "ast_data": "FunctionDef name:get_rendezvous_handler arg:params arguments arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "deregister_handle",
    "source_code": "def deregister_handle(self) -> None:\n    assert self.handle is not None, 'Cannot deregister a handle that is not registered.'\n    torch._C._gds_deregister_handle(self.handle)\n    self.handle = None",
    "docstring": "Deregisters file descriptor from cuFile Driver. This is a wrapper around ``.",
    "type": "method",
    "file_path": "pytorch\\torch\\cuda\\gds.py",
    "ast_data": "FunctionDef name:deregister_handle arg:self arguments arg Compare Call Assign"
  },
  {
    "library": "authlib",
    "name": "request",
    "source_code": "async def request(self, method, url, withhold_token=False, auth=USE_CLIENT_DEFAULT, **kwargs) -> Response:\n    if not withhold_token and auth is USE_CLIENT_DEFAULT:\n        if not self.token or self.token.is_expired():\n            await self.refresh_token()\n        auth = self.token_auth\n    return await super().request(method, url, auth=auth, **kwargs)",
    "docstring": "Send request with auto refresh token feature.",
    "type": "method",
    "file_path": "authlib\\authlib\\integrations\\httpx_client\\assertion_client.py",
    "ast_data": "AsyncFunctionDef name:request arg:self arg:method arg:url arg:withhold_token arg:auth arguments arg arg arg arg arg arg If BoolOp Compare If BoolOp Call Call Assign Return return:yes Call Call"
  },
  {
    "library": "scikit-learn",
    "name": "is_stationary",
    "source_code": "def is_stationary(self):\n    return self.k1.is_stationary() and self.k2.is_stationary()",
    "docstring": "Returns whether the kernel is stationary.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\gaussian_process\\kernels.py",
    "ast_data": "FunctionDef name:is_stationary arg:self arguments arg Return return:yes BoolOp Call Call"
  },
  {
    "library": "numpy",
    "name": "iscomplex",
    "source_code": "@array_function_dispatch(_is_type_dispatcher)\ndef iscomplex(x):\n    ax = asanyarray(x)\n    if issubclass(ax.dtype.type, _nx.complexfloating):\n        return ax.imag != 0\n    res = zeros(ax.shape, bool)\n    return res[()]",
    "docstring": "Returns a bool array, where True if input element is complex. What is tested is whether the input has a non-zero imaginary part, not if the input type is complex. Parameters ---------- x : array_like Input array. Returns ------- out : ndarray of bools Output array. See Also -------- isreal iscomplexobj : Return True if x is a complex type or an array of complex numbers. Examples -------- >>> import numpy as np >>> np.iscomplex([1+1j, 1+0j, 4.5, 3, 2, 2j]) array([ True, False, False, False, False, True])",
    "type": "function",
    "file_path": "numpy\\numpy\\lib\\_type_check_impl.py",
    "ast_data": "FunctionDef name:iscomplex arg:x arguments arg Assign Call If Call Return return:yes Compare Assign Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "isgenerator",
    "source_code": "def isgenerator(object):\n    return _inspect.isgenerator(tf_decorator.unwrap(object)[1])",
    "docstring": "TFDecorator-aware replacement for inspect.isgenerator.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\util\\tf_inspect.py",
    "ast_data": "FunctionDef name:isgenerator arg:object arguments arg Return return:yes Call Call"
  },
  {
    "library": "pandas",
    "name": "get_lines",
    "source_code": "@abstractmethod\ndef get_lines(self) -> list[str]:\n    pass",
    "docstring": "Product in a form of list of lines (strings).",
    "type": "method",
    "file_path": "pandas\\pandas\\io\\formats\\info.py",
    "ast_data": "FunctionDef name:get_lines arg:self arguments arg"
  },
  {
    "library": "sphinx",
    "name": "SphinxBaseReader",
    "source_code": "class SphinxBaseReader(standalone.Reader):\n    transforms: list[type[Transform]] = []\n\n    def __init__(self, *args: Any, **kwargs: Any) -> None:\n        from sphinx.application import Sphinx\n        if len(args) > 0 and isinstance(args[0], Sphinx):\n            self._app = args[0]\n            self._env = self._app.env\n            args = args[1:]\n        super().__init__(*args, **kwargs)\n\n    def setup(self, app: Sphinx) -> None:\n        self._app = app\n        self._env = app.env\n\n    def get_transforms(self) -> list[type[Transform]]:\n        transforms = super().get_transforms() + self.transforms\n        unused = [DanglingReferences]\n        for transform in unused:\n            if transform in transforms:\n                transforms.remove(transform)\n        return transforms\n\n    def new_document(self) -> nodes.document:\n        document = super().new_document()\n        document.transformer = SphinxTransformer(document)\n        document.transformer.set_environment(self.settings.env)\n        reporter = document.reporter\n        document.reporter = LoggingReporter.from_reporter(reporter)\n        return document",
    "docstring": "A base class of readers for Sphinx. This replaces reporter by Sphinx's on generating document.",
    "type": "class",
    "file_path": "sphinx\\sphinx\\io.py",
    "ast_data": "ClassDef name:SphinxBaseReader FunctionDef name:__init__ arg:self arguments arg arg arg If BoolOp Compare Call Call Assign Assign Assign Call Call FunctionDef name:setup arg:self arg:app arguments arg arg Assign Assign FunctionDef name:get_transforms arg:self arguments arg Assign Call Call Assign For If Compare Call Return return:yes FunctionDef name:new_document arg:self arguments arg Assign Call Call Assign Call Call Assign Assign Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "set_resample",
    "source_code": "def set_resample(self, v):\n    v = mpl._val_or_rc(v, 'image.resample')\n    self._resample = v\n    self.stale = True",
    "docstring": "Set whether image resampling is used. Parameters ---------- v : bool, default: :rc:",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\image.py",
    "ast_data": "FunctionDef name:set_resample arg:self arg:v arguments arg arg Assign Call Assign Assign"
  },
  {
    "library": "tensorflow",
    "name": "SavedModelSplitter",
    "source_code": "class SavedModelSplitter(split.ComposableSplitter):\n\n    def build_chunks(self):\n        if not isinstance(self._proto, saved_model_pb2.SavedModel):\n            raise TypeError(f'SavedModelSplitter can only split SavedModel protos. Got {type(self._proto)}.')\n        if self._proto.ByteSize() >= constants.max_size():\n            graph_def = self._proto.meta_graphs[0].graph_def\n            graph_def_fields = ['meta_graphs', 0, 'graph_def']\n            split_graph_def.GraphDefSplitter(self._proto.meta_graphs[0].graph_def, parent_splitter=self, fields_in_parent=graph_def_fields).build_chunks()\n        if self._proto.ByteSize() >= constants.max_size():\n            self.add_chunk(graph_def, graph_def_fields, index=1)\n            self._proto.meta_graphs[0].ClearField('graph_def')",
    "docstring": "Splits a SavedModel proto into chunks of size < 2GB.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\tools\\proto_splitter\\python\\saved_model.py",
    "ast_data": "ClassDef name:SavedModelSplitter FunctionDef name:build_chunks arg:self arguments arg If Call Raise Call Call If Compare Call Call Assign Assign Call Call If Compare Call Call Call Call"
  },
  {
    "library": "scipy",
    "name": "_get_blind_start",
    "source_code": "def _get_blind_start(shape):\n    m, n = shape\n    x0 = np.ones(n)\n    y0 = np.zeros(m)\n    z0 = np.ones(n)\n    tau0 = 1\n    kappa0 = 1\n    return (x0, y0, z0, tau0, kappa0)",
    "docstring": "Return the starting point from [4] 4.4 References ---------- .. [4] Andersen, Erling D., and Knud D. Andersen. \"The MOSEK interior point optimizer for linear programming: an implementation of the homogeneous algorithm.\" High performance optimization. Springer US, 2000. 197-232.",
    "type": "function",
    "file_path": "scipy\\scipy\\optimize\\_linprog_ip.py",
    "ast_data": "FunctionDef name:_get_blind_start arg:shape arguments arg Assign Assign Call Assign Call Assign Call Assign Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_add_summary_step_transformer",
    "source_code": "def _add_summary_step_transformer(parent, node, full_name, name, logs):\n    for keyword_arg in node.keywords:\n        if keyword_arg.arg == 'step':\n            return node\n    default_value = 'tf.compat.v1.train.get_or_create_global_step()'\n    ast_value = ast.parse(default_value).body[0].value\n    del ast_value.lineno\n    node.keywords.append(ast.keyword(arg='step', value=ast_value))\n    logs.append((ast_edits.WARNING, node.lineno, node.col_offset, \"Summary API writing function %s now requires a 'step' argument; inserting default of %s.\" % (full_name or name, default_value)))\n    return node",
    "docstring": "Adds a step argument to the summary API call if not specified. The inserted argument value is tf.compat.v1.train.get_or_create_global_step().",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\tools\\compatibility\\tf_upgrade_v2.py",
    "ast_data": "FunctionDef name:_add_summary_step_transformer arg:parent arg:node arg:full_name arg:name arg:logs arguments arg arg arg arg arg For If Compare Return return:yes Assign Assign Call Call Call Call BoolOp Return return:yes"
  },
  {
    "library": "django",
    "name": "append",
    "source_code": "def append(self, val):\n    self[len(self):] = [val]",
    "docstring": "Standard list append method",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\geos\\mutable_list.py",
    "ast_data": "FunctionDef name:append arg:self arg:val arguments arg arg Assign Call"
  },
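The same slice-assignment trick works on any mutable sequence, which is why the method above needs no list-specific machinery:

```python
items = [1, 2]
items[len(items):] = [3]  # equivalent to items.append(3)
print(items)              # [1, 2, 3]
```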
  {
    "library": "tensorflow",
    "name": "_check_external_modification",
    "source_code": "def _check_external_modification(self):\n    if self._external_modification or self._non_append_mutation:\n        return\n    if self._storage != self._last_wrapped_list_snapshot:\n        self._external_modification = True\n        self._last_wrapped_list_snapshot = None",
    "docstring": "Checks for any changes to the wrapped list not through the wrapper.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\trackable\\data_structures.py",
    "ast_data": "FunctionDef name:_check_external_modification arg:self arguments arg If BoolOp Return return:no If Compare Assign Assign"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, graph, control_inputs) -> None:\n    self._graph = graph\n    if control_inputs is None:\n        self._control_inputs_val = []\n        self._new_stack = True\n    else:\n        self._control_inputs_val = control_inputs\n        self._new_stack = False\n    self._seen_nodes = set()\n    self._old_stack = None\n    self._old_control_flow_context = None",
    "docstring": "Create a new . A is the context manager for blocks. These normally nest, as described in the documentation for . The argument list control dependencies that must be added to the current set of control dependencies. Because of uniquification the set can be empty even if the caller passed a list of ops. The special value indicates that we want to start a new empty set of control dependencies instead of extending the current set. In that case we also clear the current control flow context, which is an additional mechanism to add control dependencies. Args: graph: The graph that this controller is managing. control_inputs: List of ops to use as control inputs in addition to the current control dependencies. None to indicate that the dependencies should be cleared.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\ops.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:graph arg:control_inputs arguments arg arg arg Assign If Compare Assign Assign Assign Assign Assign Call Assign Assign"
  },
  {
    "library": "tensorflow",
    "name": "outer_grad_state",
    "source_code": "@property\ndef outer_grad_state(self):\n    return self._outer_grad_state",
    "docstring": "The grad loop state for outer loop.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\control_flow_state.py",
    "ast_data": "FunctionDef name:outer_grad_state arg:self arguments arg Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "_estimate_wishart_spherical",
    "source_code": "def _estimate_wishart_spherical(self, nk, xk, sk):\n    _, n_features = xk.shape\n    self.degrees_of_freedom_ = self.degrees_of_freedom_prior_ + nk\n    diff = xk - self.mean_prior_\n    self.covariances_ = self.covariance_prior_ + nk * (sk + self.mean_precision_prior_ / self.mean_precision_ * np.mean(np.square(diff), 1))\n    self.covariances_ /= self.degrees_of_freedom_",
    "docstring": "Estimate the spherical Wishart distribution parameters. Parameters ---------- X : array-like of shape (n_samples, n_features) nk : array-like of shape (n_components,) xk : array-like of shape (n_components, n_features) sk : array-like of shape (n_components,)",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\mixture\\_bayesian_mixture.py",
    "ast_data": "FunctionDef name:_estimate_wishart_spherical arg:self arg:nk arg:xk arg:sk arguments arg arg arg arg Assign Assign Assign Assign Call Call"
  },
  {
    "library": "pytorch",
    "name": "custom_fwd",
    "source_code": "@deprecated(\"`torch.cuda.amp.custom_fwd(args...)` is deprecated. Please use `torch.amp.custom_fwd(args..., device_type='cuda')` instead.\", category=FutureWarning)\ndef custom_fwd(fwd=None, *, cast_inputs=None):\n    return functools.partial(torch.amp.custom_fwd, device_type='cuda')(fwd=fwd, cast_inputs=cast_inputs)",
    "docstring": "`` instead.",
    "type": "function",
    "file_path": "pytorch\\torch\\cuda\\amp\\autocast_mode.py",
    "ast_data": "FunctionDef name:custom_fwd arg:fwd arguments arg arg Return return:yes Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_maybe_save_assets",
    "source_code": "def _maybe_save_assets(write_fn, assets_to_add=None):\n    asset_filename_map = {}\n    if assets_to_add is None:\n        tf_logging.info('No assets to save.')\n        return asset_filename_map\n    for asset_tensor in assets_to_add:\n        asset_source_filepath = _asset_path_from_tensor(asset_tensor)\n        if not asset_source_filepath:\n            raise ValueError(f'Asset filepath tensor {asset_tensor} in is invalid.')\n        asset_filename = get_asset_filename_to_add(asset_source_filepath, asset_filename_map)\n        write_fn(asset_filename, asset_tensor)\n        asset_filename_map[asset_filename] = asset_source_filepath\n    tf_logging.info('Assets added to graph.')\n    return asset_filename_map",
    "docstring": "Saves assets to the meta graph. Args: write_fn: A function callback that writes assets into meta graph. assets_to_add: The list where the asset paths are setup. Returns: A dict of asset basenames for saving to the original full path to the asset. Raises: ValueError: Indicating an invalid filepath tensor.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\saved_model\\builder_impl.py",
    "ast_data": "FunctionDef name:_maybe_save_assets arg:write_fn arg:assets_to_add arguments arg arg Assign If Compare Call Return return:yes For Assign Call If Raise Call Assign Call Call Assign Call Return return:yes"
  },
  {
    "library": "sphinx",
    "name": "find_autosummary_in_files",
    "source_code": "def find_autosummary_in_files(filenames: Sequence[str | os.PathLike[str]]) -> list[AutosummaryEntry]:\n    documented: list[AutosummaryEntry] = []\n    for filename in filenames:\n        with open(filename, encoding='utf-8', errors='ignore') as f:\n            lines = f.read().splitlines()\n        documented.extend(find_autosummary_in_lines(lines, filename=filename))\n    return documented",
    "docstring": "Find out what items are documented in source/*.rst. See .",
    "type": "function",
    "file_path": "sphinx\\sphinx\\ext\\autosummary\\generate.py",
    "ast_data": "FunctionDef name:find_autosummary_in_files arg:filenames arguments arg For With Call Assign Call Call Call Call Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "_get_leaves",
    "source_code": "def _get_leaves(self):\n    leaf_ptr = self.dummy_leaf_.next_leaf_\n    leaves = []\n    while leaf_ptr is not None:\n        leaves.append(leaf_ptr)\n        leaf_ptr = leaf_ptr.next_leaf_\n    return leaves",
    "docstring": "Retrieve the leaves of the CF Node. Returns ------- leaves : list of shape (n_leaves,) List of the leaf nodes.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\cluster\\_birch.py",
    "ast_data": "FunctionDef name:_get_leaves arg:self arguments arg Assign Assign While Compare Call Assign Return return:yes"
  },
  {
    "library": "seaborn",
    "name": "assign_variables",
    "source_code": "def assign_variables(self, data=None, variables={}):\n    x = variables.get('x', None)\n    y = variables.get('y', None)\n    if x is None and y is None:\n        self.input_format = 'wide'\n        frame, names = self._assign_variables_wideform(data, **variables)\n    else:\n        self.input_format = 'long'\n        plot_data = PlotData(data, variables)\n        frame = plot_data.frame\n        names = plot_data.names\n    self.plot_data = frame\n    self.variables = names\n    self.var_types = {v: variable_type(frame[v], boolean_type='numeric' if v in 'xy' else 'categorical') for v in names}\n    return self",
    "docstring": "Define plot variables, optionally using lookup from .",
    "type": "method",
    "file_path": "seaborn\\seaborn\\_base.py",
    "ast_data": "FunctionDef name:assign_variables arg:self arg:data arg:variables arguments arg arg arg Assign Call Assign Call If BoolOp Compare Compare Assign Assign Call Assign Assign Call Assign Assign Assign Assign Assign Call Compare Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "wrapper",
    "source_code": "def wrapper(*args, **kwargs):\n    has_old_names = False\n    for old_name, new_name in _RENAMED_ARGUMENTS.items():\n        if old_name in kwargs:\n            has_old_names = True\n            value = kwargs.pop(old_name)\n            kwargs[new_name] = value\n    if has_old_names:\n        _logging.warning('Use of the keyword argument names (flag_name, default_value, docstring) is deprecated, please use (name, default, help) instead.')\n    return original_function(*args, **kwargs)",
    "docstring": "Wrapper function that turns old keyword names to new ones.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\platform\\flags.py",
    "ast_data": "FunctionDef name:wrapper arguments arg arg Assign For Call If Compare Assign Assign Call Assign If Call Return return:yes Call"
  },
  {
    "library": "kornia",
    "name": "_auto_version_conversion",
    "source_code": "def _auto_version_conversion(self, *args: list[onnx.ModelProto], target_ir_version: Optional[int]=None, target_opset_version: Optional[int]=None) -> list[onnx.ModelProto]:\n    if target_ir_version is None:\n        target_ir_version = 9\n    if target_opset_version is None:\n        target_opset_version = 17\n    op_list = []\n    for op in args:\n        op = super()._onnx_version_conversion(op, target_ir_version=target_ir_version, target_opset_version=target_opset_version)\n        op_list.append(op)\n    return op_list",
    "docstring": "Automatic conversion of the model's IR/OPSET version to the given target version. If is not provided, the model is converted to 9 by default. If is not provided, the model is converted to 17 by default. Args: args: List of operations to convert. target_ir_version: The target IR version to convert to. target_opset_version: The target OPSET version to convert to.",
    "type": "method",
    "file_path": "kornia\\kornia\\onnx\\sequential.py",
    "ast_data": "FunctionDef name:_auto_version_conversion arg:self arguments arg arg arg arg If Compare Assign If Compare Assign Assign For Assign Call Call Call Return return:yes"
  },
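A rough sketch of the underlying onnx version conversion this wraps, using the public `onnx.version_converter` API (`model.onnx` is a hypothetical path; the target versions mirror the defaults above):

```python
import onnx
from onnx import version_converter

model = onnx.load("model.onnx")  # hypothetical model file
# Convert the opset, then pin the IR version, as the method above does.
converted = version_converter.convert_version(model, 17)
converted.ir_version = 9
onnx.checker.check_model(converted)
```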
  {
    "library": "scipy",
    "name": "logpmf",
    "source_code": "def logpmf(self, k, *args, **kwds):\n    args, loc, _ = self._parse_args(*args, **kwds)\n    k, loc = map(asarray, (k, loc))\n    args = tuple(map(asarray, args))\n    _a, _b = self._get_support(*args)\n    k = asarray(k - loc)\n    cond0 = self._argcheck(*args)\n    cond1 = (k >= _a) & (k <= _b)\n    if not isinstance(self, rv_sample):\n        cond1 = cond1 & self._nonzero(k, *args)\n    cond = cond0 & cond1\n    output = empty(shape(cond), 'd')\n    output.fill(-inf)\n    place(output, 1 - cond0 + np.isnan(k), self.badvalue)\n    if np.any(cond):\n        goodargs = argsreduce(cond, *(k,) + args)\n        place(output, cond, self._logpmf(*goodargs))\n    if output.ndim == 0:\n        return output[()]\n    return output",
    "docstring": "Log of the probability mass function at k of the given RV. Parameters ---------- k : array_like Quantiles. arg1, arg2, arg3,... : array_like The shape parameter(s) for the distribution (see docstring of the instance object for more information). loc : array_like, optional Location parameter. Default is 0. Returns ------- logpmf : array_like Log of the probability mass function evaluated at k.",
    "type": "method",
    "file_path": "scipy\\scipy\\stats\\_distn_infrastructure.py",
    "ast_data": "FunctionDef name:logpmf arg:self arg:k arguments arg arg arg arg Assign Call Assign Call Assign Call Call Assign Call Assign Call Assign Call Assign Compare Compare If Call Assign Call Assign Assign Call Call Call Call Call If Call Assign Call Call Call If Compare Return return:yes Return return:yes"
  },
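A brief usage example through a concrete discrete distribution (Poisson with a made-up `mu`); values outside the support come back as -inf, matching the fill logic above:

```python
from scipy import stats

print(stats.poisson.logpmf([0, 1, 2], mu=3))  # log of the pmf at each k
print(stats.poisson.logpmf(-1, mu=3))         # -inf outside the support
```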
  {
    "library": "pytorch",
    "name": "_patch_model_state_dict",
    "source_code": "@no_type_check\ndef _patch_model_state_dict(model: nn.Module, *, options: Optional[StateDictOptions]=None) -> None:\n    _state_dict_call = functools.partial(get_model_state_dict, model=model, options=options)\n\n    def state_dict_call():\n        return _state_dict_call()\n    model.state_dict = state_dict_call\n    _load_state_dict_call = functools.partial(set_model_state_dict, model=model, options=options)\n\n    def load_state_dict_call(state_dict: dict[str, Any]):\n        _load_state_dict_call(model_state_dict=state_dict)\n    model.load_state_dict = load_state_dict_call\n    _patched_state_dict.add(state_dict_call)\n    _patched_state_dict.add(load_state_dict_call)",
    "docstring": "Patch the `StateDictOptions` for the details. Returns: None",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\checkpoint\\state_dict.py",
    "ast_data": "FunctionDef name:_patch_model_state_dict arg:model arguments arg arg Assign Call FunctionDef name:state_dict_call arguments Return return:yes Call Assign Assign Call FunctionDef name:load_state_dict_call arg:state_dict arguments arg Call Assign Call Call"
  },
  {
    "library": "tensorflow",
    "name": "getfutureimports",
    "source_code": "def getfutureimports(entity):\n    if not (tf_inspect.isfunction(entity) or tf_inspect.ismethod(entity)):\n        return tuple()\n    return tuple(sorted((name for name, value in entity.__globals__.items() if getattr(value, '__module__', None) == '__future__')))",
    "docstring": "Detects what future imports are necessary to safely execute entity source. Args: entity: Any object Returns: A tuple of future strings",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\autograph\\pyct\\inspect_utils.py",
    "ast_data": "FunctionDef name:getfutureimports arg:entity arguments arg If BoolOp Call Call Return return:yes Call Return return:yes Call Call Call Compare Call"
  },
  {
    "library": "matplotlib",
    "name": "remove_callback",
    "source_code": "def remove_callback(self, func, *args, **kwargs):\n    if args or kwargs:\n        _api.warn_deprecated('3.1', message='In a future version, Timer.remove_callback will not take *args, **kwargs anymore, but remove all callbacks where the callable matches; to keep a specific callback removable by itself, pass it to add_callback as a functools.partial object.')\n        self.callbacks.remove((func, args, kwargs))\n    else:\n        funcs = [c[0] for c in self.callbacks]\n        if func in funcs:\n            self.callbacks.pop(funcs.index(func))",
    "docstring": "Remove *func* from list of callbacks. *args* and *kwargs* are optional and used to distinguish between copies of the same function registered to be called with different arguments. This behavior is deprecated. In the future, `add_callbackfunctools.partial` object.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\backend_bases.py",
    "ast_data": "FunctionDef name:remove_callback arg:self arg:func arguments arg arg arg arg If BoolOp Call Call Assign If Compare Call Call"
  },
  {
    "library": "authlib",
    "name": "scope_to_list",
    "source_code": "def scope_to_list(scope):\n    if isinstance(scope, (tuple, list, set)):\n        return [to_unicode(s) for s in scope]\n    elif scope is None:\n        return None\n    return scope.strip().split()",
    "docstring": "Convert a space separated string to a list of scopes.",
    "type": "function",
    "file_path": "authlib\\authlib\\oauth2\\rfc6749\\util.py",
    "ast_data": "FunctionDef name:scope_to_list arg:scope arguments arg If Call Return return:yes Call If Compare Return return:no Return return:yes Call Call"
  },
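A tiny usage demo covering all three branches (string, sequence, None); the import path follows the file_path above:

```python
from authlib.oauth2.rfc6749.util import scope_to_list

print(scope_to_list("openid profile email"))  # ['openid', 'profile', 'email']
print(scope_to_list(("read", "write")))       # ['read', 'write']
print(scope_to_list(None))                    # None
```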
  {
    "library": "pytorch",
    "name": "_run_node_and_set_meta",
    "source_code": "def _run_node_and_set_meta(self, node) -> Any:\n    out = super().run_node(node)\n    self.env[node] = out\n    node.meta.update(((k, v) for k, v in fx_traceback.get_current_meta().items() if k not in node.meta))\n    node.meta['val'] = proxy_tensor.extract_val(out)\n    return out",
    "docstring": "Run node and set meta according to . This should be used on new nodes or nodes that have been modified. By default does not update . Set to the current meta, except for , which is recomputed.",
    "type": "method",
    "file_path": "pytorch\\torch\\onnx\\_internal\\fx\\passes\\type_promotion.py",
    "ast_data": "FunctionDef name:_run_node_and_set_meta arg:self arg:node arguments arg arg Assign Call Call Assign Call Call Call Compare Assign Call Return return:yes"
  },
  {
    "library": "django",
    "name": "get_distance",
    "source_code": "def get_distance(self, f, value, lookup_type):\n    raise NotImplementedError('Distance operations not available on this spatial backend.')",
    "docstring": "Return the distance parameters for the given geometry field, lookup value, and lookup type.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\db\\backends\\base\\operations.py",
    "ast_data": "FunctionDef name:get_distance arg:self arg:f arg:value arg:lookup_type arguments arg arg arg arg Raise Call"
  },
  {
    "library": "pytorch",
    "name": "empty_cache",
    "source_code": "def empty_cache() -> None:\n    if is_initialized():\n        torch._C._xpu_emptyCache()",
    "docstring": "Release all unoccupied cached memory currently held by the caching allocator so that those can be used in other XPU application. .. note:: :func: doesn't increase the amount of XPU memory available for PyTorch. However, it may help reduce fragmentation of XPU memory in certain cases.",
    "type": "function",
    "file_path": "pytorch\\torch\\xpu\\memory.py",
    "ast_data": "FunctionDef name:empty_cache arguments If Call Call"
  },
  {
    "library": "numpy",
    "name": "identity",
    "source_code": "def identity(n, dtype=None):\n    a = array([1] + n * [0], dtype=dtype)\n    b = empty((n, n), dtype=dtype)\n    b.flat = a\n    return b",
    "docstring": "Returns the square identity matrix of given size. Parameters ---------- n : int Size of the returned identity matrix. dtype : data-type, optional Data-type of the output. Defaults to `nn` matrix with its main diagonal set to one, and all other elements zero. See Also -------- numpy.identity : Equivalent array function. matlib.eye : More general matrix identity function. Examples -------- >>> import numpy.matlib >>> np.matlib.identity(3, dtype=int) matrix([[1, 0, 0], [0, 1, 0], [0, 0, 1]])",
    "type": "function",
    "file_path": "numpy\\numpy\\matlib.py",
    "ast_data": "FunctionDef name:identity arg:n arg:dtype arguments arg arg Assign Call Assign Call Assign Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "feature_alpha_dropout",
    "source_code": "def feature_alpha_dropout(input: Tensor, p: float=0.5, training: bool=False, inplace: bool=False) -> Tensor:\n    if has_torch_function_unary(input):\n        return handle_torch_function(feature_alpha_dropout, (input,), input, p=p, training=training, inplace=inplace)\n    if p < 0.0 or p > 1.0:\n        raise ValueError(f'dropout probability has to be between 0 and 1, but got {p}')\n    return _VF.feature_alpha_dropout_(input, p, training) if inplace else _VF.feature_alpha_dropout(input, p, training)",
    "docstring": "Randomly masks out entire channels (a channel is a feature map). For example, the :math:-th channel of the :math:-th sample in the batch input is a tensor :math: of the input tensor. Instead of setting activations to zero, as in regular Dropout, the activations are set to the negative saturation value of the SELU activation function. Each element will be masked independently on every forward call with probability :attr: using samples from a Bernoulli distribution. The elements to be masked are randomized on every forward call, and scaled and shifted to maintain zero mean and unit variance. See :class: for details. Args: p: dropout probability of a channel to be zeroed. Default: 0.5 training: apply dropout if is ``",
    "type": "function",
    "file_path": "pytorch\\torch\\nn\\functional.py",
    "ast_data": "FunctionDef name:feature_alpha_dropout arg:input arg:p arg:training arg:inplace arguments arg arg arg arg If Call Return return:yes Call If BoolOp Compare Compare Raise Call Return return:yes Call Call"
  },
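A short runnable sketch (the tensor shape is a made-up example; dim 1 is treated as the channel dimension):

```python
import torch
import torch.nn.functional as F

x = torch.randn(2, 3, 4)  # (batch, channels, features)
out = F.feature_alpha_dropout(x, p=0.5, training=True)
# Whole channels are replaced by the SELU negative saturation value,
# and the result is rescaled to keep zero mean / unit variance.
print(out.shape)  # torch.Size([2, 3, 4])
```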
  {
    "library": "pytorch",
    "name": "_wrap_jagged_dims",
    "source_code": "def _wrap_jagged_dims(ndim, dims, op_name, ragged_idx=1):\n    from torch._prims_common import canonicalize_dims\n    assert isinstance(dims, (tuple, list)), f'_wrap_jagged_dims(): cannot iterate over dimensions of type {type(dims)}'\n    wrapped_dims = [canonicalize_dims(ndim, d) for d in dims]\n    operate_on_batch = 0 in wrapped_dims\n    operate_on_ragged = ragged_idx in wrapped_dims\n    operate_on_non_batch = any((d != 0 and d != ragged_idx for d in wrapped_dims))\n    outer_to_inner_dim = tuple(dict.fromkeys((_outer_to_inner_dim(ndim, d, ragged_idx) for d in wrapped_dims)))\n    return (outer_to_inner_dim, operate_on_batch, operate_on_ragged, operate_on_non_batch)",
    "docstring": "For NestedTensor operators, wraps dimensions to non-negative values, and returns metadata related to reduction dimension(s).",
    "type": "function",
    "file_path": "pytorch\\torch\\nested\\_internal\\ops.py",
    "ast_data": "FunctionDef name:_wrap_jagged_dims arg:ndim arg:dims arg:op_name arg:ragged_idx arguments arg arg arg arg Call Call Assign Call Assign Compare Assign Compare Assign Call BoolOp Compare Compare Assign Call Call Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "_check_layout_engines_compat",
    "source_code": "def _check_layout_engines_compat(self, old, new):\n    if old is None or new is None:\n        return True\n    if old.colorbar_gridspec == new.colorbar_gridspec:\n        return True\n    for ax in self.axes:\n        if hasattr(ax, '_colorbar'):\n            return False\n    return True",
    "docstring": "Helper for set_layout engine If the figure has used the old engine and added a colorbar then the value of colorbar_gridspec must be the same on the new engine.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\figure.py",
    "ast_data": "FunctionDef name:_check_layout_engines_compat arg:self arg:old arg:new arguments arg arg arg If BoolOp Compare Compare Return return:yes If Compare Return return:yes For If Call Return return:yes Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "add_tool",
    "source_code": "def add_tool(self, name, tool, *args, **kwargs):\n    tool_cls = backend_tools._find_tool_class(type(self.canvas), tool)\n    if not tool_cls:\n        raise ValueError('Impossible to find class for %s' % str(tool))\n    if name in self._tools:\n        _api.warn_external('A \"Tool class\" with the same name already exists, not added')\n        return self._tools[name]\n    tool_obj = tool_cls(self, name, *args, **kwargs)\n    self._tools[name] = tool_obj\n    if tool_obj.default_keymap is not None:\n        self.update_keymap(name, tool_obj.default_keymap)\n    if isinstance(tool_obj, backend_tools.ToolToggleBase):\n        if tool_obj.radio_group is None:\n            self._toggled.setdefault(None, set())\n        else:\n            self._toggled.setdefault(tool_obj.radio_group, None)\n        if tool_obj.toggled:\n            self._handle_toggle(tool_obj, None, None)\n    tool_obj.set_figure(self.figure)\n    event = ToolEvent('tool_added_event', self, tool_obj)\n    self._callbacks.process(event.name, event)\n    return tool_obj",
    "docstring": "Add *tool* to . If successful, adds a new event `` is the *name* of the tool; the event is fired every time the tool is triggered. Parameters ---------- name : str Name of the tool, treated as the ID, has to be unique. tool : type Class of the tool to be added. A subclass will be used instead if one was registered for the current canvas class. *args, **kwargs Passed to the *tool*'s constructor. See Also -------- matplotlib.backend_tools.ToolBase : The base class for tools.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\backend_managers.py",
    "ast_data": "FunctionDef name:add_tool arg:self arg:name arg:tool arguments arg arg arg arg arg Assign Call Call If Raise Call Call If Compare Call Return return:yes Assign Call Assign If Compare Call If Call If Compare Call Call Call If Call Call Assign Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_create_or_get_tensor_values_cache",
    "source_code": "def _create_or_get_tensor_values_cache(self, cache_name, graph, shape=None, dtype=dtypes.float32):\n    if graph is None:\n        raise ValueError('Invalid graph.')\n    graph_cache_var = self._cache_variable_for_graph(graph)\n    if cache_name not in graph_cache_var:\n        if shape is None:\n            raise ValueError('shape must be provided at cache creation.')\n        if dtype.is_integer:\n            init_val = int(_COMPACT_TRACE_ENTRY_INIT_VALUE)\n        else:\n            init_val = _COMPACT_TRACE_ENTRY_INIT_VALUE\n        with graph.as_default() as g, g.name_scope(None):\n            graph_cache_var[cache_name] = variable_scope.get_variable(_TT_SNAPSHOT + '_' + self._escape_namescopes(cache_name), shape=shape, dtype=dtype, initializer=init_ops.constant_initializer(init_val), trainable=False, use_resource=True, collections=[_TENSOR_TRACER_STORAGE, ops.GraphKeys.LOCAL_VARIABLES])\n    return graph_cache_var[cache_name]",
    "docstring": "Creates a variable as the cache to store intermediate tensor values. Args: cache_name: Name to be given to the cache (an instance of tf.variable). graph: Tensorflow graph. shape: A list of dimensions. dtype: Data type of created cache. Returns: A ref to newly created or existing cache with the given dimensions. Raises: ValueError: (1) If graph is None, or (2) shape is None when a new cache needs to be created.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\tpu\\tensor_tracer.py",
    "ast_data": "FunctionDef name:_create_or_get_tensor_values_cache arg:self arg:cache_name arg:graph arg:shape arg:dtype arguments arg arg arg arg arg If Compare Raise Call Assign Call If Compare If Compare Raise Call If Assign Call Assign With Call Call Assign Call Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "truncated_normal",
    "source_code": "@dispatch.add_dispatch_support\n@doc_controls.do_not_generate_docs\ndef truncated_normal(shape, mean=0.0, stddev=1.0, dtype=None, seed=None):\n    if dtype is None:\n        dtype = floatx()\n    if seed is None:\n        seed = np.random.randint(10000000.0)\n    return random_ops.truncated_normal(shape, mean, stddev, dtype=dtype, seed=seed)",
    "docstring": "Returns a tensor with truncated random normal distribution of values. The generated values follow a normal distribution with specified mean and standard deviation, except that values whose magnitude is more than two standard deviations from the mean are dropped and re-picked. Args: shape: A tuple of integers, the shape of tensor to create. mean: Mean of the values. stddev: Standard deviation of the values. dtype: String, dtype of returned tensor. seed: Integer, random seed. Returns: A tensor.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\backend.py",
    "ast_data": "FunctionDef name:truncated_normal arg:shape arg:mean arg:stddev arg:dtype arg:seed arguments arg arg arg arg arg If Compare Assign Call If Compare Assign Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_SparseSegmentSqrtNWithNumSegmentsGrad",
    "source_code": "@ops.RegisterGradient('SparseSegmentSqrtNWithNumSegments')\ndef _SparseSegmentSqrtNWithNumSegmentsGrad(op: ops.Operation, grad):\n    if _GetOpAttrOrNone(op, 'sparse_gradient'):\n        return (_SparseSegmentReduceGradV2(op, grad, 'sqrtn'), None, None, None)\n    dim0 = array_ops.shape(op.inputs[0])[0]\n    return (math_ops.sparse_segment_sqrt_n_grad(grad, op.inputs[1], op.inputs[2], dim0), None, None, None)",
    "docstring": "Gradient for SparseSegmentSqrtNWithNumSegments.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\math_grad.py",
    "ast_data": "FunctionDef name:_SparseSegmentSqrtNWithNumSegmentsGrad arg:op arg:grad arguments arg arg If Call Return return:yes Call Assign Call Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "summary_scope",
    "source_code": "@contextlib.contextmanager\ndef summary_scope(name, family=None, default_name=None, values=None):\n    name = clean_tag(name)\n    family = clean_tag(family)\n    scope_base_name = name if family is None else '{}/{}'.format(family, name)\n    with ops.name_scope(scope_base_name, default_name, values, skip_on_eager=False) as scope:\n        if family is None:\n            tag = scope.rstrip('/')\n        else:\n            tag = '{}/{}'.format(family, scope.rstrip('/'))\n        yield (tag, scope)",
    "docstring": "Enters a scope used for the summary and yields both the name and tag. To ensure that the summary tag name is always unique, we create a name scope based on and use the full scope name in the tag. If is set, then the tag name will be '/', where is . This ensures that is always the prefix of the tag (and unmodified), while ensuring the scope respects the outer scope from this summary was created. Args: name: A name for the generated summary node. family: Optional; if provided, used as the prefix of the summary tag name. default_name: Optional; if provided, used as default name of the summary. values: Optional; passed as parameter to name_scope. Yields: A tuple , both of which are unique and should be used for the tag and the scope for the summary to output.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\summary_op_util.py",
    "ast_data": "FunctionDef name:summary_scope arg:name arg:family arg:default_name arg:values arguments arg arg arg arg Assign Call Assign Call Assign Compare Call With Call If Compare Assign Call Assign Call Call"
  },
  {
    "library": "tensorflow",
    "name": "list_devices",
    "source_code": "def list_devices(self):\n    raw_device_list = tf_session.TF_SessionListDevices(self._session)\n    device_list = []\n    size = tf_session.TF_DeviceListCount(raw_device_list)\n    for i in range(size):\n        name = tf_session.TF_DeviceListName(raw_device_list, i)\n        device_type = tf_session.TF_DeviceListType(raw_device_list, i)\n        memory = tf_session.TF_DeviceListMemoryBytes(raw_device_list, i)\n        incarnation = tf_session.TF_DeviceListIncarnation(raw_device_list, i)\n        device_list.append(_DeviceAttributes(name, device_type, memory, incarnation))\n    tf_session.TF_DeleteDeviceList(raw_device_list)\n    return device_list",
    "docstring": "Lists available devices in this session. Where: Each element in the list has the following properties name: A string with the full name of the device. ex: device_type: The type of the device (e.g. , , .) memory_limit: The maximum amount of memory available on the device. Note: depending on the device, it is possible the usable memory could be substantially less. Raises: tf.errors.OpError: If it encounters an error (e.g. session is in an invalid state, or network errors occur). Returns: A list of devices in the session.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\client\\session.py",
    "ast_data": "FunctionDef name:list_devices arg:self arguments arg Assign Call Assign Assign Call For Call Assign Call Assign Call Assign Call Assign Call Call Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "reduce_logsumexp_v1",
    "source_code": "@tf_export(v1=['math.reduce_logsumexp', 'reduce_logsumexp'])\n@dispatch.add_dispatch_support\n@deprecation.deprecated_args(None, 'keep_dims is deprecated, use keepdims instead', 'keep_dims')\ndef reduce_logsumexp_v1(input_tensor, axis=None, keepdims=None, name=None, reduction_indices=None, keep_dims=None):\n    axis = deprecation.deprecated_argument_lookup('axis', axis, 'reduction_indices', reduction_indices)\n    keepdims = deprecation.deprecated_argument_lookup('keepdims', keepdims, 'keep_dims', keep_dims)\n    return reduce_logsumexp(input_tensor, axis, keepdims, name)",
    "docstring": "Computes log(sum(exp(elements across dimensions of a tensor))). Reduces along the dimensions given in . Unless is true, the rank of the tensor is reduced by 1 for each of the entries in , which must be unique. If is true, the reduced dimensions are retained with length 1. If has no entries, all dimensions are reduced, and a tensor with a single element is returned. This function is more numerically stable than log(sum(exp(input))). It avoids overflows caused by taking the exp of large inputs and underflows caused by taking the log of small inputs. For example: Args: input_tensor: The tensor to reduce. Should have numeric type. axis: The dimensions to reduce. If (the default), reduces all dimensions. Must be in the range . keepdims: If true, retains reduced dimensions with length 1. name: A name for the operation (optional). reduction_indices: The old (deprecated) name for axis. keep_dims: Deprecated alias for . Returns: The reduced tensor.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\math_ops.py",
    "ast_data": "FunctionDef name:reduce_logsumexp_v1 arg:input_tensor arg:axis arg:keepdims arg:name arg:reduction_indices arg:keep_dims arguments arg arg arg arg arg arg Assign Call Assign Call Return return:yes Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "promote",
    "source_code": "def promote(x):\n    if isinstance(x, SymBool):\n        return SymInt(x.node.wrap_int(int(x)))\n    return x",
    "docstring": "Implements True+True=2, which works in python but not sympy",
    "type": "function",
    "file_path": "pytorch\\torch\\fx\\experimental\\sym_node.py",
    "ast_data": "FunctionDef name:promote arg:x arguments arg If Call Return return:yes Call Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "constraint",
    "source_code": "@property\ndef constraint(self):\n    return self._constraint",
    "docstring": "Returns the constraint function associated with this variable. Returns: The constraint function that was passed to the variable constructor. Can be if no constraint was passed.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\resource_variable_ops.py",
    "ast_data": "FunctionDef name:constraint arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "maybe_propagate_compile_time_consts_in_xla",
    "source_code": "def maybe_propagate_compile_time_consts_in_xla(op):\n    if control_flow_util.GraphOrParentsInXlaContext(op.graph):\n        op._set_attr('_xla_propagate_compile_time_consts', attr_value_pb2.AttrValue(b=True))",
    "docstring": "Tells XLA whether to propagate compile-time consts in the loop body. This is needed to make compile time constants available to ops, for example in , inside the loop body. Ideally this would always be turned on, but that doesn't work with legacy functionalized while_loops. Args: op: A Operation.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\control_flow_util_v2.py",
    "ast_data": "FunctionDef name:maybe_propagate_compile_time_consts_in_xla arg:op arguments arg If Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "maybe_get_next_input_eq_obs",
    "source_code": "def maybe_get_next_input_eq_obs(node: Node, modules: dict[str, nn.Module]) -> Optional[_InputEqualizationObserver]:\n    assert node_supports_equalization(node, modules)\n    maybe_relu_node = maybe_get_next_module(node, modules, nn.ReLU)\n    if maybe_relu_node is None:\n        maybe_relu_node = maybe_get_next_module(node, modules, target_functional_type=F.relu)\n    maybe_obs_node = maybe_get_next_module(node, modules, ObserverBase) if maybe_relu_node is None else maybe_get_next_module(maybe_relu_node, modules, ObserverBase)\n    if maybe_obs_node is None:\n        return None\n    maybe_eq_obs_node = maybe_get_next_module(maybe_obs_node, modules, _InputEqualizationObserver)\n    if maybe_eq_obs_node is None:\n        return None\n    maybe_eq_obs = modules[str(maybe_eq_obs_node)]\n    assert isinstance(maybe_eq_obs, _InputEqualizationObserver)\n    return maybe_eq_obs",
    "docstring": "Gets the following input equalization observer if it exists. For example, in the case of connecting linear layers: x -> inp_obs1 -> eq_obs1 -> linear1 -> out_obs1 -> eq_obs2 -> linear2 -> out_obs2 If the node being passed in is the linear1 node, then we want to return eq_obs2, the following equalization observer for linear2. However, if there are no connecting layers: x -> inp_obs1 -> eq_obs1 -> linear1 -> out_obs1 -> add Then we want to return None. In the case of an unfused linear-relu layer with a connecting linear layer: linear1 -> relu -> out_obs1 -> eq_obs2 -> linear2 -> out_obs2 Since it is unfused, we want to skip over the relu layer and return eq_obs2, the following equalization observer for linear2.",
    "type": "function",
    "file_path": "pytorch\\torch\\ao\\quantization\\fx\\_equalize.py",
    "ast_data": "FunctionDef name:maybe_get_next_input_eq_obs arg:node arg:modules arguments arg arg Call Assign Call If Compare Assign Call Assign Compare Call Call If Compare Return return:no Assign Call If Compare Return return:no Assign Call Call Return return:yes"
  },
  {
    "library": "kornia",
    "name": "tx",
    "source_code": "@property\ndef tx(self) -> Tensor:\n    return -self.rectified_right_camera[..., 0, 3] / self.fx",
    "docstring": "The horizontal baseline between the two cameras. Returns: Tensor of shape :math:",
    "type": "method",
    "file_path": "kornia\\kornia\\geometry\\camera\\stereo.py",
    "ast_data": "FunctionDef name:tx arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "ExternalStatePolicy",
    "source_code": "@tf_export('data.experimental.ExternalStatePolicy')\nclass ExternalStatePolicy(enum.Enum):\n    WARN = 0\n    IGNORE = 1\n    FAIL = 2\n\n    @classmethod\n    def _to_proto(cls, obj):\n        if obj == cls.IGNORE:\n            return dataset_options_pb2.ExternalStatePolicy.POLICY_IGNORE\n        if obj == cls.FAIL:\n            return dataset_options_pb2.ExternalStatePolicy.POLICY_FAIL\n        if obj == cls.WARN:\n            return dataset_options_pb2.ExternalStatePolicy.POLICY_WARN\n        raise ValueError(f'Invalid `obj.` Supported values include `POLICY_IGNORE`,`POLICY_FAIL`, `POLICY_WARN`. Got {obj.name}.')\n\n    @classmethod\n    def _from_proto(cls, pb):\n        if pb == dataset_options_pb2.ExternalStatePolicy.POLICY_IGNORE:\n            return cls.IGNORE\n        if pb == dataset_options_pb2.ExternalStatePolicy.POLICY_FAIL:\n            return cls.FAIL\n        if pb == dataset_options_pb2.ExternalStatePolicy.POLICY_WARN:\n            return cls.WARN\n        raise ValueError(f'Invalid `pb.` Supported values include `POLICY_IGNORE`,`POLICY_FAIL`, `POLICY_WARN`. Got {pb}.')",
    "docstring": "Represents how to handle external state during serialization. See the documentation for more information.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\data\\ops\\options.py",
    "ast_data": "ClassDef name:ExternalStatePolicy Assign Assign Assign FunctionDef name:_to_proto arg:cls arg:obj arguments arg arg If Compare Return return:yes If Compare Return return:yes If Compare Return return:yes Raise Call FunctionDef name:_from_proto arg:cls arg:pb arguments arg arg If Compare Return return:yes If Compare Return return:yes If Compare Return return:yes Raise Call Call"
  },
  {
    "library": "cryptography",
    "name": "get_public",
    "source_code": "def get_public(self, data: memoryview) -> tuple[tuple[int, int], memoryview]:\n    e, data = _get_mpint(data)\n    n, data = _get_mpint(data)\n    return ((e, n), data)",
    "docstring": "RSA public fields",
    "type": "method",
    "file_path": "cryptography\\src\\cryptography\\hazmat\\primitives\\serialization\\ssh.py",
    "ast_data": "FunctionDef name:get_public arg:self arg:data arguments arg arg Assign Call Assign Call Return return:yes"
  },
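
The `_get_mpint` helper used by `get_public` above is private to the ssh module. As a minimal stand-alone sketch of the same wire format (an SSH mpint per RFC 4251 is a 4-byte big-endian length prefix followed by big-endian magnitude bytes) -- the `get_mpint` name and the sample blob below are hypothetical:

```python
import struct

def get_mpint(data: memoryview) -> tuple[int, memoryview]:
    # SSH mpint: 4-byte big-endian length, then big-endian magnitude.
    (length,) = struct.unpack(">I", data[:4])
    value = int.from_bytes(data[4:4 + length], "big")
    return value, data[4 + length:]

# RSA public fields are encoded as e then n in the key blob.
blob = memoryview(b"\x00\x00\x00\x03\x01\x00\x01" + b"\x00\x00\x00\x01\x07")
e, rest = get_mpint(blob)
n, rest = get_mpint(rest)
assert (e, n) == (65537, 7)
```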
  {
    "library": "scipy",
    "name": "__len__",
    "source_code": "def __len__(self):\n    return len(self._uncensored) + len(self._left) + len(self._right) + len(self._interval)",
    "docstring": "The number of values (censored and not censored).",
    "type": "method",
    "file_path": "scipy\\scipy\\stats\\_censored_data.py",
    "ast_data": "FunctionDef name:__len__ arg:self arguments arg Return return:yes Call Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "replace",
    "source_code": "def replace(self, **kwargs):\n    init_kwargs = dict(job=self.job, replica=self.replica, task=self.task, device_type=self.device_type, device_index=self.device_index)\n    init_kwargs.update(kwargs)\n    return self.__class__(**init_kwargs)",
    "docstring": "Convenience method for making a new DeviceSpec by overriding fields. For instance: Args: **kwargs: This method takes the same args as the DeviceSpec constructor Returns: A DeviceSpec with the fields specified in kwargs overridden.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\device_spec.py",
    "ast_data": "FunctionDef name:replace arg:self arguments arg arg Assign Call Call Return return:yes Call"
  },
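
A short usage sketch of `replace` via the public `tf.DeviceSpec` alias of this class:

```python
import tensorflow as tf

spec = tf.DeviceSpec(job="worker", replica=0, task=1,
                     device_type="GPU", device_index=0)
# Override only the device fields; job/replica/task are preserved.
cpu_spec = spec.replace(device_type="CPU", device_index=0)
print(spec.to_string())      # /job:worker/replica:0/task:1/device:GPU:0
print(cpu_spec.to_string())  # /job:worker/replica:0/task:1/device:CPU:0
```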
  {
    "library": "matplotlib",
    "name": "get_ticklocs",
    "source_code": "def get_ticklocs(self, *, minor=False):\n    return self.get_minorticklocs() if minor else self.get_majorticklocs()",
    "docstring": "Return this Axis' tick locations in data coordinates. The locations are not clipped to the current axis limits and hence may contain locations that are not visible in the output. Parameters ---------- minor : bool, default: False True to return the minor tick directions, False to return the major tick directions. Returns ------- array of tick locations",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\axis.py",
    "ast_data": "FunctionDef name:get_ticklocs arg:self arguments arg arg Return return:yes Call Call"
  },
  {
    "library": "pandas",
    "name": "_merge_with_dialect_properties",
    "source_code": "def _merge_with_dialect_properties(dialect: csv.Dialect, defaults: dict[str, Any]) -> dict[str, Any]:\n    kwds = defaults.copy()\n    for param in MANDATORY_DIALECT_ATTRS:\n        dialect_val = getattr(dialect, param)\n        parser_default = parser_defaults[param]\n        provided = kwds.get(param, parser_default)\n        conflict_msgs = []\n        if provided not in (parser_default, dialect_val):\n            msg = f\"Conflicting values for '{param}': '{provided}' was provided, but the dialect specifies '{dialect_val}'. Using the dialect-specified value.\"\n            if not (param == 'delimiter' and kwds.pop('sep_override', False)):\n                conflict_msgs.append(msg)\n        if conflict_msgs:\n            warnings.warn('\\n\\n'.join(conflict_msgs), ParserWarning, stacklevel=find_stack_level())\n        kwds[param] = dialect_val\n    return kwds",
    "docstring": "Merge default kwargs in TextFileReader with dialect parameters. Parameters ---------- dialect : csv.Dialect Concrete csv dialect. See csv.Dialect documentation for more details. defaults : dict Keyword arguments passed to TextFileReader. Returns ------- kwds : dict Updated keyword arguments, merged with dialect parameters.",
    "type": "function",
    "file_path": "pandas\\pandas\\io\\parsers\\readers.py",
    "ast_data": "FunctionDef name:_merge_with_dialect_properties arg:dialect arg:defaults arguments arg arg Assign Call For Assign Call Assign Assign Call Assign If Compare Assign If BoolOp Compare Call Call If Call Call Call Assign Return return:yes"
  },
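
A minimal sketch of the precedence rule `_merge_with_dialect_properties` implements, using a plain `csv.Dialect` subclass; `parser_default` here stands in for pandas' internal `parser_defaults` table. When the user-supplied value conflicts with both the parser default and the dialect, a warning is emitted and the dialect value wins:

```python
import csv

class PipeDialect(csv.Dialect):
    delimiter = "|"
    quotechar = '"'
    doublequote = True
    skipinitialspace = False
    lineterminator = "\r\n"
    quoting = csv.QUOTE_MINIMAL

kwds = {"delimiter": ";"}            # user explicitly passed sep=";"
parser_default = ","                 # the parser's own default
dialect_val = PipeDialect.delimiter  # "|"

provided = kwds.get("delimiter", parser_default)
if provided not in (parser_default, dialect_val):
    print("ParserWarning: conflicting 'delimiter'; using the dialect value")
kwds["delimiter"] = dialect_val      # the dialect always wins
```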
  {
    "library": "django",
    "name": "force_no_ordering",
    "source_code": "def force_no_ordering(self):\n    return []",
    "docstring": "Return a list used in the \"ORDER BY\" clause to force no ordering at all. Return an empty list to include nothing in the ordering.",
    "type": "method",
    "file_path": "django\\django\\db\\backends\\base\\operations.py",
    "ast_data": "FunctionDef name:force_no_ordering arg:self arguments arg Return return:no"
  },
  {
    "library": "scrapy",
    "name": "is_idle",
    "source_code": "def is_idle(self) -> bool:\n    return not self.slot",
    "docstring": "Return True if there isn't any more spiders to process",
    "type": "method",
    "file_path": "scrapy\\scrapy\\core\\scraper.py",
    "ast_data": "FunctionDef name:is_idle arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "register",
    "source_code": "def register(self, obj, value):\n    if obj in self._registry:\n        raise KeyError(f'{type(obj)} has already been registered.')\n    self._registry[obj] = value",
    "docstring": "Registers a Python object within the registry. Args: obj: The object to add to the registry. value: The stored value for the 'obj' type. Raises: KeyError: If the same obj is used twice.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\autograph\\utils\\type_registry.py",
    "ast_data": "FunctionDef name:register arg:self arg:obj arg:value arguments arg arg arg If Compare Raise Call Call Assign"
  },
  {
    "library": "tensorflow",
    "name": "_constant_to_tensor",
    "source_code": "def _constant_to_tensor(x, dtype):\n    return constant_op.constant(x, dtype=dtype)",
    "docstring": "Convert the input to a tensor of type . This is slightly faster than the _to_tensor function, at the cost of handling fewer cases. Args: x: An object to be converted (numpy arrays, floats, ints and lists of them). dtype: The destination type. Returns: A tensor.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\backend.py",
    "ast_data": "FunctionDef name:_constant_to_tensor arg:x arg:dtype arguments arg arg Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "t",
    "source_code": "@lru_cache(maxsize=1)\ndef t(self, n: int, p0: int | None=None, p1: int | None=None, k_offset: int=0) -> np.ndarray:\n    p0, p1 = self.p_range(n, p0, p1)\n    return np.arange(p0, p1) * self.delta_t + k_offset * self.T",
    "docstring": "Times of STFT for an input signal with samples. Returns a 1d array with times of the values with the same parametrization. Note that the slices are `p_minp_max(n)x`) ShortTimeFFT: Class this method belongs to.",
    "type": "method",
    "file_path": "scipy\\scipy\\signal\\_short_time_fft.py",
    "ast_data": "FunctionDef name:t arg:self arg:n arg:p0 arg:p1 arg:k_offset arguments arg arg arg arg arg Assign Call Return return:yes Call Call"
  },
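
A usage sketch, assuming the documented `ShortTimeFFT(win, hop, fs)` constructor: the returned times advance by `delta_t` (i.e. `hop * T`) per slice.

```python
import numpy as np
from scipy.signal import ShortTimeFFT
from scipy.signal.windows import gaussian

win = gaussian(50, std=8, sym=True)       # 50-sample analysis window
SFT = ShortTimeFFT(win, hop=10, fs=100.0)

t = SFT.t(200)                            # slice times for a 200-sample signal
np.testing.assert_allclose(np.diff(t), SFT.delta_t)
```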
  {
    "library": "django",
    "name": "aupdate_session_auth_hash",
    "source_code": "async def aupdate_session_auth_hash(request, user):\n    await request.session.acycle_key()\n    if hasattr(user, 'get_session_auth_hash') and request.user == user:\n        await request.session.aset(HASH_SESSION_KEY, user.get_session_auth_hash())",
    "docstring": "See update_session_auth_hash().",
    "type": "function",
    "file_path": "django\\django\\contrib\\auth\\__init__.py",
    "ast_data": "AsyncFunctionDef name:aupdate_session_auth_hash arg:request arg:user arguments arg arg Call If BoolOp Call Compare Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_batch_accumulator",
    "source_code": "@classmethod\ndef _batch_accumulator(cls, primals, tangents):\n    acc = super(ForwardAccumulator, cls).__new__(cls, primals, tangents)\n    acc._recording = False\n    acc._accumulator = pywrap_tfe.TFE_Py_ForwardAccumulatorNew(True)\n    primal_ids = set()\n    for primal, tangent in zip(nest.flatten(primals), nest.flatten(tangents)):\n        tangent.shape.assert_is_compatible_with(tensor_shape.TensorShape([None]) + primal.shape)\n        if id(primal) in primal_ids:\n            raise ValueError('Tensor {} was specified as a primal multiple times. This may indicate an error. If it was intended, please sum the corresponding tangents.')\n        primal_ids.add(id(primal))\n    acc._watch(primals, tangents)\n    return acc",
    "docstring": "Factory constructor to test accumulator on batches of tangents. Args: primals: A tensor or nested structure of tensors to watch. tangents: A tensor or nested structure of tensors, with the same nesting structure as , with each element being a vector with compatible shape of the corresponding primal element. Returns: A batch accumulator object.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\forwardprop.py",
    "ast_data": "FunctionDef name:_batch_accumulator arg:cls arg:primals arg:tangents arguments arg arg arg Assign Call Call Assign Assign Call Assign Call For Call Call Call Call Call If Compare Call Raise Call Call Call Call Return return:yes"
  },
  {
    "library": "pandas",
    "name": "between_time",
    "source_code": "@final\ndef between_time(self, start_time, end_time, inclusive: IntervalClosedType='both', axis: Axis | None=None) -> Self:\n    if axis is None:\n        axis = 0\n    axis = self._get_axis_number(axis)\n    index = self._get_axis(axis)\n    if not isinstance(index, DatetimeIndex):\n        raise TypeError('Index must be DatetimeIndex')\n    left_inclusive, right_inclusive = validate_inclusive(inclusive)\n    indexer = index.indexer_between_time(start_time, end_time, include_start=left_inclusive, include_end=right_inclusive)\n    return self.take(indexer, axis=axis)",
    "docstring": "Select values between particular times of the day (e.g., 9:00-9:30 AM). By setting `SeriesDatetimeIndex`: >>> ts.between_time(\"0:45\", \"0:15\") A 2018-04-09 00:00:00 1 2018-04-12 01:00:00 4",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\generic.py",
    "ast_data": "FunctionDef name:between_time arg:self arg:start_time arg:end_time arg:inclusive arg:axis arguments arg arg arg arg arg If Compare Assign Assign Call Assign Call If Call Raise Call Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "make_input_fn_iterator",
    "source_code": "def make_input_fn_iterator(self, input_fn, replication_mode=InputReplicationMode.PER_WORKER):\n    return super(StrategyV1, self).make_input_fn_iterator(input_fn, replication_mode)",
    "docstring": "Returns an iterator split across replicas created from an input function. DEPRECATED: This method is not available in TF 2.x. The should take an object where information about batching and input sharding can be accessed: The returned by should have a per-replica batch size, which may be computed using . Args: input_fn: A function taking a object and returning a . replication_mode: an enum value of . Only is supported currently, which means there will be a single call to per worker. Replicas will dequeue from the local on their worker. Returns: An iterator object that should first be -ed. It may then either be passed to or you can to get the next value to pass to .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\distribute_lib.py",
    "ast_data": "FunctionDef name:make_input_fn_iterator arg:self arg:input_fn arg:replication_mode arguments arg arg arg Return return:yes Call Call"
  },
  {
    "library": "pandas",
    "name": "_is_multi",
    "source_code": "@final\n@cache_readonly\ndef _is_multi(self) -> bool:\n    return isinstance(self, ABCMultiIndex)",
    "docstring": "Cached check equivalent to isinstance(self, MultiIndex)",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\indexes\\base.py",
    "ast_data": "FunctionDef name:_is_multi arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "_is_sorted_by_data",
    "source_code": "def _is_sorted_by_data(graph):\n    assert graph.format == 'csr'\n    out_of_order = graph.data[:-1] > graph.data[1:]\n    line_change = np.unique(graph.indptr[1:-1] - 1)\n    line_change = line_change[line_change < out_of_order.shape[0]]\n    return out_of_order.sum() == out_of_order[line_change].sum()",
    "docstring": "Return whether the graph's non-zero entries are sorted by data. The non-zero entries are stored in graph.data and graph.indices. For each row (or sample), the non-zero entries can be either: - sorted by indices, as after graph.sort_indices(); - sorted by data, as after _check_precomputed(graph); - not sorted. Parameters ---------- graph : sparse matrix of shape (n_samples, n_samples) Neighbors graph as given by or . Matrix should be of format CSR format. Returns ------- res : bool Whether input graph is sorted by data.",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\neighbors\\_base.py",
    "ast_data": "FunctionDef name:_is_sorted_by_data arg:graph arguments arg Compare Assign Compare Assign Call Assign Compare Return return:yes Compare Call Call"
  },
  {
    "library": "tensorflow",
    "name": "merge_state",
    "source_code": "def merge_state(self, layers):\n    raise NotImplementedError",
    "docstring": "Merge the statistics of multiple preprocessing layers. This layer will contain the merged state. Arguments: layers: Layers whose statistics should be merge with the statistics of this layer.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\base_preprocessing_layer.py",
    "ast_data": "FunctionDef name:merge_state arg:self arg:layers arguments arg arg Raise"
  },
  {
    "library": "pandas",
    "name": "_BufferedWriter",
    "source_code": "class _BufferedWriter(BytesIO, ABC):\n    buffer = BytesIO()\n\n    @abstractmethod\n    def write_to_buffer(self) -> None:\n        ...\n\n    def close(self) -> None:\n        if self.closed:\n            return\n        if self.getbuffer().nbytes:\n            self.seek(0)\n            with self.buffer:\n                self.write_to_buffer()\n        else:\n            self.buffer.close()\n        super().close()",
    "docstring": "Some objects do not support multiple .write() calls (TarFile and ZipFile). This wrapper writes to the underlying buffer on close.",
    "type": "class",
    "file_path": "pandas\\pandas\\io\\common.py",
    "ast_data": "ClassDef name:_BufferedWriter Assign Call FunctionDef name:write_to_buffer arg:self arguments arg FunctionDef name:close arg:self arguments arg If Return return:no If Call Call With Call Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "generate_stage_to_rank_mapping",
    "source_code": "def generate_stage_to_rank_mapping(pp_size: int, num_stages: int, style: str='loop') -> dict[int, int]:\n    mapping = {}\n    if style == 'loop':\n        for stage_index in range(num_stages):\n            mapping[stage_index] = stage_index % pp_size\n    elif style == 'v':\n        if num_stages % pp_size != 0:\n            raise ValueError(f'num_stages {num_stages} must be evenly divisible by pp_size {pp_size} for V schedules')\n        rank_index = 0\n        for stage_index in range(num_stages):\n            mapping[stage_index] = rank_index\n            if (stage_index + 1) % pp_size == 0:\n                continue\n            if stage_index // pp_size % 2 == 0:\n                rank_index += 1\n            else:\n                rank_index -= 1\n    else:\n        raise ValueError(f'Style {style} is not supported.')\n    return mapping",
    "docstring": "Compute the stage id to rank mapping for either a looped or V-style schedule. Most commonly num_stages == pp_size * 2, but this function can be used to compute the mapping for any number of stages per rank.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\pipelining\\_utils.py",
    "ast_data": "FunctionDef name:generate_stage_to_rank_mapping arg:pp_size arg:num_stages arg:style arguments arg arg arg Assign If Compare For Call Assign If Compare If Compare Raise Call Assign For Call Assign If Compare If Compare Raise Call Return return:yes"
  },
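
Worked outputs for both styles with `pp_size=4` and `num_stages=8`, importing from the (private) module path listed above:

```python
from torch.distributed.pipelining._utils import generate_stage_to_rank_mapping

# Looped placement wraps stages around the ranks in order.
loop = generate_stage_to_rank_mapping(pp_size=4, num_stages=8, style="loop")
assert loop == {0: 0, 1: 1, 2: 2, 3: 3, 4: 0, 5: 1, 6: 2, 7: 3}

# V-style placement zigzags, so the turnaround ranks host adjacent stages.
v = generate_stage_to_rank_mapping(pp_size=4, num_stages=8, style="v")
assert v == {0: 0, 1: 1, 2: 2, 3: 3, 4: 3, 5: 2, 6: 1, 7: 0}
```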
  {
    "library": "pytorch",
    "name": "get_fusion_pattern_to_root_node_getter",
    "source_code": "def get_fusion_pattern_to_root_node_getter(backend_config: BackendConfig) -> dict[Pattern, Callable]:\n    root_node_getter_mapping: dict[Pattern, Callable] = {}\n    for pattern, config in backend_config._pattern_complex_format_to_config.items():\n        if config._root_node_getter is not None:\n            root_node_getter_mapping[pattern] = config._root_node_getter\n    return root_node_getter_mapping",
    "docstring": "Get a map from fusion pattern to a function that returns the root node from the fusion pattern, e.g. the most common one is: def get_root_node(node_pattern): while not isinstance(node_pattern[-1], Node): node_pattern = node_pattern[-1] return node_pattern[-1] This can work for all patterns whose root node is the \"last node\" in the pattern, e.g. (torch.add, MatchAllNode, (torch.ReLU, torch.Conv2d))",
    "type": "function",
    "file_path": "pytorch\\torch\\ao\\quantization\\backend_config\\utils.py",
    "ast_data": "FunctionDef name:get_fusion_pattern_to_root_node_getter arg:backend_config arguments arg For Call If Compare Assign Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "stop",
    "source_code": "@no_type_check\ndef stop(self) -> None:\n    self._num_cuda_retries = torch.cuda.memory_stats().get('num_alloc_retries', 0)\n    for h in self._hooks:\n        h.remove()\n    self._hooks.clear()\n    assert getattr(self, 'profile_mode', None) is not None\n    self.profile_mode.__exit__(None, None, None)\n    self.profile_mode = None",
    "docstring": "Remove module hooks and exit ``.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\_tools\\memory_tracker.py",
    "ast_data": "FunctionDef name:stop arg:self arguments arg Assign Call Call For Call Call Compare Call Call Assign"
  },
  {
    "library": "pytorch",
    "name": "from_namespaced_entity",
    "source_code": "@staticmethod\ndef from_namespaced_entity(namespaced_entity: str, max_level: int=2) -> NamespaceHelper:\n    names = namespaced_entity.split('::')\n    entity_name = names[-1]\n    namespace_str = '::'.join(names[:-1])\n    return NamespaceHelper(namespace_str=namespace_str, entity_name=entity_name, max_level=max_level)",
    "docstring": "Generate helper from nested namespaces as long as class/function name. E.g.: \"torch::lazy::add\"",
    "type": "method",
    "file_path": "pytorch\\torchgen\\utils.py",
    "ast_data": "FunctionDef name:from_namespaced_entity arg:namespaced_entity arg:max_level arguments arg arg Assign Call Assign Assign Call Return return:yes Call"
  },
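
Usage matching the docstring's example, assuming the `entity_name` accessor mirrors the constructor argument of the same name:

```python
from torchgen.utils import NamespaceHelper

helper = NamespaceHelper.from_namespaced_entity("torch::lazy::add")
# Everything before the last "::" is the namespace; the rest is the entity.
assert helper.entity_name == "add"
```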
  {
    "library": "tensorflow",
    "name": "profile",
    "source_code": "def profile(graph, run_metadata, output_dir=None):\n    profiles = get_profiles(graph, run_metadata)\n    output_file_template = None\n    if output_dir:\n        if not os.path.isdir(output_dir):\n            os.makedirs(output_dir)\n        time_suffix = time.strftime('%Y%m%d%H%M%S')\n        output_file_template = os.path.join(output_dir, '%s_' + time_suffix + '.pb.gz')\n    profile_files = []\n    for device, pprof_proto in profiles.items():\n        if output_file_template is None:\n            print('No output directory specified, printing to stdout instead.')\n            print(pprof_proto)\n        else:\n            device_name = str(device).strip('/').translate(maketrans('/:', '__'))\n            profile_file = output_file_template % device_name\n            profile_files.append(profile_file)\n            with gzip.open(profile_file, 'w') as output_file:\n                print('Writing profile to %s...' % profile_file)\n                output_file.write(pprof_proto.SerializeToString())\n    return profile_files",
    "docstring": "Generate profiles in pprof format. See for pprof proto format. Args: graph: A object. run_metadata: A proto. output_dir: (string) Directory to output pprof profile to. Profile files for each device will be stored in compressed serialized proto format. If output_dir is None, profile protos will be printed to stdout instead. Returns: List of output files created by this profile call. (Note: this list will be empty if output_dir is None)",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\profiler\\pprof_profiler.py",
    "ast_data": "FunctionDef name:profile arg:graph arg:run_metadata arg:output_dir arguments arg arg arg Assign Call Assign If If Call Call Assign Call Assign Call Assign For Call If Compare Call Call Assign Call Call Call Call Assign Call With Call Call Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_eager_reset",
    "source_code": "def _eager_reset(self):\n    if not ops.executing_eagerly_outside_functions():\n        raise ValueError('Resetting a multi-device iterator is only supported in the eager mode.')\n    self._incarnation_id = gen_dataset_ops.multi_device_iterator_init(self._dataset._variant_tensor, self._multi_device_iterator_resource, max_buffer_size=self._max_buffer_size)\n    for i, device in enumerate(self._devices):\n        with ops.device(device):\n            ds = _create_device_dataset(self._prototype_device_datasets[i], self._incarnation_id, self._prefetch_buffer_size, self._experimental_slack)\n            ds_variant = ds._variant_tensor\n            gen_dataset_ops.make_iterator(ds_variant, self._device_iterators[i]._iterator_resource)",
    "docstring": "Resets the MultiDeviceIterator in eager mode.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\data\\ops\\multi_device_iterator_ops.py",
    "ast_data": "FunctionDef name:_eager_reset arg:self arguments arg If Call Raise Call Assign Call For Call With Call Assign Call Assign Call"
  },
  {
    "library": "tensorflow",
    "name": "dismantle_graph",
    "source_code": "def dismantle_graph(graph) -> None:\n    graph._functions.clear()\n    graph.Dismantle()",
    "docstring": "Cleans up reference cycles from a . Helpful for making sure the garbage collector doesn't need to run after a temporary is no longer needed. Args: graph: A object to destroy. Neither it nor any of its ops are usable after this function runs.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\ops.py",
    "ast_data": "FunctionDef name:dismantle_graph arg:graph arguments arg Call Call"
  },
  {
    "library": "numpy",
    "name": "as_string",
    "source_code": "def as_string(obj, kind=1):\n    return Expr(Op.STRING, (obj, kind))",
    "docstring": "Return object as STRING expression (string literal constant).",
    "type": "function",
    "file_path": "numpy\\numpy\\f2py\\symbolic.py",
    "ast_data": "FunctionDef name:as_string arg:obj arg:kind arguments arg arg Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "__call__",
    "source_code": "def __call__(self, alpha1, phi0=None, derphi0=None, maxiter=100):\n    if phi0 is None:\n        phi0 = self.phi(0.0)\n    if derphi0 is None:\n        derphi0 = self.derphi(0.0)\n    phi1 = phi0\n    derphi1 = derphi0\n    task = b'START'\n    for i in range(maxiter):\n        stp, phi1, derphi1, task = self._iterate(alpha1, phi1, derphi1, task)\n        if not np.isfinite(stp):\n            task = b'WARN'\n            stp = None\n            break\n        if task[:2] == b'FG':\n            alpha1 = stp\n            phi1 = self.phi(stp)\n            derphi1 = self.derphi(stp)\n        else:\n            break\n    else:\n        stp = None\n        task = b'WARNING: dcsrch did not converge within max iterations'\n    if task[:5] == b'ERROR' or task[:4] == b'WARN':\n        stp = None\n    return (stp, phi1, phi0, task)",
    "docstring": "Parameters ---------- alpha1 : float alpha1 is the current estimate of a satisfactory step. A positive initial estimate must be provided. phi0 : float the value of at 0 (if known). derphi0 : float the derivative of at 0 (if known). maxiter : int Returns ------- alpha : float Step size, or None if no suitable step was found. phi : float Value of at the new point . phi0 : float Value of at . task : bytes On exit task indicates status information. If task[:4] == b'CONV' then the search is successful. If task[:4] == b'WARN' then the subroutine is not able to satisfy the convergence conditions. The exit value of stp contains the best point found during the search. If task[:5] == b'ERROR' then there is an error in the input arguments.",
    "type": "method",
    "file_path": "scipy\\scipy\\optimize\\_dcsrch.py",
    "ast_data": "FunctionDef name:__call__ arg:self arg:alpha1 arg:phi0 arg:derphi0 arg:maxiter arguments arg arg arg arg arg If Compare Assign Call If Compare Assign Call Assign Assign Assign For Call Assign Call If Call Assign Assign If Compare Assign Assign Call Assign Call Assign Assign If BoolOp Compare Compare Assign Return return:yes"
  },
  {
    "library": "pandas",
    "name": "unique",
    "source_code": "def unique(self) -> Self:\n    pa_type = self._pa_array.type\n    if pa_version_under11p0 and pa.types.is_duration(pa_type):\n        data = self._pa_array.cast(pa.int64())\n    else:\n        data = self._pa_array\n    pa_result = pc.unique(data)\n    if pa_version_under11p0 and pa.types.is_duration(pa_type):\n        pa_result = pa_result.cast(pa_type)\n    return type(self)(pa_result)",
    "docstring": "Compute the ArrowExtensionArray of unique values. Returns ------- ArrowExtensionArray",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\arrays\\arrow\\array.py",
    "ast_data": "FunctionDef name:unique arg:self arguments arg Assign If BoolOp Call Assign Call Call Assign Assign Call If BoolOp Call Assign Call Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "names",
    "source_code": "@property\ndef names(self):\n    return self._names",
    "docstring": "The list of names for each component of a queue element.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\data_flow_ops.py",
    "ast_data": "FunctionDef name:names arg:self arguments arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_optimize_assert",
    "source_code": "def _optimize_assert(rebuild_ctx: Callable[[], OptimizeContext], backend, *, hooks=Hooks(None, None, None), export=False, export_constraints=None, dynamic=None):\n    backend = get_compiler_fn(backend)\n    backend_ctx_ctor = getattr(backend, 'backend_ctx_ctor', null_context)\n    return _optimize_catch_errors(convert_frame.convert_frame_assert(backend, export=export, export_constraints=export_constraints), hooks, backend_ctx_ctor, export=export, dynamic=dynamic, rebuild_ctx=rebuild_ctx)",
    "docstring": "The same as",
    "type": "function",
    "file_path": "pytorch\\torch\\_dynamo\\eval_frame.py",
    "ast_data": "FunctionDef name:_optimize_assert arg:rebuild_ctx arg:backend arguments arg arg arg arg arg arg Call Assign Call Assign Call Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_trace_variant_creation",
    "source_code": "def _trace_variant_creation(self):\n    variant = self._variant_tensor\n    if not isinstance(variant, ops.EagerTensor):\n        raise NotImplementedError('Constructing a tf.function that reproduces a given dataset is only supported for datasets created eagerly. Please file a feature request if this is important to you.')\n    with context.eager_mode(), ops.device('CPU'):\n        graph_def = graph_pb2.GraphDef().FromString(self._as_serialized_graph(external_state_policy=options_lib.ExternalStatePolicy.FAIL).numpy())\n    output_node_names = []\n    for node in graph_def.node:\n        if node.op == '_Retval':\n            output_node_names = node.input\n    if len(output_node_names) != 1:\n        raise AssertionError(f'Dataset graph is expected to only have one return value but found {len(output_node_names)} return values: {output_node_names}.')\n    output_node_name = output_node_names[0]\n    file_path_nodes = {}\n    if ops.get_default_graph().building_function:\n        asset_tracker = self._maybe_track_assets(graph_def)\n        for key in asset_tracker:\n            assets_list = [array_ops.expand_dims(asset.asset_path, axis=0) for asset in asset_tracker[key]]\n            file_path_nodes[key] = array_ops.concat(assets_list, axis=0)\n    variant_function = wrap_function.function_from_graph_def(graph_def, inputs=[], outputs=output_node_name + ':0', captures=file_path_nodes)\n    for used_function in self._functions():\n        used_function.function.add_to_graph(variant_function.graph)\n    return variant_function",
    "docstring": "Traces a function which outputs a variant for this dataset. Note that creating this function involves evaluating an op, and is currently only supported when executing eagerly. Returns: A zero-argument which outputs a variant .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\data\\ops\\dataset_ops.py",
    "ast_data": "FunctionDef name:_trace_variant_creation arg:self arguments arg Assign If Call Raise Call With Call Call Assign Call Call Call Call Assign For If Compare Assign If Compare Call Raise Call Call Assign Assign If Call Assign Call For Assign Call Assign Call Assign Call For Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "strip_strings",
    "source_code": "def strip_strings(model):\n    model.description = None\n    for subgraph in model.subgraphs:\n        subgraph.name = None\n        for tensor in subgraph.tensors:\n            tensor.name = None\n    model.signatureDefs = None",
    "docstring": "Strips all nonessential strings from the model to reduce model size. We remove the following strings: (find strings by searching \":string\" in the tensorflow lite flatbuffer schema) 1. Model description 2. SubGraph name 3. Tensor names We retain OperatorCode custom_code and Metadata name. Args: model: The model from which to remove nonessential strings.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\lite\\tools\\flatbuffer_utils.py",
    "ast_data": "FunctionDef name:strip_strings arg:model arguments arg Assign For Assign For Assign Assign"
  },
  {
    "library": "tensorflow",
    "name": "_print_cache",
    "source_code": "def _print_cache():\n    replica_str = '%d' % file_index\n    if self._parameters.trace_dir:\n        output_path = os.path.join(self._parameters.trace_dir, _COMPACT_TRACE_FILE_PREFIX) + replica_str + self._get_outfile_suffix()\n        output_stream = _OUTPUT_STREAM_ESCAPE + output_path\n    else:\n        output_stream = sys.stderr\n    new_step_line = _REPLICA_ID_TAG + replica_str\n    print_ops = []\n    if self._parameters.inspect_trace:\n        if self._num_signature_dimensions() > 1:\n            raise ValueError('Inspecting multi signatures are not supported.')\n        if self._parameters.trace_mode in tensor_tracer_flags.TRACE_MODE_HISTORY:\n            print_ops.append(self._inspect_history_cache(cache=cache, replica_id=replica_id, step_num=step_num, tensor_trace_order=tensor_trace_order))\n        else:\n            print_ops.append(self._inspect_summary_cache(cache=cache, replica_id=replica_id, step_num=step_num, output_stream=output_stream, tensor_trace_order=tensor_trace_order))\n    else:\n        for i in range(self._num_signature_dimensions()):\n            print_ops.append(logging_ops.print_v2(new_step_line, '\\n', cache[:, i], '\\n', summarize=-1, output_stream=output_stream))\n    with ops.control_dependencies(print_ops):\n        return constant_op.constant(0).op",
    "docstring": "Flushes the cache to a file.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\tpu\\tensor_tracer.py",
    "ast_data": "FunctionDef name:_print_cache arguments Assign If Assign Call Call Assign Assign Assign Assign If If Compare Call Raise Call If Compare Call Call Call Call For Call Call Call Call With Call Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "decision_function",
    "source_code": "@available_if(_estimator_has('decision_function'))\ndef decision_function(self, X, **params):\n    check_is_fitted(self)\n    _raise_for_params(params, self, 'decision_function')\n    if _routing_enabled():\n        routed_params = process_routing(self, 'decision_function', **params)\n    else:\n        routed_params = Bunch(estimator=Bunch(decision_function={}))\n    X = validate_data(self, X, accept_sparse=True, ensure_all_finite=False, reset=False)\n    return self.estimator_.decision_function(X, **routed_params.estimator.decision_function)",
    "docstring": "Call decision function of the . Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) Array representing the data. **params : dict of str -> object Parameters to pass to the underlying estimator's `enable_metadata_routing=TrueMetadata Routing User Guide estimator`.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\semi_supervised\\_self_training.py",
    "ast_data": "FunctionDef name:decision_function arg:self arg:X arguments arg arg arg Call Call If Call Assign Call Assign Call Call Assign Call Return return:yes Call Call Call"
  },
  {
    "library": "numpy",
    "name": "mT",
    "source_code": "@property\ndef mT(self):\n    if self.ndim < 2:\n        raise ValueError('matrix transpose with ndim < 2 is undefined')\n    if self._mask is nomask:\n        return masked_array(data=self._data.mT)\n    else:\n        return masked_array(data=self.data.mT, mask=self.mask.mT)",
    "docstring": "Return the matrix-transpose of the masked array. The matrix transpose is the transpose of the last two dimensions, even if the array is of higher dimension. .. versionadded:: 2.0 Returns ------- result: MaskedArray The masked array with the last two dimensions transposed Raises ------ ValueError If the array is of dimension less than 2. See Also -------- ndarray.mT: Equivalent method for arrays",
    "type": "method",
    "file_path": "numpy\\numpy\\ma\\core.py",
    "ast_data": "FunctionDef name:mT arg:self arguments arg If Compare Raise Call If Compare Return return:yes Call Return return:yes Call"
  },
  {
    "library": "django",
    "name": "max_query_params",
    "source_code": "@property\ndef max_query_params(self):\n    return self.connection.connection.getlimit(sqlite3.SQLITE_LIMIT_VARIABLE_NUMBER)",
    "docstring": "SQLite has a variable limit per query. The limit can be changed using the SQLITE_MAX_VARIABLE_NUMBER compile-time option (which defaults to 999 in versions < 3.32.0 or 32766 in newer versions) or lowered per connection at run-time with setlimit(SQLITE_LIMIT_VARIABLE_NUMBER, N).",
    "type": "method",
    "file_path": "django\\django\\db\\backends\\sqlite3\\features.py",
    "ast_data": "FunctionDef name:max_query_params arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, serving_funcs, inputs):\n    super(TFLiteJaxConverterV2, self).__init__()\n    self._serving_funcs = serving_funcs\n    self._inputs = inputs",
    "docstring": "Constructor for TFLiteConverter. Args: serving_funcs: A list functions of the serving func of the jax module, the model params should already be inlined. (e.g., ) inputs: Array of input tensor placeholders tuple,s like . For example, wrapped in an array like \"[('input1', input1), ('input2', input2)]]\". Jax functions are polymorphic, for example: Will yield different computations if different input signatures are passed in: Pass will yield a scalar while pass will yield a broadcasting add. We will need the input information to do tracing for the converter to properly convert the model. So it's important to pass in the desired with the correct input shape/type. In the converted tflite model, the function name will be default to \"main\", the output names will be the traced outputs. The output ordering shall match the serving function.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\lite\\python\\lite.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:serving_funcs arg:inputs arguments arg arg arg Call Call Assign Assign"
  },
  {
    "library": "tensorflow",
    "name": "Symbol",
    "source_code": "class Symbol(collections.namedtuple('Symbol', ['name'])):\n    pass",
    "docstring": "Represents a Python symbol.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\autograph\\pyct\\qual_names.py",
    "ast_data": "ClassDef name:Symbol Call"
  },
  {
    "library": "scikit-learn",
    "name": "fit",
    "source_code": "def fit(self, X, y, **fit_params):\n    _raise_for_params(fit_params, self, 'fit', allow=['sample_weight'])\n    y = column_or_1d(y, warn=True)\n    return super().fit(X, y, **fit_params)",
    "docstring": "Fit the estimators. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) Training vectors, where is the number of samples and is the number of features. y : array-like of shape (n_samples,) Target values. **fit_params : dict Parameters to pass to the underlying estimators. .. versionadded:: 1.6 Only available if , which can be set by using `Metadata Routing User Guide ` for more details. Returns ------- self : object Returns a fitted instance.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\ensemble\\_stacking.py",
    "ast_data": "FunctionDef name:fit arg:self arg:X arg:y arguments arg arg arg arg Call Assign Call Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "_reshard_output",
    "source_code": "def _reshard_output(module: torch.nn.Module, resharding_spec: ShardingSpec) -> torch.nn.Module:\n\n    def hook_func(_module, _input, output):\n        if isinstance(output, ShardedTensor):\n            return output.reshard(resharding_spec)\n        return output\n    module.register_forward_hook(hook_func)\n    return module",
    "docstring": "Hook a module with output resharding in the forward pass according to the given `torch.nn.Moduletorch.distributed._shard.sharding_spec.ShardingSpectorch.nn.Module` object with reshard API hooked.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\_shard\\api.py",
    "ast_data": "FunctionDef name:_reshard_output arg:module arg:resharding_spec arguments arg arg FunctionDef name:hook_func arg:_module arg:_input arg:output arguments arg arg arg If Call Return return:yes Call Return return:yes Call Return return:yes"
  },
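
The mechanism `_reshard_output` relies on is PyTorch's forward hook: returning a non-None value from a hook registered with `register_forward_hook` replaces the module's output. A self-contained sketch of that mechanism without sharded tensors:

```python
import torch
import torch.nn as nn

def doubling_hook(module, inputs, output):
    # Returning a value replaces the module's output, just as hook_func
    # above returns the resharded tensor.
    return output * 2

layer = nn.Linear(3, 3)
handle = layer.register_forward_hook(doubling_hook)
x = torch.randn(2, 3)
assert torch.allclose(layer(x), (x @ layer.weight.T + layer.bias) * 2)
handle.remove()
```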
  {
    "library": "scipy",
    "name": "reconstruct_interp_matrix",
    "source_code": "def reconstruct_interp_matrix(idx, proj):\n    n, krank = (len(idx), proj.shape[0])\n    if _is_real(proj):\n        p = np.zeros([krank, n], dtype=np.float64)\n    else:\n        p = np.zeros([krank, n], dtype=np.complex128)\n    for ci in range(krank):\n        p[ci, idx[ci]] = 1.0\n    p[:, idx[krank:]] = proj[:, :]\n    return p",
    "docstring": "Reconstruct interpolation matrix from ID. The interpolation matrix can be reconstructed from the ID indices and coefficients and , respectively, as:: P = numpy.hstack([numpy.eye(proj.shape[0]), proj])[:,numpy.argsort(idx)] The original matrix can then be reconstructed from its skeleton matrix `reconstruct_matrix_from_idreconstruct_skel_matrix_backend.idd_reconint_backend.idz_reconintnumpy.ndarraynumpy.ndarraynumpy.ndarray` Interpolation matrix.",
    "type": "function",
    "file_path": "scipy\\scipy\\linalg\\interpolative.py",
    "ast_data": "FunctionDef name:reconstruct_interp_matrix arg:idx arg:proj arguments arg arg Assign Call If Call Assign Call Assign Call For Call Assign Assign Return return:yes"
  },
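
The closed form quoted in the docstring agrees with the element-wise construction in the function body; a worked check with a hypothetical ID output (`idx` and `proj` invented for illustration):

```python
import numpy as np

idx = np.array([2, 0, 1, 3])           # skeleton columns listed first
proj = np.array([[0.5, -1.0],
                 [2.0,  0.0]])         # interpolation coefficients

# Closed form from the docstring.
P_direct = np.hstack([np.eye(proj.shape[0]), proj])[:, np.argsort(idx)]

# Element-wise construction mirroring reconstruct_interp_matrix.
krank, n = proj.shape[0], len(idx)
P = np.zeros((krank, n))
for ci in range(krank):
    P[ci, idx[ci]] = 1.0
P[:, idx[krank:]] = proj
np.testing.assert_allclose(P, P_direct)
```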
  {
    "library": "pytorch",
    "name": "get_chunked_dim_size",
    "source_code": "def get_chunked_dim_size(dim_size, split_size, idx):\n    return max(min(dim_size, split_size * (idx + 1)) - split_size * idx, 0)",
    "docstring": "Computes the dim size of the chunk for provided ``. idx(int): The index of chunk whose dim size is being requested. Returns: An int indicating the dim size of the chunk.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\_shard\\sharding_spec\\_internals.py",
    "ast_data": "FunctionDef name:get_chunked_dim_size arg:dim_size arg:split_size arg:idx arguments arg arg arg Return return:yes Call Call"
  },
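
A worked example of the clamped arithmetic: chunking a dimension of size 13 with split size 4, importing from the (private) module path listed above:

```python
from torch.distributed._shard.sharding_spec._internals import (
    get_chunked_dim_size,
)

# min(dim_size, split_size * (idx + 1)) - split_size * idx, clamped at 0.
assert [get_chunked_dim_size(13, 4, i) for i in range(4)] == [4, 4, 4, 1]
assert get_chunked_dim_size(13, 4, 4) == 0   # out-of-range chunk is empty
```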
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, leftover_dependency_map):\n    self.leftover_dependency_map = leftover_dependency_map\n    super(CyclicDependencyError, self).__init__()",
    "docstring": "Creates a CyclicDependencyException.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\trackable\\trackable_utils.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:leftover_dependency_map arguments arg arg Assign Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_get_operand_name_and_index",
    "source_code": "def _get_operand_name_and_index(self, numeric_verify_name: str) -> Tuple[str, int]:\n    tensor_name, tensor_idx = numeric_verify_name.rsplit(':', 1)\n    float_tensor_name = tensor_name[len(_NUMERIC_VERIFY_OP_NAME) + 1:]\n    if re.match('\\\\d', float_tensor_name[-1]):\n        float_tensor_name = float_tensor_name[:-1]\n    return (float_tensor_name, int(tensor_idx))",
    "docstring": "Gets the index and name of NumericVerify Op's quantized input tensor. Args: numeric_verify_name: name of the NumericVerify op's output tensor. It has format of Returns: Tuple of (tensor_name, tensor_idx) for quantized op's output tensor.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\lite\\tools\\optimize\\debugging\\python\\debugger.py",
    "ast_data": "FunctionDef name:_get_operand_name_and_index arg:self arg:numeric_verify_name arguments arg arg Assign Call Assign Call If Call Assign Return return:yes Call"
  },
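
A stand-alone re-implementation of the parsing, assuming the module constant `_NUMERIC_VERIFY_OP_NAME` is the literal "NumericVerify" (an assumption; the constant lives elsewhere in the debugger module):

```python
import re

NUMERIC_VERIFY_OP_NAME = "NumericVerify"  # assumed value of the module constant

def operand_name_and_index(numeric_verify_name):
    tensor_name, tensor_idx = numeric_verify_name.rsplit(":", 1)
    # Strip the "NumericVerify/" prefix, then a trailing digit if present.
    float_tensor_name = tensor_name[len(NUMERIC_VERIFY_OP_NAME) + 1:]
    if re.match(r"\d", float_tensor_name[-1]):
        float_tensor_name = float_tensor_name[:-1]
    return float_tensor_name, int(tensor_idx)

assert operand_name_and_index("NumericVerify/conv2d/Relu:5") == ("conv2d/Relu", 5)
```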
  {
    "library": "sphinx",
    "name": "clear_doc",
    "source_code": "def clear_doc(self, docname: str) -> None:\n    pass",
    "docstring": "Remove traces of a document in the domain-specific inventories.",
    "type": "method",
    "file_path": "sphinx\\sphinx\\domains\\__init__.py",
    "ast_data": "FunctionDef name:clear_doc arg:self arg:docname arguments arg arg"
  },
  {
    "library": "tensorflow",
    "name": "WholeFileReader",
    "source_code": "@tf_export(v1=['WholeFileReader'])\nclass WholeFileReader(ReaderBase):\n\n    @deprecation.deprecated(None, 'Queue-based input pipelines have been replaced by `tf.data`. Use `tf.data.Dataset.map(tf.read_file)`.')\n    def __init__(self, name=None):\n        rr = gen_io_ops.whole_file_reader_v2(name=name)\n        super(WholeFileReader, self).__init__(rr, supports_serialize=True)",
    "docstring": "A Reader that outputs the entire contents of a file as a value. To use, enqueue filenames in a Queue. The output of Read will be a filename (key) and the contents of that file (value). See ReaderBase for supported methods. @compatibility(eager) Readers are not compatible with eager execution. Instead, please use to get data into your model. @end_compatibility",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\io_ops.py",
    "ast_data": "ClassDef name:WholeFileReader FunctionDef name:__init__ arg:self arg:name arguments arg arg Assign Call Call Call Call Call"
  },
  {
    "library": "scipy",
    "name": "get_region",
    "source_code": "def get_region(z):\n    if z == 1 + 0j:\n        return 0\n    elif abs(z) < 0.9 and z.real >= 0:\n        return 1\n    elif abs(z) <= 1 and z.real < 0:\n        return 2\n    elif 0.9 <= abs(z) <= 1 and abs(1 - z) < 0.9:\n        return 3\n    elif 0.9 <= abs(z) <= 1 and abs(1 - z) >= 0.9:\n        return 4\n    elif 1 < abs(z) < 1.1 and abs(1 - z) >= 0.9 and (z.real >= 0):\n        return 5\n    else:\n        return 6",
    "docstring": "Assign numbers for regions where hyp2f1 must be handled differently.",
    "type": "function",
    "file_path": "scipy\\scipy\\special\\_precompute\\hyp2f1_data.py",
    "ast_data": "FunctionDef name:get_region arg:z arguments arg If Compare Return return:yes If BoolOp Compare Call Compare Return return:yes If BoolOp Compare Call Compare Return return:yes If BoolOp Compare Call Compare Call Return return:yes If BoolOp Compare Call Compare Call Return return:yes If BoolOp Compare Call Compare Call Compare Return return:yes Return return:yes"
  },
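
Sample points exercising each branch of `get_region` (values chosen for illustration):

```python
assert get_region(1 + 0j) == 0      # the special point z = 1
assert get_region(0.5 + 0j) == 1    # |z| < 0.9 and Re(z) >= 0
assert get_region(-0.5 + 0j) == 2   # |z| <= 1 and Re(z) < 0
assert get_region(0.95 + 0j) == 3   # 0.9 <= |z| <= 1 and |1 - z| < 0.9
assert get_region(1j) == 4          # 0.9 <= |z| <= 1 and |1 - z| >= 0.9
assert get_region(1.05j) == 5       # 1 < |z| < 1.1, |1 - z| >= 0.9, Re(z) >= 0
assert get_region(2 + 0j) == 6      # everything else
```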
  {
    "library": "django",
    "name": "delete_batch",
    "source_code": "def delete_batch(self, pk_list, using):\n    num_deleted = 0\n    field = self.get_meta().pk\n    for offset in range(0, len(pk_list), GET_ITERATOR_CHUNK_SIZE):\n        self.clear_where()\n        self.add_filter(f'{field.attname}__in', pk_list[offset:offset + GET_ITERATOR_CHUNK_SIZE])\n        num_deleted += self.do_query(self.get_meta().db_table, self.where, using=using)\n    return num_deleted",
    "docstring": "Set up and execute delete queries for all the objects in pk_list. More than one physical query may be executed if there are a lot of values in pk_list.",
    "type": "method",
    "file_path": "django\\django\\db\\models\\sql\\subqueries.py",
    "ast_data": "FunctionDef name:delete_batch arg:self arg:pk_list arg:using arguments arg arg arg Assign Assign Call For Call Call Call Call Call Call Return return:yes"
  },
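The chunking in `delete_batch` keeps each `__in` filter bounded by the iterator chunk size. Below is a standalone sketch of the same batching pattern, with a hypothetical `delete_fn` and an in-memory dict standing in for the database table; the chunk-size constant is assumed to match Django's usual value:

```python
GET_ITERATOR_CHUNK_SIZE = 100  # assumed value of Django's constant

def delete_in_batches(pk_list, delete_fn, chunk_size=GET_ITERATOR_CHUNK_SIZE):
    """Issue one delete per chunk of primary keys; return total rows deleted."""
    num_deleted = 0
    for offset in range(0, len(pk_list), chunk_size):
        num_deleted += delete_fn(pk_list[offset:offset + chunk_size])
    return num_deleted

table = {pk: f"row-{pk}" for pk in range(250)}
deleted = delete_in_batches(
    list(table), lambda pks: sum(1 for pk in pks if table.pop(pk, None) is not None)
)
assert deleted == 250 and not table  # three queries: 100 + 100 + 50 rows
```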
  {
    "library": "numpy",
    "name": "any",
    "source_code": "def any(self, axis=None, out=None):\n    return N.ndarray.any(self, axis, out, keepdims=True)._collapse(axis)",
    "docstring": "Test whether any array element along a given axis evaluates to True. Refer to for full documentation. Parameters ---------- axis : int, optional Axis along which logical OR is performed out : ndarray, optional Output to existing array instead of creating new one, must have same shape as expected output Returns ------- any : bool, ndarray Returns a single bool if is `ndarray`",
    "type": "method",
    "file_path": "numpy\\numpy\\matrixlib\\defmatrix.py",
    "ast_data": "FunctionDef name:any arg:self arg:axis arg:out arguments arg arg arg Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "num_buckets",
    "source_code": "@property\ndef num_buckets(self):\n    return len(self.vocabulary_list) + self.num_oov_buckets",
    "docstring": "Returns number of buckets in this sparse feature.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\feature_column\\feature_column_v2.py",
    "ast_data": "FunctionDef name:num_buckets arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, ctx):\n    self._lineno = 0\n    self._col_offset = 0\n    self.ctx = ctx\n    self.state = _State()",
    "docstring": "Initialize the transformer. Subclasses should call this. Args: ctx: A Context object.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\autograph\\pyct\\transformer.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:ctx arguments arg arg Assign Assign Assign Assign Call"
  },
  {
    "library": "tensorflow",
    "name": "block_shape_tensor",
    "source_code": "def block_shape_tensor(self):\n    return self._block_shape_tensor()",
    "docstring": "Shape of the block dimensions of .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\linalg\\linear_operator_circulant.py",
    "ast_data": "FunctionDef name:block_shape_tensor arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "django",
    "name": "convert_extent3d",
    "source_code": "def convert_extent3d(self, box3d):\n    if box3d is None:\n        return None\n    ll, ur = box3d[6:-1].split(',')\n    xmin, ymin, zmin = map(float, ll.split())\n    xmax, ymax, zmax = map(float, ur.split())\n    return (xmin, ymin, zmin, xmax, ymax, zmax)",
    "docstring": "Return a 6-tuple extent for the aggregate by converting the 3d bounding-box text returned by PostGIS ( argument), for example: \"BOX3D(-90.0 30.0 1, -85.0 40.0 2)\".",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\db\\backends\\postgis\\operations.py",
    "ast_data": "FunctionDef name:convert_extent3d arg:self arg:box3d arguments arg arg If Compare Return return:no Assign Call Assign Call Call Assign Call Call Return return:yes"
  },
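The slice `box3d[6:-1]` in `convert_extent3d` strips the `BOX3D(` prefix and the trailing parenthesis before splitting the two corners on the comma. A self-contained sketch of just that parsing, using the docstring's example string:

```python
def parse_box3d(box3d: str):
    # Strip "BOX3D(" and the trailing ")", then split lower-left / upper-right
    # corners on the comma, exactly as the method above does.
    ll, ur = box3d[6:-1].split(',')
    xmin, ymin, zmin = map(float, ll.split())
    xmax, ymax, zmax = map(float, ur.split())
    return (xmin, ymin, zmin, xmax, ymax, zmax)

assert parse_box3d("BOX3D(-90.0 30.0 1, -85.0 40.0 2)") == (
    -90.0, 30.0, 1.0, -85.0, 40.0, 2.0)
```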
  {
    "library": "tensorflow",
    "name": "get_memory_growth",
    "source_code": "@tf_export('config.experimental.get_memory_growth')\ndef get_memory_growth(device):\n    return context.context().get_memory_growth(device)",
    "docstring": "Get if memory growth is enabled for a . If memory growth is enabled for a , the runtime initialization will not allocate all memory on the device. For example: >>> physical_devices = tf.config.list_physical_devices('GPU') >>> try: ... tf.config.experimental.set_memory_growth(physical_devices[0], True) ... assert tf.config.experimental.get_memory_growth(physical_devices[0]) ... except: ... # Invalid device or cannot modify virtual devices once initialized. ... pass Args: device: to query Returns: A boolean indicating the memory growth setting for the . Raises: ValueError: Invalid specified.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\config.py",
    "ast_data": "FunctionDef name:get_memory_growth arg:device arguments arg Return return:yes Call Call Call"
  },
  {
    "library": "kornia",
    "name": "make_grid",
    "source_code": "def make_grid(tensor: Tensor, n_row: Optional[int]=None, padding: int=2) -> Tensor:\n    if not isinstance(tensor, torch.Tensor):\n        raise TypeError('Input tensor must be a PyTorch tensor.')\n    B, C, H, W = tensor.shape\n    if n_row is None:\n        n_row = int(torch.sqrt(torch.tensor(B, dtype=torch.float32)).ceil())\n    n_col = (B + n_row - 1) // n_row\n    padded_H = H + padding\n    padded_W = W + padding\n    combined_H = n_row * padded_H - padding\n    combined_W = n_col * padded_W - padding\n    pad_value = 0\n    combined_image = torch.full((C, combined_H, combined_W), pad_value, dtype=tensor.dtype)\n    for idx in range(B):\n        row = idx // n_col\n        col = idx % n_col\n        top = row * padded_H\n        left = col * padded_W\n        combined_image[:, top:top + H, left:left + W] = tensor[idx]\n    return combined_image",
    "docstring": "Convert a batched tensor to one image with padding in between. Args: tensor: A batched tensor of shape (B, C, H, W). n_row: Number of images displayed in each row of the grid. padding: The amount of padding to add between images. Returns: Tensor: The combined image grid.",
    "type": "function",
    "file_path": "kornia\\kornia\\utils\\image.py",
    "ast_data": "FunctionDef name:make_grid arg:tensor arg:n_row arg:padding arguments arg arg arg If Call Raise Call Assign If Compare Assign Call Call Call Call Assign Assign Assign Assign Assign Assign Assign Call For Call Assign Assign Assign Assign Assign Return return:yes"
  },
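The grid geometry in `make_grid` works out to `n_row * (H + padding) - padding` by `n_col * (W + padding) - padding`. A small usage check, assuming `make_grid` from `kornia.utils.image` is in scope:

```python
import torch

# Tile a batch of 5 single-channel 8x8 images into a 2-row grid.
batch = torch.rand(5, 1, 8, 8)
grid = make_grid(batch, n_row=2, padding=2)
# n_col = (5 + 2 - 1) // 2 = 3; height = 2*10 - 2 = 18; width = 3*10 - 2 = 28
assert grid.shape == (1, 18, 28)
```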
  {
    "library": "pandas",
    "name": "next_monday_or_tuesday",
    "source_code": "def next_monday_or_tuesday(dt: datetime) -> datetime:\n    dow = dt.weekday()\n    if dow in (5, 6):\n        return dt + timedelta(2)\n    if dow == 0:\n        return dt + timedelta(1)\n    return dt",
    "docstring": "For second holiday of two adjacent ones! If holiday falls on Saturday, use following Monday instead; if holiday falls on Sunday or Monday, use following Tuesday instead (because Monday is already taken by adjacent holiday on the day before)",
    "type": "function",
    "file_path": "pandas\\pandas\\tseries\\holiday.py",
    "ast_data": "FunctionDef name:next_monday_or_tuesday arg:dt arguments arg Assign Call If Compare Return return:yes Call If Compare Return return:yes Call Return return:yes"
  },
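A quick check of the observance rule above: a Saturday holiday is observed on the following Monday (+2 days), and a Sunday or Monday holiday on the following Tuesday. The import path is taken from the record's file path:

```python
from datetime import datetime
from pandas.tseries.holiday import next_monday_or_tuesday

assert next_monday_or_tuesday(datetime(2024, 1, 6)) == datetime(2024, 1, 8)  # Sat -> Mon
assert next_monday_or_tuesday(datetime(2024, 1, 7)) == datetime(2024, 1, 9)  # Sun -> Tue
assert next_monday_or_tuesday(datetime(2024, 1, 8)) == datetime(2024, 1, 9)  # Mon -> Tue
```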
  {
    "library": "tensorflow",
    "name": "get_options",
    "source_code": "@staticmethod\ndef get_options():\n    return [str(option) for option in list(OpsSet)]",
    "docstring": "Returns a list of OpsSet options as a list of strings.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\lite\\python\\convert.py",
    "ast_data": "FunctionDef name:get_options arguments Return return:yes Call Call"
  },
  {
    "library": "django",
    "name": "safeseq",
    "source_code": "@register.filter(is_safe=True)\ndef safeseq(value):\n    return [mark_safe(obj) for obj in value]",
    "docstring": "A \"safe\" filter for sequences. Mark each element in the sequence, individually, as safe, after converting them to strings. Return a list with the results.",
    "type": "function",
    "file_path": "django\\django\\template\\defaultfilters.py",
    "ast_data": "FunctionDef name:safeseq arg:value arguments arg Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "broadcast",
    "source_code": "def broadcast(tensor):\n    _check_device(tensor)\n    with ops.device(tensor.device):\n        return gen_nccl_ops.nccl_broadcast(input=tensor, shape=tensor.shape)",
    "docstring": "Returns a tensor that can be efficiently transferred to other devices. Args: tensor: The tensor to send; must be assigned to a GPU device. Returns: A tensor with the value of , which can be used as input to ops on other GPU devices.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\nccl_ops.py",
    "ast_data": "FunctionDef name:broadcast arg:tensor arguments arg Call With Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "in_main_process",
    "source_code": "@tf_export('__internal__.distribute.combinations.in_main_process', v1=[])\ndef in_main_process():\n    return not _running_in_worker",
    "docstring": "Whether it's in the main test process. This is normally used to prepare the test environment which should only happen in the main process. Returns: A boolean.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\combinations.py",
    "ast_data": "FunctionDef name:in_main_process arguments Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "__init__",
    "source_code": "def __init__(self, lock_file_path, wait_seconds=0.1, warn_after_seconds=None):\n    self.lock_file_path = lock_file_path\n    self.wait_seconds = wait_seconds\n    self.fd = None\n    self.warn_after_seconds = warn_after_seconds",
    "docstring": "Create a new :class:. Args: lock_file_path: The path to the file used for locking. wait_seconds: The seconds to periodically sleep (spin) when calling ``. warn_after_seconds: The seconds to wait before showing lock file path to warn existing lock file.",
    "type": "method",
    "file_path": "pytorch\\torch\\utils\\file_baton.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:lock_file_path arg:wait_seconds arg:warn_after_seconds arguments arg arg arg arg Assign Assign Assign Assign"
  },
  {
    "library": "pytorch",
    "name": "_dispatch_custom_op",
    "source_code": "def _dispatch_custom_op(sharding_spec, op: Callable, types, args, kwargs, process_group):\n    class_name = type(sharding_spec).__qualname__\n    if not _has_custom_op(sharding_spec, op):\n        raise RuntimeError(f'Custom op: {op} not registered for {class_name}')\n    func = _CUSTOM_SHARDING_SPEC_OPS[class_name][op]\n    return func(types, args, kwargs, process_group)",
    "docstring": "Calls the custom op for this ShardingSpec if it exists.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\_shard\\sharding_spec\\api.py",
    "ast_data": "FunctionDef name:_dispatch_custom_op arg:sharding_spec arg:op arg:types arg:args arg:kwargs arg:process_group arguments arg arg arg arg arg arg Assign Call If Call Raise Call Assign Return return:yes Call"
  },
  {
    "library": "virtualenv",
    "name": "satisfies",
    "source_code": "def satisfies(self, spec, impl_must_match):\n    if spec.path:\n        if self.executable == os.path.abspath(spec.path):\n            return True\n        if not spec.is_abs:\n            basename = os.path.basename(self.original_executable)\n            spec_path = spec.path\n            if sys.platform == 'win32':\n                basename, suffix = os.path.splitext(basename)\n                if spec_path.endswith(suffix):\n                    spec_path = spec_path[:-len(suffix)]\n            if basename != spec_path:\n                return False\n    if impl_must_match and spec.implementation is not None and (spec.implementation.lower() != self.implementation.lower()):\n        return False\n    if spec.architecture is not None and spec.architecture != self.architecture:\n        return False\n    if spec.free_threaded is not None and spec.free_threaded != self.free_threaded:\n        return False\n    for our, req in zip(self.version_info[0:3], (spec.major, spec.minor, spec.micro)):\n        if req is not None and our is not None and (our != req):\n            return False\n    return True",
    "docstring": "Check if a given specification can be satisfied by the this python interpreter instance.",
    "type": "method",
    "file_path": "virtualenv\\src\\virtualenv\\discovery\\py_info.py",
    "ast_data": "FunctionDef name:satisfies arg:self arg:spec arg:impl_must_match arguments arg arg arg If If Compare Call Return return:yes If Assign Call Assign If Compare Assign Call If Call Assign Call If Compare Return return:yes If BoolOp Compare Compare Call Call Return return:yes If BoolOp Compare Compare Return return:yes If BoolOp Compare Compare Return return:yes For Call If BoolOp Compare Compare Compare Return return:yes Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "random_unstructured",
    "source_code": "def random_unstructured(module, name, amount):\n    RandomUnstructured.apply(module, name, amount)\n    return module",
    "docstring": "Prune tensor by removing random (currently unpruned) units. Prunes tensor corresponding to parameter called ``, it represents the absolute number of parameters to prune. Returns: module (nn.Module): modified (i.e. pruned) version of the input module Examples: >>> # xdoctest: +SKIP >>> m = prune.random_unstructured(nn.Linear(2, 3), 'weight', amount=1) >>> torch.sum(m.weight_mask == 0) tensor(1)",
    "type": "function",
    "file_path": "pytorch\\torch\\nn\\utils\\prune.py",
    "ast_data": "FunctionDef name:random_unstructured arg:module arg:name arg:amount arguments arg arg arg Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "from_row_lengths",
    "source_code": "@classmethod\n@dispatch.add_dispatch_support\ndef from_row_lengths(cls, values, row_lengths, name=None, validate=True):\n    if not isinstance(validate, bool):\n        raise TypeError(f'Argument `validate` must have type bool. Received {validate}.')\n    with ops.name_scope(name, 'RaggedFromRowLengths', [values, row_lengths]):\n        row_partition = RowPartition.from_row_lengths(row_lengths=row_lengths, validate=validate, dtype_hint=_get_optional_partition_dtype(values))\n        return cls._from_row_partition(values, row_partition, validate=validate)",
    "docstring": "Creates a with rows partitioned by . The returned corresponds with the python list defined by: Args: values: A potentially ragged tensor with shape . row_lengths: A 1-D integer tensor with shape . Must be nonnegative. must be . name: A name prefix for the RaggedTensor (optional). validate: If true, then use assertions to check that the arguments form a valid . Note: these assertions incur a runtime cost, since they must be checked for each tensor value. Returns: A . . . #### Example: >>> print(tf.RaggedTensor.from_row_lengths( ... values=[3, 1, 4, 1, 5, 9, 2, 6], ... row_lengths=[4, 0, 3, 1, 0]))",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\ragged_tensor.py",
    "ast_data": "FunctionDef name:from_row_lengths arg:cls arg:values arg:row_lengths arg:name arg:validate arguments arg arg arg arg arg If Call Raise Call With Call Assign Call Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "fetch_layout",
    "source_code": "@tf_export('experimental.dtensor.fetch_layout', v1=[])\ndef fetch_layout(tensor: tensor_lib.Tensor) -> layout_lib.Layout:\n    return _dtensor_device().fetch_layout(tensor)",
    "docstring": "Fetches the layout of a DTensor. Args: tensor: The DTensor whose layout is to be fetched. Returns: The of this DTensor. Raises: RuntimeError: When not called eagerly.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\dtensor\\python\\api.py",
    "ast_data": "FunctionDef name:fetch_layout arg:tensor arguments arg Return return:yes Call Call Call"
  },
  {
    "library": "sphinx",
    "name": "process_doc",
    "source_code": "def process_doc(self, app: Sphinx, doctree: nodes.document) -> None:\n    titlenode = nodes.title()\n    longtitlenode = titlenode\n    if 'title' in doctree:\n        longtitlenode = nodes.title()\n        longtitlenode += nodes.Text(doctree['title'])\n    for node in doctree.findall(nodes.section):\n        visitor = SphinxContentsFilter(doctree)\n        node[0].walkabout(visitor)\n        titlenode += visitor.get_entry_text()\n        break\n    else:\n        titlenode += nodes.Text(doctree.get('title', '<no title>'))\n    app.env.titles[app.env.docname] = titlenode\n    app.env.longtitles[app.env.docname] = longtitlenode",
    "docstring": "Add a title node to the document (just copy the first section title), and store that title in the environment.",
    "type": "method",
    "file_path": "sphinx\\sphinx\\environment\\collectors\\title.py",
    "ast_data": "FunctionDef name:process_doc arg:self arg:app arg:doctree arguments arg arg arg Assign Call Assign If Compare Assign Call Call For Call Assign Call Call Call Call Call Assign Assign"
  },
  {
    "library": "django",
    "name": "disable_constraint_checking",
    "source_code": "def disable_constraint_checking(self):\n    return False",
    "docstring": "Backends can implement as needed to temporarily disable foreign key constraint checking. Should return True if the constraints were disabled and will need to be reenabled.",
    "type": "method",
    "file_path": "django\\django\\db\\backends\\base\\base.py",
    "ast_data": "FunctionDef name:disable_constraint_checking arg:self arguments arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "ObjNotFoundError",
    "source_code": "class ObjNotFoundError(Exception):\n    pass",
    "docstring": "Raised when an importer cannot find an object by searching for its name.",
    "type": "class",
    "file_path": "pytorch\\torch\\package\\importer.py",
    "ast_data": "ClassDef name:ObjNotFoundError"
  },
  {
    "library": "tensorflow",
    "name": "SplitBasedOnSize",
    "source_code": "class SplitBasedOnSize(split.ComposableSplitter):\n    __slots__ = ('fn', 'proto_size')\n\n    def __init__(self, proto, proto_size, **kwargs):\n        self.proto_size = proto_size\n        super().__init__(proto, **kwargs)\n\n    def build_chunks(self) -> int:\n        return 0",
    "docstring": "A Splitter that's based on the size of the input proto.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\tools\\proto_splitter\\split_graph_def.py",
    "ast_data": "ClassDef name:SplitBasedOnSize Assign FunctionDef name:__init__ arg:self arg:proto arg:proto_size arguments arg arg arg arg Assign Call Call FunctionDef name:build_chunks arg:self arguments arg Return return:yes"
  },
  {
    "library": "authlib",
    "name": "validate_sub",
    "source_code": "def validate_sub(self):\n    self._validate_claim_value('sub')",
    "docstring": "The \"sub\" (subject) claim identifies the principal that is the subject of the JWT. The claims in a JWT are normally statements about the subject. The subject value MUST either be scoped to be locally unique in the context of the issuer or be globally unique. The processing of this claim is generally application specific. The \"sub\" value is a case-sensitive string containing a StringOrURI value. Use of this claim is OPTIONAL.",
    "type": "method",
    "file_path": "authlib\\authlib\\jose\\rfc7519\\claims.py",
    "ast_data": "FunctionDef name:validate_sub arg:self arguments arg Call"
  },
  {
    "library": "pandas",
    "name": "use",
    "source_code": "@contextmanager\ndef use(self, key, value) -> Generator[_Options]:\n    old_value = self[key]\n    try:\n        self[key] = value\n        yield self\n    finally:\n        self[key] = old_value",
    "docstring": "Temporarily set a parameter value using the with statement. Aliasing allowed.",
    "type": "method",
    "file_path": "pandas\\pandas\\plotting\\_misc.py",
    "ast_data": "FunctionDef name:use arg:self arg:key arg:value arguments arg arg arg Assign Try Assign Assign"
  },
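The `use` method above is the standard save/override/restore contextmanager pattern; the `finally` clause guarantees the old value is restored even when the body raises. A generic sketch against a plain dict (hypothetical key, not the pandas plotting registry):

```python
from contextlib import contextmanager

options = {"xaxis.compat": False}  # stand-in for an options registry

@contextmanager
def use_option(key, value):
    old_value = options[key]
    try:
        options[key] = value
        yield options
    finally:
        options[key] = old_value  # restored even on exception

with use_option("xaxis.compat", True):
    assert options["xaxis.compat"] is True
assert options["xaxis.compat"] is False
```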
  {
    "library": "tensorflow",
    "name": "get_compatibility_log",
    "source_code": "def get_compatibility_log(self):\n    if not self._verified:\n        raise RuntimeError(\"target compatibility isn't verified yet\")\n    return self._log_messages",
    "docstring": "Returns list of compatibility log messages. WARNING: This method should only be used for unit tests. Returns: The list of log messages by the recent compatibility check. Raises: RuntimeError: when the compatibility was NOT checked.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\lite\\python\\authoring\\authoring.py",
    "ast_data": "FunctionDef name:get_compatibility_log arg:self arguments arg If Raise Call Return return:yes"
  },
  {
    "library": "pandas",
    "name": "right",
    "source_code": "@property\ndef right(self) -> Index:\n    from pandas import Index\n    return Index(self._right, copy=False)",
    "docstring": "Return the right endpoints of each Interval in the IntervalArray as an Index. This property extracts the right endpoints from each interval contained within the IntervalArray. This can be helpful in use cases where you need to work with or compare only the upper bounds of intervals, such as when performing range-based filtering, determining interval overlaps, or visualizing the end boundaries of data segments. See Also -------- arrays.IntervalArray.left : Return the left endpoints of each Interval in the IntervalArray as an Index. arrays.IntervalArray.mid : Return the midpoint of each Interval in the IntervalArray as an Index. arrays.IntervalArray.contains : Check elementwise if the Intervals contain the value. Examples -------- >>> interv_arr = pd.arrays.IntervalArray([pd.Interval(0, 1), pd.Interval(2, 5)]) >>> interv_arr [(0, 1], (2, 5]] Length: 2, dtype: interval[int64, right] >>> interv_arr.right Index([1, 5], dtype='int64')",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\arrays\\interval.py",
    "ast_data": "FunctionDef name:right arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "debug_unwrap",
    "source_code": "@exposed_in('torch.func')\ndef debug_unwrap(tensor: torch.Tensor, *, recurse=True) -> torch.Tensor:\n    if not is_functorch_wrapped_tensor(tensor):\n        return tensor\n    result = get_unwrapped(tensor)\n    if recurse:\n        return debug_unwrap(result)\n    return result",
    "docstring": "Unwraps a functorch tensor (e.g. BatchedTensor, GradTrackingTensor) to its underlying tensor. This function should only be used in a debug setting (e.g. trying to print the value of a Tensor in a debugger). Otherwise, using the result of function inside of a function being transformed will lead to undefined behavior.",
    "type": "function",
    "file_path": "pytorch\\torch\\_functorch\\eager_transforms.py",
    "ast_data": "FunctionDef name:debug_unwrap arg:tensor arguments arg arg If Call Return return:yes Assign Call If Return return:yes Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "as_dict",
    "source_code": "def as_dict(self):\n    ret = {}\n    for job in self.jobs:\n        task_indices = self.task_indices(job)\n        if len(task_indices) == 0:\n            ret[job] = {}\n            continue\n        if max(task_indices) + 1 == len(task_indices):\n            ret[job] = self.job_tasks(job)\n        else:\n            ret[job] = {i: self.task_address(job, i) for i in task_indices}\n    return ret",
    "docstring": "Returns a dictionary from job names to their tasks. For each job, if the task index space is dense, the corresponding value will be a list of network addresses; otherwise it will be a dictionary mapping (sparse) task indices to the corresponding addresses. Returns: A dictionary mapping job names to lists or dictionaries describing the tasks in those jobs.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\training\\server_lib.py",
    "ast_data": "FunctionDef name:as_dict arg:self arguments arg Assign For Assign Call If Compare Call Assign If Compare Call Call Assign Call Assign Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "_single_spectrum_helper",
    "source_code": "def _single_spectrum_helper(mode, x, Fs=None, window=None, pad_to=None, sides=None):\n    _api.check_in_list(['complex', 'magnitude', 'angle', 'phase'], mode=mode)\n    if pad_to is None:\n        pad_to = len(x)\n    spec, freqs, _ = _spectral_helper(x=x, y=None, NFFT=len(x), Fs=Fs, detrend_func=detrend_none, window=window, noverlap=0, pad_to=pad_to, sides=sides, scale_by_freq=False, mode=mode)\n    if mode != 'complex':\n        spec = spec.real\n    if spec.ndim == 2 and spec.shape[1] == 1:\n        spec = spec[:, 0]\n    return (spec, freqs)",
    "docstring": "Private helper implementing the commonality between the complex, magnitude, angle, and phase spectrums.",
    "type": "function",
    "file_path": "matplotlib\\lib\\matplotlib\\mlab.py",
    "ast_data": "FunctionDef name:_single_spectrum_helper arg:mode arg:x arg:Fs arg:window arg:pad_to arg:sides arguments arg arg arg arg arg arg Call If Compare Assign Call Assign Call Call If Compare Assign If BoolOp Compare Compare Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "rich_text_lines_from_rich_line_list",
    "source_code": "def rich_text_lines_from_rich_line_list(rich_text_list, annotations=None):\n    lines = []\n    font_attr_segs = {}\n    for i, rl in enumerate(rich_text_list):\n        if isinstance(rl, RichLine):\n            lines.append(rl.text)\n            if rl.font_attr_segs:\n                font_attr_segs[i] = rl.font_attr_segs\n        else:\n            lines.append(rl)\n    return RichTextLines(lines, font_attr_segs, annotations=annotations)",
    "docstring": "Convert a list of RichLine objects or strings to a RichTextLines object. Args: rich_text_list: a list of RichLine objects or strings annotations: annotations for the resultant RichTextLines object. Returns: A corresponding RichTextLines object.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\debug\\cli\\debugger_cli_common.py",
    "ast_data": "FunctionDef name:rich_text_lines_from_rich_line_list arg:rich_text_list arg:annotations arguments arg arg Assign Assign For Call If Call Call If Assign Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_find_library",
    "source_code": "def _find_library(base_paths, library_name, required_version):\n    if _is_windows():\n        filepattern = library_name + '.lib'\n    elif _is_macos():\n        filepattern = '%s*.dylib' % '.'.join(['lib' + library_name] + required_version.split('.')[:1])\n    else:\n        filepattern = '.'.join(['lib' + library_name, 'so'] + required_version.split('.')[:1]) + '*'\n    return _find_file(base_paths, _library_paths(), filepattern)",
    "docstring": "Returns first valid path to the requested library.",
    "type": "function",
    "file_path": "tensorflow\\third_party\\xla\\third_party\\gpus\\find_cuda_config.py",
    "ast_data": "FunctionDef name:_find_library arg:base_paths arg:library_name arg:required_version arguments arg arg arg If Call Assign If Call Assign Call Call Assign Call Call Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "SynchronizationError",
    "source_code": "class SynchronizationError(Exception):\n    pass",
    "docstring": "Base class for errors detected by CUDA Sanitizer.",
    "type": "class",
    "file_path": "pytorch\\torch\\cuda\\_sanitizer.py",
    "ast_data": "ClassDef name:SynchronizationError"
  },
  {
    "library": "pandas",
    "name": "_from_derivatives",
    "source_code": "def _from_derivatives(xi: np.ndarray, yi: np.ndarray, x: np.ndarray, order=None, der: int | list[int] | None=0, extrapolate: bool=False):\n    from scipy import interpolate\n    method = interpolate.BPoly.from_derivatives\n    m = method(xi, yi.reshape(-1, 1), orders=order, extrapolate=extrapolate)\n    return m(x)",
    "docstring": "Convenience function for interpolate.BPoly.from_derivatives. Construct a piecewise polynomial in the Bernstein basis, compatible with the specified values and derivatives at breakpoints. Parameters ---------- xi : array-like sorted 1D array of x-coordinates yi : array-like or list of array-likes yi[i][j] is the j-th derivative known at xi[i] order: None or int or array-like of ints. Default: None. Specifies the degree of local polynomials. If not None, some derivatives are ignored. der : int or list How many derivatives to extract; None for all potentially nonzero derivatives (that is a number equal to the number of points), or a list of derivatives to extract. This number includes the function value as 0th derivative. extrapolate : bool, optional Whether to extrapolate to ouf-of-bounds points based on first and last intervals, or to return NaNs. Default: True. See Also -------- scipy.interpolate.BPoly.from_derivatives Returns ------- y : scalar or array-like The result, of length R or length M or M by R.",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\missing.py",
    "ast_data": "FunctionDef name:_from_derivatives arg:xi arg:yi arg:x arg:order arg:der arg:extrapolate arguments arg arg arg arg arg arg Assign Assign Call Call Return return:yes Call"
  },
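`_from_derivatives` is a thin wrapper over `scipy.interpolate.BPoly.from_derivatives`; calling SciPy directly shows the shape convention the wrapper's `yi.reshape(-1, 1)` enforces, namely one known derivative (the function value itself) per breakpoint:

```python
import numpy as np
from scipy import interpolate

xi = np.array([0.0, 1.0, 2.0])
yi = np.array([0.0, 1.0, 4.0])  # one value (0th derivative) per breakpoint
poly = interpolate.BPoly.from_derivatives(xi, yi.reshape(-1, 1))
# Piecewise linear here, since only function values were supplied.
print(poly(np.array([0.5, 1.5])))  # [0.5 2.5]
```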
  {
    "library": "tensorflow",
    "name": "_configure_dataset_and_inferred_steps",
    "source_code": "def _configure_dataset_and_inferred_steps(self, strategy, x, steps_per_epoch, class_weight, distribute):\n    del x\n    dataset = self._adapter.get_dataset()\n    if class_weight:\n        dataset = dataset.map(_make_class_weight_map_fn(class_weight))\n    self._inferred_steps = self._infer_steps(steps_per_epoch, dataset)\n    if distribute and (not _is_distributed_dataset(dataset)):\n        dataset = strategy.experimental_distribute_dataset(dataset)\n    self._dataset = dataset\n    self._validate_data_handler()",
    "docstring": "Configure the and attributes.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\data_adapter.py",
    "ast_data": "FunctionDef name:_configure_dataset_and_inferred_steps arg:self arg:strategy arg:x arg:steps_per_epoch arg:class_weight arg:distribute arguments arg arg arg arg arg arg Assign Call If Assign Call Call Assign Call If BoolOp Call Assign Call Assign Call"
  },
  {
    "library": "sphinx",
    "name": "_visit_sig_parameter_list",
    "source_code": "def _visit_sig_parameter_list(self, node: Element, parameter_group: type[Element], sig_open_paren: str, sig_close_paren: str) -> None:\n    self.body.append(f'<span class=\"sig-paren\">{sig_open_paren}</span>')\n    self.is_first_param = True\n    self.optional_param_level = 0\n    self.params_left_at_level = 0\n    self.param_group_index = 0\n    self.list_is_required_param = [isinstance(c, parameter_group) for c in node.children]\n    self.required_params_left = sum(self.list_is_required_param)\n    self.param_separator = node.child_text_separator\n    self.multi_line_parameter_list = node.get('multi_line_parameter_list', False)\n    self.trailing_comma = node.get('multi_line_trailing_comma', False)\n    if self.multi_line_parameter_list:\n        self.body.append('\\n\\n')\n        self.body.append(self.starttag(node, 'dl'))\n        self.param_separator = self.param_separator.rstrip()\n    self.context.append(sig_close_paren)",
    "docstring": "Visit a signature parameters or type parameters list. The *parameter_group* value is the type of child nodes acting as required parameters or as a set of contiguous optional parameters.",
    "type": "method",
    "file_path": "sphinx\\sphinx\\writers\\html5.py",
    "ast_data": "FunctionDef name:_visit_sig_parameter_list arg:self arg:node arg:parameter_group arg:sig_open_paren arg:sig_close_paren arguments arg arg arg arg arg Call Assign Assign Assign Assign Assign Call Assign Call Assign Assign Call Assign Call If Call Call Call Assign Call Call"
  },
  {
    "library": "tensorflow",
    "name": "loop_body",
    "source_code": "def loop_body(optional_data, state):\n    state = reduce_fn(state, optional_data.get_value())\n    optional_data = iterator.get_next_as_optional()\n    return (optional_data, state)",
    "docstring": "Executes in a loop till the dataset is empty.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\input_lib.py",
    "ast_data": "FunctionDef name:loop_body arg:optional_data arg:state arguments arg arg Assign Call Call Assign Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "bucketize",
    "source_code": "def bucketize(self, values: T, boundaries: tuple[str, sympy.Expr, sympy.Expr, sympy.Expr], boundary_indices: T, indexing_dtype: torch.dtype, right: bool, sorter: Optional[tuple[str, sympy.Expr]]=None, sorter_indices: Optional[T]=None) -> T:\n    val = self.parent_handler.bucketize(values, boundaries, boundary_indices, indexing_dtype, right, sorter, sorter_indices)\n    if val not in self.var_names:\n        self._used_ops.add('bucketize')\n        self._read_names.append(boundaries[0])\n        if sorter is not None:\n            self._read_names.append(sorter[0])\n    return self._update_count(val)",
    "docstring": "See [Note: Inductor bucketize op]",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\ops_handler.py",
    "ast_data": "FunctionDef name:bucketize arg:self arg:values arg:boundaries arg:boundary_indices arg:indexing_dtype arg:right arg:sorter arg:sorter_indices arguments arg arg arg arg arg arg arg arg Assign Call If Compare Call Call If Compare Call Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "sanitize_masked_array",
    "source_code": "def sanitize_masked_array(data: ma.MaskedArray) -> np.ndarray:\n    mask = ma.getmaskarray(data)\n    if mask.any():\n        dtype, fill_value = maybe_promote(data.dtype, np.nan)\n        dtype = cast(np.dtype, dtype)\n        data = ma.asarray(data.astype(dtype, copy=True))\n        data.soften_mask()\n        data[mask] = fill_value\n    else:\n        data = data.copy()\n    return data",
    "docstring": "Convert numpy MaskedArray to ensure mask is softened.",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\construction.py",
    "ast_data": "FunctionDef name:sanitize_masked_array arg:data arguments arg Assign Call If Call Assign Call Assign Call Assign Call Call Call Assign Assign Call Return return:yes"
  },
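A quick demonstration of the promotion in `sanitize_masked_array`: integer masked data cannot hold NaN, so `maybe_promote` switches the dtype to float before the masked positions are filled. The import path below is taken from the record's file path; it is a pandas-internal module and may move between versions:

```python
import numpy.ma as ma
from pandas.core.construction import sanitize_masked_array

data = ma.array([1, 2, 3], mask=[False, True, False])
out = sanitize_masked_array(data)
print(out.dtype)  # float64: promoted so the masked slot can hold NaN
```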
  {
    "library": "matplotlib",
    "name": "collection_2d_to_3d",
    "source_code": "def collection_2d_to_3d(col, zs=0, zdir='z', axlim_clip=False):\n    zs = np.broadcast_to(zs, len(col.get_paths()))\n    col._3dverts_codes = [(np.column_stack(juggle_axes(*np.column_stack([p.vertices, np.broadcast_to(z, len(p.vertices))]).T, zdir)), p.codes) for p, z in zip(col.get_paths(), zs)]\n    col.__class__ = cbook._make_class_factory(Collection3D, '{}3D')(type(col))\n    col._axlim_clip = axlim_clip",
    "docstring": "Convert a to a object.",
    "type": "function",
    "file_path": "matplotlib\\lib\\mpl_toolkits\\mplot3d\\art3d.py",
    "ast_data": "FunctionDef name:collection_2d_to_3d arg:col arg:zs arg:zdir arg:axlim_clip arguments arg arg arg arg Assign Call Call Call Assign Call Call Call Call Call Call Call Assign Call Call Call Assign"
  },
  {
    "library": "tensorflow",
    "name": "_is_device_list_single_worker",
    "source_code": "def _is_device_list_single_worker(devices):\n    specs = []\n    for d in devices:\n        name = d.name if isinstance(d, context.LogicalDevice) else d\n        specs.append(tf_device.DeviceSpec.from_string(name))\n    num_workers = len({(d.job, d.task, d.replica) for d in specs})\n    all_local = all((d.job in (None, 'localhost') for d in specs))\n    any_local = any((d.job in (None, 'localhost') for d in specs))\n    if any_local and (not all_local):\n        raise ValueError(\"Local device should have only 'localhost' in the job field in device string. E.g. 'job:localhost' in /job:localhost/replica:0/task:0/device:CPU:0Devices cannot have mixed list of device strings containing both localhost and other job types such as worker, ps etc. \")\n    if num_workers == 1 and (not all_local):\n        if any((d.task is None for d in specs)):\n            raise ValueError(\"Remote device string must have task specified.E.g. 'task:0' in /job:worker/replica:0/task:0/device:CPU:0\")\n    return num_workers == 1",
    "docstring": "Checks whether the devices list is for single or multi-worker. Args: devices: a list of device strings or tf.config.LogicalDevice objects, for either local or for remote devices. Returns: a boolean indicating whether these device strings are for local or for remote. Raises: ValueError: if device strings are not consistent.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\mirrored_strategy.py",
    "ast_data": "FunctionDef name:_is_device_list_single_worker arg:devices arguments arg Assign For Assign Call Call Call Assign Call Assign Call Compare Assign Call Compare If BoolOp Raise Call If BoolOp Compare If Call Compare Raise Call Return return:yes Compare"
  },
  {
    "library": "pytorch",
    "name": "tree_iter",
    "source_code": "def tree_iter(tree: PyTree, is_leaf: Optional[Callable[[PyTree], bool]]=None) -> Iterable[Any]:\n    if tree_is_leaf(tree, is_leaf=is_leaf):\n        yield tree\n    else:\n        node_type = _get_node_type(tree)\n        flatten_fn = SUPPORTED_NODES[node_type].flatten_fn\n        child_pytrees, _ = flatten_fn(tree)\n        for child in child_pytrees:\n            yield from tree_iter(child, is_leaf=is_leaf)",
    "docstring": "Get an iterator over the leaves of a pytree.",
    "type": "function",
    "file_path": "pytorch\\torch\\utils\\_pytree.py",
    "ast_data": "FunctionDef name:tree_iter arg:tree arg:is_leaf arguments arg arg If Call Assign Call Assign Assign Call For Call"
  },
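A minimal stand-in for the recursion in `tree_iter`, handling only dicts, lists and tuples rather than the full `SUPPORTED_NODES` registry that `torch.utils._pytree` consults:

```python
def iter_leaves(tree):
    """Yield leaves of a nested dict/list/tuple structure, depth-first."""
    if isinstance(tree, dict):
        for child in tree.values():
            yield from iter_leaves(child)
    elif isinstance(tree, (list, tuple)):
        for child in tree:
            yield from iter_leaves(child)
    else:
        yield tree  # anything unrecognized is a leaf

assert list(iter_leaves({"a": [1, 2], "b": (3, {"c": 4})})) == [1, 2, 3, 4]
```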
  {
    "library": "pytorch",
    "name": "purge_old_log_files",
    "source_code": "def purge_old_log_files() -> None:\n    for name, table in REGISTERED_METRIC_TABLES.items():\n        if name in enabled_metric_tables():\n            filename = table.output_filename()\n            if os.path.exists(filename):\n                os.unlink(filename)\n            table.write_header()",
    "docstring": "Purge the old log file at the beginning when the benchmark script runs. Should do it in the parent process rather than the child processes running each individual model.",
    "type": "function",
    "file_path": "pytorch\\torch\\_inductor\\metrics.py",
    "ast_data": "FunctionDef name:purge_old_log_files arguments For Call If Compare Call Assign Call If Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "MatchState",
    "source_code": "class MatchState(Enum):\n    FULLY_MATCHED = auto()\n    COLLECTIVE_TYPE_MISMATCH = auto()\n    SIZE_OR_SYNTAX_MISMATCH = auto()\n    COLLECTIVE_STATE_MISMATCH = auto()\n    COLLECTIVE_DTYPE_MISMATCH = auto()\n    UNDECIDED = auto()",
    "docstring": "Enum representing the possible states of matching for collective operations. - FULLY_MATCHED: Indicates that all aspects of the collective operations match. - COLLECTIVE_TYPE_MISMATCH: The types of the collective operations differ. - SIZE_OR_SYNTAX_MISMATCH: There is a mismatch in input/output sizes or violation of collective syntax. - COLLECTIVE_STATE_MISMATCH: The states of the collective not same, such as one finished while another just started or scheduled. - COLLECTIVE_DTYPE_MISMATCH: The data types of the collective input/output differ. - UNDECIDED: The match status is ambiguous or cannot be determined, e.g., we might need to check all ranks for alltoall_base.",
    "type": "class",
    "file_path": "pytorch\\tools\\flight_recorder\\components\\types.py",
    "ast_data": "ClassDef name:MatchState Assign Call Assign Call Assign Call Assign Call Assign Call Assign Call"
  },
  {
    "library": "kornia",
    "name": "__init__",
    "source_code": "def __init__(self, distortion: CameraDistortionType, projection: CameraProjectionType, image_size: ImageSize, params: Tensor) -> None:\n    self.distortion = distortion\n    self.projection = projection\n    self._image_size = image_size\n    self._height = image_size.height\n    self._width = image_size.width\n    self._params = params",
    "docstring": "Construct CameraModelBase class. Args: distortion: Distortion type projection: Projection type image_size: Image size params: Camera parameters of shape :math: for PINHOLE Camera, :math: for Brown Conrady, :math: for Kannala Brandt K3.",
    "type": "method",
    "file_path": "kornia\\kornia\\sensors\\camera\\camera_model.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:distortion arg:projection arg:image_size arg:params arguments arg arg arg arg arg Assign Assign Assign Assign Assign Assign"
  },
  {
    "library": "numpy",
    "name": "_optimal_path",
    "source_code": "def _optimal_path(input_sets, output_set, idx_dict, memory_limit):\n    full_results = [(0, [], input_sets)]\n    for iteration in range(len(input_sets) - 1):\n        iter_results = []\n        for curr in full_results:\n            cost, positions, remaining = curr\n            for con in itertools.combinations(range(len(input_sets) - iteration), 2):\n                cont = _find_contraction(con, remaining, output_set)\n                new_result, new_input_sets, idx_removed, idx_contract = cont\n                new_size = _compute_size_by_dict(new_result, idx_dict)\n                if new_size > memory_limit:\n                    continue\n                total_cost = cost + _flop_count(idx_contract, idx_removed, len(con), idx_dict)\n                new_pos = positions + [con]\n                iter_results.append((total_cost, new_pos, new_input_sets))\n        if iter_results:\n            full_results = iter_results\n        else:\n            path = min(full_results, key=lambda x: x[0])[1]\n            path += [tuple(range(len(input_sets) - iteration))]\n            return path\n    if len(full_results) == 0:\n        return [tuple(range(len(input_sets)))]\n    path = min(full_results, key=lambda x: x[0])[1]\n    return path",
    "docstring": "Computes all possible pair contractions, sieves the results based on ``. Parameters ---------- input_sets : list List of sets that represent the lhs side of the einsum subscript output_set : set Set that represents the rhs side of the overall einsum subscript idx_dict : dictionary Dictionary of index sizes memory_limit : int The maximum number of elements in a temporary array Returns ------- path : list The optimal contraction order within the memory limit constraint. Examples -------- >>> isets = [set('abd'), set('ac'), set('bdc')] >>> oset = set() >>> idx_sizes = {'a': 1, 'b':2, 'c':3, 'd':4} >>> _optimal_path(isets, oset, idx_sizes, 5000) [(0, 2), (0, 1)]",
    "type": "function",
    "file_path": "numpy\\numpy\\_core\\einsumfunc.py",
    "ast_data": "FunctionDef name:_optimal_path arg:input_sets arg:output_set arg:idx_dict arg:memory_limit arguments arg arg arg arg Assign For Call Call Assign For Assign For Call Call Call Assign Call Assign Assign Call If Compare Assign Call Call Assign Call If Assign Assign Call arguments arg Call Call Call Return return:yes If Compare Call Return return:yes Call Call Call Assign Call arguments arg Return return:yes"
  },
  {
    "library": "pandas",
    "name": "_replace_regex",
    "source_code": "@final\ndef _replace_regex(self, to_replace, value, inplace: bool=False, mask=None) -> list[Block]:\n    if not is_re(to_replace) and (not self._can_hold_element(to_replace)):\n        return [self.copy(deep=False)]\n    if is_re(to_replace) and self.dtype not in [object, 'string']:\n        return [self.copy(deep=False)]\n    if not (self._can_hold_element(value) or (self.dtype == 'string' and is_re(value))):\n        block = self.astype(np.dtype(object))\n    else:\n        block = self._maybe_copy(inplace)\n    rx = re.compile(to_replace)\n    replace_regex(block.values, rx, value, mask)\n    return [block]",
    "docstring": "Replace elements by the given value. Parameters ---------- to_replace : object or pattern Scalar to replace or regular expression to match. value : object Replacement object. inplace : bool, default False Perform inplace modification. mask : array-like of bool, optional True indicate corresponding element is ignored. Returns ------- List[Block]",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\internals\\blocks.py",
    "ast_data": "FunctionDef name:_replace_regex arg:self arg:to_replace arg:value arg:inplace arg:mask arguments arg arg arg arg arg If BoolOp Call Call Return return:yes Call If BoolOp Call Compare Return return:yes Call If BoolOp Call BoolOp Compare Call Assign Call Call Assign Call Assign Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "graph",
    "source_code": "@property\ndef graph(self):\n    return self._c._get_method('forward').graph",
    "docstring": "Return a string representation of the internal graph for the `interpreting-graphs` for details.",
    "type": "method",
    "file_path": "pytorch\\torch\\jit\\_script.py",
    "ast_data": "FunctionDef name:graph arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "sphinx",
    "name": "add_env_collector",
    "source_code": "def add_env_collector(self, collector: type[EnvironmentCollector]) -> None:\n    logger.debug('[app] adding environment collector: %r', collector)\n    collector().enable(self)",
    "docstring": "Register an environment collector class. Refer to :ref:. .. versionadded:: 1.6",
    "type": "method",
    "file_path": "sphinx\\sphinx\\application.py",
    "ast_data": "FunctionDef name:add_env_collector arg:self arg:collector arguments arg arg Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "validate_string_arg",
    "source_code": "def validate_string_arg(input_data, allowable_strings, layer_name, arg_name, allow_none=False, allow_callables=False):\n    if allow_none and input_data is None:\n        return\n    elif allow_callables and callable(input_data):\n        return\n    elif isinstance(input_data, str) and input_data in allowable_strings:\n        return\n    else:\n        allowed_args = '`None`, ' if allow_none else ''\n        allowed_args += 'a `Callable`, ' if allow_callables else ''\n        allowed_args += 'or one of the following values: %s' % (allowable_strings,)\n        raise ValueError('The %s argument of layer %s received an invalid value %s. Allowed values are: %s.' % (arg_name, layer_name, input_data, allowed_args))",
    "docstring": "Validates the correctness of a string-based arg.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\utils\\layer_utils.py",
    "ast_data": "FunctionDef name:validate_string_arg arg:input_data arg:allowable_strings arg:layer_name arg:arg_name arg:allow_none arg:allow_callables arguments arg arg arg arg arg arg If BoolOp Compare Return return:no If BoolOp Call Return return:no If BoolOp Call Compare Return return:no Assign Raise Call"
  },
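Hypothetical calls against the validator above: a string in the allowed set passes silently, while an unrecognized string raises `ValueError` with the assembled `allowed_args` message. The argument values here are made up for illustration:

```python
ALLOWED = ('mean', 'sum')

# Passes: a recognized string.
validate_string_arg('mean', ALLOWED, layer_name='pool_1', arg_name='reduction')

# Raises: 'max' is not allowed and callables/None are not enabled.
try:
    validate_string_arg('max', ALLOWED, 'pool_1', 'reduction')
except ValueError as err:
    print(err)
```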
  {
    "library": "kornia",
    "name": "rgb_to_hsv",
    "source_code": "def rgb_to_hsv(image: torch.Tensor, eps: float=1e-08) -> torch.Tensor:\n    if not isinstance(image, torch.Tensor):\n        raise TypeError(f'Input type is not a torch.Tensor. Got {type(image)}')\n    if len(image.shape) < 3 or image.shape[-3] != 3:\n        raise ValueError(f'Input size must have a shape of (*, 3, H, W). Got {image.shape}')\n    max_rgb, argmax_rgb = image.max(-3)\n    min_rgb, argmin_rgb = image.min(-3)\n    deltac = max_rgb - min_rgb\n    v = max_rgb\n    s = deltac / (max_rgb + eps)\n    deltac = torch.where(deltac == 0, torch.ones_like(deltac), deltac)\n    rc, gc, bc = torch.unbind(max_rgb.unsqueeze(-3) - image, dim=-3)\n    h1 = bc - gc\n    h2 = rc - bc + 2.0 * deltac\n    h3 = gc - rc + 4.0 * deltac\n    h = torch.stack((h1, h2, h3), dim=-3) / deltac.unsqueeze(-3)\n    h = torch.gather(h, dim=-3, index=argmax_rgb.unsqueeze(-3)).squeeze(-3)\n    h = h / 6.0 % 1.0\n    h = 2.0 * math.pi * h\n    return torch.stack((h, s, v), dim=-3)",
    "docstring": "Convert an image from RGB to HSV. .. image:: _static/img/rgb_to_hsv.png The image data is assumed to be in the range of (0, 1). Args: image: RGB Image to be converted to HSV with shape of :math:. eps: scalar to enforce numarical stability. Returns: HSV version of the image with shape of :math:. The H channel values are in the range 0..2pi. S and V are in the range 0..1. .. note:: See a working example __. Example: >>> input = torch.rand(2, 3, 4, 5) >>> output = rgb_to_hsv(input) # 2x3x4x5",
    "type": "function",
    "file_path": "kornia\\kornia\\color\\hsv.py",
    "ast_data": "FunctionDef name:rgb_to_hsv arg:image arg:eps arguments arg arg If Call Raise Call Call If BoolOp Compare Call Compare Raise Call Assign Call Assign Call Assign Assign Assign Assign Call Compare Call Assign Call Call Assign Assign Assign Assign Call Call Assign Call Call Call Assign Assign Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "internal_values",
    "source_code": "def internal_values(self):\n    return self._block.values",
    "docstring": "The array that Series._values returns",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\internals\\managers.py",
    "ast_data": "FunctionDef name:internal_values arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "scatter_add",
    "source_code": "def scatter_add(self, sparse_delta, use_locking=False, name=None):\n    raise NotImplementedError",
    "docstring": "Adds to this variable. Args: sparse_delta: to be added to this variable. use_locking: If , use locking during the operation. name: the name of the operation. Returns: The updated variable. Raises: TypeError: if is not an .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\variables.py",
    "ast_data": "FunctionDef name:scatter_add arg:self arg:sparse_delta arg:use_locking arg:name arguments arg arg arg arg Raise"
  },
  {
    "library": "pandas",
    "name": "_ensure_iterable_column_indexer",
    "source_code": "def _ensure_iterable_column_indexer(self, column_indexer):\n    ilocs: Sequence[int | np.integer] | np.ndarray | range\n    if is_integer(column_indexer):\n        ilocs = [column_indexer]\n    elif isinstance(column_indexer, slice):\n        ilocs = range(len(self.obj.columns))[column_indexer]\n    elif isinstance(column_indexer, np.ndarray) and column_indexer.dtype.kind == 'b':\n        ilocs = np.arange(len(column_indexer))[column_indexer]\n    else:\n        ilocs = column_indexer\n    return ilocs",
    "docstring": "Ensure that our column indexer is something that can be iterated over.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\indexing.py",
    "ast_data": "FunctionDef name:_ensure_iterable_column_indexer arg:self arg:column_indexer arguments arg arg If Call Assign If Call Assign Call Call If BoolOp Call Compare Assign Call Call Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_populate_unitwise_quantization_specs",
    "source_code": "def _populate_unitwise_quantization_specs(quantization_options: _QuantizationOptions) -> None:\n    if not quantization_options.unit_wise_quantization_specs:\n        return\n    sorted_top_level_component_specs = sorted(quantization_options.quantization_method.quantization_component_specs, key=lambda x: x.quantization_component)\n    for unitwise_spec in quantization_options.unit_wise_quantization_specs:\n        if not unitwise_spec.unit:\n            raise ValueError('UnitWiseQuantizationSpec must contain at least one unit.')\n        for unit in unitwise_spec.unit:\n            if not unit.op_type and (not unit.node_name):\n                raise ValueError('Either `op_type` or `node_name` must be specified.')\n        _populate_quantization_component_spec(unitwise_spec.quantization_method)\n        component_specs = unitwise_spec.quantization_method.quantization_component_specs\n        if component_specs and sorted_top_level_component_specs != sorted(component_specs, key=lambda x: x.quantization_component):\n            raise ValueError('Currently unit-wise quantization spec only supports NO_QUANTIZE and same quantization method as the top-level `quantization_method`')",
    "docstring": "Verifies and pupulates unitwise quantization specs.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\compiler\\mlir\\quantization\\tensorflow\\python\\quantize_model.py",
    "ast_data": "FunctionDef name:_populate_unitwise_quantization_specs arg:quantization_options arguments arg If Return return:no Assign Call arguments arg For If Raise Call For If BoolOp Raise Call Call Assign If BoolOp Compare Call arguments arg Raise Call"
  },
  {
    "library": "tensorflow",
    "name": "cluster_spec",
    "source_code": "def cluster_spec(self):\n    merged_cluster = {}\n    for cluster_resolver in self._cluster_resolvers:\n        cluster_spec = cluster_resolver.cluster_spec()\n        cluster_dict = cluster_spec.as_dict()\n        for job_name, tasks in cluster_dict.items():\n            if job_name in merged_cluster:\n                if isinstance(tasks, dict):\n                    merged_cluster[job_name] = {}\n            elif isinstance(tasks, list):\n                merged_cluster[job_name] = []\n            else:\n                merged_cluster[job_name] = {}\n    for cluster_resolver in self._cluster_resolvers:\n        cluster_spec = cluster_resolver.cluster_spec()\n        cluster_dict = cluster_spec.as_dict()\n        for job_name, tasks in cluster_dict.items():\n            if isinstance(merged_cluster[job_name], list):\n                merged_cluster[job_name].extend(tasks)\n            else:\n                if isinstance(tasks, list):\n                    task_dict = dict(zip(range(0, len(tasks)), tasks))\n                else:\n                    task_dict = tasks.copy()\n                task_keys = set(task_dict)\n                merged_keys = set(merged_cluster[job_name].keys())\n                intersected_keys = task_keys.intersection(merged_keys)\n                if intersected_keys:\n                    raise KeyError('Duplicate keys detected when merging two ClusterSpecs: %s' % repr(intersected_keys))\n                merged_cluster[job_name].update(task_dict)\n    return ClusterSpec(merged_cluster)",
    "docstring": "Returns a union of all the ClusterSpecs from the ClusterResolvers. Returns: A ClusterSpec containing host information merged from all the underlying ClusterResolvers. Raises: KeyError: If there are conflicting keys detected when merging two or more dictionaries, this exception is raised. Note: If there are multiple ClusterResolvers exposing ClusterSpecs with the same job name, we will merge the list/dict of workers. If *all* underlying ClusterSpecs expose the set of workers as lists, we will concatenate the lists of workers, starting with the list of workers from the first ClusterResolver passed into the constructor. If *any* of the ClusterSpecs expose the set of workers as a dict, we will treat all the sets of workers as dicts (even if they are returned as lists) and will only merge them into a dict if there is no conflicting keys. If there is a conflicting key, we will raise a .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\cluster_resolver\\cluster_resolver.py",
    "ast_data": "FunctionDef name:cluster_spec arg:self arguments arg Assign For Assign Call Assign Call For Call If Compare If Call Assign If Call Assign Assign For Assign Call Assign Call For Call If Call Call If Call Assign Call Call Call Call Assign Call Assign Call Assign Call Call Assign Call If Raise Call Call Call Return return:yes Call"
  },
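A minimal sketch of the conflict-checked merge used above for dict-shaped job specs: overlapping task indices across resolvers raise `KeyError`, otherwise the task maps are combined. Names are hypothetical:

```python
def merge_task_dicts(merged, incoming):
    """Merge incoming task-index -> address entries, refusing duplicates."""
    intersected = set(merged) & set(incoming)
    if intersected:
        raise KeyError(
            f"Duplicate keys detected when merging: {sorted(intersected)}")
    merged.update(incoming)
    return merged

jobs = merge_task_dicts({0: "host0:2222"}, {1: "host1:2222"})
assert jobs == {0: "host0:2222", 1: "host1:2222"}
```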
  {
    "library": "tensorflow",
    "name": "_values",
    "source_code": "@property\ndef _values(self):\n    return self",
    "docstring": "Collect values for TrackableDataStructure.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\trackable\\data_structures.py",
    "ast_data": "FunctionDef name:_values arg:self arguments arg Return return:yes"
  },
  {
    "library": "scipy",
    "name": "pdf",
    "source_code": "def pdf(self, x):\n    return self.evaluate(x)",
    "docstring": "Evaluate the estimated pdf on a provided set of points. Notes ----- This is an alias for . See the `` docstring for more details.",
    "type": "method",
    "file_path": "scipy\\scipy\\stats\\_kde.py",
    "ast_data": "FunctionDef name:pdf arg:self arg:x arguments arg arg Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "isna_all",
    "source_code": "def isna_all(arr: ArrayLike) -> bool:\n    total_len = len(arr)\n    chunk_len = max(total_len // 40, 1000)\n    dtype = arr.dtype\n    if lib.is_np_dtype(dtype, 'f'):\n        checker = np.isnan\n    elif lib.is_np_dtype(dtype, 'mM') or isinstance(dtype, (DatetimeTZDtype, PeriodDtype)):\n        checker = lambda x: np.asarray(x.view('i8')) == iNaT\n    else:\n        checker = _isna_array\n    return all((checker(arr[i:i + chunk_len]).all() for i in range(0, total_len, chunk_len)))",
    "docstring": "Optimized equivalent to isna(arr).all()",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\dtypes\\missing.py",
    "ast_data": "FunctionDef name:isna_all arg:arr arguments arg Assign Call Assign Call Assign If Call Assign If BoolOp Call Call Assign arguments arg Compare Call Call Assign Return return:yes Call Call Call Call"
  },
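A minimal sketch of the chunked short-circuit scan, restricted to float arrays for clarity (`chunked_all_nan` is a hypothetical name; the real function also dispatches on datetime-like and extension dtypes):

```python
import numpy as np

def chunked_all_nan(arr: np.ndarray) -> bool:
    # Scan in ~40 chunks of at least 1000 rows; all() stops at the first
    # chunk containing a non-NA value, so mostly-valid arrays exit early.
    total_len = len(arr)
    chunk_len = max(total_len // 40, 1000)
    return all(
        np.isnan(arr[i:i + chunk_len]).all()
        for i in range(0, total_len, chunk_len)
    )

arr = np.full(1_000_000, np.nan)
arr[5] = 0.0  # only the first chunk is ever inspected
print(chunked_all_nan(arr))  # False
```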
  {
    "library": "scipy",
    "name": "FitDataError",
    "source_code": "class FitDataError(ValueError):\n\n    def __init__(self, distr, lower, upper):\n        self.args = (f'Invalid values in `data`.  Maximum likelihood estimation with {distr!r} requires that {lower!r} < (x - loc)/scale  < {upper!r} for each x in `data`.',)",
    "docstring": "Raised when input data is inconsistent with fixed parameters.",
    "type": "class",
    "file_path": "scipy\\scipy\\stats\\_continuous_distns.py",
    "ast_data": "ClassDef name:FitDataError FunctionDef name:__init__ arg:self arg:distr arg:lower arg:upper arguments arg arg arg arg Assign"
  },
  {
    "library": "tensorflow",
    "name": "np_where",
    "source_code": "def np_where(condition, x=None, y=None):\n    if x is None and y is None:\n        if np.lib.NumpyVersion(np.__version__) >= '2.1.0.rc0':\n            return np.atleast_1d(np.asarray(condition)).nonzero()\n        return np.where(condition)\n    return np.where(condition, x, y)",
    "docstring": "Return elements chosen from x or y depending on condition. When only condition is provided, np.where(condition) is a shorthand for np.asarray(condition).nonzero(). See NumPy 2.1.0rc0 disallows 0D input arrays in nonzero, so np.atleast_1d is used here to remain compatible with NumPy 1.x. See Args: condition: Array_like, bool. Where True, yield x, otherwise yield y. x: Array_like. Values from which to choose. x, y and condition need to be broadcastable to some shape. y: Array_like. Values from which to choose. x, y and condition need to be broadcastable to some shape. Returns: An array with elements from x where condition is True, and elements from y elsewhere. Or the indices of the elements that are non-zero.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\util\\numpy_compat.py",
    "ast_data": "FunctionDef name:np_where arg:condition arg:x arg:y arguments arg arg arg If BoolOp Compare Compare If Compare Call Return return:yes Call Call Call Return return:yes Call Return return:yes Call"
  },
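The two calling conventions this shim bridges, demonstrated with plain NumPy (no TensorFlow needed):

```python
import numpy as np

cond = np.array([True, False, True])

# One argument: np.where(cond) returns the indices of True entries,
# equivalent to np.asarray(cond).nonzero().
print(np.asarray(cond).nonzero())  # (array([0, 2]),)

# Three arguments: elementwise selection between x and y.
print(np.where(cond, [1, 2, 3], [9, 9, 9]))  # [1 9 3]
```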
  {
    "library": "pandas",
    "name": "maybe_convert_dtype",
    "source_code": "def maybe_convert_dtype(data, copy: bool, tz: tzinfo | None=None):\n    if not hasattr(data, 'dtype'):\n        return (data, copy)\n    if is_float_dtype(data.dtype):\n        data = data.astype(DT64NS_DTYPE).view('i8')\n        copy = False\n    elif lib.is_np_dtype(data.dtype, 'm') or is_bool_dtype(data.dtype):\n        raise TypeError(f'dtype {data.dtype} cannot be converted to datetime64[ns]')\n    elif isinstance(data.dtype, PeriodDtype):\n        raise TypeError('Passing PeriodDtype data is invalid. Use `data.to_timestamp()` instead')\n    elif isinstance(data.dtype, ExtensionDtype) and (not isinstance(data.dtype, DatetimeTZDtype)):\n        data = np.array(data, dtype=np.object_)\n        copy = False\n    return (data, copy)",
    "docstring": "Convert data based on dtype conventions, issuing errors where appropriate. Parameters ---------- data : np.ndarray or pd.Index copy : bool tz : tzinfo or None, default None Returns ------- data : np.ndarray or pd.Index copy : bool Raises ------ TypeError : PeriodDType data is passed",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\arrays\\datetimes.py",
    "ast_data": "FunctionDef name:maybe_convert_dtype arg:data arg:copy arg:tz arguments arg arg arg If Call Return return:yes If Call Assign Call Call Assign If BoolOp Call Call Raise Call If Call Raise Call If BoolOp Call Call Assign Call Assign Return return:yes"
  },
  {
    "library": "cherrypy",
    "name": "read",
    "source_code": "def read(self, size=-1):\n    data = self.rfile.read(size)\n    self.bytes_read += len(data)\n    return data",
    "docstring": "Read from file, counting bytes.",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\lib\\cpstats.py",
    "ast_data": "FunctionDef name:read arg:self arg:size arguments arg arg Assign Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_tag_unconditional",
    "source_code": "def _tag_unconditional(loss):\n    if callable(loss):\n        with autocast_variable.enable_auto_cast_variables(None):\n            loss = loss()\n    if loss is None:\n        return None\n    if not tensor_util.is_tf_type(loss):\n        loss = tensor_conversion.convert_to_tensor_v2_with_dispatch(loss, dtype=backend.floatx())\n    loss._unconditional_loss = inputs is None\n    return loss",
    "docstring": "Process the loss and tag it by setting loss._unconditional_loss.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\base_layer_v1.py",
    "ast_data": "FunctionDef name:_tag_unconditional arg:loss arguments arg If Call With Call Assign Call If Compare Return return:no If Call Assign Call Call Assign Compare Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_create_tensor_watch_maps",
    "source_code": "def _create_tensor_watch_maps(self, device_name):\n    self._watch_key_to_datum[device_name] = {}\n    self._watch_key_to_rel_time[device_name] = {}\n    self._watch_key_to_dump_size_bytes[device_name] = {}\n    for datum in self._dump_tensor_data[device_name]:\n        if datum.watch_key not in self._watch_key_to_devices:\n            self._watch_key_to_devices[datum.watch_key] = {device_name}\n        else:\n            self._watch_key_to_devices[datum.watch_key].add(device_name)\n        if datum.watch_key not in self._watch_key_to_datum[device_name]:\n            self._watch_key_to_datum[device_name][datum.watch_key] = [datum]\n            self._watch_key_to_rel_time[device_name][datum.watch_key] = [datum.timestamp - self._t0]\n            self._watch_key_to_dump_size_bytes[device_name][datum.watch_key] = [datum.dump_size_bytes]\n        else:\n            self._watch_key_to_datum[device_name][datum.watch_key].append(datum)\n            self._watch_key_to_rel_time[device_name][datum.watch_key].append(datum.timestamp - self._t0)\n            self._watch_key_to_dump_size_bytes[device_name][datum.watch_key].append(datum.dump_size_bytes)",
    "docstring": "Create maps from tensor watch keys to datum and to timestamps. Create a map from watch key (tensor name + debug op) to item. Also make a map from watch key to relative timestamp. \"relative\" means (absolute timestamp - t0). Args: device_name: (str) name of the device.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\debug\\lib\\debug_data.py",
    "ast_data": "FunctionDef name:_create_tensor_watch_maps arg:self arg:device_name arguments arg arg Assign Assign Assign For If Compare Assign Call If Compare Assign Assign Assign Call Call Call"
  },
  {
    "library": "pandas",
    "name": "map",
    "source_code": "def map(self, mapper, na_action: Literal['ignore'] | None=None) -> Self:\n    is_map = isinstance(mapper, (abc.Mapping, ABCSeries))\n    fill_val = self.fill_value\n    if na_action is None or notna(fill_val):\n        fill_val = mapper.get(fill_val, fill_val) if is_map else mapper(fill_val)\n\n    def func(sp_val):\n        new_sp_val = mapper.get(sp_val, None) if is_map else mapper(sp_val)\n        if new_sp_val is fill_val or new_sp_val == fill_val:\n            msg = 'fill value in the sparse values not supported'\n            raise ValueError(msg)\n        return new_sp_val\n    sp_values = [func(x) for x in self.sp_values]\n    return type(self)(sp_values, sparse_index=self.sp_index, fill_value=fill_val)",
    "docstring": "Map categories using an input mapping or function. Parameters ---------- mapper : dict, Series, callable The correspondence from old values to new. na_action : {None, 'ignore'}, default None If 'ignore', propagate NA values, without passing them to the mapping correspondence. Returns ------- SparseArray The output array will have the same density as the input. The output fill value will be the result of applying the mapping to `` Examples -------- >>> arr = pd.arrays.SparseArray([0, 1, 2]) >>> arr.map(lambda x: x + 10) [10, 11, 12] Fill: 10 IntIndex Indices: array([1, 2], dtype=int32) >>> arr.map({0: 10, 1: 11, 2: 12}) [10, 11, 12] Fill: 10 IntIndex Indices: array([1, 2], dtype=int32) >>> arr.map(pd.Series([10, 11, 12], index=[0, 1, 2])) [10, 11, 12] Fill: 10 IntIndex Indices: array([1, 2], dtype=int32)",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\arrays\\sparse\\array.py",
    "ast_data": "FunctionDef name:map arg:self arg:mapper arg:na_action arguments arg arg arg Assign Call Assign If BoolOp Compare Call Assign Call Call FunctionDef name:func arg:sp_val arguments arg Assign Call Call If BoolOp Compare Compare Assign Raise Call Return return:yes Assign Call Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "__eq__",
    "source_code": "def __eq__(self, other):\n    return self is other",
    "docstring": "Determine if this is equal to another. Since ReparameterizationType instances are constant static global instances, equality checks if two instances' id() values are equal. Args: other: Object to compare against. Returns: .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\distributions\\distribution.py",
    "ast_data": "FunctionDef name:__eq__ arg:self arg:other arguments arg arg Return return:yes Compare"
  },
  {
    "library": "pytorch",
    "name": "get_subkernel_nodes",
    "source_code": "def get_subkernel_nodes(self) -> list[BaseSchedulerNode]:\n    return list(self.snodes)",
    "docstring": "Returns a list of nodes which comprise the combo kernel. These nodes may be vertically fused.",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\scheduler.py",
    "ast_data": "FunctionDef name:get_subkernel_nodes arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "numpy",
    "name": "getargvalues",
    "source_code": "def getargvalues(frame):\n    args, varargs, varkw = getargs(frame.f_code)\n    return (args, varargs, varkw, frame.f_locals)",
    "docstring": "Get information about arguments passed into a particular frame. A tuple of four things is returned: (args, varargs, varkw, locals). 'args' is a list of the argument names (it may contain nested lists). 'varargs' and 'varkw' are the names of the * and ** arguments or None. 'locals' is the locals dictionary of the given frame.",
    "type": "function",
    "file_path": "numpy\\numpy\\_utils\\_inspect.py",
    "ast_data": "FunctionDef name:getargvalues arg:frame arguments arg Assign Call Return return:yes"
  },
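A quick self-contained demo; the standard-library `inspect.getargvalues` has the same contract as this vendored copy, so it stands in here:

```python
import inspect

def greet(name, *extras, **options):
    # Inspect the currently executing frame.
    return inspect.getargvalues(inspect.currentframe())

args, varargs, varkw, local_vars = greet('ada', 1, 2, verbose=True)
print(args)                # ['name']
print(varargs)             # 'extras'
print(varkw)               # 'options'
print(local_vars['name'])  # 'ada'
```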
  {
    "library": "pytorch",
    "name": "__post_init__",
    "source_code": "def __post_init__(self) -> None:\n    _parents: dict[Source, Source] = {}\n    object.__setattr__(self, '_parents', _parents)\n    _defs: dict[Source, sympy.Expr] = {}\n    object.__setattr__(self, '_defs', _defs)\n    for source1, source2 in self.source_pairs:\n        self._union(self._find(source1), self._find(source2))\n    for source, root, fn in self.derived_equalities:\n        if isinstance(root, sympy.Symbol):\n            self._defs[self._find(source)] = fn(root)\n        else:\n            self._defs[self._find(source)] = fn(self._rewrite(root))",
    "docstring": "Pre-processing to answer queries and below. Example: Suppose we are given: source_pairs [a = b, b = c] derived_equalities [d = c + 1, e = d - 1] We first construct a union find with source_pairs: _parents = {a: a, b: a, c: a} Then we compute canonical symbolic expressions, recursively applying derived_equalities until we bottom out: _defs = {d: c + 1, e: (c + 1) - 1 aka c}",
    "type": "method",
    "file_path": "pytorch\\torch\\fx\\experimental\\symbolic_shapes.py",
    "ast_data": "FunctionDef name:__post_init__ arg:self arguments arg Call Call For Call Call Call For If Call Assign Call Call Assign Call Call Call"
  },
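A minimal union-find sketch of the pre-processing the docstring describes, using strings in place of `Source` objects; `find` and `union` are hypothetical stand-ins for the `_find`/`_union` helpers:

```python
import sympy

_parents: dict[str, str] = {}
_defs: dict[str, sympy.Expr] = {}

def find(s: str) -> str:
    # Walk parent pointers up to the root of s's equivalence class.
    _parents.setdefault(s, s)
    while _parents[s] != s:
        s = _parents[s]
    return s

def union(a: str, b: str) -> None:
    _parents[find(b)] = find(a)

# source_pairs [a = b, b = c] -> one class rooted at 'a'
union('a', 'b')
union('b', 'c')

# derived_equalities [d = c + 1, e = d - 1], bottoming out at symbol c
c = sympy.Symbol('c')
_defs[find('d')] = c + 1
_defs[find('e')] = _defs[find('d')] - 1  # sympy simplifies to c

print(find('c'))         # 'a'
print(_defs[find('e')])  # c
```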
  {
    "library": "pandas",
    "name": "nrows_expected",
    "source_code": "@property\ndef nrows_expected(self) -> int:\n    return np.prod([i.cvalues.shape[0] for i in self.index_axes])",
    "docstring": "based on our axes, compute the expected nrows",
    "type": "method",
    "file_path": "pandas\\pandas\\io\\pytables.py",
    "ast_data": "FunctionDef name:nrows_expected arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "deserialize",
    "source_code": "def deserialize(config, custom_objects=None):\n    return deserialize_keras_object(config, module_objects=globals(), custom_objects=custom_objects, printable_module_name='metric function')",
    "docstring": "Deserializes a serialized metric class/function instance. Args: config: Metric configuration. custom_objects: Optional dictionary mapping names (strings) to custom objects (classes and functions) to be considered during deserialization. Returns: A Keras instance or a metric function.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\metrics.py",
    "ast_data": "FunctionDef name:deserialize arg:config arg:custom_objects arguments arg arg Return return:yes Call Call"
  },
  {
    "library": "scipy",
    "name": "newer",
    "source_code": "def newer(source, target):\n    if not os.path.exists(source):\n        raise ValueError(f\"file '{os.path.abspath(source)}' does not exist\")\n    if not os.path.exists(target):\n        return 1\n    mtime1 = os.stat(source)[ST_MTIME]\n    mtime2 = os.stat(target)[ST_MTIME]\n    return mtime1 > mtime2",
    "docstring": "Return true if 'source' exists and is more recently modified than 'target', or if 'source' exists and 'target' doesn't. Return false if both exist and 'target' is the same age or younger than 'source'.",
    "type": "function",
    "file_path": "scipy\\scipy\\special\\_generate_pyx.py",
    "ast_data": "FunctionDef name:newer arg:source arg:target arguments arg arg If Call Raise Call Call If Call Return return:yes Assign Call Assign Call Return return:yes Compare"
  },
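A small usage sketch of the rebuild check, assuming the `newer` function above is in scope; files and timestamps are illustrative:

```python
import os
import tempfile
import time

src = tempfile.NamedTemporaryFile(delete=False).name
dst = tempfile.NamedTemporaryFile(delete=False).name

# Push the target's mtime 100 s into the past so the integer-second
# ST_MTIME comparison is deterministic.
now = time.time()
os.utime(src, (now, now))
os.utime(dst, (now - 100, now - 100))

print(bool(newer(src, dst)))  # True: source modified after target
print(bool(newer(dst, src)))  # False: dst is older than src
```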
  {
    "library": "scipy",
    "name": "kelvin_zeros",
    "source_code": "def kelvin_zeros(nt):\n    if not isscalar(nt) or floor(nt) != nt or nt <= 0:\n        raise ValueError('nt must be positive integer scalar.')\n    return (_specfun.klvnzo(nt, 1), _specfun.klvnzo(nt, 2), _specfun.klvnzo(nt, 3), _specfun.klvnzo(nt, 4), _specfun.klvnzo(nt, 5), _specfun.klvnzo(nt, 6), _specfun.klvnzo(nt, 7), _specfun.klvnzo(nt, 8))",
    "docstring": "Compute nt zeros of all Kelvin functions. Returned in a length-8 tuple of arrays of length nt. The tuple contains the arrays of zeros of (ber, bei, ker, kei, ber', bei', ker', kei'). References ---------- .. [1] Zhang, Shanjie and Jin, Jianming. \"Computation of Special Functions\", John Wiley and Sons, 1996.",
    "type": "function",
    "file_path": "scipy\\scipy\\special\\_basic.py",
    "ast_data": "FunctionDef name:kelvin_zeros arg:nt arguments arg If BoolOp Call Compare Call Compare Raise Call Return return:yes Call Call Call Call Call Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "add_node",
    "source_code": "def add_node(self, node):\n    if node in self.nodes:\n        return\n    self.nodes_need_process.add(node)\n    self.nodes.add(node)\n    self.inputs.discard(node)\n    self.inputs.update({n for n in node.all_input_nodes if n.op in CALLABLE_NODE_OPS and n not in self.nodes})",
    "docstring": "Add a node to fusion group.",
    "type": "method",
    "file_path": "pytorch\\torch\\fx\\passes\\tools_common.py",
    "ast_data": "FunctionDef name:add_node arg:self arg:node arguments arg arg If Compare Return return:no Call Call Call Call BoolOp Compare Compare"
  },
  {
    "library": "tensorflow",
    "name": "save",
    "source_code": "def save(self, file_prefix: str, options: Optional[checkpoint_options.CheckpointOptions]=None) -> Optional[ops.Operation]:\n    if options is not None and options.experimental_io_device is not None:\n        raise ValueError('Specified experimental_io_device in DTensor checkpoint is not supported.')\n    del options\n    tensor_names = []\n    tensors = []\n    tensor_slices = []\n    for saveable in self._saveable_objects:\n        for spec in saveable.specs:\n            tensor = spec.tensor\n            if tensor is not None:\n                if api.device_name() != spec.device:\n                    tensor = api.pack([tensor] * self._mesh.host_mesh().num_local_devices(), layout.Layout.replicated(self._mesh.host_mesh(), rank=tensor.shape.rank))\n                tensor_names.append(spec.name)\n                tensors.append(tensor)\n                tensor_slices.append(spec.slice_spec)\n    return save_restore.sharded_save(self._mesh, file_prefix, tensor_names, tensor_slices, tensors)",
    "docstring": "Saves the saveable objects to a checkpoint with . Also query the generated shards from the distributed DTensor SaveV2 ops and do a MergeV2 on those. Each op here is backed by a global_barrier to avoid racing from multiple clients. Args: file_prefix: A string or scalar string Tensor containing the prefix to save under. options: Optional object. This is unused in DTensor. Returns: An , or None when executing eagerly.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\dtensor\\python\\d_checkpoint.py",
    "ast_data": "FunctionDef name:save arg:self arg:file_prefix arg:options arguments arg arg arg If BoolOp Compare Compare Raise Call Assign Assign Assign For For Assign If Compare If Compare Call Assign Call Call Call Call Call Call Call Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_add_dispatch",
    "source_code": "@tf_export('__operators__.add', v1=[])\n@dispatch.add_dispatch_support\ndef _add_dispatch(x, y, name=None):\n    if ops.is_auto_dtype_conversion_enabled():\n        return add(x, y, name=name)\n    if not isinstance(y, tensor_lib.Tensor) and (not isinstance(y, sparse_tensor.SparseTensor)):\n        y = ops.convert_to_tensor(y, dtype_hint=x.dtype.base_dtype, name='y')\n    if x.dtype == dtypes.string:\n        return gen_math_ops.add(x, y, name=name)\n    else:\n        return gen_math_ops.add_v2(x, y, name=name)",
    "docstring": "The operation invoked by the operator. Purpose in the API: This method is exposed in TensorFlow's API so that library developers can register dispatching for to allow it to handle custom composite tensors & other custom objects. The API symbol is not intended to be called by users directly and does appear in TensorFlow's generated documentation. Args: x: The left-hand side of the operator. y: The right-hand side of the operator. name: an optional name for the operation. Returns: The result of the elementwise operation.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\math_ops.py",
    "ast_data": "FunctionDef name:_add_dispatch arg:x arg:y arg:name arguments arg arg arg If Call Return return:yes Call If BoolOp Call Call Assign Call If Compare Return return:yes Call Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "size",
    "source_code": "def size(self, name=None):\n    with ops.name_scope(name, '%s_Size' % self.name, [self.resource_handle]):\n        with ops.colocate_with(self.resource_handle):\n            return gen_lookup_ops.lookup_table_size_v2(self.resource_handle)",
    "docstring": "Compute the number of elements in this table. Args: name: A name for the operation (optional). Returns: A scalar tensor containing the number of elements in this table.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\lookup_ops.py",
    "ast_data": "FunctionDef name:size arg:self arg:name arguments arg arg With Call With Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "_sharded_post_state_dict_hook",
    "source_code": "@no_type_check\ndef _sharded_post_state_dict_hook(module: nn.Module, fsdp_state: _FSDPState, state_dict: dict[str, Any], prefix: str) -> dict[str, Any]:\n\n    def param_hook(state_dict: dict[str, Any], prefix: str, fqn: str):\n        param = state_dict[fqn]\n        if not fsdp_state._state_dict_config._use_dtensor:\n            sharded_tensor = _ext_chunk_tensor(tensor=param, rank=fsdp_state.rank, world_size=fsdp_state.world_size, num_devices_per_node=fsdp_state._device_handle.device_count(), pg=fsdp_state.process_group, fsdp_extension=fsdp_state._fsdp_extension)\n        else:\n            sharded_tensor = _ext_chunk_dtensor(tensor=param, rank=fsdp_state.rank, device_mesh=fsdp_state._device_mesh, fsdp_extension=fsdp_state._fsdp_extension)\n        if fsdp_state._state_dict_config.offload_to_cpu:\n            sharded_tensor = sharded_tensor.cpu()\n        state_dict[fqn] = sharded_tensor\n    return _common_unshard_post_state_dict_hook(module, fsdp_state, state_dict, prefix, param_hook)",
    "docstring": "The hook replaces the unflattened, unsharded parameter in the state_dict with a unflattened, sharded parameter (a ShardedTensor).",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\fsdp\\_state_dict_utils.py",
    "ast_data": "FunctionDef name:_sharded_post_state_dict_hook arg:module arg:fsdp_state arg:state_dict arg:prefix arguments arg arg arg arg FunctionDef name:param_hook arg:state_dict arg:prefix arg:fqn arguments arg arg arg Assign If Assign Call Call Assign Call If Assign Call Assign Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "predict_log_proba",
    "source_code": "@available_if(_search_estimator_has('predict_log_proba'))\ndef predict_log_proba(self, X):\n    check_is_fitted(self)\n    return self.best_estimator_.predict_log_proba(X)",
    "docstring": "Call predict_log_proba on the estimator with the best found parameters. Only available if `Xclasses_`.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\model_selection\\_search.py",
    "ast_data": "FunctionDef name:predict_log_proba arg:self arg:X arguments arg arg Call Return return:yes Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "TrackableReference",
    "source_code": "@tf_export('__internal__.tracking.TrackableReference', v1=[])\nclass TrackableReference(object):\n    __slots__ = ('_name', '_ref')\n\n    def __init__(self, name, ref):\n        self._name = name\n        self._ref = ref\n\n    @property\n    def name(self):\n        return self._name\n\n    @property\n    def ref(self):\n        return self._ref\n\n    def __iter__(self):\n        yield self.name\n        yield self.ref\n\n    def __repr__(self):\n        return f'{self.__class__.__name__}(name={self.name}, ref={self.ref})'\n\n    def __eq__(self, o):\n        if isinstance(o, tuple):\n            return (self.name, self.ref) == o\n        elif isinstance(o, TrackableReference):\n            return self.name == o.name and self.ref == o.ref\n        else:\n            return False",
    "docstring": "A named reference to a trackable object for use with the class. These references mark named dependencies of a object and should be created when overriding . Attributes: name: The local name for this dependency. ref: The object being referenced.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\trackable\\base.py",
    "ast_data": "ClassDef name:TrackableReference Assign FunctionDef name:__init__ arg:self arg:name arg:ref arguments arg arg arg Assign Assign FunctionDef name:name arg:self arguments arg Return return:yes FunctionDef name:ref arg:self arguments arg Return return:yes FunctionDef name:__iter__ arg:self arguments arg FunctionDef name:__repr__ arg:self arguments arg Return return:yes FunctionDef name:__eq__ arg:self arg:o arguments arg arg If Call Return return:yes Compare If Call Return return:yes BoolOp Compare Compare Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "concentration",
    "source_code": "@property\ndef concentration(self):\n    return self._concentration",
    "docstring": "Concentration parameter; expected counts for that coordinate.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\distributions\\dirichlet.py",
    "ast_data": "FunctionDef name:concentration arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "local_resources",
    "source_code": "def local_resources():\n    return ops.get_collection(ops.GraphKeys.LOCAL_RESOURCES)",
    "docstring": "Returns resources intended to be local to this session.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\resources.py",
    "ast_data": "FunctionDef name:local_resources arguments Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "get_module",
    "source_code": "def get_module(model, name):\n    return dict(model.named_modules())[name]",
    "docstring": "Given name of submodule, this function grabs the submodule from given model.",
    "type": "function",
    "file_path": "pytorch\\torch\\ao\\quantization\\_correct_bias.py",
    "ast_data": "FunctionDef name:get_module arg:model arg:name arguments arg arg Return return:yes Call Call"
  },
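Usage sketch with a small `nn.Sequential`, assuming `get_module` above is in scope; `named_modules()` keys submodules by dotted path, which is exactly what the lookup uses:

```python
import torch.nn as nn

model = nn.Sequential(nn.Linear(4, 8), nn.ReLU(), nn.Linear(8, 2))

# Sequential names its children '0', '1', '2'; nested modules get dotted
# paths such as 'block.0.conv'.
print(get_module(model, '0'))  # Linear(in_features=4, out_features=8, bias=True)
```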
  {
    "library": "matplotlib",
    "name": "get_ydata",
    "source_code": "def get_ydata(self, orig=True):\n    if orig:\n        return self._yorig\n    if self._invalidy:\n        self.recache()\n    return self._y",
    "docstring": "Return the ydata. If *orig* is *True*, return the original data, else the processed data.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\lines.py",
    "ast_data": "FunctionDef name:get_ydata arg:self arg:orig arguments arg arg If Return return:yes If Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "upsample_nearest",
    "source_code": "def upsample_nearest(input, size=None, scale_factor=None):\n    warnings.warn('`nn.functional.upsample_nearest` is deprecated. Use `nn.functional.interpolate` instead.', stacklevel=2)\n    return interpolate(input, size, scale_factor, mode='nearest')",
    "docstring": "Upsamples the input, using nearest neighbours' pixel values. .. warning:: This function is deprecated in favor of :func:. This is equivalent with ``. Currently spatial and volumetric upsampling are supported (i.e. expected inputs are 4 or 5 dimensional). Args: input (Tensor): input size (int or Tuple[int, int] or Tuple[int, int, int]): output spatia size. scale_factor (int): multiplier for spatial size. Has to be an integer. Note: {backward_reproducibility_note}",
    "type": "function",
    "file_path": "pytorch\\torch\\nn\\functional.py",
    "ast_data": "FunctionDef name:upsample_nearest arg:input arg:size arg:scale_factor arguments arg arg arg Call Return return:yes Call"
  },
  {
    "library": "kornia",
    "name": "estimate_transform",
    "source_code": "def estimate_transform(self, *args: Tensor, **kwargs: Tensor) -> Tensor:\n    kp1, kp2, idx = (kwargs['keypoints0'], kwargs['keypoints1'], kwargs['batch_indexes'])\n    homos = [self._estimate_homography(kp1[idx == i], kp2[idx == i]) for i in range(len(idx.unique()))]\n    if len(homos) == 0:\n        raise RuntimeError('Compute homography failed. No matched keypoints found.')\n    return concatenate(homos)",
    "docstring": "Compute the corresponding homography.",
    "type": "method",
    "file_path": "kornia\\kornia\\contrib\\image_stitching.py",
    "ast_data": "FunctionDef name:estimate_transform arg:self arguments arg arg arg Assign Assign Call Compare Compare Call Call Call If Compare Call Raise Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "ModelInfo",
    "source_code": "@dataclasses.dataclass\nclass ModelInfo:\n    parameter_count: defaultdict[torch.dtype, int] = dataclasses.field(default_factory=lambda: defaultdict(int))\n    buffer_count: defaultdict[torch.dtype, int] = dataclasses.field(default_factory=lambda: defaultdict(int))\n    fx_node_count: int = 0\n    fx_node_op_count: defaultdict[str, int] = dataclasses.field(default_factory=lambda: defaultdict(int))\n    fx_node_target_count: defaultdict[str, int] = dataclasses.field(default_factory=lambda: defaultdict(int))\n    dispatch_failures: list[tuple[torch.fx.Node, str]] = dataclasses.field(default_factory=list)\n    inputs: dict[str, torch._export.serde.schema.TensorMeta] = dataclasses.field(default_factory=dict)\n    outputs: dict[str, torch._export.serde.schema.TensorMeta] = dataclasses.field(default_factory=dict)",
    "docstring": "Information about the model.",
    "type": "class",
    "file_path": "pytorch\\torch\\onnx\\_internal\\exporter\\_analysis.py",
    "ast_data": "ClassDef name:ModelInfo Call arguments Call Call arguments Call Call arguments Call Call arguments Call Call Call Call"
  },
  {
    "library": "pandas",
    "name": "__reduce__",
    "source_code": "def __reduce__(self):\n    d = {'levels': list(self.levels), 'codes': list(self.codes), 'sortorder': self.sortorder, 'names': list(self.names)}\n    return (ibase._new_Index, (type(self), d), None)",
    "docstring": "Necessary for making this object picklable",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\indexes\\multi.py",
    "ast_data": "FunctionDef name:__reduce__ arg:self arguments arg Assign Call Call Call Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "_unpack_to_numpy",
    "source_code": "def _unpack_to_numpy(x):\n    if isinstance(x, np.ndarray):\n        return x\n    if hasattr(x, 'to_numpy'):\n        return x.to_numpy()\n    if hasattr(x, 'values'):\n        xtmp = x.values\n        if isinstance(xtmp, np.ndarray):\n            return xtmp\n    if _is_torch_array(x) or _is_jax_array(x) or _is_tensorflow_array(x):\n        xtmp = np.asarray(x)\n        if isinstance(xtmp, np.ndarray):\n            return xtmp\n    return x",
    "docstring": "Internal helper to extract data from e.g. pandas and xarray objects.",
    "type": "function",
    "file_path": "matplotlib\\lib\\matplotlib\\cbook.py",
    "ast_data": "FunctionDef name:_unpack_to_numpy arg:x arguments arg If Call Return return:yes If Call Return return:yes Call If Call Assign If Call Return return:yes If BoolOp Call Call Call Assign Call If Call Return return:yes Return return:yes"
  },
  {
    "library": "scipy",
    "name": "irfftn",
    "source_code": "@_dispatch\ndef irfftn(x, s=None, axes=None, norm=None, overwrite_x=False, workers=None, *, plan=None):\n    return (Dispatchable(x, np.ndarray),)",
    "docstring": "Computes the inverse of This function computes the inverse of the N-D discrete Fourier Transform for real input over any number of axes in an M-D array by means of the Fast Fourier Transform (FFT). In other words, `irfftrfftnirfftifftnssslen(s)sfftxfft~scipy.fft.fftaxessxsssssaxesaxesxifftnfftrffts` assumes an even output length in the final transformation axis. When performing the final complex to real transformation, the Hermitian symmetry requires that the last imaginary component along that axis must be 0 and so it is ignored. To avoid losing information, the correct length of the real input *must* be given. Examples -------- >>> import scipy.fft >>> import numpy as np >>> x = np.zeros((3, 2, 2)) >>> x[0, 0, 0] = 3 * 2 * 2 >>> scipy.fft.irfftn(x) array([[[1., 1.], [1., 1.]], [[1., 1.], [1., 1.]], [[1., 1.], [1., 1.]]])",
    "type": "function",
    "file_path": "scipy\\scipy\\fft\\_basic.py",
    "ast_data": "FunctionDef name:irfftn arg:x arg:s arg:axes arg:norm arg:overwrite_x arg:workers arguments arg arg arg arg arg arg arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_get_num_inputs_outputs",
    "source_code": "def _get_num_inputs_outputs(op_type):\n\n    def _is_list_arg(arg):\n        return arg.number_attr or arg.type_list_attr\n\n    def _count_args(arg_defs):\n        for arg in arg_defs:\n            if _is_list_arg(arg):\n                return -1\n        return len(arg_defs)\n    op_def = op_def_registry.get(op_type)\n    if not op_def:\n        return (-1, -1)\n    return (_count_args(op_def.input_arg), _count_args(op_def.output_arg))",
    "docstring": "Returns (num_inputs, num_outputs). Args: op_type: String. The type of the Operation. Used to lookup the op in the registry. Returns: (num_inputs, num_outputs), for either num_inputs or num_outputs if the value can't be statically inferred from the OpDef alone or of the OpDef lookup fails, -1 is returned.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\gradient_input_output_exclusions.py",
    "ast_data": "FunctionDef name:_get_num_inputs_outputs arg:op_type arguments arg FunctionDef name:_is_list_arg arg:arg arguments arg Return return:yes BoolOp FunctionDef name:_count_args arg:arg_defs arguments arg For If Call Return return:yes Return return:yes Call Assign Call If Return return:yes Return return:yes Call Call"
  },
  {
    "library": "sphinx",
    "name": "get_outdated_docs",
    "source_code": "def get_outdated_docs(self, app: Sphinx, env: BuildEnvironment, added: set[str], changed: set[str], removed: set[str]) -> list[str]:\n    return []",
    "docstring": "Return a list of docnames to re-read. This method is called before reading the documents. .. seealso:: :event:",
    "type": "method",
    "file_path": "sphinx\\sphinx\\environment\\collectors\\__init__.py",
    "ast_data": "FunctionDef name:get_outdated_docs arg:self arg:app arg:env arg:added arg:changed arg:removed arguments arg arg arg arg arg arg Return return:no"
  },
  {
    "library": "django",
    "name": "AmbiguityError",
    "source_code": "class AmbiguityError(Exception):\n    pass",
    "docstring": "More than one migration matches a name prefix.",
    "type": "class",
    "file_path": "django\\django\\db\\migrations\\exceptions.py",
    "ast_data": "ClassDef name:AmbiguityError"
  },
  {
    "library": "django",
    "name": "get_post_parameters",
    "source_code": "def get_post_parameters(self, request):\n    if request is None:\n        return {}\n    else:\n        sensitive_post_parameters = getattr(request, 'sensitive_post_parameters', [])\n        if self.is_active(request) and sensitive_post_parameters:\n            cleansed = request.POST.copy()\n            if sensitive_post_parameters == '__ALL__':\n                for k in cleansed:\n                    cleansed[k] = self.cleansed_substitute\n                return cleansed\n            else:\n                for param in sensitive_post_parameters:\n                    if param in cleansed:\n                        cleansed[param] = self.cleansed_substitute\n                return cleansed\n        else:\n            return request.POST",
    "docstring": "Replace the values of POST parameters marked as sensitive with stars (*********).",
    "type": "method",
    "file_path": "django\\django\\views\\debug.py",
    "ast_data": "FunctionDef name:get_post_parameters arg:self arg:request arguments arg arg If Compare Return return:no Assign Call If BoolOp Call Assign Call If Compare For Assign Return return:yes For If Compare Assign Return return:yes Return return:yes"
  },
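For context, views opt into this cleansing with Django's `sensitive_post_parameters` decorator, which sets the `request.sensitive_post_parameters` attribute read above (the view body here is illustrative):

```python
from django.views.decorators.debug import sensitive_post_parameters

@sensitive_post_parameters('password', 'credit_card')
def process_signup(request):
    # In debug reports, these two POST parameters are replaced with the
    # cleansed substitute; decorating with no arguments marks all of them.
    ...
```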
  {
    "library": "tensorflow",
    "name": "get_config",
    "source_code": "def get_config(self):\n    config = dict(zip(self._fields, self))\n    config['categorical_column'] = serialization.serialize_feature_column(self.categorical_column)\n    config['initializer'] = serialization._serialize_keras_object(self.initializer)\n    return config",
    "docstring": "See 'FeatureColumn` base class.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\feature_column\\feature_column_v2.py",
    "ast_data": "FunctionDef name:get_config arg:self arguments arg Assign Call Call Assign Call Assign Call Return return:yes"
  },
  {
    "library": "kornia",
    "name": "forward",
    "source_code": "def forward(self, image: torch.Tensor) -> List[torch.Tensor]:\n    img = self.preprocess(image)\n    out = self.model(img)\n    return self.postprocess(out, img.shape[-2], img.shape[-1])",
    "docstring": "Detect faces in a given batch of images. Args: image: batch of images :math: Return: List[torch.Tensor]: list with the boxes found on each image. :math:.",
    "type": "method",
    "file_path": "kornia\\kornia\\contrib\\face_detection.py",
    "ast_data": "FunctionDef name:forward arg:self arg:image arguments arg arg Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "canvas",
    "source_code": "@property\ndef canvas(self):\n    if not self._figure:\n        return None\n    return self._figure.canvas",
    "docstring": "Canvas managed by FigureManager.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\backend_managers.py",
    "ast_data": "FunctionDef name:canvas arg:self arguments arg If Return return:no Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "from_config",
    "source_code": "@classmethod\ndef from_config(cls, config, custom_objects=None, columns_by_name=None):\n    from tensorflow.python.feature_column.serialization import deserialize_feature_column\n    _check_config_keys(config, cls._fields)\n    kwargs = _standardize_and_copy_config(config)\n    kwargs['categorical_column'] = deserialize_feature_column(config['categorical_column'], custom_objects, columns_by_name)\n    kwargs['dtype'] = dtypes.as_dtype(config['dtype'])\n    return cls(**kwargs)",
    "docstring": "See 'FeatureColumn` base class.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\feature_column\\feature_column_v2.py",
    "ast_data": "FunctionDef name:from_config arg:cls arg:config arg:custom_objects arg:columns_by_name arguments arg arg arg arg Call Assign Call Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "parameters",
    "source_code": "def parameters(self, recurse: bool=True) -> Iterator[Parameter]:\n    for _name, param in self.named_parameters(recurse=recurse):\n        yield param",
    "docstring": "Return an iterator over module parameters. This is typically passed to an optimizer. Args: recurse (bool): if True, then yields parameters of this module and all submodules. Otherwise, yields only parameters that are direct members of this module. Yields: Parameter: module parameter Example:: >>> # xdoctest: +SKIP(\"undefined vars\") >>> for param in model.parameters(): >>> print(type(param), param.size()) (20L,) (20L, 1L, 5L, 5L)",
    "type": "method",
    "file_path": "pytorch\\torch\\nn\\modules\\module.py",
    "ast_data": "FunctionDef name:parameters arg:self arg:recurse arguments arg arg For Call"
  },
  {
    "library": "matplotlib",
    "name": "check_shape",
    "source_code": "def check_shape(shape, /, **kwargs):\n    for k, v in kwargs.items():\n        data_shape = v.shape\n        if len(data_shape) != len(shape) or any((s != t and t is not None for s, t in zip(data_shape, shape))):\n            dim_labels = iter(itertools.chain('NMLKJIH', (f'D{i}' for i in itertools.count())))\n            text_shape = ', '.join([str(n) if n is not None else next(dim_labels) for n in shape[::-1]][::-1])\n            if len(shape) == 1:\n                text_shape += ','\n            raise ValueError(f'{k!r} must be {len(shape)}D with shape ({text_shape}), but your input has shape {v.shape}')",
    "docstring": "For each *key, value* pair in *kwargs*, check that *value* has the shape *shape*; if not, raise an appropriate ValueError. *None* in the shape is treated as a \"free\" size that can have any length. e.g. (None, 2) -> (N, 2) The values checked must be numpy arrays. Examples -------- To check for (N, 2) shaped arrays >>> _api.check_shape((None, 2), arg=arg, other_arg=other_arg)",
    "type": "function",
    "file_path": "matplotlib\\lib\\matplotlib\\_api\\__init__.py",
    "ast_data": "FunctionDef name:check_shape arguments arg arg For Call Assign If BoolOp Compare Call Call Call BoolOp Compare Compare Call Assign Call Call Call Assign Call Compare Call Call If Compare Call Raise Call Call"
  },
  {
    "library": "matplotlib",
    "name": "_to_matrix_vectorized",
    "source_code": "def _to_matrix_vectorized(M):\n    assert isinstance(M, (tuple, list))\n    assert all((isinstance(item, (tuple, list)) for item in M))\n    c_vec = np.asarray([len(item) for item in M])\n    assert np.all(c_vec - c_vec[0] == 0)\n    r = len(M)\n    c = c_vec[0]\n    M00 = np.asarray(M[0][0])\n    dt = M00.dtype\n    sh = [M00.shape[0], r, c]\n    M_ret = np.empty(sh, dtype=dt)\n    for irow in range(r):\n        for icol in range(c):\n            M_ret[:, irow, icol] = np.asarray(M[irow][icol])\n    return M_ret",
    "docstring": "Build an array of matrices from individuals np.arrays of identical shapes. Parameters ---------- M ncols-list of nrows-lists of shape sh. Returns ------- M_res : np.array of shape (sh, nrow, ncols) *M_res* satisfies ``.",
    "type": "function",
    "file_path": "matplotlib\\lib\\matplotlib\\tri\\_triinterpolate.py",
    "ast_data": "FunctionDef name:_to_matrix_vectorized arg:M arguments arg Call Call Call Assign Call Call Call Compare Assign Call Assign Assign Call Assign Assign Assign Call For Call For Call Assign Call Return return:yes"
  },
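A worked example of the stacking, assuming `_to_matrix_vectorized` above is in scope: four length-3 arrays become a stack of three 2x2 matrices.

```python
import numpy as np

a = np.array([1.0, 2.0, 3.0])
M = _to_matrix_vectorized([[a, 2 * a],
                           [3 * a, 4 * a]])

print(M.shape)  # (3, 2, 2)
print(M[0])     # [[1. 2.] [3. 4.]] -- M[k, i, j] equals the k-th entry of input M[i][j]
```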
  {
    "library": "sphinx",
    "name": "PycodeError",
    "source_code": "class PycodeError(Exception):\n\n    def __str__(self) -> str:\n        res = self.args[0]\n        if len(self.args) > 1:\n            res += ' (exception was: %r)' % self.args[1]\n        return res",
    "docstring": "Pycode Python source code analyser error.",
    "type": "class",
    "file_path": "sphinx\\sphinx\\errors.py",
    "ast_data": "ClassDef name:PycodeError FunctionDef name:__str__ arg:self arguments arg Assign If Compare Call Return return:yes"
  },
  {
    "library": "kornia",
    "name": "forward",
    "source_code": "def forward(self, pred: Tensor, target: Tensor) -> Tensor:\n    if pred.dim() != 4:\n        raise ValueError(f'Only 2D images supported. Got {pred.dim()}.')\n    if not (target.max() < pred.size(1) and target.min() >= 0 and (target.dtype == torch.long)):\n        raise ValueError(f'Expect long type target value in range (0, {pred.size(1)}). ({target.min()}, {target.max()})')\n    return super().forward(pred, target)",
    "docstring": "Compute Hausdorff loss. Args: pred: predicted tensor with a shape of :math:. Each channel is as binary as: 1 -> fg, 0 -> bg. target: target tensor with a shape of :math:. Returns: Estimated Hausdorff Loss.",
    "type": "method",
    "file_path": "kornia\\kornia\\losses\\hausdorff.py",
    "ast_data": "FunctionDef name:forward arg:self arg:pred arg:target arguments arg arg arg If Compare Call Raise Call Call If BoolOp Compare Call Call Compare Call Compare Raise Call Call Call Call Return return:yes Call Call"
  },
  {
    "library": "seaborn",
    "name": "_not_bottom_axes",
    "source_code": "@property\ndef _not_bottom_axes(self):\n    if self._col_wrap is None:\n        return self.axes[:-1, :].flat\n    else:\n        axes = []\n        n_empty = self._nrow * self._ncol - self._n_facets\n        for i, ax in enumerate(self.axes):\n            append = i < self._ncol * (self._nrow - 1) and i < self._ncol * (self._nrow - 1) - n_empty\n            if append:\n                axes.append(ax)\n        return np.array(axes, object).flat",
    "docstring": "Return a flat array of axes that aren't on the bottom row.",
    "type": "method",
    "file_path": "seaborn\\seaborn\\axisgrid.py",
    "ast_data": "FunctionDef name:_not_bottom_axes arg:self arguments arg If Compare Return return:yes Assign Assign For Call Assign BoolOp Compare Compare If Call Return return:yes Call"
  },
  {
    "library": "django",
    "name": "serialize_db_to_string",
    "source_code": "def serialize_db_to_string(self):\n\n    def get_objects():\n        from django.db.migrations.loader import MigrationLoader\n        loader = MigrationLoader(self.connection)\n        for app_config in apps.get_app_configs():\n            if app_config.models_module is not None and app_config.label in loader.migrated_apps and (app_config.name not in settings.TEST_NON_SERIALIZED_APPS):\n                for model in app_config.get_models():\n                    if model._meta.can_migrate(self.connection) and router.allow_migrate_model(self.connection.alias, model):\n                        queryset = model._base_manager.using(self.connection.alias).order_by(model._meta.pk.name)\n                        chunk_size = 2000 if queryset._prefetch_related_lookups else None\n                        yield from queryset.iterator(chunk_size=chunk_size)\n    out = StringIO()\n    serializers.serialize('json', get_objects(), indent=None, stream=out)\n    return out.getvalue()",
    "docstring": "Serialize all data in the database into a JSON string. Designed only for test runner usage; will not handle large amounts of data.",
    "type": "method",
    "file_path": "django\\django\\db\\backends\\base\\creation.py",
    "ast_data": "FunctionDef name:serialize_db_to_string arg:self arguments arg FunctionDef name:get_objects arguments Assign Call For Call If BoolOp Compare Compare Compare For Call If BoolOp Call Call Assign Call Call Assign Call Assign Call Call Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, num_evals, steps_per_run=1):\n    self._num_evals = num_evals\n    self._evals_completed = None\n    self._steps_per_run_initial_value = steps_per_run",
    "docstring": "Constructs the run hook. Args: num_evals: The number of evaluations to run for. if set to None, will iterate the dataset until all inputs are exhausted. steps_per_run: Number of steps executed per run call.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\training\\evaluation.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:num_evals arg:steps_per_run arguments arg arg arg Assign Assign Assign"
  },
  {
    "library": "scipy",
    "name": "__new__",
    "source_code": "def __new__(cls, *system, **kwargs):\n    if len(system) == 1 and isinstance(system[0], LinearTimeInvariant):\n        return system[0].to_ss()\n    if cls is StateSpace:\n        if kwargs.get('dt') is None:\n            return StateSpaceContinuous.__new__(StateSpaceContinuous, *system, **kwargs)\n        else:\n            return StateSpaceDiscrete.__new__(StateSpaceDiscrete, *system, **kwargs)\n    return super().__new__(cls)",
    "docstring": "Create new StateSpace object and settle inheritance.",
    "type": "method",
    "file_path": "scipy\\scipy\\signal\\_ltisys.py",
    "ast_data": "FunctionDef name:__new__ arg:cls arguments arg arg arg If BoolOp Compare Call Call Return return:yes Call If Compare If Compare Call Return return:yes Call Return return:yes Call Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "from_tensor",
    "source_code": "@classmethod\ndef from_tensor(cls, t, dtype=None):\n    if ragged_tensor.is_ragged(t):\n        return DynamicRaggedShape(t._nested_row_partitions, _flat_values_shape(t), dtype=dtype)\n    else:\n        return DynamicRaggedShape._from_inner_shape(array_ops.shape(t), dtype=dtype)",
    "docstring": "Constructs a ragged shape for a potentially ragged tensor.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\dynamic_ragged_shape.py",
    "ast_data": "FunctionDef name:from_tensor arg:cls arg:t arg:dtype arguments arg arg arg If Call Return return:yes Call Call Return return:yes Call Call"
  },
  {
    "library": "cherrypy",
    "name": "tox_append_version_info",
    "source_code": "def tox_append_version_info() -> str:\n    return '[toxfile]'",
    "docstring": "Produce text to be rendered in ``. :returns: A string with the plugin details.",
    "type": "function",
    "file_path": "cherrypy\\toxfile.py",
    "ast_data": "FunctionDef name:tox_append_version_info arguments Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "remove_decompositions",
    "source_code": "def remove_decompositions(decompositions: dict[torch._ops.OperatorBase, Callable], aten_ops: Sequence[Union[OpOverload, OpOverloadPacket]]) -> None:\n    for op in aten_ops:\n        if isinstance(op, OpOverloadPacket):\n            for overload_name in op.overloads():\n                opo = getattr(op, overload_name)\n                decompositions.pop(opo, None)\n        elif isinstance(op, OpOverload):\n            decompositions.pop(op, None)",
    "docstring": "Given a dictionary of decompositions obtained from get_decompositions(), removes operators associated with a list of operator overloads and overload packets passed as input. If the decomposition dictionary does not contain a decomposition that is specified to be removed, it is silently ignored.",
    "type": "function",
    "file_path": "pytorch\\torch\\_decomp\\__init__.py",
    "ast_data": "FunctionDef name:remove_decompositions arg:decompositions arg:aten_ops arguments arg arg For If Call For Call Assign Call Call If Call Call"
  },
  {
    "library": "pandas",
    "name": "_maybe_check_unique",
    "source_code": "@final\ndef _maybe_check_unique(self) -> None:\n    if not self.is_unique:\n        msg = 'Index has duplicates.'\n        duplicates = self._format_duplicate_message()\n        msg += f'\\n{duplicates}'\n        raise DuplicateLabelError(msg)",
    "docstring": "Check that an Index has no duplicates. This is typically only called via when it's set to True (duplicates aren't allowed). Raises ------ DuplicateLabelError When the index is not unique.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\indexes\\base.py",
    "ast_data": "FunctionDef name:_maybe_check_unique arg:self arguments arg If Assign Assign Call Raise Call"
  },
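The public path that exercises this check, using pandas' `set_flags` API:

```python
import pandas as pd

s = pd.Series([1, 2], index=['a', 'a'])
try:
    # Disallowing duplicate labels triggers _maybe_check_unique.
    s.set_flags(allows_duplicate_labels=False)
except pd.errors.DuplicateLabelError as err:
    print(err)  # Index has duplicates. ...
```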
  {
    "library": "authlib",
    "name": "encrypt",
    "source_code": "def encrypt(self, msg, aad, iv, key):\n    self.check_iv(iv)\n    hkey = key[:self.key_len]\n    ekey = key[self.key_len:]\n    pad = PKCS7(AES.block_size).padder()\n    padded_data = pad.update(msg) + pad.finalize()\n    cipher = Cipher(AES(ekey), CBC(iv), backend=default_backend())\n    enc = cipher.encryptor()\n    ciphertext = enc.update(padded_data) + enc.finalize()\n    tag = self._hmac(ciphertext, aad, iv, hkey)\n    return (ciphertext, tag)",
    "docstring": "Key Encryption with AES_CBC_HMAC_SHA2. :param msg: text to be encrypt in bytes :param aad: additional authenticated data in bytes :param iv: initialization vector in bytes :param key: encrypted key in bytes :return: (ciphertext, iv, tag)",
    "type": "method",
    "file_path": "authlib\\authlib\\jose\\rfc7518\\jwe_encs.py",
    "ast_data": "FunctionDef name:encrypt arg:self arg:msg arg:aad arg:iv arg:key arguments arg arg arg arg arg Call Assign Assign Assign Call Call Assign Call Call Assign Call Call Call Call Assign Call Assign Call Call Assign Call Return return:yes"
  },
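A standalone sketch of the CBC half of the scheme with the same `cryptography` primitives; the key and IV are random illustrative values, and the HMAC tag (the `_hmac` helper used above) is omitted:

```python
import os

from cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes
from cryptography.hazmat.primitives.padding import PKCS7

key = os.urandom(16)  # AES-128 half of the combined key
iv = os.urandom(16)   # CBC initialization vector

# Pad the plaintext to the AES block size (128 bits), then encrypt.
padder = PKCS7(algorithms.AES.block_size).padder()
padded = padder.update(b'secret message') + padder.finalize()

enc = Cipher(algorithms.AES(key), modes.CBC(iv)).encryptor()
ciphertext = enc.update(padded) + enc.finalize()

print(len(ciphertext) % 16)  # 0: ciphertext is block-aligned
```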
  {
    "library": "tensorflow",
    "name": "from_config",
    "source_code": "@classmethod\ndef from_config(cls, config, custom_objects=None, columns_by_name=None):\n    _check_config_keys(config, cls._fields)\n    kwargs = _standardize_and_copy_config(config)\n    return cls(**kwargs)",
    "docstring": "See 'FeatureColumn` base class.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\feature_column\\feature_column_v2.py",
    "ast_data": "FunctionDef name:from_config arg:cls arg:config arg:custom_objects arg:columns_by_name arguments arg arg arg arg Call Assign Call Return return:yes Call"
  },
  {
    "library": "numpy",
    "name": "count",
    "source_code": "def count(self, sub, start=0, end=None):\n    return count(self, sub, start, end)",
    "docstring": "Returns an array with the number of non-overlapping occurrences of substring in the range [, ]. See Also -------- char.count",
    "type": "method",
    "file_path": "numpy\\numpy\\_core\\defchararray.py",
    "ast_data": "FunctionDef name:count arg:self arg:sub arg:start arg:end arguments arg arg arg arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "get_logdir",
    "source_code": "def get_logdir(self):\n    return self._logdir",
    "docstring": "Returns the directory where event file will be written.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\summary\\writer\\event_file_writer_v2.py",
    "ast_data": "FunctionDef name:get_logdir arg:self arguments arg Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "formatter",
    "source_code": "@property\ndef formatter(self):\n    return self.long_axis.get_major_formatter()",
    "docstring": "Major tick label for the colorbar.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\colorbar.py",
    "ast_data": "FunctionDef name:formatter arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "django",
    "name": "unique_kwargs",
    "source_code": "def unique_kwargs(self, kwargs):\n    if isinstance(self.unique, str):\n        return {self.unique: kwargs[self.unique]}\n    else:\n        return {fld: kwargs[fld] for fld in self.unique}",
    "docstring": "Given the feature keyword arguments (from ), construct and return the uniqueness keyword arguments -- a subset of the feature kwargs.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\utils\\layermapping.py",
    "ast_data": "FunctionDef name:unique_kwargs arg:self arg:kwargs arguments arg arg If Call Return return:yes Return return:yes"
  },
  {
    "library": "pandas",
    "name": "__rmatmul__",
    "source_code": "def __rmatmul__(self, other):\n    return self.dot(np.transpose(other))",
    "docstring": "Matrix multiplication using binary operator.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\series.py",
    "ast_data": "FunctionDef name:__rmatmul__ arg:self arg:other arguments arg arg Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "get_greatest_upper_bound",
    "source_code": "def get_greatest_upper_bound(type1, type2):\n    if type1 == Dyn:\n        return type2\n    elif type2 == Dyn:\n        return type1\n    elif isinstance(type1, TensorType) and isinstance(type2, TensorType):\n        if not is_consistent(type1, type2):\n            raise TypeError(f'Inconsistent types {type1}, {type2}')\n        gub = [t1 if is_more_precise(t1, t2) else t2 for t1, t2 in zip(type1.__args__, type2.__args__)]\n        return TensorType(tuple(gub))",
    "docstring": "Get the most precise type that's consistent with the given types",
    "type": "function",
    "file_path": "pytorch\\torch\\fx\\experimental\\graph_gradual_typechecker.py",
    "ast_data": "FunctionDef name:get_greatest_upper_bound arg:type1 arg:type2 arguments arg arg If Compare Return return:yes If Compare Return return:yes If BoolOp Call Call If Call Raise Call Assign Call Call Return return:yes Call Call"
  },
  {
    "library": "numpy",
    "name": "transpose",
    "source_code": "def transpose(a):\n    return swapaxes(a, -1, -2)",
    "docstring": "Transpose each matrix in a stack of matrices. Unlike np.transpose, this only swaps the last two axes, rather than all of them Parameters ---------- a : (...,M,N) array_like Returns ------- aT : (...,N,M) ndarray",
    "type": "function",
    "file_path": "numpy\\numpy\\linalg\\_linalg.py",
    "ast_data": "FunctionDef name:transpose arg:a arguments arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "retrieve_bazel_version",
    "source_code": "def retrieve_bazel_version():\n    bazel_executable = shutil.which('bazel')\n    if bazel_executable is None:\n        bazel_executable = shutil.which('bazelisk')\n        if bazel_executable is None:\n            print('Cannot find bazel. Please install bazel/bazelisk.')\n            sys.exit(1)\n    stderr = open(os.devnull, 'wb')\n    curr_version = run_shell([bazel_executable, '--version'], allow_non_zero=True, stderr=stderr)\n    if curr_version.startswith('bazel '):\n        curr_version = curr_version.split('bazel ')[1]\n    curr_version_int = convert_version_to_int(curr_version)\n    if not curr_version_int:\n        print('WARNING: current bazel installation is not a release version.')\n        return curr_version\n    print('You have bazel %s installed.' % curr_version)\n    return curr_version",
    "docstring": "Retrieve installed bazel version (or bazelisk). Returns: The bazel version detected.",
    "type": "function",
    "file_path": "tensorflow\\configure.py",
    "ast_data": "FunctionDef name:retrieve_bazel_version arguments Assign Call If Compare Assign Call If Compare Call Call Assign Call Assign Call If Call Assign Call Assign Call If Call Return return:yes Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "log_instant_event",
    "source_code": "def log_instant_event(self, event_name: str, time_ns: int, metadata: Optional[dict[str, Any]]=None, log_pt2_compile_event: bool=False) -> None:\n    if metadata is None:\n        metadata = {}\n    compile_id = str(torch._guards.CompileContext.current_compile_id())\n    metadata['compile_id'] = compile_id\n    event = {'name': event_name, 'ts': time_ns / 1000, 'args': metadata, 'ph': 'i', 'cat': 'dynamo_timed', 'tid': 0, 'pid': 0, 's': 'p'}\n    torch._logging.trace_structured('chromium_event', payload_fn=lambda: event, suppress_context=False, expect_trace_id=True)\n    if log_pt2_compile_event:\n        log_chromium_event_internal(event, self.get_pt2_compile_substack(), self.id_, time_ns)",
    "docstring": "Log an instant event with no associated duration. :param str event_name: Name of event to appear in trace :param int time_ns Timestamp in nanoseconds :param Optional[Dict[str, Any]] metadata: Any extra metadata associated with this event :param str cname optional color for the arrow in the trace",
    "type": "method",
    "file_path": "pytorch\\torch\\_dynamo\\utils.py",
    "ast_data": "FunctionDef name:log_instant_event arg:self arg:event_name arg:time_ns arg:metadata arg:log_pt2_compile_event arguments arg arg arg arg arg If Compare Assign Assign Call Call Assign Assign Call arguments If Call Call"
  },
  {
    "library": "django",
    "name": "admin_view",
    "source_code": "def admin_view(self, view, cacheable=False):\n\n    def inner(request, *args, **kwargs):\n        if not self.has_permission(request):\n            if request.path == reverse('admin:logout', current_app=self.name):\n                index_path = reverse('admin:index', current_app=self.name)\n                return HttpResponseRedirect(index_path)\n            from django.contrib.auth.views import redirect_to_login\n            return redirect_to_login(request.get_full_path(), reverse('admin:login', current_app=self.name))\n        return view(request, *args, **kwargs)\n    if not cacheable:\n        inner = never_cache(inner)\n    if not getattr(view, 'csrf_exempt', False):\n        inner = csrf_protect(inner)\n    return update_wrapper(inner, view)",
    "docstring": "Decorator to create an admin view attached to this `` decorator. If the view can be safely cached, set cacheable=True.",
    "type": "method",
    "file_path": "django\\django\\contrib\\admin\\sites.py",
    "ast_data": "FunctionDef name:admin_view arg:self arg:view arg:cacheable arguments arg arg arg FunctionDef name:inner arg:request arguments arg arg arg If Call If Compare Call Assign Call Return return:yes Call Return return:yes Call Call Call Return return:yes Call If Assign Call If Call Assign Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "__lt__",
    "source_code": "def __lt__(self, other):\n    other = as_dimension(other)\n    if self._value is None or other.value is None:\n        return None\n    else:\n        return self._value < other.value",
    "docstring": "Returns True if is known to be less than . Dimensions are compared as follows: Args: other: Another Dimension. Returns: The value of if both are known, otherwise None.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\tensor_shape.py",
    "ast_data": "FunctionDef name:__lt__ arg:self arg:other arguments arg arg Assign Call If BoolOp Compare Compare Return return:no Return return:yes Compare"
  },
  {
    "library": "pytorch",
    "name": "ResultType",
    "source_code": "class ResultType:\n    _vals: dict[ComboType, Status]\n\n    def __repr__(self) -> str:\n        return f'ResultType[{self._vals}]'\n\n    def __init__(self) -> None:\n        self._vals = {}\n\n    def __len__(self) -> int:\n        return len(self._vals)\n\n    def num_ran(self) -> int:\n        ret = len(self._vals)\n        for status in self._vals.values():\n            if status == Status.SKIPPED:\n                ret -= 1\n        return ret\n\n    def set(self, combo: ComboType, status: Status) -> None:\n        combo = tuple(sorted(combo))\n        self._vals[combo] = status\n\n    def lookup(self, combo: ComboType) -> Optional[Status]:\n        combo = tuple(sorted(combo))\n        return self._vals.get(combo, None)\n\n    def keys(self) -> KeysView[ComboType]:\n        return self._vals.keys()",
    "docstring": "The mapping of the combo strings to the result status after running the config fuzzer.",
    "type": "class",
    "file_path": "pytorch\\torch\\_inductor\\fuzzer.py",
    "ast_data": "ClassDef name:ResultType FunctionDef name:__repr__ arg:self arguments arg Return return:yes FunctionDef name:__init__ arg:self arguments arg Assign FunctionDef name:__len__ arg:self arguments arg Return return:yes Call FunctionDef name:num_ran arg:self arguments arg Assign Call For Call If Compare Return return:yes FunctionDef name:set arg:self arg:combo arg:status arguments arg arg arg Assign Call Call Assign FunctionDef name:lookup arg:self arg:combo arguments arg arg Assign Call Call Return return:yes Call FunctionDef name:keys arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "group_by_window",
    "source_code": "@deprecation.deprecated(None, 'Use `tf.data.Dataset.group_by_window(...)`.')\n@tf_export('data.experimental.group_by_window')\ndef group_by_window(key_func, reduce_func, window_size=None, window_size_func=None):\n\n    def _apply_fn(dataset):\n        return dataset.group_by_window(key_func=key_func, reduce_func=reduce_func, window_size=window_size, window_size_func=window_size_func)\n    return _apply_fn",
    "docstring": "A transformation that groups windows of elements by key and reduces them. This transformation maps each consecutive element in a dataset to a key using and groups the elements by key. It then applies to at most elements matching the same key. All except the final window for each key will contain elements; the final window may be smaller. You may provide either a constant or a window size determined by the key through . Args: key_func: A function mapping a nested structure of tensors (having shapes and types defined by and ) to a scalar tensor. reduce_func: A function mapping a key and a dataset of up to consecutive elements matching that key to another dataset. window_size: A scalar , representing the number of consecutive elements matching the same key to combine in a single batch, which will be passed to . Mutually exclusive with . window_size_func: A function mapping a key to a scalar , representing the number of consecutive elements matching the same key to combine in a single batch, which will be passed to . Mutually exclusive with . Returns: A transformation function, which can be passed to . Raises: ValueError: if neither or both of {, } are passed.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\data\\experimental\\ops\\grouping.py",
    "ast_data": "FunctionDef name:group_by_window arg:key_func arg:reduce_func arg:window_size arg:window_size_func arguments arg arg arg arg FunctionDef name:_apply_fn arg:dataset arguments arg Return return:yes Call Return return:yes Call Call"
  },
  {
    "library": "pandas",
    "name": "_convert_axes",
    "source_code": "@final\ndef _convert_axes(self, obj: DataFrame | Series) -> DataFrame | Series:\n    for axis_name in obj._AXIS_ORDERS:\n        ax = obj._get_axis(axis_name)\n        ser = Series(ax, dtype=ax.dtype, copy=False)\n        new_ser, result = self._try_convert_data(name=axis_name, data=ser, use_dtypes=False, convert_dates=True, is_axis=True)\n        if result:\n            new_axis = Index(new_ser, dtype=new_ser.dtype, copy=False)\n            setattr(obj, axis_name, new_axis)\n    return obj",
    "docstring": "Try to convert axes.",
    "type": "method",
    "file_path": "pandas\\pandas\\io\\json\\_json.py",
    "ast_data": "FunctionDef name:_convert_axes arg:self arg:obj arguments arg arg For Assign Call Assign Call Assign Call If Assign Call Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "ImageMagickFileWriter",
    "source_code": "@writers.register('imagemagick_file')\nclass ImageMagickFileWriter(ImageMagickBase, FileMovieWriter):\n    supported_formats = ['png', 'jpeg', 'tiff', 'raw', 'rgba']\n    input_names = property(lambda self: f'{self.temp_prefix}*.{self.frame_format}')",
    "docstring": "File-based animated gif writer. Frames are written to temporary files on disk and then stitched together at the end.",
    "type": "class",
    "file_path": "matplotlib\\lib\\matplotlib\\animation.py",
    "ast_data": "ClassDef name:ImageMagickFileWriter Assign Assign Call arguments arg Call"
  },
  {
    "library": "kornia",
    "name": "transform_boxes",
    "source_code": "def transform_boxes(self, M: torch.Tensor, inplace: bool=False) -> Boxes3D:\n    if not 2 <= M.ndim <= 3 or M.shape[-2:] != (4, 4):\n        raise ValueError(f'The transformation matrix shape must be (4, 4) or (B, 4, 4). Got {M.shape}.')\n    transformed_boxes = _transform_boxes(self._data, M)\n    if inplace:\n        self._data = transformed_boxes\n        return self\n    return Boxes3D(transformed_boxes, False, 'xyzxyz_plus')",
    "docstring": "Apply a transformation matrix to the 3D boxes. Args: M: The transformation matrix to be applied, shape of :math: or :math:. inplace: do transform in-place and return self. Returns: The transformed boxes.",
    "type": "method",
    "file_path": "kornia\\kornia\\geometry\\boxes.py",
    "ast_data": "FunctionDef name:transform_boxes arg:self arg:M arg:inplace arguments arg arg arg If BoolOp Compare Compare Raise Call Assign Call If Assign Return return:yes Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_preprocess_conv1d_input",
    "source_code": "def _preprocess_conv1d_input(x, data_format):\n    tf_data_format = 'NWC'\n    if data_format == 'channels_first':\n        if not _has_nchw_support():\n            x = array_ops.transpose(x, (0, 2, 1))\n        else:\n            tf_data_format = 'NCW'\n    return (x, tf_data_format)",
    "docstring": "Transpose and cast the input before the conv1d. Args: x: input tensor. data_format: string, or . Returns: A tensor.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\backend.py",
    "ast_data": "FunctionDef name:_preprocess_conv1d_input arg:x arg:data_format arguments arg arg Assign If Compare If Call Assign Call Assign Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "is_regressor",
    "source_code": "def is_regressor(estimator):\n    if isinstance(estimator, type):\n        warnings.warn(f'passing a class to {print(inspect.stack()[0][3])} is deprecated and will be removed in 1.8. Use an instance of the class instead.', FutureWarning)\n        return getattr(estimator, '_estimator_type', None) == 'regressor'\n    return get_tags(estimator).estimator_type == 'regressor'",
    "docstring": "Return True if the given estimator is (probably) a regressor. Parameters ---------- estimator : estimator instance Estimator object to test. Returns ------- out : bool True if estimator is a regressor and False otherwise. Examples -------- >>> from sklearn.base import is_regressor >>> from sklearn.cluster import KMeans >>> from sklearn.svm import SVC, SVR >>> classifier = SVC() >>> regressor = SVR() >>> kmeans = KMeans() >>> is_regressor(classifier) False >>> is_regressor(regressor) True >>> is_regressor(kmeans) False",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\base.py",
    "ast_data": "FunctionDef name:is_regressor arg:estimator arguments arg If Call Call Call Call Return return:yes Compare Call Return return:yes Compare Call"
  },
  {
    "library": "pandas",
    "name": "_dtype_to_stata_type",
    "source_code": "def _dtype_to_stata_type(dtype: np.dtype, column: Series) -> int:\n    if dtype.type is np.object_:\n        itemsize = max_len_string_array(ensure_object(column._values))\n        return max(itemsize, 1)\n    elif dtype.type is np.float64:\n        return 255\n    elif dtype.type is np.float32:\n        return 254\n    elif dtype.type is np.int32:\n        return 253\n    elif dtype.type is np.int16:\n        return 252\n    elif dtype.type is np.int8:\n        return 251\n    else:\n        raise NotImplementedError(f'Data type {dtype} not supported.')",
    "docstring": "Convert dtype types to stata types. Returns the byte of the given ordinal. See TYPE_MAP and comments for an explanation. This is also explained in the dta spec. 1 - 244 are strings of this length Pandas Stata 251 - for int8 byte 252 - for int16 int 253 - for int32 long 254 - for float32 float 255 - for double double If there are dates to convert, then dtype will already have the correct type inserted.",
    "type": "function",
    "file_path": "pandas\\pandas\\io\\stata.py",
    "ast_data": "FunctionDef name:_dtype_to_stata_type arg:dtype arg:column arguments arg arg If Compare Assign Call Call Return return:yes Call If Compare Return return:yes If Compare Return return:yes If Compare Return return:yes If Compare Return return:yes If Compare Return return:yes Raise Call"
  },
  {
    "library": "django",
    "name": "FileDescriptor",
    "source_code": "class FileDescriptor(DeferredAttribute):\n\n    def __get__(self, instance, cls=None):\n        if instance is None:\n            return self\n        file = super().__get__(instance, cls)\n        if isinstance(file, str) or file is None:\n            attr = self.field.attr_class(instance, self.field, file)\n            instance.__dict__[self.field.attname] = attr\n        elif isinstance(file, DatabaseDefault):\n            attr = self.field.attr_class(instance, self.field, self.field.db_default)\n            instance.__dict__[self.field.attname] = attr\n        elif isinstance(file, File) and (not isinstance(file, FieldFile)):\n            file_copy = self.field.attr_class(instance, self.field, file.name)\n            file_copy.file = file\n            file_copy._committed = False\n            instance.__dict__[self.field.attname] = file_copy\n        elif isinstance(file, FieldFile) and (not hasattr(file, 'field')):\n            file.instance = instance\n            file.field = self.field\n            file.storage = self.field.storage\n        elif isinstance(file, FieldFile) and instance is not file.instance:\n            file.instance = instance\n        return instance.__dict__[self.field.attname]\n\n    def __set__(self, instance, value):\n        instance.__dict__[self.field.attname] = value",
    "docstring": "The descriptor for the file attribute on the model instance. Return a FieldFile when accessed so you can write code like:: >>> from myapp.models import MyModel >>> instance = MyModel.objects.get(pk=1) >>> instance.file.size Assign a file object on assignment so you can do:: >>> with open('/path/to/hello.world') as f: ... instance.file = File(f)",
    "type": "class",
    "file_path": "django\\django\\db\\models\\fields\\files.py",
    "ast_data": "ClassDef name:FileDescriptor FunctionDef name:__get__ arg:self arg:instance arg:cls arguments arg arg arg If Compare Return return:yes Assign Call Call If BoolOp Call Compare Assign Call Assign If Call Assign Call Assign If BoolOp Call Call Assign Call Assign Assign Assign If BoolOp Call Call Assign Assign Assign If BoolOp Call Compare Assign Return return:yes FunctionDef name:__set__ arg:self arg:instance arg:value arguments arg arg arg Assign"
  },
  {
    "library": "matplotlib",
    "name": "set_xlim",
    "source_code": "def set_xlim(self, *args, **kwargs):\n    raise TypeError('Changing axes limits of a geographic projection is not supported.  Please consider using Cartopy.')",
    "docstring": "Not supported. Please consider using Cartopy.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\projections\\geo.py",
    "ast_data": "FunctionDef name:set_xlim arg:self arguments arg arg arg Raise Call"
  },
  {
    "library": "scipy",
    "name": "_read_int32",
    "source_code": "def _read_int32(f):\n    return np.int32(struct.unpack('>i', f.read(4))[0])",
    "docstring": "Read a signed 32-bit integer",
    "type": "function",
    "file_path": "scipy\\scipy\\io\\_idl.py",
    "ast_data": "FunctionDef name:_read_int32 arg:f arguments arg Return return:yes Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "byte",
    "source_code": "def byte(self):\n    _warn_typed_storage_removal()\n    return self._to(torch.uint8)",
    "docstring": "Casts this storage to byte type.",
    "type": "method",
    "file_path": "pytorch\\torch\\storage.py",
    "ast_data": "FunctionDef name:byte arg:self arguments arg Call Return return:yes Call"
  },
  {
    "library": "seaborn",
    "name": "get_mapping",
    "source_code": "def get_mapping(self, scale: Scale, data: Series) -> Mapping:\n    if isinstance(scale, Nominal):\n        return self._get_nominal_mapping(scale, data)\n    elif isinstance(scale, Boolean):\n        return self._get_boolean_mapping(scale, data)\n    if scale.values is None:\n        vmin, vmax = self._forward(self.default_range)\n    elif isinstance(scale.values, tuple) and len(scale.values) == 2:\n        vmin, vmax = self._forward(scale.values)\n    else:\n        if isinstance(scale.values, tuple):\n            actual = f'{len(scale.values)}-tuple'\n        else:\n            actual = str(type(scale.values))\n        scale_class = scale.__class__.__name__\n        err = ' '.join([f'Values for {self.variable} variables with {scale_class} scale', f'must be 2-tuple; not {actual}.'])\n        raise TypeError(err)\n\n    def mapping(x):\n        return self._inverse(np.multiply(x, vmax - vmin) + vmin)\n    return mapping",
    "docstring": "Return a function that maps from data domain to property range.",
    "type": "method",
    "file_path": "seaborn\\seaborn\\_core\\properties.py",
    "ast_data": "FunctionDef name:get_mapping arg:self arg:scale arg:data arguments arg arg arg If Call Return return:yes Call If Call Return return:yes Call If Compare Assign Call If BoolOp Call Compare Call Assign Call If Call Assign Call Assign Call Call Assign Assign Call Raise Call FunctionDef name:mapping arg:x arguments arg Return return:yes Call Call Return return:yes"
  },
  {
    "library": "django",
    "name": "__next__",
    "source_code": "def __next__(self):\n    if self._leftover:\n        output = self._leftover\n        self._leftover = b''\n    else:\n        output = next(self._producer)\n        self._unget_history = []\n    self.position += len(output)\n    return output",
    "docstring": "Used when the exact number of bytes to read is unimportant. Return whatever chunk is conveniently returned from the iterator. Useful to avoid unnecessary bookkeeping if performance is an issue.",
    "type": "method",
    "file_path": "django\\django\\http\\multipartparser.py",
    "ast_data": "FunctionDef name:__next__ arg:self arguments arg If Assign Assign Assign Call Assign Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "loggers_set_enabled",
    "source_code": "def loggers_set_enabled(model: torch.nn.Module, enabled: bool) -> None:\n    for _, child in model.named_modules():\n        if isinstance(child, OutputLogger):\n            child.enabled = enabled",
    "docstring": "Sets the setting on a 's loggers",
    "type": "function",
    "file_path": "pytorch\\torch\\ao\\ns\\_numeric_suite_fx.py",
    "ast_data": "FunctionDef name:loggers_set_enabled arg:model arg:enabled arguments arg arg For Call If Call Assign"
  },
  {
    "library": "matplotlib",
    "name": "set_clip_path",
    "source_code": "def set_clip_path(self, path, transform=None):\n    from matplotlib.patches import Patch, Rectangle\n    success = False\n    if transform is None:\n        if isinstance(path, Rectangle):\n            self.clipbox = TransformedBbox(Bbox.unit(), path.get_transform())\n            self._clippath = None\n            success = True\n        elif isinstance(path, Patch):\n            self._clippath = TransformedPatchPath(path)\n            success = True\n        elif isinstance(path, tuple):\n            path, transform = path\n    if path is None:\n        self._clippath = None\n        success = True\n    elif isinstance(path, Path):\n        self._clippath = TransformedPath(path, transform)\n        success = True\n    elif isinstance(path, TransformedPatchPath):\n        self._clippath = path\n        success = True\n    elif isinstance(path, TransformedPath):\n        self._clippath = path\n        success = True\n    if not success:\n        raise TypeError(f'Invalid arguments to set_clip_path, of type {type(path).__name__} and {type(transform).__name__}')\n    self.pchanged()\n    self.stale = True",
    "docstring": "Set the artist's clip path. Parameters ---------- path : or or or None The clip path. If given a , *transform* must be provided as well. If *None*, a previously set clip path is removed. transform : , optional Only used if *path* is a , in which case the given is converted to a using *transform*. Notes ----- For efficiency, if *path* is a this method will set the clipping box to the corresponding rectangle and set the clipping path to `~.Artist.set`), a tuple (*path*, *transform*) is also accepted as a single positional parameter. .. ACCEPTS: Patch or (Path, Transform) or None",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\artist.py",
    "ast_data": "FunctionDef name:set_clip_path arg:self arg:path arg:transform arguments arg arg arg Assign If Compare If Call Assign Call Call Call Assign Assign If Call Assign Call Assign If Call Assign If Compare Assign Assign If Call Assign Call Assign If Call Assign Assign If Call Assign Assign If Raise Call Call Call Call Assign"
  },
  {
    "library": "numpy",
    "name": "replace",
    "source_code": "def replace(self, old, new, count=None):\n    return replace(self, old, new, count if count is not None else -1)",
    "docstring": "For each element in , return a copy of the string with all occurrences of substring replaced by . See Also -------- char.replace",
    "type": "method",
    "file_path": "numpy\\numpy\\_core\\defchararray.py",
    "ast_data": "FunctionDef name:replace arg:self arg:old arg:new arg:count arguments arg arg arg arg Return return:yes Call Compare"
  },
  {
    "library": "tensorflow",
    "name": "dtype_policy",
    "source_code": "@property\ndef dtype_policy(self):\n    return self._dtype_policy",
    "docstring": "The dtype policy associated with this layer. This is an instance of a .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\base_layer.py",
    "ast_data": "FunctionDef name:dtype_policy arg:self arguments arg Return return:yes"
  },
  {
    "library": "numpy",
    "name": "check_inline",
    "source_code": "def check_inline(self):\n    return check_inline(self)",
    "docstring": "Return the inline keyword recognized by the compiler, empty string otherwise.",
    "type": "method",
    "file_path": "numpy\\numpy\\distutils\\command\\config.py",
    "ast_data": "FunctionDef name:check_inline arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "size",
    "source_code": "@property\ndef size(self):\n    points = self.get_points()\n    return points[1] - points[0]",
    "docstring": "The (signed) width and height of the bounding box.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\transforms.py",
    "ast_data": "FunctionDef name:size arg:self arguments arg Assign Call Return return:yes"
  },
  {
    "library": "cherrypy",
    "name": "on",
    "source_code": "@property\ndef on(self):\n    raise AttributeError(_attr_error)",
    "docstring": "Flag whether the tool is enabled.",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\_cptools.py",
    "ast_data": "FunctionDef name:on arg:self arguments arg Raise Call"
  },
  {
    "library": "tensorflow",
    "name": "reset_dtensor_device",
    "source_code": "@tf_export('experimental.dtensor._reset_dtensor_device', v1=[])\ndef reset_dtensor_device(is_async: bool) -> None:\n    global _dtensor_singleton\n    device = dtensor_device.DTensorDevice(meshes=[], is_async=is_async)\n    _dtensor_singleton = device",
    "docstring": "Resets the Eager execution device for DTensor. This function is only intended for testing and diagnostics. Args: is_async: If True, the device uses async execution.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\dtensor\\python\\api.py",
    "ast_data": "FunctionDef name:reset_dtensor_device arg:is_async arguments arg Assign Call Assign Call"
  },
  {
    "library": "tensorflow",
    "name": "__eq__",
    "source_code": "def __eq__(self, other):\n    return isinstance(other, self.__class__) and self.to_string() == other.to_string()",
    "docstring": "Checks if the DeviceSpec is same as the current instance, eg have same value for all the internal fields. Args: other: Another DeviceSpec Returns: Return if is also a DeviceSpec instance and has same value as the current instance. Return otherwise.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\device_spec.py",
    "ast_data": "FunctionDef name:__eq__ arg:self arg:other arguments arg arg Return return:yes BoolOp Call Compare Call Call"
  },
  {
    "library": "pytorch",
    "name": "_or_policy",
    "source_code": "def _or_policy(module: nn.Module, recurse: bool, nonwrapped_numel: int, policies) -> bool:\n    return any((policy(module=module, recurse=recurse, nonwrapped_numel=nonwrapped_numel) for policy in policies))",
    "docstring": "A policy that wraps ``.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\fsdp\\wrap.py",
    "ast_data": "FunctionDef name:_or_policy arg:module arg:recurse arg:nonwrapped_numel arg:policies arguments arg arg arg arg Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "_ConstraintTarget",
    "source_code": "@dataclasses.dataclass\nclass _ConstraintTarget:\n    t_id: int\n    dim: int",
    "docstring": "This represents input tensor dimensions.",
    "type": "class",
    "file_path": "pytorch\\torch\\export\\dynamic_shapes.py",
    "ast_data": "ClassDef name:_ConstraintTarget"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, default_value, initializer):\n    super(InitializableLookupTableBase, self).__init__(initializer.key_dtype, initializer.value_dtype)\n    self._default_value = ops.convert_to_tensor(default_value, dtype=self._value_dtype)\n    self._default_value.get_shape().merge_with(tensor_shape.TensorShape([]))\n    if isinstance(initializer, trackable_base.Trackable):\n        self._initializer = self._track_trackable(initializer, '_initializer')\n    with ops.init_scope():\n        self._resource_handle = self._create_resource()\n    if not context.executing_eagerly() and ops.get_default_graph()._get_control_flow_context() is not None:\n        with ops.init_scope():\n            self._init_op = self._initialize()\n    else:\n        self._init_op = self._initialize()",
    "docstring": "Construct a table object from a table reference. If requires a table initializer object (subclass of ). It provides the table key and value types, as well as the op to initialize the table. The caller is responsible to execute the initialization op. Args: default_value: The value to use if a key is missing in the table. initializer: The table initializer to use.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\lookup_ops.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:default_value arg:initializer arguments arg arg arg Call Call Assign Call Call Call Call If Call Assign Call With Call Assign Call If BoolOp Call Compare Call Call With Call Assign Call Assign Call"
  },
  {
    "library": "tensorflow",
    "name": "spatial_3d_padding",
    "source_code": "@dispatch.add_dispatch_support\n@doc_controls.do_not_generate_docs\ndef spatial_3d_padding(x, padding=((1, 1), (1, 1), (1, 1)), data_format=None):\n    assert len(padding) == 3\n    assert len(padding[0]) == 2\n    assert len(padding[1]) == 2\n    assert len(padding[2]) == 2\n    if data_format is None:\n        data_format = image_data_format()\n    if data_format not in {'channels_first', 'channels_last'}:\n        raise ValueError('Unknown data_format: ' + str(data_format))\n    if data_format == 'channels_first':\n        pattern = [[0, 0], [0, 0], [padding[0][0], padding[0][1]], [padding[1][0], padding[1][1]], [padding[2][0], padding[2][1]]]\n    else:\n        pattern = [[0, 0], [padding[0][0], padding[0][1]], [padding[1][0], padding[1][1]], [padding[2][0], padding[2][1]], [0, 0]]\n    return array_ops.pad(x, pattern)",
    "docstring": "Pads 5D tensor with zeros along the depth, height, width dimensions. Pads these dimensions with respectively \"padding[0]\", \"padding[1]\" and \"padding[2]\" zeros left and right. For 'channels_last' data_format, the 2nd, 3rd and 4th dimension will be padded. For 'channels_first' data_format, the 3rd, 4th and 5th dimension will be padded. Args: x: Tensor or variable. padding: Tuple of 3 tuples, padding pattern. data_format: One of or . Returns: A padded 5D tensor. Raises: ValueError: if is neither or .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\backend.py",
    "ast_data": "FunctionDef name:spatial_3d_padding arg:x arg:padding arg:data_format arguments arg arg arg Compare Call Compare Call Compare Call Compare Call If Compare Assign Call If Compare Raise Call Call If Compare Assign Assign Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "available",
    "source_code": "def available(self, o):\n    return not self.locked() or self.isowner(o)",
    "docstring": "Return whether drawing is available to *o*.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\widgets.py",
    "ast_data": "FunctionDef name:available arg:self arg:o arguments arg arg Return return:yes BoolOp Call Call"
  },
  {
    "library": "pytorch",
    "name": "all_reduce",
    "source_code": "def all_reduce(self: torch.Tensor, reduceOp: str, group: RANK_TYPES, tag: str=''):\n    group_name = _resolve_group_name(group, tag)\n    tensor = torch.ops._c10d_functional.all_reduce(self, reduceOp.lower(), group_name)\n    return _maybe_wrap_tensor(tensor)",
    "docstring": "Reduces the tensor data across all machines in such a way that all get the final result. The input tensor is left unmodified. Group can be one of: List[int]: ranks participating in the collective. List[List[int]]: 2D mesh of ranks taking part of this collective in MPMD. ProcessGroup: Will perform a collective using the ranks and tag of the PG. DeviceMesh: Do a SPMD collective over all ranks of the mesh (DeviceMesh, int): Do a MPMD collective over one dimension of the DeviceMesh :: N.B. If you pass a PG or a 1D list to perform a MPMD collective, the compiler won't be able to recover that information and perform collective algebraic optimization. Use other forms of input for that.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\_functional_collectives.py",
    "ast_data": "FunctionDef name:all_reduce arg:self arg:reduceOp arg:group arg:tag arguments arg arg arg arg Assign Call Assign Call Call Return return:yes Call"
  },
  {
    "library": "authlib",
    "name": "UserInfoEndpoint",
    "source_code": "class UserInfoEndpoint:\n    ENDPOINT_NAME = 'userinfo'\n\n    def __init__(self, server: Optional[AuthorizationServer]=None, resource_protector: Optional[ResourceProtector]=None):\n        self.server = server\n        self.resource_protector = resource_protector\n\n    def create_endpoint_request(self, request: OAuth2Request):\n        return self.server.create_oauth2_request(request)\n\n    def __call__(self, request: OAuth2Request):\n        token = self.resource_protector.acquire_token('openid')\n        client = token.get_client()\n        user = token.get_user()\n        user_info = self.generate_user_info(user, token.scope)\n        if (alg := client.client_metadata.get('userinfo_signed_response_alg')):\n            user_info['iss'] = self.get_issuer()\n            user_info['aud'] = client.client_id\n            data = jwt.encode({'alg': alg}, user_info, self.resolve_private_key())\n            return (200, data, [('Content-Type', 'application/jwt')])\n        return (200, user_info, default_json_headers)\n\n    def generate_user_info(self, user, scope: str) -> UserInfo:\n        raise NotImplementedError()\n\n    def get_issuer(self) -> str:\n        raise NotImplementedError()\n\n    def resolve_private_key(self):\n        return None",
    "docstring": "OpenID Connect Core UserInfo Endpoint. This endpoint returns information about a given user, as a JSON payload or as a JWT. It must be subclassed and a few methods needs to be manually implemented:: class UserInfoEndpoint(oidc.core.UserInfoEndpoint): def get_issuer(self): return \" def generate_user_info(self, user, scope): return UserInfo( sub=user.id, name=user.name, ... ).filter(scope) def resolve_private_key(self): return server_private_jwk_set() It is also needed to pass a :class: instance with a registered :class: at initialization, so the access to the endpoint can be restricter to valid token bearers:: resource_protector = ResourceProtector() resource_protector.register_token_validator(BearerTokenValidator()) server.register_endpoint( UserInfoEndpoint(resource_protector=resource_protector) ) And then you can plug the endpoint to your application:: @app.route(\"/oauth/userinfo\", methods=[\"GET\", \"POST\"]) def userinfo(): return server.create_endpoint_response(\"userinfo\")",
    "type": "class",
    "file_path": "authlib\\authlib\\oidc\\core\\userinfo.py",
    "ast_data": "ClassDef name:UserInfoEndpoint Assign FunctionDef name:__init__ arg:self arg:server arg:resource_protector arguments arg arg arg Assign Assign FunctionDef name:create_endpoint_request arg:self arg:request arguments arg arg Return return:yes Call FunctionDef name:__call__ arg:self arg:request arguments arg arg Assign Call Assign Call Assign Call Assign Call If Call Assign Call Assign Assign Call Call Return return:yes Return return:yes FunctionDef name:generate_user_info arg:self arg:user arg:scope arguments arg arg arg Raise Call FunctionDef name:get_issuer arg:self arguments arg Raise Call FunctionDef name:resolve_private_key arg:self arguments arg Return return:no"
  },
  {
    "library": "tensorflow",
    "name": "_tf_ragged_for_stmt",
    "source_code": "def _tf_ragged_for_stmt(iter_, extra_test, body, get_state, set_state, symbol_names, opts):\n    init_vars = get_state()\n    control_flow.verify_loop_init_vars(init_vars, symbol_names)\n    if iter_.shape and iter_.shape[0] is not None:\n        n = iter_.shape[0]\n    else:\n        n = iter_.row_lengths()[0]\n    iterate_index = 0\n\n    def aug_get_state():\n        return (iterate_index,) + get_state()\n\n    def aug_set_state(aug_loop_vars):\n        nonlocal iterate_index\n        iterate_index, *loop_vars = aug_loop_vars\n        set_state(loop_vars)\n\n    def aug_body():\n        nonlocal iterate_index\n        body(iter_[iterate_index])\n        iterate_index += 1\n\n    def aug_test():\n        main_test = iterate_index < n\n        if extra_test is not None:\n            return tf_cond.cond(main_test, extra_test, lambda: False)\n        return main_test\n    control_flow._add_max_iterations_hint(opts, n)\n    control_flow._tf_while_stmt(aug_test, aug_body, aug_get_state, aug_set_state, ('<internal iterate>',) + symbol_names, opts)",
    "docstring": "Overload of for_stmt that iterates over TF ragged tensors.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\ragged_autograph.py",
    "ast_data": "FunctionDef name:_tf_ragged_for_stmt arg:iter_ arg:extra_test arg:body arg:get_state arg:set_state arg:symbol_names arg:opts arguments arg arg arg arg arg arg arg Assign Call Call If BoolOp Compare Assign Assign Call Assign FunctionDef name:aug_get_state arguments Return return:yes Call FunctionDef name:aug_set_state arg:aug_loop_vars arguments arg Assign Call FunctionDef name:aug_body arguments Call FunctionDef name:aug_test arguments Assign Compare If Compare Return return:yes Call arguments Return return:yes Call Call"
  },
  {
    "library": "virtualenv",
    "name": "ExePathRefToDest",
    "source_code": "class ExePathRefToDest(PathRefToDest, ExePathRef):\n\n    def __init__(self, src, targets, dest, must=RefMust.NA, when=RefWhen.ANY) -> None:\n        ExePathRef.__init__(self, src, must, when)\n        PathRefToDest.__init__(self, src, dest, must, when)\n        if not self.FS_CASE_SENSITIVE:\n            targets = list(OrderedDict(((i.lower(), None) for i in targets)).keys())\n        self.base = targets[0]\n        self.aliases = targets[1:]\n        self.dest = dest\n\n    def run(self, creator, symlinks):\n        bin_dir = self.dest(creator, self.src).parent\n        dest = bin_dir / self.base\n        method = self.method(symlinks)\n        method(self.src, dest)\n        if not symlinks:\n            make_exe(dest)\n        for extra in self.aliases:\n            link_file = bin_dir / extra\n            if link_file.exists():\n                link_file.unlink()\n            if symlinks:\n                link_file.symlink_to(self.base)\n            else:\n                copy(self.src, link_file)\n            if not symlinks:\n                make_exe(link_file)\n\n    def __repr__(self) -> str:\n        return f'{self.__class__.__name__}(src={self.src}, alias={self.aliases})'",
    "docstring": "Link a exe path on the file system.",
    "type": "class",
    "file_path": "virtualenv\\src\\virtualenv\\create\\via_global_ref\\builtin\\ref.py",
    "ast_data": "ClassDef name:ExePathRefToDest FunctionDef name:__init__ arg:self arg:src arg:targets arg:dest arg:must arg:when arguments arg arg arg arg arg arg Call Call If Assign Call Call Call Call Assign Assign Assign FunctionDef name:run arg:self arg:creator arg:symlinks arguments arg arg arg Assign Call Assign Assign Call Call If Call For Assign If Call Call If Call Call If Call FunctionDef name:__repr__ arg:self arguments arg Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "can_pan",
    "source_code": "def can_pan(self):\n    return False",
    "docstring": "Return whether this Axes supports the pan/zoom button functionality. This Axes object does not support interactive pan/zoom.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\projections\\geo.py",
    "ast_data": "FunctionDef name:can_pan arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "__rfloordiv__",
    "source_code": "def __rfloordiv__(self, other):\n    other = as_dimension(other)\n    if self._value is None or other.value is None:\n        return Dimension(None)\n    else:\n        return Dimension(other.value // self._value)",
    "docstring": "Returns the quotient of and rounded down. Args: other: Another Dimension, or a value accepted by . Returns: A whose value is the integer quotient of and .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\tensor_shape.py",
    "ast_data": "FunctionDef name:__rfloordiv__ arg:self arg:other arguments arg arg Assign Call If BoolOp Compare Compare Return return:yes Call Return return:yes Call"
  },
  {
    "library": "kornia",
    "name": "update_from_dict",
    "source_code": "def update_from_dict(self, dic: Dict[str, float], batch_size: int) -> None:\n    for k, v in dic.items():\n        self.update(k, v, batch_size)",
    "docstring": "Update the stats by the dict.",
    "type": "method",
    "file_path": "kornia\\kornia\\x\\utils.py",
    "ast_data": "FunctionDef name:update_from_dict arg:self arg:dic arg:batch_size arguments arg arg arg For Call Call"
  },
  {
    "library": "scikit-learn",
    "name": "is_stationary",
    "source_code": "def is_stationary(self):\n    return self.metric in ['rbf']",
    "docstring": "Returns whether the kernel is stationary.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\gaussian_process\\kernels.py",
    "ast_data": "FunctionDef name:is_stationary arg:self arguments arg Return return:yes Compare"
  },
  {
    "library": "tensorflow",
    "name": "TfRecordRepresentativeDatasetLoader",
    "source_code": "class TfRecordRepresentativeDatasetLoader(RepresentativeDatasetLoader):\n\n    def __init__(self, dataset_file_map: Mapping[str, _RepresentativeDatasetFile]) -> None:\n        self.dataset_file_map = dataset_file_map\n\n    def _load_tf_record(self, tf_record_path: str) -> RepresentativeDataset:\n        samples = []\n        with context.eager_mode():\n            for sample_bytes in readers.TFRecordDatasetV2(filenames=[tf_record_path]):\n                sample_proto = _RepresentativeDataSample.FromString(sample_bytes.numpy())\n                sample = {}\n                for input_key, tensor_proto in sample_proto.tensor_proto_inputs.items():\n                    sample[input_key] = tensor_util.MakeNdarray(tensor_proto)\n                samples.append(sample)\n        return samples\n\n    def load(self) -> RepresentativeDatasetMapping:\n        repr_dataset_map = {}\n        for signature_def_key, dataset_file in self.dataset_file_map.items():\n            if dataset_file.HasField('tfrecord_file_path'):\n                repr_dataset_map[signature_def_key] = self._load_tf_record(dataset_file.tfrecord_file_path)\n            else:\n                raise ValueError('Unsupported Representative Dataset filetype')\n        return repr_dataset_map",
    "docstring": "TFRecord representative dataset loader. Loads representative dataset stored in TFRecord files.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\compiler\\mlir\\quantization\\tensorflow\\python\\representative_dataset.py",
    "ast_data": "ClassDef name:TfRecordRepresentativeDatasetLoader FunctionDef name:__init__ arg:self arg:dataset_file_map arguments arg arg Assign FunctionDef name:_load_tf_record arg:self arg:tf_record_path arguments arg arg Assign With Call For Call Assign Call Call Assign For Call Assign Call Call Return return:yes FunctionDef name:load arg:self arguments arg Assign For Call If Call Assign Call Raise Call Return return:yes"
  },
  {
    "library": "scipy",
    "name": "calculate_solid_angles",
    "source_code": "def calculate_solid_angles(R):\n    numerator = np.linalg.det(R)\n    denominator = 1 + (np.einsum('ij,ij->i', R[:, 0], R[:, 1]) + np.einsum('ij,ij->i', R[:, 1], R[:, 2]) + np.einsum('ij,ij->i', R[:, 2], R[:, 0]))\n    return np.abs(2 * np.arctan2(numerator, denominator))",
    "docstring": "Calculates the solid angles of plane triangles. Implements the method of Van Oosterom and Strackee [VanOosterom]_ with some modifications. Assumes that input points have unit norm.",
    "type": "function",
    "file_path": "scipy\\scipy\\spatial\\_spherical_voronoi.py",
    "ast_data": "FunctionDef name:calculate_solid_angles arg:R arguments arg Assign Call Assign Call Call Call Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "handle_equal",
    "source_code": "@staticmethod\ndef handle_equal(args, kwargs) -> bool:\n    a, b = (args[0], args[1])\n    if len(a.local_shards()) != len(b.local_shards()):\n        return False\n    if not all((aten.equal.default(x, y) for x, y in zip(a.local_shards(), b.local_shards()))):\n        return False\n    if not a.storage_metadata() == b.storage_metadata():\n        return False\n    return True",
    "docstring": "LocalShardsWrapper equal impl also checks for equality of storage metadata and the order of shards",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\tensor\\_shards_wrapper.py",
    "ast_data": "FunctionDef name:handle_equal arg:args arg:kwargs arguments arg arg Assign If Compare Call Call Call Call Return return:yes If Call Call Call Call Call Return return:yes If Compare Call Call Return return:yes Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "OutputLogger",
    "source_code": "class OutputLogger(torch.nn.Module):\n    _is_impure = True\n\n    def __init__(self, debug_handle: int, node_name: Optional[str]=None, nn_module_stack: Optional[object]=None) -> None:\n        super().__init__()\n        self.node_name = node_name\n        self.nn_module_stack = nn_module_stack\n        self.debug_handle = debug_handle\n        self.stats: list[object] = []\n\n    def forward(self, x: object) -> object:\n        self.stats.append(_detach(x))\n        return x\n\n    def __extra_repr__(self) -> str:\n        return f'debug_handle={self.debug_handle}, node_name={self.node_name}, nn_module_stack={{self.nn_module_stack}}, num_stats={{len(self.stats)}})'",
    "docstring": "Base class for capturing output values for nodes in a GraphModule, it only captures Tensor output currently, but we can extend it to work for other types of inputs later if needed",
    "type": "class",
    "file_path": "pytorch\\torch\\ao\\quantization\\pt2e\\_numeric_debugger.py",
    "ast_data": "ClassDef name:OutputLogger Assign FunctionDef name:__init__ arg:self arg:debug_handle arg:node_name arg:nn_module_stack arguments arg arg arg arg Call Call Assign Assign Assign FunctionDef name:forward arg:self arg:x arguments arg arg Call Call Return return:yes FunctionDef name:__extra_repr__ arg:self arguments arg Return return:yes"
  },
  {
    "library": "cryptography",
    "name": "parameter_bytes",
    "source_code": "@abc.abstractmethod\ndef parameter_bytes(self, encoding: _serialization.Encoding, format: _serialization.ParameterFormat) -> bytes:\n    pass",
    "docstring": "Returns the parameters serialized as bytes.",
    "type": "method",
    "file_path": "cryptography\\src\\cryptography\\hazmat\\primitives\\asymmetric\\dh.py",
    "ast_data": "FunctionDef name:parameter_bytes arg:self arg:encoding arg:format arguments arg arg arg"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, op_type):\n    if not isinstance(op_type, str):\n        raise TypeError('op_type must be a string')\n    self._op_type = op_type",
    "docstring": "Creates a new decorator with as the Operation type. Args: op_type: The string type of an operation. This corresponds to the field for the proto that defines the operation. Raises: TypeError: If is not string.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\ops.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:op_type arguments arg arg If Call Raise Call Assign"
  },
  {
    "library": "pandas",
    "name": "dict_compat",
    "source_code": "def dict_compat(d: dict[Scalar, Scalar]) -> dict[Scalar, Scalar]:\n    return {maybe_box_datetimelike(key): value for key, value in d.items()}",
    "docstring": "Convert datetimelike-keyed dicts to a Timestamp-keyed dict. Parameters ---------- d: dict-like object Returns ------- dict",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\dtypes\\cast.py",
    "ast_data": "FunctionDef name:dict_compat arg:d arguments arg Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "__call__",
    "source_code": "def __call__(self, *args, **kwargs):\n    pass",
    "docstring": "Executes this callable. This behaves like a regular op - in eager mode, it immediately starts execution, returning results. In graph mode, it creates ops which return symbolic TensorFlow values (like , , etc.). For example, callables typically generate a op, but not always - the exact operations being generated are an internal implementation detail. Args: *args: positional argument for this call **kwargs: keyword arguments for this call Returns: The execution results.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\types\\core.py",
    "ast_data": "FunctionDef name:__call__ arg:self arguments arg arg arg"
  },
  {
    "library": "matplotlib",
    "name": "draw_mathtext",
    "source_code": "@_log_if_debug_on\ndef draw_mathtext(self, gc, x, y, s, prop, angle):\n    width, height, descent, glyphs, rects = self._text2path.mathtext_parser.parse(s, 72, prop)\n    self.set_color(*gc.get_rgb())\n    self._pswriter.write(f'gsave\\n{x:g} {y:g} translate\\n{angle:g} rotate\\n')\n    lastfont = None\n    for font, fontsize, num, ox, oy in glyphs:\n        self._character_tracker.track_glyph(font, num)\n        if (font.postscript_name, fontsize) != lastfont:\n            lastfont = (font.postscript_name, fontsize)\n            self._pswriter.write(f'/{font.postscript_name} {fontsize} selectfont\\n')\n        glyph_name = font.get_name_char(chr(num)) if isinstance(font, AFM) else font.get_glyph_name(font.get_char_index(num))\n        self._pswriter.write(f'{ox:g} {oy:g} moveto\\n/{glyph_name} glyphshow\\n')\n    for ox, oy, w, h in rects:\n        self._pswriter.write(f'{ox} {oy} {w} {h} rectfill\\n')\n    self._pswriter.write('grestore\\n')",
    "docstring": "Draw the math text using matplotlib.mathtext.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\backends\\backend_ps.py",
    "ast_data": "FunctionDef name:draw_mathtext arg:self arg:gc arg:x arg:y arg:s arg:prop arg:angle arguments arg arg arg arg arg arg arg Assign Call Call Call Call Assign For Call If Compare Assign Call Assign Call Call Call Call Call Call For Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_validate_flat_values",
    "source_code": "def _validate_flat_values(self, flat_values):\n    if not isinstance(flat_values, tensor_lib.Tensor):\n        return flat_values\n    if self.row_partitions:\n        last_row_partition = self.row_partitions[-1]\n        flat_values_shape = flat_values.shape\n        if flat_values_shape is None:\n            return self._validate_flat_values_dynamically(flat_values)\n        first_dim_flat_values = flat_values_shape[0]\n        if isinstance(first_dim_flat_values, tensor_shape.Dimension):\n            first_dim_flat_values = first_dim_flat_values.value\n        if first_dim_flat_values is None:\n            return self._validate_flat_values_dynamically(flat_values)\n        static_nvals = last_row_partition.static_nvals\n        if static_nvals is None:\n            return self._validate_flat_values_dynamically(flat_values)\n        if first_dim_flat_values != static_nvals:\n            raise ValueError('Last row partition does not match flat_values.')\n    return flat_values",
    "docstring": "Test if flat_values have the right nvals.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\dynamic_ragged_shape.py",
    "ast_data": "FunctionDef name:_validate_flat_values arg:self arg:flat_values arguments arg arg If Call Return return:yes If Assign Assign If Compare Return return:yes Call Assign If Call Assign If Compare Return return:yes Call Assign If Compare Return return:yes Call If Compare Raise Call Return return:yes"
  },
  {
    "library": "scipy",
    "name": "Keane",
    "source_code": "class Keane(Benchmark):\n\n    def __init__(self, dimensions=2):\n        Benchmark.__init__(self, dimensions)\n        self._bounds = list(zip([0.0] * self.N, [10.0] * self.N))\n        self.global_optimum = [[7.85396153, 7.85396135]]\n        self.custom_bounds = [(-1, 0.34), (-1, 0.34)]\n        self.fglob = 0.0\n\n    def fun(self, x, *args):\n        self.nfev += 1\n        val = sin(x[0] - x[1]) ** 2 * sin(x[0] + x[1]) ** 2\n        return val / sqrt(x[0] ** 2 + x[1] ** 2)",
    "docstring": "Keane objective function. This class defines the Keane [1]_ global optimization problem. This is a multimodal minimization problem defined as follows: .. math:: f_{\\text{Keane}}(x) = \\frac{\\sin^2(x_1 - x_2)\\sin^2(x_1 + x_2)} {\\sqrt{x_1^2 + x_2^2}} with :math: for :math:. *Global optimum*: :math: for :math:. .. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions For Global Optimization Problems Int. Journal of Mathematical Modelling and Numerical Optimisation, 2013, 4, 150-194. TODO: Jamil #69, there is no way that the function can have a negative value. Everything is squared. I think that they have the wrong solution.",
    "type": "class",
    "file_path": "scipy\\benchmarks\\benchmarks\\go_benchmark_functions\\go_funcs_K.py",
    "ast_data": "ClassDef name:Keane FunctionDef name:__init__ arg:self arg:dimensions arguments arg arg Call Assign Call Call Assign Assign Assign FunctionDef name:fun arg:self arg:x arguments arg arg arg Assign Call Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_get_dense_tensor",
    "source_code": "def _get_dense_tensor(self, inputs, weight_collections=None, trainable=None):\n    del weight_collections\n    del trainable\n    if isinstance(self.categorical_column, _SequenceCategoricalColumn):\n        raise ValueError('In indicator_column: {}. categorical_column must not be of type _SequenceCategoricalColumn. Suggested fix A: If you wish to use input_layer, use a non-sequence categorical_column_with_*. Suggested fix B: If you wish to create sequence input, use sequence_input_layer instead of input_layer. Given (type {}): {}'.format(self.name, type(self.categorical_column), self.categorical_column))\n    return inputs.get(self)",
    "docstring": "Returns dense representing feature. Args: inputs: A object to access inputs. weight_collections: Unused since no variables are created in this function. trainable: Unused bool since no variables are created in this function. Returns: Dense created within . Raises: ValueError: If is a .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\feature_column\\feature_column.py",
    "ast_data": "FunctionDef name:_get_dense_tensor arg:self arg:inputs arg:weight_collections arg:trainable arguments arg arg arg arg If Call Raise Call Call Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "unpack",
    "source_code": "def unpack(t: TensorLike, layout: layout_lib.Layout, split_fn=np.split, stack_fn=np.stack) -> List[TensorLike]:\n    if not layout.rank:\n        return [t] * layout.mesh.size\n    sharded_tensor = _split(t, [layout.num_shards(i) for i in range(layout.rank)], split_fn=split_fn, stack_fn=stack_fn)\n    flattened = [np.ndarray([])] * layout.mesh.size\n    for offset, shard in enumerate(layout.offset_to_shard()):\n        flattened[offset] = sharded_tensor[tuple(shard)]\n    return flattened",
    "docstring": "Slice into a flattened list of tensors suitable for .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\dtensor\\python\\numpy_util.py",
    "ast_data": "FunctionDef name:unpack arg:t arg:layout arg:split_fn arg:stack_fn arguments arg arg arg arg If Return return:yes Assign Call Call Call Assign Call For Call Call Assign Call Return return:yes"
  },
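The `unpack` entry splits a global tensor into one piece per mesh device by applying np.split once per sharded dimension. A minimal numpy sketch of that nested-split idea, independent of DTensor (the 2x2 layout and row-major offsets here are our own illustration, not the `_split` helper):

```python
import numpy as np

# Split a 4x4 "global" array into a 2x2 grid of shards, mimicking the
# per-dimension np.split that unpack() performs for a rank-2 layout with
# num_shards = [2, 2]. Offsets are enumerated row-major by hand.
t = np.arange(16).reshape(4, 4)
rows = np.split(t, 2, axis=0)                    # 2 shards along dim 0
shards = [np.split(r, 2, axis=1) for r in rows]  # 2 shards along dim 1
flattened = [shards[i][j] for i in range(2) for j in range(2)]
for s in flattened:
    print(s.shape)  # each shard is (2, 2)
```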
  {
    "library": "pytorch",
    "name": "pip",
    "source_code": "def pip(self, *args: str, **popen_kwargs: Any) -> subprocess.CompletedProcess[str]:\n    return self.python('-m', 'pip', *args, **popen_kwargs)",
    "docstring": "Run a pip command in the virtual environment.",
    "type": "method",
    "file_path": "pytorch\\tools\\nightly.py",
    "ast_data": "FunctionDef name:pip arg:self arguments arg arg arg Return return:yes Call"
  },
  {
    "library": "sphinx",
    "name": "desc_name",
    "source_code": "class desc_name(_desc_classes_injector, nodes.Part, nodes.Inline, nodes.FixedTextElement):\n    classes = ['sig-name', 'descname']",
    "docstring": "Node for the main object name. For example, in the declaration of a Python class ``.",
    "type": "class",
    "file_path": "sphinx\\sphinx\\addnodes.py",
    "ast_data": "ClassDef name:desc_name Assign"
  },
  {
    "library": "tensorflow",
    "name": "OpStats",
    "source_code": "class OpStats(object):\n    __slots__ = ['_statistic_type', '_value']\n\n    def __init__(self, statistic_type, value=None) -> None:\n        self.statistic_type = statistic_type\n        self.value = value\n\n    @property\n    def statistic_type(self):\n        return self._statistic_type\n\n    @statistic_type.setter\n    def statistic_type(self, statistic_type):\n        self._statistic_type = statistic_type\n\n    @property\n    def value(self):\n        return self._value\n\n    @value.setter\n    def value(self, value):\n        self._value = value\n\n    def __iadd__(self: OpStatsType, other: OpStatsType) -> OpStatsType:\n        if other.statistic_type != self.statistic_type:\n            raise ValueError(\"Can't add an OpStat of type %s to one of %s.\" % (self.statistic_type, other.statistic_type))\n        if self.value is None:\n            self.value = other.value\n        elif other.value is not None:\n            self._value += other.value\n        return self",
    "docstring": "A holder for statistics about an operator. This class holds information about the resource requirements for an op, including the size of its weight parameters on-disk and how many FLOPS it requires to execute forward inference. If you define a new operation, you can create a function that will return a set of information about its usage of the CPU and disk space when serialized. The function itself takes a Graph object that's been set up so you can call methods like get_tensor_by_name to help calculate the results, and a NodeDef argument.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\ops.py",
    "ast_data": "ClassDef name:OpStats Assign FunctionDef name:__init__ arg:self arg:statistic_type arg:value arguments arg arg arg Assign Assign FunctionDef name:statistic_type arg:self arguments arg Return return:yes FunctionDef name:statistic_type arg:self arg:statistic_type arguments arg arg Assign FunctionDef name:value arg:self arguments arg Return return:yes FunctionDef name:value arg:self arg:value arguments arg arg Assign FunctionDef name:__iadd__ arg:self arg:other arguments arg arg If Compare Raise Call If Compare Assign If Compare Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "increment",
    "source_code": "def increment(self, metric: str, value: int, extra: Optional[dict[str, Any]]=None) -> None:\n    if not self._metrics:\n        self._start_time_ns = time.time_ns()\n    if metric not in self._metrics:\n        self._metrics[metric] = 0\n    self._metrics[metric] += value\n    if extra:\n        for k, v in extra.items():\n            if k not in self._metrics and v is not None:\n                self._metrics[k] = v",
    "docstring": "Increment a metric by a given amount.",
    "type": "method",
    "file_path": "pytorch\\torch\\_dynamo\\metrics_context.py",
    "ast_data": "FunctionDef name:increment arg:self arg:metric arg:value arg:extra arguments arg arg arg arg If Assign Call If Compare Assign If For Call If BoolOp Compare Compare Assign"
  },
  {
    "library": "pytorch",
    "name": "save_state_dict",
    "source_code": "@deprecated('`save_state_dict` is deprecated and will be removed in future versions.Please use `save` instead.', category=FutureWarning)\ndef save_state_dict(state_dict: STATE_DICT_TYPE, storage_writer: StorageWriter, process_group: Optional[dist.ProcessGroup]=None, coordinator_rank: int=0, no_dist: bool=False, planner: Optional[SavePlanner]=None) -> Metadata:\n    storage_writer.reset()\n    with _profile():\n        return _save_state_dict(state_dict, storage_writer, process_group, coordinator_rank, no_dist, planner)",
    "docstring": "This method is deprecated. Please switch to 'save'.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\checkpoint\\state_dict_saver.py",
    "ast_data": "FunctionDef name:save_state_dict arg:state_dict arg:storage_writer arg:process_group arg:coordinator_rank arg:no_dist arg:planner arguments arg arg arg arg arg arg Call With Call Return return:yes Call Call"
  },
  {
    "library": "scikit-learn",
    "name": "arange",
    "source_code": "def arange(start: float, /, stop: float | None=None, step: float=1, *, dtype: DType | None=None, device: Device | None=None, **kwargs: object) -> Array:\n    _helpers._check_device(da, device)\n    args: list[Any] = [start]\n    if stop is not None:\n        args.append(stop)\n    else:\n        args.insert(0, 0)\n    args.append(step)\n    return da.arange(*args, dtype=dtype, **kwargs)",
    "docstring": "Array API compatibility wrapper for arange(). See the corresponding documentation in the array library and/or the array API specification for more details.",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\externals\\array_api_compat\\dask\\array\\_aliases.py",
    "ast_data": "FunctionDef name:arange arg:stop arg:step arguments arg arg arg arg arg arg Call If Compare Call Call Call Return return:yes Call"
  },
  {
    "library": "authlib",
    "name": "revoke_token",
    "source_code": "def revoke_token(self, token, request):\n    raise NotImplementedError()",
    "docstring": "Mark token as revoked. Since token MUST be unique, it would be dangerous to delete it. Consider this situation: 1. Jane obtained a token XYZ 2. Jane revoked (deleted) token XYZ 3. Bob generated a new token XYZ 4. Jane can use XYZ to access Bob's resource It would be secure to mark a token as revoked:: def revoke_token(self, token, request): hint = request.form.get(\"token_type_hint\") if hint == \"access_token\": token.access_token_revoked = True else: token.access_token_revoked = True token.refresh_token_revoked = True token.save()",
    "type": "method",
    "file_path": "authlib\\authlib\\oauth2\\rfc7009\\revocation.py",
    "ast_data": "FunctionDef name:revoke_token arg:self arg:token arg:request arguments arg arg arg Raise Call"
  },
  {
    "library": "tensorflow",
    "name": "from_config",
    "source_code": "@classmethod\ndef from_config(cls, config):\n    return cls(**config)",
    "docstring": "Instantiates a from its config (output of ). Args: config: Output of . Returns: A instance.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\losses.py",
    "ast_data": "FunctionDef name:from_config arg:cls arg:config arguments arg arg Return return:yes Call"
  },
  {
    "library": "scrapy",
    "name": "from_args",
    "source_code": "def from_args(self, headers: Mapping[bytes, bytes] | None=None, url: str | None=None, filename: str | None=None, body: bytes | None=None) -> type[Response]:\n    cls = Response\n    if headers is not None:\n        cls = self.from_headers(headers)\n    if cls is Response and url is not None:\n        cls = self.from_filename(url)\n    if cls is Response and filename is not None:\n        cls = self.from_filename(filename)\n    if cls is Response and body is not None:\n        cls = self.from_body(body)\n    return cls",
    "docstring": "Guess the most appropriate Response class based on the given arguments.",
    "type": "method",
    "file_path": "scrapy\\scrapy\\responsetypes.py",
    "ast_data": "FunctionDef name:from_args arg:self arg:headers arg:url arg:filename arg:body arguments arg arg arg arg arg Assign If Compare Assign Call If BoolOp Compare Compare Assign Call If BoolOp Compare Compare Assign Call If BoolOp Compare Compare Assign Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "clear",
    "source_code": "def clear(self):\n    self.mismatch_error = None\n    self.pt_outs = None\n    self._onnx_graph = None\n    self.upper_graph_info = None\n    self.lower_graph_info = None",
    "docstring": "Clear states and results of previous verification.",
    "type": "method",
    "file_path": "pytorch\\torch\\onnx\\verification.py",
    "ast_data": "FunctionDef name:clear arg:self arguments arg Assign Assign Assign Assign Assign"
  },
  {
    "library": "kornia",
    "name": "KORNIA_CHECK",
    "source_code": "def KORNIA_CHECK(condition: bool, msg: Optional[str]=None, raises: bool=True) -> bool:\n    if not condition:\n        if raises:\n            raise Exception(f'{condition} not true.\\n{msg}')\n        return False\n    return True",
    "docstring": "Check any arbitrary boolean condition. Args: condition: the condition to evaluate. msg: message to show in the exception. raises: bool indicating whether an exception should be raised upon failure. Raises: Exception: if the condition is met and raises is True. Example: >>> x = torch.rand(2, 3, 3) >>> KORNIA_CHECK(x.shape[-2:] == (3, 3), \"Invalid homography\") True",
    "type": "function",
    "file_path": "kornia\\kornia\\core\\check.py",
    "ast_data": "FunctionDef name:KORNIA_CHECK arg:condition arg:msg arg:raises arguments arg arg arg If If Raise Call Return return:yes Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "get_flattened_names",
    "source_code": "def get_flattened_names(feeds_or_fetches):\n    lines = []\n    if isinstance(feeds_or_fetches, (list, tuple)):\n        for item in feeds_or_fetches:\n            lines.extend(get_flattened_names(item))\n    elif isinstance(feeds_or_fetches, dict):\n        for key in feeds_or_fetches:\n            lines.extend(get_flattened_names(feeds_or_fetches[key]))\n    else:\n        lines.append(get_graph_element_name(feeds_or_fetches))\n    return lines",
    "docstring": "Get a flattened list of the names in run() call feeds or fetches. Args: feeds_or_fetches: Feeds or fetches of the call. It maybe a Tensor, an Operation or a Variable. It may also be nested lists, tuples or dicts. See doc of for more details. Returns: (list of str) A flattened list of fetch names from .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\debug\\lib\\common.py",
    "ast_data": "FunctionDef name:get_flattened_names arg:feeds_or_fetches arguments arg Assign If Call For Call Call If Call For Call Call Call Call Return return:yes"
  },
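`get_flattened_names` is a plain recursive traversal over lists, tuples, and dicts. A self-contained sketch of the same pattern, with `str()` standing in for `get_graph_element_name` so it runs without TensorFlow:

```python
def flatten_names(feeds_or_fetches):
    # Same traversal as get_flattened_names; str() is a hypothetical
    # stand-in for get_graph_element_name.
    names = []
    if isinstance(feeds_or_fetches, (list, tuple)):
        for item in feeds_or_fetches:
            names.extend(flatten_names(item))
    elif isinstance(feeds_or_fetches, dict):
        for key in feeds_or_fetches:
            names.extend(flatten_names(feeds_or_fetches[key]))
    else:
        names.append(str(feeds_or_fetches))
    return names

print(flatten_names({"a": ["x:0", ("y:0",)], "b": "z:0"}))
# ['x:0', 'y:0', 'z:0']
```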
  {
    "library": "django",
    "name": "get_model",
    "source_code": "def get_model(self, app_label, model_name=None, require_ready=True):\n    if require_ready:\n        self.check_models_ready()\n    else:\n        self.check_apps_ready()\n    if model_name is None:\n        app_label, model_name = app_label.split('.')\n    app_config = self.get_app_config(app_label)\n    if not require_ready and app_config.models is None:\n        app_config.import_models()\n    return app_config.get_model(model_name, require_ready=require_ready)",
    "docstring": "Return the model matching the given app_label and model_name. As a shortcut, app_label may be in the form .. model_name is case-insensitive. Raise LookupError if no application exists with this label, or no model exists with this name in the application. Raise ValueError if called with a single argument that doesn't contain exactly one dot.",
    "type": "method",
    "file_path": "django\\django\\apps\\registry.py",
    "ast_data": "FunctionDef name:get_model arg:self arg:app_label arg:model_name arg:require_ready arguments arg arg arg arg If Call Call If Compare Assign Call Assign Call If BoolOp Compare Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "lookup",
    "source_code": "def lookup(self, obj):\n    return self._registered_map[self.get_registered_name(obj)]",
    "docstring": "Looks up the registered object using the predicate. Args: obj: Object to pass to each of the registered predicates to look up the registered object. Returns: The object registered with the first passing predicate. Raises: LookupError if the object does not match any of the predicate functions.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\saved_model\\registration\\registration.py",
    "ast_data": "FunctionDef name:lookup arg:self arg:obj arguments arg arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "apply_to_single_assignments",
    "source_code": "def apply_to_single_assignments(targets, values, apply_fn):\n    if not isinstance(targets, (list, tuple)):\n        targets = (targets,)\n    for target in targets:\n        if isinstance(target, (gast.Tuple, gast.List)):\n            for i in range(len(target.elts)):\n                target_el = target.elts[i]\n                if isinstance(values, (gast.Tuple, gast.List)):\n                    value_el = values.elts[i]\n                else:\n                    idx = parser.parse_expression(str(i))\n                    value_el = gast.Subscript(values, idx, ctx=gast.Load())\n                apply_to_single_assignments(target_el, value_el, apply_fn)\n        else:\n            apply_fn(target, values)",
    "docstring": "Applies a function to each individual assignment. This function can process a possibly-unpacked (e.g. a, b = c, d) assignment. It tries to break down the unpacking if possible. In effect, it has the same effect as passing the assigned values in SSA form to apply_fn. Examples: The following will result in apply_fn(a, c), apply_fn(b, d): a, b = c, d The following will result in apply_fn(a, c[0]), apply_fn(b, c[1]): a, b = c The following will result in apply_fn(a, (b, c)): a = b, c It uses the visitor pattern to allow subclasses to process single assignments individually. Args: targets: Union[List[ast.AST, ...], Tuple[ast.AST, ...], ast.AST, should be used with the targets field of an ast.Assign node values: ast.AST apply_fn: Callable[[ast.AST, ast.AST], None], called with the respective nodes of each single assignment",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\autograph\\pyct\\ast_util.py",
    "ast_data": "FunctionDef name:apply_to_single_assignments arg:targets arg:values arg:apply_fn arguments arg arg arg If Call Assign For If Call For Call Call Assign If Call Assign Assign Call Call Assign Call Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "default_value",
    "source_code": "@property\ndef default_value(self):\n    return self._default_value",
    "docstring": "The default value of the table.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\lookup_ops.py",
    "ast_data": "FunctionDef name:default_value arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "apply_to_operation",
    "source_code": "def apply_to_operation(self, operation):\n    attr_value = attr_value_pb2.AttrValue(s=self._proto.SerializeToString())\n    operation._set_attr('_XlaSharding', attr_value)",
    "docstring": "Applies this Sharding attribute to . Args: operation: A tf.Operation to add sharding annotation.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\compiler\\xla\\experimental\\xla_sharding.py",
    "ast_data": "FunctionDef name:apply_to_operation arg:self arg:operation arguments arg arg Assign Call Call Call"
  },
  {
    "library": "django",
    "name": "update",
    "source_code": "def update(self, response):\n    self._prepare_messages(self._queued_messages)\n    if self.used:\n        return self._store(self._queued_messages, response)\n    elif self.added_new:\n        messages = self._loaded_messages + self._queued_messages\n        return self._store(messages, response)",
    "docstring": "Store all unread messages. If the backend has yet to be iterated, store previously stored messages again. Otherwise, only store messages added after the last iteration.",
    "type": "method",
    "file_path": "django\\django\\contrib\\messages\\storage\\base.py",
    "ast_data": "FunctionDef name:update arg:self arg:response arguments arg arg Call If Return return:yes Call If Assign Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "_copy",
    "source_code": "def _copy(self, system):\n    self.num = system.num\n    self.den = system.den",
    "docstring": "Copy the parameters of another object Parameters ---------- system : The system that is to be copied",
    "type": "method",
    "file_path": "scipy\\scipy\\signal\\_ltisys.py",
    "ast_data": "FunctionDef name:_copy arg:self arg:system arguments arg arg Assign Assign"
  },
  {
    "library": "scikit-learn",
    "name": "_inverse_permutation",
    "source_code": "def _inverse_permutation(p):\n    n = p.size\n    s = np.zeros(n, dtype=np.int32)\n    i = np.arange(n, dtype=np.int32)\n    np.put(s, p, i)\n    return s",
    "docstring": "Inverse permutation p.",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\datasets\\_rcv1.py",
    "ast_data": "FunctionDef name:_inverse_permutation arg:p arguments arg Assign Assign Call Assign Call Call Return return:yes"
  },
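`_inverse_permutation` builds the inverse via `np.put(s, p, i)`, scattering each source index to its destination. A quick check that composing the two permutations yields the identity:

```python
import numpy as np

def inverse_permutation(p):
    # s[p[i]] = i, so s maps each destination back to its source index.
    n = p.size
    s = np.zeros(n, dtype=np.int32)
    np.put(s, p, np.arange(n, dtype=np.int32))
    return s

p = np.array([2, 0, 3, 1])
s = inverse_permutation(p)
print(s)     # [1 3 0 2]
print(p[s])  # [0 1 2 3]: applying p after s is the identity
```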
  {
    "library": "tensorflow",
    "name": "_indexed_slices_to_tensor",
    "source_code": "def _indexed_slices_to_tensor(value, dtype=None, name=None, as_ref=False):\n    _ = as_ref\n    if dtype and (not dtype.is_compatible_with(value.dtype)):\n        raise ValueError(f'Incompatible tensor conversion requested to `dtype` {dtype.name} for IndexedSlices ({value}) with dtype {value.dtype.name}')\n    if value.dense_shape is None:\n        raise ValueError(f'Tensor conversion requested for IndexedSlices for argument `value` without dense_shape: {value!s}')\n    if not context.executing_eagerly():\n        dense_shape_value = tensor_util.constant_value(value.dense_shape)\n        if dense_shape_value is not None:\n            num_elements = np.prod(dense_shape_value)\n            if num_elements >= _LARGE_SPARSE_NUM_ELEMENTS:\n                warnings.warn('Converting sparse IndexedSlices to a dense Tensor with %d elements. This may consume a large amount of memory.' % num_elements)\n    return gen_math_ops.unsorted_segment_sum(value.values, value.indices, value.dense_shape[0], name=name)",
    "docstring": "Converts an IndexedSlices object to a Tensor. NOTE(mrry): This function is potentially expensive. Args: value: An ops.IndexedSlices object. dtype: The dtype of the Tensor to be returned. name: Optional name to use for the returned Tensor. as_ref: True if a ref is requested. Returns: A dense Tensor representing the values in the given IndexedSlices. Raises: ValueError: If the IndexedSlices does not have the same dtype.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\indexed_slices.py",
    "ast_data": "FunctionDef name:_indexed_slices_to_tensor arg:value arg:dtype arg:name arg:as_ref arguments arg arg arg arg Assign If BoolOp Call Raise Call If Compare Raise Call If Call Assign Call If Compare Assign Call If Compare Call Return return:yes Call"
  },
  {
    "library": "sphinx",
    "name": "visit_option_argument",
    "source_code": "def visit_option_argument(self, node: Element) -> None:\n    self.body.append(node.get('delimiter', ' '))",
    "docstring": "The delimiter between an option and its argument.",
    "type": "method",
    "file_path": "sphinx\\sphinx\\writers\\latex.py",
    "ast_data": "FunctionDef name:visit_option_argument arg:self arg:node arguments arg arg Call Call"
  },
  {
    "library": "pandas",
    "name": "main",
    "source_code": "def main(function: Callable[[IO[str]], Iterable[tuple[int, str]]], source_path: str, output_format: str) -> bool:\n    is_failed: bool = False\n    for file_path in source_path:\n        with open(file_path, encoding='utf-8') as file_obj:\n            for line_number, msg in function(file_obj):\n                is_failed = True\n                print(output_format.format(source_path=file_path, line_number=line_number, msg=msg))\n    return is_failed",
    "docstring": "Main entry point of the script. Parameters ---------- function : Callable Function to execute for the specified validation type. source_path : str Source path representing path to a file/directory. output_format : str Output format of the error message. file_extensions_to_check : str Comma separated values of what file extensions to check. excluded_file_paths : str Comma separated values of what file paths to exclude during the check. Returns ------- bool True if found any patterns are found related to the given function. Raises ------ ValueError If the is not pointing to existing file/directory.",
    "type": "function",
    "file_path": "pandas\\scripts\\validate_unwanted_patterns.py",
    "ast_data": "FunctionDef name:main arg:function arg:source_path arg:output_format arguments arg arg arg For With Call For Call Assign Call Call Return return:yes"
  },
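The `main` entry above treats each validator as a callable that takes an open file object and yields `(line_number, msg)` pairs. A sketch of that contract with a hypothetical `bare_todo` validator, driven by `io.StringIO` instead of a real file:

```python
import io
from collections.abc import Iterable

def bare_todo(file_obj) -> Iterable[tuple[int, str]]:
    # Hypothetical validator matching the contract main() expects:
    # consume an open file object, yield (line_number, msg) pairs.
    for lineno, line in enumerate(file_obj, start=1):
        if "TODO" in line:
            yield lineno, "TODO found; file an issue instead"

src = io.StringIO("x = 1\n# TODO: remove\n")
for lineno, msg in bare_todo(src):
    print(f"demo.py:{lineno}:{msg}")
# demo.py:2:TODO found; file an issue instead
```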
  {
    "library": "scipy",
    "name": "reset_cache",
    "source_code": "def reset_cache(self):\n    self._moment_raw_cache = {}\n    self._moment_central_cache = {}\n    self._moment_standardized_cache = {}\n    self._support_cache = None\n    self._method_cache = {}\n    self._constant_cache = None",
    "docstring": "Clear all cached values. To improve the speed of some calculations, the distribution's support and moments are cached. This function is called automatically whenever the distribution parameters are updated.",
    "type": "method",
    "file_path": "scipy\\scipy\\stats\\_distribution_infrastructure.py",
    "ast_data": "FunctionDef name:reset_cache arg:self arguments arg Assign Assign Assign Assign Assign Assign"
  },
  {
    "library": "django",
    "name": "name",
    "source_code": "@property\ndef name(self):\n    return force_str(capi.get_driver_description(self.ptr))",
    "docstring": "Return description/name string for this driver.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\gdal\\driver.py",
    "ast_data": "FunctionDef name:name arg:self arguments arg Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "_all_gather_base",
    "source_code": "@_exception_logger\n@deprecated('`torch.distributed._all_gather_base` is a private function and will be deprecated. Please use `torch.distributed.all_gather_into_tensor` instead.', category=FutureWarning)\ndef _all_gather_base(output_tensor, input_tensor, group=None, async_op=False):\n    return all_gather_into_tensor(output_tensor, input_tensor, group, async_op)",
    "docstring": "Single tensor all gather. Gathers a single tensor from all ranks, and puts them in a single output tensor. Args: output_tensor (Tensor): Output tensor. It should contain correctly-sized tensors to be used for output of the collective. input_tensor (Tensor): Tensor to be broadcast from current process. group (ProcessGroup, optional): The process group to work on. If None, the default process group will be used. async_op (bool, optional): Whether this op should be an async op Returns: Async work handle, if async_op is set to True. None, if not async_op or if not part of the group .. warning:: is a private function. Users should use instead.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\distributed_c10d.py",
    "ast_data": "FunctionDef name:_all_gather_base arg:output_tensor arg:input_tensor arg:group arg:async_op arguments arg arg arg arg Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "save",
    "source_code": "def save(self, save_path, options=None):\n    save_start_time = time.time()\n    if not self._initialized:\n        self._ensure_initialized()\n    else:\n        self._queue.join()\n        self._copy_to_cpu()\n    self._check_async_thread_error()\n    save_counter = self.checkpointer().save_counter.numpy() + 1\n    full_path = '{}-{}'.format(save_path, save_counter)\n    context.async_wait()\n    self._save_file_prefix = save_path\n    self._use_checkpoint_save = True\n    self._checkpoint_options = copy.copy(options) if options else None\n    if self._checkpoint_options:\n        self._checkpoint_options.experimental_enable_async_checkpoint = False\n    self._queue.put(True)\n    save_end_time = time.time()\n    metrics.AddCheckpointWriteDuration(api_label=_ASYNC_CHECKPOINT, microseconds=_get_duration_microseconds(save_start_time, save_end_time))\n    return full_path",
    "docstring": "Save the checkpointed variables. Args: save_path: The file prefix of the checkpoint file. options: Optional CheckpointOption instance. Returns: The full path of the checkpoint file.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\checkpoint\\async_checkpoint_helper.py",
    "ast_data": "FunctionDef name:save arg:self arg:save_path arg:options arguments arg arg arg Assign Call If Call Call Call Call Assign Call Call Assign Call Call Assign Assign Assign Call If Assign Call Assign Call Call Call Return return:yes"
  },
  {
    "library": "pandas",
    "name": "lexsort_indexer",
    "source_code": "def lexsort_indexer(keys: Sequence[ArrayLike | Index | Series], orders=None, na_position: str='last', key: Callable | None=None, codes_given: bool=False) -> npt.NDArray[np.intp]:\n    from pandas.core.arrays import Categorical\n    if na_position not in ['last', 'first']:\n        raise ValueError(f'invalid na_position: {na_position}')\n    if isinstance(orders, bool):\n        orders = itertools.repeat(orders, len(keys))\n    elif orders is None:\n        orders = itertools.repeat(True, len(keys))\n    else:\n        orders = reversed(orders)\n    labels = []\n    for k, order in zip(reversed(keys), orders):\n        k = ensure_key_mapped(k, key)\n        if codes_given:\n            codes = cast(np.ndarray, k)\n            n = codes.max() + 1 if len(codes) else 0\n        else:\n            cat = Categorical(k, ordered=True)\n            codes = cat.codes\n            n = len(cat.categories)\n        mask = codes == -1\n        if na_position == 'last' and mask.any():\n            codes = np.where(mask, n, codes)\n        if not order:\n            codes = np.where(mask, codes, n - codes - 1)\n        labels.append(codes)\n    return np.lexsort(labels)",
    "docstring": "Performs lexical sorting on a set of keys Parameters ---------- keys : Sequence[ArrayLike | Index | Series] Sequence of arrays to be sorted by the indexer Sequence[Series] is only if key is not None. orders : bool or list of booleans, optional Determines the sorting order for each element in keys. If a list, it must be the same length as keys. This determines whether the corresponding element in keys should be sorted in ascending (True) or descending (False) order. if bool, applied to all elements as above. if None, defaults to True. na_position : {'first', 'last'}, default 'last' Determines placement of NA elements in the sorted list (\"last\" or \"first\") key : Callable, optional Callable key function applied to every element in keys before sorting codes_given: bool, False Avoid categorical materialization if codes are already provided. Returns ------- np.ndarray[np.intp]",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\sorting.py",
    "ast_data": "FunctionDef name:lexsort_indexer arg:keys arg:orders arg:na_position arg:key arg:codes_given arguments arg arg arg arg arg If Compare Raise Call If Call Assign Call Call If Compare Assign Call Call Assign Call Assign For Call Call Assign Call If Assign Call Assign Call Call Assign Call Assign Assign Call Assign Compare If BoolOp Compare Call Assign Call If Assign Call Call Return return:yes Call"
  },
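`lexsort_indexer` ultimately calls `np.lexsort`, which treats the *last* key as the primary sort key; that is why the function iterates `reversed(keys)`. A small demonstration:

```python
import numpy as np

# np.lexsort sorts by the LAST key first, which is why lexsort_indexer
# builds its label list from reversed(keys).
first = np.array([1, 0, 0, 1])   # primary key
second = np.array([3, 2, 1, 0])  # tie-breaker
order = np.lexsort((second, first))
print(order)                       # [2 1 3 0]
print(first[order], second[order]) # [0 0 1 1] [1 2 0 3]
```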
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, values, row_splits):\n    if not (isinstance(row_splits, (np.ndarray, np.generic)) and row_splits.dtype in (np.int64, np.int32) and (row_splits.ndim == 1)):\n        raise TypeError('row_splits must be a 1D int32 or int64 numpy array')\n    if not isinstance(values, (np.ndarray, np.generic, RaggedTensorValue)):\n        raise TypeError('values must be a numpy array or a RaggedTensorValue')\n    if isinstance(values, RaggedTensorValue) and row_splits.dtype != values.row_splits.dtype:\n        raise ValueError('row_splits and values.row_splits must have the same dtype')\n    self._values = values\n    self._row_splits = row_splits",
    "docstring": "Creates a . Args: values: A numpy array of any type and shape; or a RaggedTensorValue. row_splits: A 1-D int32 or int64 numpy array.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\ragged_tensor_value.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:values arg:row_splits arguments arg arg arg If BoolOp Call Compare Compare Raise Call If Call Raise Call If BoolOp Call Compare Raise Call Assign Assign"
  },
  {
    "library": "matplotlib",
    "name": "__init__",
    "source_code": "def __init__(self, angleA=90, angleB=0):\n    self.angleA = angleA\n    self.angleB = angleB",
    "docstring": "Parameters ---------- angleA : float Starting angle of the path. angleB : float Ending angle of the path.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\patches.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:angleA arg:angleB arguments arg arg arg Assign Assign"
  },
  {
    "library": "tensorflow",
    "name": "_assert_float_dtype",
    "source_code": "def _assert_float_dtype(dtype):\n    if not dtype.is_floating:\n        raise ValueError(f'Argument `dtype` is expected to be floating point. Received: {dtype}.')\n    return dtype",
    "docstring": "Validate and return floating point type based on . must be a floating point type. Args: dtype: The data type to validate. Returns: Validated type. Raises: ValueError: if is not a floating point type.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\init_ops.py",
    "ast_data": "FunctionDef name:_assert_float_dtype arg:dtype arguments arg If Raise Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "add_push_null_call_function_ex",
    "source_code": "def add_push_null_call_function_ex(inst_or_insts: Union[Instruction, list[Instruction]]) -> list[Instruction]:\n    if isinstance(inst_or_insts, Instruction):\n        insts = [inst_or_insts]\n    else:\n        insts = inst_or_insts\n    if sys.version_info < (3, 11):\n        return insts\n    idx = -1 if sys.version_info >= (3, 13) else 0\n    if insts[idx].opname == 'LOAD_GLOBAL':\n        assert insts[idx].arg is not None\n        if insts[idx].arg & 1 == 0:\n            insts[idx].arg |= 1\n            return insts\n    if sys.version_info >= (3, 13):\n        insts = insts + [create_instruction('PUSH_NULL')]\n    else:\n        insts = [create_instruction('PUSH_NULL')] + insts\n    return insts",
    "docstring": "Like add_push_null, but the low bit of LOAD_ATTR/LOAD_SUPER_ATTR is not set, due to an expected CALL_FUNCTION_EX instruction.",
    "type": "function",
    "file_path": "pytorch\\torch\\_dynamo\\bytecode_transformation.py",
    "ast_data": "FunctionDef name:add_push_null_call_function_ex arg:inst_or_insts arguments arg If Call Assign Assign If Compare Return return:yes Assign Compare If Compare Compare If Compare Return return:yes If Compare Assign Call Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "locals_in_original_context",
    "source_code": "def locals_in_original_context(caller_fn_scope):\n    return _find_originating_frame(caller_fn_scope, innermost=True).f_locals",
    "docstring": "Executes the locals function in the context of a specified function.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\autograph\\operators\\py_builtins.py",
    "ast_data": "FunctionDef name:locals_in_original_context arg:caller_fn_scope arguments arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "batch_reduce_implementation",
    "source_code": "@doc_controls.for_subclass_implementers\ndef batch_reduce_implementation(self, reduce_op, value_destination_pairs, options):\n    raise NotImplementedError('batch_reduce_implementation method must be implemented in descendants.')",
    "docstring": "Implementation of . Overriding this method is useful for subclass implementers. Args: reduce_op: a specifying how values should be combined. value_destination_pairs: a sequence of (value, destinations) pairs. See for descriptions. options: a . See for details. Returns: A list of or , one per pair in . Raises: ValueError: if is not an iterable of tuples of and destinations.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\cross_device_ops.py",
    "ast_data": "FunctionDef name:batch_reduce_implementation arg:self arg:reduce_op arg:value_destination_pairs arg:options arguments arg arg arg arg Raise Call"
  },
  {
    "library": "pytorch",
    "name": "propagate_line_nums",
    "source_code": "def propagate_line_nums(instructions):\n    cur_line_no = None\n\n    def populate_line_num(inst):\n        nonlocal cur_line_no\n        if inst.starts_line:\n            cur_line_no = inst.starts_line\n        inst.starts_line = cur_line_no\n    for inst in instructions:\n        populate_line_num(inst)",
    "docstring": "Ensure every instruction has line number set in case some are removed",
    "type": "function",
    "file_path": "pytorch\\torch\\_dynamo\\bytecode_analysis.py",
    "ast_data": "FunctionDef name:propagate_line_nums arg:instructions arguments arg Assign FunctionDef name:populate_line_num arg:inst arguments arg If Assign Assign For Call"
  },
  {
    "library": "pandas",
    "name": "all_indexes_same",
    "source_code": "def all_indexes_same(indexes) -> bool:\n    itr = iter(indexes)\n    first = next(itr)\n    return all((first.equals(index) for index in itr))",
    "docstring": "Determine if all indexes contain the same elements. Parameters ---------- indexes : iterable of Index objects Returns ------- bool True if all indexes contain the same elements, False otherwise.",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\indexes\\api.py",
    "ast_data": "FunctionDef name:all_indexes_same arg:indexes arguments arg Assign Call Assign Call Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "_alloc_padded_unsharded_flat_param",
    "source_code": "def _alloc_padded_unsharded_flat_param(self):\n    self._check_sharded_strategy()\n    flat_param = self.flat_param\n    unsharded_flat_param = self._get_padded_unsharded_flat_param()\n    self._check_storage_freed(unsharded_flat_param)\n    _alloc_storage(unsharded_flat_param, flat_param._padded_unsharded_size)\n    return unsharded_flat_param",
    "docstring": "Allocate the *padded* unsharded flat parameter. The unpadded unsharded flat parameter is always a view into the padded one. This padded parameter is saved to a different attribute on the `` depending on if we force full precision.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\fsdp\\_flat_param.py",
    "ast_data": "FunctionDef name:_alloc_padded_unsharded_flat_param arg:self arguments arg Call Assign Assign Call Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "call",
    "source_code": "def call(self, inputs, state):\n    _check_rnn_cell_input_dtypes([inputs, state])\n    gate_inputs = math_ops.matmul(array_ops.concat([inputs, state], 1), self._kernel)\n    gate_inputs = nn_ops.bias_add(gate_inputs, self._bias)\n    output = self._activation(gate_inputs)\n    return (output, output)",
    "docstring": "Most basic RNN: output = new_state = act(W * input + U * state + B).",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\layers\\legacy_rnn\\rnn_cell_impl.py",
    "ast_data": "FunctionDef name:call arg:self arg:inputs arg:state arguments arg arg arg Call Assign Call Call Assign Call Assign Call Return return:yes"
  },
  {
    "library": "numpy",
    "name": "common_fill_value",
    "source_code": "def common_fill_value(a, b):\n    t1 = get_fill_value(a)\n    t2 = get_fill_value(b)\n    if t1 == t2:\n        return t1\n    return None",
    "docstring": "Return the common filling value of two masked arrays, if any. If ``, return the fill value, otherwise return None. Parameters ---------- a, b : MaskedArray The masked arrays for which to compare fill values. Returns ------- fill_value : scalar or None The common fill value, or None. Examples -------- >>> import numpy as np >>> x = np.ma.array([0, 1.], fill_value=3) >>> y = np.ma.array([0, 1.], fill_value=3) >>> np.ma.common_fill_value(x, y) 3.0",
    "type": "function",
    "file_path": "numpy\\numpy\\ma\\core.py",
    "ast_data": "FunctionDef name:common_fill_value arg:a arg:b arguments arg arg Assign Call Assign Call If Compare Return return:yes Return return:no"
  },
  {
    "library": "tensorflow",
    "name": "_getfullargspec",
    "source_code": "def _getfullargspec(target):\n    return _convert_maybe_argspec_to_fullargspec(getargspec(target))",
    "docstring": "A python2 version of getfullargspec. Args: target: the target object to inspect. Returns: A FullArgSpec with empty kwonlyargs, kwonlydefaults and annotations.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\util\\tf_inspect.py",
    "ast_data": "FunctionDef name:_getfullargspec arg:target arguments arg Return return:yes Call Call"
  },
  {
    "library": "scipy",
    "name": "sf",
    "source_code": "def sf(self, x, *args, **kwds):\n    args, loc, scale = self._parse_args(*args, **kwds)\n    x, loc, scale = map(asarray, (x, loc, scale))\n    args = tuple(map(asarray, args))\n    _a, _b = self._get_support(*args)\n    dtyp = np.promote_types(x.dtype, np.float64)\n    x = np.asarray((x - loc) / scale, dtype=dtyp)\n    cond0 = self._argcheck(*args) & (scale > 0)\n    cond1 = self._open_support_mask(x, *args) & (scale > 0)\n    cond2 = cond0 & (x <= _a)\n    cond = cond0 & cond1\n    output = zeros(shape(cond), dtyp)\n    place(output, 1 - cond0 + np.isnan(x), self.badvalue)\n    place(output, cond2, 1.0)\n    if np.any(cond):\n        goodargs = argsreduce(cond, *(x,) + args)\n        place(output, cond, self._sf(*goodargs))\n    if output.ndim == 0:\n        return output[()]\n    return output",
    "docstring": "Survival function (1 - ) at x of the given RV. Parameters ---------- x : array_like quantiles arg1, arg2, arg3,... : array_like The shape parameter(s) for the distribution (see docstring of the instance object for more information) loc : array_like, optional location parameter (default=0) scale : array_like, optional scale parameter (default=1) Returns ------- sf : array_like Survival function evaluated at x",
    "type": "method",
    "file_path": "scipy\\scipy\\stats\\_distn_infrastructure.py",
    "ast_data": "FunctionDef name:sf arg:self arg:x arguments arg arg arg arg Assign Call Assign Call Assign Call Call Assign Call Assign Call Assign Call Assign Call Compare Assign Call Compare Assign Compare Assign Assign Call Call Call Call Call If Call Assign Call Call Call If Compare Return return:yes Return return:yes"
  },
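The `sf` entry computes the survival function directly rather than as `1 - cdf`, which matters numerically in the far tail. A quick comparison using `scipy.stats.norm`:

```python
import numpy as np
from scipy import stats

x = np.array([-1.0, 0.0, 2.0])
print(stats.norm.sf(x))       # survival function, 1 - cdf
print(1 - stats.norm.cdf(x))  # same values here
# In the far tail, 1 - cdf underflows to 0.0 while sf keeps precision:
print(stats.norm.sf(10), 1 - stats.norm.cdf(10))
```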
  {
    "library": "django",
    "name": "ViewDoesNotExist",
    "source_code": "class ViewDoesNotExist(Exception):\n    pass",
    "docstring": "The requested view does not exist",
    "type": "class",
    "file_path": "django\\django\\core\\exceptions.py",
    "ast_data": "ClassDef name:ViewDoesNotExist"
  },
  {
    "library": "tensorflow",
    "name": "keras_mode_combinations",
    "source_code": "def keras_mode_combinations(mode=None, run_eagerly=None):\n    if mode is None:\n        mode = ['eager'] if tf2.enabled() else ['graph', 'eager']\n    if run_eagerly is None:\n        run_eagerly = [True, False]\n    result = []\n    if 'eager' in mode:\n        result += combinations.combine(mode=['eager'], run_eagerly=run_eagerly)\n    if 'graph' in mode:\n        result += combinations.combine(mode=['graph'], run_eagerly=[False])\n    return result",
    "docstring": "Returns the default test combinations for tf.keras tests. Note that if tf2 is enabled, then v1 session test will be skipped. Args: mode: List of modes to run the tests. The valid options are 'graph' and 'eager'. Default to ['graph', 'eager'] if not specified. If a empty list is provide, then the test will run under the context based on tf's version, eg graph for v1 and eager for v2. run_eagerly: List of value to be run with the tests. Default to [True, False] if not specified. Note that for mode, run_eagerly value will only be False. Returns: A list contains all the combinations to be used to generate test cases.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\combinations.py",
    "ast_data": "FunctionDef name:keras_mode_combinations arg:mode arg:run_eagerly arguments arg arg If Compare Assign Call If Compare Assign Assign If Compare Call If Compare Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "combine_modular_indexing_pairs",
    "source_code": "def combine_modular_indexing_pairs(self, index: sympy.Expr) -> sympy.Expr:\n\n    def _check_args(x, div, mod, is_first):\n        if not isinstance(div, sympy.Integer) or not isinstance(mod, sympy.Integer):\n            return False\n        if div != 1:\n            return False\n        if mod <= 0:\n            return False\n        if is_first:\n            if not isinstance(x, ModularIndexing):\n                return False\n        elif not isinstance(x, sympy.Symbol) or not self.statically_known_geq(x, 0):\n            return False\n        return True\n    if isinstance(index, ModularIndexing):\n        x, div, mod = index.args\n        if not _check_args(x, div, mod, True):\n            return index\n        x2, div2, mod2 = x.args\n        if not _check_args(x2, div2, mod2, False):\n            return index\n        if mod2 % mod != 0:\n            return index\n        return ModularIndexing(x2, 1, mod)\n    return index",
    "docstring": "A pair of special ModularIndexing can be combined. E.g. ModularIndexing(ModularIndexing(x, 1, a), 1, b) We can simplify this to ModuleIndexing(x, 1, b), if 1. x is non negative integer 2. a and b are positive integers 3. a is a multiple of b.",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\sizevars.py",
    "ast_data": "FunctionDef name:combine_modular_indexing_pairs arg:self arg:index arguments arg arg FunctionDef name:_check_args arg:x arg:div arg:mod arg:is_first arguments arg arg arg arg If BoolOp Call Call Return return:yes If Compare Return return:yes If Compare Return return:yes If If Call Return return:yes If BoolOp Call Call Return return:yes Return return:yes If Call Assign If Call Return return:yes Assign If Call Return return:yes If Compare Return return:yes Return return:yes Call Return return:yes"
  },
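The rewrite in `combine_modular_indexing_pairs` is ordinary modular arithmetic: `(x % a) % b == x % b` for non-negative `x` whenever `a` is a multiple of `b`. A brute-force check in plain Python:

```python
# Brute-force check of the identity behind the rewrite:
# ModularIndexing(ModularIndexing(x, 1, a), 1, b) -> ModularIndexing(x, 1, b)
# i.e. (x % a) % b == x % b for non-negative x when b divides a.
for a in range(1, 30):
    for b in range(1, 30):
        if a % b != 0:
            continue  # the rule only fires when a is a multiple of b
        assert all((x % a) % b == x % b for x in range(200))
print("identity holds whenever a % b == 0")
```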
  {
    "library": "tensorflow",
    "name": "_pad",
    "source_code": "def _pad(x):\n    shape = array_ops.concat([array_ops.shape(x)[:-1], [1]], axis=0)\n    z = array_ops.zeros(shape, dtype=x.dtype)\n    return array_ops.concat([z, x, z], axis=-1)",
    "docstring": "Prepends and appends a zero to every vector in a batch of vectors.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\distributions\\util.py",
    "ast_data": "FunctionDef name:_pad arg:x arguments arg Assign Call Call Assign Call Return return:yes Call"
  },
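`_pad` concatenates a zeros column before and after the last axis. A numpy analogue of the same shape manipulation (ours, for illustration; the original uses TensorFlow's `array_ops`):

```python
import numpy as np

def pad_np(x):
    # NumPy analogue of _pad: prepend and append one zero to every
    # vector along the last axis of a batch.
    z = np.zeros(x.shape[:-1] + (1,), dtype=x.dtype)
    return np.concatenate([z, x, z], axis=-1)

batch = np.array([[1.0, 2.0], [3.0, 4.0]])
print(pad_np(batch))
# [[0. 1. 2. 0.]
#  [0. 3. 4. 0.]]
```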
  {
    "library": "tensorflow",
    "name": "_SparseReorderGrad",
    "source_code": "@ops.RegisterGradient('SparseReorder')\ndef _SparseReorderGrad(op: ops.Operation, unused_output_indices_grad, output_values_grad):\n    input_indices = op.inputs[0]\n    input_shape = op.inputs[2]\n    num_entries = array_ops.shape(input_indices)[0]\n    entry_indices = math_ops.range(num_entries)\n    sp_unordered = sparse_tensor.SparseTensor(input_indices, entry_indices, input_shape)\n    sp_ordered = sparse_ops.sparse_reorder(sp_unordered)\n    inverted_permutation = array_ops.invert_permutation(sp_ordered.values)\n    return (None, array_ops.gather(output_values_grad, inverted_permutation), None)",
    "docstring": "Gradients for the SparseReorder op. Args: op: the SparseReorder op unused_output_indices_grad: the incoming gradients of the output indices output_values_grad: the incoming gradients of the output values Returns: Gradient for each of the 3 input tensors: (input_indices, input_values, input_shape) The gradients for input_indices and input_shape is None.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\sparse_grad.py",
    "ast_data": "FunctionDef name:_SparseReorderGrad arg:op arg:unused_output_indices_grad arg:output_values_grad arguments arg arg arg Assign Assign Assign Call Assign Call Assign Call Assign Call Assign Call Return return:yes Call Call"
  },
  {
    "library": "matplotlib",
    "name": "locked_x1",
    "source_code": "@property\ndef locked_x1(self):\n    if self._locked_points.mask[1, 0]:\n        return None\n    else:\n        return self._locked_points[1, 0]",
    "docstring": "float or None: The value used for the locked x1.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\transforms.py",
    "ast_data": "FunctionDef name:locked_x1 arg:self arguments arg If Return return:no Return return:yes"
  },
  {
    "library": "pandas",
    "name": "from_blocks",
    "source_code": "@classmethod\ndef from_blocks(cls, blocks: list[Block], axes: list[Index]) -> Self:\n    assert len(blocks) == 1\n    assert len(axes) == 1\n    return cls(blocks[0], axes[0], verify_integrity=False)",
    "docstring": "Constructor for BlockManager and SingleBlockManager with same signature.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\internals\\managers.py",
    "ast_data": "FunctionDef name:from_blocks arg:cls arg:blocks arg:axes arguments arg arg arg Compare Call Compare Call Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "get_indices",
    "source_code": "def get_indices(self, i):\n    rows = self.rows_[i]\n    columns = self.columns_[i]\n    return (np.nonzero(rows)[0], np.nonzero(columns)[0])",
    "docstring": "Row and column indices of the 'th bicluster. Only works if `` attributes exist. Parameters ---------- i : int The index of the cluster. Returns ------- row_ind : ndarray, dtype=np.intp Indices of rows in the dataset that belong to the bicluster. col_ind : ndarray, dtype=np.intp Indices of columns in the dataset that belong to the bicluster.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\base.py",
    "ast_data": "FunctionDef name:get_indices arg:self arg:i arguments arg arg Assign Assign Return return:yes Call Call"
  },
  {
    "library": "kornia",
    "name": "Homography",
    "source_code": "class Homography(BaseModel):\n\n    def __init__(self) -> None:\n        super().__init__()\n        self.model = nn.Parameter(torch.eye(3))\n        self.reset_model()\n\n    def __repr__(self) -> str:\n        return f'{self.__class__.__name__}({self.model})'\n\n    def reset_model(self) -> None:\n        torch.nn.init.eye_(self.model)\n\n    def forward(self) -> Tensor:\n        return torch.unsqueeze(self.model / self.model[2, 2], dim=0)\n\n    def forward_inverse(self) -> Tensor:\n        return torch.unsqueeze(torch.inverse(self.model), dim=0)",
    "docstring": "Homography geometric model to be used with ImageRegistrator for the optimization-based image registration.",
    "type": "class",
    "file_path": "kornia\\kornia\\geometry\\transform\\image_registrator.py",
    "ast_data": "ClassDef name:Homography FunctionDef name:__init__ arg:self arguments arg Call Call Assign Call Call Call FunctionDef name:__repr__ arg:self arguments arg Return return:yes FunctionDef name:reset_model arg:self arguments arg Call FunctionDef name:forward arg:self arguments arg Return return:yes Call FunctionDef name:forward_inverse arg:self arguments arg Return return:yes Call Call"
  },
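A minimal usage sketch for `Homography`, with the import path inferred from the entry's file_path (the public export may differ between kornia versions):

```python
import torch
# Import path inferred from kornia/geometry/transform/image_registrator.py;
# adjust if your kornia version exports it elsewhere.
from kornia.geometry.transform.image_registrator import Homography

h = Homography()             # parameters start at the identity matrix
H = h.forward()              # (1, 3, 3), normalized so H[0, 2, 2] == 1
H_inv = h.forward_inverse()  # (1, 3, 3) inverse transform
print(H.shape, torch.allclose(H[0] @ H_inv[0], torch.eye(3)))
```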
  {
    "library": "tensorflow",
    "name": "nested_row_lengths",
    "source_code": "def nested_row_lengths(self, name=None):\n    with ops.name_scope(name, 'RaggedNestedRowLengths', [self]):\n        rt_nested_row_lengths = []\n        rt = self\n        while isinstance(rt, RaggedTensor):\n            rt_nested_row_lengths.append(rt.row_lengths())\n            rt = rt.values\n        return tuple(rt_nested_row_lengths)",
    "docstring": "Returns a tuple containing the row_lengths for all ragged dimensions. is a tuple containing the tensors for all ragged dimensions in , ordered from outermost to innermost. Args: name: A name prefix for the returned tensors (optional). Returns: A of 1-D integer . The length of the tuple is equal to .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\ragged_tensor.py",
    "ast_data": "FunctionDef name:nested_row_lengths arg:self arg:name arguments arg arg With Call Assign Assign While Call Call Call Assign Return return:yes Call"
  },
  {
    "library": "numpy",
    "name": "partition",
    "source_code": "@set_module('numpy.strings')\n@array_function_dispatch(_partition_dispatcher)\ndef partition(a, sep):\n    a = np.asanyarray(a)\n    sep = np.asanyarray(sep)\n    if np.result_type(a, sep).char == 'T':\n        return _partition(a, sep)\n    sep = sep.astype(a.dtype, copy=False)\n    pos = _find_ufunc(a, sep, 0, MAX)\n    a_len = str_len(a)\n    sep_len = str_len(sep)\n    not_found = pos < 0\n    buffersizes1 = np.where(not_found, a_len, pos)\n    buffersizes3 = np.where(not_found, 0, a_len - pos - sep_len)\n    out_dtype = ','.join([f'{a.dtype.char}{n}' for n in (buffersizes1.max(), 1 if np.all(not_found) else sep_len.max(), buffersizes3.max())])\n    shape = np.broadcast_shapes(a.shape, sep.shape)\n    out = np.empty_like(a, shape=shape, dtype=out_dtype)\n    return _partition_index(a, sep, pos, out=(out['f0'], out['f1'], out['f2']))",
    "docstring": "Partition each element in `` dtype with the part after the separator See Also -------- str.partition Examples -------- >>> import numpy as np >>> x = np.array([\"Numpy is nice!\"]) >>> np.strings.partition(x, \" \") (array(['Numpy'], dtype='<U5'), array([' '], dtype='<U1'), array(['is nice!'], dtype='<U8'))",
    "type": "function",
    "file_path": "numpy\\numpy\\_core\\strings.py",
    "ast_data": "FunctionDef name:partition arg:a arg:sep arguments arg arg Assign Call Assign Call If Compare Call Return return:yes Call Assign Call Assign Call Assign Call Assign Call Assign Compare Assign Call Assign Call Assign Call Call Call Call Call Assign Call Assign Call Return return:yes Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "rename_axis",
    "source_code": "def rename_axis(model: ir.Model, rename_mapping: dict[str, str]) -> None:\n    sorted_rename_mapping = dict(sorted(rename_mapping.items(), key=lambda item: len(item[0]), reverse=True))\n    for value in _all_values(model):\n        if value.shape is None:\n            continue\n        new_shape = []\n        changed = False\n        for dim in value.shape:\n            if not isinstance(dim, ir.SymbolicDim):\n                new_shape.append(dim)\n                continue\n            dim_name = dim.value\n            if dim_name in sorted_rename_mapping:\n                new_shape.append(sorted_rename_mapping[dim_name])\n                changed = True\n            elif dim_name is not None:\n                new_name = _replace_names(dim_name, sorted_rename_mapping)\n                new_shape.append(new_name)\n                if new_name != dim_name:\n                    changed = True\n            else:\n                new_shape.append(None)\n        if changed:\n            value.shape = ir.Shape(new_shape)",
    "docstring": "Rename dynamic axes in a model according to the specified dynamic_axes names.",
    "type": "function",
    "file_path": "pytorch\\torch\\onnx\\_internal\\exporter\\_ir_passes.py",
    "ast_data": "FunctionDef name:rename_axis arg:model arg:rename_mapping arguments arg arg Assign Call Call Call arguments arg Call For Call If Compare Assign Assign For If Call Call Assign If Compare Call Assign If Compare Assign Call Call If Compare Assign Call If Assign Call"
  },
  {
    "library": "tensorflow",
    "name": "_register_mesh",
    "source_code": "def _register_mesh(self, mesh: layout_lib.Mesh):\n    with self._mesh_lock:\n        if mesh not in self._meshes:\n            _pywrap_dtensor_device.AddMesh(self._device_info, mesh.to_string(), False)\n            self._meshes.add(mesh)\n            if mesh.device_type().upper() == 'TPU':\n                logging.info('Registering virtual 1:1 mapped host mesh %s for mesh %s', mesh.host_mesh().to_string(), mesh.to_string())\n                _pywrap_dtensor_device.AddMesh(self._device_info, mesh.host_mesh().to_string(), True)\n                self._meshes.add(mesh.host_mesh())",
    "docstring": "Idempotently register with the dtensor device.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\dtensor\\python\\dtensor_device.py",
    "ast_data": "FunctionDef name:_register_mesh arg:self arg:mesh arguments arg arg With If Compare Call Call Call If Compare Call Call Call Call Call Call Call Call Call Call Call"
  },
  {
    "library": "scipy",
    "name": "Mishra05",
    "source_code": "class Mishra05(Benchmark):\n\n    def __init__(self, dimensions=2):\n        Benchmark.__init__(self, dimensions)\n        self._bounds = list(zip([-10.0] * self.N, [10.0] * self.N))\n        self.global_optimum = [[-1.98682, -10.0]]\n        self.fglob = -1.019829519930646\n\n    def fun(self, x, *args):\n        self.nfev += 1\n        return 0.01 * x[0] + 0.1 * x[1] + (sin((cos(x[0]) + cos(x[1])) ** 2) ** 2 + cos((sin(x[0]) + sin(x[1])) ** 2) ** 2 + x[0]) ** 2",
    "docstring": "Mishra 5 objective function. This class defines the Mishra 5 [1]_ global optimization problem. This is a multimodal minimization problem defined as follows: .. math:: f_{\\text{Mishra05}}(x) = \\left [ \\sin^2 ((\\cos(x_1) + \\cos(x_2))^2) + \\cos^2 ((\\sin(x_1) + \\sin(x_2))^2) + x_1 \\right ]^2 + 0.01(x_1 + x_2) with :math: for :math:. *Global optimum*: :math: for :math: .. [1] Mishra, S. Global Optimization by Differential Evolution and Particle Swarm Methods: Evaluation on Some Benchmark Functions. Munich Personal RePEc Archive, 2006, 1005 TODO Line 381 in paper",
    "type": "class",
    "file_path": "scipy\\benchmarks\\benchmarks\\go_benchmark_functions\\go_funcs_M.py",
    "ast_data": "ClassDef name:Mishra05 FunctionDef name:__init__ arg:self arg:dimensions arguments arg arg Call Assign Call Call Assign Assign FunctionDef name:fun arg:self arg:x arguments arg arg arg Return return:yes Call Call Call Call Call Call"
  },
  {
    "library": "kornia",
    "name": "intrinsics",
    "source_code": "@property\ndef intrinsics(self) -> Tensor:\n    if not self._check_valid_params(self._intrinsics, 'intrinsics'):\n        raise AssertionError\n    return self._intrinsics",
    "docstring": "The full 4x4 intrinsics matrix. Returns: tensor of shape :math:.",
    "type": "method",
    "file_path": "kornia\\kornia\\geometry\\camera\\pinhole.py",
    "ast_data": "FunctionDef name:intrinsics arg:self arguments arg If Call Raise Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_supports_insertion",
    "source_code": "def _supports_insertion(self, module: nn.Module) -> bool:\n    num_children = len(list(module.children()))\n    return num_children == 0 and (not _is_activation_post_process(module))",
    "docstring": "Returns whether the given module is supported for observers insertion Any module that doesn't have children and isn't an observer itself is supported Args module: The module to check and ensure is supported Returns True if the module is supported by observer, False otherwise",
    "type": "method",
    "file_path": "pytorch\\torch\\ao\\quantization\\fx\\_model_report\\detector.py",
    "ast_data": "FunctionDef name:_supports_insertion arg:self arg:module arguments arg arg Assign Call Call Call Return return:yes BoolOp Compare Call"
  },
  {
    "library": "django",
    "name": "__get__",
    "source_code": "def __get__(self, instance, cls=None):\n    if instance is None:\n        return self\n    data = instance.__dict__\n    field_name = self.field.attname\n    if field_name not in data:\n        val = self._check_parent_chain(instance)\n        if val is None:\n            if not instance._is_pk_set():\n                raise AttributeError(f'Cannot retrieve deferred field {field_name!r} from an unsaved model.')\n            instance.refresh_from_db(fields=[field_name])\n        else:\n            data[field_name] = val\n    return data[field_name]",
    "docstring": "Retrieve and caches the value from the datastore on the first lookup. Return the cached value.",
    "type": "method",
    "file_path": "django\\django\\db\\models\\query_utils.py",
    "ast_data": "FunctionDef name:__get__ arg:self arg:instance arg:cls arguments arg arg arg If Compare Return return:yes Assign Assign If Compare Assign Call If Compare If Call Raise Call Call Assign Return return:yes"
  },
  {
    "library": "sphinx",
    "name": "add_event",
    "source_code": "def add_event(self, name: str) -> None:\n    logger.debug('[app] adding event: %r', name)\n    self.events.add(name)",
    "docstring": "Register an event called *name*. This is needed to be able to emit it. :param name: The name of the event",
    "type": "method",
    "file_path": "sphinx\\sphinx\\application.py",
    "ast_data": "FunctionDef name:add_event arg:self arg:name arguments arg arg Call Call"
  },
  {
    "library": "tensorflow",
    "name": "add_metric",
    "source_code": "@doc_controls.for_subclass_implementers\ndef add_metric(self, value, aggregation=None, name=None):\n    if aggregation is not None and aggregation != 'mean':\n        raise ValueError('We currently support only `mean` sample-wise metric aggregation. You provided aggregation=`%s`' % aggregation)\n    from_metric_obj = hasattr(value, '_metric_obj')\n    is_symbolic = tf_utils.is_symbolic_tensor(value)\n    in_call_context = base_layer_utils.call_context().in_call\n    if name is None and (not from_metric_obj):\n        raise ValueError(\"Please provide a name for your metric like `self.add_metric(tf.reduce_sum(inputs), name='mean_activation', aggregation='mean')`\")\n    elif from_metric_obj:\n        name = value._metric_obj.name\n    if in_call_context:\n        self._symbolic_add_metric(value, aggregation, name)\n    else:\n        if not is_symbolic:\n            raise ValueError('Expected a symbolic Tensor for the metric value, received: ' + str(value))\n        if not getattr(self, '_is_graph_network', False):\n            with backend.get_graph().as_default():\n                self._symbolic_add_metric(value, aggregation, name)\n            return\n        if from_metric_obj:\n            raise ValueError('Using the result of calling a `Metric` object when calling `add_metric` on a Functional Model is not supported. Please pass the Tensor to monitor directly.')\n        self._graph_network_add_metric(value, aggregation, name)",
    "docstring": "Adds metric tensor to the layer. Args: value: Metric tensor. aggregation: Sample-wise metric reduction function. If , it indicates that the metric tensor provided has been aggregated already. eg, followed by . If aggregation='mean', the given metric tensor will be sample-wise reduced using function. eg, . name: String metric name. Raises: ValueError: If is anything other than None or .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\base_layer_v1.py",
    "ast_data": "FunctionDef name:add_metric arg:self arg:value arg:aggregation arg:name arguments arg arg arg arg If BoolOp Compare Compare Raise Call Assign Call Assign Call Assign Call If BoolOp Compare Raise Call If Assign If Call If Raise Call Call If Call With Call Call Call Return return:no If Raise Call Call"
  },
  {
    "library": "numpy",
    "name": "less_equal",
    "source_code": "@array_function_dispatch(_binary_op_dispatcher)\ndef less_equal(x1, x2):\n    return compare_chararrays(x1, x2, '<=', True)",
    "docstring": "Return (x1 >> import numpy as np >>> x1 = np.array(['a', 'b', 'c']) >>> np.char.less_equal(x1, 'b') array([ True, True, False])",
    "type": "function",
    "file_path": "numpy\\numpy\\_core\\defchararray.py",
    "ast_data": "FunctionDef name:less_equal arg:x1 arg:x2 arguments arg arg Return return:yes Call Call"
  },
  {
    "library": "matplotlib",
    "name": "_get_inverse_dash_pattern",
    "source_code": "def _get_inverse_dash_pattern(offset, dashes):\n    gaps = dashes[-1:] + dashes[:-1]\n    offset_gaps = offset + dashes[-1]\n    return (offset_gaps, gaps)",
    "docstring": "Return the inverse of the given dash pattern, for filling the gaps.",
    "type": "function",
    "file_path": "matplotlib\\lib\\matplotlib\\lines.py",
    "ast_data": "FunctionDef name:_get_inverse_dash_pattern arg:offset arg:dashes arguments arg arg Assign Assign Return return:yes"
  },
  {
    "library": "pandas",
    "name": "CSSWarning",
    "source_code": "class CSSWarning(UserWarning):\n    pass",
    "docstring": "Warning is raised when converting css styling fails. This can be due to the styling not having an equivalent value or because the styling isn't properly formatted. See Also -------- DataFrame.style : Returns a Styler object for applying CSS-like styles. io.formats.style.Styler : Helps style a DataFrame or Series according to the data with HTML and CSS. io.formats.style.Styler.to_excel : Export styled DataFrame to Excel. io.formats.style.Styler.to_html : Export styled DataFrame to HTML. Examples -------- >>> df = pd.DataFrame({\"A\": [1, 1, 1]}) >>> df.style.map(lambda x: \"background-color: blueGreenRed;\").to_excel( ... \"styled.xlsx\" ... ) # doctest: +SKIP CSSWarning: Unhandled color format: 'blueGreenRed' >>> df.style.map(lambda x: \"border: 1px solid red red;\").to_excel( ... \"styled.xlsx\" ... ) # doctest: +SKIP CSSWarning: Unhandled color format: 'blueGreenRed'",
    "type": "class",
    "file_path": "pandas\\pandas\\errors\\__init__.py",
    "ast_data": "ClassDef name:CSSWarning"
  },
  {
    "library": "django",
    "name": "serialize_result",
    "source_code": "def serialize_result(self, obj, to_field_name):\n    return {'id': str(getattr(obj, to_field_name)), 'text': str(obj)}",
    "docstring": "Convert the provided model object to a dictionary that is added to the results list.",
    "type": "method",
    "file_path": "django\\django\\contrib\\admin\\views\\autocomplete.py",
    "ast_data": "FunctionDef name:serialize_result arg:self arg:obj arg:to_field_name arguments arg arg arg Return return:yes Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "input",
    "source_code": "@property\ndef input(self):\n    if not self._inbound_nodes:\n        raise AttributeError('Layer ' + self.name + ' is not connected, no input to return.')\n    return self._get_node_attribute_at_index(0, 'input_tensors', 'input')",
    "docstring": "Retrieves the input tensor(s) of a layer. Only applicable if the layer has exactly one input, i.e. if it is connected to one incoming layer. Returns: Input tensor or list of input tensors. Raises: RuntimeError: If called in Eager mode. AttributeError: If no inbound nodes are found.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\base_layer_v1.py",
    "ast_data": "FunctionDef name:input arg:self arguments arg If Raise Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "validate_ragged_weights",
    "source_code": "def validate_ragged_weights(values, weights, dtype=None):\n    if weights is None:\n        if dtype:\n            return array_ops.constant([], dtype=dtype)\n        return array_ops.constant([], dtype=values.values.dtype)\n    if not isinstance(weights, ragged_tensor.RaggedTensor):\n        raise ValueError(f'`weights` must be a RaggedTensor if `values` is a RaggedTensor. Received argument weights={weights} of type: {type(weights).__name__}.')\n    checks = []\n    if weights.row_splits is not values.row_splits:\n        checks.append(check_ops.assert_equal(weights.row_splits, values.row_splits, message=\"'weights' and 'values' must have the same row splits.\"))\n    if checks:\n        with ops.control_dependencies(checks):\n            weights = array_ops.identity(weights.values)\n    else:\n        weights = weights.values\n    return weights",
    "docstring": "Validates the passed weight tensor or creates an empty one.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\ragged_bincount_ops.py",
    "ast_data": "FunctionDef name:validate_ragged_weights arg:values arg:weights arg:dtype arguments arg arg arg If Compare If Return return:yes Call Return return:yes Call If Call Raise Call Call Assign If Compare Call Call If With Call Assign Call Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "bessel_j0",
    "source_code": "@tf_export('math.special.bessel_j0')\n@dispatch.register_unary_elementwise_api\n@dispatch.add_dispatch_support\ndef bessel_j0(x, name=None):\n    with ops.name_scope(name, 'bessel_j0', [x]):\n        return gen_special_math_ops.bessel_j0(x)",
    "docstring": "Computes the Bessel j0 function of element-wise. Modified Bessel function of order 0. >>> tf.math.special.bessel_j0([0.5, 1., 2., 4.]).numpy() array([ 0.93846981, 0.76519769, 0.22389078, -0.39714981], dtype=float32) Args: x: A or . Must be one of the following types: , , . name: A name for the operation (optional). Returns: A or , respectively. Has the same type as . @compatibility(scipy) Equivalent to scipy.special.j0 @end_compatibility",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\special_math_ops.py",
    "ast_data": "FunctionDef name:bessel_j0 arg:x arg:name arguments arg arg With Call Return return:yes Call Call"
  },
  {
    "library": "django",
    "name": "_prepare_messages",
    "source_code": "def _prepare_messages(self, messages):\n    for message in messages:\n        message._prepare()",
    "docstring": "Prepare a list of messages for storage.",
    "type": "method",
    "file_path": "django\\django\\contrib\\messages\\storage\\base.py",
    "ast_data": "FunctionDef name:_prepare_messages arg:self arg:messages arguments arg arg For Call"
  },
  {
    "library": "tensorflow",
    "name": "IsLoopConstantEnter",
    "source_code": "def IsLoopConstantEnter(op):\n    return IsLoopEnter(op) and op.get_attr('is_constant')",
    "docstring": "Return true iff op is a loop invariant.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\control_flow_util.py",
    "ast_data": "FunctionDef name:IsLoopConstantEnter arg:op arguments arg Return return:yes BoolOp Call Call"
  },
  {
    "library": "django",
    "name": "_does_token_match",
    "source_code": "def _does_token_match(request_csrf_token, csrf_secret):\n    if len(request_csrf_token) == CSRF_TOKEN_LENGTH:\n        request_csrf_token = _unmask_cipher_token(request_csrf_token)\n    assert len(request_csrf_token) == CSRF_SECRET_LENGTH\n    return constant_time_compare(request_csrf_token, csrf_secret)",
    "docstring": "Return whether the given CSRF token matches the given CSRF secret, after unmasking the token if necessary. This function assumes that the request_csrf_token argument has been validated to have the correct length (CSRF_SECRET_LENGTH or CSRF_TOKEN_LENGTH characters) and allowed characters, and that if it has length CSRF_TOKEN_LENGTH, it is a masked secret.",
    "type": "function",
    "file_path": "django\\django\\middleware\\csrf.py",
    "ast_data": "FunctionDef name:_does_token_match arg:request_csrf_token arg:csrf_secret arguments arg arg If Compare Call Assign Call Compare Call Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "short_path",
    "source_code": "def short_path(path, cwd=None):\n    if not isinstance(path, str):\n        return path\n    if cwd is None:\n        cwd = os.getcwd()\n    abspath = os.path.abspath(path)\n    relpath = os.path.relpath(path, cwd)\n    if len(abspath) <= len(relpath):\n        return abspath\n    return relpath",
    "docstring": "Return relative or absolute path name, whichever is shortest.",
    "type": "function",
    "file_path": "scipy\\tools\\refguide_check.py",
    "ast_data": "FunctionDef name:short_path arg:path arg:cwd arguments arg arg If Call Return return:yes If Compare Assign Call Assign Call Assign Call If Compare Call Call Return return:yes Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_any_version_depends_on_gradient",
    "source_code": "def _any_version_depends_on_gradient(self) -> set[int]:\n    depends_on_gradient: set[int] = set()\n    while True:\n        start_size = len(depends_on_gradient)\n        for node in self._data_flow_graph.flow_nodes:\n            ids = tuple((key.id for key, (_, version) in node.inputs.items() if self._categories.get(key, version) in (Category.GRADIENT, Category.PARAMETER) or key.id in depends_on_gradient))\n            if ids:\n                depends_on_gradient.update(ids)\n                depends_on_gradient.update((key.id for key in node.outputs))\n        if len(depends_on_gradient) == start_size:\n            return depends_on_gradient",
    "docstring": "Extract IDs of Tensors which depend or will depend on a gradient. Note that this weakened definition of \"depends\" requires us to loop over the data flow graph multiple times because it allows dependency information to flow backward through edges and removes the guarantee that nodes are topologically sorted. (Or indeed, even that a valid topological order exists.) Put another way, we have converted an acyclic data flow graph into a cyclic graph and we are attempting to partition cycles involving a gradient from the rest of the graph.",
    "type": "method",
    "file_path": "pytorch\\torch\\profiler\\_memory_profiler.py",
    "ast_data": "FunctionDef name:_any_version_depends_on_gradient arg:self arguments arg Call While Assign Call For Assign Call Call BoolOp Compare Call Compare If Call Call If Compare Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "read_file",
    "source_code": "@tf_export('io.read_file', v1=['io.read_file', 'read_file'])\ndef read_file(filename, name=None):\n    return gen_io_ops.read_file(filename, name)",
    "docstring": "Reads the contents of file. This operation returns a tensor with the entire contents of the input filename. It does not do any parsing, it just returns the contents as they are. Usually, this is the first step in the input pipeline. Example: >>> with open(\"/tmp/file.txt\", \"w\") as f: ... f.write(\"asdf\") ... 4 >>> tf.io.read_file(\"/tmp/file.txt\") Example of using the op in a function to read an image, decode it and reshape the tensor containing the pixel data: >>> @tf.function ... def load_image(filename): ... raw = tf.io.read_file(filename) ... image = tf.image.decode_png(raw, channels=3) ... # the executes during tracing. ... print(\"Initial shape: \", image.shape) ... image.set_shape([28, 28, 3]) ... print(\"Final shape: \", image.shape) ... return image Args: filename: string. filename to read from. name: string. Optional name for the op. Returns: A tensor of dtype \"string\", with the file contents.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\io_ops.py",
    "ast_data": "FunctionDef name:read_file arg:filename arg:name arguments arg arg Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "LSTMSaliencyPruner",
    "source_code": "class LSTMSaliencyPruner(BaseStructuredSparsifier):\n\n    def update_mask(self, module, tensor_name, **kwargs):\n        weights = getattr(module, tensor_name)\n        for p in getattr(module.parametrizations, tensor_name):\n            if isinstance(p, FakeStructuredSparsity):\n                mask = cast(torch.Tensor, p.mask)\n                if weights.dim() <= 1:\n                    raise Exception('Structured pruning can only be applied to a 2+dim weight tensor!')\n                dims = tuple(range(1, weights.dim()))\n                saliency = weights.norm(dim=dims, p=1)\n                split_size = len(mask) // 4\n                masks = torch.split(mask, split_size)\n                saliencies = torch.split(saliency, split_size)\n                for keep_mask, sal in zip(masks, saliencies):\n                    k = int(len(keep_mask) * kwargs['sparsity_level'])\n                    prune = sal.topk(k, largest=False, sorted=False).indices\n                    keep_mask.data[prune] = False",
    "docstring": "Prune packed LSTM weights based on saliency. For each layer {k} inside a LSTM, we have two packed weight matrices - weight_ih_l{k} - weight_hh_l{k} These tensors pack the weights for the 4 linear layers together for efficiency. [W_ii | W_if | W_ig | W_io] Pruning this tensor directly will lead to weights being misassigned when unpacked. To ensure that each packed linear layer is pruned the same amount: 1. We split the packed weight into the 4 constituent linear parts 2. Update the mask for each individual piece using saliency individually This applies to both weight_ih_l{k} and weight_hh_l{k}.",
    "type": "class",
    "file_path": "pytorch\\torch\\ao\\pruning\\_experimental\\pruner\\lstm_saliency_pruner.py",
    "ast_data": "ClassDef name:LSTMSaliencyPruner FunctionDef name:update_mask arg:self arg:module arg:tensor_name arguments arg arg arg arg Assign Call For Call If Call Assign Call If Compare Call Raise Call Assign Call Call Call Assign Call Assign Call Assign Call Assign Call For Call Assign Call Call Assign Call Assign"
  },
  {
    "library": "scikit-learn",
    "name": "CrossValidationBenchmark",
    "source_code": "class CrossValidationBenchmark(Benchmark):\n    timeout = 20000\n    param_names = ['n_jobs']\n    params = (Benchmark.n_jobs_vals,)\n\n    def setup(self, *params):\n        n_jobs, = params\n        data = _synth_classification_dataset(n_samples=50000, n_features=100)\n        self.X, self.X_val, self.y, self.y_val = data\n        self.clf = RandomForestClassifier(n_estimators=50, max_depth=10, random_state=0)\n        cv = 16 if Benchmark.data_size == 'large' else 4\n        self.cv_params = {'n_jobs': n_jobs, 'cv': cv}\n\n    def time_crossval(self, *args):\n        cross_val_score(self.clf, self.X, self.y, **self.cv_params)\n\n    def peakmem_crossval(self, *args):\n        cross_val_score(self.clf, self.X, self.y, **self.cv_params)\n\n    def track_crossval(self, *args):\n        return float(cross_val_score(self.clf, self.X, self.y, **self.cv_params).mean())",
    "docstring": "Benchmarks for Cross Validation.",
    "type": "class",
    "file_path": "scikit-learn\\asv_benchmarks\\benchmarks\\model_selection.py",
    "ast_data": "ClassDef name:CrossValidationBenchmark Assign Assign Assign FunctionDef name:setup arg:self arguments arg arg Assign Assign Call Assign Assign Call Assign Compare Assign FunctionDef name:time_crossval arg:self arguments arg arg Call FunctionDef name:peakmem_crossval arg:self arguments arg arg Call FunctionDef name:track_crossval arg:self arguments arg arg Return return:yes Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "draw_base2",
    "source_code": "def draw_base2(self, m: int, out: Optional[torch.Tensor]=None, dtype: Optional[torch.dtype]=None) -> torch.Tensor:\n    n = 2 ** m\n    total_n = self.num_generated + n\n    if not total_n & total_n - 1 == 0:\n        raise ValueError(f\"The balance properties of Sobol' points require n to be a power of 2. {self.num_generated} points have been previously generated, then: n={self.num_generated}+2**{m}={total_n}. If you still want to do this, please use 'SobolEngine.draw()' instead.\")\n    return self.draw(n=n, out=out, dtype=dtype)",
    "docstring": "Function to draw a sequence of :attr: points from a Sobol sequence. Note that the samples are dependent on the previous samples. The size of the result is :math:. Args: m (Int): The (base2) exponent of the number of points to draw. out (Tensor, optional): The output tensor dtype (:class:, optional): the desired data type of the returned tensor. Default: ``",
    "type": "method",
    "file_path": "pytorch\\torch\\quasirandom.py",
    "ast_data": "FunctionDef name:draw_base2 arg:self arg:m arg:out arg:dtype arguments arg arg arg arg Assign Assign If Compare Raise Call Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "calculate_areas",
    "source_code": "def calculate_areas(self):\n    if self._dim == 2:\n        return self._calculate_areas_2d()\n    elif self._dim == 3:\n        return self._calculate_areas_3d()\n    else:\n        raise TypeError('Only supported for 2D and 3D point sets')",
    "docstring": "Calculates the areas of the Voronoi regions. For 2D point sets, the regions are circular arcs. The sum of the areas is ``. .. versionadded:: 1.5.0 Returns ------- areas : double array of shape (npoints,) The areas of the Voronoi regions.",
    "type": "method",
    "file_path": "scipy\\scipy\\spatial\\_spherical_voronoi.py",
    "ast_data": "FunctionDef name:calculate_areas arg:self arguments arg If Compare Return return:yes Call If Compare Return return:yes Call Raise Call"
  },
  {
    "library": "pytorch",
    "name": "instance_norm",
    "source_code": "def instance_norm(input: Tensor, running_mean: Optional[Tensor]=None, running_var: Optional[Tensor]=None, weight: Optional[Tensor]=None, bias: Optional[Tensor]=None, use_input_stats: bool=True, momentum: float=0.1, eps: float=1e-05) -> Tensor:\n    if has_torch_function_variadic(input, running_mean, running_var, weight, bias):\n        return handle_torch_function(instance_norm, (input, running_mean, running_var, weight, bias), input, running_mean=running_mean, running_var=running_var, weight=weight, bias=bias, use_input_stats=use_input_stats, momentum=momentum, eps=eps)\n    if use_input_stats:\n        _verify_spatial_size(input.size())\n    return torch.instance_norm(input, weight, bias, running_mean, running_var, use_input_stats, momentum, eps, torch.backends.cudnn.enabled)",
    "docstring": "Apply Instance Normalization independently for each channel in every data sample within a batch. See :class:, :class:, :class: for details.",
    "type": "function",
    "file_path": "pytorch\\torch\\nn\\functional.py",
    "ast_data": "FunctionDef name:instance_norm arg:input arg:running_mean arg:running_var arg:weight arg:bias arg:use_input_stats arg:momentum arg:eps arguments arg arg arg arg arg arg arg arg If Call Return return:yes Call If Call Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "statically_known_geq",
    "source_code": "def statically_known_geq(self, left: Expr, right: Union[Expr, int]) -> bool:\n    expr = left >= right\n    return self.is_expr_static_and_true(expr)",
    "docstring": "Returns a bool indicating if it is sound to optimize as if left is greater than or equal to right.",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\sizevars.py",
    "ast_data": "FunctionDef name:statically_known_geq arg:self arg:left arg:right arguments arg arg arg Assign Compare Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "SourceType",
    "source_code": "class SourceType(Enum):\n    Existing = 0\n    New = 1",
    "docstring": "This Enum divides VariableTracker into 2 cases, depending on the variable it represents: - already existed that Dynamo began tracking while introspection (Existing) - is a new variable that is created during Dynamo introspection (New) In general, we have these invariants: 1. for associated with , its field must not be None. 2. for associated with , most of the time its field is None, except for cases like side effect codegen for , during which we generate a for such variable, to facilitate codegen.",
    "type": "class",
    "file_path": "pytorch\\torch\\_dynamo\\variables\\base.py",
    "ast_data": "ClassDef name:SourceType Assign Assign"
  },
  {
    "library": "pytorch",
    "name": "is_rerun_disabled_tests",
    "source_code": "def is_rerun_disabled_tests(report: Path, workflow_run_id: int, workflow_run_attempt: int, tests: dict[str, dict[str, int]]) -> bool:\n    if all((t.get('num_green', 0) + t.get('num_red', 0) > MAX_RETRY_IN_NON_DISABLED_MODE for t in tests.values())):\n        return True\n    job_id = get_job_id(report)\n    job_name = get_job_name(job_id, workflow_run_id, workflow_run_attempt)\n    return job_name is not None and 'rerun_disabled_tests' in job_name",
    "docstring": "Check if the test report is coming from rerun_disabled_tests workflow where each test is run multiple times",
    "type": "function",
    "file_path": "pytorch\\tools\\stats\\upload_stats_lib.py",
    "ast_data": "FunctionDef name:is_rerun_disabled_tests arg:report arg:workflow_run_id arg:workflow_run_attempt arg:tests arguments arg arg arg arg If Call Compare Call Call Call Return return:yes Assign Call Assign Call Return return:yes BoolOp Compare Compare"
  },
  {
    "library": "scikit-learn",
    "name": "predict_log_proba",
    "source_code": "def predict_log_proba(self, X):\n    return np.log(self.predict_proba(X))",
    "docstring": "Predict logarithm of probability estimates. The returned estimates for all classes are ordered by the label of classes. Parameters ---------- X : array-like of shape (n_samples, n_features) Vector to be scored, where is the number of samples and is the number of features. Returns ------- T : array-like of shape (n_samples, n_classes) Returns the log-probability of the sample for each class in the model, where classes are ordered as they are in ``.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\linear_model\\_logistic.py",
    "ast_data": "FunctionDef name:predict_log_proba arg:self arg:X arguments arg arg Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "RemoteValueStatus",
    "source_code": "class RemoteValueStatus(enum.Enum):\n    NOT_READY = 'NOT_READY'\n    ABORTED = 'ABORTED'\n    READY = 'READY'",
    "docstring": "The status of a object. A object can have three states: 1) not ready: no value, no non-retryable error and not aborted; 2) aborted: i.e. the execution of function was aborted because of task failure, but can be retried; 3) ready: i.e. has value or has non-tryable error; The initial state of a is \"not ready\". When its corresponding closure has been executed at least once, it will become aborted or ready. The state transitions are: 1) not ready -> 2) aborted: when the corresponding closure is aborted due to worker failure, and the worker failure is not immediately handled. 1) not ready -> 3) ready: when the corresponding closure has been executed successfully. 2) aborted -> 3) ready: when the is rebuilt by rerunning the corresponding closure and the closure has been executed successfully. 3) ready -> 2) aborted: when the corresponding closure had been executed successfully but later the corresponding remote worker failed. This is currently only implemented for resource like iterators.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\coordinator\\remote_value.py",
    "ast_data": "ClassDef name:RemoteValueStatus Assign Assign Assign"
  },
  {
    "library": "scipy",
    "name": "tocsr",
    "source_code": "def tocsr(self, copy=False):\n    return self.tocoo(copy=copy).tocsr(copy=False)",
    "docstring": "Convert this array/matrix to Compressed Sparse Row format. With copy=False, the data/indices may be shared between this array/matrix and the resultant csr_array/matrix.",
    "type": "method",
    "file_path": "scipy\\scipy\\sparse\\_base.py",
    "ast_data": "FunctionDef name:tocsr arg:self arg:copy arguments arg arg Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "functions",
    "source_code": "@property\ndef functions(self):\n    return {key: value for key, value in self._function_dict.items() if value is not None}",
    "docstring": "Returns dictionary of all functions.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\saving\\saved_model\\serialized_attributes.py",
    "ast_data": "FunctionDef name:functions arg:self arguments arg Return return:yes Call Compare"
  },
  {
    "library": "scikit-learn",
    "name": "diag",
    "source_code": "def diag(self, X):\n    return np.full(_num_samples(X), self.constant_value, dtype=np.array(self.constant_value).dtype)",
    "docstring": "Returns the diagonal of the kernel k(X, X). The result of this method is identical to np.diag(self(X)); however, it can be evaluated more efficiently since only the diagonal is evaluated. Parameters ---------- X : array-like of shape (n_samples_X, n_features) or list of object Argument to the kernel. Returns ------- K_diag : ndarray of shape (n_samples_X,) Diagonal of kernel k(X, X)",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\gaussian_process\\kernels.py",
    "ast_data": "FunctionDef name:diag arg:self arg:X arguments arg arg Return return:yes Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_maybe_create_saver",
    "source_code": "def _maybe_create_saver(self, saver=None):\n    if not saver:\n        saver = tf_saver.Saver(variables._all_saveable_objects(), sharded=True, write_version=saver_pb2.SaverDef.V2, allow_empty=True)\n    return saver",
    "docstring": "Creates a sharded saver if one does not already exist.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\saved_model\\builder_impl.py",
    "ast_data": "FunctionDef name:_maybe_create_saver arg:self arg:saver arguments arg arg If Assign Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "rename_libtensorflow",
    "source_code": "def rename_libtensorflow(srcs_dir: str, version: str):\n    major_version = version.split('.')[0]\n    if is_macos():\n        shutil.move(os.path.join(srcs_dir, 'libtensorflow_cc.{}.dylib'.format(version)), os.path.join(srcs_dir, 'libtensorflow_cc.{}.dylib'.format(major_version)))\n        shutil.move(os.path.join(srcs_dir, 'libtensorflow_framework.{}.dylib'.format(version)), os.path.join(srcs_dir, 'libtensorflow_framework.{}.dylib'.format(major_version)))\n    else:\n        shutil.move(os.path.join(srcs_dir, 'libtensorflow_cc.so.{}'.format(version)), os.path.join(srcs_dir, 'libtensorflow_cc.so.{}'.format(major_version)))\n        shutil.move(os.path.join(srcs_dir, 'libtensorflow_framework.so.{}'.format(version)), os.path.join(srcs_dir, 'libtensorflow_framework.so.{}'.format(major_version)))",
    "docstring": "Update libtensorflow_cc file name. Bazel sets full TF version in name but libtensorflow_cc must contain only major. Update accordingly to the platform: e.g. libtensorflow_cc.so.2.15.0 -> libtensorflow_cc.2 Args: srcs_dir: target directory with files. version: Major version to be set.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\tools\\pip_package\\build_pip_package.py",
    "ast_data": "FunctionDef name:rename_libtensorflow arg:srcs_dir arg:version arguments arg arg Assign Call If Call Call Call Call Call Call Call Call Call Call Call Call Call Call Call Call Call Call Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "set_help_intro",
    "source_code": "def set_help_intro(self, help_intro):\n    self._help_intro = help_intro",
    "docstring": "Set an introductory message to help output. Args: help_intro: (RichTextLines) Rich text lines appended to the beginning of the output of the command \"help\", as introductory information.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\debug\\cli\\debugger_cli_common.py",
    "ast_data": "FunctionDef name:set_help_intro arg:self arg:help_intro arguments arg arg Assign"
  },
  {
    "library": "sphinx",
    "name": "restore",
    "source_code": "def restore(self, other: Project) -> None:\n    self.docnames = other.docnames\n    self._path_to_docname = other._path_to_docname\n    self._docname_to_path = other._docname_to_path",
    "docstring": "Take over a result of last build.",
    "type": "method",
    "file_path": "sphinx\\sphinx\\project.py",
    "ast_data": "FunctionDef name:restore arg:self arg:other arguments arg arg Assign Assign Assign"
  },
  {
    "library": "pandas",
    "name": "_get_join_freq",
    "source_code": "def _get_join_freq(self, other):\n    freq = None\n    if self._can_fast_union(other):\n        freq = self.freq\n    return freq",
    "docstring": "Get the freq to attach to the result of a join operation.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\indexes\\datetimelike.py",
    "ast_data": "FunctionDef name:_get_join_freq arg:self arg:other arguments arg arg Assign If Call Assign Return return:yes"
  },
  {
    "library": "pandas",
    "name": "_should_partial_index",
    "source_code": "@final\ndef _should_partial_index(self, target: Index) -> bool:\n    if isinstance(self.dtype, IntervalDtype):\n        if isinstance(target.dtype, IntervalDtype):\n            return False\n        return self.left._should_compare(target)\n    return False",
    "docstring": "Should we attempt partial-matching indexing?",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\indexes\\base.py",
    "ast_data": "FunctionDef name:_should_partial_index arg:self arg:target arguments arg arg If Call If Call Return return:yes Return return:yes Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "edit_margin",
    "source_code": "def edit_margin(self, todo, size, cell):\n    self.solver.suggestValue(self.margins[todo][cell], size)\n    self.margin_vals[todo][cell] = size",
    "docstring": "Change the size of the margin for one cell. Parameters ---------- todo : string (one of 'left', 'right', 'bottom', 'top') margin to alter. size : float Size of the margin. If it is larger than the existing minimum it updates the margin size. Fraction of figure size. cell : int Cell column or row to edit.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\_layoutgrid.py",
    "ast_data": "FunctionDef name:edit_margin arg:self arg:todo arg:size arg:cell arguments arg arg arg arg Call Assign"
  },
  {
    "library": "django",
    "name": "__getitem__",
    "source_code": "def __getitem__(self, name):\n    if name in MEDIA_TYPES:\n        return Media(**{str(name): getattr(self, '_' + name)})\n    raise KeyError('Unknown media type \"%s\"' % name)",
    "docstring": "Return a Media object that only contains media of the given type.",
    "type": "method",
    "file_path": "django\\django\\forms\\widgets.py",
    "ast_data": "FunctionDef name:__getitem__ arg:self arg:name arguments arg arg If Compare Return return:yes Call Call Call Raise Call"
  },
  {
    "library": "pandas",
    "name": "_constructor",
    "source_code": "@property\ndef _constructor(self) -> Callable[..., Self]:\n    raise AbstractMethodError(self)",
    "docstring": "Used when a manipulation result has the same dimensions as the original.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\generic.py",
    "ast_data": "FunctionDef name:_constructor arg:self arguments arg Raise Call"
  },
  {
    "library": "django",
    "name": "union",
    "source_code": "def union(self, other):\n    return self._geomgen(capi.geom_union, other)",
    "docstring": "Return a new geometry consisting of the region which is the union of this geometry and the other.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\gdal\\geometries.py",
    "ast_data": "FunctionDef name:union arg:self arg:other arguments arg arg Return return:yes Call"
  },
  {
    "library": "cherrypy",
    "name": "acquire_thread",
    "source_code": "def acquire_thread(self):\n    thread_ident = _thread.get_ident()\n    if thread_ident not in self.threads:\n        i = len(self.threads) + 1\n        self.threads[thread_ident] = i\n        self.bus.publish('start_thread', i)",
    "docstring": "Run 'start_thread' listeners for the current thread. If the current thread has already been seen, any 'start_thread' listeners will not be run again.",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\process\\plugins.py",
    "ast_data": "FunctionDef name:acquire_thread arg:self arguments arg Assign Call If Compare Assign Call Assign Call"
  },
  {
    "library": "tensorflow",
    "name": "UnknownArgument",
    "source_code": "class UnknownArgument(object):\n    pass",
    "docstring": "Signifies an argument which is not currently handled.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\func_graph.py",
    "ast_data": "ClassDef name:UnknownArgument"
  },
  {
    "library": "matplotlib",
    "name": "do_3d_projection",
    "source_code": "def do_3d_projection(self):\n    segments = np.asanyarray(self._segments3d)\n    mask = False\n    if np.ma.isMA(segments):\n        mask = segments.mask\n    if self._axlim_clip:\n        viewlim_mask = _viewlim_mask(segments[..., 0], segments[..., 1], segments[..., 2], self.axes)\n        if np.any(viewlim_mask):\n            viewlim_mask = np.broadcast_to(viewlim_mask[..., np.newaxis], (*viewlim_mask.shape, 3))\n            mask = mask | viewlim_mask\n    xyzs = np.ma.array(proj3d._proj_transform_vectors(segments, self.axes.M), mask=mask)\n    segments_2d = xyzs[..., 0:2]\n    LineCollection.set_segments(self, segments_2d)\n    if len(xyzs) > 0:\n        minz = min(xyzs[..., 2].min(), 1000000000.0)\n    else:\n        minz = np.nan\n    return minz",
    "docstring": "Project the points according to renderer matrix.",
    "type": "method",
    "file_path": "matplotlib\\lib\\mpl_toolkits\\mplot3d\\art3d.py",
    "ast_data": "FunctionDef name:do_3d_projection arg:self arguments arg Assign Call Assign If Call Assign If Assign Call If Call Assign Call Assign Assign Call Call Assign Call If Compare Call Assign Call Call Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "res_call",
    "source_code": "def res_call(self, ns, types_ns, node, f_type, args, keywords):\n    raise NotImplementedError('subclasses must implement')",
    "docstring": "Resolves the return type an external function or method call. Args: ns: namespace types_ns: types namespace node: str, the function name f_type: types of the actual function being called, if known args: types of each respective argument in node.args keywords: types of each respective argument in node.keywords Returns: Tuple (return_type, side_effect_types). The first element is just the return types of the function. The second element is a map from argument names to sets of types, and allow modelling side effects of functions (for example via global or nonlocal).",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\autograph\\pyct\\static_analysis\\type_inference.py",
    "ast_data": "FunctionDef name:res_call arg:self arg:ns arg:types_ns arg:node arg:f_type arg:args arg:keywords arguments arg arg arg arg arg arg arg Raise Call"
  },
  {
    "library": "tensorflow",
    "name": "IsContainingContext",
    "source_code": "def IsContainingContext(ctxt, maybe_containing_ctxt):\n    while ctxt is not maybe_containing_ctxt:\n        if ctxt is None:\n            return False\n        ctxt = ctxt.outer_context\n    return True",
    "docstring": "Returns true if is or contains .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\control_flow_util.py",
    "ast_data": "FunctionDef name:IsContainingContext arg:ctxt arg:maybe_containing_ctxt arguments arg arg While Compare If Compare Return return:yes Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "numpy_text",
    "source_code": "def numpy_text(tensor, is_repr=False) -> str:\n    if tensor.dtype.is_numpy_compatible:\n        tensor_numpy = tensor._numpy()\n        if is_repr:\n            if np.isscalar(tensor_numpy) and (not isinstance(tensor_numpy, bytes)):\n                text = repr(tensor_numpy.item())\n            else:\n                text = repr(tensor_numpy)\n        else:\n            text = str(tensor_numpy)\n    else:\n        text = '<unprintable>'\n    if '\\n' in text:\n        text = '\\n' + text\n    return text",
    "docstring": "Human readable representation of a tensor's numpy value.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\ops.py",
    "ast_data": "FunctionDef name:numpy_text arg:tensor arg:is_repr arguments arg arg If Assign Call If If BoolOp Call Call Assign Call Call Assign Call Assign Call Assign If Compare Assign Return return:yes"
  },
  {
    "library": "cherrypy",
    "name": "extrapolate_statistics",
    "source_code": "def extrapolate_statistics(scope):\n    c = {}\n    for k, v in scope.copy().items():\n        if isinstance(v, dict):\n            v = extrapolate_statistics(v)\n        elif isinstance(v, (list, tuple)):\n            v = [extrapolate_statistics(record) for record in v]\n        elif hasattr(v, '__call__'):\n            v = v(scope)\n        c[k] = v\n    return c",
    "docstring": "Return an extrapolated copy of the given scope.",
    "type": "function",
    "file_path": "cherrypy\\cherrypy\\lib\\cpstats.py",
    "ast_data": "FunctionDef name:extrapolate_statistics arg:scope arguments arg Assign For Call Call If Call Assign Call If Call Assign Call If Call Assign Call Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "ExportedSymbol",
    "source_code": "class ExportedSymbol(NamedTuple):\n    file_name: str\n    line_no: int\n    symbol_name: str\n    v1_apis: tuple[str, ...]\n    v2_apis: tuple[str, ...]\n\n    @classmethod\n    def create(cls, *, v1_apis: Sequence[str], v2_apis: Sequence[str], **kwargs) -> 'ExportedSymbol':\n        return cls(v1_apis=tuple(v1_apis), v2_apis=tuple(v2_apis), **kwargs)",
    "docstring": "Information about a single tf_export instance.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\tools\\api\\generator2\\shared\\exported_api.py",
    "ast_data": "ClassDef name:ExportedSymbol FunctionDef name:create arg:cls arguments arg arg arg arg Return return:yes Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "is_functional_schema",
    "source_code": "def is_functional_schema(schema: Any) -> bool:\n\n    def is_functional(schema):\n        if schema.is_mutable:\n            return False\n        rets = schema.returns\n        is_non_mutating_view = len(rets) > 0 and any((r.alias_info is not None and (not r.alias_info.is_write) for r in rets))\n        if is_non_mutating_view:\n            return False\n        if not schema.returns:\n            return False\n        return True\n    if isinstance(schema, torch._C.FunctionSchema):\n        return is_functional(schema)\n    from torchgen.model import FunctionSchema\n    if isinstance(schema, str):\n        schema = FunctionSchema.parse(schema)\n    assert isinstance(schema, FunctionSchema)\n    return is_functional(schema)",
    "docstring": "Check if the schema is functional. An operator is functional if: - it does not mutate any of its inputs - it does not return a view on any of its inputs - it has at least one return",
    "type": "function",
    "file_path": "pytorch\\torch\\_library\\utils.py",
    "ast_data": "FunctionDef name:is_functional_schema arg:schema arguments arg FunctionDef name:is_functional arg:schema arguments arg If Return return:yes Assign Assign BoolOp Compare Call Call BoolOp Compare If Return return:yes If Return return:yes Return return:yes If Call Return return:yes Call If Call Assign Call Call Return return:yes Call"
  },
  {
    "library": "authlib",
    "name": "put",
    "source_code": "def put(self, url, **kwargs):\n    return self.request('PUT', url, **kwargs)",
    "docstring": "Invoke PUT http request. If `` configured, shortcut is available:: client.put(\"profile\", json={\"name\": \"Hsiaoming Yang\"})",
    "type": "method",
    "file_path": "authlib\\authlib\\integrations\\base_client\\sync_app.py",
    "ast_data": "FunctionDef name:put arg:self arg:url arguments arg arg arg Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "_convert_and_box_cache",
    "source_code": "def _convert_and_box_cache(arg: DatetimeScalarOrArrayConvertible, cache_array: Series, name: Hashable | None=None) -> Index:\n    from pandas import Series\n    result = Series(arg, dtype=cache_array.index.dtype).map(cache_array)\n    return _box_as_indexlike(result._values, utc=False, name=name)",
    "docstring": "Convert array of dates with a cache and wrap the result in an Index. Parameters ---------- arg : integer, float, string, datetime, list, tuple, 1-d array, Series cache_array : Series Cache of converted, unique dates name : string, default None Name for a DatetimeIndex Returns ------- result : Index-like of converted dates",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\tools\\datetimes.py",
    "ast_data": "FunctionDef name:_convert_and_box_cache arg:arg arg:cache_array arg:name arguments arg arg arg Assign Call Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "create_tmp_raii_handle_var_if_needed",
    "source_code": "def create_tmp_raii_handle_var_if_needed(self, handle: str, writer: Optional[Union[HasWriteLine, list[str]]]=None) -> str:\n    if not handle.startswith(('borrow_arrayref_tensor_as_tensor(', 'copy_arrayref_tensor_to_tensor(', 'wrap_with_raii_handle_if_needed(', 'RAIIAtenTensorHandle(')):\n        return handle\n    tmp_var_name = f'var_{next(self.arg_var_id)}'\n    call_str = f'auto {tmp_var_name} = {handle};'\n    writer = writer if writer is not None else self\n    if isinstance(writer, list):\n        writer.append(call_str)\n    else:\n        writer.writeline(call_str)\n    return tmp_var_name",
    "docstring": "If the input handle is an rvalue RAII tensor, creates an lvalue variable for it in writer. Returns a variable name that can be used to access handle.",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\codegen\\cpp_wrapper_cpu.py",
    "ast_data": "FunctionDef name:create_tmp_raii_handle_var_if_needed arg:self arg:handle arg:writer arguments arg arg arg If Call Return return:yes Assign Call Assign Assign Compare If Call Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_cuda_load_module",
    "source_code": "def _cuda_load_module(ptx: Union[str, bytes], kernel_names: Optional[list[str]]=None) -> Union[_CudaModule, dict[str, '_CudaKernel']]:\n    import torch.cuda\n    libcuda = _get_cuda_library()\n    if isinstance(ptx, str):\n        ptx = ptx.encode('utf-8')\n    module = ctypes.c_void_p()\n    stream = torch.cuda.current_stream()\n    with stream:\n        _check_cuda(libcuda.cuModuleLoadData(ctypes.byref(module), ptx))\n    if not kernel_names:\n        return _CudaModule(module)\n    kernels = {}\n    for name in kernel_names:\n        func = ctypes.c_void_p()\n        _check_cuda(libcuda.cuModuleGetFunction(ctypes.byref(func), module, name.encode('utf-8')))\n        kernels[name] = _CudaKernel(func, module)\n    return kernels",
    "docstring": "Loads a CUDA module from PTX code and returns a module object that can access kernels. Args: ptx (bytes or str): The PTX code to load kernel_names (list, optional): List of kernel names to extract from the module. If None, will return a module object with __getattr__. Returns: object: If kernel_names is None, returns a module object with __getattr__ to access kernels. If kernel_names is provided, returns a dict mapping kernel names to _CudaKernel objects.",
    "type": "function",
    "file_path": "pytorch\\torch\\cuda\\_utils.py",
    "ast_data": "FunctionDef name:_cuda_load_module arg:ptx arg:kernel_names arguments arg arg Assign Call If Call Assign Call Assign Call Assign Call With Call Call Call If Return return:yes Call Assign For Assign Call Call Call Call Call Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "iterations",
    "source_code": "@property\ndef iterations(self):\n    if self._iterations is None:\n        with self._distribution_strategy_scope():\n            self._iterations = self.add_weight('iter', shape=[], dtype=dtypes.int64, trainable=False, aggregation=tf_variables.VariableAggregation.ONLY_FIRST_REPLICA)\n        self._weights.append(self._iterations)\n    return self._iterations",
    "docstring": "Variable. The number of training steps this Optimizer has run.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\optimizer_v2\\optimizer_v2.py",
    "ast_data": "FunctionDef name:iterations arg:self arguments arg If Compare With Call Assign Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "get_canonical_name",
    "source_code": "def get_canonical_name(api_names: Sequence[str], deprecated_api_names: Sequence[str]) -> Optional[str]:\n    non_deprecated_name = next((name for name in api_names if name not in deprecated_api_names), None)\n    if non_deprecated_name:\n        return non_deprecated_name\n    if api_names:\n        return api_names[0]\n    return None",
    "docstring": "Get preferred endpoint name. Args: api_names: API names iterable. deprecated_api_names: Deprecated API names iterable. Returns: Returns one of the following in decreasing preference: - first non-deprecated endpoint - first endpoint - None",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\util\\tf_export.py",
    "ast_data": "FunctionDef name:get_canonical_name arg:api_names arg:deprecated_api_names arguments arg arg Assign Call Compare If Return return:yes If Return return:yes Return return:no"
  },
  {
    "library": "cryptography",
    "name": "_SSHFormatSKECDSA",
    "source_code": "class _SSHFormatSKECDSA:\n\n    def load_public(self, data: memoryview) -> tuple[ec.EllipticCurvePublicKey, memoryview]:\n        public_key, data = _lookup_kformat(_ECDSA_NISTP256).load_public(data)\n        _, data = load_application(data)\n        return (public_key, data)\n\n    def get_public(self, data: memoryview) -> typing.NoReturn:\n        raise UnsupportedAlgorithm('sk-ecdsa-sha2-nistp256 private keys cannot be loaded')",
    "docstring": "The format of a sk-ecdsa-sha2-nistp256@openssh.com public key is: string \"sk-ecdsa-sha2-nistp256@openssh.com\" string curve name ec_point Q string application (user-specified, but typically \"ssh:\")",
    "type": "class",
    "file_path": "cryptography\\src\\cryptography\\hazmat\\primitives\\serialization\\ssh.py",
    "ast_data": "ClassDef name:_SSHFormatSKECDSA FunctionDef name:load_public arg:self arg:data arguments arg arg Assign Call Call Assign Call Return return:yes FunctionDef name:get_public arg:self arg:data arguments arg arg Raise Call"
  },
  {
    "library": "tensorflow",
    "name": "_ragged_tensor_mse",
    "source_code": "@dispatch.dispatch_for_types(mean_squared_error, ragged_tensor.RaggedTensor)\ndef _ragged_tensor_mse(y_true, y_pred):\n    return _ragged_tensor_apply_loss(mean_squared_error, y_true, y_pred)",
    "docstring": "Implements support for handling RaggedTensors. Args: y_true: RaggedTensor truth values. shape = . y_pred: RaggedTensor predicted values. shape = . Returns: Mean squared error values. shape = . When the number of dimensions of the batch feature vector [d0, .. dN] is greater than one the return value is a RaggedTensor. Otherwise a Dense tensor with dimensions [batch_size] is returned.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\losses.py",
    "ast_data": "FunctionDef name:_ragged_tensor_mse arg:y_true arg:y_pred arguments arg arg Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "source",
    "source_code": "def source(self, *args, **kwargs):\n    print(self._source(*args))",
    "docstring": "Print source code for the function corresponding to inputs",
    "type": "method",
    "file_path": "pytorch\\torch\\fx\\experimental\\unification\\multipledispatch\\dispatcher.py",
    "ast_data": "FunctionDef name:source arg:self arguments arg arg arg Call Call"
  },
  {
    "library": "numpy",
    "name": "get_npy_pkg_dir",
    "source_code": "def get_npy_pkg_dir():\n    d = os.environ.get('NPY_PKG_CONFIG_PATH')\n    if d is not None:\n        return d\n    spec = importlib.util.find_spec('numpy')\n    d = os.path.join(os.path.dirname(spec.origin), '_core', 'lib', 'npy-pkg-config')\n    return d",
    "docstring": "Return the path where to find the npy-pkg-config directory. If the NPY_PKG_CONFIG_PATH environment variable is set, the value of that is returned. Otherwise, a path inside the location of the numpy module is returned. The NPY_PKG_CONFIG_PATH can be useful when cross-compiling, maintaining customized npy-pkg-config .ini files for the cross-compilation environment, and using them when cross-compiling.",
    "type": "function",
    "file_path": "numpy\\numpy\\distutils\\misc_util.py",
    "ast_data": "FunctionDef name:get_npy_pkg_dir arguments Assign Call If Compare Return return:yes Assign Call Assign Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_get_example_value",
    "source_code": "def _get_example_value(node: fx.Node) -> Optional[str]:\n    if 'example_value' in node.meta:\n        return node.meta['example_value']\n    elif 'val' in node.meta:\n        return node.meta['val']\n    else:\n        return None",
    "docstring": "Get the example value key for a node, since dynamo uses \"example_value\" while non-strict export uses \"val.",
    "type": "function",
    "file_path": "pytorch\\torch\\fx\\passes\\runtime_assert.py",
    "ast_data": "FunctionDef name:_get_example_value arg:node arguments arg If Compare Return return:yes If Compare Return return:yes Return return:no"
  },
  {
    "library": "pytorch",
    "name": "find_guarded_entry",
    "source_code": "@classmethod\ndef find_guarded_entry(cls: type[GuardedCache[T]], key: str, local: bool, remote_cache: Optional[RemoteCache[JsonDataTy]], evaluate_guards: Callable[[str, Union[list[int], list[torch.SymInt]]], bool], hints: list[int]) -> tuple[Optional[T], Optional[bytes], dict[str, str]]:\n    graph = None\n    pickled_content = None\n    result_status = 'full_miss'\n    sample_guards_expr = None\n    for candidate, content in cls.iterate_over_candidates(local, remote_cache, key):\n        assert hasattr(candidate, 'guards_expr')\n        if not candidate.guards_expr:\n            graph = candidate\n            pickled_content = content\n            result_status = 'hit'\n            break\n        hit = bool(evaluate_guards(candidate.guards_expr, hints))\n        if hit:\n            graph = candidate\n            pickled_content = content\n            result_status = 'hit'\n            sample_guards_expr = candidate.guards_expr\n            break\n        else:\n            result_status = 'guard_miss'\n            sample_guards_expr = candidate.guards_expr\n    info = {'cache_status_detailed': result_status}\n    if sample_guards_expr is not None:\n        info['cache_status_guard_expr'] = sample_guards_expr\n    return (graph, pickled_content, info)",
    "docstring": "Find the first cache entry in iterate_over_candidates that passes . Args: key: The cache key to look up local: Whether to check the local cache remote_cache: The remote cache to check, if any evaluate_guards: Function that evaluates whether a guard passes the check, given a list of hint values and the guard expression. hints: List of symint hints paired with evaluate_guards Returns: A tuple of (graph, pickled_content) if found, or (None, None) if not found",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\codecache.py",
    "ast_data": "FunctionDef name:find_guarded_entry arg:cls arg:key arg:local arg:remote_cache arg:evaluate_guards arg:hints arguments arg arg arg arg arg arg Assign Assign Assign Assign For Call Call If Assign Assign Assign Assign Call Call If Assign Assign Assign Assign Assign Assign Assign If Compare Assign Return return:yes"
  },
  {
    "library": "pandas",
    "name": "_convert_key",
    "source_code": "def _convert_key(self, key):\n    for i in key:\n        if not is_integer(i):\n            raise ValueError('iAt based indexing can only have integer indexers')\n    return key",
    "docstring": "Require integer args. (and convert to label arguments)",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\indexing.py",
    "ast_data": "FunctionDef name:_convert_key arg:self arg:key arguments arg arg For If Call Raise Call Return return:yes"
  },
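The check above is what users hit through `DataFrame.iat`; a minimal sketch of both the accepted and the rejected call:

```python
import pandas as pd

df = pd.DataFrame({"a": [1, 2], "b": [3, 4]})

print(df.iat[0, 1])  # 3 -- purely positional, integer-only access

try:
    df.iat[0, "b"]  # labels are rejected by _convert_key
except ValueError as exc:
    print(exc)  # iAt based indexing can only have integer indexers
```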
  {
    "library": "scipy",
    "name": "_namespace",
    "source_code": "def _namespace(xp):\n    return np_compat if xp is None else array_namespace(xp.empty(0))",
    "docstring": "A shim for the arg of and acos/arccos. Will be able to replace with when we drop support for numpy 1.x and cupy 13.x",
    "type": "function",
    "file_path": "scipy\\scipy\\signal\\windows\\_windows.py",
    "ast_data": "FunctionDef name:_namespace arg:xp arguments arg Return return:yes Compare Call Call"
  },
  {
    "library": "sphinx",
    "name": "suppress_logging",
    "source_code": "@contextmanager\ndef suppress_logging() -> Iterator[MemoryHandler]:\n    logger = logging.getLogger(NAMESPACE)\n    memhandler = MemoryHandler()\n    try:\n        handlers = []\n        for handler in logger.handlers[:]:\n            logger.removeHandler(handler)\n            handlers.append(handler)\n        logger.addHandler(memhandler)\n        yield memhandler\n    finally:\n        logger.removeHandler(memhandler)\n        for handler in handlers:\n            logger.addHandler(handler)",
    "docstring": "Context manager to suppress logging all logs temporarily. For example:: >>> with suppress_logging(): >>> logger.warning('Warning message!') # suppressed >>> some_long_process() >>>",
    "type": "function",
    "file_path": "sphinx\\sphinx\\util\\logging.py",
    "ast_data": "FunctionDef name:suppress_logging arguments Assign Call Assign Call Try Assign For Call Call Call Call For Call"
  },
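A short sketch of the context manager in use. It assumes `suppress_logging` and `getLogger` are imported from `sphinx.util.logging`; this is an internal Sphinx utility, so the import path may shift between versions.

```python
from sphinx.util.logging import getLogger, suppress_logging

logger = getLogger(__name__)

# Warnings emitted inside the block are buffered by the temporary
# MemoryHandler instead of reaching the normal handlers.
with suppress_logging() as memhandler:
    logger.warning("hidden while suppressed")
```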
  {
    "library": "tensorflow",
    "name": "with_rank_at_least",
    "source_code": "def with_rank_at_least(self, rank):\n    if self.rank is not None and self.rank < rank:\n        raise ValueError('Shape %s must have rank at least %d' % (self, rank))\n    else:\n        return self",
    "docstring": "Returns a shape based on with at least the given rank. Args: rank: An integer. Returns: A shape that is at least as specific as with at least the given rank. Raises: ValueError: If does not represent a shape with at least the given .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\tensor_shape.py",
    "ast_data": "FunctionDef name:with_rank_at_least arg:self arg:rank arguments arg arg If BoolOp Compare Compare Raise Call Return return:yes"
  },
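The check is easy to exercise directly on a `tf.TensorShape`, as in this small sketch:

```python
import tensorflow as tf

shape = tf.TensorShape([None, 3])   # rank 2, first dimension unknown
print(shape.with_rank_at_least(1))  # (None, 3) -- returned unchanged

try:
    shape.with_rank_at_least(3)
except ValueError as exc:
    print(exc)  # Shape (None, 3) must have rank at least 3
```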
  {
    "library": "numpy",
    "name": "get_mathlibs",
    "source_code": "def get_mathlibs(path=None):\n    if path is not None:\n        config_file = os.path.join(path, '_numpyconfig.h')\n    else:\n        dirs = get_numpy_include_dirs()\n        for path in dirs:\n            fn = os.path.join(path, '_numpyconfig.h')\n            if os.path.exists(fn):\n                config_file = fn\n                break\n        else:\n            raise DistutilsError('_numpyconfig.h not found in numpy include dirs %r' % (dirs,))\n    with open(config_file) as fid:\n        mathlibs = []\n        s = '#define MATHLIB'\n        for line in fid:\n            if line.startswith(s):\n                value = line[len(s):].strip()\n                if value:\n                    mathlibs.extend(value.split(','))\n    return mathlibs",
    "docstring": "Return the MATHLIB line from numpyconfig.h",
    "type": "function",
    "file_path": "numpy\\numpy\\distutils\\misc_util.py",
    "ast_data": "FunctionDef name:get_mathlibs arg:path arguments arg If Compare Assign Call Assign Call For Assign Call If Call Assign Raise Call With Call Assign Assign For If Call Assign Call Call If Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "sym_float",
    "source_code": "def sym_float(a):\n    if overrides.has_torch_function_unary(a):\n        return overrides.handle_torch_function(sym_float, (a,), a)\n    if isinstance(a, SymFloat):\n        return a\n    elif hasattr(a, '__sym_float__'):\n        return a.__sym_float__()\n    return builtins.float(a)",
    "docstring": "SymInt-aware utility for float casting. Args: a (SymInt, SymFloat, or object): Object to cast",
    "type": "function",
    "file_path": "pytorch\\torch\\__init__.py",
    "ast_data": "FunctionDef name:sym_float arg:a arguments arg If Call Return return:yes Call If Call Return return:yes If Call Return return:yes Call Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "transpose",
    "source_code": "def transpose(self, *axes: int) -> Self:\n    return self[:]",
    "docstring": "Return a transposed view on this array. Because ExtensionArrays are always 1D, this is a no-op. It is included for compatibility with np.ndarray. Returns ------- ExtensionArray Examples -------- >>> pd.array([1, 2, 3]).transpose() [1, 2, 3] Length: 3, dtype: Int64",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\arrays\\base.py",
    "ast_data": "FunctionDef name:transpose arg:self arguments arg arg Return return:yes"
  },
  {
    "library": "django",
    "name": "srid",
    "source_code": "@property\ndef srid(self):\n    s = capi.geos_get_srid(self.ptr)\n    if s == 0:\n        return None\n    else:\n        return s",
    "docstring": "Get the SRID for the geometry. Return None if no SRID is set.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\geos\\geometry.py",
    "ast_data": "FunctionDef name:srid arg:self arguments arg Assign Call If Compare Return return:no Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_get_compatible_structured_output_specs",
    "source_code": "def _get_compatible_structured_output_specs(true_graph, false_graph):\n    return nest.map_structure(_get_compatible_spec, true_graph.structured_outputs, false_graph.structured_outputs)",
    "docstring": "Returns the most specific compatible specs of graph structured outputs.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\cond_v2.py",
    "ast_data": "FunctionDef name:_get_compatible_structured_output_specs arg:true_graph arg:false_graph arguments arg arg Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "_number_format",
    "source_code": "@property\ndef _number_format(self) -> dict[str, Any]:\n    return {'na_rep': self.na_rep, 'float_format': self.float_format, 'date_format': self.date_format, 'quoting': self.quoting, 'decimal': self.decimal}",
    "docstring": "Dictionary used for storing number formatting settings.",
    "type": "method",
    "file_path": "pandas\\pandas\\io\\formats\\csvs.py",
    "ast_data": "FunctionDef name:_number_format arg:self arguments arg Return return:yes"
  },
  {
    "library": "sphinx",
    "name": "CatalogRepository",
    "source_code": "class CatalogRepository:\n\n    def __init__(self, basedir: str | os.PathLike[str], locale_dirs: list[str], language: str, encoding: str) -> None:\n        self.basedir = _StrPath(basedir)\n        self._locale_dirs = locale_dirs\n        self.language = language\n        self.encoding = encoding\n\n    @property\n    def locale_dirs(self) -> Iterator[_StrPath]:\n        if not self.language:\n            return\n        for locale_dir in self._locale_dirs:\n            locale_path = self.basedir / locale_dir / self.language / 'LC_MESSAGES'\n            if locale_path.exists():\n                yield (self.basedir / locale_dir)\n            else:\n                logger.verbose(__('locale_dir %s does not exist'), locale_path)\n\n    @property\n    def pofiles(self) -> Iterator[tuple[_StrPath, _StrPath]]:\n        for locale_dir in self.locale_dirs:\n            locale_path = locale_dir / self.language / 'LC_MESSAGES'\n            for abs_path in locale_path.rglob('*.po'):\n                rel_path = abs_path.relative_to(locale_path)\n                if any((part.startswith('.') for part in rel_path.parts[:-1])):\n                    continue\n                yield (locale_path, rel_path)\n\n    @property\n    def catalogs(self) -> Iterator[CatalogInfo]:\n        for basedir, filename in self.pofiles:\n            domain = filename.with_suffix('').as_posix()\n            yield CatalogInfo(basedir, domain, self.encoding)",
    "docstring": "A repository for message catalogs.",
    "type": "class",
    "file_path": "sphinx\\sphinx\\util\\i18n.py",
    "ast_data": "ClassDef name:CatalogRepository FunctionDef name:__init__ arg:self arg:basedir arg:locale_dirs arg:language arg:encoding arguments arg arg arg arg arg Assign Call Assign Assign Assign FunctionDef name:locale_dirs arg:self arguments arg If Return return:no For Assign If Call Call Call FunctionDef name:pofiles arg:self arguments arg For Assign For Call Assign Call If Call Call FunctionDef name:catalogs arg:self arguments arg For Assign Call Call Call"
  },
  {
    "library": "django",
    "name": "harden_runtime",
    "source_code": "def harden_runtime(self, password, encoded):\n    warnings.warn('subclasses of BasePasswordHasher should provide a harden_runtime() method')",
    "docstring": "Bridge the runtime gap between the work factor supplied in and the work factor suggested by this hasher. Taking PBKDF2 as an example, if contains 20000 iterations and is 30000, this method should run password through another 10000 iterations of PBKDF2. Similar approaches should exist for any hasher that has a work factor. If not, this method should be defined as a no-op to silence the warning.",
    "type": "method",
    "file_path": "django\\django\\contrib\\auth\\hashers.py",
    "ast_data": "FunctionDef name:harden_runtime arg:self arg:password arg:encoded arguments arg arg arg Call"
  },
  {
    "library": "pandas",
    "name": "set_table_attributes",
    "source_code": "def set_table_attributes(self, attributes: str) -> Styler:\n    self.table_attributes = attributes\n    return self",
    "docstring": "Set the table attributes added to the HTML elements. Examples -------- >>> df = pd.DataFrame(np.random.randn(10, 4)) >>> df.style.set_table_attributes('class=\"pure-table\"') # doctest: +SKIP # ... ...",
    "type": "method",
    "file_path": "pandas\\pandas\\io\\formats\\style.py",
    "ast_data": "FunctionDef name:set_table_attributes arg:self arg:attributes arguments arg arg Assign Return return:yes"
  },
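Because the setter returns `self`, it chains with other `Styler` methods. A minimal sketch (rendering requires pandas' optional jinja2 dependency):

```python
import numpy as np
import pandas as pd

df = pd.DataFrame(np.random.randn(4, 2))
styler = df.style.set_table_attributes('class="pure-table"')

# The attributes are injected into the opening <table> tag of the rendered HTML.
assert 'class="pure-table"' in styler.to_html()
```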
  {
    "library": "pandas",
    "name": "AppendableSeriesTable",
    "source_code": "class AppendableSeriesTable(AppendableFrameTable):\n    pandas_kind = 'series_table'\n    table_type = 'appendable_series'\n    ndim = 2\n    obj_type = Series\n\n    @property\n    def is_transposed(self) -> bool:\n        return False\n\n    @classmethod\n    def get_object(cls, obj, transposed: bool):\n        return obj\n\n    def write(self, obj, data_columns=None, **kwargs) -> None:\n        if not isinstance(obj, DataFrame):\n            name = obj.name or 'values'\n            obj = obj.to_frame(name)\n        super().write(obj=obj, data_columns=obj.columns.tolist(), **kwargs)\n\n    def read(self, where=None, columns=None, start: int | None=None, stop: int | None=None) -> Series:\n        is_multi_index = self.is_multi_index\n        if columns is not None and is_multi_index:\n            assert isinstance(self.levels, list)\n            for n in self.levels:\n                if n not in columns:\n                    columns.insert(0, n)\n        s = super().read(where=where, columns=columns, start=start, stop=stop)\n        if is_multi_index:\n            s.set_index(self.levels, inplace=True)\n        s = s.iloc[:, 0]\n        if s.name == 'values':\n            s.name = None\n        return s",
    "docstring": "support the new appendable table formats",
    "type": "class",
    "file_path": "pandas\\pandas\\io\\pytables.py",
    "ast_data": "ClassDef name:AppendableSeriesTable Assign Assign Assign Assign FunctionDef name:is_transposed arg:self arguments arg Return return:yes FunctionDef name:get_object arg:cls arg:obj arg:transposed arguments arg arg arg Return return:yes FunctionDef name:write arg:self arg:obj arg:data_columns arguments arg arg arg arg If Call Assign BoolOp Assign Call Call Call Call FunctionDef name:read arg:self arg:where arg:columns arg:start arg:stop arguments arg arg arg arg arg Assign If BoolOp Compare Call For If Compare Call Assign Call Call If Call Assign If Compare Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_tpu_multi_host_concat",
    "source_code": "def _tpu_multi_host_concat(v, strategy):\n    replicas = strategy.unwrap(v)\n    num_replicas_per_host = strategy.extended.num_replicas_per_host\n    ordered_replicas = []\n    for replica_id in range(num_replicas_per_host):\n        ordered_replicas += replicas[replica_id::num_replicas_per_host]\n    return concat(ordered_replicas)",
    "docstring": "Correctly order TPU PerReplica objects.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\training.py",
    "ast_data": "FunctionDef name:_tpu_multi_host_concat arg:v arg:strategy arguments arg arg Assign Call Assign Assign For Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "unbatch",
    "source_code": "def unbatch(self, name=None) -> 'DatasetV2':\n    from tensorflow.python.data.ops import unbatch_op\n    return unbatch_op._unbatch(self, name=name)",
    "docstring": "Splits elements of a dataset into multiple elements. For example, if elements of the dataset are shaped , where may vary for each input element, then for each element in the dataset, the unbatched dataset will contain consecutive elements of shape . >>> elements = [ [1, 2, 3], [1, 2], [1, 2, 3, 4] ] >>> dataset = tf.data.Dataset.from_generator(lambda: elements, tf.int64) >>> dataset = dataset.unbatch() >>> [a.item() for a in dataset.as_numpy_iterator()] [1, 2, 3, 1, 2, 1, 2, 3, 4] Note: requires a data copy to slice up the batched tensor into smaller, unbatched tensors. When optimizing performance, try to avoid unnecessary usage of . Args: name: (Optional.) A name for the tf.data operation. Returns: A new with the transformation applied as described above.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\data\\ops\\dataset_ops.py",
    "ast_data": "FunctionDef name:unbatch arg:self arg:name arguments arg arg Return return:yes Call"
  },
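A runnable variant of the docstring example, using the modern `output_signature` form of `from_generator` for ragged-length rows:

```python
import tensorflow as tf

# Three elements of varying length, flattened into nine scalar elements.
elements = [[1, 2, 3], [1, 2], [1, 2, 3, 4]]
dataset = tf.data.Dataset.from_generator(
    lambda: elements,
    output_signature=tf.TensorSpec(shape=(None,), dtype=tf.int64),
)
print([int(a) for a in dataset.unbatch().as_numpy_iterator()])
# [1, 2, 3, 1, 2, 1, 2, 3, 4]
```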
  {
    "library": "pandas",
    "name": "maybe_extract_name",
    "source_code": "def maybe_extract_name(name, obj, cls) -> Hashable:\n    if name is None and isinstance(obj, (Index, ABCSeries)):\n        name = obj.name\n    if not is_hashable(name):\n        raise TypeError(f'{cls.__name__}.name must be a hashable type')\n    return name",
    "docstring": "If no name is passed, then extract it from data, validating hashability.",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\indexes\\base.py",
    "ast_data": "FunctionDef name:maybe_extract_name arg:name arg:obj arg:cls arguments arg arg arg If BoolOp Compare Call Assign If Call Raise Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "master_target",
    "source_code": "@property\ndef master_target(self):\n    return self._master_target",
    "docstring": "Returns the session master for the corresponding task to connect to.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\distribute_coordinator.py",
    "ast_data": "FunctionDef name:master_target arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_iterator_transformer",
    "source_code": "def _iterator_transformer(parent, node, full_name, name, logs):\n    if full_name and (full_name.startswith('tf.compat.v1.data') or full_name.startswith('tf.data')):\n        return\n    if not isinstance(node.func, ast.Attribute):\n        return\n    node.args = [node.func.value] + node.args\n    node.func.value = ast_edits.full_name_node('tf.compat.v1.data')\n    logs.append((ast_edits.WARNING, node.lineno, node.col_offset, 'Changing dataset.%s() to tf.compat.v1.data.%s(dataset). Please check this transformation.\\n' % (name, name)))\n    return node",
    "docstring": "Transform iterator methods to compat function calls.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\tools\\compatibility\\tf_upgrade_v2.py",
    "ast_data": "FunctionDef name:_iterator_transformer arg:parent arg:node arg:full_name arg:name arg:logs arguments arg arg arg arg arg If BoolOp BoolOp Call Call Return return:no If Call Return return:no Assign Assign Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "true_divide",
    "source_code": "@_onnx_symbolic('aten::true_divide')\ndef true_divide(g: jit_utils.GraphContext, self, other):\n    if symbolic_helper._is_fp(self) or symbolic_helper._is_fp(other):\n        return g.op('Div', self, other)\n    scalar_type = torch.get_default_dtype()\n    onnx_scalar_type = _C_onnx.TensorProtoDataType.FLOAT\n    assert scalar_type is torch.float or scalar_type is torch.double\n    if torch.get_default_dtype() is torch.double:\n        onnx_scalar_type = _C_onnx.TensorProtoDataType.DOUBLE\n    self = g.op('Cast', self, to_i=onnx_scalar_type)\n    other = g.op('Cast', other, to_i=onnx_scalar_type)\n    return g.op('Div', self, other)",
    "docstring": "Division where both inputs are cast to floating types If both inputs are floating, performs div as usual If only one input is a floating type, the other input is cast to its type If neither input is a floating type, both inputs are cast to the default scalar type",
    "type": "function",
    "file_path": "pytorch\\torch\\onnx\\symbolic_opset9.py",
    "ast_data": "FunctionDef name:true_divide arg:g arg:self arg:other arguments arg arg arg If BoolOp Call Call Return return:yes Call Assign Call Assign BoolOp Compare Compare If Compare Call Assign Assign Call Assign Call Return return:yes Call Call"
  },
  {
    "library": "pandas",
    "name": "to_coo",
    "source_code": "def to_coo(self) -> spmatrix:\n    import_optional_dependency('scipy')\n    from scipy.sparse import coo_matrix\n    dtype = find_common_type(self._parent.dtypes.to_list())\n    if isinstance(dtype, SparseDtype):\n        dtype = dtype.subtype\n    cols, rows, data = ([], [], [])\n    for col, (_, ser) in enumerate(self._parent.items()):\n        sp_arr = ser.array\n        row = sp_arr.sp_index.indices\n        cols.append(np.repeat(col, len(row)))\n        rows.append(row)\n        data.append(sp_arr.sp_values.astype(dtype, copy=False))\n    cols = np.concatenate(cols)\n    rows = np.concatenate(rows)\n    data = np.concatenate(data)\n    return coo_matrix((data, (rows, cols)), shape=self._parent.shape)",
    "docstring": "Return the contents of the frame as a sparse SciPy COO matrix. Returns ------- scipy.sparse.spmatrix If the caller is heterogeneous and contains booleans or objects, the result will be of dtype=object. See Notes. See Also -------- DataFrame.sparse.to_dense : Convert a DataFrame with sparse values to dense. Notes ----- The dtype will be the lowest-common-denominator type (implicit upcasting); that is to say if the dtypes (even of numeric types) are mixed, the one that accommodates all will be chosen. e.g. If the dtypes are float16 and float32, dtype will be upcast to float32. By numpy.find_common_type convention, mixing int64 and and uint64 will result in a float64 dtype. Examples -------- >>> df = pd.DataFrame({\"A\": pd.arrays.SparseArray([0, 1, 0, 1])}) >>> df.sparse.to_coo()",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\arrays\\sparse\\accessor.py",
    "ast_data": "FunctionDef name:to_coo arg:self arguments arg Call Assign Call Call If Call Assign Assign For Call Call Assign Assign Call Call Call Call Call Call Assign Call Assign Call Assign Call Return return:yes Call"
  },
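A small sketch of the accessor in action (SciPy must be installed, per the `import_optional_dependency` check above):

```python
import pandas as pd

df = pd.DataFrame({"A": pd.arrays.SparseArray([0, 1, 0, 1])})
coo = df.sparse.to_coo()

print(type(coo).__name__)  # coo_matrix
print(coo.shape, coo.nnz)  # (4, 1) 2 -- only the non-fill values are stored
```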
  {
    "library": "scikit-learn",
    "name": "transform",
    "source_code": "def transform(self, X, copy=None):\n    check_is_fitted(self)\n    copy = copy if copy is not None else self.copy\n    X = validate_data(self, X, reset=False, accept_sparse='csr', copy=copy, dtype=FLOAT_DTYPES, force_writeable=True, ensure_all_finite='allow-nan')\n    if sparse.issparse(X):\n        if self.with_mean:\n            raise ValueError('Cannot center sparse matrices: pass `with_mean=False` instead. See docstring for motivation and alternatives.')\n        if self.scale_ is not None:\n            inplace_column_scale(X, 1 / self.scale_)\n    else:\n        if self.with_mean:\n            X -= self.mean_\n        if self.with_std:\n            X /= self.scale_\n    return X",
    "docstring": "Perform standardization by centering and scaling. Parameters ---------- X : {array-like, sparse matrix of shape (n_samples, n_features) The data used to scale along the features axis. copy : bool, default=None Copy the input X or not. Returns ------- X_tr : {ndarray, sparse matrix} of shape (n_samples, n_features) Transformed array.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\preprocessing\\_data.py",
    "ast_data": "FunctionDef name:transform arg:self arg:X arg:copy arguments arg arg arg Call Assign Compare Assign Call If Call If Raise Call If Compare Call If If Return return:yes"
  },
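A minimal end-to-end sketch of the dense branch above (fit computes `mean_` and `scale_`; transform centers and scales in place on the validated copy):

```python
import numpy as np
from sklearn.preprocessing import StandardScaler

X = np.array([[0.0, 10.0], [2.0, 30.0]])
scaler = StandardScaler().fit(X)

# Each column is centered to zero mean and scaled to unit variance.
print(scaler.transform(X))
# [[-1. -1.]
#  [ 1.  1.]]
```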
  {
    "library": "matplotlib",
    "name": "with_alpha",
    "source_code": "def with_alpha(self, alpha):\n    if not isinstance(alpha, Real):\n        raise TypeError(f\"'alpha' must be numeric or None, not {type(alpha)}\")\n    if not 0 <= alpha <= 1:\n        ValueError(\"'alpha' must be between 0 and 1, inclusive\")\n    new_cm = self.copy()\n    if not new_cm._isinit:\n        new_cm._init()\n    new_cm._lut[:, 3] = alpha\n    return new_cm",
    "docstring": "Return a copy of the colormap with a new uniform transparency. Parameters ---------- alpha : float The alpha blending value, between 0 (transparent) and 1 (opaque).",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\colors.py",
    "ast_data": "FunctionDef name:with_alpha arg:self arg:alpha arguments arg arg If Call Raise Call Call If Compare Call Assign Call If Call Assign Return return:yes"
  },
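A quick sketch of the copy-on-write behavior, assuming a Matplotlib version new enough to provide `Colormap.with_alpha`:

```python
import matplotlib

# with_alpha returns a copy; the registered colormap is left untouched.
cmap = matplotlib.colormaps["viridis"].with_alpha(0.5)
r, g, b, a = cmap(0.5)
print(a)  # 0.5
```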
  {
    "library": "tensorflow",
    "name": "get_gradient_components",
    "source_code": "@abc.abstractmethod\ndef get_gradient_components(self, value):\n    raise NotImplementedError(f'{type(self).__name__}.get_gradient_components()')",
    "docstring": "Returns the components of that should be included in gradients. This method may not call TensorFlow ops, since any new ops added to the graph would not be properly tracked by the gradient mechanisms. Args: value: A value. Returns: A nested structure of or .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\composite_tensor_gradient.py",
    "ast_data": "FunctionDef name:get_gradient_components arg:self arg:value arguments arg arg Raise Call Call"
  },
  {
    "library": "pytorch",
    "name": "strict_fusion",
    "source_code": "class strict_fusion:\n\n    def __init__(self) -> None:\n        if not torch._jit_internal.is_scripting():\n            warnings.warn('Only works in script mode')\n\n    def __enter__(self):\n        pass\n\n    def __exit__(self, type: Any, value: Any, tb: Any) -> None:\n        pass",
    "docstring": "Give errors if not all nodes have been fused in inference, or symbolically differentiated in training. Example: Forcing fusion of additions. .. code-block:: python @torch.jit.script def foo(x): with torch.jit.strict_fusion(): return x + x + x",
    "type": "class",
    "file_path": "pytorch\\torch\\jit\\__init__.py",
    "ast_data": "ClassDef name:strict_fusion FunctionDef name:__init__ arg:self arguments arg If Call Call FunctionDef name:__enter__ arg:self arguments arg FunctionDef name:__exit__ arg:self arg:type arg:value arg:tb arguments arg arg arg arg"
  },
  {
    "library": "django",
    "name": "actions",
    "source_code": "@property\ndef actions(self):\n    return self._actions.items()",
    "docstring": "Get all the enabled actions as an iterable of (name, func).",
    "type": "method",
    "file_path": "django\\django\\contrib\\admin\\sites.py",
    "ast_data": "FunctionDef name:actions arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "is_leaf_module",
    "source_code": "@compatibility(is_backward_compatible=True)\ndef is_leaf_module(self, m: torch.nn.Module, module_qualified_name: str) -> bool:\n    return (m.__module__.startswith('torch.nn') or m.__module__.startswith('torch.ao.nn')) and (not isinstance(m, torch.nn.Sequential))",
    "docstring": "A method to specify whether a given `` here.",
    "type": "method",
    "file_path": "pytorch\\torch\\fx\\_symbolic_trace.py",
    "ast_data": "FunctionDef name:is_leaf_module arg:self arg:m arg:module_qualified_name arguments arg arg arg Return return:yes BoolOp BoolOp Call Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "_get_thread_name",
    "source_code": "def _get_thread_name() -> str:\n    return torch._C._get_thread_name()",
    "docstring": "Get the name of the current thread. Returns: str: Name of the current thread.",
    "type": "function",
    "file_path": "pytorch\\torch\\multiprocessing\\__init__.py",
    "ast_data": "FunctionDef name:_get_thread_name arguments Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "InitializationOnlyStatus",
    "source_code": "class InitializationOnlyStatus(_LoadStatus):\n\n    def __init__(self, object_graph_view, restore_uid):\n        self._restore_uid = restore_uid\n        self._object_graph_view = object_graph_view\n        self._root = object_graph_view.root\n\n    def assert_consumed(self):\n        raise AssertionError('No checkpoint specified (save_path=None); nothing is being restored.')\n\n    def assert_existing_objects_matched(self):\n        raise AssertionError('No checkpoint specified (save_path=None); nothing is being restored.')\n\n    def assert_nontrivial_match(self):\n        raise AssertionError('No checkpoint specified (save_path=None); nothing is being restored.')\n\n    def run_restore_ops(self, session=None):\n        raise AssertionError('No checkpoint specified, so no restore ops are available (save_path=None to Saver.restore).')\n\n    def initialize_or_restore(self, session=None):\n        if context.executing_eagerly():\n            return\n        if session is None:\n            session = get_session()\n        trackable_objects = util.list_objects(self._object_graph_view)\n        initializers = [c.initializer for c in trackable_objects if hasattr(c, 'initializer') and c.initializer is not None and (getattr(c, '_update_uid', self._restore_uid - 1) < self._restore_uid)]\n        session.run(initializers)",
    "docstring": "Returned from when no checkpoint has been specified. Objects of this type have the same method as , but it always fails. However, works on objects of both types, and will initialize variables in objects or restore them otherwise.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\checkpoint\\checkpoint.py",
    "ast_data": "ClassDef name:InitializationOnlyStatus FunctionDef name:__init__ arg:self arg:object_graph_view arg:restore_uid arguments arg arg arg Assign Assign Assign FunctionDef name:assert_consumed arg:self arguments arg Raise Call FunctionDef name:assert_existing_objects_matched arg:self arguments arg Raise Call FunctionDef name:assert_nontrivial_match arg:self arguments arg Raise Call FunctionDef name:run_restore_ops arg:self arg:session arguments arg arg Raise Call FunctionDef name:initialize_or_restore arg:self arg:session arguments arg arg If Call Return return:no If Compare Assign Call Assign Call Assign BoolOp Call Compare Compare Call Call"
  },
  {
    "library": "pytorch",
    "name": "offload_wrapper",
    "source_code": "def offload_wrapper(module: torch.nn.Module) -> torch.nn.Module:\n    return OffloadWrapper(module)",
    "docstring": "Wrap a module for activation offloading to CPU. Offloads intermediate activations to the CPU for modules wrapped with this function. Wrappers with activation offload can be composed with ones that do recomputation-based checkpoint to trade off increased compute versus increased CPU memory usage and additional H2D transfers. Usage:: offloaded_module = offload_wrapper(module) outputs = checkpointed_module(inputs) Args: module (nn.Module): The module to be wrapped Returns: (nn.Module): Wrapped module",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\algorithms\\_checkpoint\\checkpoint_wrapper.py",
    "ast_data": "FunctionDef name:offload_wrapper arg:module arguments arg Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "codes",
    "source_code": "@property\ndef codes(self):\n    return self._codes",
    "docstring": "The list of codes in the as a 1D array. Each code is one of , , , , or . For codes that correspond to more than one vertex ( and ), that code will be repeated so that the length of and is always the same.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\path.py",
    "ast_data": "FunctionDef name:codes arg:self arguments arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "topological_sort_schedule",
    "source_code": "def topological_sort_schedule(self, nodes: list[BaseSchedulerNode]) -> list[BaseSchedulerNode]:\n    seen = OrderedSet[BaseSchedulerNode]()\n    name_to_node: dict[str, BaseSchedulerNode] = dict()\n    result: list[BaseSchedulerNode] = []\n\n    def visit(n: BaseSchedulerNode) -> None:\n        if n not in seen:\n            seen.add(n)\n            for dep in sorted(n.unmet_dependencies, key=lambda d: d.name):\n                if dep.name not in name_to_node:\n                    continue\n                visit(name_to_node[dep.name])\n            result.append(n)\n    for node in nodes:\n        for name in node.get_buffer_names():\n            name_to_node[name] = node\n    for node in nodes:\n        visit(node)\n    return result",
    "docstring": "Ensure nodes is in topologically sorted order",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\scheduler.py",
    "ast_data": "FunctionDef name:topological_sort_schedule arg:self arg:nodes arguments arg arg Assign Call Call FunctionDef name:visit arg:n arguments arg If Compare Call For Call arguments arg If Compare Call Call For For Call Assign For Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "all_reduce",
    "source_code": "def all_reduce(self, input_tensor: core.TensorLike, control_input: Optional[Union[core.TensorLike, ops.Operation]]=None, options: Optional[collective_util.Options]=None) -> core.Tensor:\n    instance_key = self._next_instance_key()\n    options = self._options.merge(options)\n    ordering_token = self._get_ordering_token()\n    with ops.device(self._device), self._control_input(control_input):\n        return collective_ops.all_reduce_v2(input_tensor, self._group_size, self._group_key, instance_key, communication_hint=options.implementation.value, timeout=options.timeout_seconds, ordering_token=ordering_token)",
    "docstring": "All-reduce a dense tensor. Args: input_tensor: a dense tensor. It must have the same shape on all replicas. control_input: if not None, add control edges between control_input and the all-reduce. options: an optional tf.distribute.experimental.CommunicationOptions. If provided, it overrides the default options. Returns: The reduced tensor.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\cross_device_utils.py",
    "ast_data": "FunctionDef name:all_reduce arg:self arg:input_tensor arg:control_input arg:options arguments arg arg arg arg Assign Call Assign Call Assign Call With Call Call Return return:yes Call"
  },
  {
    "library": "django",
    "name": "delete",
    "source_code": "def delete(self, name):\n    raise NotImplementedError('subclasses of Storage must provide a delete() method')",
    "docstring": "Delete the specified file from the storage system.",
    "type": "method",
    "file_path": "django\\django\\core\\files\\storage\\base.py",
    "ast_data": "FunctionDef name:delete arg:self arg:name arguments arg arg Raise Call"
  },
  {
    "library": "kornia",
    "name": "__init__",
    "source_code": "def __init__(self, dim: int, num_heads: int, mlp_ratio: float=4.0, qkv_bias: bool=True, norm_layer: type[Module]=nn.LayerNorm, act_layer: type[Module]=nn.GELU, use_rel_pos: bool=False, rel_pos_zero_init: bool=True, window_size: int=0, input_size: Optional[tuple[int, int]]=None) -> None:\n    super().__init__()\n    self.norm1 = norm_layer(dim)\n    self.attn = Attention(dim, num_heads=num_heads, qkv_bias=qkv_bias, use_rel_pos=use_rel_pos, rel_pos_zero_init=rel_pos_zero_init, input_size=input_size if window_size == 0 else (window_size, window_size))\n    self.norm2 = norm_layer(dim)\n    self.mlp = MLPBlock(embedding_dim=dim, mlp_dim=int(dim * mlp_ratio), act=act_layer)\n    self.window_size = window_size",
    "docstring": "Construct transformer block. Args: dim: Number of input channels. num_heads: Number of attention heads in each ViT block. mlp_ratio: Ratio of mlp hidden dim to embedding dim. qkv_bias: If True, add a learnable bias to query, key, value. norm_layer: Normalization layer. act_layer: Activation layer. use_rel_pos: If True, add relative positional embeddings to the attention map. rel_pos_zero_init: If True, zero initialize relative positional parameters. window_size: Window size for window attention blocks. If it equals 0, then use global attention. input_size: Input resolution for calculating the relative positional parameter size.",
    "type": "method",
    "file_path": "kornia\\kornia\\contrib\\models\\sam\\architecture\\image_encoder.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:dim arg:num_heads arg:mlp_ratio arg:qkv_bias arg:norm_layer arg:act_layer arg:use_rel_pos arg:rel_pos_zero_init arg:window_size arg:input_size arguments arg arg arg arg arg arg arg arg arg arg arg Call Call Assign Call Assign Call Compare Assign Call Assign Call Call Assign"
  },
  {
    "library": "pytorch",
    "name": "load_state_dict",
    "source_code": "@override\ndef load_state_dict(self, state_dict: dict[str, Any]) -> None:\n    self.__dict__.update(state_dict)\n    self._init_is_better(mode=self.mode, threshold=self.threshold, threshold_mode=self.threshold_mode)",
    "docstring": "Load the scheduler's state.",
    "type": "method",
    "file_path": "pytorch\\torch\\optim\\lr_scheduler.py",
    "ast_data": "FunctionDef name:load_state_dict arg:self arg:state_dict arguments arg arg Call Call"
  },
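A round-trip sketch for the scheduler state: `state_dict()` returns a plain dict (safe to `torch.save`), and `load_state_dict` restores it into a freshly constructed scheduler.

```python
import torch

model = torch.nn.Linear(4, 4)
opt = torch.optim.SGD(model.parameters(), lr=0.1)

sched = torch.optim.lr_scheduler.ReduceLROnPlateau(opt, mode="min", patience=2)
state = sched.state_dict()

# A new scheduler picks up the saved mode/threshold bookkeeping from the dict.
resumed = torch.optim.lr_scheduler.ReduceLROnPlateau(opt)
resumed.load_state_dict(state)
print(resumed.patience)  # 2
```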
  {
    "library": "tensorflow",
    "name": "TFSlicingOpDispatcher",
    "source_code": "class TFSlicingOpDispatcher(dispatch.OpDispatcher):\n\n    def __init__(self, op):\n        self.op = op\n\n    def handle(self, args, kwargs):\n        args = nest.map_structure(_slice_to_dict, args)\n        kwargs = nest.map_structure(_slice_to_dict, kwargs)\n        if any((isinstance(x, keras_tensor.KerasTensor) for x in nest.flatten([args, kwargs]))):\n            return SlicingOpLambda(self.op)(*args, **kwargs)\n        else:\n            return self.NOT_SUPPORTED",
    "docstring": "A global dispatcher that allows building a functional model with TF Ops.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\layers\\core.py",
    "ast_data": "ClassDef name:TFSlicingOpDispatcher FunctionDef name:__init__ arg:self arg:op arguments arg arg Assign FunctionDef name:handle arg:self arg:args arg:kwargs arguments arg arg arg Assign Call Assign Call If Call Call Call Return return:yes Call Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "get_position",
    "source_code": "def get_position(self):\n    self._ensure_position_is_set()\n    return self._position",
    "docstring": "Return the spine position.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\spines.py",
    "ast_data": "FunctionDef name:get_position arg:self arguments arg Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "CheckpointPolicy",
    "source_code": "class CheckpointPolicy(enum.Enum):\n    MUST_SAVE = 0\n    PREFER_SAVE = 1\n    MUST_RECOMPUTE = 2\n    PREFER_RECOMPUTE = 3",
    "docstring": "Enum for specifying the policy for checkpointing during backpropagation. The following policies are supported: - `torch.compile` every op is NOT equivalent to not using checkpointing. Using such a policy would save additional tensors not limited to ones that are actually needed for gradient computation.",
    "type": "class",
    "file_path": "pytorch\\torch\\utils\\checkpoint.py",
    "ast_data": "ClassDef name:CheckpointPolicy Assign Assign Assign Assign"
  },
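A sketch of how the policy plugs into selective activation checkpointing, assuming a recent PyTorch (2.4+) that exposes `create_selective_checkpoint_contexts` from `torch.utils.checkpoint`:

```python
from functools import partial

import torch
from torch.utils.checkpoint import (
    CheckpointPolicy,
    checkpoint,
    create_selective_checkpoint_contexts,
)

def policy_fn(ctx, op, *args, **kwargs):
    # Keep matmul outputs; let everything else be recomputed in backward.
    if op == torch.ops.aten.mm.default:
        return CheckpointPolicy.MUST_SAVE
    return CheckpointPolicy.PREFER_RECOMPUTE

def fn(x):
    return torch.mm(x, x).relu().sum()

x = torch.randn(8, 8, requires_grad=True)
out = checkpoint(
    fn, x, use_reentrant=False,
    context_fn=partial(create_selective_checkpoint_contexts, policy_fn),
)
out.backward()
```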
  {
    "library": "pytorch",
    "name": "export_repro",
    "source_code": "def export_repro(self, repro_dir: str | None=None, name: str | None=None) -> str:\n    if repro_dir is None:\n        repro_dir = os.getcwd()\n    repro_dir = os.path.join(repro_dir, 'onnx_debug')\n    onnx_graph, onnx_params_dict = _onnx_graph_from_aten_graph(self.graph, self.export_options, self.params_dict)\n    proto, _ = _onnx_proto_from_onnx_graph(onnx_graph, self.export_options, onnx_params_dict)\n    return OnnxTestCaseRepro.create_test_case_repro(proto, self.input_args, self.pt_outs, repro_dir, name)",
    "docstring": "Export the subgraph to ONNX along with the input/output data for repro. The repro directory will contain the following files:: dir ├── test_ │ ├── model.onnx │ └── test_data_set_0 │ ├── input_0.pb │ ├── input_1.pb │ ├── output_0.pb │ └── output_1.pb Args: repro_dir: The directory to export the repro files to. Defaults to current working directory if None. name: An optional name for the test case folder: \"test_{name}\". Returns: The path to the exported repro directory.",
    "type": "method",
    "file_path": "pytorch\\torch\\onnx\\verification.py",
    "ast_data": "FunctionDef name:export_repro arg:self arg:repro_dir arg:name arguments arg arg arg If Compare Assign Call Assign Call Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_distance_graph",
    "source_code": "@classmethod\ndef _distance_graph(cls, inputs, clusters, distance_metric):\n    assert isinstance(inputs, list)\n    if distance_metric == SQUARED_EUCLIDEAN_DISTANCE:\n        return cls._compute_euclidean_distance(inputs, clusters)\n    elif distance_metric == COSINE_DISTANCE:\n        return cls._compute_cosine_distance(inputs, clusters, inputs_normalized=True)\n    else:\n        assert False, str(distance_metric)",
    "docstring": "Computes distance between each input and each cluster center. Args: inputs: list of input Tensors. clusters: cluster Tensor. distance_metric: distance metric used for clustering Returns: list of Tensors, where each element corresponds to each element in inputs. The value is the distance of each row to all the cluster centers. Currently only Euclidean distance and cosine distance are supported.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\clustering_ops.py",
    "ast_data": "FunctionDef name:_distance_graph arg:cls arg:inputs arg:clusters arg:distance_metric arguments arg arg arg arg Call If Compare Return return:yes Call If Compare Return return:yes Call Call"
  },
  {
    "library": "sphinx",
    "name": "setup",
    "source_code": "def setup(app: Sphinx, status: IO[str], warning: IO[str]) -> None:\n    logger = logging.getLogger(NAMESPACE)\n    logger.setLevel(logging.DEBUG)\n    logger.propagate = False\n    for handler in logger.handlers[:]:\n        logger.removeHandler(handler)\n    info_handler = NewLineStreamHandler(SafeEncodingWriter(status))\n    info_handler.addFilter(InfoFilter())\n    info_handler.addFilter(InfoLogRecordTranslator(app))\n    info_handler.setLevel(VERBOSITY_MAP[app.verbosity])\n    info_handler.setFormatter(ColorizeFormatter())\n    warning_handler = WarningStreamHandler(SafeEncodingWriter(warning))\n    if app._exception_on_warning:\n        warning_handler.addFilter(_RaiseOnWarningFilter())\n    warning_handler.addFilter(WarningSuppressor(app))\n    warning_handler.addFilter(WarningLogRecordTranslator(app))\n    warning_handler.addFilter(OnceFilter())\n    warning_handler.setLevel(logging.WARNING)\n    warning_handler.setFormatter(ColorizeFormatter())\n    messagelog_handler = logging.StreamHandler(LastMessagesWriter(app, status))\n    messagelog_handler.addFilter(InfoFilter())\n    messagelog_handler.setLevel(VERBOSITY_MAP[app.verbosity])\n    logger.addHandler(info_handler)\n    logger.addHandler(warning_handler)\n    logger.addHandler(messagelog_handler)",
    "docstring": "Setup root logger for Sphinx",
    "type": "function",
    "file_path": "sphinx\\sphinx\\util\\logging.py",
    "ast_data": "FunctionDef name:setup arg:app arg:status arg:warning arguments arg arg arg Assign Call Call Assign For Call Assign Call Call Call Call Call Call Call Call Call Assign Call Call If Call Call Call Call Call Call Call Call Call Call Call Assign Call Call Call Call Call Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_get_loss_object",
    "source_code": "def _get_loss_object(self, loss):\n    if loss is None:\n        return None\n    loss = losses_mod.get(loss)\n    if not isinstance(loss, losses_mod.Loss):\n        loss_name = get_custom_object_name(loss)\n        if loss_name is None:\n            raise ValueError('Loss should be a callable, found: {}'.format(loss))\n        loss = losses_mod.LossFunctionWrapper(loss, name=loss_name)\n    loss._allow_sum_over_batch_size = True\n    return loss",
    "docstring": "Returns a object. Converts the user-supplied loss to a object. Also allows reduction to be used for this loss. Args: loss: A string, function, or object. Returns: A object.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\compile_utils.py",
    "ast_data": "FunctionDef name:_get_loss_object arg:self arg:loss arguments arg arg If Compare Return return:no Assign Call If Call Assign Call If Compare Raise Call Call Assign Call Assign Return return:yes"
  },
  {
    "library": "django",
    "name": "_check_fields",
    "source_code": "def _check_fields(self, obj):\n    if obj.fields is None:\n        return []\n    elif not isinstance(obj.fields, (list, tuple)):\n        return must_be('a list or tuple', option='fields', obj=obj, id='admin.E004')\n    elif obj.fieldsets:\n        return [checks.Error(\"Both 'fieldsets' and 'fields' are specified.\", obj=obj.__class__, id='admin.E005')]\n    field_counts = collections.Counter(flatten(obj.fields))\n    if (duplicate_fields := [field for field, count in field_counts.items() if count > 1]):\n        return [checks.Error(\"The value of 'fields' contains duplicate field(s).\", hint='Remove duplicates of %s.' % ', '.join(map(repr, duplicate_fields)), obj=obj.__class__, id='admin.E006')]\n    return list(chain.from_iterable((self._check_field_spec(obj, field_name, 'fields') for field_name in obj.fields)))",
    "docstring": "Check that only refer to existing fields, doesn't contain duplicates. Check if at most one of and is defined.",
    "type": "method",
    "file_path": "django\\django\\contrib\\admin\\checks.py",
    "ast_data": "FunctionDef name:_check_fields arg:self arg:obj arguments arg arg If Compare Return return:no If Call Return return:yes Call If Return return:yes Call Assign Call Call If Call Compare Return return:yes Call Call Call Return return:yes Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_create_slots",
    "source_code": "def _create_slots(self, table: 'TableConfig', variable_creator: Callable[[Text, init_ops_v2.Initializer], tf_variables.Variable], initializer_wrapper: Optional[Callable[[str, init_ops_v2.Initializer], init_ops_v2.Initializer]]=None) -> Dict[Text, tf_variables.Variable]:\n    names = self._slot_names()\n    initializers = self._slot_initializers()\n    if initializer_wrapper is not None:\n        initializers = [initializer_wrapper(name, initializer) for name, initializer in zip(names, initializers)]\n    if self.slot_variable_creation_fn is not None:\n        return self.slot_variable_creation_fn(table, names, initializers)\n    else:\n        slots = {}\n        for slot, initializer in zip(names, initializers):\n            slots[slot] = variable_creator(slot, initializer)\n        return slots",
    "docstring": "Creates slot variables for table. Args: table: The table variable to create slots for. variable_creator: A function which creates variables. Takes parameters 'name', 'initializer'. initializer_wrapper: A function that wraps the initializer. Returns: A dict of variables, keyed by self._slot_names().",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\tpu\\tpu_embedding_v2_utils.py",
    "ast_data": "FunctionDef name:_create_slots arg:self arg:table arg:variable_creator arg:initializer_wrapper arguments arg arg arg arg Assign Call Assign Call If Compare Assign Call Call If Compare Return return:yes Call Assign For Call Assign Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "GcovCoverageParser",
    "source_code": "class GcovCoverageParser:\n\n    def __init__(self, llvm_coverage: dict[str, Any]) -> None:\n        self._llvm_coverage = llvm_coverage\n\n    @staticmethod\n    def _skip_coverage(path: str) -> bool:\n        return 'third-party' in path\n\n    def parse(self) -> list[CoverageRecord]:\n        records: list[CoverageRecord] = []\n        for file_info in self._llvm_coverage['files']:\n            filepath = file_info['file']\n            if self._skip_coverage(filepath):\n                continue\n            covered_lines: set[int] = set()\n            uncovered_lines: set[int] = set()\n            for line in file_info['lines']:\n                line_number = line['line_number']\n                count = line['count']\n                if count == 0:\n                    uncovered_lines.update([line_number])\n                else:\n                    covered_lines.update([line_number])\n            records.append(CoverageRecord(filepath, sorted(covered_lines), sorted(uncovered_lines)))\n        return records",
    "docstring": "Accepts a parsed json produced by gcov --json-format -- typically, representing a single C++ test and produces a list of CoverageRecord(s).",
    "type": "class",
    "file_path": "pytorch\\tools\\code_coverage\\package\\tool\\parser\\gcov_coverage_parser.py",
    "ast_data": "ClassDef name:GcovCoverageParser FunctionDef name:__init__ arg:self arg:llvm_coverage arguments arg arg Assign FunctionDef name:_skip_coverage arg:path arguments arg Return return:yes Compare FunctionDef name:parse arg:self arguments arg For Assign If Call Call Call For Assign Assign If Compare Call Call Call Call Call Call Return return:yes"
  },
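Since this parser lives under pytorch/tools rather than the installed torch package, here is a sketch of the input shape it expects, assuming `GcovCoverageParser` and `CoverageRecord` are in scope; the file paths and counts are made up:

```python
# Hypothetical input mirroring the gcov --json-format structure.
llvm_coverage = {
    "files": [
        {
            "file": "aten/src/ATen/foo.cpp",
            "lines": [
                {"line_number": 10, "count": 3},   # executed -> covered
                {"line_number": 11, "count": 0},   # never executed -> uncovered
            ],
        },
        {"file": "third-party/dep.cpp", "lines": []},  # skipped by _skip_coverage
    ]
}

records = GcovCoverageParser(llvm_coverage).parse()
# -> one CoverageRecord for foo.cpp: covered_lines=[10], uncovered_lines=[11]
```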
  {
    "library": "pytorch",
    "name": "_copy_over_literal_conv_args",
    "source_code": "def _copy_over_literal_conv_args(original_node: Node, new_node: Node):\n    assert _is_conv_or_conv_transpose_node(original_node)\n    assert _is_conv_or_conv_transpose_node(new_node)\n    new_args = list(new_node.args)\n    if len(new_args) < 3:\n        new_args.append(None)\n    new_node.args = tuple(new_args[:3]) + original_node.args[3:]",
    "docstring": "Copy over literal args in conv, such as stride and padding, from the matched node in the original graph to its replacement in the new graph. This is needed due to the following limitation in the subgraph rewriter when used with dynamo export: literal (non-tensor) args are not supported in the match and replacement patterns. This is because dynamo export automatically inlines these literal args, making them dead placeholder nodes. In the future, we should check if dynamo export can optionally disable this inlining, or if subgraph rewriter can do the copying for us. See Note: Unlike other tensor args like conv weights and biases, literal args are preserved in the original nodes after replacement, so we can access them here.",
    "type": "function",
    "file_path": "pytorch\\torch\\ao\\quantization\\pt2e\\qat_utils.py",
    "ast_data": "FunctionDef name:_copy_over_literal_conv_args arg:original_node arg:new_node arguments arg arg Call Call Assign Call If Compare Call Call Assign Call"
  },
  {
    "library": "kornia",
    "name": "CornerGFTT",
    "source_code": "class CornerGFTT(Module):\n\n    def __init__(self, grads_mode: str='sobel') -> None:\n        super().__init__()\n        self.grads_mode: str = grads_mode\n\n    def __repr__(self) -> str:\n        return f'{self.__class__.__name__}(grads_mode={self.grads_mode})'\n\n    def forward(self, input: Tensor, sigmas: Optional[Tensor]=None) -> Tensor:\n        return gftt_response(input, self.grads_mode, sigmas)",
    "docstring": "Module that calculates Shi-Tomasi corners. .. image:: _static/img/gftt_response.png See :func: for details.",
    "type": "class",
    "file_path": "kornia\\kornia\\feature\\responses.py",
    "ast_data": "ClassDef name:CornerGFTT FunctionDef name:__init__ arg:self arg:grads_mode arguments arg arg Call Call FunctionDef name:__repr__ arg:self arguments arg Return return:yes FunctionDef name:forward arg:self arg:input arg:sigmas arguments arg arg arg Return return:yes Call"
  },
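A minimal usage sketch, assuming a kornia build that exports `CornerGFTT` from `kornia.feature`:

```python
import torch
from kornia.feature import CornerGFTT

img = torch.rand(1, 1, 32, 32)      # BxCxHxW grayscale batch
detector = CornerGFTT(grads_mode="sobel")
response = detector(img)            # Shi-Tomasi response map
print(response.shape)               # torch.Size([1, 1, 32, 32])
```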
  {
    "library": "scipy",
    "name": "lex",
    "source_code": "def lex(s, name=None, trim_whitespace=True, line_offset=0, delimiters=None):\n    if delimiters is None:\n        delimiters = (Template.default_namespace['start_braces'], Template.default_namespace['end_braces'])\n    in_expr = False\n    chunks = []\n    last = 0\n    last_pos = (line_offset + 1, 1)\n    token_re = re.compile('%s|%s' % (re.escape(delimiters[0]), re.escape(delimiters[1])))\n    for match in token_re.finditer(s):\n        expr = match.group(0)\n        pos = find_position(s, match.end(), last, last_pos)\n        if expr == delimiters[0] and in_expr:\n            raise TemplateError('%s inside expression' % delimiters[0], position=pos, name=name)\n        elif expr == delimiters[1] and (not in_expr):\n            raise TemplateError('%s outside expression' % delimiters[1], position=pos, name=name)\n        if expr == delimiters[0]:\n            part = s[last:match.start()]\n            if part:\n                chunks.append(part)\n            in_expr = True\n        else:\n            chunks.append((s[last:match.start()], last_pos))\n            in_expr = False\n        last = match.end()\n        last_pos = pos\n    if in_expr:\n        raise TemplateError('No %s to finish last expression' % delimiters[1], name=name, position=last_pos)\n    part = s[last:]\n    if part:\n        chunks.append(part)\n    if trim_whitespace:\n        chunks = trim_lex(chunks)\n    return chunks",
    "docstring": "Lex a string into chunks: >>> lex('hey') ['hey'] >>> lex('hey {{you}}') ['hey ', ('you', (1, 7))] >>> lex('hey {{') Traceback (most recent call last): ... TemplateError: No }} to finish last expression at line 1 column 7 >>> lex('hey }}') Traceback (most recent call last): ... TemplateError: }} outside expression at line 1 column 7 >>> lex('hey {{ {{') Traceback (most recent call last): ... TemplateError: {{ inside expression at line 1 column 10",
    "type": "function",
    "file_path": "scipy\\scipy\\_build_utils\\tempita\\_tempita.py",
    "ast_data": "FunctionDef name:lex arg:s arg:name arg:trim_whitespace arg:line_offset arg:delimiters arguments arg arg arg arg arg If Compare Assign Assign Assign Assign Assign Assign Call Call Call For Call Assign Call Assign Call Call If BoolOp Compare Raise Call If BoolOp Compare Raise Call If Compare Assign Call If Call Assign Call Call Assign Assign Call Assign If Raise Call Assign If Call If Assign Call Return return:yes"
  },
  {
    "library": "scipy",
    "name": "pade",
    "source_code": "def pade(an, m, n=None):\n    an = asarray(an)\n    if n is None:\n        n = len(an) - 1 - m\n        if n < 0:\n            raise ValueError('Order of q <m> must be smaller than len(an)-1.')\n    if n < 0:\n        raise ValueError('Order of p <n> must be greater than 0.')\n    N = m + n\n    if N > len(an) - 1:\n        raise ValueError('Order of q+p <m+n> must be smaller than len(an).')\n    an = an[:N + 1]\n    Akj = eye(N + 1, n + 1, dtype=an.dtype)\n    Bkj = zeros((N + 1, m), dtype=an.dtype)\n    for row in range(1, m + 1):\n        Bkj[row, :row] = -an[:row][::-1]\n    for row in range(m + 1, N + 1):\n        Bkj[row, :] = -an[row - m:row][::-1]\n    C = hstack((Akj, Bkj))\n    pq = linalg.solve(C, an)\n    p = pq[:n + 1]\n    q = r_[1.0, pq[n + 1:]]\n    return (poly1d(p[::-1]), poly1d(q[::-1]))",
    "docstring": "Return Pade approximation to a polynomial as the ratio of two polynomials. Parameters ---------- an : (N,) array_like Taylor series coefficients. m : int The order of the returned approximating polynomial . n : int, optional The order of the returned approximating polynomial . By default, the order is `an` >>> e_poly(1) 2.7166666666666668 >>> p(1)/q(1) 2.7179487179487181",
    "type": "function",
    "file_path": "scipy\\scipy\\interpolate\\_pade.py",
    "ast_data": "FunctionDef name:pade arg:an arg:m arg:n arguments arg arg arg Assign Call If Compare Assign Call If Compare Raise Call If Compare Raise Call Assign If Compare Call Raise Call Assign Assign Call Assign Call For Call Assign For Call Assign Assign Call Assign Call Assign Assign Return return:yes Call Call"
  },
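A runnable version of the docstring example: with the Taylor coefficients of exp(x) and m=2, the numerator `p` has order 3 and the denominator `q` has order 2.

```python
import numpy as np
from scipy.interpolate import pade

# Taylor coefficients of exp(x); m=2 gives a denominator of order 2.
e_exp = [1.0, 1.0, 1.0 / 2.0, 1.0 / 6.0, 1.0 / 24.0, 1.0 / 120.0]
p, q = pade(e_exp, 2)

print(p(1) / q(1))  # ~2.71795, close to e = 2.71828...
```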
  {
    "library": "scipy",
    "name": "_not_a_knot",
    "source_code": "def _not_a_knot(x, k):\n    x = np.asarray(x)\n    if k % 2 == 1:\n        k2 = (k + 1) // 2\n        t = x.copy()\n    else:\n        k2 = k // 2\n        t = (x[1:] + x[:-1]) / 2\n    t = t[k2:-k2]\n    t = np.r_[(x[0],) * (k + 1), t, (x[-1],) * (k + 1)]\n    return t",
    "docstring": "Given data x, construct the knot vector w/ not-a-knot BC. cf de Boor, XIII(12). For even k, it's a bit ad hoc: Greville sites + omit 2nd and 2nd-to-last data points, a la not-a-knot. This seems to match what Dierckx does, too:",
    "type": "function",
    "file_path": "scipy\\scipy\\interpolate\\_bsplines.py",
    "ast_data": "FunctionDef name:_not_a_knot arg:x arg:k arguments arg arg Assign Call If Compare Assign Assign Call Assign Assign Assign Assign Return return:yes"
  },
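`_not_a_knot` is private, but the same knot construction is what `make_interp_spline` applies for its default (not-a-knot) boundary conditions, so the resulting knot vector is easy to inspect:

```python
import numpy as np
from scipy.interpolate import make_interp_spline

x = np.linspace(0, 1, 8)
y = np.sin(2 * np.pi * x)

spl = make_interp_spline(x, y, k=3)  # default bc_type=None -> not-a-knot
# (k+1)-fold end knots; the 2nd and 2nd-to-last data sites are omitted inside.
print(spl.t)
```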
  {
    "library": "scikit-learn",
    "name": "fit",
    "source_code": "@_fit_context(prefer_skip_nested_validation=False)\ndef fit(self, X, y=None):\n    return self._fit(X)",
    "docstring": "Fit the nearest neighbors estimator from the training dataset. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) or (n_samples, n_samples) if metric='precomputed' Training data. y : Ignored Not used, present for API consistency by convention. Returns ------- self : NearestNeighbors The fitted nearest neighbors estimator.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\neighbors\\_unsupervised.py",
    "ast_data": "FunctionDef name:fit arg:self arg:X arg:y arguments arg arg arg Return return:yes Call Call"
  },
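A small end-to-end sketch: after `fit`, the estimator answers neighbor queries via `kneighbors`.

```python
import numpy as np
from sklearn.neighbors import NearestNeighbors

X = np.array([[0.0], [1.0], [2.0], [5.0]])
nn = NearestNeighbors(n_neighbors=2).fit(X)

dist, idx = nn.kneighbors([[1.2]])
print(idx)   # [[1 2]] -- the two closest training rows
print(dist)  # [[0.2 0.8]]
```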
  {
    "library": "scikit-learn",
    "name": "_convert_to_numpy",
    "source_code": "def _convert_to_numpy(array, xp):\n    if _is_xp_namespace(xp, 'torch'):\n        return array.cpu().numpy()\n    elif _is_xp_namespace(xp, 'cupy'):\n        return array.get()\n    elif _is_xp_namespace(xp, 'array_api_strict'):\n        return numpy.asarray(xp.asarray(array, device=xp.Device('CPU_DEVICE')))\n    return numpy.asarray(array)",
    "docstring": "Convert X into a NumPy ndarray on the CPU.",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\utils\\_array_api.py",
    "ast_data": "FunctionDef name:_convert_to_numpy arg:array arg:xp arguments arg arg If Call Return return:yes Call Call If Call Return return:yes Call If Call Return return:yes Call Call Call Return return:yes Call"
  },
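The dispatch in `_convert_to_numpy` reduces to per-namespace device transfers; a hedged sketch of the same pattern outside scikit-learn, assuming PyTorch is installed:

```python
import numpy as np
import torch

t = torch.arange(3, dtype=torch.float32)  # also works for CUDA tensors
arr = t.cpu().numpy()                     # the torch branch: move to CPU, then view as ndarray
assert isinstance(arr, np.ndarray)
```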
  {
    "library": "tensorflow",
    "name": "get_feature_key_name",
    "source_code": "def get_feature_key_name(self):\n    if self.is_categorical_column_weighted():\n        return self.categorical_column.categorical_column.name\n    return self.categorical_column.name",
    "docstring": "get_feature_key_name.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\tpu\\feature_column_v2.py",
    "ast_data": "FunctionDef name:get_feature_key_name arg:self arguments arg If Call Return return:yes Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_optimize_tf_model",
    "source_code": "@convert_phase(Component.PREPARE_TF_MODEL, SubComponent.OPTIMIZE_TF_MODEL)\ndef _optimize_tf_model(self, graph_def, input_tensors, output_tensors, frozen_func):\n    grappler_config = self._grappler_config()\n    if grappler_config.graph_options.rewrite_options.optimizers:\n        graph_def = _run_graph_optimizations(graph_def, input_tensors, output_tensors, config=grappler_config, graph=frozen_func.graph)\n    return graph_def",
    "docstring": "Run a Grappler pass to optimize the TensorFlow graph. Args: graph_def: Frozen GraphDef to be optimized. input_tensors: List of input tensors. output_tensors: List of output tensors. frozen_func: TensorFlow Graph. Returns: The optimized TensorFlow graph.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\lite\\python\\lite.py",
    "ast_data": "FunctionDef name:_optimize_tf_model arg:self arg:graph_def arg:input_tensors arg:output_tensors arg:frozen_func arguments arg arg arg arg arg Assign Call If Assign Call Return return:yes Call"
  },
  {
    "library": "pygame",
    "name": "array",
    "source_code": "def array(sound):\n    return numpy.array(sound, copy=True)",
    "docstring": "pygame.sndarray.array(Sound): return array Copy Sound samples into an array. Creates a new array for the sound data and copies the samples. The array will always be in the format returned from pygame.mixer.get_init().",
    "type": "function",
    "file_path": "pygame\\src_py\\sndarray.py",
    "ast_data": "FunctionDef name:array arg:sound arguments arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "_get_fsdp_root_states_with_modules",
    "source_code": "def _get_fsdp_root_states_with_modules(module: nn.Module) -> tuple[list[_FSDPState], list[nn.Module]]:\n    fsdp_root_states: list[_FSDPState] = []\n    fsdp_root_modules: list[nn.Module] = []\n    visited_fsdp_states: set[_FSDPState] = set()\n    for submodule in module.modules():\n        optional_state = _get_module_fsdp_state(submodule)\n        if optional_state is not None and optional_state not in visited_fsdp_states and _is_fsdp_root(optional_state, submodule):\n            visited_fsdp_states.add(optional_state)\n            fsdp_root_states.append(optional_state)\n            fsdp_root_modules.append(submodule)\n    return (fsdp_root_states, fsdp_root_modules)",
    "docstring": "Returns a tuple containing: 1. A list of the root `_get_fsdp_states_with_modules_is_fsdp_root` to force a lazy initialization to determine the FSDP root in case lazy initialization has not yet happened.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\fsdp\\_runtime_utils.py",
    "ast_data": "FunctionDef name:_get_fsdp_root_states_with_modules arg:module arguments arg Call For Call Assign Call If BoolOp Compare Compare Call Call Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "asgd",
    "source_code": "@_disable_dynamo_if_unsupported(single_tensor_fn=_single_tensor_asgd)\ndef asgd(params: list[Tensor], grads: list[Tensor], axs: list[Tensor], mus: list[Tensor], etas: list[Tensor], state_steps: list[Tensor], foreach: Optional[bool]=None, maximize: bool=False, differentiable: bool=False, capturable: bool=False, has_complex: bool=False, *, lambd: float, lr: float, t0: float, alpha: float, weight_decay: float):\n    if foreach is None:\n        _, foreach = _default_to_fused_or_foreach(params, differentiable, use_fused=False)\n    if foreach and torch.jit.is_scripting():\n        raise RuntimeError('torch.jit.script not supported with foreach optimizers')\n    if foreach and (not torch.jit.is_scripting()):\n        func = _multi_tensor_asgd\n    else:\n        func = _single_tensor_asgd\n    func(params, grads, axs, mus, etas, state_steps, lambd=lambd, lr=lr, t0=t0, alpha=alpha, weight_decay=weight_decay, maximize=maximize, differentiable=differentiable, capturable=capturable, has_complex=has_complex)",
    "docstring": "Functional API that performs asgd algorithm computation. See :class: for details.",
    "type": "function",
    "file_path": "pytorch\\torch\\optim\\asgd.py",
    "ast_data": "FunctionDef name:asgd arg:params arg:grads arg:axs arg:mus arg:etas arg:state_steps arg:foreach arg:maximize arg:differentiable arg:capturable arg:has_complex arguments arg arg arg arg arg arg arg arg arg arg arg arg arg arg arg arg If Compare Assign Call If BoolOp Call Raise Call If BoolOp Call Assign Assign Call Call"
  },
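Rather than calling the functional `asgd` directly, the usual entry point is the `torch.optim.ASGD` class, which dispatches to it; a minimal sketch:

```python
import torch

model = torch.nn.Linear(4, 2)
opt = torch.optim.ASGD(model.parameters(), lr=1e-2, lambd=1e-4, t0=1e6)

loss = model(torch.randn(8, 4)).pow(2).mean()
loss.backward()
opt.step()  # internally forwards to the functional asgd() shown above
```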
  {
    "library": "matplotlib",
    "name": "get_custom_preamble",
    "source_code": "@classmethod\ndef get_custom_preamble(cls):\n    return mpl.rcParams['text.latex.preamble']",
    "docstring": "Return a string containing user additions to the tex preamble.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\texmanager.py",
    "ast_data": "FunctionDef name:get_custom_preamble arg:cls arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_delegate_property",
    "source_code": "def _delegate_property(keras_tensor_cls, property_name):\n    property_access = property(lambda self: InstanceProperty(property_name)(self))\n    setattr(keras_tensor_cls, property_name, property_access)",
    "docstring": "Register property on a KerasTensor class. Calling this multiple times with the same arguments should be a no-op. This method exposes a property on the KerasTensor class that will use an layer to access the property on the represented intermediate values in the model. Args: keras_tensor_cls: The KerasTensor subclass that should expose the property. property_name: The name of the property to expose and delegate to the represented (Composite)Tensor.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\layers\\core.py",
    "ast_data": "FunctionDef name:_delegate_property arg:keras_tensor_cls arg:property_name arguments arg arg Assign Call arguments arg Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "ready_op",
    "source_code": "@property\ndef ready_op(self):\n    return self._ready_op",
    "docstring": "Return the Ready Op used by the supervisor. Returns: An Op or .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\training\\supervisor.py",
    "ast_data": "FunctionDef name:ready_op arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_export_to_saved_model_graph",
    "source_code": "def _export_to_saved_model_graph(self, object_map, tensor_map, options, **kwargs):\n    resource_list = []\n    for v in self._variables + [self._saving_variable]:\n        resource_list.extend(v._export_to_saved_model_graph(object_map, tensor_map, options, **kwargs))\n    object_map[self] = ShardedVariable([object_map[self._saving_variable]], name=self.name)\n    return resource_list",
    "docstring": "For implementing SavedModel export.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\sharded_variable.py",
    "ast_data": "FunctionDef name:_export_to_saved_model_graph arg:self arg:object_map arg:tensor_map arg:options arguments arg arg arg arg arg Assign For Call Call Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_gather_from_tensor_or_composite",
    "source_code": "def _gather_from_tensor_or_composite(x, i):\n    if _should_expand_composite(x):\n        spec = x._type_spec\n        gathered_tensors = [_broadcasting_gather(t, i) for t in spec._to_batched_tensor_list(x)]\n        return spec._unbatch()._from_compatible_tensor_list(gathered_tensors)\n    return _broadcasting_gather(x, i)",
    "docstring": "Wrapper for gather that handles CompositeTensors.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\parallel_for\\control_flow_ops.py",
    "ast_data": "FunctionDef name:_gather_from_tensor_or_composite arg:x arg:i arguments arg arg If Call Assign Assign Call Call Return return:yes Call Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "_prepare_init",
    "source_code": "def _prepare_init(self, remote_device_str: str) -> bool:\n    assert rpc._is_current_rpc_agent_set(), 'RemoteModule only works in RPC.'\n    remote_device = _remote_device(remote_device_str)\n    self.on = remote_device.worker_name() if remote_device.worker_name() is not None else remote_device.rank()\n    self.device = str(remote_device.device())\n    agent = rpc._get_current_rpc_agent()\n    self.is_device_map_set = bool(agent._get_device_map(agent.get_worker_info(self.on)))\n    enable_moving_cpu_tensors_to_cuda = torch.device(self.device).type == 'cuda'\n    return enable_moving_cpu_tensors_to_cuda",
    "docstring": "Prepare the initialization and returns whether to enable automatically moving CPU tensors to CUDA devices.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\nn\\api\\remote_module.py",
    "ast_data": "FunctionDef name:_prepare_init arg:self arg:remote_device_str arguments arg arg Call Assign Call Assign Compare Call Call Call Assign Call Call Assign Call Assign Call Call Call Assign Compare Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "set_axes_locator",
    "source_code": "def set_axes_locator(self, locator):\n    self._axes_locator = locator\n    self.stale = True",
    "docstring": "Set the Axes locator. Parameters ---------- locator : Callable[[Axes, Renderer], Bbox]",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\axes\\_base.py",
    "ast_data": "FunctionDef name:set_axes_locator arg:self arg:locator arguments arg arg Assign Assign"
  },
  {
    "library": "tensorflow",
    "name": "_load_all",
    "source_code": "def _load_all(self):\n    self._load_nodes()\n    self._load_edges()\n    self._setup_remaining_functions()\n    self._load_checkpoint_save_and_restore_functions()",
    "docstring": "Loads all nodes and functions from the SavedModel and their edges.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\saved_model\\load.py",
    "ast_data": "FunctionDef name:_load_all arg:self arguments arg Call Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "unify_object",
    "source_code": "def unify_object(u, v, s):\n    if type(u) != type(v):\n        return False\n    if hasattr(u, '__slots__'):\n        return unify([getattr(u, slot) for slot in u.__slots__], [getattr(v, slot) for slot in v.__slots__], s)\n    else:\n        return unify(u.__dict__, v.__dict__, s)",
    "docstring": "Unify two Python objects Unifies their type and `` attributes >>> # xdoctest: +SKIP >>> class Foo(object): ... def __init__(self, a, b): ... self.a = a ... self.b = b ... ... def __str__(self): ... return \"Foo(%s, %s)\" % (str(self.a), str(self.b)) >>> x = var(\"x\") >>> f = Foo(1, x) >>> g = Foo(1, 2) >>> unify_object(f, g, {}) {~x: 2}",
    "type": "function",
    "file_path": "pytorch\\torch\\fx\\experimental\\unification\\more.py",
    "ast_data": "FunctionDef name:unify_object arg:u arg:v arg:s arguments arg arg arg If Compare Call Call Return return:yes If Call Return return:yes Call Call Call Return return:yes Call"
  },
  {
    "library": "numpy",
    "name": "filepath_from_subprocess_output",
    "source_code": "def filepath_from_subprocess_output(output):\n    mylocale = locale.getpreferredencoding(False)\n    if mylocale is None:\n        mylocale = 'ascii'\n    output = output.decode(mylocale, errors='replace')\n    output = output.replace('\\r\\n', '\\n')\n    if output[-1:] == '\\n':\n        output = output[:-1]\n    return output",
    "docstring": "Convert in the encoding used by a subprocess into a filesystem-appropriate . Inherited from , and possibly incorrect.",
    "type": "function",
    "file_path": "numpy\\numpy\\distutils\\exec_command.py",
    "ast_data": "FunctionDef name:filepath_from_subprocess_output arg:output arguments arg Assign Call If Compare Assign Assign Call Assign Call If Compare Assign Return return:yes"
  },
  {
    "library": "virtualenv",
    "name": "read",
    "source_code": "def read(self):\n    return",
    "docstring": "Nothing to read.",
    "type": "method",
    "file_path": "virtualenv\\src\\virtualenv\\app_data\\na.py",
    "ast_data": "FunctionDef name:read arg:self arguments arg Return return:no"
  },
  {
    "library": "numpy",
    "name": "check_compiler_gcc",
    "source_code": "def check_compiler_gcc(cmd):\n    cmd._check_compiler()\n    body = textwrap.dedent('\\n        int\\n        main()\\n        {\\n        #if (! defined __GNUC__)\\n        #error gcc required\\n        #endif\\n            return 0;\\n        }\\n        ')\n    return cmd.try_compile(body, None, None)",
    "docstring": "Check if the compiler is GCC.",
    "type": "function",
    "file_path": "numpy\\numpy\\distutils\\command\\autodist.py",
    "ast_data": "FunctionDef name:check_compiler_gcc arg:cmd arguments arg Call Assign Call Return return:yes Call"
  },
  {
    "library": "django",
    "name": "effective_default",
    "source_code": "def effective_default(self, field):\n    return field.get_db_prep_save(self._effective_default(field), self.connection)",
    "docstring": "Return a field's effective database default value.",
    "type": "method",
    "file_path": "django\\django\\db\\backends\\base\\schema.py",
    "ast_data": "FunctionDef name:effective_default arg:self arg:field arguments arg arg Return return:yes Call Call"
  },
  {
    "library": "matplotlib",
    "name": "get_pull_request",
    "source_code": "def get_pull_request(project, num, auth=False):\n    url = f'https://api.github.com/repos/{project}/pulls/{num}'\n    if auth:\n        header = make_auth_header()\n    else:\n        header = None\n    print('fetching %s' % url, file=sys.stderr)\n    response = requests.get(url, headers=header)\n    response.raise_for_status()\n    return json.loads(response.text, object_hook=Obj)",
    "docstring": "Return the pull request info for a given PR number.",
    "type": "function",
    "file_path": "matplotlib\\tools\\gh_api.py",
    "ast_data": "FunctionDef name:get_pull_request arg:project arg:num arg:auth arguments arg arg arg Assign If Assign Call Assign Call Assign Call Call Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "_maybe_coerce_freq",
    "source_code": "def _maybe_coerce_freq(code) -> str:\n    assert code is not None\n    if isinstance(code, DateOffset):\n        code = PeriodDtype(to_offset(code.name))._freqstr\n    if code in {'h', 'min', 's', 'ms', 'us', 'ns'}:\n        return code\n    else:\n        return code.upper()",
    "docstring": "we might need to coerce a code to a rule_code and uppercase it Parameters ---------- source : str or DateOffset Frequency converting from Returns ------- str",
    "type": "function",
    "file_path": "pandas\\pandas\\tseries\\frequencies.py",
    "ast_data": "FunctionDef name:_maybe_coerce_freq arg:code arguments arg Compare If Call Assign Call Call If Compare Return return:yes Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "CrossInTray",
    "source_code": "class CrossInTray(Benchmark):\n\n    def __init__(self, dimensions=2):\n        Benchmark.__init__(self, dimensions)\n        self._bounds = list(zip([-10.0] * self.N, [10.0] * self.N))\n        self.global_optimum = [(1.34940668535334, 1.349406608602084), (-1.34940668535334, 1.349406608602084), (1.34940668535334, -1.349406608602084), (-1.34940668535334, -1.349406608602084)]\n        self.fglob = -2.062611870822739\n\n    def fun(self, x, *args):\n        self.nfev += 1\n        return -0.0001 * (abs(sin(x[0]) * sin(x[1]) * exp(abs(100 - sqrt(x[0] ** 2 + x[1] ** 2) / pi))) + 1) ** 0.1",
    "docstring": "Cross-in-Tray objective function. This class defines the Cross-in-Tray [1]_ global optimization problem. This is a multimodal minimization problem defined as follows: .. math:: f_{\\text{CrossInTray}}(x) = - 0.0001 \\left(\\left|{e^{\\left|{100 - \\frac{\\sqrt{x_{1}^{2} + x_{2}^{2}}}{\\pi}}\\right|} \\sin\\left(x_{1}\\right) \\sin\\left(x_{2}\\right)}\\right| + 1\\right)^{0.1} with :math: for :math:. *Global optimum*: :math: for :math: for :math: .. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions For Global Optimization Problems Int. Journal of Mathematical Modelling and Numerical Optimisation, 2013, 4, 150-194.",
    "type": "class",
    "file_path": "scipy\\benchmarks\\benchmarks\\go_benchmark_functions\\go_funcs_C.py",
    "ast_data": "ClassDef name:CrossInTray FunctionDef name:__init__ arg:self arg:dimensions arguments arg arg Call Assign Call Call Assign Assign FunctionDef name:fun arg:self arg:x arguments arg arg arg Return return:yes Call Call Call Call Call Call"
  },
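To check the `fglob` value stored in the record above, the objective can be evaluated at one of the listed optima; this is a plain NumPy restatement of the `fun` method:

```python
import numpy as np

x = np.array([1.34940668535334, 1.349406608602084])  # one of the four global optima
f = -0.0001 * (abs(np.sin(x[0]) * np.sin(x[1])
               * np.exp(abs(100 - np.hypot(x[0], x[1]) / np.pi))) + 1) ** 0.1
print(f)  # ~ -2.062611870822739, matching self.fglob
```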
  {
    "library": "tensorflow",
    "name": "do_decode",
    "source_code": "def do_decode(self, value, decode_fn):\n    type_spec_proto = value.type_spec_value\n    return self.type_spec_class._deserialize(decode_fn(type_spec_proto.type_state))",
    "docstring": "Returns the built in encoded by the proto .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\saved_model\\nested_structure_coder.py",
    "ast_data": "FunctionDef name:do_decode arg:self arg:value arg:decode_fn arguments arg arg arg Assign Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "DualLevelContextManager",
    "source_code": "class DualLevelContextManager(ContextWrappingVariable):\n    _guards_singleton = Guard(GlobalStateSource(), GuardBuilder.DUAL_LEVEL)\n\n    @staticmethod\n    def create(tx: 'InstructionTranslator', **kwargs):\n        return DualLevelContextManager(target_values=None, initial_values=None, **kwargs)\n\n    def enter(self, tx):\n        install_guard(self._guards_singleton)\n        self.new_level = torch.autograd.forward_ad.enter_dual_level()\n        self.set_cleanup_hook(tx, lambda: torch.autograd.forward_ad.exit_dual_level(level=self.new_level))\n        self.proxy = tx.output.create_node('call_function', torch._C._enter_dual_level, (), {})\n        return variables.ConstantVariable.create(self.new_level)\n\n    def exit(self, tx: 'InstructionTranslator', *args):\n        self.cleanup()\n        tx.output.create_node('call_function', torch._C._exit_dual_level, (self.new_level,), {})\n        return variables.ConstantVariable.create(None)",
    "docstring": "Represents torch.autograd.forward_ad.dual_level ctx manager",
    "type": "class",
    "file_path": "pytorch\\torch\\_dynamo\\variables\\ctx_manager.py",
    "ast_data": "ClassDef name:DualLevelContextManager Assign Call Call FunctionDef name:create arg:tx arguments arg arg Return return:yes Call FunctionDef name:enter arg:self arg:tx arguments arg arg Call Assign Call Call arguments Call Assign Call Return return:yes Call FunctionDef name:exit arg:self arg:tx arguments arg arg arg Call Call Return return:yes Call"
  },
  {
    "library": "django",
    "name": "value",
    "source_code": "def value(self):\n    return self.used_parameters.get(self.parameter_name)",
    "docstring": "Return the value (in string format) provided in the request's query string for this filter, if any, or None if the value wasn't provided.",
    "type": "method",
    "file_path": "django\\django\\contrib\\admin\\filters.py",
    "ast_data": "FunctionDef name:value arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "django",
    "name": "get_default_redirect_url",
    "source_code": "def get_default_redirect_url(self):\n    if self.next_page:\n        return resolve_url(self.next_page)\n    raise ImproperlyConfigured('No URL to redirect to. Provide a next_page.')",
    "docstring": "Return the default redirect URL.",
    "type": "method",
    "file_path": "django\\django\\contrib\\auth\\views.py",
    "ast_data": "FunctionDef name:get_default_redirect_url arg:self arguments arg If Return return:yes Call Raise Call"
  },
  {
    "library": "tensorflow",
    "name": "_ConverterData",
    "source_code": "class _ConverterData(object):\n\n    def __init__(self, graph_def, variable_names_allowlist=None, variable_names_denylist=None):\n        self._graph_def = graph_def\n        self._tensor_data = {}\n        self._build_node_defs_list()\n        self._variable_names_allowlist = variable_names_allowlist\n        self._variable_names_denylist = variable_names_denylist\n\n    @property\n    def graph_def(self):\n        return self._graph_def\n\n    @property\n    def node_defs(self):\n        return self._node_defs\n\n    @property\n    def tensor_data(self):\n        return self._tensor_data\n\n    def _should_convert(self, name):\n        return (self._variable_names_allowlist is None or name in self._variable_names_allowlist) and (self._variable_names_denylist is None or name not in self._variable_names_denylist)\n\n    def _build_node_defs_list(self):\n        self._node_defs = {node.name: node for node in self._graph_def.node}\n        if self._graph_def.library:\n            for func in self._graph_def.library.function:\n                self._node_defs.update({node.name: node for node in func.node_def if node.op in _CONTROL_FLOW_OPS})",
    "docstring": "Container for constant conversion supporting data. The data includes the graph being converted, and the pre-converted tensors. This class will be specialized for ConcreteFunction and Session-based conversions, as the means to obtain that data is different for each case.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\convert_to_constants.py",
    "ast_data": "ClassDef name:_ConverterData FunctionDef name:__init__ arg:self arg:graph_def arg:variable_names_allowlist arg:variable_names_denylist arguments arg arg arg arg Assign Assign Call Assign Assign FunctionDef name:graph_def arg:self arguments arg Return return:yes FunctionDef name:node_defs arg:self arguments arg Return return:yes FunctionDef name:tensor_data arg:self arguments arg Return return:yes FunctionDef name:_should_convert arg:self arg:name arguments arg arg Return return:yes BoolOp BoolOp Compare Compare BoolOp Compare Compare FunctionDef name:_build_node_defs_list arg:self arguments arg Assign If For Call Compare"
  },
  {
    "library": "pytorch",
    "name": "load_state_dict",
    "source_code": "@override\ndef load_state_dict(self, state_dict: dict[str, Any]) -> None:\n    fn = state_dict.pop('_scale_fn_custom')\n    super().load_state_dict(state_dict)\n    if fn is not None:\n        self._scale_fn_custom.__dict__.update(fn)\n    self._init_scale_fn()",
    "docstring": "Load the scheduler's state.",
    "type": "method",
    "file_path": "pytorch\\torch\\optim\\lr_scheduler.py",
    "ast_data": "FunctionDef name:load_state_dict arg:self arg:state_dict arguments arg arg Assign Call Call Call If Compare Call Call"
  },
  {
    "library": "tensorflow",
    "name": "group_by_reducer",
    "source_code": "@tf_export('data.experimental.group_by_reducer')\ndef group_by_reducer(key_func, reducer):\n\n    def _apply_fn(dataset):\n        return _GroupByReducerDataset(dataset, key_func, reducer)\n    return _apply_fn",
    "docstring": "A transformation that groups elements and performs a reduction. This transformation maps element of a dataset to a key using and groups the elements by key. The is used to process each group; its is used to initialize state for each group when it is created, the is used to update the state every time an element is mapped to the matching group, and the is used to map the final state to an output value. Args: key_func: A function mapping a nested structure of tensors (having shapes and types defined by and ) to a scalar tensor. reducer: An instance of , which captures the reduction logic using the , , and functions. Returns: A transformation function, which can be passed to .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\data\\experimental\\ops\\grouping.py",
    "ast_data": "FunctionDef name:group_by_reducer arg:key_func arg:reducer arguments arg arg FunctionDef name:_apply_fn arg:dataset arguments arg Return return:yes Call Return return:yes Call"
  },
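A hedged end-to-end sketch of the grouping API above, assuming `tf.data.experimental.Reducer` takes the `init_func`/`reduce_func`/`finalize_func` triple the docstring describes:

```python
import tensorflow as tf

# Sum the even and the odd integers in [0, 10) as two separate groups.
reducer = tf.data.experimental.Reducer(
    init_func=lambda _: tf.constant(0, dtype=tf.int64),
    reduce_func=lambda state, value: state + value,
    finalize_func=lambda state: state)
ds = tf.data.Dataset.range(10).apply(
    tf.data.experimental.group_by_reducer(key_func=lambda x: x % 2,
                                          reducer=reducer))
print(list(ds.as_numpy_iterator()))  # [20, 25] (group order may vary)
```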
  {
    "library": "pytorch",
    "name": "WeightedQuantizedModule",
    "source_code": "class WeightedQuantizedModule(torch.nn.Module, metaclass=abc.ABCMeta):\n\n    @classmethod\n    @abc.abstractmethod\n    def from_reference(cls, ref_module, output_scale, output_zero_point):\n        raise NotImplementedError",
    "docstring": "Wrapper for quantized modules than can be lowered from reference modules.",
    "type": "class",
    "file_path": "pytorch\\torch\\ao\\nn\\quantized\\modules\\utils.py",
    "ast_data": "ClassDef name:WeightedQuantizedModule FunctionDef name:from_reference arg:cls arg:ref_module arg:output_scale arg:output_zero_point arguments arg arg arg arg Raise"
  },
  {
    "library": "tensorflow",
    "name": "_infer_num_gpus_per_worker",
    "source_code": "def _infer_num_gpus_per_worker(devices):\n    if _is_device_list_single_worker(devices):\n        return sum((1 for d in devices if _is_gpu_device(d)))\n    else:\n        device_dict = _group_device_list(devices)\n        num_gpus = None\n        for _, devices_in_task in device_dict.items():\n            for device_in_task in devices_in_task:\n                if num_gpus is None:\n                    num_gpus = sum((1 for d in device_in_task if _is_gpu_device(d)))\n                elif num_gpus != sum((1 for d in device_in_task if _is_gpu_device(d))):\n                    raise ValueError('All workers should have the same number of GPUs.')\n                for d in device_in_task:\n                    d_spec = tf_device.DeviceSpec.from_string(d)\n                    if d_spec.device_type == 'GPU' and d_spec.device_index >= num_gpus:\n                        raise ValueError('GPU `device_index` on a worker should be consecutive and start from 0.')\n        return num_gpus",
    "docstring": "Infers the number of GPUs on each worker. Currently to make multi-worker cross device ops work, we need all workers to have the same number of GPUs. Args: devices: a list of device strings, can be either local devices or remote devices. Returns: number of GPUs per worker. Raises: ValueError if workers have different number of GPUs or GPU indices are not consecutive and starting from 0.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\mirrored_strategy.py",
    "ast_data": "FunctionDef name:_infer_num_gpus_per_worker arg:devices arguments arg If Call Return return:yes Call Call Assign Call Assign For Call For If Compare Assign Call Call If Compare Call Call Raise Call For Assign Call If BoolOp Compare Compare Raise Call Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "func",
    "source_code": "def func(*args, **kw):\n    if not _routing_enabled():\n        raise RuntimeError('This method is only available when metadata routing is enabled. You can enable it using sklearn.set_config(enable_metadata_routing=True).')\n    if self.validate_keys and set(kw) - set(self.keys):\n        raise TypeError(f'Unexpected args: {set(kw) - set(self.keys)} in {self.name}. Accepted arguments are: {set(self.keys)}')\n    if instance is None:\n        _instance = args[0]\n        args = args[1:]\n    else:\n        _instance = instance\n    if args:\n        raise TypeError(f'set_{self.name}_request() takes 0 positional argument but {len(args)} were given')\n    requests = _instance._get_metadata_request()\n    method_metadata_request = getattr(requests, self.name)\n    for prop, alias in kw.items():\n        if alias is not UNCHANGED:\n            method_metadata_request.add_request(param=prop, alias=alias)\n    _instance._metadata_request = requests\n    return _instance",
    "docstring": "Updates the request for provided parameters This docstring is overwritten below. See REQUESTER_DOC for expected functionality",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\utils\\_metadata_requests.py",
    "ast_data": "FunctionDef name:func arguments arg arg If Call Raise Call If BoolOp Call Call Raise Call Call Call Call If Compare Assign Assign Assign If Raise Call Call Assign Call Assign Call For Call If Compare Call Assign Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "sort",
    "source_code": "def sort(self, dtypes: tuple[torch.dtype, ...], values: tuple[T, ...], stable: bool, descending: bool) -> tuple[T, ...]:\n    raise NotImplementedError",
    "docstring": "Sort values along the reduction dimension.",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\ops_handler.py",
    "ast_data": "FunctionDef name:sort arg:self arg:dtypes arg:values arg:stable arg:descending arguments arg arg arg arg arg Raise"
  },
  {
    "library": "django",
    "name": "num_geom",
    "source_code": "@property\ndef num_geom(self):\n    return capi.get_num_geoms(self.ptr)",
    "docstring": "Return the number of geometries in the Geometry.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\geos\\geometry.py",
    "ast_data": "FunctionDef name:num_geom arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "is_initialized",
    "source_code": "def is_initialized() -> bool:\n    return bool(_INITIALIZED_ACCELERATOR_SYSTEM_TYPE)",
    "docstring": "Returns whether accelerator system has been initialized.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\dtensor\\python\\accelerator_util.py",
    "ast_data": "FunctionDef name:is_initialized arguments Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_ShuffleDataset",
    "source_code": "class _ShuffleDataset(dataset_ops.UnaryUnchangedStructureDataset):\n\n    def __init__(self, input_dataset, buffer_size, seed=None, reshuffle_each_iteration=True, name=None):\n        self._input_dataset = input_dataset\n        self._buffer_size = ops.convert_to_tensor(buffer_size, dtype=dtypes.int64, name='buffer_size')\n        self._seed, self._seed2 = random_seed.get_seed(seed)\n        self._reshuffle_each_iteration = reshuffle_each_iteration\n        self._name = name\n        if tf2.enabled() and (context.executing_eagerly() or ops.inside_function()):\n            variant_tensor = gen_dataset_ops.shuffle_dataset_v3(input_dataset._variant_tensor, buffer_size=self._buffer_size, seed=self._seed, seed2=self._seed2, seed_generator=gen_dataset_ops.dummy_seed_generator(), reshuffle_each_iteration=self._reshuffle_each_iteration, **self._common_args)\n        else:\n            variant_tensor = gen_dataset_ops.shuffle_dataset(input_dataset._variant_tensor, buffer_size=self._buffer_size, seed=self._seed, seed2=self._seed2, reshuffle_each_iteration=self._reshuffle_each_iteration, **self._common_args)\n        super().__init__(input_dataset, variant_tensor)",
    "docstring": "A that randomly shuffles the elements of its input.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\data\\ops\\shuffle_op.py",
    "ast_data": "ClassDef name:_ShuffleDataset FunctionDef name:__init__ arg:self arg:input_dataset arg:buffer_size arg:seed arg:reshuffle_each_iteration arg:name arguments arg arg arg arg arg arg Assign Assign Call Assign Call Assign Assign If BoolOp Call BoolOp Call Call Assign Call Call Assign Call Call Call"
  },
  {
    "library": "pandas",
    "name": "_render_latex",
    "source_code": "def _render_latex(self, sparse_index: bool, sparse_columns: bool, clines: str | None, **kwargs) -> str:\n    d = self._render(sparse_index, sparse_columns, None, None)\n    self._translate_latex(d, clines=clines)\n    self.template_latex.globals['parse_wrap'] = _parse_latex_table_wrapping\n    self.template_latex.globals['parse_table'] = _parse_latex_table_styles\n    self.template_latex.globals['parse_cell'] = _parse_latex_cell_styles\n    self.template_latex.globals['parse_header'] = _parse_latex_header_span\n    d.update(kwargs)\n    return self.template_latex.render(**d)",
    "docstring": "Render a Styler in latex format",
    "type": "method",
    "file_path": "pandas\\pandas\\io\\formats\\style_render.py",
    "ast_data": "FunctionDef name:_render_latex arg:self arg:sparse_index arg:sparse_columns arg:clines arguments arg arg arg arg arg Assign Call Call Assign Assign Assign Assign Call Return return:yes Call"
  },
  {
    "library": "numpy",
    "name": "deriv",
    "source_code": "def deriv(self, m=1):\n    return poly1d(polyder(self.coeffs, m=m))",
    "docstring": "Return a derivative of this polynomial. Refer to for full documentation. See Also -------- polyder : equivalent function",
    "type": "method",
    "file_path": "numpy\\numpy\\lib\\_polynomial_impl.py",
    "ast_data": "FunctionDef name:deriv arg:self arg:m arguments arg arg Return return:yes Call Call"
  },
  {
    "library": "matplotlib",
    "name": "set_animated",
    "source_code": "def set_animated(self, b):\n    if self._animated != b:\n        self._animated = b\n        self.pchanged()",
    "docstring": "Set whether the artist is intended to be used in an animation. If True, the artist is excluded from regular drawing of the figure. You have to call / explicitly on the artist. This approach is used to speed up animations using blitting. See also and :ref:. Parameters ---------- b : bool",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\artist.py",
    "ast_data": "FunctionDef name:set_animated arg:self arg:b arguments arg arg If Compare Assign Call"
  },
  {
    "library": "tensorflow",
    "name": "extended",
    "source_code": "@property\ndef extended(self):\n    return self._extended",
    "docstring": "with additional methods.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\distribute_lib.py",
    "ast_data": "FunctionDef name:extended arg:self arguments arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "pack_sequence",
    "source_code": "def pack_sequence(sequences: list[Tensor], enforce_sorted: bool=True) -> PackedSequence:\n    lengths = torch.as_tensor([v.size(0) for v in sequences])\n    return pack_padded_sequence(pad_sequence(sequences), lengths, enforce_sorted=enforce_sorted)",
    "docstring": "Packs a list of variable length Tensors. Consecutive call of the next functions: `L*enforce_sorted = FalsePackedSequence` object",
    "type": "function",
    "file_path": "pytorch\\torch\\nn\\utils\\rnn.py",
    "ast_data": "FunctionDef name:pack_sequence arg:sequences arg:enforce_sorted arguments arg arg Assign Call Call Return return:yes Call Call"
  },
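A short usage sketch for `pack_sequence`; with the default `enforce_sorted=True`, the inputs must already be ordered by decreasing length:

```python
import torch
from torch.nn.utils.rnn import pack_sequence

a = torch.tensor([1, 2, 3])
b = torch.tensor([4, 5])
c = torch.tensor([6])
packed = pack_sequence([a, b, c])  # lengths 3, 2, 1: already sorted
print(packed.data)                 # tensor([1, 4, 6, 2, 5, 3]) -- time-major interleaving
print(packed.batch_sizes)          # tensor([3, 2, 1])
```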
  {
    "library": "tensorflow",
    "name": "in_test_phase",
    "source_code": "@doc_controls.do_not_generate_docs\ndef in_test_phase(x, alt, training=None):\n    return in_train_phase(alt, x, training=training)",
    "docstring": "Selects in test phase, and otherwise. Note that should have the *same shape* as . Args: x: What to return in test phase (tensor or callable that returns a tensor). alt: What to return otherwise (tensor or callable that returns a tensor). training: Optional scalar tensor (or Python boolean, or Python integer) specifying the learning phase. Returns: Either or based on .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\backend.py",
    "ast_data": "FunctionDef name:in_test_phase arg:x arg:alt arg:training arguments arg arg arg Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "register_option",
    "source_code": "def register_option(key: str, defval: object, doc: str='', validator: Callable[[object], Any] | None=None, cb: Callable[[str], Any] | None=None) -> None:\n    import keyword\n    import tokenize\n    key = key.lower()\n    if key in _registered_options:\n        raise OptionError(f\"Option '{key}' has already been registered\")\n    if key in _reserved_keys:\n        raise OptionError(f\"Option '{key}' is a reserved key\")\n    if validator:\n        validator(defval)\n    path = key.split('.')\n    for k in path:\n        if not re.match('^' + tokenize.Name + '$', k):\n            raise ValueError(f'{k} is not a valid identifier')\n        if keyword.iskeyword(k):\n            raise ValueError(f'{k} is a python keyword')\n    cursor = _global_config\n    msg = \"Path prefix to option '{option}' is already an option\"\n    for i, p in enumerate(path[:-1]):\n        if not isinstance(cursor, dict):\n            raise OptionError(msg.format(option='.'.join(path[:i])))\n        if p not in cursor:\n            cursor[p] = {}\n        cursor = cursor[p]\n    if not isinstance(cursor, dict):\n        raise OptionError(msg.format(option='.'.join(path[:-1])))\n    cursor[path[-1]] = defval\n    _registered_options[key] = RegisteredOption(key=key, defval=defval, doc=doc, validator=validator, cb=cb)",
    "docstring": "Register an option in the package-wide pandas config object Parameters ---------- key : str Fully-qualified key, e.g. \"x.y.option - z\". defval : object Default value of the option. doc : str Description of the option. validator : Callable, optional Function of a single argument, should raise if called with a value which is not a legal value for the option. cb a function of a single argument \"key\", which is called immediately after an option value is set/reset. key is the full name of the option. Raises ------ ValueError if is specified and is not a valid value.",
    "type": "function",
    "file_path": "pandas\\pandas\\_config\\config.py",
    "ast_data": "FunctionDef name:register_option arg:key arg:defval arg:doc arg:validator arg:cb arguments arg arg arg arg arg Assign Call If Compare Raise Call If Compare Raise Call If Call Assign Call For If Call Raise Call If Call Raise Call Assign Assign For Call If Call Raise Call Call Call If Compare Assign Assign If Call Raise Call Call Call Assign Assign Call"
  },
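A hedged sketch against the private `pandas._config` machinery shown above; the option name `display.my_flag` and the helper `_require_bool` are illustrative only:

```python
import pandas as pd
from pandas._config.config import register_option  # private module, per the record

def _require_bool(value):
    # Validator contract: raise for illegal values, return nothing otherwise.
    if not isinstance(value, bool):
        raise ValueError(f"must be a bool, got {value!r}")

register_option("display.my_flag", False, doc="Hypothetical example flag.",
                validator=_require_bool)
print(pd.get_option("display.my_flag"))  # False
```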
  {
    "library": "tensorflow",
    "name": "encode_structure",
    "source_code": "@tf_export('__internal__.saved_model.encode_structure', v1=[])\ndef encode_structure(nested_structure):\n    return _map_structure(nested_structure, _get_encoders())",
    "docstring": "Encodes nested structures composed of encodable types into a proto. Args: nested_structure: Structure to encode. Returns: Encoded proto. Raises: NotEncodableError: For values for which there are no encoders.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\saved_model\\nested_structure_coder.py",
    "ast_data": "FunctionDef name:encode_structure arg:nested_structure arguments arg Return return:yes Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "_fetch_all_embeddings",
    "source_code": "def _fetch_all_embeddings(model):\n    embedding_modules = []\n    stack = [model]\n    while stack:\n        module = stack.pop()\n        for _, child in module.named_children():\n            fqn_name = module_to_fqn(model, child)\n            if type(child) in SUPPORTED_MODULES:\n                embedding_modules.append((fqn_name, child))\n            else:\n                stack.append(child)\n    return embedding_modules",
    "docstring": "Fetches Embedding and EmbeddingBag modules from the model",
    "type": "function",
    "file_path": "pytorch\\torch\\ao\\pruning\\_experimental\\data_sparsifier\\quantization_utils.py",
    "ast_data": "FunctionDef name:_fetch_all_embeddings arg:model arguments arg Assign Assign While Assign Call For Call Assign Call If Compare Call Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "step",
    "source_code": "def step(self, metrics: SupportsFloat, epoch=None) -> None:\n    current = float(metrics)\n    if epoch is None:\n        epoch = self.last_epoch + 1\n    else:\n        warnings.warn(EPOCH_DEPRECATION_WARNING, UserWarning)\n    self.last_epoch = epoch\n    if self.is_better(current, self.best):\n        self.best = current\n        self.num_bad_epochs = 0\n    else:\n        self.num_bad_epochs += 1\n    if self.in_cooldown:\n        self.cooldown_counter -= 1\n        self.num_bad_epochs = 0\n    if self.num_bad_epochs > self.patience:\n        self._reduce_lr(epoch)\n        self.cooldown_counter = self.cooldown\n        self.num_bad_epochs = 0\n    self._last_lr = [group['lr'] for group in self.optimizer.param_groups]",
    "docstring": "Perform a step.",
    "type": "method",
    "file_path": "pytorch\\torch\\optim\\lr_scheduler.py",
    "ast_data": "FunctionDef name:step arg:self arg:metrics arg:epoch arguments arg arg arg Assign Call If Compare Assign Call Assign If Call Assign Assign If Assign If Compare Call Assign Assign Assign"
  },
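The `step` method above belongs to `ReduceLROnPlateau`, which takes the monitored metric directly; a minimal sketch in which the metric never improves, so the learning rate is cut after `patience` bad epochs:

```python
import torch

model = torch.nn.Linear(2, 1)
opt = torch.optim.SGD(model.parameters(), lr=0.1)
sched = torch.optim.lr_scheduler.ReduceLROnPlateau(opt, factor=0.5, patience=2)

for epoch in range(6):
    val_loss = 1.0        # stand-in metric that never improves
    sched.step(val_loss)  # third consecutive bad epoch triggers the reduction
print(opt.param_groups[0]["lr"])  # 0.05
```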
  {
    "library": "tensorflow",
    "name": "less",
    "source_code": "@dispatch.add_dispatch_support\n@doc_controls.do_not_generate_docs\ndef less(x, y):\n    return math_ops.less(x, y)",
    "docstring": "Element-wise truth value of (x < y). Args: x: Tensor or variable. y: Tensor or variable. Returns: A bool tensor.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\backend.py",
    "ast_data": "FunctionDef name:less arg:x arg:y arguments arg arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "eval",
    "source_code": "def eval(self):\n    if not self._auto_gc_enabled:\n        raise TypeError('Persistent tensor %s may have already been deleted.' % self.handle)\n    holder, reader = _get_handle_reader(self._session.graph, self._handle, self._dtype)\n    return self._session.run(reader, feed_dict={holder: self._handle})",
    "docstring": "Return the value of the tensor represented by this handle.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\session_ops.py",
    "ast_data": "FunctionDef name:eval arg:self arguments arg If Raise Call Assign Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "gen_from_yaml",
    "source_code": "@staticmethod\ndef gen_from_yaml(args: dict[str, tuple[str, str]], type_alias_map: dict[str, list[str]], dim_order_alias_map: dict[str, list[int]]) -> list[ETKernelKey]:\n    dim_order_alias_map = {k: [int(alias) for alias in v] for k, v in dim_order_alias_map.items()}\n    kernel_keys = []\n    dtype_alias_used = set()\n    for type_alias, dim_order in args.values():\n        assert type_alias in type_alias_map, 'Undefined type alias: ' + str(type_alias)\n        assert dim_order in dim_order_alias_map, f'Undefined dim_order alias: {dim_order}'\n        dtype_alias_used.add(type_alias)\n    alias_dtypes = [[(alias, dtype) for dtype in type_alias_map[alias]] for alias in dtype_alias_used]\n    alias_permutations = [dict(permutation) for permutation in list(itertools.product(*alias_dtypes))]\n    op_arg_cache = {}\n    for permutation in alias_permutations:\n        arg_list = []\n        for arg_name, arg_spec in args.items():\n            dtype = permutation[arg_spec[0]]\n            dim_order = dim_order_alias_map[arg_spec[1]]\n            if (cache_key := (arg_name, dtype, tuple(dim_order))) not in op_arg_cache:\n                op_arg_cache[cache_key] = ETKernelKeyOpArgMeta(*cache_key)\n            arg_list.append(op_arg_cache[cache_key])\n        kernel_keys.append(ETKernelKey(tuple(arg_list)))\n    return kernel_keys",
    "docstring": "Generate ETKernelKeys from arg kernel specs Multiple ETKernelKeys are returned due to dtype permutations from utilizing type_alias_map (actualizing each potential type permutation as a KernelKey) Args: args: Mapping from argument name to kernel specs Kernel specs are a tuple of (dtype, dim_order). Currently tuple entries must be aliased via the alias map arguments type_alias_map: Mapping from type alias to potential type enums i.e { T0 : [Double, Int] } means T0 can be either Double or Int Used for lookup by args dim_order_alias_map: Mapping from alias to a list of dimension orders Used for lookup by args",
    "type": "method",
    "file_path": "pytorch\\torchgen\\executorch\\model.py",
    "ast_data": "FunctionDef name:gen_from_yaml arg:args arg:type_alias_map arg:dim_order_alias_map arguments arg arg arg Assign Call Call Assign Assign Call For Call Compare Call Compare Call Assign Assign Call Call Call Assign For Assign For Call Assign Assign If Compare Call Assign Call Call Call Call Call Return return:yes"
  },
  {
    "library": "scipy",
    "name": "getfullargspec_no_self",
    "source_code": "def getfullargspec_no_self(func):\n    sig = inspect.signature(func)\n    args = [p.name for p in sig.parameters.values() if p.kind in [inspect.Parameter.POSITIONAL_OR_KEYWORD, inspect.Parameter.POSITIONAL_ONLY]]\n    varargs = [p.name for p in sig.parameters.values() if p.kind == inspect.Parameter.VAR_POSITIONAL]\n    varargs = varargs[0] if varargs else None\n    varkw = [p.name for p in sig.parameters.values() if p.kind == inspect.Parameter.VAR_KEYWORD]\n    varkw = varkw[0] if varkw else None\n    defaults = tuple((p.default for p in sig.parameters.values() if p.kind == inspect.Parameter.POSITIONAL_OR_KEYWORD and p.default is not p.empty)) or None\n    kwonlyargs = [p.name for p in sig.parameters.values() if p.kind == inspect.Parameter.KEYWORD_ONLY]\n    kwdefaults = {p.name: p.default for p in sig.parameters.values() if p.kind == inspect.Parameter.KEYWORD_ONLY and p.default is not p.empty}\n    annotations = {p.name: p.annotation for p in sig.parameters.values() if p.annotation is not p.empty}\n    return FullArgSpec(args, varargs, varkw, defaults, kwonlyargs, kwdefaults or None, annotations)",
    "docstring": "inspect.getfullargspec replacement using inspect.signature. If func is a bound method, do not list the 'self' parameter. Parameters ---------- func : callable A callable to inspect Returns ------- fullargspec : FullArgSpec(args, varargs, varkw, defaults, kwonlyargs, kwonlydefaults, annotations) NOTE: if the first argument of is self, it is *not*, I repeat *not*, included in fullargspec.args. This is done for consistency between inspect.getargspec() under Python 2.x, and inspect.signature() under Python 3.x.",
    "type": "function",
    "file_path": "scipy\\scipy\\_lib\\_util.py",
    "ast_data": "FunctionDef name:getfullargspec_no_self arg:func arguments arg Assign Call Assign Call Compare Assign Call Compare Assign Assign Call Compare Assign Assign BoolOp Call Call BoolOp Compare Compare Assign Call Compare Assign Call BoolOp Compare Compare Assign Call Compare Return return:yes Call BoolOp"
  },
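A quick illustration of what `getfullargspec_no_self` extracts, matching the implementation shown (the import path is the private location named in the record):

```python
from scipy._lib._util import getfullargspec_no_self  # private helper

def f(a, b=1, *args, c, **kwargs):
    pass

spec = getfullargspec_no_self(f)
print(spec.args)        # ['a', 'b']
print(spec.varargs)     # 'args'
print(spec.kwonlyargs)  # ['c']
print(spec.defaults)    # (1,)
```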
  {
    "library": "django",
    "name": "setup_proxy",
    "source_code": "def setup_proxy(self, target):\n    self.pk = target._meta.pk\n    self.proxy_for_model = target\n    self.db_table = target._meta.db_table",
    "docstring": "Do the internal setup so that the current model is a proxy for \"target\".",
    "type": "method",
    "file_path": "django\\django\\db\\models\\options.py",
    "ast_data": "FunctionDef name:setup_proxy arg:self arg:target arguments arg arg Assign Assign Assign"
  },
  {
    "library": "scikit-learn",
    "name": "nunique",
    "source_code": "def nunique(x: Array, /, *, xp: ModuleType | None=None) -> Array:\n    if xp is None:\n        xp = array_namespace(x)\n    if is_jax_array(x):\n        _, counts = xp.unique_counts(x, size=_compat.size(x))\n        return xp.astype(counts, xp.bool).sum()\n    _, counts = xp.unique_counts(x)\n    n = _compat.size(counts)\n    if n is None:\n        return xp.astype(counts, xp.bool).sum()\n    return xp.asarray(n, device=_compat.device(x))",
    "docstring": "Count the number of unique elements in an array. Compatible with JAX and Dask, whose laziness would be otherwise problematic. Parameters ---------- x : Array Input array. xp : array_namespace, optional The standard-compatible namespace for . Default: infer. Returns ------- array: 0-dimensional integer array The number of unique elements in . It can be lazy.",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\externals\\array_api_extra\\_lib\\_funcs.py",
    "ast_data": "FunctionDef name:nunique arguments arg arg If Compare Assign Call If Call Assign Call Call Return return:yes Call Call Assign Call Assign Call If Compare Return return:yes Call Call Return return:yes Call Call"
  },
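A hedged usage sketch for `nunique`; the import path follows the record's vendored location inside scikit-learn and is an assumption:

```python
import numpy as np
from sklearn.externals.array_api_extra._lib._funcs import nunique  # vendored, private

x = np.asarray([1, 1, 2, 3, 3, 3])
print(int(nunique(x)))  # 3 distinct values
```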
  {
    "library": "cherrypy",
    "name": "args",
    "source_code": "@args.setter\ndef args(self, args):\n    cherrypy.serving.request.args = args\n    return cherrypy.serving.request.args",
    "docstring": "Set the request arguments in order.",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\_cpdispatch.py",
    "ast_data": "FunctionDef name:args arg:self arg:args arguments arg arg Assign Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "get_transform",
    "source_code": "def get_transform(self):\n    return self._transform",
    "docstring": "Return the associated with this scale.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\scale.py",
    "ast_data": "FunctionDef name:get_transform arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "offset_tuple_to_global_index",
    "source_code": "def offset_tuple_to_global_index(self, offset_tuple):\n    index = 0\n    for i, o in enumerate(offset_tuple):\n        m = 1\n        for x in range(i + 1, self.rank):\n            m = m * self.num_shards(x)\n        index = index + m * o\n    return index",
    "docstring": "Mapping from offset to index in global tensor.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\dtensor\\python\\layout.py",
    "ast_data": "FunctionDef name:offset_tuple_to_global_index arg:self arg:offset_tuple arguments arg arg Assign For Call Assign For Call Assign Call Assign Return return:yes"
  },
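A pure-Python restatement of the row-major linearization in `offset_tuple_to_global_index`; the `shards` list stands in for `self.num_shards(x)` and is hypothetical:

```python
def offset_to_index(offset_tuple, shards):
    # Accumulate o * (product of shard counts of all later dimensions).
    index = 0
    for i, o in enumerate(offset_tuple):
        stride = 1
        for n in shards[i + 1:]:
            stride *= n
        index += stride * o
    return index

print(offset_to_index((1, 2), [3, 4]))  # 6 == 1*4 + 2
```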
  {
    "library": "pytorch",
    "name": "static_input",
    "source_code": "def static_input(x: torch.Tensor) -> torch.Tensor:\n    return torch.empty_strided(x.size(), x.stride(), dtype=x.dtype, device=x.device)",
    "docstring": "Copy and input while preserving strides",
    "type": "function",
    "file_path": "pytorch\\torch\\_inductor\\compile_fx.py",
    "ast_data": "FunctionDef name:static_input arg:x arguments arg Return return:yes Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "__init__",
    "source_code": "def __init__(self, layout, inputs, constant_args=(), has_bias=True) -> None:\n    self.has_bias = has_bias\n    self.idx_for_inplace_sum = 6\n    super().__init__(layout, inputs, constant_args, None, op_overload=torch.ops.onednn.qlinear_pointwise.binary_tensor, cpp_kernel_name='aoti_torch_cpu__qlinear_pointwise_binary_tensor')",
    "docstring": "if bias is not None - inputs = [x, w, x_scale, x_zp, weight_scale, weight_zp, x2, bias] - const_args is: [o_scale, o_zp, fp32_output, binary_attr, aplha, unary_attr, unary_scalars, unary_algorithm] else - inputs = [x, w, x_scale, x_zp, weight_scale, weight_zp, x2] - const_args is: [bias, o_scale, o_zp, fp32_output, binary_attr, aplha, unary_attr, unary_scalars, unary_algorithm]",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\mkldnn_ir.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:layout arg:inputs arg:constant_args arg:has_bias arguments arg arg arg arg arg Assign Assign Call Call"
  },
  {
    "library": "scipy",
    "name": "MatlabObject",
    "source_code": "class MatlabObject(np.ndarray):\n\n    def __new__(cls, input_array, classname=None):\n        obj = np.asarray(input_array).view(cls)\n        obj.classname = classname\n        return obj\n\n    def __array_finalize__(self, obj):\n        self.classname = getattr(obj, 'classname', None)",
    "docstring": "Subclass of ndarray to signal this is a matlab object. This is a simple subclass of :class: meant to be used by :func: and should not be instantiated directly.",
    "type": "class",
    "file_path": "scipy\\scipy\\io\\matlab\\_mio5_params.py",
    "ast_data": "ClassDef name:MatlabObject FunctionDef name:__new__ arg:cls arg:input_array arg:classname arguments arg arg arg Assign Call Call Assign Return return:yes FunctionDef name:__array_finalize__ arg:self arg:obj arguments arg arg Assign Call"
  },
  {
    "library": "tensorflow",
    "name": "static_uniform_row_length",
    "source_code": "@property\ndef static_uniform_row_length(self):\n    if self._uniform_row_length is not None:\n        return tensor_util.constant_value(self._uniform_row_length)\n    return None",
    "docstring": "The number of values in each row of this partition, if statically known. Returns: The number of values in each row of this partition as an (if statically known); or (otherwise).",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\row_partition.py",
    "ast_data": "FunctionDef name:static_uniform_row_length arg:self arguments arg If Compare Return return:yes Call Return return:no"
  },
  {
    "library": "tensorflow",
    "name": "_check_stop",
    "source_code": "def _check_stop(self):\n    return self._should_stop",
    "docstring": "See base class.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\training\\monitored_session.py",
    "ast_data": "FunctionDef name:_check_stop arg:self arguments arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "write_string",
    "source_code": "def write_string(self, name: str, data: str) -> None:\n    assert isinstance(data, str), f'Expected string but got {type(data)}'\n    data_bytes = data.encode()\n    self.write_bytes(name, data_bytes)",
    "docstring": "Write a string object to the archive. name: The destination file inside the archive. data: The string object to write.",
    "type": "method",
    "file_path": "pytorch\\torch\\export\\pt2_archive\\_package.py",
    "ast_data": "FunctionDef name:write_string arg:self arg:name arg:data arguments arg arg arg Call Call Assign Call Call"
  },
  {
    "library": "cryptography",
    "name": "update",
    "source_code": "@abc.abstractmethod\ndef update(self, data: Buffer) -> bytes:\n    pass",
    "docstring": "Processes the provided bytes through the cipher and returns the results as bytes.",
    "type": "method",
    "file_path": "cryptography\\src\\cryptography\\hazmat\\primitives\\ciphers\\base.py",
    "ast_data": "FunctionDef name:update arg:self arg:data arguments arg arg"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, initial_learning_rate, decay_steps, initial_variance=1.0, variance_decay=0.55, num_periods=0.5, alpha=0.0, beta=0.001, name=None):\n    super(NoisyLinearCosineDecay, self).__init__()\n    self.initial_learning_rate = initial_learning_rate\n    self.decay_steps = decay_steps\n    self.initial_variance = initial_variance\n    self.variance_decay = variance_decay\n    self.num_periods = num_periods\n    self.alpha = alpha\n    self.beta = beta\n    self.name = name",
    "docstring": "Applies noisy linear cosine decay to the learning rate. Args: initial_learning_rate: A scalar or Tensor or a Python number. The initial learning rate. decay_steps: A scalar or or a Python number. Number of steps to decay over. initial_variance: initial variance for the noise. See computation above. variance_decay: decay for the noise's variance. See computation above. num_periods: Number of periods in the cosine part of the decay. See computation above. alpha: See computation above. beta: See computation above. name: String. Optional name of the operation. Defaults to 'NoisyLinearCosineDecay'.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\optimizer_v2\\learning_rate_schedule.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:initial_learning_rate arg:decay_steps arg:initial_variance arg:variance_decay arg:num_periods arg:alpha arg:beta arg:name arguments arg arg arg arg arg arg arg arg arg Call Call Assign Assign Assign Assign Assign Assign Assign Assign"
  },
  {
    "library": "pytorch",
    "name": "TimerClass",
    "source_code": "class TimerClass(Protocol):\n\n    def __init__(self, stmt: str, setup: str, timer: Callable[[], float], globals: dict[str, Any], **kwargs: Any) -> None:\n        ...\n\n    def timeit(self, number: int) -> float:\n        ...",
    "docstring": "This is the portion of the API used by benchmark utils.",
    "type": "class",
    "file_path": "pytorch\\torch\\utils\\benchmark\\utils\\_stubs.py",
    "ast_data": "ClassDef name:TimerClass FunctionDef name:__init__ arg:self arg:stmt arg:setup arg:timer arg:globals arguments arg arg arg arg arg arg FunctionDef name:timeit arg:self arg:number arguments arg arg"
  },
  {
    "library": "django",
    "name": "validate_consistency",
    "source_code": "def validate_consistency(self):\n    [n.raise_error() for n in self.node_map.values() if isinstance(n, DummyNode)]",
    "docstring": "Ensure there are no dummy nodes remaining in the graph.",
    "type": "method",
    "file_path": "django\\django\\db\\migrations\\graph.py",
    "ast_data": "FunctionDef name:validate_consistency arg:self arguments arg Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "select_model_mode_for_export",
    "source_code": "@deprecated('Please set training mode before exporting the model', category=None)\n@contextlib.contextmanager\ndef select_model_mode_for_export(model, mode: _C_onnx.TrainingMode):\n    if not isinstance(mode, _C_onnx.TrainingMode):\n        raise TypeError(f\"'mode' should be a torch.onnx.TrainingMode enum, but got '{type(mode)}'.\")\n    originally_training: bool = False\n    if hasattr(model, 'training'):\n        originally_training = model.training\n        if mode == _C_onnx.TrainingMode.TRAINING or (mode == _C_onnx.TrainingMode.PRESERVE and originally_training):\n            GLOBALS.export_training = True\n            if GLOBALS.export_onnx_opset_version < 12:\n                warnings.warn(f'You are exporting the model in training mode with onnx opset version {GLOBALS.export_onnx_opset_version}. Opset versions lower than opset 12 will not be able to export nodes such as Dropout and BatchNorm correctly.')\n        else:\n            GLOBALS.export_training = False\n        GLOBALS.training_mode = mode\n        if mode == _C_onnx.TrainingMode.TRAINING:\n            model.train(True)\n        elif mode == _C_onnx.TrainingMode.EVAL:\n            model.train(False)\n    try:\n        yield\n    finally:\n        if hasattr(model, 'training') and (not mode == _C_onnx.TrainingMode.PRESERVE):\n            model.train(originally_training)",
    "docstring": "A context manager to temporarily set the training mode of `exportexport`.",
    "type": "function",
    "file_path": "pytorch\\torch\\onnx\\utils.py",
    "ast_data": "FunctionDef name:select_model_mode_for_export arg:model arg:mode arguments arg arg If Call Raise Call Call If Call Assign If BoolOp Compare BoolOp Compare Assign If Compare Call Assign Assign If Compare Call If Compare Call Try If BoolOp Call Compare Call Call"
  },
  {
    "library": "pandas",
    "name": "check_keys_split",
    "source_code": "@final\ndef check_keys_split(self, decoded: dict) -> None:\n    bad_keys = set(decoded.keys()).difference(set(self._split_keys))\n    if bad_keys:\n        bad_keys_joined = ', '.join(bad_keys)\n        raise ValueError(f'JSON data had unexpected key(s): {bad_keys_joined}')",
    "docstring": "Checks that dict has only the appropriate keys for orient='split'.",
    "type": "method",
    "file_path": "pandas\\pandas\\io\\json\\_json.py",
    "ast_data": "FunctionDef name:check_keys_split arg:self arg:decoded arguments arg arg Assign Call Call Call Call If Assign Call Raise Call"
  },
  {
    "library": "pytorch",
    "name": "see",
    "source_code": "def see(self, *key):\n    if key in self.seen:\n        raise RuntimeError('duplicate key: ' + str(key))\n    self.seen.add(key)",
    "docstring": "Observe a key and raise an error if it is seen multiple times.",
    "type": "method",
    "file_path": "pytorch\\torch\\autograd\\profiler.py",
    "ast_data": "FunctionDef name:see arg:self arguments arg arg If Compare Raise Call Call Call"
  },
  {
    "library": "scikit-learn",
    "name": "_preserve_dia_indices_dtype",
    "source_code": "def _preserve_dia_indices_dtype(sparse_container, original_container_format, requested_sparse_format):\n    if original_container_format == 'dia_array' and requested_sparse_format in ('csr', 'coo'):\n        if requested_sparse_format == 'csr':\n            index_dtype = _smallest_admissible_index_dtype(arrays=(sparse_container.indptr, sparse_container.indices), maxval=max(sparse_container.nnz, sparse_container.shape[1]), check_contents=True)\n            sparse_container.indices = sparse_container.indices.astype(index_dtype, copy=False)\n            sparse_container.indptr = sparse_container.indptr.astype(index_dtype, copy=False)\n        else:\n            index_dtype = _smallest_admissible_index_dtype(maxval=max(sparse_container.shape))\n            sparse_container.row = sparse_container.row.astype(index_dtype, copy=False)\n            sparse_container.col = sparse_container.col.astype(index_dtype, copy=False)",
    "docstring": "Preserve indices dtype for SciPy < 1.12 when converting from DIA to CSR/CSC. For SciPy < 1.12, DIA arrays indices are upcasted to that is inconsistent with DIA matrices. We downcast the indices dtype to to be consistent with DIA matrices. The converted indices arrays are affected back inplace to the sparse container. Parameters ---------- sparse_container : sparse container Sparse container to be checked. requested_sparse_format : str or bool The type of format of . Notes ----- See for more details.",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\utils\\fixes.py",
    "ast_data": "FunctionDef name:_preserve_dia_indices_dtype arg:sparse_container arg:original_container_format arg:requested_sparse_format arguments arg arg arg If BoolOp Compare Compare If Compare Assign Call Call Assign Call Assign Call Assign Call Call Assign Call Assign Call"
  },
  {
    "library": "scikit-learn",
    "name": "_pprint",
    "source_code": "def _pprint(params, offset=0, printer=repr):\n    options = np.get_printoptions()\n    np.set_printoptions(precision=5, threshold=64, edgeitems=2)\n    params_list = list()\n    this_line_length = offset\n    line_sep = ',\\n' + (1 + offset // 2) * ' '\n    for i, (k, v) in enumerate(sorted(params.items())):\n        if isinstance(v, float):\n            this_repr = '%s=%s' % (k, str(v))\n        else:\n            this_repr = '%s=%s' % (k, printer(v))\n        if len(this_repr) > 500:\n            this_repr = this_repr[:300] + '...' + this_repr[-100:]\n        if i > 0:\n            if this_line_length + len(this_repr) >= 75 or '\\n' in this_repr:\n                params_list.append(line_sep)\n                this_line_length = len(line_sep)\n            else:\n                params_list.append(', ')\n                this_line_length += 2\n        params_list.append(this_repr)\n        this_line_length += len(this_repr)\n    np.set_printoptions(**options)\n    lines = ''.join(params_list)\n    lines = '\\n'.join((l.rstrip(' ') for l in lines.split('\\n')))\n    return lines",
    "docstring": "Pretty print the dictionary 'params' Parameters ---------- params : dict The dictionary to pretty print offset : int, default=0 The offset in characters to add at the begin of each line. printer : callable, default=repr The function to convert entries to strings, typically the builtin str or repr",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\model_selection\\_split.py",
    "ast_data": "FunctionDef name:_pprint arg:params arg:offset arg:printer arguments arg arg arg Assign Call Call Assign Call Assign Assign For Call Call Call If Call Assign Call Assign Call If Compare Call Assign If Compare If BoolOp Compare Call Compare Call Assign Call Call Call Call Call Assign Call Assign Call Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "make_call_generated_code",
    "source_code": "def make_call_generated_code(self, fn_name: str) -> None:\n    self.extend_output(self.load_function_name(fn_name, True))\n    graphargs = self.tx.output.graphargs\n    seen_sources: OrderedSet[Source] = OrderedSet()\n\n    def collect_temp_source(source):\n        if source in seen_sources:\n            self.mark_source_temp(source)\n            return\n        seen_sources.add(source)\n        if isinstance(source, ChainedSource):\n            collect_temp_source(source.base)\n        if isinstance(source, DictGetItemSource) and isinstance(source.index, Source):\n            collect_temp_source(source.index)\n    for arg in graphargs:\n        if arg.source is not None:\n            collect_temp_source(arg.source)\n    for arg in graphargs:\n        if arg.pass_arg_as_tensor:\n            self.add_push_null(lambda: self.extend_output([self.create_load_python_module(torch), self.create_load_attr('_as_tensor_fullprec')]))\n            self.call_reconstruct(arg)\n            self.extend_output(create_call_function(1, False))\n        else:\n            self.call_reconstruct(arg)\n    self.extend_output(create_call_function(len(graphargs), False))",
    "docstring": "Call the generated code function stored in fn_name",
    "type": "method",
    "file_path": "pytorch\\torch\\_dynamo\\codegen.py",
    "ast_data": "FunctionDef name:make_call_generated_code arg:self arg:fn_name arguments arg arg Call Call Assign Call FunctionDef name:collect_temp_source arg:source arguments arg If Compare Call Return return:no Call If Call Call If BoolOp Call Call Call For If Compare Call For If Call arguments Call Call Call Call Call Call Call Call Call Call"
  },
  {
    "library": "scipy",
    "name": "line_search_wolfe1",
    "source_code": "def line_search_wolfe1(f, fprime, xk, pk, gfk=None, old_fval=None, old_old_fval=None, args=(), c1=0.0001, c2=0.9, amax=50, amin=1e-08, xtol=1e-14):\n    if gfk is None:\n        gfk = fprime(xk, *args)\n    gval = [gfk]\n    gc = [0]\n    fc = [0]\n\n    def phi(s):\n        fc[0] += 1\n        return f(xk + s * pk, *args)\n\n    def derphi(s):\n        gval[0] = fprime(xk + s * pk, *args)\n        gc[0] += 1\n        return np.dot(gval[0], pk)\n    derphi0 = np.dot(gfk, pk)\n    stp, fval, old_fval = scalar_search_wolfe1(phi, derphi, old_fval, old_old_fval, derphi0, c1=c1, c2=c2, amax=amax, amin=amin, xtol=xtol)\n    return (stp, fc[0], gc[0], fval, old_fval, gval[0])",
    "docstring": "As but do a line search to direction Parameters ---------- f : callable Function fprime : callable Gradient of xk : array_like Current point pk : array_like Search direction gfk : array_like, optional Gradient of at point old_fval : float, optional Value of at point old_old_fval : float, optional Value of at point preceding The rest of the parameters are the same as for . Returns ------- stp, f_count, g_count, fval, old_fval As in gval : array Gradient of at the final point Notes ----- Parameters and must satisfy ``.",
    "type": "function",
    "file_path": "scipy\\scipy\\optimize\\_linesearch.py",
    "ast_data": "FunctionDef name:line_search_wolfe1 arg:f arg:fprime arg:xk arg:pk arg:gfk arg:old_fval arg:old_old_fval arg:args arg:c1 arg:c2 arg:amax arg:amin arg:xtol arguments arg arg arg arg arg arg arg arg arg arg arg arg arg If Compare Assign Call Assign Assign Assign FunctionDef name:phi arg:s arguments arg Return return:yes Call FunctionDef name:derphi arg:s arguments arg Assign Call Return return:yes Call Assign Call Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "merge_with",
    "source_code": "def merge_with(self, other):\n    other = as_dimension(other)\n    self.assert_is_compatible_with(other)\n    if self._value is None:\n        return Dimension(other.value)\n    else:\n        return Dimension(self._value)",
    "docstring": "Returns a Dimension that combines the information in and . Dimensions are combined as follows: Args: other: Another Dimension. Returns: A Dimension containing the combined information of and . Raises: ValueError: If and are not compatible (see is_compatible_with).",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\tensor_shape.py",
    "ast_data": "FunctionDef name:merge_with arg:self arg:other arguments arg arg Assign Call Call If Compare Return return:yes Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "bmm_to_mm",
    "source_code": "@register_graph_pattern(CallFunction(aten.bmm, Arg(), Arg()), pass_dict=patterns)\ndef bmm_to_mm(match: Match, mat1: torch.fx.Node, mat2: torch.fx.Node):\n\n    def repl(a, b):\n        return torch.mm(a.squeeze(0), b.squeeze(0)).unsqueeze(0)\n    if check_device(mat1.meta['val'], mat2.meta['val'], get_gpu_type()) and statically_known_true(mat1.meta['val'].shape[0] == 1) and statically_known_true(mat2.meta['val'].shape[0] == 1):\n        match.replace_by_example(repl, [mat1, mat2])",
    "docstring": "Convert bmm to mm when batch size is 1",
    "type": "function",
    "file_path": "pytorch\\torch\\_inductor\\fx_passes\\joint_graph.py",
    "ast_data": "FunctionDef name:bmm_to_mm arg:match arg:mat1 arg:mat2 arguments arg arg arg FunctionDef name:repl arg:a arg:b arguments arg arg Return return:yes Call Call Call Call If BoolOp Call Call Call Compare Call Compare Call Call Call Call Call"
  },
  {
    "library": "matplotlib",
    "name": "ColorButton",
    "source_code": "class ColorButton(QtWidgets.QPushButton):\n    colorChanged = QtCore.Signal(QtGui.QColor)\n\n    def __init__(self, parent=None):\n        super().__init__(parent)\n        self.setFixedSize(20, 20)\n        self.setIconSize(QtCore.QSize(12, 12))\n        self.clicked.connect(self.choose_color)\n        self._color = QtGui.QColor()\n\n    def choose_color(self):\n        color = QtWidgets.QColorDialog.getColor(self._color, self.parentWidget(), '', QtWidgets.QColorDialog.ColorDialogOption.ShowAlphaChannel)\n        if color.isValid():\n            self.set_color(color)\n\n    def get_color(self):\n        return self._color\n\n    @QtCore.Slot(QtGui.QColor)\n    def set_color(self, color):\n        if color != self._color:\n            self._color = color\n            self.colorChanged.emit(self._color)\n            pixmap = QtGui.QPixmap(self.iconSize())\n            pixmap.fill(color)\n            self.setIcon(QtGui.QIcon(pixmap))\n    color = QtCore.Property(QtGui.QColor, get_color, set_color)",
    "docstring": "Color choosing push button",
    "type": "class",
    "file_path": "matplotlib\\lib\\matplotlib\\backends\\qt_editor\\_formlayout.py",
    "ast_data": "ClassDef name:ColorButton Assign Call FunctionDef name:__init__ arg:self arg:parent arguments arg arg Call Call Call Call Call Call Assign Call FunctionDef name:choose_color arg:self arguments arg Assign Call Call If Call Call FunctionDef name:get_color arg:self arguments arg Return return:yes FunctionDef name:set_color arg:self arg:color arguments arg arg If Compare Assign Call Assign Call Call Call Call Call Call Assign Call"
  },
  {
    "library": "tensorflow",
    "name": "graph",
    "source_code": "@property\ndef graph(self):\n    raise NotImplementedError",
    "docstring": "The of this variable.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\variables.py",
    "ast_data": "FunctionDef name:graph arg:self arguments arg Raise"
  },
  {
    "library": "scikit-learn",
    "name": "setup_cache",
    "source_code": "def setup_cache(self):\n    clear_tmp()\n    param_grid = list(itertools.product(*self.params))\n    for params in param_grid:\n        if self.skip(params):\n            continue\n        estimator = self.make_estimator(params)\n        X, _, y, _ = self.make_data(params)\n        estimator.fit(X, y)\n        est_path = get_estimator_path(self, Benchmark.save_dir, params, Benchmark.save_estimators)\n        with est_path.open(mode='wb') as f:\n            pickle.dump(estimator, f)",
    "docstring": "Pickle a fitted estimator for all combinations of parameters",
    "type": "method",
    "file_path": "scikit-learn\\asv_benchmarks\\benchmarks\\common.py",
    "ast_data": "FunctionDef name:setup_cache arg:self arguments arg Call Assign Call Call For If Call Assign Call Assign Call Call Assign Call With Call Call"
  },
  {
    "library": "kornia",
    "name": "inverse",
    "source_code": "def inverse(self, input: Tensor, params: Optional[Dict[str, Tensor]]=None, **kwargs: Any) -> Tensor:\n    input_shape = input.shape\n    in_tensor = self.transform_tensor(input)\n    params, flags = self._process_kwargs_to_params_and_flags(self._params if params is None else params, self.flags, **kwargs)\n    if params is None:\n        params = self._params\n    transform = self.get_transformation_matrix(in_tensor, params=params, flags=flags)\n    transform = self.compute_inverse_transformation(transform)\n    output = self.inverse_inputs(in_tensor, params, flags, transform)\n    if self.keepdim:\n        return self.transform_output_tensor(output, input_shape)\n    return output",
    "docstring": "Perform inverse operations. Args: input: the input tensor. params: the corresponding parameters for an operation. If None, a new parameter suite will be generated. **kwargs: key-value pairs to override the parameters and flags.",
    "type": "method",
    "file_path": "kornia\\kornia\\augmentation\\_2d\\geometric\\base.py",
    "ast_data": "FunctionDef name:inverse arg:self arg:input arg:params arguments arg arg arg arg Assign Assign Call Assign Call Compare If Compare Assign Assign Call Assign Call Assign Call If Return return:yes Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "draw_path",
    "source_code": "def draw_path(self, renderer, gc, tpath, affine, rgbFace):\n    gc0 = renderer.new_gc()\n    gc0.copy_properties(gc)\n    if self._shadow_color is None:\n        r, g, b = (gc0.get_foreground() or (1.0, 1.0, 1.0))[:3]\n        shadow_rgbFace = (r * self._rho, g * self._rho, b * self._rho)\n    else:\n        shadow_rgbFace = self._shadow_color\n    gc0.set_foreground(shadow_rgbFace)\n    gc0.set_alpha(self._alpha)\n    gc0 = self._update_gc(gc0, self._gc)\n    renderer.draw_path(gc0, tpath, affine + self._offset_transform(renderer))\n    gc0.restore()",
    "docstring": "Overrides the standard draw_path to add the shadow offset and necessary color changes for the shadow.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\patheffects.py",
    "ast_data": "FunctionDef name:draw_path arg:self arg:renderer arg:gc arg:tpath arg:affine arg:rgbFace arguments arg arg arg arg arg arg Assign Call Call If Compare Assign BoolOp Call Assign Assign Call Call Assign Call Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "_write_python_version",
    "source_code": "def _write_python_version(self):\n    self._write('.data/python_version', platform.python_version())",
    "docstring": "Writes the python version that the package was created with to .data/python_version",
    "type": "method",
    "file_path": "pytorch\\torch\\package\\package_exporter.py",
    "ast_data": "FunctionDef name:_write_python_version arg:self arguments arg Call Call"
  },
  {
    "library": "scipy",
    "name": "dual_win",
    "source_code": "@property\ndef dual_win(self) -> np.ndarray:\n    if self._dual_win is None:\n        self._dual_win = _calc_dual_canonical_window(self.win, self.hop)\n        self.dual_win.setflags(write=False)\n    return self._dual_win",
    "docstring": "Dual window (canonical dual window by default). A STFT can be interpreted as the input signal being expressed as a weighted sum of modulated and time-shifted dual windows. If no dual window is given on instantiation, the canonical dual window, i.e., the window with the minimal energy (i.e., minimal L²-norm) is calculated. Alternative means for determining dual windows are provided by and the class-method. Note that is also always a dual window of . has same length as , namely samples. If the dual window cannot be calculated a `windual_winwindual_win` are equal. closest_STFT_dual_window: Calculate dual window closest to a desired window. numpy.ndarray.setflags: Modify array flags. ShortTimeFFT: Class this property belongs to.",
    "type": "method",
    "file_path": "scipy\\scipy\\signal\\_short_time_fft.py",
    "ast_data": "FunctionDef name:dual_win arg:self arguments arg If Compare Assign Call Call Return return:yes"
  },
  {
    "library": "scrapy",
    "name": "Bz2Plugin",
    "source_code": "class Bz2Plugin:\n\n    def __init__(self, file: BinaryIO, feed_options: dict[str, Any]) -> None:\n        self.file = file\n        self.feed_options = feed_options\n        compress_level = self.feed_options.get('bz2_compresslevel', 9)\n        self.bz2file = BZ2File(filename=self.file, mode='wb', compresslevel=compress_level)\n\n    def write(self, data: bytes) -> int:\n        return self.bz2file.write(data)\n\n    def close(self) -> None:\n        self.bz2file.close()",
    "docstring": "Compresses received data using _. Accepted `bz2_compresslevelbz2.BZ2File` for more info about parameters.",
    "type": "class",
    "file_path": "scrapy\\scrapy\\extensions\\postprocessing.py",
    "ast_data": "ClassDef name:Bz2Plugin FunctionDef name:__init__ arg:self arg:file arg:feed_options arguments arg arg arg Assign Assign Assign Call Assign Call FunctionDef name:write arg:self arg:data arguments arg arg Return return:yes Call FunctionDef name:close arg:self arguments arg Call"
  },
  {
    "library": "pytorch",
    "name": "enable",
    "source_code": "@staticmethod\ndef enable():\n    torch._C._set_check_sparse_tensor_invariants(True)",
    "docstring": "Enable sparse tensor invariants checking in sparse tensor constructors. .. note:: By default, the sparse tensor invariants checks are disabled. Use :func: to retrieve the current state of sparse tensor invariants checking. .. note:: The sparse tensor invariants check flag is effective to all sparse tensor constructors, both in Python and ATen. The flag can be locally overridden by the `` optional argument of the sparse tensor constructor functions.",
    "type": "method",
    "file_path": "pytorch\\torch\\sparse\\__init__.py",
    "ast_data": "FunctionDef name:enable arguments Call"
  },
  {
    "library": "tensorflow",
    "name": "enable_run_metadata",
    "source_code": "def enable_run_metadata(self):\n    self.ensure_initialized()\n    pywrap_tfe.TFE_ContextEnableRunMetadata(self._handle)",
    "docstring": "Enables tracing of op execution via RunMetadata. To retrieve the accumulated metadata call context.export_run_metadata() and to stop tracing call context.disable_run_metadata().",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\context.py",
    "ast_data": "FunctionDef name:enable_run_metadata arg:self arguments arg Call Call"
  },
  {
    "library": "matplotlib",
    "name": "set_antialiased",
    "source_code": "def set_antialiased(self, aa):\n    self._antialiased = mpl._val_or_rc(aa, 'patch.antialiased')\n    self.stale = True",
    "docstring": "Set whether to use antialiased rendering. Parameters ---------- aa : bool or None",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\patches.py",
    "ast_data": "FunctionDef name:set_antialiased arg:self arg:aa arguments arg arg Assign Call Assign"
  },
  {
    "library": "pytorch",
    "name": "get_floating_dtype",
    "source_code": "def get_floating_dtype(A):\n    dtype = A.dtype\n    if dtype in (torch.float16, torch.float32, torch.float64):\n        return dtype\n    return torch.float32",
    "docstring": "Return the floating point dtype of tensor A. Integer types map to float32.",
    "type": "function",
    "file_path": "pytorch\\torch\\_linalg_utils.py",
    "ast_data": "FunctionDef name:get_floating_dtype arg:A arguments arg Assign If Compare Return return:yes Return return:yes"
  },
  {
    "library": "scipy",
    "name": "p_min",
    "source_code": "@property\ndef p_min(self) -> int:\n    return self._pre_padding()[1]",
    "docstring": "The smallest possible slice index. is the index of the left-most slice, where the window still sticks into the signal, i.e., has non-zero part for t >= 0. is the smallest index where the window function of the slice is non-zero. Since, per convention the zeroth slice is centered at t=0, <= 0 always holds. A detailed example is provided in the :ref: section of the :ref:. See Also -------- k_min: The smallest possible signal index. k_max: First sample index after signal end not touched by a time slice. p_max: Index of first non-overlapping upper time slice. p_num: Number of time slices, i.e., - . p_range: Determine and validate slice index range. ShortTimeFFT: Class this property belongs to.",
    "type": "method",
    "file_path": "scipy\\scipy\\signal\\_short_time_fft.py",
    "ast_data": "FunctionDef name:p_min arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "scrapy",
    "name": "deferred_to_future",
    "source_code": "def deferred_to_future(d: Deferred[_T]) -> Future[_T]:\n    return d.asFuture(_get_asyncio_event_loop())",
    "docstring": ".. versionadded:: 2.6.0 Return an :class: object that wraps *d*. When :ref:, you cannot await on :class: objects from :ref:, you can only await on `` objects allows you to wait on them:: class MySpider(Spider): ... async def parse(self, response): additional_request = scrapy.Request(' deferred = self.crawler.engine.download(additional_request) additional_response = await deferred_to_future(deferred)",
    "type": "function",
    "file_path": "scrapy\\scrapy\\utils\\defer.py",
    "ast_data": "FunctionDef name:deferred_to_future arg:d arguments arg Return return:yes Call Call"
  },
  {
    "library": "scipy",
    "name": "ifft2",
    "source_code": "@_dispatch\ndef ifft2(x, s=None, axes=(-2, -1), norm=None, overwrite_x=False, workers=None, *, plan=None):\n    return (Dispatchable(x, np.ndarray),)",
    "docstring": "Compute the 2-D inverse discrete Fourier Transform. This function computes the inverse of the 2-D discrete Fourier Transform over any number of axes in an M-D array by means of the Fast Fourier Transform (FFT). In other words, `ifftfft2nsaxesifftfftxfft~scipy.fft.fftaxesaxessaxesaxesaxesxifft2ifft2ifftnaxesifftnfftifftifft2` is called. Examples -------- >>> import scipy.fft >>> import numpy as np >>> x = 4 * np.eye(4) >>> scipy.fft.ifft2(x) array([[1.+0.j, 0.+0.j, 0.+0.j, 0.+0.j], # may vary [0.+0.j, 0.+0.j, 0.+0.j, 1.+0.j], [0.+0.j, 0.+0.j, 1.+0.j, 0.+0.j], [0.+0.j, 1.+0.j, 0.+0.j, 0.+0.j]])",
    "type": "function",
    "file_path": "scipy\\scipy\\fft\\_basic.py",
    "ast_data": "FunctionDef name:ifft2 arg:x arg:s arg:axes arg:norm arg:overwrite_x arg:workers arguments arg arg arg arg arg arg arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "randn",
    "source_code": "def randn(self, seed: T, offset: T) -> T:\n    raise NotImplementedError",
    "docstring": "Computes inductor_prims.random with mode=\"randn\". offset has dtype int32.",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\ops_handler.py",
    "ast_data": "FunctionDef name:randn arg:self arg:seed arg:offset arguments arg arg arg Raise"
  },
  {
    "library": "pandas",
    "name": "non_null_counts",
    "source_code": "@property\ndef non_null_counts(self) -> Series:\n    return self.data.count()",
    "docstring": "Sequence of non-null counts for all columns or column (if series).",
    "type": "method",
    "file_path": "pandas\\pandas\\io\\formats\\info.py",
    "ast_data": "FunctionDef name:non_null_counts arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "get_allowed_wrong_prediction_pct",
    "source_code": "def get_allowed_wrong_prediction_pct(self):\n    return 0.01",
    "docstring": "This is used to determine a threshold for when a learned heuristic returns 'unsure'. If this function returns 0.01, we will set the probability required for the decision tree to return a decision such that at most 1% of the predictions will be wrong on the validation set.",
    "type": "method",
    "file_path": "pytorch\\torchgen\\_autoheuristic\\train_decision.py",
    "ast_data": "FunctionDef name:get_allowed_wrong_prediction_pct arg:self arguments arg Return return:yes"
  },
  {
    "library": "scrapy",
    "name": "UnsafeUrlPolicy",
    "source_code": "class UnsafeUrlPolicy(ReferrerPolicy):\n    name: str = POLICY_UNSAFE_URL\n\n    def referrer(self, response_url: str, request_url: str) -> str | None:\n        return self.stripped_referrer(response_url)",
    "docstring": "The \"unsafe-url\" policy specifies that a full URL, stripped for use as a referrer, is sent along with both cross-origin requests and same-origin requests made from a particular request client. Note: The policy's name doesn't lie; it is unsafe. This policy will leak origins and paths from TLS-protected resources to insecure origins. Carefully consider the impact of setting such a policy for potentially sensitive documents.",
    "type": "class",
    "file_path": "scrapy\\scrapy\\spidermiddlewares\\referer.py",
    "ast_data": "ClassDef name:UnsafeUrlPolicy FunctionDef name:referrer arg:self arg:response_url arg:request_url arguments arg arg arg Return return:yes Call"
  },
  {
    "library": "authlib",
    "name": "generate_user_info",
    "source_code": "def generate_user_info(self, user, scope):\n    raise NotImplementedError()",
    "docstring": "Provide user information for the given scope. Developers MUST implement this method in subclass, e.g.:: from authlib.oidc.core import UserInfo def generate_user_info(self, user, scope): user_info = UserInfo(sub=user.id, name=user.name) if \"email\" in scope: user_info[\"email\"] = user.email return user_info :param user: user instance :param scope: scope of the token :return: `` instance",
    "type": "method",
    "file_path": "authlib\\authlib\\oidc\\core\\grants\\code.py",
    "ast_data": "FunctionDef name:generate_user_info arg:self arg:user arg:scope arguments arg arg arg Raise Call"
  },
  {
    "library": "matplotlib",
    "name": "semilogy",
    "source_code": "@_docstring.interpd\ndef semilogy(self, *args, **kwargs):\n    d = {k: v for k, v in kwargs.items() if k in ['base', 'subs', 'nonpositive', 'basey', 'subsy', 'nonposy']}\n    self.set_yscale('log', **d)\n    return self.plot(*args, **{k: v for k, v in kwargs.items() if k not in d})",
    "docstring": "Make a plot with log scaling on the y-axis. Call signatures:: semilogy([x], y, [fmt], data=None, **kwargs) semilogy([x], y, [fmt], [x2], y2, [fmt2], ..., **kwargs) This is just a thin wrapper around which additionally changes the y-axis to log scaling. All the concepts and parameters of plot can be used here as well. The additional parameters *base*, *subs*, and *nonpositive* control the y-axis properties. They are just forwarded to . Parameters ---------- base : float, default: 10 Base of the y logarithm. subs : array-like, optional The location of the minor yticks. If *None*, reasonable locations are automatically chosen depending on the number of decades in the plot. See for details. nonpositive : {'mask', 'clip'}, default: 'clip' Non-positive values in y can be masked as invalid, or clipped to a very small positive number. **kwargs All parameters supported by . Returns ------- list of Objects representing the plotted data.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\axes\\_axes.py",
    "ast_data": "FunctionDef name:semilogy arg:self arguments arg arg arg Assign Call Compare Call Return return:yes Call Call Compare"
  },
  {
    "library": "pandas",
    "name": "_gen_dtypes",
    "source_code": "def _gen_dtypes(self) -> Iterator[str]:\n    for dtype in self.dtypes:\n        yield pprint_thing(dtype)",
    "docstring": "Iterator with string representation of column dtypes.",
    "type": "method",
    "file_path": "pandas\\pandas\\io\\formats\\info.py",
    "ast_data": "FunctionDef name:_gen_dtypes arg:self arguments arg For Call"
  },
  {
    "library": "pytorch",
    "name": "_local_pre_load_state_dict_hook",
    "source_code": "def _local_pre_load_state_dict_hook(module: nn.Module, fsdp_state: _FSDPState, state_dict: dict[str, Any], prefix: str) -> None:\n    _lazy_init(fsdp_state, module)\n    _replace_by_prefix(state_dict, prefix, f'{prefix}{FSDP_PREFIX}')\n    fqn = f'{prefix}{FSDP_PREFIX}{FLAT_PARAM}'\n    if fqn not in state_dict:\n        assert not _has_fsdp_params(fsdp_state, module), 'No `FlatParameter` in `state_dict` for this FSDP instance but it has parameters'\n        return\n    load_tensor = state_dict[fqn]\n    assert isinstance(load_tensor, ShardedTensor), 'Tensors in local_state_dict should be ShardedTensor.'\n    flat_param = _module_handle(fsdp_state, module).flat_param\n    assert flat_param is not None\n    valid_data_size = flat_param.numel() - flat_param._shard_numel_padded\n    shards = load_tensor.local_shards()\n    if valid_data_size > 0:\n        assert len(shards), 'load_local_state_dict assume one shard per ShardedTensor.'\n        load_tensor = shards[0].tensor\n        if flat_param._shard_numel_padded > 0:\n            assert load_tensor.numel() < flat_param.numel(), f'Local shard size = {flat_param.numel()} and the tensor in the state_dict is {load_tensor.numel()}.'\n            load_tensor = F.pad(load_tensor, [0, flat_param._shard_numel_padded])\n    else:\n        load_tensor = flat_param\n    state_dict[fqn] = load_tensor",
    "docstring": "This hook finds the local flat_param for this FSDP module from the state_dict. The flat_param should be a ShardedTensor. This hook converts the ShardedTensor to a tensor. No copy happen unless padding is required.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\fsdp\\_state_dict_utils.py",
    "ast_data": "FunctionDef name:_local_pre_load_state_dict_hook arg:module arg:fsdp_state arg:state_dict arg:prefix arguments arg arg arg arg Call Call Assign If Compare Call Return return:no Assign Call Assign Call Compare Assign Call Assign Call If Compare Call Assign If Compare Compare Call Call Call Call Assign Call Assign Assign"
  },
  {
    "library": "scipy",
    "name": "_jackknife_resample",
    "source_code": "def _jackknife_resample(sample, batch=None):\n    n = sample.shape[-1]\n    batch_nominal = batch or n\n    for k in range(0, n, batch_nominal):\n        batch_actual = min(batch_nominal, n - k)\n        j = np.ones((batch_actual, n), dtype=bool)\n        np.fill_diagonal(j[:, k:k + batch_actual], False)\n        i = np.arange(n)\n        i = np.broadcast_to(i, (batch_actual, n))\n        i = i[j].reshape((batch_actual, n - 1))\n        resamples = sample[..., i]\n        yield resamples",
    "docstring": "Jackknife resample the sample. Only one-sample stats for now.",
    "type": "function",
    "file_path": "scipy\\scipy\\stats\\_resampling.py",
    "ast_data": "FunctionDef name:_jackknife_resample arg:sample arg:batch arguments arg arg Assign Assign BoolOp For Call Assign Call Assign Call Call Assign Call Assign Call Assign Call Assign"
  },
  {
    "library": "numpy",
    "name": "astype",
    "source_code": "@array_function_dispatch(_astype_dispatcher)\ndef astype(x, dtype, /, *, copy=True, device=None):\n    if not (isinstance(x, np.ndarray) or isscalar(x)):\n        raise TypeError(f'Input should be a NumPy array or scalar. It is a {type(x)} instead.')\n    if device is not None and device != 'cpu':\n        raise ValueError(f'Device not understood. Only \"cpu\" is allowed, but received: {device}')\n    return x.astype(dtype, copy=copy)",
    "docstring": "Copies an array to a specified data type. This function is an Array API compatible alternative to . Parameters ---------- x : ndarray Input NumPy array to cast. `` if passed. .. versionadded:: 2.1.0 Returns ------- out : ndarray An array having the specified data type. See Also -------- ndarray.astype Examples -------- >>> import numpy as np >>> arr = np.array([1, 2, 3]); arr array([1, 2, 3]) >>> np.astype(arr, np.float64) array([1., 2., 3.]) Non-copy case: >>> arr = np.array([1, 2, 3]) >>> arr_noncpy = np.astype(arr, arr.dtype, copy=False) >>> np.shares_memory(arr, arr_noncpy) True",
    "type": "function",
    "file_path": "numpy\\numpy\\_core\\numeric.py",
    "ast_data": "FunctionDef name:astype arguments arg arg arg arg If BoolOp Call Call Raise Call Call If BoolOp Compare Compare Raise Call Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "get_weight",
    "source_code": "def get_weight(self):\n    assert isinstance(self.weight_scale, torch.Tensor)\n    assert isinstance(self.weight_zero_point, torch.Tensor)\n    if self.is_decomposed:\n        return _quantize_and_dequantize_weight_decomposed(self.weight, self.weight_qscheme, self.weight_dtype, self.weight_scale, self.weight_zero_point, self.weight_axis_int, self.weight_quant_min, self.weight_quant_max)\n    else:\n        return _quantize_and_dequantize_weight(self.weight, self.weight_qscheme, self.weight_dtype, self.weight_scale, self.weight_zero_point, self.weight_axis_int)",
    "docstring": "Fake quantize (quantize and dequantize) the weight with the quantization parameters for weight, this is used to simulate the numerics for the quantized weight in a quantized model",
    "type": "method",
    "file_path": "pytorch\\torch\\ao\\nn\\quantized\\reference\\modules\\utils.py",
    "ast_data": "FunctionDef name:get_weight arg:self arguments arg Call Call If Return return:yes Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "aten_abs_complex",
    "source_code": "@onnx_impl(aten.abs.default, complex=True, trace_only=True)\ndef aten_abs_complex(self: TRealOrUInt8) -> TRealOrUInt8:\n    return op.ReduceL2(self, [-1], keepdims=False)",
    "docstring": "abs(Tensor self) -> Tensor",
    "type": "function",
    "file_path": "pytorch\\torch\\onnx\\_internal\\exporter\\_torchlib\\ops\\core.py",
    "ast_data": "FunctionDef name:aten_abs_complex arg:self arguments arg Return return:yes Call Call"
  },
  {
    "library": "scipy",
    "name": "from_bernstein_basis",
    "source_code": "@classmethod\ndef from_bernstein_basis(cls, bp, extrapolate=None):\n    if not isinstance(bp, BPoly):\n        raise TypeError(f'.from_bernstein_basis only accepts BPoly instances. Got {type(bp)} instead.')\n    dx = np.diff(bp.x)\n    k = bp.c.shape[0] - 1\n    rest = (None,) * (bp.c.ndim - 2)\n    c = np.zeros_like(bp.c)\n    for a in range(k + 1):\n        factor = (-1) ** a * comb(k, a) * bp.c[a]\n        for s in range(a, k + 1):\n            val = comb(k - a, s - a) * (-1) ** s\n            c[k - s] += factor * val / dx[(slice(None),) + rest] ** s\n    if extrapolate is None:\n        extrapolate = bp.extrapolate\n    return cls.construct_fast(c, bp.x, extrapolate, bp.axis)",
    "docstring": "Construct a piecewise polynomial in the power basis from a polynomial in Bernstein basis. Parameters ---------- bp : BPoly A Bernstein basis polynomial, as created by BPoly extrapolate : bool or 'periodic', optional If bool, determines whether to extrapolate to out-of-bounds points based on first and last intervals, or to return NaNs. If 'periodic', periodic extrapolation is used. Default is True.",
    "type": "method",
    "file_path": "scipy\\scipy\\interpolate\\_interpolate.py",
    "ast_data": "FunctionDef name:from_bernstein_basis arg:cls arg:bp arg:extrapolate arguments arg arg arg If Call Raise Call Call Assign Call Assign Assign Assign Call For Call Assign Call For Call Assign Call Call If Compare Assign Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "build_chunks",
    "source_code": "def build_chunks(self) -> int:\n    return 0",
    "docstring": "Splits the proto, and returns the size of the chunks created.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\tools\\proto_splitter\\split_graph_def.py",
    "ast_data": "FunctionDef name:build_chunks arg:self arguments arg Return return:yes"
  },
  {
    "library": "django",
    "name": "get_year_format",
    "source_code": "def get_year_format(self):\n    return self.year_format",
    "docstring": "Get a year format string in strptime syntax to be used to parse the year from url variables.",
    "type": "method",
    "file_path": "django\\django\\views\\generic\\dates.py",
    "ast_data": "FunctionDef name:get_year_format arg:self arguments arg Return return:yes"
  },
  {
    "library": "pandas",
    "name": "_gen_list_of_colors_from_iterable",
    "source_code": "def _gen_list_of_colors_from_iterable(color: Collection[Color]) -> Iterator[Color]:\n    for x in color:\n        if _is_single_color(x):\n            yield x\n        else:\n            raise ValueError(f'Invalid color {x}')",
    "docstring": "Yield colors from string of several letters or from collection of colors.",
    "type": "function",
    "file_path": "pandas\\pandas\\plotting\\_matplotlib\\style.py",
    "ast_data": "FunctionDef name:_gen_list_of_colors_from_iterable arg:color arguments arg For If Call Raise Call"
  },
  {
    "library": "scipy",
    "name": "reconstruct_skel_matrix",
    "source_code": "def reconstruct_skel_matrix(A, k, idx):\n    return A[:, idx[:k]]",
    "docstring": "Reconstruct skeleton matrix from ID. The skeleton matrix can be reconstructed from the original matrix and its ID rank and indices and , respectively, as:: B = A[:,idx[:k]] The original matrix can then be reconstructed via:: numpy.hstack([B, numpy.dot(B, proj)])[:,numpy.argsort(idx)] See also :func: and :func:. .. This function automatically detects the matrix data type and calls the appropriate backend. For details, see :func: and :func:. Parameters ---------- A : :class: Original matrix. k : int Rank of ID. idx : :class: Column index array. Returns ------- :class: Skeleton matrix.",
    "type": "function",
    "file_path": "scipy\\scipy\\linalg\\interpolative.py",
    "ast_data": "FunctionDef name:reconstruct_skel_matrix arg:A arg:k arg:idx arguments arg arg arg Return return:yes"
  },
  {
    "library": "scipy",
    "name": "moderatef",
    "source_code": "def moderatef(f):\n    f = FUNCMAX if np.isnan(f) else f\n    f = np.clip(f, -REALMAX, FUNCMAX)\n    return f",
    "docstring": "This function moderates the function value of a MINIMIZATION problem. It replaces NaN and any value above FUNCMAX by FUNCMAX.",
    "type": "function",
    "file_path": "scipy\\scipy\\_lib\\pyprima\\pyprima\\src\\pyprima\\common\\evaluate.py",
    "ast_data": "FunctionDef name:moderatef arg:f arguments arg Assign Call Assign Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "get_weight",
    "source_code": "def get_weight(self):\n    return self._weight",
    "docstring": "Set the font weight. Options are: A numeric value in the range 0-1000 or one of 'light', 'normal', 'regular', 'book', 'medium', 'roman', 'semibold', 'demibold', 'demi', 'bold', 'heavy', 'extra bold', 'black'",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\font_manager.py",
    "ast_data": "FunctionDef name:get_weight arg:self arguments arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "LinearReLU",
    "source_code": "class LinearReLU(nnqat.Linear, nni._FusedModule):\n    _FLOAT_MODULE = nni.LinearReLU\n\n    def __init__(self, in_features, out_features, bias=True, qconfig=None):\n        super().__init__(in_features, out_features, bias, qconfig)\n\n    def forward(self, input):\n        return F.relu(F.linear(input, self.weight_fake_quant(self.weight), self.bias))\n\n    @classmethod\n    def from_float(cls, mod, use_precomputed_fake_quant=False):\n        return super().from_float(mod, use_precomputed_fake_quant)\n\n    def to_float(self):\n        linear = torch.nn.Linear(self.in_features, self.out_features, self.bias is not None)\n        linear.weight = torch.nn.Parameter(self.weight.detach())\n        if self.bias is not None:\n            linear.bias = torch.nn.Parameter(self.bias.detach())\n        relu = torch.nn.ReLU()\n        return torch.ao.nn.intrinsic.LinearReLU(linear, relu)",
    "docstring": "A LinearReLU module fused from Linear and ReLU modules, attached with FakeQuantize modules for weight, used in quantization aware training. We adopt the same interface as :class:. Similar to , with FakeQuantize modules initialized to default. Attributes: weight: fake quant module for weight Examples:: >>> # xdoctest: +SKIP >>> m = nn.qat.LinearReLU(20, 30) >>> input = torch.randn(128, 20) >>> output = m(input) >>> print(output.size()) torch.Size([128, 30])",
    "type": "class",
    "file_path": "pytorch\\torch\\ao\\nn\\intrinsic\\qat\\modules\\linear_relu.py",
    "ast_data": "ClassDef name:LinearReLU Assign FunctionDef name:__init__ arg:self arg:in_features arg:out_features arg:bias arg:qconfig arguments arg arg arg arg arg Call Call FunctionDef name:forward arg:self arg:input arguments arg arg Return return:yes Call Call Call FunctionDef name:from_float arg:cls arg:mod arg:use_precomputed_fake_quant arguments arg arg arg Return return:yes Call Call FunctionDef name:to_float arg:self arguments arg Assign Call Compare Assign Call Call If Compare Assign Call Call Assign Call Return return:yes Call"
  },
  {
    "library": "pygame",
    "name": "start",
    "source_code": "def start(self):\n    pass",
    "docstring": "Not implemented.",
    "type": "method",
    "file_path": "pygame\\src_py\\_camera_vidcapture.py",
    "ast_data": "FunctionDef name:start arg:self arguments arg"
  },
  {
    "library": "tensorflow",
    "name": "make_list_of_t",
    "source_code": "def make_list_of_t(ts, check_graph=True, allow_graph=True, ignore_ops=False):\n    if isinstance(ts, ops.Graph):\n        if allow_graph:\n            return get_tensors(ts)\n        else:\n            raise TypeError('allow_graph is False: cannot convert a tf.Graph.')\n    else:\n        if not is_iterable(ts):\n            ts = [ts]\n        if not ts:\n            return []\n        if check_graph:\n            check_types = None if ignore_ops else tensor_lib.Tensor\n            get_unique_graph(ts, check_types=check_types)\n        return [t for t in ts if isinstance(t, tensor_lib.Tensor)]",
    "docstring": "Convert ts to a list of . Args: ts: can be an iterable of , a or a single tensor. check_graph: if check if all the tensors belong to the same graph. allow_graph: if a cannot be converted. ignore_ops: if , silently ignore . Returns: A newly created list of . Raises: TypeError: if cannot be converted to a list of or, if is , if all the ops do not belong to the same graph.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\op_selector.py",
    "ast_data": "FunctionDef name:make_list_of_t arg:ts arg:check_graph arg:allow_graph arg:ignore_ops arguments arg arg arg arg If Call If Return return:yes Call Raise Call If Call Assign If Return return:no If Assign Call Return return:yes Call"
  },
  {
    "library": "numpy",
    "name": "__floordiv__",
    "source_code": "def __floordiv__(self, other):\n    if self._delegate_binop(other):\n        return NotImplemented\n    return floor_divide(self, other)",
    "docstring": "Divide other into self, and return a new masked array.",
    "type": "method",
    "file_path": "numpy\\numpy\\ma\\core.py",
    "ast_data": "FunctionDef name:__floordiv__ arg:self arg:other arguments arg arg If Call Return return:yes Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "_construct_axes_dict",
    "source_code": "@final\ndef _construct_axes_dict(self, axes: Sequence[Axis] | None=None, **kwargs: AxisInt) -> dict:\n    d = {a: self._get_axis(a) for a in axes or self._AXIS_ORDERS}\n    d.update(kwargs)\n    return d",
    "docstring": "Return an axes dictionary for myself.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\generic.py",
    "ast_data": "FunctionDef name:_construct_axes_dict arg:self arg:axes arguments arg arg arg Assign Call BoolOp Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "GetRealValue",
    "source_code": "def GetRealValue(self, value):\n    assert value.op.type not in ['Variable', 'VariableV2']\n    real_value = self._history_map.get(value.name)\n    if real_value is None:\n        cur_value = value\n        cur_grad_state = self\n        while True:\n            enter_op = util.GetLoopConstantEnter(cur_value)\n            if enter_op:\n                cur_value = enter_op.inputs[0]\n                cur_grad_state = cur_grad_state.outer_grad_state\n                if cur_grad_state is None:\n                    real_value = self._grad_context.AddValue(cur_value)\n                    break\n            elif constant_op.is_constant(cur_value):\n                real_value = constant_op.constant(tensor_util.constant_value(cur_value), dtype=cur_value.dtype)\n                break\n            else:\n                self._grad_context.Exit()\n                history_value = cur_grad_state.AddForwardAccumulator(cur_value)\n                self._grad_context.Enter()\n                break\n        if real_value is None:\n            real_value = cur_grad_state.AddBackpropAccumulatedValue(history_value, cur_value)\n            if cur_grad_state != self:\n                real_value = self._grad_context.AddValue(real_value)\n        self._history_map[value.name] = real_value\n    return real_value",
    "docstring": "Get the real value of . If backprop \"uses\" a value produced by forward inference, an accumulator is added in the forward loop to accumulate its values. We use the accumulated value. This method must be called in the grad loop context. must be in forward and needed for backprop. Args: value: A tensor to be captured. Returns: The same tensor obtained from the saved history.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\control_flow_state.py",
    "ast_data": "FunctionDef name:GetRealValue arg:self arg:value arguments arg arg Compare Assign Call If Compare Assign Assign While Assign Call If Assign Assign If Compare Assign Call If Call Assign Call Call Call Assign Call Call If Compare Assign Call If Compare Assign Call Assign Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "significant_figures",
    "source_code": "@property\ndef significant_figures(self) -> int:\n    self._lazy_init()\n    n_total = len(self._sorted_times)\n    lower_bound = int(n_total // 4)\n    upper_bound = int(torch.tensor(3 * n_total / 4).ceil())\n    interquartile_points: tuple[float, ...] = self._sorted_times[lower_bound:upper_bound]\n    std = torch.tensor(interquartile_points).std(unbiased=False).item()\n    sqrt_n = torch.tensor(len(interquartile_points)).sqrt().item()\n    confidence_interval = max(1.645 * std / sqrt_n, _MIN_CONFIDENCE_INTERVAL)\n    relative_ci = torch.tensor(self._median / confidence_interval).log10().item()\n    num_significant_figures = int(torch.tensor(relative_ci).floor())\n    return min(max(num_significant_figures, 1), _MAX_SIGNIFICANT_FIGURES)",
    "docstring": "Approximate significant figure estimate. This property is intended to give a convenient way to estimate the precision of a measurement. It only uses the interquartile region to estimate statistics to try to mitigate skew from the tails, and uses a static z value of 1.645 since it is not expected to be used for small values of , so z can approximate . The significant figure estimation used in conjunction with the method to provide a more human interpretable data summary. __repr__ does not use this method; it simply displays raw values. Significant figure estimation is intended for .",
    "type": "method",
    "file_path": "pytorch\\torch\\utils\\benchmark\\utils\\common.py",
    "ast_data": "FunctionDef name:significant_figures arg:self arguments arg Call Assign Call Assign Call Assign Call Call Call Assign Call Call Call Assign Call Call Call Call Assign Call Assign Call Call Call Assign Call Call Call Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "get_crc32_options",
    "source_code": "def get_crc32_options() -> bool:\n    from torch.utils.serialization import config\n    return config.save.compute_crc32",
    "docstring": "Get whether :func: computes and writes crc32 for each record. Defaults to ``.",
    "type": "function",
    "file_path": "pytorch\\torch\\serialization.py",
    "ast_data": "FunctionDef name:get_crc32_options arguments Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "breakpoints",
    "source_code": "@property\ndef breakpoints(self):\n    return self._breakpoints",
    "docstring": "Get a set of the currently-activated breakpoints. Returns: A of 3-tuples: (node_name, output_slot, debug_op), e.g., {(\"MatMul\", 0, \"DebugIdentity\")}.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\debug\\lib\\grpc_debug_server.py",
    "ast_data": "FunctionDef name:breakpoints arg:self arguments arg Return return:yes"
  },
  {
    "library": "kornia",
    "name": "device",
    "source_code": "@property\ndef device(self) -> torch.device:\n    return self.data.device",
    "docstring": "Return the image device.",
    "type": "method",
    "file_path": "kornia\\kornia\\image\\image.py",
    "ast_data": "FunctionDef name:device arg:self arguments arg Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "get_metadata_routing",
    "source_code": "def get_metadata_routing(self):\n    router = MetadataRouter(owner=self.__class__.__name__).add_self_request(self).add(splitter=self.cv, method_mapping=MethodMapping().add(caller='fit', callee='split')).add(scorer=self._get_scorer(), method_mapping=MethodMapping().add(caller='score', callee='score').add(caller='fit', callee='score'))\n    return router",
    "docstring": "Get metadata routing of this object. Please check :ref: on how the routing mechanism works. .. versionadded:: 1.4 Returns ------- routing : MetadataRouter A :class: encapsulating routing information.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\linear_model\\_logistic.py",
    "ast_data": "FunctionDef name:get_metadata_routing arg:self arguments arg Assign Call Call Call Call Call Call Call Call Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "prepare_or_wait_for_session",
    "source_code": "def prepare_or_wait_for_session(self, master='', config=None, wait_for_checkpoint=False, max_wait_secs=7200, start_standard_services=True):\n    self._coord.clear_stop()\n    if self._summary_writer:\n        self._summary_writer.reopen()\n    if self._is_chief:\n        sess = self._session_manager.prepare_session(master, init_op=self.init_op, saver=self.saver, checkpoint_dir=self._logdir, wait_for_checkpoint=wait_for_checkpoint, max_wait_secs=max_wait_secs, config=config, init_feed_dict=self._init_feed_dict, init_fn=self._init_fn)\n        self._write_graph()\n        if start_standard_services:\n            logging.info('Starting standard services.')\n            self.start_standard_services(sess)\n    else:\n        sess = self._session_manager.wait_for_session(master, config=config, max_wait_secs=max_wait_secs)\n    if start_standard_services:\n        logging.info('Starting queue runners.')\n        self.start_queue_runners(sess)\n    return sess",
    "docstring": "Make sure the model is ready to be used. Create a session on 'master', recovering or initializing the model as needed, or wait for a session to be ready. If running as the chief and is set to True, also call the session manager to start the standard services. Args: master: name of the TensorFlow master to use. See the constructor for how this is interpreted. config: Optional ConfigProto proto used to configure the session, which is passed as-is to create the session. wait_for_checkpoint: Whether we should wait for the availability of a checkpoint before creating Session. Defaults to False. max_wait_secs: Maximum time to wait for the session to become available. start_standard_services: Whether to start the standard services and the queue runners. Returns: A Session object that can be used to drive the model.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\training\\supervisor.py",
    "ast_data": "FunctionDef name:prepare_or_wait_for_session arg:self arg:master arg:config arg:wait_for_checkpoint arg:max_wait_secs arg:start_standard_services arguments arg arg arg arg arg arg Call If Call If Assign Call Call If Call Call Assign Call If Call Call Return return:yes"
  },
  {
    "library": "scrapy",
    "name": "IFeedStorage",
    "source_code": "class IFeedStorage(Interface):\n\n    def __init__(uri, *, feed_options=None):\n        pass\n\n    def open(spider):\n        pass\n\n    def store(file):\n        pass",
    "docstring": "Interface that all Feed Storages must implement",
    "type": "class",
    "file_path": "scrapy\\scrapy\\extensions\\feedexport.py",
    "ast_data": "ClassDef name:IFeedStorage FunctionDef name:__init__ arg:uri arguments arg arg FunctionDef name:open arg:spider arguments arg FunctionDef name:store arg:file arguments arg"
  },
  {
    "library": "pygame",
    "name": "get_definitions",
    "source_code": "def get_definitions():\n    import re\n    deps = []\n    match = re.compile('([a-zA-Z0-9_]+) += +(.+)$').match\n    with open(PATH) as setup_in:\n        for line in setup_in:\n            m = match(line)\n            if m is not None:\n                deps.append(Definition(m.group(1), m.group(2)))\n    return deps",
    "docstring": "Return a list of definitions in the Windows Common Setup Each macro definition object has a 'name' and 'value' attribute.",
    "type": "function",
    "file_path": "pygame\\buildconfig\\setup_win_common.py",
    "ast_data": "FunctionDef name:get_definitions arguments Assign Assign Call With Call For Assign Call If Compare Call Call Call Call Return return:yes"
  },
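The parser above is a line-by-line regex match over `NAME = value` pairs. Below is a minimal standalone sketch of the same parsing run against an in-memory sample; the sample text and the plain-tuple output are illustrative, not the real Setup file contents:

```python
import re
from io import StringIO

# Hypothetical sample in the same "NAME = value" format the parser expects.
sample = """SDL = -lSDL
FONT = -lSDL_ttf
# a comment line that the regex skips
MIXER = -lSDL_mixer
"""

match = re.compile(r"([a-zA-Z0-9_]+) += +(.+)$").match
deps = []
for line in StringIO(sample):
    m = match(line)
    if m is not None:  # non-matching lines (comments, blanks) are ignored
        deps.append((m.group(1), m.group(2)))

print(deps)  # [('SDL', '-lSDL'), ('FONT', '-lSDL_ttf'), ('MIXER', '-lSDL_mixer')]
```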
  {
    "library": "scikit-learn",
    "name": "expand_dims",
    "source_code": "def expand_dims(a: Array, /, *, axis: int | tuple[int, ...]=(0,), xp: ModuleType | None=None) -> Array:\n    if xp is None:\n        xp = array_namespace(a)\n    if not isinstance(axis, tuple):\n        axis = (axis,)\n    ndim = a.ndim + len(axis)\n    if axis != () and (min(axis) < -ndim or max(axis) >= ndim):\n        err_msg = f'a provided axis position is out of bounds for array of dimension {a.ndim}'\n        raise IndexError(err_msg)\n    axis = tuple((dim % ndim for dim in axis))\n    if len(set(axis)) != len(axis):\n        err_msg = 'Duplicate dimensions specified in `axis`.'\n        raise ValueError(err_msg)\n    for i in sorted(axis):\n        a = xp.expand_dims(a, axis=i)\n    return a",
    "docstring": "Expand the shape of an array. Insert (a) new axis/axes that will appear at the position(s) specified by in the expanded array shape. This is `axisaa` may also be a tuple: >>> y = xpx.expand_dims(x, axis=(0, 1), xp=xp) >>> y Array([[[1, 2]]], dtype=array_api_strict.int64) >>> y = xpx.expand_dims(x, axis=(2, 0), xp=xp) >>> y Array([[[1], [2]]], dtype=array_api_strict.int64)",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\externals\\array_api_extra\\_lib\\_funcs.py",
    "ast_data": "FunctionDef name:expand_dims arguments arg arg arg If Compare Assign Call If Call Assign Assign Call If BoolOp Compare BoolOp Compare Call Compare Call Assign Raise Call Assign Call If Compare Call Call Call Assign Raise Call For Call Assign Call Return return:yes"
  },
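The multi-axis logic above normalizes each position against the *final* rank, rejects duplicates, and inserts axes in ascending order so earlier insertions do not shift later targets. A numpy-only sketch of the same semantics (`expand_dims_multi` is a hypothetical name, not the library API):

```python
import numpy as np

def expand_dims_multi(a, axis=(0,)):
    """Sketch of multi-axis expand_dims: positions refer to the expanded shape."""
    if not isinstance(axis, tuple):
        axis = (axis,)
    ndim = a.ndim + len(axis)
    axis = tuple(dim % ndim for dim in axis)  # normalize negative positions
    if len(set(axis)) != len(axis):
        raise ValueError("Duplicate dimensions specified in `axis`.")
    for i in sorted(axis):  # insert left-to-right so later positions stay valid
        a = np.expand_dims(a, axis=i)
    return a

x = np.array([1, 2])
print(expand_dims_multi(x, axis=(0, 1)).shape)  # (1, 1, 2)
print(expand_dims_multi(x, axis=(2, 0)).shape)  # (1, 2, 1)
```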
  {
    "library": "matplotlib",
    "name": "__init__",
    "source_code": "def __init__(self, ax, label, image=None, color='0.85', hovercolor='0.95', *, useblit=True):\n    super().__init__(ax)\n    if image is not None:\n        ax.imshow(image)\n    self.label = ax.text(0.5, 0.5, label, verticalalignment='center', horizontalalignment='center', transform=ax.transAxes)\n    self._useblit = useblit and self.canvas.supports_blit\n    self._observers = cbook.CallbackRegistry(signals=['clicked'])\n    self.connect_event('button_press_event', self._click)\n    self.connect_event('button_release_event', self._release)\n    self.connect_event('motion_notify_event', self._motion)\n    ax.set_navigate(False)\n    ax.set_facecolor(color)\n    ax.set_xticks([])\n    ax.set_yticks([])\n    self.color = color\n    self.hovercolor = hovercolor",
    "docstring": "Parameters ---------- ax : The instance the button will be placed into. label : str The button text. image : array-like or PIL Image The image to place in the button, if not *None*. The parameter is directly forwarded to . color : :mpltype: The color of the button when not activated. hovercolor : :mpltype: The color of the button when the mouse is over it. useblit : bool, default: True Use blitting for faster drawing if supported by the backend. See the tutorial :ref: for details. .. versionadded:: 3.7",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\widgets.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:ax arg:label arg:image arg:color arg:hovercolor arguments arg arg arg arg arg arg arg Call Call If Compare Call Assign Call Assign BoolOp Assign Call Call Call Call Call Call Call Call Assign Assign"
  },
  {
    "library": "matplotlib",
    "name": "_get_text_metrics_with_cache",
    "source_code": "def _get_text_metrics_with_cache(renderer, text, fontprop, ismath, dpi):\n    return _get_text_metrics_with_cache_impl(weakref.ref(renderer), text, fontprop.copy(), ismath, dpi)",
    "docstring": "Call ``, caching the results.",
    "type": "function",
    "file_path": "matplotlib\\lib\\matplotlib\\text.py",
    "ast_data": "FunctionDef name:_get_text_metrics_with_cache arg:renderer arg:text arg:fontprop arg:ismath arg:dpi arguments arg arg arg arg arg Return return:yes Call Call Call"
  },
  {
    "library": "scikit-learn",
    "name": "inverse_transform",
    "source_code": "def inverse_transform(self, X):\n    return self._inverse_transform(X, self.dictionary)",
    "docstring": "Transform data back to its original space. Parameters ---------- X : array-like of shape (n_samples, n_components) Data to be transformed back. Must have the same number of components as the data used to train the model. Returns ------- X_original : ndarray of shape (n_samples, n_features) Transformed data.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\decomposition\\_dict_learning.py",
    "ast_data": "FunctionDef name:inverse_transform arg:self arg:X arguments arg arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "_register_replace_node_hook",
    "source_code": "def _register_replace_node_hook(self, f):\n    assert callable(f), 'create_node hook must be a callable.'\n    self._replace_hooks.append(f)",
    "docstring": "Takes a callable which will be called everytime when we replace a node to a new node, or change the node's name. Callable takes three arguments: the old node we're changing, and NAME of the new node, followed by the user node which consumes the old node to be replaced.",
    "type": "method",
    "file_path": "pytorch\\torch\\fx\\graph_module.py",
    "ast_data": "FunctionDef name:_register_replace_node_hook arg:self arg:f arguments arg arg Call Call"
  },
  {
    "library": "seaborn",
    "name": "Coordinate",
    "source_code": "class Coordinate(Property):\n    legend = False\n    normed = False",
    "docstring": "The position of visual marks with respect to the axes of the plot.",
    "type": "class",
    "file_path": "seaborn\\seaborn\\_core\\properties.py",
    "ast_data": "ClassDef name:Coordinate Assign Assign"
  },
  {
    "library": "django",
    "name": "add_q",
    "source_code": "def add_q(self, q_object, reuse_all=False):\n    existing_inner = {a for a in self.alias_map if self.alias_map[a].join_type == INNER}\n    if reuse_all:\n        can_reuse = set(self.alias_map)\n    else:\n        can_reuse = self.used_aliases\n    clause, _ = self._add_q(q_object, can_reuse)\n    if clause:\n        self.where.add(clause, AND)\n    self.demote_joins(existing_inner)",
    "docstring": "A preprocessor for the internal _add_q(). Responsible for doing final join promotion.",
    "type": "method",
    "file_path": "django\\django\\db\\models\\sql\\query.py",
    "ast_data": "FunctionDef name:add_q arg:self arg:q_object arg:reuse_all arguments arg arg arg Assign Compare If Assign Call Assign Assign Call If Call Call"
  },
  {
    "library": "matplotlib",
    "name": "norm",
    "source_code": "@property\ndef norm(self):\n    return self.scalar * self.scalar + np.dot(self.vector, self.vector)",
    "docstring": "The 2-norm, q*q', a scalar",
    "type": "method",
    "file_path": "matplotlib\\lib\\mpl_toolkits\\mplot3d\\axes3d.py",
    "ast_data": "FunctionDef name:norm arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "_ModuleProviderAction",
    "source_code": "class _ModuleProviderAction(Enum):\n    INTERN = 1\n    EXTERN = 2\n    MOCK = 3\n    DENY = 4\n    REPACKAGED_MOCK_MODULE = 5\n    SKIP = 6",
    "docstring": "Represents one of the actions that :class: can take on a module. See :meth: and friends for a description of what the actions do.",
    "type": "class",
    "file_path": "pytorch\\torch\\package\\package_exporter.py",
    "ast_data": "ClassDef name:_ModuleProviderAction Assign Assign Assign Assign Assign Assign"
  },
  {
    "library": "sphinx",
    "name": "error_info",
    "source_code": "def error_info(messages: str, extensions: str, traceback: str) -> str:\n    import platform\n    import docutils\n    import jinja2\n    import pygments\n    import sphinx\n    return f'Versions\\n========\\n\\n* Platform:         {sys.platform}; ({platform.platform()})\\n* Python version:   {platform.python_version()} ({platform.python_implementation()})\\n* Sphinx version:   {sphinx.__display_version__}\\n* Docutils version: {docutils.__version__}\\n* Jinja2 version:   {jinja2.__version__}\\n* Pygments version: {pygments.__version__}\\n\\nLast Messages\\n=============\\n\\n{messages}\\n\\nLoaded Extensions\\n=================\\n\\n{extensions}\\n\\nTraceback\\n=========\\n\\n{traceback}\\n'",
    "docstring": "Format the traceback and extensions list with environment information.",
    "type": "function",
    "file_path": "sphinx\\sphinx\\_cli\\util\\errors.py",
    "ast_data": "FunctionDef name:error_info arg:messages arg:extensions arg:traceback arguments arg arg arg Return return:yes Call Call Call"
  },
  {
    "library": "scipy",
    "name": "_compute_p_max",
    "source_code": "def _compute_p_max(m_max):\n    sqrt_m_max = np.sqrt(m_max)\n    p_low = int(np.floor(sqrt_m_max))\n    p_high = int(np.ceil(sqrt_m_max + 1))\n    return max((p for p in range(p_low, p_high + 1) if p * (p - 1) <= m_max + 1))",
    "docstring": "Compute the largest positive integer p such that p*(p-1) <= m_max + 1. Do this in a slightly dumb way, but safe and not too slow. Parameters ---------- m_max : int A count related to bounds.",
    "type": "function",
    "file_path": "scipy\\scipy\\sparse\\linalg\\_expm_multiply.py",
    "ast_data": "FunctionDef name:_compute_p_max arg:m_max arguments arg Assign Call Assign Call Call Assign Call Call Return return:yes Call Call Compare"
  },
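Because p*(p-1) is approximately p^2, the answer always lies within a unit or two of sqrt(m_max), which is why scanning the tiny window [floor(sqrt(m_max)), ceil(sqrt(m_max) + 1)] suffices. A quick sketch cross-checking that window against exhaustive search:

```python
import numpy as np

def compute_p_max(m_max):
    # Largest integer p with p*(p-1) <= m_max + 1, searched near sqrt(m_max).
    sqrt_m_max = np.sqrt(m_max)
    p_low = int(np.floor(sqrt_m_max))
    p_high = int(np.ceil(sqrt_m_max + 1))
    return max(p for p in range(p_low, p_high + 1) if p * (p - 1) <= m_max + 1)

# Cross-check against an exhaustive search for small m_max.
for m_max in range(1, 200):
    brute = max(p for p in range(1, m_max + 3) if p * (p - 1) <= m_max + 1)
    assert compute_p_max(m_max) == brute, m_max
print("ok")
```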
  {
    "library": "pytorch",
    "name": "set_module_name_regex",
    "source_code": "def set_module_name_regex(self, module_name_regex: str, qconfig_list: list[QConfigAny]) -> QConfigMultiMapping:\n    self._insert_qconfig_list('module_name_regex_qconfigs', [module_name_regex], qconfig_list)\n    return self",
    "docstring": "Set module_name_regex QConfigs see :func: for more info",
    "type": "method",
    "file_path": "pytorch\\torch\\ao\\ns\\fx\\qconfig_multi_mapping.py",
    "ast_data": "FunctionDef name:set_module_name_regex arg:self arg:module_name_regex arg:qconfig_list arguments arg arg arg Call Return return:yes"
  },
  {
    "library": "cherrypy",
    "name": "error_log",
    "source_code": "def error_log(self, msg='', level=20, traceback=False):\n    cherrypy.engine.log(msg, level, traceback)",
    "docstring": "Write given message to the error log.",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\_cpwsgi_server.py",
    "ast_data": "FunctionDef name:error_log arg:self arg:msg arg:level arg:traceback arguments arg arg arg arg Call"
  },
  {
    "library": "django",
    "name": "set_extra_mask",
    "source_code": "def set_extra_mask(self, names):\n    if names is None:\n        self.extra_select_mask = None\n    else:\n        self.extra_select_mask = set(names)\n    self._extra_select_cache = None",
    "docstring": "Set the mask of extra select items that will be returned by SELECT. Don't remove them from the Query since they might be used later.",
    "type": "method",
    "file_path": "django\\django\\db\\models\\sql\\query.py",
    "ast_data": "FunctionDef name:set_extra_mask arg:self arg:names arguments arg arg If Compare Assign Assign Call Assign"
  },
  {
    "library": "tensorflow",
    "name": "repeat_elements",
    "source_code": "@dispatch.add_dispatch_support\n@doc_controls.do_not_generate_docs\ndef repeat_elements(x, rep, axis):\n    x_shape = x.shape.as_list()\n    if x_shape[axis] is not None:\n        splits = array_ops.split(value=x, num_or_size_splits=x_shape[axis], axis=axis)\n        x_rep = [s for s in splits for _ in range(rep)]\n        return concatenate(x_rep, axis)\n    auxiliary_axis = axis + 1\n    x_shape = array_ops.shape(x)\n    x_rep = array_ops.expand_dims(x, axis=auxiliary_axis)\n    reps = np.ones(len(x.shape) + 1)\n    reps[auxiliary_axis] = rep\n    x_rep = array_ops.tile(x_rep, reps)\n    reps = np.delete(reps, auxiliary_axis)\n    reps[axis] = rep\n    reps = array_ops.constant(reps, dtype='int32')\n    x_shape *= reps\n    x_rep = array_ops.reshape(x_rep, x_shape)\n    x_shape = x.shape.as_list()\n    x_rep.set_shape(x_shape)\n    x_rep._keras_shape = tuple(x_shape)\n    return x_rep",
    "docstring": "Repeats the elements of a tensor along an axis, like . If has shape and is , the output will have shape . Args: x: Tensor or variable. rep: Python integer, number of times to repeat. axis: Axis along which to repeat. Returns: A tensor. Example: >>> b = tf.constant([1, 2, 3]) >>> tf.keras.backend.repeat_elements(b, rep=2, axis=0)",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\backend.py",
    "ast_data": "FunctionDef name:repeat_elements arg:x arg:rep arg:axis arguments arg arg arg Assign Call If Compare Assign Call Assign Call Return return:yes Call Assign Assign Call Assign Call Assign Call Call Assign Assign Call Assign Call Assign Assign Call Assign Call Assign Call Call Assign Call Return return:yes"
  },
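For statically known shapes the function reduces to `np.repeat` along the chosen axis: each slice is duplicated `rep` times in place, so the axis length scales by `rep`. A small numpy illustration of the shape claim:

```python
import numpy as np

x = np.arange(6).reshape(2, 3)  # shape (2, 3)
y = np.repeat(x, 2, axis=1)     # each column repeated twice -> shape (2, 6)
print(y.shape)                  # (2, 6)
print(y[0])                     # [0 0 1 1 2 2]
```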
  {
    "library": "kornia",
    "name": "identity_matrix",
    "source_code": "def identity_matrix(self, input: Tensor) -> Tensor:\n    return eye_like(3, input)",
    "docstring": "Return identity matrix.",
    "type": "method",
    "file_path": "kornia\\kornia\\augmentation\\auto\\base.py",
    "ast_data": "FunctionDef name:identity_matrix arg:self arg:input arguments arg arg Return return:yes Call"
  },
  {
    "library": "sphinx",
    "name": "_has_custom_template",
    "source_code": "def _has_custom_template(self, template_name: str) -> bool:\n    template = os.path.join(self.templatedir, os.path.basename(template_name))\n    return bool(self.templatedir) and os.path.exists(template)",
    "docstring": "Check if custom template file exists. Note: Please don't use this function from extensions. It will be removed in the future without deprecation period.",
    "type": "method",
    "file_path": "sphinx\\sphinx\\cmd\\quickstart.py",
    "ast_data": "FunctionDef name:_has_custom_template arg:self arg:template_name arguments arg arg Assign Call Call Return return:yes BoolOp Call Call"
  },
  {
    "library": "matplotlib",
    "name": "finish",
    "source_code": "def finish(self):\n    out, err = self._proc.communicate()\n    out = TextIOWrapper(BytesIO(out)).read()\n    err = TextIOWrapper(BytesIO(err)).read()\n    if out:\n        _log.log(logging.WARNING if self._proc.returncode else logging.DEBUG, 'MovieWriter stdout:\\n%s', out)\n    if err:\n        _log.log(logging.WARNING if self._proc.returncode else logging.DEBUG, 'MovieWriter stderr:\\n%s', err)\n    if self._proc.returncode:\n        raise subprocess.CalledProcessError(self._proc.returncode, self._proc.args, out, err)",
    "docstring": "Finish any processing for writing the movie.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\animation.py",
    "ast_data": "FunctionDef name:finish arg:self arguments arg Assign Call Assign Call Call Call Assign Call Call Call If Call If Call If Raise Call"
  },
  {
    "library": "tensorflow",
    "name": "_benchmark_graph",
    "source_code": "def _benchmark_graph(self):\n    with context.graph_mode():\n        with ops.Graph().as_default():\n            x = array_ops.placeholder(dtypes.float32)\n            cond_val = self._create_cond(x)\n            with session.Session() as sess:\n                cond_fn = sess.make_callable(cond_val, [x])\n                for _ in range(self.NUM_WARM_UP_ITERS):\n                    cond_fn(0.0)\n                start_time = time.time()\n                for _ in range(self.NUM_ITERS):\n                    cond_fn(0.0)\n                self.report_benchmark(wall_time=time.time() - start_time, iters=self.NUM_ITERS)",
    "docstring": "Benchmarks cond in legacy graph mode.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\control_flow_ops_benchmark.py",
    "ast_data": "FunctionDef name:_benchmark_graph arg:self arguments arg With Call With Call Call Assign Call Assign Call With Call Assign Call For Call Call Assign Call For Call Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "__init__",
    "source_code": "def __init__(self, banned_ops=None):\n    if banned_ops is None:\n        banned_ops = set()\n    self.banned_ops = banned_ops\n    super().__init__()",
    "docstring": "This version of CSE Pass aims to be dialect agnostic, and it's implemented purely based on the connectivity between fx.Node. For functional dialects, user would only need to specify the random ops in ban list. Warning: CSE Pass cannot be safely applied on a FX graph in non-functional dialects. If your dialect contains stateful operators, please customized the banned_ops.",
    "type": "method",
    "file_path": "pytorch\\torch\\fx\\passes\\dialect\\common\\cse_pass.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:banned_ops arguments arg arg If Compare Assign Call Assign Call Call"
  },
  {
    "library": "matplotlib",
    "name": "__init__",
    "source_code": "def __init__(self, fh):\n    self._header = _parse_header(fh)\n    self._metrics, self._metrics_by_name = _parse_char_metrics(fh)\n    self._kern, self._composite = _parse_optional(fh)",
    "docstring": "Parse the AFM file in file object *fh*.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\_afm.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:fh arguments arg arg Assign Call Assign Call Assign Call"
  },
  {
    "library": "kornia",
    "name": "apply_transform",
    "source_code": "def apply_transform(self, input: Tensor, params: Dict[str, Tensor], flags: Dict[str, Any], transform: Optional[Tensor]=None) -> Tensor:\n    KORNIA_CHECK(len(input.shape) in (3, 4), 'Wrong input dimension.')\n    if len(input.shape) == 3:\n        input = input[None, :, :, :]\n    KORNIA_CHECK(input.shape[1] in {3, 1}, 'Number of color channels should be 1 or 3.')\n    noisy_image = input.clone()\n    noisy_image[params['mask_salt'].to(input.device)] = 1.0\n    noisy_image[params['mask_pepper'].to(input.device)] = 0.0\n    return noisy_image",
    "docstring": "Apply random Salt and Pepper noise transformation to the input image.",
    "type": "method",
    "file_path": "kornia\\kornia\\augmentation\\_2d\\intensity\\salt_pepper_noise.py",
    "ast_data": "FunctionDef name:apply_transform arg:self arg:input arg:params arg:flags arg:transform arguments arg arg arg arg arg Call Compare Call If Compare Call Assign Call Compare Assign Call Assign Call Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_ifftn",
    "source_code": "def _ifftn(input_tensor, fft_length=None, axes=None, norm=None, name=None):\n    with _ops.name_scope(name, default_name, [input_tensor, fft_length, axes]) as name:\n        axes = _process_empty_axes(input_tensor, axes)\n        fft_rank = axes.shape[0]\n        input_tensor = _ops.convert_to_tensor(input_tensor, preferred_dtype=_dtypes.complex64)\n        input_tensor.shape.with_rank_at_least(fft_rank)\n        if fft_length is None:\n            fft_length = _infer_fft_length_for_fftn(input_tensor)\n        else:\n            fft_length = _ops.convert_to_tensor(fft_length, _dtypes.int32)\n        input_tensor = _maybe_pad_for_rfft(input_tensor, fft_rank, fft_length)\n        fft_length_static = _tensor_util.constant_value(fft_length)\n        if fft_length_static is not None:\n            fft_length = fft_length_static\n        if norm is None:\n            norm = 'backward'\n        n = 1\n        if norm != 'backward':\n            for fft_length_i in fft_length:\n                n *= fft_length_i\n            if norm == 'forward':\n                input_tensor *= n\n            elif norm == 'ortho':\n                input_tensor *= np.sqrt(n)\n        return ifft_n(input_tensor, fft_length, axes, name=name)",
    "docstring": "Wrapper around gen_spectral_ops.*fft that infers fft_length and axes arguments.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\signal\\fft_ops.py",
    "ast_data": "FunctionDef name:_ifftn arg:input_tensor arg:fft_length arg:axes arg:norm arg:name arguments arg arg arg arg arg With Call Assign Call Assign Assign Call Call If Compare Assign Call Assign Call Assign Call Assign Call If Compare Assign If Compare Assign Assign If Compare For If Compare If Compare Call Return return:yes Call"
  },
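The `norm` handling above pre-scales the input by n (for 'forward') or sqrt(n) (for 'ortho') so that a single 'backward'-convention inverse FFT reproduces the other conventions. The same identity can be checked with numpy's FFT:

```python
import numpy as np

x = np.random.default_rng(0).standard_normal(8) + 0j
n = x.size

backward = np.fft.ifft(x)                  # divides by n
forward = np.fft.ifft(x, norm="forward")   # no scaling on the inverse
ortho = np.fft.ifft(x, norm="ortho")       # divides by sqrt(n)

# Pre-scaling the input by n (or sqrt(n)) and using the default inverse
# reproduces the other conventions, which is what the wrapper does.
assert np.allclose(forward, np.fft.ifft(x * n))
assert np.allclose(ortho, np.fft.ifft(x * np.sqrt(n)))
print("ok")
```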
  {
    "library": "pandas",
    "name": "items",
    "source_code": "def items(self):\n    for h in self._info_axis:\n        yield (h, self[h])",
    "docstring": "Iterate over (label, values) on info axis This is index for Series and columns for DataFrame. Returns ------- Generator",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\generic.py",
    "ast_data": "FunctionDef name:items arg:self arguments arg For"
  },
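Usage is the same for Series and DataFrame; on a DataFrame the generator yields one (column label, column Series) pair per column:

```python
import pandas as pd

df = pd.DataFrame({"species": ["bear", "bear", "marsupial"],
                   "population": [1864, 22000, 80000]})
for label, values in df.items():
    print(label, values.tolist())
# species ['bear', 'bear', 'marsupial']
# population [1864, 22000, 80000]
```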
  {
    "library": "scikit-learn",
    "name": "validate_parameter_constraints",
    "source_code": "def validate_parameter_constraints(parameter_constraints, params, caller_name):\n    for param_name, param_val in params.items():\n        if param_name not in parameter_constraints:\n            continue\n        constraints = parameter_constraints[param_name]\n        if constraints == 'no_validation':\n            continue\n        constraints = [make_constraint(constraint) for constraint in constraints]\n        for constraint in constraints:\n            if constraint.is_satisfied_by(param_val):\n                break\n        else:\n            constraints = [constraint for constraint in constraints if not constraint.hidden]\n            if len(constraints) == 1:\n                constraints_str = f'{constraints[0]}'\n            else:\n                constraints_str = f'{', '.join([str(c) for c in constraints[:-1]])} or {constraints[-1]}'\n            raise InvalidParameterError(f'The {param_name!r} parameter of {caller_name} must be {constraints_str}. Got {param_val!r} instead.')",
    "docstring": "Validate types and values of given parameters. Parameters ---------- parameter_constraints : dict or {\"no_validation\"} If \"no_validation\", validation is skipped for this parameter. If a dict, it must be a dictionary . A parameter is valid if it satisfies one of the constraints from the list. Constraints can be: - an Interval object, representing a continuous or discrete range of numbers - the string \"array-like\" - the string \"sparse matrix\" - the string \"random_state\" - callable - None, meaning that None is a valid value for the parameter - any type, meaning that any instance of this type is valid - an Options object, representing a set of elements of a given type - a StrOptions object, representing a set of strings - the string \"boolean\" - the string \"verbose\" - the string \"cv_object\" - the string \"nan\" - a MissingValues object representing markers for missing values - a HasMethods object, representing method(s) an object must have - a Hidden object, representing a constraint not meant to be exposed to the user params : dict A dictionary . The parameters to validate against the constraints. caller_name : str The name of the estimator or function or method that called this function.",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\utils\\_param_validation.py",
    "ast_data": "FunctionDef name:validate_parameter_constraints arg:parameter_constraints arg:params arg:caller_name arguments arg arg arg For Call If Compare Assign If Compare Assign Call For If Call Assign If Compare Call Assign Assign Call Call Raise Call"
  },
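The accept/reject logic hinges on Python's for/else: the loop breaks on the first satisfied constraint, and the `else` branch (which raises) runs only when the loop completes without a break. A stripped-down sketch with plain callables standing in for scikit-learn's constraint objects (all names are illustrative):

```python
def validate(value, constraints, name="param"):
    # for/else: the else branch runs only when no `break` fired,
    # i.e. when no constraint accepted the value.
    for constraint in constraints:
        if constraint(value):
            break
    else:
        raise ValueError(f"The {name!r} parameter is invalid: got {value!r}.")

def is_positive(v):
    return isinstance(v, (int, float)) and v > 0

def is_none(v):
    return v is None

validate(3, [is_positive, is_none])     # passes
validate(None, [is_positive, is_none])  # passes
try:
    validate(-1, [is_positive, is_none])
except ValueError as e:
    print(e)
```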
  {
    "library": "matplotlib",
    "name": "set_antialiased",
    "source_code": "def set_antialiased(self, b):\n    if self._antialiased != b:\n        self.stale = True\n    self._antialiased = b",
    "docstring": "Set whether to use antialiased rendering. Parameters ---------- b : bool",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\lines.py",
    "ast_data": "FunctionDef name:set_antialiased arg:self arg:b arguments arg arg If Compare Assign Assign"
  },
  {
    "library": "pytorch",
    "name": "realize_all",
    "source_code": "@classmethod\ndef realize_all(cls, value: Any, cache: Optional[dict[int, tuple[Any, Any]]]=None) -> Any:\n    if cache is None:\n        cache = {}\n    idx = id(value)\n    if idx in cache:\n        return cache[idx][0]\n    value_cls = type(value)\n    if issubclass(value_cls, LazyVariableTracker):\n        result = cls.realize_all(value.realize(), cache)\n    elif issubclass(value_cls, VariableTracker):\n        result = value\n        value_dict = value.__dict__\n        nonvars = value._nonvar_fields\n        for key in value_dict:\n            if key not in nonvars:\n                value_dict[key] = cls.realize_all(value_dict[key], cache)\n    elif value_cls is list:\n        result = [cls.realize_all(v, cache) for v in value]\n    elif value_cls is tuple:\n        result = tuple((cls.realize_all(v, cache) for v in value))\n    elif value_cls in (dict, collections.OrderedDict):\n        result = {k: cls.realize_all(v, cache) for k, v in list(value.items())}\n    else:\n        result = value\n    cache[idx] = (result, value)\n    return result",
    "docstring": "Walk an object and realize all LazyVariableTrackers inside it.",
    "type": "method",
    "file_path": "pytorch\\torch\\_dynamo\\variables\\lazy.py",
    "ast_data": "FunctionDef name:realize_all arg:cls arg:value arg:cache arguments arg arg arg If Compare Assign Assign Call If Compare Return return:yes Assign Call If Call Assign Call Call If Call Assign Assign Assign For If Compare Assign Call If Compare Assign Call If Compare Assign Call Call If Compare Assign Call Call Call Assign Assign Return return:yes"
  },
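The traversal above is an id()-keyed memoized deep map: each cache entry stores the (result, original) pair, so shared subobjects are rewritten only once, and keeping the original alive prevents its id from being recycled mid-walk. A generic sketch of the same pattern over plain containers (`deep_map` is a hypothetical helper, not Dynamo's API):

```python
from typing import Any, Optional

def deep_map(value: Any, fn, cache: Optional[dict] = None) -> Any:
    # id()-keyed cache: the original object is kept in the cache entry so it
    # stays alive and its id() cannot be reused by a new object.
    if cache is None:
        cache = {}
    idx = id(value)
    if idx in cache:
        return cache[idx][0]
    if isinstance(value, list):
        result = [deep_map(v, fn, cache) for v in value]
    elif isinstance(value, tuple):
        result = tuple(deep_map(v, fn, cache) for v in value)
    elif isinstance(value, dict):
        result = {k: deep_map(v, fn, cache) for k, v in value.items()}
    else:
        result = fn(value)
    cache[idx] = (result, value)
    return result

shared = ["x"]
data = {"a": shared, "b": (shared, "y")}
out = deep_map(data, lambda v: v.upper() if isinstance(v, str) else v)
print(out)                      # {'a': ['X'], 'b': (['X'], 'Y')}
print(out["a"] is out["b"][0])  # True: shared structure is preserved
```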
  {
    "library": "tensorflow",
    "name": "Close",
    "source_code": "def Close(self):\n    _pywrap_debug_events_writer.Close(self._dump_root)",
    "docstring": "Close the writer.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\debug\\lib\\debug_events_writer.py",
    "ast_data": "FunctionDef name:Close arg:self arguments arg Call"
  },
  {
    "library": "pandas",
    "name": "sem",
    "source_code": "@deprecate_nonkeyword_arguments(version='4.0', allowed_args=['self'], name='sem')\ndef sem(self, axis: Axis | None=0, skipna: bool=True, ddof: int=1, numeric_only: bool=False, **kwargs) -> Series | Any:\n    result = super().sem(axis=axis, skipna=skipna, ddof=ddof, numeric_only=numeric_only, **kwargs)\n    if isinstance(result, Series):\n        result = result.__finalize__(self, method='sem')\n    return result",
    "docstring": "Return unbiased standard error of the mean over requested axis. Normalized by N-1 by default. This can be changed using the ddof argument Parameters ---------- axis : {index (0), columns (1)} For this parameter is unused and defaults to 0. .. warning:: The behavior of DataFrame.sem with `numeric_onlyTrue` to avoid getting an error. >>> df = pd.DataFrame({\"a\": [1, 2], \"b\": [\"T\", \"Z\"]}, index=[\"tiger\", \"zebra\"]) >>> df.sem(numeric_only=True) a 0.5 dtype: float64",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\frame.py",
    "ast_data": "FunctionDef name:sem arg:self arg:axis arg:skipna arg:ddof arg:numeric_only arguments arg arg arg arg arg arg Assign Call Call If Call Assign Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_is_llt_product",
    "source_code": "def _is_llt_product(self):\n    if len(self.operators) != 2:\n        return False\n    if not linear_operator_util.is_aat_form(self.operators):\n        return False\n    return isinstance(self.operators[0], linear_operator_lower_triangular.LinearOperatorLowerTriangular)",
    "docstring": "Determines if linop = L @ L.H for L = LinearOperatorLowerTriangular.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\linalg\\linear_operator_composition.py",
    "ast_data": "FunctionDef name:_is_llt_product arg:self arguments arg If Compare Call Return return:yes If Call Return return:yes Return return:yes Call"
  },
  {
    "library": "scrapy",
    "name": "crawl",
    "source_code": "def crawl(self, crawler_or_spidercls: type[Spider] | str | Crawler, *args: Any, **kwargs: Any) -> asyncio.Future[None]:\n    if isinstance(crawler_or_spidercls, Spider):\n        raise ValueError('The crawler_or_spidercls argument cannot be a spider object, it must be a spider class (or a Crawler object)')\n    if not is_asyncio_reactor_installed():\n        raise RuntimeError('AsyncCrawlerRunner requires AsyncioSelectorReactor.')\n    crawler = self.create_crawler(crawler_or_spidercls)\n    return self._crawl(crawler, *args, **kwargs)",
    "docstring": "Run a crawler with the provided arguments. It will call the given Crawler's :meth: method, while keeping track of it so it can be stopped later. If `~scrapy.crawler.Crawler~asyncio.Future~scrapy.crawler.Crawler~scrapy.spiders.Spider` subclass or string :param args: arguments to initialize the spider :param kwargs: keyword arguments to initialize the spider",
    "type": "method",
    "file_path": "scrapy\\scrapy\\crawler.py",
    "ast_data": "FunctionDef name:crawl arg:self arg:crawler_or_spidercls arguments arg arg arg arg If Call Raise Call If Call Raise Call Assign Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "take",
    "source_code": "@tf_export.tf_export('experimental.numpy.take', v1=[])\n@np_utils.np_doc('take')\ndef take(a, indices, axis=None, out=None, mode='clip'):\n    if out is not None:\n        raise ValueError('out argument is not supported in take.')\n    if mode not in {'raise', 'clip', 'wrap'}:\n        raise ValueError(\"Invalid mode '{}' for take\".format(mode))\n    a = asarray(a)\n    indices = asarray(indices)\n    if axis is None:\n        a = array_ops.reshape(a, [-1])\n        axis = 0\n    axis_size = array_ops.shape(a, out_type=indices.dtype)[axis]\n    if mode == 'clip':\n        indices = clip_ops.clip_by_value(indices, 0, axis_size - 1)\n    elif mode == 'wrap':\n        indices = math_ops.floormod(indices, axis_size)\n    else:\n        raise ValueError(\"The 'raise' mode to take is not supported.\")\n    return array_ops.gather(a, indices, axis=axis)",
    "docstring": "out argument is not supported, and default mode is clip.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\numpy_ops\\np_array_ops.py",
    "ast_data": "FunctionDef name:take arg:a arg:indices arg:axis arg:out arg:mode arguments arg arg arg arg arg If Compare Raise Call If Compare Raise Call Call Assign Call Assign Call If Compare Assign Call Assign Assign Call If Compare Assign Call If Compare Assign Call Raise Call Return return:yes Call Call Call"
  },
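The 'clip' and 'wrap' modes mirror numpy's: out-of-range indices are clamped to the valid range or taken modulo the axis length. A quick numpy demonstration:

```python
import numpy as np

a = np.array([10, 20, 30])
idx = np.array([-2, 1, 5])

print(np.take(a, idx, mode="clip"))  # out-of-range indices clamped: [10 20 30]
print(np.take(a, idx, mode="wrap"))  # indices taken modulo len(a): [20 20 30]
```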
  {
    "library": "pytorch",
    "name": "parse_module_info",
    "source_code": "def parse_module_info(module_info: ModuleInfo) -> Graph:\n    mod_stats = module_info['mod_stats']\n    fw_pre_order = module_info['mod_order']['fw_pre_order']\n    assert len(mod_stats) == len(fw_pre_order)\n    n_nodes = len(mod_stats)\n    g = Graph(n_nodes)\n    g.fw_post_order = module_info['mod_order']['fw_post_order']\n    module_info['mod_stats'] = sorted(mod_stats, key=lambda x: fw_pre_order.index(x['fqn']))\n    for i, one_mod_stats in enumerate(mod_stats):\n        node: Node = cast(Node, one_mod_stats)\n        node['index'] = i\n        node['pos_fw_post_order'] = g.fw_post_order.index(node['fqn'])\n        g.add_node(node)\n    for i in range(n_nodes):\n        for j in range(i, n_nodes):\n            if is_self_or_submodule(g.nodes[j]['fqn'], g.nodes[i]['fqn']):\n                g.ad_matrix[i][j] = 1\n            else:\n                break\n    return g",
    "docstring": "Parse module info and create a graph (tree) of modules. The graph will be used by MILP solver to find optimal SAC and/or FSDP configurations.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\_tools\\ilp_utils.py",
    "ast_data": "FunctionDef name:parse_module_info arg:module_info arguments arg Assign Assign Compare Call Call Assign Call Assign Call Assign Assign Call arguments arg Call For Call Call Assign Assign Call Call For Call For Call If Call Assign Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_is_observer_in_same_graph",
    "source_code": "def _is_observer_in_same_graph(node: Node, named_modules: dict[str, torch.nn.Module], obs_or_fq_map: dict[EdgeOrNode, ObserverOrFakeQuantize], is_qat):\n    node_output_dtype = _get_arg_target_dtype_as_output(node, named_modules, obs_or_fq_map, is_qat)\n    if len(node.args) > 0 and isinstance(node.args[0], Node):\n        if node_output_dtype in [torch.quint8, torch.uint8] and node.args[0].op == 'placeholder':\n            return False\n    return True",
    "docstring": "Check if observer in same graph when the node output is not fp32 and input is 'placeholder' the input is assumed to be quantized, so it is observed in a different place rather than not observed.",
    "type": "function",
    "file_path": "pytorch\\torch\\ao\\quantization\\fx\\prepare.py",
    "ast_data": "FunctionDef name:_is_observer_in_same_graph arg:node arg:named_modules arg:obs_or_fq_map arg:is_qat arguments arg arg arg arg Assign Call If BoolOp Compare Call Call If BoolOp Compare Compare Return return:yes Return return:yes"
  },
  {
    "library": "django",
    "name": "_get_dependencies_for_model",
    "source_code": "def _get_dependencies_for_model(self, app_label, model_name):\n    dependencies = []\n    model_state = self.to_state.models[app_label, model_name]\n    for field in model_state.fields.values():\n        if field.is_relation:\n            dependencies.extend(self._get_dependencies_for_foreign_key(app_label, model_name, field, self.to_state))\n    return dependencies",
    "docstring": "Return foreign key dependencies of the given model.",
    "type": "method",
    "file_path": "django\\django\\db\\migrations\\autodetector.py",
    "ast_data": "FunctionDef name:_get_dependencies_for_model arg:self arg:app_label arg:model_name arguments arg arg arg Assign Assign For Call If Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "weights",
    "source_code": "@property\ndef weights(self):\n    return self._weights",
    "docstring": "Returns variables of this Optimizer based on the order created.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\optimizer_v2\\optimizer_v2.py",
    "ast_data": "FunctionDef name:weights arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_AbslProcess",
    "source_code": "class _AbslProcess:\n\n    def __init__(self, *args, **kwargs):\n        super(_AbslProcess, self).__init__(*args, **kwargs)\n        self._run_impl = getattr(self, 'run')\n        self.run = self._run_with_absl\n\n    def _run_with_absl(self):\n        app.run(lambda _: self._run_impl())",
    "docstring": "A process that runs using absl.app.run.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\multi_process_lib.py",
    "ast_data": "ClassDef name:_AbslProcess FunctionDef name:__init__ arg:self arguments arg arg arg Call Call Assign Call Assign FunctionDef name:_run_with_absl arg:self arguments arg Call arguments arg Call"
  },
  {
    "library": "tensorflow",
    "name": "empty_like",
    "source_code": "def empty_like(x, init=None):\n    x = ops.convert_to_tensor(x)\n    return gen_array_ops.empty(array_ops.shape(x), x.dtype, init=init)",
    "docstring": "Returns a non-initialized tensor with the same shape and dtype as x. Args: x: A Tensor. init: Initialize the returned tensor with the default value of x.dtype(), if True. Otherwise, do not initialize. Defaults to None. Returns: A tensor y, whose dtype and shape are the same as those of x. y is guaranteed not to be an alias of x. Upon return, y may contain arbitrary data.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\inplace_ops.py",
    "ast_data": "FunctionDef name:empty_like arg:x arg:init arguments arg arg Assign Call Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_decode_error",
    "source_code": "def _decode_error(self, err):\n    if hasattr(err, 'errors'):\n        self._decode_converter_error(err)\n    else:\n        self._decode_error_legacy(err)\n    if self._raise_exception and self._log_messages:\n        raise CompatibilityError(f'CompatibilityException at {repr(self._func)}')",
    "docstring": "Parses the given ConverterError and generates compatibility warnings.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\lite\\python\\authoring\\authoring.py",
    "ast_data": "FunctionDef name:_decode_error arg:self arg:err arguments arg arg If Call Call Call If BoolOp Raise Call Call"
  },
  {
    "library": "sphinx",
    "name": "update",
    "source_code": "def update(self, config: Config) -> None:\n    for key in self.LATEX_ELEMENTS_KEYS:\n        if config.latex_elements.get(key):\n            value = config.latex_elements[key]\n            setattr(self, key, value)\n    for key in self.UPDATABLE_KEYS:\n        if key in config.latex_theme_options:\n            value = config.latex_theme_options[key]\n            setattr(self, key, value)",
    "docstring": "Override theme settings by user's configuration.",
    "type": "method",
    "file_path": "sphinx\\sphinx\\builders\\latex\\theming.py",
    "ast_data": "FunctionDef name:update arg:self arg:config arguments arg arg For If Call Assign Call For If Compare Assign Call"
  },
  {
    "library": "tensorflow",
    "name": "_to_matrix",
    "source_code": "def _to_matrix(u):\n    u_rank = len(u.shape)\n    if u_rank not in [1, 2]:\n        raise ValueError('The input tensor should have rank 1 or 2. Given rank: {}'.format(u_rank))\n    if u_rank == 1:\n        return array_ops.expand_dims(u, 0)\n    return u",
    "docstring": "If input tensor is a vector (i.e., has rank 1), converts it to matrix.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\utils\\kernelized_utils.py",
    "ast_data": "FunctionDef name:_to_matrix arg:u arguments arg Assign Call If Compare Raise Call Call If Compare Return return:yes Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "aot_compile",
    "source_code": "def aot_compile(f: Callable, args: tuple[Any], kwargs: Optional[dict[str, Any]]=None, *, dynamic_shapes: Optional[dict[str, Any]]=None, options: Optional[dict[str, Any]]=None, remove_runtime_assertions: bool=False, disable_constraint_solver: bool=False, same_signature: bool=True) -> Union[list[str], str]:\n    from torch.export._trace import _export_to_torch_ir\n    from torch._inductor.decomposition import select_decomp_table\n    from torch._inductor import config\n    aot_compile_warning()\n    if config.is_predispatch:\n        gm = torch.export._trace._export(f, args, kwargs, dynamic_shapes, pre_dispatch=True).module()\n    else:\n        gm = _export_to_torch_ir(f, args, kwargs, dynamic_shapes, disable_constraint_solver=disable_constraint_solver, same_signature=same_signature, restore_fqn=False)\n    with torch.no_grad():\n        so_path = torch._inductor.aot_compile(gm, args, kwargs, options=options)\n    return so_path",
    "docstring": "Note: this function is not stable yet Traces either an nn.Module's forward function or just a callable with PyTorch operations inside, generates executable cpp code from the program, and returns the path to the generated shared library Args: f: the or callable to trace. args: example positional inputs. kwargs: optional example keyword inputs. dynamic_shapes: Should either be: 1) a dict from argument names of `DimDimDim` types correspond to dynamic dimensions, and static dimensions are denoted by None. Arguments that are dicts or tuples / lists of tensors are recursively specified by using mappings or sequences of contained specifications. options: A dictionary of options to control inductor disable_constraint_solver: Whether the dim constraint solver must be disabled. Returns: Path to the generated shared library",
    "type": "function",
    "file_path": "pytorch\\torch\\_export\\__init__.py",
    "ast_data": "FunctionDef name:aot_compile arg:f arg:args arg:kwargs arguments arg arg arg arg arg arg arg arg Call If Assign Call Call Assign Call With Call Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_name_scope_transformer",
    "source_code": "def _name_scope_transformer(parent, node, full_name, name, logs):\n    name_found, name = ast_edits.get_arg_value(node, 'name', 0)\n    default_found, default_name = ast_edits.get_arg_value(node, 'default_name', 1)\n    if name_found and pasta.dump(name) != 'None':\n        logs.append((ast_edits.INFO, node.lineno, node.col_offset, '`name` passed to `name_scope`. Because you may be re-entering an existing scope, it is not safe to convert automatically,  the v2 name_scope does not support re-entering scopes by name.\\n'))\n        new_name = 'tf.compat.v1.name_scope'\n        logs.append((ast_edits.INFO, node.func.lineno, node.func.col_offset, 'Renamed %r to %r' % (full_name, new_name)))\n        new_name_node = ast_edits.full_name_node(new_name, node.func.ctx)\n        ast.copy_location(new_name_node, node.func)\n        pasta.ast_utils.replace_child(node, node.func, new_name_node)\n        return node\n    if default_found:\n        logs.append((ast_edits.INFO, node.lineno, node.col_offset, 'Using default_name as name in call to name_scope.\\n'))\n        node.args = []\n        node.keywords = [ast.keyword(arg='name', value=default_name)]\n        return node\n    logs.append((ast_edits.ERROR, node.lineno, node.col_offset, 'name_scope call with neither name nor default_name cannot be converted properly.'))",
    "docstring": "Fix name scope invocation to use 'default_name' and omit 'values' args.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\tools\\compatibility\\tf_upgrade_v2.py",
    "ast_data": "FunctionDef name:_name_scope_transformer arg:parent arg:node arg:full_name arg:name arg:logs arguments arg arg arg arg arg Assign Call Assign Call If BoolOp Compare Call Call Assign Call Assign Call Call Call Return return:yes If Call Assign Assign Call Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "render",
    "source_code": "def render(pieces, style):\n    if pieces['error']:\n        return {'version': 'unknown', 'full-revisionid': pieces.get('long'), 'dirty': None, 'error': pieces['error'], 'date': None}\n    if not style or style == 'default':\n        style = 'pep440'\n    if style == 'pep440':\n        rendered = render_pep440(pieces)\n    elif style == 'pep440-branch':\n        rendered = render_pep440_branch(pieces)\n    elif style == 'pep440-pre':\n        rendered = render_pep440_pre(pieces)\n    elif style == 'pep440-post':\n        rendered = render_pep440_post(pieces)\n    elif style == 'pep440-post-branch':\n        rendered = render_pep440_post_branch(pieces)\n    elif style == 'pep440-old':\n        rendered = render_pep440_old(pieces)\n    elif style == 'git-describe':\n        rendered = render_git_describe(pieces)\n    elif style == 'git-describe-long':\n        rendered = render_git_describe_long(pieces)\n    else:\n        raise ValueError(f\"unknown style '{style}'\")\n    return {'version': rendered, 'full-revisionid': pieces['long'], 'dirty': pieces['dirty'], 'error': None, 'date': pieces.get('date')}",
    "docstring": "Render the given version pieces into the requested style.",
    "type": "function",
    "file_path": "pandas\\pandas\\_version.py",
    "ast_data": "FunctionDef name:render arg:pieces arg:style arguments arg arg If Return return:yes Call If BoolOp Compare Assign If Compare Assign Call If Compare Assign Call If Compare Assign Call If Compare Assign Call If Compare Assign Call If Compare Assign Call If Compare Assign Call If Compare Assign Call Raise Call Return return:yes Call"
  },
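The if/elif chain is a straightforward style dispatch; the same selection can be written as a lookup table. A sketch with stub renderers (the stub bodies are illustrative, not versioneer's real rendering rules):

```python
# Sketch: the same style selection as a dispatch table, with stub renderers.
def render_pep440(pieces):
    return pieces["closest-tag"] or "0+unknown"

def render_git_describe(pieces):
    return pieces["closest-tag"] or pieces["short"]

RENDERERS = {
    "pep440": render_pep440,
    "git-describe": render_git_describe,
}

def render(pieces, style):
    style = style or "default"
    if style == "default":
        style = "pep440"
    try:
        return RENDERERS[style](pieces)
    except KeyError:
        raise ValueError(f"unknown style '{style}'") from None

print(render({"closest-tag": "1.2.3", "short": "abc1234"}, None))  # 1.2.3
```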
  {
    "library": "tensorflow",
    "name": "__new__",
    "source_code": "def __new__(cls, x=None, y=None, ildj_map=None, kwargs=None):\n    return super(_Mapping, cls).__new__(cls, x, y, ildj_map, kwargs)",
    "docstring": "Custom __new__ so namedtuple items have defaults. Args: x: . Forward. y: . Inverse. ildj_map: . This is a mapping from event_ndims to a representing the inverse log det jacobian. kwargs: Python dictionary. Extra args supplied to forward/inverse/etc functions. Returns: mapping: New instance of _Mapping.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\distributions\\bijector_impl.py",
    "ast_data": "FunctionDef name:__new__ arg:cls arg:x arg:y arg:ildj_map arg:kwargs arguments arg arg arg arg arg Return return:yes Call Call"
  },
  {
    "library": "scipy",
    "name": "maxinconsts",
    "source_code": "@lazy_cython\ndef maxinconsts(Z, R):\n    xp = array_namespace(Z, R)\n    Z = _asarray(Z, order='C', dtype=xp.float64, xp=xp)\n    R = _asarray(R, order='C', dtype=xp.float64, xp=xp)\n    _is_valid_linkage(Z, throw=True, name='Z', xp=xp)\n    _is_valid_im(R, throw=True, name='R', xp=xp)\n    if Z.shape[0] != R.shape[0]:\n        raise ValueError('The inconsistency matrix and linkage matrix each have a different number of rows.')\n\n    def cy_maxinconsts(Z, R, validate):\n        if validate:\n            _is_valid_linkage(Z, throw=True, name='Z', xp=np)\n            _is_valid_im(R, throw=True, name='R', xp=np)\n        n = Z.shape[0] + 1\n        MI = np.zeros((n - 1,))\n        _hierarchy.get_max_Rfield_for_each_cluster(Z, R, MI, n, 3)\n        return MI\n    return xpx.lazy_apply(cy_maxinconsts, Z, R, validate=is_lazy_array(Z), shape=(Z.shape[0],), dtype=xp.float64, as_numpy=True, xp=xp)",
    "docstring": "Return the maximum inconsistency coefficient for each non-singleton cluster and its children. Parameters ---------- Z : ndarray The hierarchical clustering encoded as a matrix. See for more information. R : ndarray The inconsistency matrix. Returns ------- MI : ndarray A monotonic `scipy.cluster.hierarchy.inconsistentscipy.cluster.hierarchy.maxinconsts`) for each non-singleton cluster and its children: >>> maxinconsts(Z, R) array([0. , 0. , 0. , 0. , 0.70710678, 0.70710678, 0.70710678, 0.70710678, 1.15470054, 1.15470054, 1.15470054])",
    "type": "function",
    "file_path": "scipy\\scipy\\cluster\\hierarchy.py",
    "ast_data": "FunctionDef name:maxinconsts arg:Z arg:R arguments arg arg Assign Call Assign Call Assign Call Call Call If Compare Raise Call FunctionDef name:cy_maxinconsts arg:Z arg:R arg:validate arguments arg arg arg If Call Call Assign Assign Call Call Return return:yes Return return:yes Call Call"
  },
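A short end-to-end usage sketch on random data (the choice of `method="median"` and the sample points are illustrative):

```python
import numpy as np
from scipy.cluster.hierarchy import linkage, inconsistent, maxinconsts

rng = np.random.default_rng(0)
X = rng.standard_normal((12, 2))

Z = linkage(X, method="median")  # hierarchical clustering encoded as a matrix
R = inconsistent(Z)              # inconsistency statistics, one row per merge
MI = maxinconsts(Z, R)           # max inconsistency over each cluster's subtree
print(MI.shape)                  # (11,) -- one entry per non-singleton cluster
```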
  {
    "library": "pytorch",
    "name": "reduce",
    "source_code": "@_exception_logger\ndef reduce(tensor: torch.Tensor, dst: Optional[int]=None, op=ReduceOp.SUM, group: Optional[ProcessGroup]=None, async_op: bool=False, group_dst: Optional[int]=None):\n    group = _group_or_default_group(group)\n    group_dst = _canonicalize_group_rank(group, dst, group_dst, return_global=False)\n    _check_single_tensor(tensor, 'tensor')\n    if _rank_not_in_group(group):\n        _warn_not_in_group('reduce')\n        return\n    opts = ReduceOptions()\n    opts.reduceOp = op\n    opts.rootRank = group_dst\n    opts.asyncOp = async_op\n    work = group.reduce([tensor], opts)\n    if async_op:\n        return work\n    elif work is not None:\n        work.wait()",
    "docstring": "Reduces the tensor data across all machines. Only the process with rank `` but not both. Returns: Async work handle, if async_op is set to True. None, if not async_op or if not part of the group",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\distributed_c10d.py",
    "ast_data": "FunctionDef name:reduce arg:tensor arg:dst arg:op arg:group arg:async_op arg:group_dst arguments arg arg arg arg arg arg Assign Call Assign Call Call If Call Call Return return:no Assign Call Assign Assign Assign Assign Call If Return return:yes If Compare Call"
  },
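A minimal two-process CPU usage sketch with the gloo backend; the address and port values are placeholders:

```python
import os
import torch
import torch.distributed as dist
import torch.multiprocessing as mp

def worker(rank, world_size):
    os.environ["MASTER_ADDR"] = "127.0.0.1"  # placeholder rendezvous address
    os.environ["MASTER_PORT"] = "29500"      # placeholder port
    dist.init_process_group("gloo", rank=rank, world_size=world_size)
    t = torch.ones(3) * (rank + 1)
    dist.reduce(t, dst=0, op=dist.ReduceOp.SUM)  # only dst receives the result
    if rank == 0:
        print(t)  # tensor([3., 3., 3.]) for world_size=2
    dist.destroy_process_group()

if __name__ == "__main__":
    mp.spawn(worker, args=(2,), nprocs=2)
```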
  {
    "library": "tensorflow",
    "name": "count_params",
    "source_code": "def count_params(self):\n    if not self.built:\n        if getattr(self, '_is_graph_network', False):\n            with tf_utils.maybe_init_scope(self):\n                self._maybe_build(self.inputs)\n        else:\n            raise ValueError('You tried to call `count_params` on ' + self.name + \", but the layer isn't built. You can build it manually via: `\" + self.name + '.build(batch_input_shape)`.')\n    return layer_utils.count_params(self.weights)",
    "docstring": "Count the total number of scalars composing the weights. Returns: An integer count. Raises: ValueError: if the layer isn't yet built (in which case its weights aren't yet defined).",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\base_layer_v1.py",
    "ast_data": "FunctionDef name:count_params arg:self arguments arg If If Call With Call Call Raise Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_has_kwargs",
    "source_code": "def _has_kwargs(fn):\n    if isinstance(fn, functools.partial):\n        fn = fn.func\n    elif _is_callable_object(fn):\n        fn = fn.__call__\n    elif not callable(fn):\n        raise TypeError('fn should be a function-like object, but is of type {}.'.format(type(fn)))\n    return tf_inspect.getfullargspec(fn).varkw is not None",
    "docstring": "Returns whether the passed callable has **kwargs in its signature. Args: fn: Function, or function-like object (e.g., result of ). Returns: : if has **kwargs in its signature. Raises: : If fn is not a Function, or function-like object.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\legacy_tf_layers\\variable_scope_shim.py",
    "ast_data": "FunctionDef name:_has_kwargs arg:fn arguments arg If Call Assign If Call Assign If Call Raise Call Call Call Return return:yes Compare Call"
  },
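The check unwraps `functools.partial` and callable objects down to a plain function, then inspects `varkw`, which names the `**kwargs` parameter or is None. A stdlib-only sketch of the same idea (`has_kwargs` is a hypothetical stand-in using `inspect` instead of `tf_inspect`):

```python
import functools
import inspect

def has_kwargs(fn):
    # `varkw` is the name of the **kwargs parameter, or None if absent.
    if isinstance(fn, functools.partial):
        fn = fn.func
    elif not inspect.isroutine(fn) and callable(fn):
        fn = fn.__call__
    return inspect.getfullargspec(fn).varkw is not None

def f(a, **kw): ...
def g(a, b=1): ...

print(has_kwargs(f))                        # True
print(has_kwargs(g))                        # False
print(has_kwargs(functools.partial(f, 1)))  # True
```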
  {
    "library": "scipy",
    "name": "reinforce_box_boundaries",
    "source_code": "def reinforce_box_boundaries(x, lb, ub):\n    return np.minimum(np.maximum(x, lb), ub)",
    "docstring": "Return clipped value of x",
    "type": "function",
    "file_path": "scipy\\scipy\\optimize\\_trustregion_constr\\qp_subproblem.py",
    "ast_data": "FunctionDef name:reinforce_box_boundaries arg:x arg:lb arg:ub arguments arg arg arg Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "from_reference",
    "source_code": "@classmethod\ndef from_reference(cls, ref_qlinear, output_scale, output_zero_point):\n    qlinear = cls(ref_qlinear.in_features, ref_qlinear.out_features)\n    qweight = ref_qlinear.get_quantized_weight()\n    qlinear.set_weight_bias(qweight, ref_qlinear.bias)\n    qlinear.scale = float(output_scale)\n    qlinear.zero_point = int(output_zero_point)\n    return qlinear",
    "docstring": "Create a (fbgemm/qnnpack) quantized module from a reference quantized module Args: ref_qlinear (Module): a reference quantized linear module, either produced by torch.ao.quantization utilities or provided by the user output_scale (float): scale for output Tensor output_zero_point (int): zero point for output Tensor",
    "type": "method",
    "file_path": "pytorch\\torch\\ao\\nn\\quantized\\modules\\linear.py",
    "ast_data": "FunctionDef name:from_reference arg:cls arg:ref_qlinear arg:output_scale arg:output_zero_point arguments arg arg arg arg Assign Call Assign Call Call Assign Call Assign Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "get_last_lr",
    "source_code": "def get_last_lr(self) -> list[float]:\n    return self._last_lr",
    "docstring": "Return last computed learning rate by current scheduler.",
    "type": "method",
    "file_path": "pytorch\\torch\\optim\\lr_scheduler.py",
    "ast_data": "FunctionDef name:get_last_lr arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "back_prop",
    "source_code": "@property\ndef back_prop(self):\n    if self.GetWhileContext():\n        return self.GetWhileContext().back_prop\n    return False",
    "docstring": "Forwards to the enclosing while context, if any.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\tpu\\tpu_replication.py",
    "ast_data": "FunctionDef name:back_prop arg:self arguments arg If Call Return return:yes Call Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "set_output",
    "source_code": "def set_output(self, *, transform=None):\n    for _, _, step in self._iter():\n        _safe_set_output(step, transform=transform)\n    return self",
    "docstring": "Set the output container when and are called. Calling will set the output of all estimators in . Parameters ---------- transform : {\"default\", \"pandas\", \"polars\"}, default=None Configure output of and . - : Default output format of a transformer - : DataFrame output - : Polars output - : Transform configuration is unchanged .. versionadded:: 1.4 option was added. Returns ------- self : estimator instance Estimator instance.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\pipeline.py",
    "ast_data": "FunctionDef name:set_output arg:self arguments arg arg For Call Call Return return:yes"
  },
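A short usage sketch: setting pandas output on a pipeline propagates to every step, so `fit_transform` returns a DataFrame with feature names (assumes scikit-learn >= 1.2 for the pandas option):

```python
import pandas as pd
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler

X = pd.DataFrame({"a": [1.0, 2.0, 3.0], "b": [0.0, 10.0, 20.0]})
pipe = make_pipeline(StandardScaler())
pipe.set_output(transform="pandas")  # propagated to every step
out = pipe.fit_transform(X)
print(type(out).__name__)  # DataFrame
print(list(out.columns))   # ['a', 'b']
```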
  {
    "library": "scikit-learn",
    "name": "_most_frequent",
    "source_code": "def _most_frequent(array, extra_value, n_repeat):\n    if array.size > 0:\n        if array.dtype == object:\n            counter = Counter(array)\n            most_frequent_count = counter.most_common(1)[0][1]\n            most_frequent_value = min((value for value, count in counter.items() if count == most_frequent_count))\n        else:\n            mode = _mode(array)\n            most_frequent_value = mode[0][0]\n            most_frequent_count = mode[1][0]\n    else:\n        most_frequent_value = 0\n        most_frequent_count = 0\n    if most_frequent_count == 0 and n_repeat == 0:\n        return np.nan\n    elif most_frequent_count < n_repeat:\n        return extra_value\n    elif most_frequent_count > n_repeat:\n        return most_frequent_value\n    elif most_frequent_count == n_repeat:\n        return min(most_frequent_value, extra_value)",
    "docstring": "Compute the most frequent value in a 1d array extended with [extra_value] * n_repeat, where extra_value is assumed to be not part of the array.",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\impute\\_base.py",
    "ast_data": "FunctionDef name:_most_frequent arg:array arg:extra_value arg:n_repeat arguments arg arg arg If Compare If Compare Assign Call Assign Call Assign Call Call Compare Assign Call Assign Assign Assign Assign If BoolOp Compare Compare Return return:yes If Compare Return return:yes If Compare Return return:yes If Compare Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "_is_constant_feature",
    "source_code": "def _is_constant_feature(var, mean, n_samples):\n    eps = np.finfo(np.float64).eps\n    upper_bound = n_samples * eps * var + (n_samples * mean * eps) ** 2\n    return var <= upper_bound",
    "docstring": "Detect if a feature is indistinguishable from a constant feature. The detection is based on its computed variance and on the theoretical error bounds of the '2 pass algorithm' for variance computation. See \"Algorithms for computing the sample variance: analysis and recommendations\", by Chan, Golub, and LeVeque.",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\preprocessing\\_data.py",
    "ast_data": "FunctionDef name:_is_constant_feature arg:var arg:mean arg:n_samples arguments arg arg arg Assign Call Assign Return return:yes Compare"
  },
  {
    "library": "django",
    "name": "patch_vary_headers",
    "source_code": "def patch_vary_headers(response, newheaders):\n    if response.has_header('Vary'):\n        vary_headers = cc_delim_re.split(response.headers['Vary'])\n    else:\n        vary_headers = []\n    existing_headers = {header.lower() for header in vary_headers}\n    additional_headers = [newheader for newheader in newheaders if newheader.lower() not in existing_headers]\n    vary_headers += additional_headers\n    if '*' in vary_headers:\n        response.headers['Vary'] = '*'\n    else:\n        response.headers['Vary'] = ', '.join(vary_headers)",
    "docstring": "Add (or update) the \"Vary\" header in the given HttpResponse object. newheaders is a list of header names that should be in \"Vary\". If headers contains an asterisk, then \"Vary\" header will consist of a single asterisk '*'. Otherwise, existing headers in \"Vary\" aren't removed.",
    "type": "function",
    "file_path": "django\\django\\utils\\cache.py",
    "ast_data": "FunctionDef name:patch_vary_headers arg:response arg:newheaders arguments arg arg If Call Assign Call Assign Assign Call Assign Compare Call If Compare Assign Assign Call"
  },
  {
    "library": "tensorflow",
    "name": "_subscribe_new",
    "source_code": "def _subscribe_new(tensor, side_effects, control_cache):\n    update_input = []\n    for consumer_op in list(tensor.consumers()):\n        update_input.append((consumer_op, list(consumer_op.inputs).index(tensor)))\n    update_control_input = control_cache.get_control_outputs(tensor.op)\n    name_scope = tensor.op.name + '/subscription/'\n    with ops.name_scope(name_scope):\n        outs = []\n        for s in side_effects:\n            outs += s(tensor)\n        with ops.control_dependencies(outs):\n            out = array_ops.identity(tensor)\n    for consumer_op, index in update_input:\n        consumer_op._update_input(index, out)\n    for consumer_op in update_control_input:\n        new_control_inputs = consumer_op.control_inputs\n        if tensor.op in new_control_inputs:\n            new_control_inputs.remove(tensor.op)\n        new_control_inputs.append(out.op)\n        consumer_op._remove_all_control_inputs()\n        consumer_op._add_control_inputs(new_control_inputs)\n    return out",
    "docstring": "Helper method that subscribes a single tensor to a list of side_effects. Args: tensor: side_effects: List of side_effect functions see subscribe for details. control_cache: helper to get control_outputs faster. Returns: The modified replacement to the passed in tensor which triggers the side effects.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\subscribe.py",
    "ast_data": "FunctionDef name:_subscribe_new arg:tensor arg:side_effects arg:control_cache arguments arg arg arg Assign For Call Call Call Call Call Assign Call Assign With Call Assign For Call With Call Assign Call For Call For Assign If Compare Call Call Call Call Return return:yes"
  },
  {
    "library": "sphinx",
    "name": "JSCallable",
    "source_code": "class JSCallable(JSObject):\n    has_arguments = True\n    doc_field_types = [TypedField('arguments', label=_('Arguments'), names=('argument', 'arg', 'parameter', 'param'), typerolename='func', typenames=('paramtype', 'type')), GroupedField('errors', label=_('Throws'), rolename='func', names=('throws',), can_collapse=True), Field('returnvalue', label=_('Returns'), has_arg=False, names=('returns', 'return')), Field('returntype', label=_('Return type'), has_arg=False, names=('rtype',))]",
    "docstring": "Description of a JavaScript function, method or constructor.",
    "type": "class",
    "file_path": "sphinx\\sphinx\\domains\\javascript.py",
    "ast_data": "ClassDef name:JSCallable Assign Assign Call Call Call Call Call Call Call Call"
  },
  {
    "library": "cherrypy",
    "name": "_file_for_module",
    "source_code": "@classmethod\ndef _file_for_module(cls, module):\n    return cls._archive_for_zip_module(module) or cls._file_for_file_module(module)",
    "docstring": "Return the relevant file for the module.",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\process\\plugins.py",
    "ast_data": "FunctionDef name:_file_for_module arg:cls arg:module arguments arg arg Return return:yes BoolOp Call Call"
  },
  {
    "library": "numpy",
    "name": "_zip_descr",
    "source_code": "def _zip_descr(seqarrays, flatten=False):\n    return _zip_dtype(seqarrays, flatten=flatten).descr",
    "docstring": "Combine the dtype description of a series of arrays. Parameters ---------- seqarrays : sequence of arrays Sequence of arrays flatten : {boolean}, optional Whether to collapse nested descriptions.",
    "type": "function",
    "file_path": "numpy\\numpy\\lib\\recfunctions.py",
    "ast_data": "FunctionDef name:_zip_descr arg:seqarrays arg:flatten arguments arg arg Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "skip",
    "source_code": "def skip(self, params):\n    return False",
    "docstring": "Return True if the benchmark should be skipped for these params",
    "type": "method",
    "file_path": "scikit-learn\\asv_benchmarks\\benchmarks\\common.py",
    "ast_data": "FunctionDef name:skip arg:self arg:params arguments arg arg Return return:yes"
  },
  {
    "library": "scipy",
    "name": "f",
    "source_code": "@property\ndef f(self) -> np.ndarray:\n    if self.fft_mode in {'onesided', 'onesided2X'}:\n        return fft_lib.rfftfreq(self.mfft, self.T)\n    elif self.fft_mode == 'twosided':\n        return fft_lib.fftfreq(self.mfft, self.T)\n    elif self.fft_mode == 'centered':\n        return fft_lib.fftshift(fft_lib.fftfreq(self.mfft, self.T))\n    fft_modes = get_args(FFT_MODE_TYPE)\n    raise RuntimeError(f'self.fft_mode={self.fft_mode!r} not in {fft_modes}!')",
    "docstring": "Frequencies values of the STFT. A 1d array of length with spaced entries is returned. See Also -------- delta_f: Width of the frequency bins of the STFT. f_pts: Number of points along the frequency axis. mfft: Length of the input for FFT used. ShortTimeFFT: Class this property belongs to.",
    "type": "method",
    "file_path": "scipy\\scipy\\signal\\_short_time_fft.py",
    "ast_data": "FunctionDef name:f arg:self arguments arg If Compare Return return:yes Call If Compare Return return:yes Call If Compare Return return:yes Call Call Assign Call Raise Call"
  },
  {
    "library": "seaborn",
    "name": "_auto_ticks",
    "source_code": "def _auto_ticks(self, ax, labels, axis):\n    transform = ax.figure.dpi_scale_trans.inverted()\n    bbox = ax.get_window_extent().transformed(transform)\n    size = [bbox.width, bbox.height][axis]\n    axis = [ax.xaxis, ax.yaxis][axis]\n    tick, = axis.set_ticks([0])\n    fontsize = tick.label1.get_size()\n    max_ticks = int(size // (fontsize / 72))\n    if max_ticks < 1:\n        return ([], [])\n    tick_every = len(labels) // max_ticks + 1\n    tick_every = 1 if tick_every == 0 else tick_every\n    ticks, labels = self._skip_ticks(labels, tick_every)\n    return (ticks, labels)",
    "docstring": "Determine ticks and ticklabels that minimize overlap.",
    "type": "method",
    "file_path": "seaborn\\seaborn\\matrix.py",
    "ast_data": "FunctionDef name:_auto_ticks arg:self arg:ax arg:labels arg:axis arguments arg arg arg arg Assign Call Assign Call Call Assign Assign Assign Call Assign Call Assign Call If Compare Return return:no Assign Call Assign Compare Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_maybe_create_checkpoint_manager",
    "source_code": "def _maybe_create_checkpoint_manager(self):\n    if isinstance(self._checkpoint_or_checkpoint_manager, checkpoint_management.CheckpointManager):\n        self._read_checkpoint_manager = self._checkpoint_or_checkpoint_manager\n        self._write_checkpoint_manager = self._checkpoint_or_checkpoint_manager\n        self._api_made_checkpoint_manager = False\n    else:\n        self._api_made_checkpoint_manager = True\n        self._read_checkpoint_manager = checkpoint_management.CheckpointManager(self._checkpoint_or_checkpoint_manager, directory=self._checkpoint_dir, max_to_keep=1)\n        if self._is_chief:\n            self._write_checkpoint_manager = self._read_checkpoint_manager\n        else:\n            self._write_checkpoint_manager = checkpoint_management.CheckpointManager(self._checkpoint_or_checkpoint_manager, _non_chief_checkpoint_dir(self._checkpoint_dir, self._cluster_resolver.task_id), max_to_keep=1)",
    "docstring": "Create CheckpointManager(s) if a checkpoint is passed else take it.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\failure_handling\\failure_handling.py",
    "ast_data": "FunctionDef name:_maybe_create_checkpoint_manager arg:self arguments arg If Call Assign Assign Assign Assign Assign Call If Assign Assign Call Call"
  },
  {
    "library": "scipy",
    "name": "_cumulatively_sum_simpson_integrals",
    "source_code": "def _cumulatively_sum_simpson_integrals(y: np.ndarray, dx: np.ndarray, integration_func: Callable[[np.ndarray, np.ndarray], np.ndarray]) -> np.ndarray:\n    sub_integrals_h1 = integration_func(y, dx)\n    sub_integrals_h2 = integration_func(y[..., ::-1], dx[..., ::-1])[..., ::-1]\n    shape = list(sub_integrals_h1.shape)\n    shape[-1] += 1\n    sub_integrals = np.empty(shape)\n    sub_integrals[..., :-1:2] = sub_integrals_h1[..., ::2]\n    sub_integrals[..., 1::2] = sub_integrals_h2[..., ::2]\n    sub_integrals[..., -1] = sub_integrals_h2[..., -1]\n    res = np.cumsum(sub_integrals, axis=-1)\n    return res",
    "docstring": "Calculate cumulative sum of Simpson integrals. Takes as input the integration function to be used. The integration_func is assumed to return the cumulative sum using composite Simpson's rule. Assumes the axis of summation is -1.",
    "type": "function",
    "file_path": "scipy\\scipy\\integrate\\_quadrature.py",
    "ast_data": "FunctionDef name:_cumulatively_sum_simpson_integrals arg:y arg:dx arg:integration_func arguments arg arg arg Assign Call Assign Call Assign Call Assign Call Assign Assign Assign Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "get_shape",
    "source_code": "def get_shape(self) -> tensor_shape.TensorShape:\n    return self._dense_shape_default",
    "docstring": "Get the representing the shape of the dense tensor. Returns: A object.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\sparse_tensor.py",
    "ast_data": "FunctionDef name:get_shape arg:self arguments arg Return return:yes"
  },
  {
    "library": "scipy",
    "name": "NeedleEye",
    "source_code": "class NeedleEye(Benchmark):\n    change_dimensionality = True\n\n    def __init__(self, dimensions=2):\n        Benchmark.__init__(self, dimensions)\n        self._bounds = list(zip([-10.0] * self.N, [10.0] * self.N))\n        self.global_optimum = [[0.0 for _ in range(self.N)]]\n        self.fglob = 1.0\n\n    def fun(self, x, *args):\n        self.nfev += 1\n        f = fp = 0.0\n        eye = 0.0001\n        for val in x:\n            if abs(val) >= eye:\n                fp = 1.0\n                f += 100.0 + abs(val)\n            else:\n                f += 1.0\n        if fp < 1e-06:\n            f = f / self.N\n        return f",
    "docstring": "NeedleEye objective function. This class defines the Needle-Eye [1]_ global optimization problem. This is a a multimodal minimization problem defined as follows: .. math:: f_{\\text{NeedleEye}}(x) = \\begin{cases} 1 & \\textrm{if }\\hspace{5pt} \\lvert x_i \\rvert eye \\\\ 0 & \\textrm{otherwise}\\\\ \\end{cases} Where, in this exercise, :math:. Here, :math: represents the number of dimensions and :math: for :math:. *Global optimum*: :math: for :math: for :math: .. [1] Gavana, A. Global Optimization Benchmarks and AMPGO retrieved 2015",
    "type": "class",
    "file_path": "scipy\\benchmarks\\benchmarks\\go_benchmark_functions\\go_funcs_N.py",
    "ast_data": "ClassDef name:NeedleEye Assign FunctionDef name:__init__ arg:self arg:dimensions arguments arg arg Call Assign Call Call Assign Call Assign FunctionDef name:fun arg:self arg:x arguments arg arg arg Assign Assign For If Compare Call Assign Call If Compare Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "replicate",
    "source_code": "@classmethod\ndef replicate(cls):\n    return Sharding(proto=xla_data_pb2.OpSharding(type=xla_data_pb2.OpSharding.REPLICATED))",
    "docstring": "Returns a replicated sharding attribute. This causes an op to be computed in its entirety independently on all cores in the XLA device.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\compiler\\xla\\experimental\\xla_sharding.py",
    "ast_data": "FunctionDef name:replicate arg:cls arguments arg Return return:yes Call Call"
  },
  {
    "library": "matplotlib",
    "name": "_scale_norm",
    "source_code": "def _scale_norm(self, norm, vmin, vmax, A):\n    if vmin is not None or vmax is not None:\n        self.set_clim(vmin, vmax)\n        if isinstance(norm, colors.Normalize):\n            raise ValueError('Passing a Normalize instance simultaneously with vmin/vmax is not supported.  Please pass vmin/vmax directly to the norm when creating it.')\n    self.autoscale_None(A)",
    "docstring": "Helper for initial scaling. Used by public functions that create a ScalarMappable and support parameters *vmin*, *vmax* and *norm*. This makes sure that a *norm* will take precedence over *vmin*, *vmax*. Note that this method does not set the norm.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\colorizer.py",
    "ast_data": "FunctionDef name:_scale_norm arg:self arg:norm arg:vmin arg:vmax arg:A arguments arg arg arg arg arg If BoolOp Compare Compare Call If Call Raise Call Call"
  },
  {
    "library": "pytorch",
    "name": "str_signature",
    "source_code": "def str_signature(sig):\n    return ', '.join((cls.__name__ for cls in sig))",
    "docstring": "String representation of type signature >>> str_signature((int, float)) 'int, float'",
    "type": "function",
    "file_path": "pytorch\\torch\\fx\\experimental\\unification\\multipledispatch\\dispatcher.py",
    "ast_data": "FunctionDef name:str_signature arg:sig arguments arg Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "set_frame_on",
    "source_code": "def set_frame_on(self, b):\n    self.legendPatch.set_visible(b)\n    self.stale = True",
    "docstring": "Set whether the legend box patch is drawn. Parameters ---------- b : bool",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\legend.py",
    "ast_data": "FunctionDef name:set_frame_on arg:self arg:b arguments arg arg Call Assign"
  },
  {
    "library": "pytorch",
    "name": "get_size_hint",
    "source_code": "def get_size_hint(self) -> int:\n    raise NotImplementedError",
    "docstring": "Number of bytes used for example inputs",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\codegen\\memory_planning.py",
    "ast_data": "FunctionDef name:get_size_hint arg:self arguments arg Raise"
  },
  {
    "library": "scipy",
    "name": "_compute_factors",
    "source_code": "def _compute_factors(roots, multiplicity, include_powers=False):\n    current = np.array([1])\n    suffixes = [current]\n    for pole, mult in zip(roots[-1:0:-1], multiplicity[-1:0:-1]):\n        monomial = np.array([1, -pole])\n        for _ in range(mult):\n            current = np.polymul(current, monomial)\n        suffixes.append(current)\n    suffixes = suffixes[::-1]\n    factors = []\n    current = np.array([1])\n    for pole, mult, suffix in zip(roots, multiplicity, suffixes):\n        monomial = np.array([1, -pole])\n        block = []\n        for i in range(mult):\n            if i == 0 or include_powers:\n                block.append(np.polymul(current, suffix))\n            current = np.polymul(current, monomial)\n        factors.extend(reversed(block))\n    return (factors, current)",
    "docstring": "Compute the total polynomial divided by factors for each root.",
    "type": "function",
    "file_path": "scipy\\scipy\\signal\\_signaltools.py",
    "ast_data": "FunctionDef name:_compute_factors arg:roots arg:multiplicity arg:include_powers arguments arg arg arg Assign Call Assign For Call Assign Call For Call Assign Call Call Assign Assign Assign Call For Call Assign Call Assign For Call If BoolOp Compare Call Call Assign Call Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "triton_acc_type",
    "source_code": "def triton_acc_type(dtype: torch.dtype) -> str:\n    return triton_compute_type(upcast_acc_dtype(dtype))",
    "docstring": "Convert torch.dtype to triton type, with reduction upcasts",
    "type": "function",
    "file_path": "pytorch\\torch\\_inductor\\codegen\\triton.py",
    "ast_data": "FunctionDef name:triton_acc_type arg:dtype arguments arg Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "canonical_name",
    "source_code": "def canonical_name(device):\n    if device is None:\n        return ''\n    if is_device_spec(device):\n        return device.to_string()\n    else:\n        device = DeviceSpec.from_string(device)\n        return device.to_string()",
    "docstring": "Returns a canonical name for the given or device name.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\device.py",
    "ast_data": "FunctionDef name:canonical_name arg:device arguments arg If Compare Return return:yes If Call Return return:yes Call Assign Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_TPUPollingThread",
    "source_code": "class _TPUPollingThread(threading.Thread):\n\n    def __init__(self, cluster, session):\n        super(_TPUPollingThread, self).__init__()\n        self.daemon = True\n        self._running = True\n        self._session_closed = False\n        self._cluster = cluster\n        self._session = session\n        self._interval = 30\n        for name in ['googleapiclient.discovery', 'oauth2client.client']:\n            _logging.getLogger(name).setLevel(_logging.WARNING)\n\n    def stop(self):\n        self._running = False\n        self._session_closed = True\n        self.join()\n\n    def run(self):\n        if not tpu_cluster_resolver.is_running_in_gce():\n            logging.warning('TPUPollingThread is running in a non-GCE environment, exiting...')\n            self._running = False\n            return\n        while self._running:\n            recoverable = self._cluster._cloud_tpu_client.recoverable()\n            if not recoverable:\n                logging.warning('TPUPollingThread found TPU %s in state %s', self._cluster._tpu, self._cluster._cloud_tpu_client.state())\n                os._exit(1)\n            time.sleep(self._interval)",
    "docstring": "A thread that polls the state of a TPU node. When the node transitions into a TERMINAL state (PREEMPTED, TERMINATED) that's considered as not recoverable by the underlying infrastructure, it attempts to close the session, and exits the entire process if the session.close() stucks.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\tpu\\preempted_hook.py",
    "ast_data": "ClassDef name:_TPUPollingThread FunctionDef name:__init__ arg:self arg:cluster arg:session arguments arg arg arg Call Call Assign Assign Assign Assign Assign Assign For Call Call FunctionDef name:stop arg:self arguments arg Assign Assign Call FunctionDef name:run arg:self arguments arg If Call Call Assign Return return:no While Assign Call If Call Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_normalize_docstring",
    "source_code": "def _normalize_docstring(docstring):\n    if not docstring:\n        return ''\n    lines = docstring.expandtabs().splitlines()\n    indent = sys.maxsize\n    for line in lines[1:]:\n        stripped = line.lstrip()\n        if stripped:\n            indent = min(indent, len(line) - len(stripped))\n    trimmed = [lines[0].strip()]\n    if indent < sys.maxsize:\n        for line in lines[1:]:\n            trimmed.append(line[indent:].rstrip())\n    while trimmed and (not trimmed[-1]):\n        trimmed.pop()\n    while trimmed and (not trimmed[0]):\n        trimmed.pop(0)\n    return '\\n'.join(trimmed)",
    "docstring": "Normalizes the docstring. Replaces tabs with spaces, removes leading and trailing blanks lines, and removes any indentation. Copied from PEP-257: Args: docstring: the docstring to normalize Returns: The normalized docstring",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\util\\decorator_utils.py",
    "ast_data": "FunctionDef name:_normalize_docstring arg:docstring arguments arg If Return return:yes Assign Call Call Assign For Assign Call If Assign Call Call Call Assign Call If Compare For Call Call While BoolOp Call While BoolOp Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "on_run_start",
    "source_code": "def on_run_start(self, request):\n    debug_urls, watch_opts = self._prepare_run_watch_config(request.fetches, request.feed_dict)\n    return OnRunStartResponse(OnRunStartAction.DEBUG_RUN, debug_urls, debug_ops=watch_opts.debug_ops, node_name_regex_allowlist=watch_opts.node_name_regex_allowlist, op_type_regex_allowlist=watch_opts.op_type_regex_allowlist, tensor_dtype_regex_allowlist=watch_opts.tensor_dtype_regex_allowlist, tolerate_debug_op_creation_failures=watch_opts.tolerate_debug_op_creation_failures)",
    "docstring": "See doc of BaseDebugWrapperSession.on_run_start.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\debug\\wrappers\\framework.py",
    "ast_data": "FunctionDef name:on_run_start arg:self arg:request arguments arg arg Assign Call Return return:yes Call"
  },
  {
    "library": "virtualenv",
    "name": "get_env_var",
    "source_code": "def get_env_var(key, as_type, env):\n    environ_key = f'VIRTUALENV_{key.upper()}'\n    if env.get(environ_key):\n        value = env[environ_key]\n        with suppress(Exception):\n            source = f'env var {environ_key}'\n            as_type = convert(value, as_type, source)\n            return (as_type, source)\n    return None",
    "docstring": "Get the environment variable option. :param key: the config key requested :param as_type: the type we would like to convert it to :param env: environment variables to use :return:",
    "type": "function",
    "file_path": "virtualenv\\src\\virtualenv\\config\\env_var.py",
    "ast_data": "FunctionDef name:get_env_var arg:key arg:as_type arg:env arguments arg arg arg Assign Call If Call Assign With Call Assign Assign Call Return return:yes Return return:no"
  },
  {
    "library": "scipy",
    "name": "_permutation_distribution_t",
    "source_code": "def _permutation_distribution_t(data, permutations, size_a, equal_var, random_state=None):\n    random_state = check_random_state(random_state)\n    size = data.shape[-1]\n    n_max = special.comb(size, size_a)\n    if permutations < n_max:\n        perm_generator = (random_state.permutation(size) for i in range(permutations))\n    else:\n        permutations = n_max\n        perm_generator = (np.concatenate(z) for z in _all_partitions(size_a, size - size_a))\n    t_stat = []\n    for indices in _batch_generator(perm_generator, batch=50):\n        indices = np.array(indices)\n        data_perm = data[..., indices]\n        data_perm = np.moveaxis(data_perm, -2, 0)\n        a = data_perm[..., :size_a]\n        b = data_perm[..., size_a:]\n        t_stat.append(_calc_t_stat(a, b, equal_var))\n    t_stat = np.concatenate(t_stat, axis=0)\n    return (t_stat, permutations, n_max)",
    "docstring": "Generation permutation distribution of t statistic",
    "type": "function",
    "file_path": "scipy\\scipy\\stats\\_stats_py.py",
    "ast_data": "FunctionDef name:_permutation_distribution_t arg:data arg:permutations arg:size_a arg:equal_var arg:random_state arguments arg arg arg arg arg Assign Call Assign Assign Call If Compare Assign Call Call Assign Assign Call Call Assign For Call Assign Call Assign Assign Call Assign Assign Call Call Assign Call Return return:yes"
  },
  {
    "library": "scipy",
    "name": "solve_toeplitz",
    "source_code": "def solve_toeplitz(c_or_cr, b, check_finite=True):\n    c, r = c_or_cr if isinstance(c_or_cr, tuple) else (c_or_cr, np.conjugate(c_or_cr))\n    return _solve_toeplitz(c, r, b, check_finite)",
    "docstring": "Solve the equation `c_or_crsolve_toeplitzbb`. >>> T = toeplitz(c, r) >>> T.dot(x) array([ 1., 2., 2., 5.])",
    "type": "function",
    "file_path": "scipy\\scipy\\linalg\\_basic.py",
    "ast_data": "FunctionDef name:solve_toeplitz arg:c_or_cr arg:b arg:check_finite arguments arg arg arg Assign Call Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_Merge",
    "source_code": "class _Merge(_Node):\n\n    def convert_variable_to_constant(self, incoming_edge, tensor_data):\n        super(_Merge, self).convert_variable_to_constant(_Edge(incoming_edge.source, _Edge(incoming_edge.destination.convertible, 0)), tensor_data)",
    "docstring": "Specialization of _Node to Merge ops.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\convert_to_constants.py",
    "ast_data": "ClassDef name:_Merge FunctionDef name:convert_variable_to_constant arg:self arg:incoming_edge arg:tensor_data arguments arg arg arg Call Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "name",
    "source_code": "@property\ndef name(self):\n    return self._name",
    "docstring": "Returns the name given to this Template.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\template.py",
    "ast_data": "FunctionDef name:name arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "sparse_reduce_sum_sparse",
    "source_code": "@tf_export(v1=['sparse.reduce_sum_sparse', 'sparse_reduce_sum_sparse'])\n@deprecation.deprecated_endpoints('sparse_reduce_sum_sparse')\n@deprecation.deprecated_args(None, 'keep_dims is deprecated, use keepdims instead', 'keep_dims')\ndef sparse_reduce_sum_sparse(sp_input, axis=None, keepdims=None, reduction_axes=None, keep_dims=None):\n    keepdims = deprecation.deprecated_argument_lookup('keepdims', keepdims, 'keep_dims', keep_dims)\n    axis = deprecation.deprecated_argument_lookup('axis', axis, 'reduction_axes', reduction_axes)\n    if keepdims is None:\n        keepdims = False\n    output_ind, output_val, output_shape = gen_sparse_ops.sparse_reduce_sum_sparse(sp_input.indices, sp_input.values, sp_input.dense_shape, math_ops._ReductionDims(sp_input, axis), keepdims)\n    return sparse_tensor.SparseTensor(output_ind, output_val, output_shape)",
    "docstring": "Computes the sum of elements across dimensions of a SparseTensor. This Op takes a SparseTensor and is the sparse counterpart to . In contrast to SparseReduceSum, this Op returns a SparseTensor. Note: A gradient is not defined for this function, so it can't be used in training models that need gradient descent. Reduces along the dimensions given in . Unless is true, the rank of the tensor is reduced by 1 for each entry in . If is true, the reduced dimensions are retained with length 1. If has no entries, all dimensions are reduced, and a tensor with a single element is returned. Additionally, the axes can be negative, which are interpreted according to the indexing rules in Python. Args: sp_input: The SparseTensor to reduce. Should have numeric type. axis: The dimensions to reduce; list or scalar. If (the default), reduces all dimensions. keepdims: If true, retain reduced dimensions with length 1. reduction_axes: Deprecated name of axis. keep_dims: Deprecated alias for . Returns: The reduced SparseTensor.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\sparse_ops.py",
    "ast_data": "FunctionDef name:sparse_reduce_sum_sparse arg:sp_input arg:axis arg:keepdims arg:reduction_axes arg:keep_dims arguments arg arg arg arg arg Assign Call Assign Call If Compare Assign Assign Call Call Return return:yes Call Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "hardsigmoid",
    "source_code": "def hardsigmoid(input: Tensor, inplace: bool=False) -> Tensor:\n    if has_torch_function_unary(input):\n        return handle_torch_function(hardsigmoid, (input,), input, inplace=inplace)\n    if inplace:\n        return torch._C._nn.hardsigmoid_(input)\n    return torch._C._nn.hardsigmoid(input)",
    "docstring": "Apply the Hardsigmoid function element-wise. .. math:: \\text{Hardsigmoid}(x) = \\begin{cases} 0 & \\text{if~} x \\le -3, \\\\ 1 & \\text{if~} x \\ge +3, \\\\ x / 6 + 1 / 2 & \\text{otherwise} \\end{cases} Args: inplace: If set to `~torch.nn.Hardsigmoid` for more details.",
    "type": "function",
    "file_path": "pytorch\\torch\\nn\\functional.py",
    "ast_data": "FunctionDef name:hardsigmoid arg:input arg:inplace arguments arg arg If Call Return return:yes Call If Return return:yes Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "check_aliasing_constraint",
    "source_code": "def check_aliasing_constraint(name, prev, result, get_module=lambda: '???'):\n    storages = {id(t.untyped_storage()) for t in prev if isinstance(t, torch.Tensor)}\n    tuple_result = result\n    if not isinstance(result, tuple):\n        tuple_result = (result,)\n    for tensor in iter_tensors(tuple_result, {}):\n        key = id(tensor.untyped_storage())\n        if id(tensor.untyped_storage()) in storages:\n            raise RuntimeError(f'{name} (with implementation in {get_module()}): The output of this custom operator (1) must not also be an input to this custom operator and (2) may not alias any inputs to this custom operator or other returns. The most common way to trigger this error is if we have y = custom_op(x) and y and x are the same Tensor. Please instead return a clone of the offending output tensor(s) (e.g. return x.clone()) or refactor the custom operator to not return y.')\n        storages.add(key)",
    "docstring": "custom operators' outputs must not alias any inputs or other outputs.",
    "type": "function",
    "file_path": "pytorch\\torch\\_library\\utils.py",
    "ast_data": "FunctionDef name:check_aliasing_constraint arg:name arg:prev arg:result arg:get_module arguments arg arg arg arg arguments Assign Call Call Call Assign If Call Assign For Call Assign Call Call If Compare Call Call Raise Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "sequence_categorical_column_with_vocabulary_list",
    "source_code": "@doc_controls.header(_FEATURE_COLUMN_DEPRECATION_WARNING)\n@tf_export('feature_column.sequence_categorical_column_with_vocabulary_list')\n@deprecation.deprecated(None, _FEATURE_COLUMN_DEPRECATION_RUNTIME_WARNING)\ndef sequence_categorical_column_with_vocabulary_list(key, vocabulary_list, dtype=None, default_value=-1, num_oov_buckets=0):\n    return fc.SequenceCategoricalColumn(fc.categorical_column_with_vocabulary_list(key=key, vocabulary_list=vocabulary_list, dtype=dtype, default_value=default_value, num_oov_buckets=num_oov_buckets))",
    "docstring": "A sequence of categorical terms where ids use an in-memory list. Pass this to or to convert sequence categorical data into dense representation for input to sequence NN, such as RNN. Example: Args: key: A unique string identifying the input feature. vocabulary_list: An ordered iterable defining the vocabulary. Each feature is mapped to the index of its value (if present) in . Must be castable to . dtype: The type of features. Only string and integer types are supported. If , it will be inferred from . default_value: The integer ID value to return for out-of-vocabulary feature values, defaults to . This can not be specified with a positive . num_oov_buckets: Non-negative integer, the number of out-of-vocabulary buckets. All out-of-vocabulary inputs will be assigned IDs in the range based on a hash of the input value. A positive can not be specified with . Returns: A . Raises: ValueError: if is empty, or contains duplicate keys. ValueError: is a negative integer. ValueError: and are both specified. ValueError: if is not integer or string.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\feature_column\\sequence_feature_column.py",
    "ast_data": "FunctionDef name:sequence_categorical_column_with_vocabulary_list arg:key arg:vocabulary_list arg:dtype arg:default_value arg:num_oov_buckets arguments arg arg arg arg arg Return return:yes Call Call Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "impl_factory",
    "source_code": "def impl_factory(self) -> typing.Callable:\n\n    def inner(f):\n        self._register_impl('factory', f)\n        library.impl(self._lib, self._opname, 'BackendSelect')(f)\n        return f\n    return inner",
    "docstring": "Register an implementation for a factory function.",
    "type": "method",
    "file_path": "pytorch\\torch\\_custom_op\\impl.py",
    "ast_data": "FunctionDef name:impl_factory arg:self arguments arg FunctionDef name:inner arg:f arguments arg Call Call Call Return return:yes Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_get_global_step_read",
    "source_code": "def _get_global_step_read(graph=None):\n    graph = graph or ops.get_default_graph()\n    global_step_read_tensors = graph.get_collection(GLOBAL_STEP_READ_KEY)\n    if len(global_step_read_tensors) > 1:\n        raise RuntimeError('There are multiple items in collection {}. There should be only one.'.format(GLOBAL_STEP_READ_KEY))\n    if len(global_step_read_tensors) == 1:\n        return global_step_read_tensors[0]\n    return None",
    "docstring": "Gets global step read tensor in graph. Args: graph: The graph in which to create the global step read tensor. If missing, use default graph. Returns: Global step read tensor. Raises: RuntimeError: if multiple items found in collection GLOBAL_STEP_READ_KEY.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\training\\training_util.py",
    "ast_data": "FunctionDef name:_get_global_step_read arg:graph arguments arg Assign BoolOp Call Assign Call If Compare Call Raise Call Call If Compare Call Return return:yes Return return:no"
  },
  {
    "library": "pandas",
    "name": "_transform_general",
    "source_code": "def _transform_general(self, func: Callable, engine, engine_kwargs, *args, **kwargs) -> Series:\n    if maybe_use_numba(engine):\n        return self._transform_with_numba(func, *args, engine_kwargs=engine_kwargs, **kwargs)\n    assert callable(func)\n    klass = type(self.obj)\n    results = []\n    for name, group in self._grouper.get_iterator(self._obj_with_exclusions):\n        object.__setattr__(group, 'name', name)\n        res = func(group, *args, **kwargs)\n        results.append(klass(res, index=group.index))\n    if results:\n        from pandas.core.reshape.concat import concat\n        concatenated = concat(results, ignore_index=True)\n        result = self._set_result_index_ordered(concatenated)\n    else:\n        result = self.obj._constructor(dtype=np.float64)\n    result.name = self.obj.name\n    return result",
    "docstring": "Transform with a callable .",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\groupby\\generic.py",
    "ast_data": "FunctionDef name:_transform_general arg:self arg:func arg:engine arg:engine_kwargs arguments arg arg arg arg arg arg If Call Return return:yes Call Call Assign Call Assign For Call Call Assign Call Call Call If Assign Call Assign Call Assign Call Assign Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "record_pre_forward",
    "source_code": "def record_pre_forward(self, handle: Optional[FlatParamHandle], is_training: bool) -> None:\n    if not handle:\n        return\n    self._check_order(handle, is_training)\n    if not self.is_first_iter or handle._pre_forward_order_index is not None:\n        return\n    index = len(self.handles_pre_forward_order)\n    handle._pre_forward_order_index = index\n    self.handles_pre_forward_order.append(handle)",
    "docstring": "Records `_check_order` for details.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\fsdp\\_exec_order_utils.py",
    "ast_data": "FunctionDef name:record_pre_forward arg:self arg:handle arg:is_training arguments arg arg arg If Return return:no Call If BoolOp Compare Return return:no Assign Call Assign Call"
  },
  {
    "library": "matplotlib",
    "name": "get_url",
    "source_code": "def get_url(self):\n    return self._url",
    "docstring": "Return a url if one is set, None otherwise.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\backend_bases.py",
    "ast_data": "FunctionDef name:get_url arg:self arguments arg Return return:yes"
  },
  {
    "library": "pandas",
    "name": "__getitem__",
    "source_code": "def __getitem__(self, key):\n    getitem = self._data.__getitem__\n    if is_integer(key) or is_float(key):\n        key = com.cast_scalar_indexer(key)\n        return getitem(key)\n    if isinstance(key, slice):\n        return self._getitem_slice(key)\n    if com.is_bool_indexer(key):\n        if isinstance(getattr(key, 'dtype', None), ExtensionDtype):\n            key = key.to_numpy(dtype=bool, na_value=False)\n        else:\n            key = np.asarray(key, dtype=bool)\n        if not isinstance(self.dtype, ExtensionDtype):\n            if len(key) == 0 and len(key) != len(self):\n                raise ValueError('The length of the boolean indexer cannot be 0 when the Index has length greater than 0.')\n    result = getitem(key)\n    if result.ndim > 1:\n        disallow_ndim_indexing(result)\n    return self._constructor._simple_new(result, name=self._name)",
    "docstring": "Override numpy.ndarray's __getitem__ method to work as desired. This function adds lists and Series as valid boolean indexers (ndarrays only supports ndarray with dtype=bool). If resulting ndim != 1, plain ndarray is returned instead of corresponding subclass.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\indexes\\base.py",
    "ast_data": "FunctionDef name:__getitem__ arg:self arg:key arguments arg arg Assign If BoolOp Call Call Assign Call Return return:yes Call If Call Return return:yes Call If Call If Call Call Assign Call Assign Call If Call If BoolOp Compare Call Compare Call Call Raise Call Assign Call If Compare Call Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "get_ymargin",
    "source_code": "def get_ymargin(self):\n    return self._ymargin",
    "docstring": "Retrieve autoscaling margin of the y-axis. .. versionadded:: 3.9 Returns ------- ymargin : float See Also -------- matplotlib.axes.Axes.set_ymargin",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\axes\\_base.py",
    "ast_data": "FunctionDef name:get_ymargin arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "function_protos",
    "source_code": "def function_protos(self):\n    return self._function_key_to_function.values()",
    "docstring": "Returns list of protos.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\profiler\\pprof_profiler.py",
    "ast_data": "FunctionDef name:function_protos arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "grid",
    "source_code": "def grid(self, visible=None, which='major', **kwargs):\n    if kwargs:\n        if visible is None:\n            visible = True\n        elif not visible:\n            _api.warn_external('First parameter to grid() is false, but line properties are supplied. The grid will be enabled.')\n            visible = True\n    which = which.lower()\n    _api.check_in_list(['major', 'minor', 'both'], which=which)\n    gridkw = {f'grid_{name}': value for name, value in kwargs.items()}\n    if which in ['minor', 'both']:\n        gridkw['gridOn'] = not self._minor_tick_kw['gridOn'] if visible is None else visible\n        self.set_tick_params(which='minor', **gridkw)\n    if which in ['major', 'both']:\n        gridkw['gridOn'] = not self._major_tick_kw['gridOn'] if visible is None else visible\n        self.set_tick_params(which='major', **gridkw)\n    self.stale = True",
    "docstring": "Configure the grid lines. Parameters ---------- visible : bool or None Whether to show the grid lines. If any *kwargs* are supplied, it is assumed you want the grid on and *visible* will be set to True. If *visible* is *None* and there are no *kwargs*, this toggles the visibility of the lines. which : {'major', 'minor', 'both'} The grid lines to apply the changes on. **kwargs : properties Define the line properties of the grid, e.g.:: grid(color='r', linestyle='-', linewidth=2)",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\axis.py",
    "ast_data": "FunctionDef name:grid arg:self arg:visible arg:which arguments arg arg arg arg If If Compare Assign If Call Assign Assign Call Call Assign Call If Compare Assign Compare Call If Compare Assign Compare Call Assign"
  },
  {
    "library": "pytorch",
    "name": "TrieNode",
    "source_code": "class TrieNode:\n\n    def __init__(self):\n        self.children = {}",
    "docstring": "A Trie node whose children are represented as a directory of char: TrieNode. A special char '' represents end of word",
    "type": "class",
    "file_path": "pytorch\\torch\\utils\\hipify\\hipify_python.py",
    "ast_data": "ClassDef name:TrieNode FunctionDef name:__init__ arg:self arguments arg Assign"
  },
  {
    "library": "django",
    "name": "select_format",
    "source_code": "def select_format(self, compiler, sql, params):\n    if not compiler.query.subquery:\n        return (compiler.connection.ops.select % sql, params)\n    return (sql, params)",
    "docstring": "Return the selection format string, depending on the requirements of the spatial backend. For example, Oracle and MySQL require custom selection formats in order to retrieve geometries in OGC WKB.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\db\\models\\fields.py",
    "ast_data": "FunctionDef name:select_format arg:self arg:compiler arg:sql arg:params arguments arg arg arg arg If Return return:yes Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "_args",
    "source_code": "def _args(self):\n    return NotImplementedError('args needs to be implemented by subclass.')",
    "docstring": "Assemble list of encoder-specific command-line arguments.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\animation.py",
    "ast_data": "FunctionDef name:_args arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "get_or_create_debug_dir",
    "source_code": "def get_or_create_debug_dir(export_dir):\n    debug_dir = get_debug_dir(export_dir)\n    file_io.recursive_create_dir(debug_dir)\n    return debug_dir",
    "docstring": "Returns path to the debug sub-directory, creating if it does not exist.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\saved_model\\path_helpers.py",
    "ast_data": "FunctionDef name:get_or_create_debug_dir arg:export_dir arguments arg Assign Call Call Return return:yes"
  },
  {
    "library": "django",
    "name": "duration_string",
    "source_code": "def duration_string(duration):\n    days, hours, minutes, seconds, microseconds = _get_duration_components(duration)\n    string = '{:02d}:{:02d}:{:02d}'.format(hours, minutes, seconds)\n    if days:\n        string = '{} '.format(days) + string\n    if microseconds:\n        string += '.{:06d}'.format(microseconds)\n    return string",
    "docstring": "Version of str(timedelta) which is not English specific.",
    "type": "function",
    "file_path": "django\\django\\utils\\duration.py",
    "ast_data": "FunctionDef name:duration_string arg:duration arguments arg Assign Call Assign Call If Assign Call If Call Return return:yes"
  },
  {
    "library": "pandas",
    "name": "__repr__",
    "source_code": "def __repr__(self) -> str:\n    return object.__repr__(self)",
    "docstring": "Return a string representation for a particular object.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\base.py",
    "ast_data": "FunctionDef name:__repr__ arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "__call__",
    "source_code": "def __call__(self, X, Y=None, eval_gradient=False):\n    X = np.atleast_2d(X)\n    if Y is None:\n        K = np.inner(X, X) + self.sigma_0 ** 2\n    else:\n        if eval_gradient:\n            raise ValueError('Gradient can only be evaluated when Y is None.')\n        K = np.inner(X, Y) + self.sigma_0 ** 2\n    if eval_gradient:\n        if not self.hyperparameter_sigma_0.fixed:\n            K_gradient = np.empty((K.shape[0], K.shape[1], 1))\n            K_gradient[..., 0] = 2 * self.sigma_0 ** 2\n            return (K, K_gradient)\n        else:\n            return (K, np.empty((X.shape[0], X.shape[0], 0)))\n    else:\n        return K",
    "docstring": "Return the kernel k(X, Y) and optionally its gradient. Parameters ---------- X : ndarray of shape (n_samples_X, n_features) Left argument of the returned kernel k(X, Y) Y : ndarray of shape (n_samples_Y, n_features), default=None Right argument of the returned kernel k(X, Y). If None, k(X, X) if evaluated instead. eval_gradient : bool, default=False Determines whether the gradient with respect to the log of the kernel hyperparameter is computed. Only supported when Y is None. Returns ------- K : ndarray of shape (n_samples_X, n_samples_Y) Kernel k(X, Y) K_gradient : ndarray of shape (n_samples_X, n_samples_X, n_dims), optional The gradient of the kernel k(X, X) with respect to the log of the hyperparameter of the kernel. Only returned when is True.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\gaussian_process\\kernels.py",
    "ast_data": "FunctionDef name:__call__ arg:self arg:X arg:Y arg:eval_gradient arguments arg arg arg arg Assign Call If Compare Assign Call If Raise Call Assign Call If If Assign Call Assign Return return:yes Return return:yes Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "scatter_nd_max",
    "source_code": "def scatter_nd_max(self, indices, updates, name=None):\n    return self._lazy_read(gen_state_ops.resource_scatter_nd_max(self.handle, indices, ops.convert_to_tensor(updates, self.dtype), name=name))",
    "docstring": "Updates this variable with the max of and itself. is a with rank and is a of rank . must be integer tensor, containing indices into . It must be shape where . The innermost dimension of (with length ) corresponds to indices into elements (if ) or slices (if ) along the th dimension of . is of rank with shape: See for more details about how to make updates to slices. Args: indices: The indices to be used in the operation. updates: The values to be used in the operation. name: the name of the operation. Returns: The updated variable.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\resource_variable_ops.py",
    "ast_data": "FunctionDef name:scatter_nd_max arg:self arg:indices arg:updates arg:name arguments arg arg arg arg Return return:yes Call Call Call"
  },
  {
    "library": "matplotlib",
    "name": "get_data",
    "source_code": "def get_data(self):\n    StairData = namedtuple('StairData', 'values edges baseline')\n    return StairData(self._values, self._edges, self._baseline)",
    "docstring": "Get values, edges and baseline as namedtuple.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\patches.py",
    "ast_data": "FunctionDef name:get_data arg:self arguments arg Assign Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "ContextlibContextManagerLocalGeneratorObjectVariable",
    "source_code": "class ContextlibContextManagerLocalGeneratorObjectVariable(LocalGeneratorObjectVariable):\n    pass",
    "docstring": ".. note:: This is only used when the function is annotated with @contextlib.contextmanager It is a special case of a generator function as we do not allow return a context manager from a torch.compile function.",
    "type": "class",
    "file_path": "pytorch\\torch\\_dynamo\\variables\\functions.py",
    "ast_data": "ClassDef name:ContextlibContextManagerLocalGeneratorObjectVariable"
  },
  {
    "library": "pytorch",
    "name": "_LoweringSerializer",
    "source_code": "class _LoweringSerializer:\n    fallbacks: OrderedSet[str]\n\n    def __init__(self) -> None:\n        from . import lowering\n        self.fallbacks = OrderedSet((str(k) for k, v in lowering.lowerings.items() if _is_fallback_handler(v)))\n\n    def patch(self) -> _LoweringSerializerContextManager:\n        return _LoweringSerializerContextManager(self)",
    "docstring": "This handles the data for serializing lowering.lowering",
    "type": "class",
    "file_path": "pytorch\\torch\\_inductor\\compile_fx_ext.py",
    "ast_data": "ClassDef name:_LoweringSerializer FunctionDef name:__init__ arg:self arguments arg Assign Call Call Call Call FunctionDef name:patch arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "cherrypy",
    "name": "ResponseBody",
    "source_code": "class ResponseBody(object):\n    unicode_err = 'Page handlers MUST return bytes. Use tools.encode if you wish to return unicode.'\n\n    def __get__(self, obj, objclass=None):\n        if obj is None:\n            return self\n        else:\n            return obj._body\n\n    def __set__(self, obj, value):\n        if isinstance(value, str):\n            raise ValueError(self.unicode_err)\n        elif isinstance(value, list):\n            if any((isinstance(item, str) for item in value)):\n                raise ValueError(self.unicode_err)\n        obj._body = encoding.prepare_iter(value)",
    "docstring": "The body of the HTTP response (the response entity).",
    "type": "class",
    "file_path": "cherrypy\\cherrypy\\_cprequest.py",
    "ast_data": "ClassDef name:ResponseBody Assign FunctionDef name:__get__ arg:self arg:obj arg:objclass arguments arg arg arg If Compare Return return:yes Return return:yes FunctionDef name:__set__ arg:self arg:obj arg:value arguments arg arg arg If Call Raise Call If Call If Call Call Raise Call Assign Call"
  },
  {
    "library": "scipy",
    "name": "estimate_smallest_singular_value",
    "source_code": "def estimate_smallest_singular_value(U):\n    U = np.atleast_2d(U)\n    m, n = U.shape\n    if m != n:\n        raise ValueError('A square triangular matrix should be provided.')\n    p = np.zeros(n)\n    w = np.empty(n)\n    for k in range(n):\n        wp = (1 - p[k]) / U.T[k, k]\n        wm = (-1 - p[k]) / U.T[k, k]\n        pp = p[k + 1:] + U.T[k + 1:, k] * wp\n        pm = p[k + 1:] + U.T[k + 1:, k] * wm\n        if abs(wp) + norm(pp, 1) >= abs(wm) + norm(pm, 1):\n            w[k] = wp\n            p[k + 1:] = pp\n        else:\n            w[k] = wm\n            p[k + 1:] = pm\n    v = solve_triangular(U, w)\n    v_norm = norm(v)\n    w_norm = norm(w)\n    s_min = w_norm / v_norm\n    z_min = v / v_norm\n    return (s_min, z_min)",
    "docstring": "Given upper triangular matrix ``. The estimation will be better more ill-conditioned is the matrix. References ---------- .. [1] Cline, A. K., Moler, C. B., Stewart, G. W., Wilkinson, J. H. An estimate for the condition number of a matrix. 1979. SIAM Journal on Numerical Analysis, 16(2), 368-375.",
    "type": "function",
    "file_path": "scipy\\scipy\\optimize\\_trustregion_exact.py",
    "ast_data": "FunctionDef name:estimate_smallest_singular_value arg:U arguments arg Assign Call Assign If Compare Raise Call Assign Call Assign Call For Call Assign Assign Assign Assign If Compare Call Call Call Call Assign Assign Assign Assign Assign Call Assign Call Assign Call Assign Assign Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "maybe_realize",
    "source_code": "def maybe_realize(args: list[Optional[IRNode]]):\n    return tree_map(lambda x: realize_inputs(x) if x is not None and (not isinstance(x, sympy.Symbol)) else x, args)",
    "docstring": "Accepts a list of optional IRNodes and returns a list of realized IRNodes",
    "type": "function",
    "file_path": "pytorch\\torch\\_inductor\\kernel\\flex_attention.py",
    "ast_data": "FunctionDef name:maybe_realize arg:args arguments arg Return return:yes Call arguments arg BoolOp Compare Call Call"
  },
  {
    "library": "pytorch",
    "name": "constant_name",
    "source_code": "def constant_name(self, name: str, device_override: Optional[torch.device]) -> str:\n    if self.constants[name].device == device_override or device_override is None:\n        return name\n    with torch.utils._python_dispatch._disable_current_modes():\n        return self.allocate_non_dup_const_name(f'{name}_{device_override.type}{device_override.index or 0}', self.constants[name].to(device_override))",
    "docstring": "We AOT copy constants to the devices they are needed on. If device_override doesn't match the constant's device, then copy it and return a different name.",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\graph.py",
    "ast_data": "FunctionDef name:constant_name arg:self arg:name arg:device_override arguments arg arg arg If BoolOp Compare Compare Return return:yes With Call Return return:yes Call BoolOp Call"
  },
  {
    "library": "matplotlib",
    "name": "_find_best_position",
    "source_code": "def _find_best_position(self, width, height, renderer):\n    assert self.isaxes\n    start_time = time.perf_counter()\n    bboxes, lines, offsets = self._auto_legend_data(renderer)\n    bbox = Bbox.from_bounds(0, 0, width, height)\n    candidates = []\n    for idx in range(1, len(self.codes)):\n        l, b = self._get_anchored_bbox(idx, bbox, self.get_bbox_to_anchor(), renderer)\n        legendBox = Bbox.from_bounds(l, b, width, height)\n        badness = sum((legendBox.count_contains(line.vertices) for line in lines)) + legendBox.count_contains(offsets) + legendBox.count_overlaps(bboxes) + sum((line.intersects_bbox(legendBox, filled=False) for line in lines))\n        candidates.append((badness, idx, (l, b)))\n        if badness == 0:\n            break\n    _, _, (l, b) = min(candidates)\n    if self._loc_used_default and time.perf_counter() - start_time > 1:\n        _api.warn_external('Creating legend with loc=\"best\" can be slow with large amounts of data.')\n    return (l, b)",
    "docstring": "Determine the best location to place the legend.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\legend.py",
    "ast_data": "FunctionDef name:_find_best_position arg:self arg:width arg:height arg:renderer arguments arg arg arg arg Assign Call Assign Call Assign Call Assign For Call Call Assign Call Call Assign Call Assign Call Call Call Call Call Call Call If Compare Assign Call If BoolOp Compare Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "ZeroPointDomain",
    "source_code": "class ZeroPointDomain(Enum):\n    INT = auto()\n    FLOAT = auto()\n    NONE = auto()",
    "docstring": "Enum that indicate whether zero_point is in integer domain or floating point domain integer domain: quantized_val = (float_val / scale) (integer) + zero_point (integer) float domain: quantized_val = (float_val - (zero_point (float) - scale * mid_point)) / scale none domain: quantized_val = (float_val / scale)",
    "type": "class",
    "file_path": "pytorch\\torch\\ao\\quantization\\observer.py",
    "ast_data": "ClassDef name:ZeroPointDomain Assign Call Assign Call Assign Call"
  },
  {
    "library": "scipy",
    "name": "_remove_nans",
    "source_code": "def _remove_nans(samples, paired):\n    if not paired:\n        return [sample[~np.isnan(sample)] for sample in samples]\n    nans = np.isnan(samples[0])\n    for sample in samples[1:]:\n        nans = nans | np.isnan(sample)\n    not_nans = ~nans\n    return [sample[not_nans] for sample in samples]",
    "docstring": "Remove nans from paired or unpaired 1D samples",
    "type": "function",
    "file_path": "scipy\\scipy\\stats\\_axis_nan_policy.py",
    "ast_data": "FunctionDef name:_remove_nans arg:samples arg:paired arguments arg arg If Return return:yes Call Assign Call For Assign Call Assign Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "save_stats",
    "source_code": "def save_stats(self, path: str) -> None:\n    stats = {'memories_allocated': self.memories_allocated, 'memories_active': self.memories_active, 'memories_reserved': self.memories_reserved, 'markers': self._markers, 'num_alloc_retries': self._num_cuda_retries}\n    with open(path, 'wb') as f:\n        pickle.dump(stats, f, pickle.HIGHEST_PROTOCOL)",
    "docstring": "Save the stats using pickle during runtime if users want to plot the traces in other places like notebook.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\_tools\\memory_tracker.py",
    "ast_data": "FunctionDef name:save_stats arg:self arg:path arguments arg arg Assign With Call Call"
  },
  {
    "library": "scipy",
    "name": "invpascal",
    "source_code": "def invpascal(n, kind='symmetric', exact=True):\n    from scipy.special import comb\n    if kind not in ['symmetric', 'lower', 'upper']:\n        raise ValueError(\"'kind' must be 'symmetric', 'lower' or 'upper'.\")\n    if kind == 'symmetric':\n        if exact:\n            if n > 34:\n                dt = object\n            else:\n                dt = np.int64\n        else:\n            dt = np.float64\n        invp = np.empty((n, n), dtype=dt)\n        for i in range(n):\n            for j in range(0, i + 1):\n                v = 0\n                for k in range(n - i):\n                    v += comb(i + k, k, exact=exact) * comb(i + k, i + k - j, exact=exact)\n                invp[i, j] = (-1) ** (i - j) * v\n                if i != j:\n                    invp[j, i] = invp[i, j]\n    else:\n        invp = pascal(n, kind=kind, exact=exact)\n        if invp.dtype == np.uint64:\n            invp = invp.view(np.int64)\n        invp *= toeplitz((-1) ** np.arange(n)).astype(invp.dtype)\n    return invp",
    "docstring": "Returns the inverse of the n x n Pascal matrix. The Pascal matrix is a matrix containing the binomial coefficients as its elements. Parameters ---------- n : int The size of the matrix to create; that is, the result is an n x n matrix. kind : str, optional Must be one of 'symmetric', 'lower', or 'upper'. Default is 'symmetric'. exact : bool, optional If is True, the result is either an array of type `nkindexact`: >>> invpascal(5, kind='lower', exact=False) array([[ 1., -0., 0., -0., 0.], [-1., 1., -0., 0., -0.], [ 1., -2., 1., -0., 0.], [-1., 3., -3., 1., -0.], [ 1., -4., 6., -4., 1.]])",
    "type": "function",
    "file_path": "scipy\\scipy\\linalg\\_special_matrices.py",
    "ast_data": "FunctionDef name:invpascal arg:n arg:kind arg:exact arguments arg arg arg If Compare Raise Call If Compare If If Compare Assign Assign Assign Assign Call For Call For Call Assign For Call Call Call Assign If Compare Assign Assign Call If Compare Assign Call Call Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_unlift",
    "source_code": "def _unlift(gm: torch.fx.GraphModule, lifted_inputs: Sequence[Optional[str]], mutated_outputs: Sequence[Optional[str]], in_spec: pytree.TreeSpec, out_spec: Optional[pytree.TreeSpec], state_dict: dict[str, Any], constants: dict[str, Any], forward_arg_names: Optional[list[str]]=None):\n    unlifted_name_to_node, input_name_to_node = _unlift_inputs_as_getattr(gm, lifted_inputs)\n    _insert_copy_for_mutations(gm, mutated_outputs, unlifted_name_to_node, input_name_to_node)\n    gm.graph._codegen = _get_codegen(in_spec, out_spec, forward_arg_names)\n    gm.graph.lint()\n    gm.recompile()\n    return gm",
    "docstring": "Args: lifted_inputs: A list matching the graph module's input nodes. For an input node that is referring to a lifted parameter/buffer, this list will contain the fqn the corresponding attribute. Otherwise, this list will contain None. This is used to unlift the lifted parameters as get_attr nodes. mutated_outputs: A list matching the graph module's output nodes. For an output node that is referring to a mutated buffer or user input, this list will contain the name of the corresponding buffer or user input that needs to be mutated. Otherwise, this list will contain None. This is used to re-insert an inplace copy_ operator to copy the mutated values back to the original node.",
    "type": "function",
    "file_path": "pytorch\\torch\\export\\_unlift.py",
    "ast_data": "FunctionDef name:_unlift arg:gm arg:lifted_inputs arg:mutated_outputs arg:in_spec arg:out_spec arg:state_dict arg:constants arg:forward_arg_names arguments arg arg arg arg arg arg arg arg Assign Call Call Assign Call Call Call Return return:yes"
  },
  {
    "library": "pandas",
    "name": "_repr_html_",
    "source_code": "def _repr_html_(self) -> str | None:\n    if get_option('styler.render.repr') == 'html':\n        return self.to_html()\n    return None",
    "docstring": "Hooks into Jupyter notebook rich display system, which calls _repr_html_ by default if an object is returned at the end of a cell.",
    "type": "method",
    "file_path": "pandas\\pandas\\io\\formats\\style.py",
    "ast_data": "FunctionDef name:_repr_html_ arg:self arguments arg If Compare Call Return return:yes Call Return return:no"
  },
  {
    "library": "pytorch",
    "name": "dump_node_schedule",
    "source_code": "def dump_node_schedule(node_schedule: Sequence[BaseSchedulerNode]) -> None:\n    from torch._inductor.codegen.simd import DisableReduction, EnableReduction\n    from torch._inductor.scheduler import SchedulerNode\n    print(f'Node schedule with {len(node_schedule)} nodes')\n    for idx, node in enumerate(node_schedule):\n        print(f' {idx:3}:')\n        if node is EnableReduction:\n            print('enable reduction')\n        elif node is DisableReduction:\n            print('disable reduction')\n        elif isinstance(node, SchedulerNode):\n            is_red = node.is_reduction()\n            print(f'{('red' if is_red else 'pw')} scheduler node')\n            if is_red:\n                assert node.node is not None\n                print(f'original reduction hint {node.node.data.reduction_hint}')\n            print('ReadDep:')\n            for dep in node.read_writes.reads:\n                print(dep)\n            print('WriteDep:')\n            for dep in node.read_writes.writes:\n                print(dep)\n        else:\n            raise RuntimeError(f'Unrecognized node type: {type(node)}')",
    "docstring": "An API that can be used in pdb to dump a node_schedule. Right mainly dump the read/write dependencies but can add more as needed.",
    "type": "function",
    "file_path": "pytorch\\torch\\_inductor\\utils.py",
    "ast_data": "FunctionDef name:dump_node_schedule arg:node_schedule arguments arg Call Call For Call Call If Compare Call If Compare Call If Call Assign Call Call If Compare Call Call For Call Call For Call Raise Call Call"
  },
  {
    "library": "scikit-learn",
    "name": "_clear_state",
    "source_code": "def _clear_state(self):\n    if hasattr(self, 'estimators_'):\n        self.estimators_ = np.empty((0, 0), dtype=object)\n    if hasattr(self, 'train_score_'):\n        del self.train_score_\n    if hasattr(self, 'oob_improvement_'):\n        del self.oob_improvement_\n    if hasattr(self, 'oob_scores_'):\n        del self.oob_scores_\n    if hasattr(self, 'oob_score_'):\n        del self.oob_score_\n    if hasattr(self, 'init_'):\n        del self.init_\n    if hasattr(self, '_rng'):\n        del self._rng",
    "docstring": "Clear the state of the gradient boosting model.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\ensemble\\_gb.py",
    "ast_data": "FunctionDef name:_clear_state arg:self arguments arg If Call Assign Call If Call If Call If Call If Call If Call If Call"
  },
  {
    "library": "tensorflow",
    "name": "on_train_end",
    "source_code": "def on_train_end(self, logs=None):\n    logs = self._process_logs(logs)\n    for callback in self.callbacks:\n        callback.on_train_end(logs)",
    "docstring": "Calls the methods of its callbacks. Args: logs: Dict. Currently no data is passed to this argument for this method but that may change in the future.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\callbacks.py",
    "ast_data": "FunctionDef name:on_train_end arg:self arg:logs arguments arg arg Assign Call For Call"
  },
  {
    "library": "tensorflow",
    "name": "_call_begin_hook",
    "source_code": "def _call_begin_hook(self, mode):\n    if mode == ModeKeys.TRAIN:\n        self.on_train_begin()\n    elif mode == ModeKeys.TEST:\n        self.on_test_begin()\n    else:\n        self.on_predict_begin()",
    "docstring": "Helper function for on_{train|test|predict}_begin methods.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\callbacks.py",
    "ast_data": "FunctionDef name:_call_begin_hook arg:self arg:mode arguments arg arg If Compare Call If Compare Call Call"
  },
  {
    "library": "pytorch",
    "name": "exec_summary_df",
    "source_code": "def exec_summary_df(self, fn, metric):\n    cols = {}\n    cols['Compiler'] = self.compilers\n    for suite in self.suites:\n        df = self.parsed_frames[suite][metric]\n        speedups = [fn(compiler, df) for compiler in self.compilers]\n        col = pd.Series(data=speedups, index=self.compilers)\n        cols[suite] = col\n    df = pd.DataFrame(cols)\n    df = df.fillna(0)\n    df.to_csv(os.path.join(self.output_dir, f'{fn.__name__}.csv'))\n    return df",
    "docstring": "Generate a table with passrate and geomean perf",
    "type": "method",
    "file_path": "pytorch\\benchmarks\\dynamo\\runner.py",
    "ast_data": "FunctionDef name:exec_summary_df arg:self arg:fn arg:metric arguments arg arg arg Assign Assign For Assign Assign Call Assign Call Assign Assign Call Assign Call Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_determine_prefix",
    "source_code": "def _determine_prefix(files: list[str]) -> str:\n    possible_prefixes: defaultdict[str, set[int]] = defaultdict(set)\n    for f in files:\n        m = exp.search(f)\n        if m:\n            p, r = m.groups()\n            possible_prefixes[p].add(int(r))\n    if len(possible_prefixes) == 1:\n        prefix = next(iter(possible_prefixes))\n        logger.debug('Inferred common prefix %s', prefix)\n        return prefix\n    else:\n        raise ValueError('Unable to automatically determine the common prefix for the trace file names. Please specify --prefix argument manually')",
    "docstring": "If the user doesn't specify a prefix, but does pass a dir full of similarly-prefixed files, we should be able to infer the common prefix most of the time. But if we can't confidently infer, just fall back to requring the user to specify it",
    "type": "function",
    "file_path": "pytorch\\tools\\flight_recorder\\components\\loader.py",
    "ast_data": "FunctionDef name:_determine_prefix arg:files arguments arg Call For Assign Call If Assign Call Call Call If Compare Call Assign Call Call Call Return return:yes Raise Call"
  },
  {
    "library": "matplotlib",
    "name": "_press",
    "source_code": "def _press(self, event):\n    if self._ids_zoom:\n        self._cancel_action()\n    if event.button == 1:\n        self._button_pressed = 1\n    elif event.button == 3:\n        self._button_pressed = 3\n    else:\n        self._cancel_action()\n        return\n    x, y = (event.x, event.y)\n    self._xypress = []\n    for i, a in enumerate(self.figure.get_axes()):\n        if x is not None and y is not None and a.in_axes(event) and a.get_navigate() and a.can_zoom():\n            self._xypress.append((x, y, a, i, a._get_view()))\n    id1 = self.figure.canvas.mpl_connect('motion_notify_event', self._mouse_move)\n    id2 = self.figure.canvas.mpl_connect('key_press_event', self._switch_on_zoom_mode)\n    id3 = self.figure.canvas.mpl_connect('key_release_event', self._switch_off_zoom_mode)\n    self._ids_zoom = (id1, id2, id3)\n    self._zoom_mode = event.key",
    "docstring": "Callback for mouse button presses in zoom-to-rectangle mode.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\backend_tools.py",
    "ast_data": "FunctionDef name:_press arg:self arg:event arguments arg arg If Call If Compare Assign If Compare Assign Call Return return:no Assign Assign For Call Call If BoolOp Compare Compare Call Call Call Call Call Assign Call Assign Call Assign Call Assign Assign"
  },
  {
    "library": "pytorch",
    "name": "elapsed_us",
    "source_code": "def elapsed_us(self):\n    return self.end - self.start",
    "docstring": "Returns the length of the interval",
    "type": "method",
    "file_path": "pytorch\\torch\\autograd\\profiler_util.py",
    "ast_data": "FunctionDef name:elapsed_us arg:self arguments arg Return return:yes"
  },
  {
    "library": "django",
    "name": "process_files",
    "source_code": "def process_files(self, file_list):\n    file_groups = {}\n    for translatable in file_list:\n        file_group = file_groups.setdefault(translatable.locale_dir, [])\n        file_group.append(translatable)\n    for locale_dir, files in file_groups.items():\n        self.process_locale_dir(locale_dir, files)",
    "docstring": "Group translatable files by locale directory and run pot file build process for each group.",
    "type": "method",
    "file_path": "django\\django\\core\\management\\commands\\makemessages.py",
    "ast_data": "FunctionDef name:process_files arg:self arg:file_list arguments arg arg Assign For Assign Call Call For Call Call"
  },
  {
    "library": "matplotlib",
    "name": "get_markerfacecoloralt",
    "source_code": "def get_markerfacecoloralt(self):\n    return self._get_markerfacecolor(alt=True)",
    "docstring": "Return the alternate marker face color. See also .",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\lines.py",
    "ast_data": "FunctionDef name:get_markerfacecoloralt arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "sparse_read",
    "source_code": "def sparse_read(self, indices, name=None):\n    with ops.name_scope('Gather' if name is None else name) as name:\n        variable_accessed(self)\n        value = gen_resource_variable_ops.resource_gather(self.handle, indices, dtype=self._dtype, name=name)\n        if self._dtype == dtypes.variant:\n            handle_data = get_eager_safe_handle_data(self.handle)\n            if handle_data.is_set and len(handle_data.shape_and_type) > 1:\n                value._handle_data = cpp_shape_inference_pb2.CppShapeInferenceResult.HandleData(is_set=True, shape_and_type=handle_data.shape_and_type[1:])\n            return array_ops.identity(value)\n    return value",
    "docstring": "Reads the value of this variable sparsely, using .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\resource_variable_ops.py",
    "ast_data": "FunctionDef name:sparse_read arg:self arg:indices arg:name arguments arg arg arg With Call Compare Call Assign Call If Compare Assign Call If BoolOp Compare Call Assign Call Return return:yes Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "__repr__",
    "source_code": "def __repr__(self) -> str:\n    return f'_StridedShard(dim={self.dim}, sf={self.split_factor})'",
    "docstring": "machine readable representation of the _StridedShard placement",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\tensor\\placement_types.py",
    "ast_data": "FunctionDef name:__repr__ arg:self arguments arg Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "set_picker",
    "source_code": "def set_picker(self, p):\n    if not callable(p):\n        self.set_pickradius(p)\n    self._picker = p",
    "docstring": "Set the event picker details for the line. Parameters ---------- p : float or callable[[Artist, Event], tuple[bool, dict]] If a float, it is used as the pick radius in points.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\lines.py",
    "ast_data": "FunctionDef name:set_picker arg:self arg:p arguments arg arg If Call Call Assign"
  },
  {
    "library": "tensorflow",
    "name": "values",
    "source_code": "def values(self):\n    return list(self._primary.values())",
    "docstring": "Returns a list of all functions held by this cache.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\core\\function\\polymorphism\\function_cache.py",
    "ast_data": "FunctionDef name:values arg:self arguments arg Return return:yes Call Call"
  },
  {
    "library": "pandas",
    "name": "_intersection_non_unique",
    "source_code": "def _intersection_non_unique(self, other: IntervalIndex) -> IntervalIndex:\n    mask = np.zeros(len(self), dtype=bool)\n    if self.hasnans and other.hasnans:\n        first_nan_loc = np.arange(len(self))[self.isna()][0]\n        mask[first_nan_loc] = True\n    other_tups = set(zip(other.left, other.right))\n    for i, tup in enumerate(zip(self.left, self.right)):\n        if tup in other_tups:\n            mask[i] = True\n    return self[mask]",
    "docstring": "Used when the IntervalIndex does have some common endpoints, on either sides. Return the intersection with another IntervalIndex. Parameters ---------- other : IntervalIndex Returns ------- IntervalIndex",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\indexes\\interval.py",
    "ast_data": "FunctionDef name:_intersection_non_unique arg:self arg:other arguments arg arg Assign Call Call If BoolOp Assign Call Call Call Assign Assign Call Call For Call Call If Compare Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_check_tf2_flags",
    "source_code": "def _check_tf2_flags(flags):\n    if not flags.keras_model_file and (not flags.saved_model_dir):\n        raise ValueError('one of the arguments --saved_model_dir --keras_model_file is required')",
    "docstring": "Checks the parsed and unparsed flags to ensure they are valid in 2.X. Args: flags: argparse.Namespace object containing TFLite flags. Raises: ValueError: Invalid flags.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\lite\\python\\tflite_convert.py",
    "ast_data": "FunctionDef name:_check_tf2_flags arg:flags arguments arg If BoolOp Raise Call"
  },
  {
    "library": "scikit-learn",
    "name": "_make_validation_split",
    "source_code": "def _make_validation_split(self, y, sample_mask):\n    n_samples = y.shape[0]\n    validation_mask = np.zeros(n_samples, dtype=np.bool_)\n    if not self.early_stopping:\n        return validation_mask\n    if is_classifier(self):\n        splitter_type = StratifiedShuffleSplit\n    else:\n        splitter_type = ShuffleSplit\n    cv = splitter_type(test_size=self.validation_fraction, random_state=self.random_state)\n    idx_train, idx_val = next(cv.split(np.zeros(shape=(y.shape[0], 1)), y))\n    if not np.any(sample_mask[idx_val]):\n        raise ValueError('The sample weights for validation set are all zero, consider using a different random state.')\n    if idx_train.shape[0] == 0 or idx_val.shape[0] == 0:\n        raise ValueError('Splitting %d samples into a train set and a validation set with validation_fraction=%r led to an empty set (%d and %d samples). Please either change validation_fraction, increase number of samples, or disable early_stopping.' % (n_samples, self.validation_fraction, idx_train.shape[0], idx_val.shape[0]))\n    validation_mask[idx_val] = True\n    return validation_mask",
    "docstring": "Split the dataset between training set and validation set. Parameters ---------- y : ndarray of shape (n_samples, ) Target values. sample_mask : ndarray of shape (n_samples, ) A boolean array indicating whether each sample should be included for validation set. Returns ------- validation_mask : ndarray of shape (n_samples, ) Equal to True on the validation set, False on the training set.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\linear_model\\_stochastic_gradient.py",
    "ast_data": "FunctionDef name:_make_validation_split arg:self arg:y arg:sample_mask arguments arg arg arg Assign Assign Call If Return return:yes If Call Assign Assign Assign Call Assign Call Call Call If Call Raise Call If BoolOp Compare Compare Raise Call Assign Return return:yes"
  },
  {
    "library": "sphinx",
    "name": "get_signature_prefix",
    "source_code": "def get_signature_prefix(self, sig: str) -> Sequence[nodes.Node]:\n    return []",
    "docstring": "May return a prefix to put before the object name in the signature.",
    "type": "method",
    "file_path": "sphinx\\sphinx\\domains\\python\\_object.py",
    "ast_data": "FunctionDef name:get_signature_prefix arg:self arg:sig arguments arg arg Return return:no"
  },
  {
    "library": "django",
    "name": "test_capability",
    "source_code": "def test_capability(self, capability):\n    return bool(capi.test_capability(self.ptr, force_bytes(capability)))",
    "docstring": "Return a bool indicating whether the this Layer supports the given capability (a string). Valid capability strings include: 'RandomRead', 'SequentialWrite', 'RandomWrite', 'FastSpatialFilter', 'FastFeatureCount', 'FastGetExtent', 'CreateField', 'Transactions', 'DeleteFeature', and 'FastSetNextByIndex'.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\gdal\\layer.py",
    "ast_data": "FunctionDef name:test_capability arg:self arg:capability arguments arg arg Return return:yes Call Call Call"
  },
  {
    "library": "django",
    "name": "SkipFile",
    "source_code": "class SkipFile(UploadFileException):\n    pass",
    "docstring": "This exception is raised by an upload handler that wants to skip a given file.",
    "type": "class",
    "file_path": "django\\django\\core\\files\\uploadhandler.py",
    "ast_data": "ClassDef name:SkipFile"
  },
  {
    "library": "pytorch",
    "name": "_generate_output",
    "source_code": "def _generate_output(self) -> None:\n    output_nodes = [self._generate_buffer(node) for idx, node in enumerate(V.graph.graph_outputs)]\n    output_value = output_nodes[0] if len(output_nodes) == 1 else output_nodes\n    self.gm.graph.output(output_value)",
    "docstring": "Generate FX IR for graph outputs.",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\codegen\\wrapper_fxir.py",
    "ast_data": "FunctionDef name:_generate_output arg:self arguments arg Assign Call Call Assign Compare Call Call"
  },
  {
    "library": "pytorch",
    "name": "create_winfo",
    "source_code": "@classmethod\ndef create_winfo(cls, st: torch.UntypedStorage, device: torch.device, reftype: _RefType, callback: Optional[Callable[[Self, weakref.ref], Any]]=None) -> tuple[Self, weakref.ref]:\n    winfo = cls(st.size(), st.element_size(), device, reftype)\n    w_st = weakref.ref(st, partial(callback, winfo) if callback else None)\n    return (winfo, w_st)",
    "docstring": "Creates a new `` instance and the weak reference to the storage object. The weak reference may have an attached callback if provided.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\_tools\\mem_tracker.py",
    "ast_data": "FunctionDef name:create_winfo arg:cls arg:st arg:device arg:reftype arg:callback arguments arg arg arg arg arg Assign Call Call Call Assign Call Call Return return:yes"
  },
  {
    "library": "kornia",
    "name": "oneway_transfer_error",
    "source_code": "def oneway_transfer_error(pts1: Tensor, pts2: Tensor, H: Tensor, squared: bool=True, eps: float=1e-08) -> Tensor:\n    KORNIA_CHECK_SHAPE(H, ['B', '3', '3'])\n    if pts1.size(-1) == 3:\n        pts1 = convert_points_from_homogeneous(pts1)\n    if pts2.size(-1) == 3:\n        pts2 = convert_points_from_homogeneous(pts2)\n    pts1_in_2: Tensor = transform_points(H, pts1)\n    error_squared: Tensor = (pts1_in_2 - pts2).pow(2).sum(dim=-1)\n    if squared:\n        return error_squared\n    return (error_squared + eps).sqrt()",
    "docstring": "Return transfer error in image 2 for correspondences given the homography matrix. Args: pts1: correspondences from the left images with shape (B, N, 2 or 3). If they are homogeneous, converted automatically. pts2: correspondences from the right images with shape (B, N, 2 or 3). If they are homogeneous, converted automatically. H: Homographies with shape :math:. squared: if True (default), the squared distance is returned. eps: Small constant for safe sqrt. Returns: the computed distance with shape :math:.",
    "type": "function",
    "file_path": "kornia\\kornia\\geometry\\homography.py",
    "ast_data": "FunctionDef name:oneway_transfer_error arg:pts1 arg:pts2 arg:H arg:squared arg:eps arguments arg arg arg arg arg Call If Compare Call Assign Call If Compare Call Assign Call Call Call Call If Return return:yes Return return:yes Call"
  },
  {
    "library": "seaborn",
    "name": "default_range",
    "source_code": "@property\ndef default_range(self) -> tuple[float, float]:\n    base = mpl.rcParams['lines.linewidth']\n    return (base * 0.5, base * 2)",
    "docstring": "Min and max values used by default for semantic mapping.",
    "type": "method",
    "file_path": "seaborn\\seaborn\\_core\\properties.py",
    "ast_data": "FunctionDef name:default_range arg:self arguments arg Assign Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "CooperativeReductionWorkspaceCache",
    "source_code": "class CooperativeReductionWorkspaceCache:\n\n    def __init__(self, args):\n        self.args = args\n        self.current_loop = []\n        self.prior_loop = []\n        self.ready_for_reuse = collections.defaultdict(collections.deque)\n        self.loop_count = 0\n        self.store_count = 0\n\n    def allocate(self, nbytes: sympy.Expr):\n        cached = self.ready_for_reuse.get(nbytes)\n        if cached:\n            return cached.popleft()\n        ws_name, ws_offset = self.args.workspace(nbytes, False)\n        self.current_loop.append((nbytes, ws_name, ws_offset))\n        return (ws_name, ws_offset)\n\n    def on_loop_end(self):\n        for nbytes, ws_name, ws_offset in self.prior_loop:\n            self.ready_for_reuse[nbytes].append((ws_name, ws_offset))\n        self.prior_loop = self.current_loop\n        self.current_loop = []\n        self.loop_count += 1\n\n    def increment_store_count(self):\n        prior = self.store_count\n        self.store_count += 1\n        return prior",
    "docstring": "The scratch space used for cooperative reductions can be reused after two reduction loops. This keeps track of what can be reused.",
    "type": "class",
    "file_path": "pytorch\\torch\\_inductor\\codegen\\triton.py",
    "ast_data": "ClassDef name:CooperativeReductionWorkspaceCache FunctionDef name:__init__ arg:self arg:args arguments arg arg Assign Assign Assign Assign Call Assign Assign FunctionDef name:allocate arg:self arg:nbytes arguments arg arg Assign Call If Return return:yes Call Assign Call Call Return return:yes FunctionDef name:on_loop_end arg:self arguments arg For Call Assign Assign FunctionDef name:increment_store_count arg:self arguments arg Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "trainable_variables_parameter",
    "source_code": "@staticmethod\ndef trainable_variables_parameter():\n    return {'max_depth': 10000, 'min_bytes': 0, 'min_micros': 0, 'min_params': 0, 'min_float_ops': 0, 'min_occurrence': 0, 'order_by': 'name', 'account_type_regexes': [tfprof_logger.TRAINABLE_VARIABLES], 'start_name_regexes': ['.*'], 'trim_name_regexes': [], 'show_name_regexes': ['.*'], 'hide_name_regexes': [], 'account_displayed_op_only': True, 'select': ['params'], 'step': -1, 'output': 'stdout'}",
    "docstring": "Options used to profile trainable variable parameters. Normally used together with 'scope' view. Returns: A dict of profiling options.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\profiler\\option_builder.py",
    "ast_data": "FunctionDef name:trainable_variables_parameter arguments Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_indicator_column",
    "source_code": "def _indicator_column(categorical_column):\n    return _IndicatorColumn(categorical_column)",
    "docstring": "Represents multi-hot representation of given categorical column. - For DNN model, can be used to wrap any (e.g., to feed to DNN). Consider to Use if the number of buckets/unique(values) are large. - For Wide (aka linear) model, is the internal representation for categorical column when passing categorical column directly (as any element in feature_columns) to . See for details. Args: categorical_column: A which is created by or functions. Returns: An .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\feature_column\\feature_column.py",
    "ast_data": "FunctionDef name:_indicator_column arg:categorical_column arguments arg Return return:yes Call"
  },
  {
    "library": "numpy",
    "name": "_scalar_heuristic",
    "source_code": "def _scalar_heuristic(arr, elem):\n    if not isinstance(elem, np.ndarray):\n        return True\n    elif arr.dtype.type is np.object_:\n        if arr.dtype is not elem.dtype:\n            return True\n    elif type(arr).__getitem__ == ndarray.__getitem__:\n        return False\n    return None",
    "docstring": "Return whether is a scalar result of indexing , or None if undecidable without promoting nomask to a full mask",
    "type": "method",
    "file_path": "numpy\\numpy\\ma\\core.py",
    "ast_data": "FunctionDef name:_scalar_heuristic arg:arr arg:elem arguments arg arg If Call Return return:yes If Compare If Compare Return return:yes If Compare Call Return return:yes Return return:no"
  },
  {
    "library": "scipy",
    "name": "euclidean",
    "source_code": "def euclidean(u, v, w=None):\n    return minkowski(u, v, p=2, w=w)",
    "docstring": "Computes the Euclidean distance between two 1-D arrays. The Euclidean distance between 1-D arrays and , is defined as .. math:: {\\|u-v\\|}_2 \\left(\\sum{(w_i |(u_i - v_i)|^2)}\\right)^{1/2} Parameters ---------- u : (N,) array_like Input array. v : (N,) array_like Input array. w : (N,) array_like, optional The weights for each value in and . Default is None, which gives each value a weight of 1.0 Returns ------- euclidean : double The Euclidean distance between vectors and . Examples -------- >>> from scipy.spatial import distance >>> distance.euclidean([1, 0, 0], [0, 1, 0]) 1.4142135623730951 >>> distance.euclidean([1, 1, 0], [0, 1, 0]) 1.0",
    "type": "function",
    "file_path": "scipy\\scipy\\spatial\\distance.py",
    "ast_data": "FunctionDef name:euclidean arg:u arg:v arg:w arguments arg arg arg Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "get_bottom_margin_bbox",
    "source_code": "def get_bottom_margin_bbox(self, rows=0, cols=0):\n    rows = np.atleast_1d(rows)\n    cols = np.atleast_1d(cols)\n    bbox = Bbox.from_extents(self.lefts[cols[0]].value(), self.bottoms[rows[-1]].value() + self.margins['bottomcb'][rows[-1]].value(), self.rights[cols[-1]].value(), self.bottoms[rows[-1]].value() + self.margins['bottom'][rows[-1]].value() + self.margins['bottomcb'][rows[-1]].value())\n    return bbox",
    "docstring": "Return the left margin bounding box of the subplot specs given by rows and cols. rows and cols can be spans.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\_layoutgrid.py",
    "ast_data": "FunctionDef name:get_bottom_margin_bbox arg:self arg:rows arg:cols arguments arg arg arg Assign Call Assign Call Assign Call Call Call Call Call Call Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "value",
    "source_code": "def value(self):\n    return pywrap_tfe.TFE_MonitoringIntGaugeCellValue(self._cell)",
    "docstring": "Retrieves the current value.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\monitoring.py",
    "ast_data": "FunctionDef name:value arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "replace_iterable_params",
    "source_code": "def replace_iterable_params(args, kwargs, iterable_params):\n    args = list(args)\n    for name, index in iterable_params:\n        if index < len(args):\n            args[index] = list(args[index])\n        elif name in kwargs:\n            kwargs[name] = list(kwargs[name])\n    return (tuple(args), kwargs)",
    "docstring": "Returns (args, kwargs) with any iterable parameters converted to lists. Args: args: Positional rguments to a function kwargs: Keyword arguments to a function. iterable_params: A list of (name, index) tuples for iterable parameters. Returns: A tuple (args, kwargs), where any positional or keyword parameters in have their value converted to a .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\util\\dispatch.py",
    "ast_data": "FunctionDef name:replace_iterable_params arg:args arg:kwargs arg:iterable_params arguments arg arg arg Assign Call For If Compare Call Assign Call If Compare Assign Call Return return:yes Call"
  },
  {
    "library": "sphinx",
    "name": "Theme",
    "source_code": "class Theme:\n    LATEX_ELEMENTS_KEYS = ['papersize', 'pointsize']\n    UPDATABLE_KEYS = ['papersize', 'pointsize']\n\n    def __init__(self, name: str) -> None:\n        self.name = name\n        self.docclass = name\n        self.wrapperclass = name\n        self.papersize = 'letterpaper'\n        self.pointsize = '10pt'\n        self.toplevel_sectioning = 'chapter'\n\n    def update(self, config: Config) -> None:\n        for key in self.LATEX_ELEMENTS_KEYS:\n            if config.latex_elements.get(key):\n                value = config.latex_elements[key]\n                setattr(self, key, value)\n        for key in self.UPDATABLE_KEYS:\n            if key in config.latex_theme_options:\n                value = config.latex_theme_options[key]\n                setattr(self, key, value)",
    "docstring": "A set of LaTeX configurations.",
    "type": "class",
    "file_path": "sphinx\\sphinx\\builders\\latex\\theming.py",
    "ast_data": "ClassDef name:Theme Assign Assign FunctionDef name:__init__ arg:self arg:name arguments arg arg Assign Assign Assign Assign Assign Assign FunctionDef name:update arg:self arg:config arguments arg arg For If Call Assign Call For If Compare Assign Call"
  },
  {
    "library": "django",
    "name": "build_lookup",
    "source_code": "def build_lookup(self, lookups, lhs, rhs):\n    *transforms, lookup_name = lookups or ['exact']\n    for name in transforms:\n        lhs = self.try_transform(lhs, name, lookups)\n    lookup_class = lhs.get_lookup(lookup_name)\n    if not lookup_class:\n        lhs = self.try_transform(lhs, lookup_name)\n        lookup_name = 'exact'\n        lookup_class = lhs.get_lookup(lookup_name)\n        if not lookup_class:\n            return\n    lookup = lookup_class(lhs, rhs)\n    if lookup.rhs is None and (not lookup.can_use_none_as_rhs):\n        if lookup_name not in ('exact', 'iexact'):\n            raise ValueError('Cannot use None as a query value')\n        return lhs.get_lookup('isnull')(lhs, True)\n    if lookup_name == 'exact' and lookup.rhs == '' and connections[DEFAULT_DB_ALIAS].features.interprets_empty_strings_as_nulls:\n        return lhs.get_lookup('isnull')(lhs, True)\n    return lookup",
    "docstring": "Try to extract transforms and lookup from given lhs. The lhs value is something that works like SQLExpression. The rhs value is what the lookup is going to compare against. The lookups is a list of names to extract using get_lookup() and get_transform().",
    "type": "method",
    "file_path": "django\\django\\db\\models\\sql\\query.py",
    "ast_data": "FunctionDef name:build_lookup arg:self arg:lookups arg:lhs arg:rhs arguments arg arg arg arg Assign BoolOp For Assign Call Assign Call If Assign Call Assign Assign Call If Return return:no Assign Call If BoolOp Compare If Compare Raise Call Return return:yes Call Call If BoolOp Compare Compare Return return:yes Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "is_dtensor",
    "source_code": "@tf_export('experimental.dtensor.is_dtensor', v1=[])\ndef is_dtensor(tensor) -> bool:\n    return _dtensor_device().is_dtensor(tensor)",
    "docstring": "Check whether the input tensor is a DTensor. In Python, a DTensor has the same type as a . This method will let you check and handle the tensor differently if a tf.Tensor is a DTensor. Args: tensor: an object to be checked. Returns: bool, True if the given tensor is a DTensor.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\dtensor\\python\\api.py",
    "ast_data": "FunctionDef name:is_dtensor arg:tensor arguments arg Return return:yes Call Call Call"
  },
  {
    "library": "django",
    "name": "BBContainsLookup",
    "source_code": "@BaseSpatialField.register_lookup\nclass BBContainsLookup(GISLookup):\n    lookup_name = 'bbcontains'",
    "docstring": "The 'bbcontains' operator returns true if A's bounding box completely contains by B's bounding box.",
    "type": "class",
    "file_path": "django\\django\\contrib\\gis\\db\\models\\lookups.py",
    "ast_data": "ClassDef name:BBContainsLookup Assign"
  },
  {
    "library": "matplotlib",
    "name": "set_color",
    "source_code": "def set_color(self, c):\n    self.set_facecolor(c)\n    self.set_edgecolor(c)\n    self.set_hatchcolor(c)",
    "docstring": "Set the edgecolor, facecolor and hatchcolor. .. versionchanged:: 3.11 Now sets the hatchcolor as well. Parameters ---------- c : :mpltype: or list of RGBA tuples See Also -------- Collection.set_facecolor, Collection.set_edgecolor, Collection.set_hatchcolor For setting the facecolor, edgecolor, and hatchcolor individually.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\collections.py",
    "ast_data": "FunctionDef name:set_color arg:self arg:c arguments arg arg Call Call Call"
  },
  {
    "library": "django",
    "name": "_save_m2m",
    "source_code": "def _save_m2m(self):\n    cleaned_data = self.cleaned_data\n    exclude = self._meta.exclude\n    fields = self._meta.fields\n    opts = self.instance._meta\n    for f in chain(opts.many_to_many, opts.private_fields):\n        if not hasattr(f, 'save_form_data'):\n            continue\n        if fields and f.name not in fields:\n            continue\n        if exclude and f.name in exclude:\n            continue\n        if f.name in cleaned_data:\n            f.save_form_data(self.instance, cleaned_data[f.name])",
    "docstring": "Save the many-to-many fields and generic relations for this form.",
    "type": "method",
    "file_path": "django\\django\\forms\\models.py",
    "ast_data": "FunctionDef name:_save_m2m arg:self arguments arg Assign Assign Assign Assign For Call If Call If BoolOp Compare If BoolOp Compare If Compare Call"
  },
  {
    "library": "tensorflow",
    "name": "_ReshapeGrad",
    "source_code": "@ops.RegisterGradient('Reshape')\ndef _ReshapeGrad(op: ops.Operation, grad):\n    input_shape = op.inputs[0].shape\n    if input_shape.rank is not None and (not input_shape.is_fully_defined()):\n        input_shape_as_list = input_shape.as_list()\n        undefined_dims = []\n        has_zero_dim = False\n        for i, dim in enumerate(input_shape_as_list):\n            if dim is None:\n                undefined_dims.append(i)\n            elif dim == 0:\n                has_zero_dim = True\n        if len(undefined_dims) == 1 and (not has_zero_dim):\n            input_shape_as_list[undefined_dims[0]] = -1\n            return [array_ops.reshape(_IndexedSlicesToTensorNoWarning(grad), input_shape_as_list), None]\n    return [array_ops.reshape(_IndexedSlicesToTensorNoWarning(grad), array_ops.shape(op.inputs[0])), None]",
    "docstring": "Defines the gradient for .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\array_grad.py",
    "ast_data": "FunctionDef name:_ReshapeGrad arg:op arg:grad arguments arg arg Assign If BoolOp Compare Call Assign Call Assign Assign For Call If Compare Call If Compare Assign If BoolOp Compare Call Assign Return return:yes Call Call Return return:yes Call Call Call Call"
  },
  {
    "library": "scikit-learn",
    "name": "_modified_weiszfeld_step",
    "source_code": "def _modified_weiszfeld_step(X, x_old):\n    diff = X - x_old\n    diff_norm = np.sqrt(np.sum(diff ** 2, axis=1))\n    mask = diff_norm >= _EPSILON\n    is_x_old_in_X = int(mask.sum() < X.shape[0])\n    diff = diff[mask]\n    diff_norm = diff_norm[mask][:, np.newaxis]\n    quotient_norm = linalg.norm(np.sum(diff / diff_norm, axis=0))\n    if quotient_norm > _EPSILON:\n        new_direction = np.sum(X[mask, :] / diff_norm, axis=0) / np.sum(1 / diff_norm, axis=0)\n    else:\n        new_direction = 1.0\n        quotient_norm = 1.0\n    return max(0.0, 1.0 - is_x_old_in_X / quotient_norm) * new_direction + min(1.0, is_x_old_in_X / quotient_norm) * x_old",
    "docstring": "Modified Weiszfeld step. This function defines one iteration step in order to approximate the spatial median (L1 median). It is a form of an iteratively re-weighted least squares method. Parameters ---------- X : array-like of shape (n_samples, n_features) Training vector, where is the number of samples and is the number of features. x_old : ndarray of shape = (n_features,) Current start vector. Returns ------- x_new : ndarray of shape (n_features,) New iteration step. References ---------- - On Computation of Spatial Median for Robust Data Mining, 2005 T. Kärkkäinen and S. Äyrämö",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\linear_model\\_theil_sen.py",
    "ast_data": "FunctionDef name:_modified_weiszfeld_step arg:X arg:x_old arguments arg arg Assign Assign Call Call Assign Compare Assign Call Compare Call Assign Assign Assign Call Call If Compare Assign Call Call Assign Assign Return return:yes Call Call"
  },
  {
    "library": "cherrypy",
    "name": "check_config_types",
    "source_code": "def check_config_types(self):\n    self._known_types(cherrypy.config)\n    for sn, app in cherrypy.tree.apps.items():\n        if not isinstance(app, cherrypy.Application):\n            continue\n        self._known_types(app.config)",
    "docstring": "Assert that config values are of the same type as default values.",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\_cpchecker.py",
    "ast_data": "FunctionDef name:check_config_types arg:self arguments arg Call For Call If Call Call"
  },
  {
    "library": "tensorflow",
    "name": "on_execution",
    "source_code": "def on_execution(self, execution_index, execution):\n    pass",
    "docstring": "Monitor method for top-level execution events. Return values (if any) are ignored by the associated DebugDataReader. Args: execution_index: The index of the top-level execution event, as an int. execution: An Execution data object, for a top-level op or function execution event.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\debug\\lib\\debug_events_monitors.py",
    "ast_data": "FunctionDef name:on_execution arg:self arg:execution_index arg:execution arguments arg arg arg"
  },
  {
    "library": "matplotlib",
    "name": "set_extremes",
    "source_code": "def set_extremes(self, *, bad=None, under=None, over=None):\n    if bad is not None:\n        self.set_bad(bad)\n    if under is not None:\n        self.set_under(under)\n    if over is not None:\n        self.set_over(over)",
    "docstring": "Set the colors for masked (*bad*) values and, when ``, low (*under*) and high (*over*) out-of-range values.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\colors.py",
    "ast_data": "FunctionDef name:set_extremes arg:self arguments arg arg arg arg If Compare Call If Compare Call If Compare Call"
  },
  {
    "library": "scikit-learn",
    "name": "transform",
    "source_code": "def transform(self, X):\n    xp, _ = get_namespace(X, self.components_, self.explained_variance_)\n    check_is_fitted(self)\n    X = validate_data(self, X, dtype=[xp.float64, xp.float32], accept_sparse=('csr', 'csc'), reset=False)\n    return self._transform(X, xp=xp, x_is_centered=False)",
    "docstring": "Apply dimensionality reduction to X. X is projected on the first principal components previously extracted from a training set. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) New data, where is the number of samples and is the number of features. Returns ------- X_new : array-like of shape (n_samples, n_components) Projection of X in the first principal components, where is the number of samples and is the number of the components.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\decomposition\\_base.py",
    "ast_data": "FunctionDef name:transform arg:self arg:X arguments arg arg Assign Call Call Assign Call Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "right_multiply",
    "source_code": "def right_multiply(J, d, copy=True):\n    if copy and (not isinstance(J, LinearOperator)):\n        J = J.copy()\n    if issparse(J):\n        J.data *= d.take(J.indices, mode='clip')\n    elif isinstance(J, LinearOperator):\n        J = right_multiplied_operator(J, d)\n    else:\n        J *= d\n    return J",
    "docstring": "Compute J diag(d). If is False, is modified in place (unless being LinearOperator).",
    "type": "function",
    "file_path": "scipy\\scipy\\optimize\\_lsq\\common.py",
    "ast_data": "FunctionDef name:right_multiply arg:J arg:d arg:copy arguments arg arg arg If BoolOp Call Assign Call If Call Call If Call Assign Call Return return:yes"
  },
  {
    "library": "django",
    "name": "b",
    "source_code": "def b(self):\n    return MONTHS_3[self.data.month]",
    "docstring": "Month, textual, 3 letters, lowercase; e.g. 'jan'",
    "type": "method",
    "file_path": "django\\django\\utils\\dateformat.py",
    "ast_data": "FunctionDef name:b arg:self arguments arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_backport_for_mobile",
    "source_code": "def _backport_for_mobile(f_input, f_output, to_version):\n    if isinstance(f_input, (str, os.PathLike)):\n        if not os.path.exists(f_input):\n            raise ValueError(f'The provided filename {f_input} does not exist')\n        if os.path.isdir(f_input):\n            raise ValueError(f'The provided filename {f_input} is a directory')\n    if isinstance(f_input, (str, os.PathLike)) and isinstance(f_output, (str, os.PathLike)):\n        return torch._C._backport_for_mobile(os.fspath(f_input), os.fspath(f_output), to_version)\n    else:\n        return torch._C._backport_for_mobile_from_buffer(f_input.read(), str(f_output), to_version)",
    "docstring": "Take a input string containing a file name (file-like object) and a new destination to return a boolean. Args: f_input: a file-like object (has to implement read, readline, tell, and seek), or a string containing a file name f_output: path to new model destination to_version: the expected output model bytecode version Returns: success: A boolean. If backport success, return true, otherwise false",
    "type": "function",
    "file_path": "pytorch\\torch\\jit\\mobile\\__init__.py",
    "ast_data": "FunctionDef name:_backport_for_mobile arg:f_input arg:f_output arg:to_version arguments arg arg arg If Call If Call Raise Call If Call Raise Call If BoolOp Call Call Return return:yes Call Call Call Return return:yes Call Call Call"
  },
  {
    "library": "scipy",
    "name": "EggCrate",
    "source_code": "class EggCrate(Benchmark):\n\n    def __init__(self, dimensions=2):\n        Benchmark.__init__(self, dimensions)\n        self._bounds = list(zip([-5.0] * self.N, [5.0] * self.N))\n        self.global_optimum = [[0.0, 0.0]]\n        self.fglob = 0.0\n\n    def fun(self, x, *args):\n        self.nfev += 1\n        return x[0] ** 2 + x[1] ** 2 + 25 * (sin(x[0]) ** 2 + sin(x[1]) ** 2)",
    "docstring": "Egg Crate objective function. This class defines the Egg Crate [1]_ global optimization problem. This is a multimodal minimization problem defined as follows: .. math:: f_{\\text{EggCrate}}(x) = x_1^2 + x_2^2 + 25 \\left[ \\sin^2(x_1) + \\sin^2(x_2) \\right] with :math: for :math:. *Global optimum*: :math: for :math: .. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions For Global Optimization Problems Int. Journal of Mathematical Modelling and Numerical Optimisation, 2013, 4, 150-194.",
    "type": "class",
    "file_path": "scipy\\benchmarks\\benchmarks\\go_benchmark_functions\\go_funcs_E.py",
    "ast_data": "ClassDef name:EggCrate FunctionDef name:__init__ arg:self arg:dimensions arguments arg arg Call Assign Call Call Assign Assign FunctionDef name:fun arg:self arg:x arguments arg arg arg Return return:yes Call Call"
  },
  {
    "library": "matplotlib",
    "name": "set_width",
    "source_code": "def set_width(self, width):\n    self.width = width\n    self.stale = True",
    "docstring": "Set the width of the box. Parameters ---------- width : float",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\offsetbox.py",
    "ast_data": "FunctionDef name:set_width arg:self arg:width arguments arg arg Assign Assign"
  },
  {
    "library": "numpy",
    "name": "config_openblas",
    "source_code": "@click.command(context_settings={'ignore_unknown_options': True})\n@click.option('--with-scipy-openblas', type=click.Choice(['32', '64']), default=None, required=True, help='Build with pre-installed scipy-openblas32 or scipy-openblas64 wheel')\ndef config_openblas(with_scipy_openblas):\n    _config_openblas(with_scipy_openblas)",
    "docstring": "🔧 Create .openblas/scipy-openblas.pc file Also create _distributor_init_local.py Requires a pre-installed scipy-openblas64 or scipy-openblas32",
    "type": "function",
    "file_path": "numpy\\.spin\\cmds.py",
    "ast_data": "FunctionDef name:config_openblas arg:with_scipy_openblas arguments arg Call Call Call Call"
  },
  {
    "library": "kornia",
    "name": "fy",
    "source_code": "@property\ndef fy(self) -> Tensor:\n    return self.intrinsics[..., 1, 1]",
    "docstring": "Return the focal length in the y-direction. Returns: tensor of shape :math:.",
    "type": "method",
    "file_path": "kornia\\kornia\\geometry\\camera\\pinhole.py",
    "ast_data": "FunctionDef name:fy arg:self arguments arg Return return:yes"
  },
  {
    "library": "authlib",
    "name": "validate_request_object_encryption_enc",
    "source_code": "def validate_request_object_encryption_enc(self):\n    if self.get('request_object_encryption_enc') and (not self.get('request_object_encryption_alg')):\n        raise InvalidClaimError('request_object_encryption_enc')\n    if self.get('request_object_encryption_alg'):\n        self.setdefault('request_object_encryption_enc', 'A128CBC-HS256')\n    self._validate_claim_value('request_object_encryption_enc')",
    "docstring": "JWE enc algorithm [JWA] the RP is declaring that it may use for encrypting Request Objects sent to the OP. If request_object_encryption_alg is specified, the default request_object_encryption_enc value is A128CBC-HS256. When request_object_encryption_enc is included, request_object_encryption_alg MUST also be provided.",
    "type": "method",
    "file_path": "authlib\\authlib\\oidc\\registration\\claims.py",
    "ast_data": "FunctionDef name:validate_request_object_encryption_enc arg:self arguments arg If BoolOp Call Call Raise Call If Call Call Call"
  },
  {
    "library": "scikit-learn",
    "name": "is_jax_array",
    "source_code": "def is_jax_array(x: object) -> TypeIs[jax.Array]:\n    cls = cast(Hashable, type(x))\n    return _issubclass_fast(cls, 'jax', 'Array') or _is_jax_zero_gradient_array(x)",
    "docstring": "Return True if is a JAX array. This function does not import JAX if it has not already been imported and is therefore cheap to use. See Also -------- array_namespace is_array_api_obj is_numpy_array is_cupy_array is_torch_array is_ndonnx_array is_dask_array is_pydata_sparse_array",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\externals\\array_api_compat\\common\\_helpers.py",
    "ast_data": "FunctionDef name:is_jax_array arg:x arguments arg Assign Call Call Return return:yes BoolOp Call Call"
  },
  {
    "library": "tensorflow",
    "name": "assert_cardinality",
    "source_code": "@tf_export('data.experimental.assert_cardinality')\ndef assert_cardinality(expected_cardinality):\n\n    def _apply_fn(dataset):\n        return _AssertCardinalityDataset(dataset, expected_cardinality)\n    return _apply_fn",
    "docstring": "Asserts the cardinality of the input dataset. NOTE: The following assumes that \"examples.tfrecord\" contains 42 records. >>> dataset = tf.data.TFRecordDataset(\"examples.tfrecord\") >>> cardinality = tf.data.experimental.cardinality(dataset) >>> print((cardinality == tf.data.experimental.UNKNOWN_CARDINALITY).numpy()) True >>> dataset = dataset.apply(tf.data.experimental.assert_cardinality(42)) >>> print(tf.data.experimental.cardinality(dataset).numpy()) 42 Args: expected_cardinality: The expected cardinality of the input dataset. Returns: A transformation function, which can be passed to . Raises: FailedPreconditionError: The assertion is checked at runtime (when iterating the dataset) and an error is raised if the actual and expected cardinality differ.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\data\\experimental\\ops\\cardinality.py",
    "ast_data": "FunctionDef name:assert_cardinality arg:expected_cardinality arguments arg FunctionDef name:_apply_fn arg:dataset arguments arg Return return:yes Call Return return:yes Call"
  },
  {
    "library": "kornia",
    "name": "forward",
    "source_code": "def forward(self, x: Tensor, include_fit: bool=False) -> Tensor:\n    if include_fit:\n        self.fit(x)\n    if not self.fitted:\n        raise RuntimeError('Needs to be fitted first before running. Please call fit or set include_fit to True.')\n    x_whiten = linear_transform(x, self.transform_matrix, self.mean_vector, self.dim)\n    return x_whiten",
    "docstring": "Apply the whitening transform to the data. Args: x: Input data. include_fit: Indicates whether to fit the data as part of the forward pass. Returns: The transformed data.",
    "type": "method",
    "file_path": "kornia\\kornia\\enhance\\zca.py",
    "ast_data": "FunctionDef name:forward arg:self arg:x arg:include_fit arguments arg arg arg If Call If Raise Call Assign Call Return return:yes"
  },
  {
    "library": "sphinx",
    "name": "Matcher",
    "source_code": "class Matcher:\n\n    def __init__(self, exclude_patterns: Iterable[str]) -> None:\n        expanded = [pat[3:] for pat in exclude_patterns if pat.startswith('**/')]\n        self.patterns = compile_matchers(list(exclude_patterns) + expanded)\n\n    def __call__(self, string: str) -> bool:\n        return self.match(string)\n\n    def match(self, string: str) -> bool:\n        string = canon_path(string)\n        return any((pat(string) for pat in self.patterns))",
    "docstring": "A pattern matcher for Multiple shell-style glob patterns. Note: this modifies the patterns to work with copy_asset(). For example, \"**/index.rst\" matches with \"index.rst\"",
    "type": "class",
    "file_path": "sphinx\\sphinx\\util\\matching.py",
    "ast_data": "ClassDef name:Matcher FunctionDef name:__init__ arg:self arg:exclude_patterns arguments arg arg Assign Call Assign Call Call FunctionDef name:__call__ arg:self arg:string arguments arg arg Return return:yes Call FunctionDef name:match arg:self arg:string arguments arg arg Assign Call Return return:yes Call Call"
  },
  {
    "library": "django",
    "name": "DjangoHelpFormatter",
    "source_code": "class DjangoHelpFormatter(HelpFormatter):\n    show_last = {'--version', '--verbosity', '--traceback', '--settings', '--pythonpath', '--no-color', '--force-color', '--skip-checks'}\n\n    def _reordered_actions(self, actions):\n        return sorted(actions, key=lambda a: set(a.option_strings) & self.show_last != set())\n\n    def add_usage(self, usage, actions, *args, **kwargs):\n        super().add_usage(usage, self._reordered_actions(actions), *args, **kwargs)\n\n    def add_arguments(self, actions):\n        super().add_arguments(self._reordered_actions(actions))",
    "docstring": "Customized formatter so that command-specific arguments appear in the --help output before arguments common to all commands.",
    "type": "class",
    "file_path": "django\\django\\core\\management\\base.py",
    "ast_data": "ClassDef name:DjangoHelpFormatter Assign FunctionDef name:_reordered_actions arg:self arg:actions arguments arg arg Return return:yes Call arguments arg Compare Call Call FunctionDef name:add_usage arg:self arg:usage arg:actions arguments arg arg arg arg arg Call Call Call FunctionDef name:add_arguments arg:self arg:actions arguments arg arg Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_raise_if_error",
    "source_code": "def _raise_if_error(self):\n    if self._error:\n        logging.error('Start cancelling closures due to error %r: %s', self._error, self._error)\n        self._cancel_all_closures()\n        try:\n            raise self._error\n        finally:\n            self._error = None",
    "docstring": "Raises the error if one exists. If an error exists, cancel the closures in queue, raises it, and clear the error. This method expects self._queue_lock to be held prior to entry.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\coordinator\\cluster_coordinator.py",
    "ast_data": "FunctionDef name:_raise_if_error arg:self arguments arg If Call Call Try Raise Assign"
  },
  {
    "library": "pytorch",
    "name": "_normalize_size",
    "source_code": "@staticmethod\ndef _normalize_size(x, new_size):\n    sizevars = V.graph.sizevars\n    new_size = list(map(sympy.expand, new_size))\n    old_size = x.get_size()\n    old_size = [None] * (len(new_size) - len(old_size)) + list(old_size)\n    assert len(new_size) == len(old_size)\n    for i in range(len(new_size)):\n        if new_size[i] == -1:\n            assert old_size[i] is not None\n            new_size[i] = old_size[i]\n        elif old_size[i] is None or V.graph.sizevars.shape_env.evaluate_expr(sympy.Eq(old_size[i], 1), size_oblivious=True):\n            pass\n        else:\n            assert sizevars.size_hint(new_size[i] - old_size[i], fallback=0) == 0, 'Broadcast failed in ExpandView({x.get_size()}, {new_size}) on dimension {i}'\n    return new_size",
    "docstring": "Replace with correct sizes",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\ir.py",
    "ast_data": "FunctionDef name:_normalize_size arg:x arg:new_size arguments arg arg Assign Assign Call Call Assign Call Assign Call Call Call Compare Call Call For Call Call If Compare Compare Assign If BoolOp Compare Call Call Compare Call Return return:yes"
  },
  {
    "library": "kornia",
    "name": "__init__",
    "source_code": "def __init__(self, depth: int, embedding_dim: int, num_heads: int, mlp_dim: int, activation: type[Module]=nn.ReLU, attention_downsample_rate: int=2) -> None:\n    super().__init__()\n    self.depth = depth\n    self.embedding_dim = embedding_dim\n    self.num_heads = num_heads\n    self.mlp_dim = mlp_dim\n    self.layers = nn.ModuleList()\n    for i in range(depth):\n        self.layers.append(TwoWayAttentionBlock(embedding_dim=embedding_dim, num_heads=num_heads, mlp_dim=mlp_dim, activation=activation, attention_downsample_rate=attention_downsample_rate, skip_first_layer_pe=i == 0))\n    self.final_attn_token_to_image = Attention(embedding_dim, num_heads, downsample_rate=attention_downsample_rate)\n    self.norm_final_attn = nn.LayerNorm(embedding_dim)",
    "docstring": "Construct a transformer decoder that attends to an input image using queries whose positional embedding is supplied. Args: depth: number of layers in the transformer embedding_dim: the channel dimension for the input embeddings num_heads: the number of heads for multihead attention. Must divide embedding_dim mlp_dim: the channel dimension internal to the MLP block activation: the activation to use in the MLP block attention_downsample_rate: downsampling rate from embedding dimension",
    "type": "method",
    "file_path": "kornia\\kornia\\contrib\\models\\sam\\architecture\\transformer.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:depth arg:embedding_dim arg:num_heads arg:mlp_dim arg:activation arg:attention_downsample_rate arguments arg arg arg arg arg arg arg Call Call Assign Assign Assign Assign Assign Call For Call Call Call Compare Assign Call Assign Call"
  },
  {
    "library": "pygame",
    "name": "render",
    "source_code": "def render(self, text, antialias, color, background=None):\n    if text is None:\n        text = ''\n    if isinstance(text, str) and self.__unull in text:\n        raise ValueError('A null character was found in the text')\n    if isinstance(text, bytes) and self.__bnull in text:\n        raise ValueError('A null character was found in the text')\n    save_antialiased = self.antialiased\n    self.antialiased = bool(antialias)\n    try:\n        s, _ = super().render(text, color, background)\n        return s\n    finally:\n        self.antialiased = save_antialiased",
    "docstring": "render(text, antialias, color, background=None) -> Surface draw text on a new Surface",
    "type": "method",
    "file_path": "pygame\\src_py\\ftfont.py",
    "ast_data": "FunctionDef name:render arg:self arg:text arg:antialias arg:color arg:background arguments arg arg arg arg arg If Compare Assign If BoolOp Call Compare Raise Call If BoolOp Call Compare Raise Call Assign Assign Call Try Assign Call Call Return return:yes Assign"
  },
  {
    "library": "scikit-learn",
    "name": "dtypes",
    "source_code": "@cache\ndef dtypes(self, *, device=None, kind=None):\n    res = self._dtypes(kind)\n    for k, v in res.copy().items():\n        try:\n            torch.empty((0,), dtype=v, device=device)\n        except:\n            del res[k]\n    return res",
    "docstring": "The array API data types supported by PyTorch. Note that this function only returns data types that are defined by the array API. Parameters ---------- device : Device, optional The device to get the data types for. Unused for PyTorch, as all devices use the same dtypes. kind : str or tuple of str, optional The kind of data types to return. If ``. Returns ------- dtypes : dict A dictionary mapping the names of data types to the corresponding PyTorch data types. See Also -------- __array_namespace_info__.capabilities, __array_namespace_info__.default_device, __array_namespace_info__.default_dtypes, __array_namespace_info__.devices Examples -------- >>> info = xp.__array_namespace_info__() >>> info.dtypes(kind='signed integer') {'int8': numpy.int8, 'int16': numpy.int16, 'int32': numpy.int32, 'int64': numpy.int64}",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\externals\\array_api_compat\\torch\\_info.py",
    "ast_data": "FunctionDef name:dtypes arg:self arguments arg arg arg Assign Call For Call Call Try Call ExceptHandler Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_show_signature_def_map_keys",
    "source_code": "def _show_signature_def_map_keys(saved_model_dir, tag_set):\n    signature_def_map = get_signature_def_map(saved_model_dir, tag_set)\n    print('The given SavedModel MetaGraphDef contains SignatureDefs with the following keys:')\n    for signature_def_key in sorted(signature_def_map.keys()):\n        print('SignatureDef key: \"%s\"' % signature_def_key)",
    "docstring": "Prints the keys for each SignatureDef in the SignatureDef map. Prints the list of SignatureDef keys from the SignatureDef map specified by the given tag-set and SavedModel directory. Args: saved_model_dir: Directory containing the SavedModel to inspect. tag_set: Group of tag(s) of the MetaGraphDef to get SignatureDef map from, in string format, separated by ','. For tag-set contains multiple tags, all tags must be passed in.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\tools\\saved_model_cli.py",
    "ast_data": "FunctionDef name:_show_signature_def_map_keys arg:saved_model_dir arg:tag_set arguments arg arg Assign Call Call For Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "size_v2",
    "source_code": "@dispatch.dispatch_for_types(array_ops.size_v2, StructuredTensor)\ndef size_v2(input, out_type=None, name=None):\n    if out_type is None:\n        if flags.config().tf_shape_default_int64.value():\n            out_type = dtypes.int64\n        else:\n            out_type = dtypes.int32\n    return size(input, name=name, out_type=out_type)",
    "docstring": "Returns the size of a tensor.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\structured\\structured_array_ops.py",
    "ast_data": "FunctionDef name:size_v2 arg:input arg:out_type arg:name arguments arg arg arg If Compare If Call Call Assign Assign Return return:yes Call Call"
  },
  {
    "library": "sphinx",
    "name": "desc_returns",
    "source_code": "class desc_returns(desc_type):\n\n    def astext(self) -> str:\n        return ' -> ' + super().astext()",
    "docstring": "Node for a \"returns\" annotation (a la -> in Python).",
    "type": "class",
    "file_path": "sphinx\\sphinx\\addnodes.py",
    "ast_data": "ClassDef name:desc_returns FunctionDef name:astext arg:self arguments arg Return return:yes Call Call"
  },
  {
    "library": "matplotlib",
    "name": "track_glyph",
    "source_code": "def track_glyph(self, font, glyph):\n    self.used.setdefault(font.fname, set()).add(glyph)",
    "docstring": "Record that codepoint *glyph* is being typeset using font *font*.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\backends\\_backend_pdf_ps.py",
    "ast_data": "FunctionDef name:track_glyph arg:self arg:font arg:glyph arguments arg arg arg Call Call Call"
  },
  {
    "library": "kornia",
    "name": "pad",
    "source_code": "def pad(self, padding_size: Tensor) -> Boxes:\n    if not (len(padding_size.shape) == 2 and padding_size.size(1) == 4):\n        raise RuntimeError(f'Expected padding_size as (B, 4). Got {padding_size.shape}.')\n    self._data[..., 0] += padding_size[..., None, :1].to(device=self._data.device)\n    self._data[..., 1] += padding_size[..., None, 2:3].to(device=self._data.device)\n    return self",
    "docstring": "Pad a bounding box. Args: padding_size: (B, 4)",
    "type": "method",
    "file_path": "kornia\\kornia\\geometry\\boxes.py",
    "ast_data": "FunctionDef name:pad arg:self arg:padding_size arguments arg arg If BoolOp Compare Call Compare Call Raise Call Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "matrices_to_flat_transforms",
    "source_code": "def matrices_to_flat_transforms(transform_matrices):\n    with ops.name_scope('matrices_to_flat_transforms'):\n        transform_matrices = ops.convert_to_tensor(transform_matrices, name='transform_matrices')\n        if transform_matrices.shape.ndims not in (2, 3):\n            raise ValueError('Matrices should be 2D or 3D, got: %s' % transform_matrices)\n        transforms = array_ops.reshape(transform_matrices, constant_op.constant([-1, 9]))\n        transforms /= transforms[:, 8:9]\n        return transforms[:, :8]",
    "docstring": "Converts affine matrices to projective transforms. Note that we expect matrices that map output coordinates to input coordinates. To convert forward transformation matrices, call on the matrices and use the result here. Args: transform_matrices: One or more affine transformation matrices, for the reverse transformation in homogeneous coordinates. Shape or . Returns: 2D tensor of flat transforms with shape , which may be passed into . Raises: ValueError: If have an invalid shape.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\image_ops.py",
    "ast_data": "FunctionDef name:matrices_to_flat_transforms arg:transform_matrices arguments arg With Call Assign Call If Compare Raise Call Assign Call Call Return return:yes"
  },
  {
    "library": "scipy",
    "name": "_initial_nodes_b",
    "source_code": "def _initial_nodes_b(n, k):\n    a = n % 2 - 0.5\n    nu = 4.0 * floor(n / 2.0) + 2.0 * a + 2.0\n    ak = _specfun.airyzo(k.max(), 1)[0][::-1]\n    xksq = nu + 2.0 ** (2.0 / 3.0) * ak * nu ** (1.0 / 3.0) + 1.0 / 5.0 * 2.0 ** (4.0 / 3.0) * ak ** 2 * nu ** (-1.0 / 3.0) + (9.0 / 140.0 - 12.0 / 175.0 * ak ** 3) * nu ** (-1.0) + (16.0 / 1575.0 * ak + 92.0 / 7875.0 * ak ** 4) * 2.0 ** (2.0 / 3.0) * nu ** (-5.0 / 3.0) - (15152.0 / 3031875.0 * ak ** 5 + 1088.0 / 121275.0 * ak ** 2) * 2.0 ** (1.0 / 3.0) * nu ** (-7.0 / 3.0)\n    return xksq",
    "docstring": "Gatteschi initial guesses Computes an initial approximation to the square of the kth (positive) root :math: of the Hermite polynomial :math: of order :math:. The formula is the one from lemma 3.2 in the original paper. The guesses are accurate in the region just below :math:. Parameters ---------- n : int Quadrature order k : ndarray of type int Index of roots to compute Returns ------- xksq : ndarray Square of the approximate root See Also -------- initial_nodes roots_hermite_asy",
    "type": "function",
    "file_path": "scipy\\scipy\\special\\_orthogonal.py",
    "ast_data": "FunctionDef name:_initial_nodes_b arg:n arg:k arguments arg arg Assign Assign Call Assign Call Call Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "update",
    "source_code": "def update(self, grads):\n    grads = nest.flatten(grads)\n    if distribute_lib.has_strategy() and distribute_lib.in_cross_replica_context():\n        distribution = distribute_lib.get_strategy()\n        is_finite_per_replica = distribution.extended.call_for_each_replica(_is_all_finite, args=(grads,))\n        is_finite = distribution.experimental_local_results(is_finite_per_replica)[0]\n    else:\n        is_finite = _is_all_finite(grads)\n\n    def update_if_finite_grads():\n\n        def incr_loss_scale():\n            new_loss_scale = self.current_loss_scale * self.multiplier\n            return control_flow_ops.group(_assign_if_finite(self.current_loss_scale, new_loss_scale), self.counter.assign(0))\n        return cond.cond(self.counter + 1 >= self.growth_steps, incr_loss_scale, lambda: _op_in_graph_mode(self.counter.assign_add(1)))\n\n    def update_if_not_finite_grads():\n        new_loss_scale = math_ops.maximum(self.current_loss_scale / self.multiplier, 1)\n        return control_flow_ops.group(self.counter.assign(0), self.current_loss_scale.assign(new_loss_scale))\n    update_op = cond.cond(is_finite, update_if_finite_grads, update_if_not_finite_grads)\n    should_apply_gradients = is_finite\n    return (update_op, should_apply_gradients)",
    "docstring": "Updates the value of the loss scale. Args: grads: A nested structure of unscaled gradients, each which is an all-reduced gradient of the loss with respect to a weight. Returns: update_op: In eager mode, None. In graph mode, an op to update the loss scale. should_apply_gradients: Either a bool or a scalar boolean tensor. If False, the caller should skip applying to the variables this step.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\mixed_precision\\loss_scale_optimizer.py",
    "ast_data": "FunctionDef name:update arg:self arg:grads arguments arg arg Assign Call If BoolOp Call Call Assign Call Assign Call Assign Call Assign Call FunctionDef name:update_if_finite_grads arguments FunctionDef name:incr_loss_scale arguments Assign Return return:yes Call Call Call Return return:yes Call Compare arguments Call Call FunctionDef name:update_if_not_finite_grads arguments Assign Call Return return:yes Call Call Call Assign Call Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_hessian_vector_product",
    "source_code": "def _hessian_vector_product(ys, xs, v):\n    length = len(xs)\n    if len(v) != length:\n        raise ValueError('xs and v must have the same length.')\n    grads = gradients(ys, xs)\n    assert len(grads) == length\n    elemwise_products = [math_ops.multiply(grad_elem, array_ops.stop_gradient(v_elem)) for grad_elem, v_elem in zip(grads, v) if grad_elem is not None]\n    return gradients(elemwise_products, xs)",
    "docstring": "Multiply the Hessian of wrt by . This is an efficient construction that uses a backprop-like approach to compute the product between the Hessian and another vector. The Hessian is usually too large to be explicitly computed or even represented, but this method allows us to at least multiply by it for the same big-O cost as backprop. Implicit Hessian-vector products are the main practical, scalable way of using second derivatives with neural networks. They allow us to do things like construct Krylov subspaces and approximate conjugate gradient descent. Example: if = 1/2 ^T A , then will return an expression that evaluates to the same values as (A + A.T) . Args: ys: A scalar value, or a tensor or list of tensors to be summed to yield a scalar. xs: A list of tensors that we should construct the Hessian over. v: A list of tensors, with the same shapes as xs, that we want to multiply by the Hessian. Returns: A list of tensors (or if the list would be length 1, a single tensor) containing the product between the Hessian and . Raises: ValueError: and have different length.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\gradients_impl.py",
    "ast_data": "FunctionDef name:_hessian_vector_product arg:ys arg:xs arg:v arguments arg arg arg Assign Call If Compare Call Raise Call Assign Call Compare Call Assign Call Call Call Compare Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "gds_deregister_buffer",
    "source_code": "def gds_deregister_buffer(s: Storage) -> None:\n    torch._C._gds_deregister_buffer(s)",
    "docstring": "Deregisters a previously registered storage on a CUDA device as a cufile buffer. Example:: >>> # xdoctest: +SKIP(\"gds filesystem requirements\") >>> src = torch.randn(1024, device=\"cuda\") >>> s = src.untyped_storage() >>> gds_register_buffer(s) >>> gds_deregister_buffer(s) Args: s (Storage): Buffer to register.",
    "type": "function",
    "file_path": "pytorch\\torch\\cuda\\gds.py",
    "ast_data": "FunctionDef name:gds_deregister_buffer arg:s arguments arg Call"
  },
  {
    "library": "django",
    "name": "_apply_rel_filters",
    "source_code": "def _apply_rel_filters(self, queryset):\n    db = self._db or router.db_for_read(self.model, instance=self.instance)\n    return queryset.using(db).filter(**self.core_filters)",
    "docstring": "Filter the queryset for the instance this manager is bound to.",
    "type": "method",
    "file_path": "django\\django\\contrib\\contenttypes\\fields.py",
    "ast_data": "FunctionDef name:_apply_rel_filters arg:self arg:queryset arguments arg arg Assign BoolOp Call Return return:yes Call Call"
  },
  {
    "library": "kornia",
    "name": "Invert",
    "source_code": "class Invert(Module):\n\n    def __init__(self, max_val: Optional[Tensor]=None) -> None:\n        super().__init__()\n        if max_val is None:\n            max_val = torch.tensor(1.0)\n        if not isinstance(max_val, Parameter):\n            self.register_buffer('max_val', max_val)\n        else:\n            self.max_val = max_val\n\n    def forward(self, input: Tensor) -> Tensor:\n        return invert(input, self.max_val)",
    "docstring": "Invert the values of an input tensor by its maximum value. Args: input: The input tensor to invert with an arbitatry shape. max_val: The expected maximum value in the input tensor. The shape has to according to the input tensor shape, or at least has to work with broadcasting. Default: 1.0. Example: >>> img = torch.rand(1, 2, 4, 4) >>> Invert()(img).shape torch.Size([1, 2, 4, 4]) >>> img = 255. * torch.rand(1, 2, 3, 4, 4) >>> Invert(torch.as_tensor(255.))(img).shape torch.Size([1, 2, 3, 4, 4]) >>> img = torch.rand(1, 3, 4, 4) >>> Invert(torch.as_tensor([[[[1.]]]]))(img).shape torch.Size([1, 3, 4, 4])",
    "type": "class",
    "file_path": "kornia\\kornia\\enhance\\adjust.py",
    "ast_data": "ClassDef name:Invert FunctionDef name:__init__ arg:self arg:max_val arguments arg arg Call Call If Compare Assign Call If Call Call Assign FunctionDef name:forward arg:self arg:input arguments arg arg Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "clarkson_woodruff_transform",
    "source_code": "@_transition_to_rng('seed', position_num=2)\ndef clarkson_woodruff_transform(input_matrix, sketch_size, rng=None):\n    if issparse(input_matrix) and input_matrix.ndim > 2:\n        message = 'Batch support for sparse arrays is not available.'\n        raise NotImplementedError(message)\n    S = cwt_matrix(sketch_size, input_matrix.shape[-2], rng=rng)\n    return S @ input_matrix if input_matrix.ndim <= 2 else _batch_dot(input_matrix, S)",
    "docstring": "Applies a Clarkson-Woodruff Transform/sketch to the input matrix. Given an input_matrix `numpy.random.Generatorrngnumpy.random.Generatornumpy.random.Generatornumpy.random.default_rng\\min \\|Ax - b\\|` with high probability. >>> linalg.norm(A @ x - b) 122.83242365433877 >>> linalg.norm(A @ x_sketched - b) 166.58473879945151",
    "type": "function",
    "file_path": "scipy\\scipy\\linalg\\_sketches.py",
    "ast_data": "FunctionDef name:clarkson_woodruff_transform arg:input_matrix arg:sketch_size arg:rng arguments arg arg arg If BoolOp Call Compare Assign Raise Call Assign Call Return return:yes Compare Call Call"
  },
  {
    "library": "scipy",
    "name": "append",
    "source_code": "def append(a, vancestors):\n    add = True\n    for j, va in enumerate(vancestors):\n        if issubclass(va, a):\n            add = False\n            break\n        if issubclass(a, va):\n            vancestors[j] = a\n            add = False\n    if add:\n        vancestors.append(a)",
    "docstring": "Append `` to the list of the virtual ancestors, unless it is already included.",
    "type": "function",
    "file_path": "scipy\\scipy\\_lib\\decorator.py",
    "ast_data": "FunctionDef name:append arg:a arg:vancestors arguments arg arg Assign For Call If Call Assign If Call Assign Assign If Call"
  },
  {
    "library": "cherrypy",
    "name": "__new__",
    "source_code": "def __new__(cls, points=None):\n    d = dict.__new__(cls)\n    for p in points or []:\n        d[p] = []\n    return d",
    "docstring": "Construct a fresh hook map instance.",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\_cprequest.py",
    "ast_data": "FunctionDef name:__new__ arg:cls arg:points arguments arg arg Assign Call For BoolOp Assign Return return:yes"
  },
  {
    "library": "authlib",
    "name": "get_error_uri",
    "source_code": "def get_error_uri(self, request, error):\n    return None",
    "docstring": "Return a URI for the given error, framework may implement this method.",
    "type": "method",
    "file_path": "authlib\\authlib\\oauth2\\rfc6749\\authorization_server.py",
    "ast_data": "FunctionDef name:get_error_uri arg:self arg:request arg:error arguments arg arg arg Return return:no"
  },
  {
    "library": "sphinx",
    "name": "stable_str",
    "source_code": "def stable_str(obj: Any, *, indent: int | None=None) -> str:\n    return json.dumps(_stable_str_prep(obj), indent=indent)",
    "docstring": "Return a stable string representation of a Python data structure. We can't just use `` as the order of collections may be random.",
    "type": "function",
    "file_path": "sphinx\\sphinx\\util\\_serialise.py",
    "ast_data": "FunctionDef name:stable_str arg:obj arguments arg arg Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "onnx_type",
    "source_code": "def onnx_type(self) -> _C_onnx.TensorProtoDataType:\n    if self not in _SCALAR_TYPE_TO_ONNX:\n        raise errors.OnnxExporterError(f'Scalar type {self} cannot be converted to ONNX')\n    return _SCALAR_TYPE_TO_ONNX[self]",
    "docstring": "Convert a JitScalarType to an ONNX data type.",
    "type": "method",
    "file_path": "pytorch\\torch\\onnx\\_type_utils.py",
    "ast_data": "FunctionDef name:onnx_type arg:self arguments arg If Compare Raise Call Return return:yes"
  },
  {
    "library": "django",
    "name": "get_forward_related_filter",
    "source_code": "def get_forward_related_filter(self, obj):\n    return {'%s__%s' % (self.name, rh_field.name): getattr(obj, rh_field.attname) for _, rh_field in self.related_fields}",
    "docstring": "Return the keyword arguments that when supplied to self.model.object.filter(), would select all instances related through this field to the remote obj. This is used to build the querysets returned by related descriptors. obj is an instance of self.related_field.model.",
    "type": "method",
    "file_path": "django\\django\\db\\models\\fields\\related.py",
    "ast_data": "FunctionDef name:get_forward_related_filter arg:self arg:obj arguments arg arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_is_padded_shape_compatible_with",
    "source_code": "def _is_padded_shape_compatible_with(padded_shape, input_component_shape):\n    if padded_shape.dims is None or input_component_shape.dims is None:\n        return True\n    if len(padded_shape.dims) != len(input_component_shape.dims):\n        return False\n    for padded_dim, input_dim in zip(padded_shape.dims, input_component_shape.dims):\n        if padded_dim.value is not None and input_dim.value is not None and (padded_dim.value < input_dim.value):\n            return False\n    return True",
    "docstring": "Returns if can be padded to . Args: padded_shape: A . input_component_shape: A . Returns: if can be padded to , otherwise .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\data\\ops\\padded_batch_op.py",
    "ast_data": "FunctionDef name:_is_padded_shape_compatible_with arg:padded_shape arg:input_component_shape arguments arg arg If BoolOp Compare Compare Return return:yes If Compare Call Call Return return:yes For Call If BoolOp Compare Compare Compare Return return:yes Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "record",
    "source_code": "def record():\n    with ops.device('cpu:0'):\n        raw_summary_op = gen_summary_ops.write_raw_proto_summary(_summary_state.writer._resource, step, array_ops.identity(tensor), name=scope)\n        with ops.control_dependencies([raw_summary_op]):\n            return constant_op.constant(True)",
    "docstring": "Record the actual summary and return True.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\summary_ops_v2.py",
    "ast_data": "FunctionDef name:record arguments With Call Assign Call Call With Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "gather_nd",
    "source_code": "def gather_nd(self, indices, name=None):\n    raise AttributeError",
    "docstring": "Gather slices from into a Tensor with shape specified by . See tf.gather_nd for details. Args: indices: A . Must be one of the following types: , . Index tensor. name: A name for the operation (optional). Returns: A . Has the same type as .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\variables.py",
    "ast_data": "FunctionDef name:gather_nd arg:self arg:indices arg:name arguments arg arg arg Raise"
  },
  {
    "library": "matplotlib",
    "name": "axes",
    "source_code": "@property\ndef axes(self):\n    return self._localaxes[:]",
    "docstring": "List of Axes in the SubFigure. You can access and modify the Axes in the SubFigure through this list. Modifying this list has no effect. Instead, use , or to add or remove an Axes. Note: The property and method are equivalent.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\figure.py",
    "ast_data": "FunctionDef name:axes arg:self arguments arg Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "_check_means_parameters",
    "source_code": "def _check_means_parameters(self, X):\n    _, n_features = X.shape\n    if self.mean_precision_prior is None:\n        self.mean_precision_prior_ = 1.0\n    else:\n        self.mean_precision_prior_ = self.mean_precision_prior\n    if self.mean_prior is None:\n        self.mean_prior_ = X.mean(axis=0)\n    else:\n        self.mean_prior_ = check_array(self.mean_prior, dtype=[np.float64, np.float32], ensure_2d=False)\n        _check_shape(self.mean_prior_, (n_features,), 'means')",
    "docstring": "Check the parameters of the Gaussian distribution. Parameters ---------- X : array-like of shape (n_samples, n_features)",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\mixture\\_bayesian_mixture.py",
    "ast_data": "FunctionDef name:_check_means_parameters arg:self arg:X arguments arg arg Assign If Compare Assign Assign If Compare Assign Call Assign Call Call"
  },
  {
    "library": "matplotlib",
    "name": "direction",
    "source_code": "@property\ndef direction(self):\n    return self._direction",
    "docstring": "Direction of the handle: 'vertical' or 'horizontal'.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\widgets.py",
    "ast_data": "FunctionDef name:direction arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_init_from_args",
    "source_code": "def _init_from_args(self, queue=None, enqueue_ops=None, close_op=None, cancel_op=None, queue_closed_exception_types=None):\n    if not queue or not enqueue_ops:\n        raise ValueError('Must provide queue and enqueue_ops.')\n    self._queue = queue\n    self._enqueue_ops = enqueue_ops\n    self._close_op = close_op\n    self._cancel_op = cancel_op\n    if queue_closed_exception_types is not None:\n        if not isinstance(queue_closed_exception_types, tuple) or not queue_closed_exception_types or (not all((issubclass(t, errors.OpError) for t in queue_closed_exception_types))):\n            raise TypeError('queue_closed_exception_types, when provided, must be a tuple of tf.error types, but saw: %s' % queue_closed_exception_types)\n    self._queue_closed_exception_types = queue_closed_exception_types\n    if self._close_op is None:\n        self._close_op = self._queue.close()\n    if self._cancel_op is None:\n        self._cancel_op = self._queue.close(cancel_pending_enqueues=True)\n    if not self._queue_closed_exception_types:\n        self._queue_closed_exception_types = (errors.OutOfRangeError,)\n    else:\n        self._queue_closed_exception_types = tuple(self._queue_closed_exception_types)",
    "docstring": "Create a QueueRunner from arguments. Args: queue: A . enqueue_ops: List of enqueue ops to run in threads later. close_op: Op to close the queue. Pending enqueue ops are preserved. cancel_op: Op to close the queue and cancel pending enqueue ops. queue_closed_exception_types: Tuple of exception types, which indicate the queue has been safely closed. Raises: ValueError: If or are not provided when not restoring from . TypeError: If is provided, but is not a non-empty tuple of error types (subclasses of ).",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\training\\queue_runner_impl.py",
    "ast_data": "FunctionDef name:_init_from_args arg:self arg:queue arg:enqueue_ops arg:close_op arg:cancel_op arg:queue_closed_exception_types arguments arg arg arg arg arg arg If BoolOp Raise Call Assign Assign Assign Assign If Compare If BoolOp Call Call Call Raise Call Assign If Compare Assign Call If Compare Assign Call If Assign Assign Call"
  },
  {
    "library": "numpy",
    "name": "expand_dims",
    "source_code": "@array_function_dispatch(_expand_dims_dispatcher)\ndef expand_dims(a, axis):\n    if isinstance(a, matrix):\n        a = asarray(a)\n    else:\n        a = asanyarray(a)\n    if not isinstance(axis, (tuple, list)):\n        axis = (axis,)\n    out_ndim = len(axis) + a.ndim\n    axis = normalize_axis_tuple(axis, out_ndim)\n    shape_it = iter(a.shape)\n    shape = [1 if ax in axis else next(shape_it) for ax in range(out_ndim)]\n    return a.reshape(shape)",
    "docstring": "Expand the shape of an array. Insert a new axis that will appear at the position in the expanded array shape. Parameters ---------- a : array_like Input array. axis : int or tuple of ints Position in the expanded axes where the new axis (or axes) is placed. .. deprecated:: 1.13.0 Passing an axis where ``. These are the same objects: >>> np.newaxis is None True",
    "type": "function",
    "file_path": "numpy\\numpy\\lib\\_shape_base_impl.py",
    "ast_data": "FunctionDef name:expand_dims arg:a arg:axis arguments arg arg If Call Assign Call Assign Call If Call Assign Assign Call Assign Call Assign Call Assign Compare Call Call Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "support_set",
    "source_code": "@property\ndef support_set(self):\n    roots = set()\n    if self.has_attr():\n        roots.update(self.parent.support_set)\n    elif self.has_subscript():\n        roots.update(self.parent.support_set)\n        roots.update(self.qn[1].support_set)\n    else:\n        roots.add(self)\n    return roots",
    "docstring": "Returns the set of simple symbols that this QN relies on. This would be the smallest set of symbols necessary for the QN to statically resolve (assuming properties and index ranges are verified at runtime). Examples: 'a.b' has only one support symbol, 'a' 'a[i]' has two support symbols, 'a' and 'i'",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\autograph\\pyct\\qual_names.py",
    "ast_data": "FunctionDef name:support_set arg:self arguments arg Assign Call If Call Call If Call Call Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "build_plan",
    "source_code": "@abc.abstractmethod\ndef build_plan(self, module: nn.Module) -> ShardingPlan:\n    pass",
    "docstring": "Given a nn.Module, define how to shard the module across ranks, return a ShardingPlan Args: module (:class:): The module to apply sharding to. Returns: A :class: object that represents how to shard the module.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\_shard\\sharding_plan\\api.py",
    "ast_data": "FunctionDef name:build_plan arg:self arg:module arguments arg arg"
  },
  {
    "library": "scipy",
    "name": "linprog_terse_callback",
    "source_code": "def linprog_terse_callback(res):\n    nit = res['nit']\n    x = res['x']\n    if nit == 0:\n        print('Iter:   X:')\n    print(f'{nit: <5d}   ', end='')\n    print(x)",
    "docstring": "A sample callback function demonstrating the linprog callback interface. This callback produces brief output to sys.stdout before each iteration and after the final iteration of the simplex algorithm. Parameters ---------- res : A consisting of the following fields: x : 1-D array The independent variable vector which optimizes the linear programming problem. fun : float Value of the objective function. success : bool True if the algorithm succeeded in finding an optimal solution. slack : 1-D array The values of the slack variables. Each slack variable corresponds to an inequality constraint. If the slack is zero, then the corresponding constraint is active. con : 1-D array The (nominally zero) residuals of the equality constraints, that is, `` : Serious numerical difficulties encountered nit : int The number of iterations performed. message : str A string descriptor of the exit status of the optimization.",
    "type": "function",
    "file_path": "scipy\\scipy\\optimize\\_linprog.py",
    "ast_data": "FunctionDef name:linprog_terse_callback arg:res arguments arg Assign Assign If Compare Call Call Call"
  },
  {
    "library": "kornia",
    "name": "get_gaussian_kernel1d",
    "source_code": "def get_gaussian_kernel1d(kernel_size: int, sigma: float | Tensor, force_even: bool=False, *, device: Optional[Device]=None, dtype: Optional[Dtype]=None) -> Tensor:\n    _check_kernel_size(kernel_size, allow_even=force_even)\n    return gaussian(kernel_size, sigma, device=device, dtype=dtype)",
    "docstring": "Return Gaussian filter coefficients. Args: kernel_size: filter size. It should be odd and positive. sigma: gaussian standard deviation. force_even: overrides requirement for odd kernel size. device: This value will be used if sigma is a float. Device desired to compute. dtype: This value will be used if sigma is a float. Dtype desired for compute. Returns: gaussian filter coefficients with shape :math:. Examples: >>> get_gaussian_kernel1d(3, 2.5) tensor([[0.3243, 0.3513, 0.3243]]) >>> get_gaussian_kernel1d(5, 1.5) tensor([[0.1201, 0.2339, 0.2921, 0.2339, 0.1201]]) >>> get_gaussian_kernel1d(5, torch.tensor([[1.5], [0.7]])) tensor([[0.1201, 0.2339, 0.2921, 0.2339, 0.1201], [0.0096, 0.2054, 0.5699, 0.2054, 0.0096]])",
    "type": "function",
    "file_path": "kornia\\kornia\\filters\\kernels.py",
    "ast_data": "FunctionDef name:get_gaussian_kernel1d arg:kernel_size arg:sigma arg:force_even arguments arg arg arg arg arg Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "_is_causal_behavior",
    "source_code": "def _is_causal_behavior(rank: int, world_size: int, i: int, is_causal: bool) -> _CausalBehavior:\n    if not is_causal:\n        return _CausalBehavior.NOT_IS_CAUSAL\n    if i == 0:\n        return _CausalBehavior.IS_CAUSAL\n    source_rank = (rank - i) % world_size\n    if source_rank < rank or _cp_options.enable_load_balance:\n        return _CausalBehavior.NOT_IS_CAUSAL\n    else:\n        return _CausalBehavior.SKIP",
    "docstring": "Calculate is_causal behavior for each KV block. The attention can either be calculated in full, not at all or with the causal mask applied.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\tensor\\experimental\\_attention.py",
    "ast_data": "FunctionDef name:_is_causal_behavior arg:rank arg:world_size arg:i arg:is_causal arguments arg arg arg arg If Return return:yes If Compare Return return:yes Assign If BoolOp Compare Return return:yes Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "update_default_handler_map",
    "source_code": "@classmethod\ndef update_default_handler_map(cls, handler_map):\n    cls._default_handler_map.update(handler_map)",
    "docstring": "Update the global default handler map, shared by all legends.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\legend.py",
    "ast_data": "FunctionDef name:update_default_handler_map arg:cls arg:handler_map arguments arg arg Call"
  },
  {
    "library": "tensorflow",
    "name": "output_shapes",
    "source_code": "@property\n@deprecation.deprecated(None, 'Use `tf.compat.v1.data.get_output_shapes(iterator)`.')\ndef output_shapes(self):\n    return nest.map_structure(lambda component_spec: component_spec._to_legacy_output_shapes(), self._element_spec)",
    "docstring": "Returns the shape of each component of an element of this iterator. Returns: A (nested) structure of objects corresponding to each component of an element of this dataset.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\data\\ops\\iterator_ops.py",
    "ast_data": "FunctionDef name:output_shapes arg:self arguments arg Return return:yes Call arguments arg Call Call"
  },
  {
    "library": "matplotlib",
    "name": "get_transform",
    "source_code": "def get_transform(self):\n    return self.get_patch_transform() + artist.Artist.get_transform(self)",
    "docstring": "Return the applied to the .",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\patches.py",
    "ast_data": "FunctionDef name:get_transform arg:self arguments arg Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "DocExportedTwiceError",
    "source_code": "class DocExportedTwiceError(Exception):\n    pass",
    "docstring": "Exception for when two docstrings are registered to a single module.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\tools\\api\\generator2\\generator\\generator.py",
    "ast_data": "ClassDef name:DocExportedTwiceError"
  },
  {
    "library": "scikit-learn",
    "name": "_post_process_frame",
    "source_code": "def _post_process_frame(frame, feature_names, target_names):\n    X = frame[feature_names]\n    if len(target_names) >= 2:\n        y = frame[target_names]\n    elif len(target_names) == 1:\n        y = frame[target_names[0]]\n    else:\n        y = None\n    return (X, y)",
    "docstring": "Post process a dataframe to select the desired columns in and . Parameters ---------- frame : dataframe The dataframe to split into and . feature_names : list of str The list of feature names to populate . target_names : list of str The list of target names to populate . Returns ------- X : dataframe The dataframe containing the features. y : {series, dataframe} or None The series or dataframe containing the target.",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\datasets\\_arff_parser.py",
    "ast_data": "FunctionDef name:_post_process_frame arg:frame arg:feature_names arg:target_names arguments arg arg arg Assign If Compare Call Assign If Compare Call Assign Assign Return return:yes"
  },
  {
    "library": "pandas",
    "name": "_transform_index",
    "source_code": "@final\ndef _transform_index(self, func, *, level=None) -> Index:\n    if isinstance(self, ABCMultiIndex):\n        values = [self.get_level_values(i).map(func) if i == level or level is None else self.get_level_values(i) for i in range(self.nlevels)]\n        return type(self).from_arrays(values)\n    else:\n        items = [func(x) for x in self]\n        return Index(items, name=self.name, tupleize_cols=False)",
    "docstring": "Apply function to all values found in index. This includes transforming multiindex entries separately. Only apply function to one level of the MultiIndex if level is specified.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\indexes\\base.py",
    "ast_data": "FunctionDef name:_transform_index arg:self arg:func arguments arg arg arg If Call Assign BoolOp Compare Compare Call Call Call Call Return return:yes Call Call Assign Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "ShardedStateDictConfig",
    "source_code": "@dataclass\nclass ShardedStateDictConfig(StateDictConfig):\n    _use_dtensor: bool = False",
    "docstring": "`ShardedStateDictConfig`.",
    "type": "class",
    "file_path": "pytorch\\torch\\distributed\\fsdp\\api.py",
    "ast_data": "ClassDef name:ShardedStateDictConfig"
  },
  {
    "library": "pytorch",
    "name": "FunctionMeta",
    "source_code": "class FunctionMeta(type):\n\n    def __init__(cls, name, bases, attrs):\n        backward_fn = type(name + 'Backward', (BackwardCFunction,), {'_forward_cls': cls})\n        backward_fn._autograd_function_id = next(AUTOGRAD_FUNCTION_COUNTER)\n        backward_fn._bw_module = None\n        if getattr(cls, '_lazy_backward_info', None):\n            backward_fn._bw_module = cls._lazy_backward_info.bw_module\n        cls._backward_cls = backward_fn\n        super().__init__(name, bases, attrs)",
    "docstring": "Function metaclass. This metaclass sets up the following properties: _backward_cls: The Function class corresponding to the differentiated version of this function (which is generated on the fly by this metaclass).",
    "type": "class",
    "file_path": "pytorch\\torch\\autograd\\function.py",
    "ast_data": "ClassDef name:FunctionMeta FunctionDef name:__init__ arg:cls arg:name arg:bases arg:attrs arguments arg arg arg arg Assign Call Assign Call Assign If Call Assign Assign Call Call"
  },
  {
    "library": "matplotlib",
    "name": "corners",
    "source_code": "def corners(self):\n    (x0, y0), (x1, y1) = self.get_points()\n    return np.array([[x0, y0], [x0, y1], [x1, y0], [x1, y1]])",
    "docstring": "Return the corners of this rectangle as an array of points. Specifically, this returns the array ``.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\transforms.py",
    "ast_data": "FunctionDef name:corners arg:self arguments arg Assign Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "add_benchmark_harness",
    "source_code": "def add_benchmark_harness(self, output):\n    if not config.benchmark_harness:\n        return\n    self.benchmark_compiled_module(output)\n    output.writelines(['', '', 'if __name__ == \"__main__\":'])\n    with output.indent():\n        output.writelines(['from torch._inductor.wrapper_benchmark import compiled_module_main', f\"compiled_module_main('{get_benchmark_name()}', benchmark_compiled_module)\"])",
    "docstring": "Append a benchmark harness to generated code for debugging",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\codegen\\wrapper.py",
    "ast_data": "FunctionDef name:add_benchmark_harness arg:self arg:output arguments arg arg If Return return:no Call Call With Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "_register_foreach_lowering",
    "source_code": "def _register_foreach_lowering(aten_fn, decomp_fn):\n\n    @functools.wraps(decomp_fn)\n    def wrapped(*args, **kwargs):\n        assert len(args) <= 2\n        out = decomp_fn(*args, **kwargs)\n        validate_ir(out)\n        return out\n    aten_fns = get_overloads(aten_fn)\n    foreach_ops.update(aten_fns)\n    lowerings.update(dict.fromkeys(aten_fns, wrapped))\n    return wrapped",
    "docstring": "Add a foreach lowering to lowerings dict. Arguments: aten_fn: torch.ops.aten.* fn we are lowering decomp_fn: alternate implementation on our IR broadcast: True to apply broadcasting to tensor inputs type_promotion_kind: kind of type promotion applied to tensor inputs, means no type promotion convert_input_to_bool: some logical ops require inputs are converted to bool",
    "type": "function",
    "file_path": "pytorch\\torch\\_inductor\\lowering.py",
    "ast_data": "FunctionDef name:_register_foreach_lowering arg:aten_fn arg:decomp_fn arguments arg arg FunctionDef name:wrapped arguments arg arg Compare Call Assign Call Call Return return:yes Call Assign Call Call Call Call Return return:yes"
  },
  {
    "library": "numpy",
    "name": "__float__",
    "source_code": "def __float__(self):\n    if self.size > 1:\n        raise TypeError('Only length-1 arrays can be converted to Python scalars')\n    elif self._mask:\n        warnings.warn('Warning: converting a masked element to nan.', stacklevel=2)\n        return np.nan\n    return float(self.item())",
    "docstring": "Convert to float.",
    "type": "method",
    "file_path": "numpy\\numpy\\ma\\core.py",
    "ast_data": "FunctionDef name:__float__ arg:self arguments arg If Compare Raise Call If Call Return return:yes Return return:yes Call Call"
  },
  {
    "library": "django",
    "name": "find_commands",
    "source_code": "def find_commands(management_dir):\n    command_dir = os.path.join(management_dir, 'commands')\n    return [name for _, name, is_pkg in pkgutil.iter_modules([command_dir]) if not is_pkg and (not name.startswith('_'))]",
    "docstring": "Given a path to a management directory, return a list of all the command names that are available.",
    "type": "function",
    "file_path": "django\\django\\core\\management\\__init__.py",
    "ast_data": "FunctionDef name:find_commands arg:management_dir arguments arg Assign Call Return return:yes Call BoolOp Call"
  },
  {
    "library": "tensorflow",
    "name": "AbslForkServerProcess",
    "source_code": "class AbslForkServerProcess(_AbslProcess, multiprocessing.context.ForkServerProcess):\n    pass",
    "docstring": "An absl-compatible Forkserver process. Note: Forkserver is not available in windows.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\multi_process_lib.py",
    "ast_data": "ClassDef name:AbslForkServerProcess"
  },
  {
    "library": "tensorflow",
    "name": "internal_captures",
    "source_code": "@property\ndef internal_captures(self):\n    return list(self._function_captures.by_val_internal.values())",
    "docstring": "Placeholders in this function corresponding captured tensors.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\func_graph.py",
    "ast_data": "FunctionDef name:internal_captures arg:self arguments arg Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "is_generic_union",
    "source_code": "def is_generic_union(tp):\n    return tp is not typing.Union and getattr(tp, '__origin__', None) is typing.Union",
    "docstring": "Returns true if is a parameterized typing.Union value.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\util\\type_annotations.py",
    "ast_data": "FunctionDef name:is_generic_union arg:tp arguments arg Return return:yes BoolOp Compare Compare Call"
  },
  {
    "library": "django",
    "name": "CreateError",
    "source_code": "class CreateError(Exception):\n    pass",
    "docstring": "Used internally as a consistent exception type to catch from save (see the docstring for SessionBase.save() for details).",
    "type": "class",
    "file_path": "django\\django\\contrib\\sessions\\backends\\base.py",
    "ast_data": "ClassDef name:CreateError"
  },
  {
    "library": "matplotlib",
    "name": "extents",
    "source_code": "@property\ndef extents(self):\n    if self.direction == 'horizontal':\n        vmin = self._selection_artist.get_x()\n        vmax = vmin + self._selection_artist.get_width()\n    else:\n        vmin = self._selection_artist.get_y()\n        vmax = vmin + self._selection_artist.get_height()\n    return (vmin, vmax)",
    "docstring": "(float, float) The values, in data coordinates, for the start and end points of the current selection. If there is no selection then the start and end values will be the same.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\widgets.py",
    "ast_data": "FunctionDef name:extents arg:self arguments arg If Compare Assign Call Assign Call Assign Call Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "clone_metric",
    "source_code": "def clone_metric(metric):\n    if isinstance(metric, Metric):\n        with ops.init_scope():\n            return metric.__class__.from_config(metric.get_config())\n    return metric",
    "docstring": "Returns a clone of the metric if stateful, otherwise returns it as is.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\metrics.py",
    "ast_data": "FunctionDef name:clone_metric arg:metric arguments arg If Call With Call Return return:yes Call Call Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "fit",
    "source_code": "@_fit_context(prefer_skip_nested_validation=False)\ndef fit(self, X, y=None):\n    self._fit(X)\n    n_samples = self.n_samples_fit_\n    if self.n_neighbors > n_samples:\n        warnings.warn('n_neighbors (%s) is greater than the total number of samples (%s). n_neighbors will be set to (n_samples - 1) for estimation.' % (self.n_neighbors, n_samples))\n    self.n_neighbors_ = max(1, min(self.n_neighbors, n_samples - 1))\n    self._distances_fit_X_, _neighbors_indices_fit_X_ = self.kneighbors(n_neighbors=self.n_neighbors_)\n    if self._fit_X.dtype == np.float32:\n        self._distances_fit_X_ = self._distances_fit_X_.astype(self._fit_X.dtype, copy=False)\n    self._lrd = self._local_reachability_density(self._distances_fit_X_, _neighbors_indices_fit_X_)\n    lrd_ratios_array = self._lrd[_neighbors_indices_fit_X_] / self._lrd[:, np.newaxis]\n    self.negative_outlier_factor_ = -np.mean(lrd_ratios_array, axis=1)\n    if self.contamination == 'auto':\n        self.offset_ = -1.5\n    else:\n        self.offset_ = np.percentile(self.negative_outlier_factor_, 100.0 * self.contamination)\n    if np.min(self.negative_outlier_factor_) < -10000000.0 and (not self.novelty):\n        warnings.warn('Duplicate values are leading to incorrect results. Increase the number of neighbors for more accurate results.')\n    return self",
    "docstring": "Fit the local outlier factor detector from the training dataset. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) or (n_samples, n_samples) if metric='precomputed' Training data. y : Ignored Not used, present for API consistency by convention. Returns ------- self : LocalOutlierFactor The fitted local outlier factor detector.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\neighbors\\_lof.py",
    "ast_data": "FunctionDef name:fit arg:self arg:X arg:y arguments arg arg arg Call Assign If Compare Call Assign Call Call Assign Call If Compare Assign Call Assign Call Assign Assign Call If Compare Assign Assign Call If BoolOp Compare Call Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "use_mkl_length",
    "source_code": "def use_mkl_length(graph: MklSubgraph) -> bool:\n    return len(graph.nodes) > 2",
    "docstring": "This is a heuristic that can be passed into that determines whether a subgraph should be run in MKL by checking if there are more than 2 nodes in it",
    "type": "function",
    "file_path": "pytorch\\torch\\fx\\experimental\\optimization.py",
    "ast_data": "FunctionDef name:use_mkl_length arg:graph arguments arg Return return:yes Compare Call"
  },
  {
    "library": "scipy",
    "name": "Ripple01",
    "source_code": "class Ripple01(Benchmark):\n\n    def __init__(self, dimensions=2):\n        Benchmark.__init__(self, dimensions)\n        self._bounds = list(zip([0.0] * self.N, [1.0] * self.N))\n        self.global_optimum = [[0.1 for _ in range(self.N)]]\n        self.fglob = -2.2\n\n    def fun(self, x, *args):\n        self.nfev += 1\n        u = -2.0 * log(2.0) * ((x - 0.1) / 0.8) ** 2.0\n        v = sin(5.0 * pi * x) ** 6.0 + 0.1 * cos(500.0 * pi * x) ** 2.0\n        return sum(-exp(u) * v)",
    "docstring": "Ripple 1 objective function. This class defines the Ripple 1 [1]_ global optimization problem. This is a multimodal minimization problem defined as follows: .. math:: f_{\\text{Ripple01}}(x) = \\sum_{i=1}^2 -e^{-2 \\log 2 (\\frac{x_i-0.1}{0.8})^2} \\left[\\sin^6(5 \\pi x_i) + 0.1\\cos^2(500 \\pi x_i) \\right] with :math: for :math:. *Global optimum*: :math: for :math: for :math: .. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions For Global Optimization Problems Int. Journal of Mathematical Modelling and Numerical Optimisation, 2013, 4, 150-194.",
    "type": "class",
    "file_path": "scipy\\benchmarks\\benchmarks\\go_benchmark_functions\\go_funcs_R.py",
    "ast_data": "ClassDef name:Ripple01 FunctionDef name:__init__ arg:self arg:dimensions arguments arg arg Call Assign Call Call Assign Call Assign FunctionDef name:fun arg:self arg:x arguments arg arg arg Assign Call Assign Call Call Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "resolve",
    "source_code": "def resolve(node, source_info, graphs, resolver):\n    visitor = FunctionVisitor(source_info, graphs, resolver)\n    node = visitor.visit(node)\n    return node",
    "docstring": "Performs type inference. Args: node: ast.AST source_info: transformer.SourceInfo graphs: Dict[ast.FunctionDef, cfg.Graph] resolver: Resolver Returns: ast.AST",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\autograph\\pyct\\static_analysis\\type_inference.py",
    "ast_data": "FunctionDef name:resolve arg:node arg:source_info arg:graphs arg:resolver arguments arg arg arg arg Assign Call Assign Call Return return:yes"
  },
  {
    "library": "scipy",
    "name": "parse_attribute",
    "source_code": "@classmethod\ndef parse_attribute(cls, name, attr_string):\n    attr_string_lower = attr_string.lower().strip()\n    if attr_string_lower[:len('date')] == 'date':\n        date_format, datetime_unit = cls._get_date_format(attr_string)\n        return cls(name, date_format, datetime_unit)\n    else:\n        return None",
    "docstring": "Parse the attribute line if it knows how. Returns the parsed attribute, or None. For date attributes, the attribute string would be like 'date '.",
    "type": "method",
    "file_path": "scipy\\scipy\\io\\arff\\_arffread.py",
    "ast_data": "FunctionDef name:parse_attribute arg:cls arg:name arg:attr_string arguments arg arg arg Assign Call Call If Compare Call Assign Call Return return:yes Call Return return:no"
  },
  {
    "library": "tensorflow",
    "name": "_TensorProcessor",
    "source_code": "class _TensorProcessor(_OptimizableVariable):\n\n    def __init__(self, v):\n        self._v = v\n\n    def target(self):\n        return self._v\n\n    def update_op(self, optimizer, g):\n        raise NotImplementedError('Trying to update a Tensor ', self._v)",
    "docstring": "Processor for ordinary Tensors. Even though a Tensor can't really be updated, sometimes it is useful to compute the gradients with respect to a Tensor using the optimizer. Updating the Tensor is, of course, unsupported.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\training\\optimizer.py",
    "ast_data": "ClassDef name:_TensorProcessor FunctionDef name:__init__ arg:self arg:v arguments arg arg Assign FunctionDef name:target arg:self arguments arg Return return:yes FunctionDef name:update_op arg:self arg:optimizer arg:g arguments arg arg arg Raise Call"
  },
  {
    "library": "scipy",
    "name": "_remove_dups",
    "source_code": "def _remove_dups(L):\n    seen_before = set()\n    L2 = []\n    for i in L:\n        if i not in seen_before:\n            seen_before.add(i)\n            L2.append(i)\n    return L2",
    "docstring": "Remove duplicates AND preserve the original order of the elements. The set class is not guaranteed to do this.",
    "type": "function",
    "file_path": "scipy\\scipy\\cluster\\hierarchy.py",
    "ast_data": "FunctionDef name:_remove_dups arg:L arguments arg Assign Call Assign For If Compare Call Call Return return:yes"
  },
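Illustrative note: a quick, runnable check of the order-preserving deduplication above (function restated verbatim), contrasting it with a plain `set` round-trip, which does not preserve first-occurrence order.

```python
def _remove_dups(L):
    seen_before = set()
    L2 = []
    for i in L:
        if i not in seen_before:
            seen_before.add(i)
            L2.append(i)
    return L2

print(_remove_dups([3, 1, 3, 2, 1]))  # [3, 1, 2] -- first-occurrence order kept
print(sorted(set([3, 1, 3, 2, 1])))   # [1, 2, 3] -- order information lost
```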
  {
    "library": "scipy",
    "name": "NearConstantInputWarning",
    "source_code": "class NearConstantInputWarning(DegenerateDataWarning):\n\n    def __init__(self, msg=None):\n        if msg is None:\n            msg = 'All values in data are nearly equal; results may not be reliable.'\n        self.args = (msg,)",
    "docstring": "Warns when all values in data are nearly equal.",
    "type": "class",
    "file_path": "scipy\\scipy\\stats\\_warnings_errors.py",
    "ast_data": "ClassDef name:NearConstantInputWarning FunctionDef name:__init__ arg:self arg:msg arguments arg arg If Compare Assign Assign"
  },
  {
    "library": "tensorflow",
    "name": "pad",
    "source_code": "@tf_export.tf_export('experimental.numpy.pad', v1=[])\n@np_utils.np_doc('pad')\ndef pad(array, pad_width, mode, **kwargs):\n    constant_values = kwargs.get('constant_values', 0)\n    if not (mode == 'constant' or mode == 'reflect' or mode == 'symmetric'):\n        raise ValueError('Unsupported padding mode: ' + mode)\n    mode = mode.upper()\n    array = asarray(array)\n    pad_width = asarray(pad_width, dtype=dtypes.int32)\n    return array_ops.pad(tensor=array, paddings=pad_width, mode=mode, constant_values=constant_values)",
    "docstring": "Only supports modes 'constant', 'reflect' and 'symmetric' currently.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\numpy_ops\\np_array_ops.py",
    "ast_data": "FunctionDef name:pad arg:array arg:pad_width arg:mode arguments arg arg arg arg Assign Call If BoolOp Compare Compare Compare Raise Call Assign Call Assign Call Assign Call Return return:yes Call Call Call"
  },
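Illustrative note: `pad` above is exposed as `tf.experimental.numpy.pad`; a minimal sketch of the supported-constant-mode path (the sample array is an assumption for illustration).

```python
import tensorflow.experimental.numpy as tnp

a = tnp.asarray([[1, 2], [3, 4]])
# One row of zeros above and below; columns untouched.
print(tnp.pad(a, [[1, 1], [0, 0]], mode='constant', constant_values=0))
# An unsupported mode such as 'edge' raises ValueError per the check above.
```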
  {
    "library": "matplotlib",
    "name": "get_major_ticks",
    "source_code": "def get_major_ticks(self, numticks=None):\n    if numticks is None:\n        numticks = len(self.get_majorticklocs())\n    while len(self.majorTicks) < numticks:\n        tick = self._get_tick(major=True)\n        self.majorTicks.append(tick)\n        self._copy_tick_props(self.majorTicks[0], tick)\n    return self.majorTicks[:numticks]",
    "docstring": "Return the list of major \\s. .. warning:: Ticks are not guaranteed to be persistent. Various operations can create, delete and modify the Tick instances. There is an imminent risk that changes to individual ticks will not survive if you work on the figure further (including also panning/zooming on a displayed figure). Working on the individual ticks is a method of last resort. Use instead if possible.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\axis.py",
    "ast_data": "FunctionDef name:get_major_ticks arg:self arg:numticks arguments arg arg If Compare Assign Call Call While Compare Call Assign Call Call Call Return return:yes"
  },
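Illustrative note: a minimal sketch of the public Matplotlib API around the method above, showing that one `Tick` is grown per major tick location.

```python
import matplotlib.pyplot as plt

fig, ax = plt.subplots()
ax.set_xlim(0, 4)
ticks = ax.xaxis.get_major_ticks()        # one Tick per major tick location
print(len(ticks), len(ax.get_xticks()))   # the two counts agree
```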
  {
    "library": "pytorch",
    "name": "ExpTransform",
    "source_code": "class ExpTransform(Transform):\n    domain = constraints.real\n    codomain = constraints.positive\n    bijective = True\n    sign = +1\n\n    def __eq__(self, other):\n        return isinstance(other, ExpTransform)\n\n    def _call(self, x):\n        return x.exp()\n\n    def _inverse(self, y):\n        return y.log()\n\n    def log_abs_det_jacobian(self, x, y):\n        return x",
    "docstring": "Transform via the mapping :math:.",
    "type": "class",
    "file_path": "pytorch\\torch\\distributions\\transforms.py",
    "ast_data": "ClassDef name:ExpTransform Assign Assign Assign Assign FunctionDef name:__eq__ arg:self arg:other arguments arg arg Return return:yes Call FunctionDef name:_call arg:self arg:x arguments arg arg Return return:yes Call FunctionDef name:_inverse arg:self arg:y arguments arg arg Return return:yes Call FunctionDef name:log_abs_det_jacobian arg:self arg:x arg:y arguments arg arg arg Return return:yes"
  },
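Illustrative note: a minimal sketch exercising the public `ExpTransform` API above, including the Jacobian identity the class encodes (`log|d exp(x)/dx| = x`).

```python
import torch
from torch.distributions.transforms import ExpTransform

t = ExpTransform()
x = torch.tensor([0.0, 1.0, 2.0])
y = t(x)                              # exp(x)
print(t.inv(y))                       # log(y) recovers x
print(t.log_abs_det_jacobian(x, y))   # equals x, as in _call/_inverse above
```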
  {
    "library": "scikit-learn",
    "name": "safe_sparse_dot",
    "source_code": "def safe_sparse_dot(a, b, *, dense_output=False):\n    xp, _ = get_namespace(a, b)\n    if a.ndim > 2 or b.ndim > 2:\n        if sparse.issparse(a):\n            b_ = np.rollaxis(b, -2)\n            b_2d = b_.reshape((b.shape[-2], -1))\n            ret = a @ b_2d\n            ret = ret.reshape(a.shape[0], *b_.shape[1:])\n        elif sparse.issparse(b):\n            a_2d = a.reshape(-1, a.shape[-1])\n            ret = a_2d @ b\n            ret = ret.reshape(*a.shape[:-1], b.shape[1])\n        else:\n            b_axis = -1 if b.ndim == 1 else -2\n            ret = xp.tensordot(a, b, axes=[-1, b_axis])\n    else:\n        ret = a @ b\n    if sparse.issparse(a) and sparse.issparse(b) and dense_output and hasattr(ret, 'toarray'):\n        return ret.toarray()\n    return ret",
    "docstring": "Dot product that handle the sparse matrix case correctly. Parameters ---------- a : {ndarray, sparse matrix} b : {ndarray, sparse matrix} dense_output : bool, default=False When False, ``. Examples -------- >>> from scipy.sparse import csr_matrix >>> from sklearn.utils.extmath import safe_sparse_dot >>> X = csr_matrix([[1, 2], [3, 4], [5, 6]]) >>> dot_product = safe_sparse_dot(X, X.T) >>> dot_product.toarray() array([[ 5, 11, 17], [11, 25, 39], [17, 39, 61]])",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\utils\\extmath.py",
    "ast_data": "FunctionDef name:safe_sparse_dot arg:a arg:b arguments arg arg arg Assign Call If BoolOp Compare Compare If Call Assign Call Assign Call Assign Assign Call If Call Assign Call Assign Assign Call Assign Compare Assign Call Assign If BoolOp Call Call Call Return return:yes Call Return return:yes"
  },
  {
    "library": "pandas",
    "name": "max_rows",
    "source_code": "@property\ndef max_rows(self) -> int:\n    return get_option('display.max_info_rows')",
    "docstring": "Maximum info rows to be displayed.",
    "type": "method",
    "file_path": "pandas\\pandas\\io\\formats\\info.py",
    "ast_data": "FunctionDef name:max_rows arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, devices, group_size, options, collective_keys=None, canonicalize_devices=True):\n    if group_size % len(devices) > 0:\n        raise ValueError('group_size must be divisible by the number of devices.')\n    self._group_size = group_size\n    self._options = options\n    self._collective_keys = collective_keys or cross_device_utils.CollectiveKeys()\n    self._lock = threading.Lock()\n    if canonicalize_devices:\n        self._devices = tuple((device_util.canonicalize(d) for d in devices))\n    else:\n        self._devices = tuple((device_util.canonicalize_without_job_and_task(d) for d in devices))\n    group_key = self._collective_keys.get_group_key(self._devices)\n    self._launchers = []\n    self._limited_nccl = False\n    for device in self._devices:\n        launcher = cross_device_utils.CollectiveReplicaLauncher(group_key, group_size, self._collective_keys, device, options)\n        self._launchers.append(launcher)\n        if not launcher.can_order_nccl():\n            self._limited_nccl = True\n    super(CollectiveAllReduce, self).__init__()\n    self._canonicalize_devices = canonicalize_devices",
    "docstring": "Initializes the object. Args: devices: a list of device strings to run collectives on. group_size: the global group size. For between-graph replicated training it's the total number of devices across all workers. options: a . collective_keys: an optional CollectiveKey object. canonicalize_devices: Whether to canonicalize devices for workers or not.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\cross_device_ops.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:devices arg:group_size arg:options arg:collective_keys arg:canonicalize_devices arguments arg arg arg arg arg arg If Compare Call Raise Call Assign Assign Assign BoolOp Call Assign Call If Assign Call Call Assign Call Call Assign Call Assign Assign For Assign Call Call If Call Assign Call Call Assign"
  },
  {
    "library": "seaborn",
    "name": "_check_list_length",
    "source_code": "def _check_list_length(self, levels, values, variable):\n    message = ''\n    if len(levels) > len(values):\n        message = ' '.join([f'\\nThe {variable} list has fewer values ({len(values)})', f'than needed ({len(levels)}) and will cycle, which may', 'produce an uninterpretable plot.'])\n        values = [x for _, x in zip(levels, itertools.cycle(values))]\n    elif len(values) > len(levels):\n        message = ' '.join([f'The {variable} list has more values ({len(values)})', f'than needed ({len(levels)}), which may not be intended.'])\n        values = values[:len(levels)]\n    if message:\n        warnings.warn(message, UserWarning, stacklevel=6)\n    return values",
    "docstring": "Input check when values are provided as a list.",
    "type": "method",
    "file_path": "seaborn\\seaborn\\_base.py",
    "ast_data": "FunctionDef name:_check_list_length arg:self arg:levels arg:values arg:variable arguments arg arg arg arg Assign If Compare Call Call Assign Call Call Call Assign Call Call If Compare Call Call Assign Call Call Call Assign Call If Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "get_index_label_pos",
    "source_code": "def get_index_label_pos(index, extent, origin, inverted_xindex):\n    if extent is None:\n        extent = lookup_extent(origin)\n    left, right, bottom, top = extent\n    x, y = index_to_coordinate(index, extent, origin)\n    is_x0 = index[-2:] == '0]'\n    halign = 'left' if is_x0 ^ inverted_xindex else 'right'\n    hshift = 0.5 * np.sign(left - right)\n    x += hshift * (1 if is_x0 else -1)\n    return (x, y, halign)",
    "docstring": "Return the desired position and horizontal alignment of an index label.",
    "type": "function",
    "file_path": "matplotlib\\galleries\\users_explain\\artists\\imshow_extent.py",
    "ast_data": "FunctionDef name:get_index_label_pos arg:index arg:extent arg:origin arg:inverted_xindex arguments arg arg arg arg If Compare Assign Call Assign Assign Call Assign Compare Assign Assign Call Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "predict",
    "source_code": "def predict(self, X):\n    check_is_fitted(self)\n    X = self._check_X(X)\n    return self._get_median_predict(X, len(self.estimators_))",
    "docstring": "Predict regression value for X. The predicted regression value of an input sample is computed as the weighted median prediction of the regressors in the ensemble. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) The training input samples. Sparse matrix can be CSC, CSR, COO, DOK, or LIL. COO, DOK, and LIL are converted to CSR. Returns ------- y : ndarray of shape (n_samples,) The predicted regression values.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\ensemble\\_weight_boosting.py",
    "ast_data": "FunctionDef name:predict arg:self arg:X arguments arg arg Call Assign Call Return return:yes Call Call"
  },
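Illustrative note: `predict` above belongs to scikit-learn's `AdaBoostRegressor`; a minimal usage sketch with synthetic data (the data and hyperparameters are assumptions for illustration).

```python
from sklearn.datasets import make_regression
from sklearn.ensemble import AdaBoostRegressor

X, y = make_regression(n_samples=100, n_features=4, random_state=0)
reg = AdaBoostRegressor(n_estimators=10, random_state=0).fit(X, y)
print(reg.predict(X[:3]).shape)  # (3,) -- weighted median over the 10 estimators
```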
  {
    "library": "pytorch",
    "name": "is_invalid_cancel",
    "source_code": "def is_invalid_cancel(name: str, conclusion: Optional[str], drci_classifications: Any) -> bool:\n    if not name or not drci_classifications or (not conclusion) or (conclusion.upper() != 'CANCELLED'):\n        return False\n    return all((name != failure['name'] for failure in drci_classifications.get('FAILED', [])))",
    "docstring": "After invalid cancelled signals have been removed from HUD and Dr.CI. The same needs to be done here for consistency",
    "type": "function",
    "file_path": "pytorch\\.github\\scripts\\trymerge.py",
    "ast_data": "FunctionDef name:is_invalid_cancel arg:name arg:conclusion arg:drci_classifications arguments arg arg arg If BoolOp Compare Call Return return:yes Return return:yes Call Compare Call"
  },
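Illustrative note: a runnable sketch of the helper above (restated verbatim) against a hypothetical Dr.CI payload; the check names and payload shape are assumptions for illustration.

```python
from typing import Any, Optional

def is_invalid_cancel(name: str, conclusion: Optional[str], drci_classifications: Any) -> bool:
    if not name or not drci_classifications or (not conclusion) or (conclusion.upper() != 'CANCELLED'):
        return False
    return all(name != failure['name'] for failure in drci_classifications.get('FAILED', []))

drci = {'FAILED': [{'name': 'linux-build'}]}                # hypothetical payload
print(is_invalid_cancel('win-build', 'cancelled', drci))    # True: not a known failure
print(is_invalid_cancel('linux-build', 'cancelled', drci))  # False: Dr.CI flags it
print(is_invalid_cancel('win-build', 'failure', drci))      # False: not cancelled
```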
  {
    "library": "tensorflow",
    "name": "_iter_slices",
    "source_code": "def _iter_slices(full_shape, num_slices, slice_dim):\n    num_slices_with_excess = full_shape[slice_dim] % num_slices\n    offset = [0] * len(full_shape)\n    min_slice_len = full_shape[slice_dim] // num_slices\n    for i in range(num_slices):\n        shape = full_shape[:]\n        shape[slice_dim] = min_slice_len + bool(i < num_slices_with_excess)\n        yield (offset[:], shape)\n        offset[slice_dim] += shape[slice_dim]",
    "docstring": "Slices a given a shape along the specified dimension.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\variable_scope.py",
    "ast_data": "FunctionDef name:_iter_slices arg:full_shape arg:num_slices arg:slice_dim arguments arg arg arg Assign Assign Call Assign For Call Assign Assign Call Compare"
  },
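Illustrative note: the generator above (restated verbatim) distributes any remainder to the earliest slices; a quick check with a shape that does not divide evenly.

```python
def _iter_slices(full_shape, num_slices, slice_dim):
    num_slices_with_excess = full_shape[slice_dim] % num_slices
    offset = [0] * len(full_shape)
    min_slice_len = full_shape[slice_dim] // num_slices
    for i in range(num_slices):
        shape = full_shape[:]
        shape[slice_dim] = min_slice_len + bool(i < num_slices_with_excess)
        yield (offset[:], shape)
        offset[slice_dim] += shape[slice_dim]

# Splitting a [5, 4] shape into 2 slices along dim 0: the first slice
# absorbs the remainder, so the pieces are 3 and 2 rows.
print(list(_iter_slices([5, 4], 2, 0)))
# [([0, 0], [3, 4]), ([3, 0], [2, 4])]
```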
  {
    "library": "tensorflow",
    "name": "_enclosing_xla_context",
    "source_code": "def _enclosing_xla_context():\n    graph = ops.get_default_graph()\n    while graph is not None:\n        context_ = graph._get_control_flow_context()\n        while context_ is not None:\n            if isinstance(context_, control_flow_ops.XLAControlFlowContext):\n                return context_\n            context_ = context_.outer_context\n        graph = getattr(graph, 'outer_graph', None)\n    return None",
    "docstring": "Returns the XLAControlFlowContext, which exists inside a tpu.rewrite().",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\polymorphic_function\\function_context.py",
    "ast_data": "FunctionDef name:_enclosing_xla_context arguments Assign Call While Compare Assign Call While Compare If Call Return return:yes Assign Assign Call Return return:no"
  },
  {
    "library": "pytorch",
    "name": "_module_stack_to_str",
    "source_code": "def _module_stack_to_str(module_stack: object) -> str:\n    if not isinstance(module_stack, dict):\n        return str(module_stack)\n    module_values_list = list(module_stack.values())\n    if len(module_values_list) > 0:\n        owning_module = module_values_list[-1][0]\n        return str(owning_module)\n    else:\n        return str(module_stack)",
    "docstring": "Simplifies the stack from (\"mod\", \"mod.foo\", \"mod.foo.0\", \"mod.foo.0.linear\") to \"mod.foo.0.linear\"",
    "type": "function",
    "file_path": "pytorch\\torch\\ao\\quantization\\pt2e\\_numeric_debugger.py",
    "ast_data": "FunctionDef name:_module_stack_to_str arg:module_stack arguments arg If Call Return return:yes Call Assign Call Call If Compare Call Assign Return return:yes Call Return return:yes Call"
  },
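Illustrative note: a sketch mirroring the helper's core indexing on a hypothetical `nn_module_stack`-style dict; the keys and tuple contents are assumptions modeled on traced-module metadata.

```python
# Hypothetical module-stack metadata: values are (fully-qualified name, type) pairs.
stack = {
    'L__self__': ('mod', 'Model'),
    'L__self___foo_0': ('mod.foo.0', 'Sequential'),
    'L__self___foo_0_linear': ('mod.foo.0.linear', 'Linear'),
}
module_values_list = list(stack.values())
print(module_values_list[-1][0])  # 'mod.foo.0.linear' -- the innermost owner
```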
  {
    "library": "matplotlib",
    "name": "_set_locator",
    "source_code": "def _set_locator(self, locator):\n    pass",
    "docstring": "Subclasses may want to override this to set a locator.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\ticker.py",
    "ast_data": "FunctionDef name:_set_locator arg:self arg:locator arguments arg arg"
  },
  {
    "library": "tensorflow",
    "name": "MeanSquaredError",
    "source_code": "class MeanSquaredError(MeanMetricWrapper):\n\n    def __init__(self, name='mean_squared_error', dtype=None):\n        super(MeanSquaredError, self).__init__(mean_squared_error, name, dtype=dtype)",
    "docstring": "Computes the mean squared error between and . Args: name: (Optional) string name of the metric instance. dtype: (Optional) data type of the metric result. Standalone usage: >>> m = tf.keras.metrics.MeanSquaredError() >>> m.update_state([[0, 1], [0, 0]], [[1, 1], [0, 0]]) >>> m.result().numpy() 0.25 >>> m.reset_state() >>> m.update_state([[0, 1], [0, 0]], [[1, 1], [0, 0]], ... sample_weight=[1, 0]) >>> m.result().numpy() 0.5 Usage with API:",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\metrics.py",
    "ast_data": "ClassDef name:MeanSquaredError FunctionDef name:__init__ arg:self arg:name arg:dtype arguments arg arg arg Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_show_memory_counters",
    "source_code": "def _show_memory_counters(self) -> None:\n    allocations = {}\n    for name in self._tensors:\n        tensor = self._tensors[name]\n        self._chrome_trace.emit_obj_delete('Tensor', name, tensor.last_unref, tensor.pid, 0, tensor.object_id)\n        allocator = tensor.allocator\n        if allocator not in allocations:\n            allocations[allocator] = []\n        num_bytes = tensor.num_bytes\n        allocations[allocator].append((tensor.create_time, num_bytes, name))\n        allocations[allocator].append((tensor.last_unref, -num_bytes, name))\n    alloc_maxes = {}\n    for allocator in allocations:\n        alloc_list = allocations[allocator]\n        alloc_list.sort()\n        total_bytes = 0\n        alloc_tensor_set = set()\n        alloc_maxes[allocator] = AllocationMaximum(timestamp=0, num_bytes=0, tensors=set())\n        for time, num_bytes, name in sorted(alloc_list, key=lambda allocation: allocation[0]):\n            total_bytes += num_bytes\n            if num_bytes < 0:\n                alloc_tensor_set.discard(name)\n            else:\n                alloc_tensor_set.add(name)\n            if total_bytes > alloc_maxes[allocator].num_bytes:\n                alloc_maxes[allocator] = AllocationMaximum(timestamp=time, num_bytes=total_bytes, tensors=copy.deepcopy(alloc_tensor_set))\n            self._chrome_trace.emit_counter('Memory', allocator, self._allocators_pid, time, allocator, total_bytes)\n    self._allocator_maximums = alloc_maxes",
    "docstring": "Produce a counter series for each memory allocator.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\client\\timeline.py",
    "ast_data": "FunctionDef name:_show_memory_counters arg:self arguments arg Assign For Assign Call Assign If Compare Assign Assign Call Call Assign For Assign Call Assign Assign Call Assign Call Call For Call arguments arg If Compare Call Call If Compare Assign Call Call Call Assign"
  },
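Illustrative note: a distilled sketch of the running-peak bookkeeping inside `_show_memory_counters`, using hypothetical allocation events (positive bytes on create, negative on last unref) instead of real trace data.

```python
import copy

# (timestamp, signed_num_bytes, tensor_name)
events = [(0, 4, 'a'), (1, 8, 'b'), (2, -4, 'a'),
          (3, 6, 'c'), (4, -8, 'b'), (5, -6, 'c')]

total = peak = 0
live, peak_tensors = set(), set()
for t, num_bytes, name in sorted(events, key=lambda e: e[0]):
    total += num_bytes
    live.discard(name) if num_bytes < 0 else live.add(name)
    if total > peak:  # record the allocation maximum and the tensors alive then
        peak, peak_tensors = total, copy.deepcopy(live)
print(peak, sorted(peak_tensors))  # 14 ['b', 'c']
```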
  {
    "library": "pytorch",
    "name": "set_up_planner",
    "source_code": "@abc.abstractmethod\ndef set_up_planner(self, state_dict: STATE_DICT_TYPE, metadata: Optional[Metadata]=None, is_coordinator: bool=False) -> None:\n    pass",
    "docstring": "Initialize this instance to load data into ``. . N.B. This is called on every rank.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\checkpoint\\planner.py",
    "ast_data": "FunctionDef name:set_up_planner arg:self arg:state_dict arg:metadata arg:is_coordinator arguments arg arg arg arg"
  },
  {
    "library": "django",
    "name": "get_related_updates",
    "source_code": "def get_related_updates(self):\n    if not self.related_updates:\n        return []\n    result = []\n    for model, values in self.related_updates.items():\n        query = UpdateQuery(model)\n        query.values = values\n        if self.related_ids is not None:\n            query.add_filter('pk__in', self.related_ids[model])\n        result.append(query)\n    return result",
    "docstring": "Return a list of query objects: one for each update required to an ancestor model. Each query will have the same filtering conditions as the current query but will only update a single table.",
    "type": "method",
    "file_path": "django\\django\\db\\models\\sql\\subqueries.py",
    "ast_data": "FunctionDef name:get_related_updates arg:self arguments arg If Return return:no Assign For Call Assign Call Assign If Compare Call Call Return return:yes"
  },
  {
    "library": "kornia",
    "name": "batch_size",
    "source_code": "@property\ndef batch_size(self) -> int:\n    return self.rectified_left_camera.shape[0]",
    "docstring": "Return the batch size of the storage. Returns: scalar with the batch size",
    "type": "method",
    "file_path": "kornia\\kornia\\geometry\\camera\\stereo.py",
    "ast_data": "FunctionDef name:batch_size arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_numpy",
    "source_code": "def _numpy(self):\n    if not self._is_eager():\n        raise ValueError('SparseTensor.numpy() is only supported in eager mode.')\n    arr = np.zeros(self.dense_shape, dtype=self.dtype.as_numpy_dtype())\n    for i, v in zip(self.indices, self.values):\n        arr[tuple(i)] = v\n    return arr",
    "docstring": "Returns a numpy with the values for this . Requires that this was constructed in eager execution mode.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\sparse_tensor.py",
    "ast_data": "FunctionDef name:_numpy arg:self arguments arg If Call Raise Call Assign Call Call For Call Assign Call Return return:yes"
  },
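Illustrative note: the scatter loop above, shown standalone in plain NumPy with hypothetical COO-style components, to make the densification step concrete.

```python
import numpy as np

# COO-style components of a sparse 3x2 tensor (illustrative values).
indices = [(0, 1), (2, 0)]
values = [7, 9]
dense_shape = (3, 2)

arr = np.zeros(dense_shape, dtype=np.int64)
for i, v in zip(indices, values):
    arr[tuple(i)] = v  # scatter each value into its dense position
print(arr)
# [[0 7]
#  [0 0]
#  [9 0]]
```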
  {
    "library": "tensorflow",
    "name": "ZerosLikeForExit",
    "source_code": "def ZerosLikeForExit(self, val):\n    val_shape = val.get_shape()\n    forward_ctxt = val.op._get_control_flow_context()\n    outer_forward_ctxt = forward_ctxt.outer_context\n    if outer_forward_ctxt:\n        outer_forward_ctxt = outer_forward_ctxt.GetWhileContext()\n    outer_grad_state = None\n    if outer_forward_ctxt:\n        outer_grad_state = self._map.get(outer_forward_ctxt)\n    if outer_grad_state:\n        if val_shape.is_fully_defined():\n            outer_grad_state.grad_context.Enter()\n            result = array_ops.zeros(val_shape.dims, val.dtype)\n            outer_grad_state.grad_context.Exit()\n        else:\n            forward_ctxt.outer_context.Enter()\n            shape = array_ops.shape_internal(val, optimize=False)\n            forward_ctxt.outer_context.Exit()\n            history_shape = outer_grad_state.AddForwardAccumulator(shape)\n            outer_grad_ctxt = outer_grad_state.grad_context\n            outer_grad_ctxt.Enter()\n            real_shape = outer_grad_state.AddBackpropAccumulatedValue(history_shape, shape)\n            result = array_ops.zeros(real_shape, val.dtype)\n            outer_grad_ctxt.Exit()\n    elif val_shape.is_fully_defined():\n        result = array_ops.zeros(val_shape.dims, val.dtype)\n    else:\n        result = array_ops.zeros_like(val, optimize=False)\n    return result",
    "docstring": "Create zeros_like gradient for a loop exit. If the result of a loop variable is not used but is involved in computing the result of some needed loop variable, we create a zero-valued tensor that is fed as gradient for the Exit node of that loop variable. Note that val.op is an Exit, and this method must be called in the control flow context where gradients() is called. Args: val: The output tensor of an Exit op. Returns: A zero tensor of the same shape of val.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\control_flow_state.py",
    "ast_data": "FunctionDef name:ZerosLikeForExit arg:self arg:val arguments arg arg Assign Call Assign Call Assign If Assign Call Assign If Assign Call If If Call Call Assign Call Call Call Assign Call Call Assign Call Assign Call Assign Call Assign Call Call If Call Assign Call Assign Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "get_figure",
    "source_code": "def get_figure(self, root=False):\n    if root and self._parent_figure is not None:\n        return self._parent_figure.get_figure(root=True)\n    return self._parent_figure",
    "docstring": "Return the or instance the artist belongs to. Parameters ---------- root : bool, default=False If False, return the (Sub)Figure this artist is on. If True, return the root Figure for a nested tree of SubFigures.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\artist.py",
    "ast_data": "FunctionDef name:get_figure arg:self arg:root arguments arg arg If BoolOp Compare Return return:yes Call Return return:yes"
  },
  {
    "library": "numpy",
    "name": "__call__",
    "source_code": "def __call__(self, a, b):\n    return where(self.compare(a, b), a, b)",
    "docstring": "Executes the call behavior.",
    "type": "method",
    "file_path": "numpy\\numpy\\ma\\core.py",
    "ast_data": "FunctionDef name:__call__ arg:self arg:a arg:b arguments arg arg arg Return return:yes Call Call"
  },
  {
    "library": "pandas",
    "name": "save_to_buffer",
    "source_code": "def save_to_buffer(string: str, buf: FilePath | WriteBuffer[str] | None=None, encoding: str | None=None) -> str | None:\n    with _get_buffer(buf, encoding=encoding) as fd:\n        fd.write(string)\n        if buf is None:\n            return fd.getvalue()\n        return None",
    "docstring": "Perform serialization. Write to buf or return as string if buf is None.",
    "type": "function",
    "file_path": "pandas\\pandas\\io\\formats\\format.py",
    "ast_data": "FunctionDef name:save_to_buffer arg:string arg:buf arg:encoding arguments arg arg arg With Call Call If Compare Return return:yes Call Return return:no"
  },
  {
    "library": "pytorch",
    "name": "_restore_state_dict",
    "source_code": "def _restore_state_dict(original_module: torch.nn.Module, traced_module: torch.fx.GraphModule) -> None:\n    param_buffer_table = _get_param_buffer_mapping(original_module, traced_module)\n    for name, fqn in param_buffer_table.items():\n        param_buffer_table[name] = fqn.replace('.', '_')\n    for name, fqn in param_buffer_table.items():\n        if not hasattr(traced_module, name):\n            continue\n        attr = getattr(traced_module, name)\n        if isinstance(attr, torch.Tensor) and (not isinstance(attr, torch.nn.Parameter)):\n            traced_module.register_buffer(fqn, attr)\n        else:\n            setattr(traced_module, fqn, attr)\n        delattr(traced_module, name)\n    for node in traced_module.graph.nodes:\n        if node.op == 'get_attr':\n            attr_name = node.target\n            if attr_name in param_buffer_table:\n                node.target = param_buffer_table[attr_name]\n    traced_module.recompile()",
    "docstring": "Restores the state dict of the traced module to that of the original module.",
    "type": "function",
    "file_path": "pytorch\\torch\\export\\_trace.py",
    "ast_data": "FunctionDef name:_restore_state_dict arg:original_module arg:traced_module arguments arg arg Assign Call For Call Assign Call For Call If Call Assign Call If BoolOp Call Call Call Call Call For If Compare Assign If Compare Assign Call"
  },
  {
    "library": "pytorch",
    "name": "handle_leaf",
    "source_code": "def handle_leaf(self, tree_, node, indent, unsafe_leaves):\n    value = tree_.value[node][0][0]\n    return f'{indent}return {str(value)}'",
    "docstring": "Generates the code for a leaf node. This is just the value predicted by the regression tree.",
    "type": "method",
    "file_path": "pytorch\\torchgen\\_autoheuristic\\train_regression.py",
    "ast_data": "FunctionDef name:handle_leaf arg:self arg:tree_ arg:node arg:indent arg:unsafe_leaves arguments arg arg arg arg arg Assign Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_should_pack",
    "source_code": "def _should_pack(arg):\n    return isinstance(arg, list)",
    "docstring": "Determines whether the caller needs to pack the argument in a tuple. If user-defined function returns a list of tensors, and and would conspire to attempt to stack those tensors into a single tensor because the tf.data version of does not recurse into lists. Since it is more likely that the list arose from returning the result of an operation (such as ) that returns a list of not-necessarily-stackable tensors, we treat the returned value as a instead. A user wishing to pack the return value into a single tensor can use an explicit before returning. Args: arg: argument to check Returns: Indication of whether the caller needs to pack the argument in a tuple.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\data\\ops\\structured_function.py",
    "ast_data": "FunctionDef name:_should_pack arg:arg arguments arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "from_dict",
    "source_code": "@classmethod\ndef from_dict(cls, fuse_custom_config_dict: dict[str, Any]) -> FuseCustomConfig:\n    conf = cls()\n    conf.set_preserved_attributes(fuse_custom_config_dict.get(PRESERVED_ATTRIBUTES_DICT_KEY, []))\n    return conf",
    "docstring": "Create a `` This function is primarily for backward compatibility and may be removed in the future.",
    "type": "method",
    "file_path": "pytorch\\torch\\ao\\quantization\\fx\\custom_config.py",
    "ast_data": "FunctionDef name:from_dict arg:cls arg:fuse_custom_config_dict arguments arg arg Assign Call Call Call Return return:yes"
  },
  {
    "library": "scipy",
    "name": "svd_reduce",
    "source_code": "def svd_reduce(self, max_rank, to_retain=None):\n    if self.collapsed is not None:\n        return\n    p = max_rank\n    if to_retain is not None:\n        q = to_retain\n    else:\n        q = p - 2\n    if self.cs:\n        p = min(p, len(self.cs[0]))\n    q = max(0, min(q, p - 1))\n    m = len(self.cs)\n    if m < p:\n        return\n    C = np.array(self.cs).T\n    D = np.array(self.ds).T\n    D, R = qr(D, mode='economic')\n    C = dot(C, R.T.conj())\n    U, S, WH = svd(C, full_matrices=False)\n    C = dot(C, inv(WH))\n    D = dot(D, WH.T.conj())\n    for k in range(q):\n        self.cs[k] = C[:, k].copy()\n        self.ds[k] = D[:, k].copy()\n    del self.cs[q:]\n    del self.ds[q:]",
    "docstring": "Reduce the rank of the matrix by retaining some SVD components. This corresponds to the \"Broyden Rank Reduction Inverse\" algorithm described in [1]_. Note that the SVD decomposition can be done by solving only a problem whose size is the effective rank of this matrix, which is viable even for large problems. Parameters ---------- max_rank : int Maximum rank of this matrix after reduction. to_retain : int, optional Number of SVD components to retain when reduction is done (ie. rank > max_rank). Default is ``. References ---------- .. [1] B.A. van der Rotten, PhD thesis, \"A limited memory Broyden method to solve high-dimensional systems of nonlinear equations\". Mathematisch Instituut, Universiteit Leiden, The Netherlands (2003).",
    "type": "method",
    "file_path": "scipy\\scipy\\optimize\\_nonlin.py",
    "ast_data": "FunctionDef name:svd_reduce arg:self arg:max_rank arg:to_retain arguments arg arg arg If Compare Return return:no Assign If Compare Assign Assign If Assign Call Call Assign Call Call Assign Call If Compare Return return:no Assign Call Assign Call Assign Call Assign Call Call Assign Call Assign Call Call Assign Call Call For Call Assign Call Assign Call"
  },
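Illustrative note: the truncation at the heart of `svd_reduce`, shown standalone with `scipy.linalg.svd`; the random matrix and retained rank `q` are assumptions for illustration.

```python
import numpy as np
from scipy.linalg import svd

rng = np.random.default_rng(0)
A = rng.normal(size=(6, 4))

U, s, Vh = svd(A, full_matrices=False)
q = 2
A_q = (U[:, :q] * s[:q]) @ Vh[:q]          # best rank-q approximation of A
print(np.linalg.matrix_rank(A_q))          # 2
print(np.linalg.norm(A - A_q, 2), s[q])    # spectral-norm residual equals s[q]
```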
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, func, lower_control_flow, aggressive_inlining, variable_names_allowlist=None, variable_names_denylist=None, session=None):\n    self._session = session\n    session.run(variables.global_variables_initializer())\n    for op in ops.get_default_graph().get_collection(VAR_ASSIGN_COLLECTION):\n        session.run(op)\n    super(_FunctionConverterDataInGraph, self).__init__(func, lower_control_flow, aggressive_inlining, variable_names_allowlist, variable_names_denylist)",
    "docstring": "Creates the conversion data for the given function. Args: func: ConcreteFunction. lower_control_flow: Boolean indicating whether or not to lower control flow ops such as If and While. aggressive_inlining: Boolean indicating whether or not to do aggressive function inlining (might be unsafe if function has stateful ops, not properly connected to control outputs). variable_names_allowlist: The set of variable names to convert (by default, all variables are converted). variable_names_denylist: The set of variable names to omit converting to constants. session: Session object.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\convert_to_constants.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:func arg:lower_control_flow arg:aggressive_inlining arg:variable_names_allowlist arg:variable_names_denylist arg:session arguments arg arg arg arg arg arg arg Assign Call Call For Call Call Call Call Call"
  },
  {
    "library": "scipy",
    "name": "gmres_loose",
    "source_code": "def gmres_loose(A, b, tol):\n    b = np.asarray(b)\n    min_tol = 1000 * np.sqrt(b.size) * np.finfo(b.dtype).eps\n    return gmres(A, b, rtol=max(tol, min_tol), atol=0)",
    "docstring": "gmres with looser termination condition.",
    "type": "function",
    "file_path": "scipy\\scipy\\sparse\\linalg\\_eigen\\arpack\\arpack.py",
    "ast_data": "FunctionDef name:gmres_loose arg:A arg:b arg:tol arguments arg arg arg Assign Call Assign Call Call Return return:yes Call Call"
  },
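Illustrative note: `gmres_loose` simply forwards to `scipy.sparse.linalg.gmres` with a floored relative tolerance; a minimal sketch of the same call (the `rtol` keyword assumes a recent SciPy, matching the source above).

```python
import numpy as np
from scipy.sparse.linalg import gmres

A = np.array([[4.0, 1.0], [1.0, 3.0]])
b = np.array([1.0, 2.0])

# The same floor applied above: never ask for tighter than ~1000*sqrt(n)*eps.
min_tol = 1000 * np.sqrt(b.size) * np.finfo(b.dtype).eps
x, info = gmres(A, b, rtol=max(1e-12, min_tol), atol=0)
print(x, info)  # info == 0 signals convergence
```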
  {
    "library": "tensorflow",
    "name": "deserialize",
    "source_code": "@dispatch.add_dispatch_support\ndef deserialize(name, custom_objects=None):\n    globs = globals()\n    advanced_activations_globs = advanced_activations.get_globals()\n    for key, val in advanced_activations_globs.items():\n        if key not in globs:\n            globs[key] = val\n    return deserialize_keras_object(name, module_objects=globs, custom_objects=custom_objects, printable_module_name='activation function')",
    "docstring": "Returns activation function given a string identifier. Args: name: The name of the activation function. custom_objects: Optional dictionary listing user-provided activation functions. Returns: Corresponding activation function. For example: >>> tf.keras.activations.deserialize('linear') >>> tf.keras.activations.deserialize('sigmoid') >>> tf.keras.activations.deserialize('abcd') Traceback (most recent call last): ... ValueError: Unknown activation function:abcd Raises: ValueError: if the input string does not denote any defined Tensorflow activation function.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\activations.py",
    "ast_data": "FunctionDef name:deserialize arg:name arg:custom_objects arguments arg arg Assign Call Assign Call For Call If Compare Assign Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "_split_sparse_columns",
    "source_code": "def _split_sparse_columns(arff_data: ArffSparseDataType, include_columns: List) -> ArffSparseDataType:\n    arff_data_new: ArffSparseDataType = (list(), list(), list())\n    reindexed_columns = {column_idx: array_idx for array_idx, column_idx in enumerate(include_columns)}\n    for val, row_idx, col_idx in zip(arff_data[0], arff_data[1], arff_data[2]):\n        if col_idx in include_columns:\n            arff_data_new[0].append(val)\n            arff_data_new[1].append(row_idx)\n            arff_data_new[2].append(reindexed_columns[col_idx])\n    return arff_data_new",
    "docstring": "Obtains several columns from sparse ARFF representation. Additionally, the column indices are re-labelled, given the columns that are not included. (e.g., when including [1, 2, 3], the columns will be relabelled to [0, 1, 2]). Parameters ---------- arff_data : tuple A tuple of three lists of equal size; first list indicating the value, second the x coordinate and the third the y coordinate. include_columns : list A list of columns to include. Returns ------- arff_data_new : tuple Subset of arff data with only the include columns indicated by the include_columns argument.",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\datasets\\_arff_parser.py",
    "ast_data": "FunctionDef name:_split_sparse_columns arg:arff_data arg:include_columns arguments arg arg Call Call Call Assign Call For Call If Compare Call Call Call Return return:yes"
  },
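Illustrative note: an inline re-statement of the column-selection-and-relabelling logic above on a tiny hypothetical sparse ARFF triple, to make the re-indexing concrete.

```python
# values, row indices, column indices of a sparse ARFF payload (illustrative)
arff_data = ([1.0, 2.0, 3.0], [0, 0, 1], [0, 2, 2])

include_columns = [2]
reindexed = {c: i for i, c in enumerate(include_columns)}
new = ([], [], [])
for val, row, col in zip(*arff_data):
    if col in include_columns:
        new[0].append(val)
        new[1].append(row)
        new[2].append(reindexed[col])  # column 2 is relabelled to column 0
print(new)  # ([2.0, 3.0], [0, 1], [0, 0])
```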
  {
    "library": "tensorflow",
    "name": "_py_not",
    "source_code": "def _py_not(a):\n    return not a",
    "docstring": "Default Python implementation of the \"not_\" operator.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\autograph\\operators\\logical.py",
    "ast_data": "FunctionDef name:_py_not arg:a arguments arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_CorrCholesky",
    "source_code": "class _CorrCholesky(Constraint):\n    event_dim = 2\n\n    def check(self, value):\n        tol = torch.finfo(value.dtype).eps * value.size(-1) * 10\n        row_norm = torch.linalg.norm(value.detach(), dim=-1)\n        unit_row_norm = (row_norm - 1.0).abs().le(tol).all(dim=-1)\n        return _LowerCholesky().check(value) & unit_row_norm",
    "docstring": "Constrain to lower-triangular square matrices with positive diagonals and each row vector being of unit length.",
    "type": "class",
    "file_path": "pytorch\\torch\\distributions\\constraints.py",
    "ast_data": "ClassDef name:_CorrCholesky Assign FunctionDef name:check arg:self arg:value arguments arg arg Assign Call Call Assign Call Call Assign Call Call Call Return return:yes Call Call"
  },
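Illustrative note: the constraint above is exposed as the `constraints.corr_cholesky` singleton; a minimal check with a hand-built factor whose rows have unit norm.

```python
import torch
from torch.distributions import constraints

# Lower triangular, positive diagonal, unit-norm rows -> a valid Cholesky
# factor of a correlation matrix.
L = torch.tensor([[1.0, 0.0],
                  [0.6, 0.8]])
print(constraints.corr_cholesky.check(L))  # tensor(True)
print((L @ L.T).diagonal())                # unit diagonal, as a correlation matrix requires
```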
  {
    "library": "scipy",
    "name": "_QuadraticModel",
    "source_code": "class _QuadraticModel(Model):\n\n    def __init__(self):\n        super().__init__(_quadratic, fjacd=_quad_fjd, fjacb=_quad_fjb, estimate=_quad_est, meta={'name': 'Quadratic', 'equ': 'y = B_0*x**2 + B_1*x + B_2', 'TeXequ': '$y = \\\\beta_0 x^2 + \\\\beta_1 x + \\\\beta_2'})",
    "docstring": "Quadratic model This model is defined by :math: Examples -------- We can calculate orthogonal distance regression with a quadratic model: >>> from scipy import odr >>> import numpy as np >>> x = np.linspace(0.0, 5.0) >>> y = 1.0 * x ** 2 + 2.0 * x + 3.0 >>> data = odr.Data(x, y) >>> odr_obj = odr.ODR(data, odr.quadratic) >>> output = odr_obj.run() >>> print(output.beta) [1. 2. 3.]",
    "type": "class",
    "file_path": "scipy\\scipy\\odr\\_models.py",
    "ast_data": "ClassDef name:_QuadraticModel FunctionDef name:__init__ arg:self arguments arg Call Call"
  },
  {
    "library": "kornia",
    "name": "zca_whiten",
    "source_code": "def zca_whiten(inp: Tensor, dim: int=0, unbiased: bool=True, eps: float=1e-06) -> Tensor:\n    if not isinstance(inp, Tensor):\n        raise TypeError(f'Input type is not a Tensor. Got {type(inp)}')\n    if not isinstance(eps, float):\n        raise TypeError(f'eps type is not a float. Got{type(eps)}')\n    if not isinstance(unbiased, bool):\n        raise TypeError(f'unbiased type is not bool. Got{type(unbiased)}')\n    if not isinstance(dim, int):\n        raise TypeError(f\"Argument 'dim' must be of type int. Got {type(dim)}\")\n    transform, mean, _ = zca_mean(inp, dim, unbiased, eps, False)\n    inp_whiten = linear_transform(inp, transform, mean, dim)\n    return inp_whiten",
    "docstring": "Apply ZCA whitening transform. See :class: for details. Args: inp: input data tensor. dim: Specifies the dimension that serves as the samples dimension. unbiased: Whether to use the unbiased estimate of the covariance matrix. eps: a small number used for numerical stability. Returns: Whiten Input data. .. note:: See a working example __. Examples: >>> x = torch.tensor([[0,1],[1,0],[-1,0]], dtype = torch.float32) >>> zca_whiten(x) tensor([[ 0.0000, 1.1547], [ 1.0000, -0.5773], [-1.0000, -0.5773]])",
    "type": "function",
    "file_path": "kornia\\kornia\\enhance\\zca.py",
    "ast_data": "FunctionDef name:zca_whiten arg:inp arg:dim arg:unbiased arg:eps arguments arg arg arg arg If Call Raise Call Call If Call Raise Call Call If Call Raise Call Call If Call Raise Call Call Assign Call Assign Call Return return:yes"
  },
  {
    "library": "scipy",
    "name": "fac_psd",
    "source_code": "@property\ndef fac_psd(self) -> float:\n    if self.scaling == 'psd':\n        return 1\n    if self._fac_psd is None:\n        self._fac_psd = 1 / np.sqrt(sum(self.win.real ** 2 + self.win.imag ** 2) / self.T)\n    return self._fac_psd",
    "docstring": "Factor to multiply the STFT values by to scale each frequency slice to a power spectral density (PSD). It is 1 if attribute `scale_to`. See Also -------- fac_magnitude: Scaling factor for to a magnitude spectrum. scale_to: Scale window to obtain 'magnitude' or 'psd' scaling. scaling: Normalization applied to the window function. ShortTimeFFT: Class this property belongs to.",
    "type": "method",
    "file_path": "scipy\\scipy\\signal\\_short_time_fft.py",
    "ast_data": "FunctionDef name:fac_psd arg:self arguments arg If Compare Return return:yes If Compare Assign Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "QuantizationSpecBase",
    "source_code": "class QuantizationSpecBase(ABC):\n    pass",
    "docstring": "Base class for different types of quantization specs that allows users to specify how to quantize a Tensor (input/output of a Node) in the model",
    "type": "class",
    "file_path": "pytorch\\torch\\ao\\quantization\\quantizer\\quantizer.py",
    "ast_data": "ClassDef name:QuantizationSpecBase"
  },
  {
    "library": "scipy",
    "name": "rvs",
    "source_code": "def rvs(self, mean=None, rowcov=1, colcov=1, size=1, random_state=None):\n    size = int(size)\n    dims, mean, rowcov, colcov = self._process_parameters(mean, rowcov, colcov)\n    rowchol = scipy.linalg.cholesky(rowcov, lower=True)\n    colchol = scipy.linalg.cholesky(colcov, lower=True)\n    random_state = self._get_random_state(random_state)\n    std_norm = random_state.standard_normal(size=(dims[1], size, dims[0])).transpose(1, 2, 0)\n    out = mean + np.einsum('jp,ipq,kq->ijk', rowchol, std_norm, colchol, optimize=True)\n    if size == 1:\n        out = out.reshape(mean.shape)\n    return out",
    "docstring": "Draw random samples from a matrix normal distribution. Parameters ---------- %(_matnorm_doc_default_callparams)s size : integer, optional Number of samples to draw (default 1). %(_doc_random_state)s Returns ------- rvs : ndarray or scalar Random variates of size (, ), where is the dimension of the random matrices. Notes ----- %(_matnorm_doc_callparams_note)s",
    "type": "method",
    "file_path": "scipy\\scipy\\stats\\_multivariate.py",
    "ast_data": "FunctionDef name:rvs arg:self arg:mean arg:rowcov arg:colcov arg:size arg:random_state arguments arg arg arg arg arg arg Assign Call Assign Call Assign Call Assign Call Assign Call Assign Call Call Assign Call If Compare Assign Call Return return:yes"
  },
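Illustrative note: a minimal sketch of the public `scipy.stats.matrix_normal` API backing the `rvs` method above, showing the `size == 1` reshape behaviour (the zero mean is an assumption for illustration).

```python
import numpy as np
from scipy.stats import matrix_normal

M = np.zeros((2, 3))
rv = matrix_normal(mean=M)           # identity row and column covariances
one = rv.rvs(size=1, random_state=0)
many = rv.rvs(size=5, random_state=0)
print(one.shape, many.shape)         # (2, 3) (5, 2, 3)
```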
  {
    "library": "cherrypy",
    "name": "_engine_namespace_handler",
    "source_code": "def _engine_namespace_handler(k, v):\n    engine = cherrypy.engine\n    if k in {'SIGHUP', 'SIGTERM'}:\n        engine.subscribe(k, v)\n        return\n    if '.' in k:\n        plugin, attrname = k.split('.', 1)\n        plugin = getattr(engine, plugin)\n        op = 'subscribe' if v else 'unsubscribe'\n        sub_unsub = getattr(plugin, op, None)\n        if attrname == 'on' and callable(sub_unsub):\n            sub_unsub()\n            return\n        setattr(plugin, attrname, v)\n    else:\n        setattr(engine, k, v)",
    "docstring": "Config handler for the \"engine\" namespace.",
    "type": "function",
    "file_path": "cherrypy\\cherrypy\\_cpconfig.py",
    "ast_data": "FunctionDef name:_engine_namespace_handler arg:k arg:v arguments arg arg Assign If Compare Call Return return:no If Compare Assign Call Assign Call Assign Assign Call If BoolOp Compare Call Call Return return:no Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_serialize_nested_config",
    "source_code": "def _serialize_nested_config(config):\n\n    def _serialize_fn(obj):\n        if callable(obj):\n            return generic_utils.serialize_keras_object(obj)\n        return obj\n    return nest.map_structure(_serialize_fn, config)",
    "docstring": "Serialized a nested structure of Keras objects.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\saving\\saving_utils.py",
    "ast_data": "FunctionDef name:_serialize_nested_config arg:config arguments arg FunctionDef name:_serialize_fn arg:obj arguments arg If Call Return return:yes Call Return return:yes Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "help",
    "source_code": "def help(self, *args, **kwargs):\n    print(self._help(*args))",
    "docstring": "Print docstring for the function corresponding to inputs",
    "type": "method",
    "file_path": "pytorch\\torch\\fx\\experimental\\unification\\multipledispatch\\dispatcher.py",
    "ast_data": "FunctionDef name:help arg:self arguments arg arg arg Call Call"
  },
  {
    "library": "sphinx",
    "name": "Include",
    "source_code": "class Include(BaseInclude, SphinxDirective):\n\n    def run(self) -> Sequence[Node]:\n\n        def _insert_input(include_lines: list[str], source: str) -> None:\n            text = '\\n'.join(include_lines[:-2])\n            path = Path(relpath(Path(source).resolve(), start=self.env.srcdir))\n            docname = self.env.docname\n            arg = [text]\n            self.env.events.emit('include-read', path, docname, arg)\n            text = arg[0]\n            include_lines = text.splitlines() + include_lines[-2:]\n            return StateMachine.insert_input(self.state_machine, include_lines, source)\n        if self.env.events.listeners.get('include-read'):\n            self.state_machine.insert_input = _insert_input\n        if self.arguments[0].startswith('<') and self.arguments[0].endswith('>'):\n            return super().run()\n        _rel_filename, filename = self.env.relfn2path(self.arguments[0])\n        self.arguments[0] = str(filename)\n        self.env.note_included(filename)\n        return super().run()",
    "docstring": "Like the standard \"Include\" directive, but interprets absolute paths \"correctly\", i.e. relative to source directory.",
    "type": "class",
    "file_path": "sphinx\\sphinx\\directives\\other.py",
    "ast_data": "ClassDef name:Include FunctionDef name:run arg:self arguments arg FunctionDef name:_insert_input arg:include_lines arg:source arguments arg arg Assign Call Assign Call Call Call Call Assign Assign Call Assign Assign Call Return return:yes Call If Call Assign If BoolOp Call Call Return return:yes Call Call Assign Call Assign Call Call Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_get_tensors_from_trackable",
    "source_code": "def _get_tensors_from_trackable(trackable_data: _TrackableData, call_with_mapped_captures: Union[Callable[..., Any], None], object_graph_proto: trackable_object_graph_pb2.TrackableObjectGraph) -> Dict[str, Any]:\n    trackable = trackable_data.object_to_save\n    save_fn = trackable._serialize_to_tensors\n    if call_with_mapped_captures and isinstance(save_fn, core.ConcreteFunction):\n        ret_tensor_dict = call_with_mapped_captures(save_fn, [])\n    else:\n        ret_tensor_dict = save_fn()\n    tensor_dict = {}\n    for tensor_name, maybe_tensor in ret_tensor_dict.items():\n        local_name = trackable_utils.escape_local_name(tensor_name)\n        checkpoint_key = trackable_utils.checkpoint_key(trackable_data.object_name, local_name)\n        tensor_dict[checkpoint_key] = maybe_tensor\n        if isinstance(maybe_tensor, saveable_object_lib.SaveSpec):\n            maybe_tensor.name = checkpoint_key\n            maybe_tensor.slice_spec = ''\n        if object_graph_proto is not None:\n            object_graph_proto.nodes[trackable_data.node_id].attributes.add(name=local_name, checkpoint_key=checkpoint_key, full_name=util.get_full_name(trackable))\n    return tensor_dict",
    "docstring": "Gets tensors to serialize from a Trackable.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\checkpoint\\save_util.py",
    "ast_data": "FunctionDef name:_get_tensors_from_trackable arg:trackable_data arg:call_with_mapped_captures arg:object_graph_proto arguments arg arg arg Assign Assign If BoolOp Call Assign Call Assign Call Assign For Call Assign Call Assign Call Assign If Call Assign Assign If Compare Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "stats",
    "source_code": "def stats(self, inclusive: bool=False) -> FunctionCounts:\n    return self.stmt_inclusive_stats if inclusive else self.stmt_exclusive_stats",
    "docstring": "Returns detailed function counts. Conceptually, the FunctionCounts returned can be thought of as a tuple of (count, path_and_function_name) tuples. matches the semantics of callgrind. If True, the counts include instructions executed by children. is useful for identifying hot spots in code; is useful for reducing noise when diffing counts from two different runs. (See CallgrindStats.delta(...) for more details)",
    "type": "method",
    "file_path": "pytorch\\torch\\utils\\benchmark\\utils\\valgrind_wrapper\\timer_interface.py",
    "ast_data": "FunctionDef name:stats arg:self arg:inclusive arguments arg arg Return return:yes"
  },
  {
    "library": "kornia",
    "name": "_fetch_repo_contents",
    "source_code": "@staticmethod\ndef _fetch_repo_contents(folder: str) -> list[dict[str, Any]]:\n    url = f'https://huggingface.co/api/models/kornia/ONNX_models/tree/main/{folder}'\n    response = requests.get(url, timeout=10)\n    if response.status_code == 200:\n        return response.json()\n    else:\n        raise ValueError(f'Failed to fetch repository contents: {response.status_code}')",
    "docstring": "Fetch the contents of the Hugging Face repository using the Hugging Face API. Returns: A list of all files in the repository as dictionaries containing file details.",
    "type": "method",
    "file_path": "kornia\\kornia\\onnx\\utils.py",
    "ast_data": "FunctionDef name:_fetch_repo_contents arg:folder arguments arg Assign Assign Call If Compare Return return:yes Call Raise Call"
  },
  {
    "library": "numpy",
    "name": "Block3D",
    "source_code": "class Block3D(Benchmark):\n    params = [[1, 10, 100], ['block', 'copy']]\n    param_names = ['n', 'mode']\n\n    def setup(self, n, mode):\n        self.a000 = np.ones((2 * n, 2 * n, 2 * n), int) * 1\n        self.a100 = np.ones((3 * n, 2 * n, 2 * n), int) * 2\n        self.a010 = np.ones((2 * n, 3 * n, 2 * n), int) * 3\n        self.a001 = np.ones((2 * n, 2 * n, 3 * n), int) * 4\n        self.a011 = np.ones((2 * n, 3 * n, 3 * n), int) * 5\n        self.a101 = np.ones((3 * n, 2 * n, 3 * n), int) * 6\n        self.a110 = np.ones((3 * n, 3 * n, 2 * n), int) * 7\n        self.a111 = np.ones((3 * n, 3 * n, 3 * n), int) * 8\n        self.block = [[[self.a000, self.a001], [self.a010, self.a011]], [[self.a100, self.a101], [self.a110, self.a111]]]\n        self.arr_list = [a for two_d in self.block for one_d in two_d for a in one_d]\n\n    def time_3d(self, n, mode):\n        if mode == 'block':\n            np.block(self.block)\n        else:\n            [arr.copy() for arr in self.arr_list]\n    time_3d.benchmark_name = 'bench_shape_base.Block.time_3d'",
    "docstring": "This benchmark concatenates an array of size ``",
    "type": "class",
    "file_path": "numpy\\benchmarks\\benchmarks\\bench_shape_base.py",
    "ast_data": "ClassDef name:Block3D Assign Assign FunctionDef name:setup arg:self arg:n arg:mode arguments arg arg arg Assign Call Assign Call Assign Call Assign Call Assign Call Assign Call Assign Call Assign Call Assign Assign FunctionDef name:time_3d arg:self arg:n arg:mode arguments arg arg arg If Compare Call Call Assign"
  },
  {
    "library": "pandas",
    "name": "__deepcopy__",
    "source_code": "@final\ndef __deepcopy__(self, memo=None) -> Self:\n    return self.copy(deep=True)",
    "docstring": "Parameters ---------- memo, default None Standard signature. Unused",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\indexes\\base.py",
    "ast_data": "FunctionDef name:__deepcopy__ arg:self arg:memo arguments arg arg Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "Wavy",
    "source_code": "class Wavy(Benchmark):\n    change_dimensionality = True\n\n    def __init__(self, dimensions=2):\n        Benchmark.__init__(self, dimensions)\n        self._bounds = list(zip([-pi] * self.N, [pi] * self.N))\n        self.global_optimum = [[0.0 for _ in range(self.N)]]\n        self.fglob = 0.0\n\n    def fun(self, x, *args):\n        self.nfev += 1\n        return 1.0 - 1.0 / self.N * sum(cos(10 * x) * exp(-x ** 2.0 / 2.0))",
    "docstring": "Wavy objective function. This class defines the W / Wavy [1]_ global optimization problem. This is a multimodal minimization problem defined as follows: .. math:: f_{\\text{Wavy}}(x) = 1 - \\frac{1}{n} \\sum_{i=1}^{n} \\cos(kx_i)e^{-\\frac{x_i^2}{2}} Where, in this exercise, :math:. The number of local minima is :math: and :math: for odd and even :math: respectively. Here, :math: for :math:. *Global optimum*: :math: for :math: .. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions For Global Optimization Problems Int. Journal of Mathematical Modelling and Numerical Optimisation, 2013, 4, 150-194.",
    "type": "class",
    "file_path": "scipy\\benchmarks\\benchmarks\\go_benchmark_functions\\go_funcs_W.py",
    "ast_data": "ClassDef name:Wavy Assign FunctionDef name:__init__ arg:self arg:dimensions arguments arg arg Call Assign Call Call Assign Call Assign FunctionDef name:fun arg:self arg:x arguments arg arg arg Return return:yes Call Call Call"
  },
  {
    "library": "scipy",
    "name": "_check_broadcast_up_to",
    "source_code": "def _check_broadcast_up_to(arr_from, shape_to, name):\n    shape_from = arr_from.shape\n    if len(shape_to) >= len(shape_from):\n        for t, f in zip(shape_to[::-1], shape_from[::-1]):\n            if f != 1 and f != t:\n                break\n        else:\n            if arr_from.size != 1 and arr_from.shape != shape_to:\n                arr_from = np.ones(shape_to, arr_from.dtype) * arr_from\n            return arr_from.ravel()\n    raise ValueError(f'{name} argument must be able to broadcast up to shape {shape_to} but had shape {shape_from}')",
    "docstring": "Helper to check that arr_from broadcasts up to shape_to",
    "type": "function",
    "file_path": "scipy\\scipy\\interpolate\\_interpolate.py",
    "ast_data": "FunctionDef name:_check_broadcast_up_to arg:arr_from arg:shape_to arg:name arguments arg arg arg Assign If Compare Call Call For Call If BoolOp Compare Compare If BoolOp Compare Compare Assign Call Return return:yes Call Raise Call"
  },
  {
    "library": "numpy",
    "name": "AtLeast1D",
    "source_code": "class AtLeast1D(Benchmark):\n\n    def setup(self):\n        self.x = np.array([1, 2, 3])\n        self.zero_d = np.float64(1.0)\n\n    def time_atleast_1d(self):\n        np.atleast_1d(self.x, self.x, self.x)\n\n    def time_atleast_1d_reshape(self):\n        np.atleast_1d(self.zero_d, self.zero_d, self.zero_d)\n\n    def time_atleast_1d_single_argument(self):\n        np.atleast_1d(self.x)",
    "docstring": "Benchmarks for np.atleast_1d",
    "type": "class",
    "file_path": "numpy\\benchmarks\\benchmarks\\bench_shape_base.py",
    "ast_data": "ClassDef name:AtLeast1D FunctionDef name:setup arg:self arguments arg Assign Call Assign Call FunctionDef name:time_atleast_1d arg:self arguments arg Call FunctionDef name:time_atleast_1d_reshape arg:self arguments arg Call FunctionDef name:time_atleast_1d_single_argument arg:self arguments arg Call"
  },
  {
    "library": "tensorflow",
    "name": "dump_size_bytes",
    "source_code": "@property\ndef dump_size_bytes(self):\n    return self._dump_size_bytes",
    "docstring": "Size of the dump file. Unit: byte. Returns: If the dump file exists, size of the dump file, in bytes. If the dump file does not exist, None.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\debug\\lib\\debug_data.py",
    "ast_data": "FunctionDef name:dump_size_bytes arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_ProcessInputMapParam",
    "source_code": "def _ProcessInputMapParam(input_map):\n    if input_map is None:\n        input_map = {}\n    else:\n        if not isinstance(input_map, dict):\n            raise TypeError(f'Argument `input_map` must be a dictionary. Obtained {type(input_map).__name__}')\n        if not all((isinstance(k, compat.bytes_or_text_types) for k in input_map.keys())):\n            raise TypeError(f'All keys for argument `input_map` must be strings. Obtained keys: {list(input_map.keys())}')\n    return input_map",
    "docstring": "Type-checks and possibly canonicalizes .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\importer.py",
    "ast_data": "FunctionDef name:_ProcessInputMapParam arg:input_map arguments arg If Compare Assign If Call Raise Call Call If Call Call Call Raise Call Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "reduce_all",
    "source_code": "@tf_export('math.reduce_all', 'reduce_all', v1=[])\n@dispatch.add_dispatch_support\ndef reduce_all(input_tensor, axis=None, keepdims=False, name=None):\n    keepdims = False if keepdims is None else bool(keepdims)\n    return _may_reduce_to_scalar(keepdims, axis, gen_math_ops._all(input_tensor, _ReductionDims(input_tensor, axis), keepdims, name=name))",
    "docstring": "Computes of elements across dimensions of a tensor. This is the reduction operation for the elementwise op. Reduces along the dimensions given in . Unless is true, the rank of the tensor is reduced by 1 for each of the entries in , which must be unique. If is true, the reduced dimensions are retained with length 1. If is None, all dimensions are reduced, and a tensor with a single element is returned. For example: >>> x = tf.constant([[True, True], [False, False]]) >>> tf.math.reduce_all(x) >>> tf.math.reduce_all(x, 0) >>> tf.math.reduce_all(x, 1) Args: input_tensor: The boolean tensor to reduce. axis: The dimensions to reduce. If (the default), reduces all dimensions. Must be in the range . keepdims: If true, retains reduced dimensions with length 1. name: A name for the operation (optional). Returns: The reduced tensor. @compatibility(numpy) Equivalent to np.all @end_compatibility",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\math_ops.py",
    "ast_data": "FunctionDef name:reduce_all arg:input_tensor arg:axis arg:keepdims arg:name arguments arg arg arg arg Assign Compare Call Return return:yes Call Call Call Call"
  },
  {
    "library": "matplotlib",
    "name": "set_data",
    "source_code": "def set_data(self, values=None, edges=None, baseline=None):\n    if values is None and edges is None and (baseline is None):\n        raise ValueError('Must set *values*, *edges* or *baseline*.')\n    if values is not None:\n        self._values = np.asarray(values)\n    if edges is not None:\n        self._edges = np.asarray(edges)\n    if baseline is not None:\n        self._baseline = np.asarray(baseline)\n    self._update_path()\n    self.stale = True",
    "docstring": "Set values, edges and baseline. Parameters ---------- values : 1D array-like or None Will not update values, if passing None edges : 1D array-like, optional baseline : float, 1D array-like or None",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\patches.py",
    "ast_data": "FunctionDef name:set_data arg:self arg:values arg:edges arg:baseline arguments arg arg arg arg If BoolOp Compare Compare Compare Raise Call If Compare Assign Call If Compare Assign Call If Compare Assign Call Call Assign"
  },
  {
    "library": "scikit-learn",
    "name": "fit",
    "source_code": "def fit(self, X, y, **params):\n    return super().fit(X, y, **params)",
    "docstring": "Fit MultiTaskLasso model with coordinate descent. Fit is on grid of alphas and best alpha estimated by cross-validation. Parameters ---------- X : ndarray of shape (n_samples, n_features) Data. y : ndarray of shape (n_samples, n_targets) Target. Will be cast to X's dtype if necessary. **params : dict, default=None Parameters to be passed to the CV splitter. .. versionadded:: 1.4 Only available if , which can be set by using `Metadata Routing User Guide ` for more details. Returns ------- self : object Returns an instance of fitted model.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\linear_model\\_coordinate_descent.py",
    "ast_data": "FunctionDef name:fit arg:self arg:X arg:y arguments arg arg arg arg Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "register_end_callback",
    "source_code": "def register_end_callback(self, callback: Callable[[], None]) -> Callable[[], None]:\n    self.end_callbacks.append(callback)\n    return callback",
    "docstring": "Register a callback function to be called when the compilation ends. Args: - callback (Callable): The callback function to register.",
    "type": "method",
    "file_path": "pytorch\\torch\\_dynamo\\callback.py",
    "ast_data": "FunctionDef name:register_end_callback arg:self arg:callback arguments arg arg Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "register",
    "source_code": "def register(self):\n    _GLOBAL_DISPATCHERS.append(self)",
    "docstring": "Register this dispatcher as a handler for all ops.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\util\\dispatch.py",
    "ast_data": "FunctionDef name:register arg:self arguments arg Call"
  },
  {
    "library": "pytorch",
    "name": "bfloat16",
    "source_code": "def bfloat16(self):\n    return self._to(torch.bfloat16)",
    "docstring": "Casts this storage to bfloat16 type.",
    "type": "method",
    "file_path": "pytorch\\torch\\storage.py",
    "ast_data": "FunctionDef name:bfloat16 arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "_nonmonotone_line_search_cruz",
    "source_code": "def _nonmonotone_line_search_cruz(f, x_k, d, prev_fs, eta, gamma=0.0001, tau_min=0.1, tau_max=0.5):\n    f_k = prev_fs[-1]\n    f_bar = max(prev_fs)\n    alpha_p = 1\n    alpha_m = 1\n    alpha = 1\n    while True:\n        xp = x_k + alpha_p * d\n        fp, Fp = f(xp)\n        if fp <= f_bar + eta - gamma * alpha_p ** 2 * f_k:\n            alpha = alpha_p\n            break\n        alpha_tp = alpha_p ** 2 * f_k / (fp + (2 * alpha_p - 1) * f_k)\n        xp = x_k - alpha_m * d\n        fp, Fp = f(xp)\n        if fp <= f_bar + eta - gamma * alpha_m ** 2 * f_k:\n            alpha = -alpha_m\n            break\n        alpha_tm = alpha_m ** 2 * f_k / (fp + (2 * alpha_m - 1) * f_k)\n        alpha_p = np.clip(alpha_tp, tau_min * alpha_p, tau_max * alpha_p)\n        alpha_m = np.clip(alpha_tm, tau_min * alpha_m, tau_max * alpha_m)\n    return (alpha, xp, fp, Fp)",
    "docstring": "Nonmonotone backtracking line search as described in [1]_ Parameters ---------- f : callable Function returning a tuple `` is the nonmonotonicity window parameter. eta : float Allowed merit function increase, see [1]_ gamma, tau_min, tau_max : float, optional Search parameters, see [1]_ Returns ------- alpha : float Step length xp : ndarray Next position fp : float Merit function value at next position Fp : ndarray Residual at next position References ---------- [1] \"Spectral residual method without gradient information for solving large-scale nonlinear systems of equations.\" W. La Cruz, J.M. Martinez, M. Raydan. Math. Comp. **75**, 1429 (2006).",
    "type": "function",
    "file_path": "scipy\\scipy\\optimize\\_linesearch.py",
    "ast_data": "FunctionDef name:_nonmonotone_line_search_cruz arg:f arg:x_k arg:d arg:prev_fs arg:eta arg:gamma arg:tau_min arg:tau_max arguments arg arg arg arg arg arg arg arg Assign Assign Call Assign Assign Assign While Assign Assign Call If Compare Assign Assign Assign Assign Call If Compare Assign Assign Assign Call Assign Call Return return:yes"
  },
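Restating the loop above in the docstring's notation (a sketch of the acceptance rule, with \(\bar{f}\) the window maximum and \(f_k\) the current merit value): the step \(\pm\alpha\) is accepted once

```latex
f(x_k \pm \alpha d) \;\le\; \bar{f} + \eta - \gamma \alpha^{2} f(x_k),
```

and otherwise the trial length is contracted via the quadratic-interpolation update, clipped to the safeguard interval, exactly as the `np.clip` calls in the stored source do:

```latex
\alpha_{\text{new}}
  = \operatorname{clip}\!\left(
      \frac{\alpha^{2} f(x_k)}{f(x_k \pm \alpha d) + (2\alpha - 1) f(x_k)},
      \;\tau_{\min}\alpha,\; \tau_{\max}\alpha\right).
```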
  {
    "library": "matplotlib",
    "name": "_get_axis_name",
    "source_code": "def _get_axis_name(self):\n    return next((name for name, axis in self.axes._axis_map.items() if axis is self))",
    "docstring": "Return the axis name.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\axis.py",
    "ast_data": "FunctionDef name:_get_axis_name arg:self arguments arg Return return:yes Call Call Compare"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, funcs, trackable_obj=None):\n    super(TFLiteFrozenGraphConverterV2, self).__init__()\n    self._funcs = funcs\n    self._trackable_obj = trackable_obj\n    self.experimental_lower_to_saved_model = True",
    "docstring": "Constructor for TFLiteConverter. Args: funcs: List of TensorFlow ConcreteFunctions. The list should not contain duplicate elements. trackable_obj: tf.AutoTrackable object associated with . A reference to this object needs to be maintained so that Variables do not get garbage collected since functions have a weak reference to Variables. This is only required when the tf.AutoTrackable object is not maintained by the user (e.g. ).",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\lite\\python\\lite.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:funcs arg:trackable_obj arguments arg arg arg Call Call Assign Assign Assign"
  },
  {
    "library": "matplotlib",
    "name": "_set_cmap",
    "source_code": "def _set_cmap(self, cmap):\n    from matplotlib import cm\n    in_init = self._cmap is None\n    self._cmap = cm._ensure_cmap(cmap)\n    if not in_init:\n        self.changed()",
    "docstring": "Set the colormap for luminance data. Parameters ---------- cmap : or str or None",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\colorizer.py",
    "ast_data": "FunctionDef name:_set_cmap arg:self arg:cmap arguments arg arg Assign Compare Assign Call If Call"
  },
  {
    "library": "django",
    "name": "bound_data",
    "source_code": "def bound_data(self, data, initial):\n    if self.disabled:\n        return initial\n    return data",
    "docstring": "Return the value that should be shown for this field on render of a bound form, given the submitted POST data for the field and the initial data, if any. For most fields, this will simply be data; FileFields need to handle it a bit differently.",
    "type": "method",
    "file_path": "django\\django\\forms\\fields.py",
    "ast_data": "FunctionDef name:bound_data arg:self arg:data arg:initial arguments arg arg arg If Return return:yes Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_SliceGrad",
    "source_code": "@ops.RegisterGradient('Slice')\ndef _SliceGrad(op: ops.Operation, grad):\n    input_vec = op.inputs[0]\n    begin_vec = op.inputs[1]\n    input_rank = array_ops.rank(input_vec)\n    index_dtype = begin_vec.dtype\n    slice_size = array_ops.shape(op.outputs[0], out_type=index_dtype)\n    if control_flow_util.GraphOrParentsInXlaContext(ops.get_default_graph()):\n        return (gen_xla_ops.xla_dynamic_update_slice(array_ops.zeros_like(input_vec), grad, begin_vec), None, None)\n    shape = array_ops_stack.stack([input_rank, 1])\n    before_pad = array_ops.reshape(begin_vec, shape)\n    after_pad = array_ops.reshape(array_ops.shape(input_vec, out_type=index_dtype) - slice_size - begin_vec, shape)\n    paddings = array_ops.concat([before_pad, after_pad], 1)\n    return (array_ops.pad(grad, paddings), None, None)",
    "docstring": "Gradient for Slice op.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\array_grad.py",
    "ast_data": "FunctionDef name:_SliceGrad arg:op arg:grad arguments arg arg Assign Assign Assign Call Assign Assign Call If Call Call Return return:yes Call Call Assign Call Assign Call Assign Call Call Assign Call Return return:yes Call Call"
  },
  {
    "library": "matplotlib",
    "name": "_transform_path",
    "source_code": "def _transform_path(self, subslice=None):\n    if subslice is not None:\n        xy = STEP_LOOKUP_MAP[self._drawstyle](*self._xy[subslice, :].T)\n        _path = Path(np.asarray(xy).T, _interpolation_steps=self._path._interpolation_steps)\n    else:\n        _path = self._path\n    self._transformed_path = TransformedPath(_path, self.get_transform())",
    "docstring": "Put a TransformedPath instance at self._transformed_path; all invalidation of the transform is then handled by the TransformedPath instance.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\lines.py",
    "ast_data": "FunctionDef name:_transform_path arg:self arg:subslice arguments arg arg If Compare Assign Call Assign Call Call Assign Assign Call Call"
  },
  {
    "library": "pytorch",
    "name": "philox_rand_offset",
    "source_code": "def philox_rand_offset(shape):\n    numel = 1\n    for s in shape:\n        numel = numel * s\n    return tensor(numel, dtype=torch.int64)",
    "docstring": "TorchInductor offset calculation differs from PyTorch eager offset calculation for random ops (tl.rand vs torch.rand). In future, we should strive for same impl for tl.rand and torch.rand.",
    "type": "function",
    "file_path": "pytorch\\torch\\_inductor\\lowering.py",
    "ast_data": "FunctionDef name:philox_rand_offset arg:shape arguments arg Assign For Assign Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_load_nodes",
    "source_code": "def _load_nodes(self):\n    nodes, node_setters = self._initialize_loaded_nodes()\n    slot_variable_node_ids = {}\n    for node_id, proto in self._iter_all_nodes():\n        for slot_variable_proto in proto.slot_variables:\n            slot_variable_node_id = slot_variable_proto.slot_variable_node_id\n            slot_variable_node_ids[slot_variable_node_id] = (node_id, slot_variable_proto)\n    for node_id, proto in self._iter_all_nodes():\n        if nodes.get(node_id) is not None:\n            continue\n        elif node_id in slot_variable_node_ids:\n            optimizer_node_id, slot_variable_proto = slot_variable_node_ids[node_id]\n            optimizer_object = nodes[optimizer_node_id]\n            optimized_variable = nodes[slot_variable_proto.original_variable_node_id]\n            slot_variable = optimizer_object.add_slot(var=optimized_variable, slot_name=slot_variable_proto.slot_name)\n            nodes[slot_variable_proto.slot_variable_node_id] = slot_variable\n            node_setters[slot_variable_proto.slot_variable_node_id] = setattr\n        else:\n            node, setter = self._recreate(proto, node_id, nodes)\n            nodes[node_id] = node\n            node_setters[node_id] = setter\n    if 0 not in nodes:\n        nodes[0] = self._recreate_base_user_object()[0]\n    self._nodes = [nodes.get(node_id) for node_id in range(len(self._proto.nodes))]\n    self._node_setters = node_setters",
    "docstring": "Load all saved objects.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\saved_model\\load.py",
    "ast_data": "FunctionDef name:_load_nodes arg:self arguments arg Assign Call Assign For Call For Assign Assign For Call If Compare Call If Compare Assign Assign Assign Assign Call Assign Assign Assign Call Assign Assign If Compare Assign Call Assign Call Call Call Assign"
  },
  {
    "library": "django",
    "name": "debug",
    "source_code": "@register.tag\ndef debug(parser, token):\n    return DebugNode()",
    "docstring": "Output a whole load of debugging information, including the current context and imported modules. Sample usage:: {% debug %}",
    "type": "function",
    "file_path": "django\\django\\template\\defaulttags.py",
    "ast_data": "FunctionDef name:debug arg:parser arg:token arguments arg arg Return return:yes Call"
  },
  {
    "library": "authlib",
    "name": "authenticate_token",
    "source_code": "def authenticate_token(self, request, client):\n    self.check_params(request, client)\n    token = self.query_token(request.form['token'], request.form.get('token_type_hint'))\n    if token and self.check_permission(token, client, request):\n        return token",
    "docstring": "The protected resource calls the introspection endpoint using an HTTP `` value returned from the token endpoint as defined in OAuth 2.0. token_type_hint **OPTIONAL** A hint about the type of the token submitted for introspection.",
    "type": "method",
    "file_path": "authlib\\authlib\\oauth2\\rfc7662\\introspection.py",
    "ast_data": "FunctionDef name:authenticate_token arg:self arg:request arg:client arguments arg arg arg Call Assign Call Call If BoolOp Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "pfor_converter",
    "source_code": "@property\ndef pfor_converter(self) -> 'WhileOp':\n    return self",
    "docstring": "Return a converter for the while loop.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\parallel_for\\pfor.py",
    "ast_data": "FunctionDef name:pfor_converter arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_create_signature_def_map",
    "source_code": "def _create_signature_def_map(model, mode):\n    inputs_dict = {name: x for name, x in zip(model.input_names, model.inputs)}\n    if model.optimizer:\n        targets_dict = {x.name.split(':')[0]: x for x in model._targets if x is not None}\n        inputs_dict.update(targets_dict)\n    outputs_dict = {name: x for name, x in zip(model.output_names, model.outputs)}\n    metrics = saving_utils.extract_model_metrics(model)\n    local_vars = set(ops.get_collection(ops.GraphKeys.LOCAL_VARIABLES))\n    vars_to_add = set()\n    if metrics is not None:\n        for key, value in metrics.items():\n            if isinstance(value, metrics_lib.Metric):\n                vars_to_add.update(value.variables)\n                metrics[key] = (value.result(), value.updates[0])\n    vars_to_add = vars_to_add.difference(local_vars)\n    for v in vars_to_add:\n        ops.add_to_collection(ops.GraphKeys.LOCAL_VARIABLES, v)\n    export_outputs = model_utils.export_outputs_for_mode(mode, predictions=outputs_dict, loss=model.total_loss if model.optimizer else None, metrics=metrics)\n    return model_utils.build_all_signature_defs(inputs_dict, export_outputs=export_outputs, serving_only=mode == mode_keys.ModeKeys.PREDICT)",
    "docstring": "Creates a SignatureDef map from a Keras model.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\saving\\saved_model_experimental.py",
    "ast_data": "FunctionDef name:_create_signature_def_map arg:model arg:mode arguments arg arg Assign Call If Assign Call Compare Call Assign Call Assign Call Assign Call Call Assign Call If Compare For Call If Call Call Assign Call Assign Call For Call Assign Call Return return:yes Call Compare"
  },
  {
    "library": "scikit-learn",
    "name": "_name_estimators",
    "source_code": "def _name_estimators(estimators):\n    names = [estimator if isinstance(estimator, str) else type(estimator).__name__.lower() for estimator in estimators]\n    namecount = defaultdict(int)\n    for est, name in zip(estimators, names):\n        namecount[name] += 1\n    for k, v in list(namecount.items()):\n        if v == 1:\n            del namecount[k]\n    for i in reversed(range(len(estimators))):\n        name = names[i]\n        if name in namecount:\n            names[i] += '-%d' % namecount[name]\n            namecount[name] -= 1\n    return list(zip(names, estimators))",
    "docstring": "Generate names for estimators.",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\pipeline.py",
    "ast_data": "FunctionDef name:_name_estimators arg:estimators arguments arg Assign Call Call Call Assign Call For Call For Call Call If Compare For Call Call Call Assign If Compare Return return:yes Call Call"
  },
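For context, this is the naming scheme behind `sklearn.pipeline.make_pipeline`: unique estimator types keep their lowercased class name, while duplicates get numeric suffixes. A small usage sketch:

```python
from sklearn.decomposition import PCA
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler

# Distinct types: plain lowercased class names.
print([name for name, _ in make_pipeline(StandardScaler(), PCA()).steps])
# ['standardscaler', 'pca']

# Repeated types: disambiguated with '-1', '-2', ... suffixes.
print([name for name, _ in
       make_pipeline(StandardScaler(), StandardScaler()).steps])
# ['standardscaler-1', 'standardscaler-2']
```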
  {
    "library": "pytorch",
    "name": "create",
    "source_code": "@classmethod\ndef create(cls, device: torch.device, dst_dtype: torch.dtype, src_dtype: torch.dtype, inner_fn: Callable[..., Any], ranges: Sequence[Expr], reduction_ranges: Sequence[Expr], num_output: int, reduction_hint: ReductionHint=ReductionHint.DEFAULT, input_node: Optional[IRNode]=None) -> Sequence[TensorBox]:\n    results = tuple((TensorBox.create(MultiOutputReduction(device, dst_dtype, inner_fn, ranges, reduction_ranges, 'online_softmax_reduce', src_dtype, reduction_hint, output_idx)) for output_idx in range(num_output)))\n    for t in results:\n        t.realize()\n    return results",
    "docstring": "Create the reduction disregarding splitting.",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\ir.py",
    "ast_data": "FunctionDef name:create arg:cls arg:device arg:dst_dtype arg:src_dtype arg:inner_fn arg:ranges arg:reduction_ranges arg:num_output arg:reduction_hint arg:input_node arguments arg arg arg arg arg arg arg arg arg arg Assign Call Call Call Call For Call Return return:yes"
  },
  {
    "library": "pandas",
    "name": "VariableWindowIndexer",
    "source_code": "class VariableWindowIndexer(BaseIndexer):\n\n    @Appender(get_window_bounds_doc)\n    def get_window_bounds(self, num_values: int=0, min_periods: int | None=None, center: bool | None=None, closed: str | None=None, step: int | None=None) -> tuple[np.ndarray, np.ndarray]:\n        return calculate_variable_window_bounds(num_values, self.window_size, min_periods, center, closed, self.index_array)",
    "docstring": "Creates window boundaries that are of variable length, namely for time series.",
    "type": "class",
    "file_path": "pandas\\pandas\\core\\indexers\\objects.py",
    "ast_data": "ClassDef name:VariableWindowIndexer FunctionDef name:get_window_bounds arg:self arg:num_values arg:min_periods arg:center arg:closed arg:step arguments arg arg arg arg arg arg Return return:yes Call Call"
  },
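The variable indexer above plugs into the same public contract as any custom window: a `pandas.api.indexers.BaseIndexer` subclass whose `get_window_bounds` returns parallel `start`/`end` arrays. A minimal sketch with a fixed forward-looking window (a stand-in to show the contract, not the variable-length logic itself):

```python
import numpy as np
import pandas as pd
from pandas.api.indexers import BaseIndexer

class ForwardIndexer(BaseIndexer):
    # Window i covers rows [i, i + window_size), clipped at the array end.
    def get_window_bounds(self, num_values=0, min_periods=None,
                          center=None, closed=None, step=None):
        start = np.arange(num_values, dtype=np.int64)
        end = np.minimum(start + self.window_size, num_values)
        return start, end

s = pd.Series(range(5), dtype=float)
print(s.rolling(ForwardIndexer(window_size=2), min_periods=1).sum())
# [1.0, 3.0, 5.0, 7.0, 4.0]
```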
  {
    "library": "django",
    "name": "value",
    "source_code": "def value(self):\n    data = self.initial\n    if self.form.is_bound:\n        data = self.field.bound_data(self.data, data)\n    return self.field.prepare_value(data)",
    "docstring": "Return the value for this BoundField, using the initial value if the form is not bound or the data otherwise.",
    "type": "method",
    "file_path": "django\\django\\forms\\boundfield.py",
    "ast_data": "FunctionDef name:value arg:self arguments arg Assign If Assign Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "to_proto",
    "source_code": "def to_proto(self, export_scope=None):\n    if export_scope is None or self.name.startswith(export_scope):\n        context_def = control_flow_pb2.CondContextDef()\n        context_def.context_name = ops.strip_name_scope(self.name, export_scope)\n        context_def.pred_name = ops.strip_name_scope(self._pred.name, export_scope)\n        context_def.pivot_name = ops.strip_name_scope(self._pivot.name, export_scope)\n        context_def.branch = self._branch\n        context_def.values_def.MergeFrom(super(CondContext, self)._to_values_def(export_scope))\n        for nested in self._nested_contexts:\n            nested_def = context_def.nested_contexts.add()\n            nested.to_control_flow_context_def(nested_def)\n        return context_def\n    else:\n        return None",
    "docstring": "Converts a to a protocol buffer. Args: export_scope: Optional . Name scope to remove. Returns: A protocol buffer.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\control_flow_ops.py",
    "ast_data": "FunctionDef name:to_proto arg:self arg:export_scope arguments arg arg If BoolOp Compare Call Assign Call Assign Call Assign Call Assign Call Assign Call Call Call For Assign Call Call Return return:yes Return return:no"
  },
  {
    "library": "tensorflow",
    "name": "_with_space_to_batch_adjust",
    "source_code": "def _with_space_to_batch_adjust(orig, fill_value, spatial_dims):\n    fill_dims = orig.get_shape().as_list()[1:]\n    dtype = orig.dtype.as_numpy_dtype\n    parts = []\n    const_orig = tensor_util.constant_value(orig)\n    const_or_orig = const_orig if const_orig is not None else orig\n    prev_spatial_dim = 0\n    i = 0\n    while i < len(spatial_dims):\n        start_i = i\n        start_spatial_dim = spatial_dims[i]\n        if start_spatial_dim > 1:\n            parts.append(np.full([start_spatial_dim - 1 - prev_spatial_dim] + fill_dims, fill_value, dtype=dtype))\n        while i + 1 < len(spatial_dims) and spatial_dims[i + 1] == spatial_dims[i] + 1:\n            i += 1\n        parts.append(const_or_orig[start_i:i + 1])\n        prev_spatial_dim = spatial_dims[i]\n        i += 1\n    if const_orig is not None:\n        return np.concatenate(parts)\n    else:\n        return array_ops.concat(parts, 0)",
    "docstring": "Returns an version of based on . Tensor of the same type as and with shape where: adjusted[spatial_dims[i] - 1, ...] = orig[i, ...] for 0 max(spatial_dims). fill_value: Numpy scalar (of same data type as adjusted` tensor.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\nn_ops.py",
    "ast_data": "FunctionDef name:_with_space_to_batch_adjust arg:orig arg:fill_value arg:spatial_dims arguments arg arg arg Assign Call Call Assign Assign Assign Call Assign Compare Assign Assign While Compare Call Assign Assign If Compare Call Call While BoolOp Compare Call Compare Call Assign If Compare Return return:yes Call Return return:yes Call"
  },
  {
    "library": "seaborn",
    "name": "_define_support_bivariate",
    "source_code": "def _define_support_bivariate(self, x1, x2, weights):\n    clip = self.clip\n    if clip[0] is None or np.isscalar(clip[0]):\n        clip = (clip, clip)\n    kde = self._fit([x1, x2], weights)\n    bw = np.sqrt(np.diag(kde.covariance).squeeze())\n    grid1 = self._define_support_grid(x1, bw[0], self.cut, clip[0], self.gridsize)\n    grid2 = self._define_support_grid(x2, bw[1], self.cut, clip[1], self.gridsize)\n    return (grid1, grid2)",
    "docstring": "Create a 2D grid of evaluation points.",
    "type": "method",
    "file_path": "seaborn\\seaborn\\_statistics.py",
    "ast_data": "FunctionDef name:_define_support_bivariate arg:self arg:x1 arg:x2 arg:weights arguments arg arg arg arg Assign If BoolOp Compare Call Assign Assign Call Assign Call Call Call Assign Call Assign Call Return return:yes"
  },
  {
    "library": "virtualenv",
    "name": "Pypy3Windows",
    "source_code": "class Pypy3Windows(PyPy3, WindowsSupports):\n\n    @property\n    def less_v37(self):\n        return self.interpreter.version_info.minor < 7\n\n    @classmethod\n    def _shared_libs(cls, python_dir):\n        for pattern in ['libpypy*.dll', 'libffi*.dll']:\n            srcs = python_dir.glob(pattern)\n            yield from srcs",
    "docstring": "PyPy 3 on Windows.",
    "type": "class",
    "file_path": "virtualenv\\src\\virtualenv\\create\\via_global_ref\\builtin\\pypy\\pypy3.py",
    "ast_data": "ClassDef name:Pypy3Windows FunctionDef name:less_v37 arg:self arguments arg Return return:yes Compare FunctionDef name:_shared_libs arg:cls arg:python_dir arguments arg arg For Assign Call"
  },
  {
    "library": "tensorflow",
    "name": "conv2d",
    "source_code": "@dispatch.add_dispatch_support\n@doc_controls.do_not_generate_docs\ndef conv2d(x, kernel, strides=(1, 1), padding='valid', data_format=None, dilation_rate=(1, 1)):\n    if data_format is None:\n        data_format = image_data_format()\n    if data_format not in {'channels_first', 'channels_last'}:\n        raise ValueError('Unknown data_format: ' + str(data_format))\n    x, tf_data_format = _preprocess_conv2d_input(x, data_format)\n    padding = _preprocess_padding(padding)\n    x = nn.convolution(input=x, filter=kernel, dilation_rate=dilation_rate, strides=strides, padding=padding, data_format=tf_data_format)\n    if data_format == 'channels_first' and tf_data_format == 'NHWC':\n        x = array_ops.transpose(x, (0, 3, 1, 2))\n    return x",
    "docstring": "2D convolution. Args: x: Tensor or variable. kernel: kernel tensor. strides: strides tuple. padding: string, or . data_format: or . dilation_rate: tuple of 2 integers. Returns: A tensor, result of 2D convolution. Raises: ValueError: if is neither or .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\backend.py",
    "ast_data": "FunctionDef name:conv2d arg:x arg:kernel arg:strides arg:padding arg:data_format arg:dilation_rate arguments arg arg arg arg arg arg If Compare Assign Call If Compare Raise Call Call Assign Call Assign Call Assign Call If BoolOp Compare Compare Assign Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "set_label",
    "source_code": "def set_label(self, label, *, loc=None, **kwargs):\n    if self.orientation == 'vertical':\n        self.ax.set_ylabel(label, loc=loc, **kwargs)\n    else:\n        self.ax.set_xlabel(label, loc=loc, **kwargs)\n    self.stale = True",
    "docstring": "Add a label to the long axis of the colorbar. Parameters ---------- label : str The label text. loc : str, optional The location of the label. - For horizontal orientation one of {'left', 'center', 'right'} - For vertical orientation one of {'bottom', 'center', 'top'} Defaults to :rc: or :rc: depending on the orientation. **kwargs Keyword arguments are passed to / . Supported keywords are *labelpad* and properties.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\colorbar.py",
    "ast_data": "FunctionDef name:set_label arg:self arg:label arguments arg arg arg arg If Compare Call Call Assign"
  },
  {
    "library": "numpy",
    "name": "get_names",
    "source_code": "def get_names(adtype):\n    listnames = []\n    names = adtype.names\n    for name in names:\n        current = adtype[name]\n        if current.names is not None:\n            listnames.append((name, tuple(get_names(current))))\n        else:\n            listnames.append(name)\n    return tuple(listnames)",
    "docstring": "Returns the field names of the input datatype as a tuple. Input datatype must have fields otherwise error is raised. Parameters ---------- adtype : dtype Input datatype Examples -------- >>> import numpy as np >>> from numpy.lib import recfunctions as rfn >>> rfn.get_names(np.empty((1,), dtype=[('A', int)]).dtype) ('A',) >>> rfn.get_names(np.empty((1,), dtype=[('A',int), ('B', float)]).dtype) ('A', 'B') >>> adtype = np.dtype([('a', int), ('b', [('ba', int), ('bb', int)])]) >>> rfn.get_names(adtype) ('a', ('b', ('ba', 'bb')))",
    "type": "function",
    "file_path": "numpy\\numpy\\lib\\recfunctions.py",
    "ast_data": "FunctionDef name:get_names arg:adtype arguments arg Assign Assign For Assign If Compare Call Call Call Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "FSDPCommContext",
    "source_code": "class FSDPCommContext:\n\n    def lazy_init(self, device: torch.device):\n        self.device_handle = _get_device_handle(device.type)\n        high_priority = -1\n        self.all_gather_copy_in_stream = self.device_handle.Stream(priority=high_priority)\n        self.all_gather_stream = self.device_handle.Stream(priority=high_priority)\n        self.reduce_scatter_stream = self.device_handle.Stream(priority=high_priority)\n        self.all_reduce_stream = self.device_handle.Stream()\n        self.all_gather_state: Optional[AllGatherState] = None\n        self.reduce_scatter_state: Optional[ReduceScatterState] = None\n        self.post_forward_order: list[FSDPParamGroup] = []\n\n    def get_all_gather_streams(self, async_op: bool, training_state: TrainingState) -> tuple[torch.Stream, torch.Stream]:\n        if not async_op and training_state in (TrainingState.FORWARD, TrainingState.PRE_BACKWARD):\n            return (self.all_gather_copy_in_stream, self.all_gather_stream)\n        current_stream = self.device_handle.current_stream()\n        return (current_stream, current_stream)",
    "docstring": "This has the communication state shared across FSDP states/parameter groups.",
    "type": "class",
    "file_path": "pytorch\\torch\\distributed\\fsdp\\_fully_shard\\_fsdp_param_group.py",
    "ast_data": "ClassDef name:FSDPCommContext FunctionDef name:lazy_init arg:self arg:device arguments arg arg Assign Call Assign Assign Call Assign Call Assign Call Assign Call FunctionDef name:get_all_gather_streams arg:self arg:async_op arg:training_state arguments arg arg arg If BoolOp Compare Return return:yes Assign Call Return return:yes"
  },
  {
    "library": "scipy",
    "name": "set_solout",
    "source_code": "def set_solout(self, solout):\n    if self._integrator.supports_solout:\n        self._integrator.set_solout(solout, complex=True)\n    else:\n        raise TypeError('selected integrator does not support solouta, choose another one')",
    "docstring": "Set callable to be called at every successful integration step. Parameters ---------- solout : callable `` solout should return -1 to stop integration otherwise it should return None or 0",
    "type": "method",
    "file_path": "scipy\\scipy\\integrate\\_ode.py",
    "ast_data": "FunctionDef name:set_solout arg:self arg:solout arguments arg arg If Call Raise Call"
  },
  {
    "library": "tensorflow",
    "name": "_parse_placeholder_types",
    "source_code": "def _parse_placeholder_types(values):\n    values = [int(value) for value in values.split(',')]\n    return values if len(values) > 1 else values[0]",
    "docstring": "Extracts placeholder types from a comma separate list.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\tools\\optimize_for_inference.py",
    "ast_data": "FunctionDef name:_parse_placeholder_types arg:values arguments arg Assign Call Call Return return:yes Compare Call"
  },
  {
    "library": "pytorch",
    "name": "include_paths",
    "source_code": "def include_paths(device_type: str='cpu') -> list[str]:\n    lib_include = os.path.join(_TORCH_PATH, 'include')\n    paths = [lib_include, os.path.join(lib_include, 'torch', 'csrc', 'api', 'include')]\n    if device_type == 'cuda' and IS_HIP_EXTENSION:\n        paths.append(os.path.join(lib_include, 'THH'))\n        paths.append(_join_rocm_home('include'))\n    elif device_type == 'cuda':\n        cuda_home_include = _join_cuda_home('include')\n        if cuda_home_include != '/usr/include':\n            paths.append(cuda_home_include)\n        if (cuda_inc_path := os.environ.get('CUDA_INC_PATH', None)) and cuda_inc_path != '/usr/include':\n            paths.append(cuda_inc_path)\n        if CUDNN_HOME is not None:\n            paths.append(os.path.join(CUDNN_HOME, 'include'))\n    elif device_type == 'xpu':\n        paths.append(_join_sycl_home('include'))\n        paths.append(_join_sycl_home('include', 'sycl'))\n    return paths",
    "docstring": "Get the include paths required to build a C++ or CUDA or SYCL extension. Args: device_type: Defaults to \"cpu\". Returns: A list of include path strings.",
    "type": "function",
    "file_path": "pytorch\\torch\\utils\\cpp_extension.py",
    "ast_data": "FunctionDef name:include_paths arg:device_type arguments arg Assign Call Assign Call If BoolOp Compare Call Call Call Call If Compare Assign Call If Compare Call If BoolOp Call Compare Call If Compare Call Call If Compare Call Call Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "__getitem__",
    "source_code": "def __getitem__(self, key: int) -> Any:\n    if hasattr(self, FSDP_WRAPPED_MODULE):\n        return self._fsdp_wrapped_module.__getitem__(key)\n    return super().__getitem__(key)",
    "docstring": "Forward indexing calls in case the module is an ``.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\fsdp\\fully_sharded_data_parallel.py",
    "ast_data": "FunctionDef name:__getitem__ arg:self arg:key arguments arg arg If Call Return return:yes Call Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "_cont_bern_log_norm",
    "source_code": "def _cont_bern_log_norm(self):\n    cut_probs = self._cut_probs()\n    cut_probs_below_half = torch.where(torch.le(cut_probs, 0.5), cut_probs, torch.zeros_like(cut_probs))\n    cut_probs_above_half = torch.where(torch.ge(cut_probs, 0.5), cut_probs, torch.ones_like(cut_probs))\n    log_norm = torch.log(torch.abs(torch.log1p(-cut_probs) - torch.log(cut_probs))) - torch.where(torch.le(cut_probs, 0.5), torch.log1p(-2.0 * cut_probs_below_half), torch.log(2.0 * cut_probs_above_half - 1.0))\n    x = torch.pow(self.probs - 0.5, 2)\n    taylor = math.log(2.0) + (4.0 / 3.0 + 104.0 / 45.0 * x) * x\n    return torch.where(self._outside_unstable_region(), log_norm, taylor)",
    "docstring": "computes the log normalizing constant as a function of the 'probs' parameter",
    "type": "method",
    "file_path": "pytorch\\torch\\distributions\\continuous_bernoulli.py",
    "ast_data": "FunctionDef name:_cont_bern_log_norm arg:self arguments arg Assign Call Assign Call Call Call Assign Call Call Call Assign Call Call Call Call Call Call Call Call Assign Call Assign Call Return return:yes Call Call"
  },
  {
    "library": "numpy",
    "name": "unstack",
    "source_code": "@array_function_dispatch(_unstack_dispatcher)\ndef unstack(x, /, *, axis=0):\n    if x.ndim == 0:\n        raise ValueError('Input array must be at least 1-d.')\n    return tuple(_nx.moveaxis(x, axis, 0))",
    "docstring": "Split an array into a sequence of arrays along the given axis. The `stack`, since iterating on an array iterates along the first axis. Examples -------- >>> arr = np.arange(24).reshape((2, 3, 4)) >>> np.unstack(arr) (array([[ 0, 1, 2, 3], [ 4, 5, 6, 7], [ 8, 9, 10, 11]]), array([[12, 13, 14, 15], [16, 17, 18, 19], [20, 21, 22, 23]])) >>> np.unstack(arr, axis=1) (array([[ 0, 1, 2, 3], [12, 13, 14, 15]]), array([[ 4, 5, 6, 7], [16, 17, 18, 19]]), array([[ 8, 9, 10, 11], [20, 21, 22, 23]])) >>> arr2 = np.stack(np.unstack(arr, axis=1), axis=1) >>> arr2.shape (2, 3, 4) >>> np.all(arr == arr2) np.True_",
    "type": "function",
    "file_path": "numpy\\numpy\\_core\\shape_base.py",
    "ast_data": "FunctionDef name:unstack arguments arg arg If Compare Raise Call Return return:yes Call Call Call"
  },
  {
    "library": "django",
    "name": "get_queryset",
    "source_code": "def get_queryset(self):\n    if self.queryset is not None:\n        queryset = self.queryset\n        if isinstance(queryset, QuerySet):\n            queryset = queryset.all()\n    elif self.model is not None:\n        queryset = self.model._default_manager.all()\n    else:\n        raise ImproperlyConfigured('%(cls)s is missing a QuerySet. Define %(cls)s.model, %(cls)s.queryset, or override %(cls)s.get_queryset().' % {'cls': self.__class__.__name__})\n    ordering = self.get_ordering()\n    if ordering:\n        if isinstance(ordering, str):\n            ordering = (ordering,)\n        queryset = queryset.order_by(*ordering)\n    return queryset",
    "docstring": "Return the list of items for this view. The return value must be an iterable and may be an instance of in which case specific behavior will be enabled.",
    "type": "method",
    "file_path": "django\\django\\views\\generic\\list.py",
    "ast_data": "FunctionDef name:get_queryset arg:self arguments arg If Compare Assign If Call Assign Call If Compare Assign Call Raise Call Assign Call If If Call Assign Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, existing_stack: Optional[list[TraceableObject[T]]]=None):\n    self._stack: list[TraceableObject[T]] = existing_stack[:] if existing_stack else []",
    "docstring": "Constructor. Args: existing_stack: [TraceableObject, ...] If provided, this object will set its new stack to a SHALLOW COPY of existing_stack.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\traceable_stack.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:existing_stack arguments arg arg"
  },
  {
    "library": "pytorch",
    "name": "record_exception",
    "source_code": "def record_exception(self, e: BaseException) -> None:\n    file = self._get_error_file_path()\n    if file:\n        data = {'message': {'message': f'{type(e).__name__}: {e}', 'extraInfo': {'py_callstack': traceback.format_exc(), 'timestamp': str(int(time.time()))}}}\n        with open(file, 'w') as fp:\n            json.dump(data, fp)",
    "docstring": "Write a structured information about the exception into an error file in JSON format. If the error file cannot be determined, then logs the content that would have been written to the error file.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\elastic\\multiprocessing\\errors\\error_handler.py",
    "ast_data": "FunctionDef name:record_exception arg:self arg:e arguments arg arg Assign Call If Assign Call Call Call Call Call With Call Call"
  },
  {
    "library": "pandas",
    "name": "_warn_if_deprecated",
    "source_code": "def _warn_if_deprecated(key: str) -> bool:\n    d = _get_deprecated_option(key)\n    if d:\n        if d.msg:\n            warnings.warn(d.msg, FutureWarning, stacklevel=find_stack_level())\n        else:\n            msg = f\"'{key}' is deprecated\"\n            if d.removal_ver:\n                msg += f' and will be removed in {d.removal_ver}'\n            if d.rkey:\n                msg += f\", please use '{d.rkey}' instead.\"\n            else:\n                msg += ', please refrain from using it.'\n            warnings.warn(msg, FutureWarning, stacklevel=find_stack_level())\n        return True\n    return False",
    "docstring": "Checks if is a deprecated option and if so, prints a warning. Returns ------- bool - True if is deprecated, False otherwise.",
    "type": "function",
    "file_path": "pandas\\pandas\\_config\\config.py",
    "ast_data": "FunctionDef name:_warn_if_deprecated arg:key arguments arg Assign Call If If Call Call Assign If If Call Call Return return:yes Return return:yes"
  },
  {
    "library": "django",
    "name": "replace",
    "source_code": "def replace(obj, /, **changes):\n    cls = obj.__class__\n    func = getattr(cls, '__replace__', None)\n    if func is None:\n        raise TypeError(f'replace() does not support {cls.__name__} objects')\n    return func(obj, **changes)",
    "docstring": "Return a new object replacing specified fields with new values. This is especially useful for immutable objects, like named tuples or frozen dataclasses.",
    "type": "function",
    "file_path": "django\\django\\utils\\copy.py",
    "ast_data": "FunctionDef name:replace arguments arg arg Assign Assign Call If Compare Raise Call Return return:yes Call"
  },
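A small illustration of the `__replace__` protocol this helper dispatches to (Python 3.13 added `__replace__` to dataclasses and namedtuples; the hand-rolled class here keeps the sketch self-contained, and the `django.utils.copy` import path is assumed from the record's file_path):

```python
from django.utils.copy import replace  # import path assumed from file_path above

class Point:
    def __init__(self, x, y):
        self.x, self.y = x, y

    def __replace__(self, **changes):
        # Copy the current fields, then override the ones named by the caller.
        fields = {'x': self.x, 'y': self.y}
        fields.update(changes)
        return type(self)(**fields)

    def __repr__(self):
        return f'Point(x={self.x}, y={self.y})'

print(replace(Point(1, 2), y=5))  # Point(x=1, y=5)
```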
  {
    "library": "seaborn",
    "name": "Line",
    "source_code": "@document_properties\n@dataclass\nclass Line(Path):\n    _sort: ClassVar[bool] = True",
    "docstring": "A mark connecting data points with sorting along the orientation axis. See also -------- Path : A mark connecting data points in the order they appear. Lines : A faster but less-flexible mark for drawing many lines. Examples -------- .. include:: ../docstrings/objects.Line.rst",
    "type": "class",
    "file_path": "seaborn\\seaborn\\_marks\\line.py",
    "ast_data": "ClassDef name:Line"
  },
  {
    "library": "cherrypy",
    "name": "__call__",
    "source_code": "def __call__(self):\n    if self.on:\n        oldformatwarning = warnings.formatwarning\n        warnings.formatwarning = self.formatwarning\n        try:\n            for name in dir(self):\n                if name.startswith('check_'):\n                    method = getattr(self, name)\n                    if method and hasattr(method, '__call__'):\n                        method()\n        finally:\n            warnings.formatwarning = oldformatwarning",
    "docstring": "Run all check_* methods.",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\_cpchecker.py",
    "ast_data": "FunctionDef name:__call__ arg:self arguments arg If Assign Assign Try For Call If Call Assign Call If BoolOp Call Call Assign"
  },
  {
    "library": "cherrypy",
    "name": "elements",
    "source_code": "def elements(self, key):\n    return header_elements(self.transform_key(key), self.get(key))",
    "docstring": "Return a sorted list of HeaderElements for the given header.",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\lib\\httputil.py",
    "ast_data": "FunctionDef name:elements arg:self arg:key arguments arg arg Return return:yes Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_get_header_version",
    "source_code": "def _get_header_version(path, name):\n    for line in io.open(path, 'r', encoding='utf-8'):\n        match = re.match('#define %s +(\\\\d+)' % name, line)\n        if match:\n            value = match.group(1)\n            return int(value)\n    raise ConfigError('#define \"{}\" is either\\n'.format(name) + '  not present in file {} OR\\n'.format(path) + '  its value is not an integer literal')",
    "docstring": "Returns preprocessor defines in C header file.",
    "type": "function",
    "file_path": "tensorflow\\third_party\\xla\\third_party\\gpus\\find_rocm_config.py",
    "ast_data": "FunctionDef name:_get_header_version arg:path arg:name arguments arg arg For Call Assign Call If Assign Call Return return:yes Call Raise Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "update_state",
    "source_code": "def update_state(self, y_true, y_pred, sample_weight=None):\n    return metrics_utils.update_confusion_matrix_variables({self._confusion_matrix_cond: self.accumulator}, y_true, y_pred, thresholds=self.thresholds, thresholds_distributed_evenly=self._thresholds_distributed_evenly, sample_weight=sample_weight)",
    "docstring": "Accumulates the metric statistics. Args: y_true: The ground truth values. y_pred: The predicted values. sample_weight: Optional weighting of each example. Defaults to 1. Can be a whose rank is either 0, or the same rank as , and must be broadcastable to . Returns: Update op.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\metrics.py",
    "ast_data": "FunctionDef name:update_state arg:self arg:y_true arg:y_pred arg:sample_weight arguments arg arg arg arg Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "_yeojohnson_transform",
    "source_code": "def _yeojohnson_transform(x, lmbda):\n    dtype = x.dtype if np.issubdtype(x.dtype, np.floating) else np.float64\n    out = np.zeros_like(x, dtype=dtype)\n    pos = x >= 0\n    if abs(lmbda) < np.spacing(1.0):\n        out[pos] = np.log1p(x[pos])\n    else:\n        out[pos] = np.expm1(lmbda * np.log1p(x[pos])) / lmbda\n    if abs(lmbda - 2) > np.spacing(1.0):\n        out[~pos] = -np.expm1((2 - lmbda) * np.log1p(-x[~pos])) / (2 - lmbda)\n    else:\n        out[~pos] = -np.log1p(-x[~pos])\n    return out",
    "docstring": "Returns transformed by the Yeo-Johnson power transform with given parameter .",
    "type": "function",
    "file_path": "scipy\\scipy\\stats\\_morestats.py",
    "ast_data": "FunctionDef name:_yeojohnson_transform arg:x arg:lmbda arguments arg arg Assign Call Assign Call Assign Compare If Compare Call Call Assign Call Assign Call Call If Compare Call Call Assign Call Call Assign Call Return return:yes"
  },
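A scalar restatement of the piecewise transform above, cross-checked against the public `scipy.stats.yeojohnson` (a minimal sketch; the private helper handles arrays and dtype promotion, which this omits):

```python
import numpy as np
from scipy import stats

def yeojohnson_scalar(x, lmbda):
    # x >= 0 branch, with the lmbda == 0 limit handled via log1p.
    if x >= 0:
        return np.log1p(x) if lmbda == 0 else ((1 + x) ** lmbda - 1) / lmbda
    # x < 0 branch, with the lmbda == 2 limit handled via log1p.
    if lmbda == 2:
        return -np.log1p(-x)
    return -((1 - x) ** (2 - lmbda) - 1) / (2 - lmbda)

for x, lam in [(1.5, 0.0), (1.5, 1.3), (-0.7, 2.0), (-0.7, 0.5)]:
    assert np.isclose(yeojohnson_scalar(x, lam),
                      stats.yeojohnson(np.array([x]), lmbda=lam)[0])
print('matches scipy.stats.yeojohnson')
```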
  {
    "library": "sphinx",
    "name": "literal_strong",
    "source_code": "class literal_strong(nodes.strong, not_smartquotable):\n    pass",
    "docstring": "Node that behaves like , but further text processors are not applied (e.g. smartypants for HTML output).",
    "type": "class",
    "file_path": "sphinx\\sphinx\\addnodes.py",
    "ast_data": "ClassDef name:literal_strong"
  },
  {
    "library": "matplotlib",
    "name": "_get_bbox_header",
    "source_code": "def _get_bbox_header(lbrt):\n    l, b, r, t = lbrt\n    return f'%%BoundingBox: {int(l)} {int(b)} {math.ceil(r)} {math.ceil(t)}\\n%%HiResBoundingBox: {l:.6f} {b:.6f} {r:.6f} {t:.6f}'",
    "docstring": "Return a PostScript header string for bounding box *lbrt*=(l, b, r, t).",
    "type": "function",
    "file_path": "matplotlib\\lib\\matplotlib\\backends\\backend_ps.py",
    "ast_data": "FunctionDef name:_get_bbox_header arg:lbrt arguments arg Assign Return return:yes Call Call Call Call"
  },
  {
    "library": "numpy",
    "name": "ConverterError",
    "source_code": "class ConverterError(Exception):\n    pass",
    "docstring": "Exception raised when an error occurs in a converter for string values.",
    "type": "class",
    "file_path": "numpy\\numpy\\lib\\_iotools.py",
    "ast_data": "ClassDef name:ConverterError"
  },
  {
    "library": "sphinx",
    "name": "evaluate_signature",
    "source_code": "def evaluate_signature(sig: Signature, globalns: dict[str, Any] | None=None, localns: dict[str, Any] | None=None) -> Signature:\n    if globalns is None:\n        globalns = {}\n    if localns is None:\n        localns = globalns\n    parameters = list(sig.parameters.values())\n    for i, param in enumerate(parameters):\n        if param.annotation:\n            annotation = _evaluate(param.annotation, globalns, localns)\n            parameters[i] = param.replace(annotation=annotation)\n    return_annotation = sig.return_annotation\n    if return_annotation:\n        return_annotation = _evaluate(return_annotation, globalns, localns)\n    return sig.replace(parameters=parameters, return_annotation=return_annotation)",
    "docstring": "Evaluate unresolved type annotations in a signature object.",
    "type": "function",
    "file_path": "sphinx\\sphinx\\util\\inspect.py",
    "ast_data": "FunctionDef name:evaluate_signature arg:sig arg:globalns arg:localns arguments arg arg arg If Compare Assign If Compare Assign Assign Call Call For Call If Assign Call Assign Call Assign If Assign Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "_set_datapipe_valid_iterator_id",
    "source_code": "def _set_datapipe_valid_iterator_id(datapipe):\n    if hasattr(datapipe, '_is_child_datapipe') and datapipe._is_child_datapipe is True:\n        if hasattr(datapipe, '_set_main_datapipe_valid_iterator_id'):\n            datapipe._set_main_datapipe_valid_iterator_id()\n        else:\n            raise RuntimeError('ChildDataPipe must have method `_set_main_datapipe_valid_iterator_id`.')\n    else:\n        if datapipe._valid_iterator_id is None:\n            datapipe._valid_iterator_id = 0\n        else:\n            datapipe._valid_iterator_id += 1\n        datapipe.reset()\n    return datapipe._valid_iterator_id",
    "docstring": "Given a DataPipe, updates its valid iterator ID and reset the DataPipe.",
    "type": "function",
    "file_path": "pytorch\\torch\\utils\\data\\datapipes\\_hook_iterator.py",
    "ast_data": "FunctionDef name:_set_datapipe_valid_iterator_id arg:datapipe arguments arg If BoolOp Call Compare If Call Call Raise Call If Compare Assign Call Return return:yes"
  },
  {
    "library": "authlib",
    "name": "validate_policy_uri",
    "source_code": "def validate_policy_uri(self):\n    self._validate_uri('policy_uri')",
    "docstring": "URL string that points to a human-readable privacy policy document that describes how the deployment organization collects, uses, retains, and discloses personal data. The authorization server SHOULD display this URL to the end-user if it is provided. The value of this field MUST point to a valid web page. The value of this field MAY be internationalized, as described in Section 2.2.",
    "type": "method",
    "file_path": "authlib\\authlib\\oauth2\\rfc7591\\claims.py",
    "ast_data": "FunctionDef name:validate_policy_uri arg:self arguments arg Call"
  },
  {
    "library": "scipy",
    "name": "berp_zeros",
    "source_code": "def berp_zeros(nt):\n    if not isscalar(nt) or floor(nt) != nt or nt <= 0:\n        raise ValueError('nt must be positive integer scalar.')\n    return _specfun.klvnzo(nt, 5)",
    "docstring": "Compute nt zeros of the derivative of the Kelvin function ber. Parameters ---------- nt : int Number of zeros to compute. Must be positive. Returns ------- ndarray First zeros of the derivative of the Kelvin function. See Also -------- ber, berp References ---------- .. [1] Zhang, Shanjie and Jin, Jianming. \"Computation of Special Functions\", John Wiley and Sons, 1996. Examples -------- Compute the first 5 zeros of the derivative of the Kelvin function. >>> from scipy.special import berp_zeros >>> berp_zeros(5) array([ 6.03871081, 10.51364251, 14.96844542, 19.41757493, 23.86430432])",
    "type": "function",
    "file_path": "scipy\\scipy\\special\\_basic.py",
    "ast_data": "FunctionDef name:berp_zeros arg:nt arguments arg If BoolOp Call Compare Call Compare Raise Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_matrix_exp_pade9",
    "source_code": "def _matrix_exp_pade9(matrix):\n    b = [17643225600.0, 8821612800.0, 2075673600.0, 302702400.0, 30270240.0, 2162160.0, 110880.0, 3960.0, 90.0]\n    b = [constant_op.constant(x, matrix.dtype) for x in b]\n    ident = linalg_ops.eye(array_ops.shape(matrix)[-2], batch_shape=array_ops.shape(matrix)[:-2], dtype=matrix.dtype)\n    matrix_2 = math_ops.matmul(matrix, matrix)\n    matrix_4 = math_ops.matmul(matrix_2, matrix_2)\n    matrix_6 = math_ops.matmul(matrix_4, matrix_2)\n    matrix_8 = math_ops.matmul(matrix_6, matrix_2)\n    tmp = matrix_8 + b[7] * matrix_6 + b[5] * matrix_4 + b[3] * matrix_2 + b[1] * ident\n    matrix_u = math_ops.matmul(matrix, tmp)\n    matrix_v = b[8] * matrix_8 + b[6] * matrix_6 + b[4] * matrix_4 + b[2] * matrix_2 + b[0] * ident\n    return (matrix_u, matrix_v)",
    "docstring": "9th-order Pade approximant for matrix exponential.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\linalg\\linalg_impl.py",
    "ast_data": "FunctionDef name:_matrix_exp_pade9 arg:matrix arguments arg Assign Assign Call Assign Call Call Call Assign Call Assign Call Assign Call Assign Call Assign Assign Call Assign Return return:yes"
  },
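For orientation: the `matrix_u`/`matrix_v` pair returned above are the odd and even halves of the degree-9 Padé numerator (with the leading coefficient \(b_9 = 1\) folded into `matrix_8`); the caller, not shown in this record, then forms the approximant in the usual scaling-and-squaring layout:

```latex
U = A \sum_{\substack{k \text{ odd} \\ k \le 9}} b_k A^{k-1},
\qquad
V = \sum_{\substack{k \text{ even} \\ k \le 8}} b_k A^{k},
\qquad
e^{A} \approx (V - U)^{-1}(V + U).
```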
  {
    "library": "matplotlib",
    "name": "set_major_locator",
    "source_code": "def set_major_locator(self, locator):\n    _api.check_isinstance(mticker.Locator, locator=locator)\n    self.isDefault_majloc = False\n    self.major.locator = locator\n    if self.major.formatter:\n        self.major.formatter._set_locator(locator)\n    locator.set_axis(self)\n    self.stale = True",
    "docstring": "Set the locator of the major ticker. Parameters ---------- locator :",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\axis.py",
    "ast_data": "FunctionDef name:set_major_locator arg:self arg:locator arguments arg arg Call Assign Assign If Call Call Assign"
  },
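A short usage sketch for the method above, assuming a standard matplotlib install; `MultipleLocator` and the Agg backend are illustrative choices.

```python
import matplotlib
matplotlib.use("Agg")  # headless backend so the sketch runs anywhere
import matplotlib.pyplot as plt
from matplotlib.ticker import MultipleLocator

fig, ax = plt.subplots()
ax.plot(range(10))
# Install a fixed-interval locator: major ticks every 2 data units.
ax.xaxis.set_major_locator(MultipleLocator(2))
fig.savefig("ticks.png")
```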
  {
    "library": "tensorflow",
    "name": "replace",
    "source_code": "def replace(template, **replacements):\n    if not isinstance(template, str):\n        raise ValueError('Expected string template, got %s' % type(template))\n    for k in replacements:\n        replacements[k] = _convert_to_ast(replacements[k])\n    template_str = parser.STANDARD_PREAMBLE + textwrap.dedent(template)\n    nodes = parser.parse(template_str, preamble_len=parser.STANDARD_PREAMBLE_LEN, single_node=False)\n    results = []\n    for node in nodes:\n        node = ReplaceTransformer(replacements).visit(node)\n        if isinstance(node, (list, tuple)):\n            results.extend(node)\n        else:\n            results.append(node)\n    results = [qual_names.resolve(r) for r in results]\n    return results",
    "docstring": "Replaces placeholders in a Python template. AST Name and Tuple nodes always receive the context that inferred from the template. However, when replacing more complex nodes (that can potentially contain Name children), then the caller is responsible for setting the appropriate context. Args: template: A string representing Python code. Any symbol name can be used that appears in the template code can be used as placeholder. **replacements: A mapping from placeholder names to (lists of) AST nodes that these placeholders will be replaced by. String values are also supported as a shorthand for AST Name nodes with the respective ID. Returns: An AST node or list of AST nodes with the replacements made. If the template was a function, a list will be returned. If the template was a node, the same node will be returned. If the template was a string, an AST node will be returned (a node in the case of a multi-line string, an node otherwise). Raises: ValueError: if the arguments are incorrect.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\autograph\\pyct\\templates.py",
    "ast_data": "FunctionDef name:replace arg:template arguments arg arg If Call Raise Call Call For Assign Call Assign Call Assign Call Assign For Assign Call Call If Call Call Call Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_tf_if_exp",
    "source_code": "def _tf_if_exp(cond, if_true, if_false, expr_repr):\n    true_val = []\n    false_val = []\n\n    def true_fn():\n        true_val.append(if_true())\n        if true_val and false_val:\n            control_flow.verify_single_cond_var(expr_repr, true_val[0], false_val[0])\n        return true_val[0]\n\n    def false_fn():\n        false_val.append(if_false())\n        if true_val and false_val:\n            control_flow.verify_single_cond_var(expr_repr, true_val[0], false_val[0])\n        return false_val[0]\n    return tf_cond.cond(cond, true_fn, false_fn)",
    "docstring": "Overload of if_exp that stages a TF cond.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\autograph\\operators\\conditional_expressions.py",
    "ast_data": "FunctionDef name:_tf_if_exp arg:cond arg:if_true arg:if_false arg:expr_repr arguments arg arg arg arg Assign Assign FunctionDef name:true_fn arguments Call Call If BoolOp Call Return return:yes FunctionDef name:false_fn arguments Call Call If BoolOp Call Return return:yes Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "get_import",
    "source_code": "def get_import(self, file_prefixes_to_strip: Sequence[str], module_prefix: str, use_lazy_loading: bool) -> str:\n    module_import_path = _get_import_path(self.exported_symbol.file_name, file_prefixes_to_strip, module_prefix)\n    alias = ''\n    symbol_name = self.exported_symbol.symbol_name\n    if self.name != symbol_name:\n        alias = f' as {self.name}'\n    if not use_lazy_loading:\n        return f'from {module_import_path} import {symbol_name}{alias} # line: {self.exported_symbol.line_no}'\n    else:\n        return f\"  '{self.name}': ('{module_import_path}', '{symbol_name}'), # line: {self.exported_symbol.line_no}\"",
    "docstring": "Returns the import statement for this entrypoint. Args: file_prefixes_to_strip: List of prefixes to strip from the file name. module_prefix: A prefix to add to the import. use_lazy_loading: Whether to use lazy loading or not.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\tools\\api\\generator2\\generator\\generator.py",
    "ast_data": "FunctionDef name:get_import arg:self arg:file_prefixes_to_strip arg:module_prefix arg:use_lazy_loading arguments arg arg arg arg Assign Call Assign Assign If Compare Assign If Return return:yes Return return:yes"
  },
  {
    "library": "django",
    "name": "update",
    "source_code": "def update(self, *args, **kwargs):\n    if len(args) > 1:\n        raise TypeError('update expected at most 1 argument, got %d' % len(args))\n    if args:\n        arg = args[0]\n        if isinstance(arg, MultiValueDict):\n            for key, value_list in arg.lists():\n                self.setlistdefault(key).extend(value_list)\n        else:\n            if isinstance(arg, Mapping):\n                arg = arg.items()\n            for key, value in arg:\n                self.setlistdefault(key).append(value)\n    for key, value in kwargs.items():\n        self.setlistdefault(key).append(value)",
    "docstring": "Extend rather than replace existing key lists.",
    "type": "method",
    "file_path": "django\\django\\utils\\datastructures.py",
    "ast_data": "FunctionDef name:update arg:self arguments arg arg arg If Compare Call Raise Call Call If Assign If Call For Call Call Call If Call Assign Call For Call Call For Call Call Call"
  },
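A sketch of the extend-rather-than-replace semantics documented above; the keys and values are made up for illustration.

```python
from django.utils.datastructures import MultiValueDict

d = MultiValueDict({"name": ["Adrian"]})
d.update({"name": "Simon"})  # plain mapping: value is appended, not replaced
print(d.getlist("name"))     # ['Adrian', 'Simon']
d.update(MultiValueDict({"name": ["Jacob", "Kate"]}))  # lists are extended
print(d.getlist("name"))     # ['Adrian', 'Simon', 'Jacob', 'Kate']
```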
  {
    "library": "pytorch",
    "name": "all_outputs_are_dead",
    "source_code": "def all_outputs_are_dead(self) -> bool:\n    for depth, output_index in self.live_indices_after_graph:\n        if is_live(self.path_weakrefs[depth][output_index]):\n            return False\n    return True",
    "docstring": "All outputs of the path from this node to its root are dead",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\cudagraph_trees.py",
    "ast_data": "FunctionDef name:all_outputs_are_dead arg:self arguments arg For If Call Return return:yes Return return:yes"
  },
  {
    "library": "django",
    "name": "SessionInterrupted",
    "source_code": "class SessionInterrupted(BadRequest):\n    pass",
    "docstring": "The session was interrupted.",
    "type": "class",
    "file_path": "django\\django\\contrib\\sessions\\exceptions.py",
    "ast_data": "ClassDef name:SessionInterrupted"
  },
  {
    "library": "scipy",
    "name": "idstn",
    "source_code": "@_dispatch\ndef idstn(x, type=2, s=None, axes=None, norm=None, overwrite_x=False, workers=None, orthogonalize=None):\n    return (Dispatchable(x, np.ndarray),)",
    "docstring": "Return multidimensional Inverse Discrete Sine Transform along the specified axes. Parameters ---------- x : array_like The input array. type : {1, 2, 3, 4}, optional Type of the DST (see Notes). Default type is 2. s : int or array_like of ints or None, optional The shape of the result. If both and (see below) are None, is `saxess`s[i] >> import numpy as np >>> from scipy.fft import dstn, idstn >>> rng = np.random.default_rng() >>> y = rng.standard_normal((16, 16)) >>> np.allclose(y, idstn(dstn(y))) True",
    "type": "function",
    "file_path": "scipy\\scipy\\fft\\_realtransforms.py",
    "ast_data": "FunctionDef name:idstn arg:x arg:type arg:s arg:axes arg:norm arg:overwrite_x arg:workers arg:orthogonalize arguments arg arg arg arg arg arg arg arg Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "_parse_doc",
    "source_code": "def _parse_doc(self, raw_doc: FilePath | ReadBuffer[bytes] | ReadBuffer[str]) -> Element | etree._Element:\n    raise AbstractMethodError(self)",
    "docstring": "Build tree from path_or_buffer. This method will parse XML object into tree either from string/bytes or file location.",
    "type": "method",
    "file_path": "pandas\\pandas\\io\\xml.py",
    "ast_data": "FunctionDef name:_parse_doc arg:self arg:raw_doc arguments arg arg Raise Call"
  },
  {
    "library": "scikit-learn",
    "name": "_parallel_predict_proba",
    "source_code": "def _parallel_predict_proba(estimators, estimators_features, X, n_classes, predict_params=None, predict_proba_params=None):\n    n_samples = X.shape[0]\n    proba = np.zeros((n_samples, n_classes))\n    for estimator, features in zip(estimators, estimators_features):\n        if hasattr(estimator, 'predict_proba'):\n            proba_estimator = estimator.predict_proba(X[:, features], **predict_params or {})\n            if n_classes == len(estimator.classes_):\n                proba += proba_estimator\n            else:\n                proba[:, estimator.classes_] += proba_estimator[:, range(len(estimator.classes_))]\n        else:\n            predictions = estimator.predict(X[:, features], **predict_proba_params or {})\n            for i in range(n_samples):\n                proba[i, predictions[i]] += 1\n    return proba",
    "docstring": "Private function used to compute (proba-)predictions within a job.",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\ensemble\\_bagging.py",
    "ast_data": "FunctionDef name:_parallel_predict_proba arg:estimators arg:estimators_features arg:X arg:n_classes arg:predict_params arg:predict_proba_params arguments arg arg arg arg arg arg Assign Assign Call For Call If Call Assign Call BoolOp If Compare Call Call Call Assign Call BoolOp For Call Return return:yes"
  },
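Since the helper above is private, here is a standalone NumPy sketch of the soft-vote accumulation it performs; the hard-coded probability arrays stand in for fitted estimators' `predict_proba` outputs.

```python
import numpy as np

# Per-estimator class-probability outputs for two samples, two classes.
probas = [
    np.array([[0.9, 0.1], [0.2, 0.8]]),
    np.array([[0.6, 0.4], [0.3, 0.7]]),
]
proba = np.zeros_like(probas[0])
for p in probas:
    proba += p        # the helper accumulates; the caller averages
proba /= len(probas)
print(proba)          # [[0.75 0.25], [0.25 0.75]]
```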
  {
    "library": "scipy",
    "name": "_highs_to_scipy_status_message",
    "source_code": "def _highs_to_scipy_status_message(highs_status, highs_message):\n    scipy_statuses_messages = {None: (4, 'HiGHS did not provide a status code. '), HighsModelStatus.kNotset: (4, ''), HighsModelStatus.kLoadError: (4, ''), HighsModelStatus.kModelError: (2, ''), HighsModelStatus.kPresolveError: (4, ''), HighsModelStatus.kSolveError: (4, ''), HighsModelStatus.kPostsolveError: (4, ''), HighsModelStatus.kModelEmpty: (4, ''), HighsModelStatus.kObjectiveBound: (4, ''), HighsModelStatus.kObjectiveTarget: (4, ''), HighsModelStatus.kOptimal: (0, 'Optimization terminated successfully. '), HighsModelStatus.kTimeLimit: (1, 'Time limit reached. '), HighsModelStatus.kIterationLimit: (1, 'Iteration limit reached. '), HighsModelStatus.kInfeasible: (2, 'The problem is infeasible. '), HighsModelStatus.kUnbounded: (3, 'The problem is unbounded. '), HighsModelStatus.kUnboundedOrInfeasible: (4, 'The problem is unbounded or infeasible. ')}\n    unrecognized = (4, 'The HiGHS status code was not recognized. ')\n    scipy_status, scipy_message = scipy_statuses_messages.get(highs_status, unrecognized)\n    hstat = int(highs_status) if highs_status is not None else None\n    scipy_message = f'{scipy_message}(HiGHS Status {hstat}: {highs_message})'\n    return (scipy_status, scipy_message)",
    "docstring": "Converts HiGHS status number/message to SciPy status number/message",
    "type": "function",
    "file_path": "scipy\\scipy\\optimize\\_linprog_highs.py",
    "ast_data": "FunctionDef name:_highs_to_scipy_status_message arg:highs_status arg:highs_message arguments arg arg Assign Assign Assign Call Assign Compare Call Assign Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "__init__",
    "source_code": "def __init__(self, input_nodes, layout: ir.Layout, num_threads: int, register_blocking: GemmBlocking, beta=1, alpha=1, has_bias=False, epilogue_creator: Optional[Callable[[ir.Buffer], ir.Pointwise]]=None, should_block_weights: bool=False, name='bmm'):\n    super().__init__(input_nodes, layout, num_threads, register_blocking, beta=beta, alpha=alpha, has_bias=has_bias, epilogue_creator=epilogue_creator, should_block_weights=should_block_weights, name=name)\n    self.b_index = sympy.Symbol('s_b_index', integer=True, nonnegative=True)",
    "docstring": "In order to simplify the implementation and increase code reuse, the BMM template implements two versions of the GEMM kernel: a single-threaded version and a multi-threaded version. GEMM kernels are called in a loop over the batch dimension, with single-threaded GEMM calls for all but the last (B % num_threads), which are handled by the multi-threaded GEMM kernel. We use an extra sizevar to index the batch dimension, which we pass into the GEMM template as a sympy.Symbol. This allows us to slice the 3D batch tensors in the GEMM template without any changes to the GEMM template itself.",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\codegen\\cpp_bmm_template.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:input_nodes arg:layout arg:num_threads arg:register_blocking arg:beta arg:alpha arg:has_bias arg:epilogue_creator arg:should_block_weights arg:name arguments arg arg arg arg arg arg arg arg arg arg arg Call Call Assign Call"
  },
  {
    "library": "tensorflow",
    "name": "_TensorListSetItemGrad",
    "source_code": "@ops.RegisterGradient('TensorListSetItem')\ndef _TensorListSetItemGrad(op: ops.Operation, dlist):\n    input_list, index, item = op.inputs\n    list_grad = gen_list_ops.tensor_list_set_item(dlist, index=index, item=array_ops.zeros_like(item))\n    index_grad = None\n    element_grad = tensor_list_get_item(dlist, index, element_shape=array_ops.shape(item), element_dtype=item.dtype)\n    if op.get_attr('resize_if_index_out_of_bounds'):\n        input_list_size = gen_list_ops.tensor_list_length(input_list)\n        list_grad = gen_list_ops.tensor_list_resize(list_grad, input_list_size)\n    return (list_grad, index_grad, element_grad)",
    "docstring": "Gradient function for TensorListSetItem.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\list_ops.py",
    "ast_data": "FunctionDef name:_TensorListSetItemGrad arg:op arg:dlist arguments arg arg Assign Assign Call Call Assign Assign Call Call If Call Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_remove_native_segments",
    "source_code": "def _remove_native_segments(input_func):\n    input_graph_def = input_func.graph.as_graph_def()\n    nodes_deleted = 0\n    for func_id in reversed(range(len(input_graph_def.library.function))):\n        f = input_graph_def.library.function[func_id]\n        if 'native_segment' in f.signature.name:\n            nodes_deleted += 1\n            while context.context().has_function(f.signature.name):\n                context.context().remove_function(f.signature.name)\n            del input_graph_def.library.function[func_id]\n    logging.info(f'Found and deleted native segments from {nodes_deleted} TRTEngineOp nodes.')\n    for node in input_graph_def.node:\n        if node.op == 'TRTEngineOp':\n            del node.attr['segment_func']\n    for func in input_graph_def.library.function:\n        for node in func.node_def:\n            if node.op == 'TRTEngineOp':\n                del node.attr['segment_func']\n    new_func = _construct_function_from_graph_def(input_func, input_graph_def)\n    return new_func",
    "docstring": "Remove native segments from the input TF-TRT Converted Function. Args: input_func: provide the concrete function with native segment nodes. The transformed output func will not contain any native segment nodes. All the TRTEngineOp references will be deleted and reset to default empty func.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\compiler\\tensorrt\\trt_convert.py",
    "ast_data": "FunctionDef name:_remove_native_segments arg:input_func arguments arg Assign Call Assign For Call Call Call Assign If Compare While Call Call Call Call Call For If Compare For For If Compare Assign Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_streaming_load",
    "source_code": "def _streaming_load(f: BufferedIOBase, map_location: MAP_LOCATION=None, pickle_module: Any=None, *, weights_only: bool=True, **pickle_load_args: Any) -> object:\n    if weights_only:\n        if pickle_module is not None:\n            raise RuntimeError('Can not safely load weights when explicit pickle_module is specified')\n        pickle_module = _weights_only_unpickler\n    elif pickle_module is None:\n        pickle_module = pickle\n    if 'encoding' not in pickle_load_args.keys():\n        pickle_load_args['encoding'] = 'utf-8'\n    zip_file = _PseudoZipFile()\n    zip_file.read_from(f)\n    return _load(zip_file=zip_file, map_location=map_location, pickle_module=pickle_module, **pickle_load_args)",
    "docstring": "Load the object from a file-like object in a streaming fashion compatible with network sockets. See :func: for more details about the streaming behavior. See :func: for more details on specific arguments.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\_serialization.py",
    "ast_data": "FunctionDef name:_streaming_load arg:f arg:map_location arg:pickle_module arguments arg arg arg arg arg If If Compare Raise Call Assign If Compare Assign If Compare Call Assign Assign Call Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "scatter_sub",
    "source_code": "def scatter_sub(self, sparse_delta, use_locking=False, name=None):\n    if not isinstance(sparse_delta, indexed_slices.IndexedSlices):\n        raise TypeError(f'Argument `sparse_delta` must be a `tf.IndexedSlices`. Received arg: {sparse_delta}')\n    return self._lazy_read(gen_resource_variable_ops.resource_scatter_sub(self.handle, sparse_delta.indices, ops.convert_to_tensor(sparse_delta.values, self.dtype), name=name))",
    "docstring": "Subtracts from this variable. Args: sparse_delta: to be subtracted from this variable. use_locking: If , use locking during the operation. name: the name of the operation. Returns: The updated variable. Raises: TypeError: if is not an .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\resource_variable_ops.py",
    "ast_data": "FunctionDef name:scatter_sub arg:self arg:sparse_delta arg:use_locking arg:name arguments arg arg arg arg If Call Raise Call Return return:yes Call Call Call"
  },
  {
    "library": "django",
    "name": "CsOperation",
    "source_code": "class CsOperation(GEOSFuncFactory):\n    restype = c_int\n\n    def __init__(self, *args, ordinate=False, get=False, **kwargs):\n        if get:\n            errcheck = check_cs_get\n            dbl_param = POINTER(c_double)\n        else:\n            errcheck = check_cs_op\n            dbl_param = c_double\n        if ordinate:\n            argtypes = [CS_PTR, c_uint, c_uint, dbl_param]\n        else:\n            argtypes = [CS_PTR, c_uint, dbl_param]\n        super().__init__(*args, **{**kwargs, 'errcheck': errcheck, 'argtypes': argtypes})",
    "docstring": "For coordinate sequence operations.",
    "type": "class",
    "file_path": "django\\django\\contrib\\gis\\geos\\prototypes\\coordseq.py",
    "ast_data": "ClassDef name:CsOperation Assign FunctionDef name:__init__ arg:self arguments arg arg arg arg arg If Assign Assign Call Assign Assign If Assign Assign Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_assert",
    "source_code": "def _assert(cond, ex_type, msg):\n    if _is_tensor(cond):\n        return [control_flow_assert.Assert(cond, [msg])]\n    elif not cond:\n        raise ex_type(msg)\n    else:\n        return []",
    "docstring": "A polymorphic assert, works with tensors and boolean expressions. If is not a tensor, behave like an ordinary assert statement, except that a empty list is returned. If is a tensor, return a list containing a single TensorFlow assert op. Args: cond: Something evaluates to a boolean value. May be a tensor. ex_type: The exception class to use. msg: The error message. Returns: A list, containing at most one assert op.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\image_ops_impl.py",
    "ast_data": "FunctionDef name:_assert arg:cond arg:ex_type arg:msg arguments arg arg arg If Call Return return:yes Call If Raise Call Return return:no"
  },
  {
    "library": "tensorflow",
    "name": "_map_args",
    "source_code": "def _map_args(call_node, function):\n    args = call_node.args\n    kwds = {kwd.arg: kwd.value for kwd in call_node.keywords}\n    call_args = tf_inspect.getcallargs(function, *args, **kwds)\n    unexpected_defaults = []\n    for k in call_args:\n        if k not in kwds and call_args[k] not in args and (call_args[k] is not directives.UNSPECIFIED):\n            unexpected_defaults.append(k)\n    if unexpected_defaults:\n        raise ValueError('Unexpected keyword argument values, %s, for function %s' % (zip(unexpected_defaults, [call_args[k] for k in unexpected_defaults]), function))\n    return {k: v for k, v in call_args.items() if v is not directives.UNSPECIFIED}",
    "docstring": "Maps AST call nodes to the actual function's arguments. Args: call_node: ast.Call function: Callable[..., Any], the actual function matching call_node Returns: Dict[Text, ast.AST], mapping each of the function's argument names to the respective AST node. Raises: ValueError: if the default arguments are not correctly set",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\autograph\\converters\\directives.py",
    "ast_data": "FunctionDef name:_map_args arg:call_node arg:function arguments arg arg Assign Assign Assign Call Assign For If BoolOp Compare Compare Compare Call If Raise Call Call Return return:yes Call Compare"
  },
  {
    "library": "matplotlib",
    "name": "cvt",
    "source_code": "def cvt(length, upe=font.units_per_EM, nearest=True):\n    value = length / upe * 1000\n    if nearest:\n        return round(value)\n    if value < 0:\n        return math.floor(value)\n    else:\n        return math.ceil(value)",
    "docstring": "Convert font coordinates to PDF glyph coordinates.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\backends\\backend_pdf.py",
    "ast_data": "FunctionDef name:cvt arg:length arg:upe arg:nearest arguments arg arg arg Assign If Return return:yes Call If Compare Return return:yes Call Return return:yes Call"
  },
  {
    "library": "sphinx",
    "name": "parse_reftarget",
    "source_code": "def parse_reftarget(reftarget: str, suppress_prefix: bool=False) -> tuple[str, str, str, bool]:\n    refspecific = False\n    if reftarget.startswith('.'):\n        reftarget = reftarget[1:]\n        title = reftarget\n        refspecific = True\n    elif reftarget.startswith('~'):\n        reftarget = reftarget[1:]\n        title = reftarget.split('.')[-1]\n    elif suppress_prefix:\n        title = reftarget.split('.')[-1]\n    elif reftarget.startswith('typing.'):\n        title = reftarget[7:]\n    else:\n        title = reftarget\n    if reftarget == 'None' or reftarget.startswith('typing.'):\n        reftype = 'obj'\n    else:\n        reftype = 'class'\n    return (reftype, reftarget, title, refspecific)",
    "docstring": "Parse a type string and return (reftype, reftarget, title, refspecific flag)",
    "type": "function",
    "file_path": "sphinx\\sphinx\\domains\\python\\_annotations.py",
    "ast_data": "FunctionDef name:parse_reftarget arg:reftarget arg:suppress_prefix arguments arg arg Assign If Call Assign Assign Assign If Call Assign Assign Call If Assign Call If Call Assign Assign If BoolOp Compare Call Assign Assign Return return:yes"
  },
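Hedged examples of the prefix handling above; the import path is taken from this entry's `file_path` and assumes a Sphinx version where the private `_annotations` module exists.

```python
from sphinx.domains.python._annotations import parse_reftarget

print(parse_reftarget("~pkg.mod.Klass"))
# ('class', 'pkg.mod.Klass', 'Klass', False)  -- '~' keeps only the last part
print(parse_reftarget(".Klass"))
# ('class', 'Klass', 'Klass', True)           -- leading '.' sets refspecific
print(parse_reftarget("typing.Optional"))
# ('obj', 'typing.Optional', 'Optional', False)
```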
  {
    "library": "matplotlib",
    "name": "tick_left",
    "source_code": "def tick_left(self):\n    label = True\n    if 'label1On' in self._major_tick_kw:\n        label = self._major_tick_kw['label1On'] or self._major_tick_kw['label2On']\n    self.set_ticks_position('left')\n    self.set_tick_params(which='both', labelleft=label)",
    "docstring": "Move ticks and ticklabels (if present) to the left of the Axes.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\axis.py",
    "ast_data": "FunctionDef name:tick_left arg:self arguments arg Assign If Compare Assign BoolOp Call Call"
  },
  {
    "library": "kornia",
    "name": "FocalLoss",
    "source_code": "class FocalLoss(nn.Module):\n\n    def __init__(self, alpha: Optional[float], gamma: float=2.0, reduction: str='none', weight: Optional[Tensor]=None, ignore_index: Optional[int]=-100) -> None:\n        super().__init__()\n        self.alpha: Optional[float] = alpha\n        self.gamma: float = gamma\n        self.reduction: str = reduction\n        self.weight: Optional[Tensor] = weight\n        self.ignore_index: Optional[int] = ignore_index\n\n    def forward(self, pred: Tensor, target: Tensor) -> Tensor:\n        return focal_loss(pred, target, self.alpha, self.gamma, self.reduction, self.weight, self.ignore_index)",
    "docstring": "Criterion that computes Focal loss. According to :cite:, the Focal loss is computed as follows: .. math:: \\text{FL}(p_t) = -\\alpha_t (1 - p_t)^{\\gamma} \\, \\text{log}(p_t) Where: - :math: is the model's estimated probability for each class. Args: alpha: Weighting factor :math:. gamma: Focusing parameter :math:. reduction: Specifies the reduction to apply to the output: `(num\\_of\\_classes,)(N, C, *)(N, *)target[i] \\in [0, C)`. Example: >>> C = 5 # num_classes >>> pred = torch.randn(1, C, 3, 5, requires_grad=True) >>> target = torch.randint(C, (1, 3, 5)) >>> kwargs = {\"alpha\": 0.5, \"gamma\": 2.0, \"reduction\": 'mean'} >>> criterion = FocalLoss(**kwargs) >>> output = criterion(pred, target) >>> output.backward()",
    "type": "class",
    "file_path": "kornia\\kornia\\losses\\focal.py",
    "ast_data": "ClassDef name:FocalLoss FunctionDef name:__init__ arg:self arg:alpha arg:gamma arg:reduction arg:weight arg:ignore_index arguments arg arg arg arg arg arg Call Call FunctionDef name:forward arg:self arg:pred arg:target arguments arg arg arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_parse_topology",
    "source_code": "def _parse_topology(self, serialized):\n    proto = topology_pb2.TopologyProto()\n    proto.ParseFromString(serialized)\n    self._mesh_shape = np.array(proto.mesh_shape, dtype=np.int32)\n    if len(self._mesh_shape) != 4 or any(self._mesh_shape < 1):\n        raise ValueError('`mesh_shape` must be a vector of size 4 with positive entries; got {}'.format(self._mesh_shape))\n    if proto.num_tasks < 0:\n        raise ValueError('`num_tasks` must be >= 0; got {}'.format(proto.num_tasks))\n    if proto.num_tpu_devices_per_task < 0:\n        raise ValueError('`num_tpu_devices_per_task` must be >= 0; got {}'.format(proto.num_tpu_devices_per_task))\n    expected_coordinates_size = proto.num_tasks * proto.num_tpu_devices_per_task * len(proto.mesh_shape)\n    if len(proto.device_coordinates) != expected_coordinates_size:\n        raise ValueError('`device_coordinates` must have shape num_tasks ({}) * num_tpu_devices_per_task ({}) * len(mesh_shape) ({}); got shape {}'.format(proto.num_tasks, proto.num_tpu_devices_per_task, proto.mesh_shape, len(proto.device_coordinates)))\n    coords = np.array(proto.device_coordinates, dtype=np.int32)\n    if any(coords < 0):\n        raise ValueError('All values in `device_coordinates` must be >= 0, got {}'.format(coords))\n    coords = coords.reshape((proto.num_tasks, proto.num_tpu_devices_per_task, len(proto.mesh_shape)))\n    self._device_coordinates = coords",
    "docstring": "Parses a serialized into .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\tpu\\topology.py",
    "ast_data": "FunctionDef name:_parse_topology arg:self arg:serialized arguments arg arg Assign Call Call Assign Call If BoolOp Compare Call Call Compare Raise Call Call If Compare Raise Call Call If Compare Raise Call Call Assign Call If Compare Call Raise Call Call Call Assign Call If Call Compare Raise Call Call Assign Call Call Assign"
  },
  {
    "library": "pytorch",
    "name": "token_lines",
    "source_code": "@cached_property\ndef token_lines(self) -> list[list[TokenInfo]]:\n    token_lines: list[list[TokenInfo]] = [[]]\n    for t in self.tokens:\n        if t.type not in (token.COMMENT, token.ENDMARKER, token.NL):\n            token_lines[-1].append(t)\n            if t.type == token.NEWLINE:\n                token_lines.append([])\n    if token_lines and (not token_lines[-1]):\n        token_lines.pop()\n    return token_lines",
    "docstring": "Returns lists of TokenInfo segmented by token.NEWLINE",
    "type": "method",
    "file_path": "pytorch\\tools\\linter\\adapters\\_linter.py",
    "ast_data": "FunctionDef name:token_lines arg:self arguments arg For If Compare Call If Compare Call If BoolOp Call Return return:yes"
  },
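A standalone sketch of the same segmentation using only the standard library, mirroring the property above: comments and `NL` tokens are dropped, and the list is split at each `NEWLINE`.

```python
import io
import token
import tokenize

src = "x = 1  # comment\ny = 2\n"
lines = [[]]
for t in tokenize.generate_tokens(io.StringIO(src).readline):
    if t.type not in (token.COMMENT, token.ENDMARKER, token.NL):
        lines[-1].append(t)
        if t.type == token.NEWLINE:
            lines.append([])
if lines and not lines[-1]:
    lines.pop()  # drop the trailing empty segment
print([len(line) for line in lines])  # [4, 4]: x/=/1/NEWLINE, y/=/2/NEWLINE
```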
  {
    "library": "matplotlib",
    "name": "_set_transform",
    "source_code": "def _set_transform(self):\n    dx = self._dots_per_unit(self.units)\n    self._trans_scale = dx\n    trans = transforms.Affine2D().scale(dx)\n    self.set_transform(trans)\n    return trans",
    "docstring": "Set the PolyCollection transform to go from arrow width units to pixels.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\quiver.py",
    "ast_data": "FunctionDef name:_set_transform arg:self arguments arg Assign Call Assign Assign Call Call Call Return return:yes"
  },
  {
    "library": "scipy",
    "name": "residues",
    "source_code": "def residues(self):\n    if self._residues is None:\n        with np.errstate(divide='ignore', invalid='ignore'):\n            N = 1 / np.subtract.outer(self.poles(), self._support_points) @ (self._support_values * self.weights)\n            Ddiff = -(1 / np.subtract.outer(self.poles(), self._support_points)) ** 2 @ self.weights\n            self._residues = N / Ddiff\n    return self._residues",
    "docstring": "Compute the residues of the poles of the approximation. Returns ------- residues : array Residues associated with the of the approximation",
    "type": "method",
    "file_path": "scipy\\scipy\\interpolate\\_bary_rational.py",
    "ast_data": "FunctionDef name:residues arg:self arguments arg If Compare With Call Assign Call Call Assign Call Call Assign Return return:yes"
  },
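The code above evaluates the standard residue formula for a barycentric rational r(z) = N(z)/D(z) at a simple pole z_k; in the implementation's notation, t_j are `_support_points`, f_j are `_support_values`, and w_j are `weights`. A sketch of the formulas being computed:

```latex
\operatorname{Res}_{z = z_k} r(z) = \frac{N(z_k)}{D'(z_k)}, \qquad
N(z) = \sum_j \frac{w_j f_j}{z - t_j}, \quad
D(z) = \sum_j \frac{w_j}{z - t_j}, \quad
D'(z) = -\sum_j \frac{w_j}{(z - t_j)^2}.
```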
  {
    "library": "tensorflow",
    "name": "predict_function",
    "source_code": "def predict_function(iterator):\n    return step_function(self, iterator)",
    "docstring": "Runs an evaluation execution with one step.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\training.py",
    "ast_data": "FunctionDef name:predict_function arg:iterator arguments arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "value_shape",
    "source_code": "def value_shape(self):\n    for serialized_tensor in self.object_proto.attributes:\n        if serialized_tensor.name == constants.VARIABLE_VALUE_KEY:\n            return self._checkpoint.shape_map[serialized_tensor.checkpoint_key]\n    return None",
    "docstring": "The shape of the VARIABLE_VALUE tensor. Returns: If found a TensorShape object, otherwise None.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\checkpoint\\restore.py",
    "ast_data": "FunctionDef name:value_shape arg:self arguments arg For If Compare Return return:yes Return return:no"
  },
  {
    "library": "tensorflow",
    "name": "_try_build_layer",
    "source_code": "def _try_build_layer(self, obj, node_id, build_input_shape):\n    if obj.built or hasattr(obj.build, '_is_default'):\n        obj.built = True\n        return True\n    if build_input_shape is None:\n        build_input_shape = self._infer_inputs(node_id, convert_to_shapes=True)\n    if build_input_shape is not None:\n        obj.build(build_input_shape)\n        base_layer.Layer.build(obj, build_input_shape)\n        return True\n    return False",
    "docstring": "Attempts to build the layer.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\saving\\saved_model\\load.py",
    "ast_data": "FunctionDef name:_try_build_layer arg:self arg:obj arg:node_id arg:build_input_shape arguments arg arg arg arg If BoolOp Call Assign Return return:yes If Compare Assign Call If Compare Call Call Return return:yes Return return:yes"
  },
  {
    "library": "pandas",
    "name": "PossibleDataLossError",
    "source_code": "class PossibleDataLossError(Exception):\n    pass",
    "docstring": "Exception raised when trying to open a HDFStore file when already opened. This error is triggered when there is a potential risk of data loss due to conflicting operations on an HDFStore file. It serves to prevent unintended overwrites or data corruption by enforcing exclusive access to the file. See Also -------- HDFStore : Dict-like IO interface for storing pandas objects in PyTables. HDFStore.open : Open an HDFStore file in the specified mode. Examples -------- >>> store = pd.HDFStore(\"my-store\", \"a\") # doctest: +SKIP >>> store.open(\"w\") # doctest: +SKIP",
    "type": "class",
    "file_path": "pandas\\pandas\\errors\\__init__.py",
    "ast_data": "ClassDef name:PossibleDataLossError"
  },
  {
    "library": "pytorch",
    "name": "_simplify_obj_name",
    "source_code": "def _simplify_obj_name(obj) -> str:\n    if inspect.isfunction(obj):\n        return obj.__name__\n    else:\n        return repr(obj)",
    "docstring": "Simplify the display strings of objects for the purpose of rendering within DataPipe error messages.",
    "type": "function",
    "file_path": "pytorch\\torch\\utils\\data\\datapipes\\_hook_iterator.py",
    "ast_data": "FunctionDef name:_simplify_obj_name arg:obj arguments arg If Call Return return:yes Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "_ScalarGradWrapper",
    "source_code": "class _ScalarGradWrapper:\n\n    def __init__(self, grad, fun=None, args=None, finite_diff_options=None):\n        self.fun = fun\n        self.grad = grad\n        self.args = [] if args is None else args\n        self.finite_diff_options = finite_diff_options\n        self.ngev = 0\n        self.nfev = 0\n\n    def __call__(self, x, f0=None, **kwds):\n        if callable(self.grad):\n            g = np.atleast_1d(self.grad(np.copy(x), *self.args))\n        elif self.grad in FD_METHODS:\n            g, dct = approx_derivative(self.fun, x, f0=f0, **self.finite_diff_options)\n            self.nfev += dct['nfev']\n        self.ngev += 1\n        return g",
    "docstring": "Wrapper class for gradient calculation",
    "type": "class",
    "file_path": "scipy\\scipy\\optimize\\_differentiable_functions.py",
    "ast_data": "ClassDef name:_ScalarGradWrapper FunctionDef name:__init__ arg:self arg:grad arg:fun arg:args arg:finite_diff_options arguments arg arg arg arg arg Assign Assign Assign Compare Assign Assign Assign FunctionDef name:__call__ arg:self arg:x arg:f0 arguments arg arg arg arg If Call Assign Call Call Call If Compare Assign Call Return return:yes"
  },
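When `grad` is one of the finite-difference method strings, the wrapper falls back to a numerical gradient. A conceptual sketch of that fallback using SciPy's public `approx_fprime` (the wrapper itself uses the private `approx_derivative`):

```python
import numpy as np
from scipy.optimize import approx_fprime

f = lambda x: x[0] ** 2 + 3 * x[1]    # analytic gradient is [2*x0, 3]
x0 = np.array([1.0, 2.0])
print(approx_fprime(x0, f, 1.49e-8))  # ~ [2., 3.]
```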
  {
    "library": "tensorflow",
    "name": "_decode_error_legacy",
    "source_code": "def _decode_error_legacy(self, err):\n    for line in str(err).splitlines():\n        if line.startswith(_CUSTOM_OPS_HDR):\n            custom_ops = line[len(_CUSTOM_OPS_HDR):]\n            err_string = f\"{_AUTHORING_ERROR_HDR}: op '{custom_ops}' is(are) not natively supported by TensorFlow Lite. You need to provide a custom operator. https://www.tensorflow.org/lite/guide/ops_custom\"\n            self._log(err_string)\n        elif line.startswith(_TF_OPS_HDR):\n            tf_ops = line[len(_TF_OPS_HDR):]\n            err_string = f\"\"\"{_AUTHORING_WARNING_HDR}: op '{tf_ops}' require(s) \"Select TF Ops\" for model conversion for TensorFlow Lite. https://www.tensorflow.org/lite/guide/ops_select\"\"\"\n            self._log(err_string)",
    "docstring": "Parses the given legacy ConverterError for OSS.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\lite\\python\\authoring\\authoring.py",
    "ast_data": "FunctionDef name:_decode_error_legacy arg:self arg:err arguments arg arg For Call Call If Call Assign Call Assign Call If Call Assign Call Assign Call"
  },
  {
    "library": "tensorflow",
    "name": "write_graph",
    "source_code": "@tf_export('io.write_graph', v1=['io.write_graph', 'train.write_graph'])\ndef write_graph(graph_or_graph_def, logdir, name, as_text=True):\n    if isinstance(graph_or_graph_def, ops.Graph):\n        graph_def = graph_or_graph_def.as_graph_def()\n    else:\n        graph_def = graph_or_graph_def\n    if sys.byteorder == 'big':\n        if hasattr(graph_def, 'node'):\n            byte_swap_tensor.swap_tensor_content_in_graph_node(graph_def, 'big', 'little')\n        else:\n            byte_swap_tensor.swap_tensor_content_in_graph_function(graph_def, 'big', 'little')\n    if not logdir.startswith('gs:'):\n        file_io.recursive_create_dir(logdir)\n    path = os.path.join(logdir, name)\n    if as_text:\n        file_io.atomic_write_string_to_file(path, text_format.MessageToString(graph_def, float_format=''))\n    else:\n        file_io.atomic_write_string_to_file(path, graph_def.SerializeToString(deterministic=True))\n    return path",
    "docstring": "Writes a graph proto to a file. The graph is written as a text proto unless is . or Args: graph_or_graph_def: A or a protocol buffer. logdir: Directory where to write the graph. This can refer to remote filesystems, such as Google Cloud Storage (GCS). name: Filename for the graph. as_text: If , writes the graph as an ASCII proto. Returns: The path of the output proto file.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\graph_io.py",
    "ast_data": "FunctionDef name:write_graph arg:graph_or_graph_def arg:logdir arg:name arg:as_text arguments arg arg arg arg If Call Assign Call Assign If Compare If Call Call Call If Call Call Assign Call If Call Call Call Call Return return:yes Call"
  },
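A hedged usage sketch for `tf.io.write_graph`; the function, directory, and filename are illustrative.

```python
import tensorflow as tf

@tf.function
def f(x):
    return x + 1

# Extract a GraphDef from a concrete function and write it as a text proto.
graph_def = f.get_concrete_function(
    tf.TensorSpec([], tf.float32)).graph.as_graph_def()
path = tf.io.write_graph(graph_def, "/tmp/my-model", "train.pbtxt", as_text=True)
print(path)  # /tmp/my-model/train.pbtxt
```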
  {
    "library": "pytorch",
    "name": "prepare_for_pickle",
    "source_code": "def prepare_for_pickle(self) -> tuple[Any, Any, Any, Any, Any]:\n    old_values = (self.fn.fn, self.fn.__globals__, self.fn.used_global_vals, self.fn.repr, self.launchers)\n    self.fn.fn = None\n    self.fn.__globals__ = None\n    self.fn.used_global_vals = None\n    self.fn.repr = _ConstRepr(self.fn.repr(self.fn))\n    self.launchers = []\n    return old_values",
    "docstring": "Drop stuff from triton.JITFunction that does not pickle. This must be called after precompile so that these things are no longer needed. Returns a tuple of old values",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\runtime\\triton_heuristics.py",
    "ast_data": "FunctionDef name:prepare_for_pickle arg:self arguments arg Assign Assign Assign Assign Assign Call Call Assign Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "extra_repr",
    "source_code": "def extra_repr(self) -> str:\n    return '{normalized_shape}, eps={eps}, elementwise_affine={elementwise_affine}'.format(**self.__dict__)",
    "docstring": "Extra information about the module.",
    "type": "method",
    "file_path": "pytorch\\torch\\nn\\modules\\normalization.py",
    "ast_data": "FunctionDef name:extra_repr arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "ExclusiveKeywordArg",
    "source_code": "class ExclusiveKeywordArg(PatternExpr):\n    name: str\n\n    def __init__(self, name: str) -> None:\n        super().__init__()\n        self.name = name\n\n    def __repr__(self) -> str:\n        return f'ExclusiveKeywordArg({self.name!r})'\n\n    def _match(self, node: NodeOrConstant, ctx: MatchContext) -> MatchResult:\n        if node in ctx.exclusive_node_set:\n            return FailedMatch('exclusive arg appears twice')\n        ctx.exclusive_node_set.append(node)\n        return Match(ctx, self, kwargs={self.name: node})\n\n    def pattern_eq(self, other: Any) -> bool:\n        other = typing.cast(Self, other)\n        return super().pattern_eq(other) and self.name == other.name",
    "docstring": "Capture a kwarg which will become an input to the handler.",
    "type": "class",
    "file_path": "pytorch\\torch\\_inductor\\pattern_matcher.py",
    "ast_data": "ClassDef name:ExclusiveKeywordArg FunctionDef name:__init__ arg:self arg:name arguments arg arg Call Call Assign FunctionDef name:__repr__ arg:self arguments arg Return return:yes FunctionDef name:_match arg:self arg:node arg:ctx arguments arg arg arg If Compare Return return:yes Call Call Return return:yes Call FunctionDef name:pattern_eq arg:self arg:other arguments arg arg Assign Call Return return:yes BoolOp Call Call Compare"
  },
  {
    "library": "numpy",
    "name": "insert_quotes",
    "source_code": "def insert_quotes(s, d):\n    for k, v in d.items():\n        kind = k[:k.find('@')]\n        if kind:\n            kind += '_'\n        s = s.replace(k, kind + v)\n    return s",
    "docstring": "Inverse of eliminate_quotes.",
    "type": "function",
    "file_path": "numpy\\numpy\\f2py\\symbolic.py",
    "ast_data": "FunctionDef name:insert_quotes arg:s arg:d arguments arg arg For Call Assign Call If Assign Call Return return:yes"
  },
  {
    "library": "pandas",
    "name": "private_import_across_module",
    "source_code": "def private_import_across_module(file_obj: IO[str]) -> Iterable[tuple[int, str]]:\n    contents = file_obj.read()\n    tree = ast.parse(contents)\n    for node in ast.walk(tree):\n        if not isinstance(node, (ast.Import, ast.ImportFrom)):\n            continue\n        for module in node.names:\n            module_name = module.name.split('.')[-1]\n            if module_name in PRIVATE_IMPORTS_TO_IGNORE:\n                continue\n            if module_name.startswith('_'):\n                yield (node.lineno, f'Import of internal function {module_name!r}')",
    "docstring": "Checking that a private function is not imported across modules. Parameters ---------- file_obj : IO File-like object containing the Python code to validate. Yields ------ line_number : int Line number of import statement, that imports the private function. msg : str Explanation of the error.",
    "type": "function",
    "file_path": "pandas\\scripts\\validate_unwanted_patterns.py",
    "ast_data": "FunctionDef name:private_import_across_module arg:file_obj arguments arg Assign Call Assign Call For Call If Call For Assign Call If Compare If Call"
  },
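A usage sketch for the checker above, assuming the pandas repo's `scripts/` directory is on `sys.path`; the module source and `_private_helper` name are made up.

```python
import io
from validate_unwanted_patterns import private_import_across_module

src = "from a.b import _private_helper\n"
for lineno, msg in private_import_across_module(io.StringIO(src)):
    print(lineno, msg)  # 1 Import of internal function '_private_helper'
```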
  {
    "library": "kornia",
    "name": "SpatialSoftArgmax2d",
    "source_code": "class SpatialSoftArgmax2d(Module):\n\n    def __init__(self, temperature: Optional[Tensor]=None, normalized_coordinates: bool=True) -> None:\n        super().__init__()\n        if temperature is None:\n            temperature = tensor(1.0)\n        self.temperature: Tensor = temperature\n        self.normalized_coordinates: bool = normalized_coordinates\n\n    def __repr__(self) -> str:\n        return f'{self.__class__.__name__}temperature={self.temperature}, normalized_coordinates={self.normalized_coordinates})'\n\n    def forward(self, input: Tensor) -> Tensor:\n        return spatial_soft_argmax2d(input, self.temperature, self.normalized_coordinates)",
    "docstring": "Compute the Spatial Soft-Argmax 2D of a given heatmap. See :func: for details.",
    "type": "class",
    "file_path": "kornia\\kornia\\geometry\\subpix\\spatial_soft_argmax.py",
    "ast_data": "ClassDef name:SpatialSoftArgmax2d FunctionDef name:__init__ arg:self arg:temperature arg:normalized_coordinates arguments arg arg arg Call Call If Compare Assign Call FunctionDef name:__repr__ arg:self arguments arg Return return:yes FunctionDef name:forward arg:self arg:input arguments arg arg Return return:yes Call"
  },
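A hedged usage sketch, assuming `SpatialSoftArgmax2d` is exported from `kornia.geometry.subpix` as the `file_path` suggests; the heatmap values are made up.

```python
import torch
from kornia.geometry.subpix import SpatialSoftArgmax2d

heatmap = torch.zeros(1, 1, 5, 5)
heatmap[0, 0, 2, 3] = 10.0  # single peak at row 2, column 3
coords = SpatialSoftArgmax2d(temperature=torch.tensor(10.0))(heatmap)
print(coords)  # (1, 1, 2) tensor of normalized (x, y), near (0.5, 0.0)
```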
  {
    "library": "pytorch",
    "name": "get_scale",
    "source_code": "def get_scale(self) -> float:\n    if self._enabled:\n        return self._init_scale if (scale := self._get_scale_async()) is None else cast(float, scale.item())\n    return 1.0",
    "docstring": "Return a Python float containing the current scale, or 1.0 if scaling is disabled. .. warning:: :meth: incurs a CPU-GPU sync.",
    "type": "method",
    "file_path": "pytorch\\torch\\amp\\grad_scaler.py",
    "ast_data": "FunctionDef name:get_scale arg:self arguments arg If Return return:yes Compare Call Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "convert",
    "source_code": "@_export_metrics\ndef convert(self):\n    if not self._has_valid_tensors():\n        if not self._input_arrays_with_shape or not (self._output_arrays or self._control_output_arrays):\n            raise ValueError('If input_tensors and output_tensors are None, both input_arrays_with_shape and output_arrays|control_output_arrays must be defined.')\n    return super(TFLiteFrozenGraphConverter, self).convert()",
    "docstring": "Converts a TensorFlow GraphDef based on instance variables. Returns: The converted data in serialized format, either a TFLite Flatbuffer or a Graphviz graph depending on value in . Raises: ValueError: Input shape is not specified. None value for dimension in input_tensor.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\lite\\python\\lite.py",
    "ast_data": "FunctionDef name:convert arg:self arguments arg If Call If BoolOp BoolOp Raise Call Return return:yes Call Call"
  },
  {
    "library": "django",
    "name": "accepted_types",
    "source_code": "@cached_property\ndef accepted_types(self):\n    header_value = self.headers.get('Accept', '*/*')\n    return sorted((MediaType(token) for token in header_value.split(',') if token.strip()), key=operator.attrgetter('quality', 'specificity'), reverse=True)",
    "docstring": "Return a list of MediaType instances, in order of preference.",
    "type": "method",
    "file_path": "django\\django\\http\\request.py",
    "ast_data": "FunctionDef name:accepted_types arg:self arguments arg Assign Call Return return:yes Call Call Call Call Call"
  },
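A sketch of the quality/specificity ordering above on a typical `Accept` header; `settings.configure()` with defaults is assumed sufficient for building a bare request.

```python
import django
from django.conf import settings

settings.configure()  # minimal settings so a bare request can be built
django.setup()
from django.test import RequestFactory

request = RequestFactory().get(
    "/", HTTP_ACCEPT="text/html;q=0.8, application/json, */*;q=0.1")
print([str(mt) for mt in request.accepted_types])
# application/json first (implicit q=1.0), then text/html, then */*
```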
  {
    "library": "tensorflow",
    "name": "_add_asset_to_metagraph",
    "source_code": "def _add_asset_to_metagraph(meta_graph_def, asset_filename, asset_tensor):\n    asset_proto = meta_graph_def.asset_file_def.add()\n    asset_proto.filename = asset_filename\n    asset_proto.tensor_info.name = asset_tensor.name",
    "docstring": "Builds an asset proto and adds it to the meta graph def. Args: meta_graph_def: The meta graph def to which the asset will be added. asset_filename: The filename of the asset to be added. asset_tensor: The asset tensor used to populate the tensor info of the asset proto.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\saved_model\\builder_impl.py",
    "ast_data": "FunctionDef name:_add_asset_to_metagraph arg:meta_graph_def arg:asset_filename arg:asset_tensor arguments arg arg arg Assign Call Assign Assign"
  },
  {
    "library": "pytorch",
    "name": "_get_type_constraint_name",
    "source_code": "def _get_type_constraint_name(type_: TypeAnnotationValue) -> str | None:\n    if isinstance(type_, TypeVar):\n        return type_.__name__\n    if _is_optional(type_):\n        subtypes = typing.get_args(type_)\n        for subtype in subtypes:\n            if subtype is type(None):\n                continue\n            type_param_name = _get_type_constraint_name(subtype)\n            return type_param_name if type_param_name else None\n    origin_type = typing.get_origin(type_)\n    if isinstance(origin_type, type) and issubclass(origin_type, Sequence):\n        subtypes = typing.get_args(type_)\n        type_param_name = _get_type_constraint_name(subtypes[0])\n        return f'Sequence_{type_param_name}' if type_param_name else None\n    return None",
    "docstring": "Returns the name of the type constraint for a given type annotation. Args: type_: A Python type. Returns: The name of the type constraint if it is a TypeVar. - Prefixes the name with \"Sequence_\" if the type annotation is a Sequence[].",
    "type": "function",
    "file_path": "pytorch\\torch\\onnx\\_internal\\exporter\\_schemas.py",
    "ast_data": "FunctionDef name:_get_type_constraint_name arg:type_ arguments arg If Call Return return:yes If Call Assign Call For If Compare Call Assign Call Return return:yes Assign Call If BoolOp Call Call Assign Call Assign Call Return return:yes Return return:no"
  },
  {
    "library": "matplotlib",
    "name": "new_locator",
    "source_code": "def new_locator(self, nx, ny, nx1=None, ny1=None):\n    if nx1 is None:\n        nx1 = nx + 1\n    if ny1 is None:\n        ny1 = ny + 1\n    xref = self._xrefindex\n    yref = self._yrefindex\n    locator = functools.partial(self._locate, nx - xref, ny - yref, nx1 - xref, ny1 - yref)\n    locator.get_subplotspec = self.get_subplotspec\n    return locator",
    "docstring": "Return an axes locator callable for the specified cell. Parameters ---------- nx, nx1 : int Integers specifying the column-position of the cell. When *nx1* is None, a single *nx*-th column is specified. Otherwise, location of columns spanning between *nx* to *nx1* (but excluding *nx1*-th column) is specified. ny, ny1 : int Same as *nx* and *nx1*, but for row positions.",
    "type": "method",
    "file_path": "matplotlib\\lib\\mpl_toolkits\\axes_grid1\\axes_divider.py",
    "ast_data": "FunctionDef name:new_locator arg:self arg:nx arg:ny arg:nx1 arg:ny1 arguments arg arg arg arg arg If Compare Assign If Compare Assign Assign Assign Assign Call Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_HistogramPercentile",
    "source_code": "@_implements(_CalibrationMethod.CALIBRATION_METHOD_HISTOGRAM_PERCENTILE)\nclass _HistogramPercentile(_HistogramCalibrationAlgorithmBase):\n\n    def get_min_max_value(self) -> tuple[float, float]:\n        total_freq = sum(self._hist_freq)\n        hist_freq_cumsum = np.cumsum(self._hist_freq) / total_freq\n        min_quantile, max_quantile = (self._calib_opts.calibration_parameters.min_percentile / 100.0, self._calib_opts.calibration_parameters.max_percentile / 100.0)\n        min_quantile_idx, max_quantile_idx = (np.searchsorted(hist_freq_cumsum, min_quantile, side='right'), np.searchsorted(hist_freq_cumsum, max_quantile, side='left'))\n        min_value, max_value = (self._hist_mids[min_quantile_idx], self._hist_mids[max_quantile_idx])\n        return (min_value, max_value)",
    "docstring": "HistogramPercentile for calculating min and max values of calibration result.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\compiler\\mlir\\quantization\\tensorflow\\calibrator\\calibration_algorithm.py",
    "ast_data": "ClassDef name:_HistogramPercentile FunctionDef name:get_min_max_value arg:self arguments arg Assign Call Assign Call Assign Assign Call Call Assign Return return:yes Call"
  },
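A NumPy sketch of the percentile lookup performed above: frequencies become a CDF, and `searchsorted` picks the histogram mids at the requested quantiles. The frequencies and percentiles are made up.

```python
import numpy as np

freq = np.array([1.0, 5.0, 20.0, 50.0, 20.0, 3.0, 1.0])  # histogram counts
mids = np.arange(7) + 0.5                                 # bin midpoints
cdf = np.cumsum(freq) / freq.sum()
lo = np.searchsorted(cdf, 0.001, side="right")  # min_percentile = 0.1
hi = np.searchsorted(cdf, 0.999, side="left")   # max_percentile = 99.9
print(mids[lo], mids[hi])  # 0.5 6.5 -> calibration min/max values
```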
  {
    "library": "pytorch",
    "name": "StorageWriter",
    "source_code": "class StorageWriter(abc.ABC):\n\n    @abc.abstractmethod\n    def reset(self, checkpoint_id: Union[str, os.PathLike, None]=None) -> None:\n        ...\n\n    @abc.abstractmethod\n    def set_up_storage_writer(self, is_coordinator: bool) -> None:\n        pass\n\n    @abc.abstractmethod\n    def prepare_local_plan(self, plan: SavePlan) -> SavePlan:\n        pass\n\n    @abc.abstractmethod\n    def prepare_global_plan(self, plans: list[SavePlan]) -> list[SavePlan]:\n        pass\n\n    @abc.abstractmethod\n    def write_data(self, plan: SavePlan, planner: SavePlanner) -> Future[list[WriteResult]]:\n        pass\n\n    @abc.abstractmethod\n    def finish(self, metadata: Metadata, results: list[list[WriteResult]]) -> None:\n        pass\n\n    @classmethod\n    @abc.abstractmethod\n    def validate_checkpoint_id(cls, checkpoint_id: Union[str, os.PathLike]) -> bool:\n        ...\n\n    def storage_meta(self) -> Optional[StorageMeta]:\n        return None",
    "docstring": "Interface used by `` to write to storage. One StorageWriter instance acts as both the coordinator and the follower in a distributed checkpoint. As part of initialization, each instance is told its role. A subclass should expect the following sequence of calls. 0) (all ranks) set checkpoint_id if users pass a valid checkpoint_id. 1) (all ranks) set_up_storage_writer() 2) (all ranks) prepare_local_plan() 3) (coordinator) prepare_global_plan() 4) (all ranks) write_data() 5) (coordinator) finish()",
    "type": "class",
    "file_path": "pytorch\\torch\\distributed\\checkpoint\\storage.py",
    "ast_data": "ClassDef name:StorageWriter FunctionDef name:reset arg:self arg:checkpoint_id arguments arg arg FunctionDef name:set_up_storage_writer arg:self arg:is_coordinator arguments arg arg FunctionDef name:prepare_local_plan arg:self arg:plan arguments arg arg FunctionDef name:prepare_global_plan arg:self arg:plans arguments arg arg FunctionDef name:write_data arg:self arg:plan arg:planner arguments arg arg arg FunctionDef name:finish arg:self arg:metadata arg:results arguments arg arg arg FunctionDef name:validate_checkpoint_id arg:cls arg:checkpoint_id arguments arg arg FunctionDef name:storage_meta arg:self arguments arg Return return:no"
  },
  {
    "library": "scipy",
    "name": "_UnilinearModel",
    "source_code": "class _UnilinearModel(Model):\n\n    def __init__(self):\n        super().__init__(_unilin, fjacd=_unilin_fjd, fjacb=_unilin_fjb, estimate=_unilin_est, meta={'name': 'Univariate Linear', 'equ': 'y = B_0 * x + B_1', 'TeXequ': '$y = \\\\beta_0 x + \\\\beta_1$'})",
    "docstring": "Univariate linear model This model is defined by :math: Examples -------- We can calculate orthogonal distance regression with an unilinear model: >>> from scipy import odr >>> import numpy as np >>> x = np.linspace(0.0, 5.0) >>> y = 1.0 * x + 2.0 >>> data = odr.Data(x, y) >>> odr_obj = odr.ODR(data, odr.unilinear) >>> output = odr_obj.run() >>> print(output.beta) [1. 2.]",
    "type": "class",
    "file_path": "scipy\\scipy\\odr\\_models.py",
    "ast_data": "ClassDef name:_UnilinearModel FunctionDef name:__init__ arg:self arguments arg Call Call"
  },
  {
    "library": "matplotlib",
    "name": "pathpatch_2d_to_3d",
    "source_code": "def pathpatch_2d_to_3d(pathpatch, z=0, zdir='z'):\n    path = pathpatch.get_path()\n    trans = pathpatch.get_patch_transform()\n    mpath = trans.transform_path(path)\n    pathpatch.__class__ = PathPatch3D\n    pathpatch.set_3d_properties(mpath, z, zdir)",
    "docstring": "Convert a to a object.",
    "type": "function",
    "file_path": "matplotlib\\lib\\mpl_toolkits\\mplot3d\\art3d.py",
    "ast_data": "FunctionDef name:pathpatch_2d_to_3d arg:pathpatch arg:z arg:zdir arguments arg arg arg Assign Call Assign Call Assign Call Assign Call"
  },
  {
    "library": "pytorch",
    "name": "DimDynamic",
    "source_code": "class DimDynamic(Enum):\n    DYNAMIC = 0\n    DUCK = 1\n    STATIC = 2\n    SIZE_LIKE_UNBACKED = 3\n    INFER_STRIDE = 4\n    OBLIVIOUS_SIZE = 5",
    "docstring": "Controls how to perform symbol allocation for a dimension. It is always sound to default this to DYNAMIC, but the policies DUCK and STATIC can result in better trace-time and compile-time performance, as they reduce the number of allocated symbols and generally make your graph more static. NB: If we notice you've applied a constraint to the dimension, we will force it to DYNAMIC for simplicity. DimDynamic is controlled by a variety of higher level UX features. Currently: - In eager mode, the default policy is DUCK. - The default is changed to STATIC with assume_static_by_default. - An individual dim is marked DYNAMIC if you mark_dynamic_dim. - In export mode, the default policy is STATIC. - An individual dim is marked DYNAMIC if you specify it in dynamic_shapes passed to export.",
    "type": "class",
    "file_path": "pytorch\\torch\\fx\\experimental\\symbolic_shapes.py",
    "ast_data": "ClassDef name:DimDynamic Assign Assign Assign Assign Assign Assign"
  },
  {
    "library": "pandas",
    "name": "MergeError",
    "source_code": "class MergeError(ValueError):\n    pass",
    "docstring": "Exception raised when merging data. Subclass of ``. See Also -------- DataFrame.join : For joining DataFrames on their indexes. merge : For merging two DataFrames on a common set of keys. Examples -------- >>> left = pd.DataFrame( ... {\"a\": [\"a\", \"b\", \"b\", \"d\"], \"b\": [\"cat\", \"dog\", \"weasel\", \"horse\"]}, ... index=range(4), ... ) >>> right = pd.DataFrame( ... {\"a\": [\"a\", \"b\", \"c\", \"d\"], \"c\": [\"meow\", \"bark\", \"chirp\", \"nay\"]}, ... index=range(4), ... ).set_index(\"a\") >>> left.join( ... right, ... on=\"a\", ... validate=\"one_to_one\", ... ) Traceback (most recent call last): MergeError: Merge keys are not unique in left dataset; not a one-to-one merge",
    "type": "class",
    "file_path": "pandas\\pandas\\errors\\__init__.py",
    "ast_data": "ClassDef name:MergeError"
  },
  {
    "library": "pytorch",
    "name": "_batch_lowrank_mahalanobis",
    "source_code": "def _batch_lowrank_mahalanobis(W, D, x, capacitance_tril):\n    Wt_Dinv = W.mT / D.unsqueeze(-2)\n    Wt_Dinv_x = _batch_mv(Wt_Dinv, x)\n    mahalanobis_term1 = (x.pow(2) / D).sum(-1)\n    mahalanobis_term2 = _batch_mahalanobis(capacitance_tril, Wt_Dinv_x)\n    return mahalanobis_term1 - mahalanobis_term2",
    "docstring": "Uses \"Woodbury matrix identity\":: inv(W @ W.T + D) = inv(D) - inv(D) @ W @ inv(C) @ W.T @ inv(D), where :math: is the capacitance matrix :math:, to compute the squared Mahalanobis distance :math:.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributions\\lowrank_multivariate_normal.py",
    "ast_data": "FunctionDef name:_batch_lowrank_mahalanobis arg:W arg:D arg:x arg:capacitance_tril arguments arg arg arg arg Assign Call Assign Call Assign Call Call Assign Call Return return:yes"
  },
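The identity the docstring cites can be checked numerically; this is a plain-NumPy sketch (the names `W`, `D`, `C` mirror the docstring, the sizes are arbitrary), not the torch internals.

```python
# Hedged numerical check of the Woodbury identity used above.
import numpy as np

rng = np.random.default_rng(0)
n, k = 5, 2
W = rng.normal(size=(n, k))
D = rng.uniform(1.0, 2.0, size=n)            # positive diagonal of D
Dinv_W = W / D[:, None]                      # inv(D) @ W
C = np.eye(k) + W.T @ Dinv_W                 # capacitance matrix I + W.T inv(D) W

lhs = np.linalg.inv(W @ W.T + np.diag(D))
rhs = np.diag(1.0 / D) - Dinv_W @ np.linalg.inv(C) @ Dinv_W.T
assert np.allclose(lhs, rhs)
```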
  {
    "library": "cherrypy",
    "name": "uid",
    "source_code": "@property\ndef uid(self):\n    return self._uid",
    "docstring": "The uid under which to run. Availability: Unix.",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\process\\plugins.py",
    "ast_data": "FunctionDef name:uid arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "he_normal",
    "source_code": "def he_normal(seed=None):\n    return VarianceScaling(scale=2.0, mode='fan_in', distribution='truncated_normal', seed=seed)",
    "docstring": "He normal initializer. Initializers allow you to pre-specify an initialization strategy, encoded in the Initializer object, without knowing the shape and dtype of the variable being initialized. It draws samples from a truncated normal distribution centered on 0 with where is the number of input units in the weight tensor. Examples: >>> def make_variables(k, initializer): ... return (tf.Variable(initializer(shape=[k, k], dtype=tf.float32)), ... tf.Variable(initializer(shape=[k, k, k], dtype=tf.float32))) >>> v1, v2 = make_variables(3, tf.initializers.he_normal()) >>> v1 >> v2 >> make_variables(4, tf.initializers.RandomNormal()) (<tf.Variable ... shape=(4, 4) dtype=float32... <tf.Variable ... shape=(4, 4, 4) dtype=float32... Args: seed: A Python integer. Used to seed the random generator. Returns: A callable Initializer with and arguments which generates a tensor. References: [He et al., 2015]( # pylint: disable=line-too-long ([pdf](",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\init_ops_v2.py",
    "ast_data": "FunctionDef name:he_normal arg:seed arguments arg Return return:yes Call"
  },
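A quick empirical check of the claimed stddev; a sketch assuming TensorFlow is installed, using the equivalent Keras class `tf.keras.initializers.HeNormal` (the `VarianceScaling` machinery compensates for the truncation, so the sample stddev should land near the nominal value).

```python
# Hedged sketch: He normal weights should have stddev ~ sqrt(2 / fan_in).
import tensorflow as tf

init = tf.keras.initializers.HeNormal(seed=0)
w = init(shape=(256, 128), dtype=tf.float32)   # fan_in = 256
print(float(tf.math.reduce_std(w)))            # close to (2 / 256) ** 0.5 ~= 0.088
```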
  {
    "library": "pytorch",
    "name": "DeletedVariable",
    "source_code": "class DeletedVariable(VariableTracker):\n    pass",
    "docstring": "Marker used to implement delattr()",
    "type": "class",
    "file_path": "pytorch\\torch\\_dynamo\\variables\\misc.py",
    "ast_data": "ClassDef name:DeletedVariable"
  },
  {
    "library": "scikit-learn",
    "name": "make_hastie_10_2",
    "source_code": "@validate_params({'n_samples': [Interval(Integral, 1, None, closed='left')], 'random_state': ['random_state']}, prefer_skip_nested_validation=True)\ndef make_hastie_10_2(n_samples=12000, *, random_state=None):\n    rs = check_random_state(random_state)\n    shape = (n_samples, 10)\n    X = rs.normal(size=shape).reshape(shape)\n    y = ((X ** 2.0).sum(axis=1) > 9.34).astype(np.float64, copy=False)\n    y[y == 0.0] = -1.0\n    return (X, y)",
    "docstring": "Generate data for binary classification used in Hastie et al. 2009, Example 10.2. The ten features are standard independent Gaussian and the target `User Guide Glossary `. Returns ------- X : ndarray of shape (n_samples, 10) The input samples. y : ndarray of shape (n_samples,) The output values. See Also -------- make_gaussian_quantiles : A generalization of this dataset approach. References ---------- .. [1] T. Hastie, R. Tibshirani and J. Friedman, \"Elements of Statistical Learning Ed. 2\", Springer, 2009. Examples -------- >>> from sklearn.datasets import make_hastie_10_2 >>> X, y = make_hastie_10_2(n_samples=24000, random_state=42) >>> X.shape (24000, 10) >>> y.shape (24000,) >>> list(y[:5]) [np.float64(-1.0), np.float64(1.0), np.float64(-1.0), np.float64(1.0), np.float64(-1.0)]",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\datasets\\_samples_generator.py",
    "ast_data": "FunctionDef name:make_hastie_10_2 arg:n_samples arguments arg arg Assign Call Assign Assign Call Call Assign Call Compare Call Assign Compare Return return:yes Call Call"
  },
  {
    "library": "numpy",
    "name": "equal",
    "source_code": "@array_function_dispatch(_binary_op_dispatcher)\ndef equal(x1, x2):\n    return compare_chararrays(x1, x2, '==', True)",
    "docstring": "Return (x1 == x2) element-wise. Unlike , this comparison is performed by first stripping whitespace characters from the end of the string. This behavior is provided for backward-compatibility with numarray. Parameters ---------- x1, x2 : array_like of str or unicode Input arrays of the same shape. Returns ------- out : ndarray Output array of bools. Examples -------- >>> import numpy as np >>> y = \"aa \" >>> x = \"aa\" >>> np.char.equal(x, y) array(True) See Also -------- not_equal, greater_equal, less_equal, greater, less",
    "type": "function",
    "file_path": "numpy\\numpy\\_core\\defchararray.py",
    "ast_data": "FunctionDef name:equal arg:x1 arg:x2 arguments arg arg Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "set_overwrite_module_params_on_conversion",
    "source_code": "def set_overwrite_module_params_on_conversion(value: bool) -> None:\n    global _overwrite_module_params_on_conversion\n    _overwrite_module_params_on_conversion = value",
    "docstring": "Sets whether to assign new tensors to the parameters instead of changing the existing parameters in-place when converting an `nn.Module.cuda()nn.Module.float()nn.Module.tonn.Module.to_empty` Args: value (bool): Whether to assign new tensors or not.",
    "type": "function",
    "file_path": "pytorch\\torch\\__future__.py",
    "ast_data": "FunctionDef name:set_overwrite_module_params_on_conversion arg:value arguments arg Assign"
  },
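A small sketch of the behavioral difference (assumes PyTorch; `m.double()` is used because a dtype change forces a real conversion).

```python
# Hedged demo: with the flag on, dtype conversion replaces the Parameter object
# instead of mutating it in-place.
import torch
from torch.__future__ import set_overwrite_module_params_on_conversion

m = torch.nn.Linear(2, 2)
p = m.weight
set_overwrite_module_params_on_conversion(True)
m.double()                                        # assigns a brand-new Parameter
print(m.weight is p)                              # False
set_overwrite_module_params_on_conversion(False)  # restore the default
```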
  {
    "library": "django",
    "name": "height",
    "source_code": "@property\ndef height(self):\n    return capi.get_band_ysize(self._ptr)",
    "docstring": "Height (Y axis) in pixels of the band.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\gdal\\raster\\band.py",
    "ast_data": "FunctionDef name:height arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "read_data",
    "source_code": "def read_data(self, plan: LoadPlan, planner: LoadPlanner) -> Future[None]:\n    planner = cast(DefaultLoadPlanner, planner)\n    if self.is_coordinator:\n        assert self.checkpoint_id is not None\n        torch_state_dict = torch.load(self.checkpoint_id, map_location='cpu', weights_only=False)\n        if planner.flatten_state_dict:\n            torch_state_dict, _ = flatten_state_dict(torch_state_dict)\n    else:\n        torch_state_dict = None\n    for req in plan.items:\n        if req.type == LoadItemType.BYTE_IO:\n            raise RuntimeError(f'Non-tensor value identified at {req.storage_index.fqn}. At this time {type(self).__name__} only supports loading Tensors.')\n        if self.is_coordinator:\n            pg_device = dist.distributed_c10d._get_pg_default_device()\n            tensor = torch_state_dict[req.storage_index.fqn].to(pg_device)\n        else:\n            tensor = torch.empty_like(planner.state_dict[req.storage_index.fqn])\n        dist.broadcast(tensor, src=self.coordinator_rank, async_op=False)\n        tensor = narrow_tensor_by_index(tensor, req.storage_offsets, req.lengths)\n        target_tensor = planner.resolve_tensor(req).detach()\n        assert target_tensor.size() == tensor.size(), f'req {req.storage_index} mismatch sizes, {target_tensor.size()} vs {tensor.size()}'\n        target_tensor.copy_(tensor)\n        planner.commit_tensor(req, target_tensor)\n    fut: Future = Future()\n    fut.set_result(None)\n    return fut",
    "docstring": "Reads torch save data on the coordinator rank, and broadcast afterwards this incurrs a communication cost, but avoids having to load the entire checkpoint on each rank, hopefully preventing OOM issues",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\checkpoint\\format_utils.py",
    "ast_data": "FunctionDef name:read_data arg:self arg:plan arg:planner arguments arg arg arg Assign Call If Compare Assign Call If Assign Call Assign For If Compare Raise Call Call If Assign Call Assign Call Assign Call Call Assign Call Assign Call Call Compare Call Call Call Call Call Call Call Call Return return:yes"
  },
  {
    "library": "pandas",
    "name": "_get_counts",
    "source_code": "def _get_counts(values_shape: Shape, mask: npt.NDArray[np.bool_] | None, axis: AxisInt | None, dtype: np.dtype[np.floating]=np.dtype(np.float64)) -> np.floating | npt.NDArray[np.floating]:\n    if axis is None:\n        if mask is not None:\n            n = mask.size - mask.sum()\n        else:\n            n = np.prod(values_shape)\n        return dtype.type(n)\n    if mask is not None:\n        count = mask.shape[axis] - mask.sum(axis)\n    else:\n        count = values_shape[axis]\n    if is_integer(count):\n        return dtype.type(count)\n    return count.astype(dtype, copy=False)",
    "docstring": "Get the count of non-null values along an axis Parameters ---------- values_shape : tuple of int shape tuple from values ndarray, used if mask is None mask : Optional[ndarray[bool]] locations in values that should be considered missing axis : Optional[int] axis to count along dtype : type, optional type to use for count Returns ------- count : scalar or array",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\nanops.py",
    "ast_data": "FunctionDef name:_get_counts arg:values_shape arg:mask arg:axis arg:dtype arguments arg arg arg arg Call If Compare If Compare Assign Call Assign Call Return return:yes Call If Compare Assign Call Assign If Call Return return:yes Call Return return:yes Call"
  },
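The counting logic reduces to simple mask arithmetic; this plain-NumPy sketch mirrors the two branches of the helper (illustrative, not the pandas internals).

```python
# Hedged illustration of the non-null counting branches.
import numpy as np

values_shape = (3, 4)
mask = np.zeros(values_shape, dtype=bool)
mask[0, 1] = True                                # one "missing" location

print(np.prod(values_shape) - mask.sum())        # axis=None branch -> 11
print(mask.shape[0] - mask.sum(axis=0))          # axis=0 branch -> [3 2 3 3]
```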
  {
    "library": "tensorflow",
    "name": "_make_validated_mono_param",
    "source_code": "def _make_validated_mono_param(name, value, kind, type_context, poly_type) -> Parameter:\n    mono_type = trace_type.from_value(value, type_context)\n    if poly_type and (not mono_type.is_subtype_of(poly_type)):\n        raise TypeError(f'Parameter `{name}` was expected to be of type {poly_type} but is {mono_type}')\n    return Parameter(name, kind, False, mono_type)",
    "docstring": "Generates and validates a parameter for Monomorphic FunctionType.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\core\\function\\polymorphism\\function_type.py",
    "ast_data": "FunctionDef name:_make_validated_mono_param arg:name arg:value arg:kind arg:type_context arg:poly_type arguments arg arg arg arg arg Assign Call If BoolOp Call Raise Call Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "backtick_repl",
    "source_code": "def backtick_repl(matchobj):\n    if matchobj.group(2) != ' ':\n        post = '\\\\ ' + matchobj.group(2)\n    else:\n        post = matchobj.group(2)\n    return '``' + matchobj.group(1) + '``' + post",
    "docstring": "repl to add an escaped space following a code block if needed",
    "type": "function",
    "file_path": "scipy\\tools\\gh_lists.py",
    "ast_data": "FunctionDef name:backtick_repl arg:matchobj arguments arg If Compare Call Assign Call Assign Call Return return:yes Call"
  },
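To see what the repl does, pair it with a two-group pattern; the pattern below is a plausible stand-in (the real one in `gh_lists.py` may differ), where group 1 is the code text and group 2 is the character after the closing backtick.

```python
# Hedged demo of backtick_repl as an re.sub callback.
import re

def backtick_repl(matchobj):
    if matchobj.group(2) != ' ':
        post = '\\ ' + matchobj.group(2)   # escaped space keeps RST parsing happy
    else:
        post = matchobj.group(2)
    return '``' + matchobj.group(1) + '``' + post

print(re.sub(r'`([^`]+)`(.)', backtick_repl, 'see `foo`, then `bar` '))
# prints: see ``foo``\ , then ``bar`` 
```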
  {
    "library": "scikit-learn",
    "name": "_compute_kernel",
    "source_code": "def _compute_kernel(self, X):\n    if callable(self.kernel):\n        kernel = self.kernel(X, self.__Xfit)\n        if sp.issparse(kernel):\n            kernel = kernel.toarray()\n        X = np.asarray(kernel, dtype=np.float64, order='C')\n    return X",
    "docstring": "Return the data transformed by a callable kernel",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\svm\\_base.py",
    "ast_data": "FunctionDef name:_compute_kernel arg:self arg:X arguments arg arg If Call Assign Call If Call Assign Call Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "HasherSpec",
    "source_code": "class HasherSpec(collections.namedtuple('HasherSpec', ['hasher', 'key'])):\n    __slots__ = ()",
    "docstring": "A structure for the spec of the hashing function to use for hash buckets. is the name of the hashing function to use (eg. \"fasthash\", \"stronghash\"). is optional and specify the key to use for the hash function if supported, currently only used by a strong hash. Fields: hasher: The hasher name to use. key: The key to be used by the hashing function, if required.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\lookup_ops.py",
    "ast_data": "ClassDef name:HasherSpec Call Assign"
  },
  {
    "library": "kornia",
    "name": "fy",
    "source_code": "@property\ndef fy(self) -> Tensor:\n    return self.rectified_left_camera[..., 1, 1]",
    "docstring": "Returns the focal length in the y-direction. Note that the focal lengths of the rectified left and right camera are assumed to be equal. Returns: tensor of shape :math:",
    "type": "method",
    "file_path": "kornia\\kornia\\geometry\\camera\\stereo.py",
    "ast_data": "FunctionDef name:fy arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_read_variable_op",
    "source_code": "def _read_variable_op(self):\n    if self.trainable:\n        tape.variable_accessed(self)\n    handle = self.handle\n    if getattr(handle, 'is_packed', False):\n        with ops.device(self._get_on_device_or_primary().device):\n            return gen_resource_variable_ops.read_variable_op(handle, self.dtype)\n    else:\n        return gen_resource_variable_ops.read_variable_op(handle, self.dtype)",
    "docstring": "Reads the value of this variable.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\tpu_values.py",
    "ast_data": "FunctionDef name:_read_variable_op arg:self arguments arg If Call Assign If Call With Call Call Return return:yes Call Return return:yes Call"
  },
  {
    "library": "django",
    "name": "check_fid_range",
    "source_code": "def check_fid_range(self, fid_range):\n    if fid_range:\n        if isinstance(fid_range, (tuple, list)):\n            return slice(*fid_range)\n        elif isinstance(fid_range, slice):\n            return fid_range\n        else:\n            raise TypeError\n    else:\n        return None",
    "docstring": "Check the keyword.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\utils\\layermapping.py",
    "ast_data": "FunctionDef name:check_fid_range arg:self arg:fid_range arguments arg arg If If Call Return return:yes Call If Call Return return:yes Raise Return return:no"
  },
  {
    "library": "scipy",
    "name": "_cdf",
    "source_code": "def _cdf(self, x):\n    return np.interp(x, self._hbins, self._hcdf)",
    "docstring": "CDF calculated from the histogram",
    "type": "method",
    "file_path": "scipy\\scipy\\stats\\_continuous_distns.py",
    "ast_data": "FunctionDef name:_cdf arg:self arg:x arguments arg arg Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "_cumcount_array",
    "source_code": "@final\ndef _cumcount_array(self, ascending: bool=True) -> np.ndarray:\n    ids = self._grouper.ids\n    ngroups = self._grouper.ngroups\n    sorter = get_group_index_sorter(ids, ngroups)\n    ids, count = (ids[sorter], len(ids))\n    if count == 0:\n        return np.empty(0, dtype=np.int64)\n    run = np.r_[True, ids[:-1] != ids[1:]]\n    rep = np.diff(np.r_[np.nonzero(run)[0], count])\n    out = (~run).cumsum()\n    if ascending:\n        out -= np.repeat(out[run], rep)\n    else:\n        out = np.repeat(out[np.r_[run[1:], True]], rep) - out\n    if self._grouper.has_dropped_na:\n        out = np.where(ids == -1, np.nan, out.astype(np.float64, copy=False))\n    else:\n        out = out.astype(np.int64, copy=False)\n    rev = np.empty(count, dtype=np.intp)\n    rev[sorter] = np.arange(count, dtype=np.intp)\n    return out[rev]",
    "docstring": "Parameters ---------- ascending : bool, default True If False, number in reverse, from length of group - 1 to 0. Notes ----- this is currently implementing sort=False (though the default is sort=True) for groupby in general",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\groupby\\groupby.py",
    "ast_data": "FunctionDef name:_cumcount_array arg:self arg:ascending arguments arg arg Assign Assign Assign Call Assign Call If Compare Return return:yes Call Assign Compare Assign Call Call Assign Call If Call Assign Call If Assign Call Compare Call Assign Call Assign Call Assign Call Return return:yes"
  },
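This private helper backs the public `groupby().cumcount()`; a short sketch of the observable behavior (assumes pandas).

```python
# Hedged demo of the behavior _cumcount_array implements.
import pandas as pd

df = pd.DataFrame({"k": ["a", "a", "b", "a", "b"]})
print(df.groupby("k").cumcount().tolist())                 # [0, 1, 0, 2, 1]
print(df.groupby("k").cumcount(ascending=False).tolist())  # [2, 1, 1, 0, 0]
```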
  {
    "library": "pytorch",
    "name": "conv_layout",
    "source_code": "def conv_layout(x: TensorBox, weight: TensorBox, bias: Optional[TensorBox], stride: Sequence[int], padding: tuple[int, ...], dilation: tuple[int, ...], transposed: bool, output_padding: tuple[int, ...], groups: int) -> ir.Layout:\n    with V.graph.fake_mode:\n        output = torch.ops.aten.convolution(ir.ir_node_to_tensor(x, guard_shape=True), ir.ir_node_to_tensor(weight, guard_shape=True), ir.ir_node_to_tensor(bias, guard_shape=True), V.graph.sizevars.size_hints(stride), V.graph.sizevars.size_hints(padding), V.graph.sizevars.size_hints(dilation), transposed, V.graph.sizevars.size_hints(output_padding), groups)\n        sizes = ir.convert_shape_to_inductor(output.size())\n        stride = ir.convert_shape_to_inductor(output.stride())\n    return ir.FixedLayout(x.get_device_or_error(), x.get_dtype(), sizes, stride)",
    "docstring": "Determine output layout for a convolution",
    "type": "function",
    "file_path": "pytorch\\torch\\_inductor\\kernel\\conv.py",
    "ast_data": "FunctionDef name:conv_layout arg:x arg:weight arg:bias arg:stride arg:padding arg:dilation arg:transposed arg:output_padding arg:groups arguments arg arg arg arg arg arg arg arg arg With Assign Call Call Call Call Call Call Call Call Assign Call Call Assign Call Call Return return:yes Call Call Call"
  },
  {
    "library": "numpy",
    "name": "_valnd",
    "source_code": "def _valnd(val_f, c, *args):\n    args = [np.asanyarray(a) for a in args]\n    shape0 = args[0].shape\n    if not all((a.shape == shape0 for a in args[1:])):\n        if len(args) == 3:\n            raise ValueError('x, y, z are incompatible')\n        elif len(args) == 2:\n            raise ValueError('x, y are incompatible')\n        else:\n            raise ValueError('ordinates are incompatible')\n    it = iter(args)\n    x0 = next(it)\n    c = val_f(x0, c)\n    for xi in it:\n        c = val_f(xi, c, tensor=False)\n    return c",
    "docstring": "Helper function used to implement the `` functions for more detail",
    "type": "function",
    "file_path": "numpy\\numpy\\polynomial\\polyutils.py",
    "ast_data": "FunctionDef name:_valnd arg:val_f arg:c arguments arg arg arg Assign Call Assign If Call Compare If Compare Call Raise Call If Compare Call Raise Call Raise Call Assign Call Assign Call Assign Call For Assign Call Return return:yes"
  },
  {
    "library": "scipy",
    "name": "multinomial_frozen",
    "source_code": "class multinomial_frozen(multi_rv_frozen):\n\n    def __init__(self, n, p, seed=None):\n        self._dist = multinomial_gen(seed)\n        self.n, self.p, self.npcond = self._dist._process_parameters(n, p)\n\n        def _process_parameters(n, p):\n            return (self.n, self.p, self.npcond)\n        self._dist._process_parameters = _process_parameters\n\n    def logpmf(self, x):\n        return self._dist.logpmf(x, self.n, self.p)\n\n    def pmf(self, x):\n        return self._dist.pmf(x, self.n, self.p)\n\n    def mean(self):\n        return self._dist.mean(self.n, self.p)\n\n    def cov(self):\n        return self._dist.cov(self.n, self.p)\n\n    def entropy(self):\n        return self._dist.entropy(self.n, self.p)\n\n    def rvs(self, size=1, random_state=None):\n        return self._dist.rvs(self.n, self.p, size, random_state)",
    "docstring": "Create a frozen Multinomial distribution. Parameters ---------- n : int number of trials p: array_like probability of a trial falling into each category; should sum to 1 seed : {None, int, , }, optional If is None (or ), the singleton is used. If is an int, a new `seedseed` instance then that instance is used.",
    "type": "class",
    "file_path": "scipy\\scipy\\stats\\_multivariate.py",
    "ast_data": "ClassDef name:multinomial_frozen FunctionDef name:__init__ arg:self arg:n arg:p arg:seed arguments arg arg arg arg Assign Call Assign Call FunctionDef name:_process_parameters arg:n arg:p arguments arg arg Return return:yes Assign FunctionDef name:logpmf arg:self arg:x arguments arg arg Return return:yes Call FunctionDef name:pmf arg:self arg:x arguments arg arg Return return:yes Call FunctionDef name:mean arg:self arguments arg Return return:yes Call FunctionDef name:cov arg:self arguments arg Return return:yes Call FunctionDef name:entropy arg:self arguments arg Return return:yes Call FunctionDef name:rvs arg:self arg:size arg:random_state arguments arg arg arg Return return:yes Call"
  },
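A usage sketch for the frozen form (assumes SciPy): freezing fixes `n` and `p` once so the methods can be called without re-passing them.

```python
# Hedged demo: freeze a multinomial distribution, then reuse it.
from scipy.stats import multinomial

rv = multinomial(8, [0.3, 0.2, 0.5])      # frozen with n=8 and p fixed
print(rv.pmf([1, 3, 4]))
print(rv.mean())                          # n * p
print(rv.rvs(size=2, random_state=0))
```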
  {
    "library": "tensorflow",
    "name": "res_value",
    "source_code": "def res_value(self, ns, value):\n    raise NotImplementedError('subclasses must implement')",
    "docstring": "Resolves the type a literal or static value.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\autograph\\pyct\\static_analysis\\type_inference.py",
    "ast_data": "FunctionDef name:res_value arg:self arg:ns arg:value arguments arg arg arg Raise Call"
  },
  {
    "library": "scikit-learn",
    "name": "_MockEstimatorOnOffPrediction",
    "source_code": "class _MockEstimatorOnOffPrediction(BaseEstimator):\n\n    def __init__(self, response_methods=None):\n        self.response_methods = response_methods\n\n    def fit(self, X, y):\n        self.classes_ = np.unique(y)\n        return self\n\n    @available_if(_check_response('predict'))\n    def predict(self, X):\n        return 'predict'\n\n    @available_if(_check_response('predict_proba'))\n    def predict_proba(self, X):\n        return 'predict_proba'\n\n    @available_if(_check_response('decision_function'))\n    def decision_function(self, X):\n        return 'decision_function'",
    "docstring": "Estimator for which we can turn on/off the prediction methods. Parameters ---------- response_methods: list of {\"predict\", \"predict_proba\", \"decision_function\"}, default=None List containing the response implemented by the estimator. When, the response is in the list, it will return the name of the response method when called. Otherwise, an is raised. It allows to use as any conventional estimator. By default, no response methods are mocked.",
    "type": "class",
    "file_path": "scikit-learn\\sklearn\\utils\\_mocking.py",
    "ast_data": "ClassDef name:_MockEstimatorOnOffPrediction FunctionDef name:__init__ arg:self arg:response_methods arguments arg arg Assign FunctionDef name:fit arg:self arg:X arg:y arguments arg arg arg Assign Call Return return:yes FunctionDef name:predict arg:self arg:X arguments arg arg Return return:yes Call Call FunctionDef name:predict_proba arg:self arg:X arguments arg arg Return return:yes Call Call FunctionDef name:decision_function arg:self arg:X arguments arg arg Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "grad_pass_through",
    "source_code": "@tf_export('grad_pass_through')\ndef grad_pass_through(f):\n\n    @custom_gradient\n    def _grad_pass_through_op(*args, **kwargs):\n\n        def grad(*args, **kwargs):\n            variables = kwargs.get('variables')\n            if variables is not None:\n                return (args, [None] * len(variables))\n            return args\n        return (f(*args, **kwargs), grad)\n    return tf_decorator.make_decorator(f, _grad_pass_through_op)",
    "docstring": "Creates a grad-pass-through op with the forward behavior provided in f. Use this function to wrap any op, maintaining its behavior in the forward pass, but replacing the original op in the backward graph with an identity. For example: Another example is a 'differentiable' moving average approximation, where gradients are allowed to flow into the last value fed to the moving average, but the moving average is still used for the forward pass: Args: f: function that returns a or nested structure of outputs. Returns: A function which returns the same values as and whose gradients are the same as those of an identity function.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\custom_gradient.py",
    "ast_data": "FunctionDef name:grad_pass_through arg:f arguments arg FunctionDef name:_grad_pass_through_op arguments arg arg FunctionDef name:grad arguments arg arg Assign Call If Compare Return return:yes Call Return return:yes Return return:yes Call Return return:yes Call Call"
  },
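A sketch along the lines of the TensorFlow docs for `tf.grad_pass_through`: the forward value comes from `x.assign`, but the gradient is taken as if the op were an identity.

```python
# Hedged example: gradients pass through the assignment as an identity.
import tensorflow as tf

x = tf.Variable(1.0, name="x")
z = tf.Variable(3.0, name="z")
with tf.GradientTape() as tape:
    y = tf.grad_pass_through(x.assign)(z ** 2)   # y evaluates to 9.0
grads = tape.gradient(y, z)                      # evaluates to 6.0 (d(z**2)/dz)
print(float(y), float(grads))
```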
  {
    "library": "tensorflow",
    "name": "is_dataset_shape_fully_defined",
    "source_code": "def is_dataset_shape_fully_defined(dataset):\n    shapes = nest.flatten(dataset_ops.get_legacy_output_shapes(dataset))\n    unknown_shapes = [s for s in shapes if not s.is_fully_defined()]\n    return not unknown_shapes",
    "docstring": "Returns whether a dataset contains a final partial batch.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\distribute\\distributed_training_utils_v1.py",
    "ast_data": "FunctionDef name:is_dataset_shape_fully_defined arg:dataset arguments arg Assign Call Call Assign Call Return return:yes"
  },
  {
    "library": "django",
    "name": "parse_file_upload",
    "source_code": "def parse_file_upload(self, META, post_data):\n    self.upload_handlers = ImmutableList(self.upload_handlers, warning='You cannot alter upload handlers after the upload has been processed.')\n    parser = MultiPartParser(META, post_data, self.upload_handlers, self.encoding)\n    return parser.parse()",
    "docstring": "Return a tuple of (POST QueryDict, FILES MultiValueDict).",
    "type": "method",
    "file_path": "django\\django\\http\\request.py",
    "ast_data": "FunctionDef name:parse_file_upload arg:self arg:META arg:post_data arguments arg arg arg Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "render_pep440_old",
    "source_code": "def render_pep440_old(pieces):\n    if pieces['closest-tag']:\n        rendered = pieces['closest-tag']\n        if pieces['distance'] or pieces['dirty']:\n            rendered += f'0.post{pieces['distance']}'\n            if pieces['dirty']:\n                rendered += '.dev0'\n    else:\n        rendered = f'0.post{pieces['distance']}'\n        if pieces['dirty']:\n            rendered += '.dev0'\n    return rendered",
    "docstring": "TAG[.postDISTANCE[.dev0]] . The \".dev0\" means dirty. Exceptions: 1: no tags. 0.postDISTANCE[.dev0]",
    "type": "function",
    "file_path": "pandas\\pandas\\_version.py",
    "ast_data": "FunctionDef name:render_pep440_old arg:pieces arguments arg If Assign If BoolOp If Assign If Return return:yes"
  },
  {
    "library": "authlib",
    "name": "InsufficientScopeError",
    "source_code": "class InsufficientScopeError(OAuth2Error):\n    error = 'insufficient_scope'\n    description = 'The request requires higher privileges than provided by the access token.'\n    status_code = 403",
    "docstring": "The request requires higher privileges than provided by the access token. The resource server SHOULD respond with the HTTP 403 (Forbidden) status code and MAY include the \"scope\" attribute with the scope necessary to access the protected resource.",
    "type": "class",
    "file_path": "authlib\\authlib\\oauth2\\rfc6750\\errors.py",
    "ast_data": "ClassDef name:InsufficientScopeError Assign Assign Assign"
  },
  {
    "library": "tensorflow",
    "name": "max_pooling3d",
    "source_code": "def max_pooling3d(inputs, pool_size, strides, padding='valid', data_format='channels_last', name=None):\n    warnings.warn('`tf.layers.max_pooling3d` is deprecated and will be removed in a future version. Please use `tf.keras.layers.MaxPooling3D` instead.')\n    layer = MaxPooling3D(pool_size=pool_size, strides=strides, padding=padding, data_format=data_format, name=name)\n    return layer.apply(inputs)",
    "docstring": "Max pooling layer for 3D inputs (e.g. volumes). Args: inputs: The tensor over which to pool. Must have rank 5. pool_size: An integer or tuple/list of 3 integers: (pool_depth, pool_height, pool_width) specifying the size of the pooling window. Can be a single integer to specify the same value for all spatial dimensions. strides: An integer or tuple/list of 3 integers, specifying the strides of the pooling operation. Can be a single integer to specify the same value for all spatial dimensions. padding: A string. The padding method, either 'valid' or 'same'. Case-insensitive. data_format: A string. The ordering of the dimensions in the inputs. (default) and are supported. corresponds to inputs with shape while corresponds to inputs with shape . name: A string, the name of the layer. Returns: Output tensor. Raises: ValueError: if eager execution is enabled.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\legacy_tf_layers\\pooling.py",
    "ast_data": "FunctionDef name:max_pooling3d arg:inputs arg:pool_size arg:strides arg:padding arg:data_format arg:name arguments arg arg arg arg arg arg Call Assign Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "format_decomp_comparison",
    "source_code": "def format_decomp_comparison(pre_decomp_unique_ops: set[str], post_decomp_unique_ops: set[str]) -> str:\n    return f'Ops exist only in the ExportedProgram before decomposition: `{sorted(pre_decomp_unique_ops)}`\\n\\nOps exist only in the ExportedProgram after decomposition: `{sorted(post_decomp_unique_ops)}`\\n'",
    "docstring": "Format the decomposition comparison result. Args: unique_ops_in_a: The unique ops in the first program. unique_ops_in_b: The unique ops in the second program. Returns: The formatted comparison result.",
    "type": "function",
    "file_path": "pytorch\\torch\\onnx\\_internal\\exporter\\_reporting.py",
    "ast_data": "FunctionDef name:format_decomp_comparison arg:pre_decomp_unique_ops arg:post_decomp_unique_ops arguments arg arg Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "_check_not_implemented",
    "source_code": "def _check_not_implemented(cond, message=None):\n    _check_with(NotImplementedError, cond, message)",
    "docstring": "Throws error containing an optional message if the specified condition is False. Error type: `bool`",
    "type": "function",
    "file_path": "pytorch\\torch\\__init__.py",
    "ast_data": "FunctionDef name:_check_not_implemented arg:cond arg:message arguments arg arg Call"
  },
  {
    "library": "pytorch",
    "name": "__init__",
    "source_code": "def __init__(self, gm: GraphModule, passname: str, subsystem: Optional[str]=None, log_url: Optional[str]=None):\n    from torch._inductor.config import trace\n    self.gm = gm\n    self.passname = passname\n    self.subsystem = subsystem\n    if log_url is None:\n        log_url = trace.log_url_for_graph_xform\n    self.log_url = log_url\n    self.active = trace.enabled or self.log_url is not None\n    if self.active:\n        self.erased_nodes: set[str] = set()\n        self.created_nodes: set[str] = set()\n        self.name_to_node: dict[str, Node] = {}\n        self.copied_gms: list[GraphModule] = []\n        self._node_creation_hook = self.get_node_creation_hook()\n        self._node_erase_hook = self.get_node_erase_hook()\n        self._node_replace_hook = self.get_node_replace_hook()\n        self._deepcopy_hook = self.get_deepcopy_hook()\n    if self.log_url is None:\n        return\n    GraphTransformObserver.__pass_count += 1\n    self.input_dot_graph = FxGraphDrawer(self.gm, self.passname, ignore_getattr=True, ignore_parameters_and_buffers=True).get_dot_graph()",
    "docstring": "log_url is inferred to be torch._inductor.config.trace.log_url_for_graph_xform unless otherwise specified",
    "type": "method",
    "file_path": "pytorch\\torch\\fx\\passes\\graph_transform_observer.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:gm arg:passname arg:subsystem arg:log_url arguments arg arg arg arg arg Assign Assign Assign If Compare Assign Assign Assign BoolOp Compare If Call Call Assign Call Assign Call Assign Call Assign Call If Compare Return return:no Assign Call Call"
  },
  {
    "library": "matplotlib",
    "name": "start",
    "source_code": "@_api.delete_parameter('3.9', 'interval', alternative='timer.interval')\ndef start(self, interval=None):\n    if interval is not None:\n        self.interval = interval\n    self._timer_start()",
    "docstring": "Start the timer object. Parameters ---------- interval : int, optional Timer interval in milliseconds; overrides a previously set interval if provided.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\backend_bases.py",
    "ast_data": "FunctionDef name:start arg:self arg:interval arguments arg arg If Compare Assign Call Call"
  },
  {
    "library": "scipy",
    "name": "zeta",
    "source_code": "def zeta(x, q=None, out=None):\n    if q is None:\n        return _ufuncs._riemann_zeta(x, out)\n    else:\n        return _ufuncs._zeta(x, q, out)",
    "docstring": "Riemann or Hurwitz zeta function. Parameters ---------- x : array_like of float or complex. Input data q : array_like of float, optional Input data, must be real. Defaults to Riemann zeta. When is `xqxpolygamma` function: >>> m = 3 >>> x = 1.25 >>> polygamma(m, x) array(2.782144009188397) >>> (-1)**(m+1) * factorial(m) * zeta(m+1, x) 2.7821440091883969",
    "type": "function",
    "file_path": "scipy\\scipy\\special\\_basic.py",
    "ast_data": "FunctionDef name:zeta arg:x arg:q arg:out arguments arg arg arg If Compare Return return:yes Call Return return:yes Call"
  },
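A quick check of the two branches (assumes SciPy): with `q=None` you get the Riemann zeta, and the Hurwitz form with `q=1` agrees with it.

```python
# Hedged demo: Riemann vs. Hurwitz branches of scipy.special.zeta.
import numpy as np
from scipy.special import zeta

print(zeta(2.0))          # Riemann branch: pi**2 / 6 ~= 1.6449
print(np.pi ** 2 / 6)
print(zeta(2.0, 1.0))     # Hurwitz branch with q=1 matches the Riemann value
```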
  {
    "library": "scipy",
    "name": "_minimize_trustregion_exact",
    "source_code": "def _minimize_trustregion_exact(fun, x0, args=(), jac=None, hess=None, **trust_region_options):\n    if jac is None:\n        raise ValueError('Jacobian is required for trust region exact minimization.')\n    if not callable(hess):\n        raise ValueError('Hessian matrix is required for trust region exact minimization.')\n    return _minimize_trust_region(fun, x0, args=args, jac=jac, hess=hess, subproblem=IterativeSubproblem, **trust_region_options)",
    "docstring": "Minimization of scalar function of one or more variables using a nearly exact trust-region algorithm. Options ------- initial_trust_radius : float Initial trust-region radius. max_trust_radius : float Maximum value of the trust-region radius. No steps that are longer than this value will be proposed. eta : float Trust region related acceptance stringency for proposed steps. gtol : float Gradient norm must be less than `` before successful termination.",
    "type": "function",
    "file_path": "scipy\\scipy\\optimize\\_trustregion_exact.py",
    "ast_data": "FunctionDef name:_minimize_trustregion_exact arg:fun arg:x0 arg:args arg:jac arg:hess arguments arg arg arg arg arg arg If Compare Raise Call If Call Raise Call Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "_on_key_up",
    "source_code": "def _on_key_up(self, event):\n    KeyEvent('key_release_event', self, self._get_key(event), *self._mpl_coords(), guiEvent=event)._process()\n    if self:\n        event.Skip()",
    "docstring": "Release key.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\backends\\backend_wx.py",
    "ast_data": "FunctionDef name:_on_key_up arg:self arg:event arguments arg arg Call Call Call Call If Call"
  },
  {
    "library": "matplotlib",
    "name": "__init__",
    "source_code": "def __init__(self, *args, which='major', axis='both', **kwargs):\n    self._which = which\n    self._axis = axis\n    super().__init__(*args, **kwargs)\n    self.set_grid_helper(None)",
    "docstring": "Collection of grid lines. Parameters ---------- which : {\"major\", \"minor\"} Which grid to consider. axis : {\"both\", \"x\", \"y\"} Which axis to consider. *args, **kwargs Passed to .",
    "type": "method",
    "file_path": "matplotlib\\lib\\mpl_toolkits\\axisartist\\axis_artist.py",
    "ast_data": "FunctionDef name:__init__ arg:self arguments arg arg arg arg arg Assign Assign Call Call Call"
  },
  {
    "library": "pandas",
    "name": "headers",
    "source_code": "@property\ndef headers(self) -> Sequence[str]:\n    if self.with_counts:\n        return ['Non-Null Count', 'Dtype']\n    return ['Dtype']",
    "docstring": "Headers names of the columns in verbose table.",
    "type": "method",
    "file_path": "pandas\\pandas\\io\\formats\\info.py",
    "ast_data": "FunctionDef name:headers arg:self arguments arg If Return return:yes Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "eager_shape",
    "source_code": "def eager_shape(x: Array, /) -> tuple[int, ...]:\n    shape = x.shape\n    if any((s is None or math.isnan(s) for s in shape)):\n        msg = 'Unsupported lazy shape'\n        raise TypeError(msg)\n    return cast(tuple[int, ...], shape)",
    "docstring": "Return shape of an array. Raise if shape is not fully defined. Parameters ---------- x : Array Input array. Returns ------- tuple[int, ...] Shape of the array.",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\externals\\array_api_extra\\_lib\\_utils\\_helpers.py",
    "ast_data": "FunctionDef name:eager_shape arguments arg Assign If Call BoolOp Compare Call Assign Raise Call Return return:yes Call"
  },
  {
    "library": "django",
    "name": "rotate_token",
    "source_code": "def rotate_token(request):\n    _add_new_csrf_cookie(request)",
    "docstring": "Change the CSRF token in use for a request - should be done on login for security purposes.",
    "type": "function",
    "file_path": "django\\django\\middleware\\csrf.py",
    "ast_data": "FunctionDef name:rotate_token arg:request arguments arg Call"
  },
  {
    "library": "tensorflow",
    "name": "compute",
    "source_code": "def compute(i, a_flat, tas):\n    packed_elems = input_pack([elem_ta.read(i) for elem_ta in elems_ta])\n    packed_a = output_pack(a_flat)\n    a_out = fn(packed_a, packed_elems)\n    nest.assert_same_structure(elems if initializer is None else initializer, a_out)\n    flat_a_out = output_flatten(a_out)\n    tas = [ta.write(i, value) for ta, value in zip(tas, flat_a_out)]\n    if reverse:\n        next_i = i - 1\n    else:\n        next_i = i + 1\n    return (next_i, flat_a_out, tas)",
    "docstring": "The loop body of scan. Args: i: the loop counter. a_flat: the accumulator value(s), flattened. tas: the output accumulator TensorArray(s), flattened. Returns: [i + 1, a_flat, tas]: the updated counter + new accumulator values + updated TensorArrays Raises: TypeError: if initializer and fn() output structure do not match ValueType: if initializer and fn() output lengths do not match",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\functional_ops.py",
    "ast_data": "FunctionDef name:compute arg:i arg:a_flat arg:tas arguments arg arg arg Assign Call Call Assign Call Assign Call Call Compare Assign Call Assign Call Call If Assign Assign Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "inverse_transform",
    "source_code": "def inverse_transform(self, X):\n    check_is_fitted(self)\n    return self._inverse_transform(X, self.components_)",
    "docstring": "Transform data back to its original space. Parameters ---------- X : array-like of shape (n_samples, n_components) Data to be transformed back. Must have the same number of components as the data used to train the model. Returns ------- X_original : ndarray of shape (n_samples, n_features) Transformed data.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\decomposition\\_dict_learning.py",
    "ast_data": "FunctionDef name:inverse_transform arg:self arg:X arguments arg arg Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "BaseLogger",
    "source_code": "class BaseLogger(Callback):\n\n    def __init__(self, stateful_metrics=None):\n        super(BaseLogger, self).__init__()\n        self.stateful_metrics = set(stateful_metrics or [])\n\n    def on_epoch_begin(self, epoch, logs=None):\n        self.seen = 0\n        self.totals = {}\n\n    def on_batch_end(self, batch, logs=None):\n        logs = logs or {}\n        batch_size = logs.get('size', 0)\n        num_steps = logs.get('num_steps', 1)\n        self.seen += batch_size * num_steps\n        for k, v in logs.items():\n            if k in self.stateful_metrics:\n                self.totals[k] = v\n            elif k in self.totals:\n                self.totals[k] += v * batch_size\n            else:\n                self.totals[k] = v * batch_size\n\n    def on_epoch_end(self, epoch, logs=None):\n        if logs is not None:\n            for k in self.params['metrics']:\n                if k in self.totals:\n                    if k in self.stateful_metrics:\n                        logs[k] = self.totals[k]\n                    else:\n                        logs[k] = self.totals[k] / self.seen",
    "docstring": "Callback that accumulates epoch averages of metrics. This callback is automatically applied to every Keras model. Args: stateful_metrics: Iterable of string names of metrics that should *not* be averaged over an epoch. Metrics in this list will be logged as-is in . All others will be averaged in .",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\callbacks.py",
    "ast_data": "ClassDef name:BaseLogger FunctionDef name:__init__ arg:self arg:stateful_metrics arguments arg arg Call Call Assign Call BoolOp FunctionDef name:on_epoch_begin arg:self arg:epoch arg:logs arguments arg arg arg Assign Assign FunctionDef name:on_batch_end arg:self arg:batch arg:logs arguments arg arg arg Assign BoolOp Assign Call Assign Call For Call If Compare Assign If Compare Assign FunctionDef name:on_epoch_end arg:self arg:epoch arg:logs arguments arg arg arg If Compare For If Compare If Compare Assign Assign"
  },
  {
    "library": "tensorflow",
    "name": "new_func",
    "source_code": "@functools.wraps(func)\ndef new_func(*args, **kwargs):\n    if is_in_graph_mode.IS_IN_GRAPH_MODE() and _PRINT_DEPRECATION_WARNINGS:\n        invalid_args = []\n        named_args = tf_inspect.getcallargs(func, *args, **kwargs)\n        for arg_name, spec in iter(deprecated_positions.items()):\n            if spec.position < len(args) and (not (spec.has_ok_value and _same_value(named_args[arg_name], spec.ok_value))):\n                invalid_args.append(arg_name)\n        if is_varargs_deprecated and len(args) > len(arg_spec.args):\n            invalid_args.append(arg_spec.varargs)\n        if is_kwargs_deprecated and kwargs:\n            invalid_args.append(arg_spec.varkw)\n        for arg_name in deprecated_arg_names:\n            if arg_name in kwargs and (not (deprecated_positions[arg_name].has_ok_value and _same_value(named_args[arg_name], deprecated_positions[arg_name].ok_value))):\n                invalid_args.append(arg_name)\n        for arg_name in invalid_args:\n            if (func, arg_name) not in _PRINTED_WARNING:\n                if warn_once:\n                    _PRINTED_WARNING[func, arg_name] = True\n                _log_deprecation('From %s: calling %s (from %s) with %s is deprecated and will be removed %s.\\nInstructions for updating:\\n%s', _call_location(), decorator_utils.get_qualified_name(func), func.__module__, arg_name, 'in a future version' if date is None else 'after %s' % date, instructions)\n    return func(*args, **kwargs)",
    "docstring": "Deprecation wrapper.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\util\\deprecation.py",
    "ast_data": "FunctionDef name:new_func arguments arg arg If BoolOp Call Assign Assign Call For Call Call If BoolOp Compare Call BoolOp Call Call If BoolOp Compare Call Call Call If BoolOp Call For If BoolOp Compare BoolOp Call Call For If Compare If Assign Call Call Call Compare Return return:yes Call Call"
  },
  {
    "library": "scikit-learn",
    "name": "fit",
    "source_code": "@_fit_context(prefer_skip_nested_validation=True)\ndef fit(self, X, y=None):\n    X = validate_data(self, X, accept_sparse='csr')\n    random_state = check_random_state(self.random_state)\n    n_features = X.shape[1]\n    sparse = sp.issparse(X)\n    if self.gamma == 'scale':\n        X_var = X.multiply(X).mean() - X.mean() ** 2 if sparse else X.var()\n        self._gamma = 1.0 / (n_features * X_var) if X_var != 0 else 1.0\n    else:\n        self._gamma = self.gamma\n    self.random_weights_ = (2.0 * self._gamma) ** 0.5 * random_state.normal(size=(n_features, self.n_components))\n    self.random_offset_ = random_state.uniform(0, 2 * np.pi, size=self.n_components)\n    if X.dtype == np.float32:\n        self.random_weights_ = self.random_weights_.astype(X.dtype, copy=False)\n        self.random_offset_ = self.random_offset_.astype(X.dtype, copy=False)\n    self._n_features_out = self.n_components\n    return self",
    "docstring": "Fit the model with X. Samples random projection according to n_features. Parameters ---------- X : {array-like, sparse matrix}, shape (n_samples, n_features) Training data, where is the number of samples and is the number of features. y : array-like, shape (n_samples,) or (n_samples, n_outputs), default=None Target values (None for unsupervised transformations). Returns ------- self : object Returns the instance itself.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\kernel_approximation.py",
    "ast_data": "FunctionDef name:fit arg:self arg:X arg:y arguments arg arg arg Assign Call Assign Call Assign Assign Call If Compare Assign Call Call Call Call Assign Compare Assign Assign Call Assign Call If Compare Assign Call Assign Call Assign Return return:yes Call"
  },
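End-to-end usage sketch (assumes scikit-learn >= 1.2 for `gamma='scale'` on `RBFSampler`); the data and sizes are arbitrary.

```python
# Hedged demo: fit draws the random weights/offsets, transform applies them.
import numpy as np
from sklearn.kernel_approximation import RBFSampler

X = np.random.RandomState(0).normal(size=(5, 3))
sampler = RBFSampler(gamma="scale", n_components=100, random_state=0)
feats = sampler.fit_transform(X)
print(feats.shape)        # (5, 100)
```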
  {
    "library": "tensorflow",
    "name": "_multi_worker_session",
    "source_code": "def _multi_worker_session(kwargs):\n    strategy = None\n    for _, v in kwargs.items():\n        if isinstance(v, distribute_lib.StrategyBase):\n            if strategy is not None:\n                logging.warning('The test uses multiple strategies. Skipping entering a session that is configured for the strategy.')\n                return ops.NullContextmanager()\n            strategy = v\n    if context.executing_eagerly() or not isinstance(strategy, collective_all_reduce_strategy.CollectiveAllReduceStrategy):\n        return ops.NullContextmanager()\n    sess_config = copy.deepcopy(context.context().config)\n    sess_config = strategy.update_config_proto(sess_config)\n    target = strategy.cluster_resolver.master()\n    return session.Session(config=sess_config, target=target).as_default()",
    "docstring": "Returns a context manager that enters a session that is configured for the MultiWorkerMirroredStrategy. Args: kwargs: a dict. Keyword arguments passed to the test. Returns: A context manager. If MultiWorkerMirroredStrategy is the one and only one strategy in kwargs and it's in graph mode, it's the session that is configured for that strategy. Otherwise, it's a no-op context manager.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\combinations.py",
    "ast_data": "FunctionDef name:_multi_worker_session arg:kwargs arguments arg Assign For Call If Call If Compare Call Return return:yes Call Assign If BoolOp Call Call Return return:yes Call Assign Call Call Assign Call Assign Call Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "_lookup_global_by_name",
    "source_code": "@classmethod\ndef _lookup_global_by_name(cls, name: str) -> object:\n    if '.' in name:\n        mod, rest = name.split('.', 1)\n        root = globals()[mod]\n        return cls._getattr_by_name(root, rest)\n    else:\n        return globals()[name]",
    "docstring": "Like but supports dotted names.",
    "type": "method",
    "file_path": "pytorch\\torch\\fx\\_graph_pickler.py",
    "ast_data": "FunctionDef name:_lookup_global_by_name arg:cls arg:name arguments arg arg If Compare Assign Call Assign Call Return return:yes Call Return return:yes Call"
  },
  {
    "library": "numpy",
    "name": "check_tutorials",
    "source_code": "@spin.util.extend_command(test, doc='')\ndef check_tutorials(*, parent_callback, pytest_args, **kwargs):\n    if not pytest_args or all((arg.startswith('-') for arg in pytest_args)):\n        pytest_args = ('doc/source/user',) + pytest_args\n    pytest_args = tuple((str(curdir / '..' / arg) if not arg.startswith('-') else arg for arg in pytest_args))\n    doctest_args = ('--doctest-glob=*rst',)\n    pytest_args = pytest_args + doctest_args\n    parent_callback(**{'pytest_args': pytest_args, **kwargs})",
    "docstring": "🔧 Run doctests of user-facing rst tutorials. To test all tutorials in the numpy doc/source/user/ directory, use spin check-tutorials To run tests on a specific RST file: \b spin check-tutorials doc/source/user/absolute-beginners.rst \b Note: ----- \b - This command only runs doctests and skips everything under tests/ - This command only doctests public objects: those which are accessible from the top-level file.",
    "type": "function",
    "file_path": "numpy\\.spin\\cmds.py",
    "ast_data": "FunctionDef name:check_tutorials arguments arg arg arg If BoolOp Call Call Assign Assign Call Call Call Assign Assign Call Call"
  },
  {
    "library": "pandas",
    "name": "_get_dtype",
    "source_code": "def _get_dtype(arr_or_dtype) -> DtypeObj:\n    if arr_or_dtype is None:\n        raise TypeError('Cannot deduce dtype from null object')\n    if isinstance(arr_or_dtype, np.dtype):\n        return arr_or_dtype\n    elif isinstance(arr_or_dtype, type):\n        return np.dtype(arr_or_dtype)\n    elif hasattr(arr_or_dtype, 'dtype'):\n        arr_or_dtype = arr_or_dtype.dtype\n    return pandas_dtype(arr_or_dtype)",
    "docstring": "Get the dtype instance associated with an array or dtype object. Parameters ---------- arr_or_dtype : array-like or dtype The array-like or dtype object whose dtype we want to extract. Returns ------- obj_dtype : The extract dtype instance from the passed in array or dtype object. Raises ------ TypeError : The passed in object is None.",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\dtypes\\common.py",
    "ast_data": "FunctionDef name:_get_dtype arg:arr_or_dtype arguments arg If Compare Raise Call If Call Return return:yes If Call Return return:yes Call If Call Assign Return return:yes Call"
  },
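A sketch of the dispatch order (uses the private helper named above; the import path follows the record's file_path and may change between pandas versions).

```python
# Hedged demo: arrays, scalar types, and dtype strings all resolve to a dtype.
import numpy as np
from pandas.core.dtypes.common import _get_dtype  # private pandas helper

print(_get_dtype(np.array([1, 2])))   # int64 (taken from .dtype)
print(_get_dtype(float))              # float64 (via np.dtype)
print(_get_dtype("category"))         # CategoricalDtype via pandas_dtype
```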
  {
    "library": "tensorflow",
    "name": "_in_multi_worker_mode",
    "source_code": "def _in_multi_worker_mode(self):\n    return False",
    "docstring": "Whether this strategy indicates working in multi-worker settings.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\distribute_lib.py",
    "ast_data": "FunctionDef name:_in_multi_worker_mode arg:self arguments arg Return return:yes"
  },
  {
    "library": "pandas",
    "name": "apply_list_or_dict_like",
    "source_code": "def apply_list_or_dict_like(self) -> DataFrame | Series:\n    if self.engine == 'numba':\n        raise NotImplementedError(\"The 'numba' engine doesn't support list-like/dict likes of callables yet.\")\n    if self.axis == 1 and isinstance(self.obj, ABCDataFrame):\n        return self.obj.T.apply(self.func, 0, args=self.args, **self.kwargs).T\n    func = self.func\n    kwargs = self.kwargs\n    if is_dict_like(func):\n        result = self.agg_or_apply_dict_like(op_name='apply')\n    else:\n        result = self.agg_or_apply_list_like(op_name='apply')\n    result = reconstruct_and_relabel_result(result, func, **kwargs)\n    return result",
    "docstring": "Compute apply in case of a list-like or dict-like. Returns ------- result: Series, DataFrame, or None Result when self.func is a list-like or dict-like, None otherwise.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\apply.py",
    "ast_data": "FunctionDef name:apply_list_or_dict_like arg:self arguments arg If Compare Raise Call If BoolOp Compare Call Return return:yes Call Assign Assign If Call Assign Call Assign Call Assign Call Return return:yes"
  },
  {
    "library": "pandas",
    "name": "NumbaUtilError",
    "source_code": "class NumbaUtilError(Exception):\n    pass",
    "docstring": "Error raised for unsupported Numba engine routines. See Also -------- DataFrame.groupby : Group DataFrame using a mapper or by a Series of columns. Series.groupby : Group Series using a mapper or by a Series of columns. DataFrame.agg : Aggregate using one or more operations over the specified axis. Series.agg : Aggregate using one or more operations over the specified axis. Examples -------- >>> df = pd.DataFrame( ... {\"key\": [\"a\", \"a\", \"b\", \"b\"], \"data\": [1, 2, 3, 4]}, columns=[\"key\", \"data\"] ... ) >>> def incorrect_function(x): ... return sum(x) * 2.7 >>> df.groupby(\"key\").agg(incorrect_function, engine=\"numba\") Traceback (most recent call last): NumbaUtilError: The first 2 arguments to incorrect_function must be ['values', 'index']",
    "type": "class",
    "file_path": "pandas\\pandas\\errors\\__init__.py",
    "ast_data": "ClassDef name:NumbaUtilError"
  },
  {
    "library": "tensorflow",
    "name": "TextLineReader",
    "source_code": "@tf_export(v1=['TextLineReader'])\nclass TextLineReader(ReaderBase):\n\n    @deprecation.deprecated(None, 'Queue-based input pipelines have been replaced by `tf.data`. Use `tf.data.TextLineDataset`.')\n    def __init__(self, skip_header_lines=None, name=None):\n        rr = gen_io_ops.text_line_reader_v2(skip_header_lines=skip_header_lines, name=name)\n        super(TextLineReader, self).__init__(rr)",
    "docstring": "A Reader that outputs the lines of a file delimited by newlines. Newlines are stripped from the output. See ReaderBase for supported methods. @compatibility(eager) Readers are not compatible with eager execution. Instead, please use to get data into your model. @end_compatibility",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\io_ops.py",
    "ast_data": "ClassDef name:TextLineReader FunctionDef name:__init__ arg:self arg:skip_header_lines arg:name arguments arg arg arg Assign Call Call Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "FuseHandler",
    "source_code": "class FuseHandler(ABC):\n\n    @abstractmethod\n    def __init__(self, node: Node):\n        pass\n\n    @abstractmethod\n    def fuse(self, load_arg: Callable, named_modules: dict[str, torch.nn.Module], fused_graph: Graph, root_node: Node, extra_inputs: list[Any], matched_node_pattern: NodePattern, fuse_custom_config: FuseCustomConfig, fuser_method_mapping: dict[Pattern, Union[torch.nn.Sequential, Callable]], is_qat: bool) -> Node:\n        pass",
    "docstring": "Base handler class for the fusion patterns",
    "type": "class",
    "file_path": "pytorch\\torch\\ao\\quantization\\fx\\fuse_handler.py",
    "ast_data": "ClassDef name:FuseHandler FunctionDef name:__init__ arg:self arg:node arguments arg arg FunctionDef name:fuse arg:self arg:load_arg arg:named_modules arg:fused_graph arg:root_node arg:extra_inputs arg:matched_node_pattern arg:fuse_custom_config arg:fuser_method_mapping arg:is_qat arguments arg arg arg arg arg arg arg arg arg arg"
  },
  {
    "library": "pandas",
    "name": "_get_window_indexer",
    "source_code": "def _get_window_indexer(self) -> BaseIndexer:\n    return ExpandingIndexer()",
    "docstring": "Return an indexer class that will compute the window start and end bounds",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\window\\expanding.py",
    "ast_data": "FunctionDef name:_get_window_indexer arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_copy_trackable_to_cpu",
    "source_code": "def _copy_trackable_to_cpu(self, object_map):\n    del object_map\n    raise NotImplementedError('Need to implement _copy_trackable_to_cpu() if the Trackable requires AsyncCheckpoint support.')",
    "docstring": "Creates a copy of this object onto CPU, also copies values over. Needs to be overridden if the requires AsyncCheckpoint support. The method first checks whether a copy of is already created in , and creates one if not already created. Then the method copies the **values** of itself over to its copy mapped by . Args: object_map: A dictionary that maps original Trackables to the copied Trackables, which reside in the CPU.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\trackable\\base.py",
    "ast_data": "FunctionDef name:_copy_trackable_to_cpu arg:self arg:object_map arguments arg arg Raise Call"
  },
  {
    "library": "seaborn",
    "name": "__init__",
    "source_code": "def __init__(self, variable: str | None=None):\n    if not variable:\n        variable = self.__class__.__name__.lower()\n    self.variable = variable",
    "docstring": "Initialize the property with the name of the corresponding plot variable.",
    "type": "method",
    "file_path": "seaborn\\seaborn\\_core\\properties.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:variable arguments arg arg If Assign Call Assign"
  },
  {
    "library": "pandas",
    "name": "dispatch_ufunc_with_out",
    "source_code": "def dispatch_ufunc_with_out(self, ufunc: np.ufunc, method: str, *inputs, **kwargs):\n    out = kwargs.pop('out')\n    where = kwargs.pop('where', None)\n    result = getattr(ufunc, method)(*inputs, **kwargs)\n    if result is NotImplemented:\n        return NotImplemented\n    if isinstance(result, tuple):\n        if not isinstance(out, tuple) or len(out) != len(result):\n            raise NotImplementedError\n        for arr, res in zip(out, result):\n            _assign_where(arr, res, where)\n        return out\n    if isinstance(out, tuple):\n        if len(out) == 1:\n            out = out[0]\n        else:\n            raise NotImplementedError\n    _assign_where(out, result, where)\n    return out",
    "docstring": "If we have an keyword, then call the ufunc without and then set the result into the given .",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\arraylike.py",
    "ast_data": "FunctionDef name:dispatch_ufunc_with_out arg:self arg:ufunc arg:method arguments arg arg arg arg arg Assign Call Assign Call Assign Call Call If Compare Return return:yes If Call If BoolOp Call Compare Call Call Raise For Call Call Return return:yes If Call If Compare Call Assign Raise Call Return return:yes"
  },
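The pattern above is easiest to see outside pandas. Below is a minimal numpy-only sketch of the `out`/`where` assignment step, with `_assign_where` written out the way the helper referenced above behaves (plain assignment when `where` is None, masked assignment via `np.putmask` otherwise):

```python
import numpy as np

def _assign_where(out, result, where):
    # Stand-in for the pandas helper: copy `result` into the caller-supplied
    # buffer, honoring an optional boolean mask.
    if where is None:
        out[...] = result
    else:
        np.putmask(out, where, result)  # only assign where the mask is True

buf = np.zeros(4)
res = np.add(np.arange(4.0), 1.0)   # ufunc called *without* out
_assign_where(buf, res, where=np.array([True, False, True, False]))
print(buf)  # [1. 0. 3. 0.]
```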
  {
    "library": "scipy",
    "name": "iterate_hypercube",
    "source_code": "def iterate_hypercube(self):\n    if self.disp:\n        logging.info('Constructing and refining simplicial complex graph structure')\n    if self.n is None:\n        self.HC.refine_all()\n        self.n_sampled = self.HC.V.size()\n    else:\n        self.HC.refine(self.n)\n        self.n_sampled += self.n\n    if self.disp:\n        logging.info('Triangulation completed, evaluating all constraints and objective function values.')\n    if len(self.LMC.xl_maps) > 0:\n        for xl in self.LMC.cache:\n            v = self.HC.V[xl]\n            v_near = v.star()\n            for v in v.nn:\n                v_near = v_near.union(v.nn)\n    self.HC.V.process_pools()\n    if self.disp:\n        logging.info('Evaluations completed.')\n    self.fn = self.HC.V.nfev\n    return",
    "docstring": "Iterate a subdivision of the complex Note: called with `` after class initiation",
    "type": "method",
    "file_path": "scipy\\scipy\\optimize\\_shgo.py",
    "ast_data": "FunctionDef name:iterate_hypercube arg:self arguments arg If Call If Compare Call Assign Call Call If Call If Compare Call For Assign Assign Call For Assign Call Call If Call Assign Return return:no"
  },
  {
    "library": "kornia",
    "name": "to_euler",
    "source_code": "def to_euler(self) -> Tuple[Tensor, Tensor, Tensor]:\n    return euler_from_quaternion(self.w, self.x, self.y, self.z)",
    "docstring": "Convert the quaternion to a triple of Euler angles (roll, pitch, yaw). Example: >>> q = Quaternion(tensor([2., 0., 1., 1.])) >>> roll, pitch, yaw = q.to_euler() >>> roll tensor(2.0344, grad_fn=) >>> pitch tensor(1.5708, grad_fn=) >>> yaw tensor(2.2143, grad_fn=)",
    "type": "method",
    "file_path": "kornia\\kornia\\geometry\\quaternion.py",
    "ast_data": "FunctionDef name:to_euler arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "storable",
    "source_code": "@property\ndef storable(self):\n    return self.group",
    "docstring": "return my storable",
    "type": "method",
    "file_path": "pandas\\pandas\\io\\pytables.py",
    "ast_data": "FunctionDef name:storable arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_as_graph_element",
    "source_code": "def _as_graph_element(self):\n    return self.tensor",
    "docstring": "Convert to a graph element.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\weak_tensor.py",
    "ast_data": "FunctionDef name:_as_graph_element arg:self arguments arg Return return:yes"
  },
  {
    "library": "scrapy",
    "name": "json",
    "source_code": "def json(self) -> Any:\n    if self._cached_decoded_json is _NONE:\n        self._cached_decoded_json = json.loads(self.body)\n    return self._cached_decoded_json",
    "docstring": ".. versionadded:: 2.2 Deserialize a JSON document to a Python object.",
    "type": "method",
    "file_path": "scrapy\\scrapy\\http\\response\\text.py",
    "ast_data": "FunctionDef name:json arg:self arguments arg If Compare Assign Call Return return:yes"
  },
  {
    "library": "pygame",
    "name": "__init__",
    "source_code": "def __init__(self, device=0, size=(640, 480), mode='RGB', show_video_window=0):\n    self.dev = vidcap.new_Dev(device, show_video_window)\n    width, height = size\n    self.dev.setresolution(width, height)",
    "docstring": "device: VideoCapture enumerates the available video capture devices on your system. If you have more than one device, specify the desired one here. The device number starts from 0. show_video_window: 0 ... do not display a video window (the default) 1 ... display a video window Mainly used for debugging, since the video window can not be closed or moved around.",
    "type": "method",
    "file_path": "pygame\\src_py\\_camera_vidcapture.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:device arg:size arg:mode arg:show_video_window arguments arg arg arg arg arg Assign Call Assign Call"
  },
  {
    "library": "pandas",
    "name": "is_list_like_indexer",
    "source_code": "def is_list_like_indexer(key) -> bool:\n    return is_list_like(key) and (not (isinstance(key, tuple) and type(key) is not tuple))",
    "docstring": "Check if we have a list-like indexer that is *not* a NamedTuple. Parameters ---------- key : object Returns ------- bool",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\indexers\\utils.py",
    "ast_data": "FunctionDef name:is_list_like_indexer arg:key arguments arg Return return:yes BoolOp Call BoolOp Call Compare Call"
  },
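As a standalone illustration of the NamedTuple exclusion: a NamedTuple is a tuple subclass whose type is not exactly `tuple`, so it is rejected while plain lists and tuples pass. `_is_list_like` below is a crude stand-in for `pandas.api.types.is_list_like`, not the real implementation:

```python
from collections import namedtuple

def _is_list_like(obj) -> bool:
    # Simplified stand-in: iterable but not a plain string.
    return hasattr(obj, "__iter__") and not isinstance(obj, str)

def is_list_like_indexer(key) -> bool:
    # A tuple subclass (e.g. a NamedTuple) fails type(key) is tuple.
    return _is_list_like(key) and not (
        isinstance(key, tuple) and type(key) is not tuple
    )

Point = namedtuple("Point", ["x", "y"])
print(is_list_like_indexer([1, 2]))       # True
print(is_list_like_indexer((1, 2)))       # True  (plain tuple)
print(is_list_like_indexer(Point(1, 2)))  # False (NamedTuple excluded)
```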
  {
    "library": "tensorflow",
    "name": "outer_graph",
    "source_code": "@property\ndef outer_graph(self):\n    return self._outer_graph",
    "docstring": "The graph active when this _FuncGraph was created.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\function.py",
    "ast_data": "FunctionDef name:outer_graph arg:self arguments arg Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "push",
    "source_code": "def push(self, o):\n    self._elements[self._pos + 1:] = [o]\n    self._pos = len(self._elements) - 1\n    return o",
    "docstring": "Push *o* to the stack after the current position, and return *o*. Discard all later elements.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\cbook.py",
    "ast_data": "FunctionDef name:push arg:self arg:o arguments arg arg Assign Assign Call Return return:yes"
  },
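The semantics are those of a browser-history stack: pushing after the cursor discards everything beyond it. A self-contained sketch (a trimmed-down analogue of `matplotlib.cbook.Stack`, not the full class):

```python
class Stack:
    def __init__(self):
        self._elements = []
        self._pos = -1  # cursor into the history

    def push(self, o):
        # Overwrite everything after the cursor with [o].
        self._elements[self._pos + 1:] = [o]
        self._pos = len(self._elements) - 1
        return o

    def back(self):
        self._pos = max(self._pos - 1, 0)
        return self._elements[self._pos]

s = Stack()
s.push("a"); s.push("b"); s.push("c")
s.back()            # cursor now on "b"
s.push("d")         # discards "c"
print(s._elements)  # ['a', 'b', 'd']
```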
  {
    "library": "kornia",
    "name": "Yuv422ToRgb",
    "source_code": "class Yuv422ToRgb(Module):\n    ONNX_EXPORTABLE = False\n\n    def forward(self, inputy: Tensor, inputuv: Tensor) -> Tensor:\n        return yuv422_to_rgb(inputy, inputuv)",
    "docstring": "Convert an image from YUV to RGB. Width must be evenly divisible by 2. The image data is assumed to be in the range of :math: for luma (Y). The ranges of U and V are :math: and :math:, respectively. YUV formula follows M/PAL values (see _, Table 2, items 2.5 and 2.6). Returns: RGB version of the image. Shape: - imagey: :math: - imageuv: :math: - output: :math: Examples: >>> inputy = torch.rand(2, 1, 4, 6) >>> inputuv = torch.rand(2, 2, 4, 3) >>> rgb = Yuv422ToRgb() >>> output = rgb(inputy, inputuv) # 2x3x4x6",
    "type": "class",
    "file_path": "kornia\\kornia\\color\\yuv.py",
    "ast_data": "ClassDef name:Yuv422ToRgb Assign FunctionDef name:forward arg:self arg:inputy arg:inputuv arguments arg arg arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "model_variables",
    "source_code": "@tf_export(v1=['model_variables'])\ndef model_variables(scope=None):\n    return ops.get_collection(ops.GraphKeys.MODEL_VARIABLES, scope)",
    "docstring": "Returns all variables in the MODEL_VARIABLES collection. Args: scope: (Optional.) A string. If supplied, the resulting list is filtered to include only items whose attribute matches using . Items without a attribute are never returned if a scope is supplied. The choice of means that a without special tokens filters by prefix. Returns: A list of local Variable objects.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\variables.py",
    "ast_data": "FunctionDef name:model_variables arg:scope arguments arg Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "on_session_init",
    "source_code": "def on_session_init(self, request):\n    return OnSessionInitResponse(OnSessionInitAction.PROCEED)",
    "docstring": "See doc of BaseDebugWrapperSession.on_run_start.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\debug\\wrappers\\framework.py",
    "ast_data": "FunctionDef name:on_session_init arg:self arg:request arguments arg arg Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "_validate_remainder",
    "source_code": "def _validate_remainder(self, X):\n    cols = set(chain(*self._transformer_to_input_indices.values()))\n    remaining = sorted(set(range(self.n_features_in_)) - cols)\n    self._transformer_to_input_indices['remainder'] = remaining\n    remainder_cols = self._get_remainder_cols(remaining)\n    self._remainder = ('remainder', self.remainder, remainder_cols)",
    "docstring": "Validates `` targeting the remaining columns.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\compose\\_column_transformer.py",
    "ast_data": "FunctionDef name:_validate_remainder arg:self arg:X arguments arg arg Assign Call Call Call Assign Call Call Call Assign Assign Call Assign"
  },
  {
    "library": "matplotlib",
    "name": "get_name_char",
    "source_code": "def get_name_char(self, c, isord=False):\n    if not isord:\n        c = ord(c)\n    return self._metrics[c].name",
    "docstring": "Get the name of the character, i.e., ';' is 'semicolon'.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\_afm.py",
    "ast_data": "FunctionDef name:get_name_char arg:self arg:c arg:isord arguments arg arg arg If Assign Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_reconstruct_dtensor",
    "source_code": "def _reconstruct_dtensor(module: nn.Module, _input: Any):\n    param_list = []\n    for name, t in module.named_parameters():\n        if hasattr(t, '_st_info'):\n            dtensor = _unflatten_tensor(t, t._st_info)\n            param_list.append((*_get_submodule_n_params(module, name), dtensor))\n    _update_module_param(param_list)",
    "docstring": "Recontruct DTensor parameters from local tensors",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\tensor\\parallel\\ddp.py",
    "ast_data": "FunctionDef name:_reconstruct_dtensor arg:module arg:_input arguments arg arg Assign For Call If Call Assign Call Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "_is_aligned",
    "source_code": "def _is_aligned(v: sympy.Expr) -> bool:\n    if isinstance(v, (sympy.Add, sympy.Max)):\n        return all(map(_is_aligned, v.args))\n    return isinstance(v, align) or sympy.gcd(v, ALIGN_BYTES) == ALIGN_BYTES",
    "docstring": "v can be statically proven to be a multiple of ALIGN_BYTES",
    "type": "function",
    "file_path": "pytorch\\torch\\_inductor\\utils.py",
    "ast_data": "FunctionDef name:_is_aligned arg:v arguments arg If Call Return return:yes Call Call Return return:yes BoolOp Call Compare Call"
  },
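A runnable sketch of the same gcd-based proof, under the assumption that the alignment constant is 64 bytes and with inductor's `align` wrapper omitted:

```python
import sympy

ALIGN_BYTES = 64  # assumption: a 64-byte alignment constant

def _is_aligned(v: sympy.Expr) -> bool:
    # Recurse through sums and maxes; on leaves, proving
    # gcd(v, ALIGN_BYTES) == ALIGN_BYTES shows v is a multiple.
    if isinstance(v, (sympy.Add, sympy.Max)):
        return all(map(_is_aligned, v.args))
    return sympy.gcd(v, ALIGN_BYTES) == ALIGN_BYTES

n = sympy.Symbol("n", positive=True, integer=True)
print(_is_aligned(sympy.Integer(128)))  # True: 128 % 64 == 0
print(_is_aligned(64 * n + 192))        # True: both terms are multiples
print(_is_aligned(64 * n + 8))          # False: 8 is not
```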
  {
    "library": "pytorch",
    "name": "_get_buffer",
    "source_code": "def _get_buffer(self, node: ir.IRNode) -> CodegenBuffer:\n    if isinstance(node, (ir.Buffer, WorkspaceArg)):\n        return node\n    elif isinstance(node, (ir.BaseView, ir.MutableBox)):\n        return self._get_buffer(node.data)\n    elif isinstance(node, sympy.Symbol):\n        return SymbolBuffer(node)\n    else:\n        raise NotImplementedError(f'Unable to extract buffer from node: {node}')",
    "docstring": "Extract buffer data from an IR node.",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\codegen\\wrapper_fxir.py",
    "ast_data": "FunctionDef name:_get_buffer arg:self arg:node arguments arg arg If Call Return return:yes If Call Return return:yes Call If Call Return return:yes Call Raise Call"
  },
  {
    "library": "scrapy",
    "name": "BrowserLikeContextFactory",
    "source_code": "@implementer(IPolicyForHTTPS)\nclass BrowserLikeContextFactory(ScrapyClientContextFactory):\n\n    def creatorForNetloc(self, hostname: bytes, port: int) -> ClientTLSOptions:\n        return optionsForClientTLS(hostname=hostname.decode('ascii'), trustRoot=platformTrust(), extraCertificateOptions={'method': self._ssl_method})",
    "docstring": "Twisted-recommended context factory for web clients. Quoting the documentation of the :class: class: The default is to use a :class:, so unless you have special requirements you can leave this as-is. :meth: is the same as :class: except this context factory allows setting the TLS/SSL method to use. The default OpenSSL method is ``) which allows TLS protocol negotiation.",
    "type": "class",
    "file_path": "scrapy\\scrapy\\core\\downloader\\contextfactory.py",
    "ast_data": "ClassDef name:BrowserLikeContextFactory FunctionDef name:creatorForNetloc arg:self arg:hostname arg:port arguments arg arg arg Return return:yes Call Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "_dump_csv",
    "source_code": "def _dump_csv(self, filename):\n    with open(filename, 'w', newline='') as csvfile:\n        writer = csv.writer(csvfile)\n        for rank in self.pipeline_order:\n            writer.writerow(self.pipeline_order[rank])",
    "docstring": "Dump a CSV representation of the schedule into a file with the provided filename.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\pipelining\\schedules.py",
    "ast_data": "FunctionDef name:_dump_csv arg:self arg:filename arguments arg arg With Call Assign Call For Call"
  },
  {
    "library": "tensorflow",
    "name": "get_value",
    "source_code": "@abc.abstractmethod\ndef get_value(self, name=None):\n    raise NotImplementedError('Optional.get_value()')",
    "docstring": "Returns the value wrapped by this optional. If this optional does not have a value (i.e. evaluates to ), this operation will raise at runtime. >>> optional = tf.experimental.Optional.from_value(42) >>> print(optional.get_value()) tf.Tensor(42, shape=(), dtype=int32) Args: name: (Optional.) A name for the created operation. Returns: The wrapped value.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\data\\ops\\optional_ops.py",
    "ast_data": "FunctionDef name:get_value arg:self arg:name arguments arg arg Raise Call"
  },
  {
    "library": "tensorflow",
    "name": "put",
    "source_code": "def put(self, key, vals, indices=None, name=None):\n    with ops.name_scope(name, '%s_put' % self._name, self._scope_vals(vals)) as scope:\n        vals, indices = self._check_put_dtypes(vals, indices)\n        with ops.colocate_with(self._coloc_op):\n            op = self._put_fn(key, indices, vals, dtypes=self._dtypes, shared_name=self._name, name=scope, capacity=self._capacity, memory_limit=self._memory_limit)\n    return op",
    "docstring": "Create an op that stores the (key, vals) pair in the staging area. Incomplete puts are possible, preferably using a dictionary for vals as the appropriate dtypes and shapes can be inferred from the value names dictionary key values. If vals is a list or tuple, indices must also be specified so that the op knows at which element position to perform the insert. This operation will block if the capacity or memory limit of this container is reached. Args: key: Key associated with the data vals: Tensor (or a dict/tuple of Tensors) to place into the staging area. indices: (Optional) if vals is a tuple/list, this is required. name: A name for the operation (optional) Returns: The created op Raises: ValueError: If the number or type of inputs don't match the staging area.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\data_flow_ops.py",
    "ast_data": "FunctionDef name:put arg:self arg:key arg:vals arg:indices arg:name arguments arg arg arg arg arg With Call Call Assign Call With Call Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "saveable",
    "source_code": "@property\ndef saveable(self):\n    return self._saveable",
    "docstring": "Returns whether this FuncGraph is saveable.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\func_graph.py",
    "ast_data": "FunctionDef name:saveable arg:self arguments arg Return return:yes"
  },
  {
    "library": "pygame",
    "name": "do",
    "source_code": "def do(self, f, *args, **kwArgs):\n    self.queue.put((f, args, kwArgs))",
    "docstring": "puts a function on a queue for running later.",
    "type": "method",
    "file_path": "pygame\\src_py\\threads\\__init__.py",
    "ast_data": "FunctionDef name:do arg:self arg:f arguments arg arg arg arg Call"
  },
  {
    "library": "tensorflow",
    "name": "set_initialized",
    "source_code": "def set_initialized(value):\n    global _INITIALIZED_ACCELERATOR_SYSTEM_TYPE\n    _INITIALIZED_ACCELERATOR_SYSTEM_TYPE = value",
    "docstring": "Sets if accelerator system has been initialized.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\dtensor\\python\\accelerator_util.py",
    "ast_data": "FunctionDef name:set_initialized arg:value arguments arg Assign"
  },
  {
    "library": "pytorch",
    "name": "_prepare_exported_program_for_export",
    "source_code": "def _prepare_exported_program_for_export(exported_program: torch.export.ExportedProgram, *, registry: _registration.ONNXRegistry) -> torch.export.ExportedProgram:\n    exported_program = _fx_passes.decompose_with_registry(exported_program, registry)\n    graph_module = exported_program.graph_module\n    _fx_passes.insert_type_promotion_nodes(graph_module)\n    graph_module = _fx_passes.remove_assertion_nodes(graph_module)\n    exported_program._graph_module = graph_module\n    return exported_program",
    "docstring": "Decompose and apply pre-export transformations to the exported program.",
    "type": "function",
    "file_path": "pytorch\\torch\\onnx\\_internal\\exporter\\_core.py",
    "ast_data": "FunctionDef name:_prepare_exported_program_for_export arg:exported_program arguments arg arg Assign Call Assign Call Assign Call Assign Return return:yes"
  },
  {
    "library": "authlib",
    "name": "create_exists_nonce_func",
    "source_code": "def create_exists_nonce_func(cache, key_prefix='nonce:', expires=86400):\n\n    def exists_nonce(nonce, timestamp, client_id, oauth_token):\n        key = f'{key_prefix}{nonce}-{timestamp}-{client_id}'\n        if oauth_token:\n            key = f'{key}-{oauth_token}'\n        rv = cache.has(key)\n        cache.set(key, 1, timeout=expires)\n        return rv\n    return exists_nonce",
    "docstring": "Create an `` function that can be used in hooks and resource protector. :param cache: Cache instance :param key_prefix: key prefix for temporary credential :param expires: Expire time for nonce",
    "type": "function",
    "file_path": "authlib\\authlib\\integrations\\flask_oauth1\\cache.py",
    "ast_data": "FunctionDef name:create_exists_nonce_func arg:cache arg:key_prefix arg:expires arguments arg arg arg FunctionDef name:exists_nonce arg:nonce arg:timestamp arg:client_id arg:oauth_token arguments arg arg arg arg Assign If Assign Assign Call Call Return return:yes Return return:yes"
  },
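Usage sketch, assuming `create_exists_nonce_func` from above is in scope; `DictCache` is a hypothetical toy cache exposing the Flask-Caching-style `has`/`set` methods the function relies on:

```python
class DictCache:
    # Hypothetical in-memory stand-in for a Flask-Caching cache object.
    def __init__(self):
        self._data = {}

    def has(self, key):
        return key in self._data

    def set(self, key, value, timeout=None):
        self._data[key] = value  # timeout ignored in this toy cache

exists_nonce = create_exists_nonce_func(DictCache(), expires=300)
print(exists_nonce("abc", 1700000000, "client-1", None))  # False: first use
print(exists_nonce("abc", 1700000000, "client-1", None))  # True: replayed
```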
  {
    "library": "tensorflow",
    "name": "node_def",
    "source_code": "@property\ndef node_def(self):\n    return self._node_def",
    "docstring": "The proto representing the op that failed.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\errors_impl.py",
    "ast_data": "FunctionDef name:node_def arg:self arguments arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "ConvReLU3d",
    "source_code": "class ConvReLU3d(_FusedModule):\n\n    def __init__(self, conv, relu):\n        assert type_before_parametrizations(conv) == Conv3d and type_before_parametrizations(relu) == ReLU, f'Incorrect types for input modules{type_before_parametrizations(conv)}{type_before_parametrizations(relu)}'\n        super().__init__(conv, relu)",
    "docstring": "This is a sequential container which calls the Conv3d and ReLU modules. During quantization this will be replaced with the corresponding fused module.",
    "type": "class",
    "file_path": "pytorch\\torch\\ao\\nn\\intrinsic\\modules\\fused.py",
    "ast_data": "ClassDef name:ConvReLU3d FunctionDef name:__init__ arg:self arg:conv arg:relu arguments arg arg arg BoolOp Compare Call Compare Call Call Call Call Call"
  },
  {
    "library": "cherrypy",
    "name": "__init__",
    "source_code": "def __init__(self, bus, frequency=1, match='.*'):\n    self.mtimes = {}\n    self.files = set()\n    self.match = match\n    Monitor.__init__(self, bus, self.run, frequency)",
    "docstring": "Initialize the auto-reloader monitor plugin.",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\process\\plugins.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:bus arg:frequency arg:match arguments arg arg arg arg Assign Assign Call Assign Call"
  },
  {
    "library": "pytorch",
    "name": "_get_folded_quantized_qat_conv_bn_pattern",
    "source_code": "def _get_folded_quantized_qat_conv_bn_pattern(is_per_channel: bool, has_bias: bool, bias_is_quantized: bool, conv_fn: Callable, bn_is_training: bool) -> Callable:\n    bn_eps = 1e-05\n\n    def _folded_quantized_qat_conv_bn_pattern(x: torch.Tensor, conv_weight: torch.Tensor, bn_weight: torch.Tensor, bn_bias: torch.Tensor, bn_running_mean: torch.Tensor, bn_running_var: torch.Tensor, **kwargs) -> torch.Tensor:\n        conv_weight = _append_qdq(conv_weight, is_per_channel, is_bias=False, kwargs=kwargs)\n        if has_bias:\n            bias = kwargs['conv_bias']\n            if bias_is_quantized:\n                bias = _append_qdq(bias, is_per_channel, is_bias=True, kwargs=kwargs)\n        else:\n            bias = None\n        x = conv_fn(x, conv_weight, bias)\n        x = F.batch_norm(x, bn_running_mean, bn_running_var, bn_weight, bn_bias, training=bn_is_training, eps=bn_eps)\n        return x\n    return _WrapperModule(_folded_quantized_qat_conv_bn_pattern)",
    "docstring": "Quantized QAT conv - bn pattern with bn weights being folded into conv.",
    "type": "function",
    "file_path": "pytorch\\torch\\ao\\quantization\\pt2e\\qat_utils.py",
    "ast_data": "FunctionDef name:_get_folded_quantized_qat_conv_bn_pattern arg:is_per_channel arg:has_bias arg:bias_is_quantized arg:conv_fn arg:bn_is_training arguments arg arg arg arg arg Assign FunctionDef name:_folded_quantized_qat_conv_bn_pattern arg:x arg:conv_weight arg:bn_weight arg:bn_bias arg:bn_running_mean arg:bn_running_var arguments arg arg arg arg arg arg arg Assign Call If Assign If Assign Call Assign Assign Call Assign Call Return return:yes Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "is_nested_object",
    "source_code": "def is_nested_object(obj) -> bool:\n    return bool(isinstance(obj, ABCSeries) and is_object_dtype(obj.dtype) and any((isinstance(v, ABCSeries) for v in obj._values)))",
    "docstring": "return a boolean if we have a nested object, e.g. a Series with 1 or more Series elements This may not be necessarily be performant.",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\dtypes\\cast.py",
    "ast_data": "FunctionDef name:is_nested_object arg:obj arguments arg Return return:yes Call BoolOp Call Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "size",
    "source_code": "def size(self, name=None):\n    raise NotImplementedError",
    "docstring": "Compute the number of elements in this table.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\lookup_ops.py",
    "ast_data": "FunctionDef name:size arg:self arg:name arguments arg arg Raise"
  },
  {
    "library": "pytorch",
    "name": "FlatParamShardMetadata",
    "source_code": "class FlatParamShardMetadata(NamedTuple):\n    param_names: tuple[str, ...]\n    param_shapes: tuple[torch.Size, ...]\n    param_strides: tuple[tuple[int, ...], ...]\n    param_contiguities: tuple[bool, ...]\n    param_numels: tuple[int, ...]\n    param_offsets: tuple[tuple[int, int], ...]",
    "docstring": "This holds metadata specific to this rank's shard of the flat parameter. Attributes: param_names (Tuple[str, ...]): Prefixed parameter names of this rank's shard of the parameters; see :class:. param_shapes (Tuple[torch.Size, ...]): Parameter shapes of this rank's shard of the parameters; see :class:. param_strides (Tuple[torch.Size, ...]): Parameter strides of this rank's shard of the parameters; see :class:. param_contiguities (Tuple[bool, ...]): Parameter call results of this rank's shard of the parameters; see :class:. param_numels (Tuple[int, ...]): Parameter numels of this rank's shard of the parameters; see :class:. param_offsets (Tuple[Tuple[int, int], ...]): [start, end] offsets (in units of numels) giving this rank's part of each flattened original parameter.",
    "type": "class",
    "file_path": "pytorch\\torch\\distributed\\fsdp\\_flat_param.py",
    "ast_data": "ClassDef name:FlatParamShardMetadata"
  },
  {
    "library": "matplotlib",
    "name": "disable_mouse_rotation",
    "source_code": "def disable_mouse_rotation(self):\n    self.mouse_init(rotate_btn=[], pan_btn=[], zoom_btn=[])",
    "docstring": "Disable mouse buttons for 3D rotation, panning, and zooming.",
    "type": "method",
    "file_path": "matplotlib\\lib\\mpl_toolkits\\mplot3d\\axes3d.py",
    "ast_data": "FunctionDef name:disable_mouse_rotation arg:self arguments arg Call"
  },
  {
    "library": "pytorch",
    "name": "dequantize_per_tensor",
    "source_code": "@impl(quantized_decomposed_lib, 'dequantize_per_tensor', 'CompositeExplicitAutograd')\ndef dequantize_per_tensor(input: torch.Tensor, scale: float, zero_point: int, quant_min: int, quant_max: int, dtype: torch.dtype, *, out_dtype: Optional[torch.dtype]=None) -> torch.Tensor:\n    assert input.dtype == dtype, f'Expecting input to have dtype: {dtype}, but got {input.dtype}'\n    if out_dtype is None:\n        out_dtype = torch.float32\n    if dtype in _DTYPE_TO_QVALUE_BOUNDS:\n        return (input.to(out_dtype) - zero_point) * scale\n    else:\n        raise ValueError(f'Unsupported dtype in dequantize_per_tensor: {dtype}')",
    "docstring": "Affine dequantization for the Tensor using the same quantization parameters to map from quantized values to floating point values Args: input (torch.Tensor): Tensor with dtype matching argument, e.g. (), it is a per tensor quantized Tensor if combined with quantization parameters in the argument of this function (scale/zero_point) scale (float): quantization parameter for affine quantization zero_point (int): quantization parameter for affine quantization quant_min (int): minimum quantized value for input Tensor (not used in computation, reserved for pattern matching) quant_max (int): maximum quantized value for input Tensor (not used in computation, reserved for pattern matching) dtype (torch.dtype): dtype for input Tensor (not used in computation, reserved for pattern matching) out_dtype (torch.dtype?): optional dtype for output Tensor Returns: dequantized float32 Tensor",
    "type": "function",
    "file_path": "pytorch\\torch\\ao\\quantization\\fx\\_decomposed.py",
    "ast_data": "FunctionDef name:dequantize_per_tensor arg:input arg:scale arg:zero_point arg:quant_min arg:quant_max arg:dtype arguments arg arg arg arg arg arg arg Compare If Compare Assign If Compare Return return:yes Call Raise Call Call"
  },
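The affine mapping itself can be demonstrated with plain torch ops; this worked sketch (not the library entry point above) quantizes to uint8 with a chosen `(scale, zero_point)` and inverts with `(q - zero_point) * scale`:

```python
import torch

scale, zero_point = 0.1, 128
x = torch.tensor([-1.0, 0.0, 0.5, 1.0])

# Quantize: round to the grid, shift by zero_point, clamp to uint8 range.
q = torch.clamp(torch.round(x / scale) + zero_point, 0, 255).to(torch.uint8)
# Dequantize: the affine inverse used by dequantize_per_tensor.
x_hat = (q.to(torch.float32) - zero_point) * scale

print(q)      # tensor([118, 128, 133, 138], dtype=torch.uint8)
print(x_hat)  # tensor([-1.0000, 0.0000, 0.5000, 1.0000])
```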
  {
    "library": "django",
    "name": "quote_name_unless_alias",
    "source_code": "def quote_name_unless_alias(self, name):\n    if name in self.quote_cache:\n        return self.quote_cache[name]\n    if name in self.query.alias_map and name not in self.query.table_map or name in self.query.extra_select or (self.query.external_aliases.get(name) and name not in self.query.table_map):\n        self.quote_cache[name] = name\n        return name\n    r = self.connection.ops.quote_name(name)\n    self.quote_cache[name] = r\n    return r",
    "docstring": "A wrapper around connection.ops.quote_name that doesn't quote aliases for table names. This avoids problems with some SQL dialects that treat quoted strings specially (e.g. PostgreSQL).",
    "type": "method",
    "file_path": "django\\django\\db\\models\\sql\\compiler.py",
    "ast_data": "FunctionDef name:quote_name_unless_alias arg:self arg:name arguments arg arg If Compare Return return:yes If BoolOp BoolOp Compare Compare Compare BoolOp Call Compare Assign Return return:yes Assign Call Assign Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "savefig",
    "source_code": "def savefig(self, figure=None, **kwargs):\n    if not isinstance(figure, Figure):\n        if figure is None:\n            manager = Gcf.get_active()\n        else:\n            manager = Gcf.get_fig_manager(figure)\n        if manager is None:\n            raise ValueError(f'No figure {figure}')\n        figure = manager.canvas.figure\n    width, height = figure.get_size_inches()\n    if self._n_figures == 0:\n        self._write_header(width, height)\n    else:\n        self._file.write(b'\\\\newpage\\\\ifdefined\\\\pdfpagewidth\\\\pdfpagewidth\\\\else\\\\pagewidth\\\\fi=%fin\\\\ifdefined\\\\pdfpageheight\\\\pdfpageheight\\\\else\\\\pageheight\\\\fi=%fin%%\\n' % (width, height))\n    figure.savefig(self._file, format='pgf', backend='pgf', **kwargs)\n    self._n_figures += 1",
    "docstring": "Save a to this file as a new page. Any other keyword arguments are passed to . Parameters ---------- figure : or int, default: the active figure The figure, or index of the figure, that is saved to the file.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\backends\\backend_pgf.py",
    "ast_data": "FunctionDef name:savefig arg:self arg:figure arguments arg arg arg If Call If Compare Assign Call Assign Call If Compare Raise Call Assign Assign Call If Compare Call Call Call"
  },
  {
    "library": "django",
    "name": "multiple_chunks",
    "source_code": "def multiple_chunks(self, chunk_size=None):\n    return self.size > (chunk_size or self.DEFAULT_CHUNK_SIZE)",
    "docstring": "Return `` -- there's no good reason to read from memory in chunks.",
    "type": "method",
    "file_path": "django\\django\\core\\files\\base.py",
    "ast_data": "FunctionDef name:multiple_chunks arg:self arg:chunk_size arguments arg arg Return return:yes Compare BoolOp"
  },
  {
    "library": "tensorflow",
    "name": "_get_dequantized_hist_mids_after_quantize",
    "source_code": "def _get_dequantized_hist_mids_after_quantize(self, quant_min: float, quant_max: float) -> np.ndarray:\n    maxbound = 2 ** self._num_bits - 1\n    minbound = 0\n    scale = (quant_max - quant_min) / maxbound\n    zero_point = -quant_min / scale\n    if abs(zero_point) > 9000000000.0:\n        zero_point = 9000000000.0\n    if abs(scale) < 1e-09:\n        scale = 1e-09\n    zero_point = round(zero_point)\n    quantized_hist_mids = np.clip(np.round(self._hist_mids / scale) + zero_point, minbound, maxbound)\n    dequantized_hist_mids = scale * (quantized_hist_mids - zero_point)\n    return dequantized_hist_mids",
    "docstring": "Quantizes and dequantizes hist_mids using quant_min and quant_max. Quantization converts the range of numbers from [quant_min, quant_max] to [0, 2^num_bits - 1]. Values less than quant_min are converted to 0, and values greater than quant_max are converted to 2^num_bits - 1. The histogram represents the distribution of the data, and our goal is to find the quant_min and quant_max that best describe this distribution. To do this, we quantize hist_mids using quant_min and quant_max and dequantize them again. Then the difference between hist_mids and dequantized hist_mids equates to quantization error when using quant_min and quant_max. Args: quant_min: The minimum real value that can be represented by a quantized value. quant_max: The maximum real value that can be represented by a quantized value. Returns: dequantized hist_mids after quantizing by quant_min and quant_max",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\compiler\\mlir\\quantization\\tensorflow\\calibrator\\calibration_algorithm.py",
    "ast_data": "FunctionDef name:_get_dequantized_hist_mids_after_quantize arg:self arg:quant_min arg:quant_max arguments arg arg arg Assign Assign Assign Assign If Compare Call Assign If Compare Call Assign Assign Call Assign Call Call Assign Return return:yes"
  },
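A standalone numpy sketch of the same round trip, assuming 8 bits; the gap between the histogram bin midpoints and their dequantized images is the quantization error the calibrator scores for a candidate `(quant_min, quant_max)`:

```python
import numpy as np

num_bits = 8
quant_min, quant_max = -1.0, 1.0
maxbound = 2 ** num_bits - 1

scale = (quant_max - quant_min) / maxbound
zero_point = round(-quant_min / scale)

mids = np.linspace(-1.5, 1.5, 7)  # histogram bin midpoints
# Quantize to [0, 2^num_bits - 1], then map back to real values.
q = np.clip(np.round(mids / scale) + zero_point, 0, maxbound)
deq = scale * (q - zero_point)

print(np.round(deq - mids, 4))  # error is largest outside [-1, 1]
```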
  {
    "library": "tensorflow",
    "name": "Tensor",
    "source_code": "@tf_export('__internal__.types.Tensor', v1=[])\nclass Tensor(object):\n\n    @property\n    def dtype(self):\n        pass\n\n    @property\n    def shape(self):\n        pass",
    "docstring": "The base class of all dense Tensor objects. A dense tensor has a static data type (dtype), and may have a static rank and shape. Tensor objects are immutable. Mutable objects may be backed by a Tensor which holds the unique handle that identifies the mutable object.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\types\\core.py",
    "ast_data": "ClassDef name:Tensor FunctionDef name:dtype arg:self arguments arg FunctionDef name:shape arg:self arguments arg Call"
  },
  {
    "library": "tensorflow",
    "name": "get_shape_and_type",
    "source_code": "def get_shape_and_type(matrix):\n    handle_data = getattr(matrix, '_handle_data', None)\n    if handle_data is None:\n        return None\n    if len(handle_data.shape_and_type) != 1:\n        raise ValueError('shape_and_type array in _handle_data must have length one, but saw: %d' % len(handle_data.shape_and_type))\n    return handle_data.shape_and_type[0]",
    "docstring": "Return matrix's shape and type if available.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\linalg\\sparse\\sparse_csr_matrix_ops.py",
    "ast_data": "FunctionDef name:get_shape_and_type arg:matrix arguments arg Assign Call If Compare Return return:no If Compare Call Raise Call Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "get_hatch_linewidth",
    "source_code": "def get_hatch_linewidth(self):\n    return self._hatch_linewidth",
    "docstring": "Get the hatch linewidth.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\backend_bases.py",
    "ast_data": "FunctionDef name:get_hatch_linewidth arg:self arguments arg Return return:yes"
  },
  {
    "library": "pandas",
    "name": "_validate_encoding",
    "source_code": "@final\ndef _validate_encoding(self) -> None:\n    codecs.lookup(self.encoding)",
    "docstring": "Validate encoding. This method will check if encoding is among listed under codecs. Raises ------ LookupError * If encoding is not available in codecs.",
    "type": "method",
    "file_path": "pandas\\pandas\\io\\formats\\xml.py",
    "ast_data": "FunctionDef name:_validate_encoding arg:self arguments arg Call"
  },
  {
    "library": "scipy",
    "name": "_var",
    "source_code": "def _var(self, dim, df, scale):\n    if df > dim + 3:\n        var = (df - dim + 1) * scale ** 2\n        diag = scale.diagonal()\n        var += (df - dim - 1) * np.outer(diag, diag)\n        var /= (df - dim) * (df - dim - 1) ** 2 * (df - dim - 3)\n    else:\n        var = None\n    return var",
    "docstring": "Variance of the inverse Wishart distribution. Parameters ---------- dim : int Dimension of the scale matrix %(_doc_default_callparams)s Notes ----- As this function does no argument checking, it should not be called directly; use 'var' instead.",
    "type": "method",
    "file_path": "scipy\\scipy\\stats\\_multivariate.py",
    "ast_data": "FunctionDef name:_var arg:self arg:dim arg:df arg:scale arguments arg arg arg arg If Compare Assign Assign Call Call Assign Return return:yes"
  },
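A numeric sketch of the formula for a concrete 2x2 scale matrix with `df > dim + 3`, cross-checked against the public `invwishart.var` that the docstring points callers to:

```python
import numpy as np
from scipy.stats import invwishart

dim, df = 2, 10
scale = np.array([[2.0, 0.5],
                  [0.5, 1.0]])

# Element-wise variance of the inverse Wishart, as in _var above.
diag = scale.diagonal()
var = (df - dim + 1) * scale ** 2
var += (df - dim - 1) * np.outer(diag, diag)
var /= (df - dim) * (df - dim - 1) ** 2 * (df - dim - 3)
print(var)

# Same numbers via the public API.
print(invwishart.var(df, scale))
```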
  {
    "library": "django",
    "name": "make_valid",
    "source_code": "def make_valid(self):\n    return GEOSGeometry(capi.geos_makevalid(self.ptr), srid=self.srid)",
    "docstring": "Attempt to create a valid representation of a given invalid geometry without losing any of the input vertices.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\geos\\geometry.py",
    "ast_data": "FunctionDef name:make_valid arg:self arguments arg Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "patch",
    "source_code": "def patch(self) -> _VirtualizedSerializerContextManager:\n    return _VirtualizedSerializerContextManager(self)",
    "docstring": "Returns a context manager which patches the saved values into the current environment. While patched, any value not listed above will be poisoned so that reads will raise an error.",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\compile_fx_ext.py",
    "ast_data": "FunctionDef name:patch arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "forward_shape",
    "source_code": "def forward_shape(self, shape):\n    return shape",
    "docstring": "Infers the shape of the forward computation, given the input shape. Defaults to preserving shape.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributions\\transforms.py",
    "ast_data": "FunctionDef name:forward_shape arg:self arg:shape arguments arg arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_select_class_id",
    "source_code": "def _select_class_id(ids, selected_id):\n    ids = sparse_tensor.convert_to_tensor_or_sparse_tensor(ids)\n    if isinstance(ids, sparse_tensor.SparseTensor):\n        return sparse_ops.sparse_retain(ids, math_ops.equal(ids.values, selected_id))\n    ids_shape = array_ops.shape(ids, out_type=dtypes.int64)\n    ids_last_dim = array_ops.size(ids_shape) - 1\n    filled_selected_id_shape = math_ops.reduced_shape(ids_shape, array_ops.reshape(ids_last_dim, [1]))\n    filled_selected_id = array_ops.fill(filled_selected_id_shape, math_ops.cast(selected_id, dtypes.int64))\n    result = sets.set_intersection(filled_selected_id, ids)\n    return sparse_tensor.SparseTensor(indices=result.indices, values=result.values, dense_shape=ids_shape)",
    "docstring": "Filter all but out of . Args: ids: or of IDs. selected_id: Int id to select. Returns: of same dimensions as . This contains only the entries equal to .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\metrics_impl.py",
    "ast_data": "FunctionDef name:_select_class_id arg:ids arg:selected_id arguments arg arg Assign Call If Call Return return:yes Call Call Assign Call Assign Call Assign Call Call Assign Call Call Assign Call Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "_draw_paths_with_artist_properties",
    "source_code": "def _draw_paths_with_artist_properties(self, renderer, draw_path_args_list):\n    renderer.open_group('patch', self.get_gid())\n    gc = renderer.new_gc()\n    gc.set_foreground(self._edgecolor, isRGBA=True)\n    lw = self._linewidth\n    if self._edgecolor[3] == 0 or self._linestyle == 'None':\n        lw = 0\n    gc.set_linewidth(lw)\n    gc.set_dashes(*self._dash_pattern)\n    gc.set_capstyle(self._capstyle)\n    gc.set_joinstyle(self._joinstyle)\n    gc.set_antialiased(self._antialiased)\n    self._set_gc_clip(gc)\n    gc.set_url(self._url)\n    gc.set_snap(self.get_snap())\n    gc.set_alpha(self._alpha)\n    if self._hatch:\n        gc.set_hatch(self._hatch)\n        gc.set_hatch_color(self.get_hatchcolor())\n        gc.set_hatch_linewidth(self._hatch_linewidth)\n    if self.get_sketch_params() is not None:\n        gc.set_sketch_params(*self.get_sketch_params())\n    if self.get_path_effects():\n        from matplotlib.patheffects import PathEffectRenderer\n        renderer = PathEffectRenderer(self.get_path_effects(), renderer)\n    for draw_path_args in draw_path_args_list:\n        renderer.draw_path(gc, *draw_path_args)\n    gc.restore()\n    renderer.close_group('patch')\n    self.stale = False",
    "docstring": "`FancyArrowPatch` for each tuple *draw_path_args* in *draw_path_args_list*.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\patches.py",
    "ast_data": "FunctionDef name:_draw_paths_with_artist_properties arg:self arg:renderer arg:draw_path_args_list arguments arg arg arg Call Call Assign Call Call Assign If BoolOp Compare Compare Assign Call Call Call Call Call Call Call Call Call Call If Call Call Call Call If Compare Call Call Call If Call Assign Call Call For Call Call Call Assign"
  },
  {
    "library": "django",
    "name": "BaseTable",
    "source_code": "class BaseTable:\n    join_type = None\n    parent_alias = None\n    filtered_relation = None\n\n    def __init__(self, table_name, alias):\n        self.table_name = table_name\n        self.table_alias = alias\n\n    def as_sql(self, compiler, connection):\n        alias_str = '' if self.table_alias == self.table_name else ' %s' % self.table_alias\n        base_sql = compiler.quote_name_unless_alias(self.table_name)\n        return (base_sql + alias_str, [])\n\n    def relabeled_clone(self, change_map):\n        return self.__class__(self.table_name, change_map.get(self.table_alias, self.table_alias))\n\n    @property\n    def identity(self):\n        return (self.__class__, self.table_name, self.table_alias)\n\n    def __eq__(self, other):\n        if not isinstance(other, BaseTable):\n            return NotImplemented\n        return self.identity == other.identity\n\n    def __hash__(self):\n        return hash(self.identity)",
    "docstring": "The BaseTable class is used for base table references in FROM clause. For example, the SQL \"foo\" in SELECT * FROM \"foo\" WHERE somecond could be generated by this class.",
    "type": "class",
    "file_path": "django\\django\\db\\models\\sql\\datastructures.py",
    "ast_data": "ClassDef name:BaseTable Assign Assign Assign FunctionDef name:__init__ arg:self arg:table_name arg:alias arguments arg arg arg Assign Assign FunctionDef name:as_sql arg:self arg:compiler arg:connection arguments arg arg arg Assign Compare Assign Call Return return:yes FunctionDef name:relabeled_clone arg:self arg:change_map arguments arg arg Return return:yes Call Call FunctionDef name:identity arg:self arguments arg Return return:yes FunctionDef name:__eq__ arg:self arg:other arguments arg arg If Call Return return:yes Return return:yes Compare FunctionDef name:__hash__ arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "or_masks",
    "source_code": "def or_masks(*mask_mods: _mask_mod_signature) -> _mask_mod_signature:\n    if not all((callable(arg) for arg in mask_mods)):\n        raise RuntimeError(f'All inputs should be callable mask_mods: {mask_mods}')\n\n    def or_mask(b, h, q_idx, kv_idx):\n        result = b.new_zeros((), dtype=torch.bool)\n        for mask in mask_mods:\n            result = result | mask(b, h, q_idx, kv_idx)\n        return result\n    return or_mask",
    "docstring": "Returns a mask_mod that's the union of provided mask_mods",
    "type": "function",
    "file_path": "pytorch\\torch\\nn\\attention\\flex_attention.py",
    "ast_data": "FunctionDef name:or_masks arguments arg If Call Call Raise Call FunctionDef name:or_mask arg:b arg:h arg:q_idx arg:kv_idx arguments arg arg arg arg Assign Call For Assign Call Return return:yes Return return:yes"
  },
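Usage sketch, assuming `or_masks` from above is in scope; the two `mask_mod`s are illustrative examples, not library code. A `mask_mod` maps `(batch, head, q_idx, kv_idx)` index tensors to a boolean keep/drop decision:

```python
import torch

def causal(b, h, q_idx, kv_idx):
    return q_idx >= kv_idx

def sliding_window(b, h, q_idx, kv_idx):
    return (q_idx - kv_idx).abs() <= 2

combined = or_masks(causal, sliding_window)
b = h = torch.tensor(0)
q, kv = torch.tensor(3), torch.tensor(5)
# causal alone drops this position; the window mask keeps it, so the
# union keeps it.
print(causal(b, h, q, kv), combined(b, h, q, kv))  # tensor(False) tensor(True)
```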
  {
    "library": "kornia",
    "name": "add_metadata",
    "source_code": "def add_metadata(onnx_model: onnx.ModelProto, additional_metadata: Optional[list[tuple[str, str]]]=None) -> onnx.ModelProto:\n    if additional_metadata is None:\n        additional_metadata = []\n    for key, value in [('source', 'kornia'), ('version', kornia.__version__), *additional_metadata]:\n        metadata_props = onnx_model.metadata_props.add()\n        metadata_props.key = key\n        metadata_props.value = str(value)\n    return onnx_model",
    "docstring": "Add metadata to an ONNX model. The metadata includes the source library (set to \"kornia\"), the version of kornia, and any additional metadata provided as a list of key-value pairs. Args: onnx_model: The ONNX model to add metadata to. additional_metadata: A list of tuples, where each tuple contains a key and a value for the additional metadata to add to the ONNX model. Returns: The ONNX model with the added metadata.",
    "type": "function",
    "file_path": "kornia\\kornia\\onnx\\utils.py",
    "ast_data": "FunctionDef name:add_metadata arg:onnx_model arg:additional_metadata arguments arg arg If Compare Assign For Assign Call Assign Assign Call Return return:yes"
  },
  {
    "library": "django",
    "name": "normalize_prefetch_lookups",
    "source_code": "def normalize_prefetch_lookups(lookups, prefix=None):\n    ret = []\n    for lookup in lookups:\n        if not isinstance(lookup, Prefetch):\n            lookup = Prefetch(lookup)\n        if prefix:\n            lookup.add_prefix(prefix)\n        ret.append(lookup)\n    return ret",
    "docstring": "Normalize lookups into Prefetch objects.",
    "type": "function",
    "file_path": "django\\django\\db\\models\\query.py",
    "ast_data": "FunctionDef name:normalize_prefetch_lookups arg:lookups arg:prefix arguments arg arg Assign For If Call Assign Call If Call Call Return return:yes"
  },
  {
    "library": "numpy",
    "name": "_fr1",
    "source_code": "def _fr1(a):\n    if a.size == 1:\n        a = a.copy()\n        a.shape = ()\n    return a",
    "docstring": "fix rank > 0 --> rank-0",
    "type": "function",
    "file_path": "numpy\\numpy\\_core\\getlimits.py",
    "ast_data": "FunctionDef name:_fr1 arg:a arguments arg If Compare Assign Call Assign Return return:yes"
  },
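A quick demonstration of the squeeze, with the function inlined from above: a one-element array of any rank is copied and collapsed to a 0-d scalar array, while larger arrays pass through untouched.

```python
import numpy as np

def _fr1(a):
    # Copy first so the caller's array keeps its shape.
    if a.size == 1:
        a = a.copy()
        a.shape = ()
    return a

a = np.array([[1.5]])              # shape (1, 1), size 1
print(_fr1(a).shape, _fr1(a)[()])  # () 1.5
print(_fr1(np.arange(3)).shape)    # (3,) -- passed through
```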
  {
    "library": "kornia",
    "name": "__init__",
    "source_code": "def __init__(self, image_size: ImageSize, params: Tensor) -> None:\n    if params.shape[-1] != 8 or len(params.shape) > 2:\n        raise ValueError('params must be of shape B, 8 for KANNALA_BRANDT_K3 Camera')\n    super().__init__(KannalaBrandtK3Transform(), Z1Projection(), image_size, params)",
    "docstring": "Construct KannalaBrandtK3 class. Args: image_size: Image size params: Camera parameters of shape :math: of the form :math:.",
    "type": "method",
    "file_path": "kornia\\kornia\\sensors\\camera\\camera_model.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:image_size arg:params arguments arg arg arg If BoolOp Compare Compare Call Raise Call Call Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "assert_proper_iterable",
    "source_code": "@tf_export('debugging.assert_proper_iterable', v1=['debugging.assert_proper_iterable', 'assert_proper_iterable'])\n@dispatch.add_dispatch_support\n@deprecation.deprecated_endpoints('assert_proper_iterable')\ndef assert_proper_iterable(values):\n    unintentional_iterables = (tensor_lib.Tensor, sparse_tensor.SparseTensor, np.ndarray) + compat.bytes_or_text_types\n    if isinstance(values, unintentional_iterables):\n        raise TypeError('Expected argument \"values\" to be a \"proper\" iterable.  Found: %s' % type(values))\n    if not hasattr(values, '__iter__'):\n        raise TypeError('Expected argument \"values\" to be iterable.  Found: %s' % type(values))",
    "docstring": "Static assert that values is a \"proper\" iterable. that expect iterables of can call this to validate input. Useful since , , byte/text type are all iterables themselves. Args: values: Object to be checked. Raises: TypeError: If is not iterable or is one of , , , .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\check_ops.py",
    "ast_data": "FunctionDef name:assert_proper_iterable arg:values arguments arg Assign If Call Raise Call Call If Call Raise Call Call Call Call"
  },
  {
    "library": "scikit-learn",
    "name": "_insert_error_scores",
    "source_code": "def _insert_error_scores(results, error_score):\n    successful_score = None\n    failed_indices = []\n    for i, result in enumerate(results):\n        if result['fit_error'] is not None:\n            failed_indices.append(i)\n        elif successful_score is None:\n            successful_score = result['test_scores']\n    if isinstance(successful_score, dict):\n        formatted_error = {name: error_score for name in successful_score}\n        for i in failed_indices:\n            results[i]['test_scores'] = formatted_error.copy()\n            if 'train_scores' in results[i]:\n                results[i]['train_scores'] = formatted_error.copy()",
    "docstring": "Insert error in by replacing them inplace with . This only applies to multimetric scores because will handle the single metric case.",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\model_selection\\_validation.py",
    "ast_data": "FunctionDef name:_insert_error_scores arg:results arg:error_score arguments arg arg Assign Assign For Call If Compare Call If Compare Assign If Call Assign For Assign Call If Compare Assign Call"
  },
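A standalone trace, assuming `_insert_error_scores` from above is in scope, showing a failed fold's multimetric scores being replaced in place with `error_score` for every metric name:

```python
import numpy as np

# One fold succeeded with multimetric scores, one failed during fit.
results = [
    {"fit_error": None, "test_scores": {"accuracy": 0.9, "f1": 0.8}},
    {"fit_error": "ValueError", "test_scores": None},
]
_insert_error_scores(results, error_score=np.nan)
print(results[1]["test_scores"])  # {'accuracy': nan, 'f1': nan}
```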
  {
    "library": "scipy",
    "name": "__call__",
    "source_code": "def __call__(self, m, n, seed=None):\n    return multivariate_hypergeom_frozen(m, n, seed=seed)",
    "docstring": "Create a frozen multivariate_hypergeom distribution. See for more information.",
    "type": "method",
    "file_path": "scipy\\scipy\\stats\\_multivariate.py",
    "ast_data": "FunctionDef name:__call__ arg:self arg:m arg:n arg:seed arguments arg arg arg arg Return return:yes Call"
  },
  {
    "library": "scrapy",
    "name": "TopLevelFormatter",
    "source_code": "class TopLevelFormatter(logging.Filter):\n\n    def __init__(self, loggers: list[str] | None=None):\n        super().__init__()\n        self.loggers: list[str] = loggers or []\n\n    def filter(self, record: logging.LogRecord) -> bool:\n        if any((record.name.startswith(logger + '.') for logger in self.loggers)):\n            record.name = record.name.split('.', 1)[0]\n        return True",
    "docstring": "Keep only top level loggers' name (direct children from root) from records. This filter will replace Scrapy loggers' names with 'scrapy'. This mimics the old Scrapy log behaviour and helps shortening long names. Since it can't be set for just one logger (it won't propagate for its children), it's going to be set in the root handler, with a parametrized `` list where it should act.",
    "type": "class",
    "file_path": "scrapy\\scrapy\\utils\\log.py",
    "ast_data": "ClassDef name:TopLevelFormatter FunctionDef name:__init__ arg:self arg:loggers arguments arg arg Call Call BoolOp FunctionDef name:filter arg:self arg:record arguments arg arg If Call Call Assign Call Return return:yes"
  },
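Usage sketch, assuming `TopLevelFormatter` from above is in scope; attaching it to a root handler shortens child-logger names in the output:

```python
import logging

handler = logging.StreamHandler()
handler.addFilter(TopLevelFormatter(["scrapy"]))
# basicConfig applies the format to handlers that lack a formatter.
logging.basicConfig(level=logging.INFO, handlers=[handler],
                    format="%(name)s: %(message)s")

logging.getLogger("scrapy.core.engine").info("spider opened")
# printed as "scrapy: spider opened"
```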
  {
    "library": "tensorflow",
    "name": "set_external_captures",
    "source_code": "def set_external_captures(self, captures):\n    self._captured_inputs = captures",
    "docstring": "Updates the function capture values. The new values must have tensor types and shapes consistent with the original captures of the concrete function, but it is allowed to change a value captured with a deferred one and vice-versa. Args: captures: A list of tensors or closures. Tensors are value captures, and closures are call-time (deferred captures).",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\polymorphic_function\\concrete_function.py",
    "ast_data": "FunctionDef name:set_external_captures arg:self arg:captures arguments arg arg Assign"
  },
  {
    "library": "pytorch",
    "name": "memory_stats_as_nested_dict",
    "source_code": "def memory_stats_as_nested_dict(device: 'Device'=None) -> dict[str, Any]:\n    if not is_initialized():\n        return {}\n    device = _get_device_index(device, optional=True)\n    return torch._C._cuda_memoryStats(device)",
    "docstring": "Return the result of :func: as a nested dictionary.",
    "type": "function",
    "file_path": "pytorch\\torch\\cuda\\memory.py",
    "ast_data": "FunctionDef name:memory_stats_as_nested_dict arg:device arguments arg If Call Return return:no Assign Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "_post_forward_reshard",
    "source_code": "@no_type_check\ndef _post_forward_reshard(state: _FSDPState, handle: FlatParamHandle) -> None:\n    if not handle:\n        return\n    free_unsharded_flat_param = not state._is_root and handle._sharding_strategy in RESHARD_AFTER_FORWARD_HANDLE_STRATEGIES\n    _reshard(state, handle, free_unsharded_flat_param)",
    "docstring": "Reshards parameters in the post-forward.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\fsdp\\_runtime_utils.py",
    "ast_data": "FunctionDef name:_post_forward_reshard arg:state arg:handle arguments arg arg If Return return:no Assign BoolOp Compare Call"
  },
  {
    "library": "scikit-learn",
    "name": "fit",
    "source_code": "def fit(self, X, y=None, init=None):\n    self.fit_transform(X, init=init)\n    return self",
    "docstring": "Compute the position of the points in the embedding space. Parameters ---------- X : array-like of shape (n_samples, n_features) or (n_samples, n_samples) Input data. If ``, the input should be the dissimilarity matrix. y : Ignored Not used, present for API consistency by convention. init : ndarray of shape (n_samples, n_components), default=None Starting configuration of the embedding to initialize the SMACOF algorithm. By default, the algorithm is initialized with a randomly chosen array. Returns ------- self : object Fitted estimator.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\manifold\\_mds.py",
    "ast_data": "FunctionDef name:fit arg:self arg:X arg:y arg:init arguments arg arg arg arg Call Return return:yes"
  },
  {
    "library": "django",
    "name": "width",
    "source_code": "@property\ndef width(self):\n    return capi.get_ds_xsize(self._ptr)",
    "docstring": "Width (X axis) in pixels.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\gdal\\raster\\source.py",
    "ast_data": "FunctionDef name:width arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "scrapy",
    "name": "__init__",
    "source_code": "def __init__(self, uri: URI, settings: Settings, conn_lost_deferred: Deferred[list[BaseException]]) -> None:\n    self._conn_lost_deferred: Deferred[list[BaseException]] = conn_lost_deferred\n    config = H2Configuration(client_side=True, header_encoding='utf-8')\n    self.conn = H2Connection(config=config)\n    self._stream_id_generator = itertools.count(start=1, step=2)\n    self.streams: dict[int, Stream] = {}\n    self._pending_request_stream_pool: deque[Stream] = deque()\n    self._conn_lost_errors: list[BaseException] = []\n    self.metadata: dict[str, Any] = {'certificate': None, 'ip_address': None, 'uri': uri, 'default_download_maxsize': settings.getint('DOWNLOAD_MAXSIZE'), 'default_download_warnsize': settings.getint('DOWNLOAD_WARNSIZE'), 'active_streams': 0, 'settings_acknowledged': False}",
    "docstring": "Arguments: uri -- URI of the base url to which HTTP/2 Connection will be made. uri is used to verify that incoming client requests have correct base URL. settings -- Scrapy project settings conn_lost_deferred -- Deferred fires with the reason: Failure to notify that connection was lost",
    "type": "method",
    "file_path": "scrapy\\scrapy\\core\\http2\\protocol.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:uri arg:settings arg:conn_lost_deferred arguments arg arg arg arg Assign Call Assign Call Assign Call Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "uses_star_args_in_call",
    "source_code": "def uses_star_args_in_call(node):\n    if sys.version_info[:2] >= (3, 5):\n        for arg in node.args:\n            if isinstance(arg, ast.Starred):\n                return True\n    elif node.starargs:\n        return True\n    return False",
    "docstring": "Check if an ast.Call node uses arbitrary-length positional *args. This function works with the AST call node format of Python3.5+ as well as the different AST format of earlier versions of Python. Args: node: The ast.Call node to check arg values for. Returns: True if the node uses starred variadic positional args or keyword args. False if it does not.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\tools\\compatibility\\ast_edits.py",
    "ast_data": "FunctionDef name:uses_star_args_in_call arg:node arguments arg If Compare For If Call Return return:yes If Return return:yes Return return:yes"
  },
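The core of the check above, demonstrated on stock `ast` objects rather than TensorFlow's private module; this is a sketch of the same logic, not a call into `ast_edits`.

```python
import ast

call = ast.parse("f(1, *rest)").body[0].value   # an ast.Call node
# On Python 3.5+, starred positional args appear as ast.Starred in .args
print(any(isinstance(a, ast.Starred) for a in call.args))  # True

call = ast.parse("f(1, 2)").body[0].value
print(any(isinstance(a, ast.Starred) for a in call.args))  # False
```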
  {
    "library": "pytorch",
    "name": "get_qconv",
    "source_code": "@classmethod\ndef get_qconv(cls, mod, activation_post_process, weight_post_process=None):\n    if weight_post_process is None:\n        weight_post_process = mod.qconfig.weight()\n    weight_post_process(mod.weight)\n    assert weight_post_process.dtype == torch.qint8, 'Weight observer must have a dtype of qint8'\n    qweight = _quantize_weight(mod.weight.float(), weight_post_process)\n    qconv = cls(mod.in_channels, mod.out_channels, mod.kernel_size, mod.stride, mod.padding, mod.dilation, mod.groups, mod.bias is not None, mod.padding_mode)\n    qconv.set_weight_bias(qweight, mod.bias)\n    if activation_post_process is None or activation_post_process.dtype == torch.float:\n        return qconv\n    else:\n        act_scale, act_zp = activation_post_process.calculate_qparams()\n        qconv.scale = float(act_scale)\n        qconv.zero_point = int(act_zp)\n        return qconv",
    "docstring": "Creates a qconv object and returns it.",
    "type": "method",
    "file_path": "pytorch\\torch\\ao\\nn\\quantized\\modules\\conv.py",
    "ast_data": "FunctionDef name:get_qconv arg:cls arg:mod arg:activation_post_process arg:weight_post_process arguments arg arg arg arg If Compare Assign Call Call Compare Assign Call Call Assign Call Compare Call If BoolOp Compare Compare Return return:yes Assign Call Assign Call Assign Call Return return:yes"
  },
  {
    "library": "django",
    "name": "database_forwards",
    "source_code": "def database_forwards(self, app_label, schema_editor, from_state, to_state):\n    raise NotImplementedError('subclasses of Operation must provide a database_forwards() method')",
    "docstring": "Perform the mutation on the database schema in the normal (forwards) direction.",
    "type": "method",
    "file_path": "django\\django\\db\\migrations\\operations\\base.py",
    "ast_data": "FunctionDef name:database_forwards arg:self arg:app_label arg:schema_editor arg:from_state arg:to_state arguments arg arg arg arg arg Raise Call"
  },
  {
    "library": "numpy",
    "name": "convert",
    "source_code": "def convert(self, domain=None, kind=None, window=None):\n    if kind is None:\n        kind = self.__class__\n    if domain is None:\n        domain = kind.domain\n    if window is None:\n        window = kind.window\n    return self(kind.identity(domain, window=window, symbol=self.symbol))",
    "docstring": "Convert series to a different kind and/or domain and/or window. Parameters ---------- domain : array_like, optional The domain of the converted series. If the value is None, the default domain of is used. kind : class, optional The polynomial series type class to which the current instance should be converted. If kind is None, then the class of the current instance is used. window : array_like, optional The window of the converted series. If the value is None, the default window of is used. Returns ------- new_series : series The returned class can be of different type than the current instance and/or have a different domain and/or different window. Notes ----- Conversion between domains and class types can result in numerically ill defined series.",
    "type": "method",
    "file_path": "numpy\\numpy\\polynomial\\_polybase.py",
    "ast_data": "FunctionDef name:convert arg:self arg:domain arg:kind arg:window arguments arg arg arg arg If Compare Assign If Compare Assign If Compare Assign Return return:yes Call Call"
  },
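A hedged example of `convert` between basis kinds; it round-trips values to show the converted series represents the same function.

```python
import numpy as np
from numpy.polynomial import Chebyshev, Polynomial

p = Polynomial([1.0, 2.0, 3.0])   # 1 + 2x + 3x^2 in the power basis
c = p.convert(kind=Chebyshev)     # same function in the Chebyshev basis

x = np.linspace(-1.0, 1.0, 7)
assert np.allclose(p(x), c(x))    # identical values, different basis
```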
  {
    "library": "tensorflow",
    "name": "register_overrides",
    "source_code": "def register_overrides():\n    control_flow.for_loop_registry.register(dataset_ops.DatasetV2, _tf_ag_dataset_for_stmt)\n    py_builtins.abs_registry.register(dataset_ops.DatasetV2, _tf_ag_dataset_abs)\n    py_builtins.len_registry.register(dataset_ops.DatasetV2, _tf_ag_dataset_len)\n    py_builtins.enumerate_registry.register(dataset_ops.DatasetV2, _tf_ag_dataset_enumerate)\n    py_builtins.zip_registry.register(dataset_ops.DatasetV2, _tf_ag_dataset_zip)\n    py_builtins.map_registry.register(dataset_ops.DatasetV2, _tf_ag_dataset_map)\n    py_builtins.filter_registry.register(dataset_ops.DatasetV2, _tf_ag_dataset_filter)\n    py_builtins.any_registry.register(dataset_ops.DatasetV2, _tf_ag_dataset_any)\n    py_builtins.all_registry.register(dataset_ops.DatasetV2, _tf_ag_dataset_all)",
    "docstring": "Registers the autograph specific overrides for dataset_ops.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\data\\ops\\dataset_autograph.py",
    "ast_data": "FunctionDef name:register_overrides arguments Call Call Call Call Call Call Call Call Call"
  },
  {
    "library": "virtualenv",
    "name": "supports",
    "source_code": "@classmethod\ndef supports(cls, interpreter):\n    return True",
    "docstring": "Check if the activation script is supported in the given interpreter. :param interpreter: the interpreter we need to support :return: `` otherwise",
    "type": "method",
    "file_path": "virtualenv\\src\\virtualenv\\activation\\activator.py",
    "ast_data": "FunctionDef name:supports arg:cls arg:interpreter arguments arg arg Return return:yes"
  },
  {
    "library": "django",
    "name": "srs",
    "source_code": "@srs.setter\ndef srs(self, value):\n    if isinstance(value, SpatialReference):\n        srs = value\n    elif isinstance(value, (int, str)):\n        srs = SpatialReference(value)\n    else:\n        raise ValueError('Could not create a SpatialReference from input.')\n    capi.set_ds_projection_ref(self._ptr, srs.wkt.encode())\n    self._flush()",
    "docstring": "Set the spatial reference used in this GDALRaster. The input can be a SpatialReference or any parameter accepted by the SpatialReference constructor.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\gdal\\raster\\source.py",
    "ast_data": "FunctionDef name:srs arg:self arg:value arguments arg arg If Call Assign If Call Assign Call Raise Call Call Call Call"
  },
  {
    "library": "scipy",
    "name": "Branin02",
    "source_code": "class Branin02(Benchmark):\n\n    def __init__(self, dimensions=2):\n        Benchmark.__init__(self, dimensions)\n        self._bounds = [(-5.0, 15.0), (-5.0, 15.0)]\n        self.global_optimum = [[-3.1969884, 12.52625787]]\n        self.fglob = 5.558914403893825\n\n    def fun(self, x, *args):\n        self.nfev += 1\n        return (x[1] - 5.1 / (4 * pi ** 2) * x[0] ** 2 + 5 * x[0] / pi - 6) ** 2 + 10 * (1 - 1 / (8 * pi)) * cos(x[0]) * cos(x[1]) + log(x[0] ** 2.0 + x[1] ** 2.0 + 1.0) + 10",
    "docstring": "Branin02 objective function. The Branin02 global optimization problem is a multimodal minimization problem defined as follows .. math:: f_{\\text{Branin02}}(x) = \\left(- 1.275 \\frac{x_1^{2}}{\\pi^{2}} + 5 \\frac{x_1}{\\pi} + x_2 - 6 \\right)^{2} + \\left(10 - \\frac{5}{4 \\pi} \\right) \\cos\\left(x_1\\right) \\cos\\left(x_2\\right) + \\log(x_1^2+x_2^2 + 1) + 10 with :math: for :math:. *Global optimum*: :math: for :math: .. [1] Gavana, A. Global Optimization Benchmarks and AMPGO retrieved 2015",
    "type": "class",
    "file_path": "scipy\\benchmarks\\benchmarks\\go_benchmark_functions\\go_funcs_B.py",
    "ast_data": "ClassDef name:Branin02 FunctionDef name:__init__ arg:self arg:dimensions arguments arg arg Call Assign Assign Assign FunctionDef name:fun arg:self arg:x arguments arg arg arg Return return:yes Call Call Call"
  },
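A standalone re-implementation of the benchmark's `fun` for illustration (not an import of the scipy benchmark suite), evaluated at the recorded global optimum to reproduce `fglob`.

```python
from numpy import cos, log, pi

def branin02(x):
    # Mirrors Branin02.fun from the entry above.
    return ((x[1] - 5.1 / (4 * pi**2) * x[0]**2 + 5 * x[0] / pi - 6)**2
            + 10 * (1 - 1 / (8 * pi)) * cos(x[0]) * cos(x[1])
            + log(x[0]**2 + x[1]**2 + 1.0) + 10)

print(branin02([-3.1969884, 12.52625787]))  # ~5.558914, matching fglob
```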
  {
    "library": "matplotlib",
    "name": "__sub__",
    "source_code": "def __sub__(self, other):\n    if not isinstance(other, Transform):\n        return NotImplemented\n    for remainder, sub_tree in self._iter_break_from_left_to_right():\n        if sub_tree == other:\n            return remainder\n    for remainder, sub_tree in other._iter_break_from_left_to_right():\n        if sub_tree == self:\n            if not remainder.has_inverse:\n                raise ValueError(\"The shortcut cannot be computed since 'other' includes a non-invertible component\")\n            return remainder.inverted()\n    if other.has_inverse:\n        return self + other.inverted()\n    else:\n        raise ValueError('It is not possible to compute transA - transB since transB cannot be inverted and there is no shortcut possible.')",
    "docstring": "Compose *self* with the inverse of *other*, cancelling identical terms if any:: # In general: A - B == A + B.inverted() # (but see note regarding frozen transforms below). # If A \"ends with\" B (i.e. A == A' + B for some A') we can cancel # out B: (A' + B) - B == A' # Likewise, if B \"starts with\" A (B = A + B'), we can cancel out A: A - (A + B') == B'.inverted() == B'^-1 Cancellation (rather than naively returning `` is mutated.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\transforms.py",
    "ast_data": "FunctionDef name:__sub__ arg:self arg:other arguments arg arg If Call Return return:yes For Call If Compare Return return:yes For Call If Compare If Raise Call Return return:yes Call If Return return:yes Call Raise Call"
  },
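A small demonstration of the cancellation described above, using public `matplotlib.transforms` objects; the assertion compares matrices rather than object identity, since `(A + B) - B` may return a recomposed transform.

```python
import numpy as np
from matplotlib.transforms import Affine2D

A = Affine2D().scale(2.0)
B = Affine2D().translate(1.0, 0.0)

C = (A + B) - B   # B cancels exactly; no B.inverted() is composed
assert np.allclose(C.get_matrix(), A.get_matrix())
```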
  {
    "library": "tensorflow",
    "name": "should_use_result",
    "source_code": "def should_use_result(fn=None, warn_in_eager=False, error_in_function=False):\n\n    def decorated(fn):\n\n        def wrapped(*args, **kwargs):\n            return _add_should_use_warning(fn(*args, **kwargs), warn_in_eager=warn_in_eager, error_in_function=error_in_function)\n        fn_doc = fn.__doc__ or ''\n        split_doc = fn_doc.split('\\n', 1)\n        if len(split_doc) == 1:\n            updated_doc = fn_doc\n        else:\n            brief, rest = split_doc\n            updated_doc = '\\n'.join([brief, textwrap.dedent(rest)])\n        note = '\\n\\nNote: The output of this function should be used. If it is not, a warning will be logged or an error may be raised. To mark the output as used, call its .mark_used() method.'\n        return tf_decorator.make_decorator(target=fn, decorator_func=wrapped, decorator_name='should_use_result', decorator_doc=updated_doc + note)\n    if fn is not None:\n        return decorated(fn)\n    else:\n        return decorated",
    "docstring": "Function wrapper that ensures the function's output is used. If the output is not used, a is logged. If is set, then a will be raised at the end of function tracing if the output is not used by that point. An output is marked as used if any of its attributes are read, modified, or updated. Examples when the output is a include: - Using it in any capacity (e.g. , ) - Accessing a property (e.g. getting or ). - Calling . Note, certain behaviors cannot be tracked - for these the object may not be marked as used. Examples include: - . In this case, comparison is done on types / ids. - . Similar to above. Args: fn: The function to wrap. warn_in_eager: Whether to create warnings in Eager as well. error_in_function: Whether to raise an error when creating a tf.function. Returns: The wrapped function.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\util\\tf_should_use.py",
    "ast_data": "FunctionDef name:should_use_result arg:fn arg:warn_in_eager arg:error_in_function arguments arg arg arg FunctionDef name:decorated arg:fn arguments arg FunctionDef name:wrapped arguments arg arg Return return:yes Call Call Assign BoolOp Assign Call If Compare Call Assign Assign Assign Call Call Assign Return return:yes Call If Compare Return return:yes Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "tanhshrink",
    "source_code": "def tanhshrink(input):\n    if has_torch_function_unary(input):\n        return handle_torch_function(tanhshrink, (input,), input)\n    return input - input.tanh()",
    "docstring": "tanhshrink(input) -> Tensor Applies element-wise, :math: See :class: for more details.",
    "type": "function",
    "file_path": "pytorch\\torch\\nn\\functional.py",
    "ast_data": "FunctionDef name:tanhshrink arg:input arguments arg If Call Return return:yes Call Return return:yes Call"
  },
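The functional form is exactly `input - input.tanh()`, which a quick check confirms.

```python
import torch
import torch.nn.functional as F

x = torch.linspace(-2.0, 2.0, steps=5)
assert torch.allclose(F.tanhshrink(x), x - torch.tanh(x))
```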
  {
    "library": "scipy",
    "name": "partial_derivative",
    "source_code": "def partial_derivative(self, dx, dy):\n    if dx == 0 and dy == 0:\n        return self\n    else:\n        kx, ky = self.degrees\n        if not (dx >= 0 and dy >= 0):\n            raise ValueError('order of derivative must be positive or zero')\n        if not (dx < kx and dy < ky):\n            raise ValueError('order of derivative must be less than degree of spline')\n        tx, ty, c = self.tck[:3]\n        with FITPACK_LOCK:\n            newc, ier = dfitpack.pardtc(tx, ty, c, kx, ky, dx, dy)\n        if ier != 0:\n            raise ValueError(f'Unexpected error code returned by pardtc: {ier}')\n        nx = len(tx)\n        ny = len(ty)\n        newtx = tx[dx:nx - dx]\n        newty = ty[dy:ny - dy]\n        newkx, newky = (kx - dx, ky - dy)\n        newclen = (nx - dx - kx - 1) * (ny - dy - ky - 1)\n        return _DerivedBivariateSpline._from_tck((newtx, newty, newc[:newclen], newkx, newky))",
    "docstring": "Construct a new spline representing a partial derivative of this spline. Parameters ---------- dx, dy : int Orders of the derivative in x and y respectively. They must be non-negative integers and less than the respective degree of the original spline (self) in that direction (``) representing the derivative of this spline. Notes ----- .. versionadded:: 1.9.0",
    "type": "method",
    "file_path": "scipy\\scipy\\interpolate\\_fitpack2.py",
    "ast_data": "FunctionDef name:partial_derivative arg:self arg:dx arg:dy arguments arg arg arg If BoolOp Compare Compare Return return:yes Assign If BoolOp Compare Compare Raise Call If BoolOp Compare Compare Raise Call Assign With Assign Call If Compare Raise Call Assign Call Assign Call Assign Assign Assign Assign Return return:yes Call"
  },
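A usage sketch with `RectBivariateSpline`, which exposes this method (SciPy >= 1.9); the analytic derivative of the toy surface gives an easy correctness check.

```python
import numpy as np
from scipy.interpolate import RectBivariateSpline

x = y = np.linspace(0.0, 1.0, 10)
z = np.outer(x**2, y**3)           # f(x, y) = x^2 * y^3
spl = RectBivariateSpline(x, y, z)

d = spl.partial_derivative(1, 1)   # d^2 f / (dx dy) = 2x * 3y^2
print(d(0.5, 0.5))                 # ~0.75 analytically
```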
  {
    "library": "scipy",
    "name": "parse_data",
    "source_code": "def parse_data(self, data_str):\n    date_str = data_str.strip().strip(\"'\").strip('\"')\n    if date_str == '?':\n        return np.datetime64('NaT', self.datetime_unit)\n    else:\n        dt = datetime.datetime.strptime(date_str, self.date_format)\n        return np.datetime64(dt).astype(f'datetime64[{self.datetime_unit}]')",
    "docstring": "Parse a value of this type.",
    "type": "method",
    "file_path": "scipy\\scipy\\io\\arff\\_arffread.py",
    "ast_data": "FunctionDef name:parse_data arg:self arg:data_str arguments arg arg Assign Call Call Call If Compare Return return:yes Call Assign Call Return return:yes Call Call"
  },
  {
    "library": "pygame",
    "name": "pixels3d",
    "source_code": "def pixels3d(surface):\n    return numpy_array(surface.get_view('3'), copy=False)",
    "docstring": "pygame.surfarray.pixels3d(Surface): return array reference pixels into a 3d array Create a new 3D array that directly references the pixel values in a Surface. Any changes to the array will affect the pixels in the Surface. This is a fast operation since no data is copied. This will only work on Surfaces that have 24-bit or 32-bit formats. Lower pixel formats cannot be referenced. The Surface this references will remain locked for the lifetime of the array (see the Surface.lock - lock the Surface memory for pixel access method).",
    "type": "function",
    "file_path": "pygame\\src_py\\surfarray.py",
    "ast_data": "FunctionDef name:pixels3d arg:surface arguments arg Return return:yes Call Call"
  },
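A minimal sketch of the referencing behaviour described above; note the array must be deleted to release the surface lock.

```python
import pygame
import pygame.surfarray as surfarray

pygame.init()
surf = pygame.Surface((4, 4), depth=32)  # 32-bit format can be referenced
arr = surfarray.pixels3d(surf)           # shape (4, 4, 3), no copy
arr[:, :, 0] = 255                       # writes straight into the surface
del arr                                  # releases the surface lock
print(surf.get_at((0, 0)))               # red channel is now 255
```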
  {
    "library": "pytorch",
    "name": "values",
    "source_code": "@_copy_to_script_wrapper\ndef values(self) -> Iterable[Module]:\n    return self._modules.values()",
    "docstring": "Return an iterable of the ModuleDict values.",
    "type": "method",
    "file_path": "pytorch\\torch\\nn\\modules\\container.py",
    "ast_data": "FunctionDef name:values arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "_IntWrapper",
    "source_code": "@dataclasses.dataclass\nclass _IntWrapper:\n    val: int\n    dynamism: Optional[Union[_DimHint, int]] = dataclasses.field(init=False, default=None)",
    "docstring": "Dummy wrapper class to wrap around integer inputs so that when we parse the dynamic_shapes structure, we can mark if any of the integers were marked as dynamic.",
    "type": "class",
    "file_path": "pytorch\\torch\\export\\dynamic_shapes.py",
    "ast_data": "ClassDef name:_IntWrapper Call"
  },
  {
    "library": "pytorch",
    "name": "lnotab_writer",
    "source_code": "def lnotab_writer(lineno: int, byteno: int=0) -> tuple[list[int], Callable[[int, int], None]]:\n    assert sys.version_info < (3, 10)\n    lnotab: list[int] = []\n\n    def update(lineno_new, byteno_new):\n        nonlocal byteno, lineno\n        while byteno_new != byteno or lineno_new != lineno:\n            byte_offset = max(0, min(byteno_new - byteno, 255))\n            line_offset = max(-128, min(lineno_new - lineno, 127))\n            assert byte_offset != 0 or line_offset != 0\n            byteno += byte_offset\n            lineno += line_offset\n            lnotab.extend((byte_offset, line_offset & 255))\n    return (lnotab, update)",
    "docstring": "Used to create typing.CodeType.co_lnotab See This is the internal format of the line number table if Python < 3.10",
    "type": "function",
    "file_path": "pytorch\\torch\\_dynamo\\bytecode_transformation.py",
    "ast_data": "FunctionDef name:lnotab_writer arg:lineno arg:byteno arguments arg arg Compare FunctionDef name:update arg:lineno_new arg:byteno_new arguments arg arg While BoolOp Compare Compare Assign Call Call Assign Call Call BoolOp Compare Compare Call Return return:yes"
  },
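A worked example of the writer above; the offsets are illustrative, and the version guard mirrors the function's own `assert`.

```python
import sys

if sys.version_info < (3, 10):
    from torch._dynamo.bytecode_transformation import lnotab_writer

    lnotab, update = lnotab_writer(lineno=1)
    update(2, 4)    # line 2 begins at bytecode offset 4  -> deltas (4, 1)
    update(5, 10)   # line 5 begins at offset 10          -> deltas (6, 3)
    print(bytes(lnotab))  # b'\x04\x01\x06\x03'
```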
  {
    "library": "tensorflow",
    "name": "broadcast_recv_v2",
    "source_code": "def broadcast_recv_v2(shape, dtype, group_size, group_key, instance_key, communication_hint='auto', timeout=0):\n    return gen_collective_ops.collective_bcast_recv_v2(T=dtype, group_size=group_size, group_key=group_key, instance_key=instance_key, shape=shape, communication_hint=communication_hint.lower(), timeout_seconds=timeout)",
    "docstring": "Receives a broadcasts tensor, across devices. Args: shape: an int tensor. Shape of the tensor to be received. dtype: Type of the tensor to be received. group_size: an int32 tensor. One plus the number of receiving tensors, i.e. the total number of devices participating. Each tensor must reside on a different device. group_key: an int32 tensor identifying the group of devices. instance_key: an int32 tensor identifying the participating group of Ops. communication_hint: preferred collective communication. The implementation may fall back to another mechanism. Options include , , and . timeout: If set to a non zero, set a completion timeout to detect staleness. If the timer goes off, a DeadlineExceededError is raised. The timeout value in seconds. This feature is experimental. Returns: An Op implementing the broadcast receive.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\collective_ops.py",
    "ast_data": "FunctionDef name:broadcast_recv_v2 arg:shape arg:dtype arg:group_size arg:group_key arg:instance_key arg:communication_hint arg:timeout arguments arg arg arg arg arg arg arg Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "Subtract",
    "source_code": "class Subtract(_Merge):\n\n    @tf_utils.shape_type_conversion\n    def build(self, input_shape):\n        super(Subtract, self).build(input_shape)\n        if len(input_shape) != 2:\n            raise ValueError('A `Subtract` layer should be called on exactly 2 inputs')\n\n    def _merge_function(self, inputs):\n        if len(inputs) != 2:\n            raise ValueError('A `Subtract` layer should be called on exactly 2 inputs')\n        return inputs[0] - inputs[1]",
    "docstring": "Layer that subtracts two inputs. It takes as input a list of tensors of size 2, both of the same shape, and returns a single tensor, (inputs[0] - inputs[1]), also of the same shape. Examples:",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\layers\\merge.py",
    "ast_data": "ClassDef name:Subtract FunctionDef name:build arg:self arg:input_shape arguments arg arg Call Call If Compare Call Raise Call FunctionDef name:_merge_function arg:self arg:inputs arguments arg arg If Compare Call Raise Call Return return:yes"
  },
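A hedged usage sketch via the public `tf.keras` API (the entry above is the internal implementation); both inputs must share a shape.

```python
import tensorflow as tf

x1 = tf.keras.Input(shape=(8,))
x2 = tf.keras.Input(shape=(8,))
diff = tf.keras.layers.Subtract()([x1, x2])  # inputs[0] - inputs[1]
model = tf.keras.Model(inputs=[x1, x2], outputs=diff)
model.summary()
```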
  {
    "library": "django",
    "name": "tablespace_sql",
    "source_code": "def tablespace_sql(self, tablespace, inline=False):\n    return ''",
    "docstring": "Return the SQL that will be used in a query to define the tablespace. Return '' if the backend doesn't support tablespaces. If is True, append the SQL to a row; otherwise append it to the entire CREATE TABLE or CREATE INDEX statement.",
    "type": "method",
    "file_path": "django\\django\\db\\backends\\base\\operations.py",
    "ast_data": "FunctionDef name:tablespace_sql arg:self arg:tablespace arg:inline arguments arg arg arg Return return:yes"
  },
  {
    "library": "numpy",
    "name": "Methods0DInvert",
    "source_code": "class Methods0DInvert(Benchmark):\n    params = ['int16', 'int32', 'int64']\n    param_names = ['npdtypes']\n    timeout = 10\n\n    def setup(self, npdtypes):\n        self.xarg = np.array(3, dtype=npdtypes)\n\n    def time_ndarray__0d__(self, npdtypes):\n        self.xarg.__invert__()",
    "docstring": "Zero dimension array methods",
    "type": "class",
    "file_path": "numpy\\benchmarks\\benchmarks\\bench_ufunc.py",
    "ast_data": "ClassDef name:Methods0DInvert Assign Assign Assign FunctionDef name:setup arg:self arg:npdtypes arguments arg arg Assign Call FunctionDef name:time_ndarray__0d__ arg:self arg:npdtypes arguments arg arg Call"
  },
  {
    "library": "pytorch",
    "name": "set_default_load_endianness",
    "source_code": "def set_default_load_endianness(endianness):\n    if not isinstance(endianness, LoadEndianness) and endianness is not None:\n        raise TypeError('Invalid argument type in function set_default_load_endianness')\n    from torch.utils.serialization import config\n    config.load.endianness = endianness",
    "docstring": "Set fallback byte order for loading files If byteorder mark is not present in saved checkpoint, this byte order is used as fallback. By default, it's \"native\" byte order. Args: endianness: the new fallback byte order",
    "type": "function",
    "file_path": "pytorch\\torch\\serialization.py",
    "ast_data": "FunctionDef name:set_default_load_endianness arg:endianness arguments arg If BoolOp Call Compare Raise Call Assign"
  },
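A minimal sketch of the setter above; `LoadEndianness` lives in `torch.serialization` on recent torch versions (availability is an assumption).

```python
import torch
from torch.serialization import LoadEndianness

# Checkpoints saved without a byte-order mark will now be read
# as little-endian instead of the machine's native order.
torch.serialization.set_default_load_endianness(LoadEndianness.LITTLE)
```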
  {
    "library": "scipy",
    "name": "mat_reader_factory",
    "source_code": "@docfiller\ndef mat_reader_factory(file_name, appendmat=True, **kwargs):\n    byte_stream, file_opened = _open_file(file_name, appendmat)\n    mjv, mnv = _get_matfile_version(byte_stream)\n    if mjv == 0:\n        return (MatFile4Reader(byte_stream, **kwargs), file_opened)\n    elif mjv == 1:\n        return (MatFile5Reader(byte_stream, **kwargs), file_opened)\n    elif mjv == 2:\n        raise NotImplementedError('Please use HDF reader for matlab v7.3 files, e.g. h5py')\n    else:\n        raise TypeError(f'Did not recognize version {mjv}')",
    "docstring": "Create reader for matlab .mat format files. Parameters ---------- %(file_arg)s %(append_arg)s %(load_args)s %(struct_arg)s Returns ------- matreader : MatFileReader object Initialized instance of MatFileReader class matching the mat file type detected in . file_opened : bool Whether the file was opened by this routine.",
    "type": "function",
    "file_path": "scipy\\scipy\\io\\matlab\\_mio.py",
    "ast_data": "FunctionDef name:mat_reader_factory arg:file_name arg:appendmat arguments arg arg arg Assign Call Assign Call If Compare Return return:yes Call If Compare Return return:yes Call If Compare Raise Call Raise Call"
  },
  {
    "library": "tensorflow",
    "name": "bfloat16_scope",
    "source_code": "@tf_export(v1=['tpu.bfloat16_scope'])\n@tf_contextlib.contextmanager\ndef bfloat16_scope(name: Optional[Text]=None) -> Generator[variable_scope.variable_scope, None, None]:\n    if name is None:\n        name = ''\n    with variable_scope.variable_scope(name, custom_getter=_get_custom_getter()) as varscope:\n        yield varscope",
    "docstring": "Scope class for bfloat16 variables so that the model uses custom getter. This enables variables to be read as bfloat16 type when using get_variable. Arguments: name: Name to use for scope. Yields: a variable scope.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\tpu\\bfloat16.py",
    "ast_data": "FunctionDef name:bfloat16_scope arg:name arguments arg If Compare Assign With Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "replace_params_with_constants",
    "source_code": "def replace_params_with_constants(gm: torch.fx.GraphModule, flat_params: list[Any], fw_metadata: torch._functorch.aot_autograd.ViewAndMutationMeta) -> list[int]:\n    params = gm.graph.find_nodes(op='placeholder')\n    fake_inp_nodes = params[:len(params)]\n    preserved_arg_indices = []\n    aliased_input_args = [out_info.base_idx for out_info in fw_metadata.output_info if out_info.base_idx is not None]\n    mutated_inps = [i for i, m in enumerate(fw_metadata.input_info) if m.mutation_type in (MutationType.MUTATED_IN_GRAPH, MutationType.MUTATED_OUT_GRAPH)]\n    static_indices_new = []\n    static_indices_offset = 0\n    for i, (real_input, node) in enumerate(zip(flat_params, fake_inp_nodes)):\n        if i in mutated_inps or i in aliased_input_args:\n            preserved_arg_indices.append(i)\n            if i in fw_metadata.static_input_indices:\n                new_static_index = i - static_indices_offset\n                static_indices_new.append(new_static_index)\n        else:\n            replace_node_with_constant(gm, node, real_input)\n            static_indices_offset += 1\n    preserved_arg_indices.extend(range(len(flat_params), len(params)))\n    fw_metadata.static_input_indices = static_indices_new\n    gm.recompile()\n    return preserved_arg_indices",
    "docstring": "Replaces the parameters of a PyTorch GraphModule with constants wherever possible. Returns a list of indices representing the input parameters that were not converted to constants.",
    "type": "function",
    "file_path": "pytorch\\torch\\_inductor\\freezing.py",
    "ast_data": "FunctionDef name:replace_params_with_constants arg:gm arg:flat_params arg:fw_metadata arguments arg arg arg Assign Call Assign Call Assign Assign Compare Assign Call Compare Assign Assign For Call Call If BoolOp Compare Compare Call If Compare Assign Call Call Call Call Call Call Assign Call Return return:yes"
  },
  {
    "library": "kornia",
    "name": "forward",
    "source_code": "def forward(self, input: Tensor, params: Optional[Dict[str, Tensor]]=None, **kwargs: Any) -> Tensor:\n    in_tensor = self.__unpack_input__(input)\n    input_shape = in_tensor.shape\n    in_tensor = self.transform_tensor(in_tensor)\n    batch_shape = in_tensor.shape\n    if params is None:\n        params = self.forward_parameters(batch_shape)\n    if 'batch_prob' not in params:\n        params['batch_prob'] = tensor([True] * batch_shape[0])\n    params, flags = self._process_kwargs_to_params_and_flags(params, self.flags, **kwargs)\n    output = self.apply_func(in_tensor, params, flags)\n    return self.transform_output_tensor(output, input_shape) if self.keepdim else output",
    "docstring": "Perform forward operations. Args: input: the input tensor. params: the corresponding parameters for an operation. If None, a new parameter suite will be generated. **kwargs: key-value pairs to override the parameters and flags. Note: By default, all the overwriting parameters in kwargs will not be recorded as in `` additionally.",
    "type": "method",
    "file_path": "kornia\\kornia\\augmentation\\base.py",
    "ast_data": "FunctionDef name:forward arg:self arg:input arg:params arguments arg arg arg arg Assign Call Assign Assign Call Assign If Compare Assign Call If Compare Assign Call Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "Schwefel21",
    "source_code": "class Schwefel21(Benchmark):\n    change_dimensionality = True\n\n    def __init__(self, dimensions=2):\n        Benchmark.__init__(self, dimensions)\n        self._bounds = list(zip([-100.0] * self.N, [100.0] * self.N))\n        self.global_optimum = [[0.0 for _ in range(self.N)]]\n        self.fglob = 0.0\n\n    def fun(self, x, *args):\n        self.nfev += 1\n        return max(abs(x))",
    "docstring": "Schwefel 21 objective function. This class defines the Schwefel 21 [1]_ global optimization problem. This is a unimodal minimization problem defined as follows: .. math:: f_{\\text{Schwefel21}}(x) = \\smash{\\displaystyle\\max_{1 \\leq i \\leq n}} \\lvert x_i \\rvert Here, :math: represents the number of dimensions and :math: for :math:. *Global optimum*: :math: for :math: for :math: .. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions For Global Optimization Problems Int. Journal of Mathematical Modelling and Numerical Optimisation, 2013, 4, 150-194.",
    "type": "class",
    "file_path": "scipy\\benchmarks\\benchmarks\\go_benchmark_functions\\go_funcs_S.py",
    "ast_data": "ClassDef name:Schwefel21 Assign FunctionDef name:__init__ arg:self arg:dimensions arguments arg arg Call Assign Call Call Assign Call Assign FunctionDef name:fun arg:self arg:x arguments arg arg arg Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "serialize_keras_class_and_config",
    "source_code": "def serialize_keras_class_and_config(cls_name, cls_config, obj=None, shared_object_id=None):\n    base_config = {'class_name': cls_name, 'config': cls_config}\n    if shared_object_id is not None:\n        base_config[SHARED_OBJECT_KEY] = shared_object_id\n    if _shared_object_saving_scope() is not None and obj is not None:\n        shared_object_config = _shared_object_saving_scope().get_config(obj)\n        if shared_object_config is None:\n            return _shared_object_saving_scope().create_config(base_config, obj)\n        return shared_object_config\n    return base_config",
    "docstring": "Returns the serialization of the class with the given config.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\utils\\generic_utils.py",
    "ast_data": "FunctionDef name:serialize_keras_class_and_config arg:cls_name arg:cls_config arg:obj arg:shared_object_id arguments arg arg arg arg Assign If Compare Assign If BoolOp Compare Call Compare Assign Call Call If Compare Return return:yes Call Call Return return:yes Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "all_gather_into_tensor_coalesced",
    "source_code": "def all_gather_into_tensor_coalesced(self: list[torch.Tensor], group: RANK_TYPES, tag: str='') -> list[torch.Tensor]:\n    group_name = _resolve_group_name(group, tag)\n    group_size = c10d._get_group_size_by_name(group_name)\n    tensor_list = torch.ops._c10d_functional.all_gather_into_tensor_coalesced(self, group_size, group_name)\n    return list(map(_maybe_wrap_tensor, tensor_list))",
    "docstring": "Gather a list of tensors across from all machines. Note that it currently only supports gather_dim = 0. The input tensor is left unmodified. Group can be one of: List[int]: ranks participating in the collective. List[List[int]]: 2D mesh of ranks taking part of this collective in MPMD. ProcessGroup: Will perform a collective using the ranks and tag of the PG. DeviceMesh: Do a SPMD collective over all ranks of the mesh (DeviceMesh, int): Do a MPMD collective over one dimension of the DeviceMesh :: N.B. If you pass a PG or a 1D list to perform a MPMD collective, the compiler won't be able to recover that information and perform collective algebraic optimization. Use other forms of input for that.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\_functional_collectives.py",
    "ast_data": "FunctionDef name:all_gather_into_tensor_coalesced arg:self arg:group arg:tag arguments arg arg arg Assign Call Assign Call Assign Call Return return:yes Call Call"
  },
  {
    "library": "django",
    "name": "CoordTransform",
    "source_code": "class CoordTransform(GDALBase):\n    destructor = capi.destroy_ct\n\n    def __init__(self, source, target):\n        if not isinstance(source, SpatialReference) or not isinstance(target, SpatialReference):\n            raise TypeError('source and target must be of type SpatialReference')\n        self.ptr = capi.new_ct(source._ptr, target._ptr)\n        self._srs1_name = source.name\n        self._srs2_name = target.name\n\n    def __str__(self):\n        return 'Transform from \"%s\" to \"%s\"' % (self._srs1_name, self._srs2_name)",
    "docstring": "The coordinate system transformation object.",
    "type": "class",
    "file_path": "django\\django\\contrib\\gis\\gdal\\srs.py",
    "ast_data": "ClassDef name:CoordTransform Assign FunctionDef name:__init__ arg:self arg:source arg:target arguments arg arg arg If BoolOp Call Call Raise Call Assign Call Assign Assign FunctionDef name:__str__ arg:self arguments arg Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "_aware_return_wrapper",
    "source_code": "def _aware_return_wrapper(self, f, returns_list=False):\n    if self._tzinfo is None:\n        return f\n\n    def normalize_arg(arg):\n        if isinstance(arg, datetime.datetime) and arg.tzinfo is not None:\n            if arg.tzinfo is not self._tzinfo:\n                arg = arg.astimezone(self._tzinfo)\n            return arg.replace(tzinfo=None)\n        return arg\n\n    def normalize_args(args, kwargs):\n        args = tuple((normalize_arg(arg) for arg in args))\n        kwargs = {kw: normalize_arg(arg) for kw, arg in kwargs.items()}\n        return (args, kwargs)\n    if not returns_list:\n\n        def inner_func(*args, **kwargs):\n            args, kwargs = normalize_args(args, kwargs)\n            dt = f(*args, **kwargs)\n            return self._attach_tzinfo(dt, self._tzinfo)\n    else:\n\n        def inner_func(*args, **kwargs):\n            args, kwargs = normalize_args(args, kwargs)\n            dts = f(*args, **kwargs)\n            return [self._attach_tzinfo(dt, self._tzinfo) for dt in dts]\n    return functools.wraps(f)(inner_func)",
    "docstring": "Decorator function that allows rrule methods to handle tzinfo.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\dates.py",
    "ast_data": "FunctionDef name:_aware_return_wrapper arg:self arg:f arg:returns_list arguments arg arg arg If Compare Return return:yes FunctionDef name:normalize_arg arg:arg arguments arg If BoolOp Call Compare If Compare Assign Call Return return:yes Call Return return:yes FunctionDef name:normalize_args arg:args arg:kwargs arguments arg arg Assign Call Call Assign Call Call Return return:yes If FunctionDef name:inner_func arguments arg arg Assign Call Assign Call Return return:yes Call FunctionDef name:inner_func arguments arg arg Assign Call Assign Call Return return:yes Call Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "public",
    "source_code": "@property\ndef public(self) -> str:\n    return str(self).split('+', 1)[0]",
    "docstring": "The public portion of the version. >>> Version(\"1.2.3\").public '1.2.3' >>> Version(\"1.2.3+abc\").public '1.2.3' >>> Version(\"1.2.3+abc.dev1\").public '1.2.3'",
    "type": "method",
    "file_path": "pytorch\\torch\\_vendor\\packaging\\version.py",
    "ast_data": "FunctionDef name:public arg:self arguments arg Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "SetFwdGradEnabledContextManager",
    "source_code": "class SetFwdGradEnabledContextManager(ContextWrappingVariable):\n\n    @staticmethod\n    def create(tx: 'InstructionTranslator', target_values, **kwargs):\n        return SetFwdGradEnabledContextManager(target_values=target_values, initial_values=None, **kwargs)\n\n    def enter(self, tx):\n        [mode] = self.target_values\n        self.prev_state = torch._C._is_fwd_grad_enabled()\n        torch._C._set_fwd_grad_enabled(mode)\n        self.set_cleanup_hook(tx, lambda: torch._C._set_fwd_grad_enabled(self.prev_state))\n        self.proxy = tx.output.create_node('call_function', torch._C._set_fwd_grad_enabled, (mode,), {})\n        return variables.ConstantVariable.create(None)\n\n    def exit(self, tx: 'InstructionTranslator', *args):\n        self.cleanup()\n        tx.output.create_node('call_function', torch._C._set_fwd_grad_enabled, (self.prev_state,), {})\n        return variables.ConstantVariable.create(None)",
    "docstring": "represents torch.autograd.forward_ad._set_fwd_grad_enabled() to enable/disable fwd grad",
    "type": "class",
    "file_path": "pytorch\\torch\\_dynamo\\variables\\ctx_manager.py",
    "ast_data": "ClassDef name:SetFwdGradEnabledContextManager FunctionDef name:create arg:tx arg:target_values arguments arg arg arg Return return:yes Call FunctionDef name:enter arg:self arg:tx arguments arg arg Assign Assign Call Call Call arguments Call Assign Call Return return:yes Call FunctionDef name:exit arg:self arg:tx arguments arg arg arg Call Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "aten_gelu_opset20",
    "source_code": "@onnx_impl(aten.gelu.default, trace_only=True, opset_introduced=20)\ndef aten_gelu_opset20(self: TReal, approximate: str='none') -> TReal:\n    return op20.Gelu(self, approximate=approximate)",
    "docstring": "gelu(Tensor self, *, bool approximate=False) -> Tensor",
    "type": "function",
    "file_path": "pytorch\\torch\\onnx\\_internal\\exporter\\_torchlib\\ops\\nn.py",
    "ast_data": "FunctionDef name:aten_gelu_opset20 arg:self arg:approximate arguments arg arg Return return:yes Call Call"
  },
  {
    "library": "authlib",
    "name": "get_nonce",
    "source_code": "def get_nonce(self):\n    raise NotImplementedError()",
    "docstring": "Get \"nonce\" value of the authorization code object.",
    "type": "method",
    "file_path": "authlib\\authlib\\oidc\\core\\models.py",
    "ast_data": "FunctionDef name:get_nonce arg:self arguments arg Raise Call"
  },
  {
    "library": "scipy",
    "name": "array_namespace",
    "source_code": "def array_namespace(*arrays: Array) -> ModuleType:\n    if not SCIPY_ARRAY_API:\n        return np_compat\n    api_arrays = list(_compliance_scipy(arrays))\n    if api_arrays:\n        return array_api_compat.array_namespace(*api_arrays)\n    return np_compat",
    "docstring": "Get the array API compatible namespace for the arrays xs. Parameters ---------- *arrays : sequence of array_like Arrays used to infer the common namespace. Returns ------- namespace : module Common namespace. Notes ----- Thin wrapper around . 1. Check for the global switch: SCIPY_ARRAY_API. 2. raise exceptions on known-bad subclasses. See its definition for more details. When the global switch is False, it defaults to the namespace. In that case, there is no compliance check. This is a convenience to ease the adoption. Otherwise, arrays must comply with the new rules.",
    "type": "function",
    "file_path": "scipy\\scipy\\_lib\\_array_api_override.py",
    "ast_data": "FunctionDef name:array_namespace arguments arg If Return return:yes Assign Call Call If Return return:yes Call Return return:yes"
  },
  {
    "library": "pandas",
    "name": "fillna",
    "source_code": "def fillna(self, value, limit: int | None=None, inplace: bool=False) -> list[Block]:\n    inplace = validate_bool_kwarg(inplace, 'inplace')\n    if not self._can_hold_na:\n        noop = True\n    else:\n        mask = isna(self.values)\n        mask, noop = validate_putmask(self.values, mask)\n    if noop:\n        return [self.copy(deep=False)]\n    if limit is not None:\n        mask[mask.cumsum(self.values.ndim - 1) > limit] = False\n    if inplace:\n        nbs = self.putmask(mask.T, value)\n    else:\n        nbs = self.where(value, ~mask.T)\n    return extend_blocks(nbs)",
    "docstring": "fillna on the block with the value. If we fail, then convert to block to hold objects instead and try again",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\internals\\blocks.py",
    "ast_data": "FunctionDef name:fillna arg:self arg:value arg:limit arg:inplace arguments arg arg arg arg Assign Call If Assign Assign Call Assign Call If Return return:yes Call If Compare Assign Compare Call If Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "django",
    "name": "__init__",
    "source_code": "def __init__(self, data):\n    self._store = {}\n    if data:\n        for header, value in self._unpack_items(data):\n            self[header] = value",
    "docstring": "Populate the initial data using __setitem__ to ensure values are correctly encoded.",
    "type": "method",
    "file_path": "django\\django\\http\\response.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:data arguments arg arg Assign If For Call Assign"
  },
  {
    "library": "pytorch",
    "name": "tree_is_leaf",
    "source_code": "def tree_is_leaf(tree: PyTree, is_leaf: Optional[Callable[[PyTree], bool]]=None) -> bool:\n    if is_leaf is not None and is_leaf(tree):\n        return True\n    return _get_node_type(tree) not in SUPPORTED_NODES",
    "docstring": "Check if a pytree is a leaf. >>> tree_is_leaf(1) True >>> tree_is_leaf(None) True >>> tree_is_leaf([1, 2, 3]) False >>> tree_is_leaf((1, 2, 3), is_leaf=lambda x: isinstance(x, tuple)) True >>> tree_is_leaf({'a': 1, 'b': 2, 'c': 3}) False >>> tree_is_leaf({'a': 1, 'b': 2, 'c': None}) False",
    "type": "function",
    "file_path": "pytorch\\torch\\utils\\_pytree.py",
    "ast_data": "FunctionDef name:tree_is_leaf arg:tree arg:is_leaf arguments arg arg If BoolOp Compare Call Return return:yes Return return:yes Compare Call"
  },
  {
    "library": "pytorch",
    "name": "_matrix_polynomial_value",
    "source_code": "def _matrix_polynomial_value(poly, x, zero_power=None):\n\n    def transition(curr_poly_val, x, poly_coeff):\n        res = x.matmul(curr_poly_val)\n        res.diagonal(dim1=-2, dim2=-1).add_(poly_coeff.unsqueeze(-1))\n        return res\n    if zero_power is None:\n        zero_power = torch.eye(x.size(-1), x.size(-1), dtype=x.dtype, device=x.device).view(*[1] * len(list(x.shape[:-2])), x.size(-1), x.size(-1))\n    return _polynomial_value(poly, x, zero_power, transition)",
    "docstring": "Evaluates for the (batched) matrix input . Check out function for more details.",
    "type": "function",
    "file_path": "pytorch\\torch\\_lobpcg.py",
    "ast_data": "FunctionDef name:_matrix_polynomial_value arg:poly arg:x arg:zero_power arguments arg arg arg FunctionDef name:transition arg:curr_poly_val arg:x arg:poly_coeff arguments arg arg arg Assign Call Call Call Call Return return:yes If Compare Assign Call Call Call Call Call Call Call Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "__exit__",
    "source_code": "def __exit__(self, unused_type, unused_value, unused_traceback):\n    self.close()",
    "docstring": "Make usable with \"with\" statement.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\lib\\io\\file_io.py",
    "ast_data": "FunctionDef name:__exit__ arg:self arg:unused_type arg:unused_value arg:unused_traceback arguments arg arg arg arg Call"
  },
  {
    "library": "pytorch",
    "name": "lookup_weakrefs",
    "source_code": "def lookup_weakrefs(self, obj):\n    if id(obj) in self._weakrefs:\n        return self._weakrefs[id(obj)]\n    return None",
    "docstring": "Lookup the _weakrefs created in id_ref function for ID_MATCH'd objects",
    "type": "method",
    "file_path": "pytorch\\torch\\_dynamo\\guards.py",
    "ast_data": "FunctionDef name:lookup_weakrefs arg:self arg:obj arguments arg arg If Compare Call Return return:yes Call Return return:no"
  },
  {
    "library": "pytorch",
    "name": "_quantize_and_dequantize_weight",
    "source_code": "def _quantize_and_dequantize_weight(weight: torch.Tensor, weight_qscheme: torch.qscheme, weight_dtype: torch.dtype, weight_scale: torch.Tensor, weight_zero_point: torch.Tensor, weight_axis_int: int) -> torch.Tensor:\n    if weight_qscheme in [torch.per_tensor_affine, torch.per_channel_affine, torch.per_channel_affine_float_qparams]:\n        weight_quant = _quantize_weight(weight, weight_qscheme, weight_dtype, weight_scale, weight_zero_point, weight_axis_int)\n        weight_dequant = weight_quant.dequantize()\n    else:\n        weight_dequant = weight\n    return weight_dequant",
    "docstring": "Quantize and then dequantize the weight based on the quantization parameters",
    "type": "function",
    "file_path": "pytorch\\torch\\ao\\nn\\quantized\\reference\\modules\\utils.py",
    "ast_data": "FunctionDef name:_quantize_and_dequantize_weight arg:weight arg:weight_qscheme arg:weight_dtype arg:weight_scale arg:weight_zero_point arg:weight_axis_int arguments arg arg arg arg arg arg If Compare Assign Call Assign Call Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, iterator_resource, initializer, output_types, output_shapes, output_classes):\n    self._iterator_resource = iterator_resource\n    self._initializer = initializer\n    if output_types is None or output_shapes is None or output_classes is None:\n        raise ValueError(f'All of `output_types`, `output_shapes`, and `output_classes` must be specified to create an iterator. Got `output_types` = {output_types!r}, `output_shapes` = {output_shapes!r}, `output_classes` = {output_classes!r}.')\n    self._element_spec = structure.convert_legacy_structure(output_types, output_shapes, output_classes)\n    self._flat_tensor_shapes = structure.get_flat_tensor_shapes(self._element_spec)\n    self._flat_tensor_types = structure.get_flat_tensor_types(self._element_spec)\n    self._string_handle = gen_dataset_ops.iterator_to_string_handle(self._iterator_resource)\n    self._get_next_call_count = 0\n    ops.add_to_collection(GLOBAL_ITERATORS, self._iterator_resource)",
    "docstring": "Creates a new iterator from the given iterator resource. Note: Most users will not call this initializer directly, and will instead use or . Args: iterator_resource: A scalar representing the iterator. initializer: A that should be run to initialize this iterator. output_types: A (nested) structure of objects corresponding to each component of an element of this iterator. output_shapes: A (nested) structure of objects corresponding to each component of an element of this iterator. output_classes: A (nested) structure of Python objects corresponding to each component of an element of this iterator. Raises: TypeError: If , , or is not specified.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\data\\ops\\iterator_ops.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:iterator_resource arg:initializer arg:output_types arg:output_shapes arg:output_classes arguments arg arg arg arg arg arg Assign Assign If BoolOp Compare Compare Compare Raise Call Assign Call Assign Call Assign Call Assign Call Assign Call"
  },
  {
    "library": "pandas",
    "name": "__init__",
    "source_code": "def __init__(self, name: str='', rules=None) -> None:\n    super().__init__()\n    if not name:\n        name = type(self).__name__\n    self.name = name\n    if rules is not None:\n        self.rules = rules",
    "docstring": "Initializes holiday object with a given set a rules. Normally classes just have the rules defined within them. Parameters ---------- name : str Name of the holiday calendar, defaults to class name rules : array of Holiday objects A set of rules used to create the holidays.",
    "type": "method",
    "file_path": "pandas\\pandas\\tseries\\holiday.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:name arg:rules arguments arg arg arg Call Call If Assign Call Assign If Compare Assign"
  },
  {
    "library": "pytorch",
    "name": "_get_q_dq_nodes",
    "source_code": "def _get_q_dq_nodes(n: Node) -> tuple[Node, Node, Node]:\n    assert _is_dequantize(n)\n    q_node = n.args[0]\n    assert isinstance(q_node, Node)\n    assert _is_quantize(q_node)\n    orig_node = q_node.args[0]\n    assert isinstance(orig_node, Node)\n    return (orig_node, q_node, n)",
    "docstring": "Return a 3-tuple of (orig_node, q_node, dq_node).",
    "type": "function",
    "file_path": "pytorch\\torch\\ao\\quantization\\pt2e\\qat_utils.py",
    "ast_data": "FunctionDef name:_get_q_dq_nodes arg:n arguments arg Call Assign Call Call Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_check_spectrum_and_return_tensor",
    "source_code": "def _check_spectrum_and_return_tensor(self, spectrum):\n    spectrum = linear_operator_util.convert_nonref_to_tensor(spectrum, name='spectrum')\n    if spectrum.shape.ndims is not None:\n        if spectrum.shape.ndims < self.block_depth:\n            raise ValueError(f'Argument `spectrum` must have at least {self.block_depth} dimensions. Received: {spectrum}.')\n    return spectrum",
    "docstring": "Static check of spectrum. Then return version.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\linalg\\linear_operator_circulant.py",
    "ast_data": "FunctionDef name:_check_spectrum_and_return_tensor arg:self arg:spectrum arguments arg arg Assign Call If Compare If Compare Raise Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "register_intern_hook",
    "source_code": "def register_intern_hook(self, hook: ActionHook) -> RemovableHandle:\n    handle = RemovableHandle(self._intern_hooks)\n    self._intern_hooks[handle.id] = hook\n    return handle",
    "docstring": "Registers an intern hook on the exporter. The hook will be called each time a module matches against an :meth: pattern. It should have the following signature:: hook(exporter: PackageExporter, module_name: str) -> None Hooks will be called in order of registration. Returns: :class:: A handle that can be used to remove the added hook by calling ``.",
    "type": "method",
    "file_path": "pytorch\\torch\\package\\package_exporter.py",
    "ast_data": "FunctionDef name:register_intern_hook arg:self arg:hook arguments arg arg Assign Call Assign Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "get_frame",
    "source_code": "def get_frame(self):\n    return self.legendPatch",
    "docstring": "Return the used to frame the legend.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\legend.py",
    "ast_data": "FunctionDef name:get_frame arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "display_tpot",
    "source_code": "def display_tpot():\n    e2e_latency_mean = statistics.mean(latency_list)\n    ttft_mean = statistics.mean(ttft_ms_list)\n    generation_time_mean = e2e_latency_mean - ttft_mean\n    tpot = generation_time_mean / (OUTPUT_TOKEN_LEN - 1)\n    print(f'TPOT: {round(tpot, 2)} ms')",
    "docstring": "Calculate the time per output token.",
    "type": "function",
    "file_path": "tensorflow\\third_party\\xla\\xla\\backends\\cpu\\benchmarks\\e2e\\gemma2\\flax_2b\\benchmark.py",
    "ast_data": "FunctionDef name:display_tpot arguments Assign Call Assign Call Assign Assign Call Call"
  },
  {
    "library": "matplotlib",
    "name": "set_base",
    "source_code": "def set_base(self, base):\n    self._base = float(base)",
    "docstring": "Change the *base* for labeling. .. warning:: Should always match the base used for :class:",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\ticker.py",
    "ast_data": "FunctionDef name:set_base arg:self arg:base arguments arg arg Assign Call"
  },
  {
    "library": "pytorch",
    "name": "FileSystemWriter",
    "source_code": "class FileSystemWriter(_FileSystemWriter, BlockingAsyncStager):\n\n    def __init__(self, path: Union[str, os.PathLike], single_file_per_rank: bool=True, sync_files: bool=True, thread_count: int=1, per_thread_copy_ahead: int=10000000, cache_staged_state_dict: bool=False, overwrite: bool=True, _extensions: Optional[Sequence[StreamTransformExtension]]=None, serialization_format: SerializationFormat=SerializationFormat.TORCH_SAVE) -> None:\n        _FileSystemWriter.__init__(self, path=path, single_file_per_rank=single_file_per_rank, sync_files=sync_files, thread_count=thread_count, per_thread_copy_ahead=per_thread_copy_ahead, overwrite=overwrite, _extensions=_extensions, serialization_format=serialization_format)\n        BlockingAsyncStager.__init__(self, cache_staged_state_dict=cache_staged_state_dict)\n\n    def stage(self, state_dict: STATE_DICT_TYPE) -> STATE_DICT_TYPE:\n        self.per_thread_copy_ahead = 0\n        return super().stage(state_dict)",
    "docstring": "Basic implementation of StorageWriter using file IO. This implementation makes the following assumptions and simplifications: * The checkpoint path is an empty or non-existing directory. * File creation is atomic The checkpoint consist of one file per write request plus a file with the serialized metadata.",
    "type": "class",
    "file_path": "pytorch\\torch\\distributed\\checkpoint\\filesystem.py",
    "ast_data": "ClassDef name:FileSystemWriter FunctionDef name:__init__ arg:self arg:path arg:single_file_per_rank arg:sync_files arg:thread_count arg:per_thread_copy_ahead arg:cache_staged_state_dict arg:overwrite arg:_extensions arg:serialization_format arguments arg arg arg arg arg arg arg arg arg arg Call Call FunctionDef name:stage arg:self arg:state_dict arguments arg arg Assign Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "check",
    "source_code": "def check(self, value):\n    raise NotImplementedError",
    "docstring": "Returns a byte tensor of `` indicating whether each event in value satisfies this constraint.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributions\\constraints.py",
    "ast_data": "FunctionDef name:check arg:self arg:value arguments arg arg Raise"
  },
  {
    "library": "numpy",
    "name": "get_msvcr_replacement",
    "source_code": "def get_msvcr_replacement():\n    msvcr = msvc_runtime_library()\n    return [] if msvcr is None else [msvcr]",
    "docstring": "Replacement for outdated version of get_msvcr from cygwinccompiler",
    "type": "function",
    "file_path": "numpy\\numpy\\distutils\\mingw32ccompiler.py",
    "ast_data": "FunctionDef name:get_msvcr_replacement arguments Assign Call Return return:yes Compare"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, augmented_graph_view: _AugmentedGraphView, options: save_options.SaveOptions):\n    self.augmented_graph_view = augmented_graph_view\n    self.options = options\n    self._trackable_objects, self.node_paths, self.node_ids, self._slot_variables, self.object_names = checkpoint_util.objects_ids_and_slot_variables_and_paths(self.augmented_graph_view)\n    untraced_functions = self.augmented_graph_view.untraced_functions\n    if untraced_functions:\n        logging.info('Found untraced functions such as %s while saving (showing %d of %d). These functions will not be directly callable after loading.', ', '.join(untraced_functions[:_NUM_DISPLAY_UNTRACED_FUNCTIONS]), min(_NUM_DISPLAY_UNTRACED_FUNCTIONS, len(untraced_functions)), len(untraced_functions))\n    self._initialize_save_and_restore_functions()\n    self._initialize_nodes_and_concrete_functions()\n    self.captured_tensor_node_ids = object_identity.ObjectIdentityDictionary()",
    "docstring": "Initializes a SaveableView. Args: augmented_graph_view: A GraphView object. options: A SaveOptions instance.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\saved_model\\save.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:augmented_graph_view arg:options arguments arg arg arg Assign Assign Assign Call Assign If Call Call Call Call Call Call Call Assign Call"
  },
  {
    "library": "authlib",
    "name": "create_client",
    "source_code": "def create_client(self, name):\n    if name in self._clients:\n        return self._clients[name]\n    if name not in self._registry:\n        return None\n    overwrite, config = self._registry[name]\n    client_cls = config.pop('client_cls', None)\n    if client_cls and client_cls.OAUTH_APP_CONFIG:\n        kwargs = client_cls.OAUTH_APP_CONFIG\n        kwargs.update(config)\n    else:\n        kwargs = config\n    kwargs = self.generate_client_kwargs(name, overwrite, **kwargs)\n    framework = self.framework_integration_cls(name, self.cache)\n    if client_cls:\n        client = client_cls(framework, name, **kwargs)\n    elif kwargs.get('request_token_url'):\n        client = self.oauth1_client_cls(framework, name, **kwargs)\n    else:\n        client = self.oauth2_client_cls(framework, name, **kwargs)\n    self._clients[name] = client\n    return client",
    "docstring": "Create or get the given named OAuth client. For instance, the OAuth registry has `` a twitter client, developers may access the client with:: client = oauth.create_client(\"twitter\") :param: name: Name of the remote application :return: OAuth remote app",
    "type": "method",
    "file_path": "authlib\\authlib\\integrations\\base_client\\registry.py",
    "ast_data": "FunctionDef name:create_client arg:self arg:name arguments arg arg If Compare Return return:yes If Compare Return return:no Assign Assign Call If BoolOp Assign Call Assign Assign Call Assign Call If Assign Call If Call Assign Call Assign Call Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_run_conversion",
    "source_code": "def _run_conversion(self):\n    grappler_session_config = config_pb2.ConfigProto()\n    custom_rewriter_config = _get_tensorrt_rewriter_config(conversion_params=self._conversion_params, is_dynamic_op=self._is_dynamic_op, max_batch_size=self._max_batch_size, disable_non_trt_optimizers=self._test_only_disable_non_trt_optimizers, use_implicit_batch=True)\n    grappler_session_config.graph_options.rewrite_options.CopyFrom(custom_rewriter_config)\n    self._converted_graph_def = tf_optimizer.OptimizeGraph(grappler_session_config, self._grappler_meta_graph_def, graph_id=b'tf_graph')\n    self._converted = True",
    "docstring": "Run Grappler's OptimizeGraph() tool to convert the graph.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\compiler\\tensorrt\\trt_convert.py",
    "ast_data": "FunctionDef name:_run_conversion arg:self arguments arg Assign Call Assign Call Call Assign Call Assign"
  },
  {
    "library": "pytorch",
    "name": "tile",
    "source_code": "def tile(self, depth, factor):\n    assert self.loops\n    self.loops[depth] = self.loops[depth].tile(factor)\n    return self.loops[depth]",
    "docstring": "Do loop-tiling at the level with . for (x0 = 0; x0 for (x0 = 0; x0 < x0_end; x0 += factor) See details in Note [tiled_size].",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\codegen\\cpp.py",
    "ast_data": "FunctionDef name:tile arg:self arg:depth arg:factor arguments arg arg arg Assign Call Return return:yes"
  },
  {
    "library": "pandas",
    "name": "categories",
    "source_code": "@property\ndef categories(self) -> Index:\n    return self._categories",
    "docstring": "An `` containing the unique categories allowed. See Also -------- ordered : Whether the categories have an ordered relationship. Examples -------- >>> cat_type = pd.CategoricalDtype(categories=[\"a\", \"b\"], ordered=True) >>> cat_type.categories Index(['a', 'b'], dtype='object')",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\dtypes\\dtypes.py",
    "ast_data": "FunctionDef name:categories arg:self arguments arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "produce_guards",
    "source_code": "def produce_guards(self, *args: Any, **kwargs: Any) -> list[str]:\n    return self.produce_guards_verbose(*args, **kwargs, langs=('python',))[0].exprs",
    "docstring": "Like produce_guards_verbose, but only returns the non-verbose python guard expressions (no verbose guards produced.)",
    "type": "method",
    "file_path": "pytorch\\torch\\fx\\experimental\\symbolic_shapes.py",
    "ast_data": "FunctionDef name:produce_guards arg:self arguments arg arg arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "Mirrored",
    "source_code": "class Mirrored(DistributedDelegate, ds_types.Mirrored):\n\n    def _get_cross_replica(self):\n        return self._get_on_device_or_primary()\n\n    def _as_graph_element(self):\n        obj = self._get()\n        conv_fn = getattr(obj, '_as_graph_element', None)\n        if conv_fn and callable(conv_fn):\n            return conv_fn()\n        return obj\n\n    def _is_mirrored(self):\n        return True",
    "docstring": "Holds a map from replica to values which are kept in sync.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\values.py",
    "ast_data": "ClassDef name:Mirrored FunctionDef name:_get_cross_replica arg:self arguments arg Return return:yes Call FunctionDef name:_as_graph_element arg:self arguments arg Assign Call Assign Call If BoolOp Call Return return:yes Call Return return:yes FunctionDef name:_is_mirrored arg:self arguments arg Return return:yes"
  },
  {
    "library": "scipy",
    "name": "VertexBase",
    "source_code": "class VertexBase(ABC):\n\n    def __init__(self, x, nn=None, index=None):\n        self.x = x\n        self.hash = hash(self.x)\n        if nn is not None:\n            self.nn = set(nn)\n        else:\n            self.nn = set()\n        self.index = index\n\n    def __hash__(self):\n        return self.hash\n\n    def __getattr__(self, item):\n        if item not in ['x_a']:\n            raise AttributeError(f\"{type(self)} object has no attribute '{item}'\")\n        if item == 'x_a':\n            self.x_a = np.array(self.x)\n            return self.x_a\n\n    @abstractmethod\n    def connect(self, v):\n        raise NotImplementedError('This method is only implemented with an associated child of the base class.')\n\n    @abstractmethod\n    def disconnect(self, v):\n        raise NotImplementedError('This method is only implemented with an associated child of the base class.')\n\n    def star(self):\n        self.st = self.nn\n        self.st.add(self)\n        return self.st",
    "docstring": "Base class for a vertex.",
    "type": "class",
    "file_path": "scipy\\scipy\\optimize\\_shgo_lib\\_vertex.py",
    "ast_data": "ClassDef name:VertexBase FunctionDef name:__init__ arg:self arg:x arg:nn arg:index arguments arg arg arg arg Assign Assign Call If Compare Assign Call Assign Call Assign FunctionDef name:__hash__ arg:self arguments arg Return return:yes FunctionDef name:__getattr__ arg:self arg:item arguments arg arg If Compare Raise Call Call If Compare Assign Call Return return:yes FunctionDef name:connect arg:self arg:v arguments arg arg Raise Call FunctionDef name:disconnect arg:self arg:v arguments arg arg Raise Call FunctionDef name:star arg:self arguments arg Assign Call Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "balanced_accuracy_score",
    "source_code": "@validate_params({'y_true': ['array-like'], 'y_pred': ['array-like'], 'sample_weight': ['array-like', None], 'adjusted': ['boolean']}, prefer_skip_nested_validation=True)\ndef balanced_accuracy_score(y_true, y_pred, *, sample_weight=None, adjusted=False):\n    C = confusion_matrix(y_true, y_pred, sample_weight=sample_weight)\n    with np.errstate(divide='ignore', invalid='ignore'):\n        per_class = np.diag(C) / C.sum(axis=1)\n    if np.any(np.isnan(per_class)):\n        warnings.warn('y_pred contains classes not in y_true')\n        per_class = per_class[~np.isnan(per_class)]\n    score = np.mean(per_class)\n    if adjusted:\n        n_classes = len(per_class)\n        chance = 1 / n_classes\n        score -= chance\n        score /= 1 - chance\n    return float(score)",
    "docstring": "Compute the balanced accuracy. The balanced accuracy in binary and multiclass classification problems to deal with imbalanced datasets. It is defined as the average of recall obtained on each class. The best value is 1 and the worst value is 0 when `User Guide accuracy_scoreUser Guide Fundamentals of Machine Learning for Predictive Data Analytics: Algorithms, Worked Examples, and Case Studies `_. Examples -------- >>> from sklearn.metrics import balanced_accuracy_score >>> y_true = [0, 1, 0, 0, 1, 0] >>> y_pred = [0, 1, 0, 0, 0, 1] >>> balanced_accuracy_score(y_true, y_pred) 0.625",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\metrics\\_classification.py",
    "ast_data": "FunctionDef name:balanced_accuracy_score arg:y_true arg:y_pred arguments arg arg arg arg Assign Call With Call Assign Call Call If Call Call Call Assign Call Assign Call If Assign Call Assign Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "_replace_with_hop_pass_helper",
    "source_code": "def _replace_with_hop_pass_helper(gm: torch.fx.GraphModule, graph_signature: Optional[ExportGraphSignature], sequential_split_and_maybe_inline_subgraphs: Callable[[torch.fx.GraphModule, Optional[ExportGraphSignature]], tuple[torch.fx.GraphModule, Optional[ExportGraphSignature]]]) -> tuple[torch.fx.GraphModule, Optional[ExportGraphSignature]]:\n    new_gm, new_signature = sequential_split_and_maybe_inline_subgraphs(gm, graph_signature)\n    for node in new_gm.graph.nodes:\n        if node.op == 'get_attr':\n            subgm = getattr(new_gm, node.target)\n            if not isinstance(subgm, torch.fx.GraphModule):\n                continue\n            new_subgm, _ = _replace_with_hop_pass_helper(subgm, None, sequential_split_and_maybe_inline_subgraphs)\n            setattr(new_gm, node.target, new_subgm)\n    new_gm.recompile()\n    new_gm.graph.lint()\n    return (new_gm, new_signature)",
    "docstring": "Split gm into sub-graph-modules using , and then recursively call itself on each of the submodules.",
    "type": "function",
    "file_path": "pytorch\\torch\\_export\\passes\\replace_with_hop_pass_util.py",
    "ast_data": "FunctionDef name:_replace_with_hop_pass_helper arg:gm arg:graph_signature arg:sequential_split_and_maybe_inline_subgraphs arguments arg arg arg Assign Call For If Compare Assign Call If Call Assign Call Call Call Call Return return:yes"
  },
  {
    "library": "scrapy",
    "name": "add_http_if_no_scheme",
    "source_code": "def add_http_if_no_scheme(url: str) -> str:\n    match = re.match('^\\\\w+://', url, flags=re.IGNORECASE)\n    if not match:\n        parts = urlparse(url)\n        scheme = 'http:' if parts.netloc else 'http://'\n        url = scheme + url\n    return url",
    "docstring": "Add http as the default scheme if it is missing from the url.",
    "type": "function",
    "file_path": "scrapy\\scrapy\\utils\\url.py",
    "ast_data": "FunctionDef name:add_http_if_no_scheme arg:url arguments arg Assign Call If Assign Call Assign Assign Return return:yes"
  },
  {
    "library": "django",
    "name": "alogout",
    "source_code": "async def alogout(request):\n    user = getattr(request, 'auser', None)\n    if user is not None:\n        user = await user()\n    if not getattr(user, 'is_authenticated', True):\n        user = None\n    await user_logged_out.asend(sender=user.__class__, request=request, user=user)\n    await request.session.aflush()\n    if hasattr(request, 'user'):\n        from django.contrib.auth.models import AnonymousUser\n        request.user = AnonymousUser()",
    "docstring": "See logout().",
    "type": "function",
    "file_path": "django\\django\\contrib\\auth\\__init__.py",
    "ast_data": "AsyncFunctionDef name:alogout arg:request arguments arg Assign Call If Compare Assign Call If Call Assign Call Call If Call Assign Call"
  },
  {
    "library": "tensorflow",
    "name": "get_root_dir_with_all_resources",
    "source_code": "@tf_export(v1=['resource_loader.get_root_dir_with_all_resources'])\ndef get_root_dir_with_all_resources():\n    script_dir = get_data_files_path()\n    directories = [script_dir]\n    data_files_dir = ''\n    while True:\n        candidate_dir = directories[-1]\n        current_directory = _os.path.basename(candidate_dir)\n        if '.runfiles' in current_directory:\n            if len(directories) > 1:\n                data_files_dir = directories[-2]\n            break\n        else:\n            new_candidate_dir = _os.path.dirname(candidate_dir)\n            if new_candidate_dir == candidate_dir:\n                break\n            else:\n                directories.append(new_candidate_dir)\n    return data_files_dir or script_dir",
    "docstring": "Get a root directory containing all the data attributes in the build rule. Returns: The path to the specified file present in the data attribute of py_test or py_binary. Falls back to returning the same as get_data_files_path if it fails to detect a bazel runfiles directory.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\platform\\resource_loader.py",
    "ast_data": "FunctionDef name:get_root_dir_with_all_resources arguments Assign Call Assign Assign While Assign Assign Call If Compare If Compare Call Assign Assign Call If Compare Call Return return:yes BoolOp Call"
  },
  {
    "library": "scikit-learn",
    "name": "transform",
    "source_code": "def transform(self, X, *, normalize=True):\n    check_is_fitted(self)\n    X = self._check_non_neg_array(X, reset_n_features=False, whom='LatentDirichletAllocation.transform')\n    doc_topic_distr = self._unnormalized_transform(X)\n    if normalize:\n        doc_topic_distr /= doc_topic_distr.sum(axis=1)[:, np.newaxis]\n    return doc_topic_distr",
    "docstring": "Transform data X according to the fitted model. .. versionchanged:: 0.18 is now normalized. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) Document word matrix. normalize : bool, default=True Whether to normalize the document topic distribution. Returns ------- doc_topic_distr : ndarray of shape (n_samples, n_components) Document topic distribution for X.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\decomposition\\_lda.py",
    "ast_data": "FunctionDef name:transform arg:self arg:X arguments arg arg arg Call Assign Call Assign Call If Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "get_logger",
    "source_code": "def get_logger(name: Optional[str]=None):\n    return _setup_logger(name or _derive_module_name(depth=2))",
    "docstring": "Util function to set up a simple logger that writes into stderr. The loglevel is fetched from the LOGLEVEL env. variable or WARNING as default. The function will use the module name of the caller if no name is provided. Args: name: Name of the logger. If no name provided, the name will be derived from the call stack.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\elastic\\utils\\logging.py",
    "ast_data": "FunctionDef name:get_logger arg:name arguments arg Return return:yes Call BoolOp Call"
  },
  {
    "library": "pytorch",
    "name": "PositiveDefiniteTransform",
    "source_code": "class PositiveDefiniteTransform(Transform):\n    domain = constraints.independent(constraints.real, 2)\n    codomain = constraints.positive_definite\n\n    def __eq__(self, other):\n        return isinstance(other, PositiveDefiniteTransform)\n\n    def _call(self, x):\n        x = LowerCholeskyTransform()(x)\n        return x @ x.mT\n\n    def _inverse(self, y):\n        y = torch.linalg.cholesky(y)\n        return LowerCholeskyTransform().inv(y)",
    "docstring": "Transform from unconstrained matrices to positive-definite matrices.",
    "type": "class",
    "file_path": "pytorch\\torch\\distributions\\transforms.py",
    "ast_data": "ClassDef name:PositiveDefiniteTransform Assign Call Assign FunctionDef name:__eq__ arg:self arg:other arguments arg arg Return return:yes Call FunctionDef name:_call arg:self arg:x arguments arg arg Assign Call Call Return return:yes FunctionDef name:_inverse arg:self arg:y arguments arg arg Assign Call Return return:yes Call Call"
  },
  {
    "library": "matplotlib",
    "name": "set_height",
    "source_code": "def set_height(self, height):\n    self.height = height\n    self.stale = True",
    "docstring": "Set the height of the box. Parameters ---------- height : float",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\offsetbox.py",
    "ast_data": "FunctionDef name:set_height arg:self arg:height arguments arg arg Assign Assign"
  },
  {
    "library": "pytorch",
    "name": "register_log",
    "source_code": "def register_log(setting_name, log_name):\n    log_registry.register_log(setting_name, log_name)",
    "docstring": "Enables a log to be controlled by the env var and user API with the setting_name Args: setting_name: the shorthand name used in the env var and user API log_name: the log name that the setting_name is associated with",
    "type": "function",
    "file_path": "pytorch\\torch\\_logging\\_internal.py",
    "ast_data": "FunctionDef name:register_log arg:setting_name arg:log_name arguments arg arg Call"
  },
  {
    "library": "pytorch",
    "name": "parse_args",
    "source_code": "def parse_args() -> argparse.Namespace:\n    parser = argparse.ArgumentParser(description=' System-level Usage Logger ')\n    parser.add_argument('--debug', action='store_true', help='Enable debug mode')\n    parser.add_argument('--log-interval', type=float, default=5, help='set time interval for logging utilization data, default is 5 seconds')\n    parser.add_argument('--data-collect-interval', type=float, default=1, help='set time interval to collect data, default is 1 second, this should not longer than log_interval')\n    args = parser.parse_args()\n    return args",
    "docstring": "Parse command line arguments. Returns: argparse.Namespace: Parsed arguments.",
    "type": "function",
    "file_path": "pytorch\\tools\\stats\\monitor.py",
    "ast_data": "FunctionDef name:parse_args arguments Assign Call Call Call Call Assign Call Return return:yes"
  },
  {
    "library": "pandas",
    "name": "make_block",
    "source_code": "@final\ndef make_block(self, values, placement: BlockPlacement | None=None, refs: BlockValuesRefs | None=None) -> Block:\n    if placement is None:\n        placement = self._mgr_locs\n    if self.is_extension:\n        values = ensure_block_shape(values, ndim=self.ndim)\n    return new_block(values, placement=placement, ndim=self.ndim, refs=refs)",
    "docstring": "Create a new block, with type inference propagate any values that are not specified",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\internals\\blocks.py",
    "ast_data": "FunctionDef name:make_block arg:self arg:values arg:placement arg:refs arguments arg arg arg arg If Compare Assign If Assign Call Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "__init__",
    "source_code": "def __init__(self):\n    self._conversors = []\n    self._current_line = 0",
    "docstring": "Constructor.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\externals\\_arff.py",
    "ast_data": "FunctionDef name:__init__ arg:self arguments arg Assign Assign"
  },
  {
    "library": "virtualenv",
    "name": "Pep514PythonInfo",
    "source_code": "class Pep514PythonInfo(PythonInfo):\n    pass",
    "docstring": "A Python information acquired from PEP-514.",
    "type": "class",
    "file_path": "virtualenv\\src\\virtualenv\\discovery\\windows\\__init__.py",
    "ast_data": "ClassDef name:Pep514PythonInfo"
  },
  {
    "library": "pytorch",
    "name": "_is_any_annotated",
    "source_code": "def _is_any_annotated(nodes: list[Node]):\n    return any((_is_node_annotated(node) for node in nodes))",
    "docstring": "Given a list of nodes (that represents an operator pattern), check if any of the node is annotated, return True if any of the node is annotated, otherwise return False.",
    "type": "function",
    "file_path": "pytorch\\torch\\ao\\quantization\\quantizer\\x86_inductor_quantizer.py",
    "ast_data": "FunctionDef name:_is_any_annotated arg:nodes arguments arg Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "CsvDatasetV1",
    "source_code": "@tf_export(v1=['data.experimental.CsvDataset'])\nclass CsvDatasetV1(dataset_ops.DatasetV1Adapter):\n\n    @functools.wraps(CsvDatasetV2.__init__, ('__module__', '__name__'))\n    def __init__(self, filenames, record_defaults, compression_type=None, buffer_size=None, header=False, field_delim=',', use_quote_delim=True, na_value='', select_cols=None):\n        wrapped = CsvDatasetV2(filenames, record_defaults, compression_type, buffer_size, header, field_delim, use_quote_delim, na_value, select_cols)\n        super(CsvDatasetV1, self).__init__(wrapped)",
    "docstring": "A Dataset comprising lines from one or more CSV files.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\data\\experimental\\ops\\readers.py",
    "ast_data": "ClassDef name:CsvDatasetV1 FunctionDef name:__init__ arg:self arg:filenames arg:record_defaults arg:compression_type arg:buffer_size arg:header arg:field_delim arg:use_quote_delim arg:na_value arg:select_cols arguments arg arg arg arg arg arg arg arg arg arg Assign Call Call Call Call Call"
  },
  {
    "library": "authlib",
    "name": "validate_aud",
    "source_code": "def validate_aud(self):\n    aud_option = self.options.get('aud')\n    aud = self.get('aud')\n    if not aud_option or not aud:\n        return\n    aud_values = aud_option.get('values')\n    if not aud_values:\n        aud_value = aud_option.get('value')\n        if aud_value:\n            aud_values = [aud_value]\n    if not aud_values:\n        return\n    if isinstance(self['aud'], list):\n        aud_list = self['aud']\n    else:\n        aud_list = [self['aud']]\n    if not any([v in aud_list for v in aud_values]):\n        raise InvalidClaimError('aud')",
    "docstring": "The \"aud\" (audience) claim identifies the recipients that the JWT is intended for. Each principal intended to process the JWT MUST identify itself with a value in the audience claim. If the principal processing the claim does not identify itself with a value in the \"aud\" claim when this claim is present, then the JWT MUST be rejected. In the general case, the \"aud\" value is an array of case- sensitive strings, each containing a StringOrURI value. In the special case when the JWT has one audience, the \"aud\" value MAY be a single case-sensitive string containing a StringOrURI value. The interpretation of audience values is generally application specific. Use of this claim is OPTIONAL.",
    "type": "method",
    "file_path": "authlib\\authlib\\jose\\rfc7519\\claims.py",
    "ast_data": "FunctionDef name:validate_aud arg:self arguments arg Assign Call Assign Call If BoolOp Return return:no Assign Call If Assign Call If Assign If Return return:no If Call Assign Assign If Call Compare Raise Call"
  },
  {
    "library": "pytorch",
    "name": "_ensure_cache_artifacts_registered",
    "source_code": "@staticmethod\ndef _ensure_cache_artifacts_registered() -> None:\n    from torch._dynamo.pgo import PGOCacheArtifact\n    from torch._functorch._aot_autograd.autograd_cache import AOTAutogradCacheArtifact\n    from torch._inductor.codecache import InductorCacheArtifact\n    from torch._inductor.runtime.autotune_cache import AutotuneCacheArtifact",
    "docstring": "When deserializing caches in fresh process, we need to ensure that all cache artifacts are registered in the cache registry. This is done by simply importing all the cache artifacts already wrapped with register call.",
    "type": "method",
    "file_path": "pytorch\\torch\\compiler\\_cache.py",
    "ast_data": "FunctionDef name:_ensure_cache_artifacts_registered arguments"
  },
  {
    "library": "pytorch",
    "name": "_format_exception",
    "source_code": "def _format_exception(e: Exception) -> str:\n    return '\\n'.join(traceback.format_exception(type(e), e, e.__traceback__))",
    "docstring": "Format the full traceback as Python would show it.",
    "type": "function",
    "file_path": "pytorch\\torch\\onnx\\_internal\\exporter\\_core.py",
    "ast_data": "FunctionDef name:_format_exception arg:e arguments arg Return return:yes Call Call Call"
  },
  {
    "library": "pandas",
    "name": "get_column_by_name",
    "source_code": "@abstractmethod\ndef get_column_by_name(self, name: str) -> Column:\n    pass",
    "docstring": "Return the column whose name is the indicated name.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\interchange\\dataframe_protocol.py",
    "ast_data": "FunctionDef name:get_column_by_name arg:self arg:name arguments arg arg"
  },
  {
    "library": "tensorflow",
    "name": "_global_batch_size",
    "source_code": "@property\ndef _global_batch_size(self):\n    return True",
    "docstring": "Global and per-replica batching are equivalent for OneDeviceStrategy.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\one_device_strategy.py",
    "ast_data": "FunctionDef name:_global_batch_size arg:self arguments arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "WorldMetaClassVariable",
    "source_code": "class WorldMetaClassVariable(DistributedVariable):\n\n    @classmethod\n    def is_group_member_type(cls, value):\n        if not cls.is_available():\n            return False\n        from torch.distributed.distributed_c10d import _WorldMeta\n        return type(value) is _WorldMeta\n\n    def var_getattr(self, tx: 'InstructionTranslator', name: str) -> VariableTracker:\n        if name == 'WORLD':\n            source = AttrSource(base=self.source, member='WORLD')\n            install_guard(source.make_guard(GuardBuilder.ID_MATCH))\n            return ProcessGroupVariable(self.value.WORLD)\n        elif name == 'NON_GROUP_MEMBER':\n            source = AttrSource(base=self.source, member='NON_GROUP_MEMBER')\n            install_guard(source.make_guard(GuardBuilder.ID_MATCH))\n            return EnumVariable(self.value.NON_GROUP_MEMBER)\n        return super().var_getattr(tx, name)",
    "docstring": "Tracks torch.distributed.GroupMember and torch.distributed.group, which are instances of the metaclass _WorldMeta.",
    "type": "class",
    "file_path": "pytorch\\torch\\_dynamo\\variables\\distributed.py",
    "ast_data": "ClassDef name:WorldMetaClassVariable FunctionDef name:is_group_member_type arg:cls arg:value arguments arg arg If Call Return return:yes Return return:yes Compare Call FunctionDef name:var_getattr arg:self arg:tx arg:name arguments arg arg arg If Compare Assign Call Call Call Return return:yes Call If Compare Assign Call Call Call Return return:yes Call Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "create_symboolnode",
    "source_code": "def create_symboolnode(self, sym: sympy.Expr) -> SymBool:\n    return SymBool(SymNode(sym, self, bool, None))",
    "docstring": "Create a SymBool object from a sympy boolean expression",
    "type": "method",
    "file_path": "pytorch\\torch\\fx\\experimental\\symbolic_shapes.py",
    "ast_data": "FunctionDef name:create_symboolnode arg:self arg:sym arguments arg arg Return return:yes Call Call"
  },
  {
    "library": "django",
    "name": "capfirst",
    "source_code": "@register.filter(is_safe=True)\n@stringfilter\ndef capfirst(value):\n    return value and value[0].upper() + value[1:]",
    "docstring": "Capitalize the first character of the value.",
    "type": "function",
    "file_path": "django\\django\\template\\defaultfilters.py",
    "ast_data": "FunctionDef name:capfirst arg:value arguments arg Return return:yes BoolOp Call Call"
  },
  {
    "library": "pytorch",
    "name": "current_stream",
    "source_code": "def current_stream(device: _device_t=None) -> Stream:\n    return _current_stream",
    "docstring": "Returns the currently selected :class: for a given device. Args: device (torch.device or int, optional): Ignored. N.B. This function only exists to facilitate device-agnostic code",
    "type": "function",
    "file_path": "pytorch\\torch\\cpu\\__init__.py",
    "ast_data": "FunctionDef name:current_stream arg:device arguments arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_cast",
    "source_code": "def _cast(param, value, param_id=None, param_groups=None, key=None):\n    if isinstance(value, torch.Tensor):\n        return Optimizer._process_value_according_to_param_policy(param, value, param_id, param_groups, key)\n    elif isinstance(value, dict):\n        return {k: _cast(param, v, param_id=param_id, param_groups=param_groups, key=k) for k, v in value.items()}\n    elif isinstance(value, Iterable):\n        return type(value)((_cast(param, v, param_id=param_id, param_groups=param_groups) for v in value))\n    else:\n        return value",
    "docstring": "Make a deep copy of value, casting all tensors to device of param.",
    "type": "method",
    "file_path": "pytorch\\torch\\optim\\optimizer.py",
    "ast_data": "FunctionDef name:_cast arg:param arg:value arg:param_id arg:param_groups arg:key arguments arg arg arg arg arg If Call Return return:yes Call If Call Return return:yes Call Call If Call Return return:yes Call Call Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "get_xaxis",
    "source_code": "def get_xaxis(self):\n    return self.xaxis",
    "docstring": "[*Discouraged*] Return the XAxis instance. .. admonition:: Discouraged The use of this function is discouraged. You should instead directly access the attribute .",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\axes\\_base.py",
    "ast_data": "FunctionDef name:get_xaxis arg:self arguments arg Return return:yes"
  },
  {
    "library": "scipy",
    "name": "_getcol",
    "source_code": "def _getcol(self, j):\n    if self.ndim == 1:\n        raise ValueError('getcol not provided for 1d arrays. Use indexing A[j]')\n    N = self.shape[-1]\n    if j < 0:\n        j += N\n    if j < 0 or j >= N:\n        raise IndexError('index out of bounds')\n    col_selector = self._csc_container(([1], [[j], [0]]), shape=(N, 1), dtype=self.dtype)\n    result = self @ col_selector\n    return result",
    "docstring": "Returns a copy of column j of the array, as an (m x 1) sparse array (column vector).",
    "type": "method",
    "file_path": "scipy\\scipy\\sparse\\_base.py",
    "ast_data": "FunctionDef name:_getcol arg:self arg:j arguments arg arg If Compare Raise Call Assign If Compare If BoolOp Compare Compare Raise Call Assign Call Assign Return return:yes"
  },
  {
    "library": "numpy",
    "name": "ifft2",
    "source_code": "@array_function_dispatch(_fftn_dispatcher)\ndef ifft2(a, s=None, axes=(-2, -1), norm=None, out=None):\n    return _raw_fftnd(a, s, axes, ifft, norm, out=None)",
    "docstring": "Compute the 2-dimensional inverse discrete Fourier Transform. This function computes the inverse of the 2-dimensional discrete Fourier Transform over any number of axes in an M-dimensional array by means of the Fast Fourier Transform (FFT). In other words, `ifftfft2nsaxesifftsaxessaxessaxesnumpy.fftaxesaxessaxesaxesaxesaifft2ifft2ifftnaxesifftnnumpy.fftifftifft2` is called. Examples -------- >>> import numpy as np >>> a = 4 * np.eye(4) >>> np.fft.ifft2(a) array([[1.+0.j, 0.+0.j, 0.+0.j, 0.+0.j], # may vary [0.+0.j, 0.+0.j, 0.+0.j, 1.+0.j], [0.+0.j, 0.+0.j, 1.+0.j, 0.+0.j], [0.+0.j, 1.+0.j, 0.+0.j, 0.+0.j]])",
    "type": "function",
    "file_path": "numpy\\numpy\\fft\\_pocketfft.py",
    "ast_data": "FunctionDef name:ifft2 arg:a arg:s arg:axes arg:norm arg:out arguments arg arg arg arg arg Return return:yes Call Call"
  },
  {
    "library": "scikit-learn",
    "name": "predict_log_proba",
    "source_code": "def predict_log_proba(self, X):\n    scores = self._decision_function(X)\n    log_likelihood = scores - scores.max(axis=1)[:, np.newaxis]\n    return log_likelihood - np.log(np.exp(log_likelihood).sum(axis=1)[:, np.newaxis])",
    "docstring": "Estimate log class probabilities. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) Input data. Returns ------- y_log_proba : ndarray of shape (n_samples, n_classes) Estimated log probabilities.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\discriminant_analysis.py",
    "ast_data": "FunctionDef name:predict_log_proba arg:self arg:X arguments arg arg Assign Call Assign Call Return return:yes Call Call Call"
  },
  {
    "library": "numpy",
    "name": "Kron",
    "source_code": "class Kron(Benchmark):\n\n    def setup(self):\n        self.large_arr = np.random.random((10,) * 4)\n        self.large_mat = np.asmatrix(np.random.random((100, 100)))\n        self.scalar = 7\n\n    def time_arr_kron(self):\n        np.kron(self.large_arr, self.large_arr)\n\n    def time_scalar_kron(self):\n        np.kron(self.large_arr, self.scalar)\n\n    def time_mat_kron(self):\n        np.kron(self.large_mat, self.large_mat)",
    "docstring": "Benchmarks for Kronecker product of two arrays",
    "type": "class",
    "file_path": "numpy\\benchmarks\\benchmarks\\bench_shape_base.py",
    "ast_data": "ClassDef name:Kron FunctionDef name:setup arg:self arguments arg Assign Call Assign Call Call Assign FunctionDef name:time_arr_kron arg:self arguments arg Call FunctionDef name:time_scalar_kron arg:self arguments arg Call FunctionDef name:time_mat_kron arg:self arguments arg Call"
  },
  {
    "library": "scipy",
    "name": "shape_from_header",
    "source_code": "def shape_from_header(self, hdr):\n    mclass = hdr.mclass\n    if mclass == mxFULL_CLASS:\n        shape = tuple(map(int, hdr.dims))\n    elif mclass == mxCHAR_CLASS:\n        shape = tuple(map(int, hdr.dims))\n        if self.chars_as_strings:\n            shape = shape[:-1]\n    elif mclass == mxSPARSE_CLASS:\n        dt = hdr.dtype\n        dims = hdr.dims\n        if not (len(dims) == 2 and dims[0] >= 1 and (dims[1] >= 1)):\n            return ()\n        self.mat_stream.seek(dt.itemsize * (dims[0] - 1), 1)\n        rows = np.ndarray(shape=(), dtype=dt, buffer=self.mat_stream.read(dt.itemsize))\n        self.mat_stream.seek(dt.itemsize * (dims[0] - 1), 1)\n        cols = np.ndarray(shape=(), dtype=dt, buffer=self.mat_stream.read(dt.itemsize))\n        shape = (int(rows), int(cols))\n    else:\n        raise TypeError(f'No reader for class code {mclass}')\n    if self.squeeze_me:\n        shape = tuple([x for x in shape if x != 1])\n    return shape",
    "docstring": "Read the shape of the array described by the header. The file position after this call is unspecified.",
    "type": "method",
    "file_path": "scipy\\scipy\\io\\matlab\\_mio4.py",
    "ast_data": "FunctionDef name:shape_from_header arg:self arg:hdr arguments arg arg Assign If Compare Assign Call Call If Compare Assign Call Call If Assign If Compare Assign Assign If BoolOp Compare Call Compare Compare Return return:no Call Assign Call Call Call Assign Call Call Assign Call Call Raise Call If Assign Call Compare Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "get_angle",
    "source_code": "def get_angle(self):\n    return self._angle",
    "docstring": "Return the angle of the annulus.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\patches.py",
    "ast_data": "FunctionDef name:get_angle arg:self arguments arg Return return:yes"
  },
  {
    "library": "django",
    "name": "__str__",
    "source_code": "def __str__(self):\n    return '%s (%s)' % (self.name, self.driver)",
    "docstring": "Return OGR GetName and Driver for the Data Source.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\gdal\\datasource.py",
    "ast_data": "FunctionDef name:__str__ arg:self arguments arg Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "get_suptitle",
    "source_code": "def get_suptitle(self):\n    text_obj = self._suptitle\n    return '' if text_obj is None else text_obj.get_text()",
    "docstring": "Return the suptitle as string or an empty string if not set.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\figure.py",
    "ast_data": "FunctionDef name:get_suptitle arg:self arguments arg Assign Return return:yes Compare Call"
  },
  {
    "library": "tensorflow",
    "name": "reduce_any_v1",
    "source_code": "@tf_export(v1=['math.reduce_any', 'reduce_any'])\n@dispatch.add_dispatch_support\n@deprecation.deprecated_args(None, 'keep_dims is deprecated, use keepdims instead', 'keep_dims')\ndef reduce_any_v1(input_tensor, axis=None, keepdims=None, name=None, reduction_indices=None, keep_dims=None):\n    axis = deprecation.deprecated_argument_lookup('axis', axis, 'reduction_indices', reduction_indices)\n    keepdims = deprecation.deprecated_argument_lookup('keepdims', keepdims, 'keep_dims', keep_dims)\n    return reduce_any(input_tensor, axis, keepdims, name)",
    "docstring": "Computes of elements across dimensions of a tensor. This is the reduction operation for the elementwise op. Reduces along the dimensions given in . Unless is true, the rank of the tensor is reduced by 1 for each of the entries in , which must be unique. If is true, the reduced dimensions are retained with length 1. If is None, all dimensions are reduced, and a tensor with a single element is returned. For example: >>> x = tf.constant([[True, True], [False, False]]) >>> tf.reduce_any(x) >>> tf.reduce_any(x, 0) >>> tf.reduce_any(x, 1) Args: input_tensor: The boolean tensor to reduce. axis: The dimensions to reduce. If (the default), reduces all dimensions. Must be in the range . keepdims: If true, retains reduced dimensions with length 1. name: A name for the operation (optional). reduction_indices: The old (deprecated) name for axis. keep_dims: Deprecated alias for . Returns: The reduced tensor. @compatibility(numpy) Equivalent to np.any @end_compatibility",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\math_ops.py",
    "ast_data": "FunctionDef name:reduce_any_v1 arg:input_tensor arg:axis arg:keepdims arg:name arg:reduction_indices arg:keep_dims arguments arg arg arg arg arg arg Assign Call Assign Call Return return:yes Call Call Call"
  },
  {
    "library": "matplotlib",
    "name": "json_load",
    "source_code": "def json_load(filename):\n    with open(filename) as fh:\n        return json.load(fh, object_hook=_json_decode)",
    "docstring": "Load a from the JSON file named *filename*. See Also -------- json_dump",
    "type": "function",
    "file_path": "matplotlib\\lib\\matplotlib\\font_manager.py",
    "ast_data": "FunctionDef name:json_load arg:filename arguments arg With Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "LazyInstanceNorm2d",
    "source_code": "class LazyInstanceNorm2d(_LazyNormBase, _InstanceNorm):\n    cls_to_become = InstanceNorm2d\n\n    def _get_no_batch_dim(self):\n        return 3\n\n    def _check_input_dim(self, input):\n        if input.dim() not in (3, 4):\n            raise ValueError(f'expected 3D or 4D input (got {input.dim()}D input)')",
    "docstring": "A :class: module with lazy initialization of the `InstanceNorm2dweightbiasrunning_meanrunning_vartorch.nn.modules.lazy.LazyModuleMixinC(N, C, H, W)(C, H, W)(N, C, H, W)(C, H, W)(N, C, H, W)(C, H, W)` (same shape as input)",
    "type": "class",
    "file_path": "pytorch\\torch\\nn\\modules\\instancenorm.py",
    "ast_data": "ClassDef name:LazyInstanceNorm2d Assign FunctionDef name:_get_no_batch_dim arg:self arguments arg Return return:yes FunctionDef name:_check_input_dim arg:self arg:input arguments arg arg If Compare Call Raise Call Call"
  },
  {
    "library": "tensorflow",
    "name": "add_to_optionally_restored",
    "source_code": "def add_to_optionally_restored(self, var):\n    self._optionally_restored.append(var)",
    "docstring": "Add a variable to the list of optionally restored variables. There are situations where certain variables should be ignored in assertions such as assert_existing_objects_matched(). One example is that of a checkpoint saved with train.Saver(), and restored with train.Checkpoint(): it is possible for the train.Saver() checkpoint to be missing the internal variable, which we want to ignore on restore. Args: var: The variable to treat as optionally restored.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\checkpoint\\checkpoint.py",
    "ast_data": "FunctionDef name:add_to_optionally_restored arg:self arg:var arguments arg arg Call"
  },
  {
    "library": "scikit-learn",
    "name": "get_metadata_routing",
    "source_code": "def get_metadata_routing(self):\n    router = MetadataRouter(owner=self.__class__.__name__).add(splitter=self.cv, method_mapping=MethodMapping().add(caller='fit', callee='split'))\n    return router",
    "docstring": "Get metadata routing of this object. Please check :ref: on how the routing mechanism works. .. versionadded:: 1.4 Returns ------- routing : MetadataRouter A :class: encapsulating routing information.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\linear_model\\_omp.py",
    "ast_data": "FunctionDef name:get_metadata_routing arg:self arguments arg Assign Call Call Call Call Return return:yes"
  },
  {
    "library": "django",
    "name": "add_initial_prefix",
    "source_code": "def add_initial_prefix(self, field_name):\n    return 'initial-%s' % self.add_prefix(field_name)",
    "docstring": "Add an 'initial' prefix for checking dynamic initial values.",
    "type": "method",
    "file_path": "django\\django\\forms\\forms.py",
    "ast_data": "FunctionDef name:add_initial_prefix arg:self arg:field_name arguments arg arg Return return:yes Call"
  },
  {
    "library": "django",
    "name": "_store",
    "source_code": "def _store(self, messages, response, *args, **kwargs):\n    for storage in self.storages:\n        if messages:\n            messages = storage._store(messages, response, remove_oldest=False)\n        elif storage in self._used_storages:\n            storage._store([], response)\n            self._used_storages.remove(storage)\n    return messages",
    "docstring": "Store the messages and return any unstored messages after trying all backends. For each storage backend, any messages not stored are passed on to the next backend.",
    "type": "method",
    "file_path": "django\\django\\contrib\\messages\\storage\\fallback.py",
    "ast_data": "FunctionDef name:_store arg:self arg:messages arg:response arguments arg arg arg arg arg For If Assign Call If Compare Call Call Return return:yes"
  },
  {
    "library": "pandas",
    "name": "_encode_with_my_categories",
    "source_code": "def _encode_with_my_categories(self, other: Categorical) -> Categorical:\n    codes = recode_for_categories(other.codes, other.categories, self.categories, copy=False)\n    return self._from_backing_data(codes)",
    "docstring": "Re-encode another categorical using this Categorical's categories. Notes ----- This assumes we have already checked self._categories_match_up_to_permutation(other).",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\arrays\\categorical.py",
    "ast_data": "FunctionDef name:_encode_with_my_categories arg:self arg:other arguments arg arg Assign Call Return return:yes Call"
  },
  {
    "library": "django",
    "name": "display",
    "source_code": "def display(self):\n    return self.id",
    "docstring": "Return what to display in error messages for this node",
    "type": "method",
    "file_path": "django\\django\\template\\smartif.py",
    "ast_data": "FunctionDef name:display arg:self arguments arg Return return:yes"
  },
  {
    "library": "scipy",
    "name": "finite_precision",
    "source_code": "def finite_precision(self):\n    self.find_lowest_vertex()\n    if self.disp:\n        logging.info(f'Lowest function evaluation = {self.f_lowest}')\n        logging.info(f'Specified minimum = {self.f_min_true}')\n    if self.f_lowest is None:\n        return self.stop_global\n    if self.f_min_true == 0.0:\n        if self.f_lowest <= self.f_tol:\n            self.stop_global = True\n    else:\n        pe = (self.f_lowest - self.f_min_true) / abs(self.f_min_true)\n        if self.f_lowest <= self.f_min_true:\n            self.stop_global = True\n            if abs(pe) >= 2 * self.f_tol:\n                warnings.warn(f'A much lower value than expected f* = {self.f_min_true} was found f_lowest = {self.f_lowest}', stacklevel=3)\n        if pe <= self.f_tol:\n            self.stop_global = True\n    return self.stop_global",
    "docstring": "Stop the algorithm if the final function value is known Specify in options (with ``",
    "type": "method",
    "file_path": "scipy\\scipy\\optimize\\_shgo.py",
    "ast_data": "FunctionDef name:finite_precision arg:self arguments arg Call If Call Call If Compare Return return:yes If Compare If Compare Assign Assign Call If Compare Assign If Compare Call Call If Compare Assign Return return:yes"
  },
  {
    "library": "cherrypy",
    "name": "VirtualHost",
    "source_code": "class VirtualHost(object):\n    default = None\n    'The default WSGI application.\\n\\n    Required.\\n    '\n    use_x_forwarded_host = True\n    'If True (the default), any \"X-Forwarded-Host\"\\n    request header will be used instead of the \"Host\" header. This\\n    is commonly added by HTTP servers (such as Apache) when proxying.'\n    domains = {}\n    'A dict of {host header value: application} pairs.\\n    The incoming \"Host\" request header is looked up in this dict, and,\\n    if a match is found, the corresponding WSGI application will be\\n    called instead of the default. Note that you often need separate\\n    entries for \"example.com\" and \"www.example.com\". In addition, \"Host\"\\n    headers may contain the port number.\\n    '\n\n    def __init__(self, default, domains=None, use_x_forwarded_host=True):\n        self.default = default\n        self.domains = domains or {}\n        self.use_x_forwarded_host = use_x_forwarded_host\n\n    def __call__(self, environ, start_response):\n        domain = environ.get('HTTP_HOST', '')\n        if self.use_x_forwarded_host:\n            domain = environ.get('HTTP_X_FORWARDED_HOST', domain)\n        nextapp = self.domains.get(domain)\n        if nextapp is None:\n            nextapp = self.default\n        return nextapp(environ, start_response)",
    "docstring": "Select a different WSGI application based on the Host header. This can be useful when running multiple sites within one CP server. It allows several domains to point to different applications. For example:: root = Root() RootApp = cherrypy.Application(root) Domain2App = cherrypy.Application(root) SecureApp = cherrypy.Application(Secure()) vhost = cherrypy._cpwsgi.VirtualHost( RootApp, domains={ 'www.domain2.example': Domain2App, 'www.domain2.example:443': SecureApp, }, ) cherrypy.tree.graft(vhost)",
    "type": "class",
    "file_path": "cherrypy\\cherrypy\\_cpwsgi.py",
    "ast_data": "ClassDef name:VirtualHost Assign Assign Assign FunctionDef name:__init__ arg:self arg:default arg:domains arg:use_x_forwarded_host arguments arg arg arg arg Assign Assign BoolOp Assign FunctionDef name:__call__ arg:self arg:environ arg:start_response arguments arg arg arg Assign Call If Assign Call Assign Call If Compare Assign Return return:yes Call"
  },
  {
    "library": "kornia",
    "name": "confidence_threshold",
    "source_code": "def confidence_threshold(self, layer_index: int) -> float:\n    threshold = 0.8 + 0.1 * math.exp(-4.0 * layer_index / self.conf.n_layers)\n    return min(max(threshold, 0), 1)",
    "docstring": "Scaled confidence threshold.",
    "type": "method",
    "file_path": "kornia\\kornia\\feature\\lightglue.py",
    "ast_data": "FunctionDef name:confidence_threshold arg:self arg:layer_index arguments arg arg Assign Call Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "get_max_tuning_duration",
    "source_code": "def get_max_tuning_duration() -> int:\n    return torch._C._cuda_tunableop_get_max_tuning_duration()",
    "docstring": "Get max time to spend tuning a given solution.",
    "type": "function",
    "file_path": "pytorch\\torch\\cuda\\tunable.py",
    "ast_data": "FunctionDef name:get_max_tuning_duration arguments Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "_finalize_mm_configs",
    "source_code": "def _finalize_mm_configs(self, configs: list[BaseConfig]) -> Generator[TritonConfig, None, None]:\n    used: OrderedSet[tuple[int, ...]] = OrderedSet()\n    max_mm_configs = config.test_configs.max_mm_configs\n    for conf in configs:\n        num_warps = min(conf.num_warps, conf.block_m * conf.block_n // 256)\n        key: tuple[int, ...] = (conf.block_m, conf.block_n, conf.block_k, conf.num_stages, num_warps)\n        group_m = getattr(conf, 'group_m', None)\n        if group_m is not None:\n            key += (group_m,)\n        if key not in used and (max_mm_configs is None or len(used) < max_mm_configs):\n            used.add(key)\n            kwargs = {'BLOCK_M': conf.block_m, 'BLOCK_N': conf.block_n, 'BLOCK_K': conf.block_k, 'num_stages': conf.num_stages, 'num_warps': num_warps}\n            if group_m is not None:\n                kwargs['GROUP_M'] = group_m\n            yield self.triton_config(**kwargs)",
    "docstring": "Finalizes configs after scaling, applying additional constraints.",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\template_heuristics.py",
    "ast_data": "FunctionDef name:_finalize_mm_configs arg:self arg:configs arguments arg arg Call Assign For Assign Call Assign Call If Compare If BoolOp Compare BoolOp Compare Compare Call Call Assign If Compare Assign Call"
  },
  {
    "library": "pandas",
    "name": "_scalar_type",
    "source_code": "@property\ndef _scalar_type(self) -> type[DatetimeLikeScalar]:\n    raise AbstractMethodError(self)",
    "docstring": "The scalar associated with this datelike * PeriodArray : Period * DatetimeArray : Timestamp * TimedeltaArray : Timedelta",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\arrays\\datetimelike.py",
    "ast_data": "FunctionDef name:_scalar_type arg:self arguments arg Raise Call"
  },
  {
    "library": "scipy",
    "name": "sosfreqz",
    "source_code": "def sosfreqz(*args, **kwargs):\n    return freqz_sos(*args, **kwargs)",
    "docstring": "Compute the frequency response of a digital filter in SOS format (legacy). .. legacy:: function This function is an alias, provided for backward compatibility. New code should use the function :func:. This function became obsolete from version 1.15.0.",
    "type": "function",
    "file_path": "scipy\\scipy\\signal\\_filter_design.py",
    "ast_data": "FunctionDef name:sosfreqz arguments arg arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "_sig_decl_wrapper",
    "source_code": "def _sig_decl_wrapper(sig: CppSignature | ExecutorchCppSignature) -> str:\n    if isinstance(sig, ExecutorchCppSignature):\n        return sig.decl()\n    returns_type = aten_cpp.returns_type(sig.func.returns).cpp_type()\n    cpp_args = [a.decl() for a in sig.arguments()]\n    cpp_args_str = ', '.join([contextArg.decl()] + cpp_args)\n    sig_decl = f'{returns_type} {sig.name()}({cpp_args_str})'\n    return sig_decl",
    "docstring": "A wrapper function to basically get . For ATen kernel, the codegen has no idea about ET contextArg, so we use this wrapper to add it.",
    "type": "function",
    "file_path": "pytorch\\torchgen\\gen_executorch.py",
    "ast_data": "FunctionDef name:_sig_decl_wrapper arg:sig arguments arg If Call Return return:yes Call Assign Call Call Assign Call Call Assign Call Call Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, bytes_per_pack=0, timeout_seconds=None):\n    pass",
    "docstring": "Creates a CollectiveHints. Args: bytes_per_pack: a non-negative integer. Breaks collective operations into packs of certain size. If it's zero, the value is determined automatically. This only applies to all-reduce with currently. timeout_seconds: a float or None, timeout in seconds. If not None, the collective raises if it takes longer than this timeout. This can be useful when debugging hanging issues. This should only be used for debugging since it creates a new thread for each collective, i.e. an overhead of more threads. This only works for . Raises: ValueError: When arguments have invalid value.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\collective_util.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:bytes_per_pack arg:timeout_seconds arguments arg arg arg"
  },
  {
    "library": "matplotlib",
    "name": "get_offset",
    "source_code": "def get_offset(self):\n    if len(self.locs) == 0:\n        return ''\n    if self.orderOfMagnitude or self.offset:\n        offsetStr = ''\n        sciNotStr = ''\n        if self.offset:\n            offsetStr = self.format_data(self.offset)\n            if self.offset > 0:\n                offsetStr = '+' + offsetStr\n        if self.orderOfMagnitude:\n            if self._usetex or self._useMathText:\n                sciNotStr = self.format_data(10 ** self.orderOfMagnitude)\n            else:\n                sciNotStr = '1e%d' % self.orderOfMagnitude\n        if self._useMathText or self._usetex:\n            if sciNotStr != '':\n                sciNotStr = '\\\\times\\\\mathdefault{%s}' % sciNotStr\n            s = f'${sciNotStr}\\\\mathdefault{{{offsetStr}}}$'\n        else:\n            s = ''.join((sciNotStr, offsetStr))\n        return self.fix_minus(s)\n    return ''",
    "docstring": "Return scientific notation, plus offset.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\ticker.py",
    "ast_data": "FunctionDef name:get_offset arg:self arguments arg If Compare Call Return return:yes If BoolOp Assign Assign If Assign Call If Compare Assign If If BoolOp Assign Call Assign If BoolOp If Compare Assign Assign Assign Call Return return:yes Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "graph_partition",
    "source_code": "def graph_partition(self) -> tuple[list[PartitionType], list[GraphPartitionSignature]]:\n    partitions: list[PartitionType] = []\n    skip_cudagraph = True\n    cur_partition: PartitionType = []\n    skip_cudagraphs = []\n    for node in self.nodes:\n        should_partition = self.should_partition(node)\n        if cur_partition and skip_cudagraph != should_partition:\n            partitions.append(cur_partition)\n            skip_cudagraphs.append(skip_cudagraph)\n            cur_partition = []\n        skip_cudagraph = should_partition\n        cur_partition.append(node)\n    if cur_partition:\n        partitions.append(cur_partition)\n        skip_cudagraphs.append(skip_cudagraph)\n    signatures = self.get_graph_partition_signature(partitions=partitions, skip_cudagraphs=skip_cudagraphs)\n    self.compute_graph_partition_maps(signatures)\n    return (partitions, signatures)",
    "docstring": "Given a list of BaseSchedulerNodes, split into a list of graph partitions and compute partition input/output signatures.",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\scheduler.py",
    "ast_data": "FunctionDef name:graph_partition arg:self arguments arg Assign Assign For Assign Call If BoolOp Compare Call Call Assign Assign Call If Call Call Assign Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "weight_norm",
    "source_code": "def weight_norm(module: T_module, name: str='weight', dim: int=0) -> T_module:\n    WeightNorm.apply(module, name, dim)\n    return module",
    "docstring": "Apply weight normalization to a parameter in the given module. .. math:: \\mathbf{w} = g \\dfrac{\\mathbf{v}}{\\|\\mathbf{v}\\|} Weight normalization is a reparameterization that decouples the magnitude of a weight tensor from its direction. This replaces the parameter specified by :attr: (e.g. `~Module.forwardtorch.nn.utils.parametrizations.weight_normtorch.nn.utils.parametrize.remove_parametrizationstorch.nn.utils.parametrize.cached` before invoking the module in question. Args: module (Module): containing module name (str, optional): name of weight parameter dim (int, optional): dimension over which to compute the norm Returns: The original module with the weight norm hook Example:: >>> m = weight_norm(nn.Linear(20, 40), name='weight') >>> m Linear(in_features=20, out_features=40, bias=True) >>> m.weight_g.size() torch.Size([40, 1]) >>> m.weight_v.size() torch.Size([40, 20])",
    "type": "function",
    "file_path": "pytorch\\torch\\nn\\utils\\weight_norm.py",
    "ast_data": "FunctionDef name:weight_norm arg:module arg:name arg:dim arguments arg arg arg Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_join_modules",
    "source_code": "def _join_modules(module1, module2):\n    if not module1:\n        return module2\n    if not module2:\n        return module1\n    return '%s.%s' % (module1, module2)",
    "docstring": "Concatenate 2 module components. Args: module1: First module to join. module2: Second module to join. Returns: Given two modules aaa.bbb and ccc.ddd, returns a joined module aaa.bbb.ccc.ddd.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\tools\\api\\generator\\create_python_api.py",
    "ast_data": "FunctionDef name:_join_modules arg:module1 arg:module2 arguments arg arg If Return return:yes If Return return:yes Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_RemoveExternalControlEdges",
    "source_code": "def _RemoveExternalControlEdges(self, op: ops.Operation):\n    internal_control_inputs = []\n    external_control_inputs = []\n    for x in op.control_inputs:\n        is_internal_op = False\n        ctxt = x._get_control_flow_context()\n        while ctxt is not None:\n            if ctxt == self:\n                is_internal_op = True\n                break\n            ctxt = ctxt._outer_context\n        if is_internal_op:\n            internal_control_inputs.append(x)\n        else:\n            external_control_inputs.append(x)\n    op._remove_all_control_inputs()\n    op._add_control_inputs(internal_control_inputs)\n    return (internal_control_inputs, external_control_inputs)",
    "docstring": "Remove any external control dependency on this op.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\compiler\\xla\\xla.py",
    "ast_data": "FunctionDef name:_RemoveExternalControlEdges arg:self arg:op arguments arg arg Assign Assign For Assign Assign Call While Compare If Compare Assign Assign If Call Call Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "dispatch_strategy",
    "source_code": "def dispatch_strategy(fn: NativeFunctionWithDifferentiabilityInfo) -> str:\n    if fn.func.is_abstract or (fn.info is not None and any((info.has_derivatives for info in fn.info.values()))):\n        return 'use_derived'\n    else:\n        return 'use_type'",
    "docstring": "How are we going to call the underlying implementation of a declaration? There are two strategies: - use_derived: we want to call the implementation on CPUDoubleType (or a similar, derived Type instance). Because these derived instances deal in Tensors, not Variables (it's a completely different object, so it doesn't dispatch back to VariableType), code on this dispatch path needs to wrap/unwrap tensors. If the derived implementation takes and returns tensors, the implementation is usually differentiable (although we also use the derived dispatch path for non-differentiable functions that we still want to dispatch on the derived Type instance; e.g., size()) - use_type: we want to call the implementation on Type, because it is implemented concretely, and the functions it invokes will get dispatched back to VariableType (which will ensure that they are differentiable.)",
    "type": "function",
    "file_path": "pytorch\\torchgen\\api\\autograd.py",
    "ast_data": "FunctionDef name:dispatch_strategy arg:fn arguments arg If BoolOp BoolOp Compare Call Call Return return:yes Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_init_from_proto",
    "source_code": "def _init_from_proto(self, variable_def, import_scope=None):\n    assert isinstance(variable_def, variable_pb2.VariableDef)\n    g = ops.get_default_graph()\n    self._variable = g.as_graph_element(ops.prepend_name_scope(variable_def.variable_name, import_scope=import_scope))\n    self._name = self._variable.name\n    self._initializer_op = g.as_graph_element(ops.prepend_name_scope(variable_def.initializer_name, import_scope=import_scope))\n    if hasattr(variable_def, 'initial_value_name') and variable_def.initial_value_name:\n        self._initial_value = g.as_graph_element(ops.prepend_name_scope(variable_def.initial_value_name, import_scope=import_scope))\n    else:\n        self._initial_value = None\n    synchronization, aggregation, trainable = variables.validate_synchronization_aggregation_trainable(variable_def.synchronization, variable_def.aggregation, variable_def.trainable, variable_def.variable_name)\n    self._synchronization = synchronization\n    self._aggregation = aggregation\n    self._trainable = trainable\n    self._snapshot = g.as_graph_element(ops.prepend_name_scope(variable_def.snapshot_name, import_scope=import_scope))\n    if variable_def.HasField('save_slice_info_def'):\n        self._save_slice_info = variables.Variable.SaveSliceInfo(save_slice_info_def=variable_def.save_slice_info_def, import_scope=import_scope)\n    else:\n        self._save_slice_info = None\n    self._caching_device = None\n    self._constraint = None",
    "docstring": "Recreates the Variable object from a protocol buffer. Args: variable_def: protocol buffer, describing a variable whose nodes already exists in the graph. import_scope: Optional . Name scope to add.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\ref_variable.py",
    "ast_data": "FunctionDef name:_init_from_proto arg:self arg:variable_def arg:import_scope arguments arg arg arg Call Assign Call Assign Call Call Assign Assign Call Call If BoolOp Call Assign Call Call Assign Assign Call Assign Assign Assign Assign Call Call If Call Assign Call Assign Assign Assign"
  },
  {
    "library": "tensorflow",
    "name": "train_on_batch",
    "source_code": "def train_on_batch(model, inputs, targets, sample_weights=None, output_loss_metrics=None):\n    inputs = training_utils_v1.cast_to_model_input_dtypes(inputs, model)\n    outs, total_loss, output_losses, masks = _process_single_batch(model, inputs, targets, sample_weights=sample_weights, training=True, output_loss_metrics=output_loss_metrics)\n    if not isinstance(outs, list):\n        outs = [outs]\n    metrics_results = _eager_metrics_fn(model, outs, targets, sample_weights=sample_weights, masks=masks)\n    total_loss = nest.flatten(total_loss)\n    return {'total_loss': total_loss, 'output_losses': output_losses, 'metrics': metrics_results}",
    "docstring": "Calculates the loss and gradient updates for one input batch. Args: model: Model whose loss has to be calculated. inputs: Input batch data. targets: Target batch data. sample_weights: Sample weight batch data. output_loss_metrics: List of metrics that are used to aggregated output loss values. Returns: Dict with three items: 'total_loss': list with a single tensor for overall loss, 'output_losses': list of tensors for loss corresponding to each of the model output. Could be a empty list when model has only one output. 'metrics': list of tensors for metric specified.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\training_eager_v1.py",
    "ast_data": "FunctionDef name:train_on_batch arg:model arg:inputs arg:targets arg:sample_weights arg:output_loss_metrics arguments arg arg arg arg arg Assign Call Assign Call If Call Assign Assign Call Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "key_dtype",
    "source_code": "@property\ndef key_dtype(self):\n    return self._key_dtype",
    "docstring": "The table key dtype.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\lookup_ops.py",
    "ast_data": "FunctionDef name:key_dtype arg:self arguments arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_update_qconfig_for_qat",
    "source_code": "def _update_qconfig_for_qat(qconfig_mapping: QConfigMapping, backend_config: BackendConfig):\n    module_to_qat_module_class = get_module_to_qat_module(backend_config)\n    object_type_dict = qconfig_mapping.object_type_qconfigs\n    new_object_type_dict = object_type_dict.copy()\n    for k, v in new_object_type_dict.items():\n        if k in module_to_qat_module_class:\n            object_type_dict[module_to_qat_module_class[k]] = v",
    "docstring": "Update the qconfig_mapping to account for module swaps during QAT. During QAT we perform a module swap on the nn.Module types to the corresponding nn.qat.modules types.",
    "type": "function",
    "file_path": "pytorch\\torch\\ao\\quantization\\fx\\qconfig_mapping_utils.py",
    "ast_data": "FunctionDef name:_update_qconfig_for_qat arg:qconfig_mapping arg:backend_config arguments arg arg Assign Call Assign Assign Call For Call If Compare Assign"
  },
  {
    "library": "scrapy",
    "name": "_send_pending_requests",
    "source_code": "def _send_pending_requests(self) -> None:\n    while self._pending_request_stream_pool and self.metadata['active_streams'] < self.allowed_max_concurrent_streams and self.h2_connected:\n        self.metadata['active_streams'] += 1\n        stream = self._pending_request_stream_pool.popleft()\n        stream.initiate_request()\n        self._write_to_transport()",
    "docstring": "Initiate all pending requests from the deque following FIFO We make sure that at any time {allowed_max_concurrent_streams} streams are active.",
    "type": "method",
    "file_path": "scrapy\\scrapy\\core\\http2\\protocol.py",
    "ast_data": "FunctionDef name:_send_pending_requests arg:self arguments arg While BoolOp Compare Assign Call Call Call"
  },
  {
    "library": "numpy",
    "name": "setxor1d",
    "source_code": "@array_function_dispatch(_setxor1d_dispatcher)\ndef setxor1d(ar1, ar2, assume_unique=False):\n    if not assume_unique:\n        ar1 = unique(ar1)\n        ar2 = unique(ar2)\n    aux = np.concatenate((ar1, ar2), axis=None)\n    if aux.size == 0:\n        return aux\n    aux.sort()\n    flag = np.concatenate(([True], aux[1:] != aux[:-1], [True]))\n    return aux[flag[1:] & flag[:-1]]",
    "docstring": "Find the set exclusive-or of two arrays. Return the sorted, unique values that are in only one (not both) of the input arrays. Parameters ---------- ar1, ar2 : array_like Input arrays. assume_unique : bool If True, the input arrays are both assumed to be unique, which can speed up the calculation. Default is False. Returns ------- setxor1d : ndarray Sorted 1D array of unique values that are in only one of the input arrays. Examples -------- >>> import numpy as np >>> a = np.array([1, 2, 3, 2, 4]) >>> b = np.array([2, 3, 5, 7, 5]) >>> np.setxor1d(a,b) array([1, 4, 5, 7])",
    "type": "function",
    "file_path": "numpy\\numpy\\lib\\_arraysetops_impl.py",
    "ast_data": "FunctionDef name:setxor1d arg:ar1 arg:ar2 arg:assume_unique arguments arg arg arg If Assign Call Assign Call Assign Call If Compare Return return:yes Call Assign Call Compare Return return:yes Call"
  },
  {
    "library": "django",
    "name": "validate_key",
    "source_code": "def validate_key(self, key):\n    for warning in memcache_key_warnings(key):\n        warnings.warn(warning, CacheKeyWarning)",
    "docstring": "Warn about keys that would not be portable to the memcached backend. This encourages (but does not force) writing backend-portable cache code.",
    "type": "method",
    "file_path": "django\\django\\core\\cache\\backends\\base.py",
    "ast_data": "FunctionDef name:validate_key arg:self arg:key arguments arg arg For Call Call"
  },
  {
    "library": "tensorflow",
    "name": "mark_finished",
    "source_code": "def mark_finished(self):\n    with self._queue_lock:\n        if self._inflight_closure_count < 1:\n            raise AssertionError('There is no inflight closures to mark_finished.')\n        self.inflight_closure_count -= 1\n        if self._inflight_closure_count == 0:\n            self._no_inflight_closure_condition.notify_all()\n        if self._queue.empty() and self._inflight_closure_count == 0:\n            self._stop_waiting_condition.notify_all()\n        self._watchdog.report_closure_done()",
    "docstring": "Let the queue know that a closure has been successfully executed.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\coordinator\\cluster_coordinator.py",
    "ast_data": "FunctionDef name:mark_finished arg:self arguments arg With If Compare Raise Call If Compare Call If BoolOp Call Compare Call Call"
  },
  {
    "library": "tensorflow",
    "name": "flow",
    "source_code": "@property\ndef flow(self):\n    return self._flow",
    "docstring": "For compatibility; flows are not meaningful when eager is enabled.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\tensor_array_ops.py",
    "ast_data": "FunctionDef name:flow arg:self arguments arg Return return:yes"
  },
  {
    "library": "sphinx",
    "name": "inheritance_diagram",
    "source_code": "class inheritance_diagram(graphviz):\n    pass",
    "docstring": "A docutils node to use as a placeholder for the inheritance diagram.",
    "type": "class",
    "file_path": "sphinx\\sphinx\\ext\\inheritance_diagram.py",
    "ast_data": "ClassDef name:inheritance_diagram"
  },
  {
    "library": "django",
    "name": "setX",
    "source_code": "def setX(self, index, value):\n    self.setOrdinate(0, index, value)",
    "docstring": "Set X with the value at the given index.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\geos\\coordseq.py",
    "ast_data": "FunctionDef name:setX arg:self arg:index arg:value arguments arg arg arg Call"
  },
  {
    "library": "pytorch",
    "name": "args",
    "source_code": "@args.setter\ndef args(self, a: tuple[Argument, ...]) -> None:\n    self._update_args_kwargs(a, self._kwargs)",
    "docstring": "Set the tuple of arguments to this Node. The interpretation of arguments depends on the node's opcode. See the `` docstring for more information.",
    "type": "method",
    "file_path": "pytorch\\torch\\fx\\node.py",
    "ast_data": "FunctionDef name:args arg:self arg:a arguments arg arg Call"
  },
  {
    "library": "pytorch",
    "name": "_load_dispatch_table",
    "source_code": "@classmethod\ndef _load_dispatch_table(cls, custom_dispatch_table=None) -> None:\n    if getattr(cls, 'SPARSE_DISPATCH', None) is None:\n        cls.SPARSE_DISPATCH = {torch.ops.aten.values: semi_sparse_values, torch.ops.aten.indices: semi_sparse_indices, torch.ops.aten.is_same_size: fallback_dispatcher, torch.ops.aten.detach_: fallback_dispatcher, torch.ops.aten.detach: semi_sparse_detach, torch.ops.aten.t: semi_sparse_t, torch.ops.aten.view: semi_sparse_view, torch.ops.aten.mm: semi_sparse_mm, torch.ops.aten.matmul: semi_sparse_mm, torch.ops.aten.addmm: semi_sparse_addmm, torch.ops.aten.linear: semi_sparse_linear, torch.ops.aten._to_copy: fallback_dispatcher, torch.ops.aten._scaled_mm: semi_sparse_scaled_mm}\n        if custom_dispatch_table is not None:\n            cls.SPARSE_DISPATCH.update(custom_dispatch_table)",
    "docstring": "Loads the op overload sparse dispatch table for the current class.",
    "type": "method",
    "file_path": "pytorch\\torch\\sparse\\semi_structured.py",
    "ast_data": "FunctionDef name:_load_dispatch_table arg:cls arg:custom_dispatch_table arguments arg arg If Compare Call Assign If Compare Call"
  },
  {
    "library": "matplotlib",
    "name": "_make_verts_for_region",
    "source_code": "def _make_verts_for_region(self, t, f1, f2, idx0, idx1):\n    t_slice = t[idx0:idx1]\n    f1_slice = f1[idx0:idx1]\n    f2_slice = f2[idx0:idx1]\n    if self._step is not None:\n        step_func = cbook.STEP_LOOKUP_MAP['steps-' + self._step]\n        t_slice, f1_slice, f2_slice = step_func(t_slice, f1_slice, f2_slice)\n    if self._interpolate:\n        start = self._get_interpolating_points(t, f1, f2, idx0)\n        end = self._get_interpolating_points(t, f1, f2, idx1)\n    else:\n        start = (t_slice[0], f2_slice[0])\n        end = (t_slice[-1], f2_slice[-1])\n    pts = np.concatenate((np.asarray([start]), np.stack((t_slice, f1_slice), axis=-1), np.asarray([end]), np.stack((t_slice, f2_slice), axis=-1)[::-1]))\n    return self._fix_pts_xy_order(pts)",
    "docstring": "Make ``.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\collections.py",
    "ast_data": "FunctionDef name:_make_verts_for_region arg:self arg:t arg:f1 arg:f2 arg:idx0 arg:idx1 arguments arg arg arg arg arg arg Assign Assign Assign If Compare Assign Assign Call If Assign Call Assign Call Assign Assign Assign Call Call Call Call Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "ceil_to_int",
    "source_code": "def ceil_to_int(self, x: T, dtype: torch.dtype) -> T:\n    raise NotImplementedError",
    "docstring": "Convert x to dtype with ceiling semantics. See also trunc_to_int.",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\ops_handler.py",
    "ast_data": "FunctionDef name:ceil_to_int arg:self arg:x arg:dtype arguments arg arg arg Raise"
  },
  {
    "library": "tensorflow",
    "name": "_py_list_pop",
    "source_code": "def _py_list_pop(list_, i):\n    if i is None:\n        x = list_.pop()\n    else:\n        x = list_.pop(i)\n    return (list_, x)",
    "docstring": "Overload of list_pop that executes a Python list append.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\autograph\\operators\\data_structures.py",
    "ast_data": "FunctionDef name:_py_list_pop arg:list_ arg:i arguments arg arg If Compare Assign Call Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "less_equal",
    "source_code": "def less_equal(a, b):\n    return _maybe_static(a) <= _maybe_static(b)",
    "docstring": "A version of tf.less_equal that eagerly evaluates if possible.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\numpy_ops\\np_utils.py",
    "ast_data": "FunctionDef name:less_equal arg:a arg:b arguments arg arg Return return:yes Compare Call Call"
  },
  {
    "library": "tensorflow",
    "name": "execute_fn_for_device",
    "source_code": "@tf_export('__internal__.execute_fn_for_device', v1=[])\ndef execute_fn_for_device(device_branch_fns, default_fn, name='execute_fn'):\n    is_in_xla = util.GraphOrParentsInXlaContext(ops.get_default_graph())\n    if is_in_xla:\n        return default_fn()\n    device_branch_fns_upper = {k.upper(): v for k, v in device_branch_fns.items()}\n    branch_fns = list(device_branch_fns_upper.values())\n    devices = list(device_branch_fns_upper.keys())\n    device_index = gen_functional_ops.device_index(device_names=devices)\n    return _indexed_case_helper(branch_fns, default_fn, device_index, name, lower_using_switch_merge=False)",
    "docstring": "Executes one of the provided callables based on the device placement. This API is used when the implementations for high level function depend on the underlying device placement. It takes a dictionary of device type to callables. The device type includes \"CPU\", \"GPU\", \"TPU\", etc. When the type of the device where to run this op matches the key in 'device_branch_fns', the corresponding callable is executed, falling back to 'default_fn' if none matches. **Example:** 'r' is evaluated as 1 when it runs on CPU, 2 running on GPU, 1 running on any other device types. Args: device_branch_fns: a dictionary of device types to the callables. Each callable must return a matching structure of tensors. default_fn: fallback callable when the underlying device does not match any key in the 'device_branch_fns'. name: A name for this operation (optional). Returns: The tensors returned by the callable identified by device type during execution, or those returned by 'default_fn' if no key matches.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\control_flow_switch_case.py",
    "ast_data": "FunctionDef name:execute_fn_for_device arg:device_branch_fns arg:default_fn arg:name arguments arg arg arg Assign Call Call If Return return:yes Call Assign Call Call Assign Call Call Assign Call Call Assign Call Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, shape=None, dtype=dtypes.float32, indices_dtype=dtypes.int64, dense_shape_dtype=None, indices_shape=None):\n    self._shape = tensor_shape.as_shape(shape)\n    self._values_dtype = dtypes.as_dtype(dtype)\n    self._indices_dtype = dtypes.as_dtype(indices_dtype)\n    if dense_shape_dtype is None:\n        self._dense_shape_dtype = None\n    else:\n        self._dense_shape_dtype = dtypes.as_dtype(dense_shape_dtype)\n    self._indices_shape = tensor_shape.as_shape(indices_shape).with_rank(1)",
    "docstring": "Constructs a type specification for a . Args: shape: The dense shape of the , or to allow any dense shape. dtype: of values in the . indices_dtype: of the in the . One of or . dense_shape_dtype: of the in the . One of , , or (if the has no tensor). indices_shape: The shape of the component, which indicates how many slices are in the .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\indexed_slices.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:shape arg:dtype arg:indices_dtype arg:dense_shape_dtype arg:indices_shape arguments arg arg arg arg arg arg Assign Call Assign Call Assign Call If Compare Assign Assign Call Assign Call Call"
  },
  {
    "library": "sphinx",
    "name": "_find_themes",
    "source_code": "@staticmethod\ndef _find_themes(theme_path: Path) -> dict[str, Path]:\n    themes: dict[str, Path] = {}\n    if not theme_path.is_dir():\n        return themes\n    for pathname in theme_path.iterdir():\n        entry = pathname.name\n        if pathname.is_file() and pathname.suffix.lower() == '.zip':\n            if _is_archived_theme(pathname):\n                themes[pathname.stem] = pathname\n            else:\n                logger.warning(__('file %r on theme path is not a valid zipfile or contains no theme'), entry)\n        else:\n            toml_path = pathname / _THEME_TOML\n            conf_path = pathname / _THEME_CONF\n            if toml_path.is_file() or conf_path.is_file():\n                themes[entry] = pathname\n    return themes",
    "docstring": "Search themes from specified directory.",
    "type": "method",
    "file_path": "sphinx\\sphinx\\theming.py",
    "ast_data": "FunctionDef name:_find_themes arg:theme_path arguments arg If Call Return return:yes For Call Assign If BoolOp Call Compare Call If Call Assign Call Call Assign Assign If BoolOp Call Call Assign Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "OptimStateDictConfig",
    "source_code": "@dataclass\nclass OptimStateDictConfig:\n    offload_to_cpu: bool = True",
    "docstring": "``)",
    "type": "class",
    "file_path": "pytorch\\torch\\distributed\\fsdp\\api.py",
    "ast_data": "ClassDef name:OptimStateDictConfig"
  },
  {
    "library": "scikit-learn",
    "name": "_bistochastic_normalize",
    "source_code": "def _bistochastic_normalize(X, max_iter=1000, tol=1e-05):\n    X = make_nonnegative(X)\n    X_scaled = X\n    for _ in range(max_iter):\n        X_new, _, _ = _scale_normalize(X_scaled)\n        if issparse(X):\n            dist = norm(X_scaled.data - X.data)\n        else:\n            dist = norm(X_scaled - X_new)\n        X_scaled = X_new\n        if dist is not None and dist < tol:\n            break\n    return X_scaled",
    "docstring": "Normalize rows and columns of `` simultaneously so that all rows sum to one constant and all columns sum to a different constant.",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\cluster\\_bicluster.py",
    "ast_data": "FunctionDef name:_bistochastic_normalize arg:X arg:max_iter arg:tol arguments arg arg arg Assign Call Assign For Call Assign Call If Call Assign Call Assign Call Assign If BoolOp Compare Compare Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_get_torch_export_args",
    "source_code": "def _get_torch_export_args(args: tuple[Any, ...], kwargs: dict[str, Any] | None) -> tuple[tuple[Any, ...], dict[str, Any] | None]:\n    if not kwargs and args and isinstance(args[-1], dict):\n        kwargs = args[-1]\n        args = args[:-1]\n    return (args, kwargs)",
    "docstring": "Obtain the arguments for torch.onnx.export from the model and the input arguments.",
    "type": "function",
    "file_path": "pytorch\\torch\\onnx\\utils.py",
    "ast_data": "FunctionDef name:_get_torch_export_args arg:args arg:kwargs arguments arg arg If BoolOp Call Assign Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "experimental_as_proto",
    "source_code": "def experimental_as_proto(self) -> struct_pb2.TensorSpecProto:\n    return struct_pb2.TensorSpecProto(shape=self.shape.experimental_as_proto(), dtype=self.dtype.experimental_as_proto().datatype, name=self.name)",
    "docstring": "Returns a proto representation of the TensorSpec instance.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\tensor.py",
    "ast_data": "FunctionDef name:experimental_as_proto arg:self arguments arg Return return:yes Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "output_size",
    "source_code": "@property\ndef output_size(self):\n    raise NotImplementedError('Abstract method')",
    "docstring": "Integer or TensorShape: size of outputs produced by this cell.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\layers\\legacy_rnn\\rnn_cell_impl.py",
    "ast_data": "FunctionDef name:output_size arg:self arguments arg Raise Call"
  },
  {
    "library": "scikit-learn",
    "name": "predict",
    "source_code": "def predict(self, X):\n    check_is_fitted(self)\n    return self.classes_[np.argmax(self.predict_proba(X), axis=1)]",
    "docstring": "Predict the target of new samples. The predicted class is the class that has the highest probability, and can thus be different from the prediction of the uncalibrated classifier. Parameters ---------- X : array-like of shape (n_samples, n_features) The samples, as accepted by . Returns ------- C : ndarray of shape (n_samples,) The predicted class.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\calibration.py",
    "ast_data": "FunctionDef name:predict arg:self arg:X arguments arg arg Call Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "fit",
    "source_code": "def fit(self, model, x=None, y=None, batch_size=None, epochs=1, verbose=1, callbacks=None, validation_split=0.0, validation_data=None, shuffle=True, class_weight=None, sample_weight=None, initial_epoch=0, steps_per_epoch=None, validation_steps=None, validation_freq=1, **kwargs):\n    raise NotImplementedError()",
    "docstring": "Train the model with the inputs and targets.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\training_utils_v1.py",
    "ast_data": "FunctionDef name:fit arg:self arg:model arg:x arg:y arg:batch_size arg:epochs arg:verbose arg:callbacks arg:validation_split arg:validation_data arg:shuffle arg:class_weight arg:sample_weight arg:initial_epoch arg:steps_per_epoch arg:validation_steps arg:validation_freq arguments arg arg arg arg arg arg arg arg arg arg arg arg arg arg arg arg arg arg Raise Call"
  },
  {
    "library": "tensorflow",
    "name": "_matmul_3d_with_batch_dim_folding",
    "source_code": "def _matmul_3d_with_batch_dim_folding(a, b, **kwargs):\n    reshaped_a = array_ops.expand_dims(a.values, 1)\n    reshaped_b = array_ops.repeat(b, a.row_lengths(), axis=0)\n    flat_result = math_ops.matmul(reshaped_a, reshaped_b, **kwargs)\n    return a.with_values(array_ops.squeeze(flat_result, axis=1))",
    "docstring": "Multiply batches of 2D matrices where only is ragged. Args: a: A RaggedTensor with . (ragged_rank must be 1.) b: A Tensor with **kwargs: Additional arguments for (e.g. transpose_a). transpose_a and adjoint_a must not be true. Returns: A RaggedTensor with `shape=[B, (I), K].",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\ragged_math_ops.py",
    "ast_data": "FunctionDef name:_matmul_3d_with_batch_dim_folding arg:a arg:b arguments arg arg arg Assign Call Assign Call Call Assign Call Return return:yes Call Call"
  },
  {
    "library": "matplotlib",
    "name": "update_from_data_x",
    "source_code": "def update_from_data_x(self, x, ignore=None):\n    x = np.ravel(x)\n    self.update_from_data_xy(np.array([x, x]).T, ignore=ignore, updatey=False)",
    "docstring": "Update the x-bounds of the based on the passed in data. After updating, the bounds will have positive *width*, and *x0* will be the minimal value. Parameters ---------- x : Array of x-values. ignore : bool, optional - When `BboxBboxignore`.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\transforms.py",
    "ast_data": "FunctionDef name:update_from_data_x arg:self arg:x arg:ignore arguments arg arg arg Assign Call Call Call"
  },
  {
    "library": "scipy",
    "name": "BoxBetts",
    "source_code": "class BoxBetts(Benchmark):\n\n    def __init__(self, dimensions=3):\n        Benchmark.__init__(self, dimensions)\n        self._bounds = ([0.9, 1.2], [9.0, 11.2], [0.9, 1.2])\n        self.global_optimum = [[1.0, 10.0, 1.0]]\n        self.fglob = 0.0\n\n    def fun(self, x, *args):\n        self.nfev += 1\n        i = arange(1, 11)\n        g = exp(-0.1 * i * x[0]) - exp(-0.1 * i * x[1]) - (exp(-0.1 * i) - exp(-i)) * x[2]\n        return sum(g ** 2)",
    "docstring": "BoxBetts objective function. The BoxBetts global optimization problem is a multimodal minimization problem defined as follows .. math:: f_{\\text{BoxBetts}}(x) = \\sum_{i=1}^k g(x_i)^2 Where, in this exercise: .. math:: g(x) = e^{-0.1i x_1} - e^{-0.1i x_2} - x_3\\left[e^{-0.1i} - e^{-i}\\right] And :math:. Here, :math:. *Global optimum*: :math: for :math: .. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions For Global Optimization Problems Int. Journal of Mathematical Modelling and Numerical Optimisation, 2013, 4, 150-194.",
    "type": "class",
    "file_path": "scipy\\benchmarks\\benchmarks\\go_benchmark_functions\\go_funcs_B.py",
    "ast_data": "ClassDef name:BoxBetts FunctionDef name:__init__ arg:self arg:dimensions arguments arg arg Call Assign Assign Assign FunctionDef name:fun arg:self arg:x arguments arg arg arg Assign Call Assign Call Call Call Call Return return:yes Call"
  },
  {
    "library": "numpy",
    "name": "masked_inside",
    "source_code": "def masked_inside(x, v1, v2, copy=True):\n    if v2 < v1:\n        v1, v2 = (v2, v1)\n    xf = filled(x)\n    condition = (xf >= v1) & (xf <= v2)\n    return masked_where(condition, x, copy=copy)",
    "docstring": "Mask an array inside a given interval. Shortcut to `conditionxv1v2` doesn't matter. >>> ma.masked_inside(x, 0.3, -0.3) masked_array(data=[0.31, 1.2, --, --, -0.4, -1.1], mask=[False, False, True, True, False, False], fill_value=1e+20)",
    "type": "function",
    "file_path": "numpy\\numpy\\ma\\core.py",
    "ast_data": "FunctionDef name:masked_inside arg:x arg:v1 arg:v2 arg:copy arguments arg arg arg arg If Compare Assign Assign Call Assign Compare Compare Return return:yes Call"
  },
  {
    "library": "cryptography",
    "name": "public_key",
    "source_code": "@abc.abstractmethod\ndef public_key(self) -> Ed448PublicKey:\n    pass",
    "docstring": "The Ed448PublicKey derived from the private key.",
    "type": "method",
    "file_path": "cryptography\\src\\cryptography\\hazmat\\primitives\\asymmetric\\ed448.py",
    "ast_data": "FunctionDef name:public_key arg:self arguments arg"
  },
  {
    "library": "django",
    "name": "get_list_select_related",
    "source_code": "def get_list_select_related(self, request):\n    return self.list_select_related",
    "docstring": "Return a list of fields to add to the select_related() part of the changelist items query.",
    "type": "method",
    "file_path": "django\\django\\contrib\\admin\\options.py",
    "ast_data": "FunctionDef name:get_list_select_related arg:self arg:request arguments arg arg Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "StixSansFonts",
    "source_code": "class StixSansFonts(StixFonts):\n    _sans = True",
    "docstring": "A font handling class for the STIX fonts (that uses sans-serif characters by default).",
    "type": "class",
    "file_path": "matplotlib\\lib\\matplotlib\\_mathtext.py",
    "ast_data": "ClassDef name:StixSansFonts Assign"
  },
  {
    "library": "tensorflow",
    "name": "get_c_function",
    "source_code": "def get_c_function(self, name):\n    self.ensure_initialized()\n    return c_api_util.ScopedTFFunction(pywrap_tfe.TFE_ContextGetFunction(self._handle, name), name)",
    "docstring": "Get a C API TF_Function from the context. Args: name: Name of the function to get. Returns: A ScopedTFFunction wrapping the C API TF_Function.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\context.py",
    "ast_data": "FunctionDef name:get_c_function arg:self arg:name arguments arg arg Call Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_disallow_inside_tf_function",
    "source_code": "def _disallow_inside_tf_function(method_name):\n    if ops.inside_function():\n        error_msg = 'Detected a call to `PreprocessingLayer.{method_name}` inside a `tf.function`. `PreprocessingLayer.{method_name} is a high-level endpoint that manages its own `tf.function`. Please move the call to `PreprocessingLayer.{method_name}` outside of all enclosing `tf.function`s. Note that you can call a `PreprocessingLayer` directly on `Tensor`s inside a `tf.function` like: `layer(x)`, or update its state like: `layer.update_state(x)`.'.format(method_name=method_name)\n        raise RuntimeError(error_msg)",
    "docstring": "Disallow calling a method inside a .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\base_preprocessing_layer.py",
    "ast_data": "FunctionDef name:_disallow_inside_tf_function arg:method_name arguments arg If Call Assign Call Raise Call"
  },
  {
    "library": "tensorflow",
    "name": "Functions",
    "source_code": "class Functions(object):\n\n    def __init__(self, string_table):\n        self._string_table = string_table\n        self._function_key_to_function = {}\n\n    def index_of(self, file_path, function_name, function_start_line):\n        function_key = (file_path, function_name, function_start_line)\n        if function_key in self._function_key_to_function:\n            return self._function_key_to_function[function_key].id\n        else:\n            function_index = len(self._function_key_to_function) + 1\n            function = profile_pb2.Function()\n            function.id = function_index\n            function.name = self._string_table.index_of(function_name)\n            function.filename = self._string_table.index_of(file_path)\n            function.start_line = function_start_line\n            self._function_key_to_function[function_key] = function\n            return function_index\n\n    def function_protos(self):\n        return self._function_key_to_function.values()",
    "docstring": "Keeps track of protos for pprof profile.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\profiler\\pprof_profiler.py",
    "ast_data": "ClassDef name:Functions FunctionDef name:__init__ arg:self arg:string_table arguments arg arg Assign Assign FunctionDef name:index_of arg:self arg:file_path arg:function_name arg:function_start_line arguments arg arg arg arg Assign If Compare Return return:yes Assign Call Assign Call Assign Assign Call Assign Call Assign Assign Return return:yes FunctionDef name:function_protos arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "is_rng_supported_mesh",
    "source_code": "def is_rng_supported_mesh(device_mesh: DeviceMesh) -> bool:\n    device_handle = _get_device_handle(device_mesh.device_type)\n    if device_handle and hasattr(device_handle, 'set_rng_state'):\n        return True\n    else:\n        warnings.warn(f'DTensor random operators may not have complete support on {device_mesh.device_type} device mesh')\n        return False",
    "docstring": "Checks if the current device of `DeviceMesh` supports DTensor Random APIs; False otherwise. .. warning:: Currently we only support correct RNG on cuda/cuda-like devices.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\tensor\\_random.py",
    "ast_data": "FunctionDef name:is_rng_supported_mesh arg:device_mesh arguments arg Assign Call If BoolOp Call Return return:yes Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "register_gradient_tensor",
    "source_code": "def register_gradient_tensor(self, x_tensor_name, gradient_tensor):\n    if len(_gradient_debuggers) == 1 or self._is_active_context:\n        self._check_same_graph(gradient_tensor)\n        self._gradient_tensors[x_tensor_name] = gradient_tensor",
    "docstring": "Register the gradient tensor for an x-tensor. Args: x_tensor_name: () the name of the independent , i.e., the tensor on the denominator of the differentiation. gradient_tensor: the gradient .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\debug\\lib\\debug_gradients.py",
    "ast_data": "FunctionDef name:register_gradient_tensor arg:self arg:x_tensor_name arg:gradient_tensor arguments arg arg arg If BoolOp Compare Call Call Assign"
  },
  {
    "library": "scipy",
    "name": "_post_padding",
    "source_code": "@lru_cache(maxsize=256)\ndef _post_padding(self, n: int) -> tuple[int, int]:\n    if not n >= (m2p := (self.m_num - self.m_num_mid)):\n        raise ValueError(f'Parameter n must be >= ceil(m_num/2) = {m2p}!')\n    w2 = self.win.real ** 2 + self.win.imag ** 2\n    q1 = n // self.hop\n    k1 = q1 * self.hop - self.m_num_mid\n    for q_, k_ in enumerate(range(k1, n + self.m_num, self.hop), start=q1):\n        n_next = k_ + self.hop\n        if n_next >= n or all(w2[:n - n_next] == 0):\n            return (k_ + self.m_num, q_ + 1)\n    raise RuntimeError('This is code line should not have been reached!')",
    "docstring": "Largest signal index and slice index due to padding. Parameters ---------- n : int Number of samples of input signal (must be ≥ half of the window length).",
    "type": "method",
    "file_path": "scipy\\scipy\\signal\\_short_time_fft.py",
    "ast_data": "FunctionDef name:_post_padding arg:self arg:n arguments arg arg If Compare Raise Call Assign Assign Assign For Call Call Assign If BoolOp Compare Call Compare Return return:yes Raise Call Call"
  },
  {
    "library": "tensorflow",
    "name": "partial_tile",
    "source_code": "@classmethod\ndef partial_tile(cls, tile_assignment):\n    if not isinstance(tile_assignment, _np.ndarray):\n        raise TypeError('PartialTile assignment must be of type np.ndarray')\n    dims = list(tile_assignment.shape)\n    flattened_devices = tile_assignment.reshape(-1, order='C')\n    return Sharding(proto=xla_data_pb2.OpSharding(type=xla_data_pb2.OpSharding.OTHER, tile_assignment_dimensions=dims, tile_assignment_devices=list(flattened_devices), replicate_on_last_tile_dim=True))",
    "docstring": "Returns a partially tiled sharding attribute. This is similar to tile(), but tile_assignment has one more dimension than the tensor, and tiles in the last dimension of tile_assignment are replicated. Args: tile_assignment: An np.ndarray describing the topology of the tiling and which device will compute which part of the topology. Raises: TypeError: tile_assignment was not of np.array type.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\compiler\\xla\\experimental\\xla_sharding.py",
    "ast_data": "FunctionDef name:partial_tile arg:cls arg:tile_assignment arguments arg arg If Call Raise Call Assign Call Assign Call Return return:yes Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "filter_empty_layer_containers",
    "source_code": "def filter_empty_layer_containers(layer_list):\n    existing = set()\n    to_visit = layer_list[::-1]\n    while to_visit:\n        obj = to_visit.pop()\n        if id(obj) in existing:\n            continue\n        existing.add(id(obj))\n        if hasattr(obj, '_is_layer') and (not isinstance(obj, type)):\n            yield obj\n        else:\n            sub_layers = getattr(obj, 'layers', None) or []\n            to_visit.extend(sub_layers[::-1])",
    "docstring": "Filter out empty Layer-like containers and uniquify.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\utils\\layer_utils.py",
    "ast_data": "FunctionDef name:filter_empty_layer_containers arg:layer_list arguments arg Assign Call Assign While Assign Call If Compare Call Call Call If BoolOp Call Call Assign BoolOp Call Call"
  },
  {
    "library": "pytorch",
    "name": "_skip_coverage",
    "source_code": "@staticmethod\ndef _skip_coverage(path: str) -> bool:\n    return '/third-party/' in path",
    "docstring": "Returns True if file path should not be processed. This is repo-specific and only makes sense for the current state of ovrsource.",
    "type": "method",
    "file_path": "pytorch\\tools\\code_coverage\\package\\tool\\parser\\llvm_coverage_parser.py",
    "ast_data": "FunctionDef name:_skip_coverage arg:path arguments arg Return return:yes Compare"
  },
  {
    "library": "tensorflow",
    "name": "_reset_layer_losses",
    "source_code": "def _reset_layer_losses(parent_layer):\n    losses_dict = {}\n    for layer in utils.list_all_layers_and_sublayers(parent_layer):\n        losses_dict[layer] = {'losses': layer._losses[:], 'eager_losses': layer._eager_losses[:]}\n        with utils.no_automatic_dependency_tracking_scope(layer):\n            layer._losses = []\n            layer._eager_losses = []\n    return losses_dict",
    "docstring": "Resets losses of layer and its sublayers, and returns original losses.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\saving\\saved_model\\save_impl.py",
    "ast_data": "FunctionDef name:_reset_layer_losses arg:parent_layer arguments arg Assign For Call Assign With Call Assign Assign Return return:yes"
  },
  {
    "library": "numpy",
    "name": "isalpha",
    "source_code": "def isalpha(self):\n    return isalpha(self)",
    "docstring": "Returns true for each element if all characters in the string are alphabetic and there is at least one character, false otherwise. See Also -------- char.isalpha",
    "type": "method",
    "file_path": "numpy\\numpy\\_core\\defchararray.py",
    "ast_data": "FunctionDef name:isalpha arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "_cmplx_sort",
    "source_code": "def _cmplx_sort(p):\n    p = np.asarray(p)\n    indx = np.argsort(abs(p))\n    return (np.take(p, indx, 0), indx)",
    "docstring": "Sort roots based on magnitude. Parameters ---------- p : array_like The roots to sort, as a 1-D array. Returns ------- p_sorted : ndarray Sorted roots. indx : ndarray Array of indices needed to sort the input . Examples -------- >>> from scipy import signal >>> vals = [1, 4, 1+1.j, 3] >>> p_sorted, indx = signal.cmplx_sort(vals) >>> p_sorted array([1.+0.j, 1.+1.j, 3.+0.j, 4.+0.j]) >>> indx array([0, 2, 3, 1])",
    "type": "function",
    "file_path": "scipy\\scipy\\signal\\_signaltools.py",
    "ast_data": "FunctionDef name:_cmplx_sort arg:p arguments arg Assign Call Assign Call Call Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "_get_submatrix",
    "source_code": "def _get_submatrix(self, major=None, minor=None, copy=False):\n    M, N = self._swap(self._shape_as_2d)\n    i0, i1 = _process_slice(major, M)\n    j0, j1 = _process_slice(minor, N)\n    if i0 == 0 and j0 == 0 and (i1 == M) and (j1 == N):\n        return self.copy() if copy else self\n    indptr, indices, data = get_csr_submatrix(M, N, self.indptr, self.indices, self.data, i0, i1, j0, j1)\n    shape = self._swap((i1 - i0, j1 - j0))\n    if self.ndim == 1:\n        shape = (shape[1],)\n    return self.__class__((data, indices, indptr), shape=shape, dtype=self.dtype, copy=False)",
    "docstring": "Return a submatrix of this matrix. major, minor: None, int, or slice with step 1",
    "type": "method",
    "file_path": "scipy\\scipy\\sparse\\_compressed.py",
    "ast_data": "FunctionDef name:_get_submatrix arg:self arg:major arg:minor arg:copy arguments arg arg arg arg Assign Call Assign Call Assign Call If BoolOp Compare Compare Compare Compare Return return:yes Call Assign Call Assign Call If Compare Assign Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "autograd_cache_key",
    "source_code": "def autograd_cache_key(gm: torch.fx.GraphModule, example_inputs, config: AOTConfig, fx_config: _CompileFxKwargs) -> tuple[str, list[str]]:\n    check_cacheable(gm)\n    if has_triton_package():\n        import triton\n        if triton.__version__ < '3.2.0':\n            raise BypassAOTAutogradCache('AOTAutogradCache requires triton 3.2.0')\n    details = AOTAutogradCacheDetails(gm, example_inputs, config, fx_config)\n    pickler = AOTAutogradCachePickler(gm)\n    key = 'a' + pickler.get_hash(details)\n    debug_lines = pickler.debug_lines(details)\n    log.debug('Autograd graph cache hash details for key %s:\\n%s', key, LazyString(lambda: '\\n'.join(debug_lines)))\n    return (key, debug_lines)",
    "docstring": "Generate a unique hash of the FX graph for caching.",
    "type": "function",
    "file_path": "pytorch\\torch\\_functorch\\_aot_autograd\\autograd_cache.py",
    "ast_data": "FunctionDef name:autograd_cache_key arg:gm arg:example_inputs arg:config arg:fx_config arguments arg arg arg arg Call If Call If Compare Raise Call Assign Call Assign Call Assign Call Assign Call Call Call arguments Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "set_parse_math",
    "source_code": "def set_parse_math(self, parse_math):\n    self._parse_math = bool(parse_math)",
    "docstring": "Override switch to disable any mathtext parsing for this . Parameters ---------- parse_math : bool If False, this will never use mathtext. If True, mathtext will be used if there is an even number of unescaped dollar signs.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\text.py",
    "ast_data": "FunctionDef name:set_parse_math arg:self arg:parse_math arguments arg arg Assign Call"
  },
  {
    "library": "tensorflow",
    "name": "PerWorkerValuesTypeSpec",
    "source_code": "class PerWorkerValuesTypeSpec(type_spec_lib.TypeSpec):\n\n    def __init__(self, value_spec, descendant_type):\n        assert value_spec\n        self._value_spec = value_spec\n        self._descendant_type = descendant_type\n\n    def _serialize(self):\n        return (self._value_spec,)\n\n    @property\n    def value_type(self):\n        return self._descendant_type\n\n    def most_specific_common_supertype(self, others):\n        raise NotImplementedError('most_specific_common_supertype is not implemented')\n\n    @property\n    def _component_specs(self):\n        return self._value_spec\n\n    def _to_components(self, value):\n        return self._value_spec\n\n    def _from_components(self, value):\n        return value",
    "docstring": "TypeSpec for PerWorkerValues. It only support tracing a function using a PerWorkerValues.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\coordinator\\values.py",
    "ast_data": "ClassDef name:PerWorkerValuesTypeSpec FunctionDef name:__init__ arg:self arg:value_spec arg:descendant_type arguments arg arg arg Assign Assign FunctionDef name:_serialize arg:self arguments arg Return return:yes FunctionDef name:value_type arg:self arguments arg Return return:yes FunctionDef name:most_specific_common_supertype arg:self arg:others arguments arg arg Raise Call FunctionDef name:_component_specs arg:self arguments arg Return return:yes FunctionDef name:_to_components arg:self arg:value arguments arg arg Return return:yes FunctionDef name:_from_components arg:self arg:value arguments arg arg Return return:yes"
  },
  {
    "library": "pandas",
    "name": "get_chunk",
    "source_code": "def get_chunk(self, size: int | None=None) -> pd.DataFrame:\n    if size is None:\n        size = self._chunksize\n    return self.read(nrows=size)",
    "docstring": "Reads lines from Xport file and returns as dataframe Parameters ---------- size : int, defaults to None Number of lines to read. If None, reads whole file. Returns ------- DataFrame",
    "type": "method",
    "file_path": "pandas\\pandas\\io\\sas\\sas_xport.py",
    "ast_data": "FunctionDef name:get_chunk arg:self arg:size arguments arg arg If Compare Assign Return return:yes Call"
  },
  {
    "library": "django",
    "name": "within",
    "source_code": "def within(self, other):\n    return self._topology(capi.ogr_within, other)",
    "docstring": "Return True if this geometry is within the other.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\gdal\\geometries.py",
    "ast_data": "FunctionDef name:within arg:self arg:other arguments arg arg Return return:yes Call"
  },
  {
    "library": "django",
    "name": "geom_name",
    "source_code": "@property\ndef geom_name(self):\n    return capi.get_geom_name(self.ptr)",
    "docstring": "Return the Name of this Geometry.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\gdal\\geometries.py",
    "ast_data": "FunctionDef name:geom_name arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "upcast_compute_type",
    "source_code": "def upcast_compute_type(dtype: torch.dtype) -> torch.dtype:\n    if dtype in (torch.float16, torch.bfloat16) and config.triton.codegen_upcast_to_fp32 and (get_current_backend() == 'triton'):\n        return torch.float32\n    return dtype",
    "docstring": "Maybe upcast [b]float16 to float32",
    "type": "function",
    "file_path": "pytorch\\torch\\_inductor\\utils.py",
    "ast_data": "FunctionDef name:upcast_compute_type arg:dtype arguments arg If BoolOp Compare Compare Call Return return:yes Return return:yes"
  },
  {
    "library": "django",
    "name": "delete",
    "source_code": "def delete(self, session_key=None):\n    raise NotImplementedError('subclasses of SessionBase must provide a delete() method')",
    "docstring": "Delete the session data under this key. If the key is None, use the current session key value.",
    "type": "method",
    "file_path": "django\\django\\contrib\\sessions\\backends\\base.py",
    "ast_data": "FunctionDef name:delete arg:self arg:session_key arguments arg arg Raise Call"
  },
  {
    "library": "pytorch",
    "name": "reset_code_caches",
    "source_code": "def reset_code_caches() -> None:\n    import logging\n    log = logging.getLogger(__name__)\n    log.info('torch._dynamo.reset_code_caches')\n    'Clear compile caches that are keyed by code objects'\n    with convert_frame.compile_lock:\n        reset_code_state()\n        for weak_code in convert_frame.input_codes.seen + convert_frame.output_codes.seen:\n            code = weak_code()\n            if code:\n                reset_code(code)\n        code_context.clear()",
    "docstring": "Clears in-memory code cache, which is what stores compiled products. This resets less state than :func: and is mostly only used for testing purposes.",
    "type": "function",
    "file_path": "pytorch\\torch\\_dynamo\\__init__.py",
    "ast_data": "FunctionDef name:reset_code_caches arguments Assign Call Call With Call For Assign Call If Call Call"
  },
  {
    "library": "pandas",
    "name": "cvalues",
    "source_code": "@property\ndef cvalues(self):\n    return self.data",
    "docstring": "return my cython values",
    "type": "method",
    "file_path": "pandas\\pandas\\io\\pytables.py",
    "ast_data": "FunctionDef name:cvalues arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_copy_source",
    "source_code": "def _copy_source(s, graph, op_map, handle_captures, inverse_captures, base_graph):\n    if handle_captures and s in inverse_captures:\n        copied_placeholder = graph.capture(inverse_captures[s], name=s.op.name)\n    elif s.op.type == 'PlaceholderWithDefault' and _constant_inputs(s):\n        default_value = s.op.inputs[0]\n        unavailable_inputs, unavailable_control_inputs = _copy_non_source(op=default_value.op, graph=graph, op_map=op_map, base_graph=base_graph)\n        if unavailable_inputs or unavailable_control_inputs:\n            raise AssertionError('Could not copy source node {} because it has inputs.'.format(default_value))\n        with ops.device(s.op.device):\n            copied_placeholder = array_ops.placeholder_with_default(input=op_map[default_value], shape=s.shape, name=s.op.name)\n    else:\n        with ops.device(s.op.device):\n            copied_placeholder = array_ops.placeholder(dtype=s.dtype, shape=s.shape, name=s.op.name)\n    base_handle = resource_variable_ops.get_resource_handle_data(s)\n    if base_handle.shape_and_type:\n        resource_variable_ops._set_handle_shapes_and_types(copied_placeholder, base_handle, graph_mode=True)\n    op_map[s] = copied_placeholder\n    op_map[s.op] = copied_placeholder.op",
    "docstring": "Create a source in a graph based on a Tensor from a different graph. This function creates a placeholder analog of in a graph with the following behavior: 1) If s is a captured Tensor or Variable and handle_captures is set to True, simply capture it in the new graph as well. 2) If s is a PlaceholderWithDefault whose default is a constant, preserve said default in the new graph. 3) When applicable, copy resource variable metadata from to the newly created placeholder. Args: s: The source of interest. graph: The destination graph. op_map: A dict mapping ops and tensors in the old graph to the new one. handle_captures: A boolean indicating whether to re-capture s in the new graph or simply create a vanilla placeholder. inverse_captures: A dict mapping s back to the Tensor or Variable that it captures. base_graph: The graph being copied from.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\lift_to_graph.py",
    "ast_data": "FunctionDef name:_copy_source arg:s arg:graph arg:op_map arg:handle_captures arg:inverse_captures arg:base_graph arguments arg arg arg arg arg arg If BoolOp Compare Assign Call If BoolOp Compare Call Assign Assign Call If BoolOp Raise Call Call With Call Assign Call With Call Assign Call Assign Call If Call Assign Assign"
  },
  {
    "library": "tensorflow",
    "name": "_get_num_workers",
    "source_code": "def _get_num_workers(cluster_spec):\n    if not cluster_spec:\n        return 0\n    return len(cluster_spec.as_dict().get(_TaskType.WORKER, [])) + len(cluster_spec.as_dict().get(_TaskType.CHIEF, []))",
    "docstring": "Gets number of workers including chief.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\distribute\\distribute_coordinator_utils.py",
    "ast_data": "FunctionDef name:_get_num_workers arg:cluster_spec arguments arg If Return return:yes Return return:yes Call Call Call Call Call Call"
  },
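A minimal plain-dict sketch of the counting rule above; the `cluster` dict is a hypothetical stand-in for `cluster_spec.as_dict()`. The chief counts as a worker, parameter servers do not.

```python
# Hypothetical stand-in for cluster_spec.as_dict(); not a real ClusterSpec.
cluster = {
    "chief": ["host0:2222"],
    "worker": ["host1:2222", "host2:2222"],
    "ps": ["host3:2222"],
}
# Same counting rule as _get_num_workers: workers plus chief, ps excluded.
num_workers = len(cluster.get("worker", [])) + len(cluster.get("chief", []))
print(num_workers)  # 3
```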
  {
    "library": "pandas",
    "name": "_is_homogeneous_mgr",
    "source_code": "def _is_homogeneous_mgr(mgr: BlockManager, first_dtype: DtypeObj) -> bool:\n    if mgr.nblocks != 1:\n        return False\n    blk = mgr.blocks[0]\n    if not (blk.mgr_locs.is_slice_like and blk.mgr_locs.as_slice.step == 1):\n        return False\n    return blk.dtype == first_dtype",
    "docstring": "Check if this Manager can be treated as a single ndarray.",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\internals\\concat.py",
    "ast_data": "FunctionDef name:_is_homogeneous_mgr arg:mgr arg:first_dtype arguments arg arg If Compare Return return:yes Assign If BoolOp Compare Return return:yes Return return:yes Compare"
  },
  {
    "library": "scipy",
    "name": "_compute_angular_rate",
    "source_code": "def _compute_angular_rate(rotvecs, rotvecs_dot):\n    return _matrix_vector_product_of_stacks(_rotvec_dot_to_angular_rate_matrix(rotvecs), rotvecs_dot)",
    "docstring": "Compute angular rates given rotation vectors and its derivatives. Parameters ---------- rotvecs : ndarray, shape (n, 3) Set of rotation vectors. rotvecs_dot : ndarray, shape (n, 3) Set of rotation vector derivatives. Returns ------- ndarray, shape (n, 3)",
    "type": "function",
    "file_path": "scipy\\scipy\\spatial\\transform\\_rotation_spline.py",
    "ast_data": "FunctionDef name:_compute_angular_rate arg:rotvecs arg:rotvecs_dot arguments arg arg Return return:yes Call Call"
  },
  {
    "library": "scipy",
    "name": "mahalanobis",
    "source_code": "def mahalanobis(u, v, VI):\n    u = _validate_vector(u)\n    v = _validate_vector(v)\n    VI = np.atleast_2d(VI)\n    delta = u - v\n    m = np.dot(np.dot(delta, VI), delta)\n    return np.sqrt(m)",
    "docstring": "Compute the Mahalanobis distance between two 1-D arrays. The Mahalanobis distance between 1-D arrays and , is defined as .. math:: \\sqrt{ (u-v) V^{-1} (u-v)^T } where `VIuv`. Examples -------- >>> from scipy.spatial import distance >>> iv = [[1, 0.5, 0.5], [0.5, 1, 0.5], [0.5, 0.5, 1]] >>> distance.mahalanobis([1, 0, 0], [0, 1, 0], iv) 1.0 >>> distance.mahalanobis([0, 2, 0], [0, 1, 0], iv) 1.0 >>> distance.mahalanobis([2, 0, 0], [0, 1, 0], iv) 1.7320508075688772",
    "type": "function",
    "file_path": "scipy\\scipy\\spatial\\distance.py",
    "ast_data": "FunctionDef name:mahalanobis arg:u arg:v arg:VI arguments arg arg arg Assign Call Assign Call Assign Call Assign Assign Call Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "experimental_should_init",
    "source_code": "@property\ndef experimental_should_init(self):\n    return self._strategy.extended.experimental_should_init",
    "docstring": "Whether to run init ops.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\distribute_coordinator.py",
    "ast_data": "FunctionDef name:experimental_should_init arg:self arguments arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_sync_param_groups",
    "source_code": "@staticmethod\ndef _sync_param_groups(src_param_groups: list[dict[Any, Any]], dst_param_groups: list[dict[Any, Any]]) -> None:\n    assert len(src_param_groups) == len(dst_param_groups), 'Mismatch between number of source and destination parameter groups'\n    for src_param_group, dst_param_group in zip(src_param_groups, dst_param_groups):\n        for attr in filter(lambda x: x != 'params', src_param_group.keys()):\n            dst_param_group[attr] = src_param_group[attr]",
    "docstring": "Sync the attributes from the source parameter groups to the destination parameter groups. Example attributes include learning rate or scheduler attributes. The two parameter groups should have the same length (i.e. same number of parameter groups). Arguments: src_param_groups (list[dict]): parameter groups giving the attribute settings to copy. dst_param_groups (list[dict]): parameter groups giving the attribute settings to set.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\optim\\zero_redundancy_optimizer.py",
    "ast_data": "FunctionDef name:_sync_param_groups arg:src_param_groups arg:dst_param_groups arguments arg arg Compare Call Call For Call For Call arguments arg Compare Call Assign"
  },
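A minimal standalone sketch of the same sync rule, using plain dicts as hypothetical stand-ins for real optimizer `param_groups`: every attribute except `'params'` is copied from source to destination.

```python
# Hypothetical param_groups; only the attribute-copy rule is illustrated.
src = [{"params": [0, 1], "lr": 0.01, "momentum": 0.9}]
dst = [{"params": [0, 1], "lr": 0.10, "momentum": 0.0}]

for s, d in zip(src, dst):
    for attr in filter(lambda k: k != "params", s.keys()):
        d[attr] = s[attr]  # copy lr, momentum, etc.; leave the param list alone

print(dst)  # [{'params': [0, 1], 'lr': 0.01, 'momentum': 0.9}]
```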
  {
    "library": "django",
    "name": "make_token",
    "source_code": "def make_token(self, user):\n    return self._make_token_with_timestamp(user, self._num_seconds(self._now()), self.secret)",
    "docstring": "Return a token that can be used once to do a password reset for the given user.",
    "type": "method",
    "file_path": "django\\django\\contrib\\auth\\tokens.py",
    "ast_data": "FunctionDef name:make_token arg:self arg:user arguments arg arg Return return:yes Call Call Call"
  },
  {
    "library": "virtualenv",
    "name": "close",
    "source_code": "@abstractmethod\ndef close(self):\n    pass",
    "docstring": "Called before virtualenv exits.",
    "type": "method",
    "file_path": "virtualenv\\src\\virtualenv\\app_data\\base.py",
    "ast_data": "FunctionDef name:close arg:self arguments arg"
  },
  {
    "library": "scikit-learn",
    "name": "predict_log_proba",
    "source_code": "@available_if(_check_proba)\ndef predict_log_proba(self, X):\n    return np.log(self.predict_proba(X))",
    "docstring": "Compute log probabilities of possible outcomes for samples in X. The model need to have probability information computed at training time: fit with attribute set to True. Parameters ---------- X : array-like of shape (n_samples, n_features) or (n_samples_test, n_samples_train) For kernel=\"precomputed\", the expected shape of X is (n_samples_test, n_samples_train). Returns ------- T : ndarray of shape (n_samples, n_classes) Returns the log-probabilities of the sample for each class in the model. The columns correspond to the classes in sorted order, as they appear in the attribute :term:. Notes ----- The probability model is created using cross validation, so the results can be slightly different than those obtained by predict. Also, it will produce meaningless results on very small datasets.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\svm\\_base.py",
    "ast_data": "FunctionDef name:predict_log_proba arg:self arg:X arguments arg arg Return return:yes Call Call Call"
  },
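A hedged usage sketch: since the method is just `np.log(predict_proba(X))`, the estimator must be fit with `probability=True`; the toy data here is illustrative only.

```python
import numpy as np
from sklearn.svm import SVC

X = np.array([[0.0], [1.0], [2.0], [3.0]])
y = np.array([0, 0, 1, 1])

clf = SVC(probability=True, random_state=0).fit(X, y)
log_p = clf.predict_log_proba(X)  # shape (4, 2)
# The log-probabilities are exactly the log of predict_proba.
assert np.allclose(np.exp(log_p), clf.predict_proba(X))
```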
  {
    "library": "tensorflow",
    "name": "freeze_graph",
    "source_code": "def freeze_graph(sess, input_tensors, output_tensors):\n    graph_def = _convert_to_constants.disable_lower_using_switch_merge(sess.graph_def)\n    config = get_grappler_config(['function'])\n    graph_def = run_graph_optimizations(graph_def, input_tensors, output_tensors, config, graph=sess.graph)\n    hinted_outputs_nodes = find_all_hinted_output_nodes(sess)\n    if hinted_outputs_nodes:\n        return _convert_op_hints_if_present(sess, graph_def, output_tensors, hinted_outputs_nodes)\n    if not is_frozen_graph(sess):\n        output_node_names = [tensor.name.split(':')[0] for tensor in output_tensors]\n        return _convert_to_constants.convert_variables_to_constants(sess, graph_def, output_node_names)\n    else:\n        return sess.graph_def",
    "docstring": "Returns a frozen GraphDef. Runs a Grappler pass and freezes a graph with Variables in it. Otherwise the existing GraphDef is returned. The Grappler pass is only run on models that are frozen in order to inline the functions in the graph. If OpHints is present, it will try to convert the OpHint graph. Args: sess: TensorFlow Session. input_tensors: List of input tensors. output_tensors: List of output tensors (only .name is used from this). Returns: Frozen GraphDef.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\lite\\python\\util.py",
    "ast_data": "FunctionDef name:freeze_graph arg:sess arg:input_tensors arg:output_tensors arguments arg arg arg Assign Call Assign Call Assign Call Assign Call If Return return:yes Call If Call Assign Call Return return:yes Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "ConvReLU2d",
    "source_code": "class ConvReLU2d(_FusedModule):\n\n    def __init__(self, conv, relu):\n        assert type_before_parametrizations(conv) == Conv2d and type_before_parametrizations(relu) == ReLU, f'Incorrect types for input modules{type_before_parametrizations(conv)}{type_before_parametrizations(relu)}'\n        super().__init__(conv, relu)",
    "docstring": "This is a sequential container which calls the Conv2d and ReLU modules. During quantization this will be replaced with the corresponding fused module.",
    "type": "class",
    "file_path": "pytorch\\torch\\ao\\nn\\intrinsic\\modules\\fused.py",
    "ast_data": "ClassDef name:ConvReLU2d FunctionDef name:__init__ arg:self arg:conv arg:relu arguments arg arg arg BoolOp Compare Call Compare Call Call Call Call Call"
  },
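A hedged usage sketch: before quantization swaps it out, the fused container behaves exactly like `relu(conv(x))`; shapes and channel counts here are arbitrary.

```python
import torch
import torch.nn as nn
from torch.ao.nn.intrinsic import ConvReLU2d

conv = nn.Conv2d(3, 8, kernel_size=3)
relu = nn.ReLU()
fused = ConvReLU2d(conv, relu)  # sequential container: conv -> relu

x = torch.randn(1, 3, 16, 16)
assert torch.equal(fused(x), relu(conv(x)))
```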
  {
    "library": "matplotlib",
    "name": "get_url",
    "source_code": "def get_url(self):\n    return self._url",
    "docstring": "Return the url.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\artist.py",
    "ast_data": "FunctionDef name:get_url arg:self arguments arg Return return:yes"
  },
  {
    "library": "pandas",
    "name": "_period_dispatch",
    "source_code": "def _period_dispatch(meth: F) -> F:\n\n    @wraps(meth)\n    def new_meth(self, *args, **kwargs):\n        if not isinstance(self.dtype, PeriodDtype):\n            return meth(self, *args, **kwargs)\n        arr = self.view('M8[ns]')\n        result = meth(arr, *args, **kwargs)\n        if result is NaT:\n            return NaT\n        elif isinstance(result, Timestamp):\n            return self._box_func(result._value)\n        res_i8 = result.view('i8')\n        return self._from_backing_data(res_i8)\n    return cast(F, new_meth)",
    "docstring": "For PeriodArray methods, dispatch to DatetimeArray and re-wrap the results in PeriodArray. We cannot use ._ndarray directly for the affected methods because the i8 data has different semantics on NaT values.",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\arrays\\datetimelike.py",
    "ast_data": "FunctionDef name:_period_dispatch arg:meth arguments arg FunctionDef name:new_meth arg:self arguments arg arg arg If Call Return return:yes Call Assign Call Assign Call If Compare Return return:yes If Call Return return:yes Call Assign Call Return return:yes Call Call Return return:yes Call"
  },
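A generic sketch of the same view-dispatch-rewrap decorator pattern, using hypothetical types (a tuple viewed as a list) rather than the pandas internals:

```python
from functools import wraps

def list_dispatch(meth):
    """View the receiver as a list, run the method, re-wrap the result."""
    @wraps(meth)
    def new_meth(self, *args, **kwargs):
        result = meth(list(self), *args, **kwargs)  # dispatch on the list view
        return type(self)(result)                   # re-wrap in the original type
    return new_meth

class Pair(tuple):
    @list_dispatch
    def reversed_(seq):
        return seq[::-1]

print(Pair((1, 2)).reversed_())  # (2, 1)
```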
  {
    "library": "seaborn",
    "name": "_color_to_rgb",
    "source_code": "def _color_to_rgb(color, input):\n    if input == 'hls':\n        color = colorsys.hls_to_rgb(*color)\n    elif input == 'husl':\n        color = husl.husl_to_rgb(*color)\n        color = tuple(np.clip(color, 0, 1))\n    elif input == 'xkcd':\n        color = xkcd_rgb[color]\n    return mpl.colors.to_rgb(color)",
    "docstring": "Add some more flexibility to color choices.",
    "type": "function",
    "file_path": "seaborn\\seaborn\\palettes.py",
    "ast_data": "FunctionDef name:_color_to_rgb arg:color arg:input arguments arg arg If Compare Assign Call If Compare Assign Call Assign Call Call If Compare Assign Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "maybe_clone_args",
    "source_code": "def maybe_clone_args(self, exclude: Container[str], *args, **kwargs) -> tuple[list[Any], dict[str, Any]]:\n    from ..compile_fx import clone_preserve_strides\n\n    def prepare_arg(name, arg):\n        if name in self.mutated_arg_names and name not in exclude:\n            assert isinstance(arg, torch.Tensor)\n            return clone_preserve_strides(arg)\n        else:\n            return arg\n    cloned_args = [prepare_arg(name, arg) for name, arg in itertools.zip_longest(self.fn.arg_names[:len(args)], args)]\n    cloned_kwargs = {name: prepare_arg(name, arg) for name, arg in kwargs.items()}\n    return (cloned_args, cloned_kwargs)",
    "docstring": "Prepare new args and kwargs by cloning any in-place buffers (that are not in the provided exclusion list), to avoid autotune contaminating them. Avoid cloning the other buffers because it leads to increased memory usage.",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\runtime\\triton_heuristics.py",
    "ast_data": "FunctionDef name:maybe_clone_args arg:self arg:exclude arguments arg arg arg arg FunctionDef name:prepare_arg arg:name arg:arg arguments arg arg If BoolOp Compare Compare Call Return return:yes Call Return return:yes Assign Call Call Call Assign Call Call Return return:yes"
  },
  {
    "library": "scipy",
    "name": "_r2rn",
    "source_code": "def _r2rn(forward, transform, x, type=2, s=None, axes=None, norm=None, overwrite_x=False, workers=None, orthogonalize=None):\n    tmp = _asfarray(x)\n    shape, axes = _init_nd_shape_and_axes(tmp, s, axes)\n    overwrite_x = overwrite_x or _datacopied(tmp, x)\n    if len(axes) == 0:\n        return x\n    tmp, copied = _fix_shape(tmp, shape, axes)\n    overwrite_x = overwrite_x or copied\n    if not forward:\n        if type == 2:\n            type = 3\n        elif type == 3:\n            type = 2\n    norm = _normalization(norm, forward)\n    workers = _workers(workers)\n    out = tmp if overwrite_x else None\n    if np.iscomplexobj(x):\n        out = np.empty_like(tmp) if out is None else out\n        transform(tmp.real, type, axes, norm, out.real, workers)\n        transform(tmp.imag, type, axes, norm, out.imag, workers)\n        return out\n    return transform(tmp, type, axes, norm, out, workers, orthogonalize)",
    "docstring": "Forward or backward nd DCT/DST Parameters ---------- forward : bool Transform direction (determines type and normalisation) transform : {pypocketfft.dct, pypocketfft.dst} The transform to perform",
    "type": "function",
    "file_path": "scipy\\scipy\\fft\\_pocketfft\\realtransforms.py",
    "ast_data": "FunctionDef name:_r2rn arg:forward arg:transform arg:x arg:type arg:s arg:axes arg:norm arg:overwrite_x arg:workers arg:orthogonalize arguments arg arg arg arg arg arg arg arg arg arg Assign Call Assign Call Assign BoolOp Call If Compare Call Return return:yes Assign Call Assign BoolOp If If Compare Assign If Compare Assign Assign Call Assign Call Assign If Call Assign Compare Call Call Call Return return:yes Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "GridSpecFromSubplotSpec",
    "source_code": "class GridSpecFromSubplotSpec(GridSpecBase):\n\n    def __init__(self, nrows, ncols, subplot_spec, wspace=None, hspace=None, height_ratios=None, width_ratios=None):\n        self._wspace = wspace\n        self._hspace = hspace\n        if isinstance(subplot_spec, SubplotSpec):\n            self._subplot_spec = subplot_spec\n        else:\n            raise TypeError('subplot_spec must be type SubplotSpec, usually from GridSpec, or axes.get_subplotspec.')\n        self.figure = self._subplot_spec.get_gridspec().figure\n        super().__init__(nrows, ncols, width_ratios=width_ratios, height_ratios=height_ratios)\n\n    def get_subplot_params(self, figure=None):\n        hspace = self._hspace if self._hspace is not None else figure.subplotpars.hspace if figure is not None else mpl.rcParams['figure.subplot.hspace']\n        wspace = self._wspace if self._wspace is not None else figure.subplotpars.wspace if figure is not None else mpl.rcParams['figure.subplot.wspace']\n        figbox = self._subplot_spec.get_position(figure)\n        left, bottom, right, top = figbox.extents\n        return SubplotParams(left=left, right=right, bottom=bottom, top=top, wspace=wspace, hspace=hspace)\n\n    def get_topmost_subplotspec(self):\n        return self._subplot_spec.get_topmost_subplotspec()",
    "docstring": "GridSpec whose subplot layout parameters are inherited from the location specified by a given SubplotSpec.",
    "type": "class",
    "file_path": "matplotlib\\lib\\matplotlib\\gridspec.py",
    "ast_data": "ClassDef name:GridSpecFromSubplotSpec FunctionDef name:__init__ arg:self arg:nrows arg:ncols arg:subplot_spec arg:wspace arg:hspace arg:height_ratios arg:width_ratios arguments arg arg arg arg arg arg arg arg Assign Assign If Call Assign Raise Call Assign Call Call Call FunctionDef name:get_subplot_params arg:self arg:figure arguments arg arg Assign Compare Compare Assign Compare Compare Assign Call Assign Return return:yes Call FunctionDef name:get_topmost_subplotspec arg:self arguments arg Return return:yes Call"
  },
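A hedged usage sketch: nest a 2x2 grid inside one cell of an outer GridSpec (the layout values are arbitrary).

```python
import matplotlib.pyplot as plt
from matplotlib.gridspec import GridSpec, GridSpecFromSubplotSpec

fig = plt.figure()
outer = GridSpec(1, 2, figure=fig)
inner = GridSpecFromSubplotSpec(2, 2, subplot_spec=outer[0], wspace=0.4)

for i in range(4):
    fig.add_subplot(inner[i])  # four small axes in the left half
fig.add_subplot(outer[1])      # one large axes in the right half
```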
  {
    "library": "tensorflow",
    "name": "scope_name",
    "source_code": "@scope_name.setter\ndef scope_name(self, s):\n    self._thread_local_data.scope_name = s",
    "docstring": "Sets scope name for the current thread.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\context.py",
    "ast_data": "FunctionDef name:scope_name arg:self arg:s arguments arg arg Assign"
  },
  {
    "library": "pytorch",
    "name": "type",
    "source_code": "def type(self, dst_type: Union[dtype, str]) -> Self:\n    return self._apply(lambda t: t.type(dst_type))",
    "docstring": "Casts all parameters and buffers to :attr:. .. note:: This method modifies the module in-place. Args: dst_type (type or string): the desired type Returns: Module: self",
    "type": "method",
    "file_path": "pytorch\\torch\\nn\\modules\\module.py",
    "ast_data": "FunctionDef name:type arg:self arg:dst_type arguments arg arg Return return:yes Call arguments arg Call"
  },
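A hedged usage sketch of the in-place cast:

```python
import torch
import torch.nn as nn

m = nn.Linear(2, 3)
m.type(torch.float64)  # casts parameters and buffers in-place, returns self
print(m.weight.dtype)  # torch.float64
```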
  {
    "library": "django",
    "name": "name",
    "source_code": "@property\ndef name(self):\n    name = capi.get_ds_name(self._ptr)\n    return force_str(name, self.encoding, strings_only=True)",
    "docstring": "Return the name of the data source.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\gdal\\datasource.py",
    "ast_data": "FunctionDef name:name arg:self arguments arg Assign Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "__init__",
    "source_code": "def __init__(self, log_dir, max_queue=10, flush_secs=120, filename_suffix=''):\n    log_dir = str(log_dir)\n    self.event_writer = EventFileWriter(log_dir, max_queue, flush_secs, filename_suffix)",
    "docstring": "Create a and an event file. On construction the writer creates a new event file in . The other arguments to the constructor control the asynchronous writes to the event file. Args: log_dir: A string. Directory where event file will be written. max_queue: Integer. Size of the queue for pending events and summaries before one of the 'add' calls forces a flush to disk. Default is ten items. flush_secs: Number. How often, in seconds, to flush the pending events and summaries to disk. Default is every two minutes. filename_suffix: A string. Suffix added to all event filenames in the log_dir directory. More details on filename construction in tensorboard.summary.writer.event_file_writer.EventFileWriter.",
    "type": "method",
    "file_path": "pytorch\\torch\\utils\\tensorboard\\writer.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:log_dir arg:max_queue arg:flush_secs arg:filename_suffix arguments arg arg arg arg arg Assign Call Assign Call"
  },
  {
    "library": "pytorch",
    "name": "replicate_tensor_dim",
    "source_code": "def replicate_tensor_dim(placements: Sequence[Placement], dim: int) -> tuple[Placement, ...]:\n    return tuple((Replicate() if p.is_partial() or (isinstance(p, Shard) and p.dim == dim) else p for p in placements))",
    "docstring": "Force the given tensor dimension to be replicated.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\tensor\\_ops\\_tensor_ops.py",
    "ast_data": "FunctionDef name:replicate_tensor_dim arg:placements arg:dim arguments arg arg Return return:yes Call BoolOp Call BoolOp Call Compare Call"
  },
  {
    "library": "pytorch",
    "name": "state_dict",
    "source_code": "@override\ndef state_dict(self) -> dict[str, Any]:\n    state_dict = {key: value for key, value in self.__dict__.items() if key not in ('optimizer', 'lr_lambdas')}\n    state_dict['lr_lambdas'] = [None] * len(self.lr_lambdas)\n    for idx, fn in enumerate(self.lr_lambdas):\n        if not isinstance(fn, types.FunctionType):\n            state_dict['lr_lambdas'][idx] = fn.__dict__.copy()\n    return state_dict",
    "docstring": "Return the state of the scheduler as a :class:. It contains an entry for every variable in self.__dict__ which is not the optimizer. The learning rate lambda functions will only be saved if they are callable objects and not if they are functions or lambdas. When saving or loading the scheduler, please make sure to also save or load the state of the optimizer.",
    "type": "method",
    "file_path": "pytorch\\torch\\optim\\lr_scheduler.py",
    "ast_data": "FunctionDef name:state_dict arg:self arguments arg Assign Call Compare Assign Call For Call If Call Assign Call Return return:yes"
  },
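A hedged usage sketch: because plain functions and lambdas are saved as `None`, a fresh scheduler must be constructed with the same lambdas before restoring.

```python
import torch

model = torch.nn.Linear(2, 2)
opt = torch.optim.SGD(model.parameters(), lr=0.1)
decay = lambda epoch: 0.95 ** epoch
sched = torch.optim.lr_scheduler.LambdaLR(opt, lr_lambda=decay)

state = sched.state_dict()  # 'lr_lambdas' holds [None] for the plain lambda
sched2 = torch.optim.lr_scheduler.LambdaLR(opt, lr_lambda=decay)
sched2.load_state_dict(state)  # state restored; lambda supplied anew
```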
  {
    "library": "pytorch",
    "name": "_ExecOrderWarnStatus",
    "source_code": "class _ExecOrderWarnStatus(Enum):\n    NONE = auto()\n    WARNING = auto()\n    WARNED = auto()",
    "docstring": "Used internally for execution order validation.",
    "type": "class",
    "file_path": "pytorch\\torch\\distributed\\fsdp\\_exec_order_utils.py",
    "ast_data": "ClassDef name:_ExecOrderWarnStatus Assign Call Assign Call Assign Call"
  },
  {
    "library": "pandas",
    "name": "__getattr__",
    "source_code": "@final\ndef __getattr__(self, name: str):\n    if name not in self._internal_names_set and name not in self._metadata and (name not in self._accessors) and self._info_axis._can_hold_identifiers_and_holds_name(name):\n        return self[name]\n    return object.__getattribute__(self, name)",
    "docstring": "After regular attribute access, try looking up the name This allows simpler access to columns for interactive use.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\generic.py",
    "ast_data": "FunctionDef name:__getattr__ arg:self arg:name arguments arg arg If BoolOp Compare Compare Compare Call Return return:yes Return return:yes Call"
  },
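A hedged usage sketch: attribute access falls through to column lookup, but existing attributes and methods always win.

```python
import pandas as pd

df = pd.DataFrame({"price": [1.0, 2.0], "size": [3, 4]})
print(df.price.sum())  # 3.0 -- falls through to df["price"]
print(df.size)         # 4   -- the existing .size attribute shadows the column
```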
  {
    "library": "pytorch",
    "name": "get_feature_columns",
    "source_code": "def get_feature_columns(self, df):\n    exclude_columns = ['speedup', 'winner', 'target', 'avail_choices', 'choice2time', 'index', 'actual_winner', 'relative_performance']\n    feature_columns = [col for col in df.columns if col not in exclude_columns]\n    return feature_columns",
    "docstring": "The dataframe contains columns that are not features, such as 'winner', 'speedup' that are only used for debugging purposes. This function returns the columns that are actually features.",
    "type": "method",
    "file_path": "pytorch\\torchgen\\_autoheuristic\\train_decision.py",
    "ast_data": "FunctionDef name:get_feature_columns arg:self arg:df arguments arg arg Assign Assign Compare Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "write_csv_from_dict",
    "source_code": "def write_csv_from_dict(filename, input_dict):\n    f = open(PATH_TO_DIR + '/data/' + filename, 'w')\n    for k, v in input_dict.items():\n        line = k\n        for item in v:\n            line += ',' + item\n        f.write(line + '\\n')\n    f.flush()\n    print('Wrote to file %s' % filename)\n    check_with_golden(filename)",
    "docstring": "Writes out a file from an input dictionary. After writing out the file, it checks the new list against the golden to make sure golden file is up-to-date. Args: filename: String that is the output file name. input_dict: Dictionary that is to be written out to a file.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\tools\\tensorflow_builder\\config_detector\\data\\cuda_compute_capability.py",
    "ast_data": "FunctionDef name:write_csv_from_dict arg:filename arg:input_dict arguments arg arg Assign Call For Call Assign For Call Call Call Call"
  },
  {
    "library": "django",
    "name": "set_urlconf",
    "source_code": "def set_urlconf(urlconf_name):\n    if urlconf_name:\n        _urlconfs.value = urlconf_name\n    elif hasattr(_urlconfs, 'value'):\n        del _urlconfs.value",
    "docstring": "Set the URLconf for the current thread or asyncio task (overriding the default one in settings). If urlconf_name is None, revert back to the default.",
    "type": "function",
    "file_path": "django\\django\\urls\\base.py",
    "ast_data": "FunctionDef name:set_urlconf arg:urlconf_name arguments arg If Assign If Call"
  },
  {
    "library": "tensorflow",
    "name": "_format_neighbors",
    "source_code": "def _format_neighbors(self, neighbor_type, non_ctrls, ctrls):\n    lines = []\n    font_attr_segs = {}\n    lines.append('')\n    lines.append('  %d %s(s) + %d control %s(s):' % (len(non_ctrls), neighbor_type, len(ctrls), neighbor_type))\n    lines.append('    %d %s(s):' % (len(non_ctrls), neighbor_type))\n    for non_ctrl in non_ctrls:\n        line = '      [%s] %s' % (self._debug_dump.node_op_type(non_ctrl), non_ctrl)\n        lines.append(line)\n        font_attr_segs[len(lines) - 1] = [(len(line) - len(non_ctrl), len(line), debugger_cli_common.MenuItem(None, 'ni -a -d -t %s' % non_ctrl))]\n    if ctrls:\n        lines.append('')\n        lines.append('    %d control %s(s):' % (len(ctrls), neighbor_type))\n        for ctrl in ctrls:\n            line = '      [%s] %s' % (self._debug_dump.node_op_type(ctrl), ctrl)\n            lines.append(line)\n            font_attr_segs[len(lines) - 1] = [(len(line) - len(ctrl), len(line), debugger_cli_common.MenuItem(None, 'ni -a -d -t %s' % ctrl))]\n    return debugger_cli_common.RichTextLines(lines, font_attr_segs=font_attr_segs)",
    "docstring": "List neighbors (inputs or recipients) of a node. Args: neighbor_type: (\"input\" | \"recipient\") non_ctrls: Non-control neighbor node names, as a list of str. ctrls: Control neighbor node names, as a list of str. Returns: A RichTextLines object.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\debug\\cli\\analyzer_cli.py",
    "ast_data": "FunctionDef name:_format_neighbors arg:self arg:neighbor_type arg:non_ctrls arg:ctrls arguments arg arg arg arg Assign Assign Call Call Call Call Call Call For Assign Call Call Assign Call Call Call Call Call If Call Call Call For Assign Call Call Assign Call Call Call Call Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_KerasTensorIterator",
    "source_code": "class _KerasTensorIterator(object):\n\n    def __init__(self, tensor, dim0):\n        self._tensor = tensor\n        self._index = 0\n        self._limit = dim0\n\n    def __iter__(self):\n        return self\n\n    def __next__(self):\n        if self._index == self._limit:\n            raise StopIteration\n        result = self._tensor[self._index]\n        self._index += 1\n        return result",
    "docstring": "Iterates over the leading dim of a KerasTensor. Performs 0 error checks.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\keras_tensor.py",
    "ast_data": "ClassDef name:_KerasTensorIterator FunctionDef name:__init__ arg:self arg:tensor arg:dim0 arguments arg arg arg Assign Assign Assign FunctionDef name:__iter__ arg:self arguments arg Return return:yes FunctionDef name:__next__ arg:self arguments arg If Compare Raise Assign Return return:yes"
  },
  {
    "library": "django",
    "name": "hasm",
    "source_code": "@property\ndef hasm(self):\n    if geos_version_tuple() < (3, 12):\n        raise GEOSException('GEOSGeometry.hasm requires GEOS >= 3.12.0.')\n    return capi.geos_hasm(self.ptr)",
    "docstring": "Return whether the geometry has a M dimension.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\geos\\geometry.py",
    "ast_data": "FunctionDef name:hasm arg:self arguments arg If Compare Call Raise Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "add_top_n",
    "source_code": "def add_top_n(self, metric: str, key: Any, val: int) -> None:\n    if self._level == 0:\n        return\n    if metric not in self._metrics:\n        self._metrics[metric] = TopN()\n    self._metrics[metric].add(key, val)",
    "docstring": "Records a metric as a TopN set of values.",
    "type": "method",
    "file_path": "pytorch\\torch\\_dynamo\\metrics_context.py",
    "ast_data": "FunctionDef name:add_top_n arg:self arg:metric arg:key arg:val arguments arg arg arg arg If Compare Return return:no If Compare Assign Call Call"
  },
  {
    "library": "authlib",
    "name": "fetch_access_token",
    "source_code": "def fetch_access_token(self, url, verifier=None, **kwargs):\n    if verifier:\n        self.auth.verifier = verifier\n    if not self.auth.verifier:\n        self.handle_error('missing_verifier', 'Missing \"verifier\" value')\n    return self._fetch_token(url, **kwargs)",
    "docstring": "Method for fetching an access token from the token endpoint. This is the final step in the OAuth 1 workflow. An access token is obtained using all previously obtained credentials, including the verifier from the authorization step. :param url: Access Token endpoint. :param verifier: A verifier string to prove authorization was granted. :param kwargs: Extra parameters to include for fetching access token. :return: A token dict.",
    "type": "method",
    "file_path": "authlib\\authlib\\oauth1\\client.py",
    "ast_data": "FunctionDef name:fetch_access_token arg:self arg:url arg:verifier arguments arg arg arg arg If Assign If Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "sanitize_gm_for_cache",
    "source_code": "@contextlib.contextmanager\ndef sanitize_gm_for_cache(gm: torch.fx.GraphModule):\n    IGNORED_FIELDS = ('meta', 'compile_subgraph_reason', '_param_name_to_source')\n    saved_fields = {}\n    for field in IGNORED_FIELDS:\n        saved_fields[field] = getattr(gm, field, None)\n        setattr(gm, field, None)\n    try:\n        yield\n    finally:\n        for field, value in saved_fields.items():\n            setattr(gm, field, value)",
    "docstring": "Clears a few fields in a dynamo supplied Graph Module that are not stable between graph inputs, but don't affect inductor or aotdispatch correctness. These fields **can** be used by code calling into aotdispatch (namely, dynamo), so we can't null them out completely. To ensure that these fields are not accessed by inductor or aotdispatch, we clear them during AOTAutogradCache.load, and then put them back before returning. This way, we generate a cache key based off of a canonical graph without these fields, and also guarantee they aren't used to affect the cache's output.",
    "type": "function",
    "file_path": "pytorch\\torch\\_functorch\\_aot_autograd\\autograd_cache.py",
    "ast_data": "FunctionDef name:sanitize_gm_for_cache arg:gm arguments arg Assign Assign For Assign Call Call Try For Call Call"
  },
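A generic sketch of the same save/clear/restore contextmanager pattern; `fields_cleared` and its arguments are hypothetical, not the cache internals.

```python
import contextlib

@contextlib.contextmanager
def fields_cleared(obj, fields):
    saved = {f: getattr(obj, f, None) for f in fields}
    for f in fields:
        setattr(obj, f, None)  # null out the unstable fields
    try:
        yield obj
    finally:
        for f, v in saved.items():
            setattr(obj, f, v)  # always restore, even if the body raised
```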
  {
    "library": "pygame",
    "name": "get_porttime_dep",
    "source_code": "def get_porttime_dep():\n    portmidi_as_porttime = True\n    if 'PORTMIDI_INC_PORTTIME' in os.environ:\n        inc_porttime = os.environ.get('PORTMIDI_INC_PORTTIME')\n        portmidi_as_porttime = True if inc_porttime in ['1', 'True'] else False\n    elif os.path.exists('/etc/redhat-release'):\n        portmidi_as_porttime = True\n    else:\n        portmidi_as_porttime = False\n    if portmidi_as_porttime:\n        return Dependency('PORTTIME', 'porttime.h', 'libportmidi.so', ['portmidi'])\n    else:\n        dep = Dependency('PORTTIME', 'porttime.h', 'libporttime.so', ['porttime'])\n        if not dep.found:\n            return Dependency('PORTTIME', 'porttime.h', 'libportmidi.so', ['portmidi'])",
    "docstring": "returns the porttime Dependency. On some distributions, such as Fedora, porttime is compiled into portmidi. On others, such as Debian, it is a separate library.",
    "type": "function",
    "file_path": "pygame\\buildconfig\\config_unix.py",
    "ast_data": "FunctionDef name:get_porttime_dep arguments Assign If Compare Assign Call Assign Compare If Call Assign Assign If Return return:yes Call Assign Call If Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "bode",
    "source_code": "def bode(self, w=None, n=100):\n    return dbode(self, w=w, n=n)",
    "docstring": "Calculate Bode magnitude and phase data of a discrete-time system. Returns a 3-tuple containing arrays of frequencies [rad/s], magnitude [dB] and phase [deg]. See for details. Examples -------- >>> from scipy import signal >>> import matplotlib.pyplot as plt Construct the transfer function :math: with sampling time 0.5s: >>> sys = signal.TransferFunction([1], [1, 2, 3], dt=0.5) Equivalent: signal.dbode(sys) >>> w, mag, phase = sys.bode() >>> plt.figure() >>> plt.semilogx(w, mag) # Bode magnitude plot >>> plt.figure() >>> plt.semilogx(w, phase) # Bode phase plot >>> plt.show()",
    "type": "method",
    "file_path": "scipy\\scipy\\signal\\_ltisys.py",
    "ast_data": "FunctionDef name:bode arg:self arg:w arg:n arguments arg arg arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "forward",
    "source_code": "def forward(self):\n    raise ValueError('this method should be reimplemented by subclass')",
    "docstring": "do one step worth of computation",
    "type": "method",
    "file_path": "pytorch\\benchmarks\\tensorexpr\\benchmark.py",
    "ast_data": "FunctionDef name:forward arg:self arguments arg Raise Call"
  },
  {
    "library": "django",
    "name": "sign_object",
    "source_code": "def sign_object(self, obj, serializer=JSONSerializer, compress=False):\n    data = serializer().dumps(obj)\n    is_compressed = False\n    if compress:\n        compressed = zlib.compress(data)\n        if len(compressed) < len(data) - 1:\n            data = compressed\n            is_compressed = True\n    base64d = b64_encode(data).decode()\n    if is_compressed:\n        base64d = '.' + base64d\n    return self.sign(base64d)",
    "docstring": "Return URL-safe, hmac signed base64 compressed JSON string. If compress is True (not the default), check if compressing using zlib can save some space. Prepend a '.' to signify compression. This is included in the signature, to protect against zip bombs. The serializer is expected to return a bytestring.",
    "type": "method",
    "file_path": "django\\django\\core\\signing.py",
    "ast_data": "FunctionDef name:sign_object arg:self arg:obj arg:serializer arg:compress arguments arg arg arg arg Assign Call Call Assign If Assign Call If Compare Call Call Assign Assign Assign Call Call If Assign Return return:yes Call"
  },
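A hedged usage sketch via the module-level helpers, which delegate to `sign_object` on a `TimestampSigner`; this assumes a configured `SECRET_KEY`.

```python
from django.core import signing

token = signing.dumps({"user": 42})          # URL-safe signed string
assert signing.loads(token) == {"user": 42}  # verifies and deserializes
```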
  {
    "library": "django",
    "name": "flush",
    "source_code": "def flush(self):\n    self.clear()\n    self.delete(self.session_key)\n    self._session_key = None",
    "docstring": "Remove the current session data from the database and regenerate the key.",
    "type": "method",
    "file_path": "django\\django\\contrib\\sessions\\backends\\cached_db.py",
    "ast_data": "FunctionDef name:flush arg:self arguments arg Call Call Assign"
  },
  {
    "library": "tensorflow",
    "name": "task_id",
    "source_code": "@property\ndef task_id(self):\n    return self._task_id",
    "docstring": "Returns the id or index of the corresponding task.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\distribute\\distribute_coordinator_utils.py",
    "ast_data": "FunctionDef name:task_id arg:self arguments arg Return return:yes"
  },
  {
    "library": "scipy",
    "name": "pro_cv_seq",
    "source_code": "def pro_cv_seq(m, n, c):\n    if not (isscalar(m) and isscalar(n) and isscalar(c)):\n        raise ValueError('Arguments must be scalars.')\n    if n != floor(n) or m != floor(m):\n        raise ValueError('Modes must be integers.')\n    if n - m > 199:\n        raise ValueError('Difference between n and m is too large.')\n    maxL = n - m + 1\n    return _specfun.segv(m, n, c, 1)[1][:maxL]",
    "docstring": "Characteristic values for prolate spheroidal wave functions. Compute a sequence of characteristic values for the prolate spheroidal wave functions for mode m and n'=m..n and spheroidal parameter c. References ---------- .. [1] Zhang, Shanjie and Jin, Jianming. \"Computation of Special Functions\", John Wiley and Sons, 1996.",
    "type": "function",
    "file_path": "scipy\\scipy\\special\\_basic.py",
    "ast_data": "FunctionDef name:pro_cv_seq arg:m arg:n arg:c arguments arg arg arg If BoolOp Call Call Call Raise Call If BoolOp Compare Call Compare Call Raise Call If Compare Raise Call Assign Return return:yes Call"
  },
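A hedged usage sketch: for mode m=1, n'=1..3 and c=2.0, the sequence has n - m + 1 = 3 entries.

```python
from scipy.special import pro_cv_seq

vals = pro_cv_seq(1, 3, 2.0)  # characteristic values for n' = 1, 2, 3
print(vals.shape)             # (3,)
```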
  {
    "library": "tensorflow",
    "name": "_flatten_first_two_dims",
    "source_code": "def _flatten_first_two_dims(x):\n    old_shape = array_ops.shape(x)\n    first_dim = constant_op.constant([-1], dtype=old_shape.dtype)\n    new_shape = array_ops.concat([first_dim, old_shape[2:]], axis=0)\n    return array_ops.reshape(x, new_shape)",
    "docstring": "Merges first two dimensions.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\parallel_for\\pfor.py",
    "ast_data": "FunctionDef name:_flatten_first_two_dims arg:x arguments arg Assign Call Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "_update_detector_equalization_qconfig_info",
    "source_code": "def _update_detector_equalization_qconfig_info(self, combined_info: DetectorQConfigInfo, new_info: DetectorQConfigInfo):\n    is_equalization_recommended = combined_info.is_equalization_recommended or new_info.is_equalization_recommended\n    combined_info.is_equalization_recommended = is_equalization_recommended",
    "docstring": "Takes in the old and new information and updates the combined information. Args: combined_info (DetectorQConfigInfo): The DetectorQConfigInfo we are compiling all of the information in new_info (DetectorQConfigInfo): The DetectorQConfigInfo with the information we are trying to merge the new info into it",
    "type": "method",
    "file_path": "pytorch\\torch\\ao\\quantization\\fx\\_model_report\\model_report.py",
    "ast_data": "FunctionDef name:_update_detector_equalization_qconfig_info arg:self arg:combined_info arg:new_info arguments arg arg arg Assign BoolOp Assign"
  },
  {
    "library": "pytorch",
    "name": "materialize",
    "source_code": "def materialize(self, shape, device=None, dtype=None):\n    if device is None:\n        device = self.data.device\n    if dtype is None:\n        dtype = self.data.dtype\n    self.data = torch.empty(shape, device=device, dtype=dtype)\n    self.__class__ = self.cls_to_become",
    "docstring": "Create a Parameter or Tensor with the same properties of the uninitialized one. Given a shape, it materializes a parameter in the same device and with the same as the current one or the specified ones in the arguments. Args: shape : (tuple): the shape for the materialized tensor. device (:class:): the desired device of the parameters and buffers in this module. Optional. dtype (:class:): the desired floating point type of the floating point parameters and buffers in this module. Optional.",
    "type": "method",
    "file_path": "pytorch\\torch\\nn\\parameter.py",
    "ast_data": "FunctionDef name:materialize arg:self arg:shape arg:device arg:dtype arguments arg arg arg arg If Compare Assign If Compare Assign Assign Call Assign"
  },
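A hedged usage sketch: lazy modules call `materialize` internally once the input shape is known, but it can also be invoked directly on an `UninitializedParameter`.

```python
import torch
from torch.nn.parameter import UninitializedParameter

p = UninitializedParameter()
p.materialize((4, 3), dtype=torch.float32)
print(type(p).__name__, tuple(p.shape))  # Parameter (4, 3)
```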
  {
    "library": "scipy",
    "name": "in_simplex",
    "source_code": "def in_simplex(self, S, v_x, A_j0=None):\n    A_11 = np.delete(S, 0, 0) - S[0]\n    sign_det_A_11 = np.sign(np.linalg.det(A_11))\n    if sign_det_A_11 == 0:\n        sign_det_A_11 = -1\n    if A_j0 is None:\n        A_j0 = S - v_x\n    for d in range(self.dim + 1):\n        det_A_jj = (-1) ** d * sign_det_A_11\n        sign_det_A_j0 = np.sign(np.linalg.det(np.delete(A_j0, d, 0)))\n        if det_A_jj == sign_det_A_j0:\n            continue\n        else:\n            return False\n    return True",
    "docstring": "Check if a vector v_x is in simplex . Parameters ---------- S : array_like Array containing simplex entries of vertices as rows v_x : A candidate vertex A_j0 : array, optional, Allows for A_j0 to be pre-calculated Returns ------- res : boolean True if is in",
    "type": "method",
    "file_path": "scipy\\scipy\\optimize\\_shgo_lib\\_complex.py",
    "ast_data": "FunctionDef name:in_simplex arg:self arg:S arg:v_x arg:A_j0 arguments arg arg arg arg Assign Call Assign Call Call If Compare Assign If Compare Assign For Call Assign Assign Call Call Call If Compare Return return:yes Return return:yes"
  },
  {
    "library": "scipy",
    "name": "_applyConstraints",
    "source_code": "def _applyConstraints(blockVectorV, factYBY, blockVectorBY, blockVectorY):\n    YBV = blockVectorBY.T.conj() @ blockVectorV\n    tmp = cho_solve(factYBY, YBV)\n    blockVectorV -= blockVectorY @ tmp",
    "docstring": "Changes blockVectorV in-place.",
    "type": "function",
    "file_path": "scipy\\scipy\\sparse\\linalg\\_eigen\\lobpcg\\lobpcg.py",
    "ast_data": "FunctionDef name:_applyConstraints arg:blockVectorV arg:factYBY arg:blockVectorBY arg:blockVectorY arguments arg arg arg arg Assign Call Assign Call"
  },
  {
    "library": "scipy",
    "name": "obl_cv_seq",
    "source_code": "def obl_cv_seq(m, n, c):\n    if not (isscalar(m) and isscalar(n) and isscalar(c)):\n        raise ValueError('Arguments must be scalars.')\n    if n != floor(n) or m != floor(m):\n        raise ValueError('Modes must be integers.')\n    if n - m > 199:\n        raise ValueError('Difference between n and m is too large.')\n    maxL = n - m + 1\n    return _specfun.segv(m, n, c, -1)[1][:maxL]",
    "docstring": "Characteristic values for oblate spheroidal wave functions. Compute a sequence of characteristic values for the oblate spheroidal wave functions for mode m and n'=m..n and spheroidal parameter c. References ---------- .. [1] Zhang, Shanjie and Jin, Jianming. \"Computation of Special Functions\", John Wiley and Sons, 1996.",
    "type": "function",
    "file_path": "scipy\\scipy\\special\\_basic.py",
    "ast_data": "FunctionDef name:obl_cv_seq arg:m arg:n arg:c arguments arg arg arg If BoolOp Call Call Call Raise Call If BoolOp Compare Call Compare Call Raise Call If Compare Raise Call Assign Return return:yes Call"
  },
  {
    "library": "django",
    "name": "forwards_plan",
    "source_code": "def forwards_plan(self, target):\n    if target not in self.nodes:\n        raise NodeNotFoundError('Node %r not a valid node' % (target,), target)\n    return self.iterative_dfs(self.node_map[target])",
    "docstring": "Given a node, return a list of which previous nodes (dependencies) must be applied, ending with the node itself. This is the list you would follow if applying the migrations to a database.",
    "type": "method",
    "file_path": "django\\django\\db\\migrations\\graph.py",
    "ast_data": "FunctionDef name:forwards_plan arg:self arg:target arguments arg arg If Compare Raise Call Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "rvs",
    "source_code": "def rvs(self, eigs, random_state=None, tol=1e-13, diag_tol=1e-07):\n    dim, eigs = self._process_parameters(eigs, tol=tol)\n    random_state = self._get_random_state(random_state)\n    m = ortho_group.rvs(dim, random_state=random_state)\n    m = np.dot(np.dot(m, np.diag(eigs)), m.T)\n    m = self._to_corr(m)\n    if abs(m.diagonal() - 1).max() > diag_tol:\n        raise RuntimeError('Failed to generate a valid correlation matrix')\n    return m",
    "docstring": "Draw random correlation matrices. Parameters ---------- eigs : 1d ndarray Eigenvalues of correlation matrix tol : float, optional Tolerance for input parameter checks diag_tol : float, optional Tolerance for deviation of the diagonal of the resulting matrix. Default: 1e-7 Raises ------ RuntimeError Floating point error prevented generating a valid correlation matrix. Returns ------- rvs : ndarray or scalar Random size N-dimensional matrices, dimension (size, dim, dim), each having eigenvalues eigs.",
    "type": "method",
    "file_path": "scipy\\scipy\\stats\\_multivariate.py",
    "ast_data": "FunctionDef name:rvs arg:self arg:eigs arg:random_state arg:tol arg:diag_tol arguments arg arg arg arg arg Assign Call Assign Call Assign Call Assign Call Call Call Assign Call If Compare Call Call Call Raise Call Return return:yes"
  },
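A hedged usage sketch: the eigenvalues must be non-negative and sum to the dimension (4 here) for the result to be a valid correlation matrix.

```python
import numpy as np
from scipy.stats import random_correlation

rng = np.random.default_rng(0)
m = random_correlation.rvs(eigs=(0.5, 0.8, 1.2, 1.5), random_state=rng)
assert np.allclose(np.diag(m), 1.0)  # unit diagonal, as checked by diag_tol
```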
  {
    "library": "pytorch",
    "name": "_remove_qconfig",
    "source_code": "def _remove_qconfig(module):\n    for child in module.children():\n        _remove_qconfig(child)\n    if hasattr(module, 'qconfig'):\n        del module.qconfig\n    _remove_activation_post_process(module)",
    "docstring": "Clean up the qconfig left in the module so that new qconfig can be propagated. Args: module: module to be cleaned up",
    "type": "function",
    "file_path": "pytorch\\torch\\ao\\quantization\\quantize.py",
    "ast_data": "FunctionDef name:_remove_qconfig arg:module arguments arg For Call Call If Call Call"
  },
  {
    "library": "matplotlib",
    "name": "set_joinstyle",
    "source_code": "@_docstring.interpd\ndef set_joinstyle(self, s):\n    js = JoinStyle(s)\n    self._joinstyle = js\n    self.stale = True",
    "docstring": "Set the . The default joinstyle is 'round' for and 'miter' for all other patches. Parameters ---------- s : or %(JoinStyle)s",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\patches.py",
    "ast_data": "FunctionDef name:set_joinstyle arg:self arg:s arguments arg arg Assign Call Assign Assign"
  },
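A hedged usage sketch: the joinstyle is only visible on thick outlines where segments meet.

```python
import matplotlib.pyplot as plt
from matplotlib.patches import Rectangle

fig, ax = plt.subplots()
r = Rectangle((0.2, 0.2), 0.5, 0.5, fill=False, linewidth=10)
r.set_joinstyle("bevel")  # accepted values: 'miter', 'round', 'bevel'
ax.add_patch(r)
```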
  {
    "library": "tensorflow",
    "name": "_get_sparse_tensors",
    "source_code": "@deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE, _FEATURE_COLUMN_DEPRECATION)\ndef _get_sparse_tensors(self, inputs, weight_collections=None, trainable=None):\n    del weight_collections\n    del trainable\n    input_tensor = inputs.get(self)\n    return self._get_sparse_tensors_for_input_tensor(input_tensor)",
    "docstring": "Converts dense inputs to SparseTensor so downstream code can use it.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\feature_column\\feature_column_v2.py",
    "ast_data": "FunctionDef name:_get_sparse_tensors arg:self arg:inputs arg:weight_collections arg:trainable arguments arg arg arg arg Assign Call Return return:yes Call Call"
  },
  {
    "library": "seaborn",
    "name": "as_hex",
    "source_code": "def as_hex(self):\n    hex = [mpl.colors.rgb2hex(rgb) for rgb in self]\n    return _ColorPalette(hex)",
    "docstring": "Return a color palette with hex codes instead of RGB values.",
    "type": "method",
    "file_path": "seaborn\\seaborn\\palettes.py",
    "ast_data": "FunctionDef name:as_hex arg:self arguments arg Assign Call Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "transform",
    "source_code": "def transform(self, raw_X):\n    raw_X = iter(raw_X)\n    if self.input_type == 'dict':\n        raw_X = (_iteritems(d) for d in raw_X)\n    elif self.input_type == 'string':\n        first_raw_X = next(raw_X)\n        if isinstance(first_raw_X, str):\n            raise ValueError('Samples can not be a single string. The input must be an iterable over iterables of strings.')\n        raw_X_ = chain([first_raw_X], raw_X)\n        raw_X = (((f, 1) for f in x) for x in raw_X_)\n    indices, indptr, values = _hashing_transform(raw_X, self.n_features, self.dtype, self.alternate_sign, seed=0)\n    n_samples = indptr.shape[0] - 1\n    if n_samples == 0:\n        raise ValueError('Cannot vectorize empty sequence.')\n    X = sp.csr_matrix((values, indices, indptr), dtype=self.dtype, shape=(n_samples, self.n_features))\n    X.sum_duplicates()\n    return X",
    "docstring": "Transform a sequence of instances to a scipy.sparse matrix. Parameters ---------- raw_X : iterable over iterable over raw features, length = n_samples Samples. Each sample must be iterable an (e.g., a list or tuple) containing/generating feature names (and optionally values, see the input_type constructor argument) which will be hashed. raw_X need not support the len function, so it can be the result of a generator; n_samples is determined on the fly. Returns ------- X : sparse matrix of shape (n_samples, n_features) Feature matrix, for use with estimators or further transformers.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\feature_extraction\\_hash.py",
    "ast_data": "FunctionDef name:transform arg:self arg:raw_X arguments arg arg Assign Call If Compare Assign Call If Compare Assign Call If Call Raise Call Assign Call Assign Assign Call Assign If Compare Raise Call Assign Call Call Return return:yes"
  },
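A hedged usage sketch with the default `input_type='dict'`; feature names are hashed into a fixed-width sparse matrix.

```python
from sklearn.feature_extraction import FeatureHasher

h = FeatureHasher(n_features=8)
X = h.transform([{"dog": 1, "cat": 2}, {"dog": 2, "run": 5}])
print(X.shape)  # (2, 8), a scipy.sparse CSR matrix
```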
  {
    "library": "pytorch",
    "name": "_path_from_root",
    "source_code": "@property\ndef _path_from_root(self) -> Generator[CUDAGraphNode, None, None]:\n    nodes = reversed(list(self._path_to_root))\n    yield from nodes",
    "docstring": "Returns all nodes in the path starting at the root and ending at self",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\cudagraph_trees.py",
    "ast_data": "FunctionDef name:_path_from_root arg:self arguments arg Assign Call Call"
  },
  {
    "library": "pytorch",
    "name": "set_devices",
    "source_code": "def set_devices(self, devices: list[DeviceType]):\n    self.devices = _to_device_list(devices)",
    "docstring": "Set local devices used by the TensorPipe RPC agent. When processing CUDA RPC requests, the TensorPipe RPC agent will properly synchronize CUDA streams for all devices in this ``. Args: devices (List of int, str, or torch.device): local devices used by the TensorPipe RPC agent.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\rpc\\options.py",
    "ast_data": "FunctionDef name:set_devices arg:self arg:devices arguments arg arg Assign Call"
  },
  {
    "library": "tensorflow",
    "name": "target",
    "source_code": "@abc.abstractmethod\ndef target(self):\n    raise NotImplementedError('Calling an abstract method.')",
    "docstring": "Returns the optimization target for this variable.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\training\\optimizer.py",
    "ast_data": "FunctionDef name:target arg:self arguments arg Raise Call"
  },
  {
    "library": "pytorch",
    "name": "get_swapped_custom_module_class",
    "source_code": "def get_swapped_custom_module_class(custom_module, custom_module_class_mapping, qconfig):\n    quant_type = get_quant_type(qconfig)\n    class_mapping = custom_module_class_mapping.get(quant_type, {})\n    assert type(custom_module) in class_mapping, f'did not find corresponding observed module class for {type(custom_module)} in mapping: {class_mapping}'\n    return class_mapping[type(custom_module)]",
    "docstring": "Get the observed/quantized custom module class that we need to swap to Input: custom_module: input, can be an instance of either a float or observed custom module custom_module_class_mapping: the float to observed or observed to quantized custom module class mapping qconfig: qconfig configured for the custom module Output: corresponding observed/quantized custom module class for input custom module instance",
    "type": "function",
    "file_path": "pytorch\\torch\\ao\\quantization\\utils.py",
    "ast_data": "FunctionDef name:get_swapped_custom_module_class arg:custom_module arg:custom_module_class_mapping arg:qconfig arguments arg arg arg Assign Call Assign Call Compare Call Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "append",
    "source_code": "def append(self, module: Module) -> Self:\n    self.add_module(str(len(self)), module)\n    return self",
    "docstring": "Append a given module to the end. Args: module (nn.Module): module to append Example:: >>> import torch.nn as nn >>> n = nn.Sequential(nn.Linear(1, 2), nn.Linear(2, 3)) >>> n.append(nn.Linear(3, 4)) Sequential( (0): Linear(in_features=1, out_features=2, bias=True) (1): Linear(in_features=2, out_features=3, bias=True) (2): Linear(in_features=3, out_features=4, bias=True) )",
    "type": "method",
    "file_path": "pytorch\\torch\\nn\\modules\\container.py",
    "ast_data": "FunctionDef name:append arg:self arg:module arguments arg arg Call Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "store",
    "source_code": "def store(self, name: str, index: sympy.Expr, value: CSEVariable, mode: StoreMode=None) -> str:\n    assert self.mask is not None, 'Mask is required for inner stores in modifications'\n    assert mode == 'atomic_add', 'Only atomic_add is supported for inner stores'\n    buf_name = self._add_kernel_input(name)\n    index_str = self._process_indexing(index)\n    index_str = f'tl.broadcast_to({index_str}, {value}.shape)'\n    store = f\"tl.atomic_add({buf_name} + {index_str}, {value}, {self.mask}, sem='relaxed')\"\n    return store",
    "docstring": "Currently only supports stores for atomic adds coming from scatter nodes This is used by flex_attention's backwards grad for captured buffers, see zeros_and_scatter lowering",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\select_algorithm.py",
    "ast_data": "FunctionDef name:store arg:self arg:name arg:index arg:value arg:mode arguments arg arg arg arg arg Compare Compare Assign Call Assign Call Assign Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "Encoder",
    "source_code": "class Encoder(json.JSONEncoder):\n\n    def default(self, obj):\n        if isinstance(obj, tensor_shape.TensorShape):\n            items = obj.as_list() if obj.rank is not None else None\n            return {'class_name': 'TensorShape', 'items': items}\n        return get_json_type(obj)\n\n    def encode(self, obj):\n        return super(Encoder, self).encode(_encode_tuple(obj))",
    "docstring": "JSON encoder and decoder that handles TensorShapes and tuples.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\saving\\saved_model\\json_utils.py",
    "ast_data": "ClassDef name:Encoder FunctionDef name:default arg:self arg:obj arguments arg arg If Call Assign Compare Call Return return:yes Return return:yes Call FunctionDef name:encode arg:self arg:obj arguments arg arg Return return:yes Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "pattern",
    "source_code": "def pattern(self):\n    return self._pattern(self.root, self._digest)",
    "docstring": "Export the Trie to a regex pattern.",
    "type": "method",
    "file_path": "pytorch\\torch\\utils\\hipify\\hipify_python.py",
    "ast_data": "FunctionDef name:pattern arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "django",
    "name": "ask_not_null_alteration",
    "source_code": "def ask_not_null_alteration(self, field_name, model_name):\n    return None",
    "docstring": "Changing a NULL field to NOT NULL.",
    "type": "method",
    "file_path": "django\\django\\db\\migrations\\questioner.py",
    "ast_data": "FunctionDef name:ask_not_null_alteration arg:self arg:field_name arg:model_name arguments arg arg arg Return return:no"
  },
  {
    "library": "scipy",
    "name": "_expect",
    "source_code": "def _expect(fun, lb, ub, x0, inc, maxcount=1000, tolerance=1e-10, chunksize=32):\n    if ub - lb <= chunksize:\n        supp = np.arange(lb, ub + 1, inc)\n        vals = fun(supp)\n        return np.sum(vals)\n    if x0 < lb:\n        x0 = lb\n    if x0 > ub:\n        x0 = ub\n    count, tot = (0, 0.0)\n    for x in _iter_chunked(x0, ub + 1, chunksize=chunksize, inc=inc):\n        count += x.size\n        delta = np.sum(fun(x))\n        tot += delta\n        if abs(delta) < tolerance * x.size:\n            break\n        if count > maxcount:\n            warnings.warn('expect(): sum did not converge', RuntimeWarning, stacklevel=3)\n            return tot\n    for x in _iter_chunked(x0 - 1, lb - 1, chunksize=chunksize, inc=-inc):\n        count += x.size\n        delta = np.sum(fun(x))\n        tot += delta\n        if abs(delta) < tolerance * x.size:\n            break\n        if count > maxcount:\n            warnings.warn('expect(): sum did not converge', RuntimeWarning, stacklevel=3)\n            break\n    return tot",
    "docstring": "Helper for computing the expectation value of .",
    "type": "function",
    "file_path": "scipy\\scipy\\stats\\_distn_infrastructure.py",
    "ast_data": "FunctionDef name:_expect arg:fun arg:lb arg:ub arg:x0 arg:inc arg:maxcount arg:tolerance arg:chunksize arguments arg arg arg arg arg arg arg arg If Compare Assign Call Assign Call Return return:yes Call If Compare Assign If Compare Assign Assign For Call Assign Call Call If Compare Call If Compare Call Return return:yes For Call Assign Call Call If Compare Call If Compare Call Return return:yes"
  },
  {
    "library": "scipy",
    "name": "prune",
    "source_code": "def prune(self):\n    major_dim = self._swap(self._shape_as_2d)[0]\n    if len(self.indptr) != major_dim + 1:\n        raise ValueError('index pointer has invalid length')\n    if len(self.indices) < self.nnz:\n        raise ValueError('indices array has fewer than nnz elements')\n    if len(self.data) < self.nnz:\n        raise ValueError('data array has fewer than nnz elements')\n    self.indices = _prune_array(self.indices[:self.nnz])\n    self.data = _prune_array(self.data[:self.nnz])",
    "docstring": "Remove empty space after all non-zero elements.",
    "type": "method",
    "file_path": "scipy\\scipy\\sparse\\_compressed.py",
    "ast_data": "FunctionDef name:prune arg:self arguments arg Assign Call If Compare Call Raise Call If Compare Call Raise Call If Compare Call Raise Call Assign Call Assign Call"
  },
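A short sketch of the invariant `prune` enforces on compressed sparse containers: afterwards, `indices` and `data` hold exactly `nnz` entries. `csr_array` here is just one concrete compressed format.

```python
import numpy as np
from scipy.sparse import csr_array

A = csr_array(np.array([[1.0, 0.0], [0.0, 2.0]]))
A.prune()  # trim any excess storage beyond the nnz stored values
assert len(A.data) == len(A.indices) == A.nnz == 2
```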
  {
    "library": "tensorflow",
    "name": "stack",
    "source_code": "def stack(context=1):\n    return _inspect.stack(context)[1:]",
    "docstring": "TFDecorator-aware replacement for inspect.stack.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\util\\tf_inspect.py",
    "ast_data": "FunctionDef name:stack arg:context arguments arg Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "get_color",
    "source_code": "def get_color(index, data, cmap):\n    val = {'[0, 0]': data[0, 0], \"[0, N']\": data[0, -1], \"[M', 0]\": data[-1, 0], \"[M', N']\": data[-1, -1]}[index]\n    return cmap(val / data.max())",
    "docstring": "Return the data color of an index.",
    "type": "function",
    "file_path": "matplotlib\\galleries\\users_explain\\artists\\imshow_extent.py",
    "ast_data": "FunctionDef name:get_color arg:index arg:data arg:cmap arguments arg arg arg Assign Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "_init_fqns",
    "source_code": "def _init_fqns(self) -> None:\n    assert self._is_root\n    root_module = self._modules[0]\n    param_to_fsdp_param: dict[nn.Parameter, FSDPParam] = {}\n    module_to_fsdp_param_group: dict[nn.Module, FSDPParamGroup] = {}\n    for state in self._state_ctx.all_states:\n        if (fsdp_param_group := state._fsdp_param_group):\n            for fsdp_param in fsdp_param_group.fsdp_params:\n                param_to_fsdp_param[fsdp_param.sharded_param] = fsdp_param\n            for module in fsdp_param_group.modules:\n                module_to_fsdp_param_group[module] = fsdp_param_group\n    for param_name, param in root_module.named_parameters():\n        if param in param_to_fsdp_param:\n            param_to_fsdp_param[param]._param_fqn = param_name\n    for module_name, module in root_module.named_modules():\n        if module in module_to_fsdp_param_group:\n            module_fqn = module_to_fsdp_param_group[module]._module_fqn\n            if module_fqn is None:\n                module_to_fsdp_param_group[module]._module_fqn = module_name\n            else:\n                assert isinstance(module_fqn, str), f'{module_fqn}'\n                module_fqn += f', {module_name}'\n                module_to_fsdp_param_group[module]._module_fqn = module_fqn",
    "docstring": "Sets module and parameter FQN attributes for debugging.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\fsdp\\_fully_shard\\_fsdp_state.py",
    "ast_data": "FunctionDef name:_init_fqns arg:self arguments arg Assign For If For Assign For Assign For Call If Compare Assign For Call If Compare Assign If Compare Assign Call Assign"
  },
  {
    "library": "pytorch",
    "name": "convert_shape_to_inductor",
    "source_code": "def convert_shape_to_inductor(lst: Iterable[Union[int, torch.SymInt]]) -> list[sympy.Expr]:\n    return [sympy.sympify(i) for i in lst]",
    "docstring": "Gets the shape and stride of a tensor. For non-symbolic tensors, this is trivial. But for symbolic tensors, we need to map from SymIntNode into sympy.Expr.",
    "type": "function",
    "file_path": "pytorch\\torch\\_inductor\\utils.py",
    "ast_data": "FunctionDef name:convert_shape_to_inductor arg:lst arguments arg Return return:yes Call"
  },
  {
    "library": "numpy",
    "name": "Repository",
    "source_code": "class Repository(DataSource):\n\n    def __init__(self, baseurl, destpath=os.curdir):\n        DataSource.__init__(self, destpath=destpath)\n        self._baseurl = baseurl\n\n    def __del__(self):\n        DataSource.__del__(self)\n\n    def _fullpath(self, path):\n        splitpath = path.split(self._baseurl, 2)\n        if len(splitpath) == 1:\n            result = os.path.join(self._baseurl, path)\n        else:\n            result = path\n        return result\n\n    def _findfile(self, path):\n        return DataSource._findfile(self, self._fullpath(path))\n\n    def abspath(self, path):\n        return DataSource.abspath(self, self._fullpath(path))\n\n    def exists(self, path):\n        return DataSource.exists(self, self._fullpath(path))\n\n    def open(self, path, mode='r', encoding=None, newline=None):\n        return DataSource.open(self, self._fullpath(path), mode, encoding=encoding, newline=newline)\n\n    def listdir(self):\n        if self._isurl(self._baseurl):\n            raise NotImplementedError('Directory listing of URLs, not supported yet.')\n        else:\n            return os.listdir(self._baseurl)",
    "docstring": "Repository(baseurl, destpath='.') A data repository where multiple DataSource's share a base URL/directory. extends by prepending a base URL (or directory) to all the files it handles. Use when you will be working with multiple files from one base URL. Initialize with the base URL, then refer to each file by its filename only. Parameters ---------- baseurl : str Path to the local directory or remote location that contains the data files. destpath : str or None, optional Path to the directory where the source file gets downloaded to for use. If is None, a temporary directory will be created. The default path is the current directory. Examples -------- To analyze all files in the repository, do something like this (note: this is not self-contained code):: >>> repos = np.lib._datasource.Repository('/home/user/data/dir/') >>> for filename in filelist: ... fp = repos.open(filename) ... fp.analyze() ... fp.close() Similarly you could use a URL for a repository:: >>> repos = np.lib._datasource.Repository('",
    "type": "class",
    "file_path": "numpy\\numpy\\lib\\_datasource.py",
    "ast_data": "ClassDef name:Repository FunctionDef name:__init__ arg:self arg:baseurl arg:destpath arguments arg arg arg Call Assign FunctionDef name:__del__ arg:self arguments arg Call FunctionDef name:_fullpath arg:self arg:path arguments arg arg Assign Call If Compare Call Assign Call Assign Return return:yes FunctionDef name:_findfile arg:self arg:path arguments arg arg Return return:yes Call Call FunctionDef name:abspath arg:self arg:path arguments arg arg Return return:yes Call Call FunctionDef name:exists arg:self arg:path arguments arg arg Return return:yes Call Call FunctionDef name:open arg:self arg:path arg:mode arg:encoding arg:newline arguments arg arg arg arg arg Return return:yes Call Call FunctionDef name:listdir arg:self arguments arg If Call Raise Call Return return:yes Call"
  },
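A minimal local sketch of the `Repository` record above; `numpy.lib._datasource` is a private module, so this is illustrative rather than supported API.

```python
import os
import tempfile
from numpy.lib._datasource import Repository  # private numpy module

base = tempfile.mkdtemp()
with open(os.path.join(base, "data.txt"), "w") as f:
    f.write("1 2 3\n")

repos = Repository(base)          # every name below resolves against base
print(repos.exists("data.txt"))   # True
with repos.open("data.txt") as fp:
    print(fp.read())              # 1 2 3
```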
  {
    "library": "tensorflow",
    "name": "while_loop_op",
    "source_code": "@staticmethod\ndef while_loop_op(op):\n    return control_flow_util.IsLoopSwitch(op) or control_flow_util.IsLoopMerge(op) or control_flow_util.IsLoopEnter(op) or control_flow_util.IsLoopExit(op) or TensorTracer.loop_cond_op(op) or (op.type in ('RefNextIteration', 'NextIteration'))",
    "docstring": "Returns true if op is one of the special ops of in a while loop. Args: op: A tf.Operation. Returns: True if the given op is one of [Switch, Merge, Enter, Exit, NextIteration, LoopCond], which are all building blocks for TF while loops.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\tpu\\tensor_tracer.py",
    "ast_data": "FunctionDef name:while_loop_op arg:op arguments arg Return return:yes BoolOp Call Call Call Call Call Compare"
  },
  {
    "library": "django",
    "name": "get_default_timezone_name",
    "source_code": "def get_default_timezone_name():\n    return _get_timezone_name(get_default_timezone())",
    "docstring": "Return the name of the default time zone.",
    "type": "function",
    "file_path": "django\\django\\utils\\timezone.py",
    "ast_data": "FunctionDef name:get_default_timezone_name arguments Return return:yes Call Call"
  },
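A standalone sketch for the timezone helper above; configuring settings by hand is an assumption about running outside a Django project.

```python
from django.conf import settings

# Django utilities read TIME_ZONE from settings, so configure them first.
settings.configure(USE_TZ=True, TIME_ZONE="UTC")

from django.utils import timezone

print(timezone.get_default_timezone_name())  # UTC
```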
  {
    "library": "tensorflow",
    "name": "serialize_sparse_tensors",
    "source_code": "def serialize_sparse_tensors(tensors):\n    ret = nest.pack_sequence_as(tensors, [sparse_ops.serialize_sparse(tensor, out_type=dtypes.variant) if isinstance(tensor, sparse_tensor.SparseTensor) else tensor for tensor in nest.flatten(tensors)])\n    return ret",
    "docstring": "Serializes sparse tensors. Args: tensors: a tensor structure to serialize. Returns: with any sparse tensors replaced by their serialized version.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\data\\util\\sparse.py",
    "ast_data": "FunctionDef name:serialize_sparse_tensors arg:tensors arguments arg Assign Call Call Call Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "__getitem__",
    "source_code": "def __getitem__(self, item):\n    if not self._isinit:\n        self._init()\n    if item == 0:\n        origin_1_as_int = int(self._origin[1] * self.M)\n        if origin_1_as_int > self.M - 1:\n            origin_1_as_int = self.M - 1\n        one_d_lut = self._lut[:, origin_1_as_int]\n        new_cmap = ListedColormap(one_d_lut, name=f'{self.name}_0')\n    elif item == 1:\n        origin_0_as_int = int(self._origin[0] * self.N)\n        if origin_0_as_int > self.N - 1:\n            origin_0_as_int = self.N - 1\n        one_d_lut = self._lut[origin_0_as_int, :]\n        new_cmap = ListedColormap(one_d_lut, name=f'{self.name}_1')\n    else:\n        raise KeyError(f'only 0 or 1 are valid keys for BivarColormap, not {item!r}')\n    new_cmap._rgba_bad = self._rgba_bad\n    if self.shape in ['ignore', 'circleignore']:\n        new_cmap.set_over(self._rgba_outside)\n        new_cmap.set_under(self._rgba_outside)\n    return new_cmap",
    "docstring": "Creates and returns a colorbar along the selected axis",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\colors.py",
    "ast_data": "FunctionDef name:__getitem__ arg:self arg:item arguments arg arg If Call If Compare Assign Call If Compare Assign Assign Assign Call If Compare Assign Call If Compare Assign Assign Assign Call Raise Call Assign If Compare Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "add_text",
    "source_code": "def add_text(self, tag, text_string, global_step=None, walltime=None):\n    torch._C._log_api_usage_once('tensorboard.logging.add_text')\n    self._get_file_writer().add_summary(text(tag, text_string), global_step, walltime)",
    "docstring": "Add text data to summary. Args: tag (str): Data identifier text_string (str): String to save global_step (int): Global step value to record walltime (float): Optional override default walltime (time.time()) seconds after epoch of event Examples:: writer.add_text('lstm', 'This is an lstm', 0) writer.add_text('rnn', 'This is an rnn', 10)",
    "type": "method",
    "file_path": "pytorch\\torch\\utils\\tensorboard\\writer.py",
    "ast_data": "FunctionDef name:add_text arg:self arg:tag arg:text_string arg:global_step arg:walltime arguments arg arg arg arg arg Call Call Call Call"
  },
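The documented usage of `add_text`, expanded into a runnable sketch; the log directory name is arbitrary.

```python
from torch.utils.tensorboard import SummaryWriter

writer = SummaryWriter(log_dir="runs/demo")  # hypothetical log dir
writer.add_text("lstm", "This is an lstm", 0)
writer.add_text("rnn", "This is an rnn", 10)
writer.close()  # flush events so TensorBoard's Text tab can render them
```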
  {
    "library": "pandas",
    "name": "IncompatibilityWarning",
    "source_code": "class IncompatibilityWarning(Warning):\n    pass",
    "docstring": "Warning raised when trying to use where criteria on an incompatible HDF5 file.",
    "type": "class",
    "file_path": "pandas\\pandas\\errors\\__init__.py",
    "ast_data": "ClassDef name:IncompatibilityWarning"
  },
  {
    "library": "numpy",
    "name": "asarray",
    "source_code": "@set_module('numpy.char')\ndef asarray(obj, itemsize=None, unicode=None, order=None):\n    return array(obj, itemsize, copy=False, unicode=unicode, order=order)",
    "docstring": "Convert the input to a , copying the data only if necessary. Versus a NumPy array of dtype or , this class adds the following functionality: 1) values automatically have whitespace removed from the end when indexed 2) comparison operators automatically remove whitespace from the end when comparing values 3) vectorized string operations are provided as methods (e.g. ) and infix operators (e.g. `itemsizeitemsizeobjitemsizeitemsizeobjobjitemsize~numpy.char.chararrayobj~numpy.char.chararraystr_unicode_` - a Python str or unicode object, then the unicode setting of the output array will be automatically determined. order : {'C', 'F'}, optional Specify the order of the array. If order is 'C' (default), then the array will be in C-contiguous order (last-index varies the fastest). If order is 'F', then the returned array will be in Fortran-contiguous order (first-index varies the fastest). Examples -------- >>> import numpy as np >>> np.char.asarray(['hello', 'world']) chararray(['hello', 'world'], dtype='<U5')",
    "type": "function",
    "file_path": "numpy\\numpy\\_core\\defchararray.py",
    "ast_data": "FunctionDef name:asarray arg:obj arg:itemsize arg:unicode arg:order arguments arg arg arg arg Return return:yes Call Call"
  },
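A sketch of the chararray behaviors the `asarray` docstring lists: whitespace-stripped indexing, whitespace-insensitive comparison, and vectorized string methods.

```python
import numpy as np

a = np.char.asarray(["hello ", "world"])
print(a[0])          # 'hello'  (trailing whitespace removed when indexed)
print(a == "hello")  # [ True False]  (comparison also strips the end)
print(a.upper())     # ['HELLO' 'WORLD']  (vectorized string method)
```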
  {
    "library": "tensorflow",
    "name": "limits",
    "source_code": "@property\ndef limits(self, clip_negative=True):\n    if self.as_numpy_dtype in dtype_range:\n        min, max = dtype_range[self.as_numpy_dtype]\n    else:\n        raise ValueError(str(self) + ' does not have defined limits.')\n    if clip_negative:\n        min = 0\n    return (min, max)",
    "docstring": "Return intensity limits, i.e. (min, max) tuple, of the dtype. Args: clip_negative : bool, optional If True, clip the negative range (i.e. return 0 for min intensity) even if the image dtype allows negative values. Returns min, max : tuple Lower and upper intensity limits.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\dtypes.py",
    "ast_data": "FunctionDef name:limits arg:self arg:clip_negative arguments arg arg If Compare Assign Raise Call Call If Assign Return return:yes"
  },
  {
    "library": "django",
    "name": "Coalesce",
    "source_code": "class Coalesce(Func):\n    function = 'COALESCE'\n\n    def __init__(self, *expressions, **extra):\n        if len(expressions) < 2:\n            raise ValueError('Coalesce must take at least two expressions')\n        super().__init__(*expressions, **extra)\n\n    @property\n    def empty_result_set_value(self):\n        for expression in self.get_source_expressions():\n            result = expression.empty_result_set_value\n            if result is NotImplemented or result is not None:\n                return result\n        return None\n\n    def as_oracle(self, compiler, connection, **extra_context):\n        if self.output_field.get_internal_type() == 'TextField':\n            clone = self.copy()\n            clone.set_source_expressions([Func(expression, function='TO_NCLOB') for expression in self.get_source_expressions()])\n            return super(Coalesce, clone).as_sql(compiler, connection, **extra_context)\n        return self.as_sql(compiler, connection, **extra_context)",
    "docstring": "Return, from left to right, the first non-null expression.",
    "type": "class",
    "file_path": "django\\django\\db\\models\\functions\\comparison.py",
    "ast_data": "ClassDef name:Coalesce Assign FunctionDef name:__init__ arg:self arguments arg arg arg If Compare Call Raise Call Call Call FunctionDef name:empty_result_set_value arg:self arguments arg For Call Assign If BoolOp Compare Compare Return return:yes Return return:no FunctionDef name:as_oracle arg:self arg:compiler arg:connection arguments arg arg arg arg If Compare Call Assign Call Call Call Call Return return:yes Call Call Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "set_state",
    "source_code": "@contextlib.contextmanager\ndef set_state(state):\n    old_state = get_state()\n    _uarray.set_state(state)\n    try:\n        yield\n    finally:\n        _uarray.set_state(old_state, True)",
    "docstring": "A context manager that sets the state of the backends to one returned by :obj:. See Also -------- get_state Gets a state to be set by this context manager.",
    "type": "function",
    "file_path": "scipy\\scipy\\_lib\\_uarray\\_backend.py",
    "ast_data": "FunctionDef name:set_state arg:state arguments arg Assign Call Call Try Call"
  },
  {
    "library": "pytorch",
    "name": "_OverlapInfo",
    "source_code": "class _OverlapInfo:\n\n    def __init__(self, world_size) -> None:\n        self.status: _OverlapStatus = _OverlapStatus.UNINITIALIZED\n        self.shard_buckets: bool = False\n        self.params_per_bucket: list[list[torch.Tensor]] = []\n        self.params_per_rank: list[list[torch.Tensor]] = [[] for _ in range(world_size)]\n        self.offsets: dict[int, int] = {}\n        self.assigned_ranks_per_bucket: list[set[int]] = []\n        self.num_bucket_assignments: int = 0\n        self.total_size: Optional[int] = None\n        self.broadcast_handles: list[Any] = []\n        self.bucket_indices_seen: list[int] = []\n        self.bucket_index_to_future: dict[int, torch.futures.Future] = {}\n        self.bucket_index_to_bucket: dict[int, dist.GradBucket] = {}\n\n    def wait_for_broadcasts(self) -> None:\n        assert len(self.broadcast_handles) == self.num_bucket_assignments, f'Missing at least one broadcast handle on rank {dist.get_rank()}'\n        _ = [x.wait() for x in self.broadcast_handles]\n        self.broadcast_handles.clear()\n\n    def clear_per_iter_info(self) -> None:\n        self.bucket_indices_seen.clear()\n        self.bucket_index_to_future.clear()\n        self.bucket_index_to_bucket.clear()",
    "docstring": "Information needed by :class: to overlap with :class:. Arguments: world_size (int): world size of the process group being used. Attributes: shard_buckets (bool): if `DistributedDataParallelZeroRedundancyOptimizerZeroRedundancyOptimizer_OverlapStatusdictDistributedDataParallellistdictdictlist` of the bucket indices seen on this iteration.",
    "type": "class",
    "file_path": "pytorch\\torch\\distributed\\optim\\zero_redundancy_optimizer.py",
    "ast_data": "ClassDef name:_OverlapInfo FunctionDef name:__init__ arg:self arg:world_size arguments arg arg Call FunctionDef name:wait_for_broadcasts arg:self arguments arg Compare Call Call Assign Call Call FunctionDef name:clear_per_iter_info arg:self arguments arg Call Call Call"
  },
  {
    "library": "scipy",
    "name": "MieleCantrell",
    "source_code": "class MieleCantrell(Benchmark):\n\n    def __init__(self, dimensions=4):\n        Benchmark.__init__(self, dimensions)\n        self._bounds = list(zip([-1.0] * self.N, [1.0] * self.N))\n        self.global_optimum = [[0.0, 1.0, 1.0, 1.0]]\n        self.fglob = 0.0\n\n    def fun(self, x, *args):\n        self.nfev += 1\n        return (exp(-x[0]) - x[1]) ** 4 + 100 * (x[1] - x[2]) ** 6 + tan(x[2] - x[3]) ** 4 + x[0] ** 8",
    "docstring": "Miele-Cantrell [1]_ objective function. This class defines the Miele-Cantrell global optimization problem. This is a multimodal minimization problem defined as follows: .. math:: f_{\\text{MieleCantrell}}({x}) = (e^{-x_1} - x_2)^4 + 100(x_2 - x_3)^6 + \\tan^4(x_3 - x_4) + x_1^8 with :math: for :math:. *Global optimum*: :math: for :math: .. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions For Global Optimization Problems Int. Journal of Mathematical Modelling and Numerical Optimisation, 2013, 4, 150-194.",
    "type": "class",
    "file_path": "scipy\\benchmarks\\benchmarks\\go_benchmark_functions\\go_funcs_M.py",
    "ast_data": "ClassDef name:MieleCantrell FunctionDef name:__init__ arg:self arg:dimensions arguments arg arg Call Assign Call Call Assign Assign FunctionDef name:fun arg:self arg:x arguments arg arg arg Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "internal_assert",
    "source_code": "def internal_assert(pred: bool, assert_msg: str) -> None:\n    if not pred:\n        raise InternalError(assert_msg)",
    "docstring": "This is exir's custom assert method. It internally just throws InternalError. Note that the sole purpose is to throw our own error while maintaining similar syntax as python assert.",
    "type": "function",
    "file_path": "pytorch\\torch\\_export\\error.py",
    "ast_data": "FunctionDef name:internal_assert arg:pred arg:assert_msg arguments arg arg If Raise Call"
  },
  {
    "library": "scikit-learn",
    "name": "load_data",
    "source_code": "@memory.cache\ndef load_data(dtype=np.float32, order='F'):\n    print('Loading dataset...')\n    data = fetch_openml('mnist_784', as_frame=True)\n    X = check_array(data['data'], dtype=dtype, order=order)\n    y = data['target']\n    X = X / 255\n    print('Creating train-test split...')\n    n_train = 60000\n    X_train = X[:n_train]\n    y_train = y[:n_train]\n    X_test = X[n_train:]\n    y_test = y[n_train:]\n    return (X_train, X_test, y_train, y_test)",
    "docstring": "Load the data, then cache and memmap the train/test split",
    "type": "function",
    "file_path": "scikit-learn\\benchmarks\\bench_mnist.py",
    "ast_data": "FunctionDef name:load_data arg:dtype arg:order arguments arg arg Call Assign Call Assign Call Assign Assign Call Assign Assign Assign Assign Assign Return return:yes"
  },
  {
    "library": "numpy",
    "name": "lagline",
    "source_code": "def lagline(off, scl):\n    if scl != 0:\n        return np.array([off + scl, -scl])\n    else:\n        return np.array([off])",
    "docstring": "Laguerre series whose graph is a straight line. Parameters ---------- off, scl : scalars The specified line is given by ``. See Also -------- numpy.polynomial.polynomial.polyline numpy.polynomial.chebyshev.chebline numpy.polynomial.legendre.legline numpy.polynomial.hermite.hermline numpy.polynomial.hermite_e.hermeline Examples -------- >>> from numpy.polynomial.laguerre import lagline, lagval >>> lagval(0,lagline(3, 2)) 3.0 >>> lagval(1,lagline(3, 2)) 5.0",
    "type": "function",
    "file_path": "numpy\\numpy\\polynomial\\laguerre.py",
    "ast_data": "FunctionDef name:lagline arg:off arg:scl arguments arg arg If Compare Return return:yes Call Return return:yes Call"
  },
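The docstring's example, spelled out: `lagline(off, scl)` returns the Laguerre-basis coefficients of the line off + scl*x, which `lagval` evaluates back.

```python
import numpy as np
from numpy.polynomial.laguerre import lagline, lagval

coef = lagline(3, 2)   # Laguerre-basis coefficients of 3 + 2*x
print(coef)            # [ 5. -2.]
print(lagval(np.array([0.0, 1.0]), coef))  # [3. 5.]
```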
  {
    "library": "pytorch",
    "name": "get_arg_indices_of_inputs_to_log",
    "source_code": "def get_arg_indices_of_inputs_to_log(node: Node) -> list[int]:\n    if len(node.args) == 0:\n        return []\n    if node.op == 'call_function' and (node.target in (torch.add, torch.ops.quantized.add, operator.add) or node.target in (torch.mul, torch.ops.quantized.mul, operator.mul)):\n        result = [i for i in range(2) if type(node.args[i]) == Node]\n        return result\n    return [0]",
    "docstring": "Returns the indices of args of the node which we should attach loggers to, if input logging is enabled. For example, * for (x + y), returns [0, 1] * for (1 + y), returns [1] * for (x + 1), returns [0] * for (linear(x, w, b)) returns [0] * by default, returns [0]",
    "type": "function",
    "file_path": "pytorch\\torch\\ao\\ns\\fx\\utils.py",
    "ast_data": "FunctionDef name:get_arg_indices_of_inputs_to_log arg:node arguments arg If Compare Call Return return:no If BoolOp Compare BoolOp Compare Compare Assign Call Compare Call Return return:yes Return return:yes"
  },
  {
    "library": "pandas",
    "name": "_generate_trimmed_row",
    "source_code": "def _generate_trimmed_row(self, max_cols: int) -> list:\n    index_headers = [_element('th', f'{self.css['row_heading']} {self.css['level']}{c} {self.css['row_trim']}', '...', not self.hide_index_[c], attributes='') for c in range(self.data.index.nlevels)]\n    data: list = []\n    visible_col_count: int = 0\n    for c, _ in enumerate(self.columns):\n        data_element_visible = c not in self.hidden_columns\n        if data_element_visible:\n            visible_col_count += 1\n        if self._check_trim(visible_col_count, max_cols, data, 'td', f'{self.css['data']} {self.css['row_trim']} {self.css['col_trim']}'):\n            break\n        data.append(_element('td', f'{self.css['data']} {self.css['col']}{c} {self.css['row_trim']}', '...', data_element_visible, attributes=''))\n    return index_headers + data",
    "docstring": "When a render has too many rows we generate a trimming row containing \"...\" Parameters ---------- max_cols : int Number of permissible columns Returns ------- list of elements",
    "type": "method",
    "file_path": "pandas\\pandas\\io\\formats\\style_render.py",
    "ast_data": "FunctionDef name:_generate_trimmed_row arg:self arg:max_cols arguments arg arg Assign Call Call For Call Assign Compare If If Call Call Call Return return:yes"
  },
  {
    "library": "django",
    "name": "GeoFlexibleFieldLookupDict",
    "source_code": "class GeoFlexibleFieldLookupDict(FlexibleFieldLookupDict):\n    base_data_types_reverse = {**FlexibleFieldLookupDict.base_data_types_reverse, 'point': 'GeometryField', 'linestring': 'GeometryField', 'polygon': 'GeometryField', 'multipoint': 'GeometryField', 'multilinestring': 'GeometryField', 'multipolygon': 'GeometryField', 'geometrycollection': 'GeometryField'}",
    "docstring": "Subclass that includes updates the dict for geometry field types.",
    "type": "class",
    "file_path": "django\\django\\contrib\\gis\\db\\backends\\spatialite\\introspection.py",
    "ast_data": "ClassDef name:GeoFlexibleFieldLookupDict Assign"
  },
  {
    "library": "django",
    "name": "pk_default_value",
    "source_code": "def pk_default_value(self):\n    return 'DEFAULT'",
    "docstring": "Return the value to use during an INSERT statement to specify that the field should use its default value.",
    "type": "method",
    "file_path": "django\\django\\db\\backends\\base\\operations.py",
    "ast_data": "FunctionDef name:pk_default_value arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_DummyEagerGraph",
    "source_code": "class _DummyEagerGraph(threading.local):\n\n    class _WeakReferencableClass:\n        pass\n\n    def __init__(self):\n        super(_DummyEagerGraph, self).__init__()\n        self.key = _DummyEagerGraph._WeakReferencableClass()\n        self.learning_phase_is_set = False",
    "docstring": "_DummyEagerGraph provides a thread local attribute. We can't use threading.local directly, i.e. without subclassing, because gevent monkey patches threading.local and its version does not support weak references.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\backend.py",
    "ast_data": "ClassDef name:_DummyEagerGraph ClassDef name:_WeakReferencableClass FunctionDef name:__init__ arg:self arguments arg Call Call Assign Call Assign"
  },
  {
    "library": "tensorflow",
    "name": "is_fully_defined",
    "source_code": "def is_fully_defined(self):\n    return self._dims is not None and all((dim is not None for dim in self._dims))",
    "docstring": "Returns True iff is fully defined in every dimension.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\tensor_shape.py",
    "ast_data": "FunctionDef name:is_fully_defined arg:self arguments arg Return return:yes BoolOp Compare Call Compare"
  },
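A three-case sketch of `is_fully_defined` on public `tf.TensorShape` values: fully known, partially known, and unknown rank.

```python
import tensorflow as tf

print(tf.TensorShape([2, 3]).is_fully_defined())     # True
print(tf.TensorShape([2, None]).is_fully_defined())  # False (unknown dim)
print(tf.TensorShape(None).is_fully_defined())       # False (unknown rank)
```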
  {
    "library": "matplotlib",
    "name": "_rotation_about_vector",
    "source_code": "def _rotation_about_vector(v, angle):\n    vx, vy, vz = v / np.linalg.norm(v)\n    s = np.sin(angle)\n    c = np.cos(angle)\n    t = 2 * np.sin(angle / 2) ** 2\n    R = np.array([[t * vx * vx + c, t * vx * vy - vz * s, t * vx * vz + vy * s], [t * vy * vx + vz * s, t * vy * vy + c, t * vy * vz - vx * s], [t * vz * vx - vy * s, t * vz * vy + vx * s, t * vz * vz + c]])\n    return R",
    "docstring": "Produce a rotation matrix for an angle in radians about a vector.",
    "type": "function",
    "file_path": "matplotlib\\lib\\mpl_toolkits\\mplot3d\\proj3d.py",
    "ast_data": "FunctionDef name:_rotation_about_vector arg:v arg:angle arguments arg arg Assign Call Assign Call Assign Call Assign Call Assign Call Return return:yes"
  },
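A sanity check of the rotation-matrix helper above; `_rotation_about_vector` is a private mpl_toolkits function, so the import is illustrative.

```python
import numpy as np
from mpl_toolkits.mplot3d.proj3d import _rotation_about_vector  # private helper

# Rotating the x unit vector 90 degrees about the z axis should give ~y.
R = _rotation_about_vector(np.array([0.0, 0.0, 1.0]), np.pi / 2)
print(np.round(R @ np.array([1.0, 0.0, 0.0]), 6))  # [0. 1. 0.]
```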
  {
    "library": "pytorch",
    "name": "_get_signature_locals",
    "source_code": "def _get_signature_locals(f: Callable, loc: dict[str, Any]) -> dict[str, Any]:\n    return {k: v for k, v in loc.items() if k in signature(f).parameters}",
    "docstring": "Get local keyword arguments Example:: >> def f(self, a, b=9): pass >> loc = {\"a\": 6, \"c\": 7} >> _get_signature_locals(f, loc) {\"a\": 6}",
    "type": "function",
    "file_path": "pytorch\\torch\\ao\\quantization\\utils.py",
    "ast_data": "FunctionDef name:_get_signature_locals arg:f arg:loc arguments arg arg Return return:yes Call Compare Call"
  },
  {
    "library": "tensorflow",
    "name": "merge_caches_on_tpu",
    "source_code": "def merge_caches_on_tpu(self, local_tpu_cache_tensor):\n    x = array_ops.broadcast_to(local_tpu_cache_tensor, shape=[self._tt_config.num_replicas] + local_tpu_cache_tensor.shape.as_list())\n    if tensor_tracer_flags.TT_SINGLE_CORE_SUMMARIES.value:\n        return x\n    return tpu_ops.all_to_all(x, concat_dimension=0, split_dimension=0, split_count=self._tt_config.num_replicas, group_assignment=[list(range(self._tt_config.num_replicas))])",
    "docstring": "Merges the given caches on tpu. Args: local_tpu_cache_tensor: A local tensor that needs to be merged by concanting data from other tpu cores. Returns: A merged tf.Tensor.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\tpu\\tensor_tracer.py",
    "ast_data": "FunctionDef name:merge_caches_on_tpu arg:self arg:local_tpu_cache_tensor arguments arg arg Assign Call Call If Return return:yes Return return:yes Call Call Call"
  },
  {
    "library": "authlib",
    "name": "sign_rsa_sha1",
    "source_code": "def sign_rsa_sha1(client, request):\n    base_string = generate_signature_base_string(request)\n    return rsa_sha1_signature(base_string, client.rsa_key)",
    "docstring": "Sign a RSASSA-PKCS #1 v1.5 base64 encoded signature.",
    "type": "function",
    "file_path": "authlib\\authlib\\oauth1\\rfc5849\\signature.py",
    "ast_data": "FunctionDef name:sign_rsa_sha1 arg:client arg:request arguments arg arg Assign Call Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "is_view",
    "source_code": "@property\ndef is_view(self) -> bool:\n    return False",
    "docstring": "Extension arrays are never treated as views.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\internals\\blocks.py",
    "ast_data": "FunctionDef name:is_view arg:self arguments arg Return return:yes"
  },
  {
    "library": "django",
    "name": "create",
    "source_code": "def create(self):\n    raise NotImplementedError('subclasses of SessionBase must provide a create() method')",
    "docstring": "Create a new session instance. Guaranteed to create a new object with a unique key and will have saved the result once (with empty data) before the method returns.",
    "type": "method",
    "file_path": "django\\django\\contrib\\sessions\\backends\\base.py",
    "ast_data": "FunctionDef name:create arg:self arguments arg Raise Call"
  },
  {
    "library": "matplotlib",
    "name": "on_clicked",
    "source_code": "def on_clicked(self, func):\n    return self._observers.connect('clicked', lambda event: func(event))",
    "docstring": "Connect the callback function *func* to button click events. Returns a connection id, which can be used to disconnect the callback.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\widgets.py",
    "ast_data": "FunctionDef name:on_clicked arg:self arg:func arguments arg arg Return return:yes Call arguments arg Call"
  },
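A headless sketch of connecting and later disconnecting a button callback using the connection id that `on_clicked` returns.

```python
import matplotlib
matplotlib.use("Agg")  # headless backend so the sketch runs anywhere
import matplotlib.pyplot as plt
from matplotlib.widgets import Button

fig, ax = plt.subplots()
button = Button(ax, "Click me")

def handler(event):
    print("clicked")

cid = button.on_clicked(handler)  # connection id for this callback
button.disconnect(cid)            # remove the callback again
```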
  {
    "library": "tensorflow",
    "name": "_all_gather",
    "source_code": "def _all_gather(self, input_tensor: core.TensorLike, options: Optional[collective_util.Options]) -> core.Tensor:\n    instance_key = self._next_instance_key()\n    options = self._options.merge(options)\n    ordering_token = self._get_ordering_token()\n    with ops.device(self._device):\n        return collective_ops.all_gather_v2(input_tensor, self._group_size, self._group_key, instance_key, communication_hint=options.implementation.value, timeout=options.timeout_seconds, ordering_token=ordering_token)",
    "docstring": "All-gather a dense tensor. Args: input_tensor: a dense tensor. It must have the same shape on all replicas. options: an optional tf.distribute.experimental.CommunicationOptions. If provided, it overrides the default options. Returns: The reduced tensor.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\cross_device_utils.py",
    "ast_data": "FunctionDef name:_all_gather arg:self arg:input_tensor arg:options arguments arg arg arg Assign Call Assign Call Assign Call With Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_get_value_or_dummy",
    "source_code": "def _get_value_or_dummy(input_workers, optional_list, produce_dummy):\n    value_list = []\n    for i, worker in enumerate(input_workers.worker_devices):\n        with ops.device(worker):\n            devices = input_workers.compute_devices_for_worker(i)\n            for j, device in enumerate(devices):\n                with ops.device(device):\n                    if produce_dummy:\n                        value_list.append(tf_cond.cond(optional_list[i][j].has_value(), lambda: optional_list[i][j].get_value(), lambda: _dummy_tensor_fn(optional_list[i][j].element_spec), strict=True))\n                    else:\n                        value_list.append(optional_list[i][j].get_value())\n    return value_list",
    "docstring": "Returns the value of the optionals or dummy values. Args: input_workers: the . optional_list: a list of lists . The values from each compute device grouped by the input device. produce_dummy: a bool. Whether to produce dummy tensors when the optional doesn't have a value. Returns: A flatten list of Tensors.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\input_lib.py",
    "ast_data": "FunctionDef name:_get_value_or_dummy arg:input_workers arg:optional_list arg:produce_dummy arguments arg arg arg Assign For Call With Call Assign Call For Call With Call If Call Call Call arguments Call arguments Call Call Call Return return:yes"
  },
  {
    "library": "scipy",
    "name": "__neg__",
    "source_code": "def __neg__(self):\n    return StateSpace(self.A, self.B, -self.C, -self.D, **self._dt_dict)",
    "docstring": "Negate the system (equivalent to pre-multiplying by -1).",
    "type": "method",
    "file_path": "scipy\\scipy\\signal\\_ltisys.py",
    "ast_data": "FunctionDef name:__neg__ arg:self arguments arg Return return:yes Call"
  },
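Negating a `StateSpace` flips only the output path, as the record shows: C and D change sign while A and B are untouched. The integrator below is a minimal example.

```python
from scipy.signal import StateSpace

sys = StateSpace([[0.0]], [[1.0]], [[1.0]], [[0.0]])  # dx/dt = u, y = x
neg = -sys
print(neg.A, neg.B)  # [[0.]] [[1.]]  (unchanged)
print(neg.C, neg.D)  # [[-1.]] [[-0.]] (negated)
```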
  {
    "library": "sphinx",
    "name": "FootnoteCollector",
    "source_code": "class FootnoteCollector(nodes.NodeVisitor):\n\n    def __init__(self, document: nodes.document) -> None:\n        self.auto_footnotes: list[nodes.footnote] = []\n        self.used_footnote_numbers: set[str] = set()\n        self.footnote_refs: list[nodes.footnote_reference] = []\n        super().__init__(document)\n\n    def unknown_visit(self, node: Node) -> None:\n        pass\n\n    def unknown_departure(self, node: Node) -> None:\n        pass\n\n    def visit_footnote(self, node: nodes.footnote) -> None:\n        if node.get('auto'):\n            self.auto_footnotes.append(node)\n        else:\n            for name in node['names']:\n                self.used_footnote_numbers.add(name)\n\n    def visit_footnote_reference(self, node: nodes.footnote_reference) -> None:\n        self.footnote_refs.append(node)",
    "docstring": "Collect footnotes and footnote references on the document",
    "type": "class",
    "file_path": "sphinx\\sphinx\\builders\\latex\\transforms.py",
    "ast_data": "ClassDef name:FootnoteCollector FunctionDef name:__init__ arg:self arg:document arguments arg arg Call Call Call FunctionDef name:unknown_visit arg:self arg:node arguments arg arg FunctionDef name:unknown_departure arg:self arg:node arguments arg arg FunctionDef name:visit_footnote arg:self arg:node arguments arg arg If Call Call For Call FunctionDef name:visit_footnote_reference arg:self arg:node arguments arg arg Call"
  },
  {
    "library": "tensorflow",
    "name": "_serialize_hyperparameter",
    "source_code": "def _serialize_hyperparameter(self, hyperparameter_name):\n    value = self._hyper[hyperparameter_name]\n    if isinstance(value, learning_rate_schedule.LearningRateSchedule):\n        return learning_rate_schedule.serialize(value)\n    if callable(value):\n        return value()\n    if tensor_util.is_tf_type(value):\n        return backend.get_value(value)\n    return value",
    "docstring": "Serialize a hyperparameter that can be a float, callable, or Tensor.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\optimizer_v2\\optimizer_v2.py",
    "ast_data": "FunctionDef name:_serialize_hyperparameter arg:self arg:hyperparameter_name arguments arg arg Assign If Call Return return:yes Call If Call Return return:yes Call If Call Return return:yes Call Return return:yes"
  },
  {
    "library": "uvicorn",
    "name": "get_trusted_client_host",
    "source_code": "def get_trusted_client_host(self, x_forwarded_for: str) -> str:\n    x_forwarded_for_hosts = _parse_raw_hosts(x_forwarded_for)\n    if self.always_trust:\n        return x_forwarded_for_hosts[0]\n    for host in reversed(x_forwarded_for_hosts):\n        if host not in self:\n            return host\n    return x_forwarded_for_hosts[0]",
    "docstring": "Extract the client host from x_forwarded_for header In general this is the first \"untrusted\" host in the forwarded for list.",
    "type": "method",
    "file_path": "uvicorn\\uvicorn\\middleware\\proxy_headers.py",
    "ast_data": "FunctionDef name:get_trusted_client_host arg:self arg:x_forwarded_for arguments arg arg Assign Call If Return return:yes For Call If Compare Return return:yes Return return:yes"
  },
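An illustrative reimplementation of the rule, not the uvicorn API: walk the X-Forwarded-For hops right to left and return the first one that is not a trusted proxy. `first_untrusted_host` and its arguments are hypothetical names.

```python
def first_untrusted_host(x_forwarded_for: str, trusted: set) -> str:
    hosts = [h.strip() for h in x_forwarded_for.split(",")]
    for host in reversed(hosts):
        if host not in trusted:
            return host
    return hosts[0]  # every hop trusted: fall back to the leftmost entry

print(first_untrusted_host("203.0.113.7, 10.0.0.2", {"10.0.0.2"}))  # 203.0.113.7
```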
  {
    "library": "tensorflow",
    "name": "ProtoAssertions",
    "source_code": "class ProtoAssertions(object):\n\n    def assertProtoEqual(self, *args, **kwargs):\n        return assertProtoEqual(self, *args, **kwargs)",
    "docstring": "Mix this into a googletest.TestCase class to get proto2 assertions. Usage: class SomeTestCase(compare.ProtoAssertions, googletest.TestCase): ... def testSomething(self): ... self.assertProtoEqual(a, b) See module-level definitions for method documentation.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\util\\protobuf\\compare.py",
    "ast_data": "ClassDef name:ProtoAssertions FunctionDef name:assertProtoEqual arg:self arguments arg arg arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "all_mismatch_leaf_graph_info",
    "source_code": "def all_mismatch_leaf_graph_info(self) -> list[GraphInfo]:\n    if not self.has_mismatch():\n        return []\n    no_mismatch_children = (self.upper_graph_info is None or not self.upper_graph_info.has_mismatch()) and (self.lower_graph_info is None or not self.lower_graph_info.has_mismatch())\n    if no_mismatch_children:\n        return [self]\n    results = []\n    if self.upper_graph_info is not None:\n        results += self.upper_graph_info.all_mismatch_leaf_graph_info()\n    if self.lower_graph_info is not None:\n        results += self.lower_graph_info.all_mismatch_leaf_graph_info()\n    return results",
    "docstring": "Return a list of all leaf objects that have mismatch.",
    "type": "method",
    "file_path": "pytorch\\torch\\onnx\\verification.py",
    "ast_data": "FunctionDef name:all_mismatch_leaf_graph_info arg:self arguments arg If Call Return return:no Assign BoolOp BoolOp Compare Call BoolOp Compare Call If Return return:yes Assign If Compare Call If Compare Call Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "_identify_infrequent",
    "source_code": "def _identify_infrequent(self, category_count, n_samples, col_idx):\n    if isinstance(self.min_frequency, numbers.Integral):\n        infrequent_mask = category_count < self.min_frequency\n    elif isinstance(self.min_frequency, numbers.Real):\n        min_frequency_abs = n_samples * self.min_frequency\n        infrequent_mask = category_count < min_frequency_abs\n    else:\n        infrequent_mask = np.zeros(category_count.shape[0], dtype=bool)\n    n_current_features = category_count.size - infrequent_mask.sum() + 1\n    if self.max_categories is not None and self.max_categories < n_current_features:\n        frequent_category_count = self.max_categories - 1\n        if frequent_category_count == 0:\n            infrequent_mask[:] = True\n        else:\n            smallest_levels = np.argsort(category_count, kind='mergesort')[:-frequent_category_count]\n            infrequent_mask[smallest_levels] = True\n    output = np.flatnonzero(infrequent_mask)\n    return output if output.size > 0 else None",
    "docstring": "Compute the infrequent indices. Parameters ---------- category_count : ndarray of shape (n_cardinality,) Category counts. n_samples : int Number of samples. col_idx : int Index of the current category. Only used for the error message. Returns ------- output : ndarray of shape (n_infrequent_categories,) or None If there are infrequent categories, indices of infrequent categories. Otherwise None.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\preprocessing\\_encoders.py",
    "ast_data": "FunctionDef name:_identify_infrequent arg:self arg:category_count arg:n_samples arg:col_idx arguments arg arg arg arg If Call Assign Compare If Call Assign Assign Compare Assign Call Assign Call If BoolOp Compare Compare Assign If Compare Assign Assign Call Assign Assign Call Return return:yes Compare"
  },
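The public behavior this private helper backs, sketched with `OneHotEncoder`: categories whose count falls below `min_frequency` are grouped as infrequent.

```python
import numpy as np
from sklearn.preprocessing import OneHotEncoder

# Counts: a=5, b=20, c=10, d=3 -> with min_frequency=6, 'a' and 'd' are infrequent.
X = np.array([["a"] * 5 + ["b"] * 20 + ["c"] * 10 + ["d"] * 3]).T
enc = OneHotEncoder(min_frequency=6, handle_unknown="infrequent_if_exist").fit(X)
print(enc.infrequent_categories_)  # [array(['a', 'd'], ...)]
```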
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, funcs, trackable_obj=None):\n    super(TFLiteConverterV2, self).__init__(funcs, trackable_obj)",
    "docstring": "Constructor for TFLiteConverter. Args: funcs: List of TensorFlow ConcreteFunctions. The list should not contain duplicate elements. trackable_obj: tf.AutoTrackable object associated with . A reference to this object needs to be maintained so that Variables do not get garbage collected since functions have a weak reference to Variables. This is only required when the tf.AutoTrackable object is not maintained by the user (e.g. ).",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\lite\\python\\lite.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:funcs arg:trackable_obj arguments arg arg arg Call Call"
  },
  {
    "library": "scipy",
    "name": "integrate_1d",
    "source_code": "def integrate_1d(self, a, b, axis, extrapolate=None):\n    if extrapolate is None:\n        extrapolate = self.extrapolate\n    else:\n        extrapolate = bool(extrapolate)\n    ndim = len(self.x)\n    axis = int(axis) % ndim\n    c = self.c\n    swap = list(range(c.ndim))\n    swap.insert(0, swap[axis])\n    del swap[axis + 1]\n    swap.insert(1, swap[ndim + axis])\n    del swap[ndim + axis + 1]\n    c = c.transpose(swap)\n    p = PPoly.construct_fast(c.reshape(c.shape[0], c.shape[1], -1), self.x[axis], extrapolate=extrapolate)\n    out = p.integrate(a, b, extrapolate=extrapolate)\n    if ndim == 1:\n        return out.reshape(c.shape[2:])\n    else:\n        c = out.reshape(c.shape[2:])\n        x = self.x[:axis] + self.x[axis + 1:]\n        return self.construct_fast(c, x, extrapolate=extrapolate)",
    "docstring": "Compute NdPPoly representation for one dimensional definite integral The result is a piecewise polynomial representing the integral: .. math:: p(y, z, ...) = \\int_a^b dx\\, p(x, y, z, ...) where the dimension integrated over is specified with the parameter. Parameters ---------- a, b : float Lower and upper bound for integration. axis : int Dimension over which to compute the 1-D integrals extrapolate : bool, optional Whether to extrapolate to out-of-bounds points based on first and last intervals, or to return NaNs. Returns ------- ig : NdPPoly or array-like Definite integral of the piecewise polynomial over [a, b]. If the polynomial was 1D, an array is returned, otherwise, an NdPPoly object.",
    "type": "method",
    "file_path": "scipy\\scipy\\interpolate\\_interpolate.py",
    "ast_data": "FunctionDef name:integrate_1d arg:self arg:a arg:b arg:axis arg:extrapolate arguments arg arg arg arg arg If Compare Assign Assign Call Assign Call Assign Call Assign Assign Call Call Call Call Assign Call Assign Call Call Assign Call If Compare Return return:yes Call Assign Call Assign Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_as_row_partitions",
    "source_code": "def _as_row_partitions(self):\n    rank = self.rank\n    if rank is None:\n        raise ValueError('rank must be known for _as_row_partitions')\n    elif rank < 1:\n        raise ValueError('rank must be >= 1 for _as_row_partitions')\n    fully_ragged = self._with_num_row_partitions(rank - 1)\n    return fully_ragged.row_partitions",
    "docstring": "Returns row partitions representing this shape. In order to represent a shape as row partitions, the rank of the shape must be known, and the shape must have rank at least one. Returns: A list of RowPartition objects. Raises: ValueError, if the shape cannot be represented by RowPartitions.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\dynamic_ragged_shape.py",
    "ast_data": "FunctionDef name:_as_row_partitions arg:self arguments arg Assign If Compare Raise Call If Compare Raise Call Assign Call Return return:yes"
  },
  {
    "library": "scipy",
    "name": "_smart_matrix_product",
    "source_code": "def _smart_matrix_product(A, B, alpha=None, structure=None):\n    if len(A.shape) != 2:\n        raise ValueError('expected A to be a rectangular matrix')\n    if len(B.shape) != 2:\n        raise ValueError('expected B to be a rectangular matrix')\n    f = None\n    if structure == UPPER_TRIANGULAR:\n        if not issparse(A) and (not issparse(B)) and (not is_pydata_spmatrix(A)) and (not is_pydata_spmatrix(B)):\n            f, = scipy.linalg.get_blas_funcs(('trmm',), (A, B))\n    if f is not None:\n        if alpha is None:\n            alpha = 1.0\n        out = f(alpha, A, B)\n    elif alpha is None:\n        out = A.dot(B)\n    else:\n        out = alpha * A.dot(B)\n    return out",
    "docstring": "A matrix product that knows about sparse and structured matrices. Parameters ---------- A : 2d ndarray First matrix. B : 2d ndarray Second matrix. alpha : float The matrix product will be scaled by this constant. structure : str, optional A string describing the structure of both matrices and . Only is currently supported. Returns ------- M : 2d ndarray Matrix product of A and B.",
    "type": "function",
    "file_path": "scipy\\scipy\\sparse\\linalg\\_matfuncs.py",
    "ast_data": "FunctionDef name:_smart_matrix_product arg:A arg:B arg:alpha arg:structure arguments arg arg arg arg If Compare Call Raise Call If Compare Call Raise Call Assign If Compare If BoolOp Call Call Call Call Assign Call If Compare If Compare Assign Assign Call If Compare Assign Call Assign Call Return return:yes"
  },
  {
    "library": "pandas",
    "name": "argmax",
    "source_code": "def argmax(self, skipna: bool=True) -> int:\n    validate_bool_kwarg(skipna, 'skipna')\n    if not skipna and self._hasna:\n        raise ValueError('Encountered an NA value with skipna=False')\n    return nargminmax(self, 'argmax')",
    "docstring": "Return the index of maximum value. In case of multiple occurrences of the maximum value, the index corresponding to the first occurrence is returned. Parameters ---------- skipna : bool, default True Returns ------- int See Also -------- ExtensionArray.argmin : Return the index of the minimum value. Examples -------- >>> arr = pd.array([3, 1, 2, 5, 4]) >>> arr.argmax() np.int64(3)",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\arrays\\base.py",
    "ast_data": "FunctionDef name:argmax arg:self arg:skipna arguments arg arg Call If BoolOp Raise Call Return return:yes Call"
  },
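The docstring's example plus the `skipna=False` error path, which the code above raises explicitly.

```python
import pandas as pd

arr = pd.array([3, 1, 2, 5, 4])
print(arr.argmax())  # 3 (index of the first occurrence of the maximum)

with_na = pd.array([1, None, 5])
try:
    with_na.argmax(skipna=False)
except ValueError as e:
    print(e)  # Encountered an NA value with skipna=False
```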
  {
    "library": "scipy",
    "name": "Gulf",
    "source_code": "class Gulf(Benchmark):\n\n    def __init__(self, dimensions=3):\n        Benchmark.__init__(self, dimensions)\n        self._bounds = list(zip([0.0] * self.N, [50.0] * self.N))\n        self.global_optimum = [[50.0, 25.0, 1.5]]\n        self.fglob = 0.0\n\n    def fun(self, x, *args):\n        self.nfev += 1\n        m = 99.0\n        i = arange(1.0, m + 1)\n        u = 25 + (-50 * log(i / 100.0)) ** (2 / 3.0)\n        vec = exp(-(abs(u - x[1]) ** x[2] / x[0])) - i / 100.0\n        return sum(vec ** 2)",
    "docstring": "Gulf objective function. This class defines the Gulf [1]_ global optimization problem. This is a multimodal minimization problem defined as follows: .. math:: f_{\\text{Gulf}}(x) = \\sum_{i=1}^99 \\left( e^{-\\frac{\\lvert y_i - x_2 \\rvert^{x_3}}{x_1}} - t_i \\right) Where, in this exercise: .. math:: t_i = i/100 \\\\ y_i = 25 + [-50 \\log(t_i)]^{2/3} with :math: for :math:. *Global optimum*: :math: for :math: .. [1] Gavana, A. Global Optimization Benchmarks and AMPGO retrieved 2015 TODO Gavana has absolute of (u - x[1]) term. Jamil doesn't... Leaving it in.",
    "type": "class",
    "file_path": "scipy\\benchmarks\\benchmarks\\go_benchmark_functions\\go_funcs_G.py",
    "ast_data": "ClassDef name:Gulf FunctionDef name:__init__ arg:self arg:dimensions arguments arg arg Call Assign Call Call Assign Assign FunctionDef name:fun arg:self arg:x arguments arg arg arg Assign Assign Call Assign Call Assign Call Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "get_autotune_deletion_call",
    "source_code": "def get_autotune_deletion_call() -> str:\n    tensors_to_delete = [tensor for tensor, kn in self.kernel_autotune_example_args.values() if kn == kernel_name]\n    if tensors_to_delete:\n        return f'del {', '.join(tensors_to_delete)}\\n'\n    return ''",
    "docstring": "After all the autotune kernel calls have been written (i.e. self.kernel_autotune_example_args is complete), returns a deletion call for all autotune example tensors that are unnecessary after kernel_name is called.",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\codegen\\wrapper.py",
    "ast_data": "FunctionDef name:get_autotune_deletion_call arguments Assign Call Compare If Return return:yes Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "reset",
    "source_code": "def reset(self, state):\n    state = _convert_to_state_tensor(state)\n    state.shape.assert_is_compatible_with([_get_state_size(self.algorithm)])\n    self._state_var.assign(state)",
    "docstring": "Resets the generator by a new state. See for the meaning of \"state\". Args: state: the new state.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\stateful_random_ops.py",
    "ast_data": "FunctionDef name:reset arg:self arg:state arguments arg arg Assign Call Call Call Call"
  },
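A round-trip sketch for `reset`: snapshot the state variable, draw, restore, and the stream replays.

```python
import tensorflow as tf

g = tf.random.Generator.from_seed(1234)
saved = g.state.read_value()  # snapshot the RNG state tensor
x1 = g.normal([2])
g.reset(saved)                # restore the counter/key state
x2 = g.normal([2])
print(bool(tf.reduce_all(x1 == x2)))  # True
```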
  {
    "library": "matplotlib",
    "name": "visible_edges",
    "source_code": "@property\ndef visible_edges(self):\n    return self._visible_edges",
    "docstring": "The cell edges to be drawn with a line. Reading this property returns a substring of 'BRTL' (bottom, right, top, left'). When setting this property, you can use a substring of 'BRTL' or one of {'open', 'closed', 'horizontal', 'vertical'}.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\table.py",
    "ast_data": "FunctionDef name:visible_edges arg:self arguments arg Return return:yes"
  },
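A sketch of reading and writing `visible_edges` on a single table cell, using a 'BRTL' substring as the docstring describes.

```python
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt

fig, ax = plt.subplots()
tbl = ax.table(cellText=[["a", "b"], ["c", "d"]], loc="center")
cell = tbl[0, 0]
cell.visible_edges = "BT"  # draw only the bottom and top edges
print(cell.visible_edges)  # BT
```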
  {
    "library": "pytorch",
    "name": "node_arg_is_weight",
    "source_code": "def node_arg_is_weight(node: Node, arg: Any) -> bool:\n    weight_index = None\n    if 'target_dtype_info' in node.meta:\n        weight_index = node.meta['target_dtype_info'].get('weight_index', None)\n    if weight_index is not None and weight_index < len(node.args) and (node.args[weight_index] is arg):\n        return True\n    return node.kwargs.get('weight') is arg",
    "docstring": "Returns if node arg is weight",
    "type": "function",
    "file_path": "pytorch\\torch\\ao\\quantization\\fx\\utils.py",
    "ast_data": "FunctionDef name:node_arg_is_weight arg:node arg:arg arguments arg arg Assign If Compare Assign Call If BoolOp Compare Compare Call Compare Return return:yes Return return:yes Compare Call"
  },
  {
    "library": "tensorflow",
    "name": "from_config",
    "source_code": "@classmethod\ndef from_config(cls, config):\n    return cls(**config)",
    "docstring": "Creates a layer from its config. This method is the reverse of , capable of instantiating the same layer from the config dictionary. It does not handle layer connectivity (handled by Network), nor weights (handled by ). Args: config: A Python dictionary, typically the output of get_config. Returns: A layer instance.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\base_layer_v1.py",
    "ast_data": "FunctionDef name:from_config arg:cls arg:config arguments arg arg Return return:yes Call"
  },
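The get_config/from_config round trip the docstring describes, sketched with a stock Keras layer: same hyperparameters, fresh weights.

```python
import tensorflow as tf

layer = tf.keras.layers.Dense(4, activation="relu")
clone = tf.keras.layers.Dense.from_config(layer.get_config())
print(clone.units)  # 4 (configuration copied; weights are not)
```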
  {
    "library": "pytorch",
    "name": "generate_kernel_code_from_nodes",
    "source_code": "def generate_kernel_code_from_nodes(self, nodes: Sequence[BaseSchedulerNode], benchmark_kernel: bool) -> str:\n    raise NotImplementedError",
    "docstring": "Generate a kernel given a list of pre-fused nodes.",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\scheduler.py",
    "ast_data": "FunctionDef name:generate_kernel_code_from_nodes arg:self arg:nodes arg:benchmark_kernel arguments arg arg arg Raise"
  },
  {
    "library": "pytorch",
    "name": "same_dtype",
    "source_code": "def same_dtype(*args, **kwargs):\n    return args[0].dtype == args[1].dtype",
    "docstring": "When the dtype is the same, return the original ShardedTensor. Args: same as ``. Return (bool): Whether to return early or not.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\_shard\\sharded_tensor\\_ops\\tensor_ops.py",
    "ast_data": "FunctionDef name:same_dtype arguments arg arg Return return:yes Compare"
  },
  {
    "library": "scipy",
    "name": "pbvv_seq",
    "source_code": "def pbvv_seq(v, x):\n    if not (isscalar(v) and isscalar(x)):\n        raise ValueError('arguments must be scalars.')\n    n = int(v)\n    v0 = v - n\n    if n <= 1:\n        n1 = 1\n    else:\n        n1 = n\n    v1 = n1 + v0\n    dv, dp, pdf, pdd = _specfun.pbvv(v1, x)\n    return (dv[:n1 + 1], dp[:n1 + 1])",
    "docstring": "Parabolic cylinder functions Vv(x) and derivatives. Parameters ---------- v : float Order of the parabolic cylinder function x : float Value at which to evaluate the function and derivatives Returns ------- dv : ndarray Values of V_vi(x), for vi=v-int(v), vi=1+v-int(v), ..., vi=v. dp : ndarray Derivatives V_vi'(x), for vi=v-int(v), vi=1+v-int(v), ..., vi=v. References ---------- .. [1] Zhang, Shanjie and Jin, Jianming. \"Computation of Special Functions\", John Wiley and Sons, 1996, chapter 13.",
    "type": "function",
    "file_path": "scipy\\scipy\\special\\_basic.py",
    "ast_data": "FunctionDef name:pbvv_seq arg:v arg:x arguments arg arg If BoolOp Call Call Raise Call Assign Call Assign If Compare Assign Assign Assign Assign Call Return return:yes"
  },
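A small usage sketch (the order and evaluation point are arbitrary):

```python
from scipy.special import pbvv_seq

# V_v(x) and V_v'(x) for v = 0.5, 1.5, 2.5 at x = 1.0: one entry per order.
dv, dp = pbvv_seq(2.5, 1.0)
print(len(dv), len(dp))  # 3 3
```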
  {
    "library": "tensorflow",
    "name": "on_session_init",
    "source_code": "def on_session_init(self, request):\n    return framework.OnSessionInitResponse(framework.OnSessionInitAction.PROCEED)",
    "docstring": "Overrides on-session-init callback. Args: request: An instance of . Returns: An instance of .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\debug\\wrappers\\local_cli_wrapper.py",
    "ast_data": "FunctionDef name:on_session_init arg:self arg:request arguments arg arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "rgb_to_yiq",
    "source_code": "@tf_export('image.rgb_to_yiq')\n@dispatch.add_dispatch_support\ndef rgb_to_yiq(images):\n    images = ops.convert_to_tensor(images, name='images')\n    kernel = ops.convert_to_tensor(_rgb_to_yiq_kernel, dtype=images.dtype, name='kernel')\n    ndims = images.get_shape().ndims\n    return math_ops.tensordot(images, kernel, axes=[[ndims - 1], [0]])",
    "docstring": "Converts one or more images from RGB to YIQ. Outputs a tensor of the same shape as the tensor, containing the YIQ value of the pixels. The output is only well defined if the value in images are in [0,1]. Usage Example: >>> x = tf.constant([[[1.0, 2.0, 3.0]]]) >>> tf.image.rgb_to_yiq(x) Args: images: 2-D or higher rank. Image data to convert. Last dimension must be size 3. Returns: images: tensor with the same shape as .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\image_ops_impl.py",
    "ast_data": "FunctionDef name:rgb_to_yiq arg:images arguments arg Assign Call Assign Call Assign Call Return return:yes Call Call"
  },
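A quick sanity-check sketch, pairing the function with its public inverse `tf.image.yiq_to_rgb` (the input batch is made up):

```python
import tensorflow as tf

x = tf.random.uniform([2, 4, 4, 3])      # batch of RGB images with values in [0, 1]
yiq = tf.image.rgb_to_yiq(x)
back = tf.image.yiq_to_rgb(yiq)          # the inverse 3x3 kernel
print(float(tf.reduce_max(tf.abs(back - x))))  # ~0, up to float rounding
```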
  {
    "library": "cryptography",
    "name": "verify",
    "source_code": "@abc.abstractmethod\ndef verify(self, signature: Buffer, data: Buffer) -> None:\n    pass",
    "docstring": "Verify the signature.",
    "type": "method",
    "file_path": "cryptography\\src\\cryptography\\hazmat\\primitives\\asymmetric\\ed448.py",
    "ast_data": "FunctionDef name:verify arg:self arg:signature arg:data arguments arg arg arg"
  },
  {
    "library": "tensorflow",
    "name": "job_name",
    "source_code": "@tf_export('experimental.dtensor.job_name', v1=[])\ndef job_name() -> str:\n    return os.environ.get(_DT_JOB_NAME, 'localhost' if num_clients() == 1 else 'worker')",
    "docstring": "Returns the job name used by all clients in this DTensor cluster.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\dtensor\\python\\config.py",
    "ast_data": "FunctionDef name:job_name arguments Return return:yes Call Compare Call Call"
  },
  {
    "library": "pytorch",
    "name": "DirectoryReader",
    "source_code": "class DirectoryReader:\n\n    def __init__(self, directory):\n        self.directory = directory\n\n    def get_record(self, name):\n        filename = f'{self.directory}/{name}'\n        with open(filename, 'rb') as f:\n            return f.read()\n\n    def get_storage_from_record(self, name, numel, dtype):\n        filename = f'{self.directory}/{name}'\n        nbytes = torch._utils._element_size(dtype) * numel\n        storage = cast(Storage, torch.UntypedStorage)\n        return _HasStorage(storage.from_file(filename=filename, nbytes=nbytes))\n\n    def has_record(self, path):\n        full_path = os.path.join(self.directory, path)\n        return os.path.isfile(full_path)\n\n    def get_all_records(self):\n        files = [filename[len(self.directory) + 1:] for filename in glob(f'{self.directory}/**', recursive=True) if not os.path.isdir(filename)]\n        return files\n\n    def serialization_id(self):\n        if self.has_record(__serialization_id_record_name__):\n            return self.get_record(__serialization_id_record_name__)\n        else:\n            return ''",
    "docstring": "Class to allow PackageImporter to operate on unzipped packages. Methods copy the behavior of the internal PyTorchFileReader class (which is used for accessing packages in all other cases). N.B.: ScriptObjects are not depickleable or accessible via this DirectoryReader class due to ScriptObjects requiring an actual PyTorchFileReader instance.",
    "type": "class",
    "file_path": "pytorch\\torch\\package\\_directory_reader.py",
    "ast_data": "ClassDef name:DirectoryReader FunctionDef name:__init__ arg:self arg:directory arguments arg arg Assign FunctionDef name:get_record arg:self arg:name arguments arg arg Assign With Call Return return:yes Call FunctionDef name:get_storage_from_record arg:self arg:name arg:numel arg:dtype arguments arg arg arg arg Assign Assign Call Assign Call Return return:yes Call Call FunctionDef name:has_record arg:self arg:path arguments arg arg Assign Call Return return:yes Call FunctionDef name:get_all_records arg:self arguments arg Assign Call Call Call Return return:yes FunctionDef name:serialization_id arg:self arguments arg If Call Return return:yes Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_assert_concat_compatible_structured_tensors",
    "source_code": "def _assert_concat_compatible_structured_tensors(values):\n    if not isinstance(values, Sequence):\n        raise ValueError('values must be a list of StructuredTensors (not a list)')\n    if not values:\n        raise ValueError('values must not be an empty list')\n    for st in values:\n        if not isinstance(st, StructuredTensor):\n            raise ValueError('values must be a list of StructuredTensors')\n    _assert_all_paths_match(values)\n    _assert_all_ranks_match(values)",
    "docstring": "Sometimes raises an error if concat doesn't make sense statically on values. values must be a sequence, and each element in values must be a structured tensor, and must have the same paths. Additionally, each path that is a submessage must have the same rank. These constraints are sufficient for concat on the fields to be the same as concat on structured tensors. This is meant to capture scenarios like paths that are not in the first structured tensor, but are in later structured tensors, which will just be ignored by the recursive algorithm. If the rank of a submessage was different for two structured tensors, then that is also a non-sensical merge. Note that all of these checks are static, as paths and submessage ranks are known. Args: values: a Sequence of StructuredTensors. Raises: ValueError: if there is any inconsistency as described above.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\structured\\structured_array_ops.py",
    "ast_data": "FunctionDef name:_assert_concat_compatible_structured_tensors arg:values arguments arg If Call Raise Call If Raise Call For If Call Raise Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "rand",
    "source_code": "def rand(self, seed: T, offset: T) -> T:\n    raise NotImplementedError",
    "docstring": "Computes inductor_prims.random with mode=\"rand\". offset has dtype int32.",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\ops_handler.py",
    "ast_data": "FunctionDef name:rand arg:self arg:seed arg:offset arguments arg arg arg Raise"
  },
  {
    "library": "matplotlib",
    "name": "CloseEvent",
    "source_code": "class CloseEvent(Event):\n    pass",
    "docstring": "An event triggered by a figure being closed.",
    "type": "class",
    "file_path": "matplotlib\\lib\\matplotlib\\backend_bases.py",
    "ast_data": "ClassDef name:CloseEvent"
  },
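A minimal sketch of subscribing to this event through the standard `mpl_connect` mechanism:

```python
import matplotlib.pyplot as plt

def on_close(event):
    # `event` is a CloseEvent; the figure being closed is event.canvas.figure.
    print("figure closed")

fig, ax = plt.subplots()
fig.canvas.mpl_connect("close_event", on_close)
plt.close(fig)  # fires the callback
```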
  {
    "library": "scikit-learn",
    "name": "_limit_features",
    "source_code": "def _limit_features(self, X, vocabulary, high=None, low=None, limit=None):\n    if high is None and low is None and (limit is None):\n        return (X, set())\n    dfs = _document_frequency(X)\n    mask = np.ones(len(dfs), dtype=bool)\n    if high is not None:\n        mask &= dfs <= high\n    if low is not None:\n        mask &= dfs >= low\n    if limit is not None and mask.sum() > limit:\n        tfs = np.asarray(X.sum(axis=0)).ravel()\n        mask_inds = (-tfs[mask]).argsort()[:limit]\n        new_mask = np.zeros(len(dfs), dtype=bool)\n        new_mask[np.where(mask)[0][mask_inds]] = True\n        mask = new_mask\n    new_indices = np.cumsum(mask) - 1\n    for term, old_index in list(vocabulary.items()):\n        if mask[old_index]:\n            vocabulary[term] = new_indices[old_index]\n        else:\n            del vocabulary[term]\n    kept_indices = np.where(mask)[0]\n    if len(kept_indices) == 0:\n        raise ValueError('After pruning, no terms remain. Try a lower min_df or a higher max_df.')\n    return X[:, kept_indices]",
    "docstring": "Remove too rare or too common features. Prune features that are non zero in more samples than high or less documents than low, modifying the vocabulary, and restricting it to at most the limit most frequent. This does not prune samples with zero features.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\feature_extraction\\text.py",
    "ast_data": "FunctionDef name:_limit_features arg:self arg:X arg:vocabulary arg:high arg:low arg:limit arguments arg arg arg arg arg arg If BoolOp Compare Compare Compare Return return:yes Call Assign Call Assign Call Call If Compare Compare If Compare Compare If BoolOp Compare Compare Call Assign Call Call Call Assign Call Assign Call Call Assign Call Assign Assign Call For Call Call If Assign Assign Call If Compare Call Raise Call Return return:yes"
  },
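`_limit_features` is the private worker behind the text vectorizers' `min_df`/`max_df`/`max_features` options; a sketch of the user-facing effect (the corpus is made up):

```python
from sklearn.feature_extraction.text import CountVectorizer

docs = ["the cat sat", "the cat ran", "a dog barked", "the dog sat"]
# min_df drops terms appearing in fewer than 2 documents; max_features then
# keeps only the most frequent surviving terms.
vec = CountVectorizer(min_df=2, max_features=3)
vec.fit(docs)
print(sorted(vec.vocabulary_))
```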
  {
    "library": "pytorch",
    "name": "UnspecializedBuiltinNNModuleVariable",
    "source_code": "class UnspecializedBuiltinNNModuleVariable(UnspecializedNNModuleVariable):\n\n    def _wrap_source(self, attr_source):\n        if not isinstance(attr_source, UnspecializedBuiltinNNModuleSource):\n            return UnspecializedBuiltinNNModuleSource(attr_source)\n        return attr_source",
    "docstring": "Differentiates between builtin nn modules (e.g. torch.nn.Linear) and user defined nn modules.",
    "type": "class",
    "file_path": "pytorch\\torch\\_dynamo\\variables\\nn_module.py",
    "ast_data": "ClassDef name:UnspecializedBuiltinNNModuleVariable FunctionDef name:_wrap_source arg:self arg:attr_source arguments arg arg If Call Return return:yes Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "update_pointwise_example_value",
    "source_code": "def update_pointwise_example_value(pointwise_node, input, other, op):\n    if pointwise_node is not None and hasattr(pointwise_node, 'meta'):\n        if op == torch.add:\n            example_value = torch.add(input, other)\n        elif op == torch.mul:\n            example_value = torch.mul(input, other)\n        else:\n            return\n        pointwise_node.meta['example_value'] = example_value",
    "docstring": "Update the example value of the add node in the graph to enable followup split cat opt.",
    "type": "function",
    "file_path": "pytorch\\torch\\_inductor\\fx_passes\\group_batch_fusion.py",
    "ast_data": "FunctionDef name:update_pointwise_example_value arg:pointwise_node arg:input arg:other arg:op arguments arg arg arg arg If BoolOp Compare Call If Compare Assign Call If Compare Assign Call Return return:no Assign"
  },
  {
    "library": "pandas",
    "name": "_get_level_values",
    "source_code": "def _get_level_values(self, level) -> Index:\n    self._validate_index_level(level)\n    return self",
    "docstring": "Return an Index of values for requested level. This is primarily useful to get an individual level of values from a MultiIndex, but is provided on Index as well for compatibility. Parameters ---------- level : int or str It is either the integer position or the name of the level. Returns ------- Index Calling object, as there is only one level in the Index. See Also -------- MultiIndex.get_level_values : Get values for a level of a MultiIndex. Notes ----- For Index, level should be 0, since there are no multiple levels. Examples -------- >>> idx = pd.Index(list(\"abc\")) >>> idx Index(['a', 'b', 'c'], dtype='object') Get level values by supplying as integer: >>> idx.get_level_values(0) Index(['a', 'b', 'c'], dtype='object')",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\indexes\\base.py",
    "ast_data": "FunctionDef name:_get_level_values arg:self arg:level arguments arg arg Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_FromTorchTensor",
    "source_code": "class _FromTorchTensor(torch.autograd.Function):\n\n    @staticmethod\n    def forward(ctx, input: torch.Tensor) -> torch.Tensor:\n        return _maybe_wrap_tensor(input)\n\n    @staticmethod\n    def backward(ctx, grad_output: torch.Tensor) -> torch.Tensor:\n        return grad_output",
    "docstring": "_FromTorchTensor allows autograd to propagate from a normal Tensor to an AsyncCollectiveTensor.",
    "type": "class",
    "file_path": "pytorch\\torch\\distributed\\_functional_collectives.py",
    "ast_data": "ClassDef name:_FromTorchTensor FunctionDef name:forward arg:ctx arg:input arguments arg arg Return return:yes Call FunctionDef name:backward arg:ctx arg:grad_output arguments arg arg Return return:yes"
  },
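The same static `forward`/`backward` pattern in a self-contained form; `ScaleByTwo` is a hypothetical op for illustration, not part of the collectives code:

```python
import torch

class ScaleByTwo(torch.autograd.Function):  # hypothetical op, for illustration
    @staticmethod
    def forward(ctx, x: torch.Tensor) -> torch.Tensor:
        return x * 2

    @staticmethod
    def backward(ctx, grad_output: torch.Tensor) -> torch.Tensor:
        return grad_output * 2  # d(2x)/dx = 2

t = torch.ones(3, requires_grad=True)
ScaleByTwo.apply(t).sum().backward()
print(t.grad)  # tensor([2., 2., 2.])
```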
  {
    "library": "tensorflow",
    "name": "append",
    "source_code": "def append(self, value):\n    value = self._track_value(value, self._name_element(len(self._storage)))\n    self._storage.append(value)",
    "docstring": "Add a new trackable value.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\trackable\\data_structures.py",
    "ast_data": "FunctionDef name:append arg:self arg:value arguments arg arg Assign Call Call Call Call"
  },
  {
    "library": "django",
    "name": "check_templates",
    "source_code": "@register(Tags.templates)\ndef check_templates(app_configs, **kwargs):\n    from django.template import engines\n    errors = []\n    for engine in engines.all():\n        errors.extend(engine.check())\n    return errors",
    "docstring": "Check all registered template engines.",
    "type": "function",
    "file_path": "django\\django\\core\\checks\\templates.py",
    "ast_data": "FunctionDef name:check_templates arg:app_configs arguments arg arg Assign For Call Call Call Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "_ensure_single_chunk",
    "source_code": "def _ensure_single_chunk(x: Array, axis: int) -> tuple[Array, Callable[[Array], Array]]:\n    if axis < 0:\n        axis += x.ndim\n    if x.numblocks[axis] < 2:\n        return (x, lambda x: x)\n    x = x.rechunk({i: -1 if i == axis else 'auto' for i in range(x.ndim)})\n    return (x, lambda x: x.rechunk())",
    "docstring": "Make sure that Array is not broken into multiple chunks along axis. Returns ------- x : Array The input Array with a single chunk along axis. restore : Callable[Array, Array] function to apply to the output to rechunk it back into reasonable chunks",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\externals\\array_api_compat\\dask\\array\\_aliases.py",
    "ast_data": "FunctionDef name:_ensure_single_chunk arg:x arg:axis arguments arg arg If Compare If Compare Return return:yes arguments arg Assign Call Compare Call Return return:yes arguments arg Call"
  },
  {
    "library": "cherrypy",
    "name": "readline",
    "source_code": "def readline(self, size=-1):\n    data = self.rfile.readline(size)\n    self.bytes_read += len(data)\n    return data",
    "docstring": "Read a line from file, counting bytes.",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\lib\\cpstats.py",
    "ast_data": "FunctionDef name:readline arg:self arg:size arguments arg arg Assign Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "variable_shape",
    "source_code": "def variable_shape(handle, out_type=None):\n    if out_type is None:\n        if flags.config().tf_shape_default_int64.value():\n            out_type = dtypes.int64\n        else:\n            out_type = dtypes.int32\n    handle_data = get_eager_safe_handle_data(handle)\n    if handle_data is None or not handle_data.is_set:\n        return gen_resource_variable_ops.variable_shape(handle, out_type=out_type)\n    shape_proto = handle_data.shape_and_type[0].shape\n    if shape_proto.unknown_rank or any((x.size == -1 for x in shape_proto.dim)):\n        return gen_resource_variable_ops.variable_shape(handle, out_type=out_type)\n    return constant_op.constant([x.size for x in shape_proto.dim], dtype=out_type)",
    "docstring": "Returns the shape of the variable from the handle. If the output shape dtype is not specified, it will be set to int64 if tf_shape_default_int64 is enabled, otherwise it will be set to int32. Args: handle: The handle of the variable. out_type: The dtype of the output shape. Returns: The shape of the variable.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\resource_variable_ops.py",
    "ast_data": "FunctionDef name:variable_shape arg:handle arg:out_type arguments arg arg If Compare If Call Call Assign Assign Assign Call If BoolOp Compare Return return:yes Call Assign If BoolOp Call Compare Return return:yes Call Return return:yes Call"
  },
  {
    "library": "authlib",
    "name": "get_body",
    "source_code": "def get_body(self):\n    error = super().get_body()\n    if self.state:\n        error.append(('state', self.state))\n    return error",
    "docstring": "Get a list of body.",
    "type": "method",
    "file_path": "authlib\\authlib\\oauth2\\base.py",
    "ast_data": "FunctionDef name:get_body arg:self arguments arg Assign Call Call If Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_FeatureColumn",
    "source_code": "@six.add_metaclass(abc.ABCMeta)\nclass _FeatureColumn(object):\n\n    @abc.abstractproperty\n    def name(self):\n        pass\n\n    def __lt__(self, other):\n        return str(self) < str(other)\n\n    def __gt__(self, other):\n        return str(self) > str(other)\n\n    @property\n    def _var_scope_name(self):\n        return self.name\n\n    @abc.abstractmethod\n    def _transform_feature(self, inputs):\n        pass\n\n    @abc.abstractproperty\n    def _parse_example_spec(self):\n        pass\n\n    def _reset_config(self):\n        pass",
    "docstring": "Represents a feature column abstraction. WARNING: Do not subclass this layer unless you know what you are doing: the API is subject to future changes. To distinguish the concept of a feature family and a specific binary feature within a family, we refer to a feature family like \"country\" as a feature column. Following is an example feature in a format: {key: \"country\", value: [ \"US\" ]} In this example the value of feature is \"US\" and \"country\" refers to the column of the feature. This class is an abstract class. User should not create instances of this.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\feature_column\\feature_column.py",
    "ast_data": "ClassDef name:_FeatureColumn FunctionDef name:name arg:self arguments arg FunctionDef name:__lt__ arg:self arg:other arguments arg arg Return return:yes Compare Call Call FunctionDef name:__gt__ arg:self arg:other arguments arg arg Return return:yes Compare Call Call FunctionDef name:_var_scope_name arg:self arguments arg Return return:yes FunctionDef name:_transform_feature arg:self arg:inputs arguments arg arg FunctionDef name:_parse_example_spec arg:self arguments arg FunctionDef name:_reset_config arg:self arguments arg Call"
  },
  {
    "library": "pytorch",
    "name": "_set_static_graph",
    "source_code": "def _set_static_graph(self):\n    if self.static_graph:\n        warnings.warn(\"You've set static_graph to be True, no need to set it again.\")\n        return\n    self.static_graph = True\n    self._static_graph_delay_allreduce_enqueued = False\n    self.reducer._set_static_graph()\n    assert self.logger is not None\n    self.logger._set_static_graph()\n    if self.find_unused_parameters:\n        warnings.warn('You passed find_unused_parameters=true to DistributedDataParallel, `_set_static_graph` will detect unused parameters automatically, so you do not need to set find_unused_parameters=true, just be sure these unused parameters will not change during training loop while calling `_set_static_graph`.')",
    "docstring": "Set static graph for DDP. It is recommended to set static graph in the DDP constructor, which will call this private API internally.",
    "type": "method",
    "file_path": "pytorch\\torch\\nn\\parallel\\distributed.py",
    "ast_data": "FunctionDef name:_set_static_graph arg:self arguments arg If Call Return return:no Assign Assign Call Compare Call If Call"
  },
  {
    "library": "tensorflow",
    "name": "assert_shallow_structure",
    "source_code": "def assert_shallow_structure(shallow_tree, input_tree, check_types=True, expand_composites=False):\n    nest_util.assert_shallow_structure(nest_util.Modality.CORE, shallow_tree, input_tree, check_types, expand_composites)",
    "docstring": "Asserts that is a shallow structure of . That is, this function tests if the structure can be created from the structure by replacing its leaf nodes with deeper tree structures. Examples: The following code will raise an exception: The following code will raise an exception: Args: shallow_tree: an arbitrarily nested structure. input_tree: an arbitrarily nested structure. check_types: if (default) the sequence types of and have to be the same. Note that even with check_types==True, this function will consider two different namedtuple classes with the same name and _fields attribute to be the same class. expand_composites: If true, then composite tensors such as and are expanded into their component tensors. Raises: TypeError: If is a sequence but is not. TypeError: If the sequence types of are different from . Only raised if is . ValueError: If the sequence lengths of are different from .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\util\\nest.py",
    "ast_data": "FunctionDef name:assert_shallow_structure arg:shallow_tree arg:input_tree arg:check_types arg:expand_composites arguments arg arg arg arg Call"
  },
  {
    "library": "pytorch",
    "name": "get_backend",
    "source_code": "@abstractmethod\ndef get_backend(self) -> str:\n    pass",
    "docstring": "Return the name of the rendezvous backend.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\elastic\\rendezvous\\api.py",
    "ast_data": "FunctionDef name:get_backend arg:self arguments arg"
  },
  {
    "library": "kornia",
    "name": "matrix_cofactor_tensor",
    "source_code": "def matrix_cofactor_tensor(matrix: torch.Tensor) -> torch.Tensor:\n    det = torch.det(matrix)\n    singular_mask = det != 0\n    if singular_mask.sum() != 0:\n        cofactor = torch.linalg.inv(matrix[singular_mask]).transpose(-2, -1) * det[:, None, None]\n        returned_cofactor = torch.zeros_like(matrix)\n        returned_cofactor[singular_mask] = cofactor\n        return returned_cofactor\n    else:\n        raise Exception('all singular matrices')",
    "docstring": "Cofactor matrix, refer to the numpy doc. Args: matrix: The input matrix in the shape :math:.",
    "type": "function",
    "file_path": "kornia\\kornia\\geometry\\epipolar\\numeric.py",
    "ast_data": "FunctionDef name:matrix_cofactor_tensor arg:matrix arguments arg Assign Call Assign Compare If Compare Call Assign Call Call Assign Call Assign Return return:yes Raise Call"
  },
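A standalone check of the identity the function relies on, cof(A) = det(A) · inv(A)ᵀ for non-singular A (a sketch in plain torch, not kornia's API):

```python
import torch

A = torch.randn(4, 3, 3)                 # almost surely non-singular
det = torch.det(A)
cof = torch.linalg.inv(A).transpose(-2, -1) * det[:, None, None]
# Adjugate relation: A @ adj(A) = det(A) * I, with adj(A) = cof(A)^T.
eye = torch.eye(3).expand(4, 3, 3)
assert torch.allclose(A @ cof.transpose(-2, -1), det[:, None, None] * eye, atol=1e-4)
```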
  {
    "library": "scipy",
    "name": "_first",
    "source_code": "def _first(arr, axis):\n    return np.take_along_axis(arr, np.array(0, ndmin=arr.ndim), axis)",
    "docstring": "Return arr[..., 0:1, ...] where 0:1 is in the position.",
    "type": "function",
    "file_path": "scipy\\scipy\\stats\\_stats_py.py",
    "ast_data": "FunctionDef name:_first arg:arr arg:axis arguments arg arg Return return:yes Call Call"
  },
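The same `take_along_axis` trick, sketched against plain slicing:

```python
import numpy as np

arr = np.arange(24).reshape(2, 3, 4)
# np.array(0, ndmin=3) has shape (1, 1, 1) and broadcasts against every
# axis except the one being indexed, so the result keeps that dimension.
first = np.take_along_axis(arr, np.array(0, ndmin=arr.ndim), axis=1)
assert first.shape == (2, 1, 4)
assert (first == arr[:, 0:1, :]).all()
```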
  {
    "library": "matplotlib",
    "name": "cohere",
    "source_code": "@_docstring.interpd\ndef cohere(x, y, NFFT=256, Fs=2, detrend=detrend_none, window=window_hanning, noverlap=0, pad_to=None, sides='default', scale_by_freq=None):\n    if len(x) < 2 * NFFT:\n        raise ValueError('Coherence is calculated by averaging over *NFFT* length segments.  Your signal is too short for your choice of *NFFT*.')\n    Pxx, f = psd(x, NFFT, Fs, detrend, window, noverlap, pad_to, sides, scale_by_freq)\n    Pyy, f = psd(y, NFFT, Fs, detrend, window, noverlap, pad_to, sides, scale_by_freq)\n    Pxy, f = csd(x, y, NFFT, Fs, detrend, window, noverlap, pad_to, sides, scale_by_freq)\n    Cxy = np.abs(Pxy) ** 2 / (Pxx * Pyy)\n    return (Cxy, f)",
    "docstring": "The coherence between *x* and *y*. Coherence is the normalized cross spectral density: .. math:: C_{xy} = \\frac{|P_{xy}|^2}{P_{xx}P_{yy}} Parameters ---------- x, y Array or sequence containing the data %(Spectral)s %(PSD)s noverlap : int, default: 0 (no overlap) The number of points of overlap between segments. Returns ------- Cxy : 1-D array The coherence vector. freqs : 1-D array The frequencies for the elements in *Cxy*. See Also -------- :func:, :func: : For information about the methods used to compute :math:, :math: and :math:.",
    "type": "function",
    "file_path": "matplotlib\\lib\\matplotlib\\mlab.py",
    "ast_data": "FunctionDef name:cohere arg:x arg:y arg:NFFT arg:Fs arg:detrend arg:window arg:noverlap arg:pad_to arg:sides arg:scale_by_freq arguments arg arg arg arg arg arg arg arg arg arg If Compare Call Raise Call Assign Call Assign Call Assign Call Assign Call Return return:yes"
  },
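A usage sketch with two noisy signals sharing a 50 Hz tone (the signal parameters are arbitrary):

```python
import numpy as np
from matplotlib.mlab import cohere

rng = np.random.default_rng(0)
t = np.arange(4096) / 1000.0             # 1 kHz sampling; 4096 >= 2 * NFFT
x = np.sin(2 * np.pi * 50 * t) + rng.standard_normal(t.size)
y = np.sin(2 * np.pi * 50 * t) + rng.standard_normal(t.size)
Cxy, freqs = cohere(x, y, NFFT=256, Fs=1000)
print(freqs[np.argmax(Cxy)])             # peak near the shared 50 Hz tone
```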
  {
    "library": "tensorflow",
    "name": "cast",
    "source_code": "def cast(i, p):\n    return i * p + (1 - i) * (eye - p)",
    "docstring": "Return p or (1-p).",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\init_ops.py",
    "ast_data": "FunctionDef name:cast arg:i arg:p arguments arg arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "iter",
    "source_code": "@compatibility(is_backward_compatible=True)\ndef iter(self, obj: 'Proxy') -> Iterator:\n    raise TraceError('Proxy object cannot be iterated. This can be attempted when the Proxy is used in a loop or as a *args or **kwargs function argument. See the torch.fx docs on pytorch.org for a more detailed explanation of what types of control flow can be traced, and check out the Proxy docstring for help troubleshooting Proxy iteration errors')",
    "docstring": "Called when a proxy object is being iterated over, such as when used in control flow. Normally we don't know what to do because we don't know the value of the proxy, but a custom tracer can attach more information to the graph node using create_node and can choose to return an iterator.",
    "type": "method",
    "file_path": "pytorch\\torch\\fx\\proxy.py",
    "ast_data": "FunctionDef name:iter arg:self arg:obj arguments arg arg Raise Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_mul_dispatch",
    "source_code": "def _mul_dispatch(x, y, name=None):\n    if isinstance(y, sparse_tensor.SparseTensor):\n        new_vals = gen_sparse_ops.sparse_dense_cwise_mul(y.indices, y.values, y.dense_shape, x, name)\n        return sparse_tensor.SparseTensor(y.indices, new_vals, y.dense_shape)\n    else:\n        return multiply(x, y, name=name)",
    "docstring": "Dispatches cwise mul for \"Dense*Dense\" and \"Dense*Sparse\".",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\math_ops.py",
    "ast_data": "FunctionDef name:_mul_dispatch arg:x arg:y arg:name arguments arg arg arg If Call Assign Call Return return:yes Call Return return:yes Call"
  },
  {
    "library": "kornia",
    "name": "transform_keypoints",
    "source_code": "def transform_keypoints(self, M: Tensor, inplace: bool=False) -> 'Keypoints':\n    if not 2 <= M.ndim <= 3 or M.shape[-2:] != (3, 3):\n        raise ValueError(f'The transformation matrix shape must be (3, 3) or (B, 3, 3). Got {M.shape}.')\n    transformed_boxes = transform_points(M, self._data)\n    if inplace:\n        self._data = transformed_boxes\n        return self\n    return Keypoints(transformed_boxes, False)",
    "docstring": "Apply a transformation matrix to the 2D keypoints. Args: M: The transformation matrix to be applied, shape of :math: or :math:. inplace: do transform in-place and return self. Returns: The transformed keypoints.",
    "type": "method",
    "file_path": "kornia\\kornia\\geometry\\keypoints.py",
    "ast_data": "FunctionDef name:transform_keypoints arg:self arg:M arg:inplace arguments arg arg arg If BoolOp Compare Compare Raise Call Assign Call If Assign Return return:yes Return return:yes Call"
  },
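A minimal sketch of the homogeneous-coordinate math a 3x3 matrix applies to 2D points; `transform_points_2d` is a hypothetical standalone helper written for illustration, not kornia's `transform_points`:

```python
import torch

def transform_points_2d(M: torch.Tensor, pts: torch.Tensor) -> torch.Tensor:
    """Standalone sketch of a homogeneous 2D point transform."""
    ones = torch.ones(*pts.shape[:-1], 1, dtype=pts.dtype)
    homo = torch.cat([pts, ones], dim=-1)    # (B, N, 3) homogeneous points
    out = homo @ M.transpose(-2, -1)         # right-multiply by M^T == M @ p
    return out[..., :2] / out[..., 2:3]      # dehomogenize

M = torch.eye(3).unsqueeze(0)                # identity leaves points unchanged
pts = torch.rand(1, 5, 2)
assert torch.allclose(transform_points_2d(M, pts), pts)
```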
  {
    "library": "pytorch",
    "name": "load_state_dict",
    "source_code": "@override\ndef load_state_dict(self, state_dict: dict[str, Any]) -> None:\n    lr_lambdas = state_dict.pop('lr_lambdas')\n    self.__dict__.update(state_dict)\n    state_dict['lr_lambdas'] = lr_lambdas\n    for idx, fn in enumerate(lr_lambdas):\n        if fn is not None:\n            self.lr_lambdas[idx].__dict__.update(fn)",
    "docstring": "Load the scheduler's state. Args: state_dict (dict): scheduler state. Should be an object returned from a call to :meth:.",
    "type": "method",
    "file_path": "pytorch\\torch\\optim\\lr_scheduler.py",
    "ast_data": "FunctionDef name:load_state_dict arg:self arg:state_dict arguments arg arg Assign Call Call Assign For Call If Compare Call"
  },
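A round-trip sketch showing why the callables must be re-supplied on load (plain lambdas are not serialized into the state dict):

```python
import torch
from torch.optim.lr_scheduler import LambdaLR

param = torch.nn.Parameter(torch.zeros(1))
opt = torch.optim.SGD([param], lr=0.1)
sched = LambdaLR(opt, lr_lambda=lambda epoch: 0.95 ** epoch)
state = sched.state_dict()

# A new scheduler must be constructed with the same callables; load_state_dict
# restores everything else (last_epoch, etc.) around them.
sched2 = LambdaLR(opt, lr_lambda=lambda epoch: 0.95 ** epoch)
sched2.load_state_dict(state)
assert sched2.last_epoch == sched.last_epoch
```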
  {
    "library": "pandas",
    "name": "_maybe_squeeze_arg",
    "source_code": "def _maybe_squeeze_arg(self, arg: np.ndarray) -> np.ndarray:\n    return arg",
    "docstring": "For compatibility with 1D-only ExtensionArrays.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\internals\\blocks.py",
    "ast_data": "FunctionDef name:_maybe_squeeze_arg arg:self arg:arg arguments arg arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "expand",
    "source_code": "@_onnx_symbolic('aten::expand')\n@symbolic_helper.quantized_args(True)\ndef expand(g: jit_utils.GraphContext, self, size, implicit):\n    size = symbolic_helper._maybe_get_const(size, 'is')\n    if not symbolic_helper._is_value(size):\n        size = g.op('Constant', value_t=torch.LongTensor(size))\n    elif symbolic_helper._is_packed_list(size):\n        size = symbolic_helper._reshape_helper(g, stack(g, size, 0), g.op('Constant', value_t=torch.tensor([-1])))\n    dtype = _type_utils.JitScalarType.INT64\n    ones = ones_like(g, size, dtype)\n    neg_ones = mul(g, ones, g.op('Constant', value_t=torch.tensor(-1)))\n    size = where(g, g.op('Equal', size, neg_ones), ones, size)\n    return g.op('Expand', self, size)",
    "docstring": "Implement the expand function for a pytorch tensor in ONNX according to specified",
    "type": "function",
    "file_path": "pytorch\\torch\\onnx\\symbolic_opset9.py",
    "ast_data": "FunctionDef name:expand arg:g arg:self arg:size arg:implicit arguments arg arg arg arg Assign Call If Call Assign Call Call If Call Assign Call Call Call Call Assign Assign Call Assign Call Call Call Assign Call Call Return return:yes Call Call Call"
  },
  {
    "library": "numpy",
    "name": "open",
    "source_code": "def open(path, mode='r', destpath=os.curdir, encoding=None, newline=None):\n    ds = DataSource(destpath)\n    return ds.open(path, mode, encoding=encoding, newline=newline)",
    "docstring": "Open with and return the file object. If `DataSourcedestpathpathdestpathopenDataSource`.",
    "type": "function",
    "file_path": "numpy\\numpy\\lib\\_datasource.py",
    "ast_data": "FunctionDef name:open arg:path arg:mode arg:destpath arg:encoding arg:newline arguments arg arg arg arg arg Assign Call Return return:yes Call"
  },
  {
    "library": "django",
    "name": "save",
    "source_code": "def save(self, must_create=False):\n    raise NotImplementedError('subclasses of SessionBase must provide a save() method')",
    "docstring": "Save the session data. If 'must_create' is True, create a new session object (or raise CreateError). Otherwise, only update an existing object and don't create one (raise UpdateError if needed).",
    "type": "method",
    "file_path": "django\\django\\contrib\\sessions\\backends\\base.py",
    "ast_data": "FunctionDef name:save arg:self arg:must_create arguments arg arg Raise Call"
  },
  {
    "library": "matplotlib",
    "name": "_char_index_at",
    "source_code": "def _char_index_at(self, x):\n    if not self._text:\n        return 0\n    text = self._text\n    fontproperties = str(self._fontproperties)\n    if fontproperties not in Text._charsize_cache:\n        Text._charsize_cache[fontproperties] = dict()\n    charsize_cache = Text._charsize_cache[fontproperties]\n    for char in set(text):\n        if char not in charsize_cache:\n            self.set_text(char)\n            bb = self.get_window_extent()\n            charsize_cache[char] = bb.x1 - bb.x0\n    self.set_text(text)\n    bb = self.get_window_extent()\n    size_accum = np.cumsum([0] + [charsize_cache[x] for x in text])\n    std_x = x - bb.x0\n    return np.abs(size_accum - std_x).argmin()",
    "docstring": "Calculate the index closest to the coordinate x in display space. The position of text[index] is assumed to be the sum of the widths of all preceding characters text[:index]. This works only on single line texts.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\text.py",
    "ast_data": "FunctionDef name:_char_index_at arg:self arg:x arguments arg arg If Return return:yes Assign Assign Call If Compare Assign Call Assign For Call If Compare Call Assign Call Assign Call Assign Call Assign Call Assign Return return:yes Call Call"
  },
  {
    "library": "pandas",
    "name": "SeriesInfo",
    "source_code": "class SeriesInfo(_BaseInfo):\n\n    def __init__(self, data: Series, memory_usage: bool | str | None=None) -> None:\n        self.data: Series = data\n        self.memory_usage = _initialize_memory_usage(memory_usage)\n\n    def render(self, *, buf: WriteBuffer[str] | None=None, max_cols: int | None=None, verbose: bool | None=None, show_counts: bool | None=None) -> None:\n        if max_cols is not None:\n            raise ValueError('Argument `max_cols` can only be passed in DataFrame.info, not Series.info')\n        printer = _SeriesInfoPrinter(info=self, verbose=verbose, show_counts=show_counts)\n        printer.to_buffer(buf)\n\n    @property\n    def non_null_counts(self) -> list[int]:\n        return [self.data.count()]\n\n    @property\n    def dtypes(self) -> Iterable[Dtype]:\n        return [self.data.dtypes]\n\n    @property\n    def dtype_counts(self) -> Mapping[str, int]:\n        from pandas.core.frame import DataFrame\n        return _get_dataframe_dtype_counts(DataFrame(self.data))\n\n    @property\n    def memory_usage_bytes(self) -> int:\n        deep = self.memory_usage == 'deep'\n        return self.data.memory_usage(index=True, deep=deep)",
    "docstring": "Class storing series-specific info.",
    "type": "class",
    "file_path": "pandas\\pandas\\io\\formats\\info.py",
    "ast_data": "ClassDef name:SeriesInfo FunctionDef name:__init__ arg:self arg:data arg:memory_usage arguments arg arg arg Assign Call FunctionDef name:render arg:self arguments arg arg arg arg arg If Compare Raise Call Assign Call Call FunctionDef name:non_null_counts arg:self arguments arg Return return:yes Call FunctionDef name:dtypes arg:self arguments arg Return return:yes FunctionDef name:dtype_counts arg:self arguments arg Return return:yes Call Call FunctionDef name:memory_usage_bytes arg:self arguments arg Assign Compare Return return:yes Call"
  },
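A usage sketch of the public entry point this class backs:

```python
import pandas as pd

s = pd.Series([1.0, 2.0, None], name="x")
s.info(memory_usage="deep")   # rendered via SeriesInfo
# Passing max_cols here raises ValueError: it is a DataFrame.info-only option.
```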
  {
    "library": "sphinx",
    "name": "get_enumerable_node_type",
    "source_code": "def get_enumerable_node_type(self, node: Node) -> str | None:\n\n    def has_child(node: Element, cls: type) -> bool:\n        return any((isinstance(child, cls) for child in node))\n    if isinstance(node, nodes.section):\n        return 'section'\n    elif isinstance(node, nodes.container) and 'literal_block' in node and has_child(node, nodes.literal_block):\n        return 'code-block'\n    else:\n        figtype, _ = self.enumerable_nodes.get(node.__class__, (None, None))\n        return figtype",
    "docstring": "Get type of enumerable nodes.",
    "type": "method",
    "file_path": "sphinx\\sphinx\\domains\\std\\__init__.py",
    "ast_data": "FunctionDef name:get_enumerable_node_type arg:self arg:node arguments arg arg FunctionDef name:has_child arg:node arg:cls arguments arg arg Return return:yes Call Call If Call Return return:yes If BoolOp Call Compare Call Return return:yes Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "set_non_tensor_output",
    "source_code": "def set_non_tensor_output(self, name, output):\n    if distribute_lib.in_cross_replica_context():\n        self._non_tensor_outputs[name] = output\n    else:\n\n        def merge_fn(distribution, value):\n            self._non_tensor_outputs[name] = distribution.experimental_local_results(value)\n        distribute_lib.get_replica_context().merge_call(merge_fn, args=(output,))",
    "docstring": "Set with to be captured as a non tensor output.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\input_lib.py",
    "ast_data": "FunctionDef name:set_non_tensor_output arg:self arg:name arg:output arguments arg arg arg If Call Assign FunctionDef name:merge_fn arg:distribution arg:value arguments arg arg Assign Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "from_proto",
    "source_code": "@staticmethod\ndef from_proto(variable_def, import_scope=None):\n    raise NotImplementedError",
    "docstring": "Returns a object created from .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\variables.py",
    "ast_data": "FunctionDef name:from_proto arg:variable_def arg:import_scope arguments arg arg Raise"
  },
  {
    "library": "django",
    "name": "JSONCatalog",
    "source_code": "class JSONCatalog(JavaScriptCatalog):\n\n    def render_to_response(self, context, **response_kwargs):\n        return JsonResponse(context)",
    "docstring": "Return the selected language catalog as a JSON object. Receive the same parameters as JavaScriptCatalog and return a response with a JSON object of the following format: { \"catalog\": { # Translations catalog }, \"formats\": { # Language formats for date, time, etc. }, \"plural\": '...' # Expression for plural forms, or null. }",
    "type": "class",
    "file_path": "django\\django\\views\\i18n.py",
    "ast_data": "ClassDef name:JSONCatalog FunctionDef name:render_to_response arg:self arg:context arguments arg arg arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "LazyCache",
    "source_code": "class LazyCache:\n\n    def __init__(self, value: Any, source: Any) -> None:\n        if not isinstance(value, LazySymNodeFormatString):\n            assert source\n        self.value = value\n        self.source = source\n        self.vt: Optional[VariableTracker] = None\n\n    def realize(self) -> None:\n        assert self.vt is None\n        from ..symbolic_convert import InstructionTranslator\n        from . import builder\n        tx = InstructionTranslator.current_tx()\n        if isinstance(self.value, LazySymNodeFormatString):\n            self.vt = builder.SourcelessBuilder.create(tx, self.value)\n        else:\n            self.vt = builder.VariableBuilder(tx, self.source)(self.value)\n        del self.value\n        del self.source",
    "docstring": "Container to cache the real VariableTracker",
    "type": "class",
    "file_path": "pytorch\\torch\\_dynamo\\variables\\lazy.py",
    "ast_data": "ClassDef name:LazyCache FunctionDef name:__init__ arg:self arg:value arg:source arguments arg arg arg If Call Assign Assign FunctionDef name:realize arg:self arguments arg Compare Assign Call If Call Assign Call Assign Call Call"
  },
  {
    "library": "numpy",
    "name": "lagmul",
    "source_code": "def lagmul(c1, c2):\n    [c1, c2] = pu.as_series([c1, c2])\n    if len(c1) > len(c2):\n        c = c2\n        xs = c1\n    else:\n        c = c1\n        xs = c2\n    if len(c) == 1:\n        c0 = c[0] * xs\n        c1 = 0\n    elif len(c) == 2:\n        c0 = c[0] * xs\n        c1 = c[1] * xs\n    else:\n        nd = len(c)\n        c0 = c[-2] * xs\n        c1 = c[-1] * xs\n        for i in range(3, len(c) + 1):\n            tmp = c0\n            nd = nd - 1\n            c0 = lagsub(c[-i] * xs, c1 * (nd - 1) / nd)\n            c1 = lagadd(tmp, lagsub((2 * nd - 1) * c1, lagmulx(c1)) / nd)\n    return lagadd(c0, lagsub(c1, lagmulx(c1)))",
    "docstring": "Multiply one Laguerre series by another. Returns the product of two Laguerre series * . The arguments are sequences of coefficients, from lowest order \"term\" to highest, e.g., [1,2,3] represents the series ``. Parameters ---------- c1, c2 : array_like 1-D arrays of Laguerre series coefficients ordered from low to high. Returns ------- out : ndarray Of Laguerre series coefficients representing their product. See Also -------- lagadd, lagsub, lagmulx, lagdiv, lagpow Notes ----- In general, the (polynomial) product of two C-series results in terms that are not in the Laguerre polynomial basis set. Thus, to express the product as a Laguerre series, it is necessary to \"reproject\" the product onto said basis set, which may produce \"unintuitive\" (but correct) results; see Examples section below. Examples -------- >>> from numpy.polynomial.laguerre import lagmul >>> lagmul([1, 2, 3], [0, 1, 2]) array([ 8., -13., 38., -51., 36.])",
    "type": "function",
    "file_path": "numpy\\numpy\\polynomial\\laguerre.py",
    "ast_data": "FunctionDef name:lagmul arg:c1 arg:c2 arguments arg arg Assign Call If Compare Call Call Assign Assign Assign Assign If Compare Call Assign Assign If Compare Call Assign Assign Assign Call Assign Assign For Call Call Assign Assign Assign Call Assign Call Call Call Return return:yes Call Call Call"
  },
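The "reprojection" note can be verified by converting both sides to the power basis (a sketch using public `numpy.polynomial` helpers):

```python
import numpy as np
from numpy.polynomial import laguerre as L
from numpy.polynomial import polynomial as P

c1, c2 = [1, 2, 3], [0, 1, 2]
prod = L.lagmul(c1, c2)
# Converting both sides to the power basis shows the reprojection is exact.
lhs = L.lag2poly(prod)
rhs = P.polymul(L.lag2poly(c1), L.lag2poly(c2))
assert np.allclose(lhs, rhs)
```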
  {
    "library": "kornia",
    "name": "as_dict",
    "source_code": "def as_dict(self) -> Dict[str, AverageMeter]:\n    return self._stats",
    "docstring": "Return the dict format.",
    "type": "method",
    "file_path": "kornia\\kornia\\x\\utils.py",
    "ast_data": "FunctionDef name:as_dict arg:self arguments arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "is_enabled",
    "source_code": "def is_enabled(self) -> bool:\n    return self._enabled",
    "docstring": "Return a bool indicating whether this instance is enabled.",
    "type": "method",
    "file_path": "pytorch\\torch\\amp\\grad_scaler.py",
    "ast_data": "FunctionDef name:is_enabled arg:self arguments arg Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "raise_if_exceeds",
    "source_code": "def raise_if_exceeds(self, locs):\n    if len(locs) >= self.MAXTICKS:\n        _log.warning('Locator attempting to generate %s ticks ([%s, ..., %s]), which exceeds Locator.MAXTICKS (%s).', len(locs), locs[0], locs[-1], self.MAXTICKS)\n    return locs",
    "docstring": "Log at WARNING level if *locs* is longer than . This is intended to be called immediately before returning *locs* from `` to inform users in case their Locator returns a huge number of ticks, causing Matplotlib to run out of memory. The \"strange\" name of this method dates back to when it would raise an exception instead of emitting a log.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\ticker.py",
    "ast_data": "FunctionDef name:raise_if_exceeds arg:self arg:locs arguments arg arg If Compare Call Call Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "set_linespacing",
    "source_code": "def set_linespacing(self, spacing):\n    _api.check_isinstance(Real, spacing=spacing)\n    self._linespacing = spacing\n    self.stale = True",
    "docstring": "Set the line spacing as a multiple of the font size. The default line spacing is 1.2. Parameters ---------- spacing : float (multiple of font size)",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\text.py",
    "ast_data": "FunctionDef name:set_linespacing arg:self arg:spacing arguments arg arg Call Assign Assign"
  },
  {
    "library": "cherrypy",
    "name": "do_logout",
    "source_code": "def do_logout(self, from_page='..', **kwargs):\n    sess = cherrypy.session\n    username = sess.get(self.session_key)\n    sess[self.session_key] = None\n    if username:\n        cherrypy.serving.request.login = None\n        self.on_logout(username)\n    raise cherrypy.HTTPRedirect(from_page)",
    "docstring": "Logout. May raise redirect, or return True if request handled.",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\lib\\cptools.py",
    "ast_data": "FunctionDef name:do_logout arg:self arg:from_page arguments arg arg arg Assign Assign Call Assign If Assign Call Raise Call"
  },
  {
    "library": "matplotlib",
    "name": "_bounds",
    "source_code": "def _bounds(font):\n    gs = font.getGlyphSet(False)\n    pen = fontTools.pens.boundsPen.BoundsPen(gs)\n    for name in gs.keys():\n        gs[name].draw(pen)\n    return pen.bounds or (0, 0, 0, 0)",
    "docstring": "Compute the font bounding box, as if all glyphs were written at the same start position. Helper function for _font_to_ps_type42. Parameters ---------- font : fontTools.ttLib.ttFont.TTFont The font Returns ------- tuple (xMin, yMin, xMax, yMax) of the combined bounding box of all the glyphs in the font",
    "type": "function",
    "file_path": "matplotlib\\lib\\matplotlib\\backends\\backend_ps.py",
    "ast_data": "FunctionDef name:_bounds arg:font arguments arg Assign Call Assign Call For Call Call Return return:yes BoolOp"
  },
  {
    "library": "scikit-learn",
    "name": "_update_dict",
    "source_code": "def _update_dict(dictionary, Y, code, A=None, B=None, verbose=False, random_state=None, positive=False):\n    n_samples, n_components = code.shape\n    random_state = check_random_state(random_state)\n    if A is None:\n        A = code.T @ code\n    if B is None:\n        B = Y.T @ code\n    n_unused = 0\n    for k in range(n_components):\n        if A[k, k] > 1e-06:\n            dictionary[k] += (B[:, k] - A[k] @ dictionary) / A[k, k]\n        else:\n            newd = Y[random_state.choice(n_samples)]\n            noise_level = 0.01 * (newd.std() or 1)\n            noise = random_state.normal(0, noise_level, size=len(newd))\n            dictionary[k] = newd + noise\n            code[:, k] = 0\n            n_unused += 1\n        if positive:\n            np.clip(dictionary[k], 0, None, out=dictionary[k])\n        dictionary[k] /= max(linalg.norm(dictionary[k]), 1)\n    if verbose and n_unused > 0:\n        print(f'{n_unused} unused atoms resampled.')",
    "docstring": "Update the dense dictionary factor in place. Parameters ---------- dictionary : ndarray of shape (n_components, n_features) Value of the dictionary at the previous iteration. Y : ndarray of shape (n_samples, n_features) Data matrix. code : ndarray of shape (n_samples, n_components) Sparse coding of the data against which to optimize the dictionary. A : ndarray of shape (n_components, n_components), default=None Together with , sufficient stats of the online model to update the dictionary. B : ndarray of shape (n_features, n_components), default=None Together with , sufficient stats of the online model to update the dictionary. verbose: bool, default=False Degree of output the procedure will print. random_state : int, RandomState instance or None, default=None Used for randomly initializing the dictionary. Pass an int for reproducible results across multiple function calls. See :term:. positive : bool, default=False Whether to enforce positivity when finding the dictionary. .. versionadded:: 0.20",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\decomposition\\_dict_learning.py",
    "ast_data": "FunctionDef name:_update_dict arg:dictionary arg:Y arg:code arg:A arg:B arg:verbose arg:random_state arg:positive arguments arg arg arg arg arg arg arg arg Assign Assign Call If Compare Assign If Compare Assign Assign For Call If Compare Assign Call Assign BoolOp Call Assign Call Call Assign Assign If Call Call Call If BoolOp Compare Call"
  },
  {
    "library": "pytorch",
    "name": "Broadcast",
    "source_code": "@dataclass\nclass Broadcast(DimSpec):\n    dim: DimSpec\n    dim_size: int\n\n    @classmethod\n    def new(cls, dim: DimSpec, dim_size: int) -> DimSpec:\n        return Broadcast(dim, dim_size)\n\n    def inputs(self) -> Iterable[DimSpec]:\n        return (self.dim,)",
    "docstring": "Output is the broadcast of a singleton input dimension.",
    "type": "class",
    "file_path": "pytorch\\torch\\distributed\\tensor\\_ops\\_view_ops.py",
    "ast_data": "ClassDef name:Broadcast FunctionDef name:new arg:cls arg:dim arg:dim_size arguments arg arg arg Return return:yes Call FunctionDef name:inputs arg:self arguments arg Return return:yes"
  },
  {
    "library": "django",
    "name": "num_feat",
    "source_code": "@property\ndef num_feat(self, force=1):\n    return capi.get_feature_count(self.ptr, force)",
    "docstring": "Return the number of features in the Layer.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\gdal\\layer.py",
    "ast_data": "FunctionDef name:num_feat arg:self arg:force arguments arg arg Return return:yes Call"
  },
  {
    "library": "django",
    "name": "find_in_app",
    "source_code": "def find_in_app(self, app, path):\n    storage = self.storages.get(app)\n    if storage and storage.exists(path):\n        matched_path = storage.path(path)\n        if matched_path:\n            return matched_path",
    "docstring": "Find a requested static file in an app's static locations.",
    "type": "method",
    "file_path": "django\\django\\contrib\\staticfiles\\finders.py",
    "ast_data": "FunctionDef name:find_in_app arg:self arg:app arg:path arguments arg arg arg Assign Call If BoolOp Call Assign Call If Return return:yes"
  },
  {
    "library": "numpy",
    "name": "_get_ufunc_and_otypes",
    "source_code": "def _get_ufunc_and_otypes(self, func, args):\n    if not args:\n        raise ValueError('args can not be empty')\n    if self.otypes is not None:\n        otypes = self.otypes\n        nin = len(args)\n        nout = len(self.otypes)\n        if func is not self.pyfunc or nin not in self._ufunc:\n            ufunc = frompyfunc(func, nin, nout)\n        else:\n            ufunc = None\n        if func is self.pyfunc:\n            ufunc = self._ufunc.setdefault(nin, ufunc)\n    else:\n        if builtins.any((arg.size == 0 for arg in args)):\n            raise ValueError('cannot call `vectorize` on size 0 inputs unless `otypes` is set')\n        inputs = [arg.flat[0] for arg in args]\n        outputs = func(*inputs)\n        if self.cache:\n            _cache = [outputs]\n\n            def _func(*vargs):\n                if _cache:\n                    return _cache.pop()\n                else:\n                    return func(*vargs)\n        else:\n            _func = func\n        if isinstance(outputs, tuple):\n            nout = len(outputs)\n        else:\n            nout = 1\n            outputs = (outputs,)\n        otypes = ''.join([asarray(outputs[_k]).dtype.char for _k in range(nout)])\n        ufunc = frompyfunc(_func, len(args), nout)\n    return (ufunc, otypes)",
    "docstring": "Return (ufunc, otypes).",
    "type": "method",
    "file_path": "numpy\\numpy\\lib\\_function_base_impl.py",
    "ast_data": "FunctionDef name:_get_ufunc_and_otypes arg:self arg:func arg:args arguments arg arg arg If Raise Call If Compare Assign Assign Call Assign Call If BoolOp Compare Compare Assign Call Assign If Compare Assign Call If Call Compare Raise Call Assign Assign Call If Assign FunctionDef name:_func arguments arg If Return return:yes Call Return return:yes Call Assign If Call Assign Call Assign Assign Assign Call Call Call Assign Call Call Return return:yes"
  },
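A sketch of the user-visible consequence: supplying `otypes` lets `np.vectorize` handle size-0 inputs, since no probe call on a first element is needed:

```python
import numpy as np

# With otypes given, vectorize skips the probe call on the first element,
# which is why size-0 inputs are only allowed when otypes is set.
f = np.vectorize(lambda x: x + 0.5, otypes=[np.float64])
print(f(np.array([], dtype=np.int64)))   # array([], dtype=float64)
print(f(np.array([1, 2])))               # array([1.5, 2.5])
```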
  {
    "library": "tensorflow",
    "name": "from_compatible_tensor_list",
    "source_code": "def from_compatible_tensor_list(element_spec, tensor_list):\n    return _from_tensor_list_helper(lambda spec, value: spec._from_compatible_tensor_list(value), element_spec, tensor_list)",
    "docstring": "Returns an element constructed from the given spec and tensor list. Args: element_spec: A nested structure of objects representing to element type specification. tensor_list: A list of tensors to use for constructing the value. Returns: An element constructed from the given spec and tensor list. Raises: ValueError: If the number of tensors needed to construct an element for the given spec does not match the given number of tensors.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\data\\util\\structure.py",
    "ast_data": "FunctionDef name:from_compatible_tensor_list arg:element_spec arg:tensor_list arguments arg arg Return return:yes Call arguments arg arg Call"
  },
  {
    "library": "cryptography",
    "name": "private_bytes",
    "source_code": "@abc.abstractmethod\ndef private_bytes(self, encoding: _serialization.Encoding, format: _serialization.PrivateFormat, encryption_algorithm: _serialization.KeySerializationEncryption) -> bytes:\n    pass",
    "docstring": "Returns the key serialized as bytes.",
    "type": "method",
    "file_path": "cryptography\\src\\cryptography\\hazmat\\primitives\\asymmetric\\dh.py",
    "ast_data": "FunctionDef name:private_bytes arg:self arg:encoding arg:format arg:encryption_algorithm arguments arg arg arg arg"
  },
  {
    "library": "tensorflow",
    "name": "_tensor_pos",
    "source_code": "def _tensor_pos(self):\n    return self",
    "docstring": "Returns self, for unary operator .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\numpy_ops\\np_math_ops.py",
    "ast_data": "FunctionDef name:_tensor_pos arg:self arguments arg Return return:yes"
  },
  {
    "library": "kornia",
    "name": "get_laf_descriptors",
    "source_code": "def get_laf_descriptors(img: Tensor, lafs: Tensor, patch_descriptor: Module, patch_size: int=32, grayscale_descriptor: bool=True) -> Tensor:\n    KORNIA_CHECK_LAF(lafs)\n    patch_descriptor = patch_descriptor.to(img)\n    patch_descriptor.eval()\n    timg: Tensor = img\n    if lafs.shape[1] == 0:\n        warnings.warn(f'LAF contains no keypoints {lafs.shape}, returning empty tensor', stacklevel=1)\n        return torch.empty(lafs.shape[0], lafs.shape[1], 128, dtype=lafs.dtype, device=lafs.device)\n    if grayscale_descriptor and img.size(1) == 3:\n        timg = rgb_to_grayscale(img)\n    patches: Tensor = extract_patches_from_pyramid(timg, lafs, patch_size)\n    B, N, CH, H, W = patches.size()\n    return patch_descriptor(patches.view(B * N, CH, H, W)).view(B, N, -1)",
    "docstring": "Get local descriptors, corresponding to LAFs (keypoints). Args: img: image features with shape :math:. lafs: local affine frames :math:. patch_descriptor: patch descriptor module, e.g. :class: or :class:. patch_size: patch size in pixels, which descriptor expects. grayscale_descriptor: True if `(B,N,D)D` is descriptor size.",
    "type": "function",
    "file_path": "kornia\\kornia\\feature\\integrated.py",
    "ast_data": "FunctionDef name:get_laf_descriptors arg:img arg:lafs arg:patch_descriptor arg:patch_size arg:grayscale_descriptor arguments arg arg arg arg arg Call Assign Call Call If Compare Call Return return:yes Call If BoolOp Compare Call Assign Call Call Assign Call Return return:yes Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "import_graph_def_for_function",
    "source_code": "def import_graph_def_for_function(graph_def, name=None, propagate_device_spec=False):\n    return _import_graph_def_internal(graph_def, validate_colocation_constraints=False, name=name, propagate_device_spec=propagate_device_spec)",
    "docstring": "Like import_graph_def but does not validate colocation constraints.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\importer.py",
    "ast_data": "FunctionDef name:import_graph_def_for_function arg:graph_def arg:name arg:propagate_device_spec arguments arg arg arg Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "violation",
    "source_code": "def violation(self, x):\n    with catch_warnings():\n        filterwarnings('ignore', 'delta_grad', UserWarning)\n        ev = self.fun.fun(np.asarray(x))\n    excess_lb = np.maximum(self.bounds[0] - ev, 0)\n    excess_ub = np.maximum(ev - self.bounds[1], 0)\n    return excess_lb + excess_ub",
    "docstring": "How much the constraint is exceeded by. Parameters ---------- x : array-like Vector of independent variables Returns ------- excess : array-like How much the constraint is exceeded by, for each of the constraints specified by .",
    "type": "method",
    "file_path": "scipy\\scipy\\optimize\\_constraints.py",
    "ast_data": "FunctionDef name:violation arg:self arg:x arguments arg arg With Call Call Assign Call Call Assign Call Assign Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "get_default_args",
    "source_code": "def get_default_args(fn):\n    if fn is None:\n        return {}\n    signature = inspect.signature(fn)\n    return {k: v.default for k, v in signature.parameters.items() if v.default is not inspect.Parameter.empty}",
    "docstring": "Get a dictionary of default arguments for a function. Args: fn: Callable - The function to inspect for default arguments. Returns: (Dict[str, Any]): mapping argument names to their default values if :attr: is not None, else empty dictionary.",
    "type": "function",
    "file_path": "pytorch\\torch\\jit\\frontend.py",
    "ast_data": "FunctionDef name:get_default_args arg:fn arguments arg If Compare Return return:no Assign Call Return return:yes Call Compare"
  },
  {
    "library": "kornia",
    "name": "get_mps_device_if_available",
    "source_code": "def get_mps_device_if_available() -> torch.device:\n    dev = 'cpu'\n    if hasattr(torch.backends, 'mps'):\n        if torch.backends.mps.is_available():\n            dev = 'mps'\n    return torch.device(dev)",
    "docstring": "Try to get mps device, if fail, return cpu. Returns: torch.device",
    "type": "function",
    "file_path": "kornia\\kornia\\utils\\helpers.py",
    "ast_data": "FunctionDef name:get_mps_device_if_available arguments Assign If Call If Call Assign Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "QConfigDynamic",
    "source_code": "@deprecated('`QConfigDynamic` is going to be deprecated in PyTorch 1.12, please use `QConfig` instead', category=FutureWarning)\nclass QConfigDynamic(namedtuple('QConfigDynamic', ['activation', 'weight'])):\n    __slots__ = ()\n\n    def __new__(cls, activation=torch.nn.Identity, weight=torch.nn.Identity):\n        if isinstance(weight, nn.Module):\n            raise ValueError('QConfigDynamic received observer instance, please pass observer class instead. ' + 'Use MyObserver.with_args(x=1) to override arguments to constructor if needed')\n        return super().__new__(cls, activation, weight)",
    "docstring": "Describes how to dynamically quantize a layer or a part of the network by providing settings (observer classes) for weights. It's like QConfig, but for dynamic quantization. Note that QConfigDynamic needs to contain observer **classes** (like MinMaxObserver) or a callable that returns instances on invocation, not the concrete observer instances themselves. Quantization function will instantiate observers multiple times for each of the layers. Observer classes have usually reasonable default arguments, but they can be overwritten with method (that behaves like functools.partial):: my_qconfig = QConfigDynamic(weight=default_observer.with_args(dtype=torch.qint8))",
    "type": "class",
    "file_path": "pytorch\\torch\\ao\\quantization\\qconfig.py",
    "ast_data": "ClassDef name:QConfigDynamic Call Assign FunctionDef name:__new__ arg:cls arg:activation arg:weight arguments arg arg arg If Call Raise Call Return return:yes Call Call Call"
  },
  {
    "library": "django",
    "name": "ensure_timezone",
    "source_code": "def ensure_timezone(self):\n    return False",
    "docstring": "Ensure the connection's timezone is set to and return whether it changed or not.",
    "type": "method",
    "file_path": "django\\django\\db\\backends\\base\\base.py",
    "ast_data": "FunctionDef name:ensure_timezone arg:self arguments arg Return return:yes"
  },
  {
    "library": "virtualenv",
    "name": "interpreter",
    "source_code": "@property\ndef interpreter(self):\n    return self._interpreter",
    "docstring": "Create a virtual environment based on this reference interpreter.",
    "type": "method",
    "file_path": "virtualenv\\src\\virtualenv\\run\\session.py",
    "ast_data": "FunctionDef name:interpreter arg:self arguments arg Return return:yes"
  },
  {
    "library": "django",
    "name": "YearArchiveView",
    "source_code": "class YearArchiveView(MultipleObjectTemplateResponseMixin, BaseYearArchiveView):\n    template_name_suffix = '_archive_year'",
    "docstring": "List of objects published in a given year.",
    "type": "class",
    "file_path": "django\\django\\views\\generic\\dates.py",
    "ast_data": "ClassDef name:YearArchiveView Assign"
  },
  {
    "library": "numpy",
    "name": "cumsum",
    "source_code": "def cumsum(self, axis=None, dtype=None, out=None):\n    result = self.filled(0).cumsum(axis=axis, dtype=dtype, out=out)\n    if out is not None:\n        if isinstance(out, MaskedArray):\n            out.__setmask__(self.mask)\n        return out\n    result = result.view(type(self))\n    result.__setmask__(self._mask)\n    return result",
    "docstring": "Return the cumulative sum of the array elements over the given axis. Masked values are set to 0 internally during the computation. However, their position is saved, and the result will be masked at the same locations. Refer to for full documentation. Notes ----- The mask is lost if is not a valid :class: ! Arithmetic is modular when using integer types, and no error is raised on overflow. See Also -------- numpy.ndarray.cumsum : corresponding function for ndarrays numpy.cumsum : equivalent function Examples -------- >>> import numpy as np >>> marr = np.ma.array(np.arange(10), mask=[0,0,0,1,1,1,0,0,0,0]) >>> marr.cumsum() masked_array(data=[0, 1, 3, --, --, --, 9, 16, 24, 33], mask=[False, False, False, True, True, True, False, False, False, False], fill_value=999999)",
    "type": "method",
    "file_path": "numpy\\numpy\\ma\\core.py",
    "ast_data": "FunctionDef name:cumsum arg:self arg:axis arg:dtype arg:out arguments arg arg arg arg Assign Call Call If Compare If Call Call Return return:yes Assign Call Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_check_dill_version",
    "source_code": "def _check_dill_version(pickle_module) -> None:\n    if pickle_module is not None and pickle_module.__name__ == 'dill':\n        required_dill_version = (0, 3, 1)\n        if not check_module_version_greater_or_equal(pickle_module, required_dill_version, False):\n            raise ValueError(\"'torch' supports dill >= {}, but you have dill {}. Please upgrade dill or switch to 'pickle'\".format('.'.join([str(num) for num in required_dill_version]), pickle_module.__version__))",
    "docstring": "Checks if using dill as the pickle module, and if so, checks if it is the correct version. If dill version is lower than 0.3.1, a ValueError is raised. Args: pickle_module: module used for pickling metadata and objects",
    "type": "function",
    "file_path": "pytorch\\torch\\serialization.py",
    "ast_data": "FunctionDef name:_check_dill_version arg:pickle_module arguments arg If BoolOp Compare Compare Assign If Call Raise Call Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "message",
    "source_code": "@property\ndef message(self):\n    return self._message",
    "docstring": "The error message that describes the error.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\errors_impl.py",
    "ast_data": "FunctionDef name:message arg:self arguments arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_Checkpointable",
    "source_code": "@runtime_checkable\nclass _Checkpointable(Protocol):\n\n    def __create_write_items__(self, fqn: str, object: object) -> list[object]:\n        raise NotImplementedError('_Checkpointable._create_write_items is not implemented')\n\n    def __create_chunk_list__(self) -> list[object]:\n        raise NotImplementedError('_Checkpointable._create_chunk_list is not implemented')\n\n    def __get_tensor_shard__(self, index: int) -> torch.Tensor:\n        raise NotImplementedError('_Checkpointable._get_tensor_shard is not implemented')",
    "docstring": "Interface for checkpointable objects. Implemented as a protocol, implicit subtyping is supported so subclasses do not need to inherit this explicitly. This is to allow arbitrary objects/tensor subclasses to hook into DCP seamlessly through implementing the interface.",
    "type": "class",
    "file_path": "pytorch\\torch\\distributed\\_checkpointable.py",
    "ast_data": "ClassDef name:_Checkpointable FunctionDef name:__create_write_items__ arg:self arg:fqn arg:object arguments arg arg arg Raise Call FunctionDef name:__create_chunk_list__ arg:self arguments arg Raise Call FunctionDef name:__get_tensor_shard__ arg:self arg:index arguments arg arg Raise Call"
  },
  {
    "library": "matplotlib",
    "name": "get_cmap",
    "source_code": "def get_cmap(name: Colormap | str | None=None, lut: int | None=None) -> Colormap:\n    if name is None:\n        name = rcParams['image.cmap']\n    if isinstance(name, Colormap):\n        return name\n    _api.check_in_list(sorted(_colormaps), name=name)\n    if lut is None:\n        return _colormaps[name]\n    else:\n        return _colormaps[name].resampled(lut)",
    "docstring": "Get a colormap instance, defaulting to rc values if *name* is None. Parameters ---------- name : or str or None, default: None If a instance, it will be returned. Otherwise, the name of a colormap known to Matplotlib, which will be resampled by *lut*. The default, None, means :rc:. lut : int or None, default: None If *name* is not already a Colormap instance and *lut* is not None, the colormap will be resampled to have *lut* entries in the lookup table. Returns ------- Colormap",
    "type": "function",
    "file_path": "matplotlib\\lib\\matplotlib\\pyplot.py",
    "ast_data": "FunctionDef name:get_cmap arg:name arg:lut arguments arg arg If Compare Assign If Call Return return:yes Call Call If Compare Return return:yes Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_MakeFunc",
    "source_code": "def _MakeFunc(v, arg_name):\n    if isinstance(v, attr_value_pb2.NameAttrList):\n        return v\n    if isinstance(v, compat.bytes_or_text_types):\n        fn_attr = attr_value_pb2.NameAttrList(name=v)\n    elif hasattr(v, 'add_to_graph'):\n        v.add_to_graph(ops.get_default_graph())\n        if hasattr(v, '_as_name_attr_list'):\n            fn_attr = v._as_name_attr_list\n        else:\n            fn_attr = attr_value_pb2.NameAttrList(name=v.name)\n    else:\n        raise TypeError(f\"Don't know how to convert {repr(v)} to a func for argument {arg_name}\")\n    return fn_attr",
    "docstring": "Ensure v is a func.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\op_def_library.py",
    "ast_data": "FunctionDef name:_MakeFunc arg:v arg:arg_name arguments arg arg If Call Return return:yes If Call Assign Call If Call Call Call If Call Assign Assign Call Raise Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "extract_tensor_metadata",
    "source_code": "def extract_tensor_metadata(t: Tensor) -> TensorMetadata:\n    memory_format = suggest_memory_format(t)\n    if t._has_symbolic_sizes_strides or is_sparse_any(t) or (not t.is_contiguous(memory_format=memory_format)):\n        memory_format = None\n    storage_offset = t.storage_offset()\n    return TensorMetadata(t.dtype, t.shape, t.stride() if t.layout == torch.strided else (), t.device, t.layout, memory_format, storage_offset, t.untyped_storage().nbytes() if not is_sparse_any(t) else None, t.requires_grad, t.is_quantized, t.is_conj(), t.is_neg(), t.is_inference(), t.is_sparse, t.is_coalesced() if t.is_sparse else None, t.dense_dim() if is_sparse_any(t) else None, t.sparse_dim() if is_sparse_any(t) else None)",
    "docstring": "Extract the TensorMetadata of a tensor.",
    "type": "function",
    "file_path": "pytorch\\torch\\_subclasses\\fake_tensor.py",
    "ast_data": "FunctionDef name:extract_tensor_metadata arg:t arguments arg Assign Call If BoolOp Call Call Assign Assign Call Return return:yes Call Compare Call Call Call Call Call Call Call Call Call Call Call Call"
  },
  {
    "library": "scrapy",
    "name": "_spider_idle",
    "source_code": "def _spider_idle(self) -> None:\n    assert self.spider is not None\n    expected_ex = (DontCloseSpider, CloseSpider)\n    res = self.signals.send_catch_log(signals.spider_idle, spider=self.spider, dont_log=expected_ex)\n    detected_ex = {ex: x.value for _, x in res for ex in expected_ex if isinstance(x, Failure) and isinstance(x.value, ex)}\n    if DontCloseSpider in detected_ex:\n        return\n    if self.spider_is_idle():\n        ex = detected_ex.get(CloseSpider, CloseSpider(reason='finished'))\n        assert isinstance(ex, CloseSpider)\n        self.close_spider(self.spider, reason=ex.reason)",
    "docstring": "Called when a spider gets idle, i.e. when there are no remaining requests to download or schedule. It can be called multiple times. If a handler for the spider_idle signal raises a DontCloseSpider exception, the spider is not closed until the next loop and this function is guaranteed to be called (at least) once again. A handler can raise CloseSpider to provide a custom closing reason.",
    "type": "method",
    "file_path": "scrapy\\scrapy\\core\\engine.py",
    "ast_data": "FunctionDef name:_spider_idle arg:self arguments arg Compare Assign Assign Call Assign BoolOp Call Call If Compare Return return:no If Call Assign Call Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_initialize",
    "source_code": "def _initialize(self):\n    return self._coordinator_instance._initialize()",
    "docstring": "A function that initializes the resource.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\ps_values.py",
    "ast_data": "FunctionDef name:_initialize arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "transform",
    "source_code": "def transform(self, X):\n    check_is_fitted(self)\n    X = validate_data(self, X, accept_sparse='csr', reset=False)\n    K = self._centerer.transform(self._get_kernel(X, self.X_fit_))\n    non_zeros = np.flatnonzero(self.eigenvalues_)\n    scaled_alphas = np.zeros_like(self.eigenvectors_)\n    scaled_alphas[:, non_zeros] = self.eigenvectors_[:, non_zeros] / np.sqrt(self.eigenvalues_[non_zeros])\n    return np.dot(K, scaled_alphas)",
    "docstring": "Transform X. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) Training vector, where is the number of samples and is the number of features. Returns ------- X_new : ndarray of shape (n_samples, n_components) Returns the instance itself.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\decomposition\\_kernel_pca.py",
    "ast_data": "FunctionDef name:transform arg:self arg:X arguments arg arg Call Assign Call Assign Call Call Assign Call Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "seaborn",
    "name": "_find_numeric_cols",
    "source_code": "def _find_numeric_cols(self, data):\n    numeric_cols = []\n    for col in data:\n        if variable_type(data[col]) == 'numeric':\n            numeric_cols.append(col)\n    return numeric_cols",
    "docstring": "Find which variables in a DataFrame are numeric.",
    "type": "method",
    "file_path": "seaborn\\seaborn\\axisgrid.py",
    "ast_data": "FunctionDef name:_find_numeric_cols arg:self arg:data arguments arg arg Assign For If Compare Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_SinhGrad",
    "source_code": "@ops.RegisterGradient('Sinh')\ndef _SinhGrad(op: ops.Operation, grad):\n    x = op.inputs[0]\n    with ops.control_dependencies([grad]):\n        x = math_ops.conj(x)\n        return grad * math_ops.cosh(x)",
    "docstring": "Returns grad * cosh(x).",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\math_grad.py",
    "ast_data": "FunctionDef name:_SinhGrad arg:op arg:grad arguments arg arg Assign With Call Assign Call Return return:yes Call Call"
  },
  {
    "library": "sphinx",
    "name": "found_docs",
    "source_code": "@property\ndef found_docs(self) -> set[str]:\n    return self.project.docnames",
    "docstring": "Contains all existing docnames.",
    "type": "method",
    "file_path": "sphinx\\sphinx\\environment\\__init__.py",
    "ast_data": "FunctionDef name:found_docs arg:self arguments arg Return return:yes"
  },
  {
    "library": "scipy",
    "name": "_conditional_odds_ratio_ci",
    "source_code": "def _conditional_odds_ratio_ci(self, confidence_level=0.95, alternative='two-sided'):\n    table = self._table\n    if 0 in table.sum(axis=0) or 0 in table.sum(axis=1):\n        ci = (0, np.inf)\n    else:\n        ci = _conditional_oddsratio_ci(table, confidence_level=confidence_level, alternative=alternative)\n    return ConfidenceInterval(low=ci[0], high=ci[1])",
    "docstring": "Confidence interval for the conditional odds ratio.",
    "type": "method",
    "file_path": "scipy\\scipy\\stats\\_odds_ratio.py",
    "ast_data": "FunctionDef name:_conditional_odds_ratio_ci arg:self arg:confidence_level arg:alternative arguments arg arg arg Assign If BoolOp Compare Call Compare Call Assign Assign Call Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "indices_method",
    "source_code": "def indices_method(f_A, f_B, f_AB):\n    return np.squeeze(indices_method_(f_A=f_A, f_B=f_B, f_AB=f_AB))",
    "docstring": "Wrap indices method to ensure proper output dimension. 1D when single output, 2D otherwise.",
    "type": "function",
    "file_path": "scipy\\scipy\\stats\\_sensitivity_analysis.py",
    "ast_data": "FunctionDef name:indices_method arg:f_A arg:f_B arg:f_AB arguments arg arg arg Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "erfcinv",
    "source_code": "@tf_export('math.erfcinv')\n@dispatch.register_unary_elementwise_api\n@dispatch.add_dispatch_support\ndef erfcinv(x, name=None):\n    with ops.name_scope(name, 'erfcinv', [x]):\n        x = ops.convert_to_tensor(x, name='start')\n        return -ndtri(0.5 * x) * np.sqrt(0.5)",
    "docstring": "Computes the inverse of complementary error function. Given , compute the inverse complementary error function of . This function is the inverse of , and is defined on . >>> tf.math.erfcinv([0., 0.2, 1., 1.5, 2.]) Args: x: with type or . name: A name for the operation (optional). Returns: Inverse complementary error function of . @compatibility(numpy) Equivalent to scipy.special.erfcinv @end_compatibility",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\math_ops.py",
    "ast_data": "FunctionDef name:erfcinv arg:x arg:name arguments arg arg With Call Assign Call Return return:yes Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "bias_correction",
    "source_code": "def bias_correction(float_model, quantized_model, img_data, target_modules=_supported_modules_quantized, neval_batches=None):\n    ns.prepare_model_with_stubs(float_model, quantized_model, _supported_modules, MeanShadowLogger)\n    uncorrected_modules = {name: submodule for name, submodule in quantized_model.named_modules() if type(submodule) in target_modules}\n    for uncorrected_module in uncorrected_modules:\n        quantized_submodule = get_module(quantized_model, uncorrected_module)\n        bias = get_param(quantized_submodule, 'bias')\n        if bias is not None:\n            for count, data in enumerate(img_data, start=1):\n                quantized_model(data[0])\n                if count == neval_batches:\n                    break\n            ob_dict = ns.get_logger_dict(quantized_model)\n            parent_name, _ = parent_child_names(uncorrected_module)\n            float_data = ob_dict[parent_name + '.stats']['float']\n            quant_data = ob_dict[parent_name + '.stats']['quantized']\n            quantization_error = quant_data - float_data\n            dims = list(range(quantization_error.dim()))\n            dims.remove(1)\n            expected_error = torch.mean(quantization_error, dims)\n            updated_bias = bias.data - expected_error\n            bias.data = updated_bias\n            for name, submodule in quantized_model.named_modules():\n                if isinstance(submodule, MeanShadowLogger):\n                    submodule.clear()",
    "docstring": "Perform bias correction on a module. Using numeric suite shadow module, the expected output of the floating point and quantized modules is recorded. Using that data the bias of supported modules is shifted to compensate for the drift caused by quantization Paper reference: (Section 4.2) Args: float_model: a trained model that serves as a reference to what bias correction should aim for quantized_model: quantized form of float_model that bias correction is to applied to img_data: calibration data to estimate the expected output (used to find quantization error) target_modules: specifies what submodules in quantized_model need bias correction (can be extended to unquantized submodules) neval_batches: a cap to the number of batches you want to be used for estimating the expected output",
    "type": "function",
    "file_path": "pytorch\\torch\\ao\\quantization\\_correct_bias.py",
    "ast_data": "FunctionDef name:bias_correction arg:float_model arg:quantized_model arg:img_data arg:target_modules arg:neval_batches arguments arg arg arg arg arg Call Assign Call Compare Call For Assign Call Assign Call If Compare For Call Call If Compare Assign Call Assign Call Assign Assign Assign Assign Call Call Call Call Assign Call Assign Assign For Call If Call Call"
  },
  {
    "library": "pytorch",
    "name": "_apply",
    "source_code": "def _apply(self, *args, **kwargs):\n    context = self._deregister_orig_params_ctx() if self._use_orig_params else contextlib.nullcontext()\n    with context:\n        return super()._apply(*args, **kwargs)",
    "docstring": "Deregister the original parameters and expose the :class: s before calling ``.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\fsdp\\fully_sharded_data_parallel.py",
    "ast_data": "FunctionDef name:_apply arg:self arguments arg arg arg Assign Call Call With Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "extract_class_name",
    "source_code": "def extract_class_name(line: str) -> str:\n    start_token = 'class '\n    end_token = '('\n    start, end = (line.find(start_token) + len(start_token), line.find(end_token))\n    return line[start:end]",
    "docstring": "Extract class name from class definition in the form of \"class {CLASS_NAME}({Type}):\".",
    "type": "function",
    "file_path": "pytorch\\torch\\utils\\data\\datapipes\\gen_pyi.py",
    "ast_data": "FunctionDef name:extract_class_name arg:line arguments arg Assign Assign Assign Call Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_init_device_handle",
    "source_code": "@no_type_check\ndef _init_device_handle(state: _FSDPState, module: nn.Module, ignored_params: set[nn.Parameter], device_id: Optional[Union[int, torch.device]]) -> _FSDPState:\n    determined_device = None\n    if device_id is not None:\n        determined_device = device_id if isinstance(device_id, torch.device) else torch.device(device_id)\n    if determined_device is None:\n        for param in _get_orig_params(module, ignored_params):\n            if param.device.type in {'cpu', 'meta'}:\n                continue\n            if determined_device is None:\n                determined_device = param.device\n            elif param.device.type != determined_device.type:\n                raise RuntimeError(f'FSDP does not support modules with different device types but got params on {determined_device.type} and {param.device.type}')\n        determined_device = determined_device or torch._C._get_accelerator()\n        if determined_device.type == 'cpu':\n            raise RuntimeError('FSDP needs a non-CPU accelerator device, but no accelerator device is detected.')\n    state._device_handle = _FSDPDeviceHandle.from_device(determined_device)\n    return state",
    "docstring": "Determine device handle used for initializing FSDP. If a device is specified by `Accelerators` for details. This method will be called once ignored paramters was determined, as the device handle maybe needed for other initialization.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\fsdp\\_init_utils.py",
    "ast_data": "FunctionDef name:_init_device_handle arg:state arg:module arg:ignored_params arg:device_id arguments arg arg arg arg Assign If Compare Assign Call Call If Compare For Call If Compare If Compare Assign If Compare Raise Call Assign BoolOp Call If Compare Raise Call Assign Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "__init__",
    "source_code": "@_docstring.interpd\ndef __init__(self, xy, radius=5, **kwargs):\n    super().__init__(xy, radius * 2, radius * 2, **kwargs)\n    self.radius = radius",
    "docstring": "Create a true circle at center *xy* = (*x*, *y*) with given *radius*. Unlike which is a polygonal approximation, this uses Bezier splines and is much closer to a scale-free circle. Valid keyword arguments are: %(Patch:kwdoc)s",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\patches.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:xy arg:radius arguments arg arg arg arg Call Call Assign"
  },
  {
    "library": "django",
    "name": "linear_name",
    "source_code": "@property\ndef linear_name(self):\n    return self.srs.linear_name",
    "docstring": "Return the linear units name.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\db\\backends\\base\\models.py",
    "ast_data": "FunctionDef name:linear_name arg:self arguments arg Return return:yes"
  },
  {
    "library": "pandas",
    "name": "_clean_mapping",
    "source_code": "@final\ndef _clean_mapping(self, mapping):\n    if not isinstance(mapping, dict):\n        return mapping\n    clean = {}\n    assert self.orig_names is not None\n    for col, v in mapping.items():\n        if isinstance(col, int) and col not in self.orig_names:\n            col = self.orig_names[col]\n        clean[col] = v\n    if isinstance(mapping, defaultdict):\n        remaining_cols = set(self.orig_names) - set(clean.keys())\n        clean.update({col: mapping[col] for col in remaining_cols})\n    return clean",
    "docstring": "converts col numbers to names",
    "type": "method",
    "file_path": "pandas\\pandas\\io\\parsers\\base_parser.py",
    "ast_data": "FunctionDef name:_clean_mapping arg:self arg:mapping arguments arg arg If Call Return return:yes Assign Compare For Call If BoolOp Call Compare Assign Assign If Call Assign Call Call Call Call Return return:yes"
  },
  {
    "library": "scipy",
    "name": "integrate_kde",
    "source_code": "def integrate_kde(self, other):\n    if other.d != self.d:\n        raise ValueError('KDEs are not the same dimensionality')\n    if other.n < self.n:\n        small = other\n        large = self\n    else:\n        small = self\n        large = other\n    sum_cov = small.covariance + large.covariance\n    sum_cov_chol = linalg.cho_factor(sum_cov)\n    result = 0.0\n    for i in range(small.n):\n        mean = small.dataset[:, i, newaxis]\n        diff = large.dataset - mean\n        tdiff = linalg.cho_solve(sum_cov_chol, diff)\n        energies = np_vecdot(diff, tdiff, axis=0) / 2.0\n        result += np_vecdot(exp(-energies), large.weights, axis=0) * small.weights[i]\n    sqrt_det = np.prod(np.diagonal(sum_cov_chol[0]))\n    norm_const = power(2 * pi, sum_cov.shape[0] / 2.0) * sqrt_det\n    result /= norm_const\n    return result",
    "docstring": "Computes the integral of the product of this kernel density estimate with another. Parameters ---------- other : gaussian_kde instance The other kde. Returns ------- value : scalar The result of the integral. Raises ------ ValueError If the KDEs have different dimensionality.",
    "type": "method",
    "file_path": "scipy\\scipy\\stats\\_kde.py",
    "ast_data": "FunctionDef name:integrate_kde arg:self arg:other arguments arg arg If Compare Raise Call If Compare Assign Assign Assign Assign Assign Assign Call Assign For Call Assign Assign Assign Call Assign Call Call Call Assign Call Call Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "ensure_value_to_cell",
    "source_code": "def ensure_value_to_cell(value):\n\n    def dummy_fn():\n        value\n    cell_value = dummy_fn.__closure__[0]\n    if not isinstance(value, type(cell_value)):\n        return cell_value\n    return value",
    "docstring": "Ensures that a value is converted to a python cell object. Args: value: Any value that needs to be casted to the cell type Returns: A value wrapped as a cell object (see function \"func_load\")",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\utils\\generic_utils.py",
    "ast_data": "FunctionDef name:ensure_value_to_cell arg:value arguments arg FunctionDef name:dummy_fn arguments Assign If Call Call Return return:yes Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_add_new_node",
    "source_code": "def _add_new_node(self, ast_node):\n    if ast_node in self.node_index:\n        raise ValueError('%s added twice' % ast_node)\n    node = Node(next_=set(), prev=weakref.WeakSet(), ast_node=ast_node)\n    self.node_index[ast_node] = node\n    self.owners[node] = frozenset(self.active_stmts)\n    if self.head is None:\n        self.head = node\n    for leaf in self.leaves:\n        self._connect_nodes(leaf, node)\n    for section_id in self.pending_finally_sections:\n        self.finally_section_subgraphs[section_id][0] = node\n    self.pending_finally_sections = set()\n    return node",
    "docstring": "Grows the graph by adding a CFG node following the current leaves.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\autograph\\pyct\\cfg.py",
    "ast_data": "FunctionDef name:_add_new_node arg:self arg:ast_node arguments arg arg If Compare Raise Call Assign Call Call Call Assign Assign Call If Compare Assign For Call For Assign Assign Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "_make_axes_method",
    "source_code": "def _make_axes_method(func):\n    func.__qualname__ = f'Axes.{func.__name__}'\n    return func",
    "docstring": "Patch the qualname for functions that are directly added to Axes. Some Axes functionality is defined in functions in other submodules. These are simply added as attributes to Axes. As a result, their ` will also show \"Axes.table\". However, since these functions are not intended to be standalone, this is bearable.",
    "type": "function",
    "file_path": "matplotlib\\lib\\matplotlib\\axes\\_axes.py",
    "ast_data": "FunctionDef name:_make_axes_method arg:func arguments arg Assign Return return:yes"
  },
  {
    "library": "scipy",
    "name": "pinv",
    "source_code": "@_apply_over_batch(('a', 2))\ndef pinv(a, *, atol=None, rtol=None, return_rank=False, check_finite=True):\n    a = _asarray_validated(a, check_finite=check_finite)\n    u, s, vh = _decomp_svd.svd(a, full_matrices=False, check_finite=False)\n    t = u.dtype.char.lower()\n    maxS = np.max(s, initial=0.0)\n    atol = 0.0 if atol is None else atol\n    rtol = max(a.shape) * np.finfo(t).eps if rtol is None else rtol\n    if atol < 0.0 or rtol < 0.0:\n        raise ValueError('atol and rtol values must be positive.')\n    val = atol + maxS * rtol\n    rank = np.sum(s > val)\n    u = u[:, :rank]\n    u /= s[:rank]\n    B = (u @ vh[:rank]).conj().T\n    if return_rank:\n        return (B, rank)\n    else:\n        return B",
    "docstring": "Compute the (Moore-Penrose) pseudo-inverse of a matrix. Calculate a generalized inverse of a matrix using its singular-value decomposition `areturn_rank` does not have to be a square matrix or have linearly independent columns/rows. As an example, we can calculate the Moore-Penrose pseudoinverse of a random non-square matrix and verify it satisfies the four conditions. >>> import numpy as np >>> from scipy import linalg >>> rng = np.random.default_rng() >>> A = rng.standard_normal((9, 6)) >>> B = linalg.pinv(A) >>> np.allclose(A @ B @ A, A) # Condition 1 True >>> np.allclose(B @ A @ B, B) # Condition 2 True >>> np.allclose((A @ B).conj().T, A @ B) # Condition 3 True >>> np.allclose((B @ A).conj().T, B @ A) # Condition 4 True",
    "type": "function",
    "file_path": "scipy\\scipy\\linalg\\_basic.py",
    "ast_data": "FunctionDef name:pinv arg:a arguments arg arg arg arg arg Assign Call Assign Call Assign Call Assign Call Assign Compare Assign Compare Call Call If BoolOp Compare Compare Raise Call Assign Assign Call Compare Assign Assign Call If Return return:yes Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_init_init_op",
    "source_code": "def _init_init_op(self, init_op=USE_DEFAULT, init_feed_dict=None):\n    if init_op is Supervisor.USE_DEFAULT:\n        init_op = self._get_first_op_from_collection(ops.GraphKeys.INIT_OP)\n        if init_op is None:\n            init_op = variables.global_variables_initializer()\n            ops.add_to_collection(ops.GraphKeys.INIT_OP, init_op)\n    self._init_op = init_op\n    self._init_feed_dict = init_feed_dict",
    "docstring": "Initializes init_op. Args: init_op: to initialize the variables. If set to USE_DEFAULT, create an op that initializes all variables and tables. init_feed_dict: A dictionary that maps objects to feed values. This feed dictionary will be used when is evaluated.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\training\\supervisor.py",
    "ast_data": "FunctionDef name:_init_init_op arg:self arg:init_op arg:init_feed_dict arguments arg arg arg If Compare Assign Call If Compare Assign Call Call Assign Assign"
  },
  {
    "library": "tensorflow",
    "name": "_make_key_func",
    "source_code": "def _make_key_func(self, key_func, input_dataset):\n\n    def key_func_wrapper(*args):\n        return ops.convert_to_tensor(key_func(*args), dtype=dtypes.int64)\n    self._key_func = structured_function.StructuredFunctionWrapper(key_func_wrapper, self._transformation_name(), dataset=input_dataset)\n    if not self._key_func.output_structure.is_compatible_with(tensor_spec.TensorSpec([], dtypes.int64)):\n        raise ValueError(f'Invalid `key_func`. `key_func` must return a single `tf.int64` scalar tensor but its return type is {self._key_func.output_structure}.')",
    "docstring": "Make wrapping defun for key_func.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\data\\ops\\group_by_window_op.py",
    "ast_data": "FunctionDef name:_make_key_func arg:self arg:key_func arg:input_dataset arguments arg arg arg FunctionDef name:key_func_wrapper arguments arg Return return:yes Call Call Assign Call Call If Call Call Raise Call"
  },
  {
    "library": "django",
    "name": "data",
    "source_code": "def data(self, data=None, offset=None, size=None, shape=None, as_memoryview=False):\n    offset = offset or (0, 0)\n    size = size or (self.width - offset[0], self.height - offset[1])\n    shape = shape or size\n    if any((x <= 0 for x in size)):\n        raise ValueError('Offset too big for this raster.')\n    if size[0] > self.width or size[1] > self.height:\n        raise ValueError('Size is larger than raster.')\n    ctypes_array = GDAL_TO_CTYPES[self.datatype()] * (shape[0] * shape[1])\n    if data is None:\n        access_flag = 0\n        data_array = ctypes_array()\n    else:\n        access_flag = 1\n        if isinstance(data, (bytes, memoryview)) or (numpy and isinstance(data, numpy.ndarray)):\n            data_array = ctypes_array.from_buffer_copy(data)\n        else:\n            data_array = ctypes_array(*data)\n    capi.band_io(self._ptr, access_flag, offset[0], offset[1], size[0], size[1], byref(data_array), shape[0], shape[1], self.datatype(), 0, 0)\n    if data is None:\n        if as_memoryview:\n            return memoryview(data_array)\n        elif numpy:\n            return numpy.frombuffer(data_array, dtype=numpy.dtype(data_array)).reshape(tuple(reversed(size)))\n        else:\n            return list(data_array)\n    else:\n        self._flush()",
    "docstring": "Read or writes pixel values for this band. Blocks of data can be accessed by specifying the width, height and offset of the desired block. The same specification can be used to update parts of a raster by providing an array of values. Allowed input data types are bytes, memoryview, list, tuple, and array.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\gdal\\raster\\band.py",
    "ast_data": "FunctionDef name:data arg:self arg:data arg:offset arg:size arg:shape arg:as_memoryview arguments arg arg arg arg arg arg Assign BoolOp Assign BoolOp Assign BoolOp If Call Compare Raise Call If BoolOp Compare Compare Raise Call Assign Call If Compare Assign Assign Call Assign If BoolOp Call BoolOp Call Assign Call Assign Call Call Call Call If Compare If Return return:yes Call If Return return:yes Call Call Call Call Call Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_get_tensors",
    "source_code": "def _get_tensors(graph, signature_def_tensor_names=None, user_tensor_names=None):\n    tensors = []\n    if user_tensor_names:\n        user_tensor_names = sorted(user_tensor_names)\n        tensors = util.get_tensors_from_tensor_names(graph, user_tensor_names)\n    elif signature_def_tensor_names:\n        tensors = [graph.get_tensor_by_name(name) for name in sorted(signature_def_tensor_names)]\n    else:\n        raise ValueError('Specify either signature_def_tensor_names or user_tensor_names')\n    return tensors",
    "docstring": "Gets the tensors associated with the tensor names. Either signature_def_tensor_names or user_tensor_names should be provided. If the user provides tensors, the tensors associated with the user provided tensor names are provided. Otherwise, the tensors associated with the names in the SignatureDef are provided. Args: graph: GraphDef representing graph. signature_def_tensor_names: Tensor names stored in either the inputs or outputs of a SignatureDef. (default None) user_tensor_names: Tensor names provided by the user. (default None) Returns: List of tensors. Raises: ValueError: signature_def_tensors and user_tensor_names are undefined or empty. user_tensor_names are not valid.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\lite\\python\\convert_saved_model.py",
    "ast_data": "FunctionDef name:_get_tensors arg:graph arg:signature_def_tensor_names arg:user_tensor_names arguments arg arg arg Assign If Assign Call Assign Call If Assign Call Call Raise Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, reduction=losses_utils.ReductionV2.AUTO, name='squared_hinge'):\n    super().__init__(squared_hinge, name=name, reduction=reduction)",
    "docstring": "Initializes instance. Args: reduction: Type of to apply to loss. Default value is . indicates that the reduction option will be determined by the usage context. For almost all cases this defaults to . When used with , outside of built-in training loops such as and , using or will raise an error. Please see this custom training [tutorial]( for more details. name: Optional name for the instance. Defaults to 'squared_hinge'.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\losses.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:reduction arg:name arguments arg arg arg Call Call"
  },
  {
    "library": "pytorch",
    "name": "cartesian_prod",
    "source_code": "def cartesian_prod(*tensors: Tensor) -> Tensor:\n    if has_torch_function(tensors):\n        return handle_torch_function(cartesian_prod, tensors, *tensors)\n    return _VF.cartesian_prod(tensors)",
    "docstring": "Do cartesian product of the given sequence of tensors. The behavior is similar to python's . Args: *tensors: any number of 1 dimensional tensors. Returns: Tensor: A tensor equivalent to converting all the input tensors into lists, do on these lists, and finally convert the resulting list into tensor. Example:: >>> import itertools >>> a = [1, 2, 3] >>> b = [4, 5] >>> list(itertools.product(a, b)) [(1, 4), (1, 5), (2, 4), (2, 5), (3, 4), (3, 5)] >>> tensor_a = torch.tensor(a) >>> tensor_b = torch.tensor(b) >>> torch.cartesian_prod(tensor_a, tensor_b) tensor([[1, 4], [1, 5], [2, 4], [2, 5], [3, 4], [3, 5]])",
    "type": "function",
    "file_path": "pytorch\\torch\\functional.py",
    "ast_data": "FunctionDef name:cartesian_prod arguments arg If Call Return return:yes Call Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "BaseOptimizer",
    "source_code": "class BaseOptimizer:\n\n    def __init__(self, learning_rate_init=0.1):\n        self.learning_rate_init = learning_rate_init\n        self.learning_rate = float(learning_rate_init)\n\n    def update_params(self, params, grads):\n        updates = self._get_updates(grads)\n        for param, update in zip((p for p in params), updates):\n            param += update\n\n    def iteration_ends(self, time_step):\n        pass\n\n    def trigger_stopping(self, msg, verbose):\n        if verbose:\n            print(msg + ' Stopping.')\n        return True",
    "docstring": "Base (Stochastic) gradient descent optimizer Parameters ---------- learning_rate_init : float, default=0.1 The initial learning rate used. It controls the step-size in updating the weights Attributes ---------- learning_rate : float the current learning rate",
    "type": "class",
    "file_path": "scikit-learn\\sklearn\\neural_network\\_stochastic_optimizers.py",
    "ast_data": "ClassDef name:BaseOptimizer FunctionDef name:__init__ arg:self arg:learning_rate_init arguments arg arg Assign Assign Call FunctionDef name:update_params arg:self arg:params arg:grads arguments arg arg arg Assign Call For Call FunctionDef name:iteration_ends arg:self arg:time_step arguments arg arg FunctionDef name:trigger_stopping arg:self arg:msg arg:verbose arguments arg arg arg If Call Return return:yes"
  },
  {
    "library": "scipy",
    "name": "chebyc",
    "source_code": "def chebyc(n, monic=False):\n    if n < 0:\n        raise ValueError('n must be nonnegative.')\n    if n == 0:\n        n1 = n + 1\n    else:\n        n1 = n\n    x, w = roots_chebyc(n1)\n    if n == 0:\n        x, w = ([], [])\n    hn = 4 * pi * ((n == 0) + 1)\n    kn = 1.0\n    p = orthopoly1d(x, w, hn, kn, wfunc=lambda x: 1.0 / sqrt(1 - x * x / 4.0), limits=(-2, 2), monic=monic)\n    if not monic:\n        p._scale(2.0 / p(2))\n        p.__dict__['_eval_func'] = lambda x: _ufuncs.eval_chebyc(n, x)\n    return p",
    "docstring": "Chebyshev polynomial of the first kind on :math:. Defined as :math:, where :math: is the nth Chebychev polynomial of the first kind. Parameters ---------- n : int Degree of the polynomial. monic : bool, optional If , scale the leading coefficient to be 1. Default is . Returns ------- C : orthopoly1d Chebyshev polynomial of the first kind on :math:. See Also -------- chebyt : Chebyshev polynomial of the first kind. Notes ----- The polynomials :math: are orthogonal over :math: with weight function :math:. References ---------- .. [1] Abramowitz and Stegun, \"Handbook of Mathematical Functions\" Section 22. National Bureau of Standards, 1972.",
    "type": "function",
    "file_path": "scipy\\scipy\\special\\_orthogonal.py",
    "ast_data": "FunctionDef name:chebyc arg:n arg:monic arguments arg arg If Compare Raise Call If Compare Assign Assign Assign Call If Compare Assign Assign Compare Assign Assign Call arguments arg Call If Call Call Assign arguments arg Call Return return:yes"
  },
  {
    "library": "kornia",
    "name": "KORNIA_CHECK_IS_COLOR_OR_GRAY",
    "source_code": "def KORNIA_CHECK_IS_COLOR_OR_GRAY(x: Tensor, msg: Optional[str]=None, raises: bool=True) -> bool:\n    if len(x.shape) < 3 or x.shape[-3] not in [1, 3]:\n        if raises:\n            raise TypeError(f'Not a color or gray tensor. Got: {type(x)}.\\n{msg}')\n        return False\n    return True",
    "docstring": "Check whether an image tensor is grayscale or color. Args: x: image tensor to evaluate. msg: message to show in the exception. raises: bool indicating whether an exception should be raised upon failure. Raises: TypeException: if the tensor has not a shape :math: or :math: and raises is True. Example: >>> img = torch.rand(2, 3, 4, 4) >>> KORNIA_CHECK_IS_COLOR_OR_GRAY(img, \"Image is not color or grayscale\") True",
    "type": "function",
    "file_path": "kornia\\kornia\\core\\check.py",
    "ast_data": "FunctionDef name:KORNIA_CHECK_IS_COLOR_OR_GRAY arg:x arg:msg arg:raises arguments arg arg arg If BoolOp Compare Call Compare If Raise Call Call Return return:yes Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "from_dict",
    "source_code": "@classmethod\ndef from_dict(cls, qconfig_dict: dict[str, Any]) -> QConfigMapping:\n    conf = cls()\n    if _GLOBAL_DICT_KEY in qconfig_dict:\n        conf.set_global(qconfig_dict[_GLOBAL_DICT_KEY])\n    for object_type, qconfig in qconfig_dict.get(_OBJECT_TYPE_DICT_KEY, []):\n        conf.set_object_type(object_type, qconfig)\n    for module_name_regex, qconfig in qconfig_dict.get(_MODULE_NAME_REGEX_DICT_KEY, []):\n        conf.set_module_name_regex(module_name_regex, qconfig)\n    for module_name, qconfig in qconfig_dict.get(_MODULE_NAME_DICT_KEY, []):\n        conf.set_module_name(module_name, qconfig)\n    for module_name, object_type, index, qconfig in qconfig_dict.get(_MODULE_NAME_OBJECT_TYPE_ORDER_DICT_KEY, []):\n        conf.set_module_name_object_type_order(module_name, object_type, index, qconfig)\n    return conf",
    "docstring": "Create a `` from a dictionary with the following keys (all optional): \"\" (for global QConfig) \"object_type\" \"module_name_regex\" \"module_name\" \"module_name_object_type_order\" The values of this dictionary are expected to be lists of tuples.",
    "type": "method",
    "file_path": "pytorch\\torch\\ao\\quantization\\qconfig_mapping.py",
    "ast_data": "FunctionDef name:from_dict arg:cls arg:qconfig_dict arguments arg arg Assign Call If Compare Call For Call Call For Call Call For Call Call For Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "any_chain",
    "source_code": "@compatibility(is_backward_compatible=False)\ndef any_chain(*op_support: OperatorSupportBase) -> OperatorSupportBase:\n\n    def _any_chain(submods, node) -> bool:\n        return any((x.is_node_supported(submods, node) for x in op_support))\n    return create_op_support(_any_chain)",
    "docstring": "Combines a sequence of instances to form a single instance by evaluating each input instance, and returns True if any of it reports True.",
    "type": "function",
    "file_path": "pytorch\\torch\\fx\\passes\\operator_support.py",
    "ast_data": "FunctionDef name:any_chain arguments arg FunctionDef name:_any_chain arg:submods arg:node arguments arg arg Return return:yes Call Call Return return:yes Call Call"
  },
  {
    "library": "pandas",
    "name": "_check_skiprows_func",
    "source_code": "def _check_skiprows_func(self, skiprows: Callable, rows_to_use: int) -> int:\n    i = 0\n    rows_used_so_far = 0\n    while rows_used_so_far < rows_to_use:\n        if not skiprows(i):\n            rows_used_so_far += 1\n        i += 1\n    return i",
    "docstring": "Determine how many file rows are required to obtain data rows when is a function. Parameters ---------- skiprows : function The function passed to read_excel by the user. rows_to_use : int The number of rows that will be needed for the header and the data. Returns ------- int",
    "type": "method",
    "file_path": "pandas\\pandas\\io\\excel\\_base.py",
    "ast_data": "FunctionDef name:_check_skiprows_func arg:self arg:skiprows arg:rows_to_use arguments arg arg arg Assign Assign While Compare If Call Return return:yes"
  },
  {
    "library": "sphinx",
    "name": "SphinxInfoLogRecord",
    "source_code": "class SphinxInfoLogRecord(SphinxLogRecord):\n    prefix = ''",
    "docstring": "Info log record class supporting location",
    "type": "class",
    "file_path": "sphinx\\sphinx\\util\\logging.py",
    "ast_data": "ClassDef name:SphinxInfoLogRecord Assign"
  },
  {
    "library": "tensorflow",
    "name": "_GetGradWrt",
    "source_code": "def _GetGradWrt(output_grad, other_operand, input_shape, input_subs, other_subs, output_subs):\n    reduced_label_set = set(input_subs).difference(set(output_subs + other_subs + '.'))\n    left_subs = ''.join((s for s in input_subs if s not in reduced_label_set))\n    grad_reduced = gen_linalg_ops.einsum([output_grad, other_operand], '{},{}->{}'.format(output_subs, other_subs, left_subs))\n    if not reduced_label_set:\n        return grad_reduced\n    return _GetGradReduced(grad_reduced, left_subs, input_subs, input_shape, reduced_label_set)",
    "docstring": "Returns the gradient wrt an input operand for a binary einsum. This function does not handle (un)broadcasting. This must be done separately on the returned gradient. Args: output_grad: The gradient wrt the output of a binary einsum operation. other_operand: The complementary operand i.e. which is not the input operand. input_shape: A representing the shape of input operand. input_subs: The subscripts of the input operand. other_subs: The subscripts of the complementary operand. output_subs: The output subscripts.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\linalg_grad.py",
    "ast_data": "FunctionDef name:_GetGradWrt arg:output_grad arg:other_operand arg:input_shape arg:input_subs arg:other_subs arg:output_subs arguments arg arg arg arg arg arg Assign Call Call Call Assign Call Compare Assign Call Call If Return return:yes Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_SingleWorkerDatasetIterator",
    "source_code": "class _SingleWorkerDatasetIterator(input_lib._SingleWorkerDatasetIteratorBase):\n\n    def _make_iterator(self):\n        with ops.device(self._worker):\n            if self._options is not None:\n                self._iterator = multi_device_iterator_ops.MultiDeviceIterator(self._dataset, self._devices, max_buffer_size=self._options.experimental_per_replica_buffer_size, prefetch_buffer_size=self._options.experimental_per_replica_buffer_size)\n            else:\n                self._iterator = multi_device_iterator_ops.MultiDeviceIterator(self._dataset, self._devices)\n\n    def initialize(self):\n        if ops.executing_eagerly_outside_functions():\n            self._iterator._eager_reset()\n            return []\n        else:\n            return [self._iterator.initializer]\n\n    @property\n    def output_classes(self):\n        return dataset_ops.get_legacy_output_classes(self._iterator)\n\n    @property\n    def output_shapes(self):\n        return dataset_ops.get_legacy_output_shapes(self._iterator)\n\n    @property\n    def output_types(self):\n        return dataset_ops.get_legacy_output_types(self._iterator)",
    "docstring": "Iterator for a single DistributedDatasetV1 instance.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\v1\\input_lib.py",
    "ast_data": "ClassDef name:_SingleWorkerDatasetIterator FunctionDef name:_make_iterator arg:self arguments arg With Call If Compare Assign Call Assign Call FunctionDef name:initialize arg:self arguments arg If Call Call Return return:no Return return:yes FunctionDef name:output_classes arg:self arguments arg Return return:yes Call FunctionDef name:output_shapes arg:self arguments arg Return return:yes Call FunctionDef name:output_types arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "latest_checkpoint",
    "source_code": "@tf_export('train.latest_checkpoint')\ndef latest_checkpoint(checkpoint_dir, latest_filename=None):\n    ckpt = get_checkpoint_state(checkpoint_dir, latest_filename)\n    if ckpt and ckpt.model_checkpoint_path:\n        v2_path = _prefix_to_checkpoint_path(ckpt.model_checkpoint_path, saver_pb2.SaverDef.V2)\n        v1_path = _prefix_to_checkpoint_path(ckpt.model_checkpoint_path, saver_pb2.SaverDef.V1)\n        if file_io.get_matching_files(v2_path) or file_io.get_matching_files(v1_path):\n            return ckpt.model_checkpoint_path\n        else:\n            logging.error(\"Couldn't match files for checkpoint %s\", ckpt.model_checkpoint_path)\n    return None",
    "docstring": "Finds the filename of latest saved checkpoint file. Gets the checkpoint state given the provided checkpoint_dir and looks for a corresponding TensorFlow 2 (preferred) or TensorFlow 1.x checkpoint path. The latest_filename argument is only applicable if you are saving checkpoint using See the [Training Checkpoints Guide]( for more details and examples.v1.train.Saver.saveNone` if no checkpoint was found.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\checkpoint\\checkpoint_management.py",
    "ast_data": "FunctionDef name:latest_checkpoint arg:checkpoint_dir arg:latest_filename arguments arg arg Assign Call If BoolOp Assign Call Assign Call If BoolOp Call Call Return return:yes Call Return return:no Call"
  },
  {
    "library": "tensorflow",
    "name": "NewSession",
    "source_code": "def NewSession(self, request, context):\n    context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n    context.set_details('Method not implemented!')\n    raise NotImplementedError('Method not implemented!')",
    "docstring": "Starts a profiling session, blocks until it completes. TPUProfileAnalysis service delegate this to TPUProfiler service. Populate the profiled data in repository, then return status to caller.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\tpu\\profiler\\profiler_analysis_pb2_grpc.py",
    "ast_data": "FunctionDef name:NewSession arg:self arg:request arg:context arguments arg arg arg Call Call Raise Call"
  },
  {
    "library": "pandas",
    "name": "set_caption",
    "source_code": "def set_caption(self, caption: str | tuple | list) -> Styler:\n    msg = '`caption` must be either a string or 2-tuple of strings.'\n    if isinstance(caption, (list, tuple)):\n        if len(caption) != 2 or not isinstance(caption[0], str) or (not isinstance(caption[1], str)):\n            raise ValueError(msg)\n    elif not isinstance(caption, str):\n        raise ValueError(msg)\n    self.caption = caption\n    return self",
    "docstring": "Set the text added to a ``Table Visualization `_ for more examples.",
    "type": "method",
    "file_path": "pandas\\pandas\\io\\formats\\style.py",
    "ast_data": "FunctionDef name:set_caption arg:self arg:caption arguments arg arg Assign If Call If BoolOp Compare Call Call Call Raise Call If Call Raise Call Assign Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "MSELoss",
    "source_code": "class MSELoss(_Loss):\n    __constants__ = ['reduction']\n\n    def __init__(self, size_average=None, reduce=None, reduction: str='mean') -> None:\n        super().__init__(size_average, reduce, reduction)\n\n    def forward(self, input: Tensor, target: Tensor) -> Tensor:\n        return F.mse_loss(input, target, reduction=self.reduction)",
    "docstring": "Creates a criterion that measures the mean squared error (squared L2 norm) between each element in the input :math: and target :math:. The unreduced (i.e. with :attr: set to `Nreductionmean';}\\\\ \\operatorname{sum}(L), & \\text{if reduction} = \\text{xyNNNreductionsize_averagereducereductionsize_averagereducesize_averagesize_averagereducereduction(*)*(*)`, same shape as the input. Examples: >>> loss = nn.MSELoss() >>> input = torch.randn(3, 5, requires_grad=True) >>> target = torch.randn(3, 5) >>> output = loss(input, target) >>> output.backward()",
    "type": "class",
    "file_path": "pytorch\\torch\\nn\\modules\\loss.py",
    "ast_data": "ClassDef name:MSELoss Assign FunctionDef name:__init__ arg:self arg:size_average arg:reduce arg:reduction arguments arg arg arg arg Call Call FunctionDef name:forward arg:self arg:input arg:target arguments arg arg arg Return return:yes Call"
  },
  {
    "library": "django",
    "name": "height",
    "source_code": "@property\ndef height(self):\n    return capi.get_ds_ysize(self._ptr)",
    "docstring": "Height (Y axis) in pixels.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\gdal\\raster\\source.py",
    "ast_data": "FunctionDef name:height arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "FitError",
    "source_code": "class FitError(RuntimeError):\n\n    def __init__(self, msg=None):\n        if msg is None:\n            msg = 'An error occurred when fitting a distribution to data.'\n        self.args = (msg,)",
    "docstring": "Represents an error condition when fitting a distribution to data.",
    "type": "class",
    "file_path": "scipy\\scipy\\stats\\_warnings_errors.py",
    "ast_data": "ClassDef name:FitError FunctionDef name:__init__ arg:self arg:msg arguments arg arg If Compare Assign Assign"
  },
  {
    "library": "tensorflow",
    "name": "tag_sharding_attribute_for_dequeued_tensors",
    "source_code": "def tag_sharding_attribute_for_dequeued_tensors(dequeues, dims):\n    nest.assert_shallow_structure(dequeues, dims)\n    return nest.map_structure_up_to(dequeues, _tag_sharding_attribute_for_dequeued_tensor, dequeues, dims)",
    "docstring": "Tags appropriate XLA sharding attribute to the dequeued tensors. Args: dequeues: A list of dequeued tensors on TPU. dims: A list of integer describes how the tensor is partitioned. Returns: The same dequeues with appropriate xla_sharding attribute.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\tpu\\tpu_feed.py",
    "ast_data": "FunctionDef name:tag_sharding_attribute_for_dequeued_tensors arg:dequeues arg:dims arguments arg arg Call Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "HandlerLine2D",
    "source_code": "class HandlerLine2D(HandlerNpoints):\n\n    def create_artists(self, legend, orig_handle, xdescent, ydescent, width, height, fontsize, trans):\n        xdata, xdata_marker = self.get_xdata(legend, xdescent, ydescent, width, height, fontsize)\n        markevery = None\n        if self.get_numpoints(legend) == 1:\n            xdata = np.linspace(xdata[0], xdata[-1], 3)\n            markevery = [1]\n        ydata = np.full_like(xdata, (height - ydescent) / 2)\n        legline = Line2D(xdata, ydata, markevery=markevery)\n        self.update_prop(legline, orig_handle, legend)\n        if legend.markerscale != 1:\n            newsz = legline.get_markersize() * legend.markerscale\n            legline.set_markersize(newsz)\n        legline.set_transform(trans)\n        return [legline]",
    "docstring": "Handler for instances. See Also -------- HandlerLine2DCompound : An earlier handler implementation, which used one artist for the line and another for the marker(s).",
    "type": "class",
    "file_path": "matplotlib\\lib\\matplotlib\\legend_handler.py",
    "ast_data": "ClassDef name:HandlerLine2D FunctionDef name:create_artists arg:self arg:legend arg:orig_handle arg:xdescent arg:ydescent arg:width arg:height arg:fontsize arg:trans arguments arg arg arg arg arg arg arg arg arg Assign Call Assign If Compare Call Assign Call Assign Assign Call Assign Call Call If Compare Assign Call Call Call Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "TransformerMixin",
    "source_code": "class TransformerMixin(_SetOutputMixin):\n\n    def __sklearn_tags__(self):\n        tags = super().__sklearn_tags__()\n        tags.transformer_tags = TransformerTags()\n        return tags\n\n    def fit_transform(self, X, y=None, **fit_params):\n        if _routing_enabled():\n            transform_params = self.get_metadata_routing().consumes(method='transform', params=fit_params.keys())\n            if transform_params:\n                warnings.warn(f\"This object ({self.__class__.__name__}) has a `transform` method which consumes metadata, but `fit_transform` does not forward metadata to `transform`. Please implement a custom `fit_transform` method to forward metadata to `transform` as well. Alternatively, you can explicitly do `set_transform_request`and set all values to `False` to disable metadata routed to `transform`, if that's an option.\", UserWarning)\n        if y is None:\n            return self.fit(X, **fit_params).transform(X)\n        else:\n            return self.fit(X, y, **fit_params).transform(X)",
    "docstring": "Mixin class for all transformers in scikit-learn. This mixin defines the following functionality: - a method that delegates to and ; - a method to output as a specific container type. If :term: is defined, then :class: will automatically wrap and to follow the API. See the :ref: for details. :class: and :class: are helpful mixins for defining :term:. Examples -------- >>> import numpy as np >>> from sklearn.base import BaseEstimator, TransformerMixin >>> class MyTransformer(TransformerMixin, BaseEstimator): ... def __init__(self, *, param=1): ... self.param = param ... def fit(self, X, y=None): ... return self ... def transform(self, X): ... return np.full(shape=len(X), fill_value=self.param) >>> transformer = MyTransformer() >>> X = [[1, 2], [2, 3], [3, 4]] >>> transformer.fit_transform(X) array([1, 1, 1])",
    "type": "class",
    "file_path": "scikit-learn\\sklearn\\base.py",
    "ast_data": "ClassDef name:TransformerMixin FunctionDef name:__sklearn_tags__ arg:self arguments arg Assign Call Call Assign Call Return return:yes FunctionDef name:fit_transform arg:self arg:X arg:y arguments arg arg arg arg If Call Assign Call Call Call If Call If Compare Return return:yes Call Call Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "_is_quantized_op_pt2e",
    "source_code": "def _is_quantized_op_pt2e(node: torch.fx.Node):\n    if not _is_any_annotated([node]):\n        return False\n    quantization_annotation = node.meta.get(QUANT_ANNOTATION_KEY, None)\n    assert isinstance(quantization_annotation, _X86InductorQuantizationAnnotation)\n    return quantization_annotation._is_output_of_quantized_pattern",
    "docstring": "Used for pt2e flow to check if the node is a quantized node: Case1: the node has been annotated as output node of a fusion pattern. Case2: the node has been annotated as single quantized node.",
    "type": "function",
    "file_path": "pytorch\\torch\\ao\\quantization\\quantizer\\x86_inductor_quantizer.py",
    "ast_data": "FunctionDef name:_is_quantized_op_pt2e arg:node arguments arg If Call Return return:yes Assign Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, node_def, op, message, *args):\n    super(PermissionDeniedError, self).__init__(node_def, op, message, PERMISSION_DENIED, *args)",
    "docstring": "Creates a .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\errors_impl.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:node_def arg:op arg:message arguments arg arg arg arg arg Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_parse_kernel_label",
    "source_code": "def _parse_kernel_label(self, label, node_name):\n    start = label.find('@@')\n    end = label.find('#')\n    if start >= 0 and end >= 0 and (start + 2 < end):\n        node_name = label[start + 2:end]\n    fields = node_name.split(':') + ['unknown']\n    name, op = fields[:2]\n    return (name, op)",
    "docstring": "Parses the fields in a node timeline label.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\client\\timeline.py",
    "ast_data": "FunctionDef name:_parse_kernel_label arg:self arg:label arg:node_name arguments arg arg arg Assign Call Assign Call If BoolOp Compare Compare Compare Assign Assign Call Assign Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "__init__",
    "source_code": "def __init__(self, offset=(0.0, 0.0)):\n    self._offset = offset",
    "docstring": "Parameters ---------- offset : (float, float), default: (0, 0) The (x, y) offset to apply to the path, measured in points.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\patheffects.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:offset arguments arg arg Assign"
  },
  {
    "library": "tensorflow",
    "name": "_MatrixDeterminantGrad",
    "source_code": "@ops.RegisterGradient('MatrixDeterminant')\ndef _MatrixDeterminantGrad(op: ops.Operation, grad):\n    a = op.inputs[0]\n    c = op.outputs[0]\n    a_adj_inv = linalg_ops.matrix_inverse(a, adjoint=True)\n    multipliers = array_ops.reshape(grad * c, array_ops.concat([array_ops.shape(c), [1, 1]], 0))\n    return multipliers * a_adj_inv",
    "docstring": "Gradient for MatrixDeterminant.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\linalg_grad.py",
    "ast_data": "FunctionDef name:_MatrixDeterminantGrad arg:op arg:grad arguments arg arg Assign Assign Assign Call Assign Call Call Call Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "pandas_converters",
    "source_code": "@contextlib.contextmanager\ndef pandas_converters() -> Generator[None]:\n    value = get_option('plotting.matplotlib.register_converters')\n    if value:\n        register()\n    try:\n        yield\n    finally:\n        if value == 'auto':\n            deregister()",
    "docstring": "Context manager registering pandas' converters for a plot. See Also -------- register_pandas_matplotlib_converters : Decorator that applies this.",
    "type": "function",
    "file_path": "pandas\\pandas\\plotting\\_matplotlib\\converter.py",
    "ast_data": "FunctionDef name:pandas_converters arguments Assign Call If Call Try If Compare Call"
  },
  {
    "library": "kornia",
    "name": "KORNIA_CHECK_TYPE",
    "source_code": "def KORNIA_CHECK_TYPE(x: object, typ: T | tuple[T, ...], msg: Optional[str]=None, raises: bool=True) -> TypeGuard[T]:\n    if not isinstance(x, typ):\n        if raises:\n            raise TypeError(f'Invalid type: {type(x)}.\\n{msg}')\n        return False\n    return True",
    "docstring": "Check the type of an aribratry variable. Args: x: any input variable. typ: the expected type of the variable. msg: message to show in the exception. raises: bool indicating whether an exception should be raised upon failure. Raises: TypeException: if the input variable does not match with the expected and raises is True. Example: >>> KORNIA_CHECK_TYPE(\"foo\", str, \"Invalid string\") True",
    "type": "function",
    "file_path": "kornia\\kornia\\core\\check.py",
    "ast_data": "FunctionDef name:KORNIA_CHECK_TYPE arg:x arg:typ arg:msg arg:raises arguments arg arg arg arg If Call If Raise Call Call Return return:yes Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "call_metric_function",
    "source_code": "def call_metric_function(metric_fn, y_true, y_pred=None, weights=None, mask=None):\n    if mask is not None:\n        mask = math_ops.cast(mask, y_pred.dtype)\n        if weights is None:\n            weights = mask\n        else:\n            weights = math_ops.cast(weights, dtype=y_pred.dtype)\n            mask, _, weights = losses_utils.squeeze_or_expand_dimensions(mask, sample_weight=weights)\n            weights *= mask\n    if y_pred is not None:\n        return metric_fn(y_true, y_pred, sample_weight=weights)\n    return metric_fn(y_true, sample_weight=weights)",
    "docstring": "Invokes metric function and returns the metric result tensor.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\training_utils_v1.py",
    "ast_data": "FunctionDef name:call_metric_function arg:metric_fn arg:y_true arg:y_pred arg:weights arg:mask arguments arg arg arg arg arg If Compare Assign Call If Compare Assign Assign Call Assign Call If Compare Return return:yes Call Return return:yes Call"
  },
  {
    "library": "django",
    "name": "AggregateQuery",
    "source_code": "class AggregateQuery(Query):\n    compiler = 'SQLAggregateCompiler'\n\n    def __init__(self, model, inner_query):\n        self.inner_query = inner_query\n        super().__init__(model)",
    "docstring": "Take another query as a parameter to the FROM clause and only select the elements in the provided list.",
    "type": "class",
    "file_path": "django\\django\\db\\models\\sql\\subqueries.py",
    "ast_data": "ClassDef name:AggregateQuery Assign FunctionDef name:__init__ arg:self arg:model arg:inner_query arguments arg arg arg Assign Call Call"
  },
  {
    "library": "pytorch",
    "name": "_contains_usable_plan",
    "source_code": "def _contains_usable_plan(delta_plans: list[SavePlan]) -> bool:\n    return any((delta_plan and delta_plan.usable for delta_plan in delta_plans))",
    "docstring": "Check if any delta plan is usable, indicating the plan has changed. Args: delta_plans (List[SavePlan]): A list of delta plans to check. Returns: True if any delta plan is usable, False otherwise.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\checkpoint\\planner_helpers.py",
    "ast_data": "FunctionDef name:_contains_usable_plan arg:delta_plans arguments arg Return return:yes Call BoolOp"
  },
  {
    "library": "matplotlib",
    "name": "TimerTk",
    "source_code": "class TimerTk(TimerBase):\n\n    def __init__(self, parent, *args, **kwargs):\n        self._timer = None\n        super().__init__(*args, **kwargs)\n        self.parent = parent\n\n    def _timer_start(self):\n        self._timer_stop()\n        self._timer = self.parent.after(self._interval, self._on_timer)\n\n    def _timer_stop(self):\n        if self._timer is not None:\n            self.parent.after_cancel(self._timer)\n        self._timer = None\n\n    def _on_timer(self):\n        super()._on_timer()\n        if not self._single and self._timer:\n            if self._interval > 0:\n                self._timer = self.parent.after(self._interval, self._on_timer)\n            else:\n                self._timer = self.parent.after_idle(lambda: self.parent.after(self._interval, self._on_timer))\n        else:\n            self._timer = None",
    "docstring": "Subclass of using Tk timer events.",
    "type": "class",
    "file_path": "matplotlib\\lib\\matplotlib\\backends\\_backend_tk.py",
    "ast_data": "ClassDef name:TimerTk FunctionDef name:__init__ arg:self arg:parent arguments arg arg arg arg Assign Call Call Assign FunctionDef name:_timer_start arg:self arguments arg Call Assign Call FunctionDef name:_timer_stop arg:self arguments arg If Compare Call Assign FunctionDef name:_on_timer arg:self arguments arg Call Call If BoolOp If Compare Assign Call Assign Call arguments Call Assign"
  },
  {
    "library": "tensorflow",
    "name": "from_function_and_signature",
    "source_code": "@classmethod\ndef from_function_and_signature(cls, python_function, input_signature, is_pure=False, jit_compile=None):\n    function_type, default_values = make_function_type(python_function, input_signature)\n    while isinstance(python_function, functools.partial):\n        python_function = python_function.func\n    name = getattr(python_function, '__name__', 'f')\n    return FunctionSpec(function_type, default_values, is_pure=is_pure, jit_compile=jit_compile, name=name)",
    "docstring": "Creates a FunctionSpec instance given a python function and signature. Args: python_function: a function to inspect input_signature: a signature of the function (None, if variable) is_pure: if True all input arguments (including variables and constants) will be converted to tensors and no variable changes allowed. jit_compile: see Returns: instance of FunctionSpec",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\polymorphic_function\\function_type_utils.py",
    "ast_data": "FunctionDef name:from_function_and_signature arg:cls arg:python_function arg:input_signature arg:is_pure arg:jit_compile arguments arg arg arg arg arg Assign Call While Call Assign Assign Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_MultiDeviceAddN",
    "source_code": "def _MultiDeviceAddN(tensor_list, gradient_uid):\n    tensors_on_device = collections.defaultdict(lambda: [])\n    for tensor in tensor_list:\n        tensors_on_device[tensor.device].append(tensor)\n    summands = []\n\n    def DeviceKey(dev):\n        return '' if dev is None else dev\n    for dev in sorted(tensors_on_device, key=DeviceKey):\n        tensors = tensors_on_device[dev]\n        with ops._colocate_with_for_gradient(tensors[0].op, gradient_uid, ignore_existing=True):\n            summands.append(math_ops.add_n(tensors))\n    return math_ops.add_n(summands)",
    "docstring": "Adds tensors from potentially multiple devices.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\gradients_util.py",
    "ast_data": "FunctionDef name:_MultiDeviceAddN arg:tensor_list arg:gradient_uid arguments arg arg Assign Call arguments For Call Assign FunctionDef name:DeviceKey arg:dev arguments arg Return return:yes Compare For Call Assign With Call Call Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "_RendezvousExitOp",
    "source_code": "class _RendezvousExitOp:\n\n    def __call__(self, ctx: _RendezvousContext, deadline: float) -> _Action:\n        if ctx.node in ctx.state.participants:\n            if time.monotonic() > deadline:\n                return _Action.ERROR_TIMEOUT\n            return _Action.REMOVE_FROM_PARTICIPANTS\n        return _Action.FINISH",
    "docstring": "Represent a rendezvous exit operation.",
    "type": "class",
    "file_path": "pytorch\\torch\\distributed\\elastic\\rendezvous\\dynamic_rendezvous.py",
    "ast_data": "ClassDef name:_RendezvousExitOp FunctionDef name:__call__ arg:self arg:ctx arg:deadline arguments arg arg arg If Compare If Compare Call Return return:yes Return return:yes Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "generate_calc_maxpool",
    "source_code": "@register_transformation_rule(CalcMaxPool)\ndef generate_calc_maxpool(constraint, counter):\n    d, counter = gen_tensor_dims(4, counter)\n    maxpool_result = TensorType([d[0], d[1], d[2], d[3]])\n    c1 = BinConstraintT(constraint.maxpool_result, maxpool_result, op_eq)\n    c2 = BinConstraintD(constraint.matching_constraint[1], d[1], op_eq)\n    c3 = BinConstraintD(constraint.matching_constraint[0], d[0], op_eq)\n    c4, c5 = calc_last_two_dims(constraint, d)\n    leq_constraints = Conj([BinConstraintD(0, d[0], op_leq), BinConstraintD(0, d[1], op_leq), BinConstraintD(0, d[2], op_leq), BinConstraintD(0, d[3], op_leq)])\n    return (Conj([c1, c2, c3, c4, c5, leq_constraints]), counter)",
    "docstring": "Transform maxpool constraints",
    "type": "function",
    "file_path": "pytorch\\torch\\fx\\experimental\\migrate_gradual_types\\constraint_transformation.py",
    "ast_data": "FunctionDef name:generate_calc_maxpool arg:constraint arg:counter arguments arg arg Assign Call Assign Call Assign Call Assign Call Assign Call Assign Call Assign Call Call Call Call Call Return return:yes Call Call"
  },
  {
    "library": "kornia",
    "name": "RandomAutoContrast",
    "source_code": "class RandomAutoContrast(IntensityAugmentationBase2D):\n\n    def __init__(self, clip_output: bool=True, same_on_batch: bool=False, p: float=1.0, keepdim: bool=False) -> None:\n        super().__init__(p=p, same_on_batch=same_on_batch, keepdim=keepdim)\n        self.clip_output = clip_output\n\n    def apply_transform(self, input: Tensor, params: Dict[str, Tensor], flags: Dict[str, Any], transform: Optional[Tensor]=None) -> Tensor:\n        out = normalize_min_max(input)\n        if self.clip_output:\n            return out.clamp(0.0, 1.0)\n        return out",
    "docstring": "Apply a random auto-contrast of a tensor image. Args: p: probability of applying the transformation. clip_output: if true clip output same_on_batch: apply the same transformation across the batch. keepdim: whether to keep the output shape the same as input (True) or broadcast it to the batch form (False). Shape: - Input: :math: or :math: - Output: :math: .. note:: This function internally uses :func:",
    "type": "class",
    "file_path": "kornia\\kornia\\augmentation\\_2d\\intensity\\auto_contrast.py",
    "ast_data": "ClassDef name:RandomAutoContrast FunctionDef name:__init__ arg:self arg:clip_output arg:same_on_batch arg:p arg:keepdim arguments arg arg arg arg arg Call Call Assign FunctionDef name:apply_transform arg:self arg:input arg:params arg:flags arg:transform arguments arg arg arg arg arg Assign Call If Return return:yes Call Return return:yes"
  },
  {
    "library": "django",
    "name": "escape_leading_slashes",
    "source_code": "def escape_leading_slashes(url):\n    if url.startswith('//'):\n        url = '/%2F{}'.format(url.removeprefix('//'))\n    return url",
    "docstring": "If redirecting to an absolute path (two leading slashes), a slash must be escaped to prevent browsers from handling the path as schemaless and redirecting to another host.",
    "type": "function",
    "file_path": "django\\django\\utils\\http.py",
    "ast_data": "FunctionDef name:escape_leading_slashes arg:url arguments arg If Call Assign Call Call Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "__iter__",
    "source_code": "def __iter__(self):\n    return iter(self.estimators_)",
    "docstring": "Return iterator over estimators in the ensemble.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\ensemble\\_base.py",
    "ast_data": "FunctionDef name:__iter__ arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "numpy",
    "name": "Precedence",
    "source_code": "class Precedence(Enum):\n    ATOM = 0\n    POWER = 1\n    UNARY = 2\n    PRODUCT = 3\n    SUM = 4\n    LT = 6\n    EQ = 7\n    LAND = 11\n    LOR = 12\n    TERNARY = 13\n    ASSIGN = 14\n    TUPLE = 15\n    NONE = 100",
    "docstring": "Used as Expr.tostring precedence argument.",
    "type": "class",
    "file_path": "numpy\\numpy\\f2py\\symbolic.py",
    "ast_data": "ClassDef name:Precedence Assign Assign Assign Assign Assign Assign Assign Assign Assign Assign Assign Assign Assign"
  },
  {
    "library": "scipy",
    "name": "integrate",
    "source_code": "def integrate(self, a, b, extrapolate=None):\n    ib = self.antiderivative()\n    if extrapolate is None:\n        extrapolate = self.extrapolate\n    if extrapolate != 'periodic':\n        ib.extrapolate = extrapolate\n    if extrapolate == 'periodic':\n        if a <= b:\n            sign = 1\n        else:\n            a, b = (b, a)\n            sign = -1\n        xs, xe = (self.x[0], self.x[-1])\n        period = xe - xs\n        interval = b - a\n        n_periods, left = divmod(interval, period)\n        res = n_periods * (ib(xe) - ib(xs))\n        a = xs + (a - xs) % period\n        b = a + left\n        if b <= xe:\n            res += ib(b) - ib(a)\n        else:\n            res += ib(xe) - ib(a) + ib(xs + left + a - xe) - ib(xs)\n        return sign * res\n    else:\n        return ib(b) - ib(a)",
    "docstring": "Compute a definite integral over a piecewise polynomial. Parameters ---------- a : float Lower integration bound b : float Upper integration bound extrapolate : {bool, 'periodic', None}, optional Whether to extrapolate to out-of-bounds points based on first and last intervals, or to return NaNs. If 'periodic', periodic extrapolation is used. If None (default), use . Returns ------- array_like Definite integral of the piecewise polynomial over [a, b]",
    "type": "method",
    "file_path": "scipy\\scipy\\interpolate\\_interpolate.py",
    "ast_data": "FunctionDef name:integrate arg:self arg:a arg:b arg:extrapolate arguments arg arg arg arg Assign Call If Compare Assign If Compare Assign If Compare If Compare Assign Assign Assign Assign Assign Assign Assign Call Assign Call Call Assign Assign If Compare Call Call Call Call Call Call Return return:yes Return return:yes Call Call"
  },
  {
    "library": "scipy",
    "name": "__call__",
    "source_code": "def __call__(self, dim=None, seed=None):\n    return special_ortho_group_frozen(dim, seed=seed)",
    "docstring": "Create a frozen SO(N) distribution. See for more information.",
    "type": "method",
    "file_path": "scipy\\scipy\\stats\\_multivariate.py",
    "ast_data": "FunctionDef name:__call__ arg:self arg:dim arg:seed arguments arg arg arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "clean_removed_buffer_from_partition_signatures",
    "source_code": "def clean_removed_buffer_from_partition_signatures(self, signature: GraphPartitionSignature) -> GraphPartitionSignature:\n    input_nodes = {name: buffer for name, buffer in signature.input_nodes.items() if name not in V.graph.removed_buffers}\n    input_deallocation = {name: val for name, val in signature.input_deallocation.items() if name not in V.graph.removed_buffers}\n    output_nodes = [node for node in signature.output_nodes if node.maybe_get_name() not in V.graph.removed_buffers]\n    constant_names = [name for name in signature.constant_names if name not in V.graph.removed_buffers]\n    return GraphPartitionSignature(signature.symbol_inputs, input_nodes, output_nodes, input_deallocation, signature.skip_cudagraph, constant_names)",
    "docstring": "Updates the partition signature by removing buffers specified in V.graph.removed_buffers. See [Note: Removed Graph Partition Arguments]",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\scheduler.py",
    "ast_data": "FunctionDef name:clean_removed_buffer_from_partition_signatures arg:self arg:signature arguments arg arg Assign Call Compare Assign Call Compare Assign Compare Call Assign Compare Return return:yes Call"
  },
  {
    "library": "kornia",
    "name": "get_rel_pos",
    "source_code": "def get_rel_pos(q_size: int, k_size: int, rel_pos: Tensor) -> Tensor:\n    max_rel_dist = int(2 * max(q_size, k_size) - 1)\n    if rel_pos.shape[0] != max_rel_dist:\n        rel_pos_resized = F.interpolate(rel_pos.reshape(1, rel_pos.shape[0], -1).permute(0, 2, 1), size=max_rel_dist, mode='linear')\n        rel_pos_resized = rel_pos_resized.reshape(-1, max_rel_dist).permute(1, 0)\n    else:\n        rel_pos_resized = rel_pos\n    q_coords = torch.arange(q_size)[:, None] * max(k_size / q_size, 1.0)\n    k_coords = torch.arange(k_size)[None, :] * max(q_size / k_size, 1.0)\n    relative_coords = q_coords - k_coords + (k_size - 1) * max(q_size / k_size, 1.0)\n    return rel_pos_resized[relative_coords.long()]",
    "docstring": "Get relative positional embeddings according to the relative positions of query and key sizes. Args: q_size: size of query q. k_size: size of key k. rel_pos: relative position embeddings (L, C). Returns: Extracted positional embeddings according to relative positions.",
    "type": "function",
    "file_path": "kornia\\kornia\\contrib\\models\\sam\\architecture\\image_encoder.py",
    "ast_data": "FunctionDef name:get_rel_pos arg:q_size arg:k_size arg:rel_pos arguments arg arg arg Assign Call Call If Compare Assign Call Call Call Assign Call Call Assign Assign Call Call Assign Call Call Assign Call Return return:yes Call"
  },
  {
    "library": "seaborn",
    "name": "fit_regression",
    "source_code": "def fit_regression(self, ax=None, x_range=None, grid=None):\n    self._check_statsmodels()\n    if grid is None:\n        if self.truncate:\n            x_min, x_max = self.x_range\n        elif ax is None:\n            x_min, x_max = x_range\n        else:\n            x_min, x_max = ax.get_xlim()\n        grid = np.linspace(x_min, x_max, 100)\n    ci = self.ci\n    if self.order > 1:\n        yhat, yhat_boots = self.fit_poly(grid, self.order)\n    elif self.logistic:\n        from statsmodels.genmod.generalized_linear_model import GLM\n        from statsmodels.genmod.families import Binomial\n        yhat, yhat_boots = self.fit_statsmodels(grid, GLM, family=Binomial())\n    elif self.lowess:\n        ci = None\n        grid, yhat = self.fit_lowess()\n    elif self.robust:\n        from statsmodels.robust.robust_linear_model import RLM\n        yhat, yhat_boots = self.fit_statsmodels(grid, RLM)\n    elif self.logx:\n        yhat, yhat_boots = self.fit_logx(grid)\n    else:\n        yhat, yhat_boots = self.fit_fast(grid)\n    if ci is None:\n        err_bands = None\n    else:\n        err_bands = utils.ci(yhat_boots, ci, axis=0)\n    return (grid, yhat, err_bands)",
    "docstring": "Fit the regression model.",
    "type": "method",
    "file_path": "seaborn\\seaborn\\regression.py",
    "ast_data": "FunctionDef name:fit_regression arg:self arg:ax arg:x_range arg:grid arguments arg arg arg arg Call If Compare If Assign If Compare Assign Assign Call Assign Call Assign If Compare Assign Call If Assign Call Call If Assign Assign Call If Assign Call If Assign Call Assign Call If Compare Assign Assign Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "clip_to_bbox",
    "source_code": "def clip_to_bbox(self, bbox, inside=True):\n    verts = _path.clip_path_to_rect(self, bbox, inside)\n    paths = [Path(poly) for poly in verts]\n    return self.make_compound_path(*paths)",
    "docstring": "Clip the path to the given bounding box. The path must be made up of one or more closed polygons. This algorithm will not behave correctly for unclosed paths. If *inside* is , clip to the inside of the box, otherwise to the outside of the box.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\path.py",
    "ast_data": "FunctionDef name:clip_to_bbox arg:self arg:bbox arg:inside arguments arg arg arg Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "ShardedVariableSpec",
    "source_code": "class ShardedVariableSpec(type_spec.TypeSpec):\n    __slots__ = ['_variable_specs']\n    value_type = property(lambda self: ShardedVariable)\n\n    def __init__(self, *variable_specs):\n        self._variable_specs = tuple(variable_specs)\n\n    def _serialize(self):\n        return self._variable_specs\n\n    @property\n    def _component_specs(self):\n        return self._variable_specs\n\n    def _to_components(self, value):\n        return tuple(value.variables)\n\n    def _from_components(self, variables):\n        return ShardedVariable(variables)\n\n    def _cast(self, value, _):\n        return value",
    "docstring": "Type specification for a .",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\sharded_variable.py",
    "ast_data": "ClassDef name:ShardedVariableSpec Assign Assign Call arguments arg FunctionDef name:__init__ arg:self arguments arg arg Assign Call FunctionDef name:_serialize arg:self arguments arg Return return:yes FunctionDef name:_component_specs arg:self arguments arg Return return:yes FunctionDef name:_to_components arg:self arg:value arguments arg arg Return return:yes Call FunctionDef name:_from_components arg:self arg:variables arguments arg arg Return return:yes Call FunctionDef name:_cast arg:self arg:value arg:_ arguments arg arg arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "fn",
    "source_code": "def fn(distribution, *a):\n    if hasattr(distribution.extended, '_outer_control_flow_context'):\n        if distribution.extended._outer_control_flow_context is None:\n            with ops.control_dependencies(None):\n                metric_value = metric_value_fn(distribution, *a)\n        else:\n            distribution.extended._outer_control_flow_context.Enter()\n            metric_value = metric_value_fn(distribution, *a)\n            distribution.extended._outer_control_flow_context.Exit()\n    else:\n        metric_value = metric_value_fn(distribution, *a)\n    if metrics_collections:\n        ops.add_to_collections(metrics_collections, metric_value)\n    return metric_value",
    "docstring": "Call in the correct control flow context.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\metrics_impl.py",
    "ast_data": "FunctionDef name:fn arg:distribution arguments arg arg If Call If Compare With Call Assign Call Call Assign Call Call Assign Call If Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_resolve_grad_inputs",
    "source_code": "def _resolve_grad_inputs(cond_graph, grad_graph):\n    new_inputs = []\n    for t in grad_graph.external_captures:\n        if t.graph != grad_graph.outer_graph:\n            assert t.graph == cond_graph\n            for i, output in enumerate(t.graph.outputs):\n                if output is t:\n                    t = t.graph._forward_cond.outputs[i]\n                    break\n            else:\n                for i, output in enumerate(t.graph.internal_captures):\n                    if output is t:\n                        t = t.graph.external_captures[i]\n                        break\n                else:\n                    raise ValueError('Could not find external tensor capture {tensor} in captures or outputs'.format(tensor=t))\n            assert t.graph == cond_graph.outer_graph\n        new_inputs.append(t)\n    return new_inputs",
    "docstring": "Returns the tensors to pass as inputs to . The may have external references to 1. Its outer graph containing the input gradients. These references are kept as is. 2. Tensors in the forward pass graph. These tensors may not be \"live\" when the gradient is being computed. We replace such references by their corresponding tensor in . In the case of nested control flow or functions, the gradient logic handling will make sure the tensor from is also correctly captured. Args: cond_graph: FuncGraph. The forward-pass function. grad_graph: FuncGraph. The gradients function. Returns: A list of inputs tensors to be passed to grad_graph.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\cond_v2.py",
    "ast_data": "FunctionDef name:_resolve_grad_inputs arg:cond_graph arg:grad_graph arguments arg arg Assign For If Compare Compare For Call If Compare Assign For Call If Compare Assign Raise Call Call Compare Call Return return:yes"
  },
  {
    "library": "scipy",
    "name": "_correa_entropy",
    "source_code": "def _correa_entropy(X, m, *, xp):\n    n = X.shape[-1]\n    X = _pad_along_last_axis(X, m, xp=xp)\n    i = xp.arange(1, n + 1)\n    dj = xp.arange(-m, m + 1)[:, None]\n    j = i + dj\n    j0 = j + m - 1\n    Xibar = xp.mean(X[..., j0], axis=-2, keepdims=True)\n    difference = X[..., j0] - Xibar\n    num = xp.sum(difference * dj, axis=-2)\n    den = n * xp.sum(difference ** 2, axis=-2)\n    return -xp.mean(xp.log(num / den), axis=-1)",
    "docstring": "Compute the Correa estimator as described in [6].",
    "type": "function",
    "file_path": "scipy\\scipy\\stats\\_entropy.py",
    "ast_data": "FunctionDef name:_correa_entropy arg:X arg:m arguments arg arg arg Assign Assign Call Assign Call Assign Call Assign Assign Assign Call Assign Assign Call Assign Call Return return:yes Call Call"
  },
  {
    "library": "numpy",
    "name": "_DomainCheckInterval",
    "source_code": "class _DomainCheckInterval:\n\n    def __init__(self, a, b):\n        if a > b:\n            a, b = (b, a)\n        self.a = a\n        self.b = b\n\n    def __call__(self, x):\n        with np.errstate(invalid='ignore'):\n            return umath.logical_or(umath.greater(x, self.b), umath.less(x, self.a))",
    "docstring": "Define a valid interval, so that : ``.",
    "type": "class",
    "file_path": "numpy\\numpy\\ma\\core.py",
    "ast_data": "ClassDef name:_DomainCheckInterval FunctionDef name:__init__ arg:self arg:a arg:b arguments arg arg arg If Compare Assign Assign Assign FunctionDef name:__call__ arg:self arg:x arguments arg arg With Call Return return:yes Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "add",
    "source_code": "def add(self, func: Callable, opset: OpsetVersion) -> None:\n    if self._functions.in_base(opset):\n        warnings.warn(f\"Symbolic function '{self._name}' already registered for opset {opset}. Replacing the existing function with new function. This is unexpected. Please report it on {_constants.PYTORCH_GITHUB_ISSUES_URL}.\", errors.OnnxExporterWarning)\n    self._functions.set_base(opset, func)",
    "docstring": "Adds a symbolic function. Args: func: The function to add. opset: The opset version of the function to add.",
    "type": "method",
    "file_path": "pytorch\\torch\\onnx\\_internal\\registration.py",
    "ast_data": "FunctionDef name:add arg:self arg:func arg:opset arguments arg arg arg If Call Call Call"
  },
  {
    "library": "authlib",
    "name": "validate_software_version",
    "source_code": "def validate_software_version(self):\n    pass",
    "docstring": "A version identifier string for the client software identified by \"software_id\". The value of the \"software_version\" SHOULD change on any update to the client software identified by the same \"software_id\". The value of this field is intended to be compared using string equality matching and no other comparison semantics are defined by this specification. The value of this field is outside the scope of this specification, but it is not intended to be human readable and is usually opaque to the client and authorization server. The definition of what constitutes an update to client software that would trigger a change to this value is specific to the software itself and is outside the scope of this specification.",
    "type": "method",
    "file_path": "authlib\\authlib\\oauth2\\rfc7591\\claims.py",
    "ast_data": "FunctionDef name:validate_software_version arg:self arguments arg"
  },
  {
    "library": "tensorflow",
    "name": "_create_zeros_for_none_grads",
    "source_code": "def _create_zeros_for_none_grads(forward_graphs, grad_graphs):\n    assert len(forward_graphs) == len(grad_graphs)\n    branch_outputs = [g.structured_outputs for g in grad_graphs]\n    num_outputs_per_branch = [len(outs) for outs in branch_outputs]\n    assert len(set(num_outputs_per_branch)) == 1, num_outputs_per_branch\n    for output_idx, branch_outs in enumerate(zip(*branch_outputs)):\n        if any((t is None for t in branch_outs)) and any((t is not None for t in branch_outs)):\n            for branch_index, t in enumerate(branch_outs):\n                if t is None:\n                    with grad_graphs[branch_index].as_default():\n                        zeros = default_gradient.zeros_like(forward_graphs[branch_index].inputs[output_idx])\n                        grad_graphs[branch_index].structured_outputs[output_idx] = zeros\n    for grad_graph in grad_graphs:\n        grad_graph.outputs = [t for t in func_graph_module.flatten(grad_graph.structured_outputs) if t is not None]",
    "docstring": "Creates zeros for None out grads if at least one branch has non-None grad. Args: forward_graphs: List of forward FuncGraphs. grad_graphs: List of grad FuncGraphs.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\cond_v2.py",
    "ast_data": "FunctionDef name:_create_zeros_for_none_grads arg:forward_graphs arg:grad_graphs arguments arg arg Compare Call Call Assign Assign Call Compare Call Call For Call Call If BoolOp Call Compare Call Compare For Call If Compare With Call Assign Call Assign For Assign Call Compare"
  },
  {
    "library": "tensorflow",
    "name": "EventListenerStub",
    "source_code": "class EventListenerStub(object):\n\n    def __init__(self, channel):\n        self.SendEvents = channel.stream_stream('/tensorflow.EventListener/SendEvents', request_serializer=tensorflow_dot_core_dot_util_dot_event__pb2.Event.SerializeToString, response_deserializer=tensorflow_dot_core_dot_debug_dot_debug__service__pb2.EventReply.FromString)\n        self.SendTracebacks = channel.unary_unary('/tensorflow.EventListener/SendTracebacks', request_serializer=tensorflow_dot_core_dot_debug_dot_debug__service__pb2.CallTraceback.SerializeToString, response_deserializer=tensorflow_dot_core_dot_debug_dot_debug__service__pb2.EventReply.FromString)\n        self.SendSourceFiles = channel.unary_unary('/tensorflow.EventListener/SendSourceFiles', request_serializer=tensorflow_dot_core_dot_protobuf_dot_debug__pb2.DebuggedSourceFiles.SerializeToString, response_deserializer=tensorflow_dot_core_dot_debug_dot_debug__service__pb2.EventReply.FromString)",
    "docstring": "EventListener: Receives Event protos, e.g., from debugged TensorFlow runtime(s).",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\debug\\lib\\debug_service_pb2_grpc.py",
    "ast_data": "ClassDef name:EventListenerStub FunctionDef name:__init__ arg:self arg:channel arguments arg arg Assign Call Assign Call Assign Call"
  },
  {
    "library": "numpy",
    "name": "exec_mod_from_location",
    "source_code": "def exec_mod_from_location(modname, modfile):\n    spec = importlib.util.spec_from_file_location(modname, modfile)\n    foo = importlib.util.module_from_spec(spec)\n    spec.loader.exec_module(foo)\n    return foo",
    "docstring": "Use importlib machinery to import a module from the file . Depending on the , the module may not be registered in sys.modules.",
    "type": "function",
    "file_path": "numpy\\numpy\\distutils\\misc_util.py",
    "ast_data": "FunctionDef name:exec_mod_from_location arg:modname arg:modfile arguments arg arg Assign Call Assign Call Call Return return:yes"
  },
  {
    "library": "scipy",
    "name": "antiderivative",
    "source_code": "def antiderivative(self, nu):\n    p = self.construct_fast(self.c.copy(), self.x, self.extrapolate)\n    for axis, n in enumerate(nu):\n        p._antiderivative_inplace(n, axis)\n    p._ensure_c_contiguous()\n    return p",
    "docstring": "Construct a new piecewise polynomial representing the antiderivative. Antiderivative is also the indefinite integral of the function, and derivative is its inverse operation. Parameters ---------- nu : ndim-tuple of int Order of derivatives to evaluate for each dimension. If negative, the derivative is returned. Returns ------- pp : PPoly Piecewise polynomial of order k2 = k + n representing the antiderivative of this polynomial. Notes ----- The antiderivative returned by this function is continuous and continuously differentiable to order n-1, up to floating point rounding error.",
    "type": "method",
    "file_path": "scipy\\scipy\\interpolate\\_interpolate.py",
    "ast_data": "FunctionDef name:antiderivative arg:self arg:nu arguments arg arg Assign Call Call For Call Call Call Return return:yes"
  },
  {
    "library": "django",
    "name": "_get_m2m_reverse_attr",
    "source_code": "def _get_m2m_reverse_attr(self, related, attr):\n    cache_attr = '_m2m_reverse_%s_cache' % attr\n    if hasattr(self, cache_attr):\n        return getattr(self, cache_attr)\n    found = False\n    if self.remote_field.through_fields is not None:\n        link_field_name = self.remote_field.through_fields[1]\n    else:\n        link_field_name = None\n    for f in self.remote_field.through._meta.fields:\n        if f.is_relation and f.remote_field.model == related.model:\n            if link_field_name is None and related.related_model == related.model:\n                if found:\n                    setattr(self, cache_attr, getattr(f, attr))\n                    break\n                else:\n                    found = True\n            elif link_field_name is None or link_field_name == f.name:\n                setattr(self, cache_attr, getattr(f, attr))\n                break\n    return getattr(self, cache_attr)",
    "docstring": "Function that can be curried to provide the related accessor or DB column name for the m2m table.",
    "type": "method",
    "file_path": "django\\django\\db\\models\\fields\\related.py",
    "ast_data": "FunctionDef name:_get_m2m_reverse_attr arg:self arg:related arg:attr arguments arg arg arg Assign If Call Return return:yes Call Assign If Compare Assign Assign For If BoolOp Compare If BoolOp Compare Compare If Call Call Assign If BoolOp Compare Compare Call Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "batch_sharded",
    "source_code": "@classmethod\ndef batch_sharded(cls, mesh: Mesh, batch_dim: str, rank: int, axis: int=0) -> 'Layout':\n    return cls._new_object(mesh=mesh, rank=rank, batch_dim=batch_dim, axis=axis)",
    "docstring": "Returns a layout sharded on batch dimension.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\dtensor\\python\\layout.py",
    "ast_data": "FunctionDef name:batch_sharded arg:cls arg:mesh arg:batch_dim arg:rank arg:axis arguments arg arg arg arg arg Return return:yes Call"
  },
  {
    "library": "django",
    "name": "converter",
    "source_code": "def converter(matchobj):\n    matches = matchobj.groupdict()\n    matched = matches['matched']\n    url = matches['url']\n    if re.match('^[a-z]+:', url) or url.startswith('//'):\n        return matched\n    if url.startswith('/') and (not url.startswith(settings.STATIC_URL)):\n        return matched\n    url_path, fragment = urldefrag(url)\n    if not url_path:\n        return matched\n    if url_path.startswith('/'):\n        assert url_path.startswith(settings.STATIC_URL)\n        target_name = url_path.removeprefix(settings.STATIC_URL)\n    else:\n        source_name = name if os.sep == '/' else name.replace(os.sep, '/')\n        target_name = posixpath.join(posixpath.dirname(source_name), url_path)\n    hashed_url = self._url(self._stored_name, unquote(target_name), force=True, hashed_files=hashed_files)\n    transformed_url = '/'.join(url_path.split('/')[:-1] + hashed_url.split('/')[-1:])\n    if fragment:\n        transformed_url += ('?#' if '?#' in url else '#') + fragment\n    matches['url'] = unquote(transformed_url)\n    return template % matches",
    "docstring": "Convert the matched URL to a normalized and hashed URL. This requires figuring out which files the matched URL resolves to and calling the url() method of the storage.",
    "type": "method",
    "file_path": "django\\django\\contrib\\staticfiles\\storage.py",
    "ast_data": "FunctionDef name:converter arg:matchobj arguments arg Assign Call Assign Assign If BoolOp Call Call Return return:yes If BoolOp Call Call Return return:yes Assign Call If Return return:yes If Call Call Assign Call Assign Compare Call Assign Call Call Assign Call Call Assign Call Call Call If Compare Assign Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_get_is_trainable_mask",
    "source_code": "def _get_is_trainable_mask(self) -> list[bool]:\n    return list(map(_is_trainable, self._all_params))",
    "docstring": "Return a boolean mask indicating if each parameter is trainable (``) or not.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\optim\\zero_redundancy_optimizer.py",
    "ast_data": "FunctionDef name:_get_is_trainable_mask arg:self arguments arg Return return:yes Call Call"
  },
  {
    "library": "pygame",
    "name": "_clip_and_draw_line",
    "source_code": "def _clip_and_draw_line(surf, rect, color, pts):\n    if not clip_line(pts, BoundingBox(rect.x, rect.y, rect.x + rect.w - 1, rect.y + rect.h - 1)):\n        return 0\n    if pts[1] == pts[3]:\n        _drawhorzline(surf, color, pts[0], pts[1], pts[2])\n    elif pts[0] == pts[2]:\n        _drawvertline(surf, color, pts[0], pts[1], pts[3])\n    else:\n        _draw_line(surf, color, Point(pts[0], pts[1]), Point(pts[2], pts[3]))\n    return 1",
    "docstring": "clip the line into the rectangle and draw if needed. Returns true if anything has been drawn, else false.",
    "type": "function",
    "file_path": "pygame\\src_py\\draw_py.py",
    "ast_data": "FunctionDef name:_clip_and_draw_line arg:surf arg:rect arg:color arg:pts arguments arg arg arg arg If Call Call Return return:yes If Compare Call If Compare Call Call Call Call Return return:yes"
  },
  {
    "library": "authlib",
    "name": "has_granted_permission",
    "source_code": "def has_granted_permission(self, client, user):\n    raise NotImplementedError()",
    "docstring": "Check if the client has permission to access the given user's resource. Developers MUST implement it in subclass, e.g.:: def has_granted_permission(self, client, user): permission = ClientUserGrant.query(client=client, user=user) return permission.granted :param client: instance of OAuth client model :param user: instance of User model :return: bool",
    "type": "method",
    "file_path": "authlib\\authlib\\oauth2\\rfc7523\\jwt_bearer.py",
    "ast_data": "FunctionDef name:has_granted_permission arg:self arg:client arg:user arguments arg arg arg Raise Call"
  },
  {
    "library": "pandas",
    "name": "__str__",
    "source_code": "@final\ndef __str__(self) -> str:\n    attrs = (f'{k}={getattr(self._timegrouper, k)}' for k in self._attributes if getattr(self._timegrouper, k, None) is not None)\n    return f'{type(self).__name__} [{', '.join(attrs)}]'",
    "docstring": "Provide a nice str repr of our rolling object.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\resample.py",
    "ast_data": "FunctionDef name:__str__ arg:self arguments arg Assign Call Compare Call Return return:yes Call Call"
  },
  {
    "library": "matplotlib",
    "name": "set",
    "source_code": "def set(self, **kwargs):\n    raise NotImplementedError",
    "docstring": "Set the parameters for the layout engine.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\layout_engine.py",
    "ast_data": "FunctionDef name:set arg:self arguments arg arg Raise"
  },
  {
    "library": "matplotlib",
    "name": "frame_size",
    "source_code": "@property\ndef frame_size(self):\n    w, h = self.fig.get_size_inches()\n    return (int(w * self.dpi), int(h * self.dpi))",
    "docstring": "A tuple `` in pixels of a movie frame.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\animation.py",
    "ast_data": "FunctionDef name:frame_size arg:self arguments arg Assign Call Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "feed_dict_additions",
    "source_code": "def feed_dict_additions(self):\n    return {self._save_string: self._state_callback()}",
    "docstring": "When running a graph, indicates fresh state to feed.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\training\\saving\\saveable_object_util.py",
    "ast_data": "FunctionDef name:feed_dict_additions arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, loss=None, predictions=None, metrics=None):\n    if loss is not None:\n        loss_dict = self._wrap_and_check_outputs(loss, self.LOSS_NAME)\n        self._loss = self._prefix_output_keys(loss_dict, self.LOSS_NAME)\n    if predictions is not None:\n        pred_dict = self._wrap_and_check_outputs(predictions, self.PREDICTIONS_NAME)\n        self._predictions = self._prefix_output_keys(pred_dict, self.PREDICTIONS_NAME)\n    if metrics is not None:\n        self._metrics = self._wrap_and_check_metrics(metrics)",
    "docstring": "Constructor for SupervisedOutput (ie, Train or Eval output). Args: loss: dict of Tensors or single Tensor representing calculated loss. predictions: dict of Tensors or single Tensor representing model predictions. metrics: Dict of metric results keyed by name. The values of the dict can be one of the following: (1) instance of class. (2) (metric_value, update_op) tuples, or a single tuple. metric_value must be a Tensor, and update_op must be a Tensor or Op. Raises: ValueError: if any of the outputs' dict keys are not strings or tuples of strings or the values are not Tensors (or Operations in the case of update_op).",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\saving\\utils_v1\\export_output.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:loss arg:predictions arg:metrics arguments arg arg arg arg If Compare Assign Call Assign Call If Compare Assign Call Assign Call If Compare Assign Call"
  },
  {
    "library": "pygame",
    "name": "clear",
    "source_code": "def clear(self, surface, bgd):\n    if callable(bgd):\n        for lost_clear_rect in self.lostsprites:\n            bgd(surface, lost_clear_rect)\n        for clear_rect in self.spritedict.values():\n            if clear_rect:\n                bgd(surface, clear_rect)\n    else:\n        surface_blit = surface.blit\n        for lost_clear_rect in self.lostsprites:\n            surface_blit(bgd, lost_clear_rect, lost_clear_rect)\n        for clear_rect in self.spritedict.values():\n            if clear_rect:\n                surface_blit(bgd, clear_rect, clear_rect)",
    "docstring": "erase the previous position of all sprites Group.clear(surface, bgd): return None Clears the area under every drawn sprite in the group. The bgd argument should be Surface which is the same dimensions as the screen surface. The bgd could also be a function which accepts the given surface and the area to be cleared as arguments.",
    "type": "method",
    "file_path": "pygame\\src_py\\sprite.py",
    "ast_data": "FunctionDef name:clear arg:self arg:surface arg:bgd arguments arg arg arg If Call For Call For Call If Call Assign For Call For Call If Call"
  },
  {
    "library": "pytorch",
    "name": "register_pytree_node",
    "source_code": "def register_pytree_node(self, class_type: type, flatten_func: pytree.FlattenFunc, unflatten_func: pytree.UnflattenFunc):\n    if class_type in pytree.SUPPORTED_NODES or class_type in self._extensions:\n        return\n    self._extensions[class_type] = (flatten_func, unflatten_func)",
    "docstring": "Register PyTree extension for a custom python type. Args: class_type: The custom python type. flatten_func: The flatten function. unflatten_func: The unflatten function. Raises: AssertionError: If the custom python type is already registered.",
    "type": "method",
    "file_path": "pytorch\\torch\\onnx\\_internal\\fx\\dynamo_graph_extractor.py",
    "ast_data": "FunctionDef name:register_pytree_node arg:self arg:class_type arg:flatten_func arg:unflatten_func arguments arg arg arg arg If BoolOp Compare Compare Return return:no Assign"
  },
  {
    "library": "scipy",
    "name": "assert_almost_equal",
    "source_code": "def assert_almost_equal(actual, desired, decimal=7, *args, **kwds):\n    rtol, atol = (0, 1.5 * 10 ** (-decimal))\n    return xp_assert_close(actual, desired, *args, atol=atol, rtol=rtol, check_dtype=False, check_shape=False, **kwds)",
    "docstring": "Backwards compatible replacement. In new code, use xp_assert_close instead.",
    "type": "function",
    "file_path": "scipy\\scipy\\_lib\\_array_api_no_0d.py",
    "ast_data": "FunctionDef name:assert_almost_equal arg:actual arg:desired arg:decimal arguments arg arg arg arg arg Assign Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "pending_exits_count",
    "source_code": "@pending_exits_count.setter\ndef pending_exits_count(self, cnt):\n    self._pending_exits_count = cnt",
    "docstring": "Set the pending count to cnt.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\control_flow_state.py",
    "ast_data": "FunctionDef name:pending_exits_count arg:self arg:cnt arguments arg arg Assign"
  },
  {
    "library": "tensorflow",
    "name": "_avg_pool_grad_flops",
    "source_code": "@ops.RegisterStatistics('AvgPoolGrad', 'flops')\ndef _avg_pool_grad_flops(graph, node):\n    _verify_conv_data_format(node)\n    out_backprop_shape = graph_util.tensor_shape_from_node_def_name(graph, node.input[1])\n    out_backprop_shape.assert_is_fully_defined()\n    kernel_shape = list(node.attr['ksize'].list.i)\n    kernel_area = _list_product(kernel_shape)\n    return ops.OpStats('flops', kernel_area * out_backprop_shape.num_elements() * 2)",
    "docstring": "Compute flops for AvgPoolGrad operation.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\profiler\\internal\\flops_registry.py",
    "ast_data": "FunctionDef name:_avg_pool_grad_flops arg:graph arg:node arguments arg arg Call Assign Call Call Assign Call Assign Call Return return:yes Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_serialize_to_tensors",
    "source_code": "def _serialize_to_tensors(self):\n\n    def _read_variable_closure():\n        v = self\n        with ops.device(v.device):\n            if context.executing_eagerly() and (not v.is_initialized()):\n                return None\n            x = v.read_value_no_copy()\n            with ops.device('/device:CPU:0'):\n                return array_ops.identity(x)\n    return {trackable.VARIABLE_VALUE_KEY: tensor_callable.Callable(_read_variable_closure, dtype=self.dtype, device=self.device)}",
    "docstring": "Implements Trackable._serialize_to_tensors.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\resource_variable_ops.py",
    "ast_data": "FunctionDef name:_serialize_to_tensors arg:self arguments arg FunctionDef name:_read_variable_closure arguments Assign With Call If BoolOp Call Call Return return:no Assign Call With Call Return return:yes Call Return return:yes Call"
  },
  {
    "library": "cryptography",
    "name": "public_bytes_raw",
    "source_code": "@abc.abstractmethod\ndef public_bytes_raw(self) -> bytes:\n    pass",
    "docstring": "The raw bytes of the public key. Equivalent to public_bytes(Raw, Raw).",
    "type": "method",
    "file_path": "cryptography\\src\\cryptography\\hazmat\\primitives\\asymmetric\\ed25519.py",
    "ast_data": "FunctionDef name:public_bytes_raw arg:self arguments arg"
  },
  {
    "library": "kornia",
    "name": "device",
    "source_code": "def device(self) -> torch.device:\n    return self._intrinsics.device",
    "docstring": "Return the device for camera buffers. Returns: Device type",
    "type": "method",
    "file_path": "kornia\\kornia\\geometry\\camera\\pinhole.py",
    "ast_data": "FunctionDef name:device arg:self arguments arg Return return:yes"
  },
  {
    "library": "seaborn",
    "name": "Stat",
    "source_code": "@dataclass\nclass Stat:\n    group_by_orient: ClassVar[bool] = False\n\n    def _check_param_one_of(self, param: str, options: Iterable[Any]) -> None:\n        value = getattr(self, param)\n        if value not in options:\n            *most, last = options\n            option_str = ', '.join((f'{x!r}' for x in most[:-1])) + f' or {last!r}'\n            err = ' '.join([f'The `{param}` parameter for `{self.__class__.__name__}` must be', f'one of {option_str}; not {value!r}.'])\n            raise ValueError(err)\n\n    def _check_grouping_vars(self, param: str, data_vars: list[str], stacklevel: int=2) -> None:\n        param_vars = getattr(self, param)\n        undefined = set(param_vars) - set(data_vars)\n        if undefined:\n            param = f'{self.__class__.__name__}.{param}'\n            names = ', '.join((f'{x!r}' for x in undefined))\n            msg = f'Undefined variable(s) passed for {param}: {names}.'\n            warnings.warn(msg, stacklevel=stacklevel)\n\n    def __call__(self, data: DataFrame, groupby: GroupBy, orient: str, scales: dict[str, Scale]) -> DataFrame:\n        return data",
    "docstring": "Base class for objects that apply statistical transformations.",
    "type": "class",
    "file_path": "seaborn\\seaborn\\_stats\\base.py",
    "ast_data": "ClassDef name:Stat FunctionDef name:_check_param_one_of arg:self arg:param arg:options arguments arg arg arg Assign Call If Compare Assign Assign Call Assign Call Raise Call FunctionDef name:_check_grouping_vars arg:self arg:param arg:data_vars arg:stacklevel arguments arg arg arg arg Assign Call Assign Call Call If Assign Assign Call Assign Call FunctionDef name:__call__ arg:self arg:data arg:groupby arg:orient arg:scales arguments arg arg arg arg arg Return return:yes"
  },
  {
    "library": "numpy",
    "name": "in1d",
    "source_code": "def in1d(ar1, ar2, assume_unique=False, invert=False):\n    if not assume_unique:\n        ar1, rev_idx = unique(ar1, return_inverse=True)\n        ar2 = unique(ar2)\n    ar = ma.concatenate((ar1, ar2))\n    order = ar.argsort(kind='mergesort')\n    sar = ar[order]\n    if invert:\n        bool_ar = sar[1:] != sar[:-1]\n    else:\n        bool_ar = sar[1:] == sar[:-1]\n    flag = ma.concatenate((bool_ar, [invert]))\n    indx = order.argsort(kind='mergesort')[:len(ar1)]\n    if assume_unique:\n        return flag[indx]\n    else:\n        return flag[indx][rev_idx]",
    "docstring": "Test whether each element of an array is also present in a second array. The output is always a masked array. See for more details. We recommend using :func: instead of for new code. See Also -------- isin : Version of this function that preserves the shape of ar1. numpy.in1d : Equivalent function for ndarrays. Examples -------- >>> import numpy as np >>> ar1 = np.ma.array([0, 1, 2, 5, 0]) >>> ar2 = [0, 2] >>> np.ma.in1d(ar1, ar2) masked_array(data=[ True, False, True, False, True], mask=False, fill_value=True)",
    "type": "function",
    "file_path": "numpy\\numpy\\ma\\extras.py",
    "ast_data": "FunctionDef name:in1d arg:ar1 arg:ar2 arg:assume_unique arg:invert arguments arg arg arg arg If Assign Call Assign Call Assign Call Assign Call Assign If Assign Compare Assign Compare Assign Call Assign Call Call If Return return:yes Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "get_params",
    "source_code": "def get_params(self, deep=True):\n    return self._get_params('transformer_list', deep=deep)",
    "docstring": "Get parameters for this estimator. Returns the parameters given in the constructor as well as the estimators contained within the of the . Parameters ---------- deep : bool, default=True If True, will return the parameters for this estimator and contained subobjects that are estimators. Returns ------- params : mapping of string to any Parameter names mapped to their values.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\pipeline.py",
    "ast_data": "FunctionDef name:get_params arg:self arg:deep arguments arg arg Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "get_max_depth",
    "source_code": "def get_max_depth(self):\n    return int(self.nodes['depth'].max())",
    "docstring": "Return maximum depth among all leaves.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\ensemble\\_hist_gradient_boosting\\predictor.py",
    "ast_data": "FunctionDef name:get_max_depth arg:self arguments arg Return return:yes Call Call"
  },
  {
    "library": "scipy",
    "name": "GoldsteinPrice",
    "source_code": "class GoldsteinPrice(Benchmark):\n\n    def __init__(self, dimensions=2):\n        Benchmark.__init__(self, dimensions)\n        self._bounds = list(zip([-2.0] * self.N, [2.0] * self.N))\n        self.global_optimum = [[0.0, -1.0]]\n        self.fglob = 3.0\n\n    def fun(self, x, *args):\n        self.nfev += 1\n        a = 1 + (x[0] + x[1] + 1) ** 2 * (19 - 14 * x[0] + 3 * x[0] ** 2 - 14 * x[1] + 6 * x[0] * x[1] + 3 * x[1] ** 2)\n        b = 30 + (2 * x[0] - 3 * x[1]) ** 2 * (18 - 32 * x[0] + 12 * x[0] ** 2 + 48 * x[1] - 36 * x[0] * x[1] + 27 * x[1] ** 2)\n        return a * b",
    "docstring": "Goldstein-Price objective function. This class defines the Goldstein-Price [1]_ global optimization problem. This is a multimodal minimization problem defined as follows: .. math:: f_{\\text{GoldsteinPrice}}(x) = \\left[ 1 + (x_1 + x_2 + 1)^2 (19 - 14 x_1 + 3 x_1^2 - 14 x_2 + 6 x_1 x_2 + 3 x_2^2) \\right] \\left[ 30 + ( 2x_1 - 3 x_2)^2 (18 - 32 x_1 + 12 x_1^2 + 48 x_2 - 36 x_1 x_2 + 27 x_2^2) \\right] with :math: for :math:. *Global optimum*: :math: for :math: .. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions For Global Optimization Problems Int. Journal of Mathematical Modelling and Numerical Optimisation, 2013, 4, 150-194.",
    "type": "class",
    "file_path": "scipy\\benchmarks\\benchmarks\\go_benchmark_functions\\go_funcs_G.py",
    "ast_data": "ClassDef name:GoldsteinPrice FunctionDef name:__init__ arg:self arg:dimensions arguments arg arg Call Assign Call Call Assign Assign FunctionDef name:fun arg:self arg:x arguments arg arg arg Assign Assign Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "__init__",
    "source_code": "@_docstring.interpd\ndef __init__(self, xy, *, closed=True, **kwargs):\n    super().__init__(**kwargs)\n    self._closed = closed\n    self.set_xy(xy)",
    "docstring": "Parameters ---------- xy : (N, 2) array closed : bool, default: True Whether the polygon is closed (i.e., has identical start and end points). **kwargs %(Patch:kwdoc)s",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\patches.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:xy arguments arg arg arg arg Call Call Assign Call"
  },
  {
    "library": "pytorch",
    "name": "_register_sharded_op_on_local_tensor",
    "source_code": "def _register_sharded_op_on_local_tensor(op, early_stop_func=None, extra_check=None, customized_func=None):\n\n    @custom_sharding_spec_op(ChunkShardingSpec, op)\n    @_sharded_op_common(op, early_stop_func, extra_check)\n    def sharded_tensor_op_on_local_tensor(types, args=(), kwargs=None, pg=None):\n        st = args[0]\n        sharding_spec = st.sharding_spec()\n        if len(st.local_shards()) != 1:\n            raise TypeError(f\"torch function '{op.__name__}', with args: {args} and kwargs: {kwargs} only supported for single local tensor!\")\n        st_size = st.size()\n        if customized_func:\n            local_tensor, sharding_spec, st_size = customized_func(args, kwargs, pg)\n        else:\n            args = (st.local_tensor(), *args[1:])\n            local_tensor = op(*args, **kwargs)\n        return ShardedTensor._init_from_local_tensor(local_tensor.contiguous(), sharding_spec, st_size, process_group=pg, init_rrefs=st._init_rrefs)",
    "docstring": "Handles `` dispatch.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\_shard\\sharding_spec\\chunk_sharding_spec_ops\\_common.py",
    "ast_data": "FunctionDef name:_register_sharded_op_on_local_tensor arg:op arg:early_stop_func arg:extra_check arg:customized_func arguments arg arg arg arg FunctionDef name:sharded_tensor_op_on_local_tensor arg:types arg:args arg:kwargs arg:pg arguments arg arg arg arg Assign Assign Call If Compare Call Call Raise Call Assign Call If Assign Call Assign Call Assign Call Return return:yes Call Call Call Call"
  },
  {
    "library": "scikit-learn",
    "name": "predict_proba",
    "source_code": "def predict_proba(self, X):\n    check_is_fitted(self)\n    xp, is_array_api_compliant = get_namespace(X)\n    decision = self.decision_function(X)\n    if size(self.classes_) == 2:\n        proba = _expit(decision, xp)\n        return xp.stack([1 - proba, proba], axis=1)\n    else:\n        return softmax(decision)",
    "docstring": "Estimate probability. Parameters ---------- X : array-like of shape (n_samples, n_features) Input data. Returns ------- C : ndarray of shape (n_samples, n_classes) Estimated probabilities.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\discriminant_analysis.py",
    "ast_data": "FunctionDef name:predict_proba arg:self arg:X arguments arg arg Call Assign Call Assign Call If Compare Call Assign Call Return return:yes Call Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "_log_if_debug_on",
    "source_code": "def _log_if_debug_on(meth):\n\n    @functools.wraps(meth)\n    def wrapper(self, *args, **kwargs):\n        if debugPS:\n            self._pswriter.write(f'% {meth.__name__}\\n')\n        return meth(self, *args, **kwargs)\n    return wrapper",
    "docstring": "Wrap method *meth* to emit a PS comment with the method name, if the global flag is set.",
    "type": "function",
    "file_path": "matplotlib\\lib\\matplotlib\\backends\\backend_ps.py",
    "ast_data": "FunctionDef name:_log_if_debug_on arg:meth arguments arg FunctionDef name:wrapper arg:self arguments arg arg arg If Call Return return:yes Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "all_gather_object",
    "source_code": "def all_gather_object(self, object: T) -> list[T]:\n    if self.use_dist:\n        gather_objs = cast(list[T], [None] * dist.get_world_size(self.group))\n        dist.all_gather_object(object_list=gather_objs, obj=object, group=self.group)\n    else:\n        gather_objs = [object]\n    return gather_objs",
    "docstring": "Implement functionality similar to c10d::all_gather_object but without distributed enabled.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\checkpoint\\utils.py",
    "ast_data": "FunctionDef name:all_gather_object arg:self arg:object arguments arg arg If Assign Call Call Call Assign Return return:yes"
  },
  {
    "library": "django",
    "name": "ArchiveException",
    "source_code": "class ArchiveException(Exception):\n    pass",
    "docstring": "Base exception class for all archive errors.",
    "type": "class",
    "file_path": "django\\django\\utils\\archive.py",
    "ast_data": "ClassDef name:ArchiveException"
  },
  {
    "library": "pytorch",
    "name": "_create_onnx_supports_op_overload_table",
    "source_code": "def _create_onnx_supports_op_overload_table(registry) -> set[torch._ops.OperatorBase | Callable]:\n    table: set[torch._ops.OperatorBase | Callable] = set()\n    onnx_supported_aten_lookup_table = [k.split('::')[1].split('.')[0] for k in registry._all_registered_ops() if k.startswith('aten::')]\n    for op_namespace in (torch.ops.aten, torch.ops.prims):\n        attr_names = dir(op_namespace)\n        if op_namespace is torch.ops.aten:\n            attr_names += onnx_supported_aten_lookup_table\n        for attr_name in attr_names:\n            if not hasattr(op_namespace, attr_name):\n                continue\n            op_overload_packet = getattr(op_namespace, attr_name)\n            if not isinstance(op_overload_packet, torch._ops.OpOverloadPacket):\n                continue\n            for overload_name in op_overload_packet.overloads():\n                op_overload = getattr(op_overload_packet, overload_name)\n                internal_op_name = registration.OpName.from_qualified_name(qualified_name=op_overload.name())\n                if registry.is_registered_op(namespace=internal_op_name.namespace, op_name=internal_op_name.op_name, overload=internal_op_name.overload) or registry.is_registered_op(namespace=internal_op_name.namespace, op_name=internal_op_name.op_name, overload=None):\n                    table.add(op_overload)\n    return table",
    "docstring": "Creates a set of OperatorBase and Callable objects that represent ONNX-supported PyTorch operations. Args: registry (OnnxRegistry): The ONNX registry for PyTorch. Returns: A collection of OperatorBase and Callable objects representing ONNX-supported PyTorch operations.",
    "type": "function",
    "file_path": "pytorch\\torch\\onnx\\_internal\\fx\\decomposition_table.py",
    "ast_data": "FunctionDef name:_create_onnx_supports_op_overload_table arg:registry arguments arg Call Assign Call Call Call Call For Assign Call If Compare For If Call Assign Call If Call For Call Assign Call Assign Call Call If BoolOp Call Call Call Return return:yes"
  },
  {
    "library": "pandas",
    "name": "_get_setitem_indexer",
    "source_code": "def _get_setitem_indexer(self, key):\n    if self.name == 'loc':\n        self._ensure_listlike_indexer(key, axis=self.axis)\n    if isinstance(key, tuple):\n        for x in key:\n            check_dict_or_set_indexers(x)\n    if self.axis is not None:\n        key = _tupleize_axis_indexer(self.ndim, self.axis, key)\n    ax = self.obj._get_axis(0)\n    if isinstance(ax, MultiIndex) and self.name != 'iloc' and is_hashable(key) and (not isinstance(key, slice)):\n        with suppress(KeyError, InvalidIndexError):\n            return ax.get_loc(key)\n    if isinstance(key, tuple):\n        with suppress(IndexingError):\n            return self._convert_tuple(key)\n    if isinstance(key, range):\n        key = list(key)\n    return self._convert_to_indexer(key, axis=0)",
    "docstring": "Convert a potentially-label-based key into a positional indexer.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\indexing.py",
    "ast_data": "FunctionDef name:_get_setitem_indexer arg:self arg:key arguments arg arg If Compare Call If Call For Call If Compare Assign Call Assign Call If BoolOp Call Compare Call Call With Call Return return:yes Call If Call With Call Return return:yes Call If Call Assign Call Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "pdf",
    "source_code": "def pdf(self, x, mean=None, cov=1, allow_singular=False):\n    params = self._process_parameters(mean, cov, allow_singular)\n    dim, mean, cov_object = params\n    x = self._process_quantiles(x, dim)\n    out = np.exp(self._logpdf(x, mean, cov_object))\n    if np.any(cov_object.rank < dim):\n        out_of_bounds = ~cov_object._support_mask(x - mean)\n        out[out_of_bounds] = 0.0\n    return _squeeze_output(out)",
    "docstring": "Multivariate normal probability density function. Parameters ---------- x : array_like Quantiles, with the last axis of denoting the components. %(_mvn_doc_default_callparams)s Returns ------- pdf : ndarray or scalar Probability density function evaluated at Notes ----- %(_mvn_doc_callparams_note)s",
    "type": "method",
    "file_path": "scipy\\scipy\\stats\\_multivariate.py",
    "ast_data": "FunctionDef name:pdf arg:self arg:x arg:mean arg:cov arg:allow_singular arguments arg arg arg arg arg Assign Call Assign Assign Call Assign Call Call If Call Compare Assign Call Assign Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "_special_sparse_dot",
    "source_code": "def _special_sparse_dot(W, H, X):\n    if sp.issparse(X):\n        ii, jj = X.nonzero()\n        n_vals = ii.shape[0]\n        dot_vals = np.empty(n_vals)\n        n_components = W.shape[1]\n        batch_size = max(n_components, n_vals // n_components)\n        for start in range(0, n_vals, batch_size):\n            batch = slice(start, start + batch_size)\n            dot_vals[batch] = np.multiply(W[ii[batch], :], H.T[jj[batch], :]).sum(axis=1)\n        WH = sp.coo_matrix((dot_vals, (ii, jj)), shape=X.shape)\n        return WH.tocsr()\n    else:\n        return np.dot(W, H)",
    "docstring": "Computes np.dot(W, H), only where X is non zero.",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\decomposition\\_nmf.py",
    "ast_data": "FunctionDef name:_special_sparse_dot arg:W arg:H arg:X arguments arg arg arg If Call Assign Call Assign Assign Call Assign Assign Call For Call Assign Call Assign Call Call Assign Call Return return:yes Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "extract_kernel_fields",
    "source_code": "def extract_kernel_fields(es: object) -> dict[OperatorName, dict[str, Any]]:\n    fields: dict[OperatorName, dict[str, Any]] = defaultdict(dict)\n    for ei in es:\n        funcs = ei.get('func')\n        assert isinstance(funcs, str), f'not a str: {funcs}'\n        namespace_helper = NamespaceHelper.from_namespaced_entity(namespaced_entity=funcs, max_level=1)\n        opname = FunctionSchema.parse(namespace_helper.entity_name).name\n        for field in ET_FIELDS:\n            if (value := ei.get(field)) is not None:\n                fields[opname][field] = value\n    return fields",
    "docstring": "Given a loaded yaml representing a list of operators, extract the kernel key related fields indexed by the operator name.",
    "type": "function",
    "file_path": "pytorch\\torchgen\\executorch\\parse.py",
    "ast_data": "FunctionDef name:extract_kernel_fields arg:es arguments arg Call For Assign Call Call Assign Call Assign Call For If Compare Call Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_get_context_id",
    "source_code": "def _get_context_id(self, context):\n    if context in self._context_to_id:\n        return self._context_to_id[context]\n    graph_is_new = False\n    with self._context_lock:\n        if context not in self._context_to_id:\n            graph_is_new = True\n            context_id = _get_id()\n            self._context_to_id[context] = context_id\n    if graph_is_new:\n        self.get_writer().WriteDebuggedGraph(debug_event_pb2.DebuggedGraph(graph_id=context_id, graph_name=getattr(context, 'name', None), outer_context_id=self._get_outer_context_id(context)))\n    return self._context_to_id[context]",
    "docstring": "Get a unique ID for an op-construction context (e.g., a graph). If the graph has been encountered before, reuse the same unique ID. When encountering a new context (graph), this methods writes a DebugEvent proto with the debugged_graph field to the proper DebugEvent file. Args: context: A context to get the unique ID for. Must be hashable. E.g., a Graph object. Returns: A unique ID for the context.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\debug\\lib\\dumping_callback.py",
    "ast_data": "FunctionDef name:_get_context_id arg:self arg:context arguments arg arg If Compare Return return:yes Assign With If Compare Assign Assign Call Assign If Call Call Call Call Call Return return:yes"
  },
  {
    "library": "cherrypy",
    "name": "get_ha1_dict",
    "source_code": "def get_ha1_dict(user_ha1_dict):\n\n    def get_ha1(realm, username):\n        return user_ha1_dict.get(username)\n    return get_ha1",
    "docstring": "Return a get_ha1 function which obtains a HA1 password hash. user_ha1_dict is a dictionary of the form: {username : HA1}. If you want a dictionary-based authentication scheme, but with pre-computed HA1 hashes instead of plain-text passwords, use get_ha1_dict(my_userha1_dict) as the value for the get_ha1 argument to digest_auth().",
    "type": "function",
    "file_path": "cherrypy\\cherrypy\\lib\\auth_digest.py",
    "ast_data": "FunctionDef name:get_ha1_dict arg:user_ha1_dict arguments arg FunctionDef name:get_ha1 arg:realm arg:username arguments arg arg Return return:yes Call Return return:yes"
  },
  {
    "library": "scipy",
    "name": "iterate_all",
    "source_code": "def iterate_all(self):\n    if self.disp:\n        logging.info('Splitting first generation')\n    while not self.stop_global:\n        if self.break_routine:\n            break\n        self.iterate()\n        self.stopping_criteria()\n    if not self.minimize_every_iter:\n        if not self.break_routine:\n            self.find_minima()\n    self.res.nit = self.iters_done\n    self.fn = self.HC.V.nfev",
    "docstring": "Construct for iterations. If uniform sampling is used, every iteration adds 'n' sampling points. Iterations if a stopping criteria (e.g., sampling points or processing time) has been met.",
    "type": "method",
    "file_path": "scipy\\scipy\\optimize\\_shgo.py",
    "ast_data": "FunctionDef name:iterate_all arg:self arguments arg If Call While If Call Call If If Call Assign Assign"
  },
  {
    "library": "sphinx",
    "name": "write_doc",
    "source_code": "def write_doc(self, docname: str, doctree: nodes.document) -> None:\n    raise NotImplementedError",
    "docstring": "Write the output file for the document :param docname: the :term:. :param doctree: defines the content to be written. The output filename must be determined within this method, typically by calling :meth: or :meth:.",
    "type": "method",
    "file_path": "sphinx\\sphinx\\builders\\__init__.py",
    "ast_data": "FunctionDef name:write_doc arg:self arg:docname arg:doctree arguments arg arg arg Raise"
  },
  {
    "library": "django",
    "name": "area",
    "source_code": "@property\ndef area(self):\n    return capi.get_area(self.ptr)",
    "docstring": "Return the area for a LinearRing, Polygon, or MultiPolygon; 0 otherwise.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\gdal\\geometries.py",
    "ast_data": "FunctionDef name:area arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "run",
    "source_code": "def run(self, fetches, feed_dict=None, options=None, run_metadata=None):\n    raise NotImplementedError('run')",
    "docstring": "Runs operations in the session. See for details.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\client\\session.py",
    "ast_data": "FunctionDef name:run arg:self arg:fetches arg:feed_dict arg:options arg:run_metadata arguments arg arg arg arg arg Raise Call"
  },
  {
    "library": "matplotlib",
    "name": "grid",
    "source_code": "def grid(self, visible=None, which='major', axis='both', **kwargs):\n    super().grid(visible, which=which, axis=axis, **kwargs)\n    if not self._axisline_on:\n        return\n    if visible is None:\n        visible = self.axes.xaxis._minor_tick_kw['gridOn'] or self.axes.xaxis._major_tick_kw['gridOn'] or self.axes.yaxis._minor_tick_kw['gridOn'] or self.axes.yaxis._major_tick_kw['gridOn']\n    self.gridlines.set(which=which, axis=axis, visible=visible)\n    self.gridlines.set(**kwargs)",
    "docstring": "Toggle the gridlines, and optionally set the properties of the lines.",
    "type": "method",
    "file_path": "matplotlib\\lib\\mpl_toolkits\\axisartist\\axislines.py",
    "ast_data": "FunctionDef name:grid arg:self arg:visible arg:which arg:axis arguments arg arg arg arg arg Call Call If Return return:no If Compare Assign BoolOp Call Call"
  },
  {
    "library": "tensorflow",
    "name": "is_chief",
    "source_code": "def is_chief(cluster_spec=None, task_type=None, task_id=None):\n    if has_worker_context():\n        return dc_context.get_current_worker_context().is_chief\n    _validate_cluster_spec(cluster_spec, task_type, task_id)\n    cluster_spec = normalize_cluster_spec(cluster_spec).as_dict()\n    if task_type == 'chief' or task_type == 'evaluator':\n        return True\n    if 'chief' not in cluster_spec and task_type == 'worker' and (task_id == 0):\n        return True\n    return False",
    "docstring": "Returns whether the given task is chief in the cluster. Since there is at most one evaluator and the evaluator itself should be independent of the training cluster, the evaluator job is also a chief job on its own. If this is currently running under a of distribute coordinator, the arguments can be omitted as the result is already available. Args: cluster_spec: a dict, or object specifying the cluster configurations. task_type: the task type in the cluster. task_id: the task id in the cluster. Returns: a boolean indicating whether the given task is chief. Raises: ValueError: if is not in the or exceeds the maximum id of the .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\multi_worker_util.py",
    "ast_data": "FunctionDef name:is_chief arg:cluster_spec arg:task_type arg:task_id arguments arg arg arg If Call Return return:yes Call Call Assign Call Call If BoolOp Compare Compare Return return:yes If BoolOp Compare Compare Compare Return return:yes Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_set_attr",
    "source_code": "def _set_attr(self, attr_name, attr_value) -> None:\n    buf = pywrap_tf_session.TF_NewBufferFromString(compat.as_bytes(attr_value.SerializeToString()))\n    try:\n        self._set_attr_with_buf(attr_name, buf)\n    finally:\n        pywrap_tf_session.TF_DeleteBuffer(buf)",
    "docstring": "Private method used to set an attribute in the node_def.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\ops.py",
    "ast_data": "FunctionDef name:_set_attr arg:self arg:attr_name arg:attr_value arguments arg arg arg Assign Call Call Call Try Call Call"
  },
  {
    "library": "scipy",
    "name": "logpmf",
    "source_code": "def logpmf(self, x, alpha, n):\n    a, Sa, n, x = _dirichlet_multinomial_check_parameters(alpha, n, x)\n    out = np.asarray(loggamma(Sa) + loggamma(n + 1) - loggamma(n + Sa))\n    out += (loggamma(x + a) - (loggamma(a) + loggamma(x + 1))).sum(axis=-1)\n    np.place(out, n != x.sum(axis=-1), -np.inf)\n    return out[()]",
    "docstring": "The log of the probability mass function. Parameters ---------- x: ndarray Category counts (non-negative integers). Must be broadcastable with shape parameter ``. If multidimensional, the last axis must correspond with the categories. %(_dirichlet_mn_doc_default_callparams)s Returns ------- out: ndarray or scalar Log of the probability mass function.",
    "type": "method",
    "file_path": "scipy\\scipy\\stats\\_multivariate.py",
    "ast_data": "FunctionDef name:logpmf arg:self arg:x arg:alpha arg:n arguments arg arg arg arg Assign Call Assign Call Call Call Call Call Call Call Call Call Compare Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "cuda",
    "source_code": "def cuda(self, device: Optional[Union[int, device]]=None) -> Self:\n    return self._apply(lambda t: t.cuda(device))",
    "docstring": "Move all model parameters and buffers to the GPU. This also makes associated parameters and buffers different objects. So it should be called before constructing the optimizer if the module will live on GPU while being optimized. .. note:: This method modifies the module in-place. Args: device (int, optional): if specified, all parameters will be copied to that device Returns: Module: self",
    "type": "method",
    "file_path": "pytorch\\torch\\nn\\modules\\module.py",
    "ast_data": "FunctionDef name:cuda arg:self arg:device arguments arg arg Return return:yes Call arguments arg Call"
  },
  {
    "library": "pandas",
    "name": "_maybe_null_out",
    "source_code": "def _maybe_null_out(result: np.ndarray | float | NaTType, axis: AxisInt | None, mask: npt.NDArray[np.bool_] | None, shape: tuple[int, ...], min_count: int=1, datetimelike: bool=False) -> np.ndarray | float | NaTType:\n    if mask is None and min_count == 0:\n        return result\n    if axis is not None and isinstance(result, np.ndarray):\n        if mask is not None:\n            null_mask = mask.shape[axis] - mask.sum(axis) - min_count < 0\n        else:\n            below_count = shape[axis] - min_count < 0\n            new_shape = shape[:axis] + shape[axis + 1:]\n            null_mask = np.broadcast_to(below_count, new_shape)\n        if np.any(null_mask):\n            if datetimelike:\n                result[null_mask] = iNaT\n            elif is_numeric_dtype(result):\n                if np.iscomplexobj(result):\n                    result = result.astype('c16')\n                elif not is_float_dtype(result):\n                    result = result.astype('f8', copy=False)\n                result[null_mask] = np.nan\n            else:\n                result[null_mask] = None\n    elif result is not NaT:\n        if check_below_min_count(shape, mask, min_count):\n            result_dtype = getattr(result, 'dtype', None)\n            if is_float_dtype(result_dtype):\n                result = result_dtype.type('nan')\n            else:\n                result = np.nan\n    return result",
    "docstring": "Returns ------- Dtype The product of all elements on a given axis. ( NaNs are treated as 1)",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\nanops.py",
    "ast_data": "FunctionDef name:_maybe_null_out arg:result arg:axis arg:mask arg:shape arg:min_count arg:datetimelike arguments arg arg arg arg arg arg If BoolOp Compare Compare Return return:yes If BoolOp Compare Call If Compare Assign Compare Call Assign Compare Assign Assign Call If Call If Assign If Call If Call Assign Call If Call Assign Call Assign Assign If Compare If Call Assign Call If Call Assign Call Assign Return return:yes"
  },
  {
    "library": "pandas",
    "name": "asfreq",
    "source_code": "@final\n@doc(klass=_shared_doc_kwargs['klass'])\ndef asfreq(self, freq: Frequency, method: FillnaOptions | None=None, how: Literal['start', 'end'] | None=None, normalize: bool=False, fill_value: Hashable | None=None) -> Self:\n    from pandas.core.resample import asfreq\n    return asfreq(self, freq, method=method, how=how, normalize=normalize, fill_value=fill_value)",
    "docstring": "Convert time series to specified frequency. Returns the original data conformed to a new index with the specified frequency. If the index of this {klass} is a :class:, the new index is the result of transforming the original index with :meth: (so the original index will map one-to-one to the new index). Otherwise, the new index will be equivalent to `pandas.date_rangeresamplethis link`. >>> df.asfreq(freq=\"30s\", method=\"bfill\") s 2000-01-01 00:00:00 0.0 2000-01-01 00:00:30 NaN 2000-01-01 00:01:00 NaN 2000-01-01 00:01:30 2.0 2000-01-01 00:02:00 2.0 2000-01-01 00:02:30 3.0 2000-01-01 00:03:00 3.0",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\generic.py",
    "ast_data": "FunctionDef name:asfreq arg:self arg:freq arg:method arg:how arg:normalize arg:fill_value arguments arg arg arg arg arg arg Return return:yes Call Call"
  },
  {
    "library": "pandas",
    "name": "PyperclipWindowsException",
    "source_code": "class PyperclipWindowsException(PyperclipException):\n\n    def __init__(self, message: str) -> None:\n        message += f' ({ctypes.WinError()})'\n        super().__init__(message)",
    "docstring": "Exception raised when clipboard functionality is unsupported by Windows. Access to the clipboard handle would be denied due to some other window process is accessing it.",
    "type": "class",
    "file_path": "pandas\\pandas\\errors\\__init__.py",
    "ast_data": "ClassDef name:PyperclipWindowsException FunctionDef name:__init__ arg:self arg:message arguments arg arg Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "_run_metrics",
    "source_code": "def _run_metrics(self, metrics_lock):\n    warmup_response_time = None\n    response_times = []\n    for i in range(self.num_iters + 1):\n        response, request_time = self.response_queue.get()\n        if warmup_response_time is None:\n            self.warmup_event.set()\n            warmup_response_time = time.time() - request_time\n        else:\n            response_times.append(time.time() - request_time)\n    self.end_recv_time = time.time()\n    self.poll_gpu = False\n    response_times = np.array(response_times)\n    with metrics_lock:\n        self.metrics_dict['warmup_latency'] = warmup_response_time\n        self.metrics_dict['average_latency'] = response_times.mean()\n        self.metrics_dict['max_latency'] = response_times.max()\n        self.metrics_dict['min_latency'] = response_times.min()\n        self.metrics_dict['throughput'] = self.num_iters * self.batch_size / (self.end_recv_time - self.start_send_time)",
    "docstring": "This function will poll the response queue until it has received all responses. It records the startup latency, the average, max, min latency as well as througput of requests.",
    "type": "method",
    "file_path": "pytorch\\benchmarks\\inference\\server.py",
    "ast_data": "FunctionDef name:_run_metrics arg:self arg:metrics_lock arguments arg arg Assign Assign For Call Assign Call If Compare Call Assign Call Call Call Assign Call Assign Assign Call With Assign Assign Call Assign Call Assign Call Assign"
  },
  {
    "library": "pytorch",
    "name": "_use_cuda_memory_pool_manager",
    "source_code": "@contextlib.contextmanager\ndef _use_cuda_memory_pool_manager(device: int, mem_pool: tuple[int, int], stream: torch.cuda.Stream) -> Generator[None, None, None]:\n    torch.cuda.synchronize()\n    stream.wait_stream(torch.cuda.current_stream())\n    with torch.cuda.stream(stream), torch.device(device):\n        torch._C._cuda_beginAllocateCurrentThreadToPool(device, mem_pool)\n        try:\n            yield\n        finally:\n            torch._C._cuda_endAllocateToPool(device, mem_pool)\n            torch._C._cuda_releasePool(device, mem_pool)\n    torch.cuda.current_stream().wait_stream(stream)",
    "docstring": "Context manager to use cuda graph pool for new allocations. If you use this manager all cudagraph tensors in use should be reflected in the allocator or they will be overwritten. existing_graph should already have been used in a capture, and the mem_pool must already exist, because this manager will not preserve a reference to the pool which keeps it alive.",
    "type": "function",
    "file_path": "pytorch\\torch\\_inductor\\cudagraph_trees.py",
    "ast_data": "FunctionDef name:_use_cuda_memory_pool_manager arg:device arg:mem_pool arg:stream arguments arg arg arg Call Call Call With Call Call Call Try Call Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "get_model_hash",
    "source_code": "def get_model_hash(model):\n    hash_value = 0\n    for subgraph in model.subgraphs:\n        if subgraph.operators is not None:\n            hash_value = update_hash_with_primitive_value(hash_value, len(subgraph.operators))\n            for operator in subgraph.operators:\n                if operator.inputs is not None:\n                    hash_value = update_hash_with_array(hash_value, operator.inputs)\n                if operator.outputs is not None:\n                    hash_value = update_hash_with_array(hash_value, operator.outputs)\n        if subgraph.tensors is not None:\n            hash_value = update_hash_with_primitive_value(hash_value, len(subgraph.tensors))\n            for tensor in subgraph.tensors:\n                if tensor.buffer is not None:\n                    buffer = model.buffers[tensor.buffer]\n                    if buffer.data is not None:\n                        hash_value = update_hash_with_primitive_value(hash_value, len(buffer.data))\n                if tensor.shape is not None:\n                    hash_value = update_hash_with_array(hash_value, tensor.shape)\n        if subgraph.inputs is not None:\n            hash_value = update_hash_with_primitive_value(hash_value, len(subgraph.inputs))\n        if subgraph.outputs is not None:\n            hash_value = update_hash_with_primitive_value(hash_value, len(subgraph.outputs))\n    return hash_value",
    "docstring": "Calculate a 64-bit integer hash for a TensorFlow Lite model based on its structure. Args: model: A TensorFlow Lite model object. Returns: int: A 64-bit integer hash value representing the model structure.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\lite\\python\\util.py",
    "ast_data": "FunctionDef name:get_model_hash arg:model arguments arg Assign For If Compare Assign Call Call For If Compare Assign Call If Compare Assign Call If Compare Assign Call Call For If Compare Assign If Compare Assign Call Call If Compare Assign Call If Compare Assign Call Call If Compare Assign Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "to_json",
    "source_code": "def to_json(self, **kwargs):\n    model_config = self._updated_config()\n    return json.dumps(model_config, default=json_utils.get_json_type, **kwargs)",
    "docstring": "Returns a JSON string containing the network configuration. To load a network from a JSON save file, use . Args: **kwargs: Additional keyword arguments to be passed to . Returns: A JSON string.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\training.py",
    "ast_data": "FunctionDef name:to_json arg:self arguments arg arg Assign Call Return return:yes Call"
  },
  {
    "library": "django",
    "name": "clear_ordering",
    "source_code": "def clear_ordering(self, force=False, clear_default=True):\n    if not force and (self.is_sliced or self.distinct_fields or self.select_for_update):\n        return\n    self.order_by = ()\n    self.extra_order_by = ()\n    if clear_default:\n        self.default_ordering = False",
    "docstring": "Remove any ordering settings if the current query allows it without side effects, set 'force' to True to clear the ordering regardless. If 'clear_default' is True, there will be no ordering in the resulting query (not even the model's default).",
    "type": "method",
    "file_path": "django\\django\\db\\models\\sql\\query.py",
    "ast_data": "FunctionDef name:clear_ordering arg:self arg:force arg:clear_default arguments arg arg arg If BoolOp BoolOp Return return:no Assign Assign If Assign"
  },
  {
    "library": "tensorflow",
    "name": "reduce_non_distributed_value",
    "source_code": "def reduce_non_distributed_value(reduce_op, value, destinations, num_replicas_in_graph, canonicalize_devices=True):\n    if isinstance(value, value_lib.DistributedValues):\n        raise ValueError('You are passing a `DistributedValues` to `reduce_non_distributed_value`, which is not allowed.')\n    if not tensor_util.is_tf_type(value) and np.all(value == 0):\n        return np.zeros(value.shape, dtype=value.dtype)\n    if reduce_op == reduce_util.ReduceOp.MEAN:\n        return value\n    elif num_replicas_in_graph != 1:\n        raise ValueError('A non-DistributedValues value %s cannot be reduced with the given reduce op %s.' % (value, reduce_op))\n    else:\n        validate_destinations(destinations)\n        return simple_broadcast(value, destinations, canonicalize_devices=canonicalize_devices)",
    "docstring": "Reduce a non-DistributedValue to .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\cross_device_ops.py",
    "ast_data": "FunctionDef name:reduce_non_distributed_value arg:reduce_op arg:value arg:destinations arg:num_replicas_in_graph arg:canonicalize_devices arguments arg arg arg arg arg If Call Raise Call If BoolOp Call Call Compare Return return:yes Call If Compare Return return:yes If Compare Raise Call Call Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "_get_colors_from_color_type",
    "source_code": "def _get_colors_from_color_type(color_type: str, num_colors: int) -> list[Color]:\n    if color_type == 'default':\n        prop_cycle = mpl.rcParams['axes.prop_cycle']\n        return [c['color'] for c in itertools.islice(prop_cycle, min(num_colors, len(prop_cycle)))]\n    elif color_type == 'random':\n        return np.random.default_rng(num_colors).random((num_colors, 3)).tolist()\n    else:\n        raise ValueError(\"color_type must be either 'default' or 'random'\")",
    "docstring": "Get colors from user input color type.",
    "type": "function",
    "file_path": "pandas\\pandas\\plotting\\_matplotlib\\style.py",
    "ast_data": "FunctionDef name:_get_colors_from_color_type arg:color_type arg:num_colors arguments arg arg If Compare Assign Return return:yes Call Call Call If Compare Return return:yes Call Call Call Raise Call"
  },
  {
    "library": "scrapy",
    "name": "dropped",
    "source_code": "def dropped(self, item: Any, exception: BaseException, response: Response | None, spider: Spider) -> LogFormatterResult:\n    if (level := getattr(exception, 'log_level', None)) is None:\n        level = spider.crawler.settings['DEFAULT_DROPITEM_LOG_LEVEL']\n    if isinstance(level, str):\n        level = getattr(logging, level)\n    return {'level': level, 'msg': DROPPEDMSG, 'args': {'exception': exception, 'item': item}}",
    "docstring": "Logs a message when an item is dropped while it is passing through the item pipeline.",
    "type": "method",
    "file_path": "scrapy\\scrapy\\logformatter.py",
    "ast_data": "FunctionDef name:dropped arg:self arg:item arg:exception arg:response arg:spider arguments arg arg arg arg arg If Compare Call Assign If Call Assign Call Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "score",
    "source_code": "def score(self, X, y, sample_weight=None):\n    raw_prediction = self._linear_predictor(X)\n    y = check_array(y, dtype=raw_prediction.dtype, order='C', ensure_2d=False)\n    if sample_weight is not None:\n        sample_weight = _check_sample_weight(sample_weight, X, dtype=y.dtype)\n    base_loss = self._base_loss\n    if not base_loss.in_y_true_range(y):\n        raise ValueError(f'Some value(s) of y are out of the valid range of the loss {base_loss.__name__}.')\n    constant = np.average(base_loss.constant_to_optimal_zero(y_true=y, sample_weight=None), weights=sample_weight)\n    deviance = base_loss(y_true=y, raw_prediction=raw_prediction, sample_weight=sample_weight, n_threads=1)\n    y_mean = base_loss.link.link(np.average(y, weights=sample_weight))\n    deviance_null = base_loss(y_true=y, raw_prediction=np.tile(y_mean, y.shape[0]), sample_weight=sample_weight, n_threads=1)\n    return 1 - (deviance + constant) / (deviance_null + constant)",
    "docstring": "Compute D^2, the percentage of deviance explained. D^2 is a generalization of the coefficient of determination R^2. R^2 uses squared error and D^2 uses the deviance of this GLM, see the :ref:. D^2 is defined as :math:, :math: is the null deviance, i.e. the deviance of a model with intercept alone, which corresponds to :math:. The mean :math: is averaged by sample_weight. Best possible score is 1.0 and it can be negative (because the model can be arbitrarily worse). Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) Test samples. y : array-like of shape (n_samples,) True values of target. sample_weight : array-like of shape (n_samples,), default=None Sample weights. Returns ------- score : float D^2 of self.predict(X) w.r.t. y.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\linear_model\\_glm\\glm.py",
    "ast_data": "FunctionDef name:score arg:self arg:X arg:y arg:sample_weight arguments arg arg arg arg Assign Call Assign Call If Compare Assign Call Assign If Call Raise Call Assign Call Call Assign Call Assign Call Call Assign Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "get_device_partition_stats",
    "source_code": "def get_device_partition_stats(partitions: list[Partition], devices: list[Device]) -> tuple[dict[Device, list[Partition]], dict[Device, int], list[Partition]]:\n    logical_id_to_device = get_logical_id_to_device(devices)\n    device_to_partitions: dict[Device, list[Partition]] = {}\n    device_to_left_mem_bytes: dict[Device, int] = {}\n    for d in devices:\n        device_to_partitions[d] = []\n        device_to_left_mem_bytes[d] = d.available_mem_bytes\n    no_device_partitions = []\n    for partition in partitions:\n        if partition.logical_device_ids != []:\n            for logical_id in partition.logical_device_ids:\n                device = logical_id_to_device[logical_id]\n                device_to_partitions[device].append(partition)\n                device_to_left_mem_bytes[device] -= partition.used_mem_bytes\n        else:\n            no_device_partitions.append(partition)\n    return (device_to_partitions, device_to_left_mem_bytes, no_device_partitions)",
    "docstring": "Given a list of partitions and a list of devices, returns: 1. A mapping from device to partitions on it; 2. A mapping from device to its remaining memory size; 3. A list of partitions that do not have a device.",
    "type": "function",
    "file_path": "pytorch\\torch\\fx\\experimental\\accelerator_partitioner.py",
    "ast_data": "FunctionDef name:get_device_partition_stats arg:partitions arg:devices arguments arg arg Assign Call For Assign Assign Assign For If Compare For Assign Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_disallow_eval_train",
    "source_code": "def _disallow_eval_train(model: GraphModule):\n    error_message = '\\n        Calling train() or eval() is not supported for exported models.\\n        Please call `torch.ao.quantization.move_exported_model_to_train(model)` (or eval) instead.\\n\\n        If you cannot replace the calls to `model.train()` and `model.eval()`, you may override\\n        the behavior for these methods by calling `torch.ao.quantization.allow_exported_model_train_eval(model)`,\\n        which does the above automatically for you. Note that this has limited effect on switching\\n        behavior between train and eval modes, and should be used only for special ops such as dropout\\n        and batchnorm.\\n        '\n\n    def _train(self, mode: bool=True):\n        raise NotImplementedError(error_message)\n\n    def _eval(self, mode: bool=True):\n        raise NotImplementedError(error_message)\n    model.train = types.MethodType(_train, model)\n    model.eval = types.MethodType(_eval, model)\n    return model",
    "docstring": "Disallow calling or on the given GraphModule. This is useful for exported models, where these methods don't actually behave as expected.",
    "type": "function",
    "file_path": "pytorch\\torch\\ao\\quantization\\pt2e\\utils.py",
    "ast_data": "FunctionDef name:_disallow_eval_train arg:model arguments arg Assign FunctionDef name:_train arg:self arg:mode arguments arg arg Raise Call FunctionDef name:_eval arg:self arg:mode arguments arg arg Raise Call Assign Call Assign Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "Stateful",
    "source_code": "@runtime_checkable\nclass Stateful(Protocol):\n\n    def state_dict(self) -> dict[str, Any]:\n        ...\n\n    def load_state_dict(self, state_dict: dict[str, Any]) -> None:\n        ...",
    "docstring": "Stateful protocol for objects that can be checkpointed and restored.",
    "type": "class",
    "file_path": "pytorch\\torch\\distributed\\checkpoint\\stateful.py",
    "ast_data": "ClassDef name:Stateful FunctionDef name:state_dict arg:self arguments arg FunctionDef name:load_state_dict arg:self arg:state_dict arguments arg arg"
  },
  {
    "library": "pytorch",
    "name": "var_getattr",
    "source_code": "def var_getattr(self, tx: 'InstructionTranslator', name: str) -> 'VariableTracker':\n    value = self.const_getattr(tx, name)\n    if not variables.ConstantVariable.is_literal(value):\n        raise NotImplementedError\n    source = self.source and AttrSource(self.source, name)\n    if source:\n        install_guard(source.make_guard(GuardBuilder.CONSTANT_MATCH))\n    return variables.ConstantVariable.create(value, source=source)",
    "docstring": "getattr(self, name) returning a new variable",
    "type": "method",
    "file_path": "pytorch\\torch\\_dynamo\\variables\\base.py",
    "ast_data": "FunctionDef name:var_getattr arg:self arg:tx arg:name arguments arg arg arg Assign Call If Call Raise Assign BoolOp Call If Call Call Return return:yes Call"
  },
  {
    "library": "django",
    "name": "can_fast_delete",
    "source_code": "def can_fast_delete(self, *args, **kwargs):\n    return False",
    "docstring": "Always load related objects to display them when showing confirmation.",
    "type": "method",
    "file_path": "django\\django\\contrib\\contenttypes\\management\\commands\\remove_stale_contenttypes.py",
    "ast_data": "FunctionDef name:can_fast_delete arg:self arguments arg arg arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "__init__",
    "source_code": "def __init__(self, res, rhs1, rhs2):\n    assert is_dim(res)\n    assert is_dim(rhs1)\n    assert is_dim(rhs2)\n    self.res = res\n    self.rhs1 = rhs1\n    self.rhs2 = rhs2",
    "docstring": ":param res: Dimension variable to store the result :param rhs1: dimension variable 1 :param rhs2: dimension variable 2",
    "type": "method",
    "file_path": "pytorch\\torch\\fx\\experimental\\migrate_gradual_types\\constraint.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:res arg:rhs1 arg:rhs2 arguments arg arg arg arg Call Call Call Assign Assign Assign"
  },
  {
    "library": "pandas",
    "name": "to_dense",
    "source_code": "def to_dense(self) -> Series:\n    from pandas import Series\n    return Series(self._parent.array.to_dense(), index=self._parent.index, name=self._parent.name, copy=False)",
    "docstring": "Convert a Series from sparse values to dense. Returns ------- Series: A Series with the same values, stored as a dense array. Examples -------- >>> series = pd.Series(pd.arrays.SparseArray([0, 1, 0])) >>> series 0 0 1 1 2 0 dtype: Sparse[int64, 0] >>> series.sparse.to_dense() 0 0 1 1 2 0 dtype: int64",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\arrays\\sparse\\accessor.py",
    "ast_data": "FunctionDef name:to_dense arg:self arguments arg Return return:yes Call Call"
  },
  {
    "library": "scikit-learn",
    "name": "predict",
    "source_code": "def predict(self, T):\n    T = column_or_1d(T)\n    return expit(-(self.a_ * T + self.b_))",
    "docstring": "Predict new data by linear interpolation. Parameters ---------- T : array-like of shape (n_samples,) Data to predict from. Returns ------- T_ : ndarray of shape (n_samples,) The predicted data.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\calibration.py",
    "ast_data": "FunctionDef name:predict arg:self arg:T arguments arg arg Assign Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "random_normal",
    "source_code": "@tf_export('random.normal', v1=['random.normal', 'random_normal'])\n@dispatch.add_dispatch_support\n@deprecation.deprecated_endpoints('random_normal')\ndef random_normal(shape, mean=0.0, stddev=1.0, dtype=dtypes.float32, seed=None, name=None):\n    with ops.name_scope(name, 'random_normal', [shape, mean, stddev]) as name:\n        shape_tensor = shape_util.shape_tensor(shape)\n        mean_tensor = ops.convert_to_tensor(mean, dtype=dtype, name='mean')\n        stddev_tensor = ops.convert_to_tensor(stddev, dtype=dtype, name='stddev')\n        seed1, seed2 = random_seed.get_seed(seed)\n        rnd = gen_random_ops.random_standard_normal(shape_tensor, dtype, seed=seed1, seed2=seed2)\n        mul = rnd * stddev_tensor\n        value = math_ops.add(mul, mean_tensor, name=name)\n        shape_util.maybe_set_static_shape(value, shape)\n        return value",
    "docstring": "Outputs random values from a normal distribution. Example that generates a new set of random values every time: >>> tf.random.set_seed(5); >>> tf.random.normal([4], 0, 1, tf.float32) Example that outputs a reproducible result: >>> tf.random.set_seed(5); >>> tf.random.normal([2,2], 0, 1, tf.float32, seed=1) In this case, we are setting both the global and operation-level seed to ensure this result is reproducible. See for more information. Args: shape: A 1-D integer Tensor or Python array. The shape of the output tensor. mean: A Tensor or Python value of type , broadcastable with . The mean of the normal distribution. stddev: A Tensor or Python value of type , broadcastable with . The standard deviation of the normal distribution. dtype: The float type of the output: , , , . Defaults to . seed: A Python integer. Used to create a random seed for the distribution. See for behavior. name: A name for the operation (optional). Returns: A tensor of the specified shape filled with random normal values.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\random_ops.py",
    "ast_data": "FunctionDef name:random_normal arg:shape arg:mean arg:stddev arg:dtype arg:seed arg:name arguments arg arg arg arg arg arg With Call Assign Call Assign Call Assign Call Assign Call Assign Call Assign Assign Call Call Return return:yes Call Call"
  },
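A minimal usage sketch for the tf.random.normal entry above, assuming TensorFlow 2.x eager execution; the shapes and seed values are illustrative only:

```python
import tensorflow as tf

# Global seed plus an op-level seed makes the draw reproducible.
tf.random.set_seed(5)
reproducible = tf.random.normal([2, 2], mean=0.0, stddev=1.0,
                                dtype=tf.float32, seed=1)

# Without an op-level seed, each call yields fresh values.
fresh = tf.random.normal([4], mean=0.0, stddev=1.0)

print(reproducible.numpy())
print(fresh.numpy())
```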
  {
    "library": "pytorch",
    "name": "get",
    "source_code": "def get(self, o: Any) -> Any:\n    return getattr(o, self.name)()",
    "docstring": "Call the method on object",
    "type": "method",
    "file_path": "pytorch\\torch\\fx\\experimental\\symbolic_shapes.py",
    "ast_data": "FunctionDef name:get arg:self arg:o arguments arg arg Return return:yes Call Call"
  },
  {
    "library": "scikit-learn",
    "name": "set_output",
    "source_code": "def set_output(self, *, transform=None):\n    if not hasattr(self, '_sklearn_output_config'):\n        self._sklearn_output_config = {}\n    self._sklearn_output_config['transform'] = transform\n    return self",
    "docstring": "Set output container. See :ref: for an example on how to use the API. Parameters ---------- transform : {\"default\", \"pandas\", \"polars\"}, default=None Configure output of and . - : Default output format of a transformer - : DataFrame output - : Polars output - : Transform configuration is unchanged .. versionadded:: 1.4 option was added. Returns ------- self : estimator instance Estimator instance.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\preprocessing\\_function_transformer.py",
    "ast_data": "FunctionDef name:set_output arg:self arguments arg arg If Call Assign Assign Return return:yes"
  },
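A short usage sketch for set_output above, assuming scikit-learn >= 1.2 with pandas installed; the log1p transformer is illustrative:

```python
import numpy as np
from sklearn.preprocessing import FunctionTransformer

# Request pandas DataFrames instead of numpy arrays from transform().
ft = FunctionTransformer(np.log1p).set_output(transform="pandas")

X = np.array([[1.0, 2.0], [3.0, 4.0]])
out = ft.fit_transform(X)
print(type(out).__name__)  # DataFrame
```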
  {
    "library": "pytorch",
    "name": "seed",
    "source_code": "def seed() -> None:\n    _get_default_mps_generator().seed()",
    "docstring": "Sets the seed for generating random numbers to a random number.",
    "type": "function",
    "file_path": "pytorch\\torch\\mps\\__init__.py",
    "ast_data": "FunctionDef name:seed arguments Call Call"
  },
  {
    "library": "scipy",
    "name": "DijkstraDensity",
    "source_code": "class DijkstraDensity(Benchmark):\n    params = [[10, 100, 1000], [0.1, 0.3, 0.5, 0.9]]\n    param_names = ['n', 'density']\n\n    def setup(self, n, density):\n        if n >= 1000 and (not is_xslow()):\n            raise NotImplementedError('skipped')\n        rng = np.random.default_rng(42)\n        self.graph = scipy.sparse.random_array(shape=(n, n), density=density, format='csr', rng=rng, data_sampler=lambda size: rng.integers(100, size=size, dtype=np.uint32))\n\n    def time_test_shortest_path(self, n, density):\n        shortest_path(self.graph, method='D', directed=False)",
    "docstring": "Benchmark performance of Dijkstra, adapted from [^1] [^1]:",
    "type": "class",
    "file_path": "scipy\\benchmarks\\benchmarks\\sparse_csgraph_dijkstra.py",
    "ast_data": "ClassDef name:DijkstraDensity Assign Assign FunctionDef name:setup arg:self arg:n arg:density arguments arg arg arg If BoolOp Compare Call Raise Call Assign Call Assign Call arguments arg Call FunctionDef name:time_test_shortest_path arg:self arg:n arg:density arguments arg arg arg Call"
  },
  {
    "library": "tensorflow",
    "name": "isanytargetmethod",
    "source_code": "def isanytargetmethod(object):\n    decorators, target = tf_decorator.unwrap(object)\n    for decorator in decorators:\n        if _inspect.ismethod(decorator.decorated_target):\n            return True\n    while isinstance(target, functools.partial):\n        target = target.func\n    return callable(target) and (not _inspect.isfunction(target))",
    "docstring": "Checks if or a TF Decorator wrapped target contains self or cls. This function could be used along with to determine if the first argument of argspec is self or cls. If the first argument is self or cls, it needs to be excluded from argspec when we compare the argspec to the input arguments and, if provided, the tf.function input_signature. Like and python , it does not unwrap python decorators. Args: obj: An method, function, or functool.partial, possibly decorated by TFDecorator. Returns: A bool indicates if or any target along the chain of TF decorators is a method.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\util\\tf_inspect.py",
    "ast_data": "FunctionDef name:isanytargetmethod arg:object arguments arg Assign Call For If Call Return return:yes While Call Assign Return return:yes BoolOp Call Call"
  },
  {
    "library": "pytorch",
    "name": "first",
    "source_code": "def first(seq):\n    return next(iter(seq))",
    "docstring": "The first element in a sequence >>> first(\"ABC\") 'A'",
    "type": "function",
    "file_path": "pytorch\\torch\\fx\\experimental\\unification\\unification_tools.py",
    "ast_data": "FunctionDef name:first arg:seq arguments arg Return return:yes Call Call"
  },
  {
    "library": "sphinx",
    "name": "get_qualname_for",
    "source_code": "def get_qualname_for(self, name: str) -> list[str] | None:\n    if self.current_function:\n        if self.current_classes and self.context[-1] == '__init__':\n            return [*self.context[:-1], name]\n        else:\n            return None\n    else:\n        return [*self.context, name]",
    "docstring": "Get qualified name for given object as a list of string(s).",
    "type": "method",
    "file_path": "sphinx\\sphinx\\pycode\\parser.py",
    "ast_data": "FunctionDef name:get_qualname_for arg:self arg:name arguments arg arg If If BoolOp Compare Return return:yes Return return:no Return return:yes"
  },
  {
    "library": "django",
    "name": "SpatialOperator",
    "source_code": "class SpatialOperator:\n    sql_template = None\n\n    def __init__(self, op=None, func=None):\n        self.op = op\n        self.func = func\n\n    @property\n    def default_template(self):\n        if self.func:\n            return '%(func)s(%(lhs)s, %(rhs)s)'\n        else:\n            return '%(lhs)s %(op)s %(rhs)s'\n\n    def as_sql(self, connection, lookup, template_params, sql_params):\n        sql_template = self.sql_template or lookup.sql_template or self.default_template\n        template_params.update({'op': self.op, 'func': self.func})\n        return (sql_template % template_params, sql_params)",
    "docstring": "Class encapsulating the behavior specific to a GIS operation (used by lookups).",
    "type": "class",
    "file_path": "django\\django\\contrib\\gis\\db\\backends\\utils.py",
    "ast_data": "ClassDef name:SpatialOperator Assign FunctionDef name:__init__ arg:self arg:op arg:func arguments arg arg arg Assign Assign FunctionDef name:default_template arg:self arguments arg If Return return:yes Return return:yes FunctionDef name:as_sql arg:self arg:connection arg:lookup arg:template_params arg:sql_params arguments arg arg arg arg arg Assign BoolOp Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "current_device_index",
    "source_code": "def current_device_index() -> int:\n    return torch._C._accelerator_getDeviceIndex()",
    "docstring": "Return the index of a currently selected device for the current :ref:. Returns: int: the index of a currently selected device.",
    "type": "function",
    "file_path": "pytorch\\torch\\accelerator\\__init__.py",
    "ast_data": "FunctionDef name:current_device_index arguments Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "save_source_string",
    "source_code": "def save_source_string(self, module_name: str, src: str, is_package: bool=False, dependencies: bool=True):\n    self.dependency_graph.add_node(module_name, source=src, is_package=is_package, provided=True, action=_ModuleProviderAction.INTERN)\n    if dependencies:\n        deps = self._get_dependencies(src, module_name, is_package)\n        for dep in deps:\n            self.dependency_graph.add_edge(module_name, dep)\n            self.add_dependency(dep)",
    "docstring": "Adds ``, we scan the source for dependencies.",
    "type": "method",
    "file_path": "pytorch\\torch\\package\\package_exporter.py",
    "ast_data": "FunctionDef name:save_source_string arg:self arg:module_name arg:src arg:is_package arg:dependencies arguments arg arg arg arg arg Call If Assign Call For Call Call"
  },
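A hedged usage sketch for save_source_string via the public torch.package API; the file name and module source below are made up for illustration:

```python
from torch.package import PackageExporter, PackageImporter

src = "def answer():\n    return 42\n"

# Intern a module from a raw source string; with dependencies=True
# (the default) the source is scanned and its imports are added too.
with PackageExporter("example_pkg.pt") as exporter:
    exporter.save_source_string("my_module", src)

mod = PackageImporter("example_pkg.pt").import_module("my_module")
print(mod.answer())  # 42
```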
  {
    "library": "numpy",
    "name": "_clean_args",
    "source_code": "def _clean_args(*args):\n    newargs = []\n    for chk in args:\n        if chk is None:\n            break\n        newargs.append(chk)\n    return newargs",
    "docstring": "Helper function for delegating arguments to Python string functions. Many of the Python string operations that have optional arguments do not use 'None' to indicate a default value. In these cases, we need to remove all None arguments, and those following them.",
    "type": "function",
    "file_path": "numpy\\numpy\\_core\\strings.py",
    "ast_data": "FunctionDef name:_clean_args arguments arg Assign For If Compare Call Return return:yes"
  },
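A self-contained sketch of the truncate-at-first-None behavior documented for _clean_args; since the helper is private, it is restated here rather than imported:

```python
def _clean_args(*args):
    # Keep arguments up to (but not including) the first None;
    # anything after the first None is dropped as well.
    newargs = []
    for chk in args:
        if chk is None:
            break
        newargs.append(chk)
    return newargs

print(_clean_args(1, 2, None, 3))  # [1, 2]
print(_clean_args(None, 1))        # []
print(_clean_args("a", "b"))       # ['a', 'b']
```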
  {
    "library": "pytorch",
    "name": "float8_e5m2fnuz",
    "source_code": "def float8_e5m2fnuz(self):\n    return self._to(torch.float8_e5m2fnuz)",
    "docstring": "Casts this storage to float8_e5m2fnuz type",
    "type": "method",
    "file_path": "pytorch\\torch\\storage.py",
    "ast_data": "FunctionDef name:float8_e5m2fnuz arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "read_signatures",
    "source_code": "def read_signatures(lines):\n    sigs = []\n    for line in lines:\n        line = line.strip()\n        if not line or line.startswith('#'):\n            continue\n        line = line[:-1].split('(')\n        args = line[1]\n        name_and_type = line[0].split(' ')\n        ret_type = name_and_type[0]\n        name = name_and_type[1]\n        argtypes, argnames = zip(*[arg.split(' *') for arg in args.split(', ')])\n        if ret_type in argnames:\n            argnames = [n if n != ret_type else n + '_' for n in argnames]\n        argnames = [n if n not in ['lambda', 'in'] else n + '_' for n in argnames]\n        sigs.append({'name': name, 'return_type': ret_type, 'argnames': argnames, 'argtypes': list(argtypes)})\n    return sigs",
    "docstring": "Read BLAS/LAPACK signatures and split into name, return type, argument names, and argument types.",
    "type": "function",
    "file_path": "scipy\\scipy\\_build_utils\\_wrappers_common.py",
    "ast_data": "FunctionDef name:read_signatures arg:lines arguments arg Assign For Assign Call If BoolOp Call Assign Call Assign Assign Call Assign Assign Assign Call Call Call If Compare Assign Compare Assign Compare Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "apply_jvp",
    "source_code": "def apply_jvp(self, *args):\n    return self._forward_cls.jvp(self, *args)",
    "docstring": "Apply method used when executing forward mode AD during the forward",
    "type": "method",
    "file_path": "pytorch\\torch\\autograd\\function.py",
    "ast_data": "FunctionDef name:apply_jvp arg:self arguments arg arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "serialize_to_string",
    "source_code": "def serialize_to_string(self):\n    return print_mdl.SerializeToString()",
    "docstring": "Serialize the ProfileProto to a binary string. Users can write it to file for offline analysis by tfprof commandline or graphical interface. Returns: ProfileProto binary string.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\profiler\\model_analyzer.py",
    "ast_data": "FunctionDef name:serialize_to_string arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "django",
    "name": "prepare_value",
    "source_code": "def prepare_value(self, field, value):\n    if hasattr(value, 'resolve_expression'):\n        value = value.resolve_expression(self.query, allow_joins=False, for_save=True)\n        if value.contains_column_references:\n            raise ValueError('Failed to insert expression \"%s\" on %s. F() expressions can only be used to update, not to insert.' % (value, field))\n        if value.contains_aggregate:\n            raise FieldError('Aggregate functions are not allowed in this query (%s=%r).' % (field.name, value))\n        if value.contains_over_clause:\n            raise FieldError('Window expressions are not allowed in this query (%s=%r).' % (field.name, value))\n    return field.get_db_prep_save(value, connection=self.connection)",
    "docstring": "Prepare a value to be used in a query by resolving it if it is an expression and otherwise calling the field's get_db_prep_save().",
    "type": "method",
    "file_path": "django\\django\\db\\models\\sql\\compiler.py",
    "ast_data": "FunctionDef name:prepare_value arg:self arg:field arg:value arguments arg arg arg If Call Assign Call If Raise Call If Raise Call If Raise Call Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "get_clip_path",
    "source_code": "def get_clip_path(self):\n    return self._clippath",
    "docstring": "Return the clip path.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\artist.py",
    "ast_data": "FunctionDef name:get_clip_path arg:self arguments arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "GraphPatternEntry",
    "source_code": "@dataclasses.dataclass\nclass GraphPatternEntry(PatternEntry):\n    handler: Callable[..., Any]\n\n    def apply(self, match: Match, graph: torch.fx.Graph, node: torch.fx.Node) -> None:\n        with graph.inserting_before(node):\n            self.handler(match, *match.args, **match.kwargs)",
    "docstring": "A pattern that runs a function on the FX graph",
    "type": "class",
    "file_path": "pytorch\\torch\\_inductor\\pattern_matcher.py",
    "ast_data": "ClassDef name:GraphPatternEntry FunctionDef name:apply arg:self arg:match arg:graph arg:node arguments arg arg arg arg With Call Call"
  },
  {
    "library": "pytorch",
    "name": "threshold",
    "source_code": "def threshold(input: Tensor, threshold: float, value: float) -> Tensor:\n    if not input.is_quantized:\n        raise ValueError(\"Input to 'quantized.threshold' must be quantized!\")\n    if threshold is None:\n        raise ValueError(\"Input to 'threshold' must be specified!\")\n    if value is None:\n        raise ValueError(\"Input to 'value' must be specified!\")\n    return torch._ops.ops.quantized.threshold(input, threshold, value)",
    "docstring": "Applies the quantized version of the threshold function element-wise: .. math:: x = \\begin{cases} x & \\text{if~} x > \\text{threshold} \\\\ \\text{value} & \\text{otherwise} \\end{cases} See :class: for more details.",
    "type": "function",
    "file_path": "pytorch\\torch\\ao\\nn\\quantized\\functional.py",
    "ast_data": "FunctionDef name:threshold arg:input arg:threshold arg:value arguments arg arg arg If Raise Call If Compare Raise Call If Compare Raise Call Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "__call__",
    "source_code": "def __call__(self, *args):\n    return self.range_",
    "docstring": "Return stored value. *args needed because range_ can be float or func, and is called with variable number of parameters.",
    "type": "method",
    "file_path": "scipy\\scipy\\integrate\\_quadpack_py.py",
    "ast_data": "FunctionDef name:__call__ arg:self arguments arg arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_inner_shape_dim",
    "source_code": "def _inner_shape_dim(self, dimension):\n    result = tensor_shape.dimension_value(self._static_inner_shape[dimension])\n    return self._inner_shape[dimension] if result is None else result",
    "docstring": "Returns an int or a tensor representing _inner_shape[dimension].",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\dynamic_ragged_shape.py",
    "ast_data": "FunctionDef name:_inner_shape_dim arg:self arg:dimension arguments arg arg Assign Call Return return:yes Compare"
  },
  {
    "library": "tensorflow",
    "name": "ExportedDoc",
    "source_code": "class ExportedDoc(NamedTuple):\n    file_name: str\n    line_no: int\n    modules: tuple[str, ...]\n    docstring: str\n\n    @classmethod\n    def create(cls, *, modules: Sequence[str], **kwargs) -> 'ExportedDoc':\n        return cls(modules=tuple(modules), **kwargs)",
    "docstring": "Information about an export Module docstring.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\tools\\api\\generator2\\shared\\exported_api.py",
    "ast_data": "ClassDef name:ExportedDoc FunctionDef name:create arg:cls arguments arg arg arg Return return:yes Call Call"
  },
  {
    "library": "scipy",
    "name": "_polyder",
    "source_code": "def _polyder(p, m):\n    if m == 0:\n        result = p\n    else:\n        n = len(p)\n        if n <= m:\n            result = np.zeros_like(p[:1, ...])\n        else:\n            dp = p[:-m].copy()\n            for k in range(m):\n                rng = np.arange(n - k - 1, m - k - 1, -1)\n                dp *= rng.reshape((n - m,) + (1,) * (p.ndim - 1))\n            result = dp\n    return result",
    "docstring": "Differentiate polynomials represented with coefficients. p must be a 1-D or 2-D array. In the 2-D case, each column gives the coefficients of a polynomial; the first row holds the coefficients associated with the highest power. m must be a nonnegative integer. (numpy.polyder doesn't handle the 2-D case.)",
    "type": "function",
    "file_path": "scipy\\scipy\\signal\\_savitzky_golay.py",
    "ast_data": "FunctionDef name:_polyder arg:p arg:m arguments arg arg If Compare Assign Assign Call If Compare Assign Call Assign Call For Call Assign Call Call Assign Return return:yes"
  },
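For the 1-D case that _polyder shares with numpy, a quick sanity check against np.polyder (the private helper exists because np.polyder does not handle the 2-D column-stacked case):

```python
import numpy as np

# p(x) = 3x^2 + 2x + 1  ->  p'(x) = 6x + 2
p = np.array([3.0, 2.0, 1.0])
print(np.polyder(p, m=1))  # [6. 2.]
```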
  {
    "library": "scikit-learn",
    "name": "inverse_transform",
    "source_code": "def inverse_transform(self, X):\n    check_is_fitted(self)\n    return X @ self.components_",
    "docstring": "Transform data back to its original space. .. versionadded:: 0.18 Parameters ---------- X : {ndarray, sparse matrix} of shape (n_samples, n_components) Transformed data matrix. Returns ------- X_original : ndarray of shape (n_samples, n_features) Returns a data matrix of the original shape.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\decomposition\\_nmf.py",
    "ast_data": "FunctionDef name:inverse_transform arg:self arg:X arguments arg arg Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "set_bad",
    "source_code": "def set_bad(self, color='k', alpha=None):\n    self._rgba_bad = to_rgba(color, alpha)\n    if self._isinit:\n        self._set_extremes()",
    "docstring": "Set the color for masked values.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\colors.py",
    "ast_data": "FunctionDef name:set_bad arg:self arg:color arg:alpha arguments arg arg arg Assign Call If Call"
  },
  {
    "library": "pandas",
    "name": "delete",
    "source_code": "def delete(self, where=None, start: int | None=None, stop: int | None=None) -> int | None:\n    if com.all_none(where, start, stop):\n        self._handle.remove_node(self.group, recursive=True)\n        return None\n    raise TypeError('cannot delete on an abstract storer')",
    "docstring": "support fully deleting the node in its entirety (only) - where specification must be None",
    "type": "method",
    "file_path": "pandas\\pandas\\io\\pytables.py",
    "ast_data": "FunctionDef name:delete arg:self arg:where arg:start arg:stop arguments arg arg arg arg If Call Call Return return:no Raise Call"
  },
  {
    "library": "kornia",
    "name": "translation_vector",
    "source_code": "@property\ndef translation_vector(self) -> Tensor:\n    return self.extrinsics[..., :3, -1:]",
    "docstring": "Return the translation vector from the extrinsics. Returns: tensor of shape :math:.",
    "type": "method",
    "file_path": "kornia\\kornia\\geometry\\camera\\pinhole.py",
    "ast_data": "FunctionDef name:translation_vector arg:self arguments arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_SymNodeDict",
    "source_code": "class _SymNodeDict:\n\n    def __init__(self) -> None:\n        self.sym_node_dict: dict[PySymType, _PySymProxyType] = {}\n\n    def __setitem__(self, key: PySymType, value: _PySymProxyType) -> None:\n        self.sym_node_dict[key.node] = value\n\n    def __getitem__(self, key: PySymType) -> _PySymProxyType:\n        return self.sym_node_dict[key.node]\n\n    def __contains__(self, key: PySymType) -> bool:\n        return key.node in self.sym_node_dict\n\n    def get(self, key: PySymType, default: Optional[_PySymProxyType]=None) -> _PySymProxyType:\n        return self.sym_node_dict.get(key.node, default)\n\n    def __iter__(self) -> Any:\n        raise NotImplementedError\n\n    def __len__(self) -> int:\n        return len(self.sym_node_dict)",
    "docstring": "Wrapper around a dictionary that will hash SymInts with their nodes",
    "type": "class",
    "file_path": "pytorch\\torch\\fx\\experimental\\proxy_tensor.py",
    "ast_data": "ClassDef name:_SymNodeDict FunctionDef name:__init__ arg:self arguments arg FunctionDef name:__setitem__ arg:self arg:key arg:value arguments arg arg arg Assign FunctionDef name:__getitem__ arg:self arg:key arguments arg arg Return return:yes FunctionDef name:__contains__ arg:self arg:key arguments arg arg Return return:yes Compare FunctionDef name:get arg:self arg:key arg:default arguments arg arg arg Return return:yes Call FunctionDef name:__iter__ arg:self arguments arg Raise FunctionDef name:__len__ arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "django",
    "name": "_password_validators_help_text_html",
    "source_code": "def _password_validators_help_text_html(password_validators=None):\n    help_texts = password_validators_help_texts(password_validators)\n    help_items = format_html_join('', '<li>{}</li>', ((help_text,) for help_text in help_texts))\n    return format_html('<ul>{}</ul>', help_items) if help_items else ''",
    "docstring": "Return an HTML string with all help texts of all configured validators in an .",
    "type": "function",
    "file_path": "django\\django\\contrib\\auth\\password_validation.py",
    "ast_data": "FunctionDef name:_password_validators_help_text_html arg:password_validators arguments arg Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "numpy",
    "name": "__next__",
    "source_code": "def __next__(self):\n    return (self.iter.coords, next(self.iter))",
    "docstring": "Standard iterator method, returns the index tuple and array value. Returns ------- coords : tuple of ints The indices of the current iteration. val : scalar The array element of the current iteration.",
    "type": "method",
    "file_path": "numpy\\numpy\\lib\\_index_tricks_impl.py",
    "ast_data": "FunctionDef name:__next__ arg:self arguments arg Return return:yes Call"
  },
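This __next__ implements the iterator protocol behind np.ndenumerate (index tuple plus value); a quick demo of that public API:

```python
import numpy as np

a = np.array([[1, 2], [3, 4]])
for coords, val in np.ndenumerate(a):
    print(coords, val)
# (0, 0) 1
# (0, 1) 2
# (1, 0) 3
# (1, 1) 4
```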
  {
    "library": "matplotlib",
    "name": "_total_to_compress_renum",
    "source_code": "@staticmethod\ndef _total_to_compress_renum(valid):\n    renum = np.full(np.size(valid), -1, dtype=np.int32)\n    n_valid = np.sum(valid)\n    renum[valid] = np.arange(n_valid, dtype=np.int32)\n    return renum",
    "docstring": "Parameters ---------- valid : 1D bool array Validity mask. Returns ------- int array Array so that ( being a compressed array based on a with mask ~*valid*): - For all i with valid[i] = True: valid_array[renum[i]] = masked_array[i] - For all i with valid[i] = False: renum[i] = -1 (invalid value)",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\tri\\_tritools.py",
    "ast_data": "FunctionDef name:_total_to_compress_renum arg:valid arguments arg Assign Call Call Assign Call Assign Call Return return:yes"
  },
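A small numpy sketch of the renumbering described above; the private static method is restated so the mapping can be checked directly:

```python
import numpy as np

def total_to_compress_renum(valid):
    # renum[i] is the index of element i in the compressed array,
    # or -1 where valid[i] is False.
    renum = np.full(np.size(valid), -1, dtype=np.int32)
    renum[valid] = np.arange(np.sum(valid), dtype=np.int32)
    return renum

valid = np.array([True, False, True, True, False])
print(total_to_compress_renum(valid))  # [ 0 -1  1  2 -1]
```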
  {
    "library": "tensorflow",
    "name": "value",
    "source_code": "def value(self):\n    raise NotImplementedError",
    "docstring": "Returns the last snapshot of this variable. You usually do not need to call this method as all ops that need the value of the variable call it automatically through a call. Returns a which holds the value of the variable. You can not assign a new value to this tensor as it is not a reference to the variable. To avoid copies, if the consumer of the returned value is on the same device as the variable, this actually returns the live value of the variable, not a copy. Updates to the variable are seen by the consumer. If the consumer is on a different device it will get a copy of the variable. Returns: A containing the value of the variable.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\variables.py",
    "ast_data": "FunctionDef name:value arg:self arguments arg Raise"
  },
  {
    "library": "tensorflow",
    "name": "_not_equal_flops",
    "source_code": "@ops.RegisterStatistics('NotEqual', 'flops')\ndef _not_equal_flops(graph, node):\n    return _binary_per_element_op_flops(graph, node)",
    "docstring": "Compute flops for NotEqual operation.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\profiler\\internal\\flops_registry.py",
    "ast_data": "FunctionDef name:_not_equal_flops arg:graph arg:node arguments arg arg Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "reduce_logsumexp",
    "source_code": "@tf_export('math.reduce_logsumexp', 'reduce_logsumexp', v1=[])\n@dispatch.add_dispatch_support\ndef reduce_logsumexp(input_tensor, axis=None, keepdims=False, name=None):\n    with ops.name_scope(name, 'ReduceLogSumExp', [input_tensor]) as name:\n        raw_max = reduce_max(input_tensor, axis=axis, keepdims=True)\n        my_max = array_ops.stop_gradient(gen_math_ops.select_v2(gen_math_ops.is_finite(raw_max), raw_max, 0))\n        result = gen_math_ops.log(reduce_sum(exp(subtract(input_tensor, my_max)), axis=axis, keepdims=keepdims))\n        if not keepdims:\n            my_max = array_ops.reshape(my_max, gen_array_ops.shape(result))\n        result = add(result, my_max, name=name)\n        return _may_reduce_to_scalar(keepdims, axis, result)",
    "docstring": "Computes log(sum(exp(elements across dimensions of a tensor))). Reduces along the dimensions given in . Unless is true, the rank of the tensor is reduced by 1 for each of the entries in , which must be unique. If is true, the reduced dimensions are retained with length 1. If has no entries, all dimensions are reduced, and a tensor with a single element is returned. This function is more numerically stable than log(sum(exp(input))). It avoids overflows caused by taking the exp of large inputs and underflows caused by taking the log of small inputs. For example: Args: input_tensor: The tensor to reduce. Should have numeric type. axis: The dimensions to reduce. If (the default), reduces all dimensions. Must be in the range . keepdims: If true, retains reduced dimensions with length 1. name: A name for the operation (optional). Returns: The reduced tensor.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\math_ops.py",
    "ast_data": "FunctionDef name:reduce_logsumexp arg:input_tensor arg:axis arg:keepdims arg:name arguments arg arg arg arg With Call Assign Call Assign Call Call Call Assign Call Call Call Call If Assign Call Call Assign Call Return return:yes Call Call"
  },
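The stability trick used by reduce_logsumexp above is the standard max-shift identity log(sum(exp(x))) = m + log(sum(exp(x - m))) with m = max(x); a plain numpy sketch:

```python
import numpy as np

x = np.array([1000.0, 1000.0])

# Naive form overflows: exp(1000) is inf in float64.
naive = np.log(np.sum(np.exp(x)))

# Max-shifted form stays finite: 1000 + log(2).
m = np.max(x)
stable = m + np.log(np.sum(np.exp(x - m)))

print(naive, stable)  # inf 1000.6931471805599
```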
  {
    "library": "tensorflow",
    "name": "_loss_for_variable",
    "source_code": "def _loss_for_variable(v):\n    with backend.name_scope(name + '/Regularizer'):\n        regularization = regularizer(v)\n    return regularization",
    "docstring": "Creates a regularization loss for variable .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\base_layer.py",
    "ast_data": "FunctionDef name:_loss_for_variable arg:v arguments arg With Call Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "__iter__",
    "source_code": "def __iter__(self) -> iterator_ops.OwnedIterator:\n    if context.executing_eagerly() or ops.inside_function():\n        with ops.colocate_with(self._variant_tensor):\n            return iterator_ops.OwnedIterator(self)\n    else:\n        raise RuntimeError('`tf.data.Dataset` only supports Python-style iteration in eager mode or within tf.function.')",
    "docstring": "Creates an iterator for elements of this dataset. The returned iterator implements the Python Iterator protocol. Returns: An for the elements of this dataset. Raises: RuntimeError: If not inside of tf.function and not executing eagerly.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\data\\ops\\dataset_ops.py",
    "ast_data": "FunctionDef name:__iter__ arg:self arguments arg If BoolOp Call Call With Call Return return:yes Call Raise Call"
  },
  {
    "library": "kornia",
    "name": "remove_borders",
    "source_code": "def remove_borders(self, score_map: Tensor, borders: int=15) -> Tensor:\n    mask = torch.zeros_like(score_map)\n    mask[:, :, borders:-borders, borders:-borders] = 1\n    return mask * score_map",
    "docstring": "Remove the borders of the image to avoid detections on the corners.",
    "type": "method",
    "file_path": "kornia\\kornia\\feature\\scale_space_detector.py",
    "ast_data": "FunctionDef name:remove_borders arg:self arg:score_map arg:borders arguments arg arg arg Assign Call Assign Return return:yes"
  },
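A quick torch sketch of the border masking above; the (N, C, H, W) shape and border width are illustrative:

```python
import torch

score_map = torch.ones(1, 1, 8, 8)
borders = 2

# Zero everything except the interior region.
mask = torch.zeros_like(score_map)
mask[:, :, borders:-borders, borders:-borders] = 1
masked = mask * score_map

print(masked[0, 0])  # ones in the central 4x4 block, zeros on the border
```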
  {
    "library": "pytorch",
    "name": "set_post_optim_event",
    "source_code": "def set_post_optim_event(self, event: torch.Event) -> None:\n    self._get_fsdp_state()._state_ctx.post_optim_event = event",
    "docstring": "Sets a post-optimizer-step event for the root FSDP module to wait the all-gather streams on. By default, the root FSDP module waits the all-gather streams on the current stream to ensure that the optimizer step has finished before all-gathering. However, this may introduce false dependencies if there is unrelated computation after the optimizer step. This API allows the user to provide their own event to wait on. After the root waits on the event, the event is discarded, so this API should be called with a new event each iteration. Args: event (torch.Event): Event recorded after the optimizer step to wait all-gather streams on.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\fsdp\\_fully_shard\\_fully_shard.py",
    "ast_data": "FunctionDef name:set_post_optim_event arg:self arg:event arguments arg arg Assign Call"
  },
  {
    "library": "scikit-learn",
    "name": "RealNotInt",
    "source_code": "class RealNotInt(Real):\n    pass",
    "docstring": "A type that represents reals that are not instances of int. Behaves like float, but also works with values extracted from numpy arrays. isintance(1, RealNotInt) -> False isinstance(1.0, RealNotInt) -> True",
    "type": "class",
    "file_path": "scikit-learn\\sklearn\\utils\\_param_validation.py",
    "ast_data": "ClassDef name:RealNotInt"
  },
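A minimal sketch of how such a marker type behaves, assuming it is paired with an ABC virtual-subclass registration of float (scikit-learn registers float on the class right after defining it; treated as an assumption here):

```python
from numbers import Real

class RealNotInt(Real):
    # Marker ABC used only for isinstance checks; never instantiated,
    # so the abstract methods of numbers.Real need no implementations.
    pass

# float (and numpy float scalars, which subclass float) become virtual
# subclasses; int is deliberately left out.
RealNotInt.register(float)

print(isinstance(1, RealNotInt))    # False
print(isinstance(1.0, RealNotInt))  # True
```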
  {
    "library": "kornia",
    "name": "run_5point",
    "source_code": "def run_5point(points1: torch.Tensor, points2: torch.Tensor, weights: Optional[torch.Tensor]=None) -> torch.Tensor:\n    KORNIA_CHECK_SHAPE(points1, ['B', 'N', '2'])\n    KORNIA_CHECK_SAME_SHAPE(points1, points2)\n    KORNIA_CHECK(points1.shape[1] >= 5, 'Number of points should be >=5')\n    if weights is not None:\n        KORNIA_CHECK_SAME_SHAPE(points1[:, :, 0], weights)\n    batch_size, _, _ = points1.shape\n    x1, y1 = torch.chunk(points1, dim=-1, chunks=2)\n    x2, y2 = torch.chunk(points2, dim=-1, chunks=2)\n    ones = ones_like(x1)\n    X = torch.cat([x1 * x2, x1 * y2, x1, y1 * x2, y1 * y2, y1, x2, y2, ones], dim=-1)\n    if weights is None:\n        X = X.transpose(-2, -1) @ X\n    else:\n        w_diag = torch.diag_embed(weights)\n        X = X.transpose(-2, -1) @ w_diag @ X\n    E_Nister = null_to_Nister_solution(X, batch_size)\n    return E_Nister",
    "docstring": "Compute the essential matrix using the 5-point algorithm from Nister. The linear system is solved by Nister's 5-point algorithm [@nister2004efficient], and the solver implemented referred to [@barath2020magsac++][@wei2023generalized][@wang2023vggsfm]. Args: points1: A set of carlibrated points in the first image with a tensor shape :math:. points2: A set of points in the second image with a tensor shape :math:. weights: Tensor containing the weights per point correspondence with a shape of :math:. Returns: the computed essential matrix with shape :math:.",
    "type": "function",
    "file_path": "kornia\\kornia\\geometry\\epipolar\\essential.py",
    "ast_data": "FunctionDef name:run_5point arg:points1 arg:points2 arg:weights arguments arg arg arg Call Call Call Compare If Compare Call Assign Assign Call Assign Call Assign Call Assign Call If Compare Assign Call Assign Call Assign Call Assign Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "_path_to_3d_segment",
    "source_code": "def _path_to_3d_segment(path, zs=0, zdir='z'):\n    zs = np.broadcast_to(zs, len(path))\n    pathsegs = path.iter_segments(simplify=False, curves=False)\n    seg = [(x, y, z) for ((x, y), code), z in zip(pathsegs, zs)]\n    seg3d = [juggle_axes(x, y, z, zdir) for x, y, z in seg]\n    return seg3d",
    "docstring": "Convert a path to a 3D segment.",
    "type": "function",
    "file_path": "matplotlib\\lib\\mpl_toolkits\\mplot3d\\art3d.py",
    "ast_data": "FunctionDef name:_path_to_3d_segment arg:path arg:zs arg:zdir arguments arg arg arg Assign Call Call Assign Call Assign Call Assign Call Return return:yes"
  },
  {
    "library": "pandas",
    "name": "_set_item",
    "source_code": "def _set_item(self, key, value) -> None:\n    value, refs = self._sanitize_column(value)\n    if key in self.columns and value.ndim == 1 and (not isinstance(value.dtype, ExtensionDtype)):\n        if not self.columns.is_unique or isinstance(self.columns, MultiIndex):\n            existing_piece = self[key]\n            if isinstance(existing_piece, DataFrame):\n                value = np.tile(value, (len(existing_piece.columns), 1)).T\n                refs = None\n    self._set_item_mgr(key, value, refs)",
    "docstring": "Add series to DataFrame in specified column. If series is a numpy-array (not a Series/TimeSeries), it must be the same length as the DataFrames index or an error will be thrown. Series/TimeSeries will be conformed to the DataFrames index to ensure homogeneity.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\frame.py",
    "ast_data": "FunctionDef name:_set_item arg:self arg:key arg:value arguments arg arg arg Assign Call If BoolOp Compare Compare Call If BoolOp Call Assign If Call Assign Call Call Assign Call"
  },
  {
    "library": "django",
    "name": "FetchFromCacheMiddleware",
    "source_code": "class FetchFromCacheMiddleware(MiddlewareMixin):\n\n    def __init__(self, get_response):\n        super().__init__(get_response)\n        self.key_prefix = settings.CACHE_MIDDLEWARE_KEY_PREFIX\n        self.cache_alias = settings.CACHE_MIDDLEWARE_ALIAS\n\n    @property\n    def cache(self):\n        return caches[self.cache_alias]\n\n    def process_request(self, request):\n        if request.method not in ('GET', 'HEAD'):\n            request._cache_update_cache = False\n            return None\n        cache_key = get_cache_key(request, self.key_prefix, 'GET', cache=self.cache)\n        if cache_key is None:\n            request._cache_update_cache = True\n            return None\n        response = self.cache.get(cache_key)\n        if response is None and request.method == 'HEAD':\n            cache_key = get_cache_key(request, self.key_prefix, 'HEAD', cache=self.cache)\n            response = self.cache.get(cache_key)\n        if response is None:\n            request._cache_update_cache = True\n            return None\n        if (max_age_seconds := get_max_age(response)) is not None and (expires_timestamp := parse_http_date_safe(response['Expires'])) is not None:\n            now_timestamp = int(time.time())\n            remaining_seconds = expires_timestamp - now_timestamp\n            response['Age'] = max(0, max_age_seconds - remaining_seconds)\n        request._cache_update_cache = False\n        return response",
    "docstring": "Request-phase cache middleware that fetches a page from the cache. Must be used as part of the two-part update/fetch cache middleware. FetchFromCacheMiddleware must be the last piece of middleware in MIDDLEWARE so that it'll get called last during the request phase.",
    "type": "class",
    "file_path": "django\\django\\middleware\\cache.py",
    "ast_data": "ClassDef name:FetchFromCacheMiddleware FunctionDef name:__init__ arg:self arg:get_response arguments arg arg Call Call Assign Assign FunctionDef name:cache arg:self arguments arg Return return:yes FunctionDef name:process_request arg:self arg:request arguments arg arg If Compare Assign Return return:no Assign Call If Compare Assign Return return:no Assign Call If BoolOp Compare Compare Assign Call Assign Call If Compare Assign Return return:no If BoolOp Compare Call Compare Call Assign Call Call Assign Assign Call Assign Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "set_observation_type",
    "source_code": "def set_observation_type(self, observation_type: ObservationType) -> BackendPatternConfig:\n    self.observation_type = observation_type\n    return self",
    "docstring": "Set how observers should be inserted in the graph for this pattern. Observation type here refers to how observers (or quant-dequant ops) will be placed in the graph. This is used to produce the desired reference patterns understood by the backend. Weighted ops such as linear and conv require different observers (or quantization parameters passed to quantize ops in the reference model) for the input and the output. There are two observation types: (default): the output observer instance will be different from the input. This is the most common observation type. : the output observer instance will be the same as the input. This is useful for operators like . Note: This will be renamed in the near future, since we will soon insert QuantDeQuantStubs with observers (and fake quantizes) attached instead of observers themselves.",
    "type": "method",
    "file_path": "pytorch\\torch\\ao\\quantization\\backend_config\\backend_config.py",
    "ast_data": "FunctionDef name:set_observation_type arg:self arg:observation_type arguments arg arg Assign Return return:yes"
  },
  {
    "library": "django",
    "name": "prefix_gen",
    "source_code": "def prefix_gen():\n    alphabet = ascii_uppercase\n    prefix = chr(ord(self.alias_prefix) + 1)\n    yield prefix\n    for n in count(1):\n        seq = alphabet[alphabet.index(prefix):] if prefix else alphabet\n        for s in product(seq, repeat=n):\n            yield ''.join(s)\n        prefix = None",
    "docstring": "Generate a sequence of characters in alphabetical order: -> 'A', 'B', 'C', ... When the alphabet is finished, the sequence will continue with the Cartesian product: -> 'AA', 'AB', 'AC', ...",
    "type": "method",
    "file_path": "django\\django\\db\\models\\sql\\query.py",
    "ast_data": "FunctionDef name:prefix_gen arguments Assign Assign Call Call For Call Assign Call For Call Call Assign"
  },
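A standalone sketch of the alias-prefix sequence described above, with the starting prefix fixed to 'T' for illustration (in Django it comes from self.alias_prefix):

```python
from itertools import count, islice, product
from string import ascii_uppercase

def prefix_gen(alias_prefix="T"):
    alphabet = ascii_uppercase
    prefix = chr(ord(alias_prefix) + 1)
    yield prefix
    for n in count(1):
        seq = alphabet[alphabet.index(prefix):] if prefix else alphabet
        for s in product(seq, repeat=n):
            yield "".join(s)
        prefix = None

# 'U', then the alphabet tail 'U'..'Z' again, then 'AA', 'AB', ...
print(list(islice(prefix_gen(), 10)))
# ['U', 'U', 'V', 'W', 'X', 'Y', 'Z', 'AA', 'AB', 'AC']
```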
  {
    "library": "scrapy",
    "name": "LogCounterHandler",
    "source_code": "class LogCounterHandler(logging.Handler):\n\n    def __init__(self, crawler: Crawler, *args: Any, **kwargs: Any):\n        super().__init__(*args, **kwargs)\n        self.crawler: Crawler = crawler\n\n    def emit(self, record: logging.LogRecord) -> None:\n        sname = f'log_count/{record.levelname}'\n        assert self.crawler.stats\n        self.crawler.stats.inc_value(sname)",
    "docstring": "Record log levels count into a crawler stats",
    "type": "class",
    "file_path": "scrapy\\scrapy\\utils\\log.py",
    "ast_data": "ClassDef name:LogCounterHandler FunctionDef name:__init__ arg:self arg:crawler arguments arg arg arg arg Call Call FunctionDef name:emit arg:self arg:record arguments arg arg Assign Call"
  },
  {
    "library": "scikit-learn",
    "name": "_one_vs_one_coef",
    "source_code": "def _one_vs_one_coef(dual_coef, n_support, support_vectors):\n    n_class = dual_coef.shape[0] + 1\n    coef = []\n    sv_locs = np.cumsum(np.hstack([[0], n_support]))\n    for class1 in range(n_class):\n        sv1 = support_vectors[sv_locs[class1]:sv_locs[class1 + 1], :]\n        for class2 in range(class1 + 1, n_class):\n            sv2 = support_vectors[sv_locs[class2]:sv_locs[class2 + 1], :]\n            alpha1 = dual_coef[class2 - 1, sv_locs[class1]:sv_locs[class1 + 1]]\n            alpha2 = dual_coef[class1, sv_locs[class2]:sv_locs[class2 + 1]]\n            coef.append(safe_sparse_dot(alpha1, sv1) + safe_sparse_dot(alpha2, sv2))\n    return coef",
    "docstring": "Generate primal coefficients from dual coefficients for the one-vs-one multi class LibSVM in the case of a linear kernel.",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\svm\\_base.py",
    "ast_data": "FunctionDef name:_one_vs_one_coef arg:dual_coef arg:n_support arg:support_vectors arguments arg arg arg Assign Assign Assign Call Call For Call Assign For Call Assign Assign Assign Call Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "BackwardState",
    "source_code": "class BackwardState:\n    proxy: torch.fx.Proxy",
    "docstring": "BackwardState is used to pass Python hooks from the forwards pass into the backwards pass in Dynamo+Compiled Autograd. It is created by TorchDynamo and has special handling there. Dynamo will pass an empty BackwardState to the forwards, then populate members on it (via setattr) only after the forwards graph is finished. Later on, in CompileAutograd we will inline and add the needed guards on the BackwardState. BackwardState is identified and has special handling in AOTAutograd. During AOTAutograd: 1) BackwardState is an input to the forwards graph 2) It must only be used in the backwards 3) It will be empty in the forwards 4) In the forwards we add a wrapper to save it 5) In the backwards it becomes an input 6) There can only be one per graph BackwardState requires CompiledAutograd.",
    "type": "class",
    "file_path": "pytorch\\torch\\fx\\experimental\\_backward_state.py",
    "ast_data": "ClassDef name:BackwardState"
  },
  {
    "library": "django",
    "name": "r",
    "source_code": "def r(self):\n    value = self.data\n    if not isinstance(value, datetime):\n        default_timezone = get_default_timezone()\n        value = datetime.combine(value, time.min).replace(tzinfo=default_timezone)\n    elif is_naive(value):\n        value = make_aware(value, timezone=self.timezone)\n    return format_datetime_rfc5322(value)",
    "docstring": "RFC 5322 formatted date; e.g. 'Thu, 21 Dec 2000 16:01:07 +0200'",
    "type": "method",
    "file_path": "django\\django\\utils\\dateformat.py",
    "ast_data": "FunctionDef name:r arg:self arguments arg Assign If Call Assign Call Assign Call Call If Call Assign Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "__call__",
    "source_code": "def __call__(self, x):\n    return 0.0",
    "docstring": "Compute a regularization penalty from an input tensor.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\regularizers.py",
    "ast_data": "FunctionDef name:__call__ arg:self arg:x arguments arg arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_IterableInput",
    "source_code": "class _IterableInput(collections_abc.Iterable, distribute_types.DistributedDatasetInterface):\n\n    def __init__(self, input_workers):\n        assert isinstance(input_workers, InputWorkers)\n        self._input_workers = input_workers\n\n    def __iter__(self):\n        raise NotImplementedError('must be implemented in descendants')\n\n    def reduce(self, initial_state, reduce_fn):\n        iterator = iter(self)\n        optional_data = iterator.get_next_as_optional()\n\n        def cond(optional_data, state):\n            del state\n            return optional_data.has_value()\n\n        def loop_body(optional_data, state):\n            state = reduce_fn(state, optional_data.get_value())\n            optional_data = iterator.get_next_as_optional()\n            return (optional_data, state)\n        optional_data, final_state = while_loop.while_loop(cond, loop_body, [optional_data, initial_state], parallel_iterations=1, return_same_structure=True)\n        return final_state",
    "docstring": "Base class for iterable inputs for distribution strategies.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\input_lib.py",
    "ast_data": "ClassDef name:_IterableInput FunctionDef name:__init__ arg:self arg:input_workers arguments arg arg Call Assign FunctionDef name:__iter__ arg:self arguments arg Raise Call FunctionDef name:reduce arg:self arg:initial_state arg:reduce_fn arguments arg arg arg Assign Call Assign Call FunctionDef name:cond arg:optional_data arg:state arguments arg arg Return return:yes Call FunctionDef name:loop_body arg:optional_data arg:state arguments arg arg Assign Call Call Assign Call Return return:yes Assign Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "reversed",
    "source_code": "def reversed(self, name=None):\n    raise NotImplementedError()",
    "docstring": "Return a reversed instance of the Colormap. .. note:: This function is not implemented for the base class. Parameters ---------- name : str, optional The name for the reversed colormap. If None, the name is set to ``. See Also -------- LinearSegmentedColormap.reversed ListedColormap.reversed",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\colors.py",
    "ast_data": "FunctionDef name:reversed arg:self arg:name arguments arg arg Raise Call"
  },
  {
    "library": "tensorflow",
    "name": "_validate_keep_input",
    "source_code": "def _validate_keep_input(keep_input, enqueue_many):\n    keep_input = ops.convert_to_tensor(keep_input)\n    if keep_input.shape.ndims is None:\n        raise ValueError('`keep_input` dimensions must be known at graph construction.')\n    if not enqueue_many and keep_input.shape.ndims == 1:\n        raise ValueError('`keep_input` cannot be a vector when `enqueue_many=False`.')\n    if keep_input.shape.ndims > 1:\n        raise ValueError('`keep_input` must be 0 or 1 dimensions.')\n    return keep_input",
    "docstring": "Validate argument to conditional batching functions.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\training\\input.py",
    "ast_data": "FunctionDef name:_validate_keep_input arg:keep_input arg:enqueue_many arguments arg arg Assign Call If Compare Raise Call If BoolOp Compare Raise Call If Compare Raise Call Return return:yes"
  },
  {
    "library": "django",
    "name": "OneToOneRel",
    "source_code": "class OneToOneRel(ManyToOneRel):\n\n    def __init__(self, field, to, field_name, related_name=None, related_query_name=None, limit_choices_to=None, parent_link=False, on_delete=None):\n        super().__init__(field, to, field_name, related_name=related_name, related_query_name=related_query_name, limit_choices_to=limit_choices_to, parent_link=parent_link, on_delete=on_delete)\n        self.multiple = False",
    "docstring": "Used by OneToOneField to store information about the relation. `` returns this class to provide access to the field flags for the reverse relation.",
    "type": "class",
    "file_path": "django\\django\\db\\models\\fields\\reverse_related.py",
    "ast_data": "ClassDef name:OneToOneRel FunctionDef name:__init__ arg:self arg:field arg:to arg:field_name arg:related_name arg:related_query_name arg:limit_choices_to arg:parent_link arg:on_delete arguments arg arg arg arg arg arg arg arg arg Call Call Assign"
  },
  {
    "library": "pytorch",
    "name": "GraphCompileReason",
    "source_code": "@dataclass\nclass GraphCompileReason:\n    reason: str\n    user_stack: list[traceback.FrameSummary]\n    graph_break: bool = True\n\n    def __post_init__(self):\n        if self.graph_break:\n            graph_break_reasons.append(self)",
    "docstring": "Stores why a given output graph was compiled; i.e. what caused the graph break.",
    "type": "class",
    "file_path": "pytorch\\torch\\_dynamo\\output_graph.py",
    "ast_data": "ClassDef name:GraphCompileReason FunctionDef name:__post_init__ arg:self arguments arg If Call"
  },
  {
    "library": "pytorch",
    "name": "decide_loop_order_to_match",
    "source_code": "def decide_loop_order_to_match(self, other: 'MemoryDep') -> Optional[list[int]]:\n    assert self.num_vars == other.num_vars\n    if self.num_vars != len(self.index.free_symbols):\n        return None\n    if other.num_vars != len(other.index.free_symbols):\n        return None\n    if any((s == 0 or s == 1 for s in itertools.chain(self.size, other.size))):\n        return None\n    self_strides = V.graph.sizevars.stride_hints(self.index, self.var_names)\n    other_strides = V.graph.sizevars.stride_hints(other.index, other.var_names)\n    if len(OrderedSet(self_strides)) != len(self_strides) or len(OrderedSet(other_strides)) != len(other_strides):\n        log.debug('unable to decide loop order. self_dep=%s v.s. other_dep=%s, self_strides=%s v.s. other_strides=%s', self, other, self_strides, other_strides)\n        return None\n    if OrderedSet(self_strides) != OrderedSet(other_strides):\n        return None\n    stride_to_index = {s: i for i, s in enumerate(self_strides)}\n    order = [stride_to_index[s] for s in other_strides]\n    assert OrderedSet(order) == OrderedSet(range(0, self.num_vars))\n    return order",
    "docstring": "Can return None if not able to decide loop orders.",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\dependencies.py",
    "ast_data": "FunctionDef name:decide_loop_order_to_match arg:self arg:other arguments arg arg Compare If Compare Call Return return:no If Compare Call Return return:no If Call BoolOp Compare Compare Call Return return:no Assign Call Assign Call If BoolOp Compare Call Call Call Compare Call Call Call Call Return return:no If Compare Call Call Return return:no Assign Call Assign Compare Call Call Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "get_path",
    "source_code": "def get_path(self):\n    raise NotImplementedError('Derived must override')",
    "docstring": "Return the path of this patch.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\patches.py",
    "ast_data": "FunctionDef name:get_path arg:self arguments arg Raise Call"
  },
  {
    "library": "pandas",
    "name": "step",
    "source_code": "@property\ndef step(self) -> int:\n    return self._range.step",
    "docstring": "The value of the parameter (`pandas.RangeIndex` if not supplied. >>> idx = pd.RangeIndex(1, 0) >>> idx.step 1",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\indexes\\range.py",
    "ast_data": "FunctionDef name:step arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_get_type",
    "source_code": "def _get_type(value):\n    if isinstance(value, type_spec.TypeSpec):\n        return value.value_type()\n    else:\n        return type(value)",
    "docstring": "Returns the type of if it is a TypeSpec.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\data\\ops\\readers.py",
    "ast_data": "FunctionDef name:_get_type arg:value arguments arg If Call Return return:yes Call Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "_compute_transformed_categories",
    "source_code": "def _compute_transformed_categories(self, i, remove_dropped=True):\n    cats = self.categories_[i]\n    if self._infrequent_enabled:\n        infreq_map = self._default_to_infrequent_mappings[i]\n        if infreq_map is not None:\n            frequent_mask = infreq_map < infreq_map.max()\n            infrequent_cat = 'infrequent_sklearn'\n            cats = np.concatenate((cats[frequent_mask], np.array([infrequent_cat], dtype=object)))\n    if remove_dropped:\n        cats = self._remove_dropped_categories(cats, i)\n    return cats",
    "docstring": "Compute the transformed categories used for column . 1. If there are infrequent categories, the category is named 'infrequent_sklearn'. 2. Dropped columns are removed when remove_dropped=True.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\preprocessing\\_encoders.py",
    "ast_data": "FunctionDef name:_compute_transformed_categories arg:self arg:i arg:remove_dropped arguments arg arg arg Assign If Assign If Compare Assign Compare Call Assign Assign Call Call If Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "ndtri",
    "source_code": "def ndtri(p, name='ndtri'):\n    with ops.name_scope(name, values=[p]):\n        p = ops.convert_to_tensor(p, name='p')\n        if p.dtype.as_numpy_dtype not in [np.float32, np.float64]:\n            raise TypeError('p.dtype=%s is not handled, see docstring for supported types.' % p.dtype)\n        return _ndtri(p)",
    "docstring": "The inverse of the CDF of the Normal distribution function. Returns x such that the area under the pdf from minus infinity to x is equal to p. A piece-wise rational approximation is done for the function. This is a port of the implementation in netlib. Args: p: of type , . name: Python string. A name for the operation (default=\"ndtri\"). Returns: x: with . Raises: TypeError: if is not floating-type.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\distributions\\special_math.py",
    "ast_data": "FunctionDef name:ndtri arg:p arg:name arguments arg arg With Call Assign Call If Compare Raise Call Return return:yes Call"
  },
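The TF port above computes the same function as scipy.special.ndtri; a small numerical check with the SciPy version (assuming SciPy is installed):

```python
from scipy.special import ndtri

# Inverse standard-normal CDF: the area left of the returned x equals p.
print(ndtri(0.5))    # 0.0
print(ndtri(0.975))  # 1.959963984540054  (the familiar ~1.96 quantile)
```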
  {
    "library": "authlib",
    "name": "check_permission",
    "source_code": "def check_permission(self, client, request):\n    raise NotImplementedError()",
    "docstring": "Checks whether the current client is allowed to be accessed, edited or deleted. Developers MUST implement it in subclass, e.g.:: def check_permission(self, client, request): return client.editable :return: boolean",
    "type": "method",
    "file_path": "authlib\\authlib\\oauth2\\rfc7592\\endpoint.py",
    "ast_data": "FunctionDef name:check_permission arg:self arg:client arg:request arguments arg arg arg Raise Call"
  },
  {
    "library": "cryptography",
    "name": "rfc4514_string",
    "source_code": "def rfc4514_string(self, attr_name_overrides: _OidNameMap | None=None) -> str:\n    return ','.join((attr.rfc4514_string(attr_name_overrides) for attr in reversed(self._attributes)))",
    "docstring": "Format as RFC4514 Distinguished Name string. For example 'CN=foobar.com,O=Foo Corp,C=US' An X.509 name is a two-level structure: a list of sets of attributes. Each list element is separated by ',' and within each list element, set elements are separated by '+'. The latter is almost never used in real world certificates. According to RFC4514 section 2.1 the RDNSequence must be reversed when converting to string representation.",
    "type": "method",
    "file_path": "cryptography\\src\\cryptography\\x509\\name.py",
    "ast_data": "FunctionDef name:rfc4514_string arg:self arg:attr_name_overrides arguments arg arg Return return:yes Call Call Call"
  },
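A usage sketch: the RDN sequence is reversed on output, so the common name leads the string.

from cryptography import x509
from cryptography.x509.oid import NameOID

name = x509.Name([
    x509.NameAttribute(NameOID.COUNTRY_NAME, "US"),
    x509.NameAttribute(NameOID.ORGANIZATION_NAME, "Foo Corp"),
    x509.NameAttribute(NameOID.COMMON_NAME, "foobar.com"),
])
print(name.rfc4514_string())  # CN=foobar.com,O=Foo Corp,C=US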
  {
    "library": "tensorflow",
    "name": "path_to_bytes",
    "source_code": "def path_to_bytes(path):\n    if hasattr(path, '__fspath__'):\n        path = path.__fspath__()\n    return as_bytes(path)",
    "docstring": "Converts input which is a object to . Converts from any python constant representation of a object or to bytes. Args: path: An object that can be converted to path representation. Returns: A object. Usage: In case a simplified version of the path is needed from an object.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\util\\compat.py",
    "ast_data": "FunctionDef name:path_to_bytes arg:path arguments arg If Call Assign Call Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "disallow",
    "source_code": "def disallow(nodes: set[str]) -> Callable[[type[_T]], type[_T]]:\n\n    def disallowed(cls: type[_T]) -> type[_T]:\n        cls.unsupported_nodes = ()\n        for node in nodes:\n            new_method = _node_not_implemented(node)\n            name = f'visit_{node}'\n            cls.unsupported_nodes += (name,)\n            setattr(cls, name, new_method)\n        return cls\n    return disallowed",
    "docstring": "Decorator to disallow certain nodes from parsing. Raises a NotImplementedError instead. Returns ------- callable",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\computation\\expr.py",
    "ast_data": "FunctionDef name:disallow arg:nodes arguments arg FunctionDef name:disallowed arg:cls arguments arg Assign For Assign Call Assign Call Return return:yes Return return:yes"
  },
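A self-contained sketch of the same pattern, assuming the disallow function above is in scope; _node_not_implemented below is a stand-in for pandas' internal helper, and the visitor class is hypothetical:

def _node_not_implemented(node_name):
    # Return a visitor method that always raises.
    def f(self, *args, **kwargs):
        raise NotImplementedError(f"'{node_name}' nodes are not supported")
    return f

@disallow({"Yield", "Await"})
class MyVisitor:
    pass

print(MyVisitor.unsupported_nodes)  # ('visit_Yield', 'visit_Await'), in set order
# MyVisitor().visit_Yield(node) would raise NotImplementedError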
  {
    "library": "kornia",
    "name": "to",
    "source_code": "def to(self, device: Optional[torch.device]=None, dtype: Optional[torch.dtype]=None) -> 'FaceDetectorResult':\n    self._data = self._data.to(device=device, dtype=dtype)\n    return self",
    "docstring": "Like :func: method.",
    "type": "method",
    "file_path": "kornia\\kornia\\contrib\\face_detection.py",
    "ast_data": "FunctionDef name:to arg:self arg:device arg:dtype arguments arg arg arg Assign Call Return return:yes"
  },
  {
    "library": "scipy",
    "name": "write_element",
    "source_code": "def write_element(self, arr, mdtype=None):\n    if mdtype is None:\n        mdtype = NP_TO_MTYPES[arr.dtype.str[1:]]\n    if arr.dtype.byteorder == swapped_code:\n        arr = arr.byteswap().view(arr.dtype.newbyteorder())\n    byte_count = arr.size * arr.itemsize\n    if byte_count <= 4:\n        self.write_smalldata_element(arr, mdtype, byte_count)\n    else:\n        self.write_regular_element(arr, mdtype, byte_count)",
    "docstring": "write tag and data",
    "type": "method",
    "file_path": "scipy\\scipy\\io\\matlab\\_mio5.py",
    "ast_data": "FunctionDef name:write_element arg:self arg:arr arg:mdtype arguments arg arg arg If Compare Assign If Compare Assign Call Call Call Assign If Compare Call Call"
  },
  {
    "library": "scikit-learn",
    "name": "fit",
    "source_code": "def fit(self, y):\n    y = column_or_1d(y, warn=True)\n    self.classes_ = _unique(y)\n    return self",
    "docstring": "Fit label encoder. Parameters ---------- y : array-like of shape (n_samples,) Target values. Returns ------- self : returns an instance of self. Fitted label encoder.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\preprocessing\\_label.py",
    "ast_data": "FunctionDef name:fit arg:self arg:y arguments arg arg Assign Call Assign Call Return return:yes"
  },
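Typical usage:

from sklearn.preprocessing import LabelEncoder

le = LabelEncoder()
le.fit(["paris", "tokyo", "paris", "amsterdam"])
print(le.classes_)                       # ['amsterdam' 'paris' 'tokyo']
print(le.transform(["tokyo", "paris"]))  # [2 1]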
  {
    "library": "tensorflow",
    "name": "_BatchDataset",
    "source_code": "class _BatchDataset(dataset_ops.UnaryDataset):\n\n    def __init__(self, input_dataset, batch_size, drop_remainder, name=None):\n        self._input_dataset = input_dataset\n        self._batch_size = ops.convert_to_tensor(batch_size, dtype=dtypes.int64, name='batch_size')\n        self._drop_remainder = ops.convert_to_tensor(drop_remainder, dtype=dtypes.bool, name='drop_remainder')\n        constant_drop_remainder = tensor_util.constant_value(self._drop_remainder)\n        if constant_drop_remainder:\n            constant_batch_size = tensor_util.constant_value(self._batch_size)\n            self._structure = nest.map_structure(lambda component_spec: component_spec._batch(constant_batch_size), input_dataset.element_spec)\n        else:\n            self._structure = nest.map_structure(lambda component_spec: component_spec._batch(None), input_dataset.element_spec)\n        self._name = name\n        variant_tensor = gen_dataset_ops.batch_dataset_v2(input_dataset._variant_tensor, batch_size=self._batch_size, drop_remainder=self._drop_remainder, **self._common_args)\n        super().__init__(input_dataset, variant_tensor)\n\n    @property\n    def element_spec(self):\n        return self._structure",
    "docstring": "A that batches contiguous elements from its input.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\data\\ops\\batch_op.py",
    "ast_data": "ClassDef name:_BatchDataset FunctionDef name:__init__ arg:self arg:input_dataset arg:batch_size arg:drop_remainder arg:name arguments arg arg arg arg arg Assign Assign Call Assign Call Assign Call If Assign Call Assign Call arguments arg Call Assign Call arguments arg Call Assign Assign Call Call Call FunctionDef name:element_spec arg:self arguments arg Return return:yes"
  },
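The class is private; the public entry point is Dataset.batch, e.g.:

import tensorflow as tf

ds = tf.data.Dataset.range(7).batch(3, drop_remainder=True)
for batch in ds:
    print(batch.numpy())  # [0 1 2] then [3 4 5]; the short tail [6] is dropped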
  {
    "library": "pytorch",
    "name": "add_push_null",
    "source_code": "def add_push_null(self, gen_fn, call_function_ex=False):\n    old_len = len(self._output)\n    if sys.version_info < (3, 13):\n        self.clear_tos()\n    gen_fn()\n    added_insts = self._output[old_len:]\n    del self._output[old_len:]\n    if call_function_ex:\n        self._output.extend(add_push_null_call_function_ex(added_insts))\n    else:\n        self._output.extend(add_push_null(added_insts))\n    if sys.version_info >= (3, 13):\n        self.clear_tos()",
    "docstring": "generates instructions via PyCodegen methods that push a single callable to the stack. pushes a NULL to the stack before or after the instructions generated by , depending on Python version. Will attempt to use the NULL push bit for instructions with such bits (LOAD_GLOBAL 3.11+, LOAD_ATTR 3.12+, LOAD_SUPER_ATTR).",
    "type": "method",
    "file_path": "pytorch\\torch\\_dynamo\\codegen.py",
    "ast_data": "FunctionDef name:add_push_null arg:self arg:gen_fn arg:call_function_ex arguments arg arg arg Assign Call If Compare Call Call Assign If Call Call Call Call If Compare Call"
  },
  {
    "library": "tensorflow",
    "name": "Aggregator",
    "source_code": "class Aggregator(object, metaclass=abc.ABCMeta):\n\n    def __init__(self, use_steps, num_samples=None, steps=None, batch_size=None):\n        self.use_steps = use_steps\n        self.num_samples = num_samples\n        self.steps = steps\n        self.batch_size = batch_size\n        self.results = []\n\n    @abc.abstractmethod\n    def create(self, batch_outs):\n        raise NotImplementedError('Must be implemented in subclasses.')\n\n    @abc.abstractmethod\n    def aggregate(self, batch_outs, batch_start=None, batch_end=None):\n        raise NotImplementedError('Must be implemented in subclasses.')\n\n    @abc.abstractmethod\n    def finalize(self):\n        raise NotImplementedError('Must be implemented in subclasses.')",
    "docstring": "Abstract base class used to aggregate batch-level outputs of a loop. Attributes: use_steps: Whether the loop is using or . num_samples: Total number of samples: . steps: Total number of steps. batch_size: Batch size. It is used for validation checks between inputs and outputs. results: What to return at the end of the aggregation loop.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\training_utils_v1.py",
    "ast_data": "ClassDef name:Aggregator FunctionDef name:__init__ arg:self arg:use_steps arg:num_samples arg:steps arg:batch_size arguments arg arg arg arg arg Assign Assign Assign Assign Assign FunctionDef name:create arg:self arg:batch_outs arguments arg arg Raise Call FunctionDef name:aggregate arg:self arg:batch_outs arg:batch_start arg:batch_end arguments arg arg arg arg Raise Call FunctionDef name:finalize arg:self arguments arg Raise Call"
  },
  {
    "library": "kornia",
    "name": "_chroma_subsampling",
    "source_code": "def _chroma_subsampling(input_ycbcr: Tensor) -> tuple[Tensor, Tensor, Tensor]:\n    output_y: Tensor = input_ycbcr[:, 0]\n    output_cb: Tensor = input_ycbcr[:, 1]\n    output_cr: Tensor = input_ycbcr[:, 2]\n    output_cb = rescale(output_cb[:, None], factor=0.5, interpolation='bilinear', align_corners=False, antialias=True)\n    output_cr = rescale(output_cr[:, None], factor=0.5, interpolation='bilinear', align_corners=False, antialias=True)\n    return (output_y, output_cb[:, 0], output_cr[:, 0])",
    "docstring": "Implement chroma subsampling. Args: input_ycbcr (Tensor): YCbCr input tensor of the shape :math:. Returns: output_y (Tensor): Y component (not-subsampled), shape is :math:. output_cb (Tensor): Cb component (subsampled), shape is :math:. output_cr (Tensor): Cr component (subsampled), shape is :math:.",
    "type": "function",
    "file_path": "kornia\\kornia\\enhance\\jpeg.py",
    "ast_data": "FunctionDef name:_chroma_subsampling arg:input_ycbcr arguments arg Assign Call Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "row_lengths",
    "source_code": "def row_lengths(self, axis=1, name=None):\n    if axis == 0:\n        return self._row_partition.nrows()\n    if axis == 1:\n        return self._row_partition.row_lengths()\n    with ops.name_scope(name, 'RaggedRowLengths', [self]):\n        axis = array_ops.get_positive_axis(axis, self.shape.rank, ndims_name='rank(self)')\n        if axis == 0:\n            return self.nrows()\n        elif axis == 1:\n            splits = self.row_splits\n            return splits[1:] - splits[:-1]\n        elif isinstance(self.values, RaggedTensor):\n            return self.with_values(self.values.row_lengths(axis - 1))\n        else:\n            shape = array_ops.shape(self.values, out_type=self._row_partition.dtype)\n            return self.with_values(array_ops.ones(shape[:axis - 1], self._row_partition.dtype) * shape[axis - 1])",
    "docstring": "Returns the lengths of the rows in this ragged tensor. indicates the number of values in the th row of . Args: axis: An integer constant indicating the axis whose row lengths should be returned. name: A name prefix for the returned tensor (optional). Returns: A potentially ragged integer Tensor with shape . Raises: ValueError: If is out of bounds. #### Example: >>> rt = tf.ragged.constant( ... [[[3, 1, 4], [1]], [], [[5, 9], [2]], [[6]], []]) >>> print(rt.row_lengths()) # lengths of rows in rt tf.Tensor([2 0 2 1 0], shape=(5,), dtype=int64) >>> print(rt.row_lengths(axis=2)) # lengths of axis=2 rows.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\ragged_tensor.py",
    "ast_data": "FunctionDef name:row_lengths arg:self arg:axis arg:name arguments arg arg arg If Compare Return return:yes Call If Compare Return return:yes Call With Call Assign Call If Compare Return return:yes Call If Compare Assign Return return:yes If Call Return return:yes Call Call Assign Call Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "initialize",
    "source_code": "def initialize(self, table):\n    raise NotImplementedError",
    "docstring": "Returns the table initialization op.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\lookup_ops.py",
    "ast_data": "FunctionDef name:initialize arg:self arg:table arguments arg arg Raise"
  },
  {
    "library": "pytorch",
    "name": "start",
    "source_code": "def start(self) -> None:\n    if self._thread:\n        raise RuntimeError('The timer has already started.')\n    self._thread = Thread(target=self._run, name=self._name or 'PeriodicTimer', args=(self._ctx,), daemon=True)\n    self._finalizer = weakref.finalize(self, self._stop_thread, self._thread, self._ctx.stop_event)\n    self._finalizer.atexit = False\n    self._thread.start()",
    "docstring": "Start the timer.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\elastic\\rendezvous\\utils.py",
    "ast_data": "FunctionDef name:start arg:self arguments arg If Raise Call Assign Call BoolOp Assign Call Assign Call"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, cluster_resolver=None, communication_options=None, *, mesh=None):\n    self._validate_init_args(mesh, cluster_resolver)\n    if not mesh:\n        if not cluster_resolver:\n            cluster_resolver = tfconfig_cluster_resolver.TFConfigClusterResolver()\n        dtensor_env_var = _parse_dtensor_env_var_from_cluster_resolver(cluster_resolver)\n        _config_dtensor_env_var(dtensor_env_var)\n        mesh = _build_distributed_mesh(dtensor_util.DEFAULT_BATCH_MESH_DIM_NAME)\n    extended = dtensor_strategy_extended.DTensorStrategyExtended(container_strategy=self, mesh=mesh)\n    super().__init__(extended)\n    self._mesh = mesh\n    self._cluster_resolver = cluster_resolver",
    "docstring": "Creates the strategy. Args: cluster_resolver: optional . In case neither nor are provided, is used. communication_options: currently ignore. mesh: optional Dtensor global mesh for the computation. Note that either or the should be provided. and not both.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\experimental\\multi_worker_mirrored_strategy.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:cluster_resolver arg:communication_options arguments arg arg arg arg Call If If Assign Call Assign Call Call Assign Call Assign Call Call Call Assign Assign"
  },
  {
    "library": "pytorch",
    "name": "_get_default_qconfig_mapping_with_default_qconfig",
    "source_code": "def _get_default_qconfig_mapping_with_default_qconfig(is_qat: bool, backend: str, default_qconfig: QConfig) -> QConfigMapping:\n    if is_qat:\n        qconfig_mapping = get_default_qat_qconfig_mapping(backend)\n    else:\n        qconfig_mapping = get_default_qconfig_mapping(backend)\n    qconfig_mapping.set_global(default_qconfig)\n    for pattern in qconfig_mapping.object_type_qconfigs.keys():\n        if pattern not in _FIXED_QPARAMS_OP_TO_OBSERVER:\n            qconfig_mapping.set_object_type(pattern, default_qconfig)\n    return qconfig_mapping",
    "docstring": "Return a QConfigMapping that uses the provided qconfig as the default QConfig.",
    "type": "function",
    "file_path": "pytorch\\torch\\ao\\quantization\\qconfig_mapping.py",
    "ast_data": "FunctionDef name:_get_default_qconfig_mapping_with_default_qconfig arg:is_qat arg:backend arg:default_qconfig arguments arg arg arg If Assign Call Assign Call Call For Call If Compare Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "set_anncoords",
    "source_code": "def set_anncoords(self, coords):\n    self._textcoords = coords",
    "docstring": "Set the coordinate system to use for . See also *xycoords* in .",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\text.py",
    "ast_data": "FunctionDef name:set_anncoords arg:self arg:coords arguments arg arg Assign"
  },
  {
    "library": "tensorflow",
    "name": "reset_memory_stats",
    "source_code": "def reset_memory_stats(self, dev):\n    self._initialize_physical_devices()\n    self.ensure_initialized()\n    pywrap_tfe.TFE_ResetMemoryStats(self._context_handle, dev)",
    "docstring": "Resets the tracked memory stats for the device.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\context.py",
    "ast_data": "FunctionDef name:reset_memory_stats arg:self arg:dev arguments arg arg Call Call Call"
  },
  {
    "library": "django",
    "name": "get_from_clause",
    "source_code": "def get_from_clause(self):\n    result = []\n    params = []\n    for alias, from_clause in tuple(self.query.alias_map.items()):\n        if not self.query.alias_refcount[alias]:\n            continue\n        clause_sql, clause_params = self.compile(from_clause)\n        result.append(clause_sql)\n        params.extend(clause_params)\n    for t in self.query.extra_tables:\n        alias, _ = self.query.table_alias(t)\n        if alias not in self.query.alias_map or self.query.alias_refcount[alias] == 1:\n            result.append(', %s' % self.quote_name_unless_alias(alias))\n    return (result, params)",
    "docstring": "Return a list of strings that are joined together to go after the \"FROM\" part of the query, as well as a list any extra parameters that need to be included. Subclasses, can override this to create a from-clause via a \"select\". This should only be called after any SQL construction methods that might change the tables that are needed. This means the select columns, ordering, and distinct must be done first.",
    "type": "method",
    "file_path": "django\\django\\db\\models\\sql\\compiler.py",
    "ast_data": "FunctionDef name:get_from_clause arg:self arguments arg Assign Assign For Call Call If Assign Call Call Call For Assign Call If BoolOp Compare Compare Call Call Return return:yes"
  },
  {
    "library": "seaborn",
    "name": "_make_legend",
    "source_code": "def _make_legend(self, p: Plot) -> None:\n    merged_contents: dict[tuple[str, str | int], tuple[list[tuple[Artist, ...]], list[str]]] = {}\n    for key, new_artists, labels in self._legend_contents:\n        if key not in merged_contents:\n            new_artist_tuples = [tuple([a]) for a in new_artists]\n            merged_contents[key] = (new_artist_tuples, labels)\n        else:\n            existing_artists = merged_contents[key][0]\n            for i, new_artist in enumerate(new_artists):\n                existing_artists[i] += tuple([new_artist])\n    loc = 'center right' if self._pyplot else 'center left'\n    base_legend = None\n    for (name, _), (handles, labels) in merged_contents.items():\n        legend = mpl.legend.Legend(self._figure, handles, labels, title=name, loc=loc, bbox_to_anchor=(0.98, 0.55))\n        if base_legend:\n            base_legend_box = base_legend.get_children()[0]\n            this_legend_box = legend.get_children()[0]\n            base_legend_box.get_children().extend(this_legend_box.get_children())\n        else:\n            base_legend = legend\n            self._figure.legends.append(legend)",
    "docstring": "Create the legend artist(s) and add onto the figure.",
    "type": "method",
    "file_path": "seaborn\\seaborn\\_core\\plot.py",
    "ast_data": "FunctionDef name:_make_legend arg:self arg:p arguments arg arg For If Compare Assign Call Assign Assign For Call Call Assign Assign For Call Assign Call If Assign Call Assign Call Call Call Call Assign Call"
  },
  {
    "library": "pytorch",
    "name": "compute_ancestors",
    "source_code": "def compute_ancestors(self) -> None:\n    name_to_ancestors: dict[str, OrderedSet[str]] = {}\n    for node in self.nodes:\n        ancestors = OrderedSet[str]()\n        for dep in node.unmet_dependencies:\n            dep_node_name = self.name_to_buf[dep.name].defining_op_name()\n            ancestors.add(dep_node_name)\n            ancestors |= name_to_ancestors[dep_node_name]\n        name_to_ancestors[node.get_name()] = ancestors\n        node.ancestors = ancestors\n    for order, node in enumerate(self.nodes):\n        node.min_order = order\n        node.max_order = order",
    "docstring": "Populate each node.ancestors",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\scheduler.py",
    "ast_data": "FunctionDef name:compute_ancestors arg:self arguments arg For Assign Call For Assign Call Call Assign Call Assign For Call Assign Assign"
  },
  {
    "library": "pandas",
    "name": "display_memory_usage",
    "source_code": "@property\ndef display_memory_usage(self) -> bool:\n    return bool(self.info.memory_usage)",
    "docstring": "Whether to display memory usage.",
    "type": "method",
    "file_path": "pandas\\pandas\\io\\formats\\info.py",
    "ast_data": "FunctionDef name:display_memory_usage arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "max_pool2d",
    "source_code": "def max_pool2d(input, kernel_size, stride=None, padding=0, dilation=1, ceil_mode=False, return_indices=False):\n    if return_indices:\n        raise NotImplementedError('return_indices is not yet implemented!')\n    if stride is None:\n        stride = torch.jit.annotate(list[int], [])\n    return torch.nn.functional.max_pool2d(input, kernel_size, stride, padding, dilation, ceil_mode=ceil_mode, return_indices=return_indices)",
    "docstring": "Applies a 2D max pooling over a quantized input signal composed of several quantized input planes. .. note:: The input quantization parameters are propagated to the output. See :class: for details.",
    "type": "function",
    "file_path": "pytorch\\torch\\ao\\nn\\quantized\\functional.py",
    "ast_data": "FunctionDef name:max_pool2d arg:input arg:kernel_size arg:stride arg:padding arg:dilation arg:ceil_mode arg:return_indices arguments arg arg arg arg arg arg arg If Raise Call If Compare Assign Call Return return:yes Call"
  },
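A hedged usage sketch (eager-mode quantized tensors; the module alias below is assumed from the file location above):

import torch
from torch.ao.nn.quantized import functional as qF

x = torch.quantize_per_tensor(
    torch.randn(1, 3, 8, 8), scale=0.1, zero_point=0, dtype=torch.quint8)
y = qF.max_pool2d(x, kernel_size=2, stride=2)
print(y.shape, y.q_scale())  # torch.Size([1, 3, 4, 4]) 0.1 -> qparams propagate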
  {
    "library": "django",
    "name": "_initialize_stream",
    "source_code": "def _initialize_stream(self):\n    self.file = io.BytesIO() if self._content_type == bytes else io.StringIO()",
    "docstring": "Initialize underlying stream according to the content type.",
    "type": "method",
    "file_path": "django\\django\\core\\files\\storage\\memory.py",
    "ast_data": "FunctionDef name:_initialize_stream arg:self arguments arg Assign Compare Call Call"
  },
  {
    "library": "tensorflow",
    "name": "BuiltinCodeToName",
    "source_code": "def BuiltinCodeToName(code):\n    for name, value in schema_fb.BuiltinOperator.__dict__.items():\n        if value == code:\n            return name\n    return None",
    "docstring": "Converts a builtin op code enum to a readable name.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\lite\\tools\\visualize.py",
    "ast_data": "FunctionDef name:BuiltinCodeToName arg:code arguments arg For Call If Compare Return return:yes Return return:no"
  },
  {
    "library": "scikit-learn",
    "name": "transform",
    "source_code": "def transform(self, X):\n    check_is_fitted(self)\n    X = validate_data(self, X, accept_sparse=['csr', 'csc'], reset=False)\n    return safe_sparse_dot(X, self.components_.T)",
    "docstring": "Perform dimensionality reduction on X. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) New data. Returns ------- X_new : ndarray of shape (n_samples, n_components) Reduced version of X. This will always be a dense array.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\decomposition\\_truncated_svd.py",
    "ast_data": "FunctionDef name:transform arg:self arg:X arguments arg arg Call Assign Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "MergeDevice",
    "source_code": "class MergeDevice(object):\n    __slots__ = ['_spec']\n\n    def __init__(self, spec):\n        if isinstance(spec, device_spec.DeviceSpecV2):\n            self._spec = spec\n        elif isinstance(spec, device_spec.DeviceSpecV1):\n            self._spec = spec.__class__.from_string(spec.to_string())\n        else:\n            self._spec = DeviceSpec.from_string(spec)\n\n    def __call__(self, node_def):\n        current_device = DeviceSpec.from_string(node_def.device or '')\n        return self._spec.make_merged_spec(current_device)\n\n    def shortcut_string_merge(self, node_def):\n        device = node_def.device or ''\n        merge_key = (self._spec, device)\n        result = _string_merge_cache.get(merge_key)\n        if result is None:\n            result = self.__call__(node_def).to_string()\n            _string_merge_cache[merge_key] = result\n        return result\n\n    def __repr__(self):\n        return '{} (spec: {})'.format(super(MergeDevice, self).__repr__(), self._spec.to_string())\n\n    @property\n    def is_null_merge(self):\n        return not bool(self._spec.to_string())",
    "docstring": "Wraps a device specification (DeviceSpec or str) with merge functionality. When called, this class will merge a node_def with its own spec. It also exposes a method which can significantly improve performance of device placement.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\device.py",
    "ast_data": "ClassDef name:MergeDevice Assign FunctionDef name:__init__ arg:self arg:spec arguments arg arg If Call Assign If Call Assign Call Call Assign Call FunctionDef name:__call__ arg:self arg:node_def arguments arg arg Assign Call BoolOp Return return:yes Call FunctionDef name:shortcut_string_merge arg:self arg:node_def arguments arg arg Assign BoolOp Assign Assign Call If Compare Assign Call Call Assign Return return:yes FunctionDef name:__repr__ arg:self arguments arg Return return:yes Call Call Call Call FunctionDef name:is_null_merge arg:self arguments arg Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_alloc_pid",
    "source_code": "def _alloc_pid(self) -> int:\n    pid = self._next_pid\n    self._next_pid += 1\n    return pid",
    "docstring": "Allocate a process Id.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\client\\timeline.py",
    "ast_data": "FunctionDef name:_alloc_pid arg:self arguments arg Assign Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "check_inplace_ensure_writeable",
    "source_code": "@ignore_warnings(category=FutureWarning)\ndef check_inplace_ensure_writeable(name, estimator_orig):\n    rng = np.random.RandomState(0)\n    estimator = clone(estimator_orig)\n    set_random_state(estimator)\n    n_samples = 100\n    X, _ = make_blobs(n_samples=n_samples, n_features=3, random_state=rng)\n    X = _enforce_estimator_tags_X(estimator, X)\n    if name in ('Lasso', 'ElasticNet', 'MultiTaskElasticNet', 'MultiTaskLasso'):\n        X = np.asfortranarray(X)\n    if hasattr(estimator, 'missing_values'):\n        X[0, 0] = np.nan\n    if is_regressor(estimator):\n        y = rng.normal(size=n_samples)\n    else:\n        y = rng.randint(low=0, high=2, size=n_samples)\n    y = _enforce_estimator_tags_y(estimator, y)\n    X_copy = X.copy()\n    X.setflags(write=False)\n    estimator.fit(X, y)\n    if hasattr(estimator, 'transform'):\n        estimator.transform(X)\n    assert not X.flags.writeable\n    assert_allclose(X, X_copy)",
    "docstring": "Check that estimators able to do inplace operations can work on read-only input data even if a copy is not explicitly requested by the user. Make sure that a copy is made and consequently that the input array and its writeability are not modified by the estimator.",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\utils\\estimator_checks.py",
    "ast_data": "FunctionDef name:check_inplace_ensure_writeable arg:name arg:estimator_orig arguments arg arg Assign Call Assign Call Call Assign Assign Call Assign Call If Compare Assign Call If Call Assign If Call Assign Call Assign Call Assign Call Assign Call Call Call If Call Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "is_same_module_as",
    "source_code": "def is_same_module_as(self, node: _IRNode) -> bool:\n    return self.stack_meta == node.stack_meta",
    "docstring": "Determines if the provided node pertains to the same module as this node.",
    "type": "method",
    "file_path": "pytorch\\torch\\onnx\\_internal\\fx\\passes\\modularization.py",
    "ast_data": "FunctionDef name:is_same_module_as arg:self arg:node arguments arg arg Return return:yes Compare"
  },
  {
    "library": "pytorch",
    "name": "__init__",
    "source_code": "@compatibility(is_backward_compatible=True)\ndef __init__(self, graph: 'Graph', name: str, op: str, target: 'Target', args: tuple['Argument', ...], kwargs: dict[str, 'Argument'], return_type: Optional[Any]=None) -> None:\n    if op == 'call_function':\n        if not callable(target):\n            raise ValueError(f\"Node [graph = {graph}, name = '{name}'] target {target} has type {torch.typename(target)} but a Callable is expected\")\n    else:\n        assert op in _legal_ops\n        if not isinstance(target, str):\n            raise ValueError(f\"Node [graph = {graph}, name = '{name}'] target {target} has type {torch.typename(target)} but a str is expected\")\n    super().__init__(graph, name, op, target, return_type)\n    self._update_args_kwargs(args, kwargs)",
    "docstring": "Instantiate an instance of `` return_type (Optional[Any]): The python type expression representing the type of the output of this node. This field can be used for annotation of values in the generated code or for other types of analyses.",
    "type": "method",
    "file_path": "pytorch\\torch\\fx\\node.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:graph arg:name arg:op arg:target arg:args arg:kwargs arg:return_type arguments arg arg arg arg arg arg arg arg If Compare If Call Raise Call Call Compare If Call Raise Call Call Call Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "_get_mod_type",
    "source_code": "def _get_mod_type(fn: Callable) -> _ModificationType:\n    num_positional_args = sum((1 for param in inspect.signature(fn).parameters.values() if param.default == inspect.Parameter.empty))\n    assert num_positional_args == 5 or num_positional_args == 4\n    if num_positional_args == 5:\n        return _ModificationType.SCORE_MOD\n    elif num_positional_args == 4:\n        return _ModificationType.MASK_MOD\n    else:\n        return _ModificationType.UNKNOWN",
    "docstring": "Get the type of modification function. This function inspects the number of positional arguments of the function to determine the type of modification function. If the function has 5 positional arguments, it is considered as a score_mod function. If the function has 4 positional arguments, it is considered as a mask function.",
    "type": "function",
    "file_path": "pytorch\\torch\\nn\\attention\\flex_attention.py",
    "ast_data": "FunctionDef name:_get_mod_type arg:fn arguments arg Assign Call Call Call Compare BoolOp Compare Compare If Compare Return return:yes If Compare Return return:yes Return return:yes"
  },
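The counting logic in isolation; the score_mod signature below follows the flex_attention convention (score, batch, head, q_idx, kv_idx):

import inspect

def score_mod(score, b, h, q_idx, kv_idx):  # 5 positional args
    return score

num_positional = sum(
    1 for p in inspect.signature(score_mod).parameters.values()
    if p.default is inspect.Parameter.empty)
print(num_positional)  # 5 -> classified as SCORE_MOD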
  {
    "library": "kornia",
    "name": "histogram",
    "source_code": "def histogram(x: Tensor, bins: Tensor, bandwidth: Tensor, epsilon: float=1e-10) -> Tensor:\n    pdf, _ = marginal_pdf(x.unsqueeze(2), bins, bandwidth, epsilon)\n    return pdf",
    "docstring": "Estimate the histogram of the input tensor. The calculation uses kernel density estimation which requires a bandwidth (smoothing) parameter. Args: x: Input tensor to compute the histogram with shape :math:. bins: The number of bins to use the histogram :math:. bandwidth: Gaussian smoothing factor with shape shape [1]. epsilon: A scalar, for numerical stability. Returns: Computed histogram of shape :math:. Examples: >>> x = torch.rand(1, 10) >>> bins = torch.torch.linspace(0, 255, 128) >>> hist = histogram(x, bins, bandwidth=torch.tensor(0.9)) >>> hist.shape torch.Size([1, 128])",
    "type": "function",
    "file_path": "kornia\\kornia\\enhance\\histogram.py",
    "ast_data": "FunctionDef name:histogram arg:x arg:bins arg:bandwidth arg:epsilon arguments arg arg arg arg Assign Call Call Return return:yes"
  },
  {
    "library": "django",
    "name": "_add_new_csrf_cookie",
    "source_code": "def _add_new_csrf_cookie(request):\n    csrf_secret = _get_new_csrf_string()\n    request.META.update({'CSRF_COOKIE': csrf_secret, 'CSRF_COOKIE_NEEDS_UPDATE': True})\n    return csrf_secret",
    "docstring": "Generate a new random CSRF_COOKIE value, and add it to request.META.",
    "type": "function",
    "file_path": "django\\django\\middleware\\csrf.py",
    "ast_data": "FunctionDef name:_add_new_csrf_cookie arg:request arguments arg Assign Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "forward",
    "source_code": "def forward(self, x, lengths):\n    for module in self.seq_module:\n        x = module(x)\n        mask = torch.BoolTensor(x.size()).fill_(0)\n        if x.is_cuda:\n            mask = mask.cuda()\n        for i, length in enumerate(lengths):\n            length = length.item()\n            if mask[i].size(2) - length > 0:\n                mask[i].narrow(2, length, mask[i].size(2) - length).fill_(1)\n        x = x.masked_fill(mask, 0)\n    return (x, lengths)",
    "docstring": ":param x: The input of size BxCxDxT :param lengths: The actual length of each sequence in the batch :return: Masked output from the module",
    "type": "method",
    "file_path": "pytorch\\benchmarks\\functional_autograd_benchmark\\torchaudio_models.py",
    "ast_data": "FunctionDef name:forward arg:self arg:x arg:lengths arguments arg arg arg For Assign Call Assign Call Call Call If Assign Call For Call Assign Call If Compare Call Call Call Call Assign Call Return return:yes"
  },
  {
    "library": "scipy",
    "name": "fresnels_zeros",
    "source_code": "def fresnels_zeros(nt):\n    if floor(nt) != nt or nt <= 0 or (not isscalar(nt)):\n        raise ValueError('Argument must be positive scalar integer.')\n    return _specfun.fcszo(2, nt)",
    "docstring": "Compute nt complex zeros of sine Fresnel integral S(z). Parameters ---------- nt : int Number of zeros to compute Returns ------- fresnels_zeros: ndarray Zeros of the sine Fresnel integral References ---------- .. [1] Zhang, Shanjie and Jin, Jianming. \"Computation of Special Functions\", John Wiley and Sons, 1996.",
    "type": "function",
    "file_path": "scipy\\scipy\\special\\_basic.py",
    "ast_data": "FunctionDef name:fresnels_zeros arg:nt arguments arg If BoolOp Compare Call Compare Call Raise Call Return return:yes Call"
  },
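Usage sketch:

from scipy.special import fresnels_zeros

zeros = fresnels_zeros(3)          # first three complex zeros of S(z)
print(zeros.shape, zeros.dtype)    # (3,) complex128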
  {
    "library": "authlib",
    "name": "fetch_access_token",
    "source_code": "async def fetch_access_token(self, url, verifier=None, **kwargs):\n    if verifier:\n        self.auth.verifier = verifier\n    if not self.auth.verifier:\n        self.handle_error('missing_verifier', 'Missing \"verifier\" value')\n    token = await self._fetch_token(url, **kwargs)\n    self.auth.verifier = None\n    return token",
    "docstring": "Method for fetching an access token from the token endpoint. This is the final step in the OAuth 1 workflow. An access token is obtained using all previously obtained credentials, including the verifier from the authorization step. :param url: Access Token endpoint. :param verifier: A verifier string to prove authorization was granted. :param kwargs: Extra parameters to include for fetching access token. :return: A token dict.",
    "type": "method",
    "file_path": "authlib\\authlib\\integrations\\httpx_client\\oauth1_client.py",
    "ast_data": "AsyncFunctionDef name:fetch_access_token arg:self arg:url arg:verifier arguments arg arg arg arg If Assign If Call Assign Call Assign Return return:yes"
  },
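A hedged usage sketch; the endpoint URL, credentials, and verifier are placeholders:

import asyncio
from authlib.integrations.httpx_client import AsyncOAuth1Client

async def main():
    async with AsyncOAuth1Client("client-id", "client-secret") as client:
        # The verifier comes from the preceding authorization step.
        token = await client.fetch_access_token(
            "https://provider.example/oauth/access_token", verifier="abc123")
        print(token)

# asyncio.run(main())  # needs a live OAuth 1 provider to actually run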
  {
    "library": "pandas",
    "name": "FixedWindowIndexer",
    "source_code": "class FixedWindowIndexer(BaseIndexer):\n\n    @Appender(get_window_bounds_doc)\n    def get_window_bounds(self, num_values: int=0, min_periods: int | None=None, center: bool | None=None, closed: str | None=None, step: int | None=None) -> tuple[np.ndarray, np.ndarray]:\n        if center or self.window_size == 0:\n            offset = (self.window_size - 1) // 2\n        else:\n            offset = 0\n        end = np.arange(1 + offset, num_values + 1 + offset, step, dtype='int64')\n        start = end - self.window_size\n        if closed in ['left', 'both']:\n            start -= 1\n        if closed in ['left', 'neither']:\n            end -= 1\n        end = np.clip(end, 0, num_values)\n        start = np.clip(start, 0, num_values)\n        return (start, end)",
    "docstring": "Creates window boundaries that are of fixed length.",
    "type": "class",
    "file_path": "pandas\\pandas\\core\\indexers\\objects.py",
    "ast_data": "ClassDef name:FixedWindowIndexer FunctionDef name:get_window_bounds arg:self arg:num_values arg:min_periods arg:center arg:closed arg:step arguments arg arg arg arg arg arg If BoolOp Compare Assign Assign Assign Call Assign If Compare If Compare Assign Call Assign Call Return return:yes Call"
  },
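The boundary arithmetic replayed with plain NumPy for window_size=3 over five values (default right-closed windows, no centering):

import numpy as np

window_size, num_values = 3, 5
end = np.arange(1, num_values + 1, dtype="int64")
start = np.clip(end - window_size, 0, num_values)
print([(int(s), int(e)) for s, e in zip(start, end)])
# [(0, 1), (0, 2), (0, 3), (1, 4), (2, 5)] -> each row looks back 3 values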
  {
    "library": "pytorch",
    "name": "short",
    "source_code": "def short(self):\n    _warn_typed_storage_removal()\n    return self._to(torch.short)",
    "docstring": "Casts this storage to short type.",
    "type": "method",
    "file_path": "pytorch\\torch\\storage.py",
    "ast_data": "FunctionDef name:short arg:self arguments arg Call Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "_compute_missing_values_in_feature_mask",
    "source_code": "def _compute_missing_values_in_feature_mask(self, X, estimator_name=None):\n    estimator_name = estimator_name or self.__class__.__name__\n    common_kwargs = dict(estimator_name=estimator_name, input_name='X')\n    if not self._support_missing_values(X):\n        assert_all_finite(X, **common_kwargs)\n        return None\n    with np.errstate(over='ignore'):\n        overall_sum = np.sum(X)\n    if not np.isfinite(overall_sum):\n        _assert_all_finite_element_wise(X, xp=np, allow_nan=True, **common_kwargs)\n    if not np.isnan(overall_sum):\n        return None\n    missing_values_in_feature_mask = _any_isnan_axis0(X)\n    return missing_values_in_feature_mask",
    "docstring": "Return boolean mask denoting if there are missing values for each feature. This method also ensures that X is finite. Parameter --------- X : array-like of shape (n_samples, n_features), dtype=DOUBLE Input data. estimator_name : str or None, default=None Name to use when raising an error. Defaults to the class name. Returns ------- missing_values_in_feature_mask : ndarray of shape (n_features,), or None Missing value mask. If missing values are not supported or there are no missing values, return None.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\tree\\_classes.py",
    "ast_data": "FunctionDef name:_compute_missing_values_in_feature_mask arg:self arg:X arg:estimator_name arguments arg arg arg Assign BoolOp Assign Call If Call Call Return return:no With Call Assign Call If Call Call If Call Return return:no Assign Call Return return:yes"
  },
  {
    "library": "django",
    "name": "GetCurrentTimezoneNode",
    "source_code": "class GetCurrentTimezoneNode(Node):\n\n    def __init__(self, variable):\n        self.variable = variable\n\n    def render(self, context):\n        context[self.variable] = timezone.get_current_timezone_name()\n        return ''",
    "docstring": "Template node class used by ``.",
    "type": "class",
    "file_path": "django\\django\\templatetags\\tz.py",
    "ast_data": "ClassDef name:GetCurrentTimezoneNode FunctionDef name:__init__ arg:self arg:variable arguments arg arg Assign FunctionDef name:render arg:self arg:context arguments arg arg Assign Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "warn_tensor_cycles",
    "source_code": "def warn_tensor_cycles():\n    logger.info('Watching Python reference cycles for CUDA Tensors.')\n\n    def write_and_log(html):\n        with NamedTemporaryFile('w', suffix='.html', delete=False) as f:\n            f.write(html)\n            logger.warning('Reference cycle includes a CUDA Tensor see visualization of cycle %s', f.name)\n    return observe_tensor_cycles(write_and_log)",
    "docstring": "Install a warning that reports whenever a cycle that is holding CUDA memory is observed. The warning produces an .html file that visualizes the cycle, and links it to the stack frame that allocted the CUDA tensor. Reference cycles are freed by the cycle collector rather than being cleaned up when the objects in the cycle first become unreachable. If a cycle points to a tensor, the CUDA memory for that tensor will not be freed until garbage collection runs. Accumulation of CUDA allocations can lead to out of memory errors (OOMs), as well as non-deterministic allocation behavior which is harder to debug.",
    "type": "function",
    "file_path": "pytorch\\torch\\utils\\viz\\_cycles.py",
    "ast_data": "FunctionDef name:warn_tensor_cycles arguments Call FunctionDef name:write_and_log arg:html arguments arg With Call Call Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "StreamContext",
    "source_code": "class StreamContext:\n    cur_stream: Optional['torch.xpu.Stream']\n\n    def __init__(self, stream: Optional['torch.xpu.Stream']):\n        self.stream = stream\n        self.idx = _get_device_index(None, True)\n        if self.idx is None:\n            self.idx = -1\n\n    def __enter__(self):\n        cur_stream = self.stream\n        if cur_stream is None or self.idx == -1:\n            return\n        self.src_prev_stream = torch.xpu.current_stream(None)\n        if self.src_prev_stream.device != cur_stream.device:\n            with device(cur_stream.device):\n                self.dst_prev_stream = torch.xpu.current_stream(cur_stream.device)\n        torch.xpu.set_stream(cur_stream)\n\n    def __exit__(self, type: Any, value: Any, traceback: Any):\n        cur_stream = self.stream\n        if cur_stream is None or self.idx == -1:\n            return\n        if self.src_prev_stream.device != cur_stream.device:\n            torch.xpu.set_stream(self.dst_prev_stream)\n        torch.xpu.set_stream(self.src_prev_stream)",
    "docstring": "Context-manager that selects a given stream. All XPU kernels queued within its context will be enqueued on a selected stream. Args: Stream (Stream): selected stream. This manager is a no-op if it's ``. .. note:: Streams are per-device.",
    "type": "class",
    "file_path": "pytorch\\torch\\xpu\\__init__.py",
    "ast_data": "ClassDef name:StreamContext FunctionDef name:__init__ arg:self arg:stream arguments arg arg Assign Assign Call If Compare Assign FunctionDef name:__enter__ arg:self arguments arg Assign If BoolOp Compare Compare Return return:no Assign Call If Compare With Call Assign Call Call FunctionDef name:__exit__ arg:self arg:type arg:value arg:traceback arguments arg arg arg arg Assign If BoolOp Compare Compare Return return:no If Compare Call Call"
  },
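Usage sketch via the public torch.xpu.stream wrapper (guarded, since it needs an XPU device):

import torch

if torch.xpu.is_available():
    s = torch.xpu.Stream()
    with torch.xpu.stream(s):            # enters StreamContext
        y = torch.ones(4, device="xpu") * 2
    torch.xpu.synchronize()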
  {
    "library": "tensorflow",
    "name": "DatasetV1Adapter",
    "source_code": "class DatasetV1Adapter(DatasetV1):\n\n    def __init__(self, dataset: DatasetV2):\n        self._dataset = dataset\n        super(DatasetV1Adapter, self).__init__()\n\n    def _as_variant_tensor(self):\n        return self._dataset._variant_tensor\n\n    def _inputs(self):\n        return self._dataset._inputs()\n\n    def _functions(self) -> list[StructuredFunctionWrapper]:\n        return self._dataset._functions()\n\n    def options(self):\n        return self._dataset.options()\n\n    @property\n    def element_spec(self):\n        return self._dataset.element_spec\n\n    def __iter__(self):\n        return iter(self._dataset)",
    "docstring": "Wraps a V2 object in the API.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\data\\ops\\dataset_ops.py",
    "ast_data": "ClassDef name:DatasetV1Adapter FunctionDef name:__init__ arg:self arg:dataset arguments arg arg Assign Call Call FunctionDef name:_as_variant_tensor arg:self arguments arg Return return:yes FunctionDef name:_inputs arg:self arguments arg Return return:yes Call FunctionDef name:_functions arg:self arguments arg Return return:yes Call FunctionDef name:options arg:self arguments arg Return return:yes Call FunctionDef name:element_spec arg:self arguments arg Return return:yes FunctionDef name:__iter__ arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "take_while",
    "source_code": "def take_while(self, predicate, name=None) -> 'DatasetV2':\n    from tensorflow.python.data.ops import take_while_op\n    return take_while_op._take_while(self, predicate, name=name)",
    "docstring": "A transformation that stops dataset iteration based on a . >>> dataset = tf.data.Dataset.range(10) >>> dataset = dataset.take_while(lambda x: x >> [a.item() for a in dataset.as_numpy_iterator()] [0, 1, 2, 3, 4] Args: predicate: A function that maps a nested structure of tensors (having shapes and types defined by and ) to a scalar tensor. name: (Optional.) A name for the tf.data operation. Returns: A new with the transformation applied as described above.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\data\\ops\\dataset_ops.py",
    "ast_data": "FunctionDef name:take_while arg:self arg:predicate arg:name arguments arg arg arg Return return:yes Call"
  },
  {
    "library": "numpy",
    "name": "get_as_parameter",
    "source_code": "def get_as_parameter(self):\n    warnings.warn('\"get_as_parameter\" is deprecated. Use \"_as_parameter_\" instead', DeprecationWarning, stacklevel=2)\n    return self._as_parameter_",
    "docstring": "Deprecated getter for the property. .. deprecated:: 1.21",
    "type": "method",
    "file_path": "numpy\\numpy\\_core\\_internal.py",
    "ast_data": "FunctionDef name:get_as_parameter arg:self arguments arg Call Return return:yes"
  },
  {
    "library": "django",
    "name": "cut",
    "source_code": "@register.filter\n@stringfilter\ndef cut(value, arg):\n    safe = isinstance(value, SafeData)\n    value = value.replace(arg, '')\n    if safe and arg != ';':\n        return mark_safe(value)\n    return value",
    "docstring": "Remove all values of arg from the given string.",
    "type": "function",
    "file_path": "django\\django\\template\\defaultfilters.py",
    "ast_data": "FunctionDef name:cut arg:value arg:arg arguments arg arg Assign Call Assign Call If BoolOp Compare Return return:yes Call Return return:yes"
  },
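Direct-call sketch (in templates this filter is written as {{ value|cut:" " }}):

from django.template.defaultfilters import cut

print(cut("String with spaces", " "))  # Stringwithspaces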
  {
    "library": "tensorflow",
    "name": "_largest_integer_by_dtype",
    "source_code": "def _largest_integer_by_dtype(dt):\n    if not _is_known_dtype(dt):\n        raise TypeError('Unrecognized dtype: {}'.format(dt.name))\n    if dt.is_floating:\n        return int(2 ** (np.finfo(dt.as_numpy_dtype).nmant + 1))\n    if dt.is_integer:\n        return np.iinfo(dt.as_numpy_dtype).max\n    if dt.base_dtype == dtypes.bool:\n        return int(1)\n    raise TypeError('Unrecognized dtype: {}'.format(dt.name))",
    "docstring": "Helper returning the largest integer exactly representable by dtype.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\distributions\\util.py",
    "ast_data": "FunctionDef name:_largest_integer_by_dtype arg:dt arguments arg If Call Raise Call Call If Return return:yes Call Call If Return return:yes Call If Compare Return return:yes Call Raise Call Call"
  },
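A worked check of the floating branch: float32 carries 23 mantissa bits, so integers are exactly representable only up to 2**24.

import numpy as np

nmant = np.finfo(np.float32).nmant            # 23
print(2 ** (nmant + 1))                       # 16777216
print(np.float32(16777216) == 16777216)       # True
print(np.float32(16777217) == 16777217)       # False: rounds to 16777216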
  {
    "library": "sphinx",
    "name": "Figure",
    "source_code": "class Figure(images.Figure):\n\n    def run(self) -> list[Node]:\n        name = self.options.pop('name', None)\n        result = super().run()\n        if len(result) == 2 or isinstance(result[0], nodes.system_message):\n            return result\n        assert len(result) == 1\n        figure_node = cast('nodes.figure', result[0])\n        if name:\n            self.options['name'] = name\n            self.add_name(figure_node)\n        if figure_node.line is None and len(figure_node) == 2:\n            caption = cast('nodes.caption', figure_node[1])\n            figure_node.line = caption.line\n        return [figure_node]",
    "docstring": "The figure directive which applies option to the figure node instead of the image node.",
    "type": "class",
    "file_path": "sphinx\\sphinx\\directives\\patches.py",
    "ast_data": "ClassDef name:Figure FunctionDef name:run arg:self arguments arg Assign Call Assign Call Call If BoolOp Compare Call Call Return return:yes Compare Call Assign Call If Assign Call If BoolOp Compare Compare Call Assign Call Assign Return return:yes"
  },
  {
    "library": "django",
    "name": "geo_db_type",
    "source_code": "def geo_db_type(self, f):\n    return None",
    "docstring": "Return None because geometry columns are added via the stored procedure on SpatiaLite.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\db\\backends\\spatialite\\operations.py",
    "ast_data": "FunctionDef name:geo_db_type arg:self arg:f arguments arg arg Return return:no"
  },
  {
    "library": "matplotlib",
    "name": "set_multilinebaseline",
    "source_code": "def set_multilinebaseline(self, t):\n    self._multilinebaseline = t\n    self.stale = True",
    "docstring": "Set multilinebaseline. If True, the baseline for multiline text is adjusted so that it is (approximately) center-aligned with single-line text. This is used e.g. by the legend implementation so that single-line labels are baseline-aligned, but multiline labels are \"center\"-aligned with them.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\offsetbox.py",
    "ast_data": "FunctionDef name:set_multilinebaseline arg:self arg:t arguments arg arg Assign Assign"
  },
  {
    "library": "tensorflow",
    "name": "with_max_depth",
    "source_code": "def with_max_depth(self, max_depth):\n    self._options['max_depth'] = max_depth\n    return self",
    "docstring": "Set the maximum depth of display. The depth depends on profiling view. For 'scope' view, it's the depth of name scope hierarchy (tree), for 'op' view, it's the number of operation types (list), etc. Args: max_depth: Maximum depth of the data structure to display. Returns: self",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\profiler\\option_builder.py",
    "ast_data": "FunctionDef name:with_max_depth arg:self arg:max_depth arguments arg arg Assign Return return:yes"
  },
  {
    "library": "scipy",
    "name": "Schaffer01",
    "source_code": "class Schaffer01(Benchmark):\n\n    def __init__(self, dimensions=2):\n        Benchmark.__init__(self, dimensions)\n        self._bounds = list(zip([-100.0] * self.N, [100.0] * self.N))\n        self.custom_bounds = [(-10, 10), (-10, 10)]\n        self.global_optimum = [[0.0 for _ in range(self.N)]]\n        self.fglob = 0.0\n\n    def fun(self, x, *args):\n        self.nfev += 1\n        u = x[0] ** 2 + x[1] ** 2\n        num = sin(u) ** 2 - 0.5\n        den = (1 + 0.001 * u) ** 2\n        return 0.5 + num / den",
    "docstring": "Schaffer 1 objective function. This class defines the Schaffer 1 [1]_ global optimization problem. This is a multimodal minimization problem defined as follows: .. math:: f_{\\text{Schaffer01}}(x) = 0.5 + \\frac{\\sin^2 (x_1^2 + x_2^2)^2 - 0.5} {1 + 0.001(x_1^2 + x_2^2)^2} with :math: for :math:. *Global optimum*: :math: for :math: for :math: .. [1] Mishra, S. Some new test functions for global optimization and performance of repulsive particle swarm method. Munich Personal RePEc Archive, 2006, 2718",
    "type": "class",
    "file_path": "scipy\\benchmarks\\benchmarks\\go_benchmark_functions\\go_funcs_S.py",
    "ast_data": "ClassDef name:Schaffer01 FunctionDef name:__init__ arg:self arg:dimensions arguments arg arg Call Assign Call Call Assign Assign Call Assign FunctionDef name:fun arg:self arg:x arguments arg arg arg Assign Assign Call Assign Return return:yes"
  },
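A quick check of the optimum at the origin, evaluating the objective directly (the Benchmark base-class wiring is omitted):

from numpy import sin

def schaffer01(x0, x1):
    u = x0 ** 2 + x1 ** 2
    return 0.5 + (sin(u) ** 2 - 0.5) / (1 + 0.001 * u) ** 2

print(schaffer01(0.0, 0.0))  # 0.0, matching fglob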
  {
    "library": "tensorflow",
    "name": "state_size",
    "source_code": "@property\ndef state_size(self):\n    raise NotImplementedError('Abstract method')",
    "docstring": "size(s) of state(s) used by this cell. It can be represented by an Integer, a TensorShape or a tuple of Integers or TensorShapes.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\layers\\recurrent.py",
    "ast_data": "FunctionDef name:state_size arg:self arguments arg Raise Call"
  },
  {
    "library": "pytorch",
    "name": "_MultiDeviceReplicator",
    "source_code": "class _MultiDeviceReplicator:\n\n    def __init__(self, master_tensor: torch.Tensor) -> None:\n        self.master = master_tensor\n        self._per_device_tensors: dict[torch.device, torch.Tensor] = {}\n\n    def get(self, device: torch.device) -> torch.Tensor:\n        retval = self._per_device_tensors.get(device, None)\n        if retval is None:\n            retval = self.master.to(device=device, non_blocking=True, copy=True)\n            self._per_device_tensors[device] = retval\n        return retval",
    "docstring": "Lazily serves copies of a tensor to requested devices. Copies are cached per-device.",
    "type": "class",
    "file_path": "pytorch\\torch\\amp\\grad_scaler.py",
    "ast_data": "ClassDef name:_MultiDeviceReplicator FunctionDef name:__init__ arg:self arg:master_tensor arguments arg arg Assign FunctionDef name:get arg:self arg:device arguments arg arg Assign Call If Compare Assign Call Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "number_of_shards",
    "source_code": "@property\ndef number_of_shards(self):\n    return self._sharding_policies[0].number_of_shards",
    "docstring": "Gets the number of shards to use for the InfeedQueue. Returns: Number of shards or None if the number of shards has not been set.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\tpu\\tpu_feed.py",
    "ast_data": "FunctionDef name:number_of_shards arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_log",
    "source_code": "def _log(self, message):\n    self._log_messages.append(message)\n    print(message)",
    "docstring": "Log and print authoring warning / error message.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\lite\\python\\authoring\\authoring.py",
    "ast_data": "FunctionDef name:_log arg:self arg:message arguments arg arg Call Call"
  },
  {
    "library": "pygame",
    "name": "visible",
    "source_code": "@property\ndef visible(self):\n    return self._get_visible()",
    "docstring": "You can make this sprite disappear without removing it from the group assign 0 for invisible and 1 for visible",
    "type": "method",
    "file_path": "pygame\\src_py\\sprite.py",
    "ast_data": "FunctionDef name:visible arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "get_values",
    "source_code": "@final\ndef get_values(self, dtype: DtypeObj | None=None) -> np.ndarray:\n    values: ArrayLike = self.values\n    if dtype == _dtype_obj:\n        values = values.astype(object)\n    return np.asarray(values).reshape(self.shape)",
    "docstring": "return object dtype as boxed values, such as Timestamps/Timedelta",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\internals\\blocks.py",
    "ast_data": "FunctionDef name:get_values arg:self arg:dtype arguments arg arg If Compare Assign Call Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "CheckpointInitialValueCallable",
    "source_code": "@tf_export('__internal__.tracking.CheckpointInitialValueCallable', v1=[])\nclass CheckpointInitialValueCallable(object):\n\n    def __init__(self, checkpoint_position):\n        self._checkpoint_position = checkpoint_position\n\n    @property\n    def checkpoint_position(self):\n        return self._checkpoint_position\n\n    def __call__(self, shape=None, dtype=None, shard_info=None):\n        return CheckpointInitialValue(self._checkpoint_position, shape, shard_info=shard_info)\n\n    @property\n    def restore_uid(self):\n        return self._checkpoint_position.restore_uid",
    "docstring": "A callable object that returns a CheckpointInitialValue. See CheckpointInitialValue for more information.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\trackable\\base.py",
    "ast_data": "ClassDef name:CheckpointInitialValueCallable FunctionDef name:__init__ arg:self arg:checkpoint_position arguments arg arg Assign FunctionDef name:checkpoint_position arg:self arguments arg Return return:yes FunctionDef name:__call__ arg:self arg:shape arg:dtype arg:shard_info arguments arg arg arg arg Return return:yes Call FunctionDef name:restore_uid arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "_weights_not_none",
    "source_code": "@property\ndef _weights_not_none(self):\n    if self.weights is None:\n        return None\n    return [w for est, w in zip(self.estimators, self.weights) if est[1] != 'drop']",
    "docstring": "Get the weights of not estimators.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\ensemble\\_voting.py",
    "ast_data": "FunctionDef name:_weights_not_none arg:self arguments arg If Compare Return return:no Return return:yes Call Compare"
  },
  {
    "library": "matplotlib",
    "name": "__init__",
    "source_code": "def __init__(self, center_longitude, center_latitude, resolution):\n    _GeoTransform.__init__(self, resolution)\n    self._center_longitude = center_longitude\n    self._center_latitude = center_latitude",
    "docstring": "Create a new Lambert transform. Resolution is the number of steps to interpolate between each input line segment to approximate its path in curved Lambert space.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\projections\\geo.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:center_longitude arg:center_latitude arg:resolution arguments arg arg arg arg Call Assign Assign"
  },
  {
    "library": "pytorch",
    "name": "_set_node_metadata_hook",
    "source_code": "@contextlib.contextmanager\ndef _set_node_metadata_hook(gm: torch.fx.GraphModule, f):\n    assert callable(f), 'node_metadata_hook must be a callable.'\n    for m in gm.modules():\n        if isinstance(m, GraphModule):\n            m._register_create_node_hook(f)\n    try:\n        yield\n    finally:\n        for m in gm.modules():\n            if isinstance(m, GraphModule):\n                m._unregister_create_node_hook(f)",
    "docstring": "Takes a callable which will be called after we create a new node. The callable takes the newly created node as input and returns None.",
    "type": "function",
    "file_path": "pytorch\\torch\\_export\\passes\\_node_metadata_hook.py",
    "ast_data": "FunctionDef name:_set_node_metadata_hook arg:gm arg:f arguments arg arg Call For Call If Call Call Try For Call If Call Call"
  },
  {
    "library": "pytorch",
    "name": "_force_shutdown",
    "source_code": "def _force_shutdown(self, verbose: bool=False) -> None:\n    for job in self._active_jobs:\n        job.proc.interrupt()\n    if verbose and self._currently_processed is not None:\n        print(textwrap.dedent(f'\\n                Failed when processing the following Job:\\n                  Label:      {self._currently_processed.label}\\n                  AutoLabels: {self._currently_processed.autolabels}\\n                  Source cmd: {self._currently_processed.source_cmd}\\n            ').strip() + '\\n')\n    if self._active_jobs:\n        time.sleep(0.5)\n    remaining_jobs = [j for j in self._active_jobs if j.proc.poll() is None]\n    if remaining_jobs:\n        print(f'SIGINT sent to {len(self._active_jobs)} jobs, {len(remaining_jobs)} have not yet exited.\\nEntering short cleanup loop, after which stragglers will be forcibly terminated.')\n        for _ in range(5):\n            time.sleep(2.0)\n            remaining_jobs = [j for j in remaining_jobs if j.proc.poll() is None]\n            if remaining_jobs:\n                print(f'{len(remaining_jobs)} still remain.')\n            else:\n                print('All remaining jobs have gracefully terminated.')\n                return\n        print(f'{len(remaining_jobs)} jobs refused to exit. Forcibly terminating.')\n        for j in remaining_jobs:\n            j.proc.terminate()",
    "docstring": "Try to interrupt jobs, and kill if need be. We would prefer to softly terminate jobs so that they have a chance to clean up before shutting down.",
    "type": "method",
    "file_path": "pytorch\\benchmarks\\instruction_counts\\execution\\runner.py",
    "ast_data": "FunctionDef name:_force_shutdown arg:self arg:verbose arguments arg arg For Call If BoolOp Compare Call Call Call If Call Assign Compare Call If Call Call Call For Call Call Assign Compare Call If Call Call Call Return return:no Call Call For Call"
  },
  {
    "library": "kornia",
    "name": "adjust_saturation_with_gray_subtraction",
    "source_code": "def adjust_saturation_with_gray_subtraction(image: Tensor, factor: Union[float, Tensor]) -> Tensor:\n    KORNIA_CHECK_IS_TENSOR(image, 'Expected shape (*, H, W)')\n    KORNIA_CHECK(isinstance(factor, (float, Tensor)), 'Factor should be float or Tensor.')\n    KORNIA_CHECK_IS_COLOR_OR_GRAY(image, 'Image should be an RGB or gray image')\n    if image.shape[-3] == 1:\n        return image\n    if isinstance(factor, float):\n        factor = torch.as_tensor(factor, device=image.device, dtype=image.dtype)\n    elif isinstance(factor, Tensor):\n        factor = factor.to(image.device, image.dtype)\n    while len(factor.shape) != len(image.shape):\n        factor = factor[..., None]\n    x_other: Tensor = rgb_to_grayscale(image)\n    x_adjusted: Tensor = (1 - factor) * x_other + factor * image\n    out: Tensor = torch.clamp(x_adjusted, 0.0, 1.0)\n    return out",
    "docstring": "Adjust color saturation of an image by blending the image with its grayscaled version. The image is expected to be an RGB image or a gray image in the range of [0, 1]. If it is an RGB image, returns blending of the image with its grayscaled version. If it is a gray image, returns the image. .. note:: this is just a convenience function to have compatibility with Pil Args: image: Image/Tensor to be adjusted in the shape of :math:. factor: How much to adjust the saturation. 0 will give a black and white image, 1 will give the original image while 2 will enhance the saturation by a factor of 2. Return: Adjusted image in the shape of :math:. Example: >>> x = torch.ones(1, 3, 3, 3) >>> adjust_saturation_with_gray_subtraction(x, 2.).shape torch.Size([1, 3, 3, 3]) >>> x = torch.ones(2, 3, 3, 3) >>> y = torch.tensor([1., 2.]) >>> adjust_saturation_with_gray_subtraction(x, y).shape torch.Size([2, 3, 3, 3])",
    "type": "function",
    "file_path": "kornia\\kornia\\enhance\\adjust.py",
    "ast_data": "FunctionDef name:adjust_saturation_with_gray_subtraction arg:image arg:factor arguments arg arg Call Call Call Call If Compare Return return:yes If Call Assign Call If Call Assign Call While Compare Call Call Assign Call Call Return return:yes"
  },
  {
    "library": "authlib",
    "name": "revoke_access_token",
    "source_code": "def revoke_access_token(self, token, request):\n    raise NotImplementedError()",
    "docstring": "Revoke a token access in case an invalid client has been requested. Developers MUST implement this method in subclass:: def revoke_access_token(self, token, request): token.revoked = True token.save()",
    "type": "method",
    "file_path": "authlib\\authlib\\oauth2\\rfc7592\\endpoint.py",
    "ast_data": "FunctionDef name:revoke_access_token arg:self arg:token arg:request arguments arg arg arg Raise Call"
  },
  {
    "library": "pytorch",
    "name": "_register_create_node_hook",
    "source_code": "def _register_create_node_hook(self, f):\n    assert callable(f), 'create_node hook must be a callable.'\n    self._create_node_hooks.append(f)",
    "docstring": "Takes a callable which will be called after we create a new node. The callable takes the newly created node as input and returns None.",
    "type": "method",
    "file_path": "pytorch\\torch\\fx\\graph_module.py",
    "ast_data": "FunctionDef name:_register_create_node_hook arg:self arg:f arguments arg arg Call Call"
  },
  {
    "library": "django",
    "name": "execute_wrapper",
    "source_code": "@contextmanager\ndef execute_wrapper(self, wrapper):\n    self.execute_wrappers.append(wrapper)\n    try:\n        yield\n    finally:\n        self.execute_wrappers.pop()",
    "docstring": "Return a context manager under which the wrapper is applied to suitable database query executions.",
    "type": "method",
    "file_path": "django\\django\\db\\backends\\base\\base.py",
    "ast_data": "FunctionDef name:execute_wrapper arg:self arg:wrapper arguments arg arg Call Try Call"
  },
  {
    "library": "scipy",
    "name": "find_intersection",
    "source_code": "def find_intersection(x, tr_bounds, lb, ub):\n    lb_centered = lb - x\n    ub_centered = ub - x\n    lb_total = np.maximum(lb_centered, -tr_bounds)\n    ub_total = np.minimum(ub_centered, tr_bounds)\n    orig_l = np.equal(lb_total, lb_centered)\n    orig_u = np.equal(ub_total, ub_centered)\n    tr_l = np.equal(lb_total, -tr_bounds)\n    tr_u = np.equal(ub_total, tr_bounds)\n    return (lb_total, ub_total, orig_l, orig_u, tr_l, tr_u)",
    "docstring": "Find intersection of trust-region bounds and initial bounds. Returns ------- lb_total, ub_total : ndarray with shape of x Lower and upper bounds of the intersection region. orig_l, orig_u : ndarray of bool with shape of x True means that an original bound is taken as a corresponding bound in the intersection region. tr_l, tr_u : ndarray of bool with shape of x True means that a trust-region bound is taken as a corresponding bound in the intersection region.",
    "type": "function",
    "file_path": "scipy\\scipy\\optimize\\_lsq\\dogbox.py",
    "ast_data": "FunctionDef name:find_intersection arg:x arg:tr_bounds arg:lb arg:ub arguments arg arg arg arg Assign Assign Assign Call Assign Call Assign Call Assign Call Assign Call Assign Call Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "sample_y",
    "source_code": "def sample_y(self, X, n_samples=1, random_state=0):\n    rng = check_random_state(random_state)\n    y_mean, y_cov = self.predict(X, return_cov=True)\n    if y_mean.ndim == 1:\n        y_samples = rng.multivariate_normal(y_mean, y_cov, n_samples).T\n    else:\n        y_samples = [rng.multivariate_normal(y_mean[:, target], y_cov[..., target], n_samples).T[:, np.newaxis] for target in range(y_mean.shape[1])]\n        y_samples = np.hstack(y_samples)\n    return y_samples",
    "docstring": "Draw samples from Gaussian process and evaluate at X. Parameters ---------- X : array-like of shape (n_samples_X, n_features) or list of object Query points where the GP is evaluated. n_samples : int, default=1 Number of samples drawn from the Gaussian process per query point. random_state : int, RandomState instance or None, default=0 Determines random number generation to randomly draw samples. Pass an int for reproducible results across multiple function calls. See :term:. Returns ------- y_samples : ndarray of shape (n_samples_X, n_samples), or (n_samples_X, n_targets, n_samples) Values of n_samples samples drawn from Gaussian process and evaluated at query points.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\gaussian_process\\_gpr.py",
    "ast_data": "FunctionDef name:sample_y arg:self arg:X arg:n_samples arg:random_state arguments arg arg arg arg Assign Call Assign Call If Compare Assign Call Assign Call Call Assign Call Return return:yes"
  },
  {
    "library": "kornia",
    "name": "_pair_square_euclidean",
    "source_code": "def _pair_square_euclidean(tensor1: torch.Tensor, tensor2: torch.Tensor) -> torch.Tensor:\n    t1_sq: torch.Tensor = tensor1.mul(tensor1).sum(dim=-1, keepdim=True)\n    t2_sq: torch.Tensor = tensor2.mul(tensor2).sum(dim=-1, keepdim=True).transpose(1, 2)\n    t1_t2: torch.Tensor = tensor1.matmul(tensor2.transpose(1, 2))\n    square_dist: torch.Tensor = -2 * t1_t2 + t1_sq + t2_sq\n    square_dist = square_dist.clamp(min=0)\n    return square_dist",
    "docstring": "Compute the pairwise squared euclidean distance matrices :math: between two tensors. Tensors with shapes (B, N, C) and (B, M, C).",
    "type": "function",
    "file_path": "kornia\\kornia\\geometry\\transform\\thin_plate_spline.py",
    "ast_data": "FunctionDef name:_pair_square_euclidean arg:tensor1 arg:tensor2 arguments arg arg Call Call Call Call Call Call Call Assign Call Return return:yes"
  },
  {
    "library": "django",
    "name": "combine_expression",
    "source_code": "def combine_expression(self, connector, sub_expressions):\n    conn = ' %s ' % connector\n    return conn.join(sub_expressions)",
    "docstring": "Combine a list of subexpressions into a single expression, using the provided connecting operator. This is required because operators can vary between backends (e.g., Oracle with %% and &) and between subexpression types (e.g., date expressions).",
    "type": "method",
    "file_path": "django\\django\\db\\backends\\base\\operations.py",
    "ast_data": "FunctionDef name:combine_expression arg:self arg:connector arg:sub_expressions arguments arg arg arg Assign Return return:yes Call"
  },
  {
    "library": "authlib",
    "name": "ResourceOwnerPasswordCredentialsGrant",
    "source_code": "class ResourceOwnerPasswordCredentialsGrant(BaseGrant, TokenEndpointMixin):\n    GRANT_TYPE = 'password'\n\n    def validate_token_request(self):\n        client = self.authenticate_token_endpoint_client()\n        log.debug('Validate token request of %r', client)\n        if not client.check_grant_type(self.GRANT_TYPE):\n            raise UnauthorizedClientError(f\"The client is not authorized to use 'grant_type={self.GRANT_TYPE}'\")\n        params = self.request.form\n        if 'username' not in params:\n            raise InvalidRequestError(\"Missing 'username' in request.\")\n        if 'password' not in params:\n            raise InvalidRequestError(\"Missing 'password' in request.\")\n        log.debug('Authenticate user of %r', params['username'])\n        user = self.authenticate_user(params['username'], params['password'])\n        if not user:\n            raise InvalidRequestError(\"Invalid 'username' or 'password' in request.\")\n        self.request.client = client\n        self.request.user = user\n        self.validate_requested_scope()\n\n    @hooked\n    def create_token_response(self):\n        user = self.request.user\n        scope = self.request.payload.scope\n        token = self.generate_token(user=user, scope=scope)\n        log.debug('Issue token %r to %r', token, self.client)\n        self.save_token(token)\n        return (200, token, self.TOKEN_RESPONSE_HEADER)\n\n    def authenticate_user(self, username, password):\n        raise NotImplementedError()",
    "docstring": "The resource owner password credentials grant type is suitable in cases where the resource owner has a trust relationship with the client, such as the device operating system or a highly privileged. application. The authorization server should take special care when enabling this grant type and only allow it when other flows are not viable. This grant type is suitable for clients capable of obtaining the resource owner's credentials (username and password, typically using an interactive form). It is also used to migrate existing clients using direct authentication schemes such as HTTP Basic or Digest authentication to OAuth by converting the stored credentials to an access token:: +----------+ | Resource | | Owner | | | +----------+ v | Resource Owner (A) Password Credentials | v +---------+ +---------------+ | |>--(B)---- Resource Owner ------->| | | | Password Credentials | Authorization | | Client | | Server | | |<--(C)---- Access Token ---------<| | | | (w/ Optional Refresh Token) | | +---------+ +---------------+",
    "type": "class",
    "file_path": "authlib\\authlib\\oauth2\\rfc6749\\grants\\resource_owner_password_credentials.py",
    "ast_data": "ClassDef name:ResourceOwnerPasswordCredentialsGrant Assign FunctionDef name:validate_token_request arg:self arguments arg Assign Call Call If Call Raise Call Assign If Compare Raise Call If Compare Raise Call Call Assign Call If Raise Call Assign Assign Call FunctionDef name:create_token_response arg:self arguments arg Assign Assign Assign Call Call Call Return return:yes FunctionDef name:authenticate_user arg:self arg:username arg:password arguments arg arg arg Raise Call"
  },
  {
    "library": "scipy",
    "name": "_select_by_property",
    "source_code": "def _select_by_property(peak_properties, pmin, pmax):\n    keep = np.ones(peak_properties.size, dtype=bool)\n    if pmin is not None:\n        keep &= pmin <= peak_properties\n    if pmax is not None:\n        keep &= peak_properties <= pmax\n    return keep",
    "docstring": "Evaluate where the generic property of peaks confirms to an interval. Parameters ---------- peak_properties : ndarray An array with properties for each peak. pmin : None or number or ndarray Lower interval boundary for . `peak_propertiespeak_properties` confirms to the interval. See Also -------- find_peaks Notes ----- .. versionadded:: 1.1.0",
    "type": "function",
    "file_path": "scipy\\scipy\\signal\\_peak_finding.py",
    "ast_data": "FunctionDef name:_select_by_property arg:peak_properties arg:pmin arg:pmax arguments arg arg arg Assign Call If Compare Compare If Compare Compare Return return:yes"
  },
  {
    "library": "cherrypy",
    "name": "__init__",
    "source_code": "def __init__(self, namespace):\n    self.namespace = namespace",
    "docstring": "Initialize a toolbox instance.",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\_cptools.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:namespace arguments arg arg Assign"
  },
  {
    "library": "scipy",
    "name": "assignValue",
    "source_code": "def assignValue(self, value):\n    if not self.data.flags.writeable:\n        raise RuntimeError('variable is not writeable')\n    self.data[:] = value",
    "docstring": "Assign a scalar value to a of length one. Parameters ---------- value : scalar Scalar value (of compatible type) to assign to a length-one netcdf variable. This value will be written to file. Raises ------ ValueError If the input is not a scalar, or if the destination is not a length-one netcdf variable.",
    "type": "method",
    "file_path": "scipy\\scipy\\io\\_netcdf.py",
    "ast_data": "FunctionDef name:assignValue arg:self arg:value arguments arg arg If Raise Call Assign"
  },
  {
    "library": "pytorch",
    "name": "index_select_inference_rule",
    "source_code": "@register_inference_rule('index_select')\ndef index_select_inference_rule(n: Node, symbols, constraints, counter):\n    assert isinstance(n.args[0], Node)\n    assert isinstance(n.args[1], int)\n    assert isinstance(n.args[2], Node)\n    index_select, counter = gen_tvar(counter)\n    symbols[n] = index_select\n    dims, counter = gen_tensor_dims(1, counter)\n    is_size_1 = BinConstraintT(symbols[n.args[2]], TensorType(dims), op_eq)\n    is_dyn = BinConstraintT(symbols[n.args[2]], Dyn, op_eq)\n    c2 = Conj([is_size_1, Disj([IndexSelect(i + 1, symbols[n.args[0]], dims[0], n.args[1], index_select) for i in range(MAX_TENSOR_RANK)])])\n    c3 = Conj([is_dyn, Disj([IndexSelect(i + 1, symbols[n.args[0]], Dyn, n.args[1], index_select) for i in range(MAX_TENSOR_RANK)])])\n    return ([Disj([c2, c3])], counter)",
    "docstring": "We constrain the second argument to a vector or Dyn. The output replaces the input with the shape of the vector at the position given by the index (first argument)",
    "type": "function",
    "file_path": "pytorch\\torch\\fx\\experimental\\migrate_gradual_types\\constraint_generator.py",
    "ast_data": "FunctionDef name:index_select_inference_rule arg:n arg:symbols arg:constraints arg:counter arguments arg arg arg arg Call Call Call Assign Call Assign Assign Call Assign Call Call Assign Call Assign Call Call Call Call Assign Call Call Call Call Return return:yes Call Call"
  },
  {
    "library": "cryptography",
    "name": "finalize",
    "source_code": "@abc.abstractmethod\ndef finalize(self) -> bytes:\n    pass",
    "docstring": "Finalizes the hash context and returns the hash digest as bytes.",
    "type": "method",
    "file_path": "cryptography\\src\\cryptography\\hazmat\\primitives\\hashes.py",
    "ast_data": "FunctionDef name:finalize arg:self arguments arg"
  },
  {
    "library": "pandas",
    "name": "do_adjust_figure",
    "source_code": "def do_adjust_figure(fig: Figure) -> bool:\n    if not hasattr(fig, 'get_constrained_layout'):\n        return False\n    return not fig.get_constrained_layout()",
    "docstring": "Whether fig has constrained_layout enabled.",
    "type": "function",
    "file_path": "pandas\\pandas\\plotting\\_matplotlib\\tools.py",
    "ast_data": "FunctionDef name:do_adjust_figure arg:fig arguments arg If Call Return return:yes Return return:yes Call"
  },
  {
    "library": "numpy",
    "name": "lagcompanion",
    "source_code": "def lagcompanion(c):\n    [c] = pu.as_series([c])\n    if len(c) < 2:\n        raise ValueError('Series must have maximum degree of at least 1.')\n    if len(c) == 2:\n        return np.array([[1 + c[0] / c[1]]])\n    n = len(c) - 1\n    mat = np.zeros((n, n), dtype=c.dtype)\n    top = mat.reshape(-1)[1::n + 1]\n    mid = mat.reshape(-1)[0::n + 1]\n    bot = mat.reshape(-1)[n::n + 1]\n    top[...] = -np.arange(1, n)\n    mid[...] = 2.0 * np.arange(n) + 1.0\n    bot[...] = top\n    mat[:, -1] += c[:-1] / c[-1] * n\n    return mat",
    "docstring": "Return the companion matrix of c. The usual companion matrix of the Laguerre polynomials is already symmetric when is a basis Laguerre polynomial, so no scaling is applied. Parameters ---------- c : array_like 1-D array of Laguerre series coefficients ordered from low to high degree. Returns ------- mat : ndarray Companion matrix of dimensions (deg, deg). Examples -------- >>> from numpy.polynomial.laguerre import lagcompanion >>> lagcompanion([1, 2, 3]) array([[ 1. , -0.33333333], [-1. , 4.33333333]])",
    "type": "function",
    "file_path": "numpy\\numpy\\polynomial\\laguerre.py",
    "ast_data": "FunctionDef name:lagcompanion arg:c arguments arg Assign Call If Compare Call Raise Call If Compare Call Return return:yes Call Assign Call Assign Call Assign Call Assign Call Assign Call Assign Call Assign Call Assign Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "backward",
    "source_code": "@staticmethod\ndef backward(ctx: Any, *grad_outputs: Any) -> Any:\n    raise NotImplementedError('You must implement either the backward or vjp method for your custom autograd.Function to use it with backward mode AD.')",
    "docstring": "Define a formula for differentiating the operation with backward mode automatic differentiation. This function is to be overridden by all subclasses. (Defining this function is equivalent to defining the `ctxforwardforwardctx.needs_input_gradbackwardforward` needs gradient computed w.r.t. the output.",
    "type": "method",
    "file_path": "pytorch\\torch\\autograd\\function.py",
    "ast_data": "FunctionDef name:backward arg:ctx arguments arg arg Raise Call"
  },
  {
    "library": "pytorch",
    "name": "add_param_group",
    "source_code": "def add_param_group(self, param_group: dict[str, Any]) -> None:\n    if self.initialized and self._overlap_with_ddp:\n        raise RuntimeError('ZeroRedundancyOptimizer with `overlap_with_ddp=True` only supports a single parameter group')\n    super().add_param_group(param_group)\n    if self.initialized:\n        self._clear_cache()\n        param_groups = self._partition_parameters()[self.rank]\n        if len(param_groups) == len(self.optim.param_groups) + 1:\n            self.optim.add_param_group(param_groups[-1])\n        if self.parameters_as_bucket_view:\n            self._build_param_buckets()",
    "docstring": "Add a parameter group to the :class: 's `Optimizer` as training progresses. Arguments: param_group (dict): specifies the parameters to be optimized and group-specific optimization options. .. warning:: This method handles updating the shards on all partitions but needs to be called on all ranks. Calling this on a subset of the ranks will cause the training to hang because communication primitives are called depending on the managed parameters and expect all the ranks to participate on the same set of parameters.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\optim\\zero_redundancy_optimizer.py",
    "ast_data": "FunctionDef name:add_param_group arg:self arg:param_group arguments arg arg If BoolOp Raise Call Call Call If Call Assign Call If Compare Call Call Call If Call"
  },
  {
    "library": "tensorflow",
    "name": "verify_tensor_all_finite_v2",
    "source_code": "@tf_export('debugging.assert_all_finite', v1=[])\n@dispatch.add_dispatch_support\ndef verify_tensor_all_finite_v2(x, message, name=None):\n    with ops.name_scope(name, 'VerifyFinite', [x]) as name:\n        x = ops.convert_to_tensor(x, name='x')\n        with ops.colocate_with(x):\n            verify_input = array_ops.check_numerics(x, message=message)\n            out = control_flow_ops.with_dependencies([verify_input], x)\n    return out",
    "docstring": "Assert that the tensor does not contain any NaN's or Inf's. >>> @tf.function ... def f(x): ... x = tf.debugging.assert_all_finite(x, 'Input x must be all finite') ... return x + 1 >>> f(tf.constant([np.inf, 1, 2])) Traceback (most recent call last): ... InvalidArgumentError: ... Args: x: Tensor to check. message: Message to log on failure. name: A name for this operation (optional). Returns: Same tensor as .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\numerics.py",
    "ast_data": "FunctionDef name:verify_tensor_all_finite_v2 arg:x arg:message arg:name arguments arg arg arg With Call Assign Call With Call Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "sphinx",
    "name": "transform_content",
    "source_code": "def transform_content(self, content_node: addnodes.desc_content) -> None:\n    pass",
    "docstring": "Can be used to manipulate the content. Called after creating the content through nested parsing, but before the `` event is emitted, and before the info-fields are transformed.",
    "type": "method",
    "file_path": "sphinx\\sphinx\\directives\\__init__.py",
    "ast_data": "FunctionDef name:transform_content arg:self arg:content_node arguments arg arg"
  },
  {
    "library": "pandas",
    "name": "fill_mi_header",
    "source_code": "def fill_mi_header(row: list[Hashable], control_row: list[bool]) -> tuple[list[Hashable], list[bool]]:\n    last = row[0]\n    for i in range(1, len(row)):\n        if not control_row[i]:\n            last = row[i]\n        if row[i] == '' or row[i] is None:\n            row[i] = last\n        else:\n            control_row[i] = False\n            last = row[i]\n    return (row, control_row)",
    "docstring": "Forward fill blank entries in row but only inside the same parent index. Used for creating headers in Multiindex. Parameters ---------- row : list List of items in a single row. control_row : list of bool Helps to determine if particular column is in same parent index as the previous value. Used to stop propagation of empty cells between different indexes. Returns ------- Returns changed row and control_row",
    "type": "function",
    "file_path": "pandas\\pandas\\io\\excel\\_util.py",
    "ast_data": "FunctionDef name:fill_mi_header arg:row arg:control_row arguments arg arg Assign For Call Call If Assign If BoolOp Compare Compare Assign Assign Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "get_include",
    "source_code": "@tf_export('sysconfig.get_include')\ndef get_include():\n    import tensorflow as tf\n    return _os_path.join(_os_path.dirname(tf.__file__), 'include')",
    "docstring": "Get the directory containing the TensorFlow C++ header files. Returns: The directory as string.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\platform\\sysconfig.py",
    "ast_data": "FunctionDef name:get_include arguments Return return:yes Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "make_all",
    "source_code": "def make_all(module_name, doc_string_modules=None):\n    if doc_string_modules is None:\n        doc_string_modules = [_sys.modules[module_name]]\n    cur_members = set((name for name, _ in _tf_inspect.getmembers(_sys.modules[module_name])))\n    results = set()\n    for doc_module in doc_string_modules:\n        results.update([m.group(1) for m in _reference_pattern.finditer(doc_module.__doc__) if m.group(1) in cur_members])\n    return list(results)",
    "docstring": "Generates from the docstring of one or more modules. Usage: or . The doc string modules must each a docstring, and will contain all symbols with references, where that symbol currently exists in the module named . Args: module_name: The name of the module (usually ). doc_string_modules: a list of modules from which to take docstring. If None, then a list containing only the module named is used. Returns: A list suitable for use as .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\util\\all_util.py",
    "ast_data": "FunctionDef name:make_all arg:module_name arg:doc_string_modules arguments arg arg If Compare Assign Assign Call Call Assign Call For Call Call Call Compare Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "pointless_convert",
    "source_code": "@register_graph_pattern(CallFunction(torch.ops.prims.convert_element_type.default, CallFunction(torch.ops.prims.convert_element_type.default, KeywordArg('arg'), KeywordArg('dtype1')), KeywordArg('dtype2')), pass_dict=patterns)\ndef pointless_convert(match: Match, arg, dtype1: torch.dtype, dtype2: torch.dtype):\n    graph = match.graph\n    node = match.output_node()\n    allowed = (torch.float16, torch.bfloat16, torch.float32, torch.float64)\n    if dtype1 in allowed and dtype2 in allowed:\n        repl = graph.call_function(torch.ops.prims.convert_element_type.default, (arg, dtype2))\n        repl.meta.update(node.meta)\n        node.replace_all_uses_with(repl)\n        match.erase_nodes()",
    "docstring": "Remove chain of dtype conversions often created by AMP",
    "type": "function",
    "file_path": "pytorch\\torch\\_inductor\\fx_passes\\joint_graph.py",
    "ast_data": "FunctionDef name:pointless_convert arg:match arg:arg arg:dtype1 arg:dtype2 arguments arg arg arg arg Assign Assign Call Assign If BoolOp Compare Compare Assign Call Call Call Call Call Call Call Call Call Call"
  },
  {
    "library": "cherrypy",
    "name": "__init__",
    "source_code": "def __init__(self):\n    self.events = {}\n    wspbus.Bus.__init__(self)",
    "docstring": "Initialize a Win32 bus implementation.",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\process\\win32.py",
    "ast_data": "FunctionDef name:__init__ arg:self arguments arg Assign Call"
  },
  {
    "library": "scipy",
    "name": "minimize_quadratic_1d",
    "source_code": "def minimize_quadratic_1d(a, b, lb, ub, c=0):\n    t = [lb, ub]\n    if a != 0:\n        extremum = -0.5 * b / a\n        if lb < extremum < ub:\n            t.append(extremum)\n    t = np.asarray(t)\n    y = t * (a * t + b) + c\n    min_index = np.argmin(y)\n    return (t[min_index], y[min_index])",
    "docstring": "Minimize a 1-D quadratic function subject to bounds. The free term is 0 by default. Bounds must be finite. Returns ------- t : float Minimum point. y : float Minimum value.",
    "type": "function",
    "file_path": "scipy\\scipy\\optimize\\_lsq\\common.py",
    "ast_data": "FunctionDef name:minimize_quadratic_1d arg:a arg:b arg:lb arg:ub arg:c arguments arg arg arg arg arg Assign If Compare Assign If Compare Call Assign Call Assign Assign Call Return return:yes"
  },
  {
    "library": "django",
    "name": "_make_hash_value",
    "source_code": "def _make_hash_value(self, user, timestamp):\n    login_timestamp = '' if user.last_login is None else user.last_login.replace(microsecond=0, tzinfo=None)\n    email_field = user.get_email_field_name()\n    email = getattr(user, email_field, '') or ''\n    return f'{user.pk}{user.password}{login_timestamp}{timestamp}{email}'",
    "docstring": "Hash the user's primary key, email (if available), and some user state that's sure to change after a password reset to produce a token that is invalidated when it's used: 1. The password field will change upon a password reset (even if the same password is chosen, due to password salting). 2. The last_login field will usually be updated very shortly after a password reset. Failing those things, settings.PASSWORD_RESET_TIMEOUT eventually invalidates the token. Running this data through salted_hmac() prevents password cracking attempts using the reset token, provided the secret isn't compromised.",
    "type": "method",
    "file_path": "django\\django\\contrib\\auth\\tokens.py",
    "ast_data": "FunctionDef name:_make_hash_value arg:self arg:user arg:timestamp arguments arg arg arg Assign Compare Call Assign Call Assign BoolOp Call Return return:yes"
  },
  {
    "library": "numpy",
    "name": "_get_linear_ramps",
    "source_code": "def _get_linear_ramps(padded, axis, width_pair, end_value_pair):\n    edge_pair = _get_edges(padded, axis, width_pair)\n    left_ramp, right_ramp = (np.linspace(start=end_value, stop=edge.squeeze(axis), num=width, endpoint=False, dtype=padded.dtype, axis=axis) for end_value, edge, width in zip(end_value_pair, edge_pair, width_pair))\n    right_ramp = right_ramp[_slice_at_axis(slice(None, None, -1), axis)]\n    return (left_ramp, right_ramp)",
    "docstring": "Construct linear ramps for empty-padded array in given dimension. Parameters ---------- padded : ndarray Empty-padded array. axis : int Dimension in which the ramps are constructed. width_pair : (int, int) Pair of widths that mark the pad area on both sides in the given dimension. end_value_pair : (scalar, scalar) End values for the linear ramps which form the edge of the fully padded array. These values are included in the linear ramps. Returns ------- left_ramp, right_ramp : ndarray Linear ramps to set on both sides of .",
    "type": "function",
    "file_path": "numpy\\numpy\\lib\\_arraypad_impl.py",
    "ast_data": "FunctionDef name:_get_linear_ramps arg:padded arg:axis arg:width_pair arg:end_value_pair arguments arg arg arg arg Assign Call Assign Call Call Call Assign Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "memory_stats",
    "source_code": "def memory_stats(device: Optional[_device_t]=None) -> dict[str, Any]:\n    if not is_initialized():\n        return {}\n    return torch._C._mtia_memoryStats(_get_device_index(device, optional=True))",
    "docstring": "Return a dictionary of MTIA memory allocator statistics for a given device. Args: device (torch.device, str, or int, optional) selected device. Returns statistics for the current device, given by current_device(), if device is None (default).",
    "type": "function",
    "file_path": "pytorch\\torch\\mtia\\memory.py",
    "ast_data": "FunctionDef name:memory_stats arg:device arguments arg If Call Return return:no Return return:yes Call Call"
  },
  {
    "library": "matplotlib",
    "name": "_strip_comment",
    "source_code": "def _strip_comment(s):\n    pos = 0\n    while True:\n        quote_pos = s.find('\"', pos)\n        hash_pos = s.find('#', pos)\n        if quote_pos < 0:\n            without_comment = s if hash_pos < 0 else s[:hash_pos]\n            return without_comment.strip()\n        elif 0 <= hash_pos < quote_pos:\n            return s[:hash_pos].strip()\n        else:\n            closing_quote_pos = s.find('\"', quote_pos + 1)\n            if closing_quote_pos < 0:\n                raise ValueError(f'Missing closing quote in: {s!r}. If you need a double-quote inside a string, use escaping: e.g. \"the \" char\"')\n            pos = closing_quote_pos + 1",
    "docstring": "Strip everything from the first unquoted #.",
    "type": "function",
    "file_path": "matplotlib\\lib\\matplotlib\\cbook.py",
    "ast_data": "FunctionDef name:_strip_comment arg:s arguments arg Assign While Assign Call Assign Call If Compare Assign Compare Return return:yes Call If Compare Return return:yes Call Assign Call If Compare Raise Call Assign"
  },
  {
    "library": "sphinx",
    "name": "CitationReferenceTransform",
    "source_code": "class CitationReferenceTransform(SphinxTransform):\n    default_priority = 619\n\n    def apply(self, **kwargs: Any) -> None:\n        domain = self.env.domains.citation_domain\n        for node in self.document.findall(nodes.citation_reference):\n            target = node.astext()\n            ref = pending_xref(target, refdomain='citation', reftype='ref', reftarget=target, refwarn=True, support_smartquotes=False, ids=node['ids'], classes=node.get('classes', []))\n            ref += nodes.inline(target, '[%s]' % target)\n            copy_source_info(node, ref)\n            node.replace_self(ref)\n            domain.note_citation_reference(ref)",
    "docstring": "Replace citation references by pending_xref nodes before the default docutils transform tries to resolve them.",
    "type": "class",
    "file_path": "sphinx\\sphinx\\domains\\citation.py",
    "ast_data": "ClassDef name:CitationReferenceTransform Assign FunctionDef name:apply arg:self arguments arg arg Assign For Call Assign Call Assign Call Call Call Call Call Call"
  },
  {
    "library": "scrapy",
    "name": "LZMAPlugin",
    "source_code": "class LZMAPlugin:\n\n    def __init__(self, file: BinaryIO, feed_options: dict[str, Any]) -> None:\n        self.file = file\n        self.feed_options = feed_options\n        format = self.feed_options.get('lzma_format')\n        check = self.feed_options.get('lzma_check', -1)\n        preset = self.feed_options.get('lzma_preset')\n        filters = self.feed_options.get('lzma_filters')\n        self.lzmafile = LZMAFile(filename=self.file, mode='wb', format=format, check=check, preset=preset, filters=filters)\n\n    def write(self, data: bytes) -> int:\n        return self.lzmafile.write(data)\n\n    def close(self) -> None:\n        self.lzmafile.close()",
    "docstring": "Compresses received data using _. Accepted `lzma_formatlzma_checklzma_presetlzma_filterslzma.LZMAFile` for more info about parameters.",
    "type": "class",
    "file_path": "scrapy\\scrapy\\extensions\\postprocessing.py",
    "ast_data": "ClassDef name:LZMAPlugin FunctionDef name:__init__ arg:self arg:file arg:feed_options arguments arg arg arg Assign Assign Assign Call Assign Call Assign Call Assign Call Assign Call FunctionDef name:write arg:self arg:data arguments arg arg Return return:yes Call FunctionDef name:close arg:self arguments arg Call"
  },
  {
    "library": "tensorflow",
    "name": "resolve",
    "source_code": "def resolve(node, source_info, graphs):\n    visitor = TreeAnnotator(source_info, graphs)\n    node = visitor.visit(node)\n    return node",
    "docstring": "Resolves reaching definitions for each symbol. Args: node: ast.AST source_info: transformer.SourceInfo graphs: Dict[ast.FunctionDef, cfg.Graph] Returns: ast.AST",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\autograph\\pyct\\static_analysis\\reaching_fndefs.py",
    "ast_data": "FunctionDef name:resolve arg:node arg:source_info arg:graphs arguments arg arg arg Assign Call Assign Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "inplace_update_buffers",
    "source_code": "@property\n@cache_on_self\ndef inplace_update_buffers(self):\n    for k in self.kernels[1:]:\n        assert k.inplace_update_buffers == self.kernels[0].inplace_update_buffers\n    return self.kernels[0].inplace_update_buffers",
    "docstring": "Make sure all kernels have the same inplace update mappings.",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\codegen\\multi_kernel.py",
    "ast_data": "FunctionDef name:inplace_update_buffers arg:self arguments arg For Compare Return return:yes"
  },
  {
    "library": "scipy",
    "name": "mean",
    "source_code": "@abstractmethod\ndef mean(self, *, method):\n    raise NotImplementedError()",
    "docstring": "Mean (raw first moment about the origin) Parameters ---------- method : {None, 'formula', 'transform', 'quadrature', 'cache'} Method used to calculate the raw first moment. Not all methods are available for all distributions. See for details. See Also -------- moment median mode References ---------- .. [1] Mean, *Wikipedia*, Examples -------- Instantiate a distribution with the desired parameters: >>> from scipy import stats >>> X = stats.Normal(mu=1., sigma=2.) Evaluate the variance: >>> X.mean() 1.0 >>> X.mean() == X.moment(order=1, kind='raw') == X.mu True",
    "type": "method",
    "file_path": "scipy\\scipy\\stats\\_probability_distribution.py",
    "ast_data": "FunctionDef name:mean arg:self arguments arg arg Raise Call"
  },
  {
    "library": "matplotlib",
    "name": "get_bbox",
    "source_code": "def get_bbox(self):\n    return transforms.Bbox.from_bounds(self._x, self._y, self._width, self._height)",
    "docstring": "Return the .",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\patches.py",
    "ast_data": "FunctionDef name:get_bbox arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "_em_step",
    "source_code": "def _em_step(self, X, total_samples, batch_update, parallel=None):\n    _, suff_stats = self._e_step(X, cal_sstats=True, random_init=True, parallel=parallel)\n    if batch_update:\n        self.components_ = self.topic_word_prior_ + suff_stats\n    else:\n        weight = np.power(self.learning_offset + self.n_batch_iter_, -self.learning_decay)\n        doc_ratio = float(total_samples) / X.shape[0]\n        self.components_ *= 1 - weight\n        self.components_ += weight * (self.topic_word_prior_ + doc_ratio * suff_stats)\n    self.exp_dirichlet_component_ = np.exp(_dirichlet_expectation_2d(self.components_))\n    self.n_batch_iter_ += 1\n    return",
    "docstring": "EM update for 1 iteration. update by batch VB or online VB. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) Document word matrix. total_samples : int Total number of documents. It is only used when batch_update is . batch_update : bool Parameter that controls updating method. for batch learning, for online learning. parallel : joblib.Parallel, default=None Pre-initialized instance of joblib.Parallel Returns ------- doc_topic_distr : ndarray of shape (n_samples, n_components) Unnormalized document topic distribution.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\decomposition\\_lda.py",
    "ast_data": "FunctionDef name:_em_step arg:self arg:X arg:total_samples arg:batch_update arg:parallel arguments arg arg arg arg arg Assign Call If Assign Assign Call Assign Call Assign Call Call Return return:no"
  },
  {
    "library": "tensorflow",
    "name": "dawsn",
    "source_code": "@tf_export('math.special.dawsn')\n@dispatch.register_unary_elementwise_api\n@dispatch.add_dispatch_support\ndef dawsn(x, name=None):\n    with ops.name_scope(name, 'dawsn', [x]):\n        return gen_special_math_ops.dawsn(x)",
    "docstring": "Computes Dawson's integral of element-wise. Dawson's integral is defined as times the integral of from to , with the domain of definition all real numbers. Dawson's function is odd. >>> tf.math.special.dawsn([-1., -0.5, 0.5, 1.]).numpy() array([-0.5380795, -0.4244364, 0.4244364, 0.5380795], dtype=float32) This implementation is based off of the Cephes math library. Args: x: A or . Must be one of the following types: , . name: A name for the operation (optional). Returns: A or , respectively. Has the same type as . @compatibility(scipy) Equivalent to scipy.special.dawsn @end_compatibility",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\special_math_ops.py",
    "ast_data": "FunctionDef name:dawsn arg:x arg:name arguments arg arg With Call Return return:yes Call Call"
  },
  {
    "library": "matplotlib",
    "name": "get_center",
    "source_code": "def get_center(self):\n    return self._center",
    "docstring": "Return the center of the annulus.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\patches.py",
    "ast_data": "FunctionDef name:get_center arg:self arguments arg Return return:yes"
  },
  {
    "library": "numpy",
    "name": "greater_equal",
    "source_code": "@array_function_dispatch(_binary_op_dispatcher)\ndef greater_equal(x1, x2):\n    return compare_chararrays(x1, x2, '>=', True)",
    "docstring": "Return (x1 >= x2) element-wise. Unlike , this comparison is performed by first stripping whitespace characters from the end of the string. This behavior is provided for backward-compatibility with numarray. Parameters ---------- x1, x2 : array_like of str or unicode Input arrays of the same shape. Returns ------- out : ndarray Output array of bools. See Also -------- equal, not_equal, less_equal, greater, less Examples -------- >>> import numpy as np >>> x1 = np.array(['a', 'b', 'c']) >>> np.char.greater_equal(x1, 'b') array([False, True, True])",
    "type": "function",
    "file_path": "numpy\\numpy\\_core\\defchararray.py",
    "ast_data": "FunctionDef name:greater_equal arg:x1 arg:x2 arguments arg arg Return return:yes Call Call"
  },
  {
    "library": "matplotlib",
    "name": "line_2d_to_3d",
    "source_code": "def line_2d_to_3d(line, zs=0, zdir='z', axlim_clip=False):\n    line.__class__ = Line3D\n    line.set_3d_properties(zs, zdir, axlim_clip)",
    "docstring": "Convert a to a object. Parameters ---------- zs : float The location along the *zdir* axis in 3D space to position the line. zdir : {'x', 'y', 'z'} Plane to plot line orthogonal to. Default: 'z'. See for a description of the values. axlim_clip : bool, default: False Whether to hide lines with an endpoint outside the axes view limits. .. versionadded:: 3.10",
    "type": "function",
    "file_path": "matplotlib\\lib\\mpl_toolkits\\mplot3d\\art3d.py",
    "ast_data": "FunctionDef name:line_2d_to_3d arg:line arg:zs arg:zdir arg:axlim_clip arguments arg arg arg arg Assign Call"
  },
  {
    "library": "scipy",
    "name": "left_multiplied_operator",
    "source_code": "def left_multiplied_operator(J, d):\n    J = aslinearoperator(J)\n\n    def matvec(x):\n        return d * J.matvec(x)\n\n    def matmat(X):\n        return d[:, np.newaxis] * J.matmat(X)\n\n    def rmatvec(x):\n        return J.rmatvec(x.ravel() * d)\n    return LinearOperator(J.shape, matvec=matvec, matmat=matmat, rmatvec=rmatvec)",
    "docstring": "Return diag(d) J as LinearOperator.",
    "type": "function",
    "file_path": "scipy\\scipy\\optimize\\_lsq\\common.py",
    "ast_data": "FunctionDef name:left_multiplied_operator arg:J arg:d arguments arg arg Assign Call FunctionDef name:matvec arg:x arguments arg Return return:yes Call FunctionDef name:matmat arg:X arguments arg Return return:yes Call FunctionDef name:rmatvec arg:x arguments arg Return return:yes Call Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "name",
    "source_code": "@property\ndef name(self) -> bytes:\n    return self._name",
    "docstring": "Name represented in UTF-8 encoded bytes.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\polymorphic_function\\atomic_function.py",
    "ast_data": "FunctionDef name:name arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "parents",
    "source_code": "@property\ndef parents(self):\n    return [self.key]",
    "docstring": "See 'FeatureColumn` base class.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\feature_column\\feature_column_v2.py",
    "ast_data": "FunctionDef name:parents arg:self arguments arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "execute_save",
    "source_code": "@abc.abstractmethod\ndef execute_save(self, staged_state_dict: STATE_DICT_TYPE, *, checkpoint_id: Union[str, os.PathLike, None]=None, storage_writer: Optional[StorageWriter]=None, planner: Optional[SavePlanner]=None, process_group: Optional[dist.ProcessGroup]=None) -> Future:\n    pass",
    "docstring": "Execute the checkpoint save request asynchronously. This method is intended to be used as an abstraction for implementing async checkpointing. The actual checkpoint save operation is executed in a separate thread or process depending on the implementation of this interface.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\checkpoint\\_async_executor.py",
    "ast_data": "FunctionDef name:execute_save arg:self arg:staged_state_dict arguments arg arg arg arg arg arg"
  },
  {
    "library": "pytorch",
    "name": "attach_preserved_attrs_to_model",
    "source_code": "def attach_preserved_attrs_to_model(model: Union[GraphModule, torch.nn.Module], preserved_attrs: dict[str, Any]) -> None:\n    model.meta[_USER_PRESERVED_ATTRIBUTES_KEY] = copy.copy(preserved_attrs)\n    for attr_name, attr in model.meta[_USER_PRESERVED_ATTRIBUTES_KEY].items():\n        setattr(model, attr_name, attr)",
    "docstring": "Store preserved attributes to the model.meta so that it can be preserved during deepcopy",
    "type": "function",
    "file_path": "pytorch\\torch\\ao\\quantization\\quantize_fx.py",
    "ast_data": "FunctionDef name:attach_preserved_attrs_to_model arg:model arg:preserved_attrs arguments arg arg Assign Call For Call Call"
  },
  {
    "library": "django",
    "name": "add_related_update",
    "source_code": "def add_related_update(self, model, field, value):\n    self.related_updates.setdefault(model, []).append((field, None, value))",
    "docstring": "Add (name, value) to an update query for an ancestor model. Update are coalesced so that only one update query per ancestor is run.",
    "type": "method",
    "file_path": "django\\django\\db\\models\\sql\\subqueries.py",
    "ast_data": "FunctionDef name:add_related_update arg:self arg:model arg:field arg:value arguments arg arg arg arg Call Call"
  },
  {
    "library": "pytorch",
    "name": "get_process_group_ranks",
    "source_code": "def get_process_group_ranks(group: ProcessGroup) -> list[int]:\n    return list(_world.pg_group_ranks[group].keys())",
    "docstring": "Get all ranks associated with ``. Args: group (ProcessGroup): ProcessGroup to get all ranks from. Returns: List of global ranks ordered by group rank.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\distributed_c10d.py",
    "ast_data": "FunctionDef name:get_process_group_ranks arg:group arguments arg Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_write_cache_index_map_section",
    "source_code": "def _write_cache_index_map_section(self, tensor_trace_order):\n    self._write_report('%s %s\\n' % (_MARKER_SECTION_BEGIN, _SECTION_NAME_CACHE_INDEX_MAP))\n    self._write_report('%s %d\\n' % (_FIELD_NAME_NUM_CACHE_INDICES, len(tensor_trace_order.cache_idx_to_tensor_idx)))\n    for cache_idx in range(0, len(tensor_trace_order.cache_idx_to_tensor_idx)):\n        tensor_idx = tensor_trace_order.cache_idx_to_tensor_idx[cache_idx]\n        line = '%d %d\\n' % (cache_idx, tensor_idx)\n        self._write_report(line)\n    self._write_report('%s %s\\n' % (_MARKER_SECTION_END, _SECTION_NAME_CACHE_INDEX_MAP))",
    "docstring": "Writes the mapping from cache index to tensor index to the report.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\tpu\\tensor_tracer_report.py",
    "ast_data": "FunctionDef name:_write_cache_index_map_section arg:self arg:tensor_trace_order arguments arg arg Call Call Call For Call Call Assign Assign Call Call"
  },
  {
    "library": "django",
    "name": "num_fields",
    "source_code": "@property\ndef num_fields(self):\n    return capi.get_feat_field_count(self.ptr)",
    "docstring": "Return the number of fields in the Feature.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\gdal\\feature.py",
    "ast_data": "FunctionDef name:num_fields arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "context_parallel_unshard",
    "source_code": "@torch.no_grad()\ndef context_parallel_unshard(mesh: DeviceMesh, buffers: list[torch.Tensor], seq_dims: list[int]) -> list[torch.Tensor]:\n    sharder = _RoundRobinLoadBalancer if _cp_options.enable_load_balance else _SequentialSharder\n    return [sharder.unshard(b, mesh, dim) for b, dim in zip(buffers, seq_dims)]",
    "docstring": "Unshard the tensors (e.g., output) that are sharded due to context parallelism. Args: mesh (:class:): the device mesh for the context parallelism. buffers (List[torch.Tensor]): the buffers to be unsharded. seq_dims (List[int]): the sequence dimensions of ``. Returns: List[torch.Tensor]: the unsharded buffers.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\tensor\\experimental\\_attention.py",
    "ast_data": "FunctionDef name:context_parallel_unshard arg:mesh arg:buffers arg:seq_dims arguments arg arg arg Assign Return return:yes Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "custom_getattribute",
    "source_code": "def custom_getattribute(self, attr, *, original_getattr, attrs_to_proxy):\n    out = original_getattr(self, attr)\n    if attr in attrs_to_proxy:\n        if torch._C._is_torch_function_mode_enabled():\n            if isinstance(out, torch.Tensor):\n                torch_function_mode_stack = torch.overrides._get_current_function_mode_stack()\n                for mode in torch_function_mode_stack:\n                    if isinstance(mode, PreDispatchTorchFunctionMode):\n                        tracer = mode.tracer\n                        proxy = get_proxy_slot(self, tracer).proxy\n                        inner_proxy = tracer.create_proxy('call_function', torch.ops.export.access_subclass_inner_tensor.default, (proxy, attr), {})\n                        track_tensor_tree(out, inner_proxy, constant=None, tracer=tracer)\n    return out",
    "docstring": "The idea here is that we override subclass getattr methods to proxy inner tensors and metadata. Because of infinite loop shenanigans, we have to manually construct the getattr proxy nodes without relying on torch function system.",
    "type": "function",
    "file_path": "pytorch\\torch\\export\\_trace.py",
    "ast_data": "FunctionDef name:custom_getattribute arg:self arg:attr arguments arg arg arg arg Assign Call If Compare If Call If Call Assign Call For If Call Assign Assign Call Assign Call Call Return return:yes"
  },
  {
    "library": "scrapy",
    "name": "build_from_crawler",
    "source_code": "def build_from_crawler(objcls: type[T], crawler: Crawler, /, *args: Any, **kwargs: Any) -> T:\n    if hasattr(objcls, 'from_crawler'):\n        instance = objcls.from_crawler(crawler, *args, **kwargs)\n        method_name = 'from_crawler'\n    elif hasattr(objcls, 'from_settings'):\n        warnings.warn(f'{objcls.__qualname__} has from_settings() but not from_crawler(). This is deprecated and calling from_settings() will be removed in a future Scrapy version. You can implement a simple from_crawler() that calls from_settings() with crawler.settings.', category=ScrapyDeprecationWarning, stacklevel=2)\n        instance = objcls.from_settings(crawler.settings, *args, **kwargs)\n        method_name = 'from_settings'\n    else:\n        instance = objcls(*args, **kwargs)\n        method_name = '__new__'\n    if instance is None:\n        raise TypeError(f'{objcls.__qualname__}.{method_name} returned None')\n    return cast(T, instance)",
    "docstring": "Construct a class instance using its ``.",
    "type": "function",
    "file_path": "scrapy\\scrapy\\utils\\misc.py",
    "ast_data": "FunctionDef name:build_from_crawler arguments arg arg arg arg If Call Assign Call Assign If Call Call Assign Call Assign Assign Call Assign If Compare Raise Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_rank",
    "source_code": "def _rank(x):\n    rank = ops.convert_to_tensor(x).get_shape().ndims\n    if rank:\n        return (rank, True)\n    else:\n        return (array_ops.rank(x), False)",
    "docstring": "Helper function to retrieve the rank of a tensor. Args: x: Something convertible to . Returns: Either a pair where is an integer or a pair where is an integer . In either case, is the rank of .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\embedding_ops.py",
    "ast_data": "FunctionDef name:_rank arg:x arguments arg Assign Call Call If Return return:yes Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "get_initializer",
    "source_code": "def get_initializer(self):\n    raise NotImplementedError('not impl')",
    "docstring": "Returns the initializer.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\tpu\\feature_column.py",
    "ast_data": "FunctionDef name:get_initializer arg:self arguments arg Raise Call"
  },
  {
    "library": "tensorflow",
    "name": "concentration1",
    "source_code": "@property\ndef concentration1(self):\n    return self._concentration1",
    "docstring": "Concentration parameter associated with a outcome.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\distributions\\beta.py",
    "ast_data": "FunctionDef name:concentration1 arg:self arguments arg Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "_predict_iterations",
    "source_code": "def _predict_iterations(self, X, predictors, raw_predictions, is_binned, n_threads):\n    if not is_binned:\n        known_cat_bitsets, f_idx_map = self._bin_mapper.make_known_categories_bitsets()\n    for predictors_of_ith_iteration in predictors:\n        for k, predictor in enumerate(predictors_of_ith_iteration):\n            if is_binned:\n                predict = partial(predictor.predict_binned, missing_values_bin_idx=self._bin_mapper.missing_values_bin_idx_, n_threads=n_threads)\n            else:\n                predict = partial(predictor.predict, known_cat_bitsets=known_cat_bitsets, f_idx_map=f_idx_map, n_threads=n_threads)\n            raw_predictions[:, k] += predict(X)",
    "docstring": "Add the predictions of the predictors to raw_predictions.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\ensemble\\_hist_gradient_boosting\\gradient_boosting.py",
    "ast_data": "FunctionDef name:_predict_iterations arg:self arg:X arg:predictors arg:raw_predictions arg:is_binned arg:n_threads arguments arg arg arg arg arg arg If Assign Call For For Call If Assign Call Assign Call Call"
  },
  {
    "library": "pytorch",
    "name": "ONNXFakeContext",
    "source_code": "@dataclasses.dataclass\nclass ONNXFakeContext:\n    fake_mode: fake_tensor.FakeTensorMode\n    'The fake tensor mode used for tracing model using fake tensors and parameters.'\n    state_dict_paths: tuple[str | io.BytesIO | dict[str, Any]] | None = None\n    'List of paths of files that contain the model :meth:`state_dict`'",
    "docstring": "A dataclass used to store context for model export using FakeTensor. This dataclass stores the FakeTensorMode instance used to convert real tensors and model parameters into fake tensors. This :attr: is reused internally during tracing of a :class: into a FX :class:.",
    "type": "class",
    "file_path": "pytorch\\torch\\onnx\\_internal\\_exporter_legacy.py",
    "ast_data": "ClassDef name:ONNXFakeContext"
  },
  {
    "library": "scipy",
    "name": "to_tree",
    "source_code": "@xp_capabilities(jax_jit=False, allow_dask_compute=True)\ndef to_tree(Z, rd=False):\n    xp = array_namespace(Z)\n    Z = _asarray(Z, order='C', xp=xp)\n    _is_valid_linkage(Z, throw=True, name='Z', materialize=True, xp=xp)\n    n = Z.shape[0] + 1\n    d = [None] * (n * 2 - 1)\n    for i in range(0, n):\n        d[i] = ClusterNode(i)\n    nd = None\n    for i in range(Z.shape[0]):\n        row = Z[i, :]\n        fi = int_floor(row[0], xp)\n        fj = int_floor(row[1], xp)\n        if fi > i + n:\n            raise ValueError(f'Corrupt matrix Z. Index to derivative cluster is used before it is formed. See row {fi}, column 0')\n        if fj > i + n:\n            raise ValueError(f'Corrupt matrix Z. Index to derivative cluster is used before it is formed. See row {fj}, column 1')\n        nd = ClusterNode(i + n, d[fi], d[fj], row[2])\n        if row[3] != nd.count:\n            raise ValueError(f'Corrupt matrix Z. The count Z[{i},3] is incorrect.')\n        d[n + i] = nd\n    if rd:\n        return (nd, d)\n    else:\n        return nd",
    "docstring": "Convert a linkage matrix into an easy-to-use tree object. The reference to the root object is returned (by default). Each object has a `ClusterNodelinkageClusterNodeClusterNodelinkageClusterNoderd` above for more details. See Also -------- linkage, is_valid_linkage, ClusterNode Examples -------- >>> import numpy as np >>> from scipy.cluster import hierarchy >>> rng = np.random.default_rng() >>> x = rng.random((5, 2)) >>> Z = hierarchy.linkage(x) >>> hierarchy.to_tree(Z) >> rootnode, nodelist = hierarchy.to_tree(Z, rd=True) >>> rootnode >> len(nodelist) 9",
    "type": "function",
    "file_path": "scipy\\scipy\\cluster\\hierarchy.py",
    "ast_data": "FunctionDef name:to_tree arg:Z arg:rd arguments arg arg Assign Call Assign Call Call Assign Assign For Call Assign Call Assign For Call Assign Assign Call Assign Call If Compare Raise Call If Compare Raise Call Assign Call If Compare Raise Call Assign If Return return:yes Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "swap_node_to_partition",
    "source_code": "def swap_node_to_partition(node, p0, p1, node_to_latency_mapping, transfer_rate_per_sec):\n    p1_nodes = list(p1.nodes) + [None]\n    min_cost = float('inf')\n    node_pair: list[Node] = []\n    for n1 in p1_nodes:\n        if n1 is not None and n1.op in {'placeholder', 'get_attr'}:\n            continue\n        cost = try_swap_nodes(node, n1, p0, p1, node_to_latency_mapping, transfer_rate_per_sec)\n        if cost < min_cost:\n            node_pair = [node, n1]\n            min_cost = cost\n    return (cost, node_pair)",
    "docstring": "This function helps to swap one node from partition p0 with all the nodes in another partition p1",
    "type": "method",
    "file_path": "pytorch\\torch\\fx\\experimental\\accelerator_partitioner.py",
    "ast_data": "FunctionDef name:swap_node_to_partition arg:node arg:p0 arg:p1 arg:node_to_latency_mapping arg:transfer_rate_per_sec arguments arg arg arg arg arg Assign Call Assign Call For If BoolOp Compare Compare Assign Call If Compare Assign Assign Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "cache",
    "source_code": "def cache(self, src: str, globals: dict[str, Any], co_fields=None):\n    key = self._get_key()\n    if co_fields:\n        key += f' from {co_fields['co_filename']}:{co_fields['co_firstlineno']} in {co_fields['co_name']}'\n    self.eval_cache[key] = src\n    globals_copy = globals.copy()\n    globals_copy['__file__'] = key\n    globals_copy['__name__'] = key\n    globals_copy['__loader__'] = self\n    linecache.lazycache(key, globals_copy)\n    return key",
    "docstring": "Store the source in a private cache, and add a lazy entry in linecache that allows the source to be retrieved by 'filename'. Args: src (str): The module source to cache globals (dict): The module globals Returns: str: The cache key (and dummy filename) generated for src.",
    "type": "method",
    "file_path": "pytorch\\torch\\fx\\graph_module.py",
    "ast_data": "FunctionDef name:cache arg:self arg:src arg:globals arg:co_fields arguments arg arg arg arg Assign Call If Assign Assign Call Assign Assign Assign Call Return return:yes"
  },
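A self-contained sketch of the `linecache.lazycache` mechanism this method relies on; names here are illustrative, not the FX API. Registering a loader under a dummy filename lets tracebacks and `inspect` retrieve generated source on demand.

```python
import linecache

src = "def generated(x):\n    return x + 1\n"
key = "torch_fx_generated_0.py"  # dummy filename, no file on disk

namespace = {
    "__name__": key,
    "__file__": key,
    # Any object with a get_source(name) method works as a PEP 302 loader.
    "__loader__": type("Loader", (), {"get_source": staticmethod(lambda name: src)})(),
}
linecache.lazycache(key, namespace)      # lazy entry: source fetched on demand
exec(compile(src, key, "exec"), namespace)

print(linecache.getline(key, 2).rstrip())  # -> "    return x + 1"
```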
  {
    "library": "pandas",
    "name": "_maybe_infer_tz",
    "source_code": "def _maybe_infer_tz(tz: tzinfo | None, inferred_tz: tzinfo | None) -> tzinfo | None:\n    if tz is None:\n        tz = inferred_tz\n    elif inferred_tz is None:\n        pass\n    elif not timezones.tz_compare(tz, inferred_tz):\n        raise TypeError(f'data is already tz-aware {inferred_tz}, unable to set specified tz: {tz}')\n    return tz",
    "docstring": "If a timezone is inferred from data, check that it is compatible with the user-provided timezone, if any. Parameters ---------- tz : tzinfo or None inferred_tz : tzinfo or None Returns ------- tz : tzinfo or None Raises ------ TypeError : if both timezones are present but do not match",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\arrays\\datetimes.py",
    "ast_data": "FunctionDef name:_maybe_infer_tz arg:tz arg:inferred_tz arguments arg arg If Compare Assign If Compare If Call Raise Call Return return:yes"
  },
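A behavior sketch for `_maybe_infer_tz`, importing the private helper purely for illustration: an explicit tz must agree with the tz inferred from the data.

```python
from datetime import timedelta, timezone

from pandas.core.arrays.datetimes import _maybe_infer_tz  # private helper

utc = timezone.utc
assert _maybe_infer_tz(None, utc) is utc   # adopt the inferred timezone
assert _maybe_infer_tz(utc, None) is utc   # keep the user-provided timezone
try:
    _maybe_infer_tz(timezone(timedelta(hours=2)), utc)  # conflicting zones
except TypeError as exc:
    print(exc)
```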
  {
    "library": "matplotlib",
    "name": "get_path_collection_extents",
    "source_code": "def get_path_collection_extents(master_transform, paths, transforms, offsets, offset_transform):\n    from .transforms import Bbox\n    if len(paths) == 0:\n        raise ValueError('No paths provided')\n    if len(offsets) == 0:\n        raise ValueError('No offsets provided')\n    extents, minpos = _path.get_path_collection_extents(master_transform, paths, np.atleast_3d(transforms), offsets, offset_transform)\n    return Bbox.from_extents(*extents, minpos=minpos)",
    "docstring": "Get bounding box of a \\s internal objects. That is, given a sequence of \\s, \\s objects, and offsets, as found in a , return the bounding box that encapsulates all of them. Parameters ---------- master_transform : Global transformation applied to all paths. paths : list of transforms : list of If non-empty, this overrides *master_transform*. offsets : (N, 2) array-like offset_transform : Transform applied to the offsets before offsetting the path. Notes ----- The way that *paths*, *transforms* and *offsets* are combined follows the same method as for collections: each is iterated over independently, so if you have 3 paths (A, B, C), 2 transforms (α, β) and 1 offset (O), their combinations are as follows: - (A, α, O) - (B, β, O) - (C, α, O)",
    "type": "function",
    "file_path": "matplotlib\\lib\\matplotlib\\path.py",
    "ast_data": "FunctionDef name:get_path_collection_extents arg:master_transform arg:paths arg:transforms arg:offsets arg:offset_transform arguments arg arg arg arg arg If Compare Call Raise Call If Compare Call Raise Call Assign Call Call Return return:yes Call"
  },
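A usage sketch assuming matplotlib's public `Path`/`Affine2D` API: one unit square stamped at two offsets yields the union bounding box.

```python
import numpy as np
from matplotlib.path import Path, get_path_collection_extents
from matplotlib.transforms import Affine2D

bbox = get_path_collection_extents(
    Affine2D(),                           # master_transform (identity)
    [Path.unit_rectangle()],              # paths: one unit square
    np.empty((0, 3, 3)),                  # no per-path transforms
    np.array([[0.0, 0.0], [2.0, 3.0]]),   # two offsets
    Affine2D(),                           # offset_transform (identity)
)
print(bbox.extents)  # expected: [0. 0. 3. 4.]
```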
  {
    "library": "scikit-learn",
    "name": "load_sample_image",
    "source_code": "@validate_params({'image_name': [StrOptions({'china.jpg', 'flower.jpg'})]}, prefer_skip_nested_validation=True)\ndef load_sample_image(image_name):\n    images = load_sample_images()\n    index = None\n    for i, filename in enumerate(images.filenames):\n        if filename.endswith(image_name):\n            index = i\n            break\n    if index is None:\n        raise AttributeError('Cannot find sample image: %s' % image_name)\n    return images.images[index]",
    "docstring": "Load the numpy array of a single sample image. Read more in the :ref:. Parameters ---------- image_name : {, } The name of the sample image loaded. Returns ------- img : 3D array The image as a numpy array: height x width x color. Examples -------- >>> from sklearn.datasets import load_sample_image >>> china = load_sample_image('china.jpg') # doctest: +SKIP >>> china.dtype # doctest: +SKIP dtype('uint8') >>> china.shape # doctest: +SKIP (427, 640, 3) >>> flower = load_sample_image('flower.jpg') # doctest: +SKIP >>> flower.dtype # doctest: +SKIP dtype('uint8') >>> flower.shape # doctest: +SKIP (427, 640, 3)",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\datasets\\_base.py",
    "ast_data": "FunctionDef name:load_sample_image arg:image_name arguments arg Assign Call Assign For Call If Call Assign If Compare Raise Call Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "load_state_dict",
    "source_code": "def load_state_dict(self, state_dict: dict[str, Any]) -> None:\n    if not self._enabled:\n        return\n    if len(state_dict) == 0:\n        raise RuntimeError('The source state dict is empty, possibly because it was saved from a disabled instance of GradScaler.')\n    self._init_scale = cast(float, state_dict['scale'])\n    if self._scale is not None:\n        self._scale.fill_(state_dict['scale'])\n    self._growth_factor = cast(float, state_dict['growth_factor'])\n    self._backoff_factor = cast(float, state_dict['backoff_factor'])\n    self._growth_interval = cast(int, state_dict['growth_interval'])\n    self._init_growth_tracker = cast(int, state_dict['_growth_tracker'])\n    if self._growth_tracker is not None:\n        self._growth_tracker.fill_(state_dict['_growth_tracker'])",
    "docstring": "Load the scaler state. If this instance is disabled, :meth: is a no-op. Args: state_dict(dict): scaler state. Should be an object returned from a call to :meth:.",
    "type": "method",
    "file_path": "pytorch\\torch\\amp\\grad_scaler.py",
    "ast_data": "FunctionDef name:load_state_dict arg:self arg:state_dict arguments arg arg If Return return:no If Compare Call Raise Call Assign Call If Compare Call Assign Call Assign Call Assign Call Assign Call If Compare Call"
  },
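A round-trip sketch using the public `torch.amp.GradScaler` API, constructed for "cpu" here and assuming a recent PyTorch where CPU scalers are supported.

```python
import torch

scaler = torch.amp.GradScaler("cpu", init_scale=2.0**14)
state = scaler.state_dict()  # scale, growth/backoff factors, growth tracker

fresh = torch.amp.GradScaler("cpu")
fresh.load_state_dict(state)           # restore loss-scaling bookkeeping
assert fresh.get_scale() == 2.0**14
```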
  {
    "library": "pytorch",
    "name": "map_placements_after_reduction",
    "source_code": "def map_placements_after_reduction(placements: tuple[Placement, ...], reduction_dims: list[int], reduction_dims_map: list[int], reduction_op: ReductionOpType) -> tuple[Placement, ...]:\n    new_placements: list[Placement] = []\n    for placement in placements:\n        if isinstance(placement, (Replicate, Partial)):\n            new_placements.append(placement)\n        else:\n            assert isinstance(placement, Shard)\n            shard_dim = placement.dim\n            new_shard_dim = reduction_dims_map[shard_dim]\n            if new_shard_dim == -1 or shard_dim in reduction_dims:\n                new_placements.append(get_placement_from_reduction_op(reduction_op))\n            else:\n                new_placements.append(Shard(new_shard_dim))\n    return tuple(new_placements)",
    "docstring": "Map each placement based on the output shape after reduction.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\tensor\\_ops\\_math_ops.py",
    "ast_data": "FunctionDef name:map_placements_after_reduction arg:placements arg:reduction_dims arg:reduction_dims_map arg:reduction_op arguments arg arg arg arg For If Call Call Call Assign Assign If BoolOp Compare Compare Call Call Call Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "use_operator_or_provided_hint_unless_contradicting",
    "source_code": "def use_operator_or_provided_hint_unless_contradicting(operator, hint_attr_name, provided_hint_value, message):\n    op_hint = getattr(operator, hint_attr_name)\n    if op_hint is False and provided_hint_value:\n        raise ValueError(message)\n    if op_hint and provided_hint_value is False:\n        raise ValueError(message)\n    if op_hint or provided_hint_value:\n        return True\n    if op_hint is False or provided_hint_value is False:\n        return False\n    return None",
    "docstring": "Get combined hint in the case where operator.hint should equal hint. Args: operator: LinearOperator that a meta-operator was initialized with. hint_attr_name: String name for the attribute. provided_hint_value: Bool or None. Value passed by user in initialization. message: Error message to print if hints contradict. Returns: True, False, or None. Raises: ValueError: If hints contradict.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\linalg\\linear_operator_util.py",
    "ast_data": "FunctionDef name:use_operator_or_provided_hint_unless_contradicting arg:operator arg:hint_attr_name arg:provided_hint_value arg:message arguments arg arg arg arg Assign Call If BoolOp Compare Raise Call If BoolOp Compare Raise Call If BoolOp Return return:yes If BoolOp Compare Compare Return return:yes Return return:no"
  },
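A pure-Python sketch of the tri-state merge above, with the operator mocked by a `SimpleNamespace`; all names here are illustrative.

```python
from types import SimpleNamespace


def merge_hint(operator, attr, provided, message):
    # Mirrors the tri-state logic: True/False must agree, None defers.
    op_hint = getattr(operator, attr)
    if (op_hint is False and provided) or (op_hint and provided is False):
        raise ValueError(message)
    if op_hint or provided:
        return True
    if op_hint is False or provided is False:
        return False
    return None


op = SimpleNamespace(is_self_adjoint=None)
print(merge_hint(op, "is_self_adjoint", True, "hints contradict"))   # True
print(merge_hint(op, "is_self_adjoint", None, "hints contradict"))   # None
op.is_self_adjoint = False
try:
    merge_hint(op, "is_self_adjoint", True, "hints contradict")
except ValueError as exc:
    print(exc)
```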
  {
    "library": "pytorch",
    "name": "set_name",
    "source_code": "def set_name(self, name: str) -> BackendConfig:\n    self.name = name\n    return self",
    "docstring": "Set the name of the target backend.",
    "type": "method",
    "file_path": "pytorch\\torch\\ao\\quantization\\backend_config\\backend_config.py",
    "ast_data": "FunctionDef name:set_name arg:self arg:name arguments arg arg Assign Return return:yes"
  },
  {
    "library": "pandas",
    "name": "isocalendar",
    "source_code": "def isocalendar(self) -> DataFrame:\n    from pandas import DataFrame\n    values = self._local_timestamps()\n    sarray = fields.build_isocalendar_sarray(values, reso=self._creso)\n    iso_calendar_df = DataFrame(sarray, columns=['year', 'week', 'day'], dtype='UInt32')\n    if self._hasna:\n        iso_calendar_df.iloc[self._isnan] = None\n    return iso_calendar_df",
    "docstring": "Calculate year, week, and day according to the ISO 8601 standard. Returns ------- DataFrame With columns year, week and day. See Also -------- Timestamp.isocalendar : Function return a 3-tuple containing ISO year, week number, and weekday for the given Timestamp object. datetime.date.isocalendar : Return a named tuple object with three components: year, week and weekday. Examples -------- >>> idx = pd.date_range(start=\"2019-12-29\", freq=\"D\", periods=4) >>> idx.isocalendar() year week day 2019-12-29 2019 52 7 2019-12-30 2020 1 1 2019-12-31 2020 1 2 2020-01-01 2020 1 3 >>> idx.isocalendar().week 2019-12-29 52 2019-12-30 1 2019-12-31 1 2020-01-01 1 Freq: D, Name: week, dtype: UInt32",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\arrays\\datetimes.py",
    "ast_data": "FunctionDef name:isocalendar arg:self arguments arg Assign Call Assign Call Assign Call If Assign Return return:yes"
  },
  {
    "library": "scipy",
    "name": "eigvalsh",
    "source_code": "@_apply_over_batch(('a', 2), ('b', 2))\ndef eigvalsh(a, b=None, *, lower=True, overwrite_a=False, overwrite_b=False, type=1, check_finite=True, subset_by_index=None, subset_by_value=None, driver=None):\n    return eigh(a, b=b, lower=lower, eigvals_only=True, overwrite_a=overwrite_a, overwrite_b=overwrite_b, type=type, check_finite=check_finite, subset_by_index=subset_by_index, subset_by_value=subset_by_value, driver=driver)",
    "docstring": "Solves a standard or generalized eigenvalue problem for a complex Hermitian or real symmetric matrix. Find eigenvalues array `scipy.linalg.eigh`. Returns ------- w : (N,) ndarray The N (N>> import numpy as np >>> from scipy.linalg import eigvalsh >>> A = np.array([[6, 3, 1, 5], [3, 0, 5, 1], [1, 5, 6, 2], [5, 1, 2, 2]]) >>> w = eigvalsh(A) >>> w array([-3.74637491, -0.76263923, 6.08502336, 12.42399079])",
    "type": "function",
    "file_path": "scipy\\scipy\\linalg\\_decomp.py",
    "ast_data": "FunctionDef name:eigvalsh arg:a arg:b arguments arg arg arg arg arg arg arg arg arg arg Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "reinit_buffer_if_null",
    "source_code": "def reinit_buffer_if_null(self, name):\n    assert name in self.local_buffers\n    buf = self.local_buffers[name]\n    ctype = f'{DTYPE_TO_CPP[buf.layout.dtype]}'\n    numel = f'{cexpr_index(buf.get_numel())}'\n    return f'if (_{name} == nullptr) {{ _{name} = std::make_unique<{ctype}[]>({numel}); {name} = _{name}.get(); }}'",
    "docstring": "Reinit the previously defined local buffer if it is null",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\codegen\\cpp_template_kernel.py",
    "ast_data": "FunctionDef name:reinit_buffer_if_null arg:self arg:name arguments arg arg Compare Assign Assign Assign Call Call Return return:yes"
  },
  {
    "library": "kornia",
    "name": "rgba_to_rgb",
    "source_code": "def rgba_to_rgb(image: Tensor) -> Tensor:\n    if not isinstance(image, Tensor):\n        raise TypeError(f'Input type is not a Tensor. Got {type(image)}')\n    if len(image.shape) < 3 or image.shape[-3] != 4:\n        raise ValueError(f'Input size must have a shape of (*, 4, H, W).Got {image.shape}')\n    r, g, b, a = torch.chunk(image, image.shape[-3], dim=-3)\n    a_one = torch.tensor(1.0) - a\n    r_new: Tensor = a_one * r + a * r\n    g_new: Tensor = a_one * g + a * g\n    b_new: Tensor = a_one * b + a * b\n    return torch.cat([r_new, g_new, b_new], dim=-3)",
    "docstring": "Convert an image from RGBA to RGB. Args: image: RGBA Image to be converted to RGB of shape :math:. Returns: RGB version of the image with shape :math:. Example: >>> input = torch.rand(2, 4, 4, 5) >>> output = rgba_to_rgb(input) # 2x3x4x5",
    "type": "function",
    "file_path": "kornia\\kornia\\color\\rgb.py",
    "ast_data": "FunctionDef name:rgba_to_rgb arg:image arguments arg If Call Raise Call Call If BoolOp Compare Call Compare Raise Call Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_as_name_attr_list",
    "source_code": "@property\ndef _as_name_attr_list(self):\n    ret = attr_value_pb2.NameAttrList(name=self.name)\n    for name, value in self._attrs.items():\n        ret.attr[name].CopyFrom(value)\n    return ret",
    "docstring": "Returns a representing this function.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\polymorphic_function\\concrete_function.py",
    "ast_data": "FunctionDef name:_as_name_attr_list arg:self arguments arg Assign Call For Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_traverse",
    "source_code": "def _traverse(dataset, op_filter_fn):\n    result = []\n    bfs_q = queue.Queue()\n    bfs_q.put(dataset._variant_tensor.op)\n    visited = []\n    while not bfs_q.empty():\n        op = bfs_q.get()\n        visited.append(op)\n        if op_filter_fn(op):\n            result.append(op)\n        for i in op.inputs:\n            input_op = i.op\n            if input_op not in visited:\n                bfs_q.put(input_op)\n    return result",
    "docstring": "Traverse a dataset graph, returning nodes matching .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\data\\util\\traverse.py",
    "ast_data": "FunctionDef name:_traverse arg:dataset arg:op_filter_fn arguments arg arg Assign Assign Call Call Assign While Call Assign Call Call If Call Call For Assign If Compare Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "create_global_plan",
    "source_code": "@abc.abstractmethod\ndef create_global_plan(self, all_plans: list[SavePlan]) -> tuple[list[SavePlan], Metadata]:\n    pass",
    "docstring": "Compute the global checkpoint plan and return the local plan of each rank. This is called on the coordinator rank only.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\checkpoint\\planner.py",
    "ast_data": "FunctionDef name:create_global_plan arg:self arg:all_plans arguments arg arg"
  },
  {
    "library": "pytorch",
    "name": "fp16_bf16_reduction_math_sdp_allowed",
    "source_code": "def fp16_bf16_reduction_math_sdp_allowed():\n    return torch._C._get_math_sdp_allow_fp16_bf16_reduction()",
    "docstring": ".. warning:: This flag is beta and subject to change. Returns whether fp16/bf16 reduction in math scaled dot product attention is enabled or not.",
    "type": "function",
    "file_path": "pytorch\\torch\\backends\\cuda\\__init__.py",
    "ast_data": "FunctionDef name:fp16_bf16_reduction_math_sdp_allowed arguments Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "output",
    "source_code": "@property\ndef output(self):\n    return self._nested_outputs",
    "docstring": "Retrieves the output tensor(s) of a layer. Only applicable if the layer has exactly one output, i.e. if it is connected to one incoming layer. Returns: Output tensor or list of output tensors. Raises: AttributeError: if the layer is connected to more than one incoming layers. RuntimeError: if called in Eager mode.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\functional.py",
    "ast_data": "FunctionDef name:output arg:self arguments arg Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "_timer_set_single_shot",
    "source_code": "def _timer_set_single_shot(self):\n    pass",
    "docstring": "Used to set single shot on underlying timer object.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\backend_bases.py",
    "ast_data": "FunctionDef name:_timer_set_single_shot arg:self arguments arg"
  },
  {
    "library": "tensorflow",
    "name": "_check_params",
    "source_code": "def _check_params(window_length, dtype):\n    if not dtype.is_floating:\n        raise ValueError('dtype must be a floating point type. Found %s' % dtype)\n    window_length = ops.convert_to_tensor(window_length, dtype=dtypes.int32)\n    window_length.shape.assert_has_rank(0)\n    return window_length",
    "docstring": "Check window_length and dtype params. Args: window_length: A scalar value or . dtype: The data type to produce. Must be a floating point type. Returns: window_length converted to a tensor of type int32. Raises: ValueError: If is not a floating point type or window_length is not a scalar.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\signal\\window_ops.py",
    "ast_data": "FunctionDef name:_check_params arg:window_length arg:dtype arguments arg arg If Raise Call Assign Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "mlir_sparsify",
    "source_code": "@convert_phase(Component.OPTIMIZE_TFLITE_MODEL, SubComponent.SPARSIFY)\ndef mlir_sparsify(input_data_str):\n    return wrap_converter.wrapped_experimental_mlir_sparsify(input_data_str)",
    "docstring": "Sparsify to encode sparse tensor with proper format. Args: input_data_str: Input data in serialized form (e.g. a TFLITE model). Returns: Sparsified model in serialized form (e.g. a TFLITE model).",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\lite\\python\\convert.py",
    "ast_data": "FunctionDef name:mlir_sparsify arg:input_data_str arguments arg Return return:yes Call Call"
  },
  {
    "library": "scikit-learn",
    "name": "_posterior_mode",
    "source_code": "def _posterior_mode(self, K, return_temporaries=False):\n    if self.warm_start and hasattr(self, 'f_cached') and (self.f_cached.shape == self.y_train_.shape):\n        f = self.f_cached\n    else:\n        f = np.zeros_like(self.y_train_, dtype=np.float64)\n    log_marginal_likelihood = -np.inf\n    for _ in range(self.max_iter_predict):\n        pi = expit(f)\n        W = pi * (1 - pi)\n        W_sr = np.sqrt(W)\n        W_sr_K = W_sr[:, np.newaxis] * K\n        B = np.eye(W.shape[0]) + W_sr_K * W_sr\n        L = cholesky(B, lower=True)\n        b = W * f + (self.y_train_ - pi)\n        a = b - W_sr * cho_solve((L, True), W_sr_K.dot(b))\n        f = K.dot(a)\n        lml = -0.5 * a.T.dot(f) - np.log1p(np.exp(-(self.y_train_ * 2 - 1) * f)).sum() - np.log(np.diag(L)).sum()\n        if lml - log_marginal_likelihood < 1e-10:\n            break\n        log_marginal_likelihood = lml\n    self.f_cached = f\n    if return_temporaries:\n        return (log_marginal_likelihood, (pi, W_sr, L, b, a))\n    else:\n        return log_marginal_likelihood",
    "docstring": "Mode-finding for binary Laplace GPC and fixed kernel. This approximates the posterior of the latent function values for given inputs and target observations with a Gaussian approximation and uses Newton's iteration to find the mode of this approximation.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\gaussian_process\\_gpc.py",
    "ast_data": "FunctionDef name:_posterior_mode arg:self arg:K arg:return_temporaries arguments arg arg arg If BoolOp Call Compare Assign Assign Call Assign For Call Assign Call Assign Assign Call Assign Assign Call Assign Call Assign Assign Call Call Assign Call Assign Call Call Call Call Call Call Call If Compare Assign Assign If Return return:yes Return return:yes"
  },
  {
    "library": "django",
    "name": "management_form",
    "source_code": "@cached_property\ndef management_form(self):\n    if self.is_bound:\n        form = ManagementForm(self.data, auto_id=self.auto_id, prefix=self.prefix, renderer=self.renderer)\n        form.full_clean()\n    else:\n        form = ManagementForm(auto_id=self.auto_id, prefix=self.prefix, initial={TOTAL_FORM_COUNT: self.total_form_count(), INITIAL_FORM_COUNT: self.initial_form_count(), MIN_NUM_FORM_COUNT: self.min_num, MAX_NUM_FORM_COUNT: self.max_num}, renderer=self.renderer)\n    return form",
    "docstring": "Return the ManagementForm instance for this FormSet.",
    "type": "method",
    "file_path": "django\\django\\forms\\formsets.py",
    "ast_data": "FunctionDef name:management_form arg:self arguments arg If Assign Call Call Assign Call Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "get_min_max_value",
    "source_code": "def get_min_max_value(self) -> tuple[float, float]:\n    return (self._statistics.min_max_statistics.global_min, self._statistics.min_max_statistics.global_max)",
    "docstring": "Calculates the global min and max values. Returns: (min_value, max_value): Min and max calculated using MinMax",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\compiler\\mlir\\quantization\\tensorflow\\calibrator\\calibration_algorithm.py",
    "ast_data": "FunctionDef name:get_min_max_value arg:self arguments arg Return return:yes"
  },
  {
    "library": "numpy",
    "name": "trimcoef",
    "source_code": "def trimcoef(c, tol=0):\n    if tol < 0:\n        raise ValueError('tol must be non-negative')\n    [c] = as_series([c])\n    [ind] = np.nonzero(np.abs(c) > tol)\n    if len(ind) == 0:\n        return c[:1] * 0\n    else:\n        return c[:ind[-1] + 1].copy()",
    "docstring": "Remove \"small\" \"trailing\" coefficients from a polynomial. \"Small\" means \"small in absolute value\" and is controlled by the parameter ; \"trailing\" means highest order coefficient(s), e.g., in `toltol` >> from numpy.polynomial import polyutils as pu >>> pu.trimcoef((0,0,3,0,5,0,0)) array([0., 0., 3., 0., 5.]) >>> pu.trimcoef((0,0,1e-3,0,1e-5,0,0),1e-3) # item == tol is trimmed array([0.]) >>> i = complex(0,1) # works for complex >>> pu.trimcoef((3e-4,1e-3*(1-i),5e-4,2e-5*(1+i)), 1e-3) array([0.0003+0.j , 0.001 -0.001j])",
    "type": "function",
    "file_path": "numpy\\numpy\\polynomial\\polyutils.py",
    "ast_data": "FunctionDef name:trimcoef arg:c arg:tol arguments arg arg If Compare Raise Call Assign Call Assign Call Compare Call If Compare Call Return return:yes Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "_find_culprit",
    "source_code": "def _find_culprit(self, mod: torch.fx.GraphModule, inputs: Tensors) -> str:\n    return 'Unable to find a culprit because _find_culprit() function is not implemented.'",
    "docstring": "When an error occurs during lowering or running the lowered mod, we use this function to find culprits in the that causes the error.",
    "type": "method",
    "file_path": "pytorch\\torch\\fx\\passes\\splitter_base.py",
    "ast_data": "FunctionDef name:_find_culprit arg:self arg:mod arg:inputs arguments arg arg arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_get_io_specs",
    "source_code": "def _get_io_specs(exported_program: torch.export.ExportedProgram) -> tuple[dict, dict]:\n    nodes: dict[str, torch.fx.Node] = {node.name: node for node in exported_program.graph.nodes}\n    user_inputs = [spec for spec in exported_program.graph_signature.input_specs if spec.kind == graph_signature.InputKind.USER_INPUT]\n    user_outputs = [spec for spec in exported_program.graph_signature.output_specs if spec.kind == graph_signature.OutputKind.USER_OUTPUT]\n    inputs: dict[str, torch._export.serde.schema.TensorMeta] = {}\n    outputs: dict[str, torch._export.serde.schema.TensorMeta] = {}\n    for spec in user_inputs:\n        if isinstance(spec.arg, graph_signature.ConstantArgument):\n            continue\n        name = spec.arg.name\n        inputs[name] = nodes[name].meta['tensor_meta']\n    for spec in user_outputs:\n        if isinstance(spec.arg, graph_signature.ConstantArgument):\n            continue\n        name = spec.arg.name\n        outputs[name] = nodes[name].meta['tensor_meta']\n    return (inputs, outputs)",
    "docstring": "Get the input and output specs of the exported program.",
    "type": "function",
    "file_path": "pytorch\\torch\\onnx\\_internal\\exporter\\_analysis.py",
    "ast_data": "FunctionDef name:_get_io_specs arg:exported_program arguments arg Assign Compare Assign Compare For If Call Assign Assign For If Call Assign Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "dynamic",
    "source_code": "@property\ndef dynamic(self):\n    return any((layer._dynamic for layer in self._flatten_layers()))",
    "docstring": "Whether the layer is dynamic (eager-only); set in the constructor.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\base_layer.py",
    "ast_data": "FunctionDef name:dynamic arg:self arguments arg Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "chunk_tensor",
    "source_code": "@abstractmethod\ndef chunk_tensor(self, tensor: torch.Tensor, rank: int, world_size: int, num_devices_per_node: int, pg: dist.ProcessGroup, device: Optional[torch.device]=None) -> torch.Tensor:\n    ...",
    "docstring": "Shards a tensor to chunks and returns the local chunk.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\fsdp\\_fsdp_extensions.py",
    "ast_data": "FunctionDef name:chunk_tensor arg:self arg:tensor arg:rank arg:world_size arg:num_devices_per_node arg:pg arg:device arguments arg arg arg arg arg arg arg"
  },
  {
    "library": "tensorflow",
    "name": "_infer_fft_length_for_rfft",
    "source_code": "def _infer_fft_length_for_rfft(input_tensor, fft_rank):\n    fft_shape = input_tensor.get_shape()[-fft_rank:]\n    if not fft_shape.is_fully_defined():\n        return _array_ops.shape(input_tensor)[-fft_rank:]\n    return _ops.convert_to_tensor(fft_shape.as_list(), _dtypes.int32)",
    "docstring": "Infers the argument for a RFFT from .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\signal\\fft_ops.py",
    "ast_data": "FunctionDef name:_infer_fft_length_for_rfft arg:input_tensor arg:fft_rank arguments arg arg Assign Call If Call Return return:yes Call Return return:yes Call Call"
  },
  {
    "library": "matplotlib",
    "name": "_draw_animated",
    "source_code": "def _draw_animated(self):\n    fig = self.canvas.figure\n    for a in self._artists:\n        fig.draw_artist(a)",
    "docstring": "Draw all of the animated artists.",
    "type": "method",
    "file_path": "matplotlib\\galleries\\users_explain\\animations\\blitting.py",
    "ast_data": "FunctionDef name:_draw_animated arg:self arguments arg Assign For Call"
  },
  {
    "library": "numpy",
    "name": "addfield",
    "source_code": "def addfield(mrecord, newfield, newfieldname=None):\n    _data = mrecord._data\n    _mask = mrecord._mask\n    if newfieldname is None or newfieldname in reserved_fields:\n        newfieldname = f'f{len(_data.dtype)}'\n    newfield = ma.array(newfield)\n    newdtype = np.dtype(_data.dtype.descr + [(newfieldname, newfield.dtype)])\n    newdata = np.recarray(_data.shape, newdtype)\n    [newdata.setfield(_data.getfield(*f), *f) for f in _data.dtype.fields.values()]\n    newdata.setfield(newfield._data, *newdata.dtype.fields[newfieldname])\n    newdata = newdata.view(MaskedRecords)\n    newmdtype = np.dtype([(n, np.bool) for n in newdtype.names])\n    newmask = np.recarray(_data.shape, newmdtype)\n    [newmask.setfield(_mask.getfield(*f), *f) for f in _mask.dtype.fields.values()]\n    newmask.setfield(ma.getmaskarray(newfield), *newmask.dtype.fields[newfieldname])\n    newdata._mask = newmask\n    return newdata",
    "docstring": "Adds a new field to the masked record array Uses as data and as name. If is None, the new field name is set to 'fi', where is the number of existing fields.",
    "type": "function",
    "file_path": "numpy\\numpy\\ma\\mrecords.py",
    "ast_data": "FunctionDef name:addfield arg:mrecord arg:newfield arg:newfieldname arguments arg arg arg Assign Assign If BoolOp Compare Compare Assign Call Assign Call Assign Call Assign Call Call Call Call Call Assign Call Assign Call Assign Call Call Call Call Call Call Assign Return return:yes"
  },
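A usage sketch via `numpy.ma.mrecords`, assuming `fromarrays` accepts comma-separated names as in `numpy.rec`; the unnamed field defaults to 'f2' here since two fields already exist.

```python
import numpy as np
import numpy.ma as ma
from numpy.ma import mrecords

base = mrecords.fromarrays(
    [np.array([1, 2, 3]), np.array([1.0, 2.0, 3.0])], names="a,b"
)
extended = mrecords.addfield(base, ma.array([10, 20, 30], mask=[0, 1, 0]))
print(extended.dtype.names)  # ('a', 'b', 'f2')
print(extended.f2)           # [10 -- 30], mask carried over
```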
  {
    "library": "cryptography",
    "name": "__eq__",
    "source_code": "@abc.abstractmethod\ndef __eq__(self, other: object) -> bool:\n    pass",
    "docstring": "Checks equality.",
    "type": "method",
    "file_path": "cryptography\\src\\cryptography\\hazmat\\primitives\\asymmetric\\ed448.py",
    "ast_data": "FunctionDef name:__eq__ arg:self arg:other arguments arg arg"
  },
  {
    "library": "tensorflow",
    "name": "_create_hash_str",
    "source_code": "def _create_hash_str(self, input_arg, output_arg, node_def):\n    hasher = hashlib.sha1()\n\n    def update_num(n):\n        hasher.update(compat.as_bytes('%x' % n))\n\n    def update_str(s):\n        update_num(len(s))\n        hasher.update(compat.as_bytes(s))\n\n    def update_strs(slist):\n        update_num(len(slist))\n        for s in slist:\n            update_str(s)\n    for adef in input_arg:\n        update_str(adef.SerializeToString())\n    for adef in output_arg:\n        update_str(adef.SerializeToString())\n    for n in sorted(node_def, key=lambda n: n.name):\n        update_str(n.name)\n        update_str(n.op)\n        update_strs(n.input)\n        update_num(len(n.attr))\n        for k in sorted(n.attr):\n            update_str(k)\n            update_str(n.attr[k].SerializeToString())\n    return hasher.hexdigest()[:8]",
    "docstring": "Creates an 8-character string unique to this input. Args: input_arg: the input_arg field of an OpDef (e.g. self._definition.signature.input_arg) output_arg: the output_arg field of an OpDef (e.g. self._definition.signature.output_arg) node_def: the node_def field of a FunctionDef (e.g. self._definition.node_def) Returns: The unique string for this input",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\function.py",
    "ast_data": "FunctionDef name:_create_hash_str arg:self arg:input_arg arg:output_arg arg:node_def arguments arg arg arg arg Assign Call FunctionDef name:update_num arg:n arguments arg Call Call FunctionDef name:update_str arg:s arguments arg Call Call Call Call FunctionDef name:update_strs arg:slist arguments arg Call Call For Call For Call Call For Call Call For Call arguments arg Call Call Call Call Call For Call Call Call Call Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "_generalized_average",
    "source_code": "def _generalized_average(U, V, average_method):\n    if average_method == 'min':\n        return min(U, V)\n    elif average_method == 'geometric':\n        return np.sqrt(U * V)\n    elif average_method == 'arithmetic':\n        return np.mean([U, V])\n    elif average_method == 'max':\n        return max(U, V)\n    else:\n        raise ValueError(\"'average_method' must be 'min', 'geometric', 'arithmetic', or 'max'\")",
    "docstring": "Return a particular mean of two numbers.",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\metrics\\cluster\\_supervised.py",
    "ast_data": "FunctionDef name:_generalized_average arg:U arg:V arg:average_method arguments arg arg arg If Compare Return return:yes Call If Compare Return return:yes Call If Compare Return return:yes Call If Compare Return return:yes Call Raise Call"
  },
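The four averaging choices on U=0.25, V=1.0, importing the private helper purely for illustration; the geometric and arithmetic means fall between min and max.

```python
from sklearn.metrics.cluster._supervised import _generalized_average  # private

for method in ("min", "geometric", "arithmetic", "max"):
    print(method, _generalized_average(0.25, 1.0, method))
# min 0.25 / geometric 0.5 / arithmetic 0.625 / max 1.0
```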
  {
    "library": "tensorflow",
    "name": "_py_get_item",
    "source_code": "def _py_get_item(target, i):\n    return target[i]",
    "docstring": "Overload of get_item that executes a Python list modification.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\autograph\\operators\\slices.py",
    "ast_data": "FunctionDef name:_py_get_item arg:target arg:i arguments arg arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "BindInputStep",
    "source_code": "class BindInputStep(InputAdaptStep):\n\n    def __init__(self, model_signature: inspect.Signature):\n        self._model_signature = model_signature\n\n    def apply(self, model_args: Sequence[Any], model_kwargs: Mapping[str, Any], model: torch.nn.Module | Callable | torch_export.ExportedProgram | None=None) -> tuple[Sequence[Any], Mapping[str, Any]]:\n        bound = self._model_signature.bind(*model_args, **model_kwargs)\n        bound.apply_defaults()\n        if bound.kwargs:\n            raise ValueError('Keyword-only arguments are not supported.')\n        return ((), bound.arguments)",
    "docstring": "Bind the input arguments to the model signature.",
    "type": "class",
    "file_path": "pytorch\\torch\\onnx\\_internal\\io_adapter.py",
    "ast_data": "ClassDef name:BindInputStep FunctionDef name:__init__ arg:self arg:model_signature arguments arg arg Assign FunctionDef name:apply arg:self arg:model_args arg:model_kwargs arg:model arguments arg arg arg arg Assign Call Call If Raise Call Return return:yes"
  },
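A minimal sketch of the signature-binding idea behind `BindInputStep`, using only `inspect`; the `forward` function is hypothetical.

```python
import inspect


def forward(x, y, scale=1.0):
    return (x + y) * scale


sig = inspect.signature(forward)
bound = sig.bind(1.0, y=2.0)   # mixed positional/keyword call
bound.apply_defaults()
print(dict(bound.arguments))   # {'x': 1.0, 'y': 2.0, 'scale': 1.0}
print(bound.kwargs)            # {} -> nothing keyword-only to reject
```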
  {
    "library": "scipy",
    "name": "_blocked_elementwise",
    "source_code": "def _blocked_elementwise(func):\n    block_size = 2 ** 20\n\n    def wrapper(x):\n        if x.shape[0] < block_size:\n            return func(x)\n        else:\n            y0 = func(x[:block_size])\n            y = np.zeros((x.shape[0],) + y0.shape[1:], dtype=y0.dtype)\n            y[:block_size] = y0\n            del y0\n            for j in range(block_size, x.shape[0], block_size):\n                y[j:j + block_size] = func(x[j:j + block_size])\n            return y\n    return wrapper",
    "docstring": "Decorator for an elementwise function, to apply it blockwise along first dimension, to avoid excessive memory usage in temporaries.",
    "type": "function",
    "file_path": "scipy\\scipy\\sparse\\linalg\\_onenormest.py",
    "ast_data": "FunctionDef name:_blocked_elementwise arg:func arguments arg Assign FunctionDef name:wrapper arg:x arguments arg If Compare Return return:yes Call Assign Call Assign Call Assign For Call Assign Call Return return:yes Return return:yes"
  },
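A usage sketch importing the private decorator for illustration: arrays shorter than the 2**20 block size take the direct path, and the result matches the unblocked call either way.

```python
import numpy as np
from scipy.sparse.linalg._onenormest import _blocked_elementwise  # private


@_blocked_elementwise
def squared(x):
    return x**2


x = np.arange(10.0).reshape(5, 2)
np.testing.assert_array_equal(squared(x), x**2)  # blockwise == direct
```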
  {
    "library": "matplotlib",
    "name": "_depart_query_reference_node",
    "source_code": "def _depart_query_reference_node(self, node):\n    self.depart_literal(node)",
    "docstring": "Act as if this is a .",
    "type": "function",
    "file_path": "matplotlib\\lib\\matplotlib\\sphinxext\\roles.py",
    "ast_data": "FunctionDef name:_depart_query_reference_node arg:self arg:node arguments arg arg Call"
  },
  {
    "library": "tensorflow",
    "name": "convert_alg_to_int",
    "source_code": "def convert_alg_to_int(alg):\n    if isinstance(alg, int):\n        return alg\n    if isinstance(alg, Algorithm):\n        return alg.value\n    if isinstance(alg, tensor.Tensor):\n        return alg\n    if isinstance(alg, str):\n        canon_alg = alg.strip().lower().replace('-', '').replace('_', '')\n        if canon_alg == 'philox':\n            return Algorithm.PHILOX.value\n        elif canon_alg == 'threefry':\n            return Algorithm.THREEFRY.value\n        elif canon_alg == 'autoselect':\n            return Algorithm.AUTO_SELECT.value\n        else:\n            raise ValueError(unsupported_alg_error_msg(alg))\n    else:\n        raise TypeError(f\"Can't convert argument `alg` (of value {alg} and type {type(alg)}) to int.\")",
    "docstring": "Converts algorithm to an integer. Args: alg: can be one of these types: integer, Algorithm, Tensor, string. Allowed strings are \"philox\" and \"threefry\". Returns: An integer, unless the input is a Tensor in which case a Tensor is returned.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\random_ops_util.py",
    "ast_data": "FunctionDef name:convert_alg_to_int arg:alg arguments arg If Call Return return:yes If Call Return return:yes If Call Return return:yes If Call Assign Call Call Call Call If Compare Return return:yes If Compare Return return:yes If Compare Return return:yes Raise Call Call Raise Call Call"
  },
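A pure-Python sketch of just the string-canonicalization step above (strip, lowercase, drop '-' and '_'), independent of TensorFlow.

```python
def canonicalize(alg: str) -> str:
    # Same normalization as above: '  Three-Fry ' and 'THREE_FRY' both match.
    return alg.strip().lower().replace("-", "").replace("_", "")


assert canonicalize("  Three-Fry ") == "threefry"
assert canonicalize("AUTO_SELECT") == "autoselect"
```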
  {
    "library": "django",
    "name": "log_deletions",
    "source_code": "def log_deletions(self, request, queryset):\n    from django.contrib.admin.models import DELETION, LogEntry\n    return LogEntry.objects.log_actions(user_id=request.user.pk, queryset=queryset, action_flag=DELETION)",
    "docstring": "Log that objects will be deleted. Note that this method must be called before the deletion. The default implementation creates admin LogEntry objects.",
    "type": "method",
    "file_path": "django\\django\\contrib\\admin\\options.py",
    "ast_data": "FunctionDef name:log_deletions arg:self arg:request arg:queryset arguments arg arg arg Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "_is_strictly_monotonic_increasing",
    "source_code": "@final\n@property\ndef _is_strictly_monotonic_increasing(self) -> bool:\n    return self.is_unique and self.is_monotonic_increasing",
    "docstring": "Return if the index is strictly monotonic increasing (only increasing) values. Examples -------- >>> Index([1, 2, 3])._is_strictly_monotonic_increasing True >>> Index([1, 2, 2])._is_strictly_monotonic_increasing False >>> Index([1, 3, 2])._is_strictly_monotonic_increasing False",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\indexes\\base.py",
    "ast_data": "FunctionDef name:_is_strictly_monotonic_increasing arg:self arguments arg Return return:yes BoolOp"
  },
  {
    "library": "pandas",
    "name": "_concat_same_type",
    "source_code": "@classmethod\ndef _concat_same_type(cls, to_concat: Sequence[IntervalArray]) -> Self:\n    closed_set = {interval.closed for interval in to_concat}\n    if len(closed_set) != 1:\n        raise ValueError('Intervals must all be closed on the same side.')\n    closed = closed_set.pop()\n    left: IntervalSide = np.concatenate([interval.left for interval in to_concat])\n    right: IntervalSide = np.concatenate([interval.right for interval in to_concat])\n    left, right, dtype = cls._ensure_simple_new_inputs(left, right, closed=closed)\n    return cls._simple_new(left, right, dtype=dtype)",
    "docstring": "Concatenate multiple IntervalArray Parameters ---------- to_concat : sequence of IntervalArray Returns ------- IntervalArray",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\arrays\\interval.py",
    "ast_data": "FunctionDef name:_concat_same_type arg:cls arg:to_concat arguments arg arg Assign If Compare Call Raise Call Assign Call Call Call Assign Call Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "_dir_deletions",
    "source_code": "@final\ndef _dir_deletions(self) -> set[str]:\n    return self._accessors | self._hidden_attrs",
    "docstring": "Delete unwanted __dir__ for this object.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\accessor.py",
    "ast_data": "FunctionDef name:_dir_deletions arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "merge_call",
    "source_code": "def merge_call(self, merge_fn, args=(), kwargs=None):\n    require_replica_context(self)\n    if kwargs is None:\n        kwargs = {}\n    merge_fn = autograph.tf_convert(merge_fn, autograph_ctx.control_status_ctx(), convert_by_default=False)\n    return self._merge_call(merge_fn, args, kwargs)",
    "docstring": "Merge args across replicas and run in a cross-replica context. This allows communication and coordination when there are multiple calls to the step_fn triggered by a call to . See for an explanation. If not inside a distributed scope, this is equivalent to: Args: merge_fn: Function that joins arguments from threads that are given as PerReplica. It accepts object as the first argument. args: List or tuple with positional per-thread arguments for . kwargs: Dict with keyword per-thread arguments for . Returns: The return value of , except for values which are unpacked.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\distribute_lib.py",
    "ast_data": "FunctionDef name:merge_call arg:self arg:merge_fn arg:args arg:kwargs arguments arg arg arg arg Call If Compare Assign Assign Call Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_parse_type_to_int",
    "source_code": "def _parse_type_to_int(dtype, flag):\n    if dtype not in mmi_constants.TFLITE_TYPES:\n        raise ValueError(\"Unsupported value '{0}' for {1}. Only {2} are supported.\".format(dtype, flag, mmi_constants.TFLITE_TYPES))\n    dtype_str = mmi_constants.TFLITE_TO_STR_TYPES[dtype]\n    dtype_int = schema_fb.TensorType.__dict__[dtype_str]\n    return dtype_int",
    "docstring": "Converts a tflite type to it's integer representation. Args: dtype: tf.DType representing the inference type. flag: str representing the flag name. Returns: integer, a tflite TensorType enum value. Raises: ValueError: Unsupported tflite type.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\lite\\tools\\optimize\\python\\modify_model_interface_lib.py",
    "ast_data": "FunctionDef name:_parse_type_to_int arg:dtype arg:flag arguments arg arg If Compare Raise Call Call Assign Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "get_canonical_import",
    "source_code": "def get_canonical_import(import_set):\n    import_list = sorted(import_set, key=lambda imp_and_priority: (-imp_and_priority[1], imp_and_priority[0]))\n    return import_list[0][0]",
    "docstring": "Obtain one single import from a set of possible sources of a symbol. One symbol might come from multiple places as it is being imported and reexported. To simplify API changes, we always use the same import for the same module, and give preference based on higher priority and alphabetical ordering. Args: import_set: (set) Imports providing the same symbol. This is a set of tuples in the form (import, priority). We want to pick an import with highest priority. Returns: A module name to import",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\tools\\api\\generator\\create_python_api.py",
    "ast_data": "FunctionDef name:get_canonical_import arg:import_set arguments arg Assign Call arguments arg Return return:yes"
  },
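A standalone sketch of the same sort key: highest priority first, ties broken alphabetically. The module names here are illustrative.

```python
import_set = {
    ("tensorflow.python.ops.math_ops", 0),
    ("tensorflow.math", 1),
    ("tensorflow.python.math", 1),
}
import_list = sorted(import_set, key=lambda ip: (-ip[1], ip[0]))
print(import_list[0][0])  # tensorflow.math -> priority 1, alphabetically first
```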
  {
    "library": "tensorflow",
    "name": "format_to_string",
    "source_code": "def format_to_string(self, pretty: bool=False) -> str:\n    trace = {}\n    trace['traceEvents'] = self._metadata + self._events\n    if pretty:\n        return json.dumps(trace, indent=4, separators=(',', ': '))\n    else:\n        return json.dumps(trace, separators=(',', ':'))",
    "docstring": "Formats the chrome trace to a string. Args: pretty: (Optional.) If True, produce human-readable JSON output. Returns: A JSON-formatted string in Chrome Trace format.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\client\\timeline.py",
    "ast_data": "FunctionDef name:format_to_string arg:self arg:pretty arguments arg arg Assign Assign If Return return:yes Call Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "draw_pane",
    "source_code": "def draw_pane(self, renderer):\n    renderer.open_group('pane3d', gid=self.get_gid())\n    xys, loc = self.active_pane()\n    self.pane.xy = xys[:, :2]\n    self.pane.draw(renderer)\n    renderer.close_group('pane3d')",
    "docstring": "Draw pane. Parameters ---------- renderer : subclass",
    "type": "method",
    "file_path": "matplotlib\\lib\\mpl_toolkits\\mplot3d\\axis3d.py",
    "ast_data": "FunctionDef name:draw_pane arg:self arg:renderer arguments arg arg Call Call Assign Call Assign Call Call"
  },
  {
    "library": "tensorflow",
    "name": "get",
    "source_code": "def get(identifier):\n    if isinstance(identifier, (int, float)):\n        return FixedLossScale(identifier)\n    if identifier == 'dynamic':\n        return DynamicLossScale()\n    if isinstance(identifier, LossScale):\n        return identifier\n    elif identifier is None:\n        return None\n    else:\n        raise ValueError('Could not interpret loss scale identifier: %s' % identifier)",
    "docstring": "Get a loss scale object.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\training\\experimental\\loss_scale.py",
    "ast_data": "FunctionDef name:get arg:identifier arguments arg If Call Return return:yes Call If Compare Return return:yes Call If Call Return return:yes If Compare Return return:no Raise Call"
  },
  {
    "library": "pytorch",
    "name": "timed",
    "source_code": "def timed(prefix: str) -> Callable[[F], F]:\n\n    def decorator(f: F) -> F:\n\n        @functools.wraps(f)\n        def wrapper(*args: Any, **kwargs: Any) -> Any:\n            logger = cast(logging.Logger, LOGGER)\n            logger.info(prefix)\n            with timer(logger, prefix):\n                return f(*args, **kwargs)\n        return cast(F, wrapper)\n    return decorator",
    "docstring": "Decorator for timing functions",
    "type": "function",
    "file_path": "pytorch\\tools\\nightly.py",
    "ast_data": "FunctionDef name:timed arg:prefix arguments arg FunctionDef name:decorator arg:f arguments arg FunctionDef name:wrapper arguments arg arg Assign Call Call With Call Return return:yes Call Call Return return:yes Call Return return:yes"
  },
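A self-contained sketch of the same pattern, substituting a stdlib-based `timer` context manager for the module's own helpers.

```python
import functools
import logging
import time
from contextlib import contextmanager

logging.basicConfig(level=logging.INFO)
log = logging.getLogger("nightly")


@contextmanager
def timer(logger, prefix):
    start = time.perf_counter()
    yield
    logger.info("%s took %.3fs", prefix, time.perf_counter() - start)


def timed(prefix):
    def decorator(f):
        @functools.wraps(f)
        def wrapper(*args, **kwargs):
            log.info(prefix)
            with timer(log, prefix):
                return f(*args, **kwargs)
        return wrapper
    return decorator


@timed("sleeping")
def nap():
    time.sleep(0.1)


nap()  # logs "sleeping", then "sleeping took 0.1XXs"
```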
  {
    "library": "pytorch",
    "name": "make_image",
    "source_code": "def make_image(tensor, rescale=1, rois=None, labels=None):\n    from PIL import Image\n    height, width, channel = tensor.shape\n    scaled_height = int(height * rescale)\n    scaled_width = int(width * rescale)\n    image = Image.fromarray(tensor)\n    if rois is not None:\n        image = draw_boxes(image, rois, labels=labels)\n    ANTIALIAS = Image.Resampling.LANCZOS\n    image = image.resize((scaled_width, scaled_height), ANTIALIAS)\n    import io\n    output = io.BytesIO()\n    image.save(output, format='PNG')\n    image_string = output.getvalue()\n    output.close()\n    return Summary.Image(height=height, width=width, colorspace=channel, encoded_image_string=image_string)",
    "docstring": "Convert a numpy representation of an image to Image protobuf.",
    "type": "function",
    "file_path": "pytorch\\torch\\utils\\tensorboard\\summary.py",
    "ast_data": "FunctionDef name:make_image arg:tensor arg:rescale arg:rois arg:labels arguments arg arg arg arg Assign Assign Call Assign Call Assign Call If Compare Assign Call Assign Assign Call Assign Call Call Assign Call Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "transform_index_select",
    "source_code": "@register_transformation_rule(IndexSelect)\ndef transform_index_select(constraint, counter):\n    dims, counter = gen_tensor_dims(constraint.tensor_size, counter)\n    is_valid_index = valid_index(constraint.index, dims)\n    nat_constraints = gen_nat_constraints(dims)\n    if is_valid_index == T():\n        new_dims = copy.deepcopy(dims)\n        new_dims[constraint.index] = constraint.dim_replace\n    transformed_constraint = Conj([BinConstraintT(constraint.input_var, TensorType(dims), op_eq), *nat_constraints, is_valid_index, BinConstraintT(constraint.output, TensorType(new_dims), op_eq)])\n    return (transformed_constraint, counter)",
    "docstring": "The constraints consider the given tensor size, checks if the index is valid and if so, generates a constraint for replacing the input dimension with the required dimension",
    "type": "function",
    "file_path": "pytorch\\torch\\fx\\experimental\\migrate_gradual_types\\constraint_transformation.py",
    "ast_data": "FunctionDef name:transform_index_select arg:constraint arg:counter arguments arg arg Assign Call Assign Call Assign Call If Compare Call Assign Call Assign Assign Call Call Call Call Call Return return:yes Call"
  },
  {
    "library": "seaborn",
    "name": "_complement_color",
    "source_code": "def _complement_color(self, color, base_color, hue_map):\n    if color == 'gray':\n        msg = 'Use \"auto\" to set automatic grayscale colors. From v0.14.0, \"gray\" will default to matplotlib\\'s definition.'\n        warnings.warn(msg, FutureWarning, stacklevel=3)\n        color = 'auto'\n    elif color is None or color is default:\n        color = 'auto'\n    if color != 'auto':\n        return color\n    if hue_map.lookup_table is None:\n        if base_color is None:\n            return None\n        basis = [mpl.colors.to_rgb(base_color)]\n    else:\n        basis = [mpl.colors.to_rgb(c) for c in hue_map.lookup_table.values()]\n    unique_colors = np.unique(basis, axis=0)\n    light_vals = [rgb_to_hls(*rgb[:3])[1] for rgb in unique_colors]\n    lum = min(light_vals) * 0.6\n    return (lum, lum, lum)",
    "docstring": "Allow a color to be set automatically using a basis of comparison.",
    "type": "method",
    "file_path": "seaborn\\seaborn\\categorical.py",
    "ast_data": "FunctionDef name:_complement_color arg:self arg:color arg:base_color arg:hue_map arguments arg arg arg arg If Compare Assign Call Assign If BoolOp Compare Compare Assign If Compare Return return:yes If Compare If Compare Return return:no Assign Call Assign Call Call Assign Call Assign Call Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "Permute",
    "source_code": "class Permute(Layer):\n\n    def __init__(self, dims, **kwargs):\n        super(Permute, self).__init__(**kwargs)\n        self.dims = tuple(dims)\n        if sorted(dims) != list(range(1, len(dims) + 1)):\n            raise ValueError('Invalid permutation `dims` for Permute Layer: %s. The set of indices in `dims` must be consecutive and start from 1.' % (dims,))\n        self.input_spec = InputSpec(ndim=len(self.dims) + 1)\n\n    def compute_output_shape(self, input_shape):\n        input_shape = tensor_shape.TensorShape(input_shape).as_list()\n        output_shape = copy.copy(input_shape)\n        for i, dim in enumerate(self.dims):\n            target_dim = input_shape[dim]\n            output_shape[i + 1] = target_dim\n        return tensor_shape.TensorShape(output_shape)\n\n    def call(self, inputs):\n        return array_ops.transpose(inputs, perm=(0,) + self.dims)\n\n    def get_config(self):\n        config = {'dims': self.dims}\n        base_config = super(Permute, self).get_config()\n        return dict(list(base_config.items()) + list(config.items()))",
    "docstring": "Permutes the dimensions of the input according to a given pattern. Useful e.g. connecting RNNs and convnets. Example: Args: dims: Tuple of integers. Permutation pattern does not include the samples dimension. Indexing starts at 1. For instance, permutes the first and second dimensions of the input. Input shape: Arbitrary. Use the keyword argument (tuple of integers, does not include the samples axis) when using this layer as the first layer in a model. Output shape: Same as the input shape, but with the dimensions re-ordered according to the specified pattern.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\layers\\core.py",
    "ast_data": "ClassDef name:Permute FunctionDef name:__init__ arg:self arg:dims arguments arg arg arg Call Call Assign Call If Compare Call Call Call Call Raise Call Assign Call Call FunctionDef name:compute_output_shape arg:self arg:input_shape arguments arg arg Assign Call Call Assign Call For Call Assign Assign Return return:yes Call FunctionDef name:call arg:self arg:inputs arguments arg arg Return return:yes Call FunctionDef name:get_config arg:self arguments arg Assign Assign Call Call Return return:yes Call Call Call Call Call"
  },
  {
    "library": "scipy",
    "name": "jac_mag",
    "source_code": "@property\ndef jac_mag(self):\n    if self._g_mag is None:\n        self._g_mag = scipy.linalg.norm(self.jac)\n    return self._g_mag",
    "docstring": "Magnitude of jacobian of objective function at current iteration.",
    "type": "method",
    "file_path": "scipy\\scipy\\optimize\\_trustregion.py",
    "ast_data": "FunctionDef name:jac_mag arg:self arguments arg If Compare Assign Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_should_decompose_because_unsafe_op",
    "source_code": "def _should_decompose_because_unsafe_op(op: torch._ops.OperatorBase) -> bool:\n    if not isinstance(op, torch._ops.OpOverload):\n        return False\n    if torch.Tag.maybe_aliasing_or_mutating in op.tags:\n        return True\n    return op == torch.ops.aten.native_batch_norm.default",
    "docstring": "Returns True if the op must always decompose in export/compile tracing system In export, we always decompose certain CIA ops that are tagged with maybe_aliasing_or_mutating because we statically need to know if the op is mutating or not. But these CIA ops could have different behaviour in runtime. native_batch_norm is a prim op which has a wrong schema and it needs to be replaced with correct schema. But until then, we will force decompose it via this tag.",
    "type": "function",
    "file_path": "pytorch\\torch\\_decomp\\__init__.py",
    "ast_data": "FunctionDef name:_should_decompose_because_unsafe_op arg:op arguments arg If Call Return return:yes If Compare Return return:yes Return return:yes Compare"
  },
  {
    "library": "pandas",
    "name": "swapkey",
    "source_code": "def swapkey(self, old_key: str, new_key: str, new_value=None) -> None:\n    if self.has_resolvers:\n        maps = self.resolvers.maps + self.scope.maps\n    else:\n        maps = self.scope.maps\n    maps.append(self.temps)\n    for mapping in maps:\n        if old_key in mapping:\n            mapping[new_key] = new_value\n            return",
    "docstring": "Replace a variable name, with a potentially new value. Parameters ---------- old_key : str Current variable name to replace new_key : str New variable name to replace with new_value : object Value to be replaced along with the possible renaming",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\computation\\scope.py",
    "ast_data": "FunctionDef name:swapkey arg:self arg:old_key arg:new_key arg:new_value arguments arg arg arg arg If Assign Assign Call For If Compare Assign Return return:no"
  },
  {
    "library": "scikit-learn",
    "name": "is_stationary",
    "source_code": "def is_stationary(self):\n    return False",
    "docstring": "Returns whether the kernel is stationary.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\gaussian_process\\kernels.py",
    "ast_data": "FunctionDef name:is_stationary arg:self arguments arg Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "fit_transform",
    "source_code": "@_fit_context(prefer_skip_nested_validation=True)\ndef fit_transform(self, y):\n    if self.classes is not None:\n        return self.fit(y).transform(y)\n    self._cached_dict = None\n    class_mapping = defaultdict(int)\n    class_mapping.default_factory = class_mapping.__len__\n    yt = self._transform(y, class_mapping)\n    tmp = sorted(class_mapping, key=class_mapping.get)\n    dtype = int if all((isinstance(c, int) for c in tmp)) else object\n    class_mapping = np.empty(len(tmp), dtype=dtype)\n    class_mapping[:] = tmp\n    self.classes_, inverse = np.unique(class_mapping, return_inverse=True)\n    yt.indices = np.asarray(inverse[yt.indices], dtype=yt.indices.dtype)\n    if not self.sparse_output:\n        yt = yt.toarray()\n    return yt",
    "docstring": "Fit the label sets binarizer and transform the given label sets. Parameters ---------- y : iterable of iterables A set of labels (any orderable and hashable object) for each sample. If the parameter is set, will not be iterated. Returns ------- y_indicator : {ndarray, sparse matrix} of shape (n_samples, n_classes) A matrix such that iff is in , and 0 otherwise. Sparse matrix will be of CSR format.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\preprocessing\\_label.py",
    "ast_data": "FunctionDef name:fit_transform arg:self arg:y arguments arg arg If Compare Return return:yes Call Call Assign Assign Call Assign Assign Call Assign Call Assign Call Call Assign Call Call Assign Assign Call Assign Call If Assign Call Return return:yes Call"
  },
  {
    "library": "kornia",
    "name": "get_kernel",
    "source_code": "def get_kernel(self) -> Tensor:\n    cross = tensor([[[0, 1, 0], [1, 1, 1], [0, 1, 0]]])\n    bound = tensor([[[0, 0, 0], [0, 1, 0], [0, 0, 0]]])\n    kernel = stack([bound, cross, bound], 1) * (1 / 7)\n    return kernel[None]",
    "docstring": "Get kernel for image morphology convolution.",
    "type": "method",
    "file_path": "kornia\\kornia\\losses\\hausdorff.py",
    "ast_data": "FunctionDef name:get_kernel arg:self arguments arg Assign Call Assign Call Assign Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "ComplexView",
    "source_code": "@ir_dataclass(frozen=False)\nclass ComplexView(FallbackKernel):\n\n    def should_allocate(self) -> bool:\n        return False\n\n    def get_inputs_that_alias_output(self) -> Sequence[str]:\n        return [self.inputs[0].get_name()]\n\n    def __init__(self, layout, kernel, tensor_args, nontensor_args, unflatten_args, *, unbacked_bindings=None) -> None:\n        super().__init__(layout, kernel, tensor_args, nontensor_args, unflatten_args, unbacked_bindings=unbacked_bindings)",
    "docstring": "View a complex number as two dtyped numbers or vice versa",
    "type": "class",
    "file_path": "pytorch\\torch\\_inductor\\ir.py",
    "ast_data": "ClassDef name:ComplexView FunctionDef name:should_allocate arg:self arguments arg Return return:yes FunctionDef name:get_inputs_that_alias_output arg:self arguments arg Return return:yes Call FunctionDef name:__init__ arg:self arg:layout arg:kernel arg:tensor_args arg:nontensor_args arg:unflatten_args arguments arg arg arg arg arg arg arg Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "PostProcessing",
    "source_code": "def PostProcessing(self):\n    for _, grad_state in self._map.items():\n        for _, b_merge in grad_state.switch_map.items():\n            if b_merge.op.inputs[0] == b_merge.op.inputs[1]:\n                dtype = b_merge.op.inputs[0].dtype\n                shape = b_merge.op.inputs[0].get_shape()\n                if shape.is_fully_defined():\n                    grad_state.grad_context.Enter()\n                    grad_val = constant_op.constant(0, dtype=dtype, shape=shape)\n                    next_grad_val = control_flow_ops._NextIteration(grad_val)\n                    grad_state.grad_context.Exit()\n                else:\n                    outer_grad_ctxt = grad_state.grad_context.outer_context\n                    if outer_grad_ctxt:\n                        outer_grad_ctxt.Enter()\n                    enter_grad_op = b_merge.op.inputs[0].op\n                    enter_grad = enter_grad_op.inputs[0]\n                    grad_shape = array_ops.shape_internal(enter_grad, optimize=False)\n                    grad_val = array_ops.zeros(grad_shape)\n                    if outer_grad_ctxt:\n                        outer_grad_ctxt.Exit()\n                    grad_state.grad_context.Enter()\n                    next_grad_val = control_flow_ops._NextIteration(grad_val)\n                    grad_state.grad_context.Exit()\n                b_merge.op._update_input(1, next_grad_val)",
    "docstring": "Perform postprocessing at the end of gradients(). We have created the gradient graph at this point. So this function can be used to perform any postprocessing on the gradient graph. We currently perform the following postprocessing: 1. Patch the gradient graph if the output of a loop variable doesn't depend on its input.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\control_flow_state.py",
    "ast_data": "FunctionDef name:PostProcessing arg:self arguments arg For Call For Call If Compare Assign Assign Call If Call Call Assign Call Assign Call Call Assign If Call Assign Assign Assign Call Assign Call If Call Call Assign Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "parameterized_truncated_normal",
    "source_code": "def parameterized_truncated_normal(shape, means=0.0, stddevs=1.0, minvals=-2.0, maxvals=2.0, dtype=dtypes.float32, seed=None, name=None):\n    with ops.name_scope(name, 'parameterized_truncated_normal', [shape, means, stddevs, minvals, maxvals]) as name:\n        shape_tensor = shape_util.shape_tensor(shape)\n        means_tensor = ops.convert_to_tensor(means, dtype=dtype, name='means')\n        stddevs_tensor = ops.convert_to_tensor(stddevs, dtype=dtype, name='stddevs')\n        minvals_tensor = ops.convert_to_tensor(minvals, dtype=dtype, name='minvals')\n        maxvals_tensor = ops.convert_to_tensor(maxvals, dtype=dtype, name='maxvals')\n        seed1, seed2 = random_seed.get_seed(seed)\n        rnd = gen_random_ops.parameterized_truncated_normal(shape_tensor, means_tensor, stddevs_tensor, minvals_tensor, maxvals_tensor, seed=seed1, seed2=seed2)\n        shape_util.maybe_set_static_shape(rnd, shape)\n        return rnd",
    "docstring": "Outputs random values from a truncated normal distribution. The generated values follow a normal distribution with specified mean and standard deviation, except that values whose magnitude is more than 2 standard deviations from the mean are dropped and re-picked. Args: shape: A 1-D integer Tensor or Python array. The shape of the output tensor. means: A 0-D Tensor or Python value of type . The mean of the truncated normal distribution. stddevs: A 0-D Tensor or Python value of type . The standard deviation of the truncated normal distribution. minvals: A 0-D Tensor or Python value of type . The minimum value of the truncated normal distribution. maxvals: A 0-D Tensor or Python value of type . The maximum value of the truncated normal distribution. dtype: The type of the output. seed: A Python integer. Used to create a random seed for the distribution. See for behavior. name: A name for the operation (optional). Returns: A tensor of the specified shape filled with random truncated normal values.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\random_ops.py",
    "ast_data": "FunctionDef name:parameterized_truncated_normal arg:shape arg:means arg:stddevs arg:minvals arg:maxvals arg:dtype arg:seed arg:name arguments arg arg arg arg arg arg arg arg With Call Assign Call Assign Call Assign Call Assign Call Assign Call Assign Call Assign Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "split",
    "source_code": "@abc.abstractmethod\ndef split(self) -> tuple[Sequence[Union[message.Message, bytes]], chunk_pb2.ChunkedMessage]:\n    pass",
    "docstring": "Splits proto message into a Sequence of protos/bytes.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\tools\\proto_splitter\\split.py",
    "ast_data": "FunctionDef name:split arg:self arguments arg"
  },
  {
    "library": "pandas",
    "name": "boxplot_frame_groupby",
    "source_code": "def boxplot_frame_groupby(grouped: DataFrameGroupBy, subplots: bool=True, column=None, fontsize: int | None=None, rot: int=0, grid: bool=True, ax=None, figsize: tuple[float, float] | None=None, layout=None, sharex: bool=False, sharey: bool=True, backend=None, **kwargs):\n    plot_backend = _get_plot_backend(backend)\n    return plot_backend.boxplot_frame_groupby(grouped, subplots=subplots, column=column, fontsize=fontsize, rot=rot, grid=grid, ax=ax, figsize=figsize, layout=layout, sharex=sharex, sharey=sharey, **kwargs)",
    "docstring": "Make box plots from DataFrameGroupBy data. Parameters ---------- grouped : DataFrameGroupBy The grouped DataFrame object over which to create the box plots. subplots : bool * `` option shows the boxplots in a single figure. .. plot:: :context: close-figs >>> grouped.boxplot(subplots=False, rot=45, fontsize=12) # doctest: +SKIP",
    "type": "function",
    "file_path": "pandas\\pandas\\plotting\\_core.py",
    "ast_data": "FunctionDef name:boxplot_frame_groupby arg:grouped arg:subplots arg:column arg:fontsize arg:rot arg:grid arg:ax arg:figsize arg:layout arg:sharex arg:sharey arg:backend arguments arg arg arg arg arg arg arg arg arg arg arg arg arg Assign Call Return return:yes Call"
  },
  {
    "library": "django",
    "name": "json_script",
    "source_code": "@register.filter(is_safe=True)\ndef json_script(value, element_id=None):\n    return _json_script(value, element_id)",
    "docstring": "Output value JSON-encoded, wrapped in a tag (with an optional id).",
    "type": "function",
    "file_path": "django\\django\\template\\defaultfilters.py",
    "ast_data": "FunctionDef name:json_script arg:value arg:element_id arguments arg arg Return return:yes Call Call"
  },
  {
    "library": "matplotlib",
    "name": "_add_axes_internal",
    "source_code": "def _add_axes_internal(self, ax, key):\n    self._axstack.add(ax)\n    if ax not in self._localaxes:\n        self._localaxes.append(ax)\n    self.sca(ax)\n    ax._remove_method = self.delaxes\n    ax._projection_init = key\n    self.stale = True\n    ax.stale_callback = _stale_figure_callback\n    return ax",
    "docstring": "Private helper for and .",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\figure.py",
    "ast_data": "FunctionDef name:_add_axes_internal arg:self arg:ax arg:key arguments arg arg arg Call If Compare Call Call Assign Assign Assign Assign Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "fit_intercept_only",
    "source_code": "def fit_intercept_only(self, y_true, sample_weight=None):\n    if sample_weight is None:\n        median = np.percentile(y_true, 50, axis=0)\n    else:\n        median = _weighted_percentile(y_true, sample_weight, 50)\n    diff = y_true - median\n    term = np.sign(diff) * np.minimum(self.closs.delta, np.abs(diff))\n    return median + np.average(term, weights=sample_weight)",
    "docstring": "Compute raw_prediction of an intercept-only model. This is the weighted median of the target, i.e. over the samples axis=0.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\_loss\\loss.py",
    "ast_data": "FunctionDef name:fit_intercept_only arg:self arg:y_true arg:sample_weight arguments arg arg arg If Compare Assign Call Assign Call Assign Assign Call Call Call Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "split_de_casteljau",
    "source_code": "def split_de_casteljau(beta, t):\n    beta = np.asarray(beta)\n    beta_list = [beta]\n    while True:\n        beta = _de_casteljau1(beta, t)\n        beta_list.append(beta)\n        if len(beta) == 1:\n            break\n    left_beta = [beta[0] for beta in beta_list]\n    right_beta = [beta[-1] for beta in reversed(beta_list)]\n    return (left_beta, right_beta)",
    "docstring": "Split a Bézier segment defined by its control points *beta* into two separate segments divided at *t* and return their control points.",
    "type": "function",
    "file_path": "matplotlib\\lib\\matplotlib\\bezier.py",
    "ast_data": "FunctionDef name:split_de_casteljau arg:beta arg:t arguments arg arg Assign Call Assign While Assign Call Call If Compare Call Assign Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "set_step",
    "source_code": "@tf_export('summary.experimental.set_step', v1=[])\ndef set_step(step):\n    _summary_state.step = step",
    "docstring": "Sets the default summary step for the current thread. For convenience, this function sets a default value for the parameter used in summary-writing functions elsewhere in the API so that it need not be explicitly passed in every such invocation. The value can be a constant or a variable, and can be retrieved via . Note: when using this with @tf.functions, the step value will be captured at the time the function is traced, so changes to the step outside the function will not be reflected inside the function unless using a step. Args: step: An -castable default step value, or None to unset.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\summary_ops_v2.py",
    "ast_data": "FunctionDef name:set_step arg:step arguments arg Assign Call"
  },
  {
    "library": "django",
    "name": "resolve_relation",
    "source_code": "def resolve_relation(scope_model, relation):\n    if relation == RECURSIVE_RELATIONSHIP_CONSTANT:\n        relation = scope_model\n    if isinstance(relation, str):\n        if '.' not in relation:\n            relation = '%s.%s' % (scope_model._meta.app_label, relation)\n    return relation",
    "docstring": "Transform relation into a model or fully-qualified model string of the form \"app_label.ModelName\", relative to scope_model. The relation argument can be: * RECURSIVE_RELATIONSHIP_CONSTANT, i.e. the string \"self\", in which case the model argument will be returned. * A bare model name without an app_label, in which case scope_model's app_label will be prepended. * An \"app_label.ModelName\" string. * A model class, which will be returned unchanged.",
    "type": "function",
    "file_path": "django\\django\\db\\models\\fields\\related.py",
    "ast_data": "FunctionDef name:resolve_relation arg:scope_model arg:relation arguments arg arg If Compare Assign If Call If Compare Assign Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_reduce_tensor",
    "source_code": "def _reduce_tensor(self, t: Tensor) -> tuple[Callable[[T], T], tuple[Union[TensorMetadata, TensorMetadataAndValues]]]:\n    from .graph import GraphLowering\n    if t.is_mkldnn:\n        raise BypassFxGraphCache('mkldnn tensors unpickleable')\n    metadata = extract_tensor_metadata_for_cache_key(t)\n    if is_frozen_param(t) and (not GraphLowering.can_inline_constant(t)):\n        return (_ident, (metadata,))\n    start = time()\n    values = t.tolist()\n    elapsed = time() - start\n    if elapsed > 1.0:\n        warnings.warn(f'FX graph cache copying of a large constant took {elapsed:.1}s. Please file an issue.')\n    return (_ident, (TensorMetadataAndValues(metadata, values),))",
    "docstring": "Custom reducer to pickle Tensors. If we see tensors, we know they're constants stored as attributes on the GraphModule.",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\codecache.py",
    "ast_data": "FunctionDef name:_reduce_tensor arg:self arg:t arguments arg arg If Raise Call Assign Call If BoolOp Call Call Return return:yes Assign Call Assign Call Assign Call If Compare Call Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "_mvn_qmc_integrand",
    "source_code": "def _mvn_qmc_integrand(covar, low, high, use_tent=False):\n    cho, lo, hi = _permuted_cholesky(covar, low, high)\n    n = cho.shape[0]\n    ndim_integrand = n - 1\n    ct = cho[0, 0]\n    c = phi(lo[0] / ct)\n    d = phi(hi[0] / ct)\n    ci = c\n    dci = d - ci\n\n    def integrand(*zs):\n        ndim_qmc = len(zs)\n        n_qmc_samples = len(np.atleast_1d(zs[0]))\n        assert ndim_qmc == ndim_integrand\n        y = np.zeros((ndim_qmc, n_qmc_samples))\n        c = np.full(n_qmc_samples, ci)\n        dc = np.full(n_qmc_samples, dci)\n        pv = dc.copy()\n        for i in range(1, n):\n            if use_tent:\n                x = abs(2 * zs[i - 1] - 1)\n            else:\n                x = zs[i - 1]\n            y[i - 1, :] = phinv(c + x * dc)\n            s = cho[i, :i] @ y[:i, :]\n            ct = cho[i, i]\n            c = phi((lo[i] - s) / ct)\n            d = phi((hi[i] - s) / ct)\n            dc = d - c\n            pv = pv * dc\n        return pv\n    return (integrand, ndim_integrand)",
    "docstring": "Transform the multivariate normal integration into a QMC integrand over a unit hypercube. The dimensionality of the resulting hypercube integration domain is one less than the dimensionality of the original integrand. Note that this transformation subsumes the integration bounds in order to account for infinite bounds. The QMC integration one does with the returned integrand should be on the unit hypercube. Parameters ---------- covar : (n, n) float array Possibly singular, positive semidefinite symmetric covariance matrix. low, high : (n,) float array The low and high integration bounds. use_tent : bool, optional If True, then use tent periodization. Only helpful for lattice rules. Returns ------- integrand : Callable[[NDArray], NDArray] The QMC-integrable integrand. It takes an ``.",
    "type": "function",
    "file_path": "scipy\\scipy\\stats\\_qmvnt.py",
    "ast_data": "FunctionDef name:_mvn_qmc_integrand arg:covar arg:low arg:high arg:use_tent arguments arg arg arg arg Assign Call Assign Assign Assign Assign Call Assign Call Assign Assign FunctionDef name:integrand arguments arg Assign Call Assign Call Call Compare Assign Call Assign Call Assign Call Assign Call For Call If Assign Call Assign Assign Call Assign Assign Assign Call Assign Call Assign Assign Return return:yes Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "should_synchronize_after_execute",
    "source_code": "@property\ndef should_synchronize_after_execute(self) -> bool:\n    return self._synchronize_after_execute",
    "docstring": "Whether to synchronize after executing the stage.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\checkpoint\\staging.py",
    "ast_data": "FunctionDef name:should_synchronize_after_execute arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "should_checkpoint",
    "source_code": "@property\ndef should_checkpoint(self):\n    return self._strategy.extended.should_checkpoint",
    "docstring": "Whether to save checkpoint.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\distribute_coordinator.py",
    "ast_data": "FunctionDef name:should_checkpoint arg:self arguments arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "set_state_dict",
    "source_code": "def set_state_dict(model: nn.Module, optimizers: Union[torch.optim.Optimizer, Iterable[torch.optim.Optimizer]], *, model_state_dict: dict[str, ValueType], optim_state_dict: OptimizerStateType, options: Optional[StateDictOptions]=None) -> _IncompatibleKeys:\n    model_state_dict: dict[str, ValueType] = _unflatten_model_state_dict(model, model_state_dict)\n    with _gc_context():\n        optimizers = (optimizers,) if isinstance(optimizers, torch.optim.Optimizer) else tuple(optimizers)\n        info = _verify_options(model, optimizers, optim_only=not model_state_dict, options=options)\n        _verify_state_dict(model_state_dict, optim_state_dict, info)\n        _load_optim_state_dict(model, optimizers, optim_state_dict, info)\n        return _load_model_state_dict(model, model_state_dict, info)",
    "docstring": "Load the model state_dict and optimizers state_dict. The counterpart of `StateDictOptions` fields: * **missing_keys** is a list of str containing the missing keys of the model state_dict. * **unexpected_keys** is a list of str containing the unexpected keys of the model state_dict. :type model_state_dict: typing.Dict[str, ValueType] :type optim_state_dict: typing.OptimizerStateType",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\checkpoint\\state_dict.py",
    "ast_data": "FunctionDef name:set_state_dict arg:model arg:optimizers arguments arg arg arg arg arg Call With Call Assign Call Call Assign Call Call Call Return return:yes Call"
  },
  {
    "library": "numpy",
    "name": "diagonal",
    "source_code": "@array_function_dispatch(_diagonal_dispatcher)\ndef diagonal(a, offset=0, axis1=0, axis2=1):\n    if isinstance(a, np.matrix):\n        return asarray(a).diagonal(offset=offset, axis1=axis1, axis2=axis2)\n    else:\n        return asanyarray(a).diagonal(offset=offset, axis1=axis1, axis2=axis2)",
    "docstring": "Return specified diagonals. If is 2-D, returns the diagonal of with the given offset, i.e., the collection of elements of the form `aaxis1axis2axis1axis2aaamatrixmatrixaxis1axis2anumpy.flipudnumpy.fliplr`. >>> a = np.arange(9).reshape(3, 3) >>> a array([[0, 1, 2], [3, 4, 5], [6, 7, 8]]) >>> np.fliplr(a).diagonal() # Horizontal flip array([2, 4, 6]) >>> np.flipud(a).diagonal() # Vertical flip array([6, 4, 2]) Note that the order in which the diagonal is retrieved varies depending on the flip function.",
    "type": "function",
    "file_path": "numpy\\numpy\\_core\\fromnumeric.py",
    "ast_data": "FunctionDef name:diagonal arg:a arg:offset arg:axis1 arg:axis2 arguments arg arg arg arg If Call Return return:yes Call Call Return return:yes Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "swap_tensor_content_in_graph_function",
    "source_code": "def swap_tensor_content_in_graph_function(graph_def, from_endiness, to_endiness):\n    if isinstance(graph_def, meta_graph_pb2.MetaGraphDef):\n        functions = graph_def.graph_def.library.function\n    elif isinstance(graph_def, graph_pb2.GraphDef):\n        functions = graph_def.library.function\n    else:\n        return\n    for function in functions:\n        node_def = function.node_def\n        for node in node_def:\n            if node.op == 'Const':\n                tensor = node.attr['value'].tensor\n                byte_swap_tensor_content(tensor, from_endiness, to_endiness)",
    "docstring": "Fix endiness of tensor contents. Args: graph_def: Target graph_def to change endiness. from_endiness: The original endianness format. \"big\" or \"little\" to_endiness: The target endianness format. \"big\" or \"little\"",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\byte_swap_tensor.py",
    "ast_data": "FunctionDef name:swap_tensor_content_in_graph_function arg:graph_def arg:from_endiness arg:to_endiness arguments arg arg arg If Call Assign If Call Assign Return return:no For Assign For If Compare Assign Call"
  },
  {
    "library": "tensorflow",
    "name": "set_element_type",
    "source_code": "def set_element_type(entity, dtype, shape=UNSPECIFIED):\n    del entity\n    del dtype\n    del shape",
    "docstring": "Indicates that the entity is expected hold items of specified type/shape. The staged TensorFlow ops will reflect and assert this data type. Ignored otherwise. Args: entity: The entity to annotate. dtype: TensorFlow dtype value to assert for entity. shape: Optional shape to assert for entity.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\autograph\\lang\\directives.py",
    "ast_data": "FunctionDef name:set_element_type arg:entity arg:dtype arg:shape arguments arg arg arg"
  },
  {
    "library": "tensorflow",
    "name": "_maybe_modify_args",
    "source_code": "def _maybe_modify_args(self, node, full_name, name):\n    renamed_keywords = self._get_applicable_dict('function_keyword_renames', full_name, name)\n    if not renamed_keywords:\n        return False\n    if uses_star_kwargs_in_call(node):\n        self.add_log(WARNING, node.lineno, node.col_offset, '(Manual check required) upgrading %s may require renaming or removing call arguments, but it was passed variable-length *args or **kwargs. The upgrade script cannot handle these automatically.' % (full_name or name))\n    modified = False\n    new_keywords = []\n    for keyword in node.keywords:\n        argkey = keyword.arg\n        if argkey in renamed_keywords:\n            modified = True\n            if renamed_keywords[argkey] is None:\n                lineno = getattr(keyword, 'lineno', node.lineno)\n                col_offset = getattr(keyword, 'col_offset', node.col_offset)\n                self.add_log(INFO, lineno, col_offset, 'Removed argument %s for function %s' % (argkey, full_name or name))\n            else:\n                keyword.arg = renamed_keywords[argkey]\n                lineno = getattr(keyword, 'lineno', node.lineno)\n                col_offset = getattr(keyword, 'col_offset', node.col_offset)\n                self.add_log(INFO, lineno, col_offset, 'Renamed keyword argument for %s from %s to %s' % (full_name, argkey, renamed_keywords[argkey]))\n                new_keywords.append(keyword)\n        else:\n            new_keywords.append(keyword)\n    if modified:\n        node.keywords = new_keywords\n    return modified",
    "docstring": "Rename keyword args if the function called full_name requires it.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\tools\\compatibility\\ast_edits.py",
    "ast_data": "FunctionDef name:_maybe_modify_args arg:self arg:node arg:full_name arg:name arguments arg arg arg arg Assign Call If Return return:yes If Call Call BoolOp Assign Assign For Assign If Compare Assign If Compare Assign Call Assign Call Call BoolOp Assign Assign Call Assign Call Call Call Call If Assign Return return:yes"
  },
  {
    "library": "django",
    "name": "ensure_registered",
    "source_code": "@classmethod\ndef ensure_registered(cls):\n    if not capi.get_driver_count():\n        capi.register_all()",
    "docstring": "Attempt to register all the data source drivers.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\gdal\\driver.py",
    "ast_data": "FunctionDef name:ensure_registered arg:cls arguments arg If Call Call"
  },
  {
    "library": "matplotlib",
    "name": "_invalidate_internal",
    "source_code": "def _invalidate_internal(self, level, invalidating_node):\n    if level <= self._invalid and (not self.pass_through):\n        return\n    self._invalid = level\n    for parent in list(self._parents.values()):\n        parent = parent()\n        if parent is not None:\n            parent._invalidate_internal(level=level, invalidating_node=self)",
    "docstring": "Called by :meth: and subsequently ascends the transform stack calling each TransformNode's _invalidate_internal method.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\transforms.py",
    "ast_data": "FunctionDef name:_invalidate_internal arg:self arg:level arg:invalidating_node arguments arg arg arg If BoolOp Compare Return return:no Assign For Call Call Assign Call If Compare Call"
  },
  {
    "library": "pandas",
    "name": "quantile",
    "source_code": "def quantile(self, q: float | Sequence[float] | AnyArrayLike=0.5, interpolation: QuantileInterpolation='linear') -> float | Series:\n    validate_percentile(q)\n    df = self.to_frame()\n    result = df.quantile(q=q, interpolation=interpolation, numeric_only=False)\n    if result.ndim == 2:\n        result = result.iloc[:, 0]\n    if is_list_like(q):\n        result.name = self.name\n        idx = Index(q, dtype=np.float64)\n        return self._constructor(result, index=idx, name=self.name)\n    else:\n        return result.iloc[0]",
    "docstring": "Return value at the given quantile. Parameters ---------- q : float or array-like, default 0.5 (50% quantile) The quantile(s) to compute, which can lie in range: 0 jijijij` and the values are the quantiles, otherwise a float will be returned. See Also -------- core.window.Rolling.quantile : Calculate the rolling quantile. numpy.percentile : Returns the q-th percentile(s) of the array elements. Examples -------- >>> s = pd.Series([1, 2, 3, 4]) >>> s.quantile(0.5) 2.5 >>> s.quantile([0.25, 0.5, 0.75]) 0.25 1.75 0.50 2.50 0.75 3.25 dtype: float64",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\series.py",
    "ast_data": "FunctionDef name:quantile arg:self arg:q arg:interpolation arguments arg arg arg Call Assign Call Assign Call If Compare Assign If Call Assign Assign Call Return return:yes Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "inject_functional_model_class",
    "source_code": "def inject_functional_model_class(cls):\n    from tensorflow.python.keras.engine import functional\n    from tensorflow.python.keras.engine import training_v1\n    if cls == Model or cls == training_v1.Model:\n        return functional.Functional\n    if cls == object:\n        return object\n    cls.__bases__ = tuple((inject_functional_model_class(base) for base in cls.__bases__))\n    cls.__new__(cls)\n    return cls",
    "docstring": "Inject into the hierarchy of this class if needed.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\training.py",
    "ast_data": "FunctionDef name:inject_functional_model_class arg:cls arguments arg If BoolOp Compare Compare Return return:yes If Compare Return return:yes Assign Call Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "can_reorder_logs",
    "source_code": "@staticmethod\ndef can_reorder_logs(fn, args, kwargs) -> True:\n    allowed_input_types = (variables.TensorVariable, variables.ConstantVariable, StringFormatVariable)\n    flat_args = pytree.tree_leaves([args, kwargs])\n    for arg in flat_args:\n        if not isinstance(arg, allowed_input_types):\n            return False\n    return True",
    "docstring": "Run some additional checks for what sort of function calls can we actually reorder.",
    "type": "method",
    "file_path": "pytorch\\torch\\_dynamo\\variables\\misc.py",
    "ast_data": "FunctionDef name:can_reorder_logs arg:fn arg:args arg:kwargs arguments arg arg arg Assign Assign Call For If Call Return return:yes Return return:yes"
  },
  {
    "library": "pandas",
    "name": "select_columns",
    "source_code": "@abstractmethod\ndef select_columns(self, indices: Sequence[int]) -> DataFrame:\n    pass",
    "docstring": "Create a new DataFrame by selecting a subset of columns by index.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\interchange\\dataframe_protocol.py",
    "ast_data": "FunctionDef name:select_columns arg:self arg:indices arguments arg arg"
  },
  {
    "library": "django",
    "name": "base36_to_int",
    "source_code": "def base36_to_int(s):\n    if len(s) > 13:\n        raise ValueError('Base36 input too large')\n    return int(s, 36)",
    "docstring": "Convert a base 36 string to an int. Raise ValueError if the input won't fit into an int.",
    "type": "function",
    "file_path": "django\\django\\utils\\http.py",
    "ast_data": "FunctionDef name:base36_to_int arg:s arguments arg If Compare Call Raise Call Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "fill_missing_names",
    "source_code": "def fill_missing_names(names: Sequence[Hashable | None]) -> list[Hashable]:\n    return [f'level_{i}' if name is None else name for i, name in enumerate(names)]",
    "docstring": "If a name is missing then replace it by level_n, where n is the count .. versionadded:: 1.4.0 Parameters ---------- names : list-like list of column names or None values. Returns ------- list list of column names with the None values replaced.",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\common.py",
    "ast_data": "FunctionDef name:fill_missing_names arg:names arguments arg Return return:yes Compare Call"
  },
  {
    "library": "pandas",
    "name": "_memory_usage",
    "source_code": "@final\ndef _memory_usage(self, deep: bool=False) -> int:\n    if hasattr(self.array, 'memory_usage'):\n        return self.array.memory_usage(deep=deep)\n    v = self.array.nbytes\n    if deep and is_object_dtype(self.dtype) and (not PYPY):\n        values = cast(np.ndarray, self._values)\n        v += lib.memory_usage_of_objects(values)\n    return v",
    "docstring": "Memory usage of the values. Parameters ---------- deep : bool, default False Introspect the data deeply, interrogate dtypes for system-level memory consumption. Returns ------- bytes used Returns memory usage of the values in the Index in bytes. See Also -------- numpy.ndarray.nbytes : Total bytes consumed by the elements of the array. Notes ----- Memory usage does not include memory consumed by elements that are not components of the array if deep=False or if used on PyPy Examples -------- >>> idx = pd.Index([1, 2, 3]) >>> idx.memory_usage() 24",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\base.py",
    "ast_data": "FunctionDef name:_memory_usage arg:self arg:deep arguments arg arg If Call Return return:yes Call Assign If BoolOp Call Assign Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "__init__",
    "source_code": "def __init__(self, module: torch.fx.GraphModule, sample_input: Sequence[Any], operator_support: OperatorSupportBase, settings: _SplitterSettingBase, non_acc_submodule_name: str='_run_on_cpu_', return_tuple: bool=False, nodes_finder: Optional[FxNetAccNodesFinder]=None):\n    assert isinstance(module, torch.fx.GraphModule)\n    self.module = module\n    ShapeProp(self.module).propagate(*sample_input)\n    self.settings = settings\n    self.operator_support = operator_support\n    self.sample_input = sample_input\n    if nodes_finder is None:\n        nodes_finder = FxNetAccNodesFinder(self.module, self.operator_support, self.settings.allow_non_tensor)\n    self.acc_nodes = nodes_finder()\n    if self.settings.skip_fusion:\n        self.fusions = {}\n    else:\n        self.fusions = FxNetAccFusionsFinder(module, self.acc_nodes)()\n    self.deps = self.find_deps()\n    self.update_deps_for_fusions()\n    self.non_acc_submodule_name = non_acc_submodule_name\n    self._node_submodule_map: dict[str, str] = {}\n    self._return_tuple = return_tuple\n    self.tags: list[str] = []",
    "docstring": "Preprocesses graph before splitting: - finds nodes supported by ACC, - finds fusion groups for ACC nodes having non-tensor IO, - builds a graph of direct dependencies, - builds a map of fused nodes to their fusions. As a result we get self.acc_nodes, self.deps and self.fusions.",
    "type": "method",
    "file_path": "pytorch\\torch\\fx\\passes\\splitter_base.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:module arg:sample_input arg:operator_support arg:settings arg:non_acc_submodule_name arg:return_tuple arg:nodes_finder arguments arg arg arg arg arg arg arg arg Call Assign Call Call Assign Assign Assign If Compare Assign Call Assign Call If Assign Assign Call Call Assign Call Call Assign Assign"
  },
  {
    "library": "scipy",
    "name": "run_monitored_wait4",
    "source_code": "def run_monitored_wait4(code):\n    code = textwrap.dedent(code)\n    start = time.time()\n    process = subprocess.Popen([sys.executable, '-c', code])\n    pid, returncode, rusage = os.wait4(process.pid, 0)\n    duration = time.time() - start\n    max_rss_bytes = get_max_rss_bytes(rusage)\n    if returncode != 0:\n        raise AssertionError(f'Running failed:\\n{code}')\n    return (duration, max_rss_bytes)",
    "docstring": "Run code in a new Python process, and monitor peak memory usage. Returns ------- duration : float Duration in seconds (including Python startup time) peak_memusage : int Peak memory usage in bytes of the child Python process Notes ----- Works on Unix platforms (Linux, macOS) that have .",
    "type": "function",
    "file_path": "scipy\\benchmarks\\benchmarks\\common.py",
    "ast_data": "FunctionDef name:run_monitored_wait4 arg:code arguments arg Assign Call Assign Call Assign Call Assign Call Assign Call Assign Call If Compare Raise Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "mesh_rank",
    "source_code": "@property\ndef mesh_rank(self):\n    return len(self._mesh_shape)",
    "docstring": "Returns the number of dimensions in the mesh.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\tpu\\topology.py",
    "ast_data": "FunctionDef name:mesh_rank arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "django",
    "name": "_get_test_db_name",
    "source_code": "def _get_test_db_name(self):\n    return self.connection.settings_dict['NAME']",
    "docstring": "Return the 'production' DB name to get the test DB creation machinery to work. This isn't a great deal in this case because DB names as handled by Django don't have real counterparts in Oracle.",
    "type": "method",
    "file_path": "django\\django\\db\\backends\\oracle\\creation.py",
    "ast_data": "FunctionDef name:_get_test_db_name arg:self arguments arg Return return:yes"
  },
  {
    "library": "django",
    "name": "incr",
    "source_code": "def incr(self, key, delta=1, version=None):\n    value = self.get(key, self._missing_key, version=version)\n    if value is self._missing_key:\n        raise ValueError(\"Key '%s' not found\" % key)\n    new_value = value + delta\n    self.set(key, new_value, version=version)\n    return new_value",
    "docstring": "Add delta to value in the cache. If the key does not exist, raise a ValueError exception.",
    "type": "method",
    "file_path": "django\\django\\core\\cache\\backends\\base.py",
    "ast_data": "FunctionDef name:incr arg:self arg:key arg:delta arg:version arguments arg arg arg arg Assign Call If Compare Raise Call Assign Call Return return:yes"
  },
  {
    "library": "pandas",
    "name": "construct_array_type",
    "source_code": "@classmethod\ndef construct_array_type(cls) -> type[IntervalArray]:\n    from pandas.core.arrays import IntervalArray\n    return IntervalArray",
    "docstring": "Return the array type associated with this dtype. Returns ------- type",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\dtypes\\dtypes.py",
    "ast_data": "FunctionDef name:construct_array_type arg:cls arguments arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "load_weights",
    "source_code": "def load_weights(mod: nn.Module, names: Sequence[str], params: Sequence[Tensor], as_params: bool=False) -> None:\n    accessor = NamedMemberAccessor(mod)\n    if as_params:\n        params = [nn.Parameter(p) for p in params]\n    accessor.set_tensors(names, params)",
    "docstring": "Reload a set of weights so that can be used again to perform a forward pass. Note that the are regular Tensors (that can have history) and so are left as Tensors. This means that mod.parameters() will still be empty after this call.",
    "type": "function",
    "file_path": "pytorch\\torch\\_functorch\\make_functional.py",
    "ast_data": "FunctionDef name:load_weights arg:mod arg:names arg:params arg:as_params arguments arg arg arg arg Assign Call If Assign Call Call"
  },
  {
    "library": "pytorch",
    "name": "ConvertComplexToRealRepresentationOutputStep",
    "source_code": "class ConvertComplexToRealRepresentationOutputStep(OutputAdaptStep):\n\n    def apply(self, model_outputs: Any, model: torch.nn.Module | Callable | torch_export.ExportedProgram | None=None) -> Any:\n        return [torch.view_as_real(output.resolve_conj()) if isinstance(output, torch.Tensor) and torch.is_complex(output) else output for output in model_outputs]",
    "docstring": "Convert complex dtype tensors to real representation tensors. ONNX does not support complex dtype tensors. Thus, we convert complex dtype tensors to real representation tensors (i.e., float dtype tensors with an extra dimension representing the real and imaginary parts of the complex number).",
    "type": "class",
    "file_path": "pytorch\\torch\\onnx\\_internal\\io_adapter.py",
    "ast_data": "ClassDef name:ConvertComplexToRealRepresentationOutputStep FunctionDef name:apply arg:self arg:model_outputs arg:model arguments arg arg arg Return return:yes BoolOp Call Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "tensor_diag_part",
    "source_code": "@tf_export('linalg.tensor_diag_part', v1=['linalg.tensor_diag_part', 'diag_part'])\n@dispatch.add_dispatch_support\n@deprecation.deprecated_endpoints('diag_part')\ndef tensor_diag_part(input, name=None):\n    return gen_array_ops.diag_part(input=input, name=name)",
    "docstring": "Returns the diagonal part of the tensor. This operation returns a tensor with the part of the . The part is computed as follows: Assume has dimensions , then the output is a tensor of rank with dimensions where: . For a rank 2 tensor, and produce the same result. For rank 3 and higher, linalg.diag_part extracts the diagonal of each inner-most matrix in the tensor. An example where they differ is given below. >>> x = [[[[1111,1112],[1121,1122]], ... [[1211,1212],[1221,1222]]], ... [[[2111, 2112], [2121, 2122]], ... [[2211, 2212], [2221, 2222]]] ... ] >>> tf.linalg.tensor_diag_part(x) >>> tf.linalg.diag_part(x).shape TensorShape([2, 2, 2]) Args: input: A with rank . name: A name for the operation (optional). Returns: A Tensor containing diagonals of . Has the same type as , and rank .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\array_ops.py",
    "ast_data": "FunctionDef name:tensor_diag_part arg:input arg:name arguments arg arg Return return:yes Call Call Call"
  },
  {
    "library": "django",
    "name": "get_choices",
    "source_code": "def get_choices(self, include_blank=True, blank_choice=BLANK_CHOICE_DASH, limit_choices_to=None, ordering=()):\n    if self.choices is not None:\n        if include_blank:\n            return BlankChoiceIterator(self.choices, blank_choice)\n        return self.choices\n    rel_model = self.remote_field.model\n    limit_choices_to = limit_choices_to or self.get_limit_choices_to()\n    choice_func = operator.attrgetter(self.remote_field.get_related_field().attname if hasattr(self.remote_field, 'get_related_field') else 'pk')\n    qs = rel_model._default_manager.complex_filter(limit_choices_to)\n    if ordering:\n        qs = qs.order_by(*ordering)\n    return (blank_choice if include_blank else []) + [(choice_func(x), str(x)) for x in qs]",
    "docstring": "Return choices with a default blank choices included, for use as choices for this field.",
    "type": "method",
    "file_path": "django\\django\\db\\models\\fields\\__init__.py",
    "ast_data": "FunctionDef name:get_choices arg:self arg:include_blank arg:blank_choice arg:limit_choices_to arg:ordering arguments arg arg arg arg arg If Compare If Return return:yes Call Return return:yes Assign Assign BoolOp Call Assign Call Call Call Assign Call If Assign Call Return return:yes Call Call"
  },
  {
    "library": "cherrypy",
    "name": "__next__",
    "source_code": "def __next__(self):\n    res = next(self._iterator)\n    if isinstance(res, str):\n        res = res.encode('utf-8')\n    return res",
    "docstring": "UTF-8-encode the next chunk of the stream.",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\lib\\encoding.py",
    "ast_data": "FunctionDef name:__next__ arg:self arguments arg Assign Call If Call Assign Call Return return:yes"
  },
  {
    "library": "numpy",
    "name": "zipfile_factory",
    "source_code": "def zipfile_factory(file, *args, **kwargs):\n    if not hasattr(file, 'read'):\n        file = os.fspath(file)\n    import zipfile\n    kwargs['allowZip64'] = True\n    return zipfile.ZipFile(file, *args, **kwargs)",
    "docstring": "Create a ZipFile. Allows for Zip64, and the argument can accept file, str, or pathlib.Path objects. and are passed to the zipfile.ZipFile constructor.",
    "type": "function",
    "file_path": "numpy\\numpy\\lib\\_npyio_impl.py",
    "ast_data": "FunctionDef name:zipfile_factory arg:file arguments arg arg arg If Call Assign Call Assign Return return:yes Call"
  },
  {
    "library": "django",
    "name": "_text_words",
    "source_code": "def _text_words(self, length, truncate):\n    words = self._wrapped.split()\n    if len(words) > length:\n        words = words[:length]\n        return add_truncation_text(' '.join(words), truncate)\n    return ' '.join(words)",
    "docstring": "Truncate a string after a certain number of words. Strip newlines in the string.",
    "type": "method",
    "file_path": "django\\django\\utils\\text.py",
    "ast_data": "FunctionDef name:_text_words arg:self arg:length arg:truncate arguments arg arg arg Assign Call If Compare Call Assign Return return:yes Call Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, *args: str, api_name: str=TENSORFLOW_API_NAME, v1: Optional[Sequence[str]]=None, allow_multiple_exports: bool=True):\n    self._names = args\n    self._names_v1 = v1 if v1 is not None else args\n    self._api_name = api_name\n    self._validate_symbol_names()",
    "docstring": "Export under the names *args (first one is considered canonical). Args: *args: API names in dot delimited format. api_name: API you want to generate Currently, only . v1: Names for the TensorFlow V1 API. If not set, we will use V2 API names both for TensorFlow V1 and V2 APIs. allow_multiple_exports: Deprecated.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\util\\tf_export.py",
    "ast_data": "FunctionDef name:__init__ arg:self arguments arg arg arg arg arg Assign Assign Compare Assign Call"
  },
  {
    "library": "django",
    "name": "get_level",
    "source_code": "def get_level(request):\n    storage = getattr(request, '_messages', default_storage(request))\n    return storage.level",
    "docstring": "Return the minimum level of messages to be recorded. The default level is the `` level.",
    "type": "function",
    "file_path": "django\\django\\contrib\\messages\\api.py",
    "ast_data": "FunctionDef name:get_level arg:request arguments arg Assign Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "adadelta",
    "source_code": "@_disable_dynamo_if_unsupported(single_tensor_fn=_single_tensor_adadelta)\ndef adadelta(params: list[Tensor], grads: list[Tensor], square_avgs: list[Tensor], acc_deltas: list[Tensor], state_steps: list[Tensor], capturable: bool=False, foreach: Optional[bool]=None, differentiable: bool=False, has_complex: bool=False, *, lr: float, rho: float, eps: float, weight_decay: float, maximize: bool):\n    if not torch.compiler.is_compiling() and (not all((isinstance(t, torch.Tensor) for t in state_steps))):\n        raise RuntimeError('API has changed, `state_steps` argument must contain a list of singleton tensors')\n    if foreach is None:\n        _, foreach = _default_to_fused_or_foreach(params, differentiable, use_fused=False)\n    if foreach and torch.jit.is_scripting():\n        raise RuntimeError('torch.jit.script not supported with foreach optimizers')\n    if foreach and (not torch.jit.is_scripting()):\n        func = _multi_tensor_adadelta\n    else:\n        func = _single_tensor_adadelta\n    func(params, grads, square_avgs, acc_deltas, state_steps, lr=lr, rho=rho, eps=eps, weight_decay=weight_decay, maximize=maximize, differentiable=differentiable, capturable=capturable, has_complex=has_complex)",
    "docstring": "Functional API that performs Adadelta algorithm computation. See :class: for details.",
    "type": "function",
    "file_path": "pytorch\\torch\\optim\\adadelta.py",
    "ast_data": "FunctionDef name:adadelta arg:params arg:grads arg:square_avgs arg:acc_deltas arg:state_steps arg:capturable arg:foreach arg:differentiable arg:has_complex arguments arg arg arg arg arg arg arg arg arg arg arg arg arg arg If BoolOp Call Call Call Raise Call If Compare Assign Call If BoolOp Call Raise Call If BoolOp Call Assign Assign Call Call"
  },
  {
    "library": "django",
    "name": "cycle_key",
    "source_code": "def cycle_key(self):\n    data = self._session\n    key = self.session_key\n    self.create()\n    self._session_cache = data\n    if key:\n        self.delete(key)",
    "docstring": "Create a new session key, while retaining the current session data.",
    "type": "method",
    "file_path": "django\\django\\contrib\\sessions\\backends\\base.py",
    "ast_data": "FunctionDef name:cycle_key arg:self arguments arg Assign Assign Call Assign If Call"
  },
  {
    "library": "pytorch",
    "name": "add_image_with_boxes",
    "source_code": "def add_image_with_boxes(self, tag, img_tensor, box_tensor, global_step=None, walltime=None, rescale=1, dataformats='CHW', labels=None):\n    torch._C._log_api_usage_once('tensorboard.logging.add_image_with_boxes')\n    if labels is not None:\n        if isinstance(labels, str):\n            labels = [labels]\n        if len(labels) != box_tensor.shape[0]:\n            labels = None\n    self._get_file_writer().add_summary(image_boxes(tag, img_tensor, box_tensor, rescale=rescale, dataformats=dataformats, labels=labels), global_step, walltime)",
    "docstring": "Add image and draw bounding boxes on the image. Args: tag (str): Data identifier img_tensor (torch.Tensor, numpy.ndarray, or string/blobname): Image data box_tensor (torch.Tensor, numpy.ndarray, or string/blobname): Box data (for detected objects) box should be represented as [x1, y1, x2, y2]. global_step (int): Global step value to record walltime (float): Optional override default walltime (time.time()) seconds after epoch of event rescale (float): Optional scale override dataformats (str): Image data format specification of the form NCHW, NHWC, CHW, HWC, HW, WH, etc. labels (list of string): The label to be shown for each bounding box. Shape: img_tensor: Default is :math:. It can be specified with `` argument. e.g. CHW or HWC box_tensor: (torch.Tensor, numpy.ndarray, or string/blobname): NX4, where N is the number of boxes and each 4 elements in a row represents (xmin, ymin, xmax, ymax).",
    "type": "method",
    "file_path": "pytorch\\torch\\utils\\tensorboard\\writer.py",
    "ast_data": "FunctionDef name:add_image_with_boxes arg:self arg:tag arg:img_tensor arg:box_tensor arg:global_step arg:walltime arg:rescale arg:dataformats arg:labels arguments arg arg arg arg arg arg arg arg arg Call If Compare If Call Assign If Compare Call Assign Call Call Call"
  },
  {
    "library": "kornia",
    "name": "deg2rad",
    "source_code": "def deg2rad(tensor: Tensor) -> Tensor:\n    if not isinstance(tensor, Tensor):\n        raise TypeError(f'Input type is not a Tensor. Got {type(tensor)}')\n    return tensor * pi.to(tensor.device).type(tensor.dtype) / 180.0",
    "docstring": "Convert angles from degrees to radians. Args: tensor: Tensor of arbitrary shape. Returns: tensor with same shape as input. Examples: >>> input = tensor(180.) >>> deg2rad(input) tensor(3.1416)",
    "type": "function",
    "file_path": "kornia\\kornia\\geometry\\conversions.py",
    "ast_data": "FunctionDef name:deg2rad arg:tensor arguments arg If Call Raise Call Call Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "_dump_csv",
    "source_code": "def _dump_csv(self, filename: str):\n    assert self.pipeline_order_with_comms is not None, 'Must initialize compute_comms schedule before dump_csv'\n    with open(filename, 'w', newline='') as csvfile:\n        writer = csv.writer(csvfile)\n        for rank in self.pipeline_order_with_comms:\n            writer.writerow(self.pipeline_order_with_comms[rank])",
    "docstring": "Dump a CSV representation of the compute + comms schedule into a file with the provided filename.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\pipelining\\schedules.py",
    "ast_data": "FunctionDef name:_dump_csv arg:self arg:filename arguments arg arg Compare With Call Assign Call For Call"
  },
  {
    "library": "numpy",
    "name": "make_mask",
    "source_code": "def make_mask(m, copy=False, shrink=True, dtype=MaskType):\n    if m is nomask:\n        return nomask\n    dtype = make_mask_descr(dtype)\n    if isinstance(m, ndarray) and m.dtype.fields and (dtype == np.bool):\n        return np.ones(m.shape, dtype=dtype)\n    copy = None if not copy else True\n    result = np.array(filled(m, True), copy=copy, dtype=dtype, subok=True)\n    if shrink:\n        result = _shrink_mask(result)\n    return result",
    "docstring": "Create a boolean mask from an array. Return as a boolean mask, creating a copy if necessary or requested. The function can accept any sequence that is convertible to integers, or `mmmmmshrinkdtype`. >>> m = [1, 0, 1, 1] >>> n = [0, 1, 0, 0] >>> arr = [] >>> for man, mouse in zip(m, n): ... arr.append((man, mouse)) >>> arr [(1, 0), (0, 1), (1, 0), (1, 0)] >>> dtype = np.dtype({'names':['man', 'mouse'], ... 'formats':[np.int64, np.int64]}) >>> arr = np.array(arr, dtype=dtype) >>> arr array([(1, 0), (0, 1), (1, 0), (1, 0)], dtype=[('man', '>> ma.make_mask(arr, dtype=dtype) array([(True, False), (False, True), (True, False), (True, False)], dtype=[('man', '|b1'), ('mouse', '|b1')])",
    "type": "function",
    "file_path": "numpy\\numpy\\ma\\core.py",
    "ast_data": "FunctionDef name:make_mask arg:m arg:copy arg:shrink arg:dtype arguments arg arg arg arg If Compare Return return:yes Assign Call If BoolOp Call Compare Return return:yes Call Assign Assign Call Call If Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_l2_loss_flops",
    "source_code": "@ops.RegisterStatistics('L2Loss', 'flops')\ndef _l2_loss_flops(graph, node):\n    in_shape = graph_util.tensor_shape_from_node_def_name(graph, node.input[0])\n    in_shape.assert_is_fully_defined()\n    return ops.OpStats('flops', in_shape.num_elements() * 3 - 1)",
    "docstring": "Compute flops for L2Loss operation.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\profiler\\internal\\flops_registry.py",
    "ast_data": "FunctionDef name:_l2_loss_flops arg:graph arg:node arguments arg arg Assign Call Call Return return:yes Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "serialize_model",
    "source_code": "def serialize_model(module, inputs, *, config=None, return_shapes=None, use_int16_for_qint16=False):\n    return _NnapiSerializer(config, use_int16_for_qint16).serialize_model(module, inputs, return_shapes)",
    "docstring": "Convert to NNAPI and serialize torchscript module. Parameters: module: Torchscript module to convert inputs: Tensors used to specify input details for NNAPI config (optional): Optional config to attach to module return_shapes (optional): Specify shape of outputs if your module uses runtime flexible shapes to set output buffer size for NNAPI use_int16_for_qint16 (optional): Use Pytorch int16 to represent NNAPI qint16 values",
    "type": "function",
    "file_path": "pytorch\\torch\\backends\\_nnapi\\serializer.py",
    "ast_data": "FunctionDef name:serialize_model arg:module arg:inputs arguments arg arg arg arg arg Return return:yes Call Call"
  },
  {
    "library": "django",
    "name": "lists",
    "source_code": "def lists(self):\n    return iter(super().items())",
    "docstring": "Yield (key, list) pairs.",
    "type": "method",
    "file_path": "django\\django\\utils\\datastructures.py",
    "ast_data": "FunctionDef name:lists arg:self arguments arg Return return:yes Call Call Call"
  },
  {
    "library": "pandas",
    "name": "__from_arrow__",
    "source_code": "def __from_arrow__(self, array: pa.Array | pa.ChunkedArray) -> DatetimeArray:\n    import pyarrow\n    from pandas.core.arrays import DatetimeArray\n    array = array.cast(pyarrow.timestamp(unit=self._unit), safe=True)\n    if isinstance(array, pyarrow.Array):\n        np_arr = array.to_numpy(zero_copy_only=False)\n    else:\n        np_arr = array.to_numpy()\n    return DatetimeArray._simple_new(np_arr, dtype=self)",
    "docstring": "Construct DatetimeArray from pyarrow Array/ChunkedArray. Note: If the units in the pyarrow Array are the same as this DatetimeDtype, then values corresponding to the integer representation of `pandas.Timestamp.min`, regardless of the null indicator in the pyarrow array. Parameters ---------- array : pyarrow.Array or pyarrow.ChunkedArray The Arrow array to convert to DatetimeArray. Returns ------- extension array : DatetimeArray",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\dtypes\\dtypes.py",
    "ast_data": "FunctionDef name:__from_arrow__ arg:self arg:array arguments arg arg Assign Call Call If Call Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "_get_xdg_config_dir",
    "source_code": "def _get_xdg_config_dir():\n    return os.environ.get('XDG_CONFIG_HOME') or str(Path.home() / '.config')",
    "docstring": "Return the XDG configuration directory, according to the XDG base directory spec:",
    "type": "function",
    "file_path": "matplotlib\\lib\\matplotlib\\__init__.py",
    "ast_data": "FunctionDef name:_get_xdg_config_dir arguments Return return:yes BoolOp Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "release",
    "source_code": "@abc.abstractmethod\ndef release(self, scope_id: str):\n    pass",
    "docstring": "Releases the timer for the `` on the worker this client represents. After this method is called, the countdown timer on the scope is no longer in effect.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\elastic\\timer\\api.py",
    "ast_data": "FunctionDef name:release arg:self arg:scope_id arguments arg arg"
  },
  {
    "library": "seaborn",
    "name": "update",
    "source_code": "def update(self, other: dict[str, Any] | None=None, /, **kwds):\n    if other is not None:\n        theme = self._filter_params(other)\n    else:\n        theme = {}\n    theme.update(kwds)\n    super().update(theme)",
    "docstring": "Update the theme with a dictionary or keyword arguments of rc parameters.",
    "type": "method",
    "file_path": "seaborn\\seaborn\\_core\\plot.py",
    "ast_data": "FunctionDef name:update arguments arg arg arg If Compare Assign Call Assign Call Call Call"
  },
  {
    "library": "cherrypy",
    "name": "__init__",
    "source_code": "def __init__(self, expiration):\n    self.expiration = expiration",
    "docstring": "Create a timer that expires at (UTC datetime).",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\lib\\locking.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:expiration arguments arg arg Assign"
  },
  {
    "library": "pygame",
    "name": "stop",
    "source_code": "def stop(self):\n    self.queue.put(STOP)\n    for thread in self.pool:\n        thread.join()",
    "docstring": "Stops the WorkerQueue, waits for all of the threads to finish up.",
    "type": "method",
    "file_path": "pygame\\src_py\\threads\\__init__.py",
    "ast_data": "FunctionDef name:stop arg:self arguments arg Call For Call"
  },
  {
    "library": "pytorch",
    "name": "get_unique_id",
    "source_code": "def get_unique_id(self) -> str:\n    ret = str(self._unique_id)\n    self._unique_id += 1\n    return ret",
    "docstring": "Get an id. This id is guaranteed to only be handed out once for this package.",
    "type": "method",
    "file_path": "pytorch\\torch\\package\\package_exporter.py",
    "ast_data": "FunctionDef name:get_unique_id arg:self arguments arg Assign Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_wait_all",
    "source_code": "@contextlib.contextmanager\ndef _wait_all():\n    _thread_local_var.future_list = []\n    try:\n        yield\n    finally:\n        try:\n            torch.futures.wait_all(_thread_local_var.future_list)\n        finally:\n            del _thread_local_var.future_list",
    "docstring": "A context manager that collects all futures returned by `` and waits them on the context manager's exit; relieving the user of needing to explicitly call wait. Example:: >>> # xdoctest: +SKIP(\"distributed\") >>> # On worker 0: >>> import torch >>> import torch.distributed.rpc as rpc >>> rpc.init_rpc(\"worker0\", rank=0, world_size=2) >>> with rpc._wait_all(): >>> fut_1 = rpc.rpc_async(dst, torch.add, (torch.ones(2, 2), 1)) >>> fut_2 = rpc.rpc_async(dst, torch.add, (torch.ones(2, 2), 1)) >>> #fut_1 and fut_2 are waited on",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\rpc\\api.py",
    "ast_data": "FunctionDef name:_wait_all arguments Assign Try Try Call"
  },
  {
    "library": "django",
    "name": "_get_missing_target_ids",
    "source_code": "def _get_missing_target_ids(self, source_field_name, target_field_name, db, target_ids):\n    vals = self.through._default_manager.using(db).values_list(target_field_name, flat=True).filter(**{source_field_name: self.related_val[0], '%s__in' % target_field_name: target_ids})\n    return target_ids.difference(vals)",
    "docstring": "Return the subset of ids of that aren't already assigned to this relationship.",
    "type": "method",
    "file_path": "django\\django\\db\\models\\fields\\related_descriptors.py",
    "ast_data": "FunctionDef name:_get_missing_target_ids arg:self arg:source_field_name arg:target_field_name arg:db arg:target_ids arguments arg arg arg arg arg Assign Call Call Call Return return:yes Call"
  },
  {
    "library": "kornia",
    "name": "trans_y",
    "source_code": "@classmethod\ndef trans_y(cls, y: Tensor) -> Se2:\n    zs = zeros_like(y)\n    return cls.trans(zs, y)",
    "docstring": "Construct a y-axis translation. Args: y: the y-axis translation.",
    "type": "method",
    "file_path": "kornia\\kornia\\geometry\\liegroup\\se2.py",
    "ast_data": "FunctionDef name:trans_y arg:cls arg:y arguments arg arg Assign Call Return return:yes Call"
  },
  {
    "library": "numpy",
    "name": "get_flags_ar",
    "source_code": "def get_flags_ar(self):\n    return self._get_command_flags('archiver')",
    "docstring": "List of archiver flags.",
    "type": "method",
    "file_path": "numpy\\numpy\\distutils\\fcompiler\\__init__.py",
    "ast_data": "FunctionDef name:get_flags_ar arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "get_points",
    "source_code": "def get_points(self):\n    self._invalid = 0\n    return self._points",
    "docstring": "Get the points of the bounding box as an array of the form ``.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\transforms.py",
    "ast_data": "FunctionDef name:get_points arg:self arguments arg Assign Return return:yes"
  },
  {
    "library": "scipy",
    "name": "reflective_transformation",
    "source_code": "def reflective_transformation(y, lb, ub):\n    if in_bounds(y, lb, ub):\n        return (y, np.ones_like(y))\n    lb_finite = np.isfinite(lb)\n    ub_finite = np.isfinite(ub)\n    x = y.copy()\n    g_negative = np.zeros_like(y, dtype=bool)\n    mask = lb_finite & ~ub_finite\n    x[mask] = np.maximum(y[mask], 2 * lb[mask] - y[mask])\n    g_negative[mask] = y[mask] < lb[mask]\n    mask = ~lb_finite & ub_finite\n    x[mask] = np.minimum(y[mask], 2 * ub[mask] - y[mask])\n    g_negative[mask] = y[mask] > ub[mask]\n    mask = lb_finite & ub_finite\n    d = ub - lb\n    t = np.remainder(y[mask] - lb[mask], 2 * d[mask])\n    x[mask] = lb[mask] + np.minimum(t, 2 * d[mask] - t)\n    g_negative[mask] = t > d[mask]\n    g = np.ones_like(y)\n    g[g_negative] = -1\n    return (x, g)",
    "docstring": "Compute reflective transformation and its gradient.",
    "type": "function",
    "file_path": "scipy\\scipy\\optimize\\_lsq\\common.py",
    "ast_data": "FunctionDef name:reflective_transformation arg:y arg:lb arg:ub arguments arg arg arg If Call Return return:yes Call Assign Call Assign Call Assign Call Assign Call Assign Assign Call Assign Compare Assign Assign Call Assign Compare Assign Assign Assign Call Assign Call Assign Compare Assign Call Assign Return return:yes"
  },
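  {
    "library": "scipy",
    "name": "reflective_transformation_demo",
    "note": "Editor's illustrative sketch, not extracted source: a standalone numpy demo of the finite-bounds branch of `reflective_transformation` above, showing how points outside [lb, ub] are folded back inside by reflection; the function itself lives in a private scipy module, so this re-derives the arithmetic directly.",
    "example_code": "import numpy as np\n\n# Both bounds finite: fold y into [lb, ub] with period 2 * (ub - lb).\ny = np.array([2.5, -0.7, 0.3])\nlb, ub = np.zeros(3), np.ones(3)\nd = ub - lb\nt = np.remainder(y - lb, 2 * d)\nx = lb + np.minimum(t, 2 * d - t)\ng = np.where(t > d, -1.0, 1.0)\nprint(x)  # [0.5 0.7 0.3] -- reflected into [0, 1]\nprint(g)  # [ 1. -1.  1.] -- sign of the mapping's gradient",
    "type": "usage_example"
  },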
  {
    "library": "tensorflow",
    "name": "per_image_standardization",
    "source_code": "@tf_export('image.per_image_standardization')\n@dispatch.add_dispatch_support\ndef per_image_standardization(image):\n    with ops.name_scope(None, 'per_image_standardization', [image]) as scope:\n        image = ops.convert_to_tensor(image, name='image')\n        image = _AssertAtLeast3DImage(image)\n        image = math_ops.cast(image, dtype=dtypes.float32)\n        num_pixels = math_ops.reduce_prod(array_ops.shape(image)[-3:])\n        image_mean = math_ops.reduce_mean(image, axis=[-1, -2, -3], keepdims=True)\n        stddev = math_ops.reduce_std(image, axis=[-1, -2, -3], keepdims=True)\n        min_stddev = math_ops.rsqrt(math_ops.cast(num_pixels, dtypes.float32))\n        adjusted_stddev = math_ops.maximum(stddev, min_stddev)\n        image -= image_mean\n        image = math_ops.divide(image, adjusted_stddev, name=scope)\n        return image",
    "docstring": "Linearly scales each image in to have mean 0 and variance 1. For each 3-D image in , computes , where - is the average of all values in - is capped away from 0 to protect against division by 0 when handling uniform images - is the number of elements in - is the standard deviation of all values in Example Usage: >>> image = tf.constant(np.arange(1, 13, dtype=np.int32), shape=[2, 2, 3]) >>> image # 3-D tensor >>> new_image = tf.image.per_image_standardization(image) >>> new_image # 3-D tensor with mean ~= 0 and variance ~= 1 Args: image: An n-D with at least 3 dimensions, the last 3 of which are the dimensions of each image. Returns: A with the same shape as and its dtype is . Raises: ValueError: The shape of has fewer than 3 dimensions.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\image_ops_impl.py",
    "ast_data": "FunctionDef name:per_image_standardization arg:image arguments arg With Call Assign Call Assign Call Assign Call Assign Call Call Assign Call Assign Call Assign Call Call Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "alpha_dropout",
    "source_code": "def alpha_dropout(input: Tensor, p: float=0.5, training: bool=False, inplace: bool=False) -> Tensor:\n    if has_torch_function_unary(input):\n        return handle_torch_function(alpha_dropout, (input,), input, p=p, training=training, inplace=inplace)\n    if p < 0.0 or p > 1.0:\n        raise ValueError(f'dropout probability has to be between 0 and 1, but got {p}')\n    return _VF.alpha_dropout_(input, p, training) if inplace else _VF.alpha_dropout(input, p, training)",
    "docstring": "Apply alpha dropout to the input. See :class: for details.",
    "type": "function",
    "file_path": "pytorch\\torch\\nn\\functional.py",
    "ast_data": "FunctionDef name:alpha_dropout arg:input arg:p arg:training arg:inplace arguments arg arg arg arg If Call Return return:yes Call If BoolOp Compare Compare Raise Call Return return:yes Call Call"
  },
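  {
    "library": "pytorch",
    "name": "alpha_dropout_usage_sketch",
    "note": "Editor's illustrative sketch, not extracted source: a minimal call to `torch.nn.functional.alpha_dropout` as documented above; the shape and probability are arbitrary.",
    "example_code": "import torch\nimport torch.nn.functional as F\n\nx = torch.randn(2, 4)\n# With training=True the dropout is applied; alpha dropout is designed to\n# preserve the self-normalizing property of SELU activations.\ny = F.alpha_dropout(x, p=0.2, training=True)\nprint(y.shape)  # torch.Size([2, 4])",
    "type": "usage_example"
  },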
  {
    "library": "pytorch",
    "name": "CheckpointException",
    "source_code": "class CheckpointException(BaseException):\n\n    def __init__(self, msg: str, failures: dict[int, WRAPPED_EXCEPTION]):\n        super().__init__(msg, failures)\n        self._failures = failures\n\n    @property\n    def failures(self) -> dict[int, WRAPPED_EXCEPTION]:\n        return self._failures\n\n    def __str__(self) -> str:\n        str = f'CheckpointException ranks:{self._failures.keys()}\\n'\n        for rank, exc_pair in self._failures.items():\n            exc, trace = exc_pair\n            str += f'Traceback (most recent call last): (RANK {rank})\\n'\n            if trace is not None:\n                str += ''.join(tb.format_list(trace))\n            str += ''.join(tb.format_exception_only(type(exc), value=exc))\n        return str",
    "docstring": "Exception raised if failure was detected as part of a checkpoint load or save.",
    "type": "class",
    "file_path": "pytorch\\torch\\distributed\\checkpoint\\api.py",
    "ast_data": "ClassDef name:CheckpointException FunctionDef name:__init__ arg:self arg:msg arg:failures arguments arg arg arg Call Call Assign FunctionDef name:failures arg:self arguments arg Return return:yes FunctionDef name:__str__ arg:self arguments arg Assign Call For Call Assign If Compare Call Call Call Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "MixedPrecisionPolicy",
    "source_code": "@dataclass(frozen=True)\nclass MixedPrecisionPolicy:\n    param_dtype: Optional[torch.dtype] = None\n    reduce_dtype: Optional[torch.dtype] = None\n    output_dtype: Optional[torch.dtype] = None\n    cast_forward_inputs: bool = True",
    "docstring": "This configures FSDP's mixed precision. Unlike autocast, this applies mixed precision at the module level, not op level, which means low-precision activations are saved for backward and high-to-low-precision casts are incurred only at module boundaries. FSDP works well with module-level mixed precision since it keeps the high-precision sharded parameters in memory anyway. In other words, FSDP does not require any extra memory to keep a high-precision copy of the parameters for the optimizer step. Attributes: param_dtype (Optional[torch.dtype]): This specifies the dtype for the unsharded parameter and hence the dtype for forward/backward computation and the parameter all-gather. If this is `set_requires_gradient_sync` or not.",
    "type": "class",
    "file_path": "pytorch\\torch\\distributed\\fsdp\\_fully_shard\\_fsdp_api.py",
    "ast_data": "ClassDef name:MixedPrecisionPolicy Call"
  },
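  {
    "library": "pytorch",
    "name": "mixed_precision_policy_usage_sketch",
    "note": "Editor's illustrative sketch, not extracted source: constructing a MixedPrecisionPolicy for bf16 compute with fp32 gradient reduction, assuming the newer `torch.distributed.fsdp` export; the `fully_shard` call is only sketched in a comment because it requires an initialized process group.",
    "example_code": "import torch\nfrom torch.distributed.fsdp import MixedPrecisionPolicy\n\n# bf16 unsharded params / compute, fp32 reduce-scatter of gradients.\npolicy = MixedPrecisionPolicy(param_dtype=torch.bfloat16, reduce_dtype=torch.float32)\nprint(policy)\n# With distributed initialized (assumption), one would apply it as:\n# from torch.distributed.fsdp import fully_shard\n# fully_shard(model, mp_policy=policy)",
    "type": "usage_example"
  },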
  {
    "library": "tensorflow",
    "name": "write",
    "source_code": "def write(self, dataset):\n    if not isinstance(dataset, data_types.DatasetV2):\n        raise TypeError(f'Invalid `dataset.` Expected a `tf.data.Dataset` object but got {type(dataset)}.')\n    if not dataset_ops.get_structure(dataset).is_compatible_with(tensor_spec.TensorSpec([], dtypes.string)):\n        raise TypeError(f'Invalid `dataset`. Expected a`dataset` that produces scalar `tf.string` elements, but got a dataset which produces elements with shapes {dataset_ops.get_legacy_output_shapes(dataset)} and types {dataset_ops.get_legacy_output_types(dataset)}.')\n    dataset = dataset._apply_debug_options()\n    return gen_experimental_dataset_ops.dataset_to_tf_record(dataset._variant_tensor, self._filename, self._compression_type)",
    "docstring": "Writes a dataset to a TFRecord file. An operation that writes the content of the specified dataset to the file specified in the constructor. If the file exists, it will be overwritten. Args: dataset: a whose elements are to be written to a file Returns: In graph mode, this returns an operation which when executed performs the write. In eager mode, the write is performed by the method itself and there is no return value. Raises TypeError: if is not a . TypeError: if the elements produced by the dataset are not scalar strings.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\data\\experimental\\ops\\writers.py",
    "ast_data": "FunctionDef name:write arg:self arg:dataset arguments arg arg If Call Raise Call Call If Call Call Call Raise Call Call Call Assign Call Return return:yes Call"
  },
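  {
    "library": "tensorflow",
    "name": "tfrecord_writer_write_usage_sketch",
    "note": "Editor's illustrative sketch, not extracted source: writing a dataset of scalar tf.string elements with `tf.data.experimental.TFRecordWriter.write` as documented above; the output path is arbitrary.",
    "example_code": "import tensorflow as tf\n\n# Elements must be scalar tf.string tensors.\nds = tf.data.Dataset.from_tensor_slices([b'hello', b'world'])\nwriter = tf.data.experimental.TFRecordWriter('/tmp/example.tfrecord')\nwriter.write(ds)  # in eager mode the write happens immediately",
    "type": "usage_example"
  },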
  {
    "library": "tensorflow",
    "name": "DatasetInitializer",
    "source_code": "@tf_export('data.experimental.DatasetInitializer')\nclass DatasetInitializer(lookup_ops.TableInitializerBase):\n\n    def __init__(self, dataset):\n        self.dataset = dataset\n        elem_spec = self.dataset.element_spec\n        _check_table_initializer_element_spec(elem_spec)\n        key_type = elem_spec[0].dtype\n        value_type = elem_spec[1].dtype\n        super(DatasetInitializer, self).__init__(key_type, value_type)\n\n    def initialize(self, table):\n        lookup_ops.check_table_dtypes(table, self._key_dtype, self._value_dtype)\n        init_op = ged_ops.initialize_table_from_dataset(table.resource_handle, self.dataset._variant_tensor)\n        ops.add_to_collection(ops.GraphKeys.TABLE_INITIALIZERS, init_op)\n        return init_op",
    "docstring": "Creates a table initializer from a . Sample usage: >>> keys = tf.data.Dataset.range(100) >>> values = tf.data.Dataset.range(100).map( ... lambda x: tf.strings.as_string(x * 2)) >>> ds = tf.data.Dataset.zip((keys, values)) >>> init = tf.data.experimental.DatasetInitializer(ds) >>> table = tf.lookup.StaticHashTable(init, \"\") >>> table.lookup(tf.constant([0, 1, 2], dtype=tf.int64)).numpy() array([b'0', b'2', b'4'], dtype=object) Attributes: dataset: A object that produces tuples of scalars. The first scalar is treated as a key and the second as value. Raises: ValueError if doesn't conform to specifications.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\data\\experimental\\ops\\lookup_ops.py",
    "ast_data": "ClassDef name:DatasetInitializer FunctionDef name:__init__ arg:self arg:dataset arguments arg arg Assign Assign Call Assign Assign Call Call FunctionDef name:initialize arg:self arg:table arguments arg arg Call Assign Call Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "batch",
    "source_code": "@abc.abstractmethod\ndef batch(self, spec, batch_size):\n    raise NotImplementedError(f'{type(self).__name__}.batch')",
    "docstring": "Returns the TypeSpec representing a batch of values described by . Args: spec: The for an individual value. batch_size: An indicating the number of values that are batched together, or if the batch size is not known. Returns: A for a batch of values.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\type_spec.py",
    "ast_data": "FunctionDef name:batch arg:self arg:spec arg:batch_size arguments arg arg arg Raise Call Call"
  },
  {
    "library": "tensorflow",
    "name": "scatter_sub",
    "source_code": "@tf_export(v1=['scatter_sub'])\ndef scatter_sub(ref, indices, updates, use_locking=False, name=None):\n    if ref.dtype._is_ref_dtype:\n        return gen_state_ops.scatter_sub(ref, indices, updates, use_locking=use_locking, name=name)\n    return ref._lazy_read(gen_resource_variable_ops.resource_scatter_sub(ref.handle, indices, ops.convert_to_tensor(updates, ref.dtype), name=name))",
    "docstring": "Subtracts sparse updates to a variable reference. This operation outputs after the update is done. This makes it easier to chain operations that need to use the reset value. Duplicate entries are handled correctly: if multiple reference the same location, their (negated) contributions add. Requires or . Args: ref: A mutable . Must be one of the following types: , , , , , , , , , , , , , , , , . Should be from a node. indices: A . Must be one of the following types: , . A tensor of indices into the first dimension of . updates: A . Must have the same type as . A tensor of updated values to subtract from . use_locking: An optional . Defaults to . If True, the subtraction will be protected by a lock; otherwise the behavior is undefined, but may exhibit less contention. name: A name for the operation (optional). Returns: A mutable . Has the same type as .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\state_ops.py",
    "ast_data": "FunctionDef name:scatter_sub arg:ref arg:indices arg:updates arg:use_locking arg:name arguments arg arg arg arg arg If Return return:yes Call Return return:yes Call Call Call Call"
  },
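  {
    "library": "tensorflow",
    "name": "scatter_sub_usage_sketch",
    "note": "Editor's illustrative sketch, not extracted source: graph-mode use of the v1 `tf.scatter_sub` op documented above, via the compat.v1 API; indices and updates are arbitrary.",
    "example_code": "import tensorflow.compat.v1 as tf\n\ntf.disable_eager_execution()\nref = tf.Variable([1.0, 2.0, 3.0, 4.0])\nop = tf.scatter_sub(ref, indices=[0, 2], updates=[10.0, 20.0])\nwith tf.Session() as sess:\n    sess.run(tf.global_variables_initializer())\n    print(sess.run(op))  # [ -9.   2. -17.   4.]",
    "type": "usage_example"
  },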
  {
    "library": "tensorflow",
    "name": "_TypeMap",
    "source_code": "class _TypeMap(object):\n\n    def __init__(self, init_from=None):\n        if init_from:\n            assert isinstance(init_from, _TypeMap)\n            self.types = {s: set(other_types) for s, other_types in init_from.types.items()}\n        else:\n            self.types = {}\n\n    def __eq__(self, other):\n        if frozenset(self.types.keys()) != frozenset(other.types.keys()):\n            return False\n        ret = all((self.types[s] == other.types[s] for s in self.types))\n        return ret\n\n    def __ne__(self, other):\n        return not self.__eq__(other)\n\n    def __or__(self, other):\n        assert isinstance(other, _TypeMap)\n        result = _TypeMap(self)\n        for s, other_types in other.types.items():\n            if s not in result.types:\n                self_types = set()\n                result.types[s] = self_types\n            else:\n                self_types = result.types[s]\n            self_types.update(other_types)\n        return result\n\n    def __repr__(self):\n        return 'SymbolTable {}'.format(self.types)",
    "docstring": "Abstraction for the state of the CFG walk for type inference. This is a value type. Only implements the strictly necessary operators. Attributes: types: Dict[qual_names.QN, Set[Type]], mapping symbols to the set of possible types.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\autograph\\pyct\\static_analysis\\type_inference.py",
    "ast_data": "ClassDef name:_TypeMap FunctionDef name:__init__ arg:self arg:init_from arguments arg arg If Call Assign Call Call Assign FunctionDef name:__eq__ arg:self arg:other arguments arg arg If Compare Call Call Call Call Return return:yes Assign Call Compare Return return:yes FunctionDef name:__ne__ arg:self arg:other arguments arg arg Return return:yes Call FunctionDef name:__or__ arg:self arg:other arguments arg arg Call Assign Call For Call If Compare Assign Call Assign Assign Call Return return:yes FunctionDef name:__repr__ arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "FCN",
    "source_code": "class FCN(_SimpleSegmentationModel):\n    pass",
    "docstring": "Implements a Fully-Convolutional Network for semantic segmentation. Args: backbone (nn.Module): the network used to compute the features for the model. The backbone should return an OrderedDict[Tensor], with the key being \"out\" for the last feature map used, and \"aux\" if an auxiliary classifier is used. classifier (nn.Module): module that takes the \"out\" element returned from the backbone and returns a dense prediction. aux_classifier (nn.Module, optional): auxiliary classifier used during training",
    "type": "class",
    "file_path": "pytorch\\benchmarks\\functional_autograd_benchmark\\torchvision_models.py",
    "ast_data": "ClassDef name:FCN"
  },
  {
    "library": "tensorflow",
    "name": "_get_compile_args",
    "source_code": "def _get_compile_args(self, user_metrics=True):\n    self._assert_compile_was_called()\n    saved_metrics = self.compiled_metrics._user_metrics\n    saved_weighted_metrics = self.compiled_metrics._user_weighted_metrics\n    if not user_metrics:\n        if saved_metrics is not None:\n            saved_metrics = self.compiled_metrics._metrics\n        if saved_weighted_metrics is not None:\n            saved_weighted_metrics = self.compiled_metrics._weighted_metrics\n    compile_args = {'optimizer': self.optimizer, 'loss': self.compiled_loss._user_losses, 'metrics': saved_metrics, 'weighted_metrics': saved_weighted_metrics, 'loss_weights': self.compiled_loss._user_loss_weights}\n    return compile_args",
    "docstring": "Used for saving or cloning a Model. Args: user_metrics: Whether to return user-supplied metrics or objects. Defaults to returning the user-supplied metrics. Returns: Dictionary of arguments that were used when compiling the model.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\training.py",
    "ast_data": "FunctionDef name:_get_compile_args arg:self arg:user_metrics arguments arg arg Call Assign Assign If If Compare Assign If Compare Assign Assign Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "eye_",
    "source_code": "def eye_(tensor):\n    if tensor.ndimension() != 2:\n        raise ValueError('Only tensors with 2 dimensions are supported')\n    with torch.no_grad():\n        torch.eye(*tensor.shape, out=tensor, requires_grad=tensor.requires_grad)\n    return tensor",
    "docstring": "Fill the 2-dimensional input with the identity matrix. Preserves the identity of the inputs in layers, where as many inputs are preserved as possible. Args: tensor: a 2-dimensional Examples: >>> w = torch.empty(3, 5) >>> nn.init.eye_(w)",
    "type": "function",
    "file_path": "pytorch\\torch\\nn\\init.py",
    "ast_data": "FunctionDef name:eye_ arg:tensor arguments arg If Compare Call Raise Call With Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "get_config",
    "source_code": "def get_config(self):\n    config = dict(zip(self._fields, self))\n    config['dtype'] = self.dtype.name\n    return config",
    "docstring": "See 'FeatureColumn` base class.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\feature_column\\feature_column_v2.py",
    "ast_data": "FunctionDef name:get_config arg:self arguments arg Assign Call Call Assign Return return:yes"
  },
  {
    "library": "pandas",
    "name": "argsort",
    "source_code": "def argsort(self, *args, **kwargs) -> npt.NDArray[np.intp]:\n    return self._data.argsort(*args, **kwargs)",
    "docstring": "Return the integer indices that would sort the index. Parameters ---------- *args Passed to . **kwargs Passed to . Returns ------- np.ndarray[np.intp] Integer indices that would sort the index if used as an indexer. See Also -------- numpy.argsort : Similar method for NumPy arrays. Index.sort_values : Return sorted copy of Index. Examples -------- >>> idx = pd.Index([\"b\", \"a\", \"d\", \"c\"]) >>> idx Index(['b', 'a', 'd', 'c'], dtype='object') >>> order = idx.argsort() >>> order array([1, 0, 3, 2]) >>> idx[order] Index(['a', 'b', 'c', 'd'], dtype='object')",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\indexes\\base.py",
    "ast_data": "FunctionDef name:argsort arg:self arguments arg arg arg Return return:yes Call"
  },
  {
    "library": "kornia",
    "name": "apply_non_transform_class",
    "source_code": "def apply_non_transform_class(self, input: Tensor, params: Dict[str, Tensor], flags: Dict[str, Any], transform: Optional[Tensor]=None) -> Tensor:\n    return input",
    "docstring": "Process class tags corresponding to the inputs that are no transformation applied.",
    "type": "method",
    "file_path": "kornia\\kornia\\augmentation\\base.py",
    "ast_data": "FunctionDef name:apply_non_transform_class arg:self arg:input arg:params arg:flags arg:transform arguments arg arg arg arg arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "num_gpus",
    "source_code": "def num_gpus():\n    return context().num_gpus()",
    "docstring": "Get the number of available GPU devices. Returns: The number of available GPU devices.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\context.py",
    "ast_data": "FunctionDef name:num_gpus arguments Return return:yes Call Call"
  },
  {
    "library": "matplotlib",
    "name": "draw",
    "source_code": "def draw(self, renderer):\n    if not self.get_visible():\n        return\n    self.stale = False",
    "docstring": "Draw the Artist (and its children) using the given renderer. This has no effect if the artist is not visible ( returns False). Parameters ---------- renderer : subclass. Notes ----- This method is overridden in the Artist subclasses.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\artist.py",
    "ast_data": "FunctionDef name:draw arg:self arg:renderer arguments arg arg If Call Return return:no Assign"
  },
  {
    "library": "tensorflow",
    "name": "StaticHashTableV1",
    "source_code": "@tf_export(v1=['lookup.StaticHashTable'])\nclass StaticHashTableV1(StaticHashTable):\n\n    @property\n    def initializer(self):\n        return self._init_op",
    "docstring": "A generic hash table that is immutable once initialized. When running in graph mode, you must evaluate the tensor returned by before evaluating the tensor returned by this class's method. Example usage in graph mode: Note that in graph mode if you set to , you should only call once, otherwise each will create (and destroy) a new table unrelated to each other, leading to errors such as \"Table not initialized\". You can do so like this: In eager mode, no special code is needed to initialize the table. Example usage in eager mode:",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\lookup_ops.py",
    "ast_data": "ClassDef name:StaticHashTableV1 FunctionDef name:initializer arg:self arguments arg Return return:yes Call"
  },
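  {
    "library": "tensorflow",
    "name": "static_hash_table_v1_usage_sketch",
    "note": "Editor's illustrative sketch, not extracted source: reconstructs the kind of graph-mode example the docstring above refers to (its original code blocks were lost in extraction); the key/value choices are arbitrary.",
    "example_code": "import tensorflow.compat.v1 as tf\n\ntf.disable_eager_execution()\nkeys = tf.constant([1, 2], dtype=tf.int64)\nvals = tf.constant([3, 4], dtype=tf.int64)\ntable = tf.lookup.StaticHashTable(\n    tf.lookup.KeyValueTensorInitializer(keys, vals), default_value=-1)\nout = table.lookup(tf.constant([1, 5], dtype=tf.int64))\nwith tf.Session() as sess:\n    sess.run(tf.tables_initializer())  # must run before lookup\n    print(sess.run(out))  # [ 3 -1]",
    "type": "usage_example"
  },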
  {
    "library": "scikit-learn",
    "name": "diag",
    "source_code": "def diag(self, X):\n    return self.k1.diag(X) * self.k2.diag(X)",
    "docstring": "Returns the diagonal of the kernel k(X, X). The result of this method is identical to np.diag(self(X)); however, it can be evaluated more efficiently since only the diagonal is evaluated. Parameters ---------- X : array-like of shape (n_samples_X, n_features) or list of object Argument to the kernel. Returns ------- K_diag : ndarray of shape (n_samples_X,) Diagonal of kernel k(X, X)",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\gaussian_process\\kernels.py",
    "ast_data": "FunctionDef name:diag arg:self arg:X arguments arg arg Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "assert_consumed",
    "source_code": "def assert_consumed(self):\n    raise AssertionError('No checkpoint specified (save_path=None); nothing is being restored.')",
    "docstring": "Assertion for consistency with . Always fails.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\checkpoint\\checkpoint.py",
    "ast_data": "FunctionDef name:assert_consumed arg:self arguments arg Raise Call"
  },
  {
    "library": "tensorflow",
    "name": "_set_read_only_resource_inputs_attr",
    "source_code": "def _set_read_only_resource_inputs_attr(op: ops.Operation, func_graph: func_graph_module.FuncGraph):\n    read_only_indices = acd.get_read_only_resource_input_indices_graph(func_graph)\n    ops.set_int_list_attr(op, acd.READ_ONLY_RESOURCE_INPUTS_ATTR, read_only_indices)",
    "docstring": "Sets the list of resource inputs which are read-only. This is used by AutomaticControlDependencies. Args: op: PartitionedCall Operation. func_graph: FuncGraph.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\polymorphic_function\\atomic_function.py",
    "ast_data": "FunctionDef name:_set_read_only_resource_inputs_attr arg:op arg:func_graph arguments arg arg Assign Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_FunctionConverterDataInGraph",
    "source_code": "class _FunctionConverterDataInGraph(_FunctionConverterData):\n\n    def __init__(self, func, lower_control_flow, aggressive_inlining, variable_names_allowlist=None, variable_names_denylist=None, session=None):\n        self._session = session\n        session.run(variables.global_variables_initializer())\n        for op in ops.get_default_graph().get_collection(VAR_ASSIGN_COLLECTION):\n            session.run(op)\n        super(_FunctionConverterDataInGraph, self).__init__(func, lower_control_flow, aggressive_inlining, variable_names_allowlist, variable_names_denylist)\n\n    def _eval(self, tensor):\n        return self._session.run(tensor)",
    "docstring": "Container for ConcreteFunction-based conversion data in Graph mode.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\convert_to_constants.py",
    "ast_data": "ClassDef name:_FunctionConverterDataInGraph FunctionDef name:__init__ arg:self arg:func arg:lower_control_flow arg:aggressive_inlining arg:variable_names_allowlist arg:variable_names_denylist arg:session arguments arg arg arg arg arg arg arg Assign Call Call For Call Call Call Call Call FunctionDef name:_eval arg:self arg:tensor arguments arg arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "wrap_layer_objects",
    "source_code": "def wrap_layer_objects(layer, serialization_cache):\n    all_losses = layer._callable_losses[:]\n    for child_layer in utils.list_all_layers(layer):\n        all_losses.extend(child_layer._callable_losses)\n    keras_loss_cache = serialization_cache.setdefault('keras_losses', {})\n    wrapped_loss_functions = []\n    for loss_fn in all_losses:\n        if loss_fn in keras_loss_cache:\n            wrapped_loss_functions.append(keras_loss_cache[loss_fn])\n        else:\n            wrapped_loss = _wrap_unconditional_loss(loss_fn, len(keras_loss_cache))\n            keras_loss_cache[loss_fn] = wrapped_loss\n            wrapped_loss_functions.append(wrapped_loss)\n    wrapped_layer_losses = [keras_loss_cache[fn] for fn in layer._callable_losses[:]]\n    layer_metrics = data_structures.wrap_or_unwrap({m.name: m for m in layer._metrics})\n    return dict(variables=data_structures.wrap_or_unwrap(layer.variables), trainable_variables=data_structures.wrap_or_unwrap(layer.trainable_variables), non_trainable_variables=data_structures.wrap_or_unwrap(layer.non_trainable_variables), layers=data_structures.wrap_or_unwrap(utils.list_all_layers(layer)), metrics=data_structures.wrap_or_unwrap(layer.metrics), regularization_losses=data_structures.wrap_or_unwrap(wrapped_loss_functions), layer_regularization_losses=data_structures.wrap_or_unwrap(wrapped_layer_losses), layer_metrics=layer_metrics)",
    "docstring": "Returns extra trackable objects to attach to the serialized layer. Args: layer: Keras Layer object. serialization_cache: Dictionary shared between all objects during serialization. Returns: A dictionary containing all checkpointable objects from a SerializedAttributes object. See LayerAttributes and ModelAttributes for entire list of objects",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\saving\\saved_model\\save_impl.py",
    "ast_data": "FunctionDef name:wrap_layer_objects arg:layer arg:serialization_cache arguments arg arg Assign For Call Call Assign Call Assign For If Compare Call Assign Call Call Assign Call Assign Assign Call Return return:yes Call Call Call Call Call Call Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "is_concrete_int",
    "source_code": "def is_concrete_int(a: IntLikeType) -> bool:\n    assert isinstance(a, (SymInt, int))\n    if isinstance(a, int):\n        return True\n    if isinstance(a.node.expr, sympy.core.numbers.Integer):\n        return True\n    return False",
    "docstring": "Utility to check if underlying object in SymInt is concrete value. Also returns true if integer is passed in. Args: a (SymInt or int): Object to test if it int",
    "type": "function",
    "file_path": "pytorch\\torch\\fx\\experimental\\symbolic_shapes.py",
    "ast_data": "FunctionDef name:is_concrete_int arg:a arguments arg Call If Call Return return:yes If Call Return return:yes Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_extract_attrs",
    "source_code": "def _extract_attrs(op, keys):\n    kwargs = {}\n    not_found = object()\n    for k in keys:\n        srcs = [getattr(op, k, not_found), getattr(op, '_' + k, not_found), getattr(op, 'parameters', {}).get(k, not_found)]\n        if any((v is not not_found for v in srcs)):\n            kwargs[k] = [v for v in srcs if v is not not_found][0]\n        else:\n            raise ValueError(f\"Could not determine an appropriate value for field `{k}` in object  `{op}`. Looked for \\n 1. an attr called `{k}`,\\n 2. an attr called `_{k}`,\\n 3. an entry in `op.parameters` with key '{k}'.\")\n        if k in op._composite_tensor_prefer_static_fields and kwargs[k] is not None:\n            if tensor_util.is_tensor(kwargs[k]):\n                static_val = tensor_util.constant_value(kwargs[k])\n                if static_val is not None:\n                    kwargs[k] = static_val\n        if isinstance(kwargs[k], (np.ndarray, np.generic)):\n            kwargs[k] = kwargs[k].tolist()\n    return kwargs",
    "docstring": "Extract constructor kwargs to reconstruct . Args: op: A instance. keys: A Python of strings indicating the names of the constructor kwargs to extract from . Returns: kwargs: A Python of kwargs to 's constructor, keyed by .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\linalg\\linear_operator.py",
    "ast_data": "FunctionDef name:_extract_attrs arg:op arg:keys arguments arg arg Assign Assign Call For Assign Call Call Call Call If Call Compare Assign Compare Raise Call If BoolOp Compare Compare If Call Assign Call If Compare Assign If Call Assign Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "desc",
    "source_code": "def desc(self):\n    config = self.config()\n    config_str = '_'.join([str(x) for x in config])\n    device = self.device\n    if 'NNC_NUM_THREADS' in os.environ:\n        num_threads_str = os.environ['NNC_NUM_THREADS']\n        device += num_threads_str\n    return f'{self.engine.mode}: {self.module()}_{self.mode}_{device}_{config_str}'",
    "docstring": "return the description of the current benchmark",
    "type": "method",
    "file_path": "pytorch\\benchmarks\\tensorexpr\\benchmark.py",
    "ast_data": "FunctionDef name:desc arg:self arguments arg Assign Call Assign Call Call Assign If Compare Assign Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "pointwise_read_writes",
    "source_code": "@cache_on_self\ndef pointwise_read_writes(self) -> dependencies.ReadWrites:\n    return self.pointwise_or_reduction_read_writes(pointwise=True)",
    "docstring": "Get the memory dependencies in the non-reduction axes.",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\scheduler.py",
    "ast_data": "FunctionDef name:pointwise_read_writes arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "device_of",
    "source_code": "class device_of(device):\n\n    def __init__(self, obj):\n        idx = obj.get_device() if obj.is_cuda else -1\n        super().__init__(idx)",
    "docstring": "Context-manager that changes the current device to that of given object. You can use both tensors and storages as arguments. If a given object is not allocated on a GPU, this is a no-op. Args: obj (Tensor or Storage): object allocated on the selected device.",
    "type": "class",
    "file_path": "pytorch\\torch\\cuda\\__init__.py",
    "ast_data": "ClassDef name:device_of FunctionDef name:__init__ arg:self arg:obj arguments arg arg Assign Call Call Call"
  },
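  {
    "library": "pytorch",
    "name": "device_of_usage_sketch",
    "note": "Editor's illustrative sketch, not extracted source: `torch.cuda.device_of` as documented above; with a CPU tensor the context manager is a no-op, so this runs even without a GPU.",
    "example_code": "import torch\n\nx = torch.randn(2, 2)  # CPU tensor -> the context is a no-op\nwith torch.cuda.device_of(x):\n    # For a CUDA tensor, the current device would be switched to x's device here.\n    pass",
    "type": "usage_example"
  },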
  {
    "library": "tensorflow",
    "name": "DisableSharedObjectScope",
    "source_code": "class DisableSharedObjectScope(object):\n\n    def __enter__(self):\n        SHARED_OBJECT_DISABLED.disabled = True\n        self._orig_loading_scope = _shared_object_loading_scope()\n        self._orig_saving_scope = _shared_object_saving_scope()\n\n    def __exit__(self, *args, **kwargs):\n        SHARED_OBJECT_DISABLED.disabled = False\n        SHARED_OBJECT_LOADING.scope = self._orig_loading_scope\n        SHARED_OBJECT_SAVING.scope = self._orig_saving_scope",
    "docstring": "A context manager for disabling handling of shared objects. Disables shared object handling for both saving and loading. Created primarily for use with , which does extra surgery that is incompatible with shared objects.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\utils\\generic_utils.py",
    "ast_data": "ClassDef name:DisableSharedObjectScope FunctionDef name:__enter__ arg:self arguments arg Assign Assign Call Assign Call FunctionDef name:__exit__ arg:self arguments arg arg arg Assign Assign Assign"
  },
  {
    "library": "sphinx",
    "name": "format_traceback",
    "source_code": "def format_traceback(exception: BaseException, /, *, short_traceback: bool=False) -> str:\n    if short_traceback:\n        from traceback import TracebackException\n        te = TracebackException.from_exception(exception, limit=-1)\n        exc_format = te.stack.format()[-1] + ''.join(te.format_exception_only())\n    elif isinstance(exception, SphinxParallelError):\n        exc_format = f'(Error in parallel process)\\n{exception.traceback}'\n    else:\n        from traceback import format_exception\n        exc_format = ''.join(format_exception(exception))\n    return '\\n'.join((f'    {line}' for line in exc_format.rstrip().splitlines()))",
    "docstring": "Format the given exception's traceback.",
    "type": "function",
    "file_path": "sphinx\\sphinx\\_cli\\util\\errors.py",
    "ast_data": "FunctionDef name:format_traceback arguments arg arg If Assign Call Assign Call Call Call If Call Assign Assign Call Call Return return:yes Call Call Call"
  },
  {
    "library": "scipy",
    "name": "emit_cmdstr",
    "source_code": "def emit_cmdstr(cmd):\n    console = Console(theme=console_theme)\n    console.print(f'{EMOJI.cmd} [cmd] {cmd}')",
    "docstring": "Print the command that's being run to stdout Note: cannot use this in the below tasks (yet), because as is these command strings are always echoed to the console, even if the command isn't run (but for example the command is run).",
    "type": "function",
    "file_path": "scipy\\dev.py",
    "ast_data": "FunctionDef name:emit_cmdstr arg:cmd arguments arg Assign Call Call"
  },
  {
    "library": "pytorch",
    "name": "handle_torch_function",
    "source_code": "def handle_torch_function(public_api: Callable, relevant_args: Iterable[Any], *args, **kwargs) -> Any:\n    overloaded_args = _get_overloaded_args(relevant_args)\n    types = tuple(map(type, overloaded_args))\n    if _is_torch_function_mode_enabled():\n        with _pop_mode_temporarily() as mode:\n            result = mode.__torch_function__(public_api, types, args, kwargs)\n        if result is not NotImplemented:\n            return result\n    for overloaded_arg in overloaded_args:\n        torch_func_method = overloaded_arg.__torch_function__\n        if hasattr(torch_func_method, '__self__') and torch_func_method.__self__ is overloaded_arg and (torch_func_method is not torch._C._disabled_torch_function_impl):\n            warnings.warn('Defining your `__torch_function__ as a plain method is deprecated and will be an error in future, please define it as a classmethod.', DeprecationWarning)\n        result = torch_func_method(public_api, types, args, kwargs)\n        if result is not NotImplemented:\n            return result\n    func_name = f'{public_api.__module__}.{public_api.__name__}'\n    msg = f\"no implementation found for '{func_name}' on types that implement __torch_function__: {[type(arg) for arg in overloaded_args]}\"\n    if _is_torch_function_mode_enabled():\n        msg += f' nor in mode {_get_current_function_mode()}'\n    raise TypeError(msg)",
    "docstring": "Implement a function with checks for `` method, as appropriate. Raises ------ TypeError : if no implementation is found. Example ------- >>> def func(a): ... if has_torch_function_unary(a): ... return handle_torch_function(func, (a,), a) ... return a + 0",
    "type": "function",
    "file_path": "pytorch\\torch\\overrides.py",
    "ast_data": "FunctionDef name:handle_torch_function arg:public_api arg:relevant_args arguments arg arg arg arg Assign Call Assign Call Call If Call With Call Assign Call If Compare Return return:yes For Assign If BoolOp Call Compare Compare Call Assign Call If Compare Return return:yes Assign Assign Call If Call Call Raise Call"
  },
  {
    "library": "tensorflow",
    "name": "global_step",
    "source_code": "@tf_export(v1=['train.global_step'])\ndef global_step(sess, global_step_tensor):\n    if context.executing_eagerly():\n        return int(global_step_tensor.numpy())\n    return int(sess.run(global_step_tensor))",
    "docstring": "Small helper to get the global step. Args: sess: A TensorFlow object. global_step_tensor: or the of the operation that contains the global step. Returns: The global step value.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\training\\training_util.py",
    "ast_data": "FunctionDef name:global_step arg:sess arg:global_step_tensor arguments arg arg If Call Return return:yes Call Call Return return:yes Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "reduction_read_writes",
    "source_code": "@cache_on_self\ndef reduction_read_writes(self) -> dependencies.ReadWrites:\n    return self.pointwise_or_reduction_read_writes(pointwise=False)",
    "docstring": "Get the memory dependencies in the reduction axes.",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\scheduler.py",
    "ast_data": "FunctionDef name:reduction_read_writes arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "django",
    "name": "relate_pattern",
    "source_code": "def relate_pattern(self, other, pattern):\n    if not isinstance(pattern, str) or len(pattern) > 9:\n        raise GEOSException('invalid intersection matrix pattern')\n    return capi.geos_relatepattern(self.ptr, other.ptr, force_bytes(pattern))",
    "docstring": "Return true if the elements in the DE-9IM intersection matrix for the two Geometries match the elements in pattern.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\geos\\geometry.py",
    "ast_data": "FunctionDef name:relate_pattern arg:self arg:other arg:pattern arguments arg arg arg If BoolOp Call Compare Call Raise Call Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "quantize_per_tensor",
    "source_code": "@impl(quantized_decomposed_lib, 'quantize_per_tensor', 'CompositeExplicitAutograd')\ndef quantize_per_tensor(input: torch.Tensor, scale: float, zero_point: int, quant_min: int, quant_max: int, dtype: torch.dtype) -> torch.Tensor:\n    if input.dtype in [torch.float16, torch.bfloat16]:\n        input = input.to(torch.float32)\n    assert input.dtype == torch.float32, f'Expecting input to have dtype torch.float32, but got dtype: {input.dtype}'\n    _quant_min_max_bounds_check(quant_min, quant_max, dtype)\n    inv_scale = 1.0 / scale\n    return torch.clamp(torch.round(input * inv_scale) + zero_point, quant_min, quant_max).to(dtype)",
    "docstring": "Affine quantization for the Tensor using the same quantization parameters to map from floating point to quantized values Args: input (torch.Tensor): original float32 or bfloat16 Tensor scale (float): quantization parameter for affine quantization zero_point (int): quantization parameter for affine quantization quant_min (int): minimum quantized value for output Tensor quant_max (int): maximum quantized value for output Tensor dtype (torch.dtype): requested dtype (e.g. torch.uint8) for output Tensor Returns: Tensor with requested dtype (e.g. torch.uint8), note the quantization parameters are not stored in the Tensor, we are storing them in function arguments instead",
    "type": "function",
    "file_path": "pytorch\\torch\\ao\\quantization\\fx\\_decomposed.py",
    "ast_data": "FunctionDef name:quantize_per_tensor arg:input arg:scale arg:zero_point arg:quant_min arg:quant_max arg:dtype arguments arg arg arg arg arg arg If Compare Assign Call Compare Call Assign Return return:yes Call Call Call Call"
  },
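  {
    "library": "pytorch",
    "name": "quantize_per_tensor_worked_example",
    "note": "Editor's illustrative sketch, not extracted source: the affine mapping implemented above, q = clamp(round(x / scale) + zero_point, quant_min, quant_max), worked out with plain torch ops; the scale and zero_point values are arbitrary.",
    "example_code": "import torch\n\nx = torch.tensor([-1.0, 0.0, 0.5, 1.0])\nscale, zero_point = 0.01, 128\n# e.g. -1.0 / 0.01 = -100; -100 + 128 = 28, which lands inside [0, 255]\nq = torch.clamp(torch.round(x / scale) + zero_point, 0, 255).to(torch.uint8)\nprint(q)  # tensor([ 28, 128, 178, 228], dtype=torch.uint8)",
    "type": "usage_example"
  },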
  {
    "library": "tensorflow",
    "name": "local_conv2d",
    "source_code": "@dispatch.add_dispatch_support\n@doc_controls.do_not_generate_docs\ndef local_conv2d(inputs, kernel, kernel_size, strides, output_shape, data_format=None):\n    return local_conv(inputs, kernel, kernel_size, strides, output_shape, data_format)",
    "docstring": "Apply 2D conv with un-shared weights. Args: inputs: 4D tensor with shape: (batch_size, filters, new_rows, new_cols) if data_format='channels_first' or 4D tensor with shape: (batch_size, new_rows, new_cols, filters) if data_format='channels_last'. kernel: the unshared weight for convolution, with shape (output_items, feature_dim, filters). kernel_size: a tuple of 2 integers, specifying the width and height of the 2D convolution window. strides: a tuple of 2 integers, specifying the strides of the convolution along the width and height. output_shape: a tuple with (output_row, output_col). data_format: the data format, channels_first or channels_last. Returns: A 4D tensor with shape: (batch_size, filters, new_rows, new_cols) if data_format='channels_first' or 4D tensor with shape: (batch_size, new_rows, new_cols, filters) if data_format='channels_last'.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\backend.py",
    "ast_data": "FunctionDef name:local_conv2d arg:inputs arg:kernel arg:kernel_size arg:strides arg:output_shape arg:data_format arguments arg arg arg arg arg arg Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "any",
    "source_code": "def any(self, *args, **kwargs):\n    nv.validate_any(args, kwargs)\n    self._maybe_disable_logical_methods('any')\n    vals = self._values\n    if not isinstance(vals, np.ndarray):\n        return vals._reduce('any')\n    return np.any(vals)",
    "docstring": "Return whether any element is Truthy. Parameters ---------- *args Required for compatibility with numpy. **kwargs Required for compatibility with numpy. Returns ------- bool or array-like (if axis is specified) A single element array-like may be converted to bool. See Also -------- Index.all : Return whether all elements are True. Series.all : Return whether all elements are True. Notes ----- Not a Number (NaN), positive infinity and negative infinity evaluate to True because these are not equal to zero. Examples -------- >>> index = pd.Index([0, 1, 2]) >>> index.any() True >>> index = pd.Index([0, 0, 0]) >>> index.any() False",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\indexes\\base.py",
    "ast_data": "FunctionDef name:any arg:self arguments arg arg arg Call Call Assign If Call Return return:yes Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "cudagraph_mark_step_begin",
    "source_code": "def cudagraph_mark_step_begin():\n    from .cudagraph_trees import mark_step_begin\n    mark_step_begin()",
    "docstring": "Indicates that a new iteration of inference or training is about to begin.",
    "type": "function",
    "file_path": "pytorch\\torch\\_inductor\\__init__.py",
    "ast_data": "FunctionDef name:cudagraph_mark_step_begin arguments Call"
  },
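Since the docstring above is terse, here is a minimal sketch of where this marker fits, assuming a CUDA-capable build and the public `torch.compiler.cudagraph_mark_step_begin` alias; the compiled function and shapes are illustrative only.

```python
import torch

@torch.compile(mode="reduce-overhead")  # inductor may use CUDA graphs here
def step(x):
    return x * 2

# Mark each iteration so the cudagraph trees machinery knows that
# outputs from the previous step are allowed to be overwritten.
for _ in range(3):
    torch.compiler.cudagraph_mark_step_begin()
    out = step(torch.randn(8, device="cuda"))
```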
  {
    "library": "pygame",
    "name": "benchmark_workers",
    "source_code": "def benchmark_workers(a_bench_func=None, the_data=None):\n    import pygame\n    import pygame.transform\n    import time\n    if not a_bench_func:\n\n        def doit(x):\n            return pygame.transform.scale(x, (544, 576))\n    else:\n        doit = a_bench_func\n    if not the_data:\n        thedata = [pygame.Surface((155, 155), 0, 32) for x in range(10)]\n    else:\n        thedata = the_data\n    best = time.time() + 100000000\n    best_number = 0\n    for num_workers in range(0, MAX_WORKERS_TO_TEST):\n        wq = WorkerQueue(num_workers)\n        t1 = time.time()\n        for _ in range(20):\n            print(f'active count:{threading.active_count()}')\n            tmap(doit, thedata, worker_queue=wq)\n        t2 = time.time()\n        wq.stop()\n        total_time = t2 - t1\n        print(f'total time num_workers:{num_workers}: time:{total_time}:')\n        if total_time < best:\n            best_number = num_workers\n            best = total_time\n        if num_workers - best_number > 1:\n            break\n    return best_number",
    "docstring": "does a little test to see if workers are at all faster. Returns the number of workers which works best. Takes a little bit of time to run, so you should only really call it once. You can pass in benchmark data, and functions if you want. a_bench_func - f(data) the_data - data to work on.",
    "type": "function",
    "file_path": "pygame\\src_py\\threads\\__init__.py",
    "ast_data": "FunctionDef name:benchmark_workers arg:a_bench_func arg:the_data arguments arg arg If FunctionDef name:doit arg:x arguments arg Return return:yes Call Assign If Assign Call Call Assign Assign Call Assign For Call Assign Call Assign Call For Call Call Call Call Assign Call Call Assign Call If Compare Assign Assign If Compare Return return:yes"
  },
  {
    "library": "pandas",
    "name": "_render_href",
    "source_code": "def _render_href(x, format):\n    if isinstance(x, str):\n        if format == 'html':\n            href = '<a href=\"{0}\" target=\"_blank\">{0}</a>'\n        elif format == 'latex':\n            href = '\\\\href{{{0}}}{{{0}}}'\n        else:\n            raise ValueError(\"``hyperlinks`` format can only be 'html' or 'latex'\")\n        pat = \"((http|ftp)s?:\\\\/\\\\/|www.)[\\\\w/\\\\-?=%.:@]+\\\\.[\\\\w/\\\\-&?=%.,':;~!@#$*()\\\\[\\\\]]+\"\n        return re.sub(pat, lambda m: href.format(m.group(0)), x)\n    return x",
    "docstring": "uses regex to detect a common URL pattern and converts to href tag in format.",
    "type": "function",
    "file_path": "pandas\\pandas\\io\\formats\\style_render.py",
    "ast_data": "FunctionDef name:_render_href arg:x arg:format arguments arg arg If Call If Compare Assign If Compare Assign Raise Call Assign Return return:yes Call arguments arg Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_forward_name",
    "source_code": "def _forward_name(n):\n    return '%s%s_%s' % (_FORWARD_PREFIX, n, ops.uid())",
    "docstring": "The name of a generated forward defun named n.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\polymorphic_function\\concrete_function.py",
    "ast_data": "FunctionDef name:_forward_name arg:n arguments arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, saved_model_checksum: int=None, graph_def_program_hash: int=None, signature_def_hash: int=None, saved_object_graph_hash: int=None, checkpoint_hash: int=None, version: int=None):\n    self.saved_model_checksum = saved_model_checksum\n    self.graph_def_program_hash = graph_def_program_hash\n    self.signature_def_hash = signature_def_hash\n    self.saved_object_graph_hash = saved_object_graph_hash\n    self.checkpoint_hash = checkpoint_hash\n    self.version = version",
    "docstring": "Initializes the instance based on values in the SavedModel fingerprint. Args: saved_model_checksum: Value of the. graph_def_program_hash: Value of the . signature_def_hash: Value of the . saved_object_graph_hash: Value of the . checkpoint_hash: Value of the . version: Value of the producer field of the VersionDef.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\saved_model\\fingerprinting.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:saved_model_checksum arg:graph_def_program_hash arg:signature_def_hash arg:saved_object_graph_hash arg:checkpoint_hash arg:version arguments arg arg arg arg arg arg arg Assign Assign Assign Assign Assign Assign"
  },
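A short sketch of reading these fields, assuming a SavedModel directory exists at `./my_model` (the path is hypothetical) and a TensorFlow version that exposes `tf.saved_model.experimental.read_fingerprint`:

```python
import tensorflow as tf

# Read the fingerprint of a SavedModel on disk; the returned object
# carries the hash fields initialized by __init__ above.
fp = tf.saved_model.experimental.read_fingerprint("./my_model")
print(fp.saved_model_checksum)
print(fp.graph_def_program_hash)
```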
  {
    "library": "tensorflow",
    "name": "_ConvertInputMapValues",
    "source_code": "def _ConvertInputMapValues(name, input_map):\n    if not all((isinstance(v, tensor.Tensor) for v in input_map.values())):\n        if name == '':\n            raise ValueError('tf.import_graph_def() requires a non-empty `name` if `input_map` contains non-Tensor values. Try calling tf.convert_to_tensor() on `input_map` values before calling tf.import_graph_def().')\n        with ops.name_scope('_inputs'):\n            input_map = {k: ops.convert_to_tensor(v) for k, v in input_map.items()}\n    return input_map",
    "docstring": "Ensures all input map values are tensors. This should be called from inside the import name scope. Args: name: the argument passed to import_graph_def input_map: the argument passed to import_graph_def. Returns: An possibly-updated version of . Raises: ValueError: if input map values cannot be converted due to empty name scope.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\importer.py",
    "ast_data": "FunctionDef name:_ConvertInputMapValues arg:name arg:input_map arguments arg arg If Call Call Call If Compare Raise Call With Call Assign Call Call Return return:yes"
  },
  {
    "library": "scipy",
    "name": "argsreduce",
    "source_code": "def argsreduce(cond, *args):\n    newargs = np.atleast_1d(*args)\n    if not isinstance(newargs, list | tuple):\n        newargs = (newargs,)\n    if np.all(cond):\n        *newargs, cond = np.broadcast_arrays(*newargs, cond)\n        return [arg.ravel() for arg in newargs]\n    s = cond.shape\n    return [arg if np.size(arg) == 1 else np.extract(cond, np.broadcast_to(arg, s)) for arg in newargs]",
    "docstring": "Clean arguments to: 1. Ensure all arguments are iterable (arrays of dimension at least one 2. If cond != True and size > 1, ravel(args[i]) where ravel(condition) is True, in 1D. Return list of processed arguments. Examples -------- >>> import numpy as np >>> from scipy.stats._distn_infrastructure import argsreduce >>> rng = np.random.default_rng() >>> A = rng.random((4, 5)) >>> B = 2 >>> C = rng.random((1, 5)) >>> cond = np.ones(A.shape) >>> [A1, B1, C1] = argsreduce(cond, A, B, C) >>> A1.shape (4, 5) >>> B1.shape (1,) >>> C1.shape (1, 5) >>> cond[2,:] = 0 >>> [A1, B1, C1] = argsreduce(cond, A, B, C) >>> A1.shape (15,) >>> B1.shape (1,) >>> C1.shape (15,)",
    "type": "function",
    "file_path": "scipy\\scipy\\stats\\_distn_infrastructure.py",
    "ast_data": "FunctionDef name:argsreduce arg:cond arguments arg arg Assign Call If Call Assign If Call Assign Call Return return:yes Call Assign Return return:yes Compare Call Call Call"
  },
  {
    "library": "scipy",
    "name": "ThreeHumpCamel",
    "source_code": "class ThreeHumpCamel(Benchmark):\n\n    def __init__(self, dimensions=2):\n        Benchmark.__init__(self, dimensions)\n        self._bounds = list(zip([-5.0] * self.N, [5.0] * self.N))\n        self.custom_bounds = [(-2, 2), (-1.5, 1.5)]\n        self.global_optimum = [[0.0, 0.0]]\n        self.fglob = 0.0\n\n    def fun(self, x, *args):\n        self.nfev += 1\n        return 2.0 * x[0] ** 2.0 - 1.05 * x[0] ** 4.0 + x[0] ** 6 / 6.0 + x[0] * x[1] + x[1] ** 2.0",
    "docstring": "Three Hump Camel objective function. This class defines the Three Hump Camel [1]_ global optimization problem. This is a multimodal minimization problem defined as follows: .. math:: f_{\\text{ThreeHumpCamel}}(x) = 2x_1^2 - 1.05x_1^4 + \\frac{x_1^6}{6} + x_1x_2 + x_2^2 with :math: for :math:. *Global optimum*: :math: for :math: .. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions For Global Optimization Problems Int. Journal of Mathematical Modelling and Numerical Optimisation, 2013, 4, 150-194.",
    "type": "class",
    "file_path": "scipy\\benchmarks\\benchmarks\\go_benchmark_functions\\go_funcs_T.py",
    "ast_data": "ClassDef name:ThreeHumpCamel FunctionDef name:__init__ arg:self arg:dimensions arguments arg arg Call Assign Call Call Assign Assign Assign FunctionDef name:fun arg:self arg:x arguments arg arg arg Return return:yes"
  },
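To make the benchmark concrete, here is a standalone evaluation of the same objective at its stated global optimum; the function is transcribed from `fun` above.

```python
import numpy as np

def three_hump_camel(x):
    # f(x) = 2*x1^2 - 1.05*x1^4 + x1^6/6 + x1*x2 + x2^2
    return (2.0 * x[0] ** 2 - 1.05 * x[0] ** 4 + x[0] ** 6 / 6.0
            + x[0] * x[1] + x[1] ** 2)

print(three_hump_camel(np.array([0.0, 0.0])))  # 0.0 at the global optimum
print(three_hump_camel(np.array([1.0, 1.0])))  # 3.1166...
```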
  {
    "library": "tensorflow",
    "name": "__rtruediv__",
    "source_code": "def __rtruediv__(self, other):\n    raise TypeError(\"unsupported operand type(s) for /: '{}' and 'Dimension', please use // instead\".format(type(other).__name__))",
    "docstring": "Use via instead. This function exists only to have a better error message. Instead of: , this function will explicitly call for usage of instead. Args: other: Another . Raises: TypeError.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\tensor_shape.py",
    "ast_data": "FunctionDef name:__rtruediv__ arg:self arg:other arguments arg arg Raise Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "find_reverse_deps",
    "source_code": "def find_reverse_deps(self, tag_id: Optional[int]=None) -> dict[torch.fx.Node, NodeSet]:\n    result: dict[torch.fx.Node, NodeSet] = defaultdict(set)\n    for node in self.module.graph.nodes:\n        if node.op not in CALLABLE_NODE_OPS:\n            continue\n        for user in node.users:\n            if user.op not in CALLABLE_NODE_OPS:\n                continue\n            if tag_id is None or int(user.tag.split('_')[-1]) < tag_id:\n                result[node].add(user)\n    return result",
    "docstring": "Builds reversed topological node dependencies, if tag_id is specified, we ignore nodes that are in later subgraph i.e. nodes have greater tag_id.",
    "type": "method",
    "file_path": "pytorch\\torch\\fx\\passes\\splitter_base.py",
    "ast_data": "FunctionDef name:find_reverse_deps arg:self arg:tag_id arguments arg arg Call For If Compare For If Compare If BoolOp Compare Compare Call Call Call Return return:yes"
  },
  {
    "library": "scipy",
    "name": "_median_bias",
    "source_code": "def _median_bias(n):\n    ii_2 = 2 * np.arange(1.0, (n - 1) // 2 + 1)\n    return 1 + np.sum(1.0 / (ii_2 + 1) - 1.0 / ii_2)",
    "docstring": "Returns the bias of the median of a set of periodograms relative to the mean. See Appendix B from [1]_ for details. Parameters ---------- n : int Numbers of periodograms being averaged. Returns ------- bias : float Calculated bias. References ---------- .. [1] B. Allen, W.G. Anderson, P.R. Brady, D.A. Brown, J.D.E. Creighton. \"FINDCHIRP: an algorithm for detection of gravitational waves from inspiraling compact binaries\", Physical Review D 85, 2012, :arxiv:",
    "type": "function",
    "file_path": "scipy\\scipy\\signal\\_spectral_py.py",
    "ast_data": "FunctionDef name:_median_bias arg:n arguments arg Assign Call Return return:yes Call"
  },
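As a sanity check of the formula, the helper can be transcribed and evaluated directly; for a single periodogram the bias is 1, and for large n it approaches ln(2).

```python
import numpy as np

def median_bias(n):
    # Sum over the even integers 2, 4, ..., 2 * ((n - 1) // 2).
    ii_2 = 2 * np.arange(1.0, (n - 1) // 2 + 1)
    return 1 + np.sum(1.0 / (ii_2 + 1) - 1.0 / ii_2)

print(median_bias(1))      # 1.0 (no averaging, no bias)
print(median_bias(5))      # ~0.7833
print(median_bias(10001))  # ~0.6932, close to ln(2)
```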
  {
    "library": "pytorch",
    "name": "reduce_scatter_hook",
    "source_code": "def reduce_scatter_hook(state: DefaultState, grad: torch.Tensor, output: torch.Tensor):\n    if state.gradient_predivide_factor > 1:\n        grad.div_(state.gradient_predivide_factor)\n    dist.reduce_scatter_tensor(output, grad, group=state.process_group)\n    if state.gradient_postdivide_factor > 1:\n        output.div_(state.gradient_postdivide_factor)",
    "docstring": "Implement the FSDP communication hook for ``.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\algorithms\\_comm_hooks\\default_hooks.py",
    "ast_data": "FunctionDef name:reduce_scatter_hook arg:state arg:grad arg:output arguments arg arg arg If Compare Call Call If Compare Call"
  },
  {
    "library": "tensorflow",
    "name": "concat_along_batch_dimension",
    "source_code": "def concat_along_batch_dimension(outputs):\n    if isinstance(outputs[0], sparse_tensor.SparseTensor):\n        return sparse_ops.sparse_concat_v2(axis=0, sp_inputs=outputs)\n    if isinstance(outputs[0], ragged_tensor.RaggedTensor):\n        return array_ops.concat(outputs, axis=0)\n    return np.concatenate(outputs)",
    "docstring": "Concats prediction outputs along the batch dimension.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\distribute\\distributed_training_utils_v1.py",
    "ast_data": "FunctionDef name:concat_along_batch_dimension arg:outputs arguments arg If Call Return return:yes Call If Call Return return:yes Call Return return:yes Call"
  },
  {
    "library": "sphinx",
    "name": "add_definition",
    "source_code": "def add_definition(self, name: str, entry: tuple[str, int, int]) -> None:\n    if self.indents and self.indents[-1][0] == entry[0] == 'def':\n        pass\n    else:\n        self.definitions[name] = entry",
    "docstring": "Add a location of definition.",
    "type": "method",
    "file_path": "sphinx\\sphinx\\pycode\\parser.py",
    "ast_data": "FunctionDef name:add_definition arg:self arg:name arg:entry arguments arg arg arg If BoolOp Compare Assign"
  },
  {
    "library": "django",
    "name": "regex_lookup",
    "source_code": "def regex_lookup(self, lookup_type):\n    raise NotImplementedError('subclasses of BaseDatabaseOperations may require a regex_lookup() method')",
    "docstring": "Return the string to use in a query when performing regular expression lookups (using \"regex\" or \"iregex\"). It should contain a '%s' placeholder for the column being searched against. If the feature is not supported (or part of it is not supported), raise NotImplementedError.",
    "type": "method",
    "file_path": "django\\django\\db\\backends\\base\\operations.py",
    "ast_data": "FunctionDef name:regex_lookup arg:self arg:lookup_type arguments arg arg Raise Call"
  },
  {
    "library": "scikit-learn",
    "name": "_predict_recursive",
    "source_code": "def _predict_recursive(self, X, sample_weight, cluster_node):\n    if cluster_node.left is None:\n        return np.full(X.shape[0], cluster_node.label, dtype=np.int32)\n    centers = np.vstack((cluster_node.left.center, cluster_node.right.center))\n    if hasattr(self, '_X_mean'):\n        centers += self._X_mean\n    cluster_labels = _labels_inertia_threadpool_limit(X, sample_weight, centers, self._n_threads, return_inertia=False)\n    mask = cluster_labels == 0\n    labels = np.full(X.shape[0], -1, dtype=np.int32)\n    labels[mask] = self._predict_recursive(X[mask], sample_weight[mask], cluster_node.left)\n    labels[~mask] = self._predict_recursive(X[~mask], sample_weight[~mask], cluster_node.right)\n    return labels",
    "docstring": "Predict recursively by going down the hierarchical tree. Parameters ---------- X : {ndarray, csr_matrix} of shape (n_samples, n_features) The data points, currently assigned to , to predict between the subclusters of this node. sample_weight : ndarray of shape (n_samples,) The weights for each observation in X. cluster_node : _BisectingTree node object The cluster node of the hierarchical tree. Returns ------- labels : ndarray of shape (n_samples,) Index of the cluster each sample belongs to.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\cluster\\_bisect_k_means.py",
    "ast_data": "FunctionDef name:_predict_recursive arg:self arg:X arg:sample_weight arg:cluster_node arguments arg arg arg arg If Compare Return return:yes Call Assign Call If Call Assign Call Assign Compare Assign Call Assign Call Assign Call Return return:yes"
  },
  {
    "library": "kornia",
    "name": "KORNIA_CHECK_LAF",
    "source_code": "def KORNIA_CHECK_LAF(laf: Tensor, raises: bool=True) -> bool:\n    return KORNIA_CHECK_SHAPE(laf, ['B', 'N', '2', '3'], raises)",
    "docstring": "Check whether a Local Affine Frame (laf) has a valid shape. Args: laf: local affine frame tensor to evaluate. raises: bool indicating whether an exception should be raised upon failure. Raises: Exception: if the input laf does not have a shape :math: and raises is True. Example: >>> lafs = torch.rand(2, 10, 2, 3) >>> KORNIA_CHECK_LAF(lafs) True",
    "type": "function",
    "file_path": "kornia\\kornia\\core\\check.py",
    "ast_data": "FunctionDef name:KORNIA_CHECK_LAF arg:laf arg:raises arguments arg arg Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "get_series_repr_params",
    "source_code": "def get_series_repr_params() -> dict[str, Any]:\n    width, height = get_terminal_size()\n    max_rows_opt = get_option('display.max_rows')\n    max_rows = height if max_rows_opt == 0 else max_rows_opt\n    min_rows = height if max_rows_opt == 0 else get_option('display.min_rows')\n    return {'name': True, 'dtype': True, 'min_rows': min_rows, 'max_rows': max_rows, 'length': get_option('display.show_dimensions')}",
    "docstring": "Get the parameters used to repr(Series) calls using Series.to_string. Supplying these parameters to Series.to_string is equivalent to calling ``. This is useful if you want to adjust the series repr output. .. versionadded:: 1.4.0 Example ------- >>> import pandas as pd >>> >>> ser = pd.Series([1, 2, 3, 4]) >>> repr_params = pd.io.formats.format.get_series_repr_params() >>> repr(ser) == ser.to_string(**repr_params) True",
    "type": "function",
    "file_path": "pandas\\pandas\\io\\formats\\format.py",
    "ast_data": "FunctionDef name:get_series_repr_params arguments Assign Call Assign Call Assign Compare Assign Compare Call Return return:yes Call"
  },
  {
    "library": "django",
    "name": "require_http_methods",
    "source_code": "def require_http_methods(request_method_list):\n\n    def decorator(func):\n        if iscoroutinefunction(func):\n\n            @wraps(func)\n            async def inner(request, *args, **kwargs):\n                if request.method not in request_method_list:\n                    response = HttpResponseNotAllowed(request_method_list)\n                    log_response('Method Not Allowed (%s): %s', request.method, request.path, response=response, request=request)\n                    return response\n                return await func(request, *args, **kwargs)\n        else:\n\n            @wraps(func)\n            def inner(request, *args, **kwargs):\n                if request.method not in request_method_list:\n                    response = HttpResponseNotAllowed(request_method_list)\n                    log_response('Method Not Allowed (%s): %s', request.method, request.path, response=response, request=request)\n                    return response\n                return func(request, *args, **kwargs)\n        return inner\n    return decorator",
    "docstring": "Decorator to make a view only accept particular request methods. Usage:: @require_http_methods([\"GET\", \"POST\"]) def my_view(request): # I can assume now that only GET or POST requests make it this far # ... Note that request methods should be in uppercase.",
    "type": "function",
    "file_path": "django\\django\\views\\decorators\\http.py",
    "ast_data": "FunctionDef name:require_http_methods arg:request_method_list arguments arg FunctionDef name:decorator arg:func arguments arg If Call AsyncFunctionDef name:inner arg:request arguments arg arg arg If Compare Assign Call Call Return return:yes Return return:yes Call Call FunctionDef name:inner arg:request arguments arg arg arg If Compare Assign Call Call Return return:yes Return return:yes Call Call Return return:yes Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "reset_partition_in_sparse_nn",
    "source_code": "def reset_partition_in_sparse_nn(partition, new_partition=True):\n    if in_embedding_region:\n        embedding_partitions.append(partition)\n    else:\n        non_embedding_partitions.append(partition)\n    if new_partition:\n        partition = self.create_partition()\n        partition.left_mem_bytes = available_mem_bytes\n        return partition\n    return None",
    "docstring": "If crossing the boundary between non-embedding nodes and embedding nodes, create a new partition",
    "type": "method",
    "file_path": "pytorch\\torch\\fx\\experimental\\accelerator_partitioner.py",
    "ast_data": "FunctionDef name:reset_partition_in_sparse_nn arg:partition arg:new_partition arguments arg arg If Call Call If Assign Call Assign Return return:yes Return return:no"
  },
  {
    "library": "pandas",
    "name": "hexbin",
    "source_code": "def hexbin(self, x: Hashable, y: Hashable, C: Hashable | None=None, reduce_C_function: Callable | None=None, gridsize: int | tuple[int, int] | None=None, **kwargs) -> PlotAccessor:\n    if reduce_C_function is not None:\n        kwargs['reduce_C_function'] = reduce_C_function\n    if gridsize is not None:\n        kwargs['gridsize'] = gridsize\n    return self(kind='hexbin', x=x, y=y, C=C, **kwargs)",
    "docstring": "Generate a hexagonal binning plot. Generate a hexagonal binning plot of versus . If is (the default), this is a histogram of the number of occurrences of the observations at `Creduce_C_functionnumpy.meanCxy(x, y)np.meannp.meannp.maxnp.sumnp.stdDataFrame.plotCnp.sumreduce_C_function'observations'reduce_C_function`. .. plot:: :context: close-figs >>> n = 500 >>> df = pd.DataFrame( ... { ... \"coord_x\": np.random.uniform(-3, 3, size=n), ... \"coord_y\": np.random.uniform(30, 50, size=n), ... \"observations\": np.random.randint(1, 5, size=n), ... } ... ) >>> ax = df.plot.hexbin( ... x=\"coord_x\", ... y=\"coord_y\", ... C=\"observations\", ... reduce_C_function=np.sum, ... gridsize=10, ... cmap=\"viridis\", ... )",
    "type": "method",
    "file_path": "pandas\\pandas\\plotting\\_core.py",
    "ast_data": "FunctionDef name:hexbin arg:self arg:x arg:y arg:C arg:reduce_C_function arg:gridsize arguments arg arg arg arg arg arg arg If Compare Assign If Compare Assign Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "_rewrite_dynamo_tensor_constants",
    "source_code": "def _rewrite_dynamo_tensor_constants(orig_mod_buffers: set[torch.Tensor], traced_mod_buffers: dict[str, torch.Tensor], graph_signature: ExportGraphSignature, constants: dict[str, _ConstantAttributeType]) -> None:\n    for spec in graph_signature.input_specs:\n        if spec.kind == InputKind.BUFFER:\n            assert spec.target is not None\n            value = traced_mod_buffers[spec.target]\n            if value not in orig_mod_buffers:\n                spec.kind = InputKind.CONSTANT_TENSOR\n                constants[spec.target] = value",
    "docstring": "Dynamo erroneously marks tensor attributes on modules as buffers. Rewrite them to be tensor constants.",
    "type": "function",
    "file_path": "pytorch\\torch\\export\\_trace.py",
    "ast_data": "FunctionDef name:_rewrite_dynamo_tensor_constants arg:orig_mod_buffers arg:traced_mod_buffers arg:graph_signature arg:constants arguments arg arg arg arg For If Compare Compare Assign If Compare Assign Assign"
  },
  {
    "library": "pytorch",
    "name": "_reconstruct",
    "source_code": "def _reconstruct(self, cpp_module):\n    self.__dict__['_actual_script_module']._reconstruct(cpp_module)",
    "docstring": "Re-construct an instance of TopLevelTracedModule using an instance of a C++ module. Args: cpp_module: The C++ module that this TopLevelTracedModule will be rebuilt around.",
    "type": "method",
    "file_path": "pytorch\\torch\\jit\\_trace.py",
    "ast_data": "FunctionDef name:_reconstruct arg:self arg:cpp_module arguments arg arg Call"
  },
  {
    "library": "pytorch",
    "name": "shutdown",
    "source_code": "@abstractmethod\ndef shutdown(self) -> bool:\n    pass",
    "docstring": "Close all resources that were open for the rendezvous. Example:: rdzv_handler = ... try: store, rank, world_size = rdzv_handler.next_rendezvous() finally: rdzv_handler.shutdown()",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\elastic\\rendezvous\\api.py",
    "ast_data": "FunctionDef name:shutdown arg:self arguments arg"
  },
  {
    "library": "scipy",
    "name": "one_cycle",
    "source_code": "def one_cycle(self):\n    self.nstep += 1\n    new_global_min = False\n    accept, minres = self._monte_carlo_step()\n    if accept:\n        self.energy = minres.fun\n        self.x = np.copy(minres.x)\n        self.incumbent_minres = minres\n        new_global_min = self.storage.update(minres)\n    if self.disp:\n        self.print_report(minres.fun, accept)\n        if new_global_min:\n            print(f'found new global minimum on step {self.nstep} with function value {self.energy:g}')\n    self.xtrial = minres.x\n    self.energy_trial = minres.fun\n    self.accept = accept\n    return new_global_min",
    "docstring": "Do one cycle of the basinhopping algorithm",
    "type": "method",
    "file_path": "scipy\\scipy\\optimize\\_basinhopping.py",
    "ast_data": "FunctionDef name:one_cycle arg:self arguments arg Assign Assign Call If Assign Assign Call Assign Assign Call If Call If Call Assign Assign Assign Return return:yes"
  },
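For context, each call to `one_cycle` corresponds to one iteration of the public `scipy.optimize.basinhopping` driver; a minimal usage sketch follows, with an illustrative objective and iteration count.

```python
import numpy as np
from scipy.optimize import basinhopping

# One basinhopping iteration = one Monte Carlo step plus a local
# minimization, which is what one_cycle above performs internally.
func = lambda x: np.cos(14.5 * x - 0.3) + (x + 0.2) * x
res = basinhopping(func, x0=1.0, niter=100, seed=1234)
print(res.x, res.fun)
```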
  {
    "library": "django",
    "name": "redirect_to_login",
    "source_code": "def redirect_to_login(next, login_url=None, redirect_field_name=REDIRECT_FIELD_NAME):\n    resolved_url = resolve_url(login_url or settings.LOGIN_URL)\n    login_url_parts = list(urlsplit(resolved_url))\n    if redirect_field_name:\n        querystring = QueryDict(login_url_parts[3], mutable=True)\n        querystring[redirect_field_name] = next\n        login_url_parts[3] = querystring.urlencode(safe='/')\n    return HttpResponseRedirect(urlunsplit(login_url_parts))",
    "docstring": "Redirect the user to the login page, passing the given 'next' page.",
    "type": "function",
    "file_path": "django\\django\\contrib\\auth\\views.py",
    "ast_data": "FunctionDef name:redirect_to_login arg:next arg:login_url arg:redirect_field_name arguments arg arg arg Assign Call BoolOp Assign Call Call If Assign Call Assign Assign Call Return return:yes Call Call"
  },
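A small usage sketch in a view, assuming the standard `LOGIN_URL` setting; the view and URL path are hypothetical.

```python
from django.contrib.auth.views import redirect_to_login

def reports(request):
    if not request.user.is_authenticated:
        # Produces e.g. a redirect to /accounts/login/?next=/reports/
        return redirect_to_login(request.get_full_path())
    ...
```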
  {
    "library": "tensorflow",
    "name": "_create_variables_and_slots",
    "source_code": "def _create_variables_and_slots(self):\n    raise NotImplementedError",
    "docstring": "Create variables and slots variables for TPU embeddings.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\tpu\\tpu_embedding_base.py",
    "ast_data": "FunctionDef name:_create_variables_and_slots arg:self arguments arg Raise"
  },
  {
    "library": "scrapy",
    "name": "Field",
    "source_code": "class Field(dict[str, Any]):\n    pass",
    "docstring": "Container of field metadata",
    "type": "class",
    "file_path": "scrapy\\scrapy\\item.py",
    "ast_data": "ClassDef name:Field"
  },
  {
    "library": "matplotlib",
    "name": "_recompute_transform",
    "source_code": "def _recompute_transform(self):\n    center = (self.convert_xunits(self._center[0]), self.convert_yunits(self._center[1]))\n    width = self.convert_xunits(self._width)\n    height = self.convert_yunits(self._height)\n    self._patch_transform = transforms.Affine2D().scale(width * 0.5, height * 0.5 * self._aspect_ratio_correction).rotate_deg(self.angle).scale(1, 1 / self._aspect_ratio_correction).translate(*center)",
    "docstring": "Notes ----- This cannot be called until after this has been added to an Axes, otherwise unit conversion will fail. This makes it very important to call the accessor method and not directly access the transformation member variable.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\patches.py",
    "ast_data": "FunctionDef name:_recompute_transform arg:self arguments arg Assign Call Call Assign Call Assign Call Assign Call Call Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "display_modulewise_snapshots",
    "source_code": "def display_modulewise_snapshots(self, depth: int=2, units: str='B', tabulate: bool=False) -> None:\n\n    def natural_sort_key(s: str) -> list[Union[int, str]]:\n        return [int(text) if text.isdigit() else text.lower() for text in re.split('([0-9]+)', s)]\n    for mod_stats in sorted(self.memory_tracking.values(), key=lambda m_stats: natural_sort_key(m_stats.mod_fqn)):\n        mod_fqn = mod_stats.mod_fqn\n        mod_depth = mod_fqn.count('.') + 1\n        if mod_depth > depth:\n            continue\n        print(f'Module:  {mod_fqn}')\n        if tabulate:\n            _print_state_snapshots_tabular(mod_stats.snapshots, units)\n        else:\n            _print_state_snapshots(mod_stats.snapshots, units)",
    "docstring": "Print per device memory breakdown snapshot for each module called within MemTracker. Snapshots are displayed for the states defined by ``. The module hierarchy is displayed up to the specified depth. Keyword Args: depth (int, optional): The depth of the module hierarchy to display. Defaults to 2. units (str, optional): The units to use for memory tracking. Defaults to \"B\". Supports [\"B\", \"KiB\", \"MiB\", \"GiB\"]. tabulate (bool, optional): Whether to display the snapshot in a tabular format. Defaults to False.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\_tools\\mem_tracker.py",
    "ast_data": "FunctionDef name:display_modulewise_snapshots arg:self arg:depth arg:units arg:tabulate arguments arg arg arg arg FunctionDef name:natural_sort_key arg:s arguments arg Return return:yes Call Call Call Call For Call Call arguments arg Call Assign Assign Call If Compare Call If Call Call"
  },
  {
    "library": "numpy",
    "name": "piecewise",
    "source_code": "@array_function_dispatch(_piecewise_dispatcher)\ndef piecewise(x, condlist, funclist, *args, **kw):\n    x = asanyarray(x)\n    n2 = len(funclist)\n    if isscalar(condlist) or (not isinstance(condlist[0], (list, ndarray)) and x.ndim != 0):\n        condlist = [condlist]\n    condlist = asarray(condlist, dtype=bool)\n    n = len(condlist)\n    if n == n2 - 1:\n        condelse = ~np.any(condlist, axis=0, keepdims=True)\n        condlist = np.concatenate([condlist, condelse], axis=0)\n        n += 1\n    elif n != n2:\n        raise ValueError(f'with {n} condition(s), either {n} or {n + 1} functions are expected')\n    y = zeros_like(x)\n    for cond, func in zip(condlist, funclist):\n        if not isinstance(func, collections.abc.Callable):\n            y[cond] = func\n        else:\n            vals = x[cond]\n            if vals.size > 0:\n                y[cond] = func(vals, *args, **kw)\n    return y",
    "docstring": "Evaluate a piecewise-defined function. Given a set of conditions and corresponding functions, evaluate each function on the input data wherever its condition is true. Parameters ---------- x : ndarray or scalar The input domain. condlist : list of bool arrays or bool scalars Each boolean array corresponds to a function in . Wherever is used as the output value. Each boolean array in selects a piece of , and should therefore be of the same shape as . The length of must correspond to that of . If one extra function is given, i.e. if `xpiecewisepiecewisefunclistxcondlistxcondlist`. >>> np.piecewise(x, [x = 0], [lambda x: -x, lambda x: x]) array([2.5, 1.5, 0.5, 0.5, 1.5, 2.5]) Apply the same function to a scalar value. >>> y = -2 >>> np.piecewise(y, [y = 0], [lambda x: -x, lambda x: x]) array(2)",
    "type": "function",
    "file_path": "numpy\\numpy\\lib\\_function_base_impl.py",
    "ast_data": "FunctionDef name:piecewise arg:x arg:condlist arg:funclist arguments arg arg arg arg arg Assign Call Assign Call If BoolOp Call BoolOp Call Compare Assign Assign Call Assign Call If Compare Assign Call Assign Call If Compare Raise Call Assign Call For Call If Call Assign Assign If Compare Assign Call Return return:yes Call"
  },
  {
    "library": "django",
    "name": "get_short_name",
    "source_code": "def get_short_name(self):\n    return self.first_name",
    "docstring": "Return the short name for the user.",
    "type": "method",
    "file_path": "django\\django\\contrib\\auth\\models.py",
    "ast_data": "FunctionDef name:get_short_name arg:self arguments arg Return return:yes"
  },
  {
    "library": "authlib",
    "name": "check",
    "source_code": "@classmethod\ndef check(cls, uri):\n    if not is_secure_transport(uri):\n        raise cls()",
    "docstring": "Check and raise InsecureTransportError with the given URI.",
    "type": "method",
    "file_path": "authlib\\authlib\\oauth2\\rfc6749\\errors.py",
    "ast_data": "FunctionDef name:check arg:cls arg:uri arguments arg arg If Call Raise Call"
  },
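A brief usage sketch; the exact URIs are illustrative. Note that `is_secure_transport` conventionally treats localhost URLs as secure for development.

```python
from authlib.oauth2.rfc6749.errors import InsecureTransportError

InsecureTransportError.check("https://example.com/oauth/token")  # passes
try:
    InsecureTransportError.check("http://example.com/oauth/token")
except InsecureTransportError:
    print("plain HTTP rejected")
```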
  {
    "library": "django",
    "name": "BaseDatabaseClient",
    "source_code": "class BaseDatabaseClient:\n    executable_name = None\n\n    def __init__(self, connection):\n        self.connection = connection\n\n    def __del__(self):\n        del self.connection\n\n    @classmethod\n    def settings_to_cmd_args_env(cls, settings_dict, parameters):\n        raise NotImplementedError('subclasses of BaseDatabaseClient must provide a settings_to_cmd_args_env() method or override a runshell().')\n\n    def runshell(self, parameters):\n        args, env = self.settings_to_cmd_args_env(self.connection.settings_dict, parameters)\n        env = {**os.environ, **env} if env else None\n        subprocess.run(args, env=env, check=True)",
    "docstring": "Encapsulate backend-specific methods for opening a client shell.",
    "type": "class",
    "file_path": "django\\django\\db\\backends\\base\\client.py",
    "ast_data": "ClassDef name:BaseDatabaseClient Assign FunctionDef name:__init__ arg:self arg:connection arguments arg arg Assign FunctionDef name:__del__ arg:self arguments arg FunctionDef name:settings_to_cmd_args_env arg:cls arg:settings_dict arg:parameters arguments arg arg arg Raise Call FunctionDef name:runshell arg:self arg:parameters arguments arg arg Assign Call Assign Call"
  },
  {
    "library": "tensorflow",
    "name": "set_and_validate_functions",
    "source_code": "def set_and_validate_functions(self, function_dict):\n    for key in self.all_functions:\n        if key in function_dict:\n            if function_dict[key] is not None and (not isinstance(function_dict[key], (def_function.Function, save_impl.LayerCall))):\n                raise ValueError('Function dictionary contained a non-function object: {} (for key {})'.format(function_dict[key], key))\n            fn = function_dict[key]\n            self._function_dict[key] = fn\n            tf_fn = fn.wrapped_call if isinstance(fn, save_impl.LayerCall) else fn\n            setattr(self._keras_trackable, key, tf_fn)\n        else:\n            raise ValueError('Function {} missing from serialized function dict.'.format(key))\n    return self.functions",
    "docstring": "Saves function dictionary, and validates dictionary values.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\saving\\saved_model\\serialized_attributes.py",
    "ast_data": "FunctionDef name:set_and_validate_functions arg:self arg:function_dict arguments arg arg For If Compare If BoolOp Compare Call Raise Call Call Assign Assign Assign Call Call Raise Call Call Return return:yes"
  },
  {
    "library": "scipy",
    "name": "_cdf",
    "source_code": "def _cdf(self, x):\n    y = self._frozendist.cdf(x)\n    if self._p_domain == 1.0:\n        return y\n    return np.clip((y - self._p_lower) / self._p_domain, 0, 1)",
    "docstring": "Cumulative distribution function (CDF) Parameters ---------- x : array_like The values where the CDF is evaluated Returns ------- y : ndarray CDF evaluated at x",
    "type": "method",
    "file_path": "scipy\\scipy\\stats\\_sampling.py",
    "ast_data": "FunctionDef name:_cdf arg:self arg:x arguments arg arg Assign Call If Compare Return return:yes Return return:yes Call"
  },
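The clipping arithmetic above implements the usual truncated-distribution CDF, F_trunc(x) = (F(x) - p_lower) / p_domain. A minimal sketch with a standard normal truncated to [a, b] (the bounds are illustrative):

```python
import numpy as np
from scipy import stats

a, b = -1.0, 2.0
frozen = stats.norm()
p_lower = frozen.cdf(a)
p_domain = frozen.cdf(b) - p_lower  # probability mass inside [a, b]

x = np.linspace(a, b, 5)
cdf_trunc = np.clip((frozen.cdf(x) - p_lower) / p_domain, 0, 1)
print(cdf_trunc)  # runs from 0 at a to 1 at b
```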
  {
    "library": "scikit-learn",
    "name": "requires_vector_input",
    "source_code": "@property\ndef requires_vector_input(self):\n    return self.kernel.requires_vector_input",
    "docstring": "Returns whether the kernel is defined on discrete structures.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\gaussian_process\\kernels.py",
    "ast_data": "FunctionDef name:requires_vector_input arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_tile_variant",
    "source_code": "def _tile_variant(t, pfor_input: _PforInput):\n    return _tile_variant_with_length(t, pfor_input.pfor.loop_len_vector)",
    "docstring": "stacks according to its loop context.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\parallel_for\\pfor.py",
    "ast_data": "FunctionDef name:_tile_variant arg:t arg:pfor_input arguments arg arg Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "BaseCrossValidator",
    "source_code": "class BaseCrossValidator(_MetadataRequester, metaclass=ABCMeta):\n    __metadata_request__split = {'groups': metadata_routing.UNUSED}\n\n    def split(self, X, y=None, groups=None):\n        X, y, groups = indexable(X, y, groups)\n        indices = np.arange(_num_samples(X))\n        for test_index in self._iter_test_masks(X, y, groups):\n            train_index = indices[np.logical_not(test_index)]\n            test_index = indices[test_index]\n            yield (train_index, test_index)\n\n    def _iter_test_masks(self, X=None, y=None, groups=None):\n        for test_index in self._iter_test_indices(X, y, groups):\n            test_mask = np.zeros(_num_samples(X), dtype=bool)\n            test_mask[test_index] = True\n            yield test_mask\n\n    def _iter_test_indices(self, X=None, y=None, groups=None):\n        raise NotImplementedError\n\n    @abstractmethod\n    def get_n_splits(self, X=None, y=None, groups=None):\n        pass\n\n    def __repr__(self):\n        return _build_repr(self)",
    "docstring": "Base class for all cross-validators. Implementations must define or .",
    "type": "class",
    "file_path": "scikit-learn\\sklearn\\model_selection\\_split.py",
    "ast_data": "ClassDef name:BaseCrossValidator Assign FunctionDef name:split arg:self arg:X arg:y arg:groups arguments arg arg arg arg Assign Call Assign Call Call For Call Assign Call Assign FunctionDef name:_iter_test_masks arg:self arg:X arg:y arg:groups arguments arg arg arg arg For Call Assign Call Call Assign FunctionDef name:_iter_test_indices arg:self arg:X arg:y arg:groups arguments arg arg arg arg Raise FunctionDef name:get_n_splits arg:self arg:X arg:y arg:groups arguments arg arg arg arg FunctionDef name:__repr__ arg:self arguments arg Return return:yes Call"
  },
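A minimal sketch of the extension contract: subclassing and implementing only `_iter_test_indices` and `get_n_splits` is enough for `split` to work. The class name is hypothetical; `BaseCrossValidator` is assumed importable from `sklearn.model_selection`.

```python
import numpy as np
from sklearn.model_selection import BaseCrossValidator

class LeaveOneOutLike(BaseCrossValidator):
    # Yield one test index per split; the base class derives the masks
    # and the complementary train indices.
    def _iter_test_indices(self, X=None, y=None, groups=None):
        for i in range(len(X)):
            yield np.array([i])

    def get_n_splits(self, X=None, y=None, groups=None):
        return len(X)

X = np.arange(8).reshape(4, 2)
for train_idx, test_idx in LeaveOneOutLike().split(X):
    print(train_idx, test_idx)
```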
  {
    "library": "kornia",
    "name": "IrregularRenderer",
    "source_code": "class IrregularRenderer(VolumeRenderer):\n\n    def forward(self, rgbs: Tensor, densities: Tensor, points_3d: Tensor) -> Tensor:\n        t_vals = calc_ray_t_vals(points_3d)\n        deltas = t_vals[..., 1:] - t_vals[..., :-1]\n        far = torch.empty(size=t_vals.shape[:-1], dtype=t_vals.dtype, device=t_vals.device).fill_(self._huge)\n        deltas = torch.cat([deltas, far[..., None]], dim=-1)\n        alpha = 1 - torch.exp(-1.0 * densities * deltas[..., None])\n        return self._render(alpha, rgbs)",
    "docstring": "Renders 3D irregularly sampled points along rays.",
    "type": "class",
    "file_path": "kornia\\kornia\\nerf\\volume_renderer.py",
    "ast_data": "ClassDef name:IrregularRenderer FunctionDef name:forward arg:self arg:rgbs arg:densities arg:points_3d arguments arg arg arg arg Assign Call Assign Assign Call Call Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "getcallargs",
    "source_code": "def getcallargs(*func_and_positional, **named):\n    func = func_and_positional[0]\n    positional = func_and_positional[1:]\n    argspec = getfullargspec(func)\n    call_args = named.copy()\n    this = getattr(func, 'im_self', None) or getattr(func, '__self__', None)\n    if ismethod(func) and this:\n        positional = (this,) + positional\n    remaining_positionals = [arg for arg in argspec.args if arg not in call_args]\n    call_args.update(dict(zip(remaining_positionals, positional)))\n    default_count = 0 if not argspec.defaults else len(argspec.defaults)\n    if default_count:\n        for arg, value in zip(argspec.args[-default_count:], argspec.defaults):\n            if arg not in call_args:\n                call_args[arg] = value\n    if argspec.kwonlydefaults is not None:\n        for k, v in argspec.kwonlydefaults.items():\n            if k not in call_args:\n                call_args[k] = v\n    return call_args",
    "docstring": "TFDecorator-aware replacement for inspect.getcallargs. Args: *func_and_positional: A callable, possibly decorated, followed by any positional arguments that would be passed to . **named: The named argument dictionary that would be passed to . Returns: A dictionary mapping 's named arguments to the values they would receive if were called. will use the argspec from the outermost decorator that provides it. If no attached decorators modify argspec, the final unwrapped target's argspec will be used.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\util\\tf_inspect.py",
    "ast_data": "FunctionDef name:getcallargs arguments arg arg Assign Assign Assign Call Assign Call Assign BoolOp Call Call If BoolOp Call Assign Assign Compare Call Call Call Assign Call If For Call If Compare Assign If Compare For Call If Compare Assign Return return:yes"
  },
  {
    "library": "sphinx",
    "name": "DownloadFiles",
    "source_code": "class DownloadFiles(dict[Path, tuple[set[str], _StrPath]]):\n\n    def add_file(self, docname: str, filename: str | os.PathLike[str]) -> _StrPath:\n        filename = Path(filename)\n        if filename not in self:\n            digest = hashlib.md5(filename.as_posix().encode(), usedforsecurity=False).hexdigest()\n            dest_path = _StrPath(digest, filename.name)\n            self[filename] = ({docname}, dest_path)\n            return dest_path\n        docnames, dest_path = self[filename]\n        docnames.add(docname)\n        return dest_path\n\n    def purge_doc(self, docname: str) -> None:\n        for filename, (docs, _dest) in list(self.items()):\n            docs.discard(docname)\n            if not docs:\n                del self[filename]\n\n    def merge_other(self, docnames: Set[str], other: dict[Path, tuple[set[str], _StrPath]]) -> None:\n        for filename, (docs, _dest) in other.items():\n            for docname in docs & set(docnames):\n                self.add_file(docname, filename)",
    "docstring": "A special dictionary for download files. .. important:: This class would be refactored in nearly future. Hence don't hack this directly.",
    "type": "class",
    "file_path": "sphinx\\sphinx\\util\\_files.py",
    "ast_data": "ClassDef name:DownloadFiles FunctionDef name:add_file arg:self arg:docname arg:filename arguments arg arg arg Assign Call If Compare Assign Call Call Call Call Assign Call Assign Return return:yes Assign Call Return return:yes FunctionDef name:purge_doc arg:self arg:docname arguments arg arg For Call Call Call If FunctionDef name:merge_other arg:self arg:docnames arg:other arguments arg arg arg For Call For Call Call"
  },
  {
    "library": "pytorch",
    "name": "strict_translation_mode",
    "source_code": "@contextlib.contextmanager\ndef strict_translation_mode(self, check_fn: Callable[[VariableTracker], bool]):\n    prior = self.strict_checks_fn\n    self.strict_checks_fn = check_fn\n    try:\n        yield\n    finally:\n        self.strict_checks_fn = prior",
    "docstring": "Strict mode is enabled on a per-VariableTracker level depending on the return value of check_fn(node).",
    "type": "method",
    "file_path": "pytorch\\torch\\_dynamo\\symbolic_convert.py",
    "ast_data": "FunctionDef name:strict_translation_mode arg:self arg:check_fn arguments arg arg Assign Assign Try Assign"
  },
  {
    "library": "matplotlib",
    "name": "get_children",
    "source_code": "def get_children(self):\n    return list(self._cells.values())",
    "docstring": "Return the Artists contained by the table.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\table.py",
    "ast_data": "FunctionDef name:get_children arg:self arguments arg Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_DigammaGrad",
    "source_code": "@ops.RegisterGradient('Digamma')\ndef _DigammaGrad(op: ops.Operation, grad):\n    x = op.inputs[0]\n    with ops.control_dependencies([grad]):\n        x = math_ops.conj(x)\n        partial_x = math_ops.polygamma(array_ops.constant(1, dtype=x.dtype), x)\n        return grad * partial_x",
    "docstring": "Compute gradient of the digamma function with respect to its argument.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\math_grad.py",
    "ast_data": "FunctionDef name:_DigammaGrad arg:op arg:grad arguments arg arg Assign With Call Assign Call Assign Call Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "reset",
    "source_code": "def reset(self):\n    self.quasi.copy_(self.shift)\n    self.num_generated = 0\n    return self",
    "docstring": "Function to reset the `` to base state.",
    "type": "method",
    "file_path": "pytorch\\torch\\quasirandom.py",
    "ast_data": "FunctionDef name:reset arg:self arguments arg Call Assign Return return:yes"
  },
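A short usage sketch showing what "base state" means in practice, assuming `torch.quasirandom.SobolEngine`:

```python
import torch

engine = torch.quasirandom.SobolEngine(dimension=2, scramble=True, seed=0)
first = engine.draw(4)
engine.reset()          # rewind to the base state
again = engine.draw(4)
print(torch.equal(first, again))  # True: the sequence restarts
```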
  {
    "library": "scipy",
    "name": "isspmatrix_lil",
    "source_code": "def isspmatrix_lil(x):\n    return isinstance(x, lil_matrix)",
    "docstring": "Is of lil_matrix type? Parameters ---------- x object to check for being a lil matrix Returns ------- bool True if is a lil matrix, False otherwise Examples -------- >>> from scipy.sparse import lil_array, lil_matrix, coo_matrix, isspmatrix_lil >>> isspmatrix_lil(lil_matrix([[5]])) True >>> isspmatrix_lil(lil_array([[5]])) False >>> isspmatrix_lil(coo_matrix([[5]])) False",
    "type": "function",
    "file_path": "scipy\\scipy\\sparse\\_lil.py",
    "ast_data": "FunctionDef name:isspmatrix_lil arg:x arguments arg Return return:yes Call"
  },
  {
    "library": "sphinx",
    "name": "_load_entry_point_themes",
    "source_code": "def _load_entry_point_themes(self) -> None:\n    for entry_point in entry_points(group='sphinx.html_themes'):\n        if entry_point.name in self._themes:\n            continue\n\n        def _load_theme_closure(app: Sphinx=self._app, theme_module: str=entry_point.module) -> None:\n            app.setup_extension(theme_module)\n            _config_post_init(app, app.config)\n        self._entry_point_themes[entry_point.name] = _load_theme_closure",
    "docstring": "Try to load a theme with the specified name. This uses the `` entry point from package metadata.",
    "type": "method",
    "file_path": "sphinx\\sphinx\\theming.py",
    "ast_data": "FunctionDef name:_load_entry_point_themes arg:self arguments arg For Call If Compare FunctionDef name:_load_theme_closure arg:app arg:theme_module arguments arg arg Call Call Assign"
  },
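A sketch of the discovery side, assuming a theme package that declares the `sphinx.html_themes` entry point in its metadata (the `mytheme` name is hypothetical):

```python
from importlib.metadata import entry_points

# A theme distribution would declare, e.g. in pyproject.toml:
# [project.entry-points."sphinx.html_themes"]
# mytheme = "mytheme"
for ep in entry_points(group="sphinx.html_themes"):
    print(ep.name, "->", ep.module)  # theme name -> module to set up
```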
  {
    "library": "sphinx",
    "name": "MathReferenceTransform",
    "source_code": "class MathReferenceTransform(SphinxPostTransform):\n    default_priority = 5\n    formats = ('latex',)\n\n    def run(self, **kwargs: Any) -> None:\n        equations = self.env.domains.math_domain.data['objects']\n        for node in self.document.findall(addnodes.pending_xref):\n            if node['refdomain'] == 'math' and node['reftype'] in {'eq', 'numref'}:\n                docname, _ = equations.get(node['reftarget'], (None, None))\n                if docname:\n                    refnode = math_reference('', docname=docname, target=node['reftarget'])\n                    node.replace_self(refnode)",
    "docstring": "Replace pending_xref nodes for math by math_reference. To handle math reference easily on LaTeX writer, this converts pending_xref nodes to math_reference.",
    "type": "class",
    "file_path": "sphinx\\sphinx\\builders\\latex\\transforms.py",
    "ast_data": "ClassDef name:MathReferenceTransform Assign Assign FunctionDef name:run arg:self arguments arg arg Assign For Call If BoolOp Compare Compare Assign Call If Assign Call Call"
  },
  {
    "library": "scipy",
    "name": "_logpdf",
    "source_code": "def _logpdf(self, x, alpha):\n    lnB = _lnB(alpha)\n    return -lnB + np.sum(xlogy(alpha - 1, x.T).T, 0)",
    "docstring": "Log of the Dirichlet probability density function. Parameters ---------- x : ndarray Points at which to evaluate the log of the probability density function %(_dirichlet_doc_default_callparams)s Notes ----- As this function does no argument checking, it should not be called directly; use 'logpdf' instead.",
    "type": "method",
    "file_path": "scipy\\scipy\\stats\\_multivariate.py",
    "ast_data": "FunctionDef name:_logpdf arg:self arg:x arg:alpha arguments arg arg arg Assign Call Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "skip_magic",
    "source_code": "def skip_magic(code_line, magic_list):\n    for magic in magic_list:\n        if code_line.startswith(magic):\n            return True\n    return False",
    "docstring": "Checks if the cell has magic, that is not Python-based. Args: code_line: A line of Python code magic_list: A list of jupyter \"magic\" exceptions Returns: If the line jupyter \"magic\" line, not Python line >>> skip_magic('!ls -laF', ['%', '!', '?']) True",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\tools\\compatibility\\ipynb.py",
    "ast_data": "FunctionDef name:skip_magic arg:code_line arg:magic_list arguments arg arg For If Call Return return:yes Return return:yes"
  },
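The check is a simple prefix test; an equivalent one-liner sketch:

```python
def skip_magic(code_line, magic_list):
    # True when the line starts with any jupyter "magic" marker.
    return any(code_line.startswith(magic) for magic in magic_list)

print(skip_magic("!ls -laF", ["%", "!", "?"]))                 # True
print(skip_magic("import tensorflow as tf", ["%", "!", "?"]))  # False
```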
  {
    "library": "tensorflow",
    "name": "wait",
    "source_code": "def wait(self):\n    pywrap_tfe.TFE_ExecutorWaitForAllPendingNodes(self._handle)",
    "docstring": "Waits for ops dispatched in this executor to finish.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\executor.py",
    "ast_data": "FunctionDef name:wait arg:self arguments arg Call"
  },
  {
    "library": "matplotlib",
    "name": "_remove_vertex",
    "source_code": "def _remove_vertex(self, i):\n    if len(self._xys) > 2 and self._selection_completed and (i in (0, len(self._xys) - 1)):\n        self._xys.pop(0)\n        self._xys.pop(-1)\n        self._xys.append(self._xys[0])\n    else:\n        self._xys.pop(i)\n    if len(self._xys) <= 2:\n        self._selection_completed = False\n        self._remove_box()",
    "docstring": "Remove vertex with index i.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\widgets.py",
    "ast_data": "FunctionDef name:_remove_vertex arg:self arg:i arguments arg arg If BoolOp Compare Call Compare Call Call Call Call Call If Compare Call Assign Call"
  },
  {
    "library": "matplotlib",
    "name": "contains",
    "source_code": "def contains(self, mouseevent):\n    _log.warning(\"%r needs 'contains' method\", self.__class__.__name__)\n    return (False, {})",
    "docstring": "Test whether the artist contains the mouse event. Parameters ---------- mouseevent : Returns ------- contains : bool Whether any values are within the radius. details : dict An artist-specific dictionary of details of the event context, such as which points are contained in the pick radius. See the individual Artist subclasses for details.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\artist.py",
    "ast_data": "FunctionDef name:contains arg:self arg:mouseevent arguments arg arg Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "transform_feature",
    "source_code": "def transform_feature(self, transformation_cache, state_manager):\n    feature_tensors = []\n    for key in _collect_leaf_level_keys(self):\n        if isinstance(key, six.string_types):\n            feature_tensors.append(transformation_cache.get(key, state_manager))\n        elif isinstance(key, (fc_old._CategoricalColumn, CategoricalColumn)):\n            ids_and_weights = key.get_sparse_tensors(transformation_cache, state_manager)\n            if ids_and_weights.weight_tensor is not None:\n                raise ValueError('crossed_column does not support weight_tensor, but the given column populates weight_tensor. Given column: {}'.format(key.name))\n            feature_tensors.append(ids_and_weights.id_tensor)\n        else:\n            raise ValueError('Unsupported column type. Given: {}'.format(key))\n    return sparse_ops.sparse_cross_hashed(inputs=feature_tensors, num_buckets=self.hash_bucket_size, hash_key=self.hash_key)",
    "docstring": "Generates a hashed sparse cross from the input tensors.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\feature_column\\feature_column_v2.py",
    "ast_data": "FunctionDef name:transform_feature arg:self arg:transformation_cache arg:state_manager arguments arg arg arg Assign For Call If Call Call Call If Call Assign Call If Compare Raise Call Call Call Raise Call Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "check_additional",
    "source_code": "def check_additional(self, gm: GraphModule) -> None:\n    pass",
    "docstring": "Additional checks that are specific to some dialects.",
    "type": "method",
    "file_path": "pytorch\\torch\\_export\\verifier.py",
    "ast_data": "FunctionDef name:check_additional arg:self arg:gm arguments arg arg"
  },
  {
    "library": "tensorflow",
    "name": "gradients_function",
    "source_code": "def gradients_function(f, params=None):\n\n    def decorated(*args, **kwds):\n        _, grad = val_and_grad_function(f, params=params)(*args, **kwds)\n        return grad\n    return decorated",
    "docstring": "Returns a function which differentiates f with respect to params. Example: Note that only tensors with real or complex dtypes are differentiable. Args: f: function to be differentiated. If returns a scalar, this scalar will be differentiated. If returns a tensor or list of tensors, by default a scalar will be computed by adding all their values to produce a single scalar. If desired, the tensors can be elementwise multiplied by the tensors passed as the keyword argument to the returned gradient function. params: list of parameter names of f or list of integers indexing the parameters with respect to which we'll differentiate. Passing None differentiates with respect to all parameters. Returns: function which, when called, returns the value of f and the gradient of with respect to all of . The function takes an extra optional keyword argument . Setting it allows computation of vector jacobian products for vectors other than the vector of ones. Raises: ValueError: if the params are not all strings or all integers.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\backprop.py",
    "ast_data": "FunctionDef name:gradients_function arg:f arg:params arguments arg arg FunctionDef name:decorated arguments arg arg Assign Call Call Return return:yes Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "close",
    "source_code": "def close(self, cancel_pending_enqueues=False, name=None):\n    if name is None:\n        name = '%s_BarrierClose' % self._name\n    return gen_data_flow_ops.barrier_close(self._barrier_ref, cancel_pending_enqueues=cancel_pending_enqueues, name=name)",
    "docstring": "Closes this barrier. This operation signals that no more new key values will be inserted in the given barrier. Subsequent InsertMany operations with new keys will fail. InsertMany operations that just complement already existing keys with other components, will continue to succeed. Subsequent TakeMany operations will continue to succeed if sufficient elements remain in the barrier. Subsequent TakeMany operations that would block will fail immediately. If is , all pending requests to the underlying queue will also be canceled, and completing of already started values is also not acceptable anymore. Args: cancel_pending_enqueues: (Optional.) A boolean, defaulting to (described above). name: Optional name for the op. Returns: The operation that closes the barrier.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\data_flow_ops.py",
    "ast_data": "FunctionDef name:close arg:self arg:cancel_pending_enqueues arg:name arguments arg arg arg If Compare Assign Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_num_buckets",
    "source_code": "@property\ndef _num_buckets(self):\n    return self.vocabulary_size + self.num_oov_buckets",
    "docstring": "Returns number of buckets in this sparse feature.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\feature_column\\feature_column.py",
    "ast_data": "FunctionDef name:_num_buckets arg:self arguments arg Return return:yes"
  },
  {
    "library": "kornia",
    "name": "MedianBlur",
    "source_code": "class MedianBlur(Module):\n\n    def __init__(self, kernel_size: tuple[int, int] | int) -> None:\n        super().__init__()\n        self.kernel_size = kernel_size\n\n    def forward(self, input: Tensor) -> Tensor:\n        return median_blur(input, self.kernel_size)",
    "docstring": "Blur an image using the median filter. Args: kernel_size: the blurring kernel size. Returns: the blurred input tensor. Shape: - Input: :math: - Output: :math: Example: >>> input = torch.rand(2, 4, 5, 7) >>> blur = MedianBlur((3, 3)) >>> output = blur(input) >>> output.shape torch.Size([2, 4, 5, 7])",
    "type": "class",
    "file_path": "kornia\\kornia\\filters\\median.py",
    "ast_data": "ClassDef name:MedianBlur FunctionDef name:__init__ arg:self arg:kernel_size arguments arg arg Call Call Assign FunctionDef name:forward arg:self arg:input arguments arg arg Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "_coeff_of_divided_diff",
    "source_code": "def _coeff_of_divided_diff(x):\n    n = x.shape[0]\n    res = np.zeros(n)\n    for i in range(n):\n        pp = 1.0\n        for k in range(n):\n            if k != i:\n                pp *= x[i] - x[k]\n        res[i] = 1.0 / pp\n    return res",
    "docstring": "Returns the coefficients of the divided difference. Parameters ---------- x : array, shape (n,) Array which is used for the computation of divided difference. Returns ------- res : array_like, shape (n,) Coefficients of the divided difference. Notes ----- Vector `` should have unique elements, otherwise an error division by zero might be raised. No checks are performed.",
    "type": "function",
    "file_path": "scipy\\scipy\\interpolate\\_bsplines.py",
    "ast_data": "FunctionDef name:_coeff_of_divided_diff arg:x arguments arg Assign Assign Call For Call Assign For Call If Compare Assign Return return:yes"
  },
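A small worked check of the formula above, res[i] = 1 / prod_{k != i} (x[i] - x[k]), re-implemented standalone in NumPy since the helper itself is private to scipy:

```python
import numpy as np

def coeff_of_divided_diff(x):
    # Same logic as the scipy helper, using np.delete to skip k == i.
    n = x.shape[0]
    res = np.empty(n)
    for i in range(n):
        res[i] = 1.0 / np.prod(np.delete(x[i] - x, i))
    return res

x = np.array([0.0, 1.0, 3.0])
# i=0: 1/((0-1)*(0-3)) = 1/3; i=1: 1/((1-0)*(1-3)) = -1/2; i=2: 1/((3-0)*(3-1)) = 1/6
print(coeff_of_divided_diff(x))  # [ 0.3333... -0.5  0.1666...]
```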
  {
    "library": "django",
    "name": "_property_names",
    "source_code": "@cached_property\ndef _property_names(self):\n    names = set()\n    seen = set()\n    for klass in self.model.__mro__:\n        names |= {name for name, value in klass.__dict__.items() if isinstance(value, property) and name not in seen}\n        seen |= set(klass.__dict__)\n    return frozenset(names)",
    "docstring": "Return a set of the names of the properties defined on the model.",
    "type": "method",
    "file_path": "django\\django\\db\\models\\options.py",
    "ast_data": "FunctionDef name:_property_names arg:self arguments arg Assign Call Assign Call For Call BoolOp Call Compare Call Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "fit",
    "source_code": "@_fit_context(prefer_skip_nested_validation=True)\ndef fit(self, X, y, coef_init=None, intercept_init=None):\n    self._more_validate_params()\n    lr = 'pa1' if self.loss == 'hinge' else 'pa2'\n    return self._fit(X, y, alpha=1.0, C=self.C, loss='hinge', learning_rate=lr, coef_init=coef_init, intercept_init=intercept_init)",
    "docstring": "Fit linear model with Passive Aggressive algorithm. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) Training data. y : array-like of shape (n_samples,) Target values. coef_init : ndarray of shape (n_classes, n_features) The initial coefficients to warm-start the optimization. intercept_init : ndarray of shape (n_classes,) The initial intercept to warm-start the optimization. Returns ------- self : object Fitted estimator.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\linear_model\\_passive_aggressive.py",
    "ast_data": "FunctionDef name:fit arg:self arg:X arg:y arg:coef_init arg:intercept_init arguments arg arg arg arg arg Call Assign Compare Return return:yes Call Call"
  },
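A usage sketch through the public estimator that exposes this `fit` (here `PassiveAggressiveClassifier`, on a synthetic dataset):

```python
from sklearn.datasets import make_classification
from sklearn.linear_model import PassiveAggressiveClassifier

X, y = make_classification(n_samples=200, n_features=4, random_state=0)
clf = PassiveAggressiveClassifier(C=1.0, max_iter=1000, random_state=0)
clf.fit(X, y)  # loss='hinge' selects the 'pa1' learning rate internally
print(clf.score(X, y))
```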
  {
    "library": "tensorflow",
    "name": "TFRecordReader",
    "source_code": "@tf_export(v1=['TFRecordReader'])\nclass TFRecordReader(ReaderBase):\n\n    @deprecation.deprecated(None, 'Queue-based input pipelines have been replaced by `tf.data`. Use `tf.data.TFRecordDataset`.')\n    def __init__(self, name=None, options=None):\n        compression_type = python_io.TFRecordOptions.get_compression_type_string(options)\n        rr = gen_io_ops.tf_record_reader_v2(name=name, compression_type=compression_type)\n        super(TFRecordReader, self).__init__(rr)",
    "docstring": "A Reader that outputs the records from a TFRecords file. See ReaderBase for supported methods. @compatibility(eager) Readers are not compatible with eager execution. Instead, please use to get data into your model. @end_compatibility",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\io_ops.py",
    "ast_data": "ClassDef name:TFRecordReader FunctionDef name:__init__ arg:self arg:name arg:options arguments arg arg arg Assign Call Assign Call Call Call Call Call"
  },
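Since the class is deprecated in favor of `tf.data`, a minimal equivalent pipeline looks like the following; `example.tfrecord` is a placeholder path:

```python
import tensorflow as tf

# Reads raw serialized records; parse them with tf.io.parse_example as needed.
dataset = tf.data.TFRecordDataset(["example.tfrecord"])
for raw_record in dataset.take(2):
    print(raw_record.numpy()[:20])
```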
  {
    "library": "pytorch",
    "name": "kaiming_normal_",
    "source_code": "def kaiming_normal_(tensor: Tensor, a: float=0, mode: str='fan_in', nonlinearity: str='leaky_relu', generator: _Optional[torch.Generator]=None):\n    if 0 in tensor.shape:\n        warnings.warn('Initializing zero-element tensors is a no-op')\n        return tensor\n    fan = _calculate_correct_fan(tensor, mode)\n    gain = calculate_gain(nonlinearity, a)\n    std = gain / math.sqrt(fan)\n    with torch.no_grad():\n        return tensor.normal_(0, std, generator=generator)",
    "docstring": "Fill the input with values using a Kaiming normal distribution. The method is described in - He, K. et al. (2015). The resulting tensor will have values sampled from :math: where .. math:: \\text{std} = \\frac{\\text{gain}}{\\sqrt{\\text{fan\\_mode}}} Also known as He initialization. Args: tensor: an n-dimensional a: the negative slope of the rectifier used after this layer (only used with `nn.functional`.",
    "type": "function",
    "file_path": "pytorch\\torch\\nn\\init.py",
    "ast_data": "FunctionDef name:kaiming_normal_ arg:tensor arg:a arg:mode arg:nonlinearity arg:generator arguments arg arg arg arg arg If Compare Call Return return:yes Assign Call Assign Call Assign Call With Call Return return:yes Call"
  },
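A short usage sketch of `kaiming_normal_` on a 3x5 weight matrix; with `mode='fan_out'` here, fan_out = 3 and the sampling std is sqrt(2)/sqrt(3) for the ReLU gain:

```python
import torch
import torch.nn as nn

w = torch.empty(3, 5)
nn.init.kaiming_normal_(w, mode="fan_out", nonlinearity="relu")
print(w.std())  # roughly sqrt(2/3) ~ 0.816 for large tensors
```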
  {
    "library": "django",
    "name": "get_form_kwargs",
    "source_code": "def get_form_kwargs(self):\n    kwargs = super().get_form_kwargs()\n    if hasattr(self, 'object'):\n        kwargs.update({'instance': self.object})\n    return kwargs",
    "docstring": "Return the keyword arguments for instantiating the form.",
    "type": "method",
    "file_path": "django\\django\\views\\generic\\edit.py",
    "ast_data": "FunctionDef name:get_form_kwargs arg:self arguments arg Assign Call Call If Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_create_ordered_metrics",
    "source_code": "def _create_ordered_metrics(self):\n    self._metrics_in_order = []\n    for output_metrics, output_weighted_metrics in zip(self._metrics, self._weighted_metrics):\n        for m in nest.flatten(output_metrics):\n            if m is not None:\n                self._metrics_in_order.append(m)\n        for wm in nest.flatten(output_weighted_metrics):\n            if wm is not None:\n                self._metrics_in_order.append(wm)",
    "docstring": "Cache the flat order needed when returning metrics, for backwards compat.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\compile_utils.py",
    "ast_data": "FunctionDef name:_create_ordered_metrics arg:self arguments arg Assign For Call For Call If Compare Call For Call If Compare Call"
  },
  {
    "library": "matplotlib",
    "name": "set_one_half",
    "source_code": "def set_one_half(self, one_half):\n    self._one_half = one_half",
    "docstring": "Set the way one half is displayed. one_half : str The string used to represent 1/2.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\ticker.py",
    "ast_data": "FunctionDef name:set_one_half arg:self arg:one_half arguments arg arg Assign"
  },
  {
    "library": "virtualenv",
    "name": "add_parser_arguments",
    "source_code": "@classmethod\ndef add_parser_arguments(cls, parser):\n    raise NotImplementedError",
    "docstring": "Add CLI arguments for this discovery mechanisms. :param parser: the CLI parser",
    "type": "method",
    "file_path": "virtualenv\\src\\virtualenv\\discovery\\discover.py",
    "ast_data": "FunctionDef name:add_parser_arguments arg:cls arg:parser arguments arg arg Raise"
  },
  {
    "library": "matplotlib",
    "name": "AbstractPathEffect",
    "source_code": "class AbstractPathEffect:\n\n    def __init__(self, offset=(0.0, 0.0)):\n        self._offset = offset\n\n    def _offset_transform(self, renderer):\n        return mtransforms.Affine2D().translate(*map(renderer.points_to_pixels, self._offset))\n\n    def _update_gc(self, gc, new_gc_dict):\n        new_gc_dict = new_gc_dict.copy()\n        dashes = new_gc_dict.pop('dashes', None)\n        if dashes:\n            gc.set_dashes(**dashes)\n        for k, v in new_gc_dict.items():\n            set_method = getattr(gc, 'set_' + k, None)\n            if not callable(set_method):\n                raise AttributeError(f'Unknown property {k}')\n            set_method(v)\n        return gc\n\n    def draw_path(self, renderer, gc, tpath, affine, rgbFace=None):\n        if isinstance(renderer, PathEffectRenderer):\n            renderer = renderer._renderer\n        return renderer.draw_path(gc, tpath, affine, rgbFace)",
    "docstring": "A base class for path effects. Subclasses should override the `` method to add effect functionality.",
    "type": "class",
    "file_path": "matplotlib\\lib\\matplotlib\\patheffects.py",
    "ast_data": "ClassDef name:AbstractPathEffect FunctionDef name:__init__ arg:self arg:offset arguments arg arg Assign FunctionDef name:_offset_transform arg:self arg:renderer arguments arg arg Return return:yes Call Call Call FunctionDef name:_update_gc arg:self arg:gc arg:new_gc_dict arguments arg arg arg Assign Call Assign Call If Call For Call Assign Call If Call Raise Call Call Return return:yes FunctionDef name:draw_path arg:self arg:renderer arg:gc arg:tpath arg:affine arg:rgbFace arguments arg arg arg arg arg arg If Call Assign Return return:yes Call"
  },
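A hedged sketch of subclassing `AbstractPathEffect`: override `draw_path` and compose the inherited point-based offset into the affine before delegating to the renderer (`ShiftEffect` is a hypothetical name, not a matplotlib class):

```python
import matplotlib.pyplot as plt
from matplotlib.patheffects import AbstractPathEffect

class ShiftEffect(AbstractPathEffect):
    def draw_path(self, renderer, gc, tpath, affine, rgbFace=None):
        # Append the offset transform (in points) and draw normally.
        offset_affine = affine + self._offset_transform(renderer)
        renderer.draw_path(gc, tpath, offset_affine, rgbFace)

fig, ax = plt.subplots()
ax.text(0.3, 0.5, "shifted text", path_effects=[ShiftEffect(offset=(4, -4))])
plt.show()
```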
  {
    "library": "pytorch",
    "name": "prelu",
    "source_code": "@register_decomposition(aten.prelu)\n@elementwise_type_promotion_wrapper(type_promoting_args=('a', 'weight'), type_promotion_kind=utils.ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT)\ndef prelu(a: TensorLikeType, weight: TensorLikeType) -> TensorLikeType:\n    torch._check(isinstance(a, TensorLike), lambda: f'prelu: Expected `a` to be tensor, but got: {type(a)}')\n    torch._check(isinstance(weight, TensorLike), lambda: f'prelu: Expected `weight` to be tensor, but got: {type(weight)}')\n    if weight.numel() != 1:\n        torch._check(a.ndim > 0, lambda: 'Not allow zero-dim input tensor.')\n        channel_size = a.shape[1] if a.ndim >= 2 else 1\n        torch._check(weight.numel() == channel_size, lambda: f'Mismatch of parameter numbers and input channel size. Found parameter numbers = {weight.numel()} and channel size = {channel_size}.')\n    torch._check(weight.ndim == 0 or weight.ndim == 1, lambda: f'prelu: Expected `weight` to be a scalar or 1D tensor, but got: ndim = {weight.ndim}')\n    if a.ndim == 0:\n        weight = weight[0] if weight.ndim == 1 else weight\n    else:\n        weight = prims.broadcast_in_dim(weight, a.shape, () if weight.ndim == 0 else (0 if a.ndim == 1 else 1,))\n    return torch.where(a > 0, a, a * weight)",
    "docstring": "Reference implementation of torch.nn.functional.prelu",
    "type": "function",
    "file_path": "pytorch\\torch\\_refs\\nn\\functional\\__init__.py",
    "ast_data": "FunctionDef name:prelu arg:a arg:weight arguments arg arg Call Call arguments Call Call Call arguments Call If Compare Call Call Compare arguments Assign Compare Call Compare Call arguments Call Call BoolOp Compare Compare arguments If Compare Assign Compare Assign Call Compare Compare Return return:yes Call Compare Call Call"
  },
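The same semantics through the public API, `torch.nn.functional.prelu`, as a quick numeric check:

```python
import torch
import torch.nn.functional as F

a = torch.tensor([-2.0, -1.0, 0.0, 1.0])
weight = torch.tensor([0.25])  # single shared slope for negative inputs
print(F.prelu(a, weight))  # tensor([-0.5000, -0.2500,  0.0000,  1.0000])
```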
  {
    "library": "kornia",
    "name": "validate_tensor",
    "source_code": "def validate_tensor(self, input: Tensor) -> None:\n    _validate_input_dtype(input, accepted_dtypes=[float16, float32, float64])\n    if len(input.shape) != 4:\n        raise RuntimeError(f'Expect (B, C, H, W). Got {input.shape}.')",
    "docstring": "Check if the input tensor is formatted as expected.",
    "type": "method",
    "file_path": "kornia\\kornia\\augmentation\\_2d\\base.py",
    "ast_data": "FunctionDef name:validate_tensor arg:self arg:input arguments arg arg Call If Compare Call Raise Call"
  },
  {
    "library": "tensorflow",
    "name": "getimmediatesource",
    "source_code": "def getimmediatesource(obj):\n    with _linecache_lock:\n        _fix_linecache_record(obj)\n        lines, lnum = inspect.findsource(obj)\n        return ''.join(inspect.getblock(lines[lnum:]))",
    "docstring": "A variant of inspect.getsource that ignores the __wrapped__ property.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\autograph\\pyct\\inspect_utils.py",
    "ast_data": "FunctionDef name:getimmediatesource arg:obj arguments arg With Call Assign Call Return return:yes Call Call"
  },
  {
    "library": "authlib",
    "name": "check_sensitive_data",
    "source_code": "def check_sensitive_data(self, payload):\n    for k in payload:\n        if k in self.SENSITIVE_NAMES:\n            raise InsecureClaimError(k)\n        v = payload[k]\n        if isinstance(v, str) and self.SENSITIVE_VALUES.search(v):\n            raise InsecureClaimError(k)",
    "docstring": "Check if payload contains sensitive information.",
    "type": "method",
    "file_path": "authlib\\authlib\\jose\\rfc7519\\jwt.py",
    "ast_data": "FunctionDef name:check_sensitive_data arg:self arg:payload arguments arg arg For If Compare Raise Call Assign If BoolOp Call Call Raise Call"
  },
  {
    "library": "pytorch",
    "name": "remove_assertion_nodes",
    "source_code": "def remove_assertion_nodes(graph_module: torch.fx.GraphModule) -> torch.fx.GraphModule:\n    aten_assertion_targets = {torch.ops.aten.sym_constrain_range_for_size.default, torch.ops.aten._assert_async.default, torch.ops.aten._assert_async.msg, torch.ops.aten._assert_scalar.default, torch.ops.aten._assert_tensor_metadata.default}\n    for node in graph_module.graph.nodes:\n        if node.op == 'call_function' and node.target in aten_assertion_targets:\n            graph_module.graph.erase_node(node)\n    graph_module.recompile()\n    return graph_module",
    "docstring": "Remove all assertion and check nodes from the FX graph",
    "type": "function",
    "file_path": "pytorch\\torch\\onnx\\_internal\\exporter\\_fx_passes.py",
    "ast_data": "FunctionDef name:remove_assertion_nodes arg:graph_module arguments arg Assign For If BoolOp Compare Compare Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "from_proto",
    "source_code": "@staticmethod\ndef from_proto(context_def, import_scope=None):\n    ret = CondContext(context_def=context_def, import_scope=import_scope)\n    ret.Enter()\n    for nested_def in context_def.nested_contexts:\n        from_control_flow_context_def(nested_def, import_scope=import_scope)\n    ret.Exit()\n    return ret",
    "docstring": "Returns a object created from .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\control_flow_ops.py",
    "ast_data": "FunctionDef name:from_proto arg:context_def arg:import_scope arguments arg arg Assign Call Call For Call Call Return return:yes"
  },
  {
    "library": "scrapy",
    "name": "h2_connected",
    "source_code": "@property\ndef h2_connected(self) -> bool:\n    assert self.transport is not None\n    return bool(self.transport.connected) and self.metadata['settings_acknowledged']",
    "docstring": "Boolean to keep track of the connection status. This is used while initiating pending streams to make sure that we initiate stream only during active HTTP/2 Connection",
    "type": "method",
    "file_path": "scrapy\\scrapy\\core\\http2\\protocol.py",
    "ast_data": "FunctionDef name:h2_connected arg:self arguments arg Compare Return return:yes BoolOp Call"
  },
  {
    "library": "matplotlib",
    "name": "LinearScale",
    "source_code": "class LinearScale(ScaleBase):\n    name = 'linear'\n\n    def __init__(self, axis):\n        pass\n\n    def set_default_locators_and_formatters(self, axis):\n        axis.set_major_locator(AutoLocator())\n        axis.set_major_formatter(ScalarFormatter())\n        axis.set_minor_formatter(NullFormatter())\n        if axis.axis_name == 'x' and mpl.rcParams['xtick.minor.visible'] or (axis.axis_name == 'y' and mpl.rcParams['ytick.minor.visible']):\n            axis.set_minor_locator(AutoMinorLocator())\n        else:\n            axis.set_minor_locator(NullLocator())\n\n    def get_transform(self):\n        return IdentityTransform()",
    "docstring": "The default linear scale.",
    "type": "class",
    "file_path": "matplotlib\\lib\\matplotlib\\scale.py",
    "ast_data": "ClassDef name:LinearScale Assign FunctionDef name:__init__ arg:self arg:axis arguments arg arg FunctionDef name:set_default_locators_and_formatters arg:self arg:axis arguments arg arg Call Call Call Call Call Call If BoolOp BoolOp Compare BoolOp Compare Call Call Call Call FunctionDef name:get_transform arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "authlib",
    "name": "introspect_token",
    "source_code": "def introspect_token(self, token):\n    raise NotImplementedError()",
    "docstring": "Read given token and return its introspection metadata as a dictionary following _:: def introspect_token(self, token): return { \"active\": True, \"client_id\": token.client_id, \"token_type\": token.token_type, \"username\": get_token_username(token), \"scope\": token.get_scope(), \"sub\": get_token_user_sub(token), \"aud\": token.client_id, \"iss\": \" \"exp\": token.expires_at, \"iat\": token.issued_at, } .. _:",
    "type": "method",
    "file_path": "authlib\\authlib\\oauth2\\rfc7662\\introspection.py",
    "ast_data": "FunctionDef name:introspect_token arg:self arg:token arguments arg arg Raise Call"
  },
  {
    "library": "pandas",
    "name": "_get_names",
    "source_code": "def _get_names(self) -> FrozenList:\n    return FrozenList((self.name,))",
    "docstring": "Get names on index. This method returns a FrozenList containing the names of the object. It's primarily intended for internal use. Returns ------- FrozenList A FrozenList containing the object's names, contains None if the object does not have a name. See Also -------- Index.name : Index name as a string, or None for MultiIndex. Examples -------- >>> idx = pd.Index([1, 2, 3], name=\"x\") >>> idx.names FrozenList(['x']) >>> idx = pd.Index([1, 2, 3], name=(\"x\", \"y\")) >>> idx.names FrozenList([('x', 'y')]) If the index does not have a name set: >>> idx = pd.Index([1, 2, 3]) >>> idx.names FrozenList([None])",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\indexes\\base.py",
    "ast_data": "FunctionDef name:_get_names arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "virtualenv",
    "name": "env_patch_text",
    "source_code": "def env_patch_text(self):\n    with self.app_data.ensure_extracted(Path(__file__).parent / '_virtualenv.py') as resolved_path:\n        text = resolved_path.read_text(encoding='utf-8')\n        return text.replace('\"__SCRIPT_DIR__\"', repr(os.path.relpath(str(self.script_dir), str(self.purelib))))",
    "docstring": "Patch the distutils package to not be derailed by its configuration files.",
    "type": "method",
    "file_path": "virtualenv\\src\\virtualenv\\create\\via_global_ref\\api.py",
    "ast_data": "FunctionDef name:env_patch_text arg:self arguments arg With Call Call Assign Call Return return:yes Call Call Call Call Call"
  },
  {
    "library": "numpy",
    "name": "__rtruediv__",
    "source_code": "def __rtruediv__(self, other):\n    return true_divide(other, self)",
    "docstring": "Divide self into other, and return a new masked array.",
    "type": "method",
    "file_path": "numpy\\numpy\\ma\\core.py",
    "ast_data": "FunctionDef name:__rtruediv__ arg:self arg:other arguments arg arg Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "WeekdayLocator",
    "source_code": "class WeekdayLocator(RRuleLocator):\n\n    def __init__(self, byweekday=1, interval=1, tz=None):\n        rule = rrulewrapper(DAILY, byweekday=byweekday, interval=interval, **self.hms0d)\n        super().__init__(rule, tz=tz)",
    "docstring": "Make ticks on occurrences of each weekday.",
    "type": "class",
    "file_path": "matplotlib\\lib\\matplotlib\\dates.py",
    "ast_data": "ClassDef name:WeekdayLocator FunctionDef name:__init__ arg:self arg:byweekday arg:interval arg:tz arguments arg arg arg arg Assign Call Call Call"
  },
  {
    "library": "pandas",
    "name": "is_empty_indexer",
    "source_code": "def is_empty_indexer(indexer) -> bool:\n    if is_list_like(indexer) and (not len(indexer)):\n        return True\n    if not isinstance(indexer, tuple):\n        indexer = (indexer,)\n    return any((isinstance(idx, np.ndarray) and len(idx) == 0 for idx in indexer))",
    "docstring": "Check if we have an empty indexer. Parameters ---------- indexer : object Returns ------- bool",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\indexers\\utils.py",
    "ast_data": "FunctionDef name:is_empty_indexer arg:indexer arguments arg If BoolOp Call Call Return return:yes If Call Assign Return return:yes Call BoolOp Call Compare Call"
  },
  {
    "library": "numpy",
    "name": "_get_f90_modules",
    "source_code": "def _get_f90_modules(source):\n    if not f90_ext_match(source):\n        return []\n    modules = []\n    with open(source) as f:\n        for line in f:\n            m = f90_module_name_match(line)\n            if m:\n                name = m.group('name')\n                modules.append(name)\n    return modules",
    "docstring": "Return a list of Fortran f90 module names that given source file defines.",
    "type": "function",
    "file_path": "numpy\\numpy\\distutils\\misc_util.py",
    "ast_data": "FunctionDef name:_get_f90_modules arg:source arguments arg If Call Return return:no Assign With Call For Assign Call If Assign Call Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "get_children",
    "source_code": "def get_children(self):\n    return [self.patch, *self.artists, *self._localaxes, *self.lines, *self.patches, *self.texts, *self.images, *self.legends, *self.subfigs]",
    "docstring": "Get a list of artists contained in the figure.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\figure.py",
    "ast_data": "FunctionDef name:get_children arg:self arguments arg Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "set_zbound",
    "source_code": "def set_zbound(self, lower=None, upper=None, view_margin=None):\n    self._set_bound3d(self.get_zbound, self.set_zlim, self.zaxis_inverted, lower, upper, view_margin)",
    "docstring": "Set the lower and upper numerical bounds of the z-axis. This method will honor axis inversion regardless of parameter order. It will not change the autoscaling setting (). Parameters ---------- lower, upper : float or None The lower and upper bounds. If *None*, the respective axis bound is not modified. view_margin : float or None The margin to apply to the bounds. If *None*, the margin is handled by . See Also -------- get_zbound get_zlim, set_zlim invert_zaxis, zaxis_inverted",
    "type": "method",
    "file_path": "matplotlib\\lib\\mpl_toolkits\\mplot3d\\axes3d.py",
    "ast_data": "FunctionDef name:set_zbound arg:self arg:lower arg:upper arg:view_margin arguments arg arg arg arg Call"
  },
  {
    "library": "tensorflow",
    "name": "transpose",
    "source_code": "@tf_export('image.transpose', v1=['image.transpose', 'image.transpose_image'])\n@dispatch.add_dispatch_support\ndef transpose(image, name=None):\n    with ops.name_scope(name, 'transpose', [image]):\n        image = ops.convert_to_tensor(image, name='image')\n        image = _AssertAtLeast3DImage(image)\n        shape = image.get_shape()\n        if shape.ndims is None:\n            rank = array_ops.rank(image)\n\n            def f_rank3():\n                return array_ops.transpose(image, [1, 0, 2], name=name)\n\n            def f_rank4():\n                return array_ops.transpose(image, [0, 2, 1, 3], name=name)\n            return tf_cond.cond(math_ops.equal(rank, 3), f_rank3, f_rank4)\n        elif shape.ndims == 3:\n            return array_ops.transpose(image, [1, 0, 2], name=name)\n        elif shape.ndims == 4:\n            return array_ops.transpose(image, [0, 2, 1, 3], name=name)\n        else:\n            raise ValueError(\"'image' (shape %s) must have either 3 or 4 dimensions.\" % shape)",
    "docstring": "Transpose image(s) by swapping the height and width dimension. Usage Example: >>> x = [[[1.0, 2.0, 3.0], ... [4.0, 5.0, 6.0]], ... [[7.0, 8.0, 9.0], ... [10.0, 11.0, 12.0]]] >>> tf.image.transpose(x) Args: image: 4-D Tensor of shape or 3-D Tensor of shape . name: A name for this operation (optional). Returns: If was 4-D, a 4-D float Tensor of shape If was 3-D, a 3-D float Tensor of shape Raises: ValueError: if the shape of not supported. Usage Example: >>> image = [[[1, 2], [3, 4]], ... [[5, 6], [7, 8]], ... [[9, 10], [11, 12]]] >>> image = tf.constant(image) >>> tf.image.transpose(image)",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\image_ops_impl.py",
    "ast_data": "FunctionDef name:transpose arg:image arg:name arguments arg arg With Call Assign Call Assign Call Assign Call If Compare Assign Call FunctionDef name:f_rank3 arguments Return return:yes Call FunctionDef name:f_rank4 arguments Return return:yes Call Return return:yes Call Call If Compare Return return:yes Call If Compare Return return:yes Call Raise Call Call"
  },
  {
    "library": "tensorflow",
    "name": "get_cpu_type",
    "source_code": "def get_cpu_type():\n    key = 'cpu_type'\n    out, err = run_shell_cmd(cmds_all[PLATFORM][key])\n    cpu_detected = out.split(b':')[1].strip()\n    if err and FLAGS.debug:\n        print('Error in detecting CPU type:\\n %s' % str(err))\n    return cpu_detected",
    "docstring": "Retrieves CPU (type) information. Returns: String that is name of the CPU. e.g. 'GenuineIntel'",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\tools\\tensorflow_builder\\config_detector\\config_detector.py",
    "ast_data": "FunctionDef name:get_cpu_type arguments Assign Assign Call Assign Call Call If BoolOp Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "fill",
    "source_code": "@tf_export('fill')\n@dispatch.add_dispatch_support\ndef fill(dims, value, name=None, layout=None):\n    result = d_api.call_with_layout(gen_array_ops.fill, layout=layout, dims=dims, value=value, name=name)\n    shape_util.maybe_set_static_shape(result, dims)\n    return result",
    "docstring": "Creates a tensor filled with a scalar value. See also , , , . This operation creates a tensor of shape and fills it with . For example: >>> tf.fill([2, 3], 9) evaluates at graph runtime and supports dynamic shapes based on other runtime , unlike , which embeds the value as a node. Args: dims: A 1-D sequence of non-negative numbers. Represents the shape of the output . Entries should be of type: , . value: A value to fill the returned . name: Optional string. The name of the output . layout: Optional, . If provided, the result is a [DTensor]( with the provided layout. Returns: A with shape and the same dtype as . Raises: InvalidArgumentError: contains negative entries. NotFoundError: contains non-integer entries. @compatibility(numpy) Similar to . In , more parameters are supported. Passing a number argument as the shape () is valid in for specifying a 1-D shaped result, while TensorFlow does not support this syntax. @end_compatibility",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\array_ops.py",
    "ast_data": "FunctionDef name:fill arg:dims arg:value arg:name arg:layout arguments arg arg arg arg Assign Call Call Return return:yes Call"
  },
  {
    "library": "django",
    "name": "http_date",
    "source_code": "def http_date(epoch_seconds=None):\n    return formatdate(epoch_seconds, usegmt=True)",
    "docstring": "Format the time to match the RFC 5322 date format as specified by RFC 9110 Section 5.6.7. is a floating point number expressed in seconds since the epoch, in UTC - such as that outputted by time.time(). If set to None, it defaults to the current time. Output a string in the format 'Wdy, DD Mon YYYY HH:MM:SS GMT'.",
    "type": "function",
    "file_path": "django\\django\\utils\\http.py",
    "ast_data": "FunctionDef name:http_date arg:epoch_seconds arguments arg Return return:yes Call"
  },
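A quick usage check; `http_date(0)` formats the Unix epoch:

```python
from django.utils.http import http_date

print(http_date(0))  # 'Thu, 01 Jan 1970 00:00:00 GMT'
print(http_date())   # the current time in the same HTTP-date format
```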
  {
    "library": "pytorch",
    "name": "TensorMetadataAndValues",
    "source_code": "@dataclasses.dataclass\nclass TensorMetadataAndValues:\n    tensor_metadata: TensorMetadata\n    values: list[Any]",
    "docstring": "TensorMetadata plus the elements as a list of raw values. Used for hashing inlined constants.",
    "type": "class",
    "file_path": "pytorch\\torch\\_inductor\\codecache.py",
    "ast_data": "ClassDef name:TensorMetadataAndValues"
  },
  {
    "library": "pytorch",
    "name": "_no_grad",
    "source_code": "def _no_grad(func):\n\n    def _no_grad_wrapper(*args, **kwargs):\n        with torch.no_grad():\n            return func(*args, **kwargs)\n    functools.update_wrapper(_no_grad_wrapper, func)\n    return _no_grad_wrapper",
    "docstring": "This wrapper is needed to avoid a circular import when using @torch.no_grad on the exposed functions clip_grad_norm_ and clip_grad_value_ themselves.",
    "type": "function",
    "file_path": "pytorch\\torch\\nn\\utils\\clip_grad.py",
    "ast_data": "FunctionDef name:_no_grad arg:func arguments arg FunctionDef name:_no_grad_wrapper arguments arg arg With Call Return return:yes Call Call Return return:yes"
  },
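The same wrapper pattern sketched standalone, showing why it is equivalent to decorating with `torch.no_grad` directly:

```python
import functools
import torch

def no_grad(func):
    # Run func's body under torch.no_grad(), preserving name and docstring.
    def wrapper(*args, **kwargs):
        with torch.no_grad():
            return func(*args, **kwargs)
    functools.update_wrapper(wrapper, func)
    return wrapper

@no_grad
def zero_(t: torch.Tensor) -> torch.Tensor:
    t.zero_()  # in-place update, not tracked by autograd
    return t

p = torch.randn(3, requires_grad=True)
zero_(p)  # would raise outside no_grad, since p is a leaf requiring grad
```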
  {
    "library": "pytorch",
    "name": "get_hash",
    "source_code": "def get_hash(self, obj: Any) -> str:\n    serialized_data = self.dumps(obj)\n    return sha256_hash(serialized_data)",
    "docstring": "Serialize an object and return a hash of the bytes.",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\codecache.py",
    "ast_data": "FunctionDef name:get_hash arg:self arg:obj arguments arg arg Assign Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "retrieve_from_golden",
    "source_code": "def retrieve_from_golden():\n    out_dict = dict()\n    with open(CUDA_CC_GOLDEN_DIR) as g_file:\n        for line in g_file:\n            line_items = line.split(',')\n            val_list = []\n            for item in line_items[1:]:\n                val_list.append(item.strip('\\n'))\n            out_dict[line_items[0]] = val_list\n    return out_dict",
    "docstring": "Retrieves list of all CUDA compute capability from a golden file. The following file is set as default: Returns: Dictionary that lists of all CUDA compute capability in the following format: {'': ['.', ...], ...} If there are multiple versions available for a given GPU, then it appends all supported versions in the value list (in the key-value pair.)",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\tools\\tensorflow_builder\\config_detector\\data\\cuda_compute_capability.py",
    "ast_data": "FunctionDef name:retrieve_from_golden arguments Assign Call With Call For Assign Call Assign For Call Call Assign Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "_get_threadpool_controller",
    "source_code": "def _get_threadpool_controller():\n    global _threadpool_controller\n    if _threadpool_controller is None:\n        _threadpool_controller = ThreadpoolController()\n    return _threadpool_controller",
    "docstring": "Return the global threadpool controller instance.",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\utils\\parallel.py",
    "ast_data": "FunctionDef name:_get_threadpool_controller arguments If Compare Assign Call Return return:yes"
  },
  {
    "library": "django",
    "name": "choices",
    "source_code": "def choices(self, changelist):\n    raise NotImplementedError('subclasses of ListFilter must provide a choices() method')",
    "docstring": "Return choices ready to be output in the template. is the ChangeList to be displayed.",
    "type": "method",
    "file_path": "django\\django\\contrib\\admin\\filters.py",
    "ast_data": "FunctionDef name:choices arg:self arg:changelist arguments arg arg Raise Call"
  },
  {
    "library": "django",
    "name": "_convert_stream_content",
    "source_code": "def _convert_stream_content(self, mode):\n    new_content_type = bytes if 'b' in mode else str\n    if self._content_type == new_content_type:\n        return\n    content = self.file.getvalue()\n    content = content.encode() if isinstance(content, str) else content.decode()\n    self._content_type = new_content_type\n    self._initialize_stream()\n    self.file.write(content)",
    "docstring": "Convert actual file content according to the opening mode.",
    "type": "method",
    "file_path": "django\\django\\core\\files\\storage\\memory.py",
    "ast_data": "FunctionDef name:_convert_stream_content arg:self arg:mode arguments arg arg Assign Compare If Compare Return return:no Assign Call Assign Call Call Call Assign Call Call"
  },
  {
    "library": "scipy",
    "name": "proc_fpool_g",
    "source_code": "def proc_fpool_g(self):\n    for v in self.fpool:\n        if v.feasible:\n            self.compute_sfield(v)\n    self.fpool = set()",
    "docstring": "Process all field functions with constraints supplied.",
    "type": "method",
    "file_path": "scipy\\scipy\\optimize\\_shgo_lib\\_vertex.py",
    "ast_data": "FunctionDef name:proc_fpool_g arg:self arguments arg For If Call Assign Call"
  },
  {
    "library": "pytorch",
    "name": "_get_bn_configs",
    "source_code": "def _get_bn_configs() -> list[BackendPatternConfig]:\n    observation_type = ObservationType.OUTPUT_USE_DIFFERENT_OBSERVER_AS_INPUT\n    dtype_configs = [qnnpack_default_op_qint8_symmetric_dtype_config, executorch_default_op_quint8_dtype_config]\n    bn_configs = []\n    bn_configs.append(BackendPatternConfig(nn.BatchNorm2d).set_observation_type(observation_type).set_dtype_configs(dtype_configs))\n    return bn_configs",
    "docstring": "Return all configs related to batchnorm.",
    "type": "function",
    "file_path": "pytorch\\torch\\ao\\quantization\\backend_config\\executorch.py",
    "ast_data": "FunctionDef name:_get_bn_configs arguments Assign Assign Assign Call Call Call Call Return return:yes"
  },
  {
    "library": "pandas",
    "name": "hash_array",
    "source_code": "def hash_array(vals: ArrayLike, encoding: str='utf8', hash_key: str=_default_hash_key, categorize: bool=True) -> npt.NDArray[np.uint64]:\n    if not hasattr(vals, 'dtype'):\n        raise TypeError('must pass a ndarray-like')\n    if isinstance(vals, ABCExtensionArray):\n        return vals._hash_pandas_object(encoding=encoding, hash_key=hash_key, categorize=categorize)\n    if not isinstance(vals, np.ndarray):\n        raise TypeError(f'hash_array requires np.ndarray or ExtensionArray, not {type(vals).__name__}. Use hash_pandas_object instead.')\n    return _hash_ndarray(vals, encoding, hash_key, categorize)",
    "docstring": "Given a 1d array, return an array of deterministic integers. Parameters ---------- vals : ndarray or ExtensionArray The input array to hash. encoding : str, default 'utf8' Encoding for data & key when strings. hash_key : str, default _default_hash_key Hash_key for string key to encode. categorize : bool, default True Whether to first categorize object arrays before hashing. This is more efficient when the array contains duplicate values. Returns ------- ndarray[np.uint64, ndim=1] Hashed values, same length as the vals. See Also -------- util.hash_pandas_object : Return a data hash of the Index/Series/DataFrame. util.hash_tuples : Hash an MultiIndex / listlike-of-tuples efficiently. Examples -------- >>> pd.util.hash_array(np.array([1, 2, 3])) array([ 6238072747940578789, 15839785061582574730, 2185194620014831856], dtype=uint64)",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\util\\hashing.py",
    "ast_data": "FunctionDef name:hash_array arg:vals arg:encoding arg:hash_key arg:categorize arguments arg arg arg arg If Call Raise Call If Call Return return:yes Call If Call Raise Call Call Return return:yes Call"
  },
  {
    "library": "django",
    "name": "as_sqlite",
    "source_code": "def as_sqlite(self, compiler, connection, **extra_context):\n    return super().as_sqlite(compiler, connection, function='MIN', **extra_context)",
    "docstring": "Use the MIN function on SQLite.",
    "type": "method",
    "file_path": "django\\django\\db\\models\\functions\\comparison.py",
    "ast_data": "FunctionDef name:as_sqlite arg:self arg:compiler arg:connection arguments arg arg arg arg Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, tpu_cluster_resolver=None, experimental_device_assignment=None, experimental_spmd_xla_partitioning=False):\n    super().__init__(TPUExtended(self, tpu_cluster_resolver, device_assignment=experimental_device_assignment, use_spmd_for_xla_partitioning=experimental_spmd_xla_partitioning))\n    distribute_lib.distribution_strategy_gauge.get_cell('V2').set('TPUStrategy')\n    distribute_lib.distribution_strategy_replica_gauge.get_cell('num_workers').set(self.extended.num_hosts)\n    distribute_lib.distribution_strategy_replica_gauge.get_cell('num_replicas_per_worker').set(self.extended.num_replicas_per_host)\n    self._enable_packed_variable_in_eager_mode = True",
    "docstring": "Synchronous training in TPU donuts or Pods. Args: tpu_cluster_resolver: A instance, which provides information about the TPU cluster. If None, it will assume running on a local TPU worker. experimental_device_assignment: Optional to specify the placement of replicas on the TPU cluster. experimental_spmd_xla_partitioning: If True, enable the SPMD (Single Program Multiple Data) mode in XLA compiler. This flag only affects the performance of XLA compilation and the HBM requirement of the compiled TPU program. Ceveat: if this flag is True, calling will result in a ValueError.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\tpu_strategy.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:tpu_cluster_resolver arg:experimental_device_assignment arg:experimental_spmd_xla_partitioning arguments arg arg arg arg Call Call Call Call Call Call Call Call Call Assign"
  },
  {
    "library": "tensorflow",
    "name": "get_json_type",
    "source_code": "def get_json_type(obj):\n    if hasattr(obj, 'get_config'):\n        return {'class_name': obj.__class__.__name__, 'config': obj.get_config()}\n    if type(obj).__module__ == np.__name__:\n        if isinstance(obj, np.ndarray):\n            return obj.tolist()\n        else:\n            return obj.item()\n    if callable(obj):\n        return obj.__name__\n    if type(obj).__name__ == type.__name__:\n        return obj.__name__\n    if isinstance(obj, tensor_shape.Dimension):\n        return obj.value\n    if isinstance(obj, tensor_shape.TensorShape):\n        return obj.as_list()\n    if isinstance(obj, dtypes.DType):\n        return obj.name\n    if isinstance(obj, collections_abc.Mapping):\n        return dict(obj)\n    if obj is Ellipsis:\n        return {'class_name': '__ellipsis__'}\n    if isinstance(obj, wrapt.ObjectProxy):\n        return obj.__wrapped__\n    raise TypeError(f'Object {obj} is not JSON-serializable. You may implement a `get_config()` method on the class (returning a JSON-serializable dictionary) to make it serializable.')",
    "docstring": "Serializes any object to a JSON-serializable structure. Args: obj: the object to serialize Returns: JSON-serializable structure representing . Raises: TypeError: if cannot be serialized.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\util\\serialization.py",
    "ast_data": "FunctionDef name:get_json_type arg:obj arguments arg If Call Return return:yes Call If Compare Call If Call Return return:yes Call Return return:yes Call If Call Return return:yes If Compare Call Return return:yes If Call Return return:yes If Call Return return:yes Call If Call Return return:yes If Call Return return:yes Call If Compare Return return:yes If Call Return return:yes Raise Call"
  },
  {
    "library": "scrapy",
    "name": "extract_domain",
    "source_code": "def extract_domain(url: str) -> str:\n    o = urlparse(url)\n    if o.scheme == '' and o.netloc == '':\n        o = urlparse('//' + url.lstrip('/'))\n    return o.netloc",
    "docstring": "Extract domain name from URL string",
    "type": "function",
    "file_path": "scrapy\\scrapy\\commands\\genspider.py",
    "ast_data": "FunctionDef name:extract_domain arg:url arguments arg Assign Call If BoolOp Compare Compare Assign Call Call Return return:yes"
  },
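The behavior, replicated standalone for a quick check: scheme-less URLs are re-parsed with a forced `//` network-location prefix so `netloc` is populated:

```python
from urllib.parse import urlparse

def extract_domain(url: str) -> str:
    o = urlparse(url)
    if o.scheme == "" and o.netloc == "":
        o = urlparse("//" + url.lstrip("/"))
    return o.netloc

print(extract_domain("https://example.com/page"))  # example.com
print(extract_domain("example.com/page"))          # example.com
```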
  {
    "library": "pandas",
    "name": "highlight_quantile",
    "source_code": "@Substitution(subset=subset_args, color=coloring_args.format(default='yellow'), props=properties_args)\ndef highlight_quantile(self, subset: Subset | None=None, color: str='yellow', axis: Axis | None=0, q_left: float=0.0, q_right: float=1.0, interpolation: QuantileInterpolation='linear', inclusive: IntervalClosedType='both', props: str | None=None) -> Styler:\n    subset_ = slice(None) if subset is None else subset\n    subset_ = non_reducing_slice(subset_)\n    data = self.data.loc[subset_]\n    quantiles = [q_left, q_right]\n    if axis is None:\n        q = Series(data.to_numpy().ravel()).quantile(q=quantiles, interpolation=interpolation)\n        axis_apply: int | None = None\n    else:\n        axis = self.data._get_axis_number(axis)\n        q = data.quantile(axis=axis, numeric_only=False, q=quantiles, interpolation=interpolation)\n        axis_apply = 1 - axis\n    if props is None:\n        props = f'background-color: {color};'\n    return self.apply(_highlight_between, axis=axis_apply, subset=subset, props=props, left=q.iloc[0], right=q.iloc[1], inclusive=inclusive)",
    "docstring": "Highlight values defined by a quantile with a style. .. versionadded:: 1.3.0 Parameters ---------- %(subset)s %(color)s axis : {0 or 'index', 1 or 'columns', None}, default 0 Axis along which to determine and highlight quantiles. If `` instead of default background coloring >>> df.style.highlight_quantile( ... axis=None, ... q_left=0.2, ... q_right=0.8, ... props=\"font-weight:bold;color:#e83e8c\", ... ) # doctest: +SKIP .. figure:: ../../_static/style/hq_props.png",
    "type": "method",
    "file_path": "pandas\\pandas\\io\\formats\\style.py",
    "ast_data": "FunctionDef name:highlight_quantile arg:self arg:subset arg:color arg:axis arg:q_left arg:q_right arg:interpolation arg:inclusive arg:props arguments arg arg arg arg arg arg arg arg arg Assign Compare Call Assign Call Assign Assign If Compare Assign Call Call Call Call Assign Call Assign Call Assign If Compare Assign Return return:yes Call Call Call"
  },
  {
    "library": "scrapy",
    "name": "start",
    "source_code": "def start(self, stop_after_crawl: bool=True, install_signal_handlers: bool=True) -> None:\n    from twisted.internet import reactor\n    if stop_after_crawl:\n        d = self.join()\n        if d.called:\n            return\n        d.addBoth(self._stop_reactor)\n    resolver_class = load_object(self.settings['DNS_RESOLVER'])\n    resolver = build_from_crawler(resolver_class, self, reactor=reactor)\n    resolver.install_on_reactor()\n    tp = reactor.getThreadPool()\n    tp.adjustPoolsize(maxthreads=self.settings.getint('REACTOR_THREADPOOL_MAXSIZE'))\n    reactor.addSystemEventTrigger('before', 'shutdown', self.stop)\n    if install_signal_handlers:\n        reactor.addSystemEventTrigger('after', 'startup', install_shutdown_handlers, self._signal_shutdown)\n    reactor.run(installSignalHandlers=install_signal_handlers)",
    "docstring": "This method starts a :mod:, adjusts its pool size to :setting:, and installs a DNS cache based on :setting: and :setting:. If `join`. :param bool stop_after_crawl: stop or not the reactor when all crawlers have finished :param bool install_signal_handlers: whether to install the OS signal handlers from Twisted and Scrapy (default: True)",
    "type": "method",
    "file_path": "scrapy\\scrapy\\crawler.py",
    "ast_data": "FunctionDef name:start arg:self arg:stop_after_crawl arg:install_signal_handlers arguments arg arg arg If Assign Call If Return return:no Call Assign Call Assign Call Call Assign Call Call Call Call If Call Call"
  },
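A typical driver script for this `start()`; the spider below is a minimal placeholder:

```python
import scrapy
from scrapy.crawler import CrawlerProcess

class TitleSpider(scrapy.Spider):
    name = "title"
    start_urls = ["https://example.com/"]

    def parse(self, response):
        yield {"title": response.css("title::text").get()}

process = CrawlerProcess(settings={"REACTOR_THREADPOOL_MAXSIZE": 20})
process.crawl(TitleSpider)
process.start()  # blocks; with stop_after_crawl=True the reactor stops at the end
```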
  {
    "library": "scikit-learn",
    "name": "feature_names_in_",
    "source_code": "@property\ndef feature_names_in_(self):\n    return self.steps[0][1].feature_names_in_",
    "docstring": "Names of features seen during first step method.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\pipeline.py",
    "ast_data": "FunctionDef name:feature_names_in_ arg:self arguments arg Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "set_active",
    "source_code": "def set_active(self, index):\n    if index not in range(len(self.labels)):\n        raise ValueError(f'Invalid RadioButton index: {index}')\n    self.value_selected = self.labels[index].get_text()\n    self.index_selected = index\n    button_facecolors = self._buttons.get_facecolor()\n    button_facecolors[:] = colors.to_rgba('none')\n    button_facecolors[index] = colors.to_rgba(self._active_colors[index])\n    self._buttons.set_facecolor(button_facecolors)\n    if self.drawon:\n        if self._useblit:\n            if self._background is not None:\n                self.canvas.restore_region(self._background)\n            self.ax.draw_artist(self._buttons)\n            self.canvas.blit(self.ax.bbox)\n        else:\n            self.canvas.draw()\n    if self.eventson:\n        self._observers.process('clicked', self.labels[index].get_text())",
    "docstring": "Select button with number *index*. Callbacks will be triggered if :attr: is True. Parameters ---------- index : int The index of the button to activate. Raises ------ ValueError If the index is invalid.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\widgets.py",
    "ast_data": "FunctionDef name:set_active arg:self arg:index arguments arg arg If Compare Call Call Raise Call Assign Call Assign Assign Call Assign Call Assign Call Call If If If Compare Call Call Call Call If Call Call"
  },
  {
    "library": "django",
    "name": "ManagementForm",
    "source_code": "class ManagementForm(Form):\n    TOTAL_FORMS = IntegerField(widget=HiddenInput)\n    INITIAL_FORMS = IntegerField(widget=HiddenInput)\n    MIN_NUM_FORMS = IntegerField(required=False, widget=HiddenInput)\n    MAX_NUM_FORMS = IntegerField(required=False, widget=HiddenInput)\n\n    def clean(self):\n        cleaned_data = super().clean()\n        cleaned_data.setdefault(TOTAL_FORM_COUNT, 0)\n        cleaned_data.setdefault(INITIAL_FORM_COUNT, 0)\n        return cleaned_data",
    "docstring": "Keep track of how many form instances are displayed on the page. If adding new forms via JavaScript, you should increment the count field of this form as well.",
    "type": "class",
    "file_path": "django\\django\\forms\\formsets.py",
    "ast_data": "ClassDef name:ManagementForm Assign Call Assign Call Assign Call Assign Call FunctionDef name:clean arg:self arguments arg Assign Call Call Call Call Return return:yes"
  },
  {
    "library": "numpy",
    "name": "_block_info_recursion",
    "source_code": "def _block_info_recursion(arrays, max_depth, result_ndim, depth=0):\n    if depth < max_depth:\n        shapes, slices, arrays = zip(*[_block_info_recursion(arr, max_depth, result_ndim, depth + 1) for arr in arrays])\n        axis = result_ndim - max_depth + depth\n        shape, slice_prefixes = _concatenate_shapes(shapes, axis)\n        slices = [slice_prefix + the_slice for slice_prefix, inner_slices in zip(slice_prefixes, slices) for the_slice in inner_slices]\n        arrays = functools.reduce(operator.add, arrays)\n        return (shape, slices, arrays)\n    else:\n        arr = _atleast_nd(arrays, result_ndim)\n        return (arr.shape, [()], [arr])",
    "docstring": "Returns the shape of the final array, along with a list of slices and a list of arrays that can be used for assignment inside the new array Parameters ---------- arrays : nested list of arrays The arrays to check max_depth : list of int The number of nested lists result_ndim : int The number of dimensions in thefinal array. Returns ------- shape : tuple of int The shape that the final array will take on. slices: list of tuple of slices The slices into the full array required for assignment. These are required to be prepended with `` to obtain to correct final index. arrays: list of ndarray The data to assign to each slice of the full array",
    "type": "function",
    "file_path": "numpy\\numpy\\_core\\shape_base.py",
    "ast_data": "FunctionDef name:_block_info_recursion arg:arrays arg:max_depth arg:result_ndim arg:depth arguments arg arg arg arg If Compare Assign Call Call Assign Assign Call Assign Call Assign Call Return return:yes Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "add_op",
    "source_code": "def add_op(self, graph_op_creation_digest):\n    if graph_op_creation_digest.op_name in self._op_by_name:\n        raise ValueError('Duplicate op name: %s (op type: %s)' % (graph_op_creation_digest.op_name, graph_op_creation_digest.op_type))\n    self._op_by_name[graph_op_creation_digest.op_name] = graph_op_creation_digest",
    "docstring": "Add an op creation data object. Args: graph_op_creation_digest: A GraphOpCreationDigest data object describing the creation of an op inside this graph.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\debug\\lib\\debug_events_reader.py",
    "ast_data": "FunctionDef name:add_op arg:self arg:graph_op_creation_digest arguments arg arg If Compare Raise Call Assign"
  },
  {
    "library": "pytorch",
    "name": "mesh_scatter",
    "source_code": "def mesh_scatter(output: torch.Tensor, scatter_list: list[torch.Tensor], mesh: DeviceMesh, mesh_dim: int=0, async_op: bool=False, *, group_src: int=0) -> Optional[Work]:\n    if output.is_meta:\n        return None\n    dim_group = mesh.get_group(mesh_dim)\n    assert isinstance(dim_group, ProcessGroup)\n    if group_src == get_rank(dim_group):\n        fut = scatter(output, scatter_list=scatter_list, group=dim_group, async_op=async_op, group_src=group_src)\n    else:\n        fut = scatter(output, scatter_list=None, group=dim_group, async_op=async_op, group_src=group_src)\n    return fut",
    "docstring": "scatter a list of tensors to a device mesh dimension. We by default use the first rank of the mesh dimension as the source of truth, i.e for a 2d mesh [[0, 1], [2, 3]], if we scatter on mesh_dim = 1, we will scatter the tensor list on rank 0 to rank 0/1, and tensor list on rank 2 to rank 2/3. Args: output (torch.Tensor): the tensor to receive the scattered list. scatter_list (List[torch.Tensor]): the tensor list to be scattered. mesh_dim (int, optional): indicate which mesh dimension we want to scatter on, we by default choose the first rank on the mesh dimension as source of truth. Keyword args: group_src (int, optional): the group rank of the source data for the logical/global tensor, on the specific mesh dimension. By default, we use `Work` object",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\tensor\\_collective_utils.py",
    "ast_data": "FunctionDef name:mesh_scatter arg:output arg:scatter_list arg:mesh arg:mesh_dim arg:async_op arguments arg arg arg arg arg arg If Return return:no Assign Call Call If Compare Call Assign Call Assign Call Return return:yes"
  },
  {
    "library": "pandas",
    "name": "_pprint_seq",
    "source_code": "def _pprint_seq(seq: ListLike, _nest_lvl: int=0, max_seq_items: int | None=None, **kwds: Any) -> str:\n    if isinstance(seq, set):\n        fmt = '{{{body}}}'\n    elif isinstance(seq, frozenset):\n        fmt = 'frozenset({{{body}}})'\n    else:\n        fmt = '[{body}]' if hasattr(seq, '__setitem__') else '({body})'\n    if max_seq_items is False:\n        max_items = None\n    else:\n        max_items = max_seq_items or get_option('max_seq_items') or len(seq)\n    s = iter(seq)\n    r = []\n    max_items_reached = False\n    for i, item in enumerate(s):\n        if max_items is not None and i >= max_items:\n            max_items_reached = True\n            break\n        r.append(pprint_thing(item, _nest_lvl + 1, max_seq_items=max_seq_items, **kwds))\n    body = ', '.join(r)\n    if max_items_reached:\n        body += ', ...'\n    elif isinstance(seq, tuple) and len(seq) == 1:\n        body += ','\n    return fmt.format(body=body)",
    "docstring": "internal. pprinter for iterables. you should probably use pprint_thing() rather than calling this directly. bounds length of printed sequence, depending on options",
    "type": "function",
    "file_path": "pandas\\pandas\\io\\formats\\printing.py",
    "ast_data": "FunctionDef name:_pprint_seq arg:seq arg:_nest_lvl arg:max_seq_items arguments arg arg arg arg If Call Assign If Call Assign Assign Call If Compare Assign Assign BoolOp Call Call Assign Call Assign Assign For Call If BoolOp Compare Compare Assign Call Call Assign Call If If BoolOp Call Compare Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_binary_per_element_op_flops",
    "source_code": "def _binary_per_element_op_flops(graph, node, ops_per_element=1):\n    out_shape = graph_util.tensor_shape_from_node_def_name(graph, node.name)\n    out_shape.assert_is_fully_defined()\n    return ops.OpStats('flops', out_shape.num_elements() * ops_per_element)",
    "docstring": "Common code which compute flops for binary operations.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\profiler\\internal\\flops_registry.py",
    "ast_data": "FunctionDef name:_binary_per_element_op_flops arg:graph arg:node arg:ops_per_element arguments arg arg arg Assign Call Call Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "benchmark_in_sub_process",
    "source_code": "def benchmark_in_sub_process(choices: list[TritonTemplateCaller]) -> dict[TritonTemplateCaller, float]:\n    return get_tuning_process_pool().benchmark(choices)",
    "docstring": "Do benchmarking in a subprocess and return the perf number (latency).",
    "type": "function",
    "file_path": "pytorch\\torch\\_inductor\\autotune_process.py",
    "ast_data": "FunctionDef name:benchmark_in_sub_process arg:choices arguments arg Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "ndtri",
    "source_code": "@tf_export('math.ndtri')\n@dispatch.register_unary_elementwise_api\n@dispatch.add_dispatch_support\ndef ndtri(x, name=None):\n    with ops.name_scope(name, 'ndtri', [x]):\n        return gen_math_ops.ndtri(x)",
    "docstring": "Compute quantile of Standard Normal. Args: x: with type or . name: A name for the operation (optional). Returns: Inverse error function of .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\math_ops.py",
    "ast_data": "FunctionDef name:ndtri arg:x arg:name arguments arg arg With Call Return return:yes Call Call"
  },
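For reference, `tf.math.ndtri` is the public entry point for the wrapper above; a small usage check (assuming TensorFlow is installed):

```python
import tensorflow as tf

# ndtri is the standard-normal quantile function (inverse CDF),
# so ndtri(0.5) == 0.0 and the tails are symmetric.
p = tf.constant([0.1, 0.5, 0.9], dtype=tf.float32)
q = tf.math.ndtri(p)
print(q.numpy())  # approximately [-1.2816, 0.0, 1.2816]
```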
  {
    "library": "pandas",
    "name": "_has_no_reference_block",
    "source_code": "def _has_no_reference_block(self, blkno: int) -> bool:\n    return not self.blocks[blkno].refs.has_reference()",
    "docstring": "Check for block if it has references. (whether it references another array or is itself being referenced) Returns True if the block has no references.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\internals\\managers.py",
    "ast_data": "FunctionDef name:_has_no_reference_block arg:self arg:blkno arguments arg arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "__contains__",
    "source_code": "def __contains__(self, element: Any, /) -> bool:\n    if has_torch_function_unary(self):\n        return handle_torch_function(Tensor.__contains__, (self,), self, element)\n    if isinstance(element, (torch.Tensor, Number, torch.SymInt, torch.SymFloat, torch.SymBool)):\n        return bool((element == self).any().item())\n    raise RuntimeError(f'Tensor.__contains__ only supports Tensor or scalar, but you passed in a {type(element)}.')",
    "docstring": "Check if is present in tensor Args: element (Tensor or scalar): element to be checked for presence in current tensor\"",
    "type": "method",
    "file_path": "pytorch\\torch\\_tensor.py",
    "ast_data": "FunctionDef name:__contains__ arguments arg arg If Call Return return:yes Call If Call Return return:yes Call Call Call Compare Raise Call Call"
  },
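The method above is what backs Python's `in` operator on tensors; a quick demonstration (assuming PyTorch is installed):

```python
import torch

t = torch.tensor([[1, 2], [3, 4]])
print(3 in t)   # True: computed as (element == self).any()
print(5 in t)   # False
# Operands that are neither tensors nor scalars raise RuntimeError:
try:
    [1, 2] in t
except RuntimeError as e:
    print("rejected:", type(e).__name__)
```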
  {
    "library": "django",
    "name": "enable_constraint_checking",
    "source_code": "def enable_constraint_checking(self):\n    pass",
    "docstring": "Backends can implement as needed to re-enable foreign key constraint checking.",
    "type": "method",
    "file_path": "django\\django\\db\\backends\\base\\base.py",
    "ast_data": "FunctionDef name:enable_constraint_checking arg:self arguments arg"
  },
  {
    "library": "kornia",
    "name": "__repr__",
    "source_code": "def __repr__(self) -> str:\n    repr = f'gain={self.gain}, sign={self.sign}'\n    return repr",
    "docstring": "Return a string representation of the object.",
    "type": "method",
    "file_path": "kornia\\kornia\\augmentation\\random_generator\\_2d\\linear_illumination.py",
    "ast_data": "FunctionDef name:__repr__ arg:self arguments arg Assign Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "main",
    "source_code": "def main() -> None:\n    iter_method_definitions = get_method_definitions(iterDP_file_path, iterDP_files_to_exclude, iterDP_deprecated_files, 'IterDataPipe', iterDP_method_to_special_output_type)\n    map_method_definitions = get_method_definitions(mapDP_file_path, mapDP_files_to_exclude, mapDP_deprecated_files, 'MapDataPipe', mapDP_method_to_special_output_type)\n    path = Path(__file__).absolute().parent\n    fm = FileManager(install_dir=path, template_dir=path, dry_run=False)\n    fm.write_with_template('datapipe.pyi', 'datapipe.pyi.in', lambda: {'IterDataPipeMethods': iter_method_definitions, 'MapDataPipeMethods': map_method_definitions})",
    "docstring": "# Inject file into template datapipe.pyi.in. TODO: The current implementation of this script only generates interfaces for built-in methods. To generate interface for user-defined DataPipes, consider changing .",
    "type": "function",
    "file_path": "pytorch\\torch\\utils\\data\\datapipes\\gen_pyi.py",
    "ast_data": "FunctionDef name:main arguments Assign Call Assign Call Assign Call Call Assign Call Call arguments"
  },
  {
    "library": "scikit-learn",
    "name": "_ledoit_wolf",
    "source_code": "def _ledoit_wolf(X, *, assume_centered, block_size):\n    if len(X.shape) == 2 and X.shape[1] == 1:\n        if not assume_centered:\n            X = X - X.mean()\n        return (np.atleast_2d((X ** 2).mean()), 0.0)\n    n_features = X.shape[1]\n    shrinkage = ledoit_wolf_shrinkage(X, assume_centered=assume_centered, block_size=block_size)\n    emp_cov = empirical_covariance(X, assume_centered=assume_centered)\n    mu = np.sum(np.trace(emp_cov)) / n_features\n    shrunk_cov = (1.0 - shrinkage) * emp_cov\n    shrunk_cov.flat[::n_features + 1] += shrinkage * mu\n    return (shrunk_cov, shrinkage)",
    "docstring": "Estimate the shrunk Ledoit-Wolf covariance matrix.",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\covariance\\_shrunk_covariance.py",
    "ast_data": "FunctionDef name:_ledoit_wolf arg:X arguments arg arg arg If BoolOp Compare Call Compare If Assign Call Return return:yes Call Call Assign Assign Call Assign Call Assign Call Call Assign Return return:yes"
  },
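The private helper above backs the public `sklearn.covariance.ledoit_wolf` function; a short usage sketch (assuming scikit-learn is installed):

```python
import numpy as np
from sklearn.covariance import ledoit_wolf

rng = np.random.default_rng(0)
X = rng.normal(size=(200, 5))

# Returns the shrunk covariance matrix and the shrinkage coefficient in [0, 1].
shrunk_cov, shrinkage = ledoit_wolf(X)
print(shrunk_cov.shape, round(float(shrinkage), 3))  # (5, 5) and a small value
```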
  {
    "library": "pytorch",
    "name": "AlphaDropout",
    "source_code": "class AlphaDropout(_DropoutNd):\n\n    def forward(self, input: Tensor) -> Tensor:\n        return F.alpha_dropout(input, self.p, self.training)",
    "docstring": "Applies Alpha Dropout over the input. Alpha Dropout is a type of Dropout that maintains the self-normalizing property. For an input with zero mean and unit standard deviation, the output of Alpha Dropout maintains the original mean and standard deviation of the input. Alpha Dropout goes hand-in-hand with SELU activation function, which ensures that the outputs have zero mean and unit standard deviation. During training, it randomly masks some of the elements of the input tensor with probability *p* using samples from a bernoulli distribution. The elements to masked are randomized on every forward call, and scaled and shifted to maintain zero mean and unit standard deviation. During evaluation the module simply computes an identity function. More details can be found in the paper _ . Args: p (float): probability of an element to be dropped. Default: 0.5 inplace (bool, optional): If set to `(*)(*)`. Output is of the same shape as input Examples:: >>> m = nn.AlphaDropout(p=0.2) >>> input = torch.randn(20, 16) >>> output = m(input) .. _Self-Normalizing Neural Networks:",
    "type": "class",
    "file_path": "pytorch\\torch\\nn\\modules\\dropout.py",
    "ast_data": "ClassDef name:AlphaDropout FunctionDef name:forward arg:self arg:input arguments arg arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "run",
    "source_code": "def run(gemma_lm, max_len):\n    start = time.time()\n    output = gemma_lm.generate(_QUERY, max_length=max_len + 1)\n    num_actual_output_tokens = len(output.split(' '))\n    warmup_time = (time.time() - start) * 1000\n    if _VERBOSE:\n        print('=== Max len: %d ===' % max_len)\n        print('Warmup: %lf ms' % warmup_time)\n        print('Output:\\n%s\\n' % output)\n    times = []\n    for i in range(1, 6):\n        start = time.time()\n        output = gemma_lm.generate(_QUERY, max_length=max_len + 1)\n        assert num_actual_output_tokens == len(output.split(' '))\n        elapsed_time = (time.time() - start) * 1000\n        times.append(elapsed_time)\n        if _VERBOSE:\n            print('%d: %lf ms' % (i, elapsed_time))\n    mean, diff = compute_stats(times)\n    if _VERBOSE:\n        print('Mean: %lf ± %d%% ms\\n' % (mean, diff))\n    return (mean, diff, num_actual_output_tokens)",
    "docstring": "Benchmarks inferences with at most output tokens. Args: gemma_lm: The Gemma2 Keras model. max_len: The maximum number of output tokens per one inference. Returns: mean ± %diff and the actual number of output tokens generated per inference.",
    "type": "function",
    "file_path": "tensorflow\\third_party\\xla\\xla\\backends\\cpu\\benchmarks\\e2e\\gemma2\\keras\\benchmark.py",
    "ast_data": "FunctionDef name:run arg:gemma_lm arg:max_len arguments arg arg Assign Call Assign Call Assign Call Call Assign Call If Call Call Call Assign For Call Assign Call Assign Call Compare Call Call Assign Call Call If Call Assign Call If Call Return return:yes"
  },
  {
    "library": "pandas",
    "name": "isocalendar",
    "source_code": "def isocalendar(self) -> DataFrame:\n    return self._get_values().isocalendar().set_index(self._parent.index)",
    "docstring": "Calculate year, week, and day according to the ISO 8601 standard. Returns ------- DataFrame With columns year, week and day. See Also -------- Timestamp.isocalendar : Function return a 3-tuple containing ISO year, week number, and weekday for the given Timestamp object. datetime.date.isocalendar : Return a named tuple object with three components: year, week and weekday. Examples -------- >>> ser = pd.to_datetime(pd.Series([\"2010-01-01\", pd.NaT])) >>> ser.dt.isocalendar() year week day 0 2009 53 5 1 >>> ser.dt.isocalendar().week 0 53 1 Name: week, dtype: UInt32",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\indexes\\accessors.py",
    "ast_data": "FunctionDef name:isocalendar arg:self arguments arg Return return:yes Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "unpack_sequence",
    "source_code": "def unpack_sequence(packed_sequences: PackedSequence) -> list[Tensor]:\n    padded_sequences, lengths = pad_packed_sequence(packed_sequences, batch_first=True)\n    unpacked_sequences = unpad_sequence(padded_sequences, lengths, batch_first=True)\n    return unpacked_sequences",
    "docstring": "Unpack PackedSequence into a list of variable length Tensors. `Tensor` objects",
    "type": "function",
    "file_path": "pytorch\\torch\\nn\\utils\\rnn.py",
    "ast_data": "FunctionDef name:unpack_sequence arg:packed_sequences arguments arg Assign Call Assign Call Return return:yes"
  },
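`unpack_sequence` round-trips with `pack_sequence`; a minimal check (assuming a reasonably recent PyTorch where `unpack_sequence` is available):

```python
import torch
from torch.nn.utils.rnn import pack_sequence, unpack_sequence

seqs = [torch.tensor([1.0, 2.0, 3.0]), torch.tensor([4.0, 5.0])]
packed = pack_sequence(seqs)        # lengths already sorted descending
restored = unpack_sequence(packed)  # pad, then strip the padding back off
for a, b in zip(seqs, restored):
    assert torch.equal(a, b)
```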
  {
    "library": "scikit-learn",
    "name": "_estimate_precisions",
    "source_code": "def _estimate_precisions(self, nk, xk, sk):\n    {'full': self._estimate_wishart_full, 'tied': self._estimate_wishart_tied, 'diag': self._estimate_wishart_diag, 'spherical': self._estimate_wishart_spherical}[self.covariance_type](nk, xk, sk)\n    self.precisions_cholesky_ = _compute_precision_cholesky(self.covariances_, self.covariance_type)",
    "docstring": "Estimate the precisions parameters of the precision distribution. Parameters ---------- nk : array-like of shape (n_components,) xk : array-like of shape (n_components, n_features) sk : array-like The shape depends of : 'full' : (n_components, n_features, n_features) 'tied' : (n_features, n_features) 'diag' : (n_components, n_features) 'spherical' : (n_components,)",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\mixture\\_bayesian_mixture.py",
    "ast_data": "FunctionDef name:_estimate_precisions arg:self arg:nk arg:xk arg:sk arguments arg arg arg arg Call Assign Call"
  },
  {
    "library": "django",
    "name": "accepted_type",
    "source_code": "def accepted_type(self, media_type):\n    return next((accepted_type for accepted_type in self.accepted_types if accepted_type.match(media_type)), None)",
    "docstring": "Return the preferred MediaType instance which matches the given media type.",
    "type": "method",
    "file_path": "django\\django\\http\\request.py",
    "ast_data": "FunctionDef name:accepted_type arg:self arg:media_type arguments arg arg Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "get_config",
    "source_code": "def get_config(self):\n    return {}",
    "docstring": "Returns the configuration of the initializer as a JSON-serializable dict. Returns: A JSON-serializable Python dict.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\init_ops_v2.py",
    "ast_data": "FunctionDef name:get_config arg:self arguments arg Return return:no"
  },
  {
    "library": "cherrypy",
    "name": "log_hooks",
    "source_code": "def log_hooks(debug=False):\n    request = cherrypy.serving.request\n    msg = []\n    from cherrypy import _cprequest\n    points = _cprequest.hookpoints\n    for k in request.hooks.keys():\n        if k not in points:\n            points.append(k)\n    for k in points:\n        msg.append('    %s:' % k)\n        v = request.hooks.get(k, [])\n        v.sort()\n        for h in v:\n            msg.append('        %r' % h)\n    cherrypy.log('\\nRequest Hooks for ' + cherrypy.url() + ':\\n' + '\\n'.join(msg), 'HTTP')",
    "docstring": "Write request.hooks to the cherrypy error log.",
    "type": "function",
    "file_path": "cherrypy\\cherrypy\\lib\\cptools.py",
    "ast_data": "FunctionDef name:log_hooks arg:debug arguments arg Assign Assign Assign For Call If Compare Call For Call Assign Call Call For Call Call Call Call"
  },
  {
    "library": "scipy",
    "name": "pproc_fpool_g",
    "source_code": "def pproc_fpool_g(self):\n    self.wfield.func\n    fpool_l = []\n    for v in self.fpool:\n        if v.feasible:\n            fpool_l.append(v.x_a)\n        else:\n            v.f = np.inf\n    F = self._mapwrapper(self.wfield.func, fpool_l)\n    for va, f in zip(fpool_l, F):\n        vt = tuple(va)\n        self[vt].f = f\n        self.nfev += 1\n    self.fpool = set()",
    "docstring": "Process all field functions with constraints supplied in parallel.",
    "type": "method",
    "file_path": "scipy\\scipy\\optimize\\_shgo_lib\\_vertex.py",
    "ast_data": "FunctionDef name:pproc_fpool_g arg:self arguments arg Assign For If Call Assign Assign Call For Call Assign Call Assign Assign Call"
  },
  {
    "library": "pandas",
    "name": "to_markdown",
    "source_code": "@doc(klass=_shared_doc_kwargs['klass'], storage_options=_shared_docs['storage_options'], examples=dedent('Examples\\n            --------\\n            >>> s = pd.Series([\"elk\", \"pig\", \"dog\", \"quetzal\"], name=\"animal\")\\n            >>> print(s.to_markdown())\\n            |    | animal   |\\n            |---:|:---------|\\n            |  0 | elk      |\\n            |  1 | pig      |\\n            |  2 | dog      |\\n            |  3 | quetzal  |\\n\\n            Output markdown with a tabulate option.\\n\\n            >>> print(s.to_markdown(tablefmt=\"grid\"))\\n            +----+----------+\\n            |    | animal   |\\n            +====+==========+\\n            |  0 | elk      |\\n            +----+----------+\\n            |  1 | pig      |\\n            +----+----------+\\n            |  2 | dog      |\\n            +----+----------+\\n            |  3 | quetzal  |\\n            +----+----------+'))\n@deprecate_nonkeyword_arguments(version='4.0', allowed_args=['self', 'buf'], name='to_markdown')\ndef to_markdown(self, buf: IO[str] | None=None, mode: str='wt', index: bool=True, storage_options: StorageOptions | None=None, **kwargs) -> str | None:\n    return self.to_frame().to_markdown(buf, mode=mode, index=index, storage_options=storage_options, **kwargs)",
    "docstring": "Print {klass} in Markdown-friendly format. Parameters ---------- buf : str, Path or StringIO-like, optional, default None Buffer to write to. If None, the output is returned as a string. mode : str, optional Mode in which file is opened, \"wt\" by default. index : bool, optional, default True Add index (row) labels. {storage_options} **kwargs These parameters will be passed to _. Returns ------- str {klass} in Markdown-friendly format. See Also -------- Series.to_frame : Rrite a text representation of object to the system clipboard. Series.to_latex : Render Series to LaTeX-formatted table. Notes ----- Requires the _ package. {examples}",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\series.py",
    "ast_data": "FunctionDef name:to_markdown arg:self arg:buf arg:mode arg:index arg:storage_options arguments arg arg arg arg arg arg Return return:yes Call Call Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "reduce_std",
    "source_code": "@dispatch.dispatch_for_api(math_ops.reduce_std)\ndef reduce_std(input_tensor: ragged_tensor.Ragged, axis=None, keepdims=False, name=None):\n    with ops.name_scope(name, 'RaggedReduceStd', [input_tensor, axis]):\n        variance = reduce_variance(input_tensor, axis=axis, keepdims=keepdims)\n        return math_ops.sqrt(variance)",
    "docstring": "For docs, see: _RAGGED_REDUCE_DOCSTRING.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\ragged_math_ops.py",
    "ast_data": "FunctionDef name:reduce_std arg:input_tensor arg:axis arg:keepdims arg:name arguments arg arg arg arg With Call Assign Call Return return:yes Call Call"
  },
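Because of the dispatch registration above, `tf.math.reduce_std` works directly on ragged tensors; a small example (assuming TensorFlow is installed):

```python
import tensorflow as tf

rt = tf.ragged.constant([[1.0, 2.0, 3.0], [4.0]])
# Per-row standard deviation: sqrt(reduce_variance(...)) under the hood.
print(tf.math.reduce_std(rt, axis=1).numpy())  # approximately [0.8165, 0.0]
```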
  {
    "library": "tensorflow",
    "name": "Initializer",
    "source_code": "class Initializer:\n\n    def __call__(self, shape, dtype=None, partition_info=None):\n        raise NotImplementedError\n\n    def get_config(self):\n        return {}\n\n    @classmethod\n    def from_config(cls, config):\n        return cls(**config)",
    "docstring": "Initializer base class: all initializers inherit from this class.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\init_ops.py",
    "ast_data": "ClassDef name:Initializer FunctionDef name:__call__ arg:self arg:shape arg:dtype arg:partition_info arguments arg arg arg arg Raise FunctionDef name:get_config arg:self arguments arg Return return:no FunctionDef name:from_config arg:cls arg:config arguments arg arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "lower_pt2e_quantized_to_x86",
    "source_code": "def lower_pt2e_quantized_to_x86(model: torch.fx.GraphModule, example_inputs: tuple[torch.Tensor, ...]) -> torch.fx.GraphModule:\n\n    def _post_autograd_decomp_table():\n        decomp_table = torch.export.default_decompositions()\n        for k in list(decomp_table.keys()):\n            if not torch._export.utils._is_cia_op(k):\n                del decomp_table[k]\n        return decomp_table\n\n    def _node_replace(m):\n        aten = torch.ops.aten\n        g = m.graph\n        for node in g.nodes:\n            if node.target == aten.t.default:\n                with g.inserting_before(node):\n                    x = node.args[0]\n                    dims = [1, 0]\n                    perm_node = g.call_function(aten.permute.default, args=(x, dims))\n                    node.replace_all_uses_with(perm_node)\n                    g.erase_node(node)\n        g.lint()\n        m.recompile()\n    lowered_model = torch.export.export_for_training(model, example_inputs, strict=True).run_decompositions(_post_autograd_decomp_table()).module()\n    _node_replace(lowered_model)\n    freezing_passes(lowered_model, example_inputs)\n    constant_fold(lowered_model)\n    return lowered_model",
    "docstring": "Lower a PT2E-qantized model to x86 backend. Args: * (torch.fx.GraphModule): a model quantized by PT2E quantization flow. * (tuple[torch.Tensor, ...]): example inputs for the model. Return: A GraphModule lowered to x86 backend.",
    "type": "function",
    "file_path": "pytorch\\torch\\ao\\quantization\\pt2e\\lowering.py",
    "ast_data": "FunctionDef name:lower_pt2e_quantized_to_x86 arg:model arg:example_inputs arguments arg arg FunctionDef name:_post_autograd_decomp_table arguments Assign Call For Call Call If Call Return return:yes FunctionDef name:_node_replace arg:m arguments arg Assign Assign For If Compare With Call Assign Assign Assign Call Call Call Call Call Assign Call Call Call Call Call Call Call Return return:yes"
  },
  {
    "library": "django",
    "name": "DateDetailView",
    "source_code": "class DateDetailView(SingleObjectTemplateResponseMixin, BaseDateDetailView):\n    template_name_suffix = '_detail'",
    "docstring": "Detail view of a single object on a single date; this differs from the standard DetailView by accepting a year/month/day in the URL.",
    "type": "class",
    "file_path": "django\\django\\views\\generic\\dates.py",
    "ast_data": "ClassDef name:DateDetailView Assign"
  },
  {
    "library": "cryptography",
    "name": "get_public",
    "source_code": "def get_public(self, data: memoryview) -> tuple[tuple[memoryview, memoryview], memoryview]:\n    curve, data = _get_sshstr(data)\n    point, data = _get_sshstr(data)\n    if curve != self.ssh_curve_name:\n        raise ValueError('Curve name mismatch')\n    if point[0] != 4:\n        raise NotImplementedError('Need uncompressed point')\n    return ((curve, point), data)",
    "docstring": "ECDSA public fields",
    "type": "method",
    "file_path": "cryptography\\src\\cryptography\\hazmat\\primitives\\serialization\\ssh.py",
    "ast_data": "FunctionDef name:get_public arg:self arg:data arguments arg arg Assign Call Assign Call If Compare Raise Call If Compare Raise Call Return return:yes"
  },
  {
    "library": "kornia",
    "name": "from_matrix",
    "source_code": "@classmethod\ndef from_matrix(cls, matrix: Tensor) -> So3:\n    return cls(Quaternion.from_matrix(matrix))",
    "docstring": "Create So3 from a rotation matrix. Args: matrix: the rotation matrix to convert of shape :math:. Example: >>> m = torch.eye(3) >>> s = So3.from_matrix(m) >>> s Parameter containing: tensor([1., 0., 0., 0.], requires_grad=True)",
    "type": "method",
    "file_path": "kornia\\kornia\\geometry\\liegroup\\so3.py",
    "ast_data": "FunctionDef name:from_matrix arg:cls arg:matrix arguments arg arg Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_implements_predict_batch_hooks",
    "source_code": "def _implements_predict_batch_hooks(self):\n    return not generic_utils.is_default(self.on_predict_batch_begin) or not generic_utils.is_default(self.on_predict_batch_end)",
    "docstring": "Determines if this Callback should be called for each predict batch.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\callbacks.py",
    "ast_data": "FunctionDef name:_implements_predict_batch_hooks arg:self arguments arg Return return:yes BoolOp Call Call"
  },
  {
    "library": "tensorflow",
    "name": "restore_or_initialize",
    "source_code": "def restore_or_initialize(self):\n    if self._latest_checkpoint is not None:\n        self._checkpoint.restore(self._latest_checkpoint)\n        if self._checkpoint_interval is not None:\n            self._last_checkpoint_step = _evaluate(self._step_counter)\n        return self._latest_checkpoint\n    if self._init_fn is not None:\n        self._init_fn()\n        logging.info('Customized initialization is done through the passed `init_fn`.')\n    return None",
    "docstring": "Restore items in from the latest checkpoint file. This method will first try to restore from the most recent checkpoint in . If no checkpoints exist in , and is specified, this method will call to do customized initialization. This can be used to support initialization from pretrained models. Note that unlike , this method doesn't return a load status object that users can run assertions on (e.g. assert_consumed()). Thus to run assertions, users should directly use method. Returns: The restored checkpoint path if the latest checkpoint is found and restored. Otherwise None.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\checkpoint\\checkpoint_management.py",
    "ast_data": "FunctionDef name:restore_or_initialize arg:self arguments arg If Compare Call If Compare Assign Call Return return:yes If Compare Call Call Return return:no"
  },
  {
    "library": "pytorch",
    "name": "context_decorator",
    "source_code": "def context_decorator(ctx, func):\n    assert not (callable(ctx) and hasattr(ctx, '__enter__')), f'Passed in {ctx} is both callable and also a valid context manager (has __enter__), making it ambiguous which interface to use.  If you intended to pass a context manager factory, rewrite your call as context_decorator(lambda: ctx()); if you intended to pass a context manager directly, rewrite your call as context_decorator(lambda: ctx)'\n    if not callable(ctx):\n\n        def ctx_factory():\n            return ctx\n    else:\n        ctx_factory = ctx\n    if inspect.isclass(func):\n        raise RuntimeError('Cannot decorate classes; it is ambiguous whether or not only the constructor or all methods should have the context manager applied; additionally, decorating a class at definition-site will prevent use of the identifier as a conventional type.  To specify which methods to decorate, decorate each of them individually.')\n    if inspect.isgeneratorfunction(func):\n        return _wrap_generator(ctx_factory, func)\n\n    @functools.wraps(func)\n    def decorate_context(*args, **kwargs):\n        with ctx_factory():\n            return func(*args, **kwargs)\n    return decorate_context",
    "docstring": "Like contextlib.ContextDecorator. But with the following differences: 1. Is done by wrapping, rather than inheritance, so it works with context managers that are implemented from C and thus cannot easily inherit from Python classes 2. Wraps generators in the intuitive way (c.f. 3. Errors out if you try to wrap a class, because it is ambiguous whether or not you intended to wrap only the constructor The input argument can either be a context manager (in which case it must be a multi-shot context manager that can be directly invoked multiple times) or a callable that produces a context manager.",
    "type": "function",
    "file_path": "pytorch\\torch\\utils\\_contextlib.py",
    "ast_data": "FunctionDef name:context_decorator arg:ctx arg:func arguments arg arg BoolOp Call Call If Call FunctionDef name:ctx_factory arguments Return return:yes Assign If Call Raise Call If Call Return return:yes Call FunctionDef name:decorate_context arguments arg arg With Call Return return:yes Call Call Return return:yes"
  },
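A small demonstration of the factory path: passing a callable that produces a fresh context manager per call. The `announce` and `add` helpers are illustrative, and `context_decorator` lives in a private torch module, so treat this as a sketch:

```python
import contextlib
from torch.utils._contextlib import context_decorator

@contextlib.contextmanager
def announce():  # a context-manager *factory*: callable, not a CM itself
    print("enter")
    yield
    print("exit")

def add(a, b):
    return a + b

wrapped = context_decorator(announce, add)
print(wrapped(1, 2))  # prints "enter" and "exit" around the call, then 3
```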
  {
    "library": "tensorflow",
    "name": "make_session_run_hook",
    "source_code": "def make_session_run_hook(self, is_chief, num_tokens=-1):\n    return _SyncReplicasOptimizerHook(self, is_chief, num_tokens)",
    "docstring": "Creates a hook to handle SyncReplicasHook ops such as initialization.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\training\\sync_replicas_optimizer.py",
    "ast_data": "FunctionDef name:make_session_run_hook arg:self arg:is_chief arg:num_tokens arguments arg arg arg Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "update_bbox_position_size",
    "source_code": "def update_bbox_position_size(self, renderer):\n    if self._bbox_patch:\n        posx = float(self.convert_xunits(self._x))\n        posy = float(self.convert_yunits(self._y))\n        posx, posy = self.get_transform().transform((posx, posy))\n        x_box, y_box, w_box, h_box = _get_textbox(self, renderer)\n        self._bbox_patch.set_bounds(0.0, 0.0, w_box, h_box)\n        self._bbox_patch.set_transform(Affine2D().rotate_deg(self.get_rotation()).translate(posx + x_box, posy + y_box))\n        fontsize_in_pixel = renderer.points_to_pixels(self.get_size())\n        self._bbox_patch.set_mutation_scale(fontsize_in_pixel)",
    "docstring": "Update the location and the size of the bbox. This method should be used when the position and size of the bbox needs to be updated before actually drawing the bbox.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\text.py",
    "ast_data": "FunctionDef name:update_bbox_position_size arg:self arg:renderer arguments arg arg If Assign Call Call Assign Call Call Assign Call Call Assign Call Call Call Call Call Call Call Assign Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "unwrap",
    "source_code": "def unwrap(self) -> Union[VariableTracker, Self]:\n    if self.is_realized():\n        assert self._cache.vt is not None\n        return self._cache.vt\n    return self",
    "docstring": "Return the real VariableTracker if it already exists",
    "type": "method",
    "file_path": "pytorch\\torch\\_dynamo\\variables\\lazy.py",
    "ast_data": "FunctionDef name:unwrap arg:self arguments arg If Call Compare Return return:yes Return return:yes"
  },
  {
    "library": "seaborn",
    "name": "categorical_mapping",
    "source_code": "def categorical_mapping(self, data, palette, order):\n    levels = categorical_order(data, order)\n    n_colors = len(levels)\n    if isinstance(palette, dict):\n        missing = set(levels) - set(palette)\n        if any(missing):\n            err = 'The palette dictionary is missing keys: {}'\n            raise ValueError(err.format(missing))\n        lookup_table = palette\n    else:\n        if palette is None:\n            if n_colors <= len(get_color_cycle()):\n                colors = color_palette(None, n_colors)\n            else:\n                colors = color_palette('husl', n_colors)\n        elif isinstance(palette, list):\n            colors = self._check_list_length(levels, palette, 'palette')\n        else:\n            colors = color_palette(palette, n_colors)\n        lookup_table = dict(zip(levels, colors))\n    return (levels, lookup_table)",
    "docstring": "Determine colors when the hue mapping is categorical.",
    "type": "method",
    "file_path": "seaborn\\seaborn\\_base.py",
    "ast_data": "FunctionDef name:categorical_mapping arg:self arg:data arg:palette arg:order arguments arg arg arg arg Assign Call Assign Call If Call Assign Call Call If Call Assign Raise Call Call Assign If Compare If Compare Call Call Assign Call Assign Call If Call Assign Call Assign Call Assign Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, num_buckets: int, lower: float, upper: float):\n    if num_buckets < 2:\n        raise ValueError(f'num_buckets is {num_buckets}, must be at least 2 for simulated quantization.')\n    self.num_buckets = num_buckets\n    self.lower = lower\n    self.upper = upper",
    "docstring": "Simulated quantizaiton configuration. Args: num_buckets: The number of quantization buckets, must be atleast 2. lower: The lower bound for the quantization range. upper: The upper bound for the quantization range. Returns: . Raises: ValueError: if is less than 2.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\tpu\\tpu_embedding_v2_utils.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:num_buckets arg:lower arg:upper arguments arg arg arg arg If Compare Raise Call Assign Assign Assign"
  },
  {
    "library": "tensorflow",
    "name": "_generate_modified_filenames",
    "source_code": "def _generate_modified_filenames(filename: str) -> Iterator[str]:\n    yield filename\n    base, ext = os.path.splitext(filename)\n    for name_suffix in itertools.count(1, 1):\n        yield '{}_{}{}'.format(base, name_suffix, ext)",
    "docstring": "Generates the modified filenames with incremental name suffix added. This helper function first yields the given filename itself, and subsequently yields modified filenames by incrementing number suffix to the basename. Args: filename: The original filename to be modified. Yields: The original filename and then modified filenames with incremental suffix.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\lite\\ios\\extract_object_files.py",
    "ast_data": "FunctionDef name:_generate_modified_filenames arg:filename arguments arg Assign Call For Call Call"
  },
  {
    "library": "tensorflow",
    "name": "ret",
    "source_code": "def ret(self, value, did_return):\n    del did_return\n    if isinstance(value, variables.UndefinedReturnValue):\n        return None\n    if self.use_auto_deps:\n        self._return_value_marked = True\n        if value is None:\n            return None\n\n        def _mark_return_if_tensor(t):\n            if tensor_util.is_tf_type(t):\n                return self.autodeps_scope.mark_as_return(t)\n            return t\n        value = nest.map_structure(_mark_return_if_tensor, value)\n    return value",
    "docstring": "Marks a value as returned from the function guarded by the scope.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\autograph\\core\\function_wrappers.py",
    "ast_data": "FunctionDef name:ret arg:self arg:value arg:did_return arguments arg arg arg If Call Return return:no If Assign If Compare Return return:no FunctionDef name:_mark_return_if_tensor arg:t arguments arg If Call Return return:yes Call Return return:yes Assign Call Return return:yes"
  },
  {
    "library": "sphinx",
    "name": "jobs_argument",
    "source_code": "def jobs_argument(value: str) -> int:\n    if value == 'auto':\n        return multiprocessing.cpu_count()\n    else:\n        jobs = int(value)\n        if jobs <= 0:\n            raise argparse.ArgumentTypeError(__('job number should be a positive number'))\n        else:\n            return jobs",
    "docstring": "Parse the `` flag. Return the number of CPUs if 'auto' is used, otherwise ensure *value* is a positive integer.",
    "type": "function",
    "file_path": "sphinx\\sphinx\\cmd\\build.py",
    "ast_data": "FunctionDef name:jobs_argument arg:value arguments arg If Compare Return return:yes Call Assign Call If Compare Raise Call Call Return return:yes"
  },
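Converters like this plug into argparse via the `type=` hook; a self-contained sketch (the `-j/--jobs` flag name here is an assumption for illustration):

```python
import argparse
import multiprocessing

def jobs_argument(value: str) -> int:
    if value == "auto":
        return multiprocessing.cpu_count()
    jobs = int(value)
    if jobs <= 0:
        raise argparse.ArgumentTypeError("job number should be a positive number")
    return jobs

parser = argparse.ArgumentParser()
parser.add_argument("-j", "--jobs", type=jobs_argument, default=1)
print(parser.parse_args(["-j", "auto"]).jobs)  # number of CPUs on this machine
```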
  {
    "library": "pandas",
    "name": "_getitem_slice",
    "source_code": "@final\ndef _getitem_slice(self, key: slice) -> Self:\n    slobj = self.index._convert_slice_indexer(key, kind='getitem')\n    if isinstance(slobj, np.ndarray):\n        indexer = lib.maybe_indices_to_slice(slobj.astype(np.intp), len(self))\n        if isinstance(indexer, np.ndarray):\n            return self.take(indexer, axis=0)\n        slobj = indexer\n    return self._slice(slobj)",
    "docstring": "__getitem__ for the case where the key is a slice object.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\generic.py",
    "ast_data": "FunctionDef name:_getitem_slice arg:self arg:key arguments arg arg Assign Call If Call Assign Call Call Call If Call Return return:yes Call Assign Return return:yes Call"
  },
  {
    "library": "uvicorn",
    "name": "pause_writing",
    "source_code": "def pause_writing(self) -> None:\n    self.writable.clear()",
    "docstring": "Called by the transport when the write buffer exceeds the high water mark.",
    "type": "method",
    "file_path": "uvicorn\\uvicorn\\protocols\\websockets\\wsproto_impl.py",
    "ast_data": "FunctionDef name:pause_writing arg:self arguments arg Call"
  },
  {
    "library": "matplotlib",
    "name": "get_sketch_params",
    "source_code": "def get_sketch_params(self):\n    return self._sketch",
    "docstring": "Return the sketch parameters for the artist. Returns ------- tuple or None A 3-tuple with the following elements: - *scale*: The amplitude of the wiggle perpendicular to the source line. - *length*: The length of the wiggle along the line. - *randomness*: The scale factor by which the length is shrunken or expanded. Returns *None* if no sketch parameters were set.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\artist.py",
    "ast_data": "FunctionDef name:get_sketch_params arg:self arguments arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "log_softmax",
    "source_code": "def log_softmax(input: Tensor, dim: Optional[int]=None, _stacklevel: int=3, dtype: Optional[DType]=None) -> Tensor:\n    if has_torch_function_unary(input):\n        return handle_torch_function(log_softmax, (input,), input, dim=dim, _stacklevel=_stacklevel, dtype=dtype)\n    if dim is None:\n        dim = _get_softmax_dim('log_softmax', input.dim(), _stacklevel)\n    if dtype is None:\n        ret = input.log_softmax(dim)\n    else:\n        ret = input.log_softmax(dim, dtype=dtype)\n    return ret",
    "docstring": "Apply a softmax followed by a logarithm. While mathematically equivalent to log(softmax(x)), doing these two operations separately is slower and numerically unstable. This function uses an alternative formulation to compute the output and gradient correctly. See :class: for more details. Args: input (Tensor): input dim (int): A dimension along which log_softmax will be computed. dtype (:class:, optional): the desired data type of returned tensor. If specified, the input tensor is cast to :attr: before the operation is performed. This is useful for preventing data type overflows. Default: None.",
    "type": "function",
    "file_path": "pytorch\\torch\\nn\\functional.py",
    "ast_data": "FunctionDef name:log_softmax arg:input arg:dim arg:_stacklevel arg:dtype arguments arg arg arg arg If Call Return return:yes Call If Compare Assign Call Call If Compare Assign Call Assign Call Return return:yes"
  },
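A quick numerical check that the fused `log_softmax` agrees with the two-step computation it replaces (assuming PyTorch is installed):

```python
import torch
import torch.nn.functional as F

x = torch.randn(2, 3)
fused = F.log_softmax(x, dim=1)
naive = torch.log(F.softmax(x, dim=1))
# Equal up to floating-point error, but the fused form is numerically stabler.
print(torch.allclose(fused, naive, atol=1e-6))  # True
```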
  {
    "library": "sphinx",
    "name": "get_ratio",
    "source_code": "def get_ratio(old: str, new: str) -> float:\n    if not all([old, new]):\n        return VERSIONING_RATIO\n    if IS_SPEEDUP:\n        return Levenshtein.distance(old, new) / (len(old) / 100.0)\n    else:\n        return levenshtein_distance(old, new) / (len(old) / 100.0)",
    "docstring": "Return a \"similarity ratio\" (in percent) representing the similarity between the two strings where 0 is equal and anything above less than equal.",
    "type": "function",
    "file_path": "sphinx\\sphinx\\versioning.py",
    "ast_data": "FunctionDef name:get_ratio arg:old arg:new arguments arg arg If Call Return return:yes If Return return:yes Call Call Return return:yes Call Call"
  },
  {
    "library": "scikit-learn",
    "name": "_class_means",
    "source_code": "def _class_means(X, y):\n    xp, is_array_api_compliant = get_namespace(X)\n    classes, y = xp.unique_inverse(y)\n    means = xp.zeros((classes.shape[0], X.shape[1]), device=device(X), dtype=X.dtype)\n    if is_array_api_compliant:\n        for i in range(classes.shape[0]):\n            means[i, :] = xp.mean(X[y == i], axis=0)\n    else:\n        cnt = np.bincount(y)\n        np.add.at(means, y, X)\n        means /= cnt[:, None]\n    return means",
    "docstring": "Compute class means. Parameters ---------- X : array-like of shape (n_samples, n_features) Input data. y : array-like of shape (n_samples,) or (n_samples, n_targets) Target values. Returns ------- means : array-like of shape (n_classes, n_features) Class means.",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\discriminant_analysis.py",
    "ast_data": "FunctionDef name:_class_means arg:X arg:y arguments arg arg Assign Call Assign Call Assign Call Call If For Call Assign Call Compare Assign Call Call Return return:yes"
  },
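The NumPy branch above relies on `np.add.at` to accumulate rows per class label without a Python loop; a self-contained sketch of that trick:

```python
import numpy as np

X = np.array([[0.0, 1.0], [2.0, 3.0], [4.0, 5.0]])
y = np.array([0, 1, 1])

classes, y_idx = np.unique(y, return_inverse=True)
means = np.zeros((classes.shape[0], X.shape[1]))
np.add.at(means, y_idx, X)             # unbuffered scatter-add of rows
means /= np.bincount(y_idx)[:, None]   # divide each class sum by its count
print(means)  # [[0. 1.], [3. 4.]]
```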
  {
    "library": "tensorflow",
    "name": "tpu_replicated_input_resolver",
    "source_code": "@auto_control_deps.register_acd_resource_resolver\ndef tpu_replicated_input_resolver(op: ops.Operation, resource_reads: object_identity.ObjectIdentitySet, resource_writes: object_identity.ObjectIdentitySet) -> bool:\n    if op.type == 'TPUReplicatedInput':\n        if resource_reads or resource_writes:\n            resource_reads.clear()\n            resource_writes.clear()\n            return True\n        else:\n            return False\n\n    def replace_with_unreplicated_resources(resource_inputs):\n        to_remove = []\n        to_add = []\n        for resource in resource_inputs:\n            if resource.op.type == 'TPUReplicatedInput':\n                to_remove.append(resource)\n                to_add.extend(resource.op.inputs)\n        for t in to_remove:\n            resource_inputs.discard(t)\n        resource_inputs.update(to_add)\n        return to_add or to_remove\n    return bool(replace_with_unreplicated_resources(resource_reads) or replace_with_unreplicated_resources(resource_writes))",
    "docstring": "Replaces TPUReplicatedInput outputs with its inputs in resource_inputs.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\tpu\\tpu.py",
    "ast_data": "FunctionDef name:tpu_replicated_input_resolver arg:op arg:resource_reads arg:resource_writes arguments arg arg arg If Compare If BoolOp Call Call Return return:yes Return return:yes FunctionDef name:replace_with_unreplicated_resources arg:resource_inputs arguments arg Assign Assign For If Compare Call Call For Call Call Return return:yes BoolOp Return return:yes Call BoolOp Call Call"
  },
  {
    "library": "kornia",
    "name": "tz",
    "source_code": "@property\ndef tz(self) -> Tensor:\n    return self.extrinsics[..., 2, -1]",
    "docstring": "Returns the z-coordinate of the translation vector. Returns: tensor of shape :math:.",
    "type": "method",
    "file_path": "kornia\\kornia\\geometry\\camera\\pinhole.py",
    "ast_data": "FunctionDef name:tz arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, maxsize=0):\n    self._maxsize = maxsize\n    self._queue = collections.deque()\n    self._closed = False\n    self._mutex = threading.Lock()\n    self._not_empty = threading.Condition(self._mutex)\n    self._not_full = threading.Condition(self._mutex)",
    "docstring": "Create a queue object with a given maximum size. Args: maxsize: int size of queue. If <= 0, the queue size is infinite.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\summary\\writer\\event_file_writer.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:maxsize arguments arg arg Assign Assign Call Assign Assign Call Assign Call Assign Call"
  },
  {
    "library": "tensorflow",
    "name": "get_config",
    "source_code": "def get_config(self):\n    return dict(zip(self._fields, self))",
    "docstring": "See 'FeatureColumn` base class.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\feature_column\\feature_column_v2.py",
    "ast_data": "FunctionDef name:get_config arg:self arguments arg Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_create_datasets_from_function_with_input_context",
    "source_code": "def _create_datasets_from_function_with_input_context(input_contexts, input_workers, dataset_fn):\n    datasets = []\n    for i, ctx in enumerate(input_contexts):\n        worker = input_workers.worker_devices[i]\n        with ops.device(worker):\n            dataset = dataset_fn(ctx)\n            datasets.append(dataset)\n    return (datasets, dataset.element_spec)",
    "docstring": "Create device datasets per worker given a dataset function.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\input_lib.py",
    "ast_data": "FunctionDef name:_create_datasets_from_function_with_input_context arg:input_contexts arg:input_workers arg:dataset_fn arguments arg arg arg Assign For Call Assign With Call Assign Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "convex_min_zero_map",
    "source_code": "@staticmethod\ndef convex_min_zero_map(x: Union[ExprIn, ExprVR], fn: ExprFn) -> ExprVR:\n    x = ValueRanges.wrap(x)\n    if 0 in x:\n        upper = max(fn(x.lower), fn(x.upper))\n        upper = simple_sympify(upper)\n        if isinstance(upper, sympy.Float) or upper == sympy.oo:\n            return ValueRanges(0.0, upper)\n        return ValueRanges(0, upper)\n    return ValueRanges.monotone_map(x, fn)",
    "docstring": "Fn is convex and has a minimum at 0.",
    "type": "method",
    "file_path": "pytorch\\torch\\utils\\_sympy\\value_ranges.py",
    "ast_data": "FunctionDef name:convex_min_zero_map arg:x arg:fn arguments arg arg Assign Call If Compare Assign Call Call Call Assign Call If BoolOp Call Compare Return return:yes Call Return return:yes Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "insert_arg",
    "source_code": "@compatibility(is_backward_compatible=True)\ndef insert_arg(self, idx: int, arg: Argument) -> None:\n    assert 0 <= idx <= len(self.args), 'insert_args index must be between 0 and len(self.args)'\n    args_left = self.args[:idx]\n    args_right = self.args[idx:]\n    self._args = args_left + (arg,) + args_right\n    _new_input_nodes: dict[Node, None] = {}\n    _fx_map_arg(arg, _new_input_nodes.setdefault)\n    for new_use in _new_input_nodes.keys():\n        if new_use not in self._input_nodes:\n            self._input_nodes.setdefault(new_use)\n            new_use.users.setdefault(self)",
    "docstring": "Insert an positional argument to the argument list with given index. Args: idx (int): The index of the element in ``",
    "type": "method",
    "file_path": "pytorch\\torch\\fx\\node.py",
    "ast_data": "FunctionDef name:insert_arg arg:self arg:idx arg:arg arguments arg arg arg Compare Call Assign Assign Assign Call For Call If Compare Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "yield_value",
    "source_code": "def yield_value(modality, iterable):\n    if modality == Modality.CORE:\n        yield from _tf_core_yield_value(iterable)\n    elif modality == Modality.DATA:\n        yield from _tf_data_yield_value(iterable)\n    else:\n        raise ValueError('Unknown modality used {} for nested structure'.format(modality))",
    "docstring": "Yield elements of in a deterministic order. Args: modality: enum value of supported modality [Modality.CORE or Modality.DATA] iterable: an iterable. Yields: The iterable elements in a deterministic order.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\util\\nest_util.py",
    "ast_data": "FunctionDef name:yield_value arg:modality arg:iterable arguments arg arg If Compare Call If Compare Call Raise Call Call"
  },
  {
    "library": "tensorflow",
    "name": "finfo",
    "source_code": "@tf_export.tf_export('experimental.numpy.finfo', v1=[])\n@np_doc('finfo')\ndef finfo(dtype):\n    return np.finfo(_to_numpy_type(dtype))",
    "docstring": "Note that currently it just forwards to the numpy namesake, while tensorflow and numpy dtypes may have different properties.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\numpy_ops\\np_utils.py",
    "ast_data": "FunctionDef name:finfo arg:dtype arguments arg Return return:yes Call Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_update_input",
    "source_code": "def _update_input(self, index, tensor) -> None:\n    if not isinstance(tensor, tensor_lib.Tensor):\n        raise TypeError('tensor must be a Tensor: %s' % tensor)\n    _assert_same_graph(self, tensor)\n    self._inputs_val = None\n    with self.graph._c_graph.get() as c_graph:\n        pywrap_tf_session.UpdateEdge(c_graph, tensor._as_tf_output(), self._tf_input(index))",
    "docstring": "Update the input to this operation at the given index. NOTE: This is for TF internal use only. Please don't use it. Args: index: the index of the input to update. tensor: the Tensor to be used as the input at the given index. Raises: TypeError: if tensor is not a Tensor, or if input tensor type is not convertible to dtype. ValueError: if the Tensor is from a different graph.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\ops.py",
    "ast_data": "FunctionDef name:_update_input arg:self arg:index arg:tensor arguments arg arg arg If Call Raise Call Call Assign With Call Call Call Call"
  },
  {
    "library": "django",
    "name": "paragraph",
    "source_code": "def paragraph():\n    return ' '.join((sentence() for i in range(random.randint(1, 4))))",
    "docstring": "Return a randomly generated paragraph of lorem ipsum text. The paragraph consists of between 1 and 4 sentences, inclusive.",
    "type": "function",
    "file_path": "django\\django\\utils\\lorem_ipsum.py",
    "ast_data": "FunctionDef name:paragraph arguments Return return:yes Call Call Call Call"
  },
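`django.utils.lorem_ipsum` is importable without configuring Django settings; a one-line usage (assuming Django is installed):

```python
from django.utils.lorem_ipsum import paragraph

# Each call returns a paragraph of 1-4 randomly generated lorem ipsum sentences.
print(paragraph())
```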
  {
    "library": "kornia",
    "name": "adjust_brightness_accumulative",
    "source_code": "def adjust_brightness_accumulative(image: Tensor, factor: Union[float, Tensor], clip_output: bool=True) -> Tensor:\n    KORNIA_CHECK_IS_TENSOR(image, 'Expected shape (*, H, W)')\n    KORNIA_CHECK(isinstance(factor, (float, Tensor)), 'Factor should be float or Tensor.')\n    if isinstance(factor, float):\n        factor = torch.as_tensor(factor, device=image.device, dtype=image.dtype)\n    elif isinstance(factor, Tensor):\n        factor = factor.to(image.device, image.dtype)\n    while len(factor.shape) != len(image.shape):\n        factor = factor[..., None]\n    img_adjust: Tensor = image * factor\n    if clip_output:\n        img_adjust = img_adjust.clamp(min=0.0, max=1.0)\n    return img_adjust",
    "docstring": "Adjust the brightness accumulatively of an image tensor. This implementation follows PIL convention. The input image and factor is expected to be in the range of [0, 1]. Args: image: Image to be adjusted in the shape of :math:. factor: Brightness adjust factor per element in the batch. It's recommended to bound the factor by [0, 1]. 0 does not modify the input image while any other number modify the brightness. clip_output: Whether to clip output to be in [0,1]. Return: Adjusted tensor in the shape of :math:. Example: >>> x = torch.ones(1, 1, 2, 2) >>> adjust_brightness_accumulative(x, 1.) tensor([[[[1., 1.], [1., 1.]]]]) >>> x = torch.ones(2, 5, 3, 3) >>> y = torch.tensor([0.25, 0.50]) >>> adjust_brightness_accumulative(x, y).shape torch.Size([2, 5, 3, 3])",
    "type": "function",
    "file_path": "kornia\\kornia\\enhance\\adjust.py",
    "ast_data": "FunctionDef name:adjust_brightness_accumulative arg:image arg:factor arg:clip_output arguments arg arg arg Call Call Call If Call Assign Call If Call Assign Call While Compare Call Call Assign If Assign Call Return return:yes"
  },
  {
    "library": "numpy",
    "name": "hermmul",
    "source_code": "def hermmul(c1, c2):\n    [c1, c2] = pu.as_series([c1, c2])\n    if len(c1) > len(c2):\n        c = c2\n        xs = c1\n    else:\n        c = c1\n        xs = c2\n    if len(c) == 1:\n        c0 = c[0] * xs\n        c1 = 0\n    elif len(c) == 2:\n        c0 = c[0] * xs\n        c1 = c[1] * xs\n    else:\n        nd = len(c)\n        c0 = c[-2] * xs\n        c1 = c[-1] * xs\n        for i in range(3, len(c) + 1):\n            tmp = c0\n            nd = nd - 1\n            c0 = hermsub(c[-i] * xs, c1 * (2 * (nd - 1)))\n            c1 = hermadd(tmp, hermmulx(c1) * 2)\n    return hermadd(c0, hermmulx(c1) * 2)",
    "docstring": "Multiply one Hermite series by another. Returns the product of two Hermite series * . The arguments are sequences of coefficients, from lowest order \"term\" to highest, e.g., [1,2,3] represents the series ``. Parameters ---------- c1, c2 : array_like 1-D arrays of Hermite series coefficients ordered from low to high. Returns ------- out : ndarray Of Hermite series coefficients representing their product. See Also -------- hermadd, hermsub, hermmulx, hermdiv, hermpow Notes ----- In general, the (polynomial) product of two C-series results in terms that are not in the Hermite polynomial basis set. Thus, to express the product as a Hermite series, it is necessary to \"reproject\" the product onto said basis set, which may produce \"unintuitive\" (but correct) results; see Examples section below. Examples -------- >>> from numpy.polynomial.hermite import hermmul >>> hermmul([1, 2, 3], [0, 1, 2]) array([52., 29., 52., 7., 6.])",
    "type": "function",
    "file_path": "numpy\\numpy\\polynomial\\hermite.py",
    "ast_data": "FunctionDef name:hermmul arg:c1 arg:c2 arguments arg arg Assign Call If Compare Call Call Assign Assign Assign Assign If Compare Call Assign Assign If Compare Call Assign Assign Assign Call Assign Assign For Call Call Assign Assign Assign Call Assign Call Call Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_record_state",
    "source_code": "def _record_state(self):\n    filenames, timestamps = zip(*self._maybe_delete.items())\n    update_checkpoint_state_internal(self._directory, model_checkpoint_path=self.latest_checkpoint, all_model_checkpoint_paths=filenames, all_model_checkpoint_timestamps=timestamps, last_preserved_timestamp=self._last_preserved_timestamp, save_relative_paths=True)",
    "docstring": "Saves the 's state in .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\checkpoint\\checkpoint_management.py",
    "ast_data": "FunctionDef name:_record_state arg:self arguments arg Assign Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "AssertTransformer",
    "source_code": "class AssertTransformer(converter.Base):\n\n    def visit_Assert(self, node):\n        self.generic_visit(node)\n        template = '\\n      ag__.assert_stmt(test, lambda: msg)\\n    '\n        if node.msg is None:\n            return templates.replace(template, test=node.test, msg=gast.Constant('Assertion error', kind=None))\n        elif isinstance(node.msg, gast.Constant):\n            return templates.replace(template, test=node.test, msg=node.msg)\n        else:\n            raise NotImplementedError('can only convert string messages for now.')",
    "docstring": "Transforms Assert nodes to Call so they can be handled as functions.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\autograph\\converters\\asserts.py",
    "ast_data": "ClassDef name:AssertTransformer FunctionDef name:visit_Assert arg:self arg:node arguments arg arg Call Assign If Compare Return return:yes Call Call If Call Return return:yes Call Raise Call"
  },
  {
    "library": "tensorflow",
    "name": "_preprocess_padding",
    "source_code": "def _preprocess_padding(padding):\n    if padding == 'same':\n        padding = 'SAME'\n    elif padding == 'valid':\n        padding = 'VALID'\n    else:\n        raise ValueError('Invalid padding: ' + str(padding))\n    return padding",
    "docstring": "Convert keras' padding to TensorFlow's padding. Args: padding: string, one of 'same' , 'valid' Returns: a string, one of 'SAME', 'VALID'. Raises: ValueError: if invalid",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\backend.py",
    "ast_data": "FunctionDef name:_preprocess_padding arg:padding arguments arg If Compare Assign If Compare Assign Raise Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "reinitialize_physical_devices",
    "source_code": "def reinitialize_physical_devices(self):\n    self._initialize_physical_devices(True)",
    "docstring": "Gets local devices visible to the system.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\context.py",
    "ast_data": "FunctionDef name:reinitialize_physical_devices arg:self arguments arg Call"
  },
  {
    "library": "sphinx",
    "name": "run",
    "source_code": "def run(self) -> list[Node]:\n    if ':' in self.name:\n        self.domain, self.objtype = self.name.split(':', 1)\n    else:\n        self.domain, self.objtype = ('', self.name)\n    node = addnodes.desc()\n    node.document = self.state.document\n    node['domain'] = self.domain\n    node['objtype'] = node['desctype'] = self.objtype\n    self.names: list[str] = []\n    alias_options = {'maxdepth': self.options.get('maxdepth', 1), 'noroot': 'noroot' in self.options}\n    if alias_options['noroot'] and alias_options['maxdepth'] == 1:\n        logger.warning(\"Error in C++ alias declaration. Requested 'noroot' but 'maxdepth' 1. When skipping the root declaration, need 'maxdepth' 0 for infinite or at least 2.\", location=self.get_location())\n    signatures = self.get_signatures()\n    for sig in signatures:\n        node.append(AliasNode(sig, alias_options, env=self.env))\n    self.before_content()\n    content_node = addnodes.desc_content('', *self.parse_content_to_nodes())\n    node.append(content_node)\n    self.env.current_document.obj_desc_name = ''\n    self.after_content()\n    return [node]",
    "docstring": "On purpose this doesn't call the ObjectDescription version, but is based on it. Each alias signature may expand into multiple real signatures (an overload set). The code is therefore based on the ObjectDescription version.",
    "type": "method",
    "file_path": "sphinx\\sphinx\\domains\\cpp\\__init__.py",
    "ast_data": "FunctionDef name:run arg:self arguments arg If Compare Assign Call Assign Assign Call Assign Assign Assign Assign Call Compare If BoolOp Compare Call Call Assign Call For Call Call Call Assign Call Call Call Assign Call Return return:yes"
  },
  {
    "library": "scipy",
    "name": "__call__",
    "source_code": "def __call__(self, loc=None, shape=1, df=1, allow_singular=False, seed=None):\n    if df == np.inf:\n        return multivariate_normal_frozen(mean=loc, cov=shape, allow_singular=allow_singular, seed=seed)\n    return multivariate_t_frozen(loc=loc, shape=shape, df=df, allow_singular=allow_singular, seed=seed)",
    "docstring": "Create a frozen multivariate t-distribution. See for parameters.",
    "type": "method",
    "file_path": "scipy\\scipy\\stats\\_multivariate.py",
    "ast_data": "FunctionDef name:__call__ arg:self arg:loc arg:shape arg:df arg:allow_singular arg:seed arguments arg arg arg arg arg arg If Compare Return return:yes Call Return return:yes Call"
  },
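Usage sketch for the frozen-distribution factory above, via the public `scipy.stats.multivariate_t` object; per the code, `df=np.inf` returns a frozen multivariate normal instead:

```python
import numpy as np
from scipy.stats import multivariate_t

rv = multivariate_t(loc=[0.0, 0.0], shape=np.eye(2), df=3)
print(rv.pdf([0.5, -0.2]))          # density of the frozen distribution

# df=inf falls back to a frozen multivariate normal (see the branch above).
rv_normal = multivariate_t(loc=[0.0, 0.0], shape=np.eye(2), df=np.inf)
```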
  {
    "library": "django",
    "name": "point_count",
    "source_code": "@property\ndef point_count(self):\n    return sum((self[i].point_count for i in range(self.geom_count)))",
    "docstring": "Return the number of Points in this Geometry Collection.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\gdal\\geometries.py",
    "ast_data": "FunctionDef name:point_count arg:self arguments arg Return return:yes Call Call"
  },
  {
    "library": "scikit-learn",
    "name": "decision_function",
    "source_code": "def decision_function(self, X):\n    check_is_fitted(self)\n    X = self._check_X(X)\n    n_classes = self.n_classes_\n    classes = self.classes_[:, np.newaxis]\n    if n_classes == 1:\n        return np.zeros_like(X, shape=(X.shape[0], 1))\n    pred = sum((np.where((estimator.predict(X) == classes).T, w, -1 / (n_classes - 1) * w) for estimator, w in zip(self.estimators_, self.estimator_weights_)))\n    pred /= self.estimator_weights_.sum()\n    if n_classes == 2:\n        pred[:, 0] *= -1\n        return pred.sum(axis=1)\n    return pred",
    "docstring": "Compute the decision function of `classes_`, respectively.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\ensemble\\_weight_boosting.py",
    "ast_data": "FunctionDef name:decision_function arg:self arg:X arguments arg arg Call Assign Call Assign Assign If Compare Return return:yes Call Assign Call Call Compare Call Call Call If Compare Return return:yes Call Return return:yes"
  },
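A small usage sketch of the public API this method backs: an AdaBoostClassifier's decision_function on toy data (dataset and hyperparameters here are arbitrary):

```python
from sklearn.datasets import make_classification
from sklearn.ensemble import AdaBoostClassifier

X, y = make_classification(n_samples=100, random_state=0)
clf = AdaBoostClassifier(n_estimators=10, random_state=0).fit(X, y)
scores = clf.decision_function(X)  # 1-D for binary problems, 2-D otherwise
```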
  {
    "library": "sphinx",
    "name": "ClickableMapDefinition",
    "source_code": "class ClickableMapDefinition:\n    maptag_re = re.compile('<map id=\"(.*?)\"')\n    href_re = re.compile('href=\".*?\"')\n\n    def __init__(self, filename: str, content: str, dot: str='') -> None:\n        self.id: str | None = None\n        self.filename = filename\n        self.content = content.splitlines()\n        self.clickable: list[str] = []\n        self.parse(dot=dot)\n\n    def parse(self, dot: str) -> None:\n        matched = self.maptag_re.match(self.content[0])\n        if not matched:\n            msg = f'Invalid clickable map file found: {self.filename}'\n            raise GraphvizError(msg)\n        self.id = matched.group(1)\n        if self.id == '%3':\n            hashed = sha1(dot.encode(), usedforsecurity=False).hexdigest()\n            self.id = f'grapviz{hashed[-10:]}'\n            self.content[0] = self.content[0].replace('%3', self.id)\n        for line in self.content:\n            if self.href_re.search(line):\n                self.clickable.append(line)\n\n    def generate_clickable_map(self) -> str:\n        if self.clickable:\n            return '\\n'.join((self.content[0], *self.clickable, self.content[-1]))\n        else:\n            return ''",
    "docstring": "A manipulator for clickable map file of graphviz.",
    "type": "class",
    "file_path": "sphinx\\sphinx\\ext\\graphviz.py",
    "ast_data": "ClassDef name:ClickableMapDefinition Assign Call Assign Call FunctionDef name:__init__ arg:self arg:filename arg:content arg:dot arguments arg arg arg arg Assign Assign Call Call FunctionDef name:parse arg:self arg:dot arguments arg arg Assign Call If Assign Raise Call Assign Call If Compare Assign Call Call Call Assign Assign Call For If Call Call FunctionDef name:generate_clickable_map arg:self arguments arg If Return return:yes Call Return return:yes"
  },
  {
    "library": "pandas",
    "name": "_wrap_results",
    "source_code": "def _wrap_results(result, dtype: np.dtype, fill_value=None):\n    if result is NaT:\n        pass\n    elif dtype.kind == 'M':\n        if fill_value is None:\n            fill_value = iNaT\n        if not isinstance(result, np.ndarray):\n            assert not isna(fill_value), 'Expected non-null fill_value'\n            if result == fill_value:\n                result = np.nan\n            if isna(result):\n                result = np.datetime64('NaT', 'ns').astype(dtype)\n            else:\n                result = np.int64(result).view(dtype)\n            result = result.astype(dtype, copy=False)\n        else:\n            result = result.astype(dtype)\n    elif dtype.kind == 'm':\n        if not isinstance(result, np.ndarray):\n            if result == fill_value or np.isnan(result):\n                result = np.timedelta64('NaT').astype(dtype)\n            elif np.fabs(result) > lib.i8max:\n                raise ValueError('overflow in timedelta operation')\n            else:\n                result = np.int64(result).astype(dtype, copy=False)\n        else:\n            result = result.astype('m8[ns]').view(dtype)\n    return result",
    "docstring": "wrap our results if needed",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\nanops.py",
    "ast_data": "FunctionDef name:_wrap_results arg:result arg:dtype arg:fill_value arguments arg arg arg If Compare If Compare If Compare Assign If Call Call If Compare Assign If Call Assign Call Call Assign Call Call Assign Call Assign Call If Compare If Call If BoolOp Compare Call Assign Call Call If Compare Call Raise Call Assign Call Call Assign Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_composite_from_tensors",
    "source_code": "def _composite_from_tensors(stacked_tensors, preconverted_value, batch_size):\n    if _should_expand_composite(preconverted_value):\n        batch_type_spec = preconverted_value._type_spec._batch(batch_size)\n        return batch_type_spec._from_compatible_tensor_list(stacked_tensors)\n    return stacked_tensors",
    "docstring": "Converts a list of stacked tensors to a batch CompositeTensor.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\parallel_for\\control_flow_ops.py",
    "ast_data": "FunctionDef name:_composite_from_tensors arg:stacked_tensors arg:preconverted_value arg:batch_size arguments arg arg arg If Call Assign Call Return return:yes Call Return return:yes"
  },
  {
    "library": "kornia",
    "name": "width",
    "source_code": "@property\ndef width(self) -> int | Tensor:\n    return self._width",
    "docstring": "Returns the width of the image.",
    "type": "method",
    "file_path": "kornia\\kornia\\sensors\\camera\\camera_model.py",
    "ast_data": "FunctionDef name:width arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "get_input_shape_and_dtype",
    "source_code": "def get_input_shape_and_dtype(layer):\n\n    def _is_graph_model(layer):\n        return hasattr(layer, '_is_graph_network') and layer._is_graph_network or layer.__class__.__name__ == 'Sequential'\n    while _is_graph_model(layer):\n        if not layer.layers:\n            raise ValueError('An empty Model cannot be used as a Layer.')\n        layer = layer.layers[0]\n    if getattr(layer, '_batch_input_shape', None):\n        return (layer._batch_input_shape, layer.dtype)\n    return (None, None)",
    "docstring": "Retrieves input shape and input dtype of layer if applicable. Args: layer: Layer (or model) instance. Returns: Tuple (input_shape, input_dtype). Both could be None if the layer does not have a defined input shape. Raises: ValueError: in case an empty Sequential or Functional model is passed.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\training_utils.py",
    "ast_data": "FunctionDef name:get_input_shape_and_dtype arg:layer arguments arg FunctionDef name:_is_graph_model arg:layer arguments arg Return return:yes BoolOp BoolOp Call Compare While Call If Raise Call Assign If Call Return return:yes Return return:no"
  },
  {
    "library": "pytorch",
    "name": "_DistributedPdb",
    "source_code": "class _DistributedPdb(pdb.Pdb):\n\n    def interaction(self, *args, **kwargs):\n        _stdin = sys.stdin\n        try:\n            sys.stdin = open('/dev/stdin')\n            pdb.Pdb.interaction(self, *args, **kwargs)\n        finally:\n            sys.stdin = _stdin",
    "docstring": "Supports using PDB from inside a multiprocessing child process. Usage: _DistributedPdb().set_trace()",
    "type": "class",
    "file_path": "pytorch\\torch\\distributed\\__init__.py",
    "ast_data": "ClassDef name:_DistributedPdb FunctionDef name:interaction arg:self arguments arg arg arg Assign Try Assign Call Call Assign"
  },
  {
    "library": "pandas",
    "name": "_get_values",
    "source_code": "def _get_values(values: np.ndarray, skipna: bool, fill_value: Any=None, fill_value_typ: str | None=None, mask: npt.NDArray[np.bool_] | None=None) -> tuple[np.ndarray, npt.NDArray[np.bool_] | None]:\n    mask = _maybe_get_mask(values, skipna, mask)\n    dtype = values.dtype\n    datetimelike = False\n    if values.dtype.kind in 'mM':\n        values = np.asarray(values.view('i8'))\n        datetimelike = True\n    if skipna and mask is not None:\n        fill_value = _get_fill_value(dtype, fill_value=fill_value, fill_value_typ=fill_value_typ)\n        if fill_value is not None:\n            if mask.any():\n                if datetimelike or _na_ok_dtype(dtype):\n                    values = values.copy()\n                    np.putmask(values, mask, fill_value)\n                else:\n                    values = np.where(~mask, values, fill_value)\n    return (values, mask)",
    "docstring": "Utility to get the values view, mask, dtype, dtype_max, and fill_value. If both mask and fill_value/fill_value_typ are not None and skipna is True, the values array will be copied. For input arrays of boolean or integer dtypes, copies will only occur if a precomputed mask, a fill_value/fill_value_typ, and skipna=True are provided. Parameters ---------- values : ndarray input array to potentially compute mask for skipna : bool boolean for whether NaNs should be skipped fill_value : Any value to fill NaNs with fill_value_typ : str Set to '+inf' or '-inf' to handle dtype-specific infinities mask : Optional[np.ndarray[bool]] nan-mask if known Returns ------- values : ndarray Potential copy of input value array mask : Optional[ndarray[bool]] Mask for values, if deemed necessary to compute",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\nanops.py",
    "ast_data": "FunctionDef name:_get_values arg:values arg:skipna arg:fill_value arg:fill_value_typ arg:mask arguments arg arg arg arg arg Assign Call Assign Assign If Compare Assign Call Call Assign If BoolOp Compare Assign Call If Compare If Call If BoolOp Call Assign Call Call Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "from_config",
    "source_code": "@classmethod\ndef from_config(cls, config):\n    return cls(**config)",
    "docstring": "Creates a layer from its config. This method is the reverse of , capable of instantiating the same layer from the config dictionary. It does not handle layer connectivity (handled by Network), nor weights (handled by ). Args: config: A Python dictionary, typically the output of get_config. Returns: A layer instance.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\base_layer.py",
    "ast_data": "FunctionDef name:from_config arg:cls arg:config arguments arg arg Return return:yes Call"
  },
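Round-trip sketch for the config mechanism described above, using a standard Keras layer; only constructor arguments are carried over, not weights:

```python
import tensorflow as tf

layer = tf.keras.layers.Dense(4, activation="relu")
config = layer.get_config()                        # plain Python dict
clone = tf.keras.layers.Dense.from_config(config)  # same hyperparameters, fresh weights
assert clone.get_config()["units"] == 4
```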
  {
    "library": "tensorflow",
    "name": "send_graph_tracebacks",
    "source_code": "def send_graph_tracebacks(destinations, run_key, origin_stack, graph, send_source=True):\n    _send_call_tracebacks(destinations, origin_stack, is_eager_execution=False, call_key=run_key, graph=graph, send_source=send_source)",
    "docstring": "Send the tracebacks of a graph execution call to debug server(s). Args: destinations: gRPC destination addresses, a or a of s, e.g., \"localhost:4242\". If a , gRPC requests containing the same proto payload will be sent to all the destinations. run_key: A string describing the feeds, fetches (and targets) names of the call. origin_stack: The traceback of the invocation. graph: A Python object (i.e., *not* a ), which contains op tracebacks. send_source: Whether the source files involved in the op tracebacks but outside the TensorFlow library are to be sent.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\debug\\lib\\source_remote.py",
    "ast_data": "FunctionDef name:send_graph_tracebacks arg:destinations arg:run_key arg:origin_stack arg:graph arg:send_source arguments arg arg arg arg arg Call"
  },
  {
    "library": "tensorflow",
    "name": "_set_output_names",
    "source_code": "def _set_output_names(self):\n    uniquified = []\n    output_names = set()\n    prefix_count = {}\n    for layer in self._output_layers:\n        proposal = layer.name\n        while proposal in output_names:\n            existing_count = prefix_count.get(layer.name, 1)\n            proposal = '{}_{}'.format(layer.name, existing_count)\n            prefix_count[layer.name] = existing_count + 1\n        output_names.add(proposal)\n        uniquified.append(proposal)\n    self.output_names = uniquified",
    "docstring": "Assigns unique names to the Network's outputs. Output layers with multiple output tensors would otherwise lead to duplicate names in self.output_names.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\functional.py",
    "ast_data": "FunctionDef name:_set_output_names arg:self arguments arg Assign Assign Call Assign For Assign While Compare Assign Call Assign Call Assign Call Call Assign"
  },
  {
    "library": "pytorch",
    "name": "half",
    "source_code": "def half(self):\n    return self._to(torch.half)",
    "docstring": "Casts this storage to half type.",
    "type": "method",
    "file_path": "pytorch\\torch\\storage.py",
    "ast_data": "FunctionDef name:half arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "load",
    "source_code": "@deprecated(None, 'Prefer Variable.assign which has equivalent behavior in 2.X.')\ndef load(self, value, session=None):\n    if context.executing_eagerly():\n        self.assign(value)\n    else:\n        session = session or ops.get_default_session()\n        if session is None:\n            raise ValueError('Either session argument should be provided or default session should be established')\n        session.run(self.initializer, {self.initializer.inputs[1]: value})",
    "docstring": "Load new value into this variable. Writes new value to variable's memory. Doesn't add ops to the graph. This convenience method requires a session where the graph containing this variable has been launched. If no session is passed, the default session is used. See for more information on launching a graph and on sessions. Args: value: New variable value session: The session to use to evaluate this variable. If none, the default session is used. Raises: ValueError: Session is not passed and no default session",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\variables.py",
    "ast_data": "FunctionDef name:load arg:self arg:value arg:session arguments arg arg arg If Call Call Assign BoolOp Call If Compare Raise Call Call Call"
  },
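The deprecation note above points to `Variable.assign`; a minimal TF 2.x sketch of the preferred replacement:

```python
import tensorflow as tf

v = tf.Variable([1.0, 2.0])
v.assign([3.0, 4.0])   # eager equivalent of the old v.load(...)
print(v.numpy())       # [3. 4.]
```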
  {
    "library": "pandas",
    "name": "_get_default_locs",
    "source_code": "def _get_default_locs(self, vmin, vmax):\n    locator = self.finder(vmin, vmax, self.freq)\n    if self.isminor:\n        return np.compress(locator['min'], locator['val'])\n    return np.compress(locator['maj'], locator['val'])",
    "docstring": "Returns the default locations of ticks.",
    "type": "method",
    "file_path": "pandas\\pandas\\plotting\\_matplotlib\\converter.py",
    "ast_data": "FunctionDef name:_get_default_locs arg:self arg:vmin arg:vmax arguments arg arg arg Assign Call If Return return:yes Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "apply",
    "source_code": "def apply(self, model_args: Sequence[Any], model_kwargs: Mapping[str, Any], model: torch.nn.Module | Callable | torch_export.ExportedProgram | None=None) -> tuple[Sequence[Any], Mapping[str, Any]]:\n    assert not model_kwargs\n    return (tuple((arg for arg in model_args if not isinstance(arg, (int, float, bool, str)))), {})",
    "docstring": "Remove Constant from arguments. Args: model_args: The model args. model_kwargs: The model kwargs. model: The PyTorch model. Returns: A tuple of the model args and kwargs. Raises: ValueError: If is not empty.",
    "type": "method",
    "file_path": "pytorch\\torch\\onnx\\_internal\\io_adapter.py",
    "ast_data": "FunctionDef name:apply arg:self arg:model_args arg:model_kwargs arg:model arguments arg arg arg arg Return return:yes Call Call"
  },
  {
    "library": "scipy",
    "name": "kolmogni",
    "source_code": "def kolmogni(n, q, cdf=True):\n    it = np.nditer([n, q, cdf, None])\n    for _n, _q, _cdf, z in it:\n        if np.isnan(_n):\n            z[...] = _n\n            continue\n        if int(_n) != _n:\n            raise ValueError(f'n is not integral: {_n}')\n        _pcdf, _psf = (_q, 1 - _q) if _cdf else (1 - _q, _q)\n        z[...] = _kolmogni(int(_n), _pcdf, _psf)\n    result = it.operands[-1]\n    return result",
    "docstring": "Computes the PPF(or ISF) for the two-sided Kolmogorov-Smirnov distribution. Parameters ---------- n : integer, array_like the number of samples q : float, array_like Probabilities, float between 0 and 1 cdf : bool, optional whether to compute the PPF(default=true) or the ISF. Returns ------- ppf : ndarray PPF (or ISF if cdf is False) at the specified locations The return value has shape the result of numpy broadcasting n and x.",
    "type": "function",
    "file_path": "scipy\\scipy\\stats\\_ksstats.py",
    "ast_data": "FunctionDef name:kolmogni arg:n arg:q arg:cdf arguments arg arg arg Assign Call For If Call Assign If Compare Call Raise Call Assign Assign Call Call Assign Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_inverse",
    "source_code": "def _inverse(self, y):\n    raise NotImplementedError",
    "docstring": "Abstract method to compute inverse transformation.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributions\\transforms.py",
    "ast_data": "FunctionDef name:_inverse arg:self arg:y arguments arg arg Raise"
  },
  {
    "library": "pytorch",
    "name": "check_tensor",
    "source_code": "def check_tensor(shards_metadata, tensor_dims) -> None:\n    tensor_rank = len(tensor_dims)\n    shards_rank = len(shards_metadata[0].shard_offsets)\n    if tensor_rank != shards_rank:\n        raise ValueError(f'Rank of tensor is {tensor_rank}, but shards rank is {shards_rank}')\n    total_shard_volume = 0\n    for shard in shards_metadata:\n        shard_volume = 1\n        for i, shard_length in enumerate(shard.shard_sizes):\n            shard_volume *= shard_length\n            if shard.shard_offsets[i] + shard.shard_sizes[i] > tensor_dims[i]:\n                raise ValueError(f'Shard offset {shard.shard_offsets[i]} and length {shard.shard_sizes[i]} exceeds tensor dim: {tensor_dims[i]} for shard {shard}')\n        total_shard_volume += shard_volume\n    tensor_volume = 1\n    for size in tensor_dims:\n        tensor_volume *= size\n    if total_shard_volume != tensor_volume:\n        raise ValueError(f'Total volume of shards: {total_shard_volume} does not match tensor volume: {tensor_volume}, in other words all the individual shards do not cover the entire tensor')",
    "docstring": "Checks if the shards_metadata is compatible with the provided tensor dims. Args: shards_metadata(List[ShardMetadata]): List of :class: objects representing each shard of the tensor. tensor_dims(Sequence of int): Dimensions of tensor to verify Raises: `` if not compatible.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\_shard\\sharding_spec\\_internals.py",
    "ast_data": "FunctionDef name:check_tensor arg:shards_metadata arg:tensor_dims arguments arg arg Assign Call Assign Call If Compare Raise Call Assign For Assign For Call If Compare Raise Call Assign For If Compare Raise Call"
  },
  {
    "library": "numpy",
    "name": "cumprod",
    "source_code": "def cumprod(self, axis=None, dtype=None, out=None):\n    result = self.filled(1).cumprod(axis=axis, dtype=dtype, out=out)\n    if out is not None:\n        if isinstance(out, MaskedArray):\n            out.__setmask__(self._mask)\n        return out\n    result = result.view(type(self))\n    result.__setmask__(self._mask)\n    return result",
    "docstring": "Return the cumulative product of the array elements over the given axis. Masked values are set to 1 internally during the computation. However, their position is saved, and the result will be masked at the same locations. Refer to for full documentation. Notes ----- The mask is lost if is not a valid MaskedArray ! Arithmetic is modular when using integer types, and no error is raised on overflow. See Also -------- numpy.ndarray.cumprod : corresponding function for ndarrays numpy.cumprod : equivalent function",
    "type": "method",
    "file_path": "numpy\\numpy\\ma\\core.py",
    "ast_data": "FunctionDef name:cumprod arg:self arg:axis arg:dtype arg:out arguments arg arg arg arg Assign Call Call If Compare If Call Call Return return:yes Assign Call Call Call Return return:yes"
  },
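Behavior sketch for the masked cumprod above: masked slots act as a factor of 1 during the product and remain masked in the result:

```python
import numpy.ma as ma

a = ma.array([1, 2, 3, 4], mask=[False, True, False, False])
print(a.cumprod())   # [1 -- 3 12]: position 1 contributed a factor of 1
```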
  {
    "library": "kornia",
    "name": "hflip",
    "source_code": "def hflip(input: Tensor) -> Tensor:\n    return input.flip(-1).contiguous()",
    "docstring": "Horizontally flip a tensor image or a batch of tensor images. .. image:: _static/img/hflip.png Input must be a tensor of shape (C, H, W) or a batch of tensors :math:. Args: input: input tensor. Returns: The horizontally flipped image tensor.",
    "type": "function",
    "file_path": "kornia\\kornia\\geometry\\transform\\flips.py",
    "ast_data": "FunctionDef name:hflip arg:input arguments arg Return return:yes Call Call"
  },
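Usage sketch for kornia's hflip on a batched image tensor; as the source shows, it is equivalent to flipping the last (width) dimension:

```python
import torch
from kornia.geometry.transform import hflip

img = torch.rand(2, 3, 4, 5)   # (B, C, H, W)
flipped = hflip(img)
assert torch.equal(flipped, img.flip(-1))
```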
  {
    "library": "matplotlib",
    "name": "_changed",
    "source_code": "def _changed(self):\n    self.callbacks.process('changed')",
    "docstring": "Call this whenever the norm is changed to notify all the callback listeners to the 'changed' signal.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\colors.py",
    "ast_data": "FunctionDef name:_changed arg:self arguments arg Call"
  },
  {
    "library": "cherrypy",
    "name": "HeaderElement",
    "source_code": "class HeaderElement(object):\n\n    def __init__(self, value, params=None):\n        self.value = value\n        if params is None:\n            params = {}\n        self.params = params\n\n    def __cmp__(self, other):\n        return builtins.cmp(self.value, other.value)\n\n    def __lt__(self, other):\n        return self.value < other.value\n\n    def __str__(self):\n        p = [';%s=%s' % (k, v) for k, v in self.params.items()]\n        return str('%s%s' % (self.value, ''.join(p)))\n\n    def __bytes__(self):\n        return ntob(self.__str__())\n\n    def __unicode__(self):\n        return ntou(self.__str__())\n\n    @staticmethod\n    def parse(elementstr):\n        initial_value, params = parse_header(elementstr)\n        return (initial_value, params)\n\n    @classmethod\n    def from_str(cls, elementstr):\n        ival, params = cls.parse(elementstr)\n        return cls(ival, params)",
    "docstring": "An element (with parameters) from an HTTP header's element list.",
    "type": "class",
    "file_path": "cherrypy\\cherrypy\\lib\\httputil.py",
    "ast_data": "ClassDef name:HeaderElement FunctionDef name:__init__ arg:self arg:value arg:params arguments arg arg arg Assign If Compare Assign Assign FunctionDef name:__cmp__ arg:self arg:other arguments arg arg Return return:yes Call FunctionDef name:__lt__ arg:self arg:other arguments arg arg Return return:yes Compare FunctionDef name:__str__ arg:self arguments arg Assign Call Return return:yes Call Call FunctionDef name:__bytes__ arg:self arguments arg Return return:yes Call Call FunctionDef name:__unicode__ arg:self arguments arg Return return:yes Call Call FunctionDef name:parse arg:elementstr arguments arg Assign Call Return return:yes FunctionDef name:from_str arg:cls arg:elementstr arguments arg arg Assign Call Return return:yes Call"
  },
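Parsing sketch for the class above: `from_str` splits a header element into its value and parameter dict:

```python
from cherrypy.lib.httputil import HeaderElement

elem = HeaderElement.from_str('text/html;q=0.9')
print(elem.value)    # 'text/html'
print(elem.params)   # {'q': '0.9'}
print(str(elem))     # 'text/html;q=0.9'
```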
  {
    "library": "scikit-learn",
    "name": "_get_param_names",
    "source_code": "@classmethod\ndef _get_param_names(cls):\n    init = getattr(cls.__init__, 'deprecated_original', cls.__init__)\n    if init is object.__init__:\n        return []\n    init_signature = inspect.signature(init)\n    parameters = [p for p in init_signature.parameters.values() if p.name != 'self' and p.kind != p.VAR_KEYWORD]\n    for p in parameters:\n        if p.kind == p.VAR_POSITIONAL:\n            raise RuntimeError(\"scikit-learn estimators should always specify their parameters in the signature of their __init__ (no varargs). %s with constructor %s doesn't  follow this convention.\" % (cls, init_signature))\n    return sorted([p.name for p in parameters])",
    "docstring": "Get parameter names for the estimator",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\base.py",
    "ast_data": "FunctionDef name:_get_param_names arg:cls arguments arg Assign Call If Compare Return return:no Assign Call Assign Call BoolOp Compare Compare For If Compare Raise Call Return return:yes Call"
  },
  {
    "library": "django",
    "name": "adapt_timefield_value",
    "source_code": "def adapt_timefield_value(self, value):\n    if value is None:\n        return None\n    if timezone.is_aware(value):\n        raise ValueError('Django does not support timezone-aware times.')\n    return str(value)",
    "docstring": "Transform a time value to an object compatible with what is expected by the backend driver for time columns.",
    "type": "method",
    "file_path": "django\\django\\db\\backends\\base\\operations.py",
    "ast_data": "FunctionDef name:adapt_timefield_value arg:self arg:value arguments arg arg If Compare Return return:no If Call Raise Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "clone_and_convert_to_meta",
    "source_code": "def clone_and_convert_to_meta(example_input: Any) -> Any:\n\n    def transform_fn(value: Any) -> Any:\n        if isinstance(value, torch.Tensor):\n            return value.clone().to(device='meta')\n        return value\n    return tree_map(transform_fn, example_input)",
    "docstring": "This function takes a list of example inputs and for each tensor, clones it and converts it to device=meta. For non-tensor values, it keeps the reference. It uses pytree to handle nested structures recursively.",
    "type": "function",
    "file_path": "pytorch\\torch\\fx\\experimental\\_dynamism.py",
    "ast_data": "FunctionDef name:clone_and_convert_to_meta arg:example_input arguments arg FunctionDef name:transform_fn arg:value arguments arg If Call Return return:yes Call Call Return return:yes Return return:yes Call"
  },
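A minimal sketch of the same pattern written against `torch.utils._pytree.tree_map` directly (the helper above lives in a private module; this standalone version makes the tensor/non-tensor split explicit):

```python
import torch
from torch.utils._pytree import tree_map

def to_meta(value):
    # Clone tensors onto the meta device; keep everything else by reference.
    if isinstance(value, torch.Tensor):
        return value.clone().to(device="meta")
    return value

example = {"x": torch.randn(2, 2), "tag": "batch-0"}
meta_example = tree_map(to_meta, example)
print(meta_example["x"].device)   # meta
print(meta_example["tag"])        # 'batch-0' (same object)
```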
  {
    "library": "pandas",
    "name": "_parse_latex_options_strip",
    "source_code": "def _parse_latex_options_strip(value: str | float, arg: str) -> str:\n    return str(value).replace(arg, '').replace('/*', '').replace('*/', '').strip()",
    "docstring": "Strip a css_value which may have latex wrapping arguments, css comment identifiers, and whitespaces, to a valid string for latex options parsing. For example: 'red /* --wrap */ ' --> 'red'",
    "type": "function",
    "file_path": "pandas\\pandas\\io\\formats\\style_render.py",
    "ast_data": "FunctionDef name:_parse_latex_options_strip arg:value arg:arg arguments arg arg Return return:yes Call Call Call Call Call"
  },
  {
    "library": "django",
    "name": "merge_dicts",
    "source_code": "@staticmethod\ndef merge_dicts(dicts):\n    merged = {}\n    for d in reversed(dicts):\n        merged.update(d)\n    return merged",
    "docstring": "Merge dicts in reverse to preference the order of the original list. e.g., merge_dicts([a, b]) will preference the keys in 'a' over those in 'b'.",
    "type": "method",
    "file_path": "django\\django\\db\\models\\query_utils.py",
    "ast_data": "FunctionDef name:merge_dicts arg:dicts arguments arg Assign For Call Call Return return:yes"
  },
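Behavior sketch (a standalone copy of the static method above): iterating in reverse means earlier dicts overwrite later ones:

```python
def merge_dicts(dicts):
    merged = {}
    for d in reversed(dicts):
        merged.update(d)
    return merged

print(merge_dicts([{"a": 1}, {"a": 2, "b": 3}]))   # {'a': 1, 'b': 3}
```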
  {
    "library": "tensorflow",
    "name": "from_function_def",
    "source_code": "def from_function_def(function_def: function_pb2.FunctionDef, function_type: function_type_lib.FunctionType) -> AtomicFunction:\n    bound_context = context.context()\n    if bound_context.has_function(compat.as_bytes(function_def.signature.name)):\n        raise ValueError('Function already registered in context.')\n    bound_context.add_function_def(function_def)\n    return AtomicFunction(function_def.signature.name, bound_context, function_type)",
    "docstring": "Create a new AtomicFunction from FunctionDef + FunctionType.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\polymorphic_function\\atomic_function.py",
    "ast_data": "FunctionDef name:from_function_def arg:function_def arg:function_type arguments arg arg Assign Call If Call Call Raise Call Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "get_neighbour_values",
    "source_code": "def get_neighbour_values(self, name, orig_val, radius=1, include_self=False):\n    assert radius >= 1\n\n    def update(cur_val, inc=True):\n        if name == 'num_stages':\n            if inc:\n                return cur_val + 1\n            else:\n                return cur_val - 1\n        elif inc:\n            return cur_val * 2\n        else:\n            return cur_val // 2\n    out = []\n    cur_val = orig_val\n    for _ in range(radius):\n        cur_val = update(cur_val, True)\n        if self.value_too_large(name, cur_val):\n            break\n        out.append(cur_val)\n    cur_val = orig_val\n    for _ in range(radius):\n        cur_val = update(cur_val, False)\n        if cur_val <= 0:\n            break\n        out.append(cur_val)\n    if include_self:\n        out.append(orig_val)\n    return out",
    "docstring": "Get neighbour values in 'radius' steps. The original value is not returned as it's own neighbour.",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\runtime\\coordinate_descent_tuner.py",
    "ast_data": "FunctionDef name:get_neighbour_values arg:self arg:name arg:orig_val arg:radius arg:include_self arguments arg arg arg arg arg Compare FunctionDef name:update arg:cur_val arg:inc arguments arg arg If Compare If Return return:yes Return return:yes If Return return:yes Return return:yes Assign Assign For Call Assign Call If Call Call Assign For Call Assign Call If Compare Call If Call Return return:yes"
  },
  {
    "library": "django",
    "name": "constant_time_compare",
    "source_code": "def constant_time_compare(val1, val2):\n    return secrets.compare_digest(force_bytes(val1), force_bytes(val2))",
    "docstring": "Return True if the two strings are equal, False otherwise.",
    "type": "function",
    "file_path": "django\\django\\utils\\crypto.py",
    "ast_data": "FunctionDef name:constant_time_compare arg:val1 arg:val2 arguments arg arg Return return:yes Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "_pid_namespace",
    "source_code": "def _pid_namespace(pid: Optional[int]=None) -> int:\n    pid = pid or os.getpid()\n    link = _pid_namespace_link(pid)\n    return int(link[link.find('[') + 1:-1])",
    "docstring": "Returns the process's namespace id",
    "type": "function",
    "file_path": "pytorch\\torch\\utils\\_strobelight\\cli_function_profiler.py",
    "ast_data": "FunctionDef name:_pid_namespace arg:pid arguments arg Assign BoolOp Call Assign Call Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "placeholder_value",
    "source_code": "@doc_controls.do_not_doc_inheritable\ndef placeholder_value(self, placeholder_context):\n    return super().placeholder_value(placeholder_context)",
    "docstring": "See tf.types.experimental.TraceType base class.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\tensor_shape.py",
    "ast_data": "FunctionDef name:placeholder_value arg:self arg:placeholder_context arguments arg arg Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "set_intra_op_parallelism_threads",
    "source_code": "@tf_export('config.threading.set_intra_op_parallelism_threads')\ndef set_intra_op_parallelism_threads(num_threads):\n    context.context().intra_op_parallelism_threads = num_threads",
    "docstring": "Set number of threads used within an individual op for parallelism. Certain operations like matrix multiplication and reductions can utilize parallel threads for speed ups. A value of 0 means the system picks an appropriate number. Args: num_threads: Number of parallel threads",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\config.py",
    "ast_data": "FunctionDef name:set_intra_op_parallelism_threads arg:num_threads arguments arg Assign Call Call"
  },
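Usage sketch: thread-pool settings must be applied before TensorFlow initializes its runtime; the matching getter is shown for completeness:

```python
import tensorflow as tf

tf.config.threading.set_intra_op_parallelism_threads(4)
print(tf.config.threading.get_intra_op_parallelism_threads())   # 4
```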
  {
    "library": "pytorch",
    "name": "_get_all_pg_configs",
    "source_code": "def _get_all_pg_configs() -> list[dict[str, Any]]:\n    config_info: list[dict[str, Any]] = [_get_pg_config(pg) for pg in _world.pg_map.keys()]\n    return config_info",
    "docstring": "Return the pg configuration of all the process groups.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\distributed_c10d.py",
    "ast_data": "FunctionDef name:_get_all_pg_configs arguments Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "set_memory_growth",
    "source_code": "@tf_export('config.experimental.set_memory_growth')\ndef set_memory_growth(device, enable):\n    context.context().set_memory_growth(device, enable)",
    "docstring": "Set if memory growth should be enabled for a . If memory growth is enabled for a , the runtime initialization will not allocate all memory on the device. Memory growth cannot be configured on a with virtual devices configured. For example: >>> physical_devices = tf.config.list_physical_devices('GPU') >>> try: ... tf.config.experimental.set_memory_growth(physical_devices[0], True) ... except: ... # Invalid device or cannot modify virtual devices once initialized. ... pass Args: device: to configure enable: (Boolean) Whether to enable or disable memory growth Raises: ValueError: Invalid specified. RuntimeError: Runtime is already initialized.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\config.py",
    "ast_data": "FunctionDef name:set_memory_growth arg:device arg:enable arguments arg arg Call Call Call"
  },
  {
    "library": "matplotlib",
    "name": "set_tick_out",
    "source_code": "def set_tick_out(self, b):\n    self._tick_out = b",
    "docstring": "Set whether ticks are drawn inside or outside the axes.",
    "type": "method",
    "file_path": "matplotlib\\lib\\mpl_toolkits\\axisartist\\axis_artist.py",
    "ast_data": "FunctionDef name:set_tick_out arg:self arg:b arguments arg arg Assign"
  },
  {
    "library": "pytorch",
    "name": "create_low_rank_tensor",
    "source_code": "def create_low_rank_tensor(fill_random_values, rng):\n    if fill_random_values:\n        with torch.random.fork_rng(devices=[]):\n            torch.manual_seed(rng.randint(1000000000))\n            return torch.randn(square_side_length, state.matrix_approximation_rank, device='cpu', dtype=input_tensor.dtype).to(device)\n    else:\n        return torch.empty(square_side_length, state.matrix_approximation_rank, device=device, dtype=input_tensor.dtype)",
    "docstring": "Return a low-rank 2D tensor of square_side_length * matrix_approximation_rank.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\algorithms\\ddp_comm_hooks\\powerSGD_hook.py",
    "ast_data": "FunctionDef name:create_low_rank_tensor arg:fill_random_values arg:rng arguments arg arg If With Call Call Call Return return:yes Call Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "track_tf_optimizer",
    "source_code": "def track_tf_optimizer(tf_optimizer):\n    if context.executing_eagerly():\n        return\n    optimizers = _GRAPH_TF_OPTIMIZERS[None]\n    optimizers.add(tf_optimizer)",
    "docstring": "Tracks the given TF optimizer for initialization of its variables.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\backend.py",
    "ast_data": "FunctionDef name:track_tf_optimizer arg:tf_optimizer arguments arg If Call Return return:no Assign Call"
  },
  {
    "library": "matplotlib",
    "name": "get_mutation_scale",
    "source_code": "def get_mutation_scale(self):\n    return self._mutation_scale",
    "docstring": "Return the mutation scale. Returns ------- scalar",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\patches.py",
    "ast_data": "FunctionDef name:get_mutation_scale arg:self arguments arg Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "_correct_predecessor",
    "source_code": "def _correct_predecessor(reachability_plot, predecessor_plot, ordering, s, e):\n    while s < e:\n        if reachability_plot[s] > reachability_plot[e]:\n            return (s, e)\n        p_e = predecessor_plot[e]\n        for i in range(s, e):\n            if p_e == ordering[i]:\n                return (s, e)\n        e -= 1\n    return (None, None)",
    "docstring": "Correct for predecessors. Applies Algorithm 2 of [1]_. Input parameters are ordered by the computer OPTICS ordering. .. [1] Schubert, Erich, Michael Gertz. \"Improving the Cluster Structure Extracted from OPTICS Plots.\" Proc. of the Conference \"Lernen, Wissen, Daten, Analysen\" (LWDA) (2018): 318-329.",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\cluster\\_optics.py",
    "ast_data": "FunctionDef name:_correct_predecessor arg:reachability_plot arg:predecessor_plot arg:ordering arg:s arg:e arguments arg arg arg arg arg While Compare If Compare Return return:yes Assign For Call If Compare Return return:yes Return return:no"
  },
  {
    "library": "pandas",
    "name": "_concatenated_visible_rows",
    "source_code": "def _concatenated_visible_rows(obj, n, row_indices):\n    row_indices.extend([r + n for r in range(len(obj.index)) if r not in obj.hidden_rows])\n    n += len(obj.index)\n    for concatenated in obj.concatenated:\n        n = _concatenated_visible_rows(concatenated, n, row_indices)\n    return n",
    "docstring": "Extract all visible row indices recursively from concatenated stylers.",
    "type": "method",
    "file_path": "pandas\\pandas\\io\\formats\\style_render.py",
    "ast_data": "FunctionDef name:_concatenated_visible_rows arg:obj arg:n arg:row_indices arguments arg arg arg Call Call Call Compare Call For Assign Call Return return:yes"
  },
  {
    "library": "scipy",
    "name": "_block_diag",
    "source_code": "def _block_diag(self):\n    if self.ndim < 2:\n        raise ValueError('array must have atleast dim=2')\n    num_blocks = math.prod(self.shape[:-2])\n    n_col = self.shape[-1]\n    n_row = self.shape[-2]\n    res_arr = self.reshape((num_blocks, n_row, n_col))\n    new_coords = (res_arr.coords[1] + res_arr.coords[0] * res_arr.shape[1], res_arr.coords[2] + res_arr.coords[0] * res_arr.shape[2])\n    new_shape = (num_blocks * n_row, num_blocks * n_col)\n    return coo_array((self.data, tuple(new_coords)), shape=new_shape)",
    "docstring": "Converts an N-D COO array into a 2-D COO array in block diagonal form. Parameters: self (coo_array): An N-Dimensional COO sparse array. Returns: coo_array: A 2-Dimensional COO sparse array in block diagonal form.",
    "type": "function",
    "file_path": "scipy\\scipy\\sparse\\_coo.py",
    "ast_data": "FunctionDef name:_block_diag arg:self arguments arg If Compare Raise Call Assign Call Assign Assign Assign Call Assign Assign Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "_PositiveDefinite",
    "source_code": "class _PositiveDefinite(_Symmetric):\n\n    def check(self, value):\n        sym_check = super().check(value)\n        if not sym_check.all():\n            return sym_check\n        return torch.linalg.cholesky_ex(value).info.eq(0)",
    "docstring": "Constrain to positive-definite matrices.",
    "type": "class",
    "file_path": "pytorch\\torch\\distributions\\constraints.py",
    "ast_data": "ClassDef name:_PositiveDefinite FunctionDef name:check arg:self arg:value arguments arg arg Assign Call Call If Call Return return:yes Return return:yes Call Call"
  },
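Check sketch via the public singleton `torch.distributions.constraints.positive_definite`, which is an instance of the class above:

```python
import torch
from torch.distributions import constraints

A = torch.tensor([[2.0, 0.5], [0.5, 1.0]])   # symmetric, positive-definite
B = torch.tensor([[1.0, 2.0], [2.0, 1.0]])   # symmetric, indefinite
print(constraints.positive_definite.check(A))  # tensor(True)
print(constraints.positive_definite.check(B))  # tensor(False)
```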
  {
    "library": "pandas",
    "name": "is_platform_power",
    "source_code": "def is_platform_power() -> bool:\n    return platform.machine() in ('ppc64', 'ppc64le')",
    "docstring": "Checking if the running platform use Power architecture. Returns ------- bool True if the running platform uses ARM architecture.",
    "type": "function",
    "file_path": "pandas\\pandas\\compat\\__init__.py",
    "ast_data": "FunctionDef name:is_platform_power arguments Return return:yes Compare Call"
  },
  {
    "library": "tensorflow",
    "name": "capacity",
    "source_code": "@property\ndef capacity(self):\n    return self._capacity",
    "docstring": "The maximum number of elements of this staging area.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\data_flow_ops.py",
    "ast_data": "FunctionDef name:capacity arg:self arguments arg Return return:yes"
  },
  {
    "library": "numpy",
    "name": "assign_fields_by_name",
    "source_code": "@array_function_dispatch(_assign_fields_by_name_dispatcher)\ndef assign_fields_by_name(dst, src, zero_unassigned=True):\n    if dst.dtype.names is None:\n        dst[...] = src\n        return\n    for name in dst.dtype.names:\n        if name not in src.dtype.names:\n            if zero_unassigned:\n                dst[name] = 0\n        else:\n            assign_fields_by_name(dst[name], src[name], zero_unassigned)",
    "docstring": "Assigns values from one structured array to another by field name. Normally in numpy >= 1.14, assignment of one structured array to another copies fields \"by position\", meaning that the first field from the src is copied to the first field of the dst, and so on, regardless of field name. This function instead copies \"by field name\", such that fields in the dst are assigned from the identically named field in the src. This applies recursively for nested structures. This is how structure assignment worked in numpy >= 1.6 to <= 1.13. Parameters ---------- dst : ndarray src : ndarray The source and destination arrays during assignment. zero_unassigned : bool, optional If True, fields in the dst for which there was no matching field in the src are filled with the value 0 (zero). This was the behavior of numpy <= 1.13. If False, those fields are not modified.",
    "type": "function",
    "file_path": "numpy\\numpy\\lib\\recfunctions.py",
    "ast_data": "FunctionDef name:assign_fields_by_name arg:dst arg:src arg:zero_unassigned arguments arg arg arg If Compare Assign Return return:no For If Compare If Assign Call Call"
  },
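Usage sketch: copying by field name instead of by position; `c` has no match in `src`, so with the default `zero_unassigned=True` it is zero-filled:

```python
import numpy as np
from numpy.lib import recfunctions as rfn

src = np.array([(1, 2.0)], dtype=[("a", int), ("b", float)])
dst = np.zeros(1, dtype=[("b", float), ("c", int)])
rfn.assign_fields_by_name(dst, src)
print(dst)   # [(2., 0)] -- 'b' copied by name, 'c' zero-filled
```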
  {
    "library": "scipy",
    "name": "tocsc",
    "source_code": "def tocsc(self, copy=False):\n    return self.tocsr(copy=copy).tocsc(copy=False)",
    "docstring": "Convert this array/matrix to Compressed Sparse Column format. With copy=False, the data/indices may be shared between this array/matrix and the resultant csc_array/matrix.",
    "type": "method",
    "file_path": "scipy\\scipy\\sparse\\_base.py",
    "ast_data": "FunctionDef name:tocsc arg:self arg:copy arguments arg arg Return return:yes Call Call"
  },
  {
    "library": "matplotlib",
    "name": "get_anncoords",
    "source_code": "def get_anncoords(self):\n    return self._textcoords",
    "docstring": "Return the coordinate system to use for . See also *xycoords* in .",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\text.py",
    "ast_data": "FunctionDef name:get_anncoords arg:self arguments arg Return return:yes"
  },
  {
    "library": "cryptography",
    "name": "_get_u64",
    "source_code": "def _get_u64(data: memoryview) -> tuple[int, memoryview]:\n    if len(data) < 8:\n        raise ValueError('Invalid data')\n    return (int.from_bytes(data[:8], byteorder='big'), data[8:])",
    "docstring": "Uint64",
    "type": "function",
    "file_path": "cryptography\\src\\cryptography\\hazmat\\primitives\\serialization\\ssh.py",
    "ast_data": "FunctionDef name:_get_u64 arg:data arguments arg If Compare Call Raise Call Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "_draw_ps",
    "source_code": "def _draw_ps(self, ps, gc, rgbFace, *, fill=True, stroke=True):\n    write = self._pswriter.write\n    mightstroke = gc.get_linewidth() > 0 and (not self._is_transparent(gc.get_rgb()))\n    if not mightstroke:\n        stroke = False\n    if self._is_transparent(rgbFace):\n        fill = False\n    hatch = gc.get_hatch()\n    if mightstroke:\n        self.set_linewidth(gc.get_linewidth())\n        self.set_linejoin(gc.get_joinstyle())\n        self.set_linecap(gc.get_capstyle())\n        self.set_linedash(*gc.get_dashes())\n    if mightstroke or hatch:\n        self.set_color(*gc.get_rgb()[:3])\n    write('gsave\\n')\n    write(self._get_clip_cmd(gc))\n    write(ps.strip())\n    write('\\n')\n    if fill:\n        if stroke or hatch:\n            write('gsave\\n')\n        self.set_color(*rgbFace[:3], store=False)\n        write('fill\\n')\n        if stroke or hatch:\n            write('grestore\\n')\n    if hatch:\n        hatch_name = self.create_hatch(hatch, gc.get_hatch_linewidth())\n        write('gsave\\n')\n        write(_nums_to_str(*gc.get_hatch_color()[:3]))\n        write(f' {hatch_name} setpattern fill grestore\\n')\n    if stroke:\n        write('stroke\\n')\n    write('grestore\\n')",
    "docstring": "Emit the PostScript snippet *ps* with all the attributes from *gc* applied. *ps* must consist of PostScript commands to construct a path. The *fill* and/or *stroke* kwargs can be set to False if the *ps* string already includes filling and/or stroking, in which case is just supplying properties and clipping.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\backends\\backend_ps.py",
    "ast_data": "FunctionDef name:_draw_ps arg:self arg:ps arg:gc arg:rgbFace arguments arg arg arg arg arg arg Assign Assign BoolOp Compare Call Call Call If Assign If Call Assign Assign Call If Call Call Call Call Call Call Call Call If BoolOp Call Call Call Call Call Call Call Call If If BoolOp Call Call Call If BoolOp Call If Assign Call Call Call Call Call Call Call If Call Call"
  },
  {
    "library": "matplotlib",
    "name": "get_aliases",
    "source_code": "def get_aliases(self):\n    names = [name for name in dir(self.o) if name.startswith(('set_', 'get_')) and callable(getattr(self.o, name))]\n    aliases = {}\n    for name in names:\n        func = getattr(self.o, name)\n        if not self.is_alias(func):\n            continue\n        propname = re.search(f'`({name[:4]}.*)`', inspect.getdoc(func)).group(1)\n        aliases.setdefault(propname[4:], set()).add(name[4:])\n    return aliases",
    "docstring": "Get a dict mapping property fullnames to sets of aliases for each alias in the :class:. e.g., for lines:: {'markerfacecolor': {'mfc'}, 'linewidth' : {'lw'}, }",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\artist.py",
    "ast_data": "FunctionDef name:get_aliases arg:self arguments arg Assign Call BoolOp Call Call Call Assign For Assign Call If Call Assign Call Call Call Call Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "load_output",
    "source_code": "def load_output(self) -> Union[WorkerTimerArgs, WorkerOutput, WorkerFailure]:\n    result = self.load()\n    assert isinstance(result, (WorkerTimerArgs, WorkerOutput, WorkerFailure))\n    return result",
    "docstring": "Convenience method for type safe loading.",
    "type": "method",
    "file_path": "pytorch\\benchmarks\\instruction_counts\\worker\\main.py",
    "ast_data": "FunctionDef name:load_output arg:self arguments arg Assign Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "timestamp_isoformat",
    "source_code": "def timestamp_isoformat(self):\n    return datetime.fromtimestamp(self.timestamp).isoformat(sep='_')",
    "docstring": "Return timestamp in ISO format (YYYY-MM-DD_HH:MM:SS).",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\elastic\\multiprocessing\\errors\\__init__.py",
    "ast_data": "FunctionDef name:timestamp_isoformat arg:self arguments arg Return return:yes Call Call"
  },
  {
    "library": "django",
    "name": "_check_raw_id_fields",
    "source_code": "def _check_raw_id_fields(self, obj):\n    if not isinstance(obj.raw_id_fields, (list, tuple)):\n        return must_be('a list or tuple', option='raw_id_fields', obj=obj, id='admin.E001')\n    else:\n        return list(chain.from_iterable((self._check_raw_id_fields_item(obj, field_name, 'raw_id_fields[%d]' % index) for index, field_name in enumerate(obj.raw_id_fields))))",
    "docstring": "Check that only contains field names that are listed on the model.",
    "type": "method",
    "file_path": "django\\django\\contrib\\admin\\checks.py",
    "ast_data": "FunctionDef name:_check_raw_id_fields arg:self arg:obj arguments arg arg If Call Return return:yes Call Return return:yes Call Call Call Call"
  },
  {
    "library": "matplotlib",
    "name": "set_data_3d",
    "source_code": "def set_data_3d(self, *args):\n    if len(args) == 1:\n        args = args[0]\n    for name, xyz in zip('xyz', args):\n        if not np.iterable(xyz):\n            raise RuntimeError(f'{name} must be a sequence')\n    self._verts3d = args\n    self.stale = True",
    "docstring": "Set the x, y and z data Parameters ---------- x : array-like The x-data to be plotted. y : array-like The y-data to be plotted. z : array-like The z-data to be plotted. Notes ----- Accepts x, y, z arguments or a single array-like (x, y, z)",
    "type": "method",
    "file_path": "matplotlib\\lib\\mpl_toolkits\\mplot3d\\art3d.py",
    "ast_data": "FunctionDef name:set_data_3d arg:self arguments arg arg If Compare Call Assign For Call If Call Raise Call Assign Assign"
  },
  {
    "library": "tensorflow",
    "name": "_get_mapped_registered_restore_fn",
    "source_code": "def _get_mapped_registered_restore_fn(fn: Callable[..., tensor_lib.Tensor], trackables: Sequence[base.Trackable], call_with_mapped_captures: MappedCapturesCallable) -> Callable[..., tensor_lib.Tensor]:\n\n    def restore_fn(merged_prefix: tensor_lib.Tensor) -> tensor_lib.Tensor:\n        return fn(trackables=trackables, merged_prefix=merged_prefix)\n    if call_with_mapped_captures is None:\n        return restore_fn\n    else:\n        tf_fn = def_function.function(restore_fn, autograph=False)\n        concrete = tf_fn.get_concrete_function(merged_prefix=tensor_spec.TensorSpec(shape=(), dtype=dtypes.string))\n\n        def restore_fn_with_replaced_captures(merged_prefix: tensor_lib.Tensor) -> tensor_lib.Tensor:\n            return call_with_mapped_captures(concrete, [merged_prefix])\n        return restore_fn_with_replaced_captures",
    "docstring": "Converts the function to a python or tf.function with a single file arg.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\checkpoint\\functional_saver.py",
    "ast_data": "FunctionDef name:_get_mapped_registered_restore_fn arg:fn arg:trackables arg:call_with_mapped_captures arguments arg arg arg FunctionDef name:restore_fn arg:merged_prefix arguments arg Return return:yes Call If Compare Return return:yes Assign Call Assign Call Call FunctionDef name:restore_fn_with_replaced_captures arg:merged_prefix arguments arg Return return:yes Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "set_inverted",
    "source_code": "def set_inverted(self, inverted):\n    a, b = self.get_view_interval()\n    self._set_lim(*sorted((a, b), reverse=bool(inverted)), auto=None)",
    "docstring": "Set whether this Axis is oriented in the \"inverse\" direction. The \"normal\" direction is increasing to the right for the x-axis and to the top for the y-axis; the \"inverse\" direction is increasing to the left for the x-axis and to the bottom for the y-axis.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\axis.py",
    "ast_data": "FunctionDef name:set_inverted arg:self arg:inverted arguments arg arg Assign Call Call Call Call"
  },
  {
    "library": "numpy",
    "name": "hermegauss",
    "source_code": "def hermegauss(deg):\n    ideg = pu._as_int(deg, 'deg')\n    if ideg <= 0:\n        raise ValueError('deg must be a positive integer')\n    c = np.array([0] * deg + [1])\n    m = hermecompanion(c)\n    x = la.eigvalsh(m)\n    dy = _normed_hermite_e_n(x, ideg)\n    df = _normed_hermite_e_n(x, ideg - 1) * np.sqrt(ideg)\n    x -= dy / df\n    fm = _normed_hermite_e_n(x, ideg - 1)\n    fm /= np.abs(fm).max()\n    w = 1 / (fm * fm)\n    w = (w + w[::-1]) / 2\n    x = (x - x[::-1]) / 2\n    w *= np.sqrt(2 * np.pi) / w.sum()\n    return (x, w)",
    "docstring": "Gauss-HermiteE quadrature. Computes the sample points and weights for Gauss-HermiteE quadrature. These sample points and weights will correctly integrate polynomials of degree :math: or less over the interval :math: with the weight function :math:. Parameters ---------- deg : int Number of sample points and weights. It must be >= 1. Returns ------- x : ndarray 1-D ndarray containing the sample points. y : ndarray 1-D ndarray containing the weights. Notes ----- The results have only been tested up to degree 100, higher degrees may be problematic. The weights are determined by using the fact that .. math:: w_k = c / (He'_n(x_k) * He_{n-1}(x_k)) where :math: is a constant independent of :math: and :math: is the k'th root of :math:, and then scaling the results to get the right value when integrating 1.",
    "type": "function",
    "file_path": "numpy\\numpy\\polynomial\\hermite_e.py",
    "ast_data": "FunctionDef name:hermegauss arg:deg arguments arg Assign Call If Compare Raise Call Assign Call Assign Call Assign Call Assign Call Assign Call Call Assign Call Call Call Assign Assign Assign Call Call Return return:yes"
  },
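Quadrature sketch: against the weight exp(-x**2/2), the degree-5 rule integrates x**2 exactly, and the answer is sqrt(2*pi) (the second moment of an unnormalized standard Gaussian):

```python
import numpy as np
from numpy.polynomial.hermite_e import hermegauss

x, w = hermegauss(5)
approx = np.sum(w * x**2)                       # integral of x^2 * exp(-x^2/2) dx
print(np.isclose(approx, np.sqrt(2 * np.pi)))   # True
```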
  {
    "library": "tensorflow",
    "name": "assign",
    "source_code": "def assign(self, value, use_locking=False, name=None, read_value=True):\n    raise NotImplementedError",
    "docstring": "Assigns a new value to the variable. This is essentially a shortcut for . Args: value: A . The new value for this variable. use_locking: If , use locking during the assignment. name: The name of the operation to be created read_value: if True, will return something which evaluates to the new value of the variable; if False will return the assign op. Returns: The updated variable. If is false, instead returns None in Eager mode and the assign op in graph mode.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\variables.py",
    "ast_data": "FunctionDef name:assign arg:self arg:value arg:use_locking arg:name arg:read_value arguments arg arg arg arg arg Raise"
  },
  {
    "library": "scikit-learn",
    "name": "_accumulate_prediction",
    "source_code": "def _accumulate_prediction(predict, X, out, lock):\n    prediction = predict(X, check_input=False)\n    with lock:\n        if len(out) == 1:\n            out[0] += prediction\n        else:\n            for i in range(len(out)):\n                out[i] += prediction[i]",
    "docstring": "This is a utility function for joblib's Parallel. It can't go locally in ForestClassifier or ForestRegressor, because joblib complains that it cannot pickle it when placed there.",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\ensemble\\_forest.py",
    "ast_data": "FunctionDef name:_accumulate_prediction arg:predict arg:X arg:out arg:lock arguments arg arg arg arg Assign Call With If Compare Call For Call Call"
  },
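A minimal sketch of how a helper like this is driven by joblib's threading backend; the trees, data, and the `accumulate` wrapper below are illustrative stand-ins, not scikit-learn internals.

```python
import threading

import numpy as np
from joblib import Parallel, delayed
from sklearn.tree import DecisionTreeRegressor

rng = np.random.default_rng(0)
X = rng.random((32, 3))
y = X.sum(axis=1)
trees = [DecisionTreeRegressor(random_state=i).fit(X, y) for i in range(4)]

out = [np.zeros(len(X))]   # shared accumulator, one array per output
lock = threading.Lock()    # guards the in-place += across threads

def accumulate(predict, X, out, lock):
    prediction = predict(X)
    with lock:
        out[0] += prediction

Parallel(n_jobs=2, backend="threading")(
    delayed(accumulate)(t.predict, X, out, lock) for t in trees
)
mean_prediction = out[0] / len(trees)
```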
  {
    "library": "tensorflow",
    "name": "from_row_starts",
    "source_code": "@classmethod\ndef from_row_starts(cls, row_starts, nvals, validate=True, dtype=None, dtype_hint=None):\n    if not isinstance(validate, bool):\n        raise TypeError('validate must have type bool')\n    with ops.name_scope(None, 'RowPartitionFromRowStarts', [row_starts]):\n        row_starts = cls._convert_row_partition(row_starts, 'row_starts', dtype_hint=dtype_hint, dtype=dtype)\n        row_starts.shape.assert_has_rank(1)\n        nvals = math_ops.cast(nvals, row_starts.dtype)\n        if validate:\n            msg = 'Arguments to from_row_starts do not form a valid RaggedTensor'\n            checks = [check_ops.assert_rank(row_starts, 1, message=msg), _assert_zero(row_starts[:1], message=msg), _assert_monotonic_increasing(row_starts, message=msg), check_ops.assert_less_equal(row_starts[-1:], nvals, message=msg)]\n            row_starts = control_flow_ops.with_dependencies(checks, row_starts)\n        row_splits = array_ops.concat([row_starts, [nvals]], axis=0)\n        return cls(row_splits=row_splits, nvals=nvals, internal=_row_partition_factory_key)",
    "docstring": "Creates a with rows partitioned by . Equivalent to: . Args: row_starts: A 1-D integer tensor with shape . Must be nonnegative and sorted in ascending order. If , then must be zero. nvals: A scalar tensor indicating the number of values. validate: If true, then use assertions to check that the arguments form a valid . dtype: Optional dtype for the RowPartition. If missing, the type is inferred from the type of , dtype_hint, or tf.int64. dtype_hint: Optional dtype for the RowPartition, used when dtype is None. In some cases, a caller may not have a dtype in mind when converting to a tensor, so dtype_hint can be used as a soft preference. If the conversion to is not possible, this argument has no effect. Returns: A .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\row_partition.py",
    "ast_data": "FunctionDef name:from_row_starts arg:cls arg:row_starts arg:nvals arg:validate arg:dtype arg:dtype_hint arguments arg arg arg arg arg arg If Call Raise Call With Call Assign Call Call Assign Call If Assign Assign Call Call Call Call Assign Call Assign Call Return return:yes Call"
  },
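`RowPartition` itself is internal, but the same partitioning scheme is exposed publicly through `tf.RaggedTensor.from_row_starts`:

```python
import tensorflow as tf

values = [3, 1, 4, 1, 5, 9, 2, 6]
# row_starts[0] must be 0; each start marks where a row begins in `values`.
rt = tf.RaggedTensor.from_row_starts(values, row_starts=[0, 4, 4, 7])
print(rt)  # <tf.RaggedTensor [[3, 1, 4, 1], [], [5, 9, 2], [6]]>
```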
  {
    "library": "scipy",
    "name": "step_size_to_bound",
    "source_code": "def step_size_to_bound(x, s, lb, ub):\n    non_zero = np.nonzero(s)\n    s_non_zero = s[non_zero]\n    steps = np.empty_like(x)\n    steps.fill(np.inf)\n    with np.errstate(over='ignore'):\n        steps[non_zero] = np.maximum((lb - x)[non_zero] / s_non_zero, (ub - x)[non_zero] / s_non_zero)\n    min_step = np.min(steps)\n    return (min_step, np.equal(steps, min_step) * np.sign(s).astype(int))",
    "docstring": "Compute a min_step size required to reach a bound. The function computes a positive scalar t, such that x + s * t is on the bound. Returns ------- step : float Computed step. Non-negative value. hits : ndarray of int with shape of x Each element indicates whether a corresponding variable reaches the bound: * 0 - the bound was not hit. * -1 - the lower bound was hit. * 1 - the upper bound was hit.",
    "type": "function",
    "file_path": "scipy\\scipy\\optimize\\_lsq\\common.py",
    "ast_data": "FunctionDef name:step_size_to_bound arg:x arg:s arg:lb arg:ub arguments arg arg arg arg Assign Call Assign Assign Call Call With Call Assign Call Assign Call Return return:yes Call Call Call"
  },
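A worked example of the computation, re-implemented standalone since the SciPy helper is private:

```python
import numpy as np

def step_size_to_bound(x, s, lb, ub):
    # Same logic as the SciPy helper above, reproduced for illustration.
    non_zero = np.nonzero(s)
    s_non_zero = s[non_zero]
    steps = np.full_like(x, np.inf)
    with np.errstate(over="ignore"):
        steps[non_zero] = np.maximum((lb - x)[non_zero] / s_non_zero,
                                     (ub - x)[non_zero] / s_non_zero)
    min_step = np.min(steps)
    return min_step, np.equal(steps, min_step) * np.sign(s).astype(int)

x = np.array([0.0, 0.5])
s = np.array([1.0, -1.0])                          # right in dim 0, left in dim 1
lb, ub = np.array([-1.0, 0.0]), np.array([2.0, 1.0])
print(step_size_to_bound(x, s, lb, ub))            # (0.5, array([ 0, -1]))
```

In the example, dimension 1 reaches its lower bound after a step of 0.5, before dimension 0 reaches either of its bounds, so `hits` is `[0, -1]`.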
  {
    "library": "matplotlib",
    "name": "__init__",
    "source_code": "def __init__(self, path, *, zs=(), zdir='z', axlim_clip=False, **kwargs):\n    Patch.__init__(self, **kwargs)\n    self.set_3d_properties(path, zs, zdir, axlim_clip)",
    "docstring": "Parameters ---------- path : zs : float The location along the *zdir* axis in 3D space to position the path patch. zdir : {'x', 'y', 'z', 3-tuple} Plane to plot path patch orthogonal to. Default: 'z'. See for a description of the values. axlim_clip : bool, default: False Whether to hide path patches with a point outside the axes view limits. .. versionadded:: 3.10",
    "type": "method",
    "file_path": "matplotlib\\lib\\mpl_toolkits\\mplot3d\\art3d.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:path arguments arg arg arg arg arg arg Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_create_device_transfers",
    "source_code": "def _create_device_transfers(self, tensors):\n    if not isinstance(tensors, (tuple, list)):\n        tensors = [tensors]\n    curr_device_scope = control_flow_ops.no_op().device\n    if curr_device_scope != self._coloc_op.device:\n        tensors = [array_ops.identity(t) for t in tensors]\n    return tensors",
    "docstring": "Encode inter-device transfers if the current device is not the same as the Staging Area's device.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\data_flow_ops.py",
    "ast_data": "FunctionDef name:_create_device_transfers arg:self arg:tensors arguments arg arg If Call Assign Assign Call If Compare Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "debug_graph_def",
    "source_code": "@property\ndef debug_graph_def(self):\n    return self._debug_graph_def",
    "docstring": "The debugger-decorated GraphDef.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\debug\\lib\\debug_graphs.py",
    "ast_data": "FunctionDef name:debug_graph_def arg:self arguments arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "dump",
    "source_code": "def dump(dot_file_name: str):\n    return torch._C._lazy._dump_ir_cache(dot_file_name)",
    "docstring": "Dump TrieCache in the dot format",
    "type": "function",
    "file_path": "pytorch\\torch\\_lazy\\ir_cache.py",
    "ast_data": "FunctionDef name:dump arg:dot_file_name arguments arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "file_path",
    "source_code": "@property\ndef file_path(self):\n    return self._file_path",
    "docstring": "Path to the file which stores the value of the dumped tensor.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\debug\\lib\\debug_data.py",
    "ast_data": "FunctionDef name:file_path arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "add_event",
    "source_code": "def add_event(self, event):\n    if not self._closed:\n        event_pb = event.SerializeToString()\n        self._session.run(self._add_event_op, feed_dict={self._event_placeholder: event_pb})",
    "docstring": "Adds an event to the event file. Args: event: An protocol buffer.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\summary\\writer\\event_file_writer_v2.py",
    "ast_data": "FunctionDef name:add_event arg:self arg:event arguments arg arg If Assign Call Call"
  },
  {
    "library": "scipy",
    "name": "_nc_hypergeom_mean_inverse",
    "source_code": "def _nc_hypergeom_mean_inverse(x, M, n, N):\n    nc = _solve(lambda nc: nchypergeom_fisher.mean(M, n, N, nc) - x)\n    return nc",
    "docstring": "For the given noncentral hypergeometric parameters x, M, n,and N (table[0,0], total, row 0 sum and column 0 sum, resp., of a 2x2 contingency table), find the noncentrality parameter of Fisher's noncentral hypergeometric distribution whose mean is x.",
    "type": "function",
    "file_path": "scipy\\scipy\\stats\\_odds_ratio.py",
    "ast_data": "FunctionDef name:_nc_hypergeom_mean_inverse arg:x arg:M arg:n arg:N arguments arg arg arg arg Assign Call arguments arg Call Return return:yes"
  },
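The public entry point that exercises this inversion is `scipy.stats.contingency.odds_ratio`, which computes the conditional MLE odds ratio for a 2x2 table; the table values below are illustrative.

```python
from scipy.stats.contingency import odds_ratio

# 2x2 contingency table: rows are groups, columns are outcomes.
res = odds_ratio([[7, 17], [15, 5]])
print(res.statistic)                                   # conditional MLE odds ratio
print(res.confidence_interval(confidence_level=0.95))  # its confidence interval
```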
  {
    "library": "tensorflow",
    "name": "get_reshard_callback",
    "source_code": "@abc.abstractmethod\ndef get_reshard_callback(self, name: str) -> Optional[ReshardCallback]:\n    pass",
    "docstring": "Returns the reshard callback for the trackable with .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\checkpoint\\checkpoint_adapter.py",
    "ast_data": "FunctionDef name:get_reshard_callback arg:self arg:name arguments arg arg"
  },
  {
    "library": "pytorch",
    "name": "BisectionResult",
    "source_code": "@dataclasses.dataclass\nclass BisectionResult:\n    backend: str\n    subsystem: Optional[str] = None\n    bisect_number: Optional[int] = None\n    debug_info: Optional[str] = None",
    "docstring": "backend: torch.compile backend responsible for failure subsystem: optional, registered component identified for failure bisect_number: optional, number of times the subsystem needed to be applied to trigger failure debug_info: associated info of the triggering bisect application of subsystem",
    "type": "class",
    "file_path": "pytorch\\torch\\_inductor\\compiler_bisector.py",
    "ast_data": "ClassDef name:BisectionResult"
  },
  {
    "library": "matplotlib",
    "name": "get_facecolor",
    "source_code": "def get_facecolor(self):\n    return self._facecolor",
    "docstring": "Return the face color.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\patches.py",
    "ast_data": "FunctionDef name:get_facecolor arg:self arguments arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "register_extern_hook",
    "source_code": "def register_extern_hook(self, hook: ActionHook) -> RemovableHandle:\n    handle = RemovableHandle(self._extern_hooks)\n    self._extern_hooks[handle.id] = hook\n    return handle",
    "docstring": "Registers an extern hook on the exporter. The hook will be called each time a module matches against an :meth: pattern. It should have the following signature:: hook(exporter: PackageExporter, module_name: str) -> None Hooks will be called in order of registration. Returns: :class:: A handle that can be used to remove the added hook by calling ``.",
    "type": "method",
    "file_path": "pytorch\\torch\\package\\package_exporter.py",
    "ast_data": "FunctionDef name:register_extern_hook arg:self arg:hook arguments arg arg Assign Call Assign Return return:yes"
  },
  {
    "library": "pandas",
    "name": "NDFrameDescriberAbstract",
    "source_code": "class NDFrameDescriberAbstract(ABC):\n\n    def __init__(self, obj: DataFrame | Series) -> None:\n        self.obj = obj\n\n    @abstractmethod\n    def describe(self, percentiles: Sequence[float] | np.ndarray) -> DataFrame | Series:\n        pass",
    "docstring": "Abstract class for describing dataframe or series. Parameters ---------- obj : Series or DataFrame Object to be described.",
    "type": "class",
    "file_path": "pandas\\pandas\\core\\methods\\describe.py",
    "ast_data": "ClassDef name:NDFrameDescriberAbstract FunctionDef name:__init__ arg:self arg:obj arguments arg arg Assign FunctionDef name:describe arg:self arg:percentiles arguments arg arg"
  },
  {
    "library": "scipy",
    "name": "nllf",
    "source_code": "def nllf(self, params=None, data=None):\n    params = params if params is not None else self.params\n    data = data if data is not None else self._data\n    return self._dist.nnlf(theta=params, x=data)",
    "docstring": "Negative log-likelihood function Evaluates the negative of the log-likelihood function of the provided data at the provided parameters. Parameters ---------- params : tuple, optional The shape parameters, location, and (if applicable) scale of the distribution as a single tuple. Default is the maximum likelihood estimates (``). data : array_like, optional The data for which the log-likelihood function is to be evaluated. Default is the data to which the distribution was fit. Returns ------- nllf : float The negative of the log-likelihood function.",
    "type": "method",
    "file_path": "scipy\\scipy\\stats\\_fit.py",
    "ast_data": "FunctionDef name:nllf arg:self arg:params arg:data arguments arg arg arg Assign Compare Assign Compare Return return:yes Call"
  },
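`nllf` lives on the `FitResult` returned by `scipy.stats.fit`; a small sketch (the data and bounds are illustrative):

```python
import numpy as np
from scipy import stats

rng = np.random.default_rng(0)
data = stats.norm.rvs(loc=2.0, scale=3.0, size=200, random_state=rng)

res = stats.fit(stats.norm, data, bounds={"loc": (-10, 10), "scale": (0.1, 10)})
print(res.nllf())                   # NLL at the fitted (MLE) parameters
print(res.nllf(params=(0.0, 1.0)))  # NLL at a different (loc, scale)
```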
  {
    "library": "pytorch",
    "name": "get_proxy_mode",
    "source_code": "def get_proxy_mode() -> Optional[ProxyTorchDispatchMode]:\n    pre_dispatch_mode = torch._ops._get_dispatch_mode_pre_dispatch(torch._C._TorchDispatchModeKey.PROXY)\n    mode = torch._C._get_dispatch_mode(torch._C._TorchDispatchModeKey.PROXY)\n    assert pre_dispatch_mode is None or mode is None, f'pre_dispatch_mode={pre_dispatch_mode}, mode={mode}'\n    return pre_dispatch_mode or mode",
    "docstring": "Current the currently active proxy tracing mode, or None if we are not currently tracing. This includes pre-dispatch proxy tracing.",
    "type": "function",
    "file_path": "pytorch\\torch\\fx\\experimental\\proxy_tensor.py",
    "ast_data": "FunctionDef name:get_proxy_mode arguments Assign Call Assign Call BoolOp Compare Compare Return return:yes BoolOp"
  },
  {
    "library": "pytorch",
    "name": "_group_tensors_by_device_dtype_and_is_multidim",
    "source_code": "def _group_tensors_by_device_dtype_and_is_multidim(tensorlists: TensorListList) -> dict[tuple[Optional[torch.device], Optional[torch.dtype], bool], list[list[Optional[Tensor]]]]:\n    grouped_tensors = Optimizer._group_tensors_by_device_and_dtype(tensorlists)\n    ultra_grouped_tensors: dict[tuple[Optional[torch.device], Optional[torch.dtype], bool], list[list[Optional[Tensor]]]] = {}\n    for (device, dtype), (tensorlists, _) in grouped_tensors.items():\n        matrix_key = (device, dtype, True)\n        vector_key = (device, dtype, False)\n        for j, tensor in enumerate(tensorlists[1]):\n            assert tensor is not None, 'grad should not be None'\n            if tensor.dim() > 1:\n                if matrix_key not in ultra_grouped_tensors:\n                    ultra_grouped_tensors[matrix_key] = [[] for _ in tensorlists]\n                for i in range(len(tensorlists)):\n                    ultra_grouped_tensors[matrix_key][i].append(tensorlists[i][j])\n            else:\n                if vector_key not in ultra_grouped_tensors:\n                    ultra_grouped_tensors[vector_key] = [[] for _ in tensorlists]\n                for i in range(len(tensorlists)):\n                    ultra_grouped_tensors[vector_key][i].append(tensorlists[i][j])\n    return ultra_grouped_tensors",
    "docstring": "Groups tensors by device, dtype, AND multidimensionality -- whether the tensor has multiple dims or just one dim (is a vector). This allows the foreach impl of Adafactor to assume that every group of params will either be factored or not.",
    "type": "function",
    "file_path": "pytorch\\torch\\optim\\_adafactor.py",
    "ast_data": "FunctionDef name:_group_tensors_by_device_dtype_and_is_multidim arg:tensorlists arguments arg Assign Call For Call Assign Assign For Call Compare If Compare Call If Compare Assign For Call Call Call If Compare Assign For Call Call Call Return return:yes"
  },
  {
    "library": "django",
    "name": "timezone_today",
    "source_code": "def timezone_today():\n    if settings.USE_TZ:\n        return timezone.localdate()\n    else:\n        return datetime.date.today()",
    "docstring": "Return the current date in the current time zone.",
    "type": "function",
    "file_path": "django\\django\\views\\generic\\dates.py",
    "ast_data": "FunctionDef name:timezone_today arguments If Return return:yes Call Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "BadNominalValue",
    "source_code": "class BadNominalValue(ArffException):\n\n    def __init__(self, value):\n        super().__init__()\n        self.message = 'Data value %s not found in nominal declaration, ' % value + 'at line %d.'",
    "docstring": "Error raised when a value in used in some data instance but is not declared into it respective attribute declaration.",
    "type": "class",
    "file_path": "scikit-learn\\sklearn\\externals\\_arff.py",
    "ast_data": "ClassDef name:BadNominalValue FunctionDef name:__init__ arg:self arg:value arguments arg arg Call Call Assign"
  },
  {
    "library": "matplotlib",
    "name": "_get_grid_bbox",
    "source_code": "def _get_grid_bbox(self, renderer):\n    boxes = [cell.get_window_extent(renderer) for (row, col), cell in self._cells.items() if row >= 0 and col >= 0]\n    bbox = Bbox.union(boxes)\n    return bbox.transformed(self.get_transform().inverted())",
    "docstring": "Get a bbox, in axes coordinates for the cells. Only include those in the range (0, 0) to (maxRow, maxCol).",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\table.py",
    "ast_data": "FunctionDef name:_get_grid_bbox arg:self arg:renderer arguments arg arg Assign Call Call BoolOp Compare Compare Assign Call Return return:yes Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "fp16_compress_hook",
    "source_code": "def fp16_compress_hook(state: LowPrecisionState, grad: torch.Tensor, output: Optional[torch.Tensor]=None):\n    fp16_hook = functools.partial(_low_precision_hook, torch.float16)\n    return fp16_hook(state, grad, output)",
    "docstring": "Implement FSDP communication hook for a simple gradient compression approach. Casts ``.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\algorithms\\_comm_hooks\\default_hooks.py",
    "ast_data": "FunctionDef name:fp16_compress_hook arg:state arg:grad arg:output arguments arg arg arg Assign Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "get_isolated_graphmodule",
    "source_code": "def get_isolated_graphmodule(func: Callable, args: tuple[object, ...], kwargs: dict[str, object], tracing_mode: str='real', decomposition_table: Optional[Mapping[OpOverload, Callable]]=None) -> GraphModule:\n    wrapped, all_args = wrapper_and_args_for_make_fx(func, args, kwargs)\n    with disable_proxy_modes_tracing():\n        gm = make_fx(wrapped, decomposition_table=decomposition_table, tracing_mode=tracing_mode)(all_args)\n    return gm",
    "docstring": "A helper function used to get the GraphModule for the given func. It's expected to be used in the ProxyTensor tracing context. It detaches the args and kwargs from the current tracer so that the trace of the current graph module can be created without any side-effects.",
    "type": "function",
    "file_path": "pytorch\\torch\\fx\\experimental\\proxy_tensor.py",
    "ast_data": "FunctionDef name:get_isolated_graphmodule arg:func arg:args arg:kwargs arg:tracing_mode arg:decomposition_table arguments arg arg arg arg arg Assign Call With Call Assign Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_get_session",
    "source_code": "def _get_session(op_input_list=()):\n    global _SESSION\n    default_session = ops.get_default_session()\n    if default_session is not None:\n        session = default_session\n    else:\n        if ops.inside_function():\n            raise RuntimeError('Cannot get session inside Tensorflow graph function.')\n        if getattr(_SESSION, 'session', None) is None or _SESSION.session.graph is not _current_graph(op_input_list):\n            if distribute_lib.has_strategy():\n                configure_and_create_distributed_session(distribute_lib.get_strategy())\n            else:\n                _SESSION.session = session_module.Session(config=get_default_session_config())\n        session = _SESSION.session\n    return session",
    "docstring": "Returns the session object for the current thread.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\backend.py",
    "ast_data": "FunctionDef name:_get_session arg:op_input_list arguments arg Assign Call If Compare Assign If Call Raise Call If BoolOp Compare Call Compare Call If Call Call Call Assign Call Call Assign Return return:yes"
  },
  {
    "library": "django",
    "name": "close_if_health_check_failed",
    "source_code": "def close_if_health_check_failed(self):\n    if self.connection is None or not self.health_check_enabled or self.health_check_done:\n        return\n    if not self.is_usable():\n        self.close()\n    self.health_check_done = True",
    "docstring": "Close existing connection if it fails a health check.",
    "type": "method",
    "file_path": "django\\django\\db\\backends\\base\\base.py",
    "ast_data": "FunctionDef name:close_if_health_check_failed arg:self arguments arg If BoolOp Compare Return return:no If Call Call Assign"
  },
  {
    "library": "pandas",
    "name": "set_attr",
    "source_code": "def set_attr(self) -> None:\n    setattr(self.attrs, self.kind_attr, self.values)\n    setattr(self.attrs, self.meta_attr, self.meta)\n    assert self.dtype is not None\n    setattr(self.attrs, self.dtype_attr, self.dtype)",
    "docstring": "set the data for this column",
    "type": "method",
    "file_path": "pandas\\pandas\\io\\pytables.py",
    "ast_data": "FunctionDef name:set_attr arg:self arguments arg Call Call Compare Call"
  },
  {
    "library": "pandas",
    "name": "_getitem_slice",
    "source_code": "def _getitem_slice(self, slobj: slice) -> Self:\n    res = self._range[slobj]\n    return type(self)._simple_new(res, name=self._name)",
    "docstring": "Fastpath for __getitem__ when we know we have a slice.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\indexes\\range.py",
    "ast_data": "FunctionDef name:_getitem_slice arg:self arg:slobj arguments arg arg Assign Return return:yes Call Call"
  },
  {
    "library": "kornia",
    "name": "apply_transform_box",
    "source_code": "def apply_transform_box(self, input: Boxes, params: Dict[str, Tensor], flags: Dict[str, Any], transform: Optional[Tensor]=None) -> Boxes:\n    raise NotImplementedError",
    "docstring": "Process boxes corresponding to the inputs that are transformed.",
    "type": "method",
    "file_path": "kornia\\kornia\\augmentation\\base.py",
    "ast_data": "FunctionDef name:apply_transform_box arg:self arg:input arg:params arg:flags arg:transform arguments arg arg arg arg arg Raise"
  },
  {
    "library": "tensorflow",
    "name": "input",
    "source_code": "@property\ndef input(self):\n    if not self._inbound_nodes:\n        raise AttributeError('Layer ' + self.name + ' is not connected, no input to return.')\n    return self._get_node_attribute_at_index(0, 'input_tensors', 'input')",
    "docstring": "Retrieves the input tensor(s) of a layer. Only applicable if the layer has exactly one input, i.e. if it is connected to one incoming layer. Returns: Input tensor or list of input tensors. Raises: RuntimeError: If called in Eager mode. AttributeError: If no inbound nodes are found.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\base_layer.py",
    "ast_data": "FunctionDef name:input arg:self arguments arg If Raise Call Return return:yes Call"
  },
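Typical use of the `input` property on a connected layer, shown with the public `tf.keras` functional API (which mirrors this internal base layer):

```python
import tensorflow as tf

inputs = tf.keras.Input(shape=(4,))
dense = tf.keras.layers.Dense(2)
outputs = dense(inputs)

print(dense.input)   # the tensor feeding `dense`
print(dense.output)  # its single output tensor
```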
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, dataset_file_map: Mapping[str, _RepresentativeDatasetFile]) -> None:\n    self.dataset_file_map = dataset_file_map",
    "docstring": "Initializes TFRecord represenatative dataset loader. Args: dataset_file_map: Signature key -> mapping. Raises: DecodeError: If the sample is not RepresentativeDataSample.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\compiler\\mlir\\quantization\\tensorflow\\python\\representative_dataset.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:dataset_file_map arguments arg arg Assign"
  },
  {
    "library": "tensorflow",
    "name": "_MapOnGpuDataset",
    "source_code": "class _MapOnGpuDataset(dataset_ops.UnaryDataset):\n\n    def __init__(self, input_dataset, map_func, use_inter_op_parallelism=True):\n        self._input_dataset = input_dataset\n        self._use_inter_op_parallelism = use_inter_op_parallelism\n        self._map_func = structured_function.StructuredFunctionWrapper(map_func, self._transformation_name(), dataset=input_dataset, defun_kwargs={'experimental_ints_on_device': True})\n        variant_tensor = ged_ops.experimental_map_dataset(self._input_dataset._variant_tensor, self._map_func.function.captured_inputs, f=self._map_func.function, use_inter_op_parallelism=self._use_inter_op_parallelism, **self._flat_structure)\n        super(_MapOnGpuDataset, self).__init__(input_dataset, variant_tensor)\n\n    def _functions(self):\n        return [self._map_func]\n\n    @property\n    def element_spec(self):\n        return self._map_func.output_structure\n\n    def _transformation_name(self):\n        return 'map_on_gpu()'",
    "docstring": "A that maps a function over elements in its using a GPU.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\data\\experimental\\ops\\prefetching_ops.py",
    "ast_data": "ClassDef name:_MapOnGpuDataset FunctionDef name:__init__ arg:self arg:input_dataset arg:map_func arg:use_inter_op_parallelism arguments arg arg arg arg Assign Assign Assign Call Call Assign Call Call Call FunctionDef name:_functions arg:self arguments arg Return return:yes FunctionDef name:element_spec arg:self arguments arg Return return:yes FunctionDef name:_transformation_name arg:self arguments arg Return return:yes"
  },
  {
    "library": "scipy",
    "name": "getcol",
    "source_code": "def getcol(self, j):\n    return self._getcol(j)",
    "docstring": "Returns a copy of column j of the matrix, as an (m x 1) sparse matrix (column vector).",
    "type": "method",
    "file_path": "scipy\\scipy\\sparse\\_matrix.py",
    "ast_data": "FunctionDef name:getcol arg:self arg:j arguments arg arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "graph_execution_trace_to_tensor_id",
    "source_code": "def graph_execution_trace_to_tensor_id(self, trace):\n    return self.symbolic_tensor_id(trace.graph_id, trace.op_name, trace.output_slot)",
    "docstring": "Get symbolic tensor ID from a GraphExecutoinTraceDigest object.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\debug\\lib\\debug_events_reader.py",
    "ast_data": "FunctionDef name:graph_execution_trace_to_tensor_id arg:self arg:trace arguments arg arg Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "gain",
    "source_code": "@property\ndef gain(self):\n    return self._gain",
    "docstring": "Gain of the system.",
    "type": "method",
    "file_path": "scipy\\scipy\\signal\\_ltisys.py",
    "ast_data": "FunctionDef name:gain arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "mark_as_unsaveable",
    "source_code": "def mark_as_unsaveable():\n    if ops.inside_function() and (not save_context.in_save_context()):\n        ops.get_default_graph().mark_as_unsaveable(\"\\nConcreteFunction that uses distributed variables in certain way cannot be saved.\\nIf you're saving with\\n\\ntf.saved_model.save(..., signatures=f.get_concrete_function())\\n\\ndo\\n\\n@tf.function(input_signature=...)\\ndef f_with_input_signature():\\n  ...\\n\\ntf.saved_model.save(..., signatures=f_with_input_signature)`\\n\\ninstead.\")",
    "docstring": "Marks the function as unsaveable if not inside save context.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\values_util.py",
    "ast_data": "FunctionDef name:mark_as_unsaveable arguments If BoolOp Call Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "L1L2",
    "source_code": "class L1L2(Regularizer):\n\n    def __init__(self, l1=0.0, l2=0.0):\n        l1 = 0.0 if l1 is None else l1\n        l2 = 0.0 if l2 is None else l2\n        _check_penalty_number(l1)\n        _check_penalty_number(l2)\n        self.l1 = backend.cast_to_floatx(l1)\n        self.l2 = backend.cast_to_floatx(l2)\n\n    def __call__(self, x):\n        regularization = backend.constant(0.0, dtype=x.dtype)\n        if self.l1:\n            regularization += self.l1 * math_ops.reduce_sum(math_ops.abs(x))\n        if self.l2:\n            regularization += self.l2 * math_ops.reduce_sum(math_ops.square(x))\n        return regularization\n\n    def get_config(self):\n        return {'l1': float(self.l1), 'l2': float(self.l2)}",
    "docstring": "A regularizer that applies both L1 and L2 regularization penalties. The L1 regularization penalty is computed as: The L2 regularization penalty is computed as L1L2 may be passed to a layer as a string identifier: >>> dense = tf.keras.layers.Dense(3, kernel_regularizer='l1_l2') In this case, the default values used are and . Attributes: l1: Float; L1 regularization factor. l2: Float; L2 regularization factor.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\regularizers.py",
    "ast_data": "ClassDef name:L1L2 FunctionDef name:__init__ arg:self arg:l1 arg:l2 arguments arg arg arg Assign Compare Assign Compare Call Call Assign Call Assign Call FunctionDef name:__call__ arg:self arg:x arguments arg arg Assign Call If Call Call If Call Call Return return:yes FunctionDef name:get_config arg:self arguments arg Return return:yes Call Call"
  },
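A usage sketch with the public `tf.keras` API (the penalty values are illustrative):

```python
import tensorflow as tf

layer = tf.keras.layers.Dense(
    3, kernel_regularizer=tf.keras.regularizers.L1L2(l1=0.01, l2=0.001)
)
_ = layer(tf.ones((2, 5)))   # build the layer so the kernel exists
print(layer.losses)          # one scalar: the combined L1 + L2 penalty
```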
  {
    "library": "tensorflow",
    "name": "_save_fn",
    "source_code": "def _save_fn():\n    logging.info('Saving checkpoints for %d into %s.', step, self._save_path)\n    start_time = time.time()\n    for l in self._listeners:\n        l.before_save(session, step)\n    self._get_saver().save(session, self._save_path, global_step=step)\n    if self._summary_writer is None:\n        raise ValueError('Summary writer is not initialised')\n    self._summary_writer.add_session_log(event_pb2.SessionLog(status=event_pb2.SessionLog.CHECKPOINT, checkpoint_path=self._save_path), step)\n    for l in self._listeners:\n        l.after_save(session, step)\n    end_time = time.time()\n    metrics.AddAsyncCheckpointWriteDuration(api_label=_ASYNC_CHECKPOINT_V1, microseconds=_get_duration_microseconds(start_time, end_time))\n    global _END_TIME_OF_LAST_WRITE\n    with _END_TIME_OF_LAST_WRITE_LOCK:\n        metrics.AddTrainingTimeSaved(api_label=_ASYNC_CHECKPOINT_V1, microseconds=_get_duration_microseconds(_END_TIME_OF_LAST_WRITE, start_time))\n    _END_TIME_OF_LAST_WRITE = start_time\n    logging.info('Checkpoint actual writing time: (%.3f sec)', end_time - start_time)\n    logging.info('Checkpoint finished for %d into %s.', step, self._save_path)",
    "docstring": "Run the saver process.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\tpu\\async_checkpoint.py",
    "ast_data": "FunctionDef name:_save_fn arguments Call Assign Call For Call Call Call If Compare Raise Call Call Call For Call Assign Call Call Call With Call Call Assign Call Call"
  },
  {
    "library": "tensorflow",
    "name": "assert_scalar",
    "source_code": "@tf_export(v1=['debugging.assert_scalar', 'assert_scalar'])\n@dispatch.add_dispatch_support\n@deprecation.deprecated_endpoints('assert_scalar')\ndef assert_scalar(tensor, name=None, message=None):\n    with ops.name_scope(name, 'assert_scalar', [tensor]) as name_scope:\n        tensor = ops.convert_to_tensor(tensor, name=name_scope)\n        shape = tensor.get_shape()\n        message = _message_prefix(message)\n        if shape.ndims != 0:\n            if context.executing_eagerly():\n                raise ValueError('%sExpected scalar shape, saw shape: %s.' % (message, shape))\n            else:\n                raise ValueError('%sExpected scalar shape for %s, saw shape: %s.' % (message, tensor.name, shape))\n        return tensor",
    "docstring": "Asserts that the given is a scalar (i.e. zero-dimensional). This function raises unless it can be certain that the given is a scalar. is also raised if the shape of is unknown. Args: tensor: A . name: A name for this operation. Defaults to \"assert_scalar\" message: A string to prefix to the default message. Returns: The input tensor (potentially converted to a ). Raises: ValueError: If the tensor is not scalar (rank 0), or if its shape is unknown.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\check_ops.py",
    "ast_data": "FunctionDef name:assert_scalar arg:tensor arg:name arg:message arguments arg arg arg With Call Assign Call Assign Call Assign Call If Compare If Call Raise Call Raise Call Return return:yes Call Call"
  },
  {
    "library": "numpy",
    "name": "_write_array_header",
    "source_code": "def _write_array_header(fp, d, version=None):\n    header = ['{']\n    for key, value in sorted(d.items()):\n        header.append(f\"'{key}': {repr(value)}, \")\n    header.append('}')\n    header = ''.join(header)\n    shape = d['shape']\n    header += ' ' * (GROWTH_AXIS_MAX_DIGITS - len(repr(shape[-1 if d['fortran_order'] else 0])) if len(shape) > 0 else 0)\n    if version is None:\n        header = _wrap_header_guess_version(header)\n    else:\n        header = _wrap_header(header, version)\n    fp.write(header)",
    "docstring": "Write the header for an array and returns the version used Parameters ---------- fp : filelike object d : dict This has the appropriate entries for writing its string representation to the header of the file. version : tuple or None None means use oldest that works. Providing an explicit version will raise a ValueError if the format does not allow saving this data. Default: None",
    "type": "function",
    "file_path": "numpy\\numpy\\lib\\_format_impl.py",
    "ast_data": "FunctionDef name:_write_array_header arg:fp arg:d arg:version arguments arg arg arg Assign For Call Call Call Call Call Assign Call Assign Compare Call Call Call If Compare Assign Call Assign Call Call"
  },
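The public path through this writer is `np.save`; reading the header back with `numpy.lib.format` shows the metadata it encodes:

```python
import io

import numpy as np

buf = io.BytesIO()
np.save(buf, np.arange(6).reshape(2, 3))

buf.seek(0)
version = np.lib.format.read_magic(buf)                        # e.g. (1, 0)
shape, fortran_order, dtype = np.lib.format.read_array_header_1_0(buf)
print(version, shape, fortran_order, dtype)                    # (1, 0) (2, 3) False int64
```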
  {
    "library": "pandas",
    "name": "tz_to_dtype",
    "source_code": "def tz_to_dtype(tz: tzinfo | None, unit: str='ns') -> np.dtype[np.datetime64] | DatetimeTZDtype:\n    if tz is None:\n        return np.dtype(f'M8[{unit}]')\n    else:\n        return DatetimeTZDtype(tz=tz, unit=unit)",
    "docstring": "Return a datetime64[ns] dtype appropriate for the given timezone. Parameters ---------- tz : tzinfo or None unit : str, default \"ns\" Returns ------- np.dtype or Datetime64TZDType",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\arrays\\datetimes.py",
    "ast_data": "FunctionDef name:tz_to_dtype arg:tz arg:unit arguments arg arg If Compare Return return:yes Call Return return:yes Call"
  },
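`tz_to_dtype` is internal, but its two outcomes correspond to the dtypes visible on public tz-naive vs tz-aware indexes:

```python
import pandas as pd

naive = pd.date_range("2024-01-01", periods=2)
aware = pd.date_range("2024-01-01", periods=2, tz="UTC")
print(naive.dtype)  # datetime64[ns]      -> np.dtype('M8[ns]')
print(aware.dtype)  # datetime64[ns, UTC] -> DatetimeTZDtype
```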
  {
    "library": "pytorch",
    "name": "cuFFTPlanCacheManager",
    "source_code": "class cuFFTPlanCacheManager:\n    __initialized = False\n\n    def __init__(self):\n        self.caches = []\n        self.__initialized = True\n\n    def __getitem__(self, device):\n        index = torch.cuda._utils._get_device_index(device)\n        if index < 0 or index >= torch.cuda.device_count():\n            raise RuntimeError(f'cufft_plan_cache: expected 0 <= device index < {torch.cuda.device_count()}, but got device with index {index}')\n        if len(self.caches) == 0:\n            self.caches.extend((cuFFTPlanCache(index) for index in range(torch.cuda.device_count())))\n        return self.caches[index]\n\n    def __getattr__(self, name):\n        return getattr(self[torch.cuda.current_device()], name)\n\n    def __setattr__(self, name, value):\n        if self.__initialized:\n            return setattr(self[torch.cuda.current_device()], name, value)\n        else:\n            return super().__setattr__(name, value)",
    "docstring": "Represent all cuFFT plan caches, return the cuFFTPlanCache for a given device when indexed. Finally, this object, when used directly as a object (e.g., setting the ) attribute, the current device's cuFFT plan cache is used.",
    "type": "class",
    "file_path": "pytorch\\torch\\backends\\cuda\\__init__.py",
    "ast_data": "ClassDef name:cuFFTPlanCacheManager Assign FunctionDef name:__init__ arg:self arguments arg Assign Assign FunctionDef name:__getitem__ arg:self arg:device arguments arg arg Assign Call If BoolOp Compare Compare Call Raise Call Call If Compare Call Call Call Call Call Return return:yes FunctionDef name:__getattr__ arg:self arg:name arguments arg arg Return return:yes Call Call FunctionDef name:__setattr__ arg:self arg:name arg:value arguments arg arg arg If Return return:yes Call Call Return return:yes Call Call"
  },
  {
    "library": "scipy",
    "name": "flush",
    "source_code": "def flush(self):\n    if hasattr(self, 'mode') and self.mode in 'wa':\n        self._write()",
    "docstring": "Perform a sync-to-disk flush if the object is in write mode. See Also -------- sync : Identical function",
    "type": "method",
    "file_path": "scipy\\scipy\\io\\_netcdf.py",
    "ast_data": "FunctionDef name:flush arg:self arguments arg If BoolOp Call Compare Call"
  },
  {
    "library": "matplotlib",
    "name": "font_as_file",
    "source_code": "def font_as_file(font):\n    fh = BytesIO()\n    font.save(fh, reorderTables=False)\n    return fh",
    "docstring": "Convert a TTFont object into a file-like object. Parameters ---------- font : fontTools.ttLib.ttFont.TTFont A font object Returns ------- BytesIO A file object with the font saved into it",
    "type": "function",
    "file_path": "matplotlib\\lib\\matplotlib\\backends\\_backend_pdf_ps.py",
    "ast_data": "FunctionDef name:font_as_file arg:font arguments arg Assign Call Call Return return:yes"
  },
  {
    "library": "cherrypy",
    "name": "keys",
    "source_code": "def keys(self):\n    if not self.loaded:\n        self.load()\n    return self._data.keys()",
    "docstring": "Return an iterable of session keys. D.keys() -> list of D's keys.",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\lib\\sessions.py",
    "ast_data": "FunctionDef name:keys arg:self arguments arg If Call Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "_lexsort_depth",
    "source_code": "def _lexsort_depth(codes: list[np.ndarray], nlevels: int) -> int:\n    int64_codes = [ensure_int64(level_codes) for level_codes in codes]\n    for k in range(nlevels, 0, -1):\n        if libalgos.is_lexsorted(int64_codes[:k]):\n            return k\n    return 0",
    "docstring": "Count depth (up to a maximum of ) with which codes are lexsorted.",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\indexes\\multi.py",
    "ast_data": "FunctionDef name:_lexsort_depth arg:codes arg:nlevels arguments arg arg Assign Call For Call If Call Return return:yes Return return:yes"
  },
  {
    "library": "scipy",
    "name": "pinvh",
    "source_code": "@_apply_over_batch(('a', 2))\ndef pinvh(a, atol=None, rtol=None, lower=True, return_rank=False, check_finite=True):\n    a = _asarray_validated(a, check_finite=check_finite)\n    s, u = _decomp.eigh(a, lower=lower, check_finite=False, driver='ev')\n    t = u.dtype.char.lower()\n    maxS = np.max(np.abs(s), initial=0.0)\n    atol = 0.0 if atol is None else atol\n    rtol = max(a.shape) * np.finfo(t).eps if rtol is None else rtol\n    if atol < 0.0 or rtol < 0.0:\n        raise ValueError('atol and rtol values must be positive.')\n    val = atol + maxS * rtol\n    above_cutoff = abs(s) > val\n    psigma_diag = 1.0 / s[above_cutoff]\n    u = u[:, above_cutoff]\n    B = u * psigma_diag @ u.conj().T\n    if return_rank:\n        return (B, len(psigma_diag))\n    else:\n        return B",
    "docstring": "Compute the (Moore-Penrose) pseudo-inverse of a Hermitian matrix. Calculate a generalized inverse of a complex Hermitian/real symmetric matrix using its eigenvalue decomposition and including all eigenvalues with 'large' absolute value. Parameters ---------- a : (N, N) array_like Real symmetric or complex hermetian matrix to be pseudo-inverted atol : float, optional Absolute threshold term, default value is 0. .. versionadded:: 1.7.0 rtol : float, optional Relative threshold term, default value is `aareturn_rankpinv`. >>> import numpy as np >>> from scipy.linalg import pinvh >>> rng = np.random.default_rng() >>> a = rng.standard_normal((9, 6)) >>> a = np.dot(a, a.T) >>> B = pinvh(a) >>> np.allclose(a, a @ B @ a) True >>> np.allclose(B, B @ a @ B) True",
    "type": "function",
    "file_path": "scipy\\scipy\\linalg\\_basic.py",
    "ast_data": "FunctionDef name:pinvh arg:a arg:atol arg:rtol arg:lower arg:return_rank arg:check_finite arguments arg arg arg arg arg arg Assign Call Assign Call Assign Call Assign Call Call Assign Compare Assign Compare Call Call If BoolOp Compare Compare Raise Call Assign Assign Compare Call Assign Assign Assign Call If Return return:yes Call Return return:yes Call"
  },
  {
    "library": "django",
    "name": "touches",
    "source_code": "def touches(self, other):\n    return capi.geos_touches(self.ptr, other.ptr)",
    "docstring": "Return true if the DE-9IM intersection matrix for the two Geometries is FT*******, F**T***** or F***T****.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\geos\\geometry.py",
    "ast_data": "FunctionDef name:touches arg:self arg:other arguments arg arg Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "roots_laguerre",
    "source_code": "def roots_laguerre(n, mu=False):\n    return roots_genlaguerre(n, 0.0, mu=mu)",
    "docstring": "Gauss-Laguerre quadrature. Compute the sample points and weights for Gauss-Laguerre quadrature. The sample points are the roots of the nth degree Laguerre polynomial, :math:. These sample points and weights correctly integrate polynomials of degree :math: or less over the interval :math: with weight function :math:. See 22.2.13 in [AS]_ for details. Parameters ---------- n : int quadrature order mu : bool, optional If True, return the sum of the weights, optional. Returns ------- x : ndarray Sample points w : ndarray Weights mu : float Sum of the weights See Also -------- scipy.integrate.fixed_quad numpy.polynomial.laguerre.laggauss References ---------- .. [AS] Milton Abramowitz and Irene A. Stegun, eds. Handbook of Mathematical Functions with Formulas, Graphs, and Mathematical Tables. New York: Dover, 1972.",
    "type": "function",
    "file_path": "scipy\\scipy\\special\\_orthogonal.py",
    "ast_data": "FunctionDef name:roots_laguerre arg:n arg:mu arguments arg arg Return return:yes Call"
  },
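A quick exactness check: the degree-n rule integrates exp(-x) * p(x) exactly for polynomials p up to degree 2n - 1, and the integral of x**3 * exp(-x) over [0, inf) is 3! = 6.

```python
import numpy as np
from scipy.special import roots_laguerre

x, w = roots_laguerre(4)                   # exact for polynomials up to degree 7
print(np.isclose(np.sum(w * x**3), 6.0))   # True: gamma(4) = 3! = 6
```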
  {
    "library": "pytorch",
    "name": "node_name",
    "source_code": "@property\ndef node_name(self) -> str:\n    op_name = f'{self.name.name}_{self.name.overload_name}'.lower()\n    return ''.join((word.capitalize() or '' for word in op_name.split('_')))",
    "docstring": "Return camel-case version of op in node. Note: This function also appends any in the operation. For example, if the op is , the returned name will be .",
    "type": "method",
    "file_path": "pytorch\\torchgen\\api\\lazy.py",
    "ast_data": "FunctionDef name:node_name arg:self arguments arg Assign Call Return return:yes Call BoolOp Call Call"
  },
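A standalone sketch of the camel-casing rule used above; the op and overload names are hypothetical examples, not pulled from torchgen:

```python
# Mirrors node_name: lowercase "{name}_{overload_name}", then CamelCase it.
op_name = "add_tensor"  # hypothetical f"{name}_{overload_name}".lower()
node = "".join(word.capitalize() or "" for word in op_name.split("_"))
print(node)  # AddTensor
```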
  {
    "library": "tensorflow",
    "name": "_min_matrix_dim",
    "source_code": "def _min_matrix_dim(self):\n    domain_dim = tensor_shape.dimension_value(self.domain_dimension)\n    range_dim = tensor_shape.dimension_value(self.range_dimension)\n    if domain_dim is None or range_dim is None:\n        return None\n    return min(domain_dim, range_dim)",
    "docstring": "Minimum of domain/range dimension, if statically available, else None.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\linalg\\linear_operator_identity.py",
    "ast_data": "FunctionDef name:_min_matrix_dim arg:self arguments arg Assign Call Assign Call If BoolOp Compare Compare Return return:no Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, values):\n    self._values = tuple(values)",
    "docstring": "Should only be called by subclass __init__.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\values.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:values arguments arg arg Assign Call"
  },
  {
    "library": "django",
    "name": "__contains__",
    "source_code": "def __contains__(self, key):\n    return self.has_key(key)",
    "docstring": "Return True if the key is in the cache and has not expired.",
    "type": "method",
    "file_path": "django\\django\\core\\cache\\backends\\base.py",
    "ast_data": "FunctionDef name:__contains__ arg:self arg:key arguments arg arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "hint_int",
    "source_code": "def hint_int(a: Union[torch.SymInt, int], fallback: Optional[int]=None) -> int:\n    if isinstance(a, torch.SymInt):\n        return a.node.require_hint(fallback)\n    assert type(a) is int, a\n    return a",
    "docstring": "Retrieve the hint for an int (based on the underlying real values as observed at runtime). If no hint is available (e.g., because data dependent shapes), if fallback is not None, use that instead (otherwise raise an error).",
    "type": "function",
    "file_path": "pytorch\\torch\\fx\\experimental\\symbolic_shapes.py",
    "ast_data": "FunctionDef name:hint_int arg:a arg:fallback arguments arg arg If Call Return return:yes Call Compare Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "PrependParamsBuffersConstantAotAutogradInputStep",
    "source_code": "class PrependParamsBuffersConstantAotAutogradInputStep(InputAdaptStep):\n\n    def apply(self, model_args: Sequence[Any], model_kwargs: Mapping[str, Any], model: torch.nn.Module | Callable | torch_export.ExportedProgram | None=None) -> tuple[Sequence[Any], Mapping[str, Any]]:\n        ordered_params = tuple((model.state_dict[name] for name in model.graph_signature.parameters))\n        non_persistent_buffers = set(model.graph_signature.non_persistent_buffers)\n        ordered_buffers = []\n        for name in model.graph_signature.buffers:\n            if name in non_persistent_buffers:\n                ordered_buffers.append(model.constants[name])\n            else:\n                ordered_buffers.append(model.state_dict[name])\n        ordered_constant_tensors = tuple((model.constants[fqn] for fqn in model.graph_signature.lifted_tensor_constants))\n        updated_args = (*ordered_params, *ordered_buffers, *ordered_constant_tensors, *model_args)\n        if model_kwargs:\n            return MergeKwargsIntoArgsInputStep().apply(updated_args, model_kwargs, model=model)\n        return (updated_args, {})",
    "docstring": "Prepend model parameters, buffers and constants to the user input. :func: lifts model parameters, buffers and constants as model input, thus, they must be added to the user input before the model is executed. Args: model: The PyTorch model with embedded parameters and buffers.",
    "type": "class",
    "file_path": "pytorch\\torch\\onnx\\_internal\\io_adapter.py",
    "ast_data": "ClassDef name:PrependParamsBuffersConstantAotAutogradInputStep FunctionDef name:apply arg:self arg:model_args arg:model_kwargs arg:model arguments arg arg arg arg Assign Call Assign Call Assign For If Compare Call Call Assign Call Assign If Return return:yes Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_assign_lanes",
    "source_code": "def _assign_lanes(self) -> None:\n    for device_stats in self._step_stats.dev_stats:\n        lanes = [0]\n        for ns in device_stats.node_stats:\n            l = -1\n            for i, lts in enumerate(lanes):\n                if ns.all_start_micros > lts:\n                    l = i\n                    lanes[l] = ns.all_start_micros + ns.all_end_rel_micros\n                    break\n            if l < 0:\n                l = len(lanes)\n                lanes.append(ns.all_start_micros + ns.all_end_rel_micros)\n            ns.thread_id = l",
    "docstring": "Assigns non-overlapping lanes for the activities on each device.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\client\\timeline.py",
    "ast_data": "FunctionDef name:_assign_lanes arg:self arguments arg For Assign For Assign For Call If Compare Assign Assign If Compare Assign Call Call Assign"
  },
  {
    "library": "scikit-learn",
    "name": "_query_include_self",
    "source_code": "def _query_include_self(X, include_self, mode):\n    if include_self == 'auto':\n        include_self = mode == 'connectivity'\n    if not include_self:\n        X = None\n    return X",
    "docstring": "Return the query based on include_self param",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\neighbors\\_graph.py",
    "ast_data": "FunctionDef name:_query_include_self arg:X arg:include_self arg:mode arguments arg arg arg If Compare Assign Compare If Assign Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "_update_coordinate_descent",
    "source_code": "def _update_coordinate_descent(X, W, Ht, l1_reg, l2_reg, shuffle, random_state):\n    n_components = Ht.shape[1]\n    HHt = np.dot(Ht.T, Ht)\n    XHt = safe_sparse_dot(X, Ht)\n    if l2_reg != 0.0:\n        HHt.flat[::n_components + 1] += l2_reg\n    if l1_reg != 0.0:\n        XHt -= l1_reg\n    if shuffle:\n        permutation = random_state.permutation(n_components)\n    else:\n        permutation = np.arange(n_components)\n    permutation = np.asarray(permutation, dtype=np.intp)\n    return _update_cdnmf_fast(W, HHt, XHt, permutation)",
    "docstring": "Helper function for _fit_coordinate_descent. Update W to minimize the objective function, iterating once over all coordinates. By symmetry, to update H, one can call _update_coordinate_descent(X.T, Ht, W, ...).",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\decomposition\\_nmf.py",
    "ast_data": "FunctionDef name:_update_coordinate_descent arg:X arg:W arg:Ht arg:l1_reg arg:l2_reg arg:shuffle arg:random_state arguments arg arg arg arg arg arg arg Assign Assign Call Assign Call If Compare If Compare If Assign Call Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "create_table_index",
    "source_code": "def create_table_index(self, key: str, columns=None, optlevel: int | None=None, kind: str | None=None) -> None:\n    _tables()\n    s = self.get_storer(key)\n    if s is None:\n        return\n    if not isinstance(s, Table):\n        raise TypeError('cannot create table index on a Fixed format store')\n    s.create_index(columns=columns, optlevel=optlevel, kind=kind)",
    "docstring": "Create a pytables index on the table. Parameters ---------- key : str columns : None, bool, or listlike[str] Indicate which columns to create an index on. * False : Do not create any indexes. * True : Create indexes on all columns. * None : Create indexes on all columns. * listlike : Create indexes on the given columns. optlevel : int or None, default None Optimization level, if None, pytables defaults to 6. kind : str or None, default None Kind of index, if None, pytables defaults to \"medium\". Raises ------ TypeError: raises if the node is not a table",
    "type": "method",
    "file_path": "pandas\\pandas\\io\\pytables.py",
    "ast_data": "FunctionDef name:create_table_index arg:self arg:key arg:columns arg:optlevel arg:kind arguments arg arg arg arg arg Call Assign Call If Compare Return return:no If Call Raise Call Call"
  },
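A usage sketch against the public `HDFStore` API (requires the optional PyTables dependency; the file name is illustrative):

```python
import pandas as pd

df = pd.DataFrame({"a": range(5)})
with pd.HDFStore("demo.h5") as store:
    # Indexes can only be created on "table"-format nodes, not "fixed" ones.
    store.put("df", df, format="table")
    store.create_table_index("df", optlevel=9, kind="full")
```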
  {
    "library": "kornia",
    "name": "warp_src_into_dst",
    "source_code": "def warp_src_into_dst(self, src_img: Tensor) -> Tensor:\n    _height, _width = src_img.shape[-2:]\n    warper = self.warper(_height, _width)\n    img_src_to_dst = warper(src_img, self.model())\n    return img_src_to_dst",
    "docstring": "Warp src_img with estimated model.",
    "type": "method",
    "file_path": "kornia\\kornia\\geometry\\transform\\image_registrator.py",
    "ast_data": "FunctionDef name:warp_src_into_dst arg:self arg:src_img arguments arg arg Assign Assign Call Assign Call Call Return return:yes"
  },
  {
    "library": "kornia",
    "name": "EarlyStopping",
    "source_code": "class EarlyStopping:\n\n    def __init__(self, monitor: str, min_delta: float=0.0, patience: int=8, max_mode: bool=False) -> None:\n        self.monitor = monitor\n        self.min_delta = min_delta\n        self.patience = patience\n        self.max_mode = max_mode\n        self.counter: int = 0\n        self.best_score: float = -inf if max_mode else inf\n        self.early_stop: bool = False\n\n    def __call__(self, model: Module, epoch: int, valid_metric: Dict[str, AverageMeter]) -> TrainerState:\n        score: float = valid_metric[self.monitor].avg\n        is_best: bool = score > self.best_score if self.max_mode else score < self.best_score\n        if is_best:\n            self.best_score = score\n            self.counter = 0\n        else:\n            is_within_delta: bool = score > self.best_score - self.min_delta if self.max_mode else score < self.best_score + self.min_delta\n            if not is_within_delta:\n                self.counter += 1\n                if self.counter >= self.patience:\n                    self.early_stop = True\n        if self.early_stop:\n            print(f'[INFO] Early-Stopping the training process. Epoch: {epoch}.')\n            return TrainerState.TERMINATE\n        return TrainerState.TRAINING",
    "docstring": "Callback that evaluates whether there is improvement in the loss function. The module track the losses and in case of finish patience sends a termination signal to the trainer. Args: monitor: the name of the value to track. min_delta: the minimum difference between losses to increase the patience counter. patience: the number of times to wait until the trainer does not terminate. max_mode: if true metric will be multiply by -1, turn this flag when increasing metric value is expected for example Accuracy **Usage example:** .. code:: python early_stop = EarlyStopping( monitor=\"loss\", patience=10 ) trainer = ImageClassifierTrainer( callbacks={\"on_epoch_end\", early_stop} )",
    "type": "class",
    "file_path": "kornia\\kornia\\x\\callbacks.py",
    "ast_data": "ClassDef name:EarlyStopping FunctionDef name:__init__ arg:self arg:monitor arg:min_delta arg:patience arg:max_mode arguments arg arg arg arg arg Assign Assign Assign Assign FunctionDef name:__call__ arg:self arg:model arg:epoch arg:valid_metric arguments arg arg arg arg Compare Compare If Assign Assign Compare Compare If If Compare Assign If Call Return return:yes Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "clear_previously_created_nodes",
    "source_code": "def clear_previously_created_nodes(layer, created_nodes):\n    for node in layer._inbound_nodes:\n        prev_layers = node.inbound_layers\n        for prev_layer in nest.flatten(prev_layers):\n            prev_layer._outbound_nodes = [n for n in prev_layer._outbound_nodes if n not in created_nodes]\n    layer._inbound_nodes = [n for n in layer._inbound_nodes if n not in created_nodes]",
    "docstring": "Remove nodes from from the layer's inbound_nodes.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\sequential.py",
    "ast_data": "FunctionDef name:clear_previously_created_nodes arg:layer arg:created_nodes arguments arg arg For Assign For Call Assign Compare Assign Compare"
  },
  {
    "library": "matplotlib",
    "name": "get_x",
    "source_code": "def get_x(self):\n    return self._x0",
    "docstring": "Return the left coordinate of the rectangle.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\patches.py",
    "ast_data": "FunctionDef name:get_x arg:self arguments arg Return return:yes"
  },
  {
    "library": "authlib",
    "name": "authenticate_client_secret_post",
    "source_code": "def authenticate_client_secret_post(query_client, request):\n    data = request.form\n    client_id = data.get('client_id')\n    client_secret = data.get('client_secret')\n    if client_id and client_secret:\n        client = _validate_client(query_client, client_id)\n        if client.check_client_secret(client_secret):\n            log.debug(f'Authenticate {client_id} via \"client_secret_post\" success')\n            return client\n    log.debug(f'Authenticate {client_id} via \"client_secret_post\" failed')",
    "docstring": "Authenticate client by `` method. The client uses POST parameters for authentication.",
    "type": "function",
    "file_path": "authlib\\authlib\\oauth2\\rfc6749\\authenticate_client.py",
    "ast_data": "FunctionDef name:authenticate_client_secret_post arg:query_client arg:request arguments arg arg Assign Assign Call Assign Call If BoolOp Assign Call If Call Call Return return:yes Call"
  },
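As a rough illustration of the flow above, the sketch below fakes the query/check steps with plain dicts; `Client` and `DB` are hypothetical stand-ins, not authlib objects:

```python
# Hypothetical stand-ins for authlib's client model and storage.
class Client:
    def __init__(self, client_id, secret):
        self.client_id, self._secret = client_id, secret

    def check_client_secret(self, secret):
        return secret == self._secret

DB = {"abc": Client("abc", "s3cret")}

def authenticate_client_secret_post(form):
    client_id = form.get("client_id")
    client_secret = form.get("client_secret")
    if client_id and client_secret:
        client = DB.get(client_id)  # stands in for _validate_client
        if client and client.check_client_secret(client_secret):
            return client
    return None  # the real helper logs the failure and falls through

print(authenticate_client_secret_post({"client_id": "abc", "client_secret": "s3cret"}))
print(authenticate_client_secret_post({"client_id": "abc", "client_secret": "nope"}))  # None
```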
  {
    "library": "scikit-learn",
    "name": "set_output",
    "source_code": "@available_if(_auto_wrap_is_configured)\ndef set_output(self, *, transform=None):\n    if transform is None:\n        return self\n    if not hasattr(self, '_sklearn_output_config'):\n        self._sklearn_output_config = {}\n    self._sklearn_output_config['transform'] = transform\n    return self",
    "docstring": "Set output container. See :ref: for an example on how to use the API. Parameters ---------- transform : {\"default\", \"pandas\", \"polars\"}, default=None Configure output of and . - : Default output format of a transformer - : DataFrame output - : Polars output - : Transform configuration is unchanged .. versionadded:: 1.4 option was added. Returns ------- self : estimator instance Estimator instance.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\utils\\_set_output.py",
    "ast_data": "FunctionDef name:set_output arg:self arguments arg arg If Compare Return return:yes If Call Assign Assign Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "get_sequence_dense_tensor",
    "source_code": "def get_sequence_dense_tensor(self, transformation_cache, state_manager):\n    if not isinstance(self.categorical_column, SequenceCategoricalColumn):\n        raise ValueError('In embedding_column: {}. categorical_column must be of type SequenceCategoricalColumn to use SequenceFeatures. Suggested fix: Use one of sequence_categorical_column_with_*. Given (type {}): {}'.format(self.name, type(self.categorical_column), self.categorical_column))\n    dense_tensor = self._get_dense_tensor_internal(transformation_cache, state_manager)\n    sparse_tensors = self.categorical_column.get_sparse_tensors(transformation_cache, state_manager)\n    sequence_length = fc_utils.sequence_length_from_sparse_tensor(sparse_tensors.id_tensor)\n    return SequenceDenseColumn.TensorSequenceLengthPair(dense_tensor=dense_tensor, sequence_length=sequence_length)",
    "docstring": "See base class.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\feature_column\\feature_column_v2.py",
    "ast_data": "FunctionDef name:get_sequence_dense_tensor arg:self arg:transformation_cache arg:state_manager arguments arg arg arg If Call Raise Call Call Call Assign Call Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "info",
    "source_code": "def info(self) -> PipeInfo:\n    return PipeInfo(graph=self.split_gm.graph, num_stages=self.num_stages, has_loss_and_backward=self.has_loss_and_backward)",
    "docstring": "Get information about the pipe. Returns ------- PipeInfo A dataclass containing information about the pipe.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\pipelining\\_IR.py",
    "ast_data": "FunctionDef name:info arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "django",
    "name": "asend",
    "source_code": "async def asend(self, sender, **named):\n    if not self.receivers or self.sender_receivers_cache.get(sender) is NO_RECEIVERS:\n        return []\n    sync_receivers, async_receivers = self._live_receivers(sender)\n    if sync_receivers:\n\n        @sync_to_async\n        def sync_send():\n            responses = []\n            for receiver in sync_receivers:\n                response = receiver(signal=self, sender=sender, **named)\n                responses.append((receiver, response))\n            return responses\n    else:\n\n        async def sync_send():\n            return []\n    responses, async_responses = await asyncio.gather(sync_send(), asyncio.gather(*(receiver(signal=self, sender=sender, **named) for receiver in async_receivers)))\n    responses.extend(zip(async_receivers, async_responses))\n    return responses",
    "docstring": "Send signal from sender to all connected receivers in async mode. All sync receivers will be wrapped by sync_to_async() If any receiver raises an error, the error propagates back through send, terminating the dispatch loop. So it's possible that all receivers won't be called if an error is raised. If any receivers are synchronous, they are grouped and called behind a sync_to_async() adaption before executing any asynchronous receivers. If any receivers are asynchronous, they are grouped and executed concurrently with asyncio.gather(). Arguments: sender The sender of the signal. Either a specific object or None. named Named arguments which will be passed to receivers. Return a list of tuple pairs [(receiver, response), ...].",
    "type": "method",
    "file_path": "django\\django\\dispatch\\dispatcher.py",
    "ast_data": "AsyncFunctionDef name:asend arg:self arg:sender arguments arg arg arg If BoolOp Compare Call Return return:no Assign Call If FunctionDef name:sync_send arguments Assign For Assign Call Call Return return:yes AsyncFunctionDef name:sync_send arguments Return return:no Assign Call Call Call Call Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "remove_pointless_jumps",
    "source_code": "def remove_pointless_jumps(instructions):\n    pointless_jumps = {id(a) for a, b in zip(instructions, instructions[1:]) if a.opname == 'JUMP_ABSOLUTE' and a.target is b}\n    return [inst for inst in instructions if id(inst) not in pointless_jumps]",
    "docstring": "Eliminate jumps to the next instruction",
    "type": "function",
    "file_path": "pytorch\\torch\\_dynamo\\bytecode_analysis.py",
    "ast_data": "FunctionDef name:remove_pointless_jumps arg:instructions arguments arg Assign Call Call BoolOp Compare Compare Return return:yes Compare Call"
  },
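A toy run of the same pass, with a minimal `Inst` class standing in for dynamo's instruction objects (hypothetical, for illustration only):

```python
from dataclasses import dataclass

@dataclass
class Inst:
    opname: str
    target: "Inst | None" = None

c = Inst("RETURN_VALUE")
instructions = [Inst("LOAD_FAST"), Inst("JUMP_ABSOLUTE", target=c), c]

# Same identity-based filtering as remove_pointless_jumps.
pointless = {id(a) for a, b in zip(instructions, instructions[1:])
             if a.opname == "JUMP_ABSOLUTE" and a.target is b}
print([i.opname for i in instructions if id(i) not in pointless])
# ['LOAD_FAST', 'RETURN_VALUE'] -- the jump to the next instruction is gone
```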
  {
    "library": "pandas",
    "name": "nsmallest",
    "source_code": "def nsmallest(self, n: int=5, keep: Literal['first', 'last', 'all']='first') -> Series:\n    return selectn.SelectNSeries(self, n=n, keep=keep).nsmallest()",
    "docstring": "Return the smallest elements. Parameters ---------- n : int, default 5 Return this many ascending sorted values. keep : {'first', 'last', 'all'}, default 'first' When there are duplicate values that cannot all fit in a Series of elements: - `nnnnnnnnnkeepnn` with all duplicates kept. Note that the returned Series has four elements due to the three duplicates. >>> s.nsmallest(3, keep=\"all\") Montserrat 5200 Nauru 11300 Tuvalu 11300 Anguilla 11300 dtype: int64",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\series.py",
    "ast_data": "FunctionDef name:nsmallest arg:self arg:n arg:keep arguments arg arg arg Return return:yes Call Call"
  },
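The duplicate-handling behaviour of `keep` is the subtle part; a quick check with real pandas, using the values from the docstring example:

```python
import pandas as pd

s = pd.Series({"Iceland": 337000, "Montserrat": 5200, "Nauru": 11300,
               "Tuvalu": 11300, "Anguilla": 11300})
print(s.nsmallest(3))               # keep='first': ties broken by order of appearance
print(s.nsmallest(3, keep="all"))   # all tied values kept -> 4 rows, not 3
```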
  {
    "library": "tensorflow",
    "name": "_call_location",
    "source_code": "def _call_location(outer=False):\n    f = inspect.currentframe().f_back.f_back\n    parent = f and f.f_back\n    if outer and parent is not None:\n        f = parent\n    return '{}:{}'.format(f.f_code.co_filename, f.f_lineno)",
    "docstring": "Returns call location given level up from current call.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\util\\deprecation.py",
    "ast_data": "FunctionDef name:_call_location arg:outer arguments arg Assign Call Assign BoolOp If BoolOp Compare Assign Return return:yes Call"
  },
  {
    "library": "sphinx",
    "name": "content_metadata",
    "source_code": "def content_metadata(self) -> dict[str, Any]:\n    writing_mode = self.config.epub_writing_mode\n    if (source_date_epoch := os.getenv('SOURCE_DATE_EPOCH')) is not None:\n        time_tuple = time.gmtime(int(source_date_epoch))\n    else:\n        time_tuple = time.gmtime()\n    metadata = super().content_metadata()\n    metadata['description'] = html.escape(self.config.epub_description)\n    metadata['contributor'] = html.escape(self.config.epub_contributor)\n    metadata['page_progression_direction'] = PAGE_PROGRESSION_DIRECTIONS.get(writing_mode)\n    metadata['ibook_scroll_axis'] = IBOOK_SCROLL_AXIS.get(writing_mode)\n    metadata['date'] = html.escape(time.strftime('%Y-%m-%dT%H:%M:%SZ', time_tuple))\n    metadata['version'] = html.escape(self.config.version)\n    metadata['epub_version'] = self.config.epub_version\n    return metadata",
    "docstring": "Create a dictionary with all metadata for the content.opf file properly escaped.",
    "type": "method",
    "file_path": "sphinx\\sphinx\\builders\\epub3.py",
    "ast_data": "FunctionDef name:content_metadata arg:self arguments arg Assign If Compare Call Assign Call Call Assign Call Assign Call Call Assign Call Assign Call Assign Call Assign Call Assign Call Call Assign Call Assign Return return:yes"
  },
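The `SOURCE_DATE_EPOCH` branch above is what makes epub builds reproducible; the snippet below isolates that logic (a sketch, not sphinx code):

```python
import os
import time

def build_timestamp() -> str:
    # Honor SOURCE_DATE_EPOCH for reproducible builds, as content_metadata does.
    if (sde := os.getenv("SOURCE_DATE_EPOCH")) is not None:
        time_tuple = time.gmtime(int(sde))
    else:
        time_tuple = time.gmtime()
    return time.strftime("%Y-%m-%dT%H:%M:%SZ", time_tuple)

os.environ["SOURCE_DATE_EPOCH"] = "0"
print(build_timestamp())  # 1970-01-01T00:00:00Z, regardless of wall-clock time
```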
  {
    "library": "pytorch",
    "name": "sdp_kernel",
    "source_code": "@contextlib.contextmanager\n@deprecated('`torch.backends.cuda.sdp_kernel()` is deprecated. In the future, this context manager will be removed. Please see `torch.nn.attention.sdpa_kernel()` for the new context manager, with updated signature.', category=FutureWarning)\ndef sdp_kernel(enable_flash: bool=True, enable_math: bool=True, enable_mem_efficient: bool=True, enable_cudnn: bool=True):\n    from torch.nn.attention import sdpa_kernel\n    backend_list = []\n    if enable_flash:\n        backend_list.append(SDPBackend.FLASH_ATTENTION)\n    if enable_mem_efficient:\n        backend_list.append(SDPBackend.EFFICIENT_ATTENTION)\n    if enable_math:\n        backend_list.append(SDPBackend.MATH)\n    if enable_cudnn:\n        backend_list.append(SDPBackend.CUDNN_ATTENTION)\n    with sdpa_kernel(backend_list) as context:\n        try:\n            yield context\n        finally:\n            pass",
    "docstring": ".. warning:: This flag is beta and subject to change. This context manager can be used to temporarily enable or disable any of the three backends for scaled dot product attention. Upon exiting the context manager, the previous state of the flags will be restored.",
    "type": "function",
    "file_path": "pytorch\\torch\\backends\\cuda\\__init__.py",
    "ast_data": "FunctionDef name:sdp_kernel arg:enable_flash arg:enable_math arg:enable_mem_efficient arg:enable_cudnn arguments arg arg arg arg Assign If Call If Call If Call If Call With Call Try Call"
  },
  {
    "library": "matplotlib",
    "name": "__init__",
    "source_code": "def __init__(self, vmin=None, vmax=None, clip=False):\n    self._vmin = _sanitize_extrema(vmin)\n    self._vmax = _sanitize_extrema(vmax)\n    self._clip = clip\n    self._scale = None\n    self.callbacks = cbook.CallbackRegistry(signals=['changed'])",
    "docstring": "Parameters ---------- vmin, vmax : float or None Values within the range ``, input data will be mapped to 0.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\colors.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:vmin arg:vmax arg:clip arguments arg arg arg arg Assign Call Assign Call Assign Assign Assign Call"
  },
  {
    "library": "django",
    "name": "hidden",
    "source_code": "@cached_property\ndef hidden(self):\n    return bool(self.related_name) and self.related_name[-1] == '+'",
    "docstring": "Should the related object be hidden?",
    "type": "method",
    "file_path": "django\\django\\db\\models\\fields\\reverse_related.py",
    "ast_data": "FunctionDef name:hidden arg:self arguments arg Return return:yes BoolOp Call Compare"
  },
  {
    "library": "scikit-learn",
    "name": "_solve_eigen_covariance_intercept",
    "source_code": "def _solve_eigen_covariance_intercept(self, alpha, y, sqrt_sw, X_mean, eigvals, V, X):\n    intercept_sv = np.zeros(V.shape[0])\n    intercept_sv[-1] = 1\n    intercept_dim = _find_smallest_angle(intercept_sv, V)\n    w = 1 / (eigvals + alpha)\n    w[intercept_dim] = 1 / eigvals[intercept_dim]\n    A = (V * w).dot(V.T)\n    X_op = _X_CenterStackOp(X, X_mean, sqrt_sw)\n    AXy = A.dot(X_op.T.dot(y))\n    y_hat = X_op.dot(AXy)\n    hat_diag = self._sparse_multidot_diag(X, A, X_mean, sqrt_sw)\n    if len(y.shape) != 1:\n        hat_diag = hat_diag[:, np.newaxis]\n    return ((1 - hat_diag) / alpha, (y - y_hat) / alpha)",
    "docstring": "Compute dual coefficients and diagonal of G^-1. Used when we have a decomposition of X^T.X (n_samples > n_features and X is sparse), and we are fitting an intercept.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\linear_model\\_ridge.py",
    "ast_data": "FunctionDef name:_solve_eigen_covariance_intercept arg:self arg:alpha arg:y arg:sqrt_sw arg:X_mean arg:eigvals arg:V arg:X arguments arg arg arg arg arg arg arg arg Assign Call Assign Assign Call Assign Assign Assign Call Assign Call Assign Call Call Assign Call Assign Call If Compare Call Assign Return return:yes"
  },
  {
    "library": "pandas",
    "name": "index_cols",
    "source_code": "def index_cols(self) -> list[tuple[Any, Any]]:\n    return [(i.axis, i.cname) for i in self.index_axes]",
    "docstring": "return a list of my index cols",
    "type": "method",
    "file_path": "pandas\\pandas\\io\\pytables.py",
    "ast_data": "FunctionDef name:index_cols arg:self arguments arg Return return:yes"
  },
  {
    "library": "pandas",
    "name": "ordered",
    "source_code": "@property\ndef ordered(self) -> Ordered:\n    return self.dtype.ordered",
    "docstring": "Whether the categories have an ordered relationship. See Also -------- set_ordered : Set the ordered attribute. as_ordered : Set the Categorical to be ordered. as_unordered : Set the Categorical to be unordered. Examples -------- For :class:: >>> ser = pd.Series([\"a\", \"b\", \"c\", \"a\"], dtype=\"category\") >>> ser.cat.ordered False >>> raw_cat = pd.Categorical([\"a\", \"b\", \"c\", \"a\"], ordered=True) >>> ser = pd.Series(raw_cat) >>> ser.cat.ordered True For :class:: >>> cat = pd.Categorical([\"a\", \"b\"], ordered=True) >>> cat.ordered True >>> cat = pd.Categorical([\"a\", \"b\"], ordered=False) >>> cat.ordered False For :class:: >>> ci = pd.CategoricalIndex([\"a\", \"b\"], ordered=True) >>> ci.ordered True >>> ci = pd.CategoricalIndex([\"a\", \"b\"], ordered=False) >>> ci.ordered False",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\arrays\\categorical.py",
    "ast_data": "FunctionDef name:ordered arg:self arguments arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "use_agent_store",
    "source_code": "@property\ndef use_agent_store(self) -> bool:\n    return False",
    "docstring": "Indicates that store reference returned by :py:meth: can be shared with user applications and will be available during application lifecyle. Rendezous handler impl will share store details as instance of :py:class:. Applications as a convention use / env variables to lookup the store.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\elastic\\rendezvous\\api.py",
    "ast_data": "FunctionDef name:use_agent_store arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "get_flat_tensor_types",
    "source_code": "def get_flat_tensor_types(element_spec):\n    return [spec.dtype for spec in get_flat_tensor_specs(element_spec)]",
    "docstring": "Returns a list s for the element tensor representation. Args: element_spec: A nested structure of objects representing to element type specification. Returns: A list s for the element tensor representation.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\data\\util\\structure.py",
    "ast_data": "FunctionDef name:get_flat_tensor_types arg:element_spec arguments arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "bessel_y1",
    "source_code": "@tf_export('math.special.bessel_y1')\n@dispatch.register_unary_elementwise_api\n@dispatch.add_dispatch_support\ndef bessel_y1(x, name=None):\n    with ops.name_scope(name, 'bessel_y1', [x]):\n        return gen_special_math_ops.bessel_y1(x)",
    "docstring": "Computes the Bessel y1 function of element-wise. Modified Bessel function of order 1. >>> tf.math.special.bessel_y1([0.5, 1., 2., 4.]).numpy() array([-1.47147239, -0.78121282, -0.10703243, 0.39792571], dtype=float32) Args: x: A or . Must be one of the following types: , , . name: A name for the operation (optional). Returns: A or , respectively. Has the same type as . @compatibility(scipy) Equivalent to scipy.special.y1 @end_compatibility",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\special_math_ops.py",
    "ast_data": "FunctionDef name:bessel_y1 arg:x arg:name arguments arg arg With Call Return return:yes Call Call"
  },
  {
    "library": "scikit-learn",
    "name": "transform",
    "source_code": "def transform(self, X):\n    check_is_fitted(self)\n    X = validate_data(self, X, accept_sparse='csr', reset=False)\n    projection = safe_sparse_dot(X, self.random_weights_)\n    projection += self.random_offset_\n    np.cos(projection, projection)\n    projection *= (2.0 / self.n_components) ** 0.5\n    return projection",
    "docstring": "Apply the approximate feature map to X. Parameters ---------- X : {array-like, sparse matrix}, shape (n_samples, n_features) New data, where is the number of samples and is the number of features. Returns ------- X_new : array-like, shape (n_samples, n_components) Returns the instance itself.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\kernel_approximation.py",
    "ast_data": "FunctionDef name:transform arg:self arg:X arguments arg arg Call Assign Call Assign Call Call Return return:yes"
  },
  {
    "library": "cherrypy",
    "name": "build_Call",
    "source_code": "def build_Call(self, o):\n    import ast\n    callee = self.build(o.func)\n    args = []\n    if o.args is not None:\n        for a in o.args:\n            if isinstance(a, ast.Starred):\n                args.append(self.build(a.value))\n            else:\n                args.append(self.build(a))\n    kwargs = {}\n    for kw in o.keywords:\n        if kw.arg is None:\n            rst = self.build(kw.value)\n            if not isinstance(rst, dict):\n                raise TypeError('Invalid argument for call.Must be a mapping object.')\n            for k, v in rst.items():\n                if k not in kwargs:\n                    kwargs[k] = v\n        else:\n            kwargs[kw.arg] = self.build(kw.value)\n    return callee(*args, **kwargs)",
    "docstring": "Emulate `` under Python 3.5+.",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\lib\\reprconf.py",
    "ast_data": "FunctionDef name:build_Call arg:self arg:o arguments arg arg Assign Call Assign If Compare For If Call Call Call Call Call Assign For If Compare Assign Call If Call Raise Call For Call If Compare Assign Assign Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_compute_fans",
    "source_code": "def _compute_fans(shape):\n    if len(shape) < 1:\n        fan_in = fan_out = 1\n    elif len(shape) == 1:\n        fan_in = fan_out = shape[0]\n    elif len(shape) == 2:\n        fan_in = shape[0]\n        fan_out = shape[1]\n    else:\n        receptive_field_size = 1\n        for dim in shape[:-2]:\n            receptive_field_size *= dim\n        fan_in = shape[-2] * receptive_field_size\n        fan_out = shape[-1] * receptive_field_size\n    return (int(fan_in), int(fan_out))",
    "docstring": "Computes the number of input and output units for a weight shape. Args: shape: Integer shape tuple or TF tensor shape. Returns: A tuple of integer scalars (fan_in, fan_out).",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\init_ops.py",
    "ast_data": "FunctionDef name:_compute_fans arg:shape arguments arg If Compare Call Assign If Compare Call Assign If Compare Call Assign Assign Assign For Assign Assign Return return:yes Call Call"
  },
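Worked examples of the fan computation, re-run standalone (same logic as `_compute_fans`, copied here for illustration):

```python
def compute_fans(shape):
    if len(shape) < 1:
        fan_in = fan_out = 1
    elif len(shape) == 1:
        fan_in = fan_out = shape[0]
    elif len(shape) == 2:
        fan_in, fan_out = shape[0], shape[1]
    else:
        # Conv kernels: multiply the receptive field into both fans.
        receptive_field_size = 1
        for dim in shape[:-2]:
            receptive_field_size *= dim
        fan_in = shape[-2] * receptive_field_size
        fan_out = shape[-1] * receptive_field_size
    return int(fan_in), int(fan_out)

print(compute_fans((64, 128)))       # dense layer:  (64, 128)
print(compute_fans((3, 3, 16, 32)))  # 3x3 conv: (3*3*16, 3*3*32) = (144, 288)
```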
  {
    "library": "pytorch",
    "name": "print_locals",
    "source_code": "def print_locals(self, *, file=None, stacklevel=0):\n    tx = self.__get_tx(stacklevel)\n    for k, v in tx.symbolic_locals.items():\n        print(f'{k} = {v.debug_repr()}', file=file)",
    "docstring": "Print all of the locals available in the current context. By default this view is very limited; you can get more information about any individual local using get_local().",
    "type": "method",
    "file_path": "pytorch\\torch\\_dynamo\\comptime.py",
    "ast_data": "FunctionDef name:print_locals arg:self arguments arg arg arg Assign Call For Call Call Call"
  },
  {
    "library": "kornia",
    "name": "batched_forward",
    "source_code": "def batched_forward(model: Module, data: Tensor, device: Device, batch_size: int=128, **kwargs: Dict[str, Any]) -> Tensor:\n    model_dev = model.to(device)\n    B: int = len(data)\n    bs: int = batch_size\n    if B > batch_size:\n        out_list = []\n        n_batches = int(B // bs + 1)\n        for batch_idx in range(n_batches):\n            st = batch_idx * bs\n            if batch_idx == n_batches - 1:\n                if (batch_idx + 1) * bs > B:\n                    end = B\n                else:\n                    end = (batch_idx + 1) * bs\n            else:\n                end = (batch_idx + 1) * bs\n            if st >= end:\n                continue\n            out_list.append(model_dev(data[st:end].to(device), **kwargs))\n        out = concatenate(out_list, 0)\n        return out.to(data.device)\n    return model(data, **kwargs)",
    "docstring": "Run the forward in micro-batches. When the just model.forward(data) does not fit into device memory, e.g. on laptop GPU. In the end, it transfers the output to the device of the input data tensor. E.g. running HardNet on 8000x1x32x32 tensor. Args: model: Any torch model, which outputs a single tensor as an output. data: Input data of Bx(Any) shape. device: which device should we run on. batch_size: \"micro-batch\" size. **kwargs: any other arguments, which accepts model. Returns: output of the model. Example: >>> patches = torch.rand(8000, 1, 32, 32) >>> sift = kornia.feature.SIFTDescriptor(32) >>> desc_batched = batched_forward(sift, patches, torch.device('cpu'), 128) >>> desc = sift(patches) >>> assert torch.allclose(desc, desc_batched)",
    "type": "function",
    "file_path": "kornia\\kornia\\utils\\memory.py",
    "ast_data": "FunctionDef name:batched_forward arg:model arg:data arg:device arg:batch_size arguments arg arg arg arg arg Assign Call Call If Compare Assign Assign Call For Call Assign If Compare If Compare Assign Assign Assign If Compare Call Call Call Assign Call Return return:yes Call Return return:yes Call"
  },
  {
    "library": "django",
    "name": "exists",
    "source_code": "def exists(self, session_key=None):\n    return False",
    "docstring": "This method makes sense when you're talking to a shared resource, but it doesn't matter when you're storing the information in the client's cookie.",
    "type": "method",
    "file_path": "django\\django\\contrib\\sessions\\backends\\signed_cookies.py",
    "ast_data": "FunctionDef name:exists arg:self arg:session_key arguments arg arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_initialize_multi_worker",
    "source_code": "def _initialize_multi_worker(self, devices):\n    device_dict = _group_device_list(devices)\n    workers = []\n    worker_devices = []\n    for job in ('chief', 'worker'):\n        for task in range(len(device_dict.get(job, []))):\n            worker = '/job:%s/task:%d' % (job, task)\n            workers.append(worker)\n            worker_devices.append((worker, device_dict[job][task]))\n    self._default_device = workers[0]\n    self._host_input_device = numpy_dataset.SingleDevice(workers[0])\n    self._devices = tuple(devices)\n    self._input_workers_devices = worker_devices\n    self._is_multi_worker_training = True\n    if len(workers) > 1:\n        if not isinstance(self._cross_device_ops, cross_device_ops_lib.ReductionToOneDevice) or self._cross_device_ops._num_between_graph_workers > 1:\n            raise ValueError('In-graph multi-worker training with `MirroredStrategy` is not supported.')\n        self._inferred_cross_device_ops = self._cross_device_ops\n    else:\n        self._inferred_cross_device_ops = cross_device_ops_lib.NcclAllReduce()\n    logging.info('Using MirroredStrategy with remote devices %r', devices)",
    "docstring": "Initializes the object for multi-worker training.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\mirrored_strategy.py",
    "ast_data": "FunctionDef name:_initialize_multi_worker arg:self arg:devices arguments arg arg Assign Call Assign Assign For For Call Call Call Assign Call Call Assign Assign Call Assign Call Assign Assign If Compare Call If BoolOp Call Compare Raise Call Assign Assign Call Call"
  },
  {
    "library": "tensorflow",
    "name": "deprecated_argument_lookup",
    "source_code": "def deprecated_argument_lookup(new_name, new_value, old_name, old_value):\n    if old_value is not None:\n        if new_value is not None:\n            raise ValueError(f\"Cannot specify both '{old_name}' and '{new_name}'.\")\n        return old_value\n    return new_value",
    "docstring": "Looks up deprecated argument name and ensures both are not used. Args: new_name: new name of argument new_value: value of new argument (or None if not used) old_name: old name of argument old_value: value of old argument (or None if not used) Returns: The effective argument that should be used. Raises: ValueError: if new_value and old_value are both non-null",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\util\\deprecation.py",
    "ast_data": "FunctionDef name:deprecated_argument_lookup arg:new_name arg:new_value arg:old_name arg:old_value arguments arg arg arg arg If Compare If Compare Raise Call Return return:yes Return return:yes"
  },
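A small usage sketch of the lookup; `scale` here is a hypothetical function with a deprecated `factor` alias, not a TensorFlow API:

```python
def deprecated_argument_lookup(new_name, new_value, old_name, old_value):
    if old_value is not None:
        if new_value is not None:
            raise ValueError(f"Cannot specify both '{old_name}' and '{new_name}'.")
        return old_value
    return new_value

def scale(multiplier=None, factor=None):  # 'factor' is the deprecated alias
    multiplier = deprecated_argument_lookup("multiplier", multiplier,
                                            "factor", factor)
    return multiplier

print(scale(multiplier=2))  # -> 2
print(scale(factor=3))      # -> 3, the old alias is still honored
# scale(multiplier=2, factor=3) would raise ValueError
```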
  {
    "library": "matplotlib",
    "name": "BboxTransformToMaxOnly",
    "source_code": "@_api.deprecated('3.9')\nclass BboxTransformToMaxOnly(BboxTransformTo):\n\n    def get_matrix(self):\n        if self._invalid:\n            xmax, ymax = self._boxout.max\n            if DEBUG and (xmax == 0 or ymax == 0):\n                raise ValueError('Transforming to a singular bounding box.')\n            self._mtx = np.array([[xmax, 0.0, 0.0], [0.0, ymax, 0.0], [0.0, 0.0, 1.0]], float)\n            self._inverted = None\n            self._invalid = 0\n        return self._mtx",
    "docstring": "is a transformation that linearly transforms points from the unit bounding box to a given with a fixed upper left of (0, 0).",
    "type": "class",
    "file_path": "matplotlib\\lib\\matplotlib\\transforms.py",
    "ast_data": "ClassDef name:BboxTransformToMaxOnly FunctionDef name:get_matrix arg:self arguments arg If Assign If BoolOp BoolOp Compare Compare Raise Call Assign Call Assign Assign Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "predict",
    "source_code": "def predict(self, X):\n    Y = self.decision_function(X)\n    if self.n_classes_ == 2:\n        thresh = _threshold_for_binary_predict(self.estimators_[0])\n        return self.classes_[(Y > thresh).astype(int)]\n    return self.classes_[Y.argmax(axis=1)]",
    "docstring": "Estimate the best class label for each sample in X. This is implemented as `` which will return the label of the class with most votes by estimators predicting the outcome of a decision for each possible class pair. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) Data. Returns ------- y : numpy array of shape [n_samples] Predicted multi-class targets.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\multiclass.py",
    "ast_data": "FunctionDef name:predict arg:self arg:X arguments arg arg Assign Call If Compare Assign Call Return return:yes Call Compare Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_SegmentMinGrad",
    "source_code": "@ops.RegisterGradient('SegmentMin')\ndef _SegmentMinGrad(op: ops.Operation, grad):\n    return _SegmentMinOrMaxGrad(op, grad)",
    "docstring": "Gradient for SegmentMin.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\math_grad.py",
    "ast_data": "FunctionDef name:_SegmentMinGrad arg:op arg:grad arguments arg arg Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "freeze",
    "source_code": "@record_shapeenv_event()\ndef freeze(self) -> None:\n    self.frozen = True",
    "docstring": "Freeze this ShapeEnv to stop accumulating guards A frozen ShapeEnv will ignore any further guards generated on it and only emit a warning which may lead to accuracy problems.",
    "type": "method",
    "file_path": "pytorch\\torch\\fx\\experimental\\symbolic_shapes.py",
    "ast_data": "FunctionDef name:freeze arg:self arguments arg Assign Call"
  },
  {
    "library": "tensorflow",
    "name": "get_config",
    "source_code": "def get_config(self):\n    from tensorflow.python.feature_column.serialization import serialize_feature_column\n    config = dict(zip(self._fields, self))\n    config['categorical_column'] = serialize_feature_column(self.categorical_column)\n    return config",
    "docstring": "See 'FeatureColumn` base class.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\feature_column\\feature_column_v2.py",
    "ast_data": "FunctionDef name:get_config arg:self arguments arg Assign Call Call Assign Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "_auto_legend_data",
    "source_code": "def _auto_legend_data(self, renderer):\n    assert self.isaxes\n    bboxes = []\n    lines = []\n    offsets = []\n    for artist in self.parent._children:\n        if isinstance(artist, Line2D):\n            lines.append(artist.get_transform().transform_path(artist.get_path()))\n        elif isinstance(artist, Rectangle):\n            bboxes.append(artist.get_bbox().transformed(artist.get_data_transform()))\n        elif isinstance(artist, Patch):\n            lines.append(artist.get_transform().transform_path(artist.get_path()))\n        elif isinstance(artist, PolyCollection):\n            lines.extend((artist.get_transform().transform_path(path) for path in artist.get_paths()))\n        elif isinstance(artist, Collection):\n            transform, transOffset, hoffsets, _ = artist._prepare_points()\n            if len(hoffsets):\n                offsets.extend(transOffset.transform(hoffsets))\n        elif isinstance(artist, Text):\n            bboxes.append(artist.get_window_extent(renderer))\n    return (bboxes, lines, offsets)",
    "docstring": "Return display coordinates for hit testing for \"best\" positioning. Returns ------- bboxes List of bounding boxes of all patches. lines List of corresponding to each line. offsets List of (x, y) offsets of all collection.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\legend.py",
    "ast_data": "FunctionDef name:_auto_legend_data arg:self arg:renderer arguments arg arg Assign Assign Assign For If Call Call Call Call Call If Call Call Call Call Call If Call Call Call Call Call If Call Call Call Call Call If Call Assign Call If Call Call Call If Call Call Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "world_transformation",
    "source_code": "def world_transformation(xmin, xmax, ymin, ymax, zmin, zmax, pb_aspect=None):\n    dx = xmax - xmin\n    dy = ymax - ymin\n    dz = zmax - zmin\n    if pb_aspect is not None:\n        ax, ay, az = pb_aspect\n        dx /= ax\n        dy /= ay\n        dz /= az\n    return np.array([[1 / dx, 0, 0, -xmin / dx], [0, 1 / dy, 0, -ymin / dy], [0, 0, 1 / dz, -zmin / dz], [0, 0, 0, 1]])",
    "docstring": "Produce a matrix that scales homogeneous coords in the specified ranges to [0, 1], or [0, pb_aspect[i]] if the plotbox aspect ratio is specified.",
    "type": "function",
    "file_path": "matplotlib\\lib\\mpl_toolkits\\mplot3d\\proj3d.py",
    "ast_data": "FunctionDef name:world_transformation arg:xmin arg:xmax arg:ymin arg:ymax arg:zmin arg:zmax arg:pb_aspect arguments arg arg arg arg arg arg arg Assign Assign Assign If Compare Assign Return return:yes Call"
  },
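A quick numerical check that the matrix really maps the box corners onto the unit cube (uses numpy; the logic is copied from the function above, without the aspect handling):

```python
import numpy as np

def world_transformation(xmin, xmax, ymin, ymax, zmin, zmax):
    dx, dy, dz = xmax - xmin, ymax - ymin, zmax - zmin
    return np.array([[1 / dx, 0, 0, -xmin / dx],
                     [0, 1 / dy, 0, -ymin / dy],
                     [0, 0, 1 / dz, -zmin / dz],
                     [0, 0, 0, 1]])

M = world_transformation(-2, 2, 0, 10, 1, 3)
print(M @ np.array([-2, 0, 1, 1]))   # min corner -> [0. 0. 0. 1.]
print(M @ np.array([2, 10, 3, 1]))   # max corner -> [1. 1. 1. 1.]
```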
  {
    "library": "kornia",
    "name": "list_models",
    "source_code": "@classmethod\ndef list_models(cls) -> None:\n    repo_contents = cls._fetch_repo_contents('models')\n    models = [file['path'] for file in repo_contents]\n    pprint.pp(models)",
    "docstring": "List all available ONNX models in the 'models' folder of the Hugging Face repository.",
    "type": "method",
    "file_path": "kornia\\kornia\\onnx\\utils.py",
    "ast_data": "FunctionDef name:list_models arg:cls arguments arg Assign Call Assign Call"
  },
  {
    "library": "tensorflow",
    "name": "contextmanager",
    "source_code": "def contextmanager(target):\n    context_manager = _contextlib.contextmanager(target)\n    return tf_decorator.make_decorator(target, context_manager, 'contextmanager')",
    "docstring": "A tf_decorator-aware wrapper for . Usage is identical to . Args: target: A callable to be wrapped in a contextmanager. Returns: A callable that can be used inside of a statement.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\utils\\tf_contextlib.py",
    "ast_data": "FunctionDef name:contextmanager arg:target arguments arg Assign Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "is_traceable_wrapper_subclass_type",
    "source_code": "def is_traceable_wrapper_subclass_type(t: type) -> TypeIs[type[TensorWithFlatten]]:\n    return issubclass(t, torch.Tensor) and t != torch.Tensor and hasattr(t, '__tensor_flatten__') and hasattr(t, '__tensor_unflatten__')",
    "docstring": "Same as above, but takes a type argument instead of an instance.",
    "type": "function",
    "file_path": "pytorch\\torch\\utils\\_python_dispatch.py",
    "ast_data": "FunctionDef name:is_traceable_wrapper_subclass_type arg:t arguments arg Return return:yes BoolOp Call Compare Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_format_snake_case_op_name",
    "source_code": "def _format_snake_case_op_name(s):\n    s = s.replace('2D', '2d').replace('3D', '3d')\n    snake_case = ''.join(['_' + i.lower() if i.isupper() else i for i in s]).lstrip('_')\n    return snake_case.replace('mat_mul', 'matmul').replace('bias_add', 'bias')",
    "docstring": "Formats the op name to snake case.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\compiler\\mlir\\quantization\\tensorflow\\gen_quantized_function_library.py",
    "ast_data": "FunctionDef name:_format_snake_case_op_name arg:s arguments arg Assign Call Call Assign Call Call Call Call Return return:yes Call Call"
  },
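The helper above, exercised on a few representative op names (logic copied verbatim for illustration):

```python
def format_snake_case_op_name(s):
    s = s.replace("2D", "2d").replace("3D", "3d")
    snake = "".join("_" + c.lower() if c.isupper() else c for c in s).lstrip("_")
    return snake.replace("mat_mul", "matmul").replace("bias_add", "bias")

print(format_snake_case_op_name("Conv2D"))   # conv2d  (not conv2_d, thanks to the 2D fixup)
print(format_snake_case_op_name("MatMul"))   # matmul  (special-cased)
print(format_snake_case_op_name("BiasAdd"))  # bias    (special-cased)
```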
  {
    "library": "matplotlib",
    "name": "set_label_props",
    "source_code": "def set_label_props(self, props):\n    _api.check_isinstance(dict, props=props)\n    props = _expand_text_props(props)\n    for text, prop in zip(self.labels, props):\n        text.update(prop)",
    "docstring": "Set properties of the labels. .. versionadded:: 3.7 Parameters ---------- props : dict Dictionary of properties to be used for the labels.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\widgets.py",
    "ast_data": "FunctionDef name:set_label_props arg:self arg:props arguments arg arg Call Assign Call For Call Call"
  },
  {
    "library": "scikit-learn",
    "name": "set_params",
    "source_code": "def set_params(self, **kwargs):\n    self._set_params('steps', **kwargs)\n    return self",
    "docstring": "Set the parameters of this estimator. Valid parameter keys can be listed with `stepssteps`. Parameters of the steps may be set using its name and the parameter name separated by a '__'. Returns ------- self : object Pipeline class instance.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\pipeline.py",
    "ast_data": "FunctionDef name:set_params arg:self arguments arg arg Call Return return:yes"
  },
  {
    "library": "scipy",
    "name": "HimmelBlau",
    "source_code": "class HimmelBlau(Benchmark):\n\n    def __init__(self, dimensions=2):\n        Benchmark.__init__(self, dimensions)\n        self._bounds = list(zip([-5.0] * self.N, [5.0] * self.N))\n        self.global_optimum = [[3.0, 2.0]]\n        self.fglob = 0.0\n\n    def fun(self, x, *args):\n        self.nfev += 1\n        return (x[0] ** 2 + x[1] - 11) ** 2 + (x[0] + x[1] ** 2 - 7) ** 2",
    "docstring": "HimmelBlau objective function. This class defines the HimmelBlau [1]_ global optimization problem. This is a multimodal minimization problem defined as follows: .. math:: f_{\\text{HimmelBlau}}({x}) = (x_1^2 + x_2 - 11)^2 + (x_1 + x_2^2 - 7)^2 with :math: for :math:. *Global optimum*: :math: for :math: .. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions For Global Optimization Problems Int. Journal of Mathematical Modelling and Numerical Optimisation, 2013, 4, 150-194.",
    "type": "class",
    "file_path": "scipy\\benchmarks\\benchmarks\\go_benchmark_functions\\go_funcs_H.py",
    "ast_data": "ClassDef name:HimmelBlau FunctionDef name:__init__ arg:self arg:dimensions arguments arg arg Call Assign Call Call Assign Assign FunctionDef name:fun arg:self arg:x arguments arg arg arg Return return:yes"
  },
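Evaluating the objective confirms the stated optimum (a plain re-statement of the formula, no scipy needed):

```python
def himmelblau(x):
    return (x[0] ** 2 + x[1] - 11) ** 2 + (x[0] + x[1] ** 2 - 7) ** 2

print(himmelblau([3.0, 2.0]))  # 0.0 -- the listed global optimum
print(himmelblau([0.0, 0.0]))  # 170.0 -- (0-11)^2 + (0-7)^2
```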
  {
    "library": "pytorch",
    "name": "device_memory_used",
    "source_code": "def device_memory_used(device: Optional[Union[Device, int]]=None) -> int:\n    if not torch.version.hip:\n        handle = _get_pynvml_handler()\n        device = _get_nvml_device_index(device)\n        handle = pynvml.nvmlDeviceGetHandleByIndex(device)\n        return pynvml.nvmlDeviceGetMemoryInfo(handle).used\n    else:\n        return _get_amdsmi_device_memory_used(device)",
    "docstring": "Return used global (device) memory in bytes as given by or . Args: device (torch.device or int, optional): selected device. Returns statistic for the current device, given by :func:, if :attr: is `` (default).",
    "type": "function",
    "file_path": "pytorch\\torch\\cuda\\__init__.py",
    "ast_data": "FunctionDef name:device_memory_used arg:device arguments arg If Assign Call Assign Call Assign Call Return return:yes Call Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "pan",
    "source_code": "def pan(self, *args):\n    if not self.canvas.widgetlock.available(self):\n        self.set_message('pan unavailable')\n        return\n    if self.mode == _Mode.PAN:\n        self.mode = _Mode.NONE\n        self.canvas.widgetlock.release(self)\n    else:\n        self.mode = _Mode.PAN\n        self.canvas.widgetlock(self)\n    for a in self.canvas.figure.get_axes():\n        a.set_navigate_mode(self.mode._navigate_mode)",
    "docstring": "Toggle the pan/zoom tool. Pan with left button, zoom with right.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\backend_bases.py",
    "ast_data": "FunctionDef name:pan arg:self arguments arg arg If Call Call Return return:no If Compare Assign Call Assign Call For Call Call"
  },
  {
    "library": "matplotlib",
    "name": "family_name",
    "source_code": "@property\ndef family_name(self):\n    return self.get_familyname()",
    "docstring": "The font family name, e.g., 'Times'.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\_afm.py",
    "ast_data": "FunctionDef name:family_name arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "freeze",
    "source_code": "def freeze(self):\n    if not self._frozen:\n        self._fill_default_values()\n        self._frozen = True",
    "docstring": "Prevents further modification to the sharding policy. Any values that have not been set when freeze is called are set to defaults. If the ShardingPolicy is already frozen, this is a NoOp.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\tpu\\tpu_sharding.py",
    "ast_data": "FunctionDef name:freeze arg:self arguments arg If Call Assign"
  },
  {
    "library": "tensorflow",
    "name": "_prepare_validation_data",
    "source_code": "def _prepare_validation_data(self, validation_data, batch_size, validation_steps):\n    val_x, val_y, val_sample_weights = training_utils_v1.unpack_validation_data(validation_data)\n    return self._standardize_user_data(val_x, val_y, sample_weight=val_sample_weights, batch_size=batch_size, steps=validation_steps, steps_name='validation_steps')",
    "docstring": "Unpack and check the validation data.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\training_v1.py",
    "ast_data": "FunctionDef name:_prepare_validation_data arg:self arg:validation_data arg:batch_size arg:validation_steps arguments arg arg arg arg Assign Call Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "_check_work_float",
    "source_code": "def _check_work_float(value, dtype, int_dtype):\n    if dtype == np.float32 or dtype == np.complex64:\n        value = np.nextafter(value, np.inf, dtype=np.float32)\n    value = int(value)\n    if int_dtype.itemsize == 4:\n        if value < 0 or value > _int32_max:\n            raise ValueError('Too large work array required -- computation cannot be performed with standard 32-bit LAPACK.')\n    elif int_dtype.itemsize == 8:\n        if value < 0 or value > _int64_max:\n            raise ValueError('Too large work array required -- computation cannot be performed with standard 64-bit LAPACK.')\n    return value",
    "docstring": "Convert LAPACK-returned work array size float to integer, carefully for single-precision types.",
    "type": "function",
    "file_path": "scipy\\scipy\\linalg\\lapack.py",
    "ast_data": "FunctionDef name:_check_work_float arg:value arg:dtype arg:int_dtype arguments arg arg arg If BoolOp Compare Compare Assign Call Assign Call If Compare If BoolOp Compare Compare Raise Call If Compare If BoolOp Compare Compare Raise Call Return return:yes"
  },
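Why the `nextafter` bump above matters: a work-array size reported in float32 can round *below* the true requirement, so it is nudged up before truncation. A small demonstration:

```python
import numpy as np

lwork = np.float32(2**24 + 1)  # 16777217 is not representable in float32
print(int(lwork))              # 16777216 -- rounded down, one element short
print(int(np.nextafter(lwork, np.inf, dtype=np.float32)))  # 16777218 -- safe
```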
  {
    "library": "pytorch",
    "name": "set_head_dim_values",
    "source_code": "def set_head_dim_values(kernel_options: dict[str, Any], qk_head_dim, v_head_dim, graph_sizevars):\n    qk_head_dim_static = graph_sizevars.evaluate_static_shape(qk_head_dim)\n    kernel_options.setdefault('QK_HEAD_DIM', qk_head_dim_static)\n    kernel_options.setdefault('QK_HEAD_DIM_ROUNDED', next_power_of_two(qk_head_dim_static))\n    v_head_dim_static = graph_sizevars.evaluate_static_shape(v_head_dim)\n    kernel_options.setdefault('V_HEAD_DIM', v_head_dim_static)\n    kernel_options.setdefault('V_HEAD_DIM_ROUNDED', next_power_of_two(v_head_dim_static))\n    kernel_options.setdefault('SAFE_HEAD_DIM', is_power_of_2(qk_head_dim_static) and is_power_of_2(v_head_dim_static))",
    "docstring": "Mutates kernel options, adding head dimension calculations. Args: kernel_options: Dictionary to populate with options qk_head_dim: Query/Key head dimension v_head_dim: Value head dimension graph_sizevars: Graph size variables object with evaluate_static_shape method",
    "type": "function",
    "file_path": "pytorch\\torch\\_inductor\\kernel\\flex_attention.py",
    "ast_data": "FunctionDef name:set_head_dim_values arg:kernel_options arg:qk_head_dim arg:v_head_dim arg:graph_sizevars arguments arg arg arg arg Assign Call Call Call Call Assign Call Call Call Call Call BoolOp Call Call"
  },
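A standalone sketch of the rounding behaviour; `next_power_of_two` and `is_power_of_2` are re-implemented here for illustration (the real helpers live elsewhere in inductor):

```python
def next_power_of_two(n: int) -> int:
    return 1 if n <= 1 else 1 << (n - 1).bit_length()

def is_power_of_2(n: int) -> bool:
    return n > 0 and n & (n - 1) == 0

opts = {}
qk, v = 96, 64
opts["QK_HEAD_DIM"], opts["QK_HEAD_DIM_ROUNDED"] = qk, next_power_of_two(qk)
opts["V_HEAD_DIM"], opts["V_HEAD_DIM_ROUNDED"] = v, next_power_of_two(v)
opts["SAFE_HEAD_DIM"] = is_power_of_2(qk) and is_power_of_2(v)
print(opts)  # QK rounded 96 -> 128; SAFE_HEAD_DIM False since 96 isn't a power of 2
```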
  {
    "library": "tensorflow",
    "name": "initialize",
    "source_code": "def initialize(self, table):\n    check_table_dtypes(table, self.key_dtype, self.value_dtype)\n    with ops.name_scope(self._name, 'text_file_init', (table.resource_handle,)):\n        filename = ops.convert_to_tensor(self._filename, dtypes.string, name='asset_filepath')\n        init_op = gen_lookup_ops.initialize_table_from_text_file_v2(table.resource_handle, filename, self._key_index, self._value_index, -1 if self._vocab_size is None else self._vocab_size, self._delimiter, self._offset)\n    ops.add_to_collection(ops.GraphKeys.TABLE_INITIALIZERS, init_op)\n    if not context.executing_eagerly() and constant_op.is_constant(filename):\n        ops.add_to_collection(ops.GraphKeys.ASSET_FILEPATHS, filename)\n    return init_op",
    "docstring": "Initializes the table from a text file. Args: table: The table to be initialized. Returns: The operation that initializes the table. Raises: TypeError: when the keys and values data types do not match the table key and value data types.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\lookup_ops.py",
    "ast_data": "FunctionDef name:initialize arg:self arg:table arguments arg arg Call With Call Assign Call Assign Call Compare Call If BoolOp Call Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "mean",
    "source_code": "def mean(self, name='mean'):\n    with self._name_scope(name):\n        return self._mean()",
    "docstring": "Mean.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\distributions\\distribution.py",
    "ast_data": "FunctionDef name:mean arg:self arg:name arguments arg arg With Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "ClusterCombination",
    "source_code": "class ClusterCombination(combinations_lib.TestCombination):\n\n    def parameter_modifiers(self):\n        return [ClusterParameters()]",
    "docstring": "Sets up multi worker tests.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\combinations.py",
    "ast_data": "ClassDef name:ClusterCombination FunctionDef name:parameter_modifiers arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "django",
    "name": "get_users",
    "source_code": "def get_users(self, email):\n    email_field_name = UserModel.get_email_field_name()\n    active_users = UserModel._default_manager.filter(**{'%s__iexact' % email_field_name: email, 'is_active': True})\n    return (u for u in active_users if u.has_usable_password() and _unicode_ci_compare(email, getattr(u, email_field_name)))",
    "docstring": "Given an email, return matching user(s) who should receive a reset. This allows subclasses to more easily customize the default policies that prevent inactive users and users with unusable passwords from resetting their password.",
    "type": "method",
    "file_path": "django\\django\\contrib\\auth\\forms.py",
    "ast_data": "FunctionDef name:get_users arg:self arg:email arguments arg arg Assign Call Assign Call Return return:yes BoolOp Call Call Call"
  },
  {
    "library": "matplotlib",
    "name": "_get_textbox",
    "source_code": "def _get_textbox(text, renderer):\n    projected_xs = []\n    projected_ys = []\n    theta = np.deg2rad(text.get_rotation())\n    tr = Affine2D().rotate(-theta)\n    _, parts, d = text._get_layout(renderer)\n    for t, wh, x, y in parts:\n        w, h = wh\n        xt1, yt1 = tr.transform((x, y))\n        yt1 -= d\n        xt2, yt2 = (xt1 + w, yt1 + h)\n        projected_xs.extend([xt1, xt2])\n        projected_ys.extend([yt1, yt2])\n    xt_box, yt_box = (min(projected_xs), min(projected_ys))\n    w_box, h_box = (max(projected_xs) - xt_box, max(projected_ys) - yt_box)\n    x_box, y_box = Affine2D().rotate(theta).transform((xt_box, yt_box))\n    return (x_box, y_box, w_box, h_box)",
    "docstring": "Calculate the bounding box of the text. The bbox position takes text rotation into account, but the width and height are those of the unrotated box (unlike ).",
    "type": "function",
    "file_path": "matplotlib\\lib\\matplotlib\\text.py",
    "ast_data": "FunctionDef name:_get_textbox arg:text arg:renderer arguments arg arg Assign Assign Assign Call Call Assign Call Call Assign Call For Assign Assign Call Assign Call Call Assign Call Call Assign Call Call Assign Call Call Call Return return:yes"
  },
  {
    "library": "cherrypy",
    "name": "__init__",
    "source_code": "def __init__(self, value, params=None):\n    self.value = value\n    if params is None:\n        params = {}\n    self.params = params",
    "docstring": "Initialize an HTTP header value representation.",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\lib\\httputil.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:value arg:params arguments arg arg arg Assign If Compare Assign Assign"
  },
  {
    "library": "pytorch",
    "name": "host_memory_stats",
    "source_code": "def host_memory_stats() -> dict[str, Any]:\n    result = []\n\n    def _recurse_add_to_result(prefix, obj):\n        if isinstance(obj, dict):\n            if len(prefix) > 0:\n                prefix += '.'\n            for k, v in obj.items():\n                _recurse_add_to_result(prefix + k, v)\n        else:\n            result.append((prefix, obj))\n    stats = host_memory_stats_as_nested_dict()\n    _recurse_add_to_result('', stats)\n    result.sort()\n    return collections.OrderedDict(result)",
    "docstring": "Return a dictionary of CUDA memory allocator statistics for a given device. The return value of this function is a dictionary of statistics, each of which is a non-negative integer. Core statistics: - ``: average time per call.",
    "type": "function",
    "file_path": "pytorch\\torch\\cuda\\memory.py",
    "ast_data": "FunctionDef name:host_memory_stats arguments Assign FunctionDef name:_recurse_add_to_result arg:prefix arg:obj arguments arg arg If Call If Compare Call For Call Call Call Assign Call Call Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "set_as_default",
    "source_code": "def set_as_default(self, step=None):\n    if context.executing_eagerly() and self._closed:\n        raise RuntimeError(f'SummaryWriter {self!r} is already closed')\n    super().set_as_default(step)",
    "docstring": "See .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\summary_ops_v2.py",
    "ast_data": "FunctionDef name:set_as_default arg:self arg:step arguments arg arg If BoolOp Call Raise Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_use_temp_cache",
    "source_code": "def _use_temp_cache(self):\n    if self._use_tensor_buffer():\n        return False\n    if self._use_tensor_values_cache():\n        return self._parameters.use_temp_cache_var\n    else:\n        return False",
    "docstring": "Returns true if the intermediate values should be stacked instead of being stored in a tf.Variable. Returns: A boolean, denoting whether to use a temporary cache or not.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\tpu\\tensor_tracer.py",
    "ast_data": "FunctionDef name:_use_temp_cache arg:self arguments arg If Call Return return:yes If Call Return return:yes Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "create_edges",
    "source_code": "def create_edges(self):\n    raise NotImplementedError",
    "docstring": "Calls add_outgoing_edge for all edges known to this Convertible. This is used to build the graph dependencies, so that conversion of variables to constants can be properly propagated through the graph. Usually this method will call add_outgoing_edge() to all the Convertible inputs.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\convert_to_constants.py",
    "ast_data": "FunctionDef name:create_edges arg:self arguments arg Raise"
  },
  {
    "library": "pandas",
    "name": "create_description",
    "source_code": "def create_description(self, complib, complevel: int | None, fletcher32: bool, expectedrows: int | None) -> dict[str, Any]:\n    if expectedrows is None:\n        expectedrows = max(self.nrows_expected, 10000)\n    d = {'name': 'table', 'expectedrows': expectedrows}\n    d['description'] = {a.cname: a.typ for a in self.axes}\n    if complib:\n        if complevel is None:\n            complevel = self._complevel or 9\n        filters = _tables().Filters(complevel=complevel, complib=complib, fletcher32=fletcher32 or self._fletcher32)\n        d['filters'] = filters\n    elif self._filters is not None:\n        d['filters'] = self._filters\n    return d",
    "docstring": "create the description of the table from the axes & values",
    "type": "method",
    "file_path": "pandas\\pandas\\io\\pytables.py",
    "ast_data": "FunctionDef name:create_description arg:self arg:complib arg:complevel arg:fletcher32 arg:expectedrows arguments arg arg arg arg arg If Compare Assign Call Assign Assign If If Compare Assign BoolOp Assign Call Call BoolOp Assign If Compare Assign Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "should_simplify",
    "source_code": "@property\ndef should_simplify(self):\n    return self._should_simplify",
    "docstring": "if the vertices array should be simplified.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\path.py",
    "ast_data": "FunctionDef name:should_simplify arg:self arguments arg Return return:yes"
  },
  {
    "library": "kornia",
    "name": "find_homography_dlt_iterated",
    "source_code": "def find_homography_dlt_iterated(points1: Tensor, points2: Tensor, weights: Tensor, soft_inl_th: float=3.0, n_iter: int=5) -> Tensor:\n    H: Tensor = find_homography_dlt(points1, points2, weights)\n    for _ in range(n_iter - 1):\n        errors: Tensor = symmetric_transfer_error(points1, points2, H, False)\n        weights_new: Tensor = torch.exp(-errors / (2.0 * soft_inl_th ** 2))\n        H = find_homography_dlt(points1, points2, weights_new)\n    return H",
    "docstring": "Compute the homography matrix using the iteratively-reweighted least squares (IRWLS). The linear system is solved by using the Reweighted Least Squares Solution for the 4 Points algorithm. Args: points1: A set of points in the first image with a tensor shape :math:. points2: A set of points in the second image with a tensor shape :math:. weights: Tensor containing the weights per point correspondence with a shape of :math:. Used for the first iteration of the IRWLS. soft_inl_th: Soft inlier threshold used for weight calculation. n_iter: number of iterations. Returns: the computed homography matrix with shape :math:.",
    "type": "function",
    "file_path": "kornia\\kornia\\geometry\\homography.py",
    "ast_data": "FunctionDef name:find_homography_dlt_iterated arg:points1 arg:points2 arg:weights arg:soft_inl_th arg:n_iter arguments arg arg arg arg arg Call For Call Call Call Assign Call Return return:yes"
  },
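A minimal usage sketch (requires torch and kornia installed; shapes follow the docstring):

```python
import torch
from kornia.geometry.homography import find_homography_dlt_iterated

points1 = torch.rand(1, 8, 2)   # (B, N, 2) with N >= 4 correspondences
points2 = torch.rand(1, 8, 2)
weights = torch.ones(1, 8)      # (B, N), used for the first IRWLS iteration
H = find_homography_dlt_iterated(points1, points2, weights, n_iter=5)
print(H.shape)  # torch.Size([1, 3, 3])
```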
  {
    "library": "scikit-learn",
    "name": "_check_inputs",
    "source_code": "def _check_inputs(self, X, in_fit, accept_sparse_negative=False, copy=False):\n    X = validate_data(self, X, reset=in_fit, accept_sparse='csc', copy=copy, dtype=FLOAT_DTYPES, force_writeable=True if not in_fit else None, ensure_all_finite='allow-nan')\n    with np.errstate(invalid='ignore'):\n        if not accept_sparse_negative and (not self.ignore_implicit_zeros) and (sparse.issparse(X) and np.any(X.data < 0)):\n            raise ValueError('QuantileTransformer only accepts non-negative sparse matrices.')\n    return X",
    "docstring": "Check inputs before fit and transform.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\preprocessing\\_data.py",
    "ast_data": "FunctionDef name:_check_inputs arg:self arg:X arg:in_fit arg:accept_sparse_negative arg:copy arguments arg arg arg arg arg Assign Call With Call If BoolOp BoolOp Call Call Compare Raise Call Return return:yes"
  },
  {
    "library": "scipy",
    "name": "_derivative_inplace",
    "source_code": "def _derivative_inplace(self, nu, axis):\n    if nu < 0:\n        return self._antiderivative_inplace(-nu, axis)\n    ndim = len(self.x)\n    axis = axis % ndim\n    if nu == 0:\n        return\n    else:\n        sl = [slice(None)] * ndim\n        sl[axis] = slice(None, -nu, None)\n        c2 = self.c[tuple(sl)]\n    if c2.shape[axis] == 0:\n        shp = list(c2.shape)\n        shp[axis] = 1\n        c2 = np.zeros(shp, dtype=c2.dtype)\n    factor = spec.poch(np.arange(c2.shape[axis], 0, -1), nu)\n    sl = [None] * c2.ndim\n    sl[axis] = slice(None)\n    c2 *= factor[tuple(sl)]\n    self.c = c2",
    "docstring": "Compute 1-D derivative along a selected dimension in-place May result to non-contiguous c array.",
    "type": "method",
    "file_path": "scipy\\scipy\\interpolate\\_interpolate.py",
    "ast_data": "FunctionDef name:_derivative_inplace arg:self arg:nu arg:axis arguments arg arg arg If Compare Return return:yes Call Assign Call Assign If Compare Return return:no Assign Call Assign Call Assign Call If Compare Assign Call Assign Assign Call Assign Call Call Assign Assign Call Call Assign"
  },
  {
    "library": "tensorflow",
    "name": "get_current_worker_index",
    "source_code": "@tf_export('distribute.coordinator.experimental_get_current_worker_index', v1=[])\ndef get_current_worker_index():\n    msg = 'Cannot retrieve the worker index. `get_worker_idx_and_num_workers` should be called from within a tf.function being executed on a worker. This method should only be called from either a dataset_fn that is passed into `ClusterCoordinator.create_per_worker_dataset`, or a tf.function that is passed into `ClusterCoordinator.schedule`.'\n    if not ops.inside_function():\n        raise RuntimeError(msg)\n\n    def call_time_worker_index():\n        dispatch_context = get_current_dispatch_context()\n        if not dispatch_context:\n            raise RuntimeError(msg)\n        return dispatch_context.worker_index\n    worker_index = ops.get_default_graph().capture_call_time_value(call_time_worker_index, tensor.TensorSpec([], dtype=dtypes.int64))\n    worker_index.op._set_attr('_user_specified_name', attr_value_pb2.AttrValue(s=compat.as_bytes('worker_index')))\n    return worker_index",
    "docstring": "Returns the current worker index, when called within a worker closure. Some parameter server training workloads may require the worker to know its index, for example for data sharding for reduced-variance training. This method may be used within a that is executed on a worker. That is, either a that runs via , or any other function scheduled via . Example (sharding data by worker): Raises: RuntimeError: if called from outside a or outside of a remote closure execution context (that is, on a non-worker machine).",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\coordinator\\coordinator_context.py",
    "ast_data": "FunctionDef name:get_current_worker_index arguments Assign If Call Raise Call FunctionDef name:call_time_worker_index arguments Assign Call If Raise Call Return return:yes Assign Call Call Call Call Call Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "at",
    "source_code": "@tf_export('data.experimental.at', v1=[])\ndef at(dataset, index):\n    return structure.from_tensor_list(dataset.element_spec, gen_experimental_dataset_ops.get_element_at_index(dataset._variant_tensor, index, output_types=structure.get_flat_tensor_types(dataset.element_spec), output_shapes=structure.get_flat_tensor_shapes(dataset.element_spec)))",
    "docstring": "Returns the element at a specific index in a datasest. Currently, random access is supported for the following tf.data operations: - , - , - , - , - , - , - , - , - , - , - , - , - , - , - , - , - , - (in-memory only) Users can use the cache operation to enable random access for any dataset, even one comprised of transformations which are not on this list. E.g., to get the third element of a TFDS dataset: Args: dataset: A to determine whether it supports random access. index: The index at which to fetch the element. Returns: A (nested) structure of values matching . Raises: UnimplementedError: If random access is not yet supported for a dataset.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\data\\experimental\\ops\\random_access.py",
    "ast_data": "FunctionDef name:at arg:dataset arg:index arguments arg arg Return return:yes Call Call Call Call Call"
  },
  {
    "library": "numpy",
    "name": "count_nonzero",
    "source_code": "@array_function_dispatch(_count_nonzero_dispatcher)\ndef count_nonzero(a, axis=None, *, keepdims=False):\n    if axis is None and (not keepdims):\n        return multiarray.count_nonzero(a)\n    a = asanyarray(a)\n    if np.issubdtype(a.dtype, np.character):\n        a_bool = a != a.dtype.type()\n    else:\n        a_bool = a.astype(np.bool, copy=False)\n    return a_bool.sum(axis=axis, dtype=np.intp, keepdims=keepdims)",
    "docstring": "Counts the number of non-zero values in the array ``. keepdims : bool, optional If this is set to True, the axes that are counted are left in the result as dimensions with size one. With this option, the result will broadcast correctly against the input array. Returns ------- count : int or array of int Number of non-zero values in the array along a given axis. Otherwise, the total number of non-zero values in the array is returned. See Also -------- nonzero : Return the coordinates of all the non-zero values. Examples -------- >>> import numpy as np >>> np.count_nonzero(np.eye(4)) 4 >>> a = np.array([[0, 1, 7, 0], ... [3, 0, 2, 19]]) >>> np.count_nonzero(a) 5 >>> np.count_nonzero(a, axis=0) array([1, 1, 2, 1]) >>> np.count_nonzero(a, axis=1) array([2, 3]) >>> np.count_nonzero(a, axis=1, keepdims=True) array([[2], [3]])",
    "type": "function",
    "file_path": "numpy\\numpy\\_core\\numeric.py",
    "ast_data": "FunctionDef name:count_nonzero arg:a arg:axis arguments arg arg arg If BoolOp Compare Return return:yes Call Assign Call If Call Assign Compare Call Assign Call Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "write",
    "source_code": "@classmethod\ndef write(cls, source_code: str, dst_file_ext: str) -> tuple[str, str]:\n    if config.cuda.cutlass_hash_with_compile_cmd:\n        cuda_command = repr(cuda_compile_command(['dummy_input'], 'dummy_output', dst_file_ext))\n        extra = cuda_command\n    else:\n        extra = repr([_cuda_compiler(), _nvcc_compiler_options(), _nvcc_host_compiler_options(), cutlass_key()] + [dst_file_ext] if dst_file_ext == 'o' else [])\n    key, input_path = write(source_code, cls._SOURCE_CODE_SUFFIX, extra=extra)\n    return (key, input_path)",
    "docstring": "Writes source code into a file with dst_file_ext as the file extension. Returns the hash key of source code, and the path to the file.",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\codecache.py",
    "ast_data": "FunctionDef name:write arg:cls arg:source_code arg:dst_file_ext arguments arg arg arg If Assign Call Call Assign Assign Call Compare Call Call Call Call Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "stateless_dropout",
    "source_code": "@dispatch.dispatch_for_api(nn_ops.stateless_dropout)\ndef stateless_dropout(x: ragged_tensor.Ragged, rate, seed, rng_alg=None, noise_shape=None, name=None):\n    if noise_shape is not None:\n        raise ValueError('noise_shape is not supported yet for RaggedTensor x')\n    with ops.name_scope(name, 'RaggedNNStatelessDropout', [x, rate]):\n        x = ragged_tensor.convert_to_tensor_or_ragged_tensor(x, name='x')\n        return x.with_flat_values(nn_ops.stateless_dropout(x.flat_values, rate=rate, seed=seed, rng_alg=rng_alg))",
    "docstring": "Ragged dispatch target for tf.nn.experimental.stateless_dropout.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\ragged_math_ops.py",
    "ast_data": "FunctionDef name:stateless_dropout arg:x arg:rate arg:seed arg:rng_alg arg:noise_shape arg:name arguments arg arg arg arg arg arg If Compare Raise Call With Call Assign Call Return return:yes Call Call Call"
  },
  {
    "library": "numpy",
    "name": "_get_num_chars",
    "source_code": "def _get_num_chars(a):\n    if issubclass(a.dtype.type, np.str_):\n        return a.itemsize // 4\n    return a.itemsize",
    "docstring": "Helper function that returns the number of characters per field in a string or unicode array. This is to abstract out the fact that for a unicode array this is itemsize / 4.",
    "type": "function",
    "file_path": "numpy\\numpy\\_core\\strings.py",
    "ast_data": "FunctionDef name:_get_num_chars arg:a arguments arg If Call Return return:yes Return return:yes"
  },
  {
    "library": "pandas",
    "name": "format",
    "source_code": "def format(self):\n    return [self.filter]",
    "docstring": "return the actual filter format",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\computation\\pytables.py",
    "ast_data": "FunctionDef name:format arg:self arguments arg Return return:yes"
  },
  {
    "library": "authlib",
    "name": "url_decode",
    "source_code": "def url_decode(query):\n    if query and (not set(query) <= urlencoded):\n        error = \"Error trying to decode a non urlencoded string. Found invalid characters: %s in the string: '%s'. Please ensure the request/response body is x-www-form-urlencoded.\"\n        raise ValueError(error % (set(query) - urlencoded, query))\n    if INVALID_HEX_PATTERN.search(query):\n        raise ValueError('Invalid hex encoding in query string.')\n    params = urlparse.parse_qsl(query, keep_blank_values=True)\n    decoded = []\n    for k, v in params:\n        decoded.append((to_unicode(k), to_unicode(v)))\n    return decoded",
    "docstring": "Decode a query string in x-www-form-urlencoded format into a sequence of two-element tuples. Unlike urlparse.parse_qsl(..., strict_parsing=True) urldecode will enforce correct formatting of the query string by validation. If validation fails a ValueError will be raised. urllib.parse_qsl will only raise errors if any of name-value pairs omits the equals sign.",
    "type": "function",
    "file_path": "authlib\\authlib\\common\\urls.py",
    "ast_data": "FunctionDef name:url_decode arg:query arguments arg If BoolOp Compare Call Assign Raise Call Call If Call Raise Call Assign Call Assign For Call Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "join_device",
    "source_code": "@property\n@abstractmethod\ndef join_device(self) -> torch.device:\n    ...",
    "docstring": "Return the device from which to perform collective communications needed by the join context manager.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\algorithms\\join.py",
    "ast_data": "FunctionDef name:join_device arg:self arguments arg"
  },
  {
    "library": "numpy",
    "name": "simple_version_match",
    "source_code": "def simple_version_match(pat='[-.\\\\d]+', ignore='', start=''):\n\n    def matcher(self, version_string):\n        version_string = version_string.replace('\\n', ' ')\n        pos = 0\n        if start:\n            m = re.match(start, version_string)\n            if not m:\n                return None\n            pos = m.end()\n        while True:\n            m = re.search(pat, version_string[pos:])\n            if not m:\n                return None\n            if ignore and re.match(ignore, m.group(0)):\n                pos = m.end()\n                continue\n            break\n        return m.group(0)\n    return matcher",
    "docstring": "Simple matching of version numbers, for use in CCompiler and FCompiler. Parameters ---------- pat : str, optional A regular expression matching version numbers. Default is `matchermatcher` takes a single parameter, a version string.",
    "type": "function",
    "file_path": "numpy\\numpy\\distutils\\ccompiler.py",
    "ast_data": "FunctionDef name:simple_version_match arg:pat arg:ignore arg:start arguments arg arg arg FunctionDef name:matcher arg:self arg:version_string arguments arg arg Assign Call Assign If Assign Call If Return return:no Assign Call While Assign Call If Return return:no If BoolOp Call Call Assign Call Return return:yes Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "default_eval_fn",
    "source_code": "def default_eval_fn(model, calib_data):\n    for data, _target in calib_data:\n        model(data)",
    "docstring": "Default evaluation function takes a torch.utils.data.Dataset or a list of input Tensors and run the model on the dataset",
    "type": "function",
    "file_path": "pytorch\\torch\\quantization\\__init__.py",
    "ast_data": "FunctionDef name:default_eval_fn arg:model arg:calib_data arguments arg arg For Call"
  },
  {
    "library": "seaborn",
    "name": "silverman_factor",
    "source_code": "def silverman_factor(self):\n    return power(self.neff * (self.d + 2.0) / 4.0, -1.0 / (self.d + 4))",
    "docstring": "Compute the Silverman factor. Returns ------- s : float The silverman factor.",
    "type": "method",
    "file_path": "seaborn\\seaborn\\external\\kde.py",
    "ast_data": "FunctionDef name:silverman_factor arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "scrapy",
    "name": "CallbackKeywordArgumentsContract",
    "source_code": "class CallbackKeywordArgumentsContract(Contract):\n    name = 'cb_kwargs'\n\n    def adjust_request_args(self, args: dict[str, Any]) -> dict[str, Any]:\n        args['cb_kwargs'] = json.loads(' '.join(self.args))\n        return args",
    "docstring": "Contract to set the keyword arguments for the request. The value should be a JSON-encoded dictionary, e.g.: @cb_kwargs {\"arg1\": \"some value\"}",
    "type": "class",
    "file_path": "scrapy\\scrapy\\contracts\\default.py",
    "ast_data": "ClassDef name:CallbackKeywordArgumentsContract Assign FunctionDef name:adjust_request_args arg:self arg:args arguments arg arg Assign Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "is_proxy",
    "source_code": "def is_proxy(self):\n    return self.__variable.is_proxy()",
    "docstring": "Returns True if as_proxy() would succeed.",
    "type": "method",
    "file_path": "pytorch\\torch\\_dynamo\\comptime.py",
    "ast_data": "FunctionDef name:is_proxy arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "sphinx",
    "name": "accesskey",
    "source_code": "def accesskey(context: Any, key: str) -> str:\n    if '_accesskeys' not in context:\n        context.vars['_accesskeys'] = {}\n    if key and key not in context.vars['_accesskeys']:\n        context.vars['_accesskeys'][key] = 1\n        return 'accesskey=\"%s\"' % key\n    return ''",
    "docstring": "Helper to output each access key only once.",
    "type": "function",
    "file_path": "sphinx\\sphinx\\jinja2glue.py",
    "ast_data": "FunctionDef name:accesskey arg:context arg:key arguments arg arg If Compare Assign If BoolOp Compare Assign Return return:yes Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "allow_nan_stats",
    "source_code": "@property\ndef allow_nan_stats(self):\n    return self._allow_nan_stats",
    "docstring": "Python describing behavior when a stat is undefined. Stats return +/- infinity when it makes sense. E.g., the variance of a Cauchy distribution is infinity. However, sometimes the statistic is undefined, e.g., if a distribution's pdf does not achieve a maximum within the support of the distribution, the mode is undefined. If the mean is undefined, then by definition the variance is undefined. E.g. the mean for Student's T for df = 1 is undefined (no clear way to say it is either + or - infinity), so the variance = E[(X - mean)**2] is also undefined. Returns: allow_nan_stats: Python .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\distributions\\distribution.py",
    "ast_data": "FunctionDef name:allow_nan_stats arg:self arguments arg Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "inverse_transform",
    "source_code": "def inverse_transform(self, X):\n    X = check_array(X)\n    return np.dot(X, self.components_)",
    "docstring": "Transform X back to its original space. Returns an array X_original whose transform would be X. Parameters ---------- X : array-like of shape (n_samples, n_components) New data. Returns ------- X_original : ndarray of shape (n_samples, n_features) Note that this is always a dense array.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\decomposition\\_truncated_svd.py",
    "ast_data": "FunctionDef name:inverse_transform arg:self arg:X arguments arg arg Assign Call Return return:yes Call"
  },
  {
    "library": "kornia",
    "name": "_solarize",
    "source_code": "def _solarize(input: Tensor, thresholds: Union[float, Tensor]=0.5) -> Tensor:\n    if not isinstance(input, Tensor):\n        raise TypeError(f'Input type is not a Tensor. Got {type(input)}')\n    if not isinstance(thresholds, (float, Tensor)):\n        raise TypeError(f'The factor should be either a float or Tensor. Got {type(thresholds)}')\n    if isinstance(thresholds, Tensor) and len(thresholds.shape) != 0:\n        if not (input.size(0) == len(thresholds) and len(thresholds.shape) == 1):\n            raise AssertionError(f'thresholds must be a 1-d vector of shape ({input.size(0)},). Got {thresholds}')\n        thresholds = thresholds.to(input.device).to(input.dtype)\n        thresholds = torch.stack([x.expand(*input.shape[-3:]) for x in thresholds])\n    return torch.where(input < thresholds, input, 1.0 - input)",
    "docstring": "For each pixel in the image, select the pixel if the value is less than the threshold. Otherwise, subtract 1.0 from the pixel. Args: input: image or batched images to solarize. thresholds: solarize thresholds. If int or one element tensor, input will be solarized across the whole batch. If 1-d tensor, input will be solarized element-wise, len(thresholds) == len(input). Returns: Solarized images.",
    "type": "function",
    "file_path": "kornia\\kornia\\enhance\\adjust.py",
    "ast_data": "FunctionDef name:_solarize arg:input arg:thresholds arguments arg arg If Call Raise Call Call If Call Raise Call Call If BoolOp Call Compare Call If BoolOp Compare Call Call Compare Call Raise Call Call Assign Call Call Assign Call Call Return return:yes Call Compare"
  },
  {
    "library": "tensorflow",
    "name": "with_min_occurrence",
    "source_code": "def with_min_occurrence(self, min_occurrence):\n    self._options['min_occurrence'] = min_occurrence\n    return self",
    "docstring": "Only show profiler nodes including no less than 'min_occurrence' graph nodes. A \"node\" means a profiler output node, which can be a python line (code view), an operation type (op view), or a graph node (graph/scope view). A python line includes all graph nodes created by that line, while an operation type includes all graph nodes of that type. Args: min_occurrence: Only show nodes including no less than this. Returns: self",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\profiler\\option_builder.py",
    "ast_data": "FunctionDef name:with_min_occurrence arg:self arg:min_occurrence arguments arg arg Assign Return return:yes"
  },
  {
    "library": "pandas",
    "name": "__dlpack__",
    "source_code": "def __dlpack__(self) -> Any:\n    raise NotImplementedError",
    "docstring": "Represent this structure as DLPack interface.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\interchange\\buffer.py",
    "ast_data": "FunctionDef name:__dlpack__ arg:self arguments arg Raise"
  },
  {
    "library": "matplotlib",
    "name": "format_shortcut",
    "source_code": "@staticmethod\ndef format_shortcut(key_sequence):\n    return key_sequence if len(key_sequence) == 1 else re.sub('\\\\+[A-Z]', '+Shift\\\\g<0>', key_sequence).title()",
    "docstring": "Convert a shortcut string from the notation used in rc config to the standard notation for displaying shortcuts, e.g. 'ctrl+a' -> 'Ctrl+A'.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\backend_tools.py",
    "ast_data": "FunctionDef name:format_shortcut arg:key_sequence arguments arg Return return:yes Compare Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "from_row_splits",
    "source_code": "@classmethod\n@dispatch.add_dispatch_support\ndef from_row_splits(cls, values, row_splits, name=None, validate=True):\n    if not isinstance(validate, bool):\n        raise TypeError(f'Argument `validate` must have type bool. Received {validate}.')\n    with ops.name_scope(name, 'RaggedFromRowSplits', [values, row_splits]):\n        row_partition = RowPartition.from_row_splits(row_splits=row_splits, validate=validate, dtype_hint=_get_optional_partition_dtype(values))\n        return cls._from_row_partition(values, row_partition, validate=validate)",
    "docstring": "Creates a with rows partitioned by . The returned corresponds with the python list defined by: Args: values: A potentially ragged tensor with shape . row_splits: A 1-D integer tensor with shape . Must not be empty, and must be sorted in ascending order. must be zero and must be . name: A name prefix for the RaggedTensor (optional). validate: If true, then use assertions to check that the arguments form a valid . Note: these assertions incur a runtime cost, since they must be checked for each tensor value. Returns: A . . . Raises: ValueError: If is an empty list. #### Example: >>> print(tf.RaggedTensor.from_row_splits( ... values=[3, 1, 4, 1, 5, 9, 2, 6], ... row_splits=[0, 4, 4, 7, 8, 8]))",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\ragged_tensor.py",
    "ast_data": "FunctionDef name:from_row_splits arg:cls arg:values arg:row_splits arg:name arg:validate arguments arg arg arg arg arg If Call Raise Call With Call Assign Call Call Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "is_monotonic_increasing",
    "source_code": "@numba.jit(numba.boolean(numba.int64[:]), nopython=True, nogil=True, parallel=False)\ndef is_monotonic_increasing(bounds: np.ndarray) -> bool:\n    n = len(bounds)\n    if n < 2:\n        return True\n    prev = bounds[0]\n    for i in range(1, n):\n        cur = bounds[i]\n        if cur < prev:\n            return False\n        prev = cur\n    return True",
    "docstring": "Check if int64 values are monotonically increasing.",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\_numba\\kernels\\shared.py",
    "ast_data": "FunctionDef name:is_monotonic_increasing arg:bounds arguments arg Assign Call If Compare Return return:yes Assign For Call Assign If Compare Return return:yes Assign Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_clean_out_of_range_indices",
    "source_code": "def _clean_out_of_range_indices(labels, num_classes):\n\n    def _labels_is_sparse():\n        return isinstance(labels, (sparse_tensor.SparseTensor, sparse_tensor.SparseTensorValue))\n\n    def _clean_out_of_range(values):\n        return array_ops.where_v2(math_ops.greater_equal(values, num_classes), -1 * array_ops.ones_like(values), values)\n\n    def _clean_labels_out_of_range():\n        if _labels_is_sparse():\n            return type(labels)(indices=labels.indices, values=_clean_out_of_range(labels.values), dense_shape=labels.dense_shape)\n        else:\n            return _clean_out_of_range(labels)\n    max_labels = math_ops.reduce_max(labels.values if _labels_is_sparse() else labels)\n    return cond.cond(math_ops.greater_equal(max_labels, num_classes), _clean_labels_out_of_range, lambda: labels)",
    "docstring": "Replaces large out-of-range labels by small out-of-range labels. Replaces any value in that is greater or equal to by -1. Do this conditionally for efficiency in case there are no such values. Args: labels: or . num_classes: scalar . Returns: An or as with indices greater or equal to num_classes replaced by -1.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\metrics_impl.py",
    "ast_data": "FunctionDef name:_clean_out_of_range_indices arg:labels arg:num_classes arguments arg arg FunctionDef name:_labels_is_sparse arguments Return return:yes Call FunctionDef name:_clean_out_of_range arg:values arguments arg Return return:yes Call Call Call FunctionDef name:_clean_labels_out_of_range arguments If Call Return return:yes Call Call Call Return return:yes Call Assign Call Call Return return:yes Call Call arguments"
  },
  {
    "library": "tensorflow",
    "name": "reopen",
    "source_code": "def reopen(self):\n    self.event_writer.reopen()\n    self._closed = False",
    "docstring": "Reopens the EventFileWriter. Can be called after to add more events in the same directory. The events will go into a new events file. Does nothing if the EventFileWriter was not closed.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\summary\\writer\\writer.py",
    "ast_data": "FunctionDef name:reopen arg:self arguments arg Call Assign"
  },
  {
    "library": "pandas",
    "name": "_cross_merge",
    "source_code": "def _cross_merge(left: DataFrame, right: DataFrame, on: IndexLabel | AnyArrayLike | None=None, left_on: IndexLabel | AnyArrayLike | None=None, right_on: IndexLabel | AnyArrayLike | None=None, left_index: bool=False, right_index: bool=False, sort: bool=False, suffixes: Suffixes=('_x', '_y'), indicator: str | bool=False, validate: str | None=None) -> DataFrame:\n    if left_index or right_index or right_on is not None or (left_on is not None) or (on is not None):\n        raise MergeError('Can not pass on, right_on, left_on or set right_index=True or left_index=True')\n    cross_col = f'_cross_{uuid.uuid4()}'\n    left = left.assign(**{cross_col: 1})\n    right = right.assign(**{cross_col: 1})\n    left_on = right_on = [cross_col]\n    res = merge(left, right, how='inner', on=on, left_on=left_on, right_on=right_on, left_index=left_index, right_index=right_index, sort=sort, suffixes=suffixes, indicator=indicator, validate=validate)\n    del res[cross_col]\n    return res",
    "docstring": "See merge.__doc__ with how='cross'",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\reshape\\merge.py",
    "ast_data": "FunctionDef name:_cross_merge arg:left arg:right arg:on arg:left_on arg:right_on arg:left_index arg:right_index arg:sort arg:suffixes arg:indicator arg:validate arguments arg arg arg arg arg arg arg arg arg arg arg If BoolOp Compare Compare Compare Raise Call Assign Call Assign Call Assign Call Assign Assign Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "named_params_with_sharded_tensor",
    "source_code": "def named_params_with_sharded_tensor(module: nn.Module, prefix: str='', recurse: bool=True) -> Iterator[tuple[str, Union[nn.Parameter, ShardedTensor]]]:\n    modules = module.named_modules(prefix=prefix) if recurse else [(prefix, module)]\n    memo = set()\n    for mod_prefix, mod in modules:\n        for name, val in vars(mod).items():\n            if isinstance(val, ShardedTensor) and val not in memo:\n                memo.add(val)\n                name = mod_prefix + ('.' if mod_prefix else '') + name\n                yield (name, val)\n    for name, val in module.named_parameters():\n        yield (name, val)",
    "docstring": "Returns an iterator over module parameters (together with the ShardedTensor parameters), yielding both the name of the parameter as well as the parameter itself. This is typically passed to a :class:torch.distributed._shard.sharded_optim.ShardedOptimizer Args: prefix (str): prefix to prepend to all parameter names. recurse (bool): if True, then yields parameters of this module and all submodules. Otherwise, yields only parameters that are direct members of this module. Yields: (str, Union[Tensor, ShardedTensor]): Tuple containing the name and parameter (or ShardedTensor parameter) Example:: >>> # xdoctest: +SKIP >>> model = torch.nn.Linear(*linear_size) >>> shard_parameter(model, \"weight\", spec) >>> for name, param in named_params_with_sharded_tensor(model): >>> if name in ['weight']: >>> print(param.size())",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\_shard\\sharded_optim\\__init__.py",
    "ast_data": "FunctionDef name:named_params_with_sharded_tensor arg:module arg:prefix arg:recurse arguments arg arg arg Assign Call Assign Call For For Call Call If BoolOp Call Compare Call Assign For Call"
  },
  {
    "library": "pytorch",
    "name": "is_pinned",
    "source_code": "def is_pinned(self, device: Union[str, torch.device]='cuda'):\n    return torch.tensor([], dtype=torch.uint8, device=self.device).set_(cast(Storage, self)).is_pinned(device)",
    "docstring": "Determine whether the CPU storage is already pinned on device. Args: device (str or torch.device): The device to pin memory on (default: ``). This argument is discouraged and subject to deprecated. Returns: A boolean variable.",
    "type": "method",
    "file_path": "pytorch\\torch\\storage.py",
    "ast_data": "FunctionDef name:is_pinned arg:self arg:device arguments arg arg Return return:yes Call Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "op",
    "source_code": "def op(self, opname: str, *raw_args: torch.Tensor | _C.Value, outputs: int=1, **kwargs):\n    return _add_op(self, opname, *raw_args, outputs=outputs, **kwargs)",
    "docstring": "Creates an ONNX operator \"opname\", taking \"raw_args\" as inputs and \"kwargs\" as attributes. The set of operators and the inputs/attributes they take is documented at Args: opname: The ONNX operator name, e.g., or , or an operator qualified with a namespace, e.g., . raw_args: The inputs to the operator; usually provided as arguments to the definition. outputs: The number of outputs this operator returns. By default an operator is assumed to return a single output. If is greater than one, this functions returns a tuple of output , representing each output of the ONNX operator in order. kwargs: The attributes of the ONNX operator, whose keys are named according to the following convention: indicates the attribute with type . The valid type specifiers are (float), (int), (string) or (Tensor). An attribute specified with type float accepts either a single float, or a list of floats (e.g., you would say for a attribute that takes a list of integers). Returns: The value representing the single output of this operator (see the keyword argument for multi-return nodes).",
    "type": "method",
    "file_path": "pytorch\\torch\\onnx\\_internal\\jit_utils.py",
    "ast_data": "FunctionDef name:op arg:self arg:opname arguments arg arg arg arg arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_extract_tensors_and_variables",
    "source_code": "def _extract_tensors_and_variables(tensor):\n    for obj in nest.flatten(tensor):\n        if _pywrap_utils.IsTensor(obj) or _pywrap_utils.IsVariable(obj):\n            yield obj\n        elif isinstance(obj, composite_tensor.CompositeTensor):\n            components = type_spec.type_spec_from_value(obj)._to_components(obj)\n            yield from _extract_tensors_and_variables(components)\n        else:\n            raise ValueError(f'Passed in object {obj} of type {type(obj).__name__!r}, not tf.Tensor or tf.Variable or ExtensionType.')",
    "docstring": "Extracts tensors and variables from the input object.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\backprop.py",
    "ast_data": "FunctionDef name:_extract_tensors_and_variables arg:tensor arguments arg For Call If BoolOp Call Call If Call Assign Call Call Call Raise Call Call"
  },
  {
    "library": "tensorflow",
    "name": "GlobalAveragePooling1D",
    "source_code": "class GlobalAveragePooling1D(GlobalPooling1D):\n\n    def __init__(self, data_format='channels_last', **kwargs):\n        super(GlobalAveragePooling1D, self).__init__(data_format=data_format, **kwargs)\n        self.supports_masking = True\n\n    def call(self, inputs, mask=None):\n        steps_axis = 1 if self.data_format == 'channels_last' else 2\n        if mask is not None:\n            mask = math_ops.cast(mask, inputs[0].dtype)\n            mask = array_ops.expand_dims(mask, 2 if self.data_format == 'channels_last' else 1)\n            inputs *= mask\n            return backend.sum(inputs, axis=steps_axis, keepdims=self.keepdims) / math_ops.reduce_sum(mask, axis=steps_axis, keepdims=self.keepdims)\n        else:\n            return backend.mean(inputs, axis=steps_axis, keepdims=self.keepdims)\n\n    def compute_mask(self, inputs, mask=None):\n        return None",
    "docstring": "Global average pooling operation for temporal data. Examples: >>> input_shape = (2, 3, 4) >>> x = tf.random.normal(input_shape) >>> y = tf.keras.layers.GlobalAveragePooling1D()(x) >>> print(y.shape) (2, 4) Args: data_format: A string, one of (default) or . The ordering of the dimensions in the inputs. corresponds to inputs with shape while corresponds to inputs with shape . keepdims: A boolean, whether to keep the temporal dimension or not. If is (default), the rank of the tensor is reduced for spatial dimensions. If is , the temporal dimension are retained with length 1. The behavior is the same as for or . Call arguments: inputs: A 3D tensor. mask: Binary tensor of shape indicating whether a given step should be masked (excluded from the average). Input shape: - If : 3D tensor with shape: - If : 3D tensor with shape: Output shape: - If =False: 2D tensor with shape . - If =True: - If : 3D tensor with shape - If : 3D tensor with shape",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\layers\\pooling.py",
    "ast_data": "ClassDef name:GlobalAveragePooling1D FunctionDef name:__init__ arg:self arg:data_format arguments arg arg arg Call Call Assign FunctionDef name:call arg:self arg:inputs arg:mask arguments arg arg arg Assign Compare If Compare Assign Call Assign Call Compare Return return:yes Call Call Return return:yes Call FunctionDef name:compute_mask arg:self arg:inputs arg:mask arguments arg arg arg Return return:no"
  },
  {
    "library": "django",
    "name": "clean",
    "source_code": "def clean(self, value):\n    value = self.to_python(value)\n    self.validate(value)\n    self.run_validators(value)\n    return value",
    "docstring": "Validate the given value and return its \"cleaned\" value as an appropriate Python object. Raise ValidationError for any errors.",
    "type": "method",
    "file_path": "django\\django\\forms\\fields.py",
    "ast_data": "FunctionDef name:clean arg:self arg:value arguments arg arg Assign Call Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "recommended_max_memory",
    "source_code": "def recommended_max_memory() -> int:\n    return torch._C._mps_recommendedMaxMemory()",
    "docstring": "Returns recommended max Working set size for GPU memory in bytes. .. note:: Recommended max working set size for Metal. returned from device.recommendedMaxWorkingSetSize.",
    "type": "function",
    "file_path": "pytorch\\torch\\mps\\__init__.py",
    "ast_data": "FunctionDef name:recommended_max_memory arguments Return return:yes Call"
  },
  {
    "library": "kornia",
    "name": "RgbToYuv420",
    "source_code": "class RgbToYuv420(Module):\n    ONNX_EXPORTABLE = False\n\n    def forward(self, yuvinput: Tensor) -> tuple[Tensor, Tensor]:\n        return rgb_to_yuv420(yuvinput)",
    "docstring": "Convert an image from RGB to YUV420. Width and Height evenly divisible by 2. The image data is assumed to be in the range of :math:. YUV formula follows M/PAL values (see _, Table 2, items 2.5 and 2.6). Returns: YUV420 version of the image. Shape: - image: :math: - output: :math: and :math: Examples: >>> yuvinput = torch.rand(2, 3, 4, 6) >>> yuv = RgbToYuv420() >>> output = yuv(yuvinput) # # (2x1x4x6, 2x1x2x3) Reference:: [1]",
    "type": "class",
    "file_path": "kornia\\kornia\\color\\yuv.py",
    "ast_data": "ClassDef name:RgbToYuv420 Assign FunctionDef name:forward arg:self arg:yuvinput arguments arg arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_experimental_parameter_ndims_to_matrix_ndims",
    "source_code": "@property\ndef _experimental_parameter_ndims_to_matrix_ndims(self):\n    return ()",
    "docstring": "A dict of names to number of dimensions contributing to an operator. This is a dictionary of parameter names to s specifying the number of right-most dimensions contributing to the **matrix** shape of the densified operator. If the parameter is a , this is mapped to an . If the parameter is a (called ), this specifies the number of batch dimensions of contributing to this s matrix shape. If the parameter is a structure, this is a structure of the same type of s.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\linalg\\linear_operator.py",
    "ast_data": "FunctionDef name:_experimental_parameter_ndims_to_matrix_ndims arg:self arguments arg Return return:no"
  },
  {
    "library": "matplotlib",
    "name": "with_extremes",
    "source_code": "def with_extremes(self, *, bad=None, under=None, over=None):\n    new_cm = self.copy()\n    new_cm.set_extremes(bad=bad, under=under, over=over)\n    return new_cm",
    "docstring": "Return a copy of the colormap, for which the colors for masked (*bad*) values and, when ``, low (*under*) and high (*over*) out-of-range values, have been set accordingly.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\colors.py",
    "ast_data": "FunctionDef name:with_extremes arg:self arguments arg arg arg arg Assign Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "max_error",
    "source_code": "def max_error(grad1, grad2):\n    error = 0\n    for j_t, j_n in zip(grad1, grad2):\n        if j_t.size or j_n.size:\n            error = np.maximum(error, np.fabs(j_t - j_n).max())\n    return error",
    "docstring": "Computes maximum elementwise gap. Computes the maximum elementwise gap between two lists of tensors of the same shape. Args: grad1: a lists of tensors. grad2: a lists of tensors with the same shape as grad1. Returns: The maximum elementwise gap between the two.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\gradient_checker_v2.py",
    "ast_data": "FunctionDef name:max_error arg:grad1 arg:grad2 arguments arg arg Assign For Call If BoolOp Assign Call Call Call Return return:yes"
  },
  {
    "library": "django",
    "name": "as_double",
    "source_code": "def as_double(self):\n    return capi.get_field_as_double(self._feat.ptr, self._index) if self.is_set else None",
    "docstring": "Retrieve the Field's value as a double (float).",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\gdal\\field.py",
    "ast_data": "FunctionDef name:as_double arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "sanitize_for_s3",
    "source_code": "def sanitize_for_s3(text: str) -> str:\n    return re.sub('[^a-zA-Z0-9_-]', '_', text)",
    "docstring": "S3 keys can only contain alphanumeric characters, underscores, and dashes. This function replaces all other characters with underscores.",
    "type": "function",
    "file_path": "pytorch\\.github\\scripts\\file_io_utils.py",
    "ast_data": "FunctionDef name:sanitize_for_s3 arg:text arguments arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "exp",
    "source_code": "@dispatch.add_dispatch_support\n@doc_controls.do_not_generate_docs\ndef exp(x):\n    return math_ops.exp(x)",
    "docstring": "Element-wise exponential. Args: x: Tensor or variable. Returns: A tensor.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\backend.py",
    "ast_data": "FunctionDef name:exp arg:x arguments arg Return return:yes Call"
  },
  {
    "library": "seaborn",
    "name": "Stack",
    "source_code": "@dataclass\nclass Stack(Move):\n\n    def _stack(self, df, orient):\n        if df['baseline'].nunique() > 1:\n            err = 'Stack move cannot be used when baselines are already heterogeneous'\n            raise RuntimeError(err)\n        other = {'x': 'y', 'y': 'x'}[orient]\n        stacked_lengths = (df[other] - df['baseline']).dropna().cumsum()\n        offsets = stacked_lengths.shift(1).fillna(0)\n        df[other] = stacked_lengths\n        df['baseline'] = df['baseline'] + offsets\n        return df\n\n    def __call__(self, data: DataFrame, groupby: GroupBy, orient: str, scales: dict[str, Scale]) -> DataFrame:\n        groupers = ['col', 'row', orient]\n        return GroupBy(groupers).apply(data, self._stack, orient)",
    "docstring": "Displacement of overlapping bar or area marks along the value axis. Examples -------- .. include:: ../docstrings/objects.Stack.rst",
    "type": "class",
    "file_path": "seaborn\\seaborn\\_core\\moves.py",
    "ast_data": "ClassDef name:Stack FunctionDef name:_stack arg:self arg:df arg:orient arguments arg arg arg If Compare Call Assign Raise Call Assign Assign Call Call Assign Call Call Assign Assign Return return:yes FunctionDef name:__call__ arg:self arg:data arg:groupby arg:orient arg:scales arguments arg arg arg arg arg Assign Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "on_test_batch_end",
    "source_code": "@doc_controls.for_subclass_implementers\n@generic_utils.default\ndef on_test_batch_end(self, batch, logs=None):\n    pass",
    "docstring": "Called at the end of a batch in methods. Also called at the end of a validation batch in the methods, if validation data is provided. Subclasses should override for any actions to run. Note that if the argument to in is set to , this method will only be called every batches. Args: batch: Integer, index of batch within the current epoch. logs: Dict. Aggregated metric results up until this batch.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\callbacks.py",
    "ast_data": "FunctionDef name:on_test_batch_end arg:self arg:batch arg:logs arguments arg arg arg"
  },
  {
    "library": "matplotlib",
    "name": "get_active",
    "source_code": "def get_active(self):\n    return self._active",
    "docstring": "Get whether the widget is active.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\widgets.py",
    "ast_data": "FunctionDef name:get_active arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_compute_euclidean_distance",
    "source_code": "@classmethod\ndef _compute_euclidean_distance(cls, inputs, clusters):\n    output = []\n    for inp in inputs:\n        with ops.colocate_with(inp, ignore_existing=True):\n            squared_distance = math_ops.reduce_sum(math_ops.square(inp), 1, keepdims=True) - 2 * math_ops.matmul(inp, clusters, transpose_b=True) + array_ops.transpose(math_ops.reduce_sum(math_ops.square(clusters), 1, keepdims=True))\n            output.append(squared_distance)\n    return output",
    "docstring": "Computes Euclidean distance between each input and each cluster center. Args: inputs: list of input Tensors. clusters: cluster Tensor. Returns: list of Tensors, where each element corresponds to each element in inputs. The value is the distance of each row to all the cluster centers.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\clustering_ops.py",
    "ast_data": "FunctionDef name:_compute_euclidean_distance arg:cls arg:inputs arg:clusters arguments arg arg arg Assign For With Call Assign Call Call Call Call Call Call Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "draw_gouraud_triangles",
    "source_code": "def draw_gouraud_triangles(self, gc, triangles_array, colors_array, transform):\n    raise NotImplementedError",
    "docstring": "Draw a series of Gouraud triangles. Parameters ---------- gc : The graphics context. triangles_array : (N, 3, 2) array-like Array of *N* (x, y) points for the triangles. colors_array : (N, 3, 4) array-like Array of *N* RGBA colors for each point of the triangles. transform : An affine transform to apply to the points.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\backend_bases.py",
    "ast_data": "FunctionDef name:draw_gouraud_triangles arg:self arg:gc arg:triangles_array arg:colors_array arg:transform arguments arg arg arg arg arg Raise"
  },
  {
    "library": "pandas",
    "name": "get",
    "source_code": "def get(self, key: str):\n    with patch_pickle():\n        group = self.get_node(key)\n        if group is None:\n            raise KeyError(f'No object named {key} in the file')\n        return self._read_group(group)",
    "docstring": "Retrieve pandas object stored in file. Parameters ---------- key : str Object to retrieve from file. Raises KeyError if not found. Returns ------- object Same type as object stored in file. See Also -------- HDFStore.get_node : Returns the node with the key. HDFStore.get_storer : Returns the storer object for a key. Examples -------- >>> df = pd.DataFrame([[1, 2], [3, 4]], columns=[\"A\", \"B\"]) >>> store = pd.HDFStore(\"store.h5\", \"w\") # doctest: +SKIP >>> store.put(\"data\", df) # doctest: +SKIP >>> store.get(\"data\") # doctest: +SKIP >>> store.close() # doctest: +SKIP",
    "type": "method",
    "file_path": "pandas\\pandas\\io\\pytables.py",
    "ast_data": "FunctionDef name:get arg:self arg:key arguments arg arg With Call Assign Call If Compare Raise Call Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "simplify_threshold",
    "source_code": "@property\ndef simplify_threshold(self):\n    return self._simplify_threshold",
    "docstring": "The fraction of a pixel difference below which vertices will be simplified out.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\path.py",
    "ast_data": "FunctionDef name:simplify_threshold arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_snapshot_tensor",
    "source_code": "def _snapshot_tensor(self, tensor):\n    snapshot_variable = self._create_or_get_tensor_values_cache(tensor.name, tensor.op.graph, tensor.shape.as_list(), tensor.dtype)\n    return state_ops.assign(snapshot_variable, tensor).op",
    "docstring": "Creates a new tf.Variable and a new tf.Operation that assigns the value of the tensor to this variable. Args: tensor: tensor whose values will be stored in a new tf.Variable. Returns: An assignment operation.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\tpu\\tensor_tracer.py",
    "ast_data": "FunctionDef name:_snapshot_tensor arg:self arg:tensor arguments arg arg Assign Call Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "set_multithreading_enabled",
    "source_code": "class set_multithreading_enabled(_DecoratorContextManager):\n\n    def __init__(self, mode: bool) -> None:\n        self.prev = torch._C._is_multithreading_enabled()\n        torch._C._set_multithreading_enabled(mode)\n        self.mode = mode\n\n    def __enter__(self) -> None:\n        pass\n\n    def __exit__(self, exc_type: Any, exc_value: Any, traceback: Any) -> None:\n        torch._C._set_multithreading_enabled(self.prev)\n\n    def clone(self) -> 'set_multithreading_enabled':\n        return self.__class__(self.mode)",
    "docstring": "Context-manager that sets multithreaded backwards on or off. `modeforward-mode AD `.",
    "type": "class",
    "file_path": "pytorch\\torch\\autograd\\grad_mode.py",
    "ast_data": "ClassDef name:set_multithreading_enabled FunctionDef name:__init__ arg:self arg:mode arguments arg arg Assign Call Call Assign FunctionDef name:__enter__ arg:self arguments arg FunctionDef name:__exit__ arg:self arg:exc_type arg:exc_value arg:traceback arguments arg arg arg arg Call FunctionDef name:clone arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_get_distribution",
    "source_code": "def _get_distribution(old_value):\n    dist = pasta.parse('\"uniform\" if old_value else \"truncated_normal\"')\n    ifexpr = dist.body[0].value\n    pasta.ast_utils.replace_child(ifexpr, ifexpr.test, old_value)\n    pasta.base.formatting.set(dist, 'prefix', '(')\n    pasta.base.formatting.set(dist, 'suffix', ')')\n    return dist",
    "docstring": "Returns an AST matching the following: (\"uniform\" if (old_value) else \"truncated_normal\")",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\tools\\compatibility\\tf_upgrade_v2.py",
    "ast_data": "FunctionDef name:_get_distribution arg:old_value arguments arg Assign Call Assign Call Call Call Return return:yes"
  },
  {
    "library": "pandas",
    "name": "write_data_chunk",
    "source_code": "def write_data_chunk(self, rows: np.ndarray, indexes: list[np.ndarray], mask: npt.NDArray[np.bool_] | None, values: list[np.ndarray]) -> None:\n    for v in values:\n        if not np.prod(v.shape):\n            return\n    nrows = indexes[0].shape[0]\n    if nrows != len(rows):\n        rows = np.empty(nrows, dtype=self.dtype)\n    names = self.dtype.names\n    nindexes = len(indexes)\n    for i, idx in enumerate(indexes):\n        rows[names[i]] = idx\n    for i, v in enumerate(values):\n        rows[names[i + nindexes]] = v\n    if mask is not None:\n        m = ~mask.ravel().astype(bool, copy=False)\n        if not m.all():\n            rows = rows[m]\n    if len(rows):\n        self.table.append(rows)\n        self.table.flush()",
    "docstring": "Parameters ---------- rows : an empty memory space where we are putting the chunk indexes : an array of the indexes mask : an array of the masks values : an array of the values",
    "type": "method",
    "file_path": "pandas\\pandas\\io\\pytables.py",
    "ast_data": "FunctionDef name:write_data_chunk arg:self arg:rows arg:indexes arg:mask arg:values arguments arg arg arg arg arg For If Call Return return:no Assign If Compare Call Assign Call Assign Assign Call For Call Assign For Call Assign If Compare Assign Call Call If Call Assign If Call Call Call"
  },
  {
    "library": "django",
    "name": "_merge_sanity_check",
    "source_code": "def _merge_sanity_check(self, other):\n    if self._fields is not None and (set(self.query.values_select) != set(other.query.values_select) or set(self.query.extra_select) != set(other.query.extra_select) or set(self.query.annotation_select) != set(other.query.annotation_select)):\n        raise TypeError(\"Merging '%s' classes must involve the same values in each case.\" % self.__class__.__name__)",
    "docstring": "Check that two QuerySet classes may be merged.",
    "type": "method",
    "file_path": "django\\django\\db\\models\\query.py",
    "ast_data": "FunctionDef name:_merge_sanity_check arg:self arg:other arguments arg arg If BoolOp Compare BoolOp Compare Call Call Compare Call Call Compare Call Call Raise Call"
  },
  {
    "library": "pandas",
    "name": "__setstate__",
    "source_code": "def __setstate__(self, state) -> None:\n    if isinstance(state, tuple):\n        nd_state, (fill_value, sp_index) = state\n        sparse_values = np.array([])\n        sparse_values.__setstate__(nd_state)\n        self._sparse_values = sparse_values\n        self._sparse_index = sp_index\n        self._dtype = SparseDtype(sparse_values.dtype, fill_value)\n    else:\n        self.__dict__.update(state)",
    "docstring": "Necessary for making this object picklable",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\arrays\\sparse\\array.py",
    "ast_data": "FunctionDef name:__setstate__ arg:self arg:state arguments arg arg If Call Assign Assign Call Call Assign Assign Assign Call Call"
  },
  {
    "library": "scipy",
    "name": "YaoLiu09",
    "source_code": "class YaoLiu09(Benchmark):\n    change_dimensionality = True\n\n    def __init__(self, dimensions=2):\n        Benchmark.__init__(self, dimensions)\n        self._bounds = list(zip([-5.12] * self.N, [5.12] * self.N))\n        self.global_optimum = [[0 for _ in range(self.N)]]\n        self.fglob = 0.0\n\n    def fun(self, x, *args):\n        self.nfev += 1\n        return sum(x ** 2.0 - 10.0 * cos(2 * pi * x) + 10)",
    "docstring": "Yao-Liu 9 objective function. This class defines the Yao-Liu [1]_ function 9 global optimization problem. This is a multimodal minimization problem defined as follows: .. math:: f_{\\text{YaoLiu09}}(x) = \\sum_{i=1}^n \\left [ x_i^2 - 10 \\cos(2 \\pi x_i ) + 10 \\right ] Here, :math: represents the number of dimensions and :math: for :math:. *Global optimum*: :math: for :math: for :math: .. [1] Yao X., Liu Y. (1997) Fast evolution strategies. In: Angeline P.J., Reynolds R.G., McDonnell J.R., Eberhart R. (eds) Evolutionary Programming VI. EP 1997. Lecture Notes in Computer Science, vol 1213. Springer, Berlin, Heidelberg .. [2] Gavana, A. Global Optimization Benchmarks and AMPGO retrieved 2015",
    "type": "class",
    "file_path": "scipy\\benchmarks\\benchmarks\\go_benchmark_functions\\go_funcs_Y.py",
    "ast_data": "ClassDef name:YaoLiu09 Assign FunctionDef name:__init__ arg:self arg:dimensions arguments arg arg Call Assign Call Call Assign Call Assign FunctionDef name:fun arg:self arg:x arguments arg arg arg Return return:yes Call Call"
  },
  {
    "library": "numpy",
    "name": "ArmCCompiler",
    "source_code": "class ArmCCompiler(UnixCCompiler):\n    compiler_type = 'arm'\n    cc_exe = 'armclang'\n    cxx_exe = 'armclang++'\n\n    def __init__(self, verbose=0, dry_run=0, force=0):\n        UnixCCompiler.__init__(self, verbose, dry_run, force)\n        cc_compiler = self.cc_exe\n        cxx_compiler = self.cxx_exe\n        self.set_executables(compiler=cc_compiler + ' -O3 -fPIC', compiler_so=cc_compiler + ' -O3 -fPIC', compiler_cxx=cxx_compiler + ' -O3 -fPIC', linker_exe=cc_compiler + ' -lamath', linker_so=cc_compiler + ' -lamath -shared')",
    "docstring": "Arm compiler.",
    "type": "class",
    "file_path": "numpy\\numpy\\distutils\\armccompiler.py",
    "ast_data": "ClassDef name:ArmCCompiler Assign Assign Assign FunctionDef name:__init__ arg:self arg:verbose arg:dry_run arg:force arguments arg arg arg arg Call Assign Assign Call"
  },
  {
    "library": "tensorflow",
    "name": "visit_ImportFrom",
    "source_code": "def visit_ImportFrom(self, node: ast.ImportFrom) -> None:\n    if node.module == self._decorator_package:\n        for name in node.names:\n            if name.name == self._decorator_symbol:\n                if name.asname:\n                    self._current_file_decorators.add(name.asname)\n                else:\n                    self._current_file_decorators.add(name.name)\n    else:\n        parent, module = self._decorator_package.rsplit('.', 1)\n        if node.module == parent:\n            for name in node.names:\n                if name.name == module:\n                    if name.asname:\n                        self._current_file_decorators.add(name.asname + '.' + self._decorator_symbol)\n                    else:\n                        self._current_file_decorators.add(name.name + '.' + self._decorator_symbol)\n    self.generic_visit(node)",
    "docstring": "Identifies imports of decorator.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\tools\\api\\generator2\\extractor\\extractor.py",
    "ast_data": "FunctionDef name:visit_ImportFrom arg:self arg:node arguments arg arg If Compare For If Compare If Call Call Assign Call If Compare For If Compare If Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "prepare_run_debug_urls",
    "source_code": "def prepare_run_debug_urls(self, fetches, feed_dict):\n    return self._grpc_debug_server_urls",
    "docstring": "Implementation of abstract method in superclass. See doc of for details. Args: fetches: Same as the argument to feed_dict: Same as the argument to Returns: debug_urls: ( or of ) file:// debug URLs to be used in this call.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\debug\\wrappers\\grpc_wrapper.py",
    "ast_data": "FunctionDef name:prepare_run_debug_urls arg:self arg:fetches arg:feed_dict arguments arg arg arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "experimental_make_numpy_dataset",
    "source_code": "def experimental_make_numpy_dataset(self, numpy_input, session=None):\n    _require_cross_replica_or_default_context_extended(self)\n    return self._experimental_make_numpy_dataset(numpy_input, session=session)",
    "docstring": "Makes a dataset for input provided via a numpy array. This avoids adding as a large constant in the graph, and copies the data to the machine or machines that will be processing the input. Args: numpy_input: A nest of NumPy input arrays that will be distributed evenly across all replicas. Note that lists of Numpy arrays are stacked, as that is normal behavior. session: (TensorFlow v1.x graph execution only) A session used for initialization. Returns: A representing .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\distribute_lib.py",
    "ast_data": "FunctionDef name:experimental_make_numpy_dataset arg:self arg:numpy_input arg:session arguments arg arg arg Call Return return:yes Call"
  },
  {
    "library": "sphinx",
    "name": "ImportExceptionGroup",
    "source_code": "class ImportExceptionGroup(Exception):\n\n    def __init__(self, message: str | None, exceptions: Sequence[BaseException]) -> None:\n        super().__init__(message)\n        self.exceptions = list(exceptions)",
    "docstring": "Exceptions raised during importing the target objects. It contains an error messages and a list of exceptions as its arguments.",
    "type": "class",
    "file_path": "sphinx\\sphinx\\ext\\autosummary\\__init__.py",
    "ast_data": "ClassDef name:ImportExceptionGroup FunctionDef name:__init__ arg:self arg:message arg:exceptions arguments arg arg arg Call Call Assign Call"
  },
  {
    "library": "tensorflow",
    "name": "num_buckets",
    "source_code": "@property\ndef num_buckets(self):\n    return self.hash_bucket_size",
    "docstring": "Returns number of buckets in this sparse feature.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\feature_column\\feature_column_v2.py",
    "ast_data": "FunctionDef name:num_buckets arg:self arguments arg Return return:yes"
  },
  {
    "library": "pygame",
    "name": "set_resolution",
    "source_code": "def set_resolution(self, width, height):\n    self.dev.setresolution(width, height)",
    "docstring": "Sets the capture resolution. (without dialog)",
    "type": "method",
    "file_path": "pygame\\src_py\\_camera_vidcapture.py",
    "ast_data": "FunctionDef name:set_resolution arg:self arg:width arg:height arguments arg arg arg Call"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, **kwargs):\n    super(_Merge, self).__init__(**kwargs)\n    self.supports_masking = True",
    "docstring": "Initializes a Merge layer. Args: **kwargs: standard layer keyword arguments.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\layers\\merge.py",
    "ast_data": "FunctionDef name:__init__ arg:self arguments arg arg Call Call Assign"
  },
  {
    "library": "tensorflow",
    "name": "_resource_apply_dense",
    "source_code": "def _resource_apply_dense(self, grad, handle):\n    raise RuntimeError('This function should never be called')",
    "docstring": "This function should never be called.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\training\\experimental\\loss_scale_optimizer.py",
    "ast_data": "FunctionDef name:_resource_apply_dense arg:self arg:grad arg:handle arguments arg arg arg Raise Call"
  },
  {
    "library": "django",
    "name": "create_cursor",
    "source_code": "def create_cursor(self, name=None):\n    raise NotImplementedError('subclasses of BaseDatabaseWrapper may require a create_cursor() method')",
    "docstring": "Create a cursor. Assume that a connection is established.",
    "type": "method",
    "file_path": "django\\django\\db\\backends\\base\\base.py",
    "ast_data": "FunctionDef name:create_cursor arg:self arg:name arguments arg arg Raise Call"
  },
  {
    "library": "pytorch",
    "name": "get_test_and_val_size",
    "source_code": "def get_test_and_val_size(self):\n    return (0.15, 0.15)",
    "docstring": "Returns the size of the test and validation sets.",
    "type": "method",
    "file_path": "pytorch\\torchgen\\_autoheuristic\\train_decision.py",
    "ast_data": "FunctionDef name:get_test_and_val_size arg:self arguments arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "unzip",
    "source_code": "def unzip(p: Path) -> None:\n    assert p.is_file()\n    unzipped_dir = p.with_name('unzipped-' + p.stem)\n    print(f'Extracting {p} to {unzipped_dir}')\n    with zipfile.ZipFile(p, 'r') as zip:\n        zip.extractall(unzipped_dir)",
    "docstring": "Unzip the provided zipfile to a similarly-named directory. Returns None if is not a zipfile. Looks like: /tmp/test-reports.zip -> /tmp/unzipped-test-reports/",
    "type": "function",
    "file_path": "pytorch\\tools\\stats\\upload_stats_lib.py",
    "ast_data": "FunctionDef name:unzip arg:p arguments arg Call Assign Call Call With Call Call"
  },
  {
    "library": "scikit-learn",
    "name": "_boost",
    "source_code": "@abstractmethod\ndef _boost(self, iboost, X, y, sample_weight, random_state):\n    pass",
    "docstring": "Implement a single boost. Warning: This method needs to be overridden by subclasses. Parameters ---------- iboost : int The index of the current boost iteration. X : {array-like, sparse matrix} of shape (n_samples, n_features) The training input samples. Sparse matrix can be CSC, CSR, COO, DOK, or LIL. COO, DOK, and LIL are converted to CSR. y : array-like of shape (n_samples,) The target values (class labels). sample_weight : array-like of shape (n_samples,) The current sample weights. random_state : RandomState The current random number generator Returns ------- sample_weight : array-like of shape (n_samples,) or None The reweighted sample weights. If None then boosting has terminated early. estimator_weight : float The weight for the current boost. If None then boosting has terminated early. error : float The classification error for the current boost. If None then boosting has terminated early.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\ensemble\\_weight_boosting.py",
    "ast_data": "FunctionDef name:_boost arg:self arg:iboost arg:X arg:y arg:sample_weight arg:random_state arguments arg arg arg arg arg arg"
  },
  {
    "library": "tensorflow",
    "name": "VariableAndLossTracker",
    "source_code": "class VariableAndLossTracker(module.Module):\n\n    def __init__(self):\n        self._var_store = _EagerVariableStore()\n        self._variables = {}\n\n    def _variable_creator(self, next_creator, **kwargs):\n        var = next_creator(**kwargs)\n        self._variables[var.name] = var\n        return var\n\n    @tf_contextlib.contextmanager\n    def scope(self):\n        with vs.variable_creator_scope(self._variable_creator), vs.with_variable_store(self._var_store):\n            yield\n\n    def get_regularization_losses(self):\n        losses = {}\n        for var_name, regularizer in self._var_store._regularizers.items():\n            losses[var_name] = regularizer()\n        return losses",
    "docstring": "Module that has a scope to capture vars/losses made by .",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\legacy_tf_layers\\variable_scope_shim.py",
    "ast_data": "ClassDef name:VariableAndLossTracker FunctionDef name:__init__ arg:self arguments arg Assign Call Assign FunctionDef name:_variable_creator arg:self arg:next_creator arguments arg arg arg Assign Call Assign Return return:yes FunctionDef name:scope arg:self arguments arg With Call Call FunctionDef name:get_regularization_losses arg:self arguments arg Assign For Call Assign Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "training_mode",
    "source_code": "@property\ndef training_mode(self):\n    return self._training_mode",
    "docstring": "The training mode for the exporter.",
    "type": "method",
    "file_path": "pytorch\\torch\\onnx\\_globals.py",
    "ast_data": "FunctionDef name:training_mode arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "name",
    "source_code": "@abc.abstractproperty\ndef name(self):\n    pass",
    "docstring": "Returns string. Used for naming.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\feature_column\\feature_column_v2_types.py",
    "ast_data": "FunctionDef name:name arg:self arguments arg"
  },
  {
    "library": "tensorflow",
    "name": "restore",
    "source_code": "@abc.abstractmethod\ndef restore(self, output):\n    pass",
    "docstring": "Create an accumulator based on 'output'. This method creates a new accumulator with identical internal state to the one used to create the data in 'output'. This means that if you do output_data = combiner.extract(accumulator_1) accumulator_2 = combiner.restore(output_data) then accumulator_1 and accumulator_2 will have identical internal state, and computations using either of them will be equivalent. Args: output: The data output from a previous computation. Should be in the same form as provided by 'extract_output'. Returns: A new accumulator.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\base_preprocessing_layer.py",
    "ast_data": "FunctionDef name:restore arg:self arg:output arguments arg arg"
  },
  {
    "library": "scikit-learn",
    "name": "fit_transform",
    "source_code": "def fit_transform(self, X, y, **fit_params):\n    _raise_for_params(fit_params, self, 'fit', allow=['sample_weight'])\n    return super().fit_transform(X, y, **fit_params)",
    "docstring": "Fit the estimators and return the predictions for X for each estimator. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) Training vectors, where is the number of samples and is the number of features. y : array-like of shape (n_samples,) Target values. **fit_params : dict Parameters to pass to the underlying estimators. .. versionadded:: 1.6 Only available if , which can be set by using `Metadata Routing User Guide ` for more details. Returns ------- y_preds : ndarray of shape (n_samples, n_estimators) Prediction outputs for each estimator.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\ensemble\\_stacking.py",
    "ast_data": "FunctionDef name:fit_transform arg:self arg:X arg:y arguments arg arg arg arg Call Return return:yes Call Call"
  },
  {
    "library": "django",
    "name": "RenderContext",
    "source_code": "class RenderContext(BaseContext):\n    template = None\n\n    def __iter__(self):\n        yield from self.dicts[-1]\n\n    def __contains__(self, key):\n        return key in self.dicts[-1]\n\n    def get(self, key, otherwise=None):\n        return self.dicts[-1].get(key, otherwise)\n\n    def __getitem__(self, key):\n        return self.dicts[-1][key]\n\n    @contextmanager\n    def push_state(self, template, isolated_context=True):\n        initial = self.template\n        self.template = template\n        if isolated_context:\n            self.push()\n        try:\n            yield\n        finally:\n            self.template = initial\n            if isolated_context:\n                self.pop()",
    "docstring": "A stack container for storing Template state. RenderContext simplifies the implementation of template Nodes by providing a safe place to store state between invocations of a node's method. The RenderContext also provides scoping rules that are more sensible for 'template local' variables. The render context stack is pushed before each template is rendered, creating a fresh scope with nothing in it. Name resolution fails if a variable is not found at the top of the RequestContext stack. Thus, variables are local to a specific template and don't affect the rendering of other templates as they would if they were stored in the normal template context.",
    "type": "class",
    "file_path": "django\\django\\template\\context.py",
    "ast_data": "ClassDef name:RenderContext Assign FunctionDef name:__iter__ arg:self arguments arg FunctionDef name:__contains__ arg:self arg:key arguments arg arg Return return:yes Compare FunctionDef name:get arg:self arg:key arg:otherwise arguments arg arg arg Return return:yes Call FunctionDef name:__getitem__ arg:self arg:key arguments arg arg Return return:yes FunctionDef name:push_state arg:self arg:template arg:isolated_context arguments arg arg arg Assign Assign If Call Try Assign If Call"
  },
  {
    "library": "pandas",
    "name": "TimeSeries_TimedeltaFormatter",
    "source_code": "class TimeSeries_TimedeltaFormatter(mpl.ticker.Formatter):\n    axis: Axis\n\n    @staticmethod\n    def format_timedelta_ticks(x, pos, n_decimals: int) -> str:\n        s, ns = divmod(x, 10 ** 9)\n        m, s = divmod(s, 60)\n        h, m = divmod(m, 60)\n        d, h = divmod(h, 24)\n        decimals = int(ns * 10 ** (n_decimals - 9))\n        s = f'{int(h):02d}:{int(m):02d}:{int(s):02d}'\n        if n_decimals > 0:\n            s += f'.{decimals:0{n_decimals}d}'\n        if d != 0:\n            s = f'{int(d):d} days {s}'\n        return s\n\n    def __call__(self, x, pos: int | None=0) -> str:\n        vmin, vmax = tuple(self.axis.get_view_interval())\n        n_decimals = min(int(np.ceil(np.log10(100 * 10 ** 9 / abs(vmax - vmin)))), 9)\n        return self.format_timedelta_ticks(x, pos, n_decimals)",
    "docstring": "Formats the ticks along an axis controlled by a :class:.",
    "type": "class",
    "file_path": "pandas\\pandas\\plotting\\_matplotlib\\converter.py",
    "ast_data": "ClassDef name:TimeSeries_TimedeltaFormatter FunctionDef name:format_timedelta_ticks arg:x arg:pos arg:n_decimals arguments arg arg arg Assign Call Assign Call Assign Call Assign Call Assign Call Assign Call Call Call If Compare If Compare Assign Call Return return:yes FunctionDef name:__call__ arg:self arg:x arg:pos arguments arg arg arg Assign Call Call Assign Call Call Call Call Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "top_k_categorical_accuracy",
    "source_code": "@dispatch.add_dispatch_support\ndef top_k_categorical_accuracy(y_true, y_pred, k=5):\n    return math_ops.cast(nn.in_top_k(y_pred, math_ops.argmax(y_true, axis=-1), k), backend.floatx())",
    "docstring": "Computes how often targets are in the top predictions. Standalone usage: >>> y_true = [[0, 0, 1], [0, 1, 0]] >>> y_pred = [[0.1, 0.9, 0.8], [0.05, 0.95, 0]] >>> m = tf.keras.metrics.top_k_categorical_accuracy(y_true, y_pred, k=3) >>> assert m.shape == (2,) >>> m.numpy() array([1., 1.], dtype=float32) Args: y_true: The ground truth values. y_pred: The prediction values. k: (Optional) Number of top elements to look at for computing accuracy. Defaults to 5. Returns: Top K categorical accuracy value.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\metrics.py",
    "ast_data": "FunctionDef name:top_k_categorical_accuracy arg:y_true arg:y_pred arg:k arguments arg arg arg Return return:yes Call Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "grayscale_to_rgb",
    "source_code": "@tf_export('image.grayscale_to_rgb')\n@dispatch.add_dispatch_support\ndef grayscale_to_rgb(images, name=None):\n    with ops.name_scope(name, 'grayscale_to_rgb', [images]) as name:\n        images = _AssertGrayscaleImage(images)\n        images = ops.convert_to_tensor(images, name='images')\n        rank_1 = array_ops.expand_dims(array_ops.rank(images) - 1, 0)\n        shape_list = [array_ops.ones(rank_1, dtype=dtypes.int32)] + [array_ops.expand_dims(3, 0)]\n        multiples = array_ops.concat(shape_list, 0)\n        rgb = array_ops.tile(images, multiples, name=name)\n        rgb.set_shape(images.get_shape()[:-1].concatenate([3]))\n        return rgb",
    "docstring": "Converts one or more images from Grayscale to RGB. Outputs a tensor of the same and rank as . The size of the last dimension of the output is 3, containing the RGB value of the pixels. The input images' last dimension must be size 1. >>> original = tf.constant([[[1.0], [2.0], [3.0]]]) >>> converted = tf.image.grayscale_to_rgb(original) >>> print(converted.numpy()) [[[1. 1. 1.] [2. 2. 2.] [3. 3. 3.]]] Args: images: The Grayscale tensor to convert. The last dimension must be size 1. name: A name for the operation (optional). Returns: The converted grayscale image(s).",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\image_ops_impl.py",
    "ast_data": "FunctionDef name:grayscale_to_rgb arg:images arg:name arguments arg arg With Call Assign Call Assign Call Assign Call Call Assign Call Call Assign Call Assign Call Call Call Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "_init_amx",
    "source_code": "def _init_amx() -> bool:\n    return torch._C._cpu._init_amx()",
    "docstring": "Initializes AMX instructions.",
    "type": "function",
    "file_path": "pytorch\\torch\\cpu\\__init__.py",
    "ast_data": "FunctionDef name:_init_amx arguments Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "_get_level_values",
    "source_code": "def _get_level_values(self, level: int, unique: bool=False) -> Index:\n    lev = self.levels[level]\n    level_codes = self.codes[level]\n    name = self._names[level]\n    if unique:\n        level_codes = algos.unique(level_codes)\n    filled = algos.take_nd(lev._values, level_codes, fill_value=lev._na_value)\n    return lev._shallow_copy(filled, name=name)",
    "docstring": "Return vector of label values for requested level, equal to the length of the index **this is an internal method** Parameters ---------- level : int unique : bool, default False if True, drop duplicated values Returns ------- Index",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\indexes\\multi.py",
    "ast_data": "FunctionDef name:_get_level_values arg:self arg:level arg:unique arguments arg arg arg Assign Assign Assign If Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "sphinx",
    "name": "SortIds",
    "source_code": "class SortIds(SphinxTransform):\n    default_priority = 261\n\n    def apply(self, **kwargs: Any) -> None:\n        for node in self.document.findall(nodes.section):\n            if len(node['ids']) > 1 and node['ids'][0].startswith('id'):\n                node['ids'] = [*node['ids'][1:], node['ids'][0]]",
    "docstring": "Sort section IDs so that the \"id[0-9]+\" one comes last.",
    "type": "class",
    "file_path": "sphinx\\sphinx\\transforms\\__init__.py",
    "ast_data": "ClassDef name:SortIds Assign FunctionDef name:apply arg:self arguments arg arg For Call If BoolOp Compare Call Call Assign"
  },
  {
    "library": "pandas",
    "name": "_format_strings",
    "source_code": "def _format_strings(self) -> list[str]:\n    values = self.values\n    if self.formatter is not None:\n        return [self.formatter(x) for x in values]\n    fmt_values = values._format_native_types(na_rep=self.nat_rep, date_format=self.date_format)\n    return fmt_values.tolist()",
    "docstring": "we by definition have DO NOT have a TZ",
    "type": "method",
    "file_path": "pandas\\pandas\\io\\formats\\format.py",
    "ast_data": "FunctionDef name:_format_strings arg:self arguments arg Assign If Compare Return return:yes Call Assign Call Return return:yes Call"
  },
  {
    "library": "django",
    "name": "_get_timezone_name",
    "source_code": "def _get_timezone_name(timezone):\n    return timezone.tzname(None) or str(timezone)",
    "docstring": "Return the offset for fixed offset timezones, or the name of timezone if not set.",
    "type": "function",
    "file_path": "django\\django\\utils\\timezone.py",
    "ast_data": "FunctionDef name:_get_timezone_name arg:timezone arguments arg Return return:yes BoolOp Call Call"
  },
  {
    "library": "pytorch",
    "name": "jvp",
    "source_code": "@exposed_in('torch.func')\ndef jvp(func: Callable, primals: Any, tangents: Any, *, strict: bool=False, has_aux: bool=False):\n    return _jvp_with_argnums(func, primals, tangents, argnums=None, strict=strict, has_aux=has_aux)",
    "docstring": "Standing for the Jacobian-vector product, returns a tuple containing the output of and the \"Jacobian of `jvp` can support functions with multiple inputs by passing in the tangents for each of the inputs >>> from torch.func import jvp >>> x = torch.randn(5) >>> y = torch.randn(5) >>> f = lambda x, y: (x * y) >>> _, output = jvp(f, (x, y), (torch.ones(5), torch.ones(5))) >>> assert torch.allclose(output, x + y)",
    "type": "function",
    "file_path": "pytorch\\torch\\_functorch\\eager_transforms.py",
    "ast_data": "FunctionDef name:jvp arg:func arg:primals arg:tangents arguments arg arg arg arg arg Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "lecun_uniform",
    "source_code": "@tf_export(v1=['initializers.lecun_uniform'])\ndef lecun_uniform(seed=None):\n    return VarianceScaling(scale=1.0, mode='fan_in', distribution='uniform', seed=seed)",
    "docstring": "LeCun uniform initializer. It draws samples from a uniform distribution within [-limit, limit] where is where is the number of input units in the weight tensor. Args: seed: A Python integer. Used to seed the random generator. Returns: An initializer. References: - Self-Normalizing Neural Networks, [Klambauer et al., 2017]( # pylint: disable=line-too-long ([pdf]( - Efficient Backprop, [Lecun et al., 1998](",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\init_ops.py",
    "ast_data": "FunctionDef name:lecun_uniform arg:seed arguments arg Return return:yes Call Call"
  },
  {
    "library": "kornia",
    "name": "forward",
    "source_code": "def forward(self, rgbs: Tensor, densities: Tensor, points_3d: Tensor) -> Tensor:\n    KORNIA_CHECK_SHAPE(rgbs, ['*', 'N', '3'])\n    KORNIA_CHECK_SHAPE(densities, ['*', 'N'])\n    KORNIA_CHECK_SHAPE(points_3d, ['*', 'N', '3'])\n    num_ray_points: int = points_3d.shape[-2]\n    points_3d = points_3d.reshape(-1, num_ray_points, 3)\n    delta_3d = points_3d[0, 1, :] - points_3d[0, 0, :]\n    delta = torch.linalg.norm(delta_3d, dim=-1)\n    alpha = 1 - torch.exp(-1.0 * densities * delta)\n    return self._render(alpha, rgbs)",
    "docstring": "Render 3D regularly sampled points along rays. Args: rgbs: RGB values of points along rays :math: densities: Volume densities of points along rays :math: points_3d: 3D points along rays :math: Returns: Rendered RGB values for each ray :math:",
    "type": "method",
    "file_path": "kornia\\kornia\\nerf\\volume_renderer.py",
    "ast_data": "FunctionDef name:forward arg:self arg:rgbs arg:densities arg:points_3d arguments arg arg arg arg Call Call Call Assign Call Assign Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "_allow_exported_model_train_eval",
    "source_code": "def _allow_exported_model_train_eval(model: torch.fx.GraphModule):\n\n    def _train(self, mode: bool=True):\n        if mode:\n            _move_exported_model_to_train(self)\n        else:\n            _move_exported_model_to_eval(self)\n\n    def _eval(self):\n        _move_exported_model_to_eval(self)\n    model.train = types.MethodType(_train, model)\n    model.eval = types.MethodType(_eval, model)\n    return model",
    "docstring": "Allow users to call and on an exported model, but with the effect of changing behavior between the two modes limited to special ops only, which are currently dropout and batchnorm. Note: This does not achieve the same effect as what and does in eager models, but only provides an approximation. In particular, user code branching on flag will not function correctly in general because the branch is already specialized at export time. Additionally, other ops beyond dropout and batchnorm that have different train/eval behavior will also not be converted properly.",
    "type": "function",
    "file_path": "pytorch\\torch\\ao\\quantization\\pt2e\\export_utils.py",
    "ast_data": "FunctionDef name:_allow_exported_model_train_eval arg:model arguments arg FunctionDef name:_train arg:self arg:mode arguments arg arg If Call Call FunctionDef name:_eval arg:self arguments arg Call Assign Call Assign Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "from_descriptor_list",
    "source_code": "def from_descriptor_list(self, descriptors: Sequence[str]) -> Sequence[Extension]:\n\n    def from_descriptor(desc: str) -> Extension:\n        name, _, version = desc.partition('/')\n        if version is None:\n            version = 0\n        ext = self.extensions.get(name)\n        if not ext:\n            raise ValueError(f'Unknown extension name={name!r}')\n        return ext.from_descriptor(version)\n    return [from_descriptor(desc) for desc in descriptors]",
    "docstring": "Given a seuquence of descriptor strings as returned by Extension.get_descriptor at save time, creates a sequence of Extension instances. The name[@local-domain] preceding the version number is used to look up an implementation class in the registry, and the version is passed to the class's from_descriptor static method. If the registry contains no match, this will throw ValueError. If the from_descriptor method raises an exception, that will pass through to the caller.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\checkpoint\\_extension.py",
    "ast_data": "FunctionDef name:from_descriptor_list arg:self arg:descriptors arguments arg arg FunctionDef name:from_descriptor arg:desc arguments arg Assign Call If Compare Assign Assign Call If Raise Call Return return:yes Call Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "_validate_variable_name",
    "source_code": "def _validate_variable_name(self, name: str) -> str:\n    for c in name:\n        if ord(c) < 128 and (c < 'A' or c > 'Z') and (c < 'a' or c > 'z') and (c < '0' or c > '9') and (c != '_') or 128 <= ord(c) < 192 or c in {'×', '÷'}:\n            name = name.replace(c, '_')\n    return name",
    "docstring": "Validate variable names for Stata export. Parameters ---------- name : str Variable name Returns ------- str The validated name with invalid characters replaced with underscores. Notes ----- Stata 118+ support most unicode characters. The only limitation is in the ascii range where the characters supported are a-z, A-Z, 0-9 and _.",
    "type": "method",
    "file_path": "pandas\\pandas\\io\\stata.py",
    "ast_data": "FunctionDef name:_validate_variable_name arg:self arg:name arguments arg arg For If BoolOp BoolOp Compare Call BoolOp Compare Compare BoolOp Compare Compare BoolOp Compare Compare Compare Compare Call Compare Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, element_shape=None, dtype=dtypes.float32, dynamic_size=False, infer_shape=True):\n    self._element_shape = tensor_shape.as_shape(element_shape)\n    self._dtype = dtypes.as_dtype(dtype)\n    self._dynamic_size = dynamic_size\n    self._infer_shape = infer_shape",
    "docstring": "Constructs a type specification for a . Args: element_shape: The shape of each element in the . dtype: Data type of the . dynamic_size: Whether the can grow past its initial size. infer_shape: Whether shape inference is enabled.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\tensor_array_ops.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:element_shape arg:dtype arg:dynamic_size arg:infer_shape arguments arg arg arg arg arg Assign Call Assign Call Assign Assign"
  },
  {
    "library": "pytorch",
    "name": "_generate_dict_info",
    "source_code": "def _generate_dict_info(self, input_info: dict, weight_info: dict, comp_stats: dict) -> dict[str, dict]:\n    input_weight_equalization_info: dict[str, dict] = {}\n    for module_fqn in input_info:\n        mod_input_info: dict = input_info[module_fqn]\n        mod_weight_info: dict = weight_info[module_fqn]\n        mod_comp_stat: dict = comp_stats[module_fqn]\n        channel_rec_vals: list = []\n        for val in mod_comp_stat:\n            float_rep: float = val.item()\n            recommended: bool = float_rep >= self.ratio_threshold and float_rep <= 1 / self.ratio_threshold\n            channel_rec_vals.append(recommended)\n        input_weight_equalization_info[module_fqn] = {self.RECOMMENDED_KEY: channel_rec_vals, self.COMP_METRIC_KEY: mod_comp_stat, self.THRESHOLD_KEY: self.ratio_threshold, self.CHANNEL_KEY: self.ch_axis, **mod_input_info, **mod_weight_info}\n    return input_weight_equalization_info",
    "docstring": "Helper function for generate_detector_report that does the generation of the dictionary. This process is done as specified in generate_detector_report documentation Args: input_info (dict): A dict mapping each module to input range information weight_info (dict): A dict mapping each module to weight range information comp_stats (dict): A dict mapping each module to its corresponding comp stat Returns a dictionary mapping each module with relevant ModelReportObservers around them to: whether input weight equalization is recommended their s_c metric compared to the threshold the threshold used to make the recommendation the channel used for recording data the input channel range info the weight channel range info",
    "type": "method",
    "file_path": "pytorch\\torch\\ao\\quantization\\fx\\_model_report\\detector.py",
    "ast_data": "FunctionDef name:_generate_dict_info arg:self arg:input_info arg:weight_info arg:comp_stats arguments arg arg arg arg For For Call BoolOp Compare Compare Call Assign Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_reduce_tensor",
    "source_code": "def _reduce_tensor(self, tensor):\n    metadata = extract_tensor_metadata_for_cache_key(tensor)\n    return (_ident, (metadata,))",
    "docstring": "Reduce the tensor to a stable key for caching.",
    "type": "method",
    "file_path": "pytorch\\torch\\_functorch\\_aot_autograd\\autograd_cache.py",
    "ast_data": "FunctionDef name:_reduce_tensor arg:self arg:tensor arguments arg arg Assign Call Return return:yes"
  },
  {
    "library": "scipy",
    "name": "Schwefel22",
    "source_code": "class Schwefel22(Benchmark):\n    change_dimensionality = True\n\n    def __init__(self, dimensions=2):\n        Benchmark.__init__(self, dimensions)\n        self._bounds = list(zip([-100.0] * self.N, [100.0] * self.N))\n        self.custom_bounds = ([-10.0, 10.0], [-10.0, 10.0])\n        self.global_optimum = [[0.0 for _ in range(self.N)]]\n        self.fglob = 0.0\n\n    def fun(self, x, *args):\n        self.nfev += 1\n        return sum(abs(x)) + prod(abs(x))",
    "docstring": "Schwefel 22 objective function. This class defines the Schwefel 22 [1]_ global optimization problem. This is a multimodal minimization problem defined as follows: .. math:: f_{\\text{Schwefel22}}(x) = \\sum_{i=1}^n \\lvert x_i \\rvert + \\prod_{i=1}^n \\lvert x_i \\rvert Here, :math: represents the number of dimensions and :math: for :math:. *Global optimum*: :math: for :math: for :math: .. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions For Global Optimization Problems Int. Journal of Mathematical Modelling and Numerical Optimisation, 2013, 4, 150-194.",
    "type": "class",
    "file_path": "scipy\\benchmarks\\benchmarks\\go_benchmark_functions\\go_funcs_S.py",
    "ast_data": "ClassDef name:Schwefel22 Assign FunctionDef name:__init__ arg:self arg:dimensions arguments arg arg Call Assign Call Call Assign Assign Call Assign FunctionDef name:fun arg:self arg:x arguments arg arg arg Return return:yes Call Call Call Call"
  },
  {
    "library": "scikit-learn",
    "name": "get_feature_names_out",
    "source_code": "def get_feature_names_out(self, input_features=None):\n    check_is_fitted(self, 'feature_names_')\n    if any((not isinstance(name, str) for name in self.feature_names_)):\n        feature_names = [str(name) for name in self.feature_names_]\n    else:\n        feature_names = self.feature_names_\n    return np.asarray(feature_names, dtype=object)",
    "docstring": "Get output feature names for transformation. Parameters ---------- input_features : array-like of str or None, default=None Not used, present here for API consistency by convention. Returns ------- feature_names_out : ndarray of str objects Transformed feature names.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\feature_extraction\\_dict_vectorizer.py",
    "ast_data": "FunctionDef name:get_feature_names_out arg:self arg:input_features arguments arg arg Call If Call Call Assign Call Assign Return return:yes Call"
  },
  {
    "library": "scrapy",
    "name": "RequestFingerprinter",
    "source_code": "class RequestFingerprinter:\n\n    @classmethod\n    def from_crawler(cls, crawler: Crawler) -> Self:\n        return cls(crawler)\n\n    def __init__(self, crawler: Crawler | None=None):\n        if crawler:\n            implementation = crawler.settings.get('REQUEST_FINGERPRINTER_IMPLEMENTATION')\n        else:\n            implementation = 'SENTINEL'\n        if implementation != 'SENTINEL':\n            message = \"'REQUEST_FINGERPRINTER_IMPLEMENTATION' is a deprecated setting.\\nIt will be removed in a future version of Scrapy.\"\n            warnings.warn(message, category=ScrapyDeprecationWarning, stacklevel=2)\n        self._fingerprint = fingerprint\n\n    def fingerprint(self, request: Request) -> bytes:\n        return self._fingerprint(request)",
    "docstring": "Default fingerprinter. It takes into account a canonical version (:func:) of :attr: and the values of :attr: and :attr:. It then generates an _ hash.",
    "type": "class",
    "file_path": "scrapy\\scrapy\\utils\\request.py",
    "ast_data": "ClassDef name:RequestFingerprinter FunctionDef name:from_crawler arg:cls arg:crawler arguments arg arg Return return:yes Call FunctionDef name:__init__ arg:self arg:crawler arguments arg arg If Assign Call Assign If Compare Assign Call Assign FunctionDef name:fingerprint arg:self arg:request arguments arg arg Return return:yes Call"
  },
  {
    "library": "django",
    "name": "project_state",
    "source_code": "def project_state(self, nodes=None, at_end=True):\n    return self.graph.make_state(nodes=nodes, at_end=at_end, real_apps=self.unmigrated_apps)",
    "docstring": "Return a ProjectState object representing the most recent state that the loaded migrations represent. See graph.make_state() for the meaning of \"nodes\" and \"at_end\".",
    "type": "method",
    "file_path": "django\\django\\db\\migrations\\loader.py",
    "ast_data": "FunctionDef name:project_state arg:self arg:nodes arg:at_end arguments arg arg arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "after_create_session",
    "source_code": "def after_create_session(self, session, coord):\n    pass",
    "docstring": "Called when new TensorFlow session is created. This is called to signal the hooks that a new session has been created. This has two essential differences with the situation in which is called: * When this is called, the graph is finalized and ops can no longer be added to the graph. * This method will also be called as a result of recovering a wrapped session, not only at the beginning of the overall session. Args: session: A TensorFlow Session that has been created. coord: A Coordinator object which keeps track of all threads.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\training\\session_run_hook.py",
    "ast_data": "FunctionDef name:after_create_session arg:self arg:session arg:coord arguments arg arg arg"
  },
  {
    "library": "tensorflow",
    "name": "_batch_all_reduce",
    "source_code": "def _batch_all_reduce(self, reduce_op, per_replica_values):\n    dense_values, dense_indices, sparse_values, sparse_indices = cross_device_utils.split_by_sparsity(per_replica_values)\n    if dense_values:\n        dense_results = self._do_batch_all_reduce(reduce_op, dense_values)\n    else:\n        dense_results = []\n    if sparse_values:\n        sparse_results = self._do_batch_all_reduce_sparse(reduce_op, sparse_values)\n    else:\n        sparse_results = []\n    return cross_device_utils.stitch_values(((dense_results, dense_indices), (sparse_results, sparse_indices)))",
    "docstring": "All-reduce algorithm in a batch.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\cross_device_ops.py",
    "ast_data": "FunctionDef name:_batch_all_reduce arg:self arg:reduce_op arg:per_replica_values arguments arg arg arg Assign Call If Assign Call Assign If Assign Call Assign Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "disable_v2_tensorshape",
    "source_code": "@tf_export(v1=['disable_v2_tensorshape'])\ndef disable_v2_tensorshape():\n    global _TENSORSHAPE_V2_OVERRIDE\n    _TENSORSHAPE_V2_OVERRIDE = False\n    logging.vlog(1, 'Disabling v2 tensorshape')\n    _api_usage_gauge.get_cell().set(False)",
    "docstring": "Disables the V2 TensorShape behavior and reverts to V1 behavior. See docstring for for details about the new behavior.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\tensor_shape.py",
    "ast_data": "FunctionDef name:disable_v2_tensorshape arguments Assign Call Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "apply_padding",
    "source_code": "def apply_padding(e1_var: TVar, e11: BinConstraintT, e2: BinConstraintT, e12: BinConstraintT, d2: list[DVar], d11: list[DVar], d12: list[DVar], counter: int):\n    res = []\n    for i in range(1, len(d2)):\n        d1, counter = gen_tensor_dims(i, counter)\n        nat_constraints = gen_nat_constraints(d1 + d2 + d11 + d12)\n        e1 = BinConstraintT(e1_var, TensorType(d1), op_eq)\n        simulate_padding = [None] * (len(d2) - i)\n        assert len(simulate_padding + d1) == len(d2)\n        broadcast_padding = [broadcast_dim(simulate_padding, d2, d11, d12, j, True) for j in range(len(d2) - i)]\n        all_broadcasting_possibilities = generate_all_broadcasting_possibilities_no_padding(d1, d2[len(d2) - i:], d11[len(d2) - i:], d12[len(d2) - i:])\n        c = Conj([e1, e11, e2, e12, *broadcast_padding, all_broadcasting_possibilities, *nat_constraints])\n        res.append(c)\n    return (Disj(res), counter)",
    "docstring": "We are considering the possibility where one input has less dimensions than another input, so we apply padding to the broadcasted results Args: e1_var: Variable representing the first input where padding will be e11: constraint of the form e11 = Tensortype[d1, ..., dn] e2: constraint of the form e2 = Tensortype[d1, ..., dn] e12: constraint of the form e11 = Tensortype[d1, ..., dn] d2: Tensor variables for the second input d11: Tensor variables for the broadcasted first input d12: Tensor variables for the broadcasted second input counter: variable tracking Returns: A new constraint whose goal is to apply padding to the broadcasted result",
    "type": "function",
    "file_path": "pytorch\\torch\\fx\\experimental\\migrate_gradual_types\\constraint_transformation.py",
    "ast_data": "FunctionDef name:apply_padding arg:e1_var arg:e11 arg:e2 arg:e12 arg:d2 arg:d11 arg:d12 arg:counter arguments arg arg arg arg arg arg arg arg Assign For Call Call Assign Call Assign Call Assign Call Call Assign Call Compare Call Call Assign Call Call Call Assign Call Call Call Call Assign Call Call Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "_spatial_median",
    "source_code": "def _spatial_median(X, max_iter=300, tol=0.001):\n    if X.shape[1] == 1:\n        return (1, np.median(X.ravel(), keepdims=True))\n    tol **= 2\n    spatial_median_old = np.mean(X, axis=0)\n    for n_iter in range(max_iter):\n        spatial_median = _modified_weiszfeld_step(X, spatial_median_old)\n        if np.sum((spatial_median_old - spatial_median) ** 2) < tol:\n            break\n        else:\n            spatial_median_old = spatial_median\n    else:\n        warnings.warn('Maximum number of iterations {max_iter} reached in spatial median for TheilSen regressor.'.format(max_iter=max_iter), ConvergenceWarning)\n    return (n_iter, spatial_median)",
    "docstring": "Spatial median (L1 median). The spatial median is member of a class of so-called M-estimators which are defined by an optimization problem. Given a number of p points in an n-dimensional space, the point x minimizing the sum of all distances to the p other points is called spatial median. Parameters ---------- X : array-like of shape (n_samples, n_features) Training vector, where is the number of samples and is the number of features. max_iter : int, default=300 Maximum number of iterations. tol : float, default=1.e-3 Stop the algorithm if spatial_median has converged. Returns ------- spatial_median : ndarray of shape = (n_features,) Spatial median. n_iter : int Number of iterations needed. References ---------- - On Computation of Spatial Median for Robust Data Mining, 2005 T. Kärkkäinen and S. Äyrämö",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\linear_model\\_theil_sen.py",
    "ast_data": "FunctionDef name:_spatial_median arg:X arg:max_iter arg:tol arguments arg arg arg If Compare Return return:yes Call Call Assign Call For Call Assign Call If Compare Call Assign Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "output_json",
    "source_code": "def output_json(output_file, headers, row):\n    mapping_headers = {headers[i]: v for i, v in enumerate(row)}\n    record = {'benchmark': {'name': 'PyTorch gpt-fast benchmark', 'mode': 'inference', 'dtype': mapping_headers['dtype'], 'extra_info': {'device': mapping_headers['device'], 'arch': mapping_headers['arch']}}, 'model': {'name': mapping_headers['name'], 'type': 'OSS model' if mapping_headers['is_model'] else 'micro-benchmark', 'origins': ['pytorch']}, 'metric': {'name': mapping_headers['metric'], 'benchmark_values': [mapping_headers['actual']], 'target_value': mapping_headers['target']}}\n    with open(f'{os.path.splitext(output_file)[0]}.json', 'a') as f:\n        print(json.dumps(record), file=f)",
    "docstring": "Write the result into JSON format, so that it can be uploaded to the benchmark database to be displayed on OSS dashboard. The JSON format is defined at",
    "type": "function",
    "file_path": "pytorch\\benchmarks\\gpt_fast\\benchmark.py",
    "ast_data": "FunctionDef name:output_json arg:output_file arg:headers arg:row arguments arg arg arg Assign Call Assign With Call Call Call Call"
  },
  {
    "library": "scipy",
    "name": "_argcheck",
    "source_code": "def _argcheck(self, beta, m):\n    return (m > 1) & (beta > 0)",
    "docstring": "Shape parameter bounds are m > 1 and beta > 0.",
    "type": "method",
    "file_path": "scipy\\scipy\\stats\\_continuous_distns.py",
    "ast_data": "FunctionDef name:_argcheck arg:self arg:beta arg:m arguments arg arg arg Return return:yes Compare Compare"
  },
  {
    "library": "tensorflow",
    "name": "_SparseMatrixSoftmaxGrad",
    "source_code": "@ops.RegisterGradient('SparseMatrixSoftmax')\ndef _SparseMatrixSoftmaxGrad(op: ops.Operation, grad_softmax):\n    softmax = op.outputs[0]\n    return sparse_csr_matrix_ops.sparse_matrix_softmax_grad(softmax, grad_softmax, type=op.get_attr('type'))",
    "docstring": "Gradient for sparse_matrix_softmax op.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\linalg\\sparse\\sparse_csr_matrix_grad.py",
    "ast_data": "FunctionDef name:_SparseMatrixSoftmaxGrad arg:op arg:grad_softmax arguments arg arg Assign Return return:yes Call Call Call"
  },
  {
    "library": "sphinx",
    "name": "not_smartquotable",
    "source_code": "class not_smartquotable:\n    support_smartquotes = False",
    "docstring": "A node which does not support smart-quotes.",
    "type": "class",
    "file_path": "sphinx\\sphinx\\addnodes.py",
    "ast_data": "ClassDef name:not_smartquotable Assign"
  },
  {
    "library": "matplotlib",
    "name": "set_height",
    "source_code": "def set_height(self, h):\n    self._height = h\n    self.stale = True",
    "docstring": "Set the height of the rectangle.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\patches.py",
    "ast_data": "FunctionDef name:set_height arg:self arg:h arguments arg arg Assign Assign"
  },
  {
    "library": "pandas",
    "name": "_compute",
    "source_code": "def _compute(self):\n    self.ctx.clear()\n    self.ctx_index.clear()\n    self.ctx_columns.clear()\n    r = self\n    for func, args, kwargs in self._todo:\n        r = func(self)(*args, **kwargs)\n    return r",
    "docstring": "Execute the style functions built up in . Relies on the conventions that all style functions go through .apply or .map. The append styles to apply as tuples of (application method, *args, **kwargs)",
    "type": "method",
    "file_path": "pandas\\pandas\\io\\formats\\style_render.py",
    "ast_data": "FunctionDef name:_compute arg:self arguments arg Call Call Call Assign For Assign Call Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "set_hatch",
    "source_code": "def set_hatch(self, hatch):\n    self._hatch = hatch",
    "docstring": "Set the hatch style (for fills).",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\backend_bases.py",
    "ast_data": "FunctionDef name:set_hatch arg:self arg:hatch arguments arg arg Assign"
  },
  {
    "library": "scikit-learn",
    "name": "fit",
    "source_code": "@_fit_context(prefer_skip_nested_validation=True)\ndef fit(self, X, y):\n    self._fit_encodings_all(X, y)\n    return self",
    "docstring": "Fit the :class: to X and y. Parameters ---------- X : array-like of shape (n_samples, n_features) The data to determine the categories of each feature. y : array-like of shape (n_samples,) The target data used to encode the categories. Returns ------- self : object Fitted encoder.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\preprocessing\\_target_encoder.py",
    "ast_data": "FunctionDef name:fit arg:self arg:X arg:y arguments arg arg arg Call Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "_read_tagdesc",
    "source_code": "def _read_tagdesc(f):\n    tagdesc = {'offset': _read_long(f)}\n    if tagdesc['offset'] == -1:\n        tagdesc['offset'] = _read_uint64(f)\n    tagdesc['typecode'] = _read_long(f)\n    tagflags = _read_long(f)\n    tagdesc['array'] = tagflags & 4 == 4\n    tagdesc['structure'] = tagflags & 32 == 32\n    tagdesc['scalar'] = tagdesc['typecode'] in DTYPE_DICT\n    return tagdesc",
    "docstring": "Function to read in a tag descriptor",
    "type": "function",
    "file_path": "scipy\\scipy\\io\\_idl.py",
    "ast_data": "FunctionDef name:_read_tagdesc arg:f arguments arg Assign Call If Compare Assign Call Assign Call Assign Call Assign Compare Assign Compare Assign Compare Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "get_commit",
    "source_code": "def get_commit(self, repo: str, commit_id: str) -> requests.Response:\n    endpoint = f'repos/{repo}/commits/{commit_id}'\n    return self._make_request('GET', endpoint)",
    "docstring": "Gets a commit by it's SHA-1 hash. commit Arguments: repo: a string of the form , e.g. openxla/xla. commit_id: a string describing the commit to get, e.g. or . Returns: a requests.Response object containing the response from the API. Raises: requests.exceptions.HTTPError",
    "type": "method",
    "file_path": "tensorflow\\third_party\\xla\\.github\\workflows\\github_api.py",
    "ast_data": "FunctionDef name:get_commit arg:self arg:repo arg:commit_id arguments arg arg arg Assign Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "_split",
    "source_code": "@final\ndef _split(self) -> Generator[Block]:\n    assert self.ndim == 2\n    for i, ref_loc in enumerate(self._mgr_locs):\n        vals = self.values[slice(i, i + 1)]\n        bp = BlockPlacement(ref_loc)\n        nb = type(self)(vals, placement=bp, ndim=2, refs=self.refs)\n        yield nb",
    "docstring": "Split a block into a list of single-column blocks.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\internals\\blocks.py",
    "ast_data": "FunctionDef name:_split arg:self arguments arg Compare For Call Assign Call Assign Call Assign Call Call"
  },
  {
    "library": "tensorflow",
    "name": "DTypeIndex",
    "source_code": "class DTypeIndex(dict):\n\n    def get_dtype_index(self, dtype):\n        if dtype not in self:\n            self[dtype] = len(self) + 1\n        return self[dtype]",
    "docstring": "Helper class to create an index of dtypes with incremental values.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\compiler\\tensorrt\\utils.py",
    "ast_data": "ClassDef name:DTypeIndex FunctionDef name:get_dtype_index arg:self arg:dtype arguments arg arg If Compare Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_get_reader",
    "source_code": "def _get_reader(self, file_path):\n    file_path = compat.as_bytes(file_path)\n    if file_path not in self._readers:\n        with self._readers_lock:\n            if file_path not in self._readers:\n                self._readers[file_path] = tf_record.tf_record_random_reader(file_path)\n                self._reader_read_locks[file_path] = threading.Lock()\n                self._reader_offsets[file_path] = 0\n    return self._readers[file_path]",
    "docstring": "Get a random-access reader for TFRecords file at file_path.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\debug\\lib\\debug_events_reader.py",
    "ast_data": "FunctionDef name:_get_reader arg:self arg:file_path arguments arg arg Assign Call If Compare With If Compare Assign Call Assign Call Assign Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_MemRefType",
    "source_code": "class _MemRefType(_RefType):\n    PARAM = 'Parameter'\n    BUFFER = 'Buffer'\n    GRAD = 'Gradient'\n    ACT = 'Activation'\n    TEMP = 'Temp'\n    OPT = 'Optstate'\n    OTH = 'Other'",
    "docstring": "An enum to define memory reference types, categorizing tensors based on their usage within a model. - PARAM: Tensors registered as nn.Parameter within modules. - BUFFER: Tensors registered as nn.Buffer within modules. - GRAD: Gradients associated with parameters. - ACT: Tensors produced during the forward pass and recomputation in activation checkpointing. - TMP: Temporary memory used during the backward pass, including gradients of activations. - OPT: Tensors holding optimizer states. - OTH: Tensors registered via that do not fit the above categories.",
    "type": "class",
    "file_path": "pytorch\\torch\\distributed\\_tools\\mem_tracker.py",
    "ast_data": "ClassDef name:_MemRefType Assign Assign Assign Assign Assign Assign Assign"
  },
  {
    "library": "pytorch",
    "name": "_root_copy_hook",
    "source_code": "def _root_copy_hook(self, *args: Any, **kwargs: Any) -> None:\n    self._submodule_to_event = defaultdict(deque)\n    with self._mp_stream:\n        for submodule in self.module.modules():\n            for param in submodule.parameters(recurse=False):\n                if hasattr(param, '_ddp_ignored') and param._ddp_ignored:\n                    continue\n                _alloc_storage(param._mp_param, param.size())\n                with torch.no_grad():\n                    param._mp_param.copy_(param.data)\n                    if param.grad is not None:\n                        param.grad.data = param.grad.to(self.mixed_precision.param_dtype)\n                param.data = param._mp_param\n            copy_event = torch.Event()\n            copy_event.record()\n            self._submodule_to_event[submodule].append(copy_event)",
    "docstring": "For DDP mixed precision, put low precision copies on separate stream and create events to wait for them. When training with DDP mixed precision, this root pre-forward hook kicks off low precision copies on a separate stream and creates respective events to wait for them.",
    "type": "method",
    "file_path": "pytorch\\torch\\nn\\parallel\\distributed.py",
    "ast_data": "FunctionDef name:_root_copy_hook arg:self arguments arg arg arg Assign Call With For Call For Call If BoolOp Call Call Call With Call Call If Compare Assign Call Assign Assign Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "_threshold",
    "source_code": "def _threshold(input: Tensor, threshold: float, value: float, inplace: bool=False) -> Tensor:\n    if has_torch_function_unary(input):\n        return handle_torch_function(_threshold, (input,), input, threshold, value, inplace=inplace)\n    if inplace:\n        result = _VF.threshold_(input, threshold, value)\n    else:\n        result = _VF.threshold(input, threshold, value)\n    return result",
    "docstring": "Apply a threshold to each element of the input Tensor. See :class: for more details.",
    "type": "function",
    "file_path": "pytorch\\torch\\nn\\functional.py",
    "ast_data": "FunctionDef name:_threshold arg:input arg:threshold arg:value arg:inplace arguments arg arg arg arg If Call Return return:yes Call If Assign Call Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_parse_service",
    "source_code": "def _parse_service(service) -> tuple[str, str]:\n    if not isinstance(service, str):\n        raise ValueError(f'`service` must be a string, but `service` was of type {type(service)}. service={service}')\n    if not service:\n        raise ValueError('`service` must not be empty')\n    parts = service.split('://')\n    if len(parts) == 2:\n        protocol, address = parts\n    elif len(parts) == 1:\n        address = parts[0]\n        protocol = _pywrap_utils_exp.TF_DATA_DefaultProtocol()\n    else:\n        raise ValueError(f\"Malformed `service` string has multiple '://': {service}.\")\n    return (protocol, address)",
    "docstring": "Converts a tf.data service string into a (protocol, address) tuple. Args: service: A string in the format \"protocol://address\" or just \"address\". If the string is only an address, the default protocol will be used. Returns: The (protocol, address) tuple",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\data\\experimental\\ops\\data_service_ops.py",
    "ast_data": "FunctionDef name:_parse_service arg:service arguments arg If Call Raise Call Call If Raise Call Assign Call If Compare Call Assign If Compare Call Assign Assign Call Raise Call Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "_inverse_transform",
    "source_code": "def _inverse_transform(self, code, dictionary):\n    code = check_array(code)\n    expected_n_components = dictionary.shape[0]\n    if self.split_sign:\n        expected_n_components += expected_n_components\n    if not code.shape[1] == expected_n_components:\n        raise ValueError(f'The number of components in the code is different from the number of components in the dictionary.Expected {expected_n_components}, got {code.shape[1]}.')\n    if self.split_sign:\n        n_samples, n_features = code.shape\n        n_features //= 2\n        code = code[:, :n_features] - code[:, n_features:]\n    return code @ dictionary",
    "docstring": "Private method allowing to accommodate both DictionaryLearning and SparseCoder.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\decomposition\\_dict_learning.py",
    "ast_data": "FunctionDef name:_inverse_transform arg:self arg:code arg:dictionary arguments arg arg arg Assign Call Assign If If Compare Raise Call If Assign Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "scatter_mul",
    "source_code": "def scatter_mul(self, sparse_delta, use_locking=False, name=None):\n    if not isinstance(sparse_delta, indexed_slices.IndexedSlices):\n        raise TypeError(f'Argument `sparse_delta` must be a `tf.IndexedSlices`. Received arg: {sparse_delta}')\n    return self._lazy_read(gen_resource_variable_ops.resource_scatter_mul(self.handle, sparse_delta.indices, ops.convert_to_tensor(sparse_delta.values, self.dtype), name=name))",
    "docstring": "Multiply this variable by . Args: sparse_delta: to multiply this variable by. use_locking: If , use locking during the operation. name: the name of the operation. Returns: The updated variable. Raises: TypeError: if is not an .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\resource_variable_ops.py",
    "ast_data": "FunctionDef name:scatter_mul arg:self arg:sparse_delta arg:use_locking arg:name arguments arg arg arg arg If Call Raise Call Return return:yes Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "tree_flatten_with_path",
    "source_code": "def tree_flatten_with_path(tree: PyTree, is_leaf: Optional[Callable[[PyTree], bool]]=None) -> tuple[list[tuple[KeyPath, Any]], TreeSpec]:\n    _, treespec = tree_flatten(tree, is_leaf)\n    return (list(_generate_key_paths((), tree, is_leaf)), treespec)",
    "docstring": "Flattens a pytree like :func:, but also returns each leaf's key path. Args: tree: a pytree to flatten. If it contains a custom type, that type must be registered with an appropriate when registered with :func:. is_leaf: An extra leaf predicate function that will be called at each flattening step. The function should have a single argument with signature `TrueTreeSpec` representing the structure of the flattened tree.",
    "type": "function",
    "file_path": "pytorch\\torch\\utils\\_pytree.py",
    "ast_data": "FunctionDef name:tree_flatten_with_path arg:tree arg:is_leaf arguments arg arg Assign Call Return return:yes Call Call"
  },
  {
    "library": "pandas",
    "name": "PandasBuffer",
    "source_code": "class PandasBuffer(Buffer):\n\n    def __init__(self, x: np.ndarray, allow_copy: bool=True) -> None:\n        if x.strides[0] and (not x.strides == (x.dtype.itemsize,)):\n            if allow_copy:\n                x = x.copy()\n            else:\n                raise RuntimeError('Exports cannot be zero-copy in the case of a non-contiguous buffer')\n        self._x = x\n\n    @property\n    def bufsize(self) -> int:\n        return self._x.size * self._x.dtype.itemsize\n\n    @property\n    def ptr(self) -> int:\n        return self._x.__array_interface__['data'][0]\n\n    def __dlpack__(self) -> Any:\n        return self._x.__dlpack__()\n\n    def __dlpack_device__(self) -> tuple[DlpackDeviceType, int | None]:\n        return (DlpackDeviceType.CPU, None)\n\n    def __repr__(self) -> str:\n        return 'PandasBuffer(' + str({'bufsize': self.bufsize, 'ptr': self.ptr, 'device': self.__dlpack_device__()[0].name}) + ')'",
    "docstring": "Data in the buffer is guaranteed to be contiguous in memory.",
    "type": "class",
    "file_path": "pandas\\pandas\\core\\interchange\\buffer.py",
    "ast_data": "ClassDef name:PandasBuffer FunctionDef name:__init__ arg:self arg:x arg:allow_copy arguments arg arg arg If BoolOp Compare If Assign Call Raise Call Assign FunctionDef name:bufsize arg:self arguments arg Return return:yes FunctionDef name:ptr arg:self arguments arg Return return:yes FunctionDef name:__dlpack__ arg:self arguments arg Return return:yes Call FunctionDef name:__dlpack_device__ arg:self arguments arg Return return:yes FunctionDef name:__repr__ arg:self arguments arg Return return:yes Call Call"
  },
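`PandasBuffer.__init__` above decides whether a zero-copy export is possible by comparing the array's strides with `(itemsize,)`. A short NumPy sketch of that check, using a strided slice as the non-contiguous case:

```python
import numpy as np

# A 1-D slice with a step has a stride larger than the itemsize, so a
# zero-copy export is impossible: either copy (allow_copy=True) or fail.
x = np.arange(10, dtype=np.int64)[::2]
assert x.strides != (x.dtype.itemsize,)  # non-contiguous view

y = x.copy()
assert y.strides == (y.dtype.itemsize,)  # contiguous after copying
```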
  {
    "library": "tensorflow",
    "name": "dtype",
    "source_code": "@property\ndef dtype(self):\n    return self._dtype",
    "docstring": "The specified by this type for the RaggedTensor. Examples: >>> rt = tf.ragged.constant([[\"a\"], [\"b\", \"c\"]], dtype=tf.string) >>> tf.type_spec_from_value(rt).dtype tf.string Returns: A of the values in the RaggedTensor.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\ragged_tensor.py",
    "ast_data": "FunctionDef name:dtype arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "output_shapes",
    "source_code": "@property\n@deprecation.deprecated(None, 'Use `tf.compat.v1.data.get_output_shapes(dataset)`.')\ndef output_shapes(self):\n    return nest.map_structure(lambda component_spec: component_spec._to_legacy_output_shapes(), self.element_spec)",
    "docstring": "Returns the shape of each component of an element of this dataset. Returns: A (nested) structure of objects corresponding to each component of an element of this dataset.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\data\\ops\\dataset_ops.py",
    "ast_data": "FunctionDef name:output_shapes arg:self arguments arg Return return:yes Call arguments arg Call Call"
  },
  {
    "library": "django",
    "name": "deconstruct",
    "source_code": "def deconstruct(self):\n    return (self.__class__.__name__, self._constructor_args[0], self._constructor_args[1])",
    "docstring": "Return a 3-tuple of class import path (or just name if it lives under django.db.migrations), positional arguments, and keyword arguments.",
    "type": "method",
    "file_path": "django\\django\\db\\migrations\\operations\\base.py",
    "ast_data": "FunctionDef name:deconstruct arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_c_func",
    "source_code": "@property\ndef _c_func(self) -> Any:\n    return self._bound_context.get_c_function(self.name)",
    "docstring": "Returns a scoped pybind object containing FunctionRecord in runtime.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\polymorphic_function\\atomic_function.py",
    "ast_data": "FunctionDef name:_c_func arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "flash_sdp_enabled",
    "source_code": "def flash_sdp_enabled():\n    return torch._C._get_flash_sdp_enabled()",
    "docstring": ".. warning:: This flag is beta and subject to change. Returns whether flash scaled dot product attention is enabled or not.",
    "type": "function",
    "file_path": "pytorch\\torch\\backends\\cuda\\__init__.py",
    "ast_data": "FunctionDef name:flash_sdp_enabled arguments Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "split",
    "source_code": "@classmethod\ndef split(cls, tensor, split_dimension, num_devices, input_shape=None):\n    if input_shape:\n        shape = input_shape\n    else:\n        shape = tensor.shape.as_list()\n    if shape[split_dimension] is not None and shape[split_dimension] < num_devices:\n        raise ValueError('Split dimension was smaller than the required number of splits: shape=%r, dimension=%r, num_devices=%r' % (shape, split_dimension, num_devices))\n    tile_assignment_dims = [1] * len(shape)\n    tile_assignment_dims[split_dimension] = num_devices\n    return Sharding(proto=xla_data_pb2.OpSharding(type=xla_data_pb2.OpSharding.OTHER, tile_assignment_dimensions=tile_assignment_dims, tile_assignment_devices=range(num_devices)))",
    "docstring": "Returns a Sharding that splits a tensor across a dimension. This creates a Tiled attribute, similar to tile(), but easier to use for the common case of tiling a tensor N ways in one dimension. Args: tensor: A tf.Tensor to split. split_dimension: The dimension number to split. num_devices: The number of cores to split over. input_shape: The shape of the original tensor. Raises: ValueError: The tensor to split was smaller in the split dimension than the number of devices to split over.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\compiler\\xla\\experimental\\xla_sharding.py",
    "ast_data": "FunctionDef name:split arg:cls arg:tensor arg:split_dimension arg:num_devices arg:input_shape arguments arg arg arg arg arg If Assign Assign Call If BoolOp Compare Compare Raise Call Assign Call Assign Return return:yes Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "reduce_all_v1",
    "source_code": "@tf_export(v1=['math.reduce_all', 'reduce_all'])\n@dispatch.add_dispatch_support\n@deprecation.deprecated_args(None, 'keep_dims is deprecated, use keepdims instead', 'keep_dims')\ndef reduce_all_v1(input_tensor, axis=None, keepdims=None, name=None, reduction_indices=None, keep_dims=None):\n    axis = deprecation.deprecated_argument_lookup('axis', axis, 'reduction_indices', reduction_indices)\n    keepdims = deprecation.deprecated_argument_lookup('keepdims', keepdims, 'keep_dims', keep_dims)\n    return reduce_all(input_tensor, axis, keepdims, name)",
    "docstring": "Computes of elements across dimensions of a tensor. This is the reduction operation for the elementwise op. Reduces along the dimensions given in . Unless is true, the rank of the tensor is reduced by 1 for each of the entries in , which must be unique. If is true, the reduced dimensions are retained with length 1. If is None, all dimensions are reduced, and a tensor with a single element is returned. For example: >>> x = tf.constant([[True, True], [False, False]]) >>> tf.math.reduce_all(x) >>> tf.math.reduce_all(x, 0) >>> tf.math.reduce_all(x, 1) Args: input_tensor: The boolean tensor to reduce. axis: The dimensions to reduce. If (the default), reduces all dimensions. Must be in the range . keepdims: If true, retains reduced dimensions with length 1. name: A name for the operation (optional). reduction_indices: The old (deprecated) name for axis. keep_dims: Deprecated alias for . Returns: The reduced tensor. @compatibility(numpy) Equivalent to np.all @end_compatibility",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\math_ops.py",
    "ast_data": "FunctionDef name:reduce_all_v1 arg:input_tensor arg:axis arg:keepdims arg:name arg:reduction_indices arg:keep_dims arguments arg arg arg arg arg arg Assign Call Assign Call Return return:yes Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "_forward_approximate",
    "source_code": "def _forward_approximate(self, input):\n    assert self.bn.running_var is not None\n    running_std = torch.sqrt(self.bn.running_var + self.bn.eps)\n    scale_factor = self.bn.weight / running_std\n    weight_shape = [1] * len(self.weight.shape)\n    weight_shape[0] = -1\n    bias_shape = [1] * len(self.weight.shape)\n    bias_shape[1] = -1\n    scaled_weight = self.weight_fake_quant(self.weight * scale_factor.reshape(weight_shape))\n    if self.bias is not None:\n        zero_bias = torch.zeros_like(self.bias, dtype=input.dtype)\n    else:\n        zero_bias = torch.zeros(self.out_channels, device=scaled_weight.device, dtype=input.dtype)\n    conv = self._conv_forward(input, scaled_weight, zero_bias)\n    conv_orig = conv / scale_factor.reshape(bias_shape)\n    if self.bias is not None:\n        conv_orig = conv_orig + self.bias.reshape(bias_shape)\n    conv = self.bn(conv_orig)\n    return conv",
    "docstring": "Approximated method to fuse conv and bn. It requires only one forward pass. conv_orig = conv / scale_factor where scale_factor = bn.weight / running_std",
    "type": "method",
    "file_path": "pytorch\\torch\\ao\\nn\\intrinsic\\qat\\modules\\conv_fused.py",
    "ast_data": "FunctionDef name:_forward_approximate arg:self arg:input arguments arg arg Compare Assign Call Assign Assign Call Assign Assign Call Assign Assign Call Call If Compare Assign Call Assign Call Assign Call Assign Call If Compare Assign Call Assign Call Return return:yes"
  },
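The `_forward_approximate` entry relies on convolution being linear in its weight: scaling the weight per output channel and dividing the result by the same factor is a no-op, which is what lets the code fake-quantize `weight * scale_factor` and then divide by `scale_factor.reshape(bias_shape)`. A numerical sketch of that identity (fake quantization and bias omitted):

```python
import torch

# Convolution is linear in the weight, so conv(x, w * s) / s == conv(x, w)
# when s is broadcast per output channel. This is the identity the
# conv+bn fusion approximation exploits.
x = torch.randn(1, 3, 8, 8)
w = torch.randn(4, 3, 3, 3)
scale = torch.rand(4) + 0.5  # per-output-channel factor, kept away from 0

scaled = torch.nn.functional.conv2d(x, w * scale.reshape(-1, 1, 1, 1))
unscaled = scaled / scale.reshape(1, -1, 1, 1)
assert torch.allclose(unscaled, torch.nn.functional.conv2d(x, w), atol=1e-5)
```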
  {
    "library": "numpy",
    "name": "cpu_dispatch_names",
    "source_code": "def cpu_dispatch_names(self):\n    return self.parse_dispatch_names",
    "docstring": "return a list of final CPU dispatch feature names",
    "type": "method",
    "file_path": "numpy\\numpy\\distutils\\ccompiler_opt.py",
    "ast_data": "FunctionDef name:cpu_dispatch_names arg:self arguments arg Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "Round",
    "source_code": "@_register_style(_style_list)\nclass Round:\n\n    def __init__(self, pad=0.3, rounding_size=None):\n        self.pad = pad\n        self.rounding_size = rounding_size\n\n    def __call__(self, x0, y0, width, height, mutation_size):\n        pad = mutation_size * self.pad\n        if self.rounding_size:\n            dr = mutation_size * self.rounding_size\n        else:\n            dr = pad\n        width, height = (width + 2 * pad, height + 2 * pad)\n        x0, y0 = (x0 - pad, y0 - pad)\n        x1, y1 = (x0 + width, y0 + height)\n        cp = [(x0 + dr, y0), (x1 - dr, y0), (x1, y0), (x1, y0 + dr), (x1, y1 - dr), (x1, y1), (x1 - dr, y1), (x0 + dr, y1), (x0, y1), (x0, y1 - dr), (x0, y0 + dr), (x0, y0), (x0 + dr, y0), (x0 + dr, y0)]\n        com = [Path.MOVETO, Path.LINETO, Path.CURVE3, Path.CURVE3, Path.LINETO, Path.CURVE3, Path.CURVE3, Path.LINETO, Path.CURVE3, Path.CURVE3, Path.LINETO, Path.CURVE3, Path.CURVE3, Path.CLOSEPOLY]\n        return Path(cp, com)",
    "docstring": "A box with round corners.",
    "type": "class",
    "file_path": "matplotlib\\lib\\matplotlib\\patches.py",
    "ast_data": "ClassDef name:Round FunctionDef name:__init__ arg:self arg:pad arg:rounding_size arguments arg arg arg Assign Assign FunctionDef name:__call__ arg:self arg:x0 arg:y0 arg:width arg:height arg:mutation_size arguments arg arg arg arg arg arg Assign If Assign Assign Assign Assign Assign Assign Assign Return return:yes Call Call"
  },
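Because `Round` is registered in matplotlib's box-style table, it is usually selected by name through a `boxstyle` string rather than instantiated directly. A brief usage sketch:

```python
import matplotlib.pyplot as plt

# The registered "round" style is chosen by name; pad and rounding_size
# are passed as comma-separated style arguments.
fig, ax = plt.subplots()
ax.text(0.5, 0.5, "round box", ha="center",
        bbox=dict(boxstyle="round,pad=0.3,rounding_size=0.2", fc="lightblue"))
fig.savefig("round_box.png")
```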
  {
    "library": "scikit-learn",
    "name": "_update_inner_stats",
    "source_code": "def _update_inner_stats(self, X, code, batch_size, step):\n    if step < batch_size - 1:\n        theta = (step + 1) * batch_size\n    else:\n        theta = batch_size ** 2 + step + 1 - batch_size\n    beta = (theta + 1 - batch_size) / (theta + 1)\n    self._A *= beta\n    self._A += code.T @ code / batch_size\n    self._B *= beta\n    self._B += X.T @ code / batch_size",
    "docstring": "Update the inner stats inplace.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\decomposition\\_dict_learning.py",
    "ast_data": "FunctionDef name:_update_inner_stats arg:self arg:X arg:code arg:batch_size arg:step arguments arg arg arg arg arg If Compare Assign Assign Assign"
  },
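The `_update_inner_stats` entry maintains the sufficient statistics `A = code.T @ code` and `B = X.T @ code` with a forgetting factor `beta` derived from the step count, so early mini-batches are progressively down-weighted. A standalone NumPy sketch of the same update (shapes are illustrative):

```python
import numpy as np

# Forgetting-factor update for the inner statistics of mini-batch
# dictionary learning: theta grows with the step, so beta -> 1 and old
# batches are slowly forgotten.
def update_inner_stats(A, B, X, code, batch_size, step):
    if step < batch_size - 1:
        theta = (step + 1) * batch_size
    else:
        theta = batch_size**2 + step + 1 - batch_size
    beta = (theta + 1 - batch_size) / (theta + 1)
    A = beta * A + code.T @ code / batch_size
    B = beta * B + X.T @ code / batch_size
    return A, B

rng = np.random.default_rng(0)
A = np.zeros((4, 4))
B = np.zeros((6, 4))
for step in range(5):
    X = rng.normal(size=(8, 6))
    code = rng.normal(size=(8, 4))
    A, B = update_inner_stats(A, B, X, code, batch_size=8, step=step)
```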
  {
    "library": "scipy",
    "name": "lint",
    "source_code": "@click.command()\n@click.option('--fix', default=False, is_flag=True, help='Attempt to auto-fix errors')\n@click.option('--diff-against', default='main', help='Diff against this branch and lint modified files. Use either `--diff-against` or `--files`, but not both.')\n@click.option('--files', default='', help='Lint these files or directories; use **/*.py to lint all files')\n@click.option('--all', default=False, is_flag=True, help='This overrides `--diff-against` and `--files` to lint all local files (excluding subprojects).')\n@click.option('--no-cython', default=True, is_flag=True, help='Do not run cython-lint.')\n@click.pass_context\ndef lint(ctx, fix, diff_against, files, all, no_cython):\n    cmd_prefix = [sys.executable] if sys.platform == 'win32' else []\n    cmd_lint = cmd_prefix + [os.path.join('tools', 'lint.py'), f'--diff-against={diff_against}']\n    if files != '':\n        cmd_lint += [f'--files={files}']\n    if all:\n        cmd_lint += ['--all']\n    if no_cython:\n        cmd_lint += ['--no-cython']\n    if fix:\n        cmd_lint += ['--fix']\n    util.run(cmd_lint)\n    cmd_unicode = cmd_prefix + [os.path.join('tools', 'check_unicode.py')]\n    util.run(cmd_unicode)\n    cmd_check_test_name = cmd_prefix + [os.path.join('tools', 'check_test_name.py')]\n    util.run(cmd_check_test_name)",
    "docstring": "🔦 Run linter on modified files and check for disallowed Unicode characters and possibly-invalid test names.",
    "type": "function",
    "file_path": "scipy\\.spin\\cmds.py",
    "ast_data": "FunctionDef name:lint arg:ctx arg:fix arg:diff_against arg:files arg:all arg:no_cython arguments arg arg arg arg arg arg Assign Compare Assign Call If Compare If If If Call Assign Call Call Assign Call Call Call Call Call Call Call Call"
  },
  {
    "library": "django",
    "name": "d",
    "source_code": "def d(self):\n    return '%02d' % self.data.day",
    "docstring": "Day of the month, 2 digits with leading zeros; i.e. '01' to '31'",
    "type": "method",
    "file_path": "django\\django\\utils\\dateformat.py",
    "ast_data": "FunctionDef name:d arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "get",
    "source_code": "@staticmethod\ndef get(logdir):\n    with FileWriterCache._lock:\n        if logdir not in FileWriterCache._cache:\n            FileWriterCache._cache[logdir] = FileWriter(logdir, graph=ops.get_default_graph())\n        return FileWriterCache._cache[logdir]",
    "docstring": "Returns the FileWriter for the specified directory. Args: logdir: str, name of the directory. Returns: A .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\summary\\writer\\writer_cache.py",
    "ast_data": "FunctionDef name:get arg:logdir arguments arg With If Compare Assign Call Call Return return:yes"
  },
  {
    "library": "django",
    "name": "adapt_datefield_value",
    "source_code": "def adapt_datefield_value(self, value):\n    if value is None:\n        return None\n    return str(value)",
    "docstring": "Transform a date value to an object compatible with what is expected by the backend driver for date columns.",
    "type": "method",
    "file_path": "django\\django\\db\\backends\\base\\operations.py",
    "ast_data": "FunctionDef name:adapt_datefield_value arg:self arg:value arguments arg arg If Compare Return return:no Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "flat_values",
    "source_code": "@property\ndef flat_values(self):\n    rt_values = self.values\n    while isinstance(rt_values, RaggedTensor):\n        rt_values = rt_values.values\n    return rt_values",
    "docstring": "The innermost tensor for this ragged tensor. Concretely, if is a , then is ; otherwise, is . Conceptually, is the tensor formed by flattening the outermost dimension and all of the ragged dimensions into a single dimension. (where is the number of items in the flattened dimensions). Returns: A . #### Example: >>> rt = tf.ragged.constant([[[3, 1, 4, 1], [], [5, 9, 2]], [], [[6], []]]) >>> print(rt.flat_values) tf.Tensor([3 1 4 1 5 9 2 6], shape=(8,), dtype=int32)",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\ragged_tensor.py",
    "ast_data": "FunctionDef name:flat_values arg:self arguments arg Assign While Call Assign Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "ConvBn3d",
    "source_code": "class ConvBn3d(_FusedModule):\n\n    def __init__(self, conv, bn):\n        assert type_before_parametrizations(conv) == Conv3d and type_before_parametrizations(bn) == BatchNorm3d, f'Incorrect types for input modules{type_before_parametrizations(conv)}{type_before_parametrizations(bn)}'\n        super().__init__(conv, bn)",
    "docstring": "This is a sequential container which calls the Conv 3d and Batch Norm 3d modules. During quantization this will be replaced with the corresponding fused module.",
    "type": "class",
    "file_path": "pytorch\\torch\\ao\\nn\\intrinsic\\modules\\fused.py",
    "ast_data": "ClassDef name:ConvBn3d FunctionDef name:__init__ arg:self arg:conv arg:bn arguments arg arg arg BoolOp Compare Call Compare Call Call Call Call Call"
  },
  {
    "library": "django",
    "name": "ask_unique_callable_default_addition",
    "source_code": "def ask_unique_callable_default_addition(self, field_name, model_name):\n    if not self.dry_run:\n        version = get_docs_version()\n        choice = self._choice_input(f'Callable default on unique field {model_name}.{field_name} will not generate unique values upon migrating.\\nPlease choose how to proceed:\\n', [f'Continue making this migration as the first step in writing a manual migration to generate unique values described here: https://docs.djangoproject.com/en/{version}/howto/writing-migrations/#migrations-that-add-unique-fields.', 'Quit and edit field options in models.py.'])\n        if choice == 2:\n            sys.exit(3)\n    return None",
    "docstring": "Adding a unique field with a callable default.",
    "type": "method",
    "file_path": "django\\django\\db\\migrations\\questioner.py",
    "ast_data": "FunctionDef name:ask_unique_callable_default_addition arg:self arg:field_name arg:model_name arguments arg arg arg If Assign Call Assign Call If Compare Call Return return:no"
  },
  {
    "library": "pytorch",
    "name": "externed_modules",
    "source_code": "def externed_modules(self) -> list[str]:\n    return self._nodes_with_action_type(_ModuleProviderAction.EXTERN)",
    "docstring": "Return all modules that are currently externed. Returns: A list containing the names of modules which will be externed in this package.",
    "type": "method",
    "file_path": "pytorch\\torch\\package\\package_exporter.py",
    "ast_data": "FunctionDef name:externed_modules arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "seaborn",
    "name": "define_support",
    "source_code": "def define_support(self, x1, x2=None, weights=None, cache=True):\n    if x2 is None:\n        support = self._define_support_univariate(x1, weights)\n    else:\n        support = self._define_support_bivariate(x1, x2, weights)\n    if cache:\n        self.support = support\n    return support",
    "docstring": "Create the evaluation grid for a given data set.",
    "type": "method",
    "file_path": "seaborn\\seaborn\\_statistics.py",
    "ast_data": "FunctionDef name:define_support arg:self arg:x1 arg:x2 arg:weights arg:cache arguments arg arg arg arg arg If Compare Assign Call Assign Call If Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "set_global_step",
    "source_code": "def set_global_step(self, new_global_step, name=None):\n    return gen_data_flow_ops.resource_accumulator_set_global_step(self._accumulator_ref, math_ops.cast(ops.convert_to_tensor(new_global_step), _dtypes.int64), name=name)",
    "docstring": "Sets the global time step of the accumulator. The operation logs a warning if we attempt to set to a time step that is lower than the accumulator's own time step. Args: new_global_step: Value of new time step. Can be a variable or a constant name: Optional name for the operation. Returns: Operation that sets the accumulator's time step.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\data_flow_ops.py",
    "ast_data": "FunctionDef name:set_global_step arg:self arg:new_global_step arg:name arguments arg arg arg Return return:yes Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "synchronize_staging",
    "source_code": "def synchronize_staging(self) -> None:\n    pass",
    "docstring": "No-op function, since staging is blocking.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\checkpoint\\staging.py",
    "ast_data": "FunctionDef name:synchronize_staging arg:self arguments arg"
  },
  {
    "library": "django",
    "name": "AreaField",
    "source_code": "class AreaField(models.FloatField):\n\n    def __init__(self, geo_field):\n        super().__init__()\n        self.geo_field = geo_field\n\n    def get_prep_value(self, value):\n        if not isinstance(value, Area):\n            raise ValueError('AreaField only accepts Area measurement objects.')\n        return value\n\n    def get_db_prep_value(self, value, connection, prepared=False):\n        if value is None:\n            return\n        area_att = connection.ops.get_area_att_for_field(self.geo_field)\n        return getattr(value, area_att) if area_att else value\n\n    def from_db_value(self, value, expression, connection):\n        if value is None:\n            return\n        if isinstance(value, Decimal):\n            value = float(value)\n        area_att = connection.ops.get_area_att_for_field(self.geo_field)\n        return Area(**{area_att: value}) if area_att else value\n\n    def get_internal_type(self):\n        return 'AreaField'",
    "docstring": "Wrapper for Area values.",
    "type": "class",
    "file_path": "django\\django\\contrib\\gis\\db\\models\\sql\\conversion.py",
    "ast_data": "ClassDef name:AreaField FunctionDef name:__init__ arg:self arg:geo_field arguments arg arg Call Call Assign FunctionDef name:get_prep_value arg:self arg:value arguments arg arg If Call Raise Call Return return:yes FunctionDef name:get_db_prep_value arg:self arg:value arg:connection arg:prepared arguments arg arg arg arg If Compare Return return:no Assign Call Return return:yes Call FunctionDef name:from_db_value arg:self arg:value arg:expression arg:connection arguments arg arg arg arg If Compare Return return:no If Call Assign Call Assign Call Return return:yes Call FunctionDef name:get_internal_type arg:self arguments arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "prepare_global_plan",
    "source_code": "@abc.abstractmethod\ndef prepare_global_plan(self, plans: list[LoadPlan]) -> list[LoadPlan]:\n    pass",
    "docstring": "Perform centralized planning of storage loading. This method is only called on the coordinator instance. While this method can produce a completely different plan, the preferred way is to store storage specific data in LoadPlan::storage_data. Args: plans: A list of `` after storage global planning",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\checkpoint\\storage.py",
    "ast_data": "FunctionDef name:prepare_global_plan arg:self arg:plans arguments arg arg"
  },
  {
    "library": "pandas",
    "name": "is_potential_multi_index",
    "source_code": "def is_potential_multi_index(columns: Sequence[Hashable] | MultiIndex, index_col: bool | Sequence[int] | None=None) -> bool:\n    if index_col is None or isinstance(index_col, bool):\n        index_columns = set()\n    else:\n        index_columns = set(index_col)\n    return bool(len(columns) and (not isinstance(columns, ABCMultiIndex)) and all((isinstance(c, tuple) for c in columns if c not in index_columns)))",
    "docstring": "Check whether or not the parameter could be converted into a MultiIndex. Parameters ---------- columns : array-like Object which may or may not be convertible into a MultiIndex index_col : None, bool or list, optional Column or columns to use as the (possibly hierarchical) index Returns ------- bool : Whether or not columns could become a MultiIndex",
    "type": "function",
    "file_path": "pandas\\pandas\\io\\common.py",
    "ast_data": "FunctionDef name:is_potential_multi_index arg:columns arg:index_col arguments arg arg If BoolOp Compare Call Assign Call Assign Call Return return:yes Call BoolOp Call Call Call Call Compare"
  },
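A quick usage sketch of `is_potential_multi_index`, assuming the private `pandas.io.common` module is importable: only all-tuple column labels (excluding any index columns and existing `MultiIndex` objects) qualify.

```python
import pandas as pd
from pandas.io.common import is_potential_multi_index

# All-tuple column labels can become a MultiIndex...
assert is_potential_multi_index([("a", 1), ("a", 2)])
# ...mixed labels cannot, and an existing MultiIndex is excluded.
assert not is_potential_multi_index([("a", 1), "b"])
mi = pd.MultiIndex.from_tuples([("a", 1), ("a", 2)])
assert not is_potential_multi_index(mi)
```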
  {
    "library": "django",
    "name": "translation_file_changed",
    "source_code": "def translation_file_changed(sender, file_path, **kwargs):\n    if file_path.suffix == '.mo':\n        import gettext\n        from django.utils.translation import trans_real\n        gettext._translations = {}\n        trans_real._translations = {}\n        trans_real._default = None\n        trans_real._active = Local()\n        return True",
    "docstring": "Clear the internal translations cache if a .mo file is modified.",
    "type": "function",
    "file_path": "django\\django\\utils\\translation\\reloader.py",
    "ast_data": "FunctionDef name:translation_file_changed arg:sender arg:file_path arguments arg arg arg If Compare Assign Assign Assign Assign Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "shard",
    "source_code": "@torch.no_grad()\ndef shard(self):\n    flat_param = self.flat_param\n    if not self.uses_sharded_strategy:\n        self._init_shard_metadata(0, 0, flat_param.numel() - 1)\n    else:\n        _p_assert(flat_param.storage_offset() == 0, 'The `FlatParameter` is not the sole occupant of its storage')\n        sharded_flat_param, numel_padded = FlatParamHandle._get_shard(flat_param, self.rank, self.world_size)\n        if not torch.distributed._functional_collectives.is_torchdynamo_compiling():\n            allocated = flat_param._typed_storage()._size() > 0\n            if allocated:\n                flat_param._typed_storage()._resize_(0)\n        flat_param.set_(sharded_flat_param)\n        start_idx = sharded_flat_param.numel() * self.rank\n        end_idx = sharded_flat_param.numel() * (self.rank + 1) - 1\n        self._init_shard_metadata(numel_padded, start_idx, end_idx)\n    if self._use_orig_params:\n        self._use_sharded_views()",
    "docstring": "Shard the handle's `` is the sharded flat parameter. Shard metadata attributes are set for all sharding strategies.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\fsdp\\_flat_param.py",
    "ast_data": "FunctionDef name:shard arg:self arguments arg Assign If Call Call Call Compare Call Assign Call If Call Assign Compare Call Call If Call Call Call Assign Call Assign Call Call If Call Call"
  },
  {
    "library": "pytorch",
    "name": "generate_kernel_code_from_nodes",
    "source_code": "def generate_kernel_code_from_nodes(self, nodes: Sequence[BaseSchedulerNode], benchmark_kernel: bool) -> str:\n    assert len(nodes) > 0\n    device = nodes[0].get_device()\n    self.current_device = device\n    backend = self.get_backend(device)\n    with dynamo_timed('benchmark_fused_nodes'):\n        return backend.generate_kernel_code_from_nodes(nodes, benchmark_kernel)",
    "docstring": "Benchmark fused list of nodes and return the execution time in milliseconds on randomly generated inputs.",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\scheduler.py",
    "ast_data": "FunctionDef name:generate_kernel_code_from_nodes arg:self arg:nodes arg:benchmark_kernel arguments arg arg arg Compare Call Assign Call Assign Assign Call With Call Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "equals",
    "source_code": "def equals(self, other: object) -> bool:\n    if self.is_(other):\n        return True\n    if not isinstance(other, Index):\n        return False\n    if len(self) != len(other):\n        return False\n    if not isinstance(other, MultiIndex):\n        if not self._should_compare(other):\n            return False\n        return array_equivalent(self._values, other._values)\n    if self.nlevels != other.nlevels:\n        return False\n    for i in range(self.nlevels):\n        self_codes = self.codes[i]\n        other_codes = other.codes[i]\n        self_mask = self_codes == -1\n        other_mask = other_codes == -1\n        if not np.array_equal(self_mask, other_mask):\n            return False\n        self_level = self.levels[i]\n        other_level = other.levels[i]\n        new_codes = recode_for_categories(other_codes, other_level, self_level, copy=False)\n        if not np.array_equal(self_codes, new_codes):\n            return False\n        if not self_level[:0].equals(other_level[:0]):\n            return False\n    return True",
    "docstring": "Determines if two MultiIndex objects have the same labeling information (the levels themselves do not necessarily have to be the same) See Also -------- equal_levels",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\indexes\\multi.py",
    "ast_data": "FunctionDef name:equals arg:self arg:other arguments arg arg If Call Return return:yes If Call Return return:yes If Compare Call Call Return return:yes If Call If Call Return return:yes Return return:yes Call If Compare Return return:yes For Call Assign Assign Assign Compare Assign Compare If Call Return return:yes Assign Assign Assign Call If Call Return return:yes If Call Return return:yes Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "get_self_device_time",
    "source_code": "def get_self_device_time(ev: torch.autograd.profiler_util.EventList) -> float:\n    return ev.self_device_time_total / 1000 / nruns",
    "docstring": "ev.self_device_time_total is in microsecond. Convert to millisecond.",
    "type": "function",
    "file_path": "pytorch\\torch\\_inductor\\wrapper_benchmark.py",
    "ast_data": "FunctionDef name:get_self_device_time arg:ev arguments arg Return return:yes"
  },
  {
    "library": "seaborn",
    "name": "_get_nominal_mapping",
    "source_code": "def _get_nominal_mapping(self, scale: Nominal, data: Series) -> Mapping:\n    levels = categorical_order(data, scale.order)\n    values = self._get_values(scale, levels)\n\n    def mapping(x):\n        ixs = np.asarray(x, np.intp)\n        out = np.full(len(x), np.nan)\n        use = np.isfinite(x)\n        out[use] = np.take(values, ixs[use])\n        return out\n    return mapping",
    "docstring": "Identify evenly-spaced values using interval or explicit mapping.",
    "type": "method",
    "file_path": "seaborn\\seaborn\\_core\\properties.py",
    "ast_data": "FunctionDef name:_get_nominal_mapping arg:self arg:scale arg:data arguments arg arg arg Assign Call Assign Call FunctionDef name:mapping arg:x arguments arg Assign Call Assign Call Call Assign Call Assign Call Return return:yes Return return:yes"
  },
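The closure returned by `_get_nominal_mapping` treats inputs as integer level codes, looks them up with `np.take`, and passes non-finite inputs (missing data) through as NaN. A standalone sketch with an illustrative value table:

```python
import numpy as np

# Nominal lookup: integer level codes index into a per-category value
# table; non-finite inputs come back as NaN instead of being looked up.
values = [1.0, 2.5, 4.0]  # one value per category level (illustrative)

def mapping(x):
    ixs = np.asarray(x, np.intp)
    out = np.full(len(x), np.nan)
    use = np.isfinite(x)
    out[use] = np.take(values, ixs[use])
    return out

print(mapping(np.array([0.0, 2.0, np.nan, 1.0])))  # [1.  4.  nan 2.5]
```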
  {
    "library": "sphinx",
    "name": "resolve_any_xref",
    "source_code": "def resolve_any_xref(self, env: BuildEnvironment, fromdocname: str, builder: Builder, target: str, node: pending_xref, contnode: Element) -> list[tuple[str, nodes.reference]]:\n    raise NotImplementedError",
    "docstring": "Resolve the pending_xref *node* with the given *target*. The reference comes from an \"any\" or similar role, which means that we don't know the type. Otherwise, the arguments are the same as for :meth:. The method must return a list (potentially empty) of tuples `resolve_xref` would return. .. versionadded:: 1.3",
    "type": "method",
    "file_path": "sphinx\\sphinx\\domains\\__init__.py",
    "ast_data": "FunctionDef name:resolve_any_xref arg:self arg:env arg:fromdocname arg:builder arg:target arg:node arg:contnode arguments arg arg arg arg arg arg arg Raise"
  },
  {
    "library": "pytorch",
    "name": "get_backoff_factor",
    "source_code": "def get_backoff_factor(self) -> float:\n    return self._backoff_factor",
    "docstring": "Return a Python float containing the scale backoff factor.",
    "type": "method",
    "file_path": "pytorch\\torch\\amp\\grad_scaler.py",
    "ast_data": "FunctionDef name:get_backoff_factor arg:self arguments arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "generate_d_gub",
    "source_code": "@register_transformation_rule(DGreatestUpperBound)\ndef generate_d_gub(constraint, counter):\n    c1 = Conj([BinConstraintD(constraint.rhs1, Dyn, op_eq), BinConstraintD(constraint.res, constraint.rhs2, op_eq)])\n    c2 = Conj([BinConstraintD(constraint.rhs2, Dyn, op_eq), BinConstraintD(constraint.res, constraint.rhs1, op_eq)])\n    c3 = Conj([BinConstraintD(constraint.rhs2, constraint.rhs1, op_eq), BinConstraintD(constraint.res, constraint.rhs1, op_eq)])\n    return (Disj([c1, c2, c3]), counter)",
    "docstring": "Transform greatest upper bound for dimensions into equality constraints",
    "type": "function",
    "file_path": "pytorch\\torch\\fx\\experimental\\migrate_gradual_types\\constraint_transformation.py",
    "ast_data": "FunctionDef name:generate_d_gub arg:constraint arg:counter arguments arg arg Assign Call Call Call Assign Call Call Call Assign Call Call Call Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "_update_scheduler",
    "source_code": "def _update_scheduler(self) -> None:\n    from .scheduler import Scheduler\n    with config.patch('triton.store_cubin', False):\n        self.scheduler = Scheduler(self.operations)",
    "docstring": "(Re)initializes the scheduler member. When initializing the scheduler, no CUBIN files should be generated (to avoid biasing any benchmarks and pessimizing fusion decisions).",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\graph.py",
    "ast_data": "FunctionDef name:_update_scheduler arg:self arguments arg With Call Assign Call"
  },
  {
    "library": "tensorflow",
    "name": "_process_call",
    "source_code": "def _process_call(self, node: ast.Call) -> None:\n    func = node.func\n    if self._is_export_call(func):\n        func = cast(ast.Call, func)\n        if len(node.args) != 1 or node.keywords:\n            raise BadExportError(f'{self._current_file}:{node.lineno} export must be called with a single value: {ast.dump(node)}')\n        symbol = self._name(self._unwrap_simple_call(node.args[0]))\n        if not symbol:\n            raise BadExportError(f'{self._current_file}:{node.lineno} export must be called with a single value: {ast.dump(node)}')\n        self._add_exported_symbol(func, symbol)\n    elif isinstance(func, ast.Attribute) and func.attr == 'export_constant' and self._is_export_call(func.value):\n        if len(node.args) != 2 or node.keywords or self._name(node.args[0]) != '__name__':\n            raise BadExportError(f'{self._current_file}:{node.lineno} export_constant must be called with __name__, <id>: {ast.dump(node)}')\n        self._add_exported_symbol(func.value, self._literal_value(node.args[1]))\n    else:\n        self.visit(node)",
    "docstring": "Process top-level call for potential symbol export.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\tools\\api\\generator2\\extractor\\extractor.py",
    "ast_data": "FunctionDef name:_process_call arg:self arg:node arguments arg arg Assign If Call Assign Call If BoolOp Compare Call Raise Call Call Assign Call Call If Raise Call Call Call If BoolOp Call Compare Call If BoolOp Compare Call Compare Call Raise Call Call Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "create_graph_debug_info_def",
    "source_code": "def create_graph_debug_info_def(func_named_operations):\n    builder = tf_stack.GraphDebugInfoBuilder()\n    for func_name, op in func_named_operations:\n        if op.traceback is None:\n            continue\n        builder.AccumulateStackTrace(func_name, op.name, _compute_useful_frames(op.traceback, 10))\n    return builder.Build()",
    "docstring": "Construct and returns a protocol buffer. Args: func_named_operations: An iterable of (func_name, op.Operation) tuples where the Operation instances have a _traceback members. The func_name should be the empty string for operations in the top-level Graph. Returns: GraphDebugInfo protocol buffer. Raises: TypeError: If the arguments are not of the correct proto buffer type.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\error_interpolation.py",
    "ast_data": "FunctionDef name:create_graph_debug_info_def arg:func_named_operations arguments arg Assign Call For If Compare Call Call Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "_set_order",
    "source_code": "def _set_order(X, y, order='C'):\n    if order not in [None, 'C', 'F']:\n        raise ValueError(\"Unknown value for order. Got {} instead of None, 'C' or 'F'.\".format(order))\n    sparse_X = sparse.issparse(X)\n    sparse_y = sparse.issparse(y)\n    if order is not None:\n        sparse_format = 'csc' if order == 'F' else 'csr'\n        if sparse_X:\n            X = X.asformat(sparse_format, copy=False)\n        else:\n            X = np.asarray(X, order=order)\n        if sparse_y:\n            y = y.asformat(sparse_format)\n        else:\n            y = np.asarray(y, order=order)\n    return (X, y)",
    "docstring": "Change the order of X and y if necessary. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) Training data. y : ndarray of shape (n_samples,) Target values. order : {None, 'C', 'F'} If 'C', dense arrays are returned as C-ordered, sparse matrices in csr format. If 'F', dense arrays are return as F-ordered, sparse matrices in csc format. Returns ------- X : {array-like, sparse matrix} of shape (n_samples, n_features) Training data with guaranteed order. y : ndarray of shape (n_samples,) Target values with guaranteed order.",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\linear_model\\_coordinate_descent.py",
    "ast_data": "FunctionDef name:_set_order arg:X arg:y arg:order arguments arg arg arg If Compare Raise Call Call Assign Call Assign Call If Compare Assign Compare If Assign Call Assign Call If Assign Call Assign Call Return return:yes"
  },
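A usage sketch of `_set_order`, assuming the private `sklearn.linear_model._coordinate_descent` module is importable; requesting `'F'` yields Fortran-ordered dense arrays (or csc-format sparse matrices):

```python
import numpy as np
from sklearn.linear_model._coordinate_descent import _set_order

X = np.arange(6.0).reshape(3, 2)  # C-ordered by default
y = np.arange(3.0)

X_f, y_f = _set_order(X, y, order="F")
assert X_f.flags["F_CONTIGUOUS"] and y_f.flags["F_CONTIGUOUS"]
```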
  {
    "library": "pandas",
    "name": "flush",
    "source_code": "def flush(self, fsync: bool=False) -> None:\n    if self._handle is not None:\n        self._handle.flush()\n        if fsync:\n            with suppress(OSError):\n                os.fsync(self._handle.fileno())",
    "docstring": "Force all buffered modifications to be written to disk. Parameters ---------- fsync : bool (default False) call ``, flushing may not guarantee that the OS writes to disk. With fsync, the operation will block until the OS claims the file has been written; however, other caching layers may still interfere.",
    "type": "method",
    "file_path": "pandas\\pandas\\io\\pytables.py",
    "ast_data": "FunctionDef name:flush arg:self arg:fsync arguments arg arg If Compare Call If With Call Call Call"
  },
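The `flush` entry above layers `os.fsync` on top of the handle's own `flush`, since flushing alone only hands the data to the OS. The same pattern with a plain file object:

```python
import os
import tempfile

# flush() pushes Python's userspace buffer to the OS; os.fsync() then
# asks the OS to commit the file contents to stable storage.
with tempfile.NamedTemporaryFile(delete=False) as f:
    f.write(b"\x00" * 1024)
    f.flush()
    os.fsync(f.fileno())
os.unlink(f.name)
```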
  {
    "library": "kornia",
    "name": "_no_match",
    "source_code": "def _no_match(dm: Tensor) -> Tuple[Tensor, Tensor]:\n    dists = torch.empty(0, 1, device=dm.device, dtype=dm.dtype)\n    idxs = torch.empty(0, 2, device=dm.device, dtype=torch.long)\n    return (dists, idxs)",
    "docstring": "Output empty tensors. Returns: - Descriptor distance of matching descriptors, shape of :math:. - Long tensor indexes of matching descriptors in desc1 and desc2, shape of :math:.",
    "type": "function",
    "file_path": "kornia\\kornia\\feature\\adalam\\core.py",
    "ast_data": "FunctionDef name:_no_match arg:dm arguments arg Assign Call Assign Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "all_reach_via_pointwise_with_no_other_inputs",
    "source_code": "def all_reach_via_pointwise_with_no_other_inputs(src: torch.fx.Node, dst: torch.fx.Node) -> tuple[bool, OrderedSet[torch.fx.Node]]:\n    visited = OrderedSet[torch.fx.Node]()\n    input_counter: dict[torch.fx.Node, int] = {}\n    all_reachable = True\n    queue = deque([src])\n    while queue:\n        node = queue.popleft()\n        if node not in visited:\n            if node is dst:\n                visited.add(node)\n            elif node is src or is_pointwise_node(node):\n                for user in node.users.keys():\n                    if user not in input_counter:\n                        input_counter[user] = len(user.all_input_nodes)\n                    input_counter[user] -= 1\n                    queue.append(user)\n                visited.add(node)\n            else:\n                all_reachable = False\n                break\n    return (all_reachable and all((count == 0 for count in input_counter.values())), visited)",
    "docstring": "check whether every user path from src reaches dst via pointwise nodes, with no other input nodes for the intermediates and dst; return (1) the Boolean value (2) the subgraph node set including src and dst (which only makes sense when the Boolean value is True)",
    "type": "function",
    "file_path": "pytorch\\torch\\_inductor\\fx_passes\\b2b_gemm.py",
    "ast_data": "FunctionDef name:all_reach_via_pointwise_with_no_other_inputs arg:src arg:dst arguments arg arg Assign Call Assign Assign Call While Assign Call If Compare If Compare Call If BoolOp Compare Call For Call If Compare Assign Call Call Call Assign Return return:yes BoolOp Call Compare Call"
  },
  {
    "library": "kornia",
    "name": "make_samplers",
    "source_code": "def make_samplers(self, device: torch.device, dtype: torch.dtype) -> None:\n    drop = _range_bound((0.0, 1.0), 'drop', device=device, dtype=dtype)\n    self.drop_sampler = UniformDistribution(drop[0], drop[1], validate_args=False)",
    "docstring": "Create samplers for generating random dropout parameters.",
    "type": "method",
    "file_path": "kornia\\kornia\\augmentation\\random_generator\\_2d\\channel_dropout.py",
    "ast_data": "FunctionDef name:make_samplers arg:self arg:device arg:dtype arguments arg arg arg Assign Call Assign Call"
  },
  {
    "library": "django",
    "name": "metadata",
    "source_code": "@property\ndef metadata(self):\n    domain_list = ['DEFAULT']\n    meta_list = capi.get_ds_metadata_domain_list(self._ptr)\n    if meta_list:\n        counter = 0\n        domain = meta_list[counter]\n        while domain:\n            domain_list.append(domain.decode())\n            counter += 1\n            domain = meta_list[counter]\n    capi.free_dsl(meta_list)\n    result = {}\n    for domain in domain_list:\n        data = capi.get_ds_metadata(self._ptr, None if domain == 'DEFAULT' else domain.encode())\n        if not data:\n            continue\n        domain_meta = {}\n        counter = 0\n        item = data[counter]\n        while item:\n            key, val = item.decode().split('=')\n            domain_meta[key] = val\n            counter += 1\n            item = data[counter]\n        result[domain or 'DEFAULT'] = domain_meta\n    return result",
    "docstring": "Return the metadata for this raster or band. The return value is a nested dictionary, where the first-level key is the metadata domain and the second-level is the metadata item names and values for that domain.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\gdal\\raster\\base.py",
    "ast_data": "FunctionDef name:metadata arg:self arguments arg Assign Assign Call If Assign Assign While Call Call Assign Call Assign For Assign Call Compare Call If Assign Assign Assign While Assign Call Call Assign Assign Assign BoolOp Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "__init__",
    "source_code": "def __init__(self, grid_helper, side, nth_coord_ticks=None):\n    super().__init__(loc=side)\n    self.grid_helper = grid_helper\n    if nth_coord_ticks is None:\n        nth_coord_ticks = self.nth_coord\n    self.nth_coord_ticks = nth_coord_ticks\n    self.side = side",
    "docstring": "nth_coord = along which coordinate value varies. nth_coord = 0 -> x axis, nth_coord = 1 -> y axis",
    "type": "method",
    "file_path": "matplotlib\\lib\\mpl_toolkits\\axisartist\\grid_helper_curvelinear.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:grid_helper arg:side arg:nth_coord_ticks arguments arg arg arg arg Call Call Assign If Compare Assign Assign Assign"
  },
  {
    "library": "pytorch",
    "name": "_get_module_name_filter",
    "source_code": "def _get_module_name_filter(module_name: str):\n\n    def module_name_filter(n: Node) -> bool:\n        nn_module_stack = n.meta.get('nn_module_stack', {})\n\n        def _normalize_path(n):\n            prefix = 0\n            if n.startswith(\"L['self'].\"):\n                prefix = len(\"L['self'].\")\n            return n[prefix:]\n        names = [_normalize_path(n) for n, _ in nn_module_stack.values()]\n        return module_name in names\n    return module_name_filter",
    "docstring": "Get the module_name_filter function for a given module name, the filter accepts a node and checks if the node comes from a module that has certain module name For example: node: linear_op = call_function # comes from a module with name blocks.sub.linear1 >> module_name_filter = _get_module_name_filter(\"blocks.sub\") >> print(module_name_filter(node)) True # the node is from \"blocks.sub\" based on the fully qualified name \"blocks.sub.linear1\"",
    "type": "function",
    "file_path": "pytorch\\torch\\ao\\quantization\\quantizer\\utils.py",
    "ast_data": "FunctionDef name:_get_module_name_filter arg:module_name arguments arg FunctionDef name:module_name_filter arg:n arguments arg Assign Call FunctionDef name:_normalize_path arg:n arguments arg Assign If Call Assign Call Return return:yes Assign Call Call Return return:yes Compare Return return:yes"
  },
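The filter built by `_get_module_name_filter` strips dynamo-style `L['self'].` prefixes from `nn_module_stack` paths before testing membership. A minimal sketch of that normalization step:

```python
# Dynamo records paths like "L['self'].blocks.sub", which must be reduced
# to "blocks.sub" before comparison. nn_module_stack holds one entry per
# ancestor module, so an exact membership test suffices.
def normalize_path(n: str) -> str:
    prefix = len("L['self'].") if n.startswith("L['self'].") else 0
    return n[prefix:]

stack_paths = [
    "L['self'].blocks",
    "L['self'].blocks.sub",
    "L['self'].blocks.sub.linear1",
]
names = [normalize_path(n) for n in stack_paths]
assert "blocks.sub" in names
```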
  {
    "library": "pytorch",
    "name": "get_lr",
    "source_code": "@override\ndef get_lr(self) -> list[float]:\n    _warn_get_lr_called_within_step(self)\n    if self.last_epoch == 0:\n        return [group['lr'] * self.factor for group in self.optimizer.param_groups]\n    if self.last_epoch != self.total_iters:\n        return [group['lr'] for group in self.optimizer.param_groups]\n    return [group['lr'] * (1.0 / self.factor) for group in self.optimizer.param_groups]",
    "docstring": "Compute the learning rate of each parameter group.",
    "type": "method",
    "file_path": "pytorch\\torch\\optim\\lr_scheduler.py",
    "ast_data": "FunctionDef name:get_lr arg:self arguments arg Call If Compare Return return:yes If Compare Return return:yes Return return:yes"
  },
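This `get_lr` implements a piecewise-constant schedule: the base rate is multiplied by `factor` for the first `total_iters` steps and restored afterwards, which matches `torch.optim.lr_scheduler.ConstantLR`. A usage sketch:

```python
import torch

# The base lr (1.0) is scaled by factor=0.5 until total_iters=3 steps
# have elapsed, then restored.
model = torch.nn.Linear(2, 2)
opt = torch.optim.SGD(model.parameters(), lr=1.0)
sched = torch.optim.lr_scheduler.ConstantLR(opt, factor=0.5, total_iters=3)

lrs = []
for _ in range(5):
    lrs.append(opt.param_groups[0]["lr"])
    opt.step()
    sched.step()
print(lrs)  # [0.5, 0.5, 0.5, 1.0, 1.0]
```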
  {
    "library": "pandas",
    "name": "is_open",
    "source_code": "@property\ndef is_open(self) -> bool:\n    if self._handle is None:\n        return False\n    return bool(self._handle.isopen)",
    "docstring": "return a boolean indicating whether the file is open",
    "type": "method",
    "file_path": "pandas\\pandas\\io\\pytables.py",
    "ast_data": "FunctionDef name:is_open arg:self arguments arg If Compare Return return:yes Return return:yes Call"
  },
  {
    "library": "numpy",
    "name": "feature_implies_c",
    "source_code": "def feature_implies_c(self, names):\n    if isinstance(names, str):\n        names = set((names,))\n    else:\n        names = set(names)\n    return names.union(self.feature_implies(names))",
    "docstring": "same as feature_implies() but combining 'names'",
    "type": "method",
    "file_path": "numpy\\numpy\\distutils\\ccompiler_opt.py",
    "ast_data": "FunctionDef name:feature_implies_c arg:self arg:names arguments arg arg If Call Assign Call Assign Call Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "finalize",
    "source_code": "def finalize(self):\n    assert not self.is_final\n    if self.parent is not None:\n        assert not self.parent.is_final\n        if not self.isolated:\n            self.parent.read.update(self.read - self.isolated_names)\n            self.parent.modified.update(self.modified - self.isolated_names)\n            self.parent.bound.update(self.bound - self.isolated_names)\n            self.parent.globals.update(self.globals)\n            self.parent.nonlocals.update(self.nonlocals)\n            self.parent.annotations.update(self.annotations)\n        else:\n            self.parent.read.update(self.read - self.bound)\n            self.parent.annotations.update(self.annotations - self.bound)\n    self.is_final = True",
    "docstring": "Freezes this scope.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\autograph\\pyct\\static_analysis\\activity.py",
    "ast_data": "FunctionDef name:finalize arg:self arguments arg If Compare If Call Call Call Call Call Call Call Call Assign"
  },
  {
    "library": "pandas",
    "name": "nanargmax",
    "source_code": "def nanargmax(values: np.ndarray, *, axis: AxisInt | None=None, skipna: bool=True, mask: npt.NDArray[np.bool_] | None=None) -> int | np.ndarray:\n    values, mask = _get_values(values, True, fill_value_typ='-inf', mask=mask)\n    result = values.argmax(axis)\n    result = _maybe_arg_null_out(result, axis, mask, skipna)\n    return result",
    "docstring": "Parameters ---------- values : ndarray axis : int, optional skipna : bool, default True mask : ndarray[bool], optional nan-mask if known Returns ------- result : int or ndarray[int] The index/indices of max value in specified axis or -1 in the NA case Examples -------- >>> from pandas.core import nanops >>> arr = np.array([1, 2, 3, np.nan, 4]) >>> nanops.nanargmax(arr) np.int64(4) >>> arr = np.array(range(12), dtype=np.float64).reshape(4, 3) >>> arr[2:, 2] = np.nan >>> arr array([[ 0., 1., 2.], [ 3., 4., 5.], [ 6., 7., nan], [ 9., 10., nan]]) >>> nanops.nanargmax(arr, axis=1) array([2, 2, 1, 1])",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\nanops.py",
    "ast_data": "FunctionDef name:nanargmax arg:values arguments arg arg arg arg Assign Call Assign Call Assign Call Return return:yes"
  },
  {
    "library": "numpy",
    "name": "getRoutine",
    "source_code": "def getRoutine(self, rname):\n    unique = []\n    rname = rname.lower()\n    routine = self.names_to_routines.get(rname, unique)\n    if routine is unique:\n        routine = self._findRoutine(rname)\n        self.names_to_routines[rname] = routine\n    return routine",
    "docstring": "Get a routine from the library. Will add if it's not found.",
    "type": "method",
    "file_path": "numpy\\numpy\\linalg\\lapack_lite\\make_lite.py",
    "ast_data": "FunctionDef name:getRoutine arg:self arg:rname arguments arg arg Assign Assign Call Assign Call If Compare Assign Call Assign Return return:yes"
  },
  {
    "library": "pandas",
    "name": "_pseudo_css",
    "source_code": "def _pseudo_css(self, uuid: str, name: str, row: int, col: int, text: str) -> list[CSSDict]:\n    selector_id = '#T_' + uuid + '_row' + str(row) + '_col' + str(col)\n    return [{'selector': selector_id + f':hover .{name}', 'props': [('visibility', 'visible')]}, {'selector': selector_id + f' .{name}::after', 'props': [('content', f'\"{text}\"')]}]",
    "docstring": "For every table data-cell that has a valid tooltip (not None, NaN or empty string) must create two pseudo CSS entries for the specific element id which are added to overall table styles: an on hover visibility change and a content change dependent upon the user's chosen display string. For example: [{\"selector\": \"T__row1_col1:hover .pd-t\", \"props\": [(\"visibility\", \"visible\")]}, {\"selector\": \"T__row1_col1 .pd-t::after\", \"props\": [(\"content\", \"Some Valid Text String\")]}] Parameters ---------- uuid: str The uuid of the Styler instance name: str The css-name of the class used for styling tooltips row : int The row index of the specified tooltip string data col : int The col index of the specified tooltip string data text : str The textual content of the tooltip to be displayed in HTML. Returns ------- pseudo_css : List",
    "type": "method",
    "file_path": "pandas\\pandas\\io\\formats\\style_render.py",
    "ast_data": "FunctionDef name:_pseudo_css arg:self arg:uuid arg:name arg:row arg:col arg:text arguments arg arg arg arg arg arg Assign Call Call Return return:yes"
  },
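A standalone sketch of the two rules `_pseudo_css` emits per tooltip cell, one toggling visibility on hover and one injecting the tooltip text via `::after` content (the uuid and class name below are illustrative):

```python
# Two pseudo-CSS rules per tooltip cell: hover visibility + text content.
def pseudo_css(uuid, name, row, col, text):
    selector_id = f"#T_{uuid}_row{row}_col{col}"
    return [
        {"selector": f"{selector_id}:hover .{name}",
         "props": [("visibility", "visible")]},
        {"selector": f"{selector_id} .{name}::after",
         "props": [("content", f'"{text}"')]},
    ]

for rule in pseudo_css("abc123", "pd-t", 1, 1, "Some Valid Text String"):
    print(rule["selector"], rule["props"])
```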
  {
    "library": "numpy",
    "name": "_object_format",
    "source_code": "def _object_format(o):\n    if type(o) is list:\n        fmt = 'list({!r})'\n    else:\n        fmt = '{!r}'\n    return fmt.format(o)",
    "docstring": "Object arrays containing lists should be printed unambiguously",
    "type": "function",
    "file_path": "numpy\\numpy\\_core\\arrayprint.py",
    "ast_data": "FunctionDef name:_object_format arg:o arguments arg If Compare Call Assign Assign Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "FailedPreconditionError",
    "source_code": "@tf_export('errors.FailedPreconditionError')\nclass FailedPreconditionError(OpError):\n\n    def __init__(self, node_def, op, message, *args):\n        super(FailedPreconditionError, self).__init__(node_def, op, message, FAILED_PRECONDITION, *args)",
    "docstring": "Raised when some prerequisites are not met when running an operation. This typically indicates that system is not in state to execute the operation and requires preconditions to be met before successfully executing current operation. For example, this exception is commonly raised when running an operation that reads a before it has been initialized.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\errors_impl.py",
    "ast_data": "ClassDef name:FailedPreconditionError FunctionDef name:__init__ arg:self arg:node_def arg:op arg:message arguments arg arg arg arg arg Call Call Call"
  },
  {
    "library": "cherrypy",
    "name": "reopen_files",
    "source_code": "def reopen_files(self):\n    for log in (self.error_log, self.access_log):\n        for h in log.handlers:\n            if isinstance(h, logging.FileHandler):\n                h.acquire()\n                h.stream.close()\n                h.stream = open(h.baseFilename, h.mode)\n                h.release()",
    "docstring": "Close and reopen all file handlers.",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\_cplogging.py",
    "ast_data": "FunctionDef name:reopen_files arg:self arguments arg For For If Call Call Call Assign Call Call"
  },
  {
    "library": "tensorflow",
    "name": "create_dir_v2",
    "source_code": "@tf_export('io.gfile.mkdir')\ndef create_dir_v2(path):\n    _pywrap_file_io.CreateDir(compat.path_to_bytes(path))",
    "docstring": "Creates a directory with the name given by . Args: path: string, name of the directory to be created Notes: The parent directories need to exist. Use instead if there is the possibility that the parent dirs don't exist. Raises: errors.OpError: If the operation fails.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\lib\\io\\file_io.py",
    "ast_data": "FunctionDef name:create_dir_v2 arg:path arguments arg Call Call Call"
  },
  {
    "library": "django",
    "name": "__init__",
    "source_code": "def __init__(self, wrapper):\n    self.wrapper = wrapper",
    "docstring": "wrapper is a database wrapper. It must have a Database attribute defining PEP-249 exceptions.",
    "type": "method",
    "file_path": "django\\django\\db\\utils.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:wrapper arguments arg arg Assign"
  },
  {
    "library": "pytorch",
    "name": "OutputSharding",
    "source_code": "@dataclass\nclass OutputSharding:\n    output_spec: OutputSpecType\n    redistribute_schema: Optional[OpSchema] = None\n    needs_redistribute: bool = False\n\n    @cached_property\n    def mesh(self):\n        if isinstance(self.output_spec, DTensorSpec):\n            return self.output_spec.mesh\n        elif isinstance(self.output_spec, tuple):\n            out_spec = self.output_spec[0]\n            if isinstance(out_spec, DTensorSpec):\n                return out_spec.mesh\n            else:\n                raise ValueError(f'Unknown output spec type: {type(out_spec)}')\n        else:\n            raise ValueError(f'Unknown output spec type: {type(self.output_spec)}')",
    "docstring": "OutputSharding is a data class that is used by the sharding propagation, it could set the output_spec upon successful propagation. If needs_redistribute is set to True, a redistribute_schema would be returned together to indicate the input arguments needs to be redistributed before the op execution. NOTE: the redistribute_schema generated by sharding propagation should be exactly the same as the operator OpSchema, except the DTensorSpecs",
    "type": "class",
    "file_path": "pytorch\\torch\\distributed\\tensor\\_op_schema.py",
    "ast_data": "ClassDef name:OutputSharding FunctionDef name:mesh arg:self arguments arg If Call Return return:yes If Call Assign If Call Return return:yes Raise Call Call Raise Call Call"
  },
  {
    "library": "tensorflow",
    "name": "is_nested",
    "source_code": "def is_nested(modality, structure):\n    if modality == Modality.CORE:\n        return _tf_core_is_nested(structure)\n    elif modality == Modality.DATA:\n        return _tf_data_is_nested(structure)\n    else:\n        raise ValueError('Unknown modality used {} for nested structure'.format(modality))",
    "docstring": "Returns true if its input is a nested structure. For Modality.CORE refer to [tf.nest]( for the definition of a nested structure. Args: modality: enum value of supported modality [Modality.CORE or Modality.DATA] structure: the value to test. Returns: True if the input is a nested structure.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\util\\nest_util.py",
    "ast_data": "FunctionDef name:is_nested arg:modality arg:structure arguments arg arg If Compare Return return:yes Call If Compare Return return:yes Call Raise Call Call"
  },
  {
    "library": "scikit-learn",
    "name": "HuberLoss",
    "source_code": "class HuberLoss(BaseLoss):\n    differentiable = False\n    need_update_leaves_values = True\n\n    def __init__(self, sample_weight=None, quantile=0.9, delta=0.5):\n        check_scalar(quantile, 'quantile', target_type=numbers.Real, min_val=0, max_val=1, include_boundaries='neither')\n        self.quantile = quantile\n        super().__init__(closs=CyHuberLoss(delta=float(delta)), link=IdentityLink())\n        self.approx_hessian = True\n        self.constant_hessian = False\n\n    def fit_intercept_only(self, y_true, sample_weight=None):\n        if sample_weight is None:\n            median = np.percentile(y_true, 50, axis=0)\n        else:\n            median = _weighted_percentile(y_true, sample_weight, 50)\n        diff = y_true - median\n        term = np.sign(diff) * np.minimum(self.closs.delta, np.abs(diff))\n        return median + np.average(term, weights=sample_weight)",
    "docstring": "Huber loss, for regression. Domain: y_true and y_pred all real numbers quantile in (0, 1) Link: y_pred = raw_prediction For a given sample x_i, the Huber loss is defined as:: loss(x_i) = 1/2 * abserr**2 if abserr delta abserr = |y_true_i - raw_prediction_i| delta = quantile(abserr, self.quantile) Note: HuberLoss(quantile=1) equals HalfSquaredError and HuberLoss(quantile=0) equals delta * (AbsoluteError() - delta/2). Additional Attributes --------------------- quantile : float The quantile level which defines the breaking point to distinguish between absolute error and squared error. Must be in range (0, 1). Reference --------- .. [1] Friedman, J.H. (2001). :doi:. Annals of Statistics, 29, 1189-1232.",
    "type": "class",
    "file_path": "scikit-learn\\sklearn\\_loss\\loss.py",
    "ast_data": "ClassDef name:HuberLoss Assign Assign FunctionDef name:__init__ arg:self arg:sample_weight arg:quantile arg:delta arguments arg arg arg arg Call Assign Call Call Call Call Call Assign Assign FunctionDef name:fit_intercept_only arg:self arg:y_true arg:sample_weight arguments arg arg arg If Compare Assign Call Assign Call Assign Assign Call Call Call Return return:yes Call"
  },
  {
    "library": "numpy",
    "name": "__repr__",
    "source_code": "def __repr__(self):\n    _names = self.dtype.names\n    fmt = f'%{max((len(n) for n in _names)) + 4}s : %s'\n    reprstr = [fmt % (f, getattr(self, f)) for f in self.dtype.names]\n    reprstr.insert(0, 'masked_records(')\n    reprstr.extend([fmt % ('    fill_value', self.fill_value), '              )'])\n    return str('\\n'.join(reprstr))",
    "docstring": "Calculates the repr representation.",
    "type": "method",
    "file_path": "numpy\\numpy\\ma\\mrecords.py",
    "ast_data": "FunctionDef name:__repr__ arg:self arguments arg Assign Assign Call Call Assign Call Call Call Return return:yes Call Call"
  },
  {
    "library": "virtualenv",
    "name": "__init__",
    "source_code": "def __init__(self, options) -> None:\n    self._has_run = False\n    self._interpreter = None\n    self._env = options.env",
    "docstring": "Create a new discovery mechanism. :param options: the parsed options as defined within :meth:",
    "type": "method",
    "file_path": "virtualenv\\src\\virtualenv\\discovery\\discover.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:options arguments arg arg Assign Assign Assign"
  },
  {
    "library": "matplotlib",
    "name": "rotation_point",
    "source_code": "@property\ndef rotation_point(self):\n    return self._rotation_point",
    "docstring": "The rotation point of the patch.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\patches.py",
    "ast_data": "FunctionDef name:rotation_point arg:self arguments arg Return return:yes"
  },
  {
    "library": "pandas",
    "name": "mean",
    "source_code": "@deprecate_nonkeyword_arguments(version='4.0', allowed_args=['self'], name='mean')\ndef mean(self, axis: Axis | None=0, skipna: bool=True, numeric_only: bool=False, **kwargs) -> Any:\n    return NDFrame.mean(self, axis=axis, skipna=skipna, numeric_only=numeric_only, **kwargs)",
    "docstring": "Return the mean of the values over the requested axis. Parameters ---------- axis : {index (0)} Axis for the function to be applied on. For this parameter is unused and defaults to 0. For DataFrames, specifying `` will apply the aggregation across both axes. .. versionadded:: 2.0.0 skipna : bool, default True Exclude NA/null values when computing the result. numeric_only : bool, default False Include only float, int, boolean columns. **kwargs Additional keyword arguments to be passed to the function. Returns ------- scalar or Series (if level specified) Mean of the values for the requested axis. See Also -------- numpy.median : Equivalent numpy function for computing median. Series.sum : Sum of the values. Series.median : Median of the values. Series.std : Standard deviation of the values. Series.var : Variance of the values. Series.min : Minimum value. Series.max : Maximum value. Examples -------- >>> s = pd.Series([1, 2, 3]) >>> s.mean() 2.0",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\series.py",
    "ast_data": "FunctionDef name:mean arg:self arg:axis arg:skipna arg:numeric_only arguments arg arg arg arg arg Return return:yes Call Call"
  },
  {
    "library": "scikit-learn",
    "name": "_output_from_fit_transform",
    "source_code": "def _output_from_fit_transform(transformer, name, X, df, y):\n    outputs = {}\n    cases = [('fit.transform/df/df', df, df), ('fit.transform/df/array', df, X), ('fit.transform/array/df', X, df), ('fit.transform/array/array', X, X)]\n    if all((hasattr(transformer, meth) for meth in ['fit', 'transform'])):\n        for case, data_fit, data_transform in cases:\n            transformer.fit(data_fit, y)\n            if name in CROSS_DECOMPOSITION:\n                X_trans, _ = transformer.transform(data_transform, y)\n            else:\n                X_trans = transformer.transform(data_transform)\n            outputs[case] = (X_trans, transformer.get_feature_names_out())\n    cases = [('fit_transform/df', df), ('fit_transform/array', X)]\n    if hasattr(transformer, 'fit_transform'):\n        for case, data in cases:\n            if name in CROSS_DECOMPOSITION:\n                X_trans, _ = transformer.fit_transform(data, y)\n            else:\n                X_trans = transformer.fit_transform(data, y)\n            outputs[case] = (X_trans, transformer.get_feature_names_out())\n    return outputs",
    "docstring": "Generate output to test for different configuration: - calling either or ; - passing either a dataframe or a numpy array to fit; - passing either a dataframe or a numpy array to transform.",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\utils\\estimator_checks.py",
    "ast_data": "FunctionDef name:_output_from_fit_transform arg:transformer arg:name arg:X arg:df arg:y arguments arg arg arg arg arg Assign Assign If Call Call For Call If Compare Assign Call Assign Call Assign Call Assign If Call For If Compare Assign Call Assign Call Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_SparseFillEmptyRowsGrad",
    "source_code": "@ops.RegisterGradient('SparseFillEmptyRows')\ndef _SparseFillEmptyRowsGrad(op: ops.Operation, unused_grad_output_indices, output_grad_values, unused_grad_empty_row_indicator, unused_grad_reverse_index_map):\n    reverse_index_map = op.outputs[3]\n    d_values, d_default_value = gen_sparse_ops.sparse_fill_empty_rows_grad(reverse_index_map=reverse_index_map, grad_values=output_grad_values)\n    return [None, d_values, None, d_default_value]",
    "docstring": "Gradients for SparseFillEmptyRows.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\sparse_grad.py",
    "ast_data": "FunctionDef name:_SparseFillEmptyRowsGrad arg:op arg:unused_grad_output_indices arg:output_grad_values arg:unused_grad_empty_row_indicator arg:unused_grad_reverse_index_map arguments arg arg arg arg arg Assign Assign Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, tpu_cluster_resolver=None, steps_per_run=None, device_assignment=None):\n    super().__init__(TPUExtended(self, tpu_cluster_resolver, steps_per_run, device_assignment))\n    distribute_lib.distribution_strategy_gauge.get_cell('V1').set('TPUStrategy')\n    distribute_lib.distribution_strategy_replica_gauge.get_cell('num_workers').set(self.extended.num_hosts)\n    distribute_lib.distribution_strategy_replica_gauge.get_cell('num_replicas_per_worker').set(self.extended.num_replicas_per_host)\n    self._enable_packed_variable_in_eager_mode = True",
    "docstring": "Initializes the TPUStrategy object. Args: tpu_cluster_resolver: A tf.distribute.cluster_resolver.TPUClusterResolver, which provides information about the TPU cluster. steps_per_run: Number of steps to run on device before returning to the host. Note that this can have side-effects on performance, hooks, metrics, summaries etc. This parameter is only used when Distribution Strategy is used with Keras. device_assignment: Optional to specify the placement of replicas on the TPU cluster. Currently only supports the usecase of using a single core within a TPU cluster.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\tpu_strategy.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:tpu_cluster_resolver arg:steps_per_run arg:device_assignment arguments arg arg arg arg Call Call Call Call Call Call Call Call Call Assign"
  },
  {
    "library": "pytorch",
    "name": "_remap_constants",
    "source_code": "def _remap_constants(orig_constant_attrs: ConstantAttrMap, graph_signature: ExportGraphSignature, constants: dict[str, _ConstantAttributeType]) -> None:\n    remap_table: dict[str, list[str]] = {}\n    for name, value in constants.items():\n        if value in orig_constant_attrs:\n            remap_table[name] = orig_constant_attrs[value]\n    for spec in graph_signature.input_specs:\n        if spec.kind in (InputKind.CONSTANT_TENSOR, InputKind.CUSTOM_OBJ):\n            orig_target = spec.target\n            assert orig_target is not None\n            targets = remap_table.get(orig_target, [orig_target])\n            spec.target = targets[0]\n            constant = constants[orig_target]\n            del constants[orig_target]\n            for target in targets:\n                constants[target] = constant",
    "docstring": "Rewrite the graph signature and constants table to use the FQN from the original module.",
    "type": "function",
    "file_path": "pytorch\\torch\\export\\_trace.py",
    "ast_data": "FunctionDef name:_remap_constants arg:orig_constant_attrs arg:graph_signature arg:constants arguments arg arg arg For Call If Compare Assign For If Compare Assign Compare Assign Call Assign Assign For Assign"
  },
  {
    "library": "tensorflow",
    "name": "save_summaries_secs",
    "source_code": "@property\ndef save_summaries_secs(self):\n    return self._save_summaries_secs",
    "docstring": "Return the delay between summary computations. Returns: A timestamp.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\training\\supervisor.py",
    "ast_data": "FunctionDef name:save_summaries_secs arg:self arguments arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_get_vec_load_line",
    "source_code": "def _get_vec_load_line(self, var: str, index: sympy.Expr, dtype: torch.dtype, load_mask: Optional[CppCSEVariable]=None):\n    cpp_type = DTYPE_TO_CPP[dtype]\n    num_vectors = self._get_num_vectors(dtype)\n    load_mask_str = None\n    if load_mask:\n        if not load_mask.is_vec:\n            load_mask_str = f'{self._get_mask_type(torch.float)}::from({load_mask})'\n        else:\n            load_mask_str = f'{self._get_mask_cast(load_mask, torch.float)}'\n    loadbuf = f'{var} + {cexpr_index(index)}' if index != 0 else var\n    if dtype == torch.bool:\n        line = f'{self._get_mask_type()}::from({loadbuf})'\n    else:\n        line = f'{load_mask_str}.template loadu<{cpp_type},{num_vectors}>({loadbuf})' if load_mask_str else f'{self._get_vec_type(dtype)}::loadu({loadbuf}, {cexpr_index(self.num_elems)})'\n    return line",
    "docstring": "Get a load line str that loads a vector from at of type . If is not None, we do a masked load accordingly. Notes on the : 1. We always load number of elements regardless of the . It means we load half of the vector lanes for 16-bit data types and quarter of the vector lanes for 8-bit data types. 2. and could mean masks and we load them as float mask vectors.",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\codegen\\cpp.py",
    "ast_data": "FunctionDef name:_get_vec_load_line arg:self arg:var arg:index arg:dtype arg:load_mask arguments arg arg arg arg arg Assign Assign Call Assign If If Assign Call Assign Call Assign Compare Call If Compare Assign Call Assign Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_DenseReadResourceVariableProcessor",
    "source_code": "class _DenseReadResourceVariableProcessor(_OptimizableVariable):\n\n    def __init__(self, v):\n        self._v = v\n\n    def target(self):\n        return self._v\n\n    def update_op(self, optimizer, g):\n        update_op = optimizer._resource_apply_dense(g, self._v.op.inputs[0])\n        if self._v.constraint is not None:\n            with ops.control_dependencies([update_op]):\n                return self._v.assign(self._v.constraint(self._v))\n        else:\n            return update_op",
    "docstring": "Processor for dense ResourceVariables.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\training\\optimizer.py",
    "ast_data": "ClassDef name:_DenseReadResourceVariableProcessor FunctionDef name:__init__ arg:self arg:v arguments arg arg Assign FunctionDef name:target arg:self arguments arg Return return:yes FunctionDef name:update_op arg:self arg:optimizer arg:g arguments arg arg arg Assign Call If Compare With Call Return return:yes Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "get_nondebug_quantized_model",
    "source_code": "def get_nondebug_quantized_model(self) -> bytes:\n    return self._get_quantized_model(is_debug=False)",
    "docstring": "Returns a non-instrumented quantized model. Convert the quantized model with the initialized converter and return bytes for nondebug model. The model will not be instrumented with numeric verification operations. Returns: Model bytes corresponding to the model. Raises: ValueError: if converter is not passed to the debugger.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\lite\\tools\\optimize\\debugging\\python\\debugger.py",
    "ast_data": "FunctionDef name:get_nondebug_quantized_model arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "django",
    "name": "verbose_name_raw",
    "source_code": "@cached_property\ndef verbose_name_raw(self):\n    if isinstance(self.verbose_name, str):\n        return self.verbose_name\n    with override(None):\n        return str(self.verbose_name)",
    "docstring": "Return the untranslated verbose name.",
    "type": "method",
    "file_path": "django\\django\\db\\models\\options.py",
    "ast_data": "FunctionDef name:verbose_name_raw arg:self arguments arg If Call Return return:yes With Call Return return:yes Call"
  },
  {
    "library": "django",
    "name": "LeftLookup",
    "source_code": "@BaseSpatialField.register_lookup\nclass LeftLookup(GISLookup):\n    lookup_name = 'left'",
    "docstring": "The 'left' operator returns true if A's bounding box is strictly to the left of B's bounding box.",
    "type": "class",
    "file_path": "django\\django\\contrib\\gis\\db\\models\\lookups.py",
    "ast_data": "ClassDef name:LeftLookup Assign"
  },
  {
    "library": "tensorflow",
    "name": "_less_equal_flops",
    "source_code": "@ops.RegisterStatistics('LessEqual', 'flops')\ndef _less_equal_flops(graph, node):\n    return _binary_per_element_op_flops(graph, node)",
    "docstring": "Compute flops for LessEqual operation.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\profiler\\internal\\flops_registry.py",
    "ast_data": "FunctionDef name:_less_equal_flops arg:graph arg:node arguments arg arg Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "cluster_spec",
    "source_code": "@property\ndef cluster_spec(self):\n    return copy.deepcopy(self._cluster_spec)",
    "docstring": "Returns a copy of the cluster_spec object.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\distribute_coordinator.py",
    "ast_data": "FunctionDef name:cluster_spec arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "must_run_on_cpu",
    "source_code": "@deprecation.deprecated(date=None, instructions=_DEPRECATION_MSG)\n@tf_export(v1=['graph_util.must_run_on_cpu'])\ndef must_run_on_cpu(node, pin_variables_on_cpu=False):\n    if isinstance(node, ops.Operation):\n        node_def = node.node_def\n    else:\n        assert isinstance(node, node_def_pb2.NodeDef)\n        node_def = node\n    if pin_variables_on_cpu and _is_variable_op(node_def.op):\n        return True\n    if node_def.op == 'Const':\n        dtype = node_def.attr['dtype'].type\n        if dtype == dtypes.string or dtype == dtypes.int32:\n            return True\n    if node_def.op in ['DynamicStitch', 'ParallelDynamicStitch']:\n        dtype = node_def.attr['T'].type\n        if dtype == dtypes.int32:\n            return True\n    if node_def.op in ['Cast']:\n        dtype = node_def.attr['SrcT'].type\n        if dtype == dtypes.int32:\n            return True\n    return False",
    "docstring": "Returns True if the given node_def must run on CPU, otherwise False. Args: node: The node to be assigned to a device. Could be either an ops.Operation or NodeDef. pin_variables_on_cpu: If True, this function will return False if node_def represents a variable-related op. Returns: True if the given node must run on CPU, otherwise False.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\graph_util_impl.py",
    "ast_data": "FunctionDef name:must_run_on_cpu arg:node arg:pin_variables_on_cpu arguments arg arg If Call Assign Call Assign If BoolOp Call Return return:yes If Compare Assign If BoolOp Compare Compare Return return:yes If Compare Assign If Compare Return return:yes If Compare Assign If Compare Return return:yes Return return:yes Call Call"
  },
  {
    "library": "django",
    "name": "get_check_kwargs",
    "source_code": "def get_check_kwargs(self, options):\n    return {'tags': set()}",
    "docstring": "Validation is called explicitly each time the server reloads.",
    "type": "method",
    "file_path": "django\\django\\core\\management\\commands\\runserver.py",
    "ast_data": "FunctionDef name:get_check_kwargs arg:self arg:options arguments arg arg Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "_falling_factorial",
    "source_code": "def _falling_factorial(x, n):\n    val = 1\n    for k in range(x - n + 1, x + 1):\n        val *= k\n    return val",
    "docstring": "Return the factorial of to the falling. This is defined as: .. math:: x^\\underline n = (x)_n = x (x-1) \\cdots (x-n+1) This can more efficiently calculate ratios of factorials, since: n!/m! == falling_factorial(n, n-m) where n >= m skipping the factors that cancel out the usual factorial n! == ff(n, n)",
    "type": "function",
    "file_path": "scipy\\scipy\\signal\\_filter_design.py",
    "ast_data": "FunctionDef name:_falling_factorial arg:x arg:n arguments arg arg Assign For Call Return return:yes"
  },
  {
    "library": "django",
    "name": "display",
    "source_code": "def display(function=None, *, boolean=None, ordering=None, description=None, empty_value=None):\n\n    def decorator(func):\n        if boolean is not None and empty_value is not None:\n            raise ValueError('The boolean and empty_value arguments to the @display decorator are mutually exclusive.')\n        if boolean is not None:\n            func.boolean = boolean\n        if ordering is not None:\n            func.admin_order_field = ordering\n        if description is not None:\n            func.short_description = description\n        if empty_value is not None:\n            func.empty_value_display = empty_value\n        return func\n    if function is None:\n        return decorator\n    else:\n        return decorator(function)",
    "docstring": "Conveniently add attributes to a display function:: @admin.display( boolean=True, ordering='-publish_date', description='Is Published?', ) def is_published(self, obj): return obj.publish_date is not None This is equivalent to setting some attributes (with the original, longer names) on the function directly:: def is_published(self, obj): return obj.publish_date is not None is_published.boolean = True is_published.admin_order_field = '-publish_date' is_published.short_description = 'Is Published?'",
    "type": "function",
    "file_path": "django\\django\\contrib\\admin\\decorators.py",
    "ast_data": "FunctionDef name:display arg:function arguments arg arg arg arg arg FunctionDef name:decorator arg:func arguments arg If BoolOp Compare Compare Raise Call If Compare Assign If Compare Assign If Compare Assign If Compare Assign Return return:yes If Compare Return return:yes Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "ReshardCallback",
    "source_code": "class ReshardCallback:\n\n    def object_name(self) -> str:\n        return None\n\n    def reshard(self, checkpoint_values: List[tensor.Tensor], shape_and_slice_spec: List[str]) -> tensor.Tensor:\n        del shape_and_slice_spec\n        if len(checkpoint_values) != 1:\n            raise ValueError('Default reshard expects a single checkpoint value.')\n        return checkpoint_values[0]\n\n    def update_restore_inputs(self, checkpoint_key, shape_and_slice_spec) -> tuple[List[str], List[str]]:\n        return ([checkpoint_key], [shape_and_slice_spec])",
    "docstring": "API to reshard a checkpoint value during restore. When a ReshardCallback is attached to a CheckpointPosition, the restored value of the checkpoint position is resharded based on this callback.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\checkpoint\\checkpoint_adapter.py",
    "ast_data": "ClassDef name:ReshardCallback FunctionDef name:object_name arg:self arguments arg Return return:no FunctionDef name:reshard arg:self arg:checkpoint_values arg:shape_and_slice_spec arguments arg arg arg If Compare Call Raise Call Return return:yes FunctionDef name:update_restore_inputs arg:self arg:checkpoint_key arg:shape_and_slice_spec arguments arg arg arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_TensorConversionFunction",
    "source_code": "@staticmethod\ndef _TensorConversionFunction(v, dtype=None, name=None, as_ref=False):\n    _ = name\n    if dtype and (not dtype.is_compatible_with(v.dtype)):\n        raise ValueError(f\"Incompatible type conversion requested to type '{dtype.name}' for variable of type '{v.dtype.name}' (Variable: {v}).\")\n    if as_ref:\n        return v._ref()\n    else:\n        return v.value()",
    "docstring": "Utility function for converting a Variable to a Tensor.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\variables.py",
    "ast_data": "FunctionDef name:_TensorConversionFunction arg:v arg:dtype arg:name arg:as_ref arguments arg arg arg arg Assign If BoolOp Call Raise Call If Return return:yes Call Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "_initialize",
    "source_code": "def _initialize(self, X, resp):\n    n_samples, _ = X.shape\n    weights, means, covariances = (None, None, None)\n    if resp is not None:\n        weights, means, covariances = _estimate_gaussian_parameters(X, resp, self.reg_covar, self.covariance_type)\n        if self.weights_init is None:\n            weights /= n_samples\n    self.weights_ = weights if self.weights_init is None else self.weights_init\n    self.means_ = means if self.means_init is None else self.means_init\n    if self.precisions_init is None:\n        self.covariances_ = covariances\n        self.precisions_cholesky_ = _compute_precision_cholesky(covariances, self.covariance_type)\n    else:\n        self.precisions_cholesky_ = _compute_precision_cholesky_from_precisions(self.precisions_init, self.covariance_type)",
    "docstring": "Initialization of the Gaussian mixture parameters. Parameters ---------- X : array-like of shape (n_samples, n_features) resp : array-like of shape (n_samples, n_components)",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\mixture\\_gaussian_mixture.py",
    "ast_data": "FunctionDef name:_initialize arg:self arg:X arg:resp arguments arg arg arg Assign Assign If Compare Assign Call If Compare Assign Compare Assign Compare If Compare Assign Assign Call Assign Call"
  },
  {
    "library": "kornia",
    "name": "TranslateX",
    "source_code": "class TranslateX(OperationBase):\n\n    def __init__(self, initial_magnitude: Optional[float]=0.2, initial_probability: float=0.5, magnitude_range: Tuple[float, float]=(0.0, 0.5), temperature: float=0.1, symmetric_megnitude: bool=True) -> None:\n        if symmetric_megnitude and magnitude_range[0] < 0:\n            raise ValueError(f'Lower bound of {self.__class__.__name__} is a symmetric operation. The lower bound must above 0. Got {magnitude_range[0]}.')\n        super().__init__(K.RandomTranslate(magnitude_range, same_on_batch=False, p=initial_probability, align_corners=True), initial_magnitude=[('translate_x', initial_magnitude)], temperature=temperature, symmetric_megnitude=symmetric_megnitude)",
    "docstring": "Apply translate operation along x-axis. Args: initial_magnitude: the initial magnitude. initial_probability: the initial probability. If None, the augmentation will be randomly applied according to he augmentation sampling range. magnitude_range: the sampling range for random sampling and clamping the optimized magnitude. temperature: temperature for RelaxedBernoulli distribution used during training. symmetric_megnitude: if to randomly assign the magnitude as negative or not.",
    "type": "class",
    "file_path": "kornia\\kornia\\augmentation\\auto\\operations\\ops.py",
    "ast_data": "ClassDef name:TranslateX FunctionDef name:__init__ arg:self arg:initial_magnitude arg:initial_probability arg:magnitude_range arg:temperature arg:symmetric_megnitude arguments arg arg arg arg arg arg If BoolOp Compare Raise Call Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_insert_ancillary_layers",
    "source_code": "def _insert_ancillary_layers(model, ancillary_layers, metrics_names, new_nodes):\n    metric_layers = [layer for layer in ancillary_layers if isinstance(layer, AddMetric)]\n    metric_layers.sort(key=lambda layer: metrics_names.index(layer.metric_name))\n    ancillary_layers = [layer for layer in ancillary_layers if not isinstance(layer, AddMetric)] + metric_layers\n    model._insert_layers(ancillary_layers, relevant_nodes=list(new_nodes))",
    "docstring": "Inserts ancillary layers into the model with the proper order.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\models.py",
    "ast_data": "FunctionDef name:_insert_ancillary_layers arg:model arg:ancillary_layers arg:metrics_names arg:new_nodes arguments arg arg arg arg Assign Call Call arguments arg Call Assign Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "_move_exported_model_to_eval",
    "source_code": "def _move_exported_model_to_eval(model: torch.fx.GraphModule):\n    is_training = getattr(model, _EXPORTED_TRAINING_ATTR, True)\n    if not is_training:\n        return model\n    setattr(model, _EXPORTED_TRAINING_ATTR, False)\n    _replace_dropout(model, train_to_eval=True)\n    _replace_batchnorm(model, train_to_eval=True)\n    return model",
    "docstring": "Move an exported GraphModule to eval mode. This is equivalent to model.eval() but only for certain special ops like dropout, batchnorm. QAT users should call this before performing inference on the model. This call is idempotent; if the model is already in eval mode, nothing will happen.",
    "type": "function",
    "file_path": "pytorch\\torch\\ao\\quantization\\pt2e\\export_utils.py",
    "ast_data": "FunctionDef name:_move_exported_model_to_eval arg:model arguments arg Assign Call If Return return:yes Call Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "master_target",
    "source_code": "@property\ndef master_target(self):\n    return self._master_target",
    "docstring": "Returns the session master for the corresponding task to connect to.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\distribute\\distribute_coordinator_utils.py",
    "ast_data": "FunctionDef name:master_target arg:self arguments arg Return return:yes"
  },
  {
    "library": "scipy",
    "name": "_updated_ctor_param",
    "source_code": "def _updated_ctor_param(self):\n    dct = self._ctor_param.copy()\n    dct['a'] = self.a\n    dct['b'] = self.b\n    dct['xtol'] = self.xtol\n    dct['badvalue'] = self.badvalue\n    dct['name'] = self.name\n    dct['shapes'] = self.shapes\n    return dct",
    "docstring": "Return the current version of _ctor_param, possibly updated by user. Used by freezing. Keep this in sync with the signature of __init__.",
    "type": "method",
    "file_path": "scipy\\scipy\\stats\\_distn_infrastructure.py",
    "ast_data": "FunctionDef name:_updated_ctor_param arg:self arguments arg Assign Call Assign Assign Assign Assign Assign Assign Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "autolabel_time",
    "source_code": "def autolabel_time(rects, ax):\n    for rect in rects:\n        height = rect.get_height()\n        ax.text(rect.get_x() + rect.get_width() / 2.0, 1.05 * height, '%.1f' % height, ha='center', va='bottom')",
    "docstring": "Attach a text label above each bar displaying its height.",
    "type": "function",
    "file_path": "scikit-learn\\benchmarks\\bench_online_ocsvm.py",
    "ast_data": "FunctionDef name:autolabel_time arg:rects arg:ax arguments arg arg For Assign Call Call Call Call"
  },
  {
    "library": "scipy",
    "name": "_bin_numbers",
    "source_code": "def _bin_numbers(sample, nbin, edges, dedges):\n    Dlen, Ndim = sample.shape\n    sampBin = [np.digitize(sample[:, i], edges[i]) for i in range(Ndim)]\n    for i in range(Ndim):\n        dedges_min = dedges[i].min()\n        if dedges_min == 0:\n            raise ValueError('The smallest edge difference is numerically 0.')\n        decimal = int(-np.log10(dedges_min)) + 6\n        on_edge = np.where((sample[:, i] >= edges[i][-1]) & (np.around(sample[:, i], decimal) == np.around(edges[i][-1], decimal)))[0]\n        sampBin[i][on_edge] -= 1\n    binnumbers = np.ravel_multi_index(sampBin, nbin)\n    return binnumbers",
    "docstring": "Compute the bin number each sample falls into, in each dimension",
    "type": "function",
    "file_path": "scipy\\scipy\\stats\\_binned_statistic.py",
    "ast_data": "FunctionDef name:_bin_numbers arg:sample arg:nbin arg:edges arg:dedges arguments arg arg arg arg Assign Assign Call Call For Call Assign Call If Compare Raise Call Assign Call Call Assign Call Compare Compare Call Call Assign Call Return return:yes"
  },
  {
    "library": "kornia",
    "name": "_decode",
    "source_code": "def _decode(loc: torch.Tensor, priors: torch.Tensor, variances: List[float]) -> torch.Tensor:\n    boxes = torch.cat((priors[:, 0:2] + loc[:, 0:2] * variances[0] * priors[:, 2:4], priors[:, 2:4] * torch.exp(loc[:, 2:4] * variances[1]), priors[:, 0:2] + loc[:, 4:6] * variances[0] * priors[:, 2:4], priors[:, 0:2] + loc[:, 6:8] * variances[0] * priors[:, 2:4], priors[:, 0:2] + loc[:, 8:10] * variances[0] * priors[:, 2:4], priors[:, 0:2] + loc[:, 10:12] * variances[0] * priors[:, 2:4], priors[:, 0:2] + loc[:, 12:14] * variances[0] * priors[:, 2:4]), 1)\n    tmp = boxes[:, 0:2] - boxes[:, 2:4] / 2\n    return torch.cat((tmp, boxes[:, 2:4] + tmp, boxes[:, 4:]), dim=-1)",
    "docstring": "Decode locations from predictions using priors to undo the encoding for offset regression at train time. Args: loc:location predictions for loc layers. Shape: [num_priors,4]. priors: Prior boxes in center-offset form. Shape: [num_priors,4]. variances: (list[float]) Variances of priorboxes. Return: Tensor containing decoded bounding box predictions.",
    "type": "function",
    "file_path": "kornia\\kornia\\contrib\\face_detection.py",
    "ast_data": "FunctionDef name:_decode arg:loc arg:priors arg:variances arguments arg arg arg Assign Call Call Assign Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "element_spec",
    "source_code": "@property\ndef element_spec(self):\n    return self._element_spec",
    "docstring": "The inner element spec.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\data\\ops\\dataset_ops.py",
    "ast_data": "FunctionDef name:element_spec arg:self arguments arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_step_microbatches",
    "source_code": "def _step_microbatches(self, arg_mbs: Optional[list]=None, kwarg_mbs: Optional[list]=None, target_mbs: Optional[list]=None, losses: Optional[list]=None):\n    if target_mbs is not None or losses is not None:\n        raise RuntimeError('Forward-only schedule does not support loss computation')\n    arg_mbs, kwarg_mbs = self._check_inputs(arg_mbs, kwarg_mbs, target_mbs, losses)\n    if not self._stage_initialized:\n        self._initialize_stage(arg_mbs[0], kwarg_mbs[0])\n    fwd_sends_to_wait: list[list[dist.Work]] = []\n    for i in range(self._n_microbatches):\n        with record_function(f'Forward {i}'):\n            ops = self._stage.get_fwd_recv_ops(i)\n            works = _sorted_batch_p2p(ops, desc='fwd_recv')\n            for work in works.values():\n                _wait_batch_p2p(work)\n            self._stage.forward_one_chunk(i, arg_mbs[i], kwarg_mbs[i])\n            ops = self._stage.get_fwd_send_ops(i)\n            works = _sorted_batch_p2p(ops, desc='fwd_send')\n            fwd_sends_to_wait.extend(works.values())\n        logger.debug('[%s] Forwarded microbatch %s', self._stage.stage_index, i)\n    for work in fwd_sends_to_wait:\n        _wait_batch_p2p(work)",
    "docstring": "Run one iteration of the pipeline schedule",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\pipelining\\schedules.py",
    "ast_data": "FunctionDef name:_step_microbatches arg:self arg:arg_mbs arg:kwarg_mbs arg:target_mbs arg:losses arguments arg arg arg arg arg If BoolOp Compare Compare Raise Call Assign Call If Call For Call With Call Assign Call Assign Call For Call Call Call Assign Call Assign Call Call Call Call For Call"
  },
  {
    "library": "tensorflow",
    "name": "get_embedding_var_name",
    "source_code": "def get_embedding_var_name(self):\n    raise NotImplementedError('not impl')",
    "docstring": "Returns the embedding variable name. Feature key name and embedding variable name are usually one-to-one mapping. But for shared embedding columns, it is many-to-one mapping.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\tpu\\feature_column.py",
    "ast_data": "FunctionDef name:get_embedding_var_name arg:self arguments arg Raise Call"
  },
  {
    "library": "tensorflow",
    "name": "_should_wrap_tuple",
    "source_code": "def _should_wrap_tuple(t):\n    for element in t:\n        if isinstance(element, NoDependency):\n            return True\n        if isinstance(element, base.Trackable):\n            return True\n        if type(element) == dict:\n            return True\n        if type(element) == collections.OrderedDict:\n            return True\n        if type(element) == list:\n            return True\n        if isinstance(element, tuple) and _should_wrap_tuple(element):\n            return True\n    return False",
    "docstring": "Determine if a tuple has any trackable components.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\trackable\\data_structures.py",
    "ast_data": "FunctionDef name:_should_wrap_tuple arg:t arguments arg For If Call Return return:yes If Call Return return:yes If Compare Call Return return:yes If Compare Call Return return:yes If Compare Call Return return:yes If BoolOp Call Call Return return:yes Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "_update_coef_",
    "source_code": "def _update_coef_(self, X, y, n_samples, n_features, XT_y, U, Vh, eigen_vals_, alpha_, lambda_):\n    if n_samples > n_features:\n        coef_ = np.linalg.multi_dot([Vh.T, Vh / (eigen_vals_ + lambda_ / alpha_)[:, np.newaxis], XT_y])\n    else:\n        coef_ = np.linalg.multi_dot([X.T, U / (eigen_vals_ + lambda_ / alpha_)[None, :], U.T, y])\n    sse_ = np.sum((y - np.dot(X, coef_)) ** 2)\n    return (coef_, sse_)",
    "docstring": "Update posterior mean and compute corresponding sse (sum of squared errors). Posterior mean is given by coef_ = scaled_sigma_ * X.T * y where scaled_sigma_ = (lambda_/alpha_ * np.eye(n_features) + np.dot(X.T, X))^-1",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\linear_model\\_bayes.py",
    "ast_data": "FunctionDef name:_update_coef_ arg:self arg:X arg:y arg:n_samples arg:n_features arg:XT_y arg:U arg:Vh arg:eigen_vals_ arg:alpha_ arg:lambda_ arguments arg arg arg arg arg arg arg arg arg arg arg If Compare Assign Call Assign Call Assign Call Call Return return:yes"
  },
  {
    "library": "pandas",
    "name": "asfreq",
    "source_code": "def asfreq(obj: NDFrameT, freq, method=None, how=None, normalize: bool=False, fill_value=None) -> NDFrameT:\n    if isinstance(obj.index, PeriodIndex):\n        if method is not None:\n            raise NotImplementedError(\"'method' argument is not supported\")\n        if how is None:\n            how = 'E'\n        if isinstance(freq, BaseOffset):\n            if hasattr(freq, '_period_dtype_code'):\n                freq = PeriodDtype(freq)._freqstr\n        new_obj = obj.copy()\n        new_obj.index = obj.index.asfreq(freq, how=how)\n    elif len(obj.index) == 0:\n        new_obj = obj.copy()\n        new_obj.index = _asfreq_compat(obj.index, freq)\n    else:\n        unit = None\n        if isinstance(obj.index, DatetimeIndex):\n            unit = obj.index.unit\n        dti = date_range(obj.index.min(), obj.index.max(), freq=freq, unit=unit)\n        dti.name = obj.index.name\n        new_obj = obj.reindex(dti, method=method, fill_value=fill_value)\n        if normalize:\n            new_obj.index = new_obj.index.normalize()\n    return new_obj",
    "docstring": "Utility frequency conversion method for Series/DataFrame. See :meth: for full documentation.",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\resample.py",
    "ast_data": "FunctionDef name:asfreq arg:obj arg:freq arg:method arg:how arg:normalize arg:fill_value arguments arg arg arg arg arg arg If Call If Compare Raise Call If Compare Assign If Call If Call Assign Call Assign Call Assign Call If Compare Call Assign Call Assign Call Assign If Call Assign Assign Call Call Call Assign Assign Call If Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_batch_static_inner_shape",
    "source_code": "def _batch_static_inner_shape(old_shape: tensor_shape.TensorShape, batch_size: Optional[int]) -> tensor_shape.TensorShape:\n    head_dim = tensor_shape.dimension_at_index(old_shape, 0) * batch_size\n    return head_dim + old_shape[1:]",
    "docstring": "Returns a copy of old_shape with axis=0 multiplied by batch_size. Only use if this is the inner_shape of a DynamicRaggedShape.Spec with one or more row partitions. Args: old_shape: the original inner_shape. batch_size: the batch size. Returns: a new shape.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\dynamic_ragged_shape.py",
    "ast_data": "FunctionDef name:_batch_static_inner_shape arg:old_shape arg:batch_size arguments arg arg Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "log_device_compatibility_check",
    "source_code": "def log_device_compatibility_check(policy_name):\n    global _logged_compatibility_check\n    if _logged_compatibility_check:\n        return\n    _logged_compatibility_check = True\n    gpus = config.list_physical_devices('GPU')\n    gpu_details_list = [config.get_device_details(g) for g in gpus]\n    _log_device_compatibility_check(policy_name, gpu_details_list)",
    "docstring": "Logs a compatibility check if the devices support the policy. Currently only logs for the policy mixed_float16. A log is shown only the first time this function is called. Args: policy_name: The name of the dtype policy.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\mixed_precision\\device_compatibility_check.py",
    "ast_data": "FunctionDef name:log_device_compatibility_check arg:policy_name arguments arg If Return return:no Assign Assign Call Assign Call Call"
  },
  {
    "library": "pytorch",
    "name": "refine_names",
    "source_code": "def refine_names(self, *names):\n    if has_torch_function_unary(self):\n        return handle_torch_function(Tensor.refine_names, (self,), self, *names)\n    names = resolve_ellipsis(names, self.names, 'refine_names')\n    return super().refine_names(names)",
    "docstring": "Refines the dimension names of :attr: according to :attr:. Refining is a special case of renaming that \"lifts\" unnamed dimensions. A `namesnames`). Args: names (iterable of str): The desired names of the output tensor. May contain up to one Ellipsis. Examples:: >>> imgs = torch.randn(32, 3, 128, 128) >>> named_imgs = imgs.refine_names('N', 'C', 'H', 'W') >>> named_imgs.names ('N', 'C', 'H', 'W') >>> tensor = torch.randn(2, 3, 5, 7, 11) >>> tensor = tensor.refine_names('A', ..., 'B', 'C') >>> tensor.names ('A', None, None, 'B', 'C') .. warning:: The named tensor API is experimental and subject to change.",
    "type": "method",
    "file_path": "pytorch\\torch\\_tensor.py",
    "ast_data": "FunctionDef name:refine_names arg:self arguments arg arg If Call Return return:yes Call Assign Call Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_emit_op",
    "source_code": "def _emit_op(self, nodestats: step_stats_pb2.NodeExecStats, pid: int, is_gputrace: bool) -> None:\n    node_name = nodestats.node_name\n    start = nodestats.all_start_micros\n    duration = nodestats.all_end_rel_micros\n    tid = nodestats.thread_id\n    inputs = []\n    if is_gputrace:\n        node_name, op = self._parse_kernel_label(nodestats.timeline_label, node_name)\n    elif node_name == 'RecvTensor':\n        op = 'RecvTensor'\n    else:\n        _, op, inputs = self._parse_op_label(nodestats.timeline_label)\n    args = {'name': node_name, 'op': op}\n    if build_info.build_info['is_rocm_build']:\n        args['kernel'] = nodestats.timeline_label.split('@@')[0]\n    for i, iname in enumerate(inputs):\n        args['input%d' % i] = iname\n    self._chrome_trace.emit_region(start, duration, pid, tid, 'Op', op, args)",
    "docstring": "Generates a Chrome Trace event to show Op execution. Args: nodestats: The 'step_stats_pb2.NodeExecStats' proto recording op execution. pid: The pid assigned for the device where this op ran. is_gputrace: If True then this op came from the GPUTracer.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\client\\timeline.py",
    "ast_data": "FunctionDef name:_emit_op arg:self arg:nodestats arg:pid arg:is_gputrace arguments arg arg arg arg Assign Assign Assign Assign Assign If Assign Call If Compare Assign Assign Call Assign If Assign Call For Call Assign Call"
  },
  {
    "library": "seaborn",
    "name": "choose_diverging_palette",
    "source_code": "def choose_diverging_palette(as_cmap=False):\n    pal = []\n    if as_cmap:\n        cmap = _init_mutable_colormap()\n\n    @interact\n    def choose_diverging_palette(h_neg=IntSlider(min=0, max=359, value=220), h_pos=IntSlider(min=0, max=359, value=10), s=IntSlider(min=0, max=99, value=74), l=IntSlider(min=0, max=99, value=50), sep=IntSlider(min=1, max=50, value=10), n=(2, 16), center=['light', 'dark']):\n        if as_cmap:\n            colors = diverging_palette(h_neg, h_pos, s, l, sep, 256, center)\n            _update_lut(cmap, colors)\n            _show_cmap(cmap)\n        else:\n            pal[:] = diverging_palette(h_neg, h_pos, s, l, sep, n, center)\n            palplot(pal)\n    if as_cmap:\n        return cmap\n    return pal",
    "docstring": "Launch an interactive widget to choose a diverging color palette. This corresponds with the :func: function. This kind of palette is good for data that range between interesting low values and interesting high values with a meaningful midpoint. (For example, change scores relative to some baseline value). Requires IPython 2+ and must be used in the notebook. Parameters ---------- as_cmap : bool If True, the return value is a matplotlib colormap rather than a list of discrete colors. Returns ------- pal or cmap : list of colors or matplotlib colormap Object that can be passed to plotting functions. See Also -------- diverging_palette : Create a diverging color palette or colormap. choose_colorbrewer_palette : Interactively choose palettes from the colorbrewer set, including diverging palettes.",
    "type": "function",
    "file_path": "seaborn\\seaborn\\widgets.py",
    "ast_data": "FunctionDef name:choose_diverging_palette arg:as_cmap arguments arg Assign If Assign Call FunctionDef name:choose_diverging_palette arg:h_neg arg:h_pos arg:s arg:l arg:sep arg:n arg:center arguments arg arg arg arg arg arg arg Call Call Call Call Call If Assign Call Call Call Assign Call Call If Return return:yes Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "update_if_finite_grads",
    "source_code": "def update_if_finite_grads():\n\n    def incr_loss_scale():\n        new_loss_scale = self._current_loss_scale * self._multiplier\n        return control_flow_ops.group(_assign_if_finite(self._current_loss_scale, new_loss_scale), self._num_good_steps.assign(0))\n    return cond.cond(self._num_good_steps + 1 >= self._increment_period, incr_loss_scale, lambda: _op_in_graph_mode(self._num_good_steps.assign_add(1)))",
    "docstring": "Update assuming the gradients are finite.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\training\\experimental\\loss_scale.py",
    "ast_data": "FunctionDef name:update_if_finite_grads arguments FunctionDef name:incr_loss_scale arguments Assign Return return:yes Call Call Call Return return:yes Call Compare arguments Call Call"
  },
  {
    "library": "pytorch",
    "name": "layer_norm_inference_rule",
    "source_code": "@register_inference_rule(torch.nn.LayerNorm)\ndef layer_norm_inference_rule(n: Node, module_instance, symbols, constraints, counter):\n    assert isinstance(n.args[0], Node)\n    return gen_layer_norm_constraints(n, module_instance.normalized_shape, symbols, counter)",
    "docstring": "Input and output shapes should be equal. Input should be consistent with the normalized_shape",
    "type": "function",
    "file_path": "pytorch\\torch\\fx\\experimental\\migrate_gradual_types\\constraint_generator.py",
    "ast_data": "FunctionDef name:layer_norm_inference_rule arg:n arg:module_instance arg:symbols arg:constraints arg:counter arguments arg arg arg arg arg Call Return return:yes Call Call"
  },
  {
    "library": "pandas",
    "name": "_other_namespaces",
    "source_code": "@final\ndef _other_namespaces(self) -> dict:\n    nmsp_dict: dict[str, str] = {}\n    if self.namespaces:\n        nmsp_dict = {f'xmlns{(p if p == '' else f':{p}')}': n for p, n in self.namespaces.items() if n != self.prefix_uri[1:-1]}\n    return nmsp_dict",
    "docstring": "Define other namespaces. This method will build dictionary of namespaces attributes for root element, conditionally with optional namespaces and prefix.",
    "type": "method",
    "file_path": "pandas\\pandas\\io\\formats\\xml.py",
    "ast_data": "FunctionDef name:_other_namespaces arg:self arguments arg If Assign Compare Call Compare Return return:yes"
  },
  {
    "library": "pandas",
    "name": "_cubicspline_interpolate",
    "source_code": "def _cubicspline_interpolate(xi: np.ndarray, yi: np.ndarray, x: np.ndarray, axis: AxisInt=0, bc_type: str | tuple[Any, Any]='not-a-knot', extrapolate=None):\n    from scipy import interpolate\n    P = interpolate.CubicSpline(xi, yi, axis=axis, bc_type=bc_type, extrapolate=extrapolate)\n    return P(x)",
    "docstring": "Convenience function for cubic spline data interpolator. See for details. Parameters ---------- xi : np.ndarray, shape (n,) 1-d array containing values of the independent variable. Values must be real, finite and in strictly increasing order. yi : np.ndarray Array containing values of the dependent variable. It can have arbitrary number of dimensions, but the length along `ybc_typeyyybc_type(order, deriv_values)orderderiv_valueyyderiv_valueyderiv_valueCubic Spline Interpolation `_ on Wikiversity. .. [2] Carl de Boor, \"A Practical Guide to Splines\", Springer-Verlag, 1978.",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\missing.py",
    "ast_data": "FunctionDef name:_cubicspline_interpolate arg:xi arg:yi arg:x arg:axis arg:bc_type arg:extrapolate arguments arg arg arg arg arg arg Assign Call Return return:yes Call"
  },
  {
    "library": "django",
    "name": "__init__",
    "source_code": "def __init__(self, name):\n    self.name = name",
    "docstring": "Arguments: * name: the name of the field this expression references",
    "type": "method",
    "file_path": "django\\django\\db\\models\\expressions.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:name arguments arg arg Assign"
  },
  {
    "library": "pandas",
    "name": "swaplevel",
    "source_code": "def swaplevel(self, i=-2, j=-1) -> MultiIndex:\n    new_levels = list(self.levels)\n    new_codes = list(self.codes)\n    new_names = list(self.names)\n    i = self._get_level_number(i)\n    j = self._get_level_number(j)\n    new_levels[i], new_levels[j] = (new_levels[j], new_levels[i])\n    new_codes[i], new_codes[j] = (new_codes[j], new_codes[i])\n    new_names[i], new_names[j] = (new_names[j], new_names[i])\n    return MultiIndex(levels=new_levels, codes=new_codes, names=new_names, verify_integrity=False)",
    "docstring": "Swap level i with level j. Calling this method does not change the ordering of the values. Parameters ---------- i : int, str, default -2 First level of index to be swapped. Can pass level name as string. Type of parameters can be mixed. j : int, str, default -1 Second level of index to be swapped. Can pass level name as string. Type of parameters can be mixed. Returns ------- MultiIndex A new MultiIndex. See Also -------- Series.swaplevel : Swap levels i and j in a MultiIndex. DataFrame.swaplevel : Swap levels i and j in a MultiIndex on a particular axis. Examples -------- >>> mi = pd.MultiIndex( ... levels=[[\"a\", \"b\"], [\"bb\", \"aa\"]], codes=[[0, 0, 1, 1], [0, 1, 0, 1]] ... ) >>> mi MultiIndex([('a', 'bb'), ('a', 'aa'), ('b', 'bb'), ('b', 'aa')], ) >>> mi.swaplevel(0, 1) MultiIndex([('bb', 'a'), ('aa', 'a'), ('bb', 'b'), ('aa', 'b')], )",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\indexes\\multi.py",
    "ast_data": "FunctionDef name:swaplevel arg:self arg:i arg:j arguments arg arg arg Assign Call Assign Call Assign Call Assign Call Assign Call Assign Assign Assign Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "__init__",
    "source_code": "def __init__(self, fps=5, codec=None, bitrate=None, extra_args=None, metadata=None):\n    if type(self) is MovieWriter:\n        raise TypeError('MovieWriter cannot be instantiated directly. Please use one of its subclasses.')\n    super().__init__(fps=fps, metadata=metadata, codec=codec, bitrate=bitrate)\n    self.frame_format = self.supported_formats[0]\n    self.extra_args = extra_args",
    "docstring": "Parameters ---------- fps : int, default: 5 Movie frame rate (per second). codec : str or None, default: :rc: The codec to use. bitrate : int, default: :rc: The bitrate of the movie, in kilobits per second. Higher values means higher quality movies, but increase the file size. A value of -1 lets the underlying movie encoder select the bitrate. extra_args : list of str or None, optional Extra command-line arguments passed to the underlying movie encoder. These arguments are passed last to the encoder, just before the filename. The default, None, means to use :rc: for the builtin writers. metadata : dict[str, str], default: {} A dictionary of keys and values for metadata to include in the output file. Some keys that may be of use include: title, artist, genre, subject, copyright, srcform, comment.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\animation.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:fps arg:codec arg:bitrate arg:extra_args arg:metadata arguments arg arg arg arg arg arg If Compare Call Raise Call Call Call Assign Assign"
  },
  {
    "library": "scikit-learn",
    "name": "poisson_loss",
    "source_code": "def poisson_loss(y_true, y_pred, sample_weight=None):\n    return np.average(xlogy(y_true, y_true / y_pred) - y_true + y_pred, weights=sample_weight, axis=0).sum()",
    "docstring": "Compute (half of the) Poisson deviance loss for regression. Parameters ---------- y_true : array-like or label indicator matrix Ground truth (correct) labels. y_pred : array-like or label indicator matrix Predicted values, as returned by a regression estimator. sample_weight : array-like of shape (n_samples,), default=None Sample weights. Returns ------- loss : float The degree to which the samples are correctly predicted.",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\neural_network\\_base.py",
    "ast_data": "FunctionDef name:poisson_loss arg:y_true arg:y_pred arg:sample_weight arguments arg arg arg Return return:yes Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "ln_structured",
    "source_code": "def ln_structured(module, name, amount, n, dim, importance_scores=None):\n    LnStructured.apply(module, name, amount, n, dim, importance_scores=importance_scores)\n    return module",
    "docstring": "Prune tensor by removing channels with the lowest L\\ `torch.norm`. dim (int): index of the dim along which we define channels to prune. importance_scores (torch.Tensor): tensor of importance scores (of same shape as module parameter) used to compute mask for pruning. The values in this tensor indicate the importance of the corresponding elements in the parameter being pruned. If unspecified or None, the module parameter will be used in its place. Returns: module (nn.Module): modified (i.e. pruned) version of the input module Examples: >>> from torch.nn.utils import prune >>> m = prune.ln_structured( ... nn.Conv2d(5, 3, 2), 'weight', amount=0.3, dim=1, n=float('-inf') ... )",
    "type": "function",
    "file_path": "pytorch\\torch\\nn\\utils\\prune.py",
    "ast_data": "FunctionDef name:ln_structured arg:module arg:name arg:amount arg:n arg:dim arg:importance_scores arguments arg arg arg arg arg arg Call Return return:yes"
  },
  {
    "library": "numpy",
    "name": "has_nested_fields",
    "source_code": "def has_nested_fields(ndtype):\n    return any((ndtype[name].names is not None for name in ndtype.names or ()))",
    "docstring": "Returns whether one or several fields of a dtype are nested. Parameters ---------- ndtype : dtype Data-type of a structured array. Raises ------ AttributeError If does not have a attribute. Examples -------- >>> import numpy as np >>> dt = np.dtype([('name', 'S4'), ('x', float), ('y', float)]) >>> np.lib._iotools.has_nested_fields(dt) False",
    "type": "function",
    "file_path": "numpy\\numpy\\lib\\_iotools.py",
    "ast_data": "FunctionDef name:has_nested_fields arg:ndtype arguments arg Return return:yes Call Compare BoolOp"
  },
  {
    "library": "pytorch",
    "name": "get_overwrite_module_params_on_conversion",
    "source_code": "def get_overwrite_module_params_on_conversion() -> bool:\n    return _overwrite_module_params_on_conversion",
    "docstring": "Returns whether to assign new tensors to the parameters instead of changing the existing parameters in-place when converting an :class:. Defaults to `~torch.__future__.set_overwrite_module_params_on_conversion` for more information.",
    "type": "function",
    "file_path": "pytorch\\torch\\__future__.py",
    "ast_data": "FunctionDef name:get_overwrite_module_params_on_conversion arguments Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "get_quantized_weight_bias_dict",
    "source_code": "def get_quantized_weight_bias_dict(self):\n    quantized_weight_bias_dict = {}\n    for wn in self._flat_weights_names:\n        if hasattr(self, wn):\n            if wn.startswith('weight'):\n                weight_or_bias = get_quantized_weight(self, wn)\n            else:\n                weight_or_bias = getattr(self, wn)\n        else:\n            weight_or_bias = None\n        quantized_weight_bias_dict[wn] = weight_or_bias\n    return quantized_weight_bias_dict",
    "docstring": "dictionary from flat_weight_name to quantized weight or (unquantized) bias e.g. { \"weight_ih_l0\": quantized_weight, \"bias_ih_l0\": unquantized_bias, ... }",
    "type": "method",
    "file_path": "pytorch\\torch\\ao\\nn\\quantized\\reference\\modules\\rnn.py",
    "ast_data": "FunctionDef name:get_quantized_weight_bias_dict arg:self arguments arg Assign For If Call If Call Assign Call Assign Call Assign Assign Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "set_exception",
    "source_code": "def set_exception(self, result: T) -> None:\n    assert isinstance(result, Exception), f'{result} is of type {type(result)}, not an Exception.'\n\n    def raise_error(fut_result):\n        raise fut_result\n    super()._set_unwrap_func(raise_error)\n    self.set_result(result)",
    "docstring": "Set an exception for this ``. Example:: >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_FUTURES) >>> fut = torch.futures.Future() >>> fut.set_exception(ValueError(\"foo\")) >>> fut.wait() Traceback (most recent call last): ... ValueError: foo",
    "type": "method",
    "file_path": "pytorch\\torch\\futures\\__init__.py",
    "ast_data": "FunctionDef name:set_exception arg:self arg:result arguments arg arg Call Call FunctionDef name:raise_error arg:fut_result arguments arg Raise Call Call Call"
  },
  {
    "library": "scikit-learn",
    "name": "_Booleans",
    "source_code": "class _Booleans(_Constraint):\n\n    def __init__(self):\n        super().__init__()\n        self._constraints = [_InstancesOf(bool), _InstancesOf(np.bool_)]\n\n    def is_satisfied_by(self, val):\n        return any((c.is_satisfied_by(val) for c in self._constraints))\n\n    def __str__(self):\n        return f'{', '.join([str(c) for c in self._constraints[:-1]])} or {self._constraints[-1]}'",
    "docstring": "Constraint representing boolean likes. Convenience class for [bool, np.bool_]",
    "type": "class",
    "file_path": "scikit-learn\\sklearn\\utils\\_param_validation.py",
    "ast_data": "ClassDef name:_Booleans FunctionDef name:__init__ arg:self arguments arg Call Call Assign Call Call FunctionDef name:is_satisfied_by arg:self arg:val arguments arg arg Return return:yes Call Call FunctionDef name:__str__ arg:self arguments arg Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "RepresentativeDataset",
    "source_code": "@_tf_export('lite.RepresentativeDataset')\nclass RepresentativeDataset:\n\n    def __init__(self, input_gen):\n        self.input_gen = input_gen",
    "docstring": "Representative dataset used to optimize the model. This is a generator function that provides a small dataset to calibrate or estimate the range, i.e, (min, max) of all floating-point arrays in the model (such as model input, activation outputs of intermediate layers, and model output) for quantization. Usually, this is a small subset of a few hundred samples randomly chosen, in no particular order, from the training or evaluation dataset.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\lite\\python\\lite.py",
    "ast_data": "ClassDef name:RepresentativeDataset FunctionDef name:__init__ arg:self arg:input_gen arguments arg arg Assign Call"
  },
  {
    "library": "pandas",
    "name": "main",
    "source_code": "def main(source_paths: list[str]) -> int:\n    number_of_errors: int = 0\n    for filename in source_paths:\n        for title, line_number in find_titles(filename):\n            if title != correct_title_capitalization(title):\n                print(f'{filename}:{line_number}:{err_msg} \"{title}\" to \"{correct_title_capitalization(title)}\" ')\n                number_of_errors += 1\n    return number_of_errors",
    "docstring": "The main method to print all headings with incorrect capitalization. Parameters ---------- source_paths : str List of directories to validate, provided through command line arguments. Returns ------- int Number of incorrect headings found overall.",
    "type": "function",
    "file_path": "pandas\\scripts\\validate_rst_title_capitalization.py",
    "ast_data": "FunctionDef name:main arg:source_paths arguments arg For For Call If Compare Call Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "adaptive_avg_pool3d",
    "source_code": "def adaptive_avg_pool3d(input: Tensor, output_size: BroadcastingList3[int]) -> Tensor:\n    if has_torch_function_unary(input):\n        return handle_torch_function(adaptive_avg_pool3d, (input,), input, output_size)\n    _output_size = _list_with_default(output_size, input.size())\n    return torch._C._nn.adaptive_avg_pool3d(input, _output_size)",
    "docstring": "Apply a 3D adaptive average pooling over an input signal composed of several input planes. See :class: for details and output shape. Args: output_size: the target output size (single integer or triple-integer tuple)",
    "type": "function",
    "file_path": "pytorch\\torch\\nn\\functional.py",
    "ast_data": "FunctionDef name:adaptive_avg_pool3d arg:input arg:output_size arguments arg arg If Call Return return:yes Call Assign Call Call Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "generate_file_pxd",
    "source_code": "def generate_file_pxd(sigs, lib_name):\n    if lib_name == 'BLAS':\n        preamble = blas_pxd_preamble\n    elif lib_name == 'LAPACK':\n        preamble = lapack_pxd_preamble\n    else:\n        raise RuntimeError(f'Unrecognized lib_name: {lib_name}.')\n    preamble = ['\"\"\"\\n', *COMMENT_TEXT, '\"\"\"\\n', preamble]\n    decls = [generate_decl_pxd(**sig) for sig in sigs]\n    content = preamble + decls\n    return ''.join(content)",
    "docstring": "Create content for Cython header file for generated pyx.",
    "type": "function",
    "file_path": "scipy\\scipy\\linalg\\_generate_pyx.py",
    "ast_data": "FunctionDef name:generate_file_pxd arg:sigs arg:lib_name arguments arg arg If Compare Assign If Compare Assign Raise Call Assign Assign Call Assign Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "render_git_describe",
    "source_code": "def render_git_describe(pieces):\n    if pieces['closest-tag']:\n        rendered = pieces['closest-tag']\n        if pieces['distance']:\n            rendered += f'-{pieces['distance']}-g{pieces['short']}'\n    else:\n        rendered = pieces['short']\n    if pieces['dirty']:\n        rendered += '-dirty'\n    return rendered",
    "docstring": "TAG[-DISTANCE-gHEX][-dirty]. Like 'git describe --tags --dirty --always'. Exceptions: 1: no tags. HEX[-dirty] (note: no 'g' prefix)",
    "type": "function",
    "file_path": "pandas\\pandas\\_version.py",
    "ast_data": "FunctionDef name:render_git_describe arg:pieces arguments arg If Assign If Assign If Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "__init__",
    "source_code": "def __init__(self, angleA=90, angleB=0, rad=0.0):\n    self.angleA = angleA\n    self.angleB = angleB\n    self.rad = rad",
    "docstring": "Parameters ---------- angleA : float Starting angle of the path. angleB : float Ending angle of the path. rad : float Rounding radius of the edge.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\patches.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:angleA arg:angleB arg:rad arguments arg arg arg arg Assign Assign Assign"
  },
  {
    "library": "pytorch",
    "name": "CUDAGraph",
    "source_code": "class CUDAGraph(torch._C._CUDAGraph):\n\n    def __new__(cls):\n        return super().__new__(cls)\n\n    def capture_begin(self, pool=None, capture_error_mode='global'):\n        super().capture_begin(pool=pool, capture_error_mode=capture_error_mode)\n\n    def capture_end(self):\n        super().capture_end()\n\n    def replay(self):\n        super().replay()\n\n    def reset(self):\n        super().reset()\n\n    def pool(self):\n        return super().pool()\n\n    def enable_debug_mode(self):\n        return super().enable_debug_mode()\n\n    def debug_dump(self, debug_path):\n        return super().debug_dump(debug_path)",
    "docstring": "Wrapper around a CUDA graph. .. warning:: This API is in beta and may change in future releases.",
    "type": "class",
    "file_path": "pytorch\\torch\\cuda\\graphs.py",
    "ast_data": "ClassDef name:CUDAGraph FunctionDef name:__new__ arg:cls arguments arg Return return:yes Call Call FunctionDef name:capture_begin arg:self arg:pool arg:capture_error_mode arguments arg arg arg Call Call FunctionDef name:capture_end arg:self arguments arg Call Call FunctionDef name:replay arg:self arguments arg Call Call FunctionDef name:reset arg:self arguments arg Call Call FunctionDef name:pool arg:self arguments arg Return return:yes Call Call FunctionDef name:enable_debug_mode arg:self arguments arg Return return:yes Call Call FunctionDef name:debug_dump arg:self arg:debug_path arguments arg arg Return return:yes Call Call"
  },
  {
    "library": "matplotlib",
    "name": "set_offset_position",
    "source_code": "def set_offset_position(self, position):\n    x, y = self.offsetText.get_position()\n    x = _api.check_getitem({'left': 0, 'right': 1}, position=position)\n    self.offsetText.set_ha(position)\n    self.offsetText.set_position((x, y))\n    self.stale = True",
    "docstring": "Parameters ---------- position : {'left', 'right'}",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\axis.py",
    "ast_data": "FunctionDef name:set_offset_position arg:self arg:position arguments arg arg Assign Call Assign Call Call Call Assign"
  },
  {
    "library": "scikit-learn",
    "name": "get_n_splits",
    "source_code": "def get_n_splits(self, X=None, y=None, groups=None):\n    rng = check_random_state(self.random_state)\n    cv = self.cv(random_state=rng, shuffle=True, **self.cvargs)\n    return cv.get_n_splits(X, y, groups) * self.n_repeats",
    "docstring": "Returns the number of splitting iterations in the cross-validator. Parameters ---------- X : object Always ignored, exists for compatibility. `` may be used as a placeholder. groups : array-like of shape (n_samples,), default=None Group labels for the samples used while splitting the dataset into train/test set. Returns ------- n_splits : int Returns the number of splitting iterations in the cross-validator.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\model_selection\\_split.py",
    "ast_data": "FunctionDef name:get_n_splits arg:self arg:X arg:y arg:groups arguments arg arg arg arg Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "IteratorSpec",
    "source_code": "@tf_export('data.IteratorSpec', v1=[])\nclass IteratorSpec(type_spec.TypeSpec):\n    __slots__ = ['_element_spec']\n\n    def __init__(self, element_spec):\n        self._element_spec = element_spec\n\n    @property\n    def value_type(self):\n        return OwnedIterator\n\n    def _serialize(self):\n        return (self._element_spec,)\n\n    @property\n    def _component_specs(self):\n        return (tensor.TensorSpec([], dtypes.resource),)\n\n    def _to_components(self, value):\n        return (value._iterator_resource,)\n\n    def _from_components(self, components):\n        return OwnedIterator(dataset=None, components=components, element_spec=self._element_spec)\n\n    @staticmethod\n    def from_value(value):\n        return IteratorSpec(value.element_spec)",
    "docstring": "Type specification for . For instance, can be used to define a tf.function that takes as an input argument: >>> @tf.function(input_signature=[tf.data.IteratorSpec( ... tf.TensorSpec(shape=(), dtype=tf.int32, name=None))]) ... def square(iterator): ... x = iterator.get_next() ... return x * x >>> dataset = tf.data.Dataset.from_tensors(5) >>> iterator = iter(dataset) >>> print(square(iterator)) tf.Tensor(25, shape=(), dtype=int32) Attributes: element_spec: A (nested) structure of objects that represents the type specification of the iterator elements.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\data\\ops\\iterator_ops.py",
    "ast_data": "ClassDef name:IteratorSpec Assign FunctionDef name:__init__ arg:self arg:element_spec arguments arg arg Assign FunctionDef name:value_type arg:self arguments arg Return return:yes FunctionDef name:_serialize arg:self arguments arg Return return:yes FunctionDef name:_component_specs arg:self arguments arg Return return:yes Call FunctionDef name:_to_components arg:self arg:value arguments arg arg Return return:yes FunctionDef name:_from_components arg:self arg:components arguments arg arg Return return:yes Call FunctionDef name:from_value arg:value arguments arg Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_maybe_copy_to_context_device",
    "source_code": "def _maybe_copy_to_context_device(tensor, device_name):\n    in_device = tensor.backing_device\n    if device_name == in_device:\n        return tensor\n    else:\n        return tensor._copy()",
    "docstring": "Copy an EagerTensor to the current device if it's not on .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\script_ops.py",
    "ast_data": "FunctionDef name:_maybe_copy_to_context_device arg:tensor arg:device_name arguments arg arg Assign If Compare Return return:yes Return return:yes Call"
  },
  {
    "library": "django",
    "name": "chunk",
    "source_code": "def chunk(data, index):\n    return (data[:index], data[index:])",
    "docstring": "Split a string into two parts at the input index.",
    "type": "function",
    "file_path": "django\\django\\contrib\\gis\\db\\backends\\postgis\\pgraster.py",
    "ast_data": "FunctionDef name:chunk arg:data arg:index arguments arg arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "from_saved_model",
    "source_code": "@classmethod\ndef from_saved_model(cls, saved_model_dir, input_arrays=None, input_shapes=None, output_arrays=None, tag_set=None, signature_key=None):\n    TFLiteConverterBase._set_original_model_type(conversion_metadata_fb.ModelType.TF_SAVED_MODEL)\n    if tag_set is None:\n        tag_set = set([_tag_constants.SERVING])\n    if signature_key is None:\n        signature_key = _signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY\n    saved_model_converter = TFLiteSavedModelConverter(saved_model_dir, tag_set, [signature_key])\n    if saved_model_converter.saved_model_dir:\n        return saved_model_converter\n    result = _freeze_saved_model(saved_model_dir, input_arrays, input_shapes, output_arrays, tag_set, signature_key)\n    return cls(graph_def=result[0], input_tensors=result[1], output_tensors=result[2], experimental_debug_info_func=_build_debug_info_func(result[3]))",
    "docstring": "Creates a TFLiteConverter class from a SavedModel. Args: saved_model_dir: SavedModel directory to convert. input_arrays: List of input tensors to freeze graph with. Uses input arrays from SignatureDef when none are provided. (default None) input_shapes: Dict of strings representing input tensor names to list of integers representing input shapes (e.g., {\"foo\" : [1, 16, 16, 3]}). Automatically determined when input shapes is None (e.g., {\"foo\" : None}). (default None) output_arrays: List of output tensors to freeze graph with. Uses output arrays from SignatureDef when none are provided. (default None) tag_set: Set of tags identifying the MetaGraphDef within the SavedModel to analyze. All tags in the tag set must be present. (default {tf.saved_model.SERVING}) signature_key: Key identifying SignatureDef containing inputs and outputs. (default tf.saved_model.DEFAULT_SERVING_SIGNATURE_DEF_KEY) Returns: TFLiteConverter class.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\lite\\python\\lite.py",
    "ast_data": "FunctionDef name:from_saved_model arg:cls arg:saved_model_dir arg:input_arrays arg:input_shapes arg:output_arrays arg:tag_set arg:signature_key arguments arg arg arg arg arg arg arg Call If Compare Assign Call If Compare Assign Assign Call If Return return:yes Assign Call Return return:yes Call Call"
  },
  {
    "library": "django",
    "name": "BaseDateDetailView",
    "source_code": "class BaseDateDetailView(YearMixin, MonthMixin, DayMixin, DateMixin, BaseDetailView):\n\n    def get_object(self, queryset=None):\n        year = self.get_year()\n        month = self.get_month()\n        day = self.get_day()\n        date = _date_from_string(year, self.get_year_format(), month, self.get_month_format(), day, self.get_day_format())\n        qs = self.get_queryset() if queryset is None else queryset\n        if not self.get_allow_future() and date > datetime.date.today():\n            raise Http404(_('Future %(verbose_name_plural)s not available because %(class_name)s.allow_future is False.') % {'verbose_name_plural': qs.model._meta.verbose_name_plural, 'class_name': self.__class__.__name__})\n        lookup_kwargs = self._make_single_date_lookup(date)\n        qs = qs.filter(**lookup_kwargs)\n        return super().get_object(queryset=qs)",
    "docstring": "Base detail view for a single object on a single date; this differs from the standard DetailView by accepting a year/month/day in the URL. This requires subclassing to provide a response mixin.",
    "type": "class",
    "file_path": "django\\django\\views\\generic\\dates.py",
    "ast_data": "ClassDef name:BaseDateDetailView FunctionDef name:get_object arg:self arg:queryset arguments arg arg Assign Call Assign Call Assign Call Assign Call Call Call Call Assign Compare Call If BoolOp Call Compare Call Raise Call Call Assign Call Assign Call Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "get_size_of_node",
    "source_code": "@compatibility(is_backward_compatible=False)\ndef get_size_of_node(fx_module: GraphModule, node: Node) -> size_bytes:\n    total_num_of_elems = 0\n    if node.op == 'call_module':\n        submodule_dict = dict(fx_module.named_modules())\n        submodule = submodule_dict[node.target]\n        parameters = submodule.named_parameters()\n        for _name, p in parameters:\n            total_num_of_elems += p.numel()\n    tensor_meta = get_tensor_meta(node)\n    output_elem = tensor_meta.shape.numel()\n    total_num_of_elems += output_elem\n    if tensor_meta.is_quantized:\n        size_per_elem_bytes = torch._empty_affine_quantized([], dtype=tensor_meta.dtype).element_size()\n    else:\n        size_per_elem_bytes = torch.tensor([], dtype=tensor_meta.dtype).element_size()\n    total_size = size_per_elem_bytes * total_num_of_elems\n    output_size = size_per_elem_bytes * output_elem\n    return size_bytes(output_size, total_size)",
    "docstring": "Given a node with node.dtype and node.shape, return its total size and its output size. total_size = weights + bias + output_size",
    "type": "function",
    "file_path": "pytorch\\torch\\fx\\passes\\graph_manipulation.py",
    "ast_data": "FunctionDef name:get_size_of_node arg:fx_module arg:node arguments arg arg Assign If Compare Assign Call Call Assign Assign Call For Call Assign Call Assign Call If Assign Call Call Assign Call Call Assign Assign Return return:yes Call Call"
  },
  {
    "library": "django",
    "name": "Input",
    "source_code": "class Input(Widget):\n    input_type = None\n    template_name = 'django/forms/widgets/input.html'\n\n    def __init__(self, attrs=None):\n        if attrs is not None:\n            attrs = attrs.copy()\n            self.input_type = attrs.pop('type', self.input_type)\n        super().__init__(attrs)\n\n    def get_context(self, name, value, attrs):\n        context = super().get_context(name, value, attrs)\n        context['widget']['type'] = self.input_type\n        return context",
    "docstring": "Base class for all widgets.",
    "type": "class",
    "file_path": "django\\django\\forms\\widgets.py",
    "ast_data": "ClassDef name:Input Assign Assign FunctionDef name:__init__ arg:self arg:attrs arguments arg arg If Compare Assign Call Assign Call Call Call FunctionDef name:get_context arg:self arg:name arg:value arg:attrs arguments arg arg arg arg Assign Call Call Assign Return return:yes"
  },
  {
    "library": "sphinx",
    "name": "make_main",
    "source_code": "def make_main(argv: Sequence[str]) -> int:\n    from sphinx.cmd import make_mode\n    return make_mode.run_make_mode(argv[1:])",
    "docstring": "Sphinx build \"make mode\" entry.",
    "type": "function",
    "file_path": "sphinx\\sphinx\\cmd\\build.py",
    "ast_data": "FunctionDef name:make_main arg:argv arguments arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "BuildType",
    "source_code": "class BuildType:\n\n    def __init__(self, cmake_build_type_env: str | None=None) -> None:\n        if cmake_build_type_env is not None:\n            self.build_type_string = cmake_build_type_env\n            return\n        cmake_cache_txt = os.path.join(BUILD_DIR, 'CMakeCache.txt')\n        if os.path.isfile(cmake_cache_txt):\n            from .cmake_utils import get_cmake_cache_variables_from_file\n            with open(cmake_cache_txt) as f:\n                cmake_cache_vars = get_cmake_cache_variables_from_file(f)\n            self.build_type_string = cast(str, cmake_cache_vars['CMAKE_BUILD_TYPE'])\n        else:\n            self.build_type_string = os.environ.get('CMAKE_BUILD_TYPE', 'Release')\n\n    def is_debug(self) -> bool:\n        return self.build_type_string == 'Debug'\n\n    def is_rel_with_deb_info(self) -> bool:\n        return self.build_type_string == 'RelWithDebInfo'\n\n    def is_release(self) -> bool:\n        return self.build_type_string == 'Release'",
    "docstring": "Checks build type. The build type will be given in :attr:. If :attr: is `` does not exist, os.environ['CMAKE_BUILD_TYPE'] will be used. Args: cmake_build_type_env (str): The value of os.environ['CMAKE_BUILD_TYPE']. If None, the actual build type will be inferred.",
    "type": "class",
    "file_path": "pytorch\\tools\\setup_helpers\\env.py",
    "ast_data": "ClassDef name:BuildType FunctionDef name:__init__ arg:self arg:cmake_build_type_env arguments arg arg If Compare Assign Return return:no Assign Call If Call With Call Assign Call Assign Call Assign Call FunctionDef name:is_debug arg:self arguments arg Return return:yes Compare FunctionDef name:is_rel_with_deb_info arg:self arguments arg Return return:yes Compare FunctionDef name:is_release arg:self arguments arg Return return:yes Compare"
  },
  {
    "library": "cherrypy",
    "name": "__init__",
    "source_code": "def __init__(self, bus, pidfile):\n    SimplePlugin.__init__(self, bus)\n    self.pidfile = pidfile\n    self.finalized = False",
    "docstring": "Initialize the PID file plugin.",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\process\\plugins.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:bus arg:pidfile arguments arg arg arg Call Assign Assign"
  },
  {
    "library": "sphinx",
    "name": "index",
    "source_code": "class index(nodes.Invisible, nodes.Inline, nodes.TextElement):\n    pass",
    "docstring": "Node for index entries. This node is created by the `glossary` and",
    "type": "class",
    "file_path": "sphinx\\sphinx\\addnodes.py",
    "ast_data": "ClassDef name:index"
  },
  {
    "library": "numpy",
    "name": "compress_rows",
    "source_code": "def compress_rows(a):\n    a = asarray(a)\n    if a.ndim != 2:\n        raise NotImplementedError('compress_rows works for 2D arrays only.')\n    return compress_rowcols(a, 0)",
    "docstring": "Suppress whole rows of a 2-D array that contain masked values. This is equivalent to `compress_rowcolsxmasknomask`. Must be a 2D array. Returns ------- compressed_array : ndarray The compressed array. See Also -------- compress_rowcols Examples -------- >>> import numpy as np >>> a = np.ma.array(np.arange(9).reshape(3, 3), mask=[[1, 0, 0], ... [1, 0, 0], ... [0, 0, 0]]) >>> np.ma.compress_rows(a) array([[6, 7, 8]])",
    "type": "function",
    "file_path": "numpy\\numpy\\ma\\extras.py",
    "ast_data": "FunctionDef name:compress_rows arg:a arguments arg Assign Call If Compare Raise Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "set_custom_combo_kernel_horizontal_partition",
    "source_code": "def set_custom_combo_kernel_horizontal_partition(algorithm: Callable[[list[BaseSchedulerNode], SIMDScheduling, dict[BaseSchedulerNode, TritonKernel], dict[BaseSchedulerNode, tuple[Any, Any, Any, Any]]], list[list[BaseSchedulerNode]]]) -> None:\n    global _custom_combo_kernel_horizontal_partition_algorithm\n    _custom_combo_kernel_horizontal_partition_algorithm = algorithm",
    "docstring": "Sets the algorithm used to partition nodes into horizontal partitions. Nodes in different partitions are implemented in different combo kernels. Nodes in the same partition are likely to be implemented in the same combo kernel, but subject to subsequent restricts like CUDA limits for number of args. The algorithm should take a list of nodes and return a list of list of nodes. The default algorithm is to partition nodes based on number of block dimensions.",
    "type": "function",
    "file_path": "pytorch\\torch\\_inductor\\codegen\\triton_combo_kernel.py",
    "ast_data": "FunctionDef name:set_custom_combo_kernel_horizontal_partition arg:algorithm arguments arg Assign"
  },
  {
    "library": "authlib",
    "name": "serialize",
    "source_code": "def serialize(self, header, payload, key, sender_key=None):\n    if 'protected' in header or 'unprotected' in header or 'recipients' in header:\n        return self.serialize_json(header, payload, key, sender_key)\n    return self.serialize_compact(header, payload, key, sender_key)",
    "docstring": "Generate a JWE Serialization. It will automatically generate a compact or JSON serialization depending on argument. If is a dict with \"protected\", \"unprotected\" and/or \"recipients\" keys, it will call , otherwise it will call . :param header: A dict of header(s) :param payload: Payload (bytes or a value convertible to bytes) :param key: Public key(s) used to encrypt payload :param sender_key: Sender's private key in case JWEAlgorithmWithTagAwareKeyAgreement is used :return: JWE compact serialization as bytes or JWE JSON serialization as dict",
    "type": "method",
    "file_path": "authlib\\authlib\\jose\\rfc7516\\jwe.py",
    "ast_data": "FunctionDef name:serialize arg:self arg:header arg:payload arg:key arg:sender_key arguments arg arg arg arg arg If BoolOp Compare Compare Compare Return return:yes Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "add_continue_node",
    "source_code": "def add_continue_node(self, ast_node, section_id, guards):\n    node = self._add_jump_node(ast_node, guards)\n    self.continues[section_id].add(node)",
    "docstring": "Grows the graph by adding a reentry node. This node causes control flow to go back to the loop section's entry. Args: ast_node: ast.AST section_id: Hashable, the node for which ast_node should be considered to be an exit node guards: Tuple[ast.AST, ...], the finally sections that guard ast_node",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\autograph\\pyct\\cfg.py",
    "ast_data": "FunctionDef name:add_continue_node arg:self arg:ast_node arg:section_id arg:guards arguments arg arg arg arg Assign Call Call"
  },
  {
    "library": "kornia",
    "name": "warp_grid",
    "source_code": "def warp_grid(grid: Tensor, src_homo_dst: Tensor) -> Tensor:\n    batch_size: int = src_homo_dst.size(0)\n    _, height, width, _ = grid.size()\n    grid = grid.expand(batch_size, -1, -1, -1)\n    if len(src_homo_dst.shape) == 3:\n        src_homo_dst = src_homo_dst.view(batch_size, 1, 3, 3)\n    flow: Tensor = transform_points(src_homo_dst, grid.to(src_homo_dst))\n    return flow.view(batch_size, height, width, 2)",
    "docstring": "Compute the grid to warp the coordinates grid by the homography/ies. Args: grid: Unwrapped grid of the shape :math:. src_homo_dst: Homography or homographies (stacked) to transform all points in the grid. Shape of the homography has to be :math: or :math:. Returns: the transformed grid of shape :math:.",
    "type": "function",
    "file_path": "kornia\\kornia\\geometry\\transform\\imgwarp.py",
    "ast_data": "FunctionDef name:warp_grid arg:grid arg:src_homo_dst arguments arg arg Call Assign Call Assign Call If Compare Call Assign Call Call Call Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "HalfGammaLoss",
    "source_code": "class HalfGammaLoss(BaseLoss):\n\n    def __init__(self, sample_weight=None):\n        super().__init__(closs=CyHalfGammaLoss(), link=LogLink())\n        self.interval_y_true = Interval(0, np.inf, False, False)\n\n    def constant_to_optimal_zero(self, y_true, sample_weight=None):\n        term = -np.log(y_true) - 1\n        if sample_weight is not None:\n            term *= sample_weight\n        return term",
    "docstring": "Half Gamma deviance loss with log-link, for regression. Domain: y_true and y_pred in positive real numbers Link: y_pred = exp(raw_prediction) For a given sample x_i, half Gamma deviance loss is defined as:: loss(x_i) = log(exp(raw_prediction_i)/y_true_i) + y_true/exp(raw_prediction_i) - 1 Half the Gamma deviance is actually proportional to the negative log- likelihood up to constant terms (not involving raw_prediction) and simplifies the computation of the gradients. We also skip the constant term .",
    "type": "class",
    "file_path": "scikit-learn\\sklearn\\_loss\\loss.py",
    "ast_data": "ClassDef name:HalfGammaLoss FunctionDef name:__init__ arg:self arg:sample_weight arguments arg arg Call Call Call Call Assign Call FunctionDef name:constant_to_optimal_zero arg:self arg:y_true arg:sample_weight arguments arg arg arg Assign Call If Compare Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_prune_nodes_from_input_and_recipient_maps",
    "source_code": "def _prune_nodes_from_input_and_recipient_maps(self, nodes_to_prune):\n    for node in nodes_to_prune:\n        del self._node_inputs[node]\n        del self._node_ctrl_inputs[node]\n        del self._node_recipients[node]\n        del self._node_ctrl_recipients[node]",
    "docstring": "Prune nodes out of input and recipient maps. Args: nodes_to_prune: ( of ) Names of the nodes to be pruned.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\debug\\lib\\debug_graphs.py",
    "ast_data": "FunctionDef name:_prune_nodes_from_input_and_recipient_maps arg:self arg:nodes_to_prune arguments arg arg For"
  },
  {
    "library": "scikit-learn",
    "name": "_apply_where",
    "source_code": "def _apply_where(cond: Array, f1: Callable[..., Array], f2: Callable[..., Array] | None, fill_value: Array | int | float | complex | bool | None, *args: Array, xp: ModuleType) -> Array:\n    if is_jax_namespace(xp):\n        return xp.where(cond, f1(*args), f2(*args) if f2 is not None else fill_value)\n    temp1 = f1(*(arr[cond] for arr in args))\n    if f2 is None:\n        dtype = xp.result_type(temp1, fill_value)\n        if isinstance(fill_value, int | float | complex):\n            out = xp.full_like(cond, dtype=dtype, fill_value=fill_value)\n        else:\n            out = xp.astype(fill_value, dtype, copy=True)\n    else:\n        ncond = ~cond\n        temp2 = f2(*(arr[ncond] for arr in args))\n        dtype = xp.result_type(temp1, temp2)\n        out = xp.empty_like(cond, dtype=dtype)\n        out = at(out, ncond).set(temp2)\n    return at(out, cond).set(temp1)",
    "docstring": "Helper of . On Dask, this runs on a single chunk.",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\externals\\array_api_extra\\_lib\\_funcs.py",
    "ast_data": "FunctionDef name:_apply_where arg:cond arg:f1 arg:f2 arg:fill_value arguments arg arg arg arg arg arg If Call Return return:yes Call Call Compare Call Assign Call If Compare Assign Call If Call Assign Call Assign Call Assign Assign Call Assign Call Assign Call Assign Call Call Return return:yes Call Call"
  },
  {
    "library": "scipy",
    "name": "EnzymeReaction",
    "source_code": "class EnzymeReaction(LSQBenchmarkProblem):\n    INITIAL_GUESSES = [np.array([2.5, 3.9, 4.15, 3.9]) * 0.1]\n\n    def __init__(self, x0_ind):\n        super().__init__(4, 11, 0.0003075057, x0_ind)\n        self.u = np.array([4.0, 2.0, 1.0, 0.5, 0.25, 0.167, 0.125, 0.1, 0.0833, 0.0714, 0.0625])\n        self.y = np.array([0.1957, 0.1947, 0.1735, 0.16, 0.0844, 0.0627, 0.0456, 0.0342, 0.0323, 0.0235, 0.0246])\n\n    def fun(self, x):\n        return x[0] * (self.u ** 2 + x[1] * self.u) / (self.u ** 2 + x[2] * self.u + x[3]) - self.y\n\n    def jac(self, x):\n        J = np.empty((self.m, self.n))\n        den = self.u ** 2 + x[2] * self.u + x[3]\n        num = self.u ** 2 + x[1] * self.u\n        J[:, 0] = num / den\n        J[:, 1] = x[0] * self.u / den\n        J[:, 2] = -x[0] * num * self.u / den ** 2\n        J[:, 3] = -x[0] * num / den ** 2\n        return J",
    "docstring": "The problem of fitting kinetic parameters for an enzyme reaction, [1]_. Number of variables --- 4, number of residuals --- 11, no bounds. .. [1] Brett M. Averick et al. \"The MINPACK-2 Test Problem Collection\", p. 29",
    "type": "class",
    "file_path": "scipy\\benchmarks\\benchmarks\\lsq_problems.py",
    "ast_data": "ClassDef name:EnzymeReaction Assign Call FunctionDef name:__init__ arg:self arg:x0_ind arguments arg arg Call Call Assign Call Assign Call FunctionDef name:fun arg:self arg:x arguments arg arg Return return:yes FunctionDef name:jac arg:self arg:x arguments arg arg Assign Call Assign Assign Assign Assign Assign Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "copy_file",
    "source_code": "def copy_file(src_file: str, dst_dir: str, strip: str=None, dest_file: str=None) -> None:\n    dest = dest_file if dest_file else src_file\n    if dest.startswith('bazel-out'):\n        dest = dest[dest.index('bin') + 4:]\n    if strip:\n        dest = dest.removeprefix(strip)\n    dest_dir_path = os.path.join(dst_dir, os.path.dirname(dest))\n    os.makedirs(dest_dir_path, exist_ok=True)\n    shutil.copy(src_file, dest_dir_path)\n    os.chmod(os.path.join(dst_dir, dest), 420)",
    "docstring": "Copy a file to the destination directory. Args: src_file: file to be copied dst_dir: destination directory strip: prefix to strip before copying to destination dest_file: destanation file location if different from src_file",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\tools\\pip_package\\utils\\utils.py",
    "ast_data": "FunctionDef name:copy_file arg:src_file arg:dst_dir arg:strip arg:dest_file arguments arg arg arg arg Assign If Call Assign Call If Assign Call Assign Call Call Call Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "display",
    "source_code": "def display(self) -> None:\n    print(json.dumps(self.asdict()), flush=True)",
    "docstring": "Print to stdout for lintrunner to consume.",
    "type": "method",
    "file_path": "pytorch\\tools\\linter\\adapters\\ruff_linter.py",
    "ast_data": "FunctionDef name:display arg:self arguments arg Call Call Call"
  },
  {
    "library": "numpy",
    "name": "__init__",
    "source_code": "def __init__(self, eps):\n    self.eps = eps",
    "docstring": "domain_tan(eps) = true where abs(cos(x)) < eps)",
    "type": "method",
    "file_path": "numpy\\numpy\\ma\\core.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:eps arguments arg arg Assign"
  },
  {
    "library": "cherrypy",
    "name": "_setup",
    "source_code": "def _setup(self):\n    cherrypy.serving.request.error_response = self._wrapper",
    "docstring": "Wire this tool into ``. The standard CherryPy request object will automatically call this method when the tool is \"turned on\" in config.",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\_cptools.py",
    "ast_data": "FunctionDef name:_setup arg:self arguments arg Assign"
  },
  {
    "library": "pytorch",
    "name": "get_input_dtype",
    "source_code": "def get_input_dtype(node: torch.fx.Node) -> Optional[torch.dtype]:\n    if node.target == 'store':\n        return V.graph.get_dtype(node.args[1])\n    elif node.target == 'to_dtype_bitcast':\n        return node.args[-1]\n    elif node.target == 'to_dtype':\n        if len(node.args) > 3:\n            return node.args[3]\n        else:\n            return node.kwargs.get('src_dtype', None)\n    else:\n        return None",
    "docstring": "Get input dtype for nodes that may consumes lowp fp dt",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\codegen\\cpp.py",
    "ast_data": "FunctionDef name:get_input_dtype arg:node arguments arg If Compare Return return:yes Call If Compare Return return:yes If Compare If Compare Call Return return:yes Return return:yes Call Return return:no"
  },
  {
    "library": "cryptography",
    "name": "__copy__",
    "source_code": "@abc.abstractmethod\ndef __copy__(self) -> Ed25519PublicKey:\n    pass",
    "docstring": "Returns a copy.",
    "type": "method",
    "file_path": "cryptography\\src\\cryptography\\hazmat\\primitives\\asymmetric\\ed25519.py",
    "ast_data": "FunctionDef name:__copy__ arg:self arguments arg"
  },
  {
    "library": "sphinx",
    "name": "terminal_safe",
    "source_code": "def terminal_safe(s: str, /) -> str:\n    return s.encode('ascii', 'backslashreplace').decode('ascii')",
    "docstring": "Safely encode a string for printing to the terminal.",
    "type": "function",
    "file_path": "sphinx\\sphinx\\_cli\\util\\errors.py",
    "ast_data": "FunctionDef name:terminal_safe arguments arg Return return:yes Call Call"
  },
  {
    "library": "django",
    "name": "get_edited_object",
    "source_code": "def get_edited_object(self):\n    return self.content_type.get_object_for_this_type(pk=self.object_id)",
    "docstring": "Return the edited object represented by this log entry.",
    "type": "method",
    "file_path": "django\\django\\contrib\\admin\\models.py",
    "ast_data": "FunctionDef name:get_edited_object arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "django",
    "name": "_get_field_choices",
    "source_code": "def _get_field_choices():\n    queue = collections.deque([(None, self.klass_info)])\n    while queue:\n        parent_path, klass_info = queue.popleft()\n        if parent_path is None:\n            path = []\n            yield 'self'\n        else:\n            field = klass_info['field']\n            if klass_info['reverse']:\n                field = field.remote_field\n            path = [*parent_path, field.name]\n            yield LOOKUP_SEP.join(path)\n        queue.extend(((path, klass_info) for klass_info in _get_parent_klass_info(klass_info)))\n        queue.extend(((path, klass_info) for klass_info in klass_info.get('related_klass_infos', [])))",
    "docstring": "Yield all allowed field paths in breadth-first search order.",
    "type": "method",
    "file_path": "django\\django\\db\\models\\sql\\compiler.py",
    "ast_data": "FunctionDef name:_get_field_choices arguments Assign Call While Assign Call If Compare Assign Assign If Assign Assign Call Call Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_to_proto",
    "source_code": "def _to_proto(self):\n    raise NotImplementedError('{}._to_proto()'.format(type(self).__name__))",
    "docstring": "Convert options to protocol buffer.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\data\\util\\options.py",
    "ast_data": "FunctionDef name:_to_proto arg:self arguments arg Raise Call Call Call"
  },
  {
    "library": "scipy",
    "name": "__new__",
    "source_code": "def __new__(cls, *system, **kwargs):\n    if len(system) == 1 and isinstance(system[0], LinearTimeInvariant):\n        return system[0].to_zpk()\n    if cls is ZerosPolesGain:\n        if kwargs.get('dt') is None:\n            return ZerosPolesGainContinuous.__new__(ZerosPolesGainContinuous, *system, **kwargs)\n        else:\n            return ZerosPolesGainDiscrete.__new__(ZerosPolesGainDiscrete, *system, **kwargs)\n    return super().__new__(cls)",
    "docstring": "Handle object conversion if input is an instance of",
    "type": "method",
    "file_path": "scipy\\scipy\\signal\\_ltisys.py",
    "ast_data": "FunctionDef name:__new__ arg:cls arguments arg arg arg If BoolOp Compare Call Call Return return:yes Call If Compare If Compare Call Return return:yes Call Return return:yes Call Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "shapes",
    "source_code": "@property\ndef shapes(self):\n    return self._shapes",
    "docstring": "The list of shapes for each component of a staging area element.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\data_flow_ops.py",
    "ast_data": "FunctionDef name:shapes arg:self arguments arg Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "margins",
    "source_code": "def margins(self, *margins, x=None, y=None, z=None, tight=True):\n    if margins and (x is not None or y is not None or z is not None):\n        raise TypeError('Cannot pass both positional and keyword arguments for x, y, and/or z.')\n    elif len(margins) == 1:\n        x = y = z = margins[0]\n    elif len(margins) == 3:\n        x, y, z = margins\n    elif margins:\n        raise TypeError('Must pass a single positional argument for all margins, or one for each margin (x, y, z).')\n    if x is None and y is None and (z is None):\n        if tight is not True:\n            _api.warn_external(f'ignoring tight={tight!r} in get mode')\n        return (self._xmargin, self._ymargin, self._zmargin)\n    if x is not None:\n        self.set_xmargin(x)\n    if y is not None:\n        self.set_ymargin(y)\n    if z is not None:\n        self.set_zmargin(z)\n    self.autoscale_view(tight=tight, scalex=x is not None, scaley=y is not None, scalez=z is not None)",
    "docstring": "Set or retrieve autoscaling margins. See for full documentation. Because this function applies to 3D Axes, it also takes a *z* argument, and returns ``.",
    "type": "method",
    "file_path": "matplotlib\\lib\\mpl_toolkits\\mplot3d\\axes3d.py",
    "ast_data": "FunctionDef name:margins arg:self arguments arg arg arg arg arg arg If BoolOp BoolOp Compare Compare Compare Raise Call If Compare Call Assign If Compare Call Assign If Raise Call If BoolOp Compare Compare Compare If Compare Call Return return:yes If Compare Call If Compare Call If Compare Call Call Compare Compare Compare"
  },
  {
    "library": "pytorch",
    "name": "pop",
    "source_code": "def pop(self, key: str) -> Module:\n    v = self[key]\n    del self[key]\n    return v",
    "docstring": "Remove key from the ModuleDict and return its module. Args: key (str): key to pop from the ModuleDict",
    "type": "method",
    "file_path": "pytorch\\torch\\nn\\modules\\container.py",
    "ast_data": "FunctionDef name:pop arg:self arg:key arguments arg arg Assign Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_poll",
    "source_code": "@abc.abstractmethod\ndef _poll(self) -> Optional[RunProcsResult]:\n    raise NotImplementedError",
    "docstring": "Poll the run status of the processes running under this context. This method follows an \"all-or-nothing\" policy and returns a `` if all processes are still running.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\elastic\\multiprocessing\\api.py",
    "ast_data": "FunctionDef name:_poll arg:self arguments arg Raise"
  },
  {
    "library": "pandas",
    "name": "get_calendar",
    "source_code": "def get_calendar(name: str) -> AbstractHolidayCalendar:\n    return holiday_calendars[name]()",
    "docstring": "Return an instance of a calendar based on its name. Parameters ---------- name : str Calendar name to return an instance of",
    "type": "function",
    "file_path": "pandas\\pandas\\tseries\\holiday.py",
    "ast_data": "FunctionDef name:get_calendar arg:name arguments arg Return return:yes Call"
  },
  {
    "library": "kornia",
    "name": "log",
    "source_code": "def log(self) -> Tensor:\n    return self.z.imag.atan2(self.z.real)",
    "docstring": "Convert elements of lie group to elements of lie algebra. Example: >>> real = torch.tensor([1.0]) >>> imag = torch.tensor([3.0]) >>> So2(torch.complex(real, imag)).log() tensor([1.2490], grad_fn=)",
    "type": "method",
    "file_path": "kornia\\kornia\\geometry\\liegroup\\so2.py",
    "ast_data": "FunctionDef name:log arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "maximum",
    "source_code": "def maximum(self, other):\n    return self._maximum_minimum(other, np.maximum)",
    "docstring": "Element-wise maximum between this and another array/matrix.",
    "type": "method",
    "file_path": "scipy\\scipy\\sparse\\_base.py",
    "ast_data": "FunctionDef name:maximum arg:self arg:other arguments arg arg Return return:yes Call"
  },
  {
    "library": "kornia",
    "name": "inverse",
    "source_code": "def inverse(self) -> NamedPose:\n    return NamedPose(self._dst_from_src.inverse(), self._frame_dst, self._frame_src)",
    "docstring": "Inverse of the NamedPose. Returns: Inverse of the NamedPose. Example: >>> b_from_a = NamedPose(Se3.identity(), frame_src=\"frame_a\", frame_dst=\"frame_b\") >>> b_from_a.inverse() NamedPose(dst_from_src=rotation: Parameter containing: tensor([1., -0., -0., -0.], requires_grad=True) translation: x: 0.0 y: 0.0 z: 0.0, frame_src: frame_b -> frame_dst: frame_a)",
    "type": "method",
    "file_path": "kornia\\kornia\\geometry\\pose.py",
    "ast_data": "FunctionDef name:inverse arg:self arguments arg Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "mock",
    "source_code": "def mock(self, include: 'GlobPattern', *, exclude: 'GlobPattern'=(), allow_empty: bool=True):\n    self.patterns[GlobGroup(include, exclude=exclude)] = _PatternInfo(_ModuleProviderAction.MOCK, allow_empty)",
    "docstring": "Replace some required modules with a mock implementation. Mocked modules will return a fake object for any attribute accessed from it. Because we copy file-by-file, the dependency resolution will sometimes find files that are imported by model files but whose functionality is never used (e.g. custom serialization code or training helpers). Use this function to mock this functionality out without having to modify the original code. Args: include (Union[List[str], str]): A string e.g. `mockclose`, no such exception is thrown.",
    "type": "method",
    "file_path": "pytorch\\torch\\package\\package_exporter.py",
    "ast_data": "FunctionDef name:mock arg:self arg:include arguments arg arg arg arg Assign Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_maybe_create_attribute",
    "source_code": "@trackable.no_automatic_dependency_tracking\ndef _maybe_create_attribute(self, name, default_value):\n    if not hasattr(self, name):\n        self.__setattr__(name, default_value)",
    "docstring": "Create the attribute with the default value if it hasn't been created. This is useful for fields that is used for tracking purpose, _trainable_weights, or _layers. Note that user could create a layer subclass and assign an internal field before invoking the Layer.__init__(), the __setattr__() need to create the tracking fields and __init__() need to not override them. Args: name: String, the name of the attribute. default_value: Object, the default value of the attribute.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\base_layer.py",
    "ast_data": "FunctionDef name:_maybe_create_attribute arg:self arg:name arg:default_value arguments arg arg arg If Call Call"
  },
  {
    "library": "pytorch",
    "name": "lookup_object",
    "source_code": "def lookup_object(self, index: MetadataIndex) -> Any:\n    return find_state_dict_object(self.state_dict, index)",
    "docstring": "Extension from the planner interface to make it easy to extend the default planner.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\checkpoint\\default_planner.py",
    "ast_data": "FunctionDef name:lookup_object arg:self arg:index arguments arg arg Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "hide",
    "source_code": "def hide(self, subset: Subset | None=None, axis: Axis=0, level: Level | list[Level] | None=None, names: bool=False) -> Styler:\n    axis = self.data._get_axis_number(axis)\n    if axis == 0:\n        obj, objs, alt = ('index', 'index', 'rows')\n    else:\n        obj, objs, alt = ('column', 'columns', 'columns')\n    if level is not None and subset is not None:\n        raise ValueError('`subset` and `level` cannot be passed simultaneously')\n    if subset is None:\n        if level is None and names:\n            setattr(self, f'hide_{obj}_names', True)\n            return self\n        levels_ = refactor_levels(level, getattr(self, objs))\n        setattr(self, f'hide_{objs}_', [lev in levels_ for lev in range(getattr(self, objs).nlevels)])\n    else:\n        if axis == 0:\n            subset_ = IndexSlice[subset, :]\n        else:\n            subset_ = IndexSlice[:, subset]\n        subset = non_reducing_slice(subset_)\n        hide = self.data.loc[subset]\n        h_els = getattr(self, objs).get_indexer_for(getattr(hide, objs))\n        setattr(self, f'hidden_{alt}', h_els)\n    if names:\n        setattr(self, f'hide_{obj}_names', True)\n    return self",
    "docstring": "Hide the entire index / column headers, or specific rows / columns from display. .. versionadded:: 1.4.0 Parameters ---------- subset : label, array-like, IndexSlice, optional A valid 1d input or single key along the axis within or depending upon ``.",
    "type": "method",
    "file_path": "pandas\\pandas\\io\\formats\\style.py",
    "ast_data": "FunctionDef name:hide arg:self arg:subset arg:axis arg:level arg:names arguments arg arg arg arg arg Assign Call If Compare Assign Assign If BoolOp Compare Compare Raise Call If Compare If BoolOp Compare Call Return return:yes Assign Call Call Call Compare Call Call If Compare Assign Assign Assign Call Assign Assign Call Call Call Call If Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_concat_ragged_splits",
    "source_code": "def _concat_ragged_splits(splits_list):\n    pieces = [splits_list[0]]\n    splits_offset = splits_list[0][-1]\n    for splits in splits_list[1:]:\n        pieces.append(splits[1:] + splits_offset)\n        splits_offset += splits[-1]\n    return array_ops.concat(pieces, axis=0)",
    "docstring": "Concatenates a list of RaggedTensor splits to form a single splits.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\ragged_concat_ops.py",
    "ast_data": "FunctionDef name:_concat_ragged_splits arg:splits_list arguments arg Assign Assign For Call Return return:yes Call"
  },
  {
    "library": "sphinx",
    "name": "Message",
    "source_code": "class Message:\n    __slots__ = ('text', 'locations', 'uuids')\n    text: str\n    locations: list[tuple[str, int]]\n    uuids: list[str]\n\n    def __init__(self, text: str, locations: list[tuple[str, int]], uuids: list[str]) -> None:\n        self.text = text\n        self.locations = locations\n        self.uuids = uuids\n\n    def __repr__(self) -> str:\n        return f'Message(text={self.text!r}, locations={self.locations!r}, uuids={self.uuids!r})'",
    "docstring": "An entry of translatable message.",
    "type": "class",
    "file_path": "sphinx\\sphinx\\builders\\gettext.py",
    "ast_data": "ClassDef name:Message Assign FunctionDef name:__init__ arg:self arg:text arg:locations arg:uuids arguments arg arg arg arg Assign Assign Assign FunctionDef name:__repr__ arg:self arguments arg Return return:yes"
  },
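A quick construction example for the Message container (the text, location, and uuid values are made up):

>>> from sphinx.builders.gettext import Message
>>> msg = Message('Hello world', [('index.rst', 1)], ['uuid-1'])
>>> msg
Message(text='Hello world', locations=[('index.rst', 1)], uuids=['uuid-1'])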
  {
    "library": "tensorflow",
    "name": "make_dataset_iterator",
    "source_code": "def make_dataset_iterator(self, dataset):\n    return self._extended._make_dataset_iterator(dataset)",
    "docstring": "Makes an iterator for input provided via . DEPRECATED: This method is not available in TF 2.x. Data from the given dataset will be distributed evenly across all the compute replicas. We will assume that the input dataset is batched by the global batch size. With this assumption, we will make a best effort to divide each batch across all the replicas (one or more workers). If this effort fails, an error will be thrown, and the user should instead use which provides more control to the user, and does not try to divide a batch across replicas. The user could also use if they want to customize which input is fed to which replica/worker etc. Args: dataset: that will be distributed evenly across all replicas. Returns: An which returns inputs for each step of the computation. User should call on the returned iterator.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\distribute_lib.py",
    "ast_data": "FunctionDef name:make_dataset_iterator arg:self arg:dataset arguments arg arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "check_not_null",
    "source_code": "def check_not_null(self, node: IRNode) -> str:\n    if node is None:\n        return ''\n    size_str = self.size(node, 0, -1)\n    name_str = self.arg_name(node)\n    if name_str is None:\n        return ''\n    res = IndentedBuffer(initial_indent=2)\n    res.tabwidth = 1\n    res.splice(f'\\n            {{\\n              if (!{name_str}) {{\\n                int64_t {name_str}_size = {size_str};\\n                if ({name_str}_size > 0) {{\\n                  throw std::runtime_error(\"input {name_str} is null but size is not 0!\");\\n                }}\\n              }}\\n            }}\\n            ')\n    return res.getvalue()",
    "docstring": "Generates code to check that a node is not null.",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\codegen\\cuda\\cuda_kernel.py",
    "ast_data": "FunctionDef name:check_not_null arg:self arg:node arguments arg arg If Compare Return return:yes Assign Call Assign Call If Compare Return return:yes Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "log_combinations",
    "source_code": "def log_combinations(n, counts, name='log_combinations'):\n    with ops.name_scope(name, values=[n, counts]):\n        n = ops.convert_to_tensor(n, name='n')\n        counts = ops.convert_to_tensor(counts, name='counts')\n        total_permutations = math_ops.lgamma(n + 1)\n        counts_factorial = math_ops.lgamma(counts + 1)\n        redundant_permutations = math_ops.reduce_sum(counts_factorial, axis=[-1])\n        return total_permutations - redundant_permutations",
    "docstring": "Multinomial coefficient. Given and , where has last dimension , we compute the multinomial coefficient as: where runs over all classes. Args: n: Floating-point broadcastable with . This represents outcomes. counts: Floating-point broadcastable with . This represents counts in classes, where is the last dimension of the tensor. name: A name for this operation (optional). Returns: representing the multinomial coefficient between and .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\distributions\\util.py",
    "ast_data": "FunctionDef name:log_combinations arg:n arg:counts arg:name arguments arg arg arg With Call Assign Call Assign Call Assign Call Assign Call Assign Call Return return:yes"
  },
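The same quantity can be sketched with SciPy's log-gamma; 5 outcomes split 2/3 give a multinomial coefficient of 5!/(2!*3!) = 10:

>>> import numpy as np
>>> from scipy.special import gammaln
>>> n, counts = 5.0, np.array([2.0, 3.0])
>>> log_comb = gammaln(n + 1) - np.sum(gammaln(counts + 1))  # log(n!) - sum log(n_i!)
>>> round(float(np.exp(log_comb)))
10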
  {
    "library": "tensorflow",
    "name": "_handle_tpu_embedding",
    "source_code": "def _handle_tpu_embedding(self, tpu_embedding):\n    if not hasattr(type(tpu_embedding), _TPU_EMBEDDING_ATTR) or not callable(tpu_embedding._create_copy_for_async_checkpoint):\n        raise AttributeError('Expecting TPUEmbedding type; got %s' % type(tpu_embedding))\n    new_embedding = tpu_embedding._create_copy_for_async_checkpoint(feature_config=tpu_embedding._feature_config, optimizer=tpu_embedding._table_config[0] if tpu_embedding._table_config else None, pipeline_execution_with_tensor_core=tpu_embedding._pipeline_execution_with_tensor_core)\n    self._object_map[tpu_embedding] = new_embedding\n    if tpu_embedding not in self._tpu_embedding_objects:\n        self._tpu_embedding_objects.append(tpu_embedding)",
    "docstring": "Handle TPUEmbedding. This is the only place where we populate object map in the class of . For all other checkpointable trackables, we populate object map using the trackable's own . Args: tpu_embedding: TPUEmbedding object to be handled. Raises: AttributeError: if the input trackable is not TPUEmbedding type.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\checkpoint\\async_checkpoint_helper.py",
    "ast_data": "FunctionDef name:_handle_tpu_embedding arg:self arg:tpu_embedding arguments arg arg If BoolOp Call Call Call Raise Call Call Assign Call Assign If Compare Call"
  },
  {
    "library": "django",
    "name": "no_translations",
    "source_code": "def no_translations(handle_func):\n\n    def wrapper(*args, **kwargs):\n        from django.utils import translation\n        saved_locale = translation.get_language()\n        translation.deactivate_all()\n        try:\n            res = handle_func(*args, **kwargs)\n        finally:\n            if saved_locale is not None:\n                translation.activate(saved_locale)\n        return res\n    return wrapper",
    "docstring": "Decorator that forces a command to run with translations deactivated.",
    "type": "function",
    "file_path": "django\\django\\core\\management\\base.py",
    "ast_data": "FunctionDef name:no_translations arg:handle_func arguments arg FunctionDef name:wrapper arguments arg arg Assign Call Call Try Assign Call If Compare Call Return return:yes Return return:yes"
  },
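A minimal sketch of decorating a management command's handler with no_translations (the Command class here is hypothetical):

>>> from django.core.management.base import BaseCommand, no_translations
>>> class Command(BaseCommand):
...     @no_translations
...     def handle(self, *args, **options):
...         # body runs with translations deactivated; the saved locale
...         # is restored afterwards even if an exception is raised
...         return 'ok'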
  {
    "library": "pytorch",
    "name": "get_pruned_guards",
    "source_code": "def get_pruned_guards(self, symints: Sequence[torch.SymInt]) -> list[ShapeGuard]:\n    symints = {s.node.expr for s in symints if isinstance(s.node.expr, sympy.Symbol)}\n    guards = [g for g in self.guards if all((s in symints for s in g.expr.free_symbols))]\n    return guards",
    "docstring": "Get a list of guards, but pruned so it only provides guards that reference symints from the passed in input",
    "type": "method",
    "file_path": "pytorch\\torch\\fx\\experimental\\symbolic_shapes.py",
    "ast_data": "FunctionDef name:get_pruned_guards arg:self arg:symints arguments arg arg Assign Call Assign Call Compare Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "_get_tick_boxes_siblings",
    "source_code": "def _get_tick_boxes_siblings(self, renderer):\n    name = self._get_axis_name()\n    if name not in self.get_figure(root=False)._align_label_groups:\n        return ([], [])\n    grouper = self.get_figure(root=False)._align_label_groups[name]\n    bboxes = []\n    bboxes2 = []\n    for ax in grouper.get_siblings(self.axes):\n        axis = ax._axis_map[name]\n        ticks_to_draw = axis._update_ticks()\n        tlb, tlb2 = axis._get_ticklabel_bboxes(ticks_to_draw, renderer)\n        bboxes.extend(tlb)\n        bboxes2.extend(tlb2)\n    return (bboxes, bboxes2)",
    "docstring": "Get the bounding boxes for this and its siblings as set by or . By default, it just gets bboxes for *self*.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\axis.py",
    "ast_data": "FunctionDef name:_get_tick_boxes_siblings arg:self arg:renderer arguments arg arg Assign Call If Compare Call Return return:no Assign Call Assign Assign For Call Assign Assign Call Assign Call Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "WhileBody",
    "source_code": "@function.Defun(*body_sig, func_name=body_name)\ndef WhileBody(i, n, start, delta, *args):\n    for_result = forbody(start + i * delta, *args)\n    if isinstance(for_result, ops.Operation):\n        for_result = ()\n    elif isinstance(for_result, tensor.Tensor):\n        for_result = (for_result,)\n    return (i + 1, n, start, delta) + tuple(for_result)",
    "docstring": "A While wrapper for forbody that handles loop-carried captured inputs.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\functional_ops.py",
    "ast_data": "FunctionDef name:WhileBody arg:i arg:n arg:start arg:delta arguments arg arg arg arg arg Assign Call If Call Assign If Call Assign Return return:yes Call Call"
  },
  {
    "library": "matplotlib",
    "name": "too_close",
    "source_code": "def too_close(self, x, y, lw):\n    thresh = (1.2 * lw) ** 2\n    return any(((x - loc[0]) ** 2 + (y - loc[1]) ** 2 < thresh for loc in self.labelXYs))",
    "docstring": "Return whether a label is already near this location.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\contour.py",
    "ast_data": "FunctionDef name:too_close arg:self arg:x arg:y arg:lw arguments arg arg arg arg Assign Return return:yes Call Compare"
  },
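A self-contained sketch of the proximity test, with a module-level labelXYs standing in for the instance attribute:

>>> labelXYs = [(0.0, 0.0), (5.0, 5.0)]
>>> def too_close(x, y, lw):
...     thresh = (1.2 * lw) ** 2  # squared distance threshold
...     return any((x - lx) ** 2 + (y - ly) ** 2 < thresh for lx, ly in labelXYs)
>>> too_close(0.5, 0.5, 1.0)
True
>>> too_close(3.0, 3.0, 1.0)
False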
  {
    "library": "django",
    "name": "__str__",
    "source_code": "def __str__(self):\n    if self.field.show_hidden_initial:\n        return self.as_widget() + self.as_hidden(only_initial=True)\n    return self.as_widget()",
    "docstring": "Render this field as an HTML widget.",
    "type": "method",
    "file_path": "django\\django\\forms\\utils.py",
    "ast_data": "FunctionDef name:__str__ arg:self arguments arg If Return return:yes Call Call Return return:yes Call"
  },
  {
    "library": "numpy",
    "name": "_set_wrap_both",
    "source_code": "def _set_wrap_both(padded, axis, width_pair, original_period):\n    left_pad, right_pad = width_pair\n    period = padded.shape[axis] - right_pad - left_pad\n    period = period // original_period * original_period\n    new_left_pad = 0\n    new_right_pad = 0\n    if left_pad > 0:\n        slice_end = left_pad + period\n        slice_start = slice_end - min(period, left_pad)\n        right_slice = _slice_at_axis(slice(slice_start, slice_end), axis)\n        right_chunk = padded[right_slice]\n        if left_pad > period:\n            pad_area = _slice_at_axis(slice(left_pad - period, left_pad), axis)\n            new_left_pad = left_pad - period\n        else:\n            pad_area = _slice_at_axis(slice(None, left_pad), axis)\n        padded[pad_area] = right_chunk\n    if right_pad > 0:\n        slice_start = -right_pad - period\n        slice_end = slice_start + min(period, right_pad)\n        left_slice = _slice_at_axis(slice(slice_start, slice_end), axis)\n        left_chunk = padded[left_slice]\n        if right_pad > period:\n            pad_area = _slice_at_axis(slice(-right_pad, -right_pad + period), axis)\n            new_right_pad = right_pad - period\n        else:\n            pad_area = _slice_at_axis(slice(-right_pad, None), axis)\n        padded[pad_area] = left_chunk\n    return (new_left_pad, new_right_pad)",
    "docstring": "Pad of with wrapped values. Parameters ---------- padded : ndarray Input array of arbitrary shape. axis : int Axis along which to pad . width_pair : (int, int) Pair of widths that mark the pad area on both sides in the given dimension. original_period : int Original length of data on of . Returns ------- pad_amt : tuple of ints, length 2 New index positions of padding to do along the . If these are both 0, padding is done in this dimension.",
    "type": "function",
    "file_path": "numpy\\numpy\\lib\\_arraypad_impl.py",
    "ast_data": "FunctionDef name:_set_wrap_both arg:padded arg:axis arg:width_pair arg:original_period arguments arg arg arg arg Assign Assign Assign Assign Assign If Compare Assign Assign Call Assign Call Call Assign If Compare Assign Call Call Assign Assign Call Call Assign If Compare Assign Assign Call Assign Call Call Assign If Compare Assign Call Call Assign Assign Call Call Assign Return return:yes"
  },
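This helper backs NumPy's 'wrap' padding mode; the public entry point shows the periodic tiling it implements:

>>> import numpy as np
>>> np.pad(np.array([1, 2, 3]), (2, 2), mode='wrap')  # period-3 tiling on both ends
array([2, 3, 1, 2, 3, 1, 2])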
  {
    "library": "tensorflow",
    "name": "_graph_execution_trace_from_debug_event_proto",
    "source_code": "def _graph_execution_trace_from_debug_event_proto(self, debug_event, locator):\n    trace_proto = debug_event.graph_execution_trace\n    graph_ids = [trace_proto.tfdbg_context_id]\n    while True:\n        graph = self.graph_by_id(graph_ids[0])\n        if graph.outer_graph_id:\n            graph_ids.insert(0, graph.outer_graph_id)\n        else:\n            break\n    if trace_proto.tensor_debug_mode == debug_event_pb2.TensorDebugMode.FULL_TENSOR:\n        debug_tensor_value = None\n    else:\n        debug_tensor_value = _parse_tensor_value(trace_proto.tensor_proto, return_list=True)\n    return GraphExecutionTrace(self._graph_execution_trace_digest_from_debug_event_proto(debug_event, locator), graph_ids=graph_ids, tensor_debug_mode=trace_proto.tensor_debug_mode, debug_tensor_value=debug_tensor_value, device_name=trace_proto.device_name or None)",
    "docstring": "Convert a DebugEvent proto into a GraphExecutionTrace data object.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\debug\\lib\\debug_events_reader.py",
    "ast_data": "FunctionDef name:_graph_execution_trace_from_debug_event_proto arg:self arg:debug_event arg:locator arguments arg arg arg Assign Assign While Assign Call If Call If Compare Assign Assign Call Return return:yes Call Call BoolOp"
  },
  {
    "library": "scipy",
    "name": "convex_hull_plot_2d",
    "source_code": "@_held_figure\ndef convex_hull_plot_2d(hull, ax=None):\n    from matplotlib.collections import LineCollection\n    if hull.points.shape[1] != 2:\n        raise ValueError('Convex hull is not 2-D')\n    ax.plot(hull.points[:, 0], hull.points[:, 1], 'o')\n    line_segments = [hull.points[simplex] for simplex in hull.simplices]\n    ax.add_collection(LineCollection(line_segments, colors='k', linestyle='solid'))\n    _adjust_bounds(ax, hull.points)\n    return ax.figure",
    "docstring": "Plot the given convex hull diagram in 2-D Parameters ---------- hull : scipy.spatial.ConvexHull instance Convex hull to plot ax : matplotlib.axes.Axes instance, optional Axes to plot on Returns ------- fig : matplotlib.figure.Figure instance Figure for the plot See Also -------- ConvexHull Notes ----- Requires Matplotlib. Examples -------- >>> import numpy as np >>> import matplotlib.pyplot as plt >>> from scipy.spatial import ConvexHull, convex_hull_plot_2d The convex hull of a random set of points: >>> rng = np.random.default_rng() >>> points = rng.random((30, 2)) >>> hull = ConvexHull(points) Plot it: >>> _ = convex_hull_plot_2d(hull) >>> plt.show()",
    "type": "function",
    "file_path": "scipy\\scipy\\spatial\\_plotutils.py",
    "ast_data": "FunctionDef name:convex_hull_plot_2d arg:hull arg:ax arguments arg arg If Compare Raise Call Call Assign Call Call Call Return return:yes"
  },
  {
    "library": "numpy",
    "name": "cc_normalize_flags",
    "source_code": "def cc_normalize_flags(self, flags):\n    assert isinstance(flags, list)\n    if self.cc_is_gcc or self.cc_is_clang or self.cc_is_icc:\n        return self._cc_normalize_unix(flags)\n    if self.cc_is_msvc or self.cc_is_iccw:\n        return self._cc_normalize_win(flags)\n    return flags",
    "docstring": "Remove the conflicts that caused due gathering implied features flags. Parameters ---------- 'flags' list, compiler flags flags should be sorted from the lowest to the highest interest. Returns ------- list, filtered from any conflicts. Examples -------- >>> self.cc_normalize_flags(['-march=armv8.2-a+fp16', '-march=armv8.2-a+dotprod']) ['armv8.2-a+fp16+dotprod'] >>> self.cc_normalize_flags( ['-msse', '-msse2', '-msse3', '-mssse3', '-msse4.1', '-msse4.2', '-mavx', '-march=core-avx2'] ) ['-march=core-avx2']",
    "type": "method",
    "file_path": "numpy\\numpy\\distutils\\ccompiler_opt.py",
    "ast_data": "FunctionDef name:cc_normalize_flags arg:self arg:flags arguments arg arg Call If BoolOp Return return:yes Call If BoolOp Return return:yes Call Return return:yes"
  },
  {
    "library": "kornia",
    "name": "forward",
    "source_code": "def forward(self, x: Tensor) -> Tensor:\n    shape_im = x.shape\n    feats: List[Tensor] = [self.feature_extractor(x)]\n    for _ in range(1, self.num_levels):\n        x = pyrdown(x, factor=1.2)\n        feats_i = self.feature_extractor(x)\n        feats_i = F.interpolate(feats_i, size=(shape_im[2], shape_im[3]), mode='bilinear')\n        feats.append(feats_i)\n    scores = self.last_conv(concatenate(feats, 1))\n    return scores",
    "docstring": "X - input image.",
    "type": "method",
    "file_path": "kornia\\kornia\\feature\\keynet.py",
    "ast_data": "FunctionDef name:forward arg:self arg:x arguments arg arg Assign Call For Call Assign Call Assign Call Assign Call Call Assign Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "tell",
    "source_code": "def tell(self):\n    if self._read_check_passed:\n        self._preread_check()\n        return self._read_buf.tell()\n    else:\n        self._prewrite_check()\n        return self._writable_file.tell()",
    "docstring": "Returns the current position in the file.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\lib\\io\\file_io.py",
    "ast_data": "FunctionDef name:tell arg:self arguments arg If Call Return return:yes Call Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "_are_inputs_layout_compatible",
    "source_code": "def _are_inputs_layout_compatible(self, layouts: list[Layout]) -> bool:\n    assert len(layouts) == 2 or len(layouts) == 3\n    A_layout, B_layout = layouts[:2]\n    if len(A_layout.size) != 2:\n        return False\n    if len(B_layout.size) != 2:\n        return False\n    A_size = [int(i) for i in A_layout.size]\n    B_size = [int(i) for i in B_layout.size]\n    K = max(A_size[1], B_size[0])\n    return (K == A_size[1] or K == 2 * A_size[1]) and K == B_size[0]",
    "docstring": "Evaluates whether input layouts are compatible for set of operations supported by this class. Args: layouts (List[Layout]): List containing Layout objects representing the input matrices. Returns: bool: True if layouts are GEMM compatible, otherwise False.",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\codegen\\cuda\\gemm_template.py",
    "ast_data": "FunctionDef name:_are_inputs_layout_compatible arg:self arg:layouts arguments arg arg BoolOp Compare Call Compare Call Assign If Compare Call Return return:yes If Compare Call Return return:yes Assign Call Assign Call Assign Call Return return:yes BoolOp BoolOp Compare Compare Compare"
  },
  {
    "library": "pytorch",
    "name": "_annotate_conv_transpose_bn_relu",
    "source_code": "@register_annotator('conv_transpose_bn_relu')\ndef _annotate_conv_transpose_bn_relu(gm: torch.fx.GraphModule, quantization_config: Optional[QuantizationConfig], filter_fn: Optional[Callable[[Node], bool]]=None) -> Optional[list[list[Node]]]:\n    return _do_annotate_conv_bn(gm, quantization_config, filter_fn, has_relu=True, is_conv_transpose=True)",
    "docstring": "Find conv_transpose + batchnorm + relu parititions Note: This is only used for QAT. In PTQ, batchnorm should already be fused into the conv.",
    "type": "function",
    "file_path": "pytorch\\torch\\ao\\quantization\\quantizer\\xnnpack_quantizer_utils.py",
    "ast_data": "FunctionDef name:_annotate_conv_transpose_bn_relu arg:gm arg:quantization_config arg:filter_fn arguments arg arg arg Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "ones_like",
    "source_code": "@dispatch.add_dispatch_support\n@doc_controls.do_not_generate_docs\ndef ones_like(x, dtype=None, name=None):\n    return array_ops.ones_like(x, dtype=dtype, name=name)",
    "docstring": "Instantiates an all-ones variable of the same shape as another tensor. Args: x: Keras variable or tensor. dtype: String, dtype of returned Keras variable. None uses the dtype of x. name: String, name for the variable to create. Returns: A Keras variable with the shape of x filled with ones. Example: >>> kvar = tf.keras.backend.variable(np.random.random((2,3))) >>> kvar_ones = tf.keras.backend.ones_like(kvar) >>> tf.keras.backend.eval(kvar_ones) array([[1., 1., 1.], [1., 1., 1.]], dtype=float32)",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\backend.py",
    "ast_data": "FunctionDef name:ones_like arg:x arg:dtype arg:name arguments arg arg arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_unblockify_then_matricize",
    "source_code": "def _unblockify_then_matricize(self, vec):\n    vec_flat = self._unblockify(vec)\n    matrix = distribution_util.rotate_transpose(vec_flat, shift=-1)\n    return matrix",
    "docstring": "Flatten the block dimensions then reshape to a batch matrix.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\linalg\\linear_operator_circulant.py",
    "ast_data": "FunctionDef name:_unblockify_then_matricize arg:self arg:vec arguments arg arg Assign Call Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "next_index",
    "source_code": "def next_index(self):\n    return len(self._string_table)",
    "docstring": "Gets index that would be assigned to the next added string. Returns: Index of the next string if it was added.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\profiler\\pprof_profiler.py",
    "ast_data": "FunctionDef name:next_index arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "pop",
    "source_code": "@trackable.no_automatic_dependency_tracking\ndef pop(self):\n    if not self.layers:\n        raise TypeError('There are no layers in the model.')\n    layer = self._self_tracked_trackables.pop()\n    self._layer_call_argspecs.pop(layer)\n    if not self.layers:\n        self.outputs = None\n        self.inputs = None\n        self.built = False\n        self._inferred_input_shape = None\n        self._has_explicit_input_shape = False\n        self._graph_initialized = False\n    elif self._graph_initialized:\n        self.layers[-1]._outbound_nodes = []\n        self.outputs = [self.layers[-1].output]\n        self._init_graph_network(self.inputs, self.outputs)\n        self.built = True",
    "docstring": "Removes the last layer in the model. Raises: TypeError: if there are no layers in the model.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\sequential.py",
    "ast_data": "FunctionDef name:pop arg:self arguments arg If Raise Call Assign Call Call If Assign Assign Assign Assign Assign Assign If Assign Assign Call Assign"
  },
  {
    "library": "pytorch",
    "name": "_remove_previous_dequantize_in_custom_module",
    "source_code": "def _remove_previous_dequantize_in_custom_module(node: Node, prev_node: Node, graph: Graph) -> None:\n    assert isinstance(prev_node, Node), f'Expecting the argument for custom module node to be a Node, but got {prev_node}'\n    if prev_node.op == 'call_method' and prev_node.target == 'dequantize':\n        node.replace_input_with(prev_node, prev_node.args[0])\n        if len(prev_node.users) == 0:\n            graph.erase_node(prev_node)",
    "docstring": "Given a custom module , if the previous node is a dequantize, reroute the custom as follows: Before: quantize - dequantize - custom_module After: quantize - custom_module \\ - dequantize",
    "type": "function",
    "file_path": "pytorch\\torch\\ao\\quantization\\fx\\convert.py",
    "ast_data": "FunctionDef name:_remove_previous_dequantize_in_custom_module arg:node arg:prev_node arg:graph arguments arg arg arg Call If BoolOp Compare Compare Call If Compare Call Call"
  },
  {
    "library": "tensorflow",
    "name": "assert_scalar_v2",
    "source_code": "@tf_export('debugging.assert_scalar', v1=[])\n@dispatch.add_dispatch_support\ndef assert_scalar_v2(tensor, message=None, name=None):\n    assert_scalar(tensor=tensor, message=message, name=name)",
    "docstring": "Asserts that the given is a scalar. This function raises unless it can be certain that the given is a scalar. is also raised if the shape of is unknown. This is always checked statically, so this method returns nothing. Args: tensor: A . message: A string to prefix to the default message. name: A name for this operation. Defaults to \"assert_scalar\" Raises: ValueError: If the tensor is not scalar (rank 0), or if its shape is unknown.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\check_ops.py",
    "ast_data": "FunctionDef name:assert_scalar_v2 arg:tensor arg:message arg:name arguments arg arg arg Call Call"
  },
  {
    "library": "pytorch",
    "name": "run",
    "source_code": "@abc.abstractmethod\ndef run(self, role: str=DEFAULT_ROLE) -> RunResult:\n    raise NotImplementedError",
    "docstring": "Run the agent. Supports retrying the worker group on failures up to ``. Returns: The result of the execution, containing the return values or failure details for each worker mapped by the worker's global rank. Raises: Exception - any other failures NOT related to worker process",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\elastic\\agent\\server\\api.py",
    "ast_data": "FunctionDef name:run arg:self arg:role arguments arg arg Raise"
  },
  {
    "library": "pytorch",
    "name": "export_memory_timeline_raw",
    "source_code": "def export_memory_timeline_raw(self, path, device_str) -> None:\n    device = torch.device(device_str)\n    raw_events: list[tuple[int, int, int, int]] = []\n\n    def get_category_index(key, version):\n        category = self.categories.get(key, version) if isinstance(key, TensorKey) else None\n        return _CATEGORY_TO_INDEX[category]\n    for t, action, (key, version), numbytes in self.timeline:\n        if key.device != device:\n            continue\n        if action in (Action.PREEXISTING, Action.CREATE):\n            raw_events.append((t, _ACTION_TO_INDEX[action], numbytes, get_category_index(key, version)))\n        elif action == Action.INCREMENT_VERSION:\n            raw_events.append((t, _ACTION_TO_INDEX[action], -numbytes, get_category_index(key, version)))\n            raw_events.append((t, _ACTION_TO_INDEX[action], numbytes, get_category_index(key, version + 1)))\n        elif action == Action.DESTROY:\n            raw_events.append((t, _ACTION_TO_INDEX[action], -numbytes, get_category_index(key, version)))\n        else:\n            raise ValueError(f'Unknown action: {action}')\n    import json\n    with open(path, 'w') as f:\n        json.dump(raw_events, f)",
    "docstring": "Saves the memory timeline as raw memory event tuples in the form of (timestamp, action, numbytes, category) as a JSON formatted file to the given path for the given device.",
    "type": "method",
    "file_path": "pytorch\\torch\\profiler\\_memory_profiler.py",
    "ast_data": "FunctionDef name:export_memory_timeline_raw arg:self arg:path arg:device_str arguments arg arg arg Assign Call FunctionDef name:get_category_index arg:key arg:version arguments arg arg Assign Call Call Return return:yes For If Compare If Compare Call Call If Compare Call Call Call Call If Compare Call Call Raise Call With Call Call"
  },
  {
    "library": "seaborn",
    "name": "_map_bivariate",
    "source_code": "def _map_bivariate(self, func, indices, **kwargs):\n    from .distributions import histplot, kdeplot\n    if func is histplot or func is kdeplot:\n        self._extract_legend_handles = True\n    kws = kwargs.copy()\n    for i, j in indices:\n        x_var = self.x_vars[j]\n        y_var = self.y_vars[i]\n        ax = self.axes[i, j]\n        if ax is None:\n            continue\n        self._plot_bivariate(x_var, y_var, ax, func, **kws)\n    self._add_axis_labels()\n    if 'hue' in signature(func).parameters:\n        self.hue_names = list(self._legend_data)",
    "docstring": "Draw a bivariate plot on the indicated axes.",
    "type": "method",
    "file_path": "seaborn\\seaborn\\axisgrid.py",
    "ast_data": "FunctionDef name:_map_bivariate arg:self arg:func arg:indices arguments arg arg arg arg If BoolOp Compare Compare Assign Assign Call For Assign Assign Assign If Compare Call Call If Compare Call Assign Call"
  },
  {
    "library": "django",
    "name": "_last_modification",
    "source_code": "def _last_modification(self):\n    modification = os.stat(self._key_to_file()).st_mtime\n    tz = datetime.UTC if settings.USE_TZ else None\n    return datetime.datetime.fromtimestamp(modification, tz=tz)",
    "docstring": "Return the modification time of the file storing the session's content.",
    "type": "method",
    "file_path": "django\\django\\contrib\\sessions\\backends\\file.py",
    "ast_data": "FunctionDef name:_last_modification arg:self arguments arg Assign Call Call Assign Return return:yes Call"
  },
  {
    "library": "django",
    "name": "get_wsgi_application",
    "source_code": "def get_wsgi_application():\n    django.setup(set_prefix=False)\n    return WSGIHandler()",
    "docstring": "The public interface to Django's WSGI support. Return a WSGI callable. Avoids making django.core.handlers.WSGIHandler a public API, in case the internal WSGI implementation changes or moves in the future.",
    "type": "function",
    "file_path": "django\\django\\core\\wsgi.py",
    "ast_data": "FunctionDef name:get_wsgi_application arguments Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "print_all_configs",
    "source_code": "def print_all_configs(configs, missing, warning):\n    print_text = ''\n    llen = 65\n    for i, row in enumerate(configs):\n        if i != 0:\n            print_text += '-' * llen + '\\n'\n        if isinstance(row[1], list):\n            val = ', '.join(row[1])\n        else:\n            val = row[1]\n        print_text += ' {: <28}'.format(row[0]) + '    {: <25}'.format(val) + '\\n'\n    print_text += '=' * llen\n    print('\\n\\n {: ^32}    {: ^25}'.format('Configuration(s)', 'Detected value(s)'))\n    print('=' * llen)\n    print(print_text)\n    if missing:\n        print('\\n * ERROR: The following configurations are missing:')\n        for m in missing:\n            print('   ', *m)\n    if warning:\n        print('\\n * WARNING: The following configurations could cause issues:')\n        for w in warning:\n            print('   ', *w)\n    if not missing and (not warning):\n        print('\\n * INFO: Successfully found all configurations.')\n    print('\\n')",
    "docstring": "Prints the status and info on all configurations in a table format. Args: configs: List of all configurations found. missing: List of all configurations that are missing. warning: List of all configurations found with warnings.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\tools\\tensorflow_builder\\config_detector\\config_detector.py",
    "ast_data": "FunctionDef name:print_all_configs arg:configs arg:missing arg:warning arguments arg arg arg Assign Assign For Call If Compare If Call Assign Call Assign Call Call Call Call Call Call If Call For Call If Call For Call If BoolOp Call Call"
  },
  {
    "library": "pytorch",
    "name": "init",
    "source_code": "def init():\n    _lazy_init()",
    "docstring": "Initialize PyTorch's XPU state. This is a Python API about lazy initialization that avoids initializing XPU until the first time it is accessed. Does nothing if the XPU state is already initialized.",
    "type": "function",
    "file_path": "pytorch\\torch\\xpu\\__init__.py",
    "ast_data": "FunctionDef name:init arguments Call"
  },
  {
    "library": "django",
    "name": "_quote_params_for_last_executed_query",
    "source_code": "def _quote_params_for_last_executed_query(self, params):\n    BATCH_SIZE = 999\n    if len(params) > BATCH_SIZE:\n        results = ()\n        for index in range(0, len(params), BATCH_SIZE):\n            chunk = params[index:index + BATCH_SIZE]\n            results += self._quote_params_for_last_executed_query(chunk)\n        return results\n    sql = 'SELECT ' + ', '.join(['QUOTE(?)'] * len(params))\n    cursor = self.connection.connection.cursor()\n    try:\n        return cursor.execute(sql, params).fetchone()\n    finally:\n        cursor.close()",
    "docstring": "Only for last_executed_query! Don't use this to execute SQL queries!",
    "type": "method",
    "file_path": "django\\django\\db\\backends\\sqlite3\\operations.py",
    "ast_data": "FunctionDef name:_quote_params_for_last_executed_query arg:self arg:params arguments arg arg Assign If Compare Call Assign For Call Call Assign Call Return return:yes Assign Call Call Assign Call Try Return return:yes Call Call Call"
  },
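The QUOTE() trick is plain SQLite and is visible with the standard-library driver:

>>> import sqlite3
>>> con = sqlite3.connect(':memory:')
>>> con.execute('SELECT QUOTE(?)', ["O'Reilly"]).fetchone()  # SQL-escaped literal
("'O''Reilly'",)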
  {
    "library": "tensorflow",
    "name": "TensorBoardVersionSelector",
    "source_code": "class TensorBoardVersionSelector(object):\n\n    def __new__(cls, *args, **kwargs):\n        use_v2 = should_use_v2()\n        start_cls = cls\n        cls = swap_class(start_cls, callbacks.TensorBoard, callbacks_v1.TensorBoard, use_v2)\n        if start_cls == callbacks_v1.TensorBoard and cls == callbacks.TensorBoard:\n            return cls(*args, **kwargs)\n        return super(TensorBoardVersionSelector, cls).__new__(cls)",
    "docstring": "Chooses between Keras v1 and v2 TensorBoard callback class.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\utils\\version_utils.py",
    "ast_data": "ClassDef name:TensorBoardVersionSelector FunctionDef name:__new__ arg:cls arguments arg arg arg Assign Call Assign Assign Call If BoolOp Compare Compare Return return:yes Call Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "saved_tensors",
    "source_code": "@property\ndef saved_tensors(self):\n    flat_tensors = super().saved_tensors\n    return _unflatten(flat_tensors, self._to_save_nested)",
    "docstring": "See :meth:.",
    "type": "method",
    "file_path": "pytorch\\torch\\autograd\\function.py",
    "ast_data": "FunctionDef name:saved_tensors arg:self arguments arg Assign Call Return return:yes Call"
  },
  {
    "library": "django",
    "name": "chunk_bytes",
    "source_code": "@classmethod\ndef chunk_bytes(cls, data):\n    position = 0\n    if not data:\n        yield (data, True)\n        return\n    while position < len(data):\n        yield (data[position:position + cls.chunk_size], position + cls.chunk_size >= len(data))\n        position += cls.chunk_size",
    "docstring": "Chunks some data up so it can be sent in reasonable size messages. Yields (chunk, last_chunk) tuples.",
    "type": "method",
    "file_path": "django\\django\\core\\handlers\\asgi.py",
    "ast_data": "FunctionDef name:chunk_bytes arg:cls arg:data arguments arg arg Assign If Return return:no While Compare Call Compare Call"
  },
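A standalone sketch of the same chunking generator, with a tiny chunk_size for illustration (the real class reads a much larger chunk_size class attribute):

>>> def chunk_bytes(data, chunk_size=3):
...     position = 0
...     if not data:
...         yield data, True
...         return
...     while position < len(data):
...         # second element flags whether this is the final chunk
...         yield (data[position:position + chunk_size],
...                position + chunk_size >= len(data))
...         position += chunk_size
>>> list(chunk_bytes(b'abcdefgh'))
[(b'abc', False), (b'def', False), (b'gh', True)]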
  {
    "library": "pytorch",
    "name": "__init__",
    "source_code": "def __init__(self) -> None:\n    devices = self.get_device_list()\n    autotuning_log.debug('Sub-process autotune device list: %s', devices)\n    self.processes = [TuningProcess(device=device) for device in devices]\n    self.process_queue: queue.Queue[TuningProcess] = queue.Queue()\n    for p in self.processes:\n        self.process_queue.put(p)\n    self.executor = ThreadPoolExecutor(max_workers=len(devices))",
    "docstring": "Start the child processes.",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\autotune_process.py",
    "ast_data": "FunctionDef name:__init__ arg:self arguments arg Assign Call Call Assign Call Call For Call Assign Call Call"
  },
  {
    "library": "django",
    "name": "ReverseGenericManyToOneDescriptor",
    "source_code": "class ReverseGenericManyToOneDescriptor(ReverseManyToOneDescriptor):\n\n    @cached_property\n    def related_manager_cls(self):\n        return create_generic_related_manager(self.rel.model._default_manager.__class__, self.rel)",
    "docstring": "Accessor to the related objects manager on the one-to-many relation created by GenericRelation. In the example:: class Post(Model): comments = GenericRelation(Comment) `` is a ReverseGenericManyToOneDescriptor instance.",
    "type": "class",
    "file_path": "django\\django\\contrib\\contenttypes\\fields.py",
    "ast_data": "ClassDef name:ReverseGenericManyToOneDescriptor FunctionDef name:related_manager_cls arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "set_ymargin",
    "source_code": "def set_ymargin(self, m):\n    if m <= -0.5:\n        raise ValueError('margin must be greater than -0.5')\n    self._ymargin = m\n    self._request_autoscale_view('y')\n    self.stale = True",
    "docstring": "Set padding of Y data limits prior to autoscaling. *m* times the data interval will be added to each end of that interval before it is used in autoscaling. If *m* is negative, this will clip the data range instead of expanding it. For example, if your data is in the range [0, 2], a margin of 0.1 will result in a range [-0.2, 2.2]; a margin of -0.1 will result in a range of [0.2, 1.8]. Parameters ---------- m : float greater than -0.5",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\axes\\_base.py",
    "ast_data": "FunctionDef name:set_ymargin arg:self arg:m arguments arg arg If Compare Raise Call Assign Call Assign"
  },
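Usage sketch; with data in [0, 2], a 0.1 margin autoscales y to [-0.2, 2.2]:

>>> import matplotlib.pyplot as plt
>>> fig, ax = plt.subplots()
>>> _ = ax.plot([0, 2], [0, 2])
>>> ax.set_ymargin(0.1)
>>> ax.margins()   # (xmargin, ymargin); x keeps its 0.05 default
(0.05, 0.1)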
  {
    "library": "tensorflow",
    "name": "_standardize_batch",
    "source_code": "def _standardize_batch(self, data):\n    x, y, sample_weight = unpack_x_y_sample_weight(data)\n    data = pack_x_y_sample_weight(x, y, sample_weight)\n    data = nest.list_to_tuple(data)\n\n    def _convert_dtype(t):\n        if isinstance(t, np.ndarray) and issubclass(t.dtype.type, np.floating):\n            return np.array(t, dtype=backend.floatx())\n        return t\n    data = nest.map_structure(_convert_dtype, data)\n    return data",
    "docstring": "Standardizes a batch output by a generator.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\data_adapter.py",
    "ast_data": "FunctionDef name:_standardize_batch arg:self arg:data arguments arg arg Assign Call Assign Call Assign Call FunctionDef name:_convert_dtype arg:t arguments arg If BoolOp Call Call Return return:yes Call Call Return return:yes Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_block_lstm_grad",
    "source_code": "def _block_lstm_grad(op, *grads):\n    seq_len_max, x, cs_prev, h_prev, w, wci, wcf, wco, b = op.inputs\n    i, cs, f, o, ci, co, h = op.outputs\n    _, cs_grad, _, _, _, _, h_grad = grads\n    x_grad, cs_prev_grad, h_prev_grad, w_grad, wci_grad, wcf_grad, wco_grad, b_grad = gen_rnn_ops.block_lstm_grad(seq_len_max=seq_len_max, x=x, cs_prev=cs_prev, h_prev=h_prev, w=w, wci=wci, wcf=wcf, wco=wco, b=b, i=i, cs=cs, f=f, o=o, ci=ci, co=co, h=h, cs_grad=cs_grad, h_grad=h_grad, use_peephole=op.get_attr('use_peephole'))\n    return (None, x_grad, cs_prev_grad, h_prev_grad, w_grad, wci_grad, wcf_grad, wco_grad, b_grad)",
    "docstring": "Gradient for the BlockLSTM op.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\rnn_grad.py",
    "ast_data": "FunctionDef name:_block_lstm_grad arg:op arguments arg arg Assign Assign Assign Assign Call Call Return return:yes"
  },
  {
    "library": "pandas",
    "name": "column_names",
    "source_code": "@abstractmethod\ndef column_names(self) -> Iterable[str]:\n    pass",
    "docstring": "Return an iterator yielding the column names.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\interchange\\dataframe_protocol.py",
    "ast_data": "FunctionDef name:column_names arg:self arguments arg"
  },
  {
    "library": "pandas",
    "name": "recode_for_groupby",
    "source_code": "def recode_for_groupby(c: Categorical, sort: bool, observed: bool) -> Categorical:\n    if observed:\n        take_codes = unique1d(c.codes[c.codes != -1])\n        if sort:\n            take_codes = np.sort(take_codes)\n        categories = c.categories.take(take_codes)\n        codes = recode_for_categories(c.codes, c.categories, categories)\n        dtype = CategoricalDtype(categories, ordered=c.ordered)\n        return Categorical._simple_new(codes, dtype=dtype)\n    if sort:\n        return c\n    unique_notnan_codes = unique1d(c.codes[c.codes != -1])\n    if sort:\n        unique_notnan_codes = np.sort(unique_notnan_codes)\n    if (num_cat := len(c.categories)) > len(unique_notnan_codes):\n        missing_codes = np.setdiff1d(np.arange(num_cat), unique_notnan_codes, assume_unique=True)\n        take_codes = np.concatenate((unique_notnan_codes, missing_codes))\n    else:\n        take_codes = unique_notnan_codes\n    return Categorical(c, c.categories.take(take_codes))",
    "docstring": "Code the categories to ensure we can groupby for categoricals. If observed=True, we return a new Categorical with the observed categories only. If sort=False, return a copy of self, coded with categories as returned by .unique(), followed by any categories not appearing in the data. If sort=True, return self. This method is needed solely to ensure the categorical index of the GroupBy result has categories in the order of appearance in the data (GH-8868). Parameters ---------- c : Categorical sort : bool The value of the sort parameter groupby was called with. observed : bool Account only for the observed values Returns ------- Categorical If sort=False, the new categories are set to the order of appearance in codes (unless ordered=True, in which case the original order is preserved), followed by any unrepresented categories in the original order.",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\groupby\\categorical.py",
    "ast_data": "FunctionDef name:recode_for_groupby arg:c arg:sort arg:observed arguments arg arg arg If Assign Call Compare If Assign Call Assign Call Assign Call Assign Call Return return:yes Call If Return return:yes Assign Call Compare If Assign Call If Compare Call Call Assign Call Call Assign Call Assign Return return:yes Call Call"
  },
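The observed=True behavior this helper supports is visible through the public groupby API:

>>> import pandas as pd
>>> cat = pd.Categorical(['a', 'a', 'b'], categories=['a', 'b', 'c'])
>>> df = pd.DataFrame({'key': cat, 'val': [1, 2, 3]})
>>> df.groupby('key', observed=True)['val'].sum()  # unused category 'c' is dropped
key
a    3
b    3
Name: val, dtype: int64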
  {
    "library": "scikit-learn",
    "name": "_norm",
    "source_code": "def _norm(x):\n    return np.sqrt(squared_norm(x))",
    "docstring": "Dot product-based Euclidean norm implementation See:",
    "type": "function",
    "file_path": "scikit-learn\\benchmarks\\bench_plot_nmf.py",
    "ast_data": "FunctionDef name:_norm arg:x arguments arg Return return:yes Call Call"
  },
  {
    "library": "matplotlib",
    "name": "set_hatchcolor",
    "source_code": "def set_hatchcolor(self, color):\n    self._original_hatchcolor = color\n    self._set_hatchcolor(color)",
    "docstring": "Set the patch hatch color. Parameters ---------- color : :mpltype: or 'edge' or None",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\patches.py",
    "ast_data": "FunctionDef name:set_hatchcolor arg:self arg:color arguments arg arg Assign Call"
  },
  {
    "library": "django",
    "name": "SameAsLookup",
    "source_code": "@BaseSpatialField.register_lookup\nclass SameAsLookup(GISLookup):\n    lookup_name = 'same_as'",
    "docstring": "The \"~=\" operator is the \"same as\" operator. It tests actual geometric equality of two features. So if A and B are the same feature, vertex-by-vertex, the operator returns true.",
    "type": "class",
    "file_path": "django\\django\\contrib\\gis\\db\\models\\lookups.py",
    "ast_data": "ClassDef name:SameAsLookup Assign"
  },
  {
    "library": "django",
    "name": "process_response",
    "source_code": "def process_response(self, request, response):\n    if hasattr(request, '_messages'):\n        unstored_messages = request._messages.update(response)\n        if unstored_messages and settings.DEBUG:\n            raise ValueError('Not all temporary messages could be stored.')\n    return response",
    "docstring": "Update the storage backend (i.e., save the messages). Raise ValueError if not all messages could be stored and DEBUG is True.",
    "type": "method",
    "file_path": "django\\django\\contrib\\messages\\middleware.py",
    "ast_data": "FunctionDef name:process_response arg:self arg:request arg:response arguments arg arg arg If Call Assign Call If BoolOp Raise Call Return return:yes"
  },
  {
    "library": "scipy",
    "name": "get_count",
    "source_code": "def get_count(self):\n    return self.count",
    "docstring": "The number of leaf nodes (original observations) belonging to the cluster node nd. If the target node is a leaf, 1 is returned. Returns ------- get_count : int The number of leaf nodes below the target node.",
    "type": "method",
    "file_path": "scipy\\scipy\\cluster\\hierarchy.py",
    "ast_data": "FunctionDef name:get_count arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "set_shape",
    "source_code": "def set_shape(self, shape):\n    raise NotImplementedError",
    "docstring": "Overrides the shape for this variable. Args: shape: the representing the overridden shape.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\variables.py",
    "ast_data": "FunctionDef name:set_shape arg:self arg:shape arguments arg arg Raise"
  },
  {
    "library": "kornia",
    "name": "_get_nms_kernel3d",
    "source_code": "def _get_nms_kernel3d(kd: int, ky: int, kx: int) -> Tensor:\n    numel: int = kd * ky * kx\n    center: int = numel // 2\n    weight = eye(numel)\n    weight[center, center] = 0\n    return weight.view(numel, 1, kd, ky, kx)",
    "docstring": "Return neigh2channels conv kernel.",
    "type": "function",
    "file_path": "kornia\\kornia\\geometry\\subpix\\nms.py",
    "ast_data": "FunctionDef name:_get_nms_kernel3d arg:kd arg:ky arg:kx arguments arg arg arg Assign Call Assign Return return:yes Call"
  },
  {
    "library": "sphinx",
    "name": "get_mainlanguage_options",
    "source_code": "def get_mainlanguage_options(self) -> str | None:\n    if self.use_polyglossia is False:\n        return None\n    elif self.language == 'german':\n        language = super().language_name(self.language_code)\n        if language == 'ngerman':\n            return 'spelling=new'\n        else:\n            return 'spelling=old'\n    else:\n        return None",
    "docstring": "Return options for polyglossia's ``.",
    "type": "method",
    "file_path": "sphinx\\sphinx\\builders\\latex\\util.py",
    "ast_data": "FunctionDef name:get_mainlanguage_options arg:self arguments arg If Compare Return return:no If Compare Assign Call Call If Compare Return return:yes Return return:yes Return return:no"
  },
  {
    "library": "pytorch",
    "name": "is_node_supported",
    "source_code": "def is_node_supported(self, submodules: t.Mapping[str, torch.nn.Module], node: torch.fx.Node) -> bool:\n    if node.op not in CALLABLE_NODE_OPS:\n        return True\n    target = get_node_target(submodules, node)\n    if target not in self._support_dict:\n        return False\n    if self._support_dict[target] is None:\n        return True\n    args_dtypes, kwargs_dtypes = self._support_dict[target]\n    for i, dtypes in enumerate(args_dtypes):\n        if len(node.args) <= i:\n            break\n        if dtypes is None:\n            continue\n        if not isinstance(node.args[i], torch.fx.Node):\n            continue\n        arg_dtype = _get_arg_dtype(node.args[i])\n        if arg_dtype not in dtypes:\n            return False\n    for k, dtypes in kwargs_dtypes.items():\n        if k not in node.kwargs:\n            continue\n        if not isinstance(node.kwargs[k], torch.fx.Node):\n            continue\n        kwarg_dtype = _get_arg_dtype(node.kwargs[k])\n        if kwarg_dtype not in dtypes:\n            return False\n    return True",
    "docstring": "Args: : mapping from module name to the module. This can be retrieved by calling model.named_modules(). : a Fx node that we want to determine whether it's supported. Returns: : whether the arg is supported.",
    "type": "method",
    "file_path": "pytorch\\torch\\fx\\passes\\operator_support.py",
    "ast_data": "FunctionDef name:is_node_supported arg:self arg:submodules arg:node arguments arg arg arg If Compare Return return:yes Assign Call If Compare Return return:yes If Compare Return return:yes Assign For Call If Compare Call If Compare If Call Assign Call If Compare Return return:yes For Call If Compare If Call Assign Call If Compare Return return:yes Return return:yes"
  },
  {
    "library": "kornia",
    "name": "_get_nms_kernel2d",
    "source_code": "def _get_nms_kernel2d(kx: int, ky: int) -> Tensor:\n    numel: int = ky * kx\n    center: int = numel // 2\n    weight = eye(numel)\n    weight[center, center] = 0\n    return weight.view(numel, 1, ky, kx)",
    "docstring": "Return neigh2channels conv kernel.",
    "type": "function",
    "file_path": "kornia\\kornia\\geometry\\subpix\\nms.py",
    "ast_data": "FunctionDef name:_get_nms_kernel2d arg:kx arg:ky arguments arg arg Assign Call Assign Return return:yes Call"
  },
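A self-contained torch sketch of the 2-D kernel construction (torch.eye stands in for kornia's eye wrapper; get_nms_kernel2d is a stand-in name):

>>> import torch
>>> def get_nms_kernel2d(kx, ky):
...     numel = ky * kx
...     weight = torch.eye(numel)           # one output channel per neighbor
...     weight[numel // 2, numel // 2] = 0  # zero out the center tap
...     return weight.view(numel, 1, ky, kx)
>>> get_nms_kernel2d(3, 3).shape
torch.Size([9, 1, 3, 3])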
  {
    "library": "pytorch",
    "name": "RemoveInputMutation",
    "source_code": "class RemoveInputMutation(_pass.Transform):\n\n    def _run(self, *args) -> torch.fx.GraphModule:\n        for node in reversed(self.module.graph.nodes):\n            if node.op == 'call_function' and node.target == torch.ops.aten.copy_.default and (len(node.users) == 0) and isinstance(node.args[0], torch.fx.Node) and (node.args[0].op == 'placeholder'):\n                self.module.graph.erase_node(node)\n        return self.module",
    "docstring": "Remove nodes that mutate module inputs. This pass is recommended to be used after `aten.copy_.default` nodes to the graph when it detects mutations to inputs. These nodes are not needed for ONNX export for inference. They could be useful for training.",
    "type": "class",
    "file_path": "pytorch\\torch\\onnx\\_internal\\fx\\passes\\functionalization.py",
    "ast_data": "ClassDef name:RemoveInputMutation FunctionDef name:_run arg:self arguments arg arg For Call If BoolOp Compare Compare Compare Call Call Compare Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, bytes_per_pack=0, timeout_seconds=None, implementation=CommunicationImplementation.AUTO):\n    pass",
    "docstring": "Creates a CollectiveHints. Args: bytes_per_pack: a non-negative integer. Breaks collective operations into packs of certain size. If it's zero, the value is determined automatically. This hint is respected by all multi-replica strategies except . timeout_seconds: a float or None, timeout in seconds. If not None, the collective raises if it takes longer than this timeout. Zero disables timeout. This can be useful when debugging hanging issues. This should only be used for debugging since it creates a new thread for each collective, i.e. an overhead of more threads. This only works for . implementation: a . This is a hint on the preferred communication implementation. Possible values include , , and . NCCL is generally more performant for GPU, but doesn't work for CPU. This only works for . Raises: ValueError: When arguments have invalid value.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\collective_util.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:bytes_per_pack arg:timeout_seconds arg:implementation arguments arg arg arg arg"
  },
  {
    "library": "matplotlib",
    "name": "get_ticks_position",
    "source_code": "def get_ticks_position(self):\n    return {1: 'left', 2: 'right', 'default': 'default', 'unknown': 'unknown'}[self._get_ticks_position()]",
    "docstring": "Return the ticks position (\"left\", \"right\", \"default\", or \"unknown\").",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\axis.py",
    "ast_data": "FunctionDef name:get_ticks_position arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "_detect_is_causal_mask",
    "source_code": "def _detect_is_causal_mask(mask: Optional[Tensor], is_causal: Optional[bool]=None, size: Optional[int]=None) -> bool:\n    make_causal = is_causal is True\n    if is_causal is None and mask is not None:\n        sz = size if size is not None else mask.size(-2)\n        causal_comparison = _generate_square_subsequent_mask(sz, device=mask.device, dtype=mask.dtype)\n        if mask.size() == causal_comparison.size():\n            make_causal = bool((mask == causal_comparison).all())\n        else:\n            make_causal = False\n    return make_causal",
    "docstring": "Return whether the given attention mask is causal. Warning: If `` if not None, check whether the mask is a causal mask of the provided size Otherwise, checks for any causal mask.",
    "type": "function",
    "file_path": "pytorch\\torch\\nn\\modules\\transformer.py",
    "ast_data": "FunctionDef name:_detect_is_causal_mask arg:mask arg:is_causal arg:size arguments arg arg arg Assign Compare If BoolOp Compare Compare Assign Compare Call Assign Call If Compare Call Call Assign Call Call Compare Assign Return return:yes"
  },
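The comparison mask built inside this helper matches the public generator:

>>> import torch
>>> torch.nn.Transformer.generate_square_subsequent_mask(3)  # -inf above the diagonal
tensor([[0., -inf, -inf],
        [0., 0., -inf],
        [0., 0., 0.]])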
  {
    "library": "tensorflow",
    "name": "_ArgTemplateBuilder",
    "source_code": "class _ArgTemplateBuilder(object):\n\n    def __init__(self):\n        self._arg_accumulator = []\n        self._argspec = []\n        self._finalized = False\n\n    def _consume_args(self):\n        if self._arg_accumulator:\n            self._argspec.append(gast.Tuple(elts=self._arg_accumulator, ctx=gast.Load()))\n            self._arg_accumulator = []\n\n    def add_arg(self, a):\n        self._arg_accumulator.append(a)\n\n    def add_stararg(self, a):\n        self._consume_args()\n        self._argspec.append(gast.Call(gast.Name('tuple', ctx=gast.Load(), annotation=None, type_comment=None), args=[a], keywords=()))\n\n    def finalize(self):\n        self._consume_args()\n        self._finalized = True\n\n    def to_ast(self):\n        assert self._finalized\n        if self._argspec:\n            result = self._argspec[0]\n            for i in range(1, len(self._argspec)):\n                result = gast.BinOp(result, gast.Add(), self._argspec[i])\n            return result\n        return gast.Tuple([], gast.Load())",
    "docstring": "Constructs a tuple representing the positional arguments in a call. Example (yes, it's legal Python 3): f(*args1, b, *args2, c, d) -> args1 + (b,) + args2 + (c, d)",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\autograph\\converters\\call_trees.py",
    "ast_data": "ClassDef name:_ArgTemplateBuilder FunctionDef name:__init__ arg:self arguments arg Assign Assign Assign FunctionDef name:_consume_args arg:self arguments arg If Call Call Call Assign FunctionDef name:add_arg arg:self arg:a arguments arg arg Call FunctionDef name:add_stararg arg:self arg:a arguments arg arg Call Call Call Call Call FunctionDef name:finalize arg:self arguments arg Call Assign FunctionDef name:to_ast arg:self arguments arg If Assign For Call Call Assign Call Call Return return:yes Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "FXGraphExtractor",
    "source_code": "class FXGraphExtractor(abc.ABC):\n\n    def __init__(self) -> None:\n        super().__init__()\n        self.input_adapter: io_adapter.InputAdapter = io_adapter.InputAdapter()\n        self.output_adapter: io_adapter.OutputAdapter = io_adapter.OutputAdapter()\n\n    @abc.abstractmethod\n    def generate_fx(self, options: ResolvedExportOptions, model: torch.nn.Module | Callable, model_args: Sequence[Any], model_kwargs: Mapping[str, Any]) -> torch.fx.GraphModule:\n        ...\n\n    @abc.abstractmethod\n    def pre_export_passes(self, options: ResolvedExportOptions, original_model: torch.nn.Module | Callable, fx_module: torch.fx.GraphModule, fx_module_args: Sequence[Any]):\n        ...",
    "docstring": "Abstract interface for FX graph extractor engines. This class isolates FX extraction logic from the rest of the export logic. That allows a single ONNX exporter that can leverage different FX graphs.",
    "type": "class",
    "file_path": "pytorch\\torch\\onnx\\_internal\\_exporter_legacy.py",
    "ast_data": "ClassDef name:FXGraphExtractor FunctionDef name:__init__ arg:self arguments arg Call Call Call Call FunctionDef name:generate_fx arg:self arg:options arg:model arg:model_args arg:model_kwargs arguments arg arg arg arg arg FunctionDef name:pre_export_passes arg:self arg:options arg:original_model arg:fx_module arg:fx_module_args arguments arg arg arg arg arg"
  },
  {
    "library": "tensorflow",
    "name": "_satisfied_at_timestamp",
    "source_code": "def _satisfied_at_timestamp(self, device_name, pending, timestamp, start_i=0):\n    if not pending:\n        return True\n    for datum in self._dump_tensor_data[device_name][start_i:]:\n        if datum.timestamp > timestamp:\n            break\n        if datum.timestamp == timestamp and (datum.node_name, datum.output_slot) in pending:\n            pending.remove((datum.node_name, datum.output_slot))\n            if not pending:\n                return True\n    return not pending",
    "docstring": "Determine whether pending inputs are satisfied at given timestamp. Note: This method mutates the input argument \"pending\". Args: device_name: (str) device name. pending: A list of 2-tuple (node_name, output_slot): the dependencies to check. timestamp: (int) the timestamp in question. start_i: (int) the index in self._dump_tensor_data to start searching for the timestamp. Returns: (bool) Whether all the dependencies in pending are satisfied at the timestamp. If pending is empty to begin with, return True.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\debug\\lib\\debug_data.py",
    "ast_data": "FunctionDef name:_satisfied_at_timestamp arg:self arg:device_name arg:pending arg:timestamp arg:start_i arguments arg arg arg arg arg If Return return:yes For If Compare If BoolOp Compare Compare Call If Return return:yes Return return:yes"
  },
  {
    "library": "kornia",
    "name": "fx",
    "source_code": "@property\ndef fx(self) -> Tensor:\n    return self.rectified_left_camera[..., 0, 0]",
    "docstring": "Return the focal length in the x-direction. Note that the focal lengths of the rectified left and right camera are assumed to be equal. Returns: tensor of shape :math:",
    "type": "method",
    "file_path": "kornia\\kornia\\geometry\\camera\\stereo.py",
    "ast_data": "FunctionDef name:fx arg:self arguments arg Return return:yes"
  },
  {
    "library": "pandas",
    "name": "slice_block_rows",
    "source_code": "@final\ndef slice_block_rows(self, slicer: slice) -> Self:\n    new_values = self.values[slicer]\n    return type(self)(new_values, self._mgr_locs, ndim=self.ndim, refs=self.refs)",
    "docstring": "Perform __getitem__-like specialized to slicing along index.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\internals\\blocks.py",
    "ast_data": "FunctionDef name:slice_block_rows arg:self arg:slicer arguments arg arg Assign Return return:yes Call Call"
  },
  {
    "library": "scipy",
    "name": "_cov2wt",
    "source_code": "def _cov2wt(self, cov):\n    from scipy.linalg import inv\n    if len(cov.shape) == 2:\n        return inv(cov)\n    else:\n        weights = np.zeros(cov.shape, float)\n        for i in range(cov.shape[-1]):\n            weights[:, :, i] = inv(cov[:, :, i])\n        return weights",
    "docstring": "Convert covariance matrix(-ices) to weights.",
    "type": "method",
    "file_path": "scipy\\scipy\\odr\\_odrpack.py",
    "ast_data": "FunctionDef name:_cov2wt arg:self arg:cov arguments arg arg If Compare Call Return return:yes Call Assign Call For Call Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, dtensor_components: Tuple[tensor.Tensor], global_element_spec: tensor_spec.TensorSpec, layouts: Any):\n    [self._iterator_resource_dtensor] = dtensor_components\n    self._global_element_spec = global_element_spec\n    self._layouts = layouts\n    self._layouts_str = nest.map_structure(lambda layout: layout.to_string(), layouts)\n    super().__init__(components=dtensor_components, element_spec=global_element_spec)",
    "docstring": "Initializes a distributed iterator for DTensor datasets. This iterator encapsulates tf.data iterators for the underlying devices, and treats it as a packed DTensor of iterator resource tensors. Args: dtensor_components: a tuple containing the underlying iterator resources packed into a DTensor. This is expected to be a tuple with a single element. global_element_spec: the underlying dataset's element spec from a global view. layouts: a structure of DTensor layouts to be applied to the elements returned by the underlying iterators. This can be a single layout or (possibly nested) tuples or dictionaries of layouts, and the structure must match the structure of the iterator elements.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\dtensor\\python\\input_util.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:dtensor_components arg:global_element_spec arg:layouts arguments arg arg arg arg Assign Assign Assign Assign Call arguments arg Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "reparameterization_type",
    "source_code": "@property\ndef reparameterization_type(self):\n    return self._reparameterization_type",
    "docstring": "Describes how samples from the distribution are reparameterized. Currently this is one of the static instances or . Returns: An instance of .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\distributions\\distribution.py",
    "ast_data": "FunctionDef name:reparameterization_type arg:self arguments arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_process_python_constants",
    "source_code": "def _process_python_constants(signature: _schemas.OpSignature, named_inputs: dict[str, AllowedArgType], type_binding: Mapping[_schemas.TypeConstraintParam, ir.TypeProtocol], constant_farm: dict[tuple[bool | int | float | str | tuple[int] | tuple[float], ir.DataType], ir.Value], opset: onnxscript.values.Opset) -> dict[str, ir.Value | None]:\n    for name, arg in named_inputs.items():\n        param = signature.params_map[name]\n        assert isinstance(param, _schemas.Parameter), f'Expected Parameter, got {type(param)}'\n        if isinstance(arg, ir.Value):\n            continue\n        if isinstance(arg, Sequence) and len(arg) > 0 and any((isinstance(val, ir.Value) for val in arg)):\n            continue\n        if param.variadic:\n            continue\n        if _allowed_types_are_sequence_types(param.type_constraint.allowed_types):\n            continue\n        dtype = _determine_input_dtype(param, arg, type_binding)\n        if arg is None:\n            constant_value = None\n        elif isinstance(arg, (ir.Tensor, ir.TensorProtocol)):\n            constant_value = opset.Constant(value=arg)\n        else:\n            constant_value = _get_or_create_constant(constant_farm, arg, dtype, opset)\n        named_inputs[param.name] = constant_value\n    return named_inputs",
    "docstring": "Convert Python constants to Constant nodes and list to Sequence nodes based on the dtype information. The added constants will be replacing values in named_inputs in place. Args: signature: The OpSignature for the node. named_inputs: The mapping of parameter names to their arguments. type_binding: A mapping of Constraint names to ir.DataType. constant_farm: A dictionary of {(py_value, ir.DataType): ir.Value} to store the deduplicated constants. opset: The Opset to use for creating Constant nodes. Returns: A mapping of parameter names to Python constants converted to constant Nodes.",
    "type": "function",
    "file_path": "pytorch\\torch\\onnx\\_internal\\exporter\\_building.py",
    "ast_data": "FunctionDef name:_process_python_constants arg:signature arg:named_inputs arg:type_binding arg:constant_farm arg:opset arguments arg arg arg arg arg For Call Assign Call Call If Call If BoolOp Call Compare Call Call Call If If Call Assign Call If Compare Assign If Call Assign Call Assign Call Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "strip_graph_default_valued_attrs",
    "source_code": "def strip_graph_default_valued_attrs(meta_graph_def):\n    op_name_to_function = {}\n    for function_def in meta_graph_def.graph_def.library.function:\n        op_name_to_function[function_def.signature.name] = function_def\n\n    def _strip_node_default_valued_attrs(node_def):\n        if node_def.op in op_name_to_function:\n            return\n        op_def = op_def_registry.get(node_def.op)\n        if op_def is None:\n            return\n        attrs_to_strip = set()\n        for attr_name, attr_value in node_def.attr.items():\n            if _is_default_attr_value(op_def, attr_name, attr_value):\n                attrs_to_strip.add(attr_name)\n        for attr in attrs_to_strip:\n            del node_def.attr[attr]\n    for node_def in meta_graph_def.graph_def.node:\n        _strip_node_default_valued_attrs(node_def)\n    for function_def in meta_graph_def.graph_def.library.function:\n        for function_node_def in function_def.node_def:\n            _strip_node_default_valued_attrs(function_node_def)\n    meta_graph_def.meta_info_def.stripped_default_attrs = True",
    "docstring": "Strips default valued attributes for node defs in given MetaGraphDef. This method also sets in the given proto to True. Args: meta_graph_def: protocol buffer Returns: None.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\meta_graph.py",
    "ast_data": "FunctionDef name:strip_graph_default_valued_attrs arg:meta_graph_def arguments arg Assign For Assign FunctionDef name:_strip_node_default_valued_attrs arg:node_def arguments arg If Compare Return return:no Assign Call If Compare Return return:no Assign Call For Call If Call Call For For Call For For Call Assign"
  },
  {
    "library": "tensorflow",
    "name": "get_registered_schemes",
    "source_code": "@tf_export('io.gfile.get_registered_schemes')\ndef get_registered_schemes():\n    return _pywrap_file_io.GetRegisteredSchemes()",
    "docstring": "Returns the currently registered filesystem schemes. The APIs, in addition to accepting traditional filesystem paths, also accept file URIs that begin with a scheme. For example, the local filesystem path can also be addressed as . In this case, the scheme is , followed by and then the path, according to [URI syntax]( This function returns the currently registered schemes that will be recognized by APIs. This includes both built-in schemes and those registered by other TensorFlow filesystem implementations, for example those provided by [TensorFlow I/O]( The empty string is always included, and represents the \"scheme\" for regular local filesystem paths. Returns: List of string schemes, e.g. , in arbitrary order. Raises: errors.OpError: If the operation fails.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\lib\\io\\file_io.py",
    "ast_data": "FunctionDef name:get_registered_schemes arguments Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "overhead_experiment",
    "source_code": "def overhead_experiment(*args, model_iter_fn):\n    return speedup_experiment(*args, model_iter_fn)",
    "docstring": "Measure overheads of TorchDynamo by running with no backend (only eager+FX), and reporting speedup/slowdown over eager. Writes to ./overheads.csv",
    "type": "function",
    "file_path": "pytorch\\benchmarks\\dynamo\\common.py",
    "ast_data": "FunctionDef name:overhead_experiment arguments arg arg Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "ensure_str",
    "source_code": "def ensure_str(value: bytes | Any) -> str:\n    if isinstance(value, bytes):\n        value = value.decode('utf-8')\n    elif not isinstance(value, str):\n        value = str(value)\n    return value",
    "docstring": "Ensure that bytes and non-strings get converted into `` objects.",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\dtypes\\common.py",
    "ast_data": "FunctionDef name:ensure_str arg:value arguments arg If Call Assign Call If Call Assign Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "suppress_guards",
    "source_code": "def suppress_guards(self) -> _GeneratorContextManager[None]:\n    return _suppress_guards(self)",
    "docstring": "Context manager to ignore all guards generated inside",
    "type": "method",
    "file_path": "pytorch\\torch\\fx\\experimental\\symbolic_shapes.py",
    "ast_data": "FunctionDef name:suppress_guards arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "extend",
    "source_code": "def extend(self, values):\n    self._check_external_modification()\n    super().extend(values)\n    self._update_snapshot()",
    "docstring": "Add a sequence of trackable values.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\trackable\\data_structures.py",
    "ast_data": "FunctionDef name:extend arg:self arg:values arguments arg arg Call Call Call Call"
  },
  {
    "library": "django",
    "name": "flatten_fieldsets",
    "source_code": "def flatten_fieldsets(fieldsets):\n    field_names = []\n    for name, opts in fieldsets:\n        field_names.extend(flatten(opts['fields']))\n    return field_names",
    "docstring": "Return a list of field names from an admin fieldsets structure.",
    "type": "function",
    "file_path": "django\\django\\contrib\\admin\\utils.py",
    "ast_data": "FunctionDef name:flatten_fieldsets arg:fieldsets arguments arg Assign For Call Call Return return:yes"
  },
  {
    "library": "cryptography",
    "name": "name",
    "source_code": "@property\n@abc.abstractmethod\ndef name(self) -> str:\n    pass",
    "docstring": "A string naming this mode (e.g. \"AES\", \"Camellia\").",
    "type": "method",
    "file_path": "cryptography\\src\\cryptography\\hazmat\\primitives\\_cipheralgorithm.py",
    "ast_data": "FunctionDef name:name arg:self arguments arg"
  },
  {
    "library": "django",
    "name": "_get_single_internal",
    "source_code": "def _get_single_internal(self, index):\n    if index == 0:\n        return capi.get_extring(self.ptr)\n    else:\n        return capi.get_intring(self.ptr, index - 1)",
    "docstring": "Return the ring at the specified index. The first index, 0, will always return the exterior ring. Indices > 0 will return the interior ring at the given index (e.g., poly[1] and poly[2] would return the first and second interior ring, respectively). CAREFUL: Internal/External are not the same as Interior/Exterior! Return a pointer from the existing geometries for use internally by the object's methods. _get_single_external() returns a clone of the same geometry for use by external code.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\geos\\polygon.py",
    "ast_data": "FunctionDef name:_get_single_internal arg:self arg:index arguments arg arg If Compare Return return:yes Call Return return:yes Call"
  },
  {
    "library": "kornia",
    "name": "forward",
    "source_code": "@torch.inference_mode()\ndef forward(self, images: Union[Tensor, list[Tensor]]) -> Union[Tensor, list[Tensor]]:\n    images, image_sizes = self.pre_processor(images)\n    out_images = self.model(images)\n    return self.post_processor(out_images, image_sizes)",
    "docstring": "Forward pass of the edge detection model. Args: images: If list of RGB images. Each image is a Tensor with shape :math:. If Tensor, a Tensor with shape :math:. Returns: output tensor.",
    "type": "method",
    "file_path": "kornia\\kornia\\models\\edge_detection\\base.py",
    "ast_data": "FunctionDef name:forward arg:self arg:images arguments arg arg Assign Call Assign Call Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "barrier",
    "source_code": "@_exception_logger\ndef barrier(group: Optional[ProcessGroup]=GroupMember.WORLD, async_op=False, device_ids=None):\n    group = group or _get_default_group()\n    if _rank_not_in_group(group):\n        _warn_not_in_group('barrier')\n        return\n    opts = BarrierOptions()\n    opts.asyncOp = async_op\n    device = torch._C._get_accelerator()\n    if isinstance(device_ids, list):\n        opts.device_ids = device_ids\n        opts.device = torch.device(device.type, device_ids[0])\n    elif getattr(group, 'bound_device_id', None) is not None:\n        opts.device = group.bound_device_id\n    elif device.type == 'cpu' or _get_object_coll_device(group) == 'cpu':\n        opts.device = torch.device('cpu')\n    else:\n        opts.device = device\n        warnings.warn('No device id is provided via `init_process_group` or `barrier `. Using the current device set by the user. ')\n    work = group.barrier(opts=opts)\n    if async_op:\n        return work\n    elif work is not None:\n        work.wait()",
    "docstring": "Synchronize all processes. This collective blocks processes until the whole group enters this function, if async_op is False, or if async work handle is called on wait(). Args: group (ProcessGroup, optional): The process group to work on. If None, the default process group will be used. async_op (bool, optional): Whether this op should be an async op device_ids ([int], optional): List of device/GPU ids. Only one id is expected. Returns: Async work handle, if async_op is set to True. None, if not async_op or if not part of the group .. note:: now blocks the cpu thread till the completion of the barrier collective.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\distributed_c10d.py",
    "ast_data": "FunctionDef name:barrier arg:group arg:async_op arg:device_ids arguments arg arg arg Assign BoolOp Call If Call Call Return return:no Assign Call Assign Assign Call If Call Assign Assign Call If Compare Call Assign If BoolOp Compare Compare Call Assign Call Assign Call Assign Call If Return return:yes If Compare Call"
  },
  {
    "library": "kornia",
    "name": "__pow__",
    "source_code": "def __pow__(self, t: float) -> 'Quaternion':\n    theta = self.polar_angle[..., None]\n    vec_norm = self.vec.norm(dim=-1, keepdim=True)\n    n = where(vec_norm != 0, self.vec / vec_norm, self.vec * 0)\n    w = (t * theta).cos()\n    xyz = (t * theta).sin() * n\n    return Quaternion(concatenate((w, xyz), -1))",
    "docstring": "Return the power of a quaternion raised to exponent t. Args: t: raised exponent. Example: >>> q = Quaternion(tensor([1., .5, 0., 0.])) >>> q_pow = q**2",
    "type": "method",
    "file_path": "kornia\\kornia\\geometry\\quaternion.py",
    "ast_data": "FunctionDef name:__pow__ arg:self arg:t arguments arg arg Assign Assign Call Assign Call Compare Assign Call Assign Call Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "get_custom_objects",
    "source_code": "def get_custom_objects():\n    return _GLOBAL_CUSTOM_OBJECTS",
    "docstring": "Retrieves a live reference to the global dictionary of custom objects. Updating and clearing custom objects using is preferred, but can be used to directly access the current collection of custom objects. Example: Returns: Global dictionary of names to classes ().",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\utils\\generic_utils.py",
    "ast_data": "FunctionDef name:get_custom_objects arguments Return return:yes"
  },
  {
    "library": "sphinx",
    "name": "start_of_file",
    "source_code": "class start_of_file(nodes.Element):\n    pass",
    "docstring": "Node to mark start of a new file, used in the LaTeX builder only.",
    "type": "class",
    "file_path": "sphinx\\sphinx\\addnodes.py",
    "ast_data": "ClassDef name:start_of_file"
  },
  {
    "library": "pandas",
    "name": "offset",
    "source_code": "@property\n@abstractmethod\ndef offset(self) -> int:\n    pass",
    "docstring": "Offset of first element. May be > 0 if using chunks; for example for a column with N chunks of equal size M (only the last chunk may be shorter), ``.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\interchange\\dataframe_protocol.py",
    "ast_data": "FunctionDef name:offset arg:self arguments arg"
  },
  {
    "library": "django",
    "name": "write",
    "source_code": "def write(self, geom):\n    return wkt_writer_write(self.ptr, geom.ptr)",
    "docstring": "Return the WKT representation of the given geometry.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\geos\\prototypes\\io.py",
    "ast_data": "FunctionDef name:write arg:self arg:geom arguments arg arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "LinearReLU",
    "source_code": "class LinearReLU(nnqd.Linear):\n    _FLOAT_MODULE = nni.LinearReLU\n\n    def __init__(self, in_features, out_features, bias=True, dtype=torch.qint8):\n        super().__init__(in_features, out_features, bias, dtype)\n\n    def forward(self, x: torch.Tensor) -> torch.Tensor:\n        if self._packed_params.dtype == torch.qint8:\n            Y = torch.ops.quantized.linear_relu_dynamic(x, self._packed_params._packed_params, reduce_range=True)\n        elif self._packed_params.dtype == torch.float16:\n            Y = torch.ops.quantized.linear_relu_dynamic_fp16(x, self._packed_params._packed_params)\n        else:\n            raise RuntimeError('Unsupported dtype on dynamic quantized linear relu!')\n        return Y.to(x.dtype)\n\n    def _get_name(self):\n        return 'DynamicQuantizedLinearReLU'\n\n    @classmethod\n    def from_float(cls, mod, use_precomputed_fake_quant=False):\n        return super().from_float(mod, use_precomputed_fake_quant=use_precomputed_fake_quant)\n\n    @classmethod\n    def from_reference(cls, ref_qlinear_relu):\n        return super().from_reference(ref_qlinear_relu[0])",
    "docstring": "A LinearReLU module fused from Linear and ReLU modules that can be used for dynamic quantization. Supports both, FP16 and INT8 quantization. We adopt the same interface as :class:. Attributes: Same as torch.ao.nn.quantized.dynamic.Linear Examples:: >>> # xdoctest: +SKIP >>> m = nn.intrinsic.quantized.dynamic.LinearReLU(20, 30) >>> input = torch.randn(128, 20) >>> output = m(input) >>> print(output.size()) torch.Size([128, 30])",
    "type": "class",
    "file_path": "pytorch\\torch\\ao\\nn\\intrinsic\\quantized\\dynamic\\modules\\linear_relu.py",
    "ast_data": "ClassDef name:LinearReLU Assign FunctionDef name:__init__ arg:self arg:in_features arg:out_features arg:bias arg:dtype arguments arg arg arg arg arg Call Call FunctionDef name:forward arg:self arg:x arguments arg arg If Compare Assign Call If Compare Assign Call Raise Call Return return:yes Call FunctionDef name:_get_name arg:self arguments arg Return return:yes FunctionDef name:from_float arg:cls arg:mod arg:use_precomputed_fake_quant arguments arg arg arg Return return:yes Call Call FunctionDef name:from_reference arg:cls arg:ref_qlinear_relu arguments arg arg Return return:yes Call Call"
  },
  {
    "library": "pandas",
    "name": "notna",
    "source_code": "@final\ndef notna(self) -> npt.NDArray[np.bool_]:\n    return ~self.isna()",
    "docstring": "Detect existing (non-missing) values. Return a boolean same-sized object indicating if the values are not NA. Non-missing values get mapped to `numpy.infnumpy.NaN` values. Returns ------- numpy.ndarray[bool] Boolean array to indicate which entries are not NA. See Also -------- Index.notnull : Alias of notna. Index.isna: Inverse of notna. notna : Top-level notna. Examples -------- Show which entries in an Index are not NA. The result is an array. >>> idx = pd.Index([5.2, 6.0, np.nan]) >>> idx Index([5.2, 6.0, nan], dtype='float64') >>> idx.notna() array([ True, True, False]) Empty strings are not considered NA values. None is considered a NA value. >>> idx = pd.Index([\"black\", \"\", \"red\", None]) >>> idx Index(['black', '', 'red', None], dtype='object') >>> idx.notna() array([ True, True, True, False])",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\indexes\\base.py",
    "ast_data": "FunctionDef name:notna arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "get_slot",
    "source_code": "def get_slot(self, *args, **kwargs):\n    return self._opt.get_slot(*args, **kwargs)",
    "docstring": "Return a slot named \"name\" created for \"var\" by the Optimizer. This simply wraps the get_slot() from the actual optimizer. Args: *args: Arguments for get_slot(). **kwargs: Keyword arguments for get_slot(). Returns: The for the slot if it was created, otherwise.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\training\\sync_replicas_optimizer.py",
    "ast_data": "FunctionDef name:get_slot arg:self arguments arg arg arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "dump",
    "source_code": "def dump(self):\n    return self.root",
    "docstring": "Return the root node of Trie.",
    "type": "method",
    "file_path": "pytorch\\torch\\utils\\hipify\\hipify_python.py",
    "ast_data": "FunctionDef name:dump arg:self arguments arg Return return:yes"
  },
  {
    "library": "django",
    "name": "disjoint",
    "source_code": "def disjoint(self, other):\n    return self._topology(capi.ogr_disjoint, other)",
    "docstring": "Return True if this geometry and the other are spatially disjoint.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\gdal\\geometries.py",
    "ast_data": "FunctionDef name:disjoint arg:self arg:other arguments arg arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "get_logdir",
    "source_code": "def get_logdir(self):\n    return self._logdir",
    "docstring": "Returns the directory where event file will be written.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\summary\\writer\\event_file_writer.py",
    "ast_data": "FunctionDef name:get_logdir arg:self arguments arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_register_erase_node_hook",
    "source_code": "def _register_erase_node_hook(self, f):\n    assert callable(f), 'erase_node hook must be a callable.'\n    self._erase_node_hooks.append(f)",
    "docstring": "Takes a callable which will be called after we erase a node. The callable takes the node that is being erased as input and returns None.",
    "type": "method",
    "file_path": "pytorch\\torch\\fx\\graph_module.py",
    "ast_data": "FunctionDef name:_register_erase_node_hook arg:self arg:f arguments arg arg Call Call"
  },
  {
    "library": "django",
    "name": "__init__",
    "source_code": "def __init__(self, feat, index):\n    self._feat = feat\n    self._index = index\n    fld_ptr = capi.get_feat_field_defn(feat.ptr, index)\n    if not fld_ptr:\n        raise GDALException('Cannot create OGR Field, invalid pointer given.')\n    self.ptr = fld_ptr\n    self.__class__ = OGRFieldTypes[self.type]",
    "docstring": "Initialize on the feature object and the integer index of the field within the feature.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\gdal\\field.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:feat arg:index arguments arg arg arg Assign Assign Assign Call If Raise Call Assign Assign"
  },
  {
    "library": "tensorflow",
    "name": "_convert_model_from_object_to_bytearray",
    "source_code": "def _convert_model_from_object_to_bytearray(model_object):\n    builder = flatbuffers.Builder(1024)\n    model_offset = model_object.Pack(builder)\n    builder.Finish(model_offset, file_identifier=_TFLITE_FILE_IDENTIFIER)\n    return bytes(builder.Output())",
    "docstring": "Converts a tflite model from a parsable object into a bytearray.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\lite\\python\\util.py",
    "ast_data": "FunctionDef name:_convert_model_from_object_to_bytearray arg:model_object arguments arg Assign Call Assign Call Call Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "BoolGaugeCell",
    "source_code": "class BoolGaugeCell(object):\n    __slots__ = ['_cell']\n\n    def __init__(self, cell):\n        self._cell = cell\n\n    def set(self, value):\n        pywrap_tfe.TFE_MonitoringBoolGaugeCellSet(self._cell, value)\n\n    def value(self):\n        return pywrap_tfe.TFE_MonitoringBoolGaugeCellValue(self._cell)",
    "docstring": "A single boolean value stored in an .",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\monitoring.py",
    "ast_data": "ClassDef name:BoolGaugeCell Assign FunctionDef name:__init__ arg:self arg:cell arguments arg arg Assign FunctionDef name:set arg:self arg:value arguments arg arg Call FunctionDef name:value arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "django",
    "name": "fromkeys",
    "source_code": "@classmethod\ndef fromkeys(cls, iterable, value='', mutable=False, encoding=None):\n    q = cls('', mutable=True, encoding=encoding)\n    for key in iterable:\n        q.appendlist(key, value)\n    if not mutable:\n        q._mutable = False\n    return q",
    "docstring": "Return a new QueryDict with keys (may be repeated) from an iterable and values from value.",
    "type": "method",
    "file_path": "django\\django\\http\\request.py",
    "ast_data": "FunctionDef name:fromkeys arg:cls arg:iterable arg:value arg:mutable arg:encoding arguments arg arg arg arg arg Assign Call For Call If Assign Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "get_aspect",
    "source_code": "def get_aspect(self):\n    return self._aspect",
    "docstring": "Return the aspect ratio of the Axes scaling. This is either \"auto\" or a float giving the ratio of y/x-scale.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\axes\\_base.py",
    "ast_data": "FunctionDef name:get_aspect arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_AssertCardinalityDataset",
    "source_code": "class _AssertCardinalityDataset(dataset_ops.UnaryUnchangedStructureDataset):\n\n    def __init__(self, input_dataset, expected_cardinality):\n        self._input_dataset = input_dataset\n        self._expected_cardinality = ops.convert_to_tensor(expected_cardinality, dtype=dtypes.int64, name='expected_cardinality')\n        variant_tensor = ged_ops.assert_cardinality_dataset(self._input_dataset._variant_tensor, self._expected_cardinality, **self._flat_structure)\n        super(_AssertCardinalityDataset, self).__init__(input_dataset, variant_tensor)",
    "docstring": "A that assert the cardinality of its input.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\data\\experimental\\ops\\cardinality.py",
    "ast_data": "ClassDef name:_AssertCardinalityDataset FunctionDef name:__init__ arg:self arg:input_dataset arg:expected_cardinality arguments arg arg arg Assign Assign Call Assign Call Call Call"
  },
  {
    "library": "scipy",
    "name": "trace_to_dicts",
    "source_code": "def trace_to_dicts(target, trace, options, pid, tid):\n    for event in read_events(trace, options):\n        ninja_time = (target.end - target.start) * 1000\n        if event['dur'] > ninja_time:\n            print('Inconsistent timing found (clang time > ninja time). Please ensure that timings are from consistent builds.')\n            sys.exit(1)\n        event['pid'] = pid\n        event['tid'] = tid\n        event['ts'] += target.start * 1000\n        yield event",
    "docstring": "Read a file-like object |trace| containing -ftime-trace data and yields about:tracing dict per eligible event in that log.",
    "type": "function",
    "file_path": "scipy\\tools\\ninjatracing.py",
    "ast_data": "FunctionDef name:trace_to_dicts arg:target arg:trace arg:options arg:pid arg:tid arguments arg arg arg arg arg For Call Assign If Compare Call Call Assign Assign"
  },
  {
    "library": "sphinx",
    "name": "build_specific",
    "source_code": "@final\ndef build_specific(self, filenames: Sequence[Path]) -> None:\n    docnames: list[str] = []\n    filenames = [Path(filename).resolve() for filename in filenames]\n    for filename in filenames:\n        if not filename.is_file():\n            logger.warning(__('file %r given on command line does not exist, '), filename)\n            continue\n        if not filename.is_relative_to(self.srcdir):\n            logger.warning(__('file %r given on command line is not under the source directory, ignoring'), filename)\n            continue\n        docname = self.env.path2doc(filename)\n        if not docname:\n            logger.warning(__('file %r given on command line is not a valid document, ignoring'), filename)\n            continue\n        docnames.append(docname)\n    self.compile_specific_catalogs(filenames)\n    self.build(docnames, summary=__('%d source files given on command line') % len(docnames), method='specific')",
    "docstring": "Only rebuild as much as needed for changes in the *filenames*.",
    "type": "method",
    "file_path": "sphinx\\sphinx\\builders\\__init__.py",
    "ast_data": "FunctionDef name:build_specific arg:self arg:filenames arguments arg arg Assign Call Call For If Call Call Call If Call Call Call Assign Call If Call Call Call Call Call Call Call"
  },
  {
    "library": "scipy",
    "name": "Output",
    "source_code": "class Output:\n\n    def __init__(self, output):\n        self.beta = output[0]\n        self.sd_beta = output[1]\n        self.cov_beta = output[2]\n        if len(output) == 4:\n            self.__dict__.update(output[3])\n            self.stopreason = _report_error(self.info)\n\n    def pprint(self):\n        print('Beta:', self.beta)\n        print('Beta Std Error:', self.sd_beta)\n        print('Beta Covariance:', self.cov_beta)\n        if hasattr(self, 'info'):\n            print('Residual Variance:', self.res_var)\n            print('Inverse Condition #:', self.inv_condnum)\n            print('Reason(s) for Halting:')\n            for r in self.stopreason:\n                print(f'  {r}')",
    "docstring": "The Output class stores the output of an ODR run. Attributes ---------- beta : ndarray Estimated parameter values, of shape (q,). sd_beta : ndarray Standard deviations of the estimated parameters, of shape (p,). cov_beta : ndarray Covariance matrix of the estimated parameters, of shape (p,p). Note that this is not scaled by the residual variance , whereas is. This means `output.sd_betaxyinfo~scipy.odr.odr~scipy.odr.odr`.",
    "type": "class",
    "file_path": "scipy\\scipy\\odr\\_odrpack.py",
    "ast_data": "ClassDef name:Output FunctionDef name:__init__ arg:self arg:output arguments arg arg Assign Assign Assign If Compare Call Call Assign Call FunctionDef name:pprint arg:self arguments arg Call Call Call If Call Call Call Call For Call"
  },
  {
    "library": "django",
    "name": "copy_file",
    "source_code": "def copy_file(self, path, prefixed_path, source_storage):\n    if prefixed_path in self.copied_files:\n        return self.log(\"Skipping '%s' (already copied earlier)\" % path)\n    if not self.delete_file(path, prefixed_path, source_storage):\n        return\n    source_path = source_storage.path(path)\n    if self.dry_run:\n        self.log(\"Pretending to copy '%s'\" % source_path, level=1)\n    else:\n        self.log(\"Copying '%s'\" % source_path, level=2)\n        with source_storage.open(path) as source_file:\n            self.storage.save(prefixed_path, source_file)\n    self.copied_files.append(prefixed_path)",
    "docstring": "Attempt to copy `` with storage",
    "type": "method",
    "file_path": "django\\django\\contrib\\staticfiles\\management\\commands\\collectstatic.py",
    "ast_data": "FunctionDef name:copy_file arg:self arg:path arg:prefixed_path arg:source_storage arguments arg arg arg arg If Compare Return return:yes Call If Call Return return:no Assign Call If Call Call With Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "disable_saved_tensors_hooks",
    "source_code": "@contextlib.contextmanager\ndef disable_saved_tensors_hooks(error_message: str) -> Generator[None, None, None]:\n    maybe_prev_message = None\n    try:\n        maybe_prev_message = torch._C._autograd._saved_tensors_hooks_get_disabled_error_message()\n        torch._C._autograd._saved_tensors_hooks_disable(error_message)\n        yield\n    finally:\n        if maybe_prev_message is None:\n            torch._C._autograd._saved_tensors_hooks_enable()\n        else:\n            torch._C._autograd._saved_tensors_hooks_disable(maybe_prev_message)",
    "docstring": "Context-manager that disables the saved tensors default hooks feature. Useful for if you are creating a feature that does not work with saved tensors default hooks. Args: error_message (str): When saved tensors default hooks are used when they have been are disabled, a RuntimeError with this error message gets raised. Example:: >>> # xdoctest: +SKIP(failing) >>> message = \"saved tensors default hooks are disabled\" >>> with torch.autograd.graph.disable_saved_tensors_hooks(message): ... # Raises RuntimeError: saved tensors default hooks are disabled ... with torch.autograd.graph.save_on_cpu(): ... pass",
    "type": "function",
    "file_path": "pytorch\\torch\\autograd\\graph.py",
    "ast_data": "FunctionDef name:disable_saved_tensors_hooks arg:error_message arguments arg Assign Try Assign Call Call If Compare Call Call"
  },
  {
    "library": "kornia",
    "name": "SpatialGradient",
    "source_code": "class SpatialGradient(Module):\n    ONNX_DEFAULT_INPUTSHAPE: ClassVar[list[int]] = [-1, -1, -1, -1]\n    ONNX_DEFAULT_OUTPUTSHAPE: ClassVar[list[int]] = [-1, -1, 2, -1, -1]\n\n    def __init__(self, mode: str='sobel', order: int=1, normalized: bool=True) -> None:\n        super().__init__()\n        self.normalized: bool = normalized\n        self.order: int = order\n        self.mode: str = mode\n\n    def __repr__(self) -> str:\n        return f'{self.__class__.__name__}(order={self.order}, normalized={self.normalized}, mode={self.mode})'\n\n    def forward(self, input: Tensor) -> Tensor:\n        return spatial_gradient(input, self.mode, self.order, self.normalized)",
    "docstring": "Compute the first order image derivative in both x and y using a Sobel operator. Args: mode: derivatives modality, can be: or . order: the order of the derivatives. normalized: whether the output is normalized. Return: the sobel edges of the input feature map. Shape: - Input: :math: - Output: :math: Examples: >>> input = torch.rand(1, 3, 4, 4) >>> output = SpatialGradient()(input) # 1x3x2x4x4",
    "type": "class",
    "file_path": "kornia\\kornia\\filters\\sobel.py",
    "ast_data": "ClassDef name:SpatialGradient FunctionDef name:__init__ arg:self arg:mode arg:order arg:normalized arguments arg arg arg arg Call Call FunctionDef name:__repr__ arg:self arguments arg Return return:yes FunctionDef name:forward arg:self arg:input arguments arg arg Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "set_thetagrids",
    "source_code": "def set_thetagrids(self, angles, labels=None, fmt=None, **kwargs):\n    angles = self.convert_yunits(angles)\n    angles = np.deg2rad(angles)\n    self.set_xticks(angles)\n    if labels is not None:\n        self.set_xticklabels(labels)\n    elif fmt is not None:\n        self.xaxis.set_major_formatter(mticker.FormatStrFormatter(fmt))\n    for t in self.xaxis.get_ticklabels():\n        t._internal_update(kwargs)\n    return (self.xaxis.get_ticklines(), self.xaxis.get_ticklabels())",
    "docstring": "Set the theta gridlines in a polar plot. Parameters ---------- angles : tuple with floats, degrees The angles of the theta gridlines. labels : tuple with strings or None The labels to use at each theta gridline. The will be used if None. fmt : str or None Format string used in . For example '%f'. Note that the angle that is used is in radians. Returns ------- lines : list of The theta gridlines. labels : list of The tick labels. Other Parameters ---------------- **kwargs *kwargs* are optional properties for the labels. .. warning:: This only sets the properties of the current ticks. Ticks are not guaranteed to be persistent. Various operations can create, delete and modify the Tick instances. There is an imminent risk that these settings can get lost if you work on the figure further (including also panning/zooming on a displayed figure). Use instead if possible. See Also -------- .PolarAxes.set_rgrids .Axis.get_gridlines .Axis.get_ticklabels",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\projections\\polar.py",
    "ast_data": "FunctionDef name:set_thetagrids arg:self arg:angles arg:labels arg:fmt arguments arg arg arg arg arg Assign Call Assign Call Call If Compare Call If Compare Call Call For Call Call Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, min_shard_bytes=256 << 10, max_shards=1, bytes_per_string=16):\n    if min_shard_bytes < 1:\n        raise ValueError(f'Argument `min_shard_bytes` must be positive. Received: {min_shard_bytes}')\n    if max_shards < 1:\n        raise ValueError(f'Argument `max_shards` must be positive. Received: {max_shards}')\n    if bytes_per_string < 1:\n        raise ValueError(f'Argument `bytes_per_string` must be positive. Received: {bytes_per_string}')\n    self._min_shard_bytes = min_shard_bytes\n    self._max_shards = max_shards\n    self._bytes_per_string = bytes_per_string",
    "docstring": "Creates a new . Args: min_shard_bytes: Minimum bytes of each shard. Defaults to 256K. max_shards: Upper bound on the number of shards. Defaults to 1. bytes_per_string: If the partition value is of type string, this provides an estimate of how large each string is.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\sharded_variable.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:min_shard_bytes arg:max_shards arg:bytes_per_string arguments arg arg arg arg If Compare Raise Call If Compare Raise Call If Compare Raise Call Assign Assign Assign"
  },
  {
    "library": "matplotlib",
    "name": "get_minor_locator",
    "source_code": "def get_minor_locator(self):\n    return self.minor.locator",
    "docstring": "Get the locator of the minor ticker.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\axis.py",
    "ast_data": "FunctionDef name:get_minor_locator arg:self arguments arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_get_overloaded_args",
    "source_code": "def _get_overloaded_args(relevant_args: Iterable[Any], get_type_fn: Optional[Callable[[Any], type]]=None) -> list[Any]:\n    if get_type_fn is None:\n        get_type_fn = type\n    if not torch._C._is_torch_function_enabled():\n        return []\n    overloaded_types: set[type] = set()\n    overloaded_args: list[Any] = []\n    for arg in relevant_args:\n        arg_type = get_type_fn(arg)\n        if arg_type not in overloaded_types and hasattr(arg_type, '__torch_function__') and (arg_type.__torch_function__ != torch._C._disabled_torch_function_impl):\n            if overloaded_types:\n                overloaded_types.add(arg_type)\n                index = len(overloaded_args)\n                for i, old_arg in enumerate(overloaded_args):\n                    if issubclass(arg_type, get_type_fn(old_arg)):\n                        index = i\n                        break\n                overloaded_args.insert(index, arg)\n            else:\n                overloaded_types = {arg_type}\n                overloaded_args = [arg]\n    return overloaded_args",
    "docstring": "Returns a list of arguments on which to call __torch_function__. Checks arguments in relevant_args for __torch_function__ implementations, storing references to the arguments and their types in overloaded_args and overloaded_types in order of calling precedence. Only distinct types are considered. If a type is a subclass of another type it will have higher precedence, otherwise the precedence order is the same as the order of arguments in relevant_args, that is, from left-to-right in the argument list. The precedence-determining algorithm implemented in this function is described in _. See torch::append_overloaded_arg for the equivalent function in the C++ implementation. Parameters ---------- relevant_args : iterable of array-like Iterable of array-like arguments to check for __torch_function__ methods. get_type_fn : callable, optional Function to call on each argument in relevant_args to get its type. Returns ------- overloaded_args : list Arguments from relevant_args on which to call __torch_function__ methods, in the order in which they should be called. .. _NEP-0018:",
    "type": "function",
    "file_path": "pytorch\\torch\\overrides.py",
    "ast_data": "FunctionDef name:_get_overloaded_args arg:relevant_args arg:get_type_fn arguments arg arg If Compare Assign If Call Return return:no Call For Assign Call If BoolOp Compare Call Compare If Call Assign Call For Call If Call Call Assign Call Assign Assign Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_needs_obs_or_fq",
    "source_code": "def _needs_obs_or_fq(prev_output_dtype: Any, prev_output_is_dynamic: bool, cur_target_dtype: Any, cur_target_is_dynamic: bool, reuse_input_obs_or_fq: bool, is_zeroth_arg: bool=False) -> bool:\n    if cur_target_is_dynamic:\n        assert cur_target_dtype in _OBS_DTYPE_LIST, f'Expected cur_target_dtype to be torch.float, but got: {cur_target_dtype}'\n        assert prev_output_dtype not in _DO_NOT_OBS_DTYPE_LIST\n        return is_zeroth_arg\n    if reuse_input_obs_or_fq:\n        return False\n    if cur_target_dtype in _OBS_DTYPE_LIST:\n        return prev_output_dtype in _OBS_DTYPE_LIST + [torch.float] and cur_target_dtype != prev_output_dtype\n    return False",
    "docstring": "note: we will treat \"not specified\" as torch.float for now utility function that checks if we should insert an observer or fake quant node base on the requested dtype for the nodes from user is_zeroth_arg: we only dynamically quantize the first arg of the node right now this should be removed when we enable configuring dynamic quantization for a specific argument, this can be removed if we deprecate fx graph mode quantization",
    "type": "function",
    "file_path": "pytorch\\torch\\ao\\quantization\\fx\\prepare.py",
    "ast_data": "FunctionDef name:_needs_obs_or_fq arg:prev_output_dtype arg:prev_output_is_dynamic arg:cur_target_dtype arg:cur_target_is_dynamic arg:reuse_input_obs_or_fq arg:is_zeroth_arg arguments arg arg arg arg arg arg If Compare Compare Return return:yes If Return return:yes If Compare Return return:yes BoolOp Compare Compare Return return:yes"
  },
  {
    "library": "pandas",
    "name": "_write",
    "source_code": "def _write(self, to_write: str) -> None:\n    self.handles.handle.write(to_write.encode(self._encoding))",
    "docstring": "Helper to call encode before writing to file for Python 3 compat.",
    "type": "method",
    "file_path": "pandas\\pandas\\io\\stata.py",
    "ast_data": "FunctionDef name:_write arg:self arg:to_write arguments arg arg Call Call"
  },
  {
    "library": "pytorch",
    "name": "are_nodes_independent",
    "source_code": "def are_nodes_independent(nodes: list[Node]):\n    for i, j in itertools.combinations(nodes, 2):\n        if may_depend_on(i, j) or may_depend_on(j, i):\n            return False\n    return True",
    "docstring": "Check if all of the given nodes are pairwise-data independent. Arguments: nodes: The nodes to check for data dependencies. Returns: True if any pair in nodes has a data dependency.",
    "type": "function",
    "file_path": "pytorch\\torch\\fx\\experimental\\merge_matmul.py",
    "ast_data": "FunctionDef name:are_nodes_independent arg:nodes arguments arg For Call If BoolOp Call Call Return return:yes Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "shape_internal",
    "source_code": "def shape_internal(input, name=None, optimize=True, out_type=None):\n    with ops.name_scope(name, 'Shape', [input]) as name:\n        if isinstance(input, (sparse_tensor.SparseTensor, sparse_tensor.SparseTensorValue)):\n            if not out_type:\n                out_type = dtypes.int32\n            return gen_math_ops.cast(input.dense_shape, out_type)\n        else:\n            if not context.executing_eagerly():\n                input = ops.convert_to_tensor(input)\n                input_shape = input.get_shape()\n                if optimize and input_shape.is_fully_defined():\n                    if not out_type:\n                        return constant_op._tensor_shape_tensor_conversion_function(input_shape)\n                    return constant(input_shape.as_list(), out_type, name=name)\n            if not out_type:\n                out_type = dtypes.int32\n            return gen_array_ops.shape(input, name=name, out_type=out_type)",
    "docstring": "Returns the shape of a tensor. If is not specified and the shape is fully known, then we look at the dimension values to determine whether to return an int32 or int64 tensor. If the shape is not fully known, we default to int32. Args: input: A or . name: A name for the operation (optional). optimize: if true, encode the shape as a constant when possible. out_type: (Optional) The specified output type of the operation ( or ). Defaults to tf.int32. Returns: A of type .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\array_ops.py",
    "ast_data": "FunctionDef name:shape_internal arg:input arg:name arg:optimize arg:out_type arguments arg arg arg arg With Call If Call If Assign Return return:yes Call If Call Assign Call Assign Call If BoolOp Call If Return return:yes Call Return return:yes Call Call If Assign Return return:yes Call"
  },
  {
    "library": "django",
    "name": "_list_cache_files",
    "source_code": "def _list_cache_files(self):\n    return [os.path.join(self._dir, fname) for fname in glob.glob(f'*{self.cache_suffix}', root_dir=self._dir)]",
    "docstring": "Get a list of paths to all the cache files. These are all the files in the root cache dir that end on the cache_suffix.",
    "type": "method",
    "file_path": "django\\django\\core\\cache\\backends\\filebased.py",
    "ast_data": "FunctionDef name:_list_cache_files arg:self arguments arg Return return:yes Call Call"
  },
  {
    "library": "django",
    "name": "import_user_input",
    "source_code": "def import_user_input(self, user_input):\n    capi.from_user_input(self.ptr, force_bytes(user_input))",
    "docstring": "Import the Spatial Reference from the given user input string.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\gdal\\srs.py",
    "ast_data": "FunctionDef name:import_user_input arg:self arg:user_input arguments arg arg Call Call"
  },
  {
    "library": "tensorflow",
    "name": "devices",
    "source_code": "def devices(self):\n    return self._devices",
    "docstring": "List of the names of devices available to execute operations.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\context.py",
    "ast_data": "FunctionDef name:devices arg:self arguments arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "__new__",
    "source_code": "def __new__(cls, name: str):\n    if not isinstance(name, str):\n        raise ValueError('Backend constructor parameter must be string-ish')\n    value = getattr(Backend, name.upper(), Backend.UNDEFINED)\n    if value == Backend.UNDEFINED:\n        value = name.lower()\n    return value",
    "docstring": "Create and return a new instance of the class.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\distributed_c10d.py",
    "ast_data": "FunctionDef name:__new__ arg:cls arg:name arguments arg arg If Call Raise Call Assign Call Call If Compare Assign Call Return return:yes"
  },
  {
    "library": "numpy",
    "name": "count",
    "source_code": "def count(self, value: Any, /) -> int:\n    raise NotImplementedError",
    "docstring": "Return the number of occurrences of .",
    "type": "method",
    "file_path": "numpy\\numpy\\_typing\\_nested_sequence.py",
    "ast_data": "FunctionDef name:count arguments arg arg Raise"
  },
  {
    "library": "scikit-learn",
    "name": "_ica_par",
    "source_code": "def _ica_par(X, tol, g, fun_args, max_iter, w_init):\n    W = _sym_decorrelation(w_init)\n    del w_init\n    p_ = float(X.shape[1])\n    for ii in range(max_iter):\n        gwtx, g_wtx = g(np.dot(W, X), fun_args)\n        W1 = _sym_decorrelation(np.dot(gwtx, X.T) / p_ - g_wtx[:, np.newaxis] * W)\n        del gwtx, g_wtx\n        lim = max(abs(abs(np.einsum('ij,ij->i', W1, W)) - 1))\n        W = W1\n        if lim < tol:\n            break\n    else:\n        warnings.warn('FastICA did not converge. Consider increasing tolerance or the maximum number of iterations.', ConvergenceWarning)\n    return (W, ii + 1)",
    "docstring": "Parallel FastICA. Used internally by FastICA --main loop",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\decomposition\\_fastica.py",
    "ast_data": "FunctionDef name:_ica_par arg:X arg:tol arg:g arg:fun_args arg:max_iter arg:w_init arguments arg arg arg arg arg arg Assign Call Assign Call For Call Assign Call Call Assign Call Call Assign Call Call Call Call Assign If Compare Call Return return:yes"
  },
  {
    "library": "numpy",
    "name": "_dist_str",
    "source_code": "@staticmethod\ndef _dist_str(*args):\n\n    def to_str(arg):\n        if not isinstance(arg, str) and hasattr(arg, '__iter__'):\n            ret = []\n            for a in arg:\n                ret.append(to_str(a))\n            return '(' + ' '.join(ret) + ')'\n        return str(arg)\n    stack = inspect.stack()[2]\n    start = 'CCompilerOpt.%s[%d] : ' % (stack.function, stack.lineno)\n    out = ' '.join([to_str(a) for a in (*args,)])\n    return start + out",
    "docstring": "Return a string to print by log and errors.",
    "type": "method",
    "file_path": "numpy\\numpy\\distutils\\ccompiler_opt.py",
    "ast_data": "FunctionDef name:_dist_str arguments arg FunctionDef name:to_str arg:arg arguments arg If BoolOp Call Call Assign For Call Call Return return:yes Call Return return:yes Call Assign Call Assign Assign Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "process_inputs",
    "source_code": "def process_inputs(op_name, producer_version, keywords):\n    attr_protos, inputs, input_types, output_structure = _op_def_library_pybind.process_inputs(op_name, producer_version, keywords)\n    for k, attr in attr_protos.items():\n        attr_protos[k] = attr_value_pb2.AttrValue.FromString(attr)\n    return (attr_protos, inputs, input_types, output_structure)",
    "docstring": "Helper method to speed up in op_def_library.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\op_def_library_pybind.py",
    "ast_data": "FunctionDef name:process_inputs arg:op_name arg:producer_version arg:keywords arguments arg arg arg Assign Call For Call Assign Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_DependentProperty",
    "source_code": "class _DependentProperty(property, _Dependent):\n\n    def __init__(self, fn: Optional[Callable[..., Any]]=None, *, is_discrete: Optional[bool]=NotImplemented, event_dim: Optional[int]=NotImplemented) -> None:\n        super().__init__(fn)\n        self._is_discrete = is_discrete\n        self._event_dim = event_dim\n\n    def __call__(self, fn: Callable[..., Any]) -> '_DependentProperty':\n        return _DependentProperty(fn, is_discrete=self._is_discrete, event_dim=self._event_dim)",
    "docstring": "Decorator that extends @property to act like a constraint when called on a class and act like a property when called on an object. Example:: class Uniform(Distribution): def __init__(self, low, high): self.low = low self.high = high @constraints.dependent_property(is_discrete=False, event_dim=0) def support(self): return constraints.interval(self.low, self.high) Args: fn (Callable): The function to be decorated. is_discrete (bool): Optional value of `` attribute will raise a NotImplementedError.",
    "type": "class",
    "file_path": "pytorch\\torch\\distributions\\constraints.py",
    "ast_data": "ClassDef name:_DependentProperty FunctionDef name:__init__ arg:self arg:fn arguments arg arg arg arg Call Call Assign Assign FunctionDef name:__call__ arg:self arg:fn arguments arg arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, cluster_resolver=None):\n    if cluster_resolver is None:\n        cluster_resolver = tfconfig_cluster_resolver.TFConfigClusterResolver()\n    super(ParameterServerStrategyV1, self).__init__(ParameterServerStrategyExtended(self, cluster_resolver=cluster_resolver))\n    distribute_lib.distribution_strategy_gauge.get_cell('V1').set('ParameterServerStrategy')",
    "docstring": "Initializes this strategy with an optional . Args: cluster_resolver: Optional object. Defaults to a .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\parameter_server_strategy.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:cluster_resolver arguments arg arg If Compare Assign Call Call Call Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "GammaWithSoftplusConcentrationRate",
    "source_code": "class GammaWithSoftplusConcentrationRate(Gamma):\n\n    @deprecation.deprecated('2019-01-01', 'Use `tfd.Gamma(tf.nn.softplus(concentration), tf.nn.softplus(rate))` instead.', warn_once=True)\n    def __init__(self, concentration, rate, validate_args=False, allow_nan_stats=True, name='GammaWithSoftplusConcentrationRate'):\n        parameters = dict(locals())\n        with ops.name_scope(name, values=[concentration, rate]) as name:\n            super(GammaWithSoftplusConcentrationRate, self).__init__(concentration=nn.softplus(concentration, name='softplus_concentration'), rate=nn.softplus(rate, name='softplus_rate'), validate_args=validate_args, allow_nan_stats=allow_nan_stats, name=name)\n        self._parameters = parameters",
    "docstring": "with softplus of and .",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\distributions\\gamma.py",
    "ast_data": "ClassDef name:GammaWithSoftplusConcentrationRate FunctionDef name:__init__ arg:self arg:concentration arg:rate arg:validate_args arg:allow_nan_stats arg:name arguments arg arg arg arg arg arg Assign Call Call With Call Call Call Call Call Assign Call"
  },
  {
    "library": "pandas",
    "name": "is_platform_arm",
    "source_code": "def is_platform_arm() -> bool:\n    return platform.machine() in ('arm64', 'aarch64') or platform.machine().startswith('armv')",
    "docstring": "Checking if the running platform use ARM architecture. Returns ------- bool True if the running platform uses ARM architecture.",
    "type": "function",
    "file_path": "pandas\\pandas\\compat\\__init__.py",
    "ast_data": "FunctionDef name:is_platform_arm arguments Return return:yes BoolOp Compare Call Call Call"
  },
  {
    "library": "numpy",
    "name": "UnknownFortranRoutine",
    "source_code": "class UnknownFortranRoutine(FortranRoutine):\n    type = 'unknown'\n\n    def __init__(self, name):\n        FortranRoutine.__init__(self, name=name, filename='<unknown>')\n\n    def dependencies(self):\n        return []",
    "docstring": "Wrapper for a Fortran routine for which the corresponding file is not known.",
    "type": "class",
    "file_path": "numpy\\numpy\\linalg\\lapack_lite\\make_lite.py",
    "ast_data": "ClassDef name:UnknownFortranRoutine Assign FunctionDef name:__init__ arg:self arg:name arguments arg arg Call FunctionDef name:dependencies arg:self arguments arg Return return:no"
  },
  {
    "library": "kornia",
    "name": "adjust_gamma",
    "source_code": "def adjust_gamma(input: Tensor, gamma: Union[float, Tensor], gain: Union[float, Tensor]=1.0) -> Tensor:\n    if not isinstance(input, Tensor):\n        raise TypeError(f'Input type is not a Tensor. Got {type(input)}')\n    if not isinstance(gamma, (float, Tensor)):\n        raise TypeError(f'The gamma should be a positive float or Tensor. Got {type(gamma)}')\n    if not isinstance(gain, (float, Tensor)):\n        raise TypeError(f'The gain should be a positive float or Tensor. Got {type(gain)}')\n    if isinstance(gamma, float):\n        gamma = Tensor([gamma])\n    if isinstance(gain, float):\n        gain = Tensor([gain])\n    gamma = gamma.to(input.device).to(input.dtype)\n    gain = gain.to(input.device).to(input.dtype)\n    if (gamma < 0.0).any():\n        raise ValueError(f'Gamma must be non-negative. Got {gamma}')\n    if (gain < 0.0).any():\n        raise ValueError(f'Gain must be non-negative. Got {gain}')\n    for _ in range(len(input.shape) - len(gamma.shape)):\n        gamma = torch.unsqueeze(gamma, dim=-1)\n    for _ in range(len(input.shape) - len(gain.shape)):\n        gain = torch.unsqueeze(gain, dim=-1)\n    x_adjust: Tensor = gain * torch.pow(input, gamma)\n    out: Tensor = torch.clamp(x_adjust, 0.0, 1.0)\n    return out",
    "docstring": "Perform gamma correction on an image. .. image:: _static/img/adjust_contrast.png The input image is expected to be in the range of [0, 1]. Args: input: Image to be adjusted in the shape of :math:. gamma: Non negative real number, same as y\\gammay in the equation. gamma larger than 1 make the shadows darker, while gamma smaller than 1 make dark regions lighter. gain: The constant multiplier. Return: Adjusted image in the shape of :math:. .. note:: See a working example __. Example: >>> x = torch.ones(1, 1, 2, 2) >>> adjust_gamma(x, 1.0, 2.0) tensor([[[[1., 1.], [1., 1.]]]]) >>> x = torch.ones(2, 5, 3, 3) >>> y1 = torch.ones(2) * 1.0 >>> y2 = torch.ones(2) * 2.0 >>> adjust_gamma(x, y1, y2).shape torch.Size([2, 5, 3, 3])",
    "type": "function",
    "file_path": "kornia\\kornia\\enhance\\adjust.py",
    "ast_data": "FunctionDef name:adjust_gamma arg:input arg:gamma arg:gain arguments arg arg arg If Call Raise Call Call If Call Raise Call Call If Call Raise Call Call If Call Assign Call If Call Assign Call Assign Call Call Assign Call Call If Call Compare Raise Call If Call Compare Raise Call For Call Call Call Assign Call For Call Call Call Assign Call Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "float",
    "source_code": "def float(self):\n    return self._to(torch.float)",
    "docstring": "Casts this storage to float type.",
    "type": "method",
    "file_path": "pytorch\\torch\\storage.py",
    "ast_data": "FunctionDef name:float arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "django",
    "name": "template_localtime",
    "source_code": "def template_localtime(value, use_tz=None):\n    should_convert = isinstance(value, datetime) and (settings.USE_TZ if use_tz is None else use_tz) and (not is_naive(value)) and getattr(value, 'convert_to_local_time', True)\n    return localtime(value) if should_convert else value",
    "docstring": "Check if value is a datetime and converts it to local time if necessary. If use_tz is provided and is not None, that will force the value to be converted (or not), overriding the value of settings.USE_TZ. This function is designed for use by the template engine.",
    "type": "function",
    "file_path": "django\\django\\utils\\timezone.py",
    "ast_data": "FunctionDef name:template_localtime arg:value arg:use_tz arguments arg arg Assign BoolOp Call Compare Call Call Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "_prepare_scalar_function",
    "source_code": "def _prepare_scalar_function(fun, x0, jac=None, args=(), bounds=None, epsilon=None, finite_diff_rel_step=None, hess=None, workers=None):\n    if callable(jac):\n        grad = jac\n    elif jac in FD_METHODS:\n        epsilon = None\n        grad = jac\n    else:\n        grad = '2-point'\n        epsilon = epsilon\n    if hess is None:\n\n        def hess(x, *args):\n            return None\n    if bounds is None:\n        bounds = (-np.inf, np.inf)\n    workers = workers or map\n    sf = ScalarFunction(fun, x0, args, grad, hess, finite_diff_rel_step, bounds, epsilon=epsilon, workers=workers)\n    return sf",
    "docstring": "Creates a ScalarFunction object for use with scalar minimizers (BFGS/LBFGSB/SLSQP/TNC/CG/etc). Parameters ---------- fun : callable The objective function to be minimized. `{'2-point', '3-point', 'cs'}Nonefunjachmultiprocessing.Pool.mapworkersworkersmultiprocessing.Pool approx_derivative` will incur large overhead from setting up new processes. .. versionadded:: 1.16.0 Returns ------- sf : ScalarFunction",
    "type": "function",
    "file_path": "scipy\\scipy\\optimize\\_optimize.py",
    "ast_data": "FunctionDef name:_prepare_scalar_function arg:fun arg:x0 arg:jac arg:args arg:bounds arg:epsilon arg:finite_diff_rel_step arg:hess arg:workers arguments arg arg arg arg arg arg arg arg arg If Call Assign If Compare Assign Assign Assign Assign If Compare FunctionDef name:hess arg:x arguments arg arg Return return:no If Compare Assign Assign BoolOp Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "tile",
    "source_code": "@classmethod\ndef tile(cls, tile_assignment):\n    if not isinstance(tile_assignment, _np.ndarray):\n        raise TypeError('Tile assignment must be of type np.ndarray')\n    dims = list(tile_assignment.shape)\n    flattened_devices = tile_assignment.reshape(-1, order='C')\n    return Sharding(proto=xla_data_pb2.OpSharding(type=xla_data_pb2.OpSharding.OTHER, tile_assignment_dimensions=dims, tile_assignment_devices=list(flattened_devices)))",
    "docstring": "Returns a Tiled sharding attribute. This causes an op to be partially computed on multiple cores in the XLA device. Args: tile_assignment: An np.ndarray describing the topology of the tiling and which device will compute which part of the topology. Raises: TypeError: tile_assignment was not of np.array type. TODO(jmolloy): This concept is nefarious and is not something we really want to expose to users (especially as the contract for tile_assignment is very strict).",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\compiler\\xla\\experimental\\xla_sharding.py",
    "ast_data": "FunctionDef name:tile arg:cls arg:tile_assignment arguments arg arg If Call Raise Call Assign Call Assign Call Return return:yes Call Call Call"
  },
  {
    "library": "seaborn",
    "name": "__call__",
    "source_code": "def __call__(self, data: DataFrame, groupby: GroupBy, orient: str, scales: dict[str, Scale]) -> DataFrame:\n    return data",
    "docstring": "Apply statistical transform to data subgroups and return combined result.",
    "type": "method",
    "file_path": "seaborn\\seaborn\\_stats\\base.py",
    "ast_data": "FunctionDef name:__call__ arg:self arg:data arg:groupby arg:orient arg:scales arguments arg arg arg arg arg Return return:yes"
  },
  {
    "library": "django",
    "name": "check",
    "source_code": "def check(self, app_configs):\n    if app_configs is None:\n        app_configs = apps.get_app_configs()\n    app_configs = set(app_configs)\n    errors = []\n    modeladmins = (o for o in self._registry.values() if o.__class__ is not ModelAdmin)\n    for modeladmin in modeladmins:\n        if modeladmin.model._meta.app_config in app_configs:\n            errors.extend(modeladmin.check())\n    return errors",
    "docstring": "Run the system checks on all ModelAdmins, except if they aren't customized at all.",
    "type": "method",
    "file_path": "django\\django\\contrib\\admin\\sites.py",
    "ast_data": "FunctionDef name:check arg:self arg:app_configs arguments arg arg If Compare Assign Call Assign Call Assign Assign Call Compare For If Compare Call Call Return return:yes"
  },
  {
    "library": "pandas",
    "name": "keys",
    "source_code": "def keys(self) -> Index:\n    return self.index",
    "docstring": "Return alias for index. Returns ------- Index Index of the Series. See Also -------- Series.index : The index (axis labels) of the Series. Examples -------- >>> s = pd.Series([1, 2, 3], index=[0, 1, 2]) >>> s.keys() Index([0, 1, 2], dtype='int64')",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\series.py",
    "ast_data": "FunctionDef name:keys arg:self arguments arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "node_proto",
    "source_code": "def node_proto(name, op='UnSpecified', input=None, dtype=None, shape: Optional[tuple]=None, outputsize=None, attributes=''):\n    if input is None:\n        input = []\n    if not isinstance(input, list):\n        input = [input]\n    return NodeDef(name=name.encode(encoding='utf_8'), op=op, input=input, attr=attr_value_proto(dtype, outputsize, attributes))",
    "docstring": "Create an object matching a NodeDef. Follows .",
    "type": "function",
    "file_path": "pytorch\\torch\\utils\\tensorboard\\_proto_graph.py",
    "ast_data": "FunctionDef name:node_proto arg:name arg:op arg:input arg:dtype arg:shape arg:outputsize arg:attributes arguments arg arg arg arg arg arg arg If Compare Assign If Call Assign Return return:yes Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "update_state",
    "source_code": "def update_state(self, y_true, y_pred, sample_weight=None):\n    y_true = math_ops.cast(y_true, self._dtype)\n    y_pred = math_ops.cast(y_pred, self._dtype)\n    [y_true, y_pred], sample_weight = metrics_utils.ragged_assert_compatible_and_get_flat_values([y_true, y_pred], sample_weight)\n    y_pred, y_true = losses_utils.squeeze_or_expand_dimensions(y_pred, y_true)\n    ag_fn = autograph.tf_convert(self._fn, ag_ctx.control_status_ctx())\n    matches = ag_fn(y_true, y_pred, **self._fn_kwargs)\n    return super(MeanMetricWrapper, self).update_state(matches, sample_weight=sample_weight)",
    "docstring": "Accumulates metric statistics. and should have the same shape. Args: y_true: Ground truth values. shape = . y_pred: The predicted values. shape = . sample_weight: Optional acts as a coefficient for the metric. If a scalar is provided, then the metric is simply scaled by the given value. If is a tensor of size , then the metric for each sample of the batch is rescaled by the corresponding element in the vector. If the shape of is (or can be broadcasted to this shape), then each metric element of is scaled by the corresponding value of . (Note on : all metric functions reduce by 1 dimension, usually the last axis (-1)). Returns: Update op.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\metrics.py",
    "ast_data": "FunctionDef name:update_state arg:self arg:y_true arg:y_pred arg:sample_weight arguments arg arg arg arg Assign Call Assign Call Assign Call Assign Call Assign Call Call Assign Call Return return:yes Call Call"
  },
  {
    "library": "django",
    "name": "update_join_types",
    "source_code": "def update_join_types(self, query):\n    to_promote = set()\n    to_demote = set()\n    for table, votes in self.votes.items():\n        if self.effective_connector == OR and votes < self.num_children:\n            to_promote.add(table)\n        if self.effective_connector == AND or (self.effective_connector == OR and votes == self.num_children):\n            to_demote.add(table)\n    query.promote_joins(to_promote)\n    query.demote_joins(to_demote)\n    return to_demote",
    "docstring": "Change join types so that the generated query is as efficient as possible, but still correct. So, change as many joins as possible to INNER, but don't make OUTER joins INNER if that could remove results from the query.",
    "type": "method",
    "file_path": "django\\django\\db\\models\\sql\\query.py",
    "ast_data": "FunctionDef name:update_join_types arg:self arg:query arguments arg arg Assign Call Assign Call For Call If BoolOp Compare Compare Call If BoolOp Compare BoolOp Compare Compare Call Call Call Return return:yes"
  },
  {
    "library": "pandas",
    "name": "_convert_listlike",
    "source_code": "def _convert_listlike(arg, unit: UnitChoices | None=None, errors: DateTimeErrorChoices='raise', name: Hashable | None=None):\n    arg_dtype = getattr(arg, 'dtype', None)\n    if isinstance(arg, (list, tuple)) or arg_dtype is None:\n        arg = np.array(arg, dtype=object)\n    elif isinstance(arg_dtype, ArrowDtype) and arg_dtype.kind == 'm':\n        return arg\n    td64arr = sequence_to_td64ns(arg, unit=unit, errors=errors, copy=False)[0]\n    from pandas import TimedeltaIndex\n    value = TimedeltaIndex(td64arr, name=name)\n    return value",
    "docstring": "Convert a list of objects to a timedelta index object.",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\tools\\timedeltas.py",
    "ast_data": "FunctionDef name:_convert_listlike arg:arg arg:unit arg:errors arg:name arguments arg arg arg arg Assign Call If BoolOp Call Compare Assign Call If BoolOp Call Compare Return return:yes Assign Call Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "decode_proto",
    "source_code": "@tf_export('__internal__.saved_model.decode_proto', v1=[])\ndef decode_proto(proto):\n    return _map_structure(proto, _get_decoders())",
    "docstring": "Decodes proto representing a nested structure. Args: proto: Proto to decode. Returns: Decoded structure. Raises: NotEncodableError: For values for which there are no encoders.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\saved_model\\nested_structure_coder.py",
    "ast_data": "FunctionDef name:decode_proto arg:proto arguments arg Return return:yes Call Call Call"
  },
  {
    "library": "pandas",
    "name": "maybe_infer_to_datetimelike",
    "source_code": "def maybe_infer_to_datetimelike(value: npt.NDArray[np.object_], convert_to_nullable_dtype: bool=False) -> np.ndarray | DatetimeArray | TimedeltaArray | PeriodArray | IntervalArray:\n    if not isinstance(value, np.ndarray) or value.dtype != object:\n        raise TypeError(type(value))\n    if value.ndim != 1:\n        raise ValueError(value.ndim)\n    if not len(value):\n        return value\n    return lib.maybe_convert_objects(value, convert_numeric=False, convert_non_numeric=True, convert_to_nullable_dtype=convert_to_nullable_dtype, dtype_if_all_nat=np.dtype('M8[s]'))",
    "docstring": "we might have a array (or single object) that is datetime like, and no dtype is passed don't change the value unless we find a datetime/timedelta set this is pretty strict in that a datetime/timedelta is REQUIRED in addition to possible nulls/string likes Parameters ---------- value : np.ndarray[object] Returns ------- np.ndarray, DatetimeArray, TimedeltaArray, PeriodArray, or IntervalArray",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\dtypes\\cast.py",
    "ast_data": "FunctionDef name:maybe_infer_to_datetimelike arg:value arg:convert_to_nullable_dtype arguments arg arg If BoolOp Call Compare Raise Call Call If Compare Raise Call If Call Return return:yes Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "resolve_name",
    "source_code": "def resolve_name(self, name, package, level):\n    bits = package.rsplit('.', level - 1)\n    if len(bits) < level:\n        raise ImportError('attempted relative import beyond top-level package')\n    base = bits[0]\n    return f'{base}.{name}' if name else base",
    "docstring": "Copied from the Cpython implementation of __import__ Resolve a relative module name to an absolute one.",
    "type": "method",
    "file_path": "pytorch\\torch\\_dynamo\\symbolic_convert.py",
    "ast_data": "FunctionDef name:resolve_name arg:self arg:name arg:package arg:level arguments arg arg arg arg Assign Call If Compare Call Raise Call Assign Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "partition_cudagraphs",
    "source_code": "def partition_cudagraphs(gm, inputs):\n    FakeTensorProp(gm).propagate(*inputs)\n    supported_ops = CudaGraphsSupport()\n    partitioner = CapabilityBasedPartitioner(gm, supported_ops, allows_single_node_partition=True)\n    partitions = partitioner.propose_partitions()\n    fused_graph = partitioner.fuse_partitions(partitions)\n    return fused_graph",
    "docstring": "Partition an FX graph into sub-GraphModules that can be validly run under CUDA graphs. For a subgraph to be runnable under CUDA, all of the operations must involve CUDA tensors only/",
    "type": "function",
    "file_path": "pytorch\\torch\\fx\\passes\\backends\\cudagraphs.py",
    "ast_data": "FunctionDef name:partition_cudagraphs arg:gm arg:inputs arguments arg arg Call Call Assign Call Assign Call Assign Call Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "relayout_like",
    "source_code": "@tf_export('experimental.dtensor.relayout_like', v1=[])\ndef relayout_like(tensor: tensor_lib.Tensor, layout_tensor: tensor_lib.Tensor, name: Optional[str]=None) -> tensor_lib.Tensor:\n    return gen_dtensor_ops.relayout_like(input=tensor, layout_input=layout_tensor, name=name)",
    "docstring": "Changes the layout of to the same as . is often used inside a , to ensure a tensor is placed to the same mesh and with the same layout as another tensor. The backward gradient of a is a operation, to ensure the backward tensor has the same layout as the forward input tensor: Here is another illustrative example: Args: tensor: A DTensor to specify a new layout for. layout_tensor: A Tensor object whose layout will be used for the layout of result. The shape and type of layout_tensor are irrelevant. name: name of the Op. Returns: A DTensor output from the RelayoutLike op.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\dtensor\\python\\api.py",
    "ast_data": "FunctionDef name:relayout_like arg:tensor arg:layout_tensor arg:name arguments arg arg arg Return return:yes Call Call"
  },
  {
    "library": "scikit-learn",
    "name": "_create_expansion",
    "source_code": "def _create_expansion(X, interaction_only, deg, n_features, cumulative_size=0):\n    total_nnz = _calc_total_nnz(X.indptr, interaction_only, deg)\n    expanded_col = _calc_expanded_nnz(n_features, interaction_only, deg)\n    if expanded_col == 0:\n        return None\n    max_indices = expanded_col - 1\n    max_indptr = total_nnz\n    max_int32 = np.iinfo(np.int32).max\n    needs_int64 = max(max_indices, max_indptr) > max_int32\n    index_dtype = np.int64 if needs_int64 else np.int32\n    expanded_data = np.empty(shape=total_nnz, dtype=X.data.dtype)\n    expanded_indices = np.empty(shape=total_nnz, dtype=index_dtype)\n    expanded_indptr = np.empty(shape=X.indptr.shape[0], dtype=index_dtype)\n    _csr_polynomial_expansion(X.data, X.indices, X.indptr, X.shape[1], expanded_data, expanded_indices, expanded_indptr, interaction_only, deg)\n    return sparse.csr_matrix((expanded_data, expanded_indices, expanded_indptr), shape=(X.indptr.shape[0] - 1, expanded_col), dtype=X.dtype)",
    "docstring": "Helper function for creating and appending sparse expansion matrices",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\preprocessing\\_polynomial.py",
    "ast_data": "FunctionDef name:_create_expansion arg:X arg:interaction_only arg:deg arg:n_features arg:cumulative_size arguments arg arg arg arg arg Assign Call Assign Call If Compare Return return:no Assign Assign Assign Call Assign Compare Call Assign Assign Call Assign Call Assign Call Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "_get_numerical_jacobian",
    "source_code": "def _get_numerical_jacobian(fn, inputs, outputs=None, target=None, eps=0.001, is_forward_ad=False) -> list[tuple[torch.Tensor, ...]]:\n    jacobians: list[tuple[torch.Tensor, ...]] = []\n    if outputs is None:\n        outputs = _as_tuple(fn(*_as_tuple(inputs)))\n    if not is_forward_ad and any((o.is_complex() for o in outputs)):\n        raise ValueError('Expected output to be non-complex. get_numerical_jacobian no longer supports functions that return complex outputs.')\n    if target is None:\n        target = inputs\n    inp_indices = [i for i, a in enumerate(target) if is_tensor_like(a) and a.requires_grad]\n    for i, (inp, inp_idx) in enumerate(zip(_iter_tensors(target, True), inp_indices)):\n        jacobians += [get_numerical_jacobian_wrt_specific_input(fn, inp_idx, inputs, outputs, eps, input=inp, is_forward_ad=is_forward_ad)]\n    return jacobians",
    "docstring": "Compute the numerical Jacobian of with respect to . If not specified, targets are the input. Returns M * N Jacobians where N is the number of tensors in target that require grad and M is the number of non-integral outputs. Args: fn: the function to compute the jacobian for inputs: inputs to outputs: provide precomputed outputs to avoid one extra invocation of fn target: the Tensors wrt whom Jacobians are calculated (default=) eps: the magnitude of the perturbation during finite differencing (default=) is_forward_ad: if this numerical jacobian is computed to be checked wrt forward AD gradients (this is used for error checking only) Returns: A list of M N-tuples of tensors Note that may not even be part of to , so please be **very careful** in this to not clone .",
    "type": "function",
    "file_path": "pytorch\\torch\\autograd\\gradcheck.py",
    "ast_data": "FunctionDef name:_get_numerical_jacobian arg:fn arg:inputs arg:outputs arg:target arg:eps arg:is_forward_ad arguments arg arg arg arg arg arg If Compare Assign Call Call Call If BoolOp Call Call Raise Call If Compare Assign Assign Call BoolOp Call For Call Call Call Call Return return:yes"
  },
  {
    "library": "pandas",
    "name": "cumcount",
    "source_code": "@final\n@Substitution(name='groupby')\ndef cumcount(self, ascending: bool=True):\n    index = self._obj_with_exclusions.index\n    cumcounts = self._cumcount_array(ascending=ascending)\n    return self._obj_1d_constructor(cumcounts, index)",
    "docstring": "Number each item in each group from 0 to the length of that group - 1. Essentially this is equivalent to .. code-block:: python self.apply(lambda x: pd.Series(np.arange(len(x)), x.index)) Parameters ---------- ascending : bool, default True If False, number in reverse, from length of group - 1 to 0. Returns ------- Series Sequence number of each element within each group. See Also -------- .ngroup : Number the groups themselves. Examples -------- >>> df = pd.DataFrame([[\"a\"], [\"a\"], [\"a\"], [\"b\"], [\"b\"], [\"a\"]], columns=[\"A\"]) >>> df A 0 a 1 a 2 a 3 b 4 b 5 a >>> df.groupby(\"A\").cumcount() 0 0 1 1 2 2 3 0 4 1 5 3 dtype: int64 >>> df.groupby(\"A\").cumcount(ascending=False) 0 3 1 2 2 1 3 1 4 0 5 0 dtype: int64",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\groupby\\groupby.py",
    "ast_data": "FunctionDef name:cumcount arg:self arg:ascending arguments arg arg Assign Assign Call Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "group_count",
    "source_code": "@group_count.setter\ndef group_count(self, value: int) -> None:\n    global _group_count\n    _group_count = value",
    "docstring": "Use to compute the name of ProcessGroups when using global synchronization.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\distributed_c10d.py",
    "ast_data": "FunctionDef name:group_count arg:self arg:value arguments arg arg Assign"
  },
  {
    "library": "django",
    "name": "check_database_version_supported",
    "source_code": "def check_database_version_supported(self):\n    if self.features.minimum_database_version is not None and self.get_database_version() < self.features.minimum_database_version:\n        db_version = '.'.join(map(str, self.get_database_version()))\n        min_db_version = '.'.join(map(str, self.features.minimum_database_version))\n        raise NotSupportedError(f'{self.display_name} {min_db_version} or later is required (found {db_version}).')",
    "docstring": "Raise an error if the database version isn't supported by this version of Django.",
    "type": "method",
    "file_path": "django\\django\\db\\backends\\base\\base.py",
    "ast_data": "FunctionDef name:check_database_version_supported arg:self arguments arg If BoolOp Compare Compare Call Assign Call Call Call Assign Call Call Raise Call"
  },
  {
    "library": "tensorflow",
    "name": "_ones_like",
    "source_code": "def _ones_like(x):\n    if x.get_shape().is_fully_defined():\n        return array_ops.ones(x.get_shape().as_list(), dtype=x.dtype)\n    return array_ops.ones_like(x)",
    "docstring": "Convenience function attempts to statically construct .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\distributions\\transformed_distribution.py",
    "ast_data": "FunctionDef name:_ones_like arg:x arguments arg If Call Call Return return:yes Call Call Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "UserDefinedDictVariable",
    "source_code": "class UserDefinedDictVariable(UserDefinedObjectVariable):\n    _nonvar_fields = UserDefinedObjectVariable._nonvar_fields\n\n    def __init__(self, value, dict_vt=None, **kwargs):\n        super().__init__(value, **kwargs)\n        self._dict_vt = dict_vt\n        if self._dict_vt is None:\n            assert self.source is None, 'dict_vt must be constructed by builder.py when source is present'\n            self._dict_vt = variables.ConstDictVariable({}, mutation_type=ValueMutationNew())\n        self._dict_methods = dict_methods\n\n    def call_method(self, tx, name, args: 'list[VariableTracker]', kwargs: 'dict[str, VariableTracker]') -> 'VariableTracker':\n        method = self._maybe_get_baseclass_method(name)\n        if method in self._dict_methods:\n            return self._dict_vt.call_method(tx, name, args, kwargs)\n        return super().call_method(tx, name, args, kwargs)\n\n    def unpack_var_sequence(self, tx):\n        if type(self.value).__iter__ in (dict.__iter__, collections.OrderedDict.__iter__):\n            return self._dict_vt.unpack_var_sequence(tx)\n        raise NotImplementedError\n\n    def is_underlying_vt_modified(self, side_effects):\n        return side_effects.is_modified(self._dict_vt)",
    "docstring": "Represents user defined objects that are subclasses of dict/OrderedDict. Internally, it uses a ConstDictVariable to represent the dict part of the variable tracker. For everything else, it falls back to UserDefinedObjectVariable.",
    "type": "class",
    "file_path": "pytorch\\torch\\_dynamo\\variables\\user_defined.py",
    "ast_data": "ClassDef name:UserDefinedDictVariable Assign FunctionDef name:__init__ arg:self arg:value arg:dict_vt arguments arg arg arg arg Call Call Assign If Compare Compare Assign Call Call Assign FunctionDef name:call_method arg:self arg:tx arg:name arg:args arg:kwargs arguments arg arg arg arg arg Assign Call If Compare Return return:yes Call Return return:yes Call Call FunctionDef name:unpack_var_sequence arg:self arg:tx arguments arg arg If Compare Call Return return:yes Call Raise FunctionDef name:is_underlying_vt_modified arg:self arg:side_effects arguments arg arg Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "memory_usage",
    "source_code": "def memory_usage(self, index: bool=True, deep: bool=False) -> int:\n    v = self._memory_usage(deep=deep)\n    if index:\n        v += self.index.memory_usage(deep=deep)\n    return v",
    "docstring": "Return the memory usage of the Series. The memory usage can optionally include the contribution of the index and of elements of dtype. Parameters ---------- index : bool, default True Specifies whether to include the memory usage of the Series index. deep : bool, default False If True, introspect the data deeply by interrogating dtypes for system-level memory consumption, and include it in the returned value. Returns ------- int Bytes of memory consumed. See Also -------- numpy.ndarray.nbytes : Total bytes consumed by the elements of the array. DataFrame.memory_usage : Bytes consumed by a DataFrame. Examples -------- >>> s = pd.Series(range(3)) >>> s.memory_usage() 152 Not including the index gives the size of the rest of the data, which is necessarily smaller: >>> s.memory_usage(index=False) 24 The memory footprint of values is ignored by default: >>> s = pd.Series([\"a\", \"b\"]) >>> s.values array(['a', 'b'], dtype=object) >>> s.memory_usage() 144 >>> s.memory_usage(deep=True) 244",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\series.py",
    "ast_data": "FunctionDef name:memory_usage arg:self arg:index arg:deep arguments arg arg arg Assign Call If Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "synchronize",
    "source_code": "def synchronize(self) -> None:\n    torch._C._mps_synchronizeEvent(self.__eventId)",
    "docstring": "Waits until the completion of all work currently captured in this event. This prevents the CPU thread from proceeding until the event completes.",
    "type": "method",
    "file_path": "pytorch\\torch\\mps\\event.py",
    "ast_data": "FunctionDef name:synchronize arg:self arguments arg Call"
  },
  {
    "library": "matplotlib",
    "name": "get_xaxis_transform",
    "source_code": "def get_xaxis_transform(self, which='grid'):\n    if which == 'grid':\n        return self._xaxis_transform\n    elif which == 'tick1':\n        return self.spines.bottom.get_spine_transform()\n    elif which == 'tick2':\n        return self.spines.top.get_spine_transform()\n    else:\n        raise ValueError(f'unknown value for which: {which!r}')",
    "docstring": "Get the transformation used for drawing x-axis labels, ticks and gridlines. The x-direction is in data coordinates and the y-direction is in axis coordinates. .. note:: This transformation is primarily used by the class, and is meant to be overridden by new kinds of projections that may need to place axis elements in different locations. Parameters ---------- which : {'grid', 'tick1', 'tick2'}",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\axes\\_base.py",
    "ast_data": "FunctionDef name:get_xaxis_transform arg:self arg:which arguments arg arg If Compare Return return:yes If Compare Return return:yes Call If Compare Return return:yes Call Raise Call"
  },
  {
    "library": "scikit-learn",
    "name": "predict_proba",
    "source_code": "def predict_proba(self, X):\n    check_is_fitted(self)\n    X = self._validate_X_predict(X)\n    n_jobs, _, _ = _partition_estimators(self.n_estimators, self.n_jobs)\n    all_proba = [np.zeros((X.shape[0], j), dtype=np.float64) for j in np.atleast_1d(self.n_classes_)]\n    lock = threading.Lock()\n    Parallel(n_jobs=n_jobs, verbose=self.verbose, require='sharedmem')((delayed(_accumulate_prediction)(e.predict_proba, X, all_proba, lock) for e in self.estimators_))\n    for proba in all_proba:\n        proba /= len(self.estimators_)\n    if len(all_proba) == 1:\n        return all_proba[0]\n    else:\n        return all_proba",
    "docstring": "Predict class probabilities for X. The predicted class probabilities of an input sample are computed as the mean predicted class probabilities of the trees in the forest. The class probability of a single tree is the fraction of samples of the same class in a leaf. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) The input samples. Internally, its dtype will be converted to `classes_`.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\ensemble\\_forest.py",
    "ast_data": "FunctionDef name:predict_proba arg:self arg:X arguments arg arg Call Assign Call Assign Call Assign Call Call Assign Call Call Call Call Call For Call If Compare Call Return return:yes Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "fake_dataset_fn",
    "source_code": "def fake_dataset_fn(unused):\n    del unused\n\n    def make_dataset(time_us, num_elements):\n        dataset = dataset_ops.Dataset.range(num_elements)\n        if time_us > 0:\n            dataset = dataset.apply(testing.sleep(time_us))\n        return dataset\n    if not initial_delay_us:\n        return make_dataset(remainder_delay_us, 100)\n    return make_dataset(initial_delay_us, 0).concatenate(make_dataset(remainder_delay_us, 100))",
    "docstring": "Returns a function that creates a dataset with the specified delays.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\data\\benchmarks\\interleave_benchmark.py",
    "ast_data": "FunctionDef name:fake_dataset_fn arg:unused arguments arg FunctionDef name:make_dataset arg:time_us arg:num_elements arguments arg arg Assign Call If Compare Assign Call Call Return return:yes If Return return:yes Call Return return:yes Call Call Call"
  },
  {
    "library": "kornia",
    "name": "scalar",
    "source_code": "@property\ndef scalar(self) -> Tensor:\n    return self.real",
    "docstring": "Return a scalar with the real with shape :math:. Alias for :func:",
    "type": "method",
    "file_path": "kornia\\kornia\\geometry\\quaternion.py",
    "ast_data": "FunctionDef name:scalar arg:self arguments arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "create_node",
    "source_code": "@compatibility(is_backward_compatible=True)\ndef create_node(self, kind: str, target: Target, args: tuple[Argument, ...], kwargs: dict[str, Argument], name: Optional[str]=None, type_expr: Optional[Any]=None) -> Node:\n    if kind == 'call_function' and self.check_mutable_operations:\n        check_for_mutable_operation(target, args, kwargs)\n    node = self.graph.create_node(kind, target, args, kwargs, name, type_expr)\n    self.node_name_to_scope[node.name] = (self.scope.module_path, self.scope.module_type)\n    if fx_traceback.has_preserved_node_meta():\n        current_meta: dict[str, Any] = fx_traceback.get_current_meta()\n        stack_trace = current_meta.get('stack_trace')\n        if stack_trace:\n            node.stack_trace = stack_trace\n        for field in _COPY_META_FIELDS:\n            if field in current_meta:\n                node.meta[field] = copy.copy(current_meta[field])\n        new_seq_nr = torch.autograd._get_sequence_nr() - 1\n        if current_meta.get('in_grad_fn', 0) > 0:\n            new_seq_nr = current_meta['grad_fn_seq_nr'][-1]\n        node.meta['seq_nr'] = new_seq_nr\n    elif self.module_stack:\n        node.meta['nn_module_stack'] = copy.copy(self.module_stack)\n    log.debug('create_node %s', node)\n    return node",
    "docstring": "Inserts a graph node given target, args, kwargs, and name. This method can be overridden to do extra checking, validation, or modification of values used in node creation. For example, one might want to disallow in-place operations from being recorded.",
    "type": "method",
    "file_path": "pytorch\\torch\\fx\\proxy.py",
    "ast_data": "FunctionDef name:create_node arg:self arg:kind arg:target arg:args arg:kwargs arg:name arg:type_expr arguments arg arg arg arg arg arg arg If BoolOp Compare Call Assign Call Assign If Call Call Assign Call If Assign For If Compare Assign Call Assign Call If Compare Call Assign Assign If Assign Call Call Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "set_transform_rotates_text",
    "source_code": "def set_transform_rotates_text(self, t):\n    self._transform_rotates_text = t\n    self.stale = True",
    "docstring": "Whether rotations of the transform affect the text direction. Parameters ---------- t : bool",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\text.py",
    "ast_data": "FunctionDef name:set_transform_rotates_text arg:self arg:t arguments arg arg Assign Assign"
  },
  {
    "library": "django",
    "name": "get_tag_uri",
    "source_code": "def get_tag_uri(url, date):\n    bits = urlparse(url)\n    d = ''\n    if date is not None:\n        d = ',%s' % date.strftime('%Y-%m-%d')\n    return 'tag:%s%s:%s/%s' % (bits.hostname, d, bits.path, bits.fragment)",
    "docstring": "Create a TagURI. See",
    "type": "function",
    "file_path": "django\\django\\utils\\feedgenerator.py",
    "ast_data": "FunctionDef name:get_tag_uri arg:url arg:date arguments arg arg Assign Call Assign If Compare Assign Call Return return:yes"
  },
  {
    "library": "authlib",
    "name": "prepare_revoke_token_request",
    "source_code": "def prepare_revoke_token_request(token, token_type_hint=None, body=None, headers=None):\n    params = [('token', token)]\n    if token_type_hint:\n        params.append(('token_type_hint', token_type_hint))\n    body = add_params_to_qs(body or '', params)\n    if headers is None:\n        headers = {}\n    headers['Content-Type'] = 'application/x-www-form-urlencoded'\n    return (body, headers)",
    "docstring": "Construct request body and headers for revocation endpoint. :param token: access_token or refresh_token string. :param token_type_hint: Optional, or . :param body: current request body. :param headers: current request headers. :return: tuple of (body, headers)",
    "type": "function",
    "file_path": "authlib\\authlib\\oauth2\\rfc7009\\parameters.py",
    "ast_data": "FunctionDef name:prepare_revoke_token_request arg:token arg:token_type_hint arg:body arg:headers arguments arg arg arg arg Assign If Call Assign Call BoolOp If Compare Assign Assign Return return:yes"
  },
  {
    "library": "virtualenv",
    "name": "activators",
    "source_code": "@property\ndef activators(self):\n    return self._activators",
    "docstring": "Activators used to generate activations scripts.",
    "type": "method",
    "file_path": "virtualenv\\src\\virtualenv\\run\\session.py",
    "ast_data": "FunctionDef name:activators arg:self arguments arg Return return:yes"
  },
  {
    "library": "django",
    "name": "count",
    "source_code": "@cached_property\ndef count(self):\n    c = getattr(self.object_list, 'count', None)\n    if callable(c) and (not inspect.isbuiltin(c)) and method_has_no_args(c):\n        return c()\n    return len(self.object_list)",
    "docstring": "Return the total number of objects, across all pages.",
    "type": "method",
    "file_path": "django\\django\\core\\paginator.py",
    "ast_data": "FunctionDef name:count arg:self arguments arg Assign Call If BoolOp Call Call Call Return return:yes Call Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "sampled_surface",
    "source_code": "def sampled_surface(self, infty_cons_sampl=False):\n    if self.disp:\n        logging.info('Generating sampling points')\n    self.sampling(self.nc, self.dim)\n    if len(self.LMC.xl_maps) > 0:\n        self.C = np.vstack((self.C, np.array(self.LMC.xl_maps)))\n    if not infty_cons_sampl:\n        if self.g_cons is not None:\n            self.sampling_subspace()\n    self.sorted_samples()\n    self.n_sampled = self.nc",
    "docstring": "Sample the function surface. There are 2 modes, if `` is False, then the infeasible points are discarded and only a subspace of the sampled points are used. This comes at the cost of the loss of guaranteed convergence and usually requires more objective function evaluations.",
    "type": "method",
    "file_path": "scipy\\scipy\\optimize\\_shgo.py",
    "ast_data": "FunctionDef name:sampled_surface arg:self arg:infty_cons_sampl arguments arg arg If Call Call If Compare Call Assign Call Call If If Compare Call Call Assign"
  },
  {
    "library": "pandas",
    "name": "_validate_positional_slice",
    "source_code": "@final\ndef _validate_positional_slice(self, key: slice) -> None:\n    self._validate_indexer('positional', key.start, 'iloc')\n    self._validate_indexer('positional', key.stop, 'iloc')\n    self._validate_indexer('positional', key.step, 'iloc')",
    "docstring": "For positional indexing, a slice must have either int or None for each of start, stop, and step.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\indexes\\base.py",
    "ast_data": "FunctionDef name:_validate_positional_slice arg:self arg:key arguments arg arg Call Call Call"
  },
  {
    "library": "matplotlib",
    "name": "resume",
    "source_code": "def resume(self):\n    self.event_source.start()\n    if self._blit:\n        for artist in self._drawn_artists:\n            artist.set_animated(True)",
    "docstring": "Resume the animation.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\animation.py",
    "ast_data": "FunctionDef name:resume arg:self arguments arg Call If For Call"
  },
  {
    "library": "pytorch",
    "name": "quantize_helper",
    "source_code": "def quantize_helper(g: jit_utils.GraphContext, tensor: _C.Value, scale: _C.Value, zero_point: _C.Value, axis: _C.Value | None=None) -> _C.Value:\n    if axis is not None and (not _is_none(axis)) and (GLOBALS.export_onnx_opset_version < 13):\n        _onnx_opset_unsupported_detailed('QuantizeLinear', GLOBALS.export_onnx_opset_version, 13, 'Attribute axis is not supported.', tensor)\n    assert scale is not None\n    if _type_utils.JitScalarType.from_value(scale, _type_utils.JitScalarType.UNDEFINED) != _type_utils.JitScalarType.FLOAT:\n        scale = g.op('Cast', scale, to_i=_C_onnx.TensorProtoDataType.FLOAT)\n    assert zero_point is not None\n    if _type_utils.JitScalarType.from_value(zero_point, _type_utils.JitScalarType.UNDEFINED) not in {_type_utils.JitScalarType.UINT8, _type_utils.JitScalarType.INT8}:\n        zero_point = g.op('Cast', zero_point, to_i=_C_onnx.TensorProtoDataType.UINT8)\n    output = g.op('QuantizeLinear', tensor, scale, zero_point, axis_i=_get_const(axis, 'i', 'axis'))\n    args = [output, scale, zero_point]\n    if axis is not None and (not _is_none(axis)):\n        args.append(axis)\n    return g.op('prim::TupleConstruct', *args)",
    "docstring": "Appends to graph ONNX nodes that quantizes based on , and . Args: g: Graph, the ONNX IR graph that is under construction. tensor: torch._C.Value, representing the tensor to be quantized. scale: torch._C.Value, quantized scale. zero_point: torch._C.Value, quantized zero point. axis: Optional[torch._C.Value] default None, if None, represents per tensor quantization. Otherwise, represents per channel quantization, along given axis. Returns: A TupleConstruct storing information of the quantized tensor.",
    "type": "function",
    "file_path": "pytorch\\torch\\onnx\\symbolic_helper.py",
    "ast_data": "FunctionDef name:quantize_helper arg:g arg:tensor arg:scale arg:zero_point arg:axis arguments arg arg arg arg arg If BoolOp Compare Call Compare Call Compare If Compare Call Assign Call Compare If Compare Call Assign Call Assign Call Call Assign If BoolOp Compare Call Call Return return:yes Call"
  },
  {
    "library": "scrapy",
    "name": "load_pre_crawler_settings",
    "source_code": "@classmethod\ndef load_pre_crawler_settings(cls, settings: BaseSettings):\n    for clspath in build_component_list(settings['ADDONS']):\n        addoncls = load_object(clspath)\n        if hasattr(addoncls, 'update_pre_crawler_settings'):\n            addoncls.update_pre_crawler_settings(settings)",
    "docstring": "Update early settings that do not require a crawler instance, such as SPIDER_MODULES. Similar to the load_settings method, this loads each add-on configured in the `~scrapy.settings.BaseSettings~scrapy.settings.Settings`",
    "type": "method",
    "file_path": "scrapy\\scrapy\\addons.py",
    "ast_data": "FunctionDef name:load_pre_crawler_settings arg:cls arg:settings arguments arg arg For Call Assign Call If Call Call"
  },
  {
    "library": "matplotlib",
    "name": "connectors",
    "source_code": "@property\ndef connectors(self):\n    if self._inset_ax is None:\n        return\n    if self._auto_update_bounds:\n        self._rectangle.set_bounds(self._bounds_from_inset_ax())\n    self._update_connectors()\n    return tuple(self._connectors)",
    "docstring": "4-tuple of or None The four connector lines connecting to (lower_left, upper_left, lower_right upper_right) corners of *inset_ax*. Two lines are set with visibility to *False*, but the user can set the visibility to True if the automatic choice is not deemed correct.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\inset.py",
    "ast_data": "FunctionDef name:connectors arg:self arguments arg If Compare Return return:no If Call Call Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "is_structseq",
    "source_code": "def is_structseq(obj: Union[object, type]) -> bool:\n    cls = obj if isinstance(obj, type) else type(obj)\n    return is_structseq_class(cls)",
    "docstring": "Return whether the object is an instance of PyStructSequence or a class of PyStructSequence.",
    "type": "function",
    "file_path": "pytorch\\torch\\utils\\_pytree.py",
    "ast_data": "FunctionDef name:is_structseq arg:obj arguments arg Assign Call Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "numpy_method_wrapper",
    "source_code": "class numpy_method_wrapper:\n\n    def __init__(self, method: str):\n        self.method = method\n        self.__name__ = 'wrapped_' + self.method\n\n    def __repr__(self) -> str:\n        return f'<Wrapped method <original {self.method}>>'\n\n    def __call__(self, *args, **kwargs):\n        obj = args[0]\n        if isinstance(obj, torch.Tensor):\n            obj = tnp.ndarray(obj)\n        method_callable = getattr(obj, self.method)\n        out = method_callable(*args[1:], **kwargs)\n        return numpy_to_tensor(out)",
    "docstring": "Convert obj from torch.Tensor to tnp.ndarray and call method. Then convert result back to torch.Tensor.",
    "type": "class",
    "file_path": "pytorch\\torch\\_dynamo\\utils.py",
    "ast_data": "ClassDef name:numpy_method_wrapper FunctionDef name:__init__ arg:self arg:method arguments arg arg Assign Assign FunctionDef name:__repr__ arg:self arguments arg Return return:yes FunctionDef name:__call__ arg:self arguments arg arg arg Assign If Call Assign Call Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "__get_cmp_key",
    "source_code": "def __get_cmp_key(self):\n    if not hasattr(self, _CACHED_CMP_KEY):\n        setattr(self, _CACHED_CMP_KEY, (type(self), self.__make_cmp_key(self._serialize())))\n    return getattr(self, _CACHED_CMP_KEY)",
    "docstring": "Returns a hashable eq-comparable key for .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\type_spec.py",
    "ast_data": "FunctionDef name:__get_cmp_key arg:self arguments arg If Call Call Call Call Call Return return:yes Call"
  },
  {
    "library": "authlib",
    "name": "invalid_error_characters",
    "source_code": "def invalid_error_characters(text: str) -> list[str]:\n    valid_ranges = [(32, 33), (35, 91), (93, 126)]\n    return [char for char in set(text) if not any((start <= ord(char) <= end for start, end in valid_ranges))]",
    "docstring": "Check whether the string only contains characters from the restricted ASCII set defined in RFC6749 for errors.",
    "type": "function",
    "file_path": "authlib\\authlib\\oauth2\\base.py",
    "ast_data": "FunctionDef name:invalid_error_characters arg:text arguments arg Assign Return return:yes Call Call Compare Call"
  },
  {
    "library": "tensorflow",
    "name": "_legacy_contrib_should_record_summaries",
    "source_code": "def _legacy_contrib_should_record_summaries():\n    return _should_record_summaries_internal(default_state=False)",
    "docstring": "Returns boolean Tensor which is true if summaries should be recorded.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\summary_ops_v2.py",
    "ast_data": "FunctionDef name:_legacy_contrib_should_record_summaries arguments Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "__new__",
    "source_code": "def __new__(cls, *system, **kwargs):\n    if cls is dlti:\n        N = len(system)\n        if N == 2:\n            return TransferFunctionDiscrete.__new__(TransferFunctionDiscrete, *system, **kwargs)\n        elif N == 3:\n            return ZerosPolesGainDiscrete.__new__(ZerosPolesGainDiscrete, *system, **kwargs)\n        elif N == 4:\n            return StateSpaceDiscrete.__new__(StateSpaceDiscrete, *system, **kwargs)\n        else:\n            raise ValueError('`system` needs to be an instance of `dlti` or have 2, 3 or 4 arguments.')\n    return super().__new__(cls)",
    "docstring": "Create an instance of the appropriate subclass.",
    "type": "method",
    "file_path": "scipy\\scipy\\signal\\_ltisys.py",
    "ast_data": "FunctionDef name:__new__ arg:cls arguments arg arg arg If Compare Assign Call If Compare Return return:yes Call If Compare Return return:yes Call If Compare Return return:yes Call Raise Call Return return:yes Call Call"
  },
  {
    "library": "django",
    "name": "has_changed",
    "source_code": "def has_changed(self):\n    return bool(self.changed_data)",
    "docstring": "Return True if data differs from initial.",
    "type": "method",
    "file_path": "django\\django\\forms\\forms.py",
    "ast_data": "FunctionDef name:has_changed arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_infer_var_name",
    "source_code": "def _infer_var_name(var):\n    name_to_var_dict = saveable_object_util.op_list_to_dict(var)\n    if len(name_to_var_dict) > 1:\n        raise TypeError('`var` = %s passed as arg violates the constraints.  name_to_var_dict = %s' % (var, name_to_var_dict))\n    return list(name_to_var_dict.keys())[0]",
    "docstring": "Returns name of the . Args: var: A list. The list can contain either of the following: (i) A single (ii) A single (iii) Multiple objects which must be slices of the same larger variable. (iv) A single Returns: Name of the",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\training\\warm_starting_util.py",
    "ast_data": "FunctionDef name:_infer_var_name arg:var arguments arg Assign Call If Compare Call Raise Call Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "_SymHashingDict",
    "source_code": "class _SymHashingDict:\n\n    def __init__(self):\n        self.sym_hash_dict = {}\n\n    def __setitem__(self, key, value):\n        self.sym_hash_dict.__setitem__(self._wrap_to_sym_expr_hash(key), value)\n\n    def __getitem__(self, key):\n        return self.sym_hash_dict[self._wrap_to_sym_expr_hash(key)]\n\n    def __contains__(self, key):\n        return self._wrap_to_sym_expr_hash(key) in self.sym_hash_dict\n\n    def get(self, key, default=None):\n        return self.sym_hash_dict.get(self._wrap_to_sym_expr_hash(key), default)\n\n    def _wrap_to_sym_expr_hash(self, key):\n        return _SymExprHash(key) if isinstance(key, py_sym_types) else key",
    "docstring": "Wrapper around a dictionary that will convert sym types to hash with _SymExprHash and reuse existing sym proxies. SymPy hash is not always reliable so optimistically hash sympy expression, and if those fail, fallback to symnodes.",
    "type": "class",
    "file_path": "pytorch\\torch\\_inductor\\fx_passes\\dedupe_symint_uses.py",
    "ast_data": "ClassDef name:_SymHashingDict FunctionDef name:__init__ arg:self arguments arg Assign FunctionDef name:__setitem__ arg:self arg:key arg:value arguments arg arg arg Call Call FunctionDef name:__getitem__ arg:self arg:key arguments arg arg Return return:yes Call FunctionDef name:__contains__ arg:self arg:key arguments arg arg Return return:yes Compare Call FunctionDef name:get arg:self arg:key arg:default arguments arg arg arg Return return:yes Call Call FunctionDef name:_wrap_to_sym_expr_hash arg:self arg:key arguments arg arg Return return:yes Call Call"
  },
  {
    "library": "scipy",
    "name": "_gen_harmonic_gt1",
    "source_code": "def _gen_harmonic_gt1(n, a):\n    return zeta(a, 1) - zeta(a, n + 1)",
    "docstring": "Generalized harmonic number, a > 1",
    "type": "function",
    "file_path": "scipy\\scipy\\stats\\_discrete_distns.py",
    "ast_data": "FunctionDef name:_gen_harmonic_gt1 arg:n arg:a arguments arg arg Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "metal_capture",
    "source_code": "@contextlib.contextmanager\ndef metal_capture(fname: str):\n    try:\n        torch._C._mps_startCapture(fname)\n        yield\n        torch.mps.synchronize()\n    finally:\n        torch._C._mps_stopCapture()",
    "docstring": "Conext manager that enables capturing of Metal calls into gputrace",
    "type": "function",
    "file_path": "pytorch\\torch\\mps\\profiler.py",
    "ast_data": "FunctionDef name:metal_capture arg:fname arguments arg Try Call Call Call"
  },
  {
    "library": "numpy",
    "name": "__RandomState_ctor",
    "source_code": "def __RandomState_ctor():\n    return RandomState(seed=0)",
    "docstring": "Return a RandomState instance. This function exists solely to assist (un)pickling. Note that the state of the RandomState returned here is irrelevant, as this function's entire purpose is to return a newly allocated RandomState whose state pickle can set. Consequently the RandomState returned by this function is a freshly allocated copy with a seed=0. See for a detailed discussion",
    "type": "function",
    "file_path": "numpy\\numpy\\random\\__init__.py",
    "ast_data": "FunctionDef name:__RandomState_ctor arguments Return return:yes Call"
  },
  {
    "library": "django",
    "name": "apply_limit_choices_to_to_formfield",
    "source_code": "def apply_limit_choices_to_to_formfield(formfield):\n    from django.db.models import Exists, OuterRef, Q\n    if hasattr(formfield, 'queryset') and hasattr(formfield, 'get_limit_choices_to'):\n        limit_choices_to = formfield.get_limit_choices_to()\n        if limit_choices_to:\n            complex_filter = limit_choices_to\n            if not isinstance(complex_filter, Q):\n                complex_filter = Q(**limit_choices_to)\n            complex_filter &= Q(pk=OuterRef('pk'))\n            formfield.queryset = formfield.queryset.filter(Exists(formfield.queryset.model._base_manager.filter(complex_filter)))",
    "docstring": "Apply limit_choices_to to the formfield's queryset if needed.",
    "type": "function",
    "file_path": "django\\django\\forms\\models.py",
    "ast_data": "FunctionDef name:apply_limit_choices_to_to_formfield arg:formfield arguments arg If BoolOp Call Call Assign Call If Assign If Call Assign Call Call Call Assign Call Call Call"
  },
  {
    "library": "scipy",
    "name": "_process_parameters",
    "source_code": "def _process_parameters(self, mu, kappa):\n    mu = np.asarray(mu)\n    if mu.ndim > 1:\n        raise ValueError(\"'mu' must have one-dimensional shape.\")\n    if not np.allclose(np.linalg.norm(mu), 1.0):\n        raise ValueError(\"'mu' must be a unit vector of norm 1.\")\n    if not mu.size > 1:\n        raise ValueError(\"'mu' must have at least two entries.\")\n    kappa_error_msg = \"'kappa' must be a positive scalar.\"\n    if not np.isscalar(kappa) or kappa < 0:\n        raise ValueError(kappa_error_msg)\n    if float(kappa) == 0.0:\n        raise ValueError(\"For 'kappa=0' the von Mises-Fisher distribution becomes the uniform distribution on the sphere surface. Consider using 'scipy.stats.uniform_direction' instead.\")\n    dim = mu.size\n    return (dim, mu, kappa)",
    "docstring": "Infer dimensionality from mu and ensure that mu is a one-dimensional unit vector and kappa positive.",
    "type": "method",
    "file_path": "scipy\\scipy\\stats\\_multivariate.py",
    "ast_data": "FunctionDef name:_process_parameters arg:self arg:mu arg:kappa arguments arg arg arg Assign Call If Compare Raise Call If Call Call Raise Call If Compare Raise Call Assign If BoolOp Call Compare Raise Call If Compare Call Raise Call Assign Return return:yes"
  },
  {
    "library": "django",
    "name": "clean_username",
    "source_code": "def clean_username(self):\n    username = self.cleaned_data.get('username')\n    if username and self._meta.model.objects.filter(username__iexact=username).exists():\n        self._update_errors(ValidationError({'username': self.instance.unique_error_message(self._meta.model, ['username'])}))\n    else:\n        return username",
    "docstring": "Reject usernames that differ only in case.",
    "type": "method",
    "file_path": "django\\django\\contrib\\auth\\forms.py",
    "ast_data": "FunctionDef name:clean_username arg:self arguments arg Assign Call If BoolOp Call Call Call Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_ConjGrad",
    "source_code": "@ops.RegisterGradient('Conj')\ndef _ConjGrad(_, grad):\n    return math_ops.conj(grad)",
    "docstring": "Returns the complex conjugate of grad.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\math_grad.py",
    "ast_data": "FunctionDef name:_ConjGrad arg:_ arg:grad arguments arg arg Return return:yes Call Call"
  },
  {
    "library": "django",
    "name": "_set_srs",
    "source_code": "def _set_srs(self, srs):\n    if isinstance(srs, SpatialReference):\n        srs_ptr = srs.ptr\n    elif isinstance(srs, (int, str)):\n        sr = SpatialReference(srs)\n        srs_ptr = sr.ptr\n    elif srs is None:\n        srs_ptr = None\n    else:\n        raise TypeError('Cannot assign spatial reference with object of type: %s' % type(srs))\n    capi.assign_srs(self.ptr, srs_ptr)",
    "docstring": "Set the SpatialReference for this geometry.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\gdal\\geometries.py",
    "ast_data": "FunctionDef name:_set_srs arg:self arg:srs arguments arg arg If Call Assign If Call Assign Call Assign If Compare Assign Raise Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_is_shape_and_default_value_compatible",
    "source_code": "def _is_shape_and_default_value_compatible(default_value, shape):\n    if nest.is_nested(default_value) != bool(shape):\n        return False\n    if not shape:\n        return True\n    if len(default_value) != shape[0]:\n        return False\n    for i in range(shape[0]):\n        if not _is_shape_and_default_value_compatible(default_value[i], shape[1:]):\n            return False\n    return True",
    "docstring": "Verifies compatibility of shape and default_value.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\feature_column\\utils.py",
    "ast_data": "FunctionDef name:_is_shape_and_default_value_compatible arg:default_value arg:shape arguments arg arg If Compare Call Call Return return:yes If Return return:yes If Compare Call Return return:yes For Call If Call Return return:yes Return return:yes"
  },
  {
    "library": "django",
    "name": "get_fields_from_path",
    "source_code": "def get_fields_from_path(model, path):\n    pieces = path.split(LOOKUP_SEP)\n    fields = []\n    for piece in pieces:\n        if fields:\n            parent = get_model_from_relation(fields[-1])\n        else:\n            parent = model\n        fields.append(parent._meta.get_field(piece))\n    return fields",
    "docstring": "Return list of Fields given path relative to model. e.g. (ModelX, \"user__groups__name\") -> [ , , , ]",
    "type": "function",
    "file_path": "django\\django\\contrib\\admin\\utils.py",
    "ast_data": "FunctionDef name:get_fields_from_path arg:model arg:path arguments arg arg Assign Call Assign For If Assign Call Assign Call Call Return return:yes"
  },
  {
    "library": "django",
    "name": "pretty_wkt",
    "source_code": "@property\ndef pretty_wkt(self, simplify=0):\n    return capi.to_pretty_wkt(self.ptr, byref(c_char_p()), simplify)",
    "docstring": "Return the 'pretty' representation of the WKT.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\gdal\\srs.py",
    "ast_data": "FunctionDef name:pretty_wkt arg:self arg:simplify arguments arg arg Return return:yes Call Call Call"
  },
  {
    "library": "django",
    "name": "validate_thread_sharing",
    "source_code": "def validate_thread_sharing(self):\n    if not (self.allow_thread_sharing or self._thread_ident == _thread.get_ident()):\n        raise DatabaseError(\"DatabaseWrapper objects created in a thread can only be used in that same thread. The object with alias '%s' was created in thread id %s and this is thread id %s.\" % (self.alias, self._thread_ident, _thread.get_ident()))",
    "docstring": "Validate that the connection isn't accessed by another thread than the one which originally created it, unless the connection was explicitly authorized to be shared between threads (via the method). Raise an exception if the validation fails.",
    "type": "method",
    "file_path": "django\\django\\db\\backends\\base\\base.py",
    "ast_data": "FunctionDef name:validate_thread_sharing arg:self arguments arg If BoolOp Compare Call Raise Call Call"
  },
  {
    "library": "tensorflow",
    "name": "strategy_supports_no_merge_call",
    "source_code": "@tf_export('__internal__.distribute.strategy_supports_no_merge_call', v1=[])\ndef strategy_supports_no_merge_call():\n    if not distribute_lib.has_strategy():\n        return True\n    strategy = distribute_lib.get_strategy()\n    return not strategy.extended._use_merge_call()",
    "docstring": "Returns if the current can operate in pure replica context.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\merge_call_interim.py",
    "ast_data": "FunctionDef name:strategy_supports_no_merge_call arguments If Call Return return:yes Assign Call Return return:yes Call Call"
  },
  {
    "library": "matplotlib",
    "name": "read_style_directory",
    "source_code": "def read_style_directory(style_dir):\n    styles = dict()\n    for path in Path(style_dir).glob(f'*.{STYLE_EXTENSION}'):\n        with warnings.catch_warnings(record=True) as warns:\n            styles[path.stem] = _rc_params_in_file(path)\n        for w in warns:\n            _log.warning('In %s: %s', path, w.message)\n    return styles",
    "docstring": "Return dictionary of styles defined in *style_dir*.",
    "type": "function",
    "file_path": "matplotlib\\lib\\matplotlib\\style\\core.py",
    "ast_data": "FunctionDef name:read_style_directory arg:style_dir arguments arg Assign Call For Call Call With Call Assign Call For Call Return return:yes"
  },
  {
    "library": "numpy",
    "name": "_get_fields_and_offsets",
    "source_code": "def _get_fields_and_offsets(dt, offset=0):\n\n    def count_elem(dt):\n        count = 1\n        while dt.shape != ():\n            for size in dt.shape:\n                count *= size\n            dt = dt.base\n        return (dt, count)\n    fields = []\n    for name in dt.names:\n        field = dt.fields[name]\n        f_dt, f_offset = (field[0], field[1])\n        f_dt, n = count_elem(f_dt)\n        if f_dt.names is None:\n            fields.append((np.dtype((f_dt, (n,))), n, f_offset + offset))\n        else:\n            subfields = _get_fields_and_offsets(f_dt, f_offset + offset)\n            size = f_dt.itemsize\n            for i in range(n):\n                if i == 0:\n                    fields.extend(subfields)\n                else:\n                    fields.extend([(d, c, o + i * size) for d, c, o in subfields])\n    return fields",
    "docstring": "Returns a flat list of (dtype, count, offset) tuples of all the scalar fields in the dtype \"dt\", including nested fields, in left to right order.",
    "type": "function",
    "file_path": "numpy\\numpy\\lib\\recfunctions.py",
    "ast_data": "FunctionDef name:_get_fields_and_offsets arg:dt arg:offset arguments arg arg FunctionDef name:count_elem arg:dt arguments arg Assign While Compare For Assign Return return:yes Assign For Assign Assign Assign Call If Compare Call Call Assign Call Assign For Call If Compare Call Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "ion",
    "source_code": "def ion() -> AbstractContextManager:\n    stack = ExitStack()\n    stack.callback(ion if isinteractive() else ioff)\n    matplotlib.interactive(True)\n    install_repl_displayhook()\n    return stack",
    "docstring": "Enable interactive mode. See for more details. See Also -------- ioff : Disable interactive mode. isinteractive : Whether interactive mode is enabled. show : Show all figures (and maybe block). pause : Show all figures, and block for a time. Notes ----- For a temporary change, this can be used as a context manager:: # if interactive mode is off # then figures will not be shown on creation plt.ioff() # This figure will not be shown immediately fig = plt.figure() with plt.ion(): # interactive mode will be on # figures will automatically be shown fig2 = plt.figure() # ... To enable optional usage as a context manager, this function returns a context manager object, which is not intended to be stored or accessed by the user.",
    "type": "function",
    "file_path": "matplotlib\\lib\\matplotlib\\pyplot.py",
    "ast_data": "FunctionDef name:ion arguments Assign Call Call Call Call Call Return return:yes"
  },
  {
    "library": "scipy",
    "name": "f1",
    "source_code": "def f1(x):\n    return x * (x - 1.0)",
    "docstring": "f1 is a quadratic with roots at 0 and 1",
    "type": "function",
    "file_path": "scipy\\scipy\\optimize\\_tstutils.py",
    "ast_data": "FunctionDef name:f1 arg:x arguments arg Return return:yes"
  },
  {
    "library": "django",
    "name": "PostGISGeometryColumns",
    "source_code": "class PostGISGeometryColumns(models.Model):\n    f_table_catalog = models.CharField(max_length=256)\n    f_table_schema = models.CharField(max_length=256)\n    f_table_name = models.CharField(max_length=256)\n    f_geometry_column = models.CharField(max_length=256)\n    coord_dimension = models.IntegerField()\n    srid = models.IntegerField(primary_key=True)\n    type = models.CharField(max_length=30)\n\n    class Meta:\n        app_label = 'gis'\n        db_table = 'geometry_columns'\n        managed = False\n\n    def __str__(self):\n        return '%s.%s - %dD %s field (SRID: %d)' % (self.f_table_name, self.f_geometry_column, self.coord_dimension, self.type, self.srid)\n\n    @classmethod\n    def table_name_col(cls):\n        return 'f_table_name'\n\n    @classmethod\n    def geom_col_name(cls):\n        return 'f_geometry_column'",
    "docstring": "The 'geometry_columns' view from PostGIS. See the PostGIS documentation at Ch. 4.3.2.",
    "type": "class",
    "file_path": "django\\django\\contrib\\gis\\db\\backends\\postgis\\models.py",
    "ast_data": "ClassDef name:PostGISGeometryColumns Assign Call Assign Call Assign Call Assign Call Assign Call Assign Call Assign Call ClassDef name:Meta Assign Assign Assign FunctionDef name:__str__ arg:self arguments arg Return return:yes FunctionDef name:table_name_col arg:cls arguments arg Return return:yes FunctionDef name:geom_col_name arg:cls arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "to_raw_op",
    "source_code": "def to_raw_op(f: types.FunctionType) -> Callable[..., Any]:\n    f = types.FunctionType(f.__code__, f.__globals__, f.__name__, f.__defaults__, f.__closure__)\n    return kwarg_only(f)",
    "docstring": "Make a given op wrapper function raw. Raw op wrappers can only be called with keyword arguments. Args: f: An op wrapper function to make raw. Returns: Raw .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\ops.py",
    "ast_data": "FunctionDef name:to_raw_op arg:f arguments arg Assign Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "byte_swap_string_content",
    "source_code": "def byte_swap_string_content(buffer, from_endiness, to_endiness):\n    num_of_strings = int.from_bytes(buffer.data[0:4], from_endiness)\n    string_content = bytearray(buffer.data[4 * (num_of_strings + 2):])\n    prefix_data = b''.join([int.from_bytes(buffer.data[i:i + 4], from_endiness).to_bytes(4, to_endiness) for i in range(0, (num_of_strings + 1) * 4 + 1, 4)])\n    buffer.data = prefix_data + string_content",
    "docstring": "Helper function for byte-swapping the string buffer. Args: buffer: TFLite string buffer of from_endiness format. from_endiness: The original endianness format of the string buffer. to_endiness: The destined endianness format of the string buffer.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\lite\\tools\\flatbuffer_utils.py",
    "ast_data": "FunctionDef name:byte_swap_string_content arg:buffer arg:from_endiness arg:to_endiness arguments arg arg arg Assign Call Assign Call Assign Call Call Call Call Assign"
  },
  {
    "library": "pytorch",
    "name": "can_device_access_peer",
    "source_code": "def can_device_access_peer(device: _device_t, peer_device: _device_t) -> bool:\n    _lazy_init()\n    device = _get_device_index(device, optional=True)\n    peer_device = _get_device_index(peer_device)\n    if device < 0 or device >= device_count():\n        raise AssertionError('Invalid device id')\n    if peer_device < 0 or peer_device >= device_count():\n        raise AssertionError('Invalid peer device id')\n    return torch._C._cuda_canDeviceAccessPeer(device, peer_device)",
    "docstring": "Check if peer access between two devices is possible.",
    "type": "function",
    "file_path": "pytorch\\torch\\cuda\\__init__.py",
    "ast_data": "FunctionDef name:can_device_access_peer arg:device arg:peer_device arguments arg arg Call Assign Call Assign Call If BoolOp Compare Compare Call Raise Call If BoolOp Compare Compare Call Raise Call Return return:yes Call"
  },
  {
    "library": "django",
    "name": "__iadd__",
    "source_code": "def __iadd__(self, other):\n    self.extend(other)\n    return self",
    "docstring": "add another list-like object to self",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\geos\\mutable_list.py",
    "ast_data": "FunctionDef name:__iadd__ arg:self arg:other arguments arg arg Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "tile_one_dimension",
    "source_code": "def tile_one_dimension(data, axis, multiple):\n    if data.shape.ndims is not None:\n        multiples = [1] * data.shape.ndims\n        multiples[axis] = multiple\n    else:\n        ones_value = ones(rank(data), dtypes.int32)\n        multiples = concat([ones_value[:axis], [multiple], ones_value[axis + 1:]], axis=0)\n    return tile(data, multiples)",
    "docstring": "Tiles a single dimension of a tensor.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\array_ops.py",
    "ast_data": "FunctionDef name:tile_one_dimension arg:data arg:axis arg:multiple arguments arg arg arg If Compare Assign Assign Assign Call Call Assign Call Return return:yes Call"
  },
  {
    "library": "numpy",
    "name": "legvander",
    "source_code": "def legvander(x, deg):\n    ideg = pu._as_int(deg, 'deg')\n    if ideg < 0:\n        raise ValueError('deg must be non-negative')\n    x = np.array(x, copy=None, ndmin=1) + 0.0\n    dims = (ideg + 1,) + x.shape\n    dtyp = x.dtype\n    v = np.empty(dims, dtype=dtyp)\n    v[0] = x * 0 + 1\n    if ideg > 0:\n        v[1] = x\n        for i in range(2, ideg + 1):\n            v[i] = (v[i - 1] * x * (2 * i - 1) - v[i - 2] * (i - 1)) / i\n    return np.moveaxis(v, 0, -1)",
    "docstring": "Pseudo-Vandermonde matrix of given degree. Returns the pseudo-Vandermonde matrix of degree and sample points . The pseudo-Vandermonde matrix is defined by .. math:: V[..., i] = L_i(x) where `VxcVxx`.",
    "type": "function",
    "file_path": "numpy\\numpy\\polynomial\\legendre.py",
    "ast_data": "FunctionDef name:legvander arg:x arg:deg arguments arg arg Assign Call If Compare Raise Call Assign Call Assign Assign Assign Call Assign If Compare Assign For Call Assign Return return:yes Call"
  },
  {
    "library": "kornia",
    "name": "squared_distance",
    "source_code": "def squared_distance(self, point: Tensor) -> Tensor:\n    diff: Tensor = point - self.origin\n    return squared_norm(diff - self.direction @ diff * self.direction)",
    "docstring": "Return the squared distance of a point to its projection onte the line. Args: point: the point to calculate the distance onto the line.",
    "type": "method",
    "file_path": "kornia\\kornia\\geometry\\line.py",
    "ast_data": "FunctionDef name:squared_distance arg:self arg:point arguments arg arg Return return:yes Call"
  },
  {
    "library": "numpy",
    "name": "FunctionHtmlFormatter",
    "source_code": "class FunctionHtmlFormatter(HtmlFormatter):\n\n    def __init__(self, lines, **kwargs):\n        HtmlFormatter.__init__(self, **kwargs)\n        self.lines = lines\n\n    def wrap(self, source, outfile):\n        for i, (c, t) in enumerate(HtmlFormatter.wrap(self, source, outfile)):\n            as_functions = self.lines.get(i - 1, None)\n            if as_functions is not None:\n                yield (0, '<div title=%s style=\"background: #ccffcc\">[%2d]' % (quoteattr('as ' + ', '.join(as_functions)), len(as_functions)))\n            else:\n                yield (0, '    ')\n            yield (c, t)\n            if as_functions is not None:\n                yield (0, '</div>')",
    "docstring": "Custom HTML formatter to insert extra information with the lines.",
    "type": "class",
    "file_path": "numpy\\tools\\c_coverage\\c_coverage_report.py",
    "ast_data": "ClassDef name:FunctionHtmlFormatter FunctionDef name:__init__ arg:self arg:lines arguments arg arg arg Call Assign FunctionDef name:wrap arg:self arg:source arg:outfile arguments arg arg arg For Call Call Assign Call If Compare Call Call Call If Compare"
  },
  {
    "library": "matplotlib",
    "name": "get_user_transform",
    "source_code": "def get_user_transform(self):\n    if self._user_transform is not None:\n        return self._user_transform.frozen()",
    "docstring": "Return user supplied part of marker transform.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\markers.py",
    "ast_data": "FunctionDef name:get_user_transform arg:self arguments arg If Compare Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_as_graph_element",
    "source_code": "def _as_graph_element(self):\n    return self._variable",
    "docstring": "Conversion function for Graph.as_graph_element().",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\ref_variable.py",
    "ast_data": "FunctionDef name:_as_graph_element arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_graph_key",
    "source_code": "@property\ndef _graph_key(self):\n    return self._primary._graph_key",
    "docstring": "Lets Optimizers know which graph this variable is from.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\values.py",
    "ast_data": "FunctionDef name:_graph_key arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "tuple_shapes",
    "source_code": "def tuple_shapes(self):\n    if not self.is_tuple():\n        raise ValueError('tuple_shapes() called on a non-tuple shape')\n    return self._tuple_shapes",
    "docstring": "If this is a tuple, returns its sequence of constituent Shape objects. Returns: Tuple sub-shapes. Raises: ValueError: if this is not a tuple.",
    "type": "method",
    "file_path": "tensorflow\\third_party\\xla\\xla\\python_api\\xla_shape.py",
    "ast_data": "FunctionDef name:tuple_shapes arg:self arguments arg If Call Raise Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "keys",
    "source_code": "def keys(self) -> Iterable[str]:\n    return self._keys.keys()",
    "docstring": "Return an iterable of the ParameterDict keys.",
    "type": "method",
    "file_path": "pytorch\\torch\\nn\\modules\\container.py",
    "ast_data": "FunctionDef name:keys arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "numpy",
    "name": "__getitem__",
    "source_code": "def __getitem__(self, indx):\n    _localdict = self.__dict__\n    _mask = np.ndarray.__getattribute__(self, '_mask')\n    _data = np.ndarray.view(self, _localdict['_baseclass'])\n    if isinstance(indx, str):\n        obj = _data[indx].view(ma.MaskedArray)\n        obj._mask = _mask[indx]\n        obj._sharedmask = True\n        fval = _localdict['_fill_value']\n        if fval is not None:\n            obj._fill_value = fval[indx]\n        if not obj.ndim and obj._mask:\n            return ma.masked\n        return obj\n    obj = np.asarray(_data[indx]).view(mrecarray)\n    obj._mask = np.asarray(_mask[indx]).view(np.recarray)\n    return obj",
    "docstring": "Returns all the fields sharing the same fieldname base. The fieldname base is either or .",
    "type": "method",
    "file_path": "numpy\\numpy\\ma\\mrecords.py",
    "ast_data": "FunctionDef name:__getitem__ arg:self arg:indx arguments arg arg Assign Assign Call Assign Call If Call Assign Call Assign Assign Assign If Compare Assign If BoolOp Return return:yes Return return:yes Assign Call Call Assign Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_bw_hook",
    "source_code": "def _bw_hook(self, mod, output):\n    self.activation_checkpointing = False\n    self.name = super()._get_mod_name(mod)",
    "docstring": "This function is called when the backward pass of a module is called. It updates the current module for backward passes",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\tensor\\debug\\_comm_mode.py",
    "ast_data": "FunctionDef name:_bw_hook arg:self arg:mod arg:output arguments arg arg arg Assign Assign Call Call"
  },
  {
    "library": "scipy",
    "name": "expm_frechet_kronform",
    "source_code": "def expm_frechet_kronform(A, method=None, check_finite=True):\n    if check_finite:\n        A = np.asarray_chkfinite(A)\n    else:\n        A = np.asarray(A)\n    if len(A.shape) != 2 or A.shape[0] != A.shape[1]:\n        raise ValueError('expected a square matrix')\n    n = A.shape[0]\n    ident = np.identity(n)\n    cols = []\n    for i in range(n):\n        for j in range(n):\n            E = np.outer(ident[i], ident[j])\n            F = expm_frechet(A, E, method=method, compute_expm=False, check_finite=False)\n            cols.append(vec(F))\n    return np.vstack(cols).T",
    "docstring": "Construct the Kronecker form of the Frechet derivative of expm. Parameters ---------- A : array_like with shape (N, N) Matrix to be expm'd. method : str, optional Extra keyword to be passed to expm_frechet. check_finite : bool, optional Whether to check that the input matrix contains only finite numbers. Disabling may give a performance gain, but may result in problems (crashes, non-termination) if the inputs do contain infinities or NaNs. Returns ------- K : 2-D ndarray with shape (N*N, N*N) Kronecker form of the Frechet derivative of the matrix exponential. Notes ----- This function is used to help compute the condition number of the matrix exponential. See Also -------- expm : Compute a matrix exponential. expm_frechet : Compute the Frechet derivative of the matrix exponential. expm_cond : Compute the relative condition number of the matrix exponential in the Frobenius norm.",
    "type": "function",
    "file_path": "scipy\\scipy\\linalg\\_expm_frechet.py",
    "ast_data": "FunctionDef name:expm_frechet_kronform arg:A arg:method arg:check_finite arguments arg arg arg If Assign Call Assign Call If BoolOp Compare Call Compare Raise Call Assign Assign Call Assign For Call For Call Assign Call Assign Call Call Call Return return:yes Call"
  },
  {
    "library": "numpy",
    "name": "feature_flags",
    "source_code": "@_Cache.me\ndef feature_flags(self, names):\n    names = self.feature_sorted(self.feature_implies_c(names))\n    flags = []\n    for n in names:\n        d = self.feature_supported[n]\n        f = d.get('flags', [])\n        if not f or not self.cc_test_flags(f):\n            continue\n        flags += f\n    return self.cc_normalize_flags(flags)",
    "docstring": "Return a list of CPU features flags sorted from the lowest to highest interest.",
    "type": "method",
    "file_path": "numpy\\numpy\\distutils\\ccompiler_opt.py",
    "ast_data": "FunctionDef name:feature_flags arg:self arg:names arguments arg arg Assign Call Call Assign For Assign Assign Call If BoolOp Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "is_available",
    "source_code": "def is_available() -> bool:\n    return torch._C._has_cusparselt",
    "docstring": "Return a bool indicating if cuSPARSELt is currently available.",
    "type": "function",
    "file_path": "pytorch\\torch\\backends\\cusparselt\\__init__.py",
    "ast_data": "FunctionDef name:is_available arguments Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "VariablePolicy",
    "source_code": "@tf_export('saved_model.experimental.VariablePolicy')\nclass VariablePolicy(enum.Enum):\n    NONE = None\n    SAVE_VARIABLE_DEVICES = 'save_variable_devices'\n    EXPAND_DISTRIBUTED_VARIABLES = 'expand_distributed_variables'\n\n    def _save_variable_devices(self):\n        return self != VariablePolicy.NONE\n\n    def _expand_distributed_variables(self):\n        return self == VariablePolicy.EXPAND_DISTRIBUTED_VARIABLES\n\n    @staticmethod\n    def from_obj(obj):\n        if obj is None:\n            return VariablePolicy.NONE\n        if isinstance(obj, VariablePolicy):\n            return obj\n        key = str(obj).lower()\n        for policy in VariablePolicy:\n            if key == policy.value:\n                return policy\n        raise ValueError(f'Received invalid VariablePolicy value: {obj}.')",
    "docstring": "Enum defining options for variable handling when saving. NONE No policy applied: Distributed variables are saved as one variable, with no device attached. SAVE_VARIABLE_DEVICES When saving variables, also save their device assignment. This is useful if one wants to hardcode devices in saved models, but it also makes them non-portable if soft device placement is disabled (more details in ). This is currently not fully supported by , and is mainly intended to be used when one will be reading the saved model at a lower API level. In the example below, the graph saved by the call to will have the variable devices correctly specified: Distributed variables are still saved as one variable under this policy. EXPAND_DISTRIBUTED_VARIABLES Distributed variables will be saved with information about their components, allowing for their restoration on load. Also, the saved graph will contain references to those variables. This is useful when one wants to use the model for training in environments where the original distribution strategy is not available.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\saved_model\\save_options.py",
    "ast_data": "ClassDef name:VariablePolicy Assign Assign Assign FunctionDef name:_save_variable_devices arg:self arguments arg Return return:yes Compare FunctionDef name:_expand_distributed_variables arg:self arguments arg Return return:yes Compare FunctionDef name:from_obj arg:obj arguments arg If Compare Return return:yes If Call Return return:yes Assign Call Call For If Compare Return return:yes Raise Call Call"
  },
  {
    "library": "authlib",
    "name": "import_key_set",
    "source_code": "@classmethod\ndef import_key_set(cls, raw):\n    raw = _transform_raw_key(raw)\n    if isinstance(raw, dict) and 'keys' in raw:\n        keys = raw.get('keys')\n        return KeySet([cls.import_key(k) for k in keys])\n    raise ValueError('Invalid key set format')",
    "docstring": "Import KeySet from string, dict or a list of keys. :return: KeySet instance",
    "type": "method",
    "file_path": "authlib\\authlib\\jose\\rfc7517\\jwk.py",
    "ast_data": "FunctionDef name:import_key_set arg:cls arg:raw arguments arg arg Assign Call If BoolOp Call Compare Assign Call Return return:yes Call Call Raise Call"
  },
  {
    "library": "tensorflow",
    "name": "_ymask",
    "source_code": "def _ymask(length):\n    return _math_ops.cast(1 - 2 * (_math_ops.range(length) % 2), complex_dtype)",
    "docstring": "A sequence of [1+0j, -1+0j, 1+0j, -1+0j, ...] with length .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\signal\\fft_ops.py",
    "ast_data": "FunctionDef name:_ymask arg:length arguments arg Return return:yes Call Call"
  },
  {
    "library": "django",
    "name": "has_header",
    "source_code": "def has_header(self, header):\n    return header in self.headers",
    "docstring": "Case-insensitive check for a header.",
    "type": "method",
    "file_path": "django\\django\\http\\response.py",
    "ast_data": "FunctionDef name:has_header arg:self arg:header arguments arg arg Return return:yes Compare"
  },
  {
    "library": "pytorch",
    "name": "RendezvousBackend",
    "source_code": "class RendezvousBackend(ABC):\n\n    @property\n    @abstractmethod\n    def name(self) -> str:\n        pass\n\n    @abstractmethod\n    def get_state(self) -> Optional[tuple[bytes, Token]]:\n        pass\n\n    @abstractmethod\n    def set_state(self, state: bytes, token: Optional[Token]=None) -> Optional[tuple[bytes, Token, bool]]:\n        pass",
    "docstring": "Represent a backend that holds the rendezvous state.",
    "type": "class",
    "file_path": "pytorch\\torch\\distributed\\elastic\\rendezvous\\dynamic_rendezvous.py",
    "ast_data": "ClassDef name:RendezvousBackend FunctionDef name:name arg:self arguments arg FunctionDef name:get_state arg:self arguments arg FunctionDef name:set_state arg:self arg:state arg:token arguments arg arg arg"
  },
  {
    "library": "pytorch",
    "name": "_compare_onnx_pytorch_outputs",
    "source_code": "def _compare_onnx_pytorch_outputs(onnx_outs: _OutputsType, pt_outs: Any, options: VerificationOptions):\n    if options.ignore_none:\n        pt_outs, _ = torch.jit._flatten(pt_outs)\n    else:\n        pt_outs = _inline_flatten_list([pt_outs], [])\n    pt_outs_np = _unpack_to_numpy(pt_outs, cast_onnx_accepted=False)\n    onnx_outs = _inline_flatten_list(onnx_outs, [])\n    _compare_onnx_pytorch_outputs_in_np(onnx_outs, pt_outs_np, options)",
    "docstring": "Compare ONNX and PyTorch outputs. Args: onnx_outs: outputs from ONNX backend. pt_outs: outputs from PyTorch. options: options for verification. Raises: AssertionError: if outputs from ONNX model and PyTorch model are not equal up to specified precision. ValueError: if arguments provided are invalid.",
    "type": "function",
    "file_path": "pytorch\\torch\\onnx\\verification.py",
    "ast_data": "FunctionDef name:_compare_onnx_pytorch_outputs arg:onnx_outs arg:pt_outs arg:options arguments arg arg arg If Assign Call Assign Call Assign Call Assign Call Call"
  },
  {
    "library": "numpy",
    "name": "_fromnxfunction_allargs",
    "source_code": "class _fromnxfunction_allargs(_fromnxfunction):\n\n    def __call__(self, *args, **params):\n        func = getattr(np, self.__name__)\n        res = []\n        for x in args:\n            _d = func(np.asarray(x), **params)\n            _m = func(getmaskarray(x), **params)\n            res.append(masked_array(_d, mask=_m))\n        if len(args) == 1:\n            return res[0]\n        return res",
    "docstring": "A version of that is called with multiple array arguments. Similar to except that all args are converted to arrays even if they are not so already. This makes it possible to process scalars as 1-D arrays. Only keyword arguments are passed through verbatim for the data and mask calls. Arrays arguments are processed independently and the results are returned in a list. If only one arg is present, the return value is just the processed array instead of a list.",
    "type": "class",
    "file_path": "numpy\\numpy\\ma\\extras.py",
    "ast_data": "ClassDef name:_fromnxfunction_allargs FunctionDef name:__call__ arg:self arguments arg arg arg Assign Call Assign For Assign Call Call Assign Call Call Call Call If Compare Call Return return:yes Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "disable_graph_collection",
    "source_code": "def disable_graph_collection(self):\n    if not self._context_handle:\n        return\n    pywrap_tfe.TFE_ContextDisableGraphCollection(self._context_handle)",
    "docstring": "Disables graph collection of executed functions.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\context.py",
    "ast_data": "FunctionDef name:disable_graph_collection arg:self arguments arg If Return return:no Call"
  },
  {
    "library": "scipy",
    "name": "Zirilli",
    "source_code": "class Zirilli(Benchmark):\n\n    def __init__(self, dimensions=2):\n        Benchmark.__init__(self, dimensions)\n        self._bounds = list(zip([-10.0] * self.N, [10.0] * self.N))\n        self.custom_bounds = ([-2.0, 2.0], [-2.0, 2.0])\n        self.global_optimum = [[-1.0465, 0.0]]\n        self.fglob = -0.35238603\n\n    def fun(self, x, *args):\n        self.nfev += 1\n        return 0.25 * x[0] ** 4 - 0.5 * x[0] ** 2 + 0.1 * x[0] + 0.5 * x[1] ** 2",
    "docstring": "Zettl objective function. This class defines the Zirilli [1]_ global optimization problem. This is a unimodal minimization problem defined as follows: .. math:: f_{\\text{Zirilli}}(x) = 0.25x_1^4 - 0.5x_1^2 + 0.1x_1 + 0.5x_2^2 Here, :math: represents the number of dimensions and :math: for :math:. *Global optimum*: :math: for :math: .. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions For Global Optimization Problems Int. Journal of Mathematical Modelling and Numerical Optimisation, 2013, 4, 150-194.",
    "type": "class",
    "file_path": "scipy\\benchmarks\\benchmarks\\go_benchmark_functions\\go_funcs_Z.py",
    "ast_data": "ClassDef name:Zirilli FunctionDef name:__init__ arg:self arg:dimensions arguments arg arg Call Assign Call Call Assign Assign Assign FunctionDef name:fun arg:self arg:x arguments arg arg arg Return return:yes"
  },
  {
    "library": "sphinx",
    "name": "collect_node_menus",
    "source_code": "def collect_node_menus(self) -> None:\n    node_menus = self.node_menus\n    targets: list[Element] = [self.document]\n    targets.extend(self.document.findall(nodes.section))\n    for node in targets:\n        assert node.get('node_name', False)\n        entries = [s['node_name'] for s in find_subsections(node)]\n        node_menus[node['node_name']] = entries\n    title = self.document.next_node(nodes.title)\n    top = title.parent if title else self.document\n    if not isinstance(top, nodes.document | nodes.section):\n        top = self.document\n    if top is not self.document:\n        entries = node_menus[top['node_name']]\n        entries += node_menus['Top'][1:]\n        node_menus['Top'] = entries\n        del node_menus[top['node_name']]\n        top['node_name'] = 'Top'\n    for name, _content in self.indices:\n        node_menus[name] = []\n        node_menus['Top'].append(name)",
    "docstring": "Collect the menu entries for each \"node\" section.",
    "type": "method",
    "file_path": "sphinx\\sphinx\\writers\\texinfo.py",
    "ast_data": "FunctionDef name:collect_node_menus arg:self arguments arg Assign Call Call For Call Assign Call Assign Assign Call Assign If Call Assign If Compare Assign Assign Assign For Assign Call"
  },
  {
    "library": "scrapy",
    "name": "timeoutConnection",
    "source_code": "def timeoutConnection(self) -> None:\n    if self.conn.open_outbound_streams > 0 or self.conn.open_inbound_streams > 0 or self.metadata['active_streams'] > 0:\n        error_code = ErrorCodes.PROTOCOL_ERROR\n    else:\n        error_code = ErrorCodes.NO_ERROR\n    self.conn.close_connection(error_code=error_code)\n    self._write_to_transport()\n    self._lose_connection_with_error([TimeoutError(f'Connection was IDLE for more than {self.IDLE_TIMEOUT}s')])",
    "docstring": "Called when the connection times out. We lose the connection with TimeoutError",
    "type": "method",
    "file_path": "scrapy\\scrapy\\core\\http2\\protocol.py",
    "ast_data": "FunctionDef name:timeoutConnection arg:self arguments arg If BoolOp Compare Compare Compare Assign Assign Call Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "get_input_at",
    "source_code": "def get_input_at(self, node_index):\n    return self._get_node_attribute_at_index(node_index, 'input_tensors', 'input')",
    "docstring": "Retrieves the input tensor(s) of a layer at a given node. Args: node_index: Integer, index of the node from which to retrieve the attribute. E.g. will correspond to the first input node of the layer. Returns: A tensor (or list of tensors if the layer has multiple inputs). Raises: RuntimeError: If called in Eager mode.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\base_layer_v1.py",
    "ast_data": "FunctionDef name:get_input_at arg:self arg:node_index arguments arg arg Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "_update_class_log_prior",
    "source_code": "def _update_class_log_prior(self, class_prior=None):\n    n_classes = len(self.classes_)\n    if class_prior is not None:\n        if len(class_prior) != n_classes:\n            raise ValueError('Number of priors must match number of classes.')\n        self.class_log_prior_ = np.log(class_prior)\n    elif self.fit_prior:\n        with warnings.catch_warnings():\n            warnings.simplefilter('ignore', RuntimeWarning)\n            log_class_count = np.log(self.class_count_)\n        self.class_log_prior_ = log_class_count - np.log(self.class_count_.sum())\n    else:\n        self.class_log_prior_ = np.full(n_classes, -np.log(n_classes))",
    "docstring": "Update class log priors. The class log priors are based on , class count or the number of classes. This method is called each time or update the model.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\naive_bayes.py",
    "ast_data": "FunctionDef name:_update_class_log_prior arg:self arg:class_prior arguments arg arg Assign Call If Compare If Compare Call Raise Call Assign Call If With Call Call Assign Call Assign Call Call Assign Call Call"
  },
  {
    "library": "scipy",
    "name": "normaltest",
    "source_code": "def normaltest(a, axis=0):\n    a, axis = _chk_asarray(a, axis)\n    s, _ = skewtest(a, axis)\n    k, _ = kurtosistest(a, axis)\n    k2 = s * s + k * k\n    return NormaltestResult(k2, distributions.chi2.sf(k2, 2))",
    "docstring": "Tests whether a sample differs from a normal distribution. Parameters ---------- a : array_like The array containing the data to be tested. axis : int or None, optional Axis along which to compute test. Default is 0. If None, compute over the whole array . Returns ------- statistic : float or array `skewtestkurtosistestnormaltestscipy.stats.normaltest`.",
    "type": "function",
    "file_path": "scipy\\scipy\\stats\\_mstats_basic.py",
    "ast_data": "FunctionDef name:normaltest arg:a arg:axis arguments arg arg Assign Call Assign Call Assign Call Assign Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "create_gpu_capa_map",
    "source_code": "def create_gpu_capa_map(match_list, generate_csv=False, filename='compute_capability'):\n    gpu_capa = collections.OrderedDict()\n    include = False\n    gpu = ''\n    cnt = 0\n    mismatch_cnt = 0\n    for match in match_list:\n        if 'Products' in match:\n            if not include:\n                include = True\n            continue\n        elif 'www' in match:\n            include = False\n            break\n        if include:\n            if gpu:\n                if gpu in gpu_capa:\n                    gpu_capa[gpu].append(match)\n                else:\n                    gpu_capa[gpu] = [match]\n                gpu = ''\n                cnt += 1\n                if len(list(gpu_capa.keys())) < cnt:\n                    mismatch_cnt += 1\n                    cnt = len(list(gpu_capa.keys()))\n            else:\n                gpu = match\n    if generate_csv:\n        f_name = filename + '.csv'\n        write_csv_from_dict(f_name, gpu_capa)\n    return gpu_capa",
    "docstring": "Generates a map between GPU types and corresponding compute capability. This method is used for retrieving CUDA compute capability from the web only. Args: match_list: List of all CUDA compute capability detected from the webpage. generate_csv: Boolean for creating csv file to store results. filename: String that is the name of the csv file (without ending). Returns: OrderedDict that lists in the incoming order of all CUDA compute capability provided as .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\tools\\tensorflow_builder\\config_detector\\data\\cuda_compute_capability.py",
    "ast_data": "FunctionDef name:create_gpu_capa_map arg:match_list arg:generate_csv arg:filename arguments arg arg arg Assign Call Assign Assign Assign Assign For If Compare If Assign If Compare Assign If If If Compare Call Assign Assign If Compare Call Call Call Assign Call Call Call Assign If Assign Call Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "_get_loss",
    "source_code": "def _get_loss(self):\n    return HalfSquaredError()",
    "docstring": "This is only necessary because of the link and power arguments of the TweedieRegressor. Note that we do not need to pass sample_weight to the loss class as this is only needed to set loss.constant_hessian on which GLMs do not rely.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\linear_model\\_glm\\glm.py",
    "ast_data": "FunctionDef name:_get_loss arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "get_bin_seeds",
    "source_code": "def get_bin_seeds(X, bin_size, min_bin_freq=1):\n    if bin_size == 0:\n        return X\n    bin_sizes = defaultdict(int)\n    for point in X:\n        binned_point = np.round(point / bin_size)\n        bin_sizes[tuple(binned_point)] += 1\n    bin_seeds = np.array([point for point, freq in bin_sizes.items() if freq >= min_bin_freq], dtype=np.float32)\n    if len(bin_seeds) == len(X):\n        warnings.warn('Binning data failed with provided bin_size=%f, using data points as seeds.' % bin_size)\n        return X\n    bin_seeds = bin_seeds * bin_size\n    return bin_seeds",
    "docstring": "Find seeds for mean_shift. Finds seeds by first binning data onto a grid whose lines are spaced bin_size apart, and then choosing those bins with at least min_bin_freq points. Parameters ---------- X : array-like of shape (n_samples, n_features) Input points, the same points that will be used in mean_shift. bin_size : float Controls the coarseness of the binning. Smaller values lead to more seeding (which is computationally more expensive). If you're not sure how to set this, set it to the value of the bandwidth used in clustering.mean_shift. min_bin_freq : int, default=1 Only bins with at least min_bin_freq will be selected as seeds. Raising this value decreases the number of seeds found, which makes mean_shift computationally cheaper. Returns ------- bin_seeds : array-like of shape (n_samples, n_features) Points used as initial kernel positions in clustering.mean_shift.",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\cluster\\_mean_shift.py",
    "ast_data": "FunctionDef name:get_bin_seeds arg:X arg:bin_size arg:min_bin_freq arguments arg arg arg If Compare Return return:yes Assign Call For Assign Call Call Assign Call Call Compare If Compare Call Call Call Return return:yes Assign Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "GradScaler",
    "source_code": "class GradScaler(torch.amp.GradScaler):\n\n    @deprecated(\"`torch.cpu.amp.GradScaler(args...)` is deprecated. Please use `torch.amp.GradScaler('cpu', args...)` instead.\", category=FutureWarning)\n    def __init__(self, init_scale: float=2.0 ** 16, growth_factor: float=2.0, backoff_factor: float=0.5, growth_interval: int=2000, enabled: bool=True) -> None:\n        super().__init__('cpu', init_scale=init_scale, growth_factor=growth_factor, backoff_factor=backoff_factor, growth_interval=growth_interval, enabled=enabled)",
    "docstring": "See :class:. `` instead.",
    "type": "class",
    "file_path": "pytorch\\torch\\cpu\\amp\\grad_scaler.py",
    "ast_data": "ClassDef name:GradScaler FunctionDef name:__init__ arg:self arg:init_scale arg:growth_factor arg:backoff_factor arg:growth_interval arg:enabled arguments arg arg arg arg arg arg Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_resolve_num_tasks",
    "source_code": "def _resolve_num_tasks(self):\n    return _get_num_slurm_tasks()",
    "docstring": "Returns the number of tasks for the current job step.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\cluster_resolver\\slurm_cluster_resolver.py",
    "ast_data": "FunctionDef name:_resolve_num_tasks arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "export_run_metadata",
    "source_code": "def export_run_metadata():\n    return context().export_run_metadata()",
    "docstring": "Returns a RunMetadata proto with accumulated information. The returned protocol buffer contains information since the most recent call to either enable_run_metadata or export_run_metadata. Returns: A RunMetadata protocol buffer.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\context.py",
    "ast_data": "FunctionDef name:export_run_metadata arguments Return return:yes Call Call"
  },
  {
    "library": "matplotlib",
    "name": "get_current_fig_manager",
    "source_code": "def get_current_fig_manager() -> FigureManagerBase | None:\n    return gcf().canvas.manager",
    "docstring": "Return the figure manager of the current figure. The figure manager is a container for the actual backend-depended window that displays the figure on screen. If no current figure exists, a new one is created, and its figure manager is returned. Returns ------- or backend-dependent subclass thereof",
    "type": "function",
    "file_path": "matplotlib\\lib\\matplotlib\\pyplot.py",
    "ast_data": "FunctionDef name:get_current_fig_manager arguments Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "_get_all_submeshes",
    "source_code": "def _get_all_submeshes(self, device_mesh: 'DeviceMesh', mesh_dim_name: str) -> list['DeviceMesh']:\n    mesh_dim = self.get_mesh_dim_by_name(device_mesh, mesh_dim_name)\n    pg_ranks_by_dim = device_mesh.mesh.swapdims(-1, mesh_dim).reshape(-1, device_mesh.mesh.size(mesh_dim))\n    cur_rank = device_mesh.get_rank()\n    res_submeshes = []\n    for mesh_1d in pg_ranks_by_dim:\n        submesh = DeviceMesh(device_mesh.device_type, mesh_1d, mesh_dim_names=(mesh_dim_name,), _init_backend=False)\n        submesh._dim_group_names = [device_mesh._dim_group_names[mesh_dim]] if cur_rank in mesh_1d else []\n        res_submeshes.append(submesh)\n    return res_submeshes",
    "docstring": "Return all the submeshes of a given mesh dimension of the device mesh.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\device_mesh.py",
    "ast_data": "FunctionDef name:_get_all_submeshes arg:self arg:device_mesh arg:mesh_dim_name arguments arg arg arg Assign Call Assign Call Call Call Assign Call Assign For Assign Call Assign Compare Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_WorldMeta",
    "source_code": "class _WorldMeta(type):\n\n    @property\n    def WORLD(cls) -> Optional[ProcessGroup]:\n        return _world.default_pg\n\n    @WORLD.setter\n    def WORLD(cls, pg: Optional[ProcessGroup]):\n        _world.default_pg = pg",
    "docstring": "Meta class of ``.",
    "type": "class",
    "file_path": "pytorch\\torch\\distributed\\distributed_c10d.py",
    "ast_data": "ClassDef name:_WorldMeta FunctionDef name:WORLD arg:cls arguments arg Return return:yes FunctionDef name:WORLD arg:cls arg:pg arguments arg arg Assign"
  },
  {
    "library": "matplotlib",
    "name": "_get_camera_loc",
    "source_code": "def _get_camera_loc(self):\n    cx, cy, cz, dx, dy, dz = self._get_w_centers_ranges()\n    c = np.array([cx, cy, cz])\n    r = np.array([dx, dy, dz])\n    if self._focal_length == np.inf:\n        focal_length = 1000000000.0\n    else:\n        focal_length = self._focal_length\n    eye = c + self._view_w * self._dist * r / self._box_aspect * focal_length\n    return eye",
    "docstring": "Returns the current camera location in data coordinates.",
    "type": "method",
    "file_path": "matplotlib\\lib\\mpl_toolkits\\mplot3d\\axes3d.py",
    "ast_data": "FunctionDef name:_get_camera_loc arg:self arguments arg Assign Call Assign Call Assign Call If Compare Assign Assign Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "__call__",
    "source_code": "def __call__(self, shape, dtype=dtypes.float32, **kwargs):\n    self._validate_kwargs(kwargs)\n    dtype = dtypes.as_dtype(dtype)\n    if not dtype.is_numpy_compatible or dtype == dtypes.string:\n        raise ValueError(f'Argument `dtype` expected to be numeric or boolean. Received {dtype}.')\n    if _PARTITION_SHAPE in kwargs:\n        shape = kwargs[_PARTITION_SHAPE]\n    return array_ops.zeros(shape, dtype)",
    "docstring": "Returns a tensor object initialized as specified by the initializer. Args: shape: Shape of the tensor. dtype: Optional dtype of the tensor. Only numeric or boolean dtypes are supported. **kwargs: Additional keyword arguments. Raises: ValuesError: If the dtype is not numeric or boolean.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\init_ops_v2.py",
    "ast_data": "FunctionDef name:__call__ arg:self arg:shape arg:dtype arguments arg arg arg arg Call Assign Call If BoolOp Compare Raise Call If Compare Assign Return return:yes Call"
  },
  {
    "library": "django",
    "name": "__str__",
    "source_code": "def __str__(self):\n    return self.name",
    "docstring": "The string name of the layer.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\gdal\\layer.py",
    "ast_data": "FunctionDef name:__str__ arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_check_batch_shape_possibly_add_asserts",
    "source_code": "def _check_batch_shape_possibly_add_asserts(self):\n    if self._batch_shape_arg is None:\n        return\n    if self._assert_proper_shapes:\n        self._batch_shape_arg = control_flow_ops.with_dependencies([check_ops.assert_rank(self._batch_shape_arg, 1, message='Argument batch_shape must be a 1-D Tensor.'), check_ops.assert_non_negative(self._batch_shape_arg, message='Argument batch_shape must be non-negative.')], self._batch_shape_arg)\n    if not self._batch_shape_arg.dtype.is_integer:\n        raise TypeError('Argument batch_shape must be integer type.  Found: %s' % self._batch_shape_arg)\n    if self._batch_shape_static is None:\n        return\n    if self._batch_shape_static.ndim != 1:\n        raise ValueError('Argument batch_shape must be a 1-D Tensor.  Found: %s' % self._batch_shape_static)\n    if np.any(self._batch_shape_static < 0):\n        raise ValueError('Argument batch_shape must be non-negative.  Found:%s' % self._batch_shape_static)",
    "docstring": "Static check of init arg , possibly add asserts.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\linalg\\linear_operator_zeros.py",
    "ast_data": "FunctionDef name:_check_batch_shape_possibly_add_asserts arg:self arguments arg If Compare Return return:no If Assign Call Call Call If Raise Call If Compare Return return:no If Compare Raise Call If Call Compare Raise Call"
  },
  {
    "library": "scipy",
    "name": "msign",
    "source_code": "def msign(x):\n    return ma.filled(np.sign(x), 0)",
    "docstring": "Returns the sign of x, or 0 if x is masked.",
    "type": "function",
    "file_path": "scipy\\scipy\\stats\\_mstats_basic.py",
    "ast_data": "FunctionDef name:msign arg:x arguments arg Return return:yes Call Call"
  },
  {
    "library": "matplotlib",
    "name": "set_radius",
    "source_code": "def set_radius(self, radius):\n    self.width = self.height = 2 * radius\n    self.stale = True",
    "docstring": "Set the radius of the circle. Parameters ---------- radius : float",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\patches.py",
    "ast_data": "FunctionDef name:set_radius arg:self arg:radius arguments arg arg Assign Assign"
  },
  {
    "library": "numpy",
    "name": "_check_mode",
    "source_code": "def _check_mode(mode, encoding, newline):\n    if 't' in mode:\n        if 'b' in mode:\n            raise ValueError(f'Invalid mode: {mode!r}')\n    else:\n        if encoding is not None:\n            raise ValueError(\"Argument 'encoding' not supported in binary mode\")\n        if newline is not None:\n            raise ValueError(\"Argument 'newline' not supported in binary mode\")",
    "docstring": "Check mode and that encoding and newline are compatible. Parameters ---------- mode : str File open mode. encoding : str File encoding. newline : str Newline for text files.",
    "type": "function",
    "file_path": "numpy\\numpy\\lib\\_datasource.py",
    "ast_data": "FunctionDef name:_check_mode arg:mode arg:encoding arg:newline arguments arg arg arg If Compare If Compare Raise Call If Compare Raise Call If Compare Raise Call"
  },
  {
    "library": "matplotlib",
    "name": "set_zorder",
    "source_code": "def set_zorder(self, level):\n    if level is None:\n        level = self.__class__.zorder\n    if level != self.zorder:\n        self.zorder = level\n        self.pchanged()\n        self.stale = True",
    "docstring": "Set the zorder for the artist. Artists with lower zorder values are drawn first. Parameters ---------- level : float",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\artist.py",
    "ast_data": "FunctionDef name:set_zorder arg:self arg:level arguments arg arg If Compare Assign If Compare Assign Call Assign"
  },
  {
    "library": "scikit-learn",
    "name": "inplace_exp",
    "source_code": "def inplace_exp(X):\n    np.exp(X, out=X)",
    "docstring": "Compute the exponential inplace. Parameters ---------- X : {array-like, sparse matrix}, shape (n_samples, n_features) The input data.",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\neural_network\\_base.py",
    "ast_data": "FunctionDef name:inplace_exp arg:X arguments arg Call"
  },
  {
    "library": "numpy",
    "name": "hermvander2d",
    "source_code": "def hermvander2d(x, y, deg):\n    return pu._vander_nd_flat((hermvander, hermvander), (x, y), deg)",
    "docstring": "Pseudo-Vandermonde matrix of given degrees. Returns the pseudo-Vandermonde matrix of degrees and sample points ``0 >> import numpy as np >>> from numpy.polynomial.hermite import hermvander2d >>> x = np.array([-1, 0, 1]) >>> y = np.array([-1, 0, 1]) >>> hermvander2d(x, y, [2, 2]) array([[ 1., -2., 2., -2., 4., -4., 2., -4., 4.], [ 1., 0., -2., 0., 0., -0., -2., -0., 4.], [ 1., 2., 2., 2., 4., 4., 2., 4., 4.]])",
    "type": "function",
    "file_path": "numpy\\numpy\\polynomial\\hermite.py",
    "ast_data": "FunctionDef name:hermvander2d arg:x arg:y arg:deg arguments arg arg arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "unifiable",
    "source_code": "def unifiable(cls):\n    _unify.add((cls, cls, dict), unify_object)\n    _reify.add((cls, dict), reify_object)\n    return cls",
    "docstring": "Register standard unify and reify operations on class This uses the type and __dict__ or __slots__ attributes to define the nature of the term See Also: >>> # xdoctest: +SKIP >>> class A(object): ... def __init__(self, a, b): ... self.a = a ... self.b = b >>> unifiable(A) >>> x = var(\"x\") >>> a = A(1, 2) >>> b = A(1, x) >>> unify(a, b, {}) {~x: 2}",
    "type": "function",
    "file_path": "pytorch\\torch\\fx\\experimental\\unification\\more.py",
    "ast_data": "FunctionDef name:unifiable arg:cls arguments arg Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "get_export_outputs",
    "source_code": "def get_export_outputs(export_outputs, predictions):\n    if export_outputs is None:\n        default_output = export_output_lib.PredictOutput(predictions)\n        export_outputs = {signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY: default_output}\n    if not isinstance(export_outputs, dict):\n        raise TypeError('export_outputs must be dict, given: {}'.format(export_outputs))\n    for v in export_outputs.values():\n        if not isinstance(v, export_output_lib.ExportOutput):\n            raise TypeError('Values in export_outputs must be ExportOutput objects. Given: {}'.format(export_outputs))\n    _maybe_add_default_serving_output(export_outputs)\n    return export_outputs",
    "docstring": "Validate export_outputs or create default export_outputs. Args: export_outputs: Describes the output signatures to be exported to and used during serving. Should be a dict or None. predictions: Predictions or dict of . Returns: Valid export_outputs dict Raises: TypeError: if export_outputs is not a dict or its values are not ExportOutput instances.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\saving\\utils_v1\\export_utils.py",
    "ast_data": "FunctionDef name:get_export_outputs arg:export_outputs arg:predictions arguments arg arg If Compare Assign Call Assign If Call Raise Call Call For Call If Call Raise Call Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "export_dir",
    "source_code": "@property\ndef export_dir(self):\n    return self._export_dir",
    "docstring": "Directory containing the SavedModel.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\saved_model\\loader_impl.py",
    "ast_data": "FunctionDef name:export_dir arg:self arguments arg Return return:yes"
  },
  {
    "library": "authlib",
    "name": "InvalidClientMetadataError",
    "source_code": "class InvalidClientMetadataError(OAuth2Error):\n    error = 'invalid_client_metadata'",
    "docstring": "The value of one of the client metadata fields is invalid and the server has rejected this request. Note that an authorization server MAY choose to substitute a valid value for any requested parameter of a client's metadata.",
    "type": "class",
    "file_path": "authlib\\authlib\\oauth2\\rfc7591\\errors.py",
    "ast_data": "ClassDef name:InvalidClientMetadataError Assign"
  },
  {
    "library": "scipy",
    "name": "aps07_f",
    "source_code": "def aps07_f(x, n):\n    return (1 + (1 - n) ** 2) * x - (1 - n * x) ** 2",
    "docstring": "Upside down parabola with parametrizable height",
    "type": "function",
    "file_path": "scipy\\scipy\\optimize\\_tstutils.py",
    "ast_data": "FunctionDef name:aps07_f arg:x arg:n arguments arg arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "AutogradStateOpsFailSafeguard",
    "source_code": "class AutogradStateOpsFailSafeguard(TorchFunctionMode):\n\n    def __torch_function__(self, func, types, args=(), kwargs=None):\n        kwargs = kwargs or {}\n        unsupported_grad_mode_ops = [torch._C._set_grad_enabled]\n        current_state = torch._C.is_grad_enabled()\n        if func in unsupported_grad_mode_ops:\n            assert len(args) == 1\n            changed_state = args[0]\n            mode = torch._C._get_dispatch_mode(torch._C._TorchDispatchModeKey.PROXY)\n            if mode and isinstance(mode, ProxyTorchDispatchMode) and (not mode.pre_dispatch) and (changed_state != current_state):\n                raise RuntimeError(f\"Encountered autograd state manager op {func} trying to change global autograd state while exporting. This is unsafe because we don't capture this op in torch.export today, hence we can't reflect the user intention soundly. You can fix this by adding a torch.no_grad() context around the export call.\")\n        return func(*args, **kwargs)",
    "docstring": "Detect grad state ops during exporting the graph and fail the process by raising an error, to avoid unexpected behavior. Those grad mode ops could be: Export with predispatch mode is exempted.",
    "type": "class",
    "file_path": "pytorch\\torch\\export\\_safeguard.py",
    "ast_data": "ClassDef name:AutogradStateOpsFailSafeguard FunctionDef name:__torch_function__ arg:self arg:func arg:types arg:args arg:kwargs arguments arg arg arg arg arg Assign BoolOp Assign Assign Call If Compare Compare Call Assign Assign Call If BoolOp Call Compare Raise Call Return return:yes Call"
  },
  {
    "library": "seaborn",
    "name": "_plot_bivariate",
    "source_code": "def _plot_bivariate(self, x_var, y_var, ax, func, **kwargs):\n    if 'hue' not in signature(func).parameters:\n        self._plot_bivariate_iter_hue(x_var, y_var, ax, func, **kwargs)\n        return\n    kwargs = kwargs.copy()\n    if str(func.__module__).startswith('seaborn'):\n        kwargs['ax'] = ax\n    else:\n        plt.sca(ax)\n    if x_var == y_var:\n        axes_vars = [x_var]\n    else:\n        axes_vars = [x_var, y_var]\n    if self._hue_var is not None and self._hue_var not in axes_vars:\n        axes_vars.append(self._hue_var)\n    data = self.data[axes_vars]\n    if self._dropna:\n        data = data.dropna()\n    x = data[x_var]\n    y = data[y_var]\n    if self._hue_var is None:\n        hue = None\n    else:\n        hue = data.get(self._hue_var)\n    if 'hue' not in kwargs:\n        kwargs.update({'hue': hue, 'hue_order': self._hue_order, 'palette': self._orig_palette})\n    func(x=x, y=y, **kwargs)\n    self._update_legend_data(ax)",
    "docstring": "Draw a bivariate plot on the specified axes.",
    "type": "method",
    "file_path": "seaborn\\seaborn\\axisgrid.py",
    "ast_data": "FunctionDef name:_plot_bivariate arg:self arg:x_var arg:y_var arg:ax arg:func arguments arg arg arg arg arg arg If Compare Call Call Return return:no Assign Call If Call Call Assign Call If Compare Assign Assign If BoolOp Compare Compare Call Assign If Assign Call Assign Assign If Compare Assign Assign Call If Compare Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "node_defs",
    "source_code": "@property\ndef node_defs(self):\n    return self._node_defs",
    "docstring": "All the node defs in the graph to be converted. Returns: A map from node name to the NodeDef for all NodeDefs in the graph, as well as all control flow NodeDefs in the functions.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\convert_to_constants.py",
    "ast_data": "FunctionDef name:node_defs arg:self arguments arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "supported_activities",
    "source_code": "def supported_activities():\n    return torch.autograd._supported_activities()",
    "docstring": "Returns a set of supported profiler tracing activities. Note: profiler uses CUPTI library to trace on-device CUDA kernels. In case when CUDA is enabled but CUPTI is not available, passing ``). This, in turn, results in including CUDA time in the profiler table output, but not in the JSON trace.",
    "type": "function",
    "file_path": "pytorch\\torch\\profiler\\profiler.py",
    "ast_data": "FunctionDef name:supported_activities arguments Return return:yes Call"
  },
  {
    "library": "django",
    "name": "convex_hull",
    "source_code": "@property\ndef convex_hull(self):\n    return self._topology(capi.geos_convexhull(self.ptr))",
    "docstring": "Return the smallest convex Polygon that contains all the points in the Geometry.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\geos\\geometry.py",
    "ast_data": "FunctionDef name:convex_hull arg:self arguments arg Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "_writeback_to_local_shard",
    "source_code": "@torch.no_grad()\ndef _writeback_to_local_shard(handle: FlatParamHandle, writeback_grad: bool):\n\n    def _get_shard(flat_param_or_grad: torch.Tensor) -> torch.Tensor:\n        if handle.uses_sharded_strategy:\n            shard, _ = FlatParamHandle._get_unpadded_shard(flat_param_or_grad, handle.rank, handle.world_size)\n            return shard\n        return flat_param_or_grad\n    param_shard = _get_shard(handle.flat_param)\n    handle.flat_param._local_shard[:param_shard.numel()].copy_(param_shard)\n    if writeback_grad:\n        existing_grad = handle.sharded_grad\n        if existing_grad is not None:\n            assert handle.flat_param.grad is not None\n            grad_shard = _get_shard(handle.flat_param.grad)\n            existing_grad[:grad_shard.numel()].copy_(grad_shard)",
    "docstring": "For the handle, writes back the this rank's shard of the unsharded flattened parameter to the sharded flattened parameter. If `` 's data points to the padded unsharded flattened parameter.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\fsdp\\_unshard_param_utils.py",
    "ast_data": "FunctionDef name:_writeback_to_local_shard arg:handle arg:writeback_grad arguments arg arg FunctionDef name:_get_shard arg:flat_param_or_grad arguments arg If Assign Call Return return:yes Return return:yes Assign Call Call Call If Assign If Compare Compare Assign Call Call Call Call"
  },
  {
    "library": "django",
    "name": "PBKDF2SHA1PasswordHasher",
    "source_code": "class PBKDF2SHA1PasswordHasher(PBKDF2PasswordHasher):\n    algorithm = 'pbkdf2_sha1'\n    digest = hashlib.sha1",
    "docstring": "Alternate PBKDF2 hasher which uses SHA1, the default PRF recommended by PKCS #5. This is compatible with other implementations of PBKDF2, such as openssl's PKCS5_PBKDF2_HMAC_SHA1().",
    "type": "class",
    "file_path": "django\\django\\contrib\\auth\\hashers.py",
    "ast_data": "ClassDef name:PBKDF2SHA1PasswordHasher Assign Assign"
  },
  {
    "library": "matplotlib",
    "name": "format_data",
    "source_code": "def format_data(self, value):\n    sign = 1\n    fmt = 'g' if self.places is None else f'.{self.places:d}f'\n    if value < 0:\n        sign = -1\n        value = -value\n    if value != 0:\n        pow10 = int(math.floor(math.log10(value) / 3) * 3)\n    else:\n        pow10 = 0\n        value = 0.0\n    pow10 = np.clip(pow10, min(self.ENG_PREFIXES), max(self.ENG_PREFIXES))\n    mant = sign * value / 10.0 ** pow10\n    if abs(float(format(mant, fmt))) >= 1000 and pow10 < max(self.ENG_PREFIXES):\n        mant /= 1000\n        pow10 += 3\n    unit_prefix = self.ENG_PREFIXES[int(pow10)]\n    if self.unit or unit_prefix:\n        suffix = f'{self.sep}{unit_prefix}{self.unit}'\n    else:\n        suffix = ''\n    if self._usetex or self._useMathText:\n        return f'${mant:{fmt}}${suffix}'\n    else:\n        return f'{mant:{fmt}}{suffix}'",
    "docstring": "Format a number in engineering notation, appending a letter representing the power of 1000 of the original number. Some examples: >>> format_data(0) # for self.places = 0 '0' >>> format_data(1000000) # for self.places = 1 '1.0 M' >>> format_data(-1e-6) # for self.places = 2 '-1.00 µ'",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\ticker.py",
    "ast_data": "FunctionDef name:format_data arg:self arg:value arguments arg arg Assign Assign Compare If Compare Assign Assign If Compare Assign Call Call Call Assign Assign Assign Call Call Call Assign If BoolOp Compare Call Call Call Compare Call Assign Call If BoolOp Assign Assign If BoolOp Return return:yes Return return:yes"
  },
  {
    "library": "pandas",
    "name": "reindex_axis",
    "source_code": "@final\ndef reindex_axis(self, new_index: Index, axis: AxisInt, fill_value=None, only_slice: bool=False) -> Self:\n    new_index, indexer = self.axes[axis].reindex(new_index)\n    return self.reindex_indexer(new_index, indexer, axis=axis, fill_value=fill_value, only_slice=only_slice)",
    "docstring": "Conform data manager to new index.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\internals\\managers.py",
    "ast_data": "FunctionDef name:reindex_axis arg:self arg:new_index arg:axis arg:fill_value arg:only_slice arguments arg arg arg arg arg Assign Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "get_detector_name",
    "source_code": "def get_detector_name(self) -> str:\n    return 'dynamic_vs_static_detector'",
    "docstring": "returns the string name of this detector",
    "type": "method",
    "file_path": "pytorch\\torch\\ao\\quantization\\fx\\_model_report\\detector.py",
    "ast_data": "FunctionDef name:get_detector_name arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "uninitialized_variable_creator",
    "source_code": "def uninitialized_variable_creator(next_creator, **kwargs):\n    del next_creator\n    return resource_variable_ops.UninitializedVariable(**kwargs)",
    "docstring": "A variable creator that creates uninitialized variables.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\saved_model\\load.py",
    "ast_data": "FunctionDef name:uninitialized_variable_creator arg:next_creator arguments arg arg Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "tmin",
    "source_code": "def tmin(a, lowerlimit=None, axis=0, inclusive=True):\n    a, axis = _chk_asarray(a, axis)\n    am = trima(a, (lowerlimit, None), (inclusive, False))\n    return ma.minimum.reduce(am, axis)",
    "docstring": "Compute the trimmed minimum Parameters ---------- a : array_like array of values lowerlimit : None or float, optional Values in the input array less than the given limit will be ignored. When lowerlimit is None, then all values are used. The default value is None. axis : int or None, optional Axis along which to operate. Default is 0. If None, compute over the whole array . inclusive : {True, False}, optional This flag determines whether values exactly equal to the lower limit are included. The default value is True. Returns ------- tmin : float, int or ndarray Notes ----- For more details on , see . Examples -------- >>> import numpy as np >>> from scipy.stats import mstats >>> a = np.array([[6, 8, 3, 0], ... [3, 2, 1, 2], ... [8, 1, 8, 2], ... [5, 3, 0, 2], ... [4, 7, 5, 2]]) ... >>> mstats.tmin(a, 5) masked_array(data=[5, 7, 5, --], mask=[False, False, False, True], fill_value=999999)",
    "type": "function",
    "file_path": "scipy\\scipy\\stats\\_mstats_basic.py",
    "ast_data": "FunctionDef name:tmin arg:a arg:lowerlimit arg:axis arg:inclusive arguments arg arg arg arg Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "set_requires_gradient_sync",
    "source_code": "def set_requires_gradient_sync(self, requires_gradient_sync: bool) -> None:\n    replicate.state(self)._no_sync = not requires_gradient_sync",
    "docstring": "Sets if the module should sync gradients. This can be used to implement gradient accumulation without communication. Args: requires_gradient_sync (bool): Whether to reduce gradients for the module's parameters.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\_composable\\replicate.py",
    "ast_data": "FunctionDef name:set_requires_gradient_sync arg:self arg:requires_gradient_sync arguments arg arg Assign Call"
  },
  {
    "library": "cherrypy",
    "name": "stats",
    "source_code": "def stats(self, filename, sortby='cumulative'):\n    sio = io.StringIO()\n    s = pstats.Stats(os.path.join(self.path, filename), stream=sio)\n    s.strip_dirs()\n    s.sort_stats(sortby)\n    s.print_stats()\n    response = sio.getvalue()\n    sio.close()\n    return response",
    "docstring": "Generate statistics from given profile. :returns: The sorted stats index printout. :rtype: str",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\lib\\profiler.py",
    "ast_data": "FunctionDef name:stats arg:self arg:filename arg:sortby arguments arg arg arg Assign Call Assign Call Call Call Call Call Assign Call Call Return return:yes"
  },
  {
    "library": "scipy",
    "name": "generate_file_pyx",
    "source_code": "def generate_file_pyx(sigs, lib_name, header_name, accelerate):\n    if lib_name == 'BLAS':\n        preamble_template = blas_pyx_preamble\n        epilog = blas_py_wrappers\n    elif lib_name == 'LAPACK':\n        preamble_template = lapack_pyx_preamble\n        epilog = lapack_py_wrappers\n    else:\n        raise RuntimeError(f'Unrecognized lib_name: {lib_name}.')\n    names = '\\n- '.join([sig['name'] for sig in sigs])\n    comment = ['# ' + c for c in COMMENT_TEXT]\n    preamble = comment + [preamble_template.format(names)]\n    decls = [generate_decl_pyx(**sig, accelerate=accelerate, header_name=header_name) for sig in sigs]\n    content = preamble + decls + [epilog]\n    return ''.join(content)",
    "docstring": "Generate content for pyx file with BLAS/LAPACK declarations and tests.",
    "type": "function",
    "file_path": "scipy\\scipy\\linalg\\_generate_pyx.py",
    "ast_data": "FunctionDef name:generate_file_pyx arg:sigs arg:lib_name arg:header_name arg:accelerate arguments arg arg arg arg If Compare Assign Assign If Compare Assign Assign Raise Call Assign Call Assign Assign Call Assign Call Assign Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "_check_shape",
    "source_code": "def _check_shape(param, param_shape, name):\n    param = np.array(param)\n    if param.shape != param_shape:\n        raise ValueError(\"The parameter '%s' should have the shape of %s, but got %s\" % (name, param_shape, param.shape))",
    "docstring": "Validate the shape of the input parameter 'param'. Parameters ---------- param : array param_shape : tuple name : str",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\mixture\\_base.py",
    "ast_data": "FunctionDef name:_check_shape arg:param arg:param_shape arg:name arguments arg arg arg Assign Call If Compare Raise Call"
  },
  {
    "library": "sphinx",
    "name": "_strip_basic_auth",
    "source_code": "def _strip_basic_auth(url: str) -> str:\n    frags = list(urlsplit(url))\n    if '@' in frags[1]:\n        frags[1] = frags[1].split('@')[1]\n    return urlunsplit(frags)",
    "docstring": "Returns *url* with basic auth credentials removed. Also returns the basic auth username and password if they're present in *url*. E.g.: => *url* need not include basic auth credentials. :param url: url which may or may not contain basic auth credentials :type url: ``",
    "type": "function",
    "file_path": "sphinx\\sphinx\\ext\\intersphinx\\_load.py",
    "ast_data": "FunctionDef name:_strip_basic_auth arg:url arguments arg Assign Call Call If Compare Assign Call Return return:yes Call"
  },
  {
    "library": "django",
    "name": "test_db_signature",
    "source_code": "def test_db_signature(self):\n    test_database_name = self._get_test_db_name()\n    sig = [self.connection.settings_dict['NAME']]\n    if self.is_in_memory_db(test_database_name):\n        sig.append(self.connection.alias)\n    else:\n        sig.append(test_database_name)\n    return tuple(sig)",
    "docstring": "Return a tuple that uniquely identifies a test database. This takes into account the special cases of \":memory:\" and \"\" for SQLite since the databases will be distinct despite having the same TEST NAME. See",
    "type": "method",
    "file_path": "django\\django\\db\\backends\\sqlite3\\creation.py",
    "ast_data": "FunctionDef name:test_db_signature arg:self arguments arg Assign Call Assign If Call Call Call Return return:yes Call"
  },
  {
    "library": "kornia",
    "name": "forward",
    "source_code": "def forward(self, imgs: Union[Tensor, List[Tensor]], original_sizes: Tensor) -> Union[Tensor, List[Tensor]]:\n    resized_imgs: list[Tensor] = []\n    if torch.onnx.is_in_onnx_export():\n        warnings.warn('ResizePostProcessor is not supported in ONNX export. The output will not be resized back to the original size.', stacklevel=1)\n        return imgs\n    iters = len(imgs) if isinstance(imgs, list) else imgs.shape[0]\n    for i in range(iters):\n        img = imgs[i]\n        size = original_sizes[i]\n        resized_imgs.append(resize(img[None], size=size.cpu().long().numpy().tolist(), interpolation=self.interpolation_mode))\n    return resized_imgs",
    "docstring": "Run forward. Returns: resized_imgs: resized images in a batch. original_sizes: the original image sizes of (height, width).",
    "type": "method",
    "file_path": "kornia\\kornia\\models\\utils.py",
    "ast_data": "FunctionDef name:forward arg:self arg:imgs arg:original_sizes arguments arg arg arg If Call Call Return return:yes Assign Call Call For Call Assign Assign Call Call Call Call Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "canonicalize_without_job_and_task",
    "source_code": "def canonicalize_without_job_and_task(d):\n    canonicalized_device = canonicalize(d)\n    spec = tf_device.DeviceSpec.from_string(canonicalized_device)\n    spec = spec.replace(job=None, task=None, replica=0)\n    return spec.to_string()",
    "docstring": "Partially canonicalize device string. This returns device string from without including job and task. This is most useful for parameter server strategy where the device strings are generated on the chief, but executed on workers. For example: If d = '/cpu:0', default='/job:worker/task:1', it returns '/replica:0/device:CPU:0'. If d = '/cpu:0', default='/job:worker', it returns '/replica:0/device:CPU:0'. If d = '/gpu:0', default=None, it returns '/replica:0/device:GPU:0'. Note: This uses \"job:localhost\" as the default if executing eagerly. Args: d: a device string or tf.config.LogicalDevice Returns: a partially canonicalized device string.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\device_util.py",
    "ast_data": "FunctionDef name:canonicalize_without_job_and_task arg:d arguments arg Assign Call Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_get_node_attribute_at_index",
    "source_code": "def _get_node_attribute_at_index(self, node_index, attr, attr_name):\n    if not self._inbound_nodes:\n        raise RuntimeError('The layer has never been called and thus has no defined ' + attr_name + '.')\n    if not len(self._inbound_nodes) > node_index:\n        raise ValueError('Asked to get ' + attr_name + ' at node ' + str(node_index) + ', but the layer has only ' + str(len(self._inbound_nodes)) + ' inbound nodes.')\n    values = getattr(self._inbound_nodes[node_index], attr)\n    if isinstance(values, list) and len(values) == 1:\n        return values[0]\n    else:\n        return values",
    "docstring": "Private utility to retrieves an attribute (e.g. inputs) from a node. This is used to implement the methods: - get_input_shape_at - get_output_shape_at - get_input_at etc... Args: node_index: Integer index of the node from which to retrieve the attribute. attr: Exact node attribute name. attr_name: Human-readable attribute name, for error messages. Returns: The layer's attribute at the node of index . Raises: RuntimeError: If the layer has no inbound nodes, or if called in Eager mode. ValueError: If the index provided does not match any node.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\base_layer.py",
    "ast_data": "FunctionDef name:_get_node_attribute_at_index arg:self arg:node_index arg:attr arg:attr_name arguments arg arg arg arg If Raise Call If Compare Call Raise Call Call Call Call Assign Call If BoolOp Call Compare Call Return return:yes Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, from_logits=False, reduction=losses_utils.ReductionV2.AUTO, name='sparse_categorical_crossentropy'):\n    super().__init__(sparse_categorical_crossentropy, name=name, reduction=reduction, from_logits=from_logits)",
    "docstring": "Initializes instance. Args: from_logits: Whether is expected to be a logits tensor. By default, we assume that encodes a probability distribution. reduction: Type of to apply to loss. Default value is . indicates that the reduction option will be determined by the usage context. For almost all cases this defaults to . When used with , outside of built-in training loops such as and , using or will raise an error. Please see this custom training [tutorial]( for more details. name: Optional name for the instance. Defaults to 'sparse_categorical_crossentropy'.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\losses.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:from_logits arg:reduction arg:name arguments arg arg arg arg Call Call"
  },
  {
    "library": "matplotlib",
    "name": "set_label",
    "source_code": "def set_label(self, s):\n    label = str(s) if s is not None else None\n    if label != self._label:\n        self._label = label\n        self.pchanged()\n        self.stale = True",
    "docstring": "Set a label that will be displayed in the legend. Parameters ---------- s : object *s* will be converted to a string by calling .",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\artist.py",
    "ast_data": "FunctionDef name:set_label arg:self arg:s arguments arg arg Assign Compare Call If Compare Assign Call Assign"
  },
  {
    "library": "pytorch",
    "name": "get_x86_inductor_linear_dynamic_fp16_config",
    "source_code": "@functools.lru_cache\ndef get_x86_inductor_linear_dynamic_fp16_config():\n    weight_quantization_spec = QuantizationSpec(dtype=torch.float16, observer_or_fake_quant_ctr=PlaceholderObserver)\n    quantization_config = QuantizationConfig(None, None, weight_quantization_spec, None)\n    return quantization_config",
    "docstring": "For linear_dynamic_fp16. The name may be confusing. The op's behavior is fp32_input * (fp16_weight -> to_fp32) -> fp32_output.",
    "type": "function",
    "file_path": "pytorch\\torch\\ao\\quantization\\quantizer\\x86_inductor_quantizer.py",
    "ast_data": "FunctionDef name:get_x86_inductor_linear_dynamic_fp16_config arguments Assign Call Assign Call Return return:yes"
  },
  {
    "library": "pandas",
    "name": "validate_clip_with_axis",
    "source_code": "def validate_clip_with_axis(axis: ndarray | AxisNoneT, args, kwargs) -> AxisNoneT | None:\n    if isinstance(axis, ndarray):\n        args = (axis,) + args\n        axis = None\n    validate_clip(args, kwargs)\n    return axis",
    "docstring": "If 'NDFrame.clip' is called via the numpy library, the third parameter in its signature is 'out', which can takes an ndarray, so check if the 'axis' parameter is an instance of ndarray, since 'axis' itself should either be an integer or None",
    "type": "function",
    "file_path": "pandas\\pandas\\compat\\numpy\\function.py",
    "ast_data": "FunctionDef name:validate_clip_with_axis arg:axis arg:args arg:kwargs arguments arg arg arg If Call Assign Assign Call Return return:yes"
  },
  {
    "library": "pandas",
    "name": "restore_dropped_levels_multijoin",
    "source_code": "def restore_dropped_levels_multijoin(left: MultiIndex, right: MultiIndex, dropped_level_names, join_index: Index, lindexer: npt.NDArray[np.intp], rindexer: npt.NDArray[np.intp]) -> tuple[FrozenList, FrozenList, FrozenList]:\n\n    def _convert_to_multiindex(index: Index) -> MultiIndex:\n        if isinstance(index, MultiIndex):\n            return index\n        else:\n            return MultiIndex.from_arrays([index._values], names=[index.name])\n    join_index = _convert_to_multiindex(join_index)\n    join_levels = join_index.levels\n    join_codes = join_index.codes\n    join_names = join_index.names\n    for dropped_level_name in dropped_level_names:\n        if dropped_level_name in left.names:\n            idx = left\n            indexer = lindexer\n        else:\n            idx = right\n            indexer = rindexer\n        name_idx = idx.names.index(dropped_level_name)\n        restore_levels = idx.levels[name_idx]\n        codes = idx.codes[name_idx]\n        if indexer is None:\n            restore_codes = codes\n        else:\n            restore_codes = algos.take_nd(codes, indexer, fill_value=-1)\n        join_levels = join_levels + [restore_levels]\n        join_codes = join_codes + [restore_codes]\n        join_names = join_names + [dropped_level_name]\n    return (join_levels, join_codes, join_names)",
    "docstring": "*this is an internal non-public method* Returns the levels, labels and names of a multi-index to multi-index join. Depending on the type of join, this method restores the appropriate dropped levels of the joined multi-index. The method relies on lindexer, rindexer which hold the index positions of left and right, where a join was feasible Parameters ---------- left : MultiIndex left index right : MultiIndex right index dropped_level_names : str array list of non-common level names join_index : Index the index of the join between the common levels of left and right lindexer : np.ndarray[np.intp] left indexer rindexer : np.ndarray[np.intp] right indexer Returns ------- levels : list of Index levels of combined multiindexes labels : np.ndarray[np.intp] labels of combined multiindexes names : List[Hashable] names of combined multiindex levels",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\reshape\\merge.py",
    "ast_data": "FunctionDef name:restore_dropped_levels_multijoin arg:left arg:right arg:dropped_level_names arg:join_index arg:lindexer arg:rindexer arguments arg arg arg arg arg arg FunctionDef name:_convert_to_multiindex arg:index arguments arg If Call Return return:yes Return return:yes Call Assign Call Assign Assign Assign For If Compare Assign Assign Assign Assign Assign Call Assign Assign If Compare Assign Assign Call Assign Assign Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "assert_global_step",
    "source_code": "@tf_export(v1=['train.assert_global_step'])\ndef assert_global_step(global_step_tensor):\n    if not (isinstance(global_step_tensor, variables.Variable) or isinstance(global_step_tensor, tensor.Tensor) or resource_variable_ops.is_resource_variable(global_step_tensor)):\n        raise TypeError('Existing \"global_step\" must be a Variable or Tensor: %s.' % global_step_tensor)\n    if not global_step_tensor.dtype.base_dtype.is_integer:\n        raise TypeError('Existing \"global_step\" does not have integer type: %s' % global_step_tensor.dtype)\n    if global_step_tensor.get_shape().ndims != 0 and global_step_tensor.get_shape().is_fully_defined():\n        raise TypeError('Existing \"global_step\" is not scalar: %s' % global_step_tensor.get_shape())",
    "docstring": "Asserts is a scalar int or . Args: global_step_tensor: to test.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\training\\training_util.py",
    "ast_data": "FunctionDef name:assert_global_step arg:global_step_tensor arguments arg If BoolOp Call Call Call Raise Call If Raise Call If BoolOp Compare Call Call Call Raise Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "get_func_annotations",
    "source_code": "@classmethod\ndef get_func_annotations(cls, func):\n    params = cls.get_func_params(func)\n    if params:\n        Parameter = inspect.Parameter\n        params = (param for param in params if param.kind in (Parameter.POSITIONAL_ONLY, Parameter.POSITIONAL_OR_KEYWORD))\n        annotations = tuple((param.annotation for param in params))\n        if all((ann is not Parameter.empty for ann in annotations)):\n            return annotations",
    "docstring": "get annotations of function positional parameters",
    "type": "method",
    "file_path": "pytorch\\torch\\fx\\experimental\\unification\\multipledispatch\\dispatcher.py",
    "ast_data": "FunctionDef name:get_func_annotations arg:cls arg:func arguments arg arg Assign Call If Assign Assign Compare Assign Call If Call Compare Return return:yes"
  },
  {
    "library": "django",
    "name": "ahas_perms",
    "source_code": "async def ahas_perms(self, perm_list, obj=None):\n    if not isinstance(perm_list, Iterable) or isinstance(perm_list, str):\n        raise ValueError('perm_list must be an iterable of permissions.')\n    for perm in perm_list:\n        if not await self.ahas_perm(perm, obj):\n            return False\n    return True",
    "docstring": "See has_perms()",
    "type": "method",
    "file_path": "django\\django\\contrib\\auth\\models.py",
    "ast_data": "AsyncFunctionDef name:ahas_perms arg:self arg:perm_list arg:obj arguments arg arg arg If BoolOp Call Call Raise Call For If Call Return return:yes Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "ConvPoolArgs2d",
    "source_code": "class ConvPoolArgs2d(NamedTuple):\n    kernel_h: int\n    kernel_w: int\n    stride_h: int\n    stride_w: int\n    pad_t: int\n    pad_b: int\n    pad_l: int\n    pad_r: int\n    dilation_h: int\n    dilation_w: int\n    group: int",
    "docstring": "Configuration arguments for a convolution.",
    "type": "class",
    "file_path": "pytorch\\torch\\backends\\_nnapi\\serializer.py",
    "ast_data": "ClassDef name:ConvPoolArgs2d"
  },
  {
    "library": "pandas",
    "name": "_attr_getter",
    "source_code": "def _attr_getter(self, obj, attr):\n    return obj.get(attr)",
    "docstring": "Return the attribute value of an individual DOM node. Parameters ---------- obj : node-like A DOM node. attr : str or unicode The attribute, such as \"colspan\" Returns ------- str or unicode The attribute value.",
    "type": "method",
    "file_path": "pandas\\pandas\\io\\html.py",
    "ast_data": "FunctionDef name:_attr_getter arg:self arg:obj arg:attr arguments arg arg arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "AliasesPriorGraphOutput",
    "source_code": "class AliasesPriorGraphOutput(OutputAliasInfo):\n    __slots__ = ['index']\n    index: PathOutputIndex\n\n    def __init__(self, index: PathOutputIndex) -> None:\n        assert isinstance(index, tuple)\n        self.index = index",
    "docstring": "Marks that the graph output aliases an output of a prior graph",
    "type": "class",
    "file_path": "pytorch\\torch\\_inductor\\cudagraph_trees.py",
    "ast_data": "ClassDef name:AliasesPriorGraphOutput Assign FunctionDef name:__init__ arg:self arg:index arguments arg arg Call Assign"
  },
  {
    "library": "pytorch",
    "name": "print_sl",
    "source_code": "def print_sl(self, is_verbose, group, sl, epoch=None):\n    if is_verbose:\n        if epoch is None:\n            print(f'Adjusting sparsity level of group {group} to {sl:.4e}.')\n        else:\n            print(f'Epoch {epoch:5d}: adjusting sparsity level of group {group} to {sl:.4e}.')",
    "docstring": "Display the current sparsity level.",
    "type": "method",
    "file_path": "pytorch\\torch\\ao\\pruning\\scheduler\\base_scheduler.py",
    "ast_data": "FunctionDef name:print_sl arg:self arg:is_verbose arg:group arg:sl arg:epoch arguments arg arg arg arg arg If If Compare Call Call"
  },
  {
    "library": "matplotlib",
    "name": "legend_elements",
    "source_code": "def legend_elements(self, variable_name='x', str_format=str):\n    artists = []\n    labels = []\n    if self.filled:\n        lowers, uppers = self._get_lowers_and_uppers()\n        n_levels = len(self._paths)\n        for idx in range(n_levels):\n            artists.append(mpatches.Rectangle((0, 0), 1, 1, facecolor=self.get_facecolor()[idx], hatch=self.hatches[idx % len(self.hatches)]))\n            lower = str_format(lowers[idx])\n            upper = str_format(uppers[idx])\n            if idx == 0 and self.extend in ('min', 'both'):\n                labels.append(f'${variable_name} \\\\leq {lower}s$')\n            elif idx == n_levels - 1 and self.extend in ('max', 'both'):\n                labels.append(f'${variable_name} > {upper}s$')\n            else:\n                labels.append(f'${lower} < {variable_name} \\\\leq {upper}$')\n    else:\n        for idx, level in enumerate(self.levels):\n            artists.append(Line2D([], [], color=self.get_edgecolor()[idx], linewidth=self.get_linewidths()[idx], linestyle=self.get_linestyles()[idx]))\n            labels.append(f'${variable_name} = {str_format(level)}$')\n    return (artists, labels)",
    "docstring": "Return a list of artists and labels suitable for passing through to which represent this ContourSet. The labels have the form \"0 str Function used to format the numbers in the labels. Returns ------- artists : list[] A list of the artists. labels : list[str] A list of the labels.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\contour.py",
    "ast_data": "FunctionDef name:legend_elements arg:self arg:variable_name arg:str_format arguments arg arg arg Assign Assign If Assign Call Assign Call For Call Call Call Call Call Assign Call Assign Call If BoolOp Compare Compare Call If BoolOp Compare Compare Call Call For Call Call Call Call Call Call Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "dimension_value",
    "source_code": "@tf_export('compat.dimension_value', v1=['dimension_value', 'compat.dimension_value'])\ndef dimension_value(dimension: Union['Dimension', int, None]) -> Union[int, None]:\n    if isinstance(dimension, Dimension):\n        return dimension.value\n    return dimension",
    "docstring": "Compatibility utility required to allow for both V1 and V2 behavior in TF. Until the release of TF 2.0, we need the legacy behavior of to coexist with the new behavior. This utility is a bridge between the two. When accessing the value of a TensorShape dimension, use this utility, like this: Args: dimension: Either a instance, an integer, or None. Returns: A plain value, i.e. an integer or None.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\tensor_shape.py",
    "ast_data": "FunctionDef name:dimension_value arg:dimension arguments arg If Call Return return:yes Return return:yes Call"
  },
  {
    "library": "django",
    "name": "centroid",
    "source_code": "@property\ndef centroid(self):\n    return self._topology(capi.geos_centroid(self.ptr))",
    "docstring": "The centroid is equal to the centroid of the set of component Geometries of highest dimension (since the lower-dimension geometries contribute zero \"weight\" to the centroid).",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\geos\\geometry.py",
    "ast_data": "FunctionDef name:centroid arg:self arguments arg Return return:yes Call Call"
  },
  {
    "library": "matplotlib",
    "name": "get_clip_path",
    "source_code": "def get_clip_path(self):\n    if self._clippath is not None:\n        tpath, tr = self._clippath.get_transformed_path_and_affine()\n        if np.all(np.isfinite(tpath.vertices)):\n            return (tpath, tr)\n        else:\n            _log.warning('Ill-defined clip_path detected. Returning None.')\n            return (None, None)\n    return (None, None)",
    "docstring": "Return the clip path in the form (path, transform), where path is a instance, and transform is an affine transform to apply to the path before clipping.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\backend_bases.py",
    "ast_data": "FunctionDef name:get_clip_path arg:self arguments arg If Compare Assign Call If Call Call Return return:yes Call Return return:no Return return:no"
  },
  {
    "library": "django",
    "name": "resolve_expression",
    "source_code": "def resolve_expression(self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False):\n    c = self.copy()\n    c.is_summary = summarize\n    source_expressions = [expr.resolve_expression(query, allow_joins, reuse, summarize) if expr is not None else None for expr in c.get_source_expressions()]\n    if not self.allows_composite_expressions and any((isinstance(expr, ColPairs) for expr in source_expressions)):\n        raise ValueError(f'{self.__class__.__name__} expression does not support composite primary keys.')\n    c.set_source_expressions(source_expressions)\n    return c",
    "docstring": "Provide the chance to do any preprocessing or validation before being added to the query. Arguments: * query: the backend query implementation * allow_joins: boolean allowing or denying use of joins in this query * reuse: a set of reusable joins for multijoins * summarize: a terminal aggregate clause * for_save: whether this expression about to be used in a save or update Return: an Expression to be added to the query.",
    "type": "method",
    "file_path": "django\\django\\db\\models\\expressions.py",
    "ast_data": "FunctionDef name:resolve_expression arg:self arg:query arg:allow_joins arg:reuse arg:summarize arg:for_save arguments arg arg arg arg arg arg Assign Call Assign Assign Compare Call Call If BoolOp Call Call Raise Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_merge_with",
    "source_code": "def _merge_with(self, other: 'DynamicRaggedShape.Spec') -> 'DynamicRaggedShape.Spec':\n    max_num_row_partitions = max(self.num_row_partitions, other.num_row_partitions)\n    a = self._with_num_row_partitions(max_num_row_partitions)\n    b = other._with_num_row_partitions(max_num_row_partitions)\n    new_rp = [a._merge_with(b) for a, b in zip(a._row_partitions, b._row_partitions)]\n    new_static_inner_shape = a._static_inner_shape.merge_with(b._static_inner_shape)\n    dtype = b.dtype if a.dtype == dtypes.int32 else dtypes.int64\n    return DynamicRaggedShape.Spec(new_rp, new_static_inner_shape, dtype=dtype)",
    "docstring": "Merges all information between two specs. Specs are expected to represent the same information modulo num_row_partitons. If the specs are of different ranks, then fail. Args: other: another Spec of the same rank. Returns: a Spec with the union of information.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\dynamic_ragged_shape.py",
    "ast_data": "FunctionDef name:_merge_with arg:self arg:other arguments arg arg Assign Call Assign Call Assign Call Assign Call Call Assign Call Assign Compare Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "read_var_array",
    "source_code": "def read_var_array(self, header, process=True):\n    return self._matrix_reader.array_from_header(header, process)",
    "docstring": "Read array, given Parameters ---------- header : header object object with fields defining variable header process : {True, False} bool, optional If True, apply recursive post-processing during loading of array. Returns ------- arr : array array with post-processing applied or not according to .",
    "type": "method",
    "file_path": "scipy\\scipy\\io\\matlab\\_mio5.py",
    "ast_data": "FunctionDef name:read_var_array arg:self arg:header arg:process arguments arg arg arg Return return:yes Call"
  },
  {
    "library": "numpy",
    "name": "get_build_temp_dir",
    "source_code": "def get_build_temp_dir(self):\n    cmd = get_cmd('build')\n    cmd.ensure_finalized()\n    return cmd.build_temp",
    "docstring": "Return a path to a temporary directory where temporary files should be placed.",
    "type": "method",
    "file_path": "numpy\\numpy\\distutils\\misc_util.py",
    "ast_data": "FunctionDef name:get_build_temp_dir arg:self arguments arg Assign Call Call Return return:yes"
  },
  {
    "library": "django",
    "name": "update_last_login",
    "source_code": "def update_last_login(sender, user, **kwargs):\n    user.last_login = timezone.now()\n    user.save(update_fields=['last_login'])",
    "docstring": "A signal receiver which updates the last_login date for the user logging in.",
    "type": "function",
    "file_path": "django\\django\\contrib\\auth\\models.py",
    "ast_data": "FunctionDef name:update_last_login arg:sender arg:user arguments arg arg arg Assign Call Call"
  },
  {
    "library": "tensorflow",
    "name": "__call__",
    "source_code": "def __call__(self, shape, dtype=None, **kwargs):\n    raise NotImplementedError",
    "docstring": "Returns a tensor object initialized as specified by the initializer. Args: shape: Shape of the tensor. dtype: Optional dtype of the tensor. If not provided will return tensor of . **kwargs: Additional keyword arguments. Accepted values: and . Used when creating a single partition in a partitioned variable. is the shape of the partition (i.e. the shape of the returned tensor) and is a tuple of specifying the offset of this partition w.r.t each axis. For example, a tensor of shape can be partitioned into two partitions: of shape and of shape ; if the initializer is called with and , it should return the value for .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\init_ops_v2.py",
    "ast_data": "FunctionDef name:__call__ arg:self arg:shape arg:dtype arguments arg arg arg arg Raise"
  },
  {
    "library": "matplotlib",
    "name": "subplot_tool",
    "source_code": "def subplot_tool(targetfig: Figure | None=None) -> SubplotTool | None:\n    if targetfig is None:\n        targetfig = gcf()\n    tb = targetfig.canvas.manager.toolbar\n    if hasattr(tb, 'configure_subplots'):\n        from matplotlib.backend_bases import NavigationToolbar2\n        return cast(NavigationToolbar2, tb).configure_subplots()\n    elif hasattr(tb, 'trigger_tool'):\n        from matplotlib.backend_bases import ToolContainerBase\n        cast(ToolContainerBase, tb).trigger_tool('subplots')\n        return None\n    else:\n        raise ValueError('subplot_tool can only be launched for figures with an associated toolbar')",
    "docstring": "Launch a subplot tool window for a figure. Returns -------",
    "type": "function",
    "file_path": "matplotlib\\lib\\matplotlib\\pyplot.py",
    "ast_data": "FunctionDef name:subplot_tool arg:targetfig arguments arg If Compare Assign Call Assign If Call Return return:yes Call Call If Call Call Call Return return:no Raise Call"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, name, state_callback, restore_callback):\n\n    def _state_callback_wrapper():\n        with ops.init_scope():\n            return state_callback()\n    self._state_callback = _state_callback_wrapper\n    self._restore_callback = restore_callback\n    with ops.device('/cpu:0'):\n        self._save_string = constant_op.constant('', dtype=dtypes.string)\n    spec = saveable_object.SaveSpec(self._save_string, '', name, dtype=dtypes.string)\n    super(_PythonStringStateSaveable, self).__init__(self._save_string, [spec], name)",
    "docstring": "Configure saving. Args: name: The checkpoint key to write to. state_callback: A function taking no arguments which returns a string. This function is run every time a checkpoint is written. restore_callback: A function taking a Python string, used to restore state.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\training\\saving\\saveable_object_util.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:name arg:state_callback arg:restore_callback arguments arg arg arg arg FunctionDef name:_state_callback_wrapper arguments With Call Return return:yes Call Assign Assign With Call Assign Call Assign Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "gather_object",
    "source_code": "def gather_object(self, object: T) -> Optional[list[T]]:\n    if self.use_dist:\n        gather_objs = cast(list[T], [None] * dist.get_world_size(self.group)) if self.is_coordinator else None\n        dist.gather_object(obj=object, object_gather_list=gather_objs if self.is_coordinator else None, dst=self.global_coordinator_rank, group=self.group)\n        result = gather_objs\n    else:\n        result = [object]\n    return result",
    "docstring": "Implement functionality similar to c10d::gather_object but without distributed enabled.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\checkpoint\\utils.py",
    "ast_data": "FunctionDef name:gather_object arg:self arg:object arguments arg arg If Assign Call Call Call Assign Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "bessel_i1",
    "source_code": "@tf_export('math.bessel_i1', 'math.special.bessel_i1')\n@dispatch.register_unary_elementwise_api\n@dispatch.add_dispatch_support\ndef bessel_i1(x, name=None):\n    with ops.name_scope(name, 'bessel_i1', [x]):\n        return gen_special_math_ops.bessel_i1(x)",
    "docstring": "Computes the Bessel i1 function of element-wise. Modified Bessel function of order 1. It is preferable to use the numerically stabler function instead. >>> tf.math.special.bessel_i1([-1., -0.5, 0.5, 1.]).numpy() array([-0.5651591 , -0.25789431, 0.25789431, 0.5651591 ], dtype=float32) Args: x: A or . Must be one of the following types: , , . name: A name for the operation (optional). Returns: A or , respectively. Has the same type as . @compatibility(scipy) Equivalent to scipy.special.i1 @end_compatibility",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\special_math_ops.py",
    "ast_data": "FunctionDef name:bessel_i1 arg:x arg:name arguments arg arg With Call Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "is_aligned",
    "source_code": "def is_aligned(x: KernelArgType, alignment: int, include_tensor: bool) -> bool:\n    if isinstance(x, TensorArg):\n        if include_tensor:\n            offset_aligned = V.graph.sizevars.statically_known_multiple_of(x.offset * x.dtype.itemsize, alignment)\n            return offset_aligned and (not is_unaligned_buffer(x))\n        else:\n            return False\n    if isinstance(x, SizeArg):\n        if x.name.startswith('load_seed_offset'):\n            return False\n        if x.expr is None:\n            return False\n        if isinstance(x.expr, float):\n            return False\n        return V.graph.sizevars.statically_known_multiple_of(x.expr, alignment)\n    if isinstance(x, WorkspaceArg):\n        return True\n    if isinstance(x, (TMADescriptorArg, ConstexprArg)):\n        return False\n    raise NotImplementedError(f'unhandled {type(x)}: {x}')",
    "docstring": "Roughly follow triton code here:",
    "type": "function",
    "file_path": "pytorch\\torch\\_inductor\\codegen\\triton_utils.py",
    "ast_data": "FunctionDef name:is_aligned arg:x arg:alignment arg:include_tensor arguments arg arg arg If Call If Assign Call Return return:yes BoolOp Call Return return:yes If Call If Call Return return:yes If Compare Return return:yes If Call Return return:yes Return return:yes Call If Call Return return:yes If Call Return return:yes Raise Call Call"
  },
  {
    "library": "matplotlib",
    "name": "Copy_to_Clipboard",
    "source_code": "def Copy_to_Clipboard(self, event=None):\n    bmp_obj = wx.BitmapDataObject()\n    bmp_obj.SetBitmap(self.bitmap)\n    if not wx.TheClipboard.IsOpened():\n        open_success = wx.TheClipboard.Open()\n        if open_success:\n            wx.TheClipboard.SetData(bmp_obj)\n            wx.TheClipboard.Flush()\n            wx.TheClipboard.Close()",
    "docstring": "Copy bitmap of canvas to system clipboard.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\backends\\backend_wx.py",
    "ast_data": "FunctionDef name:Copy_to_Clipboard arg:self arg:event arguments arg arg Assign Call Call If Call Assign Call If Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "from_config",
    "source_code": "@classmethod\ndef from_config(cls, config):\n    config.pop('dtype', None)\n    return cls(**config)",
    "docstring": "Instantiates an initializer from a configuration dictionary. Example: Args: config: A Python dictionary, the output of . Returns: A instance.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\initializers\\initializers_v2.py",
    "ast_data": "FunctionDef name:from_config arg:cls arg:config arguments arg arg Call Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "_kmeans",
    "source_code": "def _kmeans(obs, guess, thresh=1e-05, xp=None):\n    xp = np if xp is None else xp\n    code_book = guess\n    diff = xp.inf\n    prev_avg_dists = deque([diff], maxlen=2)\n    np_obs = np.asarray(obs)\n    while diff > thresh:\n        obs_code, distort = vq(obs, code_book, check_finite=False)\n        prev_avg_dists.append(xp.mean(distort, axis=-1))\n        obs_code = np.asarray(obs_code)\n        code_book, has_members = _vq.update_cluster_means(np_obs, obs_code, code_book.shape[0])\n        code_book = code_book[has_members]\n        code_book = xp.asarray(code_book)\n        diff = xp.abs(prev_avg_dists[0] - prev_avg_dists[1])\n    return (code_book, prev_avg_dists[1])",
    "docstring": "\"raw\" version of k-means. Returns ------- code_book The lowest distortion codebook found. avg_dist The average distance a observation is from a code in the book. Lower means the code_book matches the data better. See Also -------- kmeans : wrapper around k-means Examples -------- Note: not whitened in this example. >>> import numpy as np >>> from scipy.cluster.vq import _kmeans >>> features = np.array([[ 1.9,2.3], ... [ 1.5,2.5], ... [ 0.8,0.6], ... [ 0.4,1.8], ... [ 1.0,1.0]]) >>> book = np.array((features[0],features[2])) >>> _kmeans(features,book) (array([[ 1.7 , 2.4 ], [ 0.73333333, 1.13333333]]), 0.40563916697728591)",
    "type": "function",
    "file_path": "scipy\\scipy\\cluster\\vq.py",
    "ast_data": "FunctionDef name:_kmeans arg:obs arg:guess arg:thresh arg:xp arguments arg arg arg arg Assign Compare Assign Assign Assign Call Assign Call While Compare Assign Call Call Call Assign Call Assign Call Assign Assign Call Assign Call Return return:yes"
  },
  {
    "library": "kornia",
    "name": "wrap",
    "source_code": "def wrap(v, cls):\n    if type(v) in {tuple, list}:\n        return type(v)((wrap(vi, cls) for vi in v))\n    return cls(v) if isinstance(v, Tensor) else v",
    "docstring": "Wrap type.",
    "type": "function",
    "file_path": "kornia\\kornia\\core\\tensor_wrapper.py",
    "ast_data": "FunctionDef name:wrap arg:v arg:cls arguments arg arg If Compare Call Return return:yes Call Call Call Return return:yes Call Call"
  },
  {
    "library": "pandas",
    "name": "_validate_names",
    "source_code": "def _validate_names(names: Sequence[Hashable] | None) -> None:\n    if names is not None:\n        if len(names) != len(set(names)):\n            raise ValueError('Duplicate names are not allowed.')\n        if not (is_list_like(names, allow_sets=False) or isinstance(names, abc.KeysView)):\n            raise ValueError('Names should be an ordered collection.')",
    "docstring": "Raise ValueError if the parameter contains duplicates or has an invalid data type. Parameters ---------- names : array-like or None An array containing a list of the names used for the output DataFrame. Raises ------ ValueError If names are not unique or are not ordered (e.g. set).",
    "type": "function",
    "file_path": "pandas\\pandas\\io\\parsers\\readers.py",
    "ast_data": "FunctionDef name:_validate_names arg:names arguments arg If Compare If Compare Call Call Call Raise Call If BoolOp Call Call Raise Call"
  },
  {
    "library": "tensorflow",
    "name": "_gradient_function",
    "source_code": "def _gradient_function(op_name, attr_tuple, num_inputs, inputs, outputs, out_grads, skip_input_indices, forward_pass_name_scope):\n    mock_op = _MockOp(attr_tuple, inputs, outputs, op_name, skip_input_indices)\n    grad_fn = ops._gradient_registry.lookup(op_name)\n    if grad_fn is None:\n        return [None] * num_inputs\n    if ops.executing_eagerly_outside_functions() or control_flow_util.EnableControlFlowV2(ops.get_default_graph()):\n        gradient_name_scope = 'gradient_tape/'\n        if forward_pass_name_scope:\n            gradient_name_scope += forward_pass_name_scope + '/'\n        with ops.name_scope(gradient_name_scope):\n            return grad_fn(mock_op, *out_grads)\n    else:\n        return grad_fn(mock_op, *out_grads)",
    "docstring": "Calls the gradient function of the op. Args: op_name: the name of the op to be differentiated. attr_tuple: the attrs, as a tuple. num_inputs: the number of inputs to the op. inputs: inputs to the original operation. outputs: outputs to the original operation. out_grads: gradients of the operation wrt its outputs. skip_input_indices: a tuple that is passed to the gradient function, indicating which inputs to skip calculating the gradient for forward_pass_name_scope: the namescope of the op in the forward pass. Returns: The gradients with respect to the inputs of the function, as a list.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\backprop.py",
    "ast_data": "FunctionDef name:_gradient_function arg:op_name arg:attr_tuple arg:num_inputs arg:inputs arg:outputs arg:out_grads arg:skip_input_indices arg:forward_pass_name_scope arguments arg arg arg arg arg arg arg arg Assign Call Assign Call If Compare Return return:yes If BoolOp Call Call Call Assign If With Call Return return:yes Call Return return:yes Call"
  },
  {
    "library": "numpy",
    "name": "argmax",
    "source_code": "def argmax(self, axis=None, out=None):\n    return N.ndarray.argmax(self, axis, out)._align(axis)",
    "docstring": "Indexes of the maximum values along an axis. Return the indexes of the first occurrences of the maximum values along the specified axis. If axis is None, the index is for the flattened matrix. Parameters ---------- See for complete descriptions See Also -------- numpy.argmax Notes ----- This is the same as , but returns a object where would return an . Examples -------- >>> x = np.matrix(np.arange(12).reshape((3,4))); x matrix([[ 0, 1, 2, 3], [ 4, 5, 6, 7], [ 8, 9, 10, 11]]) >>> x.argmax() 11 >>> x.argmax(0) matrix([[2, 2, 2, 2]]) >>> x.argmax(1) matrix([[3], [3], [3]])",
    "type": "method",
    "file_path": "numpy\\numpy\\matrixlib\\defmatrix.py",
    "ast_data": "FunctionDef name:argmax arg:self arg:axis arg:out arguments arg arg arg Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "split_sequence_columns_v2",
    "source_code": "def split_sequence_columns_v2(feature_columns):\n    sequence_columns = []\n    non_sequence_columns = []\n    for column in feature_columns:\n        if not isinstance(column, (_TPUEmbeddingColumnV2, _TPUSharedEmbeddingColumnV2)):\n            raise TypeError(f'column must be a _TPUEmbeddingColumnV2 or _TPUSharedEmbeddingColumnV2 but got {type(column)} instead.')\n        if column.is_sequence_column():\n            sequence_columns.append(column)\n        else:\n            non_sequence_columns.append(column)\n    return (sequence_columns, non_sequence_columns)",
    "docstring": "Split a list of _TPUEmbeddingColumn into sequence and non-sequence columns. For use in a TPUEstimator model_fn function. E.g. def model_fn(features): sequence_columns, feature_columns = ( tf.tpu.feature_column.split_sequence_columns(feature_columns)) input = tf.feature_column.input_layer( features=features, feature_columns=feature_columns) sequence_features, sequence_lengths = ( tf.contrib.feature_column.sequence_input_layer( features=features, feature_columns=sequence_columns)) Args: feature_columns: A list of _TPUEmbeddingColumns to split. Returns: Two lists of _TPUEmbeddingColumns, the first is the sequence columns and the second is the non-sequence columns.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\tpu\\feature_column_v2.py",
    "ast_data": "FunctionDef name:split_sequence_columns_v2 arg:feature_columns arguments arg Assign Assign For If Call Raise Call Call If Call Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_DeconstructedSymType",
    "source_code": "@dataclass_slots\n@dataclass(frozen=True)\nclass _DeconstructedSymType:\n    ty: type[PySymType]\n    node: _DeconstructedSymNode\n\n    @staticmethod\n    def from_sym_type(value: PySymType) -> _DeconstructedSymType:\n        return _DeconstructedSymType(type(value), value.node)\n\n    def extract(self, shape_env: ShapeEnv) -> PySymType:\n        return self.ty(self.node.extract(shape_env))\n\n    def __str__(self) -> str:\n        return f'{self.ty}({self.node})'\n\n    def __repr__(self) -> str:\n        return f'_DeconstructedSymType({self.ty}, {self.node!r})'\n\n    def __eq__(self, other: object) -> bool:\n        return NotImplemented\n\n    def __hash__(self) -> int:\n        return NotImplemented",
    "docstring": "Represents a SymInt, SymFloat, SymBool without the associated ShapeEnv",
    "type": "class",
    "file_path": "pytorch\\torch\\_subclasses\\_fake_tensor_utils.py",
    "ast_data": "ClassDef name:_DeconstructedSymType FunctionDef name:from_sym_type arg:value arguments arg Return return:yes Call Call FunctionDef name:extract arg:self arg:shape_env arguments arg arg Return return:yes Call Call FunctionDef name:__str__ arg:self arguments arg Return return:yes FunctionDef name:__repr__ arg:self arguments arg Return return:yes FunctionDef name:__eq__ arg:self arg:other arguments arg arg Return return:yes FunctionDef name:__hash__ arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "report_proto",
    "source_code": "def report_proto(self):\n    if self._report_proto:\n        return self._report_proto\n    else:\n        raise ValueError('Call to report_proto must be done after tracing.Report proto only exists for trace_mode=[summary|full_tensor_summary]')",
    "docstring": "Getter for tensor_tracer.proto object for summary and full_tensor_summary modes. Returns: A tensor_tracer.proto object. Raises: ValueError if called before tracing happens, or when trace mode is not summary or full_tensor_summary.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\tpu\\tensor_tracer.py",
    "ast_data": "FunctionDef name:report_proto arg:self arguments arg If Return return:yes Raise Call"
  },
  {
    "library": "matplotlib",
    "name": "set_location",
    "source_code": "def set_location(self, location, transform=None):\n    _api.check_isinstance((transforms.Transform, None), transform=transform)\n    if isinstance(location, str):\n        _api.check_in_list(self._locstrings, location=location)\n        self._pos = 1.0 if location in ('top', 'right') else 0.0\n    elif isinstance(location, numbers.Real):\n        self._pos = location\n    else:\n        raise ValueError(f'location must be {self._locstrings[0]!r}, {self._locstrings[1]!r}, or a float, not {location!r}')\n    self._loc = location\n    if self._orientation == 'x':\n        bounds = [0, self._pos, 1.0, 1e-10]\n        if transform is not None:\n            transform = transforms.blended_transform_factory(self._parent.transAxes, transform)\n    else:\n        bounds = [self._pos, 0, 1e-10, 1]\n        if transform is not None:\n            transform = transforms.blended_transform_factory(transform, self._parent.transAxes)\n    if transform is None:\n        transform = self._parent.transAxes\n    self.set_axes_locator(_TransformedBoundsLocator(bounds, transform))",
    "docstring": "Set the vertical or horizontal location of the axes in parent-normalized coordinates. Parameters ---------- location : {'top', 'bottom', 'left', 'right'} or float The position to put the secondary axis. Strings can be 'top' or 'bottom' for orientation='x' and 'right' or 'left' for orientation='y'. A float indicates the relative position on the parent Axes to put the new Axes, 0.0 being the bottom (or left) and 1.0 being the top (or right). transform : , optional Transform for the location to use. Defaults to the parent's ``, so locations are normally relative to the parent axes. .. versionadded:: 3.9",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\axes\\_secondary_axes.py",
    "ast_data": "FunctionDef name:set_location arg:self arg:location arg:transform arguments arg arg arg Call If Call Call Assign Compare If Call Assign Raise Call Assign If Compare Assign If Compare Assign Call Assign If Compare Assign Call If Compare Assign Call Call"
  },
  {
    "library": "pandas",
    "name": "_fill_limit_area_2d",
    "source_code": "def _fill_limit_area_2d(mask: npt.NDArray[np.bool_], limit_area: Literal['outside', 'inside']) -> None:\n    neg_mask = ~mask.T\n    if limit_area == 'outside':\n        la_mask = np.maximum.accumulate(neg_mask, axis=0) & np.maximum.accumulate(neg_mask[::-1], axis=0)[::-1]\n    else:\n        la_mask = ~np.maximum.accumulate(neg_mask, axis=0) | ~np.maximum.accumulate(neg_mask[::-1], axis=0)[::-1]\n    mask[la_mask.T] = False",
    "docstring": "Prepare 2d mask for ffill/bfill with limit_area. When called, mask will no longer faithfully represent when the corresponding are NA or not. Parameters ---------- mask : np.ndarray[bool, ndim=1] Mask representing NA values when filling. limit_area : { \"outside\", \"inside\" } Whether to limit filling to outside or inside the outer most non-NA value.",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\missing.py",
    "ast_data": "FunctionDef name:_fill_limit_area_2d arg:mask arg:limit_area arguments arg arg Assign If Compare Assign Call Call Assign Call Call Assign"
  },
  {
    "library": "tensorflow",
    "name": "run_independently",
    "source_code": "def run_independently(self, op):\n    self._independent_ops.append(op)\n    op._set_attr('_independent_side_effects', attr_value_pb2.AttrValue(b=True))",
    "docstring": "Marks the given op as independent. Overrides any other rule for the op. Independent ops are guaranteed to execute before the return values, but are allowed to run in parallel with everything else. Use in programs which can guarantee that an op has side effects that don't affect any other op. Args: op: An operation",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\auto_control_deps.py",
    "ast_data": "FunctionDef name:run_independently arg:self arg:op arguments arg arg Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "copy",
    "source_code": "def copy(self, **override_parameters_kwargs):\n    parameters = dict(self.parameters, **override_parameters_kwargs)\n    return type(self)(**parameters)",
    "docstring": "Creates a deep copy of the distribution. Note: the copy distribution may continue to depend on the original initialization arguments. Args: **override_parameters_kwargs: String/value dictionary of initialization arguments to override with new values. Returns: distribution: A new instance of initialized from the union of self.parameters and override_parameters_kwargs, i.e., .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\distributions\\distribution.py",
    "ast_data": "FunctionDef name:copy arg:self arguments arg arg Assign Call Return return:yes Call Call"
  },
  {
    "library": "pandas",
    "name": "update_dtype",
    "source_code": "def update_dtype(self, dtype: str_type | CategoricalDtype) -> CategoricalDtype:\n    if isinstance(dtype, str) and dtype == 'category':\n        return self\n    elif not self.is_dtype(dtype):\n        raise ValueError(f'a CategoricalDtype must be passed to perform an update, got {dtype!r}')\n    else:\n        dtype = cast(CategoricalDtype, dtype)\n    if isinstance(dtype, CategoricalDtype) and dtype.categories is not None and (dtype.ordered is not None):\n        return dtype\n    new_categories = dtype.categories if dtype.categories is not None else self.categories\n    new_ordered = dtype.ordered if dtype.ordered is not None else self.ordered\n    return CategoricalDtype(new_categories, new_ordered)",
    "docstring": "Returns a CategoricalDtype with categories and ordered taken from dtype if specified, otherwise falling back to self if unspecified Parameters ---------- dtype : CategoricalDtype Returns ------- new_dtype : CategoricalDtype",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\dtypes\\dtypes.py",
    "ast_data": "FunctionDef name:update_dtype arg:self arg:dtype arguments arg arg If BoolOp Call Compare Return return:yes If Call Raise Call Assign Call If BoolOp Call Compare Compare Return return:yes Assign Compare Assign Compare Return return:yes Call"
  },
  {
    "library": "django",
    "name": "naturaltime",
    "source_code": "@register.filter\ndef naturaltime(value):\n    return NaturalTimeFormatter.string_for(value)",
    "docstring": "For date and time values show how many seconds, minutes, or hours ago compared to current timestamp return representing string.",
    "type": "function",
    "file_path": "django\\django\\contrib\\humanize\\templatetags\\humanize.py",
    "ast_data": "FunctionDef name:naturaltime arg:value arguments arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_initialize_single_worker",
    "source_code": "def _initialize_single_worker(self, devices):\n    self._devices = tuple((device_util.canonicalize(d) for d in devices))\n    self._input_workers_devices = ((device_util.canonicalize('/device:CPU:0', devices[0]), devices),)\n    self._host_input_device = numpy_dataset.SingleDevice(self._input_workers_devices[0][0])\n    device_spec = tf_device.DeviceSpec.from_string(self._input_workers_devices[0][0])\n    if device_spec.job is not None and device_spec.job != 'localhost':\n        self._default_device = '/job:%s/replica:%d/task:%d' % (device_spec.job, device_spec.replica, device_spec.task)\n    logging.info('Using MirroredStrategy with devices %r', devices)",
    "docstring": "Initializes the object for single-worker training.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\mirrored_strategy.py",
    "ast_data": "FunctionDef name:_initialize_single_worker arg:self arg:devices arguments arg arg Assign Call Call Assign Call Assign Call Assign Call If BoolOp Compare Compare Assign Call"
  },
  {
    "library": "pytorch",
    "name": "_pop_config_kwargs",
    "source_code": "def _pop_config_kwargs(config: dict[str, Any]) -> dict[str, Any]:\n    popped = {}\n    for key in ('num_warps', 'num_stages', 'num_ctas', 'maxnreg', 'num_consumer_groups', 'num_buffers_warp_spec'):\n        val = config.pop(key, None)\n        if val is not None:\n            popped[key] = val\n    return popped",
    "docstring": "Extract triton.Config options that should become kwargs",
    "type": "function",
    "file_path": "pytorch\\torch\\_inductor\\runtime\\triton_heuristics.py",
    "ast_data": "FunctionDef name:_pop_config_kwargs arg:config arguments arg Assign For Assign Call If Compare Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "get_total_loss",
    "source_code": "@tf_export(v1=['losses.get_total_loss'])\ndef get_total_loss(add_regularization_losses=True, name='total_loss', scope=None):\n    losses = get_losses(scope=scope)\n    if add_regularization_losses:\n        losses += get_regularization_losses(scope=scope)\n    return math_ops.add_n(losses, name=name)",
    "docstring": "Returns a tensor whose value represents the total loss. In particular, this adds any losses you have added with to any regularization losses that have been added by regularization parameters on layers constructors e.g. . Be very sure to use this if you are constructing a loss_op manually. Otherwise regularization arguments on methods will not function. Args: add_regularization_losses: A boolean indicating whether or not to use the regularization losses in the sum. name: The name of the returned tensor. scope: An optional scope name for filtering the losses to return. Note that this filters the losses added with as well as the regularization losses to that scope. Returns: A whose value represents the total loss. Raises: ValueError: if is not iterable.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\losses\\util.py",
    "ast_data": "FunctionDef name:get_total_loss arg:add_regularization_losses arg:name arg:scope arguments arg arg arg Assign Call If Call Return return:yes Call Call"
  },
  {
    "library": "django",
    "name": "post",
    "source_code": "def post(self, request, *args, **kwargs):\n    form = self.get_form()\n    if form.is_valid():\n        return self.form_valid(form)\n    else:\n        return self.form_invalid(form)",
    "docstring": "Handle POST requests: instantiate a form instance with the passed POST variables and then check if it's valid.",
    "type": "method",
    "file_path": "django\\django\\views\\generic\\edit.py",
    "ast_data": "FunctionDef name:post arg:self arg:request arguments arg arg arg arg Assign Call If Call Return return:yes Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, row_splits, row_lengths=None, value_rowids=None, nrows=None, uniform_row_length=None, nvals=None, internal=False):\n    if internal is not _row_partition_factory_key:\n        raise ValueError('RowPartition constructor is private; please use one of the factory methods instead (e.g., RowPartition.from_row_lengths())')\n    if not isinstance(row_splits, tensor_lib.Tensor):\n        raise TypeError('Row-partitioning argument must be a Tensor, got %r' % row_splits)\n    if row_splits.dtype not in (dtypes.int32, dtypes.int64):\n        raise ValueError('Row-partitioning argument must be int32 or int64')\n    row_splits.shape.assert_has_rank(1)\n    row_splits.set_shape([None])\n    self._row_splits = row_splits\n    for tensor in [row_lengths, value_rowids, nrows, uniform_row_length, nvals]:\n        if tensor is not None:\n            if not isinstance(tensor, tensor_lib.Tensor):\n                raise TypeError('Cached value must be a Tensor or None.')\n            elif tensor.dtype != row_splits.dtype:\n                raise ValueError(f'Inconsistent dtype for encoding tensors: {tensor} vs {row_splits}')\n    self._row_lengths = row_lengths\n    self._value_rowids = value_rowids\n    self._nrows = nrows\n    self._uniform_row_length = uniform_row_length\n    self._nvals = nvals",
    "docstring": "Creates a from the specified encoding tensor(s). This constructor is private -- please use one of the following ops to build s: * * * * * * If row_splits is has a constant value, then all other arguments should have a constant value. Args: row_splits: A 1-D integer tensor with shape . row_lengths: A 1-D integer tensor with shape value_rowids: A 1-D integer tensor with shape . nrows: A 1-D integer scalar tensor. uniform_row_length: A scalar tensor. nvals: A scalar tensor. internal: Private key value, required to ensure that this private constructor is *only* called from the factory methods. Raises: TypeError: If a row partitioning tensor has an inappropriate dtype. TypeError: If exactly one row partitioning argument was not specified. ValueError: If a row partitioning tensor has an inappropriate shape. ValueError: If multiple partitioning arguments are specified. ValueError: If nrows is specified but value_rowids is not None.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\row_partition.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:row_splits arg:row_lengths arg:value_rowids arg:nrows arg:uniform_row_length arg:nvals arg:internal arguments arg arg arg arg arg arg arg arg If Compare Raise Call If Call Raise Call If Compare Raise Call Call Call Assign For If Compare If Call Raise Call If Compare Raise Call Assign Assign Assign Assign Assign"
  },
  {
    "library": "tensorflow",
    "name": "__lt__",
    "source_code": "def __lt__(self, other):\n    return str(self) < str(other)",
    "docstring": "Allows feature columns to be sorted in Python 3 as they are in Python 2. Feature columns need to occasionally be sortable, for example when used as keys in a features dictionary passed to a layer. In CPython, must be defined for all objects in the sequence being sorted. If any objects do not have an compatible with feature column objects (such as strings), then CPython will fall back to using the method below. Args: other: The other object to compare to. Returns: True if the string representation of this object is lexicographically less than the string representation of . For FeatureColumn objects, this looks like \"\".",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\feature_column\\feature_column.py",
    "ast_data": "FunctionDef name:__lt__ arg:self arg:other arguments arg arg Return return:yes Compare Call Call"
  },
  {
    "library": "pytorch",
    "name": "ConversionError",
    "source_code": "class ConversionError(torch.onnx.errors.OnnxExporterError):\n    pass",
    "docstring": "Error during ExportedProgram to ONNX conversion.",
    "type": "class",
    "file_path": "pytorch\\torch\\onnx\\_internal\\exporter\\_errors.py",
    "ast_data": "ClassDef name:ConversionError"
  },
  {
    "library": "scikit-learn",
    "name": "compute",
    "source_code": "@classmethod\n@abstractmethod\ndef compute(cls, X, Y, **kwargs):\n    pass",
    "docstring": "Compute the reduction. Parameters ---------- X : ndarray or CSR matrix of shape (n_samples_X, n_features) Input data. Y : ndarray or CSR matrix of shape (n_samples_Y, n_features) Input data. **kwargs : additional parameters for the reduction Notes ----- This method is an abstract class method: it has to be implemented for all subclasses.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\metrics\\_pairwise_distances_reduction\\_dispatcher.py",
    "ast_data": "FunctionDef name:compute arg:cls arg:X arg:Y arguments arg arg arg arg"
  },
  {
    "library": "pytorch",
    "name": "FakeItemVariable",
    "source_code": "class FakeItemVariable(TensorVariable):\n    _nonvar_fields = {'need_unwrap', *TensorVariable._nonvar_fields}\n\n    def __init__(self, proxy: torch.fx.Proxy, **kwargs) -> None:\n        need_unwrap = kwargs.pop('need_unwrap', False)\n        super().__init__(proxy, **kwargs)\n        self.need_unwrap = need_unwrap\n\n    @classmethod\n    def from_tensor_variable(cls, tensor_variable):\n        return FakeItemVariable(**dict(tensor_variable.__dict__))",
    "docstring": "An unspecialized python variable which prevents access to the underlying raw value. This is needed if item is called on a FakeTensor.",
    "type": "class",
    "file_path": "pytorch\\torch\\_dynamo\\variables\\tensor.py",
    "ast_data": "ClassDef name:FakeItemVariable Assign FunctionDef name:__init__ arg:self arg:proxy arguments arg arg arg Assign Call Call Call Assign FunctionDef name:from_tensor_variable arg:cls arg:tensor_variable arguments arg arg Return return:yes Call Call"
  },
  {
    "library": "matplotlib",
    "name": "get_transform",
    "source_code": "def get_transform(self):\n    return self._transform",
    "docstring": "Return the associated with this scale.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\scale.py",
    "ast_data": "FunctionDef name:get_transform arg:self arguments arg Return return:yes"
  },
  {
    "library": "django",
    "name": "optimize",
    "source_code": "def optimize(self, operations, app_label):\n    if app_label is None:\n        raise TypeError('app_label must be a str.')\n    self._iterations = 0\n    while True:\n        result = self.optimize_inner(operations, app_label)\n        self._iterations += 1\n        if result == operations:\n            return result\n        operations = result",
    "docstring": "Main optimization entry point. Pass in a list of Operation instances, get out a new list of Operation instances. Unfortunately, due to the scope of the optimization (two combinable operations might be separated by several hundred others), this can't be done as a peephole optimization with checks/output implemented on the Operations themselves; instead, the optimizer looks at each individual operation and scans forwards in the list to see if there are any matches, stopping at boundaries - operations which can't be optimized over (RunSQL, operations on the same field/model, etc.) The inner loop is run until the starting list is the same as the result list, and then the result is returned. This means that operation optimization must be stable and always return an equal or shorter list.",
    "type": "method",
    "file_path": "django\\django\\db\\migrations\\optimizer.py",
    "ast_data": "FunctionDef name:optimize arg:self arg:operations arg:app_label arguments arg arg arg If Compare Raise Call Assign While Assign Call If Compare Return return:yes Assign"
  },
  {
    "library": "seaborn",
    "name": "Count",
    "source_code": "@dataclass\nclass Count(Stat):\n    group_by_orient: ClassVar[bool] = True\n\n    def __call__(self, data: DataFrame, groupby: GroupBy, orient: str, scales: dict[str, Scale]) -> DataFrame:\n        var = {'x': 'y', 'y': 'x'}[orient]\n        res = groupby.agg(data.assign(**{var: data[orient]}), {var: len}).dropna(subset=['x', 'y']).reset_index(drop=True)\n        return res",
    "docstring": "Count distinct observations within groups. See Also -------- Hist : A more fully-featured transform including binning and/or normalization. Examples -------- .. include:: ../docstrings/objects.Count.rst",
    "type": "class",
    "file_path": "seaborn\\seaborn\\_stats\\counting.py",
    "ast_data": "ClassDef name:Count FunctionDef name:__call__ arg:self arg:data arg:groupby arg:orient arg:scales arguments arg arg arg arg arg Assign Assign Call Call Call Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "_embedded_frames",
    "source_code": "def _embedded_frames(frame_list, frame_format):\n    if frame_format == 'svg':\n        frame_format = 'svg+xml'\n    template = '  frames[{0}] = \"data:image/{1};base64,{2}\"\\n'\n    return '\\n' + ''.join((template.format(i, frame_format, frame_data.replace('\\n', '\\\\\\n')) for i, frame_data in enumerate(frame_list)))",
    "docstring": "frame_list should be a list of base64-encoded png files",
    "type": "function",
    "file_path": "matplotlib\\lib\\matplotlib\\animation.py",
    "ast_data": "FunctionDef name:_embedded_frames arg:frame_list arg:frame_format arguments arg arg If Compare Assign Assign Return return:yes Call Call Call Call"
  },
  {
    "library": "django",
    "name": "from_pgraster",
    "source_code": "def from_pgraster(data):\n    if data is None:\n        return\n    header, data = chunk(data, 122)\n    header = unpack(POSTGIS_HEADER_STRUCTURE, header)\n    bands = []\n    pixeltypes = []\n    while data:\n        pixeltype_with_flags, data = chunk(data, 2)\n        pixeltype_with_flags = unpack('B', pixeltype_with_flags)[0]\n        pixeltype = pixeltype_with_flags & BANDTYPE_PIXTYPE_MASK\n        pixeltype = POSTGIS_TO_GDAL[pixeltype]\n        pack_type = GDAL_TO_STRUCT[pixeltype]\n        pack_size = 2 * STRUCT_SIZE[pack_type]\n        nodata, data = chunk(data, pack_size)\n        nodata = unpack(pack_type, nodata)[0]\n        band, data = chunk(data, pack_size * header[10] * header[11])\n        band_result = {'data': bytes.fromhex(band)}\n        if pixeltype_with_flags & BANDTYPE_FLAG_HASNODATA:\n            band_result['nodata_value'] = nodata\n        bands.append(band_result)\n        pixeltypes.append(pixeltype)\n    if len(set(pixeltypes)) != 1:\n        raise ValidationError('Band pixeltypes are not all equal.')\n    return {'srid': int(header[9]), 'width': header[10], 'height': header[11], 'datatype': pixeltypes[0], 'origin': (header[5], header[6]), 'scale': (header[3], header[4]), 'skew': (header[7], header[8]), 'bands': bands}",
    "docstring": "Convert a PostGIS HEX String into a dictionary.",
    "type": "function",
    "file_path": "django\\django\\contrib\\gis\\db\\backends\\postgis\\pgraster.py",
    "ast_data": "FunctionDef name:from_pgraster arg:data arguments arg If Compare Return return:no Assign Call Assign Call Assign Assign While Assign Call Assign Call Assign Assign Assign Assign Assign Call Assign Call Assign Call Assign Call If Assign Call Call If Compare Call Call Raise Call Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "_validate_apply_axis_arg",
    "source_code": "def _validate_apply_axis_arg(arg: NDFrame | Sequence | np.ndarray, arg_name: str, dtype: Any | None, data: NDFrame) -> np.ndarray:\n    dtype = {'dtype': dtype} if dtype else {}\n    if isinstance(arg, Series) and isinstance(data, DataFrame):\n        raise ValueError(f\"'{arg_name}' is a Series but underlying data for operations is a DataFrame since 'axis=None'\")\n    if isinstance(arg, DataFrame) and isinstance(data, Series):\n        raise ValueError(f\"'{arg_name}' is a DataFrame but underlying data for operations is a Series with 'axis in [0,1]'\")\n    if isinstance(arg, (Series, DataFrame)):\n        arg = arg.reindex_like(data).to_numpy(**dtype)\n    else:\n        arg = np.asarray(arg, **dtype)\n        assert isinstance(arg, np.ndarray)\n        if arg.shape != data.shape:\n            raise ValueError(f\"supplied '{arg_name}' is not correct shape for data over selected 'axis': got {arg.shape}, expected {data.shape}\")\n    return arg",
    "docstring": "For the apply-type methods, `` we must make sure that the two are compatible shapes, or raise. Parameters ---------- arg : sequence, Series or DataFrame the user input arg arg_name : string name of the arg for use in error messages dtype : numpy dtype, optional forced numpy dtype if given data : Series or DataFrame underling subset of Styler data on which operations are performed Returns ------- ndarray",
    "type": "function",
    "file_path": "pandas\\pandas\\io\\formats\\style.py",
    "ast_data": "FunctionDef name:_validate_apply_axis_arg arg:arg arg:arg_name arg:dtype arg:data arguments arg arg arg arg Assign If BoolOp Call Call Raise Call If BoolOp Call Call Raise Call If Call Assign Call Call Assign Call Call If Compare Raise Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "on_clicked",
    "source_code": "def on_clicked(self, func):\n    return self._observers.connect('clicked', func)",
    "docstring": "Connect the callback function *func* to button click events. Parameters ---------- func : callable When the button is clicked, call *func* with button label. When all buttons are cleared, call *func* with None. The callback func must have the signature:: def func(label: str | None) -> Any Return values may exist, but are ignored. Returns ------- A connection id, which can be used to disconnect the callback.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\widgets.py",
    "ast_data": "FunctionDef name:on_clicked arg:self arg:func arguments arg arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "_allocate_and_copy_recording_inputs",
    "source_code": "def _allocate_and_copy_recording_inputs(self, inputs: list[InputType]) -> list[InputType]:\n    torch.cuda.synchronize()\n    self.stream.wait_stream(torch.cuda.current_stream())\n    recording_inputs: list[InputType] = []\n    with warnings.catch_warnings(record=True), torch.cuda.device(self.device), _use_cuda_memory_pool_manager(self.device, mem_pool=self.cuda_graphs_pool, stream=self.stream):\n        for i, inp in enumerate(inputs):\n            if not isinstance(inp, torch.Tensor):\n                assert isinstance(inp, (int, torch.Generator))\n                recording_inputs.append(inp)\n            elif i not in self.static_input_idxs:\n                recording_inputs.append(static_input(inp))\n            else:\n                recording_inputs.append(inp)\n        self._copy_inputs_and_remove_from_src(recording_inputs, inputs)\n    return recording_inputs",
    "docstring": "Allocate inputs for non static, non cudagraph managed tensors in the memory pool and copy over the tensor values.",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\cudagraph_trees.py",
    "ast_data": "FunctionDef name:_allocate_and_copy_recording_inputs arg:self arg:inputs arguments arg arg Call Call Call With Call Call Call For Call If Call Call Call If Compare Call Call Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_replica_ctx_all_reduce",
    "source_code": "def _replica_ctx_all_reduce(self, reduce_op, value, options=None):\n    if options is None:\n        options = collective_util.Options()\n    replica_context = get_replica_context()\n    assert replica_context, '`StrategyExtended._replica_ctx_all_reduce` must be called in a replica context'\n\n    def merge_fn(_, flat_value):\n        return self.batch_reduce_to(reduce_op, [(v, v) for v in flat_value], options)\n    reduced = replica_context.merge_call(merge_fn, args=(nest.flatten(value),))\n    return nest.pack_sequence_as(value, reduced)",
    "docstring": "All-reduce across all replicas so that all get the final result. If is a nested structure of tensors, all-reduces of these tensors will be batched when possible. can be set to hint the batching behavior. This API must be called in a replica context. Args: reduce_op: A value specifying how values should be combined. value: Value to be reduced. A tensor or a nested structure of tensors. options: A . Options to perform collective operations. This overrides the default options if the takes one in the constructor. Returns: A tensor or a nested structure of tensors with the reduced values. The structure is the same as .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\distribute_lib.py",
    "ast_data": "FunctionDef name:_replica_ctx_all_reduce arg:self arg:reduce_op arg:value arg:options arguments arg arg arg arg If Compare Assign Call Assign Call FunctionDef name:merge_fn arg:_ arg:flat_value arguments arg arg Return return:yes Call Assign Call Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "configure_coordination_service",
    "source_code": "def configure_coordination_service(self, service_type, service_leader='', enable_health_check=True, cluster_register_timeout_in_ms=0, heartbeat_timeout_in_ms=0, shutdown_barrier_timeout_in_ms=0, coordinated_jobs=None, allow_new_incarnation_to_reconnect=False):\n    if self._context_handle:\n        logging.warning('Configuring coordination service type may not be effective because the context is already initialized.')\n    config = coordination_config_pb2.CoordinationServiceConfig()\n    config.service_type = service_type\n    if service_leader:\n        config.service_leader = pydev.canonical_name(service_leader)\n    config.enable_health_check = enable_health_check\n    config.cluster_register_timeout_in_ms = cluster_register_timeout_in_ms\n    config.heartbeat_timeout_in_ms = heartbeat_timeout_in_ms\n    config.shutdown_barrier_timeout_in_ms = shutdown_barrier_timeout_in_ms\n    config.allow_new_incarnation_to_reconnect = allow_new_incarnation_to_reconnect\n    if coordinated_jobs is not None:\n        if isinstance(coordinated_jobs, list):\n            config.coordinated_job_list.extend(coordinated_jobs)\n        else:\n            raise ValueError('`coordinated_jobs` must be list[CoordinatedJob] or None, but got: %s' % (coordinated_jobs,))\n    self._coordination_service_config = config",
    "docstring": "Enable distributed coordination service with specified configs.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\context.py",
    "ast_data": "FunctionDef name:configure_coordination_service arg:self arg:service_type arg:service_leader arg:enable_health_check arg:cluster_register_timeout_in_ms arg:heartbeat_timeout_in_ms arg:shutdown_barrier_timeout_in_ms arg:coordinated_jobs arg:allow_new_incarnation_to_reconnect arguments arg arg arg arg arg arg arg arg arg If Call Assign Call Assign If Assign Call Assign Assign Assign Assign Assign If Compare If Call Call Raise Call Assign"
  },
  {
    "library": "pytorch",
    "name": "is_tensor_like",
    "source_code": "def is_tensor_like(inp):\n    return type(inp) is torch.Tensor or hasattr(inp, '__torch_function__')",
    "docstring": "Returns `` attribute on the type of the input. Examples -------- A subclass of tensor is generally a Tensor-like. >>> class SubTensor(torch.Tensor): ... >>> is_tensor_like(SubTensor([0])) True Built-in or user types aren't usually Tensor-like. >>> is_tensor_like(6) False >>> is_tensor_like(None) False >>> class NotATensor: ... >>> is_tensor_like(NotATensor()) False But, they can be made Tensor-like by implementing __torch_function__. >>> class TensorLike: ... @classmethod ... def __torch_function__(cls, func, types, args, kwargs): ... return -1 >>> is_tensor_like(TensorLike()) True",
    "type": "function",
    "file_path": "pytorch\\torch\\overrides.py",
    "ast_data": "FunctionDef name:is_tensor_like arg:inp arguments arg Return return:yes BoolOp Compare Call Call"
  },
  {
    "library": "pytorch",
    "name": "hardswish",
    "source_code": "def hardswish(input: Tensor, scale: float, zero_point: int) -> Tensor:\n    if not input.is_quantized:\n        raise ValueError(\"Input to 'quantized.hardswish' must be quantized!\")\n    return torch._ops.ops.quantized.hardswish(input, scale, zero_point)",
    "docstring": "This is the quantized version of :func:. Args: input: quantized input scale: quantization scale of the output tensor zero_point: quantization zero point of the output tensor",
    "type": "function",
    "file_path": "pytorch\\torch\\ao\\nn\\quantized\\functional.py",
    "ast_data": "FunctionDef name:hardswish arg:input arg:scale arg:zero_point arguments arg arg arg If Raise Call Return return:yes Call"
  },
  {
    "library": "django",
    "name": "_live_receivers",
    "source_code": "def _live_receivers(self, sender):\n    receivers = None\n    if self.use_caching and (not self._dead_receivers):\n        receivers = self.sender_receivers_cache.get(sender)\n        if receivers is NO_RECEIVERS:\n            return ([], [])\n    if receivers is None:\n        with self.lock:\n            self._clear_dead_receivers()\n            senderkey = _make_id(sender)\n            receivers = []\n            for (_receiverkey, r_senderkey), receiver, sender_ref, is_async in self.receivers:\n                if r_senderkey == NONE_ID or r_senderkey == senderkey:\n                    receivers.append((receiver, sender_ref, is_async))\n            if self.use_caching:\n                if not receivers:\n                    self.sender_receivers_cache[sender] = NO_RECEIVERS\n                else:\n                    self.sender_receivers_cache[sender] = receivers\n    non_weak_sync_receivers = []\n    non_weak_async_receivers = []\n    for receiver, sender_ref, is_async in receivers:\n        if isinstance(receiver, weakref.ReferenceType):\n            receiver = receiver()\n            if receiver is None:\n                continue\n        if sender_ref is not None and sender_ref() is None:\n            continue\n        if is_async:\n            non_weak_async_receivers.append(receiver)\n        else:\n            non_weak_sync_receivers.append(receiver)\n    return (non_weak_sync_receivers, non_weak_async_receivers)",
    "docstring": "Filter sequence of receivers to get resolved, live receivers. This checks for weak references and resolves them, then returning only live receivers.",
    "type": "method",
    "file_path": "django\\django\\dispatch\\dispatcher.py",
    "ast_data": "FunctionDef name:_live_receivers arg:self arg:sender arguments arg arg Assign If BoolOp Assign Call If Compare Return return:no If Compare With Call Assign Call Assign For If BoolOp Compare Compare Call If If Assign Assign Assign Assign For If Call Assign Call If Compare If BoolOp Compare Compare Call If Call Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "set_subplotspec",
    "source_code": "def set_subplotspec(self, subplotspec):\n    self._subplotspec = subplotspec\n    self.set_position(subplotspec.get_position(self.figure))",
    "docstring": "Set the SubplotSpec instance.",
    "type": "method",
    "file_path": "matplotlib\\lib\\mpl_toolkits\\axes_grid1\\axes_divider.py",
    "ast_data": "FunctionDef name:set_subplotspec arg:self arg:subplotspec arguments arg arg Assign Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_supervised_signature_def",
    "source_code": "def _supervised_signature_def(method_name, inputs, loss=None, predictions=None, metrics=None):\n    if inputs is None or not inputs:\n        raise ValueError('{} inputs cannot be None or empty.'.format(method_name))\n    signature_inputs = {key: utils.build_tensor_info(tensor) for key, tensor in inputs.items()}\n    signature_outputs = {}\n    for output_set in (loss, predictions, metrics):\n        if output_set is not None:\n            sig_out = {key: utils.build_tensor_info(tensor) for key, tensor in output_set.items()}\n            signature_outputs.update(sig_out)\n    signature_def = signature_def_utils.build_signature_def(signature_inputs, signature_outputs, method_name)\n    return signature_def",
    "docstring": "Creates a signature for training and eval data. This function produces signatures that describe the inputs and outputs of a supervised process, such as training or evaluation, that results in loss, metrics, and the like. Note that this function only requires inputs to be not None. Args: method_name: Method name of the SignatureDef as a string. inputs: dict of string to . loss: dict of string to representing computed loss. predictions: dict of string to representing the output predictions. metrics: dict of string to representing metric ops. Returns: A train- or eval-flavored signature_def. Raises: ValueError: If inputs or outputs is .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\saving\\utils_v1\\signature_def_utils.py",
    "ast_data": "FunctionDef name:_supervised_signature_def arg:method_name arg:inputs arg:loss arg:predictions arg:metrics arguments arg arg arg arg arg If BoolOp Compare Raise Call Call Assign Call Call Assign For If Compare Assign Call Call Call Assign Call Return return:yes"
  },
  {
    "library": "django",
    "name": "spheroid",
    "source_code": "@property\ndef spheroid(self):\n    return self.srs['spheroid']",
    "docstring": "Return the spheroid name for this spatial reference.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\db\\backends\\base\\models.py",
    "ast_data": "FunctionDef name:spheroid arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_eager_metrics_fn",
    "source_code": "def _eager_metrics_fn(model, outputs, targets, sample_weights=None, masks=None):\n    outputs = nest.flatten(outputs)\n    targets = nest.flatten(targets)\n    metric_results = []\n    if targets:\n        if len(model._targets) != len(targets):\n            new_targets = [None if t is None else targets.pop(0) for t in model._targets]\n            targets = new_targets\n        metric_results = model._handle_metrics(outputs, targets=targets, sample_weights=sample_weights, masks=masks, return_weighted_and_unweighted_metrics=True, skip_target_masks=model._prepare_skip_target_masks())\n    metric_results.extend([m.result() for m in model.metrics if m not in model._compile_metric_functions])\n    return metric_results",
    "docstring": "Calculates the metrics for each output of the given model. Args: model: The model on which metrics are being calculated. outputs: The outputs of the given model. targets: The predictions or targets of the given model. sample_weights: Optional list of sample weights for each output. masks: Optional list of masks for each output. Returns: Returns the metric results for each output of the model.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\training_eager_v1.py",
    "ast_data": "FunctionDef name:_eager_metrics_fn arg:model arg:outputs arg:targets arg:sample_weights arg:masks arguments arg arg arg arg arg Assign Call Assign Call Assign If If Compare Call Call Assign Compare Call Assign Assign Call Call Call Call Compare Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "execute",
    "source_code": "def execute(self, fig):\n    raise NotImplementedError",
    "docstring": "Execute the layout on the figure given by *fig*.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\layout_engine.py",
    "ast_data": "FunctionDef name:execute arg:self arg:fig arguments arg arg Raise"
  },
  {
    "library": "pytorch",
    "name": "_measure_time",
    "source_code": "def _measure_time(self, launch_test, test_case, iters, print_per_iter):\n    curr_test_total_time = 0\n    time_trace = []\n    while True:\n        run_time_sec = launch_test(test_case, iters, print_per_iter)\n        curr_test_total_time += run_time_sec\n        results_are_significant = self._iteration_result_is_significant(iters, run_time_sec, curr_test_total_time, self.has_explicit_iteration_count)\n        report_run_time = 1000000.0 * run_time_sec / iters\n        time_trace.append(report_run_time)\n        if self.args.report_aibench:\n            mode = 'JIT' if self.use_jit else 'Eager'\n            test_name = '_'.join([test_case.framework, test_case.test_config.test_name, mode])\n            print('PyTorchObserver ' + json.dumps({'type': test_name, 'metric': 'latency', 'unit': 'ms', 'value': str(report_run_time / 1000.0)}))\n        if results_are_significant:\n            break\n        iters = self._predict_num_iter_needed(iters)\n    reported_run_time_us = np.percentile(np.array(time_trace), 50)\n    return reported_run_time_us",
    "docstring": "This function execute the operator for iterations then look at the time. If it's not significant, the number of iterations will be increased before rerun. The execution stops when the time becomes significant.",
    "type": "method",
    "file_path": "pytorch\\benchmarks\\operator_benchmark\\benchmark_core.py",
    "ast_data": "FunctionDef name:_measure_time arg:self arg:launch_test arg:test_case arg:iters arg:print_per_iter arguments arg arg arg arg arg Assign Assign While Assign Call Assign Call Assign Call If Assign Assign Call Call Call Call If Assign Call Assign Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "clone",
    "source_code": "def clone(self):\n    return type(self)(self.nbytes(), device=self.device).copy_(self)",
    "docstring": "Return a copy of this storage.",
    "type": "method",
    "file_path": "pytorch\\torch\\storage.py",
    "ast_data": "FunctionDef name:clone arg:self arguments arg Return return:yes Call Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "flat_transforms_to_matrices",
    "source_code": "def flat_transforms_to_matrices(transforms):\n    with ops.name_scope('flat_transforms_to_matrices'):\n        transforms = ops.convert_to_tensor(transforms, name='transforms')\n        if transforms.shape.ndims not in (1, 2):\n            raise ValueError('Transforms should be 1D or 2D, got: %s' % transforms)\n        transforms = array_ops.reshape(transforms, constant_op.constant([-1, 8]))\n        num_transforms = array_ops.shape(transforms)[0]\n        return array_ops.reshape(array_ops.concat([transforms, array_ops.ones([num_transforms, 1])], axis=1), constant_op.constant([-1, 3, 3]))",
    "docstring": "Converts projective transforms to affine matrices. Note that the output matrices map output coordinates to input coordinates. For the forward transformation matrix, call on the result. Args: transforms: Vector of length 8, or batches of transforms with shape . Returns: 3D tensor of matrices with shape . The output matrices map the *output coordinates* (in homogeneous coordinates) of each transform to the corresponding *input coordinates*. Raises: ValueError: If have an invalid shape.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\image_ops.py",
    "ast_data": "FunctionDef name:flat_transforms_to_matrices arg:transforms arguments arg With Call Assign Call If Compare Raise Call Assign Call Call Assign Call Return return:yes Call Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_maybe_create_save_counter",
    "source_code": "def _maybe_create_save_counter(self):\n    if self._save_counter is None:\n        with ops.device('/cpu:0'):\n            self._save_counter = data_structures.NoDependency(add_variable(self, name='save_counter', initializer=0, dtype=dtypes.int64, trainable=False))\n            if self._attached_dependencies is not None:\n                self._attached_dependencies.append(base.TrackableReference('save_counter', self._save_counter))\n                if isinstance(self.root, weakref.ref):\n                    root = self.root()\n                else:\n                    root = self.root\n                restore = root._deferred_dependencies.pop('save_counter', ())\n                if restore:\n                    restore[0].restore(self._save_counter)",
    "docstring": "Create a save counter if it does not yet exist.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\checkpoint\\checkpoint.py",
    "ast_data": "FunctionDef name:_maybe_create_save_counter arg:self arguments arg If Compare With Call Assign Call Call If Compare Call Call If Call Assign Call Assign Assign Call If Call"
  },
  {
    "library": "pytorch",
    "name": "benchmark_sub_kernels",
    "source_code": "def benchmark_sub_kernels(self, *args, **kwargs):\n\n    def wrap_fn(kernel):\n\n        def inner():\n            args_clone, kwargs_clone = kernel.clone_args(*args, **kwargs)\n            return kernel.run(*args_clone, **kwargs_clone)\n        return inner\n    return [benchmarker.benchmark_gpu(wrap_fn(kernel), rep=40) for kernel in self.kernels]",
    "docstring": "Benchmark all the sub kernels and return the execution time (in milliseconds) for each of time. Unit test may mock this method to force a specific kernel to be picked.",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\codegen\\multi_kernel.py",
    "ast_data": "FunctionDef name:benchmark_sub_kernels arg:self arguments arg arg arg FunctionDef name:wrap_fn arg:kernel arguments arg FunctionDef name:inner arguments Assign Call Return return:yes Call Return return:yes Return return:yes Call Call"
  },
  {
    "library": "numpy",
    "name": "getdomain",
    "source_code": "def getdomain(x):\n    [x] = as_series([x], trim=False)\n    if x.dtype.char in np.typecodes['Complex']:\n        rmin, rmax = (x.real.min(), x.real.max())\n        imin, imax = (x.imag.min(), x.imag.max())\n        return np.array((complex(rmin, imin), complex(rmax, imax)))\n    else:\n        return np.array((x.min(), x.max()))",
    "docstring": "Return a domain suitable for given abscissae. Find a domain suitable for a polynomial or Chebyshev series defined at the values supplied. Parameters ---------- x : array_like 1-d array of abscissae whose domain will be determined. Returns ------- domain : ndarray 1-d array containing two values. If the inputs are complex, then the two returned points are the lower left and upper right corners of the smallest rectangle (aligned with the axes) in the complex plane containing the points . If the inputs are real, then the two points are the ends of the smallest interval containing the points . See Also -------- mapparms, mapdomain Examples -------- >>> import numpy as np >>> from numpy.polynomial import polyutils as pu >>> points = np.arange(4)**2 - 5; points array([-5, -4, -1, 4]) >>> pu.getdomain(points) array([-5., 4.]) >>> c = np.exp(complex(0,1)*np.pi*np.arange(12)/6) # unit circle >>> pu.getdomain(c) array([-1.-1.j, 1.+1.j])",
    "type": "function",
    "file_path": "numpy\\numpy\\polynomial\\polyutils.py",
    "ast_data": "FunctionDef name:getdomain arg:x arguments arg Assign Call If Compare Assign Call Call Assign Call Call Return return:yes Call Call Call Return return:yes Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_ExtractRemainingAttrs",
    "source_code": "def _ExtractRemainingAttrs(op_type_name, op_def, keywords, default_type_attr_map, attrs):\n    for attr in op_def.attr:\n        if attr.name in attrs:\n            if attr.name in keywords:\n                raise TypeError(f\"Should not specify value for inferred attr '{attr.name}' for {op_type_name}.\")\n            continue\n        if attr.name in keywords:\n            attrs[attr.name] = keywords.pop(attr.name)\n        elif attr.name + '_' in keywords:\n            attrs[attr.name] = keywords.pop(attr.name + '_')\n        elif attr.name in default_type_attr_map:\n            attrs[attr.name] = default_type_attr_map[attr.name]\n        else:\n            raise TypeError(f'No argument found for attr {attr.name} for {op_type_name}')",
    "docstring": "Extracts the remaining attributes into in _apply_op_helper.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\op_def_library.py",
    "ast_data": "FunctionDef name:_ExtractRemainingAttrs arg:op_type_name arg:op_def arg:keywords arg:default_type_attr_map arg:attrs arguments arg arg arg arg arg For If Compare If Compare Raise Call If Compare Assign Call If Compare Assign Call If Compare Assign Raise Call"
  },
  {
    "library": "kornia",
    "name": "_validate_input3d",
    "source_code": "def _validate_input3d(f: Callable[..., Any]) -> Callable[..., Any]:\n\n    @wraps(f)\n    def wrapper(input: Tensor, *args: Any, **kwargs: Any) -> Any:\n        if not torch.is_tensor(input):\n            raise TypeError(f'Input type is not a Tensor. Got {type(input)}')\n        input_shape = len(input.shape)\n        if input_shape != 5:\n            raise AssertionError(f'Expect input of 5 dimensions, got {input_shape} instead')\n        _validate_input_dtype(input, accepted_dtypes=[torch.float16, torch.float32, torch.float64])\n        return f(input, *args, **kwargs)\n    return wrapper",
    "docstring": "Validate the 3D input of the wrapped function. Args: f: a function that takes the first argument as tensor. Returns: the wrapped function after input is validated.",
    "type": "function",
    "file_path": "kornia\\kornia\\augmentation\\utils\\helpers.py",
    "ast_data": "FunctionDef name:_validate_input3d arg:f arguments arg FunctionDef name:wrapper arg:input arguments arg arg arg If Call Raise Call Call Assign Call If Compare Raise Call Call Return return:yes Call Call Return return:yes"
  },
  {
    "library": "pandas",
    "name": "maybe_coerce_values",
    "source_code": "def maybe_coerce_values(values: ArrayLike) -> ArrayLike:\n    if isinstance(values, np.ndarray):\n        values = ensure_wrapped_if_datetimelike(values)\n        if issubclass(values.dtype.type, str):\n            values = np.array(values, dtype=object)\n    if isinstance(values, (DatetimeArray, TimedeltaArray)) and values.freq is not None:\n        values = values._with_freq(None)\n    return values",
    "docstring": "Input validation for values passed to __init__. Ensure that any datetime64/timedelta64 dtypes are in nanoseconds. Ensure that we do not have string dtypes. Parameters ---------- values : np.ndarray or ExtensionArray Returns ------- values : np.ndarray or ExtensionArray",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\internals\\blocks.py",
    "ast_data": "FunctionDef name:maybe_coerce_values arg:values arguments arg If Call Assign Call If Call Assign Call If BoolOp Call Compare Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, checkpoint_dir, save_secs=None, save_steps=None, saver=None, checkpoint_basename='model.ckpt', scaffold=None, listeners=None, save_graph_def=True):\n    logging.info('Create CheckpointSaverHook.')\n    if saver is not None and scaffold is not None:\n        raise ValueError('You cannot provide both saver and scaffold.')\n    self._saver = saver\n    self._checkpoint_dir = checkpoint_dir\n    self._save_path = os.path.join(checkpoint_dir, checkpoint_basename)\n    self._scaffold = scaffold\n    self._timer = SecondOrStepTimer(every_secs=save_secs, every_steps=save_steps)\n    self._listeners = listeners or []\n    self._steps_per_run = 1000000\n    self._save_graph_def = save_graph_def",
    "docstring": "Initializes a . Args: checkpoint_dir: , base directory for the checkpoint files. save_secs: , save every N secs. save_steps: , save every N steps. saver: object, used for saving. checkpoint_basename: , base name for the checkpoint files. scaffold: , use to get saver object. listeners: List of subclass instances. Used for callbacks that run immediately before or after this hook saves the checkpoint. save_graph_def: Whether to save the GraphDef and MetaGraphDef to . The GraphDef is saved after the session is created as . MetaGraphDefs are saved out for every checkpoint as . Raises: ValueError: One of or should be set. ValueError: At most one of or should be set.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\training\\basic_session_run_hooks.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:checkpoint_dir arg:save_secs arg:save_steps arg:saver arg:checkpoint_basename arg:scaffold arg:listeners arg:save_graph_def arguments arg arg arg arg arg arg arg arg arg Call If BoolOp Compare Compare Raise Call Assign Assign Assign Call Assign Assign Call Assign BoolOp Assign Assign"
  },
  {
    "library": "pandas",
    "name": "_filter_nodes",
    "source_code": "def _filter_nodes(superclass, all_nodes=_all_nodes):\n    node_names = (node.__name__ for node in all_nodes if issubclass(node, superclass))\n    return frozenset(node_names)",
    "docstring": "Filter out AST nodes that are subclasses of ``.",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\computation\\expr.py",
    "ast_data": "FunctionDef name:_filter_nodes arg:superclass arg:all_nodes arguments arg arg Assign Call Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "minorlocator",
    "source_code": "@property\ndef minorlocator(self):\n    return self.long_axis.get_minor_locator()",
    "docstring": "Minor tick for the colorbar.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\colorbar.py",
    "ast_data": "FunctionDef name:minorlocator arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "numpy",
    "name": "soften_mask",
    "source_code": "def soften_mask(self):\n    self._hardmask = False\n    return self",
    "docstring": "Force the mask to soft (default), allowing unmasking by assignment. Whether the mask of a masked array is hard or soft is determined by its property. sets to `` (and returns the modified self). See Also -------- ma.MaskedArray.hardmask ma.MaskedArray.harden_mask",
    "type": "method",
    "file_path": "numpy\\numpy\\ma\\core.py",
    "ast_data": "FunctionDef name:soften_mask arg:self arguments arg Assign Return return:yes"
  },
  {
    "library": "django",
    "name": "num_pages",
    "source_code": "@cached_property\ndef num_pages(self):\n    if self.count == 0 and (not self.allow_empty_first_page):\n        return 0\n    hits = max(1, self.count - self.orphans)\n    return ceil(hits / self.per_page)",
    "docstring": "Return the total number of pages.",
    "type": "method",
    "file_path": "django\\django\\core\\paginator.py",
    "ast_data": "FunctionDef name:num_pages arg:self arguments arg If BoolOp Compare Return return:yes Assign Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_validate_flat_values_dynamically",
    "source_code": "def _validate_flat_values_dynamically(self, flat_values):\n    if self.row_partitions:\n        assert_op = check_ops.assert_equal(self.row_partitions[-1].nvals(), array_ops.shape(flat_values, out_type=self.dtype)[0], message='Last row partition does not match flat_values.')\n        return control_flow_ops.with_dependencies([assert_op], flat_values)\n    return flat_values",
    "docstring": "Test if flat_values have the right nvals dynamically.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\dynamic_ragged_shape.py",
    "ast_data": "FunctionDef name:_validate_flat_values_dynamically arg:self arg:flat_values arguments arg arg If Assign Call Call Call Return return:yes Call Return return:yes"
  },
  {
    "library": "pandas",
    "name": "npoints",
    "source_code": "@property\ndef npoints(self) -> int:\n    return self.sp_index.npoints",
    "docstring": "The number of non- `` points, as decimal. Examples -------- >>> from pandas.arrays import SparseArray >>> s = SparseArray([0, 0, 1, 1, 1], fill_value=0) >>> s.npoints 3",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\arrays\\sparse\\array.py",
    "ast_data": "FunctionDef name:npoints arg:self arguments arg Return return:yes"
  },
  {
    "library": "django",
    "name": "resolve_relation",
    "source_code": "def resolve_relation(model, app_label=None, model_name=None):\n    if isinstance(model, str):\n        if model == RECURSIVE_RELATIONSHIP_CONSTANT:\n            if app_label is None or model_name is None:\n                raise TypeError('app_label and model_name must be provided to resolve recursive relationships.')\n            return (app_label, model_name)\n        if '.' in model:\n            app_label, model_name = model.split('.', 1)\n            return (app_label, model_name.lower())\n        if app_label is None:\n            raise TypeError('app_label must be provided to resolve unscoped model relationships.')\n        return (app_label, model.lower())\n    return (model._meta.app_label, model._meta.model_name)",
    "docstring": "Turn a model class or model reference string and return a model tuple. app_label and model_name are used to resolve the scope of recursive and unscoped model relationship.",
    "type": "function",
    "file_path": "django\\django\\db\\migrations\\utils.py",
    "ast_data": "FunctionDef name:resolve_relation arg:model arg:app_label arg:model_name arguments arg arg arg If Call If Compare If BoolOp Compare Compare Raise Call Return return:yes If Compare Assign Call Return return:yes Call If Compare Raise Call Return return:yes Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "reapply_all_patches",
    "source_code": "def reapply_all_patches(self):\n    for patch in self.patches_made:\n        patch.patch()\n    return self.patches_made",
    "docstring": "Patch all the stored patcheds. It doesn't modify patches_made.",
    "type": "method",
    "file_path": "pytorch\\torch\\fx\\_symbolic_trace.py",
    "ast_data": "FunctionDef name:reapply_all_patches arg:self arguments arg For Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "dump",
    "source_code": "def dump():\n    current_file = inspect.getfile(dump)\n    f = open(current_file)\n    current_content = f.read()\n    f.close()\n    begin_data_str = '# BEGIN GENERATED DATA\\n'\n    begin_data_index = current_content.find(begin_data_str)\n    end_data_index = current_content.find('    # END GENERATED DATA\\n')\n    if begin_data_index == -1 or end_data_index == -1:\n        warnings.warn(f'{current_file} cannot be updated: BEGIN/END GENERATED DATA comment blocks appear to be corrupted')\n        return\n\n    def sort_key(key):\n        op, device_name, version = key\n        version = tuple((str(item) if isinstance(item, torch.dtype) else item for item in version))\n        return (op, device_name, version)\n    part1 = current_content[:begin_data_index + len(begin_data_str)]\n    part2 = current_content[end_data_index:]\n    data_part = []\n    for op_key in sorted(_operation_device_version_data, key=sort_key):\n        data_part.append('    ' + repr(op_key).replace(\"'\", '\"') + ': {')\n        op_data = _operation_device_version_data[op_key]\n        data_part.extend((f'        {key}: {op_data[key]},' for key in sorted(op_data)))\n        data_part.append('    },')\n    new_content = part1 + '\\n'.join(data_part) + '\\n' + part2\n    if current_content != new_content:\n        f = open(current_file, 'w')\n        f.write(new_content)\n        f.close()",
    "docstring": "Store the current runtime db state to the module file.",
    "type": "function",
    "file_path": "pytorch\\torch\\sparse\\_triton_ops_meta.py",
    "ast_data": "FunctionDef name:dump arguments Assign Call Assign Call Assign Call Call Assign Assign Call Assign Call If BoolOp Compare Compare Call Return return:no FunctionDef name:sort_key arg:key arguments arg Assign Assign Call Call Call Return return:yes Assign Call Assign Assign For Call Call Call Call Assign Call Call Call Assign Call If Compare Assign Call Call Call"
  },
  {
    "library": "django",
    "name": "_setup_query",
    "source_code": "def _setup_query(self):\n    self.values = []\n    self.related_ids = None\n    self.related_updates = {}",
    "docstring": "Run on initialization and at the end of chaining. Any attributes that would normally be set in __init__() should go here instead.",
    "type": "method",
    "file_path": "django\\django\\db\\models\\sql\\subqueries.py",
    "ast_data": "FunctionDef name:_setup_query arg:self arguments arg Assign Assign Assign"
  },
  {
    "library": "tensorflow",
    "name": "get_or_default",
    "source_code": "@staticmethod\ndef get_or_default(arg_name, collection_key, default_constructor):\n    elements = ops.get_collection(collection_key)\n    if elements:\n        if len(elements) > 1:\n            raise RuntimeError('More than one item in the collection \"%s\". Please indicate which one to use by passing it to the tf.Scaffold constructor as:  tf.Scaffold(%s=item to use)', collection_key, arg_name)\n        return elements[0]\n    op = default_constructor()\n    if op is not None:\n        ops.add_to_collection(collection_key, op)\n    return op",
    "docstring": "Get from cache or create a default operation.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\training\\monitored_session.py",
    "ast_data": "FunctionDef name:get_or_default arg:arg_name arg:collection_key arg:default_constructor arguments arg arg arg Assign Call If If Compare Call Raise Call Return return:yes Assign Call If Compare Call Return return:yes"
  },
  {
    "library": "cherrypy",
    "name": "__next__",
    "source_code": "def __next__(self):\n    return next(self.iter_response)",
    "docstring": "Iterate over the app response.",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\_cpwsgi.py",
    "ast_data": "FunctionDef name:__next__ arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "django",
    "name": "JsonResponse",
    "source_code": "class JsonResponse(HttpResponse):\n\n    def __init__(self, data, encoder=DjangoJSONEncoder, safe=True, json_dumps_params=None, **kwargs):\n        if safe and (not isinstance(data, dict)):\n            raise TypeError('In order to allow non-dict objects to be serialized set the safe parameter to False.')\n        if json_dumps_params is None:\n            json_dumps_params = {}\n        kwargs.setdefault('content_type', 'application/json')\n        data = json.dumps(data, cls=encoder, **json_dumps_params)\n        super().__init__(content=data, **kwargs)",
    "docstring": "An HTTP response class that consumes data to be serialized to JSON. :param data: Data to be dumped into json. By default only ``. :param json_dumps_params: A dictionary of kwargs passed to json.dumps().",
    "type": "class",
    "file_path": "django\\django\\http\\response.py",
    "ast_data": "ClassDef name:JsonResponse FunctionDef name:__init__ arg:self arg:data arg:encoder arg:safe arg:json_dumps_params arguments arg arg arg arg arg arg If BoolOp Call Raise Call If Compare Assign Call Assign Call Call Call"
  },
  {
    "library": "scipy",
    "name": "HolderTable",
    "source_code": "class HolderTable(Benchmark):\n\n    def __init__(self, dimensions=2):\n        Benchmark.__init__(self, dimensions)\n        self._bounds = list(zip([-10.0] * self.N, [10.0] * self.N))\n        self.global_optimum = [(8.055023472141116, 9.664590028909654), (-8.055023472141116, 9.664590028909654), (8.055023472141116, -9.664590028909654), (-8.055023472141116, -9.664590028909654)]\n        self.fglob = -19.20850256788675\n\n    def fun(self, x, *args):\n        self.nfev += 1\n        return -abs(sin(x[0]) * cos(x[1]) * exp(abs(1 - sqrt(x[0] ** 2 + x[1] ** 2) / pi)))",
    "docstring": "HolderTable objective function. This class defines the HolderTable [1]_ global optimization problem. This is a multimodal minimization problem defined as follows: .. math:: f_{\\text{HolderTable}}({x}) = - \\left|{e^{\\left|{1 - \\frac{\\sqrt{x_{1}^{2} + x_{2}^{2}}}{\\pi} }\\right|} \\sin\\left(x_{1}\\right) \\cos\\left(x_{2}\\right)}\\right| with :math: for :math:. *Global optimum*: :math: for :math: for :math: .. [1] Gavana, A. Global Optimization Benchmarks and AMPGO retrieved 2015 TODO: Jamil #146 equation is wrong - should be squaring the x1 and x2 terms, but isn't. Gavana does.",
    "type": "class",
    "file_path": "scipy\\benchmarks\\benchmarks\\go_benchmark_functions\\go_funcs_H.py",
    "ast_data": "ClassDef name:HolderTable FunctionDef name:__init__ arg:self arg:dimensions arguments arg arg Call Assign Call Call Assign Assign FunctionDef name:fun arg:self arg:x arguments arg arg arg Return return:yes Call Call Call Call Call Call"
  },
  {
    "library": "pandas",
    "name": "set_clipboard",
    "source_code": "def set_clipboard(clipboard):\n    global copy, paste\n    clipboard_types = {'pbcopy': init_osx_pbcopy_clipboard, 'pyobjc': init_osx_pyobjc_clipboard, 'qt': init_qt_clipboard, 'xclip': init_xclip_clipboard, 'xsel': init_xsel_clipboard, 'wl-clipboard': init_wl_clipboard, 'klipper': init_klipper_clipboard, 'windows': init_windows_clipboard, 'no': init_no_clipboard}\n    if clipboard not in clipboard_types:\n        allowed_clipboard_types = [repr(_) for _ in clipboard_types]\n        raise ValueError(f'Argument must be one of {', '.join(allowed_clipboard_types)}')\n    copy, paste = clipboard_types[clipboard]()",
    "docstring": "Explicitly sets the clipboard mechanism. The \"clipboard mechanism\" is how the copy() and paste() functions interact with the operating system to implement the copy/paste feature. The clipboard parameter must be one of: - pbcopy - pyobjc (default on macOS) - qt - xclip - xsel - klipper - windows (default on Windows) - no (this is what is set when no clipboard mechanism can be found)",
    "type": "function",
    "file_path": "pandas\\pandas\\io\\clipboard\\__init__.py",
    "ast_data": "FunctionDef name:set_clipboard arg:clipboard arguments arg Assign If Compare Assign Call Raise Call Call Assign Call"
  },
  {
    "library": "matplotlib",
    "name": "set_x",
    "source_code": "def set_x(self, x):\n    self._x = x\n    self.stale = True",
    "docstring": "Set the left coord of the rectangle. Parameters ---------- x : float",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\patches.py",
    "ast_data": "FunctionDef name:set_x arg:self arg:x arguments arg arg Assign Assign"
  },
  {
    "library": "kornia",
    "name": "__init__",
    "source_code": "def __init__(self, height: int, width: int, interpolation_mode: str='bilinear') -> None:\n    super().__init__()\n    self.size = (height, width)\n    self.interpolation_mode = interpolation_mode",
    "docstring": "Construct ResizePreprocessor module. Args: height: height of the resized image. width: width of the resized image. interpolation_mode: interpolation mode for image resizing. Supported values: ``.",
    "type": "method",
    "file_path": "kornia\\kornia\\models\\utils.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:height arg:width arg:interpolation_mode arguments arg arg arg arg Call Call Assign Assign"
  },
  {
    "library": "matplotlib",
    "name": "get_anchor",
    "source_code": "def get_anchor(self):\n    return self._anchor",
    "docstring": "Get the anchor location. See Also -------- matplotlib.axes.Axes.set_anchor for a description of the anchor. matplotlib.axes.Axes.set_aspect for a description of aspect handling.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\axes\\_base.py",
    "ast_data": "FunctionDef name:get_anchor arg:self arguments arg Return return:yes"
  },
  {
    "library": "django",
    "name": "resolve_callables",
    "source_code": "def resolve_callables(mapping):\n    for k, v in mapping.items():\n        yield (k, v() if callable(v) else v)",
    "docstring": "Generate key/value pairs for the given mapping where the values are evaluated if they're callable.",
    "type": "function",
    "file_path": "django\\django\\db\\models\\utils.py",
    "ast_data": "FunctionDef name:resolve_callables arg:mapping arguments arg For Call Call Call"
  },
  {
    "library": "scikit-learn",
    "name": "_rotate",
    "source_code": "def _rotate(self, components, n_components=None, tol=1e-06):\n    return _ortho_rotation(components.T, method=self.rotation, tol=tol)[:self.n_components]",
    "docstring": "Rotate the factor analysis solution.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\decomposition\\_factor_analysis.py",
    "ast_data": "FunctionDef name:_rotate arg:self arg:components arg:n_components arg:tol arguments arg arg arg arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "Enter",
    "source_code": "def Enter(self):\n    graph = ops.get_default_graph()\n    self._context_stack.append(graph._get_control_flow_context())\n    graph._set_control_flow_context(self)",
    "docstring": "Enter this control flow context.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\control_flow_ops.py",
    "ast_data": "FunctionDef name:Enter arg:self arguments arg Assign Call Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "release",
    "source_code": "def release(self):\n    self._func_graph = None",
    "docstring": "Call off the FuncGraph deletion.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\polymorphic_function\\concrete_function.py",
    "ast_data": "FunctionDef name:release arg:self arguments arg Assign"
  },
  {
    "library": "pytorch",
    "name": "reduce_scatter_tensor",
    "source_code": "def reduce_scatter_tensor(self: torch.Tensor, reduceOp: str, scatter_dim: int, group: RANK_TYPES, tag: str=''):\n    group_name = _resolve_group_name(group, tag)\n    group_size = c10d._get_group_size_by_name(group_name)\n    assert self.size(scatter_dim) % group_size == 0, f'input dimension 0 ({self.size(0)} must be a multiple of group_size {group_size})'\n    if scatter_dim != 0:\n        tensor_list = torch.chunk(self, group_size, dim=scatter_dim)\n        self = torch.cat(tensor_list)\n    tensor = torch.ops._c10d_functional.reduce_scatter_tensor(self, reduceOp.lower(), group_size, group_name)\n    res = _maybe_wrap_tensor(tensor)\n    return res",
    "docstring": "Reduces the tensor data across all machines in such a way that all get the final result, then scatter the results to corresponding ranks. The input tensor is left unmodified. Group can be one of: List[int]: ranks participating in the collective. List[List[int]]: 2D mesh of ranks taking part of this collective in MPMD. ProcessGroup: Will perform a collective using the ranks and tag of the PG. DeviceMesh: Do a SPMD collective over all ranks of the mesh (DeviceMesh, int): Do a MPMD collective over one dimension of the DeviceMesh :: N.B. If you pass a PG or a 1D list to perform a MPMD collective, the compiler won't be able to recover that information and perform collective algebraic optimization. Use other forms of input for that.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\_functional_collectives.py",
    "ast_data": "FunctionDef name:reduce_scatter_tensor arg:self arg:reduceOp arg:scatter_dim arg:group arg:tag arguments arg arg arg arg arg Assign Call Assign Call Compare Call Call If Compare Assign Call Assign Call Assign Call Call Assign Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "get_device",
    "source_code": "def get_device(node) -> Optional[torch.device]:\n    if 'val' not in node.meta:\n        return None\n    candidates = node.meta['val']\n    if not isinstance(candidates, tuple):\n        candidates = (candidates,)\n    for candidate in candidates:\n        if isinstance(candidate, torch.Tensor):\n            if candidate.device.type == 'cuda':\n                return candidate.device\n    return torch.device('cpu')",
    "docstring": "Check the example value of the node outputs to find the device type.",
    "type": "function",
    "file_path": "pytorch\\torch\\_functorch\\partitioners.py",
    "ast_data": "FunctionDef name:get_device arg:node arguments arg If Compare Return return:no Assign If Call Assign For If Call If Compare Return return:yes Return return:yes Call"
  },
  {
    "library": "sphinx",
    "name": "get_original_image_uri",
    "source_code": "def get_original_image_uri(self, name: str) -> str:\n    while _StrPath(name) in self.env.original_image_uri:\n        name = self.env.original_image_uri[_StrPath(name)]\n    return name",
    "docstring": "Get the original image URI.",
    "type": "method",
    "file_path": "sphinx\\sphinx\\environment\\adapters\\asset.py",
    "ast_data": "FunctionDef name:get_original_image_uri arg:self arg:name arguments arg arg While Compare Call Assign Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "get_prologue_template_epilogue",
    "source_code": "@staticmethod\ndef get_prologue_template_epilogue(nodes: list[BaseSchedulerNode]) -> tuple[list[BaseSchedulerNode], BaseSchedulerNode, list[BaseSchedulerNode]]:\n    template_index = next((i for i, n in enumerate(nodes) if n.is_template()))\n    prologue = nodes[:template_index]\n    template_node = nodes[template_index]\n    epilogue = nodes[template_index + 1:]\n    return (prologue, template_node, epilogue)",
    "docstring": "For the list of nodes, get the prologue, template, and epilogue",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\scheduler.py",
    "ast_data": "FunctionDef name:get_prologue_template_epilogue arg:nodes arguments arg Assign Call Call Call Assign Assign Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "get_slot_names",
    "source_code": "def get_slot_names(self):\n    return sorted(self._slots.keys())",
    "docstring": "Return a list of the names of slots created by the . See . Returns: A list of strings.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\training\\optimizer.py",
    "ast_data": "FunctionDef name:get_slot_names arg:self arguments arg Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_SelfAdjointEigV2Grad",
    "source_code": "@ops.RegisterGradient('SelfAdjointEigV2')\ndef _SelfAdjointEigV2Grad(op: ops.Operation, grad_e, grad_v):\n    e = op.outputs[0]\n    compute_v = op.get_attr('compute_v')\n    with ops.control_dependencies([grad_e, grad_v]):\n        if compute_v:\n            v = op.outputs[1]\n            f = array_ops.matrix_set_diag(_SafeReciprocal(array_ops.expand_dims(e, -2) - array_ops.expand_dims(e, -1)), array_ops.zeros_like(e))\n            grad_a = math_ops.matmul(v, math_ops.matmul(array_ops.matrix_diag(grad_e) + f * math_ops.matmul(v, grad_v, adjoint_a=True), v, adjoint_b=True))\n        else:\n            _, v = linalg_ops.self_adjoint_eig(op.inputs[0])\n            grad_a = math_ops.matmul(v, math_ops.matmul(array_ops.matrix_diag(grad_e), v, adjoint_b=True))\n        grad_a = array_ops.matrix_band_part(grad_a + _linalg.adjoint(grad_a), -1, 0)\n        grad_a = array_ops.matrix_set_diag(grad_a, 0.5 * array_ops.matrix_diag_part(grad_a))\n        return grad_a",
    "docstring": "Gradient for SelfAdjointEigV2.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\linalg_grad.py",
    "ast_data": "FunctionDef name:_SelfAdjointEigV2Grad arg:op arg:grad_e arg:grad_v arguments arg arg arg Assign Assign Call With Call If Assign Assign Call Call Call Call Call Assign Call Call Call Call Assign Call Assign Call Call Call Assign Call Call Assign Call Call Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "_validate_dt64_dtype",
    "source_code": "def _validate_dt64_dtype(dtype):\n    if dtype is not None:\n        dtype = pandas_dtype(dtype)\n        if dtype == np.dtype('M8'):\n            msg = \"Passing in 'datetime64' dtype with no precision is not allowed. Please pass in 'datetime64[ns]' instead.\"\n            raise ValueError(msg)\n        if isinstance(dtype, np.dtype) and (dtype.kind != 'M' or not is_supported_dtype(dtype)) or not isinstance(dtype, (np.dtype, DatetimeTZDtype)):\n            raise ValueError(f\"Unexpected value for 'dtype': '{dtype}'. Must be 'datetime64[s]', 'datetime64[ms]', 'datetime64[us]', 'datetime64[ns]' or DatetimeTZDtype'.\")\n        if getattr(dtype, 'tz', None):\n            dtype = cast(DatetimeTZDtype, dtype)\n            dtype = DatetimeTZDtype(unit=dtype.unit, tz=timezones.tz_standardize(dtype.tz))\n    return dtype",
    "docstring": "Check that a dtype, if passed, represents either a numpy datetime64[ns] dtype or a pandas DatetimeTZDtype. Parameters ---------- dtype : object Returns ------- dtype : None, numpy.dtype, or DatetimeTZDtype Raises ------ ValueError : invalid dtype Notes ----- Unlike _validate_tz_from_dtype, this does _not_ allow non-existent tz errors to go through",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\arrays\\datetimes.py",
    "ast_data": "FunctionDef name:_validate_dt64_dtype arg:dtype arguments arg If Compare Assign Call If Compare Call Assign Raise Call If BoolOp BoolOp Call BoolOp Compare Call Call Raise Call If Call Assign Call Assign Call Call Return return:yes"
  },
  {
    "library": "pandas",
    "name": "_border_expander",
    "source_code": "def _border_expander(side: str='') -> Callable:\n    if side != '':\n        side = f'-{side}'\n\n    def expand(self: CSSResolver, prop: str, value: str) -> Generator[tuple[str, str]]:\n        tokens = value.split()\n        if len(tokens) == 0 or len(tokens) > 3:\n            warnings.warn(f'Too many tokens provided to \"{prop}\" (expected 1-3)', CSSWarning, stacklevel=find_stack_level())\n        border_declarations = {f'border{side}-color': 'black', f'border{side}-style': 'none', f'border{side}-width': 'medium'}\n        for token in tokens:\n            if token.lower() in self.BORDER_STYLES:\n                border_declarations[f'border{side}-style'] = token\n            elif any((ratio in token.lower() for ratio in self.BORDER_WIDTH_RATIOS)):\n                border_declarations[f'border{side}-width'] = token\n            else:\n                border_declarations[f'border{side}-color'] = token\n        yield from self.atomize(border_declarations.items())\n    return expand",
    "docstring": "Wrapper to expand 'border' property into border color, style, and width properties Parameters ---------- side : str The border side to expand into properties Returns ------- function: Return to call when a 'border(-{side}): {value}' string is encountered",
    "type": "function",
    "file_path": "pandas\\pandas\\io\\formats\\css.py",
    "ast_data": "FunctionDef name:_border_expander arg:side arguments arg If Compare Assign FunctionDef name:expand arg:self arg:prop arg:value arguments arg arg arg Assign Call If BoolOp Compare Call Compare Call Call Call Assign For If Compare Call Assign If Call Compare Call Assign Assign Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "AttributeParameter",
    "source_code": "@dataclasses.dataclass(frozen=True)\nclass AttributeParameter:\n    name: str\n    type: ir.AttributeType\n    required: bool\n    default: ir.Attr | None = None\n\n    def __str__(self) -> str:\n        type_str = self.type.name\n        if self.has_default():\n            return f'{self.name}: {type_str} = {self.default}'\n        return f'{self.name}: {type_str}'\n\n    def has_default(self) -> bool:\n        return self.default is not None",
    "docstring": "A parameter in the function signature that represents an ONNX attribute.",
    "type": "class",
    "file_path": "pytorch\\torch\\onnx\\_internal\\exporter\\_schemas.py",
    "ast_data": "ClassDef name:AttributeParameter FunctionDef name:__str__ arg:self arguments arg Assign If Call Return return:yes Return return:yes FunctionDef name:has_default arg:self arguments arg Return return:yes Compare Call"
  },
  {
    "library": "pandas",
    "name": "maybe_set_size",
    "source_code": "def maybe_set_size(self, min_itemsize=None) -> None:\n    if self.kind == 'string':\n        if isinstance(min_itemsize, dict):\n            min_itemsize = min_itemsize.get(self.name)\n        if min_itemsize is not None and self.typ.itemsize < min_itemsize:\n            self.typ = _tables().StringCol(itemsize=min_itemsize, pos=self.pos)",
    "docstring": "maybe set a string col itemsize: min_itemsize can be an integer or a dict with this columns name with an integer size",
    "type": "method",
    "file_path": "pandas\\pandas\\io\\pytables.py",
    "ast_data": "FunctionDef name:maybe_set_size arg:self arg:min_itemsize arguments arg arg If Compare If Call Assign Call If BoolOp Compare Compare Assign Call Call"
  },
  {
    "library": "pandas",
    "name": "_format_native_types",
    "source_code": "def _format_native_types(self, *, na_rep: str | float='NaT', date_format=None) -> npt.NDArray[np.object_]:\n    raise AbstractMethodError(self)",
    "docstring": "Helper method for astype when converting to strings. Returns ------- ndarray[str]",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\arrays\\datetimelike.py",
    "ast_data": "FunctionDef name:_format_native_types arg:self arguments arg arg arg Raise Call"
  },
  {
    "library": "pandas",
    "name": "_check_arg_length",
    "source_code": "def _check_arg_length(fname, args, max_fname_arg_count, compat_args) -> None:\n    if max_fname_arg_count < 0:\n        raise ValueError(\"'max_fname_arg_count' must be non-negative\")\n    if len(args) > len(compat_args):\n        max_arg_count = len(compat_args) + max_fname_arg_count\n        actual_arg_count = len(args) + max_fname_arg_count\n        argument = 'argument' if max_arg_count == 1 else 'arguments'\n        raise TypeError(f'{fname}() takes at most {max_arg_count} {argument} ({actual_arg_count} given)')",
    "docstring": "Checks whether 'args' has length of at most 'compat_args'. Raises a TypeError if that is not the case, similar to in Python when a function is called with too many arguments.",
    "type": "function",
    "file_path": "pandas\\pandas\\util\\_validators.py",
    "ast_data": "FunctionDef name:_check_arg_length arg:fname arg:args arg:max_fname_arg_count arg:compat_args arguments arg arg arg arg If Compare Raise Call If Compare Call Call Assign Call Assign Call Assign Compare Raise Call"
  },
  {
    "library": "tensorflow",
    "name": "saving_errors",
    "source_code": "@property\ndef saving_errors(self):\n    return self._saving_errors",
    "docstring": "Returns set of errors preventing this FuncGraph from being saved.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\func_graph.py",
    "ast_data": "FunctionDef name:saving_errors arg:self arguments arg Return return:yes"
  },
  {
    "library": "scipy",
    "name": "_mwu_input_validation",
    "source_code": "def _mwu_input_validation(x, y, use_continuity, alternative, axis, method):\n    x, y = (np.atleast_1d(x), np.atleast_1d(y))\n    if np.isnan(x).any() or np.isnan(y).any():\n        raise ValueError('`x` and `y` must not contain NaNs.')\n    if np.size(x) == 0 or np.size(y) == 0:\n        raise ValueError('`x` and `y` must be of nonzero size.')\n    bools = {True, False}\n    if use_continuity not in bools:\n        raise ValueError(f'`use_continuity` must be one of {bools}.')\n    alternatives = {'two-sided', 'less', 'greater'}\n    alternative = alternative.lower()\n    if alternative not in alternatives:\n        raise ValueError(f'`alternative` must be one of {alternatives}.')\n    axis_int = int(axis)\n    if axis != axis_int:\n        raise ValueError('`axis` must be an integer.')\n    if not isinstance(method, stats.PermutationMethod):\n        methods = {'asymptotic', 'exact', 'auto'}\n        method = method.lower()\n        if method not in methods:\n            raise ValueError(f'`method` must be one of {methods}.')\n    return (x, y, use_continuity, alternative, axis_int, method)",
    "docstring": "Input validation and standardization for mannwhitneyu",
    "type": "function",
    "file_path": "scipy\\scipy\\stats\\_mannwhitneyu.py",
    "ast_data": "FunctionDef name:_mwu_input_validation arg:x arg:y arg:use_continuity arg:alternative arg:axis arg:method arguments arg arg arg arg arg arg Assign Call Call If BoolOp Call Call Call Call Raise Call If BoolOp Compare Call Compare Call Raise Call Assign If Compare Raise Call Assign Assign Call If Compare Raise Call Assign Call If Compare Raise Call If Call Assign Assign Call If Compare Raise Call Return return:yes"
  },
  {
    "library": "numpy",
    "name": "tiny",
    "source_code": "@property\ndef tiny(self):\n    return self.smallest_normal",
    "docstring": "Return the value for tiny, alias of smallest_normal. Returns ------- tiny : float Value for the smallest normal, alias of smallest_normal. Warns ----- UserWarning If the calculated value for the smallest normal is requested for double-double.",
    "type": "method",
    "file_path": "numpy\\numpy\\_core\\getlimits.py",
    "ast_data": "FunctionDef name:tiny arg:self arguments arg Return return:yes"
  },
  {
    "library": "pandas",
    "name": "PandasObject",
    "source_code": "class PandasObject(DirNamesMixin):\n    _cache: dict[str, Any]\n\n    @property\n    def _constructor(self) -> type[Self]:\n        return type(self)\n\n    def __repr__(self) -> str:\n        return object.__repr__(self)\n\n    def _reset_cache(self, key: str | None=None) -> None:\n        if not hasattr(self, '_cache'):\n            return\n        if key is None:\n            self._cache.clear()\n        else:\n            self._cache.pop(key, None)\n\n    def __sizeof__(self) -> int:\n        memory_usage = getattr(self, 'memory_usage', None)\n        if memory_usage:\n            mem = memory_usage(deep=True)\n            return int(mem if is_scalar(mem) else mem.sum())\n        return super().__sizeof__()",
    "docstring": "Baseclass for various pandas objects.",
    "type": "class",
    "file_path": "pandas\\pandas\\core\\base.py",
    "ast_data": "ClassDef name:PandasObject FunctionDef name:_constructor arg:self arguments arg Return return:yes Call FunctionDef name:__repr__ arg:self arguments arg Return return:yes Call FunctionDef name:_reset_cache arg:self arg:key arguments arg arg If Call Return return:no If Compare Call Call FunctionDef name:__sizeof__ arg:self arguments arg Assign Call If Assign Call Return return:yes Call Call Call Return return:yes Call Call"
  },
  {
    "library": "kornia",
    "name": "ModelBase",
    "source_code": "class ModelBase(ABC, Module, Generic[ModelConfig]):\n\n    def load_checkpoint(self, checkpoint: str, device: Optional[torch.device]=None) -> None:\n        if os.path.isfile(checkpoint):\n            with open(checkpoint, 'rb') as f:\n                state_dict = torch.load(f, map_location=device)\n        else:\n            state_dict = torch.hub.load_state_dict_from_url(checkpoint, map_location=device)\n        self.load_state_dict(state_dict)\n\n    @staticmethod\n    @abstractmethod\n    def from_config(config: ModelConfig) -> ModelBase[ModelConfig]:\n        raise NotImplementedError\n\n    def compile(self, *, fullgraph: bool=False, dynamic: bool=False, backend: str='inductor', mode: Optional[str]=None, options: Optional[dict[Any, Any]]=None, disable: bool=False) -> ModelBase[ModelConfig]:\n        compiled = torch.compile(self, fullgraph=fullgraph, dynamic=dynamic, backend=backend, mode=mode, options=options, disable=disable)\n        compiled = cast(ModelBase[ModelConfig], compiled)\n        return compiled",
    "docstring": "Abstract model class with some utilities function.",
    "type": "class",
    "file_path": "kornia\\kornia\\contrib\\models\\base.py",
    "ast_data": "ClassDef name:ModelBase FunctionDef name:load_checkpoint arg:self arg:checkpoint arg:device arguments arg arg arg If Call With Call Assign Call Assign Call Call FunctionDef name:from_config arg:config arguments arg Raise FunctionDef name:compile arg:self arguments arg arg arg arg arg arg arg Assign Call Assign Call Return return:yes"
  },
  {
    "library": "pandas",
    "name": "_check_comparison_types",
    "source_code": "def _check_comparison_types(result: ArrayLike | bool, a: ArrayLike, b: Scalar | Pattern) -> None:\n    if is_bool(result) and isinstance(a, np.ndarray):\n        type_names = [type(a).__name__, type(b).__name__]\n        type_names[0] = f'ndarray(dtype={a.dtype})'\n        raise TypeError(f'Cannot compare types {type_names[0]!r} and {type_names[1]!r}')",
    "docstring": "Raises an error if the two arrays (a,b) cannot be compared. Otherwise, returns the comparison result as expected.",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\array_algos\\replace.py",
    "ast_data": "FunctionDef name:_check_comparison_types arg:result arg:a arg:b arguments arg arg arg If BoolOp Call Call Assign Call Call Assign Raise Call"
  },
  {
    "library": "django",
    "name": "Table",
    "source_code": "class Table(Reference):\n\n    def __init__(self, table, quote_name):\n        self.table = table\n        self.quote_name = quote_name\n\n    def references_table(self, table):\n        return self.table == table\n\n    def references_index(self, table, index):\n        return self.references_table(table) and str(self) == index\n\n    def rename_table_references(self, old_table, new_table):\n        if self.table == old_table:\n            self.table = new_table\n\n    def __str__(self):\n        return self.quote_name(self.table)",
    "docstring": "Hold a reference to a table.",
    "type": "class",
    "file_path": "django\\django\\db\\backends\\ddl_references.py",
    "ast_data": "ClassDef name:Table FunctionDef name:__init__ arg:self arg:table arg:quote_name arguments arg arg arg Assign Assign FunctionDef name:references_table arg:self arg:table arguments arg arg Return return:yes Compare FunctionDef name:references_index arg:self arg:table arg:index arguments arg arg arg Return return:yes BoolOp Call Compare Call FunctionDef name:rename_table_references arg:self arg:old_table arg:new_table arguments arg arg arg If Compare Assign FunctionDef name:__str__ arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, loss=None, predictions=None, metrics=None):\n    if loss is not None:\n        loss_dict = self._wrap_and_check_outputs(loss, self.LOSS_NAME)\n        self._loss = self._prefix_output_keys(loss_dict, self.LOSS_NAME)\n    if predictions is not None:\n        pred_dict = self._wrap_and_check_outputs(predictions, self.PREDICTIONS_NAME)\n        self._predictions = self._prefix_output_keys(pred_dict, self.PREDICTIONS_NAME)\n    if metrics is not None:\n        self._metrics = self._wrap_and_check_metrics(metrics)",
    "docstring": "Constructor for SupervisedOutput (ie, Train or Eval output). Args: loss: dict of Tensors or single Tensor representing calculated loss. predictions: dict of Tensors or single Tensor representing model predictions. metrics: Dict of metric results keyed by name. The values of the dict can be one of the following: (1) instance of class. (2) (metric_value, update_op) tuples, or a single tuple. metric_value must be a Tensor, and update_op must be a Tensor or Op. Raises: ValueError: if any of the outputs' dict keys are not strings or tuples of strings or the values are not Tensors (or Operations in the case of update_op).",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\saved_model\\model_utils\\export_output.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:loss arg:predictions arg:metrics arguments arg arg arg arg If Compare Assign Call Assign Call If Compare Assign Call Assign Call If Compare Assign Call"
  },
  {
    "library": "matplotlib",
    "name": "rc_params_from_file",
    "source_code": "def rc_params_from_file(fname, fail_on_error=False, use_default_template=True):\n    config_from_file = _rc_params_in_file(fname, fail_on_error=fail_on_error)\n    if not use_default_template:\n        return config_from_file\n    with _api.suppress_matplotlib_deprecation_warning():\n        config = RcParams({**rcParamsDefault, **config_from_file})\n    if ''.join(config['text.latex.preamble']):\n        _log.info('\\n*****************************************************************\\nYou have the following UNSUPPORTED LaTeX preamble customizations:\\n%s\\nPlease do not ask for support with these customizations active.\\n*****************************************************************\\n', '\\n'.join(config['text.latex.preamble']))\n    _log.debug('loaded rc file %s', fname)\n    return config",
    "docstring": "Construct a from file *fname*. Parameters ---------- fname : str or path-like A file with Matplotlib rc settings. fail_on_error : bool If True, raise an error when the parser fails to convert a parameter. use_default_template : bool If True, initialize with default parameters before updating with those in the given file. If False, the configuration class only contains the parameters specified in the file. (Useful for updating dicts.)",
    "type": "function",
    "file_path": "matplotlib\\lib\\matplotlib\\__init__.py",
    "ast_data": "FunctionDef name:rc_params_from_file arg:fname arg:fail_on_error arg:use_default_template arguments arg arg arg Assign Call If Return return:yes With Call Assign Call If Call Call Call Call Return return:yes"
  },
  {
    "library": "cryptography",
    "name": "__copy__",
    "source_code": "@abc.abstractmethod\ndef __copy__(self) -> EllipticCurvePublicKey:\n    pass",
    "docstring": "Returns a copy.",
    "type": "method",
    "file_path": "cryptography\\src\\cryptography\\hazmat\\primitives\\asymmetric\\ec.py",
    "ast_data": "FunctionDef name:__copy__ arg:self arguments arg"
  },
  {
    "library": "pandas",
    "name": "max",
    "source_code": "def max(self, *, axis: AxisInt | None=None, skipna: bool=True):\n    nv.validate_minmax_axis(axis, self.ndim)\n    return self._min_max('max', skipna=skipna)",
    "docstring": "Max of array values, ignoring NA values if specified. Parameters ---------- axis : int, default 0 Not Used. NumPy compatibility. skipna : bool, default True Whether to ignore NA values. Returns ------- scalar",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\arrays\\sparse\\array.py",
    "ast_data": "FunctionDef name:max arg:self arguments arg arg arg Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_load_graph_execution_traces",
    "source_code": "def _load_graph_execution_traces(self):\n    for i, traces_iter in enumerate(self._reader.graph_execution_traces_iterators()):\n        for debug_event, offset in traces_iter:\n            self._graph_execution_trace_digests.append(self._graph_execution_trace_digest_from_debug_event_proto(debug_event, (i, offset)))\n            if self._monitors:\n                graph_execution_trace = self._graph_execution_trace_from_debug_event_proto(debug_event, (i, offset))\n                for monitor in self._monitors:\n                    monitor.on_graph_execution_trace(len(self._graph_execution_trace_digests) - 1, graph_execution_trace)",
    "docstring": "Incrementally load the .graph_execution_traces file.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\debug\\lib\\debug_events_reader.py",
    "ast_data": "FunctionDef name:_load_graph_execution_traces arg:self arguments arg For Call Call For Call Call If Assign Call For Call Call"
  },
  {
    "library": "django",
    "name": "id_for_label",
    "source_code": "def id_for_label(self, id_, index=None):\n    if index is None:\n        return ''\n    return super().id_for_label(id_, index)",
    "docstring": "Don't include for=\"field_0\" in to improve accessibility when using a screen reader, in addition clicking such a label would toggle the first input.",
    "type": "method",
    "file_path": "django\\django\\forms\\widgets.py",
    "ast_data": "FunctionDef name:id_for_label arg:self arg:id_ arg:index arguments arg arg arg If Compare Return return:yes Return return:yes Call Call"
  },
  {
    "library": "scikit-learn",
    "name": "KeyValTuple",
    "source_code": "class KeyValTuple(tuple):\n\n    def __repr__(self):\n        return super().__repr__()",
    "docstring": "Dummy class for correctly rendering key-value tuples from dicts.",
    "type": "class",
    "file_path": "scikit-learn\\sklearn\\utils\\_pprint.py",
    "ast_data": "ClassDef name:KeyValTuple FunctionDef name:__repr__ arg:self arguments arg Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "_register_flat_param",
    "source_code": "def _register_flat_param(state: _FSDPState, module: nn.Module) -> None:\n    handle = _module_handle(state, module)\n    if _has_fsdp_params(state, module):\n        cast(nn.Module, module.module)._parameters[FLAT_PARAM] = handle.flat_param",
    "docstring": "Registers the flattened parameter to the wrapped module, making it visible to `nn.Module.register_parameter` methods.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\fsdp\\_unshard_param_utils.py",
    "ast_data": "FunctionDef name:_register_flat_param arg:state arg:module arguments arg arg Assign Call If Call Assign Call"
  },
  {
    "library": "pytorch",
    "name": "LinearTanh",
    "source_code": "class LinearTanh(_FusedModule):\n\n    def __init__(self, linear, tanh):\n        assert type(linear) == Linear and type(tanh) == torch.nn.Tanh, f'Incorrect types for input modules{type(linear)}{type(tanh)}'\n        super().__init__(linear, tanh)",
    "docstring": "This is a sequential container which calls the Linear and Tanh modules. During quantization this will be replaced with the corresponding fused module.",
    "type": "class",
    "file_path": "pytorch\\torch\\ao\\nn\\intrinsic\\modules\\fused.py",
    "ast_data": "ClassDef name:LinearTanh FunctionDef name:__init__ arg:self arg:linear arg:tanh arguments arg arg arg BoolOp Compare Call Compare Call Call Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "_to_replicate_tensor",
    "source_code": "def _to_replicate_tensor(self, local_tensor: torch.Tensor, mesh: DeviceMesh, mesh_dim: int, current_logical_shape: list[int]) -> torch.Tensor:\n    num_chunks = mesh.size(mesh_dim=mesh_dim)\n    logical_dim_size = current_logical_shape[self.dim]\n    is_padded = logical_dim_size % num_chunks != 0\n    if is_padded:\n        full_chunk_size = (logical_dim_size + num_chunks - 1) // num_chunks\n        pad_size = full_chunk_size - local_tensor.size(self.dim)\n        local_tensor = pad_tensor(local_tensor, self.dim, pad_size)\n    if not local_tensor.is_contiguous():\n        local_tensor = local_tensor.contiguous()\n    result = funcol.all_gather_tensor(local_tensor, gather_dim=self.dim, group=(mesh, mesh_dim))\n    if is_padded:\n        unpad_size = full_chunk_size * num_chunks - logical_dim_size\n        result = unpad_tensor(result, self.dim, unpad_size)\n    return result",
    "docstring": "This function all_gather all shards and return a tensor that is replicated on the previously sharded mesh dimension",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\tensor\\placement_types.py",
    "ast_data": "FunctionDef name:_to_replicate_tensor arg:self arg:local_tensor arg:mesh arg:mesh_dim arg:current_logical_shape arguments arg arg arg arg arg Assign Call Assign Assign Compare If Assign Assign Call Assign Call If Call Assign Call Assign Call If Assign Assign Call Return return:yes"
  },
  {
    "library": "django",
    "name": "GEOSContextHandle",
    "source_code": "class GEOSContextHandle(GEOSBase):\n    ptr_type = CONTEXT_PTR\n    destructor = lgeos.finishGEOS_r\n\n    def __init__(self):\n        self.ptr = lgeos.initGEOS_r(notice_h, error_h)",
    "docstring": "Represent a GEOS context handle.",
    "type": "class",
    "file_path": "django\\django\\contrib\\gis\\geos\\prototypes\\threadsafe.py",
    "ast_data": "ClassDef name:GEOSContextHandle Assign Assign FunctionDef name:__init__ arg:self arguments arg Assign Call"
  },
  {
    "library": "pytorch",
    "name": "module_inputs",
    "source_code": "def module_inputs(self) -> Sequence[torch.fx.Node]:\n    nodes = list(self.fx_nodes())\n    assert len(nodes) > 0, 'Cannot extract module inputs from empty nodes.'\n    module_inputs: dict[torch.fx.Node, None] = {}\n    node_set: set[torch.fx.Node] = set(nodes)\n\n    def _extract_arg_if_node_outside_module(arg: Any):\n        if isinstance(arg, torch.fx.Node) and arg not in node_set:\n            module_inputs[arg] = None\n    for node in nodes:\n        pytree.tree_map(_extract_arg_if_node_outside_module, node.args)\n        pytree.tree_map(_extract_arg_if_node_outside_module, node.kwargs)\n    return list(module_inputs.keys())",
    "docstring": "Extract module inputs from the sequence of fx nodes this instance holds. All node args that are produced by nodes outside of the module are considered module inputs. The order of returned module inputs is the same as the their use order. ### Known limitations The original ordering of module inputs is not preserved. There is no meta information to be found from the that can be used to recover the original ordering. Returns: Sequence of module inputs.",
    "type": "method",
    "file_path": "pytorch\\torch\\onnx\\_internal\\fx\\passes\\modularization.py",
    "ast_data": "FunctionDef name:module_inputs arg:self arguments arg Assign Call Call Compare Call Call FunctionDef name:_extract_arg_if_node_outside_module arg:arg arguments arg If BoolOp Call Compare Assign For Call Call Return return:yes Call Call"
  },
  {
    "library": "authlib",
    "name": "validate_require_request_uri_registration",
    "source_code": "def validate_require_request_uri_registration(self):\n    _validate_boolean_value(self, 'require_request_uri_registration')",
    "docstring": "OPTIONAL. Boolean value specifying whether the OP requires any request_uri values used to be pre-registered using the request_uris registration parameter. Pre-registration is REQUIRED when the value is true. If omitted, the default value is false.",
    "type": "method",
    "file_path": "authlib\\authlib\\oidc\\discovery\\models.py",
    "ast_data": "FunctionDef name:validate_require_request_uri_registration arg:self arguments arg Call"
  },
  {
    "library": "django",
    "name": "check_geography",
    "source_code": "def check_geography(self, lookup, template_params):\n    if lookup.lhs.output_field.geography and (not self.geography):\n        template_params['lhs'] += '::geometry'\n    return template_params",
    "docstring": "Convert geography fields to geometry types, if necessary.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\db\\backends\\postgis\\operations.py",
    "ast_data": "FunctionDef name:check_geography arg:self arg:lookup arg:template_params arguments arg arg arg If BoolOp Return return:yes"
  },
  {
    "library": "django",
    "name": "apply_migration",
    "source_code": "def apply_migration(self, state, migration, fake=False, fake_initial=False):\n    migration_recorded = False\n    if self.progress_callback:\n        self.progress_callback('apply_start', migration, fake)\n    if not fake:\n        if fake_initial:\n            applied, state = self.detect_soft_applied(state, migration)\n            if applied:\n                fake = True\n        if not fake:\n            with self.connection.schema_editor(atomic=migration.atomic) as schema_editor:\n                state = migration.apply(state, schema_editor)\n                if not schema_editor.deferred_sql:\n                    self.record_migration(migration.app_label, migration.name)\n                    migration_recorded = True\n    if not migration_recorded:\n        self.record_migration(migration.app_label, migration.name)\n    if self.progress_callback:\n        self.progress_callback('apply_success', migration, fake)\n    return state",
    "docstring": "Run a migration forwards.",
    "type": "method",
    "file_path": "django\\django\\db\\migrations\\executor.py",
    "ast_data": "FunctionDef name:apply_migration arg:self arg:state arg:migration arg:fake arg:fake_initial arguments arg arg arg arg arg Assign If Call If If Assign Call If Assign If With Call Assign Call If Call Assign If Call If Call Return return:yes"
  },
  {
    "library": "kornia",
    "name": "get_colormap",
    "source_code": "def get_colormap(self, num_classes: int, colormap: str='random', manual_seed: int=2147) -> Tensor:\n    if colormap == 'random':\n        g_cpu = torch.Generator()\n        g_cpu.manual_seed(manual_seed)\n        colors = torch.rand(num_classes, 3, generator=g_cpu)\n    else:\n        raise ValueError(f'Unsupported colormap: {colormap}')\n    return colors",
    "docstring": "Get a color map of size num_classes. Args: num_classes: The number of colors in the color map. colormap: The colormap to use, can be \"random\" or a custom color map. manual_seed: The manual seed to use for the colormap. Returns: A tensor of shape (num_classes, 3) representing the color map.",
    "type": "method",
    "file_path": "kornia\\kornia\\models\\segmentation\\base.py",
    "ast_data": "FunctionDef name:get_colormap arg:self arg:num_classes arg:colormap arg:manual_seed arguments arg arg arg arg If Compare Assign Call Call Assign Call Raise Call Return return:yes"
  },
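A hedged sketch of the seeded-colormap pattern used in the record above; `random_colormap` is a local stand-in, not part of kornia's API:

```python
import torch

def random_colormap(num_classes: int, manual_seed: int = 2147) -> torch.Tensor:
    # A dedicated generator keeps the colormap reproducible without
    # touching the global RNG state.
    g_cpu = torch.Generator()
    g_cpu.manual_seed(manual_seed)
    # One RGB triple in [0, 1) per class: shape (num_classes, 3).
    return torch.rand(num_classes, 3, generator=g_cpu)

colors = random_colormap(5)
assert colors.shape == (5, 3)
```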
  {
    "library": "scikit-learn",
    "name": "predict_log_proba",
    "source_code": "@available_if(_estimator_has('predict_log_proba'))\ndef predict_log_proba(self, X, **params):\n    check_is_fitted(self)\n    _raise_for_params(params, self, 'predict_log_proba')\n    if _routing_enabled():\n        routed_params = process_routing(self, 'predict_log_proba', **params)\n    else:\n        routed_params = Bunch(estimator=Bunch(predict_log_proba={}))\n    X = validate_data(self, X, accept_sparse=True, ensure_all_finite=False, reset=False)\n    return self.estimator_.predict_log_proba(X, **routed_params.estimator.predict_log_proba)",
    "docstring": "Predict log probability for each possible outcome. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) Array representing the data. **params : dict of str -> object Parameters to pass to the underlying estimator's `enable_metadata_routing=TrueMetadata Routing User Guide ` for more details. Returns ------- y : ndarray of shape (n_samples, n_features) Array with log prediction probabilities.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\semi_supervised\\_self_training.py",
    "ast_data": "FunctionDef name:predict_log_proba arg:self arg:X arguments arg arg arg Call Call If Call Assign Call Assign Call Call Assign Call Return return:yes Call Call Call"
  },
  {
    "library": "scikit-learn",
    "name": "route_params",
    "source_code": "def route_params(self, *, caller, params):\n    if self._self_request:\n        self._self_request._check_warnings(params=params, method=caller)\n    res = Bunch()\n    for name, route_mapping in self._route_mappings.items():\n        router, mapping = (route_mapping.router, route_mapping.mapping)\n        res[name] = Bunch()\n        for _caller, _callee in mapping:\n            if _caller == caller:\n                res[name][_callee] = router._route_params(params=params, method=_callee, parent=self.owner, caller=caller)\n    return res",
    "docstring": "Return the input parameters requested by child objects. The output of this method is a :class:, which includes the metadata for all methods of each child object that is used in the router's method. If the router is also a consumer, it also checks for warnings of 's/consumer's requested metadata. Parameters ---------- caller : str The name of the method for which the parameters are requested and routed. If called inside the :term: method of a router, it would be . params : dict A dictionary of provided metadata. Returns ------- params : Bunch A :class: of the form `` which can be used to pass the required metadata to corresponding methods or corresponding child objects.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\utils\\_metadata_requests.py",
    "ast_data": "FunctionDef name:route_params arg:self arguments arg arg arg If Call Assign Call For Call Assign Assign Call For If Compare Assign Call Return return:yes"
  },
  {
    "library": "scipy",
    "name": "_cpu_count_user",
    "source_code": "def _cpu_count_user(os_cpu_count):\n    cpu_count_affinity = _cpu_count_affinity(os_cpu_count)\n    cpu_count_cgroup = _cpu_count_cgroup(os_cpu_count)\n    cpu_count_loky = int(os.environ.get('LOKY_MAX_CPU_COUNT', os_cpu_count))\n    return min(cpu_count_affinity, cpu_count_cgroup, cpu_count_loky)",
    "docstring": "Number of user defined available CPUs",
    "type": "function",
    "file_path": "scipy\\dev.py",
    "ast_data": "FunctionDef name:_cpu_count_user arg:os_cpu_count arguments arg Assign Call Assign Call Assign Call Call Return return:yes Call"
  },
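The min-of-constraints idea in a trimmed, standalone form; the cgroup probe from the record is omitted, so treat this as an illustration rather than SciPy's actual helper:

```python
import os

def effective_cpu_count() -> int:
    os_cpu_count = os.cpu_count() or 1
    # Honor CPU pinning where the platform supports it.
    try:
        affinity = len(os.sched_getaffinity(0))
    except AttributeError:
        affinity = os_cpu_count
    # Environment override, mirroring LOKY_MAX_CPU_COUNT in the record.
    user_limit = int(os.environ.get("LOKY_MAX_CPU_COUNT", os_cpu_count))
    return min(affinity, user_limit)

print(effective_cpu_count())
```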
  {
    "library": "pytorch",
    "name": "__reduce_package__",
    "source_code": "def __reduce_package__(self, exporter: PackageExporter):\n    script_module_id = exporter.get_unique_id()\n    exporter.script_module_serializer.serialize(self._c, int(script_module_id))\n    return (unpackage_script_module, (script_module_id,))",
    "docstring": "Save a ScriptModule inside of a `` function.",
    "type": "method",
    "file_path": "pytorch\\torch\\jit\\_script.py",
    "ast_data": "FunctionDef name:__reduce_package__ arg:self arg:exporter arguments arg arg Assign Call Call Call Return return:yes"
  },
  {
    "library": "kornia",
    "name": "__len__",
    "source_code": "def __len__(self) -> int:\n    return self.colors.shape[-1]",
    "docstring": "Return the number of colors in the colormap. Returns: Number of colors in the colormap.",
    "type": "method",
    "file_path": "kornia\\kornia\\color\\colormap.py",
    "ast_data": "FunctionDef name:__len__ arg:self arguments arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_ModState",
    "source_code": "class _ModState(_State):\n    PRE_FW = 'Pre-Forward'\n    POST_FW = 'Post-Forward'\n    PEAK_FW = 'Peak-Forward'\n    PRE_BW = 'Pre-Backward'\n    PRE_FW_AC = 'Pre-Forward-AC'\n    POST_FW_AC = 'Post-Forward-AC'\n    POST_BW = 'Post-Backward'\n    PEAK_BW = 'Peak-Backward'",
    "docstring": "An enum to define the state of a module. - PRE_FW: The module is about to run the forward pass. - POST_FW: The module has finished running the forward pass. - PEAK_FW: The module has reached the peak memory usage during the forward pass. - PRE_BW: The module is about to run the backward pass. - PRE_FW_AC: The module is about to run the forward pass with activation checkpointing. - POST_FW_AC: The module has finished running the forward pass with activation checkpointing. - POST_BW: The module has finished running the backward pass. - PEAK_BW: The module has reached the peak memory usage during the backward pass.",
    "type": "class",
    "file_path": "pytorch\\torch\\distributed\\_tools\\mem_tracker.py",
    "ast_data": "ClassDef name:_ModState Assign Assign Assign Assign Assign Assign Assign Assign"
  },
  {
    "library": "matplotlib",
    "name": "set_verts_and_codes",
    "source_code": "def set_verts_and_codes(self, verts, codes):\n    self.set_verts(verts, closed=False)\n    self._codes3d = codes",
    "docstring": "Set 3D vertices with path codes.",
    "type": "method",
    "file_path": "matplotlib\\lib\\mpl_toolkits\\mplot3d\\art3d.py",
    "ast_data": "FunctionDef name:set_verts_and_codes arg:self arg:verts arg:codes arguments arg arg arg Call Assign"
  },
  {
    "library": "scrapy",
    "name": "handle_spider_output",
    "source_code": "def handle_spider_output(self, result: Iterable[_T] | AsyncIterator[_T], request: Request, response: Response, spider: Spider | None=None) -> Deferred[None]:\n    if spider is not None:\n        warnings.warn(\"Passing a 'spider' argument to Scraper.handle_spider_output() is deprecated.\", category=ScrapyDeprecationWarning, stacklevel=2)\n    return deferred_from_coro(self.handle_spider_output_async(result, request, response))",
    "docstring": "Pass items/requests produced by a callback to `` in parallel.",
    "type": "method",
    "file_path": "scrapy\\scrapy\\core\\scraper.py",
    "ast_data": "FunctionDef name:handle_spider_output arg:self arg:result arg:request arg:response arg:spider arguments arg arg arg arg arg If Compare Call Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "strides",
    "source_code": "@property\ndef strides(self) -> List[int]:\n    return _compute_mesh_strides(self.shape())",
    "docstring": "Returns the strides tensor array for this mesh. If the mesh shape is , then the strides array can be computed as . This array can be useful in computing local device offsets given a device ID. Using the same example, the device coordinates of the mesh can be computed as: This is the same as . Returns: The mesh strides as an integer tensor.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\dtensor\\python\\layout.py",
    "ast_data": "FunctionDef name:strides arg:self arguments arg Return return:yes Call Call"
  },
  {
    "library": "django",
    "name": "path",
    "source_code": "@property\ndef path(self):\n    if self._path.startswith(('http://', 'https://', '/')):\n        return self._path\n    return static(self._path)",
    "docstring": "Ensure an absolute path. Relative paths are resolved via the {% static %} template tag.",
    "type": "method",
    "file_path": "django\\django\\forms\\widgets.py",
    "ast_data": "FunctionDef name:path arg:self arguments arg If Call Return return:yes Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "BulkSaverBuilder",
    "source_code": "class BulkSaverBuilder(BaseSaverBuilder):\n\n    def bulk_restore(self, filename_tensor, saveables, preferred_shard, restore_sequentially):\n        del restore_sequentially\n        restore_specs = []\n        for saveable in saveables:\n            for spec in saveable.specs:\n                restore_specs.append((spec.name, spec.slice_spec, spec.dtype))\n        names, slices, dtypes = zip(*restore_specs)\n        with ops.device('cpu:0'):\n            return io_ops.restore_v2(filename_tensor, names, slices, dtypes)",
    "docstring": "SaverBuilder with support for bulk restoring multiple saveables.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\training\\saver.py",
    "ast_data": "ClassDef name:BulkSaverBuilder FunctionDef name:bulk_restore arg:self arg:filename_tensor arg:saveables arg:preferred_shard arg:restore_sequentially arguments arg arg arg arg arg Assign For For Call Assign Call With Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_on_resource_closure_failure",
    "source_code": "def _on_resource_closure_failure(self, e):\n    logging.info('[Worker %d] Clearing tagged queue after resource closure failure.', self.worker_index)\n    with self._resource_tracking_lock:\n        self._is_dead_with_error = e\n        self._cluster.closure_queue.clear_tag_unlocked(self.worker_index)\n        self._set_resources_aborted(e)",
    "docstring": "Clear tagged queue to ensure resource closures are rebuilt. Args: e: The exception arisen from the resource closure.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\coordinator\\cluster_coordinator.py",
    "ast_data": "FunctionDef name:_on_resource_closure_failure arg:self arg:e arguments arg arg Call With Assign Call Call"
  },
  {
    "library": "matplotlib",
    "name": "_get_transformed_path",
    "source_code": "def _get_transformed_path(self):\n    if self._transformed_path is None:\n        self._transform_path()\n    return self._transformed_path",
    "docstring": "Return this line's .",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\lines.py",
    "ast_data": "FunctionDef name:_get_transformed_path arg:self arguments arg If Compare Call Return return:yes"
  },
  {
    "library": "numpy",
    "name": "choose",
    "source_code": "def choose(indices, choices, out=None, mode='raise'):\n\n    def fmask(x):\n        if x is masked:\n            return True\n        return filled(x)\n\n    def nmask(x):\n        if x is masked:\n            return True\n        return getmask(x)\n    c = filled(indices, 0)\n    masks = [nmask(x) for x in choices]\n    data = [fmask(x) for x in choices]\n    outputmask = np.choose(c, masks, mode=mode)\n    outputmask = make_mask(mask_or(outputmask, getmask(indices)), copy=False, shrink=True)\n    d = np.choose(c, data, mode=mode, out=out).view(MaskedArray)\n    if out is not None:\n        if isinstance(out, MaskedArray):\n            out.__setmask__(outputmask)\n        return out\n    d.__setmask__(outputmask)\n    return d",
    "docstring": "Use an index array to construct a new array from a list of choices. Given an array of integers and a list of n choice arrays, this method will create a new array that merges each of the choice arrays. Where a value in is i, the new array will have the value that choices[i] contains in the same place. Parameters ---------- indices : ndarray of ints This array must contain integers in `dtype`. mode : {'raise', 'wrap', 'clip'}, optional Specifies how out-of-bounds indices will behave. * 'raise' : raise an error * 'wrap' : wrap around * 'clip' : clip to the range Returns ------- merged_array : array See Also -------- choose : equivalent function Examples -------- >>> import numpy as np >>> choice = np.array([[1,1,1], [2,2,2], [3,3,3]]) >>> a = np.array([2, 1, 0]) >>> np.ma.choose(a, choice) masked_array(data=[3, 2, 1], mask=False, fill_value=999999)",
    "type": "function",
    "file_path": "numpy\\numpy\\ma\\core.py",
    "ast_data": "FunctionDef name:choose arg:indices arg:choices arg:out arg:mode arguments arg arg arg arg FunctionDef name:fmask arg:x arguments arg If Compare Return return:yes Return return:yes Call FunctionDef name:nmask arg:x arguments arg If Compare Return return:yes Return return:yes Call Assign Call Assign Call Assign Call Assign Call Assign Call Call Call Assign Call Call If Compare If Call Call Return return:yes Call Return return:yes"
  },
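Extending the docstring's own example by one step to show how masks propagate when an index selects a masked entry (standard numpy.ma behaviour, shown as a usage sketch):

```python
import numpy as np

choice = np.array([[1, 1, 1], [2, 2, 2], [3, 3, 3]])
a = np.array([2, 1, 0])
print(np.ma.choose(a, choice))          # [3 2 1]

# Mask one entry: the position that selects it comes out masked.
masked_choice = np.ma.array(choice, mask=[[0, 0, 0], [0, 1, 0], [0, 0, 0]])
print(np.ma.choose(a, masked_choice))   # [3 -- 1]
```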
  {
    "library": "scikit-learn",
    "name": "decision_function",
    "source_code": "@available_if(_check_novelty_decision_function)\ndef decision_function(self, X):\n    return self.score_samples(X) - self.offset_",
    "docstring": "Shifted opposite of the Local Outlier Factor of X. Bigger is better, i.e. large values correspond to inliers. **Only available for novelty detection (when novelty is set to True).** The shift offset allows a zero threshold for being an outlier. The argument X is supposed to contain *new data*: if X contains a point from training, it considers the later in its own neighborhood. Also, the samples in X are not considered in the neighborhood of any point. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) The query sample or samples to compute the Local Outlier Factor w.r.t. the training samples. Returns ------- shifted_opposite_lof_scores : ndarray of shape (n_samples,) The shifted opposite of the Local Outlier Factor of each input samples. The lower, the more abnormal. Negative scores represent outliers, positive scores represent inliers.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\neighbors\\_lof.py",
    "ast_data": "FunctionDef name:decision_function arg:self arg:X arguments arg arg Return return:yes Call Call"
  },
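A usage sketch with synthetic data (the points here are made up for illustration): in novelty mode, the shifted score is positive for inliers and negative for outliers:

```python
import numpy as np
from sklearn.neighbors import LocalOutlierFactor

rng = np.random.default_rng(0)
X_train = rng.normal(size=(100, 2))          # dense inlier cloud
X_new = np.array([[0.0, 0.0], [8.0, 8.0]])   # one inlier, one far outlier

lof = LocalOutlierFactor(n_neighbors=20, novelty=True).fit(X_train)
print(lof.decision_function(X_new))          # first > 0, second < 0
```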
  {
    "library": "pytorch",
    "name": "_lower_right",
    "source_code": "def _lower_right(self, device: torch.device) -> torch.Tensor:\n    diagonal_offset = self.seq_len_kv - self.seq_len_q\n    return torch.tril(torch.ones(self.seq_len_q, self.seq_len_kv, device=device, dtype=torch.bool), diagonal=diagonal_offset)",
    "docstring": "Lower right causal bias",
    "type": "method",
    "file_path": "pytorch\\torch\\nn\\attention\\bias.py",
    "ast_data": "FunctionDef name:_lower_right arg:self arg:device arguments arg arg Assign Return return:yes Call Call"
  },
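What the diagonal offset buys when seq_len_q < seq_len_kv: the causal triangle is aligned to the lower-right corner, so the last query can attend to every key. A standalone sketch:

```python
import torch

def lower_right_causal(seq_len_q: int, seq_len_kv: int) -> torch.Tensor:
    # A positive offset widens the allowed (True) triangle so the final
    # query row sees all keys.
    diagonal_offset = seq_len_kv - seq_len_q
    return torch.tril(
        torch.ones(seq_len_q, seq_len_kv, dtype=torch.bool),
        diagonal=diagonal_offset,
    )

print(lower_right_causal(3, 5).int())
# tensor([[1, 1, 1, 0, 0],
#         [1, 1, 1, 1, 0],
#         [1, 1, 1, 1, 1]], dtype=torch.int32)
```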
  {
    "library": "scikit-learn",
    "name": "_final_estimator_has",
    "source_code": "def _final_estimator_has(attr):\n\n    def check(self):\n        getattr(self._final_estimator, attr)\n        return True\n    return check",
    "docstring": "Check that final_estimator has . Used together with in .",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\pipeline.py",
    "ast_data": "FunctionDef name:_final_estimator_has arg:attr arguments arg FunctionDef name:check arg:self arguments arg Call Return return:yes Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "eig",
    "source_code": "@tf_export('linalg.eig', 'eig', v1=[])\n@dispatch.add_dispatch_support\ndef eig(tensor, name=None):\n    if tensor.dtype == dtypes.float32 or tensor.dtype == dtypes.complex64:\n        out_dtype = dtypes.complex64\n    elif tensor.dtype == dtypes.float64 or tensor.dtype == dtypes.complex128:\n        out_dtype = dtypes.complex128\n    e, v = gen_linalg_ops.eig(tensor, Tout=out_dtype, compute_v=True, name=name)\n    return (e, v)",
    "docstring": "Computes the eigen decomposition of a batch of matrices. The eigenvalues and eigenvectors for a non-Hermitian matrix in general are complex. The eigenvectors are not guaranteed to be linearly independent. Computes the eigenvalues and right eigenvectors of the innermost N-by-N matrices in such that , for i=0...N-1. Args: tensor: of shape . Only the lower triangular part of each inner inner matrix is referenced. name: string, optional name of the operation. Returns: e: Eigenvalues. Shape is . The eigenvalues are not necessarily ordered. v: Eigenvectors. Shape is . The columns of the inner most matrices contain eigenvectors of the corresponding matrices in",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\linalg_ops.py",
    "ast_data": "FunctionDef name:eig arg:tensor arg:name arguments arg arg If BoolOp Compare Compare Assign If BoolOp Compare Compare Assign Assign Call Return return:yes Call"
  },
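For intuition, the same eigenvalue relation checked with NumPy rather than TensorFlow: a real non-symmetric matrix generally yields complex eigenpairs:

```python
import numpy as np

A = np.array([[0.0, -1.0], [1.0, 0.0]])   # 90-degree rotation
e, v = np.linalg.eig(A)
print(e)                                   # [0.+1.j 0.-1.j]
# Verify A @ v[:, i] == e[i] * v[:, i] for every column i.
assert np.allclose(A @ v, v * e)
```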
  {
    "library": "tensorflow",
    "name": "run_as_function_for_tape_gradients",
    "source_code": "def run_as_function_for_tape_gradients(make_op, inputs):\n    if gradients_util.PossibleTapeGradientTypes(inputs) == gradients_util.POSSIBLE_GRADIENT_TYPES_HIGHER_ORDER and (not (ops.get_default_graph().building_function and 'cflow_gradient_wrapper' in ops.get_default_graph().name)):\n        results = tracing_compilation.call_function((inputs,), tracing_options=tracing_compilation.TracingOptions(make_op, 'cflow_gradient_wrapper', autograph=False))\n        return results\n    else:\n        return make_op(inputs)",
    "docstring": "Fix higher-order tape gradients by wrapping in a function. Args: make_op: A function that takes a list of inputs and returns a list of output tensors. This function should set any handle data relevant to its outputs before returning. inputs: A list of tensors to check for tape gradients and pass to . These should include all tensors used in . Returns: Tensors corresponding to 's output.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\control_flow_util_v2.py",
    "ast_data": "FunctionDef name:run_as_function_for_tape_gradients arg:make_op arg:inputs arguments arg arg If BoolOp Compare Call BoolOp Call Compare Call Assign Call Call Return return:yes Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_assert_same_non_optimizer_objects",
    "source_code": "def _assert_same_non_optimizer_objects(model, model_graph, clone, clone_graph):\n    return True",
    "docstring": "Asserts model and clone contain the same trackable objects.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\saving\\saved_model_experimental.py",
    "ast_data": "FunctionDef name:_assert_same_non_optimizer_objects arg:model arg:model_graph arg:clone arg:clone_graph arguments arg arg arg arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "softplus_inverse",
    "source_code": "def softplus_inverse(x, name=None):\n    with ops.name_scope(name, 'softplus_inverse', values=[x]):\n        x = ops.convert_to_tensor(x, name='x')\n        threshold = np.log(np.finfo(x.dtype.as_numpy_dtype).eps) + 2.0\n        is_too_small = math_ops.less(x, np.exp(threshold))\n        is_too_large = math_ops.greater(x, -threshold)\n        too_small_value = math_ops.log(x)\n        too_large_value = x\n        x = array_ops.where_v2(math_ops.logical_or(is_too_small, is_too_large), array_ops.ones_like(x), x)\n        y = x + math_ops.log(-math_ops.expm1(-x))\n        return array_ops.where_v2(is_too_small, too_small_value, array_ops.where_v2(is_too_large, too_large_value, y))",
    "docstring": "Computes the inverse softplus, i.e., x = softplus_inverse(softplus(x)). Mathematically this op is equivalent to: Args: x: . Non-negative (not enforced), floating-point. name: A name for the operation (optional). Returns: . Has the same type/shape as input .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\distributions\\util.py",
    "ast_data": "FunctionDef name:softplus_inverse arg:x arg:name arguments arg arg With Call Assign Call Assign Call Call Assign Call Call Assign Call Assign Call Assign Assign Call Call Call Assign Call Call Return return:yes Call Call"
  },
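The identity behind the op, checked numerically with NumPy; this sketch covers only the math, while the TF version in the record also clamps inputs where exp would over- or underflow:

```python
import numpy as np

def softplus(x):
    return np.log1p(np.exp(x))    # log(1 + e^x)

def softplus_inverse(y):
    return np.log(np.expm1(y))    # log(e^y - 1)

x = np.linspace(-5.0, 5.0, 11)
assert np.allclose(softplus_inverse(softplus(x)), x)
```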
  {
    "library": "django",
    "name": "delete",
    "source_code": "def delete(self, request, *args, **kwargs):\n    self.object = self.get_object()\n    success_url = self.get_success_url()\n    self.object.delete()\n    return HttpResponseRedirect(success_url)",
    "docstring": "Call the delete() method on the fetched object and then redirect to the success URL.",
    "type": "method",
    "file_path": "django\\django\\views\\generic\\edit.py",
    "ast_data": "FunctionDef name:delete arg:self arg:request arguments arg arg arg arg Assign Call Assign Call Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_call_batch_begin_hook",
    "source_code": "def _call_batch_begin_hook(self, mode, batch, logs):\n    hook_name = 'on_{mode}_batch_begin'.format(mode=mode)\n    self._call_batch_hook_helper(hook_name, batch, logs)\n    if self._check_timing:\n        self._batch_start_time = time.time()",
    "docstring": "Helper function for methods.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\callbacks.py",
    "ast_data": "FunctionDef name:_call_batch_begin_hook arg:self arg:mode arg:batch arg:logs arguments arg arg arg arg Assign Call Call If Assign Call"
  },
  {
    "library": "scipy",
    "name": "Easom",
    "source_code": "class Easom(Benchmark):\n\n    def __init__(self, dimensions=2):\n        Benchmark.__init__(self, dimensions)\n        self._bounds = list(zip([-100.0] * self.N, [100.0] * self.N))\n        self.global_optimum = [[pi for _ in range(self.N)]]\n        self.fglob = -1.0\n\n    def fun(self, x, *args):\n        self.nfev += 1\n        a = (x[0] - pi) ** 2 + (x[1] - pi) ** 2\n        return -cos(x[0]) * cos(x[1]) * exp(-a)",
    "docstring": "Easom objective function. This class defines the Easom [1]_ global optimization problem. This is a a multimodal minimization problem defined as follows: .. math:: f_{\\text{Easom}}({x}) = a - \\frac{a}{e^{b \\sqrt{\\frac{\\sum_{i=1}^{n} x_i^{2}}{n}}}} + e - e^{\\frac{\\sum_{i=1}^{n} \\cos\\left(c x_i\\right)} {n}} Where, in this exercise, :math: and :math:. Here, :math: for :math:. *Global optimum*: :math: for :math: .. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions For Global Optimization Problems Int. Journal of Mathematical Modelling and Numerical Optimisation, 2013, 4, 150-194. TODO Gavana website disagrees with Jamil, etc. Gavana equation in docstring is totally wrong.",
    "type": "class",
    "file_path": "scipy\\benchmarks\\benchmarks\\go_benchmark_functions\\go_funcs_E.py",
    "ast_data": "ClassDef name:Easom FunctionDef name:__init__ arg:self arg:dimensions arguments arg arg Call Assign Call Call Assign Call Assign FunctionDef name:fun arg:self arg:x arguments arg arg arg Assign Return return:yes Call Call Call"
  },
  {
    "library": "pandas",
    "name": "get_test_result",
    "source_code": "def get_test_result() -> list[bool]:\n    global _TEST_RESULT\n    res = _TEST_RESULT\n    _TEST_RESULT = []\n    return res",
    "docstring": "Get test result and reset test_results.",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\computation\\expressions.py",
    "ast_data": "FunctionDef name:get_test_result arguments Assign Assign Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "bounds",
    "source_code": "@property\ndef bounds(self):\n    return self.kernel.bounds",
    "docstring": "Returns the log-transformed bounds on the theta. Returns ------- bounds : ndarray of shape (n_dims, 2) The log-transformed bounds on the kernel's hyperparameters theta",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\gaussian_process\\kernels.py",
    "ast_data": "FunctionDef name:bounds arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "experimental_distribute_dataset",
    "source_code": "def experimental_distribute_dataset(self, dataset, options=None):\n    if options and options.experimental_replication_moden == distribute_lib.InputReplicationMode.PER_REPLICA:\n        raise NotImplementedError('InputReplicationMode.PER_REPLICA is only supported in `experimental_distribute_datasets_from_function`.')\n    return super(CentralStorageStrategy, self).experimental_distribute_dataset(dataset, options)",
    "docstring": "Distributes a tf.data.Dataset instance provided via dataset. The returned dataset is a wrapped strategy dataset which creates a multidevice iterator under the hood. It prefetches the input data to the specified devices on the worker. The returned distributed dataset can be iterated over similar to how regular datasets can. NOTE: Currently, the user cannot add any more transformations to a distributed dataset. For Example: Args: dataset: to be prefetched to device. options: used to control options on how this dataset is distributed. Returns: A \"distributed \" that the caller can iterate over.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\central_storage_strategy.py",
    "ast_data": "FunctionDef name:experimental_distribute_dataset arg:self arg:dataset arg:options arguments arg arg arg If BoolOp Compare Raise Call Return return:yes Call Call"
  },
  {
    "library": "scipy",
    "name": "ppf",
    "source_code": "def ppf(self, q):\n    q = np.asarray(q)\n    if self._mirror_uniform:\n        x = self._rng.ppf(1 - q)\n    else:\n        x = self._rng.ppf(q)\n    if self._rvs_transform is not None:\n        x = self._rvs_transform(x, *self._frozendist.args)\n    return self.scale * x + self.loc",
    "docstring": "Very fast PPF (inverse CDF) of the distribution which is a very close approximation of the exact PPF values. Parameters ---------- u : array_like Array with probabilities. Returns ------- ppf : array_like Quantiles corresponding to the values in . Notes ----- The evaluation of the PPF is very fast but it may have a large relative error in the far tails. The numerical precision of the PPF is controlled by the u-error, that is, `evaluate_error`. Note that this PPF is designed to generate random samples.",
    "type": "method",
    "file_path": "scipy\\scipy\\stats\\_sampling.py",
    "ast_data": "FunctionDef name:ppf arg:self arg:q arguments arg arg Assign Call If Assign Call Assign Call If Compare Assign Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "parse_version",
    "source_code": "def parse_version(version: str) -> Version:\n    version_number_str = version\n    for i in range(len(version)):\n        c = version[i]\n        if not (c.isdigit() or c == '.'):\n            version_number_str = version[:i]\n            break\n    return cast(Version, tuple([int(n) for n in version_number_str.split('.')]))",
    "docstring": "Parses a version string into (major, minor, patch) version numbers. Args: version: Full version number string, possibly including revision / commit hash. Returns: An int 3-tuple of (major, minor, patch) version numbers.",
    "type": "function",
    "file_path": "pytorch\\tools\\setup_helpers\\gen_version_header.py",
    "ast_data": "FunctionDef name:parse_version arg:version arguments arg Assign For Call Call Assign If BoolOp Call Compare Assign Return return:yes Call Call Call Call"
  },
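Usage sketch: the scan stops at the first character that is neither a digit nor a dot, so revision or commit suffixes are dropped before splitting:

```python
def parse_version(version: str) -> tuple[int, ...]:
    version_number_str = version
    for i, c in enumerate(version):
        if not (c.isdigit() or c == "."):
            version_number_str = version[:i]   # cut "+gitabc123" etc.
            break
    return tuple(int(n) for n in version_number_str.split("."))

assert parse_version("2.1.0") == (2, 1, 0)
assert parse_version("2.1.0+gitabc123") == (2, 1, 0)
```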
  {
    "library": "pygame",
    "name": "tmap",
    "source_code": "def tmap(f, seq_args, num_workers=20, worker_queue=None, wait=True, stop_on_error=True):\n    if worker_queue:\n        wq = worker_queue\n    elif _wq:\n        wq = _wq\n    else:\n        if num_workers == 0:\n            return map(f, seq_args)\n        wq = WorkerQueue(num_workers)\n    if len(wq.pool) == 0:\n        return map(f, seq_args)\n    results = []\n    for sa in seq_args:\n        results.append(FuncResult(f))\n        wq.do(results[-1], sa)\n    if wait:\n        wq.wait()\n        if wq.queue.qsize():\n            raise RuntimeError('buggy threadmap')\n        if not worker_queue and (not _wq):\n            wq.stop()\n            if wq.queue.qsize():\n                um = wq.queue.get()\n                if um is not STOP:\n                    raise RuntimeError('buggy threadmap')\n        if stop_on_error:\n            error_ones = list(filter(lambda x: x.exception, results))\n            if error_ones:\n                raise error_ones[0].exception\n        return (x.result for x in results)\n    return [wq, results]",
    "docstring": "like map, but uses a thread pool to execute. num_workers - the number of worker threads that will be used. If pool is passed in, then the num_workers arg is ignored. worker_queue - you can optionally pass in an existing WorkerQueue. wait - True means that the results are returned when everything is finished. False means that we return the [worker_queue, results] right away instead. results, is returned as a list of FuncResult instances. stop_on_error -",
    "type": "function",
    "file_path": "pygame\\src_py\\threads\\__init__.py",
    "ast_data": "FunctionDef name:tmap arg:f arg:seq_args arg:num_workers arg:worker_queue arg:wait arg:stop_on_error arguments arg arg arg arg arg arg If Assign If Assign If Compare Return return:yes Call Assign Call If Compare Call Return return:yes Call Assign For Call Call Call If Call If Call Raise Call If BoolOp Call If Call Assign Call If Compare Raise Call If Assign Call Call arguments arg If Raise Return return:yes Return return:yes"
  },
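For comparison, the same map-with-a-thread-pool shape using only the standard library; this is not pygame's implementation, just the modern equivalent of its happy path:

```python
from concurrent.futures import ThreadPoolExecutor

def square(x: int) -> int:
    return x * x

with ThreadPoolExecutor(max_workers=4) as pool:
    results = list(pool.map(square, range(8)))
print(results)   # [0, 1, 4, 9, 16, 25, 36, 49]
```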
  {
    "library": "pandas",
    "name": "_pull_records",
    "source_code": "def _pull_records(js: dict[str, Any], spec: list | str) -> list:\n    result = _pull_field(js, spec, extract_record=True)\n    if not isinstance(result, list):\n        if pd.isnull(result):\n            result = []\n        else:\n            raise TypeError(f'Path must contain list or null, but got {type(result).__name__} at {spec!r}')\n    return result",
    "docstring": "Internal function to pull field for records, and similar to _pull_field, but require to return list. And will raise error if has non iterable value.",
    "type": "function",
    "file_path": "pandas\\pandas\\io\\json\\_normalize.py",
    "ast_data": "FunctionDef name:_pull_records arg:js arg:spec arguments arg arg Assign Call If Call If Call Assign Raise Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "select_unit",
    "source_code": "def select_unit(t: float) -> tuple[str, float]:\n    time_unit = {-3: 'ns', -2: 'us', -1: 'ms'}.get(int(torch.tensor(t).log10().item() // 3), 's')\n    time_scale = {'ns': 1e-09, 'us': 1e-06, 'ms': 0.001, 's': 1}[time_unit]\n    return (time_unit, time_scale)",
    "docstring": "Determine how to scale times for O(1) magnitude. This utility is used to format numbers for human consumption.",
    "type": "function",
    "file_path": "pytorch\\torch\\utils\\benchmark\\utils\\common.py",
    "ast_data": "FunctionDef name:select_unit arg:t arguments arg Assign Call Call Call Call Call Assign Return return:yes"
  },
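The same magnitude bucketing in pure Python (math.log10 instead of torch), with local names, to make the floor-division trick explicit:

```python
import math

def select_unit(t: float) -> tuple[str, float]:
    # floor(log10(t) / 3) buckets seconds into ns / us / ms / s ranges.
    unit = {-3: "ns", -2: "us", -1: "ms"}.get(int(math.log10(t) // 3), "s")
    scale = {"ns": 1e-9, "us": 1e-6, "ms": 1e-3, "s": 1.0}[unit]
    return unit, scale

print(select_unit(2.5e-5))   # ('us', 1e-06)
```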
  {
    "library": "scipy",
    "name": "assert_deallocated",
    "source_code": "@contextmanager\ndef assert_deallocated(func, *args, **kwargs):\n    if IS_PYPY:\n        raise RuntimeError('assert_deallocated is unavailable on PyPy')\n    with gc_state(False):\n        obj = func(*args, **kwargs)\n        ref = weakref.ref(obj)\n        yield obj\n        del obj\n        if ref() is not None:\n            raise ReferenceError('Remaining reference(s) to object')",
    "docstring": "Context manager to check that object is deallocated This is useful for checking that an object can be freed directly by reference counting, without requiring gc to break reference cycles. GC is disabled inside the context manager. This check is not available on PyPy. Parameters ---------- func : callable Callable to create object to check \\*args : sequence positional arguments to in order to create object to check \\*\\*kwargs : dict keyword arguments to in order to create object to check Examples -------- >>> class C: pass >>> with assert_deallocated(C) as c: ... # do something ... del c >>> class C: ... def __init__(self): ... self._circular = self # Make circular reference >>> with assert_deallocated(C) as c: #doctest: +IGNORE_EXCEPTION_DETAIL ... # do something ... del c Traceback (most recent call last): ... ReferenceError: Remaining reference(s) to object",
    "type": "function",
    "file_path": "scipy\\scipy\\_lib\\_gcutils.py",
    "ast_data": "FunctionDef name:assert_deallocated arg:func arguments arg arg arg If Raise Call With Call Assign Call Assign Call If Compare Call Raise Call"
  },
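The core of the check without SciPy: keep only a weak reference, drop the strong one, and confirm the referent is gone. CPython's reference counting makes this deterministic; the record's version additionally disables GC to rule out cycle collection:

```python
import weakref

class C:
    pass

obj = C()
ref = weakref.ref(obj)
del obj                  # last strong reference dropped
assert ref() is None     # freed by refcounting alone, no gc needed
```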
  {
    "library": "tensorflow",
    "name": "add_chunk",
    "source_code": "def add_chunk(self, chunk: Union[message.Message, bytes], field_tags: util.FieldTypes, index=None) -> None:\n    if self._parent_splitter is not None:\n        self._parent_splitter.add_chunk(chunk, self._fields_in_parent + field_tags, index)\n    else:\n        assert self._chunks is not None\n        assert self._chunked_message is not None\n        field = self._chunked_message.chunked_fields.add(field_tag=util.get_field_tag(self._proto, field_tags))\n        new_chunk_index = len(self._chunks)\n        field.message.chunk_index = new_chunk_index\n        self._add_chunk_order.append(id(chunk))\n        if index is None:\n            self._chunks.append(chunk)\n        else:\n            self._chunks.insert(index, chunk)\n            self._fix_chunk_order = True",
    "docstring": "Adds a new chunk and updates the ChunkedMessage proto. Args: chunk: Proto message or bytes. field_tags: Field information about the placement of the chunked data within self._proto. index: Optional index at which to insert the chunk. The chunk ordering is important for merging.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\tools\\proto_splitter\\split.py",
    "ast_data": "FunctionDef name:add_chunk arg:self arg:chunk arg:field_tags arg:index arguments arg arg arg arg If Compare Call Compare Compare Assign Call Call Assign Call Assign Call Call If Compare Call Call Assign"
  },
  {
    "library": "pytorch",
    "name": "traverse_dps",
    "source_code": "def traverse_dps(datapipe: DataPipe) -> DataPipeGraph:\n    cache: set[int] = set()\n    return _traverse_helper(datapipe, only_datapipe=True, cache=cache)",
    "docstring": "Traverse the DataPipes and their attributes to extract the DataPipe graph. This only looks into the attribute from each DataPipe that is either a DataPipe and a Python collection object such as ``. Args: datapipe: the end DataPipe of the graph Returns: A graph represented as a nested dictionary, where keys are ids of DataPipe instances and values are tuples of DataPipe instance and the sub-graph",
    "type": "function",
    "file_path": "pytorch\\torch\\utils\\data\\graph.py",
    "ast_data": "FunctionDef name:traverse_dps arg:datapipe arguments arg Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "export_outputs_for_mode",
    "source_code": "def export_outputs_for_mode(mode, serving_export_outputs=None, predictions=None, loss=None, metrics=None):\n    if mode not in SIGNATURE_KEY_MAP:\n        raise ValueError(f'Export output type not found for `mode`: {mode}. Expected one of: {list(SIGNATURE_KEY_MAP.keys())}.')\n    signature_key = SIGNATURE_KEY_MAP[mode]\n    if mode_keys.is_predict(mode):\n        return get_export_outputs(serving_export_outputs, predictions)\n    elif mode_keys.is_train(mode):\n        return {signature_key: export_output_lib.TrainOutput(loss=loss, predictions=predictions, metrics=metrics)}\n    else:\n        return {signature_key: export_output_lib.EvalOutput(loss=loss, predictions=predictions, metrics=metrics)}",
    "docstring": "Util function for constructing a dict given a mode. The returned dict can be directly passed to helper function as the argument, used for generating a SignatureDef map. Args: mode: A specifying the mode. serving_export_outputs: Describes the output signatures to be exported to and used during serving. Should be a dict or None. predictions: A dict of Tensors or single Tensor representing model predictions. This argument is only used if serving_export_outputs is not set. loss: A dict of Tensors or single Tensor representing calculated loss. metrics: A dict of (metric_value, update_op) tuples, or a single tuple. metric_value must be a Tensor, and update_op must be a Tensor or Op Returns: Dictionary mapping the key to an object. The key is the expected SignatureDef key for the mode. Raises: ValueError: if an appropriate ExportOutput cannot be found for the mode.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\saved_model\\model_utils\\export_utils.py",
    "ast_data": "FunctionDef name:export_outputs_for_mode arg:mode arg:serving_export_outputs arg:predictions arg:loss arg:metrics arguments arg arg arg arg arg If Compare Raise Call Call Call Assign If Call Return return:yes Call If Call Return return:yes Call Return return:yes Call"
  },
  {
    "library": "seaborn",
    "name": "_artist_kws",
    "source_code": "def _artist_kws(self, kws, fill, element, multiple, color, alpha):\n    kws = kws.copy()\n    if fill:\n        kws = normalize_kwargs(kws, mpl.collections.PolyCollection)\n        kws.setdefault('facecolor', to_rgba(color, alpha))\n        if element == 'bars':\n            kws['color'] = 'none'\n        if multiple in ['stack', 'fill'] or element == 'bars':\n            kws.setdefault('edgecolor', mpl.rcParams['patch.edgecolor'])\n        else:\n            kws.setdefault('edgecolor', to_rgba(color, 1))\n    elif element == 'bars':\n        kws['facecolor'] = 'none'\n        kws['edgecolor'] = to_rgba(color, alpha)\n    else:\n        kws['color'] = to_rgba(color, alpha)\n    return kws",
    "docstring": "Handle differences between artists in filled/unfilled plots.",
    "type": "method",
    "file_path": "seaborn\\seaborn\\distributions.py",
    "ast_data": "FunctionDef name:_artist_kws arg:self arg:kws arg:fill arg:element arg:multiple arg:color arg:alpha arguments arg arg arg arg arg arg arg Assign Call If Assign Call Call Call If Compare Assign If BoolOp Compare Compare Call Call Call If Compare Assign Assign Call Assign Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "TensorMetadata",
    "source_code": "@dataclass_slots\n@dataclass\nclass TensorMetadata:\n    dtype: torch.dtype\n    shape: tuple[_MetadataIntLike, ...]\n    stride: tuple[_MetadataIntLike, ...]\n    device: torch.device\n    layout: torch.layout\n    memory_format: Optional[torch.memory_format]\n    storage_offset: _MetadataIntLike\n    storage_bytes: Optional[_MetadataIntLike]\n    requires_grad: bool\n    is_quantized: bool\n    is_conj: bool\n    is_neg: bool\n    is_inference: bool\n    is_sparse: bool\n    is_coalesced: Optional[bool]\n    dense_dim: Optional[int]\n    sparse_dim: Optional[int]\n\n    def _flatten_into(self, result: list[object], mode: FakeTensorMode, state: _CacheKeyState) -> None:\n        for field in dataclasses.fields(self):\n            value = getattr(self, field.name)\n            if isinstance(value, (tuple, list, torch.Size)):\n                id_hashed_objects: list[object] = []\n                mode._prep_args_for_hash(result, value, state, id_hashed_objects)\n                id_hashed_objects.clear()\n            elif isinstance(value, SymInt):\n                state.convert_sym_int(result, value)\n            else:\n                result.append(value)",
    "docstring": "The Tensor metadata relevant to hashing FakeTensors when caching.",
    "type": "class",
    "file_path": "pytorch\\torch\\_subclasses\\fake_tensor.py",
    "ast_data": "ClassDef name:TensorMetadata FunctionDef name:_flatten_into arg:self arg:result arg:mode arg:state arguments arg arg arg arg For Call Assign Call If Call Call Call If Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "add",
    "source_code": "@register_decomposition(aten.add)\n@out_wrapper()\n@elementwise_type_promotion_wrapper(type_promoting_args=('a', 'b'), type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT)\ndef add(a: Union[TensorLikeType, NumberType], b: Union[TensorLikeType, NumberType], *, alpha: Optional[NumberType]=None):\n    a, b = _maybe_broadcast(a, b)\n    if alpha is not None:\n        dtype = a.dtype if isinstance(a, TensorLike) else b.dtype\n        python_type = utils.dtype_to_type(dtype)\n        if python_type != bool and (not utils.is_weakly_lesser_type(type(alpha), python_type)):\n            msg = f'alpha argument of type {type(alpha)} cannot be safely cast to type {python_type}!'\n            raise ValueError(msg)\n        if isinstance(b, TensorLike):\n            b = prims.mul(b, alpha)\n        else:\n            b = b * alpha\n    output = prims.add(a, b)\n    return handle_noncontiguous_outputs([a, b], output)",
    "docstring": "Reference implementation of torch.add",
    "type": "function",
    "file_path": "pytorch\\torch\\_refs\\__init__.py",
    "ast_data": "FunctionDef name:add arg:a arg:b arguments arg arg arg Assign Call If Compare Assign Call Assign Call If BoolOp Compare Call Call Assign Call Raise Call If Call Assign Call Assign Assign Call Return return:yes Call Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_WeakReferencableClass",
    "source_code": "class _WeakReferencableClass:\n    pass",
    "docstring": "This dummy class is needed for two reasons. - We need something that supports weak references. Basic types like string and ints don't. - We need something whose hash and equality are based on object identity to make sure they are treated as different keys to _GRAPH_LEARNING_PHASES. An empty Python class satisfies both of these requirements.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\backend.py",
    "ast_data": "ClassDef name:_WeakReferencableClass"
  },
  {
    "library": "tensorflow",
    "name": "validator",
    "source_code": "def validator(sample: rd.RepresentativeSample) -> rd.RepresentativeSample:\n    if not isinstance(sample, Mapping):\n        raise ValueError(f'Invalid representative sample type. Provide a mapping (usually a dict) of {{input_key: input_value}}. Got type: {type(sample)} instead.')\n    if set(sample.keys()) != expected_input_keys:\n        raise KeyError(f'Invalid input keys for representative sample. The function expects input keys of: {set(expected_input_keys)}. Got: {set(sample.keys())}. Please provide correct input keys for representative samples.')\n    return sample",
    "docstring": "Validates a single instance of representative sample. This provides a simple check for that this is a mapping of {input_key: input_value}. Args: sample: A to validate. Returns: iff it is valid. Raises: ValueError: iff the sample isn't an instance of . KeyError: iff the sample does not have the set of input keys that match the input keys of the function.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\compiler\\mlir\\quantization\\tensorflow\\python\\py_function_lib.py",
    "ast_data": "FunctionDef name:validator arg:sample arguments arg If Call Raise Call Call If Compare Call Call Raise Call Call Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, axes, normalize=False, **kwargs):\n    super(Dot, self).__init__(**kwargs)\n    if not isinstance(axes, int):\n        if not isinstance(axes, (list, tuple)):\n            raise TypeError('Invalid type for `axes` - should be a list or an int.')\n        if len(axes) != 2:\n            raise ValueError('Invalid format for `axes` - should contain two elements.')\n        if not isinstance(axes[0], int) or not isinstance(axes[1], int):\n            raise ValueError('Invalid format for `axes` - list elements should be \"int\".')\n    self.axes = axes\n    self.normalize = normalize\n    self.supports_masking = True\n    self._reshape_required = False",
    "docstring": "Initializes a layer that computes the element-wise dot product. >>> x = np.arange(10).reshape(1, 5, 2) >>> print(x) [[[0 1] [2 3] [4 5] [6 7] [8 9]]] >>> y = np.arange(10, 20).reshape(1, 2, 5) >>> print(y) [[[10 11 12 13 14] [15 16 17 18 19]]] >>> tf.keras.layers.Dot(axes=(1, 2))([x, y]) Args: axes: Integer or tuple of integers, axis or axes along which to take the dot product. If a tuple, should be two integers corresponding to the desired axis from the first input and the desired axis from the second input, respectively. Note that the size of the two selected axes must match. normalize: Whether to L2-normalize samples along the dot product axis before taking the dot product. If set to True, then the output of the dot product is the cosine proximity between the two samples. **kwargs: Standard layer keyword arguments.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\layers\\merge.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:axes arg:normalize arguments arg arg arg arg Call Call If Call If Call Raise Call If Compare Call Raise Call If BoolOp Call Call Raise Call Assign Assign Assign Assign"
  },
  {
    "library": "matplotlib",
    "name": "writeInfoDict",
    "source_code": "def writeInfoDict(self):\n    self.infoObject = self.reserveObject('info')\n    self.writeObject(self.infoObject, self.infoDict)",
    "docstring": "Write out the info dictionary, checking it for good form",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\backends\\backend_pdf.py",
    "ast_data": "FunctionDef name:writeInfoDict arg:self arguments arg Assign Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_get_clang_major_version",
    "source_code": "def _get_clang_major_version(path_to_clang: str) -> int:\n    logging.info('Running echo __clang_major__ | %s -E -P -', path_to_clang)\n    clang_version_proc = subprocess.run([path_to_clang, '-E', '-P', '-'], input='__clang_major__', check=True, capture_output=True, text=True)\n    major_version = int(clang_version_proc.stdout)\n    logging.info('%s reports major version %s.', path_to_clang, major_version)\n    return major_version",
    "docstring": "Gets the major version of the clang at . Args: path_to_clang: Path to a clang executable Returns: The major version.",
    "type": "function",
    "file_path": "tensorflow\\third_party\\xla\\build_tools\\configure\\configure.py",
    "ast_data": "FunctionDef name:_get_clang_major_version arg:path_to_clang arguments arg Call Assign Call Assign Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "SymbolTable",
    "source_code": "class SymbolTable(object):\n\n    def __init__(self):\n        self.symbols = []\n        self.enter_scope()\n        self.scf_scope = 0\n        self.insert_symbol('len', 'len', TFRTypes.PY_BUILTIN_FUNC)\n\n    def enter_scope(self, scf_scope=False):\n        self.symbols.append({'types': {}, 'symbols': {}})\n        self.curr_table = self.symbols[len(self.symbols) - 1]\n        if scf_scope:\n            self.scf_scope += 1\n\n    def insert_symbol(self, name, value, type_):\n        self.curr_table['symbols'][name] = (value, type_)\n        self.curr_table['types'][name] = type_\n        return value\n\n    def exit_scope(self):\n        self.symbols.pop()\n        self.curr_table = self.symbols[len(self.symbols) - 1]\n        if self.scf_scope > 0:\n            self.scf_scope -= 1\n\n    def in_scf_scope(self):\n        return self.scf_scope > 0\n\n    def lookup(self, name):\n        curr_idx = len(self.symbols) - 1\n        while curr_idx >= 0 and name not in self.symbols[curr_idx]['symbols']:\n            curr_idx -= 1\n        if curr_idx < 0:\n            return None\n        return self.symbols[curr_idx]['symbols'][name]",
    "docstring": "Symbol Table for python code.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\compiler\\mlir\\tfr\\python\\tfr_gen.py",
    "ast_data": "ClassDef name:SymbolTable FunctionDef name:__init__ arg:self arguments arg Assign Call Assign Call FunctionDef name:enter_scope arg:self arg:scf_scope arguments arg arg Call Assign Call If FunctionDef name:insert_symbol arg:self arg:name arg:value arg:type_ arguments arg arg arg arg Assign Assign Return return:yes FunctionDef name:exit_scope arg:self arguments arg Call Assign Call If Compare FunctionDef name:in_scf_scope arg:self arguments arg Return return:yes Compare FunctionDef name:lookup arg:self arg:name arguments arg arg Assign Call While BoolOp Compare Compare If Compare Return return:no Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "get_onednn_backend_config",
    "source_code": "def get_onednn_backend_config() -> BackendConfig:\n    return BackendConfig('onednn').set_backend_pattern_configs(conv_configs).set_backend_pattern_configs(linear_configs).set_backend_pattern_configs(_get_binary_op_configs(binary_op_dtype_configs)).set_backend_pattern_config(_get_cat_config(default_op_dtype_configs)).set_backend_pattern_configs(_get_default_op_configs(default_op_dtype_configs)).set_backend_pattern_configs(_get_fixed_qparams_op_configs(fixed_qparams_op_dtype_configs)).set_backend_pattern_configs(_get_share_qparams_op_configs(share_qparams_op_dtype_configs)).set_backend_pattern_configs(_get_bn_configs(default_op_dtype_configs)).set_backend_pattern_configs(_get_ln_configs(layer_norm_op_dtype_configs)).set_backend_pattern_configs(_get_rnn_op_configs(rnn_op_dtype_configs)).set_backend_pattern_configs(_get_embedding_op_configs(embedding_op_dtype_configs))",
    "docstring": "Return the for PyTorch's native ONEDNN backend.",
    "type": "function",
    "file_path": "pytorch\\torch\\ao\\quantization\\backend_config\\onednn.py",
    "ast_data": "FunctionDef name:get_onednn_backend_config arguments Return return:yes Call Call Call Call Call Call Call Call Call Call Call Call Call Call Call Call Call Call Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "loop_pass",
    "source_code": "def loop_pass(base_pass: Callable, n_iter: Optional[int]=None, predicate: Optional[Callable]=None):\n    assert (n_iter is not None) ^ (predicate is not None), 'Exactly one of `n_iter`or `predicate` must be specified.'\n\n    @wraps(base_pass)\n    def new_pass(source):\n        output = source\n        if n_iter is not None and n_iter > 0:\n            for _ in range(n_iter):\n                output = base_pass(output)\n        elif predicate is not None:\n            while predicate(output):\n                output = base_pass(output)\n        else:\n            raise RuntimeError(f'loop_pass must be given positive int n_iter (given {n_iter}) xor predicate (given {predicate})')\n        return output\n    return new_pass",
    "docstring": "Convenience wrapper for passes which need to be applied multiple times. Exactly one of or must be specified. Args: base_pass (Callable[Object, Object]): pass to be applied in loop n_iter (int, optional): number of times to loop pass predicate (Callable[Object, bool], optional):",
    "type": "function",
    "file_path": "pytorch\\torch\\fx\\passes\\pass_manager.py",
    "ast_data": "FunctionDef name:loop_pass arg:base_pass arg:n_iter arg:predicate arguments arg arg arg Compare Compare FunctionDef name:new_pass arg:source arguments arg Assign If BoolOp Compare Compare For Call Assign Call If Compare While Call Assign Call Raise Call Return return:yes Call Return return:yes"
  },
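Usage sketch with a toy pass; `increment` is a stand-in for a real fx pass:

```python
from torch.fx.passes.pass_manager import loop_pass

def increment(x: int) -> int:
    return x + 1

# Apply the pass a fixed number of times...
assert loop_pass(increment, n_iter=3)(0) == 3
# ...or keep applying it while the predicate holds.
assert loop_pass(increment, predicate=lambda x: x < 10)(0) == 10
```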
  {
    "library": "kornia",
    "name": "draw_point2d",
    "source_code": "def draw_point2d(image: Tensor, points: Tensor, color: Tensor) -> Tensor:\n    KORNIA_CHECK(len(image.shape) == 2 and len(color.shape) == 1 or image.shape[0] == color.shape[0], 'Color dim must match the channel dims of the provided image')\n    points = points.to(dtype=torch.int64, device=image.device)\n    x, y = zip(*points)\n    if len(color.shape) == 1:\n        color = torch.unsqueeze(color, dim=1)\n    color = color.to(dtype=image.dtype, device=image.device)\n    if len(image.shape) == 2:\n        image[y, x] = color\n    else:\n        image[:, y, x] = color\n    return image",
    "docstring": "Set one or more coordinates in a Tensor to a color. Args: image: the input image on which to draw the points with shape :math or :math. points: the [x, y] points to be drawn on the image. color: the color of the pixel with :math where :math is the number of channels of the image. Return: The image with points set to the color.",
    "type": "function",
    "file_path": "kornia\\kornia\\utils\\draw.py",
    "ast_data": "FunctionDef name:draw_point2d arg:image arg:points arg:color arguments arg arg arg Call BoolOp BoolOp Compare Call Compare Call Compare Assign Call Assign Call If Compare Call Assign Call Assign Call If Compare Call Assign Assign Return return:yes"
  },
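The indexing trick in isolation, using plain torch: split the [x, y] points into coordinate vectors and broadcast one color column over all of them:

```python
import torch

image = torch.zeros(3, 4, 4)                          # (C, H, W)
points = torch.tensor([[0, 1], [2, 3]])               # [x, y] pairs
color = torch.tensor([1.0, 0.5, 0.25]).unsqueeze(1)   # (C, 1)

x, y = points[:, 0], points[:, 1]
image[:, y, x] = color        # color broadcast over both points
print(image[:, 1, 0])         # tensor([1.0000, 0.5000, 0.2500])
```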
  {
    "library": "tensorflow",
    "name": "_make_one_shot_iterator",
    "source_code": "def _make_one_shot_iterator(self):\n    if not context.executing_eagerly():\n        raise ValueError('Cannot create a one shot iterator. Please use `make_initializable_iterator()` instead.')\n    return self._get_iterator()",
    "docstring": "Get an iterator for DistributedDatasetV1.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\v1\\input_lib.py",
    "ast_data": "FunctionDef name:_make_one_shot_iterator arg:self arguments arg If Call Raise Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "__call__",
    "source_code": "def __call__(self, graph_module: GraphModule) -> Optional[PassResult]:\n    self.requires(graph_module)\n    res = self.call(graph_module)\n    self.ensures(graph_module)\n    return res",
    "docstring": "Runs the precondition check, the pass itself, and the postcondition check.",
    "type": "method",
    "file_path": "pytorch\\torch\\fx\\passes\\infra\\pass_base.py",
    "ast_data": "FunctionDef name:__call__ arg:self arg:graph_module arguments arg arg Call Assign Call Call Return return:yes"
  },
  {
    "library": "scipy",
    "name": "_dict_formatter",
    "source_code": "def _dict_formatter(d, n=0, mplus=1, sorter=None):\n    if isinstance(d, dict):\n        m = max(map(len, list(d.keys()))) + mplus\n        s = '\\n'.join([k.rjust(m) + ': ' + _indenter(_dict_formatter(v, m + n + 2, 0, sorter), m + 2) for k, v in sorter(d)])\n    else:\n        with np.printoptions(linewidth=76 - n, edgeitems=2, threshold=12, formatter={'float_kind': _float_formatter_10}):\n            s = str(d)\n    return s",
    "docstring": "Pretty printer for dictionaries keeps track of the starting indentation; lines are indented by this much after a line break. is additional left padding applied to keys",
    "type": "function",
    "file_path": "scipy\\scipy\\_lib\\_util.py",
    "ast_data": "FunctionDef name:_dict_formatter arg:d arg:n arg:mplus arg:sorter arguments arg arg arg arg If Call Assign Call Call Call Call Assign Call Call Call Call Call With Call Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self):\n    pass",
    "docstring": "Constructor of EventListenerBaseStreamHandler.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\debug\\lib\\grpc_debug_server.py",
    "ast_data": "FunctionDef name:__init__ arg:self arguments arg"
  },
  {
    "library": "pytorch",
    "name": "append",
    "source_code": "def append(self, value: Any) -> Self:\n    new_idx = len(self)\n    self._size += 1\n    self[new_idx] = value\n    return self",
    "docstring": "Append a given value at the end of the list. Args: value (Any): value to append",
    "type": "method",
    "file_path": "pytorch\\torch\\nn\\modules\\container.py",
    "ast_data": "FunctionDef name:append arg:self arg:value arguments arg arg Assign Call Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_calc_dilation2d_flops",
    "source_code": "@ops.RegisterStatistics('Dilation2D', 'flops')\ndef _calc_dilation2d_flops(graph, node):\n    input_shape = graph_util.tensor_shape_from_node_def_name(graph, node.input[0])\n    input_shape.assert_is_fully_defined()\n    filter_shape = graph_util.tensor_shape_from_node_def_name(graph, node.input[1])\n    filter_shape.assert_is_fully_defined()\n    output_shape = graph_util.tensor_shape_from_node_def_name(graph, node.name)\n    output_shape.assert_is_fully_defined()\n    filter_height = int(filter_shape[0])\n    filter_width = int(filter_shape[1])\n    output_count = np.prod(output_shape.as_list(), dtype=np.int64)\n    return ops.OpStats('flops', output_count * filter_height * filter_width * 2)",
    "docstring": "Calculates the compute resources needed for Dilation2D.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\nn_ops.py",
    "ast_data": "FunctionDef name:_calc_dilation2d_flops arg:graph arg:node arguments arg arg Assign Call Call Assign Call Call Assign Call Call Assign Call Assign Call Assign Call Call Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_serialize_function_spec",
    "source_code": "def _serialize_function_spec(function_spec):\n    if function_spec.fullargspec.args and function_spec.fullargspec.args[0] == 'self':\n        raise TypeError(\"Can not serialize tf.function with unbound 'self' parameter.\")\n    proto = saved_object_graph_pb2.FunctionSpec()\n    proto.fullargspec.CopyFrom(nested_structure_coder.encode_structure(function_spec.fullargspec._replace(annotations={})))\n    proto.is_method = False\n    proto.input_signature.CopyFrom(nested_structure_coder.encode_structure(function_spec.input_signature))\n    proto.jit_compile = {None: saved_object_graph_pb2.FunctionSpec.JitCompile.DEFAULT, True: saved_object_graph_pb2.FunctionSpec.JitCompile.ON, False: saved_object_graph_pb2.FunctionSpec.JitCompile.OFF}.get(function_spec.jit_compile)\n    return proto",
    "docstring": "Serialize a FunctionSpec object into its proto representation.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\saved_model\\function_serialization.py",
    "ast_data": "FunctionDef name:_serialize_function_spec arg:function_spec arguments arg If BoolOp Compare Raise Call Assign Call Call Call Call Assign Call Call Assign Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "get_backend_features",
    "source_code": "def get_backend_features(self, device: torch.device) -> OrderedSet[BackendFeature]:\n    return OrderedSet()",
    "docstring": "Return a set of .codegen.common.BackendFeature()",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\scheduler.py",
    "ast_data": "FunctionDef name:get_backend_features arg:self arg:device arguments arg arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_delete_tracking",
    "source_code": "def _delete_tracking(self, name):\n    self._maybe_initialize_trackable()\n    if name in self._unconditional_dependency_names:\n        del self._unconditional_dependency_names[name]\n        for index, (dep_name, _) in enumerate(self._unconditional_checkpoint_dependencies):\n            if dep_name == name:\n                del self._unconditional_checkpoint_dependencies[index]\n                break",
    "docstring": "Removes the tracking of name.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\trackable\\autotrackable.py",
    "ast_data": "FunctionDef name:_delete_tracking arg:self arg:name arguments arg arg Call If Compare For Call If Compare"
  },
  {
    "library": "pytorch",
    "name": "_launch_forward",
    "source_code": "def _launch_forward(self, test_case, iters, print_per_iter):\n    cuda_sync = 'cuda' in test_case.test_config.test_name\n    func = test_case.run_forward\n    if self.use_jit:\n        func = test_case.run_jit_forward\n    forward_time = timeit.timeit(functools.partial(func, iters, print_per_iter, cuda_sync), number=1)\n    return forward_time",
    "docstring": "Use Python's timeit module to measure execution time (unit: second).",
    "type": "method",
    "file_path": "pytorch\\benchmarks\\operator_benchmark\\benchmark_core.py",
    "ast_data": "FunctionDef name:_launch_forward arg:self arg:test_case arg:iters arg:print_per_iter arguments arg arg arg arg Assign Compare Assign If Assign Assign Call Call Return return:yes"
  },
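The `_launch_forward` entry above times a single pass with `timeit`; this is a standalone sketch of the same pattern (the `run_forward` workload here is a stand-in for the benchmark's test case).

```python
# Time one invocation of a callable, binding its arguments with
# functools.partial exactly as the harness does (number=1 -> one pass).
import functools
import timeit

def run_forward(iters):
    total = 0
    for _ in range(iters):
        total += 1
    return total

elapsed = timeit.timeit(functools.partial(run_forward, 10_000), number=1)
print(f"forward time: {elapsed:.6f} s")
```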
  {
    "library": "scrapy",
    "name": "follow_all",
    "source_code": "def follow_all(self, urls: Iterable[str | Link], callback: CallbackT | None=None, method: str='GET', headers: Mapping[AnyStr, Any] | Iterable[tuple[AnyStr, Any]] | None=None, body: bytes | str | None=None, cookies: CookiesT | None=None, meta: dict[str, Any] | None=None, encoding: str | None='utf-8', priority: int=0, dont_filter: bool=False, errback: Callable[[Failure], Any] | None=None, cb_kwargs: dict[str, Any] | None=None, flags: list[str] | None=None) -> Iterable[Request]:\n    if not hasattr(urls, '__iter__'):\n        raise TypeError(\"'urls' argument must be an iterable\")\n    return (self.follow(url=url, callback=callback, method=method, headers=headers, body=body, cookies=cookies, meta=meta, encoding=encoding, priority=priority, dont_filter=dont_filter, errback=errback, cb_kwargs=cb_kwargs, flags=flags) for url in urls)",
    "docstring": ".. versionadded:: 2.0 Return an iterable of :class: instances to follow all links in `~scrapy.link.Link~.TextResponse~.TextResponse.follow_all` method which supports selectors in addition to absolute/relative URLs and Link objects.",
    "type": "method",
    "file_path": "scrapy\\scrapy\\http\\response\\__init__.py",
    "ast_data": "FunctionDef name:follow_all arg:self arg:urls arg:callback arg:method arg:headers arg:body arg:cookies arg:meta arg:encoding arg:priority arg:dont_filter arg:errback arg:cb_kwargs arg:flags arguments arg arg arg arg arg arg arg arg arg arg arg arg arg arg If Call Raise Call Return return:yes Call"
  },
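A hedged sketch of `Response.follow_all` inside a spider callback; the spider name, start URL, and CSS selector are placeholders.

```python
# follow_all returns a generator of Request objects, one per URL;
# yielding from it schedules every link with the given callback.
import scrapy

class CatalogSpider(scrapy.Spider):
    name = "catalog"
    start_urls = ["https://example.com/catalog"]  # placeholder URL

    def parse(self, response):
        # Relative URLs returned by the selector are accepted as-is.
        links = response.css("a.next::attr(href)").getall()
        yield from response.follow_all(links, callback=self.parse)
```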
  {
    "library": "pandas",
    "name": "name",
    "source_code": "@property\ndef name(self) -> str:\n    return self._dtype.name",
    "docstring": "A bit-width name for this data-type.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\dtypes\\dtypes.py",
    "ast_data": "FunctionDef name:name arg:self arguments arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "resnet18",
    "source_code": "def resnet18(pretrained=False, progress=True, **kwargs):\n    return _resnet('resnet18', BasicBlock, [2, 2, 2, 2], pretrained, progress, **kwargs)",
    "docstring": "ResNet-18 model from _ Args: pretrained (bool): If True, returns a model pre-trained on ImageNet progress (bool): If True, displays a progress bar of the download to stderr",
    "type": "function",
    "file_path": "pytorch\\benchmarks\\functional_autograd_benchmark\\torchvision_models.py",
    "ast_data": "FunctionDef name:resnet18 arg:pretrained arg:progress arguments arg arg arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "active_fake_mode",
    "source_code": "def active_fake_mode():\n    from torch._subclasses.fake_tensor import FakeTensorMode\n    from torch.utils._python_dispatch import _get_current_dispatch_mode_stack\n    for _, m in enumerate(reversed(_get_current_dispatch_mode_stack())):\n        if isinstance(m, FakeTensorMode):\n            return m\n    return None",
    "docstring": "Inspects the dispatch mode stack for an active fake mode and returns it. Returns None if no fake mode is active.",
    "type": "function",
    "file_path": "pytorch\\torch\\_guards.py",
    "ast_data": "FunctionDef name:active_fake_mode arguments For Call Call Call If Call Return return:yes Return return:no"
  },
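A sketch of how `active_fake_mode` behaves inside and outside a `FakeTensorMode` context; this assumes a PyTorch build where both internals are importable (they are private APIs).

```python
from torch._guards import active_fake_mode
from torch._subclasses.fake_tensor import FakeTensorMode

print(active_fake_mode())  # None: nothing on the dispatch mode stack yet

with FakeTensorMode():
    # Entering the context pushes the mode onto the dispatch mode stack,
    # so the helper can find and return it.
    print(type(active_fake_mode()).__name__)  # FakeTensorMode
```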
  {
    "library": "pytorch",
    "name": "post_unflatten_transform",
    "source_code": "@abstractmethod\ndef post_unflatten_transform(self, tensor: torch.Tensor, param_extension: Any) -> torch.Tensor:\n    ...",
    "docstring": "E.g. converting local tensor to ``.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\fsdp\\_fsdp_extensions.py",
    "ast_data": "FunctionDef name:post_unflatten_transform arg:self arg:tensor arg:param_extension arguments arg arg arg"
  },
  {
    "library": "tensorflow",
    "name": "_row_partitions",
    "source_code": "@property\ndef _row_partitions(self):\n    return self.row_partitions",
    "docstring": "Deprecated form of row_partitions.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\structured\\structured_tensor.py",
    "ast_data": "FunctionDef name:_row_partitions arg:self arguments arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "join_hook",
    "source_code": "def join_hook(self, **kwargs):\n    divide_by_initial_world_size = kwargs.get('divide_by_initial_world_size', True)\n    return _DDPJoinHook(self, divide_by_initial_world_size=divide_by_initial_world_size)",
    "docstring": "DDP join hook enables training on uneven inputs by mirroring communications in forward and backward passes. Arguments: kwargs (dict): a :class: containing any keyword arguments to modify the behavior of the join hook at run time; all :class: instances sharing the same join context manager are forwarded the same value for ``.",
    "type": "method",
    "file_path": "pytorch\\torch\\nn\\parallel\\distributed.py",
    "ast_data": "FunctionDef name:join_hook arg:self arguments arg arg Assign Call Return return:yes Call"
  },
  {
    "library": "django",
    "name": "BaseListView",
    "source_code": "class BaseListView(MultipleObjectMixin, View):\n\n    def get(self, request, *args, **kwargs):\n        self.object_list = self.get_queryset()\n        allow_empty = self.get_allow_empty()\n        if not allow_empty:\n            if self.get_paginate_by(self.object_list) is not None and hasattr(self.object_list, 'exists'):\n                is_empty = not self.object_list.exists()\n            else:\n                is_empty = not self.object_list\n            if is_empty:\n                raise Http404(_('Empty list and “%(class_name)s.allow_empty” is False.') % {'class_name': self.__class__.__name__})\n        context = self.get_context_data()\n        return self.render_to_response(context)",
    "docstring": "Base view for displaying a list of objects. This requires subclassing to provide a response mixin.",
    "type": "class",
    "file_path": "django\\django\\views\\generic\\list.py",
    "ast_data": "ClassDef name:BaseListView FunctionDef name:get arg:self arg:request arguments arg arg arg arg Assign Call Assign Call If If BoolOp Compare Call Call Assign Call Assign If Raise Call Call Assign Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "traverse",
    "source_code": "def traverse(root, visit):\n    _traverse_internal(root, visit, [], '')",
    "docstring": "Recursively enumerate all members of . Similar to the Python library function . Traverses the tree of Python objects starting with , depth first. Parent-child relationships in the tree are defined by membership in modules or classes. The function is called with arguments for each module or class found in the tree of python objects starting with . is a string containing the name with which is reachable from the current context. For example, if is a local class called which contains a class , will be called with ). If is not a module or class, is never called. never descends into built-in modules. , a list of pairs are determined by . To avoid visiting parts of the tree, can be modified in place, using or slice assignment. Cycles (determined by reference equality, ) stop the traversal. A stack of objects is kept to find cycles. Objects forming cycles may appear in , but will not be called with any object as which is already in the stack. Traversing system modules can take a long time, it is advisable to pass a callable which denylists such modules. Args: root: A python object with which to start the traversal. visit: A function taking arguments . Will be called for each object found in the traversal.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\tools\\common\\traverse.py",
    "ast_data": "FunctionDef name:traverse arg:root arg:visit arguments arg arg Call"
  },
  {
    "library": "pytorch",
    "name": "cross_product_configs",
    "source_code": "def cross_product_configs(**configs):\n    _validate(configs)\n    configs_attrs_list = []\n    for key, values in configs.items():\n        tmp_results = [{key: value} for value in values]\n        configs_attrs_list.append(tmp_results)\n    generated_configs = list(itertools.product(*configs_attrs_list))\n    return generated_configs",
    "docstring": "Given configs from users, we want to generate different combinations of those configs For example, given M = ((1, 2), N = (4, 5)), we will generate (({'M': 1}, {'N' : 4}), ({'M': 1}, {'N' : 5}), ({'M': 2}, {'N' : 4}), ({'M': 2}, {'N' : 5}))",
    "type": "function",
    "file_path": "pytorch\\benchmarks\\operator_benchmark\\benchmark_utils.py",
    "ast_data": "FunctionDef name:cross_product_configs arguments arg Call Assign For Call Assign Call Assign Call Call Return return:yes"
  },
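A standalone re-implementation of the cross product described in the `cross_product_configs` docstring, so the example output can be verified without the benchmark harness.

```python
import itertools

def cross_product(**configs):
    # One list of single-key dicts per config key, then the cartesian product.
    per_key = [[{key: value} for value in values]
               for key, values in configs.items()]
    return list(itertools.product(*per_key))

print(cross_product(M=(1, 2), N=(4, 5)))
# [({'M': 1}, {'N': 4}), ({'M': 1}, {'N': 5}),
#  ({'M': 2}, {'N': 4}), ({'M': 2}, {'N': 5})]
```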
  {
    "library": "authlib",
    "name": "validate_userinfo_signing_alg_values_supported",
    "source_code": "def validate_userinfo_signing_alg_values_supported(self):\n    validate_array_value(self, 'userinfo_signing_alg_values_supported')",
    "docstring": "OPTIONAL. JSON array containing a list of the JWS signing algorithms (alg values) [JWA] supported by the UserInfo Endpoint to encode the Claims in a JWT. The value none MAY be included.",
    "type": "method",
    "file_path": "authlib\\authlib\\oidc\\discovery\\models.py",
    "ast_data": "FunctionDef name:validate_userinfo_signing_alg_values_supported arg:self arguments arg Call"
  },
  {
    "library": "matplotlib",
    "name": "set_array",
    "source_code": "def set_array(self, A):\n    if A is None:\n        self._A = None\n        return\n    A = cbook.safe_masked_invalid(A, copy=True)\n    if not np.can_cast(A.dtype, float, 'same_kind'):\n        raise TypeError(f'Image data of dtype {A.dtype} cannot be converted to float')\n    self._A = A\n    if not self.norm.scaled():\n        self._colorizer.autoscale_None(A)",
    "docstring": "Set the value array from array-like *A*. Parameters ---------- A : array-like or None The values that are mapped to colors. The base class does not make any assumptions on the dimensionality and shape of the value array *A*.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\colorizer.py",
    "ast_data": "FunctionDef name:set_array arg:self arg:A arguments arg arg If Compare Assign Return return:no Assign Call If Call Raise Call Assign If Call Call"
  },
  {
    "library": "tensorflow",
    "name": "sharded_filename",
    "source_code": "def sharded_filename(filename_tensor: tensor_lib.Tensor, shard: int, num_shards: tensor_lib.Tensor) -> tensor_lib.Tensor:\n    return gen_io_ops.sharded_filename(filename_tensor, shard, num_shards)",
    "docstring": "Append sharding information to a filename. Args: filename_tensor: A string tensor. shard: Integer. The shard for the filename. num_shards: An int Tensor for the number of shards. Returns: A string tensor.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\checkpoint\\functional_saver.py",
    "ast_data": "FunctionDef name:sharded_filename arg:filename_tensor arg:shard arg:num_shards arguments arg arg arg Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "_get_function_name",
    "source_code": "def _get_function_name(self):\n    if hasattr(self.func, '__name__'):\n        return self.func.__name__\n    if isinstance(self.func, partial):\n        return self.func.func.__name__\n    return f'{self.func.__class__.__name__}(...)'",
    "docstring": "Get the name display of the used in HTML representation.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\preprocessing\\_function_transformer.py",
    "ast_data": "FunctionDef name:_get_function_name arg:self arguments arg If Call Return return:yes If Call Return return:yes Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "sym_not",
    "source_code": "@onnx_impl(torch.sym_not, trace_only=True)\ndef sym_not(self: BOOL) -> BOOL:\n    return op.Not(self)",
    "docstring": "sym_not(SymBool self) -> SymBool",
    "type": "function",
    "file_path": "pytorch\\torch\\onnx\\_internal\\exporter\\_torchlib\\ops\\symops.py",
    "ast_data": "FunctionDef name:sym_not arg:self arguments arg Return return:yes Call Call"
  },
  {
    "library": "cherrypy",
    "name": "get_instances",
    "source_code": "def get_instances(self):\n    return self._exceptions[:]",
    "docstring": "Return a list of seen exception instances.",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\process\\wspbus.py",
    "ast_data": "FunctionDef name:get_instances arg:self arguments arg Return return:yes"
  },
  {
    "library": "pandas",
    "name": "_get_engine_target",
    "source_code": "def _get_engine_target(self) -> ArrayLike:\n    vals = self._values\n    if isinstance(vals, StringArray):\n        return vals._ndarray\n    if isinstance(vals, ArrowExtensionArray) and self.dtype.kind in 'Mm':\n        import pyarrow as pa\n        pa_type = vals._pa_array.type\n        if pa.types.is_timestamp(pa_type):\n            vals = vals._to_datetimearray()\n            return vals._ndarray.view('i8')\n        elif pa.types.is_duration(pa_type):\n            vals = vals._to_timedeltaarray()\n            return vals._ndarray.view('i8')\n    if type(self) is Index and isinstance(self._values, ExtensionArray) and (not isinstance(self._values, BaseMaskedArray)) and (not (isinstance(self._values, ArrowExtensionArray) and is_numeric_dtype(self.dtype) and (self.dtype.kind != 'O'))):\n        return self._values.astype(object)\n    return vals",
    "docstring": "Get the ndarray or ExtensionArray that we can pass to the IndexEngine constructor.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\indexes\\base.py",
    "ast_data": "FunctionDef name:_get_engine_target arg:self arguments arg Assign If Call Return return:yes If BoolOp Call Compare Assign If Call Assign Call Return return:yes Call If Call Assign Call Return return:yes Call If BoolOp Compare Call Call Call BoolOp Call Call Compare Return return:yes Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "compute",
    "source_code": "@abc.abstractmethod\ndef compute(self, batch_values, accumulator=None):\n    pass",
    "docstring": "Compute a step in this computation, returning a new accumulator. This method computes a step of the computation described by this Combiner. If an accumulator is passed, the data in that accumulator is also used; so compute(batch_values) results in f(batch_values), while compute(batch_values, accumulator) results in merge(f(batch_values), accumulator). Args: batch_values: A list of ndarrays representing the values of the inputs for this step of the computation. accumulator: the current accumulator. Can be None. Returns: An accumulator that includes the passed batch of inputs.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\base_preprocessing_layer.py",
    "ast_data": "FunctionDef name:compute arg:self arg:batch_values arg:accumulator arguments arg arg arg"
  },
  {
    "library": "numpy",
    "name": "find_names",
    "source_code": "def find_names(module, names_dict):\n    patterns = ['^\\\\s\\\\s\\\\s([a-z_0-9A-Z]+)(\\\\s+-+.*)?$', '^\\\\.\\\\. (?:data|function)::\\\\s*([a-z_0-9A-Z]+)\\\\s*$']\n    if module.__name__ == 'scipy.constants':\n        patterns += ['^``([a-z_0-9A-Z]+)``']\n    patterns = [re.compile(pattern) for pattern in patterns]\n    module_name = module.__name__\n    for line in module.__doc__.splitlines():\n        res = re.search('^\\\\s*\\\\.\\\\. (?:currentmodule|module):: ([a-z0-9A-Z_.]+)\\\\s*$', line)\n        if res:\n            module_name = res.group(1)\n            continue\n        for pattern in patterns:\n            res = re.match(pattern, line)\n            if res is not None:\n                name = res.group(1)\n                entry = f'{module_name}.{name}'\n                names_dict.setdefault(module_name, set()).add(name)\n                break",
    "docstring": "Finds the occurrences of function names, special directives like data and functions and scipy constants in the docstrings of . The following patterns are searched for: * 3 spaces followed by function name, and maybe some spaces, some dashes, and an explanation; only function names listed in refguide are formatted like this (mostly, there may be some false positives * special directives, such as data and function * (scipy.constants only): quoted list The is updated by reference and accessible in calling method Parameters ---------- module : ModuleType The module, whose docstrings is to be searched names_dict : dict Dictionary which contains module name as key and a set of found function names and directives as value Returns ------- None",
    "type": "function",
    "file_path": "numpy\\tools\\refguide_check.py",
    "ast_data": "FunctionDef name:find_names arg:module arg:names_dict arguments arg arg Assign If Compare Assign Call Assign For Call Assign Call If Assign Call For Assign Call If Compare Assign Call Assign Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "mark_as_return",
    "source_code": "def mark_as_return(self, tensor):\n    if isinstance(tensor, indexed_slices.IndexedSlices):\n        values = array_ops.identity(tensor.values)\n        indices = array_ops.identity(tensor.indices)\n        self._returned_tensors.add(indices)\n        self._returned_tensors.add(values)\n        return indexed_slices.IndexedSlices(values, indices, dense_shape=tensor.dense_shape)\n    elif isinstance(tensor, sparse_tensor.SparseTensor):\n        values = array_ops.identity(tensor.values)\n        indices = array_ops.identity(tensor.indices)\n        self._returned_tensors.add(indices)\n        self._returned_tensors.add(values)\n        return sparse_tensor.SparseTensor(indices, values, dense_shape=tensor.dense_shape)\n    elif isinstance(tensor, tensor_array_ops.TensorArray):\n        flow = array_ops.identity(tensor.flow)\n        self._returned_tensors.add(flow)\n        return tensor_array_ops.build_ta_with_new_flow(tensor, flow)\n    tensor = array_ops.identity(tensor)\n    self._returned_tensors.add(tensor)\n    return tensor",
    "docstring": "Acts like identity but marks the as a return value. This will possibly return a copy of the . Usage: Args: tensor: the to be marked Returns: a copy of the .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\auto_control_deps.py",
    "ast_data": "FunctionDef name:mark_as_return arg:self arg:tensor arguments arg arg If Call Assign Call Assign Call Call Call Return return:yes Call If Call Assign Call Assign Call Call Call Return return:yes Call If Call Assign Call Call Return return:yes Call Assign Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_time_step",
    "source_code": "def _time_step(time, output_ta_t, state):\n    if in_graph_mode:\n        input_t = tuple((ta.read(time) for ta in input_ta))\n        for input_, shape in zip(input_t, inputs_got_shape):\n            input_.set_shape(shape[1:])\n    else:\n        input_t = tuple((ta[time.numpy()] for ta in input_ta))\n    input_t = nest.pack_sequence_as(structure=inputs, flat_sequence=input_t)\n    call_cell = lambda: cell(input_t, state)\n    if sequence_length is not None:\n        output, new_state = _rnn_step(time=time, sequence_length=sequence_length, min_sequence_length=min_sequence_length, max_sequence_length=max_sequence_length, zero_output=zero_output, state=state, call_cell=call_cell, state_size=state_size, skip_conditionals=True)\n    else:\n        output, new_state = call_cell()\n    output = nest.flatten(output)\n    if in_graph_mode:\n        output_ta_t = tuple((ta.write(time, out) for ta, out in zip(output_ta_t, output)))\n    else:\n        for ta, out in zip(output_ta_t, output):\n            ta[time.numpy()] = out\n    return (time + 1, output_ta_t, new_state)",
    "docstring": "Take a time step of the dynamic RNN. Args: time: int32 scalar Tensor. output_ta_t: List of s that represent the output. state: nested tuple of vector tensors that represent the state. Returns: The tuple (time + 1, output_ta_t with updated flow, new_state).",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\rnn.py",
    "ast_data": "FunctionDef name:_time_step arg:time arg:output_ta_t arg:state arguments arg arg arg If Assign Call Call For Call Call Assign Call Call Assign Call Assign arguments Call If Compare Assign Call Assign Call Assign Call If Assign Call Call Call For Call Assign Call Return return:yes"
  },
  {
    "library": "scipy",
    "name": "Schwefel01",
    "source_code": "class Schwefel01(Benchmark):\n    change_dimensionality = True\n\n    def __init__(self, dimensions=2):\n        Benchmark.__init__(self, dimensions)\n        self._bounds = list(zip([-100.0] * self.N, [100.0] * self.N))\n        self.custom_bounds = ([-4.0, 4.0], [-4.0, 4.0])\n        self.global_optimum = [[0.0 for _ in range(self.N)]]\n        self.fglob = 0.0\n\n    def fun(self, x, *args):\n        self.nfev += 1\n        alpha = sqrt(pi)\n        return sum(x ** 2.0) ** alpha",
    "docstring": "Schwefel 1 objective function. This class defines the Schwefel 1 [1]_ global optimization problem. This is a unimodal minimization problem defined as follows: .. math:: f_{\\text{Schwefel01}}(x) = \\left(\\sum_{i=1}^n x_i^2 \\right)^{\\alpha} Where, in this exercise, :math:. Here, :math: represents the number of dimensions and :math: for :math:. *Global optimum*: :math: for :math: for :math: .. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions For Global Optimization Problems Int. Journal of Mathematical Modelling and Numerical Optimisation, 2013, 4, 150-194.",
    "type": "class",
    "file_path": "scipy\\benchmarks\\benchmarks\\go_benchmark_functions\\go_funcs_S.py",
    "ast_data": "ClassDef name:Schwefel01 Assign FunctionDef name:__init__ arg:self arg:dimensions arguments arg arg Call Assign Call Call Assign Assign Call Assign FunctionDef name:fun arg:self arg:x arguments arg arg arg Assign Call Return return:yes Call"
  },
  {
    "library": "kornia",
    "name": "get_sobel_kernel_3x3",
    "source_code": "def get_sobel_kernel_3x3(*, device: Optional[Device]=None, dtype: Optional[Dtype]=None) -> Tensor:\n    return tensor([[-1.0, 0.0, 1.0], [-2.0, 0.0, 2.0], [-1.0, 0.0, 1.0]], device=device, dtype=dtype)",
    "docstring": "Return a sobel kernel of 3x3.",
    "type": "function",
    "file_path": "kornia\\kornia\\filters\\kernels.py",
    "ast_data": "FunctionDef name:get_sobel_kernel_3x3 arguments arg arg Return return:yes Call"
  },
  {
    "library": "django",
    "name": "srs_output",
    "source_code": "def srs_output(func, argtypes):\n    func.argtypes = argtypes\n    func.restype = c_void_p\n    func.errcheck = check_srs\n    return func",
    "docstring": "Generate a ctypes prototype for the given function with the given C arguments that returns a pointer to an OGR Spatial Reference System.",
    "type": "function",
    "file_path": "django\\django\\contrib\\gis\\gdal\\prototypes\\generation.py",
    "ast_data": "FunctionDef name:srs_output arg:func arg:argtypes arguments arg arg Assign Assign Assign Return return:yes"
  },
  {
    "library": "seaborn",
    "name": "limit",
    "source_code": "def limit(self, **limits: tuple[Any, Any]) -> Plot:\n    new = self._clone()\n    new._limits.update(limits)\n    return new",
    "docstring": "Control the range of visible data. Keywords correspond to variables defined in the plot, and values are a tuple (where either can be to leave unset). Limits apply only to the axis; data outside the visible range are still used for any stat transforms and added to the plot. Behavior for non-coordinate variables is currently undefined. Examples -------- .. include:: ../docstrings/objects.Plot.limit.rst",
    "type": "method",
    "file_path": "seaborn\\seaborn\\_core\\plot.py",
    "ast_data": "FunctionDef name:limit arg:self arguments arg arg Assign Call Call Return return:yes"
  },
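A sketch of `Plot.limit` on the seaborn objects interface (assumes seaborn >= 0.12; `tips` is one of seaborn's bundled example datasets, fetched over the network on first use).

```python
import seaborn as sns
import seaborn.objects as so

tips = sns.load_dataset("tips")
p = (
    so.Plot(tips, x="total_bill", y="tip")
    .add(so.Dots())
    .limit(x=(0, 30), y=(None, 8))  # None leaves that side of the range unset
)
# p.show()  # render in an interactive session
```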
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, y_tensor=None):\n    self._uuid = uuid.uuid4().hex\n    _gradient_debuggers[self._uuid] = self\n    self._gradient_tensors = {}\n    self._y_tensor = y_tensor\n    self._graph = None\n    if y_tensor:\n        self._graph = y_tensor.graph\n    self._is_active_context = False",
    "docstring": "Constructor of GradientsDebugger. Args: y_tensor: optional: the to be differentiated, i.e., the tensor on the numerator of the differentiation.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\debug\\lib\\debug_gradients.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:y_tensor arguments arg arg Assign Call Assign Assign Assign Assign If Assign Assign"
  },
  {
    "library": "tensorflow",
    "name": "_prune_invalid_ids",
    "source_code": "def _prune_invalid_ids(sparse_ids, sparse_weights):\n    is_id_valid = math_ops.greater_equal(sparse_ids.values, 0)\n    if sparse_weights is not None:\n        is_id_valid = math_ops.logical_and(is_id_valid, array_ops.ones_like(sparse_weights.values, dtype=dtypes.bool))\n    sparse_ids = sparse_ops.sparse_retain(sparse_ids, is_id_valid)\n    if sparse_weights is not None:\n        sparse_weights = sparse_ops.sparse_retain(sparse_weights, is_id_valid)\n    return (sparse_ids, sparse_weights)",
    "docstring": "Prune invalid IDs (< 0) from the input ids and weights.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\feature_column\\feature_column_v2.py",
    "ast_data": "FunctionDef name:_prune_invalid_ids arg:sparse_ids arg:sparse_weights arguments arg arg Assign Call If Compare Assign Call Call Assign Call If Compare Assign Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "add_quant_dequant",
    "source_code": "def add_quant_dequant(module):\n    if has_no_children_ignoring_parametrizations(module) and hasattr(module, 'qconfig') and module.qconfig:\n        return QuantWrapper(module)\n    for name, child in module.named_children():\n        module._modules[name] = add_quant_dequant(child)\n    return module",
    "docstring": "Wrap the leaf child module in QuantWrapper if it has a valid qconfig Note that this function will modify the children of module inplace and it can return a new module which wraps the input module as well. Args: module: input module with qconfig attributes for all the leaf modules that we want to quantize Return: Either the inplace modified module with submodules wrapped in based on qconfig or a new module which wraps the input module, the latter case only happens when the input module is a leaf module and we want to quantize it.",
    "type": "function",
    "file_path": "pytorch\\torch\\ao\\quantization\\quantize.py",
    "ast_data": "FunctionDef name:add_quant_dequant arg:module arguments arg If BoolOp Call Call Return return:yes Call For Call Assign Call Return return:yes"
  },
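A minimal sketch of `add_quant_dequant` on a leaf module, assuming PyTorch's eager-mode quantization API (`torch.ao.quantization`).

```python
import torch.nn as nn
from torch.ao.quantization import add_quant_dequant, default_qconfig

model = nn.Linear(4, 4)          # a leaf module (no children)
model.qconfig = default_qconfig  # a valid qconfig makes it eligible
wrapped = add_quant_dequant(model)
print(type(wrapped).__name__)    # QuantWrapper (wraps the Linear)
```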
  {
    "library": "scrapy",
    "name": "__init__",
    "source_code": "def __init__(self, uri: str, *, feed_options: dict[str, Any] | None=None):\n    pass",
    "docstring": "Initialize the storage with the parameters given in the URI and the feed-specific options (see :setting:)",
    "type": "method",
    "file_path": "scrapy\\scrapy\\extensions\\feedexport.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:uri arguments arg arg arg"
  },
  {
    "library": "tensorflow",
    "name": "row_splits",
    "source_code": "@property\ndef row_splits(self):\n    return self._row_partition.row_splits()",
    "docstring": "The row-split indices for this ragged tensor's . specifies where the values for each row begin and end in . In particular, the values for row are stored in the slice . Returns: A 1-D integer with shape . The returned tensor is non-empty, and is sorted in ascending order. is zero, and is equal to . #### Example: >>> rt = tf.ragged.constant([[3, 1, 4, 1], [], [5, 9, 2], [6], []]) >>> print(rt.row_splits) # indices of row splits in rt.values tf.Tensor([0 4 4 7 8 8], shape=(6,), dtype=int64)",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\ragged_tensor.py",
    "ast_data": "FunctionDef name:row_splits arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_set_trainable_state",
    "source_code": "def _set_trainable_state(self, trainable_state):\n    for layer in self._flatten_layers():\n        if layer in trainable_state:\n            layer.trainable = trainable_state[layer]",
    "docstring": "Set state for each sublayer.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\base_layer.py",
    "ast_data": "FunctionDef name:_set_trainable_state arg:self arg:trainable_state arguments arg arg For Call If Compare Assign"
  },
  {
    "library": "numpy",
    "name": "set_options",
    "source_code": "def set_options(self, **options):\n    for key, value in options.items():\n        if key in self.options:\n            self.options[key] = value\n        else:\n            raise ValueError('Unknown option: ' + key)",
    "docstring": "Configure Configuration instance. The following options are available: - ignore_setup_xxx_py - assume_default_configuration - delegate_options_to_subpackages - quiet",
    "type": "method",
    "file_path": "numpy\\numpy\\distutils\\misc_util.py",
    "ast_data": "FunctionDef name:set_options arg:self arguments arg arg For Call If Compare Assign Raise Call"
  },
  {
    "library": "tensorflow",
    "name": "conv",
    "source_code": "def conv(lhs, rhs, window_strides, padding, lhs_dilation, rhs_dilation, dimension_numbers, feature_group_count=1, precision_config=None, preferred_element_type=None, name=None, use_v2=False, batch_group_count=1):\n    precision_config_proto = ''\n    if precision_config:\n        precision_config_proto = precision_config.SerializeToString()\n    needs_v2 = preferred_element_type or lhs.dtype != rhs.dtype or batch_group_count > 1\n    if preferred_element_type is None:\n        preferred_element_type = np_utils.result_type(lhs.dtype, rhs.dtype)\n    if needs_v2 or use_v2:\n        return gen_xla_ops.xla_conv_v2(lhs, rhs, window_strides=window_strides, padding=padding, lhs_dilation=lhs_dilation, rhs_dilation=rhs_dilation, feature_group_count=feature_group_count, batch_group_count=batch_group_count, dimension_numbers=dimension_numbers.SerializeToString(), precision_config=precision_config_proto, preferred_element_type=preferred_element_type, name=name)\n    return gen_xla_ops.xla_conv(lhs, rhs, window_strides=window_strides, padding=padding, lhs_dilation=lhs_dilation, rhs_dilation=rhs_dilation, feature_group_count=feature_group_count, dimension_numbers=dimension_numbers.SerializeToString(), precision_config=precision_config_proto, name=name)",
    "docstring": "Wraps the XLA ConvGeneralDilated operator. ConvGeneralDilated is the most general form of XLA convolution and is documented at Args: lhs: the input tensor rhs: the kernel tensor window_strides: the inter-window strides padding: the padding to apply at the start and end of each input dimensions lhs_dilation: dilation to apply between input elements rhs_dilation: dilation to apply between kernel elements dimension_numbers: a proto. feature_group_count: number of feature groups for grouped convolution. precision_config: a proto. preferred_element_type: the result . name: an optional name for the operator. use_v2: an optional request to use the XlaConvV2 op even if not necessary. batch_group_count: number of batch groups or grouped filters. Returns: A tensor representing the output of the convolution.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\compiler\\tf2xla\\python\\xla.py",
    "ast_data": "FunctionDef name:conv arg:lhs arg:rhs arg:window_strides arg:padding arg:lhs_dilation arg:rhs_dilation arg:dimension_numbers arg:feature_group_count arg:precision_config arg:preferred_element_type arg:name arg:use_v2 arg:batch_group_count arguments arg arg arg arg arg arg arg arg arg arg arg arg arg Assign If Assign Call Assign BoolOp Compare Compare If Compare Assign Call If BoolOp Return return:yes Call Call Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_get_name_and_module",
    "source_code": "def _get_name_and_module(full_name):\n    name_segments = full_name.split('.')\n    return ('.'.join(name_segments[:-1]), name_segments[-1])",
    "docstring": "Split full_name into module and short name. Args: full_name: Full name of symbol that includes module. Returns: Full module name and short symbol name.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\tools\\api\\generator\\create_python_api.py",
    "ast_data": "FunctionDef name:_get_name_and_module arg:full_name arguments arg Assign Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "copy_to_mesh",
    "source_code": "@tf_export('experimental.dtensor.copy_to_mesh', v1=[])\ndef copy_to_mesh(tensor: Any, layout: layout_lib.Layout, source_layout: Optional[layout_lib.Layout]=None) -> tensor_lib.Tensor:\n    del source_layout\n    return relayout(tensor, layout)",
    "docstring": "Copies a tf.Tensor onto the DTensor device with the given layout. Copies a regular tf.Tensor onto the DTensor device. Use the mesh attached to as target mesh. This method currently only supports replicated layouts, or one-to-one copies for sharded layouts. Args: tensor: A regular tf.Tensor to be copied as a DTensor. layout: Target layout (and mesh) for the result DTensor. source_layout: Source layout of the tensor before copy. This argument is deprecated. Returns: A DTensor on the DTensor device with the given layout.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\dtensor\\python\\api.py",
    "ast_data": "FunctionDef name:copy_to_mesh arg:tensor arg:layout arg:source_layout arguments arg arg arg Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "imag",
    "source_code": "@tf_export('math.imag', v1=['math.imag', 'imag'])\n@dispatch.register_unary_elementwise_api\n@dispatch.add_dispatch_support\n@deprecation.deprecated_endpoints('imag')\ndef imag(input, name=None):\n    with ops.name_scope(name, 'Imag', [input]) as name:\n        input = ops.convert_to_tensor(input, name='input')\n        if input.dtype.is_complex:\n            return gen_math_ops.imag(input, Tout=input.dtype.real_dtype, name=name)\n        else:\n            return array_ops.zeros_like(input)",
    "docstring": "Returns the imaginary part of a complex (or real) tensor. Given a tensor , this operation returns a tensor of type that is the imaginary part of each element in considered as a complex number. If is real, a tensor of all zeros is returned. For example: Args: input: A . Must be one of the following types: , , , . name: A name for the operation (optional). Returns: A of type or .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\math_ops.py",
    "ast_data": "FunctionDef name:imag arg:input arg:name arguments arg arg With Call Assign Call If Return return:yes Call Return return:yes Call Call Call"
  },
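A runnable sketch of `tf.math.imag` on complex and real inputs (assumes TensorFlow is installed; the values follow the behavior described in the docstring).

```python
import tensorflow as tf

x = tf.constant([-2.25 + 4.75j, 3.25 + 5.75j])
print(tf.math.imag(x))                   # [4.75 5.75]
print(tf.math.imag(tf.constant([1.0])))  # [0.] -- real input yields zeros
```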
  {
    "library": "pytorch",
    "name": "converged",
    "source_code": "def converged(curr_modules, prev_modules, threshold=0.0001):\n    if curr_modules.keys() != prev_modules.keys():\n        raise ValueError('The keys to the given mappings must have the same set of names of modules')\n    summed_norms = torch.tensor(0.0)\n    if None in prev_modules.values():\n        return False\n    for name in curr_modules.keys():\n        curr_weight = get_module_weight(curr_modules[name])\n        prev_weight = get_module_weight(prev_modules[name])\n        difference = curr_weight.sub(prev_weight)\n        summed_norms += torch.norm(difference)\n    return bool(summed_norms < threshold)",
    "docstring": "Test whether modules are converged to a specified threshold. Tests for the summed norm of the differences between each set of modules being less than the given threshold Takes two dictionaries mapping names to modules, the set of names for each dictionary should be the same, looping over the set of names, for each name take the difference between the associated modules in each dictionary",
    "type": "function",
    "file_path": "pytorch\\torch\\ao\\quantization\\_equalize.py",
    "ast_data": "FunctionDef name:converged arg:curr_modules arg:prev_modules arg:threshold arguments arg arg arg If Compare Call Call Raise Call Assign Call If Compare Call Return return:yes For Call Assign Call Assign Call Assign Call Call Return return:yes Call Compare"
  },
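The `converged` entry above depends on module mappings and `get_module_weight`; this standalone sketch shows the same summed-norm criterion on plain tensors.

```python
import torch

def summed_norm_converged(curr, prev, threshold=1e-4):
    # Sum the norms of elementwise differences across matching names.
    total = sum(torch.norm(curr[name] - prev[name]) for name in curr)
    return bool(total < threshold)

w = torch.ones(3, 3)
print(summed_norm_converged({"fc": w}, {"fc": w.clone()}))  # True (identical)
print(summed_norm_converged({"fc": w}, {"fc": w + 1.0}))    # False (norm = 3)
```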
  {
    "library": "pandas",
    "name": "_sanitize_str_dtypes",
    "source_code": "def _sanitize_str_dtypes(result: np.ndarray, data, dtype: np.dtype | None, copy: bool) -> np.ndarray:\n    if issubclass(result.dtype.type, str):\n        if not lib.is_scalar(data):\n            if not np.all(isna(data)):\n                data = np.asarray(data, dtype=dtype)\n            if not copy:\n                result = np.asarray(data, dtype=object)\n            else:\n                result = np.array(data, dtype=object, copy=copy)\n    return result",
    "docstring": "Ensure we have a dtype that is supported by pandas.",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\construction.py",
    "ast_data": "FunctionDef name:_sanitize_str_dtypes arg:result arg:data arg:dtype arg:copy arguments arg arg arg arg If Call If Call If Call Call Assign Call If Assign Call Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_global_batch_size",
    "source_code": "@property\ndef _global_batch_size(self):\n    return True",
    "docstring": "and use global batch size. assumes per-replica batching. Returns: Boolean.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\collective_all_reduce_strategy.py",
    "ast_data": "FunctionDef name:_global_batch_size arg:self arguments arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "mark_source_temp",
    "source_code": "def mark_source_temp(self, source: Source) -> None:\n    if source not in self.tempvars:\n        self.tempvars[source] = None",
    "docstring": "Mark a source as a temp variable, so that it can be reused.",
    "type": "method",
    "file_path": "pytorch\\torch\\_dynamo\\codegen.py",
    "ast_data": "FunctionDef name:mark_source_temp arg:self arg:source arguments arg arg If Compare Assign"
  },
  {
    "library": "tensorflow",
    "name": "keyword_args_only",
    "source_code": "def keyword_args_only(func):\n    decorator_utils.validate_callable(func, 'keyword_args_only')\n\n    @functools.wraps(func)\n    def new_func(*args, **kwargs):\n        if args:\n            raise ValueError(f'The function {func.__name__} only accepts keyword arguments. Do not pass positional arguments. Received the following positional arguments: {args}')\n        return func(**kwargs)\n    return new_func",
    "docstring": "Decorator for marking specific function accepting keyword args only. This decorator raises a if the input is called with any non-keyword args. This prevents the caller from providing the arguments in wrong order. Args: func: The function or method needed to be decorated. Returns: Decorated function or method. Raises: ValueError: If is not callable.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\util\\keyword_args.py",
    "ast_data": "FunctionDef name:keyword_args_only arg:func arguments arg Call FunctionDef name:new_func arguments arg arg If Raise Call Return return:yes Call Call Return return:yes"
  },
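A standalone sketch of the `keyword_args_only` contract, re-implemented here so it runs without TensorFlow's internal validation helper.

```python
import functools

def keyword_args_only(func):
    @functools.wraps(func)
    def new_func(*args, **kwargs):
        # Any positional argument triggers the error, mirroring the entry above.
        if args:
            raise ValueError(
                f"{func.__name__} only accepts keyword arguments; "
                f"received positional arguments: {args}")
        return func(**kwargs)
    return new_func

@keyword_args_only
def resize(width, height):
    return (width, height)

print(resize(width=2, height=3))  # (2, 3)
# resize(2, 3) would raise ValueError
```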
  {
    "library": "pytorch",
    "name": "compile_path",
    "source_code": "def compile_path(self, path: Path, top_package_path: Path):\n    if path.is_dir():\n        self.compile_package(path, top_package_path)\n    else:\n        self.compile_file(path, top_package_path)",
    "docstring": "Entry point for compiling a Path object.",
    "type": "method",
    "file_path": "pytorch\\torch\\utils\\_freeze.py",
    "ast_data": "FunctionDef name:compile_path arg:self arg:path arg:top_package_path arguments arg arg arg If Call Call Call"
  },
  {
    "library": "scipy",
    "name": "__call__",
    "source_code": "def __call__(self, dim=None, seed=None):\n    return unitary_group_frozen(dim, seed=seed)",
    "docstring": "Create a frozen (U(N)) n-dimensional unitary matrix distribution. See for more information.",
    "type": "method",
    "file_path": "scipy\\scipy\\stats\\_multivariate.py",
    "ast_data": "FunctionDef name:__call__ arg:self arg:dim arg:seed arguments arg arg arg Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "has_resolvers",
    "source_code": "@property\ndef has_resolvers(self) -> bool:\n    return bool(len(self.resolvers))",
    "docstring": "Return whether we have any extra scope. For example, DataFrames pass Their columns as resolvers during calls to ``. Returns ------- hr : bool",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\computation\\scope.py",
    "ast_data": "FunctionDef name:has_resolvers arg:self arguments arg Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "DistributeOptions",
    "source_code": "@tf_export('data.experimental.DistributeOptions')\nclass DistributeOptions(options_lib.OptionsBase):\n    auto_shard_policy = options_lib.create_option(name='auto_shard_policy', ty=AutoShardPolicy, docstring='The type of sharding to use. See `tf.data.experimental.AutoShardPolicy` for additional information.', default_factory=lambda: AutoShardPolicy.AUTO)\n    num_devices = options_lib.create_option(name='num_devices', ty=int, docstring='The number of devices attached to this input pipeline. This will be automatically set by `MultiDeviceIterator`.')\n\n    def _to_proto(self):\n        pb = dataset_options_pb2.DistributeOptions()\n        pb.auto_shard_policy = AutoShardPolicy._to_proto(self.auto_shard_policy)\n        if self.num_devices is not None:\n            pb.num_devices = self.num_devices\n        return pb\n\n    def _from_proto(self, pb):\n        self.auto_shard_policy = AutoShardPolicy._from_proto(pb.auto_shard_policy)\n        if pb.WhichOneof('optional_num_devices') is not None:\n            self.num_devices = pb.num_devices",
    "docstring": "Represents options for distributed data processing. You can set the distribution options of a dataset through the property of ; the property is an instance of .",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\data\\ops\\options.py",
    "ast_data": "ClassDef name:DistributeOptions Assign Call arguments Assign Call FunctionDef name:_to_proto arg:self arguments arg Assign Call Assign Call If Compare Assign Return return:yes FunctionDef name:_from_proto arg:self arg:pb arguments arg arg Assign Call If Compare Call Assign Call"
  },
  {
    "library": "pandas",
    "name": "subtype",
    "source_code": "@property\ndef subtype(self):\n    return self._subtype",
    "docstring": "The dtype of the Interval bounds. See Also -------- IntervalDtype: An ExtensionDtype for Interval data. Examples -------- >>> dtype = pd.IntervalDtype(subtype=\"int64\", closed=\"both\") >>> dtype.subtype dtype('int64')",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\dtypes\\dtypes.py",
    "ast_data": "FunctionDef name:subtype arg:self arguments arg Return return:yes"
  },
  {
    "library": "kornia",
    "name": "undistort_points_affine",
    "source_code": "def undistort_points_affine(distorted_points_in_camera: Tensor, params: Tensor) -> Tensor:\n    KORNIA_CHECK_SHAPE(distorted_points_in_camera, ['*', '2'])\n    KORNIA_CHECK_SHAPE(params, ['*', '4'])\n    u = distorted_points_in_camera[..., 0]\n    v = distorted_points_in_camera[..., 1]\n    fx, fy = (params[..., 0], params[..., 1])\n    cx, cy = (params[..., 2], params[..., 3])\n    x = (u - cx) / fx\n    y = (v - cy) / fy\n    return ops.stack([x, y], dim=-1)",
    "docstring": "Undistort one or more points from the camera frame into the canonical z=1 plane. .. math:: \\begin{bmatrix} x \\\\ y \\end{bmatrix} = \\begin{bmatrix} u \\\\ v \\end{bmatrix} - \\begin{bmatrix} c_x \\\\ c_y \\end{bmatrix} \\begin{bmatrix} f_x & 0 \\\\ 0 & f_y \\end{bmatrix}^{-1} Args: distorted_points_in_camera: Tensor representing the points to undistort with shape (..., 2). params: Tensor representing the parameters of the affine distortion model with shape (..., 4). Returns: Tensor representing the undistorted points with shape (..., 2). Example: >>> points = torch.tensor([319.5, 239.5]) # center of a 640x480 image >>> params = torch.tensor([600., 600., 319.5, 239.5]) >>> undistort_points_affine(points, params) tensor([0., 0.])",
    "type": "function",
    "file_path": "kornia\\kornia\\geometry\\camera\\distortion_affine.py",
    "ast_data": "FunctionDef name:undistort_points_affine arg:distorted_points_in_camera arg:params arguments arg arg Call Call Assign Assign Assign Assign Assign Assign Return return:yes Call"
  },
  {
    "library": "sphinx",
    "name": "_format_rfc_target",
    "source_code": "def _format_rfc_target(target: str, /) -> str:\n    number, _, anchor = target.partition('#')\n    if anchor:\n        first, _, remaining = anchor.partition('-')\n        if first in {'appendix', 'page', 'section'}:\n            if remaining:\n                return f'RFC {number} {first.title()} {remaining}'\n            return f'RFC {number} {first.title()}'\n    return f'RFC {target}'",
    "docstring": "Takes an RFC number with an optional anchor (like ``) and attempts to produce a human-friendly title for it. We have a set of known anchors that we format nicely, everything else we leave alone.",
    "type": "function",
    "file_path": "sphinx\\sphinx\\roles.py",
    "ast_data": "FunctionDef name:_format_rfc_target arguments arg Assign Call If Assign Call If Compare If Return return:yes Call Return return:yes Call Return return:yes"
  },
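A standalone sketch of the RFC-target formatting above, mirroring the function so its three branches can be exercised outside Sphinx.

```python
def format_rfc_target(target: str) -> str:
    # Split "1234#section-2.1" into the RFC number and the optional anchor.
    number, _, anchor = target.partition("#")
    if anchor:
        first, _, remaining = anchor.partition("-")
        if first in {"appendix", "page", "section"}:
            if remaining:
                return f"RFC {number} {first.title()} {remaining}"
            return f"RFC {number} {first.title()}"
    return f"RFC {target}"

print(format_rfc_target("1234"))                # RFC 1234
print(format_rfc_target("1234#section-2.1"))    # RFC 1234 Section 2.1
print(format_rfc_target("1234#custom-anchor"))  # RFC 1234#custom-anchor (unknown anchor)
```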
  {
    "library": "pytorch",
    "name": "Tanh",
    "source_code": "class Tanh(Module):\n\n    def forward(self, input: Tensor) -> Tensor:\n        return torch.tanh(input)",
    "docstring": "Applies the Hyperbolic Tangent (Tanh) function element-wise. Tanh is defined as: .. math:: \\text{Tanh}(x) = \\tanh(x) = \\frac{\\exp(x) - \\exp(-x)} {\\exp(x) + \\exp(-x)} Shape: - Input: :math:, where :math: means any number of dimensions. - Output: :math:, same shape as the input. .. image:: ../scripts/activation_images/Tanh.png Examples:: >>> m = nn.Tanh() >>> input = torch.randn(2) >>> output = m(input)",
    "type": "class",
    "file_path": "pytorch\\torch\\nn\\modules\\activation.py",
    "ast_data": "ClassDef name:Tanh FunctionDef name:forward arg:self arg:input arguments arg arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "add_op_consumer",
    "source_code": "def add_op_consumer(self, src_op_name, src_slot, dst_op_name, dst_slot):\n    self._op_consumers[src_op_name].append((src_slot, dst_op_name, dst_slot))",
    "docstring": "Add a consuming op for this op. Args: src_op_name: Name of the op of which the output tensor is being consumed. src_slot: 0-based output slot of the op being consumed. dst_op_name: Name of the consuming op (e.g., \"Conv2D_3/BiasAdd\") dst_slot: 0-based input slot of the consuming op that receives the tensor from this op.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\debug\\lib\\debug_events_reader.py",
    "ast_data": "FunctionDef name:add_op_consumer arg:self arg:src_op_name arg:src_slot arg:dst_op_name arg:dst_slot arguments arg arg arg arg arg Call"
  },
  {
    "library": "kornia",
    "name": "to_jax",
    "source_code": "def to_jax() -> ModuleType:\n    return ivy.transpile(kornia, source='torch', target='jax')",
    "docstring": "Convert Kornia to JAX. Transpiles the Kornia library to JAX using [ivy]( The transpilation process occurs lazily, so the transpilation on a given kornia function/class will only occur when it's called or instantiated for the first time. This will make any functions/classes slow when being used for the first time, but any subsequent uses should be as fast as expected. Return: The Kornia library transpiled to JAX Example: .. highlight:: python .. code-block:: python import kornia jax_kornia = kornia.to_jax() import jax input = jax.random.normal(jax.random.key(42), shape=(2, 3, 4, 5)) gray = jax_kornia.color.gray.rgb_to_grayscale(input)",
    "type": "function",
    "file_path": "kornia\\kornia\\transpiler\\transpiler.py",
    "ast_data": "FunctionDef name:to_jax arguments Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_benchmarkFetch",
    "source_code": "def _benchmarkFetch(self, name, target, size, iters):\n    times = []\n    with ops.Graph().as_default():\n        v = variables.Variable(random_ops.random_normal([size]))\n        with session.Session(target) as sess:\n            sess.run(v.initializer)\n            sess.run(v)\n            for _ in range(iters):\n                start_time = time.time()\n                sess.run(v)\n                end_time = time.time()\n                times.append(end_time - start_time)\n    print('%s %d %f' % (name, size, np.median(times)))\n    self.report_benchmark(iters=1, wall_time=np.median(times), name=name)",
    "docstring": "Runs a microbenchmark to measure the cost of fetching a tensor. Reports the median cost of fetching a tensor of * bytes. Args: name: A human-readable name for logging the output. target: The session target to use for the benchmark. size: The number of floating-point numbers to be fetched. iters: The number of iterations to perform.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\client\\session_benchmark.py",
    "ast_data": "FunctionDef name:_benchmarkFetch arg:self arg:name arg:target arg:size arg:iters arguments arg arg arg arg arg Assign With Call Call Assign Call Call With Call Call Call For Call Assign Call Call Assign Call Call Call Call Call Call"
  },
  {
    "library": "matplotlib",
    "name": "_ha_for_angle",
    "source_code": "def _ha_for_angle(self, angle):\n    anchor_at_bottom = self.get_verticalalignment() == 'bottom'\n    if angle <= 10 or 85 <= angle <= 95 or 350 <= angle or (170 <= angle <= 190) or (265 <= angle <= 275):\n        return 'center'\n    elif 10 < angle < 85 or 190 < angle < 265:\n        return 'left' if anchor_at_bottom else 'right'\n    return 'right' if anchor_at_bottom else 'left'",
    "docstring": "Determines horizontal alignment ('ha') for rotation_mode \"xtick\" based on the angle of rotation in degrees and the vertical alignment.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\text.py",
    "ast_data": "FunctionDef name:_ha_for_angle arg:self arg:angle arguments arg arg Assign Compare Call If BoolOp Compare Compare Compare Compare Compare Return return:yes If BoolOp Compare Compare Return return:yes Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "CurrentModuleFilter",
    "source_code": "class CurrentModuleFilter(StackTraceFilter):\n\n    def __init__(self):\n        super().__init__()\n        filter_filename = None\n        outer_f = None\n        f = inspect.currentframe()\n        try:\n            if f is not None:\n                outer_f = f.f_back\n                if outer_f is not None:\n                    filter_filename = inspect.getfile(outer_f)\n            self._filename = filter_filename\n            self._cached_set = None\n        finally:\n            del f\n            del outer_f\n\n    def get_filtered_filenames(self):\n        if self._cached_set is not None:\n            return self._cached_set\n        filtered_filenames = frozenset((self._filename,))\n        if self.parent is not None:\n            filtered_filenames |= self.parent.get_filtered_filenames()\n        self._cached_set = filtered_filenames\n        return filtered_filenames",
    "docstring": "Filters stack frames from the module where this is used (best effort).",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\util\\tf_stack.py",
    "ast_data": "ClassDef name:CurrentModuleFilter FunctionDef name:__init__ arg:self arguments arg Call Call Assign Assign Assign Call Try If Compare Assign If Compare Assign Call Assign Assign FunctionDef name:get_filtered_filenames arg:self arguments arg If Compare Return return:yes Assign Call If Compare Call Assign Return return:yes"
  },
  {
    "library": "scrapy",
    "name": "from_filename",
    "source_code": "def from_filename(self, filename: str) -> type[Response]:\n    mimetype, encoding = self.mimetypes.guess_type(filename)\n    if mimetype and (not encoding):\n        return self.from_mimetype(mimetype)\n    return Response",
    "docstring": "Return the most appropriate Response class from a file name",
    "type": "method",
    "file_path": "scrapy\\scrapy\\responsetypes.py",
    "ast_data": "FunctionDef name:from_filename arg:self arg:filename arguments arg arg Assign Call If BoolOp Return return:yes Call Return return:yes"
  },
  {
    "library": "scipy",
    "name": "_normplot",
    "source_code": "def _normplot(method, x, la, lb, plot=None, N=80):\n    if method == 'boxcox':\n        title = 'Box-Cox Normality Plot'\n        transform_func = boxcox\n    else:\n        title = 'Yeo-Johnson Normality Plot'\n        transform_func = yeojohnson\n    x = np.asarray(x)\n    if x.size == 0:\n        return x\n    if lb <= la:\n        raise ValueError('`lb` has to be larger than `la`.')\n    if method == 'boxcox' and np.any(x <= 0):\n        raise ValueError('Data must be positive.')\n    lmbdas = np.linspace(la, lb, num=N)\n    ppcc = lmbdas * 0.0\n    for i, val in enumerate(lmbdas):\n        z = transform_func(x, lmbda=val)\n        _, (_, _, r) = probplot(z, dist='norm', fit=True)\n        ppcc[i] = r\n    if plot is not None:\n        plot.plot(lmbdas, ppcc, 'x')\n        _add_axis_labels_title(plot, xlabel='$\\\\lambda$', ylabel='Prob Plot Corr. Coef.', title=title)\n    return (lmbdas, ppcc)",
    "docstring": "Compute parameters for a Box-Cox or Yeo-Johnson normality plot, optionally show it. See or for details.",
    "type": "function",
    "file_path": "scipy\\scipy\\stats\\_morestats.py",
    "ast_data": "FunctionDef name:_normplot arg:method arg:x arg:la arg:lb arg:plot arg:N arguments arg arg arg arg arg arg If Compare Assign Assign Assign Assign Assign Call If Compare Return return:yes If Compare Raise Call If BoolOp Compare Call Compare Raise Call Assign Call Assign For Call Assign Call Assign Call Assign If Compare Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "is_categorical_column_weighted",
    "source_code": "def is_categorical_column_weighted(self):\n    if isinstance(self.categorical_column, (fc._WeightedCategoricalColumn, fc_lib.WeightedCategoricalColumn)):\n        return True\n    return False",
    "docstring": "Check if the categorical column of the embedding column is weighted.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\tpu\\feature_column.py",
    "ast_data": "FunctionDef name:is_categorical_column_weighted arg:self arguments arg If Call Return return:yes Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "__exit__",
    "source_code": "def __exit__(self, unused_type, unused_value, unused_traceback):\n    self.close()",
    "docstring": "Make usable with \"with\" statement.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\summary\\writer\\writer.py",
    "ast_data": "FunctionDef name:__exit__ arg:self arg:unused_type arg:unused_value arg:unused_traceback arguments arg arg arg arg Call"
  },
  {
    "library": "pytorch",
    "name": "RemoveNoneInputStep",
    "source_code": "class RemoveNoneInputStep(InputAdaptStep):\n\n    def apply(self, model_args: Sequence[Any], model_kwargs: Mapping[str, Any], model: torch.nn.Module | Callable | torch_export.ExportedProgram | None=None) -> tuple[Sequence[Any], Mapping[str, Any]]:\n        assert not model_kwargs\n        return (tuple((arg for arg in model_args if arg is not None)), {})",
    "docstring": "Remove from arguments. This adapt step assumes `None` inside nested collections.",
    "type": "class",
    "file_path": "pytorch\\torch\\onnx\\_internal\\io_adapter.py",
    "ast_data": "ClassDef name:RemoveNoneInputStep FunctionDef name:apply arg:self arg:model_args arg:model_kwargs arg:model arguments arg arg arg arg Return return:yes Call Compare"
  },
  {
    "library": "pytorch",
    "name": "_module_wrap_policy",
    "source_code": "def _module_wrap_policy(module: nn.Module, recurse: bool, nonwrapped_numel: int, module_classes: set[type[nn.Module]]) -> bool:\n    if recurse:\n        return True\n    return isinstance(module, tuple(module_classes))",
    "docstring": "This auto wrap policy wraps every module that is an instance of any type in ``.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\fsdp\\wrap.py",
    "ast_data": "FunctionDef name:_module_wrap_policy arg:module arg:recurse arg:nonwrapped_numel arg:module_classes arguments arg arg arg arg If Return return:yes Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "_gen_param_group_key",
    "source_code": "def _gen_param_group_key(param_keys: list[str]) -> str:\n    return '/'.join(sorted(param_keys))",
    "docstring": "Concatenate all param keys as a unique indentifier for one param group.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\optim\\named_optimizer.py",
    "ast_data": "FunctionDef name:_gen_param_group_key arg:param_keys arguments arg Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_smallest_integer_by_dtype",
    "source_code": "def _smallest_integer_by_dtype(dt):\n    if not _is_known_dtype(dt):\n        raise TypeError('Unrecognized dtype: {}'.format(dt.name))\n    if _is_known_unsigned_by_dtype(dt):\n        return 0\n    return -1 * _largest_integer_by_dtype(dt)",
    "docstring": "Helper returning the smallest integer exactly representable by dtype.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\distributions\\util.py",
    "ast_data": "FunctionDef name:_smallest_integer_by_dtype arg:dt arguments arg If Call Raise Call Call If Call Return return:yes Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "validate_inclusive",
    "source_code": "def validate_inclusive(inclusive: str | None) -> tuple[bool, bool]:\n    left_right_inclusive: tuple[bool, bool] | None = None\n    if isinstance(inclusive, str):\n        left_right_inclusive = {'both': (True, True), 'left': (True, False), 'right': (False, True), 'neither': (False, False)}.get(inclusive)\n    if left_right_inclusive is None:\n        raise ValueError(\"Inclusive has to be either 'both', 'neither', 'left' or 'right'\")\n    return left_right_inclusive",
    "docstring": "Check that the argument is among {\"both\", \"neither\", \"left\", \"right\"}. Parameters ---------- inclusive : {\"both\", \"neither\", \"left\", \"right\"} Returns ------- left_right_inclusive : tuple[bool, bool] Raises ------ ValueError : if argument is not among valid values",
    "type": "function",
    "file_path": "pandas\\pandas\\util\\_validators.py",
    "ast_data": "FunctionDef name:validate_inclusive arg:inclusive arguments arg If Call Assign Call If Compare Raise Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "transposed",
    "source_code": "def transposed(self):\n    return self.resampled((None, None), transposed=True)",
    "docstring": "Transposes the colormap by swapping the order of the axis",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\colors.py",
    "ast_data": "FunctionDef name:transposed arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "set_shard_dimensions",
    "source_code": "def set_shard_dimensions(self, shard_dimensions):\n    if len(shard_dimensions) != self.number_of_tuple_elements:\n        raise ValueError(f'shard_dimensions is {str(shard_dimensions)}, but must be a list of length {self.number_of_tuple_elements}')\n    for policy, dimension in zip(self._sharding_policies, shard_dimensions):\n        policy.set_shard_dimension(dimension)\n    self._validate()",
    "docstring": "Sets the shard_dimension of each element of the queue. shard_dimensions must be a list of length self.number_of_tuple_elements, and each element must be convertible to a Dimension compatible with self.tuple_shapes. Args: shard_dimensions: the dimensions of each queue element. Raises: ValueError: if shard_dimensions is not of length self.number_of_tuple_elements; or an element of shard_dimensions cannot be converted to a Dimension; or an element of shard_dimensions is a Dimension that is out of range for the corresponding tuple element shape.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\tpu\\tpu_feed.py",
    "ast_data": "FunctionDef name:set_shard_dimensions arg:self arg:shard_dimensions arguments arg arg If Compare Call Raise Call Call For Call Call Call"
  },
  {
    "library": "numpy",
    "name": "masked_outside",
    "source_code": "def masked_outside(x, v1, v2, copy=True):\n    if v2 < v1:\n        v1, v2 = (v2, v1)\n    xf = filled(x)\n    condition = (xf < v1) | (xf > v2)\n    return masked_where(condition, x, copy=copy)",
    "docstring": "Mask an array outside a given interval. Shortcut to `conditionxv1v2xv1v2` doesn't matter. >>> ma.masked_outside(x, 0.3, -0.3) masked_array(data=[--, --, 0.01, 0.2, --, --], mask=[ True, True, False, False, True, True], fill_value=1e+20)",
    "type": "function",
    "file_path": "numpy\\numpy\\ma\\core.py",
    "ast_data": "FunctionDef name:masked_outside arg:x arg:v1 arg:v2 arg:copy arguments arg arg arg arg If Compare Assign Assign Call Assign Compare Compare Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "set_box_aspect",
    "source_code": "def set_box_aspect(self, aspect, *, zoom=1):\n    if zoom <= 0:\n        raise ValueError(f'Argument zoom = {zoom} must be > 0')\n    if aspect is None:\n        aspect = np.asarray((4, 4, 3), dtype=float)\n    else:\n        aspect = np.asarray(aspect, dtype=float)\n        _api.check_shape((3,), aspect=aspect)\n    aspect *= 1.8294640721620434 * 25 / 24 * zoom / np.linalg.norm(aspect)\n    self._box_aspect = self._roll_to_vertical(aspect, reverse=True)\n    self.stale = True",
    "docstring": "Set the Axes box aspect. The box aspect is the ratio of height to width in display units for each face of the box when viewed perpendicular to that face. This is not to be confused with the data aspect (see ). The default ratios are 4:4:3 (x:y:z). To simulate having equal aspect in data space, set the box aspect to match your data range in each dimension. *zoom* controls the overall size of the Axes3D in the figure. Parameters ---------- aspect : 3-tuple of floats or None Changes the physical dimensions of the Axes3D, such that the ratio of the axis lengths in display units is x:y:z. If None, defaults to (4, 4, 3). zoom : float, default: 1 Control overall size of the Axes3D in the figure. Must be > 0.",
    "type": "method",
    "file_path": "matplotlib\\lib\\mpl_toolkits\\mplot3d\\axes3d.py",
    "ast_data": "FunctionDef name:set_box_aspect arg:self arg:aspect arguments arg arg arg If Compare Raise Call If Compare Assign Call Assign Call Call Call Assign Call Assign"
  },
  {
    "library": "tensorflow",
    "name": "new_symbol",
    "source_code": "def new_symbol(self, name_root, reserved_locals):\n    all_reserved_locals = set()\n    for s in reserved_locals:\n        if isinstance(s, qual_names.QN):\n            all_reserved_locals.update(s.qn)\n        elif isinstance(s, str):\n            all_reserved_locals.add(s)\n        else:\n            raise ValueError('Unexpected symbol type \"%s\"' % type(s))\n    pieces = name_root.split('_')\n    if pieces[-1].isdigit():\n        name_root = '_'.join(pieces[:-1])\n        n = int(pieces[-1])\n    else:\n        n = 0\n    new_name = name_root\n    while new_name in self.global_namespace or new_name in all_reserved_locals or new_name in self.generated_names:\n        n += 1\n        new_name = '%s_%d' % (name_root, n)\n    self.generated_names.add(new_name)\n    return new_name",
    "docstring": "See control_flow.SymbolNamer.new_symbol.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\autograph\\pyct\\naming.py",
    "ast_data": "FunctionDef name:new_symbol arg:self arg:name_root arg:reserved_locals arguments arg arg arg Assign Call For If Call Call If Call Call Raise Call Call Assign Call If Call Assign Call Assign Call Assign Assign While BoolOp Compare Compare Compare Assign Call Return return:yes"
  },
  {
    "library": "pandas",
    "name": "_box_func",
    "source_code": "def _box_func(self, x):\n    return x",
    "docstring": "Wrap numpy type in our dtype.type if necessary.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\arrays\\_mixins.py",
    "ast_data": "FunctionDef name:_box_func arg:self arg:x arguments arg arg Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "IndexLocator",
    "source_code": "class IndexLocator(Locator):\n\n    def __init__(self, base, offset):\n        self._base = base\n        self.offset = offset\n\n    def set_params(self, base=None, offset=None):\n        if base is not None:\n            self._base = base\n        if offset is not None:\n            self.offset = offset\n\n    def __call__(self):\n        dmin, dmax = self.axis.get_data_interval()\n        return self.tick_values(dmin, dmax)\n\n    def tick_values(self, vmin, vmax):\n        return self.raise_if_exceeds(np.arange(vmin + self.offset, vmax + 1, self._base))",
    "docstring": "Place ticks at every nth point plotted. IndexLocator assumes index plotting; i.e., that the ticks are placed at integer values in the range between 0 and len(data) inclusive.",
    "type": "class",
    "file_path": "matplotlib\\lib\\matplotlib\\ticker.py",
    "ast_data": "ClassDef name:IndexLocator FunctionDef name:__init__ arg:self arg:base arg:offset arguments arg arg arg Assign Assign FunctionDef name:set_params arg:self arg:base arg:offset arguments arg arg arg If Compare Assign If Compare Assign FunctionDef name:__call__ arg:self arguments arg Assign Call Return return:yes Call FunctionDef name:tick_values arg:self arg:vmin arg:vmax arguments arg arg arg Return return:yes Call Call"
  },
  {
    "library": "django",
    "name": "_get_postgis_func",
    "source_code": "def _get_postgis_func(self, func):\n    with self.connection.temporary_connection() as cursor:\n        cursor.execute('SELECT %s()' % func)\n        return cursor.fetchone()[0]",
    "docstring": "Helper routine for calling PostGIS functions and returning their result.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\db\\backends\\postgis\\operations.py",
    "ast_data": "FunctionDef name:_get_postgis_func arg:self arg:func arguments arg arg With Call Call Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "set_center",
    "source_code": "def set_center(self, xy):\n    self._center = xy\n    self.stale = True",
    "docstring": "Set the center of the ellipse. Parameters ---------- xy : (float, float)",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\patches.py",
    "ast_data": "FunctionDef name:set_center arg:self arg:xy arguments arg arg Assign Assign"
  },
  {
    "library": "tensorflow",
    "name": "_flat_signature_summary",
    "source_code": "def _flat_signature_summary(self):\n    assert self._arg_keywords is not None\n    assert self._num_positional_args is not None\n    arg_names = self._arg_keywords\n    if self._num_positional_args > len(arg_names):\n        arg_names.extend(('<arg{}>'.format(i + 1) for i in range(len(arg_names), self._num_positional_args)))\n    return f'{self._func_graph.name}({', '.join(arg_names)})'",
    "docstring": "Returns a string summarizing this function's flat signature.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\polymorphic_function\\concrete_function.py",
    "ast_data": "FunctionDef name:_flat_signature_summary arg:self arguments arg Compare Compare Assign If Compare Call Call Call Call Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "DICT_KEYS_MATCH",
    "source_code": "def DICT_KEYS_MATCH(self, guard):\n    ref = self.arg_ref(guard)\n    value = self.get(guard.name)\n    if value is torch.utils._pytree.SUPPORTED_NODES:\n        self.DICT_VERSION(guard)\n        return\n    self.SEQUENCE_LENGTH(guard)\n    code = []\n    code.append(f'list(dict.keys({ref})) == {list(builtin_dict_keys(value))!r}')\n    self._set_guard_export_info(guard, code)\n    if self.requires_key_order_guarding(guard.originating_source):\n        self.guard_on_dict_keys_and_order(value, guard)\n    else:\n        self.guard_on_dict_keys_and_ignore_order(value, guard)",
    "docstring": "Insert guard to check that the keys of a dict are same",
    "type": "method",
    "file_path": "pytorch\\torch\\_dynamo\\guards.py",
    "ast_data": "FunctionDef name:DICT_KEYS_MATCH arg:self arg:guard arguments arg arg Assign Call Assign Call If Compare Call Return return:no Call Assign Call Call Call Call If Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "get_c_function",
    "source_code": "def get_c_function(name):\n    return context().get_c_function(name)",
    "docstring": "Get a C API TF_Function from the context.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\context.py",
    "ast_data": "FunctionDef name:get_c_function arg:name arguments arg Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "probs",
    "source_code": "@property\ndef probs(self):\n    return self._probs",
    "docstring": "Probability of drawing a in that coordinate.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\distributions\\multinomial.py",
    "ast_data": "FunctionDef name:probs arg:self arguments arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "wait",
    "source_code": "def wait(self, keys, override_timeout: Optional[datetime.timedelta]=None):\n    b64_keys = [self.prefix + self._encode(key) for key in keys]\n    kvs = self._try_wait_get(b64_keys, override_timeout)\n    if kvs is None:\n        raise LookupError('Timeout while waiting for keys in EtcdStore')",
    "docstring": "Wait until all of the keys are published, or until timeout. Raises: LookupError - if timeout occurs",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\elastic\\rendezvous\\etcd_store.py",
    "ast_data": "FunctionDef name:wait arg:self arg:keys arg:override_timeout arguments arg arg arg Assign Call Assign Call If Compare Raise Call"
  },
  {
    "library": "tensorflow",
    "name": "is_compatible_with",
    "source_code": "def is_compatible_with(self, spec_or_tensor):\n    return super(TensorSpec, self).is_compatible_with(spec_or_tensor)",
    "docstring": "Returns True if spec_or_tensor is compatible with this TensorSpec. Two tensors are considered compatible if they have the same dtype and their shapes are compatible (see ). Args: spec_or_tensor: A tf.TensorSpec or a tf.Tensor Returns: True if spec_or_tensor is compatible with self.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\tensor.py",
    "ast_data": "FunctionDef name:is_compatible_with arg:self arg:spec_or_tensor arguments arg arg Return return:yes Call Call"
  },
  {
    "library": "django",
    "name": "is_valid",
    "source_code": "def is_valid(self):\n    if not self.is_bound:\n        return False\n    self.errors\n    forms_valid = all([form.is_valid() for form in self.forms if not (self.can_delete and self._should_delete_form(form))])\n    return forms_valid and (not self.non_form_errors())",
    "docstring": "Return True if every form in self.forms is valid.",
    "type": "method",
    "file_path": "django\\django\\forms\\formsets.py",
    "ast_data": "FunctionDef name:is_valid arg:self arguments arg If Return return:yes Assign Call Call BoolOp Call Return return:yes BoolOp Call"
  },
  {
    "library": "matplotlib",
    "name": "_set_title_offset_trans",
    "source_code": "def _set_title_offset_trans(self, title_offset_points):\n    self.titleOffsetTrans = mtransforms.ScaledTranslation(0.0, title_offset_points / 72, self.get_figure(root=False).dpi_scale_trans)\n    for _title in (self.title, self._left_title, self._right_title):\n        _title.set_transform(self.transAxes + self.titleOffsetTrans)\n        _title.set_clip_box(None)",
    "docstring": "Set the offset for the title either from :rc: or from set_title kwarg ``.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\axes\\_base.py",
    "ast_data": "FunctionDef name:_set_title_offset_trans arg:self arg:title_offset_points arguments arg arg Assign Call Call For Call Call"
  },
  {
    "library": "scipy",
    "name": "cg",
    "source_code": "def cg(A, b, x0=None, *, rtol=1e-05, atol=0.0, maxiter=None, M=None, callback=None):\n    A, M, x, b = make_system(A, M, x0, b)\n    bnrm2 = np.linalg.norm(b)\n    atol, _ = _get_atol_rtol('cg', bnrm2, atol, rtol)\n    if bnrm2 == 0:\n        return (b, 0)\n    n = len(b)\n    if maxiter is None:\n        maxiter = n * 10\n    dotprod = np.vdot if np.iscomplexobj(x) else np.dot\n    matvec = A.matvec\n    psolve = M.matvec\n    r = b - matvec(x) if x.any() else b.copy()\n    rho_prev, p = (None, None)\n    for iteration in range(maxiter):\n        if np.linalg.norm(r) < atol:\n            return (x, 0)\n        z = psolve(r)\n        rho_cur = dotprod(r, z)\n        if iteration > 0:\n            beta = rho_cur / rho_prev\n            p *= beta\n            p += z\n        else:\n            p = np.empty_like(r)\n            p[:] = z[:]\n        q = matvec(p)\n        alpha = rho_cur / dotprod(p, q)\n        x += alpha * p\n        r -= alpha * q\n        rho_prev = rho_cur\n        if callback:\n            callback(x)\n    else:\n        return (x, maxiter)",
    "docstring": "Solve `AAAMA`, see [2]_. References ---------- .. [1] \"Conjugate Gradient Method, Wikipedia, .. [2] \"Preconditioner\", Wikipedia, Examples -------- >>> import numpy as np >>> from scipy.sparse import csc_array >>> from scipy.sparse.linalg import cg >>> P = np.array([[4, 0, 1, 0], ... [0, 5, 0, 0], ... [1, 0, 3, 2], ... [0, 0, 2, 4]]) >>> A = csc_array(P) >>> b = np.array([-1, -0.5, -1, 2]) >>> x, exit_code = cg(A, b, atol=1e-5) >>> print(exit_code) # 0 indicates successful convergence 0 >>> np.allclose(A.dot(x), b) True",
    "type": "function",
    "file_path": "scipy\\scipy\\sparse\\linalg\\_isolve\\iterative.py",
    "ast_data": "FunctionDef name:cg arg:A arg:b arg:x0 arguments arg arg arg arg arg arg arg arg Assign Call Assign Call Assign Call If Compare Return return:yes Assign Call If Compare Assign Assign Call Assign Assign Assign Call Call Call Assign For Call If Compare Call Return return:yes Assign Call Assign Call If Compare Assign Assign Call Assign Assign Call Assign Call Assign If Call Return return:yes"
  },
  {
    "library": "pandas",
    "name": "is_object_dtype",
    "source_code": "def is_object_dtype(arr_or_dtype) -> bool:\n    return _is_dtype_type(arr_or_dtype, classes(np.object_))",
    "docstring": "Check whether an array-like or dtype is of the object dtype. This method examines the input to determine if it is of the object data type. Object dtype is a generic data type that can hold any Python objects, including strings, lists, and custom objects. Parameters ---------- arr_or_dtype : array-like or dtype The array-like or dtype to check. Returns ------- boolean Whether or not the array-like or dtype is of the object dtype. See Also -------- api.types.is_numeric_dtype : Check whether the provided array or dtype is of a numeric dtype. api.types.is_string_dtype : Check whether the provided array or dtype is of the string dtype. api.types.is_bool_dtype : Check whether the provided array or dtype is of a boolean dtype. Examples -------- >>> from pandas.api.types import is_object_dtype >>> is_object_dtype(object) True >>> is_object_dtype(int) False >>> is_object_dtype(np.array([], dtype=object)) True >>> is_object_dtype(np.array([], dtype=int)) False >>> is_object_dtype([1, 2, 3]) False",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\dtypes\\common.py",
    "ast_data": "FunctionDef name:is_object_dtype arg:arr_or_dtype arguments arg Return return:yes Call Call"
  },
  {
    "library": "scikit-learn",
    "name": "_fit",
    "source_code": "def _fit(self, X, n_components, random_state):\n    code_init = self.V_init.T if self.V_init is not None else None\n    dict_init = self.U_init.T if self.U_init is not None else None\n    code, dictionary, E, self.n_iter_ = dict_learning(X.T, n_components, alpha=self.alpha, tol=self.tol, max_iter=self.max_iter, method=self.method, n_jobs=self.n_jobs, verbose=self.verbose, random_state=random_state, code_init=code_init, dict_init=dict_init, return_n_iter=True)\n    code, dictionary = svd_flip(code, dictionary, u_based_decision=True)\n    self.components_ = code.T\n    components_norm = np.linalg.norm(self.components_, axis=1)[:, np.newaxis]\n    components_norm[components_norm == 0] = 1\n    self.components_ /= components_norm\n    self.n_components_ = len(self.components_)\n    self.error_ = E\n    return self",
    "docstring": "Specialized for SparsePCA.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\decomposition\\_sparse_pca.py",
    "ast_data": "FunctionDef name:_fit arg:self arg:X arg:n_components arg:random_state arguments arg arg arg arg Assign Compare Assign Compare Assign Call Assign Call Assign Assign Call Assign Compare Assign Call Assign Return return:yes"
  },
  {
    "library": "pandas",
    "name": "_construct_result",
    "source_code": "def _construct_result(self, result, name, other):\n    raise AbstractMethodError(self)",
    "docstring": "Construct an appropriately-wrapped result from the ArrayLike result of an arithmetic-like operation.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\base.py",
    "ast_data": "FunctionDef name:_construct_result arg:self arg:result arg:name arg:other arguments arg arg arg arg Raise Call"
  },
  {
    "library": "pytorch",
    "name": "_StorageWriterTransforms",
    "source_code": "class _StorageWriterTransforms:\n\n    def __init__(self, extensions: Optional[Sequence[StreamTransformExtension]]=None) -> None:\n        self.extensions = () if extensions is None else extensions\n\n    def transform_save_stream(self, write_item: WriteItem, raw_stream: io.IOBase) -> tuple[IO[bytes], list[str]]:\n\n        class NoCloseWriter(io.IOBase):\n\n            def __init__(self, raw: io.IOBase):\n                self.raw = raw\n\n            def writeable(self) -> bool:\n                return True\n\n            def write(self, b: Buffer) -> int:\n                return self.raw.write(b)\n\n            def close(self):\n                self.flush()\n                self.raw.flush()\n        transform_to = cast(IO[bytes], NoCloseWriter(raw_stream))\n        for ex in self.extensions:\n            transform_to = ex.transform_to(transform_to)\n        return (transform_to, [ex.get_descriptor() for ex in reversed(self.extensions)])",
    "docstring": "This is experimental, and will likely move elsewhere in the future. It lives here to minimize changes while we are still learning and gathering feedback.",
    "type": "class",
    "file_path": "pytorch\\torch\\distributed\\checkpoint\\filesystem.py",
    "ast_data": "ClassDef name:_StorageWriterTransforms FunctionDef name:__init__ arg:self arg:extensions arguments arg arg Assign Compare FunctionDef name:transform_save_stream arg:self arg:write_item arg:raw_stream arguments arg arg arg ClassDef name:NoCloseWriter FunctionDef name:__init__ arg:self arg:raw arguments arg arg Assign FunctionDef name:writeable arg:self arguments arg Return return:yes FunctionDef name:write arg:self arg:b arguments arg arg Return return:yes Call FunctionDef name:close arg:self arguments arg Call Call Assign Call Call For Assign Call Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_reduction_op_flops",
    "source_code": "def _reduction_op_flops(graph, node, reduce_flops=1, finalize_flops=0):\n    in_shape = graph_util.tensor_shape_from_node_def_name(graph, node.input[0])\n    in_shape.assert_is_fully_defined()\n    out_shape = graph_util.tensor_shape_from_node_def_name(graph, node.name)\n    out_shape.assert_is_fully_defined()\n    num_flops = in_shape.num_elements() * reduce_flops + out_shape.num_elements() * (finalize_flops - reduce_flops)\n    return ops.OpStats('flops', num_flops)",
    "docstring": "Common code which compute flops for reduction operations.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\profiler\\internal\\flops_registry.py",
    "ast_data": "FunctionDef name:_reduction_op_flops arg:graph arg:node arg:reduce_flops arg:finalize_flops arguments arg arg arg arg Assign Call Call Assign Call Call Assign Call Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_assert_same_graph",
    "source_code": "def _assert_same_graph(original_item, item) -> None:\n    original_graph = getattr(original_item, 'graph', None)\n    graph = getattr(item, 'graph', None)\n    if original_graph and graph and (original_graph is not graph):\n        raise ValueError('%s must be from the same graph as %s (graphs are %s and %s).' % (item, original_item, graph, original_graph))",
    "docstring": "Fail if the 2 items are from different graphs. Args: original_item: Original item to check against. item: Item to check. Raises: ValueError: if graphs do not match.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\ops.py",
    "ast_data": "FunctionDef name:_assert_same_graph arg:original_item arg:item arguments arg arg Assign Call Assign Call If BoolOp Compare Raise Call"
  },
  {
    "library": "tensorflow",
    "name": "single_offset",
    "source_code": "def single_offset(self, shape):\n    single_slice_dim = self.single_slice_dim(shape)\n    if single_slice_dim is None:\n        return 0\n    return self.var_offset[single_slice_dim]",
    "docstring": "Returns the offset when the variable is partitioned in at most one dim. Args: shape: Tuple or list of indicating the shape of one specific variable partition. Returns: representing the offset in the dimension along which the variable is partitioned. Returns 0 if the variable is not being partitioned. Raises: ValueError: Depending on self.single_slice_dim().",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\variable_scope.py",
    "ast_data": "FunctionDef name:single_offset arg:self arg:shape arguments arg arg Assign Call If Compare Return return:yes Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "add_artist",
    "source_code": "def add_artist(self, a):\n    self._children.append(a)\n    a.set_transform(self.get_transform())\n    self.stale = True",
    "docstring": "Add an to the container box.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\offsetbox.py",
    "ast_data": "FunctionDef name:add_artist arg:self arg:a arguments arg arg Call Call Call Assign"
  },
  {
    "library": "kornia",
    "name": "KORNIA_CHECK_IS_TENSOR",
    "source_code": "def KORNIA_CHECK_IS_TENSOR(x: object, msg: Optional[str]=None, raises: bool=True) -> TypeGuard[Tensor]:\n    if not isinstance(x, Tensor):\n        if raises:\n            raise TypeError(f'Not a Tensor type. Got: {type(x)}.\\n{msg}')\n        return False\n    return True",
    "docstring": "Check the input variable is a Tensor. Args: x: any input variable. msg: message to show in the exception. raises: bool indicating whether an exception should be raised upon failure. Raises: TypeException: if the input variable does not match with the expected and raises is True. Example: >>> x = torch.rand(2, 3, 3) >>> KORNIA_CHECK_IS_TENSOR(x, \"Invalid tensor\") True",
    "type": "function",
    "file_path": "kornia\\kornia\\core\\check.py",
    "ast_data": "FunctionDef name:KORNIA_CHECK_IS_TENSOR arg:x arg:msg arg:raises arguments arg arg arg If Call If Raise Call Call Return return:yes Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "WrapperUserFunctionVariable",
    "source_code": "class WrapperUserFunctionVariable(VariableTracker):\n\n    def __init__(self, wrapper_obj, attr_to_trace, **kwargs) -> None:\n        super().__init__(**kwargs)\n        self.wrapper_obj = wrapper_obj\n        self.attr_to_trace = attr_to_trace\n\n    def var_getattr(self, tx: 'InstructionTranslator', name):\n        if name == self.attr_to_trace:\n            val = getattr(self.wrapper_obj, self.attr_to_trace)\n            source = self.source and AttrSource(self.source, name)\n            return VariableTracker.build(tx, val, source)\n        return super().var_getattr(tx, name)\n\n    def call_function(self, tx: 'InstructionTranslator', args: 'list[VariableTracker]', kwargs: 'dict[str, VariableTracker]') -> 'VariableTracker':\n        if hasattr(self.wrapper_obj, 'cache_info'):\n            warnings.warn('Dynamo detected a call to a `functools.lru_cache` wrapped function.Dynamo currently ignores `functools.lru_cache` and directly traces the wrapped function.`functools.lru_cache` wrapped functions that read outside state may not be traced soundly.')\n        return variables.UserFunctionVariable(polyfills.getattr_and_trace).call_function(tx, [self, variables.ConstantVariable(self.attr_to_trace), *args], kwargs)",
    "docstring": "Used to represent a wrapper object that contains the actual callable as an attribute. For example, torch.jit.script/trace have the original function at their _torchdynamo_inline attribute. Similarly, functions with __script_if_tracing_wrapper have the original attr at \"__original_fn\".",
    "type": "class",
    "file_path": "pytorch\\torch\\_dynamo\\variables\\functions.py",
    "ast_data": "ClassDef name:WrapperUserFunctionVariable FunctionDef name:__init__ arg:self arg:wrapper_obj arg:attr_to_trace arguments arg arg arg arg Call Call Assign Assign FunctionDef name:var_getattr arg:self arg:tx arg:name arguments arg arg arg If Compare Assign Call Assign BoolOp Call Return return:yes Call Return return:yes Call Call FunctionDef name:call_function arg:self arg:tx arg:args arg:kwargs arguments arg arg arg arg If Call Call Return return:yes Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_call_wrapped_cell",
    "source_code": "def _call_wrapped_cell(self, inputs, state, cell_call_fn, **kwargs):\n\n    def _should_dropout(p):\n        return not isinstance(p, float) or p < 1\n    if _should_dropout(self._input_keep_prob):\n        inputs = self._dropout(inputs, 'input', self._recurrent_input_noise, self._input_keep_prob)\n    output, new_state = cell_call_fn(inputs, state, **kwargs)\n    if _should_dropout(self._state_keep_prob):\n        shallow_filtered_substructure = nest.get_traverse_shallow_structure(self._dropout_state_filter, new_state)\n        new_state = self._dropout(new_state, 'state', self._recurrent_state_noise, self._state_keep_prob, shallow_filtered_substructure)\n    if _should_dropout(self._output_keep_prob):\n        output = self._dropout(output, 'output', self._recurrent_output_noise, self._output_keep_prob)\n    return (output, new_state)",
    "docstring": "Runs the wrapped cell and applies dropout. Args: inputs: A tensor with wrapped cell's input. state: A tensor or tuple of tensors with wrapped cell's state. cell_call_fn: Wrapped cell's method to use for step computation (cell's or 'call' method). **kwargs: Additional arguments. Returns: A pair containing: - Output: A tensor with cell's output. - New state: A tensor or tuple of tensors with new wrapped cell's state.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\layers\\legacy_rnn\\rnn_cell_wrapper_impl.py",
    "ast_data": "FunctionDef name:_call_wrapped_cell arg:self arg:inputs arg:state arg:cell_call_fn arguments arg arg arg arg arg FunctionDef name:_should_dropout arg:p arguments arg Return return:yes BoolOp Call Compare If Call Assign Call Assign Call If Call Assign Call Assign Call If Call Assign Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_get_offset",
    "source_code": "def _get_offset(key, name, numel):\n    nonlocal current_offset, offsets\n    if name in offsets:\n        storage_offset = offsets[name]\n        return storage_offset\n    if current_offset is None:\n        assert key == '0'\n        current_offset = zip_file.get_record_offset(name)\n        local_header_offset = zip_file.get_record_header_offset(name)\n        storage_offset = current_offset\n    else:\n        storage_offset = zip_file.get_record_offset_no_read(current_offset, name, numel, storage_alignment)\n        local_header_offset = current_offset\n    offsets[name] = storage_offset\n    current_offset = storage_offset + numel\n    if numel > 0:\n        if local_header_offset >= mz_uint32_max or numel >= mz_uint32_max:\n            current_offset += data_descripter_size64\n        else:\n            current_offset += data_descripter_size32\n    return storage_offset",
    "docstring": "Return the offset of the storage associated with key with record name and size numel. It is expected that the zipfile header of this storage starts at current_offset. WARNING: This function relies on the behavior of the zipwriter in miniz.c. In particular, the behavior of . The behavior of this function must be kept in sync with that of miniz! After reading a storage of size numel that starts at storage_offset if it is the first time that storage was read, update nonlocal variable current_offset to the start of the next zipfile header by incrementing it by numel and the data descriptor size.",
    "type": "function",
    "file_path": "pytorch\\torch\\serialization.py",
    "ast_data": "FunctionDef name:_get_offset arg:key arg:name arg:numel arguments arg arg arg If Compare Assign Return return:yes If Compare Compare Assign Call Assign Call Assign Assign Call Assign Assign Assign If Compare If BoolOp Compare Compare Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "handle_extension",
    "source_code": "def handle_extension(extensions, f):\n    extensions = extensions.lower().split()\n\n    def g(key, data):\n        extension = key.lower().split('.')\n        for target in extensions:\n            target = target.split('.')\n            if len(target) > len(extension):\n                continue\n            if extension[-len(target):] == target:\n                return f(data)\n            return None\n    return g",
    "docstring": "Return a decoder handler function for the list of extensions. Extensions can be a space separated list of extensions. Extensions can contain dots, in which case the corresponding number of extension components must be present in the key given to f. Comparisons are case insensitive. Examples: handle_extension(\"jpg jpeg\", my_decode_jpg) # invoked for any file.jpg handle_extension(\"seg.jpg\", special_case_jpg) # invoked only for file.seg.jpg",
    "type": "function",
    "file_path": "pytorch\\torch\\utils\\data\\datapipes\\utils\\decoder.py",
    "ast_data": "FunctionDef name:handle_extension arg:extensions arg:f arguments arg arg Assign Call Call FunctionDef name:g arg:key arg:data arguments arg arg Assign Call Call For Assign Call If Compare Call Call If Compare Call Return return:yes Call Return return:no Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_has_cycle",
    "source_code": "def _has_cycle(op, state):\n    op_state = state.get(op.name, _UNKNOWN)\n    if op_state == _STARTED:\n        return True\n    elif op_state == _FINISHED:\n        return False\n    state[op.name] = _STARTED\n    for i in itertools.chain((i.op for i in op.inputs), op.control_inputs):\n        if _has_cycle(i, state):\n            return True\n    state[op.name] = _FINISHED\n    return False",
    "docstring": "Detect cycles in the dependencies of .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\variables.py",
    "ast_data": "FunctionDef name:_has_cycle arg:op arg:state arguments arg arg Assign Call If Compare Return return:yes If Compare Return return:yes Assign For Call If Call Return return:yes Assign Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "timeit",
    "source_code": "def timeit(self, number: int=1000000) -> common.Measurement:\n    with common.set_torch_threads(self._task_spec.num_threads):\n        self._timeit(number=max(int(number // 100), 2))\n        return common.Measurement(number_per_run=number, raw_times=[self._timeit(number=number)], task_spec=self._task_spec)",
    "docstring": "Mirrors the semantics of timeit.Timer.timeit(). Execute the main statement () times.",
    "type": "method",
    "file_path": "pytorch\\torch\\utils\\benchmark\\utils\\timer.py",
    "ast_data": "FunctionDef name:timeit arg:self arg:number arguments arg arg With Call Call Call Call Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_safe_mean",
    "source_code": "def _safe_mean(losses, num_present):\n    total_loss = math_ops.reduce_sum(losses)\n    return math_ops.div_no_nan(total_loss, num_present, name='value')",
    "docstring": "Computes a safe mean of the losses. Args: losses: whose elements contain individual loss measurements. num_present: The number of measurable elements in . Returns: A scalar representing the mean of . If is zero, then zero is returned.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\losses\\losses_impl.py",
    "ast_data": "FunctionDef name:_safe_mean arg:losses arg:num_present arguments arg arg Assign Call Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "Fixed",
    "source_code": "class Fixed(_Base):\n\n    def __init__(self, fixed_size):\n        _api.check_isinstance(Real, fixed_size=fixed_size)\n        self.fixed_size = fixed_size\n\n    def get_size(self, renderer):\n        rel_size = 0.0\n        abs_size = self.fixed_size\n        return (rel_size, abs_size)",
    "docstring": "Simple fixed size with absolute part = *fixed_size* and relative part = 0.",
    "type": "class",
    "file_path": "matplotlib\\lib\\mpl_toolkits\\axes_grid1\\axes_size.py",
    "ast_data": "ClassDef name:Fixed FunctionDef name:__init__ arg:self arg:fixed_size arguments arg arg Call Assign FunctionDef name:get_size arg:self arg:renderer arguments arg arg Assign Assign Return return:yes"
  },
  {
    "library": "sphinx",
    "name": "copy_assets",
    "source_code": "def copy_assets(self) -> None:\n    pass",
    "docstring": "Where assets (images, static files, etc) are copied before writing",
    "type": "method",
    "file_path": "sphinx\\sphinx\\builders\\__init__.py",
    "ast_data": "FunctionDef name:copy_assets arg:self arguments arg"
  },
  {
    "library": "tensorflow",
    "name": "apply_aggregation_replica_context",
    "source_code": "def apply_aggregation_replica_context(value, aggregation, destinations):\n    if isinstance(value, DistributedValues):\n        raise TypeError('Cannot use DistributedValues to update variables in replica context.')\n    if not tensor_util.is_tf_type(value):\n        return value\n    if aggregation == vs.VariableAggregation.ONLY_FIRST_REPLICA:\n\n        def merge_fn(strategy, value):\n            return strategy.extended.broadcast_to(strategy.experimental_local_results(value)[0], destinations=destinations)\n        return distribute_lib.get_replica_context().merge_call(merge_fn, args=(value,))\n    else:\n        reduce_op = reduce_util.ReduceOp.from_variable_aggregation(aggregation)\n        aggregated_value = distribute_lib.get_strategy().extended._replica_ctx_all_reduce(reduce_op, value)\n        return aggregated_value",
    "docstring": "Aggregate to as specified by .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\values.py",
    "ast_data": "FunctionDef name:apply_aggregation_replica_context arg:value arg:aggregation arg:destinations arguments arg arg arg If Call Raise Call If Call Return return:yes If Compare FunctionDef name:merge_fn arg:strategy arg:value arguments arg arg Return return:yes Call Call Return return:yes Call Call Assign Call Assign Call Call Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "fit",
    "source_code": "@_fit_context(prefer_skip_nested_validation=True)\ndef fit(self, X, y=None):\n    X = validate_data(self, X)\n    random_state = check_random_state(self.random_state)\n    n_features = X.shape[1]\n    uniform = random_state.uniform(size=(n_features, self.n_components))\n    self.random_weights_ = 1.0 / np.pi * np.log(np.tan(np.pi / 2.0 * uniform))\n    self.random_offset_ = random_state.uniform(0, 2 * np.pi, size=self.n_components)\n    if X.dtype == np.float32:\n        self.random_weights_ = self.random_weights_.astype(X.dtype, copy=False)\n        self.random_offset_ = self.random_offset_.astype(X.dtype, copy=False)\n    self._n_features_out = self.n_components\n    return self",
    "docstring": "Fit the model with X. Samples random projection according to n_features. Parameters ---------- X : array-like, shape (n_samples, n_features) Training data, where is the number of samples and is the number of features. y : array-like, shape (n_samples,) or (n_samples, n_outputs), default=None Target values (None for unsupervised transformations). Returns ------- self : object Returns the instance itself.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\kernel_approximation.py",
    "ast_data": "FunctionDef name:fit arg:self arg:X arg:y arguments arg arg arg Assign Call Assign Call Assign Assign Call Assign Call Call Assign Call If Compare Assign Call Assign Call Assign Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "remove_from_cache",
    "source_code": "def remove_from_cache(f):\n    if isinstance(f, types.CodeType):\n        reset_code(f)\n    elif hasattr(f, '__code__'):\n        reset_code(f.__code__)\n    elif hasattr(getattr(f, 'forward', None), '__code__'):\n        reset_code(f.forward.__code__)\n    else:\n        from . import reset\n        reset()\n        log.warning('could not determine __code__ for %s', f)",
    "docstring": "Make sure f.__code__ is not cached to force a recompile",
    "type": "function",
    "file_path": "pytorch\\torch\\_dynamo\\eval_frame.py",
    "ast_data": "FunctionDef name:remove_from_cache arg:f arguments arg If Call Call If Call Call If Call Call Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "restore",
    "source_code": "def restore(self, trackable, reader=None):\n    with ops.init_scope():\n        if self.bind_object(trackable):\n            restore_ops = self._restore_descendants(reader)\n            if restore_ops:\n                self._checkpoint.new_restore_ops(restore_ops)",
    "docstring": "Restore this value into .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\checkpoint\\restore.py",
    "ast_data": "FunctionDef name:restore arg:self arg:trackable arg:reader arguments arg arg arg With Call If Call Assign Call If Call"
  },
  {
    "library": "scikit-learn",
    "name": "_joint_log_likelihood",
    "source_code": "def _joint_log_likelihood(self, X):\n    n_features = self.feature_log_prob_.shape[1]\n    n_features_X = X.shape[1]\n    if n_features_X != n_features:\n        raise ValueError('Expected input with %d features, got %d instead' % (n_features, n_features_X))\n    neg_prob = np.log(1 - np.exp(self.feature_log_prob_))\n    jll = safe_sparse_dot(X, (self.feature_log_prob_ - neg_prob).T)\n    jll += self.class_log_prior_ + neg_prob.sum(axis=1)\n    return jll",
    "docstring": "Calculate the posterior log probability of the samples X",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\naive_bayes.py",
    "ast_data": "FunctionDef name:_joint_log_likelihood arg:self arg:X arguments arg arg Assign Assign If Compare Raise Call Assign Call Call Assign Call Call Return return:yes"
  },
  {
    "library": "scipy",
    "name": "to_tf",
    "source_code": "def to_tf(self, **kwargs):\n    return TransferFunction(*ss2tf(self._A, self._B, self._C, self._D, **kwargs), **self._dt_dict)",
    "docstring": "Convert system representation to . Parameters ---------- kwargs : dict, optional Additional keywords passed to Returns ------- sys : instance of Transfer function of the current system",
    "type": "method",
    "file_path": "scipy\\scipy\\signal\\_ltisys.py",
    "ast_data": "FunctionDef name:to_tf arg:self arguments arg arg Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "add_auto_profiling",
    "source_code": "def add_auto_profiling(self, cmd, options, profile_steps):\n    if not self._enabled:\n        return\n    self._auto_profiles.append((cmd, options, profile_steps[:]))\n    self._slow_path_steps |= set(profile_steps)\n    self._trace_steps |= set(profile_steps)",
    "docstring": "Traces and profiles at some session run steps. Args: cmd: The profiling commands. (i.e. scope, op, python, graph) options: The profiling options. profile_steps: A list/set of integers. The profiling command and options will be run automatically at these integer steps. Each step is a session.run.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\profiler\\profile_context.py",
    "ast_data": "FunctionDef name:add_auto_profiling arg:self arg:cmd arg:options arg:profile_steps arguments arg arg arg arg If Return return:no Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "truediv",
    "source_code": "def truediv(self, x0: T, x1: T) -> T:\n    raise NotImplementedError",
    "docstring": "True division between floats. Integer inputs are NOT valid. To do Python-style (int, int) -> float division, use int_truediv",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\ops_handler.py",
    "ast_data": "FunctionDef name:truediv arg:self arg:x0 arg:x1 arguments arg arg arg Raise"
  },
  {
    "library": "pandas",
    "name": "root",
    "source_code": "@property\ndef root(self):\n    self._check_if_open()\n    assert self._handle is not None\n    return self._handle.root",
    "docstring": "return the root node",
    "type": "method",
    "file_path": "pandas\\pandas\\io\\pytables.py",
    "ast_data": "FunctionDef name:root arg:self arguments arg Call Compare Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_prune_control_edges_of_debug_ops",
    "source_code": "def _prune_control_edges_of_debug_ops(self):\n    for node in self._node_ctrl_inputs:\n        ctrl_inputs = self._node_ctrl_inputs[node]\n        debug_op_inputs = []\n        for ctrl_inp in ctrl_inputs:\n            if is_debug_node(ctrl_inp):\n                debug_op_inputs.append(ctrl_inp)\n        for debug_op_inp in debug_op_inputs:\n            ctrl_inputs.remove(debug_op_inp)",
    "docstring": "Prune control edges related to the debug ops.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\debug\\lib\\debug_graphs.py",
    "ast_data": "FunctionDef name:_prune_control_edges_of_debug_ops arg:self arguments arg For Assign Assign For If Call Call For Call"
  },
  {
    "library": "tensorflow",
    "name": "_configure",
    "source_code": "def _configure(self, session_config=None, cluster_spec=None, task_type=None, task_id=None):\n    del session_config, cluster_spec, task_type, task_id",
    "docstring": "Configures the strategy class.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\distribute_lib.py",
    "ast_data": "FunctionDef name:_configure arg:self arg:session_config arg:cluster_spec arg:task_type arg:task_id arguments arg arg arg arg arg"
  },
  {
    "library": "cherrypy",
    "name": "__init__",
    "source_code": "def __init__(self, server, conn):\n    super(CPWSGIHTTPRequest, self).__init__(server, conn, proxy_mode=True)",
    "docstring": "Initialize HTTP request container instance. Args: server (cheroot.server.HTTPServer): web server object receiving this request conn (cheroot.server.HTTPConnection): HTTP connection object for this request",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\_cpwsgi_server.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:server arg:conn arguments arg arg arg Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_from_tensor_list",
    "source_code": "def _from_tensor_list(self, tensor_list: List['core_types.Symbol']) -> Any:\n    self.__check_tensor_list(tensor_list)\n    return self._from_compatible_tensor_list(tensor_list)",
    "docstring": "Reconstructs a value from a flat list of . Args: tensor_list: A flat list of , compatible with . Returns: A value that is compatible with this . Raises: ValueError: If is not compatible with .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\type_spec.py",
    "ast_data": "FunctionDef name:_from_tensor_list arg:self arg:tensor_list arguments arg arg Call Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "_hd_1D",
    "source_code": "def _hd_1D(data, prob, var):\n    xsorted = np.squeeze(np.sort(data.compressed().view(ndarray)))\n    n = xsorted.size\n    hd = np.empty((2, len(prob)), float64)\n    if n < 2:\n        hd.flat = np.nan\n        if var:\n            return hd\n        return hd[0]\n    v = np.arange(n + 1) / float(n)\n    betacdf = beta.cdf\n    for i, p in enumerate(prob):\n        _w = betacdf(v, (n + 1) * p, (n + 1) * (1 - p))\n        w = _w[1:] - _w[:-1]\n        hd_mean = np.dot(w, xsorted)\n        hd[0, i] = hd_mean\n        hd[1, i] = np.dot(w, (xsorted - hd_mean) ** 2)\n    hd[0, prob == 0] = xsorted[0]\n    hd[0, prob == 1] = xsorted[-1]\n    if var:\n        hd[1, prob == 0] = hd[1, prob == 1] = np.nan\n        return hd\n    return hd[0]",
    "docstring": "Computes the HD quantiles for a 1D array. Returns nan for invalid data.",
    "type": "function",
    "file_path": "scipy\\scipy\\stats\\_mstats_extras.py",
    "ast_data": "FunctionDef name:_hd_1D arg:data arg:prob arg:var arguments arg arg arg Assign Call Call Call Call Assign Assign Call Call If Compare Assign If Return return:yes Return return:yes Assign Call Call Assign For Call Assign Call Assign Assign Call Assign Assign Call Assign Compare Assign Compare If Assign Compare Compare Return return:yes Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_MinimizerSettingBase",
    "source_code": "@dataclass\nclass _MinimizerSettingBase:\n    accumulate_error: bool = False\n    traverse_method: str = 'sequential'\n    find_all: bool = False\n    return_intermediate: bool = False\n    all_outputs: bool = False\n\n    def __str__(self):\n        settings_str = 'FX Minimizer Settings:\\n'\n        for k, v in vars(self).items():\n            settings_str += f'\\t{k}: {v}\\n'\n        return settings_str",
    "docstring": "Args: : Instead of using a's input for both converted module to verify , use the previous outputs of each converted module as input to accumulate the errors. : \"sequential\" or \"binary\" or \"accumulate\" Determine the way of traverse the nodes in FX module. : Minimizer will go through the entire model and return all problematic nodes. : If true, when using function to run the model, intermediate results of all the ops will be returned as output. : If true, when using function, all the output nodes in the subgraph will be used for comparison.",
    "type": "class",
    "file_path": "pytorch\\torch\\fx\\passes\\net_min_base.py",
    "ast_data": "ClassDef name:_MinimizerSettingBase FunctionDef name:__str__ arg:self arguments arg Assign For Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_load_csv",
    "source_code": "def _load_csv(self, filename: str, format: str='compute_only'):\n    if format == 'compute_only':\n        super()._load_csv(filename)\n        self._load_actions(self.pipeline_order)\n    elif format == 'compute_comms':\n        actions = {}\n        with open(filename, newline='') as csvfile:\n            reader = csv.reader(csvfile)\n            for rank, row in enumerate(reader):\n                actions[rank] = [_Action.from_str(s) for s in row]\n            self._load_actions(actions, format=format)\n    else:\n        raise NotImplementedError(f'format={format!r} is not implemented')",
    "docstring": "Loads a csv in simple format and then lowers it to include comunication actions format must be either \"compute_only\" or \"compute_comms\". If compute_only, the lowering passes will automatically be run to generate a compute_comms schedule.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\pipelining\\schedules.py",
    "ast_data": "FunctionDef name:_load_csv arg:self arg:filename arg:format arguments arg arg arg If Compare Call Call Call If Compare Assign With Call Assign Call For Call Assign Call Call Raise Call"
  },
  {
    "library": "tensorflow",
    "name": "assert_shallow_structure",
    "source_code": "def assert_shallow_structure(shallow_tree, input_tree, check_types=True):\n    nest_util.assert_shallow_structure(nest_util.Modality.DATA, shallow_tree, input_tree, check_types)",
    "docstring": "Asserts that is a shallow structure of . That is, this function tests if the structure can be created from the structure by replacing its leaf nodes with deeper tree structures. Examples: The following code will raise an exception: The following code will not raise an exception: Args: shallow_tree: an arbitrarily nested structure. input_tree: an arbitrarily nested structure. check_types: if (default) the sequence types of and have to be the same. Raises: TypeError: If is a sequence but is not. TypeError: If the sequence types of are different from . Only raised if is . ValueError: If the sequence lengths of are different from .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\data\\util\\nest.py",
    "ast_data": "FunctionDef name:assert_shallow_structure arg:shallow_tree arg:input_tree arg:check_types arguments arg arg arg Call"
  },
  {
    "library": "matplotlib",
    "name": "RadialAxis",
    "source_code": "class RadialAxis(maxis.YAxis):\n    __name__ = 'radialaxis'\n    axis_name = 'radius'\n    _tick_class = RadialTick\n\n    def __init__(self, *args, **kwargs):\n        super().__init__(*args, **kwargs)\n        self.sticky_edges.y.append(0)\n\n    def _wrap_locator_formatter(self):\n        self.set_major_locator(RadialLocator(self.get_major_locator(), self.axes))\n        self.isDefault_majloc = True\n\n    def clear(self):\n        super().clear()\n        self.set_ticks_position('none')\n        self._wrap_locator_formatter()\n\n    def _set_scale(self, value, **kwargs):\n        super()._set_scale(value, **kwargs)\n        self._wrap_locator_formatter()",
    "docstring": "A radial Axis. This overrides certain properties of a to provide special-casing for a radial axis.",
    "type": "class",
    "file_path": "matplotlib\\lib\\matplotlib\\projections\\polar.py",
    "ast_data": "ClassDef name:RadialAxis Assign Assign Assign FunctionDef name:__init__ arg:self arguments arg arg arg Call Call Call FunctionDef name:_wrap_locator_formatter arg:self arguments arg Call Call Call Assign FunctionDef name:clear arg:self arguments arg Call Call Call Call FunctionDef name:_set_scale arg:self arg:value arguments arg arg arg Call Call Call"
  },
  {
    "library": "scikit-learn",
    "name": "_assert_all_finite",
    "source_code": "def _assert_all_finite(X, allow_nan=False, msg_dtype=None, estimator_name=None, input_name=''):\n    xp, is_array_api = get_namespace(X)\n    if _get_config()['assume_finite']:\n        return\n    X = xp.asarray(X)\n    if not is_array_api and X.dtype == np.dtype('object') and (not allow_nan):\n        if _object_dtype_isnan(X).any():\n            raise ValueError('Input contains NaN')\n    if not xp.isdtype(X.dtype, ('real floating', 'complex floating')):\n        return\n    with np.errstate(over='ignore'):\n        first_pass_isfinite = xp.isfinite(xp.sum(X))\n    if first_pass_isfinite:\n        return\n    _assert_all_finite_element_wise(X, xp=xp, allow_nan=allow_nan, msg_dtype=msg_dtype, estimator_name=estimator_name, input_name=input_name)",
    "docstring": "Like assert_all_finite, but only for ndarray.",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\utils\\validation.py",
    "ast_data": "FunctionDef name:_assert_all_finite arg:X arg:allow_nan arg:msg_dtype arg:estimator_name arg:input_name arguments arg arg arg arg arg Assign Call If Call Return return:no Assign Call If BoolOp Compare Call If Call Call Raise Call If Call Return return:no With Call Assign Call Call If Return return:no Call"
  },
  {
    "library": "kornia",
    "name": "as_cpu",
    "source_code": "def as_cpu(self, **kwargs: Any) -> None:\n    self._session.set_providers(['CPUExecutionProvider'], provider_options=[{**kwargs}])",
    "docstring": "Set the session to run on CPU.",
    "type": "method",
    "file_path": "kornia\\kornia\\core\\mixin\\onnx.py",
    "ast_data": "FunctionDef name:as_cpu arg:self arguments arg arg Call"
  },
  {
    "library": "matplotlib",
    "name": "_start",
    "source_code": "def _start(self, *args):\n    if self._fig.canvas.is_saving():\n        return\n    self._fig.canvas.mpl_disconnect(self._first_draw_id)\n    self._init_draw()\n    self.event_source.start()",
    "docstring": "Starts interactive animation. Adds the draw frame command to the GUI handler, calls show to start the event loop.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\animation.py",
    "ast_data": "FunctionDef name:_start arg:self arguments arg arg If Call Return return:no Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "local_response_norm",
    "source_code": "def local_response_norm(input: Tensor, size: int, alpha: float=0.0001, beta: float=0.75, k: float=1.0) -> Tensor:\n    if has_torch_function_unary(input):\n        return handle_torch_function(local_response_norm, (input,), input, size, alpha=alpha, beta=beta, k=k)\n    dim = input.dim()\n    if dim < 3:\n        raise ValueError(f'Expected 3D or higher dimensionality                          input (got {dim} dimensions)')\n    if input.numel() == 0:\n        return input\n    div = input.mul(input)\n    if dim == 3:\n        div = div.unsqueeze(1)\n        div = pad(div, (0, 0, size // 2, (size - 1) // 2))\n        div = avg_pool2d(div, (size, 1), stride=1).squeeze(1)\n    else:\n        sizes = input.size()\n        div = div.view(sizes[0], 1, sizes[1], sizes[2], -1)\n        div = pad(div, (0, 0, 0, 0, size // 2, (size - 1) // 2))\n        div = avg_pool3d(div, (size, 1, 1), stride=1).squeeze(1)\n        div = div.view(sizes)\n    div = div.mul(alpha).add(k).pow(beta)\n    return input / div",
    "docstring": "Apply local response normalization over an input signal. The input signal is composed of several input planes, where channels occupy the second dimension. Normalization is applied across channels. See :class: for details.",
    "type": "function",
    "file_path": "pytorch\\torch\\nn\\functional.py",
    "ast_data": "FunctionDef name:local_response_norm arg:input arg:size arg:alpha arg:beta arg:k arguments arg arg arg arg arg If Call Return return:yes Call Assign Call If Compare Raise Call If Compare Call Return return:yes Assign Call If Compare Assign Call Assign Call Assign Call Call Assign Call Assign Call Assign Call Assign Call Call Assign Call Assign Call Call Call Return return:yes"
  },
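A usage check for the public `torch.nn.functional.local_response_norm`; the interior-channel verification below assumes the zero-padded channel averaging visible in the source above:

```python
import torch
import torch.nn.functional as F

x = torch.randn(2, 8, 5, 5)             # (N, C, H, W); channels on dim 1
y = F.local_response_norm(x, size=3, alpha=1e-4, beta=0.75, k=1.0)

# For an interior channel c, the denominator is
# (k + alpha * mean of x^2 over the size-channel window at c) ** beta.
n, c = 0, 4
window = x[n, c - 1:c + 2]               # 3-channel window centered at c
denom = (1.0 + 1e-4 * (window ** 2).mean(dim=0)) ** 0.75
print(torch.allclose(y[n, c], x[n, c] / denom, atol=1e-6))  # True
```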
  {
    "library": "matplotlib",
    "name": "set_xmargin",
    "source_code": "def set_xmargin(self, m):\n    if m <= -0.5:\n        raise ValueError('margin must be greater than -0.5')\n    self._xmargin = m\n    self._request_autoscale_view('x')\n    self.stale = True",
    "docstring": "Set padding of X data limits prior to autoscaling. *m* times the data interval will be added to each end of that interval before it is used in autoscaling. If *m* is negative, this will clip the data range instead of expanding it. For example, if your data is in the range [0, 2], a margin of 0.1 will result in a range [-0.2, 2.2]; a margin of -0.1 will result in a range of [0.2, 1.8]. Parameters ---------- m : float greater than -0.5",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\axes\\_base.py",
    "ast_data": "FunctionDef name:set_xmargin arg:self arg:m arguments arg arg If Compare Raise Call Assign Call Assign"
  },
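A quick demonstration of the margin arithmetic described in the `set_xmargin` docstring (a sketch using the headless Agg backend):

```python
import matplotlib
matplotlib.use("Agg")                  # headless backend for this sketch
import matplotlib.pyplot as plt

fig, ax = plt.subplots()
ax.plot([0, 1, 2], [0, 1, 4])          # x data spans [0, 2]
ax.set_xmargin(0.1)                    # adds 0.1 * 2 to each end
fig.canvas.draw()                      # force the autoscale to run
print(ax.get_xlim())                   # approximately (-0.2, 2.2)
```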
  {
    "library": "matplotlib",
    "name": "_format_maybe_minus_and_locale",
    "source_code": "def _format_maybe_minus_and_locale(self, fmt, arg):\n    return self.fix_minus((','.join((locale.format_string(part, (arg,), True).replace(',', '{,}') for part in fmt.split(','))) if self._useMathText else locale.format_string(fmt, (arg,), True)) if self._useLocale else fmt % arg)",
    "docstring": "Format *arg* with *fmt*, applying Unicode minus and locale if desired.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\ticker.py",
    "ast_data": "FunctionDef name:_format_maybe_minus_and_locale arg:self arg:fmt arg:arg arguments arg arg arg Return return:yes Call Call Call Call Call Call"
  },
  {
    "library": "numpy",
    "name": "ndindex",
    "source_code": "@set_module('numpy')\nclass ndindex:\n\n    def __init__(self, *shape):\n        if len(shape) == 1 and isinstance(shape[0], tuple):\n            shape = shape[0]\n        x = as_strided(_nx.zeros(1), shape=shape, strides=_nx.zeros_like(shape))\n        self._it = _nx.nditer(x, flags=['multi_index', 'zerosize_ok'], order='C')\n\n    def __iter__(self):\n        return self\n\n    def ndincr(self):\n        warnings.warn('`ndindex.ndincr()` is deprecated, use `next(ndindex)` instead', DeprecationWarning, stacklevel=2)\n        next(self)\n\n    def __next__(self):\n        next(self._it)\n        return self._it.multi_index",
    "docstring": "An N-dimensional iterator object to index arrays. Given the shape of an array, an instance iterates over the N-dimensional index of the array. At each iteration a tuple of indices is returned, the last dimension is iterated over first. Parameters ---------- shape : ints, or a single tuple of ints The size of each dimension of the array can be passed as individual parameters or as the elements of a tuple. See Also -------- ndenumerate, flatiter Examples -------- >>> import numpy as np Dimensions as individual arguments >>> for index in np.ndindex(3, 2, 1): ... print(index) (0, 0, 0) (0, 1, 0) (1, 0, 0) (1, 1, 0) (2, 0, 0) (2, 1, 0) Same dimensions - but in a tuple `` >>> for index in np.ndindex((3, 2, 1)): ... print(index) (0, 0, 0) (0, 1, 0) (1, 0, 0) (1, 1, 0) (2, 0, 0) (2, 1, 0)",
    "type": "class",
    "file_path": "numpy\\numpy\\lib\\_index_tricks_impl.py",
    "ast_data": "ClassDef name:ndindex FunctionDef name:__init__ arg:self arguments arg arg If BoolOp Compare Call Call Assign Assign Call Call Call Assign Call FunctionDef name:__iter__ arg:self arguments arg Return return:yes FunctionDef name:ndincr arg:self arguments arg Call Call FunctionDef name:__next__ arg:self arguments arg Call Return return:yes Call"
  },
  {
    "library": "django",
    "name": "get_distance",
    "source_code": "def get_distance(self, f, dist_val, lookup_type):\n    value = dist_val[0]\n    geodetic = f.geodetic(self.connection)\n    geography = f.geography\n    if isinstance(value, Distance):\n        if geography:\n            dist_param = value.m\n        elif geodetic:\n            if lookup_type == 'dwithin':\n                raise ValueError('Only numeric values of degree units are allowed on geographic DWithin queries.')\n            dist_param = value.m\n        else:\n            dist_param = getattr(value, Distance.unit_attname(f.units_name(self.connection)))\n    else:\n        dist_param = value\n    return [dist_param]",
    "docstring": "Retrieve the distance parameters for the given geometry field, distance lookup value, and the distance lookup type. This is the most complex implementation of the spatial backends due to what is supported on geodetic geometry columns vs. what's available on projected geometry columns. In addition, it has to take into account the geography column type.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\db\\backends\\postgis\\operations.py",
    "ast_data": "FunctionDef name:get_distance arg:self arg:f arg:dist_val arg:lookup_type arguments arg arg arg arg Assign Assign Call Assign If Call If Assign If If Compare Raise Call Assign Assign Call Call Call Assign Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "transform",
    "source_code": "def transform(self, X, **params):\n    _raise_for_params(params, self, 'transform')\n    if _routing_enabled():\n        routed_params = process_routing(self, 'transform', **params)\n    else:\n        routed_params = Bunch()\n        for name, _ in self.transformer_list:\n            routed_params[name] = Bunch(transform={})\n    Xs = Parallel(n_jobs=self.n_jobs)((delayed(_transform_one)(trans, X, None, weight, params=routed_params[name]) for name, trans, weight in self._iter()))\n    if not Xs:\n        return np.zeros((X.shape[0], 0))\n    return self._hstack(Xs)",
    "docstring": "Transform X separately by each transformer, concatenate results. Parameters ---------- X : iterable or array-like, depending on transformers Input data to be transformed. **params : dict, default=None Parameters routed to the method of the sub-transformers via the metadata routing API. See :ref: for more details. .. versionadded:: 1.5 Returns ------- X_t : array-like or sparse matrix of shape (n_samples, sum_n_components) The of results of transformers. is the sum of (output dimension) over transformers.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\pipeline.py",
    "ast_data": "FunctionDef name:transform arg:self arg:X arguments arg arg arg Call If Call Assign Call Assign Call For Assign Call Assign Call Call Call Call Call If Return return:yes Call Return return:yes Call"
  },
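Typical public usage of the `transform` method above via `FeatureUnion`; the column count of the result is the sum of each transformer's output dimension:

```python
import numpy as np
from sklearn.decomposition import PCA
from sklearn.pipeline import FeatureUnion
from sklearn.preprocessing import StandardScaler

X = np.random.default_rng(0).normal(size=(10, 4))
union = FeatureUnion([("pca", PCA(n_components=2)),
                      ("scale", StandardScaler())])
Xt = union.fit(X).transform(X)
print(Xt.shape)        # (10, 6): 2 PCA components + 4 scaled columns
```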
  {
    "library": "scikit-learn",
    "name": "feature_importances_",
    "source_code": "@property\ndef feature_importances_(self):\n    check_is_fitted(self)\n    all_importances = Parallel(n_jobs=self.n_jobs, prefer='threads')((delayed(getattr)(tree, 'feature_importances_') for tree in self.estimators_ if tree.tree_.node_count > 1))\n    if not all_importances:\n        return np.zeros(self.n_features_in_, dtype=np.float64)\n    all_importances = np.mean(all_importances, axis=0, dtype=np.float64)\n    return all_importances / np.sum(all_importances)",
    "docstring": "The impurity-based feature importances. The higher, the more important the feature. The importance of a feature is computed as the (normalized) total reduction of the criterion brought by that feature. It is also known as the Gini importance. Warning: impurity-based feature importances can be misleading for high cardinality features (many unique values). See :func: as an alternative. Returns ------- feature_importances_ : ndarray of shape (n_features,) The values of this array sum to 1, unless all trees are single node trees consisting of only the root node, in which case it will be an array of zeros.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\ensemble\\_forest.py",
    "ast_data": "FunctionDef name:feature_importances_ arg:self arguments arg Call Assign Call Call Call Call Compare If Return return:yes Call Assign Call Return return:yes Call"
  },
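A short public-API check of the normalization described above (the importances of a fitted forest sum to 1):

```python
from sklearn.datasets import make_classification
from sklearn.ensemble import RandomForestClassifier

X, y = make_classification(n_samples=200, n_features=5, random_state=0)
clf = RandomForestClassifier(n_estimators=25, random_state=0).fit(X, y)

imp = clf.feature_importances_         # mean impurity decrease across trees
print(imp.shape, float(imp.sum()))     # (5,), 1.0 (up to rounding)
```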
  {
    "library": "tensorflow",
    "name": "ExponentialBuckets",
    "source_code": "class ExponentialBuckets(Buckets):\n    __slots__ = []\n\n    def __init__(self, scale, growth_factor, bucket_count):\n        super(ExponentialBuckets, self).__init__(pywrap_tfe.TFE_MonitoringNewExponentialBuckets(scale, growth_factor, bucket_count))",
    "docstring": "Exponential bucketing strategy. Sets up buckets of the form: [-DBL_MAX, ..., scale * growth^i, scale * growth_factor^(i + 1), ..., DBL_MAX].",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\monitoring.py",
    "ast_data": "ClassDef name:ExponentialBuckets Assign FunctionDef name:__init__ arg:self arg:scale arg:growth_factor arg:bucket_count arguments arg arg arg arg Call Call Call"
  },
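The bucket boundaries can be reproduced in plain Python; `scale`, `growth_factor`, and `bucket_count` below are illustrative values, not library defaults:

```python
# Reproduce the exponential bucket boundaries in plain Python.
scale, growth_factor, bucket_count = 0.001, 2.0, 10

bounds = [scale * growth_factor ** i for i in range(bucket_count)]
# Buckets: [-DBL_MAX, bounds[0]), [bounds[0], bounds[1]), ...,
# [bounds[-1], DBL_MAX]
print(bounds[:3])    # [0.001, 0.002, 0.004]
```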
  {
    "library": "tensorflow",
    "name": "_apply_scores",
    "source_code": "def _apply_scores(self, scores, value, scores_mask=None, training=None):\n    if scores_mask is not None:\n        padding_mask = math_ops.logical_not(scores_mask)\n        if scores.dtype is dtypes.float16:\n            scores -= 65504.0 * math_ops.cast(padding_mask, dtype=scores.dtype)\n        else:\n            scores -= 1000000000.0 * math_ops.cast(padding_mask, dtype=scores.dtype)\n    if training is None:\n        training = backend.learning_phase()\n    weights = nn.softmax(scores)\n\n    def dropped_weights():\n        return nn.dropout(weights, rate=self.dropout)\n    weights = control_flow_util.smart_cond(training, dropped_weights, lambda: array_ops.identity(weights))\n    return (math_ops.matmul(weights, value), weights)",
    "docstring": "Applies attention scores to the given value tensor. To use this method in your attention layer, follow the steps: * Use tensor of shape and tensor of shape to calculate the attention . * Pass and tensors to this method. The method applies , calculates , then returns query_mask[batch_size, Tq, Tv][batch_size, Tv, dim]Tensor[batch_size, 1, Tv][batch_size, Tq, Tv]scores_mask==FalseTrue[batch_size, Tq, dim][batch_size, Tq, Tv]`.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\layers\\dense_attention.py",
    "ast_data": "FunctionDef name:_apply_scores arg:self arg:scores arg:value arg:scores_mask arg:training arguments arg arg arg arg arg If Compare Assign Call If Compare Call Call If Compare Assign Call Assign Call FunctionDef name:dropped_weights arguments Return return:yes Call Assign Call arguments Call Return return:yes Call"
  },
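The masking trick used by `_apply_scores` (a large negative bias where the mask is False, so softmax assigns those positions near-zero weight), restated in plain PyTorch rather than the private Keras helper:

```python
import torch

B, Tq, Tv, dim = 2, 3, 4, 5
scores = torch.randn(B, Tq, Tv)
value = torch.randn(B, Tv, dim)
mask = torch.ones(B, 1, Tv, dtype=torch.bool)
mask[..., -1] = False                      # hide the last value position

scores = scores.masked_fill(~mask, -1e9)   # large negative bias where masked
weights = scores.softmax(dim=-1)           # ~0 attention on masked positions
out = weights @ value                      # (B, Tq, dim)
print(out.shape, float(weights[..., -1].max()))   # last column ~ 0
```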
  {
    "library": "sphinx",
    "name": "traverse_translatable_index",
    "source_code": "def traverse_translatable_index(doctree: Element) -> Iterable[tuple[Element, list[tuple[str, str, str, str, str | None]]]]:\n    matcher = NodeMatcher(addnodes.index, inline=False)\n    for node in matcher.findall(doctree):\n        if 'raw_entries' in node:\n            entries = node['raw_entries']\n        else:\n            entries = node['entries']\n        yield (node, entries)",
    "docstring": "Traverse translatable index node from a document tree.",
    "type": "function",
    "file_path": "sphinx\\sphinx\\util\\nodes.py",
    "ast_data": "FunctionDef name:traverse_translatable_index arg:doctree arguments arg Assign Call For Call If Compare Assign Assign"
  },
  {
    "library": "django",
    "name": "check_string",
    "source_code": "def check_string(result, func, cargs):\n    if not result:\n        raise GEOSException('Error encountered checking string return value in GEOS C function \"%s\".' % func.__name__)\n    s = string_at(result)\n    free(result)\n    return s",
    "docstring": "Error checking for routines that return strings. This frees the memory allocated by GEOS at the result pointer.",
    "type": "function",
    "file_path": "django\\django\\contrib\\gis\\geos\\prototypes\\errcheck.py",
    "ast_data": "FunctionDef name:check_string arg:result arg:func arg:cargs arguments arg arg arg If Raise Call Assign Call Call Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "_select_best_index",
    "source_code": "@staticmethod\ndef _select_best_index(refit, refit_metric, results):\n    last_iter = np.max(results['iter'])\n    last_iter_indices = np.flatnonzero(results['iter'] == last_iter)\n    test_scores = results['mean_test_score'][last_iter_indices]\n    if np.isnan(test_scores).all():\n        best_idx = 0\n    else:\n        best_idx = np.nanargmax(test_scores)\n    return last_iter_indices[best_idx]",
    "docstring": "Custom refit callable to return the index of the best candidate. We want the best candidate out of the last iteration. By default BaseSearchCV would return the best candidate out of all iterations. Currently, we only support for a single metric thus and are not required.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\model_selection\\_search_successive_halving.py",
    "ast_data": "FunctionDef name:_select_best_index arg:refit arg:refit_metric arg:results arguments arg arg arg Assign Call Assign Call Compare Assign If Call Call Assign Assign Call Return return:yes"
  },
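The selection rule above on a toy `cv_results_`-style dict (the dict layout here is illustrative):

```python
import numpy as np

results = {
    "iter": np.array([0, 0, 1, 1]),
    "mean_test_score": np.array([0.70, 0.80, 0.75, 0.90]),
}
last = np.max(results["iter"])
candidates = np.flatnonzero(results["iter"] == last)
best = candidates[np.nanargmax(results["mean_test_score"][candidates])]
print(best)    # 3: best score among last-iteration candidates only
```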
  {
    "library": "django",
    "name": "E",
    "source_code": "def E(self):\n    return MONTHS_ALT[self.data.month]",
    "docstring": "Alternative month names as required by some locales. Proprietary extension.",
    "type": "method",
    "file_path": "django\\django\\utils\\dateformat.py",
    "ast_data": "FunctionDef name:E arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_is_user_included_op",
    "source_code": "def _is_user_included_op(self, op):\n    for opname_re in self._parameters.included_opname_re_list:\n        if opname_re.match(op.name):\n            return True\n    for optype_re in self._parameters.included_optype_re_list:\n        if optype_re.match(op.type):\n            return True\n    return False",
    "docstring": "Checks whether the op is included in the tensor tracer flags. Args: op: tf Operation Returns: True, if the op is included. An op is included if: - Its op name is given in included_opnames - Its op type is given in included_optypes - The op is at most _trace_ops_before_included hops before an included op - The op is at most _trace_ops_after_included hops after an included op",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\tpu\\tensor_tracer.py",
    "ast_data": "FunctionDef name:_is_user_included_op arg:self arg:op arguments arg arg For If Call Return return:yes For If Call Return return:yes Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "DeviceOrderMode",
    "source_code": "@tf_export('tpu.experimental.DeviceOrderMode')\nclass DeviceOrderMode(enum.IntEnum):\n    AUTO = 0\n    RING = 1\n    MESH = 2",
    "docstring": "The way of determining device orders when computing device assignment.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\tpu\\device_assignment.py",
    "ast_data": "ClassDef name:DeviceOrderMode Assign Assign Assign Call"
  },
  {
    "library": "kornia",
    "name": "__sub__",
    "source_code": "def __sub__(self, right: 'Quaternion') -> 'Quaternion':\n    KORNIA_CHECK_TYPE(right, Quaternion)\n    return Quaternion(self.data - right.data)",
    "docstring": "Subtract a given quaternion. Args: right: the quaternion to subtract. Example: >>> q1 = Quaternion(tensor([2., 0., 1., 1.])) >>> q2 = Quaternion.identity() >>> q3 = q1 - q2 >>> q3.data Parameter containing: tensor([1., 0., 1., 1.], requires_grad=True)",
    "type": "method",
    "file_path": "kornia\\kornia\\geometry\\quaternion.py",
    "ast_data": "FunctionDef name:__sub__ arg:self arg:right arguments arg arg Call Return return:yes Call"
  },
  {
    "library": "kornia",
    "name": "_transform_warp_impl3d",
    "source_code": "def _transform_warp_impl3d(src: Tensor, dst_pix_trans_src_pix: Tensor, dsize_src: tuple[int, int, int], dsize_dst: tuple[int, int, int], grid_mode: str, padding_mode: str, align_corners: bool) -> Tensor:\n    dst_norm_trans_src_norm: Tensor = normalize_homography3d(dst_pix_trans_src_pix, dsize_src, dsize_dst)\n    src_norm_trans_dst_norm = torch.inverse(dst_norm_trans_src_norm)\n    return homography_warp3d(src, src_norm_trans_dst_norm, dsize_dst, grid_mode, padding_mode, align_corners, True)",
    "docstring": "Compute the transform in normalized coordinates and perform the warping.",
    "type": "function",
    "file_path": "kornia\\kornia\\geometry\\transform\\imgwarp.py",
    "ast_data": "FunctionDef name:_transform_warp_impl3d arg:src arg:dst_pix_trans_src_pix arg:dsize_src arg:dsize_dst arg:grid_mode arg:padding_mode arg:align_corners arguments arg arg arg arg arg arg arg Call Assign Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "tensor_parallel_transformation",
    "source_code": "def tensor_parallel_transformation(exported_program: ExportedProgram, rank: int, world_size: int, device_type: str, parallel_strategies: dict[str, ParallelStyle]) -> ExportedProgram:\n    gm = exported_program.graph_module\n    sig = copy.deepcopy(exported_program.graph_signature)\n    state_dict = copy.copy(exported_program.state_dict)\n    with gm._set_replace_hook(sig.get_replace_hook()):\n        res = _TensorParallelTransformPass(rank, world_size, device_type, state_dict, exported_program.graph_signature, parallel_strategies)(gm)\n        assert res is not None\n        gm = res.graph_module\n    return exported_program._update(gm, sig, state_dict=state_dict)",
    "docstring": "The entry point function to perform graph transformations on an exported program to transform a single-device graph into a tensor parallel graph. .. warning:: This API is experimental and subject to change.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\tensor\\experimental\\_tp_transform.py",
    "ast_data": "FunctionDef name:tensor_parallel_transformation arg:exported_program arg:rank arg:world_size arg:device_type arg:parallel_strategies arguments arg arg arg arg arg Assign Assign Call Assign Call With Call Call Assign Call Call Compare Assign Return return:yes Call"
  },
  {
    "library": "django",
    "name": "_get_scheme",
    "source_code": "def _get_scheme(self):\n    return 'http'",
    "docstring": "Hook for subclasses like WSGIRequest to implement. Return 'http' by default.",
    "type": "method",
    "file_path": "django\\django\\http\\request.py",
    "ast_data": "FunctionDef name:_get_scheme arg:self arguments arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_prune_module_bias",
    "source_code": "def _prune_module_bias(module: nn.Module, mask: Tensor) -> None:\n    original_bias = cast(Tensor, getattr(module, '_bias', module.bias))\n    if original_bias is not None:\n        module.bias = nn.Parameter(original_bias[mask])\n    if hasattr(module, '_bias'):\n        delattr(module, '_bias')",
    "docstring": "Applies mask to given modules bias",
    "type": "function",
    "file_path": "pytorch\\torch\\ao\\pruning\\_experimental\\pruner\\prune_functions.py",
    "ast_data": "FunctionDef name:_prune_module_bias arg:module arg:mask arguments arg arg Assign Call Call If Compare Assign Call If Call Call"
  },
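The same keep-mask idea applied by hand to a public `nn.Linear` (a sketch; the private helper additionally handles the `_bias` attribute used during pruning):

```python
import torch
import torch.nn as nn

lin = nn.Linear(4, 3)
keep = torch.tensor([True, False, True])    # keep output rows 0 and 2

lin.weight = nn.Parameter(lin.weight[keep])
lin.bias = nn.Parameter(lin.bias[keep])
lin.out_features = int(keep.sum())

print(lin(torch.randn(1, 4)).shape)         # torch.Size([1, 2])
```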
  {
    "library": "uvicorn",
    "name": "shutdown",
    "source_code": "def shutdown(self) -> None:\n    if self.cycle is None or self.cycle.response_complete:\n        self.transport.close()\n    else:\n        self.cycle.keep_alive = False",
    "docstring": "Called by the server to commence a graceful shutdown.",
    "type": "method",
    "file_path": "uvicorn\\uvicorn\\protocols\\http\\httptools_impl.py",
    "ast_data": "FunctionDef name:shutdown arg:self arguments arg If BoolOp Compare Call Assign"
  },
  {
    "library": "pytorch",
    "name": "format_traceback_short",
    "source_code": "def format_traceback_short(tb):\n    return format_frame(traceback.extract_tb(tb)[-1])",
    "docstring": "Format a TracebackType in a short way, printing only the inner-most frame.",
    "type": "function",
    "file_path": "pytorch\\torch\\utils\\_traceback.py",
    "ast_data": "FunctionDef name:format_traceback_short arg:tb arguments arg Return return:yes Call Call"
  },
  {
    "library": "django",
    "name": "SplitHiddenDateTimeWidget",
    "source_code": "class SplitHiddenDateTimeWidget(SplitDateTimeWidget):\n    template_name = 'django/forms/widgets/splithiddendatetime.html'\n\n    def __init__(self, attrs=None, date_format=None, time_format=None, date_attrs=None, time_attrs=None):\n        super().__init__(attrs, date_format, time_format, date_attrs, time_attrs)\n        for widget in self.widgets:\n            widget.input_type = 'hidden'",
    "docstring": "A widget that splits datetime input into two inputs.",
    "type": "class",
    "file_path": "django\\django\\forms\\widgets.py",
    "ast_data": "ClassDef name:SplitHiddenDateTimeWidget Assign FunctionDef name:__init__ arg:self arg:attrs arg:date_format arg:time_format arg:date_attrs arg:time_attrs arguments arg arg arg arg arg arg Call Call For Assign"
  },
  {
    "library": "scipy",
    "name": "read_var_header",
    "source_code": "def read_var_header(self):\n    mdtype, byte_count = self._file_reader.read_full_tag()\n    if not byte_count > 0:\n        raise ValueError('Did not read any bytes')\n    next_pos = self.mat_stream.tell() + byte_count\n    if mdtype == miCOMPRESSED:\n        stream = ZlibInputStream(self.mat_stream, byte_count)\n        self._matrix_reader.set_stream(stream)\n        check_stream_limit = self.verify_compressed_data_integrity\n        mdtype, byte_count = self._matrix_reader.read_full_tag()\n    else:\n        check_stream_limit = False\n        self._matrix_reader.set_stream(self.mat_stream)\n    if not mdtype == miMATRIX:\n        raise TypeError(f'Expecting miMATRIX type here, got {mdtype}')\n    header = self._matrix_reader.read_header(check_stream_limit)\n    return (header, next_pos)",
    "docstring": "Read header, return header, next position Header has to define at least .name and .is_global Parameters ---------- None Returns ------- header : object object that can be passed to self.read_var_array, and that has attributes .name and .is_global next_position : int position in stream of next variable",
    "type": "method",
    "file_path": "scipy\\scipy\\io\\matlab\\_mio5.py",
    "ast_data": "FunctionDef name:read_var_header arg:self arguments arg Assign Call If Compare Raise Call Assign Call If Compare Assign Call Call Assign Assign Call Assign Call If Compare Raise Call Assign Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "get_snap",
    "source_code": "def get_snap(self):\n    if mpl.rcParams['path.snap']:\n        return self._snap\n    else:\n        return False",
    "docstring": "Return the snap setting. See for details.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\artist.py",
    "ast_data": "FunctionDef name:get_snap arg:self arguments arg If Return return:yes Return return:yes"
  },
  {
    "library": "django",
    "name": "_destroy_test_db",
    "source_code": "def _destroy_test_db(self, test_database_name, verbosity=1):\n    if not self.connection.is_pool:\n        self.connection.settings_dict['USER'] = self.connection.settings_dict['SAVED_USER']\n        self.connection.settings_dict['PASSWORD'] = self.connection.settings_dict['SAVED_PASSWORD']\n    self.connection.close()\n    self.connection.close_pool()\n    parameters = self._get_test_db_params()\n    with self._maindb_connection.cursor() as cursor:\n        if self._test_user_create():\n            if verbosity >= 1:\n                self.log('Destroying test user...')\n            self._destroy_test_user(cursor, parameters, verbosity)\n        if self._test_database_create():\n            if verbosity >= 1:\n                self.log('Destroying test database tables...')\n            self._execute_test_db_destruction(cursor, parameters, verbosity)\n    self._maindb_connection.close()\n    self._maindb_connection.close_pool()",
    "docstring": "Destroy a test database, prompting the user for confirmation if the database already exists. Return the name of the test database created.",
    "type": "method",
    "file_path": "django\\django\\db\\backends\\oracle\\creation.py",
    "ast_data": "FunctionDef name:_destroy_test_db arg:self arg:test_database_name arg:verbosity arguments arg arg arg If Assign Assign Call Call Assign Call With Call If Call If Compare Call Call If Call If Compare Call Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "on_run_end",
    "source_code": "@abc.abstractmethod\ndef on_run_end(self, request):\n    pass",
    "docstring": "Callback invoked on run() calls to the debug-wrapper session. This is a blocking callback. The invocation happens right before the wrapper exits its run() call. Args: request: () callback request object carrying information such as the actual action performed by the session wrapper for the run() call. Returns: An instance of .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\debug\\wrappers\\framework.py",
    "ast_data": "FunctionDef name:on_run_end arg:self arg:request arguments arg arg"
  },
  {
    "library": "tensorflow",
    "name": "_check_table_initializer_element_spec",
    "source_code": "def _check_table_initializer_element_spec(element_spec):\n    base_error = 'Datasets used to initialize lookup tables must produce elements in the form (key, value), where the keys and values are scalar tensors. '\n    specific_error = None\n    if len(element_spec) != 2:\n        raise ValueError(base_error + f'However, the given dataset produces {len(element_spec)} components instead of two (key, value) components. Full dataset element spec: {element_spec}.')\n    if not isinstance(element_spec[0], tensor.TensorSpec):\n        raise ValueError(base_error + f'However, the given dataset produces non-Tensor keys of type {type(element_spec[0])}.')\n    if not isinstance(element_spec[1], tensor.TensorSpec):\n        raise ValueError(base_error + f'However, the given dataset produces non-Tensor values of type {type(element_spec[1])}.')\n    if element_spec[0].shape.rank not in (None, 0):\n        raise ValueError(base_error + f'However, the given dataset produces non-scalar key Tensors of rank {element_spec[0].shape.rank}.')\n    if element_spec[1].shape.rank not in (None, 0):\n        raise ValueError(base_error + f'However, the given dataset produces non-scalar value Tensors of rank {element_spec[1].shape.rank}.')",
    "docstring": "Raises an error if the given table initializer element spec is invalid.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\data\\experimental\\ops\\lookup_ops.py",
    "ast_data": "FunctionDef name:_check_table_initializer_element_spec arg:element_spec arguments arg Assign Assign If Compare Call Raise Call Call If Call Raise Call Call If Call Raise Call Call If Compare Raise Call If Compare Raise Call"
  },
  {
    "library": "tensorflow",
    "name": "async_clear_error",
    "source_code": "@tf_export('experimental.async_clear_error')\ndef async_clear_error():\n    context().clear_executor_errors()",
    "docstring": "Clear pending operations and error statuses in async execution. In async execution mode, an error in op/function execution can lead to errors in subsequent ops/functions that are scheduled but not yet executed. Calling this method clears all pending operations and reset the async execution state. Example:",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\context.py",
    "ast_data": "FunctionDef name:async_clear_error arguments Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "step",
    "source_code": "def step(self, closure=None):\n    self._optim.step(closure)",
    "docstring": "Performs a single optimization step (parameter update). Args: closure (Callable): A closure that reevaluates the model and returns the loss. Optional for most optimizers. .. note:: Unless otherwise specified, this function should not modify the `` field of the parameters.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\_shard\\sharded_optim\\api.py",
    "ast_data": "FunctionDef name:step arg:self arg:closure arguments arg arg Call"
  },
  {
    "library": "scipy",
    "name": "f_pts",
    "source_code": "@property\ndef f_pts(self) -> int:\n    return self.mfft // 2 + 1 if self.onesided_fft else self.mfft",
    "docstring": "Number of points along the frequency axis. See Also -------- delta_f: Width of the frequency bins of the STFT. f: Frequencies values of the STFT. mfft: Length of the input for FFT used. ShortTimeFFT: Class this property belongs to.",
    "type": "method",
    "file_path": "scipy\\scipy\\signal\\_short_time_fft.py",
    "ast_data": "FunctionDef name:f_pts arg:self arguments arg Return return:yes"
  },
  {
    "library": "cherrypy",
    "name": "__init__",
    "source_code": "def __init__(self, *args, **kwargs):\n    self.args = args\n    self.kwargs = kwargs\n    self.ready = False",
    "docstring": "Initialize the flup CGI Server plugin.",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\process\\servers.py",
    "ast_data": "FunctionDef name:__init__ arg:self arguments arg arg arg Assign Assign Assign"
  },
  {
    "library": "scikit-learn",
    "name": "__call__",
    "source_code": "@abstractmethod\ndef __call__(self, X, Y=None, eval_gradient=False):\n    pass",
    "docstring": "Evaluate the kernel.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\gaussian_process\\kernels.py",
    "ast_data": "FunctionDef name:__call__ arg:self arg:X arg:Y arg:eval_gradient arguments arg arg arg arg"
  },
  {
    "library": "matplotlib",
    "name": "_find_transformed_bbox",
    "source_code": "def _find_transformed_bbox(self, trans, bbox):\n    grid = np.reshape(np.meshgrid(np.linspace(bbox.x0, bbox.x1, self.nx), np.linspace(bbox.y0, bbox.y1, self.ny)), (2, -1)).T\n    tbbox = Bbox.null()\n    tbbox.update_from_data_xy(trans.transform(grid))\n    return tbbox.expanded(1 + 2 / self.nx, 1 + 2 / self.ny)",
    "docstring": "Compute an approximation of the bounding box obtained by applying *trans* to *bbox*. See `` for details; this method performs similar calculations, but using a different representation of the arguments and return value.",
    "type": "method",
    "file_path": "matplotlib\\lib\\mpl_toolkits\\axisartist\\grid_finder.py",
    "ast_data": "FunctionDef name:_find_transformed_bbox arg:self arg:trans arg:bbox arguments arg arg arg Assign Call Call Call Call Assign Call Call Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "_create_act_send_info",
    "source_code": "def _create_act_send_info(self):\n    act_send_info: dict[int, list] = {}\n    out_idx = 0\n    for user in self.node.users:\n        if user.target is operator.getitem:\n            gi_dsts = act_send_info.setdefault(out_idx, [])\n            for gi_user in user.users:\n                dst_rank = self.find_dst_rank(gi_user)\n                if dst_rank is not None:\n                    gi_dsts.append(dst_rank)\n            out_idx += 1\n        else:\n            dsts = act_send_info.setdefault(out_idx, [])\n            dst_rank = self.find_dst_rank(user)\n            if dst_rank is not None:\n                dsts.append(dst_rank)\n    output_node = self._get_output_node()\n    output_vals: tuple[torch.Tensor] = tuple((v.meta['val'] for v in flatten_args(output_node.args)))\n    self._configure_outputs_meta(output_vals)\n    logger.debug('%s Send info: %s', self.log_prefix, act_send_info)\n    return act_send_info",
    "docstring": "Create a dict of send info for activations. The dict is of the form: { output_index: [dst_rank_0, dst_rank_1, ...], ... } where the list of s covers the case where an output value may be consumed by multiple stages.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\pipelining\\stage.py",
    "ast_data": "FunctionDef name:_create_act_send_info arg:self arguments arg Assign For If Compare Assign Call For Assign Call If Compare Call Assign Call Assign Call If Compare Call Assign Call Call Call Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_to_values_def",
    "source_code": "def _to_values_def(self, export_scope=None):\n    values_def = control_flow_pb2.ValuesDef()\n    values_def.values.extend([ops.strip_name_scope(v, export_scope) for v in sorted(self._values)])\n    for k, v in self._external_values.items():\n        k = ops.strip_name_scope(k, export_scope)\n        values_def.external_values[k] = ops.strip_name_scope(v.name, export_scope)\n    return values_def",
    "docstring": "Converts the values to a protocol buffer. Args: export_scope: Optional . Name scope to remove. Returns: A protocol buffer.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\control_flow_ops.py",
    "ast_data": "FunctionDef name:_to_values_def arg:self arg:export_scope arguments arg arg Assign Call Call Call Call For Call Assign Call Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "value",
    "source_code": "def value(self):\n    if self._cached_value is not None:\n        return self._cached_value\n    with ops.colocate_with(None, ignore_existing=True):\n        return self._read_variable_op()",
    "docstring": "A cached operation which reads the value of this variable.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\resource_variable_ops.py",
    "ast_data": "FunctionDef name:value arg:self arguments arg If Compare Return return:yes With Call Return return:yes Call"
  },
  {
    "library": "pygame",
    "name": "set_at",
    "source_code": "def set_at(surf, in_x, in_y, color):\n    surf.set_at((in_x, in_y), color)",
    "docstring": "Set the color of a pixel in a surface",
    "type": "function",
    "file_path": "pygame\\src_py\\draw_py.py",
    "ast_data": "FunctionDef name:set_at arg:surf arg:in_x arg:in_y arg:color arguments arg arg arg arg Call"
  },
  {
    "library": "tensorflow",
    "name": "reset_captures",
    "source_code": "def reset_captures(self, tensors, placeholders):\n    self._by_val_external = MutationAwareDict()\n    self._by_val_internal = MutationAwareDict()\n    self._by_val_tracetype = MutationAwareDict()\n    for external, internal in zip(tensors, placeholders):\n        key = id(external)\n        self._by_val_external[key] = external\n        self._by_val_internal[key] = internal\n        self._by_val_tracetype[key] = trace_type.from_value(external)",
    "docstring": "Set the captures with the provided list of captures & placeholder.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\core\\function\\capture\\capture_container.py",
    "ast_data": "FunctionDef name:reset_captures arg:self arg:tensors arg:placeholders arguments arg arg arg Assign Call Assign Call Assign Call For Call Assign Call Assign Assign Assign Call"
  },
  {
    "library": "tensorflow",
    "name": "watch",
    "source_code": "def watch(tape, tensor):\n    pywrap_tfe.TFE_Py_TapeWatch(tape._tape, tensor)",
    "docstring": "Marks this tensor to be watched by the given tape.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\tape.py",
    "ast_data": "FunctionDef name:watch arg:tape arg:tensor arguments arg arg Call"
  },
  {
    "library": "tensorflow",
    "name": "create_parser",
    "source_code": "def create_parser():\n    parser = argparse_flags.ArgumentParser(description='saved_model_cli: Command-line interface for SavedModel', conflict_handler='resolve')\n    parser.add_argument('-v', '--version', action='version', version='0.1.0')\n    subparsers = parser.add_subparsers(title='commands', description='valid commands', help='additional help')\n    add_show_subparser(subparsers)\n    add_run_subparser(subparsers)\n    add_scan_subparser(subparsers)\n    add_convert_subparser(subparsers)\n    add_aot_compile_cpu_subparser(subparsers)\n    add_freeze_model_subparser(subparsers)\n    return parser",
    "docstring": "Creates a parser that parse the command line arguments. Returns: A namespace parsed from command line arguments.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\tools\\saved_model_cli.py",
    "ast_data": "FunctionDef name:create_parser arguments Assign Call Call Assign Call Call Call Call Call Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "has_backward",
    "source_code": "@property\ndef has_backward(self) -> bool:\n    return self._has_backward",
    "docstring": "Returns true if this stage has a backward pass.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\pipelining\\stage.py",
    "ast_data": "FunctionDef name:has_backward arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "run",
    "source_code": "def run(self, fetches, feed_dict=None, options=None, run_metadata=None):\n    return self._sess.run(fetches, feed_dict=feed_dict, options=options, run_metadata=run_metadata)",
    "docstring": "Run ops in the monitored session. This method is completely compatible with the method. Args: fetches: Same as . feed_dict: Same as . options: Same as . run_metadata: Same as . Returns: Same as .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\training\\monitored_session.py",
    "ast_data": "FunctionDef name:run arg:self arg:fetches arg:feed_dict arg:options arg:run_metadata arguments arg arg arg arg arg Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "transpose",
    "source_code": "@final\ndef transpose(self, *args, **kwargs) -> Self:\n    nv.validate_transpose(args, kwargs)\n    return self",
    "docstring": "Return the transpose, which is by definition self. Returns ------- %(klass)s",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\base.py",
    "ast_data": "FunctionDef name:transpose arg:self arguments arg arg arg Call Return return:yes"
  },
  {
    "library": "kornia",
    "name": "_adapted_rsampling",
    "source_code": "def _adapted_rsampling(shape: Union[Tuple[int, ...], torch.Size], dist: torch.distributions.Distribution, same_on_batch: Optional[bool]=False) -> Tensor:\n    if isinstance(shape, tuple):\n        shape = torch.Size(shape)\n    if same_on_batch:\n        rsample_size = torch.Size((1, *shape[1:]))\n        rsample = dist.rsample(rsample_size)\n        return rsample.repeat(shape[0], *[1] * (len(rsample.shape) - 1))\n    return dist.rsample(shape)",
    "docstring": "Sample from a uniform reparameterized sampling function that accepts 'same_on_batch'. If same_on_batch is True, all values generated will be exactly same given a batch_size (shape[0]). By default, same_on_batch is set to False.",
    "type": "function",
    "file_path": "kornia\\kornia\\augmentation\\utils\\helpers.py",
    "ast_data": "FunctionDef name:_adapted_rsampling arg:shape arg:dist arg:same_on_batch arguments arg arg arg If Call Assign Call If Assign Call Assign Call Return return:yes Call Call Return return:yes Call"
  },
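What `same_on_batch=True` does, reproduced with a public `torch.distributions` object: draw once, then repeat across the batch dimension:

```python
import torch

dist = torch.distributions.Uniform(0.0, 1.0)
shape = torch.Size((4, 3))

one = dist.rsample(torch.Size((1, *shape[1:])))          # a single draw
batch = one.repeat(shape[0], *[1] * (len(one.shape) - 1))
print(batch.shape, torch.equal(batch[0], batch[1]))      # torch.Size([4, 3]) True
```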
  {
    "library": "matplotlib",
    "name": "_icon",
    "source_code": "def _icon(self, name):\n    path_regular = cbook._get_data_path('images', name)\n    path_large = path_regular.with_name(path_regular.name.replace('.png', '_large.png'))\n    filename = str(path_large if path_large.exists() else path_regular)\n    pm = QtGui.QPixmap(filename)\n    pm.setDevicePixelRatio(self.devicePixelRatioF() or 1)\n    if self.palette().color(self.backgroundRole()).value() < 128:\n        icon_color = self.palette().color(self.foregroundRole())\n        mask = pm.createMaskFromColor(QtGui.QColor('black'), QtCore.Qt.MaskMode.MaskOutColor)\n        pm.fill(icon_color)\n        pm.setMask(mask)\n    return QtGui.QIcon(pm)",
    "docstring": "Construct a from an image file *name*, including the extension and relative to Matplotlib's \"images\" data directory.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\backends\\backend_qt.py",
    "ast_data": "FunctionDef name:_icon arg:self arg:name arguments arg arg Assign Call Assign Call Call Assign Call Call Assign Call Call BoolOp Call If Compare Call Call Call Call Assign Call Call Call Assign Call Call Call Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "infer_symbolic_types_single_pass",
    "source_code": "def infer_symbolic_types_single_pass(traced):\n    r = Refine(traced)\n    r.refine()\n    mgu = unify_eq(r.constraints)\n    substitute_all_types(traced.graph, mgu)",
    "docstring": "Calls our symbolic inferencer once.",
    "type": "function",
    "file_path": "pytorch\\torch\\fx\\experimental\\unify_refinements.py",
    "ast_data": "FunctionDef name:infer_symbolic_types_single_pass arg:traced arguments arg Assign Call Call Assign Call Call"
  },
  {
    "library": "tensorflow",
    "name": "load_file_system_library",
    "source_code": "@deprecation.deprecated(date=None, instructions='Use `tf.load_library` instead.')\n@tf_export(v1=['load_file_system_library'])\ndef load_file_system_library(library_filename):\n    py_tf.TF_LoadLibrary(library_filename)",
    "docstring": "Loads a TensorFlow plugin, containing file system implementation. Pass to a platform-specific mechanism for dynamically loading a library. The rules for determining the exact location of the library are platform-specific and are not documented here. Args: library_filename: Path to the plugin. Relative or absolute filesystem path to a dynamic library file. Returns: None. Raises: RuntimeError: when unable to load the library.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\load_library.py",
    "ast_data": "FunctionDef name:load_file_system_library arg:library_filename arguments arg Call Call Call"
  },
  {
    "library": "kornia",
    "name": "Jl",
    "source_code": "@staticmethod\ndef Jl(vec: Tensor) -> Tensor:\n    return So3.left_jacobian(vec)",
    "docstring": "Alias for left jacobian. Args: vec: the input point of shape :math:.",
    "type": "method",
    "file_path": "kornia\\kornia\\geometry\\liegroup\\so3.py",
    "ast_data": "FunctionDef name:Jl arg:vec arguments arg Return return:yes Call"
  },
  {
    "library": "kornia",
    "name": "symmetrical_epipolar_distance",
    "source_code": "def symmetrical_epipolar_distance(pts1: Tensor, pts2: Tensor, Fm: Tensor, squared: bool=True, eps: float=1e-08) -> Tensor:\n    if not isinstance(Fm, Tensor):\n        raise TypeError(f'Fm type is not a torch.Tensor. Got {type(Fm)}')\n    if len(Fm.shape) < 3 or not Fm.shape[-2:] == (3, 3):\n        raise ValueError(f'Fm must be a (*, 3, 3) tensor. Got {Fm.shape}')\n    if pts1.shape[-1] == 2:\n        pts1 = convert_points_to_homogeneous(pts1)\n    if pts2.shape[-1] == 2:\n        pts2 = convert_points_to_homogeneous(pts2)\n    F_t: Tensor = Fm.transpose(dim0=-2, dim1=-1)\n    line1_in_2: Tensor = pts1 @ F_t\n    line2_in_1: Tensor = pts2 @ Fm\n    numerator: Tensor = (pts2 * line1_in_2).sum(dim=-1).pow(2)\n    denominator_inv: Tensor = 1.0 / line1_in_2[..., :2].norm(2, dim=-1).pow(2) + 1.0 / line2_in_1[..., :2].norm(2, dim=-1).pow(2)\n    out: Tensor = numerator * denominator_inv\n    if squared:\n        return out\n    return (out + eps).sqrt()",
    "docstring": "Return symmetrical epipolar distance for correspondences given the fundamental matrix. Args: pts1: correspondences from the left images with shape :math:. If they are not homogeneous, converted automatically. pts2: correspondences from the right images with shape :math:. If they are not homogeneous, converted automatically. Fm: Fundamental matrices with shape :math:. Called Fm to avoid ambiguity with torch.nn.functional. squared: if True (default), the squared distance is returned. eps: Small constant for safe sqrt. Returns: the computed Symmetrical distance with shape :math:.",
    "type": "function",
    "file_path": "kornia\\kornia\\geometry\\epipolar\\_metrics.py",
    "ast_data": "FunctionDef name:symmetrical_epipolar_distance arg:pts1 arg:pts2 arg:Fm arg:squared arg:eps arguments arg arg arg arg arg If Call Raise Call Call If BoolOp Compare Call Compare Raise Call If Compare Assign Call If Compare Assign Call Call Call Call Call Call Call Call If Return return:yes Return return:yes Call"
  },
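Shape-level usage of the kornia function above, assuming the public import path `kornia.geometry.epipolar` (random inputs, so the distance values themselves are not meaningful):

```python
import torch
from kornia.geometry.epipolar import symmetrical_epipolar_distance

B, N = 1, 8
pts1 = torch.rand(B, N, 2)    # non-homogeneous: converted internally
pts2 = torch.rand(B, N, 2)
Fm = torch.rand(B, 3, 3)      # stand-in for a real fundamental matrix

d = symmetrical_epipolar_distance(pts1, pts2, Fm)
print(d.shape)                # torch.Size([1, 8])
```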
  {
    "library": "numpy",
    "name": "index",
    "source_code": "def index(self, value: Any, /) -> int:\n    raise NotImplementedError",
    "docstring": "Return the first index of .",
    "type": "method",
    "file_path": "numpy\\numpy\\_typing\\_nested_sequence.py",
    "ast_data": "FunctionDef name:index arguments arg arg Raise"
  },
  {
    "library": "pytorch",
    "name": "_annealing_cos",
    "source_code": "@staticmethod\ndef _annealing_cos(start, end, pct):\n    cos_out = math.cos(math.pi * pct) + 1\n    return end + (start - end) / 2.0 * cos_out",
    "docstring": "Cosine anneal from to as pct goes from 0.0 to 1.0.",
    "type": "method",
    "file_path": "pytorch\\torch\\optim\\lr_scheduler.py",
    "ast_data": "FunctionDef name:_annealing_cos arg:start arg:end arg:pct arguments arg arg arg Assign Call Return return:yes"
  },
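The cosine-annealing interpolation checked at its endpoints; a standalone restatement of the formula, not the scheduler API:

```python
import math

def annealing_cos(start, end, pct):
    # cos(pi * pct) + 1 runs from 2 down to 0 as pct goes 0 -> 1,
    # so the result interpolates smoothly from `start` to `end`.
    cos_out = math.cos(math.pi * pct) + 1
    return end + (start - end) / 2.0 * cos_out

print(annealing_cos(0.1, 0.001, 0.0))   # 0.1   (start)
print(annealing_cos(0.1, 0.001, 1.0))   # 0.001 (end)
print(annealing_cos(0.1, 0.001, 0.5))   # 0.0505 (midpoint)
```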
  {
    "library": "sphinx",
    "name": "_key_func_0",
    "source_code": "def _key_func_0(entry: _IndexEntryTarget) -> tuple[bool, str | Literal[False]]:\n    main, uri = entry\n    return (not main, uri)",
    "docstring": "Sort the index entries for same keyword.",
    "type": "function",
    "file_path": "sphinx\\sphinx\\environment\\adapters\\indexentries.py",
    "ast_data": "FunctionDef name:_key_func_0 arg:entry arguments arg Assign Return return:yes"
  },
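The sort key in action on a toy list of `(main, uri)` targets: `not main` puts main entries first, with ties broken by URI:

```python
targets = [(False, "doc-b"), (True, "doc-a"), (False, "doc-a")]
targets.sort(key=lambda entry: (not entry[0], entry[1]))
print(targets)   # [(True, 'doc-a'), (False, 'doc-a'), (False, 'doc-b')]
```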
  {
    "library": "tensorflow",
    "name": "should_save_summary",
    "source_code": "@property\ndef should_save_summary(self):\n    return self._strategy.extended.should_save_summary",
    "docstring": "Whether to save summaries.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\distribute\\distribute_coordinator_utils.py",
    "ast_data": "FunctionDef name:should_save_summary arg:self arguments arg Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "_check_multi_class",
    "source_code": "def _check_multi_class(multi_class, solver, n_classes):\n    if multi_class == 'auto':\n        if solver in ('liblinear',):\n            multi_class = 'ovr'\n        elif n_classes > 2:\n            multi_class = 'multinomial'\n        else:\n            multi_class = 'ovr'\n    if multi_class == 'multinomial' and solver in ('liblinear',):\n        raise ValueError('Solver %s does not support a multinomial backend.' % solver)\n    return multi_class",
    "docstring": "Computes the multi class type, either \"multinomial\" or \"ovr\". For > 2 and a solver that supports it, returns \"multinomial\". For all other cases, in particular binary classification, return \"ovr\".",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\linear_model\\_logistic.py",
    "ast_data": "FunctionDef name:_check_multi_class arg:multi_class arg:solver arg:n_classes arguments arg arg arg If Compare If Compare Assign If Compare Assign Assign If BoolOp Compare Compare Raise Call Return return:yes"
  },
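A standalone restatement of the resolution rules above; `resolve_multi_class` is a hypothetical name, since the real helper is private:

```python
def resolve_multi_class(multi_class, solver, n_classes):
    # Mirrors the rules described in the docstring (sketch).
    if multi_class == "auto":
        if solver == "liblinear":
            return "ovr"
        return "multinomial" if n_classes > 2 else "ovr"
    if multi_class == "multinomial" and solver == "liblinear":
        raise ValueError(f"Solver {solver} does not support a multinomial backend.")
    return multi_class

print(resolve_multi_class("auto", "lbfgs", 3))       # multinomial
print(resolve_multi_class("auto", "liblinear", 3))   # ovr
print(resolve_multi_class("auto", "lbfgs", 2))       # ovr
```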
  {
    "library": "tensorflow",
    "name": "embedding_tables",
    "source_code": "@property\ndef embedding_tables(self) -> Dict[tpu_embedding_v2_utils.TableConfig, tf_variables.Variable]:\n    self._maybe_build()\n    return {stacked_table_name: self._variables[stacked_table_name]['parameters'] for stacked_table_name in self._stacked_table_to_tables}",
    "docstring": "Returns a dict of embedding tables, keyed by .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\tpu\\tpu_embedding_v3.py",
    "ast_data": "FunctionDef name:embedding_tables arg:self arguments arg Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_get_master_target",
    "source_code": "def _get_master_target(self):\n    if not self._cluster_spec or self._task_type == _TaskType.EVALUATOR:\n        return ''\n    if not self._task_type:\n        if _TaskType.CHIEF in self._cluster_spec.jobs:\n            task_type = _TaskType.CHIEF\n            task_id = 0\n        else:\n            assert _TaskType.WORKER in self._cluster_spec.jobs\n            task_type = _TaskType.WORKER\n            task_id = 0\n    else:\n        task_type = self._task_type\n        task_id = self._task_id\n    prefix = ''\n    if self._rpc_layer:\n        prefix = self._rpc_layer + '://'\n    return prefix + self._cluster_spec.job_tasks(task_type)[task_id or 0]",
    "docstring": "Return the master target for a task.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\distribute_coordinator.py",
    "ast_data": "FunctionDef name:_get_master_target arg:self arguments arg If BoolOp Compare Return return:yes If If Compare Assign Assign Compare Assign Assign Assign Assign Assign If Assign Return return:yes Call BoolOp"
  },
  {
    "library": "tensorflow",
    "name": "GlobalMaxPooling3D",
    "source_code": "class GlobalMaxPooling3D(GlobalPooling3D):\n\n    def call(self, inputs):\n        if self.data_format == 'channels_last':\n            return backend.max(inputs, axis=[1, 2, 3], keepdims=self.keepdims)\n        else:\n            return backend.max(inputs, axis=[2, 3, 4], keepdims=self.keepdims)",
    "docstring": "Global Max pooling operation for 3D data. Args: data_format: A string, one of (default) or . The ordering of the dimensions in the inputs. corresponds to inputs with shape while corresponds to inputs with shape . It defaults to the value found in your Keras config file at . If you never set it, then it will be \"channels_last\". keepdims: A boolean, whether to keep the spatial dimensions or not. If is (default), the rank of the tensor is reduced for spatial dimensions. If is , the spatial dimensions are retained with length 1. The behavior is the same as for or . Input shape: - If : 5D tensor with shape: - If : 5D tensor with shape: Output shape: - If =False: 2D tensor with shape . - If =True: - If : 5D tensor with shape - If : 5D tensor with shape",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\layers\\pooling.py",
    "ast_data": "ClassDef name:GlobalMaxPooling3D FunctionDef name:call arg:self arg:inputs arguments arg arg If Compare Return return:yes Call Return return:yes Call"
  },
  {
    "library": "numpy",
    "name": "iscode",
    "source_code": "def iscode(object):\n    return isinstance(object, types.CodeType)",
    "docstring": "Return true if the object is a code object. Code objects provide these attributes: co_argcount number of arguments (not including * or ** args) co_code string of raw compiled bytecode co_consts tuple of constants used in the bytecode co_filename name of file in which this code object was created co_firstlineno number of first line in Python source code co_flags bitmap: 1=optimized | 2=newlocals | 4=*arg | 8=**arg co_lnotab encoded mapping of line numbers to bytecode indices co_name name with which this code object was defined co_names tuple of names of local variables co_nlocals number of local variables co_stacksize virtual machine stack space required co_varnames tuple of names of arguments and local variables",
    "type": "function",
    "file_path": "numpy\\numpy\\_utils\\_inspect.py",
    "ast_data": "FunctionDef name:iscode arg:object arguments arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "add_outgoing_edge",
    "source_code": "def add_outgoing_edge(self, edge):\n    self._outgoing_edges.append(edge)",
    "docstring": "Adds an outgoing edge to the Convertible's list of edges. Args: edge: The outgoing edge (its source should be 'self').",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\convert_to_constants.py",
    "ast_data": "FunctionDef name:add_outgoing_edge arg:self arg:edge arguments arg arg Call"
  },
  {
    "library": "tensorflow",
    "name": "_should_act_as_resource_variable",
    "source_code": "def _should_act_as_resource_variable(self):\n    pass",
    "docstring": "Pass resource_variable_ops.is_resource_variable check.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\mixed_precision\\autocast_variable.py",
    "ast_data": "FunctionDef name:_should_act_as_resource_variable arg:self arguments arg"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, node_def, op, message, *args):\n    super(UnavailableError, self).__init__(node_def, op, message, UNAVAILABLE, *args)",
    "docstring": "Creates an .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\errors_impl.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:node_def arg:op arg:message arguments arg arg arg arg arg Call Call"
  },
  {
    "library": "kornia",
    "name": "vgg13",
    "source_code": "def vgg13(*, weights: Optional[Any]=None, **kwargs: Any) -> VGG:\n    return _vgg('B', False, weights, **kwargs)",
    "docstring": "VGG-13 from __. Args: weights (:class:, optional): The pretrained weights to use. See :class: below for more details, and possible values. By default, no pre-trained weights are used. progress (bool, optional): If True, displays a progress bar of the download to stderr. Default is True. **kwargs: parameters passed to the `source code `_ for more details about this class. .. autoclass:: torchvision.models.VGG13_Weights :members:",
    "type": "function",
    "file_path": "kornia\\kornia\\feature\\dedode\\vgg.py",
    "ast_data": "FunctionDef name:vgg13 arguments arg arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "state_dict",
    "source_code": "def state_dict(self) -> dict[str, Any]:\n    groups: list[dict[str, Any]] = [dict(filter(lambda key_value: key_value[0] not in KEYS_NOT_IN_STATE_DICT, mg.items())) for mg in self.groups]\n    return {'state': self.state, 'groups': groups}",
    "docstring": "Returns the state of the optimizer as a :class:. It contains: * state - current state of the sparsification. * groups - a list containing all sparsity configuration groups with the key 'tensor_fqn' specifying the path to the sparsified tensor within a model TODO: Need a clean way of loading the state of the \"prepared\" module",
    "type": "method",
    "file_path": "pytorch\\torch\\ao\\pruning\\sparsifier\\base_sparsifier.py",
    "ast_data": "FunctionDef name:state_dict arg:self arguments arg Call Call arguments arg Compare Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "call",
    "source_code": "def call(self, inputs, state, **kwargs):\n    return self._call_wrapped_cell(inputs, state, cell_call_fn=self.cell.call, **kwargs)",
    "docstring": "Runs the RNN cell step computation. When is being used, we assume that the wrapper object has been built, and therefore the wrapped cells has been built via its method and its method can be used directly. This allows to use the wrapped cell and the non-wrapped cell equivalently when using and . Args: inputs: A tensor with wrapped cell's input. state: A tensor or tuple of tensors with wrapped cell's state. **kwargs: Additional arguments passed to the wrapped cell's . Returns: A pair containing: - Output: A tensor with cell's output. - New state: A tensor or tuple of tensors with new wrapped cell's state.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\layers\\rnn_cell_wrapper_v2.py",
    "ast_data": "FunctionDef name:call arg:self arg:inputs arg:state arguments arg arg arg arg Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "to_numpy",
    "source_code": "def to_numpy(self, dtype: npt.DTypeLike | None=None, copy: bool=False, na_value: object=lib.no_default) -> np.ndarray:\n    hasna = self._hasna\n    dtype, na_value = to_numpy_dtype_inference(self, dtype, na_value, hasna)\n    if dtype is None:\n        dtype = object\n    if hasna:\n        if dtype != object and (not is_string_dtype(dtype)) and (na_value is libmissing.NA):\n            raise ValueError(f\"cannot convert to '{dtype}'-dtype NumPy array with missing values. Specify an appropriate 'na_value' for this dtype.\")\n        with warnings.catch_warnings():\n            warnings.filterwarnings('ignore', category=RuntimeWarning)\n            data = self._data.astype(dtype)\n        data[self._mask] = na_value\n    else:\n        with warnings.catch_warnings():\n            warnings.filterwarnings('ignore', category=RuntimeWarning)\n            data = self._data.astype(dtype, copy=copy)\n    return data",
    "docstring": "Convert to a NumPy Array. By default converts to an object-dtype NumPy array. Specify the and keywords to customize the conversion. Parameters ---------- dtype : dtype, default object The numpy dtype to convert to. copy : bool, default False Whether to ensure that the returned value is a not a view on the array. Note that `dtypeNAna_value` instead >>> a.to_numpy(dtype=\"bool\", na_value=False) array([ True, False, False])",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\arrays\\masked.py",
    "ast_data": "FunctionDef name:to_numpy arg:self arg:dtype arg:copy arg:na_value arguments arg arg arg arg Assign Assign Call If Compare Assign If If BoolOp Compare Call Compare Raise Call With Call Call Assign Call Assign With Call Call Assign Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "add_step_closure",
    "source_code": "def add_step_closure(closure, args=(), run_async=False):\n    devctx = get_device_context()\n    closures_type = 'async_step_closures' if run_async else 'step_closures'\n    step_closures = getattr(devctx, closures_type, None)\n    if step_closures is None:\n        step_closures = []\n        setattr(devctx, closures_type, step_closures)\n    step_closures.append(lambda a=args: closure(*a))",
    "docstring": "Adds a closure to the list of the ones to be run at the end of the step. Many times during model training there is the need to print/report (print to console, post to tensorboard, etc...) information which require the content of intermediary tensors to be inspected. Inspecting different tensors content in different points of the model code requires many executions and typically causes performance issues. Adding a step closure will ensure that it will be run after the barrier, when all the live tensors will be already materialized to device data. Live tensors which will include the ones captured by the closure arguments. So using will ensure a single execution will be performed, even when multiple closures are queued, requiring multiple tensors to be inspected. Step closures will be run sequentially in the order they have been queued. Note that even though using this API the execution will be optimized, it is advised to throttle the printing/reporting events once every N steps. Args: closure (callable): The function to be called. args (tuple): The arguments to be passed to the closure. run_async: If True, run the closure asynchronously.",
    "type": "function",
    "file_path": "pytorch\\torch\\_lazy\\closure.py",
    "ast_data": "FunctionDef name:add_step_closure arg:closure arg:args arg:run_async arguments arg arg arg Assign Call Assign Assign Call If Compare Assign Call Call arguments arg Call"
  },
  {
    "library": "tensorflow",
    "name": "_convert_scipy_sparse_tensor",
    "source_code": "def _convert_scipy_sparse_tensor(value, expected_input):\n    if issparse is not None and issparse(value):\n        if backend.is_sparse(expected_input):\n            sparse_coo = value.tocoo()\n            row, col = (sparse_coo.row, sparse_coo.col)\n            data, shape = (sparse_coo.data, sparse_coo.shape)\n            indices = np.concatenate((np.expand_dims(row, 1), np.expand_dims(col, 1)), 1)\n            return sparse_tensor.SparseTensor(indices, data, shape)\n        else:\n            if ops.executing_eagerly_outside_functions():\n                raise ValueError('A SciPy sparse matrix was passed to a model that expects dense inputs. Please densify your inputs first, such as by calling `x.toarray().')\n            return value.toarray()\n    else:\n        return value",
    "docstring": "Handle scipy sparse tensor conversions. This method takes a value 'value' and returns the proper conversion. If value is a scipy sparse tensor and the expected input is a dense tensor, we densify 'value'. If value is a scipy sparse tensor and the expected input is a TF SparseTensor, we convert 'value' to a SparseTensor. If 'value' is not a scipy sparse tensor, or scipy is not imported, we pass it through unchanged. Args: value: An object that may be a scipy sparse tensor expected_input: The expected input placeholder. Returns: The possibly-converted 'value'.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\training_v1.py",
    "ast_data": "FunctionDef name:_convert_scipy_sparse_tensor arg:value arg:expected_input arguments arg arg If BoolOp Compare Call If Call Assign Call Assign Assign Assign Call Call Call Return return:yes Call If Call Raise Call Return return:yes Call Return return:yes"
  },
  {
    "library": "django",
    "name": "json",
    "source_code": "@property\ndef json(self):\n    return self.ogr.json",
    "docstring": "Return GeoJSON representation of this Geometry.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\geos\\geometry.py",
    "ast_data": "FunctionDef name:json arg:self arguments arg Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "sca",
    "source_code": "def sca(ax: Axes) -> None:\n    fig = ax.get_figure(root=False)\n    figure(fig)\n    fig.sca(ax)",
    "docstring": "Set the current Axes to *ax* and the current Figure to the parent of *ax*.",
    "type": "function",
    "file_path": "matplotlib\\lib\\matplotlib\\pyplot.py",
    "ast_data": "FunctionDef name:sca arg:ax arguments arg Assign Call Call Call"
  },
  {
    "library": "pandas",
    "name": "is_view",
    "source_code": "@property\ndef is_view(self) -> bool:\n    raise AbstractMethodError(self)",
    "docstring": "return a boolean if I am possibly a view",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\internals\\blocks.py",
    "ast_data": "FunctionDef name:is_view arg:self arguments arg Raise Call"
  },
  {
    "library": "pytorch",
    "name": "tensor_always_has_static_shape",
    "source_code": "def tensor_always_has_static_shape(tensor: Union[torch.Tensor, Any], is_tensor: bool, tensor_source: Source) -> tuple[bool, Optional[TensorStaticReason]]:\n    from .source import is_from_unspecialized_param_buffer_source\n    if (tensor_source.guard_source().is_specialized_nn_module() or tensor_source.guard_source().is_unspecialized_builtin_nn_module()) and config.force_nn_module_property_static_shapes:\n        return (True, TensorStaticReason.NN_MODULE_PROPERTY)\n    if (type(tensor) is torch.nn.Parameter or is_from_unspecialized_param_buffer_source(tensor_source)) and config.force_parameter_static_shapes:\n        return (True, TensorStaticReason.PARAMETER)\n    if not is_tensor:\n        return (True, TensorStaticReason.NOT_TENSOR)\n    return (False, None)",
    "docstring": "Given a tensor, source, and is_tensor flag, determine if a shape should be static. Args: tensor - the real tensor to evaluate, parameters force a static shape. is_tensor - internal dynamo check, essentially \"is_tensor\": target_cls is TensorVariable, tensors not in a TensorVariable for whatever reason are forced static. Returns a tuple, where the first element is the bool of whether or not this tensor should have a static shape. The second element is a TensorStaticReason, useful for passing to tensor_static_reason_to_message if needed.",
    "type": "function",
    "file_path": "pytorch\\torch\\_dynamo\\utils.py",
    "ast_data": "FunctionDef name:tensor_always_has_static_shape arg:tensor arg:is_tensor arg:tensor_source arguments arg arg arg If BoolOp BoolOp Call Call Call Call Return return:yes If BoolOp BoolOp Compare Call Call Return return:yes If Return return:yes Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "matshow",
    "source_code": "def matshow(A: ArrayLike, fignum: None | int=None, **kwargs) -> AxesImage:\n    A = np.asanyarray(A)\n    if fignum == 0:\n        ax = gca()\n    else:\n        if fignum is not None and fignum_exists(fignum):\n            figsize = None\n        else:\n            figsize = figaspect(A)\n        fig = figure(fignum, figsize=figsize)\n        ax = fig.add_axes((0.15, 0.09, 0.775, 0.775))\n    im = ax.matshow(A, **kwargs)\n    sci(im)\n    return im",
    "docstring": "Display a 2D array as a matrix in a new figure window. The origin is set at the upper left hand corner. The indexing is `~matplotlib.image.AxesImage~matplotlib.axes.Axes.imshow` arguments",
    "type": "function",
    "file_path": "matplotlib\\lib\\matplotlib\\pyplot.py",
    "ast_data": "FunctionDef name:matshow arg:A arg:fignum arguments arg arg arg Assign Call If Compare Assign Call If BoolOp Compare Call Assign Assign Call Assign Call Assign Call Assign Call Call Return return:yes"
  },
  {
    "library": "pandas",
    "name": "_concat_homogeneous_fastpath",
    "source_code": "def _concat_homogeneous_fastpath(mgrs_indexers, shape: Shape, first_dtype: np.dtype) -> Block:\n    if all((not indexers for _, indexers in mgrs_indexers)):\n        arrs = [mgr.blocks[0].values.T for mgr, _ in mgrs_indexers]\n        arr = np.concatenate(arrs).T\n        bp = libinternals.BlockPlacement(slice(shape[0]))\n        nb = new_block_2d(arr, bp)\n        return nb\n    arr = np.empty(shape, dtype=first_dtype)\n    if first_dtype == np.float64:\n        take_func = libalgos.take_2d_axis0_float64_float64\n    else:\n        take_func = libalgos.take_2d_axis0_float32_float32\n    start = 0\n    for mgr, indexers in mgrs_indexers:\n        mgr_len = mgr.shape[1]\n        end = start + mgr_len\n        if 0 in indexers:\n            take_func(mgr.blocks[0].values, indexers[0], arr[:, start:end])\n        else:\n            arr[:, start:end] = mgr.blocks[0].values\n        start += mgr_len\n    bp = libinternals.BlockPlacement(slice(shape[0]))\n    nb = new_block_2d(arr, bp)\n    return nb",
    "docstring": "With single-Block managers with homogeneous dtypes (that can already hold nan), we avoid [...]",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\internals\\concat.py",
    "ast_data": "FunctionDef name:_concat_homogeneous_fastpath arg:mgrs_indexers arg:shape arg:first_dtype arguments arg arg arg If Call Assign Assign Call Assign Call Call Assign Call Return return:yes Assign Call If Compare Assign Assign Assign For Assign Assign If Compare Call Assign Assign Call Call Assign Call Return return:yes"
  },
  {
    "library": "pandas",
    "name": "_highlight_value",
    "source_code": "def _highlight_value(data: DataFrame | Series, op: str, props: str) -> np.ndarray:\n    value = getattr(data, op)(skipna=True)\n    if isinstance(data, DataFrame):\n        value = getattr(value, op)(skipna=True)\n    cond = data == value\n    cond = cond.where(pd.notna(cond), False)\n    return np.where(cond, props, '')",
    "docstring": "Return an array of css strings based on the condition of values matching an op.",
    "type": "function",
    "file_path": "pandas\\pandas\\io\\formats\\style.py",
    "ast_data": "FunctionDef name:_highlight_value arg:data arg:op arg:props arguments arg arg arg Assign Call Call If Call Assign Call Call Assign Compare Assign Call Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "graph",
    "source_code": "def graph(self):\n    return self.__tx.output.graph",
    "docstring": "Retrieve the partially constructed FX graph that would be passed to the user compiler after compilation.",
    "type": "method",
    "file_path": "pytorch\\torch\\_dynamo\\comptime.py",
    "ast_data": "FunctionDef name:graph arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "on_predict_end",
    "source_code": "def on_predict_end(self, logs=None):\n    logs = self._process_logs(logs)\n    for callback in self.callbacks:\n        callback.on_predict_end(logs)",
    "docstring": "Calls the methods of its callbacks. Args: logs: Dict. Currently no data is passed to this argument for this method but that may change in the future.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\callbacks.py",
    "ast_data": "FunctionDef name:on_predict_end arg:self arg:logs arguments arg arg Assign Call For Call"
  },
  {
    "library": "pytorch",
    "name": "codegen_inputs",
    "source_code": "def codegen_inputs(self):\n    bound_vars = OrderedSet[sympy.Symbol]()\n    graph_inputs = self.get_graph_inputs()\n    inputs = [(k, v) for k, v in graph_inputs.items() if isinstance(v, sympy.Symbol)] + [(k, v) for k, v in graph_inputs.items() if not isinstance(v, sympy.Symbol)]\n    for name, value in inputs:\n        self.codegen_input_symbol_assignment(name, value, bound_vars)\n\n    def _verify_input_symbol_assignment(value: ir.TensorBox, bound_vars: OrderedSet[sympy.Symbol]):\n        for expr in chain.from_iterable([value.get_size(), value.get_stride()]):\n            if not isinstance(expr, Expr) or isinstance(expr, sympy.Symbol):\n                continue\n            undefined_symbols = [sym for sym in expr.free_symbols if sym not in bound_vars]\n            if len(undefined_symbols) > 0:\n                raise AssertionError(f'For {expr}, expected {undefined_symbols} to have been codegen-ed.')\n    for _, value in inputs:\n        if not isinstance(value, ir.TensorBox):\n            continue\n        _verify_input_symbol_assignment(value, bound_vars)",
    "docstring": "Assign all symbolic shapes to locals",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\codegen\\wrapper.py",
    "ast_data": "FunctionDef name:codegen_inputs arg:self arguments arg Assign Call Assign Call Assign Call Call Call Call For Call FunctionDef name:_verify_input_symbol_assignment arg:value arg:bound_vars arguments arg arg For Call Call Call If BoolOp Call Call Assign Compare If Compare Call Raise Call For If Call Call"
  },
  {
    "library": "pytorch",
    "name": "SigmoidTransform",
    "source_code": "class SigmoidTransform(Transform):\n    domain = constraints.real\n    codomain = constraints.unit_interval\n    bijective = True\n    sign = +1\n\n    def __eq__(self, other):\n        return isinstance(other, SigmoidTransform)\n\n    def _call(self, x):\n        return _clipped_sigmoid(x)\n\n    def _inverse(self, y):\n        finfo = torch.finfo(y.dtype)\n        y = y.clamp(min=finfo.tiny, max=1.0 - finfo.eps)\n        return y.log() - (-y).log1p()\n\n    def log_abs_det_jacobian(self, x, y):\n        return -F.softplus(-x) - F.softplus(x)",
    "docstring": "Transform via the mapping :math: and :math:.",
    "type": "class",
    "file_path": "pytorch\\torch\\distributions\\transforms.py",
    "ast_data": "ClassDef name:SigmoidTransform Assign Assign Assign Assign FunctionDef name:__eq__ arg:self arg:other arguments arg arg Return return:yes Call FunctionDef name:_call arg:self arg:x arguments arg arg Return return:yes Call FunctionDef name:_inverse arg:self arg:y arguments arg arg Assign Call Assign Call Return return:yes Call Call FunctionDef name:log_abs_det_jacobian arg:self arg:x arg:y arguments arg arg arg Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "generate_return_type_definition_and_registrations",
    "source_code": "def generate_return_type_definition_and_registrations(overloads: Sequence[PythonSignatureNativeFunctionPair]) -> tuple[list[str], list[str]]:\n    typenames: dict[str, str] = {}\n    definitions: list[str] = []\n    registrations: list[str] = []\n    for overload in overloads:\n        fieldnames = structseq_fieldnames(overload.function.func.returns)\n        if not fieldnames:\n            continue\n        fields = ', '.join((f'{{\"{fn}\", \"\"}}' for fn in fieldnames))\n        name = cpp.name(overload.function.func)\n        tn_key = gen_structseq_typename_key(overload.function)\n        typename = typenames.get(tn_key)\n        if typename is None:\n            typename = f'{name}NamedTuple{('' if not definitions else len(definitions))}'\n            typenames[tn_key] = typename\n            definitions.append(f'PyTypeObject* get_{name}_structseq() {{\\n    static PyStructSequence_Field NamedTuple_fields[] = {{ {fields},  {{nullptr}} }};\\n    static PyTypeObject {typename};\\n    static bool is_initialized = false;\\n    static PyStructSequence_Desc desc = {{ \"torch.return_types.{name}\", nullptr, NamedTuple_fields, {len(fieldnames)} }};\\n    if (!is_initialized) {{\\n        PyStructSequence_InitType(&{typename}, &desc);\\n        {typename}.tp_repr = (reprfunc)torch::utils::returned_structseq_repr;\\n        is_initialized = true;\\n    }}\\n    return &{typename};\\n}}\\n')\n            registrations.append(f'addReturnType(return_types_module, \"{name}\", generated::get_{name}_structseq());')\n    return (definitions, registrations)",
    "docstring": "Generate block of function in to initialize and return named tuple for a native function which returns named tuple and registration invocations in same file.",
    "type": "function",
    "file_path": "pytorch\\tools\\autograd\\gen_python_functions.py",
    "ast_data": "FunctionDef name:generate_return_type_definition_and_registrations arg:overloads arguments arg For Assign Call If Assign Call Assign Call Assign Call Assign Call If Compare Assign Call Assign Call Call Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "get_path",
    "source_code": "def get_path(self):\n    if self._invalidy or self._invalidx:\n        self.recache()\n    return self._path",
    "docstring": "Return the associated with this line.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\lines.py",
    "ast_data": "FunctionDef name:get_path arg:self arguments arg If BoolOp Call Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "_check_precision_positivity",
    "source_code": "def _check_precision_positivity(precision, covariance_type):\n    if np.any(np.less_equal(precision, 0.0)):\n        raise ValueError(\"'%s precision' should be positive\" % covariance_type)",
    "docstring": "Check a precision vector is positive-definite.",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\mixture\\_gaussian_mixture.py",
    "ast_data": "FunctionDef name:_check_precision_positivity arg:precision arg:covariance_type arguments arg arg If Call Call Raise Call"
  },
  {
    "library": "sphinx",
    "name": "TranslationProgressTotaliser",
    "source_code": "class TranslationProgressTotaliser(SphinxTransform):\n    default_priority = 25\n\n    def apply(self, **kwargs: Any) -> None:\n        from sphinx.builders.gettext import MessageCatalogBuilder\n        if isinstance(self.app.builder, MessageCatalogBuilder):\n            return\n        total = translated = 0\n        for node in NodeMatcher(nodes.Element, translated=Any).findall(self.document):\n            total += 1\n            if node['translated']:\n                translated += 1\n        self.document['translation_progress'] = {'total': total, 'translated': translated}",
    "docstring": "Calculate the number of translated and untranslated nodes.",
    "type": "class",
    "file_path": "sphinx\\sphinx\\transforms\\i18n.py",
    "ast_data": "ClassDef name:TranslationProgressTotaliser Assign FunctionDef name:apply arg:self arguments arg arg If Call Return return:no Assign For Call Call If Assign"
  },
  {
    "library": "scrapy",
    "name": "getpriority",
    "source_code": "def getpriority(self, name: _SettingsKeyT) -> int | None:\n    if name not in self:\n        return None\n    return self.attributes[name].priority",
    "docstring": "Return the current numerical priority value of a setting, or `` does not exist. :param name: the setting name :type name: str",
    "type": "method",
    "file_path": "scrapy\\scrapy\\settings\\__init__.py",
    "ast_data": "FunctionDef name:getpriority arg:self arg:name arguments arg arg If Compare Return return:no Return return:yes"
  },
  {
    "library": "pandas",
    "name": "_fill_limit_area_1d",
    "source_code": "def _fill_limit_area_1d(mask: npt.NDArray[np.bool_], limit_area: Literal['outside', 'inside']) -> None:\n    neg_mask = ~mask\n    first = neg_mask.argmax()\n    last = len(neg_mask) - neg_mask[::-1].argmax() - 1\n    if limit_area == 'inside':\n        mask[:first] = False\n        mask[last + 1:] = False\n    elif limit_area == 'outside':\n        mask[first + 1:last] = False",
    "docstring": "Prepare 1d mask for ffill/bfill with limit_area. Caller is responsible for checking at least one value of mask is False. When called, mask will no longer faithfully represent when the corresponding are NA or not. Parameters ---------- mask : np.ndarray[bool, ndim=1] Mask representing NA values when filling. limit_area : { \"outside\", \"inside\" } Whether to limit filling to outside or inside the outer most non-NA value.",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\missing.py",
    "ast_data": "FunctionDef name:_fill_limit_area_1d arg:mask arg:limit_area arguments arg arg Assign Assign Call Assign Call Call If Compare Assign Assign If Compare Assign"
  },
  {
    "library": "pandas",
    "name": "any_extension_types",
    "source_code": "@property\ndef any_extension_types(self) -> bool:\n    return any((block.is_extension for block in self.blocks))",
    "docstring": "Whether any of the blocks in this manager are extension blocks",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\internals\\managers.py",
    "ast_data": "FunctionDef name:any_extension_types arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "supercedes",
    "source_code": "def supercedes(a, b):\n    if len(a) < len(b):\n        return not a and len(b) == 1 and isvariadic(b[-1])\n    elif len(a) == len(b):\n        return all(map(issubclass, a, b))\n    else:\n        p1 = 0\n        p2 = 0\n        while p1 < len(a) and p2 < len(b):\n            cur_a = a[p1]\n            cur_b = b[p2]\n            if not (isvariadic(cur_a) or isvariadic(cur_b)):\n                if not issubclass(cur_a, cur_b):\n                    return False\n                p1 += 1\n                p2 += 1\n            elif isvariadic(cur_a):\n                assert p1 == len(a) - 1\n                return p2 == len(b) - 1 and issubclass(cur_a, cur_b)\n            elif isvariadic(cur_b):\n                assert p2 == len(b) - 1\n                if not issubclass(cur_a, cur_b):\n                    return False\n                p1 += 1\n        return p2 == len(b) - 1 and p1 == len(a)",
    "docstring": "A is consistent and strictly more specific than B",
    "type": "function",
    "file_path": "pytorch\\torch\\fx\\experimental\\unification\\multipledispatch\\conflict.py",
    "ast_data": "FunctionDef name:supercedes arg:a arg:b arguments arg arg If Compare Call Call Return return:yes BoolOp Compare Call Call If Compare Call Call Return return:yes Call Call Assign Assign While BoolOp Compare Call Compare Call Assign Assign If BoolOp Call Call If Call Return return:yes If Call Compare Call Return return:yes BoolOp Compare Call Call If Call Compare Call If Call Return return:yes Return return:yes BoolOp Compare Call Compare Call"
  },
  {
    "library": "pandas",
    "name": "nbytes",
    "source_code": "@property\ndef nbytes(self) -> int:\n    raise AbstractMethodError(self)",
    "docstring": "The number of bytes needed to store this object in memory. See Also -------- ExtensionArray.shape: Return a tuple of the array dimensions. ExtensionArray.size: The number of elements in the array. Examples -------- >>> pd.array([1, 2, 3]).nbytes 27",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\arrays\\base.py",
    "ast_data": "FunctionDef name:nbytes arg:self arguments arg Raise Call"
  },
  {
    "library": "tensorflow",
    "name": "_find_dtype_iterable",
    "source_code": "def _find_dtype_iterable(iterable: Iterable[Any], dtype: Optional[dtypes.DType]) -> Optional[dtypes.DType]:\n    if dtype is not None:\n        return dtype\n    for x in iterable:\n        dtype = _find_dtype(x, dtype)\n    return dtype",
    "docstring": "Find the preferred dtype of a list of objects. This will go over the iterable, and use the first object with a preferred dtype. The dtype passed has highest priority if it is not None. Args: iterable: an iterable with things that might have a dtype. dtype: an overriding dtype, or None. Returns: an optional dtype.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\dynamic_ragged_shape.py",
    "ast_data": "FunctionDef name:_find_dtype_iterable arg:iterable arg:dtype arguments arg arg If Compare Return return:yes For Assign Call Return return:yes"
  },
  {
    "library": "cherrypy",
    "name": "_getargs",
    "source_code": "def _getargs(func):\n    import types\n    if isinstance(func, types.MethodType):\n        func = func.__func__\n    co = func.__code__\n    return co.co_varnames[:co.co_argcount]",
    "docstring": "Return the names of all static arguments to the given function.",
    "type": "function",
    "file_path": "cherrypy\\cherrypy\\_cptools.py",
    "ast_data": "FunctionDef name:_getargs arg:func arguments arg If Call Assign Assign Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "pids",
    "source_code": "@abc.abstractmethod\ndef pids(self) -> dict[int, int]:\n    raise NotImplementedError",
    "docstring": "Return pids of processes mapped by their respective local_ranks.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\elastic\\multiprocessing\\api.py",
    "ast_data": "FunctionDef name:pids arg:self arguments arg Raise"
  },
  {
    "library": "pytorch",
    "name": "caching_allocator_delete",
    "source_code": "def caching_allocator_delete(mem_ptr):\n    torch._C._cuda_cudaCachingAllocator_raw_delete(mem_ptr)",
    "docstring": "Delete memory allocated using the CUDA memory allocator. Memory allocated with :func:. is freed here. The associated device and stream are tracked inside the allocator. Args: mem_ptr (int): memory address to be freed by the allocator. .. note:: See :ref: for more details about GPU memory management.",
    "type": "function",
    "file_path": "pytorch\\torch\\cuda\\memory.py",
    "ast_data": "FunctionDef name:caching_allocator_delete arg:mem_ptr arguments arg Call"
  },
  {
    "library": "django",
    "name": "cursor",
    "source_code": "@async_unsafe\ndef cursor(self):\n    return self._cursor()",
    "docstring": "Create a cursor, opening a connection if necessary.",
    "type": "method",
    "file_path": "django\\django\\db\\backends\\base\\base.py",
    "ast_data": "FunctionDef name:cursor arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "max",
    "source_code": "@property\ndef max(self):\n    return np.max(self.get_points(), axis=0)",
    "docstring": "The top-right corner of the bounding box.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\transforms.py",
    "ast_data": "FunctionDef name:max arg:self arguments arg Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_unwrap_simple_call",
    "source_code": "def _unwrap_simple_call(self, node: ast.expr) -> ast.expr:\n    if isinstance(node, ast.Call) and len(node.args) == 1 and (not node.keywords):\n        return self._unwrap_simple_call(node.args[0])\n    return node",
    "docstring": "Unwraps a function call that takes a single unnamed parameter.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\tools\\api\\generator2\\extractor\\extractor.py",
    "ast_data": "FunctionDef name:_unwrap_simple_call arg:self arg:node arguments arg arg If BoolOp Call Compare Call Return return:yes Call Return return:yes"
  },
  {
    "library": "cherrypy",
    "name": "__getattr__",
    "source_code": "def __getattr__(self, attr):\n    if attr.startswith('__'):\n        raise AttributeError(self, attr)\n    return getattr(self._iterator, attr)",
    "docstring": "Return the underlying byte stream attribute value.",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\lib\\encoding.py",
    "ast_data": "FunctionDef name:__getattr__ arg:self arg:attr arguments arg arg If Call Raise Call Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "pick",
    "source_code": "def pick(self, mouseevent):\n    from .backend_bases import PickEvent\n    if self.pickable():\n        picker = self.get_picker()\n        if callable(picker):\n            inside, prop = picker(self, mouseevent)\n        else:\n            inside, prop = self.contains(mouseevent)\n        if inside:\n            PickEvent('pick_event', self.get_figure(root=True).canvas, mouseevent, self, **prop)._process()\n    for a in self.get_children():\n        ax = getattr(a, 'axes', None)\n        if isinstance(a, mpl.figure.SubFigure) or mouseevent.inaxes is None or ax is None or (mouseevent.inaxes == ax):\n            a.pick(mouseevent)",
    "docstring": "Process a pick event. Each child artist will fire a pick event if *mouseevent* is over the artist and the artist has picker set. See Also -------- .Artist.set_picker, .Artist.get_picker, .Artist.pickable",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\artist.py",
    "ast_data": "FunctionDef name:pick arg:self arg:mouseevent arguments arg arg If Call Assign Call If Call Assign Call Assign Call If Call Call Call For Call Assign Call If BoolOp Call Compare Compare Compare Call"
  },
  {
    "library": "matplotlib",
    "name": "current",
    "source_code": "def current(self):\n    return max(self._axes, key=self._axes.__getitem__, default=None)",
    "docstring": "Return the active Axes, or None if the stack is empty.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\figure.py",
    "ast_data": "FunctionDef name:current arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "get_git_diff_stdout",
    "source_code": "def get_git_diff_stdout() -> str:\n    proc = subprocess.run(['git', 'diff', 'origin/main', 'HEAD'], capture_output=True, check=True, text=True)\n    return proc.stdout",
    "docstring": "Run git diff with appropriate arguments and capture stdout as a str.",
    "type": "function",
    "file_path": "tensorflow\\third_party\\xla\\build_tools\\lint\\diff_parser.py",
    "ast_data": "FunctionDef name:get_git_diff_stdout arguments Assign Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "import_module",
    "source_code": "@abstractmethod\ndef import_module(self, module_name: str) -> ModuleType:\n    pass",
    "docstring": "Import from this environment. The contract is the same as for importlib.import_module.",
    "type": "method",
    "file_path": "pytorch\\torch\\package\\importer.py",
    "ast_data": "FunctionDef name:import_module arg:self arg:module_name arguments arg arg"
  },
  {
    "library": "numpy",
    "name": "TypeDescription",
    "source_code": "class TypeDescription:\n\n    def __init__(self, type, f=None, in_=None, out=None, astype=None, cfunc_alias=None, dispatch=None):\n        self.type = type\n        self.func_data = f\n        if astype is None:\n            astype = {}\n        self.astype_dict = astype\n        if in_ is not None:\n            in_ = in_.replace('P', type)\n        self.in_ = in_\n        if out is not None:\n            out = out.replace('P', type)\n        self.out = out\n        self.cfunc_alias = cfunc_alias\n        self.dispatch = dispatch\n\n    def finish_signature(self, nin, nout):\n        if self.in_ is None:\n            self.in_ = self.type * nin\n        assert len(self.in_) == nin\n        if self.out is None:\n            self.out = self.type * nout\n        assert len(self.out) == nout\n        self.astype = self.astype_dict.get(self.type, None)",
    "docstring": "Type signature for a ufunc. Attributes ---------- type : str Character representing the nominal type. func_data : str or None or FullTypeDescr or FuncNameSuffix, optional The string representing the expression to insert into the data array, if any. in_ : str or None, optional The typecode(s) of the inputs. out : str or None, optional The typecode(s) of the outputs. astype : dict or None, optional If astype['x'] is 'y', uses PyUFunc_x_x_As_y_y/PyUFunc_xx_x_As_yy_y instead of PyUFunc_x_x/PyUFunc_xx_x. cfunc_alias : str or none, optional Appended to inner loop C function name, e.g., FLOAT_{cfunc_alias}. See make_arrays. NOTE: it doesn't support 'astype' dispatch : str or None, optional Dispatch-able source name without its extension '.dispatch.c' that contains the definition of ufunc, dispatched at runtime depending on the specified targets of the dispatch-able source. NOTE: it doesn't support 'astype'",
    "type": "class",
    "file_path": "numpy\\numpy\\_core\\code_generators\\generate_umath.py",
    "ast_data": "ClassDef name:TypeDescription FunctionDef name:__init__ arg:self arg:type arg:f arg:in_ arg:out arg:astype arg:cfunc_alias arg:dispatch arguments arg arg arg arg arg arg arg arg Assign Assign If Compare Assign Assign If Compare Assign Call Assign If Compare Assign Call Assign Assign Assign FunctionDef name:finish_signature arg:self arg:nin arg:nout arguments arg arg arg If Compare Assign Compare Call If Compare Assign Compare Call Assign Call"
  },
  {
    "library": "pytorch",
    "name": "apply_shuffle_settings",
    "source_code": "def apply_shuffle_settings(datapipe: DataPipe, shuffle: Optional[bool]=None) -> DataPipe:\n    if shuffle is None:\n        return datapipe\n    graph = traverse_dps(datapipe)\n    all_pipes = get_all_graph_pipes(graph)\n    shufflers = [pipe for pipe in all_pipes if _is_shuffle_datapipe(pipe)]\n    if not shufflers and shuffle:\n        warnings.warn('`shuffle=True` was set, but the datapipe does not contain a `Shuffler`. Adding one at the end. Be aware that the default buffer size might not be sufficient for your task.')\n        datapipe = datapipe.shuffle()\n        shufflers = [datapipe]\n    for shuffler in shufflers:\n        shuffler.set_shuffle(shuffle)\n    return datapipe",
    "docstring": "Traverse the graph of `DataPipe` and no-op to the graph)",
    "type": "function",
    "file_path": "pytorch\\torch\\utils\\data\\graph_settings.py",
    "ast_data": "FunctionDef name:apply_shuffle_settings arg:datapipe arg:shuffle arguments arg arg If Compare Return return:yes Assign Call Assign Call Assign Call If BoolOp Call Assign Call Assign For Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "extract_object_name",
    "source_code": "def extract_object_name(key):\n    search_key = '/' + OBJECT_ATTRIBUTES_NAME\n    return key[:key.index(search_key)]",
    "docstring": "Substrings the checkpoint key to the start of \"/.ATTRIBUTES\".",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\trackable\\trackable_utils.py",
    "ast_data": "FunctionDef name:extract_object_name arg:key arguments arg Assign Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "graph",
    "source_code": "@tf_export('summary.graph', v1=[])\ndef graph(graph_data):\n    if not context.executing_eagerly():\n        raise ValueError('graph() cannot be invoked inside a graph context.')\n    writer = _summary_state.writer\n    if writer is None:\n        return constant_op.constant(False)\n    with ops.device('cpu:0'):\n        if not should_record_summaries():\n            return constant_op.constant(False)\n        if isinstance(graph_data, (ops.Graph, graph_pb2.GraphDef)):\n            tensor = ops.convert_to_tensor(_serialize_graph(graph_data), dtypes.string)\n        else:\n            raise ValueError(f\"Argument 'graph_data' is not tf.Graph or tf.compat.v1.GraphDef. Received graph_data={graph_data} of type {type(graph_data).__name__}.\")\n        gen_summary_ops.write_graph_summary(writer._resource, 0, tensor)\n        return constant_op.constant(True)",
    "docstring": "Writes a TensorFlow graph summary. Write an instance of or as summary only in an eager mode. Please prefer to use the trace APIs (, , and ) when using which can automatically collect and record graphs from executions. Usage Example: Args: graph_data: The TensorFlow graph to write, as a or a . Returns: True on success, or False if no summary was written because no default summary writer was available. Raises: ValueError: summary API is invoked in a graph mode.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\summary_ops_v2.py",
    "ast_data": "FunctionDef name:graph arg:graph_data arguments arg If Call Raise Call Assign If Compare Return return:yes Call With Call If Call Return return:yes Call If Call Assign Call Call Raise Call Call Call Return return:yes Call Call"
  },
  {
    "library": "django",
    "name": "clear_deferred_loading",
    "source_code": "def clear_deferred_loading(self):\n    self.deferred_loading = (frozenset(), True)",
    "docstring": "Remove any fields from the deferred loading set.",
    "type": "method",
    "file_path": "django\\django\\db\\models\\sql\\query.py",
    "ast_data": "FunctionDef name:clear_deferred_loading arg:self arguments arg Assign Call"
  },
  {
    "library": "scipy",
    "name": "_secant",
    "source_code": "def _secant(xvals, fvals):\n    x0, x1 = xvals[:2]\n    f0, f1 = fvals[:2]\n    if f0 == f1:\n        return np.nan\n    if np.abs(f1) > np.abs(f0):\n        x2 = (-f0 / f1 * x1 + x0) / (1 - f0 / f1)\n    else:\n        x2 = (-f1 / f0 * x0 + x1) / (1 - f1 / f0)\n    return x2",
    "docstring": "Perform a secant step, taking a little care",
    "type": "function",
    "file_path": "scipy\\scipy\\optimize\\_zeros_py.py",
    "ast_data": "FunctionDef name:_secant arg:xvals arg:fvals arguments arg arg Assign Assign If Compare Return return:yes If Compare Call Call Assign Assign Return return:yes"
  },
  {
    "library": "django",
    "name": "item_extra_kwargs",
    "source_code": "def item_extra_kwargs(self, item):\n    return {}",
    "docstring": "Return an extra keyword arguments dictionary that is used with the call of the feed generator.",
    "type": "method",
    "file_path": "django\\django\\contrib\\syndication\\views.py",
    "ast_data": "FunctionDef name:item_extra_kwargs arg:self arg:item arguments arg arg Return return:no"
  },
  {
    "library": "tensorflow",
    "name": "loop",
    "source_code": "@staticmethod\ndef loop(coord, timer_interval_secs, target, args=None, kwargs=None):\n    looper = LooperThread(coord, timer_interval_secs, target=target, args=args, kwargs=kwargs)\n    looper.start()\n    return looper",
    "docstring": "Start a LooperThread that calls a function periodically. If is None the thread calls repeatedly. Otherwise is called every seconds. The thread terminates when a stop of the coordinator is requested. Args: coord: A Coordinator. timer_interval_secs: Number. Time boundaries at which to call . target: A callable object. args: Optional arguments to pass to when calling it. kwargs: Optional keyword arguments to pass to when calling it. Returns: The started thread.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\training\\coordinator.py",
    "ast_data": "FunctionDef name:loop arg:coord arg:timer_interval_secs arg:target arg:args arg:kwargs arguments arg arg arg arg arg Assign Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "EmbeddingPipeliningContext",
    "source_code": "class EmbeddingPipeliningContext(control_flow_ops.ControlFlowContext):\n\n    def __init__(self, mode: str, enable: bool):\n        super().__init__()\n        self._name = 'EmbeddingPipelinigContext'\n        self._mode = attr_value_pb2.AttrValue(s=compat.as_bytes(mode))\n        self._enable = enable\n        recording_summaries = summary_ops_v2.is_recording_summaries()\n        if not isinstance(recording_summaries, bool):\n            recording_summaries = False\n        if enable and recording_summaries:\n            logging.info('Summary recording detected, disabling pipelining.')\n            self._mode = attr_value_pb2.AttrValue(s=compat.as_bytes(mode + _PIPELINE_MODEL_SEQUENTIAL))\n\n    def to_control_flow_context_def(self, context_def: Any, export_scope: Any=None):\n        super().to_control_flow_context_def(context_def, export_scope)\n\n    def AddOp(self, op: ops.Operation):\n        if self._enable:\n            op._set_attr(_PIPELINE_ATTRIBUTE, self._mode)\n        if self._outer_context:\n            self._outer_context.AddOp(op)",
    "docstring": "Sets the _embedding_pipelining attribute on all ops created in the scope.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\tpu\\tpu_embedding_v3.py",
    "ast_data": "ClassDef name:EmbeddingPipeliningContext FunctionDef name:__init__ arg:self arg:mode arg:enable arguments arg arg arg Call Call Assign Assign Call Call Assign Assign Call If Call Assign If BoolOp Call Assign Call Call FunctionDef name:to_control_flow_context_def arg:self arg:context_def arg:export_scope arguments arg arg arg Call Call FunctionDef name:AddOp arg:self arg:op arguments arg arg If Call If Call"
  },
  {
    "library": "scipy",
    "name": "set_backend",
    "source_code": "def set_backend(backend, coerce=False, only=False):\n    backend = _backend_from_arg(backend)\n    return ua.set_backend(backend, coerce=coerce, only=only)",
    "docstring": "Context manager to set the backend within a fixed scope. Upon entering the ``, then a BackendNotImplemented error will be raised immediately. Ignoring any lower priority backends. Examples -------- >>> import scipy.fft as fft >>> with fft.set_backend('scipy', only=True): ... fft.fft([1]) # Always calls the scipy implementation array([1.+0.j])",
    "type": "function",
    "file_path": "scipy\\scipy\\fft\\_backend.py",
    "ast_data": "FunctionDef name:set_backend arg:backend arg:coerce arg:only arguments arg arg arg Assign Call Return return:yes Call"
  },
  {
    "library": "django",
    "name": "get_list_display_links",
    "source_code": "def get_list_display_links(self, request, list_display):\n    if self.list_display_links or self.list_display_links is None or (not list_display):\n        return self.list_display_links\n    else:\n        return list(list_display)[:1]",
    "docstring": "Return a sequence containing the fields to be displayed as links on the changelist. The list_display parameter is the list of fields returned by get_list_display().",
    "type": "method",
    "file_path": "django\\django\\contrib\\admin\\options.py",
    "ast_data": "FunctionDef name:get_list_display_links arg:self arg:request arg:list_display arguments arg arg arg If BoolOp Compare Return return:yes Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "is_xccl_available",
    "source_code": "def is_xccl_available() -> bool:\n    return _XCCL_AVAILABLE",
    "docstring": "Check if the XCCL backend is available.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\distributed_c10d.py",
    "ast_data": "FunctionDef name:is_xccl_available arguments Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_get_flash_version",
    "source_code": "def _get_flash_version() -> str:\n    return '2.5.7'",
    "docstring": "This returns the closest matching tag for the flash attention backend",
    "type": "function",
    "file_path": "pytorch\\torch\\nn\\attention\\__init__.py",
    "ast_data": "FunctionDef name:_get_flash_version arguments Return return:yes"
  },
  {
    "library": "scrapy",
    "name": "log",
    "source_code": "def log(self, request: Request, spider: Spider) -> None:\n    warn('Calling BaseDupeFilter.log() is deprecated.', ScrapyDeprecationWarning, stacklevel=2)",
    "docstring": "Log that a request has been filtered",
    "type": "method",
    "file_path": "scrapy\\scrapy\\dupefilters.py",
    "ast_data": "FunctionDef name:log arg:self arg:request arg:spider arguments arg arg arg Call"
  },
  {
    "library": "matplotlib",
    "name": "AffineDeltaTransform",
    "source_code": "class AffineDeltaTransform(Affine2DBase):\n    pass_through = True\n\n    def __init__(self, transform, **kwargs):\n        super().__init__(**kwargs)\n        self._base_transform = transform\n        self.set_children(transform)\n    __str__ = _make_str_method('_base_transform')\n\n    def get_matrix(self):\n        if self._invalid:\n            self._mtx = self._base_transform.get_matrix().copy()\n            self._mtx[:2, -1] = 0\n        return self._mtx",
    "docstring": "A transform wrapper for transforming displacements between pairs of points. This class is intended to be used to transform displacements (\"position deltas\") between pairs of points (e.g., as the `.Collection`. This is implemented by forcing the offset components of the transform matrix to zero. This class is experimental as of 3.3, and the API may change.",
    "type": "class",
    "file_path": "matplotlib\\lib\\matplotlib\\transforms.py",
    "ast_data": "ClassDef name:AffineDeltaTransform Assign FunctionDef name:__init__ arg:self arg:transform arguments arg arg arg Call Call Assign Call Assign Call FunctionDef name:get_matrix arg:self arguments arg If Assign Call Call Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "write_object_proto",
    "source_code": "def write_object_proto(var, proto, options):\n    if options.experimental_variable_policy._expand_distributed_variables():\n        for var in var.values:\n            var_proto = proto.variable.experimental_distributed_variable_components.add()\n            var_proto.name = var.name.split(':')[0]\n            var_proto.device = var.device",
    "docstring": "Update a SavedObject proto for the caller. If a DistributedVariable object supports this method, it will be called when saving with a pre-built proto representing the object, plus an instance of . This method is then free to modify that proto instance. with or synchronization optionally write out information about their components to the field of a (depending on the variable policy). Args: var: The DistributedVariable object. proto: A pre-built proto for this object. It is assumed this will be a instance. options: A instance.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\values_util.py",
    "ast_data": "FunctionDef name:write_object_proto arg:var arg:proto arg:options arguments arg arg arg If Call For Assign Call Assign Call Assign"
  },
  {
    "library": "kornia",
    "name": "compute_projection_matrix",
    "source_code": "def compute_projection_matrix(self, pinhole_src: PinholeCamera) -> DepthWarper:\n    if not isinstance(self._pinhole_dst, PinholeCamera):\n        raise TypeError(f'Member self._pinhole_dst expected to be of class PinholeCamera. Got {type(self._pinhole_dst)}')\n    if not isinstance(pinhole_src, PinholeCamera):\n        raise TypeError(f'Argument pinhole_src expected to be of class PinholeCamera. Got {type(pinhole_src)}')\n    dst_trans_src: Tensor = compose_transformations(self._pinhole_dst.extrinsics, inverse_transformation(pinhole_src.extrinsics))\n    dst_proj_src: Tensor = torch.matmul(self._pinhole_dst.intrinsics, dst_trans_src)\n    self._pinhole_src = pinhole_src\n    self._dst_proj_src = dst_proj_src\n    return self",
    "docstring": "Compute the projection matrix from the source to destination frame.",
    "type": "method",
    "file_path": "kornia\\kornia\\geometry\\depth.py",
    "ast_data": "FunctionDef name:compute_projection_matrix arg:self arg:pinhole_src arguments arg arg If Call Raise Call Call If Call Raise Call Call Call Call Call Assign Assign Return return:yes"
  },
  {
    "library": "kornia",
    "name": "conv_1x1_bn",
    "source_code": "def conv_1x1_bn(inp: int, oup: int) -> Module:\n    return nn.Sequential(nn.Conv2d(inp, oup, 1, 1, 0, bias=False), nn.BatchNorm2d(oup), nn.SiLU())",
    "docstring": "Apply 1x1 Convolution with Batch Norm.",
    "type": "function",
    "file_path": "kornia\\kornia\\contrib\\vit_mobile.py",
    "ast_data": "FunctionDef name:conv_1x1_bn arg:inp arg:oup arguments arg arg Return return:yes Call Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "memory_reserved",
    "source_code": "def memory_reserved(device: _device_t=None) -> int:\n    return memory_stats(device=device).get('reserved_bytes.all.current', 0)",
    "docstring": "Return the current GPU memory managed by the caching allocator in bytes for a given device. Args: device (torch.device or int or str, optional): selected device. Returns statistic for the current device, given by :func:, if :attr: is `` (default).",
    "type": "function",
    "file_path": "pytorch\\torch\\xpu\\memory.py",
    "ast_data": "FunctionDef name:memory_reserved arg:device arguments arg Return return:yes Call Call"
  },
  {
    "library": "sphinx",
    "name": "getdoc",
    "source_code": "def getdoc(obj: Any, attrgetter: _AttrGetter=safe_getattr, allow_inherited: bool=False, cls: Any=None, name: str | None=None) -> str | None:\n    if cls and name and is_classmethod_like(obj, cls, name):\n        for basecls in getmro(cls):\n            meth = basecls.__dict__.get(name)\n            if not meth:\n                continue\n            if hasattr(meth, '__func__') or is_classmethod_descriptor(meth):\n                doc: str | None = getdoc(getattr(meth, '__func__', meth))\n                if doc is not None or not allow_inherited:\n                    return doc\n    doc = _getdoc_internal(obj)\n    if ispartial(obj) and doc == obj.__class__.__doc__:\n        return getdoc(obj.func)\n    elif doc is None and allow_inherited:\n        if cls and name:\n            for basecls in getmro(cls):\n                meth = safe_getattr(basecls, name, None)\n                if meth is not None:\n                    doc = _getdoc_internal(meth)\n                    if doc is not None:\n                        break\n            if doc is None:\n                for basecls in getmro(cls):\n                    meth = safe_getattr(basecls, name, None)\n                    if meth is not None:\n                        doc = inspect.getdoc(meth)\n                        if doc is not None:\n                            break\n        if doc is None:\n            doc = inspect.getdoc(obj)\n    return doc",
    "docstring": "Get the docstring for the object. This tries to obtain the docstring for some kind of objects additionally: * partial functions * inherited docstring * inherited decorated methods",
    "type": "function",
    "file_path": "sphinx\\sphinx\\util\\inspect.py",
    "ast_data": "FunctionDef name:getdoc arg:obj arg:attrgetter arg:allow_inherited arg:cls arg:name arguments arg arg arg arg arg If BoolOp Call For Call Assign Call If If BoolOp Call Call Call Call If BoolOp Compare Return return:yes Assign Call If BoolOp Call Compare Return return:yes Call If BoolOp Compare If BoolOp For Call Assign Call If Compare Assign Call If Compare If Compare For Call Assign Call If Compare Assign Call If Compare If Compare Assign Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "gen_bucketize_strategy",
    "source_code": "@register_op_strategy(aten.bucketize.Tensor)\ndef gen_bucketize_strategy(op_schema: OpSchema) -> StrategyType:\n    mesh = op_schema.get_mesh_from_args()\n    input_strategy = op_schema.args_schema[0]\n    bucketize_strategy = OpStrategy([])\n    assert isinstance(input_strategy, OpStrategy)\n    for arg_strategy in input_strategy.strategies:\n        arg_spec = DTensorSpec(mesh, arg_strategy.output_spec.placements)\n        replica_spec = DTensorSpec(mesh, tuple([Replicate()] * mesh.ndim))\n        bucketize_strategy.strategies.append(PlacementStrategy(output_specs=arg_spec, input_specs=(arg_spec, replica_spec)))\n    return bucketize_strategy",
    "docstring": "Just propagate input sharding, but expect replicated for boundaries input.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\tensor\\_ops\\_tensor_ops.py",
    "ast_data": "FunctionDef name:gen_bucketize_strategy arg:op_schema arguments arg Assign Call Assign Assign Call Call For Assign Call Assign Call Call Call Call Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_BatchMatMul",
    "source_code": "@ops.RegisterGradient('BatchMatMul')\ndef _BatchMatMul(op: ops.Operation, grad):\n    x = op.inputs[0]\n    y = op.inputs[1]\n    adj_x = op.get_attr('adj_x')\n    adj_y = op.get_attr('adj_y')\n    if not adj_x:\n        if not adj_y:\n            grad_x = math_ops.matmul(grad, y, adjoint_a=False, adjoint_b=True)\n            grad_y = math_ops.matmul(x, grad, adjoint_a=True, adjoint_b=False)\n        else:\n            grad_x = math_ops.matmul(grad, y, adjoint_a=False, adjoint_b=False)\n            grad_y = math_ops.matmul(grad, x, adjoint_a=True, adjoint_b=False)\n    elif not adj_y:\n        grad_x = math_ops.matmul(y, grad, adjoint_a=False, adjoint_b=True)\n        grad_y = math_ops.matmul(x, grad, adjoint_a=False, adjoint_b=False)\n    else:\n        grad_x = math_ops.matmul(y, grad, adjoint_a=True, adjoint_b=True)\n        grad_y = math_ops.matmul(grad, x, adjoint_a=True, adjoint_b=True)\n    return (grad_x, grad_y)",
    "docstring": "Returns the gradient of x and y given the gradient of x * y.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\math_grad.py",
    "ast_data": "FunctionDef name:_BatchMatMul arg:op arg:grad arguments arg arg Assign Assign Assign Call Assign Call If If Assign Call Assign Call Assign Call Assign Call If Assign Call Assign Call Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "static_dispatch",
    "source_code": "def static_dispatch(sig: CppSignature | ExecutorchCppSignature, f: NativeFunction, backend_indices: list[BackendIndex]) -> str:\n    if len(backend_indices) == 0 or f.manual_kernel_registration:\n        return ''\n    backends = [b for b in backend_indices if b.has_kernel(f)]\n    static_block = None\n    if len(backends) == 1:\n        backend_metadata = backends[0].get_kernel(f)\n        if backend_metadata:\n            args = ', '.join((a.name for a in sig.arguments()))\n            static_block = f'return ::{backend_metadata.cpp_namespace}::{backend_metadata.kernel}({args});'\n    else:\n        static_block = f'\\nET_ASSERT_UNREACHABLE_MSG(\"The number of native function(s) binding to {f.func.name} is {len(backends)}.\");\\n    '\n    return f'\\n// {f.namespace}::{f.func}\\nTORCH_API inline {_sig_decl_wrapper(sig)} {{\\n    {static_block}\\n}}\\n'",
    "docstring": "For a given , find out the corresponding native function and dispatch to it. If zero or more than one native function exists, error out. A simplified version of register_dispatch_key.py Arguments: sig: A CppSignature for this native function we want to use. f: NativeFunction to generate static dispatch. backend_indices: All available backends. Return: C++ code to call backend-specific functions, e.g., \"return at::native::add(self, other, scale);\"",
    "type": "function",
    "file_path": "pytorch\\torchgen\\gen_executorch.py",
    "ast_data": "FunctionDef name:static_dispatch arg:sig arg:f arg:backend_indices arguments arg arg arg If BoolOp Compare Call Return return:yes Assign Call Assign If Compare Call Assign Call If Assign Call Call Assign Assign Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "from_backend",
    "source_code": "@classmethod\ndef from_backend(cls, run_id: str, store: Store, backend: RendezvousBackend, min_nodes: int, max_nodes: int, local_addr: Optional[str]=None, timeout: Optional[RendezvousTimeout]=None, keep_alive_interval: int=5, keep_alive_max_attempt: int=3):\n    node = cls._node_desc_generator.generate(local_addr)\n    settings = RendezvousSettings(run_id, min_nodes, max_nodes, timeout or RendezvousTimeout(), keep_alive_interval=timedelta(seconds=keep_alive_interval), keep_alive_max_attempt=keep_alive_max_attempt)\n    state_holder = _BackendRendezvousStateHolder(backend, settings)\n    return cls(node, settings, backend.name, store, state_holder)",
    "docstring": "Create a new :py:class:. Args: run_id: The run id of the rendezvous. store: The C10d store to return as part of the rendezvous. backend: The backend to use to hold the rendezvous state. min_nodes: The minimum number of nodes to admit to the rendezvous. max_nodes: The maximum number of nodes to admit to the rendezvous. local_addr: The local node address. timeout: The timeout configuration of the rendezvous. keep_alive_interval: The amount of time a node waits before sending a heartbeat to keep it alive in the rendezvous. keep_alive_max_attempt: The maximum number of failed heartbeat attempts after which a node is considered dead.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\elastic\\rendezvous\\dynamic_rendezvous.py",
    "ast_data": "FunctionDef name:from_backend arg:cls arg:run_id arg:store arg:backend arg:min_nodes arg:max_nodes arg:local_addr arg:timeout arg:keep_alive_interval arg:keep_alive_max_attempt arguments arg arg arg arg arg arg arg arg arg arg Assign Call Assign Call BoolOp Call Call Assign Call Return return:yes Call"
  },
  {
    "library": "kornia",
    "name": "y",
    "source_code": "@property\ndef y(self) -> Tensor:\n    return self.keypoints[:, 1]",
    "docstring": "Accesses the y coordinates of keypoints (along image height).",
    "type": "method",
    "file_path": "kornia\\kornia\\feature\\disk\\structs.py",
    "ast_data": "FunctionDef name:y arg:self arguments arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_validate_tp_mesh_dim",
    "source_code": "def _validate_tp_mesh_dim(device_mesh: DeviceMesh) -> None:\n    if device_mesh.ndim > 1:\n        raise ValueError(f'Tensor Parallel only accepts a 1D DeviceMesh, but found {device_mesh.ndim}D!If you have a 2-D or N-D device_mesh, consider passing in device_mesh[\"tp\"]')\n    root_mesh = _mesh_resources.get_root_mesh(device_mesh)\n    if root_mesh and root_mesh != device_mesh:\n        tp_mesh_dim_in_root = _mesh_resources.get_root_mesh_dim(device_mesh)\n        if tp_mesh_dim_in_root != root_mesh.ndim - 1:\n            raise RuntimeError(f'Found TP device_mesh on the {tp_mesh_dim_in_root} dimension of its parent mesh.', 'Currently we only support intranode TP and TP needs to be the innermost dimension on its parent mesh.')",
    "docstring": "Check whether TP mesh dimension is valid or not. Args: device_mesh (:class:): The where we perform Tensor Parallelism on. Return: if the mesh dimension is valid, otherwise.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\tensor\\parallel\\_utils.py",
    "ast_data": "FunctionDef name:_validate_tp_mesh_dim arg:device_mesh arguments arg If Compare Raise Call Assign Call If BoolOp Compare Assign Call If Compare Raise Call"
  },
  {
    "library": "matplotlib",
    "name": "__init__",
    "source_code": "def __init__(self, input):\n    if isinstance(input, tuple) and len(input) == 3:\n        self.parts = input\n    else:\n        with open(input, 'rb') as file:\n            data = self._read(file)\n        self.parts = self._split(data)\n    self.decrypted = self._decrypt(self.parts[1], 'eexec')\n    self._abbr = {'RD': 'RD', 'ND': 'ND', 'NP': 'NP'}\n    self._parse()",
    "docstring": "Initialize a Type-1 font. Parameters ---------- input : str or 3-tuple Either a pfb file name, or a 3-tuple of already-decoded Type-1 font .",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\_type1font.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:input arguments arg arg If BoolOp Call Compare Call Assign With Call Assign Call Assign Call Assign Call Assign Call"
  },
  {
    "library": "scipy",
    "name": "issparse",
    "source_code": "def issparse(x):\n    return isinstance(x, SparseABC)",
    "docstring": "Is of a sparse array or sparse matrix type? Parameters ---------- x object to check for being a sparse array or sparse matrix Returns ------- bool True if is a sparse array or a sparse matrix, False otherwise Notes ----- Use to check between an array or matrix. Use to check the sparse format, e.g. . Examples -------- >>> import numpy as np >>> from scipy.sparse import csr_array, csr_matrix, issparse >>> issparse(csr_matrix([[5]])) True >>> issparse(csr_array([[5]])) True >>> issparse(np.array([[5]])) False >>> issparse(5) False",
    "type": "function",
    "file_path": "scipy\\scipy\\_lib\\_sparse.py",
    "ast_data": "FunctionDef name:issparse arg:x arguments arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, keras_model, trackable_obj=None):\n    super(TFLiteKerasModelConverterV2, self).__init__()\n    self._keras_model = keras_model\n    self._trackable_obj = trackable_obj\n    self.experimental_lower_to_saved_model = True",
    "docstring": "Constructor for TFLiteConverter. Args: keras_model: tf.Keras.Model. trackable_obj: tf.AutoTrackable object associated with . A reference to this object needs to be maintained so that Variables do not get garbage collected since functions have a weak reference to Variables. This is only required when the tf.AutoTrackable object is not maintained by the user (e.g. ).",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\lite\\python\\lite.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:keras_model arg:trackable_obj arguments arg arg arg Call Call Assign Assign Assign"
  },
  {
    "library": "pytorch",
    "name": "determine_observer_insert_points",
    "source_code": "def determine_observer_insert_points(self, prepared_fx_model: GraphModule) -> dict[str, dict[str, Any]]:\n    obs_ctr = ModelReportObserver\n    obs_fqn_to_info: dict[str, dict[str, Any]] = {}\n    for fqn, module in prepared_fx_model.named_modules():\n        if self._is_supported(module, insert=True):\n            targeted_node = self._get_targeting_node(prepared_fx_model, fqn)\n            pre_obs_fqn = fqn + '.' + self.DEFAULT_PRE_OBSERVER_NAME\n            obs_fqn_to_info[pre_obs_fqn] = {DETECTOR_TARGET_NODE_KEY: targeted_node, DETECTOR_OBS_TO_INSERT_KEY: obs_ctr(), DETECTOR_IS_POST_OBS_KEY: False, DETECTOR_OBS_ARGS_KEY: targeted_node.args}\n            post_obs_fqn = fqn + '.' + self.DEFAULT_POST_OBSERVER_NAME\n            obs_fqn_to_info[post_obs_fqn] = {DETECTOR_TARGET_NODE_KEY: targeted_node, DETECTOR_OBS_TO_INSERT_KEY: obs_ctr(), DETECTOR_IS_POST_OBS_KEY: True, DETECTOR_OBS_ARGS_KEY: (targeted_node,)}\n    return obs_fqn_to_info",
    "docstring": "Determines where observers need to be inserted for the Dynamic vs Static detector. For this detector, we want to place observers on either side of linear layers in the model. Currently inserts observers for: linear layers Args: prepared_fx_model (GraphModule): The prepared Fx GraphModule Returns a Dict mapping from unique observer fqns (where we want to insert them) to a Dict with: key \"target_node\" -> the node we are trying to observe with this observer (torch.fx.node.Node) key \"observer_to_insert\" -> the observer we wish to insert (ObserverBase) key \"is_post_observer\" -> True if this is meant to be a post-observer for target_node, False if pre-observer key \"observer_args\" -> The arguments that are meant to be passed into the observer",
    "type": "method",
    "file_path": "pytorch\\torch\\ao\\quantization\\fx\\_model_report\\detector.py",
    "ast_data": "FunctionDef name:determine_observer_insert_points arg:self arg:prepared_fx_model arguments arg arg Assign For Call If Call Assign Call Assign Assign Call Assign Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "modify_model_interface",
    "source_code": "def modify_model_interface(input_file, output_file, input_type, output_type):\n    input_type_int = _parse_type_to_int(input_type, 'input_type')\n    output_type_int = _parse_type_to_int(output_type, 'output_type')\n    status = _pywrap_modify_model_interface.modify_model_interface(input_file, output_file, input_type_int, output_type_int)\n    if status != 0:\n        raise RuntimeError('Error occurred when trying to modify the model input type from float to {input_type} and output type from float to {output_type}.'.format(input_type=input_type, output_type=output_type))",
    "docstring": "Modify a quantized model's interface (input/output) from float to integer. Args: input_file: Full path name to the input tflite file. output_file: Full path name to the output tflite file. input_type: Final input interface type. output_type: Final output interface type. Raises: RuntimeError: If the modification of the model interface was unsuccessful. ValueError: If the input_type or output_type is unsupported.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\lite\\tools\\optimize\\python\\modify_model_interface_lib.py",
    "ast_data": "FunctionDef name:modify_model_interface arg:input_file arg:output_file arg:input_type arg:output_type arguments arg arg arg arg Assign Call Assign Call Assign Call If Compare Raise Call Call"
  },
  {
    "library": "matplotlib",
    "name": "on_changed",
    "source_code": "def on_changed(self, func):\n    return self._observers.connect('changed', lambda val: func(val))",
    "docstring": "Connect *func* as callback function to changes of the slider value. Parameters ---------- func : callable Function to call when slider is changed. The function must accept a 2-tuple of floats as its argument. Returns ------- int Connection id (which can be used to disconnect *func*).",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\widgets.py",
    "ast_data": "FunctionDef name:on_changed arg:self arg:func arguments arg arg Return return:yes Call arguments arg Call"
  },
  {
    "library": "tensorflow",
    "name": "_specs_for_flat_tensors",
    "source_code": "def _specs_for_flat_tensors(element_spec):\n    if isinstance(element_spec, StructuredTensor.Spec):\n        specs = []\n        for _, field_spec in sorted(element_spec._field_specs.items(), key=lambda t: t[0]):\n            specs.extend(_specs_for_flat_tensors(field_spec))\n    elif isinstance(element_spec, type_spec.BatchableTypeSpec) and element_spec.__class__._flat_tensor_specs is type_spec.BatchableTypeSpec._flat_tensor_specs:\n        specs = nest.flatten(element_spec._component_specs, expand_composites=False)\n    else:\n        specs = nest.flatten(element_spec, expand_composites=False)\n    return specs",
    "docstring": "Return a flat list of type specs for element_spec. Note that \"flat\" in this function and in is a nickname for the \"batchable tensor list\" encoding used by datasets and map_fn internally (in C++/graphs). The ability to batch, unbatch and change batch size is one important characteristic of this encoding. A second important characteristic is that it represents a ragged tensor or sparse tensor as a single tensor of type variant (and this encoding uses special ops to encode/decode to/from variants). (In contrast, the more typical encoding, e.g. the C++/graph representation when calling a tf.function, is \"component encoding\" which represents sparse and ragged tensors as multiple dense tensors and does not use variants or special ops for encoding/decoding.) Args: element_spec: A nest of TypeSpec describing the elements of a dataset (or map_fn). Returns: A non-nested list of TypeSpec used by the encoding of tensors by datasets and map_fn for ELEMENT_SPEC. The items in this list correspond to the items in .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\type_utils.py",
    "ast_data": "FunctionDef name:_specs_for_flat_tensors arg:element_spec arguments arg If Call Assign For Call Call arguments arg Call Call If BoolOp Call Compare Assign Call Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "update_state",
    "source_code": "@abc.abstractmethod\ndef update_state(self, *args, **kwargs):\n    raise NotImplementedError('Must be implemented in subclasses.')",
    "docstring": "Accumulates statistics for the metric. Note: This function is executed as a graph function in graph mode. This means: a) Operations on the same resource are executed in textual order. This should make it easier to do things like add the updated value of a variable to another, for example. b) You don't need to worry about collecting the update ops to execute. All update ops added to the graph by this function will be executed. As a result, code should generally work the same way with graph or eager execution. Args: *args: **kwargs: A mini-batch of inputs to the Metric.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\metrics.py",
    "ast_data": "FunctionDef name:update_state arg:self arguments arg arg arg Raise Call"
  },
  {
    "library": "scipy",
    "name": "_from_tck",
    "source_code": "@classmethod\ndef _from_tck(cls, tck, ext=0):\n    self = cls.__new__(cls)\n    t, c, k = tck\n    self._eval_args = tck\n    self._data = (None, None, None, None, None, k, None, len(t), t, c, None, None, None, None)\n    self.ext = ext\n    return self",
    "docstring": "Construct a spline object from given tck",
    "type": "method",
    "file_path": "scipy\\scipy\\interpolate\\_fitpack2.py",
    "ast_data": "FunctionDef name:_from_tck arg:cls arg:tck arg:ext arguments arg arg arg Assign Call Assign Assign Assign Call Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_pack_sequence_as",
    "source_code": "def _pack_sequence_as(loop_vars_signature, flat_orig_loop_vars, loop_vars):\n\n    def flow_to_tensor_array(flow, ta):\n        return tensor_array_ops.build_ta_with_new_flow(ta, flow) if isinstance(ta, tensor_array_ops.TensorArray) else flow\n    flattened_loop_vars = [flow_to_tensor_array(*z) for z in zip(nest.flatten(loop_vars, expand_composites=True), flat_orig_loop_vars)]\n    return nest.pack_sequence_as(loop_vars_signature, flattened_loop_vars, expand_composites=True)",
    "docstring": "Like but also replaces flows with TensorArrays.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\while_v2.py",
    "ast_data": "FunctionDef name:_pack_sequence_as arg:loop_vars_signature arg:flat_orig_loop_vars arg:loop_vars arguments arg arg arg FunctionDef name:flow_to_tensor_array arg:flow arg:ta arguments arg arg Return return:yes Call Call Assign Call Call Call Return return:yes Call"
  },
  {
    "library": "virtualenv",
    "name": "generate_re",
    "source_code": "def generate_re(self, *, windows: bool) -> re.Pattern:\n    version = '{}(\\\\.{}(\\\\.{})?)?'.format(*('\\\\d+' if v is None else v for v in (self.major, self.minor, self.micro)))\n    impl = 'python' if self.implementation is None else f'python|{re.escape(self.implementation)}'\n    mod = 't?' if self.free_threaded else ''\n    suffix = '\\\\.exe' if windows else ''\n    version_conditional = '?' if windows or self.major is None else ''\n    return re.compile(f'(?P<impl>{impl})(?P<v>{version}{mod}){version_conditional}{suffix}$', flags=re.IGNORECASE)",
    "docstring": "Generate a regular expression for matching against a filename.",
    "type": "method",
    "file_path": "virtualenv\\src\\virtualenv\\discovery\\py_spec.py",
    "ast_data": "FunctionDef name:generate_re arg:self arguments arg arg Assign Call Compare Assign Compare Call Assign Assign Assign BoolOp Compare Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "ErrorLines",
    "source_code": "class ErrorLines:\n    WINDOW = 5\n    BEFORE = 2\n    AFTER = WINDOW - BEFORE - 1",
    "docstring": "How many lines to display before and after an error",
    "type": "class",
    "file_path": "pytorch\\tools\\linter\\adapters\\_linter.py",
    "ast_data": "ClassDef name:ErrorLines Assign Assign Assign"
  },
  {
    "library": "tensorflow",
    "name": "_dense_solve",
    "source_code": "def _dense_solve(self, rhs, adjoint=False, adjoint_arg=False):\n    if self.is_square is False:\n        raise NotImplementedError('Solve is not yet implemented for non-square operators.')\n    rhs = linalg.adjoint(rhs) if adjoint_arg else rhs\n    if self._can_use_cholesky():\n        return linalg_ops.cholesky_solve(linalg_ops.cholesky(self.to_dense()), rhs)\n    return linear_operator_util.matrix_solve_with_broadcast(self.to_dense(), rhs, adjoint=adjoint)",
    "docstring": "Solve by conversion to a dense matrix.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\linalg\\linear_operator.py",
    "ast_data": "FunctionDef name:_dense_solve arg:self arg:rhs arg:adjoint arg:adjoint_arg arguments arg arg arg arg If Compare Raise Call Assign Call If Call Return return:yes Call Call Call Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "populate_deserializable_objects",
    "source_code": "def populate_deserializable_objects():\n    global LOCAL\n    if not hasattr(LOCAL, 'ALL_OBJECTS'):\n        LOCAL.ALL_OBJECTS = {}\n        LOCAL.GENERATED_WITH_V2 = None\n    if LOCAL.ALL_OBJECTS and LOCAL.GENERATED_WITH_V2 == tf2.enabled():\n        return\n    LOCAL.ALL_OBJECTS = {}\n    LOCAL.GENERATED_WITH_V2 = tf2.enabled()\n    base_cls = base_layer.Layer\n    generic_utils.populate_dict_with_module_objects(LOCAL.ALL_OBJECTS, ALL_MODULES, obj_filter=lambda x: inspect.isclass(x) and issubclass(x, base_cls))\n    if tf2.enabled():\n        generic_utils.populate_dict_with_module_objects(LOCAL.ALL_OBJECTS, ALL_V2_MODULES, obj_filter=lambda x: inspect.isclass(x) and issubclass(x, base_cls))\n    from tensorflow.python.keras import models\n    LOCAL.ALL_OBJECTS['Input'] = input_layer.Input\n    LOCAL.ALL_OBJECTS['InputSpec'] = input_spec.InputSpec\n    LOCAL.ALL_OBJECTS['Functional'] = models.Functional\n    LOCAL.ALL_OBJECTS['Model'] = models.Model\n    LOCAL.ALL_OBJECTS['Sequential'] = models.Sequential\n    LOCAL.ALL_OBJECTS['add'] = merge.add\n    LOCAL.ALL_OBJECTS['subtract'] = merge.subtract\n    LOCAL.ALL_OBJECTS['multiply'] = merge.multiply\n    LOCAL.ALL_OBJECTS['average'] = merge.average\n    LOCAL.ALL_OBJECTS['maximum'] = merge.maximum\n    LOCAL.ALL_OBJECTS['minimum'] = merge.minimum\n    LOCAL.ALL_OBJECTS['concatenate'] = merge.concatenate\n    LOCAL.ALL_OBJECTS['dot'] = merge.dot",
    "docstring": "Populates dict ALL_OBJECTS with every built-in layer.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\layers\\serialization.py",
    "ast_data": "FunctionDef name:populate_deserializable_objects arguments If Call Assign Assign If BoolOp Compare Call Return return:no Assign Assign Call Assign Call arguments arg BoolOp Call Call If Call Call arguments arg BoolOp Call Call Assign Assign Assign Assign Assign Assign Assign Assign Assign Assign Assign Assign Assign"
  },
  {
    "library": "authlib",
    "name": "delete_temporary_credential",
    "source_code": "def delete_temporary_credential(self, request):\n    raise NotImplementedError()",
    "docstring": "Delete temporary credential from database or cache. For instance, if temporary credential is saved in cache:: def delete_temporary_credential(self, request): key = \"a-key-prefix:{}\".format(request.token) cache.delete(key) :param request: OAuth1Request instance",
    "type": "method",
    "file_path": "authlib\\authlib\\oauth1\\rfc5849\\authorization_server.py",
    "ast_data": "FunctionDef name:delete_temporary_credential arg:self arg:request arguments arg arg Raise Call"
  },
  {
    "library": "pytorch",
    "name": "randn",
    "source_code": "def randn(*size, requires_grad: bool=False, dtype: Optional[torch.dtype]=None, layout: torch.layout=torch.strided, device_mesh: Optional[DeviceMesh]=None, placements: Optional[Sequence[Placement]]=None) -> DTensor:\n    torch_size = normalize_to_torch_size(size)\n    return _dtensor_init_helper(torch.randn, torch_size, dtype=dtype, layout=layout, requires_grad=requires_grad, device_mesh=device_mesh, placements=placements)",
    "docstring": "Returns a :class: filled with random numbers from a normal distribution with mean 0 and variance 1. The shape of the tensor is defined by the variable argument `DTensortorch.dtypeDTensortorch.set_default_dtypetorch.layoutDTensorDeviceMeshPlacementDTensor` object on each rank",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\tensor\\_api.py",
    "ast_data": "FunctionDef name:randn arguments arg arg arg arg arg arg Assign Call Return return:yes Call"
  },
  {
    "library": "django",
    "name": "DatabaseDefault",
    "source_code": "class DatabaseDefault(Expression):\n\n    def __init__(self, expression, output_field=None):\n        super().__init__(output_field)\n        self.expression = expression\n\n    def get_source_expressions(self):\n        return [self.expression]\n\n    def set_source_expressions(self, exprs):\n        self.expression, = exprs\n\n    def resolve_expression(self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False):\n        resolved_expression = self.expression.resolve_expression(query=query, allow_joins=allow_joins, reuse=reuse, summarize=summarize, for_save=for_save)\n        if not for_save:\n            return resolved_expression\n        return DatabaseDefault(resolved_expression, output_field=self._output_field_or_none)\n\n    def as_sql(self, compiler, connection):\n        if not connection.features.supports_default_keyword_in_insert:\n            return compiler.compile(self.expression)\n        return ('DEFAULT', [])",
    "docstring": "Expression to use DEFAULT keyword during insert otherwise the underlying expression.",
    "type": "class",
    "file_path": "django\\django\\db\\models\\expressions.py",
    "ast_data": "ClassDef name:DatabaseDefault FunctionDef name:__init__ arg:self arg:expression arg:output_field arguments arg arg arg Call Call Assign FunctionDef name:get_source_expressions arg:self arguments arg Return return:yes FunctionDef name:set_source_expressions arg:self arg:exprs arguments arg arg Assign FunctionDef name:resolve_expression arg:self arg:query arg:allow_joins arg:reuse arg:summarize arg:for_save arguments arg arg arg arg arg arg Assign Call If Return return:yes Return return:yes Call FunctionDef name:as_sql arg:self arg:compiler arg:connection arguments arg arg arg If Return return:yes Call Return return:yes"
  },
  {
    "library": "django",
    "name": "get_relations",
    "source_code": "def get_relations(self, cursor, table_name):\n    cursor.execute('PRAGMA foreign_key_list(%s)' % self.connection.ops.quote_name(table_name))\n    return {column_name: (ref_column_name, ref_table_name) for _, _, ref_table_name, column_name, ref_column_name, *_ in cursor.fetchall()}",
    "docstring": "Return a dictionary of {column_name: (ref_column_name, ref_table_name)} representing all foreign keys in the given table.",
    "type": "method",
    "file_path": "django\\django\\db\\backends\\sqlite3\\introspection.py",
    "ast_data": "FunctionDef name:get_relations arg:self arg:cursor arg:table_name arguments arg arg arg Call Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_should_cast",
    "source_code": "def _should_cast(self):\n    autocast_dtype = getattr(_autocast_dtype, 'dtype', None)\n    return autocast_dtype is not None and self.dtype != autocast_dtype",
    "docstring": "Returns True if this variable should be casted when accessed.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\mixed_precision\\autocast_variable.py",
    "ast_data": "FunctionDef name:_should_cast arg:self arguments arg Assign Call Return return:yes BoolOp Compare Compare"
  },
  {
    "library": "tensorflow",
    "name": "_create_tuple",
    "source_code": "def _create_tuple(shape, value):\n    if shape:\n        return tuple([_create_tuple(shape[1:], value) for _ in range(shape[0])])\n    return value",
    "docstring": "Returns a tuple with given shape and filled with value.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\feature_column\\utils.py",
    "ast_data": "FunctionDef name:_create_tuple arg:shape arg:value arguments arg arg If Return return:yes Call Call Call Return return:yes"
  },
  {
    "library": "numpy",
    "name": "FFTWNotFoundError",
    "source_code": "class FFTWNotFoundError(NotFoundError):\n    pass",
    "docstring": "FFTW ( libraries not found. Directories to search for the libraries can be specified in the numpy/distutils/site.cfg file (section [fftw]) or by setting the FFTW environment variable.",
    "type": "class",
    "file_path": "numpy\\numpy\\distutils\\system_info.py",
    "ast_data": "ClassDef name:FFTWNotFoundError"
  },
  {
    "library": "tensorflow",
    "name": "from_session",
    "source_code": "@classmethod\ndef from_session(cls, sess, input_tensors, output_tensors):\n    TFLiteConverterBase._set_original_model_type(conversion_metadata_fb.ModelType.TF_SESSION)\n    graph_def = _freeze_graph(sess, input_tensors, output_tensors)\n    return cls(graph_def, input_tensors, output_tensors, experimental_debug_info_func=_build_debug_info_func(sess.graph))",
    "docstring": "Creates a TFLiteConverter class from a TensorFlow Session. Args: sess: TensorFlow Session. input_tensors: List of input tensors. Type and shape are computed using and . output_tensors: List of output tensors (only .name is used from this). Returns: TFLiteConverter class.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\lite\\python\\lite.py",
    "ast_data": "FunctionDef name:from_session arg:cls arg:sess arg:input_tensors arg:output_tensors arguments arg arg arg arg Call Assign Call Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "mean",
    "source_code": "@_apply_docstring_templates\ndef mean(input: Union[Tensor, MaskedTensor], dim: DimOrDims=None, *, keepdim: Optional[bool]=False, dtype: Optional[DType]=None, mask: Optional[Tensor]=None) -> Tensor:\n    dtype_source = 'Optional'\n    if dtype is None:\n        dtype = input.dtype\n        dtype_source = 'Input'\n    if not (dtype.is_floating_point or dtype.is_complex):\n        raise ValueError(f'mean(): Could not infer output dtype. {dtype_source} dtype must be either a floating point or complex dtype. Got: {dtype}')\n    if input.layout == torch.strided:\n        if mask is None:\n            count = sum(torch.ones(input.shape, dtype=torch.int64, device=input.device), dim, keepdim=keepdim)\n            total = sum(input, dim, keepdim=keepdim, dtype=dtype)\n        else:\n            inmask = _input_mask(input, mask=mask)\n            count = inmask.sum(dim=dim, keepdim=bool(keepdim))\n            total = sum(input, dim, keepdim=keepdim, dtype=dtype, mask=inmask)\n        return total / count\n    elif input.layout == torch.sparse_csr:\n        mask_input = _combine_input_and_mask(mean, input, mask)\n        dim_ = _canonical_dim(dim, mask_input.ndim)\n        if mask is None:\n            raise ValueError('masked mean expects explicit mask for sparse_csr tensor input')\n        return _sparse_csr_segment_reduction_helper(torch.mean, mask_input, dim_, bool(keepdim), dtype)\n    else:\n        raise ValueError(f'masked mean expects strided or sparse_csr tensor (got {input.layout} tensor)')",
    "docstring": "{reduction_signature} {reduction_descr} By definition, the identity value of a mean operation is the mean value of the tensor. If all elements of the input tensor along given dimension(s) :attr: are masked-out, the identity value of the mean is undefined. Due to this ambiguity, the elements of output tensor with strided layout, that correspond to fully masked-out elements, have `` values. {reduction_args} {reduction_example}",
    "type": "function",
    "file_path": "pytorch\\torch\\masked\\_ops.py",
    "ast_data": "FunctionDef name:mean arg:input arg:dim arguments arg arg arg arg arg Assign If Compare Assign Assign If BoolOp Raise Call If Compare If Compare Assign Call Call Assign Call Assign Call Assign Call Call Assign Call Return return:yes If Compare Assign Call Assign Call If Compare Raise Call Return return:yes Call Call Raise Call"
  },
  {
    "library": "scikit-learn",
    "name": "show_versions",
    "source_code": "def show_versions():\n    sys_info = _get_sys_info()\n    deps_info = _get_deps_info()\n    print('\\nSystem:')\n    for k, stat in sys_info.items():\n        print('{k:>10}: {stat}'.format(k=k, stat=stat))\n    print('\\nPython dependencies:')\n    for k, stat in deps_info.items():\n        print('{k:>13}: {stat}'.format(k=k, stat=stat))\n    print('\\n{k}: {stat}'.format(k='Built with OpenMP', stat=_openmp_parallelism_enabled()))\n    threadpool_results = threadpool_info()\n    if threadpool_results:\n        print()\n        print('threadpoolctl info:')\n        for i, result in enumerate(threadpool_results):\n            for key, val in result.items():\n                print(f'{key:>15}: {val}')\n            if i != len(threadpool_results) - 1:\n                print()",
    "docstring": "Print useful debugging information\" .. versionadded:: 0.20 Examples -------- >>> from sklearn import show_versions >>> show_versions() # doctest: +SKIP",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\utils\\_show_versions.py",
    "ast_data": "FunctionDef name:show_versions arguments Assign Call Assign Call Call For Call Call Call Call For Call Call Call Call Call Call Assign Call If Call Call For Call For Call Call If Compare Call Call"
  },
  {
    "library": "pytorch",
    "name": "impl_abstract",
    "source_code": "@deprecated('`torch.library.impl_abstract` was renamed to `torch.library.register_fake`. Please use that instead; we will remove `torch.library.impl_abstract` in a future version of PyTorch.', category=FutureWarning)\ndef impl_abstract(qualname, func=None, *, lib=None, _stacklevel=1):\n    if func is not None:\n        _stacklevel = _stacklevel + 1\n    return register_fake(qualname, func, lib=lib, _stacklevel=_stacklevel)",
    "docstring": "This API was renamed to :func: in PyTorch 2.4. Please use that instead.",
    "type": "function",
    "file_path": "pytorch\\torch\\library.py",
    "ast_data": "FunctionDef name:impl_abstract arg:qualname arg:func arguments arg arg arg arg If Compare Assign Return return:yes Call Call"
  },
  {
    "library": "matplotlib",
    "name": "get_clip_on",
    "source_code": "def get_clip_on(self):\n    return self._clipon",
    "docstring": "Return whether the artist uses clipping.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\artist.py",
    "ast_data": "FunctionDef name:get_clip_on arg:self arguments arg Return return:yes"
  },
  {
    "library": "sphinx",
    "name": "desc_sig_name",
    "source_code": "class desc_sig_name(desc_sig_element, _sig_element=True):\n    classes = ['n']",
    "docstring": "Node for an identifier in a signature.",
    "type": "class",
    "file_path": "sphinx\\sphinx\\addnodes.py",
    "ast_data": "ClassDef name:desc_sig_name Assign"
  },
  {
    "library": "pytorch",
    "name": "kaiming_uniform_",
    "source_code": "def kaiming_uniform_(tensor: Tensor, a: float=0, mode: str='fan_in', nonlinearity: str='leaky_relu', generator: _Optional[torch.Generator]=None):\n    if torch.overrides.has_torch_function_variadic(tensor):\n        return torch.overrides.handle_torch_function(kaiming_uniform_, (tensor,), tensor=tensor, a=a, mode=mode, nonlinearity=nonlinearity, generator=generator)\n    if 0 in tensor.shape:\n        warnings.warn('Initializing zero-element tensors is a no-op')\n        return tensor\n    fan = _calculate_correct_fan(tensor, mode)\n    gain = calculate_gain(nonlinearity, a)\n    std = gain / math.sqrt(fan)\n    bound = math.sqrt(3.0) * std\n    with torch.no_grad():\n        return tensor.uniform_(-bound, bound, generator=generator)",
    "docstring": "Fill the input with values using a Kaiming uniform distribution. The method is described in - He, K. et al. (2015). The resulting tensor will have values sampled from :math: where .. math:: \\text{bound} = \\text{gain} \\times \\sqrt{\\frac{3}{\\text{fan\\_mode}}} Also known as He initialization. Args: tensor: an n-dimensional a: the negative slope of the rectifier used after this layer (only used with `nn.functional`.",
    "type": "function",
    "file_path": "pytorch\\torch\\nn\\init.py",
    "ast_data": "FunctionDef name:kaiming_uniform_ arg:tensor arg:a arg:mode arg:nonlinearity arg:generator arguments arg arg arg arg arg If Call Return return:yes Call If Compare Call Return return:yes Assign Call Assign Call Assign Call Assign Call With Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "train_function",
    "source_code": "def train_function(iterator):\n    return step_function(self, iterator)",
    "docstring": "Runs a training execution with one step.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\training.py",
    "ast_data": "FunctionDef name:train_function arg:iterator arguments arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "get_non_cat_node_input",
    "source_code": "def get_non_cat_node_input(self, split_node: torch.fx.Node, node: torch.fx.Node) -> list[_Range]:\n    node_input = []\n    split_users = OrderedSet(split_node.users.keys())\n    for node_arg in node.all_input_nodes:\n        if node_arg in split_users:\n            getitem_num = get_arg_value(node_arg, 1)\n            node_input.append((getitem_num, getitem_num))\n    return node_input",
    "docstring": "Get input for a non cat node in the same format as",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\fx_passes\\split_cat.py",
    "ast_data": "FunctionDef name:get_non_cat_node_input arg:self arg:split_node arg:node arguments arg arg arg Assign Assign Call Call For If Compare Assign Call Call Return return:yes"
  },
  {
    "library": "virtualenv",
    "name": "__init__",
    "source_code": "def __init__(self, options, enabled) -> None:\n    self.enabled = enabled\n    self.env = options.env",
    "docstring": "Create. :param options: the parsed options as defined within :meth: :param enabled: a flag weather the seeder is enabled or not",
    "type": "method",
    "file_path": "virtualenv\\src\\virtualenv\\seed\\seeder.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:options arg:enabled arguments arg arg arg Assign Assign"
  },
  {
    "library": "numpy",
    "name": "__len__",
    "source_code": "def __len__(self):\n    if self.ndim:\n        return len(self._data)\n    return len(self.dtype)",
    "docstring": "Returns the length",
    "type": "method",
    "file_path": "numpy\\numpy\\ma\\mrecords.py",
    "ast_data": "FunctionDef name:__len__ arg:self arguments arg If Return return:yes Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_StateStack",
    "source_code": "class _StateStack(object):\n\n    def __init__(self, type_):\n        object.__setattr__(self, 'type', type_)\n        object.__setattr__(self, '_stack', [])\n        if not hasattr(type_, 'no_root'):\n            self.enter()\n\n    def __enter__(self):\n        self.enter()\n        return self\n\n    def __exit__(self, exc_type, exc_value, traceback):\n        self.exit()\n\n    def enter(self):\n        self._stack.append(self.type())\n\n    def exit(self):\n        self._stack.pop()\n\n    @property\n    def stack(self):\n        return self._stack\n\n    @property\n    def level(self):\n        return len(self._stack)\n\n    @property\n    def value(self):\n        return self._stack[-1]\n\n    def __iter__(self):\n        return iter(self._stack)\n\n    def __getattr__(self, key):\n        return getattr(self._stack[-1], key)\n\n    def __setattr__(self, key, value):\n        setattr(self._stack[-1], key, value)",
    "docstring": "Templated context manager. This class provides syntactic sugar for a stack of objects of known type. It allows accessing attributes of the object at the top of the stack directly against this object, which allows for very terse syntax. For example, this code: stack = _StateStack(Foo) stack.enter() stack.bar Is equivalent to: stack = [] stack.append(Foo()) foo = stack[-1] foo.bar See _State for more on how this is used. Attributes: type: Any, the type of objects that this stack holds level: int, the current stack depth stack: List[Any], the actual stack value: Any, the instance of the object at the top of the stack",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\autograph\\pyct\\transformer.py",
    "ast_data": "ClassDef name:_StateStack FunctionDef name:__init__ arg:self arg:type_ arguments arg arg Call Call If Call Call FunctionDef name:__enter__ arg:self arguments arg Call Return return:yes FunctionDef name:__exit__ arg:self arg:exc_type arg:exc_value arg:traceback arguments arg arg arg arg Call FunctionDef name:enter arg:self arguments arg Call Call FunctionDef name:exit arg:self arguments arg Call FunctionDef name:stack arg:self arguments arg Return return:yes FunctionDef name:level arg:self arguments arg Return return:yes Call FunctionDef name:value arg:self arguments arg Return return:yes FunctionDef name:__iter__ arg:self arguments arg Return return:yes Call FunctionDef name:__getattr__ arg:self arg:key arguments arg arg Return return:yes Call FunctionDef name:__setattr__ arg:self arg:key arg:value arguments arg arg arg Call"
  },
  {
    "library": "scipy",
    "name": "poles",
    "source_code": "@property\ndef poles(self):\n    return self.to_zpk().poles",
    "docstring": "Poles of the system.",
    "type": "method",
    "file_path": "scipy\\scipy\\signal\\_ltisys.py",
    "ast_data": "FunctionDef name:poles arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_create_temp_cache",
    "source_code": "def _create_temp_cache(self, num_traced_tensors, num_signatures, graph):\n    init_value = constant_op.constant(_COMPACT_TRACE_ENTRY_INIT_VALUE, dtype=dtypes.float32, shape=[num_signatures])\n    self._temp_cache_var[graph] = [init_value for _ in range(num_traced_tensors)]",
    "docstring": "Creates a temporary cache with the given dimensions. Fills the self._temp_cache_var with num_traced_tensors tf.constant() ops that have shape of [num_signatures]. Args: num_traced_tensors: Int, denoting total number of traced tensors. num_signatures: Int, denoting the number of statistics collected per tensors. graph: TensorFlow graph.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\tpu\\tensor_tracer.py",
    "ast_data": "FunctionDef name:_create_temp_cache arg:self arg:num_traced_tensors arg:num_signatures arg:graph arguments arg arg arg arg Assign Call Assign Call"
  },
  {
    "library": "tensorflow",
    "name": "ConvertLiteralToNumpyArray",
    "source_code": "def ConvertLiteralToNumpyArray(literal):\n    element_type = literal.shape.element_type\n    if element_type == xla_data_pb2.TUPLE:\n        return tuple((ConvertLiteralToNumpyArray(subliteral) for subliteral in literal.tuple_literals))\n    type_record = types_.MAP_XLA_TYPE_TO_RECORD[element_type]\n    if not literal.shape.dimensions:\n        return _np.array(getattr(literal, type_record.literal_field_name)[0], type_record.numpy_dtype)\n    else:\n        layout_order = literal.shape.layout.minor_to_major\n        numpy_shape = tuple(literal.shape.dimensions)\n        if layout_order == list(range(len(literal.shape.dimensions))):\n            numpy_reshaper = lambda arr: arr.reshape(numpy_shape, order='F')\n        elif layout_order == list(range(len(literal.shape.dimensions) - 1, -1, -1)):\n            numpy_reshaper = lambda arr: arr.reshape(numpy_shape, order='C')\n        else:\n            raise NotImplementedError('Unsupported layout: {0}'.format(layout_order))\n        ndarray = _np.asarray(getattr(literal, type_record.literal_field_name), dtype=type_record.numpy_dtype)\n        return numpy_reshaper(ndarray)",
    "docstring": "Converts a XLA literal to a Numpy array.",
    "type": "function",
    "file_path": "tensorflow\\third_party\\xla\\xla\\python_api\\xla_literal.py",
    "ast_data": "FunctionDef name:ConvertLiteralToNumpyArray arg:literal arguments arg Assign If Compare Return return:yes Call Call Assign If Return return:yes Call Call Assign Assign Call If Compare Call Call Call Assign arguments arg Call If Compare Call Call Call Assign arguments arg Call Raise Call Call Assign Call Call Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "_ExceptionInfo",
    "source_code": "class _ExceptionInfo:\n\n    def __init__(self, cls, *args):\n        self._cls = cls\n        self._args = args\n\n    @classmethod\n    def from_exception(cls, exc):\n        return cls(type(exc), *exc.args)\n\n    def to_exception(self):\n        return self._cls(*self._args)",
    "docstring": "A class to carry exception information around. This is used to store and later raise exceptions. It's an alternative to directly storing Exception instances that circumvents traceback-related issues: caching tracebacks can keep user's objects in local namespaces alive indefinitely, which can lead to very surprising memory issues for users and result in incorrect tracebacks.",
    "type": "class",
    "file_path": "matplotlib\\lib\\matplotlib\\cbook.py",
    "ast_data": "ClassDef name:_ExceptionInfo FunctionDef name:__init__ arg:self arg:cls arguments arg arg arg Assign Assign FunctionDef name:from_exception arg:cls arg:exc arguments arg arg Return return:yes Call Call FunctionDef name:to_exception arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "sphinx",
    "name": "find_uri",
    "source_code": "def find_uri(self, node: nodes.Element) -> str | None:\n    if isinstance(node, nodes.reference):\n        if 'refuri' in node:\n            return node['refuri']\n    if isinstance(node, nodes.image):\n        uri = node['candidates'].get('?')\n        if uri and '://' in uri:\n            return uri\n    if isinstance(node, nodes.raw):\n        uri = node.get('source')\n        if uri and '://' in uri:\n            return uri\n    return None",
    "docstring": "Find a URI for a given node. This call can be used to retrieve a URI from a provided node. If no URI exists for a provided node, this call will return ``. This method can be useful for extension developers who wish to easily inject hyperlinks into a builder by only needing to override this method. :param node: A node class :returns: URI of the node",
    "type": "method",
    "file_path": "sphinx\\sphinx\\builders\\linkcheck.py",
    "ast_data": "FunctionDef name:find_uri arg:self arg:node arguments arg arg If Call If Compare Return return:yes If Call Assign Call If BoolOp Compare Return return:yes If Call Assign Call If BoolOp Compare Return return:yes Return return:no"
  },
  {
    "library": "scipy",
    "name": "__init__",
    "source_code": "def __init__(self, seed=None):\n    super().__init__(seed)\n    self.__doc__ = doccer.docformat(self.__doc__, mvt_docdict_params)\n    self._random_state = check_random_state(seed)",
    "docstring": "Initialize a multivariate t-distributed random variable. Parameters ---------- seed : Random state.",
    "type": "method",
    "file_path": "scipy\\scipy\\stats\\_multivariate.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:seed arguments arg arg Call Call Assign Call Assign Call"
  },
  {
    "library": "tensorflow",
    "name": "_tfmw_import_module",
    "source_code": "def _tfmw_import_module(self, name):\n    if self._tfmw_is_compat_v1 and name != 'app' and (not TFModuleWrapper.compat_v1_usage_recorded):\n        TFModuleWrapper.compat_v1_usage_recorded = True\n        compat_v1_usage_gauge.get_cell().set(True)\n    symbol_loc_info = self._tfmw_public_apis[name]\n    if symbol_loc_info[0]:\n        module = importlib.import_module(symbol_loc_info[0])\n        attr = getattr(module, symbol_loc_info[1])\n    else:\n        attr = importlib.import_module(symbol_loc_info[1])\n    setattr(self._tfmw_wrapped_module, name, attr)\n    self.__dict__[name] = attr\n    self._fastdict_insert(name, attr)\n    return attr",
    "docstring": "Lazily loading the modules.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\util\\module_wrapper.py",
    "ast_data": "FunctionDef name:_tfmw_import_module arg:self arg:name arguments arg arg If BoolOp Compare Assign Call Call Assign If Assign Call Assign Call Assign Call Call Assign Call Return return:yes"
  },
  {
    "library": "sphinx",
    "name": "get_objects",
    "source_code": "def get_objects(self) -> Iterable[tuple[str, str, str, str, str, int]]:\n    return []",
    "docstring": "Return an iterable of \"object descriptions\". Object descriptions are tuples with six items: `` Object should not show up in search at all.",
    "type": "method",
    "file_path": "sphinx\\sphinx\\domains\\__init__.py",
    "ast_data": "FunctionDef name:get_objects arg:self arguments arg Return return:no"
  },
  {
    "library": "pytorch",
    "name": "help",
    "source_code": "def help(github, model, force_reload=False, skip_validation=False, trust_repo=None):\n    repo_dir = _get_cache_or_reload(github, force_reload, trust_repo, 'help', verbose=True, skip_validation=skip_validation)\n    with _add_to_sys_path(repo_dir):\n        hubconf_path = os.path.join(repo_dir, MODULE_HUBCONF)\n        hub_module = _import_module(MODULE_HUBCONF, hubconf_path)\n    entry = _load_entry_from_hubconf(hub_module, model)\n    return entry.__doc__",
    "docstring": "Show the docstring of entrypoint `` in v2.0. Example: >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_HUB) >>> print(torch.hub.help(\"pytorch/vision\", \"resnet18\", force_reload=True))",
    "type": "function",
    "file_path": "pytorch\\torch\\hub.py",
    "ast_data": "FunctionDef name:help arg:github arg:model arg:force_reload arg:skip_validation arg:trust_repo arguments arg arg arg arg arg Assign Call With Call Assign Call Assign Call Assign Call Return return:yes"
  },
  {
    "library": "django",
    "name": "get_containing_app_config",
    "source_code": "def get_containing_app_config(self, object_name):\n    self.check_apps_ready()\n    candidates = []\n    for app_config in self.app_configs.values():\n        if object_name.startswith(app_config.name):\n            subpath = object_name.removeprefix(app_config.name)\n            if subpath == '' or subpath[0] == '.':\n                candidates.append(app_config)\n    if candidates:\n        return sorted(candidates, key=lambda ac: -len(ac.name))[0]",
    "docstring": "Look for an app config containing a given object. object_name is the dotted Python path to the object. Return the app config for the inner application in case of nesting. Return None if the object isn't in any registered app config.",
    "type": "method",
    "file_path": "django\\django\\apps\\registry.py",
    "ast_data": "FunctionDef name:get_containing_app_config arg:self arg:object_name arguments arg arg Call Assign For Call If Call Assign Call If BoolOp Compare Compare Call If Return return:yes Call arguments arg Call"
  },
  {
    "library": "tensorflow",
    "name": "assert_is_batch_matrix",
    "source_code": "def assert_is_batch_matrix(tensor):\n    sh = tensor.shape\n    if sh.ndims is not None and sh.ndims < 2:\n        raise ValueError(f'Expected [batch] matrix to have at least two dimensions. Found: {tensor}.')",
    "docstring": "Static assert that has rank or higher.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\linalg\\linear_operator_util.py",
    "ast_data": "FunctionDef name:assert_is_batch_matrix arg:tensor arguments arg Assign If BoolOp Compare Compare Raise Call"
  },
  {
    "library": "matplotlib",
    "name": "changed",
    "source_code": "def changed(self):\n    self._imcache = None\n    super().changed()",
    "docstring": "Call this whenever the mappable is changed so observers can update.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\image.py",
    "ast_data": "FunctionDef name:changed arg:self arguments arg Assign Call Call"
  },
  {
    "library": "tensorflow",
    "name": "NoDependency",
    "source_code": "class NoDependency:\n    __slots__ = ['value']\n\n    def __init__(self, value):\n        self.value = value",
    "docstring": "Allows attribute assignment to objects with no dependency. Example usage: in this example has a dependency on the variable \"dep\", and both attributes contain un-wrapped objects. also works with , but only for checkpoint dependencies: wrapping a in will assign the (unwrapped) to the attribute without a checkpoint dependency, but the will still track the (so it will appear in , and its variables will appear in ).",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\trackable\\data_structures.py",
    "ast_data": "ClassDef name:NoDependency Assign FunctionDef name:__init__ arg:self arg:value arguments arg arg Assign"
  },
  {
    "library": "tensorflow",
    "name": "scatter_nd_sub",
    "source_code": "def scatter_nd_sub(self, indices, updates, name=None):\n    return gen_state_ops.scatter_nd_sub(self._variable, indices, updates, use_locking=True, name=name)",
    "docstring": "Applies sparse subtraction to individual values or slices in a Variable. is a with rank and is a of rank . must be integer tensor, containing indices into . It must be shape where . The innermost dimension of (with length ) corresponds to indices into elements (if ) or slices (if ) along the th dimension of . is of rank with shape: For example, say we want to add 4 scattered elements to a rank-1 tensor to 8 elements. In Python, that update would look like this: The resulting update to ref would look like this: [1, -9, 3, -6, -6, 6, 7, -4] See for more details about how to make updates to slices. Args: indices: The indices to be used in the operation. updates: The values to be used in the operation. name: the name of the operation. Returns: A that will hold the new value of this variable after the scattered subtraction has completed.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\ref_variable.py",
    "ast_data": "FunctionDef name:scatter_nd_sub arg:self arg:indices arg:updates arg:name arguments arg arg arg arg Return return:yes Call"
  },
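A small sketch of the scattered subtraction described above, using `tf.Variable.scatter_nd_sub` with made-up values (the docstring's own 8-element code example is elided in this extraction, so different numbers are used here):

```python
import tensorflow as tf

ref = tf.Variable([10, 20, 30, 40])
indices = tf.constant([[0], [2]])  # shape [2, 1]: two scalar-element indices
updates = tf.constant([1, 3])

# Subtract updates[i] from ref at each indices[i].
ref.scatter_nd_sub(indices, updates)
print(ref.numpy())  # [ 9 20 27 40]
```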
  {
    "library": "pytorch",
    "name": "get_input_idxs_to_check",
    "source_code": "def get_input_idxs_to_check(inputs: Sequence[InputType], static_input_idxs: Sequence[int]) -> Sequence[int]:\n    ids_to_check = []\n    for i, input in enumerate(inputs):\n        if not isinstance(input, torch.Tensor):\n            continue\n        if not is_gpu(input.device.type):\n            continue\n        with maybe_get_suppress_shape_guards_ctx():\n            if i in static_input_idxs and tensor_is_aligned(input):\n                continue\n            if not should_assume_input_aligned(input):\n                continue\n        ids_to_check.append(i)\n    return ids_to_check",
    "docstring": "This function runs at compile time, and generates a list of indices for which we might need to do a copy to preserve alignment requirements.",
    "type": "function",
    "file_path": "pytorch\\torch\\_inductor\\compile_fx.py",
    "ast_data": "FunctionDef name:get_input_idxs_to_check arg:inputs arg:static_input_idxs arguments arg arg Assign For Call If Call If Call With Call If BoolOp Compare Call If Call Call Return return:yes"
  },
  {
    "library": "numpy",
    "name": "greater",
    "source_code": "@array_function_dispatch(_binary_op_dispatcher)\ndef greater(x1, x2):\n    return compare_chararrays(x1, x2, '>', True)",
    "docstring": "Return (x1 > x2) element-wise. Unlike , this comparison is performed by first stripping whitespace characters from the end of the string. This behavior is provided for backward-compatibility with numarray. Parameters ---------- x1, x2 : array_like of str or unicode Input arrays of the same shape. Returns ------- out : ndarray Output array of bools. See Also -------- equal, not_equal, greater_equal, less_equal, less Examples -------- >>> import numpy as np >>> x1 = np.array(['a', 'b', 'c']) >>> np.char.greater(x1, 'b') array([False, False, True])",
    "type": "function",
    "file_path": "numpy\\numpy\\_core\\defchararray.py",
    "ast_data": "FunctionDef name:greater arg:x1 arg:x2 arguments arg arg Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "distributed_function",
    "source_code": "def distributed_function(input_fn):\n    x, y, sample_weights = input_fn()\n    outputs = strategy.run(per_replica_function, args=(x, y, sample_weights))\n    all_outputs = unwrap_outputs(strategy, outputs, with_loss_tensor=mode != ModeKeys.PREDICT)\n    return all_outputs",
    "docstring": "A single step of the distributed execution across replicas.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\distribute\\distributed_training_utils_v1.py",
    "ast_data": "FunctionDef name:distributed_function arg:input_fn arguments arg Assign Call Assign Call Assign Call Compare Return return:yes"
  },
  {
    "library": "kornia",
    "name": "_FeatureExtractor",
    "source_code": "class _FeatureExtractor(Module):\n\n    def __init__(self) -> None:\n        super().__init__()\n        self.hc_block = _HandcraftedBlock()\n        self.lb_block = _LearnableBlock()\n\n    def forward(self, x: Tensor) -> Tensor:\n        x_hc = self.hc_block(x)\n        x_lb = self.lb_block(x_hc)\n        return x_lb",
    "docstring": "Helper class for KeyNet. It loads both, the handcrafted and learnable blocks",
    "type": "class",
    "file_path": "kornia\\kornia\\feature\\keynet.py",
    "ast_data": "ClassDef name:_FeatureExtractor FunctionDef name:__init__ arg:self arguments arg Call Call Assign Call Assign Call FunctionDef name:forward arg:self arg:x arguments arg arg Assign Call Assign Call Return return:yes"
  },
  {
    "library": "cherrypy",
    "name": "InternalRedirect",
    "source_code": "class InternalRedirect(CherryPyException):\n\n    def __init__(self, path, query_string=''):\n        self.request = cherrypy.serving.request\n        self.query_string = query_string\n        if '?' in path:\n            path, self.query_string = path.split('?', 1)\n        path = urllib.parse.urljoin(self.request.path_info, path)\n        self.path = path\n        CherryPyException.__init__(self, path, self.query_string)",
    "docstring": "Exception raised to switch to the handler for a different URL. This exception will redirect processing to another path within the site (without informing the client). Provide the new path as an argument when raising the exception. Provide any params in the querystring for the new URL.",
    "type": "class",
    "file_path": "cherrypy\\cherrypy\\_cperror.py",
    "ast_data": "ClassDef name:InternalRedirect FunctionDef name:__init__ arg:self arg:path arg:query_string arguments arg arg arg Assign Assign If Compare Assign Call Assign Call Assign Call"
  },
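A usage sketch under assumed handler names: raising `InternalRedirect` reruns dispatch for another path within the same request, without the client ever seeing the new URL.

```python
import cherrypy

class Root:
    @cherrypy.expose
    def old(self):
        # The client keeps seeing /old; processing continues at /new.
        raise cherrypy.InternalRedirect('/new?source=old')

    @cherrypy.expose
    def new(self, source=None):
        return 'served internally (source=%s)' % source

if __name__ == '__main__':
    cherrypy.quickstart(Root())
```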
  {
    "library": "pytorch",
    "name": "_load_csv",
    "source_code": "def _load_csv(self, filename, format='compute_only'):\n    assert format == 'compute_only'\n    with open(filename, newline='') as csvfile:\n        reader = csv.reader(csvfile)\n        for rank, row in enumerate(reader):\n            self.pipeline_order[rank] = [_Action.from_str(s) for s in row]\n    self._validate_and_set_stage_mapping(self.pipeline_order)",
    "docstring": "Load a CSV representation of the schedule from a file with the provided filename. This API will most likely get renamed/refactored so is marked as internal for now. format must be \"compute_only\" for PipelineScheduleMulti.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\pipelining\\schedules.py",
    "ast_data": "FunctionDef name:_load_csv arg:self arg:filename arg:format arguments arg arg arg Compare With Call Assign Call For Call Assign Call Call"
  },
  {
    "library": "cherrypy",
    "name": "__call__",
    "source_code": "@staticmethod\ndef __call__(**kwargs):\n\n    def tool_decorator(f):\n        _Vars(f).setdefault('_cp_config', {}).update(kwargs)\n        return f\n    return tool_decorator",
    "docstring": "Decorate for page handlers to set _cp_config.",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\_cpconfig.py",
    "ast_data": "FunctionDef name:__call__ arguments arg FunctionDef name:tool_decorator arg:f arguments arg Call Call Call Return return:yes Return return:yes"
  },
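A sketch of the decorator in use: the keyword arguments are stored on the handler's `_cp_config` dict, which CherryPy consults during dispatch. The stock gzip tool is used here purely as an example key.

```python
import cherrypy

class Root:
    @cherrypy.expose
    @cherrypy.config(**{'tools.gzip.on': True})
    def index(self):
        # The decorator stored {'tools.gzip.on': True} in index._cp_config.
        return 'compressed response body'

print(Root.index._cp_config)  # {'tools.gzip.on': True}
```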
  {
    "library": "tensorflow",
    "name": "PriorityQueue",
    "source_code": "@tf_export('queue.PriorityQueue', v1=['queue.PriorityQueue', 'io.PriorityQueue', 'PriorityQueue'])\n@deprecation.deprecated_endpoints(['io.PriorityQueue', 'PriorityQueue'])\nclass PriorityQueue(QueueBase):\n\n    def __init__(self, capacity, types, shapes=None, names=None, shared_name=None, name='priority_queue'):\n        types = _as_type_list(types)\n        shapes = _as_shape_list(shapes, types)\n        queue_ref = gen_data_flow_ops.priority_queue_v2(component_types=types, shapes=shapes, capacity=capacity, shared_name=_shared_name(shared_name), name=name)\n        priority_dtypes = [_dtypes.int64] + types\n        priority_shapes = [()] + shapes if shapes else shapes\n        super(PriorityQueue, self).__init__(priority_dtypes, priority_shapes, names, queue_ref)",
    "docstring": "A queue implementation that dequeues elements in prioritized order. See for a description of the methods on this class.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\data_flow_ops.py",
    "ast_data": "ClassDef name:PriorityQueue FunctionDef name:__init__ arg:self arg:capacity arg:types arg:shapes arg:names arg:shared_name arg:name arguments arg arg arg arg arg arg arg Assign Call Assign Call Assign Call Call Assign Assign Call Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_apply_mask_1d",
    "source_code": "def _apply_mask_1d(reshaped_tensor, mask, axis=None):\n    indices = squeeze(where_v2(mask), axis=[1])\n    return gather(reshaped_tensor, indices, axis=axis)",
    "docstring": "Mask tensor along dimension 0 with a 1-D mask.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\array_ops.py",
    "ast_data": "FunctionDef name:_apply_mask_1d arg:reshaped_tensor arg:mask arg:axis arguments arg arg arg Assign Call Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "IntArrayRef_summary",
    "source_code": "def IntArrayRef_summary(valobj: Any, internal_dict: Any, options: Any) -> str:\n    with DisableBreakpoints():\n        target = get_target()\n        tensor = valobj.GetName()\n        result = target.EvaluateExpression(f'torch::gdb::int_array_ref_string({tensor})')\n        str_result = str(result)\n        str_result = str_result[str_result.find('\"') + 1:-1]\n        return str_result",
    "docstring": "Print human readable representation of c10::IntArrayRef",
    "type": "function",
    "file_path": "pytorch\\tools\\lldb\\pytorch_lldb.py",
    "ast_data": "FunctionDef name:IntArrayRef_summary arg:valobj arg:internal_dict arg:options arguments arg arg arg With Call Assign Call Assign Call Assign Call Assign Call Assign Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "__init__",
    "source_code": "def __init__(self, offset=(0, 0), **kwargs):\n    super().__init__(offset)\n    self._gc = kwargs",
    "docstring": "The path will be stroked with its gc updated with the given keyword arguments, i.e., the keyword arguments should be valid gc parameter values.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\patheffects.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:offset arguments arg arg arg Call Call Assign"
  },
  {
    "library": "pandas",
    "name": "dtypes",
    "source_code": "@property\ndef dtypes(self) -> Iterable[Dtype]:\n    return self.info.dtypes",
    "docstring": "Dtypes of each of the DataFrame's columns.",
    "type": "method",
    "file_path": "pandas\\pandas\\io\\formats\\info.py",
    "ast_data": "FunctionDef name:dtypes arg:self arguments arg Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "add_auto_adjustable_area",
    "source_code": "def add_auto_adjustable_area(self, use_axes, pad=0.1, adjust_dirs=None):\n    if adjust_dirs is None:\n        adjust_dirs = ['left', 'right', 'bottom', 'top']\n    for d in adjust_dirs:\n        self.append_size(d, Size._AxesDecorationsSize(use_axes, d) + pad)",
    "docstring": "Add auto-adjustable padding around *use_axes* to take their decorations (title, labels, ticks, ticklabels) into account during layout. Parameters ---------- use_axes : or list of The Axes whose decorations are taken into account. pad : float, default: 0.1 Additional padding in inches. adjust_dirs : list of {\"left\", \"right\", \"bottom\", \"top\"}, optional The sides where padding is added; defaults to all four sides.",
    "type": "method",
    "file_path": "matplotlib\\lib\\mpl_toolkits\\axes_grid1\\axes_divider.py",
    "ast_data": "FunctionDef name:add_auto_adjustable_area arg:self arg:use_axes arg:pad arg:adjust_dirs arguments arg arg arg arg If Compare Assign For Call Call"
  },
  {
    "library": "tensorflow",
    "name": "ones_like_impl",
    "source_code": "def ones_like_impl(tensor, dtype, name, optimize=True, layout=None):\n    with ops.name_scope(name, 'ones_like', [tensor]) as name:\n        tensor = ops.convert_to_tensor(tensor, name='tensor')\n        ones_shape = shape_internal(tensor, optimize=optimize)\n        if dtype is None:\n            dtype = tensor.dtype\n        ret = ones(ones_shape, dtype=dtype, name=name, layout=layout)\n        if not context.executing_eagerly():\n            ret.set_shape(tensor.get_shape())\n        return ret",
    "docstring": "Internal implementation for the v1/v2 ones_like API calls.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\array_ops.py",
    "ast_data": "FunctionDef name:ones_like_impl arg:tensor arg:dtype arg:name arg:optimize arg:layout arguments arg arg arg arg arg With Call Assign Call Assign Call If Compare Assign Assign Call If Call Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_update_replica",
    "source_code": "def _update_replica(self, update_fn, value, **kwargs):\n    if self._policy:\n        return self._policy._update_replica(self, update_fn, value, **kwargs)\n    raise NotImplementedError(f'DistributedVariable._update_replica requires a valid VariablePolicy. Please set the policy via the `var_policy` argument in the constructor, or override this method in sub-classes which support cross-replica accesses. Type name is {type(self)}')",
    "docstring": "Applies updates in one replica. Args: update_fn: A callable to update the variable. It should has the same signature as . value: value to be passed to . **kwargs: remaining arguments to . Returns: Updated variable or .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\values.py",
    "ast_data": "FunctionDef name:_update_replica arg:self arg:update_fn arg:value arguments arg arg arg arg If Return return:yes Call Raise Call Call"
  },
  {
    "library": "matplotlib",
    "name": "autoscale",
    "source_code": "def autoscale(self, A):\n    A = np.asanyarray(A)\n    self.halfrange = max(self._vcenter - A.min(), A.max() - self._vcenter)",
    "docstring": "Set *halfrange* to ``, then set *vmin* and *vmax*.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\colors.py",
    "ast_data": "FunctionDef name:autoscale arg:self arg:A arguments arg arg Assign Call Assign Call Call Call"
  },
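The `_vcenter`/`halfrange` attributes suggest this is `matplotlib.colors.CenteredNorm.autoscale`; under that assumption, a sketch of the effect:

```python
import numpy as np
from matplotlib import colors

norm = colors.CenteredNorm(vcenter=0)
norm.autoscale(np.array([-2.0, 1.0, 3.0]))
# halfrange = max(0 - (-2), 3 - 0) = 3, so the mapped range is symmetric:
print(norm.halfrange, norm.vmin, norm.vmax)  # 3.0 -3.0 3.0
```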
  {
    "library": "matplotlib",
    "name": "__init__",
    "source_code": "@_docstring.interpd\ndef __init__(self, bbox, **kwargs):\n    if 'transform' in kwargs:\n        raise ValueError('transform should not be set')\n    kwargs['transform'] = IdentityTransform()\n    super().__init__(**kwargs)\n    self.bbox = bbox",
    "docstring": "Patch showing the shape bounded by a Bbox. Parameters ---------- bbox : Bbox to use for the extents of this patch. **kwargs Patch properties. Valid arguments include: %(Patch:kwdoc)s",
    "type": "method",
    "file_path": "matplotlib\\lib\\mpl_toolkits\\axes_grid1\\inset_locator.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:bbox arguments arg arg arg If Compare Raise Call Assign Call Call Call Assign"
  },
  {
    "library": "django",
    "name": "fields",
    "source_code": "@cached_property\ndef fields(self):\n\n    def is_not_an_m2m_field(f):\n        return not (f.is_relation and f.many_to_many)\n\n    def is_not_a_generic_relation(f):\n        return not (f.is_relation and f.one_to_many)\n\n    def is_not_a_generic_foreign_key(f):\n        return not (f.is_relation and f.many_to_one and (not (hasattr(f.remote_field, 'model') and f.remote_field.model)))\n    return make_immutable_fields_list('fields', (f for f in self._get_fields(reverse=False) if is_not_an_m2m_field(f) and is_not_a_generic_relation(f) and is_not_a_generic_foreign_key(f)))",
    "docstring": "Return a list of all forward fields on the model and its parents, excluding ManyToManyFields. Private API intended only to be used by Django itself; get_fields() combined with filtering of field properties is the public API for obtaining this field list.",
    "type": "method",
    "file_path": "django\\django\\db\\models\\options.py",
    "ast_data": "FunctionDef name:fields arg:self arguments arg FunctionDef name:is_not_an_m2m_field arg:f arguments arg Return return:yes BoolOp FunctionDef name:is_not_a_generic_relation arg:f arguments arg Return return:yes BoolOp FunctionDef name:is_not_a_generic_foreign_key arg:f arguments arg Return return:yes BoolOp BoolOp Call Return return:yes Call Call BoolOp Call Call Call"
  },
  {
    "library": "matplotlib",
    "name": "get_gridlines",
    "source_code": "def get_gridlines(self, which, axis):\n    return []",
    "docstring": "Return list of grid lines as a list of paths (list of points). Parameters ---------- which : {\"both\", \"major\", \"minor\"} axis : {\"both\", \"x\", \"y\"}",
    "type": "method",
    "file_path": "matplotlib\\lib\\mpl_toolkits\\axisartist\\axislines.py",
    "ast_data": "FunctionDef name:get_gridlines arg:self arg:which arg:axis arguments arg arg arg Return return:no"
  },
  {
    "library": "tensorflow",
    "name": "he_uniform",
    "source_code": "def he_uniform(seed=None):\n    return VarianceScaling(scale=2.0, mode='fan_in', distribution='uniform', seed=seed)",
    "docstring": "He uniform variance scaling initializer. Initializers allow you to pre-specify an initialization strategy, encoded in the Initializer object, without knowing the shape and dtype of the variable being initialized. Draws samples from a uniform distribution within [-limit, limit] where is where is the number of input units in the weight tensor. Examples: >>> def make_variables(k, initializer): ... return (tf.Variable(initializer(shape=[k, k], dtype=tf.float32)), ... tf.Variable(initializer(shape=[k, k, k], dtype=tf.float32))) >>> v1, v2 = make_variables(3, tf.initializers.he_uniform()) >>> v1 >> v2 >> make_variables(4, tf.initializers.RandomNormal()) (<tf.Variable ... shape=(4, 4) dtype=float32... <tf.Variable ... shape=(4, 4, 4) dtype=float32... Args: seed: A Python integer. Used to seed the random generator. Returns: A callable Initializer with and arguments which generates a tensor. References: [He et al., 2015]( # pylint: disable=line-too-long ([pdf](",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\init_ops_v2.py",
    "ast_data": "FunctionDef name:he_uniform arg:seed arguments arg Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "__init__",
    "source_code": "def __init__(self, parent, orientation, location, functions, transform=None, **kwargs):\n    _api.check_in_list(['x', 'y'], orientation=orientation)\n    self._functions = functions\n    self._parent = parent\n    self._orientation = orientation\n    self._ticks_set = False\n    fig = self._parent.get_figure(root=False)\n    if self._orientation == 'x':\n        super().__init__(fig, [0, 1.0, 1, 0.0001], **kwargs)\n        self._axis = self.xaxis\n        self._locstrings = ['top', 'bottom']\n        self._otherstrings = ['left', 'right']\n    else:\n        super().__init__(fig, [0, 1.0, 0.0001, 1], **kwargs)\n        self._axis = self.yaxis\n        self._locstrings = ['right', 'left']\n        self._otherstrings = ['top', 'bottom']\n    self._parentscale = None\n    self.set_location(location, transform)\n    self.set_functions(functions)\n    otheraxis = self.yaxis if self._orientation == 'x' else self.xaxis\n    otheraxis.set_major_locator(mticker.NullLocator())\n    otheraxis.set_ticks_position('none')\n    self.spines[self._otherstrings].set_visible(False)\n    self.spines[self._locstrings].set_visible(True)\n    if self._pos < 0.5:\n        self._locstrings = self._locstrings[::-1]\n    self.set_alignment(self._locstrings[0])",
    "docstring": "See and for the doc string. While there is no need for this to be private, it should really be called by those higher level functions.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\axes\\_secondary_axes.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:parent arg:orientation arg:location arg:functions arg:transform arguments arg arg arg arg arg arg arg Call Assign Assign Assign Assign Assign Call If Compare Call Call Assign Assign Assign Call Call Assign Assign Assign Assign Call Call Assign Compare Call Call Call Call Call If Compare Assign Call"
  },
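This initializer is normally reached through the public helpers; a sketch using `Axes.secondary_xaxis`, where `functions` is a (forward, inverse) pair of conversions:

```python
import numpy as np
import matplotlib.pyplot as plt

fig, ax = plt.subplots()
ax.plot(np.arange(0, 360), np.sin(np.deg2rad(np.arange(0, 360))))
ax.set_xlabel('angle [deg]')

# functions must be mutual inverses: deg -> rad and rad -> deg.
secax = ax.secondary_xaxis('top', functions=(np.deg2rad, np.rad2deg))
secax.set_xlabel('angle [rad]')
plt.show()
```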
  {
    "library": "tensorflow",
    "name": "from_proto",
    "source_code": "@classmethod\ndef from_proto(cls, layout_proto: layout_pb2.LayoutProto) -> 'Layout':\n    return cls._new_object(layout_proto=layout_proto)",
    "docstring": "Creates an instance from a LayoutProto.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\dtensor\\python\\layout.py",
    "ast_data": "FunctionDef name:from_proto arg:cls arg:layout_proto arguments arg arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "tpu_core_ids_to_locations",
    "source_code": "def tpu_core_ids_to_locations(self, tpu_core_ids):\n    return _pywrap_dtensor_device.TPUCoreIDsToLocations(context.context()._handle, self._device_info, tpu_core_ids)",
    "docstring": "Translates TPU core IDs to TPU core locations. Args: tpu_core_ids: A list of TPU core IDs. Each one is an unsigned integer. Returns: A list of corresponding TPU core locations.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\dtensor\\python\\dtensor_device.py",
    "ast_data": "FunctionDef name:tpu_core_ids_to_locations arg:self arg:tpu_core_ids arguments arg arg Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_transform_features_v2",
    "source_code": "def _transform_features_v2(features, feature_columns, state_manager):\n    feature_columns = _normalize_feature_columns(feature_columns)\n    outputs = {}\n    with ops.name_scope(None, default_name='transform_features', values=features.values()):\n        transformation_cache = FeatureTransformationCache(features)\n        for column in feature_columns:\n            with ops.name_scope(None, default_name=_sanitize_column_name_for_variable_scope(column.name)):\n                outputs[column] = transformation_cache.get(column, state_manager)\n    return outputs",
    "docstring": "Returns transformed features based on features columns passed in. Please note that most probably you would not need to use this function. Please check and to see whether they will satisfy your use case or not. Example: Args: features: A mapping from key to tensors. s look up via these keys. For example will look at 'price' key in this dict. Values can be a or a depends on corresponding . feature_columns: An iterable containing all the s. state_manager: A StateManager object that holds the FeatureColumn state. Returns: A mapping to and values.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\feature_column\\feature_column_v2.py",
    "ast_data": "FunctionDef name:_transform_features_v2 arg:features arg:feature_columns arg:state_manager arguments arg arg arg Assign Call Assign With Call Call Assign Call For With Call Call Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "broadcast_row_partition",
    "source_code": "def broadcast_row_partition(self, rp):\n    if not rp.is_uniform():\n        return RowPartition.from_row_lengths(self.broadcast_tensor(rp.row_lengths()))\n    else:\n        return RowPartition.from_uniform_row_length(rp.uniform_row_length(), nvals=rp.uniform_row_length() * self.dest_nrows(), nrows=self.dest_nrows())",
    "docstring": "Return a new shape where the rows are broadcasted. *--self--->* | | rp result | | V V *--------->* This is equivalent to: return RowPartition.from_row_lengths(self.broadcast(rp.row_lengths())) However, if the shape has uniform row length, then that property is maintained. Args: rp: a row partition. Returns: a RowPartition representing a broadcast version of this row partition.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\dynamic_ragged_shape.py",
    "ast_data": "FunctionDef name:broadcast_row_partition arg:self arg:rp arguments arg arg If Call Return return:yes Call Call Call Return return:yes Call Call Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_BiasLayer",
    "source_code": "class _BiasLayer(base.Layer):\n\n    def __init__(self, units=1, trainable=True, weight_collections=None, name=None, **kwargs):\n        super(_BiasLayer, self).__init__(trainable=trainable, name=name, **kwargs)\n        self._units = units\n        self._weight_collections = weight_collections\n\n    def build(self, _):\n        self._bias_variable = self.add_variable('bias_weights', shape=[self._units], initializer=init_ops.zeros_initializer(), trainable=self.trainable)\n        _add_to_collections(self._bias_variable, self._weight_collections)\n        self.built = True\n\n    def call(self, _):\n        return self._bias_variable",
    "docstring": "A layer for the bias term.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\feature_column\\feature_column.py",
    "ast_data": "ClassDef name:_BiasLayer FunctionDef name:__init__ arg:self arg:units arg:trainable arg:weight_collections arg:name arguments arg arg arg arg arg arg Call Call Assign Assign FunctionDef name:build arg:self arg:_ arguments arg arg Assign Call Call Call Assign FunctionDef name:call arg:self arg:_ arguments arg arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "evaluate_guards_expression",
    "source_code": "def evaluate_guards_expression(self, code: str, args: Sequence[object]) -> bool:\n    arg_names = [f't{i}' for i in range(len(args))]\n    return eval(code, SYMPY_INTERP, {'L': dict(zip(arg_names, args))})",
    "docstring": "Expected to be used with produce_guards_expression(). Evaluates an expression generated by produce_guards_expression for the given concrete args.",
    "type": "method",
    "file_path": "pytorch\\torch\\fx\\experimental\\symbolic_shapes.py",
    "ast_data": "FunctionDef name:evaluate_guards_expression arg:self arg:code arg:args arguments arg arg arg Assign Call Call Return return:yes Call Call Call"
  },
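A sketch of the evaluation pattern with a made-up guard string: the concrete args are bound as `L['t0']`, `L['t1']`, ... in the eval namespace. The real method additionally injects `SYMPY_INTERP` as the globals dict; plain `{}` is used here for simplicity.

```python
# Hypothetical guard expression of the kind produce_guards_expression emits.
code = "L['t0'] >= 2 and L['t0'] == L['t1']"
args = [3, 3]

arg_names = [f"t{i}" for i in range(len(args))]
print(eval(code, {}, {"L": dict(zip(arg_names, args))}))  # True
```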
  {
    "library": "pandas",
    "name": "isna",
    "source_code": "def isna(self) -> np.ndarray | ExtensionArraySupportsAnyAll:\n    raise AbstractMethodError(self)",
    "docstring": "A 1-D array indicating if each value is missing. Returns ------- numpy.ndarray or pandas.api.extensions.ExtensionArray In most cases, this should return a NumPy ndarray. For exceptional cases like `ExtensionArray._reduceExtensionArray._accumulate` should be implemented Examples -------- >>> arr = pd.array([1, 2, np.nan, np.nan]) >>> arr.isna() array([False, False, True, True])",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\arrays\\base.py",
    "ast_data": "FunctionDef name:isna arg:self arguments arg Raise Call"
  },
  {
    "library": "scikit-learn",
    "name": "named_estimators",
    "source_code": "@property\ndef named_estimators(self):\n    return Bunch(**dict(self.estimators))",
    "docstring": "Dictionary to access any fitted sub-estimators by name. Returns ------- :class:",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\ensemble\\_base.py",
    "ast_data": "FunctionDef name:named_estimators arg:self arguments arg Return return:yes Call Call"
  },
  {
    "library": "kornia",
    "name": "apply_non_transform_keypoint",
    "source_code": "def apply_non_transform_keypoint(self, input: Keypoints, params: Dict[str, Tensor], flags: Dict[str, Any], transform: Optional[Tensor]=None) -> Keypoints:\n    return input",
    "docstring": "Process keypoints corresponding to the inputs that are no transformation applied.",
    "type": "method",
    "file_path": "kornia\\kornia\\augmentation\\base.py",
    "ast_data": "FunctionDef name:apply_non_transform_keypoint arg:self arg:input arg:params arg:flags arg:transform arguments arg arg arg arg arg Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "_generate_get_feature_names_out",
    "source_code": "def _generate_get_feature_names_out(estimator, n_features_out, input_features=None):\n    _check_feature_names_in(estimator, input_features, generate_names=False)\n    estimator_name = estimator.__class__.__name__.lower()\n    return np.asarray([f'{estimator_name}{i}' for i in range(n_features_out)], dtype=object)",
    "docstring": "Generate feature names out for estimator using the estimator name as the prefix. The input_feature names are validated but not used. This function is useful for estimators that generate their own names based on , i.e. PCA. Parameters ---------- estimator : estimator instance Estimator producing output feature names. n_feature_out : int Number of feature names out. input_features : array-like of str or None, default=None Only used to validate feature names with . Returns ------- feature_names_in : ndarray of str or Feature names in.",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\utils\\validation.py",
    "ast_data": "FunctionDef name:_generate_get_feature_names_out arg:estimator arg:n_features_out arg:input_features arguments arg arg arg Call Assign Call Return return:yes Call Call"
  },
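An example of the naming scheme this helper produces, via an estimator that relies on it (PCA generates `pca0`, `pca1`, ... regardless of input feature names):

```python
import numpy as np
from sklearn.decomposition import PCA

X = np.random.RandomState(0).rand(10, 5)
pca = PCA(n_components=2).fit(X)
print(pca.get_feature_names_out())  # ['pca0' 'pca1']
```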
  {
    "library": "django",
    "name": "angular_units",
    "source_code": "@property\ndef angular_units(self):\n    units, name = capi.angular_units(self.ptr, byref(c_char_p()))\n    return units",
    "docstring": "Return the value of the angular units.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\gdal\\srs.py",
    "ast_data": "FunctionDef name:angular_units arg:self arguments arg Assign Call Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "KerasHistory",
    "source_code": "class KerasHistory(collections.namedtuple('KerasHistory', ['layer', 'node_index', 'tensor_index'])):\n    __slots__ = ()",
    "docstring": "Tracks the Layer call that created a Tensor, for Keras Graph Networks. During construction of Keras Graph Networks, this metadata is added to each Tensor produced as the output of a Layer, starting with an . This allows Keras to track how each Tensor was produced, and this information is later retraced by the class to reconstruct the Keras Graph Network. Attributes: layer: The Layer that produced the Tensor. node_index: The specific call to the Layer that produced this Tensor. Layers can be called multiple times in order to share weights. A new node is created every time a Layer is called. tensor_index: The output index for this Tensor. Always zero if the Layer that produced this Tensor only has one output. Nested structures of Tensors are deterministically assigned an index via .",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\node.py",
    "ast_data": "ClassDef name:KerasHistory Call Assign"
  },
  {
    "library": "pytorch",
    "name": "storage_type",
    "source_code": "def storage_type(self):\n    if has_torch_function_unary(self):\n        return handle_torch_function(Tensor.storage_type, (self,), self)\n    torch.storage._warn_typed_storage_removal()\n    return self._typed_storage()._get_legacy_storage_class()",
    "docstring": "storage_type() -> type Returns the type of the underlying storage.",
    "type": "method",
    "file_path": "pytorch\\torch\\_tensor.py",
    "ast_data": "FunctionDef name:storage_type arg:self arguments arg If Call Return return:yes Call Call Return return:yes Call Call"
  },
  {
    "library": "cryptography",
    "name": "serial_number",
    "source_code": "@property\n@abc.abstractmethod\ndef serial_number(self) -> int:\n    pass",
    "docstring": "Returns the serial number of the revoked certificate.",
    "type": "method",
    "file_path": "cryptography\\src\\cryptography\\x509\\base.py",
    "ast_data": "FunctionDef name:serial_number arg:self arguments arg"
  },
  {
    "library": "tensorflow",
    "name": "_EigGrad",
    "source_code": "@ops.RegisterGradient('Eig')\ndef _EigGrad(op: ops.Operation, grad_e, grad_v):\n    e = op.outputs[0]\n    compute_v = op.get_attr('compute_v')\n    with ops.control_dependencies([grad_e, grad_v]):\n        if compute_v:\n            v = op.outputs[1]\n            vt = _linalg.adjoint(v)\n            f = array_ops.matrix_set_diag(_SafeReciprocal(array_ops.expand_dims(e, -2) - array_ops.expand_dims(e, -1)), array_ops.zeros_like(e))\n            f = math_ops.conj(f)\n            vgv = math_ops.matmul(vt, grad_v)\n            mid = array_ops.matrix_diag(grad_e)\n            diag_grad_part = array_ops.matrix_diag(array_ops.matrix_diag_part(math_ops.cast(math_ops.real(vgv), vgv.dtype)))\n            mid += f * (vgv - math_ops.matmul(math_ops.matmul(vt, v), diag_grad_part))\n            grad_a = linalg_ops.matrix_solve(vt, math_ops.matmul(mid, vt))\n        else:\n            _, v = linalg_ops.eig(op.inputs[0])\n            vt = _linalg.adjoint(v)\n            grad_a = linalg_ops.matrix_solve(vt, math_ops.matmul(array_ops.matrix_diag(grad_e), vt))\n        return math_ops.cast(grad_a, op.inputs[0].dtype)",
    "docstring": "Gradient for Eig. Based on eq. 4.77 from paper by Christoph Boeddeker et al. See also \"Computation of eigenvalue and eigenvector derivatives for a general complex-valued eigensystem\" by Nico van der Aa. As for now only distinct eigenvalue case is considered.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\linalg_grad.py",
    "ast_data": "FunctionDef name:_EigGrad arg:op arg:grad_e arg:grad_v arguments arg arg arg Assign Assign Call With Call If Assign Assign Call Assign Call Call Call Call Call Assign Call Assign Call Assign Call Assign Call Call Call Call Call Call Assign Call Call Assign Call Assign Call Assign Call Call Call Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_AddAndReturnScaledIdentity",
    "source_code": "class _AddAndReturnScaledIdentity(_Adder):\n\n    def can_add(self, op1, op2):\n        types = {_type(op1), _type(op2)}\n        return not types.difference(_IDENTITY_FAMILY)\n\n    def _add(self, op1, op2, operator_name, hints):\n        if _type(op1) == _SCALED_IDENTITY:\n            multiplier_1 = op1.multiplier\n        else:\n            multiplier_1 = array_ops.ones(op1.batch_shape_tensor(), dtype=op1.dtype)\n        if _type(op2) == _SCALED_IDENTITY:\n            multiplier_2 = op2.multiplier\n        else:\n            multiplier_2 = array_ops.ones(op2.batch_shape_tensor(), dtype=op2.dtype)\n        return linear_operator_identity.LinearOperatorScaledIdentity(num_rows=op1.range_dimension_tensor(), multiplier=multiplier_1 + multiplier_2, is_non_singular=hints.is_non_singular, is_self_adjoint=hints.is_self_adjoint, is_positive_definite=hints.is_positive_definite, name=operator_name)",
    "docstring": "Handles additions resulting in an Identity family member. The Identity (, ) family is closed under addition. This respects that, and returns an Identity",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\linalg\\linear_operator_addition.py",
    "ast_data": "ClassDef name:_AddAndReturnScaledIdentity FunctionDef name:can_add arg:self arg:op1 arg:op2 arguments arg arg arg Assign Call Call Return return:yes Call FunctionDef name:_add arg:self arg:op1 arg:op2 arg:operator_name arg:hints arguments arg arg arg arg arg If Compare Call Assign Assign Call Call If Compare Call Assign Assign Call Call Return return:yes Call Call"
  },
  {
    "library": "scipy",
    "name": "tvar",
    "source_code": "def tvar(a, limits=None, inclusive=(True, True), axis=0, ddof=1):\n    a = a.astype(float).ravel()\n    if limits is None:\n        n = (~a.mask).sum()\n        return np.ma.var(a) * n / (n - 1.0)\n    am = _mask_to_limits(a, limits=limits, inclusive=inclusive)\n    return np.ma.var(am, axis=axis, ddof=ddof)",
    "docstring": "Compute the trimmed variance This function computes the sample variance of an array of values, while ignoring values which are outside of given . Parameters ---------- a : array_like Array of values. limits : None or (lower limit, upper limit), optional Values in the input array less than the lower limit or greater than the upper limit will be ignored. When limits is None, then all values are used. Either of the limit values in the tuple can also be None representing a half-open interval. The default value is None. inclusive : (bool, bool), optional A tuple consisting of the (lower flag, upper flag). These flags determine whether values exactly equal to the lower or upper limits are included. The default value is (True, True). axis : int or None, optional Axis along which to operate. If None, compute over the whole array. Default is zero. ddof : int, optional Delta degrees of freedom. Default is 1. Returns ------- tvar : float Trimmed variance. Notes ----- For more details on , see .",
    "type": "function",
    "file_path": "scipy\\scipy\\stats\\_mstats_basic.py",
    "ast_data": "FunctionDef name:tvar arg:a arg:limits arg:inclusive arg:axis arg:ddof arguments arg arg arg arg arg Assign Call Call If Compare Assign Call Return return:yes Call Assign Call Return return:yes Call"
  },
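A quick worked example of the masked-stats variant: with limits (2, 5) inclusive, only [2, 3, 4, 5] survive, and their sample variance with ddof=1 is 5/3.

```python
import numpy as np
from scipy.stats import mstats

a = np.ma.array([1, 2, 3, 4, 5, 6])
# mean of [2, 3, 4, 5] is 3.5; sum of squared deviations is 5; 5 / 3 = 1.666...
print(mstats.tvar(a, limits=(2, 5)))
```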
  {
    "library": "tensorflow",
    "name": "is_distributed_table",
    "source_code": "def is_distributed_table(v):\n    return getattr(v, 'is_distributed_table', False)",
    "docstring": "Determine if an object is a DistributedTable.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\distribute_utils.py",
    "ast_data": "FunctionDef name:is_distributed_table arg:v arguments arg Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "clear",
    "source_code": "def clear(self, event):\n    if self.ignore(event):\n        return\n    if self.useblit:\n        for canvas, info in self._canvas_infos.items():\n            if canvas is not canvas.figure.canvas:\n                continue\n            info['background'] = canvas.copy_from_bbox(canvas.figure.bbox)",
    "docstring": "Clear the cursor.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\widgets.py",
    "ast_data": "FunctionDef name:clear arg:self arg:event arguments arg arg If Call Return return:no If For Call If Compare Assign Call"
  },
  {
    "library": "pandas",
    "name": "dtype_to_unit",
    "source_code": "def dtype_to_unit(dtype: DatetimeTZDtype | np.dtype | ArrowDtype) -> str:\n    if isinstance(dtype, DatetimeTZDtype):\n        return dtype.unit\n    elif isinstance(dtype, ArrowDtype):\n        if dtype.kind not in 'mM':\n            raise ValueError(f'dtype={dtype!r} does not have a resolution.')\n        return dtype.pyarrow_dtype.unit\n    return np.datetime_data(dtype)[0]",
    "docstring": "Return the unit str corresponding to the dtype's resolution. Parameters ---------- dtype : DatetimeTZDtype or np.dtype If np.dtype, we assume it is a datetime64 dtype. Returns ------- str",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\arrays\\datetimelike.py",
    "ast_data": "FunctionDef name:dtype_to_unit arg:dtype arguments arg If Call Return return:yes If Call If Compare Raise Call Return return:yes Return return:yes Call"
  },
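For the plain-NumPy branch, the unit comes straight from `np.datetime_data`, whose first tuple element is the resolution string:

```python
import numpy as np

print(np.datetime_data(np.dtype("datetime64[ms]"))[0])   # 'ms'
print(np.datetime_data(np.dtype("timedelta64[ns]"))[0])  # 'ns'
```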
  {
    "library": "tensorflow",
    "name": "build",
    "source_code": "def build(self):\n    for node in self.node_index.values():\n        node.freeze()\n    stmt_next = {}\n    stmt_prev = {}\n    for node in self.node_index.values():\n        for stmt in self.owners[node]:\n            if stmt not in stmt_prev:\n                stmt_prev[stmt] = set()\n            if stmt not in stmt_next:\n                stmt_next[stmt] = set()\n    for first, second in self.forward_edges:\n        stmts_exited = self.owners[first] - self.owners[second]\n        for stmt in stmts_exited:\n            stmt_next[stmt].add(second)\n        stmts_entered = self.owners[second] - self.owners[first]\n        for stmt in stmts_entered:\n            stmt_prev[stmt].add(first)\n    for stmt in stmt_next:\n        stmt_next[stmt] = frozenset(stmt_next[stmt])\n    for stmt in stmt_prev:\n        stmt_prev[stmt] = frozenset(stmt_prev[stmt])\n    result = Graph(entry=self.head, exit=self.leaves, error=self.errors, index=self.node_index, stmt_prev=stmt_prev, stmt_next=stmt_next)\n    self.reset()\n    return result",
    "docstring": "Returns the CFG accumulated so far and resets the builder. Returns: Graph",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\autograph\\pyct\\cfg.py",
    "ast_data": "FunctionDef name:build arg:self arguments arg For Call Call Assign Assign For Call For If Compare Assign Call If Compare Assign Call For Assign For Call Assign For Call For Assign Call For Assign Call Assign Call Call Return return:yes"
  },
  {
    "library": "scipy",
    "name": "BenchLeastSquares",
    "source_code": "class BenchLeastSquares(Benchmark):\n    problems = extract_lsq_problems()\n    params = [list(problems.keys()), ['average time', 'nfev', 'success']]\n    param_names = ['problem', 'result type']\n\n    def track_all(self, problem_name, result_type):\n        problem = self.problems[problem_name]\n        if problem.lb is not None or problem.ub is not None:\n            raise NotImplementedError\n        ftol = 1e-05\n        if result_type == 'average time':\n            n_runs = 10\n            t0 = time.time()\n            for _ in range(n_runs):\n                leastsq(problem.fun, problem.x0, Dfun=problem.jac, ftol=ftol, full_output=True)\n            return (time.time() - t0) / n_runs\n        x, cov_x, info, message, ier = leastsq(problem.fun, problem.x0, Dfun=problem.jac, ftol=ftol, full_output=True)\n        if result_type == 'nfev':\n            return info['nfev']\n        elif result_type == 'success':\n            return int(problem.check_answer(x, ftol))\n        else:\n            raise NotImplementedError",
    "docstring": "Class for benchmarking nonlinear least squares solvers.",
    "type": "class",
    "file_path": "scipy\\benchmarks\\benchmarks\\optimize.py",
    "ast_data": "ClassDef name:BenchLeastSquares Assign Call Assign Call Call Assign FunctionDef name:track_all arg:self arg:problem_name arg:result_type arguments arg arg arg Assign If BoolOp Compare Compare Raise Assign If Compare Assign Assign Call For Call Call Return return:yes Call Assign Call If Compare Return return:yes If Compare Return return:yes Call Call Raise"
  },
  {
    "library": "pytorch",
    "name": "_FSDPModMemStats",
    "source_code": "class _FSDPModMemStats:\n\n    def __init__(self, mod_fqn: str) -> None:\n        self.mod_fqn = mod_fqn\n        self.local_peak: dict[torch.device, int] = {}\n        self.snapshots: dict[_FSDPModState, list[dict[torch.device, dict[str, int]]]] = {}",
    "docstring": "A class to store the memory statistics of an FSDP module. Args: mod_fqn (str): The fully qualified name of the FSDP module. Attributes: snapshots (Dict[_FSDPModState, Dict[torch.device, Dict[str, int]]]): A dictionary of memory snapshots of the module at different states as defined by `` and values as the memory consumed in bytes.",
    "type": "class",
    "file_path": "pytorch\\torch\\distributed\\_tools\\fsdp2_mem_tracker.py",
    "ast_data": "ClassDef name:_FSDPModMemStats FunctionDef name:__init__ arg:self arg:mod_fqn arguments arg arg Assign"
  },
  {
    "library": "pytorch",
    "name": "compute_cosine_similarity",
    "source_code": "@maybe_dequantize_first_two_tensor_args_and_handle_tuples\ndef compute_cosine_similarity(x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:\n    x = x.reshape(1, -1)\n    y = y.reshape(1, -1)\n    return torch.nn.functional.cosine_similarity(x, y)",
    "docstring": "Computes the cosine similarity between and . Args: x: Tensor or tuple of tensors y: Tensor or tuple of tensors Return: float or tuple of floats",
    "type": "function",
    "file_path": "pytorch\\torch\\ao\\ns\\fx\\utils.py",
    "ast_data": "FunctionDef name:compute_cosine_similarity arg:x arg:y arguments arg arg Assign Call Assign Call Return return:yes Call"
  },
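The reshape-to-a-single-row step means tensors of any shape are compared as flat vectors; a sketch of the same computation with plain `torch.nn.functional.cosine_similarity`:

```python
import torch
import torch.nn.functional as F

x = torch.tensor([[1.0, 0.0], [0.0, 1.0]])
y = torch.tensor([[1.0, 0.0], [1.0, 0.0]])

# Flattened: [1, 0, 0, 1] vs [1, 0, 1, 0]; dot = 1, each norm = sqrt(2).
print(F.cosine_similarity(x.reshape(1, -1), y.reshape(1, -1)))  # tensor([0.5000])
```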
  {
    "library": "tensorflow",
    "name": "_is_type_subset",
    "source_code": "def _is_type_subset(a, b):\n    if isinstance(a, type_spec.TypeSpec):\n        return a.most_specific_compatible_type(b) == a\n    return True",
    "docstring": "Returns true if is a subset of type (or if a is not a TypeSpec.)",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\polymorphic_function\\concrete_function.py",
    "ast_data": "FunctionDef name:_is_type_subset arg:a arg:b arguments arg arg If Call Return return:yes Compare Call Return return:yes"
  },
  {
    "library": "numpy",
    "name": "_fix_defaults",
    "source_code": "def _fix_defaults(output, defaults=None):\n    names = output.dtype.names\n    data, mask, fill_value = (output.data, output.mask, output.fill_value)\n    for k, v in (defaults or {}).items():\n        if k in names:\n            fill_value[k] = v\n            data[k][mask[k]] = v\n    return output",
    "docstring": "Update the fill_value and masked data of from the default given in a dictionary defaults.",
    "type": "function",
    "file_path": "numpy\\numpy\\lib\\recfunctions.py",
    "ast_data": "FunctionDef name:_fix_defaults arg:output arg:defaults arguments arg arg Assign Assign For Call BoolOp If Compare Assign Assign Return return:yes"
  },
  {
    "library": "kornia",
    "name": "dedode_descriptor_B",
    "source_code": "def dedode_descriptor_B(amp_dtype: torch.dtype=torch.float16) -> DeDoDeDescriptor:\n    NUM_PROTOTYPES = 256\n    residual = True\n    hidden_blocks = 5\n    amp = True\n    conv_refiner = nn.ModuleDict({'8': ConvRefiner(512, 512, 256 + NUM_PROTOTYPES, hidden_blocks=hidden_blocks, residual=residual, amp=amp, amp_dtype=amp_dtype), '4': ConvRefiner(256 + 256, 256, 128 + NUM_PROTOTYPES, hidden_blocks=hidden_blocks, residual=residual, amp=amp, amp_dtype=amp_dtype), '2': ConvRefiner(128 + 128, 64, 32 + NUM_PROTOTYPES, hidden_blocks=hidden_blocks, residual=residual, amp=amp, amp_dtype=amp_dtype), '1': ConvRefiner(64 + 32, 32, 1 + NUM_PROTOTYPES, hidden_blocks=hidden_blocks, residual=residual, amp=amp, amp_dtype=amp_dtype)})\n    encoder = VGG19(amp=amp, amp_dtype=amp_dtype)\n    decoder = Decoder(conv_refiner, num_prototypes=NUM_PROTOTYPES)\n    model = DeDoDeDescriptor(encoder=encoder, decoder=decoder)\n    return model",
    "docstring": "Get DeDoDe descriptor of type B.",
    "type": "function",
    "file_path": "kornia\\kornia\\feature\\dedode\\dedode_models.py",
    "ast_data": "FunctionDef name:dedode_descriptor_B arg:amp_dtype arguments arg Assign Assign Assign Assign Assign Call Call Call Call Call Assign Call Assign Call Assign Call Return return:yes"
  },
  {
    "library": "pandas",
    "name": "_blklocs",
    "source_code": "@property\ndef _blklocs(self) -> None:\n    return None",
    "docstring": "compat with BlockManager",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\internals\\managers.py",
    "ast_data": "FunctionDef name:_blklocs arg:self arguments arg Return return:no"
  },
  {
    "library": "matplotlib",
    "name": "set_aspect",
    "source_code": "def set_aspect(self, *args, **kwargs):\n    _api.warn_external(\"Secondary Axes can't set the aspect ratio\")",
    "docstring": "Secondary Axes cannot set the aspect ratio, so calling this just sets a warning.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\axes\\_secondary_axes.py",
    "ast_data": "FunctionDef name:set_aspect arg:self arguments arg arg arg Call"
  },
  {
    "library": "scikit-learn",
    "name": "sqeuclidean_row_norms",
    "source_code": "def sqeuclidean_row_norms(X, num_threads):\n    if X.dtype == np.float64:\n        return np.asarray(_sqeuclidean_row_norms64(X, num_threads))\n    if X.dtype == np.float32:\n        return np.asarray(_sqeuclidean_row_norms32(X, num_threads))\n    raise ValueError(f'Only float64 or float32 datasets are supported at this time, got: X.dtype={X.dtype}.')",
    "docstring": "Compute the squared euclidean norm of the rows of X in parallel. Parameters ---------- X : ndarray or CSR matrix of shape (n_samples, n_features) Input data. Must be c-contiguous. num_threads : int The number of OpenMP threads to use. Returns ------- sqeuclidean_row_norms : ndarray of shape (n_samples,) Arrays containing the squared euclidean norm of each row of X.",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\metrics\\_pairwise_distances_reduction\\_dispatcher.py",
    "ast_data": "FunctionDef name:sqeuclidean_row_norms arg:X arg:num_threads arguments arg arg If Compare Return return:yes Call Call If Compare Return return:yes Call Call Raise Call"
  },
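The dispatched Cython kernels compute the same quantity as this pure-NumPy one-liner, shown here for reference only (it is not the library's implementation, which parallelizes over OpenMP threads):

```python
import numpy as np

X = np.random.rand(4, 3)
row_norms = (X ** 2).sum(axis=1)  # squared L2 norm of each row, shape (4,)
print(row_norms)
```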
  {
    "library": "pytorch",
    "name": "elementwise_unary_scalar_wrapper",
    "source_code": "def elementwise_unary_scalar_wrapper(fn: Callable[_P, _T]) -> Callable[_P, Union[_T, NumberType]]:\n    sig = inspect.signature(fn)\n\n    @wraps(fn)\n    def _fn(*args, **kwargs):\n        if len(args) > 0 and isinstance(args[0], Number):\n            dtype = utils.type_to_dtype(type(args[0]))\n            args_ = list(args)\n            args_[0] = torch.tensor(args[0], dtype=dtype)\n            result = fn(*args_, **kwargs)\n            assert isinstance(result, torch.Tensor)\n            return result.item()\n        return fn(*args, **kwargs)\n    _fn.__signature__ = sig\n    return _fn",
    "docstring": "Allows unary operators that accept tensors to work with Python numbers.",
    "type": "function",
    "file_path": "pytorch\\torch\\_prims_common\\wrappers.py",
    "ast_data": "FunctionDef name:elementwise_unary_scalar_wrapper arg:fn arguments arg Assign Call FunctionDef name:_fn arguments arg arg If BoolOp Compare Call Call Assign Call Call Assign Call Assign Call Assign Call Call Return return:yes Call Return return:yes Call Call Assign Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "_initialize_x_y",
    "source_code": "def _initialize_x_y(self, z):\n    if z.ndim != 2:\n        raise TypeError(f'Input z must be 2D, not {z.ndim}D')\n    elif z.shape[0] < 2 or z.shape[1] < 2:\n        raise TypeError(f'Input z must be at least a (2, 2) shaped array, but has shape {z.shape}')\n    else:\n        Ny, Nx = z.shape\n    if self.origin is None:\n        if self.extent is None:\n            return np.meshgrid(np.arange(Nx), np.arange(Ny))\n        else:\n            x0, x1, y0, y1 = self.extent\n            x = np.linspace(x0, x1, Nx)\n            y = np.linspace(y0, y1, Ny)\n            return np.meshgrid(x, y)\n    if self.extent is None:\n        x0, x1, y0, y1 = (0, Nx, 0, Ny)\n    else:\n        x0, x1, y0, y1 = self.extent\n    dx = (x1 - x0) / Nx\n    dy = (y1 - y0) / Ny\n    x = x0 + (np.arange(Nx) + 0.5) * dx\n    y = y0 + (np.arange(Ny) + 0.5) * dy\n    if self.origin == 'upper':\n        y = y[::-1]\n    return np.meshgrid(x, y)",
    "docstring": "Return X, Y arrays such that contour(Z) will match imshow(Z) if origin is not None. The center of pixel Z[i, j] depends on origin: if origin is None, x = j, y = i; if origin is 'lower', x = j + 0.5, y = i + 0.5; if origin is 'upper', x = j + 0.5, y = Nrows - i - 0.5 If extent is not None, x and y will be scaled to match, as in imshow. If origin is None and extent is not None, then extent will give the minimum and maximum values of x and y.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\contour.py",
    "ast_data": "FunctionDef name:_initialize_x_y arg:self arg:z arguments arg arg If Compare Raise Call If BoolOp Compare Compare Raise Call Assign If Compare If Compare Return return:yes Call Call Call Assign Assign Call Assign Call Return return:yes Call If Compare Assign Assign Assign Assign Assign Call Assign Call If Compare Assign Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "_FuncWrapper",
    "source_code": "class _FuncWrapper:\n\n    def __init__(self, function):\n        self.function = function\n        update_wrapper(self, self.function)\n\n    def with_config_and_warning_filters(self, config, warning_filters):\n        self.config = config\n        self.warning_filters = warning_filters\n        return self\n\n    def __call__(self, *args, **kwargs):\n        config = getattr(self, 'config', {})\n        warning_filters = getattr(self, 'warning_filters', [])\n        if not config or not warning_filters:\n            warnings.warn('`sklearn.utils.parallel.delayed` should be used with `sklearn.utils.parallel.Parallel` to make it possible to propagate the scikit-learn configuration of the current thread to the joblib workers.', UserWarning)\n        with config_context(**config), warnings.catch_warnings():\n            warnings.filters = warning_filters\n            return self.function(*args, **kwargs)",
    "docstring": "Load the global configuration before calling the function.",
    "type": "class",
    "file_path": "scikit-learn\\sklearn\\utils\\parallel.py",
    "ast_data": "ClassDef name:_FuncWrapper FunctionDef name:__init__ arg:self arg:function arguments arg arg Assign Call FunctionDef name:with_config_and_warning_filters arg:self arg:config arg:warning_filters arguments arg arg arg Assign Assign Return return:yes FunctionDef name:__call__ arg:self arguments arg arg arg Assign Call Assign Call If BoolOp Call With Call Call Assign Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "np_asarray",
    "source_code": "def np_asarray(values, dtype=None, order=None, copy=None):\n    if np.lib.NumpyVersion(np.__version__) >= '2.0.0.dev0':\n        if dtype is not None and np.issubdtype(dtype, np.number):\n            return np.asarray(values, order=order, copy=copy).astype(dtype, copy=copy)\n        else:\n            return np.asarray(values, dtype=dtype, order=order, copy=copy)\n    else:\n        return np.asarray(values, dtype=dtype, order=order)",
    "docstring": "Converts input values to a NumPy array. It will not make a copy. In NumPy 2.x and later, strict type casting can lead to errors when values overflow the specified dtype. This function addresses this by replacing direct np.array(..., dtype=...) calls with np.array(...).astype(...). This allows for intended overflows, aligning with the behavior of older NumPy versions. Args: values: Array_like objects. E.g., a python list, tuple, or an object whose __array__ method returns an array. dtype: The desired numpy data type for the array. order: {‘C’, ‘F’, ‘A’, ‘K’}. copy: bool. If True, then the object is copied. If None then the object is copied only if needed, i.e. if __array__ returns a copy, if obj is a nested sequence, or if a copy is needed to satisfy any of the other requirements (dtype, order, etc.). For False it raises a ValueError if a copy cannot be avoided. Returns: A NumPy array with the specified data type.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\util\\numpy_compat.py",
    "ast_data": "FunctionDef name:np_asarray arg:values arg:dtype arg:order arg:copy arguments arg arg arg arg If Compare Call If BoolOp Compare Call Return return:yes Call Call Return return:yes Call Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "_from_any",
    "source_code": "@classmethod\ndef _from_any(cls, arg):\n    if arg is None:\n        return cls()\n    elif isinstance(arg, cls):\n        return arg\n    elif isinstance(arg, os.PathLike):\n        return cls(fname=arg)\n    elif isinstance(arg, str):\n        return cls(arg)\n    else:\n        return cls(**arg)",
    "docstring": "Generic constructor which can build a from any of the following: - a : it is passed through as is; - : a using rc values is used; - an : it is used as path to the font file; - a : it is parsed as a fontconfig pattern; - a : it is passed as `.FontProperties`.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\font_manager.py",
    "ast_data": "FunctionDef name:_from_any arg:cls arg:arg arguments arg arg If Compare Return return:yes Call If Call Return return:yes If Call Return return:yes Call If Call Return return:yes Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_utcnow",
    "source_code": "def _utcnow():\n    return datetime.datetime.utcnow()",
    "docstring": "A wrapper function around datetime.datetime.utcnow. This function is created for unit testing purpose. It's not easy to do StubOutWithMock with datetime.datetime package. Returns: datetime.datetime",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\tpu\\client\\client.py",
    "ast_data": "FunctionDef name:_utcnow arguments Return return:yes Call"
  },
  {
    "library": "cherrypy",
    "name": "bare_error",
    "source_code": "def bare_error(extrabody=None):\n    body = b'Unrecoverable error in the server.'\n    if extrabody is not None:\n        if not isinstance(extrabody, bytes):\n            extrabody = extrabody.encode('utf-8')\n        body += b'\\n' + extrabody\n    return (b'500 Internal Server Error', [(b'Content-Type', b'text/plain'), (b'Content-Length', ntob(str(len(body)), 'ISO-8859-1'))], [body])",
    "docstring": "Produce status, headers, body for a critical error. Returns a triple without calling any other questionable functions, so it should be as error-free as possible. Call it from an HTTP server if you get errors outside of the request. If extrabody is None, a friendly but rather unhelpful error message is set in the body. If extrabody is a string, it will be appended as-is to the body.",
    "type": "function",
    "file_path": "cherrypy\\cherrypy\\_cperror.py",
    "ast_data": "FunctionDef name:bare_error arg:extrabody arguments arg Assign If Compare If Call Assign Call Return return:yes Call Call Call"
  },
  {
    "library": "scipy",
    "name": "_standard_normal_samples",
    "source_code": "def _standard_normal_samples(self, n: IntNumber=1) -> np.ndarray:\n    samples = self.engine.random(n)\n    if self._inv_transform:\n        return stats.norm.ppf(0.5 + (1 - 1e-10) * (samples - 0.5))\n    else:\n        even = np.arange(0, samples.shape[-1], 2)\n        Rs = np.sqrt(-2 * np.log(samples[:, even]))\n        thetas = 2 * math.pi * samples[:, 1 + even]\n        cos = np.cos(thetas)\n        sin = np.sin(thetas)\n        transf_samples = np.stack([Rs * cos, Rs * sin], -1).reshape(n, -1)\n        return transf_samples[:, :self._d]",
    "docstring": "Draw QMC samples from the standard Normal :math:. Parameters ---------- n : int, optional Number of samples to generate in the parameter space. Default is 1. Returns ------- sample : array_like (n, d) Sample.",
    "type": "method",
    "file_path": "scipy\\scipy\\stats\\_qmc.py",
    "ast_data": "FunctionDef name:_standard_normal_samples arg:self arg:n arguments arg arg Assign Call If Return return:yes Call Assign Call Assign Call Call Assign Assign Call Assign Call Assign Call Call Return return:yes"
  },
  {
    "library": "pandas",
    "name": "reset",
    "source_code": "def reset(self) -> None:\n    self.__init__()",
    "docstring": "Reset the option store to its initial state Returns ------- None",
    "type": "method",
    "file_path": "pandas\\pandas\\plotting\\_misc.py",
    "ast_data": "FunctionDef name:reset arg:self arguments arg Call"
  },
  {
    "library": "pytorch",
    "name": "assign_memory_planning_info_for_scheduler_buffers",
    "source_code": "def assign_memory_planning_info_for_scheduler_buffers(nodes: list[BaseSchedulerNode], name_to_buf: dict[str, SchedulerBuffer]) -> None:\n    sched_buf_to_size = compute_size_for_scheduler_buffer(name_to_buf)\n    dep_name_to_succ_nodes: dict[str, OrderedSet[BaseSchedulerNode]] = collections.defaultdict(OrderedSet)\n    for node in nodes:\n        for dep in node.unmet_dependencies:\n            dep_name_to_succ_nodes[dep.name].add(node)\n    for buf_name in name_to_buf.keys():\n        name_to_buf[buf_name].mpi_buffer = MemoryPlanningInfoForBuffer(size_alloc=sched_buf_to_size[buf_name][0], size_free=sched_buf_to_size[buf_name][1], succ_nodes=dep_name_to_succ_nodes[buf_name])",
    "docstring": "For each SchedulerBuffer, assign its size info and successor nodes. A buffer's successor nodes determines when a buffer can be freed.",
    "type": "function",
    "file_path": "pytorch\\torch\\_inductor\\memory.py",
    "ast_data": "FunctionDef name:assign_memory_planning_info_for_scheduler_buffers arg:nodes arg:name_to_buf arguments arg arg Assign Call Call For For Call For Call Assign Call"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, num_evals, log_progress=True):\n    self._num_evals = num_evals\n    self._evals_completed = None\n    self._log_progress = log_progress\n    self._log_frequency = 1 if num_evals is None or num_evals < 20 else math.floor(num_evals / 10.0)",
    "docstring": "Constructs the run hook. Args: num_evals: The number of evaluations to run for. if set to None, will iterate the dataset until all inputs are exhausted. log_progress: Whether to log evaluation progress, defaults to True.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\training\\evaluation.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:num_evals arg:log_progress arguments arg arg arg Assign Assign Assign Assign BoolOp Compare Compare Call"
  },
  {
    "library": "matplotlib",
    "name": "ColorConverter",
    "source_code": "class ColorConverter:\n    colors = _colors_full_map\n    cache = _colors_full_map.cache\n    to_rgb = staticmethod(to_rgb)\n    to_rgba = staticmethod(to_rgba)\n    to_rgba_array = staticmethod(to_rgba_array)",
    "docstring": "A class only kept for backwards compatibility. Its functionality is entirely provided by module-level functions.",
    "type": "class",
    "file_path": "matplotlib\\lib\\matplotlib\\colors.py",
    "ast_data": "ClassDef name:ColorConverter Assign Assign Assign Call Assign Call Assign Call"
  },
  {
    "library": "tensorflow",
    "name": "_alias_id",
    "source_code": "def _alias_id(self):\n    alias_id = None\n    if self.dtype._handle_data:\n        alias_id = self.dtype._handle_data.alias_id\n    return alias_id",
    "docstring": "Returns an id specifying identical tensors to avoid duplication.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\tensor.py",
    "ast_data": "FunctionDef name:_alias_id arg:self arguments arg Assign If Assign Return return:yes"
  },
  {
    "library": "numpy",
    "name": "DLPMethods",
    "source_code": "class DLPMethods(Benchmark):\n    params = [['__dlpack__', '__dlpack_device__'], DLPACK_TYPES]\n    param_names = ['methods', 'npdtypes']\n    timeout = 10\n\n    def setup(self, methname, npdtypes):\n        values = get_squares_()\n        if npdtypes == 'bool':\n            if version.parse(np.__version__) > version.parse('1.25'):\n                self.xarg = values.get('int16')[0].astype('bool')\n            else:\n                raise NotImplementedError('Not supported before v1.25')\n        else:\n            self.xarg = values.get('int16')[0]\n\n    def time_ndarray_dlp(self, methname, npdtypes):\n        meth = getattr(self.xarg, methname)\n        meth()",
    "docstring": "Benchmark for DLPACK helpers",
    "type": "class",
    "file_path": "numpy\\benchmarks\\benchmarks\\bench_ufunc.py",
    "ast_data": "ClassDef name:DLPMethods Assign Assign Assign FunctionDef name:setup arg:self arg:methname arg:npdtypes arguments arg arg arg Assign Call If Compare If Compare Call Call Assign Call Call Raise Call Assign Call FunctionDef name:time_ndarray_dlp arg:self arg:methname arg:npdtypes arguments arg arg arg Assign Call Call"
  },
  {
    "library": "pandas",
    "name": "__getitem__",
    "source_code": "def __getitem__(self, key: int | slice) -> Series:\n    from pandas import Series\n    if isinstance(key, int):\n        element = pc.list_element(self._pa_array, key)\n        return Series(element, dtype=ArrowDtype(element.type), index=self._data.index, name=self._data.name)\n    elif isinstance(key, slice):\n        if pa_version_under11p0:\n            raise NotImplementedError(f'List slice not supported by pyarrow {pa.__version__}.')\n        start, stop, step = (key.start, key.stop, key.step)\n        if start is None:\n            start = 0\n        if step is None:\n            step = 1\n        sliced = pc.list_slice(self._pa_array, start, stop, step)\n        return Series(sliced, dtype=ArrowDtype(sliced.type), index=self._data.index, name=self._data.name)\n    else:\n        raise ValueError(f'key must be an int or slice, got {type(key).__name__}')",
    "docstring": "Index or slice lists in the Series. Parameters ---------- key : int | slice Index or slice of indices to access from each list. Returns ------- pandas.Series The list at requested index. See Also -------- ListAccessor.flatten : Flatten list values. Examples -------- >>> import pyarrow as pa >>> s = pd.Series( ... [ ... [1, 2, 3], ... [3], ... ], ... dtype=pd.ArrowDtype(pa.list_(pa.int64())), ... ) >>> s.list[0] 0 1 1 3 dtype: int64[pyarrow]",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\arrays\\arrow\\accessors.py",
    "ast_data": "FunctionDef name:__getitem__ arg:self arg:key arguments arg arg If Call Assign Call Return return:yes Call Call If Call If Raise Call Assign If Compare Assign If Compare Assign Assign Call Return return:yes Call Call Raise Call Call"
  },
  {
    "library": "numpy",
    "name": "dot",
    "source_code": "def dot(self, b, out=None, strict=False):\n    return dot(self, b, out=out, strict=strict)",
    "docstring": "a.dot(b, out=None) Masked dot product of two arrays. Note that and are located in different positions than in . In order to maintain compatibility with the functional version, it is recommended that the optional arguments be treated as keyword only. At some point that may be mandatory. Parameters ---------- b : masked_array_like Inputs array. out : masked_array, optional Output argument. This must have the exact kind that would be returned if it was not used. In particular, it must have the right type, must be C-contiguous, and its dtype must be the dtype that would be returned for . This is a performance feature. Therefore, if these conditions are not met, an exception is raised, instead of attempting to be flexible. strict : bool, optional Whether masked data are propagated (True) or set to 0 (False) for the computation. Default is False. Propagating the mask means that if a masked value appears in a row or column, the whole row or column is considered masked. See Also -------- numpy.ma.dot : equivalent function",
    "type": "method",
    "file_path": "numpy\\numpy\\ma\\core.py",
    "ast_data": "FunctionDef name:dot arg:self arg:b arg:out arg:strict arguments arg arg arg arg Return return:yes Call"
  },
  {
    "library": "django",
    "name": "kmz",
    "source_code": "def kmz(request, label, model, field_name=None, using=DEFAULT_DB_ALIAS):\n    return kml(request, label, model, field_name, compress=True, using=using)",
    "docstring": "Return KMZ for the given app label, model, and field name.",
    "type": "function",
    "file_path": "django\\django\\contrib\\gis\\sitemaps\\views.py",
    "ast_data": "FunctionDef name:kmz arg:request arg:label arg:model arg:field_name arg:using arguments arg arg arg arg arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "enqueue",
    "source_code": "def enqueue(self, vals, name=None):\n    with ops.name_scope(name, '%s_enqueue' % self._name, self._scope_vals(vals)) as scope:\n        vals = self._check_enqueue_dtypes(vals)\n        for val, shape in zip(vals, self._shapes):\n            val.get_shape().assert_is_compatible_with(shape)\n        if self._queue_ref.dtype == _dtypes.resource:\n            return gen_data_flow_ops.queue_enqueue_v2(self._queue_ref, vals, name=scope)\n        else:\n            return gen_data_flow_ops.queue_enqueue(self._queue_ref, vals, name=scope)",
    "docstring": "Enqueues one element to this queue. If the queue is full when this operation executes, it will block until the element has been enqueued. At runtime, this operation may raise an error if the queue is before or during its execution. If the queue is closed before this operation runs, will be raised. If this operation is blocked, and either (i) the queue is closed by a close operation with , or (ii) the session is , will be raised. >>> q = tf.queue.FIFOQueue(capacity=3, dtypes=tf.int32) >>> q.enqueue(1) >>> q.enqueue(2) >>> q.size() >>> q = tf.queue.FIFOQueue(2, tf.int32, shapes=tf.TensorShape(4)) >>> q.enqueue(tf.constant([1, 2, 3, 4], dtype=tf.int32)) >>> q.size() Args: vals: A tensor, a list or tuple of tensors, or a dictionary containing the values to enqueue. name: A name for the operation (optional). Returns: The operation that enqueues a new tuple of tensors to the queue.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\data_flow_ops.py",
    "ast_data": "FunctionDef name:enqueue arg:self arg:vals arg:name arguments arg arg arg With Call Call Assign Call For Call Call Call If Compare Return return:yes Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "set_submodule",
    "source_code": "def set_submodule(self, target: str, module: 'Module', strict: bool=False) -> None:\n    if target == '':\n        raise ValueError('Cannot set the submodule without a target name!')\n    atoms: list[str] = target.split('.')\n    if not isinstance(module, torch.nn.Module):\n        raise ValueError('`' + 'module' + f'` is not an nn.Module, found {type(module)}')\n    if len(atoms) == 1:\n        parent: torch.nn.Module = self\n    else:\n        parent_key = '.'.join(atoms[:-1])\n        parent = self.get_submodule(parent_key)\n    if strict and (not hasattr(parent, atoms[-1])):\n        raise AttributeError(parent._get_name() + ' has no attribute `' + atoms[-1] + '`')\n    if hasattr(parent, atoms[-1]):\n        mod = getattr(parent, atoms[-1])\n        if not isinstance(mod, torch.nn.Module):\n            raise AttributeError('`' + atoms[-1] + '` is not an nn.Module')\n    setattr(parent, atoms[-1], module)",
    "docstring": "Set the submodule given by ``.",
    "type": "method",
    "file_path": "pytorch\\torch\\nn\\modules\\module.py",
    "ast_data": "FunctionDef name:set_submodule arg:self arg:target arg:module arg:strict arguments arg arg arg arg If Compare Raise Call Call If Call Raise Call Call If Compare Call Assign Call Assign Call If BoolOp Call Raise Call Call If Call Assign Call If Call Raise Call Call"
  },
  {
    "library": "pygame",
    "name": "change_layer",
    "source_code": "def change_layer(self, sprite, new_layer):\n    sprites = self._spritelist\n    sprites_layers = self._spritelayers\n    sprites.remove(sprite)\n    sprites_layers.pop(sprite)\n    leng = len(sprites)\n    low = mid = 0\n    high = leng - 1\n    while low <= high:\n        mid = low + (high - low) // 2\n        if sprites_layers[sprites[mid]] <= new_layer:\n            low = mid + 1\n        else:\n            high = mid - 1\n    while mid < leng and sprites_layers[sprites[mid]] <= new_layer:\n        mid += 1\n    sprites.insert(mid, sprite)\n    if hasattr(sprite, '_layer'):\n        setattr(sprite, '_layer', new_layer)\n    sprites_layers[sprite] = new_layer",
    "docstring": "change the layer of the sprite LayeredUpdates.change_layer(sprite, new_layer): return None The sprite must have been added to the renderer already. This is not checked.",
    "type": "method",
    "file_path": "pygame\\src_py\\sprite.py",
    "ast_data": "FunctionDef name:change_layer arg:self arg:sprite arg:new_layer arguments arg arg arg Assign Assign Call Call Assign Call Assign Assign While Compare Assign If Compare Assign Assign While BoolOp Compare Compare Call If Call Call Assign"
  },
  {
    "library": "kornia",
    "name": "from_qxyz",
    "source_code": "@classmethod\ndef from_qxyz(cls, qxyz: Tensor) -> Se3:\n    q, xyz = (qxyz[..., :4], qxyz[..., 4:])\n    return cls(So3.from_wxyz(q), Vector3(xyz))",
    "docstring": "Create a Se3 group a quaternion and translation vector. Args: qxyz: tensor of shape :math:. Example: >>> qxyz = torch.tensor([1., 2., 3., 0., 0., 0., 1.]) >>> s = Se3.from_qxyz(qxyz) >>> s.r Parameter containing: tensor([1., 2., 3., 0.], requires_grad=True) >>> s.t x: 0.0 y: 0.0 z: 1.0",
    "type": "method",
    "file_path": "kornia\\kornia\\geometry\\liegroup\\se3.py",
    "ast_data": "FunctionDef name:from_qxyz arg:cls arg:qxyz arguments arg arg Assign Return return:yes Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_get_min_depth",
    "source_code": "def _get_min_depth(node):\n    min_depth = 0\n    for layer, node_id, _, _ in node.iterate_inbound():\n        inbound_node = layer._inbound_nodes[node_id]\n        if inbound_node in node_to_depth:\n            min_depth = min(min_depth, node_to_depth[inbound_node])\n        elif inbound_node not in network_nodes:\n            continue\n        else:\n            return None\n    return min_depth - 1",
    "docstring": "Gets the minimum depth at which node can be computed.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\functional.py",
    "ast_data": "FunctionDef name:_get_min_depth arg:node arguments arg Assign For Call Assign If Compare Assign Call If Compare Return return:no Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "Ellipse",
    "source_code": "@_register_style(_style_list)\nclass Ellipse:\n\n    def __init__(self, pad=0.3):\n        self.pad = pad\n\n    def __call__(self, x0, y0, width, height, mutation_size):\n        pad = mutation_size * self.pad\n        width, height = (width + 2 * pad, height + 2 * pad)\n        x0, y0 = (x0 - pad, y0 - pad)\n        a = width / math.sqrt(2)\n        b = height / math.sqrt(2)\n        trans = Affine2D().scale(a, b).translate(x0 + width / 2, y0 + height / 2)\n        return trans.transform_path(Path.unit_circle())",
    "docstring": "An elliptical box. .. versionadded:: 3.7",
    "type": "class",
    "file_path": "matplotlib\\lib\\matplotlib\\patches.py",
    "ast_data": "ClassDef name:Ellipse FunctionDef name:__init__ arg:self arg:pad arguments arg arg Assign FunctionDef name:__call__ arg:self arg:x0 arg:y0 arg:width arg:height arg:mutation_size arguments arg arg arg arg arg arg Assign Assign Assign Assign Call Assign Call Assign Call Call Call Return return:yes Call Call Call"
  },
  {
    "library": "scipy",
    "name": "antiderivative",
    "source_code": "def antiderivative(self, nu=1):\n    if nu <= 0:\n        return self.derivative(-nu)\n    c = np.zeros((self.c.shape[0] + nu, self.c.shape[1]) + self.c.shape[2:], dtype=self.c.dtype)\n    c[:-nu] = self.c\n    factor = spec.poch(np.arange(self.c.shape[0], 0, -1), nu)\n    c[:-nu] /= factor[(slice(None),) + (None,) * (c.ndim - 1)]\n    self._ensure_c_contiguous()\n    _ppoly.fix_continuity(c.reshape(c.shape[0], c.shape[1], -1), self.x, nu - 1)\n    if self.extrapolate == 'periodic':\n        extrapolate = False\n    else:\n        extrapolate = self.extrapolate\n    return self.construct_fast(c, self.x, extrapolate, self.axis)",
    "docstring": "Construct a new piecewise polynomial representing the antiderivative. Antiderivative is also the indefinite integral of the function, and derivative is its inverse operation. Parameters ---------- nu : int, optional Order of antiderivative to evaluate. Default is 1, i.e., compute the first integral. If negative, the derivative is returned. Returns ------- pp : PPoly Piecewise polynomial of order k2 = k + n representing the antiderivative of this polynomial. Notes ----- The antiderivative returned by this function is continuous and continuously differentiable to order n-1, up to floating point rounding error. If antiderivative is computed and ``, it will be set to False for the returned instance. This is done because the antiderivative is no longer periodic and its correct evaluation outside of the initially given x interval is difficult.",
    "type": "method",
    "file_path": "scipy\\scipy\\interpolate\\_interpolate.py",
    "ast_data": "FunctionDef name:antiderivative arg:self arg:nu arguments arg arg If Compare Return return:yes Call Assign Call Assign Assign Call Call Call Call Call Call If Compare Assign Assign Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "Cropping1D",
    "source_code": "class Cropping1D(Layer):\n\n    def __init__(self, cropping=(1, 1), **kwargs):\n        super(Cropping1D, self).__init__(**kwargs)\n        self.cropping = conv_utils.normalize_tuple(cropping, 2, 'cropping')\n        self.input_spec = InputSpec(ndim=3)\n\n    def compute_output_shape(self, input_shape):\n        input_shape = tensor_shape.TensorShape(input_shape).as_list()\n        if input_shape[1] is not None:\n            length = input_shape[1] - self.cropping[0] - self.cropping[1]\n        else:\n            length = None\n        return tensor_shape.TensorShape([input_shape[0], length, input_shape[2]])\n\n    def call(self, inputs):\n        if self.cropping[1] == 0:\n            return inputs[:, self.cropping[0]:, :]\n        else:\n            return inputs[:, self.cropping[0]:-self.cropping[1], :]\n\n    def get_config(self):\n        config = {'cropping': self.cropping}\n        base_config = super(Cropping1D, self).get_config()\n        return dict(list(base_config.items()) + list(config.items()))",
    "docstring": "Cropping layer for 1D input (e.g. temporal sequence). It crops along the time dimension (axis 1). Examples: >>> input_shape = (2, 3, 2) >>> x = np.arange(np.prod(input_shape)).reshape(input_shape) >>> print(x) [[[ 0 1] [ 2 3] [ 4 5]] [[ 6 7] [ 8 9] [10 11]]] >>> y = tf.keras.layers.Cropping1D(cropping=1)(x) >>> print(y) tf.Tensor( [[[2 3]] [[8 9]]], shape=(2, 1, 2), dtype=int64) Args: cropping: Int or tuple of int (length 2) How many units should be trimmed off at the beginning and end of the cropping dimension (axis 1). If a single int is provided, the same value will be used for both. Input shape: 3D tensor with shape Output shape: 3D tensor with shape",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\layers\\convolutional.py",
    "ast_data": "ClassDef name:Cropping1D FunctionDef name:__init__ arg:self arg:cropping arguments arg arg arg Call Call Assign Call Assign Call FunctionDef name:compute_output_shape arg:self arg:input_shape arguments arg arg Assign Call Call If Compare Assign Assign Return return:yes Call FunctionDef name:call arg:self arg:inputs arguments arg arg If Compare Return return:yes Return return:yes FunctionDef name:get_config arg:self arguments arg Assign Assign Call Call Return return:yes Call Call Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "shutdown_tpu_system",
    "source_code": "def shutdown_tpu_system():\n\n    @def_function.function\n    def _shutdown_tpu_system():\n        return gen_dtensor_ops.shutdown_tpu_system()\n    success = _shutdown_tpu_system() if context.is_tfrt_enabled() else True\n    if success:\n        logging.info('TPU system shut down.')\n    else:\n        logging.warning('TPU system fails to shut down.')",
    "docstring": "Shuts down the TPU system.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\dtensor\\python\\tpu_util.py",
    "ast_data": "FunctionDef name:shutdown_tpu_system arguments FunctionDef name:_shutdown_tpu_system arguments Return return:yes Call Assign Call Call If Call Call"
  },
  {
    "library": "kornia",
    "name": "_jpeg_encode",
    "source_code": "def _jpeg_encode(image_rgb: Tensor, jpeg_quality: Tensor, quantization_table_y: Tensor, quantization_table_c: Tensor) -> tuple[Tensor, Tensor, Tensor]:\n    image_ycbcr: Tensor = rgb_to_ycbcr(image_rgb)\n    image_ycbcr = 255.0 * image_ycbcr\n    input_y, input_cb, input_cr = _chroma_subsampling(image_ycbcr)\n    input_y, input_cb, input_cr = (_patchify_8x8(input_y), _patchify_8x8(input_cb), _patchify_8x8(input_cr))\n    dct_y = _dct_8x8(input_y)\n    dct_cb_cr = _dct_8x8(torch.cat((input_cb, input_cr), dim=1))\n    y_encoded: Tensor = _quantize(dct_y, jpeg_quality, quantization_table_y)\n    cb_encoded, cr_encoded = _quantize(dct_cb_cr, jpeg_quality, quantization_table_c).chunk(2, dim=1)\n    return (y_encoded, cb_encoded, cr_encoded)",
    "docstring": "Perform JPEG encoding. Args: image_rgb (Tensor): RGB input images of the shape :math:. jpeg_quality (Tensor): Compression strength of the shape :math:. quantization_table_y (Tensor): Quantization table for Y channel. quantization_table_c (Tensor): Quantization table for C channels. Returns: y_encoded (Tensor): Encoded Y component of the shape :math:. cb_encoded (Tensor): Encoded Cb component of the shape :math:. cr_encoded (Tensor): Encoded Cr component of the shape :math:.",
    "type": "function",
    "file_path": "kornia\\kornia\\enhance\\jpeg.py",
    "ast_data": "FunctionDef name:_jpeg_encode arg:image_rgb arg:jpeg_quality arg:quantization_table_y arg:quantization_table_c arguments arg arg arg arg Call Assign Assign Call Assign Call Call Call Assign Call Assign Call Call Call Assign Call Call Return return:yes"
  },
  {
    "library": "django",
    "name": "mail_admins",
    "source_code": "def mail_admins(subject, message, fail_silently=False, connection=None, html_message=None):\n    _send_server_message(setting_name='ADMINS', subject=subject, message=message, html_message=html_message, fail_silently=fail_silently, connection=connection)",
    "docstring": "Send a message to the admins, as defined by the ADMINS setting.",
    "type": "function",
    "file_path": "django\\django\\core\\mail\\__init__.py",
    "ast_data": "FunctionDef name:mail_admins arg:subject arg:message arg:fail_silently arg:connection arg:html_message arguments arg arg arg arg arg Call"
  },
  {
    "library": "tensorflow",
    "name": "write",
    "source_code": "def write(self, save_path, options=None):\n    return self._write(save_path, options)",
    "docstring": "Save the checkpointed variables. Args: save_path: The file prefix of the checkpoint file. options: Optional CheckpointOption instance. Returns: The full path of the checkpoint file.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\checkpoint\\async_checkpoint_helper.py",
    "ast_data": "FunctionDef name:write arg:self arg:save_path arg:options arguments arg arg arg Return return:yes Call"
  },
  {
    "library": "django",
    "name": "_check_parent_chain",
    "source_code": "def _check_parent_chain(self, instance):\n    opts = instance._meta\n    link_field = opts.get_ancestor_link(self.field.model)\n    if self.field.primary_key and self.field != link_field:\n        return getattr(instance, link_field.attname)\n    return None",
    "docstring": "Check if the field value can be fetched from a parent field already loaded in the instance. This can be done if the to-be fetched field is a primary key field.",
    "type": "method",
    "file_path": "django\\django\\db\\models\\query_utils.py",
    "ast_data": "FunctionDef name:_check_parent_chain arg:self arg:instance arguments arg arg Assign Assign Call If BoolOp Compare Return return:yes Call Return return:no"
  },
  {
    "library": "scipy",
    "name": "_digammainv",
    "source_code": "def _digammainv(y):\n    _em = 0.5772156649015329\n\n    def func(x):\n        return sc.digamma(x) - y\n    if y > -0.125:\n        x0 = np.exp(y) + 0.5\n        if y < 10:\n            value = optimize.newton(func, x0, tol=1e-10)\n            return value\n    elif y > -3:\n        x0 = np.exp(y / 2.332) + 0.08661\n    else:\n        x0 = 1.0 / (-y - _em)\n    value, info, ier, mesg = optimize.fsolve(func, x0, xtol=1e-11, full_output=True)\n    if ier != 1:\n        raise RuntimeError(f'_digammainv: fsolve failed, y = {y!r}')\n    return value[0]",
    "docstring": "Inverse of the digamma function (real positive arguments only). This function is used in the method of . The function uses either optimize.fsolve or optimize.newton to solve . There is probably room for improvement, but currently it works over a wide range of y: >>> import numpy as np >>> rng = np.random.default_rng() >>> y = 64*rng.standard_normal(1000000) >>> y.min(), y.max() (-311.43592651416662, 351.77388222276869) >>> x = [_digammainv(t) for t in y] >>> np.abs(sc.digamma(x) - y).max() 1.1368683772161603e-13",
    "type": "function",
    "file_path": "scipy\\scipy\\stats\\_continuous_distns.py",
    "ast_data": "FunctionDef name:_digammainv arg:y arguments arg Assign FunctionDef name:func arg:x arguments arg Return return:yes Call If Compare Assign Call If Compare Assign Call Return return:yes If Compare Assign Call Assign Assign Call If Compare Raise Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_generate_stats",
    "source_code": "def _generate_stats(self, data_list: list[float]) -> UtilizationStats:\n    if len(data_list) == 0:\n        return UtilizationStats()\n    total = sum(data_list)\n    avg = total / len(data_list)\n    maxi = max(data_list)\n    return UtilizationStats(avg=round(avg, 2), max=round(maxi, 2))",
    "docstring": "Generate stats from the data list.",
    "type": "method",
    "file_path": "pytorch\\tools\\stats\\monitor.py",
    "ast_data": "FunctionDef name:_generate_stats arg:self arg:data_list arguments arg arg If Compare Call Return return:yes Call Assign Call Assign Call Assign Call Return return:yes Call Call Call"
  },
  {
    "library": "django",
    "name": "value",
    "source_code": "@property\ndef value(self):\n    return self.as_double()",
    "docstring": "Return a float contained in this field.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\gdal\\field.py",
    "ast_data": "FunctionDef name:value arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "dimension",
    "source_code": "@property\ndef dimension(self):\n    return self._d",
    "docstring": "The dimension of the curve.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\bezier.py",
    "ast_data": "FunctionDef name:dimension arg:self arguments arg Return return:yes"
  },
  {
    "library": "scipy",
    "name": "_dual_poly",
    "source_code": "def _dual_poly(j, k, t, y):\n    if k == 0:\n        return 1\n    return np.prod([y - t[j + i] for i in range(1, k + 1)])",
    "docstring": "Dual polynomial of the B-spline B_{j,k,t} - polynomial which is associated with B_{j,k,t}: $p_{j,k}(y) = (y - t_{j+1})(y - t_{j+2})...(y - t_{j+k})$",
    "type": "function",
    "file_path": "scipy\\scipy\\interpolate\\_bsplines.py",
    "ast_data": "FunctionDef name:_dual_poly arg:j arg:k arg:t arg:y arguments arg arg arg arg If Compare Return return:yes Return return:yes Call Call"
  },
  {
    "library": "kornia",
    "name": "KeyNetDetector",
    "source_code": "class KeyNetDetector(MultiResolutionDetector):\n\n    def __init__(self, pretrained: bool=False, num_features: int=2048, keynet_conf: KeyNet_conf=keynet_default_config, ori_module: Optional[Module]=None, aff_module: Optional[Module]=None) -> None:\n        model = KeyNet(pretrained, keynet_conf)\n        super().__init__(model, num_features, keynet_conf['Detector_conf'], ori_module, aff_module)",
    "docstring": "Multi-scale feature detector based on KeyNet. This is based on the original code from paper \"Key.Net: Keypoint Detection by Handcrafted and Learned CNN Filters\". See :cite: for more details. .. image:: _static/img/keynet.jpg Args: pretrained: Download and set pretrained weights to the model. num_features: Number of features to detect. keynet_conf: Dict with initialization parameters. Do not pass it, unless you know what you are doing~kornia.feature.PassLAF~kornia.feature.LAFOrienter~kornia.feature.PassLAF~kornia.feature.LAFAffineShapeEstimator` for details.",
    "type": "class",
    "file_path": "kornia\\kornia\\feature\\keynet.py",
    "ast_data": "ClassDef name:KeyNetDetector FunctionDef name:__init__ arg:self arg:pretrained arg:num_features arg:keynet_conf arg:ori_module arg:aff_module arguments arg arg arg arg arg arg Assign Call Call Call"
  },
  {
    "library": "matplotlib",
    "name": "FilledArrow",
    "source_code": "class FilledArrow(SimpleArrow):\n    ArrowAxisClass = _FancyAxislineStyle.FilledArrow\n\n    def __init__(self, size=1, facecolor=None):\n        facecolor = mpl._val_or_rc(facecolor, 'axes.edgecolor')\n        self.size = size\n        self._facecolor = facecolor\n        super().__init__(size=size)\n\n    def new_line(self, axis_artist, transform):\n        linepath = Path([(0, 0), (0, 1)])\n        axisline = self.ArrowAxisClass(axis_artist, linepath, transform, line_mutation_scale=self.size, facecolor=self._facecolor)\n        return axisline",
    "docstring": "An arrow with a filled head.",
    "type": "class",
    "file_path": "matplotlib\\lib\\mpl_toolkits\\axisartist\\axisline_style.py",
    "ast_data": "ClassDef name:FilledArrow Assign FunctionDef name:__init__ arg:self arg:size arg:facecolor arguments arg arg arg Assign Call Assign Assign Call Call FunctionDef name:new_line arg:self arg:axis_artist arg:transform arguments arg arg arg Assign Call Assign Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "state_dict",
    "source_code": "def state_dict(self):\n    return {key: value for key, value in self.__dict__.items() if key != 'data_sparsifier'}",
    "docstring": "Returns the state of the scheduler as a :class:. It contains an entry for every variable in self.__dict__ which is not the sparsifier. Note: The scheduler class does not track the state of the data_sparsifier. Make sure to store the state of the sparsifier before storing the state of the scheduler",
    "type": "method",
    "file_path": "pytorch\\torch\\ao\\pruning\\_experimental\\data_scheduler\\base_data_scheduler.py",
    "ast_data": "FunctionDef name:state_dict arg:self arguments arg Return return:yes Call Compare"
  },
  {
    "library": "tensorflow",
    "name": "register_tensor_conversion_function",
    "source_code": "@tf_export('register_tensor_conversion_function')\ndef register_tensor_conversion_function(base_type, conversion_func, priority=100):\n    base_types = base_type if isinstance(base_type, tuple) else (base_type,)\n    if any((not isinstance(x, type) for x in base_types)):\n        raise TypeError(f'Argument `base_type` must be a type or a tuple of types. Obtained: {base_type}')\n    if any((issubclass(x, _CONSTANT_OP_CONVERTIBLES) for x in base_types)):\n        raise TypeError('Cannot register conversions for Python numeric types and NumPy scalars and arrays.')\n    del base_types\n    register_tensor_conversion_function_internal(base_type, conversion_func, priority)",
    "docstring": "Registers a function for converting objects of to . The conversion function must have the following signature: It must return a with the given if specified. If the conversion function creates a new , it should use the given if specified. All exceptions will be propagated to the caller. The conversion function may return for some inputs. In this case, the conversion process will continue to try subsequent conversion functions. If is true, the function must return a reference, such as a . NOTE: The conversion functions will execute in order of priority, followed by order of registration. To ensure that a conversion function runs before another conversion function , ensure that is registered with a smaller priority than . Args: base_type: The base type or tuple of base types for all objects that accepts. conversion_func: A function that converts instances of to . priority: Optional integer that indicates the priority for applying this conversion function. Conversion functions with smaller priority values run earlier than conversion functions with larger priority values. Defaults to 100. Raises: TypeError: If the arguments do not have the appropriate type.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\tensor_conversion_registry.py",
    "ast_data": "FunctionDef name:register_tensor_conversion_function arg:base_type arg:conversion_func arg:priority arguments arg arg arg Assign Call If Call Call Raise Call If Call Call Raise Call Call Call"
  },
  {
    "library": "matplotlib",
    "name": "get_corners",
    "source_code": "def get_corners(self):\n    return self.get_patch_transform().transform([(0, 0), (1, 0), (1, 1), (0, 1)])",
    "docstring": "Return the corners of the rectangle, moving anti-clockwise from (x0, y0).",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\patches.py",
    "ast_data": "FunctionDef name:get_corners arg:self arguments arg Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_make_execution_function_with_cloning",
    "source_code": "def _make_execution_function_with_cloning(model, mode):\n    distributed_model = get_distributed_model(model, mode)\n    if distributed_model and hasattr(distributed_model, '_distribution_function') and (not (hasattr(distributed_model, '_recompile_exec_function') and distributed_model._recompile_exec_function)):\n        return distributed_model._distributed_function\n    if not distributed_model:\n        _make_replicated_models_with_cloning(model, mode)\n        distributed_model = get_distributed_model(model, mode)\n    assert distributed_model\n    if context.executing_eagerly():\n        distributed_function = _make_eager_execution_function(model, mode)\n    else:\n        distributed_function = _make_graph_execution_function(model, mode)\n    distributed_model._distributed_function = distributed_function\n    distributed_model._recompile_exec_function = False\n    return distributed_function",
    "docstring": "Clones or re-uses models to run one step of distributed model execution.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\distribute\\distributed_training_utils_v1.py",
    "ast_data": "FunctionDef name:_make_execution_function_with_cloning arg:model arg:mode arguments arg arg Assign Call If BoolOp Call BoolOp Call Return return:yes If Call Assign Call If Call Assign Call Assign Call Assign Assign Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "register_full_backward_pre_hook",
    "source_code": "def register_full_backward_pre_hook(self, hook: Callable[['Module', _grad_t], Union[None, _grad_t]], prepend: bool=False) -> RemovableHandle:\n    handle = RemovableHandle(self._backward_pre_hooks)\n    self._backward_pre_hooks[handle.id] = hook\n    if prepend:\n        self._backward_pre_hooks.move_to_end(handle.id, last=False)\n    return handle",
    "docstring": "Register a backward pre-hook on the module. The hook will be called every time the gradients for the module are computed. The hook should have the following signature:: hook(module, grad_output) -> tuple[Tensor] or None The :attr: is a tuple. The hook should not modify its arguments, but it can optionally return a new gradient with respect to the output that will be used in place of :attr: in subsequent computations. Entries in :attr: will be `torch.nn.Moduletorch.nn.Moduleregister_module_full_backward_pre_hooktorch.utils.hooks.RemovableHandle`",
    "type": "method",
    "file_path": "pytorch\\torch\\nn\\modules\\module.py",
    "ast_data": "FunctionDef name:register_full_backward_pre_hook arg:self arg:hook arg:prepend arguments arg arg arg Assign Call Assign If Call Return return:yes"
  },
  {
    "library": "scrapy",
    "name": "request_to_curl",
    "source_code": "def request_to_curl(request: Request) -> str:\n    method = request.method\n    data = f\"--data-raw '{request.body.decode('utf-8')}'\" if request.body else ''\n    headers = ' '.join((f\"-H '{k.decode()}: {v[0].decode()}'\" for k, v in request.headers.items()))\n    url = request.url\n    cookies = ''\n    if request.cookies:\n        if isinstance(request.cookies, dict):\n            cookie = '; '.join((f'{k}={v}' for k, v in request.cookies.items()))\n            cookies = f\"--cookie '{cookie}'\"\n        elif isinstance(request.cookies, list):\n            cookie = '; '.join((f'{next(iter(c.keys()))}={next(iter(c.values()))}' for c in request.cookies))\n            cookies = f\"--cookie '{cookie}'\"\n    curl_cmd = f'curl -X {method} {url} {data} {headers} {cookies}'.strip()\n    return ' '.join(curl_cmd.split())",
    "docstring": "Converts a :class: object to a curl command. :param :class:: Request object to be converted :return: string containing the curl command",
    "type": "function",
    "file_path": "scrapy\\scrapy\\utils\\request.py",
    "ast_data": "FunctionDef name:request_to_curl arg:request arguments arg Assign Assign Call Assign Call Call Call Call Assign Assign If If Call Assign Call Call Assign If Call Assign Call Call Call Call Call Call Call Assign Assign Call Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_init_writer",
    "source_code": "def _init_writer(self, model):\n    if context.executing_eagerly():\n        self.writer = summary_ops_v2.create_file_writer_v2(self.log_dir)\n        if not model.run_eagerly and self.write_graph:\n            with self.writer.as_default():\n                summary_ops_v2.graph(K.get_graph())\n    elif self.write_graph:\n        self.writer = tf_summary.FileWriter(self.log_dir, K.get_graph())\n    else:\n        self.writer = tf_summary.FileWriter(self.log_dir)",
    "docstring": "Sets file writer.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\callbacks_v1.py",
    "ast_data": "FunctionDef name:_init_writer arg:self arg:model arguments arg arg If Call Assign Call If BoolOp With Call Call Call If Assign Call Call Assign Call"
  },
  {
    "library": "pytorch",
    "name": "sym_and",
    "source_code": "def sym_and(x: BoolLikeType, *others: BoolLikeType) -> BoolLikeType:\n    assert isinstance(x, (bool, SymBool))\n    if len(others) == 0:\n        return x\n    for y in others:\n        assert isinstance(y, (bool, SymBool))\n        x = operator.and_(x, y)\n    return x",
    "docstring": "and, but for symbolic expressions, without bool casting.",
    "type": "function",
    "file_path": "pytorch\\torch\\fx\\experimental\\symbolic_shapes.py",
    "ast_data": "FunctionDef name:sym_and arg:x arguments arg arg Call If Compare Call Return return:yes For Call Assign Call Return return:yes"
  },
  {
    "library": "django",
    "name": "static",
    "source_code": "def static(path):\n    return StaticNode.handle_simple(path)",
    "docstring": "Given a relative path to a static asset, return the absolute path to the asset.",
    "type": "function",
    "file_path": "django\\django\\templatetags\\static.py",
    "ast_data": "FunctionDef name:static arg:path arguments arg Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "addGouraudTriangles",
    "source_code": "def addGouraudTriangles(self, points, colors):\n    name = Name('GT%d' % len(self.gouraudTriangles))\n    ob = self.reserveObject(f'Gouraud triangle {name}')\n    self.gouraudTriangles.append((name, ob, points, colors))\n    return (name, ob)",
    "docstring": "Add a Gouraud triangle shading. Parameters ---------- points : np.ndarray Triangle vertices, shape (n, 3, 2) where n = number of triangles, 3 = vertices, 2 = x, y. colors : np.ndarray Vertex colors, shape (n, 3, 1) or (n, 3, 4) as with points, but last dimension is either (gray,) or (r, g, b, alpha). Returns ------- Name, Reference",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\backends\\backend_pdf.py",
    "ast_data": "FunctionDef name:addGouraudTriangles arg:self arg:points arg:colors arguments arg arg arg Assign Call Call Assign Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "LearnedHeuristic",
    "source_code": "class LearnedHeuristic:\n\n    def __init__(self) -> None:\n        pass\n\n    def check_precondition(self, metadata: AHMetadata, context: AHContext) -> bool:\n        return True\n\n    def get_decision(self, context: AHContext, choices: list[Choice]) -> Optional[Choice]:\n        return None\n\n    def get_confidence_threshold(self) -> float:\n        return 1.0\n\n    def get_name(self) -> str:\n        return ''\n\n    def get_decisions_ranked(self, context: AHContext) -> Optional[list[str]]:\n        return None",
    "docstring": "LearnedHeuristic is a base class for all learned heuristics.",
    "type": "class",
    "file_path": "pytorch\\torch\\_inductor\\autoheuristic\\learnedheuristic_interface.py",
    "ast_data": "ClassDef name:LearnedHeuristic FunctionDef name:__init__ arg:self arguments arg FunctionDef name:check_precondition arg:self arg:metadata arg:context arguments arg arg arg Return return:yes FunctionDef name:get_decision arg:self arg:context arg:choices arguments arg arg arg Return return:no FunctionDef name:get_confidence_threshold arg:self arguments arg Return return:yes FunctionDef name:get_name arg:self arguments arg Return return:yes FunctionDef name:get_decisions_ranked arg:self arg:context arguments arg arg Return return:no"
  },
  {
    "library": "scipy",
    "name": "_get_missing_value",
    "source_code": "def _get_missing_value(self):\n    if '_FillValue' in self._attributes:\n        missing_value = self._attributes['_FillValue']\n    elif 'missing_value' in self._attributes:\n        missing_value = self._attributes['missing_value']\n    else:\n        missing_value = None\n    return missing_value",
    "docstring": "Returns the value denoting \"no data\" for this variable. If this variable does not have a missing/fill value, returns None. If both _FillValue and missing_value are given, give precedence to _FillValue. The netCDF standard gives special meaning to _FillValue; missing_value is just used for compatibility with old datasets.",
    "type": "method",
    "file_path": "scipy\\scipy\\io\\_netcdf.py",
    "ast_data": "FunctionDef name:_get_missing_value arg:self arguments arg If Compare Assign If Compare Assign Assign Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "_logged_cached",
    "source_code": "def _logged_cached(fmt, func=None):\n    if func is None:\n        return functools.partial(_logged_cached, fmt)\n    called = False\n    ret = None\n\n    @functools.wraps(func)\n    def wrapper(**kwargs):\n        nonlocal called, ret\n        if not called:\n            ret = func(**kwargs)\n            called = True\n            _log.debug(fmt, ret)\n        return ret\n    return wrapper",
    "docstring": "Decorator that logs a function's return value, and memoizes that value. After :: @_logged_cached(fmt) def func(): ... the first call to *func* will log its return value at the DEBUG level using %-format string *fmt*, and memoize it; later calls to *func* will directly return that value.",
    "type": "function",
    "file_path": "matplotlib\\lib\\matplotlib\\__init__.py",
    "ast_data": "FunctionDef name:_logged_cached arg:fmt arg:func arguments arg arg If Compare Return return:yes Call Assign Assign FunctionDef name:wrapper arguments arg If Assign Call Assign Call Return return:yes Call Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "_get_updates",
    "source_code": "def _get_updates(self, grads):\n    updates = [self.momentum * velocity - self.learning_rate * grad for velocity, grad in zip(self.velocities, grads)]\n    self.velocities = updates\n    if self.nesterov:\n        updates = [self.momentum * velocity - self.learning_rate * grad for velocity, grad in zip(self.velocities, grads)]\n    return updates",
    "docstring": "Get the values used to update params with given gradients Parameters ---------- grads : list, length = len(coefs_) + len(intercepts_) Containing gradients with respect to coefs_ and intercepts_ in MLP model. So length should be aligned with params Returns ------- updates : list, length = len(grads) The values to add to params",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\neural_network\\_stochastic_optimizers.py",
    "ast_data": "FunctionDef name:_get_updates arg:self arg:grads arguments arg arg Assign Call Assign If Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "on_predict_end",
    "source_code": "@doc_controls.for_subclass_implementers\ndef on_predict_end(self, logs=None):\n    pass",
    "docstring": "Called at the end of prediction. Subclasses should override for any actions to run. Args: logs: Dict. Currently no data is passed to this argument for this method but that may change in the future.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\callbacks.py",
    "ast_data": "FunctionDef name:on_predict_end arg:self arg:logs arguments arg arg"
  },
  {
    "library": "tensorflow",
    "name": "_init_values_from_proto",
    "source_code": "def _init_values_from_proto(self, values_def, import_scope=None):\n    assert isinstance(values_def, control_flow_pb2.ValuesDef)\n    self._values = set((ops.prepend_name_scope(value, import_scope) for value in values_def.values))\n    g = ops.get_default_graph()\n    self._external_values = {}\n    for k, v in values_def.external_values.items():\n        k = ops.prepend_name_scope(k, import_scope)\n        self._external_values[k] = g.as_graph_element(ops.prepend_name_scope(v, import_scope))\n    op_names = set([op.split(':')[0] for op in self._values - set(self._external_values.keys())])\n    for op in op_names:\n        g.as_graph_element(op)._set_control_flow_context(self)",
    "docstring": "Initializes values and external_values from protocol buffer. Args: values_def: protocol buffer. import_scope: Optional . Name scope to add.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\control_flow_ops.py",
    "ast_data": "FunctionDef name:_init_values_from_proto arg:self arg:values_def arg:import_scope arguments arg arg arg Call Assign Call Call Assign Call Assign For Call Assign Call Assign Call Call Assign Call Call Call Call For Call Call"
  },
  {
    "library": "pytorch",
    "name": "Conv2dBiasFollowedByBatchNorm2dPattern",
    "source_code": "class Conv2dBiasFollowedByBatchNorm2dPattern(Pattern):\n\n    def __init__(self, prof: profile, should_benchmark: bool=False):\n        super().__init__(prof, should_benchmark)\n        self.name = 'Enabling Bias in Conv2d Followed By BatchNorm Pattern'\n        self.description = \"Detected bias enabled in Conv2d that is followed by BatchNorm2d. Please set 'bias=False' in Conv2d.\"\n        self.url = 'https://pytorch.org/tutorials/recipes/recipes/tuning_guide.html#disable-bias-for-convolutions-directly-followed-by-a-batch-norm'\n\n    @property\n    def skip(self):\n        return self.prof.record_shapes is False or super().skip\n\n    def match(self, event: _ProfilerEvent):\n        if event.name != 'aten::conv2d':\n            return False\n        if len(input_dtypes(event)) < 3 or input_dtypes(event)[2] is None:\n            return False\n        event = self.go_up_until(event, lambda e: e.name.startswith('nn.Module: Conv2d'))\n        if not event:\n            return False\n        event = self.next_of(event)\n        if not event:\n            return False\n        return event.name.startswith('nn.Module: BatchNorm2d')",
    "docstring": "This pattern identifies if we are enabling bias in Conv2d which is followed by BatchNorm2d. Bias doesn't do anything when followed by batchnorm. Pattern: nn.Module: Conv2d | nn.Module: BatchNorm2d ... aten::conv2d AND dtype of third argument is not null The third argument is the bias Algorithm: String match",
    "type": "class",
    "file_path": "pytorch\\torch\\profiler\\_pattern_matcher.py",
    "ast_data": "ClassDef name:Conv2dBiasFollowedByBatchNorm2dPattern FunctionDef name:__init__ arg:self arg:prof arg:should_benchmark arguments arg arg arg Call Call Assign Assign Assign FunctionDef name:skip arg:self arguments arg Return return:yes BoolOp Compare Call FunctionDef name:match arg:self arg:event arguments arg arg If Compare Return return:yes If BoolOp Compare Call Call Compare Call Return return:yes Assign Call arguments arg Call If Return return:yes Assign Call If Return return:yes Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_add_default_callbacks",
    "source_code": "def _add_default_callbacks(self, add_history, add_progbar):\n    self._progbar = None\n    self._history = None\n    for cb in self.callbacks:\n        if isinstance(cb, ProgbarLogger):\n            self._progbar = cb\n        elif isinstance(cb, History):\n            self._history = cb\n    if self._progbar is None and add_progbar:\n        self._progbar = ProgbarLogger(count_mode='steps')\n        self.callbacks.insert(0, self._progbar)\n    if self._history is None and add_history:\n        self._history = History()\n        self.callbacks.append(self._history)",
    "docstring": "Adds s that are always present.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\callbacks.py",
    "ast_data": "FunctionDef name:_add_default_callbacks arg:self arg:add_history arg:add_progbar arguments arg arg arg Assign Assign For If Call Assign If Call Assign If BoolOp Compare Assign Call Call If BoolOp Compare Assign Call Call"
  },
  {
    "library": "numpy",
    "name": "I",
    "source_code": "@property\ndef I(self):\n    M, N = self.shape\n    if M == N:\n        from numpy.linalg import inv as func\n    else:\n        from numpy.linalg import pinv as func\n    return asmatrix(func(self))",
    "docstring": "Returns the (multiplicative) inverse of invertible . Parameters ---------- None Returns ------- ret : matrix object If is non-singular, is such that `self` is singular. See Also -------- linalg.inv Examples -------- >>> m = np.matrix('[1, 2; 3, 4]'); m matrix([[1, 2], [3, 4]]) >>> m.getI() matrix([[-2. , 1. ], [ 1.5, -0.5]]) >>> m.getI() * m matrix([[ 1., 0.], # may vary [ 0., 1.]])",
    "type": "method",
    "file_path": "numpy\\numpy\\matrixlib\\defmatrix.py",
    "ast_data": "FunctionDef name:I arg:self arguments arg Assign If Compare Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_collect_model_statistics",
    "source_code": "def _collect_model_statistics(self) -> Dict[str, float]:\n    model_statistics = collections.defaultdict(list)\n    initialize = True\n    for tensor_data in self._data_gen():\n        self._set_input_tensors(self._quant_interpreter, tensor_data, initialize)\n        self._quant_interpreter.invoke()\n        quant_tensor_data = self._get_output_tensors(self._quant_interpreter)\n        float_tensor_data = []\n        if self._float_interpreter:\n            self._set_input_tensors(self._float_interpreter, tensor_data, initialize)\n            self._float_interpreter.invoke()\n            float_tensor_data = self._get_output_tensors(self._float_interpreter)\n        initialize = False\n        for metric_name, metric_fn in self._debug_options.model_debug_metrics.items():\n            model_statistics[metric_name].append(metric_fn(float_tensor_data, quant_tensor_data))\n    return {metric_name: np.mean(metric) for metric_name, metric in model_statistics.items()}",
    "docstring": "Collects model output metrics. For all data from the given RepresentativeDataset, collect all model output results from float model & quantized debug model, and calculate metrics by using model output functions. As a result, self.model_results is filled, where self.model_results[model_output_function_name] = (a scalar). Returns: aggregated per-model output discrepancy metrics. {metric_name: aggregated_metric}",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\lite\\tools\\optimize\\debugging\\python\\debugger.py",
    "ast_data": "FunctionDef name:_collect_model_statistics arg:self arguments arg Assign Call Assign For Call Call Call Assign Call Assign If Call Call Assign Call Assign For Call Call Call Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "graph_def_versions",
    "source_code": "@property\ndef graph_def_versions(self) -> versions_pb2.VersionDef:\n    return versions_pb2.VersionDef.FromString(self._version_def)",
    "docstring": "The GraphDef version information of this graph. For details on the meaning of each version, see []( Returns: A .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\ops.py",
    "ast_data": "FunctionDef name:graph_def_versions arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "gen_functions_declarations",
    "source_code": "def gen_functions_declarations(*, native_functions: Sequence[NativeFunction], kernel_index: ETKernelIndex, selector: SelectiveBuilder, use_aten_lib: bool, custom_ops_native_functions: Sequence[NativeFunction] | None=None) -> str:\n    backend_index = kernel_index._to_backend_index()\n    ns_grouped_functions = defaultdict(list)\n    for native_function in native_functions:\n        ns_grouped_functions[native_function.namespace].append(native_function)\n    functions_declarations = ''\n    newline = '\\n'\n    for namespace in ns_grouped_functions:\n        ns_helper = NamespaceHelper(namespace_str=namespace, entity_name='', max_level=3)\n        declarations = list(mapMaybe(ComputeFunction(static_dispatch_backend_indices=[backend_index], selector=selector, use_aten_lib=use_aten_lib, is_custom_op=lambda f: custom_ops_native_functions is not None and f in custom_ops_native_functions), ns_grouped_functions[namespace]))\n        functions_declarations += f'\\n{ns_helper.prologue}\\n{newline.join(declarations)}\\n{ns_helper.epilogue}\\n        '\n    return functions_declarations",
    "docstring": "Generates namespace separated C++ function API inline declaration/definitions. Native functions are grouped by namespaces and the generated code is wrapped inside namespace blocks. E.g., for in yaml file we will generate a C++ API as a symbol in . This way we avoid symbol conflict when the other is available.",
    "type": "function",
    "file_path": "pytorch\\torchgen\\gen_executorch.py",
    "ast_data": "FunctionDef name:gen_functions_declarations arguments arg arg arg arg arg Assign Call Assign Call For Call Assign Assign For Assign Call Assign Call Call Call arguments arg BoolOp Compare Compare Call Return return:yes"
  },
  {
    "library": "pandas",
    "name": "find_common_type",
    "source_code": "def find_common_type(types):\n    if not types:\n        raise ValueError('no types given')\n    first = types[0]\n    if lib.dtypes_all_equal(list(types)):\n        return first\n    types = list(dict.fromkeys(types).keys())\n    if any((isinstance(t, ExtensionDtype) for t in types)):\n        for t in types:\n            if isinstance(t, ExtensionDtype):\n                res = t._get_common_dtype(types)\n                if res is not None:\n                    return res\n        return np.dtype('object')\n    if all((lib.is_np_dtype(t, 'M') for t in types)):\n        return np.dtype(max(types))\n    if all((lib.is_np_dtype(t, 'm') for t in types)):\n        return np.dtype(max(types))\n    has_bools = any((t.kind == 'b' for t in types))\n    if has_bools:\n        for t in types:\n            if t.kind in 'iufc':\n                return np.dtype('object')\n    return np_find_common_type(*types)",
    "docstring": "Find a common data type among the given dtypes. Parameters ---------- types : list of dtypes Returns ------- pandas extension or numpy dtype See Also -------- numpy.find_common_type",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\dtypes\\cast.py",
    "ast_data": "FunctionDef name:find_common_type arg:types arguments arg If Raise Call Assign If Call Call Return return:yes Assign Call Call Call If Call Call For If Call Assign Call If Compare Return return:yes Return return:yes Call If Call Call Return return:yes Call Call If Call Call Return return:yes Call Call Assign Call Compare If For If Compare Return return:yes Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_set_trainable_state",
    "source_code": "def _set_trainable_state(self, trainable_state):\n    if self in trainable_state:\n        self.trainable = trainable_state[self]\n    layers = self._flatten_layers(include_self=False, recursive=False)\n    for l in layers:\n        if l in trainable_state:\n            l._set_trainable_state(trainable_state)",
    "docstring": "Set state for each sublayer.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\base_layer_v1.py",
    "ast_data": "FunctionDef name:_set_trainable_state arg:self arg:trainable_state arguments arg arg If Compare Assign Assign Call For If Compare Call"
  },
  {
    "library": "pytorch",
    "name": "demangle",
    "source_code": "def demangle(name: str) -> str:\n    if is_mangled(name):\n        _first, sep, last = name.partition('.')\n        return last if len(sep) != 0 else ''\n    return name",
    "docstring": "Note: Unlike PackageMangler.demangle, this version works on any mangled name, irrespective of which PackageMangler created it.",
    "type": "function",
    "file_path": "pytorch\\torch\\package\\_mangling.py",
    "ast_data": "FunctionDef name:demangle arg:name arguments arg If Call Assign Call Return return:yes Compare Call Return return:yes"
  },
  {
    "library": "pandas",
    "name": "values_cols",
    "source_code": "def values_cols(self) -> list[str]:\n    return [i.cname for i in self.values_axes]",
    "docstring": "return a list of my values cols",
    "type": "method",
    "file_path": "pandas\\pandas\\io\\pytables.py",
    "ast_data": "FunctionDef name:values_cols arg:self arguments arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "ambiguity_warn",
    "source_code": "def ambiguity_warn(dispatcher, ambiguities):\n    warn(warning_text(dispatcher.name, ambiguities), AmbiguityWarning)",
    "docstring": "Raise warning when ambiguity is detected Parameters ---------- dispatcher : Dispatcher The dispatcher on which the ambiguity was detected ambiguities : set Set of type signature pairs that are ambiguous within this dispatcher See Also: Dispatcher.add warning_text",
    "type": "function",
    "file_path": "pytorch\\torch\\fx\\experimental\\unification\\multipledispatch\\dispatcher.py",
    "ast_data": "FunctionDef name:ambiguity_warn arg:dispatcher arg:ambiguities arguments arg arg Call Call"
  },
  {
    "library": "pandas",
    "name": "_set_names",
    "source_code": "def _set_names(self, values, *, level=None) -> None:\n    if not is_list_like(values):\n        raise ValueError('Names must be a list-like')\n    if len(values) != 1:\n        raise ValueError(f'Length of new names must be 1, got {len(values)}')\n    validate_all_hashable(*values, error_name=f'{type(self).__name__}.name')\n    self._name = values[0]",
    "docstring": "Set new names on index. Each name has to be a hashable type. Parameters ---------- values : str or sequence name(s) to set level : int, level name, or sequence of int/level names (default None) If the index is a MultiIndex (hierarchical), level(s) to set (None for all levels). Otherwise level must be None Raises ------ TypeError if each name is not hashable.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\indexes\\base.py",
    "ast_data": "FunctionDef name:_set_names arg:self arg:values arguments arg arg arg If Call Raise Call If Compare Call Raise Call Call Call Call Assign"
  },
  {
    "library": "matplotlib",
    "name": "blocking_input_loop",
    "source_code": "def blocking_input_loop(figure, event_names, timeout, handler):\n    if figure.canvas.manager:\n        figure.show()\n    cids = [figure.canvas.mpl_connect(name, handler) for name in event_names]\n    try:\n        figure.canvas.start_event_loop(timeout)\n    finally:\n        for cid in cids:\n            figure.canvas.mpl_disconnect(cid)",
    "docstring": "Run *figure*'s event loop while listening to interactive events. The events listed in *event_names* are passed to *handler*. This function is used to implement , , and . Parameters ---------- figure : event_names : list of str The names of the events passed to *handler*. timeout : float If positive, the event loop is stopped after *timeout* seconds. handler : Callable[[Event], Any] Function called for each event; it can force an early exit of the event loop by calling ``.",
    "type": "function",
    "file_path": "matplotlib\\lib\\matplotlib\\_blocking_input.py",
    "ast_data": "FunctionDef name:blocking_input_loop arg:figure arg:event_names arg:timeout arg:handler arguments arg arg arg arg If Call Assign Call Try Call For Call"
  },
  {
    "library": "tensorflow",
    "name": "flatten_up_to",
    "source_code": "def flatten_up_to(shallow_tree, input_tree):\n    return nest_util.flatten_up_to(nest_util.Modality.DATA, shallow_tree, input_tree)",
    "docstring": "Flattens up to . Any further depth in structure in is retained as elements in the partially flatten output. If and are not sequences, this returns a single-element list: . Use Case: Sometimes we may wish to partially flatten a nested sequence, retaining some of the nested structure. We achieve this by specifying a shallow structure, , we wish to flatten up to. The input, , can be thought of as having the same structure as , but with leaf nodes that are themselves tree structures. Examples: Non-Sequence Edge Cases: Args: shallow_tree: a possibly pruned structure of input_tree. input_tree: an arbitrarily nested structure or a scalar object. Note, numpy arrays are considered scalars. Returns: A Python list, the partially flattened version of according to the structure of . Raises: TypeError: If is a sequence but is not. TypeError: If the sequence types of are different from . ValueError: If the sequence lengths of are different from .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\data\\util\\nest.py",
    "ast_data": "FunctionDef name:flatten_up_to arg:shallow_tree arg:input_tree arguments arg arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "reduce_acc_nodes_non_tensor_input",
    "source_code": "def reduce_acc_nodes_non_tensor_input(self):\n    non_tensor_cpu_nodes: NodeList = []\n    for node in self.module.graph.nodes:\n        if node.op not in CALLABLE_NODE_OPS:\n            continue\n        if node in self.acc_nodes:\n            continue\n        if is_node_output_tensor(node):\n            continue\n        non_tensor_cpu_nodes.append(node)\n    self.reduce_acc_nodes_non_tensor_input_helper(non_tensor_cpu_nodes)",
    "docstring": "Excludes nodes from ACC supported set that have direct upstream CPU nodes that produce non-tensor outputs.",
    "type": "method",
    "file_path": "pytorch\\torch\\fx\\passes\\splitter_base.py",
    "ast_data": "FunctionDef name:reduce_acc_nodes_non_tensor_input arg:self arguments arg For If Compare If Compare If Call Call Call"
  },
  {
    "library": "sphinx",
    "name": "isdescriptor",
    "source_code": "def isdescriptor(x: Any) -> TypeIs[_SupportsGet | _SupportsSet | _SupportsDelete]:\n    return any((callable(safe_getattr(x, item, None)) for item in ('__get__', '__set__', '__delete__')))",
    "docstring": "Check if the object is a :external+python:term:.",
    "type": "function",
    "file_path": "sphinx\\sphinx\\util\\inspect.py",
    "ast_data": "FunctionDef name:isdescriptor arg:x arguments arg Return return:yes Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "most_specific_common_supertype",
    "source_code": "def most_specific_common_supertype(self, types: Sequence[trace.TraceType]) -> Optional['DType']:\n    return self if all((self == other for other in types)) else None",
    "docstring": "See tf.types.experimental.TraceType base class.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\dtypes.py",
    "ast_data": "FunctionDef name:most_specific_common_supertype arg:self arg:types arguments arg arg Return return:yes Call Compare"
  },
  {
    "library": "scipy",
    "name": "lambertw",
    "source_code": "def lambertw(z, k=0, tol=1e-08):\n    k = np.asarray(k, dtype=np.dtype('long'))\n    return _lambertw(z, k, tol)",
    "docstring": "lambertw(z, k=0, tol=1e-8) Lambert W function. The Lambert W function is defined as the inverse function of `kwzlambertwkk = 0x = a + b e^{c x}xcz e^z = -b c e^{a c}z = c (a - x)zz = W(-b c e^{a c})x = a - W(-b c e^{a c})/cx = a + b e^{c x}z^{z^{z^{\\ldots}}}`: >>> def tower(z, n): ... if n == 0: ... return z ... return z ** tower(z, n-1) ... >>> tower(0.5, 100) 0.641185744504986 >>> -lambertw(-np.log(0.5)) / np.log(0.5) (0.64118574450498589+0j)",
    "type": "function",
    "file_path": "scipy\\scipy\\special\\_lambertw.py",
    "ast_data": "FunctionDef name:lambertw arg:z arg:k arg:tol arguments arg arg arg Assign Call Call Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "_has_alpha_channel",
    "source_code": "def _has_alpha_channel(c):\n    if isinstance(c, str):\n        if c[0] == '#' and (len(c) == 5 or len(c) == 9):\n            return True\n    else:\n        if len(c) == 4:\n            return True\n        if len(c) == 2 and (c[1] is not None or _has_alpha_channel(c[0])):\n            return True\n    return False",
    "docstring": "Return whether *c* is a color with an alpha channel. If *c* is not a valid color specifier, then the result is undefined.",
    "type": "function",
    "file_path": "matplotlib\\lib\\matplotlib\\colors.py",
    "ast_data": "FunctionDef name:_has_alpha_channel arg:c arguments arg If Call If BoolOp Compare BoolOp Compare Call Compare Call Return return:yes If Compare Call Return return:yes If BoolOp Compare Call BoolOp Compare Call Return return:yes Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_load_edges",
    "source_code": "def _load_edges(self):\n    for node_id, object_proto in self._iter_all_nodes():\n        self._add_object_graph_edges(object_proto, node_id)\n    if self._filtered_nodes is not None and 0 not in self._filtered_nodes:\n        root = self.get(0)\n        for node_path in self._node_filters:\n            loaded_node = self._nodes[self._node_path_to_id[node_path]]\n            path = node_path.split('.')\n            current_node = root\n            for name in path[1:-1]:\n                if not hasattr(current_node, name):\n                    setattr(current_node, name, self._recreate_base_user_object()[0])\n                current_node = getattr(current_node, name)\n            if not hasattr(current_node, path[-1]):\n                setattr(current_node, path[-1], loaded_node)",
    "docstring": "Adds edges from objects to other objects and functions.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\saved_model\\load.py",
    "ast_data": "FunctionDef name:_load_edges arg:self arguments arg For Call Call If BoolOp Compare Compare Assign Call For Assign Assign Call Assign For If Call Call Call Assign Call If Call Call"
  },
  {
    "library": "scipy",
    "name": "_concordant_pairs",
    "source_code": "def _concordant_pairs(A):\n    m, n = A.shape\n    count = 0\n    for i in range(m):\n        for j in range(n):\n            count += A[i, j] * _Aij(A, i, j)\n    return count",
    "docstring": "Twice the number of concordant pairs, excluding ties.",
    "type": "function",
    "file_path": "scipy\\scipy\\stats\\_stats_pythran.py",
    "ast_data": "FunctionDef name:_concordant_pairs arg:A arguments arg Assign Assign For Call For Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "set_pattern",
    "source_code": "def set_pattern(self, pattern: Pattern) -> BackendPatternConfig:\n    if self._pattern_complex_format is not None:\n        raise ValueError(\"Only one of 'pattern' or 'pattern_complex_format' can be set\")\n    self.pattern = pattern\n    return self",
    "docstring": "Set the pattern to configure. The pattern can be a float module, functional operator, pytorch operator, or a tuple combination of the above. Tuple patterns are treated as sequential patterns, and currently only tuples of 2 or 3 elements are supported.",
    "type": "method",
    "file_path": "pytorch\\torch\\ao\\quantization\\backend_config\\backend_config.py",
    "ast_data": "FunctionDef name:set_pattern arg:self arg:pattern arguments arg arg If Compare Raise Call Assign Return return:yes"
  },
  {
    "library": "cherrypy",
    "name": "_setup",
    "source_code": "def _setup(self):\n    conf = self._merged_args()\n    p = conf.pop('priority', None)\n    if p is None:\n        p = getattr(self.callable, 'priority', self._priority)\n    cherrypy.serving.request.hooks.attach(self._point, self._wrapper, priority=p, **conf)",
    "docstring": "Wire this tool into ``. The standard CherryPy request object will automatically call this method when the tool is \"turned on\" in config.",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\_cptools.py",
    "ast_data": "FunctionDef name:_setup arg:self arguments arg Assign Call Assign Call If Compare Assign Call Call"
  },
  {
    "library": "pytorch",
    "name": "get_skip_tests",
    "source_code": "def get_skip_tests(suite, device, is_training: bool):\n    skip_tests = set()\n    original_dir = abspath(os.getcwd())\n    module = importlib.import_module(suite)\n    os.chdir(original_dir)\n    if suite == 'torchbench':\n        skip_tests.update(module.TorchBenchmarkRunner().skip_models)\n        if is_training:\n            skip_tests.update(module.TorchBenchmarkRunner().skip_not_suitable_for_training_models)\n        if device == 'cpu':\n            skip_tests.update(module.TorchBenchmarkRunner().skip_models_for_cpu)\n        elif device == 'cuda':\n            skip_tests.update(module.TorchBenchmarkRunner().skip_models_for_cuda)\n    skip_tests = (f'-x {name}' for name in skip_tests)\n    skip_str = ' '.join(skip_tests)\n    return skip_str",
    "docstring": "Generate -x seperated string to skip the unusual setup training tests",
    "type": "function",
    "file_path": "pytorch\\benchmarks\\dynamo\\runner.py",
    "ast_data": "FunctionDef name:get_skip_tests arg:suite arg:device arg:is_training arguments arg arg arg Assign Call Assign Call Call Assign Call Call If Compare Call Call If Call Call If Compare Call Call If Compare Call Call Assign Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "descendants",
    "source_code": "def descendants(self):\n    return self._descendants_with_paths()[0]",
    "docstring": "Returns a list of all nodes from self.root using a breadth first traversal.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\checkpoint\\trackable_view.py",
    "ast_data": "FunctionDef name:descendants arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "_evaluate",
    "source_code": "@abc.abstractmethod\ndef _evaluate(self):\n    pass",
    "docstring": "Return an evaluated expression. Parameters ---------- env : Scope The local and global environment in which to evaluate an expression. Notes ----- Must be implemented by subclasses.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\computation\\engines.py",
    "ast_data": "FunctionDef name:_evaluate arg:self arguments arg"
  },
  {
    "library": "pytorch",
    "name": "shard",
    "source_code": "@abstractmethod\ndef shard(self, tensor: torch.Tensor, src_rank: int=0, process_group=None) -> 'ShardedTensor':\n    pass",
    "docstring": "Given a global tensor on src_rank, shard this tensor across ranks within the process group, return a ShardedTensor. Args: tensor (:class:): Tensor needs to be sharded. Keyword args: src_rank (int, optional): The source rank which is used as the ground truth of the data for the parameter that would be sharded and scattered across the rest of the ranks. Default: 0. process_group (ProcessGroup, optional): The process group to work on. If None, the default process group will be used. Returns: A :class: sharded from the given tensor.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\_shard\\sharding_spec\\api.py",
    "ast_data": "FunctionDef name:shard arg:self arg:tensor arg:src_rank arg:process_group arguments arg arg arg arg"
  },
  {
    "library": "numpy",
    "name": "polydiv",
    "source_code": "def polydiv(c1, c2):\n    [c1, c2] = pu.as_series([c1, c2])\n    if c2[-1] == 0:\n        raise ZeroDivisionError\n    lc1 = len(c1)\n    lc2 = len(c2)\n    if lc1 < lc2:\n        return (c1[:1] * 0, c1)\n    elif lc2 == 1:\n        return (c1 / c2[-1], c1[:1] * 0)\n    else:\n        dlen = lc1 - lc2\n        scl = c2[-1]\n        c2 = c2[:-1] / scl\n        i = dlen\n        j = lc1 - 1\n        while i >= 0:\n            c1[i:j] -= c2 * c1[j]\n            i -= 1\n            j -= 1\n        return (c1[j + 1:] / scl, pu.trimseq(c1[:j + 1]))",
    "docstring": "Divide one polynomial by another. Returns the quotient-with-remainder of two polynomials / . The arguments are sequences of coefficients, from lowest order term to highest, e.g., [1,2,3] represents ``. Parameters ---------- c1, c2 : array_like 1-D arrays of polynomial coefficients ordered from low to high. Returns ------- [quo, rem] : ndarrays Of coefficient series representing the quotient and remainder. See Also -------- polyadd, polysub, polymulx, polymul, polypow Examples -------- >>> from numpy.polynomial import polynomial as P >>> c1 = (1, 2, 3) >>> c2 = (3, 2, 1) >>> P.polydiv(c1, c2) (array([3.]), array([-8., -4.])) >>> P.polydiv(c2, c1) (array([ 0.33333333]), array([ 2.66666667, 1.33333333])) # may vary",
    "type": "function",
    "file_path": "numpy\\numpy\\polynomial\\polynomial.py",
    "ast_data": "FunctionDef name:polydiv arg:c1 arg:c2 arguments arg arg Assign Call If Compare Raise Assign Call Assign Call If Compare Return return:yes If Compare Return return:yes Assign Assign Assign Assign Assign While Compare Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "_isna",
    "source_code": "def _isna(obj):\n    if is_scalar(obj):\n        return libmissing.checknull(obj)\n    elif isinstance(obj, ABCMultiIndex):\n        raise NotImplementedError('isna is not defined for MultiIndex')\n    elif isinstance(obj, type):\n        return False\n    elif isinstance(obj, (np.ndarray, ABCExtensionArray)):\n        return _isna_array(obj)\n    elif isinstance(obj, ABCIndex):\n        if not obj._can_hold_na:\n            return obj.isna()\n        return _isna_array(obj._values)\n    elif isinstance(obj, ABCSeries):\n        result = _isna_array(obj._values)\n        result = obj._constructor(result, index=obj.index, name=obj.name, copy=False)\n        return result\n    elif isinstance(obj, ABCDataFrame):\n        return obj.isna()\n    elif isinstance(obj, list):\n        return _isna_array(np.asarray(obj, dtype=object))\n    elif hasattr(obj, '__array__'):\n        return _isna_array(np.asarray(obj))\n    else:\n        return False",
    "docstring": "Detect missing values, treating None, NaN or NA as null. Parameters ---------- obj: ndarray or object value Input array or scalar value. Returns ------- boolean ndarray or boolean",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\dtypes\\missing.py",
    "ast_data": "FunctionDef name:_isna arg:obj arguments arg If Call Return return:yes Call If Call Raise Call If Call Return return:yes If Call Return return:yes Call If Call If Return return:yes Call Return return:yes Call If Call Assign Call Assign Call Return return:yes If Call Return return:yes Call If Call Return return:yes Call Call If Call Return return:yes Call Call Return return:yes"
  },
  {
    "library": "pandas",
    "name": "format_table_styles",
    "source_code": "def format_table_styles(styles: CSSStyles) -> CSSStyles:\n    return [{'selector': selector, 'props': css_dict['props']} for css_dict in styles for selector in css_dict['selector'].split(',')]",
    "docstring": "looks for multiple CSS selectors and separates them: [{'selector': 'td, th', 'props': 'a:v;'}] ---> [{'selector': 'td', 'props': 'a:v;'}, {'selector': 'th', 'props': 'a:v;'}]",
    "type": "function",
    "file_path": "pandas\\pandas\\io\\formats\\style_render.py",
    "ast_data": "FunctionDef name:format_table_styles arg:styles arguments arg Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "get_marker",
    "source_code": "def get_marker(self):\n    return self._marker.get_marker()",
    "docstring": "Return the line marker. See also .",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\lines.py",
    "ast_data": "FunctionDef name:get_marker arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "numpy",
    "name": "_can_dot",
    "source_code": "def _can_dot(inputs, result, idx_removed):\n    if len(idx_removed) == 0:\n        return False\n    if len(inputs) != 2:\n        return False\n    input_left, input_right = inputs\n    for c in set(input_left + input_right):\n        nl, nr = (input_left.count(c), input_right.count(c))\n        if nl > 1 or nr > 1 or nl + nr > 2:\n            return False\n        if nl + nr - 1 == int(c in result):\n            return False\n    set_left = set(input_left)\n    set_right = set(input_right)\n    keep_left = set_left - idx_removed\n    keep_right = set_right - idx_removed\n    rs = len(idx_removed)\n    if input_left == input_right:\n        return True\n    if set_left == set_right:\n        return False\n    if input_left[-rs:] == input_right[:rs]:\n        return True\n    if input_left[:rs] == input_right[-rs:]:\n        return True\n    if input_left[-rs:] == input_right[-rs:]:\n        return True\n    if input_left[:rs] == input_right[:rs]:\n        return True\n    if not keep_left or not keep_right:\n        return False\n    return True",
    "docstring": "Checks if we can use BLAS (np.tensordot) call and its beneficial to do so. Parameters ---------- inputs : list of str Specifies the subscripts for summation. result : str Resulting summation. idx_removed : set Indices that are removed in the summation Returns ------- type : bool Returns true if BLAS should and can be used, else False Notes ----- If the operations is BLAS level 1 or 2 and is not already aligned we default back to einsum as the memory movement to copy is more costly than the operation itself. Examples -------- # Standard GEMM operation >>> _can_dot(['ij', 'jk'], 'ik', set('j')) True # Can use the standard BLAS, but requires odd data movement >>> _can_dot(['ijj', 'jk'], 'ik', set('j')) False # DDOT where the memory is not aligned >>> _can_dot(['ijk', 'ikj'], '', set('ijk')) False",
    "type": "function",
    "file_path": "numpy\\numpy\\_core\\einsumfunc.py",
    "ast_data": "FunctionDef name:_can_dot arg:inputs arg:result arg:idx_removed arguments arg arg arg If Compare Call Return return:yes If Compare Call Return return:yes Assign For Call Assign Call Call If BoolOp Compare Compare Compare Return return:yes If Compare Call Compare Return return:yes Assign Call Assign Call Assign Assign Assign Call If Compare Return return:yes If Compare Return return:yes If Compare Return return:yes If Compare Return return:yes If Compare Return return:yes If Compare Return return:yes If BoolOp Return return:yes Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_jvp_helper_wrapper",
    "source_code": "def _jvp_helper_wrapper(op_name, attr_tuple, inputs, outputs, tangents, use_batch):\n    if use_batch:\n        for primal, tangent in zip(inputs, tangents):\n            if not tangent.shape.is_compatible_with([None] + primal.shape):\n                raise ValueError('Tangent {} was expected to be of shape {} but is instead of shape {}'.format(tangent, [None] + primal.shape, tangent.shape))\n        return control_flow_ops.vectorized_map(functools.partial(_jvp_helper, op_name, attr_tuple, inputs, outputs), tangents)\n    return _jvp_helper(op_name, attr_tuple, inputs, outputs, tangents)",
    "docstring": "Computes a batch of Jacobian-vector product for an op. Args: op_name: A string, the type of operation being executed. attr_tuple: Attributes of the operation. inputs: A flat list of input Tensors to the operation. outputs: A flat list of output Tensors from the operation. tangents: A flat list of Tensors, compatible with shape . use_batch: A bool, True to vetorize over batch of tangents of shape . Returns: A flat list of tangents compatible with or . Raises: ValueError: if tangent shapes are not compatible with input shapes.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\forwardprop.py",
    "ast_data": "FunctionDef name:_jvp_helper_wrapper arg:op_name arg:attr_tuple arg:inputs arg:outputs arg:tangents arg:use_batch arguments arg arg arg arg arg arg If For Call If Call Raise Call Call Return return:yes Call Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "benchmark",
    "source_code": "def benchmark(self, choices: list[TritonTemplateCaller]) -> dict[TritonTemplateCaller, float]:\n    results = dict(zip(choices, self.executor.map(self.target, choices)))\n    return results",
    "docstring": "Benchmark each choice in a separate process.",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\autotune_process.py",
    "ast_data": "FunctionDef name:benchmark arg:self arg:choices arguments arg arg Assign Call Call Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "get_projection_class",
    "source_code": "def get_projection_class(self, name):\n    return self._all_projection_types[name]",
    "docstring": "Get a projection class from its *name*.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\projections\\__init__.py",
    "ast_data": "FunctionDef name:get_projection_class arg:self arg:name arguments arg arg Return return:yes"
  },
  {
    "library": "authlib",
    "name": "sign",
    "source_code": "def sign(self, method, uri, headers, body):\n    nonce = generate_nonce()\n    timestamp = generate_timestamp()\n    if body is None:\n        body = b''\n    timestamp = str(timestamp)\n    if headers is None:\n        headers = {}\n    oauth_params = self.get_oauth_params(nonce, timestamp)\n    if body and headers.get('Content-Type') != CONTENT_TYPE_FORM_URLENCODED:\n        oauth_body_hash = base64.b64encode(hashlib.sha1(body).digest())\n        oauth_params.append(('oauth_body_hash', oauth_body_hash.decode('utf-8')))\n    uri, headers, body = self._render(uri, headers, body, oauth_params)\n    sig = self.get_oauth_signature(method, uri, headers, body)\n    oauth_params.append(('oauth_signature', sig))\n    uri, headers, body = self._render(uri, headers, body, oauth_params)\n    return (uri, headers, body)",
    "docstring": "Sign the HTTP request, add OAuth parameters and signature. :param method: HTTP method of the request. :param uri: URI of the HTTP request. :param body: Body payload of the HTTP request. :param headers: Headers of the HTTP request. :return: uri, headers, body",
    "type": "method",
    "file_path": "authlib\\authlib\\oauth1\\rfc5849\\client_auth.py",
    "ast_data": "FunctionDef name:sign arg:self arg:method arg:uri arg:headers arg:body arguments arg arg arg arg arg Assign Call Assign Call If Compare Assign Assign Call If Compare Assign Assign Call If BoolOp Compare Call Assign Call Call Call Call Call Assign Call Assign Call Call Assign Call Return return:yes"
  },
  {
    "library": "django",
    "name": "geo_db_type",
    "source_code": "def geo_db_type(self, f):\n    raise NotImplementedError('subclasses of BaseSpatialOperations must provide a geo_db_type() method')",
    "docstring": "Return the database column type for the geometry field on the spatial backend.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\db\\backends\\base\\operations.py",
    "ast_data": "FunctionDef name:geo_db_type arg:self arg:f arguments arg arg Raise Call"
  },
  {
    "library": "scipy",
    "name": "precision",
    "source_code": "def precision(key: str) -> float:\n    _check_obsolete(key)\n    return physical_constants[key][2] / physical_constants[key][0]",
    "docstring": "Relative precision in physical_constants indexed by key Parameters ---------- key : Python string Key in dictionary Returns ------- prec : float Relative precision in corresponding to Examples -------- >>> from scipy import constants >>> constants.precision('proton mass') 5.1e-37",
    "type": "function",
    "file_path": "scipy\\scipy\\constants\\_codata.py",
    "ast_data": "FunctionDef name:precision arg:key arguments arg Call Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "_assemble_r2_explained_variance",
    "source_code": "def _assemble_r2_explained_variance(numerator, denominator, n_outputs, multioutput, force_finite, xp, device):\n    dtype = numerator.dtype\n    nonzero_denominator = denominator != 0\n    if not force_finite:\n        output_scores = 1 - numerator / denominator\n    else:\n        nonzero_numerator = numerator != 0\n        output_scores = xp.ones([n_outputs], device=device, dtype=dtype)\n        valid_score = nonzero_denominator & nonzero_numerator\n        output_scores[valid_score] = 1 - numerator[valid_score] / denominator[valid_score]\n        output_scores[nonzero_numerator & ~nonzero_denominator] = 0.0\n    if isinstance(multioutput, str):\n        if multioutput == 'raw_values':\n            return output_scores\n        elif multioutput == 'uniform_average':\n            avg_weights = None\n        elif multioutput == 'variance_weighted':\n            avg_weights = denominator\n            if not xp.any(nonzero_denominator):\n                avg_weights = None\n    else:\n        avg_weights = multioutput\n    result = _average(output_scores, weights=avg_weights)\n    if size(result) == 1:\n        return float(result)\n    return result",
    "docstring": "Common part used by explained variance score and :math: score.",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\metrics\\_regression.py",
    "ast_data": "FunctionDef name:_assemble_r2_explained_variance arg:numerator arg:denominator arg:n_outputs arg:multioutput arg:force_finite arg:xp arg:device arguments arg arg arg arg arg arg arg Assign Assign Compare If Assign Assign Compare Assign Call Assign Assign Assign If Call If Compare Return return:yes If Compare Assign If Compare Assign If Call Assign Assign Assign Call If Compare Call Return return:yes Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_parse_local_version",
    "source_code": "def _parse_local_version(local: Optional[str]) -> Optional[LocalType]:\n    if local is not None:\n        return tuple((part.lower() if not part.isdigit() else int(part) for part in _local_version_separators.split(local)))\n    return None",
    "docstring": "Takes a string like abc.1.twelve and turns it into (\"abc\", 1, \"twelve\").",
    "type": "function",
    "file_path": "pytorch\\torch\\_vendor\\packaging\\version.py",
    "ast_data": "FunctionDef name:_parse_local_version arg:local arguments arg If Compare Return return:yes Call Call Call Call Call Return return:no"
  },
  {
    "library": "pandas",
    "name": "check_key_length",
    "source_code": "def check_key_length(columns: Index, key, value: DataFrame) -> None:\n    if columns.is_unique:\n        if len(value.columns) != len(key):\n            raise ValueError('Columns must be same length as key')\n    elif len(columns.get_indexer_non_unique(key)[0]) != len(value.columns):\n        raise ValueError('Columns must be same length as key')",
    "docstring": "Checks if a key used as indexer has the same length as the columns it is associated with. Parameters ---------- columns : Index The columns of the DataFrame to index. key : A list-like of keys to index with. value : DataFrame The value to set for the keys. Raises ------ ValueError: If the length of key is not equal to the number of columns in value or if the number of columns referenced by key is not equal to number of columns.",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\indexers\\utils.py",
    "ast_data": "FunctionDef name:check_key_length arg:columns arg:key arg:value arguments arg arg arg If If Compare Call Call Raise Call If Compare Call Call Call Raise Call"
  },
  {
    "library": "tensorflow",
    "name": "_preserve_control_flow_context",
    "source_code": "@contextlib.contextmanager\ndef _preserve_control_flow_context(tensor):\n    context = tensor.op._get_control_flow_context()\n    if context:\n        context.Enter()\n    try:\n        yield\n    finally:\n        if context:\n            context.Exit()",
    "docstring": "Preserve the control flow context for the given tensor. Sets the graph context to the tensor's context so that side effect ops are added under the same context. This is needed when subscribing to tensors defined within a conditional block or a while loop. In these cases we need that the side-effect ops are created within the same control flow context as that of the tensor they are attached to. Args: tensor: tensor whose context should be preserved. Yields: None",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\subscribe.py",
    "ast_data": "FunctionDef name:_preserve_control_flow_context arg:tensor arguments arg Assign Call If Call Try If Call"
  },
  {
    "library": "tensorflow",
    "name": "alias_inplace_sub",
    "source_code": "@deprecation.deprecated(None, 'Prefer tf.tensor_scatter_nd_sub, which offers the same functionality with well-defined read-write semantics.')\ndef alias_inplace_sub(x, i, v):\n    return _inplace_helper(x, i, v, gen_array_ops.inplace_sub)",
    "docstring": "Applies an inplace sub on input x at index i with value v. Aliases x. If i is None, x and v must be the same shape. Computes x -= v; If i is a scalar, x has a rank 1 higher than v's. Computes x[i, :] -= v; Otherwise, x and v must have the same rank. Computes x[i, :] -= v; Args: x: A Tensor. i: None, a scalar or a vector. v: A Tensor. Returns: Returns x.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\inplace_ops.py",
    "ast_data": "FunctionDef name:alias_inplace_sub arg:x arg:i arg:v arguments arg arg arg Return return:yes Call Call"
  },
  {
    "library": "numpy",
    "name": "add_include_dirs",
    "source_code": "def add_include_dirs(self, *paths):\n    include_dirs = self.paths(paths)\n    dist = self.get_distribution()\n    if dist is not None:\n        if dist.include_dirs is None:\n            dist.include_dirs = []\n        dist.include_dirs.extend(include_dirs)\n    else:\n        self.include_dirs.extend(include_dirs)",
    "docstring": "Add paths to configuration include directories. Add the given sequence of paths to the beginning of the include_dirs list. This list will be visible to all extension modules of the current package.",
    "type": "method",
    "file_path": "numpy\\numpy\\distutils\\misc_util.py",
    "ast_data": "FunctionDef name:add_include_dirs arg:self arguments arg arg Assign Call Assign Call If Compare If Compare Assign Call Call"
  },
  {
    "library": "pytorch",
    "name": "failing",
    "source_code": "def failing(self) -> bool:\n    return self == Status.FAILED_COMPILE or self == Status.FAILED_RUN_EAGER_EXCEPTION or self == Status.FAILED_RUN_COMPILE_EXCEPTION or (self == Status.FAILED_RUN_RETURN)",
    "docstring": "Convenience method to check whether these status represent failure.",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\fuzzer.py",
    "ast_data": "FunctionDef name:failing arg:self arguments arg Return return:yes BoolOp Compare Compare Compare Compare"
  },
  {
    "library": "tensorflow",
    "name": "FeedFnHook",
    "source_code": "@tf_export(v1=['train.FeedFnHook'])\nclass FeedFnHook(session_run_hook.SessionRunHook):\n\n    def __init__(self, feed_fn):\n        self.feed_fn = feed_fn\n\n    def before_run(self, run_context):\n        return session_run_hook.SessionRunArgs(fetches=None, feed_dict=self.feed_fn())",
    "docstring": "Runs and sets the accordingly.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\training\\basic_session_run_hooks.py",
    "ast_data": "ClassDef name:FeedFnHook FunctionDef name:__init__ arg:self arg:feed_fn arguments arg arg Assign FunctionDef name:before_run arg:self arg:run_context arguments arg arg Return return:yes Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_run_graph",
    "source_code": "def _run_graph(self, device, input_shape, axes, num_layers, mode, scale, train, num_iters):\n    graph = ops.Graph()\n    with graph.as_default():\n        outputs = build_graph(device, input_shape, axes, num_layers, mode, scale, train)\n    with session_lib.Session(graph=graph) as session:\n        variables.global_variables_initializer().run()\n        _ = session.run([out.op for out in outputs])\n        start_time = time.time()\n        for _ in range(num_iters):\n            _ = session.run([out.op for out in outputs])\n        duration = time.time() - start_time\n    print('%s shape:%d/%d #layers:%d mode:%s scale:%r train:%r - %f secs' % (device, len(input_shape), len(axes), num_layers, mode, scale, train, duration / num_iters))\n    name_template = 'batch_norm_{device}_input_shape_{shape}_axes_{axes}_mode_{mode}_layers_{num_layers}_scale_{scale}_train_{train}'\n    self.report_benchmark(name=name_template.format(device=device, mode=mode, num_layers=num_layers, scale=scale, train=train, shape=str(input_shape).replace(' ', ''), axes=str(axes)).replace(' ', ''), iters=num_iters, wall_time=duration / num_iters)\n    return duration",
    "docstring": "Run the graph and print its execution time. Args: device: string, the device to run on. input_shape: shape of the input tensor. axes: axes that are to be normalized across. num_layers: number of batch normalization layers in the graph. mode: \"op\", \"py\" or \"slow\" depending on the implementation. scale: scale after normalization. train: if true, also run backprop. num_iters: number of steps to run. Returns: The duration of the run in seconds.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\batch_norm_benchmark.py",
    "ast_data": "FunctionDef name:_run_graph arg:self arg:device arg:input_shape arg:axes arg:num_layers arg:mode arg:scale arg:train arg:num_iters arguments arg arg arg arg arg arg arg arg arg Assign Call With Call Assign Call With Call Call Call Assign Call Assign Call For Call Assign Call Assign Call Call Call Call Assign Call Call Call Call Call Call Return return:yes"
  },
  {
    "library": "pygame",
    "name": "set_controls",
    "source_code": "def set_controls(self, **kwargs):\n    pass",
    "docstring": "Not implemented.",
    "type": "method",
    "file_path": "pygame\\src_py\\_camera_vidcapture.py",
    "ast_data": "FunctionDef name:set_controls arg:self arguments arg arg"
  },
  {
    "library": "pytorch",
    "name": "SplitPoint",
    "source_code": "class SplitPoint(Enum):\n    BEGINNING = 1\n    END = 2",
    "docstring": "Enum representing the points at which a split can occur in the execution of a submodule. Attributes: BEGINNING: Represents adding a split point *before* the execution of a certain submodule in the function. END: Represents adding a split point *after* the execution of a certain submodule in the function.",
    "type": "class",
    "file_path": "pytorch\\torch\\distributed\\pipelining\\_IR.py",
    "ast_data": "ClassDef name:SplitPoint Assign Assign"
  },
  {
    "library": "pygame",
    "name": "layers",
    "source_code": "def layers(self):\n    return sorted(set(self._spritelayers.values()))",
    "docstring": "return a list of unique defined layers defined. LayeredUpdates.layers(): return layers",
    "type": "method",
    "file_path": "pygame\\src_py\\sprite.py",
    "ast_data": "FunctionDef name:layers arg:self arguments arg Return return:yes Call Call Call"
  },
  {
    "library": "django",
    "name": "flatchoices",
    "source_code": "@property\ndef flatchoices(self):\n    return list(flatten_choices(self.choices))",
    "docstring": "Flattened version of choices tuple.",
    "type": "method",
    "file_path": "django\\django\\db\\models\\fields\\__init__.py",
    "ast_data": "FunctionDef name:flatchoices arg:self arguments arg Return return:yes Call Call"
  },
  {
    "library": "sphinx",
    "name": "IndexEntry",
    "source_code": "class IndexEntry(NamedTuple):\n    name: str\n    subtype: int\n    docname: str\n    anchor: str\n    extra: str\n    qualifier: str\n    descr: str",
    "docstring": "An index entry. .. note:: The *qualifier* and *description* are not rendered for some output formats, such as LaTeX.",
    "type": "class",
    "file_path": "sphinx\\sphinx\\domains\\_index.py",
    "ast_data": "ClassDef name:IndexEntry"
  },
  {
    "library": "tensorflow",
    "name": "global_variables",
    "source_code": "def global_variables(self):\n    return self.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)",
    "docstring": "Get this scope's global variables.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\variable_scope.py",
    "ast_data": "FunctionDef name:global_variables arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "_zinv_to_z",
    "source_code": "@staticmethod\ndef _zinv_to_z(num, den):\n    diff = len(num) - len(den)\n    if diff > 0:\n        den = np.hstack((den, np.zeros(diff)))\n    elif diff < 0:\n        num = np.hstack((num, np.zeros(-diff)))\n    return (num, den)",
    "docstring": "Change a transfer function from the variable to . Parameters ---------- num, den: 1d array_like Sequences representing the coefficients of the numerator and denominator polynomials, in order of ascending degree of 'z**-1'. That is, ``.",
    "type": "method",
    "file_path": "scipy\\scipy\\signal\\_ltisys.py",
    "ast_data": "FunctionDef name:_zinv_to_z arg:num arg:den arguments arg arg Assign Call Call If Compare Assign Call Call If Compare Assign Call Call Return return:yes"
  },
  {
    "library": "authlib",
    "name": "generate_user_info",
    "source_code": "def generate_user_info(self, user, scope):\n    raise NotImplementedError()",
    "docstring": "Provide user information for the given scope. Developers MUST implement this method in subclass, e.g.:: from authlib.oidc.core import UserInfo def generate_user_info(self, user, scope): user_info = UserInfo(sub=user.id, name=user.name) if \"email\" in scope: user_info[\"email\"] = user.email return user_info :param user: user instance :param scope: scope of the token :return: `` instance",
    "type": "method",
    "file_path": "authlib\\authlib\\oidc\\core\\grants\\implicit.py",
    "ast_data": "FunctionDef name:generate_user_info arg:self arg:user arg:scope arguments arg arg arg Raise Call"
  },
  {
    "library": "django",
    "name": "get_files",
    "source_code": "def get_files(storage, ignore_patterns=None, location=''):\n    if ignore_patterns is None:\n        ignore_patterns = []\n    directories, files = storage.listdir(location)\n    for fn in files:\n        if matches_patterns(fn, ignore_patterns):\n            continue\n        if location:\n            fn = os.path.join(location, fn)\n            if matches_patterns(fn, ignore_patterns):\n                continue\n        yield fn\n    for dir in directories:\n        if matches_patterns(dir, ignore_patterns):\n            continue\n        if location:\n            dir = os.path.join(location, dir)\n        yield from get_files(storage, ignore_patterns, dir)",
    "docstring": "Recursively walk the storage directories yielding the paths of all files that should be copied.",
    "type": "function",
    "file_path": "django\\django\\contrib\\staticfiles\\utils.py",
    "ast_data": "FunctionDef name:get_files arg:storage arg:ignore_patterns arg:location arguments arg arg arg If Compare Assign Assign Call For If Call If Assign Call If Call For If Call If Assign Call Call"
  },
  {
    "library": "virtualenv",
    "name": "ensure_extracted",
    "source_code": "@contextmanager\ndef ensure_extracted(self, path, to_folder=None):\n    if IS_ZIPAPP:\n        with self.extract(path, to_folder) as result:\n            yield result\n    else:\n        yield path",
    "docstring": "Some paths might be within the zipapp, unzip these to a path on the disk.",
    "type": "method",
    "file_path": "virtualenv\\src\\virtualenv\\app_data\\base.py",
    "ast_data": "FunctionDef name:ensure_extracted arg:self arg:path arg:to_folder arguments arg arg arg If With Call"
  },
  {
    "library": "pytorch",
    "name": "LowPrecisionState",
    "source_code": "class LowPrecisionState(DefaultState):\n    __slots__ = ['parameter_type']\n\n    def __init__(self, process_group, parameter_type=torch.float32):\n        super().__init__(process_group)\n        self.parameter_type = parameter_type",
    "docstring": "Stores state needed to perform gradient communication in a lower precision within a communication hook. Communication hook will cast gradients back to the original parameter precision specified by `DefaultState`. Args: parameter_type (torch.dtype): The precision of model's parameters. Required for a hook to cast gradients back to a parameter's precision.",
    "type": "class",
    "file_path": "pytorch\\torch\\distributed\\algorithms\\_comm_hooks\\default_hooks.py",
    "ast_data": "ClassDef name:LowPrecisionState Assign FunctionDef name:__init__ arg:self arg:process_group arg:parameter_type arguments arg arg arg Call Call Assign"
  },
  {
    "library": "pandas",
    "name": "_find_common_type_compat",
    "source_code": "@final\ndef _find_common_type_compat(self, target) -> DtypeObj:\n    target_dtype, _ = infer_dtype_from(target)\n    if using_string_dtype():\n        from pandas.core.indexes.range import RangeIndex\n        if len(self) == 0 and (isinstance(self, RangeIndex) or self.dtype == np.object_):\n            return target_dtype\n        if isinstance(target, Index) and len(target) == 0 and (isinstance(target, RangeIndex) or target_dtype == np.object_):\n            return self.dtype\n    if self.dtype == 'uint64' or target_dtype == 'uint64':\n        if is_signed_integer_dtype(self.dtype) or is_signed_integer_dtype(target_dtype):\n            return _dtype_obj\n    dtype = find_result_type(self.dtype, target)\n    dtype = common_dtype_categorical_compat([self, target], dtype)\n    return dtype",
    "docstring": "Implementation of find_common_type that adjusts for Index-specific special cases.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\indexes\\base.py",
    "ast_data": "FunctionDef name:_find_common_type_compat arg:self arg:target arguments arg arg Assign Call If Call If BoolOp Compare Call BoolOp Call Compare Return return:yes If BoolOp Call Compare Call BoolOp Call Compare Return return:yes If BoolOp Compare Compare If BoolOp Call Call Return return:yes Assign Call Assign Call Return return:yes"
  },
  {
    "library": "kornia",
    "name": "_compute_tiles",
    "source_code": "def _compute_tiles(imgs: torch.Tensor, grid_size: Tuple[int, int], even_tile_size: bool=False) -> Tuple[torch.Tensor, torch.Tensor]:\n    batch: torch.Tensor = imgs\n    h, w = batch.shape[-2:]\n    kernel_vert: int = math.ceil(h / grid_size[0])\n    kernel_horz: int = math.ceil(w / grid_size[1])\n    if even_tile_size:\n        kernel_vert += 1 if kernel_vert % 2 else 0\n        kernel_horz += 1 if kernel_horz % 2 else 0\n    pad_vert = kernel_vert * grid_size[0] - h\n    pad_horz = kernel_horz * grid_size[1] - w\n    if pad_vert > batch.shape[-2] or pad_horz > batch.shape[-1]:\n        raise ValueError('Cannot compute tiles on the image according to the given grid size')\n    if pad_vert > 0 or pad_horz > 0:\n        batch = F.pad(batch, [0, pad_horz, 0, pad_vert], mode='reflect')\n    c: int = batch.shape[-3]\n    tiles: torch.Tensor = batch.unfold(1, c, c).unfold(2, kernel_vert, kernel_vert).unfold(3, kernel_horz, kernel_horz).squeeze(1).contiguous()\n    if tiles.shape[-5] != grid_size[0]:\n        raise AssertionError\n    if tiles.shape[-4] != grid_size[1]:\n        raise AssertionError\n    return (tiles, batch)",
    "docstring": "Compute tiles on an image according to a grid size. Note that padding can be added to the image in order to crop properly the image. So, the grid_size (GH, GW) x tile_size (TH, TW) >= image_size (H, W) Args: imgs: batch of 2D images with shape (B, C, H, W) or (C, H, W). grid_size: number of tiles to be cropped in each direction (GH, GW) even_tile_size: Determine if the width and height of the tiles must be even. Returns: tensor with tiles (B, GH, GW, C, TH, TW). B = 1 in case of a single image is provided. tensor with the padded batch of 2D imageswith shape (B, C, H', W').",
    "type": "function",
    "file_path": "kornia\\kornia\\enhance\\equalization.py",
    "ast_data": "FunctionDef name:_compute_tiles arg:imgs arg:grid_size arg:even_tile_size arguments arg arg arg Assign Call Call If Assign Assign If BoolOp Compare Compare Raise Call If BoolOp Compare Compare Assign Call Call Call Call Call Call If Compare Raise If Compare Raise Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_recursive_copy_to_device",
    "source_code": "def _recursive_copy_to_device(value: Any, non_blocking: bool, device: torch.device) -> Any:\n    if isinstance(value, torch.Tensor):\n        return value.to(device, non_blocking=non_blocking)\n    if isinstance(value, (list, tuple)):\n        values = [_recursive_copy_to_device(val, non_blocking=non_blocking, device=device) for val in value]\n        return values if isinstance(value, list) else tuple(values)\n    if isinstance(value, collections.abc.Mapping):\n        return {key: _recursive_copy_to_device(val, non_blocking=non_blocking, device=device) for key, val in value.items()}\n    return value",
    "docstring": "Recursively searches lists, tuples, dicts and copies tensors to device if possible. Non-tensor values are passed as-is in the result. .. note:: These are all copies, so if there are two objects that reference the same object, then after this call, there will be two different objects referenced on the device.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\optim\\zero_redundancy_optimizer.py",
    "ast_data": "FunctionDef name:_recursive_copy_to_device arg:value arg:non_blocking arg:device arguments arg arg arg If Call Return return:yes Call If Call Assign Call Return return:yes Call Call If Call Return return:yes Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_call_ntuple",
    "source_code": "def _call_ntuple(self, tx: 'InstructionTranslator', args, kwargs):\n    if self.value is torch.nn.modules.utils._ntuple:\n        count = args[0].as_python_constant()\n    else:\n        count = self.value.__closure__[0].cell_contents\n    assert isinstance(count, int)\n    assert not kwargs\n\n    def handle_ntuple(value):\n        if value.has_unpack_var_sequence(tx):\n            return variables.TupleVariable(list(value.unpack_var_sequence(tx)))\n        elif value.is_python_constant():\n            return variables.ConstantVariable.create(torch.nn.modules.utils._ntuple(count)(value.as_python_constant()))\n        else:\n            unimplemented(f'torch.nn.modules.utils._ntuple({value})')\n    if self.value is torch.nn.modules.utils._ntuple:\n        return variables.LambdaVariable(handle_ntuple)\n    else:\n        return handle_ntuple(args[0])",
    "docstring": "inline behavior of torch.nn.modules.utils._ntuple",
    "type": "method",
    "file_path": "pytorch\\torch\\_dynamo\\variables\\torch.py",
    "ast_data": "FunctionDef name:_call_ntuple arg:self arg:tx arg:args arg:kwargs arguments arg arg arg arg If Compare Assign Call Assign Call FunctionDef name:handle_ntuple arg:value arguments arg If Call Return return:yes Call Call Call If Call Return return:yes Call Call Call Call Call If Compare Return return:yes Call Return return:yes Call"
  },
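  {
    "library": "pytorch",
    "name": "_ntuple_behavior_example",
    "source_code": "from torch.nn.modules.utils import _ntuple\n\n# The utility whose eager behavior _call_ntuple inlines under Dynamo:\nto_pair = _ntuple(2)\nprint(to_pair(3))       # (3, 3): a scalar is repeated count times\nprint(to_pair((4, 5)))  # (4, 5): an iterable is passed through as a tuple",
    "docstring": "Sketch of the eager behavior that _call_ntuple reproduces symbolically: torch.nn.modules.utils._ntuple(count) returns a parser that repeats scalars count times and converts iterables to tuples.",
    "type": "example",
    "file_path": "pytorch\\torch\\_dynamo\\variables\\torch.py"
  },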
  {
    "library": "pytorch",
    "name": "_wait_batch_p2p",
    "source_code": "def _wait_batch_p2p(work: list[dist.Work]):\n    for w in work:\n        w.wait()",
    "docstring": "Waits for a list of dist.Work (typically from _batch_p2p / _sorted_batch_p2p).",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\pipelining\\schedules.py",
    "ast_data": "FunctionDef name:_wait_batch_p2p arg:work arguments arg For Call"
  },
  {
    "library": "pytorch",
    "name": "_resolve_name",
    "source_code": "def _resolve_name(name, package, level):\n    bits = package.rsplit('.', level - 1)\n    if len(bits) < level:\n        raise ValueError('attempted relative import beyond top-level package')\n    base = bits[0]\n    return f'{base}.{name}' if name else base",
    "docstring": "Resolve a relative module name to an absolute one.",
    "type": "function",
    "file_path": "pytorch\\torch\\package\\_importlib.py",
    "ast_data": "FunctionDef name:_resolve_name arg:name arg:package arg:level arguments arg arg arg Assign Call If Compare Call Raise Call Assign Return return:yes"
  },
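  {
    "library": "pytorch",
    "name": "_resolve_name_usage_example",
    "source_code": "# Hedged usage sketch; the private import path is assumed from file_path.\nfrom torch.package._importlib import _resolve_name\n\n# Resolving \"..sibling\" relative to package \"pkg.sub.mod\":\n# level=2 drops the last component of the package path.\nprint(_resolve_name(\"sibling\", \"pkg.sub.mod\", 2))  # pkg.sub.sibling\nprint(_resolve_name(\"\", \"pkg.sub.mod\", 2))         # pkg.sub (empty name returns the base)",
    "docstring": "Hedged usage sketch (not part of the pytorch source): mirrors importlib's relative-import resolution. Package and module names are hypothetical.",
    "type": "example",
    "file_path": "pytorch\\torch\\package\\_importlib.py"
  },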
  {
    "library": "numpy",
    "name": "_check_order",
    "source_code": "def _check_order(types1, types2):\n    dtype_order = bints + 'kK' + times + flts + cmplxP + 'O'\n    for t1, t2 in zip(types1, types2):\n        if t1 in 'OP' or t2 in 'OP':\n            return True\n        if t1 in 'mM' or t2 in 'mM':\n            return True\n        t1i = dtype_order.index(t1)\n        t2i = dtype_order.index(t2)\n        if t1i < t2i:\n            return\n        if t2i > t1i:\n            break\n    if types1 == 'QQ?' and types2 == 'qQ?':\n        return\n    raise TypeError(f'Input dtypes are unsorted or duplicate: {types1} and {types2}')",
    "docstring": "Helper to check that the loop types are ordered. The legacy type resolver (and potentially downstream) may pick use the first loop to which operands can be cast safely.",
    "type": "function",
    "file_path": "numpy\\numpy\\_core\\code_generators\\generate_umath.py",
    "ast_data": "FunctionDef name:_check_order arg:types1 arg:types2 arguments arg arg Assign For Call If BoolOp Compare Compare Return return:yes If BoolOp Compare Compare Return return:yes Assign Call Assign Call If Compare Return return:no If Compare If BoolOp Compare Compare Return return:no Raise Call"
  },
  {
    "library": "pandas",
    "name": "nunique_ints",
    "source_code": "def nunique_ints(values: ArrayLike) -> int:\n    if len(values) == 0:\n        return 0\n    values = _ensure_data(values)\n    result = (np.bincount(values.ravel().astype('intp')) != 0).sum()\n    return result",
    "docstring": "Return the number of unique values for integer array-likes. Significantly faster than pandas.unique for long enough sequences. No checks are done to ensure input is integral. Parameters ---------- values : 1d array-like Returns ------- int : The number of unique values in ``",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\algorithms.py",
    "ast_data": "FunctionDef name:nunique_ints arg:values arguments arg If Compare Call Return return:yes Assign Call Assign Call Compare Call Call Call Return return:yes"
  },
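  {
    "library": "pandas",
    "name": "nunique_ints_usage_example",
    "source_code": "import numpy as np\n\n# Hedged usage sketch; the private import path is assumed from file_path.\nfrom pandas.core.algorithms import nunique_ints\n\narr = np.array([3, 1, 3, 7, 1])\nprint(nunique_ints(arr))  # 3; no integrality checks are performed on the input",
    "docstring": "Hedged usage sketch (not part of the pandas source): counts distinct values in an integer array via the bincount strategy shown above.",
    "type": "example",
    "file_path": "pandas\\pandas\\core\\algorithms.py"
  },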
  {
    "library": "django",
    "name": "get_queryset",
    "source_code": "def get_queryset(self):\n    return self._queryset_class(model=self.model, using=self._db, hints=self._hints)",
    "docstring": "Return a new QuerySet object. Subclasses can override this method to customize the behavior of the Manager.",
    "type": "method",
    "file_path": "django\\django\\db\\models\\manager.py",
    "ast_data": "FunctionDef name:get_queryset arg:self arguments arg Return return:yes Call"
  },
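  {
    "library": "django",
    "name": "get_queryset_override_example",
    "source_code": "from django.db import models\n\n# Minimal sketch of the customization the docstring describes; the model\n# and manager names are hypothetical and require a configured Django app.\nclass PublishedManager(models.Manager):\n    def get_queryset(self):\n        # Narrow the base QuerySet returned by the manager.\n        return super().get_queryset().filter(published=True)\n\nclass Article(models.Model):\n    published = models.BooleanField(default=False)\n    objects = PublishedManager()",
    "docstring": "Sketch (not from the django source) of subclassing Manager and overriding get_queryset, which is the customization point the docstring above names.",
    "type": "example",
    "file_path": "django\\django\\db\\models\\manager.py"
  },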
  {
    "library": "scipy",
    "name": "entropy",
    "source_code": "@abstractmethod\ndef entropy(self, *, method):\n    raise NotImplementedError()",
    "docstring": "Differential entropy In terms of probability density function :math: and support :math:, the differential entropy (or simply \"entropy\") of a continuous random variable :math: is: .. math:: h(X) = - \\int_{\\chi} f(x) \\log f(x) dx The definition for a discrete random variable is analogous, with the PMF replacing the PDF and a sum over the support replacing the integral. Parameters ---------- method : {None, 'formula', 'logexp', 'quadrature'} The strategy used to evaluate the entropy. By default (`methodmethode`. Consequently, the value is expressed in (dimensionless) \"units\" of nats. To convert the entropy to different units (i.e. corresponding with a different base), divide the result by the natural logarithm of the desired base. References ---------- .. [1] Differential entropy, *Wikipedia*, Examples -------- Instantiate a distribution with the desired parameters: >>> from scipy import stats >>> X = stats.Uniform(a=-1., b=1.) Evaluate the entropy: >>> X.entropy() 0.6931471805599454",
    "type": "method",
    "file_path": "scipy\\scipy\\stats\\_probability_distribution.py",
    "ast_data": "FunctionDef name:entropy arg:self arguments arg arg Raise Call"
  },
  {
    "library": "pytorch",
    "name": "_get_next",
    "source_code": "def _get_next(self):\n    _check_iterator_valid(self.datapipe, self.iterator_id)\n    result = next(self.iterator)\n    if not self.self_and_has_next_method:\n        self.datapipe._number_of_samples_yielded += 1\n    return result",
    "docstring": "Return next with logic related to iterator validity, profiler, and incrementation of samples yielded.",
    "type": "method",
    "file_path": "pytorch\\torch\\utils\\data\\datapipes\\_hook_iterator.py",
    "ast_data": "FunctionDef name:_get_next arg:self arguments arg Call Assign Call If Return return:yes"
  },
  {
    "library": "django",
    "name": "check_str_arg",
    "source_code": "def check_str_arg(result, func, cargs):\n    dbl = result\n    ptr = cargs[-1]._obj\n    return (dbl, ptr.value.decode())",
    "docstring": "This is for the OSRGet[Angular|Linear]Units functions, which require that the returned string pointer not be freed. This returns both the double and string values.",
    "type": "function",
    "file_path": "django\\django\\contrib\\gis\\gdal\\prototypes\\errcheck.py",
    "ast_data": "FunctionDef name:check_str_arg arg:result arg:func arg:cargs arguments arg arg arg Assign Assign Return return:yes Call"
  },
  {
    "library": "numpy",
    "name": "_as_pairs",
    "source_code": "def _as_pairs(x, ndim, as_index=False):\n    if x is None:\n        return ((None, None),) * ndim\n    x = np.array(x)\n    if as_index:\n        x = np.round(x).astype(np.intp, copy=False)\n    if x.ndim < 3:\n        if x.size == 1:\n            x = x.ravel()\n            if as_index and x < 0:\n                raise ValueError(\"index can't contain negative values\")\n            return ((x[0], x[0]),) * ndim\n        if x.size == 2 and x.shape != (2, 1):\n            x = x.ravel()\n            if as_index and (x[0] < 0 or x[1] < 0):\n                raise ValueError(\"index can't contain negative values\")\n            return ((x[0], x[1]),) * ndim\n    if as_index and x.min() < 0:\n        raise ValueError(\"index can't contain negative values\")\n    return np.broadcast_to(x, (ndim, 2)).tolist()",
    "docstring": "Broadcast to an array with the shape (, 2). A helper function for that prepares and validates arguments like for iteration in pairs. Parameters ---------- x : {None, scalar, array-like} The object to broadcast to the shape (, 2). ndim : int Number of pairs the broadcasted will have. as_index : bool, optional If is not None, try to round each element of to an integer (dtype ) and ensure every element is positive. Returns ------- pairs : nested iterables, shape (, 2) The broadcasted version of . Raises ------ ValueError If is True and contains negative elements. Or if is not broadcastable to the shape (, 2).",
    "type": "function",
    "file_path": "numpy\\numpy\\lib\\_arraypad_impl.py",
    "ast_data": "FunctionDef name:_as_pairs arg:x arg:ndim arg:as_index arguments arg arg arg If Compare Return return:yes Assign Call If Assign Call Call If Compare If Compare Assign Call If BoolOp Compare Raise Call Return return:yes If BoolOp Compare Compare Assign Call If BoolOp BoolOp Compare Compare Raise Call Return return:yes If BoolOp Compare Call Raise Call Return return:yes Call Call"
  },
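  {
    "library": "numpy",
    "name": "_as_pairs_usage_example",
    "source_code": "# Hedged usage sketch; the private import path is assumed from file_path.\nfrom numpy.lib._arraypad_impl import _as_pairs\n\nprint(_as_pairs(2, ndim=3))       # ((2, 2), (2, 2), (2, 2)): scalar broadcast to all axes\nprint(_as_pairs((1, 4), ndim=2))  # ((1, 4), (1, 4)): one (before, after) pair per axis\nprint(_as_pairs(None, ndim=2))    # ((None, None), (None, None))",
    "docstring": "Hedged usage sketch (not part of the numpy source): shows the pair-broadcasting used when normalizing pad_width-style arguments.",
    "type": "example",
    "file_path": "numpy\\numpy\\lib\\_arraypad_impl.py"
  },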
  {
    "library": "scikit-learn",
    "name": "_get_num_samples",
    "source_code": "def _get_num_samples(data_qualities: OpenmlQualitiesType) -> int:\n    default_n_samples = -1\n    qualities = {d['name']: d['value'] for d in data_qualities}\n    return int(float(qualities.get('NumberOfInstances', default_n_samples)))",
    "docstring": "Get the number of samples from data qualities. Parameters ---------- data_qualities : list of dict Used to retrieve the number of instances (samples) in the dataset. Returns ------- n_samples : int The number of samples in the dataset or -1 if data qualities are unavailable.",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\datasets\\_openml.py",
    "ast_data": "FunctionDef name:_get_num_samples arg:data_qualities arguments arg Assign Assign Return return:yes Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "WorkerGroup",
    "source_code": "class WorkerGroup:\n    __slots__ = ['spec', 'workers', 'store', 'group_rank', 'group_world_size', 'state', 'master_addr', 'master_port']\n\n    def __init__(self, spec: WorkerSpec):\n        self.spec = spec\n        self.workers = [Worker(local_rank=i) for i in range(self.spec.local_world_size)]\n        self.store = None\n        self.group_rank = None\n        self.group_world_size = None\n        self.master_addr = None\n        self.master_port = None\n        self.state = WorkerState.INIT",
    "docstring": "A set of ``. Whether the worker group contains cross instance workers or not depends on the implementation of the agent.",
    "type": "class",
    "file_path": "pytorch\\torch\\distributed\\elastic\\agent\\server\\api.py",
    "ast_data": "ClassDef name:WorkerGroup Assign FunctionDef name:__init__ arg:self arg:spec arguments arg arg Assign Assign Call Call Assign Assign Assign Assign Assign Assign"
  },
  {
    "library": "tensorflow",
    "name": "_jvp_dispatch",
    "source_code": "def _jvp_dispatch(op_name, attr_tuple, inputs, outputs, tangents, use_batch=False):\n    if _TRACE_COUNT.get(op_name, 0) < _TRACE_COUNT_LIMIT:\n        config = _jvp_exact_config\n    else:\n        config = _jvp_relaxed_config\n    return tracing_compilation.call_function((op_name, attr_tuple, inputs, outputs, tangents, use_batch), tracing_options=config)",
    "docstring": "Determine which forwardprop function to call.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\forwardprop.py",
    "ast_data": "FunctionDef name:_jvp_dispatch arg:op_name arg:attr_tuple arg:inputs arg:outputs arg:tangents arg:use_batch arguments arg arg arg arg arg arg If Compare Call Assign Assign Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "render",
    "source_code": "def render(self, source: Source) -> str:\n    return f'{self.vr.lower} <= {source.name()} <= {self.vr.upper}'",
    "docstring": "Format the constrain equation",
    "type": "method",
    "file_path": "pytorch\\torch\\fx\\experimental\\symbolic_shapes.py",
    "ast_data": "FunctionDef name:render arg:self arg:source arguments arg arg Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "PathPatch3D",
    "source_code": "class PathPatch3D(Patch3D):\n\n    def __init__(self, path, *, zs=(), zdir='z', axlim_clip=False, **kwargs):\n        Patch.__init__(self, **kwargs)\n        self.set_3d_properties(path, zs, zdir, axlim_clip)\n\n    def set_3d_properties(self, path, zs=0, zdir='z', axlim_clip=False):\n        Patch3D.set_3d_properties(self, path.vertices, zs=zs, zdir=zdir, axlim_clip=axlim_clip)\n        self._code3d = path.codes\n\n    def do_3d_projection(self):\n        s = self._segment3d\n        if self._axlim_clip:\n            mask = _viewlim_mask(*zip(*s), self.axes)\n            xs, ys, zs = np.ma.array(zip(*s), dtype=float, mask=mask).filled(np.nan)\n        else:\n            xs, ys, zs = zip(*s)\n        vxs, vys, vzs, vis = proj3d._proj_transform_clip(xs, ys, zs, self.axes.M, self.axes._focal_length)\n        self._path2d = mpath.Path(np.ma.column_stack([vxs, vys]), self._code3d)\n        return min(vzs)",
    "docstring": "3D PathPatch object.",
    "type": "class",
    "file_path": "matplotlib\\lib\\mpl_toolkits\\mplot3d\\art3d.py",
    "ast_data": "ClassDef name:PathPatch3D FunctionDef name:__init__ arg:self arg:path arguments arg arg arg arg arg arg Call Call FunctionDef name:set_3d_properties arg:self arg:path arg:zs arg:zdir arg:axlim_clip arguments arg arg arg arg arg Call Assign FunctionDef name:do_3d_projection arg:self arguments arg Assign If Assign Call Call Assign Call Call Call Assign Call Assign Call Assign Call Call Return return:yes Call"
  },
  {
    "library": "kornia",
    "name": "forward_inverse",
    "source_code": "def forward_inverse(self) -> Tensor:\n    return torch.inverse(self.forward())",
    "docstring": "Single-batch inverse similarity transform\". Returns: Similarity with shape :math:",
    "type": "method",
    "file_path": "kornia\\kornia\\geometry\\transform\\image_registrator.py",
    "ast_data": "FunctionDef name:forward_inverse arg:self arguments arg Return return:yes Call Call"
  },
  {
    "library": "scikit-learn",
    "name": "transform",
    "source_code": "def transform(self, X):\n    check_is_fitted(self)\n    xp, _ = get_namespace(X)\n    X = validate_data(self, X, accept_sparse=('csr', 'csc'), copy=self.copy, reset=False, dtype=_array_api.supported_float_dtypes(xp), force_writeable=True, ensure_all_finite='allow-nan')\n    if sparse.issparse(X):\n        inplace_column_scale(X, 1.0 / self.scale_)\n    else:\n        X /= self.scale_\n    return X",
    "docstring": "Scale the data. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) The data that should be scaled. Returns ------- X_tr : {ndarray, sparse matrix} of shape (n_samples, n_features) Transformed array.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\preprocessing\\_data.py",
    "ast_data": "FunctionDef name:transform arg:self arg:X arguments arg arg Call Assign Call Assign Call Call If Call Call Return return:yes"
  },
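  {
    "library": "scikit-learn",
    "name": "scaler_transform_usage_example",
    "source_code": "import numpy as np\nfrom sklearn.preprocessing import MaxAbsScaler\n\n# Hedged usage sketch: judging by file_path and the division by scale_, the\n# transform above appears to belong to a scaler such as MaxAbsScaler (an assumption).\nX = np.array([[1.0, -2.0], [2.0, 4.0]])\nscaler = MaxAbsScaler().fit(X)\nprint(scaler.transform(X))  # each column divided by its max absolute value: [[0.5, -0.5], [1.0, 1.0]]",
    "docstring": "Hedged usage sketch (not from the scikit-learn source): demonstrates the scale-and-return behavior of the transform above, assuming it belongs to MaxAbsScaler.",
    "type": "example",
    "file_path": "scikit-learn\\sklearn\\preprocessing\\_data.py"
  },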
  {
    "library": "pandas",
    "name": "has_duplicates",
    "source_code": "@final\n@property\ndef has_duplicates(self) -> bool:\n    return not self.is_unique",
    "docstring": "Check if the Index has duplicate values. Returns ------- bool Whether or not the Index has duplicate values. See Also -------- Index.is_unique : Inverse method that checks if it has unique values. Examples -------- >>> idx = pd.Index([1, 5, 7, 7]) >>> idx.has_duplicates True >>> idx = pd.Index([1, 5, 7]) >>> idx.has_duplicates False >>> idx = pd.Index([\"Watermelon\", \"Orange\", \"Apple\", \"Watermelon\"]).astype( ... \"category\" ... ) >>> idx.has_duplicates True >>> idx = pd.Index([\"Orange\", \"Apple\", \"Watermelon\"]).astype(\"category\") >>> idx.has_duplicates False",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\indexes\\base.py",
    "ast_data": "FunctionDef name:has_duplicates arg:self arguments arg Return return:yes"
  },
  {
    "library": "django",
    "name": "can_introspect_foreign_keys",
    "source_code": "@cached_property\ndef can_introspect_foreign_keys(self):\n    return self._mysql_storage_engine != 'MyISAM'",
    "docstring": "Confirm support for introspected foreign keys",
    "type": "method",
    "file_path": "django\\django\\db\\backends\\mysql\\features.py",
    "ast_data": "FunctionDef name:can_introspect_foreign_keys arg:self arguments arg Return return:yes Compare"
  },
  {
    "library": "tensorflow",
    "name": "get_next",
    "source_code": "def get_next(self, name=None):\n    with distribute_lib.enter_or_assert_strategy(self._strategy):\n        if distribute_lib.get_replica_context() is not None:\n            raise ValueError('next(iterator) should be called from outside of replica_fn. e.g. strategy.run(replica_fn, args=(next(iterator),))')\n    if not self._enable_get_next_as_optional:\n        return self._get_next_no_partial_batch_handling(name)\n    optional_list = []\n    for i, worker in enumerate(self._input_workers.worker_devices):\n        with ops.device(worker):\n            optional_list.append(self._iterators[i].get_next_as_optional_list())\n    num_replicas_with_values = _calculate_replicas_with_values(self._strategy, self._input_workers, optional_list)\n\n    def _value_or_dummy():\n        value_list = _get_value_or_dummy(self._input_workers, optional_list, produce_dummy=True)\n        if self._replica_order is not None:\n            value_list = self._reorder_replicas(value_list)\n        return _create_per_replica(value_list, self._strategy)\n\n    def _eof():\n        return self._get_next_no_partial_batch_handling()\n    return tf_cond.cond(num_replicas_with_values > 0, _value_or_dummy, _eof, strict=True)",
    "docstring": "Returns the next input from the iterator for all replicas.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\input_lib.py",
    "ast_data": "FunctionDef name:get_next arg:self arg:name arguments arg arg With Call If Compare Call Raise Call If Return return:yes Call Assign For Call With Call Call Call Assign Call FunctionDef name:_value_or_dummy arguments Assign Call If Compare Assign Call Return return:yes Call FunctionDef name:_eof arguments Return return:yes Call Return return:yes Call Compare"
  },
  {
    "library": "scrapy",
    "name": "requestTunnel",
    "source_code": "def requestTunnel(self, protocol: Protocol) -> Protocol:\n    assert protocol.transport\n    tunnelReq = tunnel_request_data(self._tunneledHost, self._tunneledPort, self._proxyAuthHeader)\n    protocol.transport.write(tunnelReq)\n    self._protocolDataReceived = protocol.dataReceived\n    protocol.dataReceived = self.processProxyResponse\n    self._protocol = protocol\n    return protocol",
    "docstring": "Asks the proxy to open a tunnel.",
    "type": "method",
    "file_path": "scrapy\\scrapy\\core\\downloader\\handlers\\http11.py",
    "ast_data": "FunctionDef name:requestTunnel arg:self arg:protocol arguments arg arg Assign Call Call Assign Assign Assign Return return:yes"
  },
  {
    "library": "django",
    "name": "max_x",
    "source_code": "@property\ndef max_x(self):\n    return self._envelope.MaxX",
    "docstring": "Return the value of the maximum X coordinate.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\gdal\\envelope.py",
    "ast_data": "FunctionDef name:max_x arg:self arguments arg Return return:yes"
  },
  {
    "library": "django",
    "name": "_store",
    "source_code": "def _store(self, messages, response, *args, **kwargs):\n    if messages:\n        self.request.session[self.session_key] = self.serialize_messages(messages)\n    else:\n        self.request.session.pop(self.session_key, None)\n    return []",
    "docstring": "Store a list of messages to the request's session.",
    "type": "method",
    "file_path": "django\\django\\contrib\\messages\\storage\\session.py",
    "ast_data": "FunctionDef name:_store arg:self arg:messages arg:response arguments arg arg arg arg arg If Assign Call Call Return return:no"
  },
  {
    "library": "django",
    "name": "ObjectDoesNotExist",
    "source_code": "class ObjectDoesNotExist(Exception):\n    silent_variable_failure = True",
    "docstring": "The requested object does not exist",
    "type": "class",
    "file_path": "django\\django\\core\\exceptions.py",
    "ast_data": "ClassDef name:ObjectDoesNotExist Assign"
  },
  {
    "library": "scipy",
    "name": "keip_zeros",
    "source_code": "def keip_zeros(nt):\n    if not isscalar(nt) or floor(nt) != nt or nt <= 0:\n        raise ValueError('nt must be positive integer scalar.')\n    return _specfun.klvnzo(nt, 8)",
    "docstring": "Compute nt zeros of the derivative of the Kelvin function kei. Parameters ---------- nt : int Number of zeros to compute. Must be positive. Returns ------- ndarray First zeros of the derivative of the Kelvin function. See Also -------- kei, keip References ---------- .. [1] Zhang, Shanjie and Jin, Jianming. \"Computation of Special Functions\", John Wiley and Sons, 1996.",
    "type": "function",
    "file_path": "scipy\\scipy\\special\\_basic.py",
    "ast_data": "FunctionDef name:keip_zeros arg:nt arguments arg If BoolOp Call Compare Call Compare Raise Call Return return:yes Call"
  },
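  {
    "library": "scipy",
    "name": "keip_zeros_usage_example",
    "source_code": "from scipy.special import keip_zeros\n\n# First three zeros of the derivative of the Kelvin function kei.\nprint(keip_zeros(3))\n\n# Non-integer or non-positive nt raises ValueError, per the guard above.\ntry:\n    keip_zeros(0)\nexcept ValueError as e:\n    print(e)  # nt must be positive integer scalar.",
    "docstring": "Usage sketch for the public scipy.special.keip_zeros wrapper shown above; numeric output values are not reproduced here.",
    "type": "example",
    "file_path": "scipy\\scipy\\special\\_basic.py"
  },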
  {
    "library": "pytorch",
    "name": "get_cached_instance_for_options",
    "source_code": "@staticmethod\ndef get_cached_instance_for_options(options: Optional[Union[OrtBackendOptions, Mapping[str, Any]]]=None) -> 'OrtBackend':\n\n    def reusable(a: OrtBackendOptions, b: OrtBackendOptions):\n        if a.preferred_execution_providers != b.preferred_execution_providers or a.infer_execution_providers != b.infer_execution_providers or a.default_execution_providers != b.default_execution_providers or (a.preallocate_output != b.preallocate_output) or (a.use_aot_autograd != b.use_aot_autograd) or (a.pre_ort_model_transforms != b.pre_ort_model_transforms):\n            return False\n        if a.ort_session_options is not None or b.ort_session_options is not None:\n            return False\n        return True\n    if not isinstance(options, OrtBackendOptions):\n        options = OrtBackendOptions(**options or {})\n    backend = next((b for b in OrtBackend.__instance_cache if reusable(b._options, options)), None)\n    if backend is None:\n        assert len(OrtBackend.__instance_cache) < OrtBackend.__instance_cache_max_count, f'No more than {OrtBackend.__instance_cache_max_count} instances of {OrtBackend} allowed. Please instantiate `{OrtBackend}` explicitly to pass to `torch.compile`. See https://github.com/pytorch/pytorch/pull/107973#discussion_r1306144795 for discussion.'\n        OrtBackend.__instance_cache.append((backend := OrtBackend(options)))\n    return backend",
    "docstring": "Returns a possibly cached instance of an `` cannot participate in caching.",
    "type": "method",
    "file_path": "pytorch\\torch\\onnx\\_internal\\onnxruntime.py",
    "ast_data": "FunctionDef name:get_cached_instance_for_options arg:options arguments arg FunctionDef name:reusable arg:a arg:b arguments arg arg If BoolOp Compare Compare Compare Compare Compare Compare Return return:yes If BoolOp Compare Compare Return return:yes Return return:yes If Call Assign Call BoolOp Assign Call Call If Compare Compare Call Call Call Return return:yes"
  },
  {
    "library": "scipy",
    "name": "_adjoint",
    "source_code": "def _adjoint(self):\n    return _AdjointLinearOperator(self)",
    "docstring": "Default implementation of _adjoint; defers to rmatvec.",
    "type": "method",
    "file_path": "scipy\\scipy\\sparse\\linalg\\_interface.py",
    "ast_data": "FunctionDef name:_adjoint arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "header",
    "source_code": "def header(self) -> IndentedBuffer:\n    res = super().header()\n    res.splice('\\n                #include \"cutlass/gemm/gemm.h\"\\n                #include \"cutlass/gemm/device/gemm_universal.h\"\\n                #include \"cutlass/gemm/device/gemm_universal_adapter.h\"\\n                #include \"cutlass/gemm/kernel/gemm_universal.hpp\"\\n                #include \"cutlass/gemm/device/gemm_sparse.h\"\\n                #include \"cutlass/gemm/collective/collective_builder.hpp\"\\n                #include \"cutlass/epilogue/collective/collective_builder.hpp\"\\n                #include \"cutlass/epilogue/collective/default_epilogue.hpp\"\\n                #include \"cutlass/epilogue/thread/linear_combination.h\"\\n                #include \"cutlass/epilogue/thread/activation.h\"\\n                #include \"cutlass/gemm/dispatch_policy.hpp\"\\n                #include \"cutlass/gemm/kernel/tile_scheduler.hpp\"\\n                #include \"cutlass/tensor_ref.h\"\\n                #include \"cutlass/util/distribution.h\"\\n                #include \"cutlass/util/packed_stride.hpp\"\\n                #include \"cutlass/util/tensor_view_io.h\"\\n            ')\n    if inductor_cuda_config.generate_test_runner and (not is_dynamic(*self.input_nodes, self.output_node)):\n        res.splice(GEMM_STANDALONE_RUNNER_ADDITIONAL_INCLUDES)\n    return res",
    "docstring": "Returns a buffer containing CUDA C++ code for the header section of the CUTLASS GEMM template. This section primarily includes the necessary header files. Returns: IndentedBuffer: An instance of IndentedBuffer that contains the generated CUDA C++ header code.",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\codegen\\cuda\\gemm_template.py",
    "ast_data": "FunctionDef name:header arg:self arguments arg Assign Call Call Call If BoolOp Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "should_save_checkpoint",
    "source_code": "def should_save_checkpoint():\n    return dc_context.get_current_worker_context().should_checkpoint",
    "docstring": "Returns whether the current worker should save checkpoints. In multi-worker training, if saving checkpoint is requested by user, or needed for fault-tolerance, the cluster should save checkpoint but not necessarily every worker in the cluster should. TODO(rchao): Consider generalizing this util to be as there can be other files to save such as summary. Returns: Whether this particular worker in the cluster should save checkpoints.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\multi_worker_util.py",
    "ast_data": "FunctionDef name:should_save_checkpoint arguments Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "set_params",
    "source_code": "def set_params(self, **kwargs):\n    self._set_params('_transformers', **kwargs)\n    return self",
    "docstring": "Set the parameters of this estimator. Valid parameter keys can be listed with `transformersColumnTransformer`. Parameters ---------- **kwargs : dict Estimator parameters. Returns ------- self : ColumnTransformer This estimator.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\compose\\_column_transformer.py",
    "ast_data": "FunctionDef name:set_params arg:self arguments arg arg Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "unbatch",
    "source_code": "@abc.abstractmethod\ndef unbatch(self, spec):\n    raise NotImplementedError(f'{type(self).__name__}.unbatch')",
    "docstring": "Returns the TypeSpec for a single unbatched element in . Args: spec: The for a batch of values. Returns: A for an individual value.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\type_spec.py",
    "ast_data": "FunctionDef name:unbatch arg:self arg:spec arguments arg arg Raise Call Call"
  },
  {
    "library": "pygame",
    "name": "add_internal",
    "source_code": "def add_internal(self, sprite, layer=None):\n    if not hasattr(sprite, 'dirty'):\n        raise AttributeError()\n    if not hasattr(sprite, 'visible'):\n        raise AttributeError()\n    if not hasattr(sprite, 'blendmode'):\n        raise AttributeError()\n    if not isinstance(sprite, DirtySprite):\n        raise TypeError()\n    if sprite.dirty == 0:\n        sprite.dirty = 1\n    LayeredUpdates.add_internal(self, sprite, layer)",
    "docstring": "Do not use this method directly. It is used by the group to add a sprite internally.",
    "type": "method",
    "file_path": "pygame\\src_py\\sprite.py",
    "ast_data": "FunctionDef name:add_internal arg:self arg:sprite arg:layer arguments arg arg arg If Call Raise Call If Call Raise Call If Call Raise Call If Call Raise Call If Compare Assign Call"
  },
  {
    "library": "pandas",
    "name": "is_file_like",
    "source_code": "def is_file_like(obj: object) -> bool:\n    if not (hasattr(obj, 'read') or hasattr(obj, 'write')):\n        return False\n    return bool(hasattr(obj, '__iter__'))",
    "docstring": "Check if the object is a file-like object. For objects to be considered file-like, they must be an iterator AND have either a and/or method as an attribute. Note: file-like objects must be iterable, but iterable objects need not be file-like. Parameters ---------- obj : object The object to check for file-like properties. This can be any Python object, and the function will check if it has attributes typically associated with file-like objects (e.g., , , ). Returns ------- bool Whether has file-like properties. See Also -------- api.types.is_dict_like : Check if the object is dict-like. api.types.is_hashable : Return True if hash(obj) will succeed, False otherwise. api.types.is_named_tuple : Check if the object is a named tuple. api.types.is_iterator : Check if the object is an iterator. Examples -------- >>> import io >>> from pandas.api.types import is_file_like >>> buffer = io.StringIO(\"data\") >>> is_file_like(buffer) True >>> is_file_like([1, 2, 3]) False",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\dtypes\\inference.py",
    "ast_data": "FunctionDef name:is_file_like arg:obj arguments arg If BoolOp Call Call Return return:yes Return return:yes Call Call"
  },
  {
    "library": "scikit-learn",
    "name": "link",
    "source_code": "@abstractmethod\ndef link(self, y_pred, out=None):\n    pass",
    "docstring": "Compute the link function g(y_pred). The link function maps (predicted) target values to raw predictions, i.e. . Parameters ---------- y_pred : array Predicted target values. out : array A location into which the result is stored. If provided, it must have a shape that the inputs broadcast to. If not provided or None, a freshly-allocated array is returned. Returns ------- out : array Output array, element-wise link function.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\_loss\\link.py",
    "ast_data": "FunctionDef name:link arg:self arg:y_pred arg:out arguments arg arg arg"
  },
  {
    "library": "pytorch",
    "name": "get_subsystem",
    "source_code": "@classmethod\ndef get_subsystem(cls) -> Optional[str]:\n    if (val := get_env_val('TORCH_BISECT_SUBSYSTEM')):\n        return val\n    file_path = os.path.join(cls.get_dir(), 'bisect_status.txt')\n    lines = cls.read_lines_from_file(file_path)\n    for line in lines:\n        if line.startswith('subsystem='):\n            out = line.strip().split('=')[1]\n            return out if out else None\n    return None",
    "docstring": "Returns the active subsystem, if any",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\compiler_bisector.py",
    "ast_data": "FunctionDef name:get_subsystem arg:cls arguments arg If Call Return return:yes Assign Call Call Assign Call For If Call Assign Call Call Return return:yes Return return:no"
  },
  {
    "library": "scikit-learn",
    "name": "transform",
    "source_code": "def transform(self, X):\n    check_is_fitted(self)\n    X = validate_data(self, X, accept_sparse='csr', reset=False)\n    with config_context(assume_finite=True):\n        return euclidean_distances(X, self.subcluster_centers_)",
    "docstring": "Transform X into subcluster centroids dimension. Each dimension represents the distance from the sample point to each cluster centroid. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) Input data. Returns ------- X_trans : {array-like, sparse matrix} of shape (n_samples, n_clusters) Transformed data.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\cluster\\_birch.py",
    "ast_data": "FunctionDef name:transform arg:self arg:X arguments arg arg Call Assign Call With Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "register_comm_hook",
    "source_code": "def register_comm_hook(self, state: object, hook: callable):\n    if not self.check_is_root():\n        raise AssertionError('register_comm_hook can only be called on a root instance.')\n    for fsdp_state in traversal_utils._get_fsdp_states(self):\n        if fsdp_state.sharding_strategy in HYBRID_SHARDING_STRATEGIES:\n            raise AssertionError(f'Communication hook is not supported for hybrid strategies: {fsdp_state.sharding_strategy}')\n        if fsdp_state._comm_hook is not None:\n            raise AssertionError('A communication hook is already registered')\n        if not callable(hook):\n            raise ValueError(f'The communication hook must be callable but got {hook}')\n        fsdp_state._comm_hook = hook\n        fsdp_state._comm_hook_state = state",
    "docstring": "Register a communication hook. This is an enhancement that provides a flexible hook to users where they can specify how FSDP aggregates gradients across multiple workers. This hook can be used to implement several algorithms like _ and gradient compression which involve different communication strategies for parameter syncs while training with :class:. .. warning :: FSDP communication hook should be registered before running an initial forward pass and only once. Args: state (object): Passed to the hook to maintain any state information during the training process. Examples include error feedback in gradient compression, peers to communicate with next in _, etc. It is locally stored by each worker and shared by all the gradient tensors on the worker. hook (Callable): Callable, which has one of the following signatures: 1) `NO_SHARD` case. Callables with signature 2 are expected to handle gradient communication for sharded cases.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\fsdp\\fully_sharded_data_parallel.py",
    "ast_data": "FunctionDef name:register_comm_hook arg:self arg:state arg:hook arguments arg arg arg If Call Raise Call For Call If Compare Raise Call If Compare Raise Call If Call Raise Call Assign Assign"
  },
  {
    "library": "pytorch",
    "name": "_construct_strides",
    "source_code": "def _construct_strides(sizes: Sequence[int], fill_order: Sequence[int]) -> Sequence[int]:\n    assert len(sizes) == len(fill_order), 'Length of sizes must match the length of the fill order'\n    strides = [0] * len(sizes)\n    current_stride = 1\n    for dim in fill_order:\n        strides[dim] = current_stride\n        current_stride *= sizes[dim]\n    return strides",
    "docstring": "From a list of sizes and a fill order, construct the strides of the permuted tensor.",
    "type": "function",
    "file_path": "pytorch\\torch\\_higher_order_ops\\flex_attention.py",
    "ast_data": "FunctionDef name:_construct_strides arg:sizes arg:fill_order arguments arg arg Compare Call Call Assign Call Assign For Assign Return return:yes"
  },
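  {
    "library": "pytorch",
    "name": "_construct_strides_usage_example",
    "source_code": "# Hedged usage sketch; the private import path is assumed from file_path.\nfrom torch._higher_order_ops.flex_attention import _construct_strides\n\n# Contiguous (row-major) fill order: the last dimension varies fastest.\nprint(_construct_strides([2, 3, 4], fill_order=[2, 1, 0]))  # [12, 4, 1]\n# Alternate order where dim 1 varies fastest.\nprint(_construct_strides([2, 3, 4], fill_order=[1, 2, 0]))  # [12, 1, 3]",
    "docstring": "Hedged usage sketch (not part of the pytorch source): derives strides from sizes and a fill order, matching the accumulation loop above.",
    "type": "example",
    "file_path": "pytorch\\torch\\_higher_order_ops\\flex_attention.py"
  },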
  {
    "library": "tensorflow",
    "name": "process_exists",
    "source_code": "def process_exists(self, task_type, task_id):\n    return self.get_process_exit_code(task_type, task_id) is None",
    "docstring": "Returns whether the subprocess still exists given the task type and id. Args: task_type: The task type. task_id: The task id. Returns: Boolean; whether the subprocess still exists. If the subprocess has exited, this returns False.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\multi_process_runner.py",
    "ast_data": "FunctionDef name:process_exists arg:self arg:task_type arg:task_id arguments arg arg arg Return return:yes Compare Call"
  },
  {
    "library": "tensorflow",
    "name": "clear_op_callbacks",
    "source_code": "def clear_op_callbacks():\n    for callback in context.context().op_callbacks:\n        remove_op_callback(callback)",
    "docstring": "Clear all op callbacks registered in the current thread.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\op_callbacks.py",
    "ast_data": "FunctionDef name:clear_op_callbacks arguments For Call Call"
  },
  {
    "library": "matplotlib",
    "name": "tick_values",
    "source_code": "def tick_values(self, vmin, vmax):\n    return []",
    "docstring": "Return the locations of the ticks. .. note:: Because there are no ticks, *vmin* and *vmax* are not used.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\ticker.py",
    "ast_data": "FunctionDef name:tick_values arg:self arg:vmin arg:vmax arguments arg arg arg Return return:no"
  },
  {
    "library": "kornia",
    "name": "left_jacobian",
    "source_code": "@staticmethod\ndef left_jacobian(vec: Tensor) -> Tensor:\n    R_skew = vector_to_skew_symmetric_matrix(vec)\n    theta = vec.norm(dim=-1, keepdim=True)[..., None]\n    I = eye(3, device=vec.device, dtype=vec.dtype)\n    Jl = I + (1 - theta.cos()) / theta ** 2 * R_skew + (theta - theta.sin()) / theta ** 3 * (R_skew @ R_skew)\n    return Jl",
    "docstring": "Compute the left Jacobian of So3. Args: vec: the input point of shape :math:. Example: >>> vec = torch.tensor([1., 2., 3.]) >>> So3.left_jacobian(vec) tensor([[-0.0687, -0.2267, 0.5074], [ 0.5556, 0.1779, 0.3629], [-0.0141, 0.6236, 0.5890]])",
    "type": "method",
    "file_path": "kornia\\kornia\\geometry\\liegroup\\so3.py",
    "ast_data": "FunctionDef name:left_jacobian arg:vec arguments arg Assign Call Assign Call Assign Call Assign Call Call Return return:yes"
  },
  {
    "library": "kornia",
    "name": "vee",
    "source_code": "@staticmethod\ndef vee(omega: Tensor) -> Tensor:\n    check_so2_matrix_shape(omega)\n    return omega[..., 0, 1]",
    "docstring": "Convert elements from lie algebra to vector space. Returns vector of shape :math:. Args: omega: 2x2-matrix representing lie algebra. Example: >>> v = torch.ones(3) >>> omega = So2.hat(v) >>> So2.vee(omega) tensor([1., 1., 1.])",
    "type": "method",
    "file_path": "kornia\\kornia\\geometry\\liegroup\\so2.py",
    "ast_data": "FunctionDef name:vee arg:omega arguments arg Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_from_pyval",
    "source_code": "@classmethod\ndef _from_pyval(cls, pyval, typespec, path_so_far):\n    if isinstance(pyval, dict):\n        return cls._from_pydict(pyval, typespec, path_so_far)\n    elif isinstance(pyval, (list, tuple)):\n        keys = set()\n        rank = _pyval_find_struct_keys_and_depth(pyval, keys)\n        if rank is not None:\n            return cls._from_pylist_of_dict(pyval, keys, rank, typespec, path_so_far)\n        else:\n            return cls._from_pylist_of_value(pyval, typespec, path_so_far)\n    else:\n        return cls._from_pyscalar(pyval, typespec, path_so_far)",
    "docstring": "Helper function for from_pyval. Args: pyval: The nested Python structure that should be used to create the new . typespec: A specifying the expected type for each field. If not specified, then all nested dictionaries are turned into StructuredTensors, and all nested lists are turned into Tensors (if rank=2). path_so_far: the path of fields that led here (for error messages). Returns: A .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\structured\\structured_tensor.py",
    "ast_data": "FunctionDef name:_from_pyval arg:cls arg:pyval arg:typespec arg:path_so_far arguments arg arg arg arg If Call Return return:yes Call If Call Assign Call Assign Call If Compare Return return:yes Call Return return:yes Call Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "_is_dates_only",
    "source_code": "@property\ndef _is_dates_only(self) -> bool:\n    if not lib.is_np_dtype(self.dtype):\n        return False\n    values_int = self.asi8\n    consider_values = values_int != iNaT\n    reso = get_unit_from_dtype(self.dtype)\n    ppd = periods_per_day(reso)\n    even_days = np.logical_and(consider_values, values_int % ppd != 0).sum() == 0\n    return even_days",
    "docstring": "Check if we are round times at midnight (and no timezone), which will be given a more compact __repr__ than other cases. For TimedeltaArray we are checking for multiples of 24H.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\arrays\\datetimelike.py",
    "ast_data": "FunctionDef name:_is_dates_only arg:self arguments arg If Call Return return:yes Assign Assign Compare Assign Call Assign Call Assign Compare Call Call Compare Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "_release",
    "source_code": "def _release(self, event):\n    if self._active_handle_idx >= 0:\n        if event.button == 3:\n            self._remove_vertex(self._active_handle_idx)\n            self._draw_polygon()\n        self._active_handle_idx = -1\n    elif len(self._xys) > 3 and self._xys[-1] == self._xys[0]:\n        self._selection_completed = True\n        if self._draw_box and self._box is None:\n            self._add_box()\n    elif not self._selection_completed and 'move_all' not in self._state and ('move_vertex' not in self._state):\n        self._xys.insert(-1, self._get_data_coords(event))\n    if self._selection_completed:\n        self.onselect(self.verts)",
    "docstring": "Button release event handler.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\widgets.py",
    "ast_data": "FunctionDef name:_release arg:self arg:event arguments arg arg If Compare If Compare Call Call Assign If BoolOp Compare Call Compare Assign If BoolOp Compare Call If BoolOp Compare Compare Call Call If Call"
  },
  {
    "library": "tensorflow",
    "name": "load_tensor_from_event_file",
    "source_code": "def load_tensor_from_event_file(event_file_path):\n    event = event_pb2.Event()\n    with gfile.Open(event_file_path, 'rb') as f:\n        event.ParseFromString(f.read())\n        return load_tensor_from_event(event)",
    "docstring": "Load a tensor from an event file. Assumes that the event file contains a protobuf and the protobuf contains a value. Args: event_file_path: () path to the event file. Returns: The tensor value loaded from the event file, as a . For uninitialized Tensors, returns . For Tensors of data types that cannot be converted to (e.g., ), return .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\debug\\lib\\debug_data.py",
    "ast_data": "FunctionDef name:load_tensor_from_event_file arg:event_file_path arguments arg Assign Call With Call Call Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "is_lowp_fp_source",
    "source_code": "def is_lowp_fp_source(node: torch.fx.Node, dt: torch.dtype):\n    assert dt in DTYPE_LOWP_FP\n    return get_output_dtype(node) == dt",
    "docstring": "Check if the given node produces output with expected low precision floating point data type.",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\codegen\\cpp.py",
    "ast_data": "FunctionDef name:is_lowp_fp_source arg:node arg:dt arguments arg arg Compare Return return:yes Compare Call"
  },
  {
    "library": "pandas",
    "name": "default_array_ufunc",
    "source_code": "def default_array_ufunc(self, ufunc: np.ufunc, method: str, *inputs, **kwargs):\n    if not any((x is self for x in inputs)):\n        raise NotImplementedError\n    new_inputs = [x if x is not self else np.asarray(x) for x in inputs]\n    return getattr(ufunc, method)(*new_inputs, **kwargs)",
    "docstring": "Fallback to the behavior we would get if we did not define __array_ufunc__. Notes ----- We are assuming that is among .",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\arraylike.py",
    "ast_data": "FunctionDef name:default_array_ufunc arg:self arg:ufunc arg:method arguments arg arg arg arg arg If Call Compare Raise Assign Compare Call Return return:yes Call Call"
  },
  {
    "library": "scikit-learn",
    "name": "staged_decision_function",
    "source_code": "def staged_decision_function(self, X):\n    check_is_fitted(self)\n    X = self._check_X(X)\n    n_classes = self.n_classes_\n    classes = self.classes_[:, np.newaxis]\n    pred = None\n    norm = 0.0\n    for weight, estimator in zip(self.estimator_weights_, self.estimators_):\n        norm += weight\n        current_pred = np.where((estimator.predict(X) == classes).T, weight, -1 / (n_classes - 1) * weight)\n        if pred is None:\n            pred = current_pred\n        else:\n            pred += current_pred\n        if n_classes == 2:\n            tmp_pred = np.copy(pred)\n            tmp_pred[:, 0] *= -1\n            yield (tmp_pred / norm).sum(axis=1)\n        else:\n            yield (pred / norm)",
    "docstring": "Compute decision function of `classes_`, respectively.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\ensemble\\_weight_boosting.py",
    "ast_data": "FunctionDef name:staged_decision_function arg:self arg:X arguments arg arg Call Assign Call Assign Assign Assign Assign For Call Assign Call Compare Call If Compare Assign If Compare Assign Call Call"
  },
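  {
    "library": "scikit-learn",
    "name": "staged_decision_function_usage_example",
    "source_code": "from sklearn.datasets import make_classification\nfrom sklearn.ensemble import AdaBoostClassifier\n\n# Usage sketch against the public API that exposes the method above;\n# the data is synthetic.\nX, y = make_classification(n_samples=100, random_state=0)\nclf = AdaBoostClassifier(n_estimators=5, random_state=0).fit(X, y)\nfor stage, scores in enumerate(clf.staged_decision_function(X)):\n    print(stage, scores.shape)  # (100,) per stage in the binary case",
    "docstring": "Usage sketch (not from the scikit-learn source) showing one decision-function snapshot per boosting stage via the public AdaBoostClassifier API.",
    "type": "example",
    "file_path": "scikit-learn\\sklearn\\ensemble\\_weight_boosting.py"
  },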
  {
    "library": "numpy",
    "name": "degree",
    "source_code": "def degree(self):\n    return len(self) - 1",
    "docstring": "The degree of the series. Returns ------- degree : int Degree of the series, one less than the number of coefficients. Examples -------- Create a polynomial object for ``: >>> np.polynomial.set_default_printstyle(\"unicode\") >>> poly = np.polynomial.Polynomial([1, 7, 4]) >>> print(poly) 1.0 + 7.0·x + 4.0·x² >>> poly.degree() 2 Note that this method does not check for non-zero coefficients. You must trim the polynomial to remove any trailing zeroes: >>> poly = np.polynomial.Polynomial([1, 7, 0]) >>> print(poly) 1.0 + 7.0·x + 0.0·x² >>> poly.degree() 2 >>> poly.trim().degree() 1",
    "type": "method",
    "file_path": "numpy\\numpy\\polynomial\\_polybase.py",
    "ast_data": "FunctionDef name:degree arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, target, converter_target_spec=None, converter_allow_custom_ops=None, raise_exception=False):\n    functools.update_wrapper(self, target)\n    self._func = target\n    self._obj_func = None\n    self._verified = False\n    self._log_messages = []\n    self._raise_exception = raise_exception\n    self._converter_target_spec = converter_target_spec\n    self._converter_allow_custom_ops = converter_allow_custom_ops",
    "docstring": "Initialize the decorator object. Here is the description of the object variables. - _func : decorated function. - _obj_func : for class object, we need to use this object to provide instance as 1 first argument. - _verified : whether the compatibility is checked or not. Args: target: decorated function. converter_target_spec : target_spec of TFLite converter parameter. converter_allow_custom_ops : allow_custom_ops of TFLite converter parameter. raise_exception : to raise an exception on compatibility issues. User need to use get_compatibility_log() to check details.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\lite\\python\\authoring\\authoring.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:target arg:converter_target_spec arg:converter_allow_custom_ops arg:raise_exception arguments arg arg arg arg arg Call Assign Assign Assign Assign Assign Assign Assign"
  },
  {
    "library": "pytorch",
    "name": "_attach_meta_to_node_if_not_exist",
    "source_code": "def _attach_meta_to_node_if_not_exist(model: GraphModule) -> None:\n    for node in model.graph.nodes:\n        if not hasattr(node, 'meta'):\n            node.meta = {}",
    "docstring": "Attach meta field to all nodes of the graph if it does not exist, meta field is a field stores some meta information about the node, such as dtype and shape information for output of the node, this only exists if the program is captured by make_fx (used in quantize_pt2e flow), if the program is captured by torch.fx symbolic tracing, this field may not exist, so we add it here to avoid checking this all over the places",
    "type": "function",
    "file_path": "pytorch\\torch\\ao\\quantization\\quantize_fx.py",
    "ast_data": "FunctionDef name:_attach_meta_to_node_if_not_exist arg:model arguments arg For If Call Assign"
  },
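  {
    "library": "pytorch",
    "name": "_attach_meta_to_node_if_not_exist_usage_example",
    "source_code": "import torch\nfrom torch.fx import symbolic_trace\n\n# Hedged usage sketch; the private import path is assumed from file_path.\nfrom torch.ao.quantization.quantize_fx import _attach_meta_to_node_if_not_exist\n\nclass M(torch.nn.Module):\n    def forward(self, x):\n        return x + 1\n\ngm = symbolic_trace(M())  # captured via torch.fx symbolic tracing\n_attach_meta_to_node_if_not_exist(gm)\nassert all(hasattr(n, \"meta\") for n in gm.graph.nodes)",
    "docstring": "Hedged usage sketch (not part of the pytorch source): ensures every node of a symbolically traced GraphModule carries a meta dict, per the docstring above.",
    "type": "example",
    "file_path": "pytorch\\torch\\ao\\quantization\\quantize_fx.py"
  },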
  {
    "library": "pytorch",
    "name": "info_dict",
    "source_code": "def info_dict(self) -> dict[str, Union[PrimitiveInfoType, list[PrimitiveInfoType]]]:\n    return self.log_info",
    "docstring": "Information returned here is logged to the autotune log file when that is enabled.",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\select_algorithm.py",
    "ast_data": "FunctionDef name:info_dict arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "unpacked_to_numpy",
    "source_code": "def unpacked_to_numpy(unpacked: List[TensorLike], layout: layout_lib.Layout) -> np.ndarray:\n    if len(unpacked) != len(layout.offset_to_shard()):\n        raise ValueError('Wrong number of component Tensors.')\n    unravelled = np.ndarray([layout.num_shards(i) for i in range(layout.rank)], dtype=object)\n    for offset, loc in enumerate(layout.offset_to_shard()):\n        unravelled[loc] = unpacked[offset]\n    concat_tensor = np.block(unravelled.tolist())\n    while concat_tensor.ndim > unpacked[0].ndim:\n        concat_tensor = np.squeeze(concat_tensor, axis=0)\n    return concat_tensor",
    "docstring": "Heals local Tensor components to a numpy array.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\dtensor\\python\\numpy_util.py",
    "ast_data": "FunctionDef name:unpacked_to_numpy arg:unpacked arg:layout arguments arg arg If Compare Call Call Call Raise Call Assign Call Call Call For Call Call Assign Assign Call Call While Compare Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_tf_tensor_len",
    "source_code": "def _tf_tensor_len(s):\n    if s.shape.ndims and s.shape.dims[0].value is not None:\n        return s.shape.dims[0].value\n    shape = array_ops.shape(s)\n    assert shape.shape, 'shape tensor of zero size? {}'.format(shape)\n    if shape.shape[0] == 0:\n        raise ValueError('len requires a non-scalar tensor, got one of shape {}'.format(shape))\n    if shape.shape.dims[0].value is not None:\n        return array_ops.shape(s)[0]\n    rank = array_ops.rank(s)\n\n    def raise_zero_rank_error():\n        msg = gen_string_ops.string_join(['len requires non-zero rank, got ', gen_string_ops.as_string(rank)])\n        with ops.control_dependencies([control_flow_assert.Assert(False, [msg])]):\n            return constant_op.constant(0, dtype=dtypes.int32)\n    return cond.cond(rank > 0, lambda: array_ops.shape(s)[0], raise_zero_rank_error)",
    "docstring": "Overload of len_ for Tensor arguments.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\autograph\\operators\\py_builtins.py",
    "ast_data": "FunctionDef name:_tf_tensor_len arg:s arguments arg If BoolOp Compare Return return:yes Assign Call Call If Compare Raise Call Call If Compare Return return:yes Call Assign Call FunctionDef name:raise_zero_rank_error arguments Assign Call Call With Call Call Return return:yes Call Return return:yes Call Compare arguments Call"
  },
  {
    "library": "sphinx",
    "name": "parse_comments",
    "source_code": "def parse_comments(self) -> None:\n    tree = ast.parse(self.code, type_comments=True)\n    picker = VariableCommentPicker(self.code.splitlines(True), self.encoding)\n    picker.visit(tree)\n    self.annotations = picker.annotations\n    self.comments = picker.comments\n    self.deforders = picker.deforders\n    self.finals = picker.finals\n    self.overloads = picker.overloads",
    "docstring": "Parse the code and pick up comments.",
    "type": "method",
    "file_path": "sphinx\\sphinx\\pycode\\parser.py",
    "ast_data": "FunctionDef name:parse_comments arg:self arguments arg Assign Call Assign Call Call Call Assign Assign Assign Assign Assign"
  },
  {
    "library": "django",
    "name": "mutate_state",
    "source_code": "def mutate_state(self, project_state, preserve=True):\n    new_state = project_state\n    if preserve:\n        new_state = project_state.clone()\n    for operation in self.operations:\n        operation.state_forwards(self.app_label, new_state)\n    return new_state",
    "docstring": "Take a ProjectState and return a new one with the migration's operations applied to it. Preserve the original object state by default and return a mutated state from a copy.",
    "type": "method",
    "file_path": "django\\django\\db\\migrations\\migration.py",
    "ast_data": "FunctionDef name:mutate_state arg:self arg:project_state arg:preserve arguments arg arg arg Assign If Assign Call For Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "export_to_dot",
    "source_code": "def export_to_dot(self, best_model, df, feature_columns):\n    dot_str = best_model.to_dot()\n    with open('best_model.dot', 'w') as f:\n        f.write(dot_str)",
    "docstring": "Export a learned decision tree to a dot file.",
    "type": "method",
    "file_path": "pytorch\\torchgen\\_autoheuristic\\train_decision.py",
    "ast_data": "FunctionDef name:export_to_dot arg:self arg:best_model arg:df arg:feature_columns arguments arg arg arg arg Assign Call With Call Call"
  },
  {
    "library": "scikit-learn",
    "name": "update_split_subclusters",
    "source_code": "def update_split_subclusters(self, subcluster, new_subcluster1, new_subcluster2):\n    ind = self.subclusters_.index(subcluster)\n    self.subclusters_[ind] = new_subcluster1\n    self.init_centroids_[ind] = new_subcluster1.centroid_\n    self.init_sq_norm_[ind] = new_subcluster1.sq_norm_\n    self.append_subcluster(new_subcluster2)",
    "docstring": "Remove a subcluster from a node and update it with the split subclusters.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\cluster\\_birch.py",
    "ast_data": "FunctionDef name:update_split_subclusters arg:self arg:subcluster arg:new_subcluster1 arg:new_subcluster2 arguments arg arg arg arg Assign Call Assign Assign Assign Call"
  },
  {
    "library": "tensorflow",
    "name": "_record_and_ignore_transient_ps_failure",
    "source_code": "def _record_and_ignore_transient_ps_failure(self, e):\n    if self._transient_ps_failures_threshold <= 0 or not _is_ps_failure(e):\n        return False\n    ps_tasks = _extract_failed_ps_instances(str(e))\n    with self._potential_ps_failures_lock:\n        for t in ps_tasks:\n            self._potential_ps_failures_count[t] += 1\n            if self._potential_ps_failures_count[t] >= self._transient_ps_failures_threshold:\n                return False\n    return True",
    "docstring": "Records potential PS failures and return if failure should be ignored.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\coordinator\\cluster_coordinator.py",
    "ast_data": "FunctionDef name:_record_and_ignore_transient_ps_failure arg:self arg:e arguments arg arg If BoolOp Compare Call Return return:yes Assign Call Call With For If Compare Return return:yes Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "replace_metric_functions",
    "source_code": "def replace_metric_functions(child_layer, serialized_fns):\n    original_fns[child_layer] = {'__call__': child_layer.__call__, 'result': child_layer.result, 'update_state': child_layer.update_state}\n    with utils.no_automatic_dependency_tracking_scope(child_layer):\n        child_layer.__call__ = serialized_fns['__call__']\n        child_layer.result = serialized_fns['result']\n        child_layer.update_state = serialized_fns['update_state']",
    "docstring": "Replaces metric functions with wrapped functions.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\saving\\saved_model\\save_impl.py",
    "ast_data": "FunctionDef name:replace_metric_functions arg:child_layer arg:serialized_fns arguments arg arg Assign With Call Assign Assign Assign"
  },
  {
    "library": "matplotlib",
    "name": "_add_solids",
    "source_code": "def _add_solids(self, X, Y, C):\n    if self.solids is not None:\n        self.solids.remove()\n    for solid in self.solids_patches:\n        solid.remove()\n    mappable = getattr(self, 'mappable', None)\n    if isinstance(mappable, contour.ContourSet) and any((hatch is not None for hatch in mappable.hatches)):\n        self._add_solids_patches(X, Y, C, mappable)\n    else:\n        self.solids = self.ax.pcolormesh(X, Y, C, cmap=self.cmap, norm=self.norm, alpha=self.alpha, edgecolors='none', shading='flat')\n        if not self.drawedges:\n            if len(self._y) >= self.n_rasterize:\n                self.solids.set_rasterized(True)\n    self._update_dividers()",
    "docstring": "Draw the colors; optionally add separators.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\colorbar.py",
    "ast_data": "FunctionDef name:_add_solids arg:self arg:X arg:Y arg:C arguments arg arg arg arg If Compare Call For Call Assign Call If BoolOp Call Call Compare Call Assign Call If If Compare Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "PruningMode",
    "source_code": "class PruningMode:\n    NO_PRUNING, PRE_PRUNING, POST_PRUNING = range(0, 3)\n    _map = {'none': NO_PRUNING, 'pre': PRE_PRUNING, 'post': POST_PRUNING}\n\n    @classmethod\n    def from_str(cls, mode):\n        if mode in cls._map:\n            return cls._map[mode]\n        else:\n            raise ValueError('pruning_mode mode must be one of: {}. Found: {}'.format(', '.join(sorted(cls._map)), mode))",
    "docstring": "Class for working with Pruning modes.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\boosted_trees_ops.py",
    "ast_data": "ClassDef name:PruningMode Assign Call Assign FunctionDef name:from_str arg:cls arg:mode arguments arg arg If Compare Return return:yes Raise Call Call Call Call"
  },
  {
    "library": "kornia",
    "name": "so2",
    "source_code": "@property\ndef so2(self) -> So2:\n    return self._rotation",
    "docstring": "Return the underlying .",
    "type": "method",
    "file_path": "kornia\\kornia\\geometry\\liegroup\\se2.py",
    "ast_data": "FunctionDef name:so2 arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "join",
    "source_code": "def join(self) -> None:\n    self._server.join()",
    "docstring": "Blocks until the server has shut down. This is useful when starting a dedicated dispatch process. Raises: tf.errors.OpError: Or one of its subclasses if an error occurs while joining the server.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\data\\experimental\\service\\server_lib.py",
    "ast_data": "FunctionDef name:join arg:self arguments arg Call"
  },
  {
    "library": "pandas",
    "name": "make_empty",
    "source_code": "def make_empty(self, axes=None) -> Self:\n    if axes is None:\n        axes = [default_index(0)] + self.axes[1:]\n    if self.ndim == 1:\n        assert isinstance(self, SingleBlockManager)\n        blk = self.blocks[0]\n        arr = blk.values[:0]\n        bp = BlockPlacement(slice(0, 0))\n        nb = blk.make_block_same_class(arr, placement=bp)\n        blocks = [nb]\n    else:\n        blocks = []\n    return type(self).from_blocks(blocks, axes)",
    "docstring": "return an empty BlockManager with the items axis of len 0",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\internals\\managers.py",
    "ast_data": "FunctionDef name:make_empty arg:self arg:axes arguments arg arg If Compare Assign Call If Compare Call Assign Assign Assign Call Call Assign Call Assign Assign Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "AbstractRNNCell",
    "source_code": "class AbstractRNNCell(Layer):\n\n    def call(self, inputs, states):\n        raise NotImplementedError('Abstract method')\n\n    @property\n    def state_size(self):\n        raise NotImplementedError('Abstract method')\n\n    @property\n    def output_size(self):\n        raise NotImplementedError('Abstract method')\n\n    def get_initial_state(self, inputs=None, batch_size=None, dtype=None):\n        return _generate_zero_filled_state_for_cell(self, inputs, batch_size, dtype)",
    "docstring": "Abstract object representing an RNN cell. See [the Keras RNN API guide]( for details about the usage of RNN API. This is the base class for implementing RNN cells with custom behavior. Every must have the properties below and implement with the signature . Examples: This definition of cell differs from the definition used in the literature. In the literature, 'cell' refers to an object with a single scalar output. This definition refers to a horizontal array of such units. An RNN cell, in the most abstract setting, is anything that has a state and performs some operation that takes a matrix of inputs. This operation results in an output matrix with columns. If is an integer, this operation also results in a new state matrix with columns. If is a (possibly nested tuple of) TensorShape object(s), then it should return a matching structure of Tensors having shape for each in .",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\layers\\recurrent.py",
    "ast_data": "ClassDef name:AbstractRNNCell FunctionDef name:call arg:self arg:inputs arg:states arguments arg arg arg Raise Call FunctionDef name:state_size arg:self arguments arg Raise Call FunctionDef name:output_size arg:self arguments arg Raise Call FunctionDef name:get_initial_state arg:self arg:inputs arg:batch_size arg:dtype arguments arg arg arg arg Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "generate_decl_pxd",
    "source_code": "def generate_decl_pxd(name, return_type, argnames, argtypes):\n    args = ', '.join([' *'.join(arg) for arg in zip(argtypes, argnames)])\n    return f'cdef {return_type} {name}({args}) noexcept nogil\\n'",
    "docstring": "Create Cython header declaration for BLAS/LAPACK function.",
    "type": "function",
    "file_path": "scipy\\scipy\\linalg\\_generate_pyx.py",
    "ast_data": "FunctionDef name:generate_decl_pxd arg:name arg:return_type arg:argnames arg:argtypes arguments arg arg arg arg Assign Call Call Call Return return:yes"
  },
  {
    "library": "scipy",
    "name": "var",
    "source_code": "def var(self, m, n):\n    M, m, n, _, _, mncond = self._process_parameters(m, n)\n    if m.size != 0:\n        M, n = (M[..., np.newaxis], n[..., np.newaxis])\n    cond = (M == 0) & (M - 1 == 0)\n    M = np.ma.masked_array(M, mask=cond)\n    output = n * m / M * (M - m) / M * (M - n) / (M - 1)\n    if m.size != 0:\n        mncond = mncond[..., np.newaxis] | np.zeros(output.shape, dtype=np.bool_)\n    return self._checkresult(output, mncond, np.nan)",
    "docstring": "Variance of the multivariate hypergeometric distribution. Parameters ---------- %(_doc_default_callparams)s Returns ------- array_like The variances of the components of the distribution. This is the diagonal of the covariance matrix of the distribution",
    "type": "method",
    "file_path": "scipy\\scipy\\stats\\_multivariate.py",
    "ast_data": "FunctionDef name:var arg:self arg:m arg:n arguments arg arg arg Assign Call If Compare Assign Assign Compare Compare Assign Call Assign If Compare Assign Call Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "modify_mesh",
    "source_code": "def modify_mesh(x, insert_1, insert_2):\n    return np.sort(np.hstack((x, 0.5 * (x[insert_1] + x[insert_1 + 1]), (2 * x[insert_2] + x[insert_2 + 1]) / 3, (x[insert_2] + 2 * x[insert_2 + 1]) / 3)))",
    "docstring": "Insert nodes into a mesh. Nodes removal logic is not established, its impact on the solver is presumably negligible. So, only insertion is done in this function. Parameters ---------- x : ndarray, shape (m,) Mesh nodes. insert_1 : ndarray Intervals to each insert 1 new node in the middle. insert_2 : ndarray Intervals to each insert 2 new nodes, such that divide an interval into 3 equal parts. Returns ------- x_new : ndarray New mesh nodes. Notes ----- and should not have common values.",
    "type": "function",
    "file_path": "scipy\\scipy\\integrate\\_bvp.py",
    "ast_data": "FunctionDef name:modify_mesh arg:x arg:insert_1 arg:insert_2 arguments arg arg arg Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "bisect",
    "source_code": "def bisect(self, num_attempts: int=100, p: float=0.5) -> list[ConfigType]:\n    print(f'Starting random testing with bisection, seed {self.seed}, and p {p}')\n    random.seed(self.seed)\n    self._reset_configs()\n    results = ResultType()\n    ret: list[ConfigType] = []\n    for attempt in range(num_attempts):\n        print(f'Random attempt {attempt + 1}/{num_attempts}')\n        config = self.new_config()\n        for field_name, config_entry in self.fields.items():\n            if field_name not in config and (not field_name.startswith('_')) and ('TESTING_ONLY' not in field_name) and (random.random() < p):\n                value = self.sample(field_name, config_entry.value_type, config_entry.default)\n                config[field_name] = value\n        status = self.test_config(results, config)\n        if status not in OrderedSet([Status.PASSED, Status.SKIPPED]):\n            if (minimal_failing_config := self._bisect_failing_config(results, config)):\n                print(f'Minimum failing config: {minimal_failing_config}')\n                ret.append(minimal_failing_config)\n    return ret",
    "docstring": "Test configs and bisect to minimal failing configuration.",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\fuzzer.py",
    "ast_data": "FunctionDef name:bisect arg:self arg:num_attempts arg:p arguments arg arg arg Call Call Call Assign Call For Call Call Assign Call For Call If BoolOp Compare Call Compare Compare Call Assign Call Assign Assign Call If Compare Call If Call Call Call Return return:yes"
  },
  {
    "library": "scipy",
    "name": "__repr__",
    "source_code": "def __repr__(self):\n    class_name = self.__class__.__name__\n    parameters = list(self._original_parameters.items())\n    info = []\n    with np.printoptions(threshold=10):\n        str_parameters = [f'{symbol}={repr(value)}' for symbol, value in parameters]\n    str_parameters = f'{', '.join(str_parameters)}'\n    info.append(str_parameters)\n    return f'{class_name}({', '.join(info)})'",
    "docstring": "Returns a string representation of the distribution. Includes the name of the distribution family, the names of the parameters and the of each of their values.",
    "type": "method",
    "file_path": "scipy\\scipy\\stats\\_distribution_infrastructure.py",
    "ast_data": "FunctionDef name:__repr__ arg:self arguments arg Assign Assign Call Call Assign With Call Assign Call Assign Call Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_as_spec_tuple",
    "source_code": "def _as_spec_tuple(slice_spec):\n    if isinstance(slice_spec, (list, tuple)) and (not isinstance(slice_spec, np.ndarray)):\n        is_index = True\n        for s in slice_spec:\n            if s is None or s is Ellipsis or isinstance(s, (list, tuple, slice)):\n                is_index = False\n                break\n            elif isinstance(s, (np_arrays.ndarray, np.ndarray)) and s.ndim != 0:\n                is_index = False\n                break\n        if not is_index:\n            return tuple(slice_spec)\n    return (slice_spec,)",
    "docstring": "Convert slice_spec to tuple.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\numpy_ops\\np_array_ops.py",
    "ast_data": "FunctionDef name:_as_spec_tuple arg:slice_spec arguments arg If BoolOp Call Call Assign For If BoolOp Compare Compare Call Assign If BoolOp Call Compare Assign If Return return:yes Call Return return:yes"
  },
  {
    "library": "sphinx",
    "name": "emit_firstresult",
    "source_code": "def emit_firstresult(self, event: str, *args: Any, allowed_exceptions: tuple[type[Exception], ...]=()) -> Any:\n    return self.events.emit_firstresult(event, *args, allowed_exceptions=allowed_exceptions)",
    "docstring": "Emit *event* and pass *arguments* to the callback functions. Return the result of the first callback that doesn't return ``. :param event: The name of event that will be emitted :param args: The arguments for the event :param allowed_exceptions: The list of exceptions that are allowed in the callbacks .. versionadded:: 0.5 .. versionchanged:: 3.1 Added *allowed_exceptions* to specify path-through exceptions",
    "type": "method",
    "file_path": "sphinx\\sphinx\\application.py",
    "ast_data": "FunctionDef name:emit_firstresult arg:self arg:event arguments arg arg arg arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "enable",
    "source_code": "def enable(val: bool=True) -> None:\n    torch._C._cuda_tunableop_enable(val)",
    "docstring": "This is the big on/off switch for all TunableOp implementations.",
    "type": "function",
    "file_path": "pytorch\\torch\\cuda\\tunable.py",
    "ast_data": "FunctionDef name:enable arg:val arguments arg Call"
  },
  {
    "library": "pytorch",
    "name": "start",
    "source_code": "def start(self):\n    if self._registered and (not self._execution_trace_running):\n        _enable_execution_trace_observer()\n        self._execution_trace_running = True\n        self._record_pg_config()",
    "docstring": "Starts to capture.",
    "type": "method",
    "file_path": "pytorch\\torch\\profiler\\profiler.py",
    "ast_data": "FunctionDef name:start arg:self arguments arg If BoolOp Call Assign Call"
  },
  {
    "library": "tensorflow",
    "name": "get_v2_names",
    "source_code": "def get_v2_names(symbol: Any) -> Sequence[str]:\n    names_v2 = []\n    tensorflow_api_attr = API_ATTRS[TENSORFLOW_API_NAME].names\n    keras_api_attr = API_ATTRS[KERAS_API_NAME].names\n    if not hasattr(symbol, '__dict__'):\n        return names_v2\n    if tensorflow_api_attr in symbol.__dict__:\n        names_v2.extend(getattr(symbol, tensorflow_api_attr))\n    if keras_api_attr in symbol.__dict__:\n        names_v2.extend(getattr(symbol, keras_api_attr))\n    return names_v2",
    "docstring": "Get a list of TF 2.0 names for this symbol. Args: symbol: symbol to get API names for. Returns: List of all API names for this symbol.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\util\\tf_export.py",
    "ast_data": "FunctionDef name:get_v2_names arg:symbol arguments arg Assign Assign Assign If Call Return return:yes If Compare Call Call If Compare Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "disable",
    "source_code": "def disable(fn=None, recursive=True, *, reason=None):\n    import torch._dynamo\n    return torch._dynamo.disable(fn, recursive, reason=reason)",
    "docstring": "This function provides a decorator to disable compilation on a function. It also provides the option of recursively disabling called functions. Args: fn (optional): The function to disable recursive (optional): A boolean value indicating whether the disabling should be recursive. reason (optional): A string value indicating the reason for disabling the function.",
    "type": "function",
    "file_path": "pytorch\\torch\\compiler\\__init__.py",
    "ast_data": "FunctionDef name:disable arg:fn arg:recursive arguments arg arg arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "repeat",
    "source_code": "def repeat(self, count=None, name=None) -> 'DatasetV2':\n    from tensorflow.python.data.ops import repeat_op\n    return repeat_op._repeat(self, count, name)",
    "docstring": "Repeats this dataset so each original value is seen times. >>> dataset = tf.data.Dataset.from_tensor_slices([1, 2, 3]) >>> dataset = dataset.repeat(3) >>> [a.item() for a in dataset.as_numpy_iterator()] [1, 2, 3, 1, 2, 3, 1, 2, 3] Note: If the input dataset depends on global state (e.g. a random number generator) or its output is non-deterministic (e.g. because of upstream ), then different repetitions may produce different elements. Args: count: (Optional.) A scalar , representing the number of times the dataset should be repeated. The default behavior (if is or ) is for the dataset be repeated indefinitely. name: (Optional.) A name for the tf.data operation. Returns: A new with the transformation applied as described above.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\data\\ops\\dataset_ops.py",
    "ast_data": "FunctionDef name:repeat arg:self arg:count arg:name arguments arg arg arg Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "autoscale_None",
    "source_code": "def autoscale_None(self):\n    self._colorizer.autoscale_None(self._A)",
    "docstring": "Autoscale the scalar limits on the norm instance using the current array, changing only limits that are None",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\colorizer.py",
    "ast_data": "FunctionDef name:autoscale_None arg:self arguments arg Call"
  },
  {
    "library": "scikit-learn",
    "name": "decision_function",
    "source_code": "def decision_function(self, X):\n    check_is_fitted(self, 'coef_')\n    X = validate_data(self, X, accept_sparse='csr', reset=False)\n    decisions = safe_sparse_dot(X, self.coef_.T, dense_output=True) - self.offset_\n    return decisions.ravel()",
    "docstring": "Signed distance to the separating hyperplane. Signed distance is positive for an inlier and negative for an outlier. Parameters ---------- X : {array-like, sparse matrix}, shape (n_samples, n_features) Testing data. Returns ------- dec : array-like, shape (n_samples,) Decision function values of the samples.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\linear_model\\_stochastic_gradient.py",
    "ast_data": "FunctionDef name:decision_function arg:self arg:X arguments arg arg Call Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "_iterate_sparse_X",
    "source_code": "def _iterate_sparse_X(X):\n    n_samples = X.shape[0]\n    X_indices = X.indices\n    X_data = X.data\n    X_indptr = X.indptr\n    for i in range(n_samples):\n        row = np.zeros(X.shape[1])\n        startptr, endptr = (X_indptr[i], X_indptr[i + 1])\n        nonzero_indices = X_indices[startptr:endptr]\n        row[nonzero_indices] = X_data[startptr:endptr]\n        yield row",
    "docstring": "This little hack returns a densified row when iterating over a sparse matrix, instead of constructing a sparse matrix for every row that is expensive.",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\cluster\\_birch.py",
    "ast_data": "FunctionDef name:_iterate_sparse_X arg:X arguments arg Assign Assign Assign Assign For Call Assign Call Assign Assign Assign"
  },
  {
    "library": "matplotlib",
    "name": "release_zoom",
    "source_code": "def release_zoom(self, event):\n    if self._zoom_info is None:\n        return\n    self.canvas.mpl_disconnect(self._zoom_info.cid)\n    self.remove_rubberband()\n    start_x, start_y = self._zoom_info.start_xy\n    direction = 'in' if self._zoom_info.button == 1 else 'out'\n    key = event.key\n    if self._zoom_info.cbar == 'horizontal':\n        key = 'x'\n    elif self._zoom_info.cbar == 'vertical':\n        key = 'y'\n    if abs(event.x - start_x) < 5 and key != 'y' or (abs(event.y - start_y) < 5 and key != 'x'):\n        self._cleanup_post_zoom()\n        return\n    for i, ax in enumerate(self._zoom_info.axes):\n        twinx = any((ax.get_shared_x_axes().joined(ax, prev) for prev in self._zoom_info.axes[:i]))\n        twiny = any((ax.get_shared_y_axes().joined(ax, prev) for prev in self._zoom_info.axes[:i]))\n        ax._set_view_from_bbox((start_x, start_y, event.x, event.y), direction, key, twinx, twiny)\n    self._cleanup_post_zoom()\n    self.push_current()",
    "docstring": "Callback for mouse button release in zoom to rect mode.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\backend_bases.py",
    "ast_data": "FunctionDef name:release_zoom arg:self arg:event arguments arg arg If Compare Return return:no Call Call Assign Assign Compare Assign If Compare Assign If Compare Assign If BoolOp BoolOp Compare Call Compare BoolOp Compare Call Compare Call Return return:no For Call Assign Call Call Call Assign Call Call Call Call Call Call"
  },
  {
    "library": "sphinx",
    "name": "on_doctree_read",
    "source_code": "def on_doctree_read(app: Sphinx, doctree: nodes.document) -> None:\n    duration = time.monotonic() - app.env.current_document.reading_started_at\n    domain = app.env.domains['duration']\n    domain.note_reading_duration(duration)",
    "docstring": "Record a reading duration.",
    "type": "function",
    "file_path": "sphinx\\sphinx\\ext\\duration.py",
    "ast_data": "FunctionDef name:on_doctree_read arg:app arg:doctree arguments arg arg Assign Call Assign Call"
  },
  {
    "library": "matplotlib",
    "name": "__init__",
    "source_code": "def __init__(self, minor=False, *, nbins='auto'):\n    self._minor = minor\n    super().__init__(nbins=nbins, steps=[1, 2, 5, 10])",
    "docstring": "Parameters ---------- nbins : int or 'auto', optional Number of ticks. Only used if minor is False. minor : bool, default: False Indicate if this locator is for minor ticks or not.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\ticker.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:minor arguments arg arg arg Assign Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_clean_labels_out_of_range",
    "source_code": "def _clean_labels_out_of_range():\n    if _labels_is_sparse():\n        return type(labels)(indices=labels.indices, values=_clean_out_of_range(labels.values), dense_shape=labels.dense_shape)\n    else:\n        return _clean_out_of_range(labels)",
    "docstring": "Replaces by -1 ane large out-of-range values in .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\metrics_impl.py",
    "ast_data": "FunctionDef name:_clean_labels_out_of_range arguments If Call Return return:yes Call Call Call Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "auto_set_font_size",
    "source_code": "def auto_set_font_size(self, renderer):\n    fontsize = self.get_fontsize()\n    required = self.get_required_width(renderer)\n    while fontsize > 1 and required > self.get_width():\n        fontsize -= 1\n        self.set_fontsize(fontsize)\n        required = self.get_required_width(renderer)\n    return fontsize",
    "docstring": "Shrink font size until the text fits into the cell width.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\table.py",
    "ast_data": "FunctionDef name:auto_set_font_size arg:self arg:renderer arguments arg arg Assign Call Assign Call While BoolOp Compare Compare Call Call Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "num_buckets",
    "source_code": "@property\ndef num_buckets(self):\n    return self.vocabulary_size + self.num_oov_buckets",
    "docstring": "Returns number of buckets in this sparse feature.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\feature_column\\feature_column_v2.py",
    "ast_data": "FunctionDef name:num_buckets arg:self arguments arg Return return:yes"
  },
  {
    "library": "uvicorn",
    "name": "resume_writing",
    "source_code": "def resume_writing(self) -> None:\n    self.writable.set()",
    "docstring": "Called by the transport when the write buffer drops below the low water mark.",
    "type": "method",
    "file_path": "uvicorn\\uvicorn\\protocols\\websockets\\wsproto_impl.py",
    "ast_data": "FunctionDef name:resume_writing arg:self arguments arg Call"
  },
  {
    "library": "django",
    "name": "ExtractIsoYear",
    "source_code": "class ExtractIsoYear(Extract):\n    lookup_name = 'iso_year'",
    "docstring": "Return the ISO-8601 week-numbering year.",
    "type": "class",
    "file_path": "django\\django\\db\\models\\functions\\datetime.py",
    "ast_data": "ClassDef name:ExtractIsoYear Assign"
  },
  {
    "library": "pytorch",
    "name": "is_torchelastic_launched",
    "source_code": "def is_torchelastic_launched() -> bool:\n    return os.getenv('TORCHELASTIC_RUN_ID') is not None",
    "docstring": "Check whether this process was launched with `` maps to the rendezvous id which is always a non-null value indicating the job id for peer discovery purposes..",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\distributed_c10d.py",
    "ast_data": "FunctionDef name:is_torchelastic_launched arguments Return return:yes Compare Call"
  },
  {
    "library": "matplotlib",
    "name": "__init__",
    "source_code": "@_api.delete_parameter('3.10', 'keep_empty', addendum='This parameter does nothing.')\ndef __init__(self, filename, keep_empty=None, metadata=None):\n    self._filename = filename\n    self._metadata = metadata\n    self._file = None",
    "docstring": "Create a new PdfPages object. Parameters ---------- filename : str or path-like or file-like Plots using will be written to a file at this location. The file is opened when a figure is saved for the first time (overwriting any older file with the same name). metadata : dict, optional Information dictionary object (see PDF reference section 10.2.1 'Document Information Dictionary'), e.g.: `None`.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\backends\\backend_pdf.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:filename arg:keep_empty arg:metadata arguments arg arg arg arg Assign Assign Assign Call"
  },
  {
    "library": "tensorflow",
    "name": "ceil",
    "source_code": "@tf_export('math.ceil', v1=['math.ceil', 'ceil'])\n@dispatch.register_unary_elementwise_api\n@dispatch.add_dispatch_support\n@deprecation.deprecated_endpoints('ceil')\ndef ceil(x, name=None):\n    return gen_math_ops.ceil(x, name)",
    "docstring": "Return the ceiling of the input, element-wise. For example: >>> tf.math.ceil([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0]) Args: x: A . Must be one of the following types: , , , . name: A name for the operation (optional). Returns: A . Has the same type as . @compatibility(numpy) Equivalent to np.ceil @end_compatibility",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\math_ops.py",
    "ast_data": "FunctionDef name:ceil arg:x arg:name arguments arg arg Return return:yes Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "__torch_function__",
    "source_code": "@classmethod\ndef __torch_function__(cls, func, types, args=(), kwargs=None):\n    if kwargs is None:\n        kwargs = {}\n    if func is torch.nn.functional.scaled_dot_product_attention:\n        return cls._dispatch(*args, **kwargs)\n    return super().__torch_function__(func, types, args, kwargs)",
    "docstring": "Defines the behavior of torch.nn.functional.scaled_dot_product_attention when the attn_bias is an AttnBias",
    "type": "method",
    "file_path": "pytorch\\torch\\nn\\attention\\bias.py",
    "ast_data": "FunctionDef name:__torch_function__ arg:cls arg:func arg:types arg:args arg:kwargs arguments arg arg arg arg arg If Compare Assign If Compare Return return:yes Call Return return:yes Call Call"
  },
  {
    "library": "sphinx",
    "name": "add_post_transform",
    "source_code": "def add_post_transform(self, transform: type[Transform]) -> None:\n    self.registry.add_post_transform(transform)",
    "docstring": "Register a Docutils transform to be applied before writing. Add the standard docutils :class: subclass *transform* to the list of transforms that are applied before Sphinx writes a document. :param transform: A transform class",
    "type": "method",
    "file_path": "sphinx\\sphinx\\application.py",
    "ast_data": "FunctionDef name:add_post_transform arg:self arg:transform arguments arg arg Call"
  },
  {
    "library": "tensorflow",
    "name": "_prepare_feed_values",
    "source_code": "def _prepare_feed_values(model, inputs, targets, sample_weights, mode):\n    if model._distribution_strategy:\n        if isinstance(inputs, (data_types.DatasetV1, data_types.DatasetV2)):\n            inputs = distributed_training_utils_v1.get_iterator(inputs, model._distribution_strategy)\n\n        def get_distributed_inputs():\n            return distributed_training_utils_v1._prepare_feed_values(model, inputs, targets, sample_weights, mode)\n        if context.executing_eagerly():\n            return get_distributed_inputs\n        else:\n            return get_distributed_inputs()\n    if isinstance(inputs, (data_types.DatasetV1, data_types.DatasetV2, iterator_ops.Iterator)):\n        inputs, targets, sample_weights = model._standardize_user_data(inputs, extract_tensors_from_dataset=True)\n    inputs = training_utils_v1.ModelInputs(inputs).as_list()\n    targets = list(targets or [])\n    sample_weights = list(sample_weights or [])\n    ins = inputs + targets + sample_weights\n    if mode == ModeKeys.TRAIN and (not isinstance(backend.symbolic_learning_phase(), int)):\n        ins += [True]\n    return ins",
    "docstring": "Prepare feed values to the model execution function. Args: model: Model to prepare feed values for. inputs: List or dict of model inputs. targets: Optional list of model targets. sample_weights: Optional list of sample weight arrays. mode: One of ModeKeys.TRAIN/ModeKeys.TEST/ModeKeys.PREDICT. Returns: Feed values for the model in the given mode.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\training_arrays_v1.py",
    "ast_data": "FunctionDef name:_prepare_feed_values arg:model arg:inputs arg:targets arg:sample_weights arg:mode arguments arg arg arg arg arg If If Call Assign Call FunctionDef name:get_distributed_inputs arguments Return return:yes Call If Call Return return:yes Return return:yes Call If Call Assign Call Assign Call Call Assign Call BoolOp Assign Call BoolOp Assign If BoolOp Compare Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "empty",
    "source_code": "def empty(*size: Any, dtype: Optional[_dtype]=None, device: Optional[_device]=None) -> torch.Tensor:\n    if len(size) == 1 and isinstance(size[0], Sequence):\n        size = tuple(size[0])\n    else:\n        size = tuple(size)\n    if dtype is None:\n        dtype = torch.get_default_dtype()\n    if device is None:\n        device = torch.get_default_device()\n    return _SymmetricMemory.empty_strided_p2p(size=size, stride=torch._prims_common.make_contiguous_strides_for(size), dtype=dtype, device=torch.device(device))",
    "docstring": "empty(*size, *, dtype=None, device=None) -> Tensor Similar to :func:. The returned tensor can be used by :func: to establish a symmetric memory tensor among participating processes. Args: size (int...): a sequence of integers defining the shape of the output tensor. Can be a variable number of arguments or a collection like a list or tuple. Keyword args: dtype (:class:, optional): the desired data type of returned tensor. Default: if `torch.set_default_dtypetorch.devicetorch.set_default_devicedevice` will be the CPU for CPU tensor types and the current CUDA device for CUDA tensor types.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\_symmetric_memory\\__init__.py",
    "ast_data": "FunctionDef name:empty arguments arg arg arg If BoolOp Compare Call Call Assign Call Assign Call If Compare Assign Call If Compare Assign Call Return return:yes Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "add_to_set",
    "source_code": "@staticmethod\ndef add_to_set(event_name: str, log_level: CompileEventLogLevel, key: str, value: Any):\n    chromium_log = get_chromium_event_logger()\n    if log_level == CompileEventLogLevel.CHROMIUM or log_level == CompileEventLogLevel.PT2_COMPILE:\n        chromium_log.add_to_set(event_name, key, value)\n    else:\n        assert log_level == CompileEventLogLevel.COMPILATION_METRIC\n        top_event = chromium_log.get_outermost_event()\n        if event_name != top_event:\n            raise RuntimeError(\"Log level is COMPILATION_METRIC, but event_name isn't the toplevel event. CompilationMetrics must be logged to the toplevel event. Consider using `add_to_set_metric` directly.\")\n        metrics_context = get_metrics_context()\n        if not metrics_context.in_progress():\n            raise RuntimeError('No metrics context is in progress. Please only call this function within a metrics context/dynamo_timed.')\n        metrics_context.add_to_set(key, value)\n        chromium_log.add_to_set(event_name, key, value)",
    "docstring": "Add metadata to a set of values with key . Creates a set if it doesn't exist.",
    "type": "method",
    "file_path": "pytorch\\torch\\_dynamo\\utils.py",
    "ast_data": "FunctionDef name:add_to_set arg:event_name arg:log_level arg:key arg:value arguments arg arg arg arg Assign Call If BoolOp Compare Compare Call Compare Assign Call If Compare Raise Call Assign Call If Call Raise Call Call Call"
  },
  {
    "library": "numpy",
    "name": "_hist_bin_auto",
    "source_code": "def _hist_bin_auto(x, range):\n    fd_bw = _hist_bin_fd(x, range)\n    sturges_bw = _hist_bin_sturges(x, range)\n    sqrt_bw = _hist_bin_sqrt(x, range)\n    fd_bw_corrected = max(fd_bw, sqrt_bw / 2)\n    return min(fd_bw_corrected, sturges_bw)",
    "docstring": "Histogram bin estimator that uses the minimum width of a relaxed Freedman-Diaconis and Sturges estimators if the FD bin width does not result in a large number of bins. The relaxed Freedman-Diaconis estimator limits the bin width to half the sqrt estimated to avoid small bins. The FD estimator is usually the most robust method, but its width estimate tends to be too large for small and bad for data with limited variance. The Sturges estimator is quite good for small (<1000) datasets and is the default in the R language. This method gives good off-the-shelf behaviour. Parameters ---------- x : array_like Input data that is to be histogrammed, trimmed to range. May not be empty. range : Tuple with range for the histogram Returns ------- h : An estimate of the optimal bin width for the given data. See Also -------- _hist_bin_fd, _hist_bin_sturges",
    "type": "function",
    "file_path": "numpy\\numpy\\lib\\_histograms_impl.py",
    "ast_data": "FunctionDef name:_hist_bin_auto arg:x arg:range arguments arg arg Assign Call Assign Call Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "get_list_of_hosts",
    "source_code": "def get_list_of_hosts(strategy: tpu_strategy.TPUStrategy) -> List[Text]:\n    list_of_hosts = []\n    for tpu_device in _sort_device_spec_strings(strategy.extended.worker_devices):\n        host = device_util.get_host_for_device(tpu_device)\n        if host not in list_of_hosts:\n            list_of_hosts.append(host)\n    assert len(list_of_hosts) == strategy.extended.num_hosts\n    return list_of_hosts",
    "docstring": "Returns a sorted list of CPU devices for the remote jobs. Args: strategy: A TPUStrategy object. Returns: A sorted list of device host strings.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\tpu\\tpu_embedding_v2_utils.py",
    "ast_data": "FunctionDef name:get_list_of_hosts arg:strategy arguments arg Assign For Call Assign Call If Compare Call Compare Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "force_recompile",
    "source_code": "@staticmethod\ndef force_recompile(gm):\n    if isinstance(gm, _LazyGraphModule):\n        gm.real_recompile()",
    "docstring": "Sometimes we need force a recompile as a workaround - we want to do the real recompilation before symbolic_trace to avoid error:",
    "type": "method",
    "file_path": "pytorch\\torch\\fx\\_lazy_graph_module.py",
    "ast_data": "FunctionDef name:force_recompile arg:gm arguments arg If Call Call"
  },
  {
    "library": "matplotlib",
    "name": "remove",
    "source_code": "def remove(self):\n    for artist in self._artists:\n        artist.remove()",
    "docstring": "Remove the handles artist from the figure.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\widgets.py",
    "ast_data": "FunctionDef name:remove arg:self arguments arg For Call"
  },
  {
    "library": "scrapy",
    "name": "MutableAsyncChain",
    "source_code": "class MutableAsyncChain(AsyncIterator[_T]):\n\n    def __init__(self, *args: Iterable[_T] | AsyncIterator[_T]):\n        self.data: AsyncIterator[_T] = _async_chain(*args)\n\n    def extend(self, *iterables: Iterable[_T] | AsyncIterator[_T]) -> None:\n        self.data = _async_chain(self.data, _async_chain(*iterables))\n\n    def __aiter__(self) -> Self:\n        return self\n\n    async def __anext__(self) -> _T:\n        return await self.data.__anext__()",
    "docstring": "Similar to MutableChain but for async iterables",
    "type": "class",
    "file_path": "scrapy\\scrapy\\utils\\python.py",
    "ast_data": "ClassDef name:MutableAsyncChain FunctionDef name:__init__ arg:self arguments arg arg Call FunctionDef name:extend arg:self arguments arg arg Assign Call Call FunctionDef name:__aiter__ arg:self arguments arg Return return:yes AsyncFunctionDef name:__anext__ arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "cherrypy",
    "name": "statfiles",
    "source_code": "def statfiles(self):\n    return [f for f in os.listdir(self.path) if f.startswith('cp_') and f.endswith('.prof')]",
    "docstring": "Compose a list of statistics file names. :returns: A list of available profiles. :rtype: list[str]",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\lib\\profiler.py",
    "ast_data": "FunctionDef name:statfiles arg:self arguments arg Return return:yes Call BoolOp Call Call"
  },
  {
    "library": "tensorflow",
    "name": "manual_to_auto_spmd_partition",
    "source_code": "def manual_to_auto_spmd_partition(tensor, manual_sharding, full_shape, single_dim=-1, unspecified_dims=None):\n    return tf2xla.spmd_shard_to_full_shape(tensor, manual_sharding=manual_sharding, full_shape=full_shape, dim=single_dim, unspecified_dims=unspecified_dims or [])",
    "docstring": "Switches from manual partitioning to automatic SPMD partitioning. Converts a shard-shaped tensor (manually partitioned in SPMD-style) to a full-shaped tensor to be partitioned automatically by the SPMD partitioner. Args: tensor: A tf.Tensor in shard shape. manual_sharding: a serialized string of OpSharding to be used in manual partitioning. full_shape: the shape of tensor before partitioning. single_dim: If >= 0, the conversion will happen only on this dim in subgroups. unspecified_dims: An optional list of dimensions unspecified. Returns: A full-shaped tensor to be partitioned automatically by the SPMD partitioner.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\compiler\\xla\\experimental\\xla_sharding.py",
    "ast_data": "FunctionDef name:manual_to_auto_spmd_partition arg:tensor arg:manual_sharding arg:full_shape arg:single_dim arg:unspecified_dims arguments arg arg arg arg arg Return return:yes Call BoolOp"
  },
  {
    "library": "pytorch",
    "name": "FuncTorchInterpreterVariable",
    "source_code": "class FuncTorchInterpreterVariable(BaseTorchVariable):\n\n    @classmethod\n    def create_with_source(cls, value, source):\n        install_guard(source.make_guard(GuardBuilder.ID_MATCH))\n        return cls(value, source=source)\n\n    def call_method(self, tx, name, args: list[VariableTracker], kwargs: dict[str, VariableTracker]) -> 'VariableTracker':\n        if name == 'key':\n            return variables.EnumVariable(self.value.key())\n        elif name == 'process':\n            return tx.inline_user_function_return(variables.UserFunctionVariable(self.value.process.__func__), [self] + args, kwargs)\n        elif name in ['level', 'batch_size', 'randomness']:\n            return variables.ConstantVariable.create(getattr(self.value, name)())\n        elif name == 'lower':\n            assert not args and (not kwargs)\n            return variables.TemporarilyPopInterpreterStackCtxManagerVariable.create(tx, None)\n        return super().call_method(tx, name, args, kwargs)",
    "docstring": "represents torch._functorch.pyfunctorch.FuncTorchInterpreter",
    "type": "class",
    "file_path": "pytorch\\torch\\_dynamo\\variables\\torch.py",
    "ast_data": "ClassDef name:FuncTorchInterpreterVariable FunctionDef name:create_with_source arg:cls arg:value arg:source arguments arg arg arg Call Call Return return:yes Call FunctionDef name:call_method arg:self arg:tx arg:name arg:args arg:kwargs arguments arg arg arg arg arg If Compare Return return:yes Call Call If Compare Return return:yes Call Call If Compare Return return:yes Call Call Call If Compare BoolOp Return return:yes Call Return return:yes Call Call"
  },
  {
    "library": "scipy",
    "name": "sort_indices",
    "source_code": "def sort_indices(self):\n    if self.has_sorted_indices:\n        return\n    R, C = self.blocksize\n    M, N = self.shape\n    bsr_sort_indices(M // R, N // C, R, C, self.indptr, self.indices, self.data.ravel())\n    self.has_sorted_indices = True",
    "docstring": "Sort the indices of this array/matrix *in place*",
    "type": "method",
    "file_path": "scipy\\scipy\\sparse\\_bsr.py",
    "ast_data": "FunctionDef name:sort_indices arg:self arguments arg If Return return:no Assign Assign Call Call Assign"
  },
  {
    "library": "django",
    "name": "Promise",
    "source_code": "class Promise:\n    pass",
    "docstring": "Base class for the proxy class created in the closure of the lazy function. It's used to recognize promises in code.",
    "type": "class",
    "file_path": "django\\django\\utils\\functional.py",
    "ast_data": "ClassDef name:Promise"
  },
  {
    "library": "numpy",
    "name": "save",
    "source_code": "@array_function_dispatch(_save_dispatcher)\ndef save(file, arr, allow_pickle=True, fix_imports=np._NoValue):\n    if fix_imports is not np._NoValue:\n        warnings.warn(\"The 'fix_imports' flag is deprecated and has no effect. (Deprecated in NumPy 2.1)\", DeprecationWarning, stacklevel=2)\n    if hasattr(file, 'write'):\n        file_ctx = contextlib.nullcontext(file)\n    else:\n        file = os.fspath(file)\n        if not file.endswith('.npy'):\n            file = file + '.npy'\n        file_ctx = open(file, 'wb')\n    with file_ctx as fid:\n        arr = np.asanyarray(arr)\n        format.write_array(fid, arr, allow_pickle=allow_pickle, pickle_kwargs={'fix_imports': fix_imports})",
    "docstring": "Save an array to a binary file in NumPy `fix_importsnumpy.lib.format`. Any data saved to the file is appended to the end of the file. Examples -------- >>> import numpy as np >>> from tempfile import TemporaryFile >>> outfile = TemporaryFile() >>> x = np.arange(10) >>> np.save(outfile, x) >>> _ = outfile.seek(0) # Only needed to simulate closing & reopening file >>> np.load(outfile) array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]) >>> with open('test.npy', 'wb') as f: ... np.save(f, np.array([1, 2])) ... np.save(f, np.array([1, 3])) >>> with open('test.npy', 'rb') as f: ... a = np.load(f) ... b = np.load(f) >>> print(a, b) # [1 2] [1 3]",
    "type": "function",
    "file_path": "numpy\\numpy\\lib\\_npyio_impl.py",
    "ast_data": "FunctionDef name:save arg:file arg:arr arg:allow_pickle arg:fix_imports arguments arg arg arg arg If Compare Call If Call Assign Call Assign Call If Call Assign Assign Call With Assign Call Call Call"
  },
  {
    "library": "sphinx",
    "name": "limited_join",
    "source_code": "def limited_join(sep: str, items: list[str], max_chars: int=30, overflow_marker: str='...') -> str:\n    full_str = sep.join(items)\n    if len(full_str) < max_chars:\n        return full_str\n    n_chars = 0\n    n_items = 0\n    for item in items:\n        n_chars += len(item) + len(sep)\n        if n_chars < max_chars - len(overflow_marker):\n            n_items += 1\n        else:\n            break\n    return sep.join([*list(items[:n_items]), overflow_marker])",
    "docstring": "Join a number of strings into one, limiting the length to *max_chars*. If the string overflows this limit, replace the last fitting item by *overflow_marker*. Returns: joined_string",
    "type": "function",
    "file_path": "sphinx\\sphinx\\ext\\autosummary\\__init__.py",
    "ast_data": "FunctionDef name:limited_join arg:sep arg:items arg:max_chars arg:overflow_marker arguments arg arg arg arg Assign Call If Compare Call Return return:yes Assign Assign For Call Call If Compare Call Return return:yes Call Call"
  },
  {
    "library": "seaborn",
    "name": "_BaseGrid",
    "source_code": "class _BaseGrid:\n\n    def set(self, **kwargs):\n        for ax in self.axes.flat:\n            if ax is not None:\n                ax.set(**kwargs)\n        return self\n\n    @property\n    def fig(self):\n        return self._figure\n\n    @property\n    def figure(self):\n        return self._figure\n\n    def apply(self, func, *args, **kwargs):\n        func(self, *args, **kwargs)\n        return self\n\n    def pipe(self, func, *args, **kwargs):\n        return func(self, *args, **kwargs)\n\n    def savefig(self, *args, **kwargs):\n        kwargs = kwargs.copy()\n        kwargs.setdefault('bbox_inches', 'tight')\n        self.figure.savefig(*args, **kwargs)",
    "docstring": "Base class for grids of subplots.",
    "type": "class",
    "file_path": "seaborn\\seaborn\\axisgrid.py",
    "ast_data": "ClassDef name:_BaseGrid FunctionDef name:set arg:self arguments arg arg For If Compare Call Return return:yes FunctionDef name:fig arg:self arguments arg Return return:yes FunctionDef name:figure arg:self arguments arg Return return:yes FunctionDef name:apply arg:self arg:func arguments arg arg arg arg Call Return return:yes FunctionDef name:pipe arg:self arg:func arguments arg arg arg arg Return return:yes Call FunctionDef name:savefig arg:self arguments arg arg arg Assign Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_Block",
    "source_code": "class _Block(object):\n\n    def __init__(self):\n        self.is_loop_type = False\n        self.create_guard_current = False\n        self.create_guard_next = False",
    "docstring": "Tracks information about lexical blocks as they are visited in the AST. Mainly, this object tracks the creation of block guards that replace statements (e.g. ). Attributes: create_guard_current: bool, whether to create a guard for the current statement. create_guard_next: bool, whether to create a guard for the next statement. is_loop_type: bool, whether this block is the body of a loop.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\autograph\\converters\\continue_statements.py",
    "ast_data": "ClassDef name:_Block FunctionDef name:__init__ arg:self arguments arg Assign Assign Assign"
  },
  {
    "library": "matplotlib",
    "name": "subplots_adjust",
    "source_code": "def subplots_adjust(self, left=None, bottom=None, right=None, top=None, wspace=None, hspace=None):\n    if self.get_layout_engine() is not None and (not self.get_layout_engine().adjust_compatible):\n        _api.warn_external('This figure was using a layout engine that is incompatible with subplots_adjust and/or tight_layout; not calling subplots_adjust.')\n        return\n    self.subplotpars.update(left, bottom, right, top, wspace, hspace)\n    for ax in self.axes:\n        if ax.get_subplotspec() is not None:\n            ax._set_position(ax.get_subplotspec().get_position(self))\n    self.stale = True",
    "docstring": "Adjust the subplot layout parameters. Unset parameters are left unmodified; initial values are given by :rc:. .. plot:: _embedded_plots/figure_subplots_adjust.py Parameters ---------- left : float, optional The position of the left edge of the subplots, as a fraction of the figure width. right : float, optional The position of the right edge of the subplots, as a fraction of the figure width. bottom : float, optional The position of the bottom edge of the subplots, as a fraction of the figure height. top : float, optional The position of the top edge of the subplots, as a fraction of the figure height. wspace : float, optional The width of the padding between subplots, as a fraction of the average Axes width. hspace : float, optional The height of the padding between subplots, as a fraction of the average Axes height.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\figure.py",
    "ast_data": "FunctionDef name:subplots_adjust arg:self arg:left arg:bottom arg:right arg:top arg:wspace arg:hspace arguments arg arg arg arg arg arg arg If BoolOp Compare Call Call Call Return return:no Call For If Compare Call Call Call Call Assign"
  },
  {
    "library": "scikit-learn",
    "name": "_random_reassign",
    "source_code": "def _random_reassign(self):\n    self._n_since_last_reassign += self._batch_size\n    if (self._counts == 0).any() or self._n_since_last_reassign >= 10 * self.n_clusters:\n        self._n_since_last_reassign = 0\n        return True\n    return False",
    "docstring": "Check if a random reassignment needs to be done. Do random reassignments each time 10 * n_clusters samples have been processed. If there are empty clusters we always want to reassign.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\cluster\\_kmeans.py",
    "ast_data": "FunctionDef name:_random_reassign arg:self arguments arg If BoolOp Call Compare Compare Assign Return return:yes Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, name, buckets, description, *labels):\n    super(Sampler, self).__init__('Sampler', _sampler_methods, len(labels), name, buckets.buckets, description, *labels)",
    "docstring": "Creates a new Sampler. Args: name: name of the new metric. buckets: bucketing strategy of the new metric. description: description of the new metric. *labels: The label list of the new metric.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\monitoring.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:name arg:buckets arg:description arguments arg arg arg arg arg Call Call Call"
  },
  {
    "library": "sphinx",
    "name": "clean_astext",
    "source_code": "def clean_astext(node: Element) -> str:\n    node = node.deepcopy()\n    for img in node.findall(nodes.image):\n        img['alt'] = ''\n    for raw in list(node.findall(nodes.raw)):\n        raw.parent.remove(raw)\n    return node.astext()",
    "docstring": "Like node.astext(), but ignore images.",
    "type": "function",
    "file_path": "sphinx\\sphinx\\util\\nodes.py",
    "ast_data": "FunctionDef name:clean_astext arg:node arguments arg Assign Call For Call Assign For Call Call Call Return return:yes Call"
  },
  {
    "library": "django",
    "name": "update_or_create",
    "source_code": "def update_or_create(self, defaults=None, create_defaults=None, **kwargs):\n    update_defaults = defaults or {}\n    if create_defaults is None:\n        create_defaults = update_defaults\n    self._for_write = True\n    with transaction.atomic(using=self.db):\n        obj, created = self.select_for_update().get_or_create(create_defaults, **kwargs)\n        if created:\n            return (obj, created)\n        for k, v in resolve_callables(update_defaults):\n            setattr(obj, k, v)\n        update_fields = set(update_defaults)\n        concrete_field_names = self.model._meta._non_pk_concrete_field_names\n        if concrete_field_names.issuperset(update_fields):\n            pk_fields = self.model._meta.pk_fields\n            for field in self.model._meta.local_concrete_fields:\n                if not (field in pk_fields or field.__class__.pre_save is Field.pre_save):\n                    update_fields.add(field.name)\n                    if field.name != field.attname:\n                        update_fields.add(field.attname)\n            obj.save(using=self.db, update_fields=update_fields)\n        else:\n            obj.save(using=self.db)\n    return (obj, False)",
    "docstring": "Look up an object with the given kwargs, updating one with defaults if it exists, otherwise create a new one. Optionally, an object can be created with different values than defaults by using create_defaults. Return a tuple (object, created), where created is a boolean specifying whether an object was created.",
    "type": "method",
    "file_path": "django\\django\\db\\models\\query.py",
    "ast_data": "FunctionDef name:update_or_create arg:self arg:defaults arg:create_defaults arguments arg arg arg arg Assign BoolOp If Compare Assign Assign With Call Assign Call Call If Return return:yes For Call Call Assign Call Assign If Call Assign For If BoolOp Compare Compare Call If Compare Call Call Call Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "transform",
    "source_code": "def transform(self, X):\n    return self._transform(X)",
    "docstring": "Return the predictions for X for each estimator. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) Training vectors, where is the number of samples and is the number of features. Returns ------- y_preds : ndarray of shape (n_samples, n_estimators) Prediction outputs for each estimator.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\ensemble\\_stacking.py",
    "ast_data": "FunctionDef name:transform arg:self arg:X arguments arg arg Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "sample",
    "source_code": "def sample(self, n_samples=1):\n    check_is_fitted(self)\n    if n_samples < 1:\n        raise ValueError(\"Invalid value for 'n_samples': %d . The sampling requires at least one sample.\" % self.n_components)\n    _, n_features = self.means_.shape\n    rng = check_random_state(self.random_state)\n    n_samples_comp = rng.multinomial(n_samples, self.weights_)\n    if self.covariance_type == 'full':\n        X = np.vstack([rng.multivariate_normal(mean, covariance, int(sample)) for mean, covariance, sample in zip(self.means_, self.covariances_, n_samples_comp)])\n    elif self.covariance_type == 'tied':\n        X = np.vstack([rng.multivariate_normal(mean, self.covariances_, int(sample)) for mean, sample in zip(self.means_, n_samples_comp)])\n    else:\n        X = np.vstack([mean + rng.standard_normal(size=(sample, n_features)) * np.sqrt(covariance) for mean, covariance, sample in zip(self.means_, self.covariances_, n_samples_comp)])\n    y = np.concatenate([np.full(sample, j, dtype=int) for j, sample in enumerate(n_samples_comp)])\n    return (X, y)",
    "docstring": "Generate random samples from the fitted Gaussian distribution. Parameters ---------- n_samples : int, default=1 Number of samples to generate. Returns ------- X : array, shape (n_samples, n_features) Randomly generated sample. y : array, shape (nsamples,) Component labels.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\mixture\\_base.py",
    "ast_data": "FunctionDef name:sample arg:self arg:n_samples arguments arg arg Call If Compare Raise Call Assign Assign Call Assign Call If Compare Assign Call Call Call Call If Compare Assign Call Call Call Call Assign Call Call Call Call Assign Call Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_get_norm",
    "source_code": "def _get_norm(self, delta_begin: torch.Tensor, delta_end: torch.Tensor, density: torch.Tensor) -> torch.Tensor:\n    norm = (delta_end * delta_end * delta_end - delta_begin * delta_begin * delta_begin) / 3\n    return density * norm",
    "docstring": "Compute the norm of the values uniformaly distributed between delta_begin and delta_end. Currently only L2 norm is supported. norm = density * (integral_{begin, end} x^2) = density * (end^3 - begin^3) / 3",
    "type": "method",
    "file_path": "pytorch\\torch\\ao\\quantization\\observer.py",
    "ast_data": "FunctionDef name:_get_norm arg:self arg:delta_begin arg:delta_end arg:density arguments arg arg arg arg Assign Return return:yes"
  },
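A standalone numeric check of the closed form in the docstring, assuming SciPy for quadrature; it is not part of the observer code.

```python
# Verify density * (end**3 - begin**3) / 3 equals the integral of
# density * x**2 over [begin, end], i.e. the L2 mass of the interval.
from scipy.integrate import quad

density, begin, end = 0.5, 0.2, 1.4
closed_form = density * (end**3 - begin**3) / 3
numeric, _ = quad(lambda x: density * x * x, begin, end)
assert abs(closed_form - numeric) < 1e-12
```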
  {
    "library": "pytorch",
    "name": "BaseHeuristicSingleton",
    "source_code": "class BaseHeuristicSingleton(type):\n    _instances: dict[type[Any], Any] = {}\n    _lock: Lock = Lock()\n\n    def __call__(cls: BaseHeuristicSingleton, *args: Any, **kwargs: Any) -> BaseConfigHeuristic:\n        with cls._lock:\n            if cls not in cls._instances:\n                instance = super().__call__()\n                cls._instances[cls] = instance\n            return cls._instances[cls]",
    "docstring": "Thread-safe implementation of single to be used in the config heuristic subclasses to ensure heavy __init__ calls are not repeatedly run",
    "type": "class",
    "file_path": "pytorch\\torch\\_inductor\\template_heuristics.py",
    "ast_data": "ClassDef name:BaseHeuristicSingleton Call FunctionDef name:__call__ arg:cls arguments arg arg arg With If Compare Assign Call Call Assign Return return:yes"
  },
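The same singleton-metaclass pattern in isolation, as a generic sketch rather than the inductor class itself; `Config` is an invented example class.

```python
from threading import Lock
from typing import Any

class Singleton(type):
    _instances: dict[type, Any] = {}
    _lock = Lock()

    def __call__(cls, *args: Any, **kwargs: Any) -> Any:
        # Creation is guarded by a lock: heavy __init__ runs at most once per class.
        with cls._lock:
            if cls not in cls._instances:
                cls._instances[cls] = super().__call__(*args, **kwargs)
            return cls._instances[cls]

class Config(metaclass=Singleton):
    def __init__(self) -> None:
        self.tables = {"expensive": "setup"}  # imagine costly work here

assert Config() is Config()  # every call returns the one cached instance
```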
  {
    "library": "tensorflow",
    "name": "restore_variables",
    "source_code": "def restore_variables(self, sess, saver, import_scope=None):\n    with sess.graph.as_default():\n        if saver is None and (not variables._all_saveable_objects(scope=import_scope)):\n            tf_logging.info('The specified SavedModel has no variables; no checkpoints were restored.')\n        elif isinstance(saver, tf_saver.Saver):\n            saver.restore(sess, self._variables_path)\n        else:\n            raise ValueError('No tf.train.Saver object was passed to the function `SavedModelLoader.restore_variables`. Since there are variables in the graph, a saver is required.')",
    "docstring": "Restore SavedModel variable values into the session. Args: sess: tf.compat.v1.Session to restore variable values. saver: a tf.compat.v1.train.Saver object. Can be None if there are no variables in graph. This may be the saver returned by the load_graph() function, or a default . import_scope: Optional -- if specified, prepend this string followed by '/' to all loaded tensor names. This scope is applied to tensor instances loaded into the passed session, but it is *not* written through to the static protocol buffer that is returned. Raises: ValueError: if no saver was passed to the saver argument, and there are variables in the graph.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\saved_model\\loader_impl.py",
    "ast_data": "FunctionDef name:restore_variables arg:self arg:sess arg:saver arg:import_scope arguments arg arg arg arg With Call If BoolOp Compare Call Call If Call Call Raise Call"
  },
  {
    "library": "pytorch",
    "name": "as_python_constant",
    "source_code": "def as_python_constant(self):\n    return self.__variable.as_python_constant()",
    "docstring": "Returns the Python value this variable would have, but only if it is completely known at compile-time (e.g., it is constant). WARNING: Do NOT mutate the returned constant. The returned constant may or may not correspond to the actual value this variable may take on at runtime; for example, if the variable in question is a constant list, we may return a copy of that list.",
    "type": "method",
    "file_path": "pytorch\\torch\\_dynamo\\comptime.py",
    "ast_data": "FunctionDef name:as_python_constant arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "enable_fake_quant",
    "source_code": "def enable_fake_quant(mod):\n    if isinstance(mod, FakeQuantizeBase) or _is_fake_quant_script_module(mod):\n        mod.enable_fake_quant()",
    "docstring": "Enable fake quantization for the module. Enable fake quantization for this module, if applicable. Example usage:: # model is any PyTorch model model.apply(torch.ao.quantization.enable_fake_quant)",
    "type": "function",
    "file_path": "pytorch\\torch\\ao\\quantization\\fake_quantize.py",
    "ast_data": "FunctionDef name:enable_fake_quant arg:mod arguments arg If BoolOp Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_map_captures_to_created_tensors",
    "source_code": "def _map_captures_to_created_tensors(original_captures, tensor_map, function):\n    export_captures = []\n    for exterior, interior in original_captures:\n        mapped_resource = tensor_map.get(exterior, None)\n        if mapped_resource is None:\n            _raise_untracked_capture_error(function.name, exterior, interior)\n        export_captures.append(mapped_resource)\n    return export_captures",
    "docstring": "Maps eager tensors captured by a function to Graph resources for export. Args: original_captures: A dictionary mapping from tensors captured by the function to interior placeholders for those tensors (inside the function body). tensor_map: A dictionary mapping from resource tensors owned by the eager context to resource tensors in the exported graph. function: Function with the original captures. Only used when raising the AssertionError. Returns: A list of stand-in tensors which belong to the exported graph, corresponding to the function's captures. Raises: AssertionError: If the function references a resource which is not part of .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\polymorphic_function\\saved_model_exported_concrete.py",
    "ast_data": "FunctionDef name:_map_captures_to_created_tensors arg:original_captures arg:tensor_map arg:function arguments arg arg arg Assign For Assign Call If Compare Call Call Return return:yes"
  },
  {
    "library": "django",
    "name": "geos",
    "source_code": "@property\ndef geos(self):\n    if self.geos_support:\n        from django.contrib.gis.geos import GEOSGeometry\n        return GEOSGeometry(self._geos_ptr(), self.srid)\n    else:\n        from django.contrib.gis.geos import GEOSException\n        raise GEOSException(f'GEOS does not support {self.__class__.__qualname__}.')",
    "docstring": "Return a GEOSGeometry object from this OGRGeometry.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\gdal\\geometries.py",
    "ast_data": "FunctionDef name:geos arg:self arguments arg If Return return:yes Call Call Raise Call"
  },
  {
    "library": "pandas",
    "name": "_transform_with_numba",
    "source_code": "@final\ndef _transform_with_numba(self, func, *args, engine_kwargs=None, **kwargs):\n    data = self._obj_with_exclusions\n    index_sorting = self._grouper.result_ilocs\n    df = data if data.ndim == 2 else data.to_frame()\n    starts, ends, sorted_index, sorted_data = self._numba_prep(df)\n    numba_.validate_udf(func)\n    args, kwargs = prepare_function_arguments(func, args, kwargs, num_required_args=2)\n    numba_transform_func = numba_.generate_numba_transform_func(func, **get_jit_arguments(engine_kwargs))\n    result = numba_transform_func(sorted_data, sorted_index, starts, ends, len(df.columns), *args)\n    result = result.take(np.argsort(index_sorting), axis=0)\n    index = data.index\n    if data.ndim == 1:\n        result_kwargs = {'name': data.name}\n        result = result.ravel()\n    else:\n        result_kwargs = {'columns': data.columns}\n    return data._constructor(result, index=index, **result_kwargs)",
    "docstring": "Perform groupby transform routine with the numba engine. This routine mimics the data splitting routine of the DataSplitter class to generate the indices of each group in the sorted data and then passes the data and indices into a Numba jitted function.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\groupby\\groupby.py",
    "ast_data": "FunctionDef name:_transform_with_numba arg:self arg:func arguments arg arg arg arg arg Assign Assign Assign Compare Call Assign Call Call Assign Call Assign Call Call Assign Call Call Assign Call Call Assign If Compare Assign Assign Call Assign Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "get_disallowed_checksums",
    "source_code": "def get_disallowed_checksums(binary: str) -> set[str]:\n    proc = subprocess.run([binary, 'query', 'kind(http_archive, //external:*)', '--output=xml'], capture_output=True, check=True, text=True)\n    root = ET.fromstring(proc.stdout)\n    disallowed_checksums = set()\n    for rule in root.findall('.//rule[@class=\"http_archive\"]'):\n        urls_node = rule.find('.//list[@name=\"urls\"]')\n        if urls_node is None:\n            continue\n        urls = [n.get('value') for n in urls_node.findall('.//string')]\n        checksum_node = rule.find('.//string[@name=\"sha256\"]')\n        if checksum_node is None:\n            continue\n        checksum = checksum_node.get('value')\n        if not checksum:\n            continue\n        if not is_required_checksum(urls):\n            disallowed_checksums.add(checksum)\n    return disallowed_checksums",
    "docstring": "Return the set of disallowed checksums from all http_archive rules",
    "type": "function",
    "file_path": "pytorch\\tools\\linter\\adapters\\bazel_linter.py",
    "ast_data": "FunctionDef name:get_disallowed_checksums arg:binary arguments arg Assign Call Assign Call Assign Call For Call Assign Call If Compare Assign Call Call Assign Call If Compare Assign Call If If Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_formula",
    "source_code": "def _formula(ln: int, p: int, d: int, k: int, s: int) -> int:\n    return (ln + 2 * p - d * (k - 1) - 1) // s + 1",
    "docstring": "Formula to apply to calculate the length of some dimension of the output See: Args: ln: length of the dimension p: padding in that dim d: dilation in that dim k: kernel size in that dim s: stride in that dim Returns: The output length",
    "type": "function",
    "file_path": "pytorch\\torch\\_meta_registrations.py",
    "ast_data": "FunctionDef name:_formula arg:ln arg:p arg:d arg:k arg:s arguments arg arg arg arg arg Return return:yes"
  },
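A worked instance of the output-length formula, with the function inlined so the arithmetic can be checked directly.

```python
def _formula(ln: int, p: int, d: int, k: int, s: int) -> int:
    return (ln + 2 * p - d * (k - 1) - 1) // s + 1

# length 10, padding 1, dilation 1, kernel 3, stride 2:
# floor((10 + 2 - 2 - 1) / 2) + 1 = 4 + 1 = 5
assert _formula(10, 1, 1, 3, 2) == 5
```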
  {
    "library": "tensorflow",
    "name": "get",
    "source_code": "@dispatch.add_dispatch_support\ndef get(identifier):\n    if identifier is None:\n        return linear\n    if isinstance(identifier, str):\n        identifier = str(identifier)\n        return deserialize(identifier)\n    elif isinstance(identifier, dict):\n        return deserialize(identifier)\n    elif callable(identifier):\n        return identifier\n    else:\n        raise TypeError('Could not interpret activation function identifier: {}'.format(identifier))",
    "docstring": "Returns function. Args: identifier: Function or string Returns: Function corresponding to the input string or input function. For example: >>> tf.keras.activations.get('softmax') >>> tf.keras.activations.get(tf.keras.activations.softmax) >>> tf.keras.activations.get(None) >>> tf.keras.activations.get(abs) >>> tf.keras.activations.get('abcd') Traceback (most recent call last): ... ValueError: Unknown activation function:abcd Raises: ValueError: Input is an unknown function or string, i.e., the input does not denote any defined function.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\activations.py",
    "ast_data": "FunctionDef name:get arg:identifier arguments arg If Compare Return return:yes If Call Assign Call Return return:yes Call If Call Return return:yes Call If Call Return return:yes Raise Call Call"
  },
  {
    "library": "tensorflow",
    "name": "get_tf_dtype",
    "source_code": "def get_tf_dtype(self, allowed_set=None):\n    if allowed_set:\n        index = self.get_int(0, len(allowed_set) - 1)\n        if allowed_set[index] not in _TF_DTYPES:\n            raise tf.errors.InvalidArgumentError(None, None, 'Given dtype {} is not accepted.'.format(allowed_set[index]))\n        return allowed_set[index]\n    else:\n        index = self.get_int(0, len(_TF_DTYPES) - 1)\n        return _TF_DTYPES[index]",
    "docstring": "Return a random tensorflow dtype. Args: allowed_set: An allowlisted set of dtypes to choose from instead of all of them. Returns: A random type from the list containing all TensorFlow types.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\security\\fuzzing\\python_fuzzing.py",
    "ast_data": "FunctionDef name:get_tf_dtype arg:self arg:allowed_set arguments arg arg If Assign Call Call If Compare Raise Call Call Return return:yes Assign Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "find_csv_files",
    "source_code": "def find_csv_files(path, perf_compare):\n\n    def is_csv(f):\n        if perf_compare:\n            regex = 'training_(torchbench|huggingface|timm_models)\\\\.csv'\n            return re.match(regex, f) is not None\n        else:\n            return f.endswith('_performance.csv')\n    csv_files = []\n    for root, dirs, files in os.walk(path):\n        for file in files:\n            if is_csv(file):\n                csv_files.append(os.path.join(root, file))\n    return csv_files",
    "docstring": "Recursively search for all CSV files in directory and subdirectories whose name contains a target string.",
    "type": "function",
    "file_path": "pytorch\\benchmarks\\dynamo\\summarize_perf.py",
    "ast_data": "FunctionDef name:find_csv_files arg:path arg:perf_compare arguments arg arg FunctionDef name:is_csv arg:f arguments arg If Assign Return return:yes Compare Call Return return:yes Call Assign For Call For If Call Call Call Return return:yes"
  },
  {
    "library": "pandas",
    "name": "searchsorted",
    "source_code": "def searchsorted(arr: ArrayLike, value: NumpyValueArrayLike | ExtensionArray, side: Literal['left', 'right']='left', sorter: NumpySorter | None=None) -> npt.NDArray[np.intp] | np.intp:\n    if sorter is not None:\n        sorter = ensure_platform_int(sorter)\n    if isinstance(arr, np.ndarray) and arr.dtype.kind in 'iu' and (is_integer(value) or is_integer_dtype(value)):\n        iinfo = np.iinfo(arr.dtype.type)\n        value_arr = np.array([value]) if is_integer(value) else np.array(value)\n        if (value_arr >= iinfo.min).all() and (value_arr <= iinfo.max).all():\n            dtype = arr.dtype\n        else:\n            dtype = value_arr.dtype\n        if is_integer(value):\n            value = cast(int, dtype.type(value))\n        else:\n            value = pd_array(cast(ArrayLike, value), dtype=dtype)\n    else:\n        arr = ensure_wrapped_if_datetimelike(arr)\n    return arr.searchsorted(value, side=side, sorter=sorter)",
    "docstring": "Find indices where elements should be inserted to maintain order. Find the indices into a sorted array (a) such that, if the corresponding elements in were inserted before the indices, the order of would be preserved. Assuming that is sorted: ====== ================================ returned index satisfies ====== ================================ left `sortersorterarrself`). sorter : 1-D array-like, optional Optional array of integer indices that sort array a into ascending order. They are typically the result of argsort. Returns ------- array of ints or int If value is array-like, array of insertion points. If value is scalar, a single integer. See Also -------- numpy.searchsorted : Similar method from NumPy.",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\algorithms.py",
    "ast_data": "FunctionDef name:searchsorted arg:arr arg:value arg:side arg:sorter arguments arg arg arg arg If Compare Assign Call If BoolOp Call Compare BoolOp Call Call Assign Call Assign Call Call Call If BoolOp Call Compare Call Compare Assign Assign If Call Assign Call Call Assign Call Call Assign Call Return return:yes Call"
  },
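The side semantics in the table above mirror `numpy.searchsorted`, which the pandas helper ultimately defers to; a small demonstration:

```python
import numpy as np

arr = np.array([1, 3, 3, 7])
print(np.searchsorted(arr, 3, side="left"))   # 1: before the run of equal values
print(np.searchsorted(arr, 3, side="right"))  # 3: after the run of equal values
```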
  {
    "library": "tensorflow",
    "name": "LecunUniform",
    "source_code": "class LecunUniform(VarianceScaling):\n\n    def __init__(self, seed=None):\n        super(LecunUniform, self).__init__(scale=1.0, mode='fan_in', distribution='uniform', seed=seed)\n\n    def get_config(self):\n        return {'seed': self.seed}",
    "docstring": "Lecun uniform initializer. Also available via the shortcut function . Draws samples from a uniform distribution within , where ( is the number of input units in the weight tensor). Examples: >>> # Standalone usage: >>> initializer = tf.keras.initializers.LecunUniform() >>> values = initializer(shape=(2, 2)) >>> # Usage in a Keras layer: >>> initializer = tf.keras.initializers.LecunUniform() >>> layer = tf.keras.layers.Dense(3, kernel_initializer=initializer) Args: seed: A Python integer. An initializer created with a given seed will always produce the same random tensor for a given shape and dtype. References: - Self-Normalizing Neural Networks, [Klambauer et al., 2017]( # pylint: disable=line-too-long ([pdf]( - Efficient Backprop, [Lecun et al., 1998](",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\initializers\\initializers_v2.py",
    "ast_data": "ClassDef name:LecunUniform FunctionDef name:__init__ arg:self arg:seed arguments arg arg Call Call FunctionDef name:get_config arg:self arguments arg Return return:yes"
  },
  {
    "library": "pandas",
    "name": "factorize_from_iterables",
    "source_code": "def factorize_from_iterables(iterables) -> tuple[list[np.ndarray], list[Index]]:\n    if len(iterables) == 0:\n        return ([], [])\n    codes, categories = zip(*(factorize_from_iterable(it) for it in iterables))\n    return (list(codes), list(categories))",
    "docstring": "A higher-level wrapper over . Parameters ---------- iterables : list-like of list-likes Returns ------- codes : list of ndarrays categories : list of Indexes Notes ----- See for more info.",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\arrays\\categorical.py",
    "ast_data": "FunctionDef name:factorize_from_iterables arg:iterables arguments arg If Compare Call Return return:no Assign Call Call Return return:yes Call Call"
  },
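The public `pandas.factorize` shows what the private helper computes per iterable; a short sketch:

```python
import pandas as pd

codes, uniques = pd.factorize(["b", "a", "b", "c"])
print(codes)    # [0 1 0 2] -- integer code per element
print(uniques)  # ['b' 'a' 'c'] -- unique values in order of appearance
```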
  {
    "library": "matplotlib",
    "name": "get_transform",
    "source_code": "def get_transform(self):\n    if self._transform is None:\n        self._transform = self.axes.transData\n    elif not isinstance(self._transform, mtransforms.Transform) and hasattr(self._transform, '_as_mpl_transform'):\n        self._transform = self._transform._as_mpl_transform(self.axes)\n    return self._transform",
    "docstring": "Return the instance used by this ContourSet.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\contour.py",
    "ast_data": "FunctionDef name:get_transform arg:self arguments arg If Compare Assign If BoolOp Call Call Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "disable_v2_dtype_behavior",
    "source_code": "def disable_v2_dtype_behavior():\n    global V2_DTYPE_BEHAVIOR\n    V2_DTYPE_BEHAVIOR = False",
    "docstring": "Disables the V2 dtype behavior for Keras layers. See .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\base_layer_utils.py",
    "ast_data": "FunctionDef name:disable_v2_dtype_behavior arguments Assign"
  },
  {
    "library": "pytorch",
    "name": "_format_time",
    "source_code": "def _format_time(time_us):\n    US_IN_SECOND = 1000.0 * 1000.0\n    US_IN_MS = 1000.0\n    if time_us >= US_IN_SECOND:\n        return f'{time_us / US_IN_SECOND:.3f}s'\n    if time_us >= US_IN_MS:\n        return f'{time_us / US_IN_MS:.3f}ms'\n    return f'{time_us:.3f}us'",
    "docstring": "Define how to format time in FunctionEvent.",
    "type": "function",
    "file_path": "pytorch\\torch\\autograd\\profiler_util.py",
    "ast_data": "FunctionDef name:_format_time arg:time_us arguments arg Assign Assign If Compare Return return:yes If Compare Return return:yes Return return:yes"
  },
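The thresholds are in microseconds; inlining the helper makes them easy to verify.

```python
def _format_time(time_us):
    US_IN_SECOND = 1000.0 * 1000.0
    US_IN_MS = 1000.0
    if time_us >= US_IN_SECOND:
        return f"{time_us / US_IN_SECOND:.3f}s"
    if time_us >= US_IN_MS:
        return f"{time_us / US_IN_MS:.3f}ms"
    return f"{time_us:.3f}us"

assert _format_time(2_500_000) == "2.500s"   # >= 1e6 us renders as seconds
assert _format_time(2_500) == "2.500ms"      # >= 1e3 us renders as milliseconds
assert _format_time(250) == "250.000us"      # otherwise microseconds
```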
  {
    "library": "matplotlib",
    "name": "minpos",
    "source_code": "@property\ndef minpos(self):\n    return self._minpos",
    "docstring": "The minimum positive value in both directions within the Bbox. This is useful when dealing with logarithmic scales and other scales where negative bounds result in floating point errors, and will be used as the minimum extent instead of *p0*.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\transforms.py",
    "ast_data": "FunctionDef name:minpos arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "get_caching_key",
    "source_code": "def get_caching_key(self, user_context):\n    raise NotImplementedError('subclasses must override this')",
    "docstring": "Returns a unique key to use for caching. Subclasses must override this. Calls made to with functions that have the same code object and caching key will return a cached instance on subsequent invocations. Args: user_context: The context object which was passed to . Returns: extra_locals: A hashable.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\autograph\\pyct\\transpiler.py",
    "ast_data": "FunctionDef name:get_caching_key arg:self arg:user_context arguments arg arg Raise Call"
  },
  {
    "library": "matplotlib",
    "name": "get_cursor_data",
    "source_code": "def get_cursor_data(self, event):\n    xmin, xmax, ymin, ymax = self.get_extent()\n    if self.origin == 'upper':\n        ymin, ymax = (ymax, ymin)\n    arr = self.get_array()\n    data_extent = Bbox([[xmin, ymin], [xmax, ymax]])\n    array_extent = Bbox([[0, 0], [arr.shape[1], arr.shape[0]]])\n    trans = self.get_transform().inverted()\n    trans += BboxTransform(boxin=data_extent, boxout=array_extent)\n    point = trans.transform([event.x, event.y])\n    if any(np.isnan(point)):\n        return None\n    j, i = point.astype(int)\n    if not 0 <= i < arr.shape[0] or not 0 <= j < arr.shape[1]:\n        return None\n    else:\n        return arr[i, j]",
    "docstring": "Return the image value at the event position or *None* if the event is outside the image. See Also -------- matplotlib.artist.Artist.get_cursor_data",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\image.py",
    "ast_data": "FunctionDef name:get_cursor_data arg:self arg:event arguments arg arg Assign Call If Compare Assign Assign Call Assign Call Assign Call Assign Call Call Call Assign Call If Call Call Return return:no Assign Call If BoolOp Compare Compare Return return:no Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "copy_sharding",
    "source_code": "def copy_sharding(from_tensor, to_tensor, use_sharding_op=False):\n    sharding = get_tensor_sharding(from_tensor)\n    if sharding is None:\n        return to_tensor\n    if isinstance(to_tensor, resource_variable_ops.BaseResourceVariable) and context.xla_sharding_for_resource_variables_enabled():\n        proto = xla_data_pb2.OpSharding()\n        proto.ParseFromString(sharding)\n        to_tensor._set_xla_sharding(proto)\n        return to_tensor\n    if use_sharding_op:\n        to_tensor = tf2xla.sharding(to_tensor, sharding=sharding)\n    attr_value = attr_value_pb2.AttrValue(s=sharding)\n    to_tensor.op._set_attr('_XlaSharding', attr_value)\n    return to_tensor",
    "docstring": "Copies the a tensor's sharding to another. Args: from_tensor: Source tensor. Must be the sole output of an op. to_tensor: the tensor the annotate with the copy. use_sharding_op: whether to create a sharding op on . Returns: A tensor with sharding annotation copied from .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\compiler\\xla\\experimental\\xla_sharding.py",
    "ast_data": "FunctionDef name:copy_sharding arg:from_tensor arg:to_tensor arg:use_sharding_op arguments arg arg arg Assign Call If Compare Return return:yes If BoolOp Call Call Assign Call Call Call Return return:yes If Assign Call Assign Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "save_model_secs",
    "source_code": "@property\ndef save_model_secs(self):\n    return self._save_model_secs",
    "docstring": "Return the delay between checkpoints. Returns: A timestamp.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\training\\supervisor.py",
    "ast_data": "FunctionDef name:save_model_secs arg:self arguments arg Return return:yes"
  },
  {
    "library": "kornia",
    "name": "_is_valid_arg",
    "source_code": "def _is_valid_arg(self, arg: Any) -> bool:\n    if isinstance(arg, (str,)) and os.path.exists(arg):\n        return True\n    if isinstance(arg, (Tensor,)):\n        return True\n    if isinstance(arg, (np.ndarray,)):\n        return True\n    if isinstance(arg, Image.Image):\n        return True\n    return False",
    "docstring": "Check if the argument is a valid type for conversion. Args: arg: The argument to check. Returns: bool: True if valid, False otherwise.",
    "type": "method",
    "file_path": "kornia\\kornia\\core\\module.py",
    "ast_data": "FunctionDef name:_is_valid_arg arg:self arg:arg arguments arg arg If BoolOp Call Call Return return:yes If Call Return return:yes If Call Return return:yes If Call Return return:yes Return return:yes"
  },
  {
    "library": "numpy",
    "name": "_create_arrays",
    "source_code": "def _create_arrays(broadcast_shape, dim_sizes, list_of_core_dims, dtypes, results=None):\n    shapes = _calculate_shapes(broadcast_shape, dim_sizes, list_of_core_dims)\n    if dtypes is None:\n        dtypes = [None] * len(shapes)\n    if results is None:\n        arrays = tuple((np.empty(shape=shape, dtype=dtype) for shape, dtype in zip(shapes, dtypes)))\n    else:\n        arrays = tuple((np.empty_like(result, shape=shape, dtype=dtype) for result, shape, dtype in zip(results, shapes, dtypes)))\n    return arrays",
    "docstring": "Helper for creating output arrays in vectorize.",
    "type": "function",
    "file_path": "numpy\\numpy\\lib\\_function_base_impl.py",
    "ast_data": "FunctionDef name:_create_arrays arg:broadcast_shape arg:dim_sizes arg:list_of_core_dims arg:dtypes arg:results arguments arg arg arg arg arg Assign Call If Compare Assign Call If Compare Assign Call Call Call Assign Call Call Call Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "predict",
    "source_code": "def predict(self, X):\n    raw_predictions = self._raw_predict(X)\n    if raw_predictions.shape[1] == 1:\n        encoded_classes = (raw_predictions.ravel() > 0).astype(int)\n    else:\n        encoded_classes = np.argmax(raw_predictions, axis=1)\n    return self.classes_[encoded_classes]",
    "docstring": "Predict classes for X. Parameters ---------- X : array-like, shape (n_samples, n_features) The input samples. Returns ------- y : ndarray, shape (n_samples,) The predicted classes.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\ensemble\\_hist_gradient_boosting\\gradient_boosting.py",
    "ast_data": "FunctionDef name:predict arg:self arg:X arguments arg arg Assign Call If Compare Assign Call Compare Call Assign Call Return return:yes"
  },
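The decoding step above reduces to a threshold for binary models and an argmax otherwise; a standalone sketch of the binary branch:

```python
import numpy as np

classes = np.array(["cat", "dog"])
raw = np.array([[-1.2], [0.7]])          # one raw-score column in the binary case
encoded = (raw.ravel() > 0).astype(int)  # threshold at 0 -> [0, 1]
print(classes[encoded])                  # ['cat' 'dog']
```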
  {
    "library": "matplotlib",
    "name": "start_rasterizing",
    "source_code": "def start_rasterizing(self):\n    pass",
    "docstring": "Switch to the raster renderer. Used by .",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\backend_bases.py",
    "ast_data": "FunctionDef name:start_rasterizing arg:self arguments arg"
  },
  {
    "library": "scipy",
    "name": "from_power_basis",
    "source_code": "@classmethod\ndef from_power_basis(cls, pp, extrapolate=None):\n    if not isinstance(pp, PPoly):\n        raise TypeError(f'.from_power_basis only accepts PPoly instances. Got {type(pp)} instead.')\n    dx = np.diff(pp.x)\n    k = pp.c.shape[0] - 1\n    rest = (None,) * (pp.c.ndim - 2)\n    c = np.zeros_like(pp.c)\n    for a in range(k + 1):\n        factor = pp.c[a] / comb(k, k - a) * dx[(slice(None),) + rest] ** (k - a)\n        for j in range(k - a, k + 1):\n            c[j] += factor * comb(j, k - a)\n    if extrapolate is None:\n        extrapolate = pp.extrapolate\n    return cls.construct_fast(c, pp.x, extrapolate, pp.axis)",
    "docstring": "Construct a piecewise polynomial in Bernstein basis from a power basis polynomial. Parameters ---------- pp : PPoly A piecewise polynomial in the power basis extrapolate : bool or 'periodic', optional If bool, determines whether to extrapolate to out-of-bounds points based on first and last intervals, or to return NaNs. If 'periodic', periodic extrapolation is used. Default is True.",
    "type": "method",
    "file_path": "scipy\\scipy\\interpolate\\_interpolate.py",
    "ast_data": "FunctionDef name:from_power_basis arg:cls arg:pp arg:extrapolate arguments arg arg arg If Call Raise Call Call Assign Call Assign Assign Assign Call For Call Assign Call Call For Call Call If Compare Assign Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "Partitioner",
    "source_code": "@tf_export('distribute.experimental.partitioners.Partitioner', v1=[])\nclass Partitioner(object):\n\n    def __call__(self, shape, dtype, axis=0):\n        raise NotImplementedError",
    "docstring": "Partitioner base class: all partitiners inherit from this class. Partitioners should implement a method with the following signature:",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\sharded_variable.py",
    "ast_data": "ClassDef name:Partitioner FunctionDef name:__call__ arg:self arg:shape arg:dtype arg:axis arguments arg arg arg arg Raise Call"
  },
  {
    "library": "pytorch",
    "name": "stream",
    "source_code": "def stream(stream: Stream) -> AbstractContextManager:\n    return StreamContext(stream)",
    "docstring": "Wrapper around the Context-manager StreamContext that selects a given stream. N.B. This function only exists to facilitate device-agnostic code",
    "type": "function",
    "file_path": "pytorch\\torch\\cpu\\__init__.py",
    "ast_data": "FunctionDef name:stream arg:stream arguments arg Return return:yes Call"
  },
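A device-agnostic usage sketch; `torch.cpu.Stream` is the inert CPU counterpart of CUDA streams, per the module this wrapper lives in.

```python
import torch

s = torch.cpu.Stream()     # no-op stream object on CPU
with torch.cpu.stream(s):  # the wrapper shown above selects it
    y = torch.ones(3) * 2  # same code path works with or without an accelerator
print(y)
```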
  {
    "library": "matplotlib",
    "name": "set_params",
    "source_code": "def set_params(self, **kwargs):\n    if 'nbins' in kwargs:\n        self._nbins = kwargs.pop('nbins')\n        if self._nbins != 'auto':\n            self._nbins = int(self._nbins)\n    if 'symmetric' in kwargs:\n        self._symmetric = kwargs.pop('symmetric')\n    if 'prune' in kwargs:\n        prune = kwargs.pop('prune')\n        _api.check_in_list(['upper', 'lower', 'both', None], prune=prune)\n        self._prune = prune\n    if 'min_n_ticks' in kwargs:\n        self._min_n_ticks = max(1, kwargs.pop('min_n_ticks'))\n    if 'steps' in kwargs:\n        steps = kwargs.pop('steps')\n        if steps is None:\n            self._steps = np.array([1, 1.5, 2, 2.5, 3, 4, 5, 6, 8, 10])\n        else:\n            self._steps = self._validate_steps(steps)\n        self._extended_steps = self._staircase(self._steps)\n    if 'integer' in kwargs:\n        self._integer = kwargs.pop('integer')\n    if kwargs:\n        raise _api.kwarg_error('set_params', kwargs)",
    "docstring": "Set parameters for this locator. Parameters ---------- nbins : int or 'auto', optional see steps : array-like, optional see integer : bool, optional see symmetric : bool, optional see prune : {'lower', 'upper', 'both', None}, optional see min_n_ticks : int, optional see",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\ticker.py",
    "ast_data": "FunctionDef name:set_params arg:self arguments arg arg If Compare Assign Call If Compare Assign Call If Compare Assign Call If Compare Assign Call Call Assign If Compare Assign Call Call If Compare Assign Call If Compare Assign Call Assign Call Assign Call If Compare Assign Call If Raise Call"
  },
  {
    "library": "pytorch",
    "name": "is_metal_capture_enabled",
    "source_code": "def is_metal_capture_enabled() -> bool:\n    return torch._C._mps_isCaptureEnabled()",
    "docstring": "Checks if context manager is usable To enable metal capture, set MTL_CAPTURE_ENABLED envvar",
    "type": "function",
    "file_path": "pytorch\\torch\\mps\\profiler.py",
    "ast_data": "FunctionDef name:is_metal_capture_enabled arguments Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_inverse",
    "source_code": "def _inverse(self, y):\n    raise NotImplementedError('inverse not implemented')",
    "docstring": "Subclass implementation for public function.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\distributions\\bijector_impl.py",
    "ast_data": "FunctionDef name:_inverse arg:self arg:y arguments arg arg Raise Call"
  },
  {
    "library": "pytorch",
    "name": "vjp",
    "source_code": "@exposed_in('torch.func')\ndef vjp(func: Callable, *primals, has_aux: bool=False):\n    return _vjp_with_argnums(func, *primals, has_aux=has_aux)",
    "docstring": "Standing for the vector-Jacobian product, returns a tuple containing the results of `vjpgradvjpvjpvjp`.",
    "type": "function",
    "file_path": "pytorch\\torch\\_functorch\\eager_transforms.py",
    "ast_data": "FunctionDef name:vjp arg:func arguments arg arg arg Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "_shard_state_dict",
    "source_code": "def _shard_state_dict(state_dict: dict[str, torch.Tensor], placement_strategies: dict[Node, PlacementStrategy], graph_signature: ExportGraphSignature, mesh: DeviceMesh) -> None:\n    for node, placement_strategy in placement_strategies.items():\n        if node.op != 'placeholder':\n            continue\n        if node.name in graph_signature.inputs_to_parameters:\n            fqn = graph_signature.inputs_to_parameters[node.name]\n        elif node.name in graph_signature.inputs_to_buffers:\n            fqn = graph_signature.inputs_to_buffers[node.name]\n        else:\n            continue\n        assert fqn in state_dict, f'{fqn} not found in state dict: {state_dict.keys()}'\n        original_param = state_dict[fqn]\n        dtensor_param = distribute_tensor(original_param, mesh, placement_strategy.output_spec.placements)\n        local_param = dtensor_param.to_local()\n        state_dict[fqn] = torch.nn.Parameter(local_param) if isinstance(original_param, torch.nn.Parameter) else local_param",
    "docstring": "Inplace partition the weights based on the placement strategy",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\tensor\\experimental\\_tp_transform.py",
    "ast_data": "FunctionDef name:_shard_state_dict arg:state_dict arg:placement_strategies arg:graph_signature arg:mesh arguments arg arg arg arg For Call If Compare If Compare Assign If Compare Assign Compare Call Assign Assign Call Assign Call Assign Call Call"
  },
  {
    "library": "scikit-learn",
    "name": "kernel_metrics",
    "source_code": "def kernel_metrics():\n    return PAIRWISE_KERNEL_FUNCTIONS",
    "docstring": "Valid metrics for pairwise_kernels. This function simply returns the valid pairwise distance metrics. It exists, however, to allow for a verbose description of the mapping for each of the valid strings. The valid distance metrics, and the function they map to, are: =============== ======================================== metric Function =============== ======================================== 'additive_chi2' sklearn.pairwise.additive_chi2_kernel 'chi2' sklearn.pairwise.chi2_kernel 'linear' sklearn.pairwise.linear_kernel 'poly' sklearn.pairwise.polynomial_kernel 'polynomial' sklearn.pairwise.polynomial_kernel 'rbf' sklearn.pairwise.rbf_kernel 'laplacian' sklearn.pairwise.laplacian_kernel 'sigmoid' sklearn.pairwise.sigmoid_kernel 'cosine' sklearn.pairwise.cosine_similarity =============== ======================================== Read more in the :ref:. Returns ------- kernel_metrics : dict Returns valid metrics for pairwise_kernels.",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\metrics\\pairwise.py",
    "ast_data": "FunctionDef name:kernel_metrics arguments Return return:yes"
  },
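The returned mapping backs the string metrics accepted by `pairwise_kernels`; a quick demonstration:

```python
import numpy as np
from sklearn.metrics.pairwise import kernel_metrics, pairwise_kernels

X = np.array([[0.0, 1.0], [1.0, 0.0]])
print(sorted(kernel_metrics()))              # 'additive_chi2', 'chi2', 'cosine', ...
print(pairwise_kernels(X, metric="linear"))  # equals X @ X.T for the linear kernel
```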
  {
    "library": "tensorflow",
    "name": "validate_per_replica_inputs",
    "source_code": "def validate_per_replica_inputs(distribution_strategy, x):\n    per_replica_list = nest.flatten(x, expand_composites=True)\n    x_values_list = []\n    for x in per_replica_list:\n        x_values = distribution_strategy.unwrap(x)\n        for value in x_values:\n            if not tensor_util.is_tf_type(value):\n                raise ValueError('Dataset input to the model should be tensors instead they are of type {}'.format(type(value)))\n        if not context.executing_eagerly():\n            validate_all_tensor_shapes(x, x_values)\n        validate_all_tensor_types(x, x_values)\n        x_values_list.append(x_values[0])\n    return x_values_list",
    "docstring": "Validates PerReplica dataset input list. Args: distribution_strategy: The current DistributionStrategy used to call , and . x: A list of PerReplica objects that represent the input or target values. Returns: List containing the first element of each of the PerReplica objects in the input list. Raises: ValueError: If any of the objects in the is not a tensor.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\distribute\\distributed_training_utils_v1.py",
    "ast_data": "FunctionDef name:validate_per_replica_inputs arg:distribution_strategy arg:x arguments arg arg Assign Call Assign For Assign Call For If Call Raise Call Call Call If Call Call Call Call Return return:yes"
  },
  {
    "library": "scipy",
    "name": "_array_like",
    "source_code": "def _array_like(x, x0):\n    x = np.reshape(x, np.shape(x0))\n    wrap = getattr(x0, '__array_wrap__', x.__array_wrap__)\n    return wrap(x)",
    "docstring": "Return ndarray as same array subclass and shape as",
    "type": "function",
    "file_path": "scipy\\scipy\\optimize\\_nonlin.py",
    "ast_data": "FunctionDef name:_array_like arg:x arg:x0 arguments arg arg Assign Call Call Assign Call Return return:yes Call"
  },
  {
    "library": "numpy",
    "name": "ConversionWarning",
    "source_code": "class ConversionWarning(UserWarning):\n    pass",
    "docstring": "Warning issued when a string converter has a problem. Notes ----- In a is issued if raising exceptions is explicitly suppressed with the \"invalid_raise\" keyword.",
    "type": "class",
    "file_path": "numpy\\numpy\\lib\\_iotools.py",
    "ast_data": "ClassDef name:ConversionWarning"
  },
  {
    "library": "matplotlib",
    "name": "get_dpi",
    "source_code": "def get_dpi(self):\n    return self._parent.dpi",
    "docstring": "Return the resolution of the parent figure in dots-per-inch as a float.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\figure.py",
    "ast_data": "FunctionDef name:get_dpi arg:self arguments arg Return return:yes"
  },
  {
    "library": "kornia",
    "name": "MV2Block",
    "source_code": "class MV2Block(Module):\n\n    def __init__(self, inp: int, oup: int, stride: int=1, expansion: int=4) -> None:\n        super().__init__()\n        self.stride = stride\n        hidden_dim = int(inp * expansion)\n        self.use_res_connect = self.stride == 1 and inp == oup\n        if expansion == 1:\n            self.conv = nn.Sequential(nn.Conv2d(hidden_dim, hidden_dim, 3, stride, 1, groups=hidden_dim, bias=False), nn.BatchNorm2d(hidden_dim), nn.SiLU(), nn.Conv2d(hidden_dim, oup, 1, 1, 0, bias=False), nn.BatchNorm2d(oup))\n        else:\n            self.conv = nn.Sequential(nn.Conv2d(inp, hidden_dim, 1, 1, 0, bias=False), nn.BatchNorm2d(hidden_dim), nn.SiLU(), nn.Conv2d(hidden_dim, hidden_dim, 3, stride, 1, groups=hidden_dim, bias=False), nn.BatchNorm2d(hidden_dim), nn.SiLU(), nn.Conv2d(hidden_dim, oup, 1, 1, 0, bias=False), nn.BatchNorm2d(oup))\n\n    def forward(self, x: Tensor) -> Tensor:\n        if self.use_res_connect:\n            return x + self.conv(x)\n        else:\n            return self.conv(x)",
    "docstring": "MV2 block described in MobileNetV2. Paper: Based on: Args: inp: input channel. oup: output channel. stride: stride for convolution, defaults to 1, set to 2 if down-sample. expansion: expansion ratio for hidden dimension, defaults to 4.",
    "type": "class",
    "file_path": "kornia\\kornia\\contrib\\vit_mobile.py",
    "ast_data": "ClassDef name:MV2Block FunctionDef name:__init__ arg:self arg:inp arg:oup arg:stride arg:expansion arguments arg arg arg arg arg Call Call Assign Assign Call Assign BoolOp Compare Compare If Compare Assign Call Call Call Call Call Call Assign Call Call Call Call Call Call Call Call Call FunctionDef name:forward arg:self arg:x arguments arg arg If Return return:yes Call Return return:yes Call"
  },
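A shape sketch for the block, assuming the import path matches the file_path above (it may be private API):

```python
import torch
from kornia.contrib.vit_mobile import MV2Block  # assumed import path

block = MV2Block(inp=16, oup=16, stride=1)  # stride 1 and inp == oup: residual branch used
x = torch.randn(1, 16, 32, 32)
print(block(x).shape)  # torch.Size([1, 16, 32, 32]); 3x3 stride-1 pad-1 keeps spatial size
```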
  {
    "library": "pytorch",
    "name": "_op_with_optional_float_cast",
    "source_code": "def _op_with_optional_float_cast(g: jit_utils.GraphContext, op_name, *args, **kwargs):\n    opset_before = kwargs.pop('opset_before', None)\n    target_float_t = kwargs.pop('target_float_t', _type_utils.JitScalarType.FLOAT)\n    inputs = list(args)\n    dtype_0 = _type_utils.JitScalarType.from_value(inputs[0])\n    require_cast = not _is_fp(inputs[0]) and (opset_before is None or GLOBALS.export_onnx_opset_version < opset_before)\n    if require_cast:\n        for input in inputs:\n            if input.isCompleteTensor():\n                input_scalar_type = _type_utils.JitScalarType.from_value(input)\n                if input_scalar_type != dtype_0:\n                    raise errors.SymbolicValueError(f'Inputs of {op_name} must have same dtype.Got {dtype_0.scalar_name()} and {input_scalar_type.scalar_name()}', input)\n        for i, input in enumerate(inputs):\n            if input.isCompleteTensor() and (not _is_fp(input)):\n                inputs[i] = g.op('Cast', input, to_i=target_float_t.onnx_type())\n    self = g.op(op_name, *inputs, **kwargs)\n    if require_cast:\n        self = g.op('Cast', self, to_i=dtype_0.onnx_type())\n    return self",
    "docstring": "Some PyTorch operators (e.g., Clip/Min/ReLU/Pad) are super set of ONNX in terms of data types. This function maximizes the exportability of PyTorch-ONNX by allowing ONNX-unsupported PyTorch operator data type. For example, can be used to mimic (opset version < 12). Args: g (torch._C.Graph): graph to write the ONNX representation into. op_name (str): operator name in ONNX. *args (tuple): operands to the operator. **kwargs (dict): attributes to the operator along with \"opset_before\" (optional, None by default) indicating the smallest opset version to trigger such casting behavior and \"target_float_t\" (optional, torch.onnx.JitScalarType.FLOAT by default) indicating the data type of internal operator. Returns: Optional[torch._C.Value, Tuple[torch._C.Value, ...]]: output(s) of the operator.",
    "type": "function",
    "file_path": "pytorch\\torch\\onnx\\symbolic_helper.py",
    "ast_data": "FunctionDef name:_op_with_optional_float_cast arg:g arg:op_name arguments arg arg arg arg Assign Call Assign Call Assign Call Assign Call Assign BoolOp Call BoolOp Compare Compare If For If Call Assign Call If Compare Raise Call Call Call For Call If BoolOp Call Call Assign Call Call Assign Call If Assign Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_setup_function_captures",
    "source_code": "def _setup_function_captures(self, concrete_function_name, nodes):\n    if concrete_function_name in self._restored_concrete_functions:\n        return\n    self._restored_concrete_functions.add(concrete_function_name)\n    concrete_function = self._concrete_functions[concrete_function_name]\n    proto = self._proto.concrete_functions[concrete_function_name]\n    inputs = [nodes[node_id] for node_id in proto.bound_inputs]\n    restore_captures.restore_captures(concrete_function, inputs)",
    "docstring": "Setup captures and variables in a restored function.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\saved_model\\load.py",
    "ast_data": "FunctionDef name:_setup_function_captures arg:self arg:concrete_function_name arg:nodes arguments arg arg arg If Compare Return return:no Call Assign Assign Assign Call"
  },
  {
    "library": "tensorflow",
    "name": "count_up_to",
    "source_code": "@deprecated(None, 'Prefer Dataset.range instead.')\ndef count_up_to(self, limit):\n    return gen_state_ops.resource_count_up_to(self.handle, limit=limit, T=self.dtype)",
    "docstring": "Increments this variable until it reaches . When that Op is run it tries to increment the variable by . If incrementing the variable would bring it above then the Op raises the exception . If no error is raised, the Op outputs the value of the variable before the increment. This is essentially a shortcut for . Args: limit: value at which incrementing the variable raises an error. Returns: A that will hold the variable value before the increment. If no other Op modifies this variable, the values produced will all be distinct.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\resource_variable_ops.py",
    "ast_data": "FunctionDef name:count_up_to arg:self arg:limit arguments arg arg Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "get_executor",
    "source_code": "@tf_export('__internal__.eager_context.get_executor', v1=[])\ndef get_executor():\n    return context().executor",
    "docstring": "Get the Executor of the current thread. Returns: The Executor of the current thread.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\context.py",
    "ast_data": "FunctionDef name:get_executor arguments Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "append_cpu_child",
    "source_code": "def append_cpu_child(self, child):\n    assert self.device_type == DeviceType.CPU\n    assert isinstance(child, FunctionEvent)\n    assert child.device_type == DeviceType.CPU\n    self.cpu_children.append(child)",
    "docstring": "Append a CPU child of type FunctionEvent. One is supposed to append only direct children to the event to have correct self cpu time being reported.",
    "type": "method",
    "file_path": "pytorch\\torch\\autograd\\profiler_util.py",
    "ast_data": "FunctionDef name:append_cpu_child arg:self arg:child arguments arg arg Compare Call Compare Call"
  },
  {
    "library": "pandas",
    "name": "_maybe_convert_platform_interval",
    "source_code": "def _maybe_convert_platform_interval(values) -> ArrayLike:\n    if isinstance(values, (list, tuple)) and len(values) == 0:\n        return np.array([], dtype=np.int64)\n    elif not is_list_like(values) or isinstance(values, ABCDataFrame):\n        return values\n    elif isinstance(getattr(values, 'dtype', None), CategoricalDtype):\n        values = np.asarray(values)\n    elif not hasattr(values, 'dtype') and (not isinstance(values, (list, tuple, range))):\n        return values\n    else:\n        values = extract_array(values, extract_numpy=True)\n    if not hasattr(values, 'dtype'):\n        values = np.asarray(values)\n        if values.dtype.kind in 'iu' and values.dtype != np.int64:\n            values = values.astype(np.int64)\n    return values",
    "docstring": "Try to do platform conversion, with special casing for IntervalArray. Wrapper around maybe_convert_platform that alters the default return dtype in certain cases to be compatible with IntervalArray. For example, empty lists return with integer dtype instead of object dtype, which is prohibited for IntervalArray. Parameters ---------- values : array-like Returns ------- array",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\arrays\\interval.py",
    "ast_data": "FunctionDef name:_maybe_convert_platform_interval arg:values arguments arg If BoolOp Call Compare Call Return return:yes Call If BoolOp Call Call Return return:yes If Call Call Assign Call If BoolOp Call Call Return return:yes Assign Call If Call Assign Call If BoolOp Compare Compare Assign Call Return return:yes"
  },
  {
    "library": "scrapy",
    "name": "ImageException",
    "source_code": "class ImageException(FileException):\n    pass",
    "docstring": "General image error exception",
    "type": "class",
    "file_path": "scrapy\\scrapy\\pipelines\\images.py",
    "ast_data": "ClassDef name:ImageException"
  },
  {
    "library": "matplotlib",
    "name": "inverted",
    "source_code": "def inverted(self):\n    raise NotImplementedError()",
    "docstring": "Return the corresponding inverse transformation. It holds ``. The return value of this method should be treated as temporary. An update to *self* does not cause a corresponding update to its inverted copy.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\transforms.py",
    "ast_data": "FunctionDef name:inverted arg:self arguments arg Raise Call"
  },
  {
    "library": "django",
    "name": "deconstruct",
    "source_code": "def deconstruct(self):\n    qs_class = self._queryset_class\n    if getattr(self, '_built_with_as_manager', False):\n        return (True, None, '%s.%s' % (qs_class.__module__, qs_class.__name__), None, None)\n    else:\n        module_name = self.__module__\n        name = self.__class__.__name__\n        module = import_module(module_name)\n        if not hasattr(module, name):\n            raise ValueError(\"Could not find manager %s in %s.\\nPlease note that you need to inherit from managers you dynamically generated with 'from_queryset()'.\" % (name, module_name))\n        return (False, '%s.%s' % (module_name, name), None, self._constructor_args[0], self._constructor_args[1])",
    "docstring": "Return a 5-tuple of the form (as_manager (True), manager_class, queryset_class, args, kwargs). Raise a ValueError if the manager is dynamically generated.",
    "type": "method",
    "file_path": "django\\django\\db\\models\\manager.py",
    "ast_data": "FunctionDef name:deconstruct arg:self arguments arg Assign If Call Return return:yes Assign Assign Assign Call If Call Raise Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "precompile",
    "source_code": "def precompile(self):\n    autotuning_log.debug('Precompiling %s', self)\n    CUDACodeCache.compile(self.source_code, 'so')\n    autotuning_log.debug('Done precompiling %s', self)",
    "docstring": "Precompile the CUDA source code to populate the CUDACodeCache. This may happen in a separate thread pool.",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\autotune_process.py",
    "ast_data": "FunctionDef name:precompile arg:self arguments arg Call Call Call"
  },
  {
    "library": "pandas",
    "name": "is_full_slice",
    "source_code": "def is_full_slice(obj, line: int) -> bool:\n    return isinstance(obj, slice) and obj.start == 0 and (obj.stop == line) and (obj.step is None)",
    "docstring": "We have a full length slice.",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\common.py",
    "ast_data": "FunctionDef name:is_full_slice arg:obj arg:line arguments arg arg Return return:yes BoolOp Call Compare Compare Compare"
  },
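Behavior of the predicate, inlined for a self-contained check:

```python
def is_full_slice(obj, line: int) -> bool:
    return (
        isinstance(obj, slice)
        and obj.start == 0
        and obj.stop == line
        and obj.step is None
    )

assert is_full_slice(slice(0, 5), line=5)         # full-length slice
assert not is_full_slice(slice(0, 5, 2), line=5)  # an explicit step breaks it
assert not is_full_slice([0, 1, 2], line=3)       # not a slice at all
```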
  {
    "library": "tensorflow",
    "name": "func_dump",
    "source_code": "def func_dump(func):\n    if os.name == 'nt':\n        raw_code = marshal.dumps(func.__code__).replace(b'\\\\', b'/')\n        code = codecs.encode(raw_code, 'base64').decode('ascii')\n    else:\n        raw_code = marshal.dumps(func.__code__)\n        code = codecs.encode(raw_code, 'base64').decode('ascii')\n    defaults = func.__defaults__\n    if func.__closure__:\n        closure = tuple((c.cell_contents for c in func.__closure__))\n    else:\n        closure = None\n    return (code, defaults, closure)",
    "docstring": "Serializes a user defined function. Args: func: the function to serialize. Returns: A tuple .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\utils\\generic_utils.py",
    "ast_data": "FunctionDef name:func_dump arg:func arguments arg If Compare Assign Call Call Assign Call Call Assign Call Assign Call Call Assign If Assign Call Assign Return return:yes"
  },
  {
    "library": "pandas",
    "name": "sum",
    "source_code": "@deprecate_nonkeyword_arguments(version='4.0', allowed_args=['self'], name='sum')\ndef sum(self, axis: Axis | None=None, skipna: bool=True, numeric_only: bool=False, min_count: int=0, **kwargs):\n    return NDFrame.sum(self, axis=axis, skipna=skipna, numeric_only=numeric_only, min_count=min_count, **kwargs)",
    "docstring": "Return the sum of the values over the requested axis. This is equivalent to the method `Series` handles all-NA and empty series identically. >>> pd.Series([np.nan]).sum() 0.0 >>> pd.Series([np.nan]).sum(min_count=1) nan",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\series.py",
    "ast_data": "FunctionDef name:sum arg:self arg:axis arg:skipna arg:numeric_only arg:min_count arguments arg arg arg arg arg arg Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_GeneratorDataset",
    "source_code": "class _GeneratorDataset(dataset_ops.DatasetSource):\n\n    def __init__(self, init_args, init_func, next_func, finalize_func, output_signature, name=None):\n        self._init_args = init_args\n        self._init_structure = structure.type_spec_from_value(init_args)\n        self._init_func = structured_function.StructuredFunctionWrapper(init_func, self._transformation_name(), input_structure=self._init_structure)\n        self._next_func = structured_function.StructuredFunctionWrapper(next_func, self._transformation_name(), input_structure=self._init_func.output_structure)\n        self._finalize_func = structured_function.StructuredFunctionWrapper(finalize_func, self._transformation_name(), input_structure=self._init_func.output_structure)\n        self._output_signature = output_signature\n        self._name = name\n        variant_tensor = gen_dataset_ops.generator_dataset(structure.to_tensor_list(self._init_structure, self._init_args) + self._init_func.function.captured_inputs, self._next_func.function.captured_inputs, self._finalize_func.function.captured_inputs, init_func=self._init_func.function, next_func=self._next_func.function, finalize_func=self._finalize_func.function, **self._common_args)\n        super().__init__(variant_tensor)\n\n    @property\n    def element_spec(self):\n        return self._output_signature\n\n    def _transformation_name(self):\n        return 'Dataset.from_generator()'",
    "docstring": "A that generates elements by invoking a function.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\data\\ops\\from_generator_op.py",
    "ast_data": "ClassDef name:_GeneratorDataset FunctionDef name:__init__ arg:self arg:init_args arg:init_func arg:next_func arg:finalize_func arg:output_signature arg:name arguments arg arg arg arg arg arg arg Assign Assign Call Assign Call Call Assign Call Call Assign Call Call Assign Assign Assign Call Call Call Call FunctionDef name:element_spec arg:self arguments arg Return return:yes FunctionDef name:_transformation_name arg:self arguments arg Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "get_step_message",
    "source_code": "def get_step_message(log, start, end, title, message, details):\n    if end not in log:\n        return ''\n    res = f'-----------------------------------------------\\n### {title}\\n\\n{message}\\n\\n'\n    if details:\n        res += '<details>\\n\\n```\\n' + log[log.find(start) + len(start) + 1:log.find(end) - 1] + '\\n```\\n\\n</details>\\n\\n'\n    return res",
    "docstring": "Get the message for a specific test. Parameters ---------- log : str The log of the linting job. start : str The string that marks the start of the test. end : str The string that marks the end of the test. title : str The title for this section. message : str The message to be added at the beginning of the section. details : bool Whether to add the details of each step. Returns ------- message : str The message to be added to the comment.",
    "type": "function",
    "file_path": "scikit-learn\\build_tools\\get_comment.py",
    "ast_data": "FunctionDef name:get_step_message arg:log arg:start arg:end arg:title arg:message arg:details arguments arg arg arg arg arg arg If Compare Return return:yes Assign If Call Call Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "axisinfo",
    "source_code": "@staticmethod\ndef axisinfo(unit, axis):\n    return None",
    "docstring": "Return an for the axis with the specified units.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\units.py",
    "ast_data": "FunctionDef name:axisinfo arg:unit arg:axis arguments arg arg Return return:no"
  },
  {
    "library": "matplotlib",
    "name": "_check_unsampled_image",
    "source_code": "def _check_unsampled_image(self):\n    return False",
    "docstring": "Return whether the image is better to be drawn unsampled. The derived class needs to override it.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\image.py",
    "ast_data": "FunctionDef name:_check_unsampled_image arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_benchmarkRunOp",
    "source_code": "def _benchmarkRunOp(self, name, target, iters):\n    times = []\n    with ops.Graph().as_default():\n        v = variables.Variable(random_ops.random_normal([]))\n        with session.Session(target) as sess:\n            sess.run(v.initializer)\n            sess.run(v.op)\n            for _ in range(iters):\n                start_time = time.time()\n                sess.run(v.op)\n                end_time = time.time()\n                times.append(end_time - start_time)\n    print('%s %f' % (name, np.median(times)))\n    self.report_benchmark(iters=1, wall_time=np.median(times), name=name)",
    "docstring": "Runs a microbenchmark to measure the cost of running an op. Reports the median cost of running a trivial (Variable) op. Args: name: A human-readable name for logging the output. target: The session target to use for the benchmark. iters: The number of iterations to perform.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\client\\session_benchmark.py",
    "ast_data": "FunctionDef name:_benchmarkRunOp arg:self arg:name arg:target arg:iters arguments arg arg arg arg Assign With Call Call Assign Call Call With Call Call Call For Call Assign Call Call Assign Call Call Call Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_ReduceGradientArgs",
    "source_code": "def _ReduceGradientArgs(x, y, gx, gy):\n    if gx is not None or gy is not None:\n        bx, by = SmartBroadcastGradientArgs(x, y)\n        gx = _ReduceGradientArg(gx, bx)\n        gy = _ReduceGradientArg(gy, by)\n    return (gx, gy)",
    "docstring": "Reduces gradients of both arguments of a broadcasting binary op.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\math_grad.py",
    "ast_data": "FunctionDef name:_ReduceGradientArgs arg:x arg:y arg:gx arg:gy arguments arg arg arg arg If BoolOp Compare Compare Assign Call Assign Call Assign Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_match_pattern",
    "source_code": "def _match_pattern(match_pattern: list[Callable]) -> Optional[Node]:\n    a = arg\n    for i, match in enumerate(match_pattern):\n        if not match(a):\n            return None\n        if i < len(match_pattern) - 1:\n            if match == match_tuple:\n                a = a.args[0][0]\n            else:\n                a = a.args[0]\n    return a",
    "docstring": "Traverse up the graph and match the args one by one. If there is a match, return the last matched node, or None otherwise.",
    "type": "function",
    "file_path": "pytorch\\torch\\ao\\quantization\\fx\\utils.py",
    "ast_data": "FunctionDef name:_match_pattern arg:match_pattern arguments arg Assign For Call If Call Return return:no If Compare Call If Compare Assign Assign Return return:yes"
  },
  {
    "library": "scipy",
    "name": "from_cholesky",
    "source_code": "@staticmethod\ndef from_cholesky(cholesky):\n    return CovViaCholesky(cholesky)",
    "docstring": "Representation of a covariance provided via the (lower) Cholesky factor Parameters ---------- cholesky : array_like The lower triangular Cholesky factor of the covariance matrix. Notes ----- Let the covariance matrix be :math: and :math: be the lower Cholesky factor such that :math:. Whitening of a data point :math: is performed by computing :math:. :math: is calculated as :math:, where the :math: operation is performed element-wise. This class does not support singular covariance matrices because the Cholesky decomposition does not exist for a singular covariance matrix. Examples -------- Prepare a symmetric positive definite covariance matrix `CovarianceCovariance` object against reference implementation. >>> from scipy.linalg import solve_triangular >>> res = cov.whiten(x) >>> ref = solve_triangular(L, x, lower=True) >>> np.allclose(res, ref) True >>> res = cov.log_pdet >>> ref = np.linalg.slogdet(A)[-1] >>> np.allclose(res, ref) True",
    "type": "method",
    "file_path": "scipy\\scipy\\stats\\_covariance.py",
    "ast_data": "FunctionDef name:from_cholesky arg:cholesky arguments arg Return return:yes Call"
  },
  {
    "library": "virtualenv",
    "name": "crc32",
    "source_code": "def crc32(data):\n    return _crc32(data.encode()) & 4294967295",
    "docstring": "Python version idempotent.",
    "type": "function",
    "file_path": "virtualenv\\tasks\\update_embedded.py",
    "ast_data": "FunctionDef name:crc32 arg:data arguments arg Return return:yes Call Call"
  },
  {
    "library": "scikit-learn",
    "name": "MissingValues",
    "source_code": "class MissingValues(_Constraint):\n\n    def __init__(self, numeric_only=False):\n        super().__init__()\n        self.numeric_only = numeric_only\n        self._constraints = [_InstancesOf(Integral), Interval(Real, None, None, closed='both'), _NanConstraint(), _PandasNAConstraint()]\n        if not self.numeric_only:\n            self._constraints.extend([_InstancesOf(str), _NoneConstraint()])\n\n    def is_satisfied_by(self, val):\n        return any((c.is_satisfied_by(val) for c in self._constraints))\n\n    def __str__(self):\n        return f'{', '.join([str(c) for c in self._constraints[:-1]])} or {self._constraints[-1]}'",
    "docstring": "Helper constraint for the parameters. Convenience for [ Integral, Interval(Real, None, None, closed=\"both\"), str, # when numeric_only is False None, # when numeric_only is False _NanConstraint(), _PandasNAConstraint(), ] Parameters ---------- numeric_only : bool, default=False Whether to consider only numeric missing value markers.",
    "type": "class",
    "file_path": "scikit-learn\\sklearn\\utils\\_param_validation.py",
    "ast_data": "ClassDef name:MissingValues FunctionDef name:__init__ arg:self arg:numeric_only arguments arg arg Call Call Assign Assign Call Call Call Call If Call Call Call FunctionDef name:is_satisfied_by arg:self arg:val arguments arg arg Return return:yes Call Call FunctionDef name:__str__ arg:self arguments arg Return return:yes Call Call"
  },
  {
    "library": "sphinx",
    "name": "default_latex_use_xindy",
    "source_code": "def default_latex_use_xindy(config: Config) -> bool:\n    return config.latex_engine in {'xelatex', 'lualatex'}",
    "docstring": "Better default latex_use_xindy settings for specific engines.",
    "type": "function",
    "file_path": "sphinx\\sphinx\\builders\\latex\\__init__.py",
    "ast_data": "FunctionDef name:default_latex_use_xindy arg:config arguments arg Return return:yes Compare"
  },
  {
    "library": "pytorch",
    "name": "_find_q_dq_node_for_user",
    "source_code": "def _find_q_dq_node_for_user(produer: torch.fx.Node, user: torch.fx.Node) -> tuple[Any, Any]:\n    dq_node = None\n    for n in user.args:\n        if isinstance(n, torch.fx.Node) and n.op == 'call_function' and (n.target in _DEQUANTIZE_OPS):\n            if _is_connected(produer, n):\n                dq_node = n\n                break\n    if dq_node is None:\n        for n in user.kwargs:\n            if isinstance(n, torch.fx.Node) and n.op == 'call_function' and (n.target in _DEQUANTIZE_OPS):\n                if _is_connected(produer, n):\n                    dq_node = n\n                    break\n    if dq_node is None:\n        return (None, None)\n    q_node = None\n    if dq_node.args[0].op == 'call_function' and dq_node.args[0].target in _QUANTIZE_OPS:\n        q_node = dq_node.args[0]\n    return (q_node, dq_node)",
    "docstring": "Find q, dq pair corresponding to [producer -> q -> dq -> user] Utils works by finding dq arg of user and ensuring it is connected to producer",
    "type": "function",
    "file_path": "pytorch\\torch\\ao\\quantization\\pt2e\\utils.py",
    "ast_data": "FunctionDef name:_find_q_dq_node_for_user arg:produer arg:user arguments arg arg Assign For If BoolOp Call Compare Compare If Call Assign If Compare For If BoolOp Call Compare Compare If Call Assign If Compare Return return:no Assign If BoolOp Compare Compare Assign Return return:yes"
  },
  {
    "library": "cherrypy",
    "name": "download",
    "source_code": "@cherrypy.expose\ndef download(self):\n    path = os.path.join(absDir, 'pdf_file.pdf')\n    return static.serve_file(path, 'application/x-download', 'attachment', os.path.basename(path))",
    "docstring": "Send file to the HTTP client accessing `` URI.",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\tutorial\\tut09_files.py",
    "ast_data": "FunctionDef name:download arg:self arguments arg Assign Call Return return:yes Call Call"
  },
  {
    "library": "seaborn",
    "name": "_check_dict_entries",
    "source_code": "def _check_dict_entries(self, levels: list, values: dict) -> None:\n    missing = set(levels) - set(values)\n    if missing:\n        formatted = ', '.join(map(repr, sorted(missing, key=str)))\n        err = f'No entry in {self.variable} dictionary for {formatted}'\n        raise ValueError(err)",
    "docstring": "Input check when values are provided as a dictionary.",
    "type": "method",
    "file_path": "seaborn\\seaborn\\_core\\properties.py",
    "ast_data": "FunctionDef name:_check_dict_entries arg:self arg:levels arg:values arguments arg arg arg Assign Call Call If Assign Call Call Call Assign Raise Call"
  },
  {
    "library": "scikit-learn",
    "name": "check_cv",
    "source_code": "def check_cv(cv=5, y=None, *, classifier=False):\n    cv = 5 if cv is None else cv\n    if isinstance(cv, numbers.Integral):\n        if classifier and y is not None and (type_of_target(y, input_name='y') in ('binary', 'multiclass')):\n            return StratifiedKFold(cv)\n        else:\n            return KFold(cv)\n    if not hasattr(cv, 'split') or isinstance(cv, str):\n        if not isinstance(cv, Iterable) or isinstance(cv, str):\n            raise ValueError('Expected cv as an integer, cross-validation object (from sklearn.model_selection) or an iterable. Got %s.' % cv)\n        return _CVIterableWrapper(cv)\n    return cv",
    "docstring": "Input checker utility for building a cross-validator. Parameters ---------- cv : int, cross-validation generator, iterable or None, default=5 Determines the cross-validation splitting strategy. Possible inputs for cv are: - None, to use the default 5-fold cross validation, - integer, to specify the number of folds. - :term:, - An iterable that generates (train, test) splits as arrays of indices. For integer/None inputs, if classifier is True and `StratifiedKFoldKFoldUser Guide ` method. Examples -------- >>> from sklearn.model_selection import check_cv >>> check_cv(cv=5, y=None, classifier=False) KFold(...) >>> check_cv(cv=5, y=[1, 1, 0, 0, 0, 0], classifier=True) StratifiedKFold(...)",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\model_selection\\_split.py",
    "ast_data": "FunctionDef name:check_cv arg:cv arg:y arguments arg arg arg Assign Compare If Call If BoolOp Compare Compare Call Return return:yes Call Return return:yes Call If BoolOp Call Call If BoolOp Call Call Raise Call Return return:yes Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "inverse_stft_window_fn",
    "source_code": "@tf_export('signal.inverse_stft_window_fn')\n@dispatch.add_dispatch_support\ndef inverse_stft_window_fn(frame_step, forward_window_fn=window_ops.hann_window, name=None):\n\n    def inverse_stft_window_fn_inner(frame_length, dtype):\n        with ops.name_scope(name, 'inverse_stft_window_fn', [forward_window_fn]):\n            frame_step_ = ops.convert_to_tensor(frame_step, name='frame_step')\n            frame_step_.shape.assert_has_rank(0)\n            frame_length = ops.convert_to_tensor(frame_length, name='frame_length')\n            frame_length.shape.assert_has_rank(0)\n            forward_window = forward_window_fn(frame_length, dtype=dtype)\n            denom = math_ops.square(forward_window)\n            overlaps = -(-frame_length // frame_step_)\n            denom = array_ops.pad(denom, [(0, overlaps * frame_step_ - frame_length)])\n            denom = array_ops.reshape(denom, [overlaps, frame_step_])\n            denom = math_ops.reduce_sum(denom, 0, keepdims=True)\n            denom = array_ops.tile(denom, [overlaps, 1])\n            denom = array_ops.reshape(denom, [overlaps * frame_step_])\n            return forward_window / denom[:frame_length]\n    return inverse_stft_window_fn_inner",
    "docstring": "Generates a window function that can be used in . Constructs a window that is equal to the forward window with a further pointwise amplitude correction. is equivalent to in the case where it would produce an exact inverse. See examples in documentation for usage. Args: frame_step: An integer scalar . The number of samples to step. forward_window_fn: window_fn used in the forward transform, . name: An optional name for the operation. Returns: A callable that takes a window length and a keyword argument and returns a of samples in the provided datatype. The returned window is suitable for reconstructing original waveform in inverse_stft.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\signal\\spectral_ops.py",
    "ast_data": "FunctionDef name:inverse_stft_window_fn arg:frame_step arg:forward_window_fn arg:name arguments arg arg arg FunctionDef name:inverse_stft_window_fn_inner arg:frame_length arg:dtype arguments arg arg With Call Assign Call Call Assign Call Call Assign Call Assign Call Assign Assign Call Assign Call Assign Call Assign Call Assign Call Return return:yes Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "Constant",
    "source_code": "class Constant(Initializer):\n\n    def __init__(self, value=0):\n        self.value = value\n\n    def __call__(self, shape, dtype=None, **kwargs):\n        del kwargs\n        return constant_op.constant(self.value, dtype=_get_dtype(dtype), shape=shape)\n\n    def get_config(self):\n        return {'value': self.value}",
    "docstring": "Initializer that generates tensors with constant values. Also available via the shortcut function . Only scalar values are allowed. The constant value provided must be convertible to the dtype requested when calling the initializer. Examples: >>> # Standalone usage: >>> initializer = tf.keras.initializers.Constant(3.) >>> values = initializer(shape=(2, 2)) >>> # Usage in a Keras layer: >>> initializer = tf.keras.initializers.Constant(3.) >>> layer = tf.keras.layers.Dense(3, kernel_initializer=initializer) Args: value: A Python scalar.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\initializers\\initializers_v2.py",
    "ast_data": "ClassDef name:Constant FunctionDef name:__init__ arg:self arg:value arguments arg arg Assign FunctionDef name:__call__ arg:self arg:shape arg:dtype arguments arg arg arg arg Return return:yes Call Call FunctionDef name:get_config arg:self arguments arg Return return:yes"
  },
  {
    "library": "pandas",
    "name": "is_",
    "source_code": "@final\ndef is_(self, other) -> bool:\n    if self is other:\n        return True\n    elif not hasattr(other, '_id'):\n        return False\n    elif self._id is None or other._id is None:\n        return False\n    else:\n        return self._id is other._id",
    "docstring": "More flexible, faster check like `` but also checks metadata. Examples -------- >>> idx1 = pd.Index([\"1\", \"2\", \"3\"]) >>> idx1.is_(idx1.view()) True >>> idx1.is_(idx1.copy()) False",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\indexes\\base.py",
    "ast_data": "FunctionDef name:is_ arg:self arg:other arguments arg arg If Compare Return return:yes If Call Return return:yes If BoolOp Compare Compare Return return:yes Return return:yes Compare"
  },
  {
    "library": "pytorch",
    "name": "test_call_statement",
    "source_code": "def test_call_statement(self, kernel, input_nodes, names_str: str='') -> str:\n    _, __, arg_types = kernel.args.cpp_argdefs(cutlass_utils.DTYPE_TO_CUTLASS_TYPE)\n    arg_names = [name.strip() for name in names_str.strip().split(',')]\n    arg_names = self._update_arg_names_for_test_call_statement(arg_names, input_nodes)\n    arguments = [f'(({arg_type}){arg_name}_data.get())' for arg_type, arg_name in zip(arg_types, arg_names)]\n    return f'{kernel.kernel_name}({', '.join(arguments)}, M, N, K, B, lda, ldb, ldc, ldd, swizzle, workspace_size_ptr, (uint8_t*)workspace_data.get(), 0);'",
    "docstring": "Helper method to render the Cutlass CUDA C++ code required for calling the GEMM operation in the standalone test runner that might also be generated along with the rest of the code, if the corresponding config is enabled. Returns a C++ statement that calls the GEMM operation with the correct arguments.",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\codegen\\cuda\\gemm_template.py",
    "ast_data": "FunctionDef name:test_call_statement arg:self arg:kernel arg:input_nodes arg:names_str arguments arg arg arg arg Assign Call Assign Call Call Call Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_unbatch",
    "source_code": "def _unbatch(self, batch_size):\n    return self._copy(param_specs=nest.map_structure(lambda spec: spec._unbatch(), self._param_specs))",
    "docstring": "Returns a TypeSpec representing a single element of this TypeSpec.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\linalg\\linear_operator.py",
    "ast_data": "FunctionDef name:_unbatch arg:self arg:batch_size arguments arg arg Return return:yes Call Call arguments arg Call"
  },
  {
    "library": "pytorch",
    "name": "copy",
    "source_code": "def copy(self) -> ParameterDict:\n    return ParameterDict(OrderedDict(((k, self[k]) for k in self._keys)))",
    "docstring": "Return a copy of this :class: instance.",
    "type": "method",
    "file_path": "pytorch\\torch\\nn\\modules\\container.py",
    "ast_data": "FunctionDef name:copy arg:self arguments arg Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "batch_shape",
    "source_code": "@property\ndef batch_shape(self):\n    return tensor_shape.as_shape(self._batch_shape())",
    "docstring": "Shape of a single sample from a single event index as a . May be partially defined or unknown. The batch dimensions are indexes into independent, non-identical parameterizations of this distribution. Returns: batch_shape: , possibly unknown.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\distributions\\distribution.py",
    "ast_data": "FunctionDef name:batch_shape arg:self arguments arg Return return:yes Call Call"
  },
  {
    "library": "scikit-learn",
    "name": "fit",
    "source_code": "@_fit_context(prefer_skip_nested_validation=True)\ndef fit(self, X, y=None):\n    self._fit(X)\n    return self",
    "docstring": "Fit the model with X. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) Training data, where is the number of samples and is the number of features. y : Ignored Ignored. Returns ------- self : object Returns the instance itself.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\decomposition\\_pca.py",
    "ast_data": "FunctionDef name:fit arg:self arg:X arg:y arguments arg arg arg Call Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "_available_if_estimator_has",
    "source_code": "def _available_if_estimator_has(attr):\n\n    def _check(self):\n        if hasattr(self, 'estimators_'):\n            return all((hasattr(est, attr) for est in self.estimators_))\n        if hasattr(self.estimator, attr):\n            return True\n        return False\n    return available_if(_check)",
    "docstring": "Return a function to check if the sub-estimator(s) has(have) . Helper for Chain implementations.",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\multioutput.py",
    "ast_data": "FunctionDef name:_available_if_estimator_has arg:attr arguments arg FunctionDef name:_check arg:self arguments arg If Call Return return:yes Call Call If Call Return return:yes Return return:yes Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "loc",
    "source_code": "@property\ndef loc(self):\n    return self._loc",
    "docstring": "Distribution parameter for the location.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\distributions\\laplace.py",
    "ast_data": "FunctionDef name:loc arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "Rule",
    "source_code": "class Rule(object):\n\n    def __init__(self, module_prefix):\n        self._prefix = module_prefix\n\n    def matches(self, module_name):\n        return module_name.startswith(self._prefix + '.') or module_name == self._prefix",
    "docstring": "Base class for conversion rules.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\autograph\\core\\config_lib.py",
    "ast_data": "ClassDef name:Rule FunctionDef name:__init__ arg:self arg:module_prefix arguments arg arg Assign FunctionDef name:matches arg:self arg:module_name arguments arg arg Return return:yes BoolOp Call Compare"
  },
  {
    "library": "tensorflow",
    "name": "output_shapes",
    "source_code": "@property\n@deprecation.deprecated(None, 'Use `tf.compat.v1.data.get_output_shapes(iterator)`.')\ndef output_shapes(self):\n    return nest.map_structure(lambda component_spec: component_spec._to_legacy_output_shapes(), self._element_spec)",
    "docstring": "Returns the shape of each component of an element of this iterator. Returns: A (nested) structure of objects corresponding to each component of an element of this dataset.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\data\\ops\\iterator_ops.py",
    "ast_data": "FunctionDef name:output_shapes arg:self arguments arg Return return:yes Call arguments arg Call Call"
  },
  {
    "library": "tensorflow",
    "name": "is_subclassed",
    "source_code": "def is_subclassed(layer):\n    return layer.__module__.find('keras.engine') == -1 and layer.__module__.find('keras.layers') == -1",
    "docstring": "Returns True if the object is a subclassed layer or subclassed model.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\base_layer_utils.py",
    "ast_data": "FunctionDef name:is_subclassed arg:layer arguments arg Return return:yes BoolOp Compare Call Compare Call"
  },
  {
    "library": "kornia",
    "name": "data",
    "source_code": "@property\ndef data(self) -> Tensor:\n    return self._data",
    "docstring": "Return the underlying tensor data.",
    "type": "method",
    "file_path": "kornia\\kornia\\image\\image.py",
    "ast_data": "FunctionDef name:data arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "eye",
    "source_code": "@dispatch.add_dispatch_support\n@doc_controls.do_not_generate_docs\ndef eye(size, dtype=None, name=None):\n    if dtype is None:\n        dtype = floatx()\n    tf_dtype = dtypes_module.as_dtype(dtype)\n    return variable(linalg_ops.eye(size, dtype=tf_dtype), dtype, name)",
    "docstring": "Instantiate an identity matrix and returns it. Args: size: Integer, number of rows/columns. dtype: String, data type of returned Keras variable. name: String, name of returned Keras variable. Returns: A Keras variable, an identity matrix. Example: >>> kvar = tf.keras.backend.eye(3) >>> tf.keras.backend.eval(kvar) array([[1., 0., 0.], [0., 1., 0.], [0., 0., 1.]], dtype=float32)",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\backend.py",
    "ast_data": "FunctionDef name:eye arg:size arg:dtype arg:name arguments arg arg arg If Compare Assign Call Assign Call Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "valmap",
    "source_code": "def valmap(func, d, factory=dict):\n    rv = factory()\n    rv.update(zip(d.keys(), map(func, d.values())))\n    return rv",
    "docstring": "Apply function to values of dictionary >>> bills = {\"Alice\": [20, 15, 30], \"Bob\": [10, 35]} >>> valmap(sum, bills) # doctest: +SKIP {'Alice': 65, 'Bob': 45} See Also: keymap itemmap",
    "type": "function",
    "file_path": "pytorch\\torch\\fx\\experimental\\unification\\unification_tools.py",
    "ast_data": "FunctionDef name:valmap arg:func arg:d arg:factory arguments arg arg arg Assign Call Call Call Call Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "SiLU",
    "source_code": "class SiLU(Module):\n    __constants__ = ['inplace']\n    inplace: bool\n\n    def __init__(self, inplace: bool=False):\n        super().__init__()\n        self.inplace = inplace\n\n    def forward(self, input: Tensor) -> Tensor:\n        return F.silu(input, inplace=self.inplace)\n\n    def extra_repr(self) -> str:\n        inplace_str = 'inplace=True' if self.inplace else ''\n        return inplace_str",
    "docstring": "Applies the Sigmoid Linear Unit (SiLU) function, element-wise. The SiLU function is also known as the swish function. .. math:: \\text{silu}(x) = x * \\sigma(x), \\text{where } \\sigma(x) \\text{ is the logistic sigmoid.} .. note:: See _ where the SiLU (Sigmoid Linear Unit) was originally coined, and see _ and _ where the SiLU was experimented with later. Shape: - Input: :math:, where :math: means any number of dimensions. - Output: :math:, same shape as the input. .. image:: ../scripts/activation_images/SiLU.png Examples:: >>> m = nn.SiLU() >>> input = torch.randn(2) >>> output = m(input)",
    "type": "class",
    "file_path": "pytorch\\torch\\nn\\modules\\activation.py",
    "ast_data": "ClassDef name:SiLU Assign FunctionDef name:__init__ arg:self arg:inplace arguments arg arg Call Call Assign FunctionDef name:forward arg:self arg:input arguments arg arg Return return:yes Call FunctionDef name:extra_repr arg:self arguments arg Assign Return return:yes"
  },
  {
    "library": "scipy",
    "name": "_nan_allsame",
    "source_code": "def _nan_allsame(a, axis, keepdims=False):\n    if axis is None:\n        if a.size == 0:\n            return True\n        a = a.ravel()\n        axis = 0\n    else:\n        shp = a.shape\n        if shp[axis] == 0:\n            shp = shp[:axis] + (1,) * keepdims + shp[axis + 1:]\n            return np.full(shp, fill_value=True, dtype=bool)\n    a0 = _first_nonnan(a, axis=axis)\n    return ((a0 == a) | np.isnan(a)).all(axis=axis, keepdims=keepdims)",
    "docstring": "Determine if the values along an axis are all the same. nan values are ignored. must be a numpy array. is assumed to be normalized; that is, 0 >> from numpy import nan, array >>> a = array([[ 3., 3., nan, 3.], ... [ 1., nan, 2., 4.], ... [nan, nan, 9., -1.], ... [nan, 5., 4., 3.], ... [ 2., 2., 2., 2.], ... [nan, nan, nan, nan]]) >>> _nan_allsame(a, axis=1, keepdims=True) array([[ True], [False], [False], [False], [ True], [ True]])",
    "type": "function",
    "file_path": "scipy\\scipy\\_lib\\_util.py",
    "ast_data": "FunctionDef name:_nan_allsame arg:a arg:axis arg:keepdims arguments arg arg arg If Compare If Compare Return return:yes Assign Call Assign Assign If Compare Assign Return return:yes Call Assign Call Return return:yes Call Compare Call"
  },
  {
    "library": "tensorflow",
    "name": "shape",
    "source_code": "@property\ndef shape(self):\n    return self._shape",
    "docstring": "The shape of this variable.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\resource_variable_ops.py",
    "ast_data": "FunctionDef name:shape arg:self arguments arg Return return:yes"
  },
  {
    "library": "scipy",
    "name": "jnjnp_zeros",
    "source_code": "def jnjnp_zeros(nt):\n    if not isscalar(nt) or floor(nt) != nt or nt > 1200:\n        raise ValueError('Number must be integer <= 1200.')\n    nt = int(nt)\n    n, m, t, zo = _specfun.jdzo(nt)\n    return (zo[1:nt + 1], n[:nt], m[:nt], t[:nt])",
    "docstring": "Compute zeros of integer-order Bessel functions Jn and Jn'. Results are arranged in order of the magnitudes of the zeros. Parameters ---------- nt : int Number (<=1200) of zeros to compute Returns ------- zo[l-1] : ndarray Value of the lth zero of Jn(x) and Jn'(x). Of length . n[l-1] : ndarray Order of the Jn(x) or Jn'(x) associated with lth zero. Of length . m[l-1] : ndarray Serial number of the zeros of Jn(x) or Jn'(x) associated with lth zero. Of length . t[l-1] : ndarray 0 if lth zero in zo is zero of Jn(x), 1 if it is a zero of Jn'(x). Of length . See Also -------- jn_zeros, jnp_zeros : to get separated arrays of zeros. References ---------- .. [1] Zhang, Shanjie and Jin, Jianming. \"Computation of Special Functions\", John Wiley and Sons, 1996, chapter 5.",
    "type": "function",
    "file_path": "scipy\\scipy\\special\\_basic.py",
    "ast_data": "FunctionDef name:jnjnp_zeros arg:nt arguments arg If BoolOp Call Compare Call Compare Raise Call Assign Call Assign Call Return return:yes"
  },
  {
    "library": "kornia",
    "name": "from_config",
    "source_code": "@staticmethod\ndef from_config(variant: str, pretrained: bool=False, **kwargs: Any) -> VisionTransformer:\n    model_type, patch_size_str = variant.split('/')\n    patch_size = int(patch_size_str)\n    model_config = {'vit_ti': {'embed_dim': 192, 'depth': 12, 'num_heads': 3}, 'vit_s': {'embed_dim': 384, 'depth': 12, 'num_heads': 6}, 'vit_b': {'embed_dim': 768, 'depth': 12, 'num_heads': 12}, 'vit_l': {'embed_dim': 1024, 'depth': 24, 'num_heads': 16}, 'vit_h': {'embed_dim': 1280, 'depth': 32, 'num_heads': 16}}[model_type]\n    kwargs.update(model_config, patch_size=patch_size)\n    model = VisionTransformer(**kwargs)\n    if pretrained:\n        url = _get_weight_url(variant)\n        state_dict = torch.hub.load_state_dict_from_url(url)\n        model.load_state_dict(state_dict)\n    return model",
    "docstring": "Build ViT model based on the given config string. The format is `kornia.contrib.vit.VisionTransformer`. Returns: The respective ViT model Example: >>> from kornia.contrib import VisionTransformer >>> vit_model = VisionTransformer.from_config(\"vit_b/16\", pretrained=True)",
    "type": "method",
    "file_path": "kornia\\kornia\\contrib\\vit.py",
    "ast_data": "FunctionDef name:from_config arg:variant arg:pretrained arguments arg arg arg Assign Call Assign Call Assign Call Assign Call If Assign Call Assign Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "OrderedSetHolder",
    "source_code": "@dataclasses.dataclass\nclass OrderedSetHolder:\n    items: list[Any]",
    "docstring": "See FxGraphHashDetails. Holds a sorted list to support stable hashing of set kwargs.",
    "type": "class",
    "file_path": "pytorch\\torch\\_inductor\\codecache.py",
    "ast_data": "ClassDef name:OrderedSetHolder"
  },
  {
    "library": "numpy",
    "name": "unique_values",
    "source_code": "@array_function_dispatch(_unique_values_dispatcher)\ndef unique_values(x):\n    return unique(x, return_index=False, return_inverse=False, return_counts=False, equal_nan=False, sorted=False)",
    "docstring": "Returns the unique elements of an input array . This function is an Array API compatible alternative to:: np.unique(x, equal_nan=False, sorted=False) .. versionchanged:: 2.3 The algorithm was changed to a faster one that does not rely on sorting, and hence the results are no longer implicitly sorted. Parameters ---------- x : array_like Input array. It will be flattened if it is not already 1-D. Returns ------- out : ndarray The unique elements of an input array. See Also -------- unique : Find the unique elements of an array. Examples -------- >>> import numpy as np >>> np.unique_values([1, 1, 2]) array([1, 2]) # may vary",
    "type": "function",
    "file_path": "numpy\\numpy\\lib\\_arraysetops_impl.py",
    "ast_data": "FunctionDef name:unique_values arg:x arguments arg Return return:yes Call Call"
  },
  {
    "library": "scipy",
    "name": "cpenmsg",
    "source_code": "def cpenmsg(solver, iprint, cpen):\n    if abs(iprint) < 2:\n        return\n    elif iprint > 0:\n        fname = ''\n    else:\n        fname = f'{solver.strip()}_output.txt'\n    if abs(iprint) >= 3:\n        message = f'\\nSet CPEN to {cpen}'\n    else:\n        message = f'\\n\\nSet CPEN to {cpen}'\n    if len(fname) > 0:\n        with open(fname, 'a') as f:\n            f.write(message)\n    else:\n        print(message)",
    "docstring": "This function prints a message when CPEN is updated.",
    "type": "function",
    "file_path": "scipy\\scipy\\_lib\\pyprima\\pyprima\\src\\pyprima\\common\\message.py",
    "ast_data": "FunctionDef name:cpenmsg arg:solver arg:iprint arg:cpen arguments arg arg arg If Compare Call Return return:no If Compare Assign Assign Call If Compare Call Assign Assign If Compare Call With Call Call Call"
  },
  {
    "library": "scipy",
    "name": "restart_reduce",
    "source_code": "def restart_reduce(self, rank):\n    if self.collapsed is not None:\n        return\n    assert rank > 0\n    if len(self.cs) > rank:\n        del self.cs[:]\n        del self.ds[:]",
    "docstring": "Reduce the rank of the matrix by dropping all vectors.",
    "type": "method",
    "file_path": "scipy\\scipy\\optimize\\_nonlin.py",
    "ast_data": "FunctionDef name:restart_reduce arg:self arg:rank arguments arg arg If Compare Return return:no Compare If Compare Call"
  },
  {
    "library": "pandas",
    "name": "_validate",
    "source_code": "@staticmethod\ndef _validate(data):\n    if isinstance(data, ABCMultiIndex):\n        raise AttributeError('Can only use .str accessor with Index, not MultiIndex')\n    allowed_types = ['string', 'empty', 'bytes', 'mixed', 'mixed-integer']\n    data = extract_array(data)\n    values = getattr(data, 'categories', data)\n    inferred_dtype = lib.infer_dtype(values, skipna=True)\n    if inferred_dtype not in allowed_types:\n        raise AttributeError(f'Can only use .str accessor with string values, not {inferred_dtype}')\n    return inferred_dtype",
    "docstring": "Auxiliary function for StringMethods, infers and checks dtype of data. This is a \"first line of defence\" at the creation of the StringMethods- object, and just checks that the dtype is in the *union* of the allowed types over all string methods below; this restriction is then refined on a per-method basis using the decorator @forbid_nonstring_types (more info in the corresponding docstring). This really should exclude all series/index with any non-string values, but that isn't practical for performance reasons until we have a str dtype (GH 9343 / 13877) Parameters ---------- data : The content of the Series Returns ------- dtype : inferred dtype of data",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\strings\\accessor.py",
    "ast_data": "FunctionDef name:_validate arg:data arguments arg If Call Raise Call Assign Assign Call Assign Call Assign Call If Compare Raise Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_RendezvousState",
    "source_code": "class _RendezvousState:\n    round: int\n    complete: bool\n    deadline: Optional[datetime]\n    closed: bool\n    participants: dict[_NodeDesc, int]\n    wait_list: set[_NodeDesc]\n    redundancy_list: set[_NodeDesc]\n    last_heartbeats: dict[_NodeDesc, datetime]\n\n    def __init__(self) -> None:\n        self.round = 0\n        self.complete = False\n        self.deadline = None\n        self.closed = False\n        self.participants = {}\n        self.wait_list = set()\n        self.redundancy_list = set()\n        self.last_heartbeats = {}",
    "docstring": "Hold the state of a rendezvous. Attributes: round: The current round of the rendezvous. complete: A boolean value indicating whether the current round of the rendezvous is complete. deadline: The time at which the current round of the rendezvous will be considered complete if it is still waiting for nodes to join. closed: A boolean value indicating whether the rendezvous is closed. participants: A dictionary of the participants and their corresponding ranks. wait_list: A set of nodes that are waiting to participate in the next round of the rendezvous. redundancy_list: A set of nodes that are redundant in the current round and can join the next rendezvous without triggering re-rendezvous. last_heartbeats: A dictionary containing each node's last heartbeat time.",
    "type": "class",
    "file_path": "pytorch\\torch\\distributed\\elastic\\rendezvous\\dynamic_rendezvous.py",
    "ast_data": "ClassDef name:_RendezvousState FunctionDef name:__init__ arg:self arguments arg Assign Assign Assign Assign Assign Assign Call Assign Call Assign"
  },
  {
    "library": "tensorflow",
    "name": "write_issue_comment",
    "source_code": "def write_issue_comment(self, repo: str, issue_number: int, body: str) -> requests.Response:\n    endpoint = f'repos/{repo}/issues/{issue_number}/comments'\n    return self._make_request('POST', endpoint, body=body)",
    "docstring": "Writes a comment on an issue (or PR). 28#create-an-issue-comment Arguments: repo: a string of the form , e.g. openxla/xla issue_number: the issue (or PR) to comment on body: the body of the comment Returns: a requests.Response object containing the response from the API. Raises: requests.exceptions.HTTPError",
    "type": "method",
    "file_path": "tensorflow\\third_party\\xla\\.github\\workflows\\github_api.py",
    "ast_data": "FunctionDef name:write_issue_comment arg:self arg:repo arg:issue_number arg:body arguments arg arg arg arg Assign Return return:yes Call"
  },
  {
    "library": "kornia",
    "name": "batch_2x2_ellipse",
    "source_code": "def batch_2x2_ellipse(m: Tensor) -> Tuple[Tensor, Tensor]:\n    am = m[..., 0, 0]\n    bm = m[..., 0, 1]\n    cm = m[..., 1, 0]\n    dm = m[..., 1, 1]\n    a = am ** 2 + bm ** 2\n    b = am * cm + bm * dm\n    d = cm ** 2 + dm ** 2\n    trh = (a + d) / 2\n    sqrtdisc = torch.sqrt(((a - d) / 2) ** 2 + b ** 2)\n    eigenvals = torch.stack([trh + sqrtdisc, trh - sqrtdisc], dim=-1).clamp(min=0)\n    dens = eigenvals - a.unsqueeze(-1)\n    dens[torch.abs(dens) < 1e-06] = 1e-06\n    eigenvecs = torch.stack([b.unsqueeze(-1) / dens, torch.ones_like(dens)], dim=-2)\n    eigenvecs = eigenvecs / torch.norm(eigenvecs, dim=-2, keepdim=True)\n    return (eigenvals, eigenvecs)",
    "docstring": "Returns Eigenvalues and Eigenvectors of batch of 2x2 matrices.",
    "type": "function",
    "file_path": "kornia\\kornia\\feature\\adalam\\utils.py",
    "ast_data": "FunctionDef name:batch_2x2_ellipse arg:m arguments arg Assign Assign Assign Assign Assign Assign Assign Assign Assign Call Assign Call Call Assign Call Assign Compare Call Assign Call Call Call Assign Call Return return:yes"
  },
  {
    "library": "pandas",
    "name": "unpack_tuple_and_ellipses",
    "source_code": "def unpack_tuple_and_ellipses(item: tuple):\n    if len(item) > 1:\n        if item[0] is Ellipsis:\n            item = item[1:]\n        elif item[-1] is Ellipsis:\n            item = item[:-1]\n    if len(item) > 1:\n        raise IndexError('too many indices for array.')\n    item = item[0]\n    return item",
    "docstring": "Possibly unpack arr[..., n] to arr[n]",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\indexers\\utils.py",
    "ast_data": "FunctionDef name:unpack_tuple_and_ellipses arg:item arguments arg If Compare Call If Compare Assign If Compare Assign If Compare Call Raise Call Assign Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "_partial_fit_ovo_binary",
    "source_code": "def _partial_fit_ovo_binary(estimator, X, y, i, j, partial_fit_params):\n    cond = np.logical_or(y == i, y == j)\n    y = y[cond]\n    if len(y) != 0:\n        y_binary = np.zeros_like(y)\n        y_binary[y == j] = 1\n        partial_fit_params_subset = _check_method_params(X, params=partial_fit_params, indices=cond)\n        return _partial_fit_binary(estimator, X[cond], y_binary, partial_fit_params=partial_fit_params_subset)\n    return estimator",
    "docstring": "Partially fit a single binary estimator(one-vs-one).",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\multiclass.py",
    "ast_data": "FunctionDef name:_partial_fit_ovo_binary arg:estimator arg:X arg:y arg:i arg:j arg:partial_fit_params arguments arg arg arg arg arg arg Assign Call Compare Compare Assign If Compare Call Assign Call Assign Compare Assign Call Return return:yes Call Return return:yes"
  },
  {
    "library": "django",
    "name": "get_sortable_by",
    "source_code": "def get_sortable_by(self, request):\n    return self.sortable_by if self.sortable_by is not None else self.get_list_display(request)",
    "docstring": "Hook for specifying which fields can be sorted in the changelist.",
    "type": "method",
    "file_path": "django\\django\\contrib\\admin\\options.py",
    "ast_data": "FunctionDef name:get_sortable_by arg:self arg:request arguments arg arg Return return:yes Compare Call"
  },
  {
    "library": "scikit-learn",
    "name": "min",
    "source_code": "def min(self, y: Array | complex, /, copy: bool | None=None, xp: ModuleType | None=None) -> Array:\n    xp = array_namespace(self._x) if xp is None else xp\n    mxp = meta_namespace(self._x, xp=xp)\n    y = xp.asarray(y)\n    return self._op(_AtOp.MIN, mxp.minimum, mxp.minimum, y, copy=copy, xp=xp)",
    "docstring": "Apply `` and return the updated array.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\externals\\array_api_extra\\_lib\\_at.py",
    "ast_data": "FunctionDef name:min arg:copy arg:xp arguments arg arg arg arg Assign Compare Call Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "_RealInterval",
    "source_code": "class _RealInterval(_Interval):\n\n    def __str__(self):\n        a, b = self.endpoints\n        a, b = (self._get_endpoint_str(a, 'f1'), self._get_endpoint_str(b, 'f2'))\n        left_inclusive, right_inclusive = self.inclusive\n        left = '[' if left_inclusive else '('\n        right = ']' if right_inclusive else ')'\n        return f'{left}{a}, {b}{right}'\n\n    def _get_endpoint_str(self, endpoint, funcname):\n        if callable(endpoint):\n            if endpoint.__doc__ is not None:\n                return endpoint.__doc__\n            params = inspect.signature(endpoint).parameters.values()\n            params = [p.name for p in params if p.kind == inspect.Parameter.KEYWORD_ONLY]\n            return f'{funcname}({','.join(params)})'\n        return self.symbols.get(endpoint, f'{endpoint}')",
    "docstring": "Represents a simply-connected subset of the real line; i.e., an interval Completes the implementation of the class for intervals on the real line. Methods ------- define_parameters(*parameters) (Inherited) Records any parameters used to define the endpoints of the domain. get_numerical_endpoints(parameter_values) (Inherited) Gets the numerical values of the domain endpoints, which may have been defined symbolically. contains(item, parameter_values) (Inherited) Determines whether the argument is contained within the domain __str__() Returns a string representation of the domain, e.g. \"[a, b)\".",
    "type": "class",
    "file_path": "scipy\\scipy\\stats\\_distribution_infrastructure.py",
    "ast_data": "ClassDef name:_RealInterval FunctionDef name:__str__ arg:self arguments arg Assign Assign Call Call Assign Assign Assign Return return:yes FunctionDef name:_get_endpoint_str arg:self arg:endpoint arg:funcname arguments arg arg arg If Call If Compare Return return:yes Assign Call Call Assign Compare Return return:yes Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_unstack_activations",
    "source_code": "def _unstack_activations(self, activations: Dict[str, tensor.Tensor]):\n    flattened_activations = []\n    table_to_current_offset = {table_name: 0 for table_name in self._stacked_table_to_tables}\n    for table_name in self._stacked_table_to_tables:\n        activation_shape = activations[table_name].shape\n        activations[table_name] = array_ops.reshape(activations[table_name], [self._num_sc_per_chip, -1, activation_shape[-1]])\n    for _, feature in self._flat_features:\n        sample_count = functools.reduce(operator.mul, feature.output_shape)\n        table_name = self._table_to_stacked_table_offset[feature.table.name][0]\n        extra_cols = self._table_to_padding_columns[feature.table.name]\n        activation = array_ops.slice(activations[table_name], [0, table_to_current_offset[table_name], 0], [self._num_sc_per_chip, sample_count // self._num_sc_per_chip, feature.table.dim - extra_cols])\n        activation = array_ops.reshape(activation, list(feature.output_shape) + [feature.table.dim - extra_cols])\n        flattened_activations.append(activation)\n        table_to_current_offset[table_name] += sample_count // self._num_sc_per_chip\n    return nest.pack_sequence_as(self._feature_config, flattened_activations)",
    "docstring": "Untack the incoming per table activations into per feature.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\tpu\\tpu_embedding_v3.py",
    "ast_data": "FunctionDef name:_unstack_activations arg:self arg:activations arguments arg arg Assign Assign For Assign Assign Call For Assign Call Assign Assign Assign Call Assign Call Call Call Return return:yes Call"
  },
  {
    "library": "numpy",
    "name": "ConverterLockError",
    "source_code": "class ConverterLockError(ConverterError):\n    pass",
    "docstring": "Exception raised when an attempt is made to upgrade a locked converter.",
    "type": "class",
    "file_path": "numpy\\numpy\\lib\\_iotools.py",
    "ast_data": "ClassDef name:ConverterLockError"
  },
  {
    "library": "pandas",
    "name": "__array__",
    "source_code": "def __array__(self, dtype: NpDtype | None=None, copy: bool | None=None) -> np.ndarray:\n    if copy is False:\n        if not self._hasna:\n            return np.array(self._data, dtype=dtype, copy=copy)\n        raise ValueError('Unable to avoid copy while creating an array as requested.')\n    if copy is None:\n        copy = False\n    return self.to_numpy(dtype=dtype, copy=copy)",
    "docstring": "the array interface, return my values We return an object array here to preserve our scalar values",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\arrays\\masked.py",
    "ast_data": "FunctionDef name:__array__ arg:self arg:dtype arg:copy arguments arg arg arg If Compare If Return return:yes Call Raise Call If Compare Assign Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "verify",
    "source_code": "def verify(self, **kwargs):\n    torch.testing.assert_close(self.extern.output_tensor, self.expected, **kwargs)",
    "docstring": "Verify the correctness of the benchmarking results",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\select_algorithm.py",
    "ast_data": "FunctionDef name:verify arg:self arguments arg arg Call"
  },
  {
    "library": "tensorflow",
    "name": "_maybe_create_save_counter",
    "source_code": "def _maybe_create_save_counter(self):\n    if self._save_counter is None:\n        with ops.device('/cpu:0'):\n            self._save_counter = data_structures.NoDependency(add_variable(self, name='save_counter', initializer=0, dtype=dtypes.int64, trainable=False))",
    "docstring": "Create a save counter if it does not yet exist.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\checkpoint\\checkpoint.py",
    "ast_data": "FunctionDef name:_maybe_create_save_counter arg:self arguments arg If Compare With Call Assign Call Call"
  },
  {
    "library": "matplotlib",
    "name": "inside_circle",
    "source_code": "def inside_circle(cx, cy, r):\n    r2 = r ** 2\n\n    def _f(xy):\n        x, y = xy\n        return (x - cx) ** 2 + (y - cy) ** 2 < r2\n    return _f",
    "docstring": "Return a function that checks whether a point is in a circle with center (*cx*, *cy*) and radius *r*. The returned function has the signature:: f(xy: tuple[float, float]) -> bool",
    "type": "function",
    "file_path": "matplotlib\\lib\\matplotlib\\bezier.py",
    "ast_data": "FunctionDef name:inside_circle arg:cx arg:cy arg:r arguments arg arg arg Assign FunctionDef name:_f arg:xy arguments arg Assign Return return:yes Compare Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "batch_flatten",
    "source_code": "@dispatch.add_dispatch_support\n@doc_controls.do_not_generate_docs\ndef batch_flatten(x):\n    x = array_ops.reshape(x, array_ops_stack.stack([-1, prod(shape(x)[1:])]))\n    return x",
    "docstring": "Turn a nD tensor into a 2D tensor with same 0th dimension. In other words, it flattens each data samples of a batch. Args: x: A tensor or variable. Returns: A tensor. Examples: Flattening a 3D tensor to 2D by collapsing the last dimension. >>> x_batch = tf.keras.backend.ones(shape=(2, 3, 4, 5)) >>> x_batch_flatten = batch_flatten(x_batch) >>> tf.keras.backend.int_shape(x_batch_flatten) (2, 60)",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\backend.py",
    "ast_data": "FunctionDef name:batch_flatten arg:x arguments arg Assign Call Call Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_set_func_attr",
    "source_code": "def _set_func_attr(self, attr_name, func_name) -> None:\n    func = attr_value_pb2.NameAttrList(name=func_name)\n    self._set_attr(attr_name, attr_value_pb2.AttrValue(func=func))",
    "docstring": "Private method used to set a function attribute in the node_def.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\ops.py",
    "ast_data": "FunctionDef name:_set_func_attr arg:self arg:attr_name arg:func_name arguments arg arg arg Assign Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "register_graph_pattern",
    "source_code": "def register_graph_pattern(pattern: PatternExpr, extra_check: Callable[[Match], bool]=_return_true, *, pass_dict: _PassDictsType, prepend: bool=False) -> Callable[[Callable[..., Any]], Callable[..., Any]]:\n\n    def decorator(handler: Callable[..., Any]) -> Callable[..., Any]:\n        assert callable(handler)\n        GraphPatternEntry(pattern=pattern, extra_check=extra_check, handler=handler).register(pass_dict, prepend=prepend)\n        return handler\n    return decorator",
    "docstring": "Register a pattern that runs a function on the FX graph, allowing custom transformation code.",
    "type": "function",
    "file_path": "pytorch\\torch\\_inductor\\pattern_matcher.py",
    "ast_data": "FunctionDef name:register_graph_pattern arg:pattern arg:extra_check arguments arg arg arg arg FunctionDef name:decorator arg:handler arguments arg Call Call Call Return return:yes Return return:yes"
  },
  {
    "library": "django",
    "name": "Deserializer",
    "source_code": "class Deserializer:\n\n    def __init__(self, stream_or_string, **options):\n        self.options = options\n        if isinstance(stream_or_string, str):\n            self.stream = StringIO(stream_or_string)\n        else:\n            self.stream = stream_or_string\n\n    def __iter__(self):\n        return self\n\n    def __next__(self):\n        raise NotImplementedError('subclasses of Deserializer must provide a __next__() method')",
    "docstring": "Abstract base deserializer class.",
    "type": "class",
    "file_path": "django\\django\\core\\serializers\\base.py",
    "ast_data": "ClassDef name:Deserializer FunctionDef name:__init__ arg:self arg:stream_or_string arguments arg arg arg Assign If Call Assign Call Assign FunctionDef name:__iter__ arg:self arguments arg Return return:yes FunctionDef name:__next__ arg:self arguments arg Raise Call"
  },
  {
    "library": "scipy",
    "name": "min_distance_rectangle",
    "source_code": "def min_distance_rectangle(self, other, p=2.0):\n    return minkowski_distance(0, np.maximum(0, np.maximum(self.mins - other.maxes, other.mins - self.maxes)), p)",
    "docstring": "Compute the minimum distance between points in the two hyperrectangles. Parameters ---------- other : hyperrectangle Input. p : float Input.",
    "type": "method",
    "file_path": "scipy\\scipy\\spatial\\_kdtree.py",
    "ast_data": "FunctionDef name:min_distance_rectangle arg:self arg:other arg:p arguments arg arg arg Return return:yes Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "ObjMismatchError",
    "source_code": "class ObjMismatchError(Exception):\n    pass",
    "docstring": "Raised when an importer found a different object with the same name as the user-provided one.",
    "type": "class",
    "file_path": "pytorch\\torch\\package\\importer.py",
    "ast_data": "ClassDef name:ObjMismatchError"
  },
  {
    "library": "scipy",
    "name": "arclength",
    "source_code": "def arclength(eps, a, b, x, epsrel=0.01, limit=100):\n    return quad(lambda phi: np.sqrt(1 + fp(eps, a, b, x, phi) ** 2), 0, np.pi, epsrel=epsrel, limit=100)[0]",
    "docstring": "Compute Arc length of f. Note that the arc length of a function f from t0 to t1 is given by int_t0^t1 sqrt(1 + f'(t)^2) dt",
    "type": "function",
    "file_path": "scipy\\scipy\\special\\_precompute\\wright_bessel.py",
    "ast_data": "FunctionDef name:arclength arg:eps arg:a arg:b arg:x arg:epsrel arg:limit arguments arg arg arg arg arg arg Return return:yes Call arguments arg Call Call"
  },
  {
    "library": "cryptography",
    "name": "sign",
    "source_code": "@abc.abstractmethod\ndef sign(self, data: utils.Buffer, signature_algorithm: EllipticCurveSignatureAlgorithm) -> bytes:\n    pass",
    "docstring": "Signs the data",
    "type": "method",
    "file_path": "cryptography\\src\\cryptography\\hazmat\\primitives\\asymmetric\\ec.py",
    "ast_data": "FunctionDef name:sign arg:self arg:data arg:signature_algorithm arguments arg arg arg"
  },
  {
    "library": "tensorflow",
    "name": "back_prop",
    "source_code": "@property\ndef back_prop(self):\n    return self._back_prop",
    "docstring": "True iff backprop is enabled for this while loop.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\control_flow_ops.py",
    "ast_data": "FunctionDef name:back_prop arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, full_shape, var_offset):\n    if not isinstance(full_shape, (list, tuple)):\n        raise TypeError('`full_shape` must be a sequence (like tuple or list) instead of ' + type(full_shape).__name__)\n    if not isinstance(var_offset, (list, tuple)):\n        raise TypeError('`var_offset` must be a sequence (like tuple or list) instead of ' + type(var_offset).__name__)\n    if len(var_offset) != len(full_shape):\n        raise ValueError('Expected equal length, but `var_offset` is of length {} while full_shape is of length {}.'.format(len(var_offset), len(full_shape)))\n    for offset, shape in zip(var_offset, full_shape):\n        if offset < 0 or offset >= shape:\n            raise ValueError('Expected 0 <= offset < shape but found offset={}, shape={} for var_offset={}, full_shape={}'.format(offset, shape, var_offset, full_shape))\n    self._full_shape = full_shape\n    self._var_offset = var_offset",
    "docstring": "Constructor. Args: full_shape: Tuple or list of indicating the full combined shape of the partitioned variables. var_offset: Tuple or list of specifying offset of this partition with respect to the full variable for each dimension. Raises: TypeError: If or is not a sequence. ValueError: If or differ in length. If exceeds in any dimension.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\variable_scope.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:full_shape arg:var_offset arguments arg arg arg If Call Raise Call Call If Call Raise Call Call If Compare Call Call Raise Call Call Call Call For Call If BoolOp Compare Compare Raise Call Call Assign Assign"
  },
  {
    "library": "matplotlib",
    "name": "set_usetex",
    "source_code": "@_docstring.kwarg_doc('bool, default: :rc:`text.usetex`')\ndef set_usetex(self, usetex):\n    self._usetex = bool(mpl._val_or_rc(usetex, 'text.usetex'))\n    self.stale = True",
    "docstring": "Parameters ---------- usetex : bool or None Whether to render using TeX, `text.usetex`.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\text.py",
    "ast_data": "FunctionDef name:set_usetex arg:self arg:usetex arguments arg arg Assign Call Call Assign Call"
  },
  {
    "library": "kornia",
    "name": "_get_lazy_distance_matrix",
    "source_code": "def _get_lazy_distance_matrix(desc1: Tensor, desc2: Tensor, dm_: Optional[Tensor]=None) -> Tensor:\n    if dm_ is None:\n        dm = _cdist(desc1, desc2)\n    else:\n        KORNIA_CHECK_DM_DESC(desc1, desc2, dm_)\n        dm = dm_\n    return dm",
    "docstring": "Check validity of provided distance matrix, or calculates L2-distance matrix if dm is not provided. Args: desc1: Batch of descriptors of a shape :math:. desc2: Batch of descriptors of a shape :math:. dm_: Tensor containing the distances from each descriptor in desc1 to each descriptor in desc2, shape of :math:.",
    "type": "function",
    "file_path": "kornia\\kornia\\feature\\matching.py",
    "ast_data": "FunctionDef name:_get_lazy_distance_matrix arg:desc1 arg:desc2 arg:dm_ arguments arg arg arg If Compare Assign Call Call Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "partitioned_dim_sizes",
    "source_code": "@property\ndef partitioned_dim_sizes(self):\n    return self._partitioned_dim_sizes",
    "docstring": "The partitioned dimension sizes for this shape. Returns: A of 0-D or 1-D integer .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\ragged_tensor_shape.py",
    "ast_data": "FunctionDef name:partitioned_dim_sizes arg:self arguments arg Return return:yes"
  },
  {
    "library": "django",
    "name": "get_limit_choices_to",
    "source_code": "def get_limit_choices_to(self):\n    if callable(self.remote_field.limit_choices_to):\n        return self.remote_field.limit_choices_to()\n    return self.remote_field.limit_choices_to",
    "docstring": "Return `` for this model field. If it is a callable, it will be invoked and the result will be returned.",
    "type": "method",
    "file_path": "django\\django\\db\\models\\fields\\related.py",
    "ast_data": "FunctionDef name:get_limit_choices_to arg:self arguments arg If Call Return return:yes Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "is_generic_mapping",
    "source_code": "def is_generic_mapping(tp):\n    return tp not in (collections.abc.Mapping, typing.Mapping) and getattr(tp, '__origin__', None) in (collections.abc.Mapping, typing.Mapping)",
    "docstring": "Returns true if is a parameterized typing.Mapping value.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\util\\type_annotations.py",
    "ast_data": "FunctionDef name:is_generic_mapping arg:tp arguments arg Return return:yes BoolOp Compare Compare Call"
  },
  {
    "library": "matplotlib",
    "name": "get_major_formatter",
    "source_code": "def get_major_formatter(self):\n    return self.major.formatter",
    "docstring": "Get the formatter of the major ticker.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\axis.py",
    "ast_data": "FunctionDef name:get_major_formatter arg:self arguments arg Return return:yes"
  },
  {
    "library": "pandas",
    "name": "pandasSQL_builder",
    "source_code": "def pandasSQL_builder(con, schema: str | None=None, need_transaction: bool=False) -> PandasSQL:\n    import sqlite3\n    if isinstance(con, sqlite3.Connection) or con is None:\n        return SQLiteDatabase(con)\n    sqlalchemy = import_optional_dependency('sqlalchemy', errors='ignore')\n    if isinstance(con, str) and sqlalchemy is None:\n        raise ImportError('Using URI string without sqlalchemy installed.')\n    if sqlalchemy is not None and isinstance(con, (str, sqlalchemy.engine.Connectable)):\n        return SQLDatabase(con, schema, need_transaction)\n    adbc = import_optional_dependency('adbc_driver_manager.dbapi', errors='ignore')\n    if adbc and isinstance(con, adbc.Connection):\n        return ADBCDatabase(con)\n    warnings.warn('pandas only supports SQLAlchemy connectable (engine/connection) or database string URI or sqlite3 DBAPI2 connection. Other DBAPI2 objects are not tested. Please consider using SQLAlchemy.', UserWarning, stacklevel=find_stack_level())\n    return SQLiteDatabase(con)",
    "docstring": "Convenience function to return the correct PandasSQL subclass based on the provided parameters. Also creates a sqlalchemy connection and transaction if necessary.",
    "type": "function",
    "file_path": "pandas\\pandas\\io\\sql.py",
    "ast_data": "FunctionDef name:pandasSQL_builder arg:con arg:schema arg:need_transaction arguments arg arg arg If BoolOp Call Compare Return return:yes Call Assign Call If BoolOp Call Compare Raise Call If BoolOp Compare Call Return return:yes Call Assign Call If BoolOp Call Return return:yes Call Call Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_tag_callable",
    "source_code": "def _tag_callable(loss):\n    if callable(loss):\n        with autocast_variable.enable_auto_cast_variables(None):\n            loss = loss()\n    if loss is None:\n        return None\n    if not tensor_util.is_tf_type(loss):\n        loss = tensor_conversion.convert_to_tensor_v2_with_dispatch(loss, dtype=backend.floatx())\n    loss._unconditional_loss = True\n    return loss",
    "docstring": "Tags callable loss tensor as .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\base_layer.py",
    "ast_data": "FunctionDef name:_tag_callable arg:loss arguments arg If Call With Call Assign Call If Compare Return return:no If Call Assign Call Call Assign Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "forward",
    "source_code": "def forward(self, *args, **kwargs):\n    return self.module(*args, **kwargs)",
    "docstring": "Forward pass.",
    "type": "method",
    "file_path": "pytorch\\torch\\optim\\swa_utils.py",
    "ast_data": "FunctionDef name:forward arg:self arguments arg arg arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "vorbis_window",
    "source_code": "@tf_export('signal.vorbis_window')\n@dispatch.add_dispatch_support\ndef vorbis_window(window_length, dtype=dtypes.float32, name=None):\n    with ops.name_scope(name, 'vorbis_window'):\n        window_length = _check_params(window_length, dtype)\n        arg = math_ops.cast(math_ops.range(window_length), dtype=dtype)\n        window = math_ops.sin(np.pi / 2.0 * math_ops.pow(math_ops.sin(np.pi / math_ops.cast(window_length, dtype=dtype) * (arg + 0.5)), 2.0))\n    return window",
    "docstring": "Generate a [Vorbis power complementary window][vorbis]. Args: window_length: A scalar indicating the window length to generate. dtype: The data type to produce. Must be a floating point type. name: An optional name for the operation. Returns: A of shape of type . [vorbis]:",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\signal\\window_ops.py",
    "ast_data": "FunctionDef name:vorbis_window arg:window_length arg:dtype arg:name arguments arg arg arg With Call Assign Call Assign Call Call Assign Call Call Call Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "to_tensors",
    "source_code": "def to_tensors(self, value: Any) -> List[core.Tensor]:\n    del value\n    return []",
    "docstring": "Breaks down a value of this type into Tensors. For a TraceType instance, the number of tensors generated for corresponding value should be constant. Args: value: A value belonging to this TraceType Returns: List of Tensors.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\types\\trace.py",
    "ast_data": "FunctionDef name:to_tensors arg:self arg:value arguments arg arg Return return:no"
  },
  {
    "library": "pytorch",
    "name": "name",
    "source_code": "@property\ndef name(self) -> str:\n    return 'c10d'",
    "docstring": "See base class.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\elastic\\rendezvous\\c10d_rendezvous_backend.py",
    "ast_data": "FunctionDef name:name arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "__nonzero__",
    "source_code": "def __nonzero__(self):\n    self._disallow_bool_casting()",
    "docstring": "Dummy method to prevent a tensor from being used as a Python . This is the Python 2.x counterpart to above. Raises: .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\tensor.py",
    "ast_data": "FunctionDef name:__nonzero__ arg:self arguments arg Call"
  },
  {
    "library": "pytorch",
    "name": "validate_outputs",
    "source_code": "def validate_outputs(self, _, outputs, args, output_metadata):\n    op = ops.get('validate_outputs')\n    proxy_args = pytree.tree_map(self.to_proxy, (outputs, *args))\n    new_proxy_outputs = self.fx_tracer.create_proxy('call_function', op, args=proxy_args, kwargs={})\n    assert len(output_metadata) == len(outputs)\n    self.bind_objects_to_proxies(outputs, new_proxy_outputs)\n    return outputs",
    "docstring": "Proxies a call to ops.validate_outputs(outputs, *args) into the graph",
    "type": "method",
    "file_path": "pytorch\\torch\\_dynamo\\compiled_autograd.py",
    "ast_data": "FunctionDef name:validate_outputs arg:self arg:_ arg:outputs arg:args arg:output_metadata arguments arg arg arg arg arg Assign Call Assign Call Assign Call Compare Call Call Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "linear_spine",
    "source_code": "@classmethod\ndef linear_spine(cls, axes, spine_type, **kwargs):\n    if spine_type == 'left':\n        path = mpath.Path([(0.0, 0.999), (0.0, 0.999)])\n    elif spine_type == 'right':\n        path = mpath.Path([(1.0, 0.999), (1.0, 0.999)])\n    elif spine_type == 'bottom':\n        path = mpath.Path([(0.999, 0.0), (0.999, 0.0)])\n    elif spine_type == 'top':\n        path = mpath.Path([(0.999, 1.0), (0.999, 1.0)])\n    else:\n        raise ValueError('unable to make path for spine \"%s\"' % spine_type)\n    result = cls(axes, spine_type, path, **kwargs)\n    result.set_visible(mpl.rcParams[f'axes.spines.{spine_type}'])\n    return result",
    "docstring": "Create and return a linear .",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\spines.py",
    "ast_data": "FunctionDef name:linear_spine arg:cls arg:axes arg:spine_type arguments arg arg arg arg If Compare Assign Call If Compare Assign Call If Compare Assign Call If Compare Assign Call Raise Call Assign Call Call Return return:yes"
  },
  {
    "library": "django",
    "name": "unpickle_lazyobject",
    "source_code": "def unpickle_lazyobject(wrapped):\n    return wrapped",
    "docstring": "Used to unpickle lazy objects. Just return its argument, which will be the wrapped object.",
    "type": "function",
    "file_path": "django\\django\\utils\\functional.py",
    "ast_data": "FunctionDef name:unpickle_lazyobject arg:wrapped arguments arg Return return:yes"
  },
  {
    "library": "django",
    "name": "get_database_version",
    "source_code": "def get_database_version(self):\n    return divmod(self.pg_version, 10000)",
    "docstring": "Return a tuple of the database's version. E.g. for pg_version 120004, return (12, 4).",
    "type": "method",
    "file_path": "django\\django\\db\\backends\\postgresql\\base.py",
    "ast_data": "FunctionDef name:get_database_version arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "_insert_assert_async",
    "source_code": "def _insert_assert_async(self, last_node, op, lower, upper, assert_msg):\n    self.counter += 1\n    graph = last_node.graph\n    with graph.inserting_after(last_node):\n        cmp = graph.call_function(op, (lower, upper), {})\n    with graph.inserting_after(cmp):\n        cmp_tensor = graph.call_function(torch.ops.aten.scalar_tensor.default, (cmp,), {})\n    with graph.inserting_after(cmp_tensor):\n        assert_async = graph.call_function(torch.ops.aten._assert_async.msg, (cmp_tensor, assert_msg), {})\n    return assert_async",
    "docstring": "Inserts assert_async call_function nodes in the graph. This function is called **during** the interpreter-based pass.",
    "type": "method",
    "file_path": "pytorch\\torch\\_export\\passes\\add_runtime_assertions_for_constraints_pass.py",
    "ast_data": "FunctionDef name:_insert_assert_async arg:self arg:last_node arg:op arg:lower arg:upper arg:assert_msg arguments arg arg arg arg arg arg Assign With Call Assign Call With Call Assign Call With Call Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_FusedBatchNormGradGrad",
    "source_code": "@ops.RegisterGradient('FusedBatchNormGrad')\ndef _FusedBatchNormGradGrad(op: ops.Operation, *grad):\n    data_format = op.get_attr('data_format')\n    epsilon = op.get_attr('epsilon')\n    is_training = op.get_attr('is_training')\n    grad_y = op.inputs[0]\n    x = op.inputs[1]\n    scale = op.inputs[2]\n    pop_mean = op.inputs[3]\n    pop_var = op.inputs[4]\n    grad_grad_x = grad[0]\n    grad_grad_scale = grad[1]\n    grad_grad_offset = grad[2]\n    with backprop.GradientTape() as tape:\n        tape.watch(grad_y)\n        tape.watch(x)\n        tape.watch(scale)\n        grad_x, grad_scale, grad_offset = _BatchNormGrad(grad_y, x, scale, pop_mean, pop_var, epsilon, data_format, is_training)\n        grad_initial = [grad_grad_x, grad_grad_scale, grad_grad_offset]\n    grad_grad_y, grad_x, grad_scale = tape.gradient([grad_x, grad_scale, grad_offset], [grad_y, x, scale], grad_initial)\n    return (grad_grad_y, grad_x, grad_scale, None, None)",
    "docstring": "Returns the gradients for the 3 inputs of FusedBatchNormGrad. Args: op: The FusedBatchNormGradOp for which we need to compute gradients. *grad: An argument list for tensors of gradients wrt the outputs with grad[0] as grad_grad_x, grad[1] as grad_grad_scale, grad[2] as grad_grad_offset. Returns: A tuple (grad_grad_y, grad_x, grad_scale, None, None), where grad_grad_y is the gradient for grad_y, grad_x the gradient for x, grad_scale the gradient for scale.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\nn_fused_batch_norm_grad.py",
    "ast_data": "FunctionDef name:_FusedBatchNormGradGrad arg:op arguments arg arg Assign Call Assign Call Assign Call Assign Assign Assign Assign Assign Assign Assign Assign With Call Call Call Call Assign Call Assign Assign Call Return return:yes Call"
  },
  {
    "library": "cherrypy",
    "name": "index",
    "source_code": "@cherrypy.expose\ndef index(self):\n    return \"<html>\\n<body>Try some <a href='%s?a=7'>other</a> path,\\nor a <a href='%s?n=14'>default</a> path.<br />\\nOr, just look at the pretty picture:<br />\\n<img src='%s' />\\n</body></html>\" % (url('other'), url('else'), url('files/made_with_cherrypy_small.png'))",
    "docstring": "Render HTML-template at the root path of the web-app.",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\scaffold\\__init__.py",
    "ast_data": "FunctionDef name:index arg:self arguments arg Return return:yes Call Call Call"
  },
  {
    "library": "kornia",
    "name": "IntegralImage",
    "source_code": "class IntegralImage(Module):\n\n    def forward(self, input: Tensor) -> Tensor:\n        return integral_image(input)",
    "docstring": "Calculates integral of the input image tensor. This particular version sums over the last two dimensions. Args: image: the input image tensor with shape :math:. Returns: Integral tensor for the input image tensor with shape :math:. Shape: - Input: :math: - Output: :math: Examples: >>> input = torch.ones(1, 5, 5) >>> output = IntegralImage()(input) >>> output tensor([[[ 1., 2., 3., 4., 5.], [ 2., 4., 6., 8., 10.], [ 3., 6., 9., 12., 15.], [ 4., 8., 12., 16., 20.], [ 5., 10., 15., 20., 25.]]])",
    "type": "class",
    "file_path": "kornia\\kornia\\enhance\\integral.py",
    "ast_data": "ClassDef name:IntegralImage FunctionDef name:forward arg:self arg:input arguments arg arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "num_graph_execution_traces",
    "source_code": "def num_graph_execution_traces(self):\n    return len(self._graph_execution_trace_digests)",
    "docstring": "Get the number of graph execution traces read so far.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\debug\\lib\\debug_events_reader.py",
    "ast_data": "FunctionDef name:num_graph_execution_traces arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "kornia",
    "name": "LabToRgb",
    "source_code": "class LabToRgb(Module):\n    ONNX_DEFAULT_INPUTSHAPE: ClassVar[list[int]] = [-1, 3, -1, -1]\n    ONNX_DEFAULT_OUTPUTSHAPE: ClassVar[list[int]] = [-1, 3, -1, -1]\n\n    def forward(self, image: torch.Tensor, clip: bool=True) -> torch.Tensor:\n        return lab_to_rgb(image, clip)",
    "docstring": "Convert an image from Lab to RGB. Returns: RGB version of the image. Range may not be in :math:. Shape: - image: :math: - output: :math: Examples: >>> input = torch.rand(2, 3, 4, 5) >>> rgb = LabToRgb() >>> output = rgb(input) # 2x3x4x5 References: [1] [2] [3]",
    "type": "class",
    "file_path": "kornia\\kornia\\color\\lab.py",
    "ast_data": "ClassDef name:LabToRgb FunctionDef name:forward arg:self arg:image arg:clip arguments arg arg arg Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "get_result",
    "source_code": "def get_result(self, x, flag=_ECONVERGED):\n    return (x, self.function_calls, self.iterations, flag)",
    "docstring": "Package the result and statistics into a tuple.",
    "type": "method",
    "file_path": "scipy\\scipy\\optimize\\_zeros_py.py",
    "ast_data": "FunctionDef name:get_result arg:self arg:x arg:flag arguments arg arg arg Return return:yes"
  },
  {
    "library": "pandas",
    "name": "_create_nodes_and_group",
    "source_code": "def _create_nodes_and_group(self, key: str) -> Node:\n    assert self._handle is not None\n    paths = key.split('/')\n    path = '/'\n    for p in paths:\n        if not len(p):\n            continue\n        new_path = path\n        if not path.endswith('/'):\n            new_path += '/'\n        new_path += p\n        group = self.get_node(new_path)\n        if group is None:\n            group = self._handle.create_group(path, p)\n        path = new_path\n    return group",
    "docstring": "Create nodes from key and return group name.",
    "type": "method",
    "file_path": "pandas\\pandas\\io\\pytables.py",
    "ast_data": "FunctionDef name:_create_nodes_and_group arg:self arg:key arguments arg arg Compare Assign Call Assign For If Call Assign If Call Assign Call If Compare Assign Call Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_validate_namespace_whitelist",
    "source_code": "def _validate_namespace_whitelist(namespace_whitelist):\n    if namespace_whitelist is None:\n        return None\n    if not isinstance(namespace_whitelist, list):\n        raise TypeError(f'`namespace_whitelist` must be a list of strings. Got: {namespace_whitelist} with type {type(namespace_whitelist)}.')\n    processed = []\n    for namespace in namespace_whitelist:\n        if not isinstance(namespace, str):\n            raise ValueError(f'Whitelisted namespace must be a string. Got: {namespace} of type {type(namespace)}.')\n        processed.append(compat.as_str(namespace))\n    return processed",
    "docstring": "Validates namespace whitelist argument.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\saved_model\\save_options.py",
    "ast_data": "FunctionDef name:_validate_namespace_whitelist arg:namespace_whitelist arguments arg If Compare Return return:no If Call Raise Call Call Assign For If Call Raise Call Call Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "__init__",
    "source_code": "def __init__(self, conv_result, input_var, c_out, kernel, padding, stride, dilation, matching_constraint_vars):\n    self.conv_result = conv_result\n    self.input_var = input_var\n    self.c_out = c_out\n    self.kernel = kernel\n    self.padding = padding\n    self.stride = stride\n    self.dilation = dilation\n    self.matching_constraint = matching_constraint_vars",
    "docstring": ":param conv_result: the convolution result :param input_var: input to convolution :param c_out: output chanel type :param kernel: kernel tuple",
    "type": "method",
    "file_path": "pytorch\\torch\\fx\\experimental\\migrate_gradual_types\\constraint.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:conv_result arg:input_var arg:c_out arg:kernel arg:padding arg:stride arg:dilation arg:matching_constraint_vars arguments arg arg arg arg arg arg arg arg arg Assign Assign Assign Assign Assign Assign Assign Assign"
  },
  {
    "library": "authlib",
    "name": "_append_params",
    "source_code": "def _append_params(oauth_params, params):\n    merged = list(params)\n    merged.extend(oauth_params)\n    merged.sort(key=lambda i: i[0].startswith('oauth_'))\n    return merged",
    "docstring": "Append OAuth params to an existing set of parameters. Both params and oauth_params is must be lists of 2-tuples. Per _ and _ of the spec. .. _: .. _:",
    "type": "function",
    "file_path": "authlib\\authlib\\oauth1\\rfc5849\\parameters.py",
    "ast_data": "FunctionDef name:_append_params arg:oauth_params arg:params arguments arg arg Assign Call Call Call arguments arg Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "python_code",
    "source_code": "@compatibility(is_backward_compatible=True)\ndef python_code(self, root_module: str, *, verbose: bool=False, include_stride: bool=False, include_device: bool=False, colored: bool=False) -> PythonCode:\n    namespace = _Namespace()\n\n    def node_repr(n: Node):\n        return namespace.create_name(n.name, n)\n\n    @contextmanager\n    def override_node_repr(graph: Graph):\n        orig_repr_fns = {}\n        for node in graph.nodes:\n            orig_repr_fns[node] = node._repr_fn\n            node._repr_fn = node_repr\n        try:\n            yield None\n        finally:\n            for node in graph.nodes:\n                node._repr_fn = orig_repr_fns[node]\n    with override_node_repr(self):\n        return self._python_code(root_module, namespace, verbose=verbose, include_stride=include_stride, include_device=include_device, colored=colored)",
    "docstring": "Turn this `src` -> the objects that they reference.",
    "type": "method",
    "file_path": "pytorch\\torch\\fx\\graph.py",
    "ast_data": "FunctionDef name:python_code arg:self arg:root_module arguments arg arg arg arg arg arg Assign Call FunctionDef name:node_repr arg:n arguments arg Return return:yes Call FunctionDef name:override_node_repr arg:graph arguments arg Assign For Assign Assign Try For Assign With Call Return return:yes Call Call"
  },
  {
    "library": "numpy",
    "name": "endswith",
    "source_code": "def endswith(self, suffix, start=0, end=None):\n    return endswith(self, suffix, start, end)",
    "docstring": "Returns a boolean array which is where the string element in ends with , otherwise . See Also -------- char.endswith",
    "type": "method",
    "file_path": "numpy\\numpy\\_core\\defchararray.py",
    "ast_data": "FunctionDef name:endswith arg:self arg:suffix arg:start arg:end arguments arg arg arg arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "sign",
    "source_code": "@dispatch.add_dispatch_support\n@doc_controls.do_not_generate_docs\ndef sign(x):\n    return math_ops.sign(x)",
    "docstring": "Element-wise sign. Args: x: Tensor or variable. Returns: A tensor.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\backend.py",
    "ast_data": "FunctionDef name:sign arg:x arguments arg Return return:yes Call"
  },
  {
    "library": "django",
    "name": "_check_list_per_page",
    "source_code": "def _check_list_per_page(self, obj):\n    if not isinstance(obj.list_per_page, int):\n        return must_be('an integer', option='list_per_page', obj=obj, id='admin.E118')\n    else:\n        return []",
    "docstring": "Check that list_per_page is an integer.",
    "type": "method",
    "file_path": "django\\django\\contrib\\admin\\checks.py",
    "ast_data": "FunctionDef name:_check_list_per_page arg:self arg:obj arguments arg arg If Call Return return:yes Call Return return:no"
  },
  {
    "library": "tensorflow",
    "name": "get_tensor",
    "source_code": "def get_tensor(self):\n    return load_tensor_from_event_file(self.file_path)",
    "docstring": "Get tensor from the dump () file. Returns: The tensor loaded from the dump () file.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\debug\\lib\\debug_data.py",
    "ast_data": "FunctionDef name:get_tensor arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "copy_osx_pyobjc",
    "source_code": "def copy_osx_pyobjc(text):\n    text = _stringifyText(text)\n    newStr = Foundation.NSString.stringWithString_(text).nsstring()\n    newData = newStr.dataUsingEncoding_(Foundation.NSUTF8StringEncoding)\n    board = AppKit.NSPasteboard.generalPasteboard()\n    board.declareTypes_owner_([AppKit.NSStringPboardType], None)\n    board.setData_forType_(newData, AppKit.NSStringPboardType)",
    "docstring": "Copy string argument to clipboard",
    "type": "function",
    "file_path": "pandas\\pandas\\io\\clipboard\\__init__.py",
    "ast_data": "FunctionDef name:copy_osx_pyobjc arg:text arguments arg Assign Call Assign Call Call Assign Call Assign Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "bulk_restore",
    "source_code": "def bulk_restore(self, filename_tensor, saveables, preferred_shard, restore_sequentially):\n    del restore_sequentially\n    all_tensors = []\n    for saveable in saveables:\n        if saveable.device:\n            device = saveable_object_util.set_cpu0(saveable.device)\n        else:\n            device = None\n        with ops.device(device):\n            all_tensors.extend(self.restore_op(filename_tensor, saveable, preferred_shard))\n    return all_tensors",
    "docstring": "Restore all tensors contained in saveables. By default, this issues separate calls to for each saveable. Subclasses may override to load multiple saveables in a single call. Args: filename_tensor: String Tensor. saveables: List of BaseSaverBuilder.SaveableObject objects. preferred_shard: Int. Shard to open first when loading a sharded file. restore_sequentially: Unused. Bool. If true, each restore is sequential. Returns: A list of Tensors resulting from reading 'saveable' from 'filename'.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\training\\saver.py",
    "ast_data": "FunctionDef name:bulk_restore arg:self arg:filename_tensor arg:saveables arg:preferred_shard arg:restore_sequentially arguments arg arg arg arg arg Assign For If Assign Call Assign With Call Call Call Return return:yes"
  },
  {
    "library": "pandas",
    "name": "FixedWidthFieldParser",
    "source_code": "class FixedWidthFieldParser(PythonParser):\n\n    def __init__(self, f: ReadCsvBuffer[str], **kwds) -> None:\n        self.colspecs = kwds.pop('colspecs')\n        self.infer_nrows = kwds.pop('infer_nrows')\n        PythonParser.__init__(self, f, **kwds)\n\n    def _make_reader(self, f: IO[str] | ReadCsvBuffer[str]) -> FixedWidthReader:\n        return FixedWidthReader(f, self.colspecs, self.delimiter, self.comment, self.skiprows, self.infer_nrows)\n\n    def _remove_empty_lines(self, lines: list[list[T]]) -> list[list[T]]:\n        return [line for line in lines if any((not isinstance(e, str) or e.strip() for e in line))]",
    "docstring": "Specialization that Converts fixed-width fields into DataFrames. See PythonParser for details.",
    "type": "class",
    "file_path": "pandas\\pandas\\io\\parsers\\python_parser.py",
    "ast_data": "ClassDef name:FixedWidthFieldParser FunctionDef name:__init__ arg:self arg:f arguments arg arg arg Assign Call Assign Call Call FunctionDef name:_make_reader arg:self arg:f arguments arg arg Return return:yes Call FunctionDef name:_remove_empty_lines arg:self arg:lines arguments arg arg Return return:yes Call BoolOp Call Call"
  },
  {
    "library": "kornia",
    "name": "AddWeighted",
    "source_code": "class AddWeighted(Module):\n\n    def __init__(self, alpha: Union[float, Tensor], beta: Union[float, Tensor], gamma: Union[float, Tensor]) -> None:\n        super().__init__()\n        self.alpha = alpha\n        self.beta = beta\n        self.gamma = gamma\n\n    def forward(self, src1: Tensor, src2: Tensor) -> Tensor:\n        return add_weighted(src1, self.alpha, src2, self.beta, self.gamma)",
    "docstring": "Calculate the weighted sum of two Tensors. The function calculates the weighted sum of two Tensors as follows: .. math:: out = src1 * alpha + src2 * beta + gamma Args: alpha: weight of the src1 elements as Union[float, Tensor]. beta: weight of the src2 elements as Union[float, Tensor]. gamma: scalar added to each sum as Union[float, Tensor]. Shape: - Input1: Tensor with an arbitrary shape, equal to shape of Input2. - Input2: Tensor with an arbitrary shape, equal to shape of Input1. - Output: Weighted tensor with shape equal to src1 and src2 shapes. Example: >>> input1 = torch.rand(1, 1, 5, 5) >>> input2 = torch.rand(1, 1, 5, 5) >>> output = AddWeighted(0.5, 0.5, 1.0)(input1, input2) >>> output.shape torch.Size([1, 1, 5, 5]) Notes: Tensor alpha/beta/gamma have to be with shape broadcastable to src1 and src2 shapes.",
    "type": "class",
    "file_path": "kornia\\kornia\\enhance\\core.py",
    "ast_data": "ClassDef name:AddWeighted FunctionDef name:__init__ arg:self arg:alpha arg:beta arg:gamma arguments arg arg arg arg Call Call Assign Assign Assign FunctionDef name:forward arg:self arg:src1 arg:src2 arguments arg arg arg Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "gradient_proba",
    "source_code": "def gradient_proba(self, y_true, raw_prediction, sample_weight=None, gradient_out=None, proba_out=None, n_threads=1):\n    if gradient_out is None:\n        if proba_out is None:\n            gradient_out = np.empty_like(raw_prediction)\n            proba_out = np.empty_like(raw_prediction)\n        else:\n            gradient_out = np.empty_like(proba_out)\n    elif proba_out is None:\n        proba_out = np.empty_like(gradient_out)\n    self.closs.gradient_proba(y_true=y_true, raw_prediction=raw_prediction, sample_weight=sample_weight, gradient_out=gradient_out, proba_out=proba_out, n_threads=n_threads)\n    return (gradient_out, proba_out)",
    "docstring": "Compute gradient and class probabilities fow raw_prediction. Parameters ---------- y_true : C-contiguous array of shape (n_samples,) Observed, true target values. raw_prediction : array of shape (n_samples, n_classes) Raw prediction values (in link space). sample_weight : None or C-contiguous array of shape (n_samples,) Sample weights. gradient_out : None or array of shape (n_samples, n_classes) A location into which the gradient is stored. If None, a new array might be created. proba_out : None or array of shape (n_samples, n_classes) A location into which the class probabilities are stored. If None, a new array might be created. n_threads : int, default=1 Might use openmp thread parallelism. Returns ------- gradient : array of shape (n_samples, n_classes) Element-wise gradients. proba : array of shape (n_samples, n_classes) Element-wise class probabilities.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\_loss\\loss.py",
    "ast_data": "FunctionDef name:gradient_proba arg:self arg:y_true arg:raw_prediction arg:sample_weight arg:gradient_out arg:proba_out arg:n_threads arguments arg arg arg arg arg arg arg If Compare If Compare Assign Call Assign Call Assign Call If Compare Assign Call Call Return return:yes"
  },
  {
    "library": "authlib",
    "name": "AssertionSession",
    "source_code": "class AssertionSession(AssertionClient, Session):\n    token_auth_class = AssertionAuth\n    JWT_BEARER_GRANT_TYPE = JWTBearerGrant.GRANT_TYPE\n    ASSERTION_METHODS = {JWT_BEARER_GRANT_TYPE: JWTBearerGrant.sign}\n    DEFAULT_GRANT_TYPE = JWT_BEARER_GRANT_TYPE\n\n    def __init__(self, token_endpoint, issuer, subject, audience=None, grant_type=None, claims=None, token_placement='header', scope=None, default_timeout=None, leeway=60, **kwargs):\n        Session.__init__(self)\n        self.default_timeout = default_timeout\n        update_session_configure(self, kwargs)\n        AssertionClient.__init__(self, session=self, token_endpoint=token_endpoint, issuer=issuer, subject=subject, audience=audience, grant_type=grant_type, claims=claims, token_placement=token_placement, scope=scope, leeway=leeway, **kwargs)\n\n    def request(self, method, url, withhold_token=False, auth=None, **kwargs):\n        if self.default_timeout:\n            kwargs.setdefault('timeout', self.default_timeout)\n        if not withhold_token and auth is None:\n            auth = self.token_auth\n        return super().request(method, url, auth=auth, **kwargs)",
    "docstring": "Constructs a new Assertion Framework for OAuth 2.0 Authorization Grants per RFC7521_. .. _RFC7521:",
    "type": "class",
    "file_path": "authlib\\authlib\\integrations\\requests_client\\assertion_session.py",
    "ast_data": "ClassDef name:AssertionSession Assign Assign Assign Assign FunctionDef name:__init__ arg:self arg:token_endpoint arg:issuer arg:subject arg:audience arg:grant_type arg:claims arg:token_placement arg:scope arg:default_timeout arg:leeway arguments arg arg arg arg arg arg arg arg arg arg arg arg Call Assign Call Call FunctionDef name:request arg:self arg:method arg:url arg:withhold_token arg:auth arguments arg arg arg arg arg arg If Call If BoolOp Compare Assign Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "SoftplusTransform",
    "source_code": "class SoftplusTransform(Transform):\n    domain = constraints.real\n    codomain = constraints.positive\n    bijective = True\n    sign = +1\n\n    def __eq__(self, other):\n        return isinstance(other, SoftplusTransform)\n\n    def _call(self, x):\n        return softplus(x)\n\n    def _inverse(self, y):\n        return (-y).expm1().neg().log() + y\n\n    def log_abs_det_jacobian(self, x, y):\n        return -softplus(-x)",
    "docstring": "Transform via the mapping :math:. The implementation reverts to the linear function when :math:.",
    "type": "class",
    "file_path": "pytorch\\torch\\distributions\\transforms.py",
    "ast_data": "ClassDef name:SoftplusTransform Assign Assign Assign Assign FunctionDef name:__eq__ arg:self arg:other arguments arg arg Return return:yes Call FunctionDef name:_call arg:self arg:x arguments arg arg Return return:yes Call FunctionDef name:_inverse arg:self arg:y arguments arg arg Return return:yes Call Call Call FunctionDef name:log_abs_det_jacobian arg:self arg:x arg:y arguments arg arg arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "_get_param_to_param_key",
    "source_code": "def _get_param_to_param_key(optim: torch.optim.Optimizer, model: Optional[nn.Module]=None, is_named_optimizer: bool=False, param_to_fqns: Optional[dict[nn.Parameter, list[str]]]=None, flat_param_to_fqn: Optional[dict[FlatParameter, str]]=None) -> dict[nn.Parameter, Union[int, str]]:\n    param_id_to_param = _get_param_key_to_param(optim, model, is_named_optimizer, param_to_fqns, flat_param_to_fqn)\n    return {param: param_id for param_id, param in param_id_to_param.items()}",
    "docstring": "Constructs the inverse mapping of :func:. This API only supports the case where is a regular optimizer, not NamedOptimizer. So the parameter keys will be parameter ids.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\fsdp\\_optim_utils.py",
    "ast_data": "FunctionDef name:_get_param_to_param_key arg:optim arg:model arg:is_named_optimizer arg:param_to_fqns arg:flat_param_to_fqn arguments arg arg arg arg arg Assign Call Return return:yes Call"
  },
  {
    "library": "scrapy",
    "name": "to_unicode",
    "source_code": "def to_unicode(text: str | bytes, encoding: str | None=None, errors: str='strict') -> str:\n    if isinstance(text, str):\n        return text\n    if not isinstance(text, (bytes, str)):\n        raise TypeError(f'to_unicode must receive a bytes or str object, got {type(text).__name__}')\n    if encoding is None:\n        encoding = 'utf-8'\n    return text.decode(encoding, errors)",
    "docstring": "Return the unicode representation of a bytes object `` is already an unicode object, return it as-is.",
    "type": "function",
    "file_path": "scrapy\\scrapy\\utils\\python.py",
    "ast_data": "FunctionDef name:to_unicode arg:text arg:encoding arg:errors arguments arg arg arg If Call Return return:yes If Call Raise Call Call If Compare Assign Return return:yes Call"
  },
  {
    "library": "cherrypy",
    "name": "__repr__",
    "source_code": "def __repr__(self):\n    return '%s.%s(%s)' % (self.__module__, self.__class__.__name__, dict.__repr__(self))",
    "docstring": "Render representation of a :class: instance.",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\lib\\reprconf.py",
    "ast_data": "FunctionDef name:__repr__ arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "_flush",
    "source_code": "def _flush(self):\n    if self.compressobj is not None:\n        compressed = self.compressobj.flush()\n        self.file.write(compressed)\n        self.compressobj = None",
    "docstring": "Flush the compression object.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\backends\\backend_pdf.py",
    "ast_data": "FunctionDef name:_flush arg:self arguments arg If Compare Assign Call Call Assign"
  },
  {
    "library": "tensorflow",
    "name": "name",
    "source_code": "@property\ndef name(self):\n    return self._name",
    "docstring": "Name prepended to all ops created by this .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\linalg\\linear_operator.py",
    "ast_data": "FunctionDef name:name arg:self arguments arg Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "get_fontname",
    "source_code": "def get_fontname(self):\n    return self._fontproperties.get_name()",
    "docstring": "Return the font name as a string. See Also -------- .font_manager.FontProperties.get_name",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\text.py",
    "ast_data": "FunctionDef name:get_fontname arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "numpy",
    "name": "__bit_generator_ctor",
    "source_code": "def __bit_generator_ctor(bit_generator: str | type[BitGenerator]='MT19937'):\n    if isinstance(bit_generator, type):\n        bit_gen_class = bit_generator\n    elif bit_generator in BitGenerators:\n        bit_gen_class = BitGenerators[bit_generator]\n    else:\n        raise ValueError(str(bit_generator) + ' is not a known BitGenerator module.')\n    return bit_gen_class()",
    "docstring": "Pickling helper function that returns a bit generator object Parameters ---------- bit_generator : type[BitGenerator] or str BitGenerator class or string containing the name of the BitGenerator Returns ------- BitGenerator BitGenerator instance",
    "type": "function",
    "file_path": "numpy\\numpy\\random\\_pickle.py",
    "ast_data": "FunctionDef name:__bit_generator_ctor arg:bit_generator arguments arg If Call Assign If Compare Assign Raise Call Call Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "score",
    "source_code": "def score(self, X, y, sample_weight=None, **score_params):\n    _raise_for_params(score_params, self, 'score')\n    scoring = self._get_scorer()\n    if _routing_enabled():\n        routed_params = process_routing(self, 'score', sample_weight=sample_weight, **score_params)\n    else:\n        routed_params = Bunch()\n        routed_params.scorer = Bunch(score={})\n        if sample_weight is not None:\n            routed_params.scorer.score['sample_weight'] = sample_weight\n    return scoring(self, X, y, **routed_params.scorer.score)",
    "docstring": "Score using the option on the given test data and labels. Parameters ---------- X : array-like of shape (n_samples, n_features) Test samples. y : array-like of shape (n_samples,) True labels for X. sample_weight : array-like of shape (n_samples,), default=None Sample weights. **score_params : dict Parameters to pass to the method of the underlying scorer. .. versionadded:: 1.4 Returns ------- score : float Score of self.predict(X) w.r.t. y.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\linear_model\\_logistic.py",
    "ast_data": "FunctionDef name:score arg:self arg:X arg:y arg:sample_weight arguments arg arg arg arg arg Call Assign Call If Call Assign Call Assign Call Assign Call If Compare Assign Return return:yes Call"
  },
  {
    "library": "authlib",
    "name": "RevocationEndpoint",
    "source_code": "class RevocationEndpoint(_RevocationEndpoint):\n\n    def query_token(self, token, token_type_hint):\n        token_model = self.server.token_model\n        if token_type_hint == 'access_token':\n            rv = _query_access_token(token_model, token)\n        elif token_type_hint == 'refresh_token':\n            rv = _query_refresh_token(token_model, token)\n        else:\n            rv = _query_access_token(token_model, token)\n            if not rv:\n                rv = _query_refresh_token(token_model, token)\n        return rv\n\n    def revoke_token(self, token, request):\n        token.revoked = True\n        token.save()",
    "docstring": "The revocation endpoint for OAuth authorization servers allows clients to notify the authorization server that a previously obtained refresh or access token is no longer needed. Register it into authorization server, and create token endpoint response for token revocation:: from django.views.decorators.http import require_http_methods # see register into authorization server instance server.register_endpoint(RevocationEndpoint) @require_http_methods([\"POST\"]) def revoke_token(request): return server.create_endpoint_response( RevocationEndpoint.ENDPOINT_NAME, request )",
    "type": "class",
    "file_path": "authlib\\authlib\\integrations\\django_oauth2\\endpoints.py",
    "ast_data": "ClassDef name:RevocationEndpoint FunctionDef name:query_token arg:self arg:token arg:token_type_hint arguments arg arg arg Assign If Compare Assign Call If Compare Assign Call Assign Call If Assign Call Return return:yes FunctionDef name:revoke_token arg:self arg:token arg:request arguments arg arg arg Assign Call"
  },
  {
    "library": "tensorflow",
    "name": "_make_callable_signature",
    "source_code": "def _make_callable_signature(obj):\n    if inspect.isclass(obj) or inspect.isfunction(obj):\n        if obj.__name__ == '<lambda>':\n            return _make_lambda_name(obj)\n        return obj.__name__\n    elif inspect.ismethod(obj):\n        obj_self = obj.__self__\n        if isinstance(obj_self, type):\n            cls_name = obj_self.__name__\n        else:\n            cls_name = obj_self.__class__.__name__\n        return f'{cls_name}.{obj.__name__}'\n    else:\n        raise TypeError(f'Only class/function/methods are valid inputs, got {type(obj)}')",
    "docstring": "Generate signature for function/method.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\core\\function\\capture\\free_vars_detect.py",
    "ast_data": "FunctionDef name:_make_callable_signature arg:obj arguments arg If BoolOp Call Call If Compare Return return:yes Call Return return:yes If Call Assign If Call Assign Assign Return return:yes Raise Call Call"
  },
  {
    "library": "pytorch",
    "name": "CompiledAOTI",
    "source_code": "@dataclasses.dataclass\nclass CompiledAOTI(OutputCode):\n    filename: Union[str, list[str]]\n\n    def __call__(self, inputs: Sequence[Any]) -> Any:\n        raise NotImplementedError('NYI')\n\n    def post_compile(self, example_inputs: Sequence[InputType], constants: CompiledFxGraphConstants, graph_kwargs: _CompileFxKwargs) -> None:\n        pass\n\n    def set_triton_bundle(self, triton_bundle: Any) -> None:\n        pass",
    "docstring": "Class holding an AOTInductor compiled so.",
    "type": "class",
    "file_path": "pytorch\\torch\\_inductor\\output_code.py",
    "ast_data": "ClassDef name:CompiledAOTI FunctionDef name:__call__ arg:self arg:inputs arguments arg arg Raise Call FunctionDef name:post_compile arg:self arg:example_inputs arg:constants arg:graph_kwargs arguments arg arg arg arg FunctionDef name:set_triton_bundle arg:self arg:triton_bundle arguments arg arg"
  },
  {
    "library": "pytorch",
    "name": "ComputeShapeSignature",
    "source_code": "class ComputeShapeSignature:\n\n    def __init__(self, kernel_name: str, f: NativeFunction, *, symint: bool) -> None:\n        self.__schema = LazyIrSchema(f.func, symint=symint)\n        self.__dispatch_args = ', '.join([a.decl() for a in dispatcher.arguments(f.func, symint=symint)])\n        self.__call_args = ', '.join([f'{arg.name}' for arg in self.__schema.filtered_args(generator=True)])\n        self.__kernel_name = kernel_name\n\n    def __decl_suffix(self) -> str:\n        return f'{self.__kernel_name}({self.__dispatch_args})'\n\n    def __call_suffix(self) -> str:\n        return f'{self.__kernel_name}({self.__call_args})'\n\n    @property\n    def shape_decl(self) -> str:\n        return f'TORCH_API std::vector<torch::lazy::Shape> compute_shape_{self.__decl_suffix()}'\n\n    @property\n    def shape_call(self) -> str:\n        return f'torch::lazy::compute_shape_{self.__call_suffix()}'",
    "docstring": "Here we use the base name as the suffix of the signature to avoid generating for in-place variants.",
    "type": "class",
    "file_path": "pytorch\\torchgen\\dest\\lazy_ir.py",
    "ast_data": "ClassDef name:ComputeShapeSignature FunctionDef name:__init__ arg:self arg:kernel_name arg:f arguments arg arg arg arg Assign Call Assign Call Call Call Assign Call Call Assign FunctionDef name:__decl_suffix arg:self arguments arg Return return:yes FunctionDef name:__call_suffix arg:self arguments arg Return return:yes FunctionDef name:shape_decl arg:self arguments arg Return return:yes Call FunctionDef name:shape_call arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "enable_grad",
    "source_code": "class enable_grad(_NoParamDecoratorContextManager):\n\n    def __enter__(self) -> None:\n        self.prev = torch.is_grad_enabled()\n        torch._C._set_grad_enabled(True)\n\n    def __exit__(self, exc_type: Any, exc_value: Any, traceback: Any) -> None:\n        torch._C._set_grad_enabled(self.prev)",
    "docstring": "Context-manager that enables gradient calculation. Enables gradient calculation, if it has been disabled via :class: or :class:. This context manager is thread local; it will not affect computation in other threads. Also functions as a decorator. .. note:: enable_grad is one of several mechanisms that can enable or disable gradients locally see :ref: for more information on how they compare. .. note:: This API does not apply to :ref:. Example:: >>> # xdoctest: +SKIP >>> x = torch.tensor([1.], requires_grad=True) >>> with torch.no_grad(): ... with torch.enable_grad(): ... y = x * 2 >>> y.requires_grad True >>> y.backward() >>> x.grad tensor([2.]) >>> @torch.enable_grad() ... def doubler(x): ... return x * 2 >>> with torch.no_grad(): ... z = doubler(x) >>> z.requires_grad True >>> @torch.enable_grad() ... def tripler(x): ... return x * 3 >>> with torch.no_grad(): ... z = tripler(x) >>> z.requires_grad True",
    "type": "class",
    "file_path": "pytorch\\torch\\autograd\\grad_mode.py",
    "ast_data": "ClassDef name:enable_grad FunctionDef name:__enter__ arg:self arguments arg Assign Call Call FunctionDef name:__exit__ arg:self arg:exc_type arg:exc_value arg:traceback arguments arg arg arg arg Call"
  },
  {
    "library": "tensorflow",
    "name": "_ConvertNumpyArrayToLiteral",
    "source_code": "def _ConvertNumpyArrayToLiteral(ndarray):\n    type_record = types_.MAP_DTYPE_TO_RECORD[str(ndarray.dtype)]\n    literal = xla_data_pb2.LiteralProto()\n    literal.shape.CopyFrom(xla_shape.CreateShapeFromNumpy(ndarray).message)\n    if ndarray.ndim == 0:\n        getattr(literal, type_record.literal_field_name).append(ndarray.astype(type_record.literal_field_type).item())\n    elif ndarray.dtype in {_np.bool_, _np.dtype('bool')}:\n        for element in _np.nditer(ndarray):\n            getattr(literal, type_record.literal_field_name).append(type_record.literal_field_type(element))\n    else:\n        ndarray_flat = ndarray.ravel(order='A')\n        getattr(literal, type_record.literal_field_name).extend(ndarray_flat)\n    return literal",
    "docstring": "Converts a Numpy array to a XLA literal.",
    "type": "function",
    "file_path": "tensorflow\\third_party\\xla\\xla\\python_api\\xla_literal.py",
    "ast_data": "FunctionDef name:_ConvertNumpyArrayToLiteral arg:ndarray arguments arg Assign Call Assign Call Call Call If Compare Call Call Call Call If Compare Call For Call Call Call Call Assign Call Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "is_categorical_column_weighted",
    "source_code": "def is_categorical_column_weighted(self):\n    if isinstance(self.categorical_column, (fc._WeightedCategoricalColumn, fc_lib.WeightedCategoricalColumn)):\n        return True\n    return False",
    "docstring": "Check if the categorical column of the embedding column is weighted.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\tpu\\feature_column_v2.py",
    "ast_data": "FunctionDef name:is_categorical_column_weighted arg:self arguments arg If Call Return return:yes Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "get_node_to_partition_mapping",
    "source_code": "def get_node_to_partition_mapping(partitions: list[Partition]) -> dict[Node, int]:\n    node_to_partition: dict[Node, int] = {}\n    for partition in partitions:\n        for node in partition.nodes:\n            node_to_partition[node] = partition.partition_id\n    return node_to_partition",
    "docstring": "Given a list of partitions,return node to partition mapping",
    "type": "function",
    "file_path": "pytorch\\torch\\fx\\experimental\\accelerator_partitioner.py",
    "ast_data": "FunctionDef name:get_node_to_partition_mapping arg:partitions arguments arg For For Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "extract_sub_graph",
    "source_code": "@deprecation.deprecated(date=None, instructions=_DEPRECATION_MSG)\n@tf_export(v1=['graph_util.extract_sub_graph'])\ndef extract_sub_graph(graph_def, dest_nodes):\n    if not isinstance(graph_def, graph_pb2.GraphDef):\n        raise TypeError(f'graph_def must be a graph_pb2.GraphDef proto, but got type {type(graph_def)}.')\n    if isinstance(dest_nodes, str):\n        raise TypeError(f'dest_nodes must be an iterable of strings, but got type {type(dest_nodes)}.')\n    name_to_input_name, name_to_node, name_to_seq_num = _extract_graph_summary(graph_def)\n    _assert_nodes_are_present(name_to_node, dest_nodes)\n    nodes_to_keep = _bfs_for_reachable_nodes(dest_nodes, name_to_input_name)\n    nodes_to_keep_list = sorted(list(nodes_to_keep), key=lambda n: name_to_seq_num[n])\n    out = graph_pb2.GraphDef()\n    for n in nodes_to_keep_list:\n        out.node.extend([copy.deepcopy(name_to_node[n])])\n    out.library.CopyFrom(graph_def.library)\n    out.versions.CopyFrom(graph_def.versions)\n    return out",
    "docstring": "Extract the subgraph that can reach any of the nodes in 'dest_nodes'. Args: graph_def: A graph_pb2.GraphDef proto. dest_nodes: An iterable of strings specifying the destination node names. Returns: The GraphDef of the sub-graph. Raises: TypeError: If 'graph_def' is not a graph_pb2.GraphDef proto.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\graph_util_impl.py",
    "ast_data": "FunctionDef name:extract_sub_graph arg:graph_def arg:dest_nodes arguments arg arg If Call Raise Call Call If Call Raise Call Call Assign Call Call Assign Call Assign Call Call arguments arg Assign Call For Call Call Call Call Return return:yes Call Call"
  },
  {
    "library": "matplotlib",
    "name": "get_underline_thickness",
    "source_code": "def get_underline_thickness(self):\n    return self._header[b'UnderlineThickness']",
    "docstring": "Return the underline thickness as float.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\_afm.py",
    "ast_data": "FunctionDef name:get_underline_thickness arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_CSRSparseMatrixToSparseTensorGrad",
    "source_code": "@ops.RegisterGradient('CSRSparseMatrixToSparseTensor')\ndef _CSRSparseMatrixToSparseTensorGrad(op: ops.Operation, *grads):\n    return sparse_csr_matrix_ops.sparse_tensor_to_csr_sparse_matrix(indices=op.outputs[0], values=grads[1], dense_shape=op.outputs[2])",
    "docstring": "Gradient for csr_sparse_matrix_to_sparse_tensor op.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\linalg\\sparse\\sparse_csr_matrix_grad.py",
    "ast_data": "FunctionDef name:_CSRSparseMatrixToSparseTensorGrad arg:op arguments arg arg Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_invert_topology",
    "source_code": "def _invert_topology(self):\n    tasks = np.full(list(self.mesh_shape), -1, dtype=np.int32)\n    devices = np.full(list(self.mesh_shape), -1, dtype=np.int32)\n    for task in range(self.device_coordinates.shape[0]):\n        for device in range(self.device_coordinates.shape[1]):\n            x, y, z, core = self.device_coordinates[task, device, :]\n            tasks[x, y, z, core] = task\n            devices[x, y, z, core] = device\n    return (tasks, devices)",
    "docstring": "Inverts a [task,device,axis] topology to [x,y,z] -> task/device maps.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\tpu\\topology.py",
    "ast_data": "FunctionDef name:_invert_topology arg:self arguments arg Assign Call Call Assign Call Call For Call For Call Assign Assign Assign Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "prepare_softmax_extra_check",
    "source_code": "def prepare_softmax_extra_check(match):\n    return config.online_softmax and match.kwargs['x'].meta['val'].device.type == 'cuda' and (config.cuda_backend == 'triton')",
    "docstring": "We only have triton online softmax kernels currently.",
    "type": "function",
    "file_path": "pytorch\\torch\\_inductor\\fx_passes\\post_grad.py",
    "ast_data": "FunctionDef name:prepare_softmax_extra_check arg:match arguments arg Return return:yes BoolOp Compare Compare"
  },
  {
    "library": "tensorflow",
    "name": "output_shape",
    "source_code": "@property\ndef output_shape(self):\n    return nest.map_structure(backend.int_shape, self.output)",
    "docstring": "Retrieves the output shape(s) of a layer. Only applicable if the layer has one output, or if all outputs have the same shape. Returns: Output shape, as an integer shape tuple (or list of shape tuples, one tuple per output tensor). Raises: AttributeError: if the layer has no defined output shape. RuntimeError: if called in Eager mode.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\functional.py",
    "ast_data": "FunctionDef name:output_shape arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "django",
    "name": "ContainedLookup",
    "source_code": "@BaseSpatialField.register_lookup\nclass ContainedLookup(GISLookup):\n    lookup_name = 'contained'",
    "docstring": "The 'contained' operator returns true if A's bounding box is completely contained by B's bounding box.",
    "type": "class",
    "file_path": "django\\django\\contrib\\gis\\db\\models\\lookups.py",
    "ast_data": "ClassDef name:ContainedLookup Assign"
  },
  {
    "library": "pandas",
    "name": "ColumnNullType",
    "source_code": "class ColumnNullType(enum.IntEnum):\n    NON_NULLABLE = 0\n    USE_NAN = 1\n    USE_SENTINEL = 2\n    USE_BITMASK = 3\n    USE_BYTEMASK = 4",
    "docstring": "Integer enum for null type representation. Attributes ---------- NON_NULLABLE : int Non-nullable column. USE_NAN : int Use explicit float NaN value. USE_SENTINEL : int Sentinel value besides NaN/NaT. USE_BITMASK : int The bit is set/unset representing a null on a certain position. USE_BYTEMASK : int The byte is set/unset representing a null on a certain position.",
    "type": "class",
    "file_path": "pandas\\pandas\\core\\interchange\\dataframe_protocol.py",
    "ast_data": "ClassDef name:ColumnNullType Assign Assign Assign Assign Assign"
  },
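A sketch of how this enum surfaces through the pandas interchange protocol (the exact kind reported depends on the column's dtype; the masked `Int64` example is an assumption):

```python
import pandas as pd

df = pd.DataFrame({"x": pd.array([1, None, 3], dtype="Int64")})
col = df.__dataframe__().get_column_by_name("x")
kind, value = col.describe_null
print(kind, value)  # e.g. ColumnNullType.USE_BYTEMASK plus the mask's null value
```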
  {
    "library": "cherrypy",
    "name": "stop",
    "source_code": "def stop(self):\n    if self.thread is None:\n        self.bus.log('No thread running for %s.' % self.name or self.__class__.__name__)\n    else:\n        if self.thread is not threading.current_thread():\n            name = self.thread.name\n            self.thread.cancel()\n            if not self.thread.daemon:\n                self.bus.log('Joining %r' % name)\n                self.thread.join()\n            self.bus.log('Stopped thread %r.' % name)\n        self.thread = None",
    "docstring": "Stop our callback's background task thread.",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\process\\plugins.py",
    "ast_data": "FunctionDef name:stop arg:self arguments arg If Compare Call BoolOp If Compare Call Assign Call If Call Call Call Assign"
  },
  {
    "library": "numpy",
    "name": "dist_info",
    "source_code": "def dist_info(self):\n    if hasattr(self, '_dist_info'):\n        return self._dist_info\n    cc_type = getattr(self._ccompiler, 'compiler_type', '')\n    if cc_type in ('intelem', 'intelemw'):\n        platform = 'x86_64'\n    elif cc_type in ('intel', 'intelw', 'intele'):\n        platform = 'x86'\n    else:\n        from distutils.util import get_platform\n        platform = get_platform()\n    cc_info = getattr(self._ccompiler, 'compiler', getattr(self._ccompiler, 'compiler_so', ''))\n    if not cc_type or cc_type == 'unix':\n        if hasattr(cc_info, '__iter__'):\n            compiler = cc_info[0]\n        else:\n            compiler = str(cc_info)\n    else:\n        compiler = cc_type\n    if hasattr(cc_info, '__iter__') and len(cc_info) > 1:\n        extra_args = ' '.join(cc_info[1:])\n    else:\n        extra_args = os.environ.get('CFLAGS', '')\n        extra_args += os.environ.get('CPPFLAGS', '')\n    self._dist_info = (platform, compiler, extra_args)\n    return self._dist_info",
    "docstring": "Return a tuple containing info about (platform, compiler, extra_args), required by the abstract class '_CCompiler' for discovering the platform environment. This is also used as a cache factor in order to detect any changes happening from outside.",
    "type": "method",
    "file_path": "numpy\\numpy\\distutils\\ccompiler_opt.py",
    "ast_data": "FunctionDef name:dist_info arg:self arguments arg If Call Return return:yes Assign Call If Compare Assign If Compare Assign Assign Call Assign Call Call If BoolOp Compare If Call Assign Assign Call Assign If BoolOp Call Compare Call Assign Call Assign Call Call Assign Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "get_markevery",
    "source_code": "def get_markevery(self):\n    return self._markevery",
    "docstring": "Return the markevery setting for marker subsampling. See also .",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\lines.py",
    "ast_data": "FunctionDef name:get_markevery arg:self arguments arg Return return:yes"
  },
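A quick round-trip of the markevery setting (headless backend chosen only so the sketch runs anywhere):

```python
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt

# Draw every 5th marker on the line, then read the setting back.
(line,) = plt.plot(range(20), marker="o", markevery=5)
print(line.get_markevery())  # 5
```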
  {
    "library": "scipy",
    "name": "_hz_to_erb",
    "source_code": "def _hz_to_erb(hz):\n    EarQ = 9.26449\n    minBW = 24.7\n    return hz / EarQ + minBW",
    "docstring": "Utility for converting from frequency (Hz) to the Equivalent Rectangular Bandwidth (ERB) scale ERB = frequency / EarQ + minBW",
    "type": "function",
    "file_path": "scipy\\scipy\\signal\\_filter_design.py",
    "ast_data": "FunctionDef name:_hz_to_erb arg:hz arguments arg Assign Assign Return return:yes"
  },
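A self-contained restatement of the conversion above with the same Glasberg & Moore constants; `hz_to_erb` here is a local stand-in for the private helper:

```python
def hz_to_erb(hz, ear_q=9.26449, min_bw=24.7):
    # ERB = frequency / EarQ + minBW
    return hz / ear_q + min_bw

print(hz_to_erb(1000.0))  # ~132.64 around 1 kHz
```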
  {
    "library": "django",
    "name": "get_units",
    "source_code": "@classmethod\ndef get_units(cls, wkt):\n    return gdal.SpatialReference(wkt).units",
    "docstring": "Return a tuple of (unit_value, unit_name) for the given WKT without using any of the database fields.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\db\\backends\\base\\models.py",
    "ast_data": "FunctionDef name:get_units arg:cls arg:wkt arguments arg arg Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "font_path",
    "source_code": "@property\ndef font_path(self):\n    psfont = self._get_pdftexmap_entry()\n    if psfont.filename is None:\n        raise ValueError('No usable font file found for {} ({}); the font may lack a Type-1 version'.format(psfont.psname.decode('ascii'), psfont.texname.decode('ascii')))\n    return Path(psfont.filename)",
    "docstring": "The to the font for this glyph.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\dviread.py",
    "ast_data": "FunctionDef name:font_path arg:self arguments arg Assign Call If Compare Raise Call Call Call Call Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "book",
    "source_code": "@property\ndef book(self) -> OpenDocumentSpreadsheet:\n    return self._book",
    "docstring": "Book instance of class odf.opendocument.OpenDocumentSpreadsheet. This attribute can be used to access engine-specific features.",
    "type": "method",
    "file_path": "pandas\\pandas\\io\\excel\\_odswriter.py",
    "ast_data": "FunctionDef name:book arg:self arguments arg Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "_compute_n_patches",
    "source_code": "def _compute_n_patches(i_h, i_w, p_h, p_w, max_patches=None):\n    n_h = i_h - p_h + 1\n    n_w = i_w - p_w + 1\n    all_patches = n_h * n_w\n    if max_patches:\n        if isinstance(max_patches, Integral) and max_patches < all_patches:\n            return max_patches\n        elif isinstance(max_patches, Integral) and max_patches >= all_patches:\n            return all_patches\n        elif isinstance(max_patches, Real) and 0 < max_patches < 1:\n            return int(max_patches * all_patches)\n        else:\n            raise ValueError('Invalid value for max_patches: %r' % max_patches)\n    else:\n        return all_patches",
    "docstring": "Compute the number of patches that will be extracted in an image. Read more in the :ref:. Parameters ---------- i_h : int The image height i_w : int The image with p_h : int The height of a patch p_w : int The width of a patch max_patches : int or float, default=None The maximum number of patches to extract. If is a float between 0 and 1, it is taken to be a proportion of the total number of patches. If is None, all possible patches are extracted.",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\feature_extraction\\image.py",
    "ast_data": "FunctionDef name:_compute_n_patches arg:i_h arg:i_w arg:p_h arg:p_w arg:max_patches arguments arg arg arg arg arg Assign Assign Assign If If BoolOp Call Compare Return return:yes If BoolOp Call Compare Return return:yes If BoolOp Call Compare Return return:yes Call Raise Call Return return:yes"
  },
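A worked check of the counting rule (note this imports a private sklearn helper, which may move between versions): a 32x32 image admits (32-8+1)**2 = 625 distinct 8x8 patch positions, and a float `max_patches` keeps that proportion of them.

```python
from sklearn.feature_extraction.image import _compute_n_patches

print(_compute_n_patches(32, 32, 8, 8))                   # 625
print(_compute_n_patches(32, 32, 8, 8, max_patches=0.1))  # int(0.1 * 625) = 62
```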
  {
    "library": "numpy",
    "name": "UfuncsFromDLP",
    "source_code": "class UfuncsFromDLP(Benchmark):\n    params = [[16, 32, (16, 16), (64, 64)], TYPES1]\n    param_names = ['shape', 'npdtypes']\n    timeout = 10\n\n    def setup(self, shape, npdtypes):\n        values = get_squares_()\n        self.xarg = values.get(npdtypes)[0]\n\n    def time_from_dlpack(self, shape, npdtypes):\n        np.from_dlpack(self.xarg)",
    "docstring": "Benchmark for creation functions",
    "type": "class",
    "file_path": "numpy\\benchmarks\\benchmarks\\bench_creation.py",
    "ast_data": "ClassDef name:UfuncsFromDLP Assign Assign Assign FunctionDef name:setup arg:self arg:shape arg:npdtypes arguments arg arg arg Assign Call Assign Call FunctionDef name:time_from_dlpack arg:self arg:shape arg:npdtypes arguments arg arg arg Call"
  },
  {
    "library": "scipy",
    "name": "PermutationMethod",
    "source_code": "@dataclass\nclass PermutationMethod(ResamplingMethod):\n    rng: object\n    _rng: object = field(init=False, repr=False, default=None)\n\n    @property\n    def random_state(self):\n        return self._random_state\n\n    @random_state.setter\n    def random_state(self, val):\n        self._random_state = val\n\n    @property\n    def rng(self):\n        return self._rng\n\n    def __init__(self, n_resamples=9999, batch=None, random_state=None, *, rng=None):\n        self._rng = rng\n        self._random_state = random_state\n        super().__init__(n_resamples=n_resamples, batch=batch)\n\n    def _asdict(self):\n        d = dict(n_resamples=self.n_resamples, batch=self.batch)\n        if self.rng is not None:\n            d['rng'] = self.rng\n        if self.random_state is not None:\n            d['random_state'] = self.random_state\n        return d",
    "docstring": "Configuration information for a permutation hypothesis test. Instances of this class can be passed into the parameter of some hypothesis test functions to perform a permutation version of the hypothesis tests. Attributes ---------- n_resamples : int, optional The number of resamples to perform. Default is 9999. batch : int, optional The number of resamples to process in each vectorized call to the statistic. Batch sizes >>1 tend to be faster when the statistic is vectorized, but memory usage scales linearly with the batch size. Default is `numpy.random.Generatorrngrngnumpy.random.Generatornumpy.random.default_rngrngrngrandom_staterandom_staterandom_staterandom_statenumpy.randomnumpy.random.RandomStaterandom_staterandom_staterandom_stateSPEC-007 numpy.random.RandomStatenumpy.random.Generatorrandom_staterngrandom_staterandom_staterngrng` should be used in new code.",
    "type": "class",
    "file_path": "scipy\\scipy\\stats\\_resampling.py",
    "ast_data": "ClassDef name:PermutationMethod Call FunctionDef name:random_state arg:self arguments arg Return return:yes FunctionDef name:random_state arg:self arg:val arguments arg arg Assign FunctionDef name:rng arg:self arguments arg Return return:yes FunctionDef name:__init__ arg:self arg:n_resamples arg:batch arg:random_state arguments arg arg arg arg arg Assign Assign Call Call FunctionDef name:_asdict arg:self arguments arg Assign Call If Compare Assign If Compare Assign Return return:yes"
  },
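A usage sketch, assuming a recent SciPy where hypothesis tests accept a `method` argument and `PermutationMethod` takes an `rng` keyword: the instance configures a permutation p-value computation.

```python
import numpy as np
from scipy import stats

rng = np.random.default_rng(0)
x, y = rng.normal(size=30), rng.normal(size=30)

# 999 permutations instead of the default 9999, seeded for reproducibility.
method = stats.PermutationMethod(n_resamples=999, rng=rng)
print(stats.pearsonr(x, y, method=method).pvalue)
```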
  {
    "library": "tensorflow",
    "name": "decorated",
    "source_code": "@parameterized.named_parameters(*params)\n@functools.wraps(f)\ndef decorated(self, model_type, *args, **kwargs):\n    if model_type == 'functional':\n        _test_functional_model_type(f, self, *args, **kwargs)\n    elif model_type == 'subclass':\n        _test_subclass_model_type(f, self, *args, **kwargs)\n    elif model_type == 'sequential':\n        _test_sequential_model_type(f, self, *args, **kwargs)\n    else:\n        raise ValueError('Unknown model type: %s' % (model_type,))",
    "docstring": "A run of a single test case w/ the specified model type.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\keras_parameterized.py",
    "ast_data": "FunctionDef name:decorated arg:self arg:model_type arguments arg arg arg arg If Compare Call If Compare Call If Compare Call Raise Call Call Call"
  },
  {
    "library": "pandas",
    "name": "__repr__",
    "source_code": "def __repr__(self) -> str:\n    footer = self._get_repr_footer()\n    length = len(self)\n    max_len = 10\n    if length > max_len:\n        num = max_len // 2\n        head = self[:num]._get_values_repr()\n        tail = self[-(max_len - num):]._get_values_repr()\n        body = f'{head[:-1]}, ..., {tail[1:]}'\n        length_info = f'Length: {len(self)}'\n        result = f'{body}\\n{length_info}\\n{footer}'\n    elif length > 0:\n        body = self._get_values_repr()\n        result = f'{body}\\n{footer}'\n    else:\n        body = '[]'\n        result = f'{body}, {footer}'\n    return result",
    "docstring": "String representation.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\arrays\\categorical.py",
    "ast_data": "FunctionDef name:__repr__ arg:self arguments arg Assign Call Assign Call Assign If Compare Assign Assign Call Assign Call Assign Assign Call Assign If Compare Assign Call Assign Assign Assign Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "reorder_for_partition_with_simple_dependency",
    "source_code": "def reorder_for_partition_with_simple_dependency(self, nodes: list[BaseSchedulerNode]) -> list[BaseSchedulerNode]:\n    front: list[BaseSchedulerNode] = []\n    middle: list[BaseSchedulerNode] = []\n    back: list[BaseSchedulerNode] = []\n\n    def only_output_user(node: BaseSchedulerNode) -> bool:\n        for buf in node.get_outputs():\n            for use in buf.users:\n                if not isinstance(use.node, OutputNode):\n                    return False\n        return True\n    for node in nodes:\n        should_partition = self.should_partition(node)\n        if should_partition and len(node.unmet_dependencies) == 0:\n            front.append(node)\n        elif should_partition and only_output_user(node):\n            back.append(node)\n        else:\n            middle.append(node)\n    return front + middle + back",
    "docstring": "Reorder a node if it should be partitioned and has simple dependency: 1. move a partitioned node to the front if it has no dependency 2. move a partitioned node to the back if it is only used by OutputNode 3. otherwise do not reorder",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\scheduler.py",
    "ast_data": "FunctionDef name:reorder_for_partition_with_simple_dependency arg:self arg:nodes arguments arg arg FunctionDef name:only_output_user arg:node arguments arg For Call For If Call Return return:yes Return return:yes For Assign Call If BoolOp Compare Call Call If BoolOp Call Call Call Return return:yes"
  },
  {
    "library": "scipy",
    "name": "_randtobest1",
    "source_code": "def _randtobest1(self, samples):\n    r0, r1, r2 = samples[..., :3].T\n    bprime = np.copy(self.population[r0])\n    bprime += self.scale * (self.population[0] - bprime)\n    bprime += self.scale * (self.population[r1] - self.population[r2])\n    return bprime",
    "docstring": "randtobest1bin, randtobest1exp",
    "type": "method",
    "file_path": "scipy\\scipy\\optimize\\_differentialevolution.py",
    "ast_data": "FunctionDef name:_randtobest1 arg:self arg:samples arguments arg arg Assign Assign Call Return return:yes"
  },
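A standalone restatement of the randtobest1 mutation rule, b' = x_r0 + F*(x_best - x_r0) + F*(x_r1 - x_r2); in the SciPy implementation `population[0]` holds the current best member, which this sketch assumes as well.

```python
import numpy as np

rng = np.random.default_rng(0)
population = rng.random((6, 3))  # 6 candidate solutions, 3 parameters each
F = 0.8                          # mutation scale factor
r0, r1, r2 = 1, 2, 3             # indices of distinct random members

bprime = population[r0].copy()
bprime += F * (population[0] - bprime)           # pull toward the best member
bprime += F * (population[r1] - population[r2])  # add a random difference vector
print(bprime)
```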
  {
    "library": "sphinx",
    "name": "collected_footnote",
    "source_code": "class collected_footnote(nodes.footnote):\n    pass",
    "docstring": "Footnotes that are collected are assigned this class.",
    "type": "class",
    "file_path": "sphinx\\sphinx\\writers\\latex.py",
    "ast_data": "ClassDef name:collected_footnote"
  },
  {
    "library": "pytorch",
    "name": "step",
    "source_code": "@_use_grad_for_differentiable\ndef step(self, closure=None):\n    self._cuda_graph_capture_health_check()\n    loss = None\n    if closure is not None:\n        with torch.enable_grad():\n            loss = closure()\n    for group in self.param_groups:\n        params: list[Tensor] = []\n        grads: list[Tensor] = []\n        prevs: list[Tensor] = []\n        step_sizes: list[Tensor] = []\n        state_steps: list[Tensor] = []\n        etaminus, etaplus = group['etas']\n        step_size_min, step_size_max = group['step_sizes']\n        foreach = group['foreach']\n        maximize = group['maximize']\n        has_complex = self._init_group(group, params, grads, prevs, step_sizes, state_steps)\n        rprop(params, grads, prevs, step_sizes, state_steps, step_size_min=step_size_min, step_size_max=step_size_max, etaminus=etaminus, etaplus=etaplus, foreach=foreach, maximize=maximize, differentiable=group['differentiable'], capturable=group['capturable'], has_complex=has_complex)\n    return loss",
    "docstring": "Perform a single optimization step. Args: closure (Callable, optional): A closure that reevaluates the model and returns the loss.",
    "type": "method",
    "file_path": "pytorch\\torch\\optim\\rprop.py",
    "ast_data": "FunctionDef name:step arg:self arg:closure arguments arg arg Call Assign If Compare With Call Assign Call For Assign Assign Assign Assign Assign Call Call Return return:yes"
  },
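A typical step cycle with this optimizer; the closure form is only needed when the optimizer must re-evaluate the loss itself (Rprop accepts one but works without it). Model and data here are illustrative.

```python
import torch

model = torch.nn.Linear(4, 1)
opt = torch.optim.Rprop(model.parameters(), lr=0.01)

loss = model(torch.randn(8, 4)).pow(2).mean()
opt.zero_grad()   # clear stale gradients
loss.backward()   # populate .grad
opt.step()        # apply the Rprop update
```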
  {
    "library": "tensorflow",
    "name": "_func_graph_as_default_inner_cm",
    "source_code": "@tf_contextlib.contextmanager\ndef _func_graph_as_default_inner_cm(func_graph: FuncGraph, outer_cm: ContextManager[ops.Graph]):\n    graph = ops.get_default_graph()\n    old_strategy_stack = func_graph._distribution_strategy_stack\n    func_graph._distribution_strategy_stack = list(graph._distribution_strategy_stack)\n    old_device_stack = func_graph._device_function_stack\n    if not context.executing_eagerly() and (device_stack_has_callable(graph._device_function_stack) or (func_graph._distribution_strategy_stack and (not ops.executing_eagerly_outside_functions()))):\n        func_graph._device_function_stack = graph._device_function_stack.copy()\n    old_creator_stack = func_graph._variable_creator_stack\n    func_graph._variable_creator_stack = graph._variable_creator_stack\n    old_graph_key = func_graph._graph_key\n    func_graph._graph_key = graph._graph_key\n    old_scope_exit_callbacks = func_graph._scope_exit_callbacks\n    func_graph._scope_exit_callbacks = []\n    with outer_cm as g:\n        try:\n            yield g\n        finally:\n            try:\n                for fn in func_graph._scope_exit_callbacks:\n                    fn()\n            finally:\n                func_graph._scope_exit_callbacks = old_scope_exit_callbacks\n                func_graph._distribution_strategy_stack = old_strategy_stack\n                func_graph._device_function_stack = old_device_stack\n                func_graph._variable_creator_stack = old_creator_stack\n                func_graph._graph_key = old_graph_key",
    "docstring": "Context manager for copying distribute.Strategy scope information.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\func_graph.py",
    "ast_data": "FunctionDef name:_func_graph_as_default_inner_cm arg:func_graph arg:outer_cm arguments arg arg Assign Call Assign Assign Call Assign If BoolOp Call BoolOp Call BoolOp Call Assign Call Assign Assign Assign Assign Assign Assign With Try Try For Call Assign Assign Assign Assign Assign"
  },
  {
    "library": "pygame",
    "name": "symtree",
    "source_code": "def symtree(srcdir: Path, destdir: Path, verbose: bool=False):\n    if not destdir.is_dir():\n        rmpath(destdir, verbose)\n        if verbose:\n            print(f\"- Creating directory symlink from '{destdir}' pointing to '{srcdir}'\")\n        destdir.symlink_to(srcdir)\n        return\n    for path in srcdir.glob('*'):\n        destpath = destdir / path.name\n        if path.is_dir():\n            symtree(path, destpath, verbose)\n        else:\n            rmpath(destpath, verbose)\n            if verbose:\n                print(f\"- Creating file symlink from '{destpath}' pointing to '{path}'\")\n            destpath.symlink_to(path)",
    "docstring": "This function creates symlinks pointing to srcdir, from destdir, such that existing folders and files in the tree of destdir are retained",
    "type": "function",
    "file_path": "pygame\\buildconfig\\macdependencies\\install_mac_deps.py",
    "ast_data": "FunctionDef name:symtree arg:srcdir arg:destdir arg:verbose arguments arg arg arg If Call Call If Call Call Return return:no For Call Assign If Call Call Call If Call Call"
  },
  {
    "library": "django",
    "name": "_chain",
    "source_code": "def _chain(self):\n    obj = self._clone()\n    if obj._sticky_filter:\n        obj.query.filter_is_sticky = True\n        obj._sticky_filter = False\n    return obj",
    "docstring": "Return a copy of the current QuerySet that's ready for another operation.",
    "type": "method",
    "file_path": "django\\django\\db\\models\\query.py",
    "ast_data": "FunctionDef name:_chain arg:self arguments arg Assign Call If Assign Assign Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "set_xlabel",
    "source_code": "def set_xlabel(self, xlabel, fontdict=None, labelpad=None, *, loc=None, **kwargs):\n    if labelpad is not None:\n        self.xaxis.labelpad = labelpad\n    protected_kw = ['x', 'horizontalalignment', 'ha']\n    if {*kwargs} & {*protected_kw}:\n        if loc is not None:\n            raise TypeError(f\"Specifying 'loc' is disallowed when any of its corresponding low level keyword arguments ({protected_kw}) are also supplied\")\n    else:\n        loc = mpl._val_or_rc(loc, 'xaxis.labellocation')\n        _api.check_in_list(('left', 'center', 'right'), loc=loc)\n        x = {'left': 0, 'center': 0.5, 'right': 1}[loc]\n        kwargs.update(x=x, horizontalalignment=loc)\n    return self.xaxis.set_label_text(xlabel, fontdict, **kwargs)",
    "docstring": "Set the label for the x-axis. Parameters ---------- xlabel : str The label text. labelpad : float, default: :rc: Spacing in points from the Axes bounding box including ticks and tick labels. If None, the previous value is left as is. loc : {'left', 'center', 'right'}, default: :rc: The label position. This is a high-level alternative for passing parameters *x* and *horizontalalignment*. Other Parameters ---------------- **kwargs : properties properties control the appearance of the label. See Also -------- text : Documents the properties supported by .",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\axes\\_base.py",
    "ast_data": "FunctionDef name:set_xlabel arg:self arg:xlabel arg:fontdict arg:labelpad arguments arg arg arg arg arg arg If Compare Assign Assign If If Compare Raise Call Assign Call Call Assign Call Return return:yes Call"
  },
  {
    "library": "authlib",
    "name": "get_jwks",
    "source_code": "def get_jwks(self):\n    raise NotImplementedError()",
    "docstring": "Return the JWKs that will be used to sign the JWT access token. Developers MUST re-implement this method:: def get_jwks(self): return load_jwks(\"jwks.json\")",
    "type": "method",
    "file_path": "authlib\\authlib\\oauth2\\rfc9068\\token.py",
    "ast_data": "FunctionDef name:get_jwks arg:self arguments arg Raise Call"
  },
  {
    "library": "scikit-learn",
    "name": "predict_log_proba",
    "source_code": "def predict_log_proba(self, X):\n    proba = self.predict_proba(X)\n    if self.n_outputs_ == 1:\n        return np.log(proba)\n    else:\n        for k in range(self.n_outputs_):\n            proba[k] = np.log(proba[k])\n        return proba",
    "docstring": "Predict class log-probabilities for X. The predicted class log-probabilities of an input sample is computed as the log of the mean predicted class probabilities of the trees in the forest. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) The input samples. Internally, its dtype will be converted to `classes_`.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\ensemble\\_forest.py",
    "ast_data": "FunctionDef name:predict_log_proba arg:self arg:X arguments arg arg Assign Call If Compare Return return:yes Call For Call Assign Call Return return:yes"
  },
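A consistency check of the documented behavior: the log-probabilities equal the log of the averaged per-tree probabilities. Dataset and forest size are arbitrary.

```python
import numpy as np
from sklearn.datasets import make_classification
from sklearn.ensemble import RandomForestClassifier

X, y = make_classification(n_samples=100, random_state=0)
clf = RandomForestClassifier(n_estimators=25, random_state=0).fit(X, y)

# predict_log_proba is exactly log(predict_proba) for single-output forests.
assert np.allclose(clf.predict_log_proba(X[:3]), np.log(clf.predict_proba(X[:3])))
```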
  {
    "library": "matplotlib",
    "name": "__init__",
    "source_code": "def __init__(self, locator, tz=None, formats=None, offset_formats=None, zero_formats=None, show_offset=True, *, usetex=None):\n    self._locator = locator\n    self._tz = tz\n    self.defaultfmt = '%Y'\n    if formats:\n        if len(formats) != 6:\n            raise ValueError('formats argument must be a list of 6 format strings (or None)')\n        self.formats = formats\n    else:\n        self.formats = ['%Y', '%b', '%d', '%H:%M', '%H:%M', '%S.%f']\n    if zero_formats:\n        if len(zero_formats) != 6:\n            raise ValueError('zero_formats argument must be a list of 6 format strings (or None)')\n        self.zero_formats = zero_formats\n    elif formats:\n        self.zero_formats = [''] + self.formats[:-1]\n    else:\n        self.zero_formats = [''] + self.formats[:-1]\n        self.zero_formats[3] = '%b-%d'\n    if offset_formats:\n        if len(offset_formats) != 6:\n            raise ValueError('offset_formats argument must be a list of 6 format strings (or None)')\n        self.offset_formats = offset_formats\n    else:\n        self.offset_formats = ['', '%Y', '%Y-%b', '%Y-%b-%d', '%Y-%b-%d', '%Y-%b-%d %H:%M']\n    self.offset_string = ''\n    self.show_offset = show_offset\n    self._usetex = mpl._val_or_rc(usetex, 'text.usetex')",
    "docstring": "Autoformat the date labels. The default format is used to form an initial string, and then redundant elements are removed.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\dates.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:locator arg:tz arg:formats arg:offset_formats arg:zero_formats arg:show_offset arguments arg arg arg arg arg arg arg arg Assign Assign Assign If If Compare Call Raise Call Assign Assign If If Compare Call Raise Call Assign If Assign Assign Assign If If Compare Call Raise Call Assign Assign Assign Assign Assign Call"
  },
  {
    "library": "matplotlib",
    "name": "get_glyphs_subset",
    "source_code": "def get_glyphs_subset(fontfile, characters):\n    options = subset.Options(glyph_names=True, recommended_glyphs=True)\n    options.drop_tables += ['FFTM', 'PfEd', 'BDF', 'meta', 'MERG', 'TSIV', 'Zapf', 'bdat', 'bloc', 'cidg', 'fdsc', 'feat', 'fmtx', 'fond', 'just', 'kerx', 'ltag', 'morx', 'trak', 'xref']\n    if fontfile.endswith('.ttc'):\n        options.font_number = 0\n    font = subset.load_font(fontfile, options)\n    subsetter = subset.Subsetter(options=options)\n    subsetter.populate(text=characters)\n    subsetter.subset(font)\n    return font",
    "docstring": "Subset a TTF font Reads the named fontfile and restricts the font to the characters. Parameters ---------- fontfile : str Path to the font file characters : str Continuous set of characters to include in subset Returns ------- fontTools.ttLib.ttFont.TTFont An open font object representing the subset, which needs to be closed by the caller.",
    "type": "function",
    "file_path": "matplotlib\\lib\\matplotlib\\backends\\_backend_pdf_ps.py",
    "ast_data": "FunctionDef name:get_glyphs_subset arg:fontfile arg:characters arguments arg arg Assign Call If Call Assign Assign Call Assign Call Call Call Return return:yes"
  },
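The same idea with fontTools directly, outside matplotlib (the font path is an assumption): only the glyphs needed for the given text survive in the saved output.

```python
from fontTools import subset

options = subset.Options(glyph_names=True, recommended_glyphs=True)
font = subset.load_font("DejaVuSans.ttf", options)   # path assumed to exist
subsetter = subset.Subsetter(options=options)
subsetter.populate(text="Hello")                      # keep glyphs for this text
subsetter.subset(font)
font.save("DejaVuSans-subset.ttf")
font.close()
```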
  {
    "library": "django",
    "name": "__repr__",
    "source_code": "def __repr__(self):\n    return '<Raster object at %s>' % hex(addressof(self._ptr))",
    "docstring": "Short-hand representation because WKB may be very large.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\gdal\\raster\\source.py",
    "ast_data": "FunctionDef name:__repr__ arg:self arguments arg Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_delegate_method",
    "source_code": "def _delegate_method(keras_tensor_cls, method_name):\n\n    def delegate(self, *args, **kwargs):\n        return InstanceMethod(method_name)(self, args, kwargs)\n    setattr(keras_tensor_cls, method_name, delegate)",
    "docstring": "Register method on a KerasTensor class. Calling this function times with the same arguments should be a no-op. This method exposes an instance method on the KerasTensor class that will use an layer to run the desired method on the represented intermediate values in the model. Args: keras_tensor_cls: The KerasTensor subclass that should expose the property. method_name: The name of the method to expose and delegate to the represented (Composite)Tensor.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\layers\\core.py",
    "ast_data": "FunctionDef name:_delegate_method arg:keras_tensor_cls arg:method_name arguments arg arg FunctionDef name:delegate arg:self arguments arg arg arg Return return:yes Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_convert_fetches_to_input_format",
    "source_code": "def _convert_fetches_to_input_format(self, input_fetches, current_fetches):\n    if isinstance(input_fetches, tensor_lib.Tensor):\n        if len(current_fetches) != 1:\n            raise RuntimeError('Tensor tracer input/output fetches do not match.')\n        return current_fetches[0]\n    elif len(current_fetches) != len(current_fetches):\n        raise RuntimeError('Tensor tracer input/output fetches do not match.')\n    elif isinstance(input_fetches, tuple):\n        return tuple(current_fetches)\n    else:\n        return current_fetches",
    "docstring": "Changes current_fetches' format, so that it matches input_fetches.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\tpu\\tensor_tracer.py",
    "ast_data": "FunctionDef name:_convert_fetches_to_input_format arg:self arg:input_fetches arg:current_fetches arguments arg arg arg If Call If Compare Call Raise Call Return return:yes If Compare Call Call Raise Call If Call Return return:yes Call Return return:yes"
  },
  {
    "library": "pandas",
    "name": "_require_listlike",
    "source_code": "def _require_listlike(level, arr, arrname: str):\n    if level is not None and (not is_list_like(level)):\n        if not is_list_like(arr):\n            raise TypeError(f'{arrname} must be list-like')\n        if len(arr) > 0 and is_list_like(arr[0]):\n            raise TypeError(f'{arrname} must be list-like')\n        level = [level]\n        arr = [arr]\n    elif level is None or is_list_like(level):\n        if not is_list_like(arr) or not is_list_like(arr[0]):\n            raise TypeError(f'{arrname} must be list of lists-like')\n    return (level, arr)",
    "docstring": "Ensure that level is either None or listlike, and arr is list-of-listlike.",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\indexes\\multi.py",
    "ast_data": "FunctionDef name:_require_listlike arg:level arg:arr arg:arrname arguments arg arg arg If BoolOp Compare Call If Call Raise Call If BoolOp Compare Call Call Raise Call Assign Assign If BoolOp Compare Call If BoolOp Call Call Raise Call Return return:yes"
  },
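A behavior sketch of this private normalizer (import path may shift between pandas versions): a scalar level with a flat list is wrapped one level deeper, so downstream code can always iterate over (level, arr) pairs.

```python
from pandas.core.indexes.multi import _require_listlike

level, arr = _require_listlike(0, ["a", "b"], "names")
print(level, arr)  # [0] [['a', 'b']]
```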
  {
    "library": "tensorflow",
    "name": "one_hot",
    "source_code": "@dispatch.add_dispatch_support\n@doc_controls.do_not_generate_docs\ndef one_hot(indices, num_classes):\n    return array_ops.one_hot(indices, depth=num_classes, axis=-1)",
    "docstring": "Computes the one-hot representation of an integer tensor. Args: indices: nD integer tensor of shape num_classes: Integer, number of classes to consider. Returns: (n + 1)D one hot representation of the input with shape Returns: The one-hot tensor.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\backend.py",
    "ast_data": "FunctionDef name:one_hot arg:indices arg:num_classes arguments arg arg Return return:yes Call"
  },
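The keras backend helper above wraps `tf.one_hot` with `axis=-1`; the equivalent core-TF call adds a trailing `num_classes` dimension.

```python
import tensorflow as tf

indices = tf.constant([[0, 2], [1, 1]])           # shape (2, 2)
print(tf.one_hot(indices, depth=3, axis=-1).shape)  # (2, 2, 3)
```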
  {
    "library": "tensorflow",
    "name": "generate",
    "source_code": "def generate(arglist, git_tag_override=None):\n    spec, head_symlink, _, dest_file = arglist\n    data = json.load(open(spec))\n    git_version = None\n    if not data['git']:\n        git_version = b'unknown'\n    else:\n        old_branch = data['branch']\n        new_branch = parse_branch_ref(head_symlink)\n        if new_branch != old_branch:\n            raise RuntimeError(\"Run ./configure again, branch was '%s' but is now '%s'\" % (old_branch, new_branch))\n        git_version = get_git_version(data['path'], git_tag_override)\n    write_version_info(dest_file, git_version)",
    "docstring": "Generate version_info.cc as given . Args: arglist: should be a sequence that contains spec, head_symlink, ref_symlink, destination_file. is the filename where version_info.cc will be written is a filename where the file contains a JSON dictionary 'git' bool that is true if the source is in a git repo 'path' base path of the source code 'branch' the name of the ref specification of the current branch/tag is a filename to HEAD that is cross-referenced against what is contained in the json branch designation. is unused in this script but passed, because the build system uses that file to detect when commits happen. git_tag_override: Override the value for the git tag. This is useful for releases where we want to build the release before the git tag is created. Raises: RuntimeError: If ./configure needs to be run, RuntimeError will be raised.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\tools\\git\\gen_git_source.py",
    "ast_data": "FunctionDef name:generate arg:arglist arg:git_tag_override arguments arg arg Assign Assign Call Call Assign If Assign Assign Assign Call If Compare Raise Call Assign Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_prune_invalid_ids",
    "source_code": "def _prune_invalid_ids(sparse_ids, sparse_weights):\n    is_id_valid = math_ops.greater_equal(sparse_ids.values, 0)\n    if sparse_weights is not None:\n        is_id_valid = math_ops.logical_and(is_id_valid, array_ops.ones_like(sparse_weights.values, dtype=dtypes.bool))\n    sparse_ids = sparse_ops.sparse_retain(sparse_ids, is_id_valid)\n    if sparse_weights is not None:\n        sparse_weights = sparse_ops.sparse_retain(sparse_weights, is_id_valid)\n    return (sparse_ids, sparse_weights)",
    "docstring": "Prune invalid IDs (< 0) from the input ids and weights.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\embedding_ops.py",
    "ast_data": "FunctionDef name:_prune_invalid_ids arg:sparse_ids arg:sparse_weights arguments arg arg Assign Call If Compare Assign Call Call Assign Call If Compare Assign Call Return return:yes"
  },
  {
    "library": "django",
    "name": "z",
    "source_code": "@property\ndef z(self):\n    if self.is_3d:\n        return capi.getz(self.ptr, 0)",
    "docstring": "Return the Z coordinate for this Point.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\gdal\\geometries.py",
    "ast_data": "FunctionDef name:z arg:self arguments arg If Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "rvs",
    "source_code": "def rvs(self, dim, size=1, random_state=None):\n    random_state = self._get_random_state(random_state)\n    size = int(size)\n    dim = self._process_parameters(dim)\n    size = (size,) if size > 1 else ()\n    z = 1 / math.sqrt(2) * (random_state.normal(size=size + (dim, dim)) + 1j * random_state.normal(size=size + (dim, dim)))\n    q, r = np.linalg.qr(z)\n    d = r.diagonal(offset=0, axis1=-2, axis2=-1)\n    q *= (d / abs(d))[..., np.newaxis, :]\n    return q",
    "docstring": "Draw random samples from U(N). Parameters ---------- dim : integer Dimension of space (N). size : integer, optional Number of samples to draw (default 1). Returns ------- rvs : ndarray or scalar Random size N-dimensional matrices, dimension (size, dim, dim)",
    "type": "method",
    "file_path": "scipy\\scipy\\stats\\_multivariate.py",
    "ast_data": "FunctionDef name:rvs arg:self arg:dim arg:size arg:random_state arguments arg arg arg arg Assign Call Assign Call Assign Call Assign Compare Assign Call Call Call Assign Call Assign Call Call Return return:yes"
  },
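The public entry point for this sampler is `scipy.stats.unitary_group`, which draws Haar-distributed unitary matrices; the QR trick in the method above guarantees unitarity, as this check confirms.

```python
import numpy as np
from scipy.stats import unitary_group

U = unitary_group.rvs(3, random_state=0)
print(np.allclose(U @ U.conj().T, np.eye(3)))  # True: U is unitary
```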
  {
    "library": "pandas",
    "name": "_apply_pairwise",
    "source_code": "def _apply_pairwise(self, target: DataFrame | Series, other: DataFrame | Series | None, pairwise: bool | None, func: Callable[[DataFrame | Series, DataFrame | Series], DataFrame | Series], numeric_only: bool) -> DataFrame | Series:\n    target = self._create_data(target, numeric_only)\n    if other is None:\n        other = target\n        pairwise = True if pairwise is None else pairwise\n    elif not isinstance(other, (ABCDataFrame, ABCSeries)):\n        raise ValueError('other must be a DataFrame or Series')\n    elif other.ndim == 2 and numeric_only:\n        other = self._make_numeric_only(other)\n    return flex_binary_moment(target, other, func, pairwise=bool(pairwise))",
    "docstring": "Apply the given pairwise function given 2 pandas objects (DataFrame/Series)",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\window\\rolling.py",
    "ast_data": "FunctionDef name:_apply_pairwise arg:self arg:target arg:other arg:pairwise arg:func arg:numeric_only arguments arg arg arg arg arg arg Assign Call If Compare Assign Assign Compare If Call Raise Call If BoolOp Compare Assign Call Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "add_scheduler_init_hook",
    "source_code": "def add_scheduler_init_hook(pre_fn: Callable[..., Any], post_fn: Optional[Callable[..., Any]]=None) -> Any:\n    from torch._inductor.scheduler import Scheduler\n    orig_fn = Scheduler.__init__\n\n    def wrapper(scheduler: Any, nodes: Any) -> Any:\n        pre_fn(scheduler, nodes)\n        out = orig_fn(scheduler, nodes)\n        if post_fn:\n            post_fn(scheduler, nodes)\n        return out\n    return unittest.mock.patch.object(Scheduler, '__init__', wrapper)",
    "docstring": "Add hook functions to be called at the beginning and end of Scheduler.__init__. Used for unit tests.",
    "type": "function",
    "file_path": "pytorch\\torch\\_inductor\\utils.py",
    "ast_data": "FunctionDef name:add_scheduler_init_hook arg:pre_fn arg:post_fn arguments arg arg Assign FunctionDef name:wrapper arg:scheduler arg:nodes arguments arg arg Call Assign Call If Call Return return:yes Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "print_results",
    "source_code": "def print_results(self):\n    results = self.average_results()\n    results = sorted(results, key=lambda x: (x.nfail, x.mean_time))\n    if not results:\n        return\n    print('')\n    print('=========================================================')\n    print(f'Optimizer benchmark: {self.function_name}')\n    print(f'dimensions: {results[0].ndim}, extra kwargs: {str(self.minimizer_kwargs)}')\n    print(f'averaged over {results[0].ntrials} starting configurations')\n    print('  Optimizer    nfail   nfev    njev    nhev    time')\n    print('---------------------------------------------------------')\n    for res in results:\n        print(f'{res.name:11s}  | {res.nfail:4d}  | {res.mean_nfev:4d}  | {res.mean_njev:4d}  | {res.mean_nhev:4d}  | {res.mean_time:.6g}')",
    "docstring": "print the current list of results",
    "type": "method",
    "file_path": "scipy\\benchmarks\\benchmarks\\optimize.py",
    "ast_data": "FunctionDef name:print_results arg:self arguments arg Assign Call Assign Call arguments arg If Return return:no Call Call Call Call Call Call Call Call For Call"
  },
  {
    "library": "scikit-learn",
    "name": "_inverse_binarize_multiclass",
    "source_code": "def _inverse_binarize_multiclass(y, classes):\n    classes = np.asarray(classes)\n    if sp.issparse(y):\n        y = y.tocsr()\n        n_samples, n_outputs = y.shape\n        outputs = np.arange(n_outputs)\n        row_max = min_max_axis(y, 1)[1]\n        row_nnz = np.diff(y.indptr)\n        y_data_repeated_max = np.repeat(row_max, row_nnz)\n        y_i_all_argmax = np.flatnonzero(y_data_repeated_max == y.data)\n        if row_max[-1] == 0:\n            y_i_all_argmax = np.append(y_i_all_argmax, [len(y.data)])\n        index_first_argmax = np.searchsorted(y_i_all_argmax, y.indptr[:-1])\n        y_ind_ext = np.append(y.indices, [0])\n        y_i_argmax = y_ind_ext[y_i_all_argmax[index_first_argmax]]\n        y_i_argmax[np.where(row_nnz == 0)[0]] = 0\n        samples = np.arange(n_samples)[(row_nnz > 0) & (row_max.ravel() == 0)]\n        for i in samples:\n            ind = y.indices[y.indptr[i]:y.indptr[i + 1]]\n            y_i_argmax[i] = classes[np.setdiff1d(outputs, ind)][0]\n        return classes[y_i_argmax]\n    else:\n        return classes.take(y.argmax(axis=1), mode='clip')",
    "docstring": "Inverse label binarization transformation for multiclass. Multiclass uses the maximal score instead of a threshold.",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\preprocessing\\_label.py",
    "ast_data": "FunctionDef name:_inverse_binarize_multiclass arg:y arg:classes arguments arg arg Assign Call If Call Assign Call Assign Assign Call Assign Call Assign Call Assign Call Assign Call Compare If Compare Assign Call Call Assign Call Assign Call Assign Assign Call Compare Assign Call Compare Compare Call For Assign Assign Call Return return:yes Return return:yes Call Call"
  },
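The public path to this helper is `LabelBinarizer.inverse_transform`, which for multiclass input picks the class with the maximal score; the score matrix below is illustrative.

```python
import numpy as np
from sklearn.preprocessing import LabelBinarizer

lb = LabelBinarizer().fit([0, 1, 2])
scores = np.array([[0.1, 0.8, 0.1],
                   [0.7, 0.2, 0.1]])
print(lb.inverse_transform(scores))  # [1 0]: argmax per row, mapped to classes_
```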
  {
    "library": "cherrypy",
    "name": "__init__",
    "source_code": "def __init__(self, *a, **kw):\n    pass",
    "docstring": "Initialize a hook map instance post-construction.",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\_cprequest.py",
    "ast_data": "FunctionDef name:__init__ arg:self arguments arg arg arg"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, port=2223, task_type=None, task_id=None, rpc_layer=None, environment=None):\n    self._task_type = task_type\n    self._task_id = task_id\n    self._rpc_layer = rpc_layer\n    self._environment = environment\n    self._port = str(port)",
    "docstring": "Creates a new SageMakerClusterResolver. Args: port: (integer, optional) Override default port usage of 2223 task_type: (String, optional) Overrides the task type. task_id: (Integer, optional) Overrides the task index. rpc_layer: (String, optional) Overrides the rpc layer TensorFlow uses. environment: (String, optional) Overrides the environment TensorFlow operates in.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\cluster_resolver\\sagemaker_cluster_resolver.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:port arg:task_type arg:task_id arg:rpc_layer arg:environment arguments arg arg arg arg arg arg Assign Assign Assign Assign Assign Call"
  },
  {
    "library": "pytorch",
    "name": "is_closed",
    "source_code": "@abstractmethod\ndef is_closed(self) -> bool:\n    pass",
    "docstring": "Check whether the rendezvous has been closed. A closed rendezvous means all future attempts to re-rendezvous within same job will fail. `set_closed` have semantics of eventual propagation and should not be used for synchronization. The intention is that if at least one node decides the job is finished, it will close the rendezvous, and other nodes will soon observe this and stop running as well.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\elastic\\rendezvous\\api.py",
    "ast_data": "FunctionDef name:is_closed arg:self arguments arg"
  },
  {
    "library": "kornia",
    "name": "keypoints",
    "source_code": "@property\ndef keypoints(self) -> Optional[Tensor]:\n    return self.points[0] if isinstance(self.points, tuple) else None",
    "docstring": "The keypoints from the .",
    "type": "method",
    "file_path": "kornia\\kornia\\contrib\\models\\structures.py",
    "ast_data": "FunctionDef name:keypoints arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "sphinx",
    "name": "handle_page",
    "source_code": "def handle_page(self, pagename: str, addctx: dict[str, Any], templatename: str='page.html', *, outfilename: Path | None=None, event_arg: Any=None) -> None:\n    if pagename.startswith('genindex') and 'genindexentries' in addctx:\n        if not self.use_index:\n            return\n        self.fix_genindex(addctx['genindexentries'])\n    addctx['doctype'] = self.doctype\n    super().handle_page(pagename, addctx, templatename, outfilename=outfilename, event_arg=event_arg)",
    "docstring": "Create a rendered page. This method is overwritten for genindex pages in order to fix href link attributes.",
    "type": "method",
    "file_path": "sphinx\\sphinx\\builders\\_epub_base.py",
    "ast_data": "FunctionDef name:handle_page arg:self arg:pagename arg:addctx arg:templatename arguments arg arg arg arg arg arg If BoolOp Call Compare If Return return:no Call Assign Call Call"
  },
  {
    "library": "scipy",
    "name": "_handle_lhs_derivatives",
    "source_code": "def _handle_lhs_derivatives(t, k, xval, ab, kl, ku, deriv_ords, offset=0):\n    left = _dierckx.find_interval(t, k, float(xval), k, False)\n    for row in range(deriv_ords.shape[0]):\n        nu = deriv_ords[row]\n        wrk = _dierckx.evaluate_all_bspl(t, k, xval, left, nu)\n        for a in range(k + 1):\n            clmn = left - k + a\n            ab[kl + ku + offset + row - clmn, clmn] = wrk[a]",
    "docstring": "Fill in the entries of the colocation matrix corresponding to known derivatives at . The colocation matrix is in the banded storage, as prepared by _coloc. No error checking. Parameters ---------- t : ndarray, shape (nt + k + 1,) knots k : integer B-spline order xval : float The value at which to evaluate the derivatives at. ab : ndarray, shape(2*kl + ku + 1, nt), Fortran order B-spline colocation matrix. This argument is modified *in-place*. kl : integer Number of lower diagonals of ab. ku : integer Number of upper diagonals of ab. deriv_ords : 1D ndarray Orders of derivatives known at xval offset : integer, optional Skip this many rows of the matrix ab.",
    "type": "function",
    "file_path": "scipy\\scipy\\interpolate\\_bsplines.py",
    "ast_data": "FunctionDef name:_handle_lhs_derivatives arg:t arg:k arg:xval arg:ab arg:kl arg:ku arg:deriv_ords arg:offset arguments arg arg arg arg arg arg arg arg Assign Call Call For Call Assign Assign Call For Call Assign Assign"
  },
  {
    "library": "pytorch",
    "name": "enable_torchbind_tracing",
    "source_code": "@contextmanager\ndef enable_torchbind_tracing():\n    try:\n        KNOWN_TYPES.append(torch.ScriptObject)\n        torch.ScriptMethod.__call__ = torchbind_method_redispatch\n        yield\n    finally:\n        assert KNOWN_TYPES.pop() is torch.ScriptObject, 'Someone else messed with KNOWN_TYPES during tracing, exploding.'\n        torch.ScriptMethod.__call__ = _orig_scriptmethod_call",
    "docstring": "Context manager that acts as a feature flag to enable torchbind tracing behavior. Once torchbind tracing has been stabilized, we can remove this and turn it always on.",
    "type": "function",
    "file_path": "pytorch\\torch\\_higher_order_ops\\torchbind.py",
    "ast_data": "FunctionDef name:enable_torchbind_tracing arguments Try Call Assign Compare Call Assign"
  },
  {
    "library": "matplotlib",
    "name": "set_position",
    "source_code": "def set_position(self, pos):\n    self._pos = pos",
    "docstring": "Set the position of the rectangle. Parameters ---------- pos : tuple of 4 floats position of the rectangle that will be divided",
    "type": "method",
    "file_path": "matplotlib\\lib\\mpl_toolkits\\axes_grid1\\axes_divider.py",
    "ast_data": "FunctionDef name:set_position arg:self arg:pos arguments arg arg Assign"
  },
  {
    "library": "django",
    "name": "empty",
    "source_code": "@property\ndef empty(self):\n    return capi.geos_isempty(self.ptr)",
    "docstring": "Return a boolean indicating whether the set of points in this Geometry are empty.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\geos\\geometry.py",
    "ast_data": "FunctionDef name:empty arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "seaborn",
    "name": "__call__",
    "source_code": "def __call__(self, x):\n    k = self._compute_k(len(x))\n    exp = (np.arange(k + 1, 1, -1), np.arange(2, k + 2))\n    levels = k + 1 - np.concatenate([exp[0], exp[1][1:]])\n    percentiles = 100 * np.concatenate([0.5 ** exp[0], 1 - 0.5 ** exp[1]])\n    if self.k_depth == 'full':\n        percentiles[0] = 0\n        percentiles[-1] = 100\n    values = np.percentile(x, percentiles)\n    fliers = np.asarray(x[(x < values.min()) | (x > values.max())])\n    median = np.percentile(x, 50)\n    return {'k': k, 'levels': levels, 'percs': percentiles, 'values': values, 'fliers': fliers, 'median': median}",
    "docstring": "Evaluate the letter values.",
    "type": "method",
    "file_path": "seaborn\\seaborn\\_statistics.py",
    "ast_data": "FunctionDef name:__call__ arg:self arg:x arguments arg arg Assign Call Call Assign Call Call Assign Call Assign Call If Compare Assign Assign Assign Call Assign Call Compare Call Compare Call Assign Call Return return:yes"
  },
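A standalone check of the percentile ladder constructed above: for k=3 the exponent arrays yield the letter-value percentiles [6.25, 12.5, 25, 75, 87.5, 93.75] (the median is computed separately in the method).

```python
import numpy as np

k = 3
exp = (np.arange(k + 1, 1, -1), np.arange(2, k + 2))
percentiles = 100 * np.concatenate([0.5 ** exp[0], 1 - 0.5 ** exp[1]])
print(percentiles)  # [ 6.25 12.5  25.   75.   87.5  93.75]
```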
  {
    "library": "pytorch",
    "name": "is_available",
    "source_code": "def is_available() -> bool:\n    return device_count() > 0",
    "docstring": "Return a bool indicating if XPU is currently available.",
    "type": "function",
    "file_path": "pytorch\\torch\\xpu\\__init__.py",
    "ast_data": "FunctionDef name:is_available arguments Return return:yes Compare Call"
  },
  {
    "library": "tensorflow",
    "name": "is_square",
    "source_code": "@property\ndef is_square(self):\n    auto_square_check = self.domain_dimension == self.range_dimension\n    if self._is_square_set_or_implied_by_hints is False and auto_square_check:\n        raise ValueError('User set is_square hint to False, but the operator was square.')\n    if self._is_square_set_or_implied_by_hints is None:\n        return auto_square_check\n    return self._is_square_set_or_implied_by_hints",
    "docstring": "Return depending on if this operator is square.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\linalg\\linear_operator.py",
    "ast_data": "FunctionDef name:is_square arg:self arguments arg Assign Compare If BoolOp Compare Raise Call If Compare Return return:yes Return return:yes"
  },
  {
    "library": "scipy",
    "name": "fmin_ncg",
    "source_code": "def fmin_ncg(f, x0, fprime, fhess_p=None, fhess=None, args=(), avextol=1e-05, epsilon=_epsilon, maxiter=None, full_output=0, disp=1, retall=0, callback=None, c1=0.0001, c2=0.9):\n    opts = {'xtol': avextol, 'eps': epsilon, 'maxiter': maxiter, 'disp': disp, 'return_all': retall}\n    callback = _wrap_callback(callback)\n    res = _minimize_newtoncg(f, x0, args, fprime, fhess, fhess_p, callback=callback, c1=c1, c2=c2, **opts)\n    if full_output:\n        retlist = (res['x'], res['fun'], res['nfev'], res['njev'], res['nhev'], res['status'])\n        if retall:\n            retlist += (res['allvecs'],)\n        return retlist\n    elif retall:\n        return (res['x'], res['allvecs'])\n    else:\n        return res['x']",
    "docstring": "Unconstrained minimization of a function using the Newton-CG method. Parameters ---------- f : callable `methodfhess_pfhessfhessfhess_pfhessfhess_pfprimefhess_pfprimec1c2`. References ---------- Wright & Nocedal, 'Numerical Optimization', 1999, p. 140.",
    "type": "function",
    "file_path": "scipy\\scipy\\optimize\\_optimize.py",
    "ast_data": "FunctionDef name:fmin_ncg arg:f arg:x0 arg:fprime arg:fhess_p arg:fhess arg:args arg:avextol arg:epsilon arg:maxiter arg:full_output arg:disp arg:retall arg:callback arg:c1 arg:c2 arguments arg arg arg arg arg arg arg arg arg arg arg arg arg arg arg Assign Assign Call Assign Call If Assign If Return return:yes If Return return:yes Return return:yes"
  },
  {
    "library": "django",
    "name": "_format_names",
    "source_code": "def _format_names(self, objs):\n    names = {'app_label': self.app_label.lower(), 'class': self.model_name}\n    new_objs = []\n    for obj in objs:\n        obj = obj.clone()\n        obj.name %= names\n        new_objs.append(obj)\n    return new_objs",
    "docstring": "App label/class name interpolation for object names.",
    "type": "method",
    "file_path": "django\\django\\db\\models\\options.py",
    "ast_data": "FunctionDef name:_format_names arg:self arg:objs arguments arg arg Assign Call Assign For Assign Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_convert_to_sparse_tensors",
    "source_code": "def _convert_to_sparse_tensors(sp_inputs):\n    if isinstance(sp_inputs, list):\n        return [_convert_to_sparse_tensor(sp_input) for sp_input in sp_inputs]\n    if isinstance(sp_inputs, tuple):\n        return (_convert_to_sparse_tensor(sp_input) for sp_input in sp_inputs)\n    raise TypeError('Inputs must be a list or tuple.')",
    "docstring": "Convert to objects and return them. Args: sp_inputs: or of or objects. Returns: converted to objects. Raises: ValueError: if any item in is neither nor .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\sparse_ops.py",
    "ast_data": "FunctionDef name:_convert_to_sparse_tensors arg:sp_inputs arguments arg If Call Return return:yes Call If Call Return return:yes Call Raise Call"
  },
  {
    "library": "django",
    "name": "time_format",
    "source_code": "def time_format(value, format=None, use_l10n=None):\n    return dateformat.time_format(value, get_format(format or 'TIME_FORMAT', use_l10n=use_l10n))",
    "docstring": "Format a datetime.time object using a localizable format. If use_l10n is provided and is not None, it forces the value to be localized (or not), otherwise it's always localized.",
    "type": "function",
    "file_path": "django\\django\\utils\\formats.py",
    "ast_data": "FunctionDef name:time_format arg:value arg:format arg:use_l10n arguments arg arg arg Return return:yes Call Call BoolOp"
  },
  {
    "library": "pytorch",
    "name": "dequantize_per_tensor_tensor2",
    "source_code": "@impl(quantized_decomposed_lib, 'dequantize_per_tensor.tensor2', 'CompositeExplicitAutograd')\ndef dequantize_per_tensor_tensor2(input: torch.Tensor, scale: torch.Tensor, zero_point: torch.Tensor, quant_min: torch.Tensor, quant_max: torch.Tensor, dtype: torch.dtype, *, out_dtype: Optional[torch.dtype]=None) -> torch.Tensor:\n    assert zero_point.numel() == 1, f'Expecting zero_point tensor to be one element, but received : {zero_point.numel()}'\n    assert scale.numel() == 1, f'Expecting scale tensor to be one element, but received : {scale.numel()}'\n    return dequantize_per_tensor(input, scale.item(), zero_point.item(), quant_min.item(), quant_max.item(), dtype, out_dtype=out_dtype)",
    "docstring": "Affine dequantization for the Tensor using the same quantization parameters to map from quantized values to floating point values Same as but scale and zero_point are Scalar Tensor instead of scalar values",
    "type": "function",
    "file_path": "pytorch\\torch\\ao\\quantization\\fx\\_decomposed.py",
    "ast_data": "FunctionDef name:dequantize_per_tensor_tensor2 arg:input arg:scale arg:zero_point arg:quant_min arg:quant_max arg:dtype arguments arg arg arg arg arg arg arg Compare Call Call Compare Call Call Return return:yes Call Call Call Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "find_patterns",
    "source_code": "def find_patterns(node: Node, index_stack: list[int], current_pattern: list[Node], matched_patterns: list[list[Node]], seen: set[tuple[Node, tuple[int, ...]]]):\n    if len(index_stack) == 0 and len(current_pattern) > 0:\n        matched_patterns.append(copy.copy(current_pattern))\n        current_pattern.clear()\n    state = (node, tuple(index_stack))\n    if state in seen:\n        return\n    seen.add(state)\n    for user in node.users:\n        if user.op == 'call_function' and user.target == tuple:\n            for i, user_arg in enumerate(user.args[0]):\n                if user_arg == node:\n                    index_stack.append(i)\n                    current_pattern.append(user)\n                    find_patterns(user, index_stack, current_pattern, matched_patterns, seen)\n        elif user.op == 'call_function' and user.target == operator.getitem:\n            if len(index_stack) > 0:\n                if user.args[1] == index_stack[-1]:\n                    index_stack.pop()\n                    current_pattern.append(user)\n                    find_patterns(user, index_stack, current_pattern, matched_patterns, seen)\n    return matched_patterns",
    "docstring": "Traverse the graph recursively to match for the N-tuple - N-getitem patterns, starting at the given node. We use a stack to keep track of the expected indices, since these are reversed from the indices. In the above example, the stack after (b -> tuple -> tuple) will be [0, 1], which will be popped by getitem(1) first and then by getitem(0). TODO: traverse upwards from the output and handle the case when tuple is not a separate node, e.g. graph.call_function(operator.getitem, args=(a, (b, c)))",
    "type": "function",
    "file_path": "pytorch\\torch\\ao\\quantization\\fx\\utils.py",
    "ast_data": "FunctionDef name:find_patterns arg:node arg:index_stack arg:current_pattern arg:matched_patterns arg:seen arguments arg arg arg arg arg If BoolOp Compare Call Compare Call Call Call Call Assign Call If Compare Return return:no Call For If BoolOp Compare Compare For Call If Compare Call Call Call If BoolOp Compare Compare If Compare Call If Compare Call Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "pretty_printed_signature",
    "source_code": "def pretty_printed_signature(self, verbose=True):\n    assert self.function_type is not None\n    if verbose:\n        return repr(self.function_type)\n    else:\n        return str(self.function_type)",
    "docstring": "Returns a string summarizing the signature of this concrete function.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\polymorphic_function\\concrete_function.py",
    "ast_data": "FunctionDef name:pretty_printed_signature arg:self arg:verbose arguments arg arg Compare If Return return:yes Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "trace_plot",
    "source_code": "def trace_plot(data, device=None, plot_segments=False):\n    return _format_viz(data, 'Active Memory Timeline' if not plot_segments else 'Active Cached Memory Timeline', device)",
    "docstring": "Generate a visualization over time of the memory usage recorded by the trace as an html file. Args: data: Memory snapshot as generated from torch.cuda.memory._snapshot() device (torch.device, optional): Generate the trace for this device, needed if multiple devices have allocations. plot_segments (bool, optional): Plots memory returned from cudaMalloc, rather than individual allocations. Defaults to False. Returns: str: HTML of visualization",
    "type": "function",
    "file_path": "pytorch\\torch\\cuda\\_memory_viz.py",
    "ast_data": "FunctionDef name:trace_plot arg:data arg:device arg:plot_segments arguments arg arg arg Return return:yes Call"
  },
  {
    "library": "django",
    "name": "verify",
    "source_code": "def verify(self, password, encoded):\n    raise NotImplementedError('subclasses of BasePasswordHasher must provide a verify() method')",
    "docstring": "Check if the given password is correct.",
    "type": "method",
    "file_path": "django\\django\\contrib\\auth\\hashers.py",
    "ast_data": "FunctionDef name:verify arg:self arg:password arg:encoded arguments arg arg arg Raise Call"
  },
  {
    "library": "tensorflow",
    "name": "isgenerator",
    "source_code": "def isgenerator(object):\n    return _inspect.isgenerator(tf_decorator.unwrap(object)[1])",
    "docstring": "TFDecorator-aware replacement for inspect.isgenerator.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\utils\\tf_inspect.py",
    "ast_data": "FunctionDef name:isgenerator arg:object arguments arg Return return:yes Call Call"
  },
  {
    "library": "pandas",
    "name": "evaluate_callable_usecols",
    "source_code": "def evaluate_callable_usecols(usecols: Callable[[Hashable], object] | SequenceT, names: Iterable[Hashable]) -> SequenceT | set[int]:\n    if callable(usecols):\n        return {i for i, name in enumerate(names) if usecols(name)}\n    return usecols",
    "docstring": "Check whether or not the 'usecols' parameter is a callable. If so, enumerates the 'names' parameter and returns a set of indices for each entry in 'names' that evaluates to True. If not a callable, returns 'usecols'.",
    "type": "function",
    "file_path": "pandas\\pandas\\io\\parsers\\base_parser.py",
    "ast_data": "FunctionDef name:evaluate_callable_usecols arg:usecols arg:names arguments arg arg If Call Return return:yes Call Call Return return:yes"
  },
  {
    "library": "django",
    "name": "__str__",
    "source_code": "def __str__(self):\n    return self.wkt",
    "docstring": "WKT is used for the string representation.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\gdal\\geometries.py",
    "ast_data": "FunctionDef name:__str__ arg:self arguments arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_check_iterator_valid",
    "source_code": "def _check_iterator_valid(datapipe, iterator_id, next_method_exists=False) -> None:\n    if next_method_exists:\n        if datapipe._valid_iterator_id is not None and datapipe._valid_iterator_id != 0:\n            extra_msg = \"\\nNote that this exception is raised inside your IterDataPipe's a `__next__` method\"\n            raise RuntimeError(_gen_invalid_iterdatapipe_msg(datapipe) + extra_msg + _feedback_msg)\n    elif hasattr(datapipe, '_is_child_datapipe') and datapipe._is_child_datapipe is True:\n        if hasattr(datapipe, '_check_valid_iterator_id'):\n            if not datapipe._check_valid_iterator_id(iterator_id):\n                raise RuntimeError(f'This iterator has been invalidated, because a new iterator has been created from one of the ChildDataPipes of {_generate_iterdatapipe_msg(datapipe.main_datapipe)}.' + _feedback_msg)\n        else:\n            raise RuntimeError('ChildDataPipe must have method `_check_valid_iterator_id`.')\n    elif datapipe._valid_iterator_id != iterator_id:\n        raise RuntimeError(_gen_invalid_iterdatapipe_msg(datapipe) + _feedback_msg)",
    "docstring": "Given an instance of a DataPipe and an iterator ID, check if the IDs match, and if not, raises an exception. In the case of ChildDataPipe, the ID gets compared to the one stored in as well.",
    "type": "function",
    "file_path": "pytorch\\torch\\utils\\data\\datapipes\\_hook_iterator.py",
    "ast_data": "FunctionDef name:_check_iterator_valid arg:datapipe arg:iterator_id arg:next_method_exists arguments arg arg arg If If BoolOp Compare Compare Assign Raise Call Call If BoolOp Call Compare If Call If Call Raise Call Call Raise Call If Compare Raise Call Call"
  },
  {
    "library": "scipy",
    "name": "non_reentrant",
    "source_code": "def non_reentrant(err_msg=None):\n\n    def decorator(func):\n        msg = err_msg\n        if msg is None:\n            msg = f'{func.__name__} is not re-entrant'\n        lock = ReentrancyLock(msg)\n        return lock.decorate(func)\n    return decorator",
    "docstring": "Decorate a function with a threading lock and prevent reentrant calls.",
    "type": "function",
    "file_path": "scipy\\scipy\\_lib\\_threadsafety.py",
    "ast_data": "FunctionDef name:non_reentrant arg:err_msg arguments arg FunctionDef name:decorator arg:func arguments arg Assign If Compare Assign Assign Call Return return:yes Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_tensor_version_fake",
    "source_code": "@_tensor_version.py_impl(FakeTensorMode)\ndef _tensor_version_fake(fake_mode, self_tensor):\n    return fake_mode.shape_env.create_unbacked_symint()",
    "docstring": "The initial dynamo capture of _tensor_version + _unsafe_set_version_counter turns the into an unbacked SymInt so that we don't need to specialize on the of input tensors to the graph.",
    "type": "function",
    "file_path": "pytorch\\torch\\_dynamo\\tensor_version_op.py",
    "ast_data": "FunctionDef name:_tensor_version_fake arg:fake_mode arg:self_tensor arguments arg arg Return return:yes Call Call"
  },
  {
    "library": "sphinx",
    "name": "seealso",
    "source_code": "class seealso(nodes.Admonition, nodes.Element):\n    pass",
    "docstring": "Custom \"see also\" admonition.",
    "type": "class",
    "file_path": "sphinx\\sphinx\\addnodes.py",
    "ast_data": "ClassDef name:seealso"
  },
  {
    "library": "seaborn",
    "name": "fit_logx",
    "source_code": "def fit_logx(self, grid):\n    X, y = (np.c_[np.ones(len(self.x)), self.x], self.y)\n    grid = np.c_[np.ones(len(grid)), np.log(grid)]\n\n    def reg_func(_x, _y):\n        _x = np.c_[_x[:, 0], np.log(_x[:, 1])]\n        return np.linalg.pinv(_x).dot(_y)\n    yhat = grid.dot(reg_func(X, y))\n    if self.ci is None:\n        return (yhat, None)\n    beta_boots = algo.bootstrap(X, y, func=reg_func, n_boot=self.n_boot, units=self.units, seed=self.seed).T\n    yhat_boots = grid.dot(beta_boots).T\n    return (yhat, yhat_boots)",
    "docstring": "Fit the model in log-space.",
    "type": "method",
    "file_path": "seaborn\\seaborn\\regression.py",
    "ast_data": "FunctionDef name:fit_logx arg:self arg:grid arguments arg arg Assign Call Call Assign Call Call Call FunctionDef name:reg_func arg:_x arg:_y arguments arg arg Assign Call Return return:yes Call Call Assign Call Call If Compare Return return:yes Assign Call Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_load_edges",
    "source_code": "def _load_edges(self):\n    for node_id, proto in enumerate(self._proto.nodes):\n        if node_id not in self.model_layer_dependencies:\n            self._add_object_graph_edges(proto, node_id)",
    "docstring": "Add edges for all nodes that are not waiting on initialization.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\saving\\saved_model\\load.py",
    "ast_data": "FunctionDef name:_load_edges arg:self arguments arg For Call If Compare Call"
  },
  {
    "library": "matplotlib",
    "name": "_update_trajectory",
    "source_code": "def _update_trajectory(self, xm, ym, broken_streamlines=True):\n    if self._current_xy != (xm, ym):\n        if self[ym, xm] == 0:\n            self._traj.append((ym, xm))\n            self._mask[ym, xm] = 1\n            self._current_xy = (xm, ym)\n        elif broken_streamlines:\n            raise InvalidIndexError\n        else:\n            pass",
    "docstring": "Update current trajectory position in mask. If the new position has already been filled, raise .",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\streamplot.py",
    "ast_data": "FunctionDef name:_update_trajectory arg:self arg:xm arg:ym arg:broken_streamlines arguments arg arg arg arg If Compare If Compare Call Assign Assign If Raise"
  },
  {
    "library": "pytorch",
    "name": "remove_zero_terms",
    "source_code": "def remove_zero_terms(base, divisor):\n    if not statically_known(base >= 0):\n        return base\n    for v in base.free_symbols:\n        if v in var_ranges:\n            rest = sympy.Wild('_rest', exclude=[v])\n            m = base.match(v + rest)\n            if m and v not in m[rest].free_symbols:\n                gcd = sympy.gcd(m[rest], divisor)\n                if gcd == divisor:\n                    if statically_known(v < divisor):\n                        base = m[rest]\n    return base",
    "docstring": "Symbols smaller than the divisor are zero",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\sizevars.py",
    "ast_data": "FunctionDef name:remove_zero_terms arg:base arg:divisor arguments arg arg If Call Compare Return return:yes For If Compare Assign Call Assign Call If BoolOp Compare Assign Call If Compare If Call Compare Assign Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "from_graphable",
    "source_code": "def from_graphable(flat_args, spec):\n    stuff = pytree.tree_unflatten(flat_args, spec)\n    return stuff",
    "docstring": "The inverse of to_graphable.",
    "type": "function",
    "file_path": "pytorch\\torch\\_higher_order_ops\\flat_apply.py",
    "ast_data": "FunctionDef name:from_graphable arg:flat_args arg:spec arguments arg arg Assign Call Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "_get_scorers",
    "source_code": "def _get_scorers(self):\n    refit_metric = 'score'\n    if callable(self.scoring):\n        scorers = self.scoring\n    elif self.scoring is None or isinstance(self.scoring, str):\n        scorers = check_scoring(self.estimator, self.scoring)\n    else:\n        scorers = _check_multimetric_scoring(self.estimator, self.scoring)\n        self._check_refit_for_multimetric(scorers)\n        refit_metric = self.refit\n        scorers = _MultimetricScorer(scorers=scorers, raise_exc=self.error_score == 'raise')\n    return (scorers, refit_metric)",
    "docstring": "Get the scorer(s) to be used. This is used in ``. Returns ------- scorers, refit_metric",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\model_selection\\_search.py",
    "ast_data": "FunctionDef name:_get_scorers arg:self arguments arg Assign If Call Assign If BoolOp Compare Call Assign Call Assign Call Call Assign Assign Call Compare Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "__init__",
    "source_code": "def __init__(self, pad=0.3):\n    self.pad = pad",
    "docstring": "Parameters ---------- pad : float, default: 0.3 The amount of padding around the original box.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\patches.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:pad arguments arg arg Assign"
  },
  {
    "library": "pytorch",
    "name": "_is_integer",
    "source_code": "def _is_integer(x) -> bool:\n    if isinstance(x, (int, torch.SymInt)):\n        return True\n    if np is not None and isinstance(x, np.integer):\n        return True\n    return isinstance(x, Tensor) and (not x.is_floating_point())",
    "docstring": "Type check the input number is an integer. Will return True for int, SymInt, Numpy integers and Tensors with integer elements.",
    "type": "function",
    "file_path": "pytorch\\torch\\nn\\functional.py",
    "ast_data": "FunctionDef name:_is_integer arg:x arguments arg If Call Return return:yes If BoolOp Compare Call Return return:yes Return return:yes BoolOp Call Call"
  },
  {
    "library": "pytorch",
    "name": "_format_exceptions_for_all_strategies",
    "source_code": "def _format_exceptions_for_all_strategies(results: list[_capture_strategies.Result]) -> str:\n    return '\\n'.join([f\"# ⚠️ Errors from strategy '{result.strategy}': -----------------------\\n\\n{_format_exception(result.exception)}\\n\" for result in results if result.exception is not None])",
    "docstring": "Format all the exceptions from the capture strategies.",
    "type": "function",
    "file_path": "pytorch\\torch\\onnx\\_internal\\exporter\\_core.py",
    "ast_data": "FunctionDef name:_format_exceptions_for_all_strategies arg:results arguments arg Return return:yes Call Call Compare"
  },
  {
    "library": "pytorch",
    "name": "indirect_load",
    "source_code": "def indirect_load(self, name: str, index: sympy.Expr) -> CSEVariable:\n    prior = self.loads\n    try:\n        self.loads = self.compute\n        return self.load(name, index)\n    finally:\n        self.loads = prior",
    "docstring": "A load the depends on an index we have read",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\codegen\\common.py",
    "ast_data": "FunctionDef name:indirect_load arg:self arg:name arg:index arguments arg arg arg Assign Try Assign Return return:yes Call Assign"
  },
  {
    "library": "tensorflow",
    "name": "main",
    "source_code": "def main():\n    parser = argparse.ArgumentParser(description='Cherry picking automation.')\n    parser.add_argument('--filename', help='path to whl file we are copying', required=True)\n    parser.add_argument('--new_py_ver', help='two digit py version eg. 27 or 33', required=True)\n    args = parser.parse_args()\n    args.filename = os.path.abspath(args.filename)\n    check_existence(args.filename)\n    regex_groups = re.search(TF_NIGHTLY_REGEX, args.filename)\n    directory = regex_groups.group(1)\n    package = regex_groups.group(2)\n    version = regex_groups.group(3)\n    origin_tag = regex_groups.group(4)\n    old_py_ver = re.search('(cp\\\\d\\\\d)', origin_tag).group(1)\n    new_tag = origin_tag.replace(old_py_ver, 'cp' + args.new_py_ver)\n    copy_binary(directory, origin_tag, new_tag, version, package)",
    "docstring": "This script copies binaries. Requirements: filename: The path to the whl file AND new_py_ver: Create a nightly tag with current date Raises: RuntimeError: If the whl file was not found",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\tools\\ci_build\\copy_binary.py",
    "ast_data": "FunctionDef name:main arguments Assign Call Call Call Assign Call Assign Call Call Assign Call Assign Call Assign Call Assign Call Assign Call Assign Call Call Assign Call Call"
  },
  {
    "library": "pandas",
    "name": "_validate_ndim",
    "source_code": "@final\n@cache_readonly\ndef _validate_ndim(self) -> bool:\n    return not is_1d_only_ea_dtype(self.dtype)",
    "docstring": "We validate dimension for blocks that can hold 2D values, which for now means numpy dtypes or EA dtypes like DatetimeTZDtype and PeriodDtype.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\internals\\blocks.py",
    "ast_data": "FunctionDef name:_validate_ndim arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "_mark_tensor_parallel_shardings",
    "source_code": "def _mark_tensor_parallel_shardings(gm: GraphModule, graph_signature: ExportGraphSignature, mesh: DeviceMesh, parameter_placements: dict[str, Placement]) -> dict[Node, PlacementStrategy]:\n    placement_strategies: dict[Node, PlacementStrategy] = {}\n    num_params_and_buffers = len(graph_signature.inputs_to_parameters) + len(graph_signature.inputs_to_buffers)\n    placeholder_idx: int = 0\n    for node in gm.graph.nodes:\n        if node.op == 'placeholder':\n            if placeholder_idx < num_params_and_buffers:\n                fqn: str = _get_input_node_fqn(node.name, graph_signature)\n                placement: Placement = parameter_placements[fqn] if fqn in parameter_placements else Replicate()\n                placement_strategies[node] = _create_placement_strategy(node, mesh, placements=(placement,))\n                placeholder_idx += 1\n            else:\n                placement_strategies[node] = _create_placement_strategy(node, mesh, placements=(Replicate(),))\n    return placement_strategies",
    "docstring": "Mark the placement strategies of the parameter and buffer placeholder nodes.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\tensor\\experimental\\_tp_transform.py",
    "ast_data": "FunctionDef name:_mark_tensor_parallel_shardings arg:gm arg:graph_signature arg:mesh arg:parameter_placements arguments arg arg arg arg Assign Call Call For If Compare If Compare Call Compare Call Assign Call Assign Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "type_constraint",
    "source_code": "@property\ndef type_constraint(self) -> Optional[trace.TraceType]:\n    return self.annotation if self.annotation is not self.empty else None",
    "docstring": "A supertype that the parameter's type must subtype for validity.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\core\\function\\polymorphism\\function_type.py",
    "ast_data": "FunctionDef name:type_constraint arg:self arguments arg Return return:yes Compare"
  },
  {
    "library": "pandas",
    "name": "_make_block",
    "source_code": "def _make_block(values: ArrayLike, placement: np.ndarray) -> Block:\n    dtype = values.dtype\n    klass = get_block_type(dtype)\n    placement_obj = BlockPlacement(placement)\n    if isinstance(dtype, ExtensionDtype) and dtype._supports_2d or isinstance(values, (DatetimeArray, TimedeltaArray)):\n        values = ensure_block_shape(values, ndim=2)\n    values = maybe_coerce_values(values)\n    return klass(values, ndim=2, placement=placement_obj)",
    "docstring": "This is an analogue to blocks.new_block(_2d) that ensures: 1) correct dimension for EAs that support 2D (), and 2) correct EA class for datetime64/timedelta64 (). The input is assumed to be either numpy array or ExtensionArray: - In case of a numpy array, it is assumed to already be in the expected shape for Blocks (2D, (cols, rows)). - In case of an ExtensionArray the input can be 1D, also for EAs that are internally stored as 2D. For the rest no preprocessing or validation is done, except for those dtypes that are internally stored as EAs but have an exact numpy equivalent (and at the moment use that numpy dtype), i.e. datetime64/timedelta64.",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\internals\\api.py",
    "ast_data": "FunctionDef name:_make_block arg:values arg:placement arguments arg arg Assign Assign Call Assign Call If BoolOp BoolOp Call Call Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_validate_dct_arguments",
    "source_code": "def _validate_dct_arguments(input_tensor, dct_type, n, axis, norm):\n    if axis != -1:\n        raise NotImplementedError('axis must be -1. Got: %s' % axis)\n    if n is not None and n < 1:\n        raise ValueError('n should be a positive integer or None')\n    if dct_type not in (1, 2, 3, 4):\n        raise ValueError('Types I, II, III and IV (I)DCT are supported.')\n    if dct_type == 1:\n        if norm == 'ortho':\n            raise ValueError('Normalization is not supported for the Type-I DCT.')\n        if input_tensor.shape[-1] is not None and input_tensor.shape[-1] < 2:\n            raise ValueError('Type-I DCT requires the dimension to be greater than one.')\n    if norm not in (None, 'ortho'):\n        raise ValueError(\"Unknown normalization. Expected None or 'ortho', got: %s\" % norm)",
    "docstring": "Checks that DCT/IDCT arguments are compatible and well formed.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\signal\\dct_ops.py",
    "ast_data": "FunctionDef name:_validate_dct_arguments arg:input_tensor arg:dct_type arg:n arg:axis arg:norm arguments arg arg arg arg arg If Compare Raise Call If BoolOp Compare Compare Raise Call If Compare Raise Call If Compare If Compare Raise Call If BoolOp Compare Compare Raise Call If Compare Raise Call"
  },
  {
    "library": "scikit-learn",
    "name": "_sym_decorrelation",
    "source_code": "def _sym_decorrelation(W):\n    s, u = linalg.eigh(np.dot(W, W.T))\n    s = np.clip(s, a_min=np.finfo(W.dtype).tiny, a_max=None)\n    return np.linalg.multi_dot([u * (1.0 / np.sqrt(s)), u.T, W])",
    "docstring": "Symmetric decorrelation i.e. W <- (W * W.T) ^{-1/2} * W",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\decomposition\\_fastica.py",
    "ast_data": "FunctionDef name:_sym_decorrelation arg:W arguments arg Assign Call Call Assign Call Call Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "arg_name",
    "source_code": "def arg_name(self, node: IRNode) -> Optional[str]:\n    if node is None:\n        return None\n    return self.args.arg_name(node.get_name())",
    "docstring": "Returns arg name of a given input or output node.",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\codegen\\common.py",
    "ast_data": "FunctionDef name:arg_name arg:self arg:node arguments arg arg If Compare Return return:no Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "flatten_dict_items",
    "source_code": "def flatten_dict_items(dictionary):\n    return _pywrap_nest.FlattenDictItems(dictionary)",
    "docstring": "Returns a dictionary with flattened keys and values. This function flattens the keys and values of a dictionary, which can be arbitrarily nested structures, and returns the flattened version of such structures: The input dictionary must satisfy two properties: 1. Its keys and values should have the same exact nested structure. 2. The set of all flattened keys of the dictionary must not contain repeated keys. Args: dictionary: the dictionary to zip Returns: The zipped dictionary. Raises: TypeError: If the input is not a dictionary. ValueError: If any key and value do not have the same structure layout, or if keys are not unique.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\util\\nest.py",
    "ast_data": "FunctionDef name:flatten_dict_items arg:dictionary arguments arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "num_buckets",
    "source_code": "@abc.abstractproperty\ndef num_buckets(self):\n    pass",
    "docstring": "Returns number of buckets in this sparse feature.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\feature_column\\feature_column_v2.py",
    "ast_data": "FunctionDef name:num_buckets arg:self arguments arg"
  },
  {
    "library": "pytorch",
    "name": "get_known_fqn",
    "source_code": "def get_known_fqn(self, mod):\n    return self._known_modules.get(mod, None)",
    "docstring": "Return the fqn for the given module if it is known to the ``.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\_tools\\mod_tracker.py",
    "ast_data": "FunctionDef name:get_known_fqn arg:self arg:mod arguments arg arg Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "predict_proba",
    "source_code": "def predict_proba(self, raw_prediction):\n    return self.link.inverse(raw_prediction)",
    "docstring": "Predict probabilities. Parameters ---------- raw_prediction : array of shape (n_samples, n_classes) Raw prediction values (in link space). Returns ------- proba : array of shape (n_samples, n_classes) Element-wise class probabilities.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\_loss\\loss.py",
    "ast_data": "FunctionDef name:predict_proba arg:self arg:raw_prediction arguments arg arg Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "fit",
    "source_code": "@_fit_context(prefer_skip_nested_validation=True)\ndef fit(self, X, y=None):\n    X = validate_data(self, X, ensure_min_samples=2)\n    return self._fit(X)",
    "docstring": "Fit the hierarchical clustering from features, or distance matrix. Parameters ---------- X : array-like, shape (n_samples, n_features) or (n_samples, n_samples) Training instances to cluster, or distances between instances if ``. y : Ignored Not used, present here for API consistency by convention. Returns ------- self : object Returns the fitted instance.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\cluster\\_agglomerative.py",
    "ast_data": "FunctionDef name:fit arg:self arg:X arg:y arguments arg arg arg Assign Call Return return:yes Call Call"
  },
  {
    "library": "kornia",
    "name": "apply_transform_mask",
    "source_code": "def apply_transform_mask(self, input: Tensor, params: Dict[str, Tensor], flags: Dict[str, Any], transform: Optional[Tensor]=None) -> Tensor:\n    raise NotImplementedError",
    "docstring": "Process masks corresponding to the inputs that are transformed.",
    "type": "method",
    "file_path": "kornia\\kornia\\augmentation\\base.py",
    "ast_data": "FunctionDef name:apply_transform_mask arg:self arg:input arg:params arg:flags arg:transform arguments arg arg arg arg arg Raise"
  },
  {
    "library": "scipy",
    "name": "mean",
    "source_code": "def mean(self, alpha):\n    alpha = _dirichlet_check_parameters(alpha)\n    out = alpha / np.sum(alpha)\n    return _squeeze_output(out)",
    "docstring": "Mean of the Dirichlet distribution. Parameters ---------- %(_dirichlet_doc_default_callparams)s Returns ------- mu : ndarray or scalar Mean of the Dirichlet distribution.",
    "type": "method",
    "file_path": "scipy\\scipy\\stats\\_multivariate.py",
    "ast_data": "FunctionDef name:mean arg:self arg:alpha arguments arg arg Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "_sparse_fit",
    "source_code": "def _sparse_fit(self, X, random_state):\n    n_samples, n_features = X.shape\n    references = self.references_ * 100\n    self.quantiles_ = []\n    for feature_idx in range(n_features):\n        column_nnz_data = X.data[X.indptr[feature_idx]:X.indptr[feature_idx + 1]]\n        if self.subsample is not None and len(column_nnz_data) > self.subsample:\n            column_subsample = self.subsample * len(column_nnz_data) // n_samples\n            if self.ignore_implicit_zeros:\n                column_data = np.zeros(shape=column_subsample, dtype=X.dtype)\n            else:\n                column_data = np.zeros(shape=self.subsample, dtype=X.dtype)\n            column_data[:column_subsample] = random_state.choice(column_nnz_data, size=column_subsample, replace=False)\n        else:\n            if self.ignore_implicit_zeros:\n                column_data = np.zeros(shape=len(column_nnz_data), dtype=X.dtype)\n            else:\n                column_data = np.zeros(shape=n_samples, dtype=X.dtype)\n            column_data[:len(column_nnz_data)] = column_nnz_data\n        if not column_data.size:\n            self.quantiles_.append([0] * len(references))\n        else:\n            self.quantiles_.append(np.nanpercentile(column_data, references))\n    self.quantiles_ = np.transpose(self.quantiles_)\n    self.quantiles_ = np.maximum.accumulate(self.quantiles_)",
    "docstring": "Compute percentiles for sparse matrices. Parameters ---------- X : sparse matrix of shape (n_samples, n_features) The data used to scale along the features axis. The sparse matrix needs to be nonnegative. If a sparse matrix is provided, it will be converted into a sparse ``.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\preprocessing\\_data.py",
    "ast_data": "FunctionDef name:_sparse_fit arg:self arg:X arg:random_state arguments arg arg arg Assign Assign Assign For Call Assign If BoolOp Compare Compare Call Assign Call If Assign Call Assign Call Assign Call If Assign Call Call Assign Call Assign Call If Call Call Call Call Assign Call Assign Call"
  },
  {
    "library": "pytorch",
    "name": "_get_flat_param_offsets",
    "source_code": "def _get_flat_param_offsets(self) -> list[tuple[int, int]]:\n    cumulative_sum = list(accumulate(self.flat_param._numels_with_padding))\n    starts = [0] + cumulative_sum[:-1]\n    ends = [end - 1 for end in cumulative_sum]\n    param_offsets = list(zip(starts, ends))\n    return param_offsets",
    "docstring": "Return [start, end] offsets of each original parameter's flattened data in the unsharded flat parameter (without padding). NOTE: The returned list includes elements for alignment padding.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\fsdp\\_flat_param.py",
    "ast_data": "FunctionDef name:_get_flat_param_offsets arg:self arguments arg Assign Call Call Assign Assign Assign Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "find_coalesced_group_with_non_p2p",
    "source_code": "def find_coalesced_group_with_non_p2p(pg_name: str, entries: list[dict[str, Any]], _pg_guids: dict[tuple[str, int], str], rank: int) -> list[tuple[int, dict[str, Any]]]:\n    found = []\n    collective_seq_id = None\n    for i, e in enumerate(entries):\n        if _pg_guids[e['process_group'][0], rank] != pg_name:\n            continue\n        elif collective_seq_id is None:\n            collective_seq_id = e['p2p_seq_id'] if e['is_p2p'] else e['collective_seq_id']\n            found.append((i, e))\n        elif not e['is_p2p'] and e['collective_seq_id'] == collective_seq_id:\n            found.append((i, e))\n        elif e['is_p2p'] and e['p2p_seq_id'] == collective_seq_id:\n            found.append((i, e))\n        else:\n            break\n    if len(found) > 1:\n        name = found[-1][1]['profiling_name']\n        if name.startswith('nccl:') and (not name.endswith('_coalesced')):\n            logger.error('Rank %s does not have a coalesced end.', rank)\n        return found\n    return []",
    "docstring": "Given a list of entries, if the collective_seq_id of the first entry matches that of subsequent ones, build an return a list of entries terminating in a 'coalesced' op entry all sharing a collective_seq_id",
    "type": "function",
    "file_path": "pytorch\\tools\\flight_recorder\\components\\utils.py",
    "ast_data": "FunctionDef name:find_coalesced_group_with_non_p2p arg:pg_name arg:entries arg:_pg_guids arg:rank arguments arg arg arg arg Assign Assign For Call If Compare If Compare Assign Call If BoolOp Compare Call If BoolOp Compare Call If Compare Call Assign If BoolOp Call Call Call Return return:yes Return return:no"
  },
  {
    "library": "pytorch",
    "name": "PositionalEncoding",
    "source_code": "class PositionalEncoding(nn.Module):\n\n    def __init__(self, d_model, dropout=0.1, max_len=5000):\n        super().__init__()\n        self.dropout = nn.Dropout(p=dropout)\n        pe = torch.zeros(max_len, d_model)\n        position = torch.arange(0, max_len, dtype=torch.float).unsqueeze(1)\n        div_term = torch.exp(torch.arange(0, d_model, 2).float() * (-math.log(10000.0) / d_model))\n        pe[:, 0::2] = torch.sin(position * div_term)\n        pe[:, 1::2] = torch.cos(position * div_term)\n        pe = pe.unsqueeze(0).transpose(0, 1)\n        self.register_buffer('pe', pe)\n\n    def forward(self, x):\n        x = x + self.pe[:x.size(0), :]\n        return self.dropout(x)",
    "docstring": "Inject some information about the relative or absolute position of the tokens in the sequence. The positional encodings have the same dimension as the embeddings, so that the two can be summed. Here, we use sine and cosine functions of different frequencies. .. math:: \\text{PosEncoder}(pos, 2i) = sin(pos/10000^(2i/d_model)) \\text{PosEncoder}(pos, 2i+1) = cos(pos/10000^(2i/d_model)) \\text{where pos is the word position and i is the embed idx) Args: d_model: the embed dim (required). dropout: the dropout value (default=0.1). max_len: the max. length of the incoming sequence (default=5000). Examples: >>> pos_encoder = PositionalEncoding(d_model)",
    "type": "class",
    "file_path": "pytorch\\benchmarks\\functional_autograd_benchmark\\torchaudio_models.py",
    "ast_data": "ClassDef name:PositionalEncoding FunctionDef name:__init__ arg:self arg:d_model arg:dropout arg:max_len arguments arg arg arg arg Call Call Assign Call Assign Call Assign Call Call Assign Call Call Call Call Assign Call Assign Call Assign Call Call Call FunctionDef name:forward arg:self arg:x arguments arg arg Assign Call Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "_ValidationScoreCallback",
    "source_code": "class _ValidationScoreCallback:\n\n    def __init__(self, estimator, X_val, y_val, sample_weight_val, classes=None):\n        self.estimator = clone(estimator)\n        self.estimator.t_ = 1\n        if classes is not None:\n            self.estimator.classes_ = classes\n        self.X_val = X_val\n        self.y_val = y_val\n        self.sample_weight_val = sample_weight_val\n\n    def __call__(self, coef, intercept):\n        est = self.estimator\n        est.coef_ = coef.reshape(1, -1)\n        est.intercept_ = np.atleast_1d(intercept)\n        return est.score(self.X_val, self.y_val, self.sample_weight_val)",
    "docstring": "Callback for early stopping based on validation score",
    "type": "class",
    "file_path": "scikit-learn\\sklearn\\linear_model\\_stochastic_gradient.py",
    "ast_data": "ClassDef name:_ValidationScoreCallback FunctionDef name:__init__ arg:self arg:estimator arg:X_val arg:y_val arg:sample_weight_val arg:classes arguments arg arg arg arg arg arg Assign Call Assign If Compare Assign Assign Assign Assign FunctionDef name:__call__ arg:self arg:coef arg:intercept arguments arg arg arg Assign Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "bundle_large_tensor",
    "source_code": "def bundle_large_tensor(t):\n    return InflatableArg(value=t, fmt='{}')",
    "docstring": "Wrap a tensor to allow bundling regardless of size.",
    "type": "function",
    "file_path": "pytorch\\torch\\utils\\bundled_inputs.py",
    "ast_data": "FunctionDef name:bundle_large_tensor arg:t arguments arg Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "load",
    "source_code": "def load(fp, encode_nominal=False, return_type=DENSE):\n    decoder = ArffDecoder()\n    return decoder.decode(fp, encode_nominal=encode_nominal, return_type=return_type)",
    "docstring": "Load a file-like object containing the ARFF document and convert it into a Python object. :param fp: a file-like object. :param encode_nominal: boolean, if True perform a label encoding while reading the .arff file. :param return_type: determines the data structure used to store the dataset. Can be one of , , , or . Consult the sections on _ and _. :return: a dictionary.",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\externals\\_arff.py",
    "ast_data": "FunctionDef name:load arg:fp arg:encode_nominal arg:return_type arguments arg arg arg Assign Call Return return:yes Call"
  },
  {
    "library": "numpy",
    "name": "title",
    "source_code": "@set_module('numpy.strings')\n@array_function_dispatch(_unary_op_dispatcher)\ndef title(a):\n    a_arr = np.asarray(a)\n    return _vec_string(a_arr, a_arr.dtype, 'title')",
    "docstring": "Return element-wise title cased version of string or unicode. Title case words start with uppercase characters, all remaining cased characters are lowercase. Calls :meth: element-wise. For 8-bit strings, this method is locale-dependent. Parameters ---------- a : array-like, with `` dtype, depending on input types See Also -------- str.title Examples -------- >>> import numpy as np >>> c=np.array(['a1b c','1b ca','b ca1','ca1b'],'S5'); c array(['a1b c', '1b ca', 'b ca1', 'ca1b'], dtype='|S5') >>> np.strings.title(c) array(['A1B C', '1B Ca', 'B Ca1', 'Ca1B'], dtype='|S5')",
    "type": "function",
    "file_path": "numpy\\numpy\\_core\\strings.py",
    "ast_data": "FunctionDef name:title arg:a arguments arg Assign Call Return return:yes Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "_default_alldims",
    "source_code": "def _default_alldims(dim: Optional[DimsType], x: TensorLikeType) -> list[int]:\n    if dim is None:\n        return list(range(x.ndim))\n    elif not isinstance(dim, Sequence):\n        return [dim]\n    else:\n        return list(dim)",
    "docstring": "Convert Optional[DimsType] to a simple list, defaulting to all dimensions",
    "type": "function",
    "file_path": "pytorch\\torch\\_refs\\fft.py",
    "ast_data": "FunctionDef name:_default_alldims arg:dim arg:x arguments arg arg If Compare Return return:yes Call Call If Call Return return:yes Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "assert_positive_v2",
    "source_code": "@tf_export('debugging.assert_positive', v1=[])\n@dispatch.add_dispatch_support\ndef assert_positive_v2(x, message=None, summarize=None, name=None):\n    return assert_positive(x=x, summarize=summarize, message=message, name=name)",
    "docstring": "Assert the condition holds element-wise. This Op checks that holds for every element of . If is empty, this is trivially satisfied. If is not positive everywhere, , as well as the first entries of are printed, and is raised. Args: x: Numeric . message: A string to prefix to the default message. summarize: Print this many entries of each tensor. name: A name for this operation (optional). Defaults to \"assert_positive\". Returns: Op raising unless is all positive. This can be used with inside of s to block followup computation until the check has executed. @compatibility(eager) returns None @end_compatibility Raises: InvalidArgumentError: if the check can be performed immediately and is False. The check can be performed immediately during eager execution or if is statically known.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\check_ops.py",
    "ast_data": "FunctionDef name:assert_positive_v2 arg:x arg:message arg:summarize arg:name arguments arg arg arg arg Return return:yes Call Call"
  },
  {
    "library": "scipy",
    "name": "compute_jac_indices",
    "source_code": "def compute_jac_indices(n, m, k):\n    i_col = np.repeat(np.arange((m - 1) * n), n)\n    j_col = np.tile(np.arange(n), n * (m - 1)) + np.repeat(np.arange(m - 1) * n, n ** 2)\n    i_bc = np.repeat(np.arange((m - 1) * n, m * n + k), n)\n    j_bc = np.tile(np.arange(n), n + k)\n    i_p_col = np.repeat(np.arange((m - 1) * n), k)\n    j_p_col = np.tile(np.arange(m * n, m * n + k), (m - 1) * n)\n    i_p_bc = np.repeat(np.arange((m - 1) * n, m * n + k), k)\n    j_p_bc = np.tile(np.arange(m * n, m * n + k), n + k)\n    i = np.hstack((i_col, i_col, i_bc, i_bc, i_p_col, i_p_bc))\n    j = np.hstack((j_col, j_col + n, j_bc, j_bc + (m - 1) * n, j_p_col, j_p_bc))\n    return (i, j)",
    "docstring": "Compute indices for the collocation system Jacobian construction. See for the explanation.",
    "type": "function",
    "file_path": "scipy\\scipy\\integrate\\_bvp.py",
    "ast_data": "FunctionDef name:compute_jac_indices arg:n arg:m arg:k arguments arg arg arg Assign Call Call Assign Call Call Call Call Assign Call Call Assign Call Call Assign Call Call Assign Call Call Assign Call Call Assign Call Call Assign Call Assign Call Return return:yes"
  },
  {
    "library": "scipy",
    "name": "fiedler_companion",
    "source_code": "def fiedler_companion(a):\n    a = np.atleast_1d(a)\n    if a.ndim > 1:\n        return np.apply_along_axis(fiedler_companion, -1, a)\n    if a.size <= 2:\n        if a.size == 2:\n            return np.array([[-(a / a[0])[-1]]])\n        return np.array([], dtype=a.dtype)\n    if a[0] == 0.0:\n        raise ValueError('Leading coefficient is zero.')\n    a = a / a[0]\n    n = a.size - 1\n    c = np.zeros((n, n), dtype=a.dtype)\n    c[range(3, n, 2), range(1, n - 2, 2)] = 1.0\n    c[range(2, n, 2), range(1, n - 1, 2)] = -a[3::2]\n    c[range(0, n - 2, 2), range(2, n, 2)] = 1.0\n    c[range(0, n - 1, 2), range(1, n, 2)] = -a[2::2]\n    c[[0, 1], 0] = [-a[1], 1]\n    return c",
    "docstring": "Returns a Fiedler companion matrix Given a polynomial coefficient array ``N >> import numpy as np >>> from scipy.linalg import fiedler_companion, eigvals >>> p = np.poly(np.arange(1, 9, 2)) # [1., -16., 86., -176., 105.] >>> fc = fiedler_companion(p) >>> fc array([[ 16., -86., 1., 0.], [ 1., 0., 0., 0.], [ 0., 176., 0., -105.], [ 0., 1., 0., 0.]]) >>> eigvals(fc) array([7.+0.j, 5.+0.j, 3.+0.j, 1.+0.j])",
    "type": "function",
    "file_path": "scipy\\scipy\\linalg\\_special_matrices.py",
    "ast_data": "FunctionDef name:fiedler_companion arg:a arguments arg Assign Call If Compare Return return:yes Call If Compare If Compare Return return:yes Call Return return:yes Call If Compare Raise Call Assign Assign Assign Call Assign Call Call Assign Call Call Assign Call Call Assign Call Call Assign Return return:yes"
  },
  {
    "library": "cryptography",
    "name": "build_ffi_for_binding",
    "source_code": "def build_ffi_for_binding(module_name: str, module_prefix: str, modules: list[str]):\n    types = []\n    includes = []\n    functions = []\n    customizations = []\n    for name in modules:\n        __import__(module_prefix + name)\n        module = sys.modules[module_prefix + name]\n        types.append(module.TYPES)\n        functions.append(module.FUNCTIONS)\n        includes.append(module.INCLUDES)\n        customizations.append(module.CUSTOMIZATIONS)\n    verify_source = '\\n'.join(includes + customizations)\n    return build_ffi(module_name, cdef_source='\\n'.join(types + functions), verify_source=verify_source)",
    "docstring": "Modules listed in ``: A string containing arbitrary top-level C code, this can be used to do things like test for a define and provide an alternate implementation based on that.",
    "type": "function",
    "file_path": "cryptography\\src\\_cffi_src\\utils.py",
    "ast_data": "FunctionDef name:build_ffi_for_binding arg:module_name arg:module_prefix arg:modules arguments arg arg arg Assign Assign Assign Assign For Call Assign Call Call Call Call Assign Call Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "get_layer_policy",
    "source_code": "def get_layer_policy(layer):\n    if not isinstance(layer, base_layer.Layer):\n        raise ValueError('get_policy can only be called on a layer, but got: %s' % (layer,))\n    return layer.dtype_policy",
    "docstring": "Returns the dtype policy of a layer. Warning: This function is deprecated. Use instead. Args: layer: A . Returns: The of the layer.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\mixed_precision\\get_layer_policy.py",
    "ast_data": "FunctionDef name:get_layer_policy arg:layer arguments arg If Call Raise Call Return return:yes"
  },
  {
    "library": "django",
    "name": "deactivate",
    "source_code": "def deactivate():\n    if hasattr(_active, 'value'):\n        del _active.value",
    "docstring": "Unset the time zone for the current thread. Django will then use the time zone defined by settings.TIME_ZONE.",
    "type": "function",
    "file_path": "django\\django\\utils\\timezone.py",
    "ast_data": "FunctionDef name:deactivate arguments If Call"
  },
  {
    "library": "pytorch",
    "name": "_dump_snapshot",
    "source_code": "def _dump_snapshot(filename='dump_snapshot.pickle'):\n    s = _snapshot()\n    with open(filename, 'wb') as f:\n        pickle.dump(s, f)",
    "docstring": "Save a pickled version of the dictionary to a file. This file can be opened by the interactive snapshot viewer at pytorch.org/memory_viz Args: filename (str, optional): Name of the file to create. Defaults to \"dump_snapshot.pickle\".",
    "type": "function",
    "file_path": "pytorch\\torch\\cuda\\memory.py",
    "ast_data": "FunctionDef name:_dump_snapshot arg:filename arguments arg Assign Call With Call Call"
  },
  {
    "library": "sphinx",
    "name": "add_crossref_type",
    "source_code": "def add_crossref_type(self, directivename: str, rolename: str, indextemplate: str='', ref_nodeclass: type[nodes.TextElement] | None=None, objname: str='', override: bool=False) -> None:\n    self.registry.add_crossref_type(directivename, rolename, indextemplate, ref_nodeclass, objname, override=override)",
    "docstring": "Register a new crossref object type. This method is very similar to :meth: except that the directive it generates must be empty, and will produce no output. That means that you can add semantic targets to your sources, and refer to them using custom roles instead of generic ones (like :rst:role:). Example call:: app.add_crossref_type( 'topic', 'topic', 'single: %s', docutils.nodes.emphasis ) Example usage:: .. topic:: application API The application API ------------------- Some random text here. See also :topic:. (Of course, the element following the `` directive needn't be a section.) :param override: If false, do not install it if another cross-reference type is already installed as the same name If true, unconditionally install the cross-reference type. .. versionchanged:: 1.8 Add *override* keyword.",
    "type": "method",
    "file_path": "sphinx\\sphinx\\application.py",
    "ast_data": "FunctionDef name:add_crossref_type arg:self arg:directivename arg:rolename arg:indextemplate arg:ref_nodeclass arg:objname arg:override arguments arg arg arg arg arg arg arg Call"
  },
  {
    "library": "matplotlib",
    "name": "__init__",
    "source_code": "def __init__(self, grid_helper, nth_coord, value, axis_direction=None):\n    super().__init__(nth_coord, value)\n    self.value = value\n    self.grid_helper = grid_helper\n    self._extremes = (-np.inf, np.inf)\n    self._line_num_points = 100",
    "docstring": "nth_coord = along which coordinate value varies. nth_coord = 0 -> x axis, nth_coord = 1 -> y axis",
    "type": "method",
    "file_path": "matplotlib\\lib\\mpl_toolkits\\axisartist\\grid_helper_curvelinear.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:grid_helper arg:nth_coord arg:value arg:axis_direction arguments arg arg arg arg arg Call Call Assign Assign Assign Assign"
  },
  {
    "library": "pytorch",
    "name": "_get_shard",
    "source_code": "@staticmethod\ndef _get_shard(tensor: Tensor, rank: int, world_size: int) -> tuple[Tensor, int]:\n    chunk, numel_to_pad = FlatParamHandle._get_unpadded_shard(tensor, rank, world_size)\n    shard = chunk.clone()\n    if numel_to_pad > 0:\n        shard = F.pad(shard, [0, numel_to_pad])\n    return (shard, numel_to_pad)",
    "docstring": "Return the shard of `clone` may be deallocated after this method returns.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\fsdp\\_flat_param.py",
    "ast_data": "FunctionDef name:_get_shard arg:tensor arg:rank arg:world_size arguments arg arg arg Assign Call Assign Call If Compare Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "RevivedNetwork",
    "source_code": "class RevivedNetwork(RevivedLayer):\n\n    @classmethod\n    def _init_from_metadata(cls, metadata):\n        revived_obj = cls(name=metadata['name'])\n        with utils.no_automatic_dependency_tracking_scope(revived_obj):\n            revived_obj._expects_training_arg = metadata['expects_training_arg']\n            config = metadata.get('config')\n            if generic_utils.validate_config(config):\n                revived_obj._config = config\n            if metadata.get('activity_regularizer') is not None:\n                revived_obj.activity_regularizer = regularizers.deserialize(metadata['activity_regularizer'])\n        return (revived_obj, _revive_setter)",
    "docstring": "Keras network of layers loaded from a SavedModel.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\saving\\saved_model\\load.py",
    "ast_data": "ClassDef name:RevivedNetwork FunctionDef name:_init_from_metadata arg:cls arg:metadata arguments arg arg Assign Call With Call Assign Assign Call If Call Assign If Compare Call Assign Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "apply",
    "source_code": "def apply(self, model_args: Sequence[Any], model_kwargs: Mapping[str, Any], model: torch.nn.Module | Callable | torch_export.ExportedProgram | None=None) -> tuple[Sequence[Any], Mapping[str, Any]]:\n    return (tuple(model_args) + tuple(model_kwargs.values()), {})",
    "docstring": "Merge the input kwargs into the input args. Args: model_args: The model args. model_kwargs: The model kwargs. model: The PyTorch model. Returns: A tuple of the model args and kwargs. kwargs is always empty.",
    "type": "method",
    "file_path": "pytorch\\torch\\onnx\\_internal\\io_adapter.py",
    "ast_data": "FunctionDef name:apply arg:self arg:model_args arg:model_kwargs arg:model arguments arg arg arg arg Return return:yes Call Call Call"
  },
  {
    "library": "django",
    "name": "non_field_errors",
    "source_code": "def non_field_errors(self):\n    return self.errors.get(NON_FIELD_ERRORS, self.error_class(error_class='nonfield', renderer=self.renderer))",
    "docstring": "Return an ErrorList of errors that aren't associated with a particular field -- i.e., from Form.clean(). Return an empty ErrorList if there are none.",
    "type": "method",
    "file_path": "django\\django\\forms\\forms.py",
    "ast_data": "FunctionDef name:non_field_errors arg:self arguments arg Return return:yes Call Call"
  },
  {
    "library": "scipy",
    "name": "pmf",
    "source_code": "def pmf(self, x, row, col):\n    return np.exp(self.logpmf(x, row, col))",
    "docstring": "Probability of table to occur in the distribution. Parameters ---------- %(_doc_x)s %(_doc_row_col)s Returns ------- pmf : ndarray or scalar Probability mass function evaluated at . Notes ----- %(_doc_row_col_note)s If row and column marginals of do not match and , zero is returned. Examples -------- >>> from scipy.stats import random_table >>> import numpy as np >>> x = [[1, 5, 1], [2, 3, 1]] >>> row = np.sum(x, axis=1) >>> col = np.sum(x, axis=0) >>> random_table.pmf(x, row, col) 0.19580419580419592 Alternatively, the object may be called (as a function) to fix the row and column vector sums, returning a \"frozen\" distribution. >>> d = random_table(row, col) >>> d.pmf(x) 0.19580419580419592",
    "type": "method",
    "file_path": "scipy\\scipy\\stats\\_multivariate.py",
    "ast_data": "FunctionDef name:pmf arg:self arg:x arg:row arg:col arguments arg arg arg arg Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "dominated_nodes",
    "source_code": "def dominated_nodes(initial_queue: Iterable[torch.fx.Node], skip_filter: Optional[Callable[[Any], bool]]=None) -> OrderedSet[torch.fx.Node]:\n    initial_queue = list(initial_queue)\n    dominated_set = OrderedSet(initial_queue)\n    while initial_queue:\n        node = initial_queue.pop()\n        for user in node.users:\n            if skip_filter and skip_filter(user):\n                continue\n            if user not in dominated_set:\n                dominated_set.add(user)\n                initial_queue.append(user)\n    return dominated_set",
    "docstring": "Returns the set of nodes whose values depend on those within initial_queue",
    "type": "function",
    "file_path": "pytorch\\torch\\_inductor\\utils.py",
    "ast_data": "FunctionDef name:dominated_nodes arg:initial_queue arg:skip_filter arguments arg arg Assign Call Assign Call While Assign Call For If BoolOp Call If Compare Call Call Return return:yes"
  },
  {
    "library": "django",
    "name": "P",
    "source_code": "def P(self):\n    if self.data.minute == 0 and self.data.hour == 0:\n        return _('midnight')\n    if self.data.minute == 0 and self.data.hour == 12:\n        return _('noon')\n    return '%s %s' % (self.f(), self.a())",
    "docstring": "Time, in 12-hour hours, minutes and 'a.m.'/'p.m.', with minutes left off if they're zero and the strings 'midnight' and 'noon' if appropriate. Examples: '1 a.m.', '1:30 p.m.', 'midnight', 'noon', '12:30 p.m.' Proprietary extension.",
    "type": "method",
    "file_path": "django\\django\\utils\\dateformat.py",
    "ast_data": "FunctionDef name:P arg:self arguments arg If BoolOp Compare Compare Return return:yes Call If BoolOp Compare Compare Return return:yes Call Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "to_string",
    "source_code": "def to_string(self):\n    return self._as_string",
    "docstring": "Return a string representation of this . Returns: a string of the form /job:/replica:/task:/device::.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\device_spec.py",
    "ast_data": "FunctionDef name:to_string arg:self arguments arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "cpu",
    "source_code": "def cpu(self):\n    _warn_typed_storage_removal()\n    return self._new_wrapped_storage(self._untyped_storage.cpu())",
    "docstring": "Return a CPU copy of this storage if it's not already on the CPU.",
    "type": "method",
    "file_path": "pytorch\\torch\\storage.py",
    "ast_data": "FunctionDef name:cpu arg:self arguments arg Call Return return:yes Call Call"
  },
  {
    "library": "kornia",
    "name": "unwrap",
    "source_code": "def unwrap(v):\n    if type(v) in {tuple, list}:\n        return type(v)((unwrap(vi) for vi in v))\n    return v._data if isinstance(v, TensorWrapper) else v",
    "docstring": "Unwrap nested type.",
    "type": "function",
    "file_path": "kornia\\kornia\\core\\tensor_wrapper.py",
    "ast_data": "FunctionDef name:unwrap arg:v arguments arg If Compare Call Return return:yes Call Call Call Return return:yes Call"
  },
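A quick behavioral sketch of the recursive unwrap above, using a stand-in wrapper class; kornia's real TensorWrapper also keeps its payload in _data, but the class below is a hypothetical simplification.

    class Wrapper:
        def __init__(self, data):
            self._data = data

    def unwrap(v):
        # recurse through tuples/lists, preserving the container type
        if type(v) in {tuple, list}:
            return type(v)(unwrap(vi) for vi in v)
        return v._data if isinstance(v, Wrapper) else v

    print(unwrap([Wrapper(1), (Wrapper(2), 3)]))  # [1, (2, 3)]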
  {
    "library": "cherrypy",
    "name": "read_lines_to_boundary",
    "source_code": "def read_lines_to_boundary(self, fp_out=None):\n    endmarker = self.boundary + b'--'\n    delim = b''\n    prev_lf = True\n    lines = []\n    seen = 0\n    while True:\n        line = self.fp.readline(1 << 16)\n        if not line:\n            raise EOFError('Illegal end of multipart body.')\n        if line.startswith(b'--') and prev_lf:\n            strippedline = line.strip()\n            if strippedline == self.boundary:\n                break\n            if strippedline == endmarker:\n                self.fp.finish()\n                break\n        line = delim + line\n        if line.endswith(b'\\r\\n'):\n            delim = b'\\r\\n'\n            line = line[:-2]\n            prev_lf = True\n        elif line.endswith(b'\\n'):\n            delim = b'\\n'\n            line = line[:-1]\n            prev_lf = True\n        else:\n            delim = b''\n            prev_lf = False\n        if fp_out is None:\n            lines.append(line)\n            seen += len(line)\n            if seen > self.maxrambytes:\n                fp_out = self.make_file()\n                for line in lines:\n                    fp_out.write(line)\n        else:\n            fp_out.write(line)\n    if fp_out is None:\n        result = b''.join(lines)\n        return result\n    else:\n        fp_out.seek(0)\n        return fp_out",
    "docstring": "Read bytes from self.fp and return or write them to a file. If the 'fp_out' argument is None (the default), all bytes read are returned in a single byte string. If the 'fp_out' argument is not None, it must be a file-like object that supports the 'write' method; all bytes read will be written to the fp, and that fp is returned.",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\_cpreqbody.py",
    "ast_data": "FunctionDef name:read_lines_to_boundary arg:self arg:fp_out arguments arg arg Assign Assign Assign Assign Assign While Assign Call If Raise Call If BoolOp Call Assign Call If Compare If Compare Call Assign If Call Assign Assign Assign If Call Assign Assign Assign Assign Assign If Compare Call Call If Compare Assign Call For Call Call If Compare Assign Call Return return:yes Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_prepare",
    "source_code": "def _prepare(f, xs_dtypes, xs_shapes):\n    if context.executing_eagerly():\n\n        def decorated_eager(*xs_data):\n            return f(*map(ops.convert_to_tensor, xs_data))\n        return decorated_eager\n    xs = [array_ops.placeholder(x_dtype, shape=x_shape) for x_dtype, x_shape in zip(xs_dtypes, xs_shapes)]\n    y = f(*xs)\n    sess = ops.get_default_session()\n\n    def decorated_graph(*xs_data):\n        xs_data = [_to_numpy(a) for a in xs_data]\n        return sess.run(y, feed_dict=dict(zip(xs, xs_data)))\n    return decorated_graph",
    "docstring": "Return a function that executes 'f'. In TF 2.x, this is the same as . In TF 1.x, returns a Python function that executes the graph defined by in a Session. Args: f: the function. xs_dtypes: dtypes of f's arguments. xs_shapes: shapes of f's arguments. Returns:",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\gradient_checker_v2.py",
    "ast_data": "FunctionDef name:_prepare arg:f arg:xs_dtypes arg:xs_shapes arguments arg arg arg If Call FunctionDef name:decorated_eager arguments arg Return return:yes Call Call Return return:yes Assign Call Call Assign Call Assign Call FunctionDef name:decorated_graph arguments arg Assign Call Return return:yes Call Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "assert_consumed",
    "source_code": "def assert_consumed(self):\n    pretty_printer = ObjectGraphProtoPrettyPrinter(self._checkpoint.object_graph_proto)\n    self.assert_existing_objects_matched()\n    ignore_node_ids = []\n    if self._options.experimental_skip_slot_variables:\n        for node in self._checkpoint.object_graph_proto.nodes:\n            for sv in node.slot_variables:\n                ignore_node_ids.append(sv.slot_variable_node_id)\n    for node_id, node in enumerate(self._checkpoint.object_graph_proto.nodes):\n        if not node.attributes:\n            continue\n        if node_id in ignore_node_ids:\n            continue\n        trackable = self._checkpoint.object_by_proto_id.get(node_id, None)\n        if trackable is None:\n            raise AssertionError(f'Unresolved object in checkpoint {pretty_printer.node_names[node_id]}: {node}')\n    if not self._options.experimental_skip_slot_variables and self._checkpoint.slot_restorations:\n        raise AssertionError(f'Unresolved slot restorations: {self._checkpoint.slot_restorations}')\n    if self._checkpoint.unused_attributes:\n        unused_attribute_messages = []\n        for node_id, attribute in self._checkpoint.unused_attributes.items():\n            obj = self._checkpoint.object_by_proto_id[node_id]\n            unused_attribute_messages.append(f'{pretty_printer.node_names[node_id]} ({obj}): {attribute}')\n        joined_attribute_messages = '\\n'.join(unused_attribute_messages)\n        raise AssertionError(f'Unused attributes in these objects (the attributes exist in the checkpoint but were not restored):\\n{joined_attribute_messages}')\n    return self",
    "docstring": "Asserts that all objects in the checkpoint have been created/matched. Returns: for chaining. Raises: AssertionError: If there are any Python objects in the dependency graph which have not been restored from this checkpoint or a later , or if there are any checkpointed values which have not been matched to Python objects.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\checkpoint\\checkpoint.py",
    "ast_data": "FunctionDef name:assert_consumed arg:self arguments arg Assign Call Call Assign If For For Call For Call If If Compare Assign Call If Compare Raise Call If BoolOp Raise Call If Assign For Call Assign Call Assign Call Raise Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_close_streaming",
    "source_code": "def _close_streaming(self):\n    sys.stdout.flush()\n    sys.stderr.flush()\n    sys.stdout.close()\n    sys.stderr.close()\n    self._resources.streaming_pipe_w.close()",
    "docstring": "Close stdout, stderr and streaming pipe. We need to explicitly close them since Tensorflow may take a while to exit, so that the reading threads in the main process can exit more quickly.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\multi_process_runner.py",
    "ast_data": "FunctionDef name:_close_streaming arg:self arguments arg Call Call Call Call Call"
  },
  {
    "library": "kornia",
    "name": "KORNIA_UNWRAP",
    "source_code": "def KORNIA_UNWRAP(maybe_obj: object, typ: Any) -> Any:\n    return cast(typ, maybe_obj)",
    "docstring": "Unwraps an optional contained value that may or not be present. Args: maybe_obj: the object to unwrap. typ: expected type after unwrap.",
    "type": "function",
    "file_path": "kornia\\kornia\\core\\check.py",
    "ast_data": "FunctionDef name:KORNIA_UNWRAP arg:maybe_obj arg:typ arguments arg arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_get_report_filepath",
    "source_code": "def _get_report_filepath(self):\n    found, report_file_path = self.get_flag_value(FLAG_NAME_REPORT_FILE)\n    if found and report_file_path and self.use_test_undeclared_outputs_dir():\n        if os.path.isabs(report_file_path):\n            raise ValueError('If use_test_undeclared_outputs_dir is set,report_file_path cannot be an absolute path (%s)' % report_file_path)\n        outputs_dir = self._env.get(_TEST_UNDECLARED_OUTPUTS_DIR_ENV_VAR)\n        report_file_path = os.path.join(outputs_dir, report_file_path)\n    return report_file_path",
    "docstring": "Sets the path of the output report file.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\tpu\\tensor_tracer_flags.py",
    "ast_data": "FunctionDef name:_get_report_filepath arg:self arguments arg Assign Call If BoolOp Call If Call Raise Call Assign Call Assign Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "_print_pil",
    "source_code": "def _print_pil(self, filename_or_obj, fmt, pil_kwargs, metadata=None):\n    FigureCanvasAgg.draw(self)\n    mpl.image.imsave(filename_or_obj, self.buffer_rgba(), format=fmt, origin='upper', dpi=self.figure.dpi, metadata=metadata, pil_kwargs=pil_kwargs)",
    "docstring": "Draw the canvas, then save it using (to which *pil_kwargs* and *metadata* are forwarded).",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\backends\\backend_agg.py",
    "ast_data": "FunctionDef name:_print_pil arg:self arg:filename_or_obj arg:fmt arg:pil_kwargs arg:metadata arguments arg arg arg arg arg Call Call Call"
  },
  {
    "library": "scipy",
    "name": "centroid",
    "source_code": "@lazy_cython\ndef centroid(y):\n    return linkage(y, method='centroid', metric='euclidean')",
    "docstring": "Perform centroid/UPGMC linkage. See for more information on the input matrix, return structure, and algorithm. The following are common calling conventions: 1. `linkagescipy.cluster.hierarchy.linkagescipy.cluster.hierarchy.fclusterscipy.cluster.hierarchy.dendrogram` can be used to generate a plot of the dendrogram.",
    "type": "function",
    "file_path": "scipy\\scipy\\cluster\\hierarchy.py",
    "ast_data": "FunctionDef name:centroid arg:y arguments arg Return return:yes Call"
  },
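A usage sketch under the first calling convention (condensed distance matrix input); the toy data is illustrative.

    import numpy as np
    from scipy.cluster.hierarchy import centroid, fcluster
    from scipy.spatial.distance import pdist

    X = np.array([[0.0, 0.0], [0.1, 0.0], [5.0, 5.0], [5.1, 5.0]])
    y = pdist(X)                 # condensed distance matrix
    Z = centroid(y)              # (n-1) x 4 linkage matrix
    print(fcluster(Z, t=2, criterion="maxclust"))  # two flat clusters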
  {
    "library": "matplotlib",
    "name": "_path_to_3d_segment_with_codes",
    "source_code": "def _path_to_3d_segment_with_codes(path, zs=0, zdir='z'):\n    zs = np.broadcast_to(zs, len(path))\n    pathsegs = path.iter_segments(simplify=False, curves=False)\n    seg_codes = [((x, y, z), code) for ((x, y), code), z in zip(pathsegs, zs)]\n    if seg_codes:\n        seg, codes = zip(*seg_codes)\n        seg3d = [juggle_axes(x, y, z, zdir) for x, y, z in seg]\n    else:\n        seg3d = []\n        codes = []\n    return (seg3d, list(codes))",
    "docstring": "Convert a path to a 3D segment with path codes.",
    "type": "function",
    "file_path": "matplotlib\\lib\\mpl_toolkits\\mplot3d\\art3d.py",
    "ast_data": "FunctionDef name:_path_to_3d_segment_with_codes arg:path arg:zs arg:zdir arguments arg arg arg Assign Call Call Assign Call Assign Call If Assign Call Assign Call Assign Assign Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "sync",
    "source_code": "def sync():\n    pass",
    "docstring": "No-op. Used to synchronize the contents of the Python registry with C++.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\op_def_registry.py",
    "ast_data": "FunctionDef name:sync arguments"
  },
  {
    "library": "cherrypy",
    "name": "_ClassPropertyDescriptor",
    "source_code": "class _ClassPropertyDescriptor(object):\n\n    def __init__(self, fget, fset=None):\n        self.fget = fget\n        self.fset = fset\n\n    def __get__(self, obj, klass=None):\n        if klass is None:\n            klass = type(obj)\n        return self.fget.__get__(obj, klass)()",
    "docstring": "Descript for read-only class-based property. Turns a classmethod-decorated func into a read-only property of that class type (means the value cannot be set).",
    "type": "class",
    "file_path": "cherrypy\\cherrypy\\_helper.py",
    "ast_data": "ClassDef name:_ClassPropertyDescriptor FunctionDef name:__init__ arg:self arg:fget arg:fset arguments arg arg arg Assign Assign FunctionDef name:__get__ arg:self arg:obj arg:klass arguments arg arg arg If Compare Assign Call Return return:yes Call Call"
  },
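A sketch of how such a descriptor is typically exposed; the classproperty helper below mirrors the usual wrapping pattern and is an assumption here, not necessarily cherrypy's exact API.

    class _ClassPropertyDescriptor:
        def __init__(self, fget, fset=None):
            self.fget = fget
            self.fset = fset

        def __get__(self, obj, klass=None):
            if klass is None:
                klass = type(obj)
            return self.fget.__get__(obj, klass)()

    def classproperty(func):  # hypothetical helper
        # bind plain functions as classmethods before wrapping
        if not isinstance(func, (classmethod, staticmethod)):
            func = classmethod(func)
        return _ClassPropertyDescriptor(func)

    class Config:
        @classproperty
        def namespace(cls):
            return cls.__name__.lower()

    print(Config.namespace)  # "config" -- computed from the class on access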
  {
    "library": "kornia",
    "name": "spvs_fine",
    "source_code": "@torch.no_grad()\ndef spvs_fine(data: dict[str, Any], config: dict[str, Any]) -> None:\n    w_pt0_i, pt1_i = (data['spv_w_pt0_i'], data['spv_pt1_i'])\n    scale = config['LOFTR']['RESOLUTION'][1]\n    radius = config['LOFTR']['FINE_WINDOW_SIZE'] // 2\n    b_ids, i_ids, j_ids = (data['b_ids'], data['i_ids'], data['j_ids'])\n    scale = scale * data['scale1'][b_ids] if 'scale0' in data else scale\n    expec_f_gt = (w_pt0_i[b_ids, i_ids] - pt1_i[b_ids, j_ids]) / scale / radius\n    data.update({'expec_f_gt': expec_f_gt})",
    "docstring": "Perform fine supervision. Update: data (dict):{ \"expec_f_gt\": [M, 2]}",
    "type": "function",
    "file_path": "kornia\\kornia\\feature\\loftr\\utils\\supervision.py",
    "ast_data": "FunctionDef name:spvs_fine arg:data arg:config arguments arg arg Assign Assign Assign Assign Assign Compare Assign Call Call"
  },
  {
    "library": "kornia",
    "name": "_fspecial_gauss_1d",
    "source_code": "def _fspecial_gauss_1d(self, size: int, sigma: float, device: Optional[torch.device]=None, dtype: Optional[torch.dtype]=None) -> torch.Tensor:\n    coords = torch.arange(size, device=device, dtype=dtype)\n    coords -= size // 2\n    g = torch.exp(-coords ** 2 / (2 * sigma ** 2))\n    g /= g.sum()\n    return g.reshape(-1)",
    "docstring": "Create 1-D gauss kernel. Args: size: the size of gauss kernel. sigma: sigma of normal distribution. device: device to store the result on. dtype: dtype of the result. Returns: 1D kernel (size).",
    "type": "method",
    "file_path": "kornia\\kornia\\losses\\ms_ssim.py",
    "ast_data": "FunctionDef name:_fspecial_gauss_1d arg:self arg:size arg:sigma arg:device arg:dtype arguments arg arg arg arg arg Assign Call Assign Call Call Return return:yes Call"
  },
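The kernel construction above does not depend on self, so it can be sketched standalone; the size and sigma values are arbitrary for illustration.

    import torch

    size, sigma = 5, 1.5
    coords = torch.arange(size, dtype=torch.float32) - size // 2  # centered positions
    g = torch.exp(-coords**2 / (2 * sigma**2))                    # unnormalized Gaussian
    g = g / g.sum()                                               # normalize to sum to 1
    print(g)  # symmetric 1-D kernel of length 5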
  {
    "library": "pytorch",
    "name": "chromium_event_timed",
    "source_code": "@contextmanager\ndef chromium_event_timed(event_name: str, reset_event_log_on_exit: bool=False, log_pt2_compile_event: bool=False) -> Generator[Any, None, None]:\n    chromium_event_log = get_chromium_event_logger()\n    chromium_start_time = time.time_ns()\n    chromium_event_log.log_event_start(event_name, chromium_start_time, {}, log_pt2_compile_event)\n    try:\n        yield\n    finally:\n        chromium_event_log.log_event_end(event_name, time.time_ns(), {}, chromium_start_time, log_pt2_compile_event)\n        if reset_event_log_on_exit:\n            chromium_event_log.reset()",
    "docstring": "Context manager that creates a chromium start and end event. Chromium event logging is integrated with dynamo_timed, so you probably want to use that instead. Use this context manager only if you want to avoid dynamo_timed.",
    "type": "function",
    "file_path": "pytorch\\torch\\_dynamo\\utils.py",
    "ast_data": "FunctionDef name:chromium_event_timed arg:event_name arg:reset_event_log_on_exit arg:log_pt2_compile_event arguments arg arg arg Assign Call Assign Call Call Try Call Call If Call"
  },
  {
    "library": "pytorch",
    "name": "OneHotCategoricalStraightThrough",
    "source_code": "class OneHotCategoricalStraightThrough(OneHotCategorical):\n    has_rsample = True\n\n    def rsample(self, sample_shape: _size=torch.Size()) -> Tensor:\n        samples = self.sample(sample_shape)\n        probs = self._categorical.probs\n        return samples + (probs - probs.detach())",
    "docstring": "Creates a reparameterizable :class: distribution based on the straight- through gradient estimator from [1]. [1] Estimating or Propagating Gradients Through Stochastic Neurons for Conditional Computation (Bengio et al., 2013)",
    "type": "class",
    "file_path": "pytorch\\torch\\distributions\\one_hot_categorical.py",
    "ast_data": "ClassDef name:OneHotCategoricalStraightThrough Assign FunctionDef name:rsample arg:self arg:sample_shape arguments arg arg Call Assign Call Assign Return return:yes Call"
  },
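A usage sketch: rsample() produces a one-hot sample, yet gradients still reach probs through the pass-through term samples + (probs - probs.detach()); the tensor values are illustrative.

    import torch
    from torch.distributions import OneHotCategoricalStraightThrough

    probs = torch.tensor([0.2, 0.3, 0.5], requires_grad=True)
    dist = OneHotCategoricalStraightThrough(probs=probs)
    sample = dist.rsample()   # one-hot, e.g. tensor([0., 0., 1.])
    loss = (sample * torch.tensor([1.0, 2.0, 3.0])).sum()
    loss.backward()           # differentiable despite the discrete sample
    print(probs.grad)         # gradient via the straight-through term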
  {
    "library": "cryptography",
    "name": "name",
    "source_code": "@property\n@abc.abstractmethod\ndef name(self) -> str:\n    pass",
    "docstring": "A string naming this mode (e.g. \"ECB\", \"CBC\").",
    "type": "method",
    "file_path": "cryptography\\src\\cryptography\\hazmat\\primitives\\ciphers\\modes.py",
    "ast_data": "FunctionDef name:name arg:self arguments arg"
  },
  {
    "library": "tensorflow",
    "name": "initialize_system",
    "source_code": "@tf_export(v1=['tpu.initialize_system'])\ndef initialize_system(embedding_config: Optional[embedding_pb2.TPUEmbeddingConfiguration]=None, job: Optional[Text]=None, compilation_failure_closes_chips: bool=True, tpu_cancellation_closes_chips: Optional[bool]=None) -> core_types.Tensor:\n    config_string = '' if embedding_config is None else embedding_config.SerializeToString()\n    tpu_cancellation_closes_chips_enum = 0\n    if tpu_cancellation_closes_chips is not None:\n        if tpu_cancellation_closes_chips:\n            tpu_cancellation_closes_chips_enum = 1\n        else:\n            tpu_cancellation_closes_chips_enum = 2\n    with ops.device(_tpu_system_device_name(job)):\n        topology = tpu_ops.configure_distributed_tpu(compilation_failure_closes_chips=compilation_failure_closes_chips, tpu_cancellation_closes_chips=tpu_cancellation_closes_chips_enum)\n        if embedding_config is None:\n            return topology\n        with ops.control_dependencies([topology]):\n            embedding_init = tpu_ops.configure_tpu_embedding(config=config_string)\n        with ops.control_dependencies([embedding_init]):\n            return array_ops.identity(topology, name='tpu_init_identity')",
    "docstring": "Initializes a distributed TPU system for use with TensorFlow. Args: embedding_config: If not None, a proto describing the desired configuration of the hardware embedding lookup tables. If embedding_config is None, no hardware embeddings can be used. job: The job (the XXX in TensorFlow device specification /job:XXX) that contains the TPU devices that will be initialized. If job=None it is assumed there is only one job in the TensorFlow flock, and an error will be returned if this assumption does not hold. compilation_failure_closes_chips: Set the configuration whether we want to close TPU chips when there is a compilation failure. tpu_cancellation_closes_chips: Set the configuration whether we want to close TPU chips when a TPU execution is cancelled. If the value is None, the behavior will be determined by the command line flag for the TPU worker. WARNING: this argument only applies to TFRT TPU runtime. Returns: A serialized that describes the TPU system. Note: the topology must be evaluated using before it can be used.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\tpu\\tpu.py",
    "ast_data": "FunctionDef name:initialize_system arg:embedding_config arg:job arg:compilation_failure_closes_chips arg:tpu_cancellation_closes_chips arguments arg arg arg arg Assign Compare Call Assign If Compare If Assign Assign With Call Call Assign Call If Compare Return return:yes With Call Assign Call With Call Return return:yes Call Call"
  },
  {
    "library": "scikit-learn",
    "name": "_count_nonzero",
    "source_code": "def _count_nonzero(X, axis=None, sample_weight=None, xp=None, device=None):\n    from .sparsefuncs import count_nonzero\n    xp, _ = get_namespace(X, sample_weight, xp=xp)\n    if _is_numpy_namespace(xp) and sp.issparse(X):\n        return count_nonzero(X, axis=axis, sample_weight=sample_weight)\n    assert X.ndim == 2\n    weights = xp.ones_like(X, device=device)\n    if sample_weight is not None:\n        sample_weight = xp.asarray(sample_weight, device=device)\n        sample_weight = xp.reshape(sample_weight, (sample_weight.shape[0], 1))\n        weights = xp.astype(weights, sample_weight.dtype) * sample_weight\n    zero_scalar = xp.asarray(0, device=device, dtype=weights.dtype)\n    return xp.sum(xp.where(X != 0, weights, zero_scalar), axis=axis)",
    "docstring": "A variant of for the Array API. If the array is sparse, and we are using the numpy namespace then we simply call the original function. This function only supports 2D arrays.",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\utils\\_array_api.py",
    "ast_data": "FunctionDef name:_count_nonzero arg:X arg:axis arg:sample_weight arg:xp arg:device arguments arg arg arg arg arg Assign Call If BoolOp Call Call Return return:yes Call Compare Assign Call If Compare Assign Call Assign Call Assign Call Assign Call Return return:yes Call Call Compare"
  },
  {
    "library": "sphinx",
    "name": "prune",
    "source_code": "def prune(self, docnames: Iterable[str]) -> None:\n    new_titles = {}\n    new_alltitles = {}\n    new_filenames = {}\n    for docname in docnames:\n        if docname in self._titles:\n            new_titles[docname] = self._titles[docname]\n            new_alltitles[docname] = self._all_titles[docname]\n            new_filenames[docname] = self._filenames[docname]\n    self._titles = new_titles\n    self._filenames = new_filenames\n    self._all_titles = new_alltitles\n    for wordnames in self._mapping.values():\n        wordnames.intersection_update(docnames)\n    for wordnames in self._title_mapping.values():\n        wordnames.intersection_update(docnames)",
    "docstring": "Remove data for all docnames not in the list.",
    "type": "method",
    "file_path": "sphinx\\sphinx\\search\\__init__.py",
    "ast_data": "FunctionDef name:prune arg:self arg:docnames arguments arg arg Assign Assign Assign For If Compare Assign Assign Assign Assign Assign Assign For Call Call For Call Call"
  },
  {
    "library": "tensorflow",
    "name": "get_inter_op_parallelism_threads",
    "source_code": "@tf_export('config.threading.get_inter_op_parallelism_threads')\ndef get_inter_op_parallelism_threads():\n    return context.context().inter_op_parallelism_threads",
    "docstring": "Get number of threads used for parallelism between independent operations. Determines the number of threads used by independent non-blocking operations. 0 means the system picks an appropriate number. Returns: Number of parallel threads",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\config.py",
    "ast_data": "FunctionDef name:get_inter_op_parallelism_threads arguments Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_get_attrs_items",
    "source_code": "def _get_attrs_items(obj):\n    attrs = getattr(obj.__class__, '__attrs_attrs__')\n    attr_names = (a.name for a in attrs)\n    return [(attr_name, getattr(obj, attr_name)) for attr_name in attr_names]",
    "docstring": "Returns a list of (name, value) pairs from an attrs instance. TODO(b/268078256): check if this comment is valid, and if so, ensure it's handled in the function below. The list will be sorted by name. Args: obj: an object. Returns: A list of (attr_name, attr_value) pairs, sorted by attr_name.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\util\\nest_util.py",
    "ast_data": "FunctionDef name:_get_attrs_items arg:obj arguments arg Assign Call Assign Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "logical_or",
    "source_code": "def logical_or(a, b):\n    a_value = get_static_value(a)\n    if a_value is not None:\n        if np.isscalar(a_value):\n            if a_value:\n                return a_value\n            else:\n                return _maybe_static(b)\n        else:\n            return a_value | _maybe_static(b)\n    else:\n        return a | _maybe_static(b)",
    "docstring": "A version of tf.logical_or that eagerly evaluates if possible.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\numpy_ops\\np_utils.py",
    "ast_data": "FunctionDef name:logical_or arg:a arg:b arguments arg arg Assign Call If Compare If Call If Return return:yes Return return:yes Call Return return:yes Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "done",
    "source_code": "def done(self) -> bool:\n    return super().done()",
    "docstring": "Return `wait`).",
    "type": "method",
    "file_path": "pytorch\\torch\\futures\\__init__.py",
    "ast_data": "FunctionDef name:done arg:self arguments arg Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "variables",
    "source_code": "@property\ndef variables(self):\n    return tuple(self._flatten(predicate=_is_variable, expand_composites=True))",
    "docstring": "Sequence of variables owned by this module and its submodules. Note: this method uses reflection to find variables on the current instance and submodules. For performance reasons you may wish to cache the result of calling this method if you don't expect the return value to change. Returns: A sequence of variables for the current module (sorted by attribute name) followed by variables from all submodules recursively (breadth first).",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\module\\module.py",
    "ast_data": "FunctionDef name:variables arg:self arguments arg Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_set_input_tensors",
    "source_code": "def _set_input_tensors(self, interpreter: _interpreter.Interpreter, tensor_data: Sequence[np.ndarray], initialize: bool) -> None:\n    input_details = interpreter.get_input_details()\n    if len(input_details) != len(tensor_data):\n        raise ValueError('Number of inputs provided ({}) does not match number of inputs to the model ({})'.format(len(tensor_data), len(input_details)))\n    if initialize:\n        for input_detail, tensor in zip(input_details, tensor_data):\n            interpreter.resize_tensor_input(input_detail['index'], tensor.shape)\n        interpreter.allocate_tensors()\n    for input_detail, tensor in zip(input_details, tensor_data):\n        if tensor.dtype == np.float32 and input_detail['dtype'] == np.int8:\n            quant_params = _get_quant_params(input_detail)\n            if quant_params:\n                scale, zero_point = quant_params\n                tensor = np.round(tensor / scale + zero_point).astype(np.int8)\n        interpreter.set_tensor(input_detail['index'], tensor)",
    "docstring": "Sets input tensors into TFLite model Interpreter. Args: interpreter: a tf.lite.Interpreter object with allocated tensors. tensor_data: a list of Numpy array data. initialize: set to true when input is first set for the interpreter, to set input shapes and allocate tensors. Raises: ValueError: when inputs can't be set, or size of provided inputs does not match size of model inputs.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\lite\\tools\\optimize\\debugging\\python\\debugger.py",
    "ast_data": "FunctionDef name:_set_input_tensors arg:self arg:interpreter arg:tensor_data arg:initialize arguments arg arg arg arg Assign Call If Compare Call Call Raise Call Call Call Call If For Call Call Call For Call If BoolOp Compare Compare Assign Call If Assign Assign Call Call Call"
  },
  {
    "library": "scikit-learn",
    "name": "predict",
    "source_code": "def predict(self, X):\n    check_is_fitted(self, 'estimator_')\n    pos_label = self._curve_scorer._get_pos_label()\n    y_score, _ = _get_response_values_binary(self.estimator_, X, self._get_response_method(), pos_label=pos_label)\n    return _threshold_scores_to_class_labels(y_score, self.best_threshold_, self.classes_, pos_label)",
    "docstring": "Predict the target of new samples. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) The samples, as accepted by . Returns ------- class_labels : ndarray of shape (n_samples,) The predicted class.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\model_selection\\_classification_threshold.py",
    "ast_data": "FunctionDef name:predict arg:self arg:X arguments arg arg Call Assign Call Assign Call Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "_get_device",
    "source_code": "def _get_device(device: Union[int, str, torch.device]) -> torch.device:\n    if isinstance(device, str):\n        device = torch.device(device)\n    elif isinstance(device, int):\n        device = torch.device('xpu', device)\n    return device",
    "docstring": "Return the torch.device type object from the passed in device. Args: device (torch.device or int or str): selected device.",
    "type": "function",
    "file_path": "pytorch\\torch\\xpu\\__init__.py",
    "ast_data": "FunctionDef name:_get_device arg:device arguments arg If Call Assign Call If Call Assign Call Return return:yes"
  },
  {
    "library": "numpy",
    "name": "vander",
    "source_code": "def vander(x, n=None):\n    _vander = np.vander(x, n)\n    m = getmask(x)\n    if m is not nomask:\n        _vander[m] = 0\n    return _vander",
    "docstring": "Masked values in the input array result in rows of zeros.",
    "type": "function",
    "file_path": "numpy\\numpy\\ma\\extras.py",
    "ast_data": "FunctionDef name:vander arg:x arg:n arguments arg arg Assign Call Assign Call If Compare Assign Return return:yes"
  },
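A usage sketch showing the masked-row behavior; the data values are illustrative.

    import numpy as np
    import numpy.ma as ma

    x = ma.array([1.0, 2.0, 3.0], mask=[False, True, False])
    print(ma.vander(x, 3))
    # [[1. 1. 1.]
    #  [0. 0. 0.]   <- row for the masked value is zeroed
    #  [9. 3. 1.]]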
  {
    "library": "tensorflow",
    "name": "IdentityReader",
    "source_code": "@tf_export(v1=['IdentityReader'])\nclass IdentityReader(ReaderBase):\n\n    @deprecation.deprecated(None, 'Queue-based input pipelines have been replaced by `tf.data`. Use `tf.data.Dataset.map(...)`.')\n    def __init__(self, name=None):\n        rr = gen_io_ops.identity_reader_v2(name=name)\n        super(IdentityReader, self).__init__(rr, supports_serialize=True)",
    "docstring": "A Reader that outputs the queued work as both the key and value. To use, enqueue strings in a Queue. Read will take the front work string and output (work, work). See ReaderBase for supported methods. @compatibility(eager) Readers are not compatible with eager execution. Instead, please use to get data into your model. @end_compatibility",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\io_ops.py",
    "ast_data": "ClassDef name:IdentityReader FunctionDef name:__init__ arg:self arg:name arguments arg arg Assign Call Call Call Call Call"
  },
  {
    "library": "django",
    "name": "get_default_redirect_url",
    "source_code": "def get_default_redirect_url(self):\n    if self.next_page:\n        return resolve_url(self.next_page)\n    elif settings.LOGOUT_REDIRECT_URL:\n        return resolve_url(settings.LOGOUT_REDIRECT_URL)\n    else:\n        return self.request.path",
    "docstring": "Return the default redirect URL.",
    "type": "method",
    "file_path": "django\\django\\contrib\\auth\\views.py",
    "ast_data": "FunctionDef name:get_default_redirect_url arg:self arguments arg If Return return:yes Call If Return return:yes Call Return return:yes"
  },
  {
    "library": "scrapy",
    "name": "arglist_to_dict",
    "source_code": "def arglist_to_dict(arglist: list[str]) -> dict[str, str]:\n    return dict((x.split('=', 1) for x in arglist))",
    "docstring": "Convert a list of arguments like ['arg1=val1', 'arg2=val2', ...] to a dict",
    "type": "function",
    "file_path": "scrapy\\scrapy\\utils\\conf.py",
    "ast_data": "FunctionDef name:arglist_to_dict arg:arglist arguments arg Return return:yes Call Call"
  },
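A behavioral sketch: splitting on the first '=' only, so values may themselves contain '='; the helper is reproduced inline for a self-contained run.

    def arglist_to_dict(arglist):
        return dict(x.split("=", 1) for x in arglist)

    print(arglist_to_dict(["arg1=val1", "filter=price>=10"]))
    # {'arg1': 'val1', 'filter': 'price>=10'}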
  {
    "library": "django",
    "name": "get_current",
    "source_code": "def get_current(self, request=None):\n    from django.conf import settings\n    if getattr(settings, 'SITE_ID', ''):\n        site_id = settings.SITE_ID\n        return self._get_site_by_id(site_id)\n    elif request:\n        return self._get_site_by_request(request)\n    raise ImproperlyConfigured('You\\'re using the Django \"sites framework\" without having set the SITE_ID setting. Create a site in your database and set the SITE_ID setting or pass a request to Site.objects.get_current() to fix this error.')",
    "docstring": "Return the current Site based on the SITE_ID in the project's settings. If SITE_ID isn't defined, return the site with domain matching request.get_host(). The `` object is cached the first time it's retrieved from the database.",
    "type": "method",
    "file_path": "django\\django\\contrib\\sites\\models.py",
    "ast_data": "FunctionDef name:get_current arg:self arg:request arguments arg arg If Call Assign Return return:yes Call If Return return:yes Call Raise Call"
  },
  {
    "library": "tensorflow",
    "name": "function_call_options",
    "source_code": "@function_call_options.setter\ndef function_call_options(self, options):\n    self._thread_local_data.function_call_options = options",
    "docstring": "Returns function call options for current thread.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\context.py",
    "ast_data": "FunctionDef name:function_call_options arg:self arg:options arguments arg arg Assign"
  },
  {
    "library": "tensorflow",
    "name": "_with_precomputed_row_lengths",
    "source_code": "def _with_precomputed_row_lengths(self):\n    return RowPartition(row_splits=self._row_splits, row_lengths=self.row_lengths(), value_rowids=self._value_rowids, nrows=self._nrows, nvals=self._nvals, uniform_row_length=self._uniform_row_length, internal=_row_partition_factory_key)",
    "docstring": "Returns a copy of with precomputed.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\row_partition.py",
    "ast_data": "FunctionDef name:_with_precomputed_row_lengths arg:self arguments arg Return return:yes Call Call"
  },
  {
    "library": "pandas",
    "name": "_reindex_multi",
    "source_code": "def _reindex_multi(self, axes: dict[str, Index], fill_value) -> DataFrame:\n    new_index, row_indexer = self.index.reindex(axes['index'])\n    new_columns, col_indexer = self.columns.reindex(axes['columns'])\n    if row_indexer is not None and col_indexer is not None:\n        indexer = (row_indexer, col_indexer)\n        new_values = take_2d_multi(self.values, indexer, fill_value=fill_value)\n        return self._constructor(new_values, index=new_index, columns=new_columns, copy=False)\n    else:\n        return self._reindex_with_indexers({0: [new_index, row_indexer], 1: [new_columns, col_indexer]}, fill_value=fill_value)",
    "docstring": "We are guaranteed non-Nones in the axes.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\frame.py",
    "ast_data": "FunctionDef name:_reindex_multi arg:self arg:axes arg:fill_value arguments arg arg arg Assign Call Assign Call If BoolOp Compare Compare Assign Assign Call Return return:yes Call Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "conjugate",
    "source_code": "def conjugate(self, copy=True):\n    if np.issubdtype(self.dtype, np.complexfloating):\n        return self.tocsr(copy=copy).conjugate(copy=False)\n    elif copy:\n        return self.copy()\n    else:\n        return self",
    "docstring": "Element-wise complex conjugation. If the array/matrix is of non-complex data type and is False, this method does nothing and the data is not copied. Parameters ---------- copy : bool, optional If True, the result is guaranteed to not share data with self. Returns ------- A : The element-wise complex conjugate.",
    "type": "method",
    "file_path": "scipy\\scipy\\sparse\\_base.py",
    "ast_data": "FunctionDef name:conjugate arg:self arg:copy arguments arg arg If Call Return return:yes Call Call If Return return:yes Call Return return:yes"
  },
  {
    "library": "numpy",
    "name": "getargs",
    "source_code": "def getargs(co):\n    if not iscode(co):\n        raise TypeError('arg is not a code object')\n    nargs = co.co_argcount\n    names = co.co_varnames\n    args = list(names[:nargs])\n    for i in range(nargs):\n        if args[i][:1] in ['', '.']:\n            raise TypeError('tuple function arguments are not supported')\n    varargs = None\n    if co.co_flags & CO_VARARGS:\n        varargs = co.co_varnames[nargs]\n        nargs = nargs + 1\n    varkw = None\n    if co.co_flags & CO_VARKEYWORDS:\n        varkw = co.co_varnames[nargs]\n    return (args, varargs, varkw)",
    "docstring": "Get information about the arguments accepted by a code object. Three things are returned: (args, varargs, varkw), where 'args' is a list of argument names (possibly containing nested lists), and 'varargs' and 'varkw' are the names of the * and ** arguments or None.",
    "type": "function",
    "file_path": "numpy\\numpy\\_utils\\_inspect.py",
    "ast_data": "FunctionDef name:getargs arg:co arguments arg If Call Raise Call Assign Assign Assign Call For Call If Compare Raise Call Assign If Assign Assign Assign If Assign Return return:yes"
  },
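A usage sketch against a function's code object; the import path matches the file_path above but is a private numpy helper and may move between versions.

    from numpy._utils._inspect import getargs

    def f(a, b, *rest, **kw):
        return None

    print(getargs(f.__code__))  # (['a', 'b'], 'rest', 'kw')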
  {
    "library": "tensorflow",
    "name": "_add_trackable",
    "source_code": "@doc_controls.for_subclass_implementers\ndef _add_trackable(self, trackable_object, trainable):\n    if isinstance(trackable_object, base_layer_utils.TrackableWeightHandler):\n        handler = trackable_object\n    else:\n        handler = base_layer_utils.TrackableWeightHandler(trackable_object)\n    if trainable:\n        self._trainable_weights.append(handler)\n    else:\n        self._non_trainable_weights.append(handler)\n    return handler",
    "docstring": "Adds a Trackable object to this layer's state. Args: trackable_object: The tf.tracking.Trackable object to add. trainable: Boolean, whether the variable should be part of the layer's \"trainable_variables\" (e.g. variables, biases) or \"non_trainable_variables\" (e.g. BatchNorm mean and variance). Returns: The TrackableWeightHandler used to track this object.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\base_layer.py",
    "ast_data": "FunctionDef name:_add_trackable arg:self arg:trackable_object arg:trainable arguments arg arg arg If Call Assign Assign Call If Call Call Return return:yes"
  },
  {
    "library": "sphinx",
    "name": "apply_translated_message",
    "source_code": "def apply_translated_message(self, original_message: str, translated_message: str) -> None:\n    raise NotImplementedError",
    "docstring": "Apply translated message.",
    "type": "method",
    "file_path": "sphinx\\sphinx\\addnodes.py",
    "ast_data": "FunctionDef name:apply_translated_message arg:self arg:original_message arg:translated_message arguments arg arg arg Raise"
  },
  {
    "library": "kornia",
    "name": "get_grid_dict",
    "source_code": "def get_grid_dict(patch_size: int=32) -> Dict[str, Tensor]:\n    kgrid = create_meshgrid(height=patch_size, width=patch_size, normalized_coordinates=True)\n    x = kgrid[0, :, :, 0]\n    y = kgrid[0, :, :, 1]\n    rho, phi = cart2pol(x, y)\n    grid_dict = {'x': x, 'y': y, 'rho': rho, 'phi': phi}\n    return grid_dict",
    "docstring": "Get cartesian and polar parametrizations of grid.",
    "type": "function",
    "file_path": "kornia\\kornia\\feature\\mkd.py",
    "ast_data": "FunctionDef name:get_grid_dict arg:patch_size arguments arg Assign Call Assign Assign Assign Call Assign Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "__call__",
    "source_code": "def __call__(self, x, pos=None):\n    raise NotImplementedError('Derived must override')",
    "docstring": "Return the format for tick value *x* at position pos. `` indicates an unspecified location.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\ticker.py",
    "ast_data": "FunctionDef name:__call__ arg:self arg:x arg:pos arguments arg arg arg Raise Call"
  },
  {
    "library": "pandas",
    "name": "find_titles",
    "source_code": "def find_titles(rst_file: str) -> Iterable[tuple[str, int]]:\n    with open(rst_file, encoding='utf-8') as fd:\n        previous_line = ''\n        for i, line in enumerate(fd):\n            line_no_last_elem = line[:-1]\n            line_chars = set(line_no_last_elem)\n            if len(line_chars) == 1 and line_chars.pop() in symbols and (len(line_no_last_elem) == len(previous_line)):\n                yield (re.sub('[`\\\\*_]', '', previous_line), i)\n            previous_line = line_no_last_elem",
    "docstring": "Algorithm to identify particular text that should be considered headings in an RST file. See for details on what constitutes a string as a heading in RST. Parameters ---------- rst_file : str RST file to scan through for headings. Yields ------- title : str A heading found in the rst file. line_number : int The corresponding line number of the heading.",
    "type": "function",
    "file_path": "pandas\\scripts\\validate_rst_title_capitalization.py",
    "ast_data": "FunctionDef name:find_titles arg:rst_file arguments arg With Call Assign For Call Assign Assign Call If BoolOp Compare Call Compare Call Compare Call Call Call Assign"
  },
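An inlined sketch of the detection rule on in-memory lines; the symbols set below is a hypothetical subset of the underline characters the script actually accepts.

    import re

    symbols = set('*=-^"~')  # hypothetical subset of RST underline characters

    def find_titles_from_lines(lines):
        previous = ""
        for i, line in enumerate(lines):
            body = line.rstrip("\n")
            chars = set(body)
            # a heading is a line of one repeated symbol whose length
            # matches the previous line's length
            if len(chars) == 1 and chars.pop() in symbols and len(body) == len(previous):
                yield re.sub(r"[`\*_]", "", previous), i
            previous = body

    print(list(find_titles_from_lines(["Getting started\n", "===============\n"])))
    # [('Getting started', 1)]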
  {
    "library": "pytorch",
    "name": "get_cmake_cache_variables_from_file",
    "source_code": "def get_cmake_cache_variables_from_file(cmake_cache_file: IO[str]) -> dict[str, CMakeValue]:\n    results = {}\n    for i, line in enumerate(cmake_cache_file, 1):\n        line = line.strip()\n        if not line or line.startswith(('#', '//')):\n            continue\n        matched = re.match('(\"?)(.+?)\\\\1(?::\\\\s*([a-zA-Z_-][a-zA-Z0-9_-]*)?)?\\\\s*=\\\\s*(.*)', line)\n        if matched is None:\n            raise ValueError(f'Unexpected line {i} in {repr(cmake_cache_file)}: {line}')\n        _, variable, type_, value = matched.groups()\n        if type_ is None:\n            type_ = ''\n        if type_.upper() in ('INTERNAL', 'STATIC'):\n            continue\n        results[variable] = convert_cmake_value_to_python_value(value, type_)\n    return results",
    "docstring": "Gets values in CMakeCache.txt into a dictionary. Args: cmake_cache_file: A CMakeCache.txt file object. Returns: dict: A `` containing the value of cached CMake variables.",
    "type": "function",
    "file_path": "pytorch\\tools\\setup_helpers\\cmake_utils.py",
    "ast_data": "FunctionDef name:get_cmake_cache_variables_from_file arg:cmake_cache_file arguments arg Assign For Call Assign Call If BoolOp Call Assign Call If Compare Raise Call Call Assign Call If Compare Assign If Compare Call Assign Call Return return:yes"
  },
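A usage sketch with an in-memory CMakeCache.txt; the real helper delegates value coercion to convert_cmake_value_to_python_value, which is replaced by a plain string pass-through here.

    import io, re

    cache = io.StringIO(
        "# comment line\n"
        "CMAKE_BUILD_TYPE:STRING=Release\n"
        "USE_CUDA:BOOL=ON\n"
        "HIDDEN:INTERNAL=x\n"
    )

    results = {}
    for i, line in enumerate(cache, 1):
        line = line.strip()
        if not line or line.startswith(("#", "//")):
            continue  # skip blanks and comments
        m = re.match(r'("?)(.+?)\1(?::\s*([a-zA-Z_-][a-zA-Z0-9_-]*)?)?\s*=\s*(.*)', line)
        _, variable, type_, value = m.groups()
        if (type_ or "").upper() in ("INTERNAL", "STATIC"):
            continue  # cache-internal entries are dropped
        results[variable] = value  # the real code coerces BOOL/STRING values here
    print(results)  # {'CMAKE_BUILD_TYPE': 'Release', 'USE_CUDA': 'ON'}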
  {
    "library": "tensorflow",
    "name": "copy_from",
    "source_code": "def copy_from(self, other):\n    assert not self.is_final\n    if self.parent is not None:\n        assert other.parent is not None\n        self.parent.copy_from(other.parent)\n    self.isolated_names = copy.copy(other.isolated_names)\n    self.modified = copy.copy(other.modified)\n    self.read = copy.copy(other.read)\n    self.deleted = copy.copy(other.deleted)\n    self.bound = copy.copy(other.bound)\n    self.annotations = copy.copy(other.annotations)\n    self.params = copy.copy(other.params)",
    "docstring": "Recursively copies the contents of this scope from another scope.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\autograph\\pyct\\static_analysis\\activity.py",
    "ast_data": "FunctionDef name:copy_from arg:self arg:other arguments arg arg If Compare Compare Call Assign Call Assign Call Assign Call Assign Call Assign Call Assign Call Assign Call"
  },
  {
    "library": "scikit-learn",
    "name": "fit",
    "source_code": "def fit(self, X, y=None):\n    self._reset()\n    return self.partial_fit(X, y)",
    "docstring": "Compute the minimum and maximum to be used for later scaling. Parameters ---------- X : array-like of shape (n_samples, n_features) The data used to compute the per-feature minimum and maximum used for later scaling along the features axis. y : None Ignored. Returns ------- self : object Fitted scaler.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\preprocessing\\_data.py",
    "ast_data": "FunctionDef name:fit arg:self arg:X arg:y arguments arg arg arg Call Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "get_coeffs",
    "source_code": "def get_coeffs(self):\n    data = self._data\n    k, n = (data[5], data[7])\n    return data[9][:n - k - 1]",
    "docstring": "Return spline coefficients.",
    "type": "method",
    "file_path": "scipy\\scipy\\interpolate\\_fitpack2.py",
    "ast_data": "FunctionDef name:get_coeffs arg:self arguments arg Assign Assign Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "memory_allocated",
    "source_code": "def memory_allocated(device: _device_t=None) -> int:\n    return memory_stats(device=device).get('allocated_bytes.all.current', 0)",
    "docstring": "Return the current GPU memory occupied by tensors in bytes for a given device. Args: device (torch.device or int or str, optional): selected device. Returns statistic for the current device, given by :func:, if :attr: is `xpu-smi` since some unused memory can be held by the caching allocator and some context needs to be created on GPU.",
    "type": "function",
    "file_path": "pytorch\\torch\\xpu\\memory.py",
    "ast_data": "FunctionDef name:memory_allocated arg:device arguments arg Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "stack",
    "source_code": "@dispatch.add_dispatch_support\n@doc_controls.do_not_generate_docs\ndef stack(x, axis=0):\n    return array_ops_stack.stack(x, axis=axis)",
    "docstring": "Stacks a list of rank tensors into a rank tensor. Args: x: List of tensors. axis: Axis along which to perform stacking. Returns: A tensor. Example: >>> a = tf.constant([[1, 2],[3, 4]]) >>> b = tf.constant([[10, 20],[30, 40]]) >>> tf.keras.backend.stack((a, b))",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\backend.py",
    "ast_data": "FunctionDef name:stack arg:x arg:axis arguments arg arg Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "where",
    "source_code": "def where(cond, left_op, right_op, use_numexpr: bool=True):\n    assert _where is not None\n    if use_numexpr:\n        return _where(cond, left_op, right_op)\n    else:\n        return _where_standard(cond, left_op, right_op)",
    "docstring": "Evaluate the where condition cond on left_op and right_op. Parameters ---------- cond : np.ndarray[bool] left_op : return if cond is True right_op : return if cond is False use_numexpr : bool, default True Whether to try to use numexpr.",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\computation\\expressions.py",
    "ast_data": "FunctionDef name:where arg:cond arg:left_op arg:right_op arg:use_numexpr arguments arg arg arg arg Compare If Return return:yes Call Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "NestedFixedRule",
    "source_code": "class NestedFixedRule(FixedRule):\n\n    def __init__(self, higher, lower):\n        self.higher = higher\n        self.lower = lower\n        self.xp = None\n\n    @property\n    def nodes_and_weights(self):\n        if self.higher is not None:\n            return self.higher.nodes_and_weights\n        else:\n            raise NotImplementedError\n\n    @property\n    def lower_nodes_and_weights(self):\n        if self.lower is not None:\n            return self.lower.nodes_and_weights\n        else:\n            raise NotImplementedError\n\n    def estimate_error(self, f, a, b, args=()):\n        nodes, weights = self.nodes_and_weights\n        lower_nodes, lower_weights = self.lower_nodes_and_weights\n        if self.xp is None:\n            self.xp = array_namespace(nodes)\n        error_nodes = self.xp.concat([nodes, lower_nodes], axis=0)\n        error_weights = self.xp.concat([weights, -lower_weights], axis=0)\n        return self.xp.abs(_apply_fixed_rule(f, a, b, error_nodes, error_weights, args, self.xp))",
    "docstring": "A cubature rule with error estimate given by the difference between two underlying fixed rules. If constructed as ``, this will use:: estimate(f, a, b) := higher.estimate(f, a, b) estimate_error(f, a, b) := \\|higher.estimate(f, a, b) - lower.estimate(f, a, b)| (where the absolute value is taken elementwise). Attributes ---------- higher : Rule Higher accuracy rule. lower : Rule Lower accuracy rule. See Also -------- GaussKronrodQuadrature Examples -------- >>> from scipy.integrate import cubature >>> from scipy.integrate._rules import ( ... GaussLegendreQuadrature, NestedFixedRule, ProductNestedFixed ... ) >>> higher = GaussLegendreQuadrature(10) >>> lower = GaussLegendreQuadrature(5) >>> rule = NestedFixedRule( ... higher, ... lower ... ) >>> rule_2d = ProductNestedFixed([rule, rule])",
    "type": "class",
    "file_path": "scipy\\scipy\\integrate\\_rules\\_base.py",
    "ast_data": "ClassDef name:NestedFixedRule FunctionDef name:__init__ arg:self arg:higher arg:lower arguments arg arg arg Assign Assign Assign FunctionDef name:nodes_and_weights arg:self arguments arg If Compare Return return:yes Raise FunctionDef name:lower_nodes_and_weights arg:self arguments arg If Compare Return return:yes Raise FunctionDef name:estimate_error arg:self arg:f arg:a arg:b arg:args arguments arg arg arg arg arg Assign Assign If Compare Assign Call Assign Call Assign Call Return return:yes Call Call"
  },
  {
    "library": "django",
    "name": "send_messages",
    "source_code": "def send_messages(self, email_messages):\n    if not email_messages:\n        return 0\n    with self._lock:\n        new_conn_created = self.open()\n        if not self.connection or new_conn_created is None:\n            return 0\n        num_sent = 0\n        try:\n            for message in email_messages:\n                sent = self._send(message)\n                if sent:\n                    num_sent += 1\n        finally:\n            if new_conn_created:\n                self.close()\n    return num_sent",
    "docstring": "Send one or more EmailMessage objects and return the number of email messages sent.",
    "type": "method",
    "file_path": "django\\django\\core\\mail\\backends\\smtp.py",
    "ast_data": "FunctionDef name:send_messages arg:self arg:email_messages arguments arg arg If Return return:yes With Assign Call If BoolOp Compare Return return:yes Assign Try For Assign Call If If Call Return return:yes"
  },
  {
    "library": "virtualenv",
    "name": "Activator",
    "source_code": "class Activator(ABC):\n\n    def __init__(self, options) -> None:\n        self.flag_prompt = os.path.basename(os.getcwd()) if options.prompt == '.' else options.prompt\n\n    @classmethod\n    def supports(cls, interpreter):\n        return True\n\n    @classmethod\n    def add_parser_arguments(cls, parser, interpreter):\n        pass\n\n    @abstractmethod\n    def generate(self, creator):\n        raise NotImplementedError",
    "docstring": "Generates activate script for the virtual environment.",
    "type": "class",
    "file_path": "virtualenv\\src\\virtualenv\\activation\\activator.py",
    "ast_data": "ClassDef name:Activator FunctionDef name:__init__ arg:self arg:options arguments arg arg Assign Compare Call Call FunctionDef name:supports arg:cls arg:interpreter arguments arg arg Return return:yes FunctionDef name:add_parser_arguments arg:cls arg:parser arg:interpreter arguments arg arg arg FunctionDef name:generate arg:self arg:creator arguments arg arg Raise"
  },
  {
    "library": "pytorch",
    "name": "get_safe_globals",
    "source_code": "def get_safe_globals() -> list[Union[Callable, tuple[Callable, str]]]:\n    return _weights_only_unpickler._get_safe_globals()",
    "docstring": "Returns the list of user-added globals that are safe for `` load.",
    "type": "function",
    "file_path": "pytorch\\torch\\serialization.py",
    "ast_data": "FunctionDef name:get_safe_globals arguments Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "get_co_vertices",
    "source_code": "def get_co_vertices(self):\n    if self.width < self.height:\n        ret = self.get_patch_transform().transform([(1, 0), (-1, 0)])\n    else:\n        ret = self.get_patch_transform().transform([(0, 1), (0, -1)])\n    return [tuple(x) for x in ret]",
    "docstring": "Return the co-vertices coordinates of the ellipse. The definition can be found _ .. versionadded:: 3.8",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\patches.py",
    "ast_data": "FunctionDef name:get_co_vertices arg:self arguments arg If Compare Assign Call Call Assign Call Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, sv, sess):\n    super(SVTimerCheckpointThread, self).__init__(sv.coord, sv.save_model_secs)\n    self._sv = sv\n    self._sess = sess",
    "docstring": "Create a . Args: sv: A . sess: A .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\training\\supervisor.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:sv arg:sess arguments arg arg arg Call Call Assign Assign"
  },
  {
    "library": "scipy",
    "name": "notes",
    "source_code": "@click.command()\n@click.argument('version_args', nargs=2)\n@click.pass_context\ndef notes(ctx_obj, version_args):\n    if version_args:\n        sys.argv = version_args\n        log_start = sys.argv[0]\n        log_end = sys.argv[1]\n    cmd = ['python', 'tools/write_release_and_log.py', f'{log_start}', f'{log_end}']\n    click.secho(' '.join(cmd), bold=True, fg='bright_blue')\n    util.run(cmd)",
    "docstring": "Release notes and log generation. Example: spin notes v1.7.0 v1.8.0",
    "type": "function",
    "file_path": "scipy\\.spin\\cmds.py",
    "ast_data": "FunctionDef name:notes arg:ctx_obj arg:version_args arguments arg arg If Assign Assign Assign Assign Call Call Call Call Call"
  },
  {
    "library": "django",
    "name": "z",
    "source_code": "@property\ndef z(self):\n    if self.is_3d:\n        return self._listarr(capi.getz)",
    "docstring": "Return the Z coordinates in a list.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\gdal\\geometries.py",
    "ast_data": "FunctionDef name:z arg:self arguments arg If Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "smart_case",
    "source_code": "def smart_case(pred_fn_pairs, default=None, exclusive=False, name='smart_case'):\n    return control_flow_case._case_helper(smart_cond, pred_fn_pairs, default, exclusive, name, allow_python_preds=True)",
    "docstring": "Like tf.case, except attempts to statically evaluate predicates. If any predicate in is a bool or has a constant value, the associated callable will be called or omitted depending on its value. Otherwise this functions like tf.case. Args: pred_fn_pairs: Dict or list of pairs of a boolean scalar tensor and a callable which returns a list of tensors. default: Optional callable that returns a list of tensors. exclusive: True iff at most one predicate is allowed to evaluate to . name: A name for this operation (optional). Returns: The tensors returned by the first pair whose predicate evaluated to True, or those returned by if none does. Raises: TypeError: If is not a list/dictionary. TypeError: If is a list but does not contain 2-tuples. TypeError: If is not callable for any i, or is not callable.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\smart_cond.py",
    "ast_data": "FunctionDef name:smart_case arg:pred_fn_pairs arg:default arg:exclusive arg:name arguments arg arg arg arg Return return:yes Call"
  },
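A usage sketch for the entry above; `smart_case` is internal (not exported under `tf.*`), so the import path follows the file_path in the record. Constant predicates are resolved at trace time:

```python
import tensorflow as tf
from tensorflow.python.framework import smart_cond

x = tf.constant(3.0)
result = smart_cond.smart_case(
    [(tf.constant(True), lambda: x * 2.0),  # constant pred -> branch chosen statically
     (x < 0.0, lambda: -x)],
    default=lambda: x,
    exclusive=False)
print(result)  # tf.Tensor(6.0, ...)
```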
  {
    "library": "scipy",
    "name": "mark_as",
    "source_code": "def mark_as(dispatch_type):\n    return functools.partial(Dispatchable, dispatch_type=dispatch_type)",
    "docstring": "Creates a utility function to mark something as a specific type. Examples -------- >>> mark_int = mark_as(int) >>> mark_int(1) , value=1>",
    "type": "function",
    "file_path": "scipy\\scipy\\_lib\\_uarray\\_backend.py",
    "ast_data": "FunctionDef name:mark_as arg:dispatch_type arguments arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "_causal_attention_mask",
    "source_code": "def _causal_attention_mask(g: jit_utils.GraphContext, query: torch._C.Value, key: torch._C.Value) -> torch._C.Value:\n    query_shape = g.op('Shape', query)\n    key_shape = g.op('Shape', key)\n    last_idx = g.op('Constant', value_t=torch.tensor([-1], dtype=torch.int64))\n    second_last_idx = g.op('Constant', value_t=torch.tensor([-2], dtype=torch.int64))\n    target_length = g.op('Slice', query_shape, second_last_idx, last_idx)\n    source_length = g.op('Slice', key_shape, second_last_idx, last_idx)\n    size = g.op('Concat', target_length, source_length, axis_i=0)\n    const_one = g.op('Constant', value_t=torch.tensor([1.0]))\n    attn_mask = g.op('Expand', const_one, size)\n    attn_mask = g.op('Trilu', attn_mask, upper_i=0)\n    const_zero = g.op('Constant', value_t=torch.tensor([0.0]))\n    const_neg_inf = g.op('Constant', value_t=torch.tensor([-float('inf')]))\n    attn_mask = g.op('Where', g.op('Equal', attn_mask, const_zero), const_neg_inf, const_zero)\n    return attn_mask",
    "docstring": "Create a causal mask for the given query and key tensors. Equivalent to:: mask = torch.ones(L, S, dtype=torch.bool).tril(diagonal=0) attn_mask = torch.zeros(L, S, dtype=torch.float) attn_mask = attn_mask.masked_fill(not mask, -float(\"inf\")) Args: query: Tensor of shape [..., L, E] key: Tensor of shape [..., S, E] Returns: Tensor of shape [L, S]",
    "type": "function",
    "file_path": "pytorch\\torch\\onnx\\symbolic_opset14.py",
    "ast_data": "FunctionDef name:_causal_attention_mask arg:g arg:query arg:key arguments arg arg arg Assign Call Assign Call Assign Call Call Assign Call Call Assign Call Assign Call Assign Call Assign Call Call Assign Call Assign Call Assign Call Call Assign Call Call Call Assign Call Call Return return:yes"
  },
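The docstring above states a tensor-level equivalence for the mask the ONNX helper builds; rendered here as runnable PyTorch for clarity:

```python
# Lower-triangular positions stay 0.0; everything else becomes -inf.
import torch

L, S = 4, 6  # target and source lengths (arbitrary here)
mask = torch.ones(L, S, dtype=torch.bool).tril(diagonal=0)
attn_mask = torch.zeros(L, S, dtype=torch.float)
attn_mask = attn_mask.masked_fill(~mask, -float("inf"))
print(attn_mask)
```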
  {
    "library": "tensorflow",
    "name": "graph_parents",
    "source_code": "@property\n@deprecation.deprecated(None, 'Do not call `graph_parents`.')\ndef graph_parents(self):\n    return self._graph_parents",
    "docstring": "List of graph dependencies of this .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\linalg\\linear_operator.py",
    "ast_data": "FunctionDef name:graph_parents arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "authlib",
    "name": "validate_userinfo_encrypted_response_alg",
    "source_code": "def validate_userinfo_encrypted_response_alg(self):\n    self._validate_claim_value('userinfo_encrypted_response_alg')",
    "docstring": "JWE [JWE] alg algorithm [JWA] REQUIRED for encrypting UserInfo Responses. If both signing and encryption are requested, the response will be signed then encrypted, with the result being a Nested JWT, as defined in [JWT]. The default, if omitted, is that no encryption is performed.",
    "type": "method",
    "file_path": "authlib\\authlib\\oidc\\registration\\claims.py",
    "ast_data": "FunctionDef name:validate_userinfo_encrypted_response_alg arg:self arguments arg Call"
  },
  {
    "library": "tensorflow",
    "name": "_EnterGrad",
    "source_code": "@ops.RegisterGradient('Enter')\ndef _EnterGrad(op, grad):\n    graph = ops.get_default_graph()\n    grad_ctxt = graph._get_control_flow_context()\n    if grad_ctxt is None:\n        return grad\n    if not grad_ctxt.back_prop:\n        return grad\n    if grad_ctxt.grad_state is None:\n        return grad\n    if op.get_attr('is_constant'):\n        if isinstance(grad, tensor.Tensor):\n            result = grad_ctxt.AddBackpropAccumulator(op, grad)\n        elif isinstance(grad, indexed_slices.IndexedSlices):\n            result = grad_ctxt.AddBackpropIndexedSlicesAccumulator(op, grad)\n        else:\n            raise TypeError(f'Type {type(grad)} not supported,must be Tensor or Indexed Slices')\n    else:\n        result = exit(grad)\n        grad_ctxt.loop_exits.append(result)\n        grad_ctxt.ExitResult([result])\n    return result",
    "docstring": "Gradients for an Enter are calculated using an Exit op. For loop variables, grad is the gradient so just add an exit. For loop invariants, we need to add an accumulator loop.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\control_flow_grad.py",
    "ast_data": "FunctionDef name:_EnterGrad arg:op arg:grad arguments arg arg Assign Call Assign Call If Compare Return return:yes If Return return:yes If Compare Return return:yes If Call If Call Assign Call If Call Assign Call Raise Call Call Assign Call Call Call Return return:yes Call"
  },
  {
    "library": "cryptography",
    "name": "put_mpint",
    "source_code": "def put_mpint(self, val: int) -> None:\n    self.put_sshstr(_to_mpint(val))",
    "docstring": "Big-endian bigint prefixed with u32 length",
    "type": "method",
    "file_path": "cryptography\\src\\cryptography\\hazmat\\primitives\\serialization\\ssh.py",
    "ast_data": "FunctionDef name:put_mpint arg:self arg:val arguments arg arg Call Call"
  },
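The `put_mpint` docstring above summarizes the SSH mpint wire format. The real `_to_mpint` helper is private to the cryptography package, so this sketch re-implements the format (RFC 4251) for non-negative values only:

```python
import struct

def to_mpint(val: int) -> bytes:
    # Big-endian magnitude prefixed with a u32 length; the rounding in
    # (bit_length + 8) // 8 adds a leading zero byte whenever the high
    # bit is set, so positive values never read as negative two's
    # complement. Negative values are out of scope for this sketch.
    body = val.to_bytes((val.bit_length() + 8) // 8, "big") if val else b""
    return struct.pack(">I", len(body)) + body

assert to_mpint(0) == b"\x00\x00\x00\x00"
assert to_mpint(0x7F) == b"\x00\x00\x00\x01\x7f"
assert to_mpint(0x80) == b"\x00\x00\x00\x02\x00\x80"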
  {
    "library": "tensorflow",
    "name": "kaiser_window",
    "source_code": "@tf_export('signal.kaiser_window')\n@dispatch.add_dispatch_support\ndef kaiser_window(window_length, beta=12.0, dtype=dtypes.float32, name=None):\n    with ops.name_scope(name, 'kaiser_window'):\n        window_length = _check_params(window_length, dtype)\n        window_length_const = tensor_util.constant_value(window_length)\n        if window_length_const == 1:\n            return array_ops.ones([1], dtype=dtype)\n        halflen_float = (math_ops.cast(window_length, dtype=dtypes.float32) - 1.0) / 2.0\n        arg = math_ops.range(-halflen_float, halflen_float + 0.1, dtype=dtypes.float32)\n        arg = math_ops.cast(arg, dtype=dtype)\n        beta = math_ops.cast(beta, dtype=dtype)\n        one = math_ops.cast(1.0, dtype=dtype)\n        halflen_float = math_ops.cast(halflen_float, dtype=dtype)\n        num = beta * math_ops.sqrt(nn_ops.relu(one - math_ops.square(arg / halflen_float)))\n        window = math_ops.exp(num - beta) * (special_math_ops.bessel_i0e(num) / special_math_ops.bessel_i0e(beta))\n    return window",
    "docstring": "Generate a [Kaiser window][kaiser]. Args: window_length: A scalar indicating the window length to generate. beta: Beta parameter for Kaiser window, see reference below. dtype: The data type to produce. Must be a floating point type. name: An optional name for the operation. Returns: A of shape of type . [kaiser]:",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\signal\\window_ops.py",
    "ast_data": "FunctionDef name:kaiser_window arg:window_length arg:beta arg:dtype arg:name arguments arg arg arg arg With Call Assign Call Assign Call If Compare Return return:yes Call Assign Call Assign Call Assign Call Assign Call Assign Call Assign Call Assign Call Call Call Assign Call Call Call Return return:yes Call"
  },
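A short usage sketch; the `tf_export` decorator in the source above exposes this as `tf.signal.kaiser_window`:

```python
import tensorflow as tf

window = tf.signal.kaiser_window(window_length=64, beta=12.0)
print(window.shape, window.dtype)  # (64,) float32
```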
  {
    "library": "tensorflow",
    "name": "bound_method_wrapper",
    "source_code": "def bound_method_wrapper(*args, **kwargs):\n    strong_bound_method_wrapper = weak_bound_method_wrapper()\n    wrapped_fn = strong_bound_method_wrapper.__wrapped__\n    if wrapped_fn is strong_bound_method_wrapper.__original_wrapped__:\n        wrapped_fn = original_function.python_function\n        return wrapped_fn(weak_instance(), *args, **kwargs)\n    return wrapped_fn(*args, **kwargs)",
    "docstring": "Wraps either a dummy MethodType or a converted AutoGraph function.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\polymorphic_function\\polymorphic_function.py",
    "ast_data": "FunctionDef name:bound_method_wrapper arguments arg arg Assign Call Assign If Compare Assign Return return:yes Call Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "post_localSGD_hook",
    "source_code": "def post_localSGD_hook(state: PostLocalSGDState, bucket: dist.GradBucket) -> torch.futures.Future[torch.Tensor]:\n    global_group_to_use = state.process_group if state.process_group is not None else dist.group.WORLD\n    input_tensor = bucket.buffer()\n    if state.iter < state.start_localSGD_iter:\n        state.maybe_increase_iter(bucket)\n        return default._allreduce_fut(global_group_to_use, input_tensor)\n    if not state.post_local_gradient_allreduce:\n        fut: torch.futures.Future[torch.Tensor] = torch.futures.Future()\n        fut.set_result(input_tensor)\n        return fut\n    if state.subgroup is None:\n        state.subgroup, _ = dist.new_subgroups()\n    return default._allreduce_fut(state.subgroup, input_tensor)",
    "docstring": "Run post-localSGD algorithm. This DDP communication hook is used for running post-localSGD algorithm, by combining with a model averaging component (e.g., :class:) that runs after the optimizer step. Args: state (PostLocalSGDState): State information to run post-localSGD. Users mainly need to tune `` module.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\algorithms\\ddp_comm_hooks\\post_localSGD_hook.py",
    "ast_data": "FunctionDef name:post_localSGD_hook arg:state arg:bucket arguments arg arg Assign Compare Assign Call If Compare Call Return return:yes Call If Call Call Return return:yes If Compare Assign Call Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "clear",
    "source_code": "def clear(self, event):\n    if self.ignore(event) or self.canvas.is_saving():\n        return\n    if self.useblit:\n        self.background = self.canvas.copy_from_bbox(self.ax.bbox)",
    "docstring": "Internal event handler to clear the cursor.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\widgets.py",
    "ast_data": "FunctionDef name:clear arg:self arg:event arguments arg arg If BoolOp Call Call Return return:no If Assign Call"
  },
  {
    "library": "tensorflow",
    "name": "_assert_same_base_type",
    "source_code": "def _assert_same_base_type(items, expected_type=None):\n    original_expected_type = expected_type\n    mismatch = False\n    for item in items:\n        if item is not None:\n            item_type = item.dtype.base_dtype\n            if not expected_type:\n                expected_type = item_type\n            elif expected_type != item_type:\n                mismatch = True\n                break\n    if mismatch:\n        expected_type = original_expected_type\n        original_item_str = None\n        for item in items:\n            if item is not None:\n                item_type = item.dtype.base_dtype\n                if not expected_type:\n                    expected_type = item_type\n                    original_item_str = item.name if hasattr(item, 'name') else str(item)\n                elif expected_type != item_type:\n                    raise ValueError('%s, type=%s, must be of the same type (%s)%s.' % (item.name if hasattr(item, 'name') else str(item), item_type, expected_type, ' as %s' % original_item_str if original_item_str else ''))\n        return expected_type\n    else:\n        return expected_type",
    "docstring": "Asserts all items are of the same base type. Args: items: List of graph items (e.g., , , , , or ). Can include elements, which will be ignored. expected_type: Expected type. If not specified, assert all items are of the same base type. Returns: Validated type, or none if neither expected_type nor items provided. Raises: ValueError: If any types do not match.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\check_ops.py",
    "ast_data": "FunctionDef name:_assert_same_base_type arg:items arg:expected_type arguments arg arg Assign Assign For If Compare Assign If Assign If Compare Assign If Assign Assign For If Compare Assign If Assign Assign Call Call If Compare Raise Call Call Call Return return:yes Return return:yes"
  },
  {
    "library": "pygame",
    "name": "encode",
    "source_code": "def encode(pos, b_box):\n    return (pos[0] < b_box.left) * LEFT_EDGE + (pos[0] > b_box.right) * RIGHT_EDGE + (pos[1] < b_box.top) * TOP_EDGE + (pos[1] > b_box.bottom) * BOTTOM_EDGE",
    "docstring": "returns a code that defines position with respect to a bounding box",
    "type": "function",
    "file_path": "pygame\\src_py\\draw_py.py",
    "ast_data": "FunctionDef name:encode arg:pos arg:b_box arguments arg arg Return return:yes Compare Compare Compare Compare"
  },
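A hedged sketch of the out-code test in the `encode` entry above; the `*_EDGE` bit values here are assumptions for illustration, not necessarily the module's actual constants:

```python
import pygame

LEFT_EDGE, RIGHT_EDGE, TOP_EDGE, BOTTOM_EDGE = 0x1, 0x2, 0x4, 0x8  # assumed values

def encode(pos, b_box):
    # Each out-of-bounds direction contributes one bit to the code.
    return ((pos[0] < b_box.left) * LEFT_EDGE
            + (pos[0] > b_box.right) * RIGHT_EDGE
            + (pos[1] < b_box.top) * TOP_EDGE
            + (pos[1] > b_box.bottom) * BOTTOM_EDGE)

box = pygame.Rect(0, 0, 10, 10)
print(encode((-1, -1), box))  # LEFT_EDGE | TOP_EDGE == 0x5: up-left of the box
print(encode((5, 5), box))    # 0: inside the box
```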
  {
    "library": "tensorflow",
    "name": "create_pseudo_input_names",
    "source_code": "def create_pseudo_input_names(inputs):\n    return _create_pseudo_names(inputs, prefix='input_')",
    "docstring": "Create pseudo input names for a subclassed Model.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\compile_utils.py",
    "ast_data": "FunctionDef name:create_pseudo_input_names arg:inputs arguments arg Return return:yes Call"
  },
  {
    "library": "django",
    "name": "clone",
    "source_code": "def clone(self):\n    obj = Empty()\n    obj.__class__ = self.__class__\n    obj.__dict__ = self.__dict__.copy()\n    obj.alias_refcount = self.alias_refcount.copy()\n    obj.alias_map = self.alias_map.copy()\n    obj.external_aliases = self.external_aliases.copy()\n    obj.table_map = self.table_map.copy()\n    obj.where = self.where.clone()\n    obj.annotations = self.annotations.copy()\n    if self.annotation_select_mask is not None:\n        obj.annotation_select_mask = self.annotation_select_mask.copy()\n    if self.combined_queries:\n        obj.combined_queries = tuple([query.clone() for query in self.combined_queries])\n    obj._annotation_select_cache = None\n    obj.extra = self.extra.copy()\n    if self.extra_select_mask is not None:\n        obj.extra_select_mask = self.extra_select_mask.copy()\n    if self._extra_select_cache is not None:\n        obj._extra_select_cache = self._extra_select_cache.copy()\n    if self.select_related is not False:\n        obj.select_related = copy.deepcopy(obj.select_related)\n    if 'subq_aliases' in self.__dict__:\n        obj.subq_aliases = self.subq_aliases.copy()\n    obj.used_aliases = self.used_aliases.copy()\n    obj._filtered_relations = self._filtered_relations.copy()\n    obj.__dict__.pop('base_table', None)\n    return obj",
    "docstring": "Return a copy of the current Query. A lightweight alternative to deepcopy().",
    "type": "method",
    "file_path": "django\\django\\db\\models\\sql\\query.py",
    "ast_data": "FunctionDef name:clone arg:self arguments arg Assign Call Assign Assign Call Assign Call Assign Call Assign Call Assign Call Assign Call Assign Call If Compare Assign Call If Assign Call Call Assign Assign Call If Compare Assign Call If Compare Assign Call If Compare Assign Call If Compare Assign Call Assign Call Assign Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_get_key",
    "source_code": "def _get_key(self) -> tuple[int, int, int, int]:\n    return (len(self.replacements), len(self.divisible), self.num_deferred_runtime_asserts, len(self.unbacked_var_to_val))",
    "docstring": "Defines the current \"state\" of the guards we've accumulated in this ShapeEnv. Determines when we need to invalidate our cache",
    "type": "method",
    "file_path": "pytorch\\torch\\fx\\experimental\\symbolic_shapes.py",
    "ast_data": "FunctionDef name:_get_key arg:self arguments arg Return return:yes Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "close",
    "source_code": "def close(self) -> None:\n    self.archive_file.write_end_of_file()",
    "docstring": "Close the archive.",
    "type": "method",
    "file_path": "pytorch\\torch\\export\\pt2_archive\\_package.py",
    "ast_data": "FunctionDef name:close arg:self arguments arg Call"
  },
  {
    "library": "numpy",
    "name": "_info",
    "source_code": "def _info(obj, output=None):\n    extra = ''\n    tic = ''\n    bp = lambda x: x\n    cls = getattr(obj, '__class__', type(obj))\n    nm = getattr(cls, '__name__', cls)\n    strides = obj.strides\n    endian = obj.dtype.byteorder\n    if output is None:\n        output = sys.stdout\n    print('class: ', nm, file=output)\n    print('shape: ', obj.shape, file=output)\n    print('strides: ', strides, file=output)\n    print('itemsize: ', obj.itemsize, file=output)\n    print('aligned: ', bp(obj.flags.aligned), file=output)\n    print('contiguous: ', bp(obj.flags.contiguous), file=output)\n    print('fortran: ', obj.flags.fortran, file=output)\n    print(f'data pointer: {hex(obj.ctypes._as_parameter_.value)}{extra}', file=output)\n    print('byteorder: ', end=' ', file=output)\n    if endian in ['|', '=']:\n        print(f'{tic}{sys.byteorder}{tic}', file=output)\n        byteswap = False\n    elif endian == '>':\n        print(f'{tic}big{tic}', file=output)\n        byteswap = sys.byteorder != 'big'\n    else:\n        print(f'{tic}little{tic}', file=output)\n        byteswap = sys.byteorder != 'little'\n    print('byteswap: ', bp(byteswap), file=output)\n    print(f'type: {obj.dtype}', file=output)",
    "docstring": "Provide information about ndarray obj. Parameters ---------- obj : ndarray Must be ndarray, not checked. output Where printed output goes. Notes ----- Copied over from the numarray module prior to its removal. Adapted somewhat as only numpy is an option now. Called by info.",
    "type": "function",
    "file_path": "numpy\\numpy\\lib\\_utils_impl.py",
    "ast_data": "FunctionDef name:_info arg:obj arg:output arguments arg arg Assign Assign Assign arguments arg Assign Call Call Assign Call Assign Assign If Compare Assign Call Call Call Call Call Call Call Call Call Call Call Call If Compare Call Assign If Compare Call Assign Compare Call Assign Compare Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "adjust_contrast",
    "source_code": "@tf_export('image.adjust_contrast')\n@dispatch.add_dispatch_support\ndef adjust_contrast(images, contrast_factor):\n    with ops.name_scope(None, 'adjust_contrast', [images, contrast_factor]) as name:\n        images = ops.convert_to_tensor(images, name='images')\n        orig_dtype = images.dtype\n        if orig_dtype in (dtypes.float16, dtypes.float32):\n            flt_images = images\n        else:\n            flt_images = convert_image_dtype(images, dtypes.float32)\n        adjusted = gen_image_ops.adjust_contrastv2(flt_images, contrast_factor=contrast_factor, name=name)\n        return convert_image_dtype(adjusted, orig_dtype, saturate=True)",
    "docstring": "Adjust contrast of RGB or grayscale images. This is a convenience method that converts RGB images to float representation, adjusts their contrast, and then converts them back to the original data type. If several adjustments are chained, it is advisable to minimize the number of redundant conversions. is a tensor of at least 3 dimensions. The last 3 dimensions are interpreted as . The other dimensions only represent a collection of images, such as Contrast is adjusted independently for each channel of each image. For each channel, this Op computes the mean of the image pixels in the channel and then adjusts each component of each pixel to . must be in the interval . Usage Example: >>> x = [[[1.0, 2.0, 3.0], ... [4.0, 5.0, 6.0]], ... [[7.0, 8.0, 9.0], ... [10.0, 11.0, 12.0]]] >>> tf.image.adjust_contrast(x, 2.) Args: images: Images to adjust. At least 3-D. contrast_factor: A float multiplier for adjusting contrast. Returns: The contrast-adjusted image or images.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\image_ops_impl.py",
    "ast_data": "FunctionDef name:adjust_contrast arg:images arg:contrast_factor arguments arg arg With Call Assign Call Assign If Compare Assign Assign Call Assign Call Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "__iter__",
    "source_code": "def __iter__(self):\n    return iter(self._node)",
    "docstring": "Iterate over the nodes.",
    "type": "method",
    "file_path": "pytorch\\torch\\package\\_digraph.py",
    "ast_data": "FunctionDef name:__iter__ arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_ReincarnatedPerDeviceGenerator",
    "source_code": "class _ReincarnatedPerDeviceGenerator(dataset_ops.DatasetV2):\n\n    def __init__(self, per_device_dataset, incarnation_id):\n        if hasattr(per_device_dataset, '_name'):\n            self._name = per_device_dataset._name\n        self._element_spec = per_device_dataset.element_spec\n        self._init_func = per_device_dataset._init_func\n        self._init_captured_args = self._init_func.captured_inputs\n        self._next_func = per_device_dataset._next_func\n        self._next_captured_args = per_device_dataset._next_captured_args\n        self._next_captured_args[per_device_dataset._incarnation_id_index] = incarnation_id\n        self._finalize_func = per_device_dataset._finalize_func\n        self._finalize_captured_args = per_device_dataset._finalize_captured_args\n        variant_tensor = gen_dataset_ops.generator_dataset(self._init_captured_args, self._next_captured_args, self._finalize_captured_args, init_func=self._init_func, next_func=self._next_func, finalize_func=self._finalize_func, **self._flat_structure)\n        super(_ReincarnatedPerDeviceGenerator, self).__init__(variant_tensor)\n\n    def _inputs(self):\n        return []\n\n    @property\n    def element_spec(self):\n        return self._element_spec",
    "docstring": "Creates a _PerDeviceGenerator-like dataset with a new incarnation_id. Re-uses the functions from the provided per_device_dataset and just switches out the function argument corresponding to the incarnation_id.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\data\\ops\\multi_device_iterator_ops.py",
    "ast_data": "ClassDef name:_ReincarnatedPerDeviceGenerator FunctionDef name:__init__ arg:self arg:per_device_dataset arg:incarnation_id arguments arg arg arg If Call Assign Assign Assign Assign Assign Assign Assign Assign Assign Assign Call Call Call FunctionDef name:_inputs arg:self arguments arg Return return:no FunctionDef name:element_spec arg:self arguments arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_SymExprHash",
    "source_code": "@dataclass\nclass _SymExprHash:\n    sym_obj: Union[SymInt, SymFloat, SymBool]\n\n    def __hash__(self) -> int:\n        return hash((type(self.sym_obj), self.sym_obj.node.expr))\n\n    def __eq__(self, value) -> bool:\n        if not isinstance(value, _SymExprHash):\n            return False\n        return self.sym_obj.node.expr == value.sym_obj.node.expr",
    "docstring": "Hash for a py_sym_types that will use the underlying sympy expression",
    "type": "class",
    "file_path": "pytorch\\torch\\_inductor\\fx_passes\\dedupe_symint_uses.py",
    "ast_data": "ClassDef name:_SymExprHash FunctionDef name:__hash__ arg:self arguments arg Return return:yes Call Call FunctionDef name:__eq__ arg:self arg:value arguments arg arg If Call Return return:yes Return return:yes Compare"
  },
  {
    "library": "tensorflow",
    "name": "merge_summary",
    "source_code": "@deprecated('2016-11-30', 'Please switch to tf.summary.merge.')\ndef merge_summary(inputs, collections=None, name=None):\n    with ops.name_scope(name, 'MergeSummary', inputs):\n        val = gen_logging_ops.merge_summary(inputs=inputs, name=name)\n        _Collect(val, collections, [])\n    return val",
    "docstring": "Merges summaries. This op is deprecated. Please switch to tf.compat.v1.summary.merge, which has identical behavior. This op creates a []( protocol buffer that contains the union of all the values in the input summaries. When the Op is run, it reports an error if multiple values in the summaries to merge use the same tag. Args: inputs: A list of objects containing serialized protocol buffers. collections: Optional list of graph collections keys. The new summary op is added to these collections. Defaults to . name: A name for the operation (optional). Returns: A scalar of type . The serialized protocol buffer resulting from the merging.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\logging_ops.py",
    "ast_data": "FunctionDef name:merge_summary arg:inputs arg:collections arg:name arguments arg arg arg With Call Assign Call Call Return return:yes Call"
  },
  {
    "library": "sphinx",
    "name": "get_toc",
    "source_code": "def get_toc(self) -> None:\n    doctree = self.env.get_and_resolve_doctree(self.config.master_doc, self, prune_toctrees=False, includehidden=True)\n    self.refnodes = self.get_refnodes(doctree, [])\n    master_dir = Path(self.config.master_doc).parent\n    for item in self.refnodes:\n        item['refuri'] = str(master_dir / item['refuri'])\n    self.toc_add_files(self.refnodes)",
    "docstring": "Get the total table of contents, containing the master_doc and pre and post files not managed by Sphinx.",
    "type": "method",
    "file_path": "sphinx\\sphinx\\builders\\_epub_base.py",
    "ast_data": "FunctionDef name:get_toc arg:self arguments arg Assign Call Assign Call Assign Call For Assign Call Call"
  },
  {
    "library": "django",
    "name": "__init__",
    "source_code": "def __init__(self, stream_or_string, **options):\n    self.options = options\n    if isinstance(stream_or_string, str):\n        self.stream = StringIO(stream_or_string)\n    else:\n        self.stream = stream_or_string",
    "docstring": "Init this serializer given a stream or a string",
    "type": "method",
    "file_path": "django\\django\\core\\serializers\\base.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:stream_or_string arguments arg arg arg Assign If Call Assign Call Assign"
  },
  {
    "library": "numpy",
    "name": "argwhere",
    "source_code": "@array_function_dispatch(_argwhere_dispatcher)\ndef argwhere(a):\n    if np.ndim(a) == 0:\n        a = shape_base.atleast_1d(a)\n        return argwhere(a)[:, :0]\n    return transpose(nonzero(a))",
    "docstring": "Find the indices of array elements that are non-zero, grouped by element. Parameters ---------- a : array_like Input data. Returns ------- index_array : (N, a.ndim) ndarray Indices of elements that are non-zero. Indices are grouped by element. This array will have shape `` instead. Examples -------- >>> import numpy as np >>> x = np.arange(6).reshape(2,3) >>> x array([[0, 1, 2], [3, 4, 5]]) >>> np.argwhere(x>1) array([[0, 2], [1, 0], [1, 1], [1, 2]])",
    "type": "function",
    "file_path": "numpy\\numpy\\_core\\numeric.py",
    "ast_data": "FunctionDef name:argwhere arg:a arguments arg If Compare Call Assign Call Return return:yes Call Return return:yes Call Call Call"
  },
  {
    "library": "kornia",
    "name": "build",
    "source_code": "@staticmethod\ndef build(model_name: str='Unet', encoder_name: str='resnet34', encoder_weights: Optional[str]='imagenet', in_channels: int=3, classes: int=1, activation: str='softmax', **kwargs: Any) -> SemanticSegmentation:\n    preproc_params = smp.encoders.get_preprocessing_params(encoder_name)\n    preprocessor = SegmentationModelsBuilder.get_preprocessing_pipeline(preproc_params)\n    segmentation_model = getattr(smp, model_name)(encoder_name=encoder_name, encoder_weights=encoder_weights, in_channels=in_channels, classes=classes, activation=activation, **kwargs)\n    return SemanticSegmentation(model=segmentation_model, pre_processor=preprocessor, post_processor=nn.Identity(), name=f'{model_name}_{encoder_name}')",
    "docstring": "SegmentationModel is a module that wraps a segmentation model. This module uses SegmentationModel library for segmentation. Args: model_name: Name of the model to use. Valid options are: \"Unet\", \"UnetPlusPlus\", \"MAnet\", \"LinkNet\", \"FPN\", \"PSPNet\", \"PAN\", \"DeepLabV3\", \"DeepLabV3Plus\". encoder_name: Name of the encoder to use. encoder_depth: Depth of the encoder. encoder_weights: Weights of the encoder. decoder_channels: Number of channels in the decoder. in_channels: Number of channels in the input. classes: Number of classes to predict. activation: Type of activation layer. **kwargs: Additional arguments to pass to the model. Detailed arguments can be found at: Note: Only encoder weights are available. Pretrained weights for the whole model are not available.",
    "type": "method",
    "file_path": "kornia\\kornia\\models\\segmentation\\segmentation_models.py",
    "ast_data": "FunctionDef name:build arg:model_name arg:encoder_name arg:encoder_weights arg:in_channels arg:classes arg:activation arguments arg arg arg arg arg arg arg Assign Call Assign Call Assign Call Call Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "exceeds_recompile_limit",
    "source_code": "def exceeds_recompile_limit(cache_size: CacheSizeRelevantForFrame, compile_id: CompileId) -> tuple[bool, str]:\n    if cache_size.will_compilation_exceed_accumulated_limit():\n        return (True, 'accumulated_recompile_limit')\n    if cache_size.will_compilation_exceed_specific_limit(config.recompile_limit):\n        return (True, 'recompile_limit')\n    assert compile_id.frame_compile_id is not None\n    if compile_id.frame_compile_id >= config.accumulated_recompile_limit:\n        return (True, 'accumulated_recompile_limit')\n    return (False, '')",
    "docstring": "Checks if we are exceeding the cache size limit.",
    "type": "function",
    "file_path": "pytorch\\torch\\_dynamo\\cache_size.py",
    "ast_data": "FunctionDef name:exceeds_recompile_limit arg:cache_size arg:compile_id arguments arg arg If Call Return return:yes If Call Return return:yes Compare If Compare Return return:yes Return return:yes"
  },
  {
    "library": "scrapy",
    "name": "AcceptableProtocolsContextFactory",
    "source_code": "@implementer(IPolicyForHTTPS)\nclass AcceptableProtocolsContextFactory:\n\n    def __init__(self, context_factory: Any, acceptable_protocols: list[bytes]):\n        verifyObject(IPolicyForHTTPS, context_factory)\n        self._wrapped_context_factory: Any = context_factory\n        self._acceptable_protocols: list[bytes] = acceptable_protocols\n\n    def creatorForNetloc(self, hostname: bytes, port: int) -> ClientTLSOptions:\n        options: ClientTLSOptions = self._wrapped_context_factory.creatorForNetloc(hostname, port)\n        _setAcceptableProtocols(options._ctx, self._acceptable_protocols)\n        return options",
    "docstring": "Context factory to used to override the acceptable protocols to set up the [OpenSSL.SSL.Context] for doing NPN and/or ALPN negotiation.",
    "type": "class",
    "file_path": "scrapy\\scrapy\\core\\downloader\\contextfactory.py",
    "ast_data": "ClassDef name:AcceptableProtocolsContextFactory FunctionDef name:__init__ arg:self arg:context_factory arg:acceptable_protocols arguments arg arg arg Call FunctionDef name:creatorForNetloc arg:self arg:hostname arg:port arguments arg arg arg Call Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "_accumulate_sharded_grad",
    "source_code": "@no_type_check\ndef _accumulate_sharded_grad(state: _FSDPState, handle: FlatParamHandle, sharded_grad: torch.Tensor) -> torch.Tensor:\n    flat_param = handle.flat_param\n    _cast_grad_to_param_dtype(state, sharded_grad, flat_param)\n    accumulate_grad = hasattr(flat_param, '_saved_grad_shard')\n    if accumulate_grad:\n        _check_grad_to_accumulate(sharded_grad, flat_param._saved_grad_shard)\n        flat_param._saved_grad_shard += sharded_grad\n    else:\n        flat_param._saved_grad_shard = sharded_grad\n    grad_to_offload = flat_param._saved_grad_shard\n    return grad_to_offload",
    "docstring": "Accumulates the reduce-scattered sharded gradient with any existing sharded gradient if needed, returning the gradient to offload (if CPU offloading is enabled).",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\fsdp\\_runtime_utils.py",
    "ast_data": "FunctionDef name:_accumulate_sharded_grad arg:state arg:handle arg:sharded_grad arguments arg arg arg Assign Call Assign Call If Call Assign Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_is_default_attr_value",
    "source_code": "def _is_default_attr_value(op_def, attr_name, attr_value):\n    for attr_def in op_def.attr:\n        if attr_def.name == attr_name:\n            if not attr_def.HasField('default_value'):\n                return False\n            return not c_api.EqualAttrValueWrapper(attr_value.SerializeToString(), attr_def.default_value.SerializeToString())\n    return False",
    "docstring": "Checks if given attribute matches the default value in the op def.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\meta_graph.py",
    "ast_data": "FunctionDef name:_is_default_attr_value arg:op_def arg:attr_name arg:attr_value arguments arg arg arg For If Compare If Call Return return:yes Return return:yes Call Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "set_module",
    "source_code": "def set_module(obj, mod):\n    if not isinstance(mod, str):\n        raise TypeError('The mod argument should be a string')\n    obj.__module__ = mod",
    "docstring": "Set the module attribute on a python object for a given object for nicer printing",
    "type": "function",
    "file_path": "pytorch\\torch\\utils\\__init__.py",
    "ast_data": "FunctionDef name:set_module arg:obj arg:mod arguments arg arg If Call Raise Call Assign"
  },
  {
    "library": "scipy",
    "name": "dogleg_step",
    "source_code": "def dogleg_step(x, newton_step, g, a, b, tr_bounds, lb, ub):\n    lb_total, ub_total, orig_l, orig_u, tr_l, tr_u = find_intersection(x, tr_bounds, lb, ub)\n    bound_hits = np.zeros_like(x, dtype=int)\n    if in_bounds(newton_step, lb_total, ub_total):\n        return (newton_step, bound_hits, False)\n    to_bounds, _ = step_size_to_bound(np.zeros_like(x), -g, lb_total, ub_total)\n    cauchy_step = -minimize_quadratic_1d(a, b, 0, to_bounds)[0] * g\n    step_diff = newton_step - cauchy_step\n    step_size, hits = step_size_to_bound(cauchy_step, step_diff, lb_total, ub_total)\n    bound_hits[(hits < 0) & orig_l] = -1\n    bound_hits[(hits > 0) & orig_u] = 1\n    tr_hit = np.any((hits < 0) & tr_l | (hits > 0) & tr_u)\n    return (cauchy_step + step_size * step_diff, bound_hits, tr_hit)",
    "docstring": "Find dogleg step in a rectangular region. Returns ------- step : ndarray, shape (n,) Computed dogleg step. bound_hits : ndarray of int, shape (n,) Each component shows whether a corresponding variable hits the initial bound after the step is taken: * 0 - a variable doesn't hit the bound. * -1 - lower bound is hit. * 1 - upper bound is hit. tr_hit : bool Whether the step hit the boundary of the trust-region.",
    "type": "function",
    "file_path": "scipy\\scipy\\optimize\\_lsq\\dogbox.py",
    "ast_data": "FunctionDef name:dogleg_step arg:x arg:newton_step arg:g arg:a arg:b arg:tr_bounds arg:lb arg:ub arguments arg arg arg arg arg arg arg arg Assign Call Assign Call If Call Return return:yes Assign Call Call Assign Call Assign Assign Call Assign Compare Assign Compare Assign Call Compare Compare Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "VariableAggregationV2",
    "source_code": "@tf_export('VariableAggregation', v1=[])\nclass VariableAggregationV2(enum.Enum):\n    NONE = 0\n    SUM = 1\n    MEAN = 2\n    ONLY_FIRST_REPLICA = 3\n\n    def __hash__(self):\n        return hash(self.value)\n\n    def __eq__(self, other):\n        if self is other:\n            return True\n        elif isinstance(other, VariableAggregation):\n            return int(self.value) == int(other.value)\n        else:\n            return False",
    "docstring": "Indicates how a distributed variable will be aggregated. distributes a model by making multiple copies (called \"replicas\") acting on different elements of the input batch in a data parallel model. When performing some variable-update operation, for example , in a model, we need to resolve how to combine the different values for computed in the different replicas. * : This is the default, giving an error if you use a variable-update operation with multiple replicas. * : Add the updates across replicas. * : Take the arithmetic mean (\"average\") of the updates across replicas. * : This is for when every replica is performing the same update, but we only want to perform the update once. Used, e.g., for the global step counter. For example: >>> strategy = tf.distribute.MirroredStrategy([\"GPU:0\", \"GPU:1\"]) >>> with strategy.scope(): ... v = tf.Variable(5.0, aggregation=tf.VariableAggregation.MEAN) >>> @tf.function ... def update_fn(): ... return v.assign_add(1.0) >>> strategy.run(update_fn) PerReplica:{ 0: , 1: }",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\variables.py",
    "ast_data": "ClassDef name:VariableAggregationV2 Assign Assign Assign Assign FunctionDef name:__hash__ arg:self arguments arg Return return:yes Call FunctionDef name:__eq__ arg:self arg:other arguments arg arg If Compare Return return:yes If Call Return return:yes Compare Call Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "ConcreteFunction",
    "source_code": "@tf_export('types.experimental.ConcreteFunction', v1=[])\nclass ConcreteFunction(Callable, metaclass=abc.ABCMeta):\n\n    @property\n    @abc.abstractmethod\n    def inference_fn(self) -> AtomicFunction:\n        pass",
    "docstring": "Base class for differentiable graph functions. A encapsulates the original graph function definition with support for differentiability under contexts. In the process, it may generate new graph functions (using the original) to efficiently perform forwards and backwards passes.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\types\\core.py",
    "ast_data": "ClassDef name:ConcreteFunction FunctionDef name:inference_fn arg:self arguments arg Call"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, variant_tensor):\n    self._variant_tensor_attr = variant_tensor\n    self._graph_attr = ops.get_default_graph()\n    self._options_attr = options_lib.Options()\n    for input_dataset in self._inputs():\n        input_options = None\n        if isinstance(input_dataset, data_types.DatasetV1):\n            if hasattr(input_dataset, '_dataset'):\n                if not isinstance(input_dataset._dataset, data_types.DatasetV2):\n                    raise TypeError(f'Each input of dataset {type(self)} should be a subclass of `tf.data.Dataset` but encountered {type(input_dataset._dataset)}.')\n                input_options = input_dataset._dataset._options_attr\n        elif isinstance(input_dataset, data_types.DatasetV2):\n            input_options = input_dataset._options_attr\n        else:\n            raise TypeError(f'Each input of dataset {type(self)} should be a subclass of `tf.data.Dataset` but encountered {type(input_dataset)}.')\n        if input_options is not None:\n            self._options_attr = self._options_attr.merge(input_options)\n    self._options_attr._set_mutable(False)",
    "docstring": "Creates a DatasetV2 object. This is a difference between DatasetV1 and DatasetV2. DatasetV1 does not take anything in its constructor whereas in the DatasetV2, we expect subclasses to create a variant_tensor and pass it in to the super() call. Args: variant_tensor: A DT_VARIANT tensor that represents the dataset.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\data\\ops\\dataset_ops.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:variant_tensor arguments arg arg Assign Assign Call Assign Call For Call Assign If Call If Call If Call Raise Call Call Call Assign If Call Assign Raise Call Call Call If Compare Assign Call Call"
  },
  {
    "library": "pytorch",
    "name": "use_agent_store",
    "source_code": "@property\ndef use_agent_store(self) -> bool:\n    return os.getenv('TORCH_DISABLE_SHARE_RDZV_TCP_STORE', '0') != '1'",
    "docstring": "See base class.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\elastic\\rendezvous\\dynamic_rendezvous.py",
    "ast_data": "FunctionDef name:use_agent_store arg:self arguments arg Return return:yes Compare Call"
  },
  {
    "library": "scikit-learn",
    "name": "_compute_partial_dependence_recursion",
    "source_code": "def _compute_partial_dependence_recursion(self, grid, target_features):\n    if self.init is not None:\n        warnings.warn('Using recursion method with a non-constant init predictor will lead to incorrect partial dependence values. Got init=%s.' % self.init, UserWarning)\n    grid = np.asarray(grid, dtype=DTYPE, order='C')\n    n_estimators, n_trees_per_stage = self.estimators_.shape\n    averaged_predictions = np.zeros((n_trees_per_stage, grid.shape[0]), dtype=np.float64, order='C')\n    target_features = np.asarray(target_features, dtype=np.intp, order='C')\n    for stage in range(n_estimators):\n        for k in range(n_trees_per_stage):\n            tree = self.estimators_[stage, k].tree_\n            tree.compute_partial_dependence(grid, target_features, averaged_predictions[k])\n    averaged_predictions *= self.learning_rate\n    return averaged_predictions",
    "docstring": "Fast partial dependence computation. Parameters ---------- grid : ndarray of shape (n_samples, n_target_features), dtype=np.float32 The grid points on which the partial dependence should be evaluated. target_features : ndarray of shape (n_target_features,), dtype=np.intp The set of target features for which the partial dependence should be evaluated. Returns ------- averaged_predictions : ndarray of shape (n_trees_per_iteration_, n_samples) The value of the partial dependence function on each grid point.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\ensemble\\_gb.py",
    "ast_data": "FunctionDef name:_compute_partial_dependence_recursion arg:self arg:grid arg:target_features arguments arg arg arg If Compare Call Assign Call Assign Assign Call Assign Call For Call For Call Assign Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_get_data_parallel_params",
    "source_code": "@staticmethod\ndef _get_data_parallel_params(module, named_params=False):\n    for param in module.parameters() if not named_params else module.named_parameters():\n        if not hasattr(param, '_ddp_ignored'):\n            yield param",
    "docstring": "Return a generator of parameters managed by a given DDP unit.",
    "type": "method",
    "file_path": "pytorch\\torch\\nn\\parallel\\distributed.py",
    "ast_data": "FunctionDef name:_get_data_parallel_params arg:module arg:named_params arguments arg arg For Call Call If Call"
  },
  {
    "library": "scrapy",
    "name": "setmodule",
    "source_code": "def setmodule(self, module: ModuleType | str, priority: int | str='project') -> None:\n    self._assert_mutability()\n    if isinstance(module, str):\n        module = import_module(module)\n    for key in dir(module):\n        if key.isupper():\n            self.set(key, getattr(module, key), priority)",
    "docstring": "Store settings from a module with a given priority. This is a helper function that calls :meth: for every globally declared uppercase variable of `~scrapy.settings.SETTINGS_PRIORITIES` or an integer :type priority: str or int",
    "type": "method",
    "file_path": "scrapy\\scrapy\\settings\\__init__.py",
    "ast_data": "FunctionDef name:setmodule arg:self arg:module arg:priority arguments arg arg arg Call If Call Assign Call For Call If Call Call Call"
  },
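A usage sketch for `setmodule`; the bundled `scrapy.settings.default_settings` module is used so the snippet stays self-contained (any module with uppercase attributes would do):

```python
from scrapy.settings import BaseSettings
from scrapy.settings import default_settings

settings = BaseSettings()
settings.setmodule(default_settings, priority="default")
print(settings.get("BOT_NAME"))  # 'scrapybot'
```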
  {
    "library": "numpy",
    "name": "time_randint_slow",
    "source_code": "def time_randint_slow(self):\n    np.random.randint(0, 2 ** 30 + 1, size=10 ** 5)",
    "docstring": "Compare to uint32 below",
    "type": "method",
    "file_path": "numpy\\benchmarks\\benchmarks\\bench_random.py",
    "ast_data": "FunctionDef name:time_randint_slow arg:self arguments arg Call"
  },
  {
    "library": "scipy",
    "name": "nu2lambda",
    "source_code": "@xp_capabilities()\ndef nu2lambda(nu: 'npt.ArrayLike') -> Any:\n    xp = array_namespace(nu)\n    return c / _asarray(nu, xp=xp, subok=True)",
    "docstring": "Convert optical frequency to wavelength. Parameters ---------- nu : array_like Optical frequency to be converted. Returns ------- lambda : float or array of floats Equivalent wavelength(s). Notes ----- Computes `` where c = 299792458.0, i.e., the (vacuum) speed of light in meters/second. Examples -------- >>> from scipy.constants import nu2lambda, speed_of_light >>> import numpy as np >>> nu2lambda(np.array((1, speed_of_light))) array([ 2.99792458e+08, 1.00000000e+00])",
    "type": "function",
    "file_path": "scipy\\scipy\\constants\\_constants.py",
    "ast_data": "FunctionDef name:nu2lambda arg:nu arguments arg Assign Call Return return:yes Call Call"
  },
  {
    "library": "matplotlib",
    "name": "text",
    "source_code": "@_docstring.interpd\ndef text(self, x, y, s, fontdict=None, **kwargs):\n    effective_kwargs = {'verticalalignment': 'baseline', 'horizontalalignment': 'left', 'transform': self.transData, 'clip_on': False, **(fontdict if fontdict is not None else {}), **kwargs}\n    t = mtext.Text(x, y, text=s, **effective_kwargs)\n    if t.get_clip_path() is None:\n        t.set_clip_path(self.patch)\n    self._add_text(t)\n    return t",
    "docstring": "Add text to the Axes. Add the text *s* to the Axes at location *x*, *y* in data coordinates, with a default `/gallery/text_labels_and_annotations/text_alignment.rcParams.Text.Text~matplotlib.text.Text~matplotlib.patches.Rectangle` properties. For example:: >>> text(x, y, s, bbox=dict(facecolor='red', alpha=0.5))",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\axes\\_axes.py",
    "ast_data": "FunctionDef name:text arg:self arg:x arg:y arg:s arg:fontdict arguments arg arg arg arg arg arg Assign Compare Assign Call If Compare Call Call Call Return return:yes"
  },
  {
    "library": "numpy",
    "name": "lex",
    "source_code": "def lex(s, name=None, trim_whitespace=True, line_offset=0, delimiters=None):\n    if delimiters is None:\n        delimiters = (Template.default_namespace['start_braces'], Template.default_namespace['end_braces'])\n    in_expr = False\n    chunks = []\n    last = 0\n    last_pos = (line_offset + 1, 1)\n    token_re = re.compile('%s|%s' % (re.escape(delimiters[0]), re.escape(delimiters[1])))\n    for match in token_re.finditer(s):\n        expr = match.group(0)\n        pos = find_position(s, match.end(), last, last_pos)\n        if expr == delimiters[0] and in_expr:\n            raise TemplateError('%s inside expression' % delimiters[0], position=pos, name=name)\n        elif expr == delimiters[1] and (not in_expr):\n            raise TemplateError('%s outside expression' % delimiters[1], position=pos, name=name)\n        if expr == delimiters[0]:\n            part = s[last:match.start()]\n            if part:\n                chunks.append(part)\n            in_expr = True\n        else:\n            chunks.append((s[last:match.start()], last_pos))\n            in_expr = False\n        last = match.end()\n        last_pos = pos\n    if in_expr:\n        raise TemplateError('No %s to finish last expression' % delimiters[1], name=name, position=last_pos)\n    part = s[last:]\n    if part:\n        chunks.append(part)\n    if trim_whitespace:\n        chunks = trim_lex(chunks)\n    return chunks",
    "docstring": "Lex a string into chunks: >>> lex('hey') ['hey'] >>> lex('hey {{you}}') ['hey ', ('you', (1, 7))] >>> lex('hey {{') Traceback (most recent call last): ... TemplateError: No }} to finish last expression at line 1 column 7 >>> lex('hey }}') Traceback (most recent call last): ... TemplateError: }} outside expression at line 1 column 7 >>> lex('hey {{ {{') Traceback (most recent call last): ... TemplateError: {{ inside expression at line 1 column 10",
    "type": "function",
    "file_path": "numpy\\numpy\\_build_utils\\tempita\\_tempita.py",
    "ast_data": "FunctionDef name:lex arg:s arg:name arg:trim_whitespace arg:line_offset arg:delimiters arguments arg arg arg arg arg If Compare Assign Assign Assign Assign Assign Assign Call Call Call For Call Assign Call Assign Call Call If BoolOp Compare Raise Call If BoolOp Compare Raise Call If Compare Assign Call If Call Assign Call Call Assign Assign Call Assign If Raise Call Assign If Call If Assign Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_insert_logger",
    "source_code": "def _insert_logger(model: GraphModule, node: Node, debug_handle: int) -> Node:\n    from torch.ao.quantization.fx.utils import get_new_attr_name_with_prefix\n    with model.graph.inserting_after(node):\n        get_new_attr_name = get_new_attr_name_with_prefix(f'{node.name}_logger')\n        logger_name = get_new_attr_name(model)\n        setattr(model, logger_name, OutputLogger(debug_handle, node.name, node.meta.get('nn_module_stack')))\n        logger_node = model.graph.call_module(logger_name, (node,), {})\n    orig_users = list(node.users.keys())\n    for user_node in orig_users:\n        if user_node is logger_node:\n            continue\n        user_node.replace_input_with(node, logger_node)\n    return logger_node",
    "docstring": "For a given node, adds an OutputLogger that observes the output of that node, and all its users use the OutputLogger output instead. The OutputLogger will contain the debug_handle which can be used to compare graphs after transforms",
    "type": "function",
    "file_path": "pytorch\\torch\\ao\\quantization\\pt2e\\_numeric_debugger.py",
    "ast_data": "FunctionDef name:_insert_logger arg:model arg:node arg:debug_handle arguments arg arg arg With Call Assign Call Assign Call Call Call Call Assign Call Assign Call Call For If Compare Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "get_world_size",
    "source_code": "def get_world_size(group: Optional[ProcessGroup]=None) -> int:\n    if _rank_not_in_group(group):\n        return -1\n    return _get_group_size(group)",
    "docstring": "Return the number of processes in the current process group. Args: group (ProcessGroup, optional): The process group to work on. If None, the default process group will be used. Returns: The world size of the process group -1, if not part of the group",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\distributed_c10d.py",
    "ast_data": "FunctionDef name:get_world_size arg:group arguments arg If Call Return return:yes Return return:yes Call"
  },
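A minimal single-process sketch for `get_world_size`; assumes the "gloo" backend and a free local port:

```python
import torch.distributed as dist

dist.init_process_group(
    backend="gloo",
    init_method="tcp://127.0.0.1:29500",
    rank=0,
    world_size=1,
)
print(dist.get_world_size())  # 1
dist.destroy_process_group()
```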
  {
    "library": "pandas",
    "name": "__array__",
    "source_code": "@ravel_compat\ndef __array__(self, dtype: NpDtype | None=None, copy: bool | None=None) -> np.ndarray:\n    if copy is False:\n        raise ValueError('Unable to avoid copy while creating an array as requested.')\n    ret = take_nd(self.categories._values, self._codes)\n    return np.asarray(ret, dtype=dtype)",
    "docstring": "The numpy array interface. Users should not call this directly. Rather, it is invoked by :func: and :func:. Parameters ---------- dtype : np.dtype or None Specifies the the dtype for the array. copy : bool or None, optional See :func:. Returns ------- numpy.array A numpy array of either the specified dtype or, if dtype==None (default), the same dtype as categorical.categories.dtype. See Also -------- numpy.asarray : Convert input to numpy.ndarray. Examples -------- >>> cat = pd.Categorical([\"a\", \"b\"], ordered=True) The following calls `` >>> np.asarray(cat) array(['a', 'b'], dtype=object)",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\arrays\\categorical.py",
    "ast_data": "FunctionDef name:__array__ arg:self arg:dtype arg:copy arguments arg arg arg If Compare Raise Call Assign Call Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "process_global_benchmarks",
    "source_code": "def process_global_benchmarks(f):\n    with open(f) as fi:\n        dct = json.load(fi)\n    nfev = []\n    nsuccess = []\n    mean_time = []\n    solvers = dct[list(dct.keys())[0]].keys()\n    for problem, results in dct.items():\n        _nfev = []\n        _nsuccess = []\n        _mean_time = []\n        for solver, vals in results.items():\n            _nfev.append(vals['mean_nfev'])\n            _nsuccess.append(vals['nsuccess'] / vals['ntrials'] * 100)\n            _mean_time.append(vals['mean_time'])\n        nfev.append(_nfev)\n        nsuccess.append(_nsuccess)\n        mean_time.append(_mean_time)\n    nfev = pd.DataFrame(data=nfev, index=dct.keys(), columns=solvers)\n    nsuccess = pd.DataFrame(data=nsuccess, index=dct.keys(), columns=solvers)\n    mean_time = pd.DataFrame(data=mean_time, index=dct.keys(), columns=solvers)\n    return (nfev, nsuccess, mean_time)",
    "docstring": "Processes the global benchmarks results into pandas DataFrame. Parameters ---------- f: {str, file-like} Global Benchmarks output Returns ------- nfev, success_rate, mean_time pd.DataFrame for the mean number of nfev, success_rate, mean_time for each optimisation problem.",
    "type": "function",
    "file_path": "scipy\\benchmarks\\process_global_benchmarks.py",
    "ast_data": "FunctionDef name:process_global_benchmarks arg:f arguments arg With Call Assign Call Assign Assign Assign Assign Call Call Call For Call Assign Assign Assign For Call Call Call Call Call Call Call Assign Call Call Assign Call Call Assign Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "register_after_fork",
    "source_code": "def register_after_fork(func):\n    _register(func)",
    "docstring": "Register a callable to be executed in the child process after a fork. Note: In python = 3.7 it also works with ``. Args: func (function): Function taking no arguments to be called in the child after fork",
    "type": "function",
    "file_path": "pytorch\\torch\\multiprocessing\\_atfork.py",
    "ast_data": "FunctionDef name:register_after_fork arg:func arguments arg Call"
  },
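A usage sketch for the entry above; note the helper lives in a private module (`torch.multiprocessing._atfork`), and the demo is POSIX-only because it uses `os.fork()`:

```python
import os
from torch.multiprocessing._atfork import register_after_fork

register_after_fork(lambda: print("running in child", os.getpid()))

pid = os.fork()
if pid == 0:      # child: the registered callable has already run
    os._exit(0)
os.waitpid(pid, 0)
```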
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, server_port, stream_handler_class):\n    self._server_port = server_port\n    self._stream_handler_class = stream_handler_class\n    self._server_lock = threading.Lock()\n    self._server_started = False\n    self._stop_requested = False\n    self._debug_ops_state_change_queue = queue.Queue()\n    self._gated_grpc_debug_watches = set()\n    self._breakpoints = set()",
    "docstring": "Constructor. Args: server_port: (int) Port number to bind to. stream_handler_class: A class of the base class that will be used to constructor stream handler objects during calls.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\debug\\lib\\grpc_debug_server.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:server_port arg:stream_handler_class arguments arg arg arg Assign Assign Assign Call Assign Assign Assign Call Assign Call Assign Call"
  },
  {
    "library": "django",
    "name": "bytes_to_text",
    "source_code": "def bytes_to_text(s, encoding):\n    if isinstance(s, bytes):\n        return str(s, encoding, 'replace')\n    else:\n        return s",
    "docstring": "Convert bytes objects to strings, using the given encoding. Illegally encoded input characters are replaced with Unicode \"unknown\" codepoint (�). Return any non-bytes objects without change.",
    "type": "function",
    "file_path": "django\\django\\http\\request.py",
    "ast_data": "FunctionDef name:bytes_to_text arg:s arg:encoding arguments arg arg If Call Return return:yes Call Return return:yes"
  },
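A behaviour sketch mirroring the `bytes_to_text` entry above: illegally encoded bytes become U+FFFD, and non-bytes inputs pass through untouched:

```python
def bytes_to_text(s, encoding):
    if isinstance(s, bytes):
        return str(s, encoding, "replace")
    return s

print(bytes_to_text(b"caf\xc3\xa9", "utf-8"))  # 'café'
print(bytes_to_text(b"caf\xe9", "utf-8"))      # 'caf\ufffd' (replacement char)
print(bytes_to_text("already text", "utf-8"))  # 'already text'
```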
  {
    "library": "tensorflow",
    "name": "predict_signature_def",
    "source_code": "@tf_export(v1=['saved_model.predict_signature_def', 'saved_model.signature_def_utils.predict_signature_def'])\n@deprecation.deprecated_endpoints('saved_model.signature_def_utils.predict_signature_def')\ndef predict_signature_def(inputs, outputs):\n    if inputs is None or not inputs:\n        raise ValueError('Prediction `inputs` cannot be None or empty.')\n    if outputs is None or not outputs:\n        raise ValueError('Prediction `outputs` cannot be None or empty.')\n    signature_inputs = {key: utils.build_tensor_info(tensor) for key, tensor in inputs.items()}\n    signature_outputs = {key: utils.build_tensor_info(tensor) for key, tensor in outputs.items()}\n    signature_def = build_signature_def(signature_inputs, signature_outputs, signature_constants.PREDICT_METHOD_NAME)\n    return signature_def",
    "docstring": "Creates prediction signature from given inputs and outputs. This function produces signatures intended for use with the TensorFlow Serving Predict API (tensorflow_serving/apis/prediction_service.proto). This API imposes no constraints on the input and output types. Args: inputs: dict of string to . outputs: dict of string to . Returns: A prediction-flavored signature_def. Raises: ValueError: If inputs or outputs is .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\saved_model\\signature_def_utils_impl.py",
    "ast_data": "FunctionDef name:predict_signature_def arg:inputs arg:outputs arguments arg arg If BoolOp Compare Raise Call If BoolOp Compare Raise Call Assign Call Call Assign Call Call Assign Call Return return:yes Call Call"
  },
  {
    "library": "numpy",
    "name": "get_f77flags",
    "source_code": "def get_f77flags(src):\n    flags = {}\n    with open(src, encoding='latin1') as f:\n        i = 0\n        for line in f:\n            i += 1\n            if i > 20:\n                break\n            m = _f77flags_re.match(line)\n            if not m:\n                continue\n            fcname = m.group('fcname').strip()\n            fflags = m.group('fflags').strip()\n            flags[fcname] = split_quoted(fflags)\n    return flags",
    "docstring": "Search the first 20 lines of fortran 77 code for line pattern Return a dictionary {:}.",
    "type": "function",
    "file_path": "numpy\\numpy\\distutils\\fcompiler\\__init__.py",
    "ast_data": "FunctionDef name:get_f77flags arg:src arguments arg Assign With Call Assign For If Compare Assign Call If Assign Call Call Assign Call Call Assign Call Return return:yes"
  },
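  A sketch of the flag-comment convention `get_f77flags` scans for (note that `numpy.distutils` is legacy and unavailable on Python >= 3.12; the compiler name `gnu95` and flags are illustrative):

  ```python
  import tempfile
  import textwrap

  from numpy.distutils.fcompiler import get_f77flags  # legacy numpy.distutils

  # The scanner matches, case-insensitively, lines shaped like
  # "CF77FLAGS(<compiler>) = <flags>" within the first 20 lines.
  src = textwrap.dedent("""\
  C     demo Fortran 77 source
  CF77FLAGS(gnu95) = -fno-second-underscore -O2
        PROGRAM DEMO
        END
  """)
  with tempfile.NamedTemporaryFile("w", suffix=".f", delete=False) as fh:
      fh.write(src)

  print(get_f77flags(fh.name))
  # expected: {'gnu95': ['-fno-second-underscore', '-O2']}
  ```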
  {
    "library": "django",
    "name": "z",
    "source_code": "@property\ndef z(self):\n    return self._cs.getOrdinate(2, 0) if self.hasz else None",
    "docstring": "Return the Z component of the Point.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\geos\\point.py",
    "ast_data": "FunctionDef name:z arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "kornia",
    "name": "save_pointcloud_ply",
    "source_code": "def save_pointcloud_ply(filename: str, pointcloud: torch.Tensor) -> None:\n    if not isinstance(filename, str) and filename[-3:] == '.ply':\n        raise TypeError(f'Input filename must be a string in with the .ply  extension. Got {filename}')\n    if not torch.is_tensor(pointcloud):\n        raise TypeError(f'Input pointcloud type is not a torch.Tensor. Got {type(pointcloud)}')\n    if not len(pointcloud.shape) >= 2 and pointcloud.shape[-1] == 3:\n        raise TypeError(f'Input pointcloud must be in the following shape HxWx3. Got {pointcloud.shape}.')\n    xyz_vec: torch.Tensor = pointcloud.reshape(-1, 3)\n    with open(filename, 'w') as f:\n        data_str: str = ''\n        num_points: int = xyz_vec.shape[0]\n        for idx in range(num_points):\n            xyz = xyz_vec[idx]\n            if not bool(torch.isfinite(xyz).any()):\n                num_points -= 1\n                continue\n            x: float = float(xyz[0])\n            y: float = float(xyz[1])\n            z: float = float(xyz[2])\n            data_str += f'{x} {y} {z}\\n'\n        f.write('ply\\n')\n        f.write('format ascii 1.0\\n')\n        f.write('comment arraiy generated\\n')\n        f.write(f'element vertex {num_points}\\n')\n        f.write('property double x\\n')\n        f.write('property double y\\n')\n        f.write('property double z\\n')\n        f.write('end_header\\n')\n        f.write(data_str)",
    "docstring": "Save to disk a pointcloud in PLY format. Args: filename: the path to save the pointcloud. pointcloud: tensor containing the pointcloud to save. The tensor must be in the shape of :math: where the last component is assumed to be a 3d point coordinate :math:.",
    "type": "function",
    "file_path": "kornia\\kornia\\utils\\pointcloud_io.py",
    "ast_data": "FunctionDef name:save_pointcloud_ply arg:filename arg:pointcloud arguments arg arg If BoolOp Call Compare Raise Call If Call Raise Call Call If BoolOp Compare Call Compare Raise Call Call With Call For Call Assign If Call Call Call Call Call Call Call Call Call Call Call Call Call Call Call"
  },
  {
    "library": "scipy",
    "name": "InverseJacobian",
    "source_code": "class InverseJacobian:\n\n    def __init__(self, jacobian):\n        self.jacobian = jacobian\n        self.matvec = jacobian.solve\n        self.update = jacobian.update\n        if hasattr(jacobian, 'setup'):\n            self.setup = jacobian.setup\n        if hasattr(jacobian, 'rsolve'):\n            self.rmatvec = jacobian.rsolve\n\n    @property\n    def shape(self):\n        return self.jacobian.shape\n\n    @property\n    def dtype(self):\n        return self.jacobian.dtype",
    "docstring": "A simple wrapper that inverts the Jacobian using the method. .. legacy:: class See the newer, more consistent interfaces in :mod:. Parameters ---------- jacobian : Jacobian The Jacobian to invert. Attributes ---------- shape Matrix dimensions (M, N) dtype Data type of the matrix.",
    "type": "class",
    "file_path": "scipy\\scipy\\optimize\\_nonlin.py",
    "ast_data": "ClassDef name:InverseJacobian FunctionDef name:__init__ arg:self arg:jacobian arguments arg arg Assign Assign Assign If Call Assign If Call Assign FunctionDef name:shape arg:self arguments arg Return return:yes FunctionDef name:dtype arg:self arguments arg Return return:yes"
  },
  {
    "library": "scipy",
    "name": "pproc_gpool",
    "source_code": "def pproc_gpool(self):\n    gpool_l = []\n    for v in self.gpool:\n        gpool_l.append(v.x_a)\n    G = self._mapwrapper(self.wgcons.gcons, gpool_l)\n    for v, g in zip(self.gpool, G):\n        v.feasible = g",
    "docstring": "Process all constraints in parallel.",
    "type": "method",
    "file_path": "scipy\\scipy\\optimize\\_shgo_lib\\_vertex.py",
    "ast_data": "FunctionDef name:pproc_gpool arg:self arguments arg Assign For Call Assign Call For Call Assign"
  },
  {
    "library": "scipy",
    "name": "_apply_field",
    "source_code": "def _apply_field(data, field, no_pattern=False):\n    if field is None:\n        return data\n    if field == 'pattern':\n        if no_pattern:\n            return data\n        else:\n            return np.zeros(0)\n    dtype = _field_to_dtype.get(field, None)\n    if dtype is None:\n        raise ValueError('Invalid field.')\n    return np.asarray(data, dtype=dtype)",
    "docstring": "Ensure that `` is compatible with the specified MatrixMarket field type. Parameters ---------- data : ndarray Input array. field : str Matrix Market field, such as 'real', 'complex', 'integer', 'pattern'. no_pattern : bool, optional Whether an empty array may be returned for a 'pattern' field. Returns ------- data : ndarray Input data if no conversion necessary, or a converted version",
    "type": "function",
    "file_path": "scipy\\scipy\\io\\_fast_matrix_market\\__init__.py",
    "ast_data": "FunctionDef name:_apply_field arg:data arg:field arg:no_pattern arguments arg arg arg If Compare Return return:yes If Compare If Return return:yes Return return:yes Call Assign Call If Compare Raise Call Return return:yes Call"
  },
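  A small sketch of `_apply_field` behaviour (it is a private helper; the `'real'`-to-float64 conversion assumes the module's `_field_to_dtype` table maps it that way):

  ```python
  import numpy as np
  from scipy.io._fast_matrix_market import _apply_field  # private helper

  data = np.array([1, 2, 3])
  assert _apply_field(data, None) is data                   # no field: unchanged
  assert _apply_field(data, "pattern").size == 0            # pattern values carry no data
  assert _apply_field(data, "pattern", no_pattern=True) is data
  print(_apply_field(data, "real").dtype)                   # e.g. float64, per _field_to_dtype
  ```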
  {
    "library": "pytorch",
    "name": "_get_names_from_handles",
    "source_code": "def _get_names_from_handles(self, handle: FlatParamHandle) -> list[list[str]]:\n    fqns: list[list[str]] = []\n    if handle:\n        flat_param = handle.flat_param\n        if flat_param in self.param_to_fqn:\n            fqns.append(self.param_to_fqn[flat_param])\n    return fqns",
    "docstring": "Returns a list of FQNs for each handle in ``. If a handle is invalid, then its FQNs are omitted from the returned list.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\fsdp\\_exec_order_utils.py",
    "ast_data": "FunctionDef name:_get_names_from_handles arg:self arg:handle arguments arg arg If Assign If Compare Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "v2_dtype_behavior_enabled",
    "source_code": "def v2_dtype_behavior_enabled():\n    if V2_DTYPE_BEHAVIOR is None:\n        return tf2.enabled()\n    return V2_DTYPE_BEHAVIOR",
    "docstring": "Returns True if the V2 dtype behavior is enabled.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\base_layer_utils.py",
    "ast_data": "FunctionDef name:v2_dtype_behavior_enabled arguments If Compare Return return:yes Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "rank",
    "source_code": "def rank(self) -> Optional[int]:\n    return self._rank",
    "docstring": "Returns the rank of remote worker representing the remote device. Returns `` if no rank is available.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\remote_device.py",
    "ast_data": "FunctionDef name:rank arg:self arguments arg Return return:yes"
  },
  {
    "library": "pandas",
    "name": "map",
    "source_code": "def map(self, func: Callable | Mapping | Series | None=None, na_action: Literal['ignore'] | None=None, **kwargs) -> Series:\n    if func is None:\n        if 'arg' in kwargs:\n            func = kwargs.pop('arg')\n            warnings.warn('The parameter `arg` has been renamed to `func`, and it will stop being supported in a future version of pandas.', FutureWarning, stacklevel=find_stack_level())\n        else:\n            raise ValueError('The `func` parameter is required')\n    if callable(func):\n        func = functools.partial(func, **kwargs)\n    new_values = self._map_values(func, na_action=na_action)\n    return self._constructor(new_values, index=self.index, copy=False).__finalize__(self, method='map')",
    "docstring": "Map values of Series according to an input mapping or function. Used for substituting each value in a Series with another value, that may be derived from a function, a `Seriesargto_replacevalue` can be used: >>> s.map(\"I am a {}\".format, na_action=\"ignore\") 0 I am a cat 1 I am a dog 2 NaN 3 I am a rabbit dtype: object",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\series.py",
    "ast_data": "FunctionDef name:map arg:self arg:func arg:na_action arguments arg arg arg arg If Compare If Compare Assign Call Call Call Raise Call If Call Assign Call Assign Call Return return:yes Call Call"
  },
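  A usage sketch covering the three call styles the `map` source above supports (dict mapper, callable with `na_action`, and extra keyword arguments bound via `functools.partial`):

  ```python
  import numpy as np
  import pandas as pd

  s = pd.Series(["cat", "dog", np.nan, "rabbit"])

  # dict mapping: keys absent from the dict map to NaN
  print(s.map({"cat": "kitten", "dog": "puppy"}))

  # callable mapping, skipping missing values
  print(s.map("I am a {}".format, na_action="ignore"))

  # extra keyword arguments are bound to the callable via functools.partial
  print(pd.Series([1.234, 5.678]).map(round, ndigits=2))
  ```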
  {
    "library": "tensorflow",
    "name": "evaluate_generator",
    "source_code": "def evaluate_generator(self, generator, steps=None, callbacks=None, max_queue_size=10, workers=1, use_multiprocessing=False, verbose=0):\n    warnings.warn('`Model.evaluate_generator` is deprecated and will be removed in a future version. Please use `Model.evaluate`, which supports generators.')\n    self._check_call_args('evaluate_generator')\n    return self.evaluate(generator, steps=steps, max_queue_size=max_queue_size, workers=workers, use_multiprocessing=use_multiprocessing, verbose=verbose, callbacks=callbacks)",
    "docstring": "Evaluates the model on a data generator. DEPRECATED: now supports generators, so there is no longer any need to use this endpoint.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\training.py",
    "ast_data": "FunctionDef name:evaluate_generator arg:self arg:generator arg:steps arg:callbacks arg:max_queue_size arg:workers arg:use_multiprocessing arg:verbose arguments arg arg arg arg arg arg arg arg Call Call Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "phase_spectrum",
    "source_code": "@_api.make_keyword_only('3.10', 'Fs')\n@_preprocess_data(replace_names=['x'])\n@_docstring.interpd\ndef phase_spectrum(self, x, Fs=None, Fc=None, window=None, pad_to=None, sides=None, **kwargs):\n    if Fc is None:\n        Fc = 0\n    spec, freqs = mlab.phase_spectrum(x=x, Fs=Fs, window=window, pad_to=pad_to, sides=sides)\n    freqs += Fc\n    lines = self.plot(freqs, spec, **kwargs)\n    self.set_xlabel('Frequency')\n    self.set_ylabel('Phase (radians)')\n    return (spec, freqs, lines[0])",
    "docstring": "Plot the phase spectrum. Compute the phase spectrum (unwrapped angle spectrum) of *x*. Data is padded to a length of *pad_to* and the windowing function *window* is applied to the signal. Parameters ---------- x : 1-D array or sequence Array or sequence containing the data %(Spectral)s %(Single_Spectrum)s Fc : int, default: 0 The center frequency of *x*, which offsets the x extents of the plot to reflect the frequency range used when a signal is acquired and then filtered and downsampled to baseband. Returns ------- spectrum : 1-D array The values for the phase spectrum in radians (real valued). freqs : 1-D array The frequencies corresponding to the elements in *spectrum*. line : The line created by this function. Other Parameters ---------------- data : indexable object, optional DATA_PARAMETER_PLACEHOLDER **kwargs Keyword arguments control the properties: %(Line2D:kwdoc)s See Also -------- magnitude_spectrum Plots the magnitudes of the corresponding frequencies. angle_spectrum Plots the wrapped version of this function. specgram Can plot the phase spectrum of segments within the signal in a colormap.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\axes\\_axes.py",
    "ast_data": "FunctionDef name:phase_spectrum arg:self arg:x arg:Fs arg:Fc arg:window arg:pad_to arg:sides arguments arg arg arg arg arg arg arg arg If Compare Assign Assign Call Assign Call Call Call Return return:yes Call Call"
  },
  {
    "library": "matplotlib",
    "name": "imshow",
    "source_code": "@_preprocess_data()\n@_docstring.interpd\ndef imshow(self, X, cmap=None, norm=None, *, aspect=None, interpolation=None, alpha=None, vmin=None, vmax=None, colorizer=None, origin=None, extent=None, interpolation_stage=None, filternorm=True, filterrad=4.0, resample=None, url=None, **kwargs):\n    im = mimage.AxesImage(self, cmap=cmap, norm=norm, colorizer=colorizer, interpolation=interpolation, origin=origin, extent=extent, filternorm=filternorm, filterrad=filterrad, resample=resample, interpolation_stage=interpolation_stage, **kwargs)\n    if aspect is None and (not (im.is_transform_set() and (not im.get_transform().contains_branch(self.transData)))):\n        aspect = mpl.rcParams['image.aspect']\n    if aspect is not None:\n        self.set_aspect(aspect)\n    im.set_data(X)\n    im.set_alpha(alpha)\n    if im.get_clip_path() is None:\n        im.set_clip_path(self.patch)\n    im._check_exclusionary_keywords(colorizer, vmin=vmin, vmax=vmax)\n    im._scale_norm(norm, vmin, vmax)\n    im.set_url(url)\n    im.set_extent(im.get_extent())\n    self.add_image(im)\n    return im",
    "docstring": "Display data as an image, i.e., on a 2D regular raster. The input may either be actual RGB(A) data, or 2D scalar data, which will be rendered as a pseudocolor image. For displaying a grayscale image, set up the colormapping using the parameters `/gallery/images_contours_and_fields/image_antialiasingimage.interpolation.Axes.set_aspectimage.aspect.Axes.set_aspectimage.interpolation/gallery/images_contours_and_fields/interpolation_methods/gallery/images_contours_and_fields/image_antialiasing/gallery/images_contours_and_fields/image_antialiasingimage.originimshow_extentimshow_extentimage.resample.AxesImage.Artist.set_url~matplotlib.image.AxesImage~matplotlib.artist.Artist.AxesImage~matplotlib.pyplot.imshow` expects RGB images adopting the straight (unassociated) alpha representation.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\axes\\_axes.py",
    "ast_data": "FunctionDef name:imshow arg:self arg:X arg:cmap arg:norm arguments arg arg arg arg arg arg arg arg arg arg arg arg arg arg arg arg arg arg Assign Call If BoolOp Compare BoolOp Call Call Call Assign If Compare Call Call Call If Compare Call Call Call Call Call Call Call Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "enter_scope",
    "source_code": "def enter_scope(self, scf_scope=False):\n    self.symbols.append({'types': {}, 'symbols': {}})\n    self.curr_table = self.symbols[len(self.symbols) - 1]\n    if scf_scope:\n        self.scf_scope += 1",
    "docstring": "Enter a new scope - at function level.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\compiler\\mlir\\tfr\\python\\tfr_gen.py",
    "ast_data": "FunctionDef name:enter_scope arg:self arg:scf_scope arguments arg arg Call Assign Call If"
  },
  {
    "library": "django",
    "name": "get_limit_choices_to",
    "source_code": "def get_limit_choices_to(self):\n    if callable(self.limit_choices_to):\n        return self.limit_choices_to()\n    return self.limit_choices_to",
    "docstring": "Return `` for this form field. If it is a callable, invoke it and return the result.",
    "type": "method",
    "file_path": "django\\django\\forms\\models.py",
    "ast_data": "FunctionDef name:get_limit_choices_to arg:self arguments arg If Call Return return:yes Call Return return:yes"
  },
  {
    "library": "pandas",
    "name": "__init__",
    "source_code": "def __init__(self, df: DataFrame, allow_copy: bool=True) -> None:\n    self._df = df.rename(columns=str)\n    self._allow_copy = allow_copy\n    for i, _col in enumerate(self._df.columns):\n        rechunked = maybe_rechunk(self._df.iloc[:, i], allow_copy=allow_copy)\n        if rechunked is not None:\n            self._df.isetitem(i, rechunked)",
    "docstring": "Constructor - an instance of this (private) class is returned from .",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\interchange\\dataframe.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:df arg:allow_copy arguments arg arg arg Assign Call Assign For Call Assign Call If Compare Call"
  },
  {
    "library": "sphinx",
    "name": "__init__",
    "source_code": "def __init__(self, class_names: list[str], currmodule: str, show_builtins: bool=False, private_bases: bool=False, parts: int=0, aliases: dict[str, str] | None=None, top_classes: Set[str]=frozenset(), include_subclasses: bool=False) -> None:\n    self.class_names = class_names\n    classes: Collection[type[Any]] = self._import_classes(class_names, currmodule)\n    if include_subclasses:\n        classes_set = {*classes}\n        for cls in tuple(classes_set):\n            classes_set.update(_subclasses(cls))\n        classes = classes_set\n    self.class_info = self._class_info(classes, show_builtins, private_bases, parts, aliases, top_classes)\n    if not self.class_info:\n        msg = 'No classes found for inheritance diagram'\n        raise InheritanceException(msg)",
    "docstring": "*class_names* is a list of child classes to show bases from. If *show_builtins* is True, then Python builtins will be shown in the graph.",
    "type": "method",
    "file_path": "sphinx\\sphinx\\ext\\inheritance_diagram.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:class_names arg:currmodule arg:show_builtins arg:private_bases arg:parts arg:aliases arg:top_classes arg:include_subclasses arguments arg arg arg arg arg arg arg arg arg Call Assign Call If Assign For Call Call Call Assign Assign Call If Assign Raise Call"
  },
  {
    "library": "pytorch",
    "name": "tree_map",
    "source_code": "def tree_map(func: Callable[..., Any], tree: PyTree, *rests: PyTree, is_leaf: Optional[Callable[[PyTree], bool]]=None) -> PyTree:\n    return optree.tree_map(func, tree, *rests, is_leaf=is_leaf, none_is_leaf=True, namespace='torch')",
    "docstring": "Map a multi-input function over pytree args to produce a new pytree. See also :func:. >>> tree_map(lambda x: x + 1, {\"x\": 7, \"y\": (42, 64)}) {'x': 8, 'y': (43, 65)} >>> tree_map(lambda x: x is None, {\"x\": 7, \"y\": (42, 64), \"z\": None}) {'x': False, 'y': (False, False), 'z': True} If multiple inputs are given, the structure of the tree is taken from the first input; subsequent inputs need only have `True`.",
    "type": "function",
    "file_path": "pytorch\\torch\\utils\\_cxx_pytree.py",
    "ast_data": "FunctionDef name:tree_map arg:func arg:tree arguments arg arg arg arg Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "_set_position",
    "source_code": "def _set_position(self, pos, which='both'):\n    if not isinstance(pos, mtransforms.BboxBase):\n        pos = mtransforms.Bbox.from_bounds(*pos)\n    for ax in self._twinned_axes.get_siblings(self):\n        if which in ('both', 'active'):\n            ax._position.set(pos)\n        if which in ('both', 'original'):\n            ax._originalPosition.set(pos)\n    self.stale = True",
    "docstring": "Private version of set_position. Call this internally to get the same functionality of , but not to take the axis out of the constrained_layout hierarchy.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\axes\\_base.py",
    "ast_data": "FunctionDef name:_set_position arg:self arg:pos arg:which arguments arg arg arg If Call Assign Call For Call If Compare Call If Compare Call Assign"
  },
  {
    "library": "cherrypy",
    "name": "wait",
    "source_code": "def wait(self, state, interval=0.1, channel=None):\n    states = set(always_iterable(state))\n    while self.state not in states:\n        time.sleep(interval)\n        self.publish(channel)",
    "docstring": "Poll for the given state(s) at intervals; publish to channel.",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\process\\wspbus.py",
    "ast_data": "FunctionDef name:wait arg:self arg:state arg:interval arg:channel arguments arg arg arg arg Assign Call Call While Compare Call Call"
  },
  {
    "library": "tensorflow",
    "name": "barrier_ref",
    "source_code": "@property\ndef barrier_ref(self):\n    return self._barrier_ref",
    "docstring": "Get the underlying barrier reference.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\data_flow_ops.py",
    "ast_data": "FunctionDef name:barrier_ref arg:self arguments arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "ipu",
    "source_code": "def ipu(self, device: Optional[Union[int, device]]=None) -> Self:\n    return self._apply(lambda t: t.ipu(device))",
    "docstring": "Move all model parameters and buffers to the IPU. This also makes associated parameters and buffers different objects. So it should be called before constructing the optimizer if the module will live on IPU while being optimized. .. note:: This method modifies the module in-place. Arguments: device (int, optional): if specified, all parameters will be copied to that device Returns: Module: self",
    "type": "method",
    "file_path": "pytorch\\torch\\nn\\modules\\module.py",
    "ast_data": "FunctionDef name:ipu arg:self arg:device arguments arg arg Return return:yes Call arguments arg Call"
  },
  {
    "library": "cherrypy",
    "name": "_make_absolute",
    "source_code": "@staticmethod\ndef _make_absolute(filename):\n    return filename if os.path.isabs(filename) else os.path.normpath(os.path.join(_module__file__base, filename))",
    "docstring": "Ensure filename is absolute to avoid effect of os.chdir.",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\process\\plugins.py",
    "ast_data": "FunctionDef name:_make_absolute arg:filename arguments arg Return return:yes Call Call Call"
  },
  {
    "library": "pandas",
    "name": "_build_tree",
    "source_code": "def _build_tree(self) -> bytes:\n    raise AbstractMethodError(self)",
    "docstring": "Build tree from data. This method initializes the root and builds attributes and elements with optional namespaces.",
    "type": "method",
    "file_path": "pandas\\pandas\\io\\formats\\xml.py",
    "ast_data": "FunctionDef name:_build_tree arg:self arguments arg Raise Call"
  },
  {
    "library": "django",
    "name": "OverlapsBelowLookup",
    "source_code": "@BaseSpatialField.register_lookup\nclass OverlapsBelowLookup(GISLookup):\n    lookup_name = 'overlaps_below'",
    "docstring": "The 'overlaps_below' operator returns true if A's bounding box overlaps or is below B's bounding box.",
    "type": "class",
    "file_path": "django\\django\\contrib\\gis\\db\\models\\lookups.py",
    "ast_data": "ClassDef name:OverlapsBelowLookup Assign"
  },
  {
    "library": "tensorflow",
    "name": "TPUSyncOnReadVariable",
    "source_code": "class TPUSyncOnReadVariable(TPUVariableMixin, values.SyncOnReadVariable):\n\n    def assign_sub(self, *args, **kwargs):\n        if tpu_util.enclosing_tpu_context() is None:\n            return values.SyncOnReadVariable.assign_sub(self, *args, **kwargs)\n        else:\n            return tpu_util.make_raw_assign_fn(gen_resource_variable_ops.assign_sub_variable_op)(self, *args, **kwargs)\n\n    def assign_add(self, *args, **kwargs):\n        if tpu_util.enclosing_tpu_context() is None:\n            return values.SyncOnReadVariable.assign_add(self, *args, **kwargs)\n        else:\n            return tpu_util.make_raw_assign_fn(gen_resource_variable_ops.assign_add_variable_op)(self, *args, **kwargs)\n\n    def assign(self, *args, **kwargs):\n        if tpu_util.enclosing_tpu_context() is None:\n            return values.SyncOnReadVariable.assign(self, *args, **kwargs)\n        else:\n            return tpu_util.make_raw_assign_fn(gen_resource_variable_ops.assign_variable_op)(self, *args, **kwargs)",
    "docstring": "Holds a map from replica to variables whose values are reduced on save.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\tpu_values.py",
    "ast_data": "ClassDef name:TPUSyncOnReadVariable FunctionDef name:assign_sub arg:self arguments arg arg arg If Compare Call Return return:yes Call Return return:yes Call Call FunctionDef name:assign_add arg:self arguments arg arg arg If Compare Call Return return:yes Call Return return:yes Call Call FunctionDef name:assign arg:self arguments arg arg arg If Compare Call Return return:yes Call Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_py_equal",
    "source_code": "def _py_equal(a, b):\n    return a == b",
    "docstring": "Overload of \"equal\" that falls back to Python's default implementation.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\autograph\\operators\\logical.py",
    "ast_data": "FunctionDef name:_py_equal arg:a arg:b arguments arg arg Return return:yes Compare"
  },
  {
    "library": "kornia",
    "name": "apply_non_transform_mask",
    "source_code": "def apply_non_transform_mask(self, input: Tensor, params: Dict[str, Tensor], flags: Dict[str, Any], transform: Optional[Tensor]=None) -> Tensor:\n    return input",
    "docstring": "Process masks corresponding to the inputs that are no transformation applied.",
    "type": "method",
    "file_path": "kornia\\kornia\\augmentation\\_2d\\geometric\\base.py",
    "ast_data": "FunctionDef name:apply_non_transform_mask arg:self arg:input arg:params arg:flags arg:transform arguments arg arg arg arg arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "CUDASanitizer",
    "source_code": "class CUDASanitizer:\n\n    def __init__(self) -> None:\n        self.dispatch = CUDASanitizerDispatchMode()\n        self.enabled = False\n\n    def enable(self):\n        self.dispatch.__enter__()\n        self.enabled = True\n\n    def disable(self):\n        self.dispatch.__exit__(None, None, None)\n        self.enabled = False\n\n    def __del__(self):\n        if sys is not None and (not sys.is_finalizing()) and self.enabled:\n            self.disable()",
    "docstring": "Manages the lifetime of a CUDASanitizer dispatch mode object. The CUDASanitizer class wraps the entering/exiting functions of the dispatch mode context manager in the enable function/destructor, respectively. This is to explicitly set the lifetime of the dispatch mode object to that of the application. This approach was deemed more elegant than using the atexit module.",
    "type": "class",
    "file_path": "pytorch\\torch\\cuda\\_sanitizer.py",
    "ast_data": "ClassDef name:CUDASanitizer FunctionDef name:__init__ arg:self arguments arg Assign Call Assign FunctionDef name:enable arg:self arguments arg Call Assign FunctionDef name:disable arg:self arguments arg Call Assign FunctionDef name:__del__ arg:self arguments arg If BoolOp Compare Call Call"
  },
  {
    "library": "scipy",
    "name": "_moment_raw",
    "source_code": "def _moment_raw(self, order=1, *, method=None):\n    methods = self._moment_methods if method is None else {method}\n    return self._moment_raw_dispatch(order, methods=methods, **self._parameters)",
    "docstring": "Raw distribution moment about the origin.",
    "type": "method",
    "file_path": "scipy\\scipy\\stats\\_distribution_infrastructure.py",
    "ast_data": "FunctionDef name:_moment_raw arg:self arg:order arguments arg arg arg Assign Compare Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "_omp_path_residues",
    "source_code": "def _omp_path_residues(X_train, y_train, X_test, y_test, copy=True, fit_intercept=True, max_iter=100):\n    if copy:\n        X_train = X_train.copy()\n        y_train = y_train.copy()\n        X_test = X_test.copy()\n        y_test = y_test.copy()\n    if fit_intercept:\n        X_mean = X_train.mean(axis=0)\n        X_train -= X_mean\n        X_test -= X_mean\n        y_mean = y_train.mean(axis=0)\n        y_train = as_float_array(y_train, copy=False)\n        y_train -= y_mean\n        y_test = as_float_array(y_test, copy=False)\n        y_test -= y_mean\n    coefs = orthogonal_mp(X_train, y_train, n_nonzero_coefs=max_iter, tol=None, precompute=False, copy_X=False, return_path=True)\n    if coefs.ndim == 1:\n        coefs = coefs[:, np.newaxis]\n    return np.dot(coefs.T, X_test.T) - y_test",
    "docstring": "Compute the residues on left-out data for a full LARS path. Parameters ---------- X_train : ndarray of shape (n_samples, n_features) The data to fit the LARS on. y_train : ndarray of shape (n_samples) The target variable to fit LARS on. X_test : ndarray of shape (n_samples, n_features) The data to compute the residues on. y_test : ndarray of shape (n_samples) The target variable to compute the residues on. copy : bool, default=True Whether X_train, X_test, y_train and y_test should be copied. If False, they may be overwritten. fit_intercept : bool, default=True Whether to calculate the intercept for this model. If set to false, no intercept will be used in calculations (i.e. data is expected to be centered). max_iter : int, default=100 Maximum numbers of iterations to perform, therefore maximum features to include. 100 by default. Returns ------- residues : ndarray of shape (n_samples, max_features) Residues of the prediction on the test data.",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\linear_model\\_omp.py",
    "ast_data": "FunctionDef name:_omp_path_residues arg:X_train arg:y_train arg:X_test arg:y_test arg:copy arg:fit_intercept arg:max_iter arguments arg arg arg arg arg arg arg If Assign Call Assign Call Assign Call Assign Call If Assign Call Assign Call Assign Call Assign Call Assign Call If Compare Assign Return return:yes Call"
  },
  {
    "library": "scrapy",
    "name": "parse_node",
    "source_code": "def parse_node(self, response: Response, selector: Selector) -> Any:\n    if hasattr(self, 'parse_item'):\n        return self.parse_item(response, selector)\n    raise NotImplementedError",
    "docstring": "This method must be overridden with your custom spider functionality",
    "type": "method",
    "file_path": "scrapy\\scrapy\\spiders\\feed.py",
    "ast_data": "FunctionDef name:parse_node arg:self arg:response arg:selector arguments arg arg arg If Call Return return:yes Call Raise"
  },
  {
    "library": "scipy",
    "name": "integrate",
    "source_code": "def integrate(self, ranges, extrapolate=None):\n    ndim = len(self.x)\n    if extrapolate is None:\n        extrapolate = self.extrapolate\n    else:\n        extrapolate = bool(extrapolate)\n    if not hasattr(ranges, '__len__') or len(ranges) != ndim:\n        raise ValueError('Range not a sequence of correct length')\n    self._ensure_c_contiguous()\n    c = self.c\n    for n, (a, b) in enumerate(ranges):\n        swap = list(range(c.ndim))\n        swap.insert(1, swap[ndim - n])\n        del swap[ndim - n + 1]\n        c = c.transpose(swap)\n        p = PPoly.construct_fast(c, self.x[n], extrapolate=extrapolate)\n        out = p.integrate(a, b, extrapolate=extrapolate)\n        c = out.reshape(c.shape[2:])\n    return c",
    "docstring": "Compute a definite integral over a piecewise polynomial. Parameters ---------- ranges : ndim-tuple of 2-tuples float Sequence of lower and upper bounds for each dimension, `` extrapolate : bool, optional Whether to extrapolate to out-of-bounds points based on first and last intervals, or to return NaNs. Returns ------- ig : array_like Definite integral of the piecewise polynomial over [a[0], b[0]] x ... x [a[ndim-1], b[ndim-1]]",
    "type": "method",
    "file_path": "scipy\\scipy\\interpolate\\_interpolate.py",
    "ast_data": "FunctionDef name:integrate arg:self arg:ranges arg:extrapolate arguments arg arg arg Assign Call If Compare Assign Assign Call If BoolOp Call Compare Call Raise Call Call Assign For Call Assign Call Call Call Assign Call Assign Call Assign Call Assign Call Return return:yes"
  },
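  A minimal sketch of the `integrate` method through the public `NdPPoly` class, using the simplest possible piecewise polynomial (one 2-D cell holding a constant) so the expected value is easy to check by hand:

  ```python
  import numpy as np
  from scipy.interpolate import NdPPoly

  # Single 2-D cell on [0, 1] x [0, 2] holding the constant polynomial f = 2;
  # the coefficient array has shape (k0+1, k1+1, nx-1, ny-1).
  c = np.full((1, 1, 1, 1), 2.0)
  p = NdPPoly(c, (np.array([0.0, 1.0]), np.array([0.0, 2.0])))

  # Definite integral over the full domain: value 2 * area 2 = 4.
  print(p.integrate(((0.0, 1.0), (0.0, 2.0))))  # -> 4.0
  ```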
  {
    "library": "cryptography",
    "name": "block_size",
    "source_code": "@property\n@abc.abstractmethod\ndef block_size(self) -> int:\n    pass",
    "docstring": "The size of a block as an integer in bits (e.g. 64, 128).",
    "type": "method",
    "file_path": "cryptography\\src\\cryptography\\hazmat\\primitives\\_cipheralgorithm.py",
    "ast_data": "FunctionDef name:block_size arg:self arguments arg"
  },
  {
    "library": "numpy",
    "name": "f2cexpr",
    "source_code": "def f2cexpr(expr):\n    expr = re.sub('\\\\blen\\\\b', 'f2py_slen', expr)\n    return expr",
    "docstring": "Rewrite Fortran expression as f2py supported C expression. Due to the lack of a proper expression parser in f2py, this function uses a heuristic approach that assumes that Fortran arithmetic expressions are valid C arithmetic expressions when mapping Fortran function calls to the corresponding C function/CPP macros calls.",
    "type": "function",
    "file_path": "numpy\\numpy\\f2py\\capi_maps.py",
    "ast_data": "FunctionDef name:f2cexpr arg:expr arguments arg Assign Call Return return:yes"
  },
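  The word-boundary regex in `f2cexpr` only rewrites standalone `len` tokens, which a quick check makes concrete:

  ```python
  from numpy.f2py.capi_maps import f2cexpr

  print(f2cexpr("len(name) + 1"))  # -> 'f2py_slen(name) + 1'
  print(f2cexpr("length + 1"))     # word boundary: 'length' is left untouched
  ```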
  {
    "library": "pytorch",
    "name": "narrow_tensor",
    "source_code": "def narrow_tensor(tensor: torch.Tensor, metadata: ShardMetadata) -> torch.Tensor:\n    return narrow_tensor_by_index(tensor, metadata.shard_offsets, metadata.shard_sizes)",
    "docstring": "Narrow the tensor according to the metadata",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\_shard\\_utils.py",
    "ast_data": "FunctionDef name:narrow_tensor arg:tensor arg:metadata arguments arg arg Return return:yes Call"
  },
  {
    "library": "kornia",
    "name": "image_to_tensor",
    "source_code": "def image_to_tensor(image: Any, keepdim: bool=True) -> Tensor:\n    if len(image.shape) > 4 or len(image.shape) < 2:\n        raise ValueError('Input size must be a two, three or four dimensional array')\n    input_shape = image.shape\n    tensor: Tensor = torch.from_numpy(image)\n    if len(input_shape) == 2:\n        tensor = tensor.unsqueeze(0)\n    elif len(input_shape) == 3:\n        tensor = tensor.permute(2, 0, 1)\n    elif len(input_shape) == 4:\n        tensor = tensor.permute(0, 3, 1, 2)\n        keepdim = True\n    else:\n        raise ValueError(f'Cannot process image with shape {input_shape}')\n    return tensor.unsqueeze(0) if not keepdim else tensor",
    "docstring": "Convert a numpy image to a PyTorch 4d tensor image. Args: image: image of the form :math:, :math: or :math:. keepdim: If `(B, H, W, C)(B, C, H, W)(C, H, W)` otherwise. Example: >>> img = np.ones((3, 3)) >>> image_to_tensor(img).shape torch.Size([1, 3, 3]) >>> img = np.ones((4, 4, 1)) >>> image_to_tensor(img).shape torch.Size([1, 4, 4]) >>> img = np.ones((4, 4, 3)) >>> image_to_tensor(img, keepdim=False).shape torch.Size([1, 3, 4, 4])",
    "type": "function",
    "file_path": "kornia\\kornia\\utils\\image.py",
    "ast_data": "FunctionDef name:image_to_tensor arg:image arg:keepdim arguments arg arg If BoolOp Compare Call Compare Call Raise Call Assign Call If Compare Call Assign Call If Compare Call Assign Call If Compare Call Assign Call Assign Raise Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "constraint",
    "source_code": "@property\ndef constraint(self):\n    raise NotImplementedError",
    "docstring": "Returns the constraint function associated with this variable. Returns: The constraint function that was passed to the variable constructor. Can be if no constraint was passed.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\variables.py",
    "ast_data": "FunctionDef name:constraint arg:self arguments arg Raise"
  },
  {
    "library": "tensorflow",
    "name": "metrics",
    "source_code": "@property\ndef metrics(self):\n    if not self._built:\n        return []\n    return self._metrics_in_order",
    "docstring": "All metrics in this container.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\compile_utils.py",
    "ast_data": "FunctionDef name:metrics arg:self arguments arg If Return return:no Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "get_session_handle",
    "source_code": "@tf_export(v1=['get_session_handle'])\ndef get_session_handle(data, name=None):\n    if not isinstance(data, tensor_lib.Tensor):\n        raise TypeError('`data` must be of type Tensor.')\n    with ops.colocate_with(data):\n        return gen_data_flow_ops.get_session_handle(data, name=name)",
    "docstring": "Return the handle of . This is EXPERIMENTAL and subject to change. Keep \"in-place\" in the runtime and create a handle that can be used to retrieve in a subsequent run(). Combined with , we can keep a tensor produced in one run call in place, and use it as the input in a future run call. Args: data: A tensor to be stored in the session. name: Optional name prefix for the return tensor. Returns: A scalar string tensor representing a unique handle for . Raises: TypeError: if is not a Tensor. Example:",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\session_ops.py",
    "ast_data": "FunctionDef name:get_session_handle arg:data arg:name arguments arg arg If Call Raise Call With Call Return return:yes Call Call"
  },
  {
    "library": "matplotlib",
    "name": "get_lines",
    "source_code": "def get_lines(self):\n    return [h for h in self.legend_handles if isinstance(h, Line2D)]",
    "docstring": "Return the list of \\s in the legend.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\legend.py",
    "ast_data": "FunctionDef name:get_lines arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "django",
    "name": "all_locale_paths",
    "source_code": "def all_locale_paths():\n    globalpath = os.path.join(os.path.dirname(sys.modules[settings.__module__].__file__), 'locale')\n    app_paths = []\n    for app_config in apps.get_app_configs():\n        locale_path = os.path.join(app_config.path, 'locale')\n        if os.path.exists(locale_path):\n            app_paths.append(locale_path)\n    return [globalpath, *settings.LOCALE_PATHS, *app_paths]",
    "docstring": "Return a list of paths to user-provides languages files.",
    "type": "function",
    "file_path": "django\\django\\utils\\translation\\trans_real.py",
    "ast_data": "FunctionDef name:all_locale_paths arguments Assign Call Call Assign For Call Assign Call If Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "parallel_info",
    "source_code": "def parallel_info() -> str:\n    return torch._C._parallel_info()",
    "docstring": "Returns detailed string with parallelization settings",
    "type": "function",
    "file_path": "pytorch\\torch\\__config__.py",
    "ast_data": "FunctionDef name:parallel_info arguments Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "_get_aten_name",
    "source_code": "def _get_aten_name(self, node: torch.fx.Node) -> registration.OpName:\n    if node.target == operator.getitem:\n        return registration.OpName.from_name_parts(namespace='aten', op_name='getitem')\n    if isinstance(node.target, torch._ops.OpOverloadPacket):\n        if node.target != torch.ops.aten.sym_size:\n            raise RuntimeError(f'Unsupported OverloadPacket: {node.target}, aten.sym_size is the only allowed OverloadPacket!')\n        aten_op_default = node.target.default\n        return registration.OpName.from_op_overload(op_overload=aten_op_default)\n    if isinstance(node.target, types.BuiltinFunctionType):\n        for node_arg in node.args:\n            if not isinstance(node_arg, (torch.fx.Node, int, float)) or (isinstance(node_arg, torch.fx.Node) and (not fx_type_utils.is_torch_symbolic_type(node_arg.meta['val']))):\n                raise RuntimeError(f'Unsupported node arg: {node_arg} (type {type(node_arg)}) with builtin function: {node.target}, only int/float/SymInt/SymFloat is supported with built-in ops!')\n        return registration.OpName.from_builtin_function(node.target)\n    if isinstance(node.target, torch._ops.OpOverload):\n        return registration.OpName.from_op_overload(op_overload=node.target)\n    raise RuntimeError(f'Unknown call_function target: {node.target}')",
    "docstring": "Get the OpName from the target. Args: node: The TorchFX node to get the aten name for. Returns: The internal op name within dataclass: registration.OpName.",
    "type": "method",
    "file_path": "pytorch\\torch\\onnx\\_internal\\fx\\onnxfunction_dispatcher.py",
    "ast_data": "FunctionDef name:_get_aten_name arg:self arg:node arguments arg arg If Compare Return return:yes Call If Call If Compare Raise Call Assign Return return:yes Call If Call For If BoolOp Call BoolOp Call Call Raise Call Call Return return:yes Call If Call Return return:yes Call Raise Call"
  },
  {
    "library": "scikit-learn",
    "name": "_get_elem_at_rank",
    "source_code": "def _get_elem_at_rank(rank, data, n_negative, n_zeros):\n    if rank < n_negative:\n        return data[rank]\n    if rank - n_negative < n_zeros:\n        return 0\n    return data[rank - n_zeros]",
    "docstring": "Find the value in data augmented with n_zeros for the given rank",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\utils\\sparsefuncs.py",
    "ast_data": "FunctionDef name:_get_elem_at_rank arg:rank arg:data arg:n_negative arg:n_zeros arguments arg arg arg arg If Compare Return return:yes If Compare Return return:yes Return return:yes"
  },
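  A check of `_get_elem_at_rank` against the dense column it implicitly describes (the helper is private to `sklearn.utils.sparsefuncs`; `data` holds only the sorted non-zero entries, with the zeros counted separately):

  ```python
  import numpy as np
  from sklearn.utils.sparsefuncs import _get_elem_at_rank  # private helper

  # Sorted non-zero column data: 3 negatives, then (implicitly) 2 zeros.
  data = np.array([-5, -2, -1, 1, 4])
  n_negative, n_zeros = 3, 2
  full = np.array([-5, -2, -1, 0, 0, 1, 4])  # the dense column, for comparison
  for rank in range(full.size):
      assert _get_elem_at_rank(rank, data, n_negative, n_zeros) == full[rank]
  ```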
  {
    "library": "pandas",
    "name": "_unbox_scalar",
    "source_code": "def _unbox_scalar(self, value: DTScalarOrNaT) -> np.int64 | np.datetime64 | np.timedelta64:\n    raise AbstractMethodError(self)",
    "docstring": "Unbox the integer value of a scalar . Parameters ---------- value : Period, Timestamp, Timedelta, or NaT Depending on subclass. Returns ------- int Examples -------- >>> arr = pd.array(np.array([\"1970-01-01\"], \"datetime64[ns]\")) >>> arr._unbox_scalar(arr[0]) np.datetime64('1970-01-01T00:00:00.000000000')",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\arrays\\datetimelike.py",
    "ast_data": "FunctionDef name:_unbox_scalar arg:self arg:value arguments arg arg Raise Call"
  },
  {
    "library": "pytorch",
    "name": "scatter_kwargs",
    "source_code": "def scatter_kwargs(inputs: tuple[Any, ...], kwargs: Optional[dict[str, Any]], target_gpus: Sequence[Union[int, torch.device]], dim: int=0) -> tuple[tuple[Any, ...], tuple[dict[str, Any], ...]]:\n    scattered_inputs = scatter(inputs, target_gpus, dim) if inputs else []\n    scattered_kwargs = scatter(kwargs, target_gpus, dim) if kwargs else []\n    if len(scattered_inputs) < len(scattered_kwargs):\n        scattered_inputs.extend((() for _ in range(len(scattered_kwargs) - len(scattered_inputs))))\n    elif len(scattered_kwargs) < len(inputs):\n        scattered_kwargs.extend(({} for _ in range(len(scattered_inputs) - len(scattered_kwargs))))\n    return (tuple(scattered_inputs), tuple(scattered_kwargs))",
    "docstring": "Scatter with support for kwargs dictionary.",
    "type": "function",
    "file_path": "pytorch\\torch\\nn\\parallel\\scatter_gather.py",
    "ast_data": "FunctionDef name:scatter_kwargs arg:inputs arg:kwargs arg:target_gpus arg:dim arguments arg arg arg arg Assign Call Assign Call If Compare Call Call Call Call Call Call If Compare Call Call Call Call Call Call Return return:yes Call Call"
  },
  {
    "library": "scikit-learn",
    "name": "get_feature_names_out",
    "source_code": "def get_feature_names_out(self, input_features=None):\n    check_is_fitted(self)\n    input_features = _check_feature_names_in(self, input_features)\n    transformer_with_feature_names_out = []\n    for name, trans, *_ in self._iter(fitted=True, column_as_labels=False, skip_empty_columns=True, skip_drop=True):\n        feature_names_out = self._get_feature_name_out_for_transformer(name, trans, input_features)\n        if feature_names_out is None:\n            continue\n        transformer_with_feature_names_out.append((name, feature_names_out))\n    if not transformer_with_feature_names_out:\n        return np.array([], dtype=object)\n    return self._add_prefix_for_feature_names_out(transformer_with_feature_names_out)",
    "docstring": "Get output feature names for transformation. Parameters ---------- input_features : array-like of str or None, default=None Input features. - If is , then is used as feature names in. If is not defined, then the following input feature names are generated: . - If is an array-like, then must match if is defined. Returns ------- feature_names_out : ndarray of str objects Transformed feature names.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\compose\\_column_transformer.py",
    "ast_data": "FunctionDef name:get_feature_names_out arg:self arg:input_features arguments arg arg Call Assign Call Assign For Call Assign Call If Compare Call If Return return:yes Call Return return:yes Call"
  },
  {
    "library": "numpy",
    "name": "_ezclump",
    "source_code": "def _ezclump(mask):\n    if mask.ndim > 1:\n        mask = mask.ravel()\n    idx = (mask[1:] ^ mask[:-1]).nonzero()\n    idx = idx[0] + 1\n    if mask[0]:\n        if len(idx) == 0:\n            return [slice(0, mask.size)]\n        r = [slice(0, idx[0])]\n        r.extend((slice(left, right) for left, right in zip(idx[1:-1:2], idx[2::2])))\n    else:\n        if len(idx) == 0:\n            return []\n        r = [slice(left, right) for left, right in zip(idx[:-1:2], idx[1::2])]\n    if mask[-1]:\n        r.append(slice(idx[-1], mask.size))\n    return r",
    "docstring": "Finds the clumps (groups of data with the same values) for a 1D bool array. Returns a series of slices.",
    "type": "function",
    "file_path": "numpy\\numpy\\ma\\extras.py",
    "ast_data": "FunctionDef name:_ezclump arg:mask arguments arg If Compare Assign Call Assign Call Assign If If Compare Call Return return:yes Call Assign Call Call Call Call If Compare Call Return return:no Assign Call Call If Call Call Return return:yes"
  },
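  A quick demonstration of `_ezclump` (a private helper in `numpy.ma.extras`): each returned slice covers one run of consecutive `True` values.

  ```python
  import numpy as np
  from numpy.ma.extras import _ezclump  # private helper

  mask = np.array([1, 1, 0, 0, 1, 0, 1, 1, 1], dtype=bool)
  print(_ezclump(mask))
  # -> [slice(0, 2), slice(4, 5), slice(6, 9)]
  ```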
  {
    "library": "kornia",
    "name": "write_image",
    "source_code": "def write_image(path_file: str | Path, image: Tensor, quality: int=80) -> None:\n    if not isinstance(path_file, Path):\n        path_file = Path(path_file)\n    KORNIA_CHECK(path_file.suffix in ['.jpg', '.jpeg', '.png', '.tiff'], f'Invalid file extension: {path_file}, only .jpg, .jpeg, .png and .tiff are supported.')\n    KORNIA_CHECK(image.dim() >= 2, f'Invalid image shape: {image.shape}. Must be at least 2D.')\n    img_np = tensor_to_image(image, keepdim=True, force_contiguous=True)\n    if image.dtype == torch.uint8:\n        _write_uint8_image(path_file, img_np, quality)\n    elif image.dtype == torch.uint16:\n        _write_uint16_image(path_file, img_np)\n    elif image.dtype == torch.float32:\n        _write_float32_image(path_file, img_np)\n    else:\n        raise NotImplementedError(f'Unsupported image dtype: {image.dtype}')",
    "docstring": "Save an image file using the Kornia Rust backend. Args: path_file: Path to a valid image file. image: Image tensor with shape :math:, and . quality: The quality of the JPEG encoding. If the file extension is .png or .tiff, the quality is ignored. Return: None.",
    "type": "function",
    "file_path": "kornia\\kornia\\io\\io.py",
    "ast_data": "FunctionDef name:write_image arg:path_file arg:image arg:quality arguments arg arg arg If Call Assign Call Call Compare Call Compare Call Assign Call If Compare Call If Compare Call If Compare Call Raise Call"
  },
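  A usage sketch for `write_image`, assuming the kornia-rs backend is installed; the random image and output name are illustrative:

  ```python
  import torch
  from kornia.io import write_image

  # Hypothetical 3-channel uint8 image; the uint8 dtype routes to the
  # uint8 encoder, and the .jpg suffix makes the quality argument apply.
  img = (torch.rand(3, 32, 32) * 255).to(torch.uint8)
  write_image("out.jpg", img, quality=90)
  ```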
  {
    "library": "tensorflow",
    "name": "_array_internal",
    "source_code": "def _array_internal(val, dtype=None, copy=True, ndmin=0):\n    result_t = val\n    if not isinstance(result_t, tensor_lib.Tensor):\n        dtype = np_utils.result_type_unary(result_t, dtype)\n        result_t = np_arrays.convert_to_tensor(result_t, dtype_hint=dtype)\n        result_t = math_ops.cast(result_t, dtype=dtype)\n    elif dtype:\n        result_t = math_ops.cast(result_t, dtype)\n    if copy:\n        result_t = array_ops.identity(result_t)\n    max_ndmin = 32\n    if ndmin > max_ndmin:\n        raise ValueError(f'ndmin bigger than allowable number of dimensions: {max_ndmin}.')\n    if ndmin == 0:\n        return result_t\n    ndims = array_ops.rank(result_t)\n\n    def true_fn():\n        old_shape = array_ops.shape(result_t)\n        new_shape = array_ops.concat([array_ops.ones(ndmin - ndims, dtypes.int32), old_shape], axis=0)\n        return array_ops.reshape(result_t, new_shape)\n    result_t = np_utils.cond(np_utils.greater(ndmin, ndims), true_fn, lambda: result_t)\n    return result_t",
    "docstring": "Main implementation of np.array().",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\numpy_ops\\np_array_ops.py",
    "ast_data": "FunctionDef name:_array_internal arg:val arg:dtype arg:copy arg:ndmin arguments arg arg arg arg Assign If Call Assign Call Assign Call Assign Call If Assign Call If Assign Call Assign If Compare Raise Call If Compare Return return:yes Assign Call FunctionDef name:true_fn arguments Assign Call Assign Call Call Return return:yes Call Assign Call Call arguments Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "sink_waits",
    "source_code": "def sink_waits(snodes: list[BaseSchedulerNode]) -> list[BaseSchedulerNode]:\n    return _schedule_for_comm(snodes, raise_comms=False, sink_waits=True, reorder_for_overlap=False)",
    "docstring": "Greedily schedules waits as late as possible.",
    "type": "function",
    "file_path": "pytorch\\torch\\_inductor\\comms.py",
    "ast_data": "FunctionDef name:sink_waits arg:snodes arguments arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_MinMax",
    "source_code": "@_implements(_CalibrationMethod.CALIBRATION_METHOD_MIN_MAX)\nclass _MinMax(_CalibrationAlgorithmBase):\n\n    def get_min_max_value(self) -> tuple[float, float]:\n        return (self._statistics.min_max_statistics.global_min, self._statistics.min_max_statistics.global_max)",
    "docstring": "MinMaxCalibrationAlgorithm for calculating min and max values of calibration result. MinMax calibration calculates the global min and global max values. global min = min of given sample inputs global max = max of given sample inputs",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\compiler\\mlir\\quantization\\tensorflow\\calibrator\\calibration_algorithm.py",
    "ast_data": "ClassDef name:_MinMax FunctionDef name:get_min_max_value arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "delete",
    "source_code": "def delete(self, loc) -> MultiIndex:\n    new_codes = [np.delete(level_codes, loc) for level_codes in self.codes]\n    return MultiIndex(levels=self.levels, codes=new_codes, names=self.names, verify_integrity=False)",
    "docstring": "Make new index with passed location deleted Returns ------- new_index : MultiIndex",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\indexes\\multi.py",
    "ast_data": "FunctionDef name:delete arg:self arg:loc arguments arg arg Assign Call Return return:yes Call"
  },
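  A short example of `MultiIndex.delete`; since it applies `np.delete` per level, a list of locations also works:

  ```python
  import pandas as pd

  mi = pd.MultiIndex.from_tuples([("a", 1), ("a", 2), ("b", 1)])
  print(mi.delete(1))       # drops position 1 -> [('a', 1), ('b', 1)]
  print(mi.delete([0, 2]))  # np.delete semantics -> [('a', 2)]
  ```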
  {
    "library": "django",
    "name": "acount",
    "source_code": "async def acount(self):\n    if self._cache_acount is not None:\n        return self._cache_acount\n    c = getattr(self.object_list, 'acount', None)\n    if iscoroutinefunction(c) and (not inspect.isbuiltin(c)) and method_has_no_args(c):\n        count = await c()\n    else:\n        count = len(self.object_list)\n    self._cache_acount = count\n    return count",
    "docstring": "See Paginator.count().",
    "type": "method",
    "file_path": "django\\django\\core\\paginator.py",
    "ast_data": "AsyncFunctionDef name:acount arg:self arguments arg If Compare Return return:yes Assign Call If BoolOp Call Call Call Assign Call Assign Call Assign Return return:yes"
  },
  {
    "library": "sphinx",
    "name": "post_process_images",
    "source_code": "def post_process_images(self, doctree: Node) -> None:\n    super().post_process_images(doctree)\n    if self.config.html_scaled_image_link and self.html_scaled_image_link:\n        for node in doctree.findall(nodes.image):\n            if not any((key in node for key in ('scale', 'width', 'height'))):\n                continue\n            if isinstance(node.parent, nodes.reference):\n                continue\n            if 'no-scaled-link' in node['classes']:\n                continue\n            uri = node['uri']\n            reference = nodes.reference('', '', internal=True)\n            if uri in self.images:\n                reference['refuri'] = posixpath.join(self.imgpath, self.images[uri])\n            else:\n                reference['refuri'] = uri\n            node.replace_self(reference)\n            reference.append(node)",
    "docstring": "Pick the best candidate for an image and link down-scaled images to their high resolution version.",
    "type": "method",
    "file_path": "sphinx\\sphinx\\builders\\html\\__init__.py",
    "ast_data": "FunctionDef name:post_process_images arg:self arg:doctree arguments arg arg Call Call If BoolOp For Call If Call Compare If Call If Compare Assign Assign Call If Compare Assign Call Assign Call Call"
  },
  {
    "library": "kornia",
    "name": "_initialise_cluster_centers",
    "source_code": "def _initialise_cluster_centers(self, X: Tensor, num_clusters: int) -> Tensor:\n    num_samples: int = len(X)\n    perm = torch.randperm(num_samples, device=X.device)\n    idx = perm[:num_clusters]\n    initial_state = X[idx]\n    return initial_state",
    "docstring": "Chooses num_cluster points from X as the initial cluster centers. Args: X: 2D input tensor to be clustered num_clusters: number of desired cluster centers Returns: 2D Tensor with num_cluster rows",
    "type": "method",
    "file_path": "kornia\\kornia\\contrib\\kmeans.py",
    "ast_data": "FunctionDef name:_initialise_cluster_centers arg:self arg:X arg:num_clusters arguments arg arg arg Call Assign Call Assign Assign Return return:yes"
  },
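The initialisation above is plain uniform sampling without replacement; a minimal standalone sketch of the same idea (the sample data is made up):

```python
import torch

X = torch.randn(100, 2)      # 100 points to cluster
num_clusters = 5
# randperm yields a random ordering; taking its head samples rows without replacement
idx = torch.randperm(len(X), device=X.device)[:num_clusters]
initial_state = X[idx]
print(initial_state.shape)   # torch.Size([5, 2]) -- num_clusters rows of X
```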
  {
    "library": "matplotlib",
    "name": "loglog",
    "source_code": "@_docstring.interpd\ndef loglog(self, *args, **kwargs):\n    dx = {k: v for k, v in kwargs.items() if k in ['base', 'subs', 'nonpositive', 'basex', 'subsx', 'nonposx']}\n    self.set_xscale('log', **dx)\n    dy = {k: v for k, v in kwargs.items() if k in ['base', 'subs', 'nonpositive', 'basey', 'subsy', 'nonposy']}\n    self.set_yscale('log', **dy)\n    return self.plot(*args, **{k: v for k, v in kwargs.items() if k not in {*dx, *dy}})",
    "docstring": "Make a plot with log scaling on both the x- and y-axis. Call signatures:: loglog([x], y, [fmt], data=None, **kwargs) loglog([x], y, [fmt], [x2], y2, [fmt2], ..., **kwargs) This is just a thin wrapper around which additionally changes both the x-axis and the y-axis to log scaling. All the concepts and parameters of plot can be used here as well. The additional parameters *base*, *subs* and *nonpositive* control the x/y-axis properties. They are just forwarded to and . To use different properties on the x-axis and the y-axis, use e.g. `.Axes.set_xscale.Axes.set_yscale.plot.Line2D` Objects representing the plotted data.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\axes\\_axes.py",
    "ast_data": "FunctionDef name:loglog arg:self arguments arg arg arg Assign Call Compare Call Assign Call Compare Call Return return:yes Call Call Compare"
  },
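A short usage sketch of `loglog`: the scale-related keywords are split out and forwarded to `set_xscale`/`set_yscale`, and everything else reaches `plot` (the data here is made up):

```python
import numpy as np
import matplotlib.pyplot as plt

x = np.logspace(0, 3, 50)
fig, ax = plt.subplots()
# 'base' is routed to both set_xscale and set_yscale; marker/linestyle go to plot()
ax.loglog(x, x**2, base=10, marker="o", linestyle="")
plt.show()
```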
  {
    "library": "scikit-learn",
    "name": "_calc_impute",
    "source_code": "def _calc_impute(self, dist_pot_donors, n_neighbors, fit_X_col, mask_fit_X_col):\n    donors_idx = np.argpartition(dist_pot_donors, n_neighbors - 1, axis=1)[:, :n_neighbors]\n    donors_dist = dist_pot_donors[np.arange(donors_idx.shape[0])[:, None], donors_idx]\n    weight_matrix = _get_weights(donors_dist, self.weights)\n    if weight_matrix is not None:\n        weight_matrix[np.isnan(weight_matrix)] = 0.0\n    else:\n        weight_matrix = np.ones_like(donors_dist)\n        weight_matrix[np.isnan(donors_dist)] = 0.0\n    donors = fit_X_col.take(donors_idx)\n    donors_mask = mask_fit_X_col.take(donors_idx)\n    donors = np.ma.array(donors, mask=donors_mask)\n    return np.ma.average(donors, axis=1, weights=weight_matrix).data",
    "docstring": "Helper function to impute a single column. Parameters ---------- dist_pot_donors : ndarray of shape (n_receivers, n_potential_donors) Distance matrix between the receivers and potential donors from training set. There must be at least one non-nan distance between a receiver and a potential donor. n_neighbors : int Number of neighbors to consider. fit_X_col : ndarray of shape (n_potential_donors,) Column of potential donors from training set. mask_fit_X_col : ndarray of shape (n_potential_donors,) Missing mask for fit_X_col. Returns ------- imputed_values: ndarray of shape (n_receivers,) Imputed values for receiver.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\impute\\_knn.py",
    "ast_data": "FunctionDef name:_calc_impute arg:self arg:dist_pot_donors arg:n_neighbors arg:fit_X_col arg:mask_fit_X_col arguments arg arg arg arg arg Assign Call Assign Call Assign Call If Compare Assign Call Assign Call Assign Call Assign Call Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "spectrogram",
    "source_code": "def spectrogram(self, x: np.ndarray, y: np.ndarray | None=None, detr: Callable[[np.ndarray], np.ndarray] | Literal['linear', 'constant'] | None=None, *, p0: int | None=None, p1: int | None=None, k_offset: int=0, padding: PAD_TYPE='zeros', axis: int=-1) -> np.ndarray:\n    Sx = self.stft_detrend(x, detr, p0, p1, k_offset=k_offset, padding=padding, axis=axis)\n    if y is None or y is x:\n        return Sx.real ** 2 + Sx.imag ** 2\n    Sy = self.stft_detrend(y, detr, p0, p1, k_offset=k_offset, padding=padding, axis=axis)\n    return Sx * Sy.conj()",
    "docstring": "Calculate spectrogram or cross-spectrogram. The spectrogram is the absolute square of the STFT, i.e., it is `~ShortTimeFFT.stftstft_detrendfft_modexxfft_mode~scipy.signal.detrenddetrdetrp_minp_max(n)xxxxyxaxisxcsd|Sxy|²SxySxxSyy~coherence~from_win_equals_dualf_i(t)ShortTimeFFThopx~ShortTimeFFT.stftscipy.signal.ShortTimeFFT`: Class this method belongs to.",
    "type": "method",
    "file_path": "scipy\\scipy\\signal\\_short_time_fft.py",
    "ast_data": "FunctionDef name:spectrogram arg:self arg:x arg:y arg:detr arguments arg arg arg arg arg arg arg arg arg Assign Call If BoolOp Compare Compare Return return:yes Assign Call Return return:yes Call"
  },
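A minimal sketch of the spectrogram/cross-spectrogram split in the code above, assuming a SciPy version that ships `ShortTimeFFT` (the signal and window parameters are made up):

```python
import numpy as np
from scipy.signal import ShortTimeFFT
from scipy.signal.windows import gaussian

fs = 1000.0
t = np.arange(0, 1, 1 / fs)
x = np.sin(2 * np.pi * 100 * t)

SFT = ShortTimeFFT(gaussian(50, std=8, sym=True), hop=10, fs=fs)
Sxx = SFT.spectrogram(x)           # y is None -> |Sx|**2, real and non-negative
Sxy = SFT.spectrogram(x, x[::-1])  # y given   -> Sx * Sy.conj(), complex in general
print(Sxx.dtype, Sxy.dtype)
```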
  {
    "library": "kornia",
    "name": "deepcopy_dict",
    "source_code": "def deepcopy_dict(params: Dict[str, Any]) -> Dict[str, Any]:\n    out = {}\n    for k, v in params.items():\n        if isinstance(v, Tensor):\n            out.update({k: v.clone()})\n        else:\n            out.update({k: v})\n    return out",
    "docstring": "Perform deep copy on any dict. Support tensor copying here.",
    "type": "function",
    "file_path": "kornia\\kornia\\augmentation\\utils\\helpers.py",
    "ast_data": "FunctionDef name:deepcopy_dict arg:params arguments arg Assign For Call If Call Call Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "visit",
    "source_code": "def visit(source: Union[ModuleType, type], dest: Union[ModuleType, SubConfigProxy], prefix: str) -> None:\n    if sys.version_info[:2] < (3, 10):\n        type_hints = getattr(source, '__annotations__', {})\n    else:\n        type_hints = inspect.get_annotations(source)\n    for key, value in list(source.__dict__.items()):\n        if key.startswith('__') or isinstance(value, (ModuleType, FunctionType)) or (hasattr(value, '__module__') and value.__module__ == 'typing') or (isinstance(value, type) and issubclass(value, _Config)):\n            continue\n        name = f'{prefix}{key}'\n        annotated_type = type_hints.get(key, None)\n        if isinstance(value, CONFIG_TYPES):\n            config[name] = _ConfigEntry(_Config(default=value, value_type=annotated_type))\n            if dest is module:\n                delattr(module, key)\n        elif isinstance(value, _Config):\n            if annotated_type is not None and value.value_type is None:\n                value.value_type = annotated_type\n            config[name] = _ConfigEntry(value)\n            if dest is module:\n                delattr(module, key)\n        elif isinstance(value, type):\n            assert value.__module__ == module.__name__\n            proxy = SubConfigProxy(module, f'{name}.')\n            visit(value, proxy, f'{name}.')\n            if dest is module:\n                setattr(dest, key, proxy)\n            else:\n                dest.__dict__[key] = proxy\n        else:\n            raise AssertionError(f'Unhandled config {key}={value} ({type(value)})')",
    "docstring": "Walk the module structure and move everything to module._config",
    "type": "function",
    "file_path": "pytorch\\torch\\utils\\_config_module.py",
    "ast_data": "FunctionDef name:visit arg:source arg:dest arg:prefix arguments arg arg arg If Compare Assign Call Assign Call For Call Call If BoolOp Call Call BoolOp Call Compare BoolOp Call Call Assign Assign Call If Call Assign Call Call If Compare Call If Call If BoolOp Compare Compare Assign Assign Call If Compare Call If Call Compare Assign Call Call If Compare Call Assign Raise Call Call"
  },
  {
    "library": "django",
    "name": "to_python",
    "source_code": "def to_python(self, value):\n    if value in self.empty_values:\n        return None\n    if isinstance(value, datetime.time):\n        return value\n    return super().to_python(value)",
    "docstring": "Validate that the input can be converted to a time. Return a Python datetime.time object.",
    "type": "method",
    "file_path": "django\\django\\forms\\fields.py",
    "ast_data": "FunctionDef name:to_python arg:self arg:value arguments arg arg If Compare Return return:no If Call Return return:yes Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, variable):\n    if not isinstance(variable, variables.Variable):\n        raise ValueError('variable must be of type tf.ResourceVariable, but got: %s' % variable)\n    if not variable.dtype.is_floating:\n        raise ValueError('variable must be a floating point variable but has type: %s' % variable.dtype.name)\n    self._variable = variable\n    self._op = 'delegate'",
    "docstring": "Creates an AutoCastVariable instance. Args: variable: A floating-point resource variable to wrap. Raises: ValueError: If is not a floating-point resource variable",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\mixed_precision\\autocast_variable.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:variable arguments arg arg If Call Raise Call If Raise Call Assign Assign"
  },
  {
    "library": "cryptography",
    "name": "exchange",
    "source_code": "@abc.abstractmethod\ndef exchange(self, peer_public_key: DHPublicKey) -> bytes:\n    pass",
    "docstring": "Given peer's DHPublicKey, carry out the key exchange and return shared key as bytes.",
    "type": "method",
    "file_path": "cryptography\\src\\cryptography\\hazmat\\primitives\\asymmetric\\dh.py",
    "ast_data": "FunctionDef name:exchange arg:self arg:peer_public_key arguments arg arg"
  },
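The abstract `exchange` above is implemented by concrete DH private keys; a typical round trip looks like this (the key size is illustrative, and parameter generation can be slow):

```python
from cryptography.hazmat.primitives.asymmetric import dh

params = dh.generate_parameters(generator=2, key_size=2048)
priv_a = params.generate_private_key()
priv_b = params.generate_private_key()
# Each side feeds in the peer's public key; both derive the same shared bytes.
assert priv_a.exchange(priv_b.public_key()) == priv_b.exchange(priv_a.public_key())
```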
  {
    "library": "kornia",
    "name": "_generate_color_map",
    "source_code": "def _generate_color_map(self, base_colormap: list[RGBColor], num_colors: int) -> Tensor:\n    tensor_colors = tensor(list(base_colormap), dtype=self._dtype, device=self._device).T\n    return interpolate(tensor_colors[None, ...], size=num_colors, mode='linear')[0, ...]",
    "docstring": "Generate a colormap tensor using interpolation. Args: base_colormap: A list of RGB colors defining the colormap. num_colors: Number of colors in the colormap. Returns: A tensor representing the colormap.",
    "type": "method",
    "file_path": "kornia\\kornia\\color\\colormap.py",
    "ast_data": "FunctionDef name:_generate_color_map arg:self arg:base_colormap arg:num_colors arguments arg arg arg Assign Call Call Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "is_unique",
    "source_code": "@property\ndef is_unique(self) -> bool:\n    return True",
    "docstring": "return if the index has unique values",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\indexes\\range.py",
    "ast_data": "FunctionDef name:is_unique arg:self arguments arg Return return:yes"
  },
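The constant `True` above is justified because a `RangeIndex` is an arithmetic range and can never repeat a value; a one-line check:

```python
import pandas as pd

print(pd.RangeIndex(5).is_unique)  # True -- a range cannot contain duplicates
```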
  {
    "library": "scipy",
    "name": "split",
    "source_code": "def split(self, d, split):\n    mid = np.copy(self.maxes)\n    mid[d] = split\n    less = Rectangle(self.mins, mid)\n    mid = np.copy(self.mins)\n    mid[d] = split\n    greater = Rectangle(mid, self.maxes)\n    return (less, greater)",
    "docstring": "Produce two hyperrectangles by splitting. In general, if you need to compute maximum and minimum distances to the children, it can be done more efficiently by updating the maximum and minimum distances to the parent. Parameters ---------- d : int Axis to split hyperrectangle along. split : float Position along axis to split at.",
    "type": "method",
    "file_path": "scipy\\scipy\\spatial\\_kdtree.py",
    "ast_data": "FunctionDef name:split arg:self arg:d arg:split arguments arg arg arg Assign Call Assign Assign Call Assign Call Assign Assign Call Return return:yes"
  },
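A short sketch of `split` on a unit square (`Rectangle` is public in `scipy.spatial`; the coordinates are made up):

```python
from scipy.spatial import Rectangle

rect = Rectangle(maxes=[1.0, 1.0], mins=[0.0, 0.0])  # unit square
less, greater = rect.split(d=0, split=0.25)          # cut axis 0 at 0.25
print(less.mins, less.maxes)        # [0. 0.] [0.25 1.  ]
print(greater.mins, greater.maxes)  # [0.25 0.  ] [1. 1.]
```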
  {
    "library": "pandas",
    "name": "idxmax",
    "source_code": "def idxmax(self, axis: Axis=0, skipna: bool=True, numeric_only: bool=False) -> Series:\n    axis = self._get_axis_number(axis)\n    if self.empty and len(self.axes[axis]):\n        axis_dtype = self.axes[axis].dtype\n        return self._constructor_sliced(dtype=axis_dtype)\n    if numeric_only:\n        data = self._get_numeric_data()\n    else:\n        data = self\n    res = data._reduce(nanops.nanargmax, 'argmax', axis=axis, skipna=skipna, numeric_only=False)\n    indices = res._values\n    if (indices == -1).any():\n        warnings.warn(f'The behavior of {type(self).__name__}.idxmax with all-NA values, or any-NA and skipna=False, is deprecated. In a future version this will raise ValueError', FutureWarning, stacklevel=find_stack_level())\n    index = data._get_axis(axis)\n    result = algorithms.take(index._values, indices, allow_fill=True, fill_value=index._na_value)\n    final_result = data._constructor_sliced(result, index=data._get_agg_axis(axis))\n    return final_result.__finalize__(self, method='idxmax')",
    "docstring": "Return index of first occurrence of maximum over requested axis. NA/null values are excluded. Parameters ---------- axis : {{0 or 'index', 1 or 'columns'}}, default 0 The axis to use. 0 or 'index' for row-wise, 1 or 'columns' for column-wise. skipna : bool, default True Exclude NA/null values. If the entire DataFrame is NA, or if `floatintboolean`. >>> df.idxmax(axis=\"columns\") Pork co2_emissions Wheat Products consumption Beef co2_emissions dtype: object",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\frame.py",
    "ast_data": "FunctionDef name:idxmax arg:self arg:axis arg:skipna arg:numeric_only arguments arg arg arg arg Assign Call If BoolOp Call Assign Return return:yes Call If Assign Call Assign Assign Call Assign If Call Compare Call Call Call Assign Call Assign Call Assign Call Call Return return:yes Call"
  },
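A usage sketch matching the example data in the docstring above:

```python
import pandas as pd

df = pd.DataFrame(
    {"co2_emissions": [37.2, 19.66, 1712.0], "consumption": [10.51, 103.11, 55.48]},
    index=["Pork", "Wheat Products", "Beef"],
)
print(df.idxmax())                # row label of each column's maximum
print(df.idxmax(axis="columns"))  # column label of each row's maximum
```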
  {
    "library": "pandas",
    "name": "common_dtype_categorical_compat",
    "source_code": "def common_dtype_categorical_compat(objs: Sequence[Index | ArrayLike], dtype: DtypeObj) -> DtypeObj:\n    if lib.is_np_dtype(dtype, 'iu'):\n        for obj in objs:\n            obj_dtype = getattr(obj, 'dtype', None)\n            if isinstance(obj_dtype, CategoricalDtype):\n                if isinstance(obj, ABCIndex):\n                    hasnas = obj.hasnans\n                else:\n                    hasnas = cast('Categorical', obj)._hasna\n                if hasnas:\n                    dtype = np.dtype(np.float64)\n                    break\n    return dtype",
    "docstring": "Update the result of find_common_type to account for NAs in a Categorical. Parameters ---------- objs : list[np.ndarray | ExtensionArray | Index] dtype : np.dtype or ExtensionDtype Returns ------- np.dtype or ExtensionDtype",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\dtypes\\cast.py",
    "ast_data": "FunctionDef name:common_dtype_categorical_compat arg:objs arg:dtype arguments arg arg If Call For Assign Call If Call If Call Assign Assign Call If Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "get_ops_from_nodedef",
    "source_code": "def get_ops_from_nodedef(node_def):\n    if not node_def.device:\n        node_def.device = '/cpu:0'\n    kernel_class = _pywrap_kernel_registry.TryFindKernelClass(node_def.SerializeToString())\n    op = str(node_def.op)\n    if kernel_class or op in OPS_WITHOUT_KERNEL_ALLOWLIST:\n        return (op, str(kernel_class.decode('utf-8')) if kernel_class else None)\n    else:\n        tf_logging.warning('Warning: no kernel found for op %s', op)\n        return None",
    "docstring": "Gets the op and kernel needed from the given NodeDef. Args: node_def: TF NodeDef to get op/kernel information. Returns: A tuple of (op_name, kernel_name). If the op is not in the allowlist of ops without kernel and there is no kernel found, then return None.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\tools\\selective_registration_header_lib.py",
    "ast_data": "FunctionDef name:get_ops_from_nodedef arg:node_def arguments arg If Assign Assign Call Call Assign Call If BoolOp Compare Return return:yes Call Call Call Return return:no"
  },
  {
    "library": "tensorflow",
    "name": "_get_saver_def_or_none",
    "source_code": "def _get_saver_def_or_none(exported_model: exported_model_pb2.ExportedModel) -> Optional[saver_pb2.SaverDef]:\n    if exported_model.HasField('saver_def'):\n        return exported_model.saver_def\n    return None",
    "docstring": "Returns the SaverDef from ExportedModel, None otherwise. Args: exported_model: ExportedModel to take the SaverDef from. Returns: SaverDef instance if the field is set. None otherwise.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\compiler\\mlir\\quantization\\tensorflow\\python\\py_function_lib.py",
    "ast_data": "FunctionDef name:_get_saver_def_or_none arg:exported_model arguments arg If Call Return return:yes Return return:no"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, graph_def, input_tensors, output_tensors, input_arrays_with_shape=None, output_arrays=None, experimental_debug_info_func=None):\n    super(TFLiteFrozenGraphConverter, self).__init__(experimental_debug_info_func)\n    self._graph_def = graph_def\n    self._input_tensors = input_tensors\n    self._output_tensors = output_tensors\n    self._control_output_arrays = None\n    if not self._has_valid_tensors():\n        self._input_arrays_with_shape = input_arrays_with_shape\n        self._output_arrays = output_arrays\n    if input_tensors is not None and input_arrays_with_shape is not None:\n        logging.warning('input_arrays_with_shape will be ignored when both the given input_tensors and input_arrays_with_shape are not None.')\n    if output_tensors is not None and output_arrays is not None:\n        logging.warning('output_arrays will be ignored when both the given output_tensors and output_arrays are not None.')",
    "docstring": "Constructor for TFLiteConverter. Args: graph_def: Frozen TensorFlow GraphDef. input_tensors: List of input tensors. Type and shape are computed using and . output_tensors: List of output tensors (only .name is used from this). input_arrays_with_shape: Tuple of strings representing input tensor names and list of integers representing input shapes (e.g., [(\"foo\", [1, 16, 16, 3])]). Use only when graph cannot be loaded into TensorFlow and when and are None. (default None) output_arrays: List of output tensors to freeze graph with. Use only when graph cannot be loaded into TensorFlow and when and are None. (default None) experimental_debug_info_func: An experimental function to retrieve the graph debug info for a set of nodes from the . Raises: ValueError: Invalid arguments.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\lite\\python\\lite.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:graph_def arg:input_tensors arg:output_tensors arg:input_arrays_with_shape arg:output_arrays arg:experimental_debug_info_func arguments arg arg arg arg arg arg arg Call Call Assign Assign Assign Assign If Call Assign Assign If BoolOp Compare Compare Call If BoolOp Compare Compare Call"
  },
  {
    "library": "tensorflow",
    "name": "_call",
    "source_code": "def _call(self, device, args):\n    with context.eager_mode():\n        ret = self._func(*args)\n        device_name = device\n        if device_name is None:\n            device_name = '/job:localhost/replica:0/task:0/device:CPU:0'\n        with ops.device(device):\n            if isinstance(ret, (tuple, list)):\n                outputs = [_maybe_copy_to_context_device(self._convert(x, dtype=dtype), device_name) for x, dtype in zip(ret, self._out_dtypes)]\n            elif ret is None:\n                outputs = None\n            else:\n                outputs = _maybe_copy_to_context_device(self._convert(ret, dtype=self._out_dtypes[0]), device_name)\n    return outputs",
    "docstring": "Passes to , which is executed eagerly.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\script_ops.py",
    "ast_data": "FunctionDef name:_call arg:self arg:device arg:args arguments arg arg arg With Call Assign Call Assign If Compare Assign With Call If Call Assign Call Call Call If Compare Assign Assign Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_get_metrics_from_layers",
    "source_code": "def _get_metrics_from_layers(layers):\n    metrics = []\n    layers = layer_utils.filter_empty_layer_containers(layers)\n    for layer in layers:\n        if isinstance(layer, Model):\n            metrics.extend(layer._metrics)\n            metrics.extend(_get_metrics_from_layers(layer.layers))\n        else:\n            metrics.extend(layer.metrics)\n    return metrics",
    "docstring": "Returns list of metrics from the given layers. This will not include the metrics of a model layer. Args: layers: List of layers. Returns: List of metrics.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\training_v1.py",
    "ast_data": "FunctionDef name:_get_metrics_from_layers arg:layers arguments arg Assign Assign Call For If Call Call Call Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "is_directory",
    "source_code": "@tf_export(v1=['gfile.IsDirectory'])\ndef is_directory(dirname):\n    return is_directory_v2(dirname)",
    "docstring": "Returns whether the path is a directory or not. Args: dirname: string, path to a potential directory Returns: True, if the path is a directory; False otherwise",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\lib\\io\\file_io.py",
    "ast_data": "FunctionDef name:is_directory arg:dirname arguments arg Return return:yes Call Call"
  },
  {
    "library": "scikit-learn",
    "name": "fit",
    "source_code": "@_fit_context(prefer_skip_nested_validation=False)\ndef fit(self, X, y, sample_weight=None, **fit_params):\n    _raise_for_params(fit_params, self, 'fit')\n    X, y = validate_data(self, X, y, accept_sparse=['csr', 'csc'], dtype=None, ensure_all_finite=False, multi_output=True)\n    return self._fit(X, y, max_samples=self.max_samples, sample_weight=sample_weight, **fit_params)",
    "docstring": "Build a Bagging ensemble of estimators from the training set (X, y). Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) The training input samples. Sparse matrices are accepted only if they are supported by the base estimator. y : array-like of shape (n_samples,) The target values (class labels in classification, real numbers in regression). sample_weight : array-like of shape (n_samples,), default=None Sample weights. If None, then samples are equally weighted. Note that this is supported only if the base estimator supports sample weighting. **fit_params : dict Parameters to pass to the underlying estimators. .. versionadded:: 1.5 Only available if , which can be set by using `Metadata Routing User Guide ` for more details. Returns ------- self : object Fitted estimator.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\ensemble\\_bagging.py",
    "ast_data": "FunctionDef name:fit arg:self arg:X arg:y arg:sample_weight arguments arg arg arg arg arg Call Assign Call Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "TritonKernelArtifacts",
    "source_code": "@dataclasses.dataclass(frozen=True)\nclass TritonKernelArtifacts:\n    kernel_hash: str\n    device: int\n    artifacts: list[TritonKernelArtifact]",
    "docstring": "Collection of artifacts for a particular kernel.",
    "type": "class",
    "file_path": "pytorch\\torch\\_inductor\\triton_bundler.py",
    "ast_data": "ClassDef name:TritonKernelArtifacts Call"
  },
  {
    "library": "matplotlib",
    "name": "add_artist",
    "source_code": "def add_artist(self, art):\n    if art.figure != self.canvas.figure:\n        raise RuntimeError\n    art.set_animated(True)\n    self._artists.append(art)",
    "docstring": "Add an artist to be managed. Parameters ---------- art : Artist The artist to be added. Will be set to 'animated' (just to be safe). *art* must be in the figure associated with the canvas this class is managing.",
    "type": "method",
    "file_path": "matplotlib\\galleries\\users_explain\\animations\\blitting.py",
    "ast_data": "FunctionDef name:add_artist arg:self arg:art arguments arg arg If Compare Raise Call Call"
  },
  {
    "library": "pytorch",
    "name": "counter_names",
    "source_code": "def counter_names():\n    return torch._C._lazy._counter_names()",
    "docstring": "Retrieves all the currently active counter names.",
    "type": "function",
    "file_path": "pytorch\\torch\\_lazy\\metrics.py",
    "ast_data": "FunctionDef name:counter_names arguments Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "_arcball",
    "source_code": "def _arcball(self, x: float, y: float) -> np.ndarray:\n    s = mpl.rcParams['axes3d.trackballsize'] / 2\n    b = mpl.rcParams['axes3d.trackballborder'] / s\n    x /= s\n    y /= s\n    r2 = x * x + y * y\n    r = np.sqrt(r2)\n    ra = 1 + b\n    a = b * (1 + b / 2)\n    ri = 2 / (ra + 1 / ra)\n    if r < ri:\n        p = np.array([np.sqrt(1 - r2), x, y])\n    elif r < ra:\n        dr = ra - r\n        p = np.array([a - np.sqrt((a + dr) * (a - dr)), x, y])\n        p /= np.linalg.norm(p)\n    else:\n        p = np.array([0, x / r, y / r])\n    return p",
    "docstring": "Convert a point (x, y) to a point on a virtual trackball. This is Ken Shoemake's arcball (a sphere), modified to soften the abrupt edge (optionally). See: Ken Shoemake, \"ARCBALL: A user interface for specifying three-dimensional rotation using a mouse.\" in Proceedings of Graphics Interface '92, 1992, pp. 151-156, The smoothing of the edge is inspired by Gavin Bell's arcball (a sphere combined with a hyperbola), but here, the sphere is combined with a section of a cylinder, so it has finite support.",
    "type": "method",
    "file_path": "matplotlib\\lib\\mpl_toolkits\\mplot3d\\axes3d.py",
    "ast_data": "FunctionDef name:_arcball arg:self arg:x arg:y arguments arg arg arg Assign Assign Assign Assign Call Assign Assign Assign If Compare Assign Call Call If Compare Assign Assign Call Call Call Assign Call Return return:yes"
  },
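A standalone check of the inner (spherical) branch of the mapping above: for points well inside the trackball, (x, y) lands on the unit sphere (the test point is made up):

```python
import numpy as np

x, y = 0.3, 0.4                        # a point with r < ri (inner branch)
r2 = x * x + y * y
p = np.array([np.sqrt(1 - r2), x, y])  # same formula as the r < ri case above
print(np.linalg.norm(p))               # 1.0 -- a unit vector, i.e. on the sphere
```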
  {
    "library": "pytorch",
    "name": "torch_key_cache",
    "source_code": "def torch_key_cache(func: Callable[[], bytes]) -> Callable[[], bytes]:\n    _cache: list[bytes] = []\n\n    def wrapper() -> bytes:\n        if len(_cache) == 0:\n            _cache.append(func())\n        return _cache[0]\n\n    def set_val(val: bytes) -> None:\n        assert len(_cache) == 0\n        _cache.append(val)\n\n    def clear() -> None:\n        _cache.clear()\n    wrapper.set = set_val\n    wrapper.clear = clear\n    return wrapper",
    "docstring": "This function is a reimplementation of functools.lru_cache with a set function that allows prepopulating the cache.",
    "type": "function",
    "file_path": "pytorch\\torch\\_inductor\\codecache.py",
    "ast_data": "FunctionDef name:torch_key_cache arg:func arguments arg FunctionDef name:wrapper arguments If Compare Call Call Call Return return:yes FunctionDef name:set_val arg:val arguments arg Compare Call Call FunctionDef name:clear arguments Call Assign Assign Return return:yes"
  },
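The same prepopulatable-cache pattern, sketched outside torch (all names here are illustrative, not library API):

```python
from typing import Callable

def cached_bytes(func: Callable[[], bytes]) -> Callable[[], bytes]:
    _cache: list[bytes] = []

    def wrapper() -> bytes:
        if not _cache:
            _cache.append(func())  # compute once, on first use
        return _cache[0]

    def set_val(val: bytes) -> None:
        assert not _cache          # only valid before the first call
        _cache.append(val)

    wrapper.set = set_val          # expose prepopulation hook on the wrapper
    wrapper.clear = _cache.clear   # and a reset hook
    return wrapper

@cached_bytes
def key() -> bytes:
    return b"computed"

key.set(b"prepopulated")  # skip the computation entirely
print(key())              # b'prepopulated'
```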
  {
    "library": "django",
    "name": "as_ul",
    "source_code": "def as_ul(self):\n    return self.render(self.template_name_ul)",
    "docstring": "Render as elements excluding the surrounding tag.",
    "type": "method",
    "file_path": "django\\django\\forms\\utils.py",
    "ast_data": "FunctionDef name:as_ul arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "maybe_promote_tensors",
    "source_code": "def maybe_promote_tensors(*tensors, force_same_dtype=False):\n    if ops.is_auto_dtype_conversion_enabled():\n        return tensors\n    if not tensors:\n        return tensors\n    if not ops.is_numpy_style_type_promotion():\n        if not force_same_dtype:\n            return tensors\n        promoted_tensors = []\n        promoted_tensors.append(tensors[0])\n        dtype = tensors[0].dtype.base_dtype\n        for tensor in tensors[1:]:\n            promoted_tensors.append(ops.convert_to_tensor(tensor, dtype, name='x'))\n        return promoted_tensors\n    result_type = np_dtypes._result_type(*[_maybe_get_dtype(x) for x in nest.flatten(tensors)])\n\n    def _promote_or_cast(x):\n        if isinstance(x, tensor_lib.Tensor):\n            x = gen_math_ops.cast(x, result_type)\n        else:\n            x = ops.convert_to_tensor(x, result_type)\n        return x\n    return [_promote_or_cast(x) for x in tensors]",
    "docstring": "Promotes tensors if numpy style promotion is enabled. This function promotes according to numpy promotion rules if numpy style promotion is enabled. Otherwise, if is , it force-casts to 's dtype. Note that this force-cast can be problematic. For example, when some elements can be silently downcasted. Args: *tensors: the list of tensors to promote. force_same_dtype: bool (optional, default to ). When numpy style promotion is disabled and is , this function will force-casts to 's dtype (which could be problematic). Returns: The promoted list of tensors.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\override_binary_operator.py",
    "ast_data": "FunctionDef name:maybe_promote_tensors arguments arg arg If Call Return return:yes If Return return:yes If Call If Return return:yes Assign Call Assign For Call Call Return return:yes Assign Call Call Call FunctionDef name:_promote_or_cast arg:x arguments arg If Call Assign Call Assign Call Return return:yes Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "reduce_sum",
    "source_code": "def reduce_sum(self, x):\n    return self.reduce(lambda y: math_ops.reduce_sum(y, axis=0), x)",
    "docstring": "Performs a sum reduction on across pfor iterations. Note that this currently may not work inside a control flow construct. Args: x: an unvectorized Tensor. Returns: A Tensor that has same rank as . The value is the sum of the values of across the pfor iterations.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\parallel_for\\pfor.py",
    "ast_data": "FunctionDef name:reduce_sum arg:self arg:x arguments arg arg Return return:yes Call arguments arg Call"
  },
  {
    "library": "scikit-learn",
    "name": "transform",
    "source_code": "def transform(self, X):\n    check_is_fitted(self)\n    X, Xt, mask_missing_values, complete_mask = self._initial_imputation(X, in_fit=False)\n    X_indicator = super()._transform_indicator(complete_mask)\n    if self.n_iter_ == 0 or np.all(mask_missing_values):\n        return super()._concatenate_indicator(Xt, X_indicator)\n    imputations_per_round = len(self.imputation_sequence_) // self.n_iter_\n    i_rnd = 0\n    if self.verbose > 0:\n        print('[IterativeImputer] Completing matrix with shape %s' % (X.shape,))\n    start_t = time()\n    for it, estimator_triplet in enumerate(self.imputation_sequence_):\n        Xt, _ = self._impute_one_feature(Xt, mask_missing_values, estimator_triplet.feat_idx, estimator_triplet.neighbor_feat_idx, estimator=estimator_triplet.estimator, fit_mode=False)\n        if not (it + 1) % imputations_per_round:\n            if self.verbose > 1:\n                print('[IterativeImputer] Ending imputation round %d/%d, elapsed time %0.2f' % (i_rnd + 1, self.n_iter_, time() - start_t))\n            i_rnd += 1\n    _assign_where(Xt, X, cond=~mask_missing_values)\n    return super()._concatenate_indicator(Xt, X_indicator)",
    "docstring": "Impute all missing values in . Note that this is stochastic, and that if is not fixed, repeated calls, or permuted input, results will differ. Parameters ---------- X : array-like of shape (n_samples, n_features) The input data to complete. Returns ------- Xt : array-like, shape (n_samples, n_features) The imputed input data.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\impute\\_iterative.py",
    "ast_data": "FunctionDef name:transform arg:self arg:X arguments arg arg Call Assign Call Assign Call Call If BoolOp Compare Call Return return:yes Call Call Assign Call Assign If Compare Call Assign Call For Call Assign Call If If Compare Call Call Call Return return:yes Call Call"
  },
  {
    "library": "pandas",
    "name": "_get_cleaned_column_resolvers",
    "source_code": "@final\ndef _get_cleaned_column_resolvers(self) -> dict[Hashable, Series]:\n    from pandas.core.computation.parsing import clean_column_name\n    from pandas.core.series import Series\n    if isinstance(self, ABCSeries):\n        return {clean_column_name(self.name): self}\n    dtypes = self.dtypes\n    return {clean_column_name(k): Series(v, copy=False, index=self.index, name=k, dtype=dtype).__finalize__(self) for k, v, dtype in zip(self.columns, self._iter_column_arrays(), dtypes)}",
    "docstring": "Return the special character free column resolvers of a DataFrame. Column names with special characters are 'cleaned up' so that they can be referred to by backtick quoting. Used in :meth:.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\generic.py",
    "ast_data": "FunctionDef name:_get_cleaned_column_resolvers arg:self arguments arg If Call Return return:yes Call Assign Return return:yes Call Call Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "construct_tensor_variable",
    "source_code": "def construct_tensor_variable(target_cls, tx, proxy, example_value, subclass_type, options):\n    example_value = _clone_input(example_value, tx.fake_mode)\n    set_example_value(proxy.node, example_value)\n    tx.output.current_tracer.track_unbacked_symbols(example_value, proxy)\n    specialized_props = target_cls.specialize(example_value)\n    if isinstance(example_value, torch._subclasses.fake_tensor.FakeTensor) and example_value.fake_mode is tx.fake_mode:\n        if subclass_type:\n            tensor_type = subclass_type\n        elif isinstance(example_value, torch.nn.Parameter):\n            tensor_type = torch.nn.Parameter\n        elif isinstance(example_value, torch.nn.Buffer):\n            tensor_type = torch.nn.Buffer\n        else:\n            tensor_type = torch.Tensor\n        specialized_props['class_type'] = tensor_type\n    options.update(specialized_props)\n    return target_cls(proxy, **options)",
    "docstring": "Actually construct a tensor variable after all the pre-processing from wrapping a pre-existing or newly created tensor value.",
    "type": "function",
    "file_path": "pytorch\\torch\\_dynamo\\variables\\builder.py",
    "ast_data": "FunctionDef name:construct_tensor_variable arg:target_cls arg:tx arg:proxy arg:example_value arg:subclass_type arg:options arguments arg arg arg arg arg arg Assign Call Call Call Assign Call If BoolOp Call Compare If Assign If Call Assign If Call Assign Assign Assign Call Return return:yes Call"
  },
  {
    "library": "cherrypy",
    "name": "stop",
    "source_code": "def stop(self):\n    if self.running:\n        self.httpserver.stop()\n        if isinstance(self.bind_addr, tuple):\n            portend.free(*self.bound_addr, timeout=Timeouts.free)\n        self.running = False\n        self.bus.log('HTTP Server %s shut down' % self.httpserver)\n    else:\n        self.bus.log('HTTP Server %s already shut down' % self.httpserver)",
    "docstring": "Stop the HTTP server.",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\process\\servers.py",
    "ast_data": "FunctionDef name:stop arg:self arguments arg If Call If Call Call Assign Call Call"
  },
  {
    "library": "matplotlib",
    "name": "__init__",
    "source_code": "def __init__(self, axis, nonpositive='mask', *, one_half='\\\\frac{1}{2}', use_overline=False):\n    self._transform = LogitTransform(nonpositive)\n    self._use_overline = use_overline\n    self._one_half = one_half",
    "docstring": "Parameters ---------- axis : Currently unused. nonpositive : {'mask', 'clip'} Determines the behavior for values beyond the open interval ]0, 1[. They can either be masked as invalid, or clipped to a number very close to 0 or 1. use_overline : bool, default: False Indicate the usage of survival notation (\\overline{x}) in place of standard notation (1-x) for probability close to one. one_half : str, default: r\"\\frac{1}{2}\" The string used for ticks formatter to represent 1/2.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\scale.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:axis arg:nonpositive arguments arg arg arg arg arg Assign Call Assign Assign"
  },
  {
    "library": "tensorflow",
    "name": "get",
    "source_code": "def get(self, timeout=None, tag=None):\n    with self._queue_lock:\n        while self._should_process_closures and self._queue.empty() and (tag is None or self._tagged_queue[tag].empty()):\n            if not self._closures_queued_condition.wait(timeout=timeout):\n                return None\n        if not self._should_process_closures:\n            return None\n        if tag is not None and (not self._tagged_queue[tag].empty()):\n            closure = self._tagged_queue[tag].get(block=False)\n            return closure\n        closure = self._queue.get(block=False)\n        metric_utils.monitor_int('queued_closures', self._queue.qsize())\n        assert closure.tag is None\n        assert tag is None or self._tagged_queue[tag].empty()\n        self._queue_free_slot_condition.notify()\n        self.inflight_closure_count += 1\n        return closure",
    "docstring": "Return a closure from the queue to be executed. It will try to fetch an item from the queue with the given tag. If this queue is empty, it will then check the global queue. Args: timeout: timeout when waiting for a closure to be put. tag: optional tag to specify which queue to query first before querying the global queue. Returns: a closure or None after timeout.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\coordinator\\cluster_coordinator.py",
    "ast_data": "FunctionDef name:get arg:self arg:timeout arg:tag arguments arg arg arg With While BoolOp Call BoolOp Compare Call If Call Return return:no If Return return:no If BoolOp Compare Call Assign Call Return return:yes Assign Call Call Call Compare BoolOp Compare Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_real_div_flops",
    "source_code": "@ops.RegisterStatistics('RealDiv', 'flops')\ndef _real_div_flops(graph, node):\n    return _binary_per_element_op_flops(graph, node)",
    "docstring": "Compute flops for RealDiv operation.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\profiler\\internal\\flops_registry.py",
    "ast_data": "FunctionDef name:_real_div_flops arg:graph arg:node arguments arg arg Return return:yes Call Call"
  },
  {
    "library": "matplotlib",
    "name": "__call__",
    "source_code": "def __call__(self):\n    vmin, vmax = self.axis.get_view_interval()\n    return self.tick_values(vmin, vmax)",
    "docstring": "Return the locations of the ticks.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\ticker.py",
    "ast_data": "FunctionDef name:__call__ arg:self arguments arg Assign Call Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "_SubsampleMetaSplitter",
    "source_code": "class _SubsampleMetaSplitter:\n\n    def __init__(self, *, base_cv, fraction, subsample_test, random_state):\n        self.base_cv = base_cv\n        self.fraction = fraction\n        self.subsample_test = subsample_test\n        self.random_state = random_state\n\n    def split(self, X, y, **kwargs):\n        for train_idx, test_idx in self.base_cv.split(X, y, **kwargs):\n            train_idx = resample(train_idx, replace=False, random_state=self.random_state, n_samples=int(self.fraction * len(train_idx)))\n            if self.subsample_test:\n                test_idx = resample(test_idx, replace=False, random_state=self.random_state, n_samples=int(self.fraction * len(test_idx)))\n            yield (train_idx, test_idx)",
    "docstring": "Splitter that subsamples a given fraction of the dataset",
    "type": "class",
    "file_path": "scikit-learn\\sklearn\\model_selection\\_search_successive_halving.py",
    "ast_data": "ClassDef name:_SubsampleMetaSplitter FunctionDef name:__init__ arg:self arguments arg arg arg arg arg Assign Assign Assign Assign FunctionDef name:split arg:self arg:X arg:y arguments arg arg arg arg For Call Assign Call Call Call If Assign Call Call Call"
  },
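`_SubsampleMetaSplitter` is private to the successive-halving search, but the wrapping idea is easy to exercise directly around an ordinary CV splitter (data shapes here are made up):

```python
import numpy as np
from sklearn.model_selection import KFold
from sklearn.model_selection._search_successive_halving import _SubsampleMetaSplitter

X, y = np.arange(40).reshape(20, 2), np.arange(20)
cv = _SubsampleMetaSplitter(base_cv=KFold(4), fraction=0.5,
                            subsample_test=True, random_state=0)
for train, test in cv.split(X, y):
    # Each fold's train (15) and test (5) indices are subsampled to half size.
    print(len(train), len(test))  # 7 2, four times
```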
  {
    "library": "numpy",
    "name": "flatten_sequence",
    "source_code": "def flatten_sequence(iterable):\n    for elm in iter(iterable):\n        if hasattr(elm, '__iter__'):\n            yield from flatten_sequence(elm)\n        else:\n            yield elm",
    "docstring": "Flattens a compound of nested iterables.",
    "type": "function",
    "file_path": "numpy\\numpy\\ma\\core.py",
    "ast_data": "FunctionDef name:flatten_sequence arg:iterable arguments arg For Call If Call Call"
  },
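Note the generator above recurses on anything exposing `__iter__` (so strings would recurse indefinitely); with nested numeric containers it behaves as follows, assuming the private helper is importable from `numpy.ma.core` as the file path above suggests:

```python
from numpy.ma.core import flatten_sequence  # private helper shown above

print(list(flatten_sequence([1, [2, (3, 4)], [[5]]])))  # [1, 2, 3, 4, 5]
```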
  {
    "library": "matplotlib",
    "name": "set_ybound",
    "source_code": "def set_ybound(self, lower=None, upper=None, view_margin=None):\n    self._set_bound3d(self.get_ybound, self.set_ylim, self.yaxis_inverted, lower, upper, view_margin)",
    "docstring": "Set the lower and upper numerical bounds of the y-axis. This method will honor axis inversion regardless of parameter order. It will not change the autoscaling setting (). Parameters ---------- lower, upper : float or None The lower and upper bounds. If *None*, the respective axis bound is not modified. view_margin : float or None The margin to apply to the bounds. If *None*, the margin is handled by . See Also -------- get_ybound get_ylim, set_ylim invert_yaxis, yaxis_inverted",
    "type": "method",
    "file_path": "matplotlib\\lib\\mpl_toolkits\\mplot3d\\axes3d.py",
    "ast_data": "FunctionDef name:set_ybound arg:self arg:lower arg:upper arg:view_margin arguments arg arg arg arg Call"
  },
  {
    "library": "tensorflow",
    "name": "SupportsTracingProtocol",
    "source_code": "@runtime_checkable\nclass SupportsTracingProtocol(Protocol):\n\n    @doc_controls.doc_private\n    @abc.abstractmethod\n    def __tf_tracing_type__(self, context: TracingContext) -> TraceType:\n        pass",
    "docstring": "A protocol allowing custom classes to control tf.function retracing.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\types\\trace.py",
    "ast_data": "ClassDef name:SupportsTracingProtocol FunctionDef name:__tf_tracing_type__ arg:self arg:context arguments arg arg"
  },
  {
    "library": "pandas",
    "name": "_dtype_to_kind",
    "source_code": "def _dtype_to_kind(dtype_str: str) -> str:\n    if dtype_str.startswith(('string', 'bytes')):\n        kind = 'string'\n    elif dtype_str.startswith('float'):\n        kind = 'float'\n    elif dtype_str.startswith('complex'):\n        kind = 'complex'\n    elif dtype_str.startswith(('int', 'uint')):\n        kind = 'integer'\n    elif dtype_str.startswith('datetime64'):\n        kind = dtype_str\n    elif dtype_str.startswith('timedelta'):\n        kind = 'timedelta64'\n    elif dtype_str.startswith('bool'):\n        kind = 'bool'\n    elif dtype_str.startswith('category'):\n        kind = 'category'\n    elif dtype_str.startswith('period'):\n        kind = 'integer'\n    elif dtype_str == 'object':\n        kind = 'object'\n    elif dtype_str == 'str':\n        kind = 'str'\n    else:\n        raise ValueError(f'cannot interpret dtype of [{dtype_str}]')\n    return kind",
    "docstring": "Find the \"kind\" string describing the given dtype name.",
    "type": "function",
    "file_path": "pandas\\pandas\\io\\pytables.py",
    "ast_data": "FunctionDef name:_dtype_to_kind arg:dtype_str arguments arg If Call Assign If Call Assign If Call Assign If Call Assign If Call Assign If Call Assign If Call Assign If Call Assign If Call Assign If Compare Assign If Compare Assign Raise Call Return return:yes"
  },
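`_dtype_to_kind` is internal to `pandas.io.pytables`, but since the mapping is pure string matching it is easy to exercise directly (assuming the private name remains importable):

```python
from pandas.io.pytables import _dtype_to_kind  # private helper shown above

for name in ["int64", "uint8", "float32", "datetime64[ns]", "category", "period[M]"]:
    print(name, "->", _dtype_to_kind(name))
# int64 -> integer, uint8 -> integer, float32 -> float,
# datetime64[ns] -> datetime64[ns], category -> category, period[M] -> integer
```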
  {
    "library": "scipy",
    "name": "_wrapped_fun",
    "source_code": "def _wrapped_fun(*fargs):\n    _wrapped_fun.nfev += 1\n    return fun(*fargs)",
    "docstring": "Wrapped to track the number of times the function has been called.",
    "type": "function",
    "file_path": "scipy\\scipy\\optimize\\_root.py",
    "ast_data": "FunctionDef name:_wrapped_fun arguments arg Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "__call__",
    "source_code": "def __call__(self, t):\n    t = np.asarray(t)\n    if t.ndim == 0:\n        return self._call_single(t)\n    order = np.argsort(t)\n    reverse = np.empty_like(order)\n    reverse[order] = np.arange(order.shape[0])\n    t_sorted = t[order]\n    segments = np.searchsorted(self.ts_sorted, t_sorted, side=self.side)\n    segments -= 1\n    segments[segments < 0] = 0\n    segments[segments > self.n_segments - 1] = self.n_segments - 1\n    if not self.ascending:\n        segments = self.n_segments - 1 - segments\n    ys = []\n    group_start = 0\n    for segment, group in groupby(segments):\n        group_end = group_start + len(list(group))\n        y = self.interpolants[segment](t_sorted[group_start:group_end])\n        ys.append(y)\n        group_start = group_end\n    ys = np.hstack(ys)\n    ys = ys[:, reverse]\n    return ys",
    "docstring": "Evaluate the solution. Parameters ---------- t : float or array_like with shape (n_points,) Points to evaluate at. Returns ------- y : ndarray, shape (n_states,) or (n_states, n_points) Computed values. Shape depends on whether is a scalar or a 1-D array.",
    "type": "method",
    "file_path": "scipy\\scipy\\integrate\\_ivp\\common.py",
    "ast_data": "FunctionDef name:__call__ arg:self arg:t arguments arg arg Assign Call If Compare Return return:yes Call Assign Call Assign Call Assign Call Assign Assign Call Assign Compare Assign Compare If Assign Assign Assign For Call Assign Call Call Assign Call Call Assign Assign Call Assign Return return:yes"
  },
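This `__call__` backs the dense-output object that `solve_ivp` returns as `.sol` when `dense_output=True`; a short sketch (the ODE is made up):

```python
import numpy as np
from scipy.integrate import solve_ivp

sol = solve_ivp(lambda t, y: -y, (0, 5), [1.0], dense_output=True)
print(sol.sol(2.0))                      # scalar t -> shape (n_states,)
print(sol.sol(np.linspace(0, 5, 4)).shape)  # array t -> (n_states, n_points) == (1, 4)
```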
  {
    "library": "matplotlib",
    "name": "get_bad",
    "source_code": "def get_bad(self):\n    if not self._isinit:\n        self._init()\n    return np.array(self._lut[self._i_bad])",
    "docstring": "Get the color for masked values.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\colors.py",
    "ast_data": "FunctionDef name:get_bad arg:self arguments arg If Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_DenseResourceVariableProcessor",
    "source_code": "class _DenseResourceVariableProcessor(_OptimizableVariable):\n\n    def __init__(self, v):\n        self._v = v\n\n    def target(self):\n        return self._v\n\n    def update_op(self, optimizer, g):\n        if isinstance(g, indexed_slices.IndexedSlices):\n            if self._v.constraint is not None:\n                raise RuntimeError('Cannot use a constraint function on a sparse variable.')\n            return optimizer._resource_apply_sparse_duplicate_indices(g.values, self._v, g.indices)\n        if context.xla_sharding_for_resource_variables_enabled():\n            assign_ops = []\n            for variable_dict in optimizer._slots.values():\n                for slot_var in variable_dict.values():\n                    if isinstance(slot_var, resource_variable_ops.BaseResourceVariable) and slot_var._get_xla_sharding() is not None:\n                        assign_ops.append(slot_var.assign(slot_var.read_value()))\n            with ops.control_dependencies(assign_ops):\n                update_op = optimizer._resource_apply_dense(g, self._v)\n        else:\n            update_op = optimizer._resource_apply_dense(g, self._v)\n        if self._v.constraint is not None:\n            with ops.control_dependencies([update_op]):\n                return self._v.assign(self._v.constraint(self._v))\n        else:\n            return update_op",
    "docstring": "Processor for dense ResourceVariables.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\training\\optimizer.py",
    "ast_data": "ClassDef name:_DenseResourceVariableProcessor FunctionDef name:__init__ arg:self arg:v arguments arg arg Assign FunctionDef name:target arg:self arguments arg Return return:yes FunctionDef name:update_op arg:self arg:optimizer arg:g arguments arg arg arg If Call If Compare Raise Call Return return:yes Call If Call Assign For Call For Call If BoolOp Call Compare Call Call Call Call With Call Assign Call Assign Call If Compare With Call Return return:yes Call Call Return return:yes"
  },
  {
    "library": "seaborn",
    "name": "__init__",
    "source_code": "def __init__(self, estimator, errorbar=None, **boot_kws):\n    self.estimator = estimator\n    method, level = _validate_errorbar_arg(errorbar)\n    self.error_method = method\n    self.error_level = level\n    self.boot_kws = boot_kws",
    "docstring": "Data aggregator that produces an estimate and error bar interval. Parameters ---------- estimator : callable or string Function (or method name) that maps a vector to a scalar. errorbar : string, (string, number) tuple, or callable Name of errorbar method (either \"ci\", \"pi\", \"se\", or \"sd\"), or a tuple with a method name and a level parameter, or a function that maps from a vector to a (min, max) interval, or None to hide errorbar. See the :doc: for more information. boot_kws Additional keywords are passed to bootstrap when error_method is \"ci\".",
    "type": "method",
    "file_path": "seaborn\\seaborn\\_statistics.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:estimator arg:errorbar arguments arg arg arg arg Assign Assign Call Assign Assign Assign"
  },
  {
    "library": "scipy",
    "name": "derivatives",
    "source_code": "def derivatives(self, x, der=None):\n    x, x_shape = self._prepare_x(x)\n    y = self._evaluate_derivatives(x, der)\n    y = y.reshape((y.shape[0],) + x_shape + self._y_extra_shape)\n    if self._y_axis != 0 and x_shape != ():\n        nx = len(x_shape)\n        ny = len(self._y_extra_shape)\n        s = [0] + list(range(nx + 1, nx + self._y_axis + 1)) + list(range(1, nx + 1)) + list(range(nx + 1 + self._y_axis, nx + ny + 1))\n        y = y.transpose(s)\n    return y",
    "docstring": "Evaluate several derivatives of the polynomial at the point Produce an array of derivatives evaluated at the point . Parameters ---------- x : array_like Point or points at which to evaluate the derivatives der : int or list or None, optional How many derivatives to evaluate, or None for all potentially nonzero derivatives (that is, a number equal to the number of points), or a list of derivatives to evaluate. This number includes the function value as the '0th' derivative. Returns ------- d : ndarray Array with derivatives; `x`. Examples -------- >>> from scipy.interpolate import KroghInterpolator >>> KroghInterpolator([0,0,0],[1,2,3]).derivatives(0) array([1.0,2.0,3.0]) >>> KroghInterpolator([0,0,0],[1,2,3]).derivatives([0,0]) array([[1.0,1.0], [2.0,2.0], [3.0,3.0]])",
    "type": "method",
    "file_path": "scipy\\scipy\\interpolate\\_polyint.py",
    "ast_data": "FunctionDef name:derivatives arg:self arg:x arg:der arguments arg arg arg Assign Call Assign Call Assign Call If BoolOp Compare Compare Assign Call Assign Call Assign Call Call Call Call Call Call Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_broadcast_to_uniform_shape",
    "source_code": "def _broadcast_to_uniform_shape(rt_input, shape, broadcast_inner_dimensions):\n    if isinstance(rt_input, ragged_tensor.RaggedTensor):\n        raise ValueError('Incompatible with shape: ragged rank mismatch')\n    if broadcast_inner_dimensions:\n        return array_ops.broadcast_to(rt_input, shape.inner_dim_sizes)\n    else:\n        return rt_input",
    "docstring": "Broadcasts rt_input to the uniform shape .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\ragged_tensor_shape.py",
    "ast_data": "FunctionDef name:_broadcast_to_uniform_shape arg:rt_input arg:shape arg:broadcast_inner_dimensions arguments arg arg arg If Call Raise Call If Return return:yes Call Return return:yes"
  },
  {
    "library": "sphinx",
    "name": "visit_FunctionDef",
    "source_code": "def visit_FunctionDef(self, node: ast.FunctionDef) -> None:\n    if self.current_function is None:\n        self.add_entry(node.name)\n        if self.is_final(node.decorator_list):\n            self.add_final_entry(node.name)\n        if self.is_overload(node.decorator_list):\n            self.add_overload_entry(node)\n        self.context.append(node.name)\n        self.current_function = node\n        for child in node.body:\n            self.visit(child)\n        self.context.pop()\n        self.current_function = None",
    "docstring": "Handles FunctionDef node and set context.",
    "type": "method",
    "file_path": "sphinx\\sphinx\\pycode\\parser.py",
    "ast_data": "FunctionDef name:visit_FunctionDef arg:self arg:node arguments arg arg If Compare Call If Call Call If Call Call Call Assign For Call Call Assign"
  },
  {
    "library": "tensorflow",
    "name": "_call_concrete_function",
    "source_code": "def _call_concrete_function(function, inputs):\n    expected_structure = function.graph.structured_input_signature\n    flatten_inputs = nest.flatten_up_to(expected_structure, inputs, expand_composites=True)\n    flatten_expected = nest.flatten(expected_structure, expand_composites=True)\n    tensor_inputs = []\n    for arg, expected in zip(flatten_inputs, flatten_expected):\n        if isinstance(expected, tensor.TensorSpec):\n            tensor_inputs.append(ops.convert_to_tensor(arg, dtype_hint=expected.dtype))\n        elif isinstance(expected, resource_variable_ops.VariableSpec):\n            tensor_inputs.append(arg.handle)\n    result = function._call_flat(tensor_inputs, function.captured_inputs)\n    if isinstance(result, ops.Operation):\n        return None\n    return result",
    "docstring": "Calls a restored Function with structured inputs. This differs from in that inputs and outputs are structured and that it casts inputs to tensors if needed. Note: this does not checks that non-tensor inputs match. That should be done before via . Args: function: ConcreteFunction to call. inputs: Structured inputs compatible with . Returns: The structured function output.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\saved_model\\function_deserialization.py",
    "ast_data": "FunctionDef name:_call_concrete_function arg:function arg:inputs arguments arg arg Assign Assign Call Assign Call Assign For Call If Call Call Call If Call Call Assign Call If Call Return return:no Return return:yes"
  },
  {
    "library": "django",
    "name": "BaseDeleteView",
    "source_code": "class BaseDeleteView(DeletionMixin, FormMixin, BaseDetailView):\n    form_class = Form\n\n    def post(self, request, *args, **kwargs):\n        self.object = self.get_object()\n        form = self.get_form()\n        if form.is_valid():\n            return self.form_valid(form)\n        else:\n            return self.form_invalid(form)\n\n    def form_valid(self, form):\n        success_url = self.get_success_url()\n        self.object.delete()\n        return HttpResponseRedirect(success_url)",
    "docstring": "Base view for deleting an object. This requires subclassing to provide a response mixin.",
    "type": "class",
    "file_path": "django\\django\\views\\generic\\edit.py",
    "ast_data": "ClassDef name:BaseDeleteView Assign FunctionDef name:post arg:self arg:request arguments arg arg arg arg Assign Call Assign Call If Call Return return:yes Call Return return:yes Call FunctionDef name:form_valid arg:self arg:form arguments arg arg Assign Call Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_SwitchRefOrTensor",
    "source_code": "def _SwitchRefOrTensor(data, pred, name='Switch'):\n    data = ops.convert_to_tensor_or_composite(data, name='data')\n    with ops.colocate_with(data, ignore_existing=True):\n        if isinstance(data, tensor_lib.Tensor):\n            if data.dtype._is_ref_dtype:\n                return ref_switch(data, pred, name=name)\n        return switch(data, pred, name=name)",
    "docstring": "Forwards to an output determined by . If is false, the input is forwarded to the first output. Otherwise, the data goes to the second output. This op handles s and . Args: data: The tensor to be forwarded to the appropriate output. pred: A scalar that specifies which output port will receive data. name: A name for this operation (optional). Returns: : If is true, data will be forwarded to , otherwise it goes to . Raises: TypeError: if data is not a Tensor or IndexedSlices",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\control_flow_ops.py",
    "ast_data": "FunctionDef name:_SwitchRefOrTensor arg:data arg:pred arg:name arguments arg arg arg Assign Call With Call If Call If Return return:yes Call Return return:yes Call"
  },
  {
    "library": "sphinx",
    "name": "Author",
    "source_code": "class Author(SphinxDirective):\n    has_content = False\n    required_arguments = 1\n    optional_arguments = 0\n    final_argument_whitespace = True\n    option_spec: ClassVar[OptionSpec] = {}\n\n    def run(self) -> list[Node]:\n        if not self.config.show_authors:\n            return []\n        para: Element = nodes.paragraph(translatable=False)\n        emph = nodes.emphasis()\n        para += emph\n        if self.name == 'sectionauthor':\n            text = _('Section author: ')\n        elif self.name == 'moduleauthor':\n            text = _('Module author: ')\n        elif self.name == 'codeauthor':\n            text = _('Code author: ')\n        else:\n            text = _('Author: ')\n        emph += nodes.Text(text)\n        inodes, messages = self.parse_inline(self.arguments[0])\n        emph.extend(inodes)\n        ret: list[Node] = [para]\n        ret += messages\n        return ret",
    "docstring": "Directive to give the name of the author of the current document or section. Shown in the output only if the show_authors option is on.",
    "type": "class",
    "file_path": "sphinx\\sphinx\\directives\\other.py",
    "ast_data": "ClassDef name:Author Assign Assign Assign Assign FunctionDef name:run arg:self arguments arg If Return return:no Call Assign Call If Compare Assign Call If Compare Assign Call If Compare Assign Call Assign Call Call Assign Call Call Return return:yes"
  },
  {
    "library": "cherrypy",
    "name": "__init__",
    "source_code": "def __init__(self, nextapp, throws=(KeyboardInterrupt, SystemExit)):\n    self.nextapp = nextapp\n    self.throws = throws",
    "docstring": "Initialize exception trapper.",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\_cpwsgi.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:nextapp arg:throws arguments arg arg arg Assign Assign"
  },
  {
    "library": "matplotlib",
    "name": "BboxTransformTo",
    "source_code": "class BboxTransformTo(Affine2DBase):\n    is_separable = True\n\n    def __init__(self, boxout, **kwargs):\n        _api.check_isinstance(BboxBase, boxout=boxout)\n        super().__init__(**kwargs)\n        self._boxout = boxout\n        self.set_children(boxout)\n        self._mtx = None\n        self._inverted = None\n    __str__ = _make_str_method('_boxout')\n\n    def get_matrix(self):\n        if self._invalid:\n            outl, outb, outw, outh = self._boxout.bounds\n            if DEBUG and (outw == 0 or outh == 0):\n                raise ValueError('Transforming to a singular bounding box.')\n            self._mtx = np.array([[outw, 0.0, outl], [0.0, outh, outb], [0.0, 0.0, 1.0]], float)\n            self._inverted = None\n            self._invalid = 0\n        return self._mtx",
    "docstring": "is a transformation that linearly transforms points from the unit bounding box to a given .",
    "type": "class",
    "file_path": "matplotlib\\lib\\matplotlib\\transforms.py",
    "ast_data": "ClassDef name:BboxTransformTo Assign FunctionDef name:__init__ arg:self arg:boxout arguments arg arg arg Call Call Call Assign Call Assign Assign Assign Call FunctionDef name:get_matrix arg:self arguments arg If Assign If BoolOp BoolOp Compare Compare Raise Call Assign Call Assign Assign Return return:yes"
  },
  {
    "library": "kornia",
    "name": "ImageToTensor",
    "source_code": "class ImageToTensor(nn.Module):\n\n    def __init__(self, keepdim: bool=False) -> None:\n        super().__init__()\n        self.keepdim = keepdim\n\n    def forward(self, x: Any) -> Tensor:\n        return image_to_tensor(x, keepdim=self.keepdim)",
    "docstring": "Converts a numpy image to a PyTorch 4d tensor image. Args: keepdim: If `(B, H, W, C)`.",
    "type": "class",
    "file_path": "kornia\\kornia\\utils\\image.py",
    "ast_data": "ClassDef name:ImageToTensor FunctionDef name:__init__ arg:self arg:keepdim arguments arg arg Call Call Assign FunctionDef name:forward arg:self arg:x arguments arg arg Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "setdiag",
    "source_code": "def setdiag(self, values, k=0):\n    M, N = self.shape\n    if k > 0 and k >= N or (k < 0 and -k >= M):\n        raise ValueError('k exceeds array dimensions')\n    self._setdiag(np.asarray(values), k)",
    "docstring": "Set diagonal or off-diagonal elements of the array/matrix. Parameters ---------- values : array_like New values of the diagonal elements. Values may have any length. If the diagonal is longer than values, then the remaining diagonal entries will not be set. If values are longer than the diagonal, then the remaining values are ignored. If a scalar value is given, all of the diagonal is set to it. k : int, optional Which off-diagonal to set, corresponding to elements a[i,i+k]. Default: 0 (the main diagonal).",
    "type": "method",
    "file_path": "scipy\\scipy\\sparse\\_base.py",
    "ast_data": "FunctionDef name:setdiag arg:self arg:values arg:k arguments arg arg arg Assign If BoolOp BoolOp Compare Compare BoolOp Compare Compare Raise Call Call Call"
  },
  {
    "library": "scipy",
    "name": "_percentile_of_score",
    "source_code": "def _percentile_of_score(a, score, axis):\n    B = a.shape[axis]\n    return ((a < score).sum(axis=axis) + (a <= score).sum(axis=axis)) / (2 * B)",
    "docstring": "Vectorized, simplified . Uses logic of the 'mean' value of percentileofscore's kind parameter. Unlike , the percentile returned is a fraction in [0, 1].",
    "type": "function",
    "file_path": "scipy\\scipy\\stats\\_resampling.py",
    "ast_data": "FunctionDef name:_percentile_of_score arg:a arg:score arg:axis arguments arg arg arg Assign Return return:yes Call Compare Call Compare"
  },
  {
    "library": "sphinx",
    "name": "resolve_reference_any_inventory",
    "source_code": "def resolve_reference_any_inventory(env: BuildEnvironment, honor_disabled_refs: bool, node: pending_xref, contnode: TextElement) -> nodes.reference | None:\n    return _resolve_reference(None, env.domains, InventoryAdapter(env).main_inventory, honor_disabled_refs, frozenset(env.config.intersphinx_disabled_reftypes), node, contnode)",
    "docstring": "Attempt to resolve a missing reference via intersphinx references. Resolution is tried with the target as is in any inventory.",
    "type": "function",
    "file_path": "sphinx\\sphinx\\ext\\intersphinx\\_resolve.py",
    "ast_data": "FunctionDef name:resolve_reference_any_inventory arg:env arg:honor_disabled_refs arg:node arg:contnode arguments arg arg arg arg Return return:yes Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_get_profile_data_generator",
    "source_code": "def _get_profile_data_generator(self):\n    node_to_file_path = {}\n    node_to_line_number = {}\n    node_to_func_name = {}\n    node_to_op_type = {}\n    for op in self._graph.get_operations():\n        for trace_entry in reversed(op.traceback):\n            file_path = trace_entry[0]\n            line_num = trace_entry[1]\n            func_name = trace_entry[2]\n            if not source_utils.guess_is_tensorflow_py_library(file_path):\n                break\n        node_to_file_path[op.name] = file_path\n        node_to_line_number[op.name] = line_num\n        node_to_func_name[op.name] = func_name\n        node_to_op_type[op.name] = op.type\n\n    def profile_data_generator(device_step_stats):\n        for node_stats in device_step_stats.node_stats:\n            if node_stats.node_name == '_SOURCE' or node_stats.node_name == '_SINK':\n                continue\n            yield profiling.ProfileDatum(device_step_stats.device, node_stats, node_to_file_path.get(node_stats.node_name, ''), node_to_line_number.get(node_stats.node_name, 0), node_to_func_name.get(node_stats.node_name, ''), node_to_op_type.get(node_stats.node_name, ''))\n    return profile_data_generator",
    "docstring": "Get function that generates objects. Returns: A function that generates objects.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\debug\\cli\\profile_analyzer_cli.py",
    "ast_data": "FunctionDef name:_get_profile_data_generator arg:self arguments arg Assign Assign Assign Assign For Call For Call Assign Assign Assign If Call Assign Assign Assign Assign FunctionDef name:profile_data_generator arg:device_step_stats arguments arg For If BoolOp Compare Compare Call Call Call Call Call Return return:yes"
  },
  {
    "library": "pandas",
    "name": "_gen_columns",
    "source_code": "def _gen_columns(self) -> Iterator[str]:\n    for col in self.ids:\n        yield pprint_thing(col)",
    "docstring": "Iterator with string representation of column names.",
    "type": "method",
    "file_path": "pandas\\pandas\\io\\formats\\info.py",
    "ast_data": "FunctionDef name:_gen_columns arg:self arguments arg For Call"
  },
  {
    "library": "pytorch",
    "name": "set_stream",
    "source_code": "def set_stream(stream: Stream):\n    if stream is None:\n        return\n    _set_stream_by_id(stream_id=stream.stream_id, device_index=stream.device_index, device_type=stream.device_type)",
    "docstring": "Set the current stream.This is a wrapper API to set the stream. Usage of this function is discouraged in favor of the ``.",
    "type": "function",
    "file_path": "pytorch\\torch\\cuda\\__init__.py",
    "ast_data": "FunctionDef name:set_stream arg:stream arguments arg If Compare Return return:no Call"
  },
  {
    "library": "pytorch",
    "name": "silu",
    "source_code": "def silu(input: Tensor, inplace: bool=False) -> Tensor:\n    if has_torch_function_unary(input):\n        return handle_torch_function(silu, (input,), input, inplace=inplace)\n    if inplace:\n        return torch._C._nn.silu_(input)\n    return torch._C._nn.silu(input)",
    "docstring": "Apply the Sigmoid Linear Unit (SiLU) function, element-wise. The SiLU function is also known as the swish function. .. math:: \\text{silu}(x) = x * \\sigma(x), \\text{where } \\sigma(x) \\text{ is the logistic sigmoid.} .. note:: See _ where the SiLU (Sigmoid Linear Unit) was originally coined, and see _ and _ where the SiLU was experimented with later. See :class: for more details.",
    "type": "function",
    "file_path": "pytorch\\torch\\nn\\functional.py",
    "ast_data": "FunctionDef name:silu arg:input arg:inplace arguments arg arg If Call Return return:yes Call If Return return:yes Call Return return:yes Call"
  },
  {
    "library": "pygame",
    "name": "detect_arch",
    "source_code": "def detect_arch():\n    msystem = os.environ.get('MSYSTEM', '')\n    if msystem.startswith('MINGW32'):\n        return 'mingw32'\n    elif msystem.startswith('MINGW64'):\n        return 'mingw64'\n    elif msystem.startswith('UCRT64'):\n        return 'ucrt64'\n    elif msystem.startswith('CLANG32'):\n        return 'clang32'\n    elif msystem.startswith('CLANGARM64'):\n        return 'clangarm64'\n    elif msystem.startswith('CLANG64'):\n        return 'clang64'\n    elif sys.maxsize > 2 ** 32:\n        return 'mingw64'\n    else:\n        return 'mingw32'",
    "docstring": "Returns one of: \"clang32\", \"clang64\", \"mingw32\", \"mingw64\", \"ucrt64\", \"clangarm64\". Based on the MSYSTEM environment variable with a fallback.",
    "type": "function",
    "file_path": "pygame\\buildconfig\\download_msys2_prebuilt.py",
    "ast_data": "FunctionDef name:detect_arch arguments Assign Call If Call Return return:yes If Call Return return:yes If Call Return return:yes If Call Return return:yes If Call Return return:yes If Call Return return:yes If Compare Return return:yes Return return:yes"
  },
  {
    "library": "virtualenv",
    "name": "satisfies",
    "source_code": "def satisfies(self, spec):\n    if spec.is_abs and self.is_abs and (self.path != spec.path):\n        return False\n    if spec.implementation is not None and spec.implementation.lower() != self.implementation.lower():\n        return False\n    if spec.architecture is not None and spec.architecture != self.architecture:\n        return False\n    if spec.free_threaded is not None and spec.free_threaded != self.free_threaded:\n        return False\n    for our, req in zip((self.major, self.minor, self.micro), (spec.major, spec.minor, spec.micro)):\n        if req is not None and our is not None and (our != req):\n            return False\n    return True",
    "docstring": "Called when there's a candidate metadata spec to see if compatible - e.g. PEP-514 on Windows.",
    "type": "method",
    "file_path": "virtualenv\\src\\virtualenv\\discovery\\py_spec.py",
    "ast_data": "FunctionDef name:satisfies arg:self arg:spec arguments arg arg If BoolOp Compare Return return:yes If BoolOp Compare Compare Call Call Return return:yes If BoolOp Compare Compare Return return:yes If BoolOp Compare Compare Return return:yes For Call If BoolOp Compare Compare Compare Return return:yes Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "cross_entropy",
    "source_code": "def cross_entropy(self, other, name='cross_entropy'):\n    with self._name_scope(name):\n        return self._cross_entropy(other)",
    "docstring": "Computes the (Shannon) cross entropy. Denote this distribution () by and the distribution by . Assuming are absolutely continuous with respect to one another and permit densities and , (Shanon) cross entropy is defined as: where denotes the support of the random variable . Args: other: instance. name: Python prepended to names of ops created by this function. Returns: cross_entropy: with shape representing different calculations of (Shanon) cross entropy.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\distributions\\distribution.py",
    "ast_data": "FunctionDef name:cross_entropy arg:self arg:other arg:name arguments arg arg arg With Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_ExtractAttrProto",
    "source_code": "def _ExtractAttrProto(op_type_name, op_def, attrs, attr_protos):\n    for attr_def in op_def.attr:\n        key = attr_def.name\n        value = attrs[key]\n        if attr_def.HasField('default_value') and value is None:\n            attr_value = attr_value_pb2.AttrValue()\n            attr_value.CopyFrom(attr_def.default_value)\n            attr_protos[key] = attr_value\n            continue\n        attr_value = value_to_attr_value(value, attr_def.type, key)\n        if attr_def.type.startswith('list('):\n            _SatisfiesLengthConstraint(len(value), attr_def, key, op_type_name)\n        if attr_def.HasField('allowed_values'):\n            if attr_def.type == 'string':\n                _SatisfiesAllowedStringsConstraint(attr_value.s, attr_def, key, op_type_name)\n            elif attr_def.type == 'list(string)':\n                for value in attr_value.list.s:\n                    _SatisfiesAllowedStringsConstraint(value, attr_def, key, op_type_name)\n        if attr_def.has_minimum and attr_def.type == 'int':\n            _SatisfiesIntMinimumConstraint(attr_value.i, attr_def, key, op_type_name)\n        if attr_def.type == 'type':\n            _SatisfiesTypeConstraint(attr_value.type, attr_def, key)\n        if attr_def.type == 'list(type)':\n            for value in attr_value.list.type:\n                _SatisfiesTypeConstraint(value, attr_def, key)\n        attr_protos[key] = attr_value",
    "docstring": "Extracts . For use in _apply_op_helper.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\op_def_library.py",
    "ast_data": "FunctionDef name:_ExtractAttrProto arg:op_type_name arg:op_def arg:attrs arg:attr_protos arguments arg arg arg arg For Assign Assign If BoolOp Call Compare Assign Call Call Assign Assign Call If Call Call Call If Call If Compare Call If Compare For Call If BoolOp Compare Call If Compare Call If Compare For Call Assign"
  },
  {
    "library": "sphinx",
    "name": "read_png_depth",
    "source_code": "def read_png_depth(filename: str | os.PathLike[str]) -> int | None:\n    with open(filename, 'rb') as f:\n        f.seek(-(LEN_IEND + LEN_DEPTH), 2)\n        depthchunk = f.read(LEN_DEPTH)\n        if not depthchunk.startswith(DEPTH_CHUNK_LEN + DEPTH_CHUNK_START):\n            return None\n        else:\n            return struct.unpack('!i', depthchunk[14:18])[0]",
    "docstring": "Read the special tEXt chunk indicating the depth from a PNG file.",
    "type": "function",
    "file_path": "sphinx\\sphinx\\util\\png.py",
    "ast_data": "FunctionDef name:read_png_depth arg:filename arguments arg With Call Call Assign Call If Call Return return:no Return return:yes Call"
  },
  {
    "library": "django",
    "name": "srs_double",
    "source_code": "def srs_double(f):\n    return double_output(f, [c_void_p, POINTER(c_int)], errcheck=True)",
    "docstring": "Create a function prototype for the OSR routines that take the OSRSpatialReference object and return a double value.",
    "type": "function",
    "file_path": "django\\django\\contrib\\gis\\gdal\\prototypes\\srs.py",
    "ast_data": "FunctionDef name:srs_double arg:f arguments arg Return return:yes Call Call"
  },
  {
    "library": "matplotlib",
    "name": "clear",
    "source_code": "def clear(self):\n    self._mtx = IdentityTransform._mtx.copy()\n    self.invalidate()\n    return self",
    "docstring": "Reset the underlying matrix to the identity transform.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\transforms.py",
    "ast_data": "FunctionDef name:clear arg:self arguments arg Assign Call Call Return return:yes"
  },
  {
    "library": "django",
    "name": "_reverse_one_to_one_field_names",
    "source_code": "@cached_property\ndef _reverse_one_to_one_field_names(self):\n    return frozenset((field.name for field in self.related_objects if field.one_to_one))",
    "docstring": "Return a set of reverse one to one field names pointing to the current model.",
    "type": "method",
    "file_path": "django\\django\\db\\models\\options.py",
    "ast_data": "FunctionDef name:_reverse_one_to_one_field_names arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "radius",
    "source_code": "@property\ndef radius(self):\n    sq_radius = self.squared_sum_ / self.n_samples_ - self.sq_norm_\n    return sqrt(max(0, sq_radius))",
    "docstring": "Return radius of the subcluster",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\cluster\\_birch.py",
    "ast_data": "FunctionDef name:radius arg:self arguments arg Assign Return return:yes Call Call"
  },
  {
    "library": "matplotlib",
    "name": "draw_idle",
    "source_code": "def draw_idle(self):\n    if not (getattr(self, '_draw_pending', False) or getattr(self, '_is_drawing', False)):\n        self._draw_pending = True\n        QtCore.QTimer.singleShot(0, self._draw_idle)",
    "docstring": "Queue redraw of the Agg buffer and request Qt paintEvent.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\backends\\backend_qt.py",
    "ast_data": "FunctionDef name:draw_idle arg:self arguments arg If BoolOp Call Call Assign Call"
  },
  {
    "library": "tensorflow",
    "name": "_get_tensors_from_legacy_saveable",
    "source_code": "def _get_tensors_from_legacy_saveable(trackable_data: _TrackableData, node_ids: Dict[base.Trackable, int], call_with_mapped_captures: Callable[..., Any], object_graph_proto: trackable_object_graph_pb2.TrackableObjectGraph) -> Tuple[base.Trackable, Dict[str, Any]]:\n    object_names = object_identity.ObjectIdentityDictionary()\n    object_names[trackable_data.trackable] = trackable_data.object_name\n    object_map = object_identity.ObjectIdentityDictionary()\n    object_map[trackable_data.trackable] = trackable_data.object_to_save\n    checkpoint_factory_map, _ = save_util_v1.get_checkpoint_factories_and_keys(object_names, object_map)\n    named_saveable_objects, _ = save_util_v1.generate_saveable_objects(checkpoint_factory_map, object_graph_proto, node_ids, object_map, call_with_mapped_captures, saveables_cache=None)\n    trackable = saveable_object_util.SaveableCompatibilityConverter(trackable_data.object_to_save, named_saveable_objects)\n    return (trackable, trackable._serialize_to_tensors())",
    "docstring": "Gets tensors to serialize from a Trackable with legacy SaveableObjects.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\checkpoint\\save_util.py",
    "ast_data": "FunctionDef name:_get_tensors_from_legacy_saveable arg:trackable_data arg:node_ids arg:call_with_mapped_captures arg:object_graph_proto arguments arg arg arg arg Assign Call Assign Assign Call Assign Assign Call Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_AngleGrad",
    "source_code": "@ops.RegisterGradient('Angle')\ndef _AngleGrad(op: ops.Operation, grad):\n    x = op.inputs[0]\n    with ops.control_dependencies([grad]):\n        re = math_ops.real(x)\n        im = math_ops.imag(x)\n        z = math_ops.reciprocal(math_ops.complex(im, re))\n        zero = constant_op.constant(0, dtype=grad.dtype)\n        complex_grad = math_ops.complex(grad, zero)\n        return -complex_grad * z",
    "docstring": "Returns .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\math_grad.py",
    "ast_data": "FunctionDef name:_AngleGrad arg:op arg:grad arguments arg arg Assign With Call Assign Call Assign Call Assign Call Call Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "django",
    "name": "n",
    "source_code": "def n(self):\n    return self.data.month",
    "docstring": "Month without leading zeros; i.e. '1' to '12'",
    "type": "method",
    "file_path": "django\\django\\utils\\dateformat.py",
    "ast_data": "FunctionDef name:n arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_calculate_replicas_with_values",
    "source_code": "def _calculate_replicas_with_values(strategy, input_workers, optional_list):\n    worker_has_values = []\n    for worker, optionals in zip(input_workers.worker_devices, optional_list):\n        with ops.device(worker):\n            device_has_values = [math_ops.cast(v.has_value(), dtypes.int64) for v in optionals]\n            worker_has_values.append(math_ops.reduce_sum(device_has_values, keepdims=True))\n    client_has_values = math_ops.reduce_sum(worker_has_values, keepdims=True)\n    if strategy.extended._in_multi_worker_mode():\n        global_has_values = strategy.reduce(reduce_util.ReduceOp.SUM, client_has_values, axis=None)\n        return array_ops.reshape(global_has_values, [])\n    else:\n        return array_ops.reshape(client_has_values, [])",
    "docstring": "Computes the number of replicas that have values. Args: strategy: the . input_workers: the . optional_list: a list of lists . The values from each compute device grouped by the input device. Returns: A scalar Tensor.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\input_lib.py",
    "ast_data": "FunctionDef name:_calculate_replicas_with_values arg:strategy arg:input_workers arg:optional_list arguments arg arg arg Assign For Call With Call Assign Call Call Call Call Assign Call If Call Assign Call Return return:yes Call Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "tick_right",
    "source_code": "def tick_right(self):\n    label = True\n    if 'label1On' in self._major_tick_kw:\n        label = self._major_tick_kw['label1On'] or self._major_tick_kw['label2On']\n    self.set_ticks_position('right')\n    self.set_tick_params(which='both', labelright=label)",
    "docstring": "Move ticks and ticklabels (if present) to the right of the Axes.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\axis.py",
    "ast_data": "FunctionDef name:tick_right arg:self arguments arg Assign If Compare Assign BoolOp Call Call"
  },
  {
    "library": "pytorch",
    "name": "set_tensor",
    "source_code": "def set_tensor(self, name: str, value: torch.Tensor) -> None:\n    prefix, _, attr = name.rpartition('.')\n    set_tensor(self.get_submodule(prefix), attr, value)",
    "docstring": "Set the attribute specified by the given path to value. For example, to set the attribute mod.layer1.conv1.weight, use accessor.set_tensor(\"layer1.conv1.weight\", value)",
    "type": "method",
    "file_path": "pytorch\\torch\\nn\\utils\\_named_member_accessor.py",
    "ast_data": "FunctionDef name:set_tensor arg:self arg:name arg:value arguments arg arg arg Assign Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "RaggedTensorSpec",
    "source_code": "class RaggedTensorSpec(object):\n    pass",
    "docstring": "Interface for internal isinstance checks to ops/ragged/ragged_tensor.py. This helps to avoid circular dependencies.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\types\\internal.py",
    "ast_data": "ClassDef name:RaggedTensorSpec"
  },
  {
    "library": "pytorch",
    "name": "relu",
    "source_code": "@register_decomposition(aten.relu)\n@_inplace_wrapper\n@out_wrapper()\n@elementwise_type_promotion_wrapper(type_promoting_args=('a',), type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT)\ndef relu(a: TensorLikeType, inplace: bool=False) -> TensorLikeType:\n    if inplace:\n        raise NotImplementedError\n    return torch.where(torch.le(a, 0), 0, a)",
    "docstring": "Reference implementation of torch.nn.functional.relu",
    "type": "function",
    "file_path": "pytorch\\torch\\_refs\\nn\\functional\\__init__.py",
    "ast_data": "FunctionDef name:relu arg:a arg:inplace arguments arg arg If Raise Return return:yes Call Call Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "_get_output_nodes_of_partitions",
    "source_code": "def _get_output_nodes_of_partitions(self, partition_list: list[SourcePartition]) -> list[torch.fx.Node]:\n    output_node_list = []\n    for partition in partition_list:\n        if len(partition.output_nodes) > 1:\n            raise ValueError('Input partition has more than one output node')\n        output_node = partition.output_nodes[0]\n        assert isinstance(output_node, Node)\n        output_node_list.append(output_node)\n    if len(output_node_list) != len(partition_list):\n        raise ValueError('length of output_node_list should equal to length of partition_list')\n    return output_node_list",
    "docstring": "Helper function to get the output node list from partition list",
    "type": "method",
    "file_path": "pytorch\\torch\\ao\\quantization\\quantizer\\x86_inductor_quantizer.py",
    "ast_data": "FunctionDef name:_get_output_nodes_of_partitions arg:self arg:partition_list arguments arg arg Assign For If Compare Call Raise Call Assign Call Call If Compare Call Call Raise Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "get_quantized_weight_bias_dict",
    "source_code": "def get_quantized_weight_bias_dict(self):\n    quantized_weight_bias_dict = {}\n    for wn in self._flat_weights_names:\n        if hasattr(self, wn):\n            if wn.startswith('weight'):\n                weight_or_bias = get_quantized_weight(self, wn)\n            else:\n                weight_or_bias = getattr(self, wn)\n        else:\n            weight_or_bias = None\n        quantized_weight_bias_dict[wn] = weight_or_bias\n    return quantized_weight_bias_dict",
    "docstring": "dictionary from flat_weight_name to quantized weight or (unquantized) bias e.g. { \"weight_ih_l0\": quantized_weight, \"bias_ih_l0\": unquantized_bias, ... }",
    "type": "method",
    "file_path": "pytorch\\torch\\ao\\nn\\quantized\\reference\\modules\\rnn.py",
    "ast_data": "FunctionDef name:get_quantized_weight_bias_dict arg:self arguments arg Assign For If Call If Call Assign Call Assign Call Assign Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_with_dependencies",
    "source_code": "def _with_dependencies(self, dependencies):\n    new_row_splits = control_flow_ops.with_dependencies(dependencies, self._row_splits)\n    return RowPartition(row_splits=new_row_splits, row_lengths=self._row_lengths, value_rowids=self._value_rowids, nrows=self._nrows, uniform_row_length=self._uniform_row_length, internal=_row_partition_factory_key)",
    "docstring": "Returns a new RowPartition equal to self with control dependencies. Specifically, self._row_splits is gated by the given control dependencies. Used to add sanity checks to the constructors. Args: dependencies: a list of tensors to use as dependencies. Returns: A new RowPartition object.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\row_partition.py",
    "ast_data": "FunctionDef name:_with_dependencies arg:self arg:dependencies arguments arg arg Assign Call Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "unique",
    "source_code": "def unique(self, level: Hashable | None=None) -> Self:\n    if level is not None:\n        self._validate_index_level(level)\n    if self.is_unique:\n        return self._view()\n    result = super().unique()\n    return self._shallow_copy(result)",
    "docstring": "Return unique values in the index. Unique values are returned in order of appearance, this does NOT sort. Parameters ---------- level : int or hashable, optional Only return values from specified level (for MultiIndex). If int, gets the level by integer position, else by level name. Returns ------- Index Unique values in the index. See Also -------- unique : Numpy array of unique values in that column. Series.unique : Return unique values of Series object. Examples -------- >>> idx = pd.Index([1, 1, 2, 3, 3]) >>> idx.unique() Index([1, 2, 3], dtype='int64')",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\indexes\\base.py",
    "ast_data": "FunctionDef name:unique arg:self arg:level arguments arg arg If Compare Call If Return return:yes Call Assign Call Call Return return:yes Call"
  },
  {
    "library": "pygame",
    "name": "array_colorkey",
    "source_code": "def array_colorkey(surface):\n    size = surface.get_size()\n    array = numpy.empty(size, numpy.uint8)\n    surface_to_array(array, surface, 'C')\n    return array",
    "docstring": "pygame.surfarray.array_colorkey(Surface): return array copy the colorkey values into a 2d array Create a new array with the colorkey transparency value from each pixel. If the pixel matches the colorkey it will be fully transparent; otherwise it will be fully opaque. This will work on any type of Surface format. If the image has no colorkey a solid opaque array will be returned. This function will temporarily lock the Surface as pixels are copied.",
    "type": "function",
    "file_path": "pygame\\src_py\\surfarray.py",
    "ast_data": "FunctionDef name:array_colorkey arg:surface arguments arg Assign Call Assign Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_get_build_directory",
    "source_code": "def _get_build_directory(name: str, verbose: bool) -> str:\n    root_extensions_directory = os.environ.get('TORCH_EXTENSIONS_DIR')\n    if root_extensions_directory is None:\n        root_extensions_directory = get_default_build_root()\n        cu_str = 'cpu' if torch.version.cuda is None else f'cu{torch.version.cuda.replace('.', '')}'\n        python_version = f'py{sys.version_info.major}{sys.version_info.minor}{getattr(sys, 'abiflags', '')}'\n        build_folder = f'{python_version}_{cu_str}'\n        root_extensions_directory = os.path.join(root_extensions_directory, build_folder)\n    if verbose:\n        logger.info('Using %s as PyTorch extensions root...', root_extensions_directory)\n    build_directory = os.path.join(root_extensions_directory, name)\n    if not os.path.exists(build_directory):\n        if verbose:\n            logger.debug('Creating extension directory %s...', build_directory)\n        os.makedirs(build_directory, exist_ok=True)\n    return build_directory",
    "docstring": "Get the build directory for an extension. Args: name: The name of the extension verbose: Whether to print verbose information Returns: The path to the build directory",
    "type": "function",
    "file_path": "pytorch\\torch\\utils\\cpp_extension.py",
    "ast_data": "FunctionDef name:_get_build_directory arg:name arg:verbose arguments arg arg Assign Call If Compare Assign Call Assign Compare Call Assign Call Assign Assign Call If Call Assign Call If Call If Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "transform_for_annotation",
    "source_code": "def transform_for_annotation(self, model: torch.fx.GraphModule) -> torch.fx.GraphModule:\n    return _convert_scalars_to_attrs(model)",
    "docstring": "Transforms scalar values to tensor attributes",
    "type": "method",
    "file_path": "pytorch\\torch\\ao\\quantization\\quantizer\\xnnpack_quantizer.py",
    "ast_data": "FunctionDef name:transform_for_annotation arg:self arg:model arguments arg arg Return return:yes Call"
  },
  {
    "library": "sphinx",
    "name": "after_content",
    "source_code": "def after_content(self) -> None:\n    pass",
    "docstring": "Called after parsing content. Used to reset information about the current directive context on the build environment.",
    "type": "method",
    "file_path": "sphinx\\sphinx\\directives\\__init__.py",
    "ast_data": "FunctionDef name:after_content arg:self arguments arg"
  },
  {
    "library": "django",
    "name": "messages",
    "source_code": "def messages(request):\n    return {'messages': get_messages(request), 'DEFAULT_MESSAGE_LEVELS': DEFAULT_LEVELS}",
    "docstring": "Return a lazy 'messages' context variable as well as 'DEFAULT_MESSAGE_LEVELS'.",
    "type": "function",
    "file_path": "django\\django\\contrib\\messages\\context_processors.py",
    "ast_data": "FunctionDef name:messages arg:request arguments arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "diag",
    "source_code": "@tf_export.tf_export('experimental.numpy.diag', v1=[])\n@np_utils.np_doc('diag')\ndef diag(v, k=0):\n    v = asarray(v)\n    v_rank = array_ops.rank(v)\n    v.shape.with_rank_at_most(2)\n    control_flow_assert.Assert(np_utils.logical_or(math_ops.equal(v_rank, 1), math_ops.equal(v_rank, 2)), [v_rank])\n\n    def _diag(v, k):\n        return np_utils.cond(math_ops.equal(array_ops.size(v), 0), lambda: array_ops.zeros([abs(k), abs(k)], dtype=v.dtype), lambda: array_ops.matrix_diag(v, k=k))\n\n    def _diag_part(v, k):\n        v_shape = array_ops.shape(v)\n        v, k = np_utils.cond(np_utils.logical_or(np_utils.less_equal(k, -1 * np_utils.getitem(v_shape, 0)), np_utils.greater_equal(k, np_utils.getitem(v_shape, 1))), lambda: (array_ops.zeros([0, 0], dtype=v.dtype), 0), lambda: (v, k))\n        result = array_ops.matrix_diag_part(v, k=k)\n        return result\n    result = np_utils.cond(math_ops.equal(v_rank, 1), lambda: _diag(v, k), lambda: _diag_part(v, k))\n    return result",
    "docstring": "Raises an error if input is not 1- or 2-d.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\numpy_ops\\np_array_ops.py",
    "ast_data": "FunctionDef name:diag arg:v arg:k arguments arg arg Assign Call Assign Call Call Call Call Call Call FunctionDef name:_diag arg:v arg:k arguments arg arg Return return:yes Call Call Call arguments Call Call Call arguments Call FunctionDef name:_diag_part arg:v arg:k arguments arg arg Assign Call Assign Call Call Call Call Call Call arguments Call arguments Assign Call Return return:yes Assign Call Call arguments Call arguments Call Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "isroutine",
    "source_code": "def isroutine(object):\n    return _inspect.isroutine(tf_decorator.unwrap(object)[1])",
    "docstring": "TFDecorator-aware replacement for inspect.isroutine.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\utils\\tf_inspect.py",
    "ast_data": "FunctionDef name:isroutine arg:object arguments arg Return return:yes Call Call"
  },
  {
    "library": "virtualenv",
    "name": "Discovery",
    "source_code": "class Discovery(PluginLoader):\n    pass",
    "docstring": "Discovery plugins.",
    "type": "class",
    "file_path": "virtualenv\\src\\virtualenv\\run\\plugin\\discovery.py",
    "ast_data": "ClassDef name:Discovery"
  },
  {
    "library": "matplotlib",
    "name": "_make_twin_axes",
    "source_code": "def _make_twin_axes(self, *args, **kwargs):\n    if 'sharex' in kwargs and 'sharey' in kwargs:\n        if kwargs['sharex'] is not self and kwargs['sharey'] is not self:\n            raise ValueError('Twinned Axes may share only one axis')\n    ss = self.get_subplotspec()\n    if ss:\n        twin = self.get_figure(root=False).add_subplot(ss, *args, **kwargs)\n    else:\n        twin = self.get_figure(root=False).add_axes(self.get_position(True), *args, **kwargs, axes_locator=_TransformedBoundsLocator([0, 0, 1, 1], self.transAxes))\n    self.set_adjustable('datalim')\n    twin.set_adjustable('datalim')\n    twin.set_zorder(self.zorder)\n    self._twinned_axes.join(self, twin)\n    return twin",
    "docstring": "Make a twinx Axes of self. This is used for twinx and twiny.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\axes\\_base.py",
    "ast_data": "FunctionDef name:_make_twin_axes arg:self arguments arg arg arg If BoolOp Compare Compare If BoolOp Compare Compare Raise Call Assign Call If Assign Call Call Assign Call Call Call Call Call Call Call Call Return return:yes"
  },
  {
    "library": "seaborn",
    "name": "set_context",
    "source_code": "def set_context(context=None, font_scale=1, rc=None):\n    context_object = plotting_context(context, font_scale, rc)\n    mpl.rcParams.update(context_object)",
    "docstring": "Set the parameters that control the scaling of plot elements. These parameters correspond to label size, line thickness, etc. Calling this function modifies the global matplotlib . For more information, see the :doc:. The base context is \"notebook\", and the other contexts are \"paper\", \"talk\", and \"poster\", which are version of the notebook parameters scaled by different values. Font elements can also be scaled independently of (but relative to) the other values. See :func: to get the parameter values. Parameters ---------- context : dict, or one of {paper, notebook, talk, poster} A dictionary of parameters or the name of a preconfigured set. font_scale : float, optional Separate scaling factor to independently scale the size of the font elements. rc : dict, optional Parameter mappings to override the values in the preset seaborn context dictionaries. This only updates parameters that are considered part of the context definition. Examples -------- .. include:: ../docstrings/set_context.rst",
    "type": "function",
    "file_path": "seaborn\\seaborn\\rcmod.py",
    "ast_data": "FunctionDef name:set_context arg:context arg:font_scale arg:rc arguments arg arg arg Assign Call Call"
  },
  {
    "library": "tensorflow",
    "name": "is_composite_or_composite_value",
    "source_code": "def is_composite_or_composite_value(tensor):\n    return isinstance(tensor, (composite_tensor.CompositeTensor, sparse_tensor.SparseTensorValue, ragged_tensor_value.RaggedTensorValue))",
    "docstring": "Returns true if 'tensor' is a CompositeTensor or a CT Value object.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\training_utils_v1.py",
    "ast_data": "FunctionDef name:is_composite_or_composite_value arg:tensor arguments arg Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "_ensure_data",
    "source_code": "def _ensure_data(values: ArrayLike) -> np.ndarray:\n    if not isinstance(values, ABCMultiIndex):\n        values = extract_array(values, extract_numpy=True)\n    if is_object_dtype(values.dtype):\n        return ensure_object(np.asarray(values))\n    elif isinstance(values.dtype, BaseMaskedDtype):\n        values = cast('BaseMaskedArray', values)\n        if not values._hasna:\n            return _ensure_data(values._data)\n        return np.asarray(values)\n    elif isinstance(values.dtype, CategoricalDtype):\n        values = cast('Categorical', values)\n        return values.codes\n    elif is_bool_dtype(values.dtype):\n        if isinstance(values, np.ndarray):\n            return np.asarray(values).view('uint8')\n        else:\n            return np.asarray(values).astype('uint8', copy=False)\n    elif is_integer_dtype(values.dtype):\n        return np.asarray(values)\n    elif is_float_dtype(values.dtype):\n        if values.dtype.itemsize in [2, 12, 16]:\n            return ensure_float64(values)\n        return np.asarray(values)\n    elif is_complex_dtype(values.dtype):\n        return cast(np.ndarray, values)\n    elif needs_i8_conversion(values.dtype):\n        npvalues = values.view('i8')\n        npvalues = cast(np.ndarray, npvalues)\n        return npvalues\n    values = np.asarray(values, dtype=object)\n    return ensure_object(values)",
    "docstring": "routine to ensure that our data is of the correct input dtype for lower-level routines This will coerce: - ints -> int64 - uint -> uint64 - bool -> uint8 - datetimelike -> i8 - datetime64tz -> i8 (in local tz) - categorical -> codes Parameters ---------- values : np.ndarray or ExtensionArray Returns ------- np.ndarray",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\algorithms.py",
    "ast_data": "FunctionDef name:_ensure_data arg:values arguments arg If Call Assign Call If Call Return return:yes Call Call If Call Assign Call If Return return:yes Call Return return:yes Call If Call Assign Call Return return:yes If Call If Call Return return:yes Call Call Return return:yes Call Call If Call Return return:yes Call If Call If Compare Return return:yes Call Return return:yes Call If Call Return return:yes Call If Call Assign Call Assign Call Return return:yes Assign Call Return return:yes Call"
  },
  {
    "library": "cryptography",
    "name": "__copy__",
    "source_code": "@abc.abstractmethod\ndef __copy__(self) -> X448PrivateKey:\n    pass",
    "docstring": "Returns a copy.",
    "type": "method",
    "file_path": "cryptography\\src\\cryptography\\hazmat\\primitives\\asymmetric\\x448.py",
    "ast_data": "FunctionDef name:__copy__ arg:self arguments arg"
  },
  {
    "library": "pandas",
    "name": "quantile_compat",
    "source_code": "def quantile_compat(values: ArrayLike, qs: npt.NDArray[np.float64], interpolation: str) -> ArrayLike:\n    if isinstance(values, np.ndarray):\n        fill_value = na_value_for_dtype(values.dtype, compat=False)\n        mask = isna(values)\n        return quantile_with_mask(values, mask, fill_value, qs, interpolation)\n    else:\n        return values._quantile(qs, interpolation)",
    "docstring": "Compute the quantiles of the given values for each quantile in . Parameters ---------- values : np.ndarray or ExtensionArray qs : np.ndarray[float64] interpolation : str Returns ------- np.ndarray or ExtensionArray",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\array_algos\\quantile.py",
    "ast_data": "FunctionDef name:quantile_compat arg:values arg:qs arg:interpolation arguments arg arg arg If Call Assign Call Assign Call Return return:yes Call Return return:yes Call"
  },
  {
    "library": "django",
    "name": "SimpleLazyObject",
    "source_code": "class SimpleLazyObject(LazyObject):\n\n    def __init__(self, func):\n        self.__dict__['_setupfunc'] = func\n        super().__init__()\n\n    def _setup(self):\n        self._wrapped = self._setupfunc()\n\n    def __repr__(self):\n        if self._wrapped is empty:\n            repr_attr = self._setupfunc\n        else:\n            repr_attr = self._wrapped\n        return '<%s: %r>' % (type(self).__name__, repr_attr)\n\n    def __copy__(self):\n        if self._wrapped is empty:\n            return SimpleLazyObject(self._setupfunc)\n        else:\n            return copy.copy(self._wrapped)\n\n    def __deepcopy__(self, memo):\n        if self._wrapped is empty:\n            result = SimpleLazyObject(self._setupfunc)\n            memo[id(self)] = result\n            return result\n        return copy.deepcopy(self._wrapped, memo)\n    __add__ = new_method_proxy(operator.add)\n\n    @new_method_proxy\n    def __radd__(self, other):\n        return other + self",
    "docstring": "A lazy object initialized from any function. Designed for compound objects of unknown type. For builtins or objects of known type, use django.utils.functional.lazy.",
    "type": "class",
    "file_path": "django\\django\\utils\\functional.py",
    "ast_data": "ClassDef name:SimpleLazyObject FunctionDef name:__init__ arg:self arg:func arguments arg arg Assign Call Call FunctionDef name:_setup arg:self arguments arg Assign Call FunctionDef name:__repr__ arg:self arguments arg If Compare Assign Assign Return return:yes Call FunctionDef name:__copy__ arg:self arguments arg If Compare Return return:yes Call Return return:yes Call FunctionDef name:__deepcopy__ arg:self arg:memo arguments arg arg If Compare Assign Call Assign Call Return return:yes Return return:yes Call Assign Call FunctionDef name:__radd__ arg:self arg:other arguments arg arg Return return:yes"
  },
  {
    "library": "virtualenv",
    "name": "build_parser_only",
    "source_code": "def build_parser_only(args=None):\n    return build_parser(args)[0]",
    "docstring": "Used to provide a parser for the doc generation.",
    "type": "function",
    "file_path": "virtualenv\\src\\virtualenv\\run\\__init__.py",
    "ast_data": "FunctionDef name:build_parser_only arg:args arguments arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "heartbeat",
    "source_code": "@property\ndef heartbeat(self) -> timedelta:\n    return self._heartbeat",
    "docstring": "Get the keep-alive heartbeat timeout.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\elastic\\rendezvous\\dynamic_rendezvous.py",
    "ast_data": "FunctionDef name:heartbeat arg:self arguments arg Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "_fully_clipped_to_axes",
    "source_code": "def _fully_clipped_to_axes(self):\n    clip_box = self.get_clip_box()\n    clip_path = self.get_clip_path()\n    return self.axes is not None and self.get_clip_on() and (clip_box is not None or clip_path is not None) and (clip_box is None or np.all(clip_box.extents == self.axes.bbox.extents)) and (clip_path is None or (isinstance(clip_path, TransformedPatchPath) and clip_path._patch is self.axes.patch))",
    "docstring": "Return a boolean flag, `get_clip_onclip_boxclip_path` (if set).",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\artist.py",
    "ast_data": "FunctionDef name:_fully_clipped_to_axes arg:self arguments arg Assign Call Assign Call Return return:yes BoolOp Compare Call BoolOp Compare Compare BoolOp Compare Call Compare BoolOp Compare BoolOp Call Compare"
  },
  {
    "library": "scipy",
    "name": "entropy",
    "source_code": "def entropy(self):\n    return self._dist._entropy(self.dim, self.kappa)",
    "docstring": "Calculate the differential entropy of the von Mises-Fisher distribution. Returns ------- h: float Entropy of the Von Mises-Fisher distribution.",
    "type": "method",
    "file_path": "scipy\\scipy\\stats\\_multivariate.py",
    "ast_data": "FunctionDef name:entropy arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "reduction_prefix_array",
    "source_code": "def reduction_prefix_array(acc_var: Union[str, CSEVariable], acc_type: str, reduction_type: str, dtype: torch.dtype, len: Union[str, int], init_fn):\n    code_buffer = IndentedBuffer()\n    acc_decl = f'auto {acc_var}_arr = std::make_unique<{acc_type}[]>({len});' if cpp_builder.is_msvc_cl() else f'{acc_type} {acc_var}_arr[{len}];'\n    code_buffer.writeline(f'{acc_decl}')\n    code_buffer.writelines([f'for (int i = 0; i < {len}; i++)', '{', f'    {acc_var}_arr[i] = {init_fn(reduction_type, dtype)};', '}'])\n    return code_buffer",
    "docstring": "MSVC don't support dynamic array(VLA). So we use std::unique_ptr here. Ref: MSVC is the only one compiler without VLA. support. Since MSVC can't get good performance here. We just use unique_ptr make it works on MSVC. For other compilers, we continue to use VLA to get best performence.",
    "type": "function",
    "file_path": "pytorch\\torch\\_inductor\\codegen\\cpp.py",
    "ast_data": "FunctionDef name:reduction_prefix_array arg:acc_var arg:acc_type arg:reduction_type arg:dtype arg:len arg:init_fn arguments arg arg arg arg arg arg Assign Call Assign Call Call Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, obj, saveables):\n    self._obj = obj\n    self._saveables = saveables",
    "docstring": "Constructor. Args: obj: A Trackable object. saveables: A list of saveables for .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\training\\saving\\saveable_object_util.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:obj arg:saveables arguments arg arg arg Assign Assign"
  },
  {
    "library": "tensorflow",
    "name": "TFRecordDatasetV1",
    "source_code": "@tf_export(v1=['data.TFRecordDataset'])\nclass TFRecordDatasetV1(dataset_ops.DatasetV1Adapter):\n\n    def __init__(self, filenames, compression_type=None, buffer_size=None, num_parallel_reads=None, name=None):\n        wrapped = TFRecordDatasetV2(filenames, compression_type, buffer_size, num_parallel_reads, name=name)\n        super(TFRecordDatasetV1, self).__init__(wrapped)\n    __init__.__doc__ = TFRecordDatasetV2.__init__.__doc__\n\n    @property\n    def _filenames(self):\n        return self._dataset._filenames\n\n    @_filenames.setter\n    def _filenames(self, value):\n        self._dataset._filenames = value",
    "docstring": "A comprising records from one or more TFRecord files.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\data\\ops\\readers.py",
    "ast_data": "ClassDef name:TFRecordDatasetV1 FunctionDef name:__init__ arg:self arg:filenames arg:compression_type arg:buffer_size arg:num_parallel_reads arg:name arguments arg arg arg arg arg arg Assign Call Call Call Assign FunctionDef name:_filenames arg:self arguments arg Return return:yes FunctionDef name:_filenames arg:self arg:value arguments arg arg Assign Call"
  },
  {
    "library": "tensorflow",
    "name": "merge",
    "source_code": "def merge(self, options):\n    merged = copy.deepcopy(self)\n    if options is None:\n        return merged\n    if options.bytes_per_pack != 0:\n        merged.bytes_per_pack = options.bytes_per_pack\n    if options.timeout_seconds is not None:\n        merged.timeout_seconds = options.timeout_seconds\n    if options.implementation != CommunicationImplementation.AUTO:\n        merged.implementation = options.implementation\n    return merged",
    "docstring": "Merges with another options and returns a new one. Values specified in the takes precedence if they're not the default. Args: options: a . Returns: A new .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\collective_util.py",
    "ast_data": "FunctionDef name:merge arg:self arg:options arguments arg arg Assign Call If Compare Return return:yes If Compare Assign If Compare Assign If Compare Assign Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "_build_graph",
    "source_code": "def _build_graph(self):\n    if self.kernel == 'knn':\n        self.nn_fit = None\n    n_samples = self.X_.shape[0]\n    affinity_matrix = self._get_kernel(self.X_)\n    laplacian = csgraph_laplacian(affinity_matrix, normed=True)\n    laplacian = -laplacian\n    if sparse.issparse(laplacian):\n        diag_mask = laplacian.row == laplacian.col\n        laplacian.data[diag_mask] = 0.0\n    else:\n        laplacian.flat[::n_samples + 1] = 0.0\n    return laplacian",
    "docstring": "Graph matrix for Label Spreading computes the graph laplacian",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\semi_supervised\\_label_propagation.py",
    "ast_data": "FunctionDef name:_build_graph arg:self arguments arg If Compare Assign Assign Assign Call Assign Call Assign If Call Assign Compare Assign Assign Return return:yes"
  },
  {
    "library": "sphinx",
    "name": "init",
    "source_code": "def init(self) -> None:\n    pass",
    "docstring": "Load necessary templates and perform initialization. The default implementation does nothing.",
    "type": "method",
    "file_path": "sphinx\\sphinx\\builders\\__init__.py",
    "ast_data": "FunctionDef name:init arg:self arguments arg"
  },
  {
    "library": "pytorch",
    "name": "OffloadPolicy",
    "source_code": "@dataclass\nclass OffloadPolicy:\n    pass",
    "docstring": "This base class represents the policy of no offloading and is only used as the default value for the `` arg.",
    "type": "class",
    "file_path": "pytorch\\torch\\distributed\\fsdp\\_fully_shard\\_fsdp_api.py",
    "ast_data": "ClassDef name:OffloadPolicy"
  },
  {
    "library": "pandas",
    "name": "dtype",
    "source_code": "@cache_readonly\ndef dtype(self) -> DtypeObj:\n    return self._data.dtype",
    "docstring": "Return the dtype object of the underlying data. See Also -------- Index.inferred_type: Return a string of the type inferred from the values. Examples -------- >>> idx = pd.Index([1, 2, 3]) >>> idx Index([1, 2, 3], dtype='int64') >>> idx.dtype dtype('int64')",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\indexes\\base.py",
    "ast_data": "FunctionDef name:dtype arg:self arguments arg Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "savefig",
    "source_code": "def savefig(self, figure=None, **kwargs):\n    if not isinstance(figure, Figure):\n        if figure is None:\n            manager = Gcf.get_active()\n        else:\n            manager = Gcf.get_fig_manager(figure)\n        if manager is None:\n            raise ValueError(f'No figure {figure}')\n        figure = manager.canvas.figure\n    figure.savefig(self, format='pdf', backend='pdf', **kwargs)",
    "docstring": "Save a to this file as a new page. Any other keyword arguments are passed to . Parameters ---------- figure : or int, default: the active figure The figure, or index of the figure, that is saved to the file.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\backends\\backend_pdf.py",
    "ast_data": "FunctionDef name:savefig arg:self arg:figure arguments arg arg arg If Call If Compare Assign Call Assign Call If Compare Raise Call Assign Call"
  },
  {
    "library": "sphinx",
    "name": "literal_emphasis",
    "source_code": "class literal_emphasis(nodes.emphasis, not_smartquotable):\n    pass",
    "docstring": "Node that behaves like , but further text processors are not applied (e.g. smartypants for HTML output).",
    "type": "class",
    "file_path": "sphinx\\sphinx\\addnodes.py",
    "ast_data": "ClassDef name:literal_emphasis"
  },
  {
    "library": "django",
    "name": "get_queryset",
    "source_code": "def get_queryset(self):\n    if self.queryset is None:\n        if self.model:\n            return self.model._default_manager.all()\n        else:\n            raise ImproperlyConfigured('%(cls)s is missing a QuerySet. Define %(cls)s.model, %(cls)s.queryset, or override %(cls)s.get_queryset().' % {'cls': self.__class__.__name__})\n    return self.queryset.all()",
    "docstring": "Return the that will be used to look up the object. This method is called by the default implementation of get_object() and may not be called if get_object() is overridden.",
    "type": "method",
    "file_path": "django\\django\\views\\generic\\detail.py",
    "ast_data": "FunctionDef name:get_queryset arg:self arguments arg If Compare If Return return:yes Call Raise Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_resource_resolver",
    "source_code": "@auto_control_deps.register_acd_resource_resolver\ndef _resource_resolver(op, resource_reads, resource_writes):\n    updated = False\n    if op.type in ['DatasetToSingleElement', 'DatasetToTFRecord', 'ReduceDataset']:\n        reads, writes = _collect_resource_inputs(op)\n        for inp in reads:\n            if inp not in resource_reads:\n                updated = True\n                resource_reads.add(inp)\n        for inp in writes:\n            if inp not in resource_writes:\n                updated = True\n                resource_writes.add(inp)\n    if op.type in ['IteratorGetNext', 'IteratorGetNextSync', 'IteratorGetNextAsOptional']:\n        iterator_resource = op.inputs[0]\n        make_iterator_ops = [op for op in iterator_resource.consumers() if op.type == 'MakeIterator']\n        if len(make_iterator_ops) == 1:\n            reads, writes = _collect_resource_inputs(make_iterator_ops[0])\n            for inp in reads:\n                if inp not in resource_reads:\n                    updated = True\n                    resource_reads.add(inp)\n            for inp in writes:\n                if inp not in resource_writes:\n                    updated = True\n                    resource_writes.add(inp)\n    return updated",
    "docstring": "Updates resource inputs for tf.data ops with indirect dependencies.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\data\\ops\\dataset_ops.py",
    "ast_data": "FunctionDef name:_resource_resolver arg:op arg:resource_reads arg:resource_writes arguments arg arg arg Assign If Compare Assign Call For If Compare Assign Call For If Compare Assign Call If Compare Assign Assign Call Compare If Compare Call Assign Call For If Compare Assign Call For If Compare Assign Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "GetMethodMode",
    "source_code": "class GetMethodMode(BaseTorchFunctionMode):\n\n    def __torch_function__(self, func, types, args=(), kwargs=None):\n        kwargs = kwargs or {}\n        nonlocal most_recent_func\n        most_recent_func = func\n        return func(*args, **kwargs)",
    "docstring": "Mode to extract the correct methods from torch function invocations (Used to get the correct torch.Tensor methods from builtins)",
    "type": "class",
    "file_path": "pytorch\\torch\\_dynamo\\variables\\torch_function.py",
    "ast_data": "ClassDef name:GetMethodMode FunctionDef name:__torch_function__ arg:self arg:func arg:types arg:args arg:kwargs arguments arg arg arg arg arg Assign BoolOp Assign Return return:yes Call"
  },
  {
    "library": "kornia",
    "name": "ellipse_to_laf",
    "source_code": "def ellipse_to_laf(ells: Tensor) -> Tensor:\n    KORNIA_CHECK_SHAPE(ells, ['B', 'N', '5'])\n    B, N, _ = ells.shape\n    a11 = ells[..., 2:3].abs().sqrt()\n    a12 = torch.zeros_like(a11)\n    a22 = ells[..., 4:5].abs().sqrt()\n    a21 = ells[..., 3:4] / (a11 + a22).clamp(1e-09)\n    A = stack([a11, a12, a21, a22], dim=-1).view(B, N, 2, 2).inverse()\n    out = concatenate([A, ells[..., :2].view(B, N, 2, 1)], dim=3)\n    return out",
    "docstring": "Convert ellipse regions to LAF format. Ellipse (a, b, c) and upright covariance matrix [a11 a12; 0 a22] are connected by inverse matrix square root: A = invsqrt([a b; b c]). See also Args: ells: tensor :math: of ellipses in Oxford format [x y a b c]. Returns: LAF :math: Example: >>> input = torch.ones(1, 10, 5) # BxNx5 >>> output = ellipse_to_laf(input) # BxNx2x3",
    "type": "function",
    "file_path": "kornia\\kornia\\feature\\laf.py",
    "ast_data": "FunctionDef name:ellipse_to_laf arg:ells arguments arg Call Assign Assign Call Call Assign Call Assign Call Call Assign Call Assign Call Call Call Assign Call Call Return return:yes"
  },
  {
    "library": "numpy",
    "name": "polycompanion",
    "source_code": "def polycompanion(c):\n    [c] = pu.as_series([c])\n    if len(c) < 2:\n        raise ValueError('Series must have maximum degree of at least 1.')\n    if len(c) == 2:\n        return np.array([[-c[0] / c[1]]])\n    n = len(c) - 1\n    mat = np.zeros((n, n), dtype=c.dtype)\n    bot = mat.reshape(-1)[n::n + 1]\n    bot[...] = 1\n    mat[:, -1] -= c[:-1] / c[-1]\n    return mat",
    "docstring": "Return the companion matrix of c. The companion matrix for power series cannot be made symmetric by scaling the basis, so this function differs from those for the orthogonal polynomials. Parameters ---------- c : array_like 1-D array of polynomial coefficients ordered from low to high degree. Returns ------- mat : ndarray Companion matrix of dimensions (deg, deg). Examples -------- >>> from numpy.polynomial import polynomial as P >>> c = (1, 2, 3) >>> P.polycompanion(c) array([[ 0. , -0.33333333], [ 1. , -0.66666667]])",
    "type": "function",
    "file_path": "numpy\\numpy\\polynomial\\polynomial.py",
    "ast_data": "FunctionDef name:polycompanion arg:c arguments arg Assign Call If Compare Call Raise Call If Compare Call Return return:yes Call Assign Call Assign Call Assign Call Assign Return return:yes"
  },
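A companion matrix is useful because its eigenvalues are exactly the roots of the polynomial; a minimal NumPy check of that relationship (the coefficient tuple is illustrative):

```python
import numpy as np
from numpy.polynomial import polynomial as P

# p(x) = 1 + 2x + 3x^2; its roots are the eigenvalues of the companion matrix.
c = (1, 2, 3)
mat = P.polycompanion(c)
eigs = np.sort_complex(np.linalg.eigvals(mat))
roots = np.sort_complex(P.polyroots(c))
assert np.allclose(eigs, roots)
print(eigs)  # [-0.33333333-0.47140452j -0.33333333+0.47140452j]
```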
  {
    "library": "django",
    "name": "ExtractIsoWeekDay",
    "source_code": "class ExtractIsoWeekDay(Extract):\n    lookup_name = 'iso_week_day'",
    "docstring": "Return Monday=1 through Sunday=7, based on ISO-8601.",
    "type": "class",
    "file_path": "django\\django\\db\\models\\functions\\datetime.py",
    "ast_data": "ClassDef name:ExtractIsoWeekDay Assign"
  },
  {
    "library": "tensorflow",
    "name": "_add_sparse_key",
    "source_code": "def _add_sparse_key(self, key, dtype):\n    if key in self.sparse_keys:\n        original_dtype = self.sparse_types[self.sparse_keys.index(key)]\n        if original_dtype != dtype:\n            raise ValueError(f'Conflicting type {original_dtype} vs {dtype} for feature {key}.')\n    else:\n        self.sparse_keys.append(key)\n        self.sparse_types.append(dtype)",
    "docstring": "Adds a sparse key & dtype, checking for duplicates.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\parsing_config.py",
    "ast_data": "FunctionDef name:_add_sparse_key arg:self arg:key arg:dtype arguments arg arg arg If Compare Assign Call If Compare Raise Call Call Call"
  },
  {
    "library": "cherrypy",
    "name": "key_for",
    "source_code": "def key_for(self, obj):\n    for key, val in self.items():\n        if val is obj:\n            return key\n    raise ValueError('The given object could not be found: %r' % obj)",
    "docstring": "For the given value, return its corresponding key.",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\process\\win32.py",
    "ast_data": "FunctionDef name:key_for arg:self arg:obj arguments arg arg For Call If Compare Return return:yes Raise Call"
  },
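A standalone sketch of the same reverse lookup over a mapping; note that it matches by identity (`is`) rather than equality, so equal-but-distinct objects are still told apart. The `services` mapping is illustrative, not part of cherrypy:

```python
# Hypothetical mapping from names to service objects.
services = {'svc1': object(), 'svc2': object()}
target = services['svc2']

def key_for(mapping, obj):
    # Walk the mapping and return the key whose value *is* obj.
    for key, val in mapping.items():
        if val is obj:
            return key
    raise ValueError('The given object could not be found: %r' % obj)

print(key_for(services, target))  # svc2
```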
  {
    "library": "pytorch",
    "name": "_RequiredParameter",
    "source_code": "class _RequiredParameter:\n\n    def __repr__(self) -> str:\n        return '<required parameter>'",
    "docstring": "Singleton class representing a required parameter for an Optimizer.",
    "type": "class",
    "file_path": "pytorch\\torch\\optim\\optimizer.py",
    "ast_data": "ClassDef name:_RequiredParameter FunctionDef name:__repr__ arg:self arguments arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "rms_norm",
    "source_code": "def rms_norm(input: Tensor, normalized_shape: list[int], weight: Optional[Tensor]=None, eps: Optional[float]=None) -> Tensor:\n    if has_torch_function_variadic(input, weight):\n        return handle_torch_function(rms_norm, (input, weight), input, normalized_shape, weight=weight, eps=eps)\n    return torch.rms_norm(input, normalized_shape, weight, eps)",
    "docstring": "Apply Root Mean Square Layer Normalization. See :class: for details.",
    "type": "function",
    "file_path": "pytorch\\torch\\nn\\functional.py",
    "ast_data": "FunctionDef name:rms_norm arg:input arg:normalized_shape arg:weight arg:eps arguments arg arg arg arg If Call Return return:yes Call Return return:yes Call"
  },
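As a reference for what the functional wraps, here is a minimal NumPy sketch of the usual RMSNorm formula, y = x / sqrt(mean(x^2) + eps) * weight, computed over the trailing `normalized_shape` dimensions. `rms_norm_ref` is a hypothetical name, and PyTorch's default handling of `eps=None` differs slightly:

```python
import numpy as np

def rms_norm_ref(x, normalized_shape, weight=None, eps=1e-6):
    # Normalize over the trailing len(normalized_shape) dimensions.
    dims = tuple(range(x.ndim - len(normalized_shape), x.ndim))
    rms = np.sqrt(np.mean(x * x, axis=dims, keepdims=True) + eps)
    y = x / rms
    return y * weight if weight is not None else y

x = np.random.randn(2, 4).astype(np.float32)
print(rms_norm_ref(x, [4]).shape)  # (2, 4)
```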
  {
    "library": "matplotlib",
    "name": "set_interpolation",
    "source_code": "def set_interpolation(self, s):\n    if s is not None and s not in ('nearest', 'bilinear'):\n        raise NotImplementedError('Only nearest neighbor and bilinear interpolations are supported')\n    super().set_interpolation(s)",
    "docstring": "Parameters ---------- s : {'nearest', 'bilinear'} or None If None, use :rc:.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\image.py",
    "ast_data": "FunctionDef name:set_interpolation arg:self arg:s arguments arg arg If BoolOp Compare Compare Raise Call Call Call"
  },
  {
    "library": "scipy",
    "name": "vstack",
    "source_code": "def vstack(blocks, format=None, dtype=None):\n    blocks = np.asarray(blocks, dtype='object')\n    if any((isinstance(b, sparray) for b in blocks.flat)):\n        return _block([[b] for b in blocks], format, dtype)\n    else:\n        return _block([[b] for b in blocks], format, dtype, return_spmatrix=True)",
    "docstring": "Stack sparse arrays vertically (row wise) Parameters ---------- blocks sequence of sparse arrays with compatible shapes format : str, optional sparse format of the result (e.g., \"csr\") by default an appropriate sparse array format is returned. This choice is subject to change. dtype : dtype, optional The data-type of the output array. If not given, the dtype is determined from that of . Returns ------- new_array : sparse matrix or array If any block in blocks is a sparse array, return a sparse array. Otherwise return a sparse matrix. If you want a sparse array built from blocks that are not sparse arrays, use ``. See Also -------- hstack : stack sparse matrices horizontally (column wise) Examples -------- >>> from scipy.sparse import coo_array, vstack >>> A = coo_array([[1, 2], [3, 4]]) >>> B = coo_array([[5, 6]]) >>> vstack([A, B]).toarray() array([[1, 2], [3, 4], [5, 6]])",
    "type": "function",
    "file_path": "scipy\\scipy\\sparse\\_construct.py",
    "ast_data": "FunctionDef name:vstack arg:blocks arg:format arg:dtype arguments arg arg arg Assign Call If Call Call Return return:yes Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "get_run_id",
    "source_code": "def get_run_id(self) -> str:\n    return self._settings.run_id",
    "docstring": "See base class.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\elastic\\rendezvous\\dynamic_rendezvous.py",
    "ast_data": "FunctionDef name:get_run_id arg:self arguments arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "load_state_dict",
    "source_code": "def load_state_dict(self, state_dict: dict[str, Any]) -> None:\n    state = state_dict['state']\n    data_groups, defaults = (state_dict['data_groups'], state_dict['defaults'])\n    self.__set_state__({'state': state, 'data_groups': data_groups, 'defaults': defaults})",
    "docstring": "The load_state_dict() restores the state of the sparsifier based on the state_dict Args: * state_dict - the dictionary that to which the current sparsifier needs to be restored to",
    "type": "method",
    "file_path": "pytorch\\torch\\ao\\pruning\\_experimental\\activation_sparsifier\\activation_sparsifier.py",
    "ast_data": "FunctionDef name:load_state_dict arg:self arg:state_dict arguments arg arg Assign Assign Call"
  },
  {
    "library": "tensorflow",
    "name": "_call_if_callable",
    "source_code": "def _call_if_callable(self, param):\n    return param() if callable(param) else param",
    "docstring": "Call the function if param is callable.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\optimizer_v2\\optimizer_v2.py",
    "ast_data": "FunctionDef name:_call_if_callable arg:self arg:param arguments arg arg Return return:yes Call Call"
  },
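The callable-or-value idiom above lets a hyperparameter (e.g. a learning rate) be given either directly or as a zero-argument factory that is resolved lazily; a standalone sketch with illustrative values:

```python
def call_if_callable(param):
    # Resolve a value that may be given directly or as a zero-arg factory.
    return param() if callable(param) else param

print(call_if_callable(0.01))          # 0.01
print(call_if_callable(lambda: 0.01))  # 0.01
```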
  {
    "library": "cherrypy",
    "name": "description",
    "source_code": "@property\ndef description(self):\n    if self.bind_addr is None:\n        on_what = 'unknown interface (dynamic?)'\n    elif isinstance(self.bind_addr, tuple):\n        on_what = self._get_base()\n    else:\n        on_what = 'socket file: %s' % self.bind_addr\n    return on_what",
    "docstring": "A description about where this server is bound.",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\process\\servers.py",
    "ast_data": "FunctionDef name:description arg:self arguments arg If Compare Assign If Call Assign Call Assign Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "get_cpp_triangulation",
    "source_code": "def get_cpp_triangulation(self):\n    from matplotlib import _tri\n    if self._cpp_triangulation is None:\n        self._cpp_triangulation = _tri.Triangulation(self.x, self.y, self.triangles, self.mask if self.mask is not None else (), self._edges if self._edges is not None else (), self._neighbors if self._neighbors is not None else (), not self.is_delaunay)\n    return self._cpp_triangulation",
    "docstring": "Return the underlying C++ Triangulation object, creating it if necessary.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\tri\\_triangulation.py",
    "ast_data": "FunctionDef name:get_cpp_triangulation arg:self arguments arg If Compare Assign Call Compare Compare Compare Return return:yes"
  },
  {
    "library": "numpy",
    "name": "check_inline",
    "source_code": "def check_inline(cmd):\n    cmd._check_compiler()\n    body = textwrap.dedent('\\n        #ifndef __cplusplus\\n        static %(inline)s int static_func (void)\\n        {\\n            return 0;\\n        }\\n        %(inline)s int nostatic_func (void)\\n        {\\n            return 0;\\n        }\\n        #endif')\n    for kw in ['inline', '__inline__', '__inline']:\n        st = cmd.try_compile(body % {'inline': kw}, None, None)\n        if st:\n            return kw\n    return ''",
    "docstring": "Return the inline identifier (may be empty).",
    "type": "function",
    "file_path": "numpy\\numpy\\distutils\\command\\autodist.py",
    "ast_data": "FunctionDef name:check_inline arg:cmd arguments arg Call Assign Call For Assign Call If Return return:yes Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "loaded_partition_graphs",
    "source_code": "def loaded_partition_graphs(self):\n    return bool(self._debug_graphs)",
    "docstring": "Test whether partition graphs have been loaded.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\debug\\lib\\debug_data.py",
    "ast_data": "FunctionDef name:loaded_partition_graphs arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "sphinx",
    "name": "fetch_inventory",
    "source_code": "def fetch_inventory(app: Sphinx, uri: InventoryURI, inv: str) -> Inventory:\n    return _fetch_inventory(target_uri=uri, inv_location=inv, config=_InvConfig.from_config(app.config), srcdir=app.srcdir).data",
    "docstring": "Fetch, parse and return an intersphinx inventory file.",
    "type": "function",
    "file_path": "sphinx\\sphinx\\ext\\intersphinx\\_load.py",
    "ast_data": "FunctionDef name:fetch_inventory arg:app arg:uri arg:inv arguments arg arg arg Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "internal_convert_to_tensor_or_indexed_slices",
    "source_code": "def internal_convert_to_tensor_or_indexed_slices(value, dtype=None, name=None, as_ref=False):\n    if isinstance(value, ops.EagerTensor) and (not context.executing_eagerly()):\n        return ops.convert_to_tensor(value, dtype=dtype, name=name, as_ref=as_ref)\n    elif isinstance(value, internal.NativeObject):\n        if dtype and (not dtypes.as_dtype(dtype).is_compatible_with(value.dtype)):\n            raise ValueError(f'Incompatible tensor conversion requested to `dtype` {dtypes.as_dtype(dtype).name} for `value` ({value}) with dtype {value.dtype.name}.')\n        return value\n    else:\n        return ops.convert_to_tensor(value, dtype=dtype, name=name, as_ref=as_ref)",
    "docstring": "Converts the given object to a or an . If is an or it is returned unmodified. Otherwise, it is converted to a using . Args: value: An , , or an object that can be consumed by . dtype: (Optional.) The required of the returned or . name: (Optional.) A name to use if a new is created. as_ref: True if the caller wants the results as ref tensors. Returns: A , , or based on . Raises: ValueError: If does not match the element type of .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\indexed_slices.py",
    "ast_data": "FunctionDef name:internal_convert_to_tensor_or_indexed_slices arg:value arg:dtype arg:name arg:as_ref arguments arg arg arg arg If BoolOp Call Call Return return:yes Call If Call If BoolOp Call Call Raise Call Call Return return:yes Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_add_deprecated_arg_value_notice_to_docstring",
    "source_code": "def _add_deprecated_arg_value_notice_to_docstring(doc, date, instructions, deprecated_name_value_dict):\n    deprecation_string = ', '.join(('%s=%r' % (key, value) for key, value in sorted(deprecated_name_value_dict.items())))\n    when = 'in a future version' if date is None else 'after %s' % date\n    return decorator_utils.add_notice_to_docstring(doc, instructions, 'DEPRECATED FUNCTION ARGUMENT VALUES', '(deprecated argument values)', ['SOME ARGUMENT VALUES ARE DEPRECATED: `(%s)`. They will be removed %s.' % (deprecation_string, when), 'Instructions for updating:'], notice_type='Deprecated')",
    "docstring": "Adds a deprecation notice to a docstring for deprecated arguments.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\util\\deprecation.py",
    "ast_data": "FunctionDef name:_add_deprecated_arg_value_notice_to_docstring arg:doc arg:date arg:instructions arg:deprecated_name_value_dict arguments arg arg arg arg Assign Call Call Call Assign Compare Return return:yes Call"
  },
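To make the string assembly above concrete, here is the same sorted `', '.join(...)` pattern run on made-up deprecated argument values (not a real TensorFlow deprecation):

```python
# Hypothetical deprecated name/value pairs, sorted for a stable message.
deprecated = {'validate_shape': False, 'use_locking': True}
deprecation_string = ', '.join(
    '%s=%r' % (key, value) for key, value in sorted(deprecated.items())
)
when = 'in a future version'  # used when no removal date is given
print('SOME ARGUMENT VALUES ARE DEPRECATED: `(%s)`. They will be removed %s.'
      % (deprecation_string, when))
```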
  {
    "library": "cherrypy",
    "name": "get_tree",
    "source_code": "def get_tree(base, exclude, coverage=the_coverage):\n    tree = {}\n    runs = coverage.data.executed_files()\n    for path in runs:\n        if not _skip_file(path, exclude) and (not os.path.isdir(path)):\n            _graft(path, tree)\n    return tree",
    "docstring": "Return covered module names as a nested dict.",
    "type": "function",
    "file_path": "cherrypy\\cherrypy\\lib\\covercp.py",
    "ast_data": "FunctionDef name:get_tree arg:base arg:exclude arg:coverage arguments arg arg arg Assign Assign Call For If BoolOp Call Call Call Return return:yes"
  },
  {
    "library": "django",
    "name": "TemplateDoesNotExist",
    "source_code": "class TemplateDoesNotExist(Exception):\n\n    def __init__(self, msg, tried=None, backend=None, chain=None):\n        self.backend = backend\n        if tried is None:\n            tried = []\n        self.tried = tried\n        if chain is None:\n            chain = []\n        self.chain = chain\n        super().__init__(msg)",
    "docstring": "The exception used when a template does not exist. Optional arguments: backend The template backend class used when raising this exception. tried A list of sources that were tried when finding the template. This is formatted as a list of tuples containing (origin, status), where origin is an Origin object or duck type and status is a string with the reason the template wasn't found. chain A list of intermediate TemplateDoesNotExist exceptions. This is used to encapsulate multiple exceptions when loading templates from multiple engines.",
    "type": "class",
    "file_path": "django\\django\\template\\exceptions.py",
    "ast_data": "ClassDef name:TemplateDoesNotExist FunctionDef name:__init__ arg:self arg:msg arg:tried arg:backend arg:chain arguments arg arg arg arg arg Assign If Compare Assign Assign If Compare Assign Assign Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_gather_to",
    "source_code": "def _gather_to(self, value, destinations, axis, options=None):\n    _require_cross_replica_or_default_context_extended(self)\n    assert not isinstance(destinations, (list, tuple))\n    if options is None:\n        options = collective_util.Options()\n    return self._gather_to_implementation(value, destinations, axis, options)",
    "docstring": "Gather across replicas along axis-th dimension to . gathers or -like object, along -th dimension. It supports only dense tensors but NOT sparse tensor. This API can only be called in cross-replica context. Args: value: a , or a like object. destinations: a , a , a alike object, or a device string. It specifies the devices to reduce to. To perform an all-gather, pass the same to and . Note that if it's a , the value is reduced to the devices of that variable, and this method doesn't update the variable. axis: 0-D int32 Tensor. Dimension along which to gather. Must be in the range [0, rank(value)). options: a . Options to perform collective operations. This overrides the default options if the takes one in the constructor. See for details of the options. Returns: A tensor or value gathered to .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\distribute_lib.py",
    "ast_data": "FunctionDef name:_gather_to arg:self arg:value arg:destinations arg:axis arg:options arguments arg arg arg arg arg Call Call If Compare Assign Call Return return:yes Call"
  },
  {
    "library": "kornia",
    "name": "r",
    "source_code": "@property\ndef r(self) -> So2:\n    return self._rotation",
    "docstring": "Return the underlying .",
    "type": "method",
    "file_path": "kornia\\kornia\\geometry\\liegroup\\se2.py",
    "ast_data": "FunctionDef name:r arg:self arguments arg Return return:yes"
  },
  {
    "library": "authlib",
    "name": "fetch_access_token",
    "source_code": "async def fetch_access_token(self, redirect_uri=None, **kwargs):\n    metadata = await self.load_server_metadata()\n    token_endpoint = self.access_token_url or metadata.get('token_endpoint')\n    async with self._get_oauth_client(**metadata) as client:\n        if redirect_uri is not None:\n            client.redirect_uri = redirect_uri\n        params = {}\n        if self.access_token_params:\n            params.update(self.access_token_params)\n        params.update(kwargs)\n        token = await client.fetch_token(token_endpoint, **params)\n    return token",
    "docstring": "Fetch access token in the final step. :param redirect_uri: Callback or Redirect URI that is used in previous :meth:. :param kwargs: Extra parameters to fetch access token. :return: A token dict.",
    "type": "method",
    "file_path": "authlib\\authlib\\integrations\\base_client\\async_app.py",
    "ast_data": "AsyncFunctionDef name:fetch_access_token arg:self arg:redirect_uri arguments arg arg arg Assign Call Assign BoolOp Call Call If Compare Assign Assign If Call Call Assign Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "configs",
    "source_code": "@property\ndef configs(self) -> list[BackendPatternConfig]:\n    return list(self._pattern_complex_format_to_config.values())",
    "docstring": "Return a copy of the list of configs set in this .",
    "type": "method",
    "file_path": "pytorch\\torch\\ao\\quantization\\backend_config\\backend_config.py",
    "ast_data": "FunctionDef name:configs arg:self arguments arg Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "Server",
    "source_code": "@tf_export('distribute.experimental.rpc.Server', v1=[])\nclass Server(object):\n\n    @staticmethod\n    def create(rpc_layer, address):\n        if rpc_layer != 'grpc':\n            raise ValueError('Only GRPC backend is supported at the moment.')\n        return GrpcServer(address=address)\n\n    def register(self, method_name: str, func: Union[def_function.Function, tf_function.ConcreteFunction]):\n        raise NotImplementedError('Please use create_server method to create aconcrete subclass of Server.')\n\n    def start(self):\n        raise NotImplementedError('Please use create_server method to create aconcrete subclass of Server.')",
    "docstring": "A Server base class for accepting RPCs for registered tf.functions. Functions can be registered on the server and are exposed via RPCs.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\experimental\\rpc\\rpc_ops.py",
    "ast_data": "ClassDef name:Server FunctionDef name:create arg:rpc_layer arg:address arguments arg arg If Compare Raise Call Return return:yes Call FunctionDef name:register arg:self arg:method_name arg:func arguments arg arg arg Raise Call FunctionDef name:start arg:self arguments arg Raise Call Call"
  },
  {
    "library": "pandas",
    "name": "get_resampler",
    "source_code": "def get_resampler(obj: Series | DataFrame, **kwds) -> Resampler:\n    tg = TimeGrouper(obj, **kwds)\n    return tg._get_resampler(obj)",
    "docstring": "Create a TimeGrouper and return our resampler.",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\resample.py",
    "ast_data": "FunctionDef name:get_resampler arg:obj arguments arg arg Assign Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_cache_dispatch",
    "source_code": "def _cache_dispatch(self, request, target):\n    if target is not None:\n        if len(self._dispatch_cache) > _MAX_DISPATCH_CACHE:\n            self._dispatch_cache.popitem(last=False)\n        self._dispatch_cache[request] = target",
    "docstring": "Caches the dispatch lookup result for a target.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\core\\function\\polymorphism\\type_dispatch.py",
    "ast_data": "FunctionDef name:_cache_dispatch arg:self arg:request arg:target arguments arg arg arg If Compare If Compare Call Call Assign"
  },
  {
    "library": "pygame",
    "name": "sprites",
    "source_code": "def sprites(self):\n    return list(self.spritedict)",
    "docstring": "get a list of sprites in the group Group.sprites(): return list Returns an object that can be looped over with a 'for' loop. (For now, it is always a list, but this could change in a future version of pygame.) Alternatively, you can get the same information by iterating directly over the sprite group, e.g. 'for sprite in group'.",
    "type": "method",
    "file_path": "pygame\\src_py\\sprite.py",
    "ast_data": "FunctionDef name:sprites arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "getmro",
    "source_code": "def getmro(cls):\n    return _inspect.getmro(cls)",
    "docstring": "TFDecorator-aware replacement for inspect.getmro.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\util\\tf_inspect.py",
    "ast_data": "FunctionDef name:getmro arg:cls arguments arg Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "add_state",
    "source_code": "def add_state(self, state):\n    self._validate_state(state)\n    self._state.add(state)",
    "docstring": "Add a state to define the widget's behavior. See the parameters for details. Parameters ---------- state : str Must be a supported state of the selector. See the parameters for details. Raises ------ ValueError When the state is not supported by the selector.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\widgets.py",
    "ast_data": "FunctionDef name:add_state arg:self arg:state arguments arg arg Call Call"
  },
  {
    "library": "tensorflow",
    "name": "with_stdout_output",
    "source_code": "def with_stdout_output(self):\n    self._options['output'] = 'stdout'\n    return self",
    "docstring": "Print the result to stdout.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\profiler\\option_builder.py",
    "ast_data": "FunctionDef name:with_stdout_output arg:self arguments arg Assign Return return:yes"
  },
  {
    "library": "django",
    "name": "changes",
    "source_code": "def changes(self, graph, trim_to_apps=None, convert_apps=None, migration_name=None):\n    changes = self._detect_changes(convert_apps, graph)\n    changes = self.arrange_for_graph(changes, graph, migration_name)\n    if trim_to_apps:\n        changes = self._trim_to_apps(changes, trim_to_apps)\n    return changes",
    "docstring": "Main entry point to produce a list of applicable changes. Take a graph to base names on and an optional set of apps to try and restrict to (restriction is not guaranteed)",
    "type": "method",
    "file_path": "django\\django\\db\\migrations\\autodetector.py",
    "ast_data": "FunctionDef name:changes arg:self arg:graph arg:trim_to_apps arg:convert_apps arg:migration_name arguments arg arg arg arg arg Assign Call Assign Call If Assign Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "register_torch_dispatch",
    "source_code": "def register_torch_dispatch(self, torch_dispatch_class: Any, fn: Optional[Callable]=None, /) -> Callable:\n\n    def register(fn):\n        if torch_dispatch_class not in self._torch_dispatch_fns:\n\n            def inner(*args, **kwargs):\n                return self._torch_dispatch_fns[torch_dispatch_class](*args, **kwargs)\n            self._lib._register_torch_dispatch_rule(self._name, torch_dispatch_class, inner)\n        self._torch_dispatch_fns[torch_dispatch_class] = fn\n        return fn\n    if fn is None:\n        return register\n    else:\n        return register(fn)",
    "docstring": "Registers a torch_dispatch rule for the given operator and `torch.library.register_torch_dispatch` for examples and more details.",
    "type": "method",
    "file_path": "pytorch\\torch\\_library\\custom_ops.py",
    "ast_data": "FunctionDef name:register_torch_dispatch arguments arg arg arg FunctionDef name:register arg:fn arguments arg If Compare FunctionDef name:inner arguments arg arg Return return:yes Call Call Assign Return return:yes If Compare Return return:yes Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "output",
    "source_code": "def output(self, u, t, x0=None):\n    return dlsim(self, u, t, x0=x0)",
    "docstring": "Return the response of the discrete-time system to input . See for details.",
    "type": "method",
    "file_path": "scipy\\scipy\\signal\\_ltisys.py",
    "ast_data": "FunctionDef name:output arg:self arg:u arg:t arg:x0 arguments arg arg arg arg Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "_auto_wrap_is_configured",
    "source_code": "def _auto_wrap_is_configured(estimator):\n    auto_wrap_output_keys = getattr(estimator, '_sklearn_auto_wrap_output_keys', set())\n    return hasattr(estimator, 'get_feature_names_out') and 'transform' in auto_wrap_output_keys",
    "docstring": "Return True if estimator is configured for auto-wrapping the transform method. sets to if auto wrapping is manually disabled.",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\utils\\_set_output.py",
    "ast_data": "FunctionDef name:_auto_wrap_is_configured arg:estimator arguments arg Assign Call Call Return return:yes BoolOp Call Compare"
  },
  {
    "library": "tensorflow",
    "name": "_stop",
    "source_code": "def _stop(self) -> None:\n    self._server.stop()",
    "docstring": "Stops the server. Raises: tf.errors.OpError: Or one of its subclasses if an error occurs while stopping the server.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\data\\experimental\\service\\server_lib.py",
    "ast_data": "FunctionDef name:_stop arg:self arguments arg Call"
  },
  {
    "library": "tensorflow",
    "name": "ismethod",
    "source_code": "def ismethod(object):\n    return _inspect.ismethod(tf_decorator.unwrap(object)[1])",
    "docstring": "TFDecorator-aware replacement for inspect.ismethod.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\utils\\tf_inspect.py",
    "ast_data": "FunctionDef name:ismethod arg:object arguments arg Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "add_metadata_json",
    "source_code": "def add_metadata_json(self, key: str, value: str):\n    torch.autograd._add_metadata_json(key, value)",
    "docstring": "Adds a user defined metadata with a string key and a valid json value into the trace file",
    "type": "method",
    "file_path": "pytorch\\torch\\profiler\\profiler.py",
    "ast_data": "FunctionDef name:add_metadata_json arg:self arg:key arg:value arguments arg arg arg Call"
  },
  {
    "library": "scipy",
    "name": "_random",
    "source_code": "def _random(self, n: IntNumber=1, *, workers: IntNumber=1) -> np.ndarray:\n    workers = _validate_workers(workers)\n    sample = [van_der_corput(n, bdim, start_index=self.num_generated, scramble=self.scramble, permutations=self._permutations[i], workers=workers) for i, bdim in enumerate(self.base)]\n    return np.array(sample).T.reshape(n, self.d)",
    "docstring": "Draw in the half-open interval `n10^3`. Returns ------- sample : array_like (n, d) QMC sample.",
    "type": "method",
    "file_path": "scipy\\scipy\\stats\\_qmc.py",
    "ast_data": "FunctionDef name:_random arg:self arg:n arguments arg arg arg Assign Call Assign Call Call Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "send",
    "source_code": "@_exception_logger\ndef send(tensor: torch.Tensor, dst: Optional[int]=None, group: Optional[ProcessGroup]=None, tag: int=0, group_dst: Optional[int]=None) -> None:\n    group = _group_or_default_group(group)\n    group_dst = _canonicalize_group_rank(group, dst, group_dst)\n    _check_not_self_rank(group, group_dst, 'destination')\n    work = isend(tensor, group=group, tag=tag, group_dst=group_dst)\n    if work is not None:\n        work.wait()",
    "docstring": "Send a tensor synchronously. .. warning:: ``.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\distributed_c10d.py",
    "ast_data": "FunctionDef name:send arg:tensor arg:dst arg:group arg:tag arg:group_dst arguments arg arg arg arg arg Assign Call Assign Call Call Assign Call If Compare Call"
  },
  {
    "library": "matplotlib",
    "name": "_copy_tick_props",
    "source_code": "def _copy_tick_props(self, src, dest):\n    if src is None or dest is None:\n        return\n    dest.label1.update_from(src.label1)\n    dest.label2.update_from(src.label2)\n    dest.tick1line.update_from(src.tick1line)\n    dest.tick2line.update_from(src.tick2line)\n    dest.gridline.update_from(src.gridline)\n    dest.update_from(src)\n    dest._loc = src._loc\n    dest._size = src._size\n    dest._width = src._width\n    dest._base_pad = src._base_pad\n    dest._labelrotation = src._labelrotation\n    dest._zorder = src._zorder\n    dest._tickdir = src._tickdir",
    "docstring": "Copy the properties from *src* tick to *dest* tick.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\axis.py",
    "ast_data": "FunctionDef name:_copy_tick_props arg:self arg:src arg:dest arguments arg arg arg If BoolOp Compare Compare Return return:no Call Call Call Call Call Call Assign Assign Assign Assign Assign Assign Assign"
  },
  {
    "library": "tensorflow",
    "name": "to_proto",
    "source_code": "def to_proto(self, export_scope=None):\n    if export_scope is None or self.queue.name.startswith(export_scope):\n        queue_runner_def = queue_runner_pb2.QueueRunnerDef()\n        queue_runner_def.queue_name = ops.strip_name_scope(self.queue.name, export_scope)\n        for enqueue_op in self.enqueue_ops:\n            queue_runner_def.enqueue_op_name.append(ops.strip_name_scope(enqueue_op.name, export_scope))\n        queue_runner_def.close_op_name = ops.strip_name_scope(self.close_op.name, export_scope)\n        queue_runner_def.cancel_op_name = ops.strip_name_scope(self.cancel_op.name, export_scope)\n        queue_runner_def.queue_closed_exception_types.extend([errors.error_code_from_exception_type(cls) for cls in self._queue_closed_exception_types])\n        return queue_runner_def\n    else:\n        return None",
    "docstring": "Converts this to a protocol buffer. Args: export_scope: Optional . Name scope to remove. Returns: A protocol buffer, or if the is not in the specified name scope.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\training\\queue_runner_impl.py",
    "ast_data": "FunctionDef name:to_proto arg:self arg:export_scope arguments arg arg If BoolOp Compare Call Assign Call Assign Call For Call Call Assign Call Assign Call Call Call Return return:yes Return return:no"
  },
  {
    "library": "pytorch",
    "name": "ReLU6",
    "source_code": "class ReLU6(torch.nn.ReLU):\n\n    def __init__(self, inplace=False):\n        super().__init__(inplace)\n        self.inplace = inplace\n\n    def forward(self, input):\n        return torch.ops.quantized.relu6(input, self.inplace)\n\n    def _get_name(self):\n        return 'QuantizedReLU6'\n\n    @staticmethod\n    def from_float(mod, use_precomputed_fake_quant=False):\n        return ReLU6(mod.inplace)",
    "docstring": "Applies the element-wise function: :math:, where :math: is the zero_point, and :math: is the quantized representation of number 6. Args: inplace: can optionally do the operation in-place. Default: `(N, *)*(N, *)`, same shape as the input .. image:: ../scripts/activation_images/ReLU6.png Examples:: >>> m = nn.quantized.ReLU6() >>> input = torch.randn(2) >>> # xdoctest: +SKIP >>> input = torch.quantize_per_tensor(input, 1.0, 0, dtype=torch.qint32) >>> output = m(input)",
    "type": "class",
    "file_path": "pytorch\\torch\\ao\\nn\\quantized\\modules\\activation.py",
    "ast_data": "ClassDef name:ReLU6 FunctionDef name:__init__ arg:self arg:inplace arguments arg arg Call Call Assign FunctionDef name:forward arg:self arg:input arguments arg arg Return return:yes Call FunctionDef name:_get_name arg:self arguments arg Return return:yes FunctionDef name:from_float arg:mod arg:use_precomputed_fake_quant arguments arg arg Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "get_xheight",
    "source_code": "def get_xheight(self):\n    return self._header[b'XHeight']",
    "docstring": "Return the xheight as float.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\_afm.py",
    "ast_data": "FunctionDef name:get_xheight arg:self arguments arg Return return:yes"
  },
  {
    "library": "pandas",
    "name": "_get_empty_reduction_result",
    "source_code": "def _get_empty_reduction_result(shape: Shape, axis: AxisInt) -> np.ndarray:\n    shp = np.array(shape)\n    dims = np.arange(len(shape))\n    ret = np.empty(shp[dims != axis], dtype=np.float64)\n    ret.fill(np.nan)\n    return ret",
    "docstring": "The result from a reduction on an empty ndarray. Parameters ---------- shape : Tuple[int, ...] axis : int Returns ------- np.ndarray",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\nanops.py",
    "ast_data": "FunctionDef name:_get_empty_reduction_result arg:shape arg:axis arguments arg arg Assign Call Assign Call Call Assign Call Compare Call Return return:yes"
  },
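A self-contained copy of the helper above, showing that reducing an empty (0, 3) array along axis 0 produces a NaN-filled result of shape (3,):

```python
import numpy as np

def get_empty_reduction_result(shape, axis):
    # Same logic as above: drop `axis` from the shape and fill with NaN.
    shp = np.array(shape)
    dims = np.arange(len(shape))
    ret = np.empty(shp[dims != axis], dtype=np.float64)
    ret.fill(np.nan)
    return ret

print(get_empty_reduction_result((0, 3), axis=0))  # [nan nan nan]
```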
  {
    "library": "virtualenv",
    "name": "session_via_cli",
    "source_code": "def session_via_cli(args, options=None, setup_logging=True, env=None):\n    env = os.environ if env is None else env\n    parser, elements = build_parser(args, options, setup_logging, env)\n    options = parser.parse_args(args)\n    options.py_version = parser._interpreter.version_info\n    creator, seeder, activators = tuple((e.create(options) for e in elements))\n    return Session(options.verbosity, options.app_data, parser._interpreter, creator, seeder, activators)",
    "docstring": "Create a virtualenv session (same as cli_run, but this does not perform the creation). Use this if you just want to query what the virtual environment would look like, but not actually create it. :param args: the command line arguments :param options: passing in a `` to use handlers already registered :param env: environment variables to use :return: the session object of the creation (its structure for now is experimental and might change on short notice)",
    "type": "function",
    "file_path": "virtualenv\\src\\virtualenv\\run\\__init__.py",
    "ast_data": "FunctionDef name:session_via_cli arg:args arg:options arg:setup_logging arg:env arguments arg arg arg arg Assign Compare Assign Call Assign Call Assign Assign Call Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "state_dict",
    "source_code": "def state_dict(self) -> dict[str, Any]:\n    ...",
    "docstring": "Objects should return their state_dict representation as a dictionary. The output of this function will be checkpointed, and later restored in . .. warning:: Because of the inplace nature of restoring a checkpoint, this function is also called during . Returns: Dict: The objects state dict",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\checkpoint\\stateful.py",
    "ast_data": "FunctionDef name:state_dict arg:self arguments arg"
  },
  {
    "library": "django",
    "name": "explain",
    "source_code": "def explain(self, *, format=None, **options):\n    return self.query.explain(using=self.db, format=format, **options)",
    "docstring": "Runs an EXPLAIN on the SQL query this QuerySet would perform, and returns the results.",
    "type": "method",
    "file_path": "django\\django\\db\\models\\query.py",
    "ast_data": "FunctionDef name:explain arg:self arguments arg arg arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "prepare_aot",
    "source_code": "def prepare_aot(aot: list[str], srcs_dir: str) -> None:\n    for file in aot:\n        if 'external/local_tsl/' in file:\n            copy_file(file, srcs_dir, 'external/local_tsl/')\n        elif 'external/local_xla/' in file:\n            copy_file(file, srcs_dir, 'external/local_xla/')\n        else:\n            copy_file(file, srcs_dir)\n    shutil.move(os.path.join(srcs_dir, 'tensorflow/tools/pip_package/xla_build/CMakeLists.txt'), os.path.join(srcs_dir, 'CMakeLists.txt'))",
    "docstring": "Rearrange xla_aot files in target the target directory. Args: aot: a list of paths to files that should be in xla_aot directory. srcs_dir: target directory where files are copied to.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\tools\\pip_package\\build_pip_package.py",
    "ast_data": "FunctionDef name:prepare_aot arg:aot arg:srcs_dir arguments arg arg For If Compare Call If Compare Call Call Call Call Call"
  },
  {
    "library": "django",
    "name": "ExtractWeekDay",
    "source_code": "class ExtractWeekDay(Extract):\n    lookup_name = 'week_day'",
    "docstring": "Return Sunday=1 through Saturday=7. To replicate this in Python: (mydatetime.isoweekday() % 7) + 1",
    "type": "class",
    "file_path": "django\\django\\db\\models\\functions\\datetime.py",
    "ast_data": "ClassDef name:ExtractWeekDay Assign"
  },
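A quick check of the Python replication given in the docstring above, using two dates whose weekdays are known:

```python
from datetime import datetime

# Replicate the SQL `week_day` lookup in Python: Sunday=1 ... Saturday=7.
d = datetime(2024, 1, 7)               # a Sunday; isoweekday() == 7
assert (d.isoweekday() % 7) + 1 == 1   # Sunday maps to 1
d = datetime(2024, 1, 6)               # a Saturday; isoweekday() == 6
assert (d.isoweekday() % 7) + 1 == 7   # Saturday maps to 7
```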
  {
    "library": "tensorflow",
    "name": "predict_generator",
    "source_code": "def predict_generator(self, generator, steps=None, callbacks=None, max_queue_size=10, workers=1, use_multiprocessing=False, verbose=0):\n    warnings.warn('`Model.predict_generator` is deprecated and will be removed in a future version. Please use `Model.predict`, which supports generators.')\n    return self.predict(generator, steps=steps, max_queue_size=max_queue_size, workers=workers, use_multiprocessing=use_multiprocessing, verbose=verbose, callbacks=callbacks)",
    "docstring": "Generates predictions for the input samples from a data generator. DEPRECATED: now supports generators, so there is no longer any need to use this endpoint.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\training.py",
    "ast_data": "FunctionDef name:predict_generator arg:self arg:generator arg:steps arg:callbacks arg:max_queue_size arg:workers arg:use_multiprocessing arg:verbose arguments arg arg arg arg arg arg arg arg Call Return return:yes Call"
  },
  {
    "library": "sphinx",
    "name": "_get_tls_cacert",
    "source_code": "def _get_tls_cacert(url: str, certs: str | dict[str, str] | None) -> str | bool:\n    if not certs:\n        return True\n    elif isinstance(certs, str | tuple):\n        return certs\n    else:\n        hostname = urlsplit(url).netloc\n        if '@' in hostname:\n            _, hostname = hostname.split('@', 1)\n        return certs.get(hostname, True)",
    "docstring": "Get additional CA cert for a specific URL.",
    "type": "function",
    "file_path": "sphinx\\sphinx\\util\\requests.py",
    "ast_data": "FunctionDef name:_get_tls_cacert arg:url arg:certs arguments arg arg If Return return:yes If Call Return return:yes Assign Call If Compare Assign Call Return return:yes Call"
  },
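A standalone sketch of the same CA-cert lookup (the `str | tuple` isinstance check rewritten in pre-3.10 tuple form); the certs mapping and URLs are illustrative:

```python
from urllib.parse import urlsplit

def get_tls_cacert(url, certs):
    # True means verify with defaults; a string is a cert path; a dict maps
    # hostnames (netloc without userinfo) to per-host cert paths.
    if not certs:
        return True
    elif isinstance(certs, (str, tuple)):
        return certs
    hostname = urlsplit(url).netloc
    if '@' in hostname:
        _, hostname = hostname.split('@', 1)
    return certs.get(hostname, True)

certs = {'example.org': '/etc/ssl/example-ca.pem'}
print(get_tls_cacert('https://user@example.org/inv', certs))  # /etc/ssl/example-ca.pem
print(get_tls_cacert('https://other.net/inv', certs))         # True
```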
  {
    "library": "pandas",
    "name": "_get_slice_axis",
    "source_code": "def _get_slice_axis(self, slice_obj: slice, axis: AxisInt):\n    obj = self.obj\n    if not need_slice(slice_obj):\n        return obj.copy(deep=False)\n    labels = obj._get_axis(axis)\n    indexer = labels.slice_indexer(slice_obj.start, slice_obj.stop, slice_obj.step)\n    if isinstance(indexer, slice):\n        return self.obj._slice(indexer, axis=axis)\n    else:\n        return self.obj.take(indexer, axis=axis)",
    "docstring": "This is pretty simple as we just have to deal with labels.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\indexing.py",
    "ast_data": "FunctionDef name:_get_slice_axis arg:self arg:slice_obj arg:axis arguments arg arg arg Assign If Call Return return:yes Call Assign Call Assign Call If Call Return return:yes Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "Linear",
    "source_code": "class Linear(torch.ao.nn.qat.Linear):\n\n    def __init__(self, in_features: int, out_features: int, bias: bool=True, qconfig: Optional['QConfig']=None, device: Optional[Union[int, str, torch.device]]=None, dtype: Optional[str]=None) -> None:\n        super().__init__(in_features, out_features, bias, qconfig, device, dtype)\n        if not torch.ao.quantization.qconfig._activation_is_memoryless(qconfig):\n            raise ValueError('Dynamic QAT requires a memoryless observer.' + 'This means a MovingAverage observer with averaging constant equal to 1')",
    "docstring": "A linear module attached with FakeQuantize modules for weight, used for dynamic quantization aware training. We adopt the same interface as , please see for documentation. Similar to , with FakeQuantize modules initialized to default.",
    "type": "class",
    "file_path": "pytorch\\torch\\ao\\nn\\qat\\dynamic\\modules\\linear.py",
    "ast_data": "ClassDef name:Linear FunctionDef name:__init__ arg:self arg:in_features arg:out_features arg:bias arg:qconfig arg:device arg:dtype arguments arg arg arg arg arg arg arg Call Call If Call Raise Call"
  },
  {
    "library": "scikit-learn",
    "name": "_encode_relation",
    "source_code": "def _encode_relation(self, name):\n    for char in ' %{},':\n        if char in name:\n            name = '\"%s\"' % name\n            break\n    return '%s %s' % (_TK_RELATION, name)",
    "docstring": "(INTERNAL) Decodes a relation line. The relation declaration is a line with the format `` is a string. :param name: a string. :return: a string with the encoded relation declaration.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\externals\\_arff.py",
    "ast_data": "FunctionDef name:_encode_relation arg:self arg:name arguments arg arg For If Compare Assign Return return:yes"
  },
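A self-contained sketch of the quoting rule above: relation names containing ARFF-special characters get wrapped in double quotes. The `_TK_RELATION` value is assumed here to be the usual '@RELATION' token:

```python
_TK_RELATION = '@RELATION'  # assumed token value

def encode_relation(name):
    # Quote the relation name if it contains characters special to ARFF.
    for char in ' %{},':
        if char in name:
            name = '"%s"' % name
            break
    return '%s %s' % (_TK_RELATION, name)

print(encode_relation('weather'))      # @RELATION weather
print(encode_relation('my data set'))  # @RELATION "my data set"
```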
  {
    "library": "matplotlib",
    "name": "_get_w_centers_ranges",
    "source_code": "def _get_w_centers_ranges(self):\n    minx, maxx, miny, maxy, minz, maxz = self.get_w_lims()\n    cx = (maxx + minx) / 2\n    cy = (maxy + miny) / 2\n    cz = (maxz + minz) / 2\n    dx = maxx - minx\n    dy = maxy - miny\n    dz = maxz - minz\n    return (cx, cy, cz, dx, dy, dz)",
    "docstring": "Get 3D world centers and axis ranges.",
    "type": "method",
    "file_path": "matplotlib\\lib\\mpl_toolkits\\mplot3d\\axes3d.py",
    "ast_data": "FunctionDef name:_get_w_centers_ranges arg:self arguments arg Assign Call Assign Assign Assign Assign Assign Assign Return return:yes"
  },
  {
    "library": "pandas",
    "name": "attrs",
    "source_code": "@property\ndef attrs(self) -> dict[Hashable, Any]:\n    return self._attrs",
    "docstring": "Dictionary of global attributes of this dataset. .. warning:: attrs is experimental and may change without warning. See Also -------- DataFrame.flags : Global flags applying to this object. Notes ----- Many operations that create new datasets will copy ``. Examples -------- For Series: >>> ser = pd.Series([1, 2, 3]) >>> ser.attrs = {\"A\": [10, 20, 30]} >>> ser.attrs {'A': [10, 20, 30]} For DataFrame: >>> df = pd.DataFrame({\"A\": [1, 2], \"B\": [3, 4]}) >>> df.attrs = {\"A\": [10, 20, 30]} >>> df.attrs {'A': [10, 20, 30]}",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\generic.py",
    "ast_data": "FunctionDef name:attrs arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "get_sparse_tensors",
    "source_code": "@abc.abstractmethod\ndef get_sparse_tensors(self, transformation_cache, state_manager):\n    pass",
    "docstring": "Returns an IdWeightPair. is a pair of s which represents ids and weights. is typically a x of . is either a of or to indicate all weights should be taken to be 1. If specified, must have exactly the same shape and indices as . Expected is same as parsing output of a which is a ragged matrix. Args: transformation_cache: A object to access features. state_manager: A to create / access resources such as lookup tables.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\feature_column\\feature_column_v2.py",
    "ast_data": "FunctionDef name:get_sparse_tensors arg:self arg:transformation_cache arg:state_manager arguments arg arg arg"
  },
  {
    "library": "django",
    "name": "_check_fieldsets",
    "source_code": "def _check_fieldsets(self, obj):\n    if obj.fieldsets is None:\n        return []\n    elif not isinstance(obj.fieldsets, (list, tuple)):\n        return must_be('a list or tuple', option='fieldsets', obj=obj, id='admin.E007')\n    else:\n        seen_fields = []\n        return list(chain.from_iterable((self._check_fieldsets_item(obj, fieldset, 'fieldsets[%d]' % index, seen_fields) for index, fieldset in enumerate(obj.fieldsets))))",
    "docstring": "Check that fieldsets is properly formatted and doesn't contain duplicates.",
    "type": "method",
    "file_path": "django\\django\\contrib\\admin\\checks.py",
    "ast_data": "FunctionDef name:_check_fieldsets arg:self arg:obj arguments arg arg If Compare Return return:no If Call Return return:yes Call Assign Return return:yes Call Call Call Call"
  },
  {
    "library": "matplotlib",
    "name": "restore_region",
    "source_code": "def restore_region(self, region, bbox=None, xy=None):\n    if bbox is not None or xy is not None:\n        if bbox is None:\n            x1, y1, x2, y2 = region.get_extents()\n        elif isinstance(bbox, BboxBase):\n            x1, y1, x2, y2 = bbox.extents\n        else:\n            x1, y1, x2, y2 = bbox\n        if xy is None:\n            ox, oy = (x1, y1)\n        else:\n            ox, oy = xy\n        self._renderer.restore_region(region, int(x1), int(y1), int(x2), int(y2), int(ox), int(oy))\n    else:\n        self._renderer.restore_region(region)",
    "docstring": "Restore the saved region. If bbox (instance of BboxBase, or its extents) is given, only the region specified by the bbox will be restored. *xy* (a pair of floats) optionally specifies the new position (the LLC of the original region, not the LLC of the bbox) where the region will be restored. >>> region = renderer.copy_from_bbox() >>> x1, y1, x2, y2 = region.get_extents() >>> renderer.restore_region(region, bbox=(x1+dx, y1, x2, y2), ... xy=(x1-dx, y1))",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\backends\\backend_agg.py",
    "ast_data": "FunctionDef name:restore_region arg:self arg:region arg:bbox arg:xy arguments arg arg arg arg If BoolOp Compare Compare If Compare Assign Call If Call Assign Assign If Compare Assign Assign Call Call Call Call Call Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "GenerateModelReport",
    "source_code": "def GenerateModelReport(metagraph, assume_valid_feeds=True, debug=False):\n    return tf_wrap.GenerateModelReport(metagraph.SerializeToString(), assume_valid_feeds, debug)",
    "docstring": "Report what's known statically about each node in the provided metagraph. Args: metagraph: A TensorFlow MetaGraphDef. assume_valid_feeds: If True, assume that the shape of the fed nodes is valid debug: Add some information useful for debugging. Returns: A string containing the report.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\grappler\\model_analyzer.py",
    "ast_data": "FunctionDef name:GenerateModelReport arg:metagraph arg:assume_valid_feeds arg:debug arguments arg arg arg Return return:yes Call Call"
  },
  {
    "library": "authlib",
    "name": "get_headers",
    "source_code": "def get_headers(self):\n    headers = super().get_headers()\n    extras = []\n    if self.realm:\n        extras.append(f'realm=\"{self.realm}\"')\n    if self.extra_attributes:\n        extras.extend([f'{k}=\"{self.extra_attributes[k]}\"' for k in self.extra_attributes])\n    extras.append(f'error=\"{self.error}\"')\n    error_description = self.get_error_description()\n    extras.append(f'error_description=\"{error_description}\"')\n    headers.append(('WWW-Authenticate', 'Bearer ' + ', '.join(extras)))\n    return headers",
    "docstring": "If the protected resource request does not include authentication credentials or does not contain an access token that enables access to the protected resource, the resource server MUST include the HTTP \"WWW-Authenticate\" response header field; it MAY include it in response to other conditions as well.",
    "type": "method",
    "file_path": "authlib\\authlib\\oauth2\\rfc6750\\errors.py",
    "ast_data": "FunctionDef name:get_headers arg:self arguments arg Assign Call Call Assign If Call If Call Call Assign Call Call Call Call Return return:yes"
  },
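A rough sketch of how the `WWW-Authenticate` challenge above is assembled; the realm and error values are illustrative, and the extras list is simplified (no `extra_attributes`):

```python
# Build a Bearer challenge string in the style of the method above.
realm = 'example'
error = 'invalid_token'
error_description = 'The access token expired'

extras = [f'realm="{realm}"',
          f'error="{error}"',
          f'error_description="{error_description}"']
www_authenticate = 'Bearer ' + ', '.join(extras)
print(www_authenticate)
# Bearer realm="example", error="invalid_token", error_description="The access token expired"
```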
  {
    "library": "kornia",
    "name": "RgbToYuv",
    "source_code": "class RgbToYuv(Module):\n    ONNX_DEFAULT_INPUTSHAPE: ClassVar[list[int]] = [-1, 3, -1, -1]\n    ONNX_DEFAULT_OUTPUTSHAPE: ClassVar[list[int]] = [-1, 3, -1, -1]\n\n    def forward(self, input: Tensor) -> Tensor:\n        return rgb_to_yuv(input)",
    "docstring": "Convert an image from RGB to YUV. The image data is assumed to be in the range of :math:. YUV formula follows M/PAL values (see _, Table 2, items 2.5 and 2.6). Returns: YUV version of the image. Shape: - image: :math: - output: :math: Examples: >>> input = torch.rand(2, 3, 4, 5) >>> yuv = RgbToYuv() >>> output = yuv(input) # 2x3x4x5 Reference:: [1]",
    "type": "class",
    "file_path": "kornia\\kornia\\color\\yuv.py",
    "ast_data": "ClassDef name:RgbToYuv FunctionDef name:forward arg:self arg:input arguments arg arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "split_hostlist",
    "source_code": "def split_hostlist(hostlist):\n    in_brackets = False\n    cur_host = ''\n    for c in hostlist:\n        if in_brackets:\n            assert c != '['\n            if c == ']':\n                in_brackets = False\n        elif c == '[':\n            in_brackets = True\n        elif c == ',':\n            assert cur_host != ''\n            yield cur_host\n            cur_host = ''\n            continue\n        cur_host += c\n    if cur_host:\n        yield cur_host",
    "docstring": "Split hostlist at commas outside of range expressions ('[3-5]').",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\cluster_resolver\\slurm_cluster_resolver.py",
    "ast_data": "FunctionDef name:split_hostlist arg:hostlist arguments arg Assign Assign For If Compare If Compare Assign If Compare Assign If Compare Compare Assign If"
  },
  {
    "library": "pytorch",
    "name": "vmap",
    "source_code": "@deprecated('Please use `torch.vmap` instead of `torch._vmap_internals.vmap`.', category=FutureWarning)\ndef vmap(func: Callable, in_dims: in_dims_t=0, out_dims: out_dims_t=0) -> Callable:\n    return _vmap(func, in_dims, out_dims)",
    "docstring": "Please use torch.vmap instead of this API.",
    "type": "function",
    "file_path": "pytorch\\torch\\_vmap_internals.py",
    "ast_data": "FunctionDef name:vmap arg:func arg:in_dims arg:out_dims arguments arg arg arg Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "is_statically_launchable",
    "source_code": "def is_statically_launchable(self):\n    if not self.compile_results:\n        return False\n    return all((isinstance(x, StaticTritonCompileResult) for x in self.compile_results))",
    "docstring": "Checks if every compiled kernel is statically launchable, which allows us to efficiently cache it in FXGraphCache",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\runtime\\triton_heuristics.py",
    "ast_data": "FunctionDef name:is_statically_launchable arg:self arguments arg If Return return:yes Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "binary_crossentropy",
    "source_code": "@dispatch.add_dispatch_support\n@doc_controls.do_not_generate_docs\ndef binary_crossentropy(target, output, from_logits=False):\n    target = tensor_conversion.convert_to_tensor_v2_with_dispatch(target)\n    output = tensor_conversion.convert_to_tensor_v2_with_dispatch(output)\n    if hasattr(output, '_keras_logits'):\n        output = output._keras_logits\n        if from_logits:\n            warnings.warn('\"`binary_crossentropy` received `from_logits=True`, but the `output` argument was produced by a sigmoid or softmax activation and thus does not represent logits. Was this intended?\"')\n        from_logits = True\n    if from_logits:\n        return nn.sigmoid_cross_entropy_with_logits(labels=target, logits=output)\n    if (not isinstance(output, (ops.EagerTensor, variables_module.Variable)) and output.op.type == 'Sigmoid') and (not hasattr(output, '_keras_history')):\n        assert len(output.op.inputs) == 1\n        output = output.op.inputs[0]\n        return nn.sigmoid_cross_entropy_with_logits(labels=target, logits=output)\n    epsilon_ = _constant_to_tensor(epsilon(), output.dtype.base_dtype)\n    output = clip_ops.clip_by_value(output, epsilon_, 1.0 - epsilon_)\n    bce = target * math_ops.log(output + epsilon())\n    bce += (1 - target) * math_ops.log(1 - output + epsilon())\n    return -bce",
    "docstring": "Binary crossentropy between an output tensor and a target tensor. Args: target: A tensor with the same shape as . output: A tensor. from_logits: Whether is expected to be a logits tensor. By default, we consider that encodes a probability distribution. Returns: A tensor.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\backend.py",
    "ast_data": "FunctionDef name:binary_crossentropy arg:target arg:output arg:from_logits arguments arg arg arg Assign Call Assign Call If Call Assign If Call Assign If Return return:yes Call If BoolOp BoolOp Call Compare Call Compare Call Assign Return return:yes Call Assign Call Call Assign Call Assign Call Call Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "list",
    "source_code": "def list(self):\n    return self._registry.keys()",
    "docstring": "Lists registered items. Returns: A list of names of registered objects.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\registry.py",
    "ast_data": "FunctionDef name:list arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "DivideDelegateWithName",
    "source_code": "class DivideDelegateWithName:\n\n    def __init__(self, x, name):\n        self.x = x\n        self.name = name\n\n    def __truediv__(self, y):\n        return _truediv_python3(self.x, y, self.name)\n\n    def __floordiv__(self, y):\n        return floordiv(self.x, y, self.name)\n\n    def __div__(self, y):\n        return _div_python2(self.x, y, self.name)",
    "docstring": "Use Python2/Python3 division delegation to implement divide for tensors.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\math_ops.py",
    "ast_data": "ClassDef name:DivideDelegateWithName FunctionDef name:__init__ arg:self arg:x arg:name arguments arg arg arg Assign Assign FunctionDef name:__truediv__ arg:self arg:y arguments arg arg Return return:yes Call FunctionDef name:__floordiv__ arg:self arg:y arguments arg arg Return return:yes Call FunctionDef name:__div__ arg:self arg:y arguments arg arg Return return:yes Call"
  },
  {
    "library": "sphinx",
    "name": "physical_lines_for_line",
    "source_code": "def physical_lines_for_line(self, line: list[Cell]) -> int:\n    physical_lines = 1\n    for cell in line:\n        physical_lines = max(physical_lines, len(cell.wrapped))\n    return physical_lines",
    "docstring": "For a given line, compute the number of physical lines it spans due to text wrapping.",
    "type": "method",
    "file_path": "sphinx\\sphinx\\writers\\text.py",
    "ast_data": "FunctionDef name:physical_lines_for_line arg:self arg:line arguments arg arg Assign For Assign Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "__init__",
    "source_code": "@compatibility(is_backward_compatible=True)\ndef __init__(self, owning_module: Optional['GraphModule']=None, tracer_cls: Optional[type['Tracer']]=None, tracer_extras: Optional[dict[str, Any]]=None):\n    self._root: Node = Node(self, '', 'root', '', (), {})\n    self._used_names: dict[str, int] = {}\n    self._insert = self._root.prepend\n    self._len = 0\n    self._graph_namespace = _Namespace()\n    self._owning_module = owning_module\n    self._tracer_cls = tracer_cls\n    self._tracer_extras = tracer_extras\n    self._codegen = CodeGen()\n    self._co_fields: dict[str, Any] = {}\n    self._find_nodes_lookup_table = _FindNodesLookupTable()",
    "docstring": "Construct an empty Graph.",
    "type": "method",
    "file_path": "pytorch\\torch\\fx\\graph.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:owning_module arg:tracer_cls arg:tracer_extras arguments arg arg arg arg Call Assign Assign Assign Call Assign Assign Assign Assign Call Assign Call Call"
  },
  {
    "library": "pytorch",
    "name": "check_axis_name",
    "source_code": "@staticmethod\ndef check_axis_name(name: str) -> bool:\n    is_valid, _ = ParsedExpression.check_axis_name_return_reason(name)\n    return is_valid",
    "docstring": "Check if the name is a valid axis name. Args: name (str): the axis name to check Returns: bool: whether the axis name is valid",
    "type": "method",
    "file_path": "pytorch\\functorch\\einops\\_parsing.py",
    "ast_data": "FunctionDef name:check_axis_name arg:name arguments arg Assign Call Return return:yes"
  },
  {
    "library": "django",
    "name": "func_accepts_kwargs",
    "source_code": "def func_accepts_kwargs(func):\n    return any((p for p in _get_callable_parameters(func) if p.kind == p.VAR_KEYWORD))",
    "docstring": "Return True if function 'func' accepts keyword arguments **kwargs.",
    "type": "function",
    "file_path": "django\\django\\utils\\inspect.py",
    "ast_data": "FunctionDef name:func_accepts_kwargs arg:func arguments arg Return return:yes Call Call Compare"
  },
  {
    "library": "matplotlib",
    "name": "set_patch_line",
    "source_code": "def set_patch_line(self):\n    self._patch_type = 'line'\n    self.stale = True",
    "docstring": "Set the spine to be linear.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\spines.py",
    "ast_data": "FunctionDef name:set_patch_line arg:self arguments arg Assign Assign"
  },
  {
    "library": "pandas",
    "name": "duplicated",
    "source_code": "def duplicated(self, keep: DropKeep='first') -> Series:\n    res = self._duplicated(keep=keep)\n    result = self._constructor(res, index=self.index, copy=False)\n    return result.__finalize__(self, method='duplicated')",
    "docstring": "Indicate duplicate Series values. Duplicated values are indicated as ``, all duplicates are True: >>> animals.duplicated(keep=False) 0 True 1 False 2 True 3 False 4 True dtype: bool",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\series.py",
    "ast_data": "FunctionDef name:duplicated arg:self arg:keep arguments arg arg Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "validate_augmented_graph_view",
    "source_code": "def validate_augmented_graph_view(augmented_graph_view):\n    for name, dep in augmented_graph_view.list_children(augmented_graph_view.root):\n        if name == SIGNATURE_ATTRIBUTE_NAME:\n            if not isinstance(dep, _SignatureMap):\n                raise ValueError(f\"Exporting an object {augmented_graph_view.root} which has an attribute named '{SIGNATURE_ATTRIBUTE_NAME}'. This is a reserved attribute used to store SavedModel signatures in objects which come from `tf.saved_model.load`. Delete this attribute (e.g. `del obj.{SIGNATURE_ATTRIBUTE_NAME}`) before saving if this shadowing is acceptable.\")\n            break",
    "docstring": "Performs signature-related sanity checks on .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\saved_model\\signature_serialization.py",
    "ast_data": "FunctionDef name:validate_augmented_graph_view arg:augmented_graph_view arguments arg For Call If Compare If Call Raise Call"
  },
  {
    "library": "scipy",
    "name": "line_search_BFGS",
    "source_code": "def line_search_BFGS(f, xk, pk, gfk, old_fval, args=(), c1=0.0001, alpha0=1):\n    r = line_search_armijo(f, xk, pk, gfk, old_fval, args=args, c1=c1, alpha0=alpha0)\n    return (r[0], r[1], 0, r[2])",
    "docstring": "Compatibility wrapper for",
    "type": "function",
    "file_path": "scipy\\scipy\\optimize\\_linesearch.py",
    "ast_data": "FunctionDef name:line_search_BFGS arg:f arg:xk arg:pk arg:gfk arg:old_fval arg:args arg:c1 arg:alpha0 arguments arg arg arg arg arg arg arg arg Assign Call Return return:yes"
  },
  {
    "library": "scrapy",
    "name": "walk_callable",
    "source_code": "def walk_callable(node: ast.AST) -> Iterable[ast.AST]:\n    todo: deque[ast.AST] = deque([node])\n    walked_func_def = False\n    while todo:\n        node = todo.popleft()\n        if isinstance(node, ast.FunctionDef):\n            if walked_func_def:\n                continue\n            walked_func_def = True\n        todo.extend(ast.iter_child_nodes(node))\n        yield node",
    "docstring": "Similar to ``, but walks only function body and skips nested functions defined within the node.",
    "type": "function",
    "file_path": "scrapy\\scrapy\\utils\\misc.py",
    "ast_data": "FunctionDef name:walk_callable arg:node arguments arg Call Assign While Assign Call If Call If Assign Call Call"
  },
  {
    "library": "scrapy",
    "name": "CrawlerRunner",
    "source_code": "class CrawlerRunner(CrawlerRunnerBase):\n\n    def __init__(self, settings: dict[str, Any] | Settings | None=None):\n        super().__init__(settings)\n        self._active: set[Deferred[None]] = set()\n\n    def crawl(self, crawler_or_spidercls: type[Spider] | str | Crawler, *args: Any, **kwargs: Any) -> Deferred[None]:\n        if isinstance(crawler_or_spidercls, Spider):\n            raise ValueError('The crawler_or_spidercls argument cannot be a spider object, it must be a spider class (or a Crawler object)')\n        crawler = self.create_crawler(crawler_or_spidercls)\n        return self._crawl(crawler, *args, **kwargs)\n\n    @inlineCallbacks\n    def _crawl(self, crawler: Crawler, *args: Any, **kwargs: Any) -> Generator[Deferred[Any], Any, None]:\n        self.crawlers.add(crawler)\n        d = crawler.crawl(*args, **kwargs)\n        self._active.add(d)\n        try:\n            yield d\n        finally:\n            self.crawlers.discard(crawler)\n            self._active.discard(d)\n            self.bootstrap_failed |= not getattr(crawler, 'spider', None)\n\n    def stop(self) -> Deferred[Any]:\n        return self._stop()\n\n    @inlineCallbacks\n    def join(self) -> Generator[Deferred[Any], Any, None]:\n        while self._active:\n            yield DeferredList(self._active)",
    "docstring": "This is a convenient helper class that keeps track of, manages and runs crawlers inside an already setup :mod:. The CrawlerRunner object must be instantiated with a :class: object. This class shouldn't be needed (since Scrapy is responsible of using it accordingly) unless writing scripts that manually handle the crawling process. See :ref: for an example. This class provides Deferred-based APIs. Use :class: for modern coroutine APIs.",
    "type": "class",
    "file_path": "scrapy\\scrapy\\crawler.py",
    "ast_data": "ClassDef name:CrawlerRunner FunctionDef name:__init__ arg:self arg:settings arguments arg arg Call Call Call FunctionDef name:crawl arg:self arg:crawler_or_spidercls arguments arg arg arg arg If Call Raise Call Assign Call Return return:yes Call FunctionDef name:_crawl arg:self arg:crawler arguments arg arg arg arg Call Assign Call Call Try Call Call Call FunctionDef name:stop arg:self arguments arg Return return:yes Call FunctionDef name:join arg:self arguments arg While Call"
  },
  {
    "library": "pytorch",
    "name": "add_global",
    "source_code": "def add_global(name_hint: str, obj: Any):\n    if _is_from_torch(obj) and obj != torch.device:\n        return _get_qualified_name(obj)\n    global_name = namespace.create_name(name_hint, obj)\n    if global_name in globals_:\n        assert globals_[global_name] == obj\n        return global_name\n    globals_[global_name] = obj\n    return global_name",
    "docstring": "Add an obj to be tracked as a global. We call this for names that reference objects external to the Graph, like functions or types. Returns: the global name that should be used to reference 'obj' in generated source.",
    "type": "method",
    "file_path": "pytorch\\torch\\fx\\graph.py",
    "ast_data": "FunctionDef name:add_global arg:name_hint arg:obj arguments arg arg If BoolOp Call Compare Return return:yes Call Assign Call If Compare Compare Return return:yes Assign Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "ConstantPad3d",
    "source_code": "class ConstantPad3d(_ConstantPadNd):\n    padding: tuple[int, int, int, int, int, int]\n\n    def __init__(self, padding: _size_6_t, value: float) -> None:\n        super().__init__(value)\n        self.padding = _ntuple(6)(padding)",
    "docstring": "Pads the input tensor boundaries with a constant value. For -dimensional padding, use :func:. Args: padding (int, tuple): the size of the padding. If is , uses the same padding in all boundaries. If a 6-, uses (:math:, :math:, :math:, :math:, :math:, :math:) Shape: - Input: :math: or :math:. - Output: :math: or :math:, where :math: :math: :math: Examples:: >>> m = nn.ConstantPad3d(3, 3.5) >>> input = torch.randn(16, 3, 10, 20, 30) >>> output = m(input) >>> # using different paddings for different sides >>> m = nn.ConstantPad3d((3, 3, 6, 6, 0, 1), 3.5) >>> output = m(input)",
    "type": "class",
    "file_path": "pytorch\\torch\\nn\\modules\\padding.py",
    "ast_data": "ClassDef name:ConstantPad3d FunctionDef name:__init__ arg:self arg:padding arg:value arguments arg arg arg Call Call Assign Call Call"
  },
  {
    "library": "matplotlib",
    "name": "set_xbound",
    "source_code": "def set_xbound(self, lower=None, upper=None, view_margin=None):\n    self._set_bound3d(self.get_xbound, self.set_xlim, self.xaxis_inverted, lower, upper, view_margin)",
    "docstring": "Set the lower and upper numerical bounds of the x-axis. This method will honor axis inversion regardless of parameter order. It will not change the autoscaling setting (). Parameters ---------- lower, upper : float or None The lower and upper bounds. If *None*, the respective axis bound is not modified. view_margin : float or None The margin to apply to the bounds. If *None*, the margin is handled by . See Also -------- get_xbound get_xlim, set_xlim invert_xaxis, xaxis_inverted",
    "type": "method",
    "file_path": "matplotlib\\lib\\mpl_toolkits\\mplot3d\\axes3d.py",
    "ast_data": "FunctionDef name:set_xbound arg:self arg:lower arg:upper arg:view_margin arguments arg arg arg arg Call"
  },
  {
    "library": "pytorch",
    "name": "memory_reserved",
    "source_code": "def memory_reserved(device: 'Device'=None) -> int:\n    return memory_stats(device=device).get('reserved_bytes.all.current', 0)",
    "docstring": "Return the current GPU memory managed by the caching allocator in bytes for a given device. Args: device (torch.device or int, optional): selected device. Returns statistic for the current device, given by :func:, if :attr: is `cuda-memory-management` for more details about GPU memory management.",
    "type": "function",
    "file_path": "pytorch\\torch\\cuda\\memory.py",
    "ast_data": "FunctionDef name:memory_reserved arg:device arguments arg Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "get_optional_of_element_type",
    "source_code": "def get_optional_of_element_type(types):\n    elem_type = types[1] if type(None) == types[0] else types[0]\n    elem_type = get_type(elem_type)\n    return 'Optional[' + elem_type + ']'",
    "docstring": "Extract element type, return as from consolidated types. Helper function to extracts the type of the element to be annotated to Optional from the list of consolidated types and returns . TODO: To remove this check once Union support lands.",
    "type": "function",
    "file_path": "pytorch\\torch\\jit\\_monkeytype_config.py",
    "ast_data": "FunctionDef name:get_optional_of_element_type arg:types arguments arg Assign Compare Call Assign Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "CanReshape",
    "source_code": "class CanReshape(Constraint):\n\n    def __init__(self, src, target):\n        self.src = src\n        self.target = target\n\n    def __repr__(self):\n        return f'can-reshape({self.src}, {self.target})'\n\n    def __eq__(self, other):\n        if isinstance(other, CanReshape):\n            return self.src == other.src and self.target == other.target\n        else:\n            return False",
    "docstring": "can_reshape constraint",
    "type": "class",
    "file_path": "pytorch\\torch\\fx\\experimental\\migrate_gradual_types\\constraint.py",
    "ast_data": "ClassDef name:CanReshape FunctionDef name:__init__ arg:self arg:src arg:target arguments arg arg arg Assign Assign FunctionDef name:__repr__ arg:self arguments arg Return return:yes FunctionDef name:__eq__ arg:self arg:other arguments arg arg If Call Return return:yes BoolOp Compare Compare Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "t0",
    "source_code": "@property\ndef t0(self):\n    return self._t0",
    "docstring": "Absolute timestamp of the first dumped tensor across all devices. Returns: () absolute timestamp of the first dumped tensor, in microseconds.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\debug\\lib\\debug_data.py",
    "ast_data": "FunctionDef name:t0 arg:self arguments arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "RendezvousError",
    "source_code": "class RendezvousError(Exception):\n    pass",
    "docstring": "Represents the base type for rendezvous errors.",
    "type": "class",
    "file_path": "pytorch\\torch\\distributed\\elastic\\rendezvous\\api.py",
    "ast_data": "ClassDef name:RendezvousError"
  },
  {
    "library": "sphinx",
    "name": "AutoIndexUpgrader",
    "source_code": "class AutoIndexUpgrader(SphinxTransform):\n    default_priority = 210\n\n    def apply(self, **kwargs: Any) -> None:\n        for node in self.document.findall(addnodes.index):\n            if 'entries' in node and any((len(entry) == 4 for entry in node['entries'])):\n                msg = __('4 column based index found. It might be a bug of extensions you use: %r') % node['entries']\n                logger.warning(msg, location=node)\n                for i, entry in enumerate(node['entries']):\n                    if len(entry) == 4:\n                        node['entries'][i] = (*entry, None)",
    "docstring": "Detect old style (4 column based indices) and automatically upgrade to new style.",
    "type": "class",
    "file_path": "sphinx\\sphinx\\transforms\\__init__.py",
    "ast_data": "ClassDef name:AutoIndexUpgrader Assign FunctionDef name:apply arg:self arguments arg arg For Call If BoolOp Compare Call Compare Call Assign Call Call For Call If Compare Call Assign"
  },
  {
    "library": "numpy",
    "name": "compressed",
    "source_code": "def compressed(x):\n    return asanyarray(x).compressed()",
    "docstring": "Return all the non-masked data as a 1-D array. This function is equivalent to calling the \"compressed\" method of a , see for details. See Also -------- ma.MaskedArray.compressed : Equivalent method. Examples -------- >>> import numpy as np Create an array with negative values masked: >>> import numpy as np >>> x = np.array([[1, -1, 0], [2, -1, 3], [7, 4, -1]]) >>> masked_x = np.ma.masked_array(x, mask=x >> masked_x masked_array( data=[[1, --, 0], [2, --, 3], [7, 4, --]], mask=[[False, True, False], [False, True, False], [False, False, True]], fill_value=999999) Compress the masked array into a 1-D array of non-masked values: >>> np.ma.compressed(masked_x) array([1, 0, 2, 3, 7, 4])",
    "type": "function",
    "file_path": "numpy\\numpy\\ma\\core.py",
    "ast_data": "FunctionDef name:compressed arg:x arguments arg Return return:yes Call Call"
  },
  {
    "library": "matplotlib",
    "name": "_get_preamble",
    "source_code": "def _get_preamble():\n    font_size_pt = FontProperties(size=mpl.rcParams['font.size']).get_size_in_points()\n    return '\\n'.join(['\\\\def\\\\mathdefault#1{#1}', '\\\\everymath=\\\\expandafter{\\\\the\\\\everymath\\\\displaystyle}', '\\\\IfFileExists{scrextend.sty}{', '  \\\\usepackage[fontsize=%fpt]{scrextend}' % font_size_pt, '}{', '  \\\\renewcommand{\\\\normalsize}{\\\\fontsize{%f}{%f}\\\\selectfont}' % (font_size_pt, 1.2 * font_size_pt), '  \\\\normalsize', '}', mpl.rcParams['pgf.preamble'], *(['\\\\ifdefined\\\\pdftexversion\\\\else  % non-pdftex case.', '  \\\\usepackage{fontspec}'] + ['  \\\\%s{%s}[Path=\\\\detokenize{%s/}]' % (command, path.name, path.parent.as_posix()) for command, path in zip(['setmainfont', 'setsansfont', 'setmonofont'], [pathlib.Path(fm.findfont(family)) for family in ['serif', 'sans\\\\-serif', 'monospace']])] + ['\\\\fi'] if mpl.rcParams['pgf.rcfonts'] else []), mpl.texmanager._usepackage_if_not_loaded('underscore', option='strings')])",
    "docstring": "Prepare a LaTeX preamble based on the rcParams configuration.",
    "type": "function",
    "file_path": "matplotlib\\lib\\matplotlib\\backends\\backend_pgf.py",
    "ast_data": "FunctionDef name:_get_preamble arguments Assign Call Call Return return:yes Call Call Call Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "import_module",
    "source_code": "def import_module(self, name: str, package=None):\n    name = self._mangler.demangle(name)\n    return self._gcd_import(name)",
    "docstring": "Load a module from the package if it hasn't already been loaded, and then return the module. Modules are loaded locally to the importer and will appear in ``. Returns: types.ModuleType: The (possibly already) loaded module.",
    "type": "method",
    "file_path": "pytorch\\torch\\package\\package_importer.py",
    "ast_data": "FunctionDef name:import_module arg:self arg:name arg:package arguments arg arg arg Assign Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_MinOrMaxGrad",
    "source_code": "def _MinOrMaxGrad(op: ops.Operation, grad):\n    input_shape = array_ops.shape(op.inputs[0])\n    y = op.outputs[0]\n    if not op.get_attr('keep_dims'):\n        output_shape_kept_dims = math_ops.reduced_shape(input_shape, op.inputs[1])\n        y = array_ops.reshape(y, output_shape_kept_dims)\n        grad = array_ops.reshape(grad, output_shape_kept_dims)\n    else:\n        output_shape_kept_dims = array_ops.shape(y)\n    indicators = math_ops.cast(math_ops.equal(y, op.inputs[0]), grad.dtype)\n    num_selected = array_ops.reshape(math_ops.reduce_sum(indicators, op.inputs[1]), output_shape_kept_dims)\n    return [math_ops.divide(indicators, num_selected) * grad, None]",
    "docstring": "Gradient for Min or Max. Amazingly it's precisely the same code.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\math_grad.py",
    "ast_data": "FunctionDef name:_MinOrMaxGrad arg:op arg:grad arguments arg arg Assign Call Assign If Call Assign Call Assign Call Assign Call Assign Call Assign Call Call Assign Call Call Return return:yes Call"
  },
  {
    "library": "sphinx",
    "name": "contains_anchor",
    "source_code": "def contains_anchor(response: Response, anchor: str) -> bool:\n    parser = AnchorCheckParser(anchor)\n    for chunk in response.iter_content(chunk_size=4096, decode_unicode=True):\n        if isinstance(chunk, bytes):\n            chunk = chunk.decode()\n        parser.feed(chunk)\n        if parser.found:\n            break\n    parser.close()\n    return parser.found",
    "docstring": "Determine if an anchor is contained within an HTTP response.",
    "type": "function",
    "file_path": "sphinx\\sphinx\\builders\\linkcheck.py",
    "ast_data": "FunctionDef name:contains_anchor arg:response arg:anchor arguments arg arg Assign Call For Call If Call Assign Call Call If Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "aggregate",
    "source_code": "@abc.abstractmethod\ndef aggregate(self, batch_outs, batch_start=None, batch_end=None):\n    raise NotImplementedError('Must be implemented in subclasses.')",
    "docstring": "Aggregates batch-level results into total results. Args: batch_outs: A list of batch-level outputs. batch_start: The start index of this batch. Always if is . batch_end: The end index of this batch. Always if is .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\training_utils_v1.py",
    "ast_data": "FunctionDef name:aggregate arg:self arg:batch_outs arg:batch_start arg:batch_end arguments arg arg arg arg Raise Call"
  },
  {
    "library": "tensorflow",
    "name": "_embedding_activations_grad",
    "source_code": "@ops.RegisterGradient('TPUEmbeddingActivations')\ndef _embedding_activations_grad(activations_op, grad_wrt_activations):\n    g = ops.get_default_graph()\n    table_id = activations_op.get_attr('table_id')\n    lookup_id = activations_op.get_attr('lookup_id')\n    table_gradients = g.get_collection_ref('tpu_embedding_gradients_table_%d' % table_id)\n    if not table_gradients:\n        raise RuntimeError('Gradients for TPUEmbedding have been generated in non-training mode.This is not expected. Consider putting your Optimizer.minimize code behind the training mode condition check\\n')\n    if lookup_id < 0 or lookup_id >= len(table_gradients):\n        raise RuntimeError('Gradients (w.r.t. TPUEmbedding activations) generated for table_id {} and lookup_id {}. The lookup_id attribute is outside the expected range [0, {}).'.format(table_id, lookup_id, len(table_gradients)))\n    if table_gradients[lookup_id] is not None:\n        raise RuntimeError('Duplicate gradients (w.r.t. TPUEmbedding activations) generated for table_id {} and lookup_id {}. This happens when there are multiple calls to tf.gradients in a graph containing TPU embeddings. TF cannot identify which gradient to use for updating the embedding variables. Consider placing tf.StopGradient around tensors where variable update is not required. Previous gradients were generated by the following callstack: {}.'.format(table_id, lookup_id, table_gradients[lookup_id].op.traceback))\n    table_gradients[lookup_id] = array_ops.identity(grad_wrt_activations)\n    return [array_ops.zeros(arg.shape, dtype=dtypes.float32) for arg in activations_op.inputs]",
    "docstring": "Saves the gradient of embedding activations ops in a graph collection.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\tpu\\ops\\tpu_ops.py",
    "ast_data": "FunctionDef name:_embedding_activations_grad arg:activations_op arg:grad_wrt_activations arguments arg arg Assign Call Assign Call Assign Call Assign Call If Raise Call If BoolOp Compare Compare Call Raise Call Call Call If Compare Raise Call Call Assign Call Return return:yes Call Call"
  },
  {
    "library": "matplotlib",
    "name": "contour",
    "source_code": "@_preprocess_data()\n@_docstring.interpd\ndef contour(self, *args, **kwargs):\n    kwargs['filled'] = False\n    contours = mcontour.QuadContourSet(self, *args, **kwargs)\n    self._request_autoscale_view()\n    return contours",
    "docstring": "Plot contour lines. Call signature:: contour([X, Y,] Z, /, [levels], **kwargs) The arguments *X*, *Y*, *Z* are positional-only. %(contour_doc)s",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\axes\\_axes.py",
    "ast_data": "FunctionDef name:contour arg:self arguments arg arg arg Assign Assign Call Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "svd_lowrank",
    "source_code": "def svd_lowrank(A: Tensor, q: Optional[int]=6, niter: Optional[int]=2, M: Optional[Tensor]=None) -> tuple[Tensor, Tensor, Tensor]:\n    if not torch.jit.is_scripting():\n        tensor_ops = (A, M)\n        if not set(map(type, tensor_ops)).issubset((torch.Tensor, type(None))) and has_torch_function(tensor_ops):\n            return handle_torch_function(svd_lowrank, tensor_ops, A, q=q, niter=niter, M=M)\n    return _svd_lowrank(A, q=q, niter=niter, M=M)",
    "docstring": "Return the singular value decomposition `AA \\approx U \\operatorname{diag}(S) V^{\\text{H}}MA - MAQk `_).",
    "type": "function",
    "file_path": "pytorch\\torch\\_lowrank.py",
    "ast_data": "FunctionDef name:svd_lowrank arg:A arg:q arg:niter arg:M arguments arg arg arg arg If Call Assign If BoolOp Call Call Call Call Call Return return:yes Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "register",
    "source_code": "def register(self, constraint, factory=None):\n    if factory is None:\n        return lambda factory: self.register(constraint, factory)\n    if isinstance(constraint, constraints.Constraint):\n        constraint = type(constraint)\n    if not isinstance(constraint, type) or not issubclass(constraint, constraints.Constraint):\n        raise TypeError(f'Expected constraint to be either a Constraint subclass or instance, but got {constraint}')\n    self._registry[constraint] = factory\n    return factory",
    "docstring": "Registers a :class: subclass in this registry. Usage:: @my_registry.register(MyConstraintClass) def construct_transform(constraint): assert isinstance(constraint, MyConstraint) return MyTransform(constraint.arg_constraints) Args: constraint (subclass of :class:): A subclass of :class:, or a singleton object of the desired class. factory (Callable): A callable that inputs a constraint object and returns a :class: object.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributions\\constraint_registry.py",
    "ast_data": "FunctionDef name:register arg:self arg:constraint arg:factory arguments arg arg arg If Compare Return return:yes arguments arg Call If Call Assign Call If BoolOp Call Call Raise Call Assign Return return:yes"
  },
  {
    "library": "scipy",
    "name": "LazyOperatorNormInfo",
    "source_code": "class LazyOperatorNormInfo:\n\n    def __init__(self, A, A_1_norm=None, ell=2, scale=1):\n        self._A = A\n        self._A_1_norm = A_1_norm\n        self._ell = ell\n        self._d = {}\n        self._scale = scale\n\n    def set_scale(self, scale):\n        self._scale = scale\n\n    def onenorm(self):\n        if self._A_1_norm is None:\n            self._A_1_norm = _exact_1_norm(self._A)\n        return self._scale * self._A_1_norm\n\n    def d(self, p):\n        if p not in self._d:\n            est = _onenormest_matrix_power(self._A, p, self._ell)\n            self._d[p] = est ** (1.0 / p)\n        return self._scale * self._d[p]\n\n    def alpha(self, p):\n        return max(self.d(p), self.d(p + 1))",
    "docstring": "Information about an operator is lazily computed. The information includes the exact 1-norm of the operator, in addition to estimates of 1-norms of powers of the operator. This uses the notation of Computing the Action (2011). This class is specialized enough to probably not be of general interest outside of this module.",
    "type": "class",
    "file_path": "scipy\\scipy\\sparse\\linalg\\_expm_multiply.py",
    "ast_data": "ClassDef name:LazyOperatorNormInfo FunctionDef name:__init__ arg:self arg:A arg:A_1_norm arg:ell arg:scale arguments arg arg arg arg arg Assign Assign Assign Assign Assign FunctionDef name:set_scale arg:self arg:scale arguments arg arg Assign FunctionDef name:onenorm arg:self arguments arg If Compare Assign Call Return return:yes FunctionDef name:d arg:self arg:p arguments arg arg If Compare Assign Call Assign Return return:yes FunctionDef name:alpha arg:self arg:p arguments arg arg Return return:yes Call Call Call"
  },
  {
    "library": "matplotlib",
    "name": "set_color",
    "source_code": "def set_color(self, color):\n    if not cbook._str_equal(color, 'auto'):\n        mpl.colors._check_color_like(color=color)\n    self._color = color\n    self.stale = True",
    "docstring": "Set the foreground color of the text Parameters ---------- color : :mpltype:",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\text.py",
    "ast_data": "FunctionDef name:set_color arg:self arg:color arguments arg arg If Call Call Assign Assign"
  },
  {
    "library": "scikit-learn",
    "name": "_unique_np",
    "source_code": "def _unique_np(values, return_inverse=False, return_counts=False):\n    xp, _ = get_namespace(values)\n    inverse, counts = (None, None)\n    if return_inverse and return_counts:\n        uniques, _, inverse, counts = xp.unique_all(values)\n    elif return_inverse:\n        uniques, inverse = xp.unique_inverse(values)\n    elif return_counts:\n        uniques, counts = xp.unique_counts(values)\n    else:\n        uniques = xp.unique_values(values)\n    if uniques.size and is_scalar_nan(uniques[-1]):\n        nan_idx = _searchsorted(uniques, xp.nan, xp=xp)\n        uniques = uniques[:nan_idx + 1]\n        if return_inverse:\n            inverse[inverse > nan_idx] = nan_idx\n        if return_counts:\n            counts[nan_idx] = xp.sum(counts[nan_idx:])\n            counts = counts[:nan_idx + 1]\n    ret = (uniques,)\n    if return_inverse:\n        ret += (inverse,)\n    if return_counts:\n        ret += (counts,)\n    return ret[0] if len(ret) == 1 else ret",
    "docstring": "Helper function to find unique values for numpy arrays that correctly accounts for nans. See documentation for details.",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\utils\\_encode.py",
    "ast_data": "FunctionDef name:_unique_np arg:values arg:return_inverse arg:return_counts arguments arg arg arg Assign Call Assign If BoolOp Assign Call If Assign Call If Assign Call Assign Call If BoolOp Call Assign Call Assign If Assign Compare If Assign Call Assign Assign If If Return return:yes Compare Call"
  },
  {
    "library": "pandas",
    "name": "value_counts",
    "source_code": "def value_counts(self, dropna: bool=True) -> Series:\n    pa_type = self._pa_array.type\n    if pa_version_under11p0 and pa.types.is_duration(pa_type):\n        data = self._pa_array.cast(pa.int64())\n    else:\n        data = self._pa_array\n    from pandas import Index, Series\n    vc = data.value_counts()\n    values = vc.field(0)\n    counts = vc.field(1)\n    if dropna and data.null_count > 0:\n        mask = values.is_valid()\n        values = values.filter(mask)\n        counts = counts.filter(mask)\n    if pa_version_under11p0 and pa.types.is_duration(pa_type):\n        values = values.cast(pa_type)\n    counts = ArrowExtensionArray(counts)\n    index = Index(type(self)(values))\n    return Series(counts, index=index, name='count', copy=False)",
    "docstring": "Return a Series containing counts of each unique value. Parameters ---------- dropna : bool, default True Don't include counts of missing values. Returns ------- counts : Series See Also -------- Series.value_counts",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\arrays\\arrow\\array.py",
    "ast_data": "FunctionDef name:value_counts arg:self arg:dropna arguments arg arg Assign If BoolOp Call Assign Call Call Assign Assign Call Assign Call Assign Call If BoolOp Compare Assign Call Assign Call Assign Call If BoolOp Call Assign Call Assign Call Assign Call Call Call Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "__init__",
    "source_code": "def __init__(self, fmt, tz=None, *, usetex=None):\n    self.tz = _get_tzinfo(tz)\n    self.fmt = fmt\n    self._usetex = mpl._val_or_rc(usetex, 'text.usetex')",
    "docstring": "Parameters ---------- fmt : str format string tz : str or , default: :rc: Ticks timezone. If a string, *tz* is passed to . usetex : bool, default: :rc: To enable/disable the use of TeX's math mode for rendering the results of the formatter.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\dates.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:fmt arg:tz arguments arg arg arg arg Assign Call Assign Assign Call"
  },
  {
    "library": "authlib",
    "name": "ClientCredentialsGrant",
    "source_code": "class ClientCredentialsGrant(BaseGrant, TokenEndpointMixin):\n    GRANT_TYPE = 'client_credentials'\n\n    def validate_token_request(self):\n        client = self.authenticate_token_endpoint_client()\n        log.debug('Validate token request of %r', client)\n        if not client.check_grant_type(self.GRANT_TYPE):\n            raise UnauthorizedClientError(f\"The client is not authorized to use 'grant_type={self.GRANT_TYPE}'\")\n        self.request.client = client\n        self.validate_requested_scope()\n\n    @hooked\n    def create_token_response(self):\n        token = self.generate_token(scope=self.request.payload.scope, include_refresh_token=False)\n        log.debug('Issue token %r to %r', token, self.client)\n        self.save_token(token)\n        return (200, token, self.TOKEN_RESPONSE_HEADER)",
    "docstring": "The client can request an access token using only its client credentials (or other supported means of authentication) when the client is requesting access to the protected resources under its control, or those of another resource owner that have been previously arranged with the authorization server. The client credentials grant type MUST only be used by confidential clients:: +---------+ +---------------+ | | | | | |>--(A)- Client Authentication --->| Authorization | | Client | | Server | | |<--(B)---- Access Token ---------<| | | | | | +---------+ +---------------+",
    "type": "class",
    "file_path": "authlib\\authlib\\oauth2\\rfc6749\\grants\\client_credentials.py",
    "ast_data": "ClassDef name:ClientCredentialsGrant Assign FunctionDef name:validate_token_request arg:self arguments arg Assign Call Call If Call Raise Call Assign Call FunctionDef name:create_token_response arg:self arguments arg Assign Call Call Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "get_joinstyle",
    "source_code": "def get_joinstyle(self):\n    return self._joinstyle.name",
    "docstring": "Return the .",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\backend_bases.py",
    "ast_data": "FunctionDef name:get_joinstyle arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_get_variable_nodes_from_graph_def",
    "source_code": "def _get_variable_nodes_from_graph_def(graph_def):\n    variables = [n for n in graph_def.node if n.op == 'VarHandleOp']\n    variable_name_map = dict(((n.name, n) for n in variables))\n    child_map = collections.defaultdict(lambda: [])\n    for n in graph_def.node:\n        for inp in n.input:\n            if not inp.startswith('^'):\n                child_map[inp].append(n)\n    variables = {}\n    for v_name, v_node in variable_name_map.items():\n        queue = list(child_map[v_name])\n        processed = set([])\n        while queue:\n            n_current = queue.pop()\n            if n_current.name in processed:\n                continue\n            processed.add(n_current.name)\n            if n_current.op in _PASS_THROUGH_VARIABLE_OPS:\n                children = child_map.get(n_current.name, [])\n                queue.extend(children)\n            elif n_current.op not in _READ_ONLY_VARIABLE_OPS:\n                variables[v_name] = (v_node, True)\n                queue = []\n        if v_name not in variables:\n            variables[v_name] = (v_node, False)\n    return variables",
    "docstring": "Get the list of Variable nodes from . Args: graph_def: An instance of . This GraphDef *must* have already been optimized by Grappler. In particular, function inlining must have already happened. Returns: A dict mapping string names of variables to tuples , where is the corresponding to variable, and is a python bool describing whether the variable is modified during runtime.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\tools\\saved_model_aot_compile.py",
    "ast_data": "FunctionDef name:_get_variable_nodes_from_graph_def arg:graph_def arguments arg Assign Compare Assign Call Assign Call arguments For For If Call Call Assign For Call Assign Call Assign Call While Assign Call If Compare Call If Compare Assign Call Call If Compare Assign Assign If Compare Assign Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_quantize_and_dequantize_weight_decomposed",
    "source_code": "def _quantize_and_dequantize_weight_decomposed(weight: torch.Tensor, weight_qscheme: torch.qscheme, weight_dtype: torch.dtype, weight_scale: torch.Tensor, weight_zero_point: torch.Tensor, weight_axis_int: int, weight_quant_min: typing.Optional[int], weight_quant_max: typing.Optional[int]) -> torch.Tensor:\n    if weight_qscheme in [torch.per_tensor_affine, torch.per_channel_affine, torch.per_channel_affine_float_qparams]:\n        weight_quant = _quantize_weight_decomposed(weight, weight_qscheme, weight_dtype, weight_scale, weight_zero_point, weight_axis_int, weight_quant_min, weight_quant_max)\n        weight_dequant = _dequantize_weight_decomposed(weight_quant, weight_qscheme, weight_dtype, weight_scale, weight_zero_point, weight_axis_int, weight_quant_min, weight_quant_max)\n    else:\n        weight_dequant = weight\n    return weight_dequant",
    "docstring": "Quantize and then dequantize the weight based on the quantization parameters",
    "type": "function",
    "file_path": "pytorch\\torch\\ao\\nn\\quantized\\reference\\modules\\utils.py",
    "ast_data": "FunctionDef name:_quantize_and_dequantize_weight_decomposed arg:weight arg:weight_qscheme arg:weight_dtype arg:weight_scale arg:weight_zero_point arg:weight_axis_int arg:weight_quant_min arg:weight_quant_max arguments arg arg arg arg arg arg arg arg If Compare Assign Call Assign Call Assign Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "ExportOptions",
    "source_code": "@deprecated('torch.onnx.dynamo_export is deprecated since 2.7.0. Please use torch.onnx.export(..., dynamo=True) instead.', category=None)\nclass ExportOptions:\n\n    def __init__(self, *, dynamic_shapes: bool | None=True, fake_context: ONNXFakeContext | None=None, onnx_registry: OnnxRegistry | None=None):\n        self.dynamic_shapes = dynamic_shapes\n        self.fake_context = fake_context\n        self.onnx_registry = onnx_registry",
    "docstring": "Options to influence the TorchDynamo ONNX exporter. .. deprecated:: 2.7 Please use ``, all input shapes are considered static. fake_context: The fake context used for symbolic tracing. onnx_registry: The ONNX registry used to register ATen operators to ONNX functions.",
    "type": "class",
    "file_path": "pytorch\\torch\\onnx\\_internal\\_exporter_legacy.py",
    "ast_data": "ClassDef name:ExportOptions FunctionDef name:__init__ arg:self arguments arg arg arg arg Assign Assign Assign Call"
  },
  {
    "library": "pytorch",
    "name": "_is_fsdp_root",
    "source_code": "def _is_fsdp_root(state: _FSDPState, module: nn.Module) -> bool:\n    _lazy_init(state, module)\n    assert state._is_root is not None\n    return state._is_root",
    "docstring": "Returns if `` 's state.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\fsdp\\_runtime_utils.py",
    "ast_data": "FunctionDef name:_is_fsdp_root arg:state arg:module arguments arg arg Call Compare Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "set_hatch",
    "source_code": "def set_hatch(self, hatch):\n    mhatch._validate_hatch_pattern(hatch)\n    self._hatch = hatch\n    self.stale = True",
    "docstring": "Set the hatching pattern. *hatch* can be one of:: / - diagonal hatching \\ - back diagonal | - vertical - - horizontal + - crossed x - crossed diagonal o - small circle O - large circle . - dots * - stars Letters can be combined, in which case all the specified hatchings are done. If same letter repeats, it increases the density of hatching of that pattern. Parameters ---------- hatch : {'/', '\\\\', '|', '-', '+', 'x', 'o', 'O', '.', '*'}",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\patches.py",
    "ast_data": "FunctionDef name:set_hatch arg:self arg:hatch arguments arg arg Call Assign Assign"
  },
  {
    "library": "django",
    "name": "get_initial_for_field",
    "source_code": "def get_initial_for_field(self, field, field_name):\n    value = self.initial.get(field_name, field.initial)\n    if callable(value):\n        value = value()\n    if isinstance(value, (datetime.datetime, datetime.time)) and (not field.widget.supports_microseconds):\n        value = value.replace(microsecond=0)\n    return value",
    "docstring": "Return initial data for field on form. Use initial data from the form or the field, in that order. Evaluate callable values.",
    "type": "method",
    "file_path": "django\\django\\forms\\forms.py",
    "ast_data": "FunctionDef name:get_initial_for_field arg:self arg:field arg:field_name arguments arg arg arg Assign Call If Call Assign Call If BoolOp Call Assign Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "disconnect",
    "source_code": "def disconnect(self, cid):\n    self._pickled_cids.discard(cid)\n    for signal, proxy in self._func_cid_map:\n        if self._func_cid_map[signal, proxy] == cid:\n            break\n    else:\n        return\n    assert self.callbacks[signal][cid] == proxy\n    del self.callbacks[signal][cid]\n    self._func_cid_map.pop((signal, proxy))\n    if len(self.callbacks[signal]) == 0:\n        del self.callbacks[signal]",
    "docstring": "Disconnect the callback registered with callback id *cid*. No error is raised if such a callback does not exist.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\cbook.py",
    "ast_data": "FunctionDef name:disconnect arg:self arg:cid arguments arg arg Call For If Compare Return return:no Compare Call If Compare Call"
  },
  {
    "library": "tensorflow",
    "name": "_add_new_tf_operations",
    "source_code": "def _add_new_tf_operations(self, compute_devices=True) -> list['Operation']:\n    self._check_not_finalized()\n    new_ops = [self._create_op_from_tf_operation(c_op, compute_device=compute_devices) for c_op in self.new_operations()]\n    for op in new_ops:\n        new_control_inputs = self._control_dependencies_for_inputs(op.inputs)\n        op._add_control_inputs(new_control_inputs)\n        op._control_flow_post_processing()\n    return new_ops",
    "docstring": "Creates in this graph for any new TF_Operations. This is useful for when TF_Operations are indirectly created by the C API outside of the Operation constructor (e.g. by TF_ImportGraphDef, TF_FinishWhile). This ensures there are corresponding Operations for all TF_Operations in the underlying TF_Graph. Args: compute_devices: (Optional.) If True, device functions will be executed to compute the device properties of each new Operation. Returns: A list of the new objects.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\ops.py",
    "ast_data": "FunctionDef name:_add_new_tf_operations arg:self arg:compute_devices arguments arg arg Call Assign Call Call For Assign Call Call Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "__init__",
    "source_code": "def __init__(self, axis, functions, base=10):\n    forward, inverse = functions\n    self.subs = None\n    self._transform = FuncTransform(forward, inverse) + LogTransform(base)",
    "docstring": "Parameters ---------- axis : The axis for the scale. functions : (callable, callable) two-tuple of the forward and inverse functions for the scale. The forward function must be monotonic. Both functions must have the signature:: def forward(values: array-like) -> array-like base : float, default: 10 Logarithmic base of the scale.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\scale.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:axis arg:functions arg:base arguments arg arg arg arg Assign Assign Assign Call Call"
  },
  {
    "library": "pandas",
    "name": "_translate_key",
    "source_code": "def _translate_key(key: str) -> str:\n    d = _get_deprecated_option(key)\n    if d:\n        return d.rkey or key\n    else:\n        return key",
    "docstring": "if key id deprecated and a replacement key defined, will return the replacement key, otherwise returns as - is",
    "type": "function",
    "file_path": "pandas\\pandas\\_config\\config.py",
    "ast_data": "FunctionDef name:_translate_key arg:key arguments arg Assign Call If Return return:yes BoolOp Return return:yes"
  },
  {
    "library": "kornia",
    "name": "_chroma_upsampling",
    "source_code": "def _chroma_upsampling(input_c: Tensor) -> Tensor:\n    output_c: Tensor = rescale(input_c[:, None], factor=2.0, interpolation='bilinear', align_corners=False, antialias=False)\n    return output_c[:, 0]",
    "docstring": "Perform chroma upsampling. Args: input_c (Tensor): Cb or Cr component to be upsampled of the shape :math:. Returns: output_c (Tensor): Upsampled C(b or r) component of the shape :math:.",
    "type": "function",
    "file_path": "kornia\\kornia\\enhance\\jpeg.py",
    "ast_data": "FunctionDef name:_chroma_upsampling arg:input_c arguments arg Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "batch_scatter_update",
    "source_code": "@tf_export(v1=['batch_scatter_update'])\n@deprecation.deprecated('2018-11-29', 'Use the batch_scatter_update method of Variable instead.')\ndef batch_scatter_update(ref, indices, updates, use_locking=True, name=None):\n    with ops.name_scope(name):\n        indices = ops.convert_to_tensor(indices, name='indices')\n        indices_shape = array_ops.shape(indices)\n        indices_dimensions = indices.get_shape().ndims\n        if indices_dimensions is None:\n            raise ValueError('batch_gather does not allow indices with unknown shape.')\n        nd_indices = array_ops.expand_dims(indices, axis=-1)\n        nd_indices_list = []\n        for dimension in range(indices_dimensions - 1):\n            dimension_size = indices_shape[dimension]\n            shape_to_broadcast = [1] * (indices_dimensions + 1)\n            shape_to_broadcast[dimension] = dimension_size\n            dimension_range = array_ops.reshape(gen_math_ops._range(0, dimension_size, 1), shape_to_broadcast)\n            if dimension_range.dtype.base_dtype != nd_indices.dtype:\n                dimension_range = gen_math_ops.cast(dimension_range, nd_indices.dtype)\n            nd_indices_list.append(dimension_range * array_ops.ones_like(nd_indices))\n        nd_indices_list.append(nd_indices)\n        final_indices = array_ops.concat(nd_indices_list, axis=-1)\n        return scatter_nd_update(ref, final_indices, updates, use_locking=use_locking)",
    "docstring": "Generalization of to axis different than 0. Analogous to . This assumes that , and have a series of leading dimensions that are the same for all of them, and the updates are performed on the last dimension of indices. In other words, the dimensions should be the following: where And the operation performed can be expressed as: When indices is a 1D tensor, this operation is equivalent to . To avoid this operation there would be 2 alternatives: 1) Reshaping the variable by merging the first dimensions. However, this is not possible because returns a Tensor, which we cannot use on. 2) Looping over the first of the variable and using on the subtensors that result of slicing the first dimension. This is a valid option for , but less efficient than this implementation. See also and . Args: ref: to scatter onto. indices: Tensor containing indices as described above. updates: Tensor of updates to apply to . use_locking: Boolean indicating whether to lock the writing operation. name: Optional scope name string. Returns: Ref to after it has been modified. Raises: ValueError: If the initial of , , and are not the same.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\state_ops.py",
    "ast_data": "FunctionDef name:batch_scatter_update arg:ref arg:indices arg:updates arg:use_locking arg:name arguments arg arg arg arg arg With Call Assign Call Assign Call Assign Call If Compare Raise Call Assign Call Assign For Call Assign Assign Assign Assign Call Call If Compare Assign Call Call Call Call Assign Call Return return:yes Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "rng_bit_generator",
    "source_code": "def rng_bit_generator(algorithm, initial_state, shape, dtype):\n    alg_int = random_ops_util.convert_alg_to_int(algorithm)\n    return gen_xla_ops.xla_rng_bit_generator(alg_int, initial_state, shape, dtype=dtype)",
    "docstring": "Stateless PRNG bit generator. Wraps the XLA RngBitGenerator operator, documented at Args: algorithm: The PRNG algorithm to use, one of tf.random.Algorithm.{PHILOX, THREEFRY, AUTO_SELECT}. initial_state: Initial state for the PRNG algorithm. For THREEFRY, it should be a u64[2] and for PHILOX a u64[3]. shape: The output shape of the generated data. dtype: The type of the tensor. Returns: a tuple with a new state and generated data of the given shape.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\compiler\\tf2xla\\python\\xla.py",
    "ast_data": "FunctionDef name:rng_bit_generator arg:algorithm arg:initial_state arg:shape arg:dtype arguments arg arg arg arg Assign Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "summary_op",
    "source_code": "@property\ndef summary_op(self):\n    return self._summary_op",
    "docstring": "Return the Summary Tensor used by the chief supervisor. Returns: A string Tensor for the summary or .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\training\\supervisor.py",
    "ast_data": "FunctionDef name:summary_op arg:self arguments arg Return return:yes"
  },
  {
    "library": "numpy",
    "name": "get_flags_linker_so",
    "source_code": "def get_flags_linker_so(self):\n    return self._get_command_flags('linker_so')",
    "docstring": "List of linker flags to build a shared library.",
    "type": "method",
    "file_path": "numpy\\numpy\\distutils\\fcompiler\\__init__.py",
    "ast_data": "FunctionDef name:get_flags_linker_so arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "django",
    "name": "BlankChoiceIterator",
    "source_code": "class BlankChoiceIterator(BaseChoiceIterator):\n\n    def __init__(self, choices, blank_choice):\n        self.choices = choices\n        self.blank_choice = blank_choice\n\n    def __iter__(self):\n        choices, other = tee(self.choices)\n        if not any((value in ('', None) for value, _ in flatten_choices(other))):\n            yield from self.blank_choice\n        yield from choices",
    "docstring": "Iterator to lazily inject a blank choice.",
    "type": "class",
    "file_path": "django\\django\\utils\\choices.py",
    "ast_data": "ClassDef name:BlankChoiceIterator FunctionDef name:__init__ arg:self arg:choices arg:blank_choice arguments arg arg arg Assign Assign FunctionDef name:__iter__ arg:self arguments arg Assign Call If Call Compare Call"
  },
  {
    "library": "pytorch",
    "name": "state",
    "source_code": "@property\n@abstractmethod\ndef state(self) -> _RendezvousState:\n    pass",
    "docstring": "Get the local state.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\elastic\\rendezvous\\dynamic_rendezvous.py",
    "ast_data": "FunctionDef name:state arg:self arguments arg"
  },
  {
    "library": "tensorflow",
    "name": "_get_applicable_entries",
    "source_code": "def _get_applicable_entries(self, transformer_field, full_name, name):\n    function_transformers = getattr(self._api_change_spec, transformer_field, {})\n    glob_name = '*.' + name if name else None\n    transformers = []\n    if full_name in function_transformers:\n        transformers.append(function_transformers[full_name])\n    if glob_name in function_transformers:\n        transformers.append(function_transformers[glob_name])\n    if '*' in function_transformers:\n        transformers.append(function_transformers['*'])\n    return transformers",
    "docstring": "Get all list entries indexed by name that apply to full_name or name.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\tools\\compatibility\\ast_edits.py",
    "ast_data": "FunctionDef name:_get_applicable_entries arg:self arg:transformer_field arg:full_name arg:name arguments arg arg arg arg Assign Call Assign Assign If Compare Call If Compare Call If Compare Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "in_save_context",
    "source_code": "def in_save_context():\n    return _save_context.in_save_context()",
    "docstring": "Returns whether under a save context.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\saved_model\\save_context.py",
    "ast_data": "FunctionDef name:in_save_context arguments Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "explode",
    "source_code": "def explode(self) -> DataFrame:\n    from pandas import concat\n    pa_type = self._pa_array.type\n    return concat([self.field(i) for i in range(pa_type.num_fields)], axis='columns')",
    "docstring": "Extract all child fields of a struct as a DataFrame. Returns ------- pandas.DataFrame The data corresponding to all child fields. See Also -------- Series.struct.field : Return a single child field as a Series. Examples -------- >>> import pyarrow as pa >>> s = pd.Series( ... [ ... {\"version\": 1, \"project\": \"pandas\"}, ... {\"version\": 2, \"project\": \"pandas\"}, ... {\"version\": 1, \"project\": \"numpy\"}, ... ], ... dtype=pd.ArrowDtype( ... pa.struct([(\"version\", pa.int64()), (\"project\", pa.string())]) ... ), ... ) >>> s.struct.explode() version project 0 1 pandas 1 2 pandas 2 1 numpy",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\arrays\\arrow\\accessors.py",
    "ast_data": "FunctionDef name:explode arg:self arguments arg Assign Return return:yes Call Call Call"
  },
  {
    "library": "pandas",
    "name": "_append_legend_handles_labels",
    "source_code": "@final\ndef _append_legend_handles_labels(self, handle: Artist, label: str) -> None:\n    self.legend_handles.append(handle)\n    self.legend_labels.append(label)",
    "docstring": "Append current handle and label to ``. These will be used to make the legend.",
    "type": "method",
    "file_path": "pandas\\pandas\\plotting\\_matplotlib\\core.py",
    "ast_data": "FunctionDef name:_append_legend_handles_labels arg:self arg:handle arg:label arguments arg arg arg Call Call"
  },
  {
    "library": "sphinx",
    "name": "_reorder_index_target_nodes",
    "source_code": "def _reorder_index_target_nodes(start_node: nodes.target) -> None:\n    nodes_to_reorder: list[nodes.target | addnodes.index] = []\n    node: nodes.Node\n    for node in start_node.findall(descend=False, siblings=True):\n        if isinstance(node, nodes.target | addnodes.index):\n            nodes_to_reorder.append(node)\n            continue\n        break\n    if len(nodes_to_reorder) < 2:\n        return\n    parent = nodes_to_reorder[0].parent\n    if parent == nodes_to_reorder[-1].parent:\n        first_idx = parent.index(nodes_to_reorder[0])\n        last_idx = parent.index(nodes_to_reorder[-1])\n        if first_idx + len(nodes_to_reorder) - 1 == last_idx:\n            parent[first_idx:last_idx + 1] = sorted(nodes_to_reorder, key=_sort_key)",
    "docstring": "Sort target and index nodes. Find all consecutive target and index nodes starting from ``, and move all index nodes to before the first target node.",
    "type": "function",
    "file_path": "sphinx\\sphinx\\transforms\\__init__.py",
    "ast_data": "FunctionDef name:_reorder_index_target_nodes arg:start_node arguments arg For Call If Call Call If Compare Call Return return:no Assign If Compare Assign Call Assign Call If Compare Call Assign Call"
  },
  {
    "library": "numpy",
    "name": "_mareconstruct",
    "source_code": "def _mareconstruct(subtype, baseclass, baseshape, basetype):\n    _data = ndarray.__new__(baseclass, baseshape, basetype)\n    _mask = ndarray.__new__(ndarray, baseshape, make_mask_descr(basetype))\n    return subtype.__new__(subtype, _data, mask=_mask, dtype=basetype)",
    "docstring": "Internal function that builds a new MaskedArray from the information stored in a pickle.",
    "type": "function",
    "file_path": "numpy\\numpy\\ma\\core.py",
    "ast_data": "FunctionDef name:_mareconstruct arg:subtype arg:baseclass arg:baseshape arg:basetype arguments arg arg arg arg Assign Call Assign Call Call Return return:yes Call"
  },
  {
    "library": "numpy",
    "name": "rename_fields",
    "source_code": "@array_function_dispatch(_rename_fields_dispatcher)\ndef rename_fields(base, namemapper):\n\n    def _recursive_rename_fields(ndtype, namemapper):\n        newdtype = []\n        for name in ndtype.names:\n            newname = namemapper.get(name, name)\n            current = ndtype[name]\n            if current.names is not None:\n                newdtype.append((newname, _recursive_rename_fields(current, namemapper)))\n            else:\n                newdtype.append((newname, current))\n        return newdtype\n    newdtype = _recursive_rename_fields(base.dtype, namemapper)\n    return base.view(newdtype)",
    "docstring": "Rename the fields from a flexible-datatype ndarray or recarray. Nested fields are supported. Parameters ---------- base : ndarray Input array whose fields must be modified. namemapper : dictionary Dictionary mapping old field names to their new version. Examples -------- >>> import numpy as np >>> from numpy.lib import recfunctions as rfn >>> a = np.array([(1, (2, [3.0, 30.])), (4, (5, [6.0, 60.]))], ... dtype=[('a', int),('b', [('ba', float), ('bb', (float, 2))])]) >>> rfn.rename_fields(a, {'a':'A', 'bb':'BB'}) array([(1, (2., [ 3., 30.])), (4, (5., [ 6., 60.]))], dtype=[('A', '<i8'), ('b', [('ba', '<f8'), ('BB', '<f8', (2,))])])",
    "type": "function",
    "file_path": "numpy\\numpy\\lib\\recfunctions.py",
    "ast_data": "FunctionDef name:rename_fields arg:base arg:namemapper arguments arg arg FunctionDef name:_recursive_rename_fields arg:ndtype arg:namemapper arguments arg arg Assign For Assign Call Assign If Compare Call Call Call Return return:yes Assign Call Return return:yes Call Call"
  },
  {
    "library": "numpy",
    "name": "_nan_mask",
    "source_code": "def _nan_mask(a, out=None):\n    if a.dtype.kind not in 'fc':\n        return True\n    y = np.isnan(a, out=out)\n    y = np.invert(y, out=y)\n    return y",
    "docstring": "Parameters ---------- a : array-like Input array with at least 1 dimension. out : ndarray, optional Alternate output array in which to place the result. The default is ``.",
    "type": "function",
    "file_path": "numpy\\numpy\\lib\\_nanfunctions_impl.py",
    "ast_data": "FunctionDef name:_nan_mask arg:a arg:out arguments arg arg If Compare Return return:yes Assign Call Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_prepend_none_dimension",
    "source_code": "def _prepend_none_dimension(features):\n    if features:\n        modified_features = dict(features)\n        for key, feature in features.items():\n            if isinstance(feature, FixedLenSequenceFeature):\n                if not feature.allow_missing:\n                    raise ValueError('Unsupported: FixedLenSequenceFeature requires allow_missing to be True.')\n                modified_features[key] = FixedLenSequenceFeature([None] + list(feature.shape), feature.dtype, feature.allow_missing, feature.default_value)\n        return modified_features\n    else:\n        return features",
    "docstring": "Returns a copy of features with adjusted FixedLenSequenceFeature shapes.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\parsing_ops.py",
    "ast_data": "FunctionDef name:_prepend_none_dimension arg:features arguments arg If Assign Call For Call If Call If Raise Call Assign Call Call Return return:yes Return return:yes"
  },
  {
    "library": "django",
    "name": "PostGISSpatialRefSys",
    "source_code": "class PostGISSpatialRefSys(models.Model, SpatialRefSysMixin):\n    srid = models.IntegerField(primary_key=True)\n    auth_name = models.CharField(max_length=256)\n    auth_srid = models.IntegerField()\n    srtext = models.CharField(max_length=2048)\n    proj4text = models.CharField(max_length=2048)\n\n    class Meta:\n        app_label = 'gis'\n        db_table = 'spatial_ref_sys'\n        managed = False\n\n    @property\n    def wkt(self):\n        return self.srtext",
    "docstring": "The 'spatial_ref_sys' table from PostGIS. See the PostGIS documentation at Ch. 4.2.1.",
    "type": "class",
    "file_path": "django\\django\\contrib\\gis\\db\\backends\\postgis\\models.py",
    "ast_data": "ClassDef name:PostGISSpatialRefSys Assign Call Assign Call Assign Call Assign Call Assign Call ClassDef name:Meta Assign Assign Assign FunctionDef name:wkt arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "enable_tensor_equality",
    "source_code": "@tf_export(v1=['enable_tensor_equality'])\ndef enable_tensor_equality():\n    logging.vlog(1, 'Enabling tensor equality')\n    _tensor_equality_api_usage_gauge.get_cell().set(True)\n    Tensor._USE_EQUALITY = True",
    "docstring": "Compare Tensors with element-wise comparison and thus be unhashable. Comparing tensors with element-wise allows comparisons such as tf.Variable(1.0) == 1.0. Element-wise equality implies that tensors are unhashable. Thus tensors can no longer be directly used in sets or as a key in a dictionary.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\tensor.py",
    "ast_data": "FunctionDef name:enable_tensor_equality arguments Call Call Call Assign Call"
  },
  {
    "library": "seaborn",
    "name": "plot_marginals",
    "source_code": "def plot_marginals(self, func, **kwargs):\n    seaborn_func = str(func.__module__).startswith('seaborn') and (not func.__name__ == 'distplot')\n    func_params = signature(func).parameters\n    kwargs = kwargs.copy()\n    if self.hue is not None:\n        kwargs['hue'] = self.hue\n        self._inject_kwargs(func, kwargs, self._hue_params)\n    if 'legend' in func_params:\n        kwargs.setdefault('legend', False)\n    if 'orientation' in func_params:\n        orient_kw_x = {'orientation': 'vertical'}\n        orient_kw_y = {'orientation': 'horizontal'}\n    elif 'vertical' in func_params:\n        orient_kw_x = {'vertical': False}\n        orient_kw_y = {'vertical': True}\n    if seaborn_func:\n        func(x=self.x, ax=self.ax_marg_x, **kwargs)\n    else:\n        plt.sca(self.ax_marg_x)\n        func(self.x, **orient_kw_x, **kwargs)\n    if seaborn_func:\n        func(y=self.y, ax=self.ax_marg_y, **kwargs)\n    else:\n        plt.sca(self.ax_marg_y)\n        func(self.y, **orient_kw_y, **kwargs)\n    self.ax_marg_x.yaxis.get_label().set_visible(False)\n    self.ax_marg_y.xaxis.get_label().set_visible(False)\n    return self",
    "docstring": "Draw univariate plots on each marginal axes. Parameters ---------- func : plotting callable If a seaborn function, it should accept `JointGrid` for easy method chaining.",
    "type": "method",
    "file_path": "seaborn\\seaborn\\axisgrid.py",
    "ast_data": "FunctionDef name:plot_marginals arg:self arg:func arguments arg arg arg Assign BoolOp Call Call Compare Assign Call Assign Call If Compare Assign Call If Compare Call If Compare Assign Assign If Compare Assign Assign If Call Call Call If Call Call Call Call Call Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "annotate",
    "source_code": "def annotate(self, model: torch.fx.GraphModule) -> torch.fx.GraphModule:\n    if self.global_config and self.global_config.input_activation.is_dynamic:\n        model = self._annotate_for_dynamic_quantization_config(model)\n    else:\n        model = self._annotate_for_static_quantization_config(model)\n    propagate_annotation(model)\n    return model",
    "docstring": "just handling global spec for now",
    "type": "method",
    "file_path": "pytorch\\torch\\ao\\quantization\\quantizer\\xnnpack_quantizer.py",
    "ast_data": "FunctionDef name:annotate arg:self arg:model arguments arg arg If BoolOp Assign Call Assign Call Call Return return:yes"
  },
  {
    "library": "scipy",
    "name": "_compute_absolute_step",
    "source_code": "def _compute_absolute_step(rel_step, x0, f0, method):\n    sign_x0 = (x0 >= 0).astype(float) * 2 - 1\n    rstep = _eps_for_method(x0.dtype, f0.dtype, method)\n    if rel_step is None:\n        abs_step = rstep * sign_x0 * np.maximum(1.0, np.abs(x0))\n    else:\n        abs_step = rel_step * sign_x0 * np.abs(x0)\n        dx = x0 + abs_step - x0\n        abs_step = np.where(dx == 0, rstep * sign_x0 * np.maximum(1.0, np.abs(x0)), abs_step)\n    return abs_step",
    "docstring": "Computes an absolute step from a relative step for finite difference calculation. Parameters ---------- rel_step: None or array-like Relative step for the finite difference calculation x0 : np.ndarray Parameter vector f0 : np.ndarray or scalar method : {'2-point', '3-point', 'cs'} Returns ------- h : float The absolute step size Notes ----- will always be np.float64. However, if or are smaller floating point dtypes (e.g. np.float32), then the absolute step size will be calculated from the smallest floating point size.",
    "type": "function",
    "file_path": "scipy\\scipy\\optimize\\_numdiff.py",
    "ast_data": "FunctionDef name:_compute_absolute_step arg:rel_step arg:x0 arg:f0 arg:method arguments arg arg arg arg Assign Call Compare Assign Call If Compare Assign Call Call Assign Call Assign Assign Call Compare Call Call Return return:yes"
  },
  {
    "library": "pandas",
    "name": "_wrap_result",
    "source_code": "def _wrap_result(name: str, data, sparse_index, fill_value, dtype: Dtype | None=None) -> SparseArray:\n    if name.startswith('__'):\n        name = name[2:-2]\n    if name in ('eq', 'ne', 'lt', 'gt', 'le', 'ge'):\n        dtype = bool\n    fill_value = lib.item_from_zerodim(fill_value)\n    if is_bool_dtype(dtype):\n        fill_value = bool(fill_value)\n    return SparseArray(data, sparse_index=sparse_index, fill_value=fill_value, dtype=dtype)",
    "docstring": "wrap op result to have correct dtype",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\arrays\\sparse\\array.py",
    "ast_data": "FunctionDef name:_wrap_result arg:name arg:data arg:sparse_index arg:fill_value arg:dtype arguments arg arg arg arg arg If Call Assign If Compare Assign Assign Call If Call Assign Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "add_issue_labels",
    "source_code": "def add_issue_labels(self, repo: str, issue_number: int, labels: list[str]) -> requests.Response:\n    endpoint = f'repos/{repo}/issues/{issue_number}/labels'\n    return self._make_request('POST', endpoint, labels=labels)",
    "docstring": "Adds labels to an issue (or PR). Arguments: repo: a string of the form , e.g. openxla/xla issue_number: the issue (or PR) to set the status of labels: the labels to add to the issue Returns: a requests.Response object containing the response from the API. Raises: requests.exceptions.HTTPError",
    "type": "method",
    "file_path": "tensorflow\\third_party\\xla\\.github\\workflows\\github_api.py",
    "ast_data": "FunctionDef name:add_issue_labels arg:self arg:repo arg:issue_number arg:labels arguments arg arg arg arg Assign Return return:yes Call"
  },
  {
    "library": "kornia",
    "name": "DeFMO",
    "source_code": "class DeFMO(Module):\n\n    def __init__(self, pretrained: bool=False) -> None:\n        super().__init__()\n        self.encoder = EncoderDeFMO()\n        self.rendering = RenderingDeFMO()\n        if pretrained:\n            pretrained_dict = torch.hub.load_state_dict_from_url(urls['defmo_encoder'], map_location=torch.device('cpu'))\n            self.encoder.load_state_dict(pretrained_dict, strict=True)\n            pretrained_dict_ren = torch.hub.load_state_dict_from_url(urls['defmo_rendering'], map_location=torch.device('cpu'))\n            self.rendering.load_state_dict(pretrained_dict_ren, strict=True)\n        self.eval()\n\n    def forward(self, input_data: Tensor) -> Tensor:\n        latent = self.encoder(input_data)\n        x_out = self.rendering(latent)\n        return x_out",
    "docstring": "Module that disentangle a fast-moving object from the background and performs deblurring. This is based on the original code from paper \"DeFMO: Deblurring and Shape Recovery of Fast Moving Objects\". See :cite: for more details. Args: pretrained: Download and set pretrained weights to the model. Default: false. Returns: Temporal super-resolution without background. Shape: - Input: (B, 6, H, W) - Output: (B, S, 4, H, W) Examples: >>> import kornia >>> input = torch.rand(2, 6, 240, 320) >>> defmo = kornia.feature.DeFMO() >>> tsr_nobgr = defmo(input) # 2x24x4x240x320",
    "type": "class",
    "file_path": "kornia\\kornia\\feature\\defmo.py",
    "ast_data": "ClassDef name:DeFMO FunctionDef name:__init__ arg:self arg:pretrained arguments arg arg Call Call Assign Call Assign Call If Assign Call Call Call Assign Call Call Call Call FunctionDef name:forward arg:self arg:input_data arguments arg arg Assign Call Assign Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "get_width",
    "source_code": "def get_width(self):\n    return self._width",
    "docstring": "Return the width of the ellipse.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\patches.py",
    "ast_data": "FunctionDef name:get_width arg:self arguments arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_pre_load_state_dict_hook",
    "source_code": "@staticmethod\ndef _pre_load_state_dict_hook(module: nn.Module, state_dict: dict[str, Any], prefix: str, *args: Any) -> None:\n    _replace_by_prefix(state_dict, prefix, prefix + f'{_CHECKPOINT_PREFIX}')",
    "docstring": "` is called before ``, it will add back the module prefix so that non-checkpointed modules can be loaded into checkpoint_wrapper modules properly.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\algorithms\\_checkpoint\\checkpoint_wrapper.py",
    "ast_data": "FunctionDef name:_pre_load_state_dict_hook arg:module arg:state_dict arg:prefix arguments arg arg arg arg Call"
  },
  {
    "library": "scikit-learn",
    "name": "transform",
    "source_code": "def transform(self, X):\n    check_is_fitted(self)\n    X = validate_data(self, X, accept_sparse=('csr', 'csc'), dtype=[np.float64, np.float32], reset=False)\n    W = self._solve_W(X, self.components_, self._transform_max_iter)\n    return W",
    "docstring": "Transform the data X according to the fitted MiniBatchNMF model. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) Data matrix to be transformed by the model. Returns ------- W : ndarray of shape (n_samples, n_components) Transformed data.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\decomposition\\_nmf.py",
    "ast_data": "FunctionDef name:transform arg:self arg:X arguments arg arg Call Assign Call Assign Call Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "_serialize",
    "source_code": "def _serialize(self):\n    res = dict()\n    if self._self_request:\n        res['$self_request'] = self._self_request._serialize()\n    for name, route_mapping in self._route_mappings.items():\n        res[name] = dict()\n        res[name]['mapping'] = route_mapping.mapping._serialize()\n        res[name]['router'] = route_mapping.router._serialize()\n    return res",
    "docstring": "Serialize the object. Returns ------- obj : dict A serialized version of the instance in the form of a dictionary.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\utils\\_metadata_requests.py",
    "ast_data": "FunctionDef name:_serialize arg:self arguments arg Assign Call If Assign Call For Call Assign Call Assign Call Assign Call Return return:yes"
  },
  {
    "library": "cryptography",
    "name": "public_numbers",
    "source_code": "@abc.abstractmethod\ndef public_numbers(self) -> EllipticCurvePublicNumbers:\n    pass",
    "docstring": "Returns an EllipticCurvePublicNumbers.",
    "type": "method",
    "file_path": "cryptography\\src\\cryptography\\hazmat\\primitives\\asymmetric\\ec.py",
    "ast_data": "FunctionDef name:public_numbers arg:self arguments arg"
  },
  {
    "library": "tensorflow",
    "name": "has_deprecation_decorator",
    "source_code": "def has_deprecation_decorator(symbol):\n    decorators, symbol = tf_decorator.unwrap(symbol)\n    if contains_deprecation_decorator(decorators):\n        return True\n    if tf_inspect.isfunction(symbol):\n        return False\n    if not tf_inspect.isclass(symbol):\n        return False\n    if not hasattr(symbol, '__init__'):\n        return False\n    init_decorators, _ = tf_decorator.unwrap(symbol.__init__)\n    return contains_deprecation_decorator(init_decorators)",
    "docstring": "Checks if given object has a deprecation decorator. We check if deprecation decorator is in decorators as well as whether symbol is a class whose __init__ method has a deprecation decorator. Args: symbol: Python object. Returns: True if symbol has deprecation decorator.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\util\\module_wrapper.py",
    "ast_data": "FunctionDef name:has_deprecation_decorator arg:symbol arguments arg Assign Call If Call Return return:yes If Call Return return:yes If Call Return return:yes If Call Return return:yes Assign Call Return return:yes Call"
  },
  {
    "library": "numpy",
    "name": "ordered",
    "source_code": "@staticmethod\n@memoize\ndef ordered(size, dtype, rnd):\n    return np.arange(size, dtype=dtype)",
    "docstring": "Returns an ordered array.",
    "type": "method",
    "file_path": "numpy\\benchmarks\\benchmarks\\bench_function_base.py",
    "ast_data": "FunctionDef name:ordered arg:size arg:dtype arg:rnd arguments arg arg arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "is_broadcast_compatible",
    "source_code": "def is_broadcast_compatible(shape_x, shape_y):\n    if shape_x.ndims is None or shape_y.ndims is None:\n        return False\n    return _broadcast_shape_helper(shape_x, shape_y) is not None",
    "docstring": "Returns True if and are broadcast compatible. Args: shape_x: A shape_y: A Returns: True if a shape exists that both and can be broadcasted to. False otherwise.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\common_shapes.py",
    "ast_data": "FunctionDef name:is_broadcast_compatible arg:shape_x arg:shape_y arguments arg arg If BoolOp Compare Compare Return return:yes Return return:yes Compare Call"
  },
  {
    "library": "pandas",
    "name": "recode_for_categories",
    "source_code": "def recode_for_categories(codes: np.ndarray, old_categories, new_categories, copy: bool=True) -> np.ndarray:\n    if len(old_categories) == 0:\n        if copy:\n            return codes.copy()\n        return codes\n    elif new_categories.equals(old_categories):\n        if copy:\n            return codes.copy()\n        return codes\n    indexer = coerce_indexer_dtype(new_categories.get_indexer_for(old_categories), new_categories)\n    new_codes = take_nd(indexer, codes, fill_value=-1)\n    return new_codes",
    "docstring": "Convert a set of codes for to a new set of categories Parameters ---------- codes : np.ndarray old_categories, new_categories : Index copy: bool, default True Whether to copy if the codes are unchanged. Returns ------- new_codes : np.ndarray[np.int64] Examples -------- >>> old_cat = pd.Index([\"b\", \"a\", \"c\"]) >>> new_cat = pd.Index([\"a\", \"b\"]) >>> codes = np.array([0, 1, 1, 2]) >>> recode_for_categories(codes, old_cat, new_cat) array([ 1, 0, 0, -1], dtype=int8)",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\arrays\\categorical.py",
    "ast_data": "FunctionDef name:recode_for_categories arg:codes arg:old_categories arg:new_categories arg:copy arguments arg arg arg arg If Compare Call If Return return:yes Call Return return:yes If Call If Return return:yes Call Return return:yes Assign Call Call Assign Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "preset_metadata_json",
    "source_code": "def preset_metadata_json(self, key: str, value: str):\n    self.preset_metadata[key] = value",
    "docstring": "Preset a user defined metadata when the profiler is not started and added into the trace file later. Metadata is in the format of a string key and a valid json value",
    "type": "method",
    "file_path": "pytorch\\torch\\profiler\\profiler.py",
    "ast_data": "FunctionDef name:preset_metadata_json arg:self arg:key arg:value arguments arg arg arg Assign"
  },
  {
    "library": "pandas",
    "name": "update_info",
    "source_code": "def update_info(self, info) -> None:\n    for key in self._info_fields:\n        value = getattr(self, key, None)\n        idx = info.setdefault(self.name, {})\n        existing_value = idx.get(key)\n        if key in idx and value is not None and (existing_value != value):\n            if key in ['freq', 'index_name']:\n                ws = attribute_conflict_doc % (key, existing_value, value)\n                warnings.warn(ws, AttributeConflictWarning, stacklevel=find_stack_level())\n                idx[key] = None\n                setattr(self, key, None)\n            else:\n                raise ValueError(f'invalid info for [{self.name}] for [{key}], existing_value [{existing_value}] conflicts with new value [{value}]')\n        elif value is not None or existing_value is not None:\n            idx[key] = value",
    "docstring": "set/update the info for this indexable with the key/value if there is a conflict raise/warn as needed",
    "type": "method",
    "file_path": "pandas\\pandas\\io\\pytables.py",
    "ast_data": "FunctionDef name:update_info arg:self arg:info arguments arg arg For Assign Call Assign Call Assign Call If BoolOp Compare Compare Compare If Compare Assign Call Call Assign Call Raise Call If BoolOp Compare Compare Assign"
  },
  {
    "library": "django",
    "name": "_topology",
    "source_code": "def _topology(self, gptr):\n    return GEOSGeometry(gptr, srid=self.srid)",
    "docstring": "Return Geometry from the given pointer.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\geos\\geometry.py",
    "ast_data": "FunctionDef name:_topology arg:self arg:gptr arguments arg arg Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "__init__",
    "source_code": "def __init__(self, vals, rows, cols, shape):\n    self.n, self.m = shape\n    self.vals = np.asarray(vals, dtype=np.float64)\n    self.rows = np.asarray(rows, dtype=np.int32)\n    self.cols = np.asarray(cols, dtype=np.int32)",
    "docstring": "Create a sparse matrix in coo format. *vals*: arrays of values of non-null entries of the matrix *rows*: int arrays of rows of non-null entries of the matrix *cols*: int arrays of cols of non-null entries of the matrix *shape*: 2-tuple (n, m) of matrix shape",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\tri\\_triinterpolate.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:vals arg:rows arg:cols arg:shape arguments arg arg arg arg arg Assign Assign Call Assign Call Assign Call"
  },
  {
    "library": "tensorflow",
    "name": "concatenate",
    "source_code": "@dispatch.add_dispatch_support\n@doc_controls.do_not_generate_docs\ndef concatenate(tensors, axis=-1):\n    if axis < 0:\n        rank = ndim(tensors[0])\n        if rank:\n            axis %= rank\n        else:\n            axis = 0\n    if py_all((is_sparse(x) for x in tensors)):\n        return sparse_ops.sparse_concat(axis, tensors)\n    elif py_all((isinstance(x, ragged_tensor.RaggedTensor) for x in tensors)):\n        return array_ops.concat(tensors, axis)\n    else:\n        return array_ops.concat([to_dense(x) for x in tensors], axis)",
    "docstring": "Concatenates a list of tensors alongside the specified axis. Args: tensors: list of tensors to concatenate. axis: concatenation axis. Returns: A tensor. Example: >>> a = tf.constant([[1, 2, 3], [4, 5, 6], [7, 8, 9]]) >>> b = tf.constant([[10, 20, 30], [40, 50, 60], [70, 80, 90]]) >>> tf.keras.backend.concatenate((a, b), axis=-1)",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\backend.py",
    "ast_data": "FunctionDef name:concatenate arg:tensors arg:axis arguments arg arg If Compare Assign Call If Assign If Call Call Return return:yes Call If Call Call Return return:yes Call Return return:yes Call Call"
  },
  {
    "library": "django",
    "name": "linear_units",
    "source_code": "@property\ndef linear_units(self):\n    units, name = capi.linear_units(self.ptr, byref(c_char_p()))\n    return units",
    "docstring": "Return the value of the linear units.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\gdal\\srs.py",
    "ast_data": "FunctionDef name:linear_units arg:self arguments arg Assign Call Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "get_memory_growth",
    "source_code": "def get_memory_growth(self, dev):\n    self._initialize_physical_devices()\n    if dev not in self._physical_devices:\n        raise ValueError('Unrecognized device: %s' % repr(dev))\n    return self._memory_growth_map[dev]",
    "docstring": "Get if memory growth is enabled for a PhysicalDevice.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\context.py",
    "ast_data": "FunctionDef name:get_memory_growth arg:self arg:dev arguments arg arg Call If Compare Raise Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_should_compress",
    "source_code": "def _should_compress(num_rows, num_cols, matrix_approximation_rank, min_compression_rate):\n    uncompressed_size = num_rows * num_cols\n    compressed_size = (num_rows + num_cols) * matrix_approximation_rank\n    return (compressed_size * min_compression_rate < uncompressed_size, uncompressed_size, compressed_size)",
    "docstring": "Recommend if tensor given is worth compressing. Returns a recommendation as to whether the 2D tensor described by the arguments is worth compressing, including statistics describing the expected savings from compression. We consider a tensor worth compressing when ``.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\algorithms\\ddp_comm_hooks\\powerSGD_hook.py",
    "ast_data": "FunctionDef name:_should_compress arg:num_rows arg:num_cols arg:matrix_approximation_rank arg:min_compression_rate arguments arg arg arg arg Assign Assign Return return:yes Compare"
  },
  {
    "library": "tensorflow",
    "name": "_Inputs",
    "source_code": "def _Inputs(op: ops.Operation, xs_set):\n    if _IsFunction(op.graph):\n        inputs = []\n        for t in op.inputs:\n            if t not in xs_set:\n                t = _MaybeCaptured(t)\n            inputs.append(t)\n        return inputs\n    else:\n        return op.inputs",
    "docstring": "Returns the inputs of op, crossing closure boundaries where necessary. Args: op: Operation xs_set: ObjectIdentitySet of Tensors we are differentiating w.r.t. Returns: A list of tensors. The tensors may be from multiple Graph/FuncGraphs if op is in a FuncGraph and has captured inputs.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\gradients_util.py",
    "ast_data": "FunctionDef name:_Inputs arg:op arg:xs_set arguments arg arg If Call Assign For If Compare Assign Call Call Return return:yes Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_to_proto_fn",
    "source_code": "def _to_proto_fn(v, export_scope=None):\n    return v.to_proto(export_scope=export_scope)",
    "docstring": "Converts Variable and ResourceVariable to VariableDef for collections.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\ref_variable.py",
    "ast_data": "FunctionDef name:_to_proto_fn arg:v arg:export_scope arguments arg arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "_insert_reshard_gm",
    "source_code": "def _insert_reshard_gm(gm: torch.fx.GraphModule, node: Node, input_arg: Node, input_arg_spec: DTensorSpec, desired_spec: DTensorSpec) -> None:\n    input_arg_spec.tensor_meta = input_arg.meta['tensor_meta']\n    desired_spec.tensor_meta = input_arg.meta['tensor_meta']\n    input_arg_tensor = input_arg.meta['val']\n\n    def reshard_fn(local_tensor: torch.Tensor) -> torch.Tensor:\n        return redistribute_local_tensor(local_tensor, input_arg_spec, desired_spec)\n    reshard_gm = make_fx(reshard_fn)(input_arg_tensor)\n    reshard_gm_nodes = list(reshard_gm.graph.nodes)\n    input_node = reshard_gm_nodes[0]\n    with gm.graph.inserting_before(node):\n        for reshard_node in reshard_gm.graph.nodes:\n            if reshard_node.op not in ['placeholder', 'output']:\n                reshard_node.meta['nn_module_stack'] = copy.copy(input_arg.meta['nn_module_stack']) if not input_arg.op == 'placeholder' else copy.copy(node.meta['nn_module_stack'])\n        output_node = gm.graph.graph_copy(reshard_gm.graph, val_map={input_node: input_arg})\n    node.replace_input_with(input_arg, output_node)",
    "docstring": "Transform the graph for tensor redistribution.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\tensor\\experimental\\_tp_transform.py",
    "ast_data": "FunctionDef name:_insert_reshard_gm arg:gm arg:node arg:input_arg arg:input_arg_spec arg:desired_spec arguments arg arg arg arg arg Assign Assign Assign FunctionDef name:reshard_fn arg:local_tensor arguments arg Return return:yes Call Assign Call Call Assign Call Assign With Call For If Compare Assign Compare Call Call Assign Call Call"
  },
  {
    "library": "tensorflow",
    "name": "TangentInfo",
    "source_code": "class TangentInfo(collections.namedtuple('TangentInfo', ['indices', 'tangents'])):\n\n    def __new__(cls, indices=None, tangents=None):\n        if indices is None:\n            indices = ()\n        if tangents is None:\n            tangents = []\n        return super(TangentInfo, cls).__new__(cls, indices, tangents)",
    "docstring": "Packed forward accumulator state. The return value of .",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\forwardprop_util.py",
    "ast_data": "ClassDef name:TangentInfo Call FunctionDef name:__new__ arg:cls arg:indices arg:tangents arguments arg arg arg If Compare Assign If Compare Assign Return return:yes Call Call"
  },
  {
    "library": "django",
    "name": "serve",
    "source_code": "def serve(request, path, document_root=None, show_indexes=False):\n    path = posixpath.normpath(path).lstrip('/')\n    fullpath = Path(safe_join(document_root, path))\n    if fullpath.is_dir():\n        if show_indexes:\n            return directory_index(path, fullpath)\n        raise Http404(_('Directory indexes are not allowed here.'))\n    if not fullpath.exists():\n        raise Http404(_('“%(path)s” does not exist') % {'path': fullpath})\n    statobj = fullpath.stat()\n    if not was_modified_since(request.META.get('HTTP_IF_MODIFIED_SINCE'), statobj.st_mtime):\n        return HttpResponseNotModified()\n    content_type, encoding = mimetypes.guess_type(str(fullpath))\n    content_type = content_type or 'application/octet-stream'\n    response = FileResponse(fullpath.open('rb'), content_type=content_type)\n    response.headers['Last-Modified'] = http_date(statobj.st_mtime)\n    if encoding:\n        response.headers['Content-Encoding'] = encoding\n    return response",
    "docstring": "Serve static files below a given point in the directory structure. To use, put a URL pattern such as:: from django.views.static import serve path('', serve, {'document_root': '/path/to/my/files/'}) in your URLconf. You must provide the ``.",
    "type": "function",
    "file_path": "django\\django\\views\\static.py",
    "ast_data": "FunctionDef name:serve arg:request arg:path arg:document_root arg:show_indexes arguments arg arg arg arg Assign Call Call Assign Call Call If Call If Return return:yes Call Raise Call Call If Call Raise Call Call Assign Call If Call Call Return return:yes Call Assign Call Call Assign BoolOp Assign Call Call Assign Call If Assign Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "__enter__",
    "source_code": "def __enter__(self) -> 'MetricsContext':\n    if self._level == 0:\n        self._metrics = {}\n        self._start_time_ns = time.time_ns()\n    self._level += 1\n    return self",
    "docstring": "Initialize metrics recording.",
    "type": "method",
    "file_path": "pytorch\\torch\\_dynamo\\metrics_context.py",
    "ast_data": "FunctionDef name:__enter__ arg:self arguments arg If Compare Assign Assign Call Return return:yes"
  },
  {
    "library": "pandas",
    "name": "PythonEngine",
    "source_code": "class PythonEngine(AbstractEngine):\n    has_neg_frac = False\n\n    def evaluate(self):\n        return self.expr()\n\n    def _evaluate(self) -> None:\n        pass",
    "docstring": "Evaluate an expression in Python space. Mostly for testing purposes.",
    "type": "class",
    "file_path": "pandas\\pandas\\core\\computation\\engines.py",
    "ast_data": "ClassDef name:PythonEngine Assign FunctionDef name:evaluate arg:self arguments arg Return return:yes Call FunctionDef name:_evaluate arg:self arguments arg"
  },
  {
    "library": "matplotlib",
    "name": "_parse_optional",
    "source_code": "def _parse_optional(fh):\n    optional = {b'StartKernData': _parse_kern_pairs, b'StartComposites': _parse_composites}\n    d = {b'StartKernData': {}, b'StartComposites': {}}\n    for line in fh:\n        line = line.rstrip()\n        if not line:\n            continue\n        key = line.split()[0]\n        if key in optional:\n            d[key] = optional[key](fh)\n    return (d[b'StartKernData'], d[b'StartComposites'])",
    "docstring": "Parse the optional fields for kern pair data and composites. Returns ------- kern_data : dict A dict containing kerning information. May be empty. See . composites : dict A dict containing composite information. May be empty. See .",
    "type": "function",
    "file_path": "matplotlib\\lib\\matplotlib\\_afm.py",
    "ast_data": "FunctionDef name:_parse_optional arg:fh arguments arg Assign Assign For Assign Call If Assign Call If Compare Assign Call Return return:yes"
  },
  {
    "library": "scipy",
    "name": "Gear",
    "source_code": "class Gear(Benchmark):\n\n    def __init__(self, dimensions=4):\n        Benchmark.__init__(self, dimensions)\n        self._bounds = list(zip([12.0] * self.N, [60.0] * self.N))\n        self.global_optimum = [[16, 19, 43, 49]]\n        self.fglob = 2.7e-12\n\n    def fun(self, x, *args):\n        self.nfev += 1\n        return (1.0 / 6.931 - floor(x[0]) * floor(x[1]) / floor(x[2]) / floor(x[3])) ** 2",
    "docstring": "Gear objective function. This class defines the Gear [1]_ global optimization problem. This is a multimodal minimization problem defined as follows: .. math:: f_{\\text{Gear}}({x}) = \\left \\{ \\frac{1.0}{6.931} - \\frac{\\lfloor x_1\\rfloor \\lfloor x_2 \\rfloor } {\\lfloor x_3 \\rfloor \\lfloor x_4 \\rfloor } \\right\\}^2 with :math: for :math:. *Global optimum*: :math: for :math:, where the various :math: may be permuted. .. [1] Gavana, A. Global Optimization Benchmarks and AMPGO retrieved 2015",
    "type": "class",
    "file_path": "scipy\\benchmarks\\benchmarks\\go_benchmark_functions\\go_funcs_G.py",
    "ast_data": "ClassDef name:Gear FunctionDef name:__init__ arg:self arg:dimensions arguments arg arg Call Assign Call Call Assign Assign FunctionDef name:fun arg:self arg:x arguments arg arg arg Return return:yes Call Call Call Call"
  },
  {
    "library": "scrapy",
    "name": "SettingsAttribute",
    "source_code": "class SettingsAttribute:\n\n    def __init__(self, value: Any, priority: int):\n        self.value: Any = value\n        self.priority: int\n        if isinstance(self.value, BaseSettings):\n            self.priority = max(self.value.maxpriority(), priority)\n        else:\n            self.priority = priority\n\n    def set(self, value: Any, priority: int) -> None:\n        if priority >= self.priority:\n            if isinstance(self.value, BaseSettings):\n                value = BaseSettings(value, priority=priority)\n            self.value = value\n            self.priority = priority\n\n    def __repr__(self) -> str:\n        return f'<SettingsAttribute value={self.value!r} priority={self.priority}>'",
    "docstring": "Class for storing data related to settings attributes. This class is intended for internal usage, you should try Settings class for settings configuration, not this one.",
    "type": "class",
    "file_path": "scrapy\\scrapy\\settings\\__init__.py",
    "ast_data": "ClassDef name:SettingsAttribute FunctionDef name:__init__ arg:self arg:value arg:priority arguments arg arg arg If Call Assign Call Call Assign FunctionDef name:set arg:self arg:value arg:priority arguments arg arg arg If Compare If Call Assign Call Assign Assign FunctionDef name:__repr__ arg:self arguments arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "move_constructors_to_gpu",
    "source_code": "def move_constructors_to_gpu(graph: fx.Graph) -> None:\n    ConstructorMoverPass(get_gpu_type())(graph)",
    "docstring": "Moves intermediary tensors which are constructed on the cpu to gpu when safe",
    "type": "function",
    "file_path": "pytorch\\torch\\_inductor\\fx_passes\\post_grad.py",
    "ast_data": "FunctionDef name:move_constructors_to_gpu arg:graph arguments arg Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_combine_handle_data",
    "source_code": "def _combine_handle_data(handle, initial_value):\n    assert handle.dtype == dtypes.resource\n    variable_handle_data = get_eager_safe_handle_data(handle)\n    if initial_value.dtype != dtypes.variant:\n        return variable_handle_data\n    extra_handle_data = get_eager_safe_handle_data(initial_value)\n    if extra_handle_data is not None and extra_handle_data.is_set:\n        if variable_handle_data is None or not variable_handle_data.is_set or len(variable_handle_data.shape_and_type) != 1:\n            raise RuntimeError(f\"Expected VarHandleOp to return a length==1 shape_and_type, but saw: '{variable_handle_data}'\")\n        variable_handle_data.shape_and_type.extend(extra_handle_data.shape_and_type)\n    return variable_handle_data",
    "docstring": "Concats HandleData from tensors and . Args: handle: A of dtype . initial_value: A . Returns: A . If has dtype , the contains the concatenation of the shape_and_type from both and . Raises: RuntimeError: If handle, which was returned by VarHandleOp, either has no handle data, or its len(handle_data.shape_and_type) != 1.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\resource_variable_ops.py",
    "ast_data": "FunctionDef name:_combine_handle_data arg:handle arg:initial_value arguments arg arg Compare Assign Call If Compare Return return:yes Assign Call If BoolOp Compare If BoolOp Compare Compare Call Raise Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_create_module_name_filter",
    "source_code": "def _create_module_name_filter(module_name: str) -> FilterFn:\n    filter_fn = _get_module_name_filter(module_name)\n\n    def check_all_nodes_from_module(nodes: list[Node]) -> bool:\n        all_nodes_from_module_name: bool = all((filter_fn(n) for n in nodes))\n        return all_nodes_from_module_name\n    return check_all_nodes_from_module",
    "docstring": "Create a filter function for a given module name. The filter function takes a list of nodes (as determined by the annotate function) and return True if *all* nodes come from the specified module name, False otherwise. For example: linear_1: \"f32[3, 10]\" = torch.ops.aten.linear.default(...) # comes from a module with name relu: \"f32[3, 10]\" = torch.ops.aten.relu.default(linear_1); # comes from a module with name >> module_name_filter = _create_module_name_filter_inner(\"sub\") >> print(module_name_filter([relu, linear_1])) # True # These two nodes are determined by function and from \"sub\".",
    "type": "function",
    "file_path": "pytorch\\torch\\ao\\quantization\\quantizer\\x86_inductor_quantizer.py",
    "ast_data": "FunctionDef name:_create_module_name_filter arg:module_name arguments arg Assign Call FunctionDef name:check_all_nodes_from_module arg:nodes arguments arg Call Call Return return:yes Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "validate_args",
    "source_code": "@property\ndef validate_args(self):\n    return self._validate_args",
    "docstring": "Python indicating possibly expensive checks are enabled.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\distributions\\distribution.py",
    "ast_data": "FunctionDef name:validate_args arg:self arguments arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "average_parameters",
    "source_code": "def average_parameters(self, params: Union[Iterable[torch.nn.Parameter], Iterable[dict[str, torch.nn.Parameter]]]):\n    if self.step >= self.warmup_steps:\n        group = self._find_process_group()\n        if group is not None:\n            utils.average_parameters_or_parameter_groups(params, group)\n    self.step += 1",
    "docstring": "Averages parameters or parameter groups of an optimizer. Averaging only occurs if ``, only the largest period is used, and the corresponding process group is used for averaging parameters. Args: params: The parameters of a model or parameter groups of an optimizer.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\algorithms\\model_averaging\\hierarchical_model_averager.py",
    "ast_data": "FunctionDef name:average_parameters arg:self arg:params arguments arg arg If Compare Assign Call If Compare Call"
  },
  {
    "library": "django",
    "name": "OutputWrapper",
    "source_code": "class OutputWrapper:\n\n    @property\n    def style_func(self):\n        return self._style_func\n\n    @style_func.setter\n    def style_func(self, style_func):\n        if style_func and self.isatty():\n            self._style_func = style_func\n        else:\n            self._style_func = lambda x: x\n\n    def __init__(self, out, ending='\\n'):\n        self._out = out\n        self.style_func = None\n        self.ending = ending\n\n    def __getattr__(self, name):\n        return getattr(self._out, name)\n\n    def flush(self):\n        if hasattr(self._out, 'flush'):\n            self._out.flush()\n\n    def isatty(self):\n        return hasattr(self._out, 'isatty') and self._out.isatty()\n\n    def write(self, msg='', style_func=None, ending=None):\n        ending = self.ending if ending is None else ending\n        if ending and (not msg.endswith(ending)):\n            msg += ending\n        style_func = style_func or self.style_func\n        self._out.write(style_func(msg))",
    "docstring": "Wrapper around stdout/stderr",
    "type": "class",
    "file_path": "django\\django\\core\\management\\base.py",
    "ast_data": "ClassDef name:OutputWrapper FunctionDef name:style_func arg:self arguments arg Return return:yes FunctionDef name:style_func arg:self arg:style_func arguments arg arg If BoolOp Call Assign Assign arguments arg FunctionDef name:__init__ arg:self arg:out arg:ending arguments arg arg arg Assign Assign Assign FunctionDef name:__getattr__ arg:self arg:name arguments arg arg Return return:yes Call FunctionDef name:flush arg:self arguments arg If Call Call FunctionDef name:isatty arg:self arguments arg Return return:yes BoolOp Call Call FunctionDef name:write arg:self arg:msg arg:style_func arg:ending arguments arg arg arg arg Assign Compare If BoolOp Call Assign BoolOp Call Call"
  },
  {
    "library": "kornia",
    "name": "rotate_half",
    "source_code": "def rotate_half(x: Tensor) -> Tensor:\n    x = x.unflatten(-1, (-1, 2))\n    x1, x2 = x.unbind(dim=-1)\n    return stack((-x2, x1), dim=-1).flatten(start_dim=-2)",
    "docstring": "Apply half rotation.",
    "type": "function",
    "file_path": "kornia\\kornia\\feature\\lightglue.py",
    "ast_data": "FunctionDef name:rotate_half arg:x arguments arg Assign Call Assign Call Return return:yes Call Call"
  },
  {
    "library": "scipy",
    "name": "alpha_gen",
    "source_code": "class alpha_gen(rv_continuous):\n    _support_mask = rv_continuous._open_support_mask\n\n    def _shape_info(self):\n        return [_ShapeInfo('a', False, (0, np.inf), (False, False))]\n\n    def _pdf(self, x, a):\n        return 1.0 / x ** 2 / _norm_cdf(a) * _norm_pdf(a - 1.0 / x)\n\n    def _logpdf(self, x, a):\n        return -2 * np.log(x) + _norm_logpdf(a - 1.0 / x) - np.log(_norm_cdf(a))\n\n    def _cdf(self, x, a):\n        return _norm_cdf(a - 1.0 / x) / _norm_cdf(a)\n\n    def _ppf(self, q, a):\n        return 1.0 / np.asarray(a - _norm_ppf(q * _norm_cdf(a)))\n\n    def _stats(self, a):\n        return [np.inf] * 2 + [np.nan] * 2",
    "docstring": "An alpha continuous random variable. %(before_notes)s Notes ----- The probability density function for ([1]_, [2]_) is: .. math:: f(x, a) = \\frac{1}{x^2 \\Phi(a) \\sqrt{2\\pi}} * \\exp(-\\frac{1}{2} (a-1/x)^2) where :math: is the normal CDF, :math:, and :math:. takes `` as a shape parameter. %(after_notes)s References ---------- .. [1] Johnson, Kotz, and Balakrishnan, \"Continuous Univariate Distributions, Volume 1\", Second Edition, John Wiley and Sons, p. 173 (1994). .. [2] Anthony A. Salvia, \"Reliability applications of the Alpha Distribution\", IEEE Transactions on Reliability, Vol. R-34, No. 3, pp. 251-252 (1985). %(example)s",
    "type": "class",
    "file_path": "scipy\\scipy\\stats\\_continuous_distns.py",
    "ast_data": "ClassDef name:alpha_gen Assign FunctionDef name:_shape_info arg:self arguments arg Return return:yes Call FunctionDef name:_pdf arg:self arg:x arg:a arguments arg arg arg Return return:yes Call Call FunctionDef name:_logpdf arg:self arg:x arg:a arguments arg arg arg Return return:yes Call Call Call Call FunctionDef name:_cdf arg:self arg:x arg:a arguments arg arg arg Return return:yes Call Call FunctionDef name:_ppf arg:self arg:q arg:a arguments arg arg arg Return return:yes Call Call Call FunctionDef name:_stats arg:self arg:a arguments arg arg Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "fit",
    "source_code": "@_fit_context(prefer_skip_nested_validation=True)\ndef fit(self, X, y=None):\n    X = validate_data(self, X, accept_sparse='csr', ensure_non_negative=True)\n    if self.sample_interval is None and self.sample_steps not in (1, 2, 3):\n        raise ValueError('If sample_steps is not in [1, 2, 3], you need to provide sample_interval')\n    return self",
    "docstring": "Only validates estimator's parameters. This method allows to: (i) validate the estimator's parameters and (ii) be consistent with the scikit-learn transformer API. Parameters ---------- X : array-like, shape (n_samples, n_features) Training data, where is the number of samples and is the number of features. y : array-like, shape (n_samples,) or (n_samples, n_outputs), default=None Target values (None for unsupervised transformations). Returns ------- self : object Returns the transformer.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\kernel_approximation.py",
    "ast_data": "FunctionDef name:fit arg:self arg:X arg:y arguments arg arg arg Assign Call If BoolOp Compare Compare Raise Call Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "get_pickradius",
    "source_code": "def get_pickradius(self):\n    return self._pickradius",
    "docstring": "Return the pick radius used for containment tests. See for more details.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\lines.py",
    "ast_data": "FunctionDef name:get_pickradius arg:self arguments arg Return return:yes"
  },
  {
    "library": "scrapy",
    "name": "Sitemap",
    "source_code": "class Sitemap:\n\n    def __init__(self, xmltext: str | bytes):\n        xmlp = lxml.etree.XMLParser(recover=True, remove_comments=True, resolve_entities=False)\n        self._root = lxml.etree.fromstring(xmltext, parser=xmlp)\n        rt = self._root.tag\n        assert isinstance(rt, str)\n        self.type = rt.split('}', 1)[1] if '}' in rt else rt\n\n    def __iter__(self) -> Iterator[dict[str, Any]]:\n        for elem in self._root.getchildren():\n            d: dict[str, Any] = {}\n            for el in elem.getchildren():\n                tag = el.tag\n                assert isinstance(tag, str)\n                name = tag.split('}', 1)[1] if '}' in tag else tag\n                if name == 'link':\n                    if 'href' in el.attrib:\n                        d.setdefault('alternate', []).append(el.get('href'))\n                else:\n                    d[name] = el.text.strip() if el.text else ''\n            if 'loc' in d:\n                yield d",
    "docstring": "Class to parse Sitemap (type=urlset) and Sitemap Index (type=sitemapindex) files",
    "type": "class",
    "file_path": "scrapy\\scrapy\\utils\\sitemap.py",
    "ast_data": "ClassDef name:Sitemap FunctionDef name:__init__ arg:self arg:xmltext arguments arg arg Assign Call Assign Call Assign Call Assign Compare Call FunctionDef name:__iter__ arg:self arguments arg For Call For Call Assign Call Assign Compare Call If Compare If Compare Call Call Call Assign Call If Compare"
  },
  {
    "library": "pytorch",
    "name": "intern",
    "source_code": "def intern(self, include: 'GlobPattern', *, exclude: 'GlobPattern'=(), allow_empty: bool=True):\n    self.patterns[GlobGroup(include, exclude=exclude)] = _PatternInfo(_ModuleProviderAction.INTERN, allow_empty)",
    "docstring": "Specify modules that should be packaged. A module must match some `mockclose`, no such exception is thrown.",
    "type": "method",
    "file_path": "pytorch\\torch\\package\\package_exporter.py",
    "ast_data": "FunctionDef name:intern arg:self arg:include arguments arg arg arg arg Assign Call Call"
  },
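A hedged sketch of registering an intern pattern on a `PackageExporter`; `my_project.**` is a hypothetical module glob, and `allow_empty=True` keeps the close from raising when nothing matches:

```python
from torch.package import PackageExporter

with PackageExporter("demo_package.pt") as exporter:
    # Modules matching this glob would be packaged with their dependencies.
    exporter.intern("my_project.**", allow_empty=True)
    # External dependencies are referenced, not bundled.
    exporter.extern("numpy.**", allow_empty=True)
```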
  {
    "library": "scrapy",
    "name": "getwithbase",
    "source_code": "def getwithbase(self, name: _SettingsKeyT) -> BaseSettings:\n    if not isinstance(name, str):\n        raise ValueError(f'Base setting key must be a string, got {name}')\n    compbs = BaseSettings()\n    compbs.update(self[name + '_BASE'])\n    compbs.update(self[name])\n    return compbs",
    "docstring": "Get a composition of a dictionary-like setting and its counterpart. :param name: name of the dictionary-like setting :type name: str",
    "type": "method",
    "file_path": "scrapy\\scrapy\\settings\\__init__.py",
    "ast_data": "FunctionDef name:getwithbase arg:self arg:name arguments arg arg If Call Raise Call Assign Call Call Call Return return:yes"
  },
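A small sketch showing how `getwithbase` merges a setting with its `_BASE` counterpart (the middleware names are placeholders):

```python
from scrapy.settings import BaseSettings

settings = BaseSettings()
settings["DOWNLOADER_MIDDLEWARES_BASE"] = {"myproject.BaseMiddleware": 100}
settings["DOWNLOADER_MIDDLEWARES"] = {"myproject.CustomMiddleware": 200}

combined = settings.getwithbase("DOWNLOADER_MIDDLEWARES")
print(dict(combined))   # contains both the _BASE entry and the override
```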
  {
    "library": "matplotlib",
    "name": "set_markerfacecolor",
    "source_code": "def set_markerfacecolor(self, fc):\n    self._set_markercolor('markerfacecolor', True, fc)",
    "docstring": "Set the marker face color. Parameters ---------- fc : :mpltype:",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\lines.py",
    "ast_data": "FunctionDef name:set_markerfacecolor arg:self arg:fc arguments arg arg Call"
  },
  {
    "library": "authlib",
    "name": "resolve_issuer_client",
    "source_code": "def resolve_issuer_client(self, issuer):\n    raise NotImplementedError()",
    "docstring": "Fetch client via \"iss\" in assertion claims. Developers MUST implement this method in subclass, e.g.:: def resolve_issuer_client(self, issuer): return Client.query_by_iss(issuer) :param issuer: \"iss\" value in assertion :return: Client instance",
    "type": "method",
    "file_path": "authlib\\authlib\\oauth2\\rfc7523\\jwt_bearer.py",
    "ast_data": "FunctionDef name:resolve_issuer_client arg:self arg:issuer arguments arg arg Raise Call"
  },
  {
    "library": "pytorch",
    "name": "is_available",
    "source_code": "def is_available():\n    return torch._C.has_openmp",
    "docstring": "Return whether PyTorch is built with OpenMP support.",
    "type": "function",
    "file_path": "pytorch\\torch\\backends\\openmp\\__init__.py",
    "ast_data": "FunctionDef name:is_available arguments Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "_process_contour_level_args",
    "source_code": "def _process_contour_level_args(self, args, z_dtype):\n    levels_arg = self.levels\n    if levels_arg is None:\n        if args:\n            levels_arg = args[0]\n        elif np.issubdtype(z_dtype, bool):\n            levels_arg = [0, 0.5, 1] if self.filled else [0.5]\n    if isinstance(levels_arg, Integral) or levels_arg is None:\n        self._ensure_locator_exists(levels_arg)\n        self.levels = self._autolev()\n    else:\n        self.levels = np.asarray(levels_arg, np.float64)\n    if self.filled and len(self.levels) < 2:\n        raise ValueError('Filled contours require at least 2 levels.')\n    if len(self.levels) > 1 and np.min(np.diff(self.levels)) <= 0.0:\n        raise ValueError('Contour levels must be increasing')",
    "docstring": "Determine the contour levels and store in self.levels.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\contour.py",
    "ast_data": "FunctionDef name:_process_contour_level_args arg:self arg:args arg:z_dtype arguments arg arg arg Assign If Compare If Assign If Call Assign If BoolOp Call Compare Call Assign Call Assign Call If BoolOp Compare Call Raise Call If BoolOp Compare Call Compare Call Call Raise Call"
  },
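The level rules enforced above are visible through the public `contour`/`contourf` API; a minimal illustration with synthetic data:

```python
import numpy as np
import matplotlib.pyplot as plt

z = np.random.default_rng(0).random((10, 10))
plt.contour(z, levels=[0.2, 0.5, 0.8])    # explicit levels must be increasing
plt.contourf(z, levels=[0.0, 0.5, 1.0])   # filled contours need at least 2 levels
plt.contour(z, levels=5)                  # an int asks the locator to pick ~5 levels
```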
  {
    "library": "django",
    "name": "_post_init",
    "source_code": "def _post_init(self):\n    self._cs = GEOSCoordSeq(capi.get_cs(self.ptr), self.hasz) if self.has_cs else None",
    "docstring": "Perform post-initialization setup.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\geos\\geometry.py",
    "ast_data": "FunctionDef name:_post_init arg:self arguments arg Assign Call Call"
  },
  {
    "library": "pytorch",
    "name": "find_nodes_prefer_channels_last",
    "source_code": "def find_nodes_prefer_channels_last(self) -> OrderedSet[Node]:\n    output_set = OrderedSet[Node]()\n    for n in reversed(self.module.graph.nodes):\n        if n.target == torch.ops.aten.convolution.default:\n            output_set.add(n)\n            continue\n        for user in n.users:\n            if user in output_set:\n                output_set.add(n)\n                break\n    for n in self.module.graph.nodes:\n        if n in output_set:\n            output_set.update(n.users)\n    return output_set",
    "docstring": "The rule to decide if an node prefer channels last is simple. 1. if it's input/output of a convolution 2. if one of its user prefers channels last We have rule 1 because cudnn runs a faster convolution kernel for channels last inputs; Rule 2 is also important. It makes sure that indirect inputs to convolution also prefers channels last. Consider the scenario: conv -> batch-norm -> relu -> conv Without rule 2, batch-norm output may use a contiguous layout. That will cause 2 extra copies: 1. the output of batch-norm should be channels last initially since its input is a conv's output. Forcing the batch-norm's output to be contiguous results in the first copy 2. The second conv's input is initially contiguous. This layout is propagated from the batch-norm's output. We need convert it to channels last layout which results in the second copy. With rule 2, we makes sure all the tensors in the chain uses channels last layout. So both copies can be saved.",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\graph.py",
    "ast_data": "FunctionDef name:find_nodes_prefer_channels_last arg:self arguments arg Assign Call For Call If Compare Call For If Compare Call For If Compare Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "create_global_plan",
    "source_code": "@abc.abstractmethod\ndef create_global_plan(self, global_plan: list[LoadPlan]) -> list[LoadPlan]:\n    pass",
    "docstring": "Compute the global load plan and return plans for each rank. . N.B. This is called on the coordinator rank only",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\checkpoint\\planner.py",
    "ast_data": "FunctionDef name:create_global_plan arg:self arg:global_plan arguments arg arg"
  },
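A hedged sketch of overriding this hook in a subclass; it assumes `DefaultLoadPlanner` from `torch.distributed.checkpoint.default_planner` as the base, which provides a concrete `create_global_plan`:

```python
from torch.distributed.checkpoint.default_planner import DefaultLoadPlanner
from torch.distributed.checkpoint.planner import LoadPlan

class LoggingLoadPlanner(DefaultLoadPlanner):
    def create_global_plan(self, global_plan: list[LoadPlan]) -> list[LoadPlan]:
        # Runs on the coordinator rank only; it sees every rank's local plan.
        print(f"coordinating {len(global_plan)} rank plans")
        return super().create_global_plan(global_plan)
```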
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, scaffold=None, master='', config=None, checkpoint_dir=None, checkpoint_filename_with_path=None):\n    self._checkpoint_dir = checkpoint_dir\n    self._checkpoint_filename_with_path = checkpoint_filename_with_path\n    self._scaffold = scaffold or Scaffold()\n    self._session_manager = None\n    self._master = master\n    self._config = config",
    "docstring": "Initializes a chief session creator. Args: scaffold: A used for gathering or building supportive ops. If not specified a default one is created. It's used to finalize the graph. master: representation of the TensorFlow master to use. config: proto used to configure the session. checkpoint_dir: A string. Optional path to a directory where to restore variables. checkpoint_filename_with_path: Full file name path to the checkpoint file.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\training\\monitored_session.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:scaffold arg:master arg:config arg:checkpoint_dir arg:checkpoint_filename_with_path arguments arg arg arg arg arg arg Assign Assign Assign BoolOp Call Assign Assign Assign"
  },
  {
    "library": "pandas",
    "name": "merge",
    "source_code": "def merge(self, other, inplace: bool=False):\n    holidays = self.merge_class(self, other)\n    if inplace:\n        self.rules = holidays\n    else:\n        return holidays",
    "docstring": "Merge holiday calendars together. The caller's class rules take precedence. The merge will be done based on each holiday's name. Parameters ---------- other : holiday calendar inplace : bool (default=False) If True set rule_table to holidays, else return array of Holidays",
    "type": "method",
    "file_path": "pandas\\pandas\\tseries\\holiday.py",
    "ast_data": "FunctionDef name:merge arg:self arg:other arg:inplace arguments arg arg arg Assign Call If Assign Return return:yes"
  },
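A brief sketch of merging a custom calendar with a built-in one; the calendar name and holiday are invented for illustration:

```python
from pandas.tseries.holiday import (
    AbstractHolidayCalendar, Holiday, USFederalHolidayCalendar,
)

class CompanyCalendar(AbstractHolidayCalendar):
    rules = [Holiday("Founders Day", month=7, day=2)]

cal = CompanyCalendar()
merged_rules = cal.merge(USFederalHolidayCalendar())  # caller's rules take precedence
print([rule.name for rule in merged_rules])
```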
  {
    "library": "matplotlib",
    "name": "_font_supports_glyph",
    "source_code": "def _font_supports_glyph(fonttype, glyph):\n    if fonttype == 3:\n        return glyph <= 255\n    if fonttype == 42:\n        return glyph <= 65535\n    raise NotImplementedError()",
    "docstring": "Returns True if the font is able to provide codepoint *glyph* in a PDF. For a Type 3 font, this method returns True only for single-byte characters. For Type 42 fonts this method return True if the character is from the Basic Multilingual Plane.",
    "type": "function",
    "file_path": "matplotlib\\lib\\matplotlib\\backends\\backend_pdf.py",
    "ast_data": "FunctionDef name:_font_supports_glyph arg:fonttype arg:glyph arguments arg arg If Compare Return return:yes Compare If Compare Return return:yes Compare Raise Call"
  },
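The distinction surfaces through the `pdf.fonttype` rcParam, which selects which of these two limits applies when saving PDFs:

```python
import matplotlib

# Type 42 embeds TrueType outlines, so any Basic Multilingual Plane glyph
# (codepoint <= 0xFFFF) can be emitted; Type 3 is limited to single bytes.
matplotlib.rcParams["pdf.fonttype"] = 42
```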
  {
    "library": "tensorflow",
    "name": "from_proto",
    "source_code": "@classmethod\ndef from_proto(cls, proto: layout_pb2.MeshProto) -> 'Mesh':\n    return cls._new_object(mesh_proto=proto)",
    "docstring": "Construct a mesh instance from input .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\dtensor\\python\\layout.py",
    "ast_data": "FunctionDef name:from_proto arg:cls arg:proto arguments arg arg Return return:yes Call"
  },
  {
    "library": "django",
    "name": "set_measured",
    "source_code": "def set_measured(self, value):\n    if value is True:\n        capi.set_measured(self.ptr, 1)\n    elif value is False:\n        capi.set_measured(self.ptr, 0)\n    else:\n        raise ValueError(f\"Input to 'set_measured' must be a boolean, got '{value!r}'.\")",
    "docstring": "Set if this geometry has M coordinates.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\gdal\\geometries.py",
    "ast_data": "FunctionDef name:set_measured arg:self arg:value arguments arg arg If Compare Call If Compare Call Raise Call"
  },
  {
    "library": "django",
    "name": "_save_parents",
    "source_code": "def _save_parents(self, cls, using, update_fields, force_insert, updated_parents=None):\n    meta = cls._meta\n    inserted = False\n    if updated_parents is None:\n        updated_parents = {}\n    for parent, field in meta.parents.items():\n        if field and getattr(self, parent._meta.pk.attname) is None and (getattr(self, field.attname) is not None):\n            setattr(self, parent._meta.pk.attname, getattr(self, field.attname))\n        if (parent_updated := updated_parents.get(parent)) is None:\n            parent_inserted = self._save_parents(cls=parent, using=using, update_fields=update_fields, force_insert=force_insert, updated_parents=updated_parents)\n            updated = self._save_table(cls=parent, using=using, update_fields=update_fields, force_insert=parent_inserted or issubclass(parent, force_insert))\n            if not updated:\n                inserted = True\n            updated_parents[parent] = updated\n        elif not parent_updated:\n            inserted = True\n        if field:\n            setattr(self, field.attname, self._get_pk_val(parent._meta))\n            if field.is_cached(self):\n                field.delete_cached_value(self)\n    return inserted",
    "docstring": "Save all the parents of cls using values from self.",
    "type": "method",
    "file_path": "django\\django\\db\\models\\base.py",
    "ast_data": "FunctionDef name:_save_parents arg:self arg:cls arg:using arg:update_fields arg:force_insert arg:updated_parents arguments arg arg arg arg arg arg Assign Assign If Compare Assign For Call If BoolOp Compare Call Compare Call Call Call If Compare Call Assign Call Assign Call BoolOp Call If Assign Assign If Assign If Call Call If Call Call Return return:yes"
  },
  {
    "library": "cherrypy",
    "name": "__init__",
    "source_code": "def __init__(self, *args, **kwargs):\n    super(ChannelFailures, self).__init__(*args, **kwargs)\n    self._exceptions = list()",
    "docstring": "Initialize ChannelFailures errors wrapper.",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\process\\wspbus.py",
    "ast_data": "FunctionDef name:__init__ arg:self arguments arg arg arg Call Call Assign Call"
  },
  {
    "library": "kornia",
    "name": "drop_add_residual_stochastic_depth",
    "source_code": "def drop_add_residual_stochastic_depth(x: Tensor, residual_func: Callable[[Tensor], Tensor], sample_drop_ratio: float=0.0) -> Tensor:\n    b, n, d = x.shape\n    sample_subset_size = max(int(b * (1 - sample_drop_ratio)), 1)\n    brange = torch.randperm(b, device=x.device)[:sample_subset_size]\n    x_subset = x[brange]\n    residual = residual_func(x_subset)\n    x_flat = x.flatten(1)\n    residual = residual.flatten(1)\n    residual_scale_factor = b / sample_subset_size\n    x_plus_residual = torch.index_add(x_flat, 0, brange, residual.to(dtype=x.dtype), alpha=residual_scale_factor)\n    return x_plus_residual.view_as(x)",
    "docstring": "Add residual connection.",
    "type": "function",
    "file_path": "kornia\\kornia\\feature\\dedode\\transformer\\layers\\block.py",
    "ast_data": "FunctionDef name:drop_add_residual_stochastic_depth arg:x arg:residual_func arg:sample_drop_ratio arguments arg arg arg Assign Assign Call Call Assign Call Assign Assign Call Assign Call Assign Call Assign Assign Call Call Return return:yes Call"
  },
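A minimal sketch of calling this helper; the import path below is assumed to mirror the file path above, and `GELU` stands in for a real residual branch:

```python
import torch
from kornia.feature.dedode.transformer.layers.block import (
    drop_add_residual_stochastic_depth,
)

x = torch.randn(8, 16, 32)            # (batch, tokens, dim)
residual_func = torch.nn.GELU()       # stand-in residual branch
out = drop_add_residual_stochastic_depth(x, residual_func, sample_drop_ratio=0.5)
print(out.shape)                      # torch.Size([8, 16, 32])
```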
  {
    "library": "django",
    "name": "remove_index",
    "source_code": "def remove_index(self, model, index):\n    if index.contains_expressions and (not self.connection.features.supports_expression_indexes):\n        return None\n    self.execute(index.remove_sql(model, self))",
    "docstring": "Remove an index from a model.",
    "type": "method",
    "file_path": "django\\django\\db\\backends\\base\\schema.py",
    "ast_data": "FunctionDef name:remove_index arg:self arg:model arg:index arguments arg arg arg If BoolOp Return return:no Call Call"
  },
  {
    "library": "pytorch",
    "name": "RunProcsResult",
    "source_code": "@dataclass\nclass RunProcsResult:\n    return_values: dict[int, Any] = field(default_factory=dict)\n    failures: dict[int, ProcessFailure] = field(default_factory=dict)\n    stdouts: dict[int, str] = field(default_factory=dict)\n    stderrs: dict[int, str] = field(default_factory=dict)\n\n    def is_failed(self) -> bool:\n        return len(self.failures) > 0",
    "docstring": "Results of a completed run of processes started with `` - path to stderr.log (empty string if no redirect)",
    "type": "class",
    "file_path": "pytorch\\torch\\distributed\\elastic\\multiprocessing\\api.py",
    "ast_data": "ClassDef name:RunProcsResult Call Call Call Call FunctionDef name:is_failed arg:self arguments arg Return return:yes Compare Call"
  },
  {
    "library": "tensorflow",
    "name": "device",
    "source_code": "@property\ndef device(self) -> str:\n    return pywrap_tf_session.TF_OperationDevice(self._c_op)",
    "docstring": "The name of the device to which this op has been assigned, if any. Returns: The string name of the device to which this op has been assigned, or an empty string if it has not been assigned to a device.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\ops.py",
    "ast_data": "FunctionDef name:device arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "load_constants",
    "source_code": "def load_constants(self, constants_map: dict[str, torch.Tensor], *, check_full_update: bool, user_managed: bool=False) -> None:\n    self.loader.load_constants(constants_map, False, check_full_update, user_managed)",
    "docstring": "Given a mapping of constant fqns to tensors, load the constants into the model. You can use `` to get the list of constant fqns that are needed in the compiled model. Args: constants_map: A mapping of constant fqns to tensors. check_full_update: Whether to add check to see if all the constants are updated and have values.",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\package\\package.py",
    "ast_data": "FunctionDef name:load_constants arg:self arg:constants_map arguments arg arg arg arg Call"
  },
  {
    "library": "tensorflow",
    "name": "OnRunStartAction",
    "source_code": "class OnRunStartAction:\n    DEBUG_RUN = 'debug_run'\n    PROFILE_RUN = 'profile_run'\n    NON_DEBUG_RUN = 'non_debug_run'",
    "docstring": "Enum-like values for possible action to take on start of a run() call.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\debug\\wrappers\\framework.py",
    "ast_data": "ClassDef name:OnRunStartAction Assign Assign Assign"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, sess, hooks):\n    _WrappedSession.__init__(self, sess)\n    self._hooks = hooks\n    self._should_stop = False",
    "docstring": "Initializes a _HookedSession object. Args: sess: A or a object. hooks: An iterable of `SessionRunHook' objects.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\training\\monitored_session.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:sess arg:hooks arguments arg arg arg Call Assign Assign"
  },
  {
    "library": "scikit-learn",
    "name": "fit",
    "source_code": "@_fit_context(prefer_skip_nested_validation=True)\ndef fit(self, X, y=None):\n    feature_names = []\n    vocab = {}\n    for x in X:\n        for f, v in x.items():\n            if isinstance(v, str):\n                feature_name = '%s%s%s' % (f, self.separator, v)\n            elif isinstance(v, Number) or v is None:\n                feature_name = f\n            elif isinstance(v, Mapping):\n                raise TypeError(f'Unsupported value type {type(v)} for {f}: {v}.\\nMapping objects are not supported.')\n            elif isinstance(v, Iterable):\n                feature_name = None\n                self._add_iterable_element(f, v, feature_names, vocab)\n            if feature_name is not None:\n                if feature_name not in vocab:\n                    vocab[feature_name] = len(feature_names)\n                    feature_names.append(feature_name)\n    if self.sort:\n        feature_names.sort()\n        vocab = {f: i for i, f in enumerate(feature_names)}\n    self.feature_names_ = feature_names\n    self.vocabulary_ = vocab\n    return self",
    "docstring": "Learn a list of feature name -> indices mappings. Parameters ---------- X : Mapping or iterable over Mappings Dict(s) or Mapping(s) from feature names (arbitrary Python objects) to feature values (strings or convertible to dtype). .. versionchanged:: 0.24 Accepts multiple string values for one categorical feature. y : (ignored) Ignored parameter. Returns ------- self : object DictVectorizer class instance.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\feature_extraction\\_dict_vectorizer.py",
    "ast_data": "FunctionDef name:fit arg:self arg:X arg:y arguments arg arg arg Assign Assign For For Call If Call Assign If BoolOp Call Compare Assign If Call Raise Call Call If Call Assign Call If Compare If Compare Assign Call Call If Call Assign Call Assign Assign Return return:yes Call"
  },
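A quick sketch of what this `fit` learns, using the public `DictVectorizer` API with toy records:

```python
from sklearn.feature_extraction import DictVectorizer

v = DictVectorizer(sparse=False)
X = [{"city": "London", "temp": 18.0}, {"city": "Paris", "temp": 21.0}]
v.fit(X)
print(v.feature_names_)   # ['city=London', 'city=Paris', 'temp']
print(v.vocabulary_)      # {'city=London': 0, 'city=Paris': 1, 'temp': 2}
```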
  {
    "library": "numpy",
    "name": "feature_ahead",
    "source_code": "def feature_ahead(self, names):\n    assert not isinstance(names, str) and hasattr(names, '__iter__')\n    implies = self.feature_implies(names, keep_origins=True)\n    ahead = [n for n in names if n not in implies]\n    if len(ahead) == 0:\n        ahead = self.feature_sorted(names, reverse=True)[:1]\n    return ahead",
    "docstring": "Return list of features in 'names' after remove any implied features and keep the origins. Parameters ---------- 'names': sequence sequence of CPU feature names in uppercase. Returns ------- list of CPU features sorted as-is 'names' Examples -------- >>> self.feature_ahead([\"SSE2\", \"SSE3\", \"SSE41\"]) [\"SSE41\"] # assume AVX2 and FMA3 implies each other and AVX2 # is the highest interest >>> self.feature_ahead([\"SSE2\", \"SSE3\", \"SSE41\", \"AVX2\", \"FMA3\"]) [\"AVX2\"] # assume AVX2 and FMA3 don't implies each other >>> self.feature_ahead([\"SSE2\", \"SSE3\", \"SSE41\", \"AVX2\", \"FMA3\"]) [\"AVX2\", \"FMA3\"]",
    "type": "method",
    "file_path": "numpy\\numpy\\distutils\\ccompiler_opt.py",
    "ast_data": "FunctionDef name:feature_ahead arg:self arg:names arguments arg arg BoolOp Call Call Assign Call Assign Compare If Compare Call Assign Call Return return:yes"
  },
  {
    "library": "scipy",
    "name": "legendre",
    "source_code": "def legendre(n, monic=False):\n    if n < 0:\n        raise ValueError('n must be nonnegative.')\n    if n == 0:\n        n1 = n + 1\n    else:\n        n1 = n\n    x, w = roots_legendre(n1)\n    if n == 0:\n        x, w = ([], [])\n    hn = 2.0 / (2 * n + 1)\n    kn = _gam(2 * n + 1) / _gam(n + 1) ** 2 / 2.0 ** n\n    p = orthopoly1d(x, w, hn, kn, wfunc=lambda x: 1.0, limits=(-1, 1), monic=monic, eval_func=lambda x: _ufuncs.eval_legendre(n, x))\n    return p",
    "docstring": "Legendre polynomial. Defined to be the solution of .. math:: \\frac{d}{dx}\\left[(1 - x^2)\\frac{d}{dx}P_n(x)\\right] + n(n + 1)P_n(x) = 0; :math: is a polynomial of degree :math:. Parameters ---------- n : int Degree of the polynomial. monic : bool, optional If , scale the leading coefficient to be 1. Default is . Returns ------- P : orthopoly1d Legendre polynomial. Notes ----- The polynomials :math: are orthogonal over :math: with weight function 1. Examples -------- Generate the 3rd-order Legendre polynomial 1/2*(5x^3 + 0x^2 - 3x + 0): >>> from scipy.special import legendre >>> legendre(3) poly1d([ 2.5, 0. , -1.5, 0. ])",
    "type": "function",
    "file_path": "scipy\\scipy\\special\\_orthogonal.py",
    "ast_data": "FunctionDef name:legendre arg:n arg:monic arguments arg arg If Compare Raise Call If Compare Assign Assign Assign Call If Compare Assign Assign Assign Call Call Assign Call arguments arg arguments arg Call Return return:yes"
  },
  {
    "library": "pandas",
    "name": "_get_binary_io_classes",
    "source_code": "@functools.lru_cache\ndef _get_binary_io_classes() -> tuple[type, ...]:\n    binary_classes: tuple[type, ...] = (BufferedIOBase, RawIOBase)\n    zstd = import_optional_dependency('zstandard', errors='ignore')\n    if zstd is not None:\n        with zstd.ZstdDecompressor().stream_reader(b'') as reader:\n            binary_classes += (type(reader),)\n    return binary_classes",
    "docstring": "IO classes that that expect bytes",
    "type": "function",
    "file_path": "pandas\\pandas\\io\\common.py",
    "ast_data": "FunctionDef name:_get_binary_io_classes arguments Assign Call If Compare With Call Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "write_bytes",
    "source_code": "def write_bytes(self, name: str, data: bytes) -> None:\n    assert isinstance(data, bytes), f'Expected bytes but got {type(data)}'\n    self.archive_file.write_record(name, data, len(data))",
    "docstring": "Write a bytes object to the archive. name: The destination file inside the archive. data: The bytes object to write.",
    "type": "method",
    "file_path": "pytorch\\torch\\export\\pt2_archive\\_package.py",
    "ast_data": "FunctionDef name:write_bytes arg:self arg:name arg:data arguments arg arg arg Call Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "generalize",
    "source_code": "def generalize(self, context: FunctionContext, function_type: function_type_lib.FunctionType) -> function_type_lib.FunctionType:\n    if context in self._dispatch_dict:\n        return self._dispatch_dict[context].try_generalizing_function_type(function_type)\n    else:\n        return function_type",
    "docstring": "Try to generalize a FunctionType within a FunctionContext.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\core\\function\\polymorphism\\function_cache.py",
    "ast_data": "FunctionDef name:generalize arg:self arg:context arg:function_type arguments arg arg arg If Compare Return return:yes Call Return return:yes"
  },
  {
    "library": "sphinx",
    "name": "_object_hierarchy_parts",
    "source_code": "def _object_hierarchy_parts(self, sig_node: desc_signature) -> tuple[str, ...]:\n    return ()",
    "docstring": "Returns a tuple of strings, one entry for each part of the object's hierarchy (e.g. `_toc_entry_name` method. This method must not be used outwith table of contents generation.",
    "type": "method",
    "file_path": "sphinx\\sphinx\\directives\\__init__.py",
    "ast_data": "FunctionDef name:_object_hierarchy_parts arg:self arg:sig_node arguments arg arg Return return:no"
  },
  {
    "library": "pytorch",
    "name": "StaticallyLaunchedAutotuner",
    "source_code": "@dataclasses.dataclass(frozen=True)\nclass StaticallyLaunchedAutotuner:\n    cache_key: str\n    kernel_name: str\n    kernel: 'CachingAutotuner'",
    "docstring": "Represents a statically compiled CachingAutotuner object that we can save directly in the cache. A CachingAutotuner is made up of a list of StaticTritonCompileResults, each of which uses the cubin from a TritonKernelArtifact. Statically saved here have their cubin files saved by a corresponding TritonBundleEntry.",
    "type": "class",
    "file_path": "pytorch\\torch\\_inductor\\triton_bundler.py",
    "ast_data": "ClassDef name:StaticallyLaunchedAutotuner Call"
  },
  {
    "library": "pandas",
    "name": "prep_table",
    "source_code": "def prep_table(self, frame, name: str, if_exists: Literal['fail', 'replace', 'append', 'delete_rows']='fail', index: bool | str | list[str] | None=True, index_label=None, schema=None, dtype: DtypeArg | None=None) -> SQLTable:\n    if dtype:\n        if not is_dict_like(dtype):\n            dtype = dict.fromkeys(frame, dtype)\n        else:\n            dtype = cast(dict, dtype)\n        from sqlalchemy.types import TypeEngine\n        for col, my_type in dtype.items():\n            if isinstance(my_type, type) and issubclass(my_type, TypeEngine):\n                pass\n            elif isinstance(my_type, TypeEngine):\n                pass\n            else:\n                raise ValueError(f'The type of {col} is not a SQLAlchemy type')\n    table = SQLTable(name, self, frame=frame, index=index, if_exists=if_exists, index_label=index_label, schema=schema, dtype=dtype)\n    table.create()\n    return table",
    "docstring": "Prepares table in the database for data insertion. Creates it if needed, etc.",
    "type": "method",
    "file_path": "pandas\\pandas\\io\\sql.py",
    "ast_data": "FunctionDef name:prep_table arg:self arg:frame arg:name arg:if_exists arg:index arg:index_label arg:schema arg:dtype arguments arg arg arg arg arg arg arg arg If If Call Assign Call Assign Call For Call If BoolOp Call Call If Call Raise Call Assign Call Call Return return:yes"
  },
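This preparation step is normally reached through `DataFrame.to_sql`; a small sketch with an in-memory SQLite engine and explicit SQLAlchemy column types:

```python
import pandas as pd
from sqlalchemy import create_engine, types

engine = create_engine("sqlite://")
df = pd.DataFrame({"name": ["a"], "score": [1.5]})
df.to_sql("scores", engine, if_exists="replace", index=False,
          dtype={"name": types.Text(), "score": types.Float()})
```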
  {
    "library": "sphinx",
    "name": "ensure_eol",
    "source_code": "def ensure_eol(self) -> None:\n    if self.body and self.body[-1][-1:] != '\\n':\n        self.body.append('\\n')",
    "docstring": "Ensure the last line in body is terminated by new line.",
    "type": "method",
    "file_path": "sphinx\\sphinx\\writers\\texinfo.py",
    "ast_data": "FunctionDef name:ensure_eol arg:self arguments arg If BoolOp Compare Call"
  },
  {
    "library": "matplotlib",
    "name": "jet",
    "source_code": "def jet() -> None:\n    set_cmap('jet')",
    "docstring": "Set the colormap to 'jet'. This changes the default colormap as well as the colormap of the current image if there is one. See `` for more information.",
    "type": "function",
    "file_path": "matplotlib\\lib\\matplotlib\\pyplot.py",
    "ast_data": "FunctionDef name:jet arguments Call"
  },
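A two-line usage sketch:

```python
import matplotlib.pyplot as plt

plt.imshow([[0, 1], [2, 3]])
plt.jet()   # switches the default colormap and the current image to 'jet'
```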
  {
    "library": "pytorch",
    "name": "validate",
    "source_code": "def validate(self):\n    if self._validated:\n        return\n    for constraint in self.constraints:\n        _validate_pass_schedule_constraint(constraint, self.passes)\n    self._validated = True",
    "docstring": "Validates that current pass schedule defined by is valid according to all constraints in",
    "type": "method",
    "file_path": "pytorch\\torch\\fx\\passes\\pass_manager.py",
    "ast_data": "FunctionDef name:validate arg:self arguments arg If Return return:no For Call Assign"
  },
  {
    "library": "tensorflow",
    "name": "_extract_failed_ps_instances",
    "source_code": "def _extract_failed_ps_instances(err_msg):\n    tasks = re.findall('/job:ps/replica:0/task:[0-9]+', err_msg)\n    return set((int(t.split(':')[-1]) for t in tasks))",
    "docstring": "Return a set of potentially failing ps instances from error message.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\coordinator\\cluster_coordinator.py",
    "ast_data": "FunctionDef name:_extract_failed_ps_instances arg:err_msg arguments arg Assign Call Return return:yes Call Call Call"
  },
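The extraction logic is plain regex over the error text; a self-contained sketch with a made-up error message:

```python
import re

err = "error on /job:ps/replica:0/task:0 and /job:ps/replica:0/task:3"
tasks = re.findall("/job:ps/replica:0/task:[0-9]+", err)
print({int(t.split(":")[-1]) for t in tasks})   # {0, 3}
```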
  {
    "library": "cryptography",
    "name": "key_sizes",
    "source_code": "@property\n@abc.abstractmethod\ndef key_sizes(self) -> frozenset[int]:\n    pass",
    "docstring": "Valid key sizes for this algorithm in bits",
    "type": "method",
    "file_path": "cryptography\\src\\cryptography\\hazmat\\primitives\\_cipheralgorithm.py",
    "ast_data": "FunctionDef name:key_sizes arg:self arguments arg"
  },
  {
    "library": "django",
    "name": "is_active",
    "source_code": "def is_active(self, request):\n    return settings.DEBUG is False",
    "docstring": "This filter is to add safety in production environments (i.e. DEBUG is False). If DEBUG is True then your site is not safe anyway. This hook is provided as a convenience to easily activate or deactivate the filter on a per request basis.",
    "type": "method",
    "file_path": "django\\django\\views\\debug.py",
    "ast_data": "FunctionDef name:is_active arg:self arg:request arguments arg arg Return return:yes Compare"
  },
  {
    "library": "tensorflow",
    "name": "_load_all_device_dumps",
    "source_code": "def _load_all_device_dumps(self, partition_graphs, validate):\n    device_dirs = _glob(os.path.join(self._dump_root, METADATA_FILE_PREFIX + DEVICE_TAG + '*'))\n    self._device_names = []\n    self._t0s = {}\n    self._dump_tensor_data = {}\n    self._dump_graph_file_paths = {}\n    self._debug_watches = {}\n    self._watch_key_to_devices = {}\n    self._watch_key_to_datum = {}\n    self._watch_key_to_rel_time = {}\n    self._watch_key_to_dump_size_bytes = {}\n    for device_dir in device_dirs:\n        device_name = device_path_to_device_name(device_dir)\n        self._device_names.append(device_name)\n        self._load_device_dumps(device_name, device_dir)\n    self._load_partition_graphs(partition_graphs, validate)\n    self._calculate_t0()\n    for device_name in self._device_names:\n        self._create_tensor_watch_maps(device_name)",
    "docstring": "Load the dump data for all devices.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\debug\\lib\\debug_data.py",
    "ast_data": "FunctionDef name:_load_all_device_dumps arg:self arg:partition_graphs arg:validate arguments arg arg arg Assign Call Call Assign Assign Assign Assign Assign Assign Assign Assign Assign For Assign Call Call Call Call Call For Call"
  },
  {
    "library": "sphinx",
    "name": "texinfo_visit_inheritance_diagram",
    "source_code": "def texinfo_visit_inheritance_diagram(self: TexinfoTranslator, node: inheritance_diagram) -> None:\n    graph = node['graph']\n    graph_hash = get_graph_hash(node)\n    name = 'inheritance%s' % graph_hash\n    dotcode = graph._generate_dot(name, config=self.config, graph_attrs={'size': '\"6.0,6.0\"'})\n    render_dot_texinfo(self, node, dotcode, {}, 'inheritance')\n    raise nodes.SkipNode",
    "docstring": "Output the graph for Texinfo. This will insert a PNG.",
    "type": "function",
    "file_path": "sphinx\\sphinx\\ext\\inheritance_diagram.py",
    "ast_data": "FunctionDef name:texinfo_visit_inheritance_diagram arg:self arg:node arguments arg arg Assign Assign Call Assign Assign Call Call Raise"
  },
  {
    "library": "django",
    "name": "field_types",
    "source_code": "@property\ndef field_types(self):\n    return [OGRFieldTypes[capi.get_field_type(capi.get_field_defn(self._ldefn, i))] for i in range(self.num_fields)]",
    "docstring": "Return a list of the types of fields in this Layer. For example, return the list [OFTInteger, OFTReal, OFTString] for an OGR layer that has an integer, a floating-point, and string fields.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\gdal\\layer.py",
    "ast_data": "FunctionDef name:field_types arg:self arguments arg Return return:yes Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_ndims_from_shape",
    "source_code": "def _ndims_from_shape(shape):\n    if shape.get_shape().ndims not in (None, 1):\n        raise ValueError('input is not a valid shape: not 1D')\n    if not shape.dtype.is_integer:\n        raise TypeError('input is not a valid shape: wrong dtype')\n    if shape.get_shape().is_fully_defined():\n        return constant_op.constant(shape.get_shape().as_list()[0])\n    return array_ops.shape(shape)[0]",
    "docstring": "Returns 's implied by a shape.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\distributions\\transformed_distribution.py",
    "ast_data": "FunctionDef name:_ndims_from_shape arg:shape arguments arg If Compare Call Raise Call If Raise Call If Call Call Return return:yes Call Call Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "transpose",
    "source_code": "def transpose(*t_args, **t_kwargs):\n    raise NotImplementedError('recompute_grad tried to transpose grad of {}. Consider not using recompute_grad in forward modeautodiff'.format(f.__name__))",
    "docstring": "Gradient function calculation for forward mode autodiff.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\custom_gradient.py",
    "ast_data": "FunctionDef name:transpose arguments arg arg Raise Call Call"
  },
  {
    "library": "pandas",
    "name": "_validate_usecols_names",
    "source_code": "@final\ndef _validate_usecols_names(self, usecols: SequenceT, names: Sequence) -> SequenceT:\n    missing = [c for c in usecols if c not in names]\n    if len(missing) > 0:\n        raise ValueError(f'Usecols do not match columns, columns expected but not found: {missing}')\n    return usecols",
    "docstring": "Validates that all usecols are present in a given list of names. If not, raise a ValueError that shows what usecols are missing. Parameters ---------- usecols : iterable of usecols The columns to validate are present in names. names : iterable of names The column names to check against. Returns ------- usecols : iterable of usecols The parameter if the validation succeeds. Raises ------ ValueError : Columns were missing. Error message will list them.",
    "type": "method",
    "file_path": "pandas\\pandas\\io\\parsers\\base_parser.py",
    "ast_data": "FunctionDef name:_validate_usecols_names arg:self arg:usecols arg:names arguments arg arg arg Assign Compare If Compare Call Raise Call Return return:yes"
  },
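The validation is reached through `read_csv(usecols=...)`; a small sketch showing both the passing and the failing case:

```python
import io
import pandas as pd

csv = io.StringIO("a,b,c\n1,2,3\n")
print(pd.read_csv(csv, usecols=["a", "c"]))
# usecols=["a", "z"] would instead raise ValueError:
# "Usecols do not match columns, columns expected but not found: ['z']"
```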
  {
    "library": "tensorflow",
    "name": "_SparseSegmentSumWithNumSegmentsGrad",
    "source_code": "@ops.RegisterGradient('SparseSegmentSumWithNumSegments')\ndef _SparseSegmentSumWithNumSegmentsGrad(op: ops.Operation, grad):\n    if _GetOpAttrOrNone(op, 'sparse_gradient'):\n        return (_SparseSegmentReduceGradV2(op, grad), None, None, None)\n    dim0 = array_ops.shape(op.inputs[0])[0]\n    if compat.forward_compatible(2021, 6, 10):\n        return (math_ops.sparse_segment_sum_grad(grad, op.inputs[1], op.inputs[2], dim0), None, None, None)\n    else:\n        return (math_ops.unsorted_segment_sum(array_ops.gather(grad, op.inputs[2]), op.inputs[1], dim0), None, None, None)",
    "docstring": "Gradient for SparseSegmentSumWithNumSegments.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\math_grad.py",
    "ast_data": "FunctionDef name:_SparseSegmentSumWithNumSegmentsGrad arg:op arg:grad arguments arg arg If Call Return return:yes Call Assign Call If Call Return return:yes Call Return return:yes Call Call Call"
  },
  {
    "library": "django",
    "name": "equals_exact",
    "source_code": "def equals_exact(self, other, tolerance=0):\n    return capi.geos_equalsexact(self.ptr, other.ptr, float(tolerance))",
    "docstring": "Return true if the two Geometries are exactly equal, up to a specified tolerance.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\geos\\geometry.py",
    "ast_data": "FunctionDef name:equals_exact arg:self arg:other arg:tolerance arguments arg arg arg Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "add_ref",
    "source_code": "def add_ref(self, timestamp: int) -> None:\n    self._ref_times.append(timestamp)",
    "docstring": "Adds a reference to this tensor with the specified timestamp. Args: timestamp: Timestamp of object reference as an integer.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\client\\timeline.py",
    "ast_data": "FunctionDef name:add_ref arg:self arg:timestamp arguments arg arg Call"
  },
  {
    "library": "sphinx",
    "name": "prepend_prolog",
    "source_code": "def prepend_prolog(content: StringList, prolog: str) -> None:\n    if prolog:\n        pos = 0\n        for line in content:\n            if FIELD_NAME_RE.match(line):\n                pos += 1\n            else:\n                break\n        if pos > 0:\n            content.insert(pos, '', '<generated>', 0)\n            pos += 1\n        lineno = 0\n        for lineno, line in enumerate(prolog.splitlines()):\n            content.insert(pos + lineno, line, '<rst_prolog>', lineno)\n        content.insert(pos + lineno + 1, '', '<generated>', 0)",
    "docstring": "Prepend a string to content body as prolog.",
    "type": "function",
    "file_path": "sphinx\\sphinx\\util\\rst.py",
    "ast_data": "FunctionDef name:prepend_prolog arg:content arg:prolog arguments arg arg If Assign For If Call If Compare Call Assign For Call Call Call Call"
  },
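A hedged sketch of calling `prepend_prolog` directly on a docutils `StringList`; the prolog text is an arbitrary substitution definition:

```python
from docutils.statemachine import StringList
from sphinx.util.rst import prepend_prolog

content = StringList([":orphan:", "", "Hello world"], source="<doc>")
prepend_prolog(content, ".. |release| replace:: 1.0")
print("\n".join(content))   # the prolog lands after the leading field list
```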
  {
    "library": "tensorflow",
    "name": "_prefer_static_concat_shape",
    "source_code": "def _prefer_static_concat_shape(first_shape, second_shape_int_list):\n    second_shape_int_list_static = [tensor_util.constant_value(s) for s in second_shape_int_list]\n    if isinstance(first_shape, tensor_shape.TensorShape) and all((s is not None for s in second_shape_int_list_static)):\n        return first_shape.concatenate(second_shape_int_list_static)\n    return array_ops.concat([first_shape, second_shape_int_list], axis=0)",
    "docstring": "Concatenate a shape with a list of integers as statically as possible. Args: first_shape: or instance. If a , must return . second_shape_int_list: of scalar integer s. Returns: representing concatenating and as statically as possible.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\linalg\\linear_operator_kronecker.py",
    "ast_data": "FunctionDef name:_prefer_static_concat_shape arg:first_shape arg:second_shape_int_list arguments arg arg Assign Call If BoolOp Call Call Compare Return return:yes Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "chunk_constant_value",
    "source_code": "def chunk_constant_value(node: node_def_pb2.NodeDef, size: int):\n    if node.op == _CONST_OP:\n        tensor_proto = node.attr['value'].tensor\n        if tensor_proto.tensor_content:\n            b = tensor_proto.tensor_content\n        else:\n            b = tensor_util.MakeNdarray(tensor_proto).tobytes()\n        kept_attributes = {key: getattr(tensor_proto, key) for key in _KEEP_TENSOR_PROTO_FIELDS}\n        tensor_proto.Clear()\n        for field, val in kept_attributes.items():\n            if isinstance(val, message.Message):\n                getattr(tensor_proto, field).MergeFrom(val)\n            else:\n                setattr(tensor_proto, field, val)\n        return b\n    else:\n        attributes_and_sizes = ', '.join([f'{key}: {util.format_bytes(val.ByteSize())}' for key, val in node.attr.items()])\n        raise ValueError(f'Unable to split GraphDef because at least one of the nodes individually exceeds the max size of {util.format_bytes(constants.max_size())}. Currently only Const nodes can be further split.\\nNode info:\\n\\tsize: {util.format_bytes(size)}\\n\\tname: {node.name}\\n\\top: {node.op}\\n\\tinputs: {node.input}\\n\\top: {node.op}\\n\\tdevice: {node.device}\\n\\tattr (and sizes): {attributes_and_sizes}')",
    "docstring": "Extracts and clears the constant value from a NodeDef. Args: node: NodeDef with const value to extract. size: Size of NodeDef (for error reporting). Returns: Bytes representation of the Constant tensor content.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\tools\\proto_splitter\\split_graph_def.py",
    "ast_data": "FunctionDef name:chunk_constant_value arg:node arg:size arguments arg arg If Compare Assign If Assign Assign Call Call Assign Call Call For Call If Call Call Call Call Return return:yes Assign Call Call Call Call Raise Call Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "on_graph_execution_trace",
    "source_code": "def on_graph_execution_trace(self, graph_execution_trace_index, graph_execution_trace):\n    if self._limit > 0 and len(self._alerts) >= self._limit:\n        return\n    if graph_execution_trace.tensor_debug_mode == debug_event_pb2.TensorDebugMode.FULL_TENSOR:\n        tensor_value = self._debug_data_reader.graph_execution_trace_to_tensor_value(graph_execution_trace)\n        self._check_full_tensor_value(tensor_value, graph_execution_trace.wall_time, graph_execution_trace.op_type, graph_execution_trace.output_slot, graph_execution_trace_index=graph_execution_trace_index)\n    elif graph_execution_trace.debug_tensor_value:\n        self._check_debug_tensor_value(graph_execution_trace.tensor_debug_mode, graph_execution_trace.debug_tensor_value, graph_execution_trace.wall_time, graph_execution_trace.op_type, graph_execution_trace.output_slot, graph_execution_trace_index=graph_execution_trace_index)",
    "docstring": "Monitor method for GraphExecutionTrace data object.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\debug\\lib\\debug_events_monitors.py",
    "ast_data": "FunctionDef name:on_graph_execution_trace arg:self arg:graph_execution_trace_index arg:graph_execution_trace arguments arg arg arg If BoolOp Compare Compare Call Return return:no If Compare Assign Call Call If Call"
  },
  {
    "library": "scipy",
    "name": "_format_b_constraints",
    "source_code": "def _format_b_constraints(b):\n    if b is None:\n        return np.array([], dtype=float)\n    b = np.array(b, dtype=float, copy=True).squeeze()\n    return b if b.size != 1 else b.reshape(-1)",
    "docstring": "Format the upper bounds of the constraints to a 1-D array Parameters ---------- b : 1-D array 1-D array of values representing the upper-bound of each (in)equality constraint (row) in ``.",
    "type": "function",
    "file_path": "scipy\\scipy\\optimize\\_linprog_util.py",
    "ast_data": "FunctionDef name:_format_b_constraints arg:b arguments arg If Compare Return return:yes Call Assign Call Call Return return:yes Compare Call"
  },
  {
    "library": "kornia",
    "name": "rgba_to_bgr",
    "source_code": "def rgba_to_bgr(image: Tensor) -> Tensor:\n    if not isinstance(image, Tensor):\n        raise TypeError(f'Input type is not a Tensor. Got {type(image)}')\n    if len(image.shape) < 3 or image.shape[-3] != 4:\n        raise ValueError(f'Input size must have a shape of (*, 4, H, W).Got {image.shape}')\n    x_rgb: Tensor = rgba_to_rgb(image)\n    return rgb_to_bgr(x_rgb)",
    "docstring": "Convert an image from RGBA to BGR. Args: image: RGBA Image to be converted to BGR of shape :math:. Returns: RGB version of the image with shape :math:. Example: >>> input = torch.rand(2, 4, 4, 5) >>> output = rgba_to_bgr(input) # 2x3x4x5",
    "type": "function",
    "file_path": "kornia\\kornia\\color\\rgb.py",
    "ast_data": "FunctionDef name:rgba_to_bgr arg:image arguments arg If Call Raise Call Call If BoolOp Compare Call Compare Raise Call Call Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "__init__",
    "source_code": "def __init__(self, loc=None, shape=1, df=1, allow_singular=False, seed=None):\n    self._dist = multivariate_t_gen(seed)\n    dim, loc, shape, df = self._dist._process_parameters(loc, shape, df)\n    self.dim, self.loc, self.shape, self.df = (dim, loc, shape, df)\n    self.shape_info = _PSD(shape, allow_singular=allow_singular)",
    "docstring": "Create a frozen multivariate t distribution. Parameters ---------- %(_mvt_doc_default_callparams)s Examples -------- >>> import numpy as np >>> from scipy.stats import multivariate_t >>> loc = np.zeros(3) >>> shape = np.eye(3) >>> df = 10 >>> dist = multivariate_t(loc, shape, df) >>> dist.rvs() array([[ 0.81412036, -1.53612361, 0.42199647]]) >>> dist.pdf([1, 1, 1]) array([0.01237803])",
    "type": "method",
    "file_path": "scipy\\scipy\\stats\\_multivariate.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:loc arg:shape arg:df arg:allow_singular arg:seed arguments arg arg arg arg arg arg Assign Call Assign Call Assign Assign Call"
  },
  {
    "library": "kornia",
    "name": "LAFDescriptor",
    "source_code": "class LAFDescriptor(Module):\n\n    def __init__(self, patch_descriptor_module: Optional[Module]=None, patch_size: int=32, grayscale_descriptor: bool=True) -> None:\n        super().__init__()\n        if patch_descriptor_module is None:\n            patch_descriptor_module = HardNet(True)\n        self.descriptor = patch_descriptor_module\n        self.patch_size = patch_size\n        self.grayscale_descriptor = grayscale_descriptor\n\n    def __repr__(self) -> str:\n        return f\"{self.__class__.__name__}(descriptor={self.descriptor.__repr__()}, patch_size={self.patch_size}, grayscale_descriptor='{self.grayscale_descriptor})\"\n\n    def forward(self, img: Tensor, lafs: Tensor) -> Tensor:\n        return get_laf_descriptors(img, lafs, self.descriptor, self.patch_size, self.grayscale_descriptor)",
    "docstring": "Module to get local descriptors, corresponding to LAFs (keypoints). Internally uses :func:. Args: patch_descriptor_module: patch descriptor module, e.g. :class: or :class:. Default: :class:. patch_size: patch size in pixels, which descriptor expects. grayscale_descriptor: `` if patch_descriptor expects single-channel image.",
    "type": "class",
    "file_path": "kornia\\kornia\\feature\\integrated.py",
    "ast_data": "ClassDef name:LAFDescriptor FunctionDef name:__init__ arg:self arg:patch_descriptor_module arg:patch_size arg:grayscale_descriptor arguments arg arg arg arg Call Call If Compare Assign Call Assign Assign Assign FunctionDef name:__repr__ arg:self arguments arg Return return:yes Call FunctionDef name:forward arg:self arg:img arg:lafs arguments arg arg arg Return return:yes Call"
  },
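A small usage sketch; the single LAF below is hand-written in the (B, N, 2, 3) layout, and the default `HardNet(True)` descriptor downloads pretrained weights on first use:

```python
import torch
from kornia.feature import LAFDescriptor

img = torch.rand(1, 1, 64, 64)                                  # grayscale image
lafs = torch.tensor([[[[8.0, 0.0, 32.0], [0.0, 8.0, 32.0]]]])   # (B, N, 2, 3)
descs = LAFDescriptor()(img, lafs)
print(descs.shape)   # (1, 1, 128) with the default HardNet descriptor
```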
  {
    "library": "pytorch",
    "name": "from_dict",
    "source_code": "@classmethod\ndef from_dict(cls, backend_config_dict: dict[str, Any]) -> BackendConfig:\n    conf = cls(backend_config_dict.get(NAME_DICT_KEY, ''))\n    for d in backend_config_dict.get(CONFIGS_DICT_KEY, []):\n        if isinstance(d, BackendPatternConfig):\n            conf.set_backend_pattern_config(d)\n        elif isinstance(d, dict):\n            conf.set_backend_pattern_config(BackendPatternConfig.from_dict(d))\n        else:\n            raise ValueError(f\"Expected backend_config_dict['{CONFIGS_DICT_KEY}'] to be a dictionary\")\n    return conf",
    "docstring": "Create a `BackendPatternConfig`",
    "type": "method",
    "file_path": "pytorch\\torch\\ao\\quantization\\backend_config\\backend_config.py",
    "ast_data": "FunctionDef name:from_dict arg:cls arg:backend_config_dict arguments arg arg Assign Call Call For Call If Call Call If Call Call Call Raise Call Return return:yes"
  },
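A minimal sketch of the dictionary form accepted by `from_dict`; the backend name is hypothetical:

```python
import torch
from torch.ao.quantization.backend_config import BackendConfig, BackendPatternConfig

cfg_dict = {
    "name": "my_backend",  # hypothetical backend name
    "configs": [BackendPatternConfig(torch.nn.Linear)],
}
backend_config = BackendConfig.from_dict(cfg_dict)
print(backend_config.name)
```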
  {
    "library": "tensorflow",
    "name": "should_save_summary",
    "source_code": "@property\ndef should_save_summary(self):\n    return self._strategy.extended.should_save_summary",
    "docstring": "Whether to save summaries.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\distribute_coordinator.py",
    "ast_data": "FunctionDef name:should_save_summary arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "disable_mlir_bridge",
    "source_code": "@tf_export('config.experimental.disable_mlir_bridge')\ndef disable_mlir_bridge():\n    context.context().enable_mlir_bridge = False",
    "docstring": "Disables experimental MLIR-Based TensorFlow Compiler Bridge.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\config.py",
    "ast_data": "FunctionDef name:disable_mlir_bridge arguments Assign Call Call"
  },
  {
    "library": "matplotlib",
    "name": "prepare_data",
    "source_code": "def prepare_data(d, init):\n    if init not in d:\n        d = {**d, init: str(init)}\n    name2short = {name: short for short, name in d.items()}\n    short2name = {short: name for name, short in name2short.items()}\n    canonical_init = name2short[d[init]]\n    return [canonical_init] + sorted(short2name.items(), key=lambda short_and_name: short_and_name[1])",
    "docstring": "Prepare entry for FormLayout. *d* is a mapping of shorthands to style names (a single style may have multiple shorthands, in particular the shorthands , , and are synonyms); *init* is one shorthand of the initial style. This function returns an list suitable for initializing a FormLayout combobox, namely .",
    "type": "function",
    "file_path": "matplotlib\\lib\\matplotlib\\backends\\qt_editor\\figureoptions.py",
    "ast_data": "FunctionDef name:prepare_data arg:d arg:init arguments arg arg If Compare Assign Call Assign Call Assign Call Assign Return return:yes Call Call arguments arg"
  },
  {
    "library": "pytorch",
    "name": "sign",
    "source_code": "@property\ndef sign(self) -> int:\n    raise NotImplementedError",
    "docstring": "Returns the sign of the determinant of the Jacobian, if applicable. In general this only makes sense for bijective transforms.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributions\\transforms.py",
    "ast_data": "FunctionDef name:sign arg:self arguments arg Raise"
  },
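Concrete subclasses do implement this property; a quick sketch with two built-in transforms:

```python
import torch.distributions.transforms as T

print(T.ExpTransform().sign)               # 1: exp is monotonically increasing
print(T.AffineTransform(0.0, -2.0).sign)   # -1: a negative scale reverses order
```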
  {
    "library": "numpy",
    "name": "_zseries_int",
    "source_code": "def _zseries_int(zs):\n    n = 1 + len(zs) // 2\n    ns = np.array([-1, 0, 1], dtype=zs.dtype)\n    zs = _zseries_mul(zs, ns)\n    div = np.arange(-n, n + 1) * 2\n    zs[:n] /= div[:n]\n    zs[n + 1:] /= div[n + 1:]\n    zs[n] = 0\n    return zs",
    "docstring": "Integrate a z-series. The integral is with respect to x, not z. This is achieved by a change of variable using dx/dz given in the module notes. Parameters ---------- zs : z-series The z-series to integrate Returns ------- integral : z-series The indefinite integral Notes ----- The zseries for x (ns) has been multiplied by two in order to avoid using floats that are incompatible with Decimal and likely other specialized scalar types. This scaling has been compensated by dividing the resulting zs by two.",
    "type": "function",
    "file_path": "numpy\\numpy\\polynomial\\chebyshev.py",
    "ast_data": "FunctionDef name:_zseries_int arg:zs arguments arg Assign Call Assign Call Assign Call Assign Call Assign Return return:yes"
  },
  {
    "library": "authlib",
    "name": "hooked",
    "source_code": "def hooked(func=None, before=None, after=None):\n\n    def decorator(func):\n        before_name = before or f'before_{func.__name__}'\n        after_name = after or f'after_{func.__name__}'\n\n        def wrapper(self, *args, **kwargs):\n            self.execute_hook(before_name, *args, **kwargs)\n            result = func(self, *args, **kwargs)\n            self.execute_hook(after_name, result)\n            return result\n        return wrapper\n    if callable(func):\n        return decorator(func)\n    return decorator",
    "docstring": "Execute hooks before and after the decorated method.",
    "type": "function",
    "file_path": "authlib\\authlib\\oauth2\\rfc6749\\hooks.py",
    "ast_data": "FunctionDef name:hooked arg:func arg:before arg:after arguments arg arg arg FunctionDef name:decorator arg:func arguments arg Assign BoolOp Assign BoolOp FunctionDef name:wrapper arg:self arguments arg arg arg Call Assign Call Call Return return:yes Return return:yes If Call Return return:yes Call Return return:yes"
  },
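A self-contained sketch of the hook protocol this decorator assumes: the host class only needs an `execute_hook(name, ...)` method, and the class name here is invented for illustration:

```python
from authlib.oauth2.rfc6749.hooks import hooked

class Service:
    def __init__(self):
        self.calls = []

    def execute_hook(self, hook_type, *args, **kwargs):
        self.calls.append(hook_type)

    @hooked
    def save(self, value):
        return value

s = Service()
s.save(42)
print(s.calls)   # ['before_save', 'after_save']
```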
  {
    "library": "sphinx",
    "name": "BuiltInTheme",
    "source_code": "class BuiltInTheme(Theme):\n\n    def __init__(self, name: str, config: Config) -> None:\n        super().__init__(name)\n        if name == 'howto':\n            self.docclass = config.latex_docclass.get('howto', 'article')\n        else:\n            self.docclass = config.latex_docclass.get('manual', 'report')\n        if name in {'manual', 'howto'}:\n            self.wrapperclass = 'sphinx' + name\n        else:\n            self.wrapperclass = name\n        if name == 'howto' and (not self.docclass.startswith('j')):\n            self.toplevel_sectioning = 'section'\n        else:\n            self.toplevel_sectioning = 'chapter'",
    "docstring": "A built-in LaTeX theme.",
    "type": "class",
    "file_path": "sphinx\\sphinx\\builders\\latex\\theming.py",
    "ast_data": "ClassDef name:BuiltInTheme FunctionDef name:__init__ arg:self arg:name arg:config arguments arg arg arg Call Call If Compare Assign Call Assign Call If Compare Assign Assign If BoolOp Compare Call Assign Assign"
  },
  {
    "library": "tensorflow",
    "name": "from_value",
    "source_code": "@classmethod\ndef from_value(cls, value):\n    return cls(value.shape, dtype=value.dtype, trainable=value.trainable)",
    "docstring": "Creates a from the given . 's shape, dtype, and trainable attributes will be used to create the new . Example: >>> v = tf.Variable([1., 2., 3.]) >>> VariableSpec.from_value(v) VariableSpec(shape=(3,), dtype=tf.float32, trainable=True, alias_id=None) Args: value: A Variable. Returns: A created from .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\resource_variable_ops.py",
    "ast_data": "FunctionDef name:from_value arg:cls arg:value arguments arg arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "get_gradients",
    "source_code": "def get_gradients(self, loss, params):\n    params = nest.flatten(params)\n    with backend.get_graph().as_default(), backend.name_scope(self._name + '/gradients'):\n        grads = gradients.gradients(loss, params)\n        for grad, param in zip(grads, params):\n            if grad is None:\n                raise ValueError('Variable {} has `None` for gradient. Please make sure that all of your ops have a gradient defined (i.e. are differentiable). Common ops without gradient: K.argmax, K.round, K.eval.'.format(param))\n    return grads",
    "docstring": "Returns gradients of with respect to . Should be used only in legacy v1 graph mode. Args: loss: Loss tensor. params: List of variables. Returns: List of gradient tensors. Raises: ValueError: In case any gradient cannot be computed (e.g. if gradient function not implemented).",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\optimizer_v2\\optimizer_v2.py",
    "ast_data": "FunctionDef name:get_gradients arg:self arg:loss arg:params arguments arg arg arg Assign Call With Call Call Call Assign Call For Call If Compare Raise Call Call Return return:yes"
  },
  {
    "library": "scipy",
    "name": "linprog_verbose_callback",
    "source_code": "def linprog_verbose_callback(res):\n    x = res['x']\n    fun = res['fun']\n    phase = res['phase']\n    status = res['status']\n    nit = res['nit']\n    message = res['message']\n    complete = res['complete']\n    saved_printoptions = np.get_printoptions()\n    np.set_printoptions(linewidth=500, formatter={'float': lambda x: f'{x: 12.4f}'})\n    if status:\n        print('--------- Simplex Early Exit -------\\n')\n        print(f'The simplex method exited early with status {status:d}')\n        print(message)\n    elif complete:\n        print('--------- Simplex Complete --------\\n')\n        print(f'Iterations required: {nit}')\n    else:\n        print(f'--------- Iteration {nit:d}  ---------\\n')\n    if nit > 0:\n        if phase == 1:\n            print('Current Pseudo-Objective Value:')\n        else:\n            print('Current Objective Value:')\n        print('f = ', fun)\n        print()\n        print('Current Solution Vector:')\n        print('x = ', x)\n        print()\n    np.set_printoptions(**saved_printoptions)",
    "docstring": "A sample callback function demonstrating the linprog callback interface. This callback produces detailed output to sys.stdout before each iteration and after the final iteration of the simplex algorithm. Parameters ---------- res : A consisting of the following fields: x : 1-D array The independent variable vector which optimizes the linear programming problem. fun : float Value of the objective function. success : bool True if the algorithm succeeded in finding an optimal solution. slack : 1-D array The values of the slack variables. Each slack variable corresponds to an inequality constraint. If the slack is zero, then the corresponding constraint is active. con : 1-D array The (nominally zero) residuals of the equality constraints, that is, `` : Serious numerical difficulties encountered nit : int The number of iterations performed. message : str A string descriptor of the exit status of the optimization.",
    "type": "function",
    "file_path": "scipy\\scipy\\optimize\\_linprog.py",
    "ast_data": "FunctionDef name:linprog_verbose_callback arg:res arguments arg Assign Assign Assign Assign Assign Assign Assign Assign Call Call arguments arg If Call Call Call If Call Call Call If Compare If Compare Call Call Call Call Call Call Call Call"
  },
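A usage sketch for the callback above: pass it as the `callback` argument to `linprog`. The private import path follows the file_path recorded here, and `method='simplex'` assumes a SciPy version that still ships the legacy simplex solver:

from scipy.optimize import linprog
from scipy.optimize._linprog import linprog_verbose_callback

# Minimize c @ x subject to A_ub @ x <= b_ub; the callback prints the
# current solution vector and objective value at each simplex iteration.
c = [-1, 4]
A_ub = [[-3, 1], [1, 2]]
b_ub = [6, 4]
res = linprog(c, A_ub=A_ub, b_ub=b_ub, method='simplex',
              callback=linprog_verbose_callback)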
  {
    "library": "django",
    "name": "get_objects",
    "source_code": "def get_objects(count_only=False):\n    if use_natural_foreign_keys:\n        models = serializers.sort_dependencies(app_list.items(), allow_cycles=True)\n    else:\n        models = []\n        for app_config, model_list in app_list.items():\n            if model_list is None:\n                models.extend(app_config.get_models())\n            else:\n                models.extend(model_list)\n    for model in models:\n        if model in excluded_models:\n            continue\n        if model._meta.proxy and model._meta.proxy_for_model not in models:\n            warnings.warn(\"%s is a proxy model and won't be serialized.\" % model._meta.label, category=ProxyModelWarning)\n        if not model._meta.proxy and router.allow_migrate_model(using, model):\n            if use_base_manager:\n                objects = model._base_manager\n            else:\n                objects = model._default_manager\n            queryset = objects.using(using).order_by(model._meta.pk.name)\n            if primary_keys:\n                queryset = queryset.filter(pk__in=primary_keys)\n            if count_only:\n                yield queryset.order_by().count()\n            else:\n                chunk_size = 2000 if queryset._prefetch_related_lookups else None\n                yield from queryset.iterator(chunk_size=chunk_size)",
    "docstring": "Collate the objects to be serialized. If count_only is True, just count the number of objects to be serialized.",
    "type": "method",
    "file_path": "django\\django\\core\\management\\commands\\dumpdata.py",
    "ast_data": "FunctionDef name:get_objects arg:count_only arguments arg If Assign Call Call Assign For Call If Compare Call Call Call For If Compare If BoolOp Compare Call If BoolOp Call If Assign Assign Assign Call Call If Assign Call If Call Call Assign Call"
  },
  {
    "library": "matplotlib",
    "name": "_on_key_release",
    "source_code": "def _on_key_release(self, event):\n    pass",
    "docstring": "Key release event handler.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\widgets.py",
    "ast_data": "FunctionDef name:_on_key_release arg:self arg:event arguments arg arg"
  },
  {
    "library": "matplotlib",
    "name": "SimpleArrow",
    "source_code": "class SimpleArrow(_Base):\n    ArrowAxisClass = _FancyAxislineStyle.SimpleArrow\n\n    def __init__(self, size=1):\n        self.size = size\n        super().__init__()\n\n    def new_line(self, axis_artist, transform):\n        linepath = Path([(0, 0), (0, 1)])\n        axisline = self.ArrowAxisClass(axis_artist, linepath, transform, line_mutation_scale=self.size)\n        return axisline",
    "docstring": "A simple arrow.",
    "type": "class",
    "file_path": "matplotlib\\lib\\mpl_toolkits\\axisartist\\axisline_style.py",
    "ast_data": "ClassDef name:SimpleArrow Assign FunctionDef name:__init__ arg:self arg:size arguments arg arg Assign Call Call FunctionDef name:new_line arg:self arg:axis_artist arg:transform arguments arg arg arg Assign Call Assign Call Return return:yes"
  },
  {
    "library": "virtualenv",
    "name": "_validate",
    "source_code": "def _validate(self):\n    pass",
    "docstring": "no op.",
    "type": "method",
    "file_path": "virtualenv\\src\\virtualenv\\config\\convert.py",
    "ast_data": "FunctionDef name:_validate arg:self arguments arg"
  },
  {
    "library": "pytorch",
    "name": "__init__",
    "source_code": "def __init__(self, c):\n    self.c = c",
    "docstring": ":param c: character or number",
    "type": "method",
    "file_path": "pytorch\\torch\\fx\\experimental\\migrate_gradual_types\\constraint.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:c arguments arg arg Assign"
  },
  {
    "library": "tensorflow",
    "name": "placeholder_value",
    "source_code": "@doc_controls.do_not_doc_inheritable\ndef placeholder_value(self, placeholder_context):\n    return super().placeholder_value(placeholder_context)",
    "docstring": "See tf.types.experimental.TraceType base class.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\dtypes.py",
    "ast_data": "FunctionDef name:placeholder_value arg:self arg:placeholder_context arguments arg arg Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "unique",
    "source_code": "def unique(self, name=None) -> 'DatasetV2':\n    from tensorflow.python.data.ops import unique_op\n    return unique_op._unique(self, name)",
    "docstring": "A transformation that discards duplicate elements of a . Use this transformation to produce a dataset that contains one instance of each unique element in the input. For example: >>> dataset = tf.data.Dataset.from_tensor_slices([1, 37, 2, 37, 2, 1]) >>> dataset = dataset.unique() >>> sorted([a.item() for a in dataset.as_numpy_iterator()]) [1, 2, 37] Note: This transformation only supports datasets which fit into memory and have elements of either , or type. Args: name: (Optional.) A name for the tf.data operation. Returns: A new with the transformation applied as described above.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\data\\ops\\dataset_ops.py",
    "ast_data": "FunctionDef name:unique arg:self arg:name arguments arg arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "is_first",
    "source_code": "@property\ndef is_first(self):\n    return self.stage_index == 0",
    "docstring": "Returns true if this stage is the first stage in the pipeline.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\pipelining\\stage.py",
    "ast_data": "FunctionDef name:is_first arg:self arguments arg Return return:yes Compare"
  },
  {
    "library": "cherrypy",
    "name": "ByteCountWrapper",
    "source_code": "class ByteCountWrapper(object):\n\n    def __init__(self, rfile):\n        self.rfile = rfile\n        self.bytes_read = 0\n\n    def read(self, size=-1):\n        data = self.rfile.read(size)\n        self.bytes_read += len(data)\n        return data\n\n    def readline(self, size=-1):\n        data = self.rfile.readline(size)\n        self.bytes_read += len(data)\n        return data\n\n    def readlines(self, sizehint=0):\n        total = 0\n        lines = []\n        line = self.readline()\n        while line:\n            lines.append(line)\n            total += len(line)\n            if 0 < sizehint <= total:\n                break\n            line = self.readline()\n        return lines\n\n    def close(self):\n        self.rfile.close()\n\n    def __iter__(self):\n        return self\n\n    def next(self):\n        data = self.rfile.next()\n        self.bytes_read += len(data)\n        return data",
    "docstring": "Wraps a file-like object, counting the number of bytes read.",
    "type": "class",
    "file_path": "cherrypy\\cherrypy\\lib\\cpstats.py",
    "ast_data": "ClassDef name:ByteCountWrapper FunctionDef name:__init__ arg:self arg:rfile arguments arg arg Assign Assign FunctionDef name:read arg:self arg:size arguments arg arg Assign Call Call Return return:yes FunctionDef name:readline arg:self arg:size arguments arg arg Assign Call Call Return return:yes FunctionDef name:readlines arg:self arg:sizehint arguments arg arg Assign Assign Assign Call While Call Call If Compare Assign Call Return return:yes FunctionDef name:close arg:self arguments arg Call FunctionDef name:__iter__ arg:self arguments arg Return return:yes FunctionDef name:next arg:self arguments arg Assign Call Call Return return:yes"
  },
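A quick sketch of the wrapper above counting bytes from an in-memory stream (assuming the class is in scope):

import io

raw = io.BytesIO(b'first line\nsecond line\n')
counted = ByteCountWrapper(raw)
counted.readline()         # reads the first 11 bytes
counted.read()             # reads the remaining 12 bytes
print(counted.bytes_read)  # 23: every byte read is tallied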
  {
    "library": "scikit-learn",
    "name": "_char_ngrams",
    "source_code": "def _char_ngrams(self, text_document):\n    text_document = self._white_spaces.sub(' ', text_document)\n    text_len = len(text_document)\n    min_n, max_n = self.ngram_range\n    if min_n == 1:\n        ngrams = list(text_document)\n        min_n += 1\n    else:\n        ngrams = []\n    ngrams_append = ngrams.append\n    for n in range(min_n, min(max_n + 1, text_len + 1)):\n        for i in range(text_len - n + 1):\n            ngrams_append(text_document[i:i + n])\n    return ngrams",
    "docstring": "Tokenize text_document into a sequence of character n-grams",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\feature_extraction\\text.py",
    "ast_data": "FunctionDef name:_char_ngrams arg:self arg:text_document arguments arg arg Assign Call Assign Call Assign If Compare Assign Call Assign Assign For Call Call For Call Call Return return:yes"
  },
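The private method above drives scikit-learn's character analyzers; its effect is easiest to observe through the public `CountVectorizer` API:

from sklearn.feature_extraction.text import CountVectorizer

# analyzer='char' tokenizes via character n-grams; with ngram_range=(2, 3)
# every 2- and 3-character window (including spaces) becomes a feature.
vec = CountVectorizer(analyzer='char', ngram_range=(2, 3))
vec.fit(['ab cd'])
print(sorted(vec.get_feature_names_out()))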
  {
    "library": "tensorflow",
    "name": "nested_row_splits",
    "source_code": "@property\ndef nested_row_splits(self):\n    rt_nested_splits = [self.row_splits]\n    rt_values = self.values\n    while isinstance(rt_values, RaggedTensorValue):\n        rt_nested_splits.append(rt_values.row_splits)\n        rt_values = rt_values.values\n    return tuple(rt_nested_splits)",
    "docstring": "The row_splits for all ragged dimensions in this ragged tensor value.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\ragged_tensor_value.py",
    "ast_data": "FunctionDef name:nested_row_splits arg:self arguments arg Assign Assign While Call Call Assign Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "_get_agg_axis",
    "source_code": "def _get_agg_axis(self, axis_num: int) -> Index:\n    if axis_num == 0:\n        return self.columns\n    elif axis_num == 1:\n        return self.index\n    else:\n        raise ValueError(f'Axis must be 0 or 1 (got {axis_num!r})')",
    "docstring": "Let's be explicit about this.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\frame.py",
    "ast_data": "FunctionDef name:_get_agg_axis arg:self arg:axis_num arguments arg arg If Compare Return return:yes If Compare Return return:yes Raise Call"
  },
  {
    "library": "matplotlib",
    "name": "FuncFormatter",
    "source_code": "class FuncFormatter(Formatter):\n\n    def __init__(self, func):\n        self.func = func\n        self.offset_string = ''\n\n    def __call__(self, x, pos=None):\n        return self.func(x, pos)\n\n    def get_offset(self):\n        return self.offset_string\n\n    def set_offset_string(self, ofs):\n        self.offset_string = ofs",
    "docstring": "Use a user-defined function for formatting. The function should take in two inputs (a tick value ``), and return a string containing the corresponding tick label.",
    "type": "class",
    "file_path": "matplotlib\\lib\\matplotlib\\ticker.py",
    "ast_data": "ClassDef name:FuncFormatter FunctionDef name:__init__ arg:self arg:func arguments arg arg Assign Assign FunctionDef name:__call__ arg:self arg:x arg:pos arguments arg arg arg Return return:yes Call FunctionDef name:get_offset arg:self arguments arg Return return:yes FunctionDef name:set_offset_string arg:self arg:ofs arguments arg arg Assign"
  },
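A short usage sketch for `FuncFormatter`: the wrapped callable receives the tick value and position and returns the label string:

import matplotlib.pyplot as plt
from matplotlib.ticker import FuncFormatter

fig, ax = plt.subplots()
ax.plot([0, 1e6, 2e6])
# Format y ticks as millions; pos is unused here but is always passed.
ax.yaxis.set_major_formatter(FuncFormatter(lambda x, pos: f'{x / 1e6:.1f}M'))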
  {
    "library": "tensorflow",
    "name": "_translate_to_fulltype_for_flat_tensors",
    "source_code": "def _translate_to_fulltype_for_flat_tensors(spec: type_spec.TypeSpec) -> List[full_type_pb2.FullTypeDef]:\n    if isinstance(spec, RaggedTensorSpec):\n        dt = spec.dtype\n        elem_t = _DT_TO_FT.get(dt)\n        if elem_t is None:\n            logging.vlog(1, 'dtype %s that has no conversion to fulltype.', dt)\n        elif elem_t == full_type_pb2.TFT_LEGACY_VARIANT:\n            logging.vlog(1, 'Ragged tensors containing variants are not supported.', dt)\n        else:\n            assert len(spec._flat_tensor_specs) == 1\n            return [full_type_pb2.FullTypeDef(type_id=full_type_pb2.TFT_RAGGED, args=[full_type_pb2.FullTypeDef(type_id=elem_t)])]\n    return [full_type_pb2.FullTypeDef(type_id=full_type_pb2.TFT_UNSET) for t in spec._flat_tensor_specs]",
    "docstring": "Convert a TypeSec to a list of FullTypeDef. The FullTypeDef created corresponds to the encoding used with datasets (and map_fn) that uses variants (and not FullTypeDef corresponding to the default \"component\" encoding). Currently, the only use of this is for information about the contents of ragged tensors, so only ragged tensors return useful full type information and other types return TFT_UNSET. While this could be improved in the future, this function is intended for temporary use and expected to be removed when type inference support is sufficient. Args: spec: A TypeSpec for one element of a dataset or map_fn. Returns: A list of FullTypeDef corresponding to SPEC. The length of this list is always the same as the length of spec._flat_tensor_specs.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\type_utils.py",
    "ast_data": "FunctionDef name:_translate_to_fulltype_for_flat_tensors arg:spec arguments arg If Call Assign Assign Call If Compare Call If Compare Call Compare Call Return return:yes Call Call Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "set_antialiased",
    "source_code": "def set_antialiased(self, aa):\n    if aa is None:\n        aa = self._get_default_antialiased()\n    self._antialiaseds = np.atleast_1d(np.asarray(aa, bool))\n    self.stale = True",
    "docstring": "Set the antialiasing state for rendering. Parameters ---------- aa : bool or list of bools",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\collections.py",
    "ast_data": "FunctionDef name:set_antialiased arg:self arg:aa arguments arg arg If Compare Assign Call Assign Call Call Assign"
  },
  {
    "library": "cherrypy",
    "name": "is_iterator",
    "source_code": "def is_iterator(obj):\n    from types import GeneratorType\n    if isinstance(obj, GeneratorType):\n        return True\n    elif not hasattr(obj, '__iter__'):\n        return False\n    else:\n        return iter(obj) is obj",
    "docstring": "Detect if the object provided implements the iterator protocol. (i.e. like a generator). This will return False for objects which are iterable, but not iterators themselves.",
    "type": "function",
    "file_path": "cherrypy\\cherrypy\\lib\\__init__.py",
    "ast_data": "FunctionDef name:is_iterator arg:obj arguments arg If Call Return return:yes If Call Return return:yes Return return:yes Compare Call"
  },
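The distinction the function above draws, in three asserts (importing from the file_path recorded here):

from cherrypy.lib import is_iterator

assert not is_iterator([1, 2, 3])        # iterable, but not its own iterator
assert is_iterator(iter([1, 2, 3]))      # list_iterator: iter(obj) is obj
assert is_iterator(x for x in range(3))  # generators short-circuit to True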
  {
    "library": "cherrypy",
    "name": "Host",
    "source_code": "class Host(object):\n    ip = '0.0.0.0'\n    port = 80\n    name = 'unknown.tld'\n\n    def __init__(self, ip, port, name=None):\n        self.ip = ip\n        self.port = port\n        if name is None:\n            name = ip\n        self.name = name\n\n    def __repr__(self):\n        return 'httputil.Host(%r, %r, %r)' % (self.ip, self.port, self.name)",
    "docstring": "An internet address. name Should be the client's host name. If not available (because no DNS lookup is performed), the IP address should be used instead.",
    "type": "class",
    "file_path": "cherrypy\\cherrypy\\lib\\httputil.py",
    "ast_data": "ClassDef name:Host Assign Assign Assign FunctionDef name:__init__ arg:self arg:ip arg:port arg:name arguments arg arg arg arg Assign Assign If Compare Assign Assign FunctionDef name:__repr__ arg:self arguments arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "generate_equalization_qconfig",
    "source_code": "def generate_equalization_qconfig(self) -> EqualizationQConfig:\n    return default_equalization_qconfig",
    "docstring": "This returns the equalization configuration for a module. For now, it just returns the default, but as more equalization options become possible, this method can get more fleshed out with more nuanced granularity. Returns the generated equalization QConfig according to what a valid configuration is",
    "type": "method",
    "file_path": "pytorch\\torch\\ao\\quantization\\fx\\_model_report\\detector.py",
    "ast_data": "FunctionDef name:generate_equalization_qconfig arg:self arguments arg Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "FigureCanvasTemplate",
    "source_code": "class FigureCanvasTemplate(FigureCanvasBase):\n    manager_class = FigureManagerTemplate\n\n    def draw(self):\n        renderer = RendererTemplate(self.figure.dpi)\n        self.figure.draw(renderer)\n    filetypes = {**FigureCanvasBase.filetypes, 'foo': 'My magic Foo format'}\n\n    def print_foo(self, filename, **kwargs):\n        self.draw()\n\n    def get_default_filetype(self):\n        return 'foo'",
    "docstring": "The canvas the figure renders into. Calls the draw and print fig methods, creates the renderers, etc. Note: GUI templates will want to connect events for button presses, mouse movements and key presses to functions that call the base class methods button_press_event, button_release_event, motion_notify_event, key_press_event, and key_release_event. See the implementations of the interactive backends for examples. Attributes ---------- figure : A high-level Figure instance",
    "type": "class",
    "file_path": "matplotlib\\lib\\matplotlib\\backends\\backend_template.py",
    "ast_data": "ClassDef name:FigureCanvasTemplate Assign FunctionDef name:draw arg:self arguments arg Assign Call Call Assign FunctionDef name:print_foo arg:self arg:filename arguments arg arg arg Call FunctionDef name:get_default_filetype arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "is_custom_device",
    "source_code": "def is_custom_device(device_name):\n    return context().is_custom_device(device_name)",
    "docstring": "Calls TFE_IsCustomDevice. Enables using C extensions specifying a custom device from Python. See the experimental eager C API in tensorflow/c/eager/c_api_experimental.h for details. Args: device_name: A string indicating the name to check whether it is a registered custom device. Returns: A boolean.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\context.py",
    "ast_data": "FunctionDef name:is_custom_device arg:device_name arguments arg Return return:yes Call Call"
  },
  {
    "library": "django",
    "name": "M2MDeserializationError",
    "source_code": "class M2MDeserializationError(Exception):\n\n    def __init__(self, original_exc, pk):\n        self.original_exc = original_exc\n        self.pk = pk",
    "docstring": "Something bad happened during deserialization of a ManyToManyField.",
    "type": "class",
    "file_path": "django\\django\\core\\serializers\\base.py",
    "ast_data": "ClassDef name:M2MDeserializationError FunctionDef name:__init__ arg:self arg:original_exc arg:pk arguments arg arg arg Assign Assign"
  },
  {
    "library": "django",
    "name": "urlizetrunc",
    "source_code": "@register.filter(is_safe=True, needs_autoescape=True)\n@stringfilter\ndef urlizetrunc(value, limit, autoescape=True):\n    return mark_safe(_urlize(value, trim_url_limit=int(limit), nofollow=True, autoescape=autoescape))",
    "docstring": "Convert URLs into clickable links, truncating URLs to the given character limit, and adding 'rel=nofollow' attribute to discourage spamming. Argument: Length to truncate URLs to.",
    "type": "function",
    "file_path": "django\\django\\template\\defaultfilters.py",
    "ast_data": "FunctionDef name:urlizetrunc arg:value arg:limit arg:autoescape arguments arg arg arg Return return:yes Call Call Call Call"
  },
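The filter above can also be called directly as a function; a sketch (requires Django installed, and depending on the version may need settings configured):

from django.template.defaultfilters import urlizetrunc

text = 'See https://www.example.com/docs/tutorial for details'
# Link text is truncated to 15 characters; the href keeps the full URL
# and carries rel="nofollow".
print(urlizetrunc(text, 15))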
  {
    "library": "seaborn",
    "name": "__init__",
    "source_code": "def __init__(self, *, bw_method=None, bw_adjust=1, gridsize=200, cut=3, clip=None, cumulative=False):\n    if clip is None:\n        clip = (None, None)\n    self.bw_method = bw_method\n    self.bw_adjust = bw_adjust\n    self.gridsize = gridsize\n    self.cut = cut\n    self.clip = clip\n    self.cumulative = cumulative\n    if cumulative and _no_scipy:\n        raise RuntimeError('Cumulative KDE evaluation requires scipy')\n    self.support = None",
    "docstring": "Initialize the estimator with its parameters. Parameters ---------- bw_method : string, scalar, or callable, optional Method for determining the smoothing bandwidth to use; passed to :class:. bw_adjust : number, optional Factor that multiplicatively scales the value chosen using ``. Increasing will make the curve smoother. See Notes. gridsize : int, optional Number of points on each dimension of the evaluation grid. cut : number, optional Factor, multiplied by the smoothing bandwidth, that determines how far the evaluation grid extends past the extreme datapoints. When set to 0, truncate the curve at the data limits. clip : pair of numbers or None, or a pair of such pairs Do not evaluate the density outside of these limits. cumulative : bool, optional If True, estimate a cumulative distribution function. Requires scipy.",
    "type": "method",
    "file_path": "seaborn\\seaborn\\_statistics.py",
    "ast_data": "FunctionDef name:__init__ arg:self arguments arg arg arg arg arg arg arg If Compare Assign Assign Assign Assign Assign Assign Assign If BoolOp Raise Call Assign"
  },
  {
    "library": "pandas",
    "name": "asi8",
    "source_code": "@property\ndef asi8(self) -> npt.NDArray[np.int64]:\n    return self._ndarray.view('i8')",
    "docstring": "Integer representation of the values. Returns ------- ndarray An ndarray with int64 dtype.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\arrays\\datetimelike.py",
    "ast_data": "FunctionDef name:asi8 arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "powers_",
    "source_code": "@property\ndef powers_(self):\n    check_is_fitted(self)\n    combinations = self._combinations(n_features=self.n_features_in_, min_degree=self._min_degree, max_degree=self._max_degree, interaction_only=self.interaction_only, include_bias=self.include_bias)\n    return np.vstack([np.bincount(c, minlength=self.n_features_in_) for c in combinations])",
    "docstring": "Exponent for each of the inputs in the output.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\preprocessing\\_polynomial.py",
    "ast_data": "FunctionDef name:powers_ arg:self arguments arg Call Assign Call Return return:yes Call Call"
  },
  {
    "library": "django",
    "name": "optgroups",
    "source_code": "def optgroups(self, name, value, attr=None):\n    default = (None, [], 0)\n    groups = [default]\n    has_selected = False\n    selected_choices = {str(v) for v in value if str(v) not in self.choices.field.empty_values}\n    if not self.is_required and (not self.allow_multiple_selected):\n        default[1].append(self.create_option(name, '', '', False, 0))\n    remote_model_opts = self.field.remote_field.model._meta\n    to_field_name = getattr(self.field.remote_field, 'field_name', remote_model_opts.pk.attname)\n    to_field_name = remote_model_opts.get_field(to_field_name).attname\n    choices = ((getattr(obj, to_field_name), self.choices.field.label_from_instance(obj)) for obj in self.choices.queryset.using(self.db).filter(**{'%s__in' % to_field_name: selected_choices}))\n    for option_value, option_label in choices:\n        selected = str(option_value) in value and (has_selected is False or self.allow_multiple_selected)\n        has_selected |= selected\n        index = len(default[1])\n        subgroup = default[1]\n        subgroup.append(self.create_option(name, option_value, option_label, selected_choices, index))\n    return groups",
    "docstring": "Return selected options based on the ModelChoiceIterator.",
    "type": "method",
    "file_path": "django\\django\\contrib\\admin\\widgets.py",
    "ast_data": "FunctionDef name:optgroups arg:self arg:name arg:value arg:attr arguments arg arg arg arg Assign Assign Assign Assign Call Compare Call If BoolOp Call Call Assign Assign Call Assign Call Assign Call Call Call Call For Assign BoolOp Compare Call BoolOp Compare Assign Call Assign Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "check_num_samples",
    "source_code": "def check_num_samples(ins, batch_size=None, steps=None, steps_name='steps'):\n    if steps is not None and batch_size is not None:\n        raise ValueError('If ' + steps_name + ' is set, the `batch_size` must be None.')\n    if check_steps_argument(ins, steps, steps_name):\n        return None\n    if hasattr(ins[0], 'shape'):\n        return int(ins[0].shape[0])\n    return None",
    "docstring": "Determine the number of samples provided for training and evaluation. The number of samples is not defined when running with , in which case the number of samples is set to . Args: ins: List of tensors to be fed to the Keras function. batch_size: Integer batch size or if not defined. steps: Total number of steps (batches of samples) before declaring finished. Ignored with the default value of . steps_name: The public API's parameter name for . Raises: ValueError: when is and the attribute does not exist. Also raises ValueError when is not and is not because they are mutually exclusive. Returns: When steps is , returns the number of samples to be processed based on the size of the first dimension of the first input numpy array. When steps is not and is , returns .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\training_utils_v1.py",
    "ast_data": "FunctionDef name:check_num_samples arg:ins arg:batch_size arg:steps arg:steps_name arguments arg arg arg arg If BoolOp Compare Compare Raise Call If Call Return return:no If Call Return return:yes Call Return return:no"
  },
  {
    "library": "tensorflow",
    "name": "SparseTopKCategoricalAccuracy",
    "source_code": "class SparseTopKCategoricalAccuracy(MeanMetricWrapper):\n\n    def __init__(self, k=5, name='sparse_top_k_categorical_accuracy', dtype=None):\n        super(SparseTopKCategoricalAccuracy, self).__init__(sparse_top_k_categorical_accuracy, name, dtype=dtype, k=k)",
    "docstring": "Computes how often integer targets are in the top predictions. Args: k: (Optional) Number of top elements to look at for computing accuracy. Defaults to 5. name: (Optional) string name of the metric instance. dtype: (Optional) data type of the metric result. Standalone usage: >>> m = tf.keras.metrics.SparseTopKCategoricalAccuracy(k=1) >>> m.update_state([2, 1], [[0.1, 0.9, 0.8], [0.05, 0.95, 0]]) >>> m.result().numpy() 0.5 >>> m.reset_state() >>> m.update_state([2, 1], [[0.1, 0.9, 0.8], [0.05, 0.95, 0]], ... sample_weight=[0.7, 0.3]) >>> m.result().numpy() 0.3 Usage with API:",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\metrics.py",
    "ast_data": "ClassDef name:SparseTopKCategoricalAccuracy FunctionDef name:__init__ arg:self arg:k arg:name arg:dtype arguments arg arg arg arg Call Call"
  },
  {
    "library": "pytorch",
    "name": "set_deterministic_debug_mode",
    "source_code": "def set_deterministic_debug_mode(debug_mode: _Union[builtins.int, str]) -> None:\n    if not isinstance(debug_mode, (builtins.int, str)):\n        raise TypeError(f'debug_mode must be str or int, but got {type(debug_mode)}')\n    if isinstance(debug_mode, str):\n        if debug_mode == 'default':\n            debug_mode = 0\n        elif debug_mode == 'warn':\n            debug_mode = 1\n        elif debug_mode == 'error':\n            debug_mode = 2\n        else:\n            raise RuntimeError(f'invalid value of debug_mode, expected one of `default`, `warn`, `error`, but got {debug_mode}')\n    if debug_mode == 0:\n        _C._set_deterministic_algorithms(False)\n    elif debug_mode == 1:\n        _C._set_deterministic_algorithms(True, warn_only=True)\n    elif debug_mode == 2:\n        _C._set_deterministic_algorithms(True)\n    else:\n        raise RuntimeError(f'invalid value of debug_mode, expected 0, 1, or 2, but got {debug_mode}')",
    "docstring": "Sets the debug mode for deterministic operations. .. note:: This is an alternative interface for :func:. Refer to that function's documentation for details about affected operations. Args: debug_mode(str or int): If \"default\" or 0, don't error or warn on nondeterministic operations. If \"warn\" or 1, warn on nondeterministic operations. If \"error\" or 2, error on nondeterministic operations.",
    "type": "function",
    "file_path": "pytorch\\torch\\__init__.py",
    "ast_data": "FunctionDef name:set_deterministic_debug_mode arg:debug_mode arguments arg If Call Raise Call Call If Call If Compare Assign If Compare Assign If Compare Assign Raise Call If Compare Call If Compare Call If Compare Call Raise Call"
  },
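String and integer forms shown in the code above are interchangeable; a quick sketch:

import torch

torch.set_deterministic_debug_mode('warn')
# The getter reports the integer form: 0=default, 1=warn, 2=error.
assert torch.get_deterministic_debug_mode() == 1
torch.set_deterministic_debug_mode(0)  # restore the default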
  {
    "library": "tensorflow",
    "name": "stateless_random_contrast",
    "source_code": "@tf_export('image.stateless_random_contrast', v1=[])\n@dispatch.add_dispatch_support\ndef stateless_random_contrast(image, lower, upper, seed):\n    if upper <= lower:\n        raise ValueError('upper must be > lower.')\n    if lower < 0:\n        raise ValueError('lower must be non-negative.')\n    contrast_factor = stateless_random_ops.stateless_random_uniform(shape=[], minval=lower, maxval=upper, seed=seed)\n    return adjust_contrast(image, contrast_factor)",
    "docstring": "Adjust the contrast of images by a random factor deterministically. Guarantees the same results given the same independent of how many times the function is called, and independent of global seed settings (e.g. ). Args: image: An image tensor with 3 or more dimensions. lower: float. Lower bound for the random contrast factor. upper: float. Upper bound for the random contrast factor. seed: A shape [2] Tensor, the seed to the random number generator. Must have dtype or . (When using XLA, only is allowed.) Usage Example: >>> x = [[[1.0, 2.0, 3.0], ... [4.0, 5.0, 6.0]], ... [[7.0, 8.0, 9.0], ... [10.0, 11.0, 12.0]]] >>> seed = (1, 2) >>> tf.image.stateless_random_contrast(x, 0.2, 0.5, seed) Returns: The contrast-adjusted image(s). Raises: ValueError: if or if .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\image_ops_impl.py",
    "ast_data": "FunctionDef name:stateless_random_contrast arg:image arg:lower arg:upper arg:seed arguments arg arg arg arg If Compare Raise Call If Compare Raise Call Assign Call Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "is_built",
    "source_code": "def is_built() -> bool:\n    return torch._C._has_mps",
    "docstring": "Return whether PyTorch is built with MPS support. Note that this doesn't necessarily mean MPS is available; just that if this PyTorch binary were run a machine with working MPS drivers and devices, we would be able to use it.",
    "type": "function",
    "file_path": "pytorch\\torch\\backends\\mps\\__init__.py",
    "ast_data": "FunctionDef name:is_built arguments Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "apply_grad_to_update_var",
    "source_code": "def apply_grad_to_update_var(var, grad):\n    if isinstance(var, tensor.Tensor):\n        raise NotImplementedError('Trying to update a Tensor ', var)\n    apply_kwargs = {}\n    if isinstance(grad, indexed_slices.IndexedSlices):\n        if var.constraint is not None:\n            raise RuntimeError('Cannot use a constraint function on a sparse variable.')\n        if 'apply_state' in self._sparse_apply_args:\n            apply_kwargs['apply_state'] = apply_state\n        return self._resource_apply_sparse_duplicate_indices(grad.values, var, grad.indices, **apply_kwargs)\n    if 'apply_state' in self._dense_apply_args:\n        apply_kwargs['apply_state'] = apply_state\n    update_op = self._resource_apply_dense(grad, var, **apply_kwargs)\n    if var.constraint is not None:\n        with ops.control_dependencies([update_op]):\n            return var.assign(var.constraint(var))\n    else:\n        return update_op",
    "docstring": "Apply gradient to variable.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\optimizer_v2\\optimizer_v2.py",
    "ast_data": "FunctionDef name:apply_grad_to_update_var arg:var arg:grad arguments arg arg If Call Raise Call Assign If Call If Compare Raise Call If Compare Assign Return return:yes Call If Compare Assign Assign Call If Compare With Call Return return:yes Call Call Return return:yes"
  },
  {
    "library": "kornia",
    "name": "get_session",
    "source_code": "def get_session(self) -> ort.InferenceSession:\n    return self._session",
    "docstring": "Get the current ONNXRuntime InferenceSession. Returns: ort.InferenceSession: The current ONNXRuntime session.",
    "type": "method",
    "file_path": "kornia\\kornia\\core\\mixin\\onnx.py",
    "ast_data": "FunctionDef name:get_session arg:self arguments arg Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "set_semimajor",
    "source_code": "def set_semimajor(self, a):\n    self.a = float(a)\n    self._path = None\n    self.stale = True",
    "docstring": "Set the semi-major axis *a* of the annulus. Parameters ---------- a : float",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\patches.py",
    "ast_data": "FunctionDef name:set_semimajor arg:self arg:a arguments arg arg Assign Call Assign Assign"
  },
  {
    "library": "matplotlib",
    "name": "new_timer",
    "source_code": "def new_timer(self, interval=None, callbacks=None):\n    return self._timer_cls(interval=interval, callbacks=callbacks)",
    "docstring": "Create a new backend-specific subclass of . This is useful for getting periodic events through the backend's native event loop. Implemented only for backends with GUIs. Parameters ---------- interval : int Timer interval in milliseconds. callbacks : list[tuple[callable, tuple, dict]] Sequence of (func, args, kwargs) where `` will be removed from the timer. Examples -------- >>> timer = fig.canvas.new_timer(callbacks=[(f1, (1,), {'a': 3})])",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\backend_bases.py",
    "ast_data": "FunctionDef name:new_timer arg:self arg:interval arg:callbacks arguments arg arg arg Return return:yes Call"
  },
  {
    "library": "kornia",
    "name": "apply_transform",
    "source_code": "def apply_transform(self, input: Tensor, params: Dict[str, Tensor], flags: Dict[str, Any], transform: Optional[Tensor]=None) -> Tensor:\n    return input.add(params['gradient'].to(input)).clamp(0, 1)",
    "docstring": "Apply random gaussian gradient illumination to the input image.",
    "type": "method",
    "file_path": "kornia\\kornia\\augmentation\\_2d\\intensity\\linear_illumination.py",
    "ast_data": "FunctionDef name:apply_transform arg:self arg:input arg:params arg:flags arg:transform arguments arg arg arg arg arg Return return:yes Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "dropout",
    "source_code": "def dropout(input: Tensor, p: float=0.5, training: bool=True, inplace: bool=False) -> Tensor:\n    if has_torch_function_unary(input):\n        return handle_torch_function(dropout, (input,), input, p=p, training=training, inplace=inplace)\n    if p < 0.0 or p > 1.0:\n        raise ValueError(f'dropout probability has to be between 0 and 1, but got {p}')\n    return _VF.dropout_(input, p, training) if inplace else _VF.dropout(input, p, training)",
    "docstring": "During training, randomly zeroes some elements of the input tensor with probability :attr:. Uses samples from a Bernoulli distribution. See :class: for details. Args: p: probability of an element to be zeroed. Default: 0.5 training: apply dropout if is ``",
    "type": "function",
    "file_path": "pytorch\\torch\\nn\\functional.py",
    "ast_data": "FunctionDef name:dropout arg:input arg:p arg:training arg:inplace arguments arg arg arg arg If Call Return return:yes Call If BoolOp Compare Compare Raise Call Return return:yes Call Call"
  },
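A sketch of the functional dropout above, showing the training/eval split and the 1 / (1 - p) rescaling of surviving elements:

import torch
import torch.nn.functional as F

x = torch.ones(2, 4)
y = F.dropout(x, p=0.5, training=True)   # zeros ~half, scales the rest by 2
assert torch.equal(F.dropout(x, p=0.5, training=False), x)  # identity in eval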
  {
    "library": "tensorflow",
    "name": "slice_arrays",
    "source_code": "def slice_arrays(arrays, start=None, stop=None):\n    if arrays is None:\n        return [None]\n    if isinstance(start, list) and stop is not None:\n        raise ValueError('The stop argument has to be None if the value of start is a list.')\n    elif isinstance(arrays, list):\n        if hasattr(start, '__len__'):\n            if hasattr(start, 'shape'):\n                start = start.tolist()\n            return [None if x is None else x[start] for x in arrays]\n        return [None if x is None else None if not hasattr(x, '__getitem__') else x[start:stop] for x in arrays]\n    else:\n        if hasattr(start, '__len__'):\n            if hasattr(start, 'shape'):\n                start = start.tolist()\n            return arrays[start]\n        if hasattr(start, '__getitem__'):\n            return arrays[start:stop]\n        return [None]",
    "docstring": "Slice an array or list of arrays. This takes an array-like, or a list of array-likes, and outputs: - arrays[start:stop] if is an array-like - [x[start:stop] for x in arrays] if is a list Can also work on list/array of indices: Args: arrays: Single array or list of arrays. start: can be an integer index (start index) or a list/array of indices stop: integer (stop index); should be None if was a list. Returns: A slice of the array(s). Raises: ValueError: If the value of start is a list and stop is not None.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\utils\\generic_utils.py",
    "ast_data": "FunctionDef name:slice_arrays arg:arrays arg:start arg:stop arguments arg arg arg If Compare Return return:no If BoolOp Call Compare Raise Call If Call If Call If Call Assign Call Return return:yes Compare Return return:yes Compare Call If Call If Call Assign Call Return return:yes If Call Return return:yes Return return:no"
  },
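Assuming the helper above is in scope, the two supported calling conventions look like this:

import numpy as np

a, b = np.arange(5), np.arange(5) * 10
# Contiguous slice across both arrays.
print(slice_arrays([a, b], 1, 3))    # [array([1, 2]), array([10, 20])]
# A list of indices selects arbitrary elements via fancy indexing instead.
print(slice_arrays([a, b], [0, 4]))  # [array([0, 4]), array([ 0, 40])]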
  {
    "library": "pandas",
    "name": "value_counts",
    "source_code": "def value_counts(self, dropna: bool=True) -> Series:\n    from pandas import CategoricalIndex, Series\n    code, cat = (self._codes, self.categories)\n    ncat, mask = (len(cat), code >= 0)\n    ix, clean = (np.arange(ncat), mask.all())\n    if dropna or clean:\n        obs = code if clean else code[mask]\n        count = np.bincount(obs, minlength=ncat or 0)\n    else:\n        count = np.bincount(np.where(mask, code, ncat))\n        ix = np.append(ix, -1)\n    ix = coerce_indexer_dtype(ix, self.dtype.categories)\n    ix_categorical = self._from_backing_data(ix)\n    return Series(count, index=CategoricalIndex(ix_categorical), dtype='int64', name='count', copy=False)",
    "docstring": "Return a Series containing counts of each category. Every category will have an entry, even those with a count of 0. Parameters ---------- dropna : bool, default True Don't include counts of NaN. Returns ------- counts : Series See Also -------- Series.value_counts",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\arrays\\categorical.py",
    "ast_data": "FunctionDef name:value_counts arg:self arg:dropna arguments arg arg Assign Assign Call Compare Assign Call Call If BoolOp Assign Assign Call BoolOp Assign Call Call Assign Call Assign Call Assign Call Return return:yes Call Call"
  },
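A sketch of the behavior documented above: unlike a plain `Series.value_counts`, every category appears, even with a zero count:

import pandas as pd

cat = pd.Categorical(['a', 'b', 'a'], categories=['a', 'b', 'c'])
print(cat.value_counts())  # counts for 'a' (2), 'b' (1), and 'c' (0) all present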
  {
    "library": "scipy",
    "name": "standardize_constraints",
    "source_code": "def standardize_constraints(constraints, x0, meth):\n    all_constraint_types = (NonlinearConstraint, LinearConstraint, dict)\n    new_constraint_types = all_constraint_types[:-1]\n    if constraints is None:\n        constraints = []\n    elif isinstance(constraints, all_constraint_types):\n        constraints = [constraints]\n    else:\n        constraints = list(constraints)\n    if meth in ['trust-constr', 'cobyqa', 'new', 'cobyla']:\n        for i, con in enumerate(constraints):\n            if not isinstance(con, new_constraint_types):\n                constraints[i] = old_constraint_to_new(i, con)\n    else:\n        for i, con in enumerate(list(constraints)):\n            if isinstance(con, new_constraint_types):\n                old_constraints = new_constraint_to_old(con, x0)\n                constraints[i] = old_constraints[0]\n                constraints.extend(old_constraints[1:])\n    return constraints",
    "docstring": "Converts constraints to the form required by the solver.",
    "type": "function",
    "file_path": "scipy\\scipy\\optimize\\_minimize.py",
    "ast_data": "FunctionDef name:standardize_constraints arg:constraints arg:x0 arg:meth arguments arg arg arg Assign Assign If Compare Assign If Call Assign Assign Call If Compare For Call If Call Assign Call For Call Call If Call Assign Call Assign Call Return return:yes"
  },
  {
    "library": "scipy",
    "name": "TransferFunctionDiscrete",
    "source_code": "class TransferFunctionDiscrete(TransferFunction, dlti):\n    pass",
    "docstring": "Discrete-time Linear Time Invariant system in transfer function form. Represents the system as the transfer function :math:, where :math: are elements of the numerator , :math: are elements of the denominator , and `TransferFunctiondltiTransferFunctiondltiStateSpaceTransferFunctionZerosPolesGainTrueTransferFunctionABCDH(z) = \\frac{z^2 + 3z + 3}{z^2 + 2z + 1}` with a sampling time of 0.5 seconds: >>> from scipy import signal >>> num = [1, 3, 3] >>> den = [1, 2, 1] >>> signal.TransferFunction(num, den, dt=0.5) TransferFunctionDiscrete( array([ 1., 3., 3.]), array([ 1., 2., 1.]), dt: 0.5 )",
    "type": "class",
    "file_path": "scipy\\scipy\\signal\\_ltisys.py",
    "ast_data": "ClassDef name:TransferFunctionDiscrete"
  },
  {
    "library": "django",
    "name": "__str__",
    "source_code": "def __str__(self):\n    sql, params = self.sql_with_params()\n    return sql % params",
    "docstring": "Return the query as a string of SQL with the parameter values substituted in (use sql_with_params() to see the unsubstituted string). Parameter values won't necessarily be quoted correctly, since that is done by the database interface at execution time.",
    "type": "method",
    "file_path": "django\\django\\db\\models\\sql\\query.py",
    "ast_data": "FunctionDef name:__str__ arg:self arguments arg Assign Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "set_object_type",
    "source_code": "def set_object_type(self, object_type: Union[Callable, str], qconfig_list: list[QConfigAny]) -> QConfigMultiMapping:\n    self._insert_qconfig_list('object_type_qconfigs', [object_type], qconfig_list)\n    return self",
    "docstring": "Set object type QConfigs see :func: for more info",
    "type": "method",
    "file_path": "pytorch\\torch\\ao\\ns\\fx\\qconfig_multi_mapping.py",
    "ast_data": "FunctionDef name:set_object_type arg:self arg:object_type arg:qconfig_list arguments arg arg arg Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "cols_to_vars",
    "source_code": "def cols_to_vars(self):\n    return self._cols_to_vars",
    "docstring": "Returns a dict mapping _FeatureColumns to variables. See for more information. This is not populated till is called i.e. layer is built.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\feature_column\\feature_column.py",
    "ast_data": "FunctionDef name:cols_to_vars arg:self arguments arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "create_test_case_repro",
    "source_code": "@classmethod\ndef create_test_case_repro(cls, proto: bytes, inputs, outputs, dir: str, name: str | None=None):\n    if name is None:\n        name = datetime.datetime.now().strftime('%Y_%m_%d_%H_%M_%S_%f')\n    return onnx_proto_utils.export_as_test_case(proto, _to_numpy(inputs), _to_numpy(outputs), name, dir)",
    "docstring": "Create a repro under \"{dir}/test_{name}\" for an ONNX test case. The test case contains the model and the inputs/outputs data. The directory structure is as follows: dir ├── test_ │ ├── model.onnx │ └── test_data_set_0 │ ├── input_0.pb │ ├── input_1.pb │ ├── output_0.pb │ └── output_1.pb Args: proto: ONNX model proto. inputs: Inputs to the model. outputs: Outputs of the model. dir: Directory to save the repro. name: Name of the test case. If not specified, a name based on current time will be generated. Returns: Path to the repro.",
    "type": "method",
    "file_path": "pytorch\\torch\\onnx\\verification.py",
    "ast_data": "FunctionDef name:create_test_case_repro arg:cls arg:proto arg:inputs arg:outputs arg:dir arg:name arguments arg arg arg arg arg arg If Compare Assign Call Call Return return:yes Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "do_partition",
    "source_code": "def do_partition(self) -> GraphModule:\n    module_with_submodules = split_module(self.graph_module, self.torch_module, lambda node: self.node_to_partition[node])\n    return module_with_submodules",
    "docstring": "Return a new fx module with submodule nodes (partitions).",
    "type": "method",
    "file_path": "pytorch\\torch\\fx\\experimental\\accelerator_partitioner.py",
    "ast_data": "FunctionDef name:do_partition arg:self arguments arg Assign Call arguments arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "PoolMemoryPlanningLine",
    "source_code": "@dataclasses.dataclass\nclass PoolMemoryPlanningLine(MemoryPlanningLine):\n    group: BufferGroup\n    timestep: Optional[int] = None\n\n    @property\n    def node(self):\n        return self.group.node",
    "docstring": "Abstract base class for {Alloc,Dealloc}FromPoolLine",
    "type": "class",
    "file_path": "pytorch\\torch\\_inductor\\codegen\\memory_planning.py",
    "ast_data": "ClassDef name:PoolMemoryPlanningLine FunctionDef name:node arg:self arguments arg Return return:yes"
  },
  {
    "library": "scipy",
    "name": "finite_homology_growth",
    "source_code": "def finite_homology_growth(self):\n    if self.LMC.size == 0:\n        return\n    self.hgrd = self.LMC.size - self.hgr\n    self.hgr = self.LMC.size\n    if self.hgrd <= self.minhgrd:\n        self.stop_global = True\n    if self.disp:\n        logging.info(f'Current homology growth = {self.hgrd}  (minimum growth = {self.minhgrd})')\n    return self.stop_global",
    "docstring": "Stop the algorithm if homology group rank did not grow in iteration.",
    "type": "method",
    "file_path": "scipy\\scipy\\optimize\\_shgo.py",
    "ast_data": "FunctionDef name:finite_homology_growth arg:self arguments arg If Compare Return return:no Assign Assign If Compare Assign If Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "set_zsort",
    "source_code": "def set_zsort(self, zsort):\n    self._zsortfunc = self._zsort_functions[zsort]\n    self._sort_zpos = None\n    self.stale = True",
    "docstring": "Set the calculation method for the z-order. Parameters ---------- zsort : {'average', 'min', 'max'} The function applied on the z-coordinates of the vertices in the viewer's coordinate system, to determine the z-order.",
    "type": "method",
    "file_path": "matplotlib\\lib\\mpl_toolkits\\mplot3d\\art3d.py",
    "ast_data": "FunctionDef name:set_zsort arg:self arg:zsort arguments arg arg Assign Assign Assign"
  },
  {
    "library": "tensorflow",
    "name": "DotDimensionNumbers",
    "source_code": "class DotDimensionNumbers:\n    __slots__ = ('lhs_contracting_dimensions', 'rhs_contracting_dimensions', 'lhs_batch_dimensions', 'rhs_batch_dimensions')\n\n    def __init__(self):\n        self.lhs_contracting_dimensions = []\n        self.rhs_contracting_dimensions = []\n        self.lhs_batch_dimensions = []\n        self.rhs_batch_dimensions = []",
    "docstring": "Python representation of a xla.DotDimensionNumbers protobuf.",
    "type": "class",
    "file_path": "tensorflow\\third_party\\xla\\xla\\python\\xla_client.py",
    "ast_data": "ClassDef name:DotDimensionNumbers Assign FunctionDef name:__init__ arg:self arguments arg Assign Assign Assign Assign"
  },
  {
    "library": "seaborn",
    "name": "share",
    "source_code": "def share(self, **shares: bool | str) -> Plot:\n    new = self._clone()\n    new._shares.update(shares)\n    return new",
    "docstring": "Control sharing of axis limits and ticks across subplots. Keywords correspond to variables defined in the plot, and values can be boolean (to share across all subplots), or one of \"row\" or \"col\" (to share more selectively across one dimension of a grid). Behavior for non-coordinate variables is currently undefined. Examples -------- .. include:: ../docstrings/objects.Plot.share.rst",
    "type": "method",
    "file_path": "seaborn\\seaborn\\_core\\plot.py",
    "ast_data": "FunctionDef name:share arg:self arguments arg arg Assign Call Call Return return:yes"
  },
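A usage sketch for `Plot.share`, assuming the seaborn example datasets are available for download:

import seaborn.objects as so
from seaborn import load_dataset

penguins = load_dataset('penguins')
(
    so.Plot(penguins, x='bill_length_mm', y='bill_depth_mm')
    .facet(col='species', row='sex')
    .share(x=True, y='row')  # one x scale everywhere; y shared within each row
)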
  {
    "library": "matplotlib",
    "name": "_check_color_like",
    "source_code": "def _check_color_like(**kwargs):\n    for k, v in kwargs.items():\n        if not is_color_like(v):\n            raise ValueError(f\"{v!r} is not a valid value for {k}: supported inputs are (r, g, b) and (r, g, b, a) 0-1 float tuples; '#rrggbb', '#rrggbbaa', '#rgb', '#rgba' strings; named color strings; string reprs of 0-1 floats for grayscale values; 'C0', 'C1', ... strings for colors of the color cycle; and pairs combining one of the above with an alpha value\")",
    "docstring": "For each *key, value* pair in *kwargs*, check that *value* is color-like.",
    "type": "function",
    "file_path": "matplotlib\\lib\\matplotlib\\colors.py",
    "ast_data": "FunctionDef name:_check_color_like arguments arg For Call If Call Raise Call"
  },
  {
    "library": "cryptography",
    "name": "rfc4514_string",
    "source_code": "def rfc4514_string(self, attr_name_overrides: _OidNameMap | None=None) -> str:\n    attr_name = attr_name_overrides.get(self.oid) if attr_name_overrides else None\n    if attr_name is None:\n        attr_name = self.rfc4514_attribute_name\n    return f'{attr_name}={_escape_dn_value(self.value)}'",
    "docstring": "Format as RFC4514 Distinguished Name string. Use short attribute name if available, otherwise fall back to OID dotted string.",
    "type": "method",
    "file_path": "cryptography\\src\\cryptography\\x509\\name.py",
    "ast_data": "FunctionDef name:rfc4514_string arg:self arg:attr_name_overrides arguments arg arg Assign Call If Compare Assign Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "_repr_latex_",
    "source_code": "@final\ndef _repr_latex_(self):\n    if config.get_option('styler.render.repr') == 'latex':\n        return self.to_latex()\n    else:\n        return None",
    "docstring": "Returns a LaTeX representation for a particular object. Mainly for use with nbconvert (jupyter notebook conversion to pdf).",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\generic.py",
    "ast_data": "FunctionDef name:_repr_latex_ arg:self arguments arg If Compare Call Return return:yes Call Return return:no"
  },
  {
    "library": "matplotlib",
    "name": "_get_unmasked_polys",
    "source_code": "def _get_unmasked_polys(self):\n    mask = np.any(np.ma.getmaskarray(self._coordinates), axis=-1)\n    mask = mask[0:-1, 0:-1] | mask[1:, 1:] | mask[0:-1, 1:] | mask[1:, 0:-1]\n    arr = self.get_array()\n    if arr is not None:\n        arr = np.ma.getmaskarray(arr)\n        if arr.ndim == 3:\n            mask |= np.any(arr, axis=-1)\n        elif arr.ndim == 2:\n            mask |= arr\n        else:\n            mask |= arr.reshape(self._coordinates[:-1, :-1, :].shape[:2])\n    return ~mask",
    "docstring": "Get the unmasked regions using the coordinates and array",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\collections.py",
    "ast_data": "FunctionDef name:_get_unmasked_polys arg:self arguments arg Assign Call Call Assign Assign Call If Compare Assign Call If Compare Call If Compare Call Return return:yes"
  },
  {
    "library": "scrapy",
    "name": "disconnect_all",
    "source_code": "def disconnect_all(signal: TypingAny=Any, sender: TypingAny=Any) -> None:\n    for receiver in liveReceivers(getAllReceivers(sender, signal)):\n        disconnect(receiver, signal=signal, sender=sender)",
    "docstring": "Disconnect all signal handlers. Useful for cleaning up after running tests.",
    "type": "function",
    "file_path": "scrapy\\scrapy\\utils\\signal.py",
    "ast_data": "FunctionDef name:disconnect_all arg:signal arg:sender arguments arg arg For Call Call Call"
  },
  {
    "library": "numpy",
    "name": "legadd",
    "source_code": "def legadd(c1, c2):\n    return pu._add(c1, c2)",
    "docstring": "Add one Legendre series to another. Returns the sum of two Legendre series + . The arguments are sequences of coefficients ordered from lowest order term to highest, i.e., [1,2,3] represents the series ``. Parameters ---------- c1, c2 : array_like 1-D arrays of Legendre series coefficients ordered from low to high. Returns ------- out : ndarray Array representing the Legendre series of their sum. See Also -------- legsub, legmulx, legmul, legdiv, legpow Notes ----- Unlike multiplication, division, etc., the sum of two Legendre series is a Legendre series (without having to \"reproject\" the result onto the basis set) so addition, just like that of \"standard\" polynomials, is simply \"component-wise.\" Examples -------- >>> from numpy.polynomial import legendre as L >>> c1 = (1,2,3) >>> c2 = (3,2,1) >>> L.legadd(c1,c2) array([4., 4., 4.])",
    "type": "function",
    "file_path": "numpy\\numpy\\polynomial\\legendre.py",
    "ast_data": "FunctionDef name:legadd arg:c1 arg:c2 arguments arg arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "binary_elementwise_apis",
    "source_code": "def binary_elementwise_apis():\n    return tuple(_BINARY_ELEMENTWISE_APIS)",
    "docstring": "Returns a list of APIs that have been registered as binary elementwise.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\util\\dispatch.py",
    "ast_data": "FunctionDef name:binary_elementwise_apis arguments Return return:yes Call"
  },
  {
    "library": "sphinx",
    "name": "filter_meta_fields",
    "source_code": "def filter_meta_fields(app: Sphinx, domain: str, objtype: str, content: Element) -> None:\n    if domain != 'py':\n        return\n    for node in content:\n        if isinstance(node, nodes.field_list):\n            fields = cast('list[nodes.field]', node)\n            for field in reversed(fields):\n                field_name = cast('nodes.field_body', field[0]).astext().strip()\n                if field_name == 'meta' or field_name.startswith('meta '):\n                    node.remove(field)",
    "docstring": "Filter `` field from its docstring.",
    "type": "function",
    "file_path": "sphinx\\sphinx\\domains\\python\\__init__.py",
    "ast_data": "FunctionDef name:filter_meta_fields arg:app arg:domain arg:objtype arg:content arguments arg arg arg arg If Compare Return return:no For If Call Assign Call For Call Assign Call Call Call If BoolOp Compare Call Call"
  },
  {
    "library": "django",
    "name": "exists",
    "source_code": "def exists(self):\n    if self._result_cache is None:\n        return self.query.has_results(using=self.db)\n    return bool(self._result_cache)",
    "docstring": "Return True if the QuerySet would have any results, False otherwise.",
    "type": "method",
    "file_path": "django\\django\\db\\models\\query.py",
    "ast_data": "FunctionDef name:exists arg:self arguments arg If Compare Return return:yes Call Return return:yes Call"
  },
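A minimal, non-runnable sketch of the pattern this method enables; `Book` is a hypothetical model and requires a configured Django project:

```python
# Hypothetical model for illustration only. When the queryset has not
# been evaluated, .exists() issues a cheap has-results query instead of
# fetching and caching every row.
if Book.objects.filter(author__name="Tolstoy").exists():
    print("we have Tolstoy in stock")
```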
  {
    "library": "scikit-learn",
    "name": "_check_X",
    "source_code": "def _check_X(X):\n    if hasattr(X, '__array__') and hasattr(X, 'shape') or hasattr(X, '__dataframe__') or sparse.issparse(X):\n        return X\n    return check_array(X, ensure_all_finite='allow-nan', dtype=object)",
    "docstring": "Use check_array only when necessary, e.g. on lists and other non-array-likes.",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\compose\\_column_transformer.py",
    "ast_data": "FunctionDef name:_check_X arg:X arguments arg If BoolOp BoolOp Call Call Call Call Return return:yes Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "TFRecordWriter",
    "source_code": "@tf_export('data.experimental.TFRecordWriter')\n@deprecation.deprecated(None, 'To write TFRecords to disk, use `tf.io.TFRecordWriter`. To save and load the contents of a dataset, use `tf.data.experimental.save` and `tf.data.experimental.load`')\nclass TFRecordWriter:\n\n    def __init__(self, filename, compression_type=None):\n        self._filename = ops.convert_to_tensor(filename, dtypes.string, name='filename')\n        self._compression_type = convert.optional_param_to_tensor('compression_type', compression_type, argument_default='', argument_dtype=dtypes.string)\n\n    def write(self, dataset):\n        if not isinstance(dataset, data_types.DatasetV2):\n            raise TypeError(f'Invalid `dataset.` Expected a `tf.data.Dataset` object but got {type(dataset)}.')\n        if not dataset_ops.get_structure(dataset).is_compatible_with(tensor_spec.TensorSpec([], dtypes.string)):\n            raise TypeError(f'Invalid `dataset`. Expected a`dataset` that produces scalar `tf.string` elements, but got a dataset which produces elements with shapes {dataset_ops.get_legacy_output_shapes(dataset)} and types {dataset_ops.get_legacy_output_types(dataset)}.')\n        dataset = dataset._apply_debug_options()\n        return gen_experimental_dataset_ops.dataset_to_tf_record(dataset._variant_tensor, self._filename, self._compression_type)",
    "docstring": "Writes a dataset to a TFRecord file. The elements of the dataset must be scalar strings. To serialize dataset elements as strings, you can use the function. To read back the elements, use . To shard a across multiple TFRecord files:",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\data\\experimental\\ops\\writers.py",
    "ast_data": "ClassDef name:TFRecordWriter FunctionDef name:__init__ arg:self arg:filename arg:compression_type arguments arg arg arg Assign Call Assign Call FunctionDef name:write arg:self arg:dataset arguments arg arg If Call Raise Call Call If Call Call Call Raise Call Call Call Assign Call Return return:yes Call Call Call"
  },
  {
    "library": "matplotlib",
    "name": "start_rasterizing",
    "source_code": "def start_rasterizing(self):\n    self.figure.dpi = self.dpi\n    self._raster_renderer = self._raster_renderer_class(self._width * self.dpi, self._height * self.dpi, self.dpi)\n    self._renderer = self._raster_renderer\n    if self._bbox_inches_restore:\n        r = process_figure_for_rasterizing(self.figure, self._bbox_inches_restore, self._raster_renderer)\n        self._bbox_inches_restore = r",
    "docstring": "Enter \"raster\" mode. All subsequent drawing commands (until is called) will be drawn with the raster backend.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\backends\\backend_mixed.py",
    "ast_data": "FunctionDef name:start_rasterizing arg:self arguments arg Assign Assign Call Assign If Assign Call Assign"
  },
  {
    "library": "django",
    "name": "reset",
    "source_code": "def reset(self):\n    pass",
    "docstring": "Reset any state maintained by the loader instance (e.g. cached templates or cached loader modules).",
    "type": "method",
    "file_path": "django\\django\\template\\loaders\\base.py",
    "ast_data": "FunctionDef name:reset arg:self arguments arg"
  },
  {
    "library": "tensorflow",
    "name": "adjust_gamma",
    "source_code": "@tf_export('image.adjust_gamma')\n@dispatch.register_unary_elementwise_api\n@dispatch.add_dispatch_support\ndef adjust_gamma(image, gamma=1, gain=1):\n    with ops.name_scope(None, 'adjust_gamma', [image, gamma, gain]) as name:\n        image = ops.convert_to_tensor(image, name='image')\n        orig_dtype = image.dtype\n        if orig_dtype in [dtypes.float16, dtypes.float32]:\n            flt_image = image\n        else:\n            flt_image = convert_image_dtype(image, dtypes.float32)\n        assert_op = _assert(gamma >= 0, ValueError, 'Gamma should be a non-negative real number.')\n        if assert_op:\n            gamma = control_flow_ops.with_dependencies(assert_op, gamma)\n        adjusted_img = gain * flt_image ** gamma\n        return convert_image_dtype(adjusted_img, orig_dtype, saturate=True)",
    "docstring": "Performs [Gamma Correction]( on the input image. Also known as Power Law Transform. This function converts the input images at first to float representation, then transforms them pixelwise according to the equation , and then converts the back to the original data type. Usage Example: >>> x = [[[1.0, 2.0, 3.0], ... [4.0, 5.0, 6.0]], ... [[7.0, 8.0, 9.0], ... [10.0, 11.0, 12.0]]] >>> tf.image.adjust_gamma(x, 0.2) Args: image : RGB image or images to adjust. gamma : A scalar or tensor. Non-negative real number. gain : A scalar or tensor. The constant multiplier. Returns: A Tensor. A Gamma-adjusted tensor of the same shape and type as . Raises: ValueError: If gamma is negative. Notes: For gamma greater than 1, the histogram will shift towards left and the output image will be darker than the input image. For gamma less than 1, the histogram will shift towards right and the output image will be brighter than the input image. References: [Wikipedia](",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\image_ops_impl.py",
    "ast_data": "FunctionDef name:adjust_gamma arg:image arg:gamma arg:gain arguments arg arg arg With Call Assign Call Assign If Compare Assign Assign Call Assign Call Compare If Assign Call Assign Return return:yes Call Call"
  },
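A small sketch of the equation in the docstring above, on a normalized float input where no dtype conversion is needed:

```python
import tensorflow as tf

# For float input, the result is simply gain * x**gamma.
x = tf.constant([[0.25, 0.5], [0.75, 1.0]])
brighter = tf.image.adjust_gamma(x, gamma=0.5)  # gamma < 1 brightens
darker = tf.image.adjust_gamma(x, gamma=2.0)    # gamma > 1 darkens
```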
  {
    "library": "pytorch",
    "name": "create_bundled",
    "source_code": "def create_bundled(d, outstream, include_files=False):\n    collected = collect_license(d)\n    sorted_keys = sorted(collected.keys())\n    outstream.write('The PyTorch repository and source distributions bundle several libraries that are \\n')\n    outstream.write('compatibly licensed.  We list these here.')\n    files_to_include = []\n    for k in sorted_keys:\n        c = collected[k]\n        files = ',\\n     '.join(c['Files'])\n        license_file = ',\\n     '.join(c['License_file'])\n        outstream.write('\\n\\n')\n        outstream.write(f'Name: {c['Name']}\\n')\n        outstream.write(f'License: {c['License']}\\n')\n        outstream.write(f'Files: {files}\\n')\n        outstream.write('  For details, see')\n        if include_files:\n            outstream.write(' the files concatenated below: ')\n            files_to_include += c['License_file']\n        else:\n            outstream.write(': ')\n        outstream.write(license_file)\n    for fname in files_to_include:\n        outstream.write('\\n\\n')\n        outstream.write(fname)\n        outstream.write('\\n' + '-' * len(fname) + '\\n')\n        with open(fname, 'r') as fid:\n            outstream.write(fid.read())",
    "docstring": "Write the information to an open outstream",
    "type": "function",
    "file_path": "pytorch\\third_party\\build_bundled.py",
    "ast_data": "FunctionDef name:create_bundled arg:d arg:outstream arg:include_files arguments arg arg arg Assign Call Assign Call Call Call Call Assign For Assign Assign Call Assign Call Call Call Call Call Call If Call Call Call For Call Call Call Call With Call Call Call"
  },
  {
    "library": "matplotlib",
    "name": "__call__",
    "source_code": "def __call__(self):\n    raise NotImplementedError('Derived must override')",
    "docstring": "Return the locations of the ticks.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\ticker.py",
    "ast_data": "FunctionDef name:__call__ arg:self arguments arg Raise Call"
  },
  {
    "library": "pytorch",
    "name": "TensorboardEventHandler",
    "source_code": "class TensorboardEventHandler:\n\n    def __init__(self, writer: 'SummaryWriter') -> None:\n        self._writer = writer\n\n    def __call__(self, event: Event) -> None:\n        if event.name == STAT_EVENT:\n            for k, v in event.data.items():\n                self._writer.add_scalar(k, v, walltime=event.timestamp.timestamp())",
    "docstring": "TensorboardEventHandler is an event handler that will write known events to the provided SummaryWriter. This currently only supports `` events which are logged as scalars. Example: >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_MONITOR) >>> # xdoctest: +REQUIRES(module:tensorboard) >>> from torch.utils.tensorboard import SummaryWriter >>> from torch.monitor import TensorboardEventHandler, register_event_handler >>> writer = SummaryWriter(\"log_dir\") >>> register_event_handler(TensorboardEventHandler(writer))",
    "type": "class",
    "file_path": "pytorch\\torch\\monitor\\__init__.py",
    "ast_data": "ClassDef name:TensorboardEventHandler FunctionDef name:__init__ arg:self arg:writer arguments arg arg Assign FunctionDef name:__call__ arg:self arg:event arguments arg arg If Compare For Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "collect_function_renames",
    "source_code": "def collect_function_renames():\n    renames = set()\n    all_v2_names = get_all_v2_names()\n\n    def visit(unused_path, unused_parent, children):\n        for child in children:\n            _, attr = tf_decorator.unwrap(child[1])\n            api_names_v1 = [name for name in tf_export.get_v1_names(attr) if '.__internal__.' not in name]\n            api_names_v2 = tf_export.get_v2_names(attr)\n            if not api_names_v2:\n                api_names_v2 = [name for name in api_names_v1 if name in all_v2_names]\n            deprecated_api_names = set(api_names_v1) - set(api_names_v2)\n            for name in deprecated_api_names:\n                renames.add((name, get_canonical_name(api_names_v2, name)))\n    visitor = public_api.PublicAPIVisitor(visit)\n    visitor.do_not_descend_map['tf'].append('contrib')\n    visitor.private_map['tf.compat'] = ['v1', 'v2']\n    traverse.traverse(tf.version, visitor)\n    traverse.traverse(tf.compat.v1, visitor)\n    traverse.traverse(tf.compat.v2, visitor)\n    return renames",
    "docstring": "Looks for functions/classes that need to be renamed in TF 2.0. Returns: Set of tuples of the form (current name, new name).",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\tools\\compatibility\\update\\generate_v2_renames_map.py",
    "ast_data": "FunctionDef name:collect_function_renames arguments Assign Call Assign Call FunctionDef name:visit arg:unused_path arg:unused_parent arg:children arguments arg arg arg For Assign Call Assign Call Compare Assign Call If Assign Compare Assign Call Call For Call Call Assign Call Call Assign Call Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_maybe_track_assets",
    "source_code": "def _maybe_track_assets(self, graph_def):\n    asset_tracker = {}\n    for node in graph_def.node:\n        if node.name.startswith('FileIdentity'):\n            asset_tracker[node.input[0]] = None\n    if not asset_tracker:\n        return {}\n    for node in graph_def.node:\n        if node.name in asset_tracker:\n            tensor_proto = node.attr['value'].tensor\n            with context.eager_mode(), ops.device('CPU'):\n                node_value = gen_parsing_ops.parse_tensor(tensor_proto.SerializeToString(), dtypes.string).numpy()\n            asset_tracker[node.name] = [self._track_trackable(asset.Asset(n), name=node.name + '_' + str(i), overwrite=True) for i, n in enumerate(node_value)]\n    return asset_tracker",
    "docstring": "Finds and tracks nodes in that refer to asset files. Args: graph_def: Serialized graph representation of this dataset. Returns: A dictionary mapping the node name of an asset constant to a tracked object.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\data\\ops\\dataset_ops.py",
    "ast_data": "FunctionDef name:_maybe_track_assets arg:self arg:graph_def arguments arg arg Assign For If Call Assign If Return return:no For If Compare Assign With Call Call Assign Call Call Call Assign Call Call Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_in_multi_worker_mode",
    "source_code": "def _in_multi_worker_mode(self):\n    return self._cluster_spec is not None",
    "docstring": "Whether this strategy indicates working in multi-worker settings.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\parameter_server_strategy.py",
    "ast_data": "FunctionDef name:_in_multi_worker_mode arg:self arguments arg Return return:yes Compare"
  },
  {
    "library": "pytorch",
    "name": "shutdown_compile_workers",
    "source_code": "def shutdown_compile_workers() -> None:\n    for pool in _pool_set:\n        pool.shutdown()\n    after_fork()",
    "docstring": "Shut down all outstanding compile-worker pools.",
    "type": "function",
    "file_path": "pytorch\\torch\\_inductor\\async_compile.py",
    "ast_data": "FunctionDef name:shutdown_compile_workers arguments For Call Call"
  },
  {
    "library": "pytorch",
    "name": "MDNotImplementedError",
    "source_code": "class MDNotImplementedError(NotImplementedError):\n    pass",
    "docstring": "A NotImplementedError for multiple dispatch",
    "type": "class",
    "file_path": "pytorch\\torch\\fx\\experimental\\unification\\multipledispatch\\dispatcher.py",
    "ast_data": "ClassDef name:MDNotImplementedError"
  },
  {
    "library": "numpy",
    "name": "FindAttributes",
    "source_code": "class FindAttributes(ast.NodeVisitor):\n\n    def __init__(self):\n        self.attributes = set()\n\n    def visit_FunctionDef(self, node):\n        if node.name == '__getattr__':\n            return\n        self.attributes.add(node.name)\n        return\n\n    def visit_ClassDef(self, node):\n        if not node.name.startswith('_'):\n            self.attributes.add(node.name)\n\n    def visit_AnnAssign(self, node):\n        self.attributes.add(node.target.id)",
    "docstring": "Find top-level attributes/functions/classes in stubs files. Do this by walking the stubs ast. See e.g. for more information on working with Python's ast.",
    "type": "class",
    "file_path": "numpy\\tools\\functions_missing_types.py",
    "ast_data": "ClassDef name:FindAttributes FunctionDef name:__init__ arg:self arguments arg Assign Call FunctionDef name:visit_FunctionDef arg:self arg:node arguments arg arg If Compare Return return:no Call Return return:no FunctionDef name:visit_ClassDef arg:self arg:node arguments arg arg If Call Call FunctionDef name:visit_AnnAssign arg:self arg:node arguments arg arg Call"
  },
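A quick sketch of driving this visitor, assuming the `FindAttributes` class above is in scope:

```python
import ast

# Walk a stub-like source string; only top-level, public names are kept
# because the visitor methods do not recurse into their nodes.
tree = ast.parse("def f(): ...\nclass Public: ...\nx: int = 0\n")
finder = FindAttributes()
finder.visit(tree)
print(sorted(finder.attributes))  # ['Public', 'f', 'x']
```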
  {
    "library": "django",
    "name": "scale",
    "source_code": "@property\ndef scale(self):\n    return TransformPoint(self, 'scale')",
    "docstring": "Pixel scale in units of the raster projection.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\gdal\\raster\\source.py",
    "ast_data": "FunctionDef name:scale arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "get_tensorrt_backend_config_dict",
    "source_code": "def get_tensorrt_backend_config_dict():\n    return get_tensorrt_backend_config().to_dict()",
    "docstring": "Return the for the TensorRT backend in dictionary form.",
    "type": "function",
    "file_path": "pytorch\\torch\\ao\\quantization\\backend_config\\tensorrt.py",
    "ast_data": "FunctionDef name:get_tensorrt_backend_config_dict arguments Return return:yes Call Call"
  },
  {
    "library": "django",
    "name": "SafeString",
    "source_code": "class SafeString(str, SafeData):\n    __slots__ = ()\n\n    def __add__(self, rhs):\n        if isinstance(rhs, str):\n            t = super().__add__(rhs)\n            if isinstance(rhs, SafeData):\n                t = SafeString(t)\n            return t\n        return NotImplemented\n\n    def __str__(self):\n        return self",
    "docstring": "A str subclass that has been specifically marked as \"safe\" for HTML output purposes.",
    "type": "class",
    "file_path": "django\\django\\utils\\safestring.py",
    "ast_data": "ClassDef name:SafeString Assign FunctionDef name:__add__ arg:self arg:rhs arguments arg arg If Call Assign Call Call If Call Assign Call Return return:yes Return return:yes FunctionDef name:__str__ arg:self arguments arg Return return:yes"
  },
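The `__add__` override above propagates safety only when both operands are safe, which a short check makes concrete:

```python
from django.utils.safestring import SafeString

s = SafeString("<b>bold</b>")
# Safe + safe stays safe...
print(type(s + SafeString("!")))  # <class 'django.utils.safestring.SafeString'>
# ...but mixing in a plain str downgrades the result to str.
print(type(s + "<script>"))       # <class 'str'>
```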
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, diag, is_non_singular=None, is_self_adjoint=None, is_positive_definite=None, is_square=None, name='LinearOperatorDiag'):\n    parameters = dict(diag=diag, is_non_singular=is_non_singular, is_self_adjoint=is_self_adjoint, is_positive_definite=is_positive_definite, is_square=is_square, name=name)\n    with ops.name_scope(name, values=[diag]):\n        self._diag = linear_operator_util.convert_nonref_to_tensor(diag, name='diag')\n        self._check_diag(self._diag)\n        if not self._diag.dtype.is_complex:\n            if is_self_adjoint is False:\n                raise ValueError('A real diagonal operator is always self adjoint.')\n            else:\n                is_self_adjoint = True\n        if is_square is False:\n            raise ValueError('Only square diagonal operators currently supported.')\n        is_square = True\n        super(LinearOperatorDiag, self).__init__(dtype=self._diag.dtype, is_non_singular=is_non_singular, is_self_adjoint=is_self_adjoint, is_positive_definite=is_positive_definite, is_square=is_square, parameters=parameters, name=name)",
    "docstring": "Initialize a . Args: diag: Shape with . The diagonal of the operator. Allowed dtypes: , , , , . is_non_singular: Expect that this operator is non-singular. is_self_adjoint: Expect that this operator is equal to its hermitian transpose. If is real, this is auto-set to . is_positive_definite: Expect that this operator is positive definite, meaning the quadratic form has positive real part for all nonzero . Note that we do not require the operator to be self-adjoint to be positive-definite. See: is_square: Expect that this operator acts like square [batch] matrices. name: A name for this . Raises: TypeError: If is not an allowed type. ValueError: If is real, and is not .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\linalg\\linear_operator_diag.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:diag arg:is_non_singular arg:is_self_adjoint arg:is_positive_definite arg:is_square arg:name arguments arg arg arg arg arg arg arg Assign Call With Call Assign Call Call If If Compare Raise Call Assign If Compare Raise Call Assign Call Call"
  },
  {
    "library": "kornia",
    "name": "identity",
    "source_code": "@classmethod\ndef identity(cls, batch_size: Optional[int]=None, device: Optional[Device]=None, dtype: Dtype=None) -> Se2:\n    t: Tensor = tensor([0.0, 0.0], device=device, dtype=dtype)\n    if batch_size is not None:\n        KORNIA_CHECK(batch_size >= 1, msg='batch_size must be positive')\n        t = t.repeat(batch_size, 1)\n    return cls(So2.identity(batch_size, device, dtype), Vector2(t))",
    "docstring": "Create a Se2 group representing an identity rotation and zero translation. Args: batch_size: the batch size of the underlying data. device: device to place the result on. dtype: dtype of the result. Example: >>> s = Se2.identity(1) >>> s.r Parameter containing: tensor([1.+0.j], requires_grad=True) >>> s.t x: tensor([0.]) y: tensor([0.])",
    "type": "method",
    "file_path": "kornia\\kornia\\geometry\\liegroup\\se2.py",
    "ast_data": "FunctionDef name:identity arg:cls arg:batch_size arg:device arg:dtype arguments arg arg arg arg Call If Compare Call Compare Assign Call Return return:yes Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "mocked_modules",
    "source_code": "def mocked_modules(self) -> list[str]:\n    return self._nodes_with_action_type(_ModuleProviderAction.MOCK)",
    "docstring": "Return all modules that are currently mocked. Returns: A list containing the names of modules which will be mocked in this package.",
    "type": "method",
    "file_path": "pytorch\\torch\\package\\package_exporter.py",
    "ast_data": "FunctionDef name:mocked_modules arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "get_node_index",
    "source_code": "def get_node_index(layer, config_node_index):\n    if isinstance(layer, input_layer_module.InputLayer):\n        return 0\n    return node_index_map.get((layer.name, config_node_index), None)",
    "docstring": "Returns node index in layer (might differ from config_node_index).",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\functional.py",
    "ast_data": "FunctionDef name:get_node_index arg:layer arg:config_node_index arguments arg arg If Call Return return:yes Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "log_loss",
    "source_code": "@validate_params({'y_true': ['array-like'], 'y_pred': ['array-like'], 'normalize': ['boolean'], 'sample_weight': ['array-like', None], 'labels': ['array-like', None]}, prefer_skip_nested_validation=True)\ndef log_loss(y_true, y_pred, *, normalize=True, sample_weight=None, labels=None):\n    transformed_labels, y_pred = _validate_multiclass_probabilistic_prediction(y_true, y_pred, sample_weight, labels)\n    eps = np.finfo(y_pred.dtype).eps\n    y_pred = np.clip(y_pred, eps, 1 - eps)\n    loss = -xlogy(transformed_labels, y_pred).sum(axis=1)\n    return float(_average(loss, weights=sample_weight, normalize=normalize))",
    "docstring": "Log loss, aka logistic loss or cross-entropy loss. This is the loss function used in (multinomial) logistic regression and extensions of it such as neural networks, defined as the negative log-likelihood of a logistic model that returns `y \\in \\{0,1\\}p = \\operatorname{Pr}(y = 1)User Guide ~sklearn.preprocessing.LabelBinarizery_pred[eps, 1-eps]epsy_pred`. .. versionadded:: 0.18 Returns ------- loss : float Log loss, aka logistic loss or cross-entropy loss. Notes ----- The logarithm used is the natural logarithm (base-e). References ---------- C.M. Bishop (2006). Pattern Recognition and Machine Learning. Springer, p. 209. Examples -------- >>> from sklearn.metrics import log_loss >>> log_loss([\"spam\", \"ham\", \"ham\", \"spam\"], ... [[.1, .9], [.9, .1], [.8, .2], [.35, .65]]) 0.21616",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\metrics\\_classification.py",
    "ast_data": "FunctionDef name:log_loss arg:y_true arg:y_pred arguments arg arg arg arg arg Assign Call Assign Call Assign Call Assign Call Call Return return:yes Call Call Call"
  },
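A small numeric sketch of the loss formula above: confident correct predictions give a small loss, confident wrong ones a large one.

```python
from sklearn.metrics import log_loss

# Per-sample loss is -log(p of the true class), then averaged.
print(log_loss([0, 1], [[0.9, 0.1], [0.1, 0.9]]))  # ~0.105 (= -ln 0.9)
print(log_loss([0, 1], [[0.1, 0.9], [0.9, 0.1]]))  # ~2.303 (= -ln 0.1)
```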
  {
    "library": "django",
    "name": "set_upward",
    "source_code": "def set_upward(self, key, value):\n    context = self.dicts[-1]\n    for d in reversed(self.dicts):\n        if key in d:\n            context = d\n            break\n    context[key] = value",
    "docstring": "Set a variable in one of the higher contexts if it exists there, otherwise in the current context.",
    "type": "method",
    "file_path": "django\\django\\template\\context.py",
    "ast_data": "FunctionDef name:set_upward arg:self arg:key arg:value arguments arg arg arg Assign For Call If Compare Assign Assign"
  },
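A usage sketch of the upward-search behavior described above, using `Context.push` to create an inner scope:

```python
from django.template import Context

c = Context({"x": 1})
with c.push({"y": 2}):       # enter an inner scope
    c.set_upward("x", 42)    # "x" exists in an outer dict: updated there
    c.set_upward("z", 3)     # unknown key: set in the current (inner) scope
print(c["x"])                # 42 -- the outer value was modified
```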
  {
    "library": "tensorflow",
    "name": "parse_entry",
    "source_code": "def parse_entry(entry):\n    items = entry.split('=')\n    key = items[0].strip()\n    if len(items) > 1:\n        value = items[1]\n        return (key, value)\n    else:\n        return (None, None)",
    "docstring": "Parse a \"key=value\" pair separated by '=' eg: var_name=False",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\tools\\optimize_for_inference_lib.py",
    "ast_data": "FunctionDef name:parse_entry arg:entry arguments arg Assign Call Assign Call If Compare Call Assign Return return:yes Return return:no"
  },
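A quick sketch, assuming `parse_entry` from the record above is in scope; note the value keeps its raw string form:

```python
print(parse_entry("var_name=False"))  # ('var_name', 'False')
print(parse_entry("just_a_flag"))     # (None, None) -- no '=' present
```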
  {
    "library": "scikit-learn",
    "name": "_compute_covariance",
    "source_code": "def _compute_covariance(self, X, sqrt_sw):\n    if not self.fit_intercept:\n        X_mean = np.zeros(X.shape[1], dtype=X.dtype)\n        return (safe_sparse_dot(X.T, X, dense_output=True), X_mean)\n    n_samples = X.shape[0]\n    sample_weight_matrix = sparse.dia_matrix((sqrt_sw, 0), shape=(n_samples, n_samples))\n    X_weighted = sample_weight_matrix.dot(X)\n    X_mean, _ = mean_variance_axis(X_weighted, axis=0)\n    X_mean = X_mean * n_samples / sqrt_sw.dot(sqrt_sw)\n    weight_sum = sqrt_sw.dot(sqrt_sw)\n    return (safe_sparse_dot(X.T, X, dense_output=True) - weight_sum * np.outer(X_mean, X_mean), X_mean)",
    "docstring": "Computes covariance matrix X^TX with possible centering. Parameters ---------- X : sparse matrix of shape (n_samples, n_features) The preprocessed design matrix. sqrt_sw : ndarray of shape (n_samples,) square roots of sample weights Returns ------- covariance : ndarray of shape (n_features, n_features) The covariance matrix. X_mean : ndarray of shape (n_feature,) The weighted mean of `` for each feature. Notes ----- Since X is sparse it has not been centered in preprocessing, but it has been scaled by sqrt(sample weights). When self.fit_intercept is False no centering is done. The centered X is never actually computed because centering would break the sparsity of X.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\linear_model\\_ridge.py",
    "ast_data": "FunctionDef name:_compute_covariance arg:self arg:X arg:sqrt_sw arguments arg arg arg If Assign Call Return return:yes Call Assign Assign Call Assign Call Assign Call Assign Call Assign Call Return return:yes Call Call"
  },
  {
    "library": "django",
    "name": "y",
    "source_code": "@property\ndef y(self):\n    return self._listarr(self._cs.getY)",
    "docstring": "Return a list or numpy array of the Y variable.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\geos\\linestring.py",
    "ast_data": "FunctionDef name:y arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "_format_labels",
    "source_code": "def _format_labels(bins: Index, precision: int, right: bool=True, include_lowest: bool=False) -> IntervalIndex:\n    closed: IntervalLeftRight = 'right' if right else 'left'\n    formatter: Callable[[Any], Timestamp] | Callable[[Any], Timedelta]\n    if _is_dt_or_td(bins.dtype):\n        unit = dtype_to_unit(bins.dtype)\n        formatter = lambda x: x\n        adjust = lambda x: x - Timedelta(1, unit=unit).as_unit(unit)\n    else:\n        precision = _infer_precision(precision, bins)\n        formatter = lambda x: _round_frac(x, precision)\n        adjust = lambda x: x - 10 ** (-precision)\n    breaks = [formatter(b) for b in bins]\n    if right and include_lowest:\n        breaks[0] = adjust(breaks[0])\n    if _is_dt_or_td(bins.dtype):\n        breaks = type(bins)(breaks).as_unit(unit)\n    return IntervalIndex.from_breaks(breaks, closed=closed)",
    "docstring": "based on the dtype, return our labels",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\reshape\\tile.py",
    "ast_data": "FunctionDef name:_format_labels arg:bins arg:precision arg:right arg:include_lowest arguments arg arg arg arg If Call Assign Call Assign arguments arg Assign arguments arg Call Call Assign Call Assign arguments arg Call Assign arguments arg Assign Call If BoolOp Assign Call If Call Assign Call Call Call Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "_draw_rasterized",
    "source_code": "def _draw_rasterized(figure, artists, renderer):\n\n    class _MinimalArtist:\n\n        def get_rasterized(self):\n            return True\n\n        def get_agg_filter(self):\n            return None\n\n        def __init__(self, figure, artists):\n            self.figure = figure\n            self.artists = artists\n\n        def get_figure(self, root=False):\n            if root:\n                return self.figure.get_figure(root=True)\n            else:\n                return self.figure\n\n        @martist.allow_rasterization\n        def draw(self, renderer):\n            for a in self.artists:\n                a.draw(renderer)\n    return _MinimalArtist(figure, artists).draw(renderer)",
    "docstring": "A helper function for rasterizing the list of artists. The bookkeeping to track if we are or are not in rasterizing mode with the mixed-mode backends is relatively complicated and is now handled in the matplotlib.artist.allow_rasterization decorator. This helper defines the absolute minimum methods and attributes on a shim class to be compatible with that decorator and then uses it to rasterize the list of artists. This is maybe too-clever, but allows us to reuse the same code that is used on normal artists to participate in the \"are we rasterizing\" accounting. Please do not use this outside of the \"rasterize below a given zorder\" functionality of Axes. Parameters ---------- figure : matplotlib.figure.Figure The figure all of the artists belong to (not checked). We need this because we can at the figure level suppress composition and insert each rasterized artist as its own image. artists : List[matplotlib.artist.Artist] The list of Artists to be rasterized. These are assumed to all be in the same Figure. renderer : matplotlib.backendbases.RendererBase The currently active renderer Returns ------- None",
    "type": "function",
    "file_path": "matplotlib\\lib\\matplotlib\\axes\\_base.py",
    "ast_data": "FunctionDef name:_draw_rasterized arg:figure arg:artists arg:renderer arguments arg arg arg ClassDef name:_MinimalArtist FunctionDef name:get_rasterized arg:self arguments arg Return return:yes FunctionDef name:get_agg_filter arg:self arguments arg Return return:no FunctionDef name:__init__ arg:self arg:figure arg:artists arguments arg arg arg Assign Assign FunctionDef name:get_figure arg:self arg:root arguments arg arg If Return return:yes Call Return return:yes FunctionDef name:draw arg:self arg:renderer arguments arg arg For Call Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_assert_all_ranks_match",
    "source_code": "def _assert_all_ranks_match(values):\n    ranks = [_get_all_ranks(st) for st in values]\n    for other_ranks in ranks[1:]:\n        if other_ranks != ranks[0]:\n            raise ValueError('Ranks of sub-message do not match')",
    "docstring": "Raises an error if the ranks of submessages are not identical.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\structured\\structured_array_ops.py",
    "ast_data": "FunctionDef name:_assert_all_ranks_match arg:values arguments arg Assign Call For If Compare Raise Call"
  },
  {
    "library": "scikit-learn",
    "name": "_normalize_score_results",
    "source_code": "def _normalize_score_results(scores, scaler_score_key='score'):\n    if isinstance(scores[0], dict):\n        return _aggregate_score_dicts(scores)\n    return {scaler_score_key: scores}",
    "docstring": "Creates a scoring dictionary based on the type of",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\model_selection\\_validation.py",
    "ast_data": "FunctionDef name:_normalize_score_results arg:scores arg:scaler_score_key arguments arg arg If Call Return return:yes Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_inverse_event_shape",
    "source_code": "def _inverse_event_shape(self, output_shape):\n    return tensor_shape.TensorShape(output_shape)",
    "docstring": "Subclass implementation for public function.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\distributions\\bijector_impl.py",
    "ast_data": "FunctionDef name:_inverse_event_shape arg:self arg:output_shape arguments arg arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "create_pseudo_output_names",
    "source_code": "def create_pseudo_output_names(outputs):\n    return _create_pseudo_names(outputs, prefix='output_')",
    "docstring": "Create pseudo output names for a subclassed Model.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\lite\\python\\tflite_keras_util.py",
    "ast_data": "FunctionDef name:create_pseudo_output_names arg:outputs arguments arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "scatter_sub",
    "source_code": "def scatter_sub(self, sparse_delta, use_locking=False, name=None):\n    per_var_sparse_delta = self._decompose_indexed_slices(sparse_delta)\n    for i, v in enumerate(self._variables):\n        new_name = None\n        if name is not None:\n            new_name = '{}/part_{}'.format(name, i)\n        v.scatter_sub(per_var_sparse_delta[i], name=new_name)\n    return self",
    "docstring": "Implements tf.Variable.scatter_sub.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\sharded_variable.py",
    "ast_data": "FunctionDef name:scatter_sub arg:self arg:sparse_delta arg:use_locking arg:name arguments arg arg arg arg Assign Call For Call Assign If Compare Assign Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "is_namedtuple_instance",
    "source_code": "def is_namedtuple_instance(obj: object) -> bool:\n    return is_namedtuple_class(type(obj))",
    "docstring": "Return whether the object is an instance of namedtuple.",
    "type": "function",
    "file_path": "pytorch\\torch\\utils\\_pytree.py",
    "ast_data": "FunctionDef name:is_namedtuple_instance arg:obj arguments arg Return return:yes Call Call"
  },
  {
    "library": "sphinx",
    "name": "escape_arg",
    "source_code": "def escape_arg(self, s: str) -> str:\n    s = self.escape(s)\n    s = s.replace(',', '@comma{}')\n    s = ' '.join(s.split()).strip()\n    return s",
    "docstring": "Return an escaped string suitable for use as an argument to a Texinfo command.",
    "type": "method",
    "file_path": "sphinx\\sphinx\\writers\\texinfo.py",
    "ast_data": "FunctionDef name:escape_arg arg:self arg:s arguments arg arg Assign Call Assign Call Assign Call Call Call Return return:yes"
  },
  {
    "library": "numpy",
    "name": "replace",
    "source_code": "@set_module('numpy.strings')\n@array_function_dispatch(_replace_dispatcher)\ndef replace(a, old, new, count=-1):\n    count = np.asanyarray(count)\n    if not np.issubdtype(count.dtype, np.integer):\n        raise TypeError(f\"unsupported type {count.dtype} for operand 'count'\")\n    arr = np.asanyarray(a)\n    old_dtype = getattr(old, 'dtype', None)\n    old = np.asanyarray(old)\n    new_dtype = getattr(new, 'dtype', None)\n    new = np.asanyarray(new)\n    if np.result_type(arr, old, new).char == 'T':\n        return _replace(arr, old, new, count)\n    a_dt = arr.dtype\n    old = old.astype(old_dtype or a_dt, copy=False)\n    new = new.astype(new_dtype or a_dt, copy=False)\n    max_int64 = np.iinfo(np.int64).max\n    counts = _count_ufunc(arr, old, 0, max_int64)\n    counts = np.where(count < 0, counts, np.minimum(counts, count))\n    buffersizes = str_len(arr) + counts * (str_len(new) - str_len(old))\n    out_dtype = f'{arr.dtype.char}{buffersizes.max()}'\n    out = np.empty_like(arr, shape=buffersizes.shape, dtype=out_dtype)\n    return _replace(arr, old, new, counts, out=out)",
    "docstring": "For each element in `` dtype, depending on input types See Also -------- str.replace Examples -------- >>> import numpy as np >>> a = np.array([\"That is a mango\", \"Monkeys eat mangos\"]) >>> np.strings.replace(a, 'mango', 'banana') array(['That is a banana', 'Monkeys eat bananas'], dtype='>> a = np.array([\"The dish is fresh\", \"This is it\"]) >>> np.strings.replace(a, 'is', 'was') array(['The dwash was fresh', 'Thwas was it'], dtype='<U19')",
    "type": "function",
    "file_path": "numpy\\numpy\\_core\\strings.py",
    "ast_data": "FunctionDef name:replace arg:a arg:old arg:new arg:count arguments arg arg arg arg Assign Call If Call Raise Call Assign Call Assign Call Assign Call Assign Call Assign Call If Compare Call Return return:yes Call Assign Assign Call BoolOp Assign Call BoolOp Assign Call Assign Call Assign Call Compare Call Assign Call Call Call Assign Call Assign Call Return return:yes Call Call Call"
  },
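A short sketch of the `count` handling the source above implements (negative means replace all, otherwise a per-element cap):

```python
import numpy as np

a = np.array(["aaa", "abab"])
print(np.strings.replace(a, "a", "X"))           # ['XXX' 'XbXb']
print(np.strings.replace(a, "a", "X", count=1))  # ['Xaa' 'Xbab']
```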
  {
    "library": "django",
    "name": "JSONSerializer",
    "source_code": "class JSONSerializer:\n\n    def dumps(self, obj):\n        return json.dumps(obj, separators=(',', ':')).encode('latin-1')\n\n    def loads(self, data):\n        return json.loads(data.decode('latin-1'))",
    "docstring": "Simple wrapper around json to be used in signing.dumps and signing.loads.",
    "type": "class",
    "file_path": "django\\django\\core\\signing.py",
    "ast_data": "ClassDef name:JSONSerializer FunctionDef name:dumps arg:self arg:obj arguments arg arg Return return:yes Call Call FunctionDef name:loads arg:self arg:data arguments arg arg Return return:yes Call Call"
  },
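A round-trip sketch for the serializer above; note the compact separators and latin-1 encoding it is defined with:

```python
from django.core.signing import JSONSerializer

s = JSONSerializer()
blob = s.dumps({"user": 42})  # compact, latin-1 encoded bytes
print(blob)                   # b'{"user":42}'
print(s.loads(blob))          # {'user': 42}
```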
  {
    "library": "numpy",
    "name": "hermsub",
    "source_code": "def hermsub(c1, c2):\n    return pu._sub(c1, c2)",
    "docstring": "Subtract one Hermite series from another. Returns the difference of two Hermite series - . The sequences of coefficients are from lowest order term to highest, i.e., [1,2,3] represents the series ``. Parameters ---------- c1, c2 : array_like 1-D arrays of Hermite series coefficients ordered from low to high. Returns ------- out : ndarray Of Hermite series coefficients representing their difference. See Also -------- hermadd, hermmulx, hermmul, hermdiv, hermpow Notes ----- Unlike multiplication, division, etc., the difference of two Hermite series is a Hermite series (without having to \"reproject\" the result onto the basis set) so subtraction, just like that of \"standard\" polynomials, is simply \"component-wise.\" Examples -------- >>> from numpy.polynomial.hermite import hermsub >>> hermsub([1, 2, 3, 4], [1, 2, 3]) array([0., 0., 0., 4.])",
    "type": "function",
    "file_path": "numpy\\numpy\\polynomial\\hermite.py",
    "ast_data": "FunctionDef name:hermsub arg:c1 arg:c2 arguments arg arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, name, description, *labels):\n    super(IntGauge, self).__init__('IntGauge', _int_gauge_methods, len(labels), name, description, *labels)",
    "docstring": "Creates a new IntGauge. Args: name: name of the new metric. description: description of the new metric. *labels: The label list of the new metric.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\monitoring.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:name arg:description arguments arg arg arg arg Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "InductorOutput",
    "source_code": "class InductorOutput(Generic[TOut], ABC):\n\n    @abstractmethod\n    def pre_save(self) -> None:\n        ...\n\n    @abstractmethod\n    def load(self, example_inputs) -> TOut:\n        ...\n\n    @abstractmethod\n    def post_compile(self, result: TOut, fx_config: _CompileFxKwargs) -> TOut:\n        ...",
    "docstring": "Class representing a single inductor output",
    "type": "class",
    "file_path": "pytorch\\torch\\_functorch\\_aot_autograd\\autograd_cache.py",
    "ast_data": "ClassDef name:InductorOutput FunctionDef name:pre_save arg:self arguments arg FunctionDef name:load arg:self arg:example_inputs arguments arg arg FunctionDef name:post_compile arg:self arg:result arg:fx_config arguments arg arg arg"
  },
  {
    "library": "pytorch",
    "name": "flags",
    "source_code": "@contextmanager\ndef flags(enabled=False):\n    with __allow_nonbracketed_mutation():\n        orig_flags = set_flags(enabled)\n    try:\n        yield\n    finally:\n        with __allow_nonbracketed_mutation():\n            set_flags(orig_flags[0])",
    "docstring": "Context manager for setting if nnpack is enabled globally",
    "type": "function",
    "file_path": "pytorch\\torch\\backends\\nnpack\\__init__.py",
    "ast_data": "FunctionDef name:flags arg:enabled arguments arg With Call Assign Call Try With Call Call"
  },
  {
    "library": "tensorflow",
    "name": "MatchingFilesDataset",
    "source_code": "class MatchingFilesDataset(dataset_ops.DatasetSource):\n\n    def __init__(self, patterns):\n        self._patterns = ops.convert_to_tensor(patterns, dtype=dtypes.string, name='patterns')\n        variant_tensor = ged_ops.matching_files_dataset(self._patterns)\n        super(MatchingFilesDataset, self).__init__(variant_tensor)\n\n    @property\n    def element_spec(self):\n        return tensor_spec.TensorSpec([], dtypes.string)",
    "docstring": "A that list the files according to the input patterns.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\data\\experimental\\ops\\matching_files.py",
    "ast_data": "ClassDef name:MatchingFilesDataset FunctionDef name:__init__ arg:self arg:patterns arguments arg arg Assign Call Assign Call Call Call FunctionDef name:element_spec arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "_get_param_module_infos",
    "source_code": "def _get_param_module_infos(params: list[nn.Parameter], modules: tuple[nn.Module, ...]) -> list[ParamModuleInfo]:\n    params_set = set(params)\n    param_to_module_info: dict[nn.Parameter, ParamModuleInfo] = {}\n    for module in modules:\n        for _, submodule in module.named_modules(remove_duplicate=False):\n            for param_name, param in _named_parameters_with_duplicates(submodule, recurse=False):\n                if param in params_set:\n                    if param not in param_to_module_info:\n                        param_to_module_info[param] = ParamModuleInfo(submodule, param_name)\n                    else:\n                        param_to_module_info[param].shared_modules.append(submodule)\n                        param_to_module_info[param].shared_param_names.append(param_name)\n    if len(param_to_module_info) != len(params):\n        raise AssertionError(f'Some parameters are not in the module tree of {module}')\n    return [param_to_module_info[param] for param in params]",
    "docstring": "Shared parameter: lin1.weight = lin2.weight Shared module: mlp.lin1 = mlp.lin2 We do not remove duplicates when traversing both modules and parameters to find shared modules' parameters and shared parameters within a module.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\fsdp\\_fully_shard\\_fsdp_param_group.py",
    "ast_data": "FunctionDef name:_get_param_module_infos arg:params arg:modules arguments arg arg Assign Call For For Call For Call If Compare If Compare Assign Call Call Call If Compare Call Call Raise Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_get_copy_nodes",
    "source_code": "def _get_copy_nodes(self):\n    copy_nodes = []\n    for node in self._node_inputs:\n        if is_copy_node(node):\n            copy_nodes.append(node)\n    return copy_nodes",
    "docstring": "Find all Copy nodes in the loaded graph.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\debug\\lib\\debug_graphs.py",
    "ast_data": "FunctionDef name:_get_copy_nodes arg:self arguments arg Assign For If Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "sparsity_compute_fn",
    "source_code": "@staticmethod\ndef sparsity_compute_fn(s_0, s_f, t, t_0, dt, n, initially_zero=False):\n    if initially_zero and t < t_0:\n        return 0\n    s_t = s_f + (s_0 - s_f) * (1.0 - (t - t_0) / (dt * n)) ** 3\n    s_t = _clamp(s_t, s_0, s_f)\n    return s_t",
    "docstring": "\"Computes the current level of sparsity. Based on Args: s_0: Initial level of sparsity, :math: s_f: Target level of sparsity, :math: t: Current step, :math: t_0: Initial step, :math: dt: Pruning frequency, :math: n: Pruning steps, :math: initially_zero: Sets the level of sparsity to 0 before t_0. If False, sets to s_0 Returns: The sparsity level :math: at the current step :math:",
    "type": "method",
    "file_path": "pytorch\\torch\\ao\\pruning\\scheduler\\cubic_scheduler.py",
    "ast_data": "FunctionDef name:sparsity_compute_fn arg:s_0 arg:s_f arg:t arg:t_0 arg:dt arg:n arg:initially_zero arguments arg arg arg arg arg arg arg If BoolOp Compare Return return:yes Assign Assign Call Return return:yes"
  },
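A standalone re-derivation of the cubic ramp above, with illustrative values (s_0=0.0, s_f=0.9, t_0=0, dt=1, n=10); the `_clamp` helper is assumed to keep the value between s_0 and s_f:

```python
def cubic(t, s_0=0.0, s_f=0.9, t_0=0, dt=1, n=10):
    # Same formula as the method above, with an explicit clamp.
    s_t = s_f + (s_0 - s_f) * (1.0 - (t - t_0) / (dt * n)) ** 3
    lo, hi = min(s_0, s_f), max(s_0, s_f)
    return min(max(s_t, lo), hi)

# Sparsity ramps from 0 toward 0.9 over the pruning window.
print([round(cubic(t), 4) for t in (0, 5, 10)])  # [0.0, 0.7875, 0.9]
```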
  {
    "library": "tensorflow",
    "name": "raw_generate",
    "source_code": "def raw_generate(output_file, source_dir, git_tag_override=None):\n    git_version = get_git_version(source_dir, git_tag_override)\n    write_version_info(output_file, git_version)",
    "docstring": "Simple generator used for cmake/make build systems. This does not create any symlinks. It requires the build system to build unconditionally. Args: output_file: Output filename for the version info cc source_dir: Base path of the source code git_tag_override: Override the value for the git tag. This is useful for releases where we want to build the release before the git tag is created.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\tools\\git\\gen_git_source.py",
    "ast_data": "FunctionDef name:raw_generate arg:output_file arg:source_dir arg:git_tag_override arguments arg arg arg Assign Call Call"
  },
  {
    "library": "matplotlib",
    "name": "_decade_less_equal",
    "source_code": "def _decade_less_equal(x, base):\n    return x if x == 0 else -_decade_greater_equal(-x, base) if x < 0 else base ** np.floor(np.log(x) / np.log(base))",
    "docstring": "Return the largest integer power of *base* that's less or equal to *x*. If *x* is negative, the exponent will be *greater*.",
    "type": "function",
    "file_path": "matplotlib\\lib\\matplotlib\\ticker.py",
    "ast_data": "FunctionDef name:_decade_less_equal arg:x arg:base arguments arg arg Return return:yes Compare Compare Call Call Call Call"
  },
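A quick sketch of the helper above, assuming both it and its companion `_decade_greater_equal` (not shown in this record) are in scope:

```python
print(_decade_less_equal(75, 10))   # 10.0  (10**1 <= 75 < 10**2)
print(_decade_less_equal(0, 10))    # 0     (zero passes through)
print(_decade_less_equal(-75, 10))  # -100.0 (exponent *grows* for negatives)
```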
  {
    "library": "tensorflow",
    "name": "_sort_or_argsort",
    "source_code": "def _sort_or_argsort(values, axis, direction, return_argsort):\n    if direction not in _SORT_IMPL:\n        valid_directions = ', '.join(sorted(_SORT_IMPL.keys()))\n        raise ValueError(f'Argument `direction` should be one of {valid_directions}. Received: direction={direction}')\n    axis = framework_ops.convert_to_tensor(axis, name='axis')\n    axis_static = tensor_util.constant_value(axis)\n    if axis.shape.ndims not in (None, 0) or axis_static is None:\n        raise ValueError(f'Argument `axis` must be a constant scalar. Received: axis={axis}.')\n    axis_static = int(axis_static)\n    values = framework_ops.convert_to_tensor(values, name='values')\n    return _SORT_IMPL[direction](values, axis_static, return_argsort)",
    "docstring": "Internal sort/argsort implementation. Args: values: The input values. axis: The axis along which to sort. direction: 'ASCENDING' or 'DESCENDING'. return_argsort: Whether to return the argsort result. Returns: Either the sorted values, or the indices of the sorted values in the original tensor. See the and docstrings. Raises: ValueError: If axis is not a constant scalar, or the direction is invalid.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\sort_ops.py",
    "ast_data": "FunctionDef name:_sort_or_argsort arg:values arg:axis arg:direction arg:return_argsort arguments arg arg arg arg If Compare Assign Call Call Call Raise Call Assign Call Assign Call If BoolOp Compare Compare Raise Call Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "numpy",
    "name": "tofile",
    "source_code": "def tofile(self, fid, sep='', format='%s'):\n    raise NotImplementedError('MaskedArray.tofile() not implemented yet.')",
    "docstring": "Save a masked array to a file in binary format. .. warning:: This function is not implemented yet. Raises ------ NotImplementedError When is called.",
    "type": "method",
    "file_path": "numpy\\numpy\\ma\\core.py",
    "ast_data": "FunctionDef name:tofile arg:self arg:fid arg:sep arg:format arguments arg arg arg arg Raise Call"
  },
  {
    "library": "pytorch",
    "name": "realize_as_comm_buffer",
    "source_code": "def realize_as_comm_buffer(x: ir.TensorBox, comm_buffer_type: ir.CommBufferType, group_name: str) -> None:\n    x.realize()\n    buffer = _get_data(x)\n    assert isinstance(buffer, ir.Buffer)\n    layout = buffer.get_output_spec()\n    if isinstance(layout, ir.CommBufferLayout):\n        return\n    if not isinstance(layout, ir.FlexibleLayout):\n        raise AssertionError(f'A buffer can only be realized as a comm buffer if it has `FlexibleLayout` (got {layout}).')\n    if is_symbolic(buffer.get_numel()):\n        raise AssertionError(f'A buffer with symbolic shape cannot be converted to a comm buffer (got {layout}).')\n    buffer.layout = ir.CommBufferLayout(layout=layout, comm_buffer_type=comm_buffer_type, group_name=group_name)",
    "docstring": "Realize an input as a comm buffer of the specified . Specifically, this realizes the underlying buffer if it's still unrealized and changes the layout of the buffer to .",
    "type": "function",
    "file_path": "pytorch\\torch\\_inductor\\comm_lowering.py",
    "ast_data": "FunctionDef name:realize_as_comm_buffer arg:x arg:comm_buffer_type arg:group_name arguments arg arg arg Call Assign Call Call Assign Call If Call Return return:no If Call Raise Call If Call Call Raise Call Assign Call"
  },
  {
    "library": "matplotlib",
    "name": "_index_dvi_to_freetype",
    "source_code": "def _index_dvi_to_freetype(self, idx):\n    if self._encoding is None:\n        psfont = PsfontsMap(find_tex_file('pdftex.map'))[self.texname]\n        if psfont.filename is None:\n            raise ValueError('No usable font file found for {} ({}); the font may lack a Type-1 version'.format(psfont.psname.decode('ascii'), psfont.texname.decode('ascii')))\n        face = font_manager.get_font(psfont.filename)\n        if psfont.encoding:\n            self._encoding = [face.get_name_index(name) for name in _parse_enc(psfont.encoding)]\n        else:\n            self._encoding = face._get_type1_encoding_vector()\n    return self._encoding[idx]",
    "docstring": "Convert dvi glyph indices to FreeType ones.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\dviread.py",
    "ast_data": "FunctionDef name:_index_dvi_to_freetype arg:self arg:idx arguments arg arg If Compare Assign Call Call If Compare Raise Call Call Call Call Assign Call If Assign Call Call Assign Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "mark_non_differentiable",
    "source_code": "def mark_non_differentiable(self, *args: torch.Tensor):\n    self.non_differentiable = args",
    "docstring": "Mark outputs as non-differentiable. This should be called at most once, in either the :func: or :func: methods, and all arguments should be tensor outputs. This will mark outputs as not requiring gradients, increasing the efficiency of backward computation. You still need to accept a gradient for each output in :meth:, but it's always going to be a zero tensor with the same shape as the shape of a corresponding output. This is used e.g. for indices returned from a sort. See example:: >>> class Func(Function): >>> @staticmethod >>> def forward(ctx, x): >>> sorted, idx = x.sort() >>> ctx.mark_non_differentiable(idx) >>> ctx.save_for_backward(x, idx) >>> return sorted, idx >>> >>> @staticmethod >>> @once_differentiable >>> def backward(ctx, g1, g2): # still need to accept g2 >>> x, idx = ctx.saved_tensors >>> grad_input = torch.zeros_like(x) >>> grad_input.index_add_(0, idx, g1) >>> return grad_input",
    "type": "method",
    "file_path": "pytorch\\torch\\autograd\\function.py",
    "ast_data": "FunctionDef name:mark_non_differentiable arg:self arguments arg arg Assign"
  },
  {
    "library": "matplotlib",
    "name": "window_none",
    "source_code": "def window_none(x):\n    return x",
    "docstring": "No window function; simply return *x*. See Also -------- window_hanning : Another window algorithm.",
    "type": "function",
    "file_path": "matplotlib\\lib\\matplotlib\\mlab.py",
    "ast_data": "FunctionDef name:window_none arg:x arguments arg Return return:yes"
  },
  {
    "library": "sphinx",
    "name": "add_role",
    "source_code": "def add_role(self, name: str, role: Any, override: bool=False) -> None:\n    logger.debug('[app] adding role: %r', (name, role))\n    if not override and docutils.is_role_registered(name):\n        logger.warning(__('role %r is already registered and will not be overridden'), name, type='app', subtype='add_role')\n    docutils.register_role(name, role)",
    "docstring": "Register a Docutils role. :param name: The name of role :param role: A role function :param override: If false, do not install it if another role is already installed as the same name If true, unconditionally install the role. For more details about role functions, see __ . .. versionchanged:: 1.8 Add *override* keyword.",
    "type": "method",
    "file_path": "sphinx\\sphinx\\application.py",
    "ast_data": "FunctionDef name:add_role arg:self arg:name arg:role arg:override arguments arg arg arg arg Call If BoolOp Call Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "IsCondSwitch",
    "source_code": "def IsCondSwitch(op):\n    if not IsSwitch(op):\n        return False\n    if not op.outputs:\n        return False\n    is_cond_switch = True\n    for o in op.outputs:\n        for c in o.consumers():\n            ctxt = c._get_control_flow_context()\n            if IsLoopEnter(c):\n                ctxt = ctxt.outer_context\n            is_cond_switch = is_cond_switch and (ctxt is not None and ctxt.IsCondContext())\n    return is_cond_switch",
    "docstring": "Return true if is the Switch for a conditional.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\control_flow_util.py",
    "ast_data": "FunctionDef name:IsCondSwitch arg:op arguments arg If Call Return return:yes If Return return:yes Assign For For Call Assign Call If Call Assign Assign BoolOp BoolOp Compare Call Return return:yes"
  },
  {
    "library": "pandas",
    "name": "_validate",
    "source_code": "def _validate(self) -> None:\n    if len(self._ndarray) and (not lib.is_string_array(self._ndarray, skipna=True)):\n        raise ValueError('StringArrayNumpySemantics requires a sequence of strings or NaN')\n    if self._ndarray.dtype != 'object':\n        raise ValueError(f\"StringArrayNumpySemantics requires a sequence of strings or NaN. Got '{self._ndarray.dtype}' dtype instead.\")",
    "docstring": "Validate that we only store NaN or strings.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\arrays\\string_.py",
    "ast_data": "FunctionDef name:_validate arg:self arguments arg If BoolOp Call Call Raise Call If Compare Raise Call"
  },
  {
    "library": "django",
    "name": "is_aware",
    "source_code": "def is_aware(value):\n    return value.utcoffset() is not None",
    "docstring": "Determine if a given datetime.datetime is aware. The concept is defined in Python's docs: Assuming value.tzinfo is either None or a proper datetime.tzinfo, value.utcoffset() implements the appropriate logic.",
    "type": "function",
    "file_path": "django\\django\\utils\\timezone.py",
    "ast_data": "FunctionDef name:is_aware arg:value arguments arg Return return:yes Compare Call"
  },
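A quick illustration of the distinction this helper checks, using only the standard library for the inputs:

```python
from datetime import datetime, timezone
from django.utils.timezone import is_aware

naive = datetime(2024, 1, 1, 12, 0)
aware = datetime(2024, 1, 1, 12, 0, tzinfo=timezone.utc)

assert not is_aware(naive)  # utcoffset() is None
assert is_aware(aware)      # utcoffset() returns timedelta(0)
```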
  {
    "library": "tensorflow",
    "name": "as_dict",
    "source_code": "def as_dict(self):\n    for k, v in zip(self._input_names, self._flattened_inputs):\n        yield (k, v)",
    "docstring": "An iterable over a dictionary version of inputs.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\training_utils_v1.py",
    "ast_data": "FunctionDef name:as_dict arg:self arguments arg For Call"
  },
  {
    "library": "pytorch",
    "name": "MetadataKey",
    "source_code": "@dataclass(frozen=True)\nclass MetadataKey:\n    size: tuple[SymIntEqByExpr, ...]\n    layout: torch.layout\n    is_sparse: bool\n    stride: Optional[tuple[SymIntEqByExpr, ...]]\n    storage_offset: Optional[SymIntEqByExpr]\n    is_conj: bool\n    is_neg: bool\n\n    @staticmethod\n    def make(t):\n        is_sparse = is_sparse_any(t)\n        return MetadataKey(size=tuple((SymIntEqByExpr(s) for s in t.size())), layout=t.layout, is_sparse=is_sparse, stride=None if is_sparse else tuple((SymIntEqByExpr(s) for s in t.stride())), storage_offset=None if is_sparse else SymIntEqByExpr(t.storage_offset()), is_conj=t.is_conj(), is_neg=t.is_neg())",
    "docstring": "This should be equal whenever has_same_metadata would return True",
    "type": "class",
    "file_path": "pytorch\\torch\\_functorch\\_aot_autograd\\functional_utils.py",
    "ast_data": "ClassDef name:MetadataKey FunctionDef name:make arg:t arguments arg Assign Call Return return:yes Call Call Call Call Call Call Call Call Call Call Call Call"
  },
  {
    "library": "matplotlib",
    "name": "viewLim",
    "source_code": "@property\ndef viewLim(self):\n    self._unstale_viewLim()\n    return self._viewLim",
    "docstring": "The view limits as in data coordinates.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\axes\\_base.py",
    "ast_data": "FunctionDef name:viewLim arg:self arguments arg Call Return return:yes"
  },
  {
    "library": "scipy",
    "name": "tocoo",
    "source_code": "def tocoo(self, copy=True):\n    M, N = self.shape\n    R, C = self.blocksize\n    indptr_diff = np.diff(self.indptr)\n    if indptr_diff.dtype.itemsize > np.dtype(np.intp).itemsize:\n        indptr_diff_limited = indptr_diff.astype(np.intp)\n        if np.any(indptr_diff_limited != indptr_diff):\n            raise ValueError('Matrix too big to convert')\n        indptr_diff = indptr_diff_limited\n    idx_dtype = self._get_index_dtype(maxval=max(M, N))\n    row = (R * np.arange(M // R, dtype=idx_dtype)).repeat(indptr_diff)\n    row = row.repeat(R * C).reshape(-1, R, C)\n    row += np.tile(np.arange(R, dtype=idx_dtype).reshape(-1, 1), (1, C))\n    row = row.reshape(-1)\n    col = (C * self.indices).astype(idx_dtype, copy=False).repeat(R * C).reshape(-1, R, C)\n    col += np.tile(np.arange(C, dtype=idx_dtype), (R, 1))\n    col = col.reshape(-1)\n    data = self.data.reshape(-1)\n    if copy:\n        data = data.copy()\n    return self._coo_container((data, (row, col)), shape=self.shape)",
    "docstring": "Convert this array/matrix to COOrdinate format. When copy=False the data array will be shared between this array/matrix and the resultant coo_array/coo_matrix.",
    "type": "method",
    "file_path": "scipy\\scipy\\sparse\\_bsr.py",
    "ast_data": "FunctionDef name:tocoo arg:self arg:copy arguments arg arg Assign Assign Assign Call If Compare Call Assign Call If Call Compare Raise Call Assign Assign Call Call Assign Call Call Assign Call Call Call Call Call Assign Call Assign Call Call Call Call Call Assign Call Assign Call If Assign Call Return return:yes Call"
  },
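A small sketch of the conversion on a block sparse array (the values and block layout here are arbitrary):

```python
import numpy as np
from scipy.sparse import bsr_array

# Two 2x2 blocks: block row 0 holds block column 0, block row 1 holds block column 1.
indptr = np.array([0, 1, 2])
indices = np.array([0, 1])
data = np.arange(1, 9, dtype=float).reshape(2, 2, 2)
A = bsr_array((data, indices, indptr), shape=(4, 4))

C = A.tocoo()  # expands each block into individual (row, col, value) triples
print(C.row, C.col, C.data)

C2 = A.tocoo(copy=False)  # shares the data buffer with A
```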
  {
    "library": "pytorch",
    "name": "printoptions",
    "source_code": "@contextlib.contextmanager\ndef printoptions(**kwargs):\n    old_kwargs = get_printoptions()\n    set_printoptions(**kwargs)\n    try:\n        yield\n    finally:\n        set_printoptions(**old_kwargs)",
    "docstring": "Context manager that temporarily changes the print options. Accepted arguments are same as :func:.",
    "type": "function",
    "file_path": "pytorch\\torch\\_tensor_str.py",
    "ast_data": "FunctionDef name:printoptions arguments arg Assign Call Call Try Call"
  },
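A usage sketch; note the context manager lives in the internal module named in `file_path` above, while `torch.set_printoptions` is the public entry point:

```python
import torch
from torch._tensor_str import printoptions  # internal module, see file_path above

x = torch.tensor([1 / 3, 2 / 3])
with printoptions(precision=2):
    print(x)  # printed with 2 digits of precision
print(x)      # previous print options are restored on exit
```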
  {
    "library": "numpy",
    "name": "ndincr",
    "source_code": "def ndincr(self):\n    warnings.warn('`ndindex.ndincr()` is deprecated, use `next(ndindex)` instead', DeprecationWarning, stacklevel=2)\n    next(self)",
    "docstring": "Increment the multi-dimensional index by one. This method is for backward compatibility only: do not use. .. deprecated:: 1.20.0 This method has been advised against since numpy 1.8.0, but only started emitting DeprecationWarning as of this version.",
    "type": "method",
    "file_path": "numpy\\numpy\\lib\\_index_tricks_impl.py",
    "ast_data": "FunctionDef name:ndincr arg:self arguments arg Call Call"
  },
  {
    "library": "scikit-learn",
    "name": "transform",
    "source_code": "def transform(self, X):\n    check_is_fitted(self)\n    X = validate_data(self, X, reset=False)\n    ind = self.nbrs_.kneighbors(X, n_neighbors=self.n_neighbors, return_distance=False)\n    weights = barycenter_weights(X, self.nbrs_._fit_X, ind, reg=self.reg)\n    X_new = np.empty((X.shape[0], self.n_components))\n    for i in range(X.shape[0]):\n        X_new[i] = np.dot(self.embedding_[ind[i]].T, weights[i])\n    return X_new",
    "docstring": "Transform new points into embedding space. Parameters ---------- X : array-like of shape (n_samples, n_features) Training set. Returns ------- X_new : ndarray of shape (n_samples, n_components) Returns the instance itself. Notes ----- Because of scaling performed by this method, it is discouraged to use it together with methods that are not scale-invariant (like SVMs).",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\manifold\\_locally_linear.py",
    "ast_data": "FunctionDef name:transform arg:self arg:X arguments arg arg Call Assign Call Assign Call Assign Call Assign Call For Call Assign Call Return return:yes"
  },
  {
    "library": "cryptography",
    "name": "load_public",
    "source_code": "def load_public(self, data: memoryview) -> tuple[rsa.RSAPublicKey, memoryview]:\n    (e, n), data = self.get_public(data)\n    public_numbers = rsa.RSAPublicNumbers(e, n)\n    public_key = public_numbers.public_key()\n    return (public_key, data)",
    "docstring": "Make RSA public key from data.",
    "type": "method",
    "file_path": "cryptography\\src\\cryptography\\hazmat\\primitives\\serialization\\ssh.py",
    "ast_data": "FunctionDef name:load_public arg:self arg:data arguments arg arg Assign Call Assign Call Assign Call Return return:yes"
  },
  {
    "library": "scipy",
    "name": "fac_magnitude",
    "source_code": "@property\ndef fac_magnitude(self) -> float:\n    if self.scaling == 'magnitude':\n        return 1\n    if self._fac_mag is None:\n        self._fac_mag = 1 / abs(sum(self.win))\n    return self._fac_mag",
    "docstring": "Factor to multiply the STFT values by to scale each frequency slice to a magnitude spectrum. It is 1 if attribute `scale_to`. See Also -------- fac_psd: Scaling factor for to a power spectral density spectrum. scale_to: Scale window to obtain 'magnitude' or 'psd' scaling. scaling: Normalization applied to the window function. ShortTimeFFT: Class this property belongs to.",
    "type": "method",
    "file_path": "scipy\\scipy\\signal\\_short_time_fft.py",
    "ast_data": "FunctionDef name:fac_magnitude arg:self arguments arg If Compare Return return:yes If Compare Assign Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "forward_backward",
    "source_code": "def forward_backward(self, num_doutputs=None):\n    if num_doutputs is None:\n        num_doutputs = self._num_inference_outputs\n    forward_backward = self._cached_function_pairs.get(num_doutputs)\n    if forward_backward is not None:\n        return forward_backward\n    forward, backward = self._construct_forward_backward(num_doutputs)\n    self._cached_function_pairs[num_doutputs] = (forward, backward)\n    return (forward, backward)",
    "docstring": "A possibly-cached pair of forward and backward functions.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\polymorphic_function\\concrete_function.py",
    "ast_data": "FunctionDef name:forward_backward arg:self arg:num_doutputs arguments arg arg If Compare Assign Assign Call If Compare Return return:yes Assign Call Assign Return return:yes"
  },
  {
    "library": "sphinx",
    "name": "connect",
    "source_code": "def connect(self, event: str, callback: Callable[..., Any], priority: int=500) -> int:\n    listener_id = self.events.connect(event, callback, priority)\n    logger.debug('[app] connecting event %r (%d): %r [id=%s]', event, priority, callback, listener_id)\n    return listener_id",
    "docstring": "Register *callback* to be called when *event* is emitted. For details on available core events and the arguments of callback functions, please see :ref:. :param event: The name of target event :param callback: Callback function for the event :param priority: The priority of the callback. The callbacks will be invoked in order of *priority* (ascending). :return: A listener ID. It can be used for :meth:. .. versionchanged:: 3.0 Support *priority*",
    "type": "method",
    "file_path": "sphinx\\sphinx\\application.py",
    "ast_data": "FunctionDef name:connect arg:self arg:event arg:callback arg:priority arguments arg arg arg arg Assign Call Call Return return:yes"
  },
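A sketch of connecting a listener from a Sphinx extension's `setup()`; `build-finished` is a core event, and the priority value here is arbitrary:

```python
def on_build_finished(app, exception):
    if exception is None:
        print("build succeeded")

def setup(app):
    listener_id = app.connect("build-finished", on_build_finished, priority=700)
    # listener_id can later be passed to app.disconnect(listener_id)
    return {"parallel_read_safe": True}
```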
  {
    "library": "tensorflow",
    "name": "temporarily_disable_optionals_in_gradients",
    "source_code": "@contextlib.contextmanager\ndef temporarily_disable_optionals_in_gradients():\n    previously_enabled = optionals_in_gradients_enabled()\n    try:\n        disable_optionals_in_gradients()\n        yield\n    finally:\n        if previously_enabled:\n            enable_optionals_in_gradients()",
    "docstring": "Temporarily disables generation of optionals in gradients. Should be a no-op if it is already disabled. Yields: None.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\context.py",
    "ast_data": "FunctionDef name:temporarily_disable_optionals_in_gradients arguments Assign Call Try Call If Call"
  },
  {
    "library": "tensorflow",
    "name": "_no_dependency",
    "source_code": "def _no_dependency(self, value):\n    return data_structures.NoDependency(value)",
    "docstring": "Override to allow TrackableBase to disable dependency tracking.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\trackable\\autotrackable.py",
    "ast_data": "FunctionDef name:_no_dependency arg:self arg:value arguments arg arg Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "get_cb_parent_spans",
    "source_code": "def get_cb_parent_spans(cbax):\n    rowstart = np.inf\n    rowstop = -np.inf\n    colstart = np.inf\n    colstop = -np.inf\n    for parent in cbax._colorbar_info['parents']:\n        ss = parent.get_subplotspec()\n        rowstart = min(ss.rowspan.start, rowstart)\n        rowstop = max(ss.rowspan.stop, rowstop)\n        colstart = min(ss.colspan.start, colstart)\n        colstop = max(ss.colspan.stop, colstop)\n    rowspan = range(rowstart, rowstop)\n    colspan = range(colstart, colstop)\n    return (rowspan, colspan)",
    "docstring": "Figure out which subplotspecs this colorbar belongs to. Parameters ---------- cbax : Axes for the colorbar.",
    "type": "function",
    "file_path": "matplotlib\\lib\\matplotlib\\_constrained_layout.py",
    "ast_data": "FunctionDef name:get_cb_parent_spans arg:cbax arguments arg Assign Assign Assign Assign For Assign Call Assign Call Assign Call Assign Call Assign Call Assign Call Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "python_function",
    "source_code": "@property\ndef python_function(self):\n    return self._python_function",
    "docstring": "The python function wrapped in this tf.function.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\polymorphic_function\\polymorphic_function.py",
    "ast_data": "FunctionDef name:python_function arg:self arguments arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_update_module_param",
    "source_code": "def _update_module_param(param_list: list[tuple[nn.Module, str, nn.Parameter]]):\n    for item in param_list:\n        parent_module, module_path, t = item\n        assert hasattr(parent_module, module_path)\n        delattr(parent_module, module_path)\n        setattr(parent_module, module_path, t)",
    "docstring": "Update parameters within the module",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\tensor\\parallel\\ddp.py",
    "ast_data": "FunctionDef name:_update_module_param arg:param_list arguments arg For Assign Call Call Call"
  },
  {
    "library": "pandas",
    "name": "var",
    "source_code": "@deprecate_nonkeyword_arguments(version='4.0', allowed_args=['self'], name='var')\ndef var(self, axis: Axis | None=0, skipna: bool=True, ddof: int=1, numeric_only: bool=False, **kwargs) -> Series | Any:\n    result = super().var(axis=axis, skipna=skipna, ddof=ddof, numeric_only=numeric_only, **kwargs)\n    if isinstance(result, Series):\n        result = result.__finalize__(self, method='var')\n    return result",
    "docstring": "Return unbiased variance over requested axis. Normalized by N-1 by default. This can be changed using the ddof argument. Parameters ---------- axis : {index (0), columns (1)} For this parameter is unused and defaults to 0. .. warning:: The behavior of DataFrame.var with `` can be set to normalize by N instead of N-1: >>> df.var(ddof=0) age 264.687500 height 0.042275 dtype: float64",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\frame.py",
    "ast_data": "FunctionDef name:var arg:self arg:axis arg:skipna arg:ddof arg:numeric_only arguments arg arg arg arg arg arg Assign Call Call If Call Assign Call Return return:yes Call"
  },
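A minimal sketch reproducing the docstring's sample output; this data matches the `ddof=0` values shown above:

```python
import pandas as pd

df = pd.DataFrame({"age": [21, 25, 62, 43], "height": [1.61, 1.87, 1.49, 2.01]})
print(df.var())        # unbiased: divides squared deviations by N - 1
print(df.var(ddof=0))  # population variance: divides by N
# age 264.687500, height 0.042275 for ddof=0, matching the docstring
```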
  {
    "library": "pandas",
    "name": "_dtype_to_default_stata_fmt",
    "source_code": "def _dtype_to_default_stata_fmt(dtype: np.dtype, column: Series, dta_version: int=114, force_strl: bool=False) -> str:\n    if dta_version < 117:\n        max_str_len = 244\n    else:\n        max_str_len = 2045\n        if force_strl:\n            return '%9s'\n    if dtype.type is np.object_:\n        itemsize = max_len_string_array(ensure_object(column._values))\n        if itemsize > max_str_len:\n            if dta_version >= 117:\n                return '%9s'\n            else:\n                raise ValueError(excessive_string_length_error.format(column.name))\n        return '%' + str(max(itemsize, 1)) + 's'\n    elif dtype == np.float64:\n        return '%10.0g'\n    elif dtype == np.float32:\n        return '%9.0g'\n    elif dtype == np.int32:\n        return '%12.0g'\n    elif dtype in (np.int8, np.int16):\n        return '%8.0g'\n    else:\n        raise NotImplementedError(f'Data type {dtype} not supported.')",
    "docstring": "Map numpy dtype to stata's default format for this type. Not terribly important since users can change this in Stata. Semantics are object -> \"%DDs\" where DD is the length of the string. If not a string, raise ValueError float64 -> \"%10.0g\" float32 -> \"%9.0g\" int64 -> \"%9.0g\" int32 -> \"%12.0g\" int16 -> \"%8.0g\" int8 -> \"%8.0g\" strl -> \"%9s\"",
    "type": "function",
    "file_path": "pandas\\pandas\\io\\stata.py",
    "ast_data": "FunctionDef name:_dtype_to_default_stata_fmt arg:dtype arg:column arg:dta_version arg:force_strl arguments arg arg arg arg If Compare Assign Assign If Return return:yes If Compare Assign Call Call If Compare If Compare Return return:yes Raise Call Call Return return:yes Call Call If Compare Return return:yes If Compare Return return:yes If Compare Return return:yes If Compare Return return:yes Raise Call"
  },
  {
    "library": "pytorch",
    "name": "_get_in_backward_optimizers",
    "source_code": "def _get_in_backward_optimizers(module: torch.nn.Module) -> list[torch.optim.Optimizer]:\n    optims: list[torch.optim.Optimizer] = []\n    for param in module.parameters():\n        optims.extend(getattr(param, '_in_backward_optimizers', []))\n    return optims",
    "docstring": "Return a list of in-backward optimizers applied to `` methods called by the user and are intended to be used for things like checkpointing. Args: module: (torch.nn.Module): model to retrieve in-backward optimizers for Returns: List[torch.optim.Optimizer]: the in-backward optimizers. Example:: _apply_optimizer_in_backward(torch.optim.SGD, model.parameters(), {\"lr\": 0.01}) optims = _get_optimizers_in_backward(model)",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\optim\\apply_optimizer_in_backward.py",
    "ast_data": "FunctionDef name:_get_in_backward_optimizers arg:module arguments arg For Call Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "BypassFxGraphCache",
    "source_code": "class BypassFxGraphCache(Exception):\n    pass",
    "docstring": "Exception to indicate that the FxGraphCache should be bypassed.",
    "type": "class",
    "file_path": "pytorch\\torch\\_inductor\\codecache.py",
    "ast_data": "ClassDef name:BypassFxGraphCache"
  },
  {
    "library": "pytorch",
    "name": "decompose_APoT",
    "source_code": "def decompose_APoT(self, x):\n    x = x[2:]\n    blocks = []\n    while x:\n        blocks.append(x[0:self.k])\n        x = x[self.k:]\n    return blocks",
    "docstring": "Decompose binary representation of APoT values into list of k-sized blocks Args: x (Tensor): binary representation of APoT quantized tensor",
    "type": "method",
    "file_path": "pytorch\\torch\\ao\\quantization\\experimental\\linear.py",
    "ast_data": "FunctionDef name:decompose_APoT arg:self arg:x arguments arg arg Assign Assign While Call Assign Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "type_check_node",
    "source_code": "def type_check_node(self, n: Node):\n    if n.type is None:\n        n.type = Dyn\n    if n.op == 'placeholder':\n        return n.type\n    elif n.op == 'get_attr':\n        t = get_parameter(self.traced, n.target)\n        if isinstance(t.data, torch.Tensor):\n            n.type = TensorType(t.data.shape)\n        return n.type\n    elif n.op == 'call_function':\n        if n.target == getattr:\n            assert getattr in _INFERENCE_RULES\n            return _INFERENCE_RULES[n.target](n, self.traced)\n        elif n.target in _INFERENCE_RULES:\n            return _INFERENCE_RULES[n.target](n)\n        else:\n            raise RuntimeError(f'No inference rule registered for target {n.target}!')\n    elif n.op == 'call_module':\n        module_instance = self.traced.get_submodule(n.target)\n        if type(module_instance) in _INFERENCE_RULES:\n            return _INFERENCE_RULES[type(module_instance)](n, module_instance)\n        else:\n            raise RuntimeError(f'No inference rule registered for class {type(module_instance)}!')\n    elif n.op == 'output':\n\n        def get_node_type(a):\n            return a.type\n        n.type = torch.fx.node.map_arg(n.args[0], get_node_type)\n        return n.type\n    else:\n        raise NotImplementedError(f'Method {n.op} not yet implemented')",
    "docstring": "Type check a given fx node. Current operations: - Reshape - Transpose - Add - Relu - conv2d - batchnorm2d - flatten - maxpool2d - adaptiveavgpool2d - linear",
    "type": "method",
    "file_path": "pytorch\\torch\\fx\\experimental\\graph_gradual_typechecker.py",
    "ast_data": "FunctionDef name:type_check_node arg:self arg:n arguments arg arg If Compare Assign If Compare Return return:yes If Compare Assign Call If Call Assign Call Return return:yes If Compare If Compare Compare Return return:yes Call If Compare Return return:yes Call Raise Call If Compare Assign Call If Compare Call Return return:yes Call Call Raise Call Call If Compare FunctionDef name:get_node_type arg:a arguments arg Return return:yes Assign Call Return return:yes Raise Call"
  },
  {
    "library": "pytorch",
    "name": "get_original_value_of_constant",
    "source_code": "def get_original_value_of_constant(self, name: str) -> torch.Tensor:\n    assert name in self.allocated_constant_name and name in self.constants, 'Can not find the original value for ' + name\n    orig_name = get_cloned_parameter_buffer_name(self.allocated_constant_name[name])\n    return self.module.meta[orig_name] if orig_name in self.module.meta else self.constants[name]",
    "docstring": "In AOTI, module buffers may have been mutated during the tracing and compilation. Thus we need to read from previously stored original buffers, to make sure the generated model.so uses correct initial values.",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\graph.py",
    "ast_data": "FunctionDef name:get_original_value_of_constant arg:self arg:name arguments arg arg BoolOp Compare Compare Assign Call Return return:yes Compare"
  },
  {
    "library": "tensorflow",
    "name": "TPUUninitializedVariable",
    "source_code": "class TPUUninitializedVariable(resource_variable_ops.UninitializedVariable):\n\n    def read_value(self):\n        self._lazy_scope.initialize_all()\n        return super().read_value()\n\n    def assign_sub(self, delta, use_locking=None, name=None, read_value=True):\n        self._lazy_scope.initialize_all()\n        return super().assign_sub(delta, use_locking=use_locking, name=name, read_value=read_value)\n\n    def assign(self, value, use_locking=None, name=None, read_value=True):\n        self._lazy_scope.initialize_all()\n        return super().assign(value, use_locking=use_locking, name=name, read_value=read_value)\n\n    def assign_add(self, delta, use_locking=None, name=None, read_value=True):\n        self._lazy_scope.initialize_all()\n        return super().assign_add(delta, use_locking=use_locking, name=name, read_value=read_value)",
    "docstring": "UninitializedVariable component for TPU. Sometimes user might assign (different values) to a single component of a mirrored TPU variable. Thus we need to initialize_all when the assign* or read is invoked on a single component.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\tpu_util.py",
    "ast_data": "ClassDef name:TPUUninitializedVariable FunctionDef name:read_value arg:self arguments arg Call Return return:yes Call Call FunctionDef name:assign_sub arg:self arg:delta arg:use_locking arg:name arg:read_value arguments arg arg arg arg arg Call Return return:yes Call Call FunctionDef name:assign arg:self arg:value arg:use_locking arg:name arg:read_value arguments arg arg arg arg arg Call Return return:yes Call Call FunctionDef name:assign_add arg:self arg:delta arg:use_locking arg:name arg:read_value arguments arg arg arg arg arg Call Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "cond",
    "source_code": "def cond(self, name='cond'):\n    with self._name_scope(name):\n        return self._cond()",
    "docstring": "Returns the condition number of this linear operator. Args: name: A name for this . Returns: Shape of same as .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\linalg\\linear_operator.py",
    "ast_data": "FunctionDef name:cond arg:self arg:name arguments arg arg With Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "has_arg",
    "source_code": "def has_arg(fn, name, accept_all=False):\n    arg_spec = tf_inspect.getfullargspec(fn)\n    if accept_all and arg_spec.varkw is not None:\n        return True\n    return name in arg_spec.args or name in arg_spec.kwonlyargs",
    "docstring": "Checks if a callable accepts a given keyword argument. Args: fn: Callable to inspect. name: Check if can be called with as a keyword argument. accept_all: What to return if there is no parameter called but the function accepts a argument. Returns: bool, whether accepts a keyword argument.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\utils\\generic_utils.py",
    "ast_data": "FunctionDef name:has_arg arg:fn arg:name arg:accept_all arguments arg arg arg Assign Call If BoolOp Compare Return return:yes Return return:yes BoolOp Compare Compare"
  },
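`tf_inspect` mirrors the standard `inspect` module, so the same check can be sketched with the standard library alone:

```python
import inspect

def has_arg(fn, name, accept_all=False):
    spec = inspect.getfullargspec(fn)
    if accept_all and spec.varkw is not None:
        return True  # fn has **kwargs, so it accepts any keyword
    return name in spec.args or name in spec.kwonlyargs

def f(a, b=1, *, c=2, **extra):
    pass

assert has_arg(f, "b") and has_arg(f, "c")
assert not has_arg(f, "d")
assert has_arg(f, "d", accept_all=True)  # swallowed by **extra
```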
  {
    "library": "django",
    "name": "g",
    "source_code": "def g(self):\n    return self.data.hour % 12 or 12",
    "docstring": "Hour, 12-hour format without leading zeros; i.e. '1' to '12'",
    "type": "method",
    "file_path": "django\\django\\utils\\dateformat.py",
    "ast_data": "FunctionDef name:g arg:self arguments arg Return return:yes BoolOp"
  },
  {
    "library": "pytorch",
    "name": "figure_to_image",
    "source_code": "def figure_to_image(figures, close=True):\n    import matplotlib.pyplot as plt\n    import matplotlib.backends.backend_agg as plt_backend_agg\n\n    def render_to_rgb(figure):\n        canvas = plt_backend_agg.FigureCanvasAgg(figure)\n        canvas.draw()\n        data: npt.NDArray = np.frombuffer(canvas.buffer_rgba(), dtype=np.uint8)\n        w, h = figure.canvas.get_width_height()\n        image_hwc = data.reshape([h, w, 4])[:, :, 0:3]\n        image_chw = np.moveaxis(image_hwc, source=2, destination=0)\n        if close:\n            plt.close(figure)\n        return image_chw\n    if isinstance(figures, list):\n        images = [render_to_rgb(figure) for figure in figures]\n        return np.stack(images)\n    else:\n        image = render_to_rgb(figures)\n        return image",
    "docstring": "Render matplotlib figure to numpy format. Note that this requires the `` package. Args: figures (matplotlib.pyplot.figure or list of figures): figure or a list of figures close (bool): Flag to automatically close the figure Returns: numpy.array: image in [CHW] order",
    "type": "function",
    "file_path": "pytorch\\torch\\utils\\tensorboard\\_utils.py",
    "ast_data": "FunctionDef name:figure_to_image arg:figures arg:close arguments arg arg FunctionDef name:render_to_rgb arg:figure arguments arg Assign Call Call Call Call Assign Call Assign Call Assign Call If Call Return return:yes If Call Assign Call Return return:yes Call Assign Call Return return:yes"
  },
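A minimal usage sketch, assuming matplotlib is installed and using a headless backend:

```python
import matplotlib
matplotlib.use("Agg")  # headless backend for rendering to a buffer
import matplotlib.pyplot as plt
from torch.utils.tensorboard._utils import figure_to_image

fig, ax = plt.subplots()
ax.plot([0, 1, 2], [0, 1, 4])
img = figure_to_image(fig)  # ndarray in CHW order, dtype uint8
print(img.shape)            # (3, H, W); the figure is closed by default
```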
  {
    "library": "django",
    "name": "get_table_list",
    "source_code": "def get_table_list(self, cursor):\n    cursor.execute(\"\\n            SELECT name, type FROM sqlite_master\\n            WHERE type in ('table', 'view') AND NOT name='sqlite_sequence'\\n            ORDER BY name\")\n    return [TableInfo(row[0], row[1][0]) for row in cursor.fetchall()]",
    "docstring": "Return a list of table and view names in the current database.",
    "type": "method",
    "file_path": "django\\django\\db\\backends\\sqlite3\\introspection.py",
    "ast_data": "FunctionDef name:get_table_list arg:self arg:cursor arguments arg arg Call Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "tree_leaves",
    "source_code": "def tree_leaves(tree: PyTree, is_leaf: Optional[Callable[[PyTree], bool]]=None) -> list[Any]:\n    return list(tree_iter(tree, is_leaf=is_leaf))",
    "docstring": "Get a list of leaves of a pytree.",
    "type": "function",
    "file_path": "pytorch\\torch\\utils\\_pytree.py",
    "ast_data": "FunctionDef name:tree_leaves arg:tree arg:is_leaf arguments arg arg Return return:yes Call Call"
  },
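A quick usage sketch on a nested container, including the optional `is_leaf` predicate:

```python
from torch.utils._pytree import tree_leaves

tree = {"a": [1, 2], "b": (3, {"c": 4})}
print(tree_leaves(tree))  # [1, 2, 3, 4]

# is_leaf can stop recursion early, e.g. to treat tuples as leaves:
print(tree_leaves(tree, is_leaf=lambda x: isinstance(x, tuple)))
# [1, 2, (3, {'c': 4})]
```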
  {
    "library": "tensorflow",
    "name": "_Intermediate",
    "source_code": "class _Intermediate(_Node):\n\n    def convert_variable_to_constant(self, incoming_edge, tensor_data):\n        node = self.converted_self()\n        node.update_dtype('T', incoming_edge.destination.index, tensor_data.dtype)\n        if '_output_shapes' in node.node.attr:\n            del node.node.attr['_output_shapes']\n        for edge in self.outgoing_edges:\n            edge.destination.convertible.convert_variable_to_constant(edge, tensor_data)",
    "docstring": "Specialization of _Node to intermediate ops.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\convert_to_constants.py",
    "ast_data": "ClassDef name:_Intermediate FunctionDef name:convert_variable_to_constant arg:self arg:incoming_edge arg:tensor_data arguments arg arg arg Assign Call Call If Compare For Call"
  },
  {
    "library": "pytorch",
    "name": "_flatten_sparse_tensors",
    "source_code": "def _flatten_sparse_tensors(tensors):\n    flat_indices = torch._C._nn.flatten_dense_tensors([torch.Tensor._indices(t) for t in tensors])\n    flat_values = torch._C._nn.flatten_dense_tensors([torch.Tensor._values(t) for t in tensors])\n    return (flat_indices, flat_values)",
    "docstring": "Flatten sparse tensors into two contiguous 1D buffers, one of indices and one of values. Assume tensors are of same sparse type. Args: tensors (Iterable[Tensor]): sparse tensors to flatten. Returns: A tuple of two contiguous 1D buffers, one containing input tensors' indices and the other containing the values.",
    "type": "function",
    "file_path": "pytorch\\torch\\_utils.py",
    "ast_data": "FunctionDef name:_flatten_sparse_tensors arg:tensors arguments arg Assign Call Call Assign Call Call Return return:yes"
  },
  {
    "library": "pandas",
    "name": "online",
    "source_code": "def online(self, engine: str='numba', engine_kwargs=None) -> OnlineExponentialMovingWindow:\n    return OnlineExponentialMovingWindow(obj=self.obj, com=self.com, span=self.span, halflife=self.halflife, alpha=self.alpha, min_periods=self.min_periods, adjust=self.adjust, ignore_na=self.ignore_na, times=self.times, engine=engine, engine_kwargs=engine_kwargs, selection=self._selection)",
    "docstring": "Return an `` and will be applied to the function Returns ------- OnlineExponentialMovingWindow",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\window\\ewm.py",
    "ast_data": "FunctionDef name:online arg:self arg:engine arg:engine_kwargs arguments arg arg arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_column_name_with_class_name",
    "source_code": "def _column_name_with_class_name(fc):\n    return fc.__class__.__name__ + ':' + fc.name",
    "docstring": "Returns a unique name for the feature column used during deduping. Without this two FeatureColumns that have the same name and where one wraps the other, such as an IndicatorColumn wrapping a SequenceCategoricalColumn, will fail to deserialize because they will have the same name in columns_by_name, causing the wrong column to be returned. Args: fc: A FeatureColumn. Returns: A unique name as a string.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\feature_column\\serialization.py",
    "ast_data": "FunctionDef name:_column_name_with_class_name arg:fc arguments arg Return return:yes"
  },
  {
    "library": "django",
    "name": "relate",
    "source_code": "def relate(self, other):\n    return capi.geos_relate(self.ptr, other.ptr).decode()",
    "docstring": "Return the DE-9IM intersection matrix for this Geometry and the other.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\geos\\geometry.py",
    "ast_data": "FunctionDef name:relate arg:self arg:other arguments arg arg Return return:yes Call Call"
  },
  {
    "library": "kornia",
    "name": "_valid_boxes",
    "source_code": "def _valid_boxes(self, boxes: Boxes | Tensor) -> Boxes:\n    if isinstance(boxes, Tensor):\n        KORNIA_CHECK_SHAPE(boxes.data, ['K', '4'])\n        boxes = Boxes(boxes, mode='xyxy')\n    if boxes.mode == 'xyxy':\n        boxes_xyxy = boxes\n    else:\n        boxes_xyxy = Boxes(boxes.to_tensor(mode='xyxy'), mode='xyxy')\n    return boxes_xyxy",
    "docstring": "Validate the boxes shape and ensure to be a Boxes into xyxy mode.",
    "type": "method",
    "file_path": "kornia\\kornia\\contrib\\visual_prompter.py",
    "ast_data": "FunctionDef name:_valid_boxes arg:self arg:boxes arguments arg arg If Call Call Assign Call If Compare Assign Assign Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "set_global_seed",
    "source_code": "def set_global_seed(seed):\n    context()._set_global_seed(seed)",
    "docstring": "Sets the eager mode seed.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\context.py",
    "ast_data": "FunctionDef name:set_global_seed arg:seed arguments arg Call Call"
  },
  {
    "library": "tensorflow",
    "name": "get_function_def",
    "source_code": "def get_function_def(fname, graph):\n    if context.executing_eagerly():\n        if context.context().has_function(fname):\n            return context.context().get_function_def(fname)\n    else:\n        while graph is not None:\n            if graph._is_function(fname):\n                return graph._get_function(fname).cached_definition\n            graph = getattr(graph, 'outer_graph', None)",
    "docstring": "Gets a function definition with in the current context.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\function_def_to_graph.py",
    "ast_data": "FunctionDef name:get_function_def arg:fname arg:graph arguments arg arg If Call If Call Call Return return:yes Call Call While Compare If Call Return return:yes Call Assign Call"
  },
  {
    "library": "pytorch",
    "name": "sigmoid",
    "source_code": "@_onnx_symbolic('aten::sigmoid')\n@symbolic_helper.quantized_args(True, scale=1.0 / 256.0, zero_point=0)\ndef sigmoid(g: jit_utils.GraphContext, self):\n    return g.op('Sigmoid', self)",
    "docstring": "Converts the corresponding PyTorch function into ONNX operators. It is not meant to be called directly by a user. Args: g (jit_utils.GraphContext): Graph context. self (Tensor): the input tensor. Returns: ONNX operator",
    "type": "function",
    "file_path": "pytorch\\torch\\onnx\\symbolic_opset9.py",
    "ast_data": "FunctionDef name:sigmoid arg:g arg:self arguments arg arg Return return:yes Call Call Call"
  },
  {
    "library": "matplotlib",
    "name": "RasterParse",
    "source_code": "class RasterParse(NamedTuple):\n    ox: float\n    oy: float\n    width: float\n    height: float\n    depth: float\n    image: NDArray[np.uint8]",
    "docstring": "The namedtuple type returned by ``. Attributes ---------- ox, oy : float The offsets are always zero. width, height, depth : float The global metrics. image : 2D array of uint8 A raster image.",
    "type": "class",
    "file_path": "matplotlib\\lib\\matplotlib\\_mathtext.py",
    "ast_data": "ClassDef name:RasterParse"
  },
  {
    "library": "sphinx",
    "name": "_is_node_in_substitution_definition",
    "source_code": "def _is_node_in_substitution_definition(node: nodes.Node) -> bool:\n    while node.parent:\n        if isinstance(node, nodes.substitution_definition):\n            return True\n        node = node.parent\n    return False",
    "docstring": "Check \"node\" to test if it is in a substitution definition.",
    "type": "function",
    "file_path": "sphinx\\sphinx\\builders\\gettext.py",
    "ast_data": "FunctionDef name:_is_node_in_substitution_definition arg:node arguments arg While If Call Return return:yes Assign Return return:yes"
  },
  {
    "library": "pandas",
    "name": "__ne__",
    "source_code": "def __ne__(self, other: object) -> ArrayLike:\n    return ~(self == other)",
    "docstring": "Return for (element-wise in-equality).",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\arrays\\base.py",
    "ast_data": "FunctionDef name:__ne__ arg:self arg:other arguments arg arg Return return:yes Compare"
  },
  {
    "library": "scrapy",
    "name": "ScrapesContract",
    "source_code": "class ScrapesContract(Contract):\n    name = 'scrapes'\n\n    def post_process(self, output: list[Any]) -> None:\n        for x in output:\n            if is_item(x):\n                missing = [arg for arg in self.args if arg not in ItemAdapter(x)]\n                if missing:\n                    missing_fields = ', '.join(missing)\n                    raise ContractFail(f'Missing fields: {missing_fields}')",
    "docstring": "Contract to check presence of fields in scraped items @scrapes page_name page_body",
    "type": "class",
    "file_path": "scrapy\\scrapy\\contracts\\default.py",
    "ast_data": "ClassDef name:ScrapesContract Assign FunctionDef name:post_process arg:self arg:output arguments arg arg For If Call Assign Compare Call If Assign Call Raise Call"
  },
  {
    "library": "pytorch",
    "name": "get_parameter",
    "source_code": "def get_parameter(traced, target: str):\n    module_path, _, param_name = target.rpartition('.')\n    mod: torch.nn.Module = traced.get_submodule(module_path)\n    if not hasattr(mod, param_name):\n        raise AttributeError(mod._get_name() + ' has no attribute `' + param_name + '`')\n    param: torch.nn.Parameter = getattr(mod, param_name)\n    return param",
    "docstring": "Returns the parameter given by ``",
    "type": "function",
    "file_path": "pytorch\\torch\\fx\\experimental\\graph_gradual_typechecker.py",
    "ast_data": "FunctionDef name:get_parameter arg:traced arg:target arguments arg arg Assign Call Call If Call Raise Call Call Call Return return:yes"
  },
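A usage sketch, assuming `get_parameter` is importable from the module in `file_path` above; the model structure is made up for illustration:

```python
import torch
import torch.fx
from torch.fx.experimental.graph_gradual_typechecker import get_parameter

model = torch.nn.Sequential(torch.nn.Linear(2, 3))
traced = torch.fx.symbolic_trace(model)

# "0.weight" splits into submodule path "0" and parameter name "weight"
weight = get_parameter(traced, "0.weight")
print(weight.shape)  # torch.Size([3, 2])
```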
  {
    "library": "tensorflow",
    "name": "_extend_op",
    "source_code": "def _extend_op(values, leaf_op, empty_st_op=None):\n    if not isinstance(values, Sequence):\n        raise ValueError('Expected a list')\n    if not values:\n        raise ValueError('List cannot be empty')\n    if empty_st_op is None:\n        empty_st_op = empty_st_op_like_zeros(leaf_op)\n    value = values[0]\n    if isinstance(value, StructuredTensor):\n        empty_result = empty_st_op(values)\n        if not value.field_names():\n            return empty_result\n        new_fields = {}\n        for k in value.field_names():\n            new_fields[k] = _extend_op([v.field_value(k) for v in values], leaf_op, empty_st_op)\n        return StructuredTensor.from_fields(new_fields, shape=empty_result.shape)\n    else:\n        return leaf_op(values)",
    "docstring": "Extend an op from RaggedTensor and Tensor to StructuredTensor. Visits all children of the structured tensor, and children of children, applying leaf_op whenever it reaches a leaf, and empty_st_op whenever it reaches an internal node without children. Args: values: a list of structured tensors, ragged tensors, or tensors. All must have the same type. If they are structured tensors, they must have the same paths. leaf_op: an op for handling non-structured tensor. empty_st_op: op to create a structured tensor without fields. Returns: the result of the extended op (a StructuredTensor, RaggedTensor, or Tensor) Raises: ValueError: If values is not a Sequence or is empty.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\structured\\structured_array_ops.py",
    "ast_data": "FunctionDef name:_extend_op arg:values arg:leaf_op arg:empty_st_op arguments arg arg arg If Call Raise Call If Raise Call If Compare Assign Call Assign If Call Assign Call If Call Return return:yes Assign For Call Assign Call Call Return return:yes Call Return return:yes Call"
  },
  {
    "library": "numpy",
    "name": "__rmul__",
    "source_code": "def __rmul__(self, other):\n    return multiply(other, self)",
    "docstring": "Multiply other by self, and return a new masked array.",
    "type": "method",
    "file_path": "numpy\\numpy\\ma\\core.py",
    "ast_data": "FunctionDef name:__rmul__ arg:self arg:other arguments arg arg Return return:yes Call"
  },
  {
    "library": "django",
    "name": "UserPassesTestMixin",
    "source_code": "class UserPassesTestMixin(AccessMixin):\n\n    def test_func(self):\n        raise NotImplementedError('{} is missing the implementation of the test_func() method.'.format(self.__class__.__name__))\n\n    def get_test_func(self):\n        return self.test_func\n\n    def dispatch(self, request, *args, **kwargs):\n        user_test_result = self.get_test_func()()\n        if not user_test_result:\n            return self.handle_no_permission()\n        return super().dispatch(request, *args, **kwargs)",
    "docstring": "Deny a request with a permission error if the test_func() method returns False.",
    "type": "class",
    "file_path": "django\\django\\contrib\\auth\\mixins.py",
    "ast_data": "ClassDef name:UserPassesTestMixin FunctionDef name:test_func arg:self arguments arg Raise Call Call FunctionDef name:get_test_func arg:self arguments arg Return return:yes FunctionDef name:dispatch arg:self arg:request arguments arg arg arg arg Assign Call Call If Return return:yes Call Return return:yes Call Call"
  },
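A typical usage sketch (the view and template names here are hypothetical):

```python
from django.contrib.auth.mixins import UserPassesTestMixin
from django.views.generic import TemplateView

class StaffDashboardView(UserPassesTestMixin, TemplateView):
    template_name = "dashboard.html"  # hypothetical template

    def test_func(self):
        # dispatch() denies the request unless this returns a truthy value
        return self.request.user.is_staff
```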
  {
    "library": "sphinx",
    "name": "prepare_writing",
    "source_code": "def prepare_writing(self, docnames: Set[str]) -> None:\n    pass",
    "docstring": "A place where you can add logic before :meth: is run",
    "type": "method",
    "file_path": "sphinx\\sphinx\\builders\\__init__.py",
    "ast_data": "FunctionDef name:prepare_writing arg:self arg:docnames arguments arg arg"
  },
  {
    "library": "numpy",
    "name": "fill_diagonal",
    "source_code": "@array_function_dispatch(_fill_diagonal_dispatcher)\ndef fill_diagonal(a, val, wrap=False):\n    if a.ndim < 2:\n        raise ValueError('array must be at least 2-d')\n    end = None\n    if a.ndim == 2:\n        step = a.shape[1] + 1\n        if not wrap:\n            end = a.shape[1] * a.shape[1]\n    else:\n        if not np.all(diff(a.shape) == 0):\n            raise ValueError('All dimensions of input must be of equal length')\n        step = 1 + np.cumprod(a.shape[:-1]).sum()\n    a.flat[:end:step] = val",
    "docstring": "Fill the main diagonal of the given array of any dimensionality. For an array with `valvaldiag_indicesnumpy.flipudnumpy.fliplr`. >>> a = np.zeros((3, 3), int); >>> np.fill_diagonal(np.fliplr(a), [1,2,3]) # Horizontal flip >>> a array([[0, 0, 1], [0, 2, 0], [3, 0, 0]]) >>> np.fill_diagonal(np.flipud(a), [1,2,3]) # Vertical flip >>> a array([[0, 0, 3], [0, 2, 0], [1, 0, 0]]) Note that the order in which the diagonal is filled varies depending on the flip function.",
    "type": "function",
    "file_path": "numpy\\numpy\\lib\\_index_tricks_impl.py",
    "ast_data": "FunctionDef name:fill_diagonal arg:a arg:val arg:wrap arguments arg arg arg If Compare Raise Call Assign If Compare Assign If Assign If Call Compare Call Raise Call Assign Call Call Assign Call"
  },
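A short sketch of the basic call and of the `wrap` behavior on a tall matrix:

```python
import numpy as np

a = np.zeros((3, 3), int)
np.fill_diagonal(a, 5)  # in-place; returns None
# a is now [[5, 0, 0], [0, 5, 0], [0, 0, 5]]

tall = np.zeros((5, 3), int)
np.fill_diagonal(tall, 4, wrap=True)
# with wrap=True the diagonal restarts after a blank row:
# [[4,0,0],[0,4,0],[0,0,4],[0,0,0],[4,0,0]]
```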
  {
    "library": "pytorch",
    "name": "keys",
    "source_code": "@_copy_to_script_wrapper\ndef keys(self) -> Iterable[str]:\n    return self._modules.keys()",
    "docstring": "Return an iterable of the ModuleDict keys.",
    "type": "method",
    "file_path": "pytorch\\torch\\nn\\modules\\container.py",
    "ast_data": "FunctionDef name:keys arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "always_record_summaries",
    "source_code": "def always_record_summaries():\n    return record_if(True)",
    "docstring": "Sets the should_record_summaries Tensor to always true.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\summary_ops_v2.py",
    "ast_data": "FunctionDef name:always_record_summaries arguments Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "__call__",
    "source_code": "def __call__(self, estimate_mode_type: str) -> Self:\n    if estimate_mode_type == 'operator-level-benchmark':\n        self._estimate = RuntimeEstimator._benchmark_estimate\n    elif estimate_mode_type == 'operator-level-cost-model':\n        self._estimate = RuntimeEstimator._roofline_estimate\n    else:\n        raise NotImplementedError(f'estimate_mode_type {estimate_mode_type} not supported')\n    self._estimate_mode_type = estimate_mode_type\n    return self",
    "docstring": "Sets the estimate mode type. Currently supported modes: - \"operator-level-benchmark\": Estimates runtime using operator benchmarking. - \"operator-level-cost-model\": Estimates runtime using roofline cost model. Args: estimate_mode_type (str): The type of estimate mode to use. Returns: RuntimeEstimator: The runtime estimator instance. Raises: NotImplementedError: If the estimate mode type is not supported.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\_tools\\runtime_estimator.py",
    "ast_data": "FunctionDef name:__call__ arg:self arg:estimate_mode_type arguments arg arg If Compare Assign If Compare Assign Raise Call Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "div",
    "source_code": "@tf_export(v1=['div'])\n@dispatch.register_binary_elementwise_api\n@dispatch.add_dispatch_support\n@deprecation.deprecated(date=None, instructions='Deprecated in favor of operator or tf.math.divide.')\ndef div(x, y, name=None):\n    return _div_python2(x, y, name)",
    "docstring": "Divides x / y elementwise (using Python 2 division operator semantics). @compatibility(TF2) This function is deprecated in TF2. Prefer using the Tensor division operator, , or , which obey the Python 3 division operator semantics. @end_compatibility This function divides and , forcing Python 2 semantics. That is, if and are both integers then the result will be an integer. This is in contrast to Python 3, where division with is always a float while division with is always an integer. Args: x: numerator of real numeric type. y: denominator of real numeric type. name: A name for the operation (optional). Returns: returns the quotient of x and y.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\math_ops.py",
    "ast_data": "FunctionDef name:div arg:x arg:y arg:name arguments arg arg arg Return return:yes Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "register_module_backward_hook",
    "source_code": "def register_module_backward_hook(hook: Callable[['Module', _grad_t, _grad_t], Union[None, _grad_t]]) -> RemovableHandle:\n    global _global_is_full_backward_hook\n    if _global_is_full_backward_hook is True:\n        raise RuntimeError('Cannot use both regular backward hooks and full backward hooks as a global Module hook. Please use only one of them.')\n    _global_is_full_backward_hook = False\n    handle = RemovableHandle(_global_backward_hooks)\n    _global_backward_hooks[handle.id] = hook\n    return handle",
    "docstring": "Register a backward hook common to all the modules. This function is deprecated in favor of :func: and the behavior of this function will change in future versions. Returns: :class:: a handle that can be used to remove the added hook by calling ``",
    "type": "function",
    "file_path": "pytorch\\torch\\nn\\modules\\module.py",
    "ast_data": "FunctionDef name:register_module_backward_hook arg:hook arguments arg If Compare Raise Call Assign Assign Call Assign Return return:yes"
  },
  {
    "library": "kornia",
    "name": "KORNIA_CHECK_IS_GRAY",
    "source_code": "def KORNIA_CHECK_IS_GRAY(x: Tensor, msg: Optional[str]=None, raises: bool=True) -> bool:\n    if len(x.shape) < 2 or (len(x.shape) >= 3 and x.shape[-3] != 1):\n        if raises:\n            raise TypeError(f'Not a gray tensor. Got: {type(x)}.\\n{msg}')\n        return False\n    return True",
    "docstring": "Check whether an image tensor is grayscale. Args: x: image tensor to evaluate. msg: message to show in the exception. raises: bool indicating whether an exception should be raised upon failure. Raises: TypeException: if the tensor has not a shape :math: or :math: and raises is True. Example: >>> img = torch.rand(2, 1, 4, 4) >>> KORNIA_CHECK_IS_GRAY(img, \"Image is not grayscale\") True",
    "type": "function",
    "file_path": "kornia\\kornia\\core\\check.py",
    "ast_data": "FunctionDef name:KORNIA_CHECK_IS_GRAY arg:x arg:msg arg:raises arguments arg arg arg If BoolOp Compare Call BoolOp Compare Call Compare If Raise Call Call Return return:yes Return return:yes"
  },
  {
    "library": "django",
    "name": "_output_type_handler",
    "source_code": "@staticmethod\ndef _output_type_handler(cursor, name, defaultType, length, precision, scale):\n    if defaultType == Database.NUMBER:\n        if scale == -127:\n            if precision == 0:\n                outconverter = FormatStylePlaceholderCursor._output_number_converter\n            else:\n                outconverter = float\n        elif precision > 0:\n            outconverter = FormatStylePlaceholderCursor._get_decimal_converter(precision, scale)\n        else:\n            outconverter = FormatStylePlaceholderCursor._output_number_converter\n        return cursor.var(Database.STRING, size=255, arraysize=cursor.arraysize, outconverter=outconverter)\n    elif defaultType == Database.DB_TYPE_NCLOB:\n        return cursor.var(Database.DB_TYPE_NCLOB, arraysize=cursor.arraysize)",
    "docstring": "Called for each db column fetched from cursors. Return numbers as the appropriate Python type, and NCLOB with JSON as strings.",
    "type": "method",
    "file_path": "django\\django\\db\\backends\\oracle\\base.py",
    "ast_data": "FunctionDef name:_output_type_handler arg:cursor arg:name arg:defaultType arg:length arg:precision arg:scale arguments arg arg arg arg arg arg If Compare If Compare If Compare Assign Assign If Compare Assign Call Assign Return return:yes Call If Compare Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "get_bounds",
    "source_code": "def get_bounds(self):\n    return self._bounds",
    "docstring": "Get the bounds of the spine.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\spines.py",
    "ast_data": "FunctionDef name:get_bounds arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "in_train_phase",
    "source_code": "@doc_controls.do_not_generate_docs\ndef in_train_phase(x, alt, training=None):\n    from tensorflow.python.keras.engine import base_layer_utils\n    if training is None:\n        training = base_layer_utils.call_context().training\n    if training is None:\n        training = learning_phase()\n    if not tensor_util.is_tf_type(training):\n        if training == 1 or training is True:\n            if callable(x):\n                return x()\n            else:\n                return x\n        elif training == 0 or training is False:\n            if callable(alt):\n                return alt()\n            else:\n                return alt\n    x = switch(training, x, alt)\n    return x",
    "docstring": "Selects in train phase, and otherwise. Note that should have the *same shape* as . Args: x: What to return in train phase (tensor or callable that returns a tensor). alt: What to return otherwise (tensor or callable that returns a tensor). training: Optional scalar tensor (or Python boolean, or Python integer) specifying the learning phase. Returns: Either or based on the flag. the flag defaults to .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\backend.py",
    "ast_data": "FunctionDef name:in_train_phase arg:x arg:alt arg:training arguments arg arg arg If Compare Assign Call If Compare Assign Call If Call If BoolOp Compare Compare If Call Return return:yes Call Return return:yes If BoolOp Compare Compare If Call Return return:yes Call Return return:yes Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_static_event_size",
    "source_code": "def _static_event_size(x):\n    return tensor_shape.dimension_value(x.shape.with_rank_at_least(1)[-1])",
    "docstring": "Returns the static size of a specific dimension or .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\distributions\\util.py",
    "ast_data": "FunctionDef name:_static_event_size arg:x arguments arg Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "concat",
    "source_code": "def concat(self, name=None):\n    return self._implementation.concat(name=name)",
    "docstring": "Return the values in the TensorArray as a concatenated . All of the values must have been written, their ranks must match, and and their shapes must all match for all dimensions except the first. Args: name: A name for the operation (optional). Returns: All the tensors in the TensorArray concatenated into one tensor.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\tensor_array_ops.py",
    "ast_data": "FunctionDef name:concat arg:self arg:name arguments arg arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "AutogradEngineVariable",
    "source_code": "class AutogradEngineVariable(UserDefinedObjectVariable):\n\n    def __init__(self, value, value_type=None, **kwargs) -> None:\n        super().__init__(value=value, value_type=value_type, **kwargs)\n\n    def call_method(self, tx: 'InstructionTranslator', name, args: 'list[VariableTracker]', kwargs: 'dict[str, VariableTracker]') -> 'VariableTracker':\n        if name == 'queue_callback':\n            if torch._dynamo.compiled_autograd.in_compiled_autograd_region:\n                assert tx.one_graph, 'queue_callback() is only supported when Compiled Autograd is enabled with fullgraph=True'\n                return variables.UserFunctionVariable(torch._dynamo.external_utils.FakeCompiledAutogradEngine.queue_callback, source=self.source).call_function(tx, (tx.output.side_effects.get_ca_final_callbacks_var(), *args), kwargs)\n            else:\n                unimplemented_v2(gb_type='Unsupported torch._C._ImperativeEngine.queue_callback()', context=f'call_method {self} {name}', explanation='queue_callback() is only supported when Compiled Autograd is enabled with fullgraph=True.', hints=[])\n        else:\n            unimplemented_v2(gb_type='Unsupported torch._C._ImperativeEngine method', context=f'call_method {self} {name}', explanation=f'Dynamo only supports the `queue_callback` method on a torch._C._ImperativeEngine instance, but found: `{name}`.', hints=[])",
    "docstring": "Represents a torch._C._ImperativeEngine instance.",
    "type": "class",
    "file_path": "pytorch\\torch\\_dynamo\\variables\\misc.py",
    "ast_data": "ClassDef name:AutogradEngineVariable FunctionDef name:__init__ arg:self arg:value arg:value_type arguments arg arg arg arg Call Call FunctionDef name:call_method arg:self arg:tx arg:name arg:args arg:kwargs arguments arg arg arg arg arg If Compare If Return return:yes Call Call Call Call Call"
  },
  {
    "library": "pygame",
    "name": "wait",
    "source_code": "def wait():\n    _ft_init_check()\n    return pygame.event.wait()",
    "docstring": "wait() -> Event wait for an event",
    "type": "function",
    "file_path": "pygame\\src_py\\fastevent.py",
    "ast_data": "FunctionDef name:wait arguments Call Return return:yes Call"
  },
  {
    "library": "numpy",
    "name": "__ge__",
    "source_code": "def __ge__(self, other):\n    return greater_equal(self, other)",
    "docstring": "Return (self >= other) element-wise. See Also -------- greater_equal",
    "type": "method",
    "file_path": "numpy\\numpy\\_core\\defchararray.py",
    "ast_data": "FunctionDef name:__ge__ arg:self arg:other arguments arg arg Return return:yes Call"
  },
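A quick illustration of the comparison this dunder delegates to (np.char.greater_equal); the array contents here are arbitrary:

```python
import numpy as np

# __ge__ on chararrays delegates to np.char.greater_equal;
# a minimal check of the element-wise comparison.
a = np.char.asarray(['b', 'a', 'c'])
print(a >= 'b')                                    # [ True False  True]
print(np.char.greater_equal(a, ['a', 'b', 'c']))   # [ True False  True]
```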
  {
    "library": "pandas",
    "name": "_get_value",
    "source_code": "def _get_value(self, label, takeable: bool=False):\n    if takeable:\n        return self._values[label]\n    loc = self.index.get_loc(label)\n    if is_integer(loc):\n        return self._values[loc]\n    if isinstance(self.index, MultiIndex):\n        mi = self.index\n        new_values = self._values[loc]\n        if len(new_values) == 1 and mi.nlevels == 1:\n            return new_values[0]\n        new_index = mi[loc]\n        new_index = maybe_droplevels(new_index, label)\n        new_ser = self._constructor(new_values, index=new_index, name=self.name, copy=False)\n        if isinstance(loc, slice):\n            new_ser._mgr.add_references(self._mgr)\n        return new_ser.__finalize__(self)\n    else:\n        return self.iloc[loc]",
    "docstring": "Quickly retrieve single value at passed index label. Parameters ---------- label : object takeable : interpret the index as indexers, default False Returns ------- scalar value",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\series.py",
    "ast_data": "FunctionDef name:_get_value arg:self arg:label arg:takeable arguments arg arg arg If Return return:yes Assign Call If Call Return return:yes If Call Assign Assign If BoolOp Compare Call Compare Return return:yes Assign Assign Call Assign Call If Call Call Return return:yes Call Return return:yes"
  },
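For context, a minimal sketch of how this private lookup surfaces through the public accessors; the Series contents are illustrative:

```python
import pandas as pd

# _get_value is internal; the public .at accessor performs the same
# single-label lookup, and takeable=True switches to positional lookup.
s = pd.Series([10, 20, 30], index=['a', 'b', 'c'])
print(s.at['b'])                        # 20, label-based (takeable=False)
print(s._get_value(1, takeable=True))   # 20, positional
```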
  {
    "library": "tensorflow",
    "name": "compute_mask",
    "source_code": "@generic_utils.default\ndef compute_mask(self, inputs, mask=None):\n    if not self.supports_masking:\n        if any((m is not None for m in nest.flatten(mask))):\n            raise TypeError('Layer ' + self.name + ' does not support masking, but was passed an input_mask: ' + str(mask))\n        return None\n    return mask",
    "docstring": "Computes an output mask tensor. Args: inputs: Tensor or list of tensors. mask: Tensor or list of tensors. Returns: None or a tensor (or list of tensors, one per output tensor of the layer).",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\base_layer_v1.py",
    "ast_data": "FunctionDef name:compute_mask arg:self arg:inputs arg:mask arguments arg arg arg If If Call Compare Call Raise Call Call Return return:no Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "register",
    "source_code": "def register(name):\n    if not isinstance(name, str):\n        raise TypeError('Expected `name` to be a string; got %r' % (name,))\n    if not _REGISTERED_NAME_RE.match(name):\n        raise ValueError(\"Registered name must have the form '{project_name}.{type_name}' (e.g. 'my_project.MyTypeSpec'); got %r.\" % name)\n\n    def decorator_fn(cls):\n        if not (isinstance(cls, type) and issubclass(cls, internal.TypeSpec)):\n            raise TypeError('Expected `cls` to be a TypeSpec; got %r' % (cls,))\n        if cls in _TYPE_SPEC_TO_NAME:\n            raise ValueError('Class %s.%s has already been registered with name %s.' % (cls.__module__, cls.__name__, _TYPE_SPEC_TO_NAME[cls]))\n        if name in _NAME_TO_TYPE_SPEC:\n            raise ValueError('Name %s has already been registered for class %s.%s.' % (name, _NAME_TO_TYPE_SPEC[name].__module__, _NAME_TO_TYPE_SPEC[name].__name__))\n        _TYPE_SPEC_TO_NAME[cls] = name\n        _NAME_TO_TYPE_SPEC[name] = cls\n        return cls\n    return decorator_fn",
    "docstring": "Decorator used to register a globally unique name for a TypeSpec subclass. Args: name: The name of the type spec. Must be globally unique. Must have the form . E.g. . Returns: A class decorator that registers the decorated class with the given name.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\type_spec_registry.py",
    "ast_data": "FunctionDef name:register arg:name arguments arg If Call Raise Call If Call Raise Call FunctionDef name:decorator_fn arg:cls arguments arg If BoolOp Call Call Raise Call If Compare Raise Call If Compare Raise Call Assign Assign Return return:yes Return return:yes"
  },
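A minimal sketch of the decorator's contract, using the internal module paths from this record; MyTypeSpec and the registered name are hypothetical:

```python
# Internal module paths taken from this record; MyTypeSpec and the
# registered name are hypothetical. A real subclass must also implement
# TypeSpec's abstract API before it can be instantiated.
from tensorflow.python.framework import type_spec
from tensorflow.python.framework import type_spec_registry

@type_spec_registry.register('my_project.MyTypeSpec')  # '{project_name}.{type_name}' form
class MyTypeSpec(type_spec.TypeSpec):
    pass  # value_type, _serialize, _component_specs, etc. omitted in this sketch
```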
  {
    "library": "scipy",
    "name": "todense",
    "source_code": "def todense(self, order=None, out=None):\n    return super().todense(order, out)",
    "docstring": "Return a dense representation of this sparse matrix. Parameters ---------- order : {'C', 'F'}, optional Whether to store multi-dimensional data in C (row-major) or Fortran (column-major) order in memory. The default is 'None', which provides no ordering guarantees. Cannot be specified in conjunction with the argument. out : ndarray, 2-D, optional If specified, uses this array (or ) as the output buffer instead of allocating a new array to return. The provided array must have the same shape and dtype as the sparse matrix on which you are calling the method. Returns ------- arr : numpy.matrix, 2-D A NumPy matrix object with the same shape and containing the same data represented by the sparse matrix, with the requested memory order. If was passed and was an array (rather than a ), it will be filled with the appropriate values and returned wrapped in a object that shares the same memory.",
    "type": "method",
    "file_path": "scipy\\scipy\\sparse\\_matrix.py",
    "ast_data": "FunctionDef name:todense arg:self arg:order arg:out arguments arg arg arg Return return:yes Call Call"
  },
  {
    "library": "django",
    "name": "add_deferred_loading",
    "source_code": "def add_deferred_loading(self, field_names):\n    existing, defer = self.deferred_loading\n    if defer:\n        self.deferred_loading = (existing.union(field_names), True)\n    elif (new_existing := existing.difference(field_names)):\n        self.deferred_loading = (new_existing, False)\n    else:\n        self.clear_deferred_loading()\n        if (new_only := set(field_names).difference(existing)):\n            self.deferred_loading = (new_only, True)",
    "docstring": "Add the given list of model field names to the set of fields to exclude from loading from the database when automatic column selection is done. Add the new field names to any existing field names that are deferred (or removed from any existing field names that are marked as the only ones for immediate loading).",
    "type": "method",
    "file_path": "django\\django\\db\\models\\sql\\query.py",
    "ast_data": "FunctionDef name:add_deferred_loading arg:self arg:field_names arguments arg arg Assign If Assign Call If Call Assign Call If Call Call Assign"
  },
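A hedged sketch of how this method is reached in practice: QuerySet.defer() funnels into add_deferred_loading, so repeated defer() calls accumulate through the union branch. Entry is a hypothetical model:

```python
# Entry is a hypothetical model. defer() calls funnel into
# add_deferred_loading, so names accumulate via the union branch.
qs = Entry.objects.defer('body').defer('headline')
print(qs.query.deferred_loading)
# (frozenset({'body', 'headline'}), True)

# only() switches to immediate-loading mode; a later defer() then
# removes the name from that set (the difference branch above).
qs2 = Entry.objects.only('headline', 'pub_date').defer('pub_date')
```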
  {
    "library": "matplotlib",
    "name": "get_figheight",
    "source_code": "def get_figheight(self):\n    return self.bbox_inches.height",
    "docstring": "Return the figure height in inches.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\figure.py",
    "ast_data": "FunctionDef name:get_figheight arg:self arguments arg Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "scale",
    "source_code": "def scale(self, sx, sy=None):\n    if sy is None:\n        sy = sx\n    self._mtx[0, 0] *= sx\n    self._mtx[0, 1] *= sx\n    self._mtx[0, 2] *= sx\n    self._mtx[1, 0] *= sy\n    self._mtx[1, 1] *= sy\n    self._mtx[1, 2] *= sy\n    self.invalidate()\n    return self",
    "docstring": "Add a scale in place. If *sy* is None, the same scale is applied in both the *x*- and *y*-directions. Returns *self*, so this method can easily be chained with more calls to :meth:, :meth:, :meth: and :meth:.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\transforms.py",
    "ast_data": "FunctionDef name:scale arg:self arg:sx arg:sy arguments arg arg arg If Compare Assign Call Return return:yes"
  },
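A small runnable check of the in-place scale and chaining behavior described above:

```python
import matplotlib.transforms as mtransforms

# scale() mutates the 3x3 affine matrix in place and returns self,
# so calls can be chained.
t = mtransforms.Affine2D().scale(2, 3).translate(1, 0)
print(t.transform([[1.0, 1.0]]))   # [[3. 3.]]: (1,1) -> scale -> (2,3) -> translate -> (3,3)
```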
  {
    "library": "pytorch",
    "name": "_count_initializer_size",
    "source_code": "def _count_initializer_size(graph: ir.Graph) -> int:\n    return sum((v.const_value.nbytes for v in graph.initializers.values() if v.const_value is not None))",
    "docstring": "Count the total size of the initializers in bytes.",
    "type": "function",
    "file_path": "pytorch\\torch\\onnx\\_internal\\exporter\\_onnx_program.py",
    "ast_data": "FunctionDef name:_count_initializer_size arg:graph arguments arg Return return:yes Call Call Compare"
  },
  {
    "library": "numpy",
    "name": "_compute_hash",
    "source_code": "def _compute_hash(idirs, hashfunc):\n    released = paver.path.path(idirs).listdir()\n    checksums = []\n    for fpath in sorted(released):\n        with open(fpath, 'rb') as fin:\n            fhash = hashfunc(fin.read())\n            checksums.append(f'{fhash.hexdigest()}  {os.path.basename(fpath)}')\n    return checksums",
    "docstring": "Hash files using given hashfunc. Parameters ---------- idirs : directory path Directory containing files to be hashed. hashfunc : hash function Function to be used to hash the files.",
    "type": "function",
    "file_path": "numpy\\pavement.py",
    "ast_data": "FunctionDef name:_compute_hash arg:idirs arg:hashfunc arguments arg arg Assign Call Call Assign For Call With Call Assign Call Call Call Call Call Return return:yes"
  },
  {
    "library": "kornia",
    "name": "grayscale_to_rgb",
    "source_code": "def grayscale_to_rgb(image: Tensor) -> Tensor:\n    KORNIA_CHECK_IS_TENSOR(image)\n    if len(image.shape) < 3 or image.shape[-3] != 1:\n        raise ValueError(f'Input size must have a shape of (*, 1, H, W). Got {image.shape}.')\n    return concatenate([image, image, image], -3)",
    "docstring": "Convert a grayscale image to RGB version of image. .. image:: _static/img/grayscale_to_rgb.png The image data is assumed to be in the range of (0, 1). Args: image: grayscale image tensor to be converted to RGB with shape :math:. Returns: RGB version of the image with shape :math:. Example: >>> input = torch.randn(2, 1, 4, 5) >>> gray = grayscale_to_rgb(input) # 2x3x4x5",
    "type": "function",
    "file_path": "kornia\\kornia\\color\\gray.py",
    "ast_data": "FunctionDef name:grayscale_to_rgb arg:image arguments arg Call If BoolOp Compare Call Compare Raise Call Return return:yes Call"
  },
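Since the conversion just repeats the single channel three times along dim -3, a pure-torch sketch shows an equivalent view-based alternative (expand shares memory, concatenation copies):

```python
import torch

# What grayscale_to_rgb does, versus a zero-copy view with the same values.
gray = torch.rand(2, 1, 4, 5)
rgb_copy = torch.cat([gray, gray, gray], dim=-3)   # what the function computes
rgb_view = gray.expand(-1, 3, -1, -1)              # equivalent values, shared memory
assert torch.equal(rgb_copy, rgb_view)
```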
  {
    "library": "tensorflow",
    "name": "_set_diag_operators",
    "source_code": "def _set_diag_operators(self, diag_update, is_diag_update_positive):\n    if diag_update is not None:\n        self._diag_operator = linear_operator_diag.LinearOperatorDiag(self._diag_update, is_positive_definite=is_diag_update_positive)\n    else:\n        if tensor_shape.dimension_value(self.u.shape[-1]) is not None:\n            r = tensor_shape.dimension_value(self.u.shape[-1])\n        else:\n            r = array_ops.shape(self.u)[-1]\n        self._diag_operator = linear_operator_identity.LinearOperatorIdentity(num_rows=r, dtype=self.dtype)",
    "docstring": "Set attributes self._diag_update and self._diag_operator.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\linalg\\linear_operator_low_rank_update.py",
    "ast_data": "FunctionDef name:_set_diag_operators arg:self arg:diag_update arg:is_diag_update_positive arguments arg arg arg If Compare Assign Call If Compare Call Assign Call Assign Call Assign Call"
  },
  {
    "library": "tensorflow",
    "name": "VarLenFeature",
    "source_code": "@tf_export('io.VarLenFeature', v1=['VarLenFeature', 'io.VarLenFeature'])\nclass VarLenFeature(collections.namedtuple('VarLenFeature', ['dtype'])):\n    pass",
    "docstring": "Configuration for parsing a variable-length input feature. Fields: dtype: Data type of input.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\parsing_config.py",
    "ast_data": "ClassDef name:VarLenFeature Call Call"
  },
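A short usage sketch: a VarLenFeature entry in a parsing spec yields a tf.sparse.SparseTensor, since examples may carry different numbers of values; the feature name 'tokens' is illustrative:

```python
import tensorflow as tf

# Each example may contribute a different number of values, so the
# parsed result is a SparseTensor rather than a dense Tensor.
spec = {'tokens': tf.io.VarLenFeature(tf.string)}
example = tf.train.Example(features=tf.train.Features(feature={
    'tokens': tf.train.Feature(bytes_list=tf.train.BytesList(value=[b'a', b'b']))
}))
parsed = tf.io.parse_single_example(example.SerializeToString(), spec)
print(parsed['tokens'])   # SparseTensor with values [b'a', b'b']
```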
  {
    "library": "tensorflow",
    "name": "start_queue_runners",
    "source_code": "def start_queue_runners(self, sess, queue_runners=None):\n    if context.executing_eagerly():\n        raise RuntimeError('Queues are not compatible with eager execution.')\n    if queue_runners is None:\n        queue_runners = self._graph.get_collection(ops.GraphKeys.QUEUE_RUNNERS)\n    threads = []\n    for qr in queue_runners:\n        threads.extend(qr.create_threads(sess, coord=self._coord, daemon=True, start=True))\n    return threads",
    "docstring": "Start threads for . Note that the queue runners collected in the graph key are already started automatically when you create a session with the supervisor, so unless you have non-collected queue runners to start you do not need to call this explicitly. Args: sess: A . queue_runners: A list of . If not specified, we'll use the list of queue runners gathered in the graph under the key . Returns: The list of threads started for the . Raises: RuntimeError: If called with eager execution enabled. @compatibility(eager) Queues are not compatible with eager execution. To ingest data when eager execution is enabled, use the API. @end_compatibility",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\training\\supervisor.py",
    "ast_data": "FunctionDef name:start_queue_runners arg:self arg:sess arg:queue_runners arguments arg arg arg If Call Raise Call If Compare Assign Call Assign For Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_unused_constant",
    "source_code": "def _unused_constant(node: torch.fx.Node) -> Optional[list[torch.fx.Node]]:\n    if len(node.users) > 1:\n        return None\n    lift_fresh_node = next(iter(node.users.keys()))\n    if not (lift_fresh_node.op == 'call_function' and lift_fresh_node.target in (torch.ops.aten.lift_fresh.default, torch.ops.aten.lift_fresh_copy.default)):\n        return None\n    if len(lift_fresh_node.users) > 1:\n        return None\n    detach_node = next(iter(lift_fresh_node.users.keys()))\n    if not (detach_node.op == 'call_function' and detach_node.target in (torch.ops.aten.detach_.default, torch.ops.aten.detach.default)):\n        return None\n    if len(detach_node.users) > 0:\n        return None\n    else:\n        return [detach_node, lift_fresh_node, node]",
    "docstring": "If there is a tensor constant created while tracing, here is how the graph looks like: %_tensor_constant0 : [num_users=1] = get_attr[target=_tensor_constant0] %lift_fresh_copy : ) %detach_ : ) To check to see if the tensor constant is being used, we want to traverse to the detach node to see if it's actually being used. This function returns None if this constant is being used, otherwise it returns the lift_fresh and detach node to be removed later.",
    "type": "function",
    "file_path": "pytorch\\torch\\_export\\passes\\lift_constants_pass.py",
    "ast_data": "FunctionDef name:_unused_constant arg:node arguments arg If Compare Call Return return:no Assign Call Call Call If BoolOp Compare Compare Return return:no If Compare Call Return return:no Assign Call Call Call If BoolOp Compare Compare Return return:no If Compare Call Return return:no Return return:yes"
  },
  {
    "library": "numpy",
    "name": "copy",
    "source_code": "def copy(self):\n    return self.__class__(self.coef, self.domain, self.window, self.symbol)",
    "docstring": "Return a copy. Returns ------- new_series : series Copy of self.",
    "type": "method",
    "file_path": "numpy\\numpy\\polynomial\\_polybase.py",
    "ast_data": "FunctionDef name:copy arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "sharded_type_as",
    "source_code": "def sharded_type_as(args, kwargs, pg):\n    st = args[0]\n    tensor = args[1]\n    if isinstance(tensor, ShardedTensor):\n        tensor = tensor.local_tensor()\n    new_local_shards = [Shard(shard.tensor.type_as(tensor), shard.metadata) for shard in st.local_shards()]\n    st_meta = copy.deepcopy(st._metadata)\n    st_meta.tensor_properties.dtype = tensor.dtype\n    return (new_local_shards, st_meta)",
    "docstring": "Handles ``. Return: new_local_shards (List[Shard]): Local shards for the new sharded tensor. st_meta (ShardedTensorMetadata): Metadata of the new sharded tensor.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\_shard\\sharded_tensor\\_ops\\tensor_ops.py",
    "ast_data": "FunctionDef name:sharded_type_as arg:args arg:kwargs arg:pg arguments arg arg arg Assign Assign If Call Assign Call Assign Call Call Call Assign Call Assign Return return:yes"
  },
  {
    "library": "numpy",
    "name": "array",
    "source_code": "def array(data, dtype=None, copy=False, order=None, mask=nomask, fill_value=None, keep_mask=True, hard_mask=False, shrink=True, subok=True, ndmin=0):\n    return MaskedArray(data, mask=mask, dtype=dtype, copy=copy, subok=subok, keep_mask=keep_mask, hard_mask=hard_mask, fill_value=fill_value, ndmin=ndmin, shrink=shrink, order=order)",
    "docstring": "Shortcut to MaskedArray. The options are in a different order for convenience and backwards compatibility.",
    "type": "function",
    "file_path": "numpy\\numpy\\ma\\core.py",
    "ast_data": "FunctionDef name:array arg:data arg:dtype arg:copy arg:order arg:mask arg:fill_value arg:keep_mask arg:hard_mask arg:shrink arg:subok arg:ndmin arguments arg arg arg arg arg arg arg arg arg arg arg Return return:yes Call"
  },
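A minimal usage example of this shortcut; masked entries are skipped by reductions:

```python
import numpy as np

# np.ma.array is this shortcut: mask marks entries to ignore.
a = np.ma.array([1.0, 2.0, 3.0], mask=[False, True, False])
print(a.mean())        # 2.0 -- the masked value is skipped
print(a.filled(0.0))   # [1. 0. 3.]
```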
  {
    "library": "tensorflow",
    "name": "cluster_resolver",
    "source_code": "@property\ndef cluster_resolver(self):\n    return self.extended._cluster_resolver",
    "docstring": "Returns the cluster resolver associated with this strategy. As a multi-worker strategy, provides the associated . If the user provides one in , that instance is returned; if the user does not, a default is provided.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\collective_all_reduce_strategy.py",
    "ast_data": "FunctionDef name:cluster_resolver arg:self arguments arg Return return:yes"
  },
  {
    "library": "numpy",
    "name": "write_release",
    "source_code": "@task\ndef write_release(options):\n    rdir = options.installers.releasedir\n    write_release_task(options, os.path.join(rdir, 'README'))",
    "docstring": "Write the README files. Two README files are generated from the release notes, one in `` decorator.",
    "type": "function",
    "file_path": "numpy\\pavement.py",
    "ast_data": "FunctionDef name:write_release arg:options arguments arg Assign Call Call"
  },
  {
    "library": "pytorch",
    "name": "aten_add",
    "source_code": "@onnx_impl((aten.add.Tensor, aten.add.Scalar, operator.add), trace_only=True)\ndef aten_add(self: TReal, other: TReal, alpha: float=1.0) -> TReal:\n    if alpha != 1.0:\n        alpha = op.CastLike(alpha, other)\n        other = op.Mul(other, alpha)\n    return op.Add(self, other)",
    "docstring": "add.Tensor(Tensor self, Tensor other, *, Scalar alpha=1) -> Tensor",
    "type": "function",
    "file_path": "pytorch\\torch\\onnx\\_internal\\exporter\\_torchlib\\ops\\core.py",
    "ast_data": "FunctionDef name:aten_add arg:self arg:other arg:alpha arguments arg arg arg If Compare Assign Call Assign Call Return return:yes Call Call"
  },
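The lowering above mirrors the eager alpha semantics, i.e. add(self, other, alpha) == self + alpha * other; a quick numeric check:

```python
import torch

# alpha scales the second operand before the addition.
a, b = torch.tensor([1.0, 2.0]), torch.tensor([10.0, 20.0])
print(torch.add(a, b, alpha=0.5))   # tensor([ 6., 12.])
print(a + 0.5 * b)                  # same result
```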
  {
    "library": "pandas",
    "name": "get_storer",
    "source_code": "def get_storer(self, key: str) -> GenericFixed | Table:\n    group = self.get_node(key)\n    if group is None:\n        raise KeyError(f'No object named {key} in the file')\n    s = self._create_storer(group)\n    s.infer_axes()\n    return s",
    "docstring": "return the storer object for a key, raise if not in the file",
    "type": "method",
    "file_path": "pandas\\pandas\\io\\pytables.py",
    "ast_data": "FunctionDef name:get_storer arg:self arg:key arguments arg arg Assign Call If Compare Raise Call Assign Call Call Return return:yes"
  },
  {
    "library": "sphinx",
    "name": "find_tags",
    "source_code": "def find_tags(self) -> dict[str, tuple[str, int, int]]:\n    self.analyze()\n    return self.tags",
    "docstring": "Find class, function and method definitions and their location.",
    "type": "method",
    "file_path": "sphinx\\sphinx\\pycode\\__init__.py",
    "ast_data": "FunctionDef name:find_tags arg:self arguments arg Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "check_function_argument_count",
    "source_code": "def check_function_argument_count(func, input_arity, infeed_queue):\n\n    def format_error(complaint, quantity):\n        return '%s %d argument%s' % (complaint, quantity, '' if quantity == 1 else 's')\n    num_args_supplied = input_arity\n    if infeed_queue is not None:\n        num_args_supplied += infeed_queue.number_of_tuple_elements\n    arg_spec = tf_inspect.getargspec(func)\n    num_func_args = len(arg_spec.args)\n    if arg_spec.defaults is None:\n        num_func_defaults = 0\n    else:\n        num_func_defaults = len(arg_spec.defaults)\n    min_func_args = num_func_args - num_func_defaults\n    if num_args_supplied < min_func_args:\n        if num_func_defaults == 0 and arg_spec.varargs is None:\n            return format_error('exactly', num_func_args)\n        else:\n            return format_error('at least', min_func_args)\n    if arg_spec.varargs is None and num_args_supplied > num_func_args:\n        if num_func_defaults == 0:\n            return format_error('exactly', num_func_args)\n        else:\n            return format_error('at most', num_func_args)\n    return None",
    "docstring": "Validate the number of input arguments to an XLA function. Args: func: the Python function that will be called to generate the body of an XLA computation graph. input_arity: the number of explicit arguments supplied by the caller. infeed_queue: if not None, the infeed queue that will supply additional arguments to the function. Returns: None if function can be called with the supplied number of arguments, or an error string if it cannot.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\compiler\\xla\\xla.py",
    "ast_data": "FunctionDef name:check_function_argument_count arg:func arg:input_arity arg:infeed_queue arguments arg arg arg FunctionDef name:format_error arg:complaint arg:quantity arguments arg arg Return return:yes Compare Assign If Compare Assign Call Assign Call If Compare Assign Assign Call Assign If Compare If BoolOp Compare Compare Return return:yes Call Return return:yes Call If BoolOp Compare Compare If Compare Return return:yes Call Return return:yes Call Return return:no"
  },
  {
    "library": "kornia",
    "name": "Scale",
    "source_code": "class Scale(Module):\n\n    def __init__(self, scale_factor: Tensor, center: Union[None, Tensor]=None, mode: str='bilinear', padding_mode: str='zeros', align_corners: bool=True) -> None:\n        super().__init__()\n        self.scale_factor: Tensor = scale_factor\n        self.center: Union[None, Tensor] = center\n        self.mode: str = mode\n        self.padding_mode: str = padding_mode\n        self.align_corners: bool = align_corners\n\n    def forward(self, input: Tensor) -> Tensor:\n        return scale(input, self.scale_factor, self.center, self.mode, self.padding_mode, self.align_corners)",
    "docstring": "Scale the tensor by a factor. Args: scale_factor: The scale factor apply. The tensor must have a shape of (B) or (B, 2), where B is batch size. If (B), isotropic scaling will perform. If (B, 2), x-y-direction specific scaling will perform. center: The center through which to scale. The tensor must have a shape of (B, 2), where B is batch size and last dimension contains cx and cy. mode: interpolation mode to calculate output values ``. align_corners: interpolation flag. Returns: The scaled tensor with the same shape as the input. Example: >>> img = torch.rand(1, 3, 4, 4) >>> scale_factor = torch.tensor([[2., 2.]]) >>> out = Scale(scale_factor)(img) >>> print(out.shape) torch.Size([1, 3, 4, 4])",
    "type": "class",
    "file_path": "kornia\\kornia\\geometry\\transform\\affwarp.py",
    "ast_data": "ClassDef name:Scale FunctionDef name:__init__ arg:self arg:scale_factor arg:center arg:mode arg:padding_mode arg:align_corners arguments arg arg arg arg arg arg Call Call FunctionDef name:forward arg:self arg:input arguments arg arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "NoneTensor",
    "source_code": "class NoneTensor(composite_tensor.CompositeTensor):\n\n    @property\n    def _type_spec(self):\n        return NoneTensorSpec()",
    "docstring": "Composite tensor representation for value.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\none_tensor.py",
    "ast_data": "ClassDef name:NoneTensor FunctionDef name:_type_spec arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "can_order_nccl",
    "source_code": "def can_order_nccl(self):\n    return self._use_ordering_token()",
    "docstring": "Whether this launcher can order NCCL operations.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\cross_device_utils.py",
    "ast_data": "FunctionDef name:can_order_nccl arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "_decompose_scatter_mutating",
    "source_code": "def _decompose_scatter_mutating(graph: torch.fx.Graph, node: torch.fx.Node) -> torch.fx.Node:\n    assert node.target in (_generalized_scatter, _inplace_generalized_scatter)\n    inp, src, view_ops = node.args\n    assert not node.kwargs\n    if node.target is _generalized_scatter:\n        inp = graph_call_function(graph, aten.clone, inp)\n    tmp = inp\n    for view in view_ops:\n        tmp = graph_call_function(graph, view.target, tmp, *view.args, **view.kwargs)\n    graph_call_function(graph, aten.copy_.default, tmp, src)\n    return inp",
    "docstring": "Decompose _generalized_scatter using mutations e.g. _generalized_scatter(inp, src, [(aten.slice, 0, 0, 10), (aten.slice, 1, 10, -10)]) will become inp_updated = aten.clone(inp) slice1 = aten.slice(inp_updated, 0, 0, 10) slice2 = aten.slice(slice1, 1, 10, -10) slice2.copy_(src)",
    "type": "function",
    "file_path": "pytorch\\torch\\_inductor\\fx_passes\\reinplace.py",
    "ast_data": "FunctionDef name:_decompose_scatter_mutating arg:graph arg:node arguments arg arg Compare Assign If Compare Assign Call Assign For Assign Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "deserialize",
    "source_code": "def deserialize(config, custom_objects=None):\n    from tensorflow.python.keras.mixed_precision import loss_scale_optimizer\n    all_classes = {'adadelta': adadelta_v2.Adadelta, 'adagrad': adagrad_v2.Adagrad, 'adam': adam_v2.Adam, 'adamax': adamax_v2.Adamax, 'nadam': nadam_v2.Nadam, 'rmsprop': rmsprop_v2.RMSprop, 'sgd': gradient_descent_v2.SGD, 'ftrl': ftrl.Ftrl, 'lossscaleoptimizer': loss_scale_optimizer.LossScaleOptimizer, 'lossscaleoptimizerv1': loss_scale_optimizer.LossScaleOptimizer}\n    if config['class_name'].lower() in all_classes:\n        config['class_name'] = config['class_name'].lower()\n    return deserialize_keras_object(config, module_objects=all_classes, custom_objects=custom_objects, printable_module_name='optimizer')",
    "docstring": "Inverse of the function. Args: config: Optimizer configuration dictionary. custom_objects: Optional dictionary mapping names (strings) to custom objects (classes and functions) to be considered during deserialization. Returns: A Keras Optimizer instance.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\optimizers.py",
    "ast_data": "FunctionDef name:deserialize arg:config arg:custom_objects arguments arg arg Assign If Compare Call Assign Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "half_to_int",
    "source_code": "def half_to_int(f: float) -> int:\n    buf = struct.pack('f', f)\n    return struct.unpack('i', buf)[0]",
    "docstring": "Casts a half-precision float value into an integer. Converts a half precision floating point value, such as or , into an integer value which can be written into the half_val field of a TensorProto for storage. To undo the effects of this conversion, use int_to_half().",
    "type": "function",
    "file_path": "pytorch\\torch\\utils\\tensorboard\\summary.py",
    "ast_data": "FunctionDef name:half_to_int arg:f arguments arg Assign Call Return return:yes Call"
  },
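A self-contained check of the bit reinterpretation (the function packs to float32 bytes and unpacks them as an int32):

```python
import struct

# Reinterpret the 4-byte float32 encoding as an int32 and compare
# against a known IEEE-754 bit pattern.
def half_to_int(f: float) -> int:
    return struct.unpack('i', struct.pack('f', f))[0]

print(hex(half_to_int(1.0)))   # 0x3f800000, the float32 bits of 1.0
```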
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, criterion, description=None, font_attr=DEFAULT_TENSOR_ELEMENT_HIGHLIGHT_FONT_ATTR):\n    self.criterion = criterion\n    self.description = description\n    self.font_attr = font_attr",
    "docstring": "Constructor of HighlightOptions. Args: criterion: (callable) A callable of the following signature: def to_highlight(X): # Args: # X: The tensor to highlight elements in. # # Returns: # (boolean ndarray) A boolean ndarray of the same shape as X # indicating which elements are to be highlighted (iff True). This callable will be used as the argument of np.argwhere() to determine which elements of the tensor are to be highlighted. description: (str) Description of the highlight criterion embodied by criterion. font_attr: (str) Font attribute to be applied to the highlighted elements.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\debug\\cli\\tensor_format.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:criterion arg:description arg:font_attr arguments arg arg arg arg Assign Assign Assign"
  },
  {
    "library": "tensorflow",
    "name": "register",
    "source_code": "def register(self, method_name: str, func: Union[def_function.Function, tf_function.ConcreteFunction]):\n    raise NotImplementedError('Please use create_server method to create aconcrete subclass of Server.')",
    "docstring": "Method for registering tf.function on server. Registered methods can be invoked remotely from clients. Args: method_name: Name of the tf.function. Clients use this method_name to make RPCs. func: A or ConcreteFunction to register.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\experimental\\rpc\\rpc_ops.py",
    "ast_data": "FunctionDef name:register arg:self arg:method_name arg:func arguments arg arg arg Raise Call"
  },
  {
    "library": "scipy",
    "name": "isspmatrix",
    "source_code": "def isspmatrix(x):\n    return isinstance(x, spmatrix)",
    "docstring": "Is of a sparse matrix type? Parameters ---------- x object to check for being a sparse matrix Returns ------- bool True if is a sparse matrix, False otherwise Examples -------- >>> import numpy as np >>> from scipy.sparse import csr_array, csr_matrix, isspmatrix >>> isspmatrix(csr_matrix([[5]])) True >>> isspmatrix(csr_array([[5]])) False >>> isspmatrix(np.array([[5]])) False >>> isspmatrix(5) False",
    "type": "function",
    "file_path": "scipy\\scipy\\sparse\\_base.py",
    "ast_data": "FunctionDef name:isspmatrix arg:x arguments arg Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "get_yaxis_text2_transform",
    "source_code": "def get_yaxis_text2_transform(self, pad_points):\n    labels_align = mpl.rcParams['ytick.alignment']\n    return (self.get_yaxis_transform(which='tick2') + mtransforms.ScaledTranslation(pad_points / 72, 0, self.get_figure(root=False).dpi_scale_trans), labels_align, 'left')",
    "docstring": "Returns ------- transform : Transform The transform used for drawing secondary y-axis labels, which will add *pad_points* of padding (in points) between the axis and the label. The x-direction is in axis coordinates and the y-direction is in data coordinates valign : {'center', 'top', 'bottom', 'baseline', 'center_baseline'} The text vertical alignment. halign : {'center', 'left', 'right'} The text horizontal alignment. Notes ----- This transformation is primarily used by the class, and is meant to be overridden by new kinds of projections that may need to place axis elements in different locations.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\axes\\_base.py",
    "ast_data": "FunctionDef name:get_yaxis_text2_transform arg:self arg:pad_points arguments arg arg Assign Return return:yes Call Call Call"
  },
  {
    "library": "matplotlib",
    "name": "get_width_height_descent",
    "source_code": "def get_width_height_descent(self, text, prop):\n    return self._get_box_metrics(_escape_and_apply_props(text, prop))",
    "docstring": "Get the width, total height, and descent (in TeX points) for a text typeset by the current LaTeX environment.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\backends\\backend_pgf.py",
    "ast_data": "FunctionDef name:get_width_height_descent arg:self arg:text arg:prop arguments arg arg arg Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_get_signature_def_fn",
    "source_code": "@abc.abstractmethod\ndef _get_signature_def_fn(self):\n    pass",
    "docstring": "Returns a function that produces a SignatureDef given desired outputs.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\saving\\utils_v1\\export_output.py",
    "ast_data": "FunctionDef name:_get_signature_def_fn arg:self arguments arg"
  },
  {
    "library": "tensorflow",
    "name": "_make_storage",
    "source_code": "def _make_storage(self, *args, **kwargs):\n    return list(*args, **kwargs)",
    "docstring": "Determines the backing storage (overridden in subclasses).",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\trackable\\data_structures.py",
    "ast_data": "FunctionDef name:_make_storage arg:self arguments arg arg arg Return return:yes Call"
  },
  {
    "library": "django",
    "name": "compression_formats",
    "source_code": "@cached_property\ndef compression_formats(self):\n    compression_formats = {None: (open, 'rb'), 'gz': (gzip.GzipFile, 'rb'), 'zip': (SingleZipReader, 'r'), 'stdin': (lambda *args: sys.stdin, None)}\n    if has_bz2:\n        compression_formats['bz2'] = (bz2.BZ2File, 'r')\n    if has_lzma:\n        compression_formats['lzma'] = (lzma.LZMAFile, 'r')\n        compression_formats['xz'] = (lzma.LZMAFile, 'r')\n    return compression_formats",
    "docstring": "A dict mapping format names to (open function, mode arg) tuples.",
    "type": "method",
    "file_path": "django\\django\\core\\management\\commands\\loaddata.py",
    "ast_data": "FunctionDef name:compression_formats arg:self arguments arg Assign arguments arg If Assign If Assign Assign Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "icdf",
    "source_code": "def icdf(self, value: Tensor) -> Tensor:\n    raise NotImplementedError",
    "docstring": "Returns the inverse cumulative density/mass function evaluated at . Args: value (Tensor):",
    "type": "method",
    "file_path": "pytorch\\torch\\distributions\\distribution.py",
    "ast_data": "FunctionDef name:icdf arg:self arg:value arguments arg arg Raise"
  },
  {
    "library": "matplotlib",
    "name": "get_patches",
    "source_code": "def get_patches(self):\n    return silent_list('Patch', [h for h in self.legend_handles if isinstance(h, Patch)])",
    "docstring": "Return the list of \\s in the legend.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\legend.py",
    "ast_data": "FunctionDef name:get_patches arg:self arguments arg Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "CategoricalCrossentropy",
    "source_code": "class CategoricalCrossentropy(MeanMetricWrapper):\n\n    def __init__(self, name='categorical_crossentropy', dtype=None, from_logits=False, label_smoothing=0):\n        super(CategoricalCrossentropy, self).__init__(categorical_crossentropy, name, dtype=dtype, from_logits=from_logits, label_smoothing=label_smoothing)",
    "docstring": "Computes the crossentropy metric between the labels and predictions. This is the crossentropy metric class to be used when there are multiple label classes (2 or more). Here we assume that labels are given as a representation. eg., When labels values are [2, 0, 1], = [[0, 0, 1], [1, 0, 0], [0, 1, 0]]. Args: name: (Optional) string name of the metric instance. dtype: (Optional) data type of the metric result. from_logits: (Optional) Whether output is expected to be a logits tensor. By default, we consider that output encodes a probability distribution. label_smoothing: (Optional) Float in [0, 1]. When > 0, label values are smoothed, meaning the confidence on label values are relaxed. e.g. means that we will use a value of for label and for label \" Standalone usage: >>> # EPSILON = 1e-7, y = y_true, y = clip_ops.clip_by_value(output, EPSILON, 1. - EPSILON) >>> # ycompile()` API:",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\metrics.py",
    "ast_data": "ClassDef name:CategoricalCrossentropy FunctionDef name:__init__ arg:self arg:name arg:dtype arg:from_logits arg:label_smoothing arguments arg arg arg arg arg Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_as_type_list",
    "source_code": "def _as_type_list(dtypes):\n    assert dtypes is not None\n    if not (isinstance(dtypes, list) or isinstance(dtypes, tuple)):\n        return [dtypes]\n    else:\n        return list(dtypes)",
    "docstring": "Convert dtypes to a list of types.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\data_flow_ops.py",
    "ast_data": "FunctionDef name:_as_type_list arg:dtypes arguments arg Compare If BoolOp Call Call Return return:yes Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "and_masks",
    "source_code": "def and_masks(*mask_mods: _mask_mod_signature) -> _mask_mod_signature:\n    if not all((callable(arg) for arg in mask_mods)):\n        raise RuntimeError(f'All inputs should be callable mask_mods: {mask_mods}')\n\n    def and_mask(b, h, q_idx, kv_idx):\n        result = b.new_ones((), dtype=torch.bool)\n        for mask in mask_mods:\n            result = result & mask(b, h, q_idx, kv_idx)\n        return result\n    return and_mask",
    "docstring": "Returns a mask_mod that's the intersection of provided mask_mods",
    "type": "function",
    "file_path": "pytorch\\torch\\nn\\attention\\flex_attention.py",
    "ast_data": "FunctionDef name:and_masks arguments arg If Call Call Raise Call FunctionDef name:and_mask arg:b arg:h arg:q_idx arg:kv_idx arguments arg arg arg arg Assign Call For Assign Call Return return:yes Return return:yes"
  },
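A hedged sketch of composing two mask_mods with and_masks; the window size and index values are arbitrary, and the import path follows this record's file location:

```python
import torch
from torch.nn.attention.flex_attention import and_masks

# Causal attention restricted to a sliding window (window size 4 here
# is arbitrary). Each mask_mod maps (b, h, q_idx, kv_idx) -> bool tensor.
def causal(b, h, q_idx, kv_idx):
    return q_idx >= kv_idx

def window(b, h, q_idx, kv_idx):
    return q_idx - kv_idx <= 4

combined = and_masks(causal, window)
b = h = torch.tensor(0)
print(combined(b, h, torch.tensor(10), torch.tensor(3)))  # tensor(False): outside window
print(combined(b, h, torch.tensor(10), torch.tensor(8)))  # tensor(True)
```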
  {
    "library": "tensorflow",
    "name": "global_norm",
    "source_code": "@tf_export('linalg.global_norm', v1=['linalg.global_norm', 'global_norm'])\n@dispatch.add_dispatch_support\n@deprecation.deprecated_endpoints('global_norm')\ndef global_norm(t_list, name=None):\n    if not isinstance(t_list, collections_abc.Sequence) or isinstance(t_list, str):\n        raise TypeError(f'`t_list` should be a sequence of tensors. Received {type(t_list)}.')\n    t_list = list(t_list)\n    with ops.name_scope(name, 'global_norm', t_list) as name:\n        values = [ops.convert_to_tensor(t.values if isinstance(t, indexed_slices.IndexedSlices) else t, name='t_%d' % i) if t is not None else t for i, t in enumerate(t_list)]\n        half_squared_norms = []\n        for v in values:\n            if v is not None:\n                with ops.colocate_with(v):\n                    half_squared_norms.append(gen_nn_ops.l2_loss(v))\n        half_squared_norm = math_ops.reduce_sum(array_ops_stack.stack(half_squared_norms))\n        norm = math_ops.sqrt(half_squared_norm * constant_op.constant(2.0, dtype=half_squared_norm.dtype), name='global_norm')\n    return norm",
    "docstring": "Computes the global norm of multiple tensors. Given a tuple or list of tensors , this operation returns the global norm of the elements in all tensors in . The global norm is computed as: Any entries in that are of type None are ignored. Args: t_list: A tuple or list of mixed , , or None. name: A name for the operation (optional). Returns: A 0-D (scalar) of type . Raises: TypeError: If is not a sequence.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\clip_ops.py",
    "ast_data": "FunctionDef name:global_norm arg:t_list arg:name arguments arg arg If BoolOp Call Call Raise Call Call Assign Call With Call Assign Compare Call Call Call Assign For If Compare With Call Call Call Assign Call Call Assign Call Call Return return:yes Call Call"
  },
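A worked instance of the formula in the docstring: for tensors [3.0] and [4.0], sqrt(3**2 + 4**2) = 5:

```python
import tensorflow as tf

# Two single-element tensors; the global norm pools all elements.
norm = tf.linalg.global_norm([tf.constant([3.0]), tf.constant([4.0])])
print(norm.numpy())   # 5.0
```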
  {
    "library": "tensorflow",
    "name": "is_forward_ref",
    "source_code": "def is_forward_ref(tp):\n    if hasattr(typing, 'ForwardRef'):\n        return isinstance(tp, typing.ForwardRef)\n    elif hasattr(typing, '_ForwardRef'):\n        return isinstance(tp, typing._ForwardRef)\n    else:\n        return False",
    "docstring": "Returns true if is a typing forward reference.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\util\\type_annotations.py",
    "ast_data": "FunctionDef name:is_forward_ref arg:tp arguments arg If Call Return return:yes Call If Call Return return:yes Call Return return:yes"
  },
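A small check of the branch this helper takes on modern Pythons, where typing.ForwardRef is public:

```python
import typing

# The first hasattr branch applies on any recent Python version.
ref = typing.ForwardRef('MyClass')
print(isinstance(ref, typing.ForwardRef))   # True
```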
  {
    "library": "django",
    "name": "get_path_info",
    "source_code": "def get_path_info(self, filtered_relation=None):\n    opts = self.remote_field.model._meta\n    from_opts = self.model._meta\n    return [PathInfo(from_opts=from_opts, to_opts=opts, target_fields=self.foreign_related_fields, join_field=self, m2m=False, direct=True, filtered_relation=filtered_relation)]",
    "docstring": "Get path from this field to the related model.",
    "type": "method",
    "file_path": "django\\django\\db\\models\\fields\\related.py",
    "ast_data": "FunctionDef name:get_path_info arg:self arg:filtered_relation arguments arg arg Assign Assign Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "is_subtype_of",
    "source_code": "@abc.abstractmethod\ndef is_subtype_of(self, other: 'TraceType') -> bool:\n    pass",
    "docstring": "Returns True if is a subtype of . For example, uses subtyping for dispatch: if is True, then an argument of can be used as argument to a traced with an a . Args: other: A TraceType object to be compared against. Example:",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\types\\trace.py",
    "ast_data": "FunctionDef name:is_subtype_of arg:self arg:other arguments arg arg"
  },
  {
    "library": "matplotlib",
    "name": "get_thetamin",
    "source_code": "def get_thetamin(self):\n    return np.rad2deg(self.viewLim.xmin)",
    "docstring": "Get the minimum theta limit in degrees.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\projections\\polar.py",
    "ast_data": "FunctionDef name:get_thetamin arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "typeof_index",
    "source_code": "@typeof_impl.register(Index)\ndef typeof_index(val, c) -> IndexType:\n    arrty = typeof_impl(val._numba_data, c)\n    assert arrty.ndim == 1\n    return IndexType(arrty.dtype, arrty.layout, type(val))",
    "docstring": "This will assume that only strings are in object dtype index. (you should check this before this gets lowered down to numba)",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\_numba\\extensions.py",
    "ast_data": "FunctionDef name:typeof_index arg:val arg:c arguments arg arg Assign Call Compare Return return:yes Call Call Call"
  },
  {
    "library": "kornia",
    "name": "PinholeModel",
    "source_code": "class PinholeModel(CameraModelBase):\n\n    def __init__(self, image_size: ImageSize, params: Tensor) -> None:\n        if params.shape[-1] != 4 or len(params.shape) > 2:\n            raise ValueError('params must be of shape (B, 4) for PINHOLE Camera')\n        super().__init__(AffineTransform(), Z1Projection(), image_size, params)\n\n    def matrix(self) -> Tensor:\n        z = zeros_like(self.fx)\n        row1 = stack((self.fx, z, self.cx), -1)\n        row2 = stack((z, self.fy, self.cy), -1)\n        row3 = stack((z, z, z), -1)\n        K = stack((row1, row2, row3), -2)\n        K[..., -1, -1] = 1.0\n        return K\n\n    def scale(self, scale_factor: Tensor) -> PinholeModel:\n        fx = self.fx * scale_factor\n        fy = self.fy * scale_factor\n        cx = self.cx * scale_factor\n        cy = self.cy * scale_factor\n        params = stack((fx, fy, cx, cy), -1)\n        image_size = ImageSize(self.image_size.height * scale_factor, self.image_size.width * scale_factor)\n        return PinholeModel(image_size, params)",
    "docstring": "Class to represent Pinhole Camera Model. The pinhole camera model describes the mathematical relationship between the coordinates of a point in three-dimensional space and its projection onto the image plane of an ideal pinhole camera, where the camera aperture is described as a point and no lenses are used to focus light. See more: Example: >>> cam = CameraModel(ImageSize(480, 640), CameraModelType.PINHOLE, torch.Tensor([328., 328., 320., 240.])) >>> cam CameraModel(ImageSize(height=480, width=640), PinholeModel, tensor([328., 328., 320., 240.]))",
    "type": "class",
    "file_path": "kornia\\kornia\\sensors\\camera\\camera_model.py",
    "ast_data": "ClassDef name:PinholeModel FunctionDef name:__init__ arg:self arg:image_size arg:params arguments arg arg arg If BoolOp Compare Compare Call Raise Call Call Call Call Call FunctionDef name:matrix arg:self arguments arg Assign Call Assign Call Assign Call Assign Call Assign Call Assign Return return:yes FunctionDef name:scale arg:self arg:scale_factor arguments arg arg Assign Assign Assign Assign Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "get_strategy",
    "source_code": "@tf_export('distribute.get_strategy')\ndef get_strategy() -> 'StrategyBase':\n    return _get_per_thread_mode().strategy",
    "docstring": "Returns the current object. Typically only used in a cross-replica context: Returns: A object. Inside a block, it returns , otherwise it returns the default (single-replica) object.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\distribute_lib.py",
    "ast_data": "FunctionDef name:get_strategy arguments Return return:yes Call Call"
  },
  {
    "library": "pandas",
    "name": "_is_dates_only",
    "source_code": "@cache_readonly\ndef _is_dates_only(self) -> bool:\n    if isinstance(self.freq, Tick):\n        delta = Timedelta(self.freq)\n        if delta % dt.timedelta(days=1) != dt.timedelta(days=0):\n            return False\n    return self._values._is_dates_only",
    "docstring": "Return a boolean if we are only dates (and don't have a timezone) Returns ------- bool",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\indexes\\datetimes.py",
    "ast_data": "FunctionDef name:_is_dates_only arg:self arguments arg If Call Assign Call If Compare Call Call Return return:yes Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_add_uniform_scaling_initializer_transformer",
    "source_code": "def _add_uniform_scaling_initializer_transformer(parent, node, full_name, name, logs):\n    for keyword_arg in node.keywords:\n        if keyword_arg.arg == 'factor':\n            keyword_arg.arg = 'scale'\n    distribution_value = '\"uniform\"'\n    ast_value = pasta.parse(distribution_value)\n    node.keywords.append(ast.keyword(arg='distribution', value=ast_value))\n    lineno = node.func.value.lineno\n    col_offset = node.func.value.col_offset\n    node.func.value = ast_edits.full_name_node('tf.compat.v1.keras.initializers')\n    node.func.value.lineno = lineno\n    node.func.value.col_offset = col_offset\n    node.func.attr = 'VarianceScaling'\n    return node",
    "docstring": "Updates references to uniform_unit_scaling_initializer. Transforms: tf.uniform_unit_scaling_initializer(factor, seed, dtype) to tf.compat.v1.keras.initializers.VarianceScaling( scale=factor, distribution=\"uniform\", seed=seed) Note: to apply this transformation, symbol must be added to reordered_function_names above.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\tools\\compatibility\\tf_upgrade_v2.py",
    "ast_data": "FunctionDef name:_add_uniform_scaling_initializer_transformer arg:parent arg:node arg:full_name arg:name arg:logs arguments arg arg arg arg arg For If Compare Assign Assign Assign Call Call Call Assign Assign Assign Call Assign Assign Assign Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "DiscriminantAnalysisPredictionMixin",
    "source_code": "class DiscriminantAnalysisPredictionMixin:\n\n    def decision_function(self, X):\n        y_scores = self._decision_function(X)\n        if len(self.classes_) == 2:\n            return y_scores[:, 1] - y_scores[:, 0]\n        return y_scores\n\n    def predict(self, X):\n        scores = self._decision_function(X)\n        return self.classes_.take(scores.argmax(axis=1))\n\n    def predict_proba(self, X):\n        return np.exp(self.predict_log_proba(X))\n\n    def predict_log_proba(self, X):\n        scores = self._decision_function(X)\n        log_likelihood = scores - scores.max(axis=1)[:, np.newaxis]\n        return log_likelihood - np.log(np.exp(log_likelihood).sum(axis=1)[:, np.newaxis])",
    "docstring": "Mixin class for QuadraticDiscriminantAnalysis and NearestCentroid.",
    "type": "class",
    "file_path": "scikit-learn\\sklearn\\discriminant_analysis.py",
    "ast_data": "ClassDef name:DiscriminantAnalysisPredictionMixin FunctionDef name:decision_function arg:self arg:X arguments arg arg Assign Call If Compare Call Return return:yes Return return:yes FunctionDef name:predict arg:self arg:X arguments arg arg Assign Call Return return:yes Call Call FunctionDef name:predict_proba arg:self arg:X arguments arg arg Return return:yes Call Call FunctionDef name:predict_log_proba arg:self arg:X arguments arg arg Assign Call Assign Call Return return:yes Call Call Call"
  },
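The predict_log_proba body is a row-wise log-softmax over the decision scores; a small NumPy check of that identity with made-up scores:

```python
import numpy as np

# Subtracting the row max first is the usual numerical-stability trick;
# the result rows exponentiate to valid probabilities.
scores = np.array([[1.0, 2.0, 0.5]])
ll = scores - scores.max(axis=1)[:, np.newaxis]
log_proba = ll - np.log(np.exp(ll).sum(axis=1)[:, np.newaxis])
print(np.exp(log_proba).sum())   # 1.0 -- rows sum to one
```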
  {
    "library": "numpy",
    "name": "is_free_format",
    "source_code": "def is_free_format(fname):\n    result = False\n    if Path(fname).suffix.lower() in COMMON_FREE_EXTENSIONS:\n        result = True\n    with openhook(fname, 'r') as fhandle:\n        line = fhandle.readline()\n        n = 15\n        if _has_f_header(line):\n            n = 0\n        elif _has_f90_header(line):\n            n = 0\n            result = True\n        while n > 0 and line:\n            if line[0] != '!' and line.strip():\n                n -= 1\n                if line[0] != '\\t' and _free_f90_start(line[:5]) or line[-2:-1] == '&':\n                    result = True\n                    break\n            line = fhandle.readline()\n    return result",
    "docstring": "Check if file is in free format Fortran.",
    "type": "function",
    "file_path": "numpy\\numpy\\f2py\\crackfortran.py",
    "ast_data": "FunctionDef name:is_free_format arg:fname arguments arg Assign If Compare Call Call Assign With Call Assign Call Assign If Call Assign If Call Assign Assign While BoolOp Compare If BoolOp Compare Call If BoolOp BoolOp Compare Call Compare Assign Assign Call Return return:yes"
  },
  {
    "library": "authlib",
    "name": "AuthlibBaseError",
    "source_code": "class AuthlibBaseError(Exception):\n    error = None\n    description = ''\n    uri = None\n\n    def __init__(self, error=None, description=None, uri=None):\n        if error is not None:\n            self.error = error\n        if description is not None:\n            self.description = description\n        if uri is not None:\n            self.uri = uri\n        message = f'{self.error}: {self.description}'\n        super().__init__(message)\n\n    def __repr__(self):\n        return f'<{self.__class__.__name__} \"{self.error}\">'",
    "docstring": "Base Exception for all errors in Authlib.",
    "type": "class",
    "file_path": "authlib\\authlib\\common\\errors.py",
    "ast_data": "ClassDef name:AuthlibBaseError Assign Assign Assign FunctionDef name:__init__ arg:self arg:error arg:description arg:uri arguments arg arg arg arg If Compare Assign If Compare Assign If Compare Assign Assign Call Call FunctionDef name:__repr__ arg:self arguments arg Return return:yes"
  },
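A minimal sketch of the intended subclassing pattern, derived only from the class body above; InvalidRequestError is a hypothetical subclass:

```python
# Subclasses typically just override the class attributes; the base
# class formats the message as "error: description".
class InvalidRequestError(AuthlibBaseError):
    error = 'invalid_request'
    description = 'Missing "client_id" parameter.'

err = InvalidRequestError()
print(err)         # invalid_request: Missing "client_id" parameter.
print(repr(err))   # <InvalidRequestError "invalid_request">
```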
  {
    "library": "numpy",
    "name": "_nanmedian1d",
    "source_code": "def _nanmedian1d(arr1d, overwrite_input=False):\n    arr1d_parsed, _, overwrite_input = _remove_nan_1d(arr1d, overwrite_input=overwrite_input)\n    if arr1d_parsed.size == 0:\n        return arr1d[-1]\n    return np.median(arr1d_parsed, overwrite_input=overwrite_input)",
    "docstring": "Private function for rank 1 arrays. Compute the median ignoring NaNs. See nanmedian for parameter usage",
    "type": "function",
    "file_path": "numpy\\numpy\\lib\\_nanfunctions_impl.py",
    "ast_data": "FunctionDef name:_nanmedian1d arg:arr1d arg:overwrite_input arguments arg arg Assign Call If Compare Return return:yes Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "_node_get",
    "source_code": "def _node_get(node: _C.Node, key: str):\n    assert isinstance(node, _C.Node)\n    sel = node.kindOf(key)\n    return getattr(node, sel)(key)",
    "docstring": "Gets attributes of a node which is polymorphic over return type.",
    "type": "function",
    "file_path": "pytorch\\torch\\onnx\\symbolic_helper.py",
    "ast_data": "FunctionDef name:_node_get arg:node arg:key arguments arg arg Call Assign Call Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "Sequence",
    "source_code": "class Sequence(object):\n\n    @abstractmethod\n    def __getitem__(self, index):\n        raise NotImplementedError\n\n    @abstractmethod\n    def __len__(self):\n        raise NotImplementedError\n\n    def on_epoch_end(self):\n        pass\n\n    def __iter__(self):\n        for item in (self[i] for i in range(len(self))):\n            yield item",
    "docstring": "Base object for fitting to a sequence of data, such as a dataset. Every must implement the and the methods. If you want to modify your dataset between epochs you may implement . The method should return a complete batch. Notes: are a safer way to do multiprocessing. This structure guarantees that the network will only train once on each sample per epoch which is not the case with generators. Examples:",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\utils\\data_utils.py",
    "ast_data": "ClassDef name:Sequence FunctionDef name:__getitem__ arg:self arg:index arguments arg arg Raise FunctionDef name:__len__ arg:self arguments arg Raise FunctionDef name:on_epoch_end arg:self arguments arg FunctionDef name:__iter__ arg:self arguments arg For Call Call"
  },
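A minimal subclass sketch of the Sequence base class documented above, assuming in-memory NumPy arrays; __len__ reports the number of batches and __getitem__ returns one complete batch:

```python
import numpy as np

# Hypothetical subclass: x and y are arrays with matching first dimension.
class ArraySequence(Sequence):
    def __init__(self, x, y, batch_size):
        self.x, self.y, self.batch_size = x, y, batch_size

    def __len__(self):
        # Number of batches per epoch, including a final partial batch.
        return int(np.ceil(len(self.x) / self.batch_size))

    def __getitem__(self, idx):
        sl = slice(idx * self.batch_size, (idx + 1) * self.batch_size)
        return self.x[sl], self.y[sl]
```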
  {
    "library": "tensorflow",
    "name": "get_save_function",
    "source_code": "def get_save_function(registered_name):\n    return _saver_registry.name_lookup(registered_name)[0]",
    "docstring": "Returns save function registered to name.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\saved_model\\registration\\registration.py",
    "ast_data": "FunctionDef name:get_save_function arg:registered_name arguments arg Return return:yes Call"
  },
  {
    "library": "cherrypy",
    "name": "now",
    "source_code": "def now(self):\n    return datetime.datetime.now()",
    "docstring": "Generate the session specific concept of 'now'. Other session providers can override this to use alternative, possibly timezone aware, versions of 'now'.",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\lib\\sessions.py",
    "ast_data": "FunctionDef name:now arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "to_func_graph",
    "source_code": "def to_func_graph(atomic: AtomicFunction) -> func_graph_module.FuncGraph:\n    input_signature, output_signature = function_type_lib.to_structured_signature(atomic.function_type)\n    with ops.Graph().as_default():\n        for f in atomic.children:\n            ops.get_default_graph()._add_function(f)\n        result = function_def_to_graph.function_def_to_graph(atomic.definition, structured_input_signature=input_signature, structured_outputs=output_signature, propagate_device_spec=True, include_library_functions=False)\n        for f in atomic.children:\n            result._add_function(f)\n    for i, input_type in enumerate(atomic.function_type.flat_inputs):\n        handle_data = input_type.dtype._handle_data\n        if handle_data:\n            handle_data_util.set_handle_data(result.inputs[i], handle_data.shape_inference)\n        result.inputs[i].set_shape(input_type.shape)\n    for i, output_type in enumerate(atomic.function_type.flat_outputs):\n        handle_data = output_type.dtype._handle_data\n        if handle_data:\n            handle_data_util.set_handle_data(result.outputs[i], handle_data.shape_inference)\n        result.outputs[i].set_shape(output_type.shape)\n    result.collective_manager_ids_used = (atomic.call_options.collective_manager_ids_used,)\n    return result",
    "docstring": "Generate a FuncGraph from an AtomicFunction.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\polymorphic_function\\atomic_function.py",
    "ast_data": "FunctionDef name:to_func_graph arg:atomic arguments arg Assign Call With Call Call For Call Call Assign Call For Call For Call Assign If Call Call For Call Assign If Call Call Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "GlobalAveragePooling2D",
    "source_code": "class GlobalAveragePooling2D(GlobalPooling2D):\n\n    def call(self, inputs):\n        if self.data_format == 'channels_last':\n            return backend.mean(inputs, axis=[1, 2], keepdims=self.keepdims)\n        else:\n            return backend.mean(inputs, axis=[2, 3], keepdims=self.keepdims)",
    "docstring": "Global average pooling operation for spatial data. Examples: >>> input_shape = (2, 4, 5, 3) >>> x = tf.random.normal(input_shape) >>> y = tf.keras.layers.GlobalAveragePooling2D()(x) >>> print(y.shape) (2, 3) Args: data_format: A string, one of (default) or . The ordering of the dimensions in the inputs. corresponds to inputs with shape while corresponds to inputs with shape . It defaults to the value found in your Keras config file at . If you never set it, then it will be \"channels_last\". keepdims: A boolean, whether to keep the spatial dimensions or not. If is (default), the rank of the tensor is reduced for spatial dimensions. If is , the spatial dimensions are retained with length 1. The behavior is the same as for or . Input shape: - If : 4D tensor with shape . - If : 4D tensor with shape . Output shape: - If =False: 2D tensor with shape . - If =True: - If : 4D tensor with shape - If : 4D tensor with shape",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\layers\\pooling.py",
    "ast_data": "ClassDef name:GlobalAveragePooling2D FunctionDef name:call arg:self arg:inputs arguments arg arg If Compare Return return:yes Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_prune_invalid_weights_ragged",
    "source_code": "def _prune_invalid_weights_ragged(ids, weights):\n    if weights is not None:\n        is_weights_valid = math_ops.greater(weights.values, 0)\n        nrows = ids.nrows()\n        pruned_values = array_ops.boolean_mask_v2(ids.values, is_weights_valid)\n        pruned_value_rowids = array_ops.boolean_mask_v2(ids.value_rowids(), is_weights_valid)\n        ids = ragged_tensor.RaggedTensor.from_value_rowids(pruned_values, pruned_value_rowids, nrows=nrows, validate=False)\n        pruned_weights_values = array_ops.boolean_mask_v2(weights.values, is_weights_valid)\n        weights = ragged_tensor.RaggedTensor.from_value_rowids(pruned_weights_values, pruned_value_rowids, nrows=nrows, validate=False)\n    return (ids, weights)",
    "docstring": "Prune invalid weights (< 0) from the input ids and weights.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\ragged_embedding_ops.py",
    "ast_data": "FunctionDef name:_prune_invalid_weights_ragged arg:ids arg:weights arguments arg arg If Compare Assign Call Assign Call Assign Call Assign Call Call Assign Call Assign Call Assign Call Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "fit_predict",
    "source_code": "def fit_predict(self, X, y=None, sample_weight=None):\n    self.fit(X, sample_weight=sample_weight)\n    return self.labels_",
    "docstring": "Compute clusters from a data or distance matrix and predict labels. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features), or (n_samples, n_samples) Training instances to cluster, or distances between instances if `` is by itself a core sample; a sample with a negative weight may inhibit its eps-neighbor from being core. Note that weights are absolute, and default to 1. Returns ------- labels : ndarray of shape (n_samples,) Cluster labels. Noisy samples are given the label -1.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\cluster\\_dbscan.py",
    "ast_data": "FunctionDef name:fit_predict arg:self arg:X arg:y arg:sample_weight arguments arg arg arg arg Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "randomize_weights",
    "source_code": "def randomize_weights(model, random_seed=0, buffers_to_skip=None):\n    random.seed(random_seed)\n    buffers = model.buffers\n    buffer_ids = range(1, len(buffers))\n    if buffers_to_skip is not None:\n        buffer_ids = [idx for idx in buffer_ids if idx not in buffers_to_skip]\n    buffer_types = {}\n    for graph in model.subgraphs:\n        for op in graph.operators:\n            if op.inputs is None:\n                break\n            for input_idx in op.inputs:\n                tensor = graph.tensors[input_idx]\n                buffer_types[tensor.buffer] = type_to_name(tensor.type)\n    for i in buffer_ids:\n        buffer_i_data = buffers[i].data\n        buffer_i_size = 0 if buffer_i_data is None else buffer_i_data.size\n        if buffer_i_size == 0:\n            continue\n        buffer_type = buffer_types.get(i, 'INT8')\n        if buffer_type.startswith('FLOAT'):\n            format_code = 'e' if buffer_type == 'FLOAT16' else 'f'\n            for offset in range(0, buffer_i_size, struct.calcsize(format_code)):\n                value = random.uniform(-0.5, 0.5)\n                struct.pack_into(format_code, buffer_i_data, offset, value)\n        else:\n            for j in range(buffer_i_size):\n                buffer_i_data[j] = random.randint(0, 255)",
    "docstring": "Randomize weights in a model. Args: model: The model in which to randomize weights. random_seed: The input to the random number generator (default value is 0). buffers_to_skip: The list of buffer indices to skip. The weights in these buffers are left unmodified.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\lite\\tools\\flatbuffer_utils.py",
    "ast_data": "FunctionDef name:randomize_weights arg:model arg:random_seed arg:buffers_to_skip arguments arg arg arg Call Assign Assign Call Call If Compare Assign Compare Assign For For If Compare For Assign Assign Call For Assign Assign Compare If Compare Assign Call If Call Assign Compare For Call Call Assign Call Call For Call Assign Call"
  },
  {
    "library": "pytorch",
    "name": "to_tvm_tensor",
    "source_code": "def to_tvm_tensor(torch_tensor):\n    if torch_tensor.dtype == torch.bool:\n        return tvm.nd.array(torch_tensor.cpu().numpy())\n    return tvm.nd.from_dlpack(torch_tensor)",
    "docstring": "A helper function to transfer a torch.tensor to NDArray.",
    "type": "function",
    "file_path": "pytorch\\torch\\_dynamo\\backends\\tvm.py",
    "ast_data": "FunctionDef name:to_tvm_tensor arg:torch_tensor arguments arg If Compare Return return:yes Call Call Call Return return:yes Call"
  },
  {
    "library": "kornia",
    "name": "_dequantize",
    "source_code": "def _dequantize(input: Tensor, jpeg_quality: Tensor, quantization_table: Tensor) -> Tensor:\n    quantization_table_scaled: Tensor = quantization_table[:, None] * _jpeg_quality_to_scale(jpeg_quality)[:, None, None, None]\n    output: Tensor = input * differentiable_polynomial_floor(differentiable_clipping((quantization_table_scaled + 50.0) / 100.0, 1, 255))\n    return output",
    "docstring": "Perform dequantization. Args: input (Tensor): Input tensor of the shape :math:. jpeg_quality (Tensor): Compression strength to be applied, shape is :math:. quantization_table (Tensor): Quantization table of the shape :math: or :math:. Returns: output (Tensor): Quantized output tensor of the shape :math:.",
    "type": "function",
    "file_path": "kornia\\kornia\\enhance\\jpeg.py",
    "ast_data": "FunctionDef name:_dequantize arg:input arg:jpeg_quality arg:quantization_table arguments arg arg arg Call Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "invalid_unique_memory_format",
    "source_code": "def invalid_unique_memory_format(tensor, valid_memory_formats):\n    n_legality = 0\n    for memory_format in valid_memory_formats:\n        if tensor.is_contiguous(memory_format=memory_format):\n            n_legality += 1\n    return n_legality != 1",
    "docstring": "Returns True if the tensor cannot be uniquely mapped to any of the given memory formats, False otherwise.",
    "type": "method",
    "file_path": "pytorch\\torch\\_tensor.py",
    "ast_data": "FunctionDef name:invalid_unique_memory_format arg:tensor arg:valid_memory_formats arguments arg arg Assign For If Call Return return:yes Compare"
  },
  {
    "library": "matplotlib",
    "name": "get_sizes",
    "source_code": "def get_sizes(self):\n    return self._sizes",
    "docstring": "Return the sizes ('areas') of the elements in the collection. Returns ------- array The 'area' of each element.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\collections.py",
    "ast_data": "FunctionDef name:get_sizes arg:self arguments arg Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "_atol_for_type",
    "source_code": "def _atol_for_type(dtype_or_dtype_name):\n    if dtype_or_dtype_name is None:\n        dtype_or_dtype_name = numpy.float64\n    return numpy.finfo(dtype_or_dtype_name).eps * 100",
    "docstring": "Return the absolute tolerance for a given numpy dtype.",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\utils\\_array_api.py",
    "ast_data": "FunctionDef name:_atol_for_type arg:dtype_or_dtype_name arguments arg If Compare Assign Return return:yes Call"
  },
  {
    "library": "django",
    "name": "prepopulated_fields_js",
    "source_code": "def prepopulated_fields_js(context):\n    prepopulated_fields = []\n    if 'adminform' in context:\n        prepopulated_fields.extend(context['adminform'].prepopulated_fields)\n    if 'inline_admin_formsets' in context:\n        for inline_admin_formset in context['inline_admin_formsets']:\n            for inline_admin_form in inline_admin_formset:\n                if inline_admin_form.original is None:\n                    prepopulated_fields.extend(inline_admin_form.prepopulated_fields)\n    prepopulated_fields_json = []\n    for field in prepopulated_fields:\n        prepopulated_fields_json.append({'id': '#%s' % field['field'].auto_id, 'name': field['field'].name, 'dependency_ids': ['#%s' % dependency.auto_id for dependency in field['dependencies']], 'dependency_list': [dependency.name for dependency in field['dependencies']], 'maxLength': field['field'].field.max_length or 50, 'allowUnicode': getattr(field['field'].field, 'allow_unicode', False)})\n    context.update({'prepopulated_fields': prepopulated_fields, 'prepopulated_fields_json': json.dumps(prepopulated_fields_json)})\n    return context",
    "docstring": "Create a list of prepopulated_fields that should render JavaScript for the prepopulated fields for both the admin form and inlines.",
    "type": "function",
    "file_path": "django\\django\\contrib\\admin\\templatetags\\admin_modify.py",
    "ast_data": "FunctionDef name:prepopulated_fields_js arg:context arguments arg Assign If Compare Call If Compare For For If Compare Call Assign For Call BoolOp Call Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "set_root_module",
    "source_code": "def set_root_module(self, root_module: type[torch.nn.Module]) -> BackendPatternConfig:\n    self.root_module = root_module\n    return self",
    "docstring": "Set the module that represents the root for this pattern. When we construct the reference quantized model during the convert phase, the root modules (e.g. torch.nn.Linear for torch.ao.nn.intrinsic.LinearReLU) will be swapped to the corresponding reference quantized modules (e.g. torch.ao.nn.reference.quantized.Linear). This allows custom backends to specify custom reference quantized module implementations to match the numerics of their lowered operators. Since this is a one-to-one mapping, both the root module and the reference quantized module must be specified in the same BackendPatternConfig in order for the conversion to take place.",
    "type": "method",
    "file_path": "pytorch\\torch\\ao\\quantization\\backend_config\\backend_config.py",
    "ast_data": "FunctionDef name:set_root_module arg:self arg:root_module arguments arg arg Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_ContextSwitchStack",
    "source_code": "class _ContextSwitchStack(threading.local):\n\n    def __init__(self, eager):\n        super().__init__()\n        self.stack = []\n        if eager:\n            self.push(is_building_function=False, enter_context_fn=eager_mode, device_stack=None)\n\n    def push(self, is_building_function, enter_context_fn, device_stack):\n        self.stack.append(ContextSwitch(is_building_function, enter_context_fn, device_stack))\n\n    def pop(self):\n        self.stack.pop()",
    "docstring": "A thread-local stack of context switches.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\context.py",
    "ast_data": "ClassDef name:_ContextSwitchStack FunctionDef name:__init__ arg:self arg:eager arguments arg arg Call Call Assign If Call FunctionDef name:push arg:self arg:is_building_function arg:enter_context_fn arg:device_stack arguments arg arg arg arg Call Call FunctionDef name:pop arg:self arguments arg Call"
  },
  {
    "library": "tensorflow",
    "name": "normalize_cluster_spec",
    "source_code": "def normalize_cluster_spec(cluster_spec):\n    if isinstance(cluster_spec, (dict, cluster_pb2.ClusterDef)):\n        return server_lib.ClusterSpec(cluster_spec)\n    elif not isinstance(cluster_spec, server_lib.ClusterSpec):\n        raise ValueError(\"`cluster_spec' should be dict or a `tf.train.ClusterSpec` or a `tf.train.ClusterDef` object\")\n    return cluster_spec",
    "docstring": "Makes into a object. Args: cluster_spec: a dict, ClusterDef or ClusterSpec object specifying the cluster configurations. Returns: a object. Raises: ValueError: if is not a dict or a or a .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\distribute\\distribute_coordinator_utils.py",
    "ast_data": "FunctionDef name:normalize_cluster_spec arg:cluster_spec arguments arg If Call Return return:yes Call If Call Raise Call Return return:yes"
  },
  {
    "library": "seaborn",
    "name": "depend",
    "source_code": "@property\ndef depend(self) -> Any:\n    return self._depend",
    "docstring": "Return the name of the feature to source a default value from.",
    "type": "method",
    "file_path": "seaborn\\seaborn\\_marks\\base.py",
    "ast_data": "FunctionDef name:depend arg:self arguments arg Return return:yes"
  },
  {
    "library": "django",
    "name": "mark_safe",
    "source_code": "@keep_lazy(SafeString)\ndef mark_safe(s):\n    if hasattr(s, '__html__'):\n        return s\n    if callable(s):\n        return _safety_decorator(mark_safe, s)\n    return SafeString(s)",
    "docstring": "Explicitly mark a string as safe for (HTML) output purposes. The returned object can be used everywhere a string is appropriate. If used on a method as a decorator, mark the returned data as safe. Can be called multiple times on a single string.",
    "type": "function",
    "file_path": "django\\django\\utils\\safestring.py",
    "ast_data": "FunctionDef name:mark_safe arg:s arguments arg If Call Return return:yes If Call Return return:yes Call Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_get_func_name",
    "source_code": "def _get_func_name():\n    return tf_inspect.stack()[1][3]",
    "docstring": "Get the name of current function. Returns: String that is the name of current function.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\tools\\tensorflow_builder\\compat_checker\\compat_checker.py",
    "ast_data": "FunctionDef name:_get_func_name arguments Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "summation",
    "source_code": "def summation(self, seq: list[Union[int, str]]) -> Union[int, str]:\n    items = self._constant_fold(sum, seq)\n    if len(items) <= 1:\n        return items[0]\n    return ' + '.join(map(str, items))",
    "docstring": "Codegen for sum function with constant folding, constants are represented as int",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\runtime\\triton_heuristics.py",
    "ast_data": "FunctionDef name:summation arg:self arg:seq arguments arg arg Assign Call If Compare Call Return return:yes Return return:yes Call Call"
  },
  {
    "library": "sphinx",
    "name": "DownloadFileCollector",
    "source_code": "class DownloadFileCollector(EnvironmentCollector):\n\n    def clear_doc(self, app: Sphinx, env: BuildEnvironment, docname: str) -> None:\n        env.dlfiles.purge_doc(docname)\n\n    def merge_other(self, app: Sphinx, env: BuildEnvironment, docnames: Set[str], other: BuildEnvironment) -> None:\n        env.dlfiles.merge_other(docnames, other.dlfiles)\n\n    def process_doc(self, app: Sphinx, doctree: nodes.document) -> None:\n        for node in doctree.findall(addnodes.download_reference):\n            targetname = node['reftarget']\n            if '://' in targetname:\n                node['refuri'] = targetname\n            else:\n                rel_filename, filename = app.env.relfn2path(targetname, app.env.docname)\n                app.env.note_dependency(rel_filename)\n                if not os.access(filename, os.R_OK):\n                    logger.warning(__('download file not readable: %s'), filename, location=node, type='download', subtype='not_readable')\n                    continue\n                node['filename'] = app.env.dlfiles.add_file(app.env.docname, rel_filename).as_posix()",
    "docstring": "Download files collector for sphinx.environment.",
    "type": "class",
    "file_path": "sphinx\\sphinx\\environment\\collectors\\asset.py",
    "ast_data": "ClassDef name:DownloadFileCollector FunctionDef name:clear_doc arg:self arg:app arg:env arg:docname arguments arg arg arg arg Call FunctionDef name:merge_other arg:self arg:app arg:env arg:docnames arg:other arguments arg arg arg arg arg Call FunctionDef name:process_doc arg:self arg:app arg:doctree arguments arg arg arg For Call Assign If Compare Assign Assign Call Call If Call Call Call Assign Call Call"
  },
  {
    "library": "django",
    "name": "datetime_trunc_sql",
    "source_code": "def datetime_trunc_sql(self, lookup_type, sql, params, tzname):\n    raise NotImplementedError('subclasses of BaseDatabaseOperations may require a datetime_trunc_sql() method')",
    "docstring": "Given a lookup_type of 'year', 'month', 'day', 'hour', 'minute', or 'second', return the SQL that truncates the given datetime field field_name to a datetime object with only the given specificity.",
    "type": "method",
    "file_path": "django\\django\\db\\backends\\base\\operations.py",
    "ast_data": "FunctionDef name:datetime_trunc_sql arg:self arg:lookup_type arg:sql arg:params arg:tzname arguments arg arg arg arg arg Raise Call"
  },
  {
    "library": "scipy",
    "name": "minkowski_distance",
    "source_code": "def minkowski_distance(x, y, p=2):\n    x = np.asarray(x)\n    y = np.asarray(y)\n    if p == np.inf or p == 1:\n        return minkowski_distance_p(x, y, p)\n    else:\n        return minkowski_distance_p(x, y, p) ** (1.0 / p)",
    "docstring": "Compute the L**p distance between two arrays. The last dimensions of and must be the same length. Any other dimensions must be compatible for broadcasting. Parameters ---------- x : (..., K) array_like Input array. y : (..., K) array_like Input array. p : float, 1 >> from scipy.spatial import minkowski_distance >>> minkowski_distance([[0, 0], [0, 0]], [[1, 1], [0, 1]]) array([ 1.41421356, 1. ])",
    "type": "function",
    "file_path": "scipy\\scipy\\spatial\\_kdtree.py",
    "ast_data": "FunctionDef name:minkowski_distance arg:x arg:y arg:p arguments arg arg arg Assign Call Assign Call If BoolOp Compare Compare Return return:yes Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "transform",
    "source_code": "def transform(node, ctx):\n    node = qual_names.resolve(node)\n    node = CallTreeTransformer(ctx).visit(node)\n    return node",
    "docstring": "Transform function call to the compiled counterparts. Args: node: AST ctx: EntityContext Returns: A tuple (node, new_names): node: The transformed AST new_names: set(string), containing any newly-generated names",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\autograph\\converters\\call_trees.py",
    "ast_data": "FunctionDef name:transform arg:node arg:ctx arguments arg arg Assign Call Assign Call Call Return return:yes"
  },
  {
    "library": "pandas",
    "name": "create_data_for_split",
    "source_code": "def create_data_for_split(df: DataFrame, are_all_object_dtype_cols: bool, object_dtype_indices: list[int]) -> Generator[list]:\n    if are_all_object_dtype_cols:\n        for tup in df.itertuples(index=False, name=None):\n            yield list(map(maybe_box_native, tup))\n    else:\n        for tup in df.itertuples(index=False, name=None):\n            data = list(tup)\n            if object_dtype_indices:\n                for i in object_dtype_indices:\n                    data[i] = maybe_box_native(data[i])\n            yield data",
    "docstring": "Simple helper method to create data for to `` to create the main output data",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\methods\\to_dict.py",
    "ast_data": "FunctionDef name:create_data_for_split arg:df arg:are_all_object_dtype_cols arg:object_dtype_indices arguments arg arg arg If For Call Call Call For Call Assign Call If For Assign Call"
  },
  {
    "library": "tensorflow",
    "name": "transform_ast",
    "source_code": "def transform_ast(self, node, ctx):\n    raise NotImplementedError('subclasses must override this')",
    "docstring": "Performs an actual transformation of a function's AST. Subclasses must implement this method, and do not usually call it. Args: node: One or more ast.AST nodes representing the AST to be transformed. ctx: transformer.Context.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\autograph\\pyct\\transpiler.py",
    "ast_data": "FunctionDef name:transform_ast arg:self arg:node arg:ctx arguments arg arg arg Raise Call"
  },
  {
    "library": "pytorch",
    "name": "this_before_that_pass_constraint",
    "source_code": "def this_before_that_pass_constraint(this: Callable, that: Callable):\n\n    def depends_on(a: Callable, b: Callable):\n        return a != that or b != this\n    return depends_on",
    "docstring": "Defines a partial order ('depends on' function) where must occur before .",
    "type": "function",
    "file_path": "pytorch\\torch\\fx\\passes\\pass_manager.py",
    "ast_data": "FunctionDef name:this_before_that_pass_constraint arg:this arg:that arguments arg arg FunctionDef name:depends_on arg:a arg:b arguments arg arg Return return:yes BoolOp Compare Compare Return return:yes"
  },
  {
    "library": "authlib",
    "name": "check_client_secret",
    "source_code": "def check_client_secret(self, client_secret):\n    raise NotImplementedError()",
    "docstring": "Check client_secret matching with the client. For instance, in the client table, the column is called ``:: import secrets def check_client_secret(self, client_secret): return secrets.compare_digest(self.client_secret, client_secret) :param client_secret: A string of client secret :return: bool",
    "type": "method",
    "file_path": "authlib\\authlib\\oauth2\\rfc6749\\models.py",
    "ast_data": "FunctionDef name:check_client_secret arg:self arg:client_secret arguments arg arg Raise Call"
  },
  {
    "library": "django",
    "name": "_generate_temp_name",
    "source_code": "def _generate_temp_name(self, for_name):\n    suffix = hex(hash(for_name)).upper()[1:]\n    return self.normalize_name(for_name + '_' + suffix)",
    "docstring": "Generate temporary names for workarounds that need temp columns.",
    "type": "method",
    "file_path": "django\\django\\db\\backends\\oracle\\schema.py",
    "ast_data": "FunctionDef name:_generate_temp_name arg:self arg:for_name arguments arg arg Assign Call Call Call Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "open",
    "source_code": "def open(self, mode: str='a', **kwargs) -> None:\n    tables = _tables()\n    if self._mode != mode:\n        if self._mode in ['a', 'w'] and mode in ['r', 'r+']:\n            pass\n        elif mode in ['w']:\n            if self.is_open:\n                raise PossibleDataLossError(f'Re-opening the file [{self._path}] with mode [{self._mode}] will delete the current file!')\n        self._mode = mode\n    if self.is_open:\n        self.close()\n    if self._complevel and self._complevel > 0:\n        self._filters = _tables().Filters(self._complevel, self._complib, fletcher32=self._fletcher32)\n    if _table_file_open_policy_is_strict and self.is_open:\n        msg = 'Cannot open HDF5 file, which is already opened, even in read-only mode.'\n        raise ValueError(msg)\n    self._handle = tables.open_file(self._path, self._mode, **kwargs)",
    "docstring": "Open the file in the specified mode Parameters ---------- mode : {'a', 'w', 'r', 'r+'}, default 'a' See HDFStore docstring or tables.open_file for info about modes **kwargs These parameters will be passed to the PyTables open_file method.",
    "type": "method",
    "file_path": "pandas\\pandas\\io\\pytables.py",
    "ast_data": "FunctionDef name:open arg:self arg:mode arguments arg arg arg Assign Call If Compare If BoolOp Compare Compare If Compare If Raise Call Assign If Call If BoolOp Compare Assign Call Call If BoolOp Assign Raise Call Assign Call"
  },
  {
    "library": "tensorflow",
    "name": "_name_scope",
    "source_code": "@contextlib.contextmanager\ndef _name_scope(self, name=None, values=None):\n    with ops.name_scope(self.name):\n        with ops.name_scope(name, values=(values or []) + self.graph_parents) as scope:\n            yield scope",
    "docstring": "Helper function to standardize op scope.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\distributions\\bijector_impl.py",
    "ast_data": "FunctionDef name:_name_scope arg:self arg:name arg:values arguments arg arg arg With Call With Call BoolOp"
  },
  {
    "library": "matplotlib",
    "name": "set_filternorm",
    "source_code": "def set_filternorm(self, filternorm):\n    self._filternorm = bool(filternorm)\n    self.stale = True",
    "docstring": "Set whether the resize filter normalizes the weights. See help for . Parameters ---------- filternorm : bool",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\image.py",
    "ast_data": "FunctionDef name:set_filternorm arg:self arg:filternorm arguments arg arg Assign Call Assign"
  },
  {
    "library": "pytorch",
    "name": "check_all_tuning_directions",
    "source_code": "def check_all_tuning_directions(self, func: Callable[['triton.Config'], float], best_config, best_timing):\n    candidate_values_list = []\n    effective_fields = []\n    for field in self.tunable_fields:\n        old_value = get_field(best_config, field)\n        if old_value is None:\n            continue\n        candidate_values = self.get_neighbour_values(field, old_value, radius=self.inductor_meta.get('coordinate_descent_search_radius', 1), include_self=True)\n        candidate_values_list.append(candidate_values)\n        effective_fields.append(field)\n    choices = itertools.product(*candidate_values_list)\n    improved = False\n    for choice in choices:\n        assert len(choice) == len(effective_fields)\n        candidate_config = copy.deepcopy(best_config)\n        for new_val, field in zip(choice, effective_fields):\n            set_field(candidate_config, field, new_val)\n        cmp_res, candidate_timing = self.compare_config(func, candidate_config, best_config, best_timing)\n        if cmp_res:\n            improved = True\n            best_config = candidate_config\n            best_timing = candidate_timing\n    return (improved, best_config, best_timing)",
    "docstring": "Check all directions. We only do this once the regular coordinate descent tuning find no better choices any more. We only have a few tunable fields, so this should be fine.",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\runtime\\coordinate_descent_tuner.py",
    "ast_data": "FunctionDef name:check_all_tuning_directions arg:self arg:func arg:best_config arg:best_timing arguments arg arg arg arg Assign Assign For Assign Call If Compare Assign Call Call Call Call Assign Call Assign For Compare Call Call Assign Call For Call Call Assign Call If Assign Assign Assign Return return:yes"
  },
  {
    "library": "pandas",
    "name": "_pad_bytes_new",
    "source_code": "def _pad_bytes_new(name: str | bytes, length: int) -> bytes:\n    if isinstance(name, str):\n        name = bytes(name, 'utf-8')\n    return name + b'\\x00' * (length - len(name))",
    "docstring": "Takes a bytes instance and pads it with null bytes until it's length chars.",
    "type": "function",
    "file_path": "pandas\\pandas\\io\\stata.py",
    "ast_data": "FunctionDef name:_pad_bytes_new arg:name arg:length arguments arg arg If Call Assign Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_create_slots",
    "source_code": "def _create_slots(self, var_list):\n    pass",
    "docstring": "Create all slots needed by the variables. Args: var_list: A list of objects.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\training\\optimizer.py",
    "ast_data": "FunctionDef name:_create_slots arg:self arg:var_list arguments arg arg"
  },
  {
    "library": "tensorflow",
    "name": "__tf_tracing_type__",
    "source_code": "@doc_controls.doc_private\n@abc.abstractmethod\ndef __tf_tracing_type__(self, context: TracingContext) -> TraceType:\n    pass",
    "docstring": "Returns the tracing type of this object. The tracing type is used to build the signature of a tf.function when traced, and to match arguments with existing signatures. When a Function object is called, tf.function looks at the tracing type of the call arguments. If an existing signature of matching type exists, it will be used. Otherwise, a new function is traced, and its signature will use the tracing type of the call arguments. Args: context: a context reserved for internal/future usage. Returns: The tracing type of this object.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\types\\trace.py",
    "ast_data": "FunctionDef name:__tf_tracing_type__ arg:self arg:context arguments arg arg"
  },
  {
    "library": "pytorch",
    "name": "activation_is_statically_quantized",
    "source_code": "def activation_is_statically_quantized(qconfig):\n    return activation_dtype(qconfig) in [torch.quint8, torch.qint8, torch.qint32, torch.float16, torch.uint8, torch.int8, torch.int16, torch.int32, torch.float8_e5m2, torch.float8_e4m3fn] and (not activation_is_dynamically_quantized(qconfig))",
    "docstring": "Given a qconfig, decide if the activation needs to be quantized or not, this includes quantizing to quint8, qint8 and qint32 and float16",
    "type": "function",
    "file_path": "pytorch\\torch\\ao\\quantization\\utils.py",
    "ast_data": "FunctionDef name:activation_is_statically_quantized arg:qconfig arguments arg Return return:yes BoolOp Compare Call Call"
  },
  {
    "library": "django",
    "name": "encode",
    "source_code": "def encode(self, password, salt):\n    raise NotImplementedError('subclasses of BasePasswordHasher must provide an encode() method')",
    "docstring": "Create an encoded database value. The result is normally formatted as \"algorithm$salt$hash\" and must be fewer than 128 characters.",
    "type": "method",
    "file_path": "django\\django\\contrib\\auth\\hashers.py",
    "ast_data": "FunctionDef name:encode arg:self arg:password arg:salt arguments arg arg arg Raise Call"
  },
  {
    "library": "pytorch",
    "name": "custom_module_supports_equalization",
    "source_code": "def custom_module_supports_equalization(module) -> bool:\n    return type(module) in CUSTOM_MODULE_SUPP_LIST",
    "docstring": "Checks if the custom node supports equalization.",
    "type": "function",
    "file_path": "pytorch\\torch\\ao\\quantization\\fx\\_equalize.py",
    "ast_data": "FunctionDef name:custom_module_supports_equalization arg:module arguments arg Return return:yes Compare Call"
  },
  {
    "library": "django",
    "name": "media",
    "source_code": "def media(request):\n    return {'MEDIA_URL': settings.MEDIA_URL}",
    "docstring": "Add media-related context variables to the context.",
    "type": "function",
    "file_path": "django\\django\\template\\context_processors.py",
    "ast_data": "FunctionDef name:media arg:request arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "set_preinitialized_function_spec",
    "source_code": "def set_preinitialized_function_spec(concrete_fn, spec):\n    if spec is None:\n        concrete_fn._function_type = None\n        return\n    unconstrained_type = function_type_lib.FunctionType([function_type_lib.Parameter(p.name, p.kind, p.optional, None) for p in spec.function_type.parameters.values()])\n    arg_specs, kwarg_specs = concrete_fn.structured_input_signature\n    input_function_type, _ = function_type_lib.canonicalize_to_monomorphic(arg_specs, {function_type_lib.sanitize_arg_name(k): v for k, v in kwarg_specs.items()}, spec.default_values, {}, unconstrained_type)\n    output_type = trace_type.from_value(concrete_fn.graph.structured_outputs)\n    function_type = function_type_lib.FunctionType(input_function_type.parameters.values(), return_annotation=output_type)\n    concrete_fn._function_type = function_type",
    "docstring": "Set the FunctionType of the ConcreteFunction using FunctionSpec.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\saved_model\\function_deserialization.py",
    "ast_data": "FunctionDef name:set_preinitialized_function_spec arg:concrete_fn arg:spec arguments arg arg If Compare Assign Return return:no Assign Call Call Call Assign Assign Call Call Call Assign Call Assign Call Call Assign"
  },
  {
    "library": "cryptography",
    "name": "issuer_name",
    "source_code": "def issuer_name(self, name: Name) -> CertificateBuilder:\n    if not isinstance(name, Name):\n        raise TypeError('Expecting x509.Name object.')\n    if self._issuer_name is not None:\n        raise ValueError('The issuer name may only be set once.')\n    return CertificateBuilder(name, self._subject_name, self._public_key, self._serial_number, self._not_valid_before, self._not_valid_after, self._extensions)",
    "docstring": "Sets the CA's distinguished name.",
    "type": "method",
    "file_path": "cryptography\\src\\cryptography\\x509\\base.py",
    "ast_data": "FunctionDef name:issuer_name arg:self arg:name arguments arg arg If Call Raise Call If Compare Raise Call Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "integers",
    "source_code": "def integers(self, l_bounds: 'npt.ArrayLike', *, u_bounds: 'npt.ArrayLike | None'=None, n: IntNumber=1, endpoint: bool=False, workers: IntNumber=1) -> np.ndarray:\n    if u_bounds is None:\n        u_bounds = l_bounds\n        l_bounds = 0\n    u_bounds = np.atleast_1d(u_bounds)\n    l_bounds = np.atleast_1d(l_bounds)\n    if endpoint:\n        u_bounds = u_bounds + 1\n    if not np.issubdtype(l_bounds.dtype, np.integer) or not np.issubdtype(u_bounds.dtype, np.integer):\n        message = \"'u_bounds' and 'l_bounds' must be integers or array-like of integers\"\n        raise ValueError(message)\n    if isinstance(self, Halton):\n        sample = self.random(n=n, workers=workers)\n    else:\n        sample = self.random(n=n)\n    sample = scale(sample, l_bounds=l_bounds, u_bounds=u_bounds)\n    sample = np.floor(sample).astype(np.int64)\n    return sample",
    "docstring": "Draw integers from (inclusive) to (exclusive), or if endpoint=True, (inclusive) to (inclusive). Parameters ---------- l_bounds : int or array-like of ints Lowest (signed) integers to be drawn (unless `u_boundsHalton[0, 1)[a, b), b>aab` the upper bounds, the following transformation is used: .. math:: \\text{floor}((b - a) \\cdot \\text{sample} + a)",
    "type": "method",
    "file_path": "scipy\\scipy\\stats\\_qmc.py",
    "ast_data": "FunctionDef name:integers arg:self arg:l_bounds arguments arg arg arg arg arg arg If Compare Assign Assign Assign Call Assign Call If Assign If BoolOp Call Call Assign Raise Call If Call Assign Call Assign Call Assign Call Assign Call Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "__init__",
    "source_code": "def __init__(self, boxout, **kwargs):\n    _api.check_isinstance(BboxBase, boxout=boxout)\n    super().__init__(**kwargs)\n    self._boxout = boxout\n    self.set_children(boxout)\n    self._mtx = None\n    self._inverted = None",
    "docstring": "Create a new that linearly transforms points from the unit bounding box to *boxout*.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\transforms.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:boxout arguments arg arg arg Call Call Call Assign Call Assign Assign"
  },
  {
    "library": "numpy",
    "name": "MAError",
    "source_code": "class MAError(Exception):\n    pass",
    "docstring": "Class for masked array related errors.",
    "type": "class",
    "file_path": "numpy\\numpy\\ma\\core.py",
    "ast_data": "ClassDef name:MAError"
  },
  {
    "library": "tensorflow",
    "name": "_num_buckets",
    "source_code": "@property\ndef _num_buckets(self):\n    return self.num_buckets",
    "docstring": "Returns number of buckets in this sparse feature.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\feature_column\\feature_column.py",
    "ast_data": "FunctionDef name:_num_buckets arg:self arguments arg Return return:yes"
  },
  {
    "library": "scipy",
    "name": "count_neighbors",
    "source_code": "def count_neighbors(self, other, r, p=2.0, weights=None, cumulative=True):\n    return super().count_neighbors(other, r, p, weights, cumulative)",
    "docstring": "Count how many nearby pairs can be formed. Count the number of pairs `cumulative=Falsequery_ball_tree`: >>> indexes = kd_tree1.query_ball_tree(kd_tree2, r=0.2) >>> sum([len(i) for i in indexes]) 1",
    "type": "method",
    "file_path": "scipy\\scipy\\spatial\\_kdtree.py",
    "ast_data": "FunctionDef name:count_neighbors arg:self arg:other arg:r arg:p arg:weights arg:cumulative arguments arg arg arg arg arg arg Return return:yes Call Call"
  },
  {
    "library": "scikit-learn",
    "name": "_n_features_out",
    "source_code": "@property\ndef _n_features_out(self):\n    return self.n_components_",
    "docstring": "Number of transformed output features.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\decomposition\\_dict_learning.py",
    "ast_data": "FunctionDef name:_n_features_out arg:self arguments arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "bucketize",
    "source_code": "def bucketize(self, values: CSEVariable, boundaries: tuple[str, sympy.Expr, sympy.Expr, sympy.Expr], boundary_indices: CSEVariable, indexing_dtype: torch.dtype, right: bool, sorter: Optional[tuple[str, sympy.Expr]]=None, sorter_indices: Optional[CSEVariable]=None) -> CSEVariable:\n    self.autotune_hints.add(AutotuneHint.ONE_ELEMENT_PER_THREAD)\n    boundaries_ptr = self.args.input(boundaries[0])\n    boundary_size = self.index_to_str(boundaries[1])\n    boundaries_underlying_numel = self.index_to_str(boundaries[2])\n    boundary_stride = self.index_to_str(boundaries[3])\n    sorter_ptr = self.args.input(sorter[0]) if sorter else 'None'\n    sorter_stride = self.index_to_str(sorter[1]) if sorter else 'None'\n    if indexing_dtype == torch.int32:\n        triton_dtype = 'tl.int32'\n    elif indexing_dtype == torch.int64:\n        triton_dtype = 'tl.int64'\n    else:\n        raise NotImplementedError('Bucketize only supports indexing with int32 and int64')\n    result = self.cse.generate(self.compute, f'triton_helpers.bucketize_binary_search({values}, {boundaries_ptr}, {boundary_size}, {boundaries_underlying_numel}, {boundary_stride}, {boundary_indices}, {triton_dtype}, {right}, {sorter_ptr}, {sorter_stride}, {sorter_indices}, )', dtype=indexing_dtype)\n    return result",
    "docstring": "See [Note: Inductor bucketize op]",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\codegen\\triton.py",
    "ast_data": "FunctionDef name:bucketize arg:self arg:values arg:boundaries arg:boundary_indices arg:indexing_dtype arg:right arg:sorter arg:sorter_indices arguments arg arg arg arg arg arg arg arg Call Assign Call Assign Call Assign Call Assign Call Assign Call Assign Call If Compare Assign If Compare Assign Raise Call Assign Call Return return:yes"
  },
  {
    "library": "scipy",
    "name": "antiderivative",
    "source_code": "def antiderivative(self, nu=1):\n    c = self.c.copy()\n    ct = len(self.t) - len(c)\n    if ct > 0:\n        c = np.r_[c, np.zeros((ct,) + c.shape[1:])]\n    tck = _fitpack_impl.splantider((self.t, c, self.k), nu)\n    if self.extrapolate == 'periodic':\n        extrapolate = False\n    else:\n        extrapolate = self.extrapolate\n    return self.construct_fast(*tck, extrapolate=extrapolate, axis=self.axis)",
    "docstring": "Return a B-spline representing the antiderivative. Parameters ---------- nu : int, optional Antiderivative order. Default is 1. Returns ------- b : object A new instance representing the antiderivative. Notes ----- If antiderivative is computed and ``, it will be set to False for the returned instance. This is done because the antiderivative is no longer periodic and its correct evaluation outside of the initially given x interval is difficult. See Also -------- splder, splantider",
    "type": "method",
    "file_path": "scipy\\scipy\\interpolate\\_bsplines.py",
    "ast_data": "FunctionDef name:antiderivative arg:self arg:nu arguments arg arg Assign Call Assign Call Call If Compare Assign Call Assign Call If Compare Assign Assign Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "VersionAwareLayers",
    "source_code": "class VersionAwareLayers(object):\n\n    def __getattr__(self, name):\n        serialization.populate_deserializable_objects()\n        if name in serialization.LOCAL.ALL_OBJECTS:\n            return serialization.LOCAL.ALL_OBJECTS[name]\n        return super(VersionAwareLayers, self).__getattr__(name)",
    "docstring": "Utility to be used internally to access layers in a V1/V2-aware fashion. When using layers within the Keras codebase, under the constraint that e.g. should be the version corresponding to the current runtime (TF1 or TF2), do not simply access since it would ignore e.g. an early call. Instead, use an instance of (which you can use just like the module).",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\layers\\__init__.py",
    "ast_data": "ClassDef name:VersionAwareLayers FunctionDef name:__getattr__ arg:self arg:name arguments arg arg Call If Compare Return return:yes Return return:yes Call Call"
  },
  {
    "library": "matplotlib",
    "name": "HandlerNpoints",
    "source_code": "class HandlerNpoints(HandlerBase):\n\n    def __init__(self, marker_pad=0.3, numpoints=None, **kwargs):\n        super().__init__(**kwargs)\n        self._numpoints = numpoints\n        self._marker_pad = marker_pad\n\n    def get_numpoints(self, legend):\n        if self._numpoints is None:\n            return legend.numpoints\n        else:\n            return self._numpoints\n\n    def get_xdata(self, legend, xdescent, ydescent, width, height, fontsize):\n        numpoints = self.get_numpoints(legend)\n        if numpoints > 1:\n            pad = self._marker_pad * fontsize\n            xdata = np.linspace(-xdescent + pad, -xdescent + width - pad, numpoints)\n            xdata_marker = xdata\n        else:\n            xdata = [-xdescent, -xdescent + width]\n            xdata_marker = [-xdescent + 0.5 * width]\n        return (xdata, xdata_marker)",
    "docstring": "A legend handler that shows *numpoints* points in the legend entry.",
    "type": "class",
    "file_path": "matplotlib\\lib\\matplotlib\\legend_handler.py",
    "ast_data": "ClassDef name:HandlerNpoints FunctionDef name:__init__ arg:self arg:marker_pad arg:numpoints arguments arg arg arg arg Call Call Assign Assign FunctionDef name:get_numpoints arg:self arg:legend arguments arg arg If Compare Return return:yes Return return:yes FunctionDef name:get_xdata arg:self arg:legend arg:xdescent arg:ydescent arg:width arg:height arg:fontsize arguments arg arg arg arg arg arg arg Assign Call If Compare Assign Assign Call Assign Assign Assign Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "elapsed_time",
    "source_code": "def elapsed_time(self, end_event):\n    return super().elapsed_time(end_event)",
    "docstring": "Return the time elapsed. Time reported in milliseconds after the event was recorded and before the end_event was recorded.",
    "type": "method",
    "file_path": "pytorch\\torch\\cuda\\streams.py",
    "ast_data": "FunctionDef name:elapsed_time arg:self arg:end_event arguments arg arg Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_save_model_and_copy_assets",
    "source_code": "def _save_model_and_copy_assets(exported_model: exported_model_pb2.ExportedModel, src_saved_model_path: str, dst_saved_model_path: str, signature_def_map: Mapping[str, meta_graph_pb2.SignatureDef], tags: Collection[str]) -> bool:\n    save_model.save_model_v1(exported_model.graph_def, dst_saved_model_path, signature_def_map, tags, init_op_name=exported_model.init_node_name, saver_def=_get_saver_def_or_none(exported_model), checkpoint_dir=exported_model.checkpoint_dir, function_aliases=exported_model.function_aliases, asset_file_defs=exported_model.asset_file_defs)\n    _copy_assets(src_saved_model_path, dst_saved_model_path)\n    return True",
    "docstring": "Saves the model and copies the assets from the source model. Args: exported_model: ExportedModel to save. src_saved_model_path: Path to the source SavedModel. This will be used to copy the asset files to . dst_saved_model_path: Destination path to save the exported model. signature_def_map: Signature key -> SignatureDef mapping. tags: Tags to attach to the saved MetaGraphDef. Returns: upon successfully saving the model.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\compiler\\mlir\\quantization\\tensorflow\\python\\py_function_lib.py",
    "ast_data": "FunctionDef name:_save_model_and_copy_assets arg:exported_model arg:src_saved_model_path arg:dst_saved_model_path arg:signature_def_map arg:tags arguments arg arg arg arg arg Call Call Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "get_hatch_linewidth",
    "source_code": "def get_hatch_linewidth(self):\n    return self._hatch_linewidth",
    "docstring": "Return the hatch linewidth.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\patches.py",
    "ast_data": "FunctionDef name:get_hatch_linewidth arg:self arguments arg Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "devices",
    "source_code": "def devices(self) -> list[Device]:\n    return ['cpu']",
    "docstring": "The devices supported by NumPy. For NumPy, this always returns ``. Returns ------- devices : list[Device] The devices supported by NumPy. See Also -------- __array_namespace_info__.capabilities, __array_namespace_info__.default_device, __array_namespace_info__.default_dtypes, __array_namespace_info__.dtypes Examples -------- >>> info = np.__array_namespace_info__() >>> info.devices() ['cpu']",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\externals\\array_api_compat\\numpy\\_info.py",
    "ast_data": "FunctionDef name:devices arg:self arguments arg Return return:yes"
  },
  {
    "library": "kornia",
    "name": "dog_response",
    "source_code": "def dog_response(input: Tensor) -> Tensor:\n    KORNIA_CHECK_SHAPE(input, ['B', 'C', 'L', 'H', 'W'])\n    return input[:, :, 1:] - input[:, :, :-1]",
    "docstring": "Compute the Difference-of-Gaussian response. Args: input: a given the gaussian 5d tensor :math:. Return: the response map per channel with shape :math:.",
    "type": "function",
    "file_path": "kornia\\kornia\\feature\\responses.py",
    "ast_data": "FunctionDef name:dog_response arg:input arguments arg Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_num_slices_in_dimension",
    "source_code": "def _num_slices_in_dimension(self, axis):\n    if not isinstance(axis, int):\n        raise TypeError('axis must be an integer')\n    if axis < 0:\n        rank = self.rank\n        if rank is None:\n            raise ValueError(\"You can't use negative values if the rank is undefined\")\n        axis = axis + rank\n    if axis == 0:\n        return self._dimension(0)\n    if axis <= self.num_row_partitions:\n        return self.row_partitions[axis - 1].nvals()\n    remainder = axis - (self.num_row_partitions - 1)\n    return _reduce_prod_patch(self.inner_shape[:remainder])",
    "docstring": "The total size of a dimension (like nvals). Effectively, this is self[:axis+1]._num_elements() Example: shape = DynamicRaggedShape._from_inner_shape([2, 3, 4]) shape._num_slices_in_dimension(0) = 2 shape._num_slices_in_dimension(1) = 6 shape._num_slices_in_dimension(2) = 24 shape._num_slices_in_dimension(-1) = 24 shape._num_slices_in_dimension(-2) = 6 shape._num_slices_in_dimension(-2) = 2 Args: axis: the last axis to include in the number of elements. If negative, then axis = axis + rank. Returns: The number of elements in the shape.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\dynamic_ragged_shape.py",
    "ast_data": "FunctionDef name:_num_slices_in_dimension arg:self arg:axis arguments arg arg If Call Raise Call If Compare Assign If Compare Raise Call Assign If Compare Return return:yes Call If Compare Return return:yes Call Assign Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "tanm",
    "source_code": "@_apply_over_batch(('A', 2))\ndef tanm(A):\n    A = _asarray_square(A)\n    return _maybe_real(A, solve(cosm(A), sinm(A)))",
    "docstring": "Compute the matrix tangent. This routine uses expm to compute the matrix exponentials. Parameters ---------- A : (N, N) array_like Input array. Returns ------- tanm : (N, N) ndarray Matrix tangent of Examples -------- >>> import numpy as np >>> from scipy.linalg import tanm, sinm, cosm >>> a = np.array([[1.0, 3.0], [1.0, 4.0]]) >>> t = tanm(a) >>> t array([[ -2.00876993, -8.41880636], [ -2.80626879, -10.42757629]]) Verify tanm(a) = sinm(a).dot(inv(cosm(a))) >>> s = sinm(a) >>> c = cosm(a) >>> s.dot(np.linalg.inv(c)) array([[ -2.00876993, -8.41880636], [ -2.80626879, -10.42757629]])",
    "type": "function",
    "file_path": "scipy\\scipy\\linalg\\_matfuncs.py",
    "ast_data": "FunctionDef name:tanm arg:A arguments arg Assign Call Return return:yes Call Call Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "_get_fuser_method_in_reversed_nested_tuple_format",
    "source_code": "def _get_fuser_method_in_reversed_nested_tuple_format(config: BackendPatternConfig) -> Callable:\n    assert config.fuser_method is not None\n    if config._pattern_complex_format is not None:\n        return config.fuser_method\n    if not isinstance(config.pattern, tuple):\n        raise ValueError('Expected pattern to be a tuple, got: ', config.pattern)\n    if len(config.pattern) == 2:\n        return _reverse2(config.fuser_method)\n    elif len(config.pattern) == 3:\n        return _reverse3(config.fuser_method)\n    else:\n        raise ValueError('Expected a tuple with 2 or 3 elements, got: ', config.pattern)",
    "docstring": "Return the fuser method specified in the given config in the reversed nested tuple format used internally in the quantization pattern matching code. If pattern is specified in the reversed nested tuple format, we assume the fuser method is also specified in this format and simply return it as is. Otherwise, we convert the fuser method as follows: * Given f(is_qat, conv, relu), return f'(is_qat, relu, conv) * Given f(is_qat, conv, bn, relu), return f'(is_qat, relu, bn_conv), where bn_conv is a 2-tuple (bn, conv) The first argument of a fuser method is always and is not affected in the conversion. We currently only support functions with 3 or 4 arguments.",
    "type": "function",
    "file_path": "pytorch\\torch\\ao\\quantization\\backend_config\\utils.py",
    "ast_data": "FunctionDef name:_get_fuser_method_in_reversed_nested_tuple_format arg:config arguments arg Compare If Compare Return return:yes If Call Raise Call If Compare Call Return return:yes Call If Compare Call Return return:yes Call Raise Call"
  },
  {
    "library": "pytorch",
    "name": "_terminate_process_handler",
    "source_code": "def _terminate_process_handler(signum: int, frame: Optional[FrameType]) -> None:\n    sigval = signal.Signals(signum)\n    raise SignalException(f'Process {os.getpid()} got signal: {sigval}', sigval=sigval)",
    "docstring": "Termination handler that raises exceptions on the main process. When the process receives death signal(SIGTERM, SIGINT), this termination handler will be invoked. It raises the `` exception that should be processed by the user code. Python does not terminate process after the termination handler is finished, so the exception should not be silently ignored, otherwise the process will never be terminated.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\elastic\\multiprocessing\\api.py",
    "ast_data": "FunctionDef name:_terminate_process_handler arg:signum arg:frame arguments arg arg Assign Call Raise Call Call"
  },
  {
    "library": "tensorflow",
    "name": "inverse_min_event_ndims",
    "source_code": "@property\ndef inverse_min_event_ndims(self):\n    return self._inverse_min_event_ndims",
    "docstring": "Returns the minimal number of dimensions bijector.inverse operates on.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\distributions\\bijector_impl.py",
    "ast_data": "FunctionDef name:inverse_min_event_ndims arg:self arguments arg Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "get_basefile",
    "source_code": "@classmethod\ndef get_basefile(cls, tex, fontsize, dpi=None):\n    src = cls._get_tex_source(tex, fontsize) + str(dpi)\n    filehash = hashlib.sha256(src.encode('utf-8'), usedforsecurity=False).hexdigest()\n    filepath = Path(cls._texcache)\n    num_letters, num_levels = (2, 2)\n    for i in range(0, num_letters * num_levels, num_letters):\n        filepath = filepath / Path(filehash[i:i + 2])\n    filepath.mkdir(parents=True, exist_ok=True)\n    return os.path.join(filepath, filehash)",
    "docstring": "Return a filename based on a hash of the string, fontsize, and dpi.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\texmanager.py",
    "ast_data": "FunctionDef name:get_basefile arg:cls arg:tex arg:fontsize arg:dpi arguments arg arg arg arg Assign Call Call Assign Call Call Call Assign Call Assign For Call Assign Call Call Return return:yes Call"
  },
  {
    "library": "django",
    "name": "import_proj",
    "source_code": "def import_proj(self, proj):\n    capi.from_proj(self.ptr, proj)",
    "docstring": "Import the Spatial Reference from a PROJ string.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\gdal\\srs.py",
    "ast_data": "FunctionDef name:import_proj arg:self arg:proj arguments arg arg Call"
  },
  {
    "library": "tensorflow",
    "name": "_fall_back_unconverted",
    "source_code": "def _fall_back_unconverted(f, args, kwargs, options, exc):\n    warning_template = 'AutoGraph could not transform %s and will run it as-is.\\n%sCause: %s\\nTo silence this warning, decorate the function with @tf.autograph.experimental.do_not_convert'\n    if isinstance(exc, errors.InaccessibleSourceCodeError):\n        if ag_ctx.INSPECT_SOURCE_SUPPORTED:\n            logging.warning(warning_template, f, '', exc)\n    elif isinstance(exc, errors.UnsupportedLanguageElementError):\n        if not conversion.is_in_allowlist_cache(f, options):\n            logging.warning(warning_template, f, '', exc)\n    else:\n        file_bug_message = 'Please report this to the TensorFlow team. When filing the bug, set the verbosity to 10 (on Linux, `export AUTOGRAPH_VERBOSITY=10`) and attach the full output.\\n'\n        logging.warning(warning_template, f, file_bug_message, exc)\n    return _call_unconverted(f, args, kwargs, options)",
    "docstring": "Falls back to calling the function unconverted, in case of error.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\autograph\\impl\\api.py",
    "ast_data": "FunctionDef name:_fall_back_unconverted arg:f arg:args arg:kwargs arg:options arg:exc arguments arg arg arg arg arg Assign If Call If Call If Call If Call Call Assign Call Return return:yes Call"
  },
  {
    "library": "django",
    "name": "limit_offset_sql",
    "source_code": "def limit_offset_sql(self, low_mark, high_mark):\n    limit, offset = self._get_limit_offset_params(low_mark, high_mark)\n    return ' '.join((sql for sql in ('LIMIT %d' % limit if limit else None, 'OFFSET %d' % offset if offset else None) if sql))",
    "docstring": "Return LIMIT/OFFSET SQL clause.",
    "type": "method",
    "file_path": "django\\django\\db\\backends\\base\\operations.py",
    "ast_data": "FunctionDef name:limit_offset_sql arg:self arg:low_mark arg:high_mark arguments arg arg arg Assign Call Return return:yes Call"
  },
  {
    "library": "scrapy",
    "name": "DropItem",
    "source_code": "class DropItem(Exception):\n\n    def __init__(self, message: str, log_level: str | None=None):\n        super().__init__(message)\n        self.log_level = log_level",
    "docstring": "Drop item from the item pipeline",
    "type": "class",
    "file_path": "scrapy\\scrapy\\exceptions.py",
    "ast_data": "ClassDef name:DropItem FunctionDef name:__init__ arg:self arg:message arg:log_level arguments arg arg arg Call Call Assign"
  },
  {
    "library": "django",
    "name": "BaseUpdateView",
    "source_code": "class BaseUpdateView(ModelFormMixin, ProcessFormView):\n\n    def get(self, request, *args, **kwargs):\n        self.object = self.get_object()\n        return super().get(request, *args, **kwargs)\n\n    def post(self, request, *args, **kwargs):\n        self.object = self.get_object()\n        return super().post(request, *args, **kwargs)",
    "docstring": "Base view for updating an existing object. This requires subclassing to provide a response mixin.",
    "type": "class",
    "file_path": "django\\django\\views\\generic\\edit.py",
    "ast_data": "ClassDef name:BaseUpdateView FunctionDef name:get arg:self arg:request arguments arg arg arg arg Assign Call Return return:yes Call Call FunctionDef name:post arg:self arg:request arguments arg arg arg arg Assign Call Return return:yes Call Call"
  },
  {
    "library": "numpy",
    "name": "contains",
    "source_code": "def contains(self, other):\n    found = []\n\n    def visit(expr, found=found):\n        if found:\n            return expr\n        elif expr == other:\n            found.append(1)\n            return expr\n    self.traverse(visit)\n    return len(found) != 0",
    "docstring": "Check if self contains other.",
    "type": "method",
    "file_path": "numpy\\numpy\\f2py\\symbolic.py",
    "ast_data": "FunctionDef name:contains arg:self arg:other arguments arg arg Assign FunctionDef name:visit arg:expr arg:found arguments arg arg If Return return:yes If Compare Call Return return:yes Call Return return:yes Compare Call"
  },
  {
    "library": "pytorch",
    "name": "prepare_datasets",
    "source_code": "def prepare_datasets(self, df, other_datasets, cat_feature2cats, ranking=False):\n    test_size, val_size = self.get_test_and_val_size()\n    df_train_val, df_test = train_test_split(df, test_size=test_size, random_state=42)\n    train_val_size = 1 - test_size\n    df_train, df_val = train_test_split(df_train_val, test_size=val_size / train_val_size, random_state=42)\n    datasets = {'train': df_train, 'val': df_val, 'test': df_test}\n    self.add_real_datasets(datasets, other_datasets, cat_feature2cats, ranking)\n    return datasets",
    "docstring": "Splits the dataframe into train, val, and test sets. Also adds other datasets, specified by the user, to the train set.",
    "type": "method",
    "file_path": "pytorch\\torchgen\\_autoheuristic\\train_decision.py",
    "ast_data": "FunctionDef name:prepare_datasets arg:self arg:df arg:other_datasets arg:cat_feature2cats arg:ranking arguments arg arg arg arg arg Assign Call Assign Call Assign Assign Call Assign Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "get_state",
    "source_code": "def get_state(self) -> ParserState:\n    return self._state_stack[-1]",
    "docstring": "Get the current of the parser.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\_mathtext.py",
    "ast_data": "FunctionDef name:get_state arg:self arguments arg Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "subplot2grid",
    "source_code": "def subplot2grid(shape: tuple[int, int], loc: tuple[int, int], rowspan: int=1, colspan: int=1, fig: Figure | None=None, **kwargs) -> matplotlib.axes.Axes:\n    if fig is None:\n        fig = gcf()\n    rows, cols = shape\n    gs = GridSpec._check_gridspec_exists(fig, rows, cols)\n    subplotspec = gs.new_subplotspec(loc, rowspan=rowspan, colspan=colspan)\n    return fig.add_subplot(subplotspec, **kwargs)",
    "docstring": "Create a subplot at a specific location inside a regular grid. Parameters ---------- shape : (int, int) Number of rows and of columns of the grid in which to place axis. loc : (int, int) Row number and column number of the axis location within the grid. rowspan : int, default: 1 Number of rows for the axis to span downwards. colspan : int, default: 1 Number of columns for the axis to span to the right. fig : , optional Figure to place the subplot in. Defaults to the current figure. **kwargs Additional keyword arguments are handed to . Returns ------- The Axes of the subplot. The returned Axes can actually be an instance of a subclass, such as for polar projections. Notes ----- The following call :: ax = subplot2grid((nrows, ncols), (row, col), rowspan, colspan) is identical to :: fig = gcf() gs = fig.add_gridspec(nrows, ncols) ax = fig.add_subplot(gs[row:row+rowspan, col:col+colspan])",
    "type": "function",
    "file_path": "matplotlib\\lib\\matplotlib\\pyplot.py",
    "ast_data": "FunctionDef name:subplot2grid arg:shape arg:loc arg:rowspan arg:colspan arg:fig arguments arg arg arg arg arg arg If Compare Assign Call Assign Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "RandomNormal",
    "source_code": "@tf_export('random_normal_initializer', v1=[])\nclass RandomNormal(Initializer):\n\n    def __init__(self, mean=0.0, stddev=0.05, seed=None):\n        self.mean = mean\n        self.stddev = stddev\n        self.seed = seed\n        self._random_generator = _RandomGenerator(seed)\n\n    def __call__(self, shape, dtype=dtypes.float32, **kwargs):\n        self._validate_kwargs(kwargs)\n        dtype = _assert_float_dtype(dtype)\n        if _PARTITION_SHAPE in kwargs:\n            shape = kwargs[_PARTITION_SHAPE]\n        return self._random_generator.random_normal(shape, self.mean, self.stddev, dtype)\n\n    def get_config(self):\n        return {'mean': self.mean, 'stddev': self.stddev, 'seed': self.seed}",
    "docstring": "Initializer that generates tensors with a normal distribution. Initializers allow you to pre-specify an initialization strategy, encoded in the Initializer object, without knowing the shape and dtype of the variable being initialized. Examples: >>> def make_variables(k, initializer): ... return (tf.Variable(initializer(shape=[k], dtype=tf.float32)), ... tf.Variable(initializer(shape=[k, k], dtype=tf.float32))) >>> v1, v2 = make_variables(3, ... tf.random_normal_initializer(mean=1., stddev=2.)) >>> v1 >>> v2 >> make_variables(4, tf.random_uniform_initializer(minval=-1., maxval=1.)) (, <tf.Variable...shape=(4, 4) ... Args: mean: a python scalar or a scalar tensor. Mean of the random values to generate. stddev: a python scalar or a scalar tensor. Standard deviation of the random values to generate. seed: A Python integer. Used to create random seeds. See for behavior.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\init_ops_v2.py",
    "ast_data": "ClassDef name:RandomNormal FunctionDef name:__init__ arg:self arg:mean arg:stddev arg:seed arguments arg arg arg arg Assign Assign Assign Assign Call FunctionDef name:__call__ arg:self arg:shape arg:dtype arguments arg arg arg arg Call Assign Call If Compare Assign Return return:yes Call FunctionDef name:get_config arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "get_child",
    "source_code": "def get_child(self):\n    return self._child",
    "docstring": "Return the child.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\offsetbox.py",
    "ast_data": "FunctionDef name:get_child arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "range_input_producer",
    "source_code": "@tf_export(v1=['train.range_input_producer'])\n@deprecation.deprecated(None, 'Queue-based input pipelines have been replaced by `tf.data`. Use `tf.data.Dataset.range(limit).shuffle(limit).repeat(num_epochs)`. If `shuffle=False`, omit the `.shuffle(...)`.')\ndef range_input_producer(limit, num_epochs=None, shuffle=True, seed=None, capacity=32, shared_name=None, name=None):\n    with ops.name_scope(name, 'input_producer', [limit]) as name:\n        range_tensor = math_ops.range(limit)\n        return input_producer(range_tensor, [], num_epochs, shuffle, seed, capacity, shared_name, 'fraction_of_%d_full' % capacity, name)",
    "docstring": "Produces the integers from 0 to limit-1 in a queue. Note: if is not , this function creates local counter . Use to initialize local variables. Args: limit: An int32 scalar tensor. num_epochs: An integer (optional). If specified, produces each integer times before generating an OutOfRange error. If not specified, can cycle through the integers an unlimited number of times. shuffle: Boolean. If true, the integers are randomly shuffled within each epoch. seed: An integer (optional). Seed used if shuffle == True. capacity: An integer. Sets the queue capacity. shared_name: (optional). If set, this queue will be shared under the given name across multiple sessions. name: A name for the operations (optional). Returns: A Queue with the output integers. A for the Queue is added to the current 's collection. @compatibility(eager) Input pipelines based on Queues are not supported when eager execution is enabled. Please use the API to ingest data under eager execution. @end_compatibility",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\training\\input.py",
    "ast_data": "FunctionDef name:range_input_producer arg:limit arg:num_epochs arg:shuffle arg:seed arg:capacity arg:shared_name arg:name arguments arg arg arg arg arg arg arg With Call Assign Call Return return:yes Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_stack_cache_key",
    "source_code": "def _stack_cache_key(pfor_input: _PforInput):\n    op_type = pfor_input.op_type\n    assert op_type in ['StackPushV2', 'StackPopV2'], op_type\n    orig_handle = pfor_input.op.inputs[0]\n    while orig_handle.op.type in ['Identity', 'Enter']:\n        orig_handle = orig_handle.op.inputs[0]\n    assert orig_handle.op.type == 'StackV2', orig_handle.op\n    return (ops.get_default_graph(), pfor_input.pfor, orig_handle)",
    "docstring": "Create cache key corresponding to a stack handle.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\parallel_for\\pfor.py",
    "ast_data": "FunctionDef name:_stack_cache_key arg:pfor_input arguments arg Assign Compare Assign While Compare Assign Compare Return return:yes Call"
  },
  {
    "library": "authlib",
    "name": "build_request",
    "source_code": "def build_request(url, headers, body, initial_request: Request) -> Request:\n    updated_request = Request(method=initial_request.method, url=url, headers=headers, content=body)\n    if hasattr(initial_request, 'extensions'):\n        updated_request.extensions = initial_request.extensions\n    return updated_request",
    "docstring": "Make sure that all the data from initial request is passed to the updated object.",
    "type": "function",
    "file_path": "authlib\\authlib\\integrations\\httpx_client\\utils.py",
    "ast_data": "FunctionDef name:build_request arg:url arg:headers arg:body arg:initial_request arguments arg arg arg arg Assign Call If Call Assign Return return:yes"
  },
  {
    "library": "numpy",
    "name": "lagweight",
    "source_code": "def lagweight(x):\n    w = np.exp(-x)\n    return w",
    "docstring": "Weight function of the Laguerre polynomials. The weight function is :math: and the interval of integration is :math:. The Laguerre polynomials are orthogonal, but not normalized, with respect to this weight function. Parameters ---------- x : array_like Values at which the weight function will be computed. Returns ------- w : ndarray The weight function at . Examples -------- >>> from numpy.polynomial.laguerre import lagweight >>> x = np.array([0, 1, 2]) >>> lagweight(x) array([1. , 0.36787944, 0.13533528])",
    "type": "function",
    "file_path": "numpy\\numpy\\polynomial\\laguerre.py",
    "ast_data": "FunctionDef name:lagweight arg:x arguments arg Assign Call Return return:yes"
  },
  {
    "library": "kornia",
    "name": "filter_matches",
    "source_code": "def filter_matches(scores: Tensor, th: float) -> Tuple[Tensor, Tensor, Tensor, Tensor]:\n    max0, max1 = (scores[:, :-1, :-1].max(2), scores[:, :-1, :-1].max(1))\n    m0, m1 = (max0.indices, max1.indices)\n    indices0 = arange(m0.shape[1], device=m0.device)[None]\n    indices1 = arange(m1.shape[1], device=m1.device)[None]\n    mutual0 = indices0 == m1.gather(1, m0)\n    mutual1 = indices1 == m0.gather(1, m1)\n    max0_exp = max0.values.exp()\n    zero = max0_exp.new_tensor(0)\n    mscores0 = where(mutual0, max0_exp, zero)\n    mscores1 = where(mutual1, mscores0.gather(1, m1), zero)\n    valid0 = mutual0 & (mscores0 > th)\n    valid1 = mutual1 & valid0.gather(1, m1)\n    m0 = where(valid0, m0, -1)\n    m1 = where(valid1, m1, -1)\n    return (m0, m1, mscores0, mscores1)",
    "docstring": "Obtain matches from a log assignment matrix [Bx M+1 x N+1].",
    "type": "function",
    "file_path": "kornia\\kornia\\feature\\lightglue.py",
    "ast_data": "FunctionDef name:filter_matches arg:scores arg:th arguments arg arg Assign Call Call Assign Assign Call Assign Call Assign Compare Call Assign Compare Call Assign Call Assign Call Assign Call Assign Call Call Assign Compare Assign Call Assign Call Assign Call Return return:yes"
  },
  {
    "library": "scipy",
    "name": "f2",
    "source_code": "def f2(x, h, k):\n    return 1.0 / h * sc.log1p(-h * np.exp(-x))",
    "docstring": "cdf = (1.0 - h*np.exp(-x))**(1.0/h) logcdf = ...",
    "type": "method",
    "file_path": "scipy\\scipy\\stats\\_continuous_distns.py",
    "ast_data": "FunctionDef name:f2 arg:x arg:h arg:k arguments arg arg arg Return return:yes Call Call"
  },
  {
    "library": "numpy",
    "name": "_fromnxfunction",
    "source_code": "class _fromnxfunction:\n\n    def __init__(self, funcname):\n        self.__name__ = funcname\n        self.__qualname__ = funcname\n        self.__doc__ = self.getdoc()\n\n    def getdoc(self):\n        npfunc = getattr(np, self.__name__, None)\n        doc = getattr(npfunc, '__doc__', None)\n        if doc:\n            sig = ma.get_object_signature(npfunc)\n            doc = ma.doc_note(doc, 'The function is applied to both the _data and the _mask, if any.')\n            if sig:\n                sig = self.__name__ + sig + '\\n\\n'\n            return sig + doc\n        return\n\n    def __call__(self, *args, **params):\n        pass",
    "docstring": "Defines a wrapper to adapt NumPy functions to masked arrays. An instance of can be called with the same parameters as the wrapped NumPy function. The docstring of is adapted from the wrapped function as well, see . This class should not be used directly. Instead, one of its extensions that provides support for a specific type of input should be used. Parameters ---------- funcname : str The name of the function to be adapted. The function should be in the NumPy namespace (i.e. ``).",
    "type": "class",
    "file_path": "numpy\\numpy\\ma\\extras.py",
    "ast_data": "ClassDef name:_fromnxfunction FunctionDef name:__init__ arg:self arg:funcname arguments arg arg Assign Assign Assign Call FunctionDef name:getdoc arg:self arguments arg Assign Call Assign Call If Assign Call Assign Call If Assign Return return:yes Return return:no FunctionDef name:__call__ arg:self arguments arg arg arg"
  },
  {
    "library": "pytorch",
    "name": "UninitializedParameter",
    "source_code": "class UninitializedParameter(UninitializedTensorMixin, Parameter):\n    cls_to_become = Parameter\n\n    def __new__(cls, requires_grad=True, device=None, dtype=None) -> None:\n        factory_kwargs = {'device': device, 'dtype': dtype}\n        data = torch.empty(0, **factory_kwargs)\n        return torch.Tensor._make_subclass(cls, data, requires_grad)\n\n    def __deepcopy__(self, memo):\n        if id(self) in memo:\n            return memo[id(self)]\n        else:\n            result = type(self)(self.requires_grad, self.data.device, self.data.dtype)\n            memo[id(self)] = result\n            return result",
    "docstring": "A parameter that is not initialized. Uninitialized Parameters are a special case of :class: where the shape of the data is still unknown. Unlike a :class:, uninitialized parameters hold no data and attempting to access some properties, like their shape, will throw a runtime error. The only operations that can be performed on a uninitialized parameter are changing its datatype, moving it to a different device and converting it to a regular :class:. The default device or dtype to use when the parameter is materialized can be set during construction using e.g. ``.",
    "type": "class",
    "file_path": "pytorch\\torch\\nn\\parameter.py",
    "ast_data": "ClassDef name:UninitializedParameter Assign FunctionDef name:__new__ arg:cls arg:requires_grad arg:device arg:dtype arguments arg arg arg arg Assign Assign Call Return return:yes Call FunctionDef name:__deepcopy__ arg:self arg:memo arguments arg arg If Compare Call Return return:yes Call Assign Call Call Assign Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "ExplainOutput",
    "source_code": "@dataclasses.dataclass\nclass ExplainOutput:\n    graphs: list[torch.fx.GraphModule]\n    graph_count: int\n    graph_break_count: int\n    break_reasons: list[Any]\n    op_count: int\n    ops_per_graph: Optional[list[torch.fx.Node]] = None\n    out_guards: Optional[list[_guards.Guard]] = None\n    compile_times: Optional[str] = None\n\n    def __str__(self) -> str:\n        output = f'Graph Count: {self.graph_count}\\n'\n        output += f'Graph Break Count: {self.graph_break_count}\\n'\n        output += f'Op Count: {self.op_count}\\n'\n        output += 'Break Reasons:\\n'\n        for idx, break_reason in enumerate(self.break_reasons):\n            output += f'  Break Reason {idx + 1}:\\n'\n            output += f'    Reason: {break_reason.reason}\\n'\n            output += '    User Stack:\\n'\n            for frame_summary in break_reason.user_stack:\n                output += f'      {frame_summary}\\n'\n        if self.ops_per_graph is not None:\n            output += 'Ops per Graph:\\n'\n            for idx, ops in enumerate(self.ops_per_graph):\n                output += f'  Ops {idx + 1}:\\n'\n                for op in ops:\n                    output += f'    {op}\\n'\n        if self.out_guards is not None:\n            output += 'Out Guards:\\n'\n            for i, guard in enumerate(self.out_guards):\n                output += f'  Guard {i + 1}:\\n'\n                output += f'    {str(guard)}'\n        if self.compile_times is not None:\n            output += f'Compile Times: {self.compile_times}\\n'\n        return output",
    "docstring": "This is the output of :func: There is no reason to create this class directly.",
    "type": "class",
    "file_path": "pytorch\\torch\\_dynamo\\backends\\debugging.py",
    "ast_data": "ClassDef name:ExplainOutput FunctionDef name:__str__ arg:self arguments arg Assign For Call For If Compare For Call For If Compare For Call Call If Compare Return return:yes"
  },
  {
    "library": "scipy",
    "name": "NewFunction01",
    "source_code": "class NewFunction01(Benchmark):\n\n    def __init__(self, dimensions=2):\n        Benchmark.__init__(self, dimensions)\n        self._bounds = list(zip([-10.0] * self.N, [10.0] * self.N))\n        self.global_optimum = [[-8.46668984648, -9.99980944557]]\n        self.fglob = -0.184648852475\n\n    def fun(self, x, *args):\n        self.nfev += 1\n        return abs(cos(sqrt(abs(x[0] ** 2 + x[1])))) ** 0.5 + 0.01 * (x[0] + x[1])",
    "docstring": "NewFunction01 objective function. This class defines the NewFunction01 [1]_ global optimization problem. This is a multimodal minimization problem defined as follows: .. math:: f_{\\text{NewFunction01}}(x) = \\left | {\\cos\\left(\\sqrt{\\left|{x_{1}^{2} + x_{2}}\\right|}\\right)} \\right |^{0.5} + (x_{1} + x_{2})/100 with :math: for :math:. *Global optimum*: :math: for :math: .. [1] Mishra, S. Global Optimization by Differential Evolution and Particle Swarm Methods: Evaluation on Some Benchmark Functions. Munich Personal RePEc Archive, 2006, 1005 TODO line 355",
    "type": "class",
    "file_path": "scipy\\benchmarks\\benchmarks\\go_benchmark_functions\\go_funcs_N.py",
    "ast_data": "ClassDef name:NewFunction01 FunctionDef name:__init__ arg:self arg:dimensions arguments arg arg Call Assign Call Call Assign Assign FunctionDef name:fun arg:self arg:x arguments arg arg arg Return return:yes Call Call Call Call"
  },
  {
    "library": "scipy",
    "name": "FindPeaks",
    "source_code": "class FindPeaks(Benchmark):\n    param_names = ['distance']\n    params = [[None, 8, 64, 512, 4096]]\n\n    def setup(self, distance):\n        self.x = electrocardiogram()\n\n    def time_find_peaks(self, distance):\n        find_peaks(self.x, distance=distance)",
    "docstring": "Benchmark . Notes ----- The first value of is None in which case the benchmark shows the actual speed of the underlying maxima finding function.",
    "type": "class",
    "file_path": "scipy\\benchmarks\\benchmarks\\peak_finding.py",
    "ast_data": "ClassDef name:FindPeaks Assign Assign FunctionDef name:setup arg:self arg:distance arguments arg arg Assign Call FunctionDef name:time_find_peaks arg:self arg:distance arguments arg arg Call"
  },
  {
    "library": "tensorflow",
    "name": "_EnforceShapeInvariant",
    "source_code": "def _EnforceShapeInvariant(merge_var, next_var):\n    if isinstance(merge_var, tensor_lib.Tensor):\n        m_shape = merge_var.get_shape()\n        n_shape = next_var.get_shape()\n        if not _ShapeLessThanOrEqual(n_shape, m_shape):\n            enter = merge_var.op.inputs[0].op\n            assert util.IsLoopEnter(enter)\n            input_t = enter.inputs[0]\n            raise ValueError(\"Input tensor '%s' enters the loop with shape %s, but has shape %s after one iteration. To allow the shape to vary across iterations, use the `shape_invariants` argument of tf.while_loop to specify a less-specific shape.\" % (input_t.name, input_t.shape, n_shape))\n    else:\n        raise TypeError(f\"'merge_var' must be a Tensor. Received: {type(merge_var)}.\")",
    "docstring": "Check if the shapes of the loops variables are invariants. Args: merge_var: The tensor representing the initial values of the loop variables. next_var: The tensor representing the values of the loop variables after one loop iteration. Raises: ValueError: If any tensor in has a more specific shape than its corresponding tensor in .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\control_flow_ops.py",
    "ast_data": "FunctionDef name:_EnforceShapeInvariant arg:merge_var arg:next_var arguments arg arg If Call Assign Call Assign Call If Call Assign Call Assign Raise Call Raise Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_validate_representative_dataset",
    "source_code": "def _validate_representative_dataset(representative_dataset: rd.RepresentativeDatasetOrMapping, signature_keys: Collection[str]) -> None:\n    if isinstance(representative_dataset, Mapping):\n        if set(signature_keys) != set(representative_dataset.keys()):\n            raise ValueError(f'The signature keys and the keys of representative dataset map do not match. Signature keys: {set(signature_keys)}, representative dataset map: {set(representative_dataset.keys())}.')\n    elif len(signature_keys) > 1:\n        raise ValueError(f'Representative dataset is not a mapping (got: {type(representative_dataset)}), but there is more than one signature key provided. Please provide a map of {{signature_key -> dataset}} with more than one signature key.')",
    "docstring": "Validates the representative dataset, based on the signature keys. Representative dataset can be provided in two different forms: a single instance of or a map of signature key to the corresponding . These have a relationship with . This function validates the following conditions: * If , then should be a mapping where the keys exactly match the elements in . * If , then both a mapping and a single instance of are allowed. * This function also assumes . Args: representative_dataset: A or a map of string to to be validated. signature_keys: A collection of strings that contains the signature keys, each identifying a . Raises: ValueError: Iff does not satisfy the conditions above.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\compiler\\mlir\\quantization\\tensorflow\\python\\py_function_lib.py",
    "ast_data": "FunctionDef name:_validate_representative_dataset arg:representative_dataset arg:signature_keys arguments arg arg If Call If Compare Call Call Call Raise Call Call Call Call If Compare Call Raise Call Call"
  },
  {
    "library": "scikit-learn",
    "name": "requests",
    "source_code": "@property\ndef requests(self):\n    return self._requests",
    "docstring": "Dictionary of the form: ``.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\utils\\_metadata_requests.py",
    "ast_data": "FunctionDef name:requests arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "deserialize",
    "source_code": "@abc.abstractmethod\ndef deserialize(self, encoded_accumulator):\n    pass",
    "docstring": "Deserialize an accumulator received from 'serialize()'. This function deserializes an accumulator serialized by 'serialize()'. Args: encoded_accumulator: A byte string representing an accumulator. Returns: The accumulator represented by the passed byte_string.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\base_preprocessing_layer.py",
    "ast_data": "FunctionDef name:deserialize arg:self arg:encoded_accumulator arguments arg arg"
  },
  {
    "library": "kornia",
    "name": "guided_blur",
    "source_code": "def guided_blur(guidance: Tensor, input: Tensor, kernel_size: tuple[int, int] | int, eps: float | Tensor, border_type: str='reflect', subsample: int=1) -> Tensor:\n    KORNIA_CHECK_IS_TENSOR(guidance)\n    KORNIA_CHECK_SHAPE(guidance, ['B', 'C', 'H', 'W'])\n    if input is not guidance:\n        KORNIA_CHECK_IS_TENSOR(input)\n        KORNIA_CHECK_SHAPE(input, ['B', 'C', 'H', 'W'])\n        KORNIA_CHECK(guidance.shape[0] == input.shape[0] and guidance.shape[-2:] == input.shape[-2:], 'guidance and input should have the same batch size and spatial dimensions')\n    if guidance.shape[1] == 1:\n        return _guided_blur_grayscale_guidance(guidance, input, kernel_size, eps, border_type, subsample)\n    else:\n        return _guided_blur_multichannel_guidance(guidance, input, kernel_size, eps, border_type, subsample)",
    "docstring": "Blur a tensor using a Guided filter. .. image:: _static/img/guided_blur.png The operator is an edge-preserving image smoothing filter. See :cite: and :cite: for details. Guidance and input can have different number of channels. Arguments: guidance: the guidance tensor with shape :math:. input: the input tensor with shape :math:. kernel_size: the size of the kernel. eps: regularization parameter. Smaller values preserve more edges. border_type: the padding mode to be applied before convolving. The expected modes are: `input(B, C, H, W)`. Examples: >>> guidance = torch.rand(2, 3, 5, 5) >>> input = torch.rand(2, 4, 5, 5) >>> output = guided_blur(guidance, input, 3, 0.1) >>> output.shape torch.Size([2, 4, 5, 5])",
    "type": "function",
    "file_path": "kornia\\kornia\\filters\\guided.py",
    "ast_data": "FunctionDef name:guided_blur arg:guidance arg:input arg:kernel_size arg:eps arg:border_type arg:subsample arguments arg arg arg arg arg arg Call Call If Compare Call Call Call BoolOp Compare Compare If Compare Return return:yes Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "experimental_as_proto",
    "source_code": "@abc.abstractmethod\ndef experimental_as_proto(self) -> message.Message:\n    raise NotImplementedError",
    "docstring": "Returns a proto representing this instance.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\core\\function\\trace_type\\serialization.py",
    "ast_data": "FunctionDef name:experimental_as_proto arg:self arguments arg Raise"
  },
  {
    "library": "pytorch",
    "name": "delta",
    "source_code": "def delta(self, other: 'CallgrindStats', inclusive: bool=False) -> FunctionCounts:\n    return self.stats(inclusive=inclusive) - other.stats(inclusive=inclusive)",
    "docstring": "Diff two sets of counts. One common reason to collect instruction counts is to determine the the effect that a particular change will have on the number of instructions needed to perform some unit of work. If a change increases that number, the next logical question is \"why\". This generally involves looking at what part if the code increased in instruction count. This function automates that process so that one can easily diff counts on both an inclusive and exclusive basis.",
    "type": "method",
    "file_path": "pytorch\\torch\\utils\\benchmark\\utils\\valgrind_wrapper\\timer_interface.py",
    "ast_data": "FunctionDef name:delta arg:self arg:other arg:inclusive arguments arg arg arg Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_GetDenseDimensions",
    "source_code": "def _GetDenseDimensions(list_of_lists):\n    if not isinstance(list_of_lists, (list, tuple)):\n        return []\n    elif not list_of_lists:\n        return [0]\n    else:\n        return [len(list_of_lists)] + _GetDenseDimensions(list_of_lists[0])",
    "docstring": "Returns the inferred dense dimensions of a list of lists.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\tensor_util.py",
    "ast_data": "FunctionDef name:_GetDenseDimensions arg:list_of_lists arguments arg If Call Return return:no If Return return:yes Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "compute_live_ranges",
    "source_code": "def compute_live_ranges(self, lines):\n    timestep = 0\n    worklist = collections.deque(lines)\n    while worklist:\n        if isinstance(worklist[0], MemoryPlanningLine):\n            timestep += 1\n            while worklist and isinstance(worklist[0], MemoryPlanningLine):\n                line = worklist.popleft()\n                if isinstance(line, PoolMemoryPlanningLine):\n                    line.group.update_usage(timestep)\n                    line.timestep = timestep\n        else:\n            worklist.popleft()\n    timestep += 1\n    assert self.buffer_groups is not None\n    for group in self.buffer_groups:\n        if group.is_output:\n            group.update_usage(timestep)",
    "docstring": "Populate every BufferGroup.live_ranges field based on first/last usage",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\codegen\\memory_planning.py",
    "ast_data": "FunctionDef name:compute_live_ranges arg:self arg:lines arguments arg arg Assign Assign Call While If Call While BoolOp Call Assign Call If Call Call Assign Call Compare For If Call"
  },
  {
    "library": "tensorflow",
    "name": "is_empty",
    "source_code": "def is_empty(x):\n    if not nest.is_nested(x):\n        return False\n    if isinstance(x, collections_abc.Mapping):\n        return is_empty(list(x.values()))\n    for item in x:\n        if not is_empty(item):\n            return False\n    return True",
    "docstring": "Check whether a possibly nested structure is empty.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\debug\\wrappers\\framework.py",
    "ast_data": "FunctionDef name:is_empty arg:x arguments arg If Call Return return:yes If Call Return return:yes Call Call Call For If Call Return return:yes Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "causal_upper_left",
    "source_code": "def causal_upper_left(*size) -> CausalBias:\n    assert len(size) == 2, 'causal_upper_left only supports 2D tensors'\n    seq_len_q, seq_len_kv = size\n    return CausalBias(CausalVariant.UPPER_LEFT, seq_len_q, seq_len_kv)",
    "docstring": "Creates an upper-left triangular causal bias. This function generates a upper-left triangular matrix to represent causal attention bias with a diagonal offset set so that the inclusive values are aligned to the upper left corner of the matrix. This equivalent to the argument in . The equivalent pytorch code for constructing this bias is: .. code-block:: python torch.tril(torch.ones(size, dtype=torch.bool)) For instance, with , the materialized bias tensor will be: .. code-block:: text [[1, 0, 0, 0], [1, 1, 0, 0], [1, 1, 1, 0]] Args: size: The size of the bias matrix. Returns: CausalBias: The UPPER_LEFT triangular causal bias variant.",
    "type": "function",
    "file_path": "pytorch\\torch\\nn\\attention\\bias.py",
    "ast_data": "FunctionDef name:causal_upper_left arguments arg Compare Call Assign Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "_open_file",
    "source_code": "def _open_file(self) -> None:\n    if not self._entered:\n        warnings.warn('StataReader is being used without using a context manager. Using StataReader as a context manager is the only supported method.', ResourceWarning, stacklevel=find_stack_level())\n    handles = get_handle(self._original_path_or_buf, 'rb', storage_options=self._storage_options, is_text=False, compression=self._compression)\n    if hasattr(handles.handle, 'seekable') and handles.handle.seekable():\n        self._path_or_buf = handles.handle\n        self._close_file = handles.close\n    else:\n        with handles:\n            self._path_or_buf = BytesIO(handles.handle.read())\n        self._close_file = self._path_or_buf.close\n    self._read_header()\n    self._setup_dtype()",
    "docstring": "Open the file (with compression options, etc.), and read header information.",
    "type": "method",
    "file_path": "pandas\\pandas\\io\\stata.py",
    "ast_data": "FunctionDef name:_open_file arg:self arguments arg If Call Call Assign Call If BoolOp Call Call Assign Assign With Assign Call Call Assign Call Call"
  },
  {
    "library": "cherrypy",
    "name": "urljoin",
    "source_code": "def urljoin(*atoms):\n    url = '/'.join([x for x in atoms if x])\n    while '//' in url:\n        url = url.replace('//', '/')\n    return url or '/'",
    "docstring": "Return the given path \\*atoms, joined into a single URL. This will correctly join a SCRIPT_NAME and PATH_INFO into the original URL, even if either atom is blank.",
    "type": "function",
    "file_path": "cherrypy\\cherrypy\\lib\\httputil.py",
    "ast_data": "FunctionDef name:urljoin arguments arg Assign Call While Compare Assign Call Return return:yes BoolOp"
  },
  {
    "library": "matplotlib",
    "name": "get_horizontal_stem_width",
    "source_code": "def get_horizontal_stem_width(self):\n    return self._header.get(b'StdHW', None)",
    "docstring": "Return the standard horizontal stem width as float, or *None* if not specified in AFM file.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\_afm.py",
    "ast_data": "FunctionDef name:get_horizontal_stem_width arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "derivative",
    "source_code": "def derivative(self, n=1):\n    with FITPACK_LOCK:\n        tck = _fitpack_impl.splder(self._eval_args, n)\n    ext = 1 if self.ext == 3 else self.ext\n    return UnivariateSpline._from_tck(tck, ext=ext)",
    "docstring": "Construct a new spline representing the derivative of this spline. Parameters ---------- n : int, optional Order of derivative to evaluate. Default: 1 Returns ------- spline : UnivariateSpline Spline of order k2=k-n representing the derivative of this spline. See Also -------- splder, antiderivative Notes ----- .. versionadded:: 0.13.0 Examples -------- This can be used for finding maxima of a curve: >>> import numpy as np >>> from scipy.interpolate import UnivariateSpline >>> x = np.linspace(0, 10, 70) >>> y = np.sin(x) >>> spl = UnivariateSpline(x, y, k=4, s=0) Now, differentiate the spline and find the zeros of the derivative. (NB: only works for order 3 splines, so we fit an order 4 spline): >>> spl.derivative().roots() / np.pi array([ 0.50000001, 1.5 , 2.49999998]) This agrees well with roots :math: of :math:.",
    "type": "method",
    "file_path": "scipy\\scipy\\interpolate\\_fitpack2.py",
    "ast_data": "FunctionDef name:derivative arg:self arg:n arguments arg arg With Assign Call Assign Compare Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "fit_transform",
    "source_code": "def fit_transform(self, X, y=None, sample_weight=None):\n    return self.fit(X, sample_weight=sample_weight)._transform(X)",
    "docstring": "Compute clustering and transform X to cluster-distance space. Equivalent to fit(X).transform(X), but more efficiently implemented. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) New data to transform. y : Ignored Not used, present here for API consistency by convention. sample_weight : array-like of shape (n_samples,), default=None The weights for each observation in X. If None, all observations are assigned equal weight. Returns ------- X_new : ndarray of shape (n_samples, n_clusters) X transformed in the new space.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\cluster\\_kmeans.py",
    "ast_data": "FunctionDef name:fit_transform arg:self arg:X arg:y arg:sample_weight arguments arg arg arg arg Return return:yes Call Call"
  },
  {
    "library": "scikit-learn",
    "name": "is_ndonnx_array",
    "source_code": "def is_ndonnx_array(x: object) -> TypeIs[ndx.Array]:\n    cls = cast(Hashable, type(x))\n    return _issubclass_fast(cls, 'ndonnx', 'Array')",
    "docstring": "Return True if is a ndonnx Array. This function does not import ndonnx if it has not already been imported and is therefore cheap to use. See Also -------- array_namespace is_array_api_obj is_numpy_array is_cupy_array is_ndonnx_array is_dask_array is_jax_array is_pydata_sparse_array",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\externals\\array_api_compat\\common\\_helpers.py",
    "ast_data": "FunctionDef name:is_ndonnx_array arg:x arguments arg Assign Call Call Return return:yes Call"
  },
  {
    "library": "sphinx",
    "name": "add",
    "source_code": "def add(self, name: str) -> None:\n    if name in self.events:\n        raise ExtensionError(__('Event %r already present') % name)\n    self.events[name] = ''",
    "docstring": "Register a custom Sphinx event.",
    "type": "method",
    "file_path": "sphinx\\sphinx\\events.py",
    "ast_data": "FunctionDef name:add arg:self arg:name arguments arg arg If Compare Raise Call Call Assign"
  },
  {
    "library": "matplotlib",
    "name": "unit_regular_polygon",
    "source_code": "@classmethod\ndef unit_regular_polygon(cls, numVertices):\n    if numVertices <= 16:\n        path = cls._unit_regular_polygons.get(numVertices)\n    else:\n        path = None\n    if path is None:\n        theta = 2 * np.pi / numVertices * np.arange(numVertices + 1) + np.pi / 2\n        verts = np.column_stack((np.cos(theta), np.sin(theta)))\n        path = cls(verts, closed=True, readonly=True)\n        if numVertices <= 16:\n            cls._unit_regular_polygons[numVertices] = path\n    return path",
    "docstring": "Return a :class: instance for a unit regular polygon with the given *numVertices* such that the circumscribing circle has radius 1.0, centered at (0, 0).",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\path.py",
    "ast_data": "FunctionDef name:unit_regular_polygon arg:cls arg:numVertices arguments arg arg If Compare Assign Call Assign If Compare Assign Call Assign Call Call Call Assign Call If Compare Assign Return return:yes"
  },
  {
    "library": "cherrypy",
    "name": "log_traceback",
    "source_code": "def log_traceback(severity=logging.ERROR, debug=False):\n    cherrypy.log('', 'HTTP', severity=severity, traceback=True)",
    "docstring": "Write the last error's traceback to the cherrypy error log.",
    "type": "function",
    "file_path": "cherrypy\\cherrypy\\lib\\cptools.py",
    "ast_data": "FunctionDef name:log_traceback arg:severity arg:debug arguments arg arg Call"
  },
  {
    "library": "pytorch",
    "name": "_parent_name",
    "source_code": "def _parent_name(target: str) -> tuple[str, str]:\n    *parent, name = target.rsplit('.', 1)\n    return (parent[0] if parent else '', name)",
    "docstring": "Splits a qualname into parent path and last atom. For example, -> (, )",
    "type": "function",
    "file_path": "pytorch\\torch\\fx\\experimental\\optimization.py",
    "ast_data": "FunctionDef name:_parent_name arg:target arguments arg Assign Call Return return:yes"
  },
  {
    "library": "cherrypy",
    "name": "show_msg",
    "source_code": "@cherrypy.expose\ndef show_msg(self):\n    return 'Hello world!'",
    "docstring": "Render a \"Hello world!\" message on `` URI.",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\tutorial\\tut02_expose_methods.py",
    "ast_data": "FunctionDef name:show_msg arg:self arguments arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "ELU",
    "source_code": "class ELU(torch.nn.ELU):\n\n    def __init__(self, scale, zero_point, alpha=1.0):\n        super().__init__(alpha)\n        self.scale = scale\n        self.zero_point = zero_point\n\n    def forward(self, input):\n        return torch.ao.nn.quantized.functional.elu(input, self.scale, self.zero_point, self.alpha)\n\n    def _get_name(self):\n        return 'QuantizedELU'\n\n    @staticmethod\n    def from_float(mod, use_precomputed_fake_quant=False):\n        scale, zero_point = mod.activation_post_process.calculate_qparams()\n        return ELU(float(scale), int(zero_point), mod.alpha)\n\n    @classmethod\n    def from_reference(cls, mod, scale, zero_point):\n        return cls(float(scale), int(zero_point), mod.alpha)",
    "docstring": "This is the quantized equivalent of :class:. Args: scale: quantization scale of the output tensor zero_point: quantization zero point of the output tensor alpha: the alpha constant",
    "type": "class",
    "file_path": "pytorch\\torch\\ao\\nn\\quantized\\modules\\activation.py",
    "ast_data": "ClassDef name:ELU FunctionDef name:__init__ arg:self arg:scale arg:zero_point arg:alpha arguments arg arg arg arg Call Call Assign Assign FunctionDef name:forward arg:self arg:input arguments arg arg Return return:yes Call FunctionDef name:_get_name arg:self arguments arg Return return:yes FunctionDef name:from_float arg:mod arg:use_precomputed_fake_quant arguments arg arg Assign Call Return return:yes Call Call Call FunctionDef name:from_reference arg:cls arg:mod arg:scale arg:zero_point arguments arg arg arg arg Return return:yes Call Call Call"
  },
  {
    "library": "pandas",
    "name": "_get_row_repeat",
    "source_code": "def _get_row_repeat(self, row) -> int:\n    from odf.namespaces import TABLENS\n    return int(row.attributes.get((TABLENS, 'number-rows-repeated'), 1))",
    "docstring": "Return number of times this row was repeated Repeating an empty row appeared to be a common way of representing sparse rows in the table.",
    "type": "method",
    "file_path": "pandas\\pandas\\io\\excel\\_odfreader.py",
    "ast_data": "FunctionDef name:_get_row_repeat arg:self arg:row arguments arg arg Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "get_ghstack_dependent_prs",
    "source_code": "def get_ghstack_dependent_prs(repo: GitRepo, pr: GitHubPR, only_closed: bool=True) -> list[tuple[str, GitHubPR]]:\n    assert pr.is_ghstack_pr()\n    orig_ref = f'{repo.remote}/{pr.get_ghstack_orig_ref()}'\n    rev_list = repo.revlist(f'{pr.default_branch()}..{orig_ref}')\n    if len(rev_list) == 0:\n        raise RuntimeError(f'PR {pr.pr_num} does not have any revisions associated with it')\n    skip_len = len(rev_list) - 1\n    for branch in repo.branches_containing_ref(orig_ref):\n        candidate = repo.revlist(f'{pr.default_branch()}..{branch}')\n        if len(candidate) > len(rev_list):\n            candidate, rev_list = (rev_list, candidate)\n        if rev_list[-len(candidate):] != candidate:\n            raise RuntimeError(f'Branch {branch} revlist {', '.join(candidate)} is not a subset of {', '.join(rev_list)}')\n    if skip_len > 0:\n        rev_list = rev_list[:-skip_len]\n    rc: list[tuple[str, GitHubPR]] = []\n    for pr_, sha in _revlist_to_prs(repo, pr, rev_list):\n        if not pr_.is_closed():\n            if not only_closed:\n                rc.append(('', pr_))\n            continue\n        commit_sha = get_pr_commit_sha(repo, pr_)\n        rc.append((commit_sha, pr_))\n    return rc",
    "docstring": "Get the PRs in the stack that are above this PR (inclusive). Throws error if stack have branched or original branches are gone",
    "type": "function",
    "file_path": "pytorch\\.github\\scripts\\trymerge.py",
    "ast_data": "FunctionDef name:get_ghstack_dependent_prs arg:repo arg:pr arg:only_closed arguments arg arg arg Call Assign Call Assign Call Call If Compare Call Raise Call Assign Call For Call Assign Call Call If Compare Call Call Assign If Compare Call Raise Call Call Call If Compare Assign For Call If Call If Call Assign Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "serialize",
    "source_code": "def serialize(metric):\n    return serialize_keras_object(metric)",
    "docstring": "Serializes metric function or instance. Args: metric: A Keras instance or a metric function. Returns: Metric configuration dictionary.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\metrics.py",
    "ast_data": "FunctionDef name:serialize arg:metric arguments arg Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "validate_parse_dates_presence",
    "source_code": "def validate_parse_dates_presence(parse_dates: bool | list, columns: Sequence[Hashable]) -> set:\n    if not isinstance(parse_dates, list):\n        return set()\n    missing = set()\n    unique_cols = set()\n    for col in parse_dates:\n        if isinstance(col, str):\n            if col not in columns:\n                missing.add(col)\n            else:\n                unique_cols.add(col)\n        elif col in columns:\n            unique_cols.add(col)\n        else:\n            unique_cols.add(columns[col])\n    if missing:\n        missing_cols = ', '.join(sorted(missing))\n        raise ValueError(f\"Missing column provided to 'parse_dates': '{missing_cols}'\")\n    return unique_cols",
    "docstring": "Check if parse_dates are in columns. If user has provided names for parse_dates, check if those columns are available. Parameters ---------- columns : list List of names of the dataframe. Returns ------- The names of the columns which will get parsed later if a list is given as specification. Raises ------ ValueError If column to parse_date is not in dataframe.",
    "type": "function",
    "file_path": "pandas\\pandas\\io\\parsers\\base_parser.py",
    "ast_data": "FunctionDef name:validate_parse_dates_presence arg:parse_dates arg:columns arguments arg arg If Call Return return:yes Call Assign Call Assign Call For If Call If Compare Call Call If Compare Call Call If Assign Call Call Raise Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "static_sizes_strides",
    "source_code": "def static_sizes_strides(self, ex: torch.Tensor) -> tuple[list[sympy.Expr], list[sympy.Expr]]:\n    size = [sympy.Integer(i) for i in ex.size()]\n    stride = [sympy.Integer(i) for i in ex.stride()]\n    return (size, stride)",
    "docstring": "Primarily used to weights",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\graph.py",
    "ast_data": "FunctionDef name:static_sizes_strides arg:self arg:ex arguments arg arg Assign Call Call Assign Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_AllToAllRotater",
    "source_code": "class _AllToAllRotater(_RingRotater):\n\n    def __init__(self, pg: dist.ProcessGroup, seq_dim: int) -> None:\n        self._pg = pg\n        self._seq_dim = seq_dim\n        self._buffer: Optional[torch.Tensor] = None\n\n    def exchange_buffers(self, curr_buffer: torch.Tensor) -> None:\n        curr_buffer = curr_buffer.contiguous()\n        size = dist.get_world_size(self._pg)\n        dsts = list(range(1, size)) + [0]\n        self._buffer = ft_c.permute_tensor(curr_buffer, dsts, self._pg)\n\n    def next_buffer(self) -> torch.Tensor:\n        assert self._buffer is not None\n        return _maybe_wait(self._buffer)",
    "docstring": "Use all_to_all to send the kv to the next rank",
    "type": "class",
    "file_path": "pytorch\\torch\\distributed\\tensor\\experimental\\_attention.py",
    "ast_data": "ClassDef name:_AllToAllRotater FunctionDef name:__init__ arg:self arg:pg arg:seq_dim arguments arg arg arg Assign Assign FunctionDef name:exchange_buffers arg:self arg:curr_buffer arguments arg arg Assign Call Assign Call Assign Call Call Assign Call FunctionDef name:next_buffer arg:self arguments arg Compare Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "sharded_grad",
    "source_code": "@property\ndef sharded_grad(self) -> Optional[Tensor]:\n    flat_param = self.flat_param\n    grad: Optional[Tensor]\n    if hasattr(flat_param, '_cpu_grad'):\n        grad = flat_param._cpu_grad\n    elif hasattr(flat_param, '_saved_grad_shard'):\n        grad = flat_param._saved_grad_shard\n    else:\n        _p_assert(flat_param.grad is None or not self.uses_sharded_strategy or self._training_state in (HandleTrainingState.FORWARD, HandleTrainingState.IDLE), 'Sharded strategies should use `_cpu_grad` or `_saved_grad_shard` unless in IDLE or FORWARD')\n        grad = flat_param.grad\n    return grad",
    "docstring": "Return the handle's sharded gradient.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\fsdp\\_flat_param.py",
    "ast_data": "FunctionDef name:sharded_grad arg:self arguments arg Assign If Call Assign If Call Assign Call BoolOp Compare Compare Assign Return return:yes"
  },
  {
    "library": "django",
    "name": "render",
    "source_code": "def render(self):\n    retval = self\n    if not self._is_rendered:\n        self.content = self.rendered_content\n        for post_callback in self._post_render_callbacks:\n            newretval = post_callback(retval)\n            if newretval is not None:\n                retval = newretval\n    return retval",
    "docstring": "Render (thereby finalizing) the content of the response. If the content has already been rendered, this is a no-op. Return the baked response instance.",
    "type": "method",
    "file_path": "django\\django\\template\\response.py",
    "ast_data": "FunctionDef name:render arg:self arguments arg Assign If Assign For Assign Call If Compare Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "dtype",
    "source_code": "@property\ndef dtype(self):\n    return self._dtype",
    "docstring": "The dtype of this variable.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\resource_variable_ops.py",
    "ast_data": "FunctionDef name:dtype arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "__setattr__",
    "source_code": "def __setattr__(self, name, value):\n    if name == 'lr':\n        name = 'learning_rate'\n    if hasattr(self, '_hyper') and name in self._hyper:\n        self._set_hyper(name, value)\n    else:\n        super(OptimizerV2, self).__setattr__(name, value)",
    "docstring": "Override setattr to support dynamic hyperparameter setting.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\optimizer_v2\\optimizer_v2.py",
    "ast_data": "FunctionDef name:__setattr__ arg:self arg:name arg:value arguments arg arg arg If Compare Assign If BoolOp Call Compare Call Call Call"
  },
  {
    "library": "pandas",
    "name": "codes",
    "source_code": "@property\ndef codes(self) -> FrozenList:\n    return self._codes",
    "docstring": "Codes of the MultiIndex. Codes are the position of the index value in the list of level values for each level. Returns ------- tuple of numpy.ndarray The codes of the MultiIndex. Each array in the tuple corresponds to a level in the MultiIndex. See Also -------- MultiIndex.set_codes : Set new codes on MultiIndex. Examples -------- >>> arrays = [[1, 1, 2, 2], [\"red\", \"blue\", \"red\", \"blue\"]] >>> mi = pd.MultiIndex.from_arrays(arrays, names=(\"number\", \"color\")) >>> mi.codes FrozenList([[0, 0, 1, 1], [1, 0, 1, 0]])",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\indexes\\multi.py",
    "ast_data": "FunctionDef name:codes arg:self arguments arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "__init__",
    "source_code": "def __init__(self):\n    super().__init__()\n    self.stats['float'] = None\n    self.stats['quantized'] = None\n    self.count = 0\n    self.float_sum = None\n    self.quant_sum = None",
    "docstring": "Set up initial values for float and quantized stats, count, float sum, and quant sum.",
    "type": "method",
    "file_path": "pytorch\\torch\\ao\\quantization\\_correct_bias.py",
    "ast_data": "FunctionDef name:__init__ arg:self arguments arg Call Call Assign Assign Assign Assign Assign"
  },
  {
    "library": "matplotlib",
    "name": "FFMpegFileWriter",
    "source_code": "@writers.register('ffmpeg_file')\nclass FFMpegFileWriter(FFMpegBase, FileMovieWriter):\n    supported_formats = ['png', 'jpeg', 'tiff', 'raw', 'rgba']\n\n    def _args(self):\n        args = []\n        if self.frame_format in {'raw', 'rgba'}:\n            args += ['-f', 'image2', '-vcodec', 'rawvideo', '-video_size', '%dx%d' % self.frame_size, '-pixel_format', 'rgba']\n        args += ['-framerate', str(self.fps), '-i', self._base_temp_name()]\n        if not self._tmpdir:\n            args += ['-frames:v', str(self._frame_counter)]\n        if _log.getEffectiveLevel() > logging.DEBUG:\n            args += ['-loglevel', 'error']\n        return [self.bin_path(), *args, *self.output_args]",
    "docstring": "File-based ffmpeg writer. Frames are written to temporary files on disk and then stitched together at the end. This effectively works as a slideshow input to ffmpeg with the fps passed as `their notes on frame rates`_ for further details. .. _their notes on frame rates:",
    "type": "class",
    "file_path": "matplotlib\\lib\\matplotlib\\animation.py",
    "ast_data": "ClassDef name:FFMpegFileWriter Assign FunctionDef name:_args arg:self arguments arg Assign If Compare Call Call If Call If Compare Call Return return:yes Call Call"
  },
  {
    "library": "pandas",
    "name": "_calculate_deltas",
    "source_code": "def _calculate_deltas(times: np.ndarray | NDFrame, halflife: float | TimedeltaConvertibleTypes | None) -> npt.NDArray[np.float64]:\n    unit = dtype_to_unit(times.dtype)\n    if isinstance(times, ABCSeries):\n        times = times._values\n    _times = np.asarray(times.view(np.int64), dtype=np.float64)\n    _halflife = float(Timedelta(halflife).as_unit(unit)._value)\n    return np.diff(_times) / _halflife",
    "docstring": "Return the diff of the times divided by the half-life. These values are used in the calculation of the ewm mean. Parameters ---------- times : np.ndarray, Series Times corresponding to the observations. Must be monotonically increasing and `` dtype. halflife : float, str, timedelta, optional Half-life specifying the decay Returns ------- np.ndarray Diff of the times divided by the half-life",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\window\\ewm.py",
    "ast_data": "FunctionDef name:_calculate_deltas arg:times arg:halflife arguments arg arg Assign Call If Call Assign Assign Call Call Assign Call Call Call Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "_asarray_with_order",
    "source_code": "def _asarray_with_order(array, dtype=None, order=None, copy=None, *, xp=None, device=None):\n    xp, _ = get_namespace(array, xp=xp)\n    if _is_numpy_namespace(xp):\n        if copy is True:\n            array = numpy.array(array, order=order, dtype=dtype)\n        else:\n            array = numpy.asarray(array, order=order, dtype=dtype)\n        return xp.asarray(array)\n    else:\n        return xp.asarray(array, dtype=dtype, copy=copy, device=device)",
    "docstring": "Helper to support the order kwarg only for NumPy-backed arrays Memory layout parameter is not exposed in the Array API standard, however some input validation code in scikit-learn needs to work both for classes and functions that will leverage Array API only operations and for code that inherently relies on NumPy backed data containers with specific memory layout constraints (e.g. our own Cython code). The purpose of this helper is to make it possible to share code for data container validation without memory copies for both downstream use cases: the parameter is only enforced if the input array implementation is NumPy based, otherwise is just silently ignored.",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\utils\\_array_api.py",
    "ast_data": "FunctionDef name:_asarray_with_order arg:array arg:dtype arg:order arg:copy arguments arg arg arg arg arg arg Assign Call If Call If Compare Assign Call Assign Call Return return:yes Call Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "get_n_splits",
    "source_code": "@abstractmethod\ndef get_n_splits(self, X=None, y=None, groups=None):\n    pass",
    "docstring": "Returns the number of splitting iterations in the cross-validator.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\model_selection\\_split.py",
    "ast_data": "FunctionDef name:get_n_splits arg:self arg:X arg:y arg:groups arguments arg arg arg arg"
  },
  {
    "library": "pandas",
    "name": "_escape_latex",
    "source_code": "def _escape_latex(s: str) -> str:\n    return s.replace('\\\\', 'ab2§=§8yz').replace('ab2§=§8yz ', 'ab2§=§8yz\\\\space ').replace('&', '\\\\&').replace('%', '\\\\%').replace('$', '\\\\$').replace('",
    "docstring": "Replace the characters `` in the string with LaTeX-safe sequences. Use this if you need to display text that might contain such characters in LaTeX. Parameters ---------- s : str Input to be escaped Return ------ str : Escaped string",
    "type": "function",
    "file_path": "pandas\\pandas\\io\\formats\\style_render.py",
    "ast_data": "FunctionDef name:_escape_latex arg:s arguments arg Return return:yes Call Call Call Call Call Call Call Call Call Call Call Call Call Call"
  },
  {
    "library": "pandas",
    "name": "_get_number_of_auxiliary_rows",
    "source_code": "def _get_number_of_auxiliary_rows(self) -> int:\n    dot_row = 1\n    prompt_row = 1\n    num_rows = dot_row + prompt_row\n    if self.show_dimensions:\n        num_rows += len(self.dimensions_info.splitlines())\n    if self.header:\n        num_rows += 1\n    return num_rows",
    "docstring": "Get number of rows occupied by prompt, dots and dimension info.",
    "type": "method",
    "file_path": "pandas\\pandas\\io\\formats\\format.py",
    "ast_data": "FunctionDef name:_get_number_of_auxiliary_rows arg:self arguments arg Assign Assign Assign If Call Call If Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "assign",
    "source_code": "def assign(val, name=None):\n    if name is None:\n        name = parent_name + '_assign'\n    return var._strided_slice_assign(begin=begin, end=end, strides=strides, value=val, name=name, begin_mask=begin_mask, end_mask=end_mask, ellipsis_mask=ellipsis_mask, new_axis_mask=new_axis_mask, shrink_axis_mask=shrink_axis_mask)",
    "docstring": "Closure that holds all the arguments to create an assignment.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\array_ops.py",
    "ast_data": "FunctionDef name:assign arg:val arg:name arguments arg arg If Compare Assign Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_set_graph_parents",
    "source_code": "def _set_graph_parents(self, graph_parents):\n    graph_parents = [] if graph_parents is None else graph_parents\n    for i, t in enumerate(graph_parents):\n        if t is None or not (linear_operator_util.is_ref(t) or tensor_util.is_tf_type(t)):\n            raise ValueError('Graph parent item %d is not a Tensor; %s.' % (i, t))\n    self._graph_parents = graph_parents",
    "docstring": "Set self._graph_parents. Called during derived class init. This method allows derived classes to set graph_parents, without triggering a deprecation warning (which is invoked if is passed during . Args: graph_parents: Iterable over Tensors.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\linalg\\linear_operator.py",
    "ast_data": "FunctionDef name:_set_graph_parents arg:self arg:graph_parents arguments arg arg Assign Compare For Call If BoolOp Compare BoolOp Call Call Raise Call Assign"
  },
  {
    "library": "tensorflow",
    "name": "currentframe",
    "source_code": "def currentframe():\n    return _inspect.stack()[1][0]",
    "docstring": "TFDecorator-aware replacement for inspect.currentframe.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\utils\\tf_inspect.py",
    "ast_data": "FunctionDef name:currentframe arguments Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "generate_env_vars_string",
    "source_code": "def generate_env_vars_string(*, stable_output=False):\n    if stable_output:\n        return '# env var omitted due to stable_output=True'\n    allow_list = ['TORCH', 'DYNAMO', 'INDUCTOR', 'TRITON']\n    skip_list = ['TRITON_LIBDEVICE_PATH', 'TRITON_PTXAS_PATH', 'TRITON_LIBCUDA_PATH']\n\n    def filter(key):\n        return any((string in key for string in allow_list)) and key not in skip_list\n    config_lines = [f\"os.environ['{key}'] = '{value}'\" for key, value in os.environ.items() if filter(key)]\n    config_string = '\\n'.join(config_lines)\n    return f'import os\\n{config_string}\\n    '",
    "docstring": "Generate a string configuration for environment variables related to Dynamo, Inductor, and Triton.",
    "type": "function",
    "file_path": "pytorch\\torch\\_dynamo\\debug_utils.py",
    "ast_data": "FunctionDef name:generate_env_vars_string arguments arg If Return return:yes Assign Assign FunctionDef name:filter arg:key arguments arg Return return:yes BoolOp Call Compare Compare Assign Call Call Assign Call Return return:yes"
  },
  {
    "library": "scipy",
    "name": "Schwefel02",
    "source_code": "class Schwefel02(Benchmark):\n    change_dimensionality = True\n\n    def __init__(self, dimensions=2):\n        Benchmark.__init__(self, dimensions)\n        self._bounds = list(zip([-100.0] * self.N, [100.0] * self.N))\n        self.custom_bounds = ([-4.0, 4.0], [-4.0, 4.0])\n        self.global_optimum = [[0.0 for _ in range(self.N)]]\n        self.fglob = 0.0\n\n    def fun(self, x, *args):\n        self.nfev += 1\n        mat = repeat(atleast_2d(x), self.N, axis=0)\n        inner = sum(tril(mat), axis=1)\n        return sum(inner ** 2)",
    "docstring": "Schwefel 2 objective function. This class defines the Schwefel 2 [1]_ global optimization problem. This is a unimodal minimization problem defined as follows: .. math:: f_{\\text{Schwefel02}}(x) = \\sum_{i=1}^n \\left(\\sum_{j=1}^i x_i \\right)^2 Here, :math: represents the number of dimensions and :math: for :math:. *Global optimum*: :math: for :math: for :math: .. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions For Global Optimization Problems Int. Journal of Mathematical Modelling and Numerical Optimisation, 2013, 4, 150-194.",
    "type": "class",
    "file_path": "scipy\\benchmarks\\benchmarks\\go_benchmark_functions\\go_funcs_S.py",
    "ast_data": "ClassDef name:Schwefel02 Assign FunctionDef name:__init__ arg:self arg:dimensions arguments arg arg Call Assign Call Call Assign Assign Call Assign FunctionDef name:fun arg:self arg:x arguments arg arg arg Assign Call Call Assign Call Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "_awaitable_wait",
    "source_code": "def _awaitable_wait(aw):\n    return torch._C._awaitable_wait(aw)",
    "docstring": "Request await the result of execution, if Await is not completed yet, the func will be called immediately.",
    "type": "function",
    "file_path": "pytorch\\torch\\jit\\_await.py",
    "ast_data": "FunctionDef name:_awaitable_wait arg:aw arguments arg Return return:yes Call"
  },
  {
    "library": "numpy",
    "name": "write_meson_build",
    "source_code": "def write_meson_build(self, build_dir: Path) -> None:\n    meson_template = MesonTemplate(self.modulename, self.sources, self.dependencies, self.libraries, self.library_dirs, self.include_dirs, self.extra_objects, self.flib_flags, self.fc_flags, self.build_type, sys.executable)\n    src = meson_template.generate_meson_build()\n    Path(build_dir).mkdir(parents=True, exist_ok=True)\n    meson_build_file = Path(build_dir) / 'meson.build'\n    meson_build_file.write_text(src)\n    return meson_build_file",
    "docstring": "Writes the meson build file at specified location",
    "type": "method",
    "file_path": "numpy\\numpy\\f2py\\_backends\\_meson.py",
    "ast_data": "FunctionDef name:write_meson_build arg:self arg:build_dir arguments arg arg Assign Call Assign Call Call Call Assign Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "benchmark_config",
    "source_code": "@tf_export('test.benchmark_config')\ndef benchmark_config():\n    config = config_pb2.ConfigProto()\n    config.graph_options.rewrite_options.dependency_optimization = rewriter_config_pb2.RewriterConfig.OFF\n    return config",
    "docstring": "Returns a tf.compat.v1.ConfigProto for disabling the dependency optimizer. Returns: A TensorFlow ConfigProto object.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\platform\\benchmark.py",
    "ast_data": "FunctionDef name:benchmark_config arguments Assign Call Assign Return return:yes Call"
  },
  {
    "library": "authlib",
    "name": "validate_introspection_endpoint_auth_signing_alg_values_supported",
    "source_code": "def validate_introspection_endpoint_auth_signing_alg_values_supported(self):\n    _validate_alg_values(self, 'introspection_endpoint_auth_signing_alg_values_supported', self.introspection_endpoint_auth_methods_supported)",
    "docstring": "OPTIONAL. JSON array containing a list of the JWS signing algorithms (\"alg\" values) supported by the introspection endpoint for the signature on the JWT [JWT] used to authenticate the client at the introspection endpoint for the \"private_key_jwt\" and \"client_secret_jwt\" authentication methods. This metadata entry MUST be present if either of these authentication methods are specified in the \"introspection_endpoint_auth_methods_supported\" entry. No default algorithms are implied if this entry is omitted. The value \"none\" MUST NOT be used.",
    "type": "method",
    "file_path": "authlib\\authlib\\oauth2\\rfc8414\\models.py",
    "ast_data": "FunctionDef name:validate_introspection_endpoint_auth_signing_alg_values_supported arg:self arguments arg Call"
  },
  {
    "library": "tensorflow",
    "name": "ndtr",
    "source_code": "def ndtr(x, name='ndtr'):\n    with ops.name_scope(name, values=[x]):\n        x = ops.convert_to_tensor(x, name='x')\n        if x.dtype.as_numpy_dtype not in [np.float32, np.float64]:\n            raise TypeError('x.dtype=%s is not handled, see docstring for supported types.' % x.dtype)\n        return _ndtr(x)",
    "docstring": "Normal distribution function. Returns the area under the Gaussian probability density function, integrated from minus infinity to x: Args: x: of type , . name: Python string. A name for the operation (default=\"ndtr\"). Returns: ndtr: with . Raises: TypeError: if is not floating-type.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\distributions\\special_math.py",
    "ast_data": "FunctionDef name:ndtr arg:x arg:name arguments arg arg With Call Assign Call If Compare Raise Call Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "Schaffer02",
    "source_code": "class Schaffer02(Benchmark):\n\n    def __init__(self, dimensions=2):\n        Benchmark.__init__(self, dimensions)\n        self._bounds = list(zip([-100.0] * self.N, [100.0] * self.N))\n        self.custom_bounds = [(-10, 10), (-10, 10)]\n        self.global_optimum = [[0.0 for _ in range(self.N)]]\n        self.fglob = 0.0\n\n    def fun(self, x, *args):\n        self.nfev += 1\n        num = sin(x[0] ** 2 - x[1] ** 2) ** 2 - 0.5\n        den = (1 + 0.001 * (x[0] ** 2 + x[1] ** 2)) ** 2\n        return 0.5 + num / den",
    "docstring": "Schaffer 2 objective function. This class defines the Schaffer 2 [1]_ global optimization problem. This is a multimodal minimization problem defined as follows: .. math:: f_{\\text{Schaffer02}}(x) = 0.5 + \\frac{\\sin^2 (x_1^2 - x_2^2)^2 - 0.5} {1 + 0.001(x_1^2 + x_2^2)^2} with :math: for :math:. *Global optimum*: :math: for :math: for :math: .. [1] Mishra, S. Some new test functions for global optimization and performance of repulsive particle swarm method. Munich Personal RePEc Archive, 2006, 2718",
    "type": "class",
    "file_path": "scipy\\benchmarks\\benchmarks\\go_benchmark_functions\\go_funcs_S.py",
    "ast_data": "ClassDef name:Schaffer02 FunctionDef name:__init__ arg:self arg:dimensions arguments arg arg Call Assign Call Call Assign Assign Call Assign FunctionDef name:fun arg:self arg:x arguments arg arg arg Assign Call Assign Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "micro",
    "source_code": "@property\ndef micro(self) -> int:\n    return self.release[2] if len(self.release) >= 3 else 0",
    "docstring": "The third item of :attr: or `` if unavailable. >>> Version(\"1.2.3\").micro 3 >>> Version(\"1\").micro 0",
    "type": "method",
    "file_path": "pytorch\\torch\\_vendor\\packaging\\version.py",
    "ast_data": "FunctionDef name:micro arg:self arguments arg Return return:yes Compare Call"
  },
  {
    "library": "pytorch",
    "name": "Dropout",
    "source_code": "class Dropout(torch.nn.Dropout):\n\n    def forward(self, input):\n        return input\n\n    def _get_name(self):\n        return 'QuantizedDropout'\n\n    @classmethod\n    def from_float(cls, mod, use_precomputed_fake_quant=False):\n        return cls(mod.p, mod.inplace)\n\n    @classmethod\n    def from_reference(cls, mod, scale, zero_point):\n        return cls(mod.p, mod.inplace)",
    "docstring": "This is the quantized equivalent of :class:. And this is a placeholder to enable models where fp32 tensors had dropout to work with quantized tensors in train and eval mode. Args: p: probability of an element to be zeroed inplace: can optionally do the operation in-place. Default: ``",
    "type": "class",
    "file_path": "pytorch\\torch\\ao\\nn\\quantized\\modules\\dropout.py",
    "ast_data": "ClassDef name:Dropout FunctionDef name:forward arg:self arg:input arguments arg arg Return return:yes FunctionDef name:_get_name arg:self arguments arg Return return:yes FunctionDef name:from_float arg:cls arg:mod arg:use_precomputed_fake_quant arguments arg arg arg Return return:yes Call FunctionDef name:from_reference arg:cls arg:mod arg:scale arg:zero_point arguments arg arg arg arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "get_asset_filename_to_add",
    "source_code": "def get_asset_filename_to_add(asset_filepath, asset_filename_map):\n    asset_filename = os.path.basename(asset_filepath)\n    if asset_filename not in asset_filename_map:\n        return asset_filename\n    other_asset_filepath = asset_filename_map[asset_filename]\n    if other_asset_filepath == asset_filepath:\n        return asset_filename\n    if not file_io.filecmp(asset_filepath, other_asset_filepath):\n        return _get_unique_asset_filename(asset_filename, asset_filename_map)\n    return asset_filename",
    "docstring": "Get a unique basename to add to the SavedModel if this file is unseen. Assets come from users as full paths, and we save them out to the SavedModel as basenames. In some cases, the basenames collide. Here, we dedupe asset basenames by first checking if the file is the same, and, if different, generate and return an index-suffixed basename that can be used to add the asset to the SavedModel. Args: asset_filepath: the full path to the asset that is being saved asset_filename_map: a dict of filenames used for saving the asset in the SavedModel to full paths from which the filenames were derived. Returns: Uniquified filename string if the file is not a duplicate, or the original filename if the file has already been seen and saved.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\saved_model\\builder_impl.py",
    "ast_data": "FunctionDef name:get_asset_filename_to_add arg:asset_filepath arg:asset_filename_map arguments arg arg Assign Call If Compare Return return:yes Assign If Compare Return return:yes If Call Return return:yes Call Return return:yes"
  },
  {
    "library": "scipy",
    "name": "read_var_header",
    "source_code": "def read_var_header(self):\n    hdr = self._matrix_reader.read_header()\n    remaining_bytes = reduce(mul, hdr.dims, np.int64(hdr.dtype.itemsize))\n    if hdr.is_complex and (not hdr.mclass == mxSPARSE_CLASS):\n        remaining_bytes *= 2\n    next_position = self.mat_stream.tell() + remaining_bytes\n    return (hdr, next_position)",
    "docstring": "Read and return header, next position Parameters ---------- None Returns ------- header : object object that can be passed to self.read_var_array, and that has attributes `` next_position : int position in stream of next variable",
    "type": "method",
    "file_path": "scipy\\scipy\\io\\matlab\\_mio4.py",
    "ast_data": "FunctionDef name:read_var_header arg:self arguments arg Assign Call Assign Call Call If BoolOp Compare Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_generate_zero_filled_state",
    "source_code": "def _generate_zero_filled_state(batch_size_tensor, state_size, dtype):\n    if batch_size_tensor is None or dtype is None:\n        raise ValueError('batch_size and dtype cannot be None while constructing initial state: batch_size={}, dtype={}'.format(batch_size_tensor, dtype))\n\n    def create_zeros(unnested_state_size):\n        flat_dims = tensor_shape.TensorShape(unnested_state_size).as_list()\n        init_state_size = [batch_size_tensor] + flat_dims\n        return array_ops.zeros(init_state_size, dtype=dtype)\n    if nest.is_nested(state_size):\n        return nest.map_structure(create_zeros, state_size)\n    else:\n        return create_zeros(state_size)",
    "docstring": "Generate a zero filled tensor with shape [batch_size, state_size].",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\layers\\recurrent.py",
    "ast_data": "FunctionDef name:_generate_zero_filled_state arg:batch_size_tensor arg:state_size arg:dtype arguments arg arg arg If BoolOp Compare Compare Raise Call Call FunctionDef name:create_zeros arg:unnested_state_size arguments arg Assign Call Call Assign Return return:yes Call If Call Return return:yes Call Return return:yes Call"
  },
  {
    "library": "cryptography",
    "name": "extensions",
    "source_code": "@property\n@abc.abstractmethod\ndef extensions(self) -> Extensions:\n    pass",
    "docstring": "Returns an Extensions object containing a list of Revoked extensions.",
    "type": "method",
    "file_path": "cryptography\\src\\cryptography\\x509\\base.py",
    "ast_data": "FunctionDef name:extensions arg:self arguments arg"
  },
  {
    "library": "pytorch",
    "name": "_annotate_linear_node_helper",
    "source_code": "def _annotate_linear_node_helper(self, linear_node: torch.fx.Node, annotate_output: bool, quantization_config: Optional[QuantizationConfig]) -> None:\n    if quantization_config is None:\n        _annotate_nodes_not_quantize(linear_node)\n        return\n    input_qspec_map = {}\n    assert linear_node.target in (torch.ops.aten.linear.default,)\n    has_bias = len(linear_node.args) == 3\n    input_index = 0\n    weight_index = 1\n    bias_index = 2\n    input_node = linear_node.args[input_index]\n    assert isinstance(input_node, Node)\n    input_qspec_map[input_node] = get_input_act_qspec(quantization_config)\n    weight_node = linear_node.args[weight_index]\n    assert isinstance(weight_node, Node)\n    input_qspec_map[weight_node] = get_weight_qspec(quantization_config)\n    bias_node = linear_node.args[bias_index] if has_bias else None\n    if isinstance(bias_node, Node):\n        input_qspec_map[bias_node] = get_bias_qspec(quantization_config)\n    if annotate_output:\n        linear_node.meta[QUANT_ANNOTATION_KEY] = _X86InductorQuantizationAnnotation(input_qspec_map=input_qspec_map, _annotated=True, _is_output_of_quantized_pattern=True)\n    else:\n        linear_node.meta[QUANT_ANNOTATION_KEY] = _X86InductorQuantizationAnnotation(input_qspec_map=input_qspec_map, _annotated=True)",
    "docstring": "Helper function to annotate the linear node",
    "type": "method",
    "file_path": "pytorch\\torch\\ao\\quantization\\quantizer\\x86_inductor_quantizer.py",
    "ast_data": "FunctionDef name:_annotate_linear_node_helper arg:self arg:linear_node arg:annotate_output arg:quantization_config arguments arg arg arg arg If Compare Call Return return:no Assign Compare Assign Compare Call Assign Assign Assign Assign Call Assign Call Assign Call Assign Call Assign If Call Assign Call If Assign Call Assign Call"
  },
  {
    "library": "django",
    "name": "check_apps_ready",
    "source_code": "def check_apps_ready(self):\n    if not self.apps_ready:\n        from django.conf import settings\n        settings.INSTALLED_APPS\n        raise AppRegistryNotReady(\"Apps aren't loaded yet.\")",
    "docstring": "Raise an exception if all apps haven't been imported yet.",
    "type": "method",
    "file_path": "django\\django\\apps\\registry.py",
    "ast_data": "FunctionDef name:check_apps_ready arg:self arguments arg If Raise Call"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, name, freevars, extra_locals):\n    self._name = name\n    self._freevars = freevars\n    self._extra_locals = extra_locals\n    self._unbound_factory = None\n    self.module = None\n    self.source_map = None",
    "docstring": "Creates a new factory for a Python function. Args: name: The function name. freevars: The list of non-global free variables for the function. extra_locals: Dict[Text, Any], names and values for custom variables that are accessible to the generated code as local variables.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\autograph\\pyct\\transpiler.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:name arg:freevars arg:extra_locals arguments arg arg arg arg Assign Assign Assign Assign Assign Assign"
  },
  {
    "library": "scikit-learn",
    "name": "predict",
    "source_code": "def predict(self, X):\n    check_is_fitted(self)\n    decision_func = self.decision_function(X)\n    is_inlier = np.ones_like(decision_func, dtype=int)\n    is_inlier[decision_func < 0] = -1\n    return is_inlier",
    "docstring": "Predict if a particular sample is an outlier or not. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) The input samples. Internally, it will be converted to ``. This is because, predict may actually be faster without parallelization for a small number of samples, such as for 1000 samples or less. The user can set the number of jobs in the joblib context to control the number of parallel jobs. .. code-block:: python from joblib import parallel_backend # Note, we use threading here as the predict method is not CPU bound. with parallel_backend(\"threading\", n_jobs=4): model.predict(X)",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\ensemble\\_iforest.py",
    "ast_data": "FunctionDef name:predict arg:self arg:X arguments arg arg Call Assign Call Assign Call Assign Compare Return return:yes"
  },
  {
    "library": "numpy",
    "name": "hardmask",
    "source_code": "@property\ndef hardmask(self):\n    return self._hardmask",
    "docstring": "Specifies whether values can be unmasked through assignments. By default, assigning definite values to masked array entries will unmask them. When is `m` has a soft mask, assigning an element value unmasks that element: >>> m[8] = 42 >>> m masked_array(data=[0, 1, 2, 3, 4, 5, --, --, 42, --], mask=[False, False, False, False, False, False, True, True, False, True], fill_value=999999) After hardening, the mask is not affected by assignments: >>> hardened = np.ma.harden_mask(m) >>> assert m.hardmask and hardened is m >>> m[:] = 23 >>> m masked_array(data=[23, 23, 23, 23, 23, 23, --, --, 23, --], mask=[False, False, False, False, False, False, True, True, False, True], fill_value=999999)",
    "type": "method",
    "file_path": "numpy\\numpy\\ma\\core.py",
    "ast_data": "FunctionDef name:hardmask arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_calc_mat_mul_flops",
    "source_code": "@ops.RegisterStatistics('MatMul', 'flops')\ndef _calc_mat_mul_flops(graph, node):\n    transpose_a = node.attr['transpose_a'].b\n    a_shape = graph_util.tensor_shape_from_node_def_name(graph, node.input[0])\n    a_shape.assert_is_fully_defined()\n    if transpose_a:\n        k = int(a_shape[0])\n    else:\n        k = int(a_shape[1])\n    output_shape = graph_util.tensor_shape_from_node_def_name(graph, node.name)\n    output_shape.assert_is_fully_defined()\n    output_count = np.prod(output_shape.as_list())\n    return ops.OpStats('flops', k * output_count * 2)",
    "docstring": "Calculates the compute resources needed for MatMul.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\profiler\\internal\\flops_registry.py",
    "ast_data": "FunctionDef name:_calc_mat_mul_flops arg:graph arg:node arguments arg arg Assign Assign Call Call If Assign Call Assign Call Assign Call Call Assign Call Call Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_get_mlir_ssa_values",
    "source_code": "def _get_mlir_ssa_values(self, name_prefix, out_types):\n    out_ssa_values = []\n    if not out_types:\n        return ('', out_ssa_values)\n    out_name = self._ssa_name(name_prefix)\n    if len(out_types) == 1:\n        out_name_suffix = ''\n        out_ssa_values.append(out_name)\n    else:\n        out_name_suffix = ':{}'.format(len(out_types))\n        for idx, _ in enumerate(out_types):\n            out_ssa_values.append('{}#{}'.format(out_name, idx))\n    return ('{}{}'.format(out_name, out_name_suffix), out_ssa_values)",
    "docstring": "Create MLIR convention SSA values.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\compiler\\mlir\\tfr\\python\\tfr_gen.py",
    "ast_data": "FunctionDef name:_get_mlir_ssa_values arg:self arg:name_prefix arg:out_types arguments arg arg arg Assign If Return return:yes Assign Call If Compare Call Assign Call Assign Call Call For Call Call Call Return return:yes Call"
  },
  {
    "library": "authlib",
    "name": "validate_request_object_encryption_alg_values_supported",
    "source_code": "def validate_request_object_encryption_alg_values_supported(self):\n    validate_array_value(self, 'request_object_encryption_alg_values_supported')",
    "docstring": "OPTIONAL. JSON array containing a list of the JWE encryption algorithms (alg values) supported by the OP for Request Objects. These algorithms are used both when the Request Object is passed by value and when it is passed by reference.",
    "type": "method",
    "file_path": "authlib\\authlib\\oidc\\discovery\\models.py",
    "ast_data": "FunctionDef name:validate_request_object_encryption_alg_values_supported arg:self arguments arg Call"
  },
  {
    "library": "scipy",
    "name": "braycurtis",
    "source_code": "def braycurtis(u, v, w=None):\n    u = _validate_vector(u)\n    v = _validate_vector(v, dtype=np.float64)\n    l1_diff = abs(u - v)\n    l1_sum = abs(u + v)\n    if w is not None:\n        w = _validate_weights(w)\n        l1_diff = w * l1_diff\n        l1_sum = w * l1_sum\n    return l1_diff.sum() / l1_sum.sum()",
    "docstring": "Compute the Bray-Curtis distance between two 1-D arrays. Bray-Curtis distance is defined as .. math:: \\sum{|u_i-v_i|} / \\sum{|u_i+v_i|} The Bray-Curtis distance is in the range [0, 1] if all coordinates are positive, and is undefined if the inputs are of length zero. Parameters ---------- u : (N,) array_like Input array. v : (N,) array_like Input array. w : (N,) array_like, optional The weights for each value in and . Default is None, which gives each value a weight of 1.0 Returns ------- braycurtis : double The Bray-Curtis distance between 1-D arrays and . Examples -------- >>> from scipy.spatial import distance >>> distance.braycurtis([1, 0, 0], [0, 1, 0]) 1.0 >>> distance.braycurtis([1, 1, 0], [0, 1, 0]) 0.33333333333333331",
    "type": "function",
    "file_path": "scipy\\scipy\\spatial\\distance.py",
    "ast_data": "FunctionDef name:braycurtis arg:u arg:v arg:w arguments arg arg arg Assign Call Assign Call Assign Call Assign Call If Compare Assign Call Assign Assign Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "parse_example_spec",
    "source_code": "@property\ndef parse_example_spec(self):\n    return self.source_column.parse_example_spec",
    "docstring": "See base class.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\feature_column\\feature_column_v2.py",
    "ast_data": "FunctionDef name:parse_example_spec arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, dtypes, shapes=None, names=None, shared_name=None, ordered=False, capacity=0, memory_limit=0):\n    super(MapStagingArea, self).__init__(dtypes, shapes, names, shared_name, capacity, memory_limit)\n    self._ordered = ordered\n    if ordered:\n        self._put_fn = gen_data_flow_ops.ordered_map_stage\n        self._pop_fn = gen_data_flow_ops.ordered_map_unstage\n        self._popitem_fn = gen_data_flow_ops.ordered_map_unstage_no_key\n        self._peek_fn = gen_data_flow_ops.ordered_map_peek\n        self._size_fn = gen_data_flow_ops.ordered_map_size\n        self._incomplete_size_fn = gen_data_flow_ops.ordered_map_incomplete_size\n        self._clear_fn = gen_data_flow_ops.ordered_map_clear\n    else:\n        self._put_fn = gen_data_flow_ops.map_stage\n        self._pop_fn = gen_data_flow_ops.map_unstage\n        self._popitem_fn = gen_data_flow_ops.map_unstage_no_key\n        self._peek_fn = gen_data_flow_ops.map_peek\n        self._size_fn = gen_data_flow_ops.map_size\n        self._incomplete_size_fn = gen_data_flow_ops.map_incomplete_size\n        self._clear_fn = gen_data_flow_ops.map_clear",
    "docstring": "Args: dtypes: A list of types. The length of dtypes must equal the number of tensors in each element. capacity: (Optional.) Maximum number of elements. An integer. If zero, the Staging Area is unbounded memory_limit: (Optional.) Maximum number of bytes of all tensors in the Staging Area (excluding keys). An integer. If zero, the Staging Area is unbounded ordered: (Optional.) If True the underlying data structure is a tree ordered on key. Otherwise assume a hashtable. shapes: (Optional.) Constraints on the shapes of tensors in an element. A list of shape tuples or None. This list is the same length as dtypes. If the shape of any tensors in the element are constrained, all must be; shapes can be None if the shapes should not be constrained. names: (Optional.) If provided, the and methods will use dictionaries with these names as keys. Must be None or a list or tuple of the same length as . shared_name: (Optional.) A name to be used for the shared object. By passing the same name to two different python objects they will share the underlying staging area. Must be a string. Raises: ValueError: If one of the arguments is invalid.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\data_flow_ops.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:dtypes arg:shapes arg:names arg:shared_name arg:ordered arg:capacity arg:memory_limit arguments arg arg arg arg arg arg arg arg Call Call Assign If Assign Assign Assign Assign Assign Assign Assign Assign Assign Assign Assign Assign Assign Assign"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, *args, **kwargs):\n    super(UnionClusterResolver, self).__init__()\n    self._rpc_layer = kwargs.pop('rpc_layer', None)\n    self._task_type = kwargs.pop('task_type', None)\n    self._task_id = kwargs.pop('task_id', None)\n    if kwargs:\n        raise ValueError('Unexpected kwargs provided {!r}'.format(kwargs))\n    if not args:\n        raise ValueError('At least one ClusterResolver is required.')\n    for cluster_resolver in args:\n        if not isinstance(cluster_resolver, ClusterResolver):\n            raise TypeError('All arguments must be a sub-class of `ClusterResolver.`')\n    self._cluster_resolvers = args",
    "docstring": "Initializes a UnionClusterResolver with other ClusterResolvers. Args: *args: objects to be unionized. **kwargs: rpc_layer - (Optional) Override value for the RPC layer used by TensorFlow. task_type - (Optional) Override value for the current task type. task_id - (Optional) Override value for the current task index. Raises: TypeError: If any argument is not a subclass of . ValueError: If there are no arguments passed.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\cluster_resolver\\cluster_resolver.py",
    "ast_data": "FunctionDef name:__init__ arg:self arguments arg arg arg Call Call Assign Call Assign Call Assign Call If Raise Call Call If Raise Call For If Call Raise Call Assign"
  },
  {
    "library": "tensorflow",
    "name": "parse_example_spec",
    "source_code": "@property\ndef parse_example_spec(self):\n    return self.categorical_column.parse_example_spec",
    "docstring": "See base class.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\feature_column\\feature_column_v2.py",
    "ast_data": "FunctionDef name:parse_example_spec arg:self arguments arg Return return:yes"
  },
  {
    "library": "sphinx",
    "name": "generate_clickable_map",
    "source_code": "def generate_clickable_map(self) -> str:\n    if self.clickable:\n        return '\\n'.join((self.content[0], *self.clickable, self.content[-1]))\n    else:\n        return ''",
    "docstring": "Generate clickable map tags if clickable item exists. If not exists, this only returns empty string.",
    "type": "method",
    "file_path": "sphinx\\sphinx\\ext\\graphviz.py",
    "ast_data": "FunctionDef name:generate_clickable_map arg:self arguments arg If Return return:yes Call Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "fit_predict",
    "source_code": "def fit_predict(self, X, y=None, **kwargs):\n    if _routing_enabled():\n        transform_params = self.get_metadata_routing().consumes(method='predict', params=kwargs.keys())\n        if transform_params:\n            warnings.warn(f\"This object ({self.__class__.__name__}) has a `predict` method which consumes metadata, but `fit_predict` does not forward metadata to `predict`. Please implement a custom `fit_predict` method to forward metadata to `predict` as well.Alternatively, you can explicitly do `set_predict_request`and set all values to `False` to disable metadata routed to `predict`, if that's an option.\", UserWarning)\n    return self.fit(X, **kwargs).predict(X)",
    "docstring": "Perform fit on X and returns labels for X. Returns -1 for outliers and 1 for inliers. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) The input samples. y : Ignored Not used, present for API consistency by convention. **kwargs : dict Arguments to be passed to ``. .. versionadded:: 1.4 Returns ------- y : ndarray of shape (n_samples,) 1 for inliers, -1 for outliers.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\base.py",
    "ast_data": "FunctionDef name:fit_predict arg:self arg:X arg:y arguments arg arg arg arg If Call Assign Call Call Call If Call Return return:yes Call Call"
  },
  {
    "library": "sphinx",
    "name": "escape_id",
    "source_code": "def escape_id(self, s: str) -> str:\n    bad_chars = ',:()'\n    for bc in bad_chars:\n        s = s.replace(bc, ' ')\n    if re.search('[^ .]', s):\n        s = s.replace('.', ' ')\n    s = ' '.join(s.split()).strip()\n    return self.escape(s)",
    "docstring": "Return an escaped string suitable for node names and anchors.",
    "type": "method",
    "file_path": "sphinx\\sphinx\\writers\\texinfo.py",
    "ast_data": "FunctionDef name:escape_id arg:self arg:s arguments arg arg Assign For Assign Call If Call Assign Call Assign Call Call Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "set_execution_mode",
    "source_code": "def set_execution_mode(mode):\n    context().execution_mode = mode",
    "docstring": "Sets execution mode for the current thread.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\context.py",
    "ast_data": "FunctionDef name:set_execution_mode arg:mode arguments arg Assign Call"
  },
  {
    "library": "tensorflow",
    "name": "add_event",
    "source_code": "def add_event(self, event):\n    if not self._closed:\n        self._try_put(event)",
    "docstring": "Adds an event to the event file. Args: event: An protocol buffer.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\summary\\writer\\event_file_writer.py",
    "ast_data": "FunctionDef name:add_event arg:self arg:event arguments arg arg If Call"
  },
  {
    "library": "django",
    "name": "save_model",
    "source_code": "def save_model(self, request, obj, form, change):\n    obj.save()",
    "docstring": "Given a model instance save it to the database.",
    "type": "method",
    "file_path": "django\\django\\contrib\\admin\\options.py",
    "ast_data": "FunctionDef name:save_model arg:self arg:request arg:obj arg:form arg:change arguments arg arg arg arg arg Call"
  },
  {
    "library": "tensorflow",
    "name": "set_logical_device_configuration",
    "source_code": "def set_logical_device_configuration(self, dev, virtual_devices):\n    self._initialize_physical_devices()\n    if dev not in self._physical_devices:\n        raise ValueError('Unrecognized device: %s' % repr(dev))\n    if dev.device_type == 'CPU':\n        for vdev in virtual_devices:\n            if vdev.memory_limit is not None:\n                raise ValueError('Setting memory limit on CPU virtual devices is currently not supported')\n            if vdev.experimental_priority is not None:\n                raise ValueError('Setting experimental_priority on CPU virtual  devices is currently not supported')\n            if vdev.experimental_device_ordinal is not None:\n                raise ValueError('Setting experimental_device_ordinal on CPU virtual  devices is currently not supported')\n    elif dev.device_type == 'GPU':\n        for vdev in virtual_devices:\n            if vdev.memory_limit is None:\n                raise ValueError('Setting memory limit is required for GPU virtual devices')\n    else:\n        raise ValueError('Virtual devices are not supported for %s' % dev.device_type)\n    if self._virtual_device_map.get(dev) == virtual_devices:\n        return\n    if self._context_handle is not None:\n        raise RuntimeError('Virtual devices cannot be modified after being initialized')\n    self._virtual_device_map[dev] = virtual_devices",
    "docstring": "Set the virtual device configuration for a PhysicalDevice.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\context.py",
    "ast_data": "FunctionDef name:set_logical_device_configuration arg:self arg:dev arg:virtual_devices arguments arg arg arg Call If Compare Raise Call Call If Compare For If Compare Raise Call If Compare Raise Call If Compare Raise Call If Compare For If Compare Raise Call Raise Call If Compare Call Return return:no If Compare Raise Call Assign"
  },
  {
    "library": "scikit-learn",
    "name": "predict",
    "source_code": "def predict(self, X, check_input=True):\n    check_is_fitted(self)\n    X = self._validate_X_predict(X, check_input)\n    proba = self.tree_.predict(X)\n    n_samples = X.shape[0]\n    if is_classifier(self):\n        if self.n_outputs_ == 1:\n            return self.classes_.take(np.argmax(proba, axis=1), axis=0)\n        else:\n            class_type = self.classes_[0].dtype\n            predictions = np.zeros((n_samples, self.n_outputs_), dtype=class_type)\n            for k in range(self.n_outputs_):\n                predictions[:, k] = self.classes_[k].take(np.argmax(proba[:, k], axis=1), axis=0)\n            return predictions\n    elif self.n_outputs_ == 1:\n        return proba[:, 0]\n    else:\n        return proba[:, :, 0]",
    "docstring": "Predict class or regression value for X. For a classification model, the predicted class for each sample in X is returned. For a regression model, the predicted value based on X is returned. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) The input samples. Internally, it will be converted to ``. check_input : bool, default=True Allow to bypass several input checking. Don't use this parameter unless you know what you're doing. Returns ------- y : array-like of shape (n_samples,) or (n_samples, n_outputs) The predicted classes, or the predict values.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\tree\\_classes.py",
    "ast_data": "FunctionDef name:predict arg:self arg:X arg:check_input arguments arg arg arg Call Assign Call Assign Call Assign If Call If Compare Return return:yes Call Call Assign Assign Call For Call Assign Call Call Return return:yes If Compare Return return:yes Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "CondWrapper",
    "source_code": "@function.Defun(*cond_dtypes, func_name='%s_Wrapper' % cond.name)\ndef CondWrapper(*args):\n    return cond(*args[:len(body_input_types)])",
    "docstring": "A wrapper that handles loop-carried captured inputs.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\functional_ops.py",
    "ast_data": "FunctionDef name:CondWrapper arguments arg Return return:yes Call Call Call"
  },
  {
    "library": "sphinx",
    "name": "manpage",
    "source_code": "class manpage(nodes.Inline, nodes.FixedTextElement):\n    pass",
    "docstring": "Node for references to manpages.",
    "type": "class",
    "file_path": "sphinx\\sphinx\\addnodes.py",
    "ast_data": "ClassDef name:manpage"
  },
  {
    "library": "pandas",
    "name": "__repr__",
    "source_code": "def __repr__(self) -> str:\n    parened = (f'({pprint_thing(opr)})' for opr in self.operands)\n    return pprint_thing(f' {self.op} '.join(parened))",
    "docstring": "Print a generic n-ary operator and its operands using infix notation.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\computation\\ops.py",
    "ast_data": "FunctionDef name:__repr__ arg:self arguments arg Assign Call Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_sweep",
    "source_code": "def _sweep(self):\n    if not self._max_to_keep:\n        return\n    while len(self._maybe_delete) > self._max_to_keep:\n        filename, timestamp = self._maybe_delete.popitem(last=False)\n        if self._keep_checkpoint_every_n_hours and timestamp - self._keep_checkpoint_every_n_hours * 3600.0 >= self._last_preserved_timestamp:\n            self._last_preserved_timestamp = timestamp\n            continue\n        _delete_file_if_exists(filename + '.index')\n        _delete_file_if_exists(filename + '.data-?????-of-?????')",
    "docstring": "Deletes or preserves managed checkpoints.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\checkpoint\\checkpoint_management.py",
    "ast_data": "FunctionDef name:_sweep arg:self arguments arg If Return return:no While Compare Call Assign Call If BoolOp Compare Assign Call Call"
  },
  {
    "library": "kornia",
    "name": "detect_bilinear",
    "source_code": "def detect_bilinear(self, heatmap: Tensor, cand_h: Tensor, cand_w: Tensor) -> Tensor:\n    cand_h_floor = torch.floor(cand_h).to(torch.long)\n    cand_h_ceil = torch.ceil(cand_h).to(torch.long)\n    cand_w_floor = torch.floor(cand_w).to(torch.long)\n    cand_w_ceil = torch.ceil(cand_w).to(torch.long)\n    cand_samples_feat = heatmap[cand_h_floor, cand_w_floor] * (cand_h_ceil - cand_h) * (cand_w_ceil - cand_w) + heatmap[cand_h_floor, cand_w_ceil] * (cand_h_ceil - cand_h) * (cand_w - cand_w_floor) + heatmap[cand_h_ceil, cand_w_floor] * (cand_h - cand_h_floor) * (cand_w_ceil - cand_w) + heatmap[cand_h_ceil, cand_w_ceil] * (cand_h - cand_h_floor) * (cand_w - cand_w_floor)\n    return cand_samples_feat",
    "docstring": "Detect by bilinear sampling.",
    "type": "method",
    "file_path": "kornia\\kornia\\feature\\sold2\\sold2_detector.py",
    "ast_data": "FunctionDef name:detect_bilinear arg:self arg:heatmap arg:cand_h arg:cand_w arguments arg arg arg arg Assign Call Call Assign Call Call Assign Call Call Assign Call Call Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "get_slot_names",
    "source_code": "def get_slot_names(self, *args, **kwargs):\n    return self._opt.get_slot_names(*args, **kwargs)",
    "docstring": "Return a list of the names of slots created by the . This simply wraps the get_slot_names() from the actual optimizer. Args: *args: Arguments for get_slot(). **kwargs: Keyword arguments for get_slot(). Returns: A list of strings.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\training\\sync_replicas_optimizer.py",
    "ast_data": "FunctionDef name:get_slot_names arg:self arguments arg arg arg Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "_eigvalsh_to_eps",
    "source_code": "def _eigvalsh_to_eps(spectrum, cond=None, rcond=None):\n    if rcond is not None:\n        cond = rcond\n    if cond in [None, -1]:\n        t = spectrum.dtype.char.lower()\n        factor = {'f': 1000.0, 'd': 1000000.0}\n        cond = factor[t] * np.finfo(t).eps\n    eps = cond * np.max(abs(spectrum))\n    return eps",
    "docstring": "Determine which eigenvalues are \"small\" given the spectrum. This is for compatibility across various linear algebra functions that should agree about whether or not a Hermitian matrix is numerically singular and what is its numerical matrix rank. This is designed to be compatible with scipy.linalg.pinvh. Parameters ---------- spectrum : 1d ndarray Array of eigenvalues of a Hermitian matrix. cond, rcond : float, optional Cutoff for small eigenvalues. Singular values smaller than rcond * largest_eigenvalue are considered zero. If None or -1, suitable machine precision is used. Returns ------- eps : float Magnitude cutoff for numerical negligibility.",
    "type": "function",
    "file_path": "scipy\\scipy\\stats\\_multivariate.py",
    "ast_data": "FunctionDef name:_eigvalsh_to_eps arg:spectrum arg:cond arg:rcond arguments arg arg arg If Compare Assign If Compare Assign Call Assign Assign Call Assign Call Call Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "fit",
    "source_code": "def fit(self, X, y=None):\n    self._reset()\n    return self.partial_fit(X, y)",
    "docstring": "Compute the maximum absolute value to be used for later scaling. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) The data used to compute the per-feature minimum and maximum used for later scaling along the features axis. y : None Ignored. Returns ------- self : object Fitted scaler.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\preprocessing\\_data.py",
    "ast_data": "FunctionDef name:fit arg:self arg:X arg:y arguments arg arg arg Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "map_structure_up_to",
    "source_code": "def map_structure_up_to(shallow_tree, func, *inputs):\n    return nest_util.map_structure_up_to(nest_util.Modality.DATA, shallow_tree, func, *inputs)",
    "docstring": "Applies a function or op to a number of partially flattened inputs. The are flattened up to before being mapped. Use Case: Sometimes we wish to apply a function to a partially flattened sequence (for example when the function itself takes sequence inputs). We achieve this by specifying a shallow structure, we wish to flatten up to. The , can be thought of as having the same structure as , but with leaf nodes that are themselves tree structures. This function, therefore, will return something with the same base structure as . Examples: Args: shallow_tree: a shallow tree, common to all the inputs. func: callable which will be applied to each input individually. *inputs: arbitrarily nested combination of objects that are compatible with shallow_tree. The function is applied to corresponding partially flattened elements of each input, so the function must support arity of . Raises: TypeError: If is a sequence but is not. TypeError: If the sequence types of are different from . ValueError: If the sequence lengths of are different from . Returns: result of repeatedly applying , with same structure as .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\data\\util\\nest.py",
    "ast_data": "FunctionDef name:map_structure_up_to arg:shallow_tree arg:func arguments arg arg arg Return return:yes Call"
  },
  {
    "library": "kornia",
    "name": "PixelFormat",
    "source_code": "@dataclass(frozen=True)\nclass PixelFormat:\n    color_space: ColorSpace\n    bit_depth: int",
    "docstring": "Data class to represent the pixel format of an image. Args: color_space: color space. bit_depth: the number of bits per channel. Example: >>> pixel_format = PixelFormat(ColorSpace.RGB, 8) >>> pixel_format.color_space >>> pixel_format.bit_depth 8",
    "type": "class",
    "file_path": "kornia\\kornia\\image\\base.py",
    "ast_data": "ClassDef name:PixelFormat Call"
  },
  {
    "library": "scipy",
    "name": "var",
    "source_code": "def var(self, df, scale):\n    dim, df, scale = self._process_parameters(df, scale)\n    out = self._var(dim, df, scale)\n    return _squeeze_output(out) if out is not None else out",
    "docstring": "Variance of the inverse Wishart distribution. Only valid if the degrees of freedom are greater than the dimension of the scale matrix plus three. Parameters ---------- %(_doc_default_callparams)s Returns ------- var : float The variance of the distribution",
    "type": "method",
    "file_path": "scipy\\scipy\\stats\\_multivariate.py",
    "ast_data": "FunctionDef name:var arg:self arg:df arg:scale arguments arg arg arg Assign Call Assign Call Return return:yes Compare Call"
  },
  {
    "library": "tensorflow",
    "name": "_GlobalShuffleDataset",
    "source_code": "class _GlobalShuffleDataset(dataset_ops.UnaryUnchangedStructureDataset):\n\n    def __init__(self, input_dataset: dataset_ops.DatasetV2, seed: Optional[Union[int, tensor.Tensor]]=None, reshuffle_each_iteration: bool=True, name: Optional[str]=None):\n        options = options_lib.Options()\n        options.experimental_warm_start = False\n        input_dataset = input_dataset.with_options(options)\n        self._input_dataset = input_dataset\n        self._seed, self._seed2 = random_seed.get_seed(seed)\n        self._reshuffle_each_iteration = reshuffle_each_iteration\n        self._name = name\n        variant_tensor = ged_ops.global_shuffle_dataset(self._input_dataset._variant_tensor, seed=self._seed, seed2=self._seed2, seed_generator=gen_dataset_ops.dummy_seed_generator(), reshuffle_each_iteration=self._reshuffle_each_iteration, **self._common_args)\n        super().__init__(input_dataset, variant_tensor)",
    "docstring": "Shuffles all elements in the input dataset.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\data\\experimental\\ops\\global_shuffle_op.py",
    "ast_data": "ClassDef name:_GlobalShuffleDataset FunctionDef name:__init__ arg:self arg:input_dataset arg:seed arg:reshuffle_each_iteration arg:name arguments arg arg arg arg arg Assign Call Assign Assign Call Assign Assign Call Assign Assign Assign Call Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_add_attributes_to_object_graph_for_registered_savers",
    "source_code": "def _add_attributes_to_object_graph_for_registered_savers(unmapped_registered_savers, object_graph_proto, node_ids, object_map):\n    registered_savers = collections.defaultdict(dict)\n    for saver_name, trackables in unmapped_registered_savers.items():\n        for object_name, trackable in trackables.items():\n            object_proto = object_graph_proto.nodes[node_ids[trackable]]\n            object_proto.registered_saver.name = saver_name\n            object_proto.registered_saver.object_name = object_name\n            object_to_save = util.get_mapped_trackable(trackable, object_map)\n            registered_savers[saver_name][object_name] = object_to_save\n    return registered_savers",
    "docstring": "Fills the object graph proto with data about the registered savers.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\checkpoint\\save_util_v1.py",
    "ast_data": "FunctionDef name:_add_attributes_to_object_graph_for_registered_savers arg:unmapped_registered_savers arg:object_graph_proto arg:node_ids arg:object_map arguments arg arg arg arg Assign Call For Call For Call Assign Assign Assign Assign Call Assign Return return:yes"
  },
  {
    "library": "django",
    "name": "end_object",
    "source_code": "def end_object(self, obj):\n    pass",
    "docstring": "Called when serializing of an object ends.",
    "type": "method",
    "file_path": "django\\django\\core\\serializers\\base.py",
    "ast_data": "FunctionDef name:end_object arg:self arg:obj arguments arg arg"
  },
  {
    "library": "kornia",
    "name": "__init__",
    "source_code": "def __init__(self, origin: Tensor, direction: Tensor) -> None:\n    super().__init__()\n    self._origin = Parameter(origin)\n    self._direction = Parameter(direction)",
    "docstring": "Initialize a parametrized line of direction and origin. Args: origin: any point on the line of any dimension. direction: the normalized vector direction of any dimension. Example: >>> o = torch.tensor([0.0, 0.0]) >>> d = torch.tensor([1.0, 1.0]) >>> l = ParametrizedLine(o, d)",
    "type": "method",
    "file_path": "kornia\\kornia\\geometry\\line.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:origin arg:direction arguments arg arg arg Call Call Assign Call Assign Call"
  },
  {
    "library": "tensorflow",
    "name": "_inputs_with_flattening",
    "source_code": "def _inputs_with_flattening(pfor_input: _PforInput, input_indices):\n    if input_indices is None:\n        input_indices = []\n    pfor_input.stack_inputs(stack_indices=input_indices)\n    inputs = []\n    for i in range(pfor_input.num_inputs):\n        if i in input_indices:\n            inp = pfor_input.stacked_input(i)\n            inp = _flatten_first_two_dims(inp)\n        else:\n            inp = pfor_input.unstacked_input(i)\n        inputs.append(inp)\n    return inputs",
    "docstring": "Stacks and flattens first dim of inputs at indices .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\parallel_for\\pfor.py",
    "ast_data": "FunctionDef name:_inputs_with_flattening arg:pfor_input arg:input_indices arguments arg arg If Compare Assign Call Assign For Call If Compare Assign Call Assign Call Assign Call Call Return return:yes"
  },
  {
    "library": "cherrypy",
    "name": "_ControlCodes",
    "source_code": "class _ControlCodes(dict):\n\n    def key_for(self, obj):\n        for key, val in self.items():\n            if val is obj:\n                return key\n        raise ValueError('The given object could not be found: %r' % obj)",
    "docstring": "Control codes used to \"signal\" a service via ControlService. User-defined control codes are in the range 128-255. We generally use the standard Python value for the Linux signal and add 128. Example: >>> signal.SIGUSR1 10 control_codes['graceful'] = 128 + 10",
    "type": "class",
    "file_path": "cherrypy\\cherrypy\\process\\win32.py",
    "ast_data": "ClassDef name:_ControlCodes FunctionDef name:key_for arg:self arg:obj arguments arg arg For Call If Compare Return return:yes Raise Call"
  },
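The reverse lookup in `key_for` is just a linear scan over the dict items. A minimal stand-alone sketch of the same pattern, with illustrative values that are not part of cherrypy's API:

```python
codes = {"graceful": 138, "shutdown": 143}  # hypothetical control codes

def key_for(mapping, value):
    # Linear scan over items, returning the first key whose value matches.
    for key, val in mapping.items():
        if val == value:  # cherrypy compares with `is`; == suffices here
            return key
    raise ValueError("The given object could not be found: %r" % value)

print(key_for(codes, 138))  # graceful
```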
  {
    "library": "pygame",
    "name": "download_prebuilts",
    "source_code": "def download_prebuilts(temp_dir, x86=True, x64=True):\n    if not os.path.exists(temp_dir):\n        print(f'Making dir :{temp_dir}:')\n        os.makedirs(temp_dir)\n    for url, checksum in get_urls(x86=x86, x64=x64):\n        download_sha1_unzip(url, checksum, temp_dir, 1)",
    "docstring": "For downloading prebuilt dependencies.",
    "type": "function",
    "file_path": "pygame\\buildconfig\\download_win_prebuilt.py",
    "ast_data": "FunctionDef name:download_prebuilts arg:temp_dir arg:x86 arg:x64 arguments arg arg arg If Call Call Call For Call Call"
  },
  {
    "library": "pytorch",
    "name": "visit_AnnAssign",
    "source_code": "def visit_AnnAssign(self, node):\n    return ast.Assign(targets=[node.target], value=ast.Call(func=ast.Name(id='annotate', ctx=ast.Load()), args=[node.value, node.annotation], keywords=[]))",
    "docstring": "Swap out Python's AnnAssign with an Assign node where the annotation function is called. Example: Original: y: Tensor_Type(1,2,3, Dyn) = f2(x) Output: y = annotate(f2(x),Tensor_Type((1,2,3,Dyn)))",
    "type": "method",
    "file_path": "pytorch\\torch\\fx\\experimental\\rewriter.py",
    "ast_data": "FunctionDef name:visit_AnnAssign arg:self arg:node arguments arg arg Return return:yes Call Call Call Call"
  },
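The rewrite above is a standard `ast.NodeTransformer` pattern. A runnable sketch of the same transform, assuming a stand-in `annotate` helper exists at runtime (Python 3.9+ for `ast.unparse`):

```python
import ast

class AnnAssignRewriter(ast.NodeTransformer):
    def visit_AnnAssign(self, node):
        # y: T = f(x)  -->  y = annotate(f(x), T)
        return ast.Assign(
            targets=[node.target],
            value=ast.Call(
                func=ast.Name(id="annotate", ctx=ast.Load()),
                args=[node.value, node.annotation],
                keywords=[],
            ),
        )

tree = ast.fix_missing_locations(
    AnnAssignRewriter().visit(ast.parse("y: int = f(x)"))
)
print(ast.unparse(tree))  # y = annotate(f(x), int)
```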
  {
    "library": "authlib",
    "name": "parse_request_authorization",
    "source_code": "def parse_request_authorization(self, request):\n    auth = request.headers.get('Authorization')\n    if not auth:\n        raise MissingAuthorizationError(self._default_auth_type, self._default_realm)\n    token_parts = auth.split(None, 1)\n    if len(token_parts) != 2:\n        raise UnsupportedTokenTypeError(self._default_auth_type, self._default_realm)\n    token_type, token_string = token_parts\n    validator = self.get_token_validator(token_type)\n    return (validator, token_string)",
    "docstring": "Parse the token and token validator from request Authorization header. Here is an example of Authorization header:: Authorization: Bearer a-token-string This method will parse this header, if it can find the validator for ``. :return: validator, token_string :raise: MissingAuthorizationError :raise: UnsupportedTokenTypeError",
    "type": "method",
    "file_path": "authlib\\authlib\\oauth2\\rfc6749\\resource_protector.py",
    "ast_data": "FunctionDef name:parse_request_authorization arg:self arg:request arguments arg arg Assign Call If Raise Call Assign Call If Compare Call Raise Call Assign Assign Call Return return:yes"
  },
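A hedged sketch of the two-part header split performed above; the real implementation also resolves a registered validator and raises authlib's specific error types:

```python
auth = "Bearer a-token-string"  # value of the Authorization header
token_parts = auth.split(None, 1)
if len(token_parts) != 2:
    # stand-in for UnsupportedTokenTypeError
    raise ValueError("unsupported token type")
token_type, token_string = token_parts
print(token_type, token_string)  # Bearer a-token-string
```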
  {
    "library": "tensorflow",
    "name": "stateless_random_flip_left_right",
    "source_code": "@tf_export('image.stateless_random_flip_left_right', v1=[])\n@dispatch.add_dispatch_support\ndef stateless_random_flip_left_right(image, seed):\n    random_func = functools.partial(stateless_random_ops.stateless_random_uniform, seed=seed)\n    return _random_flip(image, 1, random_func, 'stateless_random_flip_left_right')",
    "docstring": "Randomly flip an image horizontally (left to right) deterministically. Guarantees the same results given the same independent of how many times the function is called, and independent of global seed settings (e.g. ). Example usage: >>> image = np.array([[[1], [2]], [[3], [4]]]) >>> seed = (2, 3) >>> tf.image.stateless_random_flip_left_right(image, seed).numpy().tolist() [[[2], [1]], [[4], [3]]] Args: image: 4-D Tensor of shape or 3-D Tensor of shape . seed: A shape [2] Tensor, the seed to the random number generator. Must have dtype or . (When using XLA, only is allowed.) Returns: A tensor of the same type and shape as .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\image_ops_impl.py",
    "ast_data": "FunctionDef name:stateless_random_flip_left_right arg:image arg:seed arguments arg arg Assign Call Return return:yes Call Call"
  },
  {
    "library": "matplotlib",
    "name": "__init__",
    "source_code": "def __init__(self, loc, *, pad=0.4, borderpad=0.5, child=None, prop=None, frameon=True, bbox_to_anchor=None, bbox_transform=None, **kwargs):\n    super().__init__(**kwargs)\n    self.set_bbox_to_anchor(bbox_to_anchor, bbox_transform)\n    self.set_child(child)\n    if isinstance(loc, str):\n        loc = _api.check_getitem(self.codes, loc=loc)\n    self.loc = loc\n    self.borderpad = borderpad\n    self.pad = pad\n    if prop is None:\n        self.prop = FontProperties(size=mpl.rcParams['legend.fontsize'])\n    else:\n        self.prop = FontProperties._from_any(prop)\n        if isinstance(prop, dict) and 'size' not in prop:\n            self.prop.set_size(mpl.rcParams['legend.fontsize'])\n    self.patch = FancyBboxPatch(xy=(0.0, 0.0), width=1.0, height=1.0, facecolor='w', edgecolor='k', mutation_scale=self.prop.get_size_in_points(), snap=True, visible=frameon, boxstyle='square,pad=0')",
    "docstring": "Parameters ---------- loc : str The box location. Valid locations are 'upper left', 'upper center', 'upper right', 'center left', 'center', 'center right', 'lower left', 'lower center', 'lower right'. For backward compatibility, numeric values are accepted as well. See the parameter *loc* of for details. pad : float, default: 0.4 Padding around the child as fraction of the fontsize. borderpad : float, default: 0.5 Padding between the offsetbox frame and the *bbox_to_anchor*. child : The box that will be anchored. prop : This is only used as a reference for paddings. If not given, :rc: is used. frameon : bool Whether to draw a frame around the box. bbox_to_anchor : , 2-tuple, or 4-tuple of floats Box that is used to position the legend in conjunction with *loc*. bbox_transform : None or :class: The transform for the bounding box (*bbox_to_anchor*). **kwargs All other parameters are passed on to . Notes ----- See for a detailed description of the anchoring mechanism.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\offsetbox.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:loc arguments arg arg arg arg arg arg arg arg arg arg Call Call Call Call If Call Assign Call Assign Assign Assign If Compare Assign Call Assign Call If BoolOp Call Compare Call Assign Call Call"
  },
  {
    "library": "scipy",
    "name": "gradient_and_jacobian",
    "source_code": "def gradient_and_jacobian(self, z):\n    x = self.get_variables(z)\n    s = self.get_slack(z)\n    g = self.grad(x)\n    J_eq, J_ineq = self.jac(x)\n    return (self._compute_gradient(g), self._compute_jacobian(J_eq, J_ineq, s))",
    "docstring": "Returns scaled gradient. Return scaled gradient: gradient = [ grad(x) ] [ -barrier_parameter*ones(n_ineq) ] and scaled Jacobian matrix: jacobian = [ jac_eq(x) 0 ] [ jac_ineq(x) S ] Both of them scaled by the previously defined scaling factor.",
    "type": "method",
    "file_path": "scipy\\scipy\\optimize\\_trustregion_constr\\tr_interior_point.py",
    "ast_data": "FunctionDef name:gradient_and_jacobian arg:self arg:z arguments arg arg Assign Call Assign Call Assign Call Assign Call Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "permute_dimensions",
    "source_code": "@dispatch.add_dispatch_support\n@doc_controls.do_not_generate_docs\ndef permute_dimensions(x, pattern):\n    return array_ops.transpose(x, perm=pattern)",
    "docstring": "Permutes axes in a tensor. Args: x: Tensor or variable. pattern: A tuple of dimension indices, e.g. . Returns: A tensor. Example: >>> a = tf.constant([[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]]) >>> a >>> tf.keras.backend.permute_dimensions(a, pattern=(1, 0))",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\backend.py",
    "ast_data": "FunctionDef name:permute_dimensions arg:x arg:pattern arguments arg arg Return return:yes Call"
  },
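`permute_dimensions` is a thin wrapper over transpose with `perm=pattern`; a NumPy sketch of the same axis reordering:

```python
import numpy as np

a = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]])  # shape (4, 3)
# Reorder the axes according to the pattern (1, 0), i.e. swap rows/columns.
print(np.transpose(a, axes=(1, 0)).shape)  # (3, 4)
```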
  {
    "library": "tensorflow",
    "name": "save",
    "source_code": "def save(self, new_export_dir=None):\n    is_input_text_proto = file_io.file_exists(file_io.join(compat.as_bytes(self._export_dir), compat.as_bytes(constants.SAVED_MODEL_FILENAME_PBTXT)))\n    if not new_export_dir:\n        new_export_dir = self._export_dir\n    if is_input_text_proto:\n        path = file_io.join(compat.as_bytes(new_export_dir), compat.as_bytes(constants.SAVED_MODEL_FILENAME_PBTXT))\n        file_io.write_string_to_file(path, str(self._saved_model))\n    else:\n        path = file_io.join(compat.as_bytes(new_export_dir), compat.as_bytes(constants.SAVED_MODEL_FILENAME_PB))\n        file_io.write_string_to_file(path, self._saved_model.SerializeToString(deterministic=True))\n    tf_logging.info('SavedModel written to: %s', compat.as_text(path))",
    "docstring": "Saves the updated . Args: new_export_dir: Path where the updated will be saved. If None, the input will be overriden with the updates. Raises: errors.OpError: If there are errors during the file save operation.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\saved_model\\method_name_updater.py",
    "ast_data": "FunctionDef name:save arg:self arg:new_export_dir arguments arg arg Assign Call Call Call Call If Assign If Assign Call Call Call Call Call Assign Call Call Call Call Call Call Call"
  },
  {
    "library": "scipy",
    "name": "_wrap_result",
    "source_code": "def _wrap_result(result, is_complex, shape=None):\n    if is_complex:\n        z = _real2complex(result)\n    else:\n        z = result\n    if shape is not None:\n        z = z.reshape(shape)\n    return z",
    "docstring": "Convert from real to complex and reshape result arrays.",
    "type": "function",
    "file_path": "scipy\\scipy\\optimize\\_spectral.py",
    "ast_data": "FunctionDef name:_wrap_result arg:result arg:is_complex arg:shape arguments arg arg arg If Assign Call Assign If Compare Assign Call Return return:yes"
  },
  {
    "library": "seaborn",
    "name": "_inner_axes",
    "source_code": "@property\ndef _inner_axes(self):\n    if self._col_wrap is None:\n        return self.axes[:-1, 1:].flat\n    else:\n        axes = []\n        n_empty = self._nrow * self._ncol - self._n_facets\n        for i, ax in enumerate(self.axes):\n            append = i % self._ncol and i < self._ncol * (self._nrow - 1) and (i < self._ncol * (self._nrow - 1) - n_empty)\n            if append:\n                axes.append(ax)\n        return np.array(axes, object).flat",
    "docstring": "Return a flat array of the inner axes.",
    "type": "method",
    "file_path": "seaborn\\seaborn\\axisgrid.py",
    "ast_data": "FunctionDef name:_inner_axes arg:self arguments arg If Compare Return return:yes Assign Assign For Call Assign BoolOp Compare Compare If Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_restore_sparse_tensors",
    "source_code": "def _restore_sparse_tensors(stored_list, sparse_info_list):\n    received_sequence = isinstance(stored_list, collections_abc.Sequence)\n    if not received_sequence:\n        stored_list = (stored_list,)\n    tensors = [_restore_sparse(sparse_map_op=info.map_op, sparse_handles=array_ops.squeeze(s, [1]), rank=tensor_shape.dimension_value(info.rank + 1)) if info.sparse else s for s, info in zip(stored_list, sparse_info_list)]\n    has_st = any((isinstance(x, sparse_tensor.SparseTensor) for x in tensors))\n    if has_st:\n        t_values = [x.values if isinstance(x, sparse_tensor.SparseTensor) else x for x in tensors]\n        with_deps = lambda x: control_flow_ops.with_dependencies(t_values, x)\n        ensure_restore_tensors = [sparse_tensor.SparseTensor(indices=with_deps(x.indices), values=with_deps(x.values), dense_shape=with_deps(x.dense_shape)) if isinstance(x, sparse_tensor.SparseTensor) else with_deps(x) for x in tensors]\n    else:\n        ensure_restore_tensors = tensors\n    return ensure_restore_tensors if received_sequence else tensors[0]",
    "docstring": "Restore SparseTensors after dequeue in batch, batch_join, etc.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\training\\input.py",
    "ast_data": "FunctionDef name:_restore_sparse_tensors arg:stored_list arg:sparse_info_list arguments arg arg Assign Call If Assign Assign Call Call Call Call Assign Call Call If Assign Call Assign arguments arg Call Assign Call Call Call Call Call Call Assign Return return:yes"
  },
  {
    "library": "django",
    "name": "IntegerFieldFloatRounding",
    "source_code": "class IntegerFieldFloatRounding:\n\n    def get_prep_lookup(self):\n        if isinstance(self.rhs, float):\n            self.rhs = math.ceil(self.rhs)\n        return super().get_prep_lookup()",
    "docstring": "Allow floats to work as query values for IntegerField. Without this, the decimal portion of the float would always be discarded.",
    "type": "class",
    "file_path": "django\\django\\db\\models\\lookups.py",
    "ast_data": "ClassDef name:IntegerFieldFloatRounding FunctionDef name:get_prep_lookup arg:self arguments arg If Call Assign Call Return return:yes Call Call"
  },
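The lookup ceils float query values so the decimal portion is not silently discarded; a plain-Python illustration of the difference:

```python
import math

# A query value of 2.5 rounds up to 3 under this lookup, rather than
# truncating to 2 as plain int() coercion would.
print(math.ceil(2.5), int(2.5))  # 3 2
```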
  {
    "library": "django",
    "name": "get_asgi_application",
    "source_code": "def get_asgi_application():\n    django.setup(set_prefix=False)\n    return ASGIHandler()",
    "docstring": "The public interface to Django's ASGI support. Return an ASGI 3 callable. Avoids making django.core.handlers.ASGIHandler a public API, in case the internal implementation changes or moves in the future.",
    "type": "function",
    "file_path": "django\\django\\core\\asgi.py",
    "ast_data": "FunctionDef name:get_asgi_application arguments Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "group_norm",
    "source_code": "def group_norm(input: Tensor, num_groups: int, weight: Optional[Tensor]=None, bias: Optional[Tensor]=None, eps: float=1e-05) -> Tensor:\n    if has_torch_function_variadic(input, weight, bias):\n        return handle_torch_function(group_norm, (input, weight, bias), input, num_groups, weight=weight, bias=bias, eps=eps)\n    if input.dim() < 2:\n        raise RuntimeError(f'Expected at least 2 dimensions for input tensor but received {input.dim()}')\n    _verify_batch_size([input.size(0) * input.size(1) // num_groups, num_groups] + list(input.size()[2:]))\n    return torch.group_norm(input, num_groups, weight, bias, eps, torch.backends.cudnn.enabled)",
    "docstring": "Apply Group Normalization for last certain number of dimensions. See :class: for details.",
    "type": "function",
    "file_path": "pytorch\\torch\\nn\\functional.py",
    "ast_data": "FunctionDef name:group_norm arg:input arg:num_groups arg:weight arg:bias arg:eps arguments arg arg arg arg arg If Call Return return:yes Call If Compare Call Raise Call Call Call Call Call Call Call Return return:yes Call"
  },
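A usage sketch of the functional API above (requires torch); the channel count must be divisible by `num_groups`:

```python
import torch
import torch.nn.functional as F

x = torch.randn(2, 6, 4)  # (N, C, *) with C = 6 divisible by num_groups = 3
out = F.group_norm(x, num_groups=3)
print(out.shape)  # torch.Size([2, 6, 4])
```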
  {
    "library": "pytorch",
    "name": "_recursively_add_node_arg_to_stack",
    "source_code": "def _recursively_add_node_arg_to_stack(self, arg: Any) -> None:\n    if isinstance(arg, Node):\n        self.stack.append(arg)\n    elif isinstance(arg, torch.fx.immutable_collections.immutable_list) or type(arg) is tuple:\n        for inner_arg in arg:\n            self._recursively_add_node_arg_to_stack(inner_arg)\n    elif isinstance(arg, torch.fx.immutable_collections.immutable_dict):\n        for value in arg.values():\n            self._recursively_add_node_arg_to_stack(value)",
    "docstring": "Adds all of the nodes in this arg to the stack, properly navigating through list, dicts and tuples.",
    "type": "method",
    "file_path": "pytorch\\torch\\ao\\ns\\fx\\graph_matcher.py",
    "ast_data": "FunctionDef name:_recursively_add_node_arg_to_stack arg:self arg:arg arguments arg arg If Call Call If BoolOp Call Compare Call For Call If Call For Call Call"
  },
  {
    "library": "seaborn",
    "name": "_deprecate_ci",
    "source_code": "def _deprecate_ci(errorbar, ci):\n    if ci is not deprecated and ci != 'deprecated':\n        if ci is None:\n            errorbar = None\n        elif ci == 'sd':\n            errorbar = 'sd'\n        else:\n            errorbar = ('ci', ci)\n        msg = f'\\n\\nThe `ci` parameter is deprecated. Use `errorbar={repr(errorbar)}` for the same effect.\\n'\n        warnings.warn(msg, FutureWarning, stacklevel=3)\n    return errorbar",
    "docstring": "Warn on usage of ci= and convert to appropriate errorbar= arg. ci was deprecated when errorbar was added in 0.12. It should not be removed completely for some time, but it can be moved out of function definitions (and extracted from kwargs) after one cycle.",
    "type": "function",
    "file_path": "seaborn\\seaborn\\utils.py",
    "ast_data": "FunctionDef name:_deprecate_ci arg:errorbar arg:ci arguments arg arg If BoolOp Compare Compare If Compare Assign If Compare Assign Assign Assign Call Call Return return:yes"
  },
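Per the branches above, the old values map as `ci=None -> errorbar=None`, `ci="sd" -> errorbar="sd"`, and `ci=95 -> errorbar=("ci", 95)`. A hedged usage sketch with the modern argument (requires seaborn >= 0.12):

```python
import pandas as pd
import seaborn as sns

df = pd.DataFrame({"x": [0, 0, 1, 1], "y": [1.0, 2.0, 2.0, 4.0]})
# errorbar=("ci", 95) is the modern equivalent of the deprecated ci=95.
ax = sns.lineplot(data=df, x="x", y="y", errorbar=("ci", 95))
```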
  {
    "library": "tensorflow",
    "name": "BaseMonitor",
    "source_code": "class BaseMonitor(object):\n\n    def __init__(self, debug_events_reader):\n        self._debug_data_reader = debug_events_reader\n        debug_events_reader._add_monitor(self)\n\n    def on_execution(self, execution_index, execution):\n        pass\n\n    def on_graph_execution_trace(self, graph_execution_trace_index, graph_execution_trace):\n        pass",
    "docstring": "Base class for debug event data monitors.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\debug\\lib\\debug_events_monitors.py",
    "ast_data": "ClassDef name:BaseMonitor FunctionDef name:__init__ arg:self arg:debug_events_reader arguments arg arg Assign Call FunctionDef name:on_execution arg:self arg:execution_index arg:execution arguments arg arg arg FunctionDef name:on_graph_execution_trace arg:self arg:graph_execution_trace_index arg:graph_execution_trace arguments arg arg arg"
  },
  {
    "library": "pandas",
    "name": "validate_fillna_kwargs",
    "source_code": "def validate_fillna_kwargs(value, method, validate_scalar_dict_value: bool=True):\n    from pandas.core.missing import clean_fill_method\n    if value is None and method is None:\n        raise ValueError(\"Must specify a fill 'value' or 'method'.\")\n    if value is None and method is not None:\n        method = clean_fill_method(method)\n    elif value is not None and method is None:\n        if validate_scalar_dict_value and isinstance(value, (list, tuple)):\n            raise TypeError(f'\"value\" parameter must be a scalar or dict, but you passed a \"{type(value).__name__}\"')\n    elif value is not None and method is not None:\n        raise ValueError(\"Cannot specify both 'value' and 'method'.\")\n    return (value, method)",
    "docstring": "Validate the keyword arguments to 'fillna'. This checks that exactly one of 'value' and 'method' is specified. If 'method' is specified, this validates that it's a valid method. Parameters ---------- value, method : object The 'value' and 'method' keyword arguments for 'fillna'. validate_scalar_dict_value : bool, default True Whether to validate that 'value' is a scalar or dict. Specifically, validate that it is not a list or tuple. Returns ------- value, method : object",
    "type": "function",
    "file_path": "pandas\\pandas\\util\\_validators.py",
    "ast_data": "FunctionDef name:validate_fillna_kwargs arg:value arg:method arg:validate_scalar_dict_value arguments arg arg arg If BoolOp Compare Compare Raise Call If BoolOp Compare Compare Assign Call If BoolOp Compare Compare If BoolOp Call Raise Call Call If BoolOp Compare Compare Raise Call Return return:yes"
  },
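The helper enforces that exactly one of `value`/`method` is given; sketched through the public `fillna` API, which applies the same contract:

```python
import pandas as pd

s = pd.Series([1.0, None, 3.0])
print(s.fillna(value=0).tolist())  # [1.0, 0.0, 3.0]
# Calling fillna with neither value nor method, or with both, raises instead.
```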
  {
    "library": "pytorch",
    "name": "ReplicationPad3d",
    "source_code": "class ReplicationPad3d(_ReplicationPadNd):\n    padding: tuple[int, int, int, int, int, int]\n\n    def __init__(self, padding: _size_6_t) -> None:\n        super().__init__()\n        self.padding = _ntuple(6)(padding)",
    "docstring": "Pads the input tensor using replication of the input boundary. For -dimensional padding, use :func:. Args: padding (int, tuple): the size of the padding. If is , uses the same padding in all boundaries. If a 6-, uses (:math:, :math:, :math:, :math:, :math:, :math:) Shape: - Input: :math: or :math:. - Output: :math: or :math:, where :math: :math: :math: Examples:: >>> # xdoctest: +IGNORE_WANT(\"non-deterministic\") >>> m = nn.ReplicationPad3d(3) >>> input = torch.randn(16, 3, 8, 320, 480) >>> output = m(input) >>> # using different paddings for different sides >>> m = nn.ReplicationPad3d((3, 3, 6, 6, 1, 1)) >>> output = m(input)",
    "type": "class",
    "file_path": "pytorch\\torch\\nn\\modules\\padding.py",
    "ast_data": "ClassDef name:ReplicationPad3d FunctionDef name:__init__ arg:self arg:padding arguments arg arg Call Call Assign Call Call"
  },
  {
    "library": "pytorch",
    "name": "LinearReLU",
    "source_code": "class LinearReLU(_FusedModule):\n\n    def __init__(self, linear, relu):\n        assert type_before_parametrizations(linear) == Linear and type_before_parametrizations(relu) == ReLU, f'Incorrect types for input modules{type_before_parametrizations(linear)}{type_before_parametrizations(relu)}'\n        super().__init__(linear, relu)",
    "docstring": "This is a sequential container which calls the Linear and ReLU modules. During quantization this will be replaced with the corresponding fused module.",
    "type": "class",
    "file_path": "pytorch\\torch\\ao\\nn\\intrinsic\\modules\\fused.py",
    "ast_data": "ClassDef name:LinearReLU FunctionDef name:__init__ arg:self arg:linear arg:relu arguments arg arg arg BoolOp Compare Call Compare Call Call Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "as_dict",
    "source_code": "@property\ndef as_dict(self) -> dict[str, str]:\n    return {'runtime': self.runtime.value, 'autograd': self.autograd.value, 'language': 'Python' if self.language == Language.PYTHON else 'C++'}",
    "docstring": "Dict representation for CI reporting.",
    "type": "method",
    "file_path": "pytorch\\benchmarks\\instruction_counts\\core\\api.py",
    "ast_data": "FunctionDef name:as_dict arg:self arguments arg Return return:yes Compare"
  },
  {
    "library": "matplotlib",
    "name": "get_shape",
    "source_code": "def get_shape(self):\n    if self._A is None:\n        raise RuntimeError('You must first set the image array')\n    return self._A.shape",
    "docstring": "Return the shape of the image as tuple (numrows, numcols, channels).",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\image.py",
    "ast_data": "FunctionDef name:get_shape arg:self arguments arg If Compare Raise Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "timestamp",
    "source_code": "@property\ndef timestamp(self):\n    return self._timestamp",
    "docstring": "Timestamp of when this tensor value was dumped. Returns: () The timestamp in microseconds.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\debug\\lib\\debug_data.py",
    "ast_data": "FunctionDef name:timestamp arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "record_operation_forwardprop_only",
    "source_code": "def record_operation_forwardprop_only(op_type, output_tensors, input_tensors, backward_function, forwardprop_output_indices):\n    pywrap_tfe.TFE_Py_TapeSetRecordOperationForwardprop(op_type, output_tensors, input_tensors, backward_function, forwardprop_output_indices)",
    "docstring": "Records the operation on all forward accumulators in the stack. Args: op_type: a string for the operation type, used in the backprop code output_tensors: a list of Python Tensor objects output by the operation input_tensors: a list of input Tensors to the recorded operation backward_function: the function to be called to, given the gradients of the output tensors, produce the gradients of the input tensors. This function is automatically transposed to produce output gradients given input gradients. forwardprop_output_indices: indicates any output_tensors which contain JVPs. Typically these will have come from TFE_Py_PackForwardGradients. May be None or an empty sequence if there are no JVP outputs from the operation.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\record.py",
    "ast_data": "FunctionDef name:record_operation_forwardprop_only arg:op_type arg:output_tensors arg:input_tensors arg:backward_function arg:forwardprop_output_indices arguments arg arg arg arg arg Call"
  },
  {
    "library": "pandas",
    "name": "set_uuid",
    "source_code": "def set_uuid(self, uuid: str) -> Styler:\n    self.uuid = uuid\n    return self",
    "docstring": "Set the uuid applied to `id HTML elements. Styler.set_tooltips : Set the DataFrame of strings on ` is typically a more specific identifier, such as `idc1idTable visualization `_ for more examples.",
    "type": "method",
    "file_path": "pandas\\pandas\\io\\formats\\style.py",
    "ast_data": "FunctionDef name:set_uuid arg:self arg:uuid arguments arg arg Assign Return return:yes"
  },
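A usage sketch (requires jinja2 for Styler): pinning the uuid makes the generated HTML ids deterministic, since ids are prefixed with `T_<uuid>`:

```python
import pandas as pd

df = pd.DataFrame({"a": [1, 2]})
html = df.style.set_uuid("demo").to_html()
print("T_demo" in html)  # True
```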
  {
    "library": "pytorch",
    "name": "_set_rng_state_offset",
    "source_code": "def _set_rng_state_offset(offset: int, device: Union[int, str, torch.device]='cuda') -> None:\n    final_device = _get_device(device)\n\n    def cb():\n        default_generator = _get_generator(final_device)\n        default_generator.set_offset(offset)\n    _lazy_call(cb)",
    "docstring": "Set the random number generator state offset of the specified GPU. Args: offset (int): The desired offset device (torch.device or int, optional): The device to set the RNG state. Default: ``, the current CUDA device).",
    "type": "function",
    "file_path": "pytorch\\torch\\cuda\\__init__.py",
    "ast_data": "FunctionDef name:_set_rng_state_offset arg:offset arg:device arguments arg arg Assign Call FunctionDef name:cb arguments Assign Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_validate_inference_input_output_types",
    "source_code": "def _validate_inference_input_output_types(self, quant_mode):\n    default_types = [_dtypes.float32]\n    if quant_mode.is_integer_quantization():\n        if quant_mode.is_post_training_int16x8_quantization():\n            all_types = default_types + [_dtypes.int16]\n        else:\n            all_types = default_types + [_dtypes.int8, _dtypes.uint8, _dtypes.int16]\n        if self.inference_input_type not in all_types or self.inference_output_type not in all_types:\n            all_types_names = ['tf.' + t.name for t in all_types]\n            raise ValueError('The inference_input_type and inference_output_type must be in {}.'.format(all_types_names))\n    elif self.inference_input_type not in default_types or self.inference_output_type not in default_types:\n        raise ValueError('The inference_input_type and inference_output_type must be tf.float32.')",
    "docstring": "Validate inference_input_type and inference_output_type flags.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\lite\\python\\lite.py",
    "ast_data": "FunctionDef name:_validate_inference_input_output_types arg:self arg:quant_mode arguments arg arg Assign If Call If Call Assign Assign If BoolOp Compare Compare Assign Raise Call Call If BoolOp Compare Compare Raise Call"
  },
  {
    "library": "pytorch",
    "name": "_del_nested_attr",
    "source_code": "def _del_nested_attr(obj: nn.Module, names: list[str]) -> None:\n    if len(names) == 1:\n        delattr(obj, names[0])\n    else:\n        _del_nested_attr(getattr(obj, names[0]), names[1:])",
    "docstring": "Deletes the attribute specified by the given list of names. For example, to delete the attribute obj.conv.weight, use _del_nested_attr(obj, ['conv', 'weight'])",
    "type": "function",
    "file_path": "pytorch\\benchmarks\\functional_autograd_benchmark\\utils.py",
    "ast_data": "FunctionDef name:_del_nested_attr arg:obj arg:names arguments arg arg If Compare Call Call Call Call"
  },
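Demonstrating the recursive delete on a tiny module (requires torch); `nn.Module.__delattr__` takes care of removing the underlying Parameter:

```python
import torch.nn as nn

m = nn.Sequential()
m.add_module("conv", nn.Conv2d(1, 1, 1))
_del_nested_attr(m, ["conv", "weight"])  # the helper defined above
print(hasattr(m.conv, "weight"))         # False
```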
  {
    "library": "pandas",
    "name": "is_period_dtype",
    "source_code": "def is_period_dtype(arr_or_dtype) -> bool:\n    warnings.warn('is_period_dtype is deprecated and will be removed in a future version. Use `isinstance(dtype, pd.PeriodDtype)` instead', DeprecationWarning, stacklevel=2)\n    if isinstance(arr_or_dtype, ExtensionDtype):\n        return arr_or_dtype.type is Period\n    if arr_or_dtype is None:\n        return False\n    return PeriodDtype.is_dtype(arr_or_dtype)",
    "docstring": "Check whether an array-like or dtype is of the Period dtype. .. deprecated:: 2.2.0 Use isinstance(dtype, pd.PeriodDtype) instead. Parameters ---------- arr_or_dtype : array-like or dtype The array-like or dtype to check. Returns ------- boolean Whether or not the array-like or dtype is of the Period dtype. See Also -------- api.types.is_timedelta64_ns_dtype : Check whether the provided array or dtype is of the timedelta64[ns] dtype. api.types.is_timedelta64_dtype: Check whether an array-like or dtype is of the timedelta64 dtype. Examples -------- >>> from pandas.core.dtypes.common import is_period_dtype >>> is_period_dtype(object) False >>> is_period_dtype(pd.PeriodDtype(freq=\"D\")) True >>> is_period_dtype([1, 2, 3]) False >>> is_period_dtype(pd.Period(\"2017-01-01\")) False >>> is_period_dtype(pd.PeriodIndex([], freq=\"Y\")) True",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\dtypes\\common.py",
    "ast_data": "FunctionDef name:is_period_dtype arg:arr_or_dtype arguments arg Call If Call Return return:yes Compare If Compare Return return:yes Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_select_worker_slice",
    "source_code": "def _select_worker_slice(worker_id, structured):\n\n    def _get(x):\n        return x._values[worker_id] if isinstance(x, PerWorkerValues) else x\n    return nest.map_structure(_get, structured)",
    "docstring": "Selects the worker slice of each of the items in .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\coordinator\\cluster_coordinator.py",
    "ast_data": "FunctionDef name:_select_worker_slice arg:worker_id arg:structured arguments arg arg FunctionDef name:_get arg:x arguments arg Return return:yes Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "_clean_up_graph_metadata",
    "source_code": "def _clean_up_graph_metadata(gm: torch.fx.GraphModule) -> None:\n    for node in gm.graph.nodes:\n        if 'sharding' in node.meta:\n            del node.meta['sharding']\n        if 'val' in node.meta and isinstance(node.meta['val'], torch.Tensor):\n            local_tensor_meta = _extract_tensor_metadata(node.meta['val'])\n            node.meta['tensor_meta'] = local_tensor_meta",
    "docstring": "Clean up the graph by removing sharding and partitioning related metadata",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\tensor\\experimental\\_tp_transform.py",
    "ast_data": "FunctionDef name:_clean_up_graph_metadata arg:gm arguments arg For If Compare If BoolOp Compare Call Assign Call Assign"
  },
  {
    "library": "tensorflow",
    "name": "to_input_signature",
    "source_code": "def to_input_signature(function_type):\n    constrained_parameters = list(function_type.parameters.keys())\n    if 'self' in constrained_parameters:\n        constrained_parameters.pop(0)\n    if not constrained_parameters:\n        return tuple()\n    constraints = []\n    is_auto_constrained = False\n    for parameter_name in constrained_parameters:\n        parameter = function_type.parameters[parameter_name]\n        constraint = None\n        if parameter.type_constraint:\n            constraint = parameter.type_constraint.placeholder_value(trace_type.InternalPlaceholderContext(unnest_only=True))\n            if any((not isinstance(arg, tensor.TensorSpec) for arg in nest.flatten([constraint], expand_composites=True))):\n                is_auto_constrained = True\n                break\n            else:\n                constraints.append(constraint)\n    if is_auto_constrained and (not constraints):\n        return tuple()\n    return tuple(constraints) if constraints else None",
    "docstring": "Extracts an input_signature from function_type instance.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\polymorphic_function\\function_type_utils.py",
    "ast_data": "FunctionDef name:to_input_signature arg:function_type arguments arg Assign Call Call If Compare Call If Return return:yes Call Assign Assign For Assign Assign If Assign Call Call If Call Call Call Assign Call If BoolOp Return return:yes Call Return return:yes Call"
  },
  {
    "library": "pygame",
    "name": "array_blue",
    "source_code": "def array_blue(surface):\n    size = surface.get_size()\n    array = numpy.empty(size, numpy.uint8)\n    surface_to_array(array, surface, 'B')\n    return array",
    "docstring": "pygame.surfarray.array_blue(Surface): return array copy pixel blue into a 2d array Copy the pixel blue values from a Surface into a 2D array. This will work for any type of Surface format. This function will temporarily lock the Surface as pixels are copied (see the Surface.lock - lock the Surface memory for pixel access method).",
    "type": "function",
    "file_path": "pygame\\src_py\\surfarray.py",
    "ast_data": "FunctionDef name:array_blue arg:surface arguments arg Assign Call Assign Call Call Return return:yes"
  },
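A usage sketch, assuming pygame is built with NumPy support; a 32-bit software surface works without opening a display:

```python
import pygame
from pygame import surfarray

surf = pygame.Surface((4, 3), depth=32)
surf.fill((10, 20, 30))          # R=10, G=20, B=30 everywhere
blues = surfarray.array_blue(surf)
print(blues.shape, int(blues[0, 0]))  # (4, 3) 30
```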
  {
    "library": "tensorflow",
    "name": "_StopOps",
    "source_code": "def _StopOps(from_ops: list[ops.Operation], stop_gradient_ops: list[ops.Operation], pending_count, xs_set):\n    stop_ops = set()\n    for op in from_ops:\n        is_stop_op = True\n        for inp in _NonEagerInputs(op, xs_set):\n            if pending_count[inp.op] > 0:\n                is_stop_op = False\n                break\n        if is_stop_op:\n            stop_ops.add(op)\n    stop_ops.update((op for op in stop_gradient_ops))\n    return stop_ops",
    "docstring": "The set of ops that terminate the gradient computation. This computes the frontier of the forward graph *before* which backprop should stop. Operations in the returned set will not be differentiated. This set is defined as the subset of containing ops that have no predecessor in . is the result of . An 'op' has predecessors in iff pending_count[op] > 0. In addition, none of will be differentiated. Args: from_ops: list of Operations. stop_gradient_ops: list of Operations never to backprop through. pending_count: mapping from operation to number of backprop inputs. xs_set: ObjectIdentitySet of Tensors. Returns: The set of operations.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\gradients_util.py",
    "ast_data": "FunctionDef name:_StopOps arg:from_ops arg:stop_gradient_ops arg:pending_count arg:xs_set arguments arg arg arg arg Assign Call For Assign For Call If Compare Assign If Call Call Return return:yes"
  },
  {
    "library": "seaborn",
    "name": "get_mapping",
    "source_code": "def get_mapping(self, scale: Scale, data: Series) -> Mapping:\n    if isinstance(scale, Nominal):\n        return self._get_nominal_mapping(scale, data)\n    elif isinstance(scale, Boolean):\n        return self._get_boolean_mapping(scale, data)\n    if scale.values is None:\n        mapping = color_palette('ch:', as_cmap=True)\n    elif isinstance(scale.values, tuple):\n        mapping = blend_palette(scale.values, as_cmap=True)\n    elif isinstance(scale.values, str):\n        mapping = color_palette(scale.values, as_cmap=True)\n    elif callable(scale.values):\n        mapping = scale.values\n    else:\n        scale_class = scale.__class__.__name__\n        msg = ' '.join([f'Scale values for {self.variable} with a {scale_class} mapping', f'must be string, tuple, or callable; not {type(scale.values)}.'])\n        raise TypeError(msg)\n\n    def _mapping(x):\n        invalid = ~np.isfinite(x)\n        out = mapping(x)[:, :3]\n        out[invalid] = np.nan\n        return out\n    return _mapping",
    "docstring": "Return a function that maps from data domain to color values.",
    "type": "method",
    "file_path": "seaborn\\seaborn\\_core\\properties.py",
    "ast_data": "FunctionDef name:get_mapping arg:self arg:scale arg:data arguments arg arg arg If Call Return return:yes Call If Call Return return:yes Call If Compare Assign Call If Call Assign Call If Call Assign Call If Call Assign Assign Assign Call Call Raise Call FunctionDef name:_mapping arg:x arguments arg Assign Call Assign Call Assign Return return:yes Return return:yes"
  },
  {
    "library": "authlib",
    "name": "resolve_private_key",
    "source_code": "def resolve_private_key(self):\n    return None",
    "docstring": "Return the server JSON Web Key Set. This is used to sign userinfo payloads:: def resolve_private_key(self): return server_private_jwk_set() This method must be implemented by developers to support JWT userinfo signing.",
    "type": "method",
    "file_path": "authlib\\authlib\\oidc\\core\\userinfo.py",
    "ast_data": "FunctionDef name:resolve_private_key arg:self arguments arg Return return:no"
  },
  {
    "library": "tensorflow",
    "name": "flatten",
    "source_code": "def flatten(self):\n    return [_tensor_name_base(x.name) for x in self.flatten_nodes()]",
    "docstring": "Return a list of all node names in aggregation sorted sorter.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\lite\\python\\op_hint.py",
    "ast_data": "FunctionDef name:flatten arg:self arguments arg Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "value",
    "source_code": "def value(self, row, col, device_name_filter=None, node_name_filter=None, op_type_filter=None):\n    menu_item = None\n    if col == 0:\n        text = self._profile_datum_list[row].node_exec_stats.node_name\n    elif col == 1:\n        text = self._profile_datum_list[row].op_type\n    elif col == 2:\n        text = str(self.formatted_start_time[row])\n    elif col == 3:\n        text = str(self.formatted_op_time[row])\n    elif col == 4:\n        text = str(self.formatted_exec_time[row])\n    elif col == 5:\n        command = 'ps'\n        if device_name_filter:\n            command += ' --%s %s' % (_DEVICE_NAME_FILTER_FLAG, device_name_filter)\n        if node_name_filter:\n            command += ' --%s %s' % (_NODE_NAME_FILTER_FLAG, node_name_filter)\n        if op_type_filter:\n            command += ' --%s %s' % (_OP_TYPE_FILTER_FLAG, op_type_filter)\n        command += ' %s --init_line %d' % (self._profile_datum_list[row].file_path, self._profile_datum_list[row].line_number)\n        menu_item = debugger_cli_common.MenuItem(None, command)\n        text = self._profile_datum_list[row].file_line_func\n    else:\n        raise IndexError('Invalid column index %d.' % col)\n    return RL(text, font_attr=menu_item)",
    "docstring": "Get the content of a cell of the table. Args: row: (int) row index. col: (int) column index. device_name_filter: Regular expression to filter by device name. node_name_filter: Regular expression to filter by node name. op_type_filter: Regular expression to filter by op type. Returns: A debuggre_cli_common.RichLine object representing the content of the cell, potentially with a clickable MenuItem. Raises: IndexError: if row index is out of range.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\debug\\cli\\profile_analyzer_cli.py",
    "ast_data": "FunctionDef name:value arg:self arg:row arg:col arg:device_name_filter arg:node_name_filter arg:op_type_filter arguments arg arg arg arg arg arg Assign If Compare Assign If Compare Assign If Compare Assign Call If Compare Assign Call If Compare Assign Call If Compare Assign If If If Assign Call Assign Raise Call Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "get_proj",
    "source_code": "def get_proj(self):\n    box_aspect = self._roll_to_vertical(self._box_aspect)\n    worldM = proj3d.world_transformation(*self.get_xlim3d(), *self.get_ylim3d(), *self.get_zlim3d(), pb_aspect=box_aspect)\n    R = 0.5 * box_aspect\n    elev_rad = np.deg2rad(self.elev)\n    azim_rad = np.deg2rad(self.azim)\n    p0 = np.cos(elev_rad) * np.cos(azim_rad)\n    p1 = np.cos(elev_rad) * np.sin(azim_rad)\n    p2 = np.sin(elev_rad)\n    ps = self._roll_to_vertical([p0, p1, p2])\n    eye = R + self._dist * ps\n    u, v, w = self._calc_view_axes(eye)\n    self._view_u = u\n    self._view_v = v\n    self._view_w = w\n    if self._focal_length == np.inf:\n        viewM = proj3d._view_transformation_uvw(u, v, w, eye)\n        projM = proj3d._ortho_transformation(-self._dist, self._dist)\n    else:\n        eye_focal = R + self._dist * ps * self._focal_length\n        viewM = proj3d._view_transformation_uvw(u, v, w, eye_focal)\n        projM = proj3d._persp_transformation(-self._dist, self._dist, self._focal_length)\n    M0 = np.dot(viewM, worldM)\n    M = np.dot(projM, M0)\n    return M",
    "docstring": "Create the projection matrix from the current viewing position.",
    "type": "method",
    "file_path": "matplotlib\\lib\\mpl_toolkits\\mplot3d\\axes3d.py",
    "ast_data": "FunctionDef name:get_proj arg:self arguments arg Assign Call Assign Call Call Call Call Assign Assign Call Assign Call Assign Call Call Assign Call Call Assign Call Assign Call Assign Assign Call Assign Assign Assign If Compare Assign Call Assign Call Assign Assign Call Assign Call Assign Call Assign Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "Kern",
    "source_code": "class Kern(Node):\n    height = 0\n    depth = 0\n\n    def __init__(self, width: float):\n        super().__init__()\n        self.width = width\n\n    def __repr__(self) -> str:\n        return 'k%.02f' % self.width\n\n    def shrink(self) -> None:\n        super().shrink()\n        if self.size < NUM_SIZE_LEVELS:\n            self.width *= SHRINK_FACTOR",
    "docstring": "A node has a width field to specify a (normally negative) amount of spacing. This spacing correction appears in horizontal lists between letters like A and V when the font designer said that it looks better to move them closer together or further apart. A kern node can also appear in a vertical list, when its *width* denotes additional spacing in the vertical direction.",
    "type": "class",
    "file_path": "matplotlib\\lib\\matplotlib\\_mathtext.py",
    "ast_data": "ClassDef name:Kern Assign Assign FunctionDef name:__init__ arg:self arg:width arguments arg arg Call Call Assign FunctionDef name:__repr__ arg:self arguments arg Return return:yes FunctionDef name:shrink arg:self arguments arg Call Call If Compare"
  },
  {
    "library": "tensorflow",
    "name": "_tf_core_yield_sorted_items",
    "source_code": "def _tf_core_yield_sorted_items(iterable):\n    if isinstance(iterable, list):\n        for item in enumerate(iterable):\n            yield item\n    elif type(iterable) == tuple:\n        for item in enumerate(iterable):\n            yield item\n    elif isinstance(iterable, (dict, _collections_abc.Mapping)):\n        for key in _tf_core_sorted(iterable):\n            yield (key, iterable[key])\n    elif _is_attrs(iterable):\n        for item in _get_attrs_items(iterable):\n            yield item\n    elif is_namedtuple(iterable):\n        for field in iterable._fields:\n            yield (field, getattr(iterable, field))\n    elif _is_composite_tensor(iterable):\n        type_spec = iterable._type_spec\n        yield (type_spec.value_type.__name__, type_spec._to_components(iterable))\n    elif _is_type_spec(iterable):\n        yield (iterable.value_type.__name__, iterable._component_specs)\n    elif isinstance(iterable, CustomNestProtocol):\n        flat_component = iterable.__tf_flatten__()[1]\n        assert isinstance(flat_component, tuple)\n        yield from enumerate(flat_component)\n    else:\n        for item in enumerate(iterable):\n            yield item",
    "docstring": "Yield (key, value) pairs for in a deterministic order. For Sequences, the key will be an int, the array index of a value. For Mappings, the key will be the dictionary key. For objects (e.g. namedtuples), the key will be the attribute name. In all cases, the keys will be iterated in sorted order. Args: iterable: an iterable. Yields: The iterable's (key, value) pairs, in order of sorted keys.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\util\\nest_util.py",
    "ast_data": "FunctionDef name:_tf_core_yield_sorted_items arg:iterable arguments arg If Call For Call If Compare Call For Call If Call For Call If Call For Call If Call For Call If Call Assign Call If Call If Call Assign Call Call Call For Call"
  },
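A pure-Python analog of the deterministic traversal for the two common cases (mappings yield sorted keys, sequences yield indices); the TensorFlow version additionally handles attrs classes, namedtuples, composite tensors, and type specs:

```python
def yield_sorted_items(obj):
    # Mappings: iterate keys in sorted order for determinism.
    if isinstance(obj, dict):
        for key in sorted(obj):
            yield key, obj[key]
    # Sequences: the key is the array index.
    else:
        yield from enumerate(obj)

print(list(yield_sorted_items({"b": 2, "a": 1})))  # [('a', 1), ('b', 2)]
print(list(yield_sorted_items(["x", "y"])))        # [(0, 'x'), (1, 'y')]
```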
  {
    "library": "tensorflow",
    "name": "tensor_equals",
    "source_code": "@tf_export('__operators__.eq', v1=[])\n@dispatch.add_dispatch_support\ndef tensor_equals(self, other):\n    if other is None:\n        return False\n    g = getattr(self, 'graph', None)\n    if tensor_lib.Tensor._USE_EQUALITY and ops.executing_eagerly_outside_functions() and (g is None or g.building_function):\n        self, other = override_binary_operator.maybe_promote_tensors(self, other)\n        return gen_math_ops.equal(self, other, incompatible_shape_error=False)\n    else:\n        return self is other",
    "docstring": "The operation invoked by the operator. Compares two tensors element-wise for equality if they are broadcast-compatible; or returns False if they are not broadcast-compatible. (Note that this behavior differs from , which raises an exception if the two tensors are not broadcast-compatible.) Purpose in the API: This method is exposed in TensorFlow's API so that library developers can register dispatching for to allow it to handle custom composite tensors & other custom objects. The API symbol is not intended to be called by users directly and does appear in TensorFlow's generated documentation. Args: self: The left-hand side of the operator. other: The right-hand side of the operator. Returns: The result of the elementwise operation, or if the arguments are not broadcast-compatible.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\math_ops.py",
    "ast_data": "FunctionDef name:tensor_equals arg:self arg:other arguments arg arg If Compare Return return:yes Assign Call If BoolOp Call BoolOp Compare Assign Call Return return:yes Call Return return:yes Compare Call"
  },
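The dispatched `==` in action (requires TensorFlow 2.x in eager mode), including the `None` early-out from the first branch above:

```python
import tensorflow as tf

a = tf.constant([1, 2, 3])
b = tf.constant([1, 0, 3])
print((a == b).numpy())  # [ True False  True]
print(a == None)         # False, per the early-out  # noqa: E711
```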
  {
    "library": "sphinx",
    "name": "ReSTDirective",
    "source_code": "class ReSTDirective(ReSTMarkup):\n\n    def handle_signature(self, sig: str, signode: desc_signature) -> str:\n        name, args = parse_directive(sig)\n        desc_name = f'.. {name}::'\n        signode['fullname'] = name.strip()\n        signode += addnodes.desc_name(desc_name, desc_name)\n        if len(args) > 0:\n            signode += addnodes.desc_addname(args, args)\n        return name\n\n    def get_index_text(self, objectname: str, name: str) -> str:\n        return _('%s (directive)') % name\n\n    def before_content(self) -> None:\n        if self.names:\n            directives = self.env.ref_context.setdefault('rst:directives', [])\n            directives.append(self.names[0])\n\n    def after_content(self) -> None:\n        directives = self.env.ref_context.setdefault('rst:directives', [])\n        if directives:\n            directives.pop()",
    "docstring": "Description of a reST directive.",
    "type": "class",
    "file_path": "sphinx\\sphinx\\domains\\rst.py",
    "ast_data": "ClassDef name:ReSTDirective FunctionDef name:handle_signature arg:self arg:sig arg:signode arguments arg arg arg Assign Call Assign Assign Call Call If Compare Call Call Return return:yes FunctionDef name:get_index_text arg:self arg:objectname arg:name arguments arg arg arg Return return:yes Call FunctionDef name:before_content arg:self arguments arg If Assign Call Call FunctionDef name:after_content arg:self arguments arg Assign Call If Call"
  },
  {
    "library": "pytorch",
    "name": "set_state",
    "source_code": "@abstractmethod\ndef set_state(self, state: bytes, token: Optional[Token]=None) -> Optional[tuple[bytes, Token, bool]]:\n    pass",
    "docstring": "Set the rendezvous state. The new rendezvous state is set conditionally: - If the specified `get_state`. Returns: A tuple of the serialized rendezvous state, its fencing token, and a boolean value indicating whether our set attempt succeeded. Raises: RendezvousConnectionError: The connection to the backend has failed. RendezvousStateError: The rendezvous state is corrupt.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\elastic\\rendezvous\\dynamic_rendezvous.py",
    "ast_data": "FunctionDef name:set_state arg:self arg:state arg:token arguments arg arg arg"
  },
  {
    "library": "kornia",
    "name": "rotate_laf",
    "source_code": "def rotate_laf(LAF: Tensor, angles_degrees: Tensor) -> Tensor:\n    KORNIA_CHECK_LAF(LAF)\n    B, N = LAF.shape[:2]\n    rotmat = angle_to_rotation_matrix(angles_degrees).view(B * N, 2, 2)\n    out_laf = LAF.clone()\n    out_laf[:, :, :2, :2] = torch.bmm(LAF[:, :, :2, :2].reshape(B * N, 2, 2), rotmat).reshape(B, N, 2, 2)\n    return out_laf",
    "docstring": "Apply additional rotation to the LAFs. Compared to , the resulting rotation is original LAF orientation plus angles_degrees. Args: LAF: :math: angles_degrees: :math: in degrees. Returns: LAF oriented with angles :math:",
    "type": "function",
    "file_path": "kornia\\kornia\\feature\\laf.py",
    "ast_data": "FunctionDef name:rotate_laf arg:LAF arg:angles_degrees arguments arg arg Call Assign Assign Call Call Assign Call Assign Call Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "get_input_shape_at",
    "source_code": "@doc_controls.do_not_doc_inheritable\ndef get_input_shape_at(self, node_index):\n    return self._get_node_attribute_at_index(node_index, 'input_shapes', 'input shape')",
    "docstring": "Retrieves the input shape(s) of a layer at a given node. Args: node_index: Integer, index of the node from which to retrieve the attribute. E.g. will correspond to the first time the layer was called. Returns: A shape tuple (or list of shape tuples if the layer has multiple inputs). Raises: RuntimeError: If called in Eager mode.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\base_layer.py",
    "ast_data": "FunctionDef name:get_input_shape_at arg:self arg:node_index arguments arg arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "shape",
    "source_code": "@property\ndef shape(self):\n    return self._shape",
    "docstring": "Returns the that represents the shape of the tensor.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\tensor.py",
    "ast_data": "FunctionDef name:shape arg:self arguments arg Return return:yes"
  },
  {
    "library": "scipy",
    "name": "right_multiplied_operator",
    "source_code": "def right_multiplied_operator(J, d):\n    J = aslinearoperator(J)\n\n    def matvec(x):\n        return J.matvec(np.ravel(x) * d)\n\n    def matmat(X):\n        return J.matmat(X * d[:, np.newaxis])\n\n    def rmatvec(x):\n        return d * J.rmatvec(x)\n    return LinearOperator(J.shape, matvec=matvec, matmat=matmat, rmatvec=rmatvec)",
    "docstring": "Return J diag(d) as LinearOperator.",
    "type": "function",
    "file_path": "scipy\\scipy\\optimize\\_lsq\\common.py",
    "ast_data": "FunctionDef name:right_multiplied_operator arg:J arg:d arguments arg arg Assign Call FunctionDef name:matvec arg:x arguments arg Return return:yes Call Call FunctionDef name:matmat arg:X arguments arg Return return:yes Call FunctionDef name:rmatvec arg:x arguments arg Return return:yes Call Return return:yes Call"
  },
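Checking on a small dense example that the operator behaves as `J @ diag(d)`, using the function defined above:

```python
import numpy as np

J = np.arange(6.0).reshape(2, 3)
d = np.array([1.0, 2.0, 3.0])
op = right_multiplied_operator(J, d)  # LinearOperator for J @ diag(d)
x = np.ones(3)
print(np.allclose(op.matvec(x), J @ (d * x)))  # True
```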
  {
    "library": "matplotlib",
    "name": "__init__",
    "source_code": "def __init__(self, size=1):\n    self.size = size\n    super().__init__()",
    "docstring": "Parameters ---------- size : float Size of the arrow as a fraction of the ticklabel size.",
    "type": "method",
    "file_path": "matplotlib\\lib\\mpl_toolkits\\axisartist\\axisline_style.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:size arguments arg arg Assign Call Call"
  },
  {
    "library": "django",
    "name": "setup",
    "source_code": "def setup(set_prefix=True):\n    from django.apps import apps\n    from django.conf import settings\n    from django.urls import set_script_prefix\n    from django.utils.log import configure_logging\n    configure_logging(settings.LOGGING_CONFIG, settings.LOGGING)\n    if set_prefix:\n        set_script_prefix('/' if settings.FORCE_SCRIPT_NAME is None else settings.FORCE_SCRIPT_NAME)\n    apps.populate(settings.INSTALLED_APPS)",
    "docstring": "Configure the settings (this happens as a side effect of accessing the first setting), configure logging and populate the app registry. Set the thread-local urlresolvers script prefix if is True.",
    "type": "function",
    "file_path": "django\\django\\__init__.py",
    "ast_data": "FunctionDef name:setup arg:set_prefix arguments arg Call If Call Compare Call"
  },
  {
    "library": "matplotlib",
    "name": "hsv",
    "source_code": "def hsv() -> None:\n    set_cmap('hsv')",
    "docstring": "Set the colormap to 'hsv'. This changes the default colormap as well as the colormap of the current image if there is one. See `` for more information.",
    "type": "function",
    "file_path": "matplotlib\\lib\\matplotlib\\pyplot.py",
    "ast_data": "FunctionDef name:hsv arguments Call"
  },
  {
    "library": "matplotlib",
    "name": "_move_path_to_path_or_stream",
    "source_code": "def _move_path_to_path_or_stream(src, dst):\n    if is_writable_file_like(dst):\n        fh = open(src, encoding='latin-1') if file_requires_unicode(dst) else open(src, 'rb')\n        with fh:\n            shutil.copyfileobj(fh, dst)\n    else:\n        shutil.move(src, dst, copy_function=shutil.copyfile)",
    "docstring": "Move the contents of file at *src* to path-or-filelike *dst*. If *dst* is a path, the metadata of *src* are *not* copied.",
    "type": "function",
    "file_path": "matplotlib\\lib\\matplotlib\\backends\\backend_ps.py",
    "ast_data": "FunctionDef name:_move_path_to_path_or_stream arg:src arg:dst arguments arg arg If Call Assign Call Call Call With Call Call"
  },
  {
    "library": "tensorflow",
    "name": "get_cuda_version_all",
    "source_code": "def get_cuda_version_all():\n    key = 'cuda_ver_all'\n    out, err = run_shell_cmd(cmds_all[PLATFORM.lower()][key])\n    ret_val = out.split(b'\\n')\n    filtered = []\n    for item in ret_val:\n        if item not in ['\\n', '']:\n            filtered.append(item)\n    all_vers = []\n    for item in filtered:\n        ver_re = re.search('.*/cuda(\\\\-[\\\\d]+\\\\.[\\\\d]+)?', item.decode('utf-8'))\n        if ver_re.group(1):\n            all_vers.append(ver_re.group(1).strip('-'))\n    if err and FLAGS.debug:\n        print('Error in detecting CUDA version:\\n %s' % str(err))\n    return all_vers",
    "docstring": "Retrieves all additional CUDA versions available (other than default). For retrieving default CUDA version, use function. stderr is silenced by default. Setting FLAGS.debug mode will not enable it. Remove command from to enable stderr. Returns: List of all CUDA versions found (except default version). e.g. ['10.1', '10.2']",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\tools\\tensorflow_builder\\config_detector\\config_detector.py",
    "ast_data": "FunctionDef name:get_cuda_version_all arguments Assign Assign Call Call Assign Call Assign For If Compare Call Assign For Assign Call Call If Call Call Call Call If BoolOp Call Call Return return:yes"
  },
  {
    "library": "django",
    "name": "alter_db_tablespace",
    "source_code": "def alter_db_tablespace(self, model, old_db_tablespace, new_db_tablespace):\n    self.execute(self.sql_retablespace_table % {'table': self.quote_name(model._meta.db_table), 'old_tablespace': self.quote_name(old_db_tablespace), 'new_tablespace': self.quote_name(new_db_tablespace)})",
    "docstring": "Move a model's table between tablespaces.",
    "type": "method",
    "file_path": "django\\django\\db\\backends\\base\\schema.py",
    "ast_data": "FunctionDef name:alter_db_tablespace arg:self arg:model arg:old_db_tablespace arg:new_db_tablespace arguments arg arg arg arg Call Call Call Call"
  },
  {
    "library": "authlib",
    "name": "resolve_client_public_keys",
    "source_code": "def resolve_client_public_keys(self, client: ClientMixin):\n    raise NotImplementedError()",
    "docstring": "Resolve the client public key for verifying the JWT signature. A client may have many public keys, in this case, we can retrieve it via `` value in headers. Developers MUST implement this method:: class JWTAuthenticationRequest(rfc9101.JWTAuthenticationRequest): def resolve_client_public_key(self, client): if client.jwks_uri: return requests.get(client.jwks_uri).json return client.jwks",
    "type": "method",
    "file_path": "authlib\\authlib\\oauth2\\rfc9101\\authorization_server.py",
    "ast_data": "FunctionDef name:resolve_client_public_keys arg:self arg:client arguments arg arg Raise Call"
  },
  {
    "library": "numpy",
    "name": "__iadd__",
    "source_code": "def __iadd__(self, other):\n    m = getmask(other)\n    if self._mask is nomask:\n        if m is not nomask and m.any():\n            self._mask = make_mask_none(self.shape, self.dtype)\n            self._mask += m\n    elif m is not nomask:\n        self._mask += m\n    other_data = getdata(other)\n    other_data = np.where(self._mask, other_data.dtype.type(0), other_data)\n    self._data.__iadd__(other_data)\n    return self",
    "docstring": "Add other to self in-place.",
    "type": "method",
    "file_path": "numpy\\numpy\\ma\\core.py",
    "ast_data": "FunctionDef name:__iadd__ arg:self arg:other arguments arg arg Assign Call If Compare If BoolOp Compare Call Assign Call If Compare Assign Call Assign Call Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "run_end_callbacks",
    "source_code": "def run_end_callbacks(self) -> None:\n    for callback in self.end_callbacks:\n        callback()",
    "docstring": "Execute all registered end callbacks.",
    "type": "method",
    "file_path": "pytorch\\torch\\_dynamo\\callback.py",
    "ast_data": "FunctionDef name:run_end_callbacks arg:self arguments arg For Call"
  },
  {
    "library": "tensorflow",
    "name": "_get_bound_instance",
    "source_code": "def _get_bound_instance(target):\n    decorators, target = unwrap(target)\n    for decorator in decorators:\n        if inspect.ismethod(decorator.decorated_target):\n            return decorator.decorated_target.__self__",
    "docstring": "Returns the instance any of the targets is attached to.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\util\\tf_decorator.py",
    "ast_data": "FunctionDef name:_get_bound_instance arg:target arguments arg Assign Call For If Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_list_product",
    "source_code": "def _list_product(lst):\n    result = 1\n    for item in lst:\n        result *= item\n    return result",
    "docstring": "Computes product of element of the list.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\profiler\\internal\\flops_registry.py",
    "ast_data": "FunctionDef name:_list_product arg:lst arguments arg Assign For Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "predict",
    "source_code": "@available_if(_estimator_has('predict'))\ndef predict(self, X, **predict_params):\n    _raise_for_params(predict_params, self, 'predict')\n    check_is_fitted(self)\n    if _routing_enabled():\n        routed_params = process_routing(self, 'predict', **predict_params)\n    else:\n        routed_params = Bunch(estimator=Bunch(predict={}))\n    return self.estimator_.predict(self.transform(X), **routed_params.estimator.predict)",
    "docstring": "Reduce X to the selected features and predict using the estimator. Parameters ---------- X : array of shape [n_samples, n_features] The input samples. **predict_params : dict Parameters to route to the `enable_metadata_routing=TrueMetadata Routing User Guide ` for more details. Returns ------- y : array of shape [n_samples] The predicted target values.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\feature_selection\\_rfe.py",
    "ast_data": "FunctionDef name:predict arg:self arg:X arguments arg arg arg Call Call If Call Assign Call Assign Call Call Return return:yes Call Call Call Call"
  },
  {
    "library": "django",
    "name": "__init__",
    "source_code": "def __init__(self, connection_reset=False):\n    self.connection_reset = connection_reset",
    "docstring": "If ``, Django knows will halt the upload without consuming the rest of the upload. This will cause the browser to show a \"connection reset\" error.",
    "type": "method",
    "file_path": "django\\django\\core\\files\\uploadhandler.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:connection_reset arguments arg arg Assign"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "@deprecation.deprecated('2019-01-01', 'The TensorFlow Distributions library has moved to TensorFlow Probability (https://github.com/tensorflow/probability). You should update all references to use `tfp.distributions` instead of `tf.distributions`.', warn_once=True)\ndef __init__(self, concentration1=None, concentration0=None, validate_args=False, allow_nan_stats=True, name='Beta'):\n    parameters = dict(locals())\n    with ops.name_scope(name, values=[concentration1, concentration0]) as name:\n        self._concentration1 = self._maybe_assert_valid_concentration(ops.convert_to_tensor(concentration1, name='concentration1'), validate_args)\n        self._concentration0 = self._maybe_assert_valid_concentration(ops.convert_to_tensor(concentration0, name='concentration0'), validate_args)\n        check_ops.assert_same_float_dtype([self._concentration1, self._concentration0])\n        self._total_concentration = self._concentration1 + self._concentration0\n    super(Beta, self).__init__(dtype=self._total_concentration.dtype, validate_args=validate_args, allow_nan_stats=allow_nan_stats, reparameterization_type=distribution.FULLY_REPARAMETERIZED, parameters=parameters, graph_parents=[self._concentration1, self._concentration0, self._total_concentration], name=name)",
    "docstring": "Initialize a batch of Beta distributions. Args: concentration1: Positive floating-point indicating mean number of successes; aka \"alpha\". Implies and , i.e., . concentration0: Positive floating-point indicating mean number of failures; aka \"beta\". Otherwise has same semantics as . validate_args: Python , default . When distribution parameters are checked for validity despite possibly degrading runtime performance. When invalid inputs may silently render incorrect outputs. allow_nan_stats: Python , default . When , statistics (e.g., mean, mode, variance) use the value \"\" to indicate the result is undefined. When , an exception is raised if one or more of the statistic's batch members are undefined. name: Python name prefixed to Ops created by this class.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\distributions\\beta.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:concentration1 arg:concentration0 arg:validate_args arg:allow_nan_stats arg:name arguments arg arg arg arg arg arg Assign Call Call With Call Assign Call Call Assign Call Call Call Assign Call Call Call"
  },
  {
    "library": "scipy",
    "name": "_minor_index_fancy",
    "source_code": "def _minor_index_fancy(self, idx):\n    idx_dtype = self._get_index_dtype((self.indices, self.indptr))\n    indices = self.indices.astype(idx_dtype, copy=False)\n    indptr = self.indptr.astype(idx_dtype, copy=False)\n    idx = np.asarray(idx, dtype=idx_dtype).ravel()\n    M, N = self._swap(self._shape_as_2d)\n    k = len(idx)\n    new_shape = self._swap((M, k)) if self.ndim == 2 else (k,)\n    if k == 0:\n        return self.__class__(new_shape, dtype=self.dtype)\n    col_offsets = np.zeros(N, dtype=idx_dtype)\n    res_indptr = np.empty_like(self.indptr, dtype=idx_dtype)\n    csr_column_index1(k, idx, M, N, indptr, indices, col_offsets, res_indptr)\n    col_order = np.argsort(idx).astype(idx_dtype, copy=False)\n    nnz = res_indptr[-1]\n    res_indices = np.empty(nnz, dtype=idx_dtype)\n    res_data = np.empty(nnz, dtype=self.dtype)\n    csr_column_index2(col_order, col_offsets, len(self.indices), indices, self.data, res_indices, res_data)\n    return self.__class__((res_data, res_indices, res_indptr), shape=new_shape, copy=False)",
    "docstring": "Index along the minor axis where idx is an array of ints.",
    "type": "method",
    "file_path": "scipy\\scipy\\sparse\\_compressed.py",
    "ast_data": "FunctionDef name:_minor_index_fancy arg:self arg:idx arguments arg arg Assign Call Assign Call Assign Call Assign Call Call Assign Call Assign Call Assign Compare Call If Compare Return return:yes Call Assign Call Assign Call Call Assign Call Call Assign Assign Call Assign Call Call Call Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "is_stationary",
    "source_code": "def is_stationary(self):\n    return np.all([kernel.is_stationary() for kernel in self.kernels])",
    "docstring": "Returns whether the kernel is stationary.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\gaussian_process\\kernels.py",
    "ast_data": "FunctionDef name:is_stationary arg:self arguments arg Return return:yes Call Call"
  },
  {
    "library": "scikit-learn",
    "name": "predict_proba",
    "source_code": "def predict_proba(self, X, **params):\n    _raise_for_params(params, self, 'predict_proba')\n    check_is_fitted(self)\n    X = validate_data(self, X, accept_sparse=['csr', 'csc'], dtype=None, ensure_all_finite=False, reset=False)\n    if _routing_enabled():\n        routed_params = process_routing(self, 'predict_proba', **params)\n    else:\n        routed_params = Bunch()\n        routed_params.estimator = Bunch(predict_proba=Bunch())\n    n_jobs, _, starts = _partition_estimators(self.n_estimators, self.n_jobs)\n    all_proba = Parallel(n_jobs=n_jobs, verbose=self.verbose, **self._parallel_args())((delayed(_parallel_predict_proba)(self.estimators_[starts[i]:starts[i + 1]], self.estimators_features_[starts[i]:starts[i + 1]], X, self.n_classes_, predict_params=routed_params.estimator.get('predict', None), predict_proba_params=routed_params.estimator.get('predict_proba', None)) for i in range(n_jobs)))\n    proba = sum(all_proba) / self.n_estimators\n    return proba",
    "docstring": "Predict class probabilities for X. The predicted class probabilities of an input sample is computed as the mean predicted class probabilities of the base estimators in the ensemble. If base estimators do not implement a `predict_probapredictsklearn.set_config(enable_metadata_routing=True)Metadata Routing User Guide classes_`.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\ensemble\\_bagging.py",
    "ast_data": "FunctionDef name:predict_proba arg:self arg:X arguments arg arg arg Call Call Assign Call If Call Assign Call Assign Call Assign Call Call Assign Call Assign Call Call Call Call Call Call Call Call Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "get_host_for_device",
    "source_code": "def get_host_for_device(device):\n    spec = tf_device.DeviceSpec.from_string(device)\n    return tf_device.DeviceSpec(job=spec.job, replica=spec.replica, task=spec.task, device_type='CPU', device_index=0).to_string()",
    "docstring": "Returns the corresponding host device for the given device.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\device_util.py",
    "ast_data": "FunctionDef name:get_host_for_device arg:device arguments arg Assign Call Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_validate_symbol_names",
    "source_code": "def _validate_symbol_names(self) -> None:\n    all_symbol_names = set(self._names) | set(self._names_v1)\n    if self._api_name == TENSORFLOW_API_NAME:\n        for subpackage in SUBPACKAGE_NAMESPACES:\n            if any((n.startswith(subpackage) for n in all_symbol_names)):\n                raise InvalidSymbolNameError('@tf_export is not allowed to export symbols under %s.*' % subpackage)\n    elif not all((n.startswith(self._api_name) for n in all_symbol_names)):\n        raise InvalidSymbolNameError('Can only export symbols under package name of component.')",
    "docstring": "Validate you are exporting symbols under an allowed package. We need to ensure things exported by tf_export, etc. export symbols under disjoint top-level package names. For TensorFlow, we check that it does not export anything under subpackage names used by components (keras, etc.). For each component, we check that it exports everything under its own subpackage. Raises: InvalidSymbolNameError: If you try to export symbol under disallowed name.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\util\\tf_export.py",
    "ast_data": "FunctionDef name:_validate_symbol_names arg:self arguments arg Assign Call Call If Compare For If Call Call Raise Call If Call Call Raise Call"
  },
  {
    "library": "django",
    "name": "localize",
    "source_code": "@register.filter(is_safe=False)\ndef localize(value):\n    return str(formats.localize(value, use_l10n=True))",
    "docstring": "Force a value to be rendered as a localized value.",
    "type": "function",
    "file_path": "django\\django\\templatetags\\l10n.py",
    "ast_data": "FunctionDef name:localize arg:value arguments arg Return return:yes Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "_reduce_scatter_base",
    "source_code": "@deprecated('`torch.distributed._reduce_scatter_base` is a private function and will be deprecated. Please use `torch.distributed.reduce_scatter_tensor` instead.', category=FutureWarning)\ndef _reduce_scatter_base(output, input, op=ReduceOp.SUM, group=None, async_op=False):\n    return reduce_scatter_tensor(output, input, op, group, async_op)",
    "docstring": "Reduces, then scatters a flattened tensor to all processes in a group. Args: output (Tensor): Output tensor. input (Tensor): Input tensor that is of size output tensor size times world size group (ProcessGroup, optional): The process group to work on. If None, the default process group will be used. async_op (bool, optional): Whether this op should be an async op. Returns: Async work handle, if async_op is set to True. None, if not async_op or if not part of the group. .. warning:: is a private function. Users should use instead.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\distributed_c10d.py",
    "ast_data": "FunctionDef name:_reduce_scatter_base arg:output arg:input arg:op arg:group arg:async_op arguments arg arg arg arg arg Return return:yes Call Call"
  },
  {
    "library": "scipy",
    "name": "primapow2",
    "source_code": "def primapow2(x):\n    return x * x",
    "docstring": "Believe it or now, x**2 is not always the same as x*x in Python. In Fortran they appear to be identical. Here's a quick one-line to find an example on your system (well, two liner after importing numpy): list(filter(lambda x: x[1], [(x:=np.random.random(), x**2 - x*x != 0) for _ in range(10000)]))",
    "type": "function",
    "file_path": "scipy\\scipy\\_lib\\pyprima\\pyprima\\src\\pyprima\\common\\linalg.py",
    "ast_data": "FunctionDef name:primapow2 arg:x arguments arg Return return:yes"
  },
  {
    "library": "cherrypy",
    "name": "get",
    "source_code": "def get(self):\n    raise NotImplementedError",
    "docstring": "Return the current variant if in the cache, else None.",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\lib\\caching.py",
    "ast_data": "FunctionDef name:get arg:self arguments arg Raise"
  },
  {
    "library": "tensorflow",
    "name": "_ragged_tensor_categorical_crossentropy",
    "source_code": "@dispatch.dispatch_for_types(categorical_crossentropy, ragged_tensor.RaggedTensor)\ndef _ragged_tensor_categorical_crossentropy(y_true, y_pred, from_logits=False, label_smoothing=0, axis=-1):\n    fn = functools.partial(categorical_crossentropy, from_logits=from_logits, label_smoothing=label_smoothing, axis=axis)\n    return _ragged_tensor_apply_loss(fn, y_true, y_pred)",
    "docstring": "Implements support for handling RaggedTensors. Args: y_true: Tensor of one-hot true targets. y_pred: Tensor of predicted targets. from_logits: Whether is expected to be a logits tensor. By default, we assume that encodes a probability distribution. label_smoothing: Float in [0, 1]. If > then smooth the labels. For example, if , use for non-target labels and for target labels. axis: The axis along which to compute crossentropy (the features axis). Defaults to -1. Returns: Categorical crossentropy loss value. Expected shape: (batch, sequence_len, n_classes) with sequence_len being variable per batch. Return shape: (batch, sequence_len). When used by CategoricalCrossentropy() with the default reduction (SUM_OVER_BATCH_SIZE), the reduction averages the loss over the number of elements independent of the batch. E.g. if the RaggedTensor has 2 batches with [2, 1] values respectively the resulting loss is the sum of the individual loss values divided by 3.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\losses.py",
    "ast_data": "FunctionDef name:_ragged_tensor_categorical_crossentropy arg:y_true arg:y_pred arg:from_logits arg:label_smoothing arg:axis arguments arg arg arg arg arg Assign Call Return return:yes Call Call"
  },
  {
    "library": "cherrypy",
    "name": "put",
    "source_code": "def put(self, variant, size):\n    request = cherrypy.serving.request\n    response = cherrypy.serving.response\n    uri = cherrypy.url(qs=request.query_string)\n    uricache = self.store.get(uri)\n    if uricache is None:\n        uricache = AntiStampedeCache()\n        uricache.selecting_headers = [e.value for e in response.headers.elements('Vary')]\n        self.store[uri] = uricache\n    if len(self.store) < self.maxobjects:\n        total_size = self.cursize + size\n        if size < self.maxobj_size and total_size < self.maxsize:\n            expiration_time = response.time + self.delay\n            bucket = self.expirations.setdefault(expiration_time, [])\n            bucket.append((size, uri, uricache.selecting_headers))\n            header_values = [request.headers.get(h, '') for h in uricache.selecting_headers]\n            uricache[tuple(sorted(header_values))] = variant\n            self.tot_puts += 1\n            self.cursize = total_size",
    "docstring": "Store the current variant in the cache.",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\lib\\caching.py",
    "ast_data": "FunctionDef name:put arg:self arg:variant arg:size arguments arg arg arg Assign Assign Assign Call Assign Call If Compare Assign Call Assign Call Assign If Compare Call Assign If BoolOp Compare Compare Assign Assign Call Call Assign Call Assign Call Call Assign"
  },
  {
    "library": "matplotlib",
    "name": "format_ticks",
    "source_code": "def format_ticks(self, values):\n    self.set_locs(values)\n    return [self(value, i) for i, value in enumerate(values)]",
    "docstring": "Return the tick labels for all the ticks at once.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\ticker.py",
    "ast_data": "FunctionDef name:format_ticks arg:self arg:values arguments arg arg Call Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "swap_class",
    "source_code": "def swap_class(cls, v2_cls, v1_cls, use_v2):\n    if cls == object:\n        return cls\n    if cls in (v2_cls, v1_cls):\n        return v2_cls if use_v2 else v1_cls\n    new_bases = []\n    for base in cls.__bases__:\n        if use_v2 and issubclass(base, v1_cls) or (not use_v2 and issubclass(base, v2_cls)):\n            new_base = swap_class(base, v2_cls, v1_cls, use_v2)\n        else:\n            new_base = base\n        new_bases.append(new_base)\n    cls.__bases__ = tuple(new_bases)\n    return cls",
    "docstring": "Swaps in v2_cls or v1_cls depending on graph mode.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\utils\\version_utils.py",
    "ast_data": "FunctionDef name:swap_class arg:cls arg:v2_cls arg:v1_cls arg:use_v2 arguments arg arg arg arg If Compare Return return:yes If Compare Return return:yes Assign For If BoolOp BoolOp Call BoolOp Call Assign Call Assign Call Assign Call Return return:yes"
  },
  {
    "library": "kornia",
    "name": "gaussian_discrete",
    "source_code": "def gaussian_discrete(window_size: int, sigma: Tensor | float, *, device: Optional[Device]=None, dtype: Optional[Dtype]=None) -> Tensor:\n    if isinstance(sigma, float):\n        sigma = tensor([[sigma]], device=device, dtype=dtype)\n    KORNIA_CHECK_SHAPE(sigma, ['B', '1'])\n    sigma2 = sigma * sigma\n    tail = int(window_size // 2) + 1\n    bessels = [_modified_bessel_0(sigma2), _modified_bessel_1(sigma2), *(_modified_bessel_i(k, sigma2) for k in range(2, tail))]\n    out = concatenate(bessels[:0:-1] + bessels, -1) * sigma2.exp()\n    return out / out.sum(-1, keepdim=True)",
    "docstring": "Discrete Gaussian kernel based on the modified Bessel functions. Adapted from: Args: window_size: the size which drives the filter amount. sigma: gaussian standard deviation. If a tensor, should be in a shape :math: device: This value will be used if sigma is a float. Device desired to compute. dtype: This value will be used if sigma is a float. Dtype desired for compute. Returns: A tensor withshape :math:, with discrete Gaussian values computed by modified Bessel function.",
    "type": "function",
    "file_path": "kornia\\kornia\\filters\\kernels.py",
    "ast_data": "FunctionDef name:gaussian_discrete arg:window_size arg:sigma arguments arg arg arg arg If Call Assign Call Call Assign Assign Call Assign Call Call Call Call Assign Call Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "ONNXRuntimeOptions",
    "source_code": "@deprecated('torch.onnx.dynamo_export is deprecated since 2.7.0. Please use torch.onnx.export(..., dynamo=True) instead.')\nclass ONNXRuntimeOptions:\n    session_options: Sequence[onnxruntime.SessionOptions] | None = None\n    'ONNX Runtime session options.'\n    execution_providers: Sequence[str | tuple[str, dict[Any, Any]]] | None = None\n    'ONNX Runtime execution providers to use during model execution.'\n    execution_provider_options: Sequence[dict[Any, Any]] | None = None\n    'ONNX Runtime execution provider options.'\n\n    def __init__(self, *, session_options: Sequence[onnxruntime.SessionOptions] | None=None, execution_providers: Sequence[str | tuple[str, dict[Any, Any]]] | None=None, execution_provider_options: Sequence[dict[Any, Any]] | None=None):\n        self.session_options = session_options\n        self.execution_providers = execution_providers\n        self.execution_provider_options = execution_provider_options",
    "docstring": "Options to influence the execution of the ONNX model through ONNX Runtime. .. deprecated:: 2.7 Please use `` instead. Attributes: session_options: ONNX Runtime session options. execution_providers: ONNX Runtime execution providers to use during model execution. execution_provider_options: ONNX Runtime execution provider options.",
    "type": "class",
    "file_path": "pytorch\\torch\\onnx\\_internal\\_exporter_legacy.py",
    "ast_data": "ClassDef name:ONNXRuntimeOptions FunctionDef name:__init__ arg:self arguments arg arg arg arg Assign Assign Assign Call"
  },
  {
    "library": "kornia",
    "name": "conv1x1",
    "source_code": "def conv1x1(in_planes: int, out_planes: int, stride: int=1) -> nn.Conv2d:\n    return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, padding=0, bias=False)",
    "docstring": "1x1 convolution without padding.",
    "type": "function",
    "file_path": "kornia\\kornia\\feature\\loftr\\backbone\\resnet_fpn.py",
    "ast_data": "FunctionDef name:conv1x1 arg:in_planes arg:out_planes arg:stride arguments arg arg arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "from_spec",
    "source_code": "@classmethod\ndef from_spec(cls, spec):\n    dtype = dtypes.as_dtype(spec.dtype)\n    minimum = getattr(spec, 'minimum', dtype.min)\n    maximum = getattr(spec, 'maximum', dtype.max)\n    return BoundedTensorSpec(spec.shape, dtype, minimum, maximum, spec.name)",
    "docstring": "Returns a with the same shape and dtype as . If is a , then the new spec's bounds are set to and ; otherwise, the bounds are set to and . >>> spec = tf.TensorSpec(shape=[8, 3], dtype=tf.int32, name=\"x\") >>> BoundedTensorSpec.from_spec(spec) BoundedTensorSpec(shape=(8, 3), dtype=tf.int32, name='x', minimum=array(-2147483648, dtype=int32), maximum=array(2147483647, dtype=int32)) Args: spec: The used to create the new .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\tensor.py",
    "ast_data": "FunctionDef name:from_spec arg:cls arg:spec arguments arg arg Assign Call Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "PowerSum",
    "source_code": "class PowerSum(Benchmark):\n\n    def __init__(self, dimensions=4):\n        Benchmark.__init__(self, dimensions)\n        self._bounds = list(zip([0.0] * self.N, [4.0] * self.N))\n        self.global_optimum = [[1.0, 2.0, 2.0, 3.0]]\n        self.fglob = 0.0\n\n    def fun(self, x, *args):\n        self.nfev += 1\n        b = [8.0, 18.0, 44.0, 114.0]\n        k = atleast_2d(arange(self.N) + 1).T\n        return sum((sum(x ** k, axis=1) - b) ** 2)",
    "docstring": "Power sum objective function. This class defines the Power Sum global optimization problem. This is a multimodal minimization problem defined as follows: .. math:: f_{\\text{PowerSum}}(x) = \\sum_{k=1}^n\\left[\\left(\\sum_{i=1}^n x_i^k \\right) - b_k \\right]^2 Where, in this exercise, :math: Here, :math: for :math:. *Global optimum*: :math: for :math: .. [1] Gavana, A. Global Optimization Benchmarks and AMPGO retrieved 2015",
    "type": "class",
    "file_path": "scipy\\benchmarks\\benchmarks\\go_benchmark_functions\\go_funcs_P.py",
    "ast_data": "ClassDef name:PowerSum FunctionDef name:__init__ arg:self arg:dimensions arguments arg arg Call Assign Call Call Assign Assign FunctionDef name:fun arg:self arg:x arguments arg arg arg Assign Assign Call Call Return return:yes Call Call"
  },
  {
    "library": "pandas",
    "name": "use",
    "source_code": "def use(self, styles: dict[str, Any]) -> Styler:\n    self._todo.extend(styles.get('apply', []))\n    table_attributes: str = self.table_attributes or ''\n    obj_table_atts: str = '' if styles.get('table_attributes') is None else str(styles.get('table_attributes'))\n    self.set_table_attributes((table_attributes + ' ' + obj_table_atts).strip())\n    if styles.get('table_styles'):\n        self.set_table_styles(styles.get('table_styles'), overwrite=False)\n    for obj in ['index', 'columns']:\n        hide_obj = styles.get('hide_' + obj)\n        if hide_obj is not None:\n            if isinstance(hide_obj, bool):\n                n = getattr(self, obj).nlevels\n                setattr(self, 'hide_' + obj + '_', [hide_obj] * n)\n            else:\n                setattr(self, 'hide_' + obj + '_', hide_obj)\n    self.hide_index_names = styles.get('hide_index_names', False)\n    self.hide_column_names = styles.get('hide_column_names', False)\n    if styles.get('css'):\n        self.css = styles.get('css')\n    return self",
    "docstring": "Set the styles on the current Styler. Possibly uses styles from ``, or a boolean list for hidden levels. - \"hide_index_names\": whether index names are hidden. - \"hide_column_names\": whether column header names are hidden. - \"css\": the css class names used. Returns ------- Styler Instance of class with defined styler attributes added. See Also -------- Styler.export : Export the non data dependent attributes to the current Styler. Examples -------- >>> styler = pd.DataFrame([[1, 2], [3, 4]]).style >>> styler2 = pd.DataFrame([[9, 9, 9]]).style >>> styler.hide(axis=0).highlight_max(axis=1) # doctest: +SKIP >>> export = styler.export() >>> styler2.use(export) # doctest: +SKIP",
    "type": "method",
    "file_path": "pandas\\pandas\\io\\formats\\style.py",
    "ast_data": "FunctionDef name:use arg:self arg:styles arguments arg arg Call Call BoolOp Compare Call Call Call Call Call If Call Call Call For Assign Call If Compare If Call Assign Call Call Call Assign Call Assign Call If Call Assign Call Return return:yes"
  },
  {
    "library": "kornia",
    "name": "get_perpendicular",
    "source_code": "def get_perpendicular(lines: Tensor, points: Tensor) -> Tensor:\n    KORNIA_CHECK_SHAPE(lines, ['*', 'N', '3'])\n    KORNIA_CHECK_SHAPE(points, ['*', 'N', 'two'])\n    if points.shape[2] == 2:\n        points_h: Tensor = convert_points_to_homogeneous(points)\n    elif points.shape[2] == 3:\n        points_h = points\n    else:\n        raise AssertionError(points.shape)\n    infinity_point = lines * torch.tensor([1, 1, 0], dtype=lines.dtype, device=lines.device).view(1, 1, 3)\n    perp: Tensor = points_h.cross(infinity_point, dim=2)\n    return perp",
    "docstring": "Compute the perpendicular to a line, through the point. Args: lines: tensor containing the set of lines :math:. points: tensor containing the set of points :math:. Returns: a tensor with shape :math: containing a vector of the epipolar perpendicular lines. Each line is described as :math: and encoding the vectors as :math:.",
    "type": "function",
    "file_path": "kornia\\kornia\\geometry\\epipolar\\fundamental.py",
    "ast_data": "FunctionDef name:get_perpendicular arg:lines arg:points arguments arg arg Call Call If Compare Call If Compare Assign Raise Call Assign Call Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "loss_scale",
    "source_code": "@property\ndef loss_scale(self):\n    return self._loss_scale",
    "docstring": "Returns the loss scale of this Policy. Returns: A , or None.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\mixed_precision\\policy.py",
    "ast_data": "FunctionDef name:loss_scale arg:self arguments arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "conv_rule",
    "source_code": "@register_algebraic_expressions_inference_rule(Conv2d)\ndef conv_rule(n: Node, module_instance):\n    assert isinstance(n.args[0], Node)\n    arg_type = n.args[0].type\n    if isinstance(arg_type, TensorType) and isinstance(n.type, TensorType):\n        w_in = arg_type.__args__[3]\n        h_in = arg_type.__args__[2]\n        h_out = calculate_out_dimension(h_in, module_instance, 0)\n        w_out = calculate_out_dimension(w_in, module_instance, 1)\n        new_type = TensorType((n.type.__args__[0], n.type.__args__[1], h_out, w_out))\n        n.type = new_type\n        return new_type",
    "docstring": "Represents the outout in terms of an algrbraic expression w.r.t the input when possible",
    "type": "function",
    "file_path": "pytorch\\torch\\fx\\experimental\\graph_gradual_typechecker.py",
    "ast_data": "FunctionDef name:conv_rule arg:n arg:module_instance arguments arg arg Call Assign If BoolOp Call Call Assign Assign Assign Call Assign Call Assign Call Assign Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "manual_seed",
    "source_code": "def manual_seed(seed: int) -> None:\n    seed = int(seed)\n\n    def cb():\n        idx = current_device()\n        default_generator = torch.xpu.default_generators[idx]\n        default_generator.manual_seed(seed)\n    _lazy_call(cb, seed=True)",
    "docstring": "Set the seed for generating random numbers for the current GPU. It's safe to call this function if XPU is not available; in that case, it is silently ignored. Args: seed (int): The desired seed. .. warning:: If you are working with a multi-GPU model, this function is insufficient to get determinism. To seed all GPUs, use :func:.",
    "type": "function",
    "file_path": "pytorch\\torch\\xpu\\random.py",
    "ast_data": "FunctionDef name:manual_seed arg:seed arguments arg Assign Call FunctionDef name:cb arguments Assign Call Assign Call Call"
  },
  {
    "library": "scikit-learn",
    "name": "default_dtypes",
    "source_code": "def default_dtypes(self, *, device: Device | None=None) -> dict[str, dtype[intp | float64 | complex128]]:\n    if device not in ['cpu', None]:\n        raise ValueError(f'Device not understood. Only \"cpu\" is allowed, but received: {device}')\n    return {'real floating': dtype(float64), 'complex floating': dtype(complex128), 'integral': dtype(intp), 'indexing': dtype(intp)}",
    "docstring": "The default data types used for new NumPy arrays. For NumPy, this always returns the following dictionary: - **\"real floating\"**: `` is allowed. Returns ------- dtypes : dict A dictionary describing the default data types used for new NumPy arrays. See Also -------- __array_namespace_info__.capabilities, __array_namespace_info__.default_device, __array_namespace_info__.dtypes, __array_namespace_info__.devices Examples -------- >>> info = np.__array_namespace_info__() >>> info.default_dtypes() {'real floating': numpy.float64, 'complex floating': numpy.complex128, 'integral': numpy.int64, 'indexing': numpy.int64}",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\externals\\array_api_compat\\numpy\\_info.py",
    "ast_data": "FunctionDef name:default_dtypes arg:self arguments arg arg If Compare Raise Call Return return:yes Call Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "is_oss",
    "source_code": "def is_oss():\n    return len(sys.argv) >= 1 and 'bazel' in sys.argv[0]",
    "docstring": "Returns whether the test is run under OSS.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\multi_process_runner.py",
    "ast_data": "FunctionDef name:is_oss arguments Return return:yes BoolOp Compare Call Compare"
  },
  {
    "library": "pytorch",
    "name": "_fuse_linear_bn_leaky_relu",
    "source_code": "def _fuse_linear_bn_leaky_relu(is_qat, linear, bn, leaky_relu):\n    assert linear.training == bn.training and bn.training == leaky_relu.training, 'Linear, BN and LeakyReLU all must be in the same mode (train or eval).'\n    if is_qat:\n        raise NotImplementedError(f'Cannot fuse train modules: {(linear, bn, leaky_relu)}')\n    else:\n        map_to_fused_module_eval = {nn.Linear: nni.LinearLeakyReLU}\n        fused_module = map_to_fused_module_eval.get(type(linear), None)\n        if fused_module is not None:\n            fused_linear = nn.utils.fusion.fuse_linear_bn_eval(linear, bn)\n            fm = fused_module(fused_linear, leaky_relu)\n            return fm\n        else:\n            raise NotImplementedError(f'Cannot fuse eval modules: {(linear, bn, leaky_relu)}')",
    "docstring": "Given the linear, bn and leaky_relu modules, fuses them and returns the fused module Args: is_qat: a flag for whether we are using quantization aware training fusion or post training quantization fusion linear: Module instance of type Linear bn: BatchNorm1d instance that needs to be fused with the linear layer leaky_relu: LeakyReLU instance that needs to be fused with the linear layer Examples:: >>> # xdoctest: +SKIP(failing) >>> m1 = nn.Linear(20, 10) >>> b1 = nn.BatchNorm1d(10) >>> lr = nn.LeakyReLU(0.01) >>> m2 = _fuse_linear_bn_leaky_relu(m1, b1, lr)",
    "type": "function",
    "file_path": "pytorch\\torch\\ao\\quantization\\backend_config\\onednn.py",
    "ast_data": "FunctionDef name:_fuse_linear_bn_leaky_relu arg:is_qat arg:linear arg:bn arg:leaky_relu arguments arg arg arg arg BoolOp Compare Compare If Raise Call Assign Assign Call Call If Compare Assign Call Assign Call Return return:yes Raise Call"
  },
  {
    "library": "tensorflow",
    "name": "_RandomGenerator",
    "source_code": "class _RandomGenerator(object):\n\n    def __init__(self, seed=None):\n        super(_RandomGenerator, self).__init__()\n        if seed is not None:\n            self.seed = [seed, 0]\n        else:\n            self.seed = None\n\n    def random_normal(self, shape, mean=0.0, stddev=1, dtype=dtypes.float32):\n        if self.seed:\n            op = stateless_random_ops.stateless_random_normal\n        else:\n            op = random_ops.random_normal\n        return op(shape=shape, mean=mean, stddev=stddev, dtype=dtype, seed=self.seed)\n\n    def random_uniform(self, shape, minval, maxval, dtype):\n        if self.seed:\n            op = stateless_random_ops.stateless_random_uniform\n        else:\n            op = random_ops.random_uniform\n        return op(shape=shape, minval=minval, maxval=maxval, dtype=dtype, seed=self.seed)\n\n    def truncated_normal(self, shape, mean, stddev, dtype):\n        if self.seed:\n            op = stateless_random_ops.stateless_truncated_normal\n        else:\n            op = random_ops.truncated_normal\n        return op(shape=shape, mean=mean, stddev=stddev, dtype=dtype, seed=self.seed)",
    "docstring": "Random generator that selects appropriate random ops.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\initializers\\initializers_v2.py",
    "ast_data": "ClassDef name:_RandomGenerator FunctionDef name:__init__ arg:self arg:seed arguments arg arg Call Call If Compare Assign Assign FunctionDef name:random_normal arg:self arg:shape arg:mean arg:stddev arg:dtype arguments arg arg arg arg arg If Assign Assign Return return:yes Call FunctionDef name:random_uniform arg:self arg:shape arg:minval arg:maxval arg:dtype arguments arg arg arg arg arg If Assign Assign Return return:yes Call FunctionDef name:truncated_normal arg:self arg:shape arg:mean arg:stddev arg:dtype arguments arg arg arg arg arg If Assign Assign Return return:yes Call"
  },
  {
    "library": "cherrypy",
    "name": "stop",
    "source_code": "def stop(self):\n    self.ready = False\n    self.scgiserver._keepGoing = False\n    self.scgiserver._threadPool.maxSpare = 0",
    "docstring": "Stop the HTTP server.",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\process\\servers.py",
    "ast_data": "FunctionDef name:stop arg:self arguments arg Assign Assign Assign"
  },
  {
    "library": "matplotlib",
    "name": "_gci",
    "source_code": "def _gci(self):\n    ax = self._axstack.current()\n    if ax is None:\n        return None\n    im = ax._gci()\n    if im is not None:\n        return im\n    for ax in reversed(self.axes):\n        im = ax._gci()\n        if im is not None:\n            return im\n    return None",
    "docstring": "Get the current colorable artist. Specifically, returns the current instance ( created by or , created by or , etc.), or *None* if no such instance has been defined. The current image is an attribute of the current Axes, or the nearest earlier Axes in the current figure that contains an image. Notes ----- Historically, the only colorable artists were images; hence the name `` (get current image).",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\figure.py",
    "ast_data": "FunctionDef name:_gci arg:self arguments arg Assign Call If Compare Return return:no Assign Call If Compare Return return:yes For Call Assign Call If Compare Return return:yes Return return:no"
  },
  {
    "library": "tensorflow",
    "name": "compatible",
    "source_code": "@_tf_export('lite.experimental.authoring.compatible')\ndef compatible(target=None, converter_target_spec=None, **kwargs):\n    if target is None:\n\n        def wrapper(target):\n            return _Compatible(target, converter_target_spec, **kwargs)\n        return wrapper\n    else:\n        return _Compatible(target, converter_target_spec, **kwargs)",
    "docstring": "Wraps into a callable function with TFLite compatibility checking. Example: WARNING: Experimental interface, subject to change. Args: target: A to decorate. converter_target_spec : target_spec of TFLite converter parameter. **kwargs: The keyword arguments of the decorator class _Compatible. Returns: A callable object of .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\lite\\python\\authoring\\authoring.py",
    "ast_data": "FunctionDef name:compatible arg:target arg:converter_target_spec arguments arg arg arg If Compare FunctionDef name:wrapper arg:target arguments arg Return return:yes Call Return return:yes Return return:yes Call Call"
  },
  {
    "library": "scipy",
    "name": "zpk2ss",
    "source_code": "def zpk2ss(z, p, k):\n    return tf2ss(*zpk2tf(z, p, k))",
    "docstring": "Zero-pole-gain representation to state-space representation Parameters ---------- z, p : sequence Zeros and poles. k : float System gain. Returns ------- A, B, C, D : ndarray State space representation of the system, in controller canonical form.",
    "type": "function",
    "file_path": "scipy\\scipy\\signal\\_lti_conversion.py",
    "ast_data": "FunctionDef name:zpk2ss arg:z arg:p arg:k arguments arg arg arg Return return:yes Call Call"
  },
  {
    "library": "scipy",
    "name": "_get_pvalue",
    "source_code": "def _get_pvalue(statistic, distribution, alternative, symmetric=True, xp=None):\n    xp = array_namespace(statistic) if xp is None else xp\n    if alternative == 'less':\n        pvalue = distribution.cdf(statistic)\n    elif alternative == 'greater':\n        pvalue = distribution.sf(statistic)\n    elif alternative == 'two-sided':\n        pvalue = 2 * (distribution.sf(xp.abs(statistic)) if symmetric else xp.minimum(distribution.cdf(statistic), distribution.sf(statistic)))\n    else:\n        message = \"`alternative` must be 'less', 'greater', or 'two-sided'.\"\n        raise ValueError(message)\n    return pvalue",
    "docstring": "Get p-value given the statistic, (continuous) distribution, and alternative",
    "type": "function",
    "file_path": "scipy\\scipy\\stats\\_stats_py.py",
    "ast_data": "FunctionDef name:_get_pvalue arg:statistic arg:distribution arg:alternative arg:symmetric arg:xp arguments arg arg arg arg arg Assign Compare Call If Compare Assign Call If Compare Assign Call If Compare Assign Call Call Call Call Call Assign Raise Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "num_cores_per_replica",
    "source_code": "@property\ndef num_cores_per_replica(self) -> int:\n    return self._num_cores_per_replica",
    "docstring": "The number of cores per replica.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\tpu\\device_assignment.py",
    "ast_data": "FunctionDef name:num_cores_per_replica arg:self arguments arg Return return:yes"
  },
  {
    "library": "django",
    "name": "ll",
    "source_code": "@property\ndef ll(self):\n    return (self.min_x, self.min_y)",
    "docstring": "Return the lower-left coordinate.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\gdal\\envelope.py",
    "ast_data": "FunctionDef name:ll arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_unary_assert_doc",
    "source_code": "def _unary_assert_doc(sym, sym_name):\n\n    def _decorator(func):\n        opname = func.__name__\n        cap_sym_name = sym_name.capitalize()\n        func.__doc__ = '\\n    Assert the condition `x {sym}` holds element-wise.\\n\\n    When running in graph mode, you should add a dependency on this operation\\n    to ensure that it runs. Example of adding a dependency to an operation:\\n\\n    ```python\\n    with tf.control_dependencies([tf.debugging.{opname}(x, y)]):\\n      output = tf.reduce_sum(x)\\n    ```\\n\\n    {sym_name} means, for every element `x[i]` of `x`, we have `x[i] {sym}`.\\n    If `x` is empty this is trivially satisfied.\\n\\n    Args:\\n      x:  Numeric `Tensor`.\\n      data:  The tensors to print out if the condition is False.  Defaults to\\n        error message and first few entries of `x`.\\n      summarize: Print this many entries of each tensor.\\n      message: A string to prefix to the default message.\\n      name: A name for this operation (optional).  Defaults to \"{opname}\".\\n\\n    Returns:\\n      Op that raises `InvalidArgumentError` if `x {sym}` is False.\\n      @compatibility(eager)\\n        returns None\\n      @end_compatibility\\n\\n    Raises:\\n      InvalidArgumentError: if the check can be performed immediately and\\n        `x {sym}` is False. The check can be performed immediately during\\n        eager execution or if `x` is statically known.\\n    '.format(sym=sym, sym_name=cap_sym_name, opname=opname)\n        return func\n    return _decorator",
    "docstring": "Common docstring for assert_* ops that evaluate a unary predicate over every element of a tensor. Args: sym: Mathematical symbol for the check performed on each element, i.e. \"> 0\" sym_name: English-language name for the op described by sym Returns: Decorator that adds the appropriate docstring to the function for symbol .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\check_ops.py",
    "ast_data": "FunctionDef name:_unary_assert_doc arg:sym arg:sym_name arguments arg arg FunctionDef name:_decorator arg:func arguments arg Assign Assign Call Assign Call Return return:yes Return return:yes"
  },
  {
    "library": "numpy",
    "name": "chebcompanion",
    "source_code": "def chebcompanion(c):\n    [c] = pu.as_series([c])\n    if len(c) < 2:\n        raise ValueError('Series must have maximum degree of at least 1.')\n    if len(c) == 2:\n        return np.array([[-c[0] / c[1]]])\n    n = len(c) - 1\n    mat = np.zeros((n, n), dtype=c.dtype)\n    scl = np.array([1.0] + [np.sqrt(0.5)] * (n - 1))\n    top = mat.reshape(-1)[1::n + 1]\n    bot = mat.reshape(-1)[n::n + 1]\n    top[0] = np.sqrt(0.5)\n    top[1:] = 1 / 2\n    bot[...] = top\n    mat[:, -1] -= c[:-1] / c[-1] * (scl / scl[-1]) * 0.5\n    return mat",
    "docstring": "Return the scaled companion matrix of c. The basis polynomials are scaled so that the companion matrix is symmetric when is a Chebyshev basis polynomial. This provides better eigenvalue estimates than the unscaled case and for basis polynomials the eigenvalues are guaranteed to be real if is used to obtain them. Parameters ---------- c : array_like 1-D array of Chebyshev series coefficients ordered from low to high degree. Returns ------- mat : ndarray Scaled companion matrix of dimensions (deg, deg).",
    "type": "function",
    "file_path": "numpy\\numpy\\polynomial\\chebyshev.py",
    "ast_data": "FunctionDef name:chebcompanion arg:c arguments arg Assign Call If Compare Call Raise Call If Compare Call Return return:yes Call Assign Call Assign Call Assign Call Call Assign Call Assign Call Assign Call Assign Assign Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "set_solid_joinstyle",
    "source_code": "@_docstring.interpd\ndef set_solid_joinstyle(self, s):\n    js = JoinStyle(s)\n    if self._solidjoinstyle != js:\n        self.stale = True\n    self._solidjoinstyle = js",
    "docstring": "How to join segments if the line is solid (not ). The default joinstyle is :rc:. Parameters ---------- s : or %(JoinStyle)s",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\lines.py",
    "ast_data": "FunctionDef name:set_solid_joinstyle arg:self arg:s arguments arg arg Assign Call If Compare Assign Assign"
  },
  {
    "library": "tensorflow",
    "name": "_load",
    "source_code": "def _load(self):\n    module = importlib.import_module(self.__name__)\n    self._parent_module_globals[self._local_name] = module\n    self.__dict__.update(module.__dict__)\n    return module",
    "docstring": "Load the module and insert it into the parent's globals.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\utils\\generic_utils.py",
    "ast_data": "FunctionDef name:_load arg:self arguments arg Assign Call Assign Call Return return:yes"
  },
  {
    "library": "scipy",
    "name": "simple_reduce",
    "source_code": "def simple_reduce(self, rank):\n    if self.collapsed is not None:\n        return\n    assert rank > 0\n    while len(self.cs) > rank:\n        del self.cs[0]\n        del self.ds[0]",
    "docstring": "Reduce the rank of the matrix by dropping oldest vectors.",
    "type": "method",
    "file_path": "scipy\\scipy\\optimize\\_nonlin.py",
    "ast_data": "FunctionDef name:simple_reduce arg:self arg:rank arguments arg arg If Compare Return return:no Compare While Compare Call"
  },
  {
    "library": "scikit-learn",
    "name": "_raise_typeerror",
    "source_code": "def _raise_typeerror(X):\n    input_type = X.format if sp.issparse(X) else type(X)\n    err = 'Expected a CSR or CSC sparse matrix, got %s.' % input_type\n    raise TypeError(err)",
    "docstring": "Raises a TypeError if X is not a CSR or CSC matrix",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\utils\\sparsefuncs.py",
    "ast_data": "FunctionDef name:_raise_typeerror arg:X arguments arg Assign Call Call Assign Raise Call"
  },
  {
    "library": "tensorflow",
    "name": "_add_function_recursive",
    "source_code": "def _add_function_recursive(self, function, overwrite=False) -> None:\n    if self._is_function(function.name):\n        if overwrite:\n            self._remove_function(function.name)\n            self._add_function(function)\n    else:\n        self._add_function(function)\n    if hasattr(function, 'children'):\n        for f in function.children:\n            if self._is_function(f.name):\n                if overwrite:\n                    self._remove_function(f.name)\n                    self._add_function(f)\n            else:\n                self._add_function(f)",
    "docstring": "Adds function to the graph including other functions in its graph.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\ops.py",
    "ast_data": "FunctionDef name:_add_function_recursive arg:self arg:function arg:overwrite arguments arg arg arg If Call If Call Call Call If Call For If Call If Call Call Call"
  },
  {
    "library": "django",
    "name": "_update_cookie",
    "source_code": "def _update_cookie(self, encoded_data, response):\n    if encoded_data:\n        response.set_cookie(self.cookie_name, encoded_data, domain=settings.SESSION_COOKIE_DOMAIN, secure=settings.SESSION_COOKIE_SECURE or None, httponly=settings.SESSION_COOKIE_HTTPONLY or None, samesite=settings.SESSION_COOKIE_SAMESITE)\n    else:\n        response.delete_cookie(self.cookie_name, domain=settings.SESSION_COOKIE_DOMAIN, samesite=settings.SESSION_COOKIE_SAMESITE)",
    "docstring": "Either set the cookie with the encoded data if there is any data to store, or delete the cookie.",
    "type": "method",
    "file_path": "django\\django\\contrib\\messages\\storage\\cookie.py",
    "ast_data": "FunctionDef name:_update_cookie arg:self arg:encoded_data arg:response arguments arg arg arg If Call BoolOp BoolOp Call"
  },
  {
    "library": "pytorch",
    "name": "_handle_scatter_graph",
    "source_code": "def _handle_scatter_graph(self, scatter_graph):\n    assert isinstance(scatter_graph, ir.ComputedBuffer), f'scatter_graph must be an instance of ComputeBuffer but got {type(scatter_graph)}'\n\n    def contiguous_strides(x):\n        return sum((x_i * stride for x_i, stride in zip(x, scatter_graph.get_stride())))\n    return scatter_graph.data.store_output(scatter_graph.name, contiguous_strides, [])",
    "docstring": "Handle processing for a single scatter graph. Args: scatter_graph: The scatter graph to process",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\select_algorithm.py",
    "ast_data": "FunctionDef name:_handle_scatter_graph arg:self arg:scatter_graph arguments arg arg Call Call FunctionDef name:contiguous_strides arg:x arguments arg Return return:yes Call Call Call Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "num_chunks",
    "source_code": "@abstractmethod\ndef num_chunks(self) -> int:\n    pass",
    "docstring": "Return the number of chunks the column consists of.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\interchange\\dataframe_protocol.py",
    "ast_data": "FunctionDef name:num_chunks arg:self arguments arg"
  },
  {
    "library": "pytorch",
    "name": "wait_event",
    "source_code": "def wait_event(self, event) -> None:\n    event.wait(self)",
    "docstring": "Make all future work submitted to the stream wait for an event. Args: event (torch.xpu.Event): an event to wait for.",
    "type": "method",
    "file_path": "pytorch\\torch\\xpu\\streams.py",
    "ast_data": "FunctionDef name:wait_event arg:self arg:event arguments arg arg Call"
  },
  {
    "library": "pytorch",
    "name": "create_local_plan",
    "source_code": "@abc.abstractmethod\ndef create_local_plan(self) -> LoadPlan:\n    pass",
    "docstring": "Create a LoadPlan based on state_dict and metadata provided by set_up_planner. . N.B. This is called on every rank.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\checkpoint\\planner.py",
    "ast_data": "FunctionDef name:create_local_plan arg:self arguments arg"
  },
  {
    "library": "scikit-learn",
    "name": "type_name",
    "source_code": "def type_name(t):\n    module = t.__module__\n    qualname = t.__qualname__\n    if module == 'builtins':\n        return qualname\n    elif t == numbers.Real:\n        return 'float'\n    elif t == numbers.Integral:\n        return 'int'\n    return f'{module}.{qualname}'",
    "docstring": "Convert type into humman readable string.",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\utils\\validation.py",
    "ast_data": "FunctionDef name:type_name arg:t arguments arg Assign Assign If Compare Return return:yes If Compare Return return:yes If Compare Return return:yes Return return:yes"
  },
  {
    "library": "kornia",
    "name": "rt_matrix",
    "source_code": "@property\ndef rt_matrix(self) -> Tensor:\n    return self.extrinsics[..., :3, :4]",
    "docstring": "Return the 3x4 rotation-translation matrix. Returns: tensor of shape :math:.",
    "type": "method",
    "file_path": "kornia\\kornia\\geometry\\camera\\pinhole.py",
    "ast_data": "FunctionDef name:rt_matrix arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_enter_graph",
    "source_code": "@contextlib.contextmanager\ndef _enter_graph(g, eager, creator_stack=None):\n    if eager:\n        with g.as_default(), context.eager_mode():\n            if creator_stack is not None:\n                g._variable_creator_stack = creator_stack\n            yield\n    else:\n        with g.as_default():\n            if creator_stack is not None:\n                g._variable_creator_stack = creator_stack\n            yield",
    "docstring": "Context manager for selecting a graph and maybe eager mode.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\mirrored_run.py",
    "ast_data": "FunctionDef name:_enter_graph arg:g arg:eager arg:creator_stack arguments arg arg arg If With Call Call If Compare Assign With Call If Compare Assign"
  },
  {
    "library": "numpy",
    "name": "drop_metadata",
    "source_code": "def drop_metadata(dtype, /):\n    if dtype.fields is not None:\n        found_metadata = dtype.metadata is not None\n        names = []\n        formats = []\n        offsets = []\n        titles = []\n        for name, field in dtype.fields.items():\n            field_dt = drop_metadata(field[0])\n            if field_dt is not field[0]:\n                found_metadata = True\n            names.append(name)\n            formats.append(field_dt)\n            offsets.append(field[1])\n            titles.append(None if len(field) < 3 else field[2])\n        if not found_metadata:\n            return dtype\n        structure = {'names': names, 'formats': formats, 'offsets': offsets, 'titles': titles, 'itemsize': dtype.itemsize}\n        return np.dtype(structure, align=dtype.isalignedstruct)\n    elif dtype.subdtype is not None:\n        subdtype, shape = dtype.subdtype\n        new_subdtype = drop_metadata(subdtype)\n        if dtype.metadata is None and new_subdtype is subdtype:\n            return dtype\n        return np.dtype((new_subdtype, shape))\n    else:\n        if dtype.metadata is None:\n            return dtype\n        return np.dtype(dtype.str)",
    "docstring": "Returns the dtype unchanged if it contained no metadata or a copy of the dtype if it (or any of its structure dtypes) contained metadata. This utility is used by and to drop metadata before saving. .. note:: Due to its limitation this function may move to a more appropriate home or change in the future and is considered semi-public API only. .. warning:: This function does not preserve more strange things like record dtypes and user dtypes may simply return the wrong thing. If you need to be sure about the latter, check the result with: ``.",
    "type": "function",
    "file_path": "numpy\\numpy\\lib\\_utils_impl.py",
    "ast_data": "FunctionDef name:drop_metadata arguments arg If Compare Assign Compare Assign Assign Assign Assign For Call Assign Call If Compare Assign Call Call Call Call Compare Call If Return return:yes Assign Return return:yes Call If Compare Assign Assign Call If BoolOp Compare Compare Return return:yes Return return:yes Call If Compare Return return:yes Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "set_tensors",
    "source_code": "def set_tensors(self, names: Iterable[str], values: Iterable[torch.Tensor]) -> None:\n    if not isinstance(names, (list, tuple)):\n        names = list(names)\n    if not isinstance(values, (list, tuple)):\n        values = list(values)\n    assert len(names) == len(values), 'names and values must have the same length'\n    for name, value in zip(names, values):\n        self.set_tensor(name, value)",
    "docstring": "Set the attributes specified by the given paths to values. For example, to set the attributes mod.layer1.conv1.weight and mod.layer1.conv1.bias, use accessor.set_tensors([\"layer1.conv1.weight\", \"layer1.conv1.bias\"], [weight, bias])",
    "type": "method",
    "file_path": "pytorch\\torch\\nn\\utils\\_named_member_accessor.py",
    "ast_data": "FunctionDef name:set_tensors arg:self arg:names arg:values arguments arg arg arg If Call Assign Call If Call Assign Call Compare Call Call For Call Call"
  },
  {
    "library": "tensorflow",
    "name": "control_flow_op",
    "source_code": "@staticmethod\ndef control_flow_op(op):\n    return control_flow_util.IsSwitch(op) or control_flow_util.IsMerge(op)",
    "docstring": "Returns true if op is one of the special ops of in a while loop. Args: op: A tf.Operation. Returns: True if the given op is one of [Switch, Merge, Enter, Exit, NextIteration, LoopCond], which are all building blocks for TF while loops.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\tpu\\tensor_tracer.py",
    "ast_data": "FunctionDef name:control_flow_op arg:op arguments arg Return return:yes BoolOp Call Call"
  },
  {
    "library": "scikit-learn",
    "name": "_in_unstable_openblas_configuration",
    "source_code": "def _in_unstable_openblas_configuration():\n    import numpy\n    import scipy\n    modules_info = _get_threadpool_controller().info()\n    open_blas_used = any((info['internal_api'] == 'openblas' for info in modules_info))\n    if not open_blas_used:\n        return False\n    openblas_arm64_stable_version = parse_version('0.3.16')\n    for info in modules_info:\n        if info['internal_api'] != 'openblas':\n            continue\n        openblas_version = info.get('version')\n        openblas_architecture = info.get('architecture')\n        if openblas_version is None or openblas_architecture is None:\n            return True\n        if openblas_architecture == 'neoversen1' and parse_version(openblas_version) < openblas_arm64_stable_version:\n            return True\n    return False",
    "docstring": "Return True if in an unstable configuration for OpenBLAS",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\utils\\fixes.py",
    "ast_data": "FunctionDef name:_in_unstable_openblas_configuration arguments Assign Call Call Assign Call Compare If Return return:yes Assign Call For If Compare Assign Call Assign Call If BoolOp Compare Compare Return return:yes If BoolOp Compare Compare Call Return return:yes Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "run_tac",
    "source_code": "def run_tac(model_path, targets, output_path):\n    if not model_path:\n        raise ValueError('Invalid model_path.')\n    if not targets:\n        raise ValueError('Targets are not specified.')\n    if not output_path:\n        raise ValueError('Invalid output_path.')\n    return _pywrap_tac_wrapper.run_tac(model_path, targets, output_path)",
    "docstring": "Run target aware conversion for the given tflite model file. Args: model_path: Path to the tflite model file. targets: A list of string of the desired targets. E.g., ['GPU', 'CPU']. output_path: The output path. Returns: Whether the optimization succeeded. Raises: ValueError: Invalid model_path. Targets are not specified. Invalid output_path.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\compiler\\mlir\\lite\\experimental\\tac\\tac.py",
    "ast_data": "FunctionDef name:run_tac arg:model_path arg:targets arg:output_path arguments arg arg arg If Raise Call If Raise Call If Raise Call Return return:yes Call"
  },
  {
    "library": "kornia",
    "name": "_normalize_input",
    "source_code": "@staticmethod\ndef _normalize_input(x: torch.Tensor, eps: float=1e-06) -> torch.Tensor:\n    sp, mp = torch.std_mean(x, dim=(-3, -2, -1), keepdim=True)\n    return (x - mp.detach()) / (sp.detach() + eps)",
    "docstring": "Utility function that normalizes the input by batch.",
    "type": "method",
    "file_path": "kornia\\kornia\\feature\\orientation.py",
    "ast_data": "FunctionDef name:_normalize_input arg:x arg:eps arguments arg arg Assign Call Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "key_path_to_source",
    "source_code": "def key_path_to_source(kp: KeyPath) -> Source:\n    source: Source = LocalSource('args')\n    for k in kp:\n        if isinstance(k, SequenceKey):\n            source = GetItemSource(source, k.idx)\n        elif isinstance(k, MappingKey):\n            source = GetItemSource(source, k.key)\n        elif isinstance(k, GetAttrKey):\n            source = AttrSource(source, k.name)\n        else:\n            raise ValueError(f'Unknown KeyEntry {k}')\n    return source",
    "docstring": "Given a key path, return the source for the key path.",
    "type": "function",
    "file_path": "pytorch\\torch\\_export\\non_strict_utils.py",
    "ast_data": "FunctionDef name:key_path_to_source arg:kp arguments arg Call For If Call Assign Call If Call Assign Call If Call Assign Call Raise Call Return return:yes"
  },
  {
    "library": "numpy",
    "name": "mapparms",
    "source_code": "def mapparms(old, new):\n    oldlen = old[1] - old[0]\n    newlen = new[1] - new[0]\n    off = (old[1] * new[0] - old[0] * new[1]) / oldlen\n    scl = newlen / oldlen\n    return (off, scl)",
    "docstring": "Linear map parameters between domains. Return the parameters of the linear map `oldnew` maps the first domain to the second. See Also -------- getdomain, mapdomain Notes ----- Also works for complex numbers, and thus can be used to calculate the parameters required to map any line in the complex plane to any other line therein. Examples -------- >>> from numpy.polynomial import polyutils as pu >>> pu.mapparms((-1,1),(-1,1)) (0.0, 1.0) >>> pu.mapparms((1,-1),(-1,1)) (-0.0, -1.0) >>> i = complex(0,1) >>> pu.mapparms((-i,-1),(1,i)) ((1+1j), (1-0j))",
    "type": "function",
    "file_path": "numpy\\numpy\\polynomial\\polyutils.py",
    "ast_data": "FunctionDef name:mapparms arg:old arg:new arguments arg arg Assign Assign Assign Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "parameters",
    "source_code": "@property\ndef parameters(self):\n    return {k: v for k, v in self._parameters.items() if not k.startswith('__') and k != 'self'}",
    "docstring": "Dictionary of parameters used to instantiate this .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\distributions\\distribution.py",
    "ast_data": "FunctionDef name:parameters arg:self arguments arg Return return:yes Call BoolOp Call Compare"
  },
  {
    "library": "tensorflow",
    "name": "should_checkpoint",
    "source_code": "@property\ndef should_checkpoint(self):\n    return self._strategy.extended.should_checkpoint",
    "docstring": "Whether to save checkpoint.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\distribute\\distribute_coordinator_utils.py",
    "ast_data": "FunctionDef name:should_checkpoint arg:self arguments arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "codegen_device_copy",
    "source_code": "def codegen_device_copy(self, src, dst, non_blocking: bool):\n    self.writeline(f'AOTI_TORCH_ERROR_CODE_CHECK(aoti_torch_copy_({dst}, {src}, {non_blocking}));')",
    "docstring": "This function is overridden by cpp_wrapper_cpu_array_ref, so we don't need to handle cases where dst is not an AtenTensorHandle.",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\codegen\\cpp_wrapper_cpu.py",
    "ast_data": "FunctionDef name:codegen_device_copy arg:self arg:src arg:dst arg:non_blocking arguments arg arg arg arg Call"
  },
  {
    "library": "cherrypy",
    "name": "__init__",
    "source_code": "def __init__(self, appid=None, logger_root='cherrypy'):\n    self.logger_root = logger_root\n    self.appid = appid\n    if appid is None:\n        self.error_log = logging.getLogger('%s.error' % logger_root)\n        self.access_log = logging.getLogger('%s.access' % logger_root)\n    else:\n        self.error_log = logging.getLogger('%s.error.%s' % (logger_root, appid))\n        self.access_log = logging.getLogger('%s.access.%s' % (logger_root, appid))\n    self.error_log.setLevel(logging.INFO)\n    self.access_log.setLevel(logging.INFO)\n    self.error_log.addHandler(NullHandler())\n    self.access_log.addHandler(NullHandler())\n    cherrypy.engine.subscribe('graceful', self.reopen_files)",
    "docstring": "Initialize a CherryPy log manager.",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\_cplogging.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:appid arg:logger_root arguments arg arg arg Assign Assign If Compare Assign Call Assign Call Assign Call Assign Call Call Call Call Call Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_to_sparse_input_and_drop_ignore_values",
    "source_code": "def _to_sparse_input_and_drop_ignore_values(input_tensor, ignore_value=None):\n    input_tensor = sparse_tensor_lib.convert_to_tensor_or_sparse_tensor(input_tensor)\n    if isinstance(input_tensor, sparse_tensor_lib.SparseTensor):\n        return input_tensor\n    with ops.name_scope(None, 'to_sparse_input', (input_tensor, ignore_value)):\n        if ignore_value is None:\n            if input_tensor.dtype == dtypes.string:\n                ignore_value = ''\n            elif input_tensor.dtype.is_integer:\n                ignore_value = -1\n            else:\n                ignore_value = input_tensor.dtype.as_numpy_dtype()\n        ignore_value = math_ops.cast(ignore_value, input_tensor.dtype, name='ignore_value')\n        indices = array_ops.where_v2(math_ops.not_equal(input_tensor, ignore_value), name='indices')\n        return sparse_tensor_lib.SparseTensor(indices=indices, values=array_ops.gather_nd(input_tensor, indices, name='values'), dense_shape=array_ops.shape(input_tensor, out_type=dtypes.int64, name='dense_shape'))",
    "docstring": "Converts a to a , dropping ignore_value cells. If is already a , just return it. Args: input_tensor: A string or integer . ignore_value: Entries in equal to this value will be absent from the resulting . If , default value of 's dtype will be used ('' for , -1 for ). Returns: A with the same shape as . Raises: ValueError: when 's rank is .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\feature_column\\feature_column_v2.py",
    "ast_data": "FunctionDef name:_to_sparse_input_and_drop_ignore_values arg:input_tensor arg:ignore_value arguments arg arg Assign Call If Call Return return:yes With Call If Compare If Compare Assign If Assign Assign Call Assign Call Assign Call Call Return return:yes Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_add_extension_type_constructor",
    "source_code": "def _add_extension_type_constructor(cls):\n    if '__init__' in cls.__dict__:\n        _wrap_user_constructor(cls)\n    else:\n        _build_extension_type_constructor(cls)",
    "docstring": "Creates a constructor for a ExtensionType or ExtensionTypeSpec subclass.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\extension_type.py",
    "ast_data": "FunctionDef name:_add_extension_type_constructor arg:cls arguments arg If Compare Call Call"
  },
  {
    "library": "tensorflow",
    "name": "__call__",
    "source_code": "@abc.abstractmethod\ndef __call__(self, shardable_tensors: Sequence[ShardableTensor]) -> Sequence[Shard]:\n    pass",
    "docstring": "Returns a list of shards for the given shardable tensors.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\checkpoint\\sharding\\sharding_util.py",
    "ast_data": "FunctionDef name:__call__ arg:self arg:shardable_tensors arguments arg arg"
  },
  {
    "library": "django",
    "name": "default_trim_value",
    "source_code": "def default_trim_value():\n    return geos_version_tuple() >= (3, 12)",
    "docstring": "GEOS changed the default value in 3.12.0. Can be replaced by True when 3.12.0 becomes the minimum supported version.",
    "type": "function",
    "file_path": "django\\django\\contrib\\gis\\geos\\prototypes\\io.py",
    "ast_data": "FunctionDef name:default_trim_value arguments Return return:yes Compare Call"
  },
  {
    "library": "pytorch",
    "name": "_maybe_wait",
    "source_code": "def _maybe_wait(tensor: torch.Tensor) -> torch.Tensor:\n    if isinstance(tensor, ft_c.AsyncCollectiveTensor):\n        return tensor.wait()\n    return tensor",
    "docstring": "When tracing the code, the result tensor is not an AsyncCollectiveTensor, so we cannot call ``.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\tensor\\experimental\\_attention.py",
    "ast_data": "FunctionDef name:_maybe_wait arg:tensor arguments arg If Call Return return:yes Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "is_null_merge",
    "source_code": "@property\ndef is_null_merge(self):\n    return not bool(self._spec.to_string())",
    "docstring": "Indicate whether the wrapped spec is empty. In the degenerate case where self._spec is an empty specification, a caller may wish to skip a merge step entirely. (However this class does not have enough information to make that determination.) Returns: A boolean indicating whether a device merge will be trivial.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\device.py",
    "ast_data": "FunctionDef name:is_null_merge arg:self arguments arg Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "unify_mask_base_type",
    "source_code": "def unify_mask_base_type(buffer: IndentedBuffer, vars: tuple[CSEVariable, ...], dtype=torch.float):\n    new_vars = (V.kernel.cse.generate(buffer, f'{V.kernel._get_mask_cast(var, dtype)}') for var in vars)\n    return new_vars",
    "docstring": "Given list of cse variables, Cast each to new mask base dtype and return casted cse variable.",
    "type": "function",
    "file_path": "pytorch\\torch\\_inductor\\codegen\\cpp_utils.py",
    "ast_data": "FunctionDef name:unify_mask_base_type arg:buffer arg:vars arg:dtype arguments arg arg arg Assign Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "forward",
    "source_code": "def forward(self, *args, **kwargs):\n    return self.fn(*args, **kwargs)",
    "docstring": "Simple forward that just calls the `WrapperModule.__init__`.",
    "type": "method",
    "file_path": "pytorch\\torch\\ao\\quantization\\pt2e\\export_utils.py",
    "ast_data": "FunctionDef name:forward arg:self arguments arg arg arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "sums",
    "source_code": "@property\ndef sums(self) -> list[int]:\n    return [idx for idx, placement in enumerate(self.placements) if placement.is_partial()]",
    "docstring": "sums is a property we derive from of the distributed tensor. It simply return a list of ints where sums[i] denotes the pending sum (partial) on mesh dim i",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\tensor\\_dtensor_spec.py",
    "ast_data": "FunctionDef name:sums arg:self arguments arg Return return:yes Call Call"
  },
  {
    "library": "kornia",
    "name": "quaternion_exp_to_log",
    "source_code": "def quaternion_exp_to_log(quaternion: Tensor, eps: float=1e-08) -> Tensor:\n    if not isinstance(quaternion, Tensor):\n        raise TypeError(f'Input type is not a Tensor. Got {type(quaternion)}')\n    if not quaternion.shape[-1] == 4:\n        raise ValueError(f'Input must be a tensor of shape (*, 4). Got {quaternion.shape}')\n    quaternion_vector: Tensor = tensor([])\n    quaternion_scalar: Tensor = tensor([])\n    quaternion_scalar = quaternion[..., 0:1]\n    quaternion_vector = quaternion[..., 1:4]\n    norm_q: Tensor = torch.norm(quaternion_vector, p=2, dim=-1, keepdim=True).clamp(min=eps)\n    quaternion_log: Tensor = quaternion_vector * torch.acos(torch.clamp(quaternion_scalar, min=-1.0, max=1.0)) / norm_q\n    return quaternion_log",
    "docstring": "Apply the log map to a quaternion. The quaternion should be in (w, x, y, z) format. Args: quaternion: a tensor containing a quaternion to be converted. The tensor can be of shape :math:. eps: a small number for clamping. Return: the quaternion log map of shape :math:. Example: >>> quaternion = tensor((1., 0., 0., 0.)) >>> quaternion_exp_to_log(quaternion, eps=torch.finfo(quaternion.dtype).eps) tensor([0., 0., 0.])",
    "type": "function",
    "file_path": "kornia\\kornia\\geometry\\conversions.py",
    "ast_data": "FunctionDef name:quaternion_exp_to_log arg:quaternion arg:eps arguments arg arg If Call Raise Call Call If Compare Raise Call Call Call Assign Assign Call Call Call Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "draw_path",
    "source_code": "def draw_path(self, gc, path, transform, rgbFace=None):\n    raise NotImplementedError",
    "docstring": "Draw a instance using the given affine transform.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\backend_bases.py",
    "ast_data": "FunctionDef name:draw_path arg:self arg:gc arg:path arg:transform arg:rgbFace arguments arg arg arg arg arg Raise"
  },
  {
    "library": "matplotlib",
    "name": "edit_all_margins_min",
    "source_code": "def edit_all_margins_min(self, todo, size):\n    for i in range(len(self.margin_vals[todo])):\n        self.edit_margin_min(todo, size, i)",
    "docstring": "Change the minimum size of all the margin of all the cells in the layout grid. Parameters ---------- todo : {'left', 'right', 'bottom', 'top'} The margin to alter. size : float Minimum size of the margin. If it is larger than the existing minimum it updates the margin size. Fraction of figure size.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\_layoutgrid.py",
    "ast_data": "FunctionDef name:edit_all_margins_min arg:self arg:todo arg:size arguments arg arg arg For Call Call Call"
  },
  {
    "library": "matplotlib",
    "name": "set_layout_engine",
    "source_code": "def set_layout_engine(self, layout=None, **kwargs):\n    if layout is None:\n        if mpl.rcParams['figure.autolayout']:\n            layout = 'tight'\n        elif mpl.rcParams['figure.constrained_layout.use']:\n            layout = 'constrained'\n        else:\n            self._layout_engine = None\n            return\n    if layout == 'tight':\n        new_layout_engine = TightLayoutEngine(**kwargs)\n    elif layout == 'constrained':\n        new_layout_engine = ConstrainedLayoutEngine(**kwargs)\n    elif layout == 'compressed':\n        new_layout_engine = ConstrainedLayoutEngine(compress=True, **kwargs)\n    elif layout == 'none':\n        if self._layout_engine is not None:\n            new_layout_engine = PlaceHolderLayoutEngine(self._layout_engine.adjust_compatible, self._layout_engine.colorbar_gridspec)\n        else:\n            new_layout_engine = None\n    elif isinstance(layout, LayoutEngine):\n        new_layout_engine = layout\n    else:\n        raise ValueError(f\"Invalid value for 'layout': {layout!r}\")\n    if self._check_layout_engines_compat(self._layout_engine, new_layout_engine):\n        self._layout_engine = new_layout_engine\n    else:\n        raise RuntimeError('Colorbar layout of new layout engine not compatible with old engine, and a colorbar has been created.  Engine not changed.')",
    "docstring": "Set the layout engine for this figure. Parameters ---------- layout : {'constrained', 'compressed', 'tight', 'none', , None} - 'constrained' will use - 'compressed' will also use , but with a correction that attempts to make a good layout for fixed-aspect ratio Axes. - 'tight' uses - 'none' removes layout engine. If a instance, that instance will be used. If , the behavior is controlled by :rc: (which if behaves as if 'tight' was passed) and :rc: (which if behaves as if 'constrained' was passed). If both are , :rc: takes priority. Users and libraries can define their own layout engines and pass the instance directly as well. **kwargs The keyword arguments are passed to the layout engine to set things like padding and margin sizes. Only used if *layout* is a string.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\figure.py",
    "ast_data": "FunctionDef name:set_layout_engine arg:self arg:layout arguments arg arg arg If Compare If Assign If Assign Assign Return return:no If Compare Assign Call If Compare Assign Call If Compare Assign Call If Compare If Compare Assign Call Assign If Call Assign Raise Call If Call Assign Raise Call"
  },
  {
    "library": "scipy",
    "name": "__init__",
    "source_code": "def __init__(self, field=None, field_args=(), g_cons=None, g_cons_args=(), workers=1):\n    super().__init__()\n    self.index = -1\n    self.Vertex = VertexScalarField\n    self.field = field\n    self.field_args = field_args\n    self.wfield = FieldWrapper(field, field_args)\n    self.g_cons = g_cons\n    self.g_cons_args = g_cons_args\n    self.wgcons = ConstraintWrapper(g_cons, g_cons_args)\n    self.gpool = set()\n    self.fpool = set()\n    self.sfc_lock = False\n    self.workers = workers\n    self._mapwrapper = MapWrapper(workers)\n    if workers == 1:\n        self.process_gpool = self.proc_gpool\n        if g_cons is None:\n            self.process_fpool = self.proc_fpool_nog\n        else:\n            self.process_fpool = self.proc_fpool_g\n    else:\n        self.process_gpool = self.pproc_gpool\n        if g_cons is None:\n            self.process_fpool = self.pproc_fpool_nog\n        else:\n            self.process_fpool = self.pproc_fpool_g",
    "docstring": "Class for a vertex cache for a simplicial complex with an associated field. Parameters ---------- field : callable Scalar or vector field callable. field_args : tuple, optional Any additional fixed parameters needed to completely specify the field function g_cons : dict or sequence of dict, optional Constraints definition. Function(s) `multiprocessing.Pool `) to compute the field functions in parallel.",
    "type": "method",
    "file_path": "scipy\\scipy\\optimize\\_shgo_lib\\_vertex.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:field arg:field_args arg:g_cons arg:g_cons_args arg:workers arguments arg arg arg arg arg arg Call Call Assign Assign Assign Assign Assign Call Assign Assign Assign Call Assign Call Assign Call Assign Assign Assign Call If Compare Assign If Compare Assign Assign Assign If Compare Assign Assign"
  },
  {
    "library": "kornia",
    "name": "LuvToRgb",
    "source_code": "class LuvToRgb(Module):\n    ONNX_DEFAULT_INPUTSHAPE: ClassVar[list[int]] = [-1, 3, -1, -1]\n    ONNX_DEFAULT_OUTPUTSHAPE: ClassVar[list[int]] = [-1, 3, -1, -1]\n\n    def forward(self, image: torch.Tensor) -> torch.Tensor:\n        return luv_to_rgb(image)",
    "docstring": "Convert an image from Luv to RGB. Returns: RGB version of the image. Shape: - image: :math: - output: :math: Examples: >>> input = torch.rand(2, 3, 4, 5) >>> rgb = LuvToRgb() >>> output = rgb(input) # 2x3x4x5 References: [1] [2] [3]",
    "type": "class",
    "file_path": "kornia\\kornia\\color\\luv.py",
    "ast_data": "ClassDef name:LuvToRgb FunctionDef name:forward arg:self arg:image arguments arg arg Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "_rvs_2d",
    "source_code": "def _rvs_2d(self, mu, kappa, size, random_state):\n    mean_angle = np.arctan2(mu[1], mu[0])\n    angle_samples = random_state.vonmises(mean_angle, kappa, size=size)\n    samples = np.stack([np.cos(angle_samples), np.sin(angle_samples)], axis=-1)\n    return samples",
    "docstring": "In 2D, the von Mises-Fisher distribution reduces to the von Mises distribution which can be efficiently sampled by numpy. This method is much faster than the general rejection sampling based algorithm.",
    "type": "method",
    "file_path": "scipy\\scipy\\stats\\_multivariate.py",
    "ast_data": "FunctionDef name:_rvs_2d arg:self arg:mu arg:kappa arg:size arg:random_state arguments arg arg arg arg arg Assign Call Assign Call Assign Call Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_pack_sequence_as",
    "source_code": "def _pack_sequence_as(structured_outputs, op_outputs):\n    outputs_with_nones = []\n    counter = 0\n    for output in nest.flatten(structured_outputs, expand_composites=True):\n        if output is None:\n            outputs_with_nones.append(None)\n        else:\n            outputs_with_nones.append(op_outputs[counter])\n            counter += 1\n    return func_graph_module.pack_sequence_as(structured_outputs, outputs_with_nones)",
    "docstring": "Packs the outputs of the gradient If/Case op. The branch functions may contain None's in the list of . has those outputs missing. So we need to add those Nones to the list of and then pack it in the same structure as . Args: structured_outputs: structured_outputs from one of the branch functions. op_outputs: List of output tensors of the op. Returns: packed like .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\cond_v2.py",
    "ast_data": "FunctionDef name:_pack_sequence_as arg:structured_outputs arg:op_outputs arguments arg arg Assign Assign For Call If Compare Call Call Return return:yes Call"
  },
  {
    "library": "django",
    "name": "enable_constraint_checking",
    "source_code": "def enable_constraint_checking(self):\n    self.needs_rollback, needs_rollback = (False, self.needs_rollback)\n    try:\n        with self.cursor() as cursor:\n            cursor.execute('SET foreign_key_checks=1')\n    finally:\n        self.needs_rollback = needs_rollback",
    "docstring": "Re-enable foreign key checks after they have been disabled.",
    "type": "method",
    "file_path": "django\\django\\db\\backends\\mysql\\base.py",
    "ast_data": "FunctionDef name:enable_constraint_checking arg:self arguments arg Assign Try With Call Call Assign"
  },
  {
    "library": "authlib",
    "name": "validate_request_parameter_supported",
    "source_code": "def validate_request_parameter_supported(self):\n    _validate_boolean_value(self, 'request_parameter_supported')",
    "docstring": "OPTIONAL. Boolean value specifying whether the OP supports use of the request parameter, with true indicating support. If omitted, the default value is false.",
    "type": "method",
    "file_path": "authlib\\authlib\\oidc\\discovery\\models.py",
    "ast_data": "FunctionDef name:validate_request_parameter_supported arg:self arguments arg Call"
  },
  {
    "library": "kornia",
    "name": "build_pyramid",
    "source_code": "def build_pyramid(input: Tensor, max_level: int, border_type: str='reflect', align_corners: bool=False) -> list[Tensor]:\n    KORNIA_CHECK_SHAPE(input, ['B', 'C', 'H', 'W'])\n    KORNIA_CHECK(isinstance(max_level, int) or max_level < 0, f'Invalid max_level, it must be a positive integer. Got: {max_level}')\n    pyramid: list[Tensor] = []\n    pyramid.append(input)\n    for _ in range(max_level - 1):\n        img_curr: Tensor = pyramid[-1]\n        img_down: Tensor = pyrdown(img_curr, border_type, align_corners)\n        pyramid.append(img_down)\n    return pyramid",
    "docstring": "Construct the Gaussian pyramid for a tensor image. .. image:: _static/img/build_pyramid.png The function constructs a vector of images and builds the Gaussian pyramid by recursively applying pyrDown to the previously built pyramid layers. Args: input : the tensor to be used to construct the pyramid. max_level: 0-based index of the last (the smallest) pyramid layer. It must be non-negative. border_type: the padding mode to be applied before convolving. The expected modes are: `(B, C, H, W)[(B, C, H, W), (B, C, H/2, W/2), ...]`",
    "type": "function",
    "file_path": "kornia\\kornia\\geometry\\transform\\pyramid.py",
    "ast_data": "FunctionDef name:build_pyramid arg:input arg:max_level arg:border_type arg:align_corners arguments arg arg arg arg Call Call BoolOp Call Compare Call For Call Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_check_shape",
    "source_code": "def _check_shape(shape, key):\n    assert shape is not None\n    if not nest.is_nested(shape):\n        shape = [shape]\n    shape = tuple(shape)\n    for dimension in shape:\n        if not isinstance(dimension, six.integer_types):\n            raise TypeError('shape dimensions must be integer. shape: {}, key: {}'.format(shape, key))\n        if dimension < 1:\n            raise ValueError('shape dimensions must be greater than 0. shape: {}, key: {}'.format(shape, key))\n    return shape",
    "docstring": "Returns shape if it's valid, raises error otherwise.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\feature_column\\feature_column.py",
    "ast_data": "FunctionDef name:_check_shape arg:shape arg:key arguments arg arg Compare If Call Assign Assign Call For If Call Raise Call Call If Compare Raise Call Call Return return:yes"
  },
  {
    "library": "scipy",
    "name": "ChebyshevQuadrature",
    "source_code": "class ChebyshevQuadrature(LSQBenchmarkProblem):\n    INITIAL_GUESSES = [(1 + np.arange(11, dtype=float)) / 12]\n\n    def __init__(self, x0):\n        super().__init__(11, 11, 0.002799761, x0)\n        cp = Chebyshev(1)\n        self.T_all = [cp.basis(i, domain=[0.0, 1.0]) for i in range(11)]\n\n    def fun(self, x):\n        f = np.empty(self.n)\n        for i in range(self.m):\n            T = self.T_all[i]\n            f[i] = np.mean(T(x)) - T.integ(lbnd=0.0)(1.0)\n        return f\n\n    def jac(self, x):\n        J = np.empty((self.m, self.n))\n        for i in range(self.m):\n            T = self.T_all[i]\n            J[i] = T.deriv()(x)\n        J /= self.n\n        return J",
    "docstring": "The problem of determining the optimal nodes of a quadrature formula with equal weights, [1]_. Number of variables --- 11, number of residuals --- 11, no bounds. .. [1] Brett M. Averick et al. \"The MINPACK-2 Test Problem Collection\", p. 30",
    "type": "class",
    "file_path": "scipy\\benchmarks\\benchmarks\\lsq_problems.py",
    "ast_data": "ClassDef name:ChebyshevQuadrature Assign Call FunctionDef name:__init__ arg:self arg:x0 arguments arg arg Call Call Assign Call Assign Call Call FunctionDef name:fun arg:self arg:x arguments arg arg Assign Call For Call Assign Assign Call Call Call Call Return return:yes FunctionDef name:jac arg:self arg:x arguments arg arg Assign Call For Call Assign Assign Call Call Return return:yes"
  },
  {
    "library": "scipy",
    "name": "_isscalar",
    "source_code": "def _isscalar(x):\n    return np.isscalar(x) or (hasattr(x, 'shape') and x.shape == ())",
    "docstring": "Check whether x is if a scalar type, or 0-dim",
    "type": "function",
    "file_path": "scipy\\scipy\\interpolate\\_polyint.py",
    "ast_data": "FunctionDef name:_isscalar arg:x arguments arg Return return:yes BoolOp Call BoolOp Call Compare"
  },
  {
    "library": "pandas",
    "name": "headers",
    "source_code": "@property\ndef headers(self) -> Sequence[str]:\n    if self.with_counts:\n        return [' # ', 'Column', 'Non-Null Count', 'Dtype']\n    return [' # ', 'Column', 'Dtype']",
    "docstring": "Headers names of the columns in verbose table.",
    "type": "method",
    "file_path": "pandas\\pandas\\io\\formats\\info.py",
    "ast_data": "FunctionDef name:headers arg:self arguments arg If Return return:yes Return return:yes"
  },
  {
    "library": "seaborn",
    "name": "_assign_default_kwargs",
    "source_code": "def _assign_default_kwargs(kws, call_func, source_func):\n    needed = inspect.signature(call_func).parameters\n    defaults = inspect.signature(source_func).parameters\n    for param in needed:\n        if param in defaults and param not in kws:\n            kws[param] = defaults[param].default\n    return kws",
    "docstring": "Assign default kwargs for call_func using values from source_func.",
    "type": "function",
    "file_path": "seaborn\\seaborn\\utils.py",
    "ast_data": "FunctionDef name:_assign_default_kwargs arg:kws arg:call_func arg:source_func arguments arg arg arg Assign Call Assign Call For If BoolOp Compare Compare Assign Return return:yes"
  },
  {
    "library": "authlib",
    "name": "check_redirect_uri",
    "source_code": "def check_redirect_uri(self, redirect_uri):\n    raise NotImplementedError()",
    "docstring": "Validate redirect_uri parameter in Authorization Endpoints. For instance, in the client table, there is an `` column:: def check_redirect_uri(self, redirect_uri): return redirect_uri in self.allowed_redirect_uris :param redirect_uri: A URL string for redirecting. :return: bool",
    "type": "method",
    "file_path": "authlib\\authlib\\oauth2\\rfc6749\\models.py",
    "ast_data": "FunctionDef name:check_redirect_uri arg:self arg:redirect_uri arguments arg arg Raise Call"
  },
  {
    "library": "pandas",
    "name": "fillna",
    "source_code": "def fillna(self, value):\n    if not is_scalar(value):\n        raise TypeError(f\"'value' must be a scalar, passed: {type(value).__name__}\")\n    if self.hasnans:\n        result = self.putmask(self._isnan, value)\n        return Index._with_infer(result, name=self.name)\n    return self._view()",
    "docstring": "Fill NA/NaN values with the specified value. Parameters ---------- value : scalar Scalar value to use to fill holes (e.g. 0). This value cannot be a list-likes. Returns ------- Index NA/NaN values replaced with . See Also -------- DataFrame.fillna : Fill NaN values of a DataFrame. Series.fillna : Fill NaN Values of a Series. Examples -------- >>> idx = pd.Index([np.nan, np.nan, 3]) >>> idx.fillna(0) Index([0.0, 0.0, 3.0], dtype='float64')",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\indexes\\base.py",
    "ast_data": "FunctionDef name:fillna arg:self arg:value arguments arg arg If Call Raise Call Call If Assign Call Return return:yes Call Return return:yes Call"
  },
  {
    "library": "numpy",
    "name": "_nanquantile_unchecked",
    "source_code": "def _nanquantile_unchecked(a, q, axis=None, out=None, overwrite_input=False, method='linear', keepdims=np._NoValue, weights=None):\n    if a.size == 0:\n        return np.nanmean(a, axis, out=out, keepdims=keepdims)\n    return fnb._ureduce(a, func=_nanquantile_ureduce_func, q=q, weights=weights, keepdims=keepdims, axis=axis, out=out, overwrite_input=overwrite_input, method=method)",
    "docstring": "Assumes that q is in [0, 1], and is an ndarray",
    "type": "function",
    "file_path": "numpy\\numpy\\lib\\_nanfunctions_impl.py",
    "ast_data": "FunctionDef name:_nanquantile_unchecked arg:a arg:q arg:axis arg:out arg:overwrite_input arg:method arg:keepdims arg:weights arguments arg arg arg arg arg arg arg arg If Compare Return return:yes Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "_make_elementwise_binary_prim",
    "source_code": "def _make_elementwise_binary_prim(name: str, *, type_promotion: ELEMENTWISE_PRIM_TYPE_PROMOTION_KIND, **kwargs):\n    return _make_prim(schema=f'{name}(Tensor self, Tensor other) -> Tensor', meta=partial(_prim_elementwise_meta, type_promotion=type_promotion), return_type=RETURN_TYPE.NEW, **kwargs)",
    "docstring": "Creates an elementwise binary prim.",
    "type": "function",
    "file_path": "pytorch\\torch\\_prims\\__init__.py",
    "ast_data": "FunctionDef name:_make_elementwise_binary_prim arg:name arguments arg arg arg Return return:yes Call Call"
  },
  {
    "library": "pandas",
    "name": "na_value",
    "source_code": "@property\ndef na_value(self) -> object:\n    return np.nan",
    "docstring": "Default NA value to use for this type. This is used in e.g. ExtensionArray.take. This should be the user-facing \"boxed\" version of the NA value, not the physical NA value for storage. e.g. for JSONArray, this is an empty dictionary.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\dtypes\\base.py",
    "ast_data": "FunctionDef name:na_value arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "get_recurrent_dropout_mask_for_cell",
    "source_code": "def get_recurrent_dropout_mask_for_cell(self, inputs, training, count=1):\n    if self.recurrent_dropout == 0:\n        return None\n    init_kwargs = dict(inputs=inputs, training=training, count=count)\n    return self._recurrent_dropout_mask_cache.setdefault(kwargs=init_kwargs)",
    "docstring": "Get the recurrent dropout mask for RNN cell. It will create mask based on context if there isn't any existing cached mask. If a new mask is generated, it will update the cache in the cell. Args: inputs: The input tensor whose shape will be used to generate dropout mask. training: Boolean tensor, whether its in training mode, dropout will be ignored in non-training mode. count: Int, how many dropout mask will be generated. It is useful for cell that has internal weights fused together. Returns: List of mask tensor, generated or cached mask based on context.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\layers\\recurrent.py",
    "ast_data": "FunctionDef name:get_recurrent_dropout_mask_for_cell arg:self arg:inputs arg:training arg:count arguments arg arg arg arg If Compare Return return:no Assign Call Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "_has_plotted_object",
    "source_code": "@final\n@staticmethod\ndef _has_plotted_object(ax: Axes) -> bool:\n    return len(ax.lines) != 0 or len(ax.artists) != 0 or len(ax.containers) != 0",
    "docstring": "check whether ax has data",
    "type": "method",
    "file_path": "pandas\\pandas\\plotting\\_matplotlib\\core.py",
    "ast_data": "FunctionDef name:_has_plotted_object arg:ax arguments arg Return return:yes BoolOp Compare Call Compare Call Compare Call"
  },
  {
    "library": "pytorch",
    "name": "ObserverBase",
    "source_code": "class ObserverBase(ABC, nn.Module):\n\n    def __init__(self, dtype, is_dynamic: bool=False):\n        super().__init__()\n        self.dtype = dtype\n        self.is_dynamic = is_dynamic\n\n    @abstractmethod\n    def forward(self, x):\n        pass\n\n    @abstractmethod\n    def calculate_qparams(self, **kwargs):\n        pass\n    with_args = classmethod(_with_args)\n    with_callable_args = classmethod(_with_callable_args)",
    "docstring": "Base observer Module. Any observer implementation should derive from this class. Concrete observers should follow the same API. In forward, they will update the statistics of the observed Tensor. And they should provide a function that computes the quantization parameters given the collected statistics. Args: dtype: dtype argument to the node needed to implement the reference model spec. is_dynamic: indicator for whether the observer is a placeholder for dynamic quantization or static quantization",
    "type": "class",
    "file_path": "pytorch\\torch\\ao\\quantization\\observer.py",
    "ast_data": "ClassDef name:ObserverBase FunctionDef name:__init__ arg:self arg:dtype arg:is_dynamic arguments arg arg arg Call Call Assign Assign FunctionDef name:forward arg:self arg:x arguments arg arg FunctionDef name:calculate_qparams arg:self arguments arg arg Assign Call Assign Call"
  },
  {
    "library": "cherrypy",
    "name": "put",
    "source_code": "def put(self, obj, size):\n    raise NotImplementedError",
    "docstring": "Store the current variant in the cache.",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\lib\\caching.py",
    "ast_data": "FunctionDef name:put arg:self arg:obj arg:size arguments arg arg arg Raise"
  },
  {
    "library": "pandas",
    "name": "_excel2num",
    "source_code": "def _excel2num(x: str) -> int:\n    index = 0\n    for c in x.upper().strip():\n        cp = ord(c)\n        if cp < ord('A') or cp > ord('Z'):\n            raise ValueError(f'Invalid column name: {x}')\n        index = index * 26 + cp - ord('A') + 1\n    return index - 1",
    "docstring": "Convert Excel column name like 'AB' to 0-based column index. Parameters ---------- x : str The Excel column name to convert to a 0-based column index. Returns ------- num : int The column index corresponding to the name. Raises ------ ValueError Part of the Excel column name was invalid.",
    "type": "function",
    "file_path": "pandas\\pandas\\io\\excel\\_util.py",
    "ast_data": "FunctionDef name:_excel2num arg:x arguments arg Assign For Call Call Assign Call If BoolOp Compare Call Compare Call Raise Call Assign Call Return return:yes"
  },
  {
    "library": "kornia",
    "name": "AUTUMN",
    "source_code": "@deprecated('0.7.2', extra_reason=\"The `AUTUMN()` class is deprecated and will be removed in next kornia versions (0.8.0 - dec 2024).    In favor of using `ColorMap(base='autumn')` instead.\")\nclass AUTUMN(ColorMap):\n\n    def __init__(self, num_colors: int=64, device: Optional[torch.device]=None, dtype: Optional[torch.dtype]=None) -> None:\n        super().__init__(base=ColorMapType.autumn, num_colors=num_colors, device=device, dtype=dtype)",
    "docstring": "The GNU Octave colormap . .. image:: _static/img/AUTUMN.png",
    "type": "class",
    "file_path": "kornia\\kornia\\color\\colormap.py",
    "ast_data": "ClassDef name:AUTUMN FunctionDef name:__init__ arg:self arg:num_colors arg:device arg:dtype arguments arg arg arg arg Call Call Call"
  },
  {
    "library": "sphinx",
    "name": "add_linkcode_domain",
    "source_code": "def add_linkcode_domain(domain: str, keys: list[str], override: bool=False) -> None:\n    if override or domain not in _DOMAIN_KEYS:\n        _DOMAIN_KEYS[domain] = list(keys)",
    "docstring": "Register a new list of keys to use for a domain. .. versionadded:: 8.2",
    "type": "function",
    "file_path": "sphinx\\sphinx\\ext\\linkcode.py",
    "ast_data": "FunctionDef name:add_linkcode_domain arg:domain arg:keys arg:override arguments arg arg arg If BoolOp Compare Assign Call"
  },
  {
    "library": "scikit-learn",
    "name": "grow",
    "source_code": "def grow(self):\n    while self.splittable_nodes:\n        self.split_next()\n    self._apply_shrinkage()",
    "docstring": "Grow the tree, from root to leaves.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\ensemble\\_hist_gradient_boosting\\grower.py",
    "ast_data": "FunctionDef name:grow arg:self arguments arg While Call Call"
  },
  {
    "library": "scipy",
    "name": "_get_index_dtype",
    "source_code": "def _get_index_dtype(self, arrays=(), maxval=None, check_contents=False):\n    from ._sputils import get_index_dtype\n    return get_index_dtype(arrays, maxval, check_contents and (not isinstance(self, sparray)))",
    "docstring": "Determine index dtype for array. This wraps _sputils.get_index_dtype, providing compatibility for both array and matrix API sparse matrices. Matrix API sparse matrices would attempt to downcast the indices - which can be computationally expensive and undesirable for users. The array API changes this behaviour. See discussion: The get_index_dtype import is due to implementation details of the test suite. It allows the decorator `` to mock a lower int32 max value for checks on the matrix API's downcasting behaviour.",
    "type": "method",
    "file_path": "scipy\\scipy\\sparse\\_base.py",
    "ast_data": "FunctionDef name:_get_index_dtype arg:self arg:arrays arg:maxval arg:check_contents arguments arg arg arg arg Return return:yes Call BoolOp Call"
  },
  {
    "library": "tensorflow",
    "name": "bessel_y0",
    "source_code": "@tf_export('math.special.bessel_y0')\n@dispatch.register_unary_elementwise_api\n@dispatch.add_dispatch_support\ndef bessel_y0(x, name=None):\n    with ops.name_scope(name, 'bessel_y0', [x]):\n        return gen_special_math_ops.bessel_y0(x)",
    "docstring": "Computes the Bessel y0 function of element-wise. Modified Bessel function of order 0. >>> tf.math.special.bessel_y0([0.5, 1., 2., 4.]).numpy() array([-0.44451873, 0.08825696, 0.51037567, -0.01694074], dtype=float32) Args: x: A or . Must be one of the following types: , , . name: A name for the operation (optional). Returns: A or , respectively. Has the same type as . @compatibility(scipy) Equivalent to scipy.special.y0 @end_compatibility",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\special_math_ops.py",
    "ast_data": "FunctionDef name:bessel_y0 arg:x arg:name arguments arg arg With Call Return return:yes Call Call"
  },
  {
    "library": "numpy",
    "name": "eliminate_quotes",
    "source_code": "def eliminate_quotes(s):\n    d = {}\n\n    def repl(m):\n        kind, value = m.groups()[:2]\n        if kind:\n            kind = kind[:-1]\n        p = {\"'\": 'SINGLE', '\"': 'DOUBLE'}[value[0]]\n        k = f'{kind}@__f2py_QUOTES_{p}_{COUNTER.__next__()}@'\n        d[k] = value\n        return k\n    new_s = re.sub('({kind}_|)({single_quoted}|{double_quoted})'.format(kind='\\\\w[\\\\w\\\\d_]*', single_quoted=\"('([^'\\\\\\\\]|(\\\\\\\\.))*')\", double_quoted='(\"([^\"\\\\\\\\]|(\\\\\\\\.))*\")'), repl, s)\n    assert '\"' not in new_s\n    assert \"'\" not in new_s\n    return (new_s, d)",
    "docstring": "Replace quoted substrings of input string. Return a new string and a mapping of replacements.",
    "type": "function",
    "file_path": "numpy\\numpy\\f2py\\symbolic.py",
    "ast_data": "FunctionDef name:eliminate_quotes arg:s arguments arg Assign FunctionDef name:repl arg:m arguments arg Assign Call If Assign Assign Assign Call Assign Return return:yes Assign Call Call Compare Compare Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "wrap",
    "source_code": "def wrap(module: nn.Module, **wrap_overrides: Any) -> nn.Module:\n    if _ConfigAutoWrap.in_autowrap_context:\n        assert _ConfigAutoWrap.wrapper_cls is not None\n        wrap_overrides = {**_ConfigAutoWrap.kwargs, **wrap_overrides}\n        return _wrap(module, _ConfigAutoWrap.wrapper_cls, **wrap_overrides)\n    return module",
    "docstring": "Annotate that a module should be wrapped. Annotated modules will only be wrapped if inside of an :func: context manager. This allows a module to be initialized both with and without a wrapper without code change. The class that this function wraps the passed in `enable_wrapenable_wrap` context",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\fsdp\\wrap.py",
    "ast_data": "FunctionDef name:wrap arg:module arguments arg arg If Compare Assign Return return:yes Call Return return:yes"
  },
  {
    "library": "django",
    "name": "contains",
    "source_code": "def contains(source, inst):\n    if isinstance(source, inst):\n        return True\n    if isinstance(source, NonCapture):\n        for elt in source:\n            if contains(elt, inst):\n                return True\n    return False",
    "docstring": "Return True if the \"source\" contains an instance of \"inst\". False, otherwise.",
    "type": "function",
    "file_path": "django\\django\\utils\\regex_helper.py",
    "ast_data": "FunctionDef name:contains arg:source arg:inst arguments arg arg If Call Return return:yes If Call For If Call Return return:yes Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_get_num_nonjoined_procs",
    "source_code": "def _get_num_nonjoined_procs(self):\n    num_nonjoined_procs = torch.zeros(1, device=self._device)\n    dist.all_reduce(num_nonjoined_procs, group=self._process_group)\n    return num_nonjoined_procs.item()",
    "docstring": "Return the number of non-joined processes by shadowing an all-reduce in the non-joined processes.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\algorithms\\join.py",
    "ast_data": "FunctionDef name:_get_num_nonjoined_procs arg:self arguments arg Assign Call Call Return return:yes Call"
  },
  {
    "library": "kornia",
    "name": "AugmentationBase2D",
    "source_code": "class AugmentationBase2D(_AugmentationBase):\n\n    def validate_tensor(self, input: Tensor) -> None:\n        _validate_input_dtype(input, accepted_dtypes=[float16, float32, float64])\n        if len(input.shape) != 4:\n            raise RuntimeError(f'Expect (B, C, H, W). Got {input.shape}.')\n\n    def transform_tensor(self, input: Tensor, *, shape: Optional[Tensor]=None, match_channel: bool=True) -> Tensor:\n        _validate_input_dtype(input, accepted_dtypes=[float16, float32, float64])\n        if shape is None:\n            return _transform_input(input)\n        else:\n            return _transform_input_by_shape(input, reference_shape=shape, match_channel=match_channel)",
    "docstring": "AugmentationBase2D base class for customized augmentation implementations. AugmentationBase2D aims at offering a generic base class for a greater level of customization. If the subclass contains routined matrix-based transformations, might be a better fit. Args: p: probability for applying an augmentation. This param controls the augmentation probabilities element-wise for a batch. p_batch: probability for applying an augmentation to a batch. This param controls the augmentation probabilities batch-wise. same_on_batch: apply the same transformation across the batch. keepdim: whether to keep the output shape the same as input ``.",
    "type": "class",
    "file_path": "kornia\\kornia\\augmentation\\_2d\\base.py",
    "ast_data": "ClassDef name:AugmentationBase2D FunctionDef name:validate_tensor arg:self arg:input arguments arg arg Call If Compare Call Raise Call FunctionDef name:transform_tensor arg:self arg:input arguments arg arg arg arg Call If Compare Return return:yes Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "weighted_moving_average",
    "source_code": "def weighted_moving_average(value, decay, weight, truediv=True, collections=None, name=None):\n    if collections is None:\n        collections = [ops.GraphKeys.GLOBAL_VARIABLES]\n    with variable_scope.variable_scope(name, 'WeightedMovingAvg', [value, weight, decay]) as scope:\n        value_x_weight_var = variable_scope.get_variable('value_x_weight', shape=value.get_shape(), dtype=value.dtype, initializer=init_ops.zeros_initializer(), trainable=False, collections=collections)\n        weight_var = variable_scope.get_variable('weight', shape=weight.get_shape(), dtype=weight.dtype, initializer=init_ops.zeros_initializer(), trainable=False, collections=collections)\n        numerator = assign_moving_average(value_x_weight_var, value * weight, decay, zero_debias=False)\n        denominator = assign_moving_average(weight_var, weight, decay, zero_debias=False)\n        if truediv:\n            return math_ops.truediv(numerator, denominator, name=scope.name)\n        else:\n            return math_ops.divide(numerator, denominator, name=scope.name)",
    "docstring": "Compute the weighted moving average of . Conceptually, the weighted moving average is: , where a moving average updates by the rule Internally, this Op keeps moving average variables of both and . Args: value: A numeric . decay: A float or float value. The moving average decay. weight: that keeps the current value of a weight. Shape should be able to multiply . truediv: Boolean, if , dividing by is floating point division. If , use division implied by dtypes. collections: List of graph collections keys to add the internal variables and to. Defaults to . name: Optional name of the returned operation. Defaults to \"WeightedMovingAvg\". Returns: An Operation that updates and returns the weighted moving average.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\training\\moving_averages.py",
    "ast_data": "FunctionDef name:weighted_moving_average arg:value arg:decay arg:weight arg:truediv arg:collections arg:name arguments arg arg arg arg arg arg If Compare Assign With Call Assign Call Call Call Assign Call Call Call Assign Call Assign Call If Return return:yes Call Return return:yes Call"
  },
  {
    "library": "cherrypy",
    "name": "flatten",
    "source_code": "def flatten(debug=False):\n\n    def flattener(input):\n        numchunks = 0\n        for x in input:\n            if not is_iterator(x):\n                numchunks += 1\n                yield x\n            else:\n                for y in flattener(x):\n                    numchunks += 1\n                    yield y\n        if debug:\n            cherrypy.log('Flattened %d chunks' % numchunks, 'TOOLS.FLATTEN')\n    response = cherrypy.serving.response\n    response.body = flattener(response.body)",
    "docstring": "Wrap response.body in a generator that recursively iterates over body. This allows cherrypy.response.body to consist of 'nested generators'; that is, a set of generators that yield generators.",
    "type": "function",
    "file_path": "cherrypy\\cherrypy\\lib\\cptools.py",
    "ast_data": "FunctionDef name:flatten arg:debug arguments arg FunctionDef name:flattener arg:input arguments arg Assign For If Call For Call If Call Assign Assign Call"
  },
  {
    "library": "tensorflow",
    "name": "set_global_step",
    "source_code": "def set_global_step(self, new_global_step, name=None):\n    return gen_data_flow_ops.accumulator_set_global_step(self._accumulator_ref, math_ops.cast(ops.convert_to_tensor(new_global_step), _dtypes.int64), name=name)",
    "docstring": "Sets the global time step of the accumulator. The operation logs a warning if we attempt to set to a time step that is lower than the accumulator's own time step. Args: new_global_step: Value of new time step. Can be a variable or a constant name: Optional name for the operation. Returns: Operation that sets the accumulator's time step.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\data_flow_ops.py",
    "ast_data": "FunctionDef name:set_global_step arg:self arg:new_global_step arg:name arguments arg arg arg Return return:yes Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "check_diffs",
    "source_code": "def check_diffs(hunks: Iterable[diff_parser.Hunk], *, prohibited_regex: str, suppression_regex: str | None=None) -> list[RegexLocation]:\n    prohibited_regex = re.compile(prohibited_regex)\n    if suppression_regex is not None:\n        suppression_regex = re.compile(suppression_regex)\n\n    def should_not_suppress(line) -> bool:\n        if suppression_regex:\n            return not suppression_regex.search(line)\n        return True\n    regex_locations = []\n    for hunk in hunks:\n        for line_no, line in hunk.added_lines():\n            if should_not_suppress(line):\n                regex_locations.extend([RegexLocation(hunk.file, line_no, line, regex_match.group()) for regex_match in prohibited_regex.finditer(line)])\n    return regex_locations",
    "docstring": "Checks FileDiffs for prohibited regexes. Arguments: hunks: A sequence of Hunk objects representing the hunks of the diff. prohibited_regex: The regex that isn't allowed in the diff. suppression_regex: A regex used as an escape hatch to allow the prohibited regex in the diff. If this is found on the same line as prohibited_regex, there is no error. Returns: A list of RegexLocations where the prohibited_regex is found.",
    "type": "function",
    "file_path": "tensorflow\\third_party\\xla\\build_tools\\lint\\check_contents.py",
    "ast_data": "FunctionDef name:check_diffs arg:hunks arguments arg arg arg Assign Call If Compare Assign Call FunctionDef name:should_not_suppress arg:line arguments arg If Return return:yes Call Return return:yes Assign For For Call If Call Call Call Call Call Return return:yes"
  },
  {
    "library": "scipy",
    "name": "logcdf",
    "source_code": "def logcdf(self, x, mean=None, cov=1, allow_singular=False, maxpts=None, abseps=1e-05, releps=1e-05, *, lower_limit=None, rng=None):\n    params = self._process_parameters(mean, cov, allow_singular)\n    dim, mean, cov_object = params\n    cov = cov_object.covariance\n    x = self._process_quantiles(x, dim)\n    if not maxpts:\n        maxpts = 1000000 * dim\n    rng = self._get_random_state(rng)\n    cdf = self._cdf(x, mean, cov, maxpts, abseps, releps, lower_limit, rng)\n    cdf = cdf + 0j if np.any(cdf < 0) else cdf\n    out = np.log(cdf)\n    return out",
    "docstring": "Log of the multivariate normal cumulative distribution function. Parameters ---------- x : array_like Quantiles, with the last axis of denoting the components. %(_mvn_doc_default_callparams)s maxpts : integer, optional The maximum number of points to use for integration (default `xx` Notes ----- %(_mvn_doc_callparams_note)s .. versionadded:: 1.0.0",
    "type": "method",
    "file_path": "scipy\\scipy\\stats\\_multivariate.py",
    "ast_data": "FunctionDef name:logcdf arg:self arg:x arg:mean arg:cov arg:allow_singular arg:maxpts arg:abseps arg:releps arguments arg arg arg arg arg arg arg arg arg arg Assign Call Assign Assign Assign Call If Assign Assign Call Assign Call Assign Call Compare Assign Call Return return:yes"
  },
  {
    "library": "django",
    "name": "_geomgen",
    "source_code": "def _geomgen(self, gen_func, other=None):\n    if isinstance(other, OGRGeometry):\n        return OGRGeometry(gen_func(self.ptr, other.ptr), self.srs)\n    else:\n        return OGRGeometry(gen_func(self.ptr), self.srs)",
    "docstring": "A helper routine for the OGR routines that generate geometries.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\gdal\\geometries.py",
    "ast_data": "FunctionDef name:_geomgen arg:self arg:gen_func arg:other arguments arg arg arg If Call Return return:yes Call Call Return return:yes Call Call"
  },
  {
    "library": "scikit-learn",
    "name": "HistGradientBoostingClassifierBenchmark",
    "source_code": "class HistGradientBoostingClassifierBenchmark(Predictor, Estimator, Benchmark):\n    param_names = []\n    params = ()\n\n    def setup_cache(self):\n        super().setup_cache()\n\n    def make_data(self, params):\n        data = _synth_classification_dataset(n_samples=10000, n_features=100, n_classes=5)\n        return data\n\n    def make_estimator(self, params):\n        estimator = HistGradientBoostingClassifier(max_iter=100, max_leaf_nodes=15, early_stopping=False, random_state=0)\n        return estimator\n\n    def make_scorers(self):\n        make_gen_classif_scorers(self)",
    "docstring": "Benchmarks for HistGradientBoostingClassifier.",
    "type": "class",
    "file_path": "scikit-learn\\asv_benchmarks\\benchmarks\\ensemble.py",
    "ast_data": "ClassDef name:HistGradientBoostingClassifierBenchmark Assign Assign FunctionDef name:setup_cache arg:self arguments arg Call Call FunctionDef name:make_data arg:self arg:params arguments arg arg Assign Call Return return:yes FunctionDef name:make_estimator arg:self arg:params arguments arg arg Assign Call Return return:yes FunctionDef name:make_scorers arg:self arguments arg Call"
  },
  {
    "library": "matplotlib",
    "name": "reset_margins",
    "source_code": "def reset_margins(layoutgrids, fig):\n    for sfig in fig.subfigs:\n        reset_margins(layoutgrids, sfig)\n    for ax in fig.axes:\n        if ax.get_in_layout():\n            gs = ax.get_gridspec()\n            if gs in layoutgrids:\n                layoutgrids[gs].reset_margins()\n    layoutgrids[fig].reset_margins()",
    "docstring": "Reset the margins in the layoutboxes of *fig*. Margins are usually set as a minimum, so if the figure gets smaller the minimum needs to be zero in order for it to grow again.",
    "type": "function",
    "file_path": "matplotlib\\lib\\matplotlib\\_constrained_layout.py",
    "ast_data": "FunctionDef name:reset_margins arg:layoutgrids arg:fig arguments arg arg For Call For If Call Assign Call If Compare Call Call"
  },
  {
    "library": "scipy",
    "name": "CrownedCross",
    "source_code": "class CrownedCross(Benchmark):\n\n    def __init__(self, dimensions=2):\n        Benchmark.__init__(self, dimensions)\n        self._bounds = list(zip([-10.0] * self.N, [10.0] * self.N))\n        self.global_optimum = [[0, 0]]\n        self.fglob = 0.0001\n\n    def fun(self, x, *args):\n        self.nfev += 1\n        u = 100 - sqrt(x[0] ** 2 + x[1] ** 2) / pi\n        v = sin(x[0]) * sin(x[1])\n        return 0.0001 * (abs(v * exp(abs(u))) + 1) ** 0.1",
    "docstring": "Crowned Cross objective function. This class defines the Crowned Cross [1]_ global optimization problem. This is a multimodal minimization problem defined as follows: .. math:: f_{\\text{CrownedCross}}(x) = 0.0001 \\left(\\left|{e^{\\left|{100 - \\frac{\\sqrt{x_{1}^{2} + x_{2}^{2}}}{\\pi}}\\right|} \\sin\\left(x_{1}\\right) \\sin\\left(x_{2}\\right)}\\right| + 1\\right)^{0.1} with :math: for :math:. *Global optimum*: :math:. The global minimum is found on the planes :math: and :math: ..[1] Mishra, S. Global Optimization by Differential Evolution and Particle Swarm Methods: Evaluation on Some Benchmark Functions Munich University, 2006",
    "type": "class",
    "file_path": "scipy\\benchmarks\\benchmarks\\go_benchmark_functions\\go_funcs_C.py",
    "ast_data": "ClassDef name:CrownedCross FunctionDef name:__init__ arg:self arg:dimensions arguments arg arg Call Assign Call Call Assign Assign FunctionDef name:fun arg:self arg:x arguments arg arg arg Assign Call Assign Call Call Return return:yes Call Call Call"
  },
  {
    "library": "matplotlib",
    "name": "date2num",
    "source_code": "def date2num(d):\n    d = cbook._unpack_to_numpy(d)\n    iterable = np.iterable(d)\n    if not iterable:\n        d = [d]\n    masked = np.ma.is_masked(d)\n    mask = np.ma.getmask(d)\n    d = np.asarray(d)\n    if not np.issubdtype(d.dtype, np.datetime64):\n        if not d.size:\n            return d\n        tzi = getattr(d[0], 'tzinfo', None)\n        if tzi is not None:\n            d = [dt.astimezone(UTC).replace(tzinfo=None) for dt in d]\n            d = np.asarray(d)\n        d = d.astype('datetime64[us]')\n    d = np.ma.masked_array(d, mask=mask) if masked else d\n    d = _dt64_to_ordinalf(d)\n    return d if iterable else d[0]",
    "docstring": "Convert datetime objects to Matplotlib dates. Parameters ---------- d : or or sequences of these Returns ------- float or sequence of floats Number of days since the epoch. See for the epoch, which can be changed by :rc: or . If the epoch is \"1970-01-01T00:00:00\" (default) then noon Jan 1 1970 (\"1970-01-01T12:00:00\") returns 0.5. Notes ----- The Gregorian calendar is assumed; this is not universal practice. For details see the module docstring.",
    "type": "function",
    "file_path": "matplotlib\\lib\\matplotlib\\dates.py",
    "ast_data": "FunctionDef name:date2num arg:d arguments arg Assign Call Assign Call If Assign Assign Call Assign Call Assign Call If Call If Return return:yes Assign Call If Compare Assign Call Call Assign Call Assign Call Assign Call Assign Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "_mul1220",
    "source_code": "def _mul1220(num1, num2):\n    return num1 * num2 >> 20",
    "docstring": "Multiply two numbers in 12.20 fixed point format.",
    "type": "function",
    "file_path": "matplotlib\\lib\\matplotlib\\dviread.py",
    "ast_data": "FunctionDef name:_mul1220 arg:num1 arg:num2 arguments arg arg Return return:yes"
  },
  {
    "library": "pandas",
    "name": "round",
    "source_code": "def round(self, decimals: int=0, *args, **kwargs):\n    if self.dtype.kind == 'b':\n        return self\n    nv.validate_round(args, kwargs)\n    values = np.round(self._data, decimals=decimals, **kwargs)\n    return self._maybe_mask_result(values, self._mask.copy())",
    "docstring": "Round each value in the array a to the given number of decimals. Parameters ---------- decimals : int, default 0 Number of decimal places to round to. If decimals is negative, it specifies the number of positions to the left of the decimal point. *args, **kwargs Additional arguments and keywords have no effect but might be accepted for compatibility with NumPy. Returns ------- NumericArray Rounded values of the NumericArray. See Also -------- numpy.around : Round values of an np.array. DataFrame.round : Round values of a DataFrame. Series.round : Round values of a Series.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\arrays\\masked.py",
    "ast_data": "FunctionDef name:round arg:self arg:decimals arguments arg arg arg arg If Compare Return return:yes Call Assign Call Return return:yes Call Call"
  },
  {
    "library": "scikit-learn",
    "name": "_compute_score_samples",
    "source_code": "def _compute_score_samples(self, X, subsample_features):\n    n_samples = X.shape[0]\n    depths = np.zeros(n_samples, order='f')\n    average_path_length_max_samples = _average_path_length([self._max_samples])\n    lock = threading.Lock()\n    Parallel(verbose=self.verbose, require='sharedmem')((delayed(_parallel_compute_tree_depths)(tree, X, features if subsample_features else None, self._decision_path_lengths[tree_idx], self._average_path_length_per_tree[tree_idx], depths, lock) for tree_idx, (tree, features) in enumerate(zip(self.estimators_, self.estimators_features_))))\n    denominator = len(self.estimators_) * average_path_length_max_samples\n    scores = 2 ** (-np.divide(depths, denominator, out=np.ones_like(depths), where=denominator != 0))\n    return scores",
    "docstring": "Compute the score of each samples in X going through the extra trees. Parameters ---------- X : array-like or sparse matrix Data matrix. subsample_features : bool Whether features should be subsampled. Returns ------- scores : ndarray of shape (n_samples,) The score of each sample in X.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\ensemble\\_iforest.py",
    "ast_data": "FunctionDef name:_compute_score_samples arg:self arg:X arg:subsample_features arguments arg arg arg Assign Assign Call Assign Call Assign Call Call Call Call Call Call Call Assign Call Assign Call Call Compare Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "broadcasting_binary_op_wrapper",
    "source_code": "def broadcasting_binary_op_wrapper(x, y, broadcast_dims=None, name=None):\n    broadcast_dims = broadcast_dims or []\n    broadcast_dims = ops.convert_to_tensor(broadcast_dims, dtypes.int64)\n    x, y = gen_xla_ops.xla_broadcast_helper(x, y, broadcast_dims)\n    return fn(x, y, name=name)",
    "docstring": "Inner wrapper function.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\compiler\\tf2xla\\python\\xla.py",
    "ast_data": "FunctionDef name:broadcasting_binary_op_wrapper arg:x arg:y arg:broadcast_dims arg:name arguments arg arg arg arg Assign BoolOp Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "_reset",
    "source_code": "def _reset(self):\n    if hasattr(self, 'scale_'):\n        del self.scale_\n        del self.n_samples_seen_\n        del self.mean_\n        del self.var_",
    "docstring": "Reset internal data-dependent state of the scaler, if necessary. __init__ parameters are not touched.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\preprocessing\\_data.py",
    "ast_data": "FunctionDef name:_reset arg:self arguments arg If Call"
  },
  {
    "library": "scipy",
    "name": "read_ints",
    "source_code": "def read_ints(self, dtype='i4'):\n    return self.read_record(dtype)",
    "docstring": "Reads a record of a given type from the file, defaulting to an integer type (`` in Fortran). Parameters ---------- dtype : dtype, optional Data type specifying the size and endianness of the data. Returns ------- data : ndarray A 1-D array object. See Also -------- read_reals read_record",
    "type": "method",
    "file_path": "scipy\\scipy\\io\\_fortran.py",
    "ast_data": "FunctionDef name:read_ints arg:self arg:dtype arguments arg arg Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "update_from",
    "source_code": "def update_from(self, other):\n    super().update_from(other)\n    self._linestyle = other._linestyle\n    self._linewidth = other._linewidth\n    self._color = other._color\n    self._gapcolor = other._gapcolor\n    self._markersize = other._markersize\n    self._markerfacecolor = other._markerfacecolor\n    self._markerfacecoloralt = other._markerfacecoloralt\n    self._markeredgecolor = other._markeredgecolor\n    self._markeredgewidth = other._markeredgewidth\n    self._unscaled_dash_pattern = other._unscaled_dash_pattern\n    self._dash_pattern = other._dash_pattern\n    self._dashcapstyle = other._dashcapstyle\n    self._dashjoinstyle = other._dashjoinstyle\n    self._solidcapstyle = other._solidcapstyle\n    self._solidjoinstyle = other._solidjoinstyle\n    self._marker = MarkerStyle(marker=other._marker)\n    self._drawstyle = other._drawstyle",
    "docstring": "Copy properties from *other* to self.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\lines.py",
    "ast_data": "FunctionDef name:update_from arg:self arg:other arguments arg arg Call Call Assign Assign Assign Assign Assign Assign Assign Assign Assign Assign Assign Assign Assign Assign Assign Assign Call Assign"
  },
  {
    "library": "matplotlib",
    "name": "trigger",
    "source_code": "def trigger(self, sender, event, data=None):\n    pass",
    "docstring": "Called when this tool gets used. This method is called by . Parameters ---------- event : The canvas event that caused this tool to be called. sender : object Object that requested the tool to be triggered. data : object Extra data.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\backend_tools.py",
    "ast_data": "FunctionDef name:trigger arg:self arg:sender arg:event arg:data arguments arg arg arg arg"
  },
  {
    "library": "tensorflow",
    "name": "shape",
    "source_code": "@property\ndef shape(self):\n    return self._shape()",
    "docstring": "of this . If this operator acts like the batch matrix with , then this returns , equivalent to . Returns: , statically determined, may be undefined.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\linalg\\linear_operator.py",
    "ast_data": "FunctionDef name:shape arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "numpy",
    "name": "param_parse",
    "source_code": "def param_parse(d, params):\n    if '(' in d:\n        dname = d[:d.find('(')]\n        ddims = d[d.find('(') + 1:d.rfind(')')]\n        index = int(param_parse(ddims, params))\n        return str(params[dname][index])\n    elif d in params:\n        return str(params[d])\n    else:\n        for p in params:\n            re_1 = re.compile('(?P<before>.*?)\\\\b' + p + '\\\\b(?P<after>.*)', re.I)\n            m = re_1.match(d)\n            while m:\n                d = m.group('before') + str(params[p]) + m.group('after')\n                m = re_1.match(d)\n        return d",
    "docstring": "Recursively parse array dimensions. Parses the declaration of an array variable or parameter keyword, and is called recursively if the dimension for this array is a previously defined parameter (found in ). Parameters ---------- d : str Fortran expression describing the dimension of an array. params : dict Previously parsed parameters declared in the Fortran source file. Returns ------- out : str Parsed dimension expression. Examples -------- * If the line being analyzed is then and we return immediately, with >>> d = '2' >>> param_parse(d, params) 2 * If the line being analyzed is then ; since is a previously parsed parameter, and , we call recursively, to obtain >>> d = 'pa' >>> params = {'pa': 3} >>> param_parse(d, params) 3 * If the line being analyzed is then ; since is a previously parsed parameter, and , we call recursively, to obtain >>> d = 'pa(1)' >>> params = dict(pa={1: 3, 2: 5}) >>> param_parse(d, params) 3",
    "type": "function",
    "file_path": "numpy\\numpy\\f2py\\crackfortran.py",
    "ast_data": "FunctionDef name:param_parse arg:d arg:params arguments arg arg If Compare Assign Call Assign Call Call Assign Call Call Return return:yes Call If Compare Return return:yes Call For Assign Call Assign Call While Assign Call Call Call Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "supports_default_grad",
    "source_code": "def supports_default_grad(t):\n    if t.dtype == dtypes.resource:\n        handle_data = resource_variable_ops.get_eager_safe_handle_data(t)\n        if handle_data is None or not handle_data.is_set or len(handle_data.shape_and_type) != 1:\n            return False\n    return True",
    "docstring": "Whether tensor supports creating a default gradient. This function assumes that is of a trainable type. Args: t: Tensor Returns: Bool",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\default_gradient.py",
    "ast_data": "FunctionDef name:supports_default_grad arg:t arguments arg If Compare Assign Call If BoolOp Compare Compare Call Return return:yes Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "resampled",
    "source_code": "def resampled(self, lutshape):\n    if not np.iterable(lutshape) or len(lutshape) != len(self):\n        raise ValueError(f'lutshape must be of length {len(self)}')\n    new_cmap = self.copy()\n    for i, s in enumerate(lutshape):\n        if s is not None:\n            new_cmap._colormaps[i] = self[i].resampled(s)\n    return new_cmap",
    "docstring": "Return a new colormap with *lutshape* entries. Parameters ---------- lutshape : tuple of (, ) The tuple must have a length matching the number of variates. For each element in the tuple, if , the corresponding colorbar is resampled, if , the corresponding colorbar is not resampled. Returns ------- MultivarColormap",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\colors.py",
    "ast_data": "FunctionDef name:resampled arg:self arg:lutshape arguments arg arg If BoolOp Call Compare Call Call Raise Call Call Assign Call For Call If Compare Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "grad_fn",
    "source_code": "def grad_fn(*args, **kwds):\n    return implicit_val_and_grad(f)(*args, **kwds)[1]",
    "docstring": "Computes the gradient of the wrapped function.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\backprop.py",
    "ast_data": "FunctionDef name:grad_fn arguments arg arg Return return:yes Call Call"
  },
  {
    "library": "pandas",
    "name": "_has_externally_shared_axis",
    "source_code": "def _has_externally_shared_axis(ax1: Axes, compare_axis: str) -> bool:\n    if compare_axis == 'x':\n        axes = ax1.get_shared_x_axes()\n    elif compare_axis == 'y':\n        axes = ax1.get_shared_y_axes()\n    else:\n        raise ValueError(\"_has_externally_shared_axis() needs 'x' or 'y' as a second parameter\")\n    axes_siblings = axes.get_siblings(ax1)\n    ax1_points = ax1.get_position().get_points()\n    for ax2 in axes_siblings:\n        if not np.array_equal(ax1_points, ax2.get_position().get_points()):\n            return True\n    return False",
    "docstring": "Return whether an axis is externally shared. Parameters ---------- ax1 : matplotlib.axes.Axes Axis to query. compare_axis : str or according to whether the X-axis or Y-axis is being compared. Returns ------- bool if the axis is externally shared. Otherwise . Notes ----- If two axes with different positions are sharing an axis, they can be referred to as *externally* sharing the common axis. If two axes sharing an axis also have the same position, they can be referred to as *internally* sharing the common axis (a.k.a twinning). _handle_shared_axes() is only interested in axes externally sharing an axis, regardless of whether either of the axes is also internally sharing with a third axis.",
    "type": "function",
    "file_path": "pandas\\pandas\\plotting\\_matplotlib\\tools.py",
    "ast_data": "FunctionDef name:_has_externally_shared_axis arg:ax1 arg:compare_axis arguments arg arg If Compare Assign Call If Compare Assign Call Raise Call Assign Call Assign Call Call For If Call Call Call Return return:yes Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_tile_variant_with_length",
    "source_code": "def _tile_variant_with_length(t, length):\n    if _is_variant_with_internal_stacking(t):\n        return t\n    original_tensor = t\n    t.set_shape([])\n    t = array_ops.reshape(t, [-1])\n    with ops.device('CPU:0'):\n        result = array_ops.tile(t, length)\n        handle_data_util.copy_handle_data(original_tensor, result)\n        return result",
    "docstring": "stacks times.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\parallel_for\\pfor.py",
    "ast_data": "FunctionDef name:_tile_variant_with_length arg:t arg:length arguments arg arg If Call Return return:yes Assign Call Assign Call With Call Assign Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "get_loop_body_lowp_fp",
    "source_code": "def get_loop_body_lowp_fp(_body: LoopBody) -> tuple[Optional[torch.dtype], bool]:\n    sub_blocks = [_body.root_block] + list(_body.subblocks.values())\n    _lowp_fp_type: Optional[torch.dtype] = None\n    _use_fp32 = False\n    for sub_block in sub_blocks:\n        for _node in sub_block.graph.nodes:\n            if _node.op == 'placeholder' or _node.target in ('get_index', 'index_expr'):\n                continue\n            if _node.target not in ['load', 'store', 'abs', 'neg', 'output']:\n                _use_fp32 = True\n            if hasattr(_node, 'meta') and _node.meta:\n                assert OptimizationContext.key in _node.meta\n                opt_ctx: OptimizationContext = _node.meta[OptimizationContext.key]\n                if not opt_ctx.dtype or opt_ctx.dtype not in DTYPE_LOWP_FP:\n                    _use_fp32 = True\n                elif _lowp_fp_type is not None:\n                    if _lowp_fp_type != opt_ctx.dtype:\n                        warnings.warn('bf16 and fp16 are mixed in the scheduler node.')\n                else:\n                    _lowp_fp_type = opt_ctx.dtype\n            else:\n                _use_fp32 = True\n    return (_lowp_fp_type, _use_fp32)",
    "docstring": "Returns the low precision data type (torch.float16/torch.bfloat16) contained in the nodes and if all the nodes can codegen with this data type without converting to float. Otherwise returns None and True.",
    "type": "function",
    "file_path": "pytorch\\torch\\_inductor\\codegen\\cpp.py",
    "ast_data": "FunctionDef name:get_loop_body_lowp_fp arg:_body arguments arg Assign Call Call Assign For For If BoolOp Compare Compare If Compare Assign If BoolOp Call Compare If BoolOp Compare Assign If Compare If Compare Call Assign Assign Return return:yes"
  },
  {
    "library": "sphinx",
    "name": "set_versioning_method",
    "source_code": "def set_versioning_method(self, method: str | Callable[[Node], bool], compare: bool) -> None:\n    condition: Literal[False] | Callable[[Node], bool]\n    if callable(method):\n        condition = method\n    else:\n        if method not in versioning_conditions:\n            raise ValueError('invalid versioning method: %r' % method)\n        condition = versioning_conditions[method]\n    if self.versioning_condition not in {None, condition}:\n        msg = __('This environment is incompatible with the selected builder, please choose another doctree directory.')\n        raise SphinxError(msg)\n    self.versioning_condition = condition\n    self.versioning_compare = compare",
    "docstring": "Set the doctree versioning method for this environment. Versioning methods are a builder property; only builders with the same versioning method can share the same doctree directory. Therefore, we raise an exception if the user tries to use an environment with an incompatible versioning method.",
    "type": "method",
    "file_path": "sphinx\\sphinx\\environment\\__init__.py",
    "ast_data": "FunctionDef name:set_versioning_method arg:self arg:method arg:compare arguments arg arg arg If Call Assign If Compare Raise Call Assign If Compare Assign Call Raise Call Assign Assign"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "@doc_controls.do_not_generate_docs\ndef __init__(self, values, row_partition, internal=False):\n    if not internal:\n        raise ValueError('RaggedTensor constructor is private; please use one of the factory methods instead (e.g., RaggedTensor.from_row_lengths())')\n    _assert_is_supported_ragged_values_type(values)\n    if not isinstance(row_partition, RowPartition):\n        raise TypeError(f'Argument `row_partition` must be a RowPartition. Received {row_partition}.')\n    values.shape.with_rank_at_least(1)\n    if isinstance(values, RaggedTensor):\n        assert row_partition.dtype == values._row_partition.dtype\n    self._values = values\n    self._row_partition = row_partition",
    "docstring": "Creates a with a specified partitioning for . This constructor is private -- please use one of the following ops to build s: * * * * * * * * Args: values: A potentially ragged tensor of any dtype and shape . row_partition: A object, representing the arrangement of the lists at the top level. internal: True if the constructor is being called by one of the factory methods. If false, an exception will be raised. Raises: ValueError: If internal = False. Note that this method is intended only for internal use. TypeError: If values is not a or , or row_partition is not a .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\ragged_tensor.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:values arg:row_partition arg:internal arguments arg arg arg arg If Raise Call Call If Call Raise Call Call If Call Compare Assign Assign"
  },
  {
    "library": "kornia",
    "name": "apply_transform_keypoint",
    "source_code": "def apply_transform_keypoint(self, input: Keypoints, params: Dict[str, Tensor], flags: Dict[str, Any], transform: Optional[Tensor]=None) -> Keypoints:\n    if transform is None:\n        if self.transform_matrix is None:\n            raise RuntimeError('No valid transformation matrix found. Please either pass one or forward one first.')\n        transform = self.transform_matrix\n    input = self.apply_non_transform_keypoint(input, params, flags, transform)\n    return input.transform_keypoints_(transform)",
    "docstring": "Process keypoints corresponding to the inputs that are transformed.",
    "type": "method",
    "file_path": "kornia\\kornia\\augmentation\\_2d\\geometric\\base.py",
    "ast_data": "FunctionDef name:apply_transform_keypoint arg:self arg:input arg:params arg:flags arg:transform arguments arg arg arg arg arg If Compare If Compare Raise Call Assign Assign Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "__bool__",
    "source_code": "def __bool__(self):\n    self._disallow_bool_casting()",
    "docstring": "Dummy method to prevent a tensor from being used as a Python . This overload raises a when the user inadvertently treats a as a boolean (most commonly in an or statement), in code that was not converted by AutoGraph. For example: Raises: .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\tensor.py",
    "ast_data": "FunctionDef name:__bool__ arg:self arguments arg Call"
  },
  {
    "library": "tensorflow",
    "name": "UnsupportedFeaturesChecker",
    "source_code": "class UnsupportedFeaturesChecker(gast.NodeVisitor):\n\n    def visit_Attribute(self, node):\n        if node.attr is not None and node.attr.startswith('__') and (not node.attr.endswith('__')):\n            raise errors.UnsupportedLanguageElementError('mangled names are not yet supported')\n        self.generic_visit(node)\n\n    def visit_For(self, node):\n        if node.orelse:\n            raise errors.UnsupportedLanguageElementError('for/else statement not yet supported')\n        self.generic_visit(node)\n\n    def visit_While(self, node):\n        if node.orelse:\n            raise errors.UnsupportedLanguageElementError('while/else statement not yet supported')\n        self.generic_visit(node)\n\n    def visit_Yield(self, node):\n        raise errors.UnsupportedLanguageElementError('generators are not supported')\n\n    def visit_YieldFrom(self, node):\n        raise errors.UnsupportedLanguageElementError('generators are not supported')",
    "docstring": "Quick check for Python features we know we don't support. Any features detected will cause AutoGraph to not compile a function.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\autograph\\core\\unsupported_features_checker.py",
    "ast_data": "ClassDef name:UnsupportedFeaturesChecker FunctionDef name:visit_Attribute arg:self arg:node arguments arg arg If BoolOp Compare Call Call Raise Call Call FunctionDef name:visit_For arg:self arg:node arguments arg arg If Raise Call Call FunctionDef name:visit_While arg:self arg:node arguments arg arg If Raise Call Call FunctionDef name:visit_Yield arg:self arg:node arguments arg arg Raise Call FunctionDef name:visit_YieldFrom arg:self arg:node arguments arg arg Raise Call"
  },
  {
    "library": "pytorch",
    "name": "get_default_config",
    "source_code": "def get_default_config(self, row):\n    return None",
    "docstring": "Returns the default config for a given sample. The default config could for example be the config that is the chosen by a current handwritten heuristic. This can for example be used in get_unsafe_leaf to compare the predicted config with the default config.",
    "type": "method",
    "file_path": "pytorch\\torchgen\\_autoheuristic\\train_decision.py",
    "ast_data": "FunctionDef name:get_default_config arg:self arg:row arguments arg arg Return return:no"
  },
  {
    "library": "django",
    "name": "get_content_type",
    "source_code": "def get_content_type(self):\n    return ContentType.objects.get_for_model(self.model, for_concrete_model=self.for_concrete_model)",
    "docstring": "Return the content type associated with this field's model.",
    "type": "method",
    "file_path": "django\\django\\contrib\\contenttypes\\fields.py",
    "ast_data": "FunctionDef name:get_content_type arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "pr",
    "source_code": "def pr(left, right):\n    if left is None:\n        return right\n    elif right is None:\n        return left\n    k = klass\n    if isinstance(left, ConditionBinOp):\n        if isinstance(right, ConditionBinOp):\n            k = JointConditionBinOp\n        elif isinstance(left, k):\n            return left\n        elif isinstance(right, k):\n            return right\n    elif isinstance(left, FilterBinOp):\n        if isinstance(right, FilterBinOp):\n            k = JointFilterBinOp\n        elif isinstance(left, k):\n            return left\n        elif isinstance(right, k):\n            return right\n    return k(self.op, left, right, queryables=self.queryables, encoding=self.encoding).evaluate()",
    "docstring": "create and return a new specialized BinOp from myself",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\computation\\pytables.py",
    "ast_data": "FunctionDef name:pr arg:left arg:right arguments arg arg If Compare Return return:yes If Compare Return return:yes Assign If Call If Call Assign If Call Return return:yes If Call Return return:yes If Call If Call Assign If Call Return return:yes If Call Return return:yes Return return:yes Call Call"
  },
  {
    "library": "pandas",
    "name": "le",
    "source_code": "def le(self, other, level=None, fill_value=None, axis: Axis=0) -> Series:\n    return self._flex_method(other, operator.le, level=level, fill_value=fill_value, axis=axis)",
    "docstring": "Return Less than or equal to of series and other, element-wise (binary operator ). Equivalent to ``series >> a = pd.Series([1, 1, 1, np.nan, 1], index=['a', 'b', 'c', 'd', 'e']) >>> a a 1.0 b 1.0 c 1.0 d NaN e 1.0 dtype: float64 >>> b = pd.Series([0, 1, 2, np.nan, 1], index=['a', 'b', 'c', 'd', 'f']) >>> b a 0.0 b 1.0 c 2.0 d NaN f 1.0 dtype: float64 >>> a.le(b, fill_value=0) a False b True c True d False e False f True dtype: bool",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\series.py",
    "ast_data": "FunctionDef name:le arg:self arg:other arg:level arg:fill_value arg:axis arguments arg arg arg arg arg Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "FuncTransform",
    "source_code": "class FuncTransform(Transform):\n    input_dims = output_dims = 1\n\n    def __init__(self, forward, inverse):\n        super().__init__()\n        if callable(forward) and callable(inverse):\n            self._forward = forward\n            self._inverse = inverse\n        else:\n            raise ValueError('arguments to FuncTransform must be functions')\n\n    def transform_non_affine(self, values):\n        return self._forward(values)\n\n    def inverted(self):\n        return FuncTransform(self._inverse, self._forward)",
    "docstring": "A simple transform that takes and arbitrary function for the forward and inverse transform.",
    "type": "class",
    "file_path": "matplotlib\\lib\\matplotlib\\scale.py",
    "ast_data": "ClassDef name:FuncTransform Assign FunctionDef name:__init__ arg:self arg:forward arg:inverse arguments arg arg arg Call Call If BoolOp Call Call Assign Assign Raise Call FunctionDef name:transform_non_affine arg:self arg:values arguments arg arg Return return:yes Call FunctionDef name:inverted arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "get_axislabel_pos_angle",
    "source_code": "def get_axislabel_pos_angle(self, axes):\n    angle = [0, 90][self.nth_coord]\n    fixed_coord = 1 - self.nth_coord\n    data_to_axes = axes.transData - axes.transAxes\n    p = data_to_axes.transform([self._value, self._value])\n    verts = self._to_xy(0.5, const=p[fixed_coord])\n    return (verts, angle) if 0 <= verts[fixed_coord] <= 1 else (None, None)",
    "docstring": "Return the label reference position in transAxes. get_label_transform() returns a transform of (transAxes+offset)",
    "type": "method",
    "file_path": "matplotlib\\lib\\mpl_toolkits\\axisartist\\axislines.py",
    "ast_data": "FunctionDef name:get_axislabel_pos_angle arg:self arg:axes arguments arg arg Assign Assign Assign Assign Call Assign Call Return return:yes Compare"
  },
  {
    "library": "tensorflow",
    "name": "_dummy_tensor_fn",
    "source_code": "def _dummy_tensor_fn(value_structure):\n\n    def create_dummy_tensor(spec):\n        if hasattr(spec, '_create_empty_value'):\n            return spec._create_empty_value()\n        if isinstance(spec, ragged_tensor.RaggedTensorSpec):\n            feature_shape = spec._shape[:1].concatenate(spec._shape[1 + spec._ragged_rank:])\n            feature_type = spec._dtype\n        else:\n            feature_shape = spec.shape\n            feature_type = spec.dtype\n        dims = [dim if dim is not None else 0 for dim in feature_shape.as_list()] if feature_shape else []\n        if dims and (isinstance(spec, ragged_tensor.RaggedTensorSpec) or feature_shape.is_fully_defined()):\n            dims[0] = tensor_shape.Dimension(0)\n        if isinstance(spec, sparse_tensor.SparseTensorSpec):\n            return sparse_tensor.SparseTensor(values=array_ops.zeros(0, feature_type), indices=array_ops.zeros((0, len(dims)), dtypes.int64), dense_shape=dims)\n        dummy_tensor = array_ops.zeros(tensor_shape.TensorShape(dims), feature_type)\n        if isinstance(spec, ragged_tensor.RaggedTensorSpec):\n            row_splits = array_ops.zeros(1, spec._row_splits_dtype)\n            dummy_tensor = ragged_tensor.RaggedTensor.from_nested_row_splits(dummy_tensor, (row_splits,) * spec._ragged_rank, validate=False)\n        return dummy_tensor\n    return nest.map_structure(create_dummy_tensor, value_structure)",
    "docstring": "A function to create dummy tensors from .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\input_lib.py",
    "ast_data": "FunctionDef name:_dummy_tensor_fn arg:value_structure arguments arg FunctionDef name:create_dummy_tensor arg:spec arguments arg If Call Return return:yes Call If Call Assign Call Assign Assign Assign Assign Compare Call If BoolOp BoolOp Call Call Assign Call If Call Return return:yes Call Call Call Call Assign Call Call If Call Assign Call Assign Call Return return:yes Return return:yes Call"
  },
  {
    "library": "sphinx",
    "name": "TypeAliasForwardRef",
    "source_code": "class TypeAliasForwardRef:\n\n    def __init__(self, name: str) -> None:\n        self.name = name\n\n    def __call__(self) -> None:\n        pass\n\n    def __eq__(self, other: object) -> bool:\n        return self.name == other\n\n    def __hash__(self) -> int:\n        return hash(self.name)\n\n    def __repr__(self) -> str:\n        return f'{self.__class__.__name__}({self.name!r})'",
    "docstring": "Pseudo typing class for :confval:. This avoids the error on evaluating the type inside :func:.",
    "type": "class",
    "file_path": "sphinx\\sphinx\\util\\inspect.py",
    "ast_data": "ClassDef name:TypeAliasForwardRef FunctionDef name:__init__ arg:self arg:name arguments arg arg Assign FunctionDef name:__call__ arg:self arguments arg FunctionDef name:__eq__ arg:self arg:other arguments arg arg Return return:yes Compare FunctionDef name:__hash__ arg:self arguments arg Return return:yes Call FunctionDef name:__repr__ arg:self arguments arg Return return:yes"
  },
  {
    "library": "pandas",
    "name": "_validate_codes",
    "source_code": "def _validate_codes(self, level: Index, code: np.ndarray) -> np.ndarray:\n    null_mask = isna(level)\n    if np.any(null_mask):\n        code = np.where(null_mask[code], -1, code)\n    return code",
    "docstring": "Reassign code values as -1 if their corresponding levels are NaN. Parameters ---------- code : Index Code to reassign. level : np.ndarray Level to check for missing values (NaN, NaT, None). Returns ------- new code where code value = -1 if it corresponds to a level with missing values (NaN, NaT, None).",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\indexes\\multi.py",
    "ast_data": "FunctionDef name:_validate_codes arg:self arg:level arg:code arguments arg arg arg Assign Call If Call Assign Call Return return:yes"
  },
  {
    "library": "pandas",
    "name": "from_product",
    "source_code": "@classmethod\ndef from_product(cls, iterables: Sequence[Iterable[Hashable]], sortorder: int | None=None, names: Sequence[Hashable] | Hashable | lib.NoDefault=lib.no_default) -> MultiIndex:\n    if not is_list_like(iterables):\n        raise TypeError('Input must be a list / sequence of iterables.')\n    if is_iterator(iterables):\n        iterables = list(iterables)\n    codes, levels = factorize_from_iterables(iterables)\n    if names is lib.no_default:\n        names = [getattr(it, 'name', None) for it in iterables]\n    codes = cartesian_product(codes)\n    return cls(levels, codes, sortorder=sortorder, names=names)",
    "docstring": "Make a MultiIndex from the cartesian product of multiple iterables. Parameters ---------- iterables : list / sequence of iterables Each iterable has unique labels for each level of the index. sortorder : int or None Level of sortedness (must be lexicographically sorted by that level). names : list / sequence of str, optional Names for the levels in the index. If not explicitly provided, names will be inferred from the elements of iterables if an element has a name attribute. Returns ------- MultiIndex See Also -------- MultiIndex.from_arrays : Convert list of arrays to MultiIndex. MultiIndex.from_tuples : Convert list of tuples to MultiIndex. MultiIndex.from_frame : Make a MultiIndex from a DataFrame. Examples -------- >>> numbers = [0, 1, 2] >>> colors = [\"green\", \"purple\"] >>> pd.MultiIndex.from_product([numbers, colors], names=[\"number\", \"color\"]) MultiIndex([(0, 'green'), (0, 'purple'), (1, 'green'), (1, 'purple'), (2, 'green'), (2, 'purple')], names=['number', 'color'])",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\indexes\\multi.py",
    "ast_data": "FunctionDef name:from_product arg:cls arg:iterables arg:sortorder arg:names arguments arg arg arg arg If Call Raise Call If Call Assign Call Assign Call If Compare Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "autoscale",
    "source_code": "def autoscale(self, A):\n    with self.callbacks.blocked():\n        self.vmin = self.vmax = None\n        self.autoscale_None(A)\n    self._changed()",
    "docstring": "Set *vmin*, *vmax* to min, max of *A*.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\colors.py",
    "ast_data": "FunctionDef name:autoscale arg:self arg:A arguments arg arg With Call Assign Call Call"
  },
  {
    "library": "scrapy",
    "name": "get_host_regex",
    "source_code": "def get_host_regex(self, spider: Spider) -> re.Pattern[str]:\n    allowed_domains = getattr(spider, 'allowed_domains', None)\n    if not allowed_domains:\n        return re.compile('')\n    url_pattern = re.compile('^https?://.*$')\n    port_pattern = re.compile(':\\\\d+$')\n    domains = []\n    for domain in allowed_domains:\n        if domain is None:\n            continue\n        if url_pattern.match(domain):\n            message = f'allowed_domains accepts only domains, not URLs. Ignoring URL entry {domain} in allowed_domains.'\n            warnings.warn(message)\n        elif port_pattern.search(domain):\n            message = f'allowed_domains accepts only domains without ports. Ignoring entry {domain} in allowed_domains.'\n            warnings.warn(message)\n        else:\n            domains.append(re.escape(domain))\n    regex = f'^(.*\\\\.)?({'|'.join(domains)})$'\n    return re.compile(regex)",
    "docstring": "Override this method to implement a different offsite policy",
    "type": "method",
    "file_path": "scrapy\\scrapy\\downloadermiddlewares\\offsite.py",
    "ast_data": "FunctionDef name:get_host_regex arg:self arg:spider arguments arg arg Assign Call If Return return:yes Call Assign Call Assign Call Assign For If Compare If Call Assign Call If Call Assign Call Call Call Assign Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "ragged_rank",
    "source_code": "@property\ndef ragged_rank(self):\n    values_is_ragged = isinstance(self._values, RaggedTensorValue)\n    return self._values.ragged_rank + 1 if values_is_ragged else 1",
    "docstring": "The number of ragged dimensions in this ragged tensor value.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\ragged_tensor_value.py",
    "ast_data": "FunctionDef name:ragged_rank arg:self arguments arg Assign Call Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "get_metadata_routing",
    "source_code": "def get_metadata_routing(self):\n    router = MetadataRouter(owner=self.__class__.__name__).add(estimator=self.estimator, method_mapping=MethodMapping().add(callee='fit', caller='fit')).add(splitter=self.cv, method_mapping=MethodMapping().add(callee='split', caller='fit')).add(scorer=self._get_curve_scorer(), method_mapping=MethodMapping().add(callee='score', caller='fit'))\n    return router",
    "docstring": "Get metadata routing of this object. Please check :ref: on how the routing mechanism works. Returns ------- routing : MetadataRouter A :class: encapsulating routing information.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\model_selection\\_classification_threshold.py",
    "ast_data": "FunctionDef name:get_metadata_routing arg:self arguments arg Assign Call Call Call Call Call Call Call Call Call Call Call Return return:yes"
  },
  {
    "library": "kornia",
    "name": "laf_from_three_points",
    "source_code": "def laf_from_three_points(threepts: Tensor) -> Tensor:\n    laf = stack([threepts[..., 0] - threepts[..., 2], threepts[..., 1] - threepts[..., 2], threepts[..., 2]], dim=-1)\n    return laf",
    "docstring": "Convert three points to local affine frame. Order is (0,0), (0, 1), (1, 0). Args: threepts: :math:. Returns: laf :math:.",
    "type": "function",
    "file_path": "kornia\\kornia\\feature\\laf.py",
    "ast_data": "FunctionDef name:laf_from_three_points arg:threepts arguments arg Assign Call Return return:yes"
  },
  {
    "library": "kornia",
    "name": "find_fundamental",
    "source_code": "def find_fundamental(points1: Tensor, points2: Tensor, weights: Optional[Tensor]=None, method: Literal['8POINT', '7POINT']='8POINT') -> Tensor:\n    if method.upper() == '7POINT':\n        result = run_7point(points1, points2)\n    elif method.upper() == '8POINT':\n        result = run_8point(points1, points2, weights)\n    else:\n        raise ValueError(f\"Invalid method: {method}. Supported methods are '7POINT' and '8POINT'.\")\n    return result",
    "docstring": "Find the fundamental matrix. Args: points1: A set of points in the first image with a tensor shape :math:. points2: A set of points in the second image with a tensor shape :math:. weights: Tensor containing the weights per point correspondence with a shape of :math:. method: The method to use for computing the fundamental matrix. Supported methods are \"7POINT\" and \"8POINT\". Returns: the computed fundamental matrix with shape :math:, where number of fundamental matrix. Raises: ValueError: If an invalid method is provided.",
    "type": "function",
    "file_path": "kornia\\kornia\\geometry\\epipolar\\fundamental.py",
    "ast_data": "FunctionDef name:find_fundamental arg:points1 arg:points2 arg:weights arg:method arguments arg arg arg arg If Compare Call Assign Call If Compare Call Assign Call Raise Call Return return:yes"
  },
  {
    "library": "pandas",
    "name": "IsInForObjects",
    "source_code": "class IsInForObjects:\n    variants = ['nans', 'short', 'long', 'long_floats']\n    params = [variants, variants]\n    param_names = ['series_type', 'vals_type']\n\n    def setup(self, series_type, vals_type):\n        N_many = 10 ** 5\n        if series_type == 'nans':\n            ser_vals = np.full(10 ** 4, np.nan)\n        elif series_type == 'short':\n            ser_vals = np.arange(2)\n        elif series_type == 'long':\n            ser_vals = np.arange(N_many)\n        elif series_type == 'long_floats':\n            ser_vals = np.arange(N_many, dtype=np.float64)\n        self.series = Series(ser_vals).astype(object)\n        if vals_type == 'nans':\n            values = np.full(10 ** 4, np.nan)\n        elif vals_type == 'short':\n            values = np.arange(2)\n        elif vals_type == 'long':\n            values = np.arange(N_many)\n        elif vals_type == 'long_floats':\n            values = np.arange(N_many, dtype=np.float64)\n        self.values = values.astype(object)\n\n    def time_isin(self, series_type, vals_type):\n        self.series.isin(self.values)",
    "docstring": "A subset of the cartesian product of cases have special motivations: \"nans\" x \"nans\" if nan-objects are different objects, this has the potential to trigger O(n^2) running time \"short\" x \"long\" running time dominated by the preprocessing \"long\" x \"short\" running time dominated by look-up \"long\" x \"long\" no dominating part \"long_floats\" x \"long_floats\" because of nans floats are special no dominating part",
    "type": "class",
    "file_path": "pandas\\asv_bench\\benchmarks\\algos\\isin.py",
    "ast_data": "ClassDef name:IsInForObjects Assign Assign Assign FunctionDef name:setup arg:self arg:series_type arg:vals_type arguments arg arg arg Assign If Compare Assign Call If Compare Assign Call If Compare Assign Call If Compare Assign Call Assign Call Call If Compare Assign Call If Compare Assign Call If Compare Assign Call If Compare Assign Call Assign Call FunctionDef name:time_isin arg:self arg:series_type arg:vals_type arguments arg arg arg Call"
  },
  {
    "library": "cryptography",
    "name": "__copy__",
    "source_code": "@abc.abstractmethod\ndef __copy__(self) -> EllipticCurvePrivateKey:\n    pass",
    "docstring": "Returns a copy.",
    "type": "method",
    "file_path": "cryptography\\src\\cryptography\\hazmat\\primitives\\asymmetric\\ec.py",
    "ast_data": "FunctionDef name:__copy__ arg:self arguments arg"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, node, function, enclosing_graph, first_function_input, type_attribute, function_attributes):\n    super(_FunctionCaller, self).__init__(node, function, enclosing_graph)\n    self._first_function_input = first_function_input\n    self._type_attribute = type_attribute\n    self._function_attributes = function_attributes",
    "docstring": "Initializes a _FunctionCaller. Args: node: As in _Node. function: As in _Node. enclosing_graph: As in _Node. first_function_input: The index of the first NodeDef input that is tied to the function inputs. It is assumed that the rest of the NodeDef inputs map one to one to function inputs. type_attribute: The name of the NodeDef attribute that defines the input types. It is assumed that the types listed here map one-to-one with the function inputs (that is, they do _not_ specify types for inputs that are not passed to functions). function_attributes: The names of the NodeDef attributes containing references to functions.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\convert_to_constants.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:node arg:function arg:enclosing_graph arg:first_function_input arg:type_attribute arg:function_attributes arguments arg arg arg arg arg arg arg Call Call Assign Assign Assign"
  },
  {
    "library": "scikit-learn",
    "name": "__call__",
    "source_code": "def __call__(self, y_true, raw_prediction, sample_weight=None, n_threads=1):\n    return np.average(self.loss(y_true=y_true, raw_prediction=raw_prediction, sample_weight=None, loss_out=None, n_threads=n_threads), weights=sample_weight)",
    "docstring": "Compute the weighted average loss. Parameters ---------- y_true : C-contiguous array of shape (n_samples,) Observed, true target values. raw_prediction : C-contiguous array of shape (n_samples,) or array of shape (n_samples, n_classes) Raw prediction values (in link space). sample_weight : None or C-contiguous array of shape (n_samples,) Sample weights. n_threads : int, default=1 Might use openmp thread parallelism. Returns ------- loss : float Mean or averaged loss function.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\_loss\\loss.py",
    "ast_data": "FunctionDef name:__call__ arg:self arg:y_true arg:raw_prediction arg:sample_weight arg:n_threads arguments arg arg arg arg arg Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "algorithm",
    "source_code": "@property\ndef algorithm(self):\n    return self._alg",
    "docstring": "The RNG algorithm id (a Python integer or scalar integer Tensor).",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\stateful_random_ops.py",
    "ast_data": "FunctionDef name:algorithm arg:self arguments arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "broadcast",
    "source_code": "def broadcast(tensor, devices=None, *, out=None):\n    tensor = _handle_complex(tensor)\n    if not (devices is None) ^ (out is None):\n        raise RuntimeError(f\"Exactly one of 'devices' and 'out' must be specified, but got devices={devices} and out={out}\")\n    if devices is not None:\n        devices = [_get_device_index(d) for d in devices]\n        return torch._C._broadcast(tensor, devices)\n    else:\n        return torch._C._broadcast_out(tensor, out)",
    "docstring": "Broadcasts a tensor to specified GPU devices. Args: tensor (Tensor): tensor to broadcast. Can be on CPU or GPU. devices (Iterable[torch.device, str or int], optional): an iterable of GPU devices, among which to broadcast. out (Sequence[Tensor], optional, keyword-only): the GPU tensors to store output results. .. note:: Exactly one of :attr: and :attr: must be specified. Returns: - If :attr: is specified, a tuple containing copies of :attr:, placed on :attr:. - If :attr: is specified, a tuple containing :attr: tensors, each containing a copy of :attr:.",
    "type": "function",
    "file_path": "pytorch\\torch\\nn\\parallel\\comm.py",
    "ast_data": "FunctionDef name:broadcast arg:tensor arg:devices arguments arg arg arg Assign Call If Compare Compare Raise Call If Compare Assign Call Return return:yes Call Return return:yes Call"
  },
  {
    "library": "numpy",
    "name": "tri",
    "source_code": "@finalize_array_function_like\n@set_module('numpy')\ndef tri(N, M=None, k=0, dtype=float, *, like=None):\n    if like is not None:\n        return _tri_with_like(like, N, M=M, k=k, dtype=dtype)\n    if M is None:\n        M = N\n    m = greater_equal.outer(arange(N, dtype=_min_int(0, N)), arange(-k, M - k, dtype=_min_int(-k, M - k)))\n    m = m.astype(dtype, copy=False)\n    return m",
    "docstring": "An array with ones at and below the given diagonal and zeros elsewhere. Parameters ---------- N : int Number of rows in the array. M : int, optional Number of columns in the array. By default, is taken equal to . k : int, optional The sub-diagonal at and below which the array is filled. = 0 is the main diagonal, while 0 is above. The default is 0. dtype : dtype, optional Data type of the returned array. The default is float. ${ARRAY_FUNCTION_LIKE} .. versionadded:: 1.20.0 Returns ------- tri : ndarray of shape (N, M) Array with its lower triangle filled with ones and zero elsewhere; in other words ``j >> import numpy as np >>> np.tri(3, 5, 2, dtype=int) array([[1, 1, 1, 0, 0], [1, 1, 1, 1, 0], [1, 1, 1, 1, 1]]) >>> np.tri(3, 5, -1) array([[0., 0., 0., 0., 0.], [1., 0., 0., 0., 0.], [1., 1., 0., 0., 0.]])",
    "type": "function",
    "file_path": "numpy\\numpy\\lib\\_twodim_base_impl.py",
    "ast_data": "FunctionDef name:tri arg:N arg:M arg:k arg:dtype arguments arg arg arg arg arg If Compare Return return:yes Call If Compare Assign Assign Call Call Call Call Call Assign Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "_is_custom_module_mha",
    "source_code": "def _is_custom_module_mha(node: Node, named_modules: dict[str, torch.nn.Module], qconfig: QConfigAny=None, qhandler: Optional[Any]=None) -> bool:\n    mod = _get_module(node, named_modules)\n    if qconfig is not None and qhandler is not None:\n        assert isinstance(qhandler, torch.ao.quantization.fx.quantize_handler.QuantizeHandler)\n        return isinstance(mod, torch.nn.MultiheadAttention) and activation_is_statically_quantized(qconfig) and qhandler.is_custom_module()\n    else:\n        return isinstance(mod, torch.ao.nn.quantizable.MultiheadAttention)",
    "docstring": "Return whether this refers to the custom module MultiheadAttention flow.",
    "type": "function",
    "file_path": "pytorch\\torch\\ao\\quantization\\fx\\utils.py",
    "ast_data": "FunctionDef name:_is_custom_module_mha arg:node arg:named_modules arg:qconfig arg:qhandler arguments arg arg arg arg Assign Call If BoolOp Compare Compare Call Return return:yes BoolOp Call Call Call Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "left",
    "source_code": "@property\ndef left(self) -> Index:\n    from pandas import Index\n    return Index(self._left, copy=False)",
    "docstring": "Return the left endpoints of each Interval in the IntervalArray as an Index. This property provides access to the left endpoints of the intervals contained within the IntervalArray. This can be useful for analyses where the starting point of each interval is of interest, such as in histogram creation, data aggregation, or any scenario requiring the identification of the beginning of defined ranges. This property returns a `` object containing the midpoint for each interval. See Also -------- arrays.IntervalArray.right : Return the right endpoints of each Interval in the IntervalArray as an Index. arrays.IntervalArray.mid : Return the midpoint of each Interval in the IntervalArray as an Index. arrays.IntervalArray.contains : Check elementwise if the Intervals contain the value. Examples -------- >>> interv_arr = pd.arrays.IntervalArray([pd.Interval(0, 1), pd.Interval(2, 5)]) >>> interv_arr [(0, 1], (2, 5]] Length: 2, dtype: interval[int64, right] >>> interv_arr.left Index([0, 2], dtype='int64')",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\arrays\\interval.py",
    "ast_data": "FunctionDef name:left arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "pt2_compile",
    "source_code": "@staticmethod\ndef pt2_compile(event_name: str, **metadata: object):\n    CompileEventLogger.add_data(event_name, CompileEventLogLevel.PT2_COMPILE, overwrite=False, **metadata)",
    "docstring": "Add to in chromium and PT2 Compile Events. Each key/value of metadata will appear in the chromium trace. Each kwarg name becomes a column in PT2 Compile Events, with the corresponding kwarg value. should be the name of a timed event span passed to , with log_to_pt2_compile_events=True.",
    "type": "method",
    "file_path": "pytorch\\torch\\_dynamo\\utils.py",
    "ast_data": "FunctionDef name:pt2_compile arg:event_name arguments arg arg Call"
  },
  {
    "library": "kornia",
    "name": "to_numpy",
    "source_code": "def to_numpy(self, x: Any) -> 'np.array':\n    if isinstance(x, (Tensor,)):\n        return x.cpu().detach().numpy()\n    if isinstance(x, (np.ndarray,)):\n        return x\n    if isinstance(x, (Image.Image,)):\n        return np.array(x)\n    raise TypeError('Input type not supported')",
    "docstring": "Convert input to numpy array. Args: x: The input to convert. Returns: np.array: The converted numpy array.",
    "type": "method",
    "file_path": "kornia\\kornia\\core\\mixin\\image_module.py",
    "ast_data": "FunctionDef name:to_numpy arg:self arg:x arguments arg arg If Call Return return:yes Call Call Call If Call Return return:yes If Call Return return:yes Call Raise Call"
  },
  {
    "library": "tensorflow",
    "name": "back_up",
    "source_code": "def back_up(self, epoch):\n    backend.set_value(self._ckpt_saved_epoch, epoch)\n    if self.write_checkpoint_manager.save():\n        distributed_file_utils.remove_temp_dirpath(self.write_checkpoint_manager.directory, self._model.distribute_strategy)",
    "docstring": "Back up the current state of training into a checkpoint file. Args: epoch: The current epoch information to be saved.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\distribute\\worker_training_state.py",
    "ast_data": "FunctionDef name:back_up arg:self arg:epoch arguments arg arg Call If Call Call"
  },
  {
    "library": "tensorflow",
    "name": "jobs",
    "source_code": "@property\ndef jobs(self):\n    return list(self._cluster_spec.keys())",
    "docstring": "Returns a list of job names in this cluster. Returns: A list of strings, corresponding to the names of jobs in this cluster.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\training\\server_lib.py",
    "ast_data": "FunctionDef name:jobs arg:self arguments arg Return return:yes Call Call"
  },
  {
    "library": "django",
    "name": "handle_raw_input",
    "source_code": "def handle_raw_input(self, input_data, META, content_length, boundary, encoding=None):\n    pass",
    "docstring": "Handle the raw input from the client. Parameters: :input_data: An object that supports reading via .read(). :META: ``. :content_length: The (integer) value of the Content-Length header from the client. :boundary: The boundary from the Content-Type header. Be sure to prepend two '--'.",
    "type": "method",
    "file_path": "django\\django\\core\\files\\uploadhandler.py",
    "ast_data": "FunctionDef name:handle_raw_input arg:self arg:input_data arg:META arg:content_length arg:boundary arg:encoding arguments arg arg arg arg arg arg"
  },
  {
    "library": "tensorflow",
    "name": "_resource_apply_sparse",
    "source_code": "def _resource_apply_sparse(self, grad, handle, indices):\n    raise NotImplementedError()",
    "docstring": "Add ops to apply sparse gradients to the variable . Similar to , the argument to this method has been de-duplicated. Optimizers which deal correctly with non-unique indices may instead override to avoid this overhead. Args: grad: a representing the gradient for the affected indices. handle: a of dtype which points to the variable to be updated. indices: a of integral type representing the indices for which the gradient is nonzero. Indices are unique. Returns: An which updates the value of the variable.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\training\\optimizer.py",
    "ast_data": "FunctionDef name:_resource_apply_sparse arg:self arg:grad arg:handle arg:indices arguments arg arg arg arg Raise Call"
  },
  {
    "library": "tensorflow",
    "name": "_add_collections",
    "source_code": "def _add_collections(self, assets_collection, main_op, train_op):\n    self._save_and_write_assets(assets_collection)\n    self._maybe_add_main_op(main_op)\n    self._add_train_op(train_op)",
    "docstring": "Add asset and op collections to be saved.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\saved_model\\builder_impl.py",
    "ast_data": "FunctionDef name:_add_collections arg:self arg:assets_collection arg:main_op arg:train_op arguments arg arg arg arg Call Call Call"
  },
  {
    "library": "matplotlib",
    "name": "start_pan",
    "source_code": "def start_pan(self, x, y, button):\n    self._pan_start = types.SimpleNamespace(lim=self.viewLim.frozen(), trans=self.transData.frozen(), trans_inverse=self.transData.inverted().frozen(), bbox=self.bbox.frozen(), x=x, y=y)",
    "docstring": "Called when a pan operation has started. Parameters ---------- x, y : float The mouse coordinates in display coords. button : The pressed mouse button. Notes ----- This is intended to be overridden by new projection types.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\axes\\_base.py",
    "ast_data": "FunctionDef name:start_pan arg:self arg:x arg:y arg:button arguments arg arg arg arg Assign Call Call Call Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_nested_row_partitions",
    "source_code": "@property\ndef _nested_row_partitions(self):\n    partitions = [self._row_partition]\n    rt_values = self.values\n    while isinstance(rt_values, RaggedTensor):\n        partitions.append(rt_values._row_partition)\n        rt_values = rt_values.values\n    return tuple(partitions)",
    "docstring": "Returns the row partitions for this .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\ragged_tensor.py",
    "ast_data": "FunctionDef name:_nested_row_partitions arg:self arguments arg Assign Assign While Call Call Assign Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "fit",
    "source_code": "@_fit_context(prefer_skip_nested_validation=True)\ndef fit(self, X, y=None):\n    X = validate_data(self, X, accept_sparse=('csr', 'csc'), dtype=np.float64, ensure_all_finite='allow-nan')\n    if hasattr(X, 'toarray'):\n        _, self.variances_ = mean_variance_axis(X, axis=0)\n        if self.threshold == 0:\n            mins, maxes = min_max_axis(X, axis=0)\n            peak_to_peaks = maxes - mins\n    else:\n        self.variances_ = np.nanvar(X, axis=0)\n        if self.threshold == 0:\n            peak_to_peaks = np.ptp(X, axis=0)\n    if self.threshold == 0:\n        compare_arr = np.array([self.variances_, peak_to_peaks])\n        self.variances_ = np.nanmin(compare_arr, axis=0)\n    if np.all(~np.isfinite(self.variances_) | (self.variances_ <= self.threshold)):\n        msg = 'No feature in X meets the variance threshold {0:.5f}'\n        if X.shape[0] == 1:\n            msg += ' (X contains only one sample)'\n        raise ValueError(msg.format(self.threshold))\n    return self",
    "docstring": "Learn empirical variances from X. Parameters ---------- X : {array-like, sparse matrix}, shape (n_samples, n_features) Data from which to compute variances, where is the number of samples and is the number of features. y : any, default=None Ignored. This parameter exists only for compatibility with sklearn.pipeline.Pipeline. Returns ------- self : object Returns the instance itself.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\feature_selection\\_variance_threshold.py",
    "ast_data": "FunctionDef name:fit arg:self arg:X arg:y arguments arg arg arg Assign Call If Call Assign Call If Compare Assign Call Assign Assign Call If Compare Assign Call If Compare Assign Call Assign Call If Call Call Compare Assign If Compare Raise Call Call Return return:yes Call"
  },
  {
    "library": "numpy",
    "name": "poly2lag",
    "source_code": "def poly2lag(pol):\n    [pol] = pu.as_series([pol])\n    res = 0\n    for p in pol[::-1]:\n        res = lagadd(lagmulx(res), p)\n    return res",
    "docstring": "poly2lag(pol) Convert a polynomial to a Laguerre series. Convert an array representing the coefficients of a polynomial (relative to the \"standard\" basis) ordered from lowest degree to highest, to an array of the coefficients of the equivalent Laguerre series, ordered from lowest to highest degree. Parameters ---------- pol : array_like 1-D array containing the polynomial coefficients Returns ------- c : ndarray 1-D array containing the coefficients of the equivalent Laguerre series. See Also -------- lag2poly Notes ----- The easy way to do conversions between polynomial basis sets is to use the convert method of a class instance. Examples -------- >>> import numpy as np >>> from numpy.polynomial.laguerre import poly2lag >>> poly2lag(np.arange(4)) array([ 23., -63., 58., -18.])",
    "type": "function",
    "file_path": "numpy\\numpy\\polynomial\\laguerre.py",
    "ast_data": "FunctionDef name:poly2lag arg:pol arguments arg Assign Call Assign For Assign Call Call Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "_make_random_matrix",
    "source_code": "def _make_random_matrix(self, n_components, n_features):\n    random_state = check_random_state(self.random_state)\n    return _gaussian_random_matrix(n_components, n_features, random_state=random_state)",
    "docstring": "Generate the random projection matrix. Parameters ---------- n_components : int, Dimensionality of the target projection space. n_features : int, Dimensionality of the original source space. Returns ------- components : ndarray of shape (n_components, n_features) The generated random matrix.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\random_projection.py",
    "ast_data": "FunctionDef name:_make_random_matrix arg:self arg:n_components arg:n_features arguments arg arg arg Assign Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_eval_indexed_slices",
    "source_code": "def _eval_indexed_slices(a):\n    if isinstance(a, indexed_slices.IndexedSlices) and context.executing_eagerly():\n        return indexed_slices.IndexedSlicesValue(indices=[x.numpy() for x in a.indices], values=[x.numpy() for x in a.values], dense_shape=a.dense_shape)\n    return a",
    "docstring": "Converts IndexedSlices to IndexedSlicesValue with numpy indices/values. When eager execution is enabled, converts IndexedSlices to IndexedSlicesValue with numpy indices/values. Args: a: any value. Returns: If a is IndexedSlices and eager execution is enabled, calls numpy() on a's fields. Otherwise returns a unchanged.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\gradient_checker_v2.py",
    "ast_data": "FunctionDef name:_eval_indexed_slices arg:a arguments arg If BoolOp Call Call Return return:yes Call Call Call Return return:yes"
  },
  {
    "library": "scipy",
    "name": "get_mem_info",
    "source_code": "def get_mem_info():\n    import psutil\n    vm = psutil.virtual_memory()\n    return {'memtotal': vm.total, 'memavailable': vm.available}",
    "docstring": "Get information about available memory",
    "type": "function",
    "file_path": "scipy\\benchmarks\\benchmarks\\common.py",
    "ast_data": "FunctionDef name:get_mem_info arguments Assign Call Return return:yes"
  },
  {
    "library": "scrapy",
    "name": "defer_fail",
    "source_code": "def defer_fail(_failure: Failure) -> Deferred[Any]:\n    from twisted.internet import reactor\n    d: Deferred[Any] = Deferred()\n    reactor.callLater(_DEFER_DELAY, d.errback, _failure)\n    return d",
    "docstring": "Same as twisted.internet.defer.fail but delay calling errback until next reactor loop It delays by 100ms so reactor has a chance to go through readers and writers before attending pending delayed calls, so do not set delay to zero.",
    "type": "function",
    "file_path": "scrapy\\scrapy\\utils\\defer.py",
    "ast_data": "FunctionDef name:defer_fail arg:_failure arguments arg Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_get_element_from_tensor_info",
    "source_code": "def _get_element_from_tensor_info(tensor_info, graph):\n    encoding = tensor_info.WhichOneof('encoding')\n    if encoding == 'name':\n        return graph.as_graph_element(tensor_info.name)\n    elif encoding == 'coo_sparse':\n        return sparse_tensor.SparseTensor(graph.get_tensor_by_name(tensor_info.coo_sparse.indices_tensor_name), graph.get_tensor_by_name(tensor_info.coo_sparse.values_tensor_name), graph.get_tensor_by_name(tensor_info.coo_sparse.dense_shape_tensor_name))\n    elif encoding == 'composite_tensor':\n        spec_proto = struct_pb2.StructuredValue(type_spec_value=tensor_info.composite_tensor.type_spec)\n        spec = nested_structure_coder.decode_proto(spec_proto)\n        components = [graph.get_tensor_by_name(component.name) for component in tensor_info.composite_tensor.components]\n        return spec._from_components(components)\n    else:\n        raise ValueError(f\"Invalid TensorInfo.encoding: {encoding}. Valid encodings are 'name', 'coo_sparse', and 'composite_tensor'.\")",
    "docstring": "Simplified copy of the deprecated .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\wrap_function.py",
    "ast_data": "FunctionDef name:_get_element_from_tensor_info arg:tensor_info arg:graph arguments arg arg Assign Call If Compare Return return:yes Call If Compare Return return:yes Call Call Call Call If Compare Assign Call Assign Call Assign Call Return return:yes Call Raise Call"
  },
  {
    "library": "pytorch",
    "name": "_raise_kernel_warnings",
    "source_code": "def _raise_kernel_warnings(params: SDPAParams) -> None:\n    if WARN_FOR_UNFUSED_KERNELS:\n        if not can_use_efficient_attention(params):\n            warn(\"Efficient attention can't be used because:\")\n            can_use_efficient_attention(params, True)\n        if not can_use_flash_attention(params):\n            warn(\"Flash attention can't be used because:\")\n            can_use_flash_attention(params, True)",
    "docstring": "If WARN_FOR_UNFUSED_KERNELS is set to True, this will raise warnings for all the reasons why the fused kernels can't be run. If using subclasses",
    "type": "function",
    "file_path": "pytorch\\torch\\nn\\attention\\__init__.py",
    "ast_data": "FunctionDef name:_raise_kernel_warnings arg:params arguments arg If If Call Call Call If Call Call Call"
  },
  {
    "library": "authlib",
    "name": "refresh_token",
    "source_code": "def refresh_token(self, url=None, refresh_token=None, body='', auth=None, headers=None, **kwargs):\n    session_kwargs = self._extract_session_request_params(kwargs)\n    refresh_token = refresh_token or self.token.get('refresh_token')\n    if 'scope' not in kwargs and self.scope:\n        kwargs['scope'] = self.scope\n    body = prepare_token_request('refresh_token', body, refresh_token=refresh_token, **kwargs)\n    if headers is None:\n        headers = DEFAULT_HEADERS.copy()\n    if url is None:\n        url = self.metadata.get('token_endpoint')\n    for hook in self.compliance_hook['refresh_token_request']:\n        url, headers, body = hook(url, headers, body)\n    if auth is None:\n        auth = self.client_auth(self.token_endpoint_auth_method)\n    return self._refresh_token(url, refresh_token=refresh_token, body=body, headers=headers, auth=auth, **session_kwargs)",
    "docstring": "Fetch a new access token using a refresh token. :param url: Refresh Token endpoint, must be HTTPS. :param refresh_token: The refresh_token to use. :param body: Optional application/x-www-form-urlencoded body to add the include in the token request. Prefer kwargs over body. :param auth: An auth tuple or method as accepted by requests. :param headers: Dict to default request headers with. :return: A :class: object (a dict too).",
    "type": "method",
    "file_path": "authlib\\authlib\\oauth2\\client.py",
    "ast_data": "FunctionDef name:refresh_token arg:self arg:url arg:refresh_token arg:body arg:auth arg:headers arguments arg arg arg arg arg arg arg Assign Call Assign BoolOp Call If BoolOp Compare Assign Assign Call If Compare Assign Call If Compare Assign Call For Assign Call If Compare Assign Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "unshard",
    "source_code": "def unshard(self, async_op: bool=False) -> Optional[UnshardHandle]:\n    state = self._get_fsdp_state()\n    fsdp_param_group = state._fsdp_param_group\n    if fsdp_param_group is not None:\n        fsdp_param_group.lazy_init()\n        fsdp_param_group.unshard(async_op=async_op)\n    handle = _UnshardHandleImpl(fsdp_param_group)\n    if async_op:\n        return handle\n    handle.wait()\n    return None",
    "docstring": "Unshards the module's parameters by allocating memory and all-gathering the parameters. This method is *not* recursive. The unshard follows the :class:, so it will all-gather following `UnshardHandlewaitwait` explicitly if the wait should happen before pre-forward.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\fsdp\\_fully_shard\\_fully_shard.py",
    "ast_data": "FunctionDef name:unshard arg:self arg:async_op arguments arg arg Assign Call Assign If Compare Call Call Assign Call If Return return:yes Call Return return:no"
  },
  {
    "library": "cherrypy",
    "name": "__init__",
    "source_code": "def __init__(self):\n    self.bus = cherrypy.engine\n    self.httpserver = None\n    self.interrupt = None\n    self.running = False",
    "docstring": "Initialize Server instance.",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\_cpserver.py",
    "ast_data": "FunctionDef name:__init__ arg:self arguments arg Assign Assign Assign Assign"
  },
  {
    "library": "tensorflow",
    "name": "call_concrete_function_callbacks",
    "source_code": "def call_concrete_function_callbacks(concrete_fn):\n    for callback in CONCRETE_FUNCTION_CALLBACKS:\n        callback(concrete_fn)",
    "docstring": "Calls registered callbacks against new ConcreteFunctions.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\polymorphic_function\\transform.py",
    "ast_data": "FunctionDef name:call_concrete_function_callbacks arg:concrete_fn arguments arg For Call"
  },
  {
    "library": "scipy",
    "name": "_check_bounds",
    "source_code": "def _check_bounds(self, x_new):\n    below_bounds = x_new < self.x[0]\n    above_bounds = x_new > self.x[-1]\n    if self.bounds_error and below_bounds.any():\n        below_bounds_value = x_new[np.argmax(below_bounds)]\n        raise ValueError(f\"A value ({below_bounds_value}) in x_new is below the interpolation range's minimum value ({self.x[0]}).\")\n    if self.bounds_error and above_bounds.any():\n        above_bounds_value = x_new[np.argmax(above_bounds)]\n        raise ValueError(f\"A value ({above_bounds_value}) in x_new is above the interpolation range's maximum value ({self.x[-1]}).\")\n    return (below_bounds, above_bounds)",
    "docstring": "Check the inputs for being in the bounds of the interpolated data. Parameters ---------- x_new : array Returns ------- out_of_bounds : bool array The mask on x_new of values that are out of the bounds.",
    "type": "method",
    "file_path": "scipy\\scipy\\interpolate\\_interpolate.py",
    "ast_data": "FunctionDef name:_check_bounds arg:self arg:x_new arguments arg arg Assign Compare Assign Compare If BoolOp Call Assign Call Raise Call If BoolOp Call Assign Call Raise Call Return return:yes"
  },
  {
    "library": "django",
    "name": "get_complete_version",
    "source_code": "def get_complete_version(version=None):\n    if version is None:\n        from django import VERSION as version\n    else:\n        assert len(version) == 5\n        assert version[3] in ('alpha', 'beta', 'rc', 'final')\n    return version",
    "docstring": "Return a tuple of the django version. If version argument is non-empty, check for correctness of the tuple provided.",
    "type": "function",
    "file_path": "django\\django\\utils\\version.py",
    "ast_data": "FunctionDef name:get_complete_version arg:version arguments arg If Compare Compare Call Compare Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "get_metadata_routing",
    "source_code": "def get_metadata_routing(self):\n    router = MetadataRouter(owner=self.__class__.__name__).add_self_request(self).add(splitter=check_cv(self.cv), method_mapping=MethodMapping().add(caller='fit', callee='split'))\n    return router",
    "docstring": "Get metadata routing of this object. Please check :ref: on how the routing mechanism works. .. versionadded:: 1.4 Returns ------- routing : MetadataRouter A :class: encapsulating routing information.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\linear_model\\_coordinate_descent.py",
    "ast_data": "FunctionDef name:get_metadata_routing arg:self arguments arg Assign Call Call Call Call Call Call Return return:yes"
  },
  {
    "library": "django",
    "name": "sync_only_middleware",
    "source_code": "def sync_only_middleware(func):\n    func.sync_capable = True\n    func.async_capable = False\n    return func",
    "docstring": "Mark a middleware factory as returning a sync middleware. This is the default.",
    "type": "function",
    "file_path": "django\\django\\utils\\decorators.py",
    "ast_data": "FunctionDef name:sync_only_middleware arg:func arguments arg Assign Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "list_children",
    "source_code": "def list_children(self, obj):\n    if obj not in self._children_cache:\n        children = self._children_cache[obj] = {}\n        for name, child in super(_AugmentedGraphView, self).list_children(obj, save_type=base.SaveType.SAVEDMODEL, cache=self._serialization_cache):\n            if isinstance(child, defun.ConcreteFunction):\n                child = self._maybe_uncache_variable_captures(child)\n            children[name] = child\n        if isinstance(obj, def_function.Function) and (not children):\n            self.untraced_functions.append(obj.name)\n    for name, child in self._children_cache[obj].items():\n        yield base.TrackableReference(name, child)",
    "docstring": "Lists children of for SavedModel.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\saved_model\\save.py",
    "ast_data": "FunctionDef name:list_children arg:self arg:obj arguments arg arg If Compare Assign For Call Call If Call Assign Call Assign If BoolOp Call Call For Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_Callable",
    "source_code": "class _Callable(object):\n\n    def __init__(self, session, callable_options):\n        self._session = session\n        self._handle = None\n        options_ptr = tf_session.TF_NewBufferFromString(compat.as_bytes(callable_options.SerializeToString()))\n        try:\n            self._handle = tf_session.TF_SessionMakeCallable(session._session, options_ptr)\n        finally:\n            tf_session.TF_DeleteBuffer(options_ptr)\n\n    def __call__(self, *args, **kwargs):\n        run_metadata = kwargs.get('run_metadata', None)\n        try:\n            run_metadata_ptr = tf_session.TF_NewBuffer() if run_metadata else None\n            ret = tf_session.TF_SessionRunCallable(self._session._session, self._handle, args, run_metadata_ptr)\n            if run_metadata:\n                proto_data = tf_session.TF_GetBuffer(run_metadata_ptr)\n                run_metadata.ParseFromString(compat.as_bytes(proto_data))\n        finally:\n            if run_metadata_ptr:\n                tf_session.TF_DeleteBuffer(run_metadata_ptr)\n        return ret\n\n    def __del__(self):\n        if self._handle is not None and self._session._session is not None and (not self._session._closed):\n            tf_session.TF_SessionReleaseCallable(self._session._session, self._handle)",
    "docstring": "Experimental wrapper for the C++ API.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\client\\session.py",
    "ast_data": "ClassDef name:_Callable FunctionDef name:__init__ arg:self arg:session arg:callable_options arguments arg arg arg Assign Assign Assign Call Call Call Try Assign Call Call FunctionDef name:__call__ arg:self arguments arg arg arg Assign Call Try Assign Call Assign Call If Assign Call Call Call If Call Return return:yes FunctionDef name:__del__ arg:self arguments arg If BoolOp Compare Compare Call"
  },
  {
    "library": "django",
    "name": "deserialize_db_from_string",
    "source_code": "def deserialize_db_from_string(self, data):\n    data = StringIO(data)\n    table_names = set()\n    with atomic(using=self.connection.alias):\n        with self.connection.constraint_checks_disabled():\n            for obj in serializers.deserialize('json', data, using=self.connection.alias):\n                obj.save()\n                table_names.add(obj.object.__class__._meta.db_table)\n        self.connection.check_constraints(table_names=table_names)",
    "docstring": "Reload the database with data from a string generated by the serialize_db_to_string() method.",
    "type": "method",
    "file_path": "django\\django\\db\\backends\\base\\creation.py",
    "ast_data": "FunctionDef name:deserialize_db_from_string arg:self arg:data arguments arg arg Assign Call Assign Call With Call With Call For Call Call Call Call"
  },
  {
    "library": "kornia",
    "name": "forward",
    "source_code": "def forward(self, img: Tensor, mask: Optional[Tensor]=None) -> Tuple[Tensor, Tensor, Tensor]:\n    lafs, responses = self.detector(img, mask)\n    lafs = scale_laf(lafs, self.scaling_coef)\n    descs = self.descriptor(img, lafs)\n    return (lafs, responses, descs)",
    "docstring": "Run forward. Args: img: image to extract features with shape :math:. mask: a mask with weights where to apply the response function. The shape must be the same as the input image. Returns: - Detected local affine frames with shape :math:. - Response function values for corresponding lafs with shape :math:. - Local descriptors of shape :math: where :math: is descriptor size.",
    "type": "method",
    "file_path": "kornia\\kornia\\feature\\integrated.py",
    "ast_data": "FunctionDef name:forward arg:self arg:img arg:mask arguments arg arg arg Assign Call Assign Call Assign Call Return return:yes"
  },
  {
    "library": "cryptography",
    "name": "sign",
    "source_code": "@abc.abstractmethod\ndef sign(self, data: Buffer) -> bytes:\n    pass",
    "docstring": "Signs the data.",
    "type": "method",
    "file_path": "cryptography\\src\\cryptography\\hazmat\\primitives\\asymmetric\\ed448.py",
    "ast_data": "FunctionDef name:sign arg:self arg:data arguments arg arg"
  },
  {
    "library": "scipy",
    "name": "_Remove_From_Func",
    "source_code": "class _Remove_From_Func:\n\n    def __init__(self, fun_in, i_fixed, x_fixed, min_dim=None, remove=0):\n        self.fun_in = fun_in\n        self.i_fixed = i_fixed\n        self.x_fixed = x_fixed\n        self.min_dim = min_dim\n        self.remove = remove\n\n    def __call__(self, x_in, *args, **kwargs):\n        x_out = np.zeros_like(self.i_fixed, dtype=x_in.dtype)\n        x_out[self.i_fixed] = self.x_fixed\n        x_out[~self.i_fixed] = x_in\n        y_out = self.fun_in(x_out, *args, **kwargs)\n        y_out = np.array(y_out)\n        if self.min_dim == 1:\n            y_out = np.atleast_1d(y_out)\n        elif self.min_dim == 2:\n            y_out = np.atleast_2d(y_out)\n        if self.remove == 1:\n            y_out = y_out[..., ~self.i_fixed]\n        elif self.remove == 2:\n            y_out = y_out[~self.i_fixed, ~self.i_fixed]\n        return y_out",
    "docstring": "Wraps a function such that fixed variables need not be passed in",
    "type": "class",
    "file_path": "scipy\\scipy\\optimize\\_minimize.py",
    "ast_data": "ClassDef name:_Remove_From_Func FunctionDef name:__init__ arg:self arg:fun_in arg:i_fixed arg:x_fixed arg:min_dim arg:remove arguments arg arg arg arg arg arg Assign Assign Assign Assign Assign FunctionDef name:__call__ arg:self arg:x_in arguments arg arg arg arg Assign Call Assign Assign Assign Call Assign Call If Compare Assign Call If Compare Assign Call If Compare Assign If Compare Assign Return return:yes"
  },
  {
    "library": "authlib",
    "name": "patch",
    "source_code": "def patch(self, url, **kwargs):\n    return self.request('PATCH', url, **kwargs)",
    "docstring": "Invoke PATCH http request. If `` configured, shortcut is available:: client.patch(\"profile\", json={\"name\": \"Hsiaoming Yang\"})",
    "type": "method",
    "file_path": "authlib\\authlib\\integrations\\base_client\\sync_app.py",
    "ast_data": "FunctionDef name:patch arg:self arg:url arguments arg arg arg Return return:yes Call"
  },
  {
    "library": "kornia",
    "name": "_modified_bessel_0",
    "source_code": "def _modified_bessel_0(x: Tensor) -> Tensor:\n    ax = torch.abs(x)\n    out = zeros_like(x)\n    idx_a = ax < 3.75\n    if idx_a.any():\n        y = x[idx_a] / 3.75 * (x[idx_a] / 3.75)\n        out[idx_a] = 1.0 + y * (3.5156229 + y * (3.0899424 + y * (1.2067492 + y * (0.2659732 + y * (0.0360768 + y * 0.0045813)))))\n    idx_b = ~idx_a\n    if idx_b.any():\n        y = 3.75 / ax[idx_b]\n        ans = 0.00916281 + y * (-0.02057706 + y * (0.02635537 + y * (-0.01647633 + y * 0.00392377)))\n        coef = 0.39894228 + y * (0.01328592 + y * (0.00225319 + y * (-0.00157565 + y * ans)))\n        out[idx_b] = ax[idx_b].exp() / ax[idx_b].sqrt() * coef\n    return out",
    "docstring": "Adapted from:",
    "type": "function",
    "file_path": "kornia\\kornia\\filters\\kernels.py",
    "ast_data": "FunctionDef name:_modified_bessel_0 arg:x arguments arg Assign Call Assign Call Assign Compare If Call Assign Assign Assign If Call Assign Assign Assign Assign Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "trainable_weights",
    "source_code": "@property\ndef trainable_weights(self):\n    return self.trainable_variables",
    "docstring": "List of trainable weights/variables created by the Template.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\template.py",
    "ast_data": "FunctionDef name:trainable_weights arg:self arguments arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_as_tensor_fullprec",
    "source_code": "def _as_tensor_fullprec(t):\n    ty = type(t)\n    if ty is builtins.float:\n        return torch.as_tensor(t, dtype=torch.float64)\n    elif ty is builtins.int:\n        return torch.as_tensor(t, dtype=torch.int64)\n    else:\n        return torch.as_tensor(t)",
    "docstring": "Like torch.as_tensor, but when given Python data types it will keep them in full precision. Used for calling convention for Dynamo.",
    "type": "function",
    "file_path": "pytorch\\torch\\__init__.py",
    "ast_data": "FunctionDef name:_as_tensor_fullprec arg:t arguments arg Assign Call If Compare Return return:yes Call If Compare Return return:yes Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "join",
    "source_code": "def join(self):\n    self.closure_queue.wait()",
    "docstring": "Blocks until all scheduled functions are executed.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\coordinator\\cluster_coordinator.py",
    "ast_data": "FunctionDef name:join arg:self arguments arg Call"
  },
  {
    "library": "scrapy",
    "name": "setdefault_in_component_priority_dict",
    "source_code": "def setdefault_in_component_priority_dict(self, name: _SettingsKeyT, cls: type, priority: int | None) -> None:\n    component_priority_dict = self.getdict(name)\n    for cls_or_path in tuple(component_priority_dict):\n        if load_object(cls_or_path) == cls:\n            return\n    component_priority_dict[cls] = priority\n    self.set(name, component_priority_dict, self.getpriority(name) or 0)",
    "docstring": "Set the *cls* component in the *name* :ref: setting with *priority* if not already defined (even as an import string). If *cls* is not already defined, it is set regardless of the priority of the *name* setting. The setting priority is not affected by this change either.",
    "type": "method",
    "file_path": "scrapy\\scrapy\\settings\\__init__.py",
    "ast_data": "FunctionDef name:setdefault_in_component_priority_dict arg:self arg:name arg:cls arg:priority arguments arg arg arg arg Assign Call For Call If Compare Call Return return:no Assign Call BoolOp Call"
  },
  {
    "library": "pytorch",
    "name": "normalize_to_torch_size",
    "source_code": "def normalize_to_torch_size(size) -> torch.Size:\n    if isinstance(size, torch.Size):\n        return size\n    if isinstance(size, int):\n        torch_size = [size]\n    elif len(size) == 1 and isinstance(size[0], Sequence):\n        torch_size = list(size[0])\n    else:\n        torch_size = list(size)\n    return torch.Size(torch_size)",
    "docstring": "Unify variable types of size argument to torch.Size Acceptable types include: int, Sequence[int], Tuple[int], Tuple[Sequence[int]], or torch.Size",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\tensor\\_utils.py",
    "ast_data": "FunctionDef name:normalize_to_torch_size arg:size arguments arg If Call Return return:yes If Call Assign If BoolOp Compare Call Call Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "numpy",
    "name": "masked_all",
    "source_code": "def masked_all(shape, dtype=float):\n    a = masked_array(np.empty(shape, dtype), mask=np.ones(shape, make_mask_descr(dtype)))\n    return a",
    "docstring": "Empty masked array with all elements masked. Return an empty masked array of the given shape and dtype, where all the data are masked. Parameters ---------- shape : int or tuple of ints Shape of the required MaskedArray, e.g., `numpy.ma.zerosnumpy.ma.onesnumpy.ma.fullmasked_alldtype` parameter defines the underlying data type. >>> a = np.ma.masked_all((3, 3)) >>> a.dtype dtype('float64') >>> a = np.ma.masked_all((3, 3), dtype=np.int32) >>> a.dtype dtype('int32')",
    "type": "function",
    "file_path": "numpy\\numpy\\ma\\extras.py",
    "ast_data": "FunctionDef name:masked_all arg:shape arg:dtype arguments arg arg Assign Call Call Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "row_partitions",
    "source_code": "@property\ndef row_partitions(self):\n    return self._row_partitions",
    "docstring": "The row_partitions of the shape.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\dynamic_ragged_shape.py",
    "ast_data": "FunctionDef name:row_partitions arg:self arguments arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "FuseCustomConfig",
    "source_code": "class FuseCustomConfig:\n\n    def __init__(self) -> None:\n        self.preserved_attributes: list[str] = []\n\n    def __repr__(self):\n        dict_nonempty = {k: v for k, v in self.__dict__.items() if len(v) > 0}\n        return f'FuseCustomConfig({dict_nonempty})'\n\n    def set_preserved_attributes(self, attributes: list[str]) -> FuseCustomConfig:\n        self.preserved_attributes = attributes\n        return self\n\n    @classmethod\n    def from_dict(cls, fuse_custom_config_dict: dict[str, Any]) -> FuseCustomConfig:\n        conf = cls()\n        conf.set_preserved_attributes(fuse_custom_config_dict.get(PRESERVED_ATTRIBUTES_DICT_KEY, []))\n        return conf\n\n    def to_dict(self) -> dict[str, Any]:\n        d: dict[str, Any] = {}\n        if len(self.preserved_attributes) > 0:\n            d[PRESERVED_ATTRIBUTES_DICT_KEY] = self.preserved_attributes\n        return d",
    "docstring": "Custom configuration for :func:. Example usage:: fuse_custom_config = FuseCustomConfig().set_preserved_attributes([\"attr1\", \"attr2\"])",
    "type": "class",
    "file_path": "pytorch\\torch\\ao\\quantization\\fx\\custom_config.py",
    "ast_data": "ClassDef name:FuseCustomConfig FunctionDef name:__init__ arg:self arguments arg FunctionDef name:__repr__ arg:self arguments arg Assign Call Compare Call Return return:yes FunctionDef name:set_preserved_attributes arg:self arg:attributes arguments arg arg Assign Return return:yes FunctionDef name:from_dict arg:cls arg:fuse_custom_config_dict arguments arg arg Assign Call Call Call Return return:yes FunctionDef name:to_dict arg:self arguments arg If Compare Call Assign Return return:yes"
  },
  {
    "library": "numpy",
    "name": "lower",
    "source_code": "@set_module('numpy.strings')\n@array_function_dispatch(_unary_op_dispatcher)\ndef lower(a):\n    a_arr = np.asarray(a)\n    return _vec_string(a_arr, a_arr.dtype, 'lower')",
    "docstring": "Return an array with the elements converted to lowercase. Call :meth: element-wise. For 8-bit strings, this method is locale-dependent. Parameters ---------- a : array-like, with `` dtype, depending on input types See Also -------- str.lower Examples -------- >>> import numpy as np >>> c = np.array(['A1B C', '1BCA', 'BCA1']); c array(['A1B C', '1BCA', 'BCA1'], dtype='>> np.strings.lower(c) array(['a1b c', '1bca', 'bca1'], dtype='<U5')",
    "type": "function",
    "file_path": "numpy\\numpy\\_core\\strings.py",
    "ast_data": "FunctionDef name:lower arg:a arguments arg Assign Call Return return:yes Call Call Call"
  },
  {
    "library": "django",
    "name": "options",
    "source_code": "def options(self, name, value, attrs=None):\n    for group in self.optgroups(name, value, attrs):\n        yield from group[1]",
    "docstring": "Yield a flat list of options for this widget.",
    "type": "method",
    "file_path": "django\\django\\forms\\widgets.py",
    "ast_data": "FunctionDef name:options arg:self arg:name arg:value arg:attrs arguments arg arg arg arg For Call"
  },
  {
    "library": "cherrypy",
    "name": "_Vars",
    "source_code": "class _Vars(object):\n\n    def __init__(self, target):\n        self.target = target\n\n    def setdefault(self, key, default):\n        if not hasattr(self.target, key):\n            setattr(self.target, key, default)\n        return getattr(self.target, key)",
    "docstring": "Adapter allowing setting a default attribute on a function or class.",
    "type": "class",
    "file_path": "cherrypy\\cherrypy\\_cpconfig.py",
    "ast_data": "ClassDef name:_Vars FunctionDef name:__init__ arg:self arg:target arguments arg arg Assign FunctionDef name:setdefault arg:self arg:key arg:default arguments arg arg arg If Call Call Return return:yes Call"
  },
  {
    "library": "authlib",
    "name": "userinfo",
    "source_code": "async def userinfo(self, **kwargs):\n    metadata = await self.load_server_metadata()\n    resp = await self.get(metadata['userinfo_endpoint'], **kwargs)\n    resp.raise_for_status()\n    data = resp.json()\n    return UserInfo(data)",
    "docstring": "Fetch user info from ``.",
    "type": "method",
    "file_path": "authlib\\authlib\\integrations\\base_client\\async_openid.py",
    "ast_data": "AsyncFunctionDef name:userinfo arg:self arguments arg arg Assign Call Assign Call Call Assign Call Return return:yes Call"
  },
  {
    "library": "cherrypy",
    "name": "HA2",
    "source_code": "def HA2(self, entity_body=''):\n    if self.qop is None or self.qop == 'auth':\n        a2 = '%s:%s' % (self.http_method, self.uri)\n    elif self.qop == 'auth-int':\n        a2 = '%s:%s:%s' % (self.http_method, self.uri, H(entity_body))\n    else:\n        raise ValueError(self.errmsg('Unrecognized value for qop!'))\n    return H(a2)",
    "docstring": "Return the H(A2) string. See :rfc: section 3.2.2.3.",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\lib\\auth_digest.py",
    "ast_data": "FunctionDef name:HA2 arg:self arg:entity_body arguments arg arg If BoolOp Compare Compare Assign If Compare Assign Call Raise Call Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "create",
    "source_code": "@staticmethod\ndef create(value, **kwargs) -> VariableTracker:\n    source = kwargs.get('source', None)\n    if isinstance(value, set):\n        items = [ConstantVariable.create(x) for x in value]\n        return variables.SetVariable(items, **kwargs)\n    elif isinstance(value, frozenset):\n        items = [ConstantVariable.create(x) for x in value]\n        return variables.FrozensetVariable(items, **kwargs)\n    elif isinstance(value, (list, tuple)):\n        items = []\n        for i, x in enumerate(value):\n            item_source = GetItemSource(source, i) if source else None\n            items.append(ConstantVariable.create(x, source=item_source))\n        return variables.BaseListVariable.cls_for(type(value))(items, **kwargs)\n    return ConstantVariable(value, **kwargs)",
    "docstring": "Create a based on the given value, and supports automatic routing for collection types like (in which case we'd create for the leaf items). NOTE: the caller must install the proper guards if needed; most often the guard will be .",
    "type": "method",
    "file_path": "pytorch\\torch\\_dynamo\\variables\\constant.py",
    "ast_data": "FunctionDef name:create arg:value arguments arg arg Assign Call If Call Assign Call Return return:yes Call If Call Assign Call Return return:yes Call If Call Assign For Call Assign Call Call Call Return return:yes Call Call Call Return return:yes Call"
  },
  {
    "library": "sphinx",
    "name": "set_html_assets_policy",
    "source_code": "def set_html_assets_policy(self, policy: Literal['always', 'per_page']) -> None:\n    if policy not in {'always', 'per_page'}:\n        raise ValueError('policy %s is not supported' % policy)\n    self.registry.html_assets_policy = policy",
    "docstring": "Set the policy to include assets in HTML pages. - always: include the assets in all the pages - per_page: include the assets only in pages where they are used .. versionadded: 4.1",
    "type": "method",
    "file_path": "sphinx\\sphinx\\application.py",
    "ast_data": "FunctionDef name:set_html_assets_policy arg:self arg:policy arguments arg arg If Compare Raise Call Assign"
  },
  {
    "library": "pytorch",
    "name": "create_script_module",
    "source_code": "def create_script_module(nn_module, stubs_fn, share_types=True, is_tracing=False):\n    assert not isinstance(nn_module, torch.jit.RecursiveScriptModule)\n    check_module_initialized(nn_module)\n    concrete_type = get_module_concrete_type(nn_module, share_types)\n    if not is_tracing:\n        AttributeTypeIsSupportedChecker().check(nn_module)\n    return create_script_module_impl(nn_module, concrete_type, stubs_fn)",
    "docstring": "Create a new ScriptModule from an nn.Module. Args: nn_module: The original Python nn.Module that we are creating a ScriptModule for. stubs_fn: Lambda that takes an nn.Module and generates a list of ScriptMethodStubs to compile. share_types: Whether to share underlying JIT types between modules (if possible). NOTE: Only set to False this when we cannot guarantee type sharing will work correctly. This only happens today for traced modules, where the same module can produce different traced methods depending on the inputs. is_tracing: Whether this function is called during tracing or scripting. If tracing, we don't need to do AttributeTypeIsSupportedChecker because all the unsupported attributes will be baked as constant in the tracing graph. In addition, this check significantly slows down the traced modules when the module size is big.",
    "type": "function",
    "file_path": "pytorch\\torch\\jit\\_recursive.py",
    "ast_data": "FunctionDef name:create_script_module arg:nn_module arg:stubs_fn arg:share_types arg:is_tracing arguments arg arg arg arg Call Call Assign Call If Call Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, cell):\n    self._cell = cell",
    "docstring": "Creates a new BoolGaugeCell. Args: cell: A c pointer of TFE_MonitoringBoolGaugeCell.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\monitoring.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:cell arguments arg arg Assign"
  },
  {
    "library": "scipy",
    "name": "_build_evaluation_coefficients",
    "source_code": "def _build_evaluation_coefficients(x, y, kernel, epsilon, powers, shift, scale):\n    q = x.shape[0]\n    p = y.shape[0]\n    r = powers.shape[0]\n    kernel_func = NAME_TO_FUNC[kernel]\n    yeps = y * epsilon\n    xeps = x * epsilon\n    xhat = (x - shift) / scale\n    vec = np.empty((q, p + r), dtype=float)\n    for i in range(q):\n        kernel_vector(xeps[i], yeps, kernel_func, vec[i, :p])\n        polynomial_vector(xhat[i], powers, vec[i, p:])\n    return vec",
    "docstring": "Construct the coefficients needed to evaluate the RBF. Parameters ---------- x : (Q, N) float ndarray Evaluation point coordinates. y : (P, N) float ndarray Data point coordinates. kernel : str Name of the RBF. epsilon : float Shape parameter. powers : (R, N) int ndarray The exponents for each monomial in the polynomial. shift : (N,) float ndarray Shifts the polynomial domain for numerical stability. scale : (N,) float ndarray Scales the polynomial domain for numerical stability. Returns ------- (Q, P + R) float ndarray",
    "type": "function",
    "file_path": "scipy\\scipy\\interpolate\\_rbfinterp_pythran.py",
    "ast_data": "FunctionDef name:_build_evaluation_coefficients arg:x arg:y arg:kernel arg:epsilon arg:powers arg:shift arg:scale arguments arg arg arg arg arg arg arg Assign Assign Assign Assign Assign Assign Assign Assign Call For Call Call Call Return return:yes"
  },
  {
    "library": "scipy",
    "name": "in_dir",
    "source_code": "@contextmanager\ndef in_dir(dir=None):\n    cwd = os.getcwd()\n    if dir is None:\n        yield cwd\n        return\n    os.chdir(dir)\n    yield dir\n    os.chdir(cwd)",
    "docstring": "Change directory to given directory for duration of `in_tempdir` again.",
    "type": "function",
    "file_path": "scipy\\scipy\\_lib\\_tmpdirs.py",
    "ast_data": "FunctionDef name:in_dir arg:dir arguments arg Assign Call If Compare Return return:no Call Call"
  },
  {
    "library": "matplotlib",
    "name": "secondary_yaxis",
    "source_code": "@_docstring.interpd\ndef secondary_yaxis(self, location, functions=None, *, transform=None, **kwargs):\n    if not (location in ['left', 'right'] or isinstance(location, Real)):\n        raise ValueError('secondary_yaxis location must be either a float or \"left\"/\"right\"')\n    secondary_ax = SecondaryAxis(self, 'y', location, functions, transform, **kwargs)\n    self.add_child_axes(secondary_ax)\n    return secondary_ax",
    "docstring": "Add a second y-axis to this . For example if we want to have a second scale for the data plotted on the yaxis. %(_secax_docstring)s Examples -------- Add a secondary Axes that converts from radians to degrees .. plot:: fig, ax = plt.subplots() ax.plot(range(1, 360, 5), range(1, 360, 5)) ax.set_ylabel('degrees') secax = ax.secondary_yaxis('right', functions=(np.deg2rad, np.rad2deg)) secax.set_ylabel('radians') To add a secondary axis relative to your data, you can pass a transform to the new axis. .. plot:: fig, ax = plt.subplots() ax.plot(range(0, 5), range(-1, 4)) # Pass 'ax.transData' as a transform to place the axis # relative to your data at x=3 secax = ax.secondary_yaxis(3, transform=ax.transData)",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\axes\\_axes.py",
    "ast_data": "FunctionDef name:secondary_yaxis arg:self arg:location arg:functions arguments arg arg arg arg arg If BoolOp Compare Call Raise Call Assign Call Call Return return:yes"
  },
  {
    "library": "pandas",
    "name": "infer_filename",
    "source_code": "def infer_filename(self) -> str | None:\n    if self.name is None:\n        return None\n    filename = Path(self.name)\n    if filename.suffix == '.tar':\n        return filename.with_suffix('').name\n    elif filename.suffix in ('.tar.gz', '.tar.bz2', '.tar.xz'):\n        return filename.with_suffix('').with_suffix('').name\n    return filename.name",
    "docstring": "If an explicit archive_name is not given, we still want the file inside the zip file not to be named something.tar, because that causes confusion (GH39465).",
    "type": "method",
    "file_path": "pandas\\pandas\\io\\common.py",
    "ast_data": "FunctionDef name:infer_filename arg:self arguments arg If Compare Return return:no Assign Call If Compare Return return:yes Call If Compare Return return:yes Call Call Return return:yes"
  },
  {
    "library": "seaborn",
    "name": "dendrogram",
    "source_code": "def dendrogram(data, *, linkage=None, axis=1, label=True, metric='euclidean', method='average', rotate=False, tree_kws=None, ax=None):\n    if _no_scipy:\n        raise RuntimeError('dendrogram requires scipy to be installed')\n    plotter = _DendrogramPlotter(data, linkage=linkage, axis=axis, metric=metric, method=method, label=label, rotate=rotate)\n    if ax is None:\n        ax = plt.gca()\n    return plotter.plot(ax=ax, tree_kws=tree_kws)",
    "docstring": "Draw a tree diagram of relationships within a matrix Parameters ---------- data : pandas.DataFrame Rectangular data linkage : numpy.array, optional Linkage matrix axis : int, optional Which axis to use to calculate linkage. 0 is rows, 1 is columns. label : bool, optional If True, label the dendrogram at leaves with column or row names metric : str, optional Distance metric. Anything valid for scipy.spatial.distance.pdist method : str, optional Linkage method to use. Anything valid for scipy.cluster.hierarchy.linkage rotate : bool, optional When plotting the matrix, whether to rotate it 90 degrees counter-clockwise, so the leaves face right tree_kws : dict, optional Keyword arguments for the `` that is used for plotting the lines of the dendrogram tree. ax : matplotlib axis, optional Axis to plot on, otherwise uses current axis Returns ------- dendrogramplotter : _DendrogramPlotter A Dendrogram plotter object. Notes ----- Access the reordered dendrogram indices with dendrogramplotter.reordered_ind",
    "type": "function",
    "file_path": "seaborn\\seaborn\\matrix.py",
    "ast_data": "FunctionDef name:dendrogram arg:data arguments arg arg arg arg arg arg arg arg arg If Raise Call Assign Call If Compare Assign Call Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "run_monitored_proc",
    "source_code": "def run_monitored_proc(code):\n    if not sys.platform.startswith('linux'):\n        raise RuntimeError('Peak memory monitoring only works on Linux')\n    code = textwrap.dedent(code)\n    process = subprocess.Popen([sys.executable, '-c', code])\n    peak_memusage = -1\n    start = time.time()\n    while True:\n        ret = process.poll()\n        if ret is not None:\n            break\n        with open(f'/proc/{process.pid}/status') as f:\n            procdata = f.read()\n        m = re.search('VmRSS:\\\\s*(\\\\d+)\\\\s*kB', procdata, re.S | re.I)\n        if m is not None:\n            memusage = float(m.group(1)) * 1000.0\n            peak_memusage = max(memusage, peak_memusage)\n        time.sleep(0.01)\n    process.wait()\n    duration = time.time() - start\n    if process.returncode != 0:\n        raise AssertionError(f'Running failed:\\n{code}')\n    return (duration, peak_memusage)",
    "docstring": "Run code in a new Python process, and monitor peak memory usage. Returns ------- duration : float Duration in seconds (including Python startup time) peak_memusage : float Peak memory usage (rough estimate only) in bytes",
    "type": "function",
    "file_path": "scipy\\benchmarks\\benchmarks\\common.py",
    "ast_data": "FunctionDef name:run_monitored_proc arg:code arguments arg If Call Raise Call Assign Call Assign Call Assign Assign Call While Assign Call If Compare With Call Assign Call Assign Call If Compare Assign Call Call Assign Call Call Call Assign Call If Compare Raise Call Return return:yes"
  },
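A hedged usage sketch for the benchmark helper above, assuming `run_monitored_proc` is in scope (it lives in scipy's benchmark `common` module) and the host is Linux, since the helper polls `/proc/<pid>/status` for `VmRSS`:

```python
# Assumes run_monitored_proc (above) is importable/in scope; Linux only.
duration, peak = run_monitored_proc("""
    import numpy as np
    a = np.ones((2000, 2000))   # roughly 32 MB resident
    print(a.sum())
""")
# peak is reported in bytes (VmRSS kB * 1000)
print(f"took {duration:.2f}s, peak ~{peak / 1e6:.0f} MB")
```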
  {
    "library": "tensorflow",
    "name": "TPUSystemMetadata",
    "source_code": "@tf_export('tpu.experimental.TPUSystemMetadata')\nclass TPUSystemMetadata(collections.namedtuple('TPUSystemMetadata', ['num_cores', 'num_hosts', 'num_of_cores_per_host', 'topology', 'devices'])):\n\n    def __new__(cls, num_cores, num_hosts, num_of_cores_per_host, topology, devices):\n        return super(TPUSystemMetadata, cls).__new__(cls, num_cores, num_hosts, num_of_cores_per_host, topology, devices)",
    "docstring": "Describes some metadata about the TPU system. Attributes: num_cores: interger. Total number of TPU cores in the TPU system. num_hosts: interger. Total number of hosts (TPU workers) in the TPU system. num_of_cores_per_host: interger. Number of TPU cores per host (TPU worker). topology: an instance of , which describes the physical topology of TPU system. devices: a tuple of strings, which describes all the TPU devices in the system.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\tpu\\tpu_system_metadata.py",
    "ast_data": "ClassDef name:TPUSystemMetadata Call FunctionDef name:__new__ arg:cls arg:num_cores arg:num_hosts arg:num_of_cores_per_host arg:topology arg:devices arguments arg arg arg arg arg arg Return return:yes Call Call Call"
  },
  {
    "library": "numpy",
    "name": "lagvander3d",
    "source_code": "def lagvander3d(x, y, z, deg):\n    return pu._vander_nd_flat((lagvander, lagvander, lagvander), (x, y, z), deg)",
    "docstring": "Pseudo-Vandermonde matrix of given degrees. Returns the pseudo-Vandermonde matrix of degrees and sample points `lmnxyz`0 >> import numpy as np >>> from numpy.polynomial.laguerre import lagvander3d >>> x = np.array([0]) >>> y = np.array([2]) >>> z = np.array([0]) >>> lagvander3d(x, y, z, [2, 1, 3]) array([[ 1., 1., 1., 1., -1., -1., -1., -1., 1., 1., 1., 1., -1., -1., -1., -1., 1., 1., 1., 1., -1., -1., -1., -1.]])",
    "type": "function",
    "file_path": "numpy\\numpy\\polynomial\\laguerre.py",
    "ast_data": "FunctionDef name:lagvander3d arg:x arg:y arg:z arg:deg arguments arg arg arg arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "write_data",
    "source_code": "@abc.abstractmethod\ndef write_data(self, plan: SavePlan, planner: SavePlanner) -> Future[list[WriteResult]]:\n    pass",
    "docstring": "Write all items from `resolve_data` - They might be views or not contiguous. Only the projection needs to be saved. Args: plan (SavePlan): The save plan to execute. planner (SavePlanner): Planner object to be used to resolve items to data. Returns: A future that completes to a list of WriteResult",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\checkpoint\\storage.py",
    "ast_data": "FunctionDef name:write_data arg:self arg:plan arg:planner arguments arg arg arg"
  },
  {
    "library": "matplotlib",
    "name": "get_font_names",
    "source_code": "def get_font_names(self):\n    return list({font.name for font in self.ttflist})",
    "docstring": "Return the list of available fonts.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\font_manager.py",
    "ast_data": "FunctionDef name:get_font_names arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "deprecate_privatize_attribute",
    "source_code": "class deprecate_privatize_attribute:\n\n    def __init__(self, *args, **kwargs):\n        self.deprecator = deprecated(*args, **kwargs)\n\n    def __set_name__(self, owner, name):\n        setattr(owner, name, self.deprecator(property(lambda self: getattr(self, f'_{name}'), lambda self, value: setattr(self, f'_{name}', value)), name=name))",
    "docstring": "Helper to deprecate public access to an attribute (or method). This helper should only be used at class scope, as follows:: class Foo: attr = _deprecate_privatize_attribute(*args, **kwargs) where *all* parameters are forwarded to . This form makes `` (same name but with a leading underscore), with a deprecation warning. Note that the attribute name is derived from *the name this helper is assigned to*. This helper also works for deprecating methods.",
    "type": "class",
    "file_path": "matplotlib\\lib\\matplotlib\\_api\\deprecation.py",
    "ast_data": "ClassDef name:deprecate_privatize_attribute FunctionDef name:__init__ arg:self arguments arg arg arg Assign Call FunctionDef name:__set_name__ arg:self arg:owner arg:name arguments arg arg arg Call Call Call arguments arg Call arguments arg arg Call"
  },
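A minimal sketch of the class-scope usage described in the docstring, assuming matplotlib's internal deprecation helper is importable (it is private API; the version string passed as `since` is illustrative):

```python
from matplotlib._api.deprecation import deprecate_privatize_attribute

class Foo:
    # Public reads/writes of Foo().attr are forwarded to self._attr
    # with a deprecation warning; the name comes from __set_name__.
    attr = deprecate_privatize_attribute("3.6")

    def __init__(self):
        self._attr = 42

f = Foo()
print(f.attr)  # emits a MatplotlibDeprecationWarning, prints 42
```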
  {
    "library": "django",
    "name": "reduce",
    "source_code": "def reduce(self, operation, app_label):\n    if self.elidable:\n        return [operation]\n    elif operation.elidable:\n        return [self]\n    return False",
    "docstring": "Return either a list of operations the actual operation should be replaced with or a boolean that indicates whether or not the specified operation can be optimized across.",
    "type": "method",
    "file_path": "django\\django\\db\\migrations\\operations\\base.py",
    "ast_data": "FunctionDef name:reduce arg:self arg:operation arg:app_label arguments arg arg arg If Return return:yes If Return return:yes Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "set_converter",
    "source_code": "def set_converter(self, converter):\n    self._set_converter(converter)\n    self._converter_is_explicit = True",
    "docstring": "Set the unit converter for axis. Parameters ---------- converter :",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\axis.py",
    "ast_data": "FunctionDef name:set_converter arg:self arg:converter arguments arg arg Call Assign"
  },
  {
    "library": "sphinx",
    "name": "PyDecoratorMethod",
    "source_code": "class PyDecoratorMethod(PyMethod):\n\n    def run(self) -> list[Node]:\n        self.name = 'py:method'\n        return super().run()\n\n    def handle_signature(self, sig: str, signode: desc_signature) -> tuple[str, str]:\n        ret = super().handle_signature(sig, signode)\n        signode.insert(0, addnodes.desc_addname('@', '@'))\n        return ret\n\n    def needs_arglist(self) -> bool:\n        return False",
    "docstring": "Description of a decoratormethod.",
    "type": "class",
    "file_path": "sphinx\\sphinx\\domains\\python\\__init__.py",
    "ast_data": "ClassDef name:PyDecoratorMethod FunctionDef name:run arg:self arguments arg Assign Return return:yes Call Call FunctionDef name:handle_signature arg:self arg:sig arg:signode arguments arg arg arg Assign Call Call Call Call Return return:yes FunctionDef name:needs_arglist arg:self arguments arg Return return:yes"
  },
  {
    "library": "numpy",
    "name": "shape",
    "source_code": "def shape(obj):\n    return np.shape(getdata(obj))",
    "docstring": "maskedarray version of the numpy function.",
    "type": "function",
    "file_path": "numpy\\numpy\\ma\\core.py",
    "ast_data": "FunctionDef name:shape arg:obj arguments arg Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_TileGrad",
    "source_code": "@ops.RegisterGradient('Tile')\ndef _TileGrad(op: ops.Operation, grad):\n    input_shape = array_ops.shape(op.inputs[0], out_type=op.inputs[1].dtype)\n    split_shape = array_ops.reshape(array_ops.transpose(array_ops_stack.stack([op.inputs[1], input_shape])), [-1])\n    axes = math_ops.range(0, array_ops.size(split_shape), 2)\n    if isinstance(grad, indexed_slices_lib.IndexedSlices):\n        input_shape_0 = math_ops.cast(input_shape[0], grad.indices.dtype)\n        grad = math_ops.unsorted_segment_sum(grad.values, math_ops.mod(grad.indices, input_shape_0), input_shape_0)\n        split_shape = array_ops.concat([[1], split_shape[1:]], axis=0)\n    input_grad = math_ops.reduce_sum(array_ops.reshape(grad, split_shape), axes)\n    if not context.executing_eagerly():\n        input_grad.set_shape(op.inputs[0].get_shape())\n    return [input_grad, None]",
    "docstring": "Sum reduces grad along the tiled dimensions.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\array_grad.py",
    "ast_data": "FunctionDef name:_TileGrad arg:op arg:grad arguments arg arg Assign Call Assign Call Call Call Assign Call Call If Call Assign Call Assign Call Call Assign Call Assign Call Call If Call Call Call Return return:yes Call"
  },
  {
    "library": "kornia",
    "name": "decompose_essential_matrix",
    "source_code": "def decompose_essential_matrix(E_mat: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:\n    KORNIA_CHECK_SHAPE(E_mat, ['*', '3', '3'])\n    U, _, V = _torch_svd_cast(E_mat)\n    Vt = V.transpose(-2, -1)\n    mask = ones_like(E_mat)\n    mask[..., -1:] *= -1.0\n    maskt = mask.transpose(-2, -1)\n    U = where((torch.det(U) < 0.0)[..., None, None], U * mask, U)\n    Vt = where((torch.det(Vt) < 0.0)[..., None, None], Vt * maskt, Vt)\n    W = cross_product_matrix(torch.tensor([[0.0, 0.0, 1.0]]).type_as(E_mat))\n    W[..., 2, 2] += 1.0\n    U_W_Vt = U @ W @ Vt\n    U_Wt_Vt = U @ W.transpose(-2, -1) @ Vt\n    R1 = U_W_Vt\n    R2 = U_Wt_Vt\n    T = U[..., -1:]\n    return (R1, R2, T)",
    "docstring": "Decompose an essential matrix to possible rotations and translation. This function decomposes the essential matrix E using svd decomposition [96] and give the possible solutions: :math:. Args: E_mat: The essential matrix in the form of :math:. Returns: A tuple containing the first and second possible rotation matrices and the translation vector. The shape of the tensors with be same input :math:.",
    "type": "function",
    "file_path": "kornia\\kornia\\geometry\\epipolar\\essential.py",
    "ast_data": "FunctionDef name:decompose_essential_matrix arg:E_mat arguments arg Call Assign Call Assign Call Assign Call Assign Call Assign Call Compare Call Assign Call Compare Call Assign Call Call Call Assign Assign Call Assign Assign Assign Return return:yes"
  },
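A shape-only usage sketch, assuming `decompose_essential_matrix` is exported from `kornia.geometry.epipolar`; the identity is not a valid essential matrix, so this only demonstrates the batched output shapes:

```python
import torch
from kornia.geometry.epipolar import decompose_essential_matrix

E = torch.eye(3)[None]              # (1, 3, 3) batch of one
R1, R2, t = decompose_essential_matrix(E)
print(R1.shape, R2.shape, t.shape)  # (1, 3, 3) (1, 3, 3) (1, 3, 1)
```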
  {
    "library": "tensorflow",
    "name": "bessel_k0",
    "source_code": "@tf_export('math.special.bessel_k0')\n@dispatch.register_unary_elementwise_api\n@dispatch.add_dispatch_support\ndef bessel_k0(x, name=None):\n    with ops.name_scope(name, 'bessel_k0', [x]):\n        return gen_special_math_ops.bessel_k0(x)",
    "docstring": "Computes the Bessel k0 function of element-wise. Modified Bessel function of order 0. It is preferable to use the numerically stabler function instead. >>> tf.math.special.bessel_k0([0.5, 1., 2., 4.]).numpy() array([0.92441907, 0.42102444, 0.11389387, 0.01115968], dtype=float32) Args: x: A or . Must be one of the following types: , , . name: A name for the operation (optional). Returns: A or , respectively. Has the same type as . @compatibility(scipy) Equivalent to scipy.special.k0 @end_compatibility",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\special_math_ops.py",
    "ast_data": "FunctionDef name:bessel_k0 arg:x arg:name arguments arg arg With Call Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_get_custom_getter",
    "source_code": "def _get_custom_getter():\n\n    def inner_custom_getter(getter, *args, **kwargs):\n        cast_to_bfloat16 = False\n        requested_dtype = kwargs['dtype']\n        if requested_dtype == dtypes.bfloat16:\n            kwargs['dtype'] = dtypes.float32\n            cast_to_bfloat16 = True\n        var = getter(*args, **kwargs)\n        if cast_to_bfloat16:\n            var = math_ops.cast(var, dtypes.bfloat16)\n        return var\n    return inner_custom_getter",
    "docstring": "Returns a custom getter that this class's methods must be called under. All methods of this class must be called under a variable scope that was passed this custom getter. Example: Currently, this custom getter only does anything if self.use_tf_layers is True. In that case, it causes variables to be stored as dtype self.variable_type, then casted to the requested dtype, instead of directly storing the variable as the requested dtype.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\tpu\\bfloat16.py",
    "ast_data": "FunctionDef name:_get_custom_getter arguments FunctionDef name:inner_custom_getter arg:getter arguments arg arg arg Assign Assign If Compare Assign Assign Assign Call If Assign Call Return return:yes Return return:yes"
  },
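A hedged sketch of how such a getter is typically installed, assuming `_get_custom_getter` (above) is in scope and using the TF1-style `variable_scope` API, which accepts a `custom_getter`:

```python
import tensorflow.compat.v1 as tf
tf.disable_eager_execution()  # TF1-style graph-mode sketch

getter = _get_custom_getter()  # the function above, assumed in scope
with tf.variable_scope("model", custom_getter=getter):
    # Requested as bfloat16, stored as a float32 variable, cast on read.
    v = tf.get_variable("w", shape=[2, 2], dtype=tf.bfloat16)
print(v.dtype)  # bfloat16 (backed by a float32 variable)
```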
  {
    "library": "pytorch",
    "name": "named_modules",
    "source_code": "def named_modules(self, remove_duplicate: bool=True) -> Iterable[tuple[str, 'torch.nn.Module']]:\n    yield from self.module.named_modules(remove_duplicate=remove_duplicate)",
    "docstring": "Iterate over all the modules in the module.",
    "type": "method",
    "file_path": "pytorch\\torch\\nn\\utils\\_named_member_accessor.py",
    "ast_data": "FunctionDef name:named_modules arg:self arg:remove_duplicate arguments arg arg Call"
  },
  {
    "library": "tensorflow",
    "name": "convert_to_list",
    "source_code": "def convert_to_list(values, sparse_default_value=None):\n    if tf_utils.is_ragged(values):\n        if isinstance(values, ragged_tensor.RaggedTensor) and (not context.executing_eagerly()):\n            values = backend.get_session(values).run(values)\n        values = values.to_list()\n    if isinstance(values, (sparse_tensor.SparseTensor, sparse_tensor.SparseTensorValue)):\n        if sparse_default_value is None:\n            if dtypes.as_dtype(values.values.dtype) == dtypes.string:\n                sparse_default_value = ''\n            else:\n                sparse_default_value = -1\n        dense_tensor = sparse_ops.sparse_tensor_to_dense(values, default_value=sparse_default_value)\n        values = backend.get_value(dense_tensor)\n    if isinstance(values, tensor.Tensor):\n        values = backend.get_value(values)\n    if isinstance(values, np.ndarray):\n        values = values.tolist()\n    return values",
    "docstring": "Convert a TensorLike, CompositeTensor, or ndarray into a Python list.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\base_preprocessing_layer.py",
    "ast_data": "FunctionDef name:convert_to_list arg:values arg:sparse_default_value arguments arg arg If Call If BoolOp Call Call Assign Call Call Assign Call If Call If Compare If Compare Call Assign Assign Assign Call Assign Call If Call Assign Call If Call Assign Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "set_modules_to_forward_prefetch",
    "source_code": "def set_modules_to_forward_prefetch(self, modules: list[FSDPModule]) -> None:\n    _assert_all_fsdp_modules(modules)\n    self._get_fsdp_state()._states_to_forward_prefetch = [module._get_fsdp_state() for module in modules]",
    "docstring": "Sets the FSDP modules for which this FSDP module should explicitly prefetch all-gathers in forward. The prefetching runs after this module's all-gather copy-out. Passing a singleton list containing the next FSDP module gives the same all-gather overlap behavior as the default overlap behavior, except the prefetched all-gather is issued earlier from the CPU. Passing a list with at least length two is required for more aggressive overlap and will use more reserved memory. Args: modules (List[FSDPModule]): FSDP modules to prefetch.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\fsdp\\_fully_shard\\_fully_shard.py",
    "ast_data": "FunctionDef name:set_modules_to_forward_prefetch arg:self arg:modules arguments arg arg Call Assign Call Call"
  },
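A hedged sketch of the explicit-prefetch pattern, assuming an initialized process group and a recent PyTorch that exports `fully_shard` from `torch.distributed.fsdp` (wrapping a module makes it an `FSDPModule`):

```python
import torch.nn as nn
from torch.distributed.fsdp import fully_shard

layers = nn.ModuleList([nn.Linear(16, 16) for _ in range(4)])
model = nn.Sequential(*layers)
for layer in layers:
    fully_shard(layer)
fully_shard(model)

# Prefetch the next (up to) two layers' all-gathers during forward,
# trading reserved memory for more aggressive overlap.
for i, layer in enumerate(layers):
    nxt = list(layers[i + 1 : i + 3])
    if nxt:
        layer.set_modules_to_forward_prefetch(nxt)
```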
  {
    "library": "scikit-learn",
    "name": "unique_labels",
    "source_code": "def unique_labels(*ys):\n    ys = attach_unique(*ys, return_tuple=True)\n    xp, is_array_api_compliant = get_namespace(*ys)\n    if len(ys) == 0:\n        raise ValueError('No argument has been passed.')\n    ys_types = set((type_of_target(x) for x in ys))\n    if ys_types == {'binary', 'multiclass'}:\n        ys_types = {'multiclass'}\n    if len(ys_types) > 1:\n        raise ValueError('Mix type of y not allowed, got types %s' % ys_types)\n    label_type = ys_types.pop()\n    if label_type == 'multilabel-indicator' and len(set((check_array(y, accept_sparse=['csr', 'csc', 'coo']).shape[1] for y in ys))) > 1:\n        raise ValueError('Multi-label binary indicator input with different numbers of labels')\n    _unique_labels = _FN_UNIQUE_LABELS.get(label_type, None)\n    if not _unique_labels:\n        raise ValueError('Unknown label type: %s' % repr(ys))\n    if is_array_api_compliant:\n        unique_ys = xp.concat([_unique_labels(y, xp=xp) for y in ys])\n        return xp.unique_values(unique_ys)\n    ys_labels = set(chain.from_iterable(((i for i in _unique_labels(y, xp=xp)) for y in ys)))\n    if len(set((isinstance(label, str) for label in ys_labels))) > 1:\n        raise ValueError('Mix of label input types (string and number)')\n    return xp.asarray(sorted(ys_labels))",
    "docstring": "Extract an ordered array of unique labels. We don't allow: - mix of multilabel and multiclass (single label) targets - mix of label indicator matrix and anything else, because there are no explicit labels) - mix of label indicator matrices of different sizes - mix of string and integer labels At the moment, we also don't allow \"multiclass-multioutput\" input type. Parameters ---------- *ys : array-likes Label values. Returns ------- out : ndarray of shape (n_unique_labels,) An ordered array of unique labels. Examples -------- >>> from sklearn.utils.multiclass import unique_labels >>> unique_labels([3, 5, 5, 5, 7, 7]) array([3, 5, 7]) >>> unique_labels([1, 2, 3, 4], [2, 2, 3, 4]) array([1, 2, 3, 4]) >>> unique_labels([1, 2, 10], [5, 11]) array([ 1, 2, 5, 10, 11])",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\utils\\multiclass.py",
    "ast_data": "FunctionDef name:unique_labels arguments arg Assign Call Assign Call If Compare Call Raise Call Assign Call Call If Compare Assign If Compare Call Raise Call Assign Call If BoolOp Compare Compare Call Call Call Raise Call Assign Call If Raise Call Call If Assign Call Call Return return:yes Call Assign Call Call Call If Compare Call Call Call Raise Call Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "getsourcelines",
    "source_code": "def getsourcelines(object):\n    return _inspect.getsourcelines(tf_decorator.unwrap(object)[1])",
    "docstring": "TFDecorator-aware replacement for inspect.getsourcelines.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\util\\tf_inspect.py",
    "ast_data": "FunctionDef name:getsourcelines arg:object arguments arg Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_get_imports_for_module",
    "source_code": "def _get_imports_for_module(module: str, output_package: str, symbols_by_module: Mapping[str, set[_Entrypoint]], generated_imports_by_module: Mapping[str, set[str]], file_prefixes_to_strip: Sequence[str], module_prefix: str, use_lazy_loading: bool, subpackage_rewrite: Optional[str]) -> str:\n    content = ''\n    symbol_imports = list(symbols_by_module[module])\n    symbol_imports = sorted(symbol_imports, key=lambda s: f'{s.exported_symbol.file_name}:{s.name}')\n    generated_imports = sorted(generated_imports_by_module[module])\n    for imp in generated_imports:\n        if subpackage_rewrite:\n            imp = imp.replace(output_package, subpackage_rewrite)\n        last_dot = imp.rfind('.')\n        if use_lazy_loading:\n            content += f\"  '{imp[last_dot + 1:]}': ('', '{imp}'),\\n\"\n        else:\n            content += f'from {imp[:last_dot]} import {imp[last_dot + 1:]}\\n'\n    for s in symbol_imports:\n        content += f'{s.get_import(file_prefixes_to_strip, module_prefix, use_lazy_loading=use_lazy_loading)}\\n'\n    return content",
    "docstring": "Returns the imports for a module. Args: module: The module to get imports for. output_package: The package to use for the imports. symbols_by_module: The symbols that should be exposed by each module. generated_imports_by_module: The sub-modules that should be exposed by each module. file_prefixes_to_strip: The prefixes to strip from the file names of the imports. module_prefix: A prefix to add to the non-generated imports. use_lazy_loading: Whether to use lazy loading or not. subpackage_rewrite: The subpackage to use for the imports.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\tools\\api\\generator2\\generator\\generator.py",
    "ast_data": "FunctionDef name:_get_imports_for_module arg:module arg:output_package arg:symbols_by_module arg:generated_imports_by_module arg:file_prefixes_to_strip arg:module_prefix arg:use_lazy_loading arg:subpackage_rewrite arguments arg arg arg arg arg arg arg arg Assign Assign Call Assign Call arguments arg Assign Call For If Assign Call Assign Call If For Call Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "barycenter_kneighbors_graph",
    "source_code": "def barycenter_kneighbors_graph(X, n_neighbors, reg=0.001, n_jobs=None):\n    knn = NearestNeighbors(n_neighbors=n_neighbors + 1, n_jobs=n_jobs).fit(X)\n    X = knn._fit_X\n    n_samples = knn.n_samples_fit_\n    ind = knn.kneighbors(X, return_distance=False)[:, 1:]\n    data = barycenter_weights(X, X, ind, reg=reg)\n    indptr = np.arange(0, n_samples * n_neighbors + 1, n_neighbors)\n    return csr_matrix((data.ravel(), ind.ravel(), indptr), shape=(n_samples, n_samples))",
    "docstring": "Computes the barycenter weighted graph of k-Neighbors for points in X Parameters ---------- X : {array-like, NearestNeighbors} Sample data, shape = (n_samples, n_features), in the form of a numpy array or a NearestNeighbors object. n_neighbors : int Number of neighbors for each sample. reg : float, default=1e-3 Amount of regularization when solving the least-squares problem. Only relevant if mode='barycenter'. If None, use the default. n_jobs : int or None, default=None The number of parallel jobs to run for neighbors search. `joblib.parallel_backendGlossary ` for more details. Returns ------- A : sparse matrix in CSR format, shape = [n_samples, n_samples] A[i, j] is assigned the weight of edge that connects i to j. See Also -------- sklearn.neighbors.kneighbors_graph sklearn.neighbors.radius_neighbors_graph",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\manifold\\_locally_linear.py",
    "ast_data": "FunctionDef name:barycenter_kneighbors_graph arg:X arg:n_neighbors arg:reg arg:n_jobs arguments arg arg arg arg Assign Call Call Assign Assign Assign Call Assign Call Assign Call Return return:yes Call Call Call"
  },
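A usage sketch on toy data, assuming the private helper is importable from `sklearn.manifold._locally_linear` (it is not public API). The least-squares barycenter weights are constrained to sum to one per row:

```python
import numpy as np
from sklearn.manifold._locally_linear import barycenter_kneighbors_graph

X = np.random.RandomState(0).rand(10, 3)
W = barycenter_kneighbors_graph(X, n_neighbors=3)
print(W.shape)                            # (10, 10), CSR sparse
print(np.asarray(W.sum(axis=1)).ravel())  # each row sums to ~1.0
```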
  {
    "library": "tensorflow",
    "name": "NodeAnno",
    "source_code": "class NodeAnno(NoValue):\n    IS_LOCAL = 'Symbol is local to the function scope being analyzed.'\n    IS_PARAM = 'Symbol is a parameter to the function being analyzed.'\n    IS_MODIFIED_SINCE_ENTRY = 'Symbol has been explicitly replaced in the current function scope.'\n    ARGS_SCOPE = 'The scope for the argument list of a function call.'\n    COND_SCOPE = 'The scope for the test node of a conditional statement.'\n    ITERATE_SCOPE = 'The scope for the iterate assignment of a for loop.'\n    ARGS_AND_BODY_SCOPE = 'The scope for the main body of a function or lambda, including its arguments.'\n    BODY_SCOPE = 'The scope for the main body of a statement (True branch for if statements, main body for loops).'\n    ORELSE_SCOPE = 'The scope for the orelse body of a statement (False branch for if statements, orelse body for loops).'",
    "docstring": "Additional annotations used by the static analyzer. These are in addition to the basic annotations declared in anno.py.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\autograph\\pyct\\static_analysis\\annos.py",
    "ast_data": "ClassDef name:NodeAnno Assign Assign Assign Assign Assign Assign Assign Assign Assign"
  },
  {
    "library": "pandas",
    "name": "obj_to_write",
    "source_code": "@property\n@abstractmethod\ndef obj_to_write(self) -> NDFrame | Mapping[IndexLabel, Any]:\n    pass",
    "docstring": "Object to write in JSON format.",
    "type": "method",
    "file_path": "pandas\\pandas\\io\\json\\_json.py",
    "ast_data": "FunctionDef name:obj_to_write arg:self arguments arg"
  },
  {
    "library": "django",
    "name": "alter_db_table",
    "source_code": "def alter_db_table(self, model, old_db_table, new_db_table):\n    if old_db_table == new_db_table or (self.connection.features.ignores_table_name_case and old_db_table.lower() == new_db_table.lower()):\n        return\n    self.execute(self.sql_rename_table % {'old_table': self.quote_name(old_db_table), 'new_table': self.quote_name(new_db_table)})\n    for sql in self.deferred_sql:\n        if isinstance(sql, Statement):\n            sql.rename_table_references(old_db_table, new_db_table)",
    "docstring": "Rename the table a model points to.",
    "type": "method",
    "file_path": "django\\django\\db\\backends\\base\\schema.py",
    "ast_data": "FunctionDef name:alter_db_table arg:self arg:model arg:old_db_table arg:new_db_table arguments arg arg arg arg If BoolOp Compare BoolOp Compare Call Call Return return:no Call Call Call For If Call Call"
  },
  {
    "library": "pytorch",
    "name": "treespec_dumps",
    "source_code": "def treespec_dumps(treespec: TreeSpec, protocol: Optional[int]=None) -> str:\n    if not _is_pytreespec_instance(treespec):\n        raise TypeError(f'treespec_dumps(treespec): Expected `treespec` to be instance of PyTreeSpec but got item of type {type(treespec)}.')\n    dummy_tree = tree_unflatten([0] * treespec.num_leaves, treespec)\n    orig_treespec = python_pytree.tree_structure(dummy_tree)\n    return python_pytree.treespec_dumps(orig_treespec, protocol=protocol)",
    "docstring": "Serialize a treespec to a JSON string.",
    "type": "function",
    "file_path": "pytorch\\torch\\utils\\_cxx_pytree.py",
    "ast_data": "FunctionDef name:treespec_dumps arg:treespec arg:protocol arguments arg arg If Call Raise Call Call Assign Call Assign Call Return return:yes Call"
  },
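A round-trip sketch, assuming the private `torch.utils._cxx_pytree` module is available in this build (it requires the optional `optree` dependency):

```python
import torch.utils._cxx_pytree as pytree

# Flatten a container, serialize its structure, and round-trip it.
_, spec = pytree.tree_flatten({"a": [1, 2], "b": 3})
s = pytree.treespec_dumps(spec)    # JSON string
spec2 = pytree.treespec_loads(s)
assert spec == spec2
```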
  {
    "library": "django",
    "name": "wait_for_apps_ready",
    "source_code": "def wait_for_apps_ready(self, app_reg, django_main_thread):\n    while django_main_thread.is_alive():\n        if app_reg.ready_event.wait(timeout=0.1):\n            return True\n    else:\n        logger.debug('Main Django thread has terminated before apps are ready.')\n        return False",
    "docstring": "Wait until Django reports that the apps have been loaded. If the given thread has terminated before the apps are ready, then a SyntaxError or other non-recoverable error has been raised. In that case, stop waiting for the apps_ready event and continue processing. Return True if the thread is alive and the ready event has been triggered, or False if the thread is terminated while waiting for the event.",
    "type": "method",
    "file_path": "django\\django\\utils\\autoreload.py",
    "ast_data": "FunctionDef name:wait_for_apps_ready arg:self arg:app_reg arg:django_main_thread arguments arg arg arg While Call If Call Return return:yes Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_apply_py_to_tf_passes",
    "source_code": "def _apply_py_to_tf_passes(node, ctx):\n    node = return_statements.transform(node, ctx, False)\n    node = control_flow.transform(node, ctx)\n    return node",
    "docstring": "Apply transformations from PyToTF to match tf.function tracing.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\compiler\\mlir\\tfr\\python\\tfr_gen.py",
    "ast_data": "FunctionDef name:_apply_py_to_tf_passes arg:node arg:ctx arguments arg arg Assign Call Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "MeanAbsoluteError",
    "source_code": "class MeanAbsoluteError(MeanMetricWrapper):\n\n    def __init__(self, name='mean_absolute_error', dtype=None):\n        super(MeanAbsoluteError, self).__init__(mean_absolute_error, name, dtype=dtype)",
    "docstring": "Computes the mean absolute error between the labels and predictions. Args: name: (Optional) string name of the metric instance. dtype: (Optional) data type of the metric result. Standalone usage: >>> m = tf.keras.metrics.MeanAbsoluteError() >>> m.update_state([[0, 1], [0, 0]], [[1, 1], [0, 0]]) >>> m.result().numpy() 0.25 >>> m.reset_state() >>> m.update_state([[0, 1], [0, 0]], [[1, 1], [0, 0]], ... sample_weight=[1, 0]) >>> m.result().numpy() 0.5 Usage with API:",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\metrics.py",
    "ast_data": "ClassDef name:MeanAbsoluteError FunctionDef name:__init__ arg:self arg:name arg:dtype arguments arg arg arg Call Call"
  },
  {
    "library": "numpy",
    "name": "flatnotmasked_contiguous",
    "source_code": "def flatnotmasked_contiguous(a):\n    m = getmask(a)\n    if m is nomask:\n        return [slice(0, a.size)]\n    i = 0\n    result = []\n    for k, g in itertools.groupby(m.ravel()):\n        n = len(list(g))\n        if not k:\n            result.append(slice(i, i + n))\n        i += n\n    return result",
    "docstring": "Find contiguous unmasked data in a masked array. Parameters ---------- a : array_like The input array. Returns ------- slice_list : list A sorted sequence of objects (start index, end index). See Also -------- flatnotmasked_edges, notmasked_contiguous, notmasked_edges clump_masked, clump_unmasked Notes ----- Only accepts 2-D arrays at most. Examples -------- >>> import numpy as np >>> a = np.ma.arange(10) >>> np.ma.flatnotmasked_contiguous(a) [slice(0, 10, None)] >>> mask = (a 8) | (a == 5) >>> a[mask] = np.ma.masked >>> np.array(a[~a.mask]) array([3, 4, 6, 7, 8]) >>> np.ma.flatnotmasked_contiguous(a) [slice(3, 5, None), slice(6, 9, None)] >>> a[:] = np.ma.masked >>> np.ma.flatnotmasked_contiguous(a) []",
    "type": "function",
    "file_path": "numpy\\numpy\\ma\\extras.py",
    "ast_data": "FunctionDef name:flatnotmasked_contiguous arg:a arguments arg Assign Call If Compare Return return:yes Call Assign Assign For Call Call Assign Call Call If Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "args_to_mixed_eager_tensors",
    "source_code": "def args_to_mixed_eager_tensors(lists, ctx):\n    del ctx\n    assert len(lists) > 1\n    lists_ret = [[]]\n    for l in lists[1:]:\n        if len(l) != len(lists[0]):\n            raise ValueError('Expected list arguments to be the same length: %d != %d (%r vs. %r).' % (len(lists[0]), len(l), lists[0], l))\n        lists_ret.append([])\n    types = []\n    for i in range(len(lists[0])):\n        dtype = None\n        for l in lists:\n            if isinstance(l[i], core_types.Value):\n                dtype = l[i].dtype\n                break\n        if dtype is None:\n            lists_ret[0].append(tensor_conversion_registry.convert(lists[0][i]))\n            dtype = lists_ret[0][i].dtype\n            for j in range(1, len(lists)):\n                lists_ret[j].append(tensor_conversion_registry.convert(lists[j][i], dtype=dtype))\n        else:\n            for j in range(len(lists)):\n                lists_ret[j].append(tensor_conversion_registry.convert(lists[j][i], dtype=dtype))\n        types.append(dtype.as_datatype_enum)\n    return (types, lists_ret)",
    "docstring": "Converts a list of same-length lists of values to eager tensors.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\execute.py",
    "ast_data": "FunctionDef name:args_to_mixed_eager_tensors arg:lists arg:ctx arguments arg arg Compare Call Assign For If Compare Call Call Raise Call Call Call Call Assign For Call Call Assign For If Call Assign If Compare Call Call Assign For Call Call Call Call For Call Call Call Call Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "get_angles",
    "source_code": "def get_angles(self):\n    return np.rad2deg(self._angles)",
    "docstring": "Get the angles of the first axes, degrees CCW from the x-axis.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\collections.py",
    "ast_data": "FunctionDef name:get_angles arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "__deepcopy__",
    "source_code": "def __deepcopy__(self, memo):\n    with distribute_lib.enter_or_assert_strategy(self._distribute_strategy):\n        new_values = []\n        for value in self._values:\n            with ops.device(value.device):\n                new_values.append(copy.deepcopy(value, memo))\n    copied_variable = type(self)(strategy=self._distribute_strategy, values=new_values, aggregation=self._aggregation, var_policy=copy.deepcopy(self._policy, memo))\n    memo[id(self)] = copied_variable\n    return copied_variable",
    "docstring": "Perform a deepcopy of the . Unlike the deepcopy of a regular tf.Variable, this keeps the original strategy and devices of the . To avoid confusion with the behavior of deepcopy on a regular (which does copy into new devices), we only allow a deepcopy of a within its originating strategy scope. Args: memo: The memoization object for . Returns: A deep copy of the current . Raises: RuntimeError: If trying to deepcopy into a different strategy.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\values.py",
    "ast_data": "FunctionDef name:__deepcopy__ arg:self arg:memo arguments arg arg With Call Assign For With Call Call Call Assign Call Call Call Assign Call Return return:yes"
  },
  {
    "library": "django",
    "name": "__init__",
    "source_code": "def __init__(self, expression, pos, length=None, **extra):\n    if not hasattr(pos, 'resolve_expression'):\n        if pos < 1:\n            raise ValueError(\"'pos' must be greater than 0\")\n    expressions = [expression, pos]\n    if length is not None:\n        expressions.append(length)\n    super().__init__(*expressions, **extra)",
    "docstring": "expression: the name of a field, or an expression returning a string pos: an integer > 0, or an expression returning an integer length: an optional number of characters to return",
    "type": "method",
    "file_path": "django\\django\\db\\models\\functions\\text.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:expression arg:pos arg:length arguments arg arg arg arg arg If Call If Compare Raise Call Assign If Compare Call Call Call"
  },
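The signature and module path match `django.db.models.functions.Substr`, so here is a hedged usage sketch under that assumption (`Author` is a hypothetical model; running it requires a configured Django project):

```python
from django.db.models.functions import Substr

# First 5 characters of each name; pos is 1-based, length is optional.
qs = Author.objects.annotate(prefix=Substr("name", 1, 5))

try:
    Substr("name", 0)
except ValueError as e:
    print(e)  # 'pos' must be greater than 0
```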
  {
    "library": "kornia",
    "name": "generate_patch_grid_from_normalized_LAF",
    "source_code": "def generate_patch_grid_from_normalized_LAF(img: Tensor, LAF: Tensor, PS: int=32) -> Tensor:\n    KORNIA_CHECK_LAF(LAF)\n    B, N, _, _ = LAF.size()\n    _, ch, h, w = img.size()\n    LAF_renorm = denormalize_laf(LAF, img)\n    grid = F.affine_grid(LAF_renorm.view(B * N, 2, 3), [B * N, ch, PS, PS], align_corners=False)\n    grid[..., :, 0] = 2.0 * grid[..., :, 0].clone() / float(w - 1) - 1.0\n    grid[..., :, 1] = 2.0 * grid[..., :, 1].clone() / float(h - 1) - 1.0\n    return grid",
    "docstring": "Generate affine grid. Args: img: image tensor of shape :math:. LAF: laf with shape :math:. PS: patch size to be extracted. Returns: grid :math:",
    "type": "function",
    "file_path": "kornia\\kornia\\feature\\laf.py",
    "ast_data": "FunctionDef name:generate_patch_grid_from_normalized_LAF arg:img arg:LAF arg:PS arguments arg arg arg Call Assign Call Assign Call Assign Call Assign Call Call Assign Call Call Assign Call Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "ShowBase",
    "source_code": "class ShowBase(_Backend):\n\n    def __call__(self, block=None):\n        return self.show(block=block)",
    "docstring": "Simple base class to generate a `` method.",
    "type": "class",
    "file_path": "matplotlib\\lib\\matplotlib\\backend_bases.py",
    "ast_data": "ClassDef name:ShowBase FunctionDef name:__call__ arg:self arg:block arguments arg arg Return return:yes Call"
  },
  {
    "library": "cryptography",
    "name": "sign",
    "source_code": "def sign(self, private_key: CertificateIssuerPrivateKeyTypes, algorithm: _AllowedHashTypes | None, backend: typing.Any=None, *, rsa_padding: padding.PSS | padding.PKCS1v15 | None=None, ecdsa_deterministic: bool | None=None) -> Certificate:\n    if self._subject_name is None:\n        raise ValueError('A certificate must have a subject name')\n    if self._issuer_name is None:\n        raise ValueError('A certificate must have an issuer name')\n    if self._serial_number is None:\n        raise ValueError('A certificate must have a serial number')\n    if self._not_valid_before is None:\n        raise ValueError('A certificate must have a not valid before time')\n    if self._not_valid_after is None:\n        raise ValueError('A certificate must have a not valid after time')\n    if self._public_key is None:\n        raise ValueError('A certificate must have a public key')\n    if rsa_padding is not None:\n        if not isinstance(rsa_padding, (padding.PSS, padding.PKCS1v15)):\n            raise TypeError('Padding must be PSS or PKCS1v15')\n        if not isinstance(private_key, rsa.RSAPrivateKey):\n            raise TypeError('Padding is only supported for RSA keys')\n    if ecdsa_deterministic is not None:\n        if not isinstance(private_key, ec.EllipticCurvePrivateKey):\n            raise TypeError('Deterministic ECDSA is only supported for EC keys')\n    return rust_x509.create_x509_certificate(self, private_key, algorithm, rsa_padding, ecdsa_deterministic)",
    "docstring": "Signs the certificate using the CA's private key.",
    "type": "method",
    "file_path": "cryptography\\src\\cryptography\\x509\\base.py",
    "ast_data": "FunctionDef name:sign arg:self arg:private_key arg:algorithm arg:backend arguments arg arg arg arg arg arg If Compare Raise Call If Compare Raise Call If Compare Raise Call If Compare Raise Call If Compare Raise Call If Compare Raise Call If Compare If Call Raise Call If Call Raise Call If Compare If Call Raise Call Return return:yes Call"
  },
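A self-contained sketch of the signing flow: build a minimal self-signed certificate and call the `sign` method shown above.

```python
import datetime
from cryptography import x509
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.asymmetric import ec
from cryptography.x509.oid import NameOID

key = ec.generate_private_key(ec.SECP256R1())
name = x509.Name([x509.NameAttribute(NameOID.COMMON_NAME, "example.com")])
now = datetime.datetime.now(datetime.timezone.utc)

cert = (
    x509.CertificateBuilder()
    .subject_name(name)
    .issuer_name(name)                      # self-signed
    .public_key(key.public_key())
    .serial_number(x509.random_serial_number())
    .not_valid_before(now)
    .not_valid_after(now + datetime.timedelta(days=30))
    .sign(key, hashes.SHA256())             # the method shown above
)
print(cert.subject)
```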
  {
    "library": "tensorflow",
    "name": "_merge",
    "source_code": "def _merge(self, old, new):\n    if old is None:\n        return new\n    elif new is not None and old is not new:\n        raise ValueError('Incompatible values: %s != %s' % (old, new))\n    return old",
    "docstring": "Helper to merge which handles merging one value.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\distributions\\bijector_impl.py",
    "ast_data": "FunctionDef name:_merge arg:self arg:old arg:new arguments arg arg arg If Compare Return return:yes If BoolOp Compare Compare Raise Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "guard_or_true",
    "source_code": "def guard_or_true(a: BoolLikeType) -> bool:\n    return _guard_or(a, True)",
    "docstring": "Try to guard a, if data dependent error encountered just return true.",
    "type": "function",
    "file_path": "pytorch\\torch\\fx\\experimental\\symbolic_shapes.py",
    "ast_data": "FunctionDef name:guard_or_true arg:a arguments arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "save_jit_module_to_flatbuffer",
    "source_code": "def save_jit_module_to_flatbuffer(m, f, _extra_files=None):\n    extra_files = _extra_files\n    if extra_files is None:\n        extra_files = {}\n    if isinstance(f, (str, os.PathLike)):\n        f = os.fspath(f)\n        torch._C._save_jit_module(m._c, f, extra_files)\n    else:\n        s = torch._C._save_jit_module_to_bytes(m._c, extra_files)\n        f.write(s)",
    "docstring": "Save an offline version of this module for use in a separate process. The saved module serializes all of the methods, submodules, parameters, and attributes of this module. It can be loaded into the C++ API using `torch.jit.jit_module_from_flatbufferScriptModuletorch.loadScriptModule` to save. f: A string for file path Example: .. testcode:: import torch import io class MyModule(torch.nn.Module): def forward(self, x): return x + 10 m = torch.jit.script(MyModule()) # Save to file torch.jit.save_jit_module_to_flatbuffer(m, 'scriptmodule.ff')",
    "type": "function",
    "file_path": "pytorch\\torch\\jit\\_serialization.py",
    "ast_data": "FunctionDef name:save_jit_module_to_flatbuffer arg:m arg:f arg:_extra_files arguments arg arg arg Assign If Compare Assign If Call Assign Call Call Assign Call Call"
  },
  {
    "library": "scikit-learn",
    "name": "linear_kernel",
    "source_code": "@validate_params({'X': ['array-like', 'sparse matrix'], 'Y': ['array-like', 'sparse matrix', None], 'dense_output': ['boolean']}, prefer_skip_nested_validation=True)\ndef linear_kernel(X, Y=None, dense_output=True):\n    X, Y = check_pairwise_arrays(X, Y)\n    return safe_sparse_dot(X, Y.T, dense_output=dense_output)",
    "docstring": "Compute the linear kernel between X and Y. Read more in the :ref:. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples_X, n_features) A feature array. Y : {array-like, sparse matrix} of shape (n_samples_Y, n_features), default=None An optional second feature array. If , uses . dense_output : bool, default=True Whether to return dense output even when the input is sparse. If `X @ Y.T`. Examples -------- >>> from sklearn.metrics.pairwise import linear_kernel >>> X = [[0, 0, 0], [1, 1, 1]] >>> Y = [[1, 0, 0], [1, 1, 0]] >>> linear_kernel(X, Y) array([[0., 0.], [1., 2.]])",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\metrics\\pairwise.py",
    "ast_data": "FunctionDef name:linear_kernel arg:X arg:Y arg:dense_output arguments arg arg arg Assign Call Return return:yes Call Call"
  },
  {
    "library": "pandas",
    "name": "apply",
    "source_code": "def apply(self) -> DataFrame | Series:\n    if is_list_like(self.func):\n        if self.engine == 'numba':\n            raise NotImplementedError(\"the 'numba' engine doesn't support lists of callables yet\")\n        return self.apply_list_or_dict_like()\n    if len(self.columns) == 0 and len(self.index) == 0:\n        return self.apply_empty_result()\n    if isinstance(self.func, str):\n        if self.engine == 'numba':\n            raise NotImplementedError(\"the 'numba' engine doesn't support using a string as the callable function\")\n        return self.apply_str()\n    elif isinstance(self.func, np.ufunc):\n        if self.engine == 'numba':\n            raise NotImplementedError(\"the 'numba' engine doesn't support using a numpy ufunc as the callable function\")\n        with np.errstate(all='ignore'):\n            results = self.obj._mgr.apply('apply', func=self.func)\n        return self.obj._constructor_from_mgr(results, axes=results.axes)\n    if self.result_type == 'broadcast':\n        if self.engine == 'numba':\n            raise NotImplementedError(\"the 'numba' engine doesn't support result_type='broadcast'\")\n        return self.apply_broadcast(self.obj)\n    elif not all(self.obj.shape):\n        return self.apply_empty_result()\n    elif self.raw:\n        return self.apply_raw(engine=self.engine, engine_kwargs=self.engine_kwargs)\n    return self.apply_standard()",
    "docstring": "compute the results",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\apply.py",
    "ast_data": "FunctionDef name:apply arg:self arguments arg If Call If Compare Raise Call Return return:yes Call If BoolOp Compare Call Compare Call Return return:yes Call If Call If Compare Raise Call Return return:yes Call If Call If Compare Raise Call With Call Assign Call Return return:yes Call If Compare If Compare Raise Call Return return:yes Call If Call Return return:yes Call If Return return:yes Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "scalar_summary",
    "source_code": "@deprecated('2016-11-30', 'Please switch to tf.summary.scalar. Note that tf.summary.scalar uses the node name instead of the tag. This means that TensorFlow will automatically de-duplicate summary names based on the scope they are created in. Also, passing a tensor or list of tags to a scalar summary op is no longer supported.')\ndef scalar_summary(tags, values, collections=None, name=None):\n    with ops.name_scope(name, 'ScalarSummary', [tags, values]) as scope:\n        val = gen_logging_ops.scalar_summary(tags=tags, values=values, name=scope)\n        _Collect(val, collections, [ops.GraphKeys.SUMMARIES])\n    return val",
    "docstring": "Outputs a protocol buffer with scalar values. This ops is deprecated. Please switch to tf.summary.scalar. For an explanation of why this op was deprecated, and information on how to migrate, look ['here']( The input and must have the same shape. The generated summary has a summary value for each tag-value pair in and . Args: tags: A . Tags for the summaries. values: A real numeric Tensor. Values for the summaries. collections: Optional list of graph collections keys. The new summary op is added to these collections. Defaults to . name: A name for the operation (optional). Returns: A scalar of type . The serialized protocol buffer.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\logging_ops.py",
    "ast_data": "FunctionDef name:scalar_summary arg:tags arg:values arg:collections arg:name arguments arg arg arg arg With Call Assign Call Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "gen_tvar",
    "source_code": "def gen_tvar(curr):\n    curr += 1\n    return (TVar(curr), curr)",
    "docstring": "Generate a tensor variable :param curr: The current counter :return: a tensor variable and the updated counter",
    "type": "function",
    "file_path": "pytorch\\torch\\fx\\experimental\\migrate_gradual_types\\util.py",
    "ast_data": "FunctionDef name:gen_tvar arg:curr arguments arg Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "fit",
    "source_code": "def fit(self, X, y):\n    return super().fit(X, y)",
    "docstring": "Fit a semi-supervised label propagation model to X. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) Training data, where is the number of samples and is the number of features. y : array-like of shape (n_samples,) Target class values with unlabeled points marked as -1. All unlabeled samples will be transductively assigned labels internally, which are stored in . Returns ------- self : object Returns the instance itself.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\semi_supervised\\_label_propagation.py",
    "ast_data": "FunctionDef name:fit arg:self arg:X arg:y arguments arg arg arg Return return:yes Call Call"
  },
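A usage sketch of the semi-supervised fit contract: `-1` marks unlabeled points, and the transductively assigned labels land in `transduction_`:

```python
import numpy as np
from sklearn.semi_supervised import LabelPropagation

X = np.array([[0.0], [0.1], [5.0], [5.1]])
y = np.array([0, -1, 1, -1])         # two labeled, two unlabeled
model = LabelPropagation().fit(X, y)
print(model.transduction_)           # e.g. [0 0 1 1]
```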
  {
    "library": "scikit-learn",
    "name": "GaussianRandomProjection",
    "source_code": "class GaussianRandomProjection(BaseRandomProjection):\n\n    def __init__(self, n_components='auto', *, eps=0.1, compute_inverse_components=False, random_state=None):\n        super().__init__(n_components=n_components, eps=eps, compute_inverse_components=compute_inverse_components, random_state=random_state)\n\n    def _make_random_matrix(self, n_components, n_features):\n        random_state = check_random_state(self.random_state)\n        return _gaussian_random_matrix(n_components, n_features, random_state=random_state)\n\n    def transform(self, X):\n        check_is_fitted(self)\n        X = validate_data(self, X, accept_sparse=['csr', 'csc'], reset=False, dtype=[np.float64, np.float32])\n        return X @ self.components_.T",
    "docstring": "Reduce dimensionality through Gaussian random projection. The components of the random matrix are drawn from N(0, 1 / n_components). Read more in the :ref:. .. versionadded:: 0.13 Parameters ---------- n_components : int or 'auto', default='auto' Dimensionality of the target projection space. n_components can be automatically adjusted according to the number of samples in the dataset and the bound given by the Johnson-Lindenstrauss lemma. In that case the quality of the embedding is controlled by the `n_componentsGlossary compute_inverse_componentsfitn_features_in_fitX` has feature names that are all strings. .. versionadded:: 1.0 See Also -------- SparseRandomProjection : Reduce dimensionality through sparse random projection. Examples -------- >>> import numpy as np >>> from sklearn.random_projection import GaussianRandomProjection >>> rng = np.random.RandomState(42) >>> X = rng.rand(25, 3000) >>> transformer = GaussianRandomProjection(random_state=rng) >>> X_new = transformer.fit_transform(X) >>> X_new.shape (25, 2759)",
    "type": "class",
    "file_path": "scikit-learn\\sklearn\\random_projection.py",
    "ast_data": "ClassDef name:GaussianRandomProjection FunctionDef name:__init__ arg:self arg:n_components arguments arg arg arg arg arg Call Call FunctionDef name:_make_random_matrix arg:self arg:n_components arg:n_features arguments arg arg arg Assign Call Return return:yes Call FunctionDef name:transform arg:self arg:X arguments arg arg Call Assign Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "_ThetaShift",
    "source_code": "class _ThetaShift(mtransforms.ScaledTranslation):\n\n    def __init__(self, axes, pad, mode):\n        super().__init__(pad, pad, axes.get_figure(root=False).dpi_scale_trans)\n        self.set_children(axes._realViewLim)\n        self.axes = axes\n        self.mode = mode\n        self.pad = pad\n    __str__ = mtransforms._make_str_method('axes', 'pad', 'mode')\n\n    def get_matrix(self):\n        if self._invalid:\n            if self.mode == 'rlabel':\n                angle = np.deg2rad(self.axes.get_rlabel_position() * self.axes.get_theta_direction()) + self.axes.get_theta_offset() - np.pi / 2\n            elif self.mode == 'min':\n                angle = self.axes._realViewLim.xmin - np.pi / 2\n            elif self.mode == 'max':\n                angle = self.axes._realViewLim.xmax + np.pi / 2\n            self._t = (self.pad * np.cos(angle) / 72, self.pad * np.sin(angle) / 72)\n        return super().get_matrix()",
    "docstring": "Apply a padding shift based on axes theta limits. This is used to create padding for radial ticks. Parameters ---------- axes : The owning Axes; used to determine limits. pad : float The padding to apply, in points. mode : {'min', 'max', 'rlabel'} Whether to shift away from the start (``).",
    "type": "class",
    "file_path": "matplotlib\\lib\\matplotlib\\projections\\polar.py",
    "ast_data": "ClassDef name:_ThetaShift FunctionDef name:__init__ arg:self arg:axes arg:pad arg:mode arguments arg arg arg arg Call Call Call Call Assign Assign Assign Assign Call FunctionDef name:get_matrix arg:self arguments arg If If Compare Assign Call Call Call Call If Compare Assign If Compare Assign Assign Call Call Return return:yes Call Call"
  },
  {
    "library": "scipy",
    "name": "copy",
    "source_code": "def copy(self):\n    return self.__class__(self, copy=True)",
    "docstring": "Returns a copy of this array/matrix. No data/indices will be shared between the returned value and current array/matrix.",
    "type": "method",
    "file_path": "scipy\\scipy\\sparse\\_base.py",
    "ast_data": "FunctionDef name:copy arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "device_v2",
    "source_code": "@tf_export('device', v1=[])\ndef device_v2(device_name) -> ContextManager[None]:\n    if callable(device_name):\n        raise RuntimeError('tf.device does not support functions.')\n    return device(device_name)",
    "docstring": "Specifies the device for ops created/executed in this context. This function specifies the device to be used for ops created/executed in a particular context. Nested contexts will inherit and also create/execute their ops on the specified device. If a specific device is not required, consider not using this function so that a device can be automatically assigned. In general the use of this function is optional. can be fully specified, as in \"/job:worker/task:1/device:cpu:0\", or partially specified, containing only a subset of the \"/\"-separated fields. Any fields which are specified will override device annotations from outer scopes. For example: Args: device_name: The device name to use in the context. Returns: A context manager that specifies the default device to use for newly created ops. Raises: RuntimeError: If a function is passed in.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\ops.py",
    "ast_data": "FunctionDef name:device_v2 arg:device_name arguments arg If Call Raise Call Return return:yes Call Call"
  },
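The docstring's inline example did not survive extraction; a minimal eager-mode sketch of the context manager:

```python
import tensorflow as tf

with tf.device("/CPU:0"):
    a = tf.constant([[1.0, 2.0], [3.0, 4.0]])
    b = tf.matmul(a, a)   # created/executed on CPU:0
print(b.device)           # ends with .../device:CPU:0
```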
  {
    "library": "scikit-learn",
    "name": "multiply",
    "source_code": "def multiply(self, y: Array | complex, /, copy: bool | None=None, xp: ModuleType | None=None) -> Array:\n    return self._op(_AtOp.MULTIPLY, operator.imul, operator.mul, y, copy=copy, xp=xp)",
    "docstring": "Apply `` and return the updated array.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\externals\\array_api_extra\\_lib\\_at.py",
    "ast_data": "FunctionDef name:multiply arg:copy arg:xp arguments arg arg arg arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "create_android_ndk_rule",
    "source_code": "def create_android_ndk_rule(environ_cp):\n    if is_windows() or is_cygwin():\n        default_ndk_path = cygpath('%s/Android/Sdk/ndk-bundle' % environ_cp['APPDATA'])\n    elif is_macos():\n        default_ndk_path = '%s/library/Android/Sdk/ndk-bundle' % environ_cp['HOME']\n    else:\n        default_ndk_path = '%s/Android/Sdk/ndk-bundle' % environ_cp['HOME']\n\n    def valid_ndk_path(path):\n        return os.path.exists(path) and os.path.exists(os.path.join(path, 'source.properties'))\n    android_ndk_home_path = prompt_loop_or_load_from_env(environ_cp, var_name='ANDROID_NDK_HOME', var_default=default_ndk_path, ask_for_var='Please specify the home path of the Android NDK to use.', check_success=valid_ndk_path, error_msg='The path %s or its child file \"source.properties\" does not exist.')\n    write_action_env_to_bazelrc('ANDROID_NDK_HOME', android_ndk_home_path)\n    write_action_env_to_bazelrc('ANDROID_NDK_API_LEVEL', get_ndk_api_level(environ_cp, android_ndk_home_path))",
    "docstring": "Set ANDROID_NDK_HOME and write Android NDK WORKSPACE rule.",
    "type": "function",
    "file_path": "tensorflow\\configure.py",
    "ast_data": "FunctionDef name:create_android_ndk_rule arg:environ_cp arguments arg If BoolOp Call Call Assign Call If Call Assign Assign FunctionDef name:valid_ndk_path arg:path arguments arg Return return:yes BoolOp Call Call Call Assign Call Call Call Call"
  },
  {
    "library": "scikit-learn",
    "name": "_load_csv",
    "source_code": "def _load_csv(F):\n    names = F.readline().decode('ascii').strip().split(',')\n    rec = np.loadtxt(F, skiprows=0, delimiter=',', dtype='S22,f4,f4')\n    rec.dtype.names = names\n    return rec",
    "docstring": "Load csv file. Parameters ---------- F : file object CSV file open in byte mode. Returns ------- rec : np.ndarray record array representing the data",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\datasets\\_species_distributions.py",
    "ast_data": "FunctionDef name:_load_csv arg:F arguments arg Assign Call Call Call Call Assign Call Assign Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_get_parameters",
    "source_code": "def _get_parameters(self, m, recurse=True):\n\n    def model_parameters(m):\n        ps = m._former_parameters.values() if hasattr(m, '_former_parameters') else m.parameters(recurse=False)\n        yield from ps\n    for mod in m.modules() if recurse else [m]:\n        yield from model_parameters(mod)",
    "docstring": "Return a generator of module parameters.",
    "type": "method",
    "file_path": "pytorch\\torch\\nn\\parallel\\distributed.py",
    "ast_data": "FunctionDef name:_get_parameters arg:self arg:m arg:recurse arguments arg arg arg FunctionDef name:model_parameters arg:m arguments arg Assign Call Call Call For Call Call"
  },
  {
    "library": "matplotlib",
    "name": "sorted_by_field",
    "source_code": "def sorted_by_field(issues, field='closed_at', reverse=False):\n    return sorted(issues, key=lambda i: i[field], reverse=reverse)",
    "docstring": "Return a list of issues sorted by closing date.",
    "type": "function",
    "file_path": "matplotlib\\tools\\github_stats.py",
    "ast_data": "FunctionDef name:sorted_by_field arg:issues arg:field arg:reverse arguments arg arg arg Return return:yes Call arguments arg"
  },
  {
    "library": "scikit-learn",
    "name": "_NaNCounter",
    "source_code": "class _NaNCounter(Counter):\n\n    def __init__(self, items):\n        super().__init__(self._generate_items(items))\n\n    def _generate_items(self, items):\n        for item in items:\n            if not is_scalar_nan(item):\n                yield item\n                continue\n            if not hasattr(self, 'nan_count'):\n                self.nan_count = 0\n            self.nan_count += 1\n\n    def __missing__(self, key):\n        if hasattr(self, 'nan_count') and is_scalar_nan(key):\n            return self.nan_count\n        raise KeyError(key)",
    "docstring": "Counter with support for nan values.",
    "type": "class",
    "file_path": "scikit-learn\\sklearn\\utils\\_encode.py",
    "ast_data": "ClassDef name:_NaNCounter FunctionDef name:__init__ arg:self arg:items arguments arg arg Call Call Call FunctionDef name:_generate_items arg:self arg:items arguments arg arg For If Call If Call Assign FunctionDef name:__missing__ arg:self arg:key arguments arg arg If BoolOp Call Call Return return:yes Raise Call"
  },
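The nan-handling trick above generalizes beyond scikit-learn. Below is a minimal standalone sketch of the same idea, using `math.isnan` as a stand-in for sklearn's `is_scalar_nan` helper (an assumption; the real helper also covers NumPy scalar types):

```python
# Sketch of a nan-aware Counter: nan != nan, so nan items are tallied
# manually and served through __missing__ instead of normal dict lookup.
import math
from collections import Counter

class NaNCounter(Counter):
    def __init__(self, items):
        self.nan_count = 0
        super().__init__(self._generate_items(items))

    def _generate_items(self, items):
        for item in items:
            if isinstance(item, float) and math.isnan(item):
                self.nan_count += 1  # count nans on the side
            else:
                yield item

    def __missing__(self, key):
        if isinstance(key, float) and math.isnan(key):
            return self.nan_count
        raise KeyError(key)

counts = NaNCounter(["a", "b", "a", float("nan"), float("nan")])
print(counts["a"])            # 2
print(counts[float("nan")])   # 2 -- resolved via __missing__
```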
  {
    "library": "scikit-learn",
    "name": "default_device",
    "source_code": "def default_device(self):\n    return 'cpu'",
    "docstring": "The default device used for new NumPy arrays. For NumPy, this always returns ``. See Also -------- __array_namespace_info__.capabilities, __array_namespace_info__.default_dtypes, __array_namespace_info__.dtypes, __array_namespace_info__.devices Returns ------- device : Device The default device used for new NumPy arrays. Examples -------- >>> info = np.__array_namespace_info__() >>> info.default_device() 'cpu'",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\externals\\array_api_compat\\numpy\\_info.py",
    "ast_data": "FunctionDef name:default_device arg:self arguments arg Return return:yes"
  },
  {
    "library": "pandas",
    "name": "_infer_precision",
    "source_code": "def _infer_precision(base_precision: int, bins: Index) -> int:\n    for precision in range(base_precision, 20):\n        levels = np.asarray([_round_frac(b, precision) for b in bins])\n        if algos.unique(levels).size == bins.size:\n            return precision\n    return base_precision",
    "docstring": "Infer an appropriate precision for _round_frac",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\reshape\\tile.py",
    "ast_data": "FunctionDef name:_infer_precision arg:base_precision arg:bins arguments arg arg For Call Assign Call Call If Compare Call Return return:yes Return return:yes"
  },
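The search loop above is easy to see in isolation. A hedged sketch of the same idea, substituting `np.round` for pandas' internal `_round_frac` (an assumption; `_round_frac` rounds relative to magnitude, so behavior differs for large values):

```python
# Increase the rounding precision until no two distinct bin edges
# collapse onto the same rounded value.
import numpy as np

def infer_precision(base_precision: int, bins: np.ndarray) -> int:
    for precision in range(base_precision, 20):
        levels = np.round(bins, precision)
        if np.unique(levels).size == bins.size:
            return precision
    return base_precision

edges = np.array([0.12341, 0.12349, 0.5])
print(infer_precision(3, edges))  # 4 -- at 3 digits both small edges round to 0.123
```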
  {
    "library": "pytorch",
    "name": "import_source",
    "source_code": "@cache_method\ndef import_source(self, module_name):\n    if 'torch_package' in module_name:\n        value = torch.package.package_importer._package_imported_modules[module_name]\n        alias = module_name.replace('>', '_').replace('<', '_').replace('.', '_dot_')\n    else:\n        value = _import_module(module_name)\n        alias = f'__import_{module_name.replace('.', '_dot_')}'\n    f_globals = self.output.global_scope\n    assert alias not in f_globals or f_globals[alias] is value\n    f_globals[alias] = value\n    self.output.update_co_names(alias)\n    return GlobalSource(alias)",
    "docstring": "Create an alias to a module for use in guards",
    "type": "method",
    "file_path": "pytorch\\torch\\_dynamo\\symbolic_convert.py",
    "ast_data": "FunctionDef name:import_source arg:self arg:module_name arguments arg arg If Compare Assign Assign Call Call Call Assign Call Assign Call Assign BoolOp Compare Compare Assign Call Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "CovViaPSD",
    "source_code": "class CovViaPSD(Covariance):\n\n    def __init__(self, psd):\n        self._LP = psd.U\n        self._log_pdet = psd.log_pdet\n        self._rank = psd.rank\n        self._covariance = psd._M\n        self._shape = psd._M.shape\n        self._psd = psd\n        self._allow_singular = False\n\n    def _whiten(self, x):\n        return x @ self._LP\n\n    def _support_mask(self, x):\n        return self._psd._support_mask(x)",
    "docstring": "Representation of a covariance provided via an instance of _PSD",
    "type": "class",
    "file_path": "scipy\\scipy\\stats\\_covariance.py",
    "ast_data": "ClassDef name:CovViaPSD FunctionDef name:__init__ arg:self arg:psd arguments arg arg Assign Assign Assign Assign Assign Assign Assign FunctionDef name:_whiten arg:self arg:x arguments arg arg Return return:yes FunctionDef name:_support_mask arg:self arg:x arguments arg arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "tensor_rank_tensor",
    "source_code": "def tensor_rank_tensor(self, name='tensor_rank_tensor'):\n    with self._name_scope(name):\n        return self._tensor_rank_tensor()",
    "docstring": "Rank (in the sense of tensors) of matrix corresponding to this operator. If this operator acts like the batch matrix with , then this returns . Args: name: A name for this . Returns: , determined at runtime.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\linalg\\linear_operator.py",
    "ast_data": "FunctionDef name:tensor_rank_tensor arg:self arg:name arguments arg arg With Call Return return:yes Call"
  },
  {
    "library": "django",
    "name": "unlocalize",
    "source_code": "@register.filter(is_safe=False)\ndef unlocalize(value):\n    return str(formats.localize(value, use_l10n=False))",
    "docstring": "Force a value to be rendered as a non-localized value.",
    "type": "function",
    "file_path": "django\\django\\templatetags\\l10n.py",
    "ast_data": "FunctionDef name:unlocalize arg:value arguments arg Return return:yes Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "eval",
    "source_code": "def eval(self, feed_dict=None, session=None):\n    indices, values, dense_shape = _eval_using_default_session([self.indices, self.values, self.dense_shape], feed_dict, self.graph, session)\n    return SparseTensorValue(indices, values, dense_shape)",
    "docstring": "Evaluates this sparse tensor in a . Calling this method will execute all preceding operations that produce the inputs needed for the operation that produces this tensor. *N.B.* Before invoking , its graph must have been launched in a session, and either a default session must be available, or must be specified explicitly. Args: feed_dict: A dictionary that maps objects to feed values. See for a description of the valid feed values. session: (Optional.) The to be used to evaluate this sparse tensor. If none, the default session will be used. Returns: A object.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\sparse_tensor.py",
    "ast_data": "FunctionDef name:eval arg:self arg:feed_dict arg:session arguments arg arg arg Assign Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "_get_training_state",
    "source_code": "def _get_training_state(handle: FlatParamHandle) -> HandleTrainingState:\n    _p_assert(handle, 'Expects a non-empty handle')\n    return handle._training_state",
    "docstring": "Returns the training state of the handles in ``.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\fsdp\\_runtime_utils.py",
    "ast_data": "FunctionDef name:_get_training_state arg:handle arguments arg Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "rewrite_script_object_meta",
    "source_code": "def rewrite_script_object_meta(gm: torch.fx.GraphModule) -> dict[str, _ConstantAttributeType]:\n    constants: dict[str, _ConstantAttributeType] = {}\n    for node in gm.graph.nodes:\n        if 'val' not in node.meta:\n            continue\n        old_meta = node.meta['val']\n        if isinstance(old_meta, torch.ScriptObject):\n            class_fqn = old_meta._type().qualified_name()\n            new_meta = CustomObjArgument(node.name, class_fqn)\n            constants[node.name] = old_meta\n            node.meta['val'] = new_meta\n        elif isinstance(old_meta, FakeScriptObject):\n            class_fqn = old_meta.script_class_name\n            new_meta = CustomObjArgument(node.name, class_fqn, old_meta)\n            constants[node.name] = old_meta\n            node.meta['val'] = new_meta\n    return constants",
    "docstring": "When tracing, we produce a graph with FakeScriptObject in the meta[\"val\"]. For now, we rewrie meta[\"val\"] to be a placeholder CustomObjArgument",
    "type": "function",
    "file_path": "pytorch\\torch\\_export\\passes\\lift_constants_pass.py",
    "ast_data": "FunctionDef name:rewrite_script_object_meta arg:gm arguments arg For If Compare Assign If Call Assign Call Call Assign Call Assign Assign If Call Assign Assign Call Assign Assign Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "split_outside_bracket",
    "source_code": "def split_outside_bracket(line: str, delimiter: str=',') -> list[str]:\n    bracket_count = 0\n    curr_token = ''\n    res = []\n    for char in line:\n        if char == '[':\n            bracket_count += 1\n        elif char == ']':\n            bracket_count -= 1\n        elif char == delimiter and bracket_count == 0:\n            res.append(curr_token)\n            curr_token = ''\n            continue\n        curr_token += char\n    res.append(curr_token)\n    return res",
    "docstring": "Given a line of text, split it on comma unless the comma is within a bracket '[]'.",
    "type": "function",
    "file_path": "pytorch\\torch\\utils\\data\\datapipes\\gen_pyi.py",
    "ast_data": "FunctionDef name:split_outside_bracket arg:line arg:delimiter arguments arg arg Assign Assign Assign For If Compare If Compare If BoolOp Compare Compare Call Assign Call Return return:yes"
  },
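Since the splitter is self-contained, a standalone copy shows its behavior directly: commas nested inside `[...]` survive, while top-level commas split.

```python
# Bracket-aware split: track nesting depth and only split at depth 0.
def split_outside_bracket(line: str, delimiter: str = ",") -> list[str]:
    bracket_count = 0
    curr_token = ""
    res = []
    for char in line:
        if char == "[":
            bracket_count += 1
        elif char == "]":
            bracket_count -= 1
        elif char == delimiter and bracket_count == 0:
            res.append(curr_token)
            curr_token = ""
            continue
        curr_token += char
    res.append(curr_token)
    return res

print(split_outside_bracket("a: int, b: List[int, str], c"))
# ['a: int', ' b: List[int, str]', ' c']
```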
  {
    "library": "tensorflow",
    "name": "enter_finally_section",
    "source_code": "def enter_finally_section(self, section_id):\n    self.finally_section_subgraphs[section_id] = [None, None]\n    if self.leaves:\n        self.finally_section_has_direct_flow[section_id] = True\n    else:\n        self.finally_section_has_direct_flow[section_id] = False\n    self.pending_finally_sections.add(section_id)",
    "docstring": "Enters a finally section.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\autograph\\pyct\\cfg.py",
    "ast_data": "FunctionDef name:enter_finally_section arg:self arg:section_id arguments arg arg Assign If Assign Assign Call"
  },
  {
    "library": "scipy",
    "name": "determine_backend",
    "source_code": "def determine_backend(value, dispatch_type, *, domain, only=True, coerce=False):\n    dispatchables = (Dispatchable(value, dispatch_type, coerce),)\n    backend = _uarray.determine_backend(domain, dispatchables, coerce)\n    return set_backend(backend, coerce=coerce, only=only)",
    "docstring": "Set the backend to the first active backend that supports `determine_backendmarking ` fails since the types don't match. Instead, we need to first find a backend suitable for all of our objects. >>> with ua.set_backend(ex.BackendA), ua.set_backend(ex.BackendB): ... x = ex.TypeA() ... with ua.determine_backend(x, \"mark\", domain=\"ua_examples\"): ... res = ex.creation_multimethod() ... ex.call_multimethod(res, x) TypeA",
    "type": "function",
    "file_path": "scipy\\scipy\\_lib\\_uarray\\_backend.py",
    "ast_data": "FunctionDef name:determine_backend arg:value arg:dispatch_type arguments arg arg arg arg arg Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "get_grey",
    "source_code": "@classmethod\ndef get_grey(cls, tex, fontsize=None, dpi=None):\n    fontsize = mpl._val_or_rc(fontsize, 'font.size')\n    dpi = mpl._val_or_rc(dpi, 'savefig.dpi')\n    key = (cls._get_tex_source(tex, fontsize), dpi)\n    alpha = cls._grey_arrayd.get(key)\n    if alpha is None:\n        pngfile = cls.make_png(tex, fontsize, dpi)\n        rgba = mpl.image.imread(os.path.join(cls._texcache, pngfile))\n        cls._grey_arrayd[key] = alpha = rgba[:, :, -1]\n    return alpha",
    "docstring": "Return the alpha channel.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\texmanager.py",
    "ast_data": "FunctionDef name:get_grey arg:cls arg:tex arg:fontsize arg:dpi arguments arg arg arg arg Assign Call Assign Call Assign Call Assign Call If Compare Assign Call Assign Call Call Assign Return return:yes"
  },
  {
    "library": "sphinx",
    "name": "temp_data",
    "source_code": "@property\ndef temp_data(self) -> _CurrentDocument:\n    return self.current_document",
    "docstring": "Returns the temporary data storage for the current document. Kept for backwards compatibility.",
    "type": "method",
    "file_path": "sphinx\\sphinx\\environment\\__init__.py",
    "ast_data": "FunctionDef name:temp_data arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "is_chief",
    "source_code": "@property\ndef is_chief(self):\n    return self._is_chief_node",
    "docstring": "Returns whether the task is a chief node.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\distribute\\distribute_coordinator_utils.py",
    "ast_data": "FunctionDef name:is_chief arg:self arguments arg Return return:yes"
  },
  {
    "library": "django",
    "name": "get_path_to_parent",
    "source_code": "def get_path_to_parent(self, parent):\n    if self.model is parent:\n        return []\n    proxied_model = self.concrete_model\n    path = []\n    opts = self\n    for int_model in self.get_base_chain(parent):\n        if int_model is proxied_model:\n            opts = int_model._meta\n        else:\n            final_field = opts.parents[int_model]\n            targets = (final_field.remote_field.get_related_field(),)\n            opts = int_model._meta\n            path.append(PathInfo(from_opts=final_field.model._meta, to_opts=opts, target_fields=targets, join_field=final_field, m2m=False, direct=True, filtered_relation=None))\n    return path",
    "docstring": "Return a list of PathInfos containing the path from the current model to the parent model, or an empty list if parent is not a parent of the current model.",
    "type": "method",
    "file_path": "django\\django\\db\\models\\options.py",
    "ast_data": "FunctionDef name:get_path_to_parent arg:self arg:parent arguments arg arg If Compare Return return:no Assign Assign Assign For Call If Compare Assign Assign Assign Call Assign Call Call Return return:yes"
  },
  {
    "library": "scipy",
    "name": "connect",
    "source_code": "def connect(self, v):\n    if v is not self and v not in self.nn:\n        self.nn.add(v)\n        v.nn.add(self)\n        self.check_min = True\n        self.check_max = True\n        v.check_min = True\n        v.check_max = True",
    "docstring": "Connects self to another vertex object v. Parameters ---------- v : VertexBase or VertexScalarField object",
    "type": "method",
    "file_path": "scipy\\scipy\\optimize\\_shgo_lib\\_vertex.py",
    "ast_data": "FunctionDef name:connect arg:self arg:v arguments arg arg If BoolOp Compare Compare Call Call Assign Assign Assign Assign"
  },
  {
    "library": "pandas",
    "name": "_maybe_disable_logical_methods",
    "source_code": "@final\ndef _maybe_disable_logical_methods(self, opname: str_t) -> None:\n    if isinstance(self, ABCMultiIndex):\n        raise TypeError(f'cannot perform {opname} with {type(self).__name__}')",
    "docstring": "raise if this Index subclass does not support any or all.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\indexes\\base.py",
    "ast_data": "FunctionDef name:_maybe_disable_logical_methods arg:self arg:opname arguments arg arg If Call Raise Call Call"
  },
  {
    "library": "cryptography",
    "name": "add_attribute",
    "source_code": "def add_attribute(self, oid: ObjectIdentifier, value: bytes, *, _tag: _ASN1Type | None=None) -> CertificateSigningRequestBuilder:\n    if not isinstance(oid, ObjectIdentifier):\n        raise TypeError('oid must be an ObjectIdentifier')\n    if not isinstance(value, bytes):\n        raise TypeError('value must be bytes')\n    if _tag is not None and (not isinstance(_tag, _ASN1Type)):\n        raise TypeError('tag must be _ASN1Type')\n    _reject_duplicate_attribute(oid, self._attributes)\n    if _tag is not None:\n        tag = _tag.value\n    else:\n        tag = None\n    return CertificateSigningRequestBuilder(self._subject_name, self._extensions, [*self._attributes, (oid, value, tag)])",
    "docstring": "Adds an X.509 attribute with an OID and associated value.",
    "type": "method",
    "file_path": "cryptography\\src\\cryptography\\x509\\base.py",
    "ast_data": "FunctionDef name:add_attribute arg:self arg:oid arg:value arguments arg arg arg arg If Call Raise Call If Call Raise Call If BoolOp Compare Call Raise Call Call If Compare Assign Assign Return return:yes Call"
  },
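A usage sketch for the builder method above. It assumes the `cryptography` package is installed and uses the real `AttributeOID.CHALLENGE_PASSWORD` constant; the key size and attribute value are illustrative only.

```python
# Build a CSR carrying a challengePassword attribute, then read it back.
from cryptography import x509
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.asymmetric import rsa
from cryptography.x509.oid import AttributeOID, NameOID

key = rsa.generate_private_key(public_exponent=65537, key_size=2048)
csr = (
    x509.CertificateSigningRequestBuilder()
    .subject_name(x509.Name([x509.NameAttribute(NameOID.COMMON_NAME, "example.com")]))
    .add_attribute(AttributeOID.CHALLENGE_PASSWORD, b"shared-secret")
    .sign(key, hashes.SHA256())
)
attr = csr.attributes.get_attribute_for_oid(AttributeOID.CHALLENGE_PASSWORD)
print(attr.value)  # b'shared-secret'
```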
  {
    "library": "pandas",
    "name": "_ensure_term",
    "source_code": "def _ensure_term(where, scope_level: int):\n    level = scope_level + 1\n    if isinstance(where, (list, tuple)):\n        where = [Term(term, scope_level=level + 1) if maybe_expression(term) else term for term in where if term is not None]\n    elif maybe_expression(where):\n        where = Term(where, scope_level=level)\n    return where if where is None or len(where) else None",
    "docstring": "Ensure that the where is a Term or a list of Term. This makes sure that we are capturing the scope of variables that are passed create the terms here with a frame_level=2 (we are 2 levels down)",
    "type": "function",
    "file_path": "pandas\\pandas\\io\\pytables.py",
    "ast_data": "FunctionDef name:_ensure_term arg:where arg:scope_level arguments arg arg Assign If Call Assign Call Call Compare If Call Assign Call Return return:yes BoolOp Compare Call"
  },
  {
    "library": "numpy",
    "name": "unique_inverse",
    "source_code": "@array_function_dispatch(_unique_inverse_dispatcher)\ndef unique_inverse(x):\n    result = unique(x, return_index=False, return_inverse=True, return_counts=False, equal_nan=False)\n    return UniqueInverseResult(*result)",
    "docstring": "Find the unique elements of and indices to reconstruct . This function is an Array API compatible alternative to:: np.unique(x, return_inverse=True, equal_nan=False, sorted=False) but returns a namedtuple for easier access to each output. .. note:: This function currently always returns a sorted result, however, this could change in any NumPy minor release. Parameters ---------- x : array_like Input array. It will be flattened if it is not already 1-D. Returns ------- out : namedtuple The result containing: * values - The unique elements of an input array. * inverse_indices - The indices from the set of unique elements that reconstruct . See Also -------- unique : Find the unique elements of an array. Examples -------- >>> import numpy as np >>> x = [1, 1, 2] >>> uniq = np.unique_inverse(x) >>> uniq.values array([1, 2]) >>> uniq.inverse_indices array([0, 0, 1])",
    "type": "function",
    "file_path": "numpy\\numpy\\lib\\_arraysetops_impl.py",
    "ast_data": "FunctionDef name:unique_inverse arg:x arguments arg Assign Call Return return:yes Call Call"
  },
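A short usage sketch (requires NumPy >= 2.0, where `np.unique_inverse` is available): the inverse indices index back into the unique values to reconstruct the input.

```python
import numpy as np

x = np.array([3, 1, 3, 2, 1])
res = np.unique_inverse(x)
print(res.values)                       # [1 2 3]
print(res.inverse_indices)              # [2 0 2 1 0]
print(res.values[res.inverse_indices])  # [3 1 3 2 1] -- original x recovered
```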
  {
    "library": "tensorflow",
    "name": "readahead_file_path",
    "source_code": "@tf_export(v1=['resource_loader.readahead_file_path'])\ndef readahead_file_path(path, readahead='128M'):\n    return path",
    "docstring": "Readahead files not implemented; simply returns given path.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\platform\\resource_loader.py",
    "ast_data": "FunctionDef name:readahead_file_path arg:path arg:readahead arguments arg arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "elapsed_time",
    "source_code": "def elapsed_time(self, end_event: 'Event') -> float:\n    return torch._C._mps_elapsedTimeOfEvents(self.__eventId, end_event.__eventId)",
    "docstring": "Returns the time elapsed in milliseconds after the event was recorded and before the end_event was recorded.",
    "type": "method",
    "file_path": "pytorch\\torch\\mps\\event.py",
    "ast_data": "FunctionDef name:elapsed_time arg:self arg:end_event arguments arg arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "_annotate_nodes_not_quantize",
    "source_code": "def _annotate_nodes_not_quantize(nodes: Union[Node, list[Node]]) -> None:\n    if not isinstance(nodes, list):\n        nodes = [nodes]\n    for node in nodes:\n        node.meta[QUANT_ANNOTATION_KEY] = _X86InductorQuantizationAnnotation(_annotated=True)",
    "docstring": "Annotate nodes to exclude them from quantization (their is ).",
    "type": "function",
    "file_path": "pytorch\\torch\\ao\\quantization\\quantizer\\x86_inductor_quantizer.py",
    "ast_data": "FunctionDef name:_annotate_nodes_not_quantize arg:nodes arguments arg If Call Assign For Assign Call"
  },
  {
    "library": "pandas",
    "name": "_slice",
    "source_code": "def _slice(self, slicer: slice | npt.NDArray[np.bool_] | npt.NDArray[np.intp]) -> ExtensionArray:\n    if self.ndim == 2:\n        if not isinstance(slicer, slice):\n            raise AssertionError('invalid slicing for a 1-ndim ExtensionArray', slicer)\n        new_locs = range(1)[slicer]\n        if not len(new_locs):\n            raise AssertionError('invalid slicing for a 1-ndim ExtensionArray', slicer)\n        slicer = slice(None)\n    return self.values[slicer]",
    "docstring": "Return a slice of my values. Parameters ---------- slicer : slice, ndarray[int], or ndarray[bool] Valid (non-reducing) indexer for self.values. Returns ------- ExtensionArray",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\internals\\blocks.py",
    "ast_data": "FunctionDef name:_slice arg:self arg:slicer arguments arg arg If Compare If Call Raise Call Assign Call If Call Raise Call Assign Call Return return:yes"
  },
  {
    "library": "numpy",
    "name": "get_library_dirs",
    "source_code": "@functools.lru_cache(maxsize=128)\ndef get_library_dirs(self):\n    opt = FCompiler.get_library_dirs(self)\n    flang_dir = dirname(self.executables['compiler_f77'][0])\n    opt.append(normpath(join(flang_dir, '..', 'lib')))\n    return opt",
    "docstring": "List of compiler library directories.",
    "type": "method",
    "file_path": "numpy\\numpy\\distutils\\fcompiler\\pg.py",
    "ast_data": "FunctionDef name:get_library_dirs arg:self arguments arg Assign Call Assign Call Call Call Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "print_value_stack",
    "source_code": "def print_value_stack(self, *, file=None, stacklevel=0):\n    tx = self.__get_tx(stacklevel)\n    for s in tx.stack:\n        print(f'- {s.debug_repr()}', file=file)",
    "docstring": "Print the current Python value stack. Note that this is NOT the same as the traceback; use print_bt() to print that. Note that at stacklevel=0, this will typically be empty, as comptime cannot currently be used in an expression context where there would be intermediates on the stack. If you would find this useful, please file a bug at NB: Stack grows downwards in our print",
    "type": "method",
    "file_path": "pytorch\\torch\\_dynamo\\comptime.py",
    "ast_data": "FunctionDef name:print_value_stack arg:self arguments arg arg arg Assign Call For Call Call"
  },
  {
    "library": "kornia",
    "name": "SequentialOpsInterface",
    "source_code": "class SequentialOpsInterface(Generic[T], metaclass=ABCMeta):\n\n    @classmethod\n    def get_instance_module_param(cls, param: ParamItem) -> Dict[str, Tensor]:\n        if isinstance(param, ParamItem) and isinstance(param.data, dict):\n            _params = param.data\n        else:\n            raise TypeError(f'Expected param (ParamItem.data) be a dictionary. Gotcha {param}.')\n        return _params\n\n    @classmethod\n    def get_sequential_module_param(cls, param: ParamItem) -> List[ParamItem]:\n        if isinstance(param, ParamItem) and isinstance(param.data, list):\n            _params = param.data\n        else:\n            raise TypeError(f'Expected param (ParamItem.data) be a list. Gotcha {param}.')\n        return _params\n\n    @classmethod\n    @abstractmethod\n    def transform(cls, input: T, module: Module, param: ParamItem, extra_args: Optional[Dict[str, Any]]=None) -> T:\n        raise NotImplementedError\n\n    @classmethod\n    @abstractmethod\n    def inverse(cls, input: T, module: Module, param: ParamItem, extra_args: Optional[Dict[str, Any]]=None) -> T:\n        raise NotImplementedError",
    "docstring": "Abstract interface for applying and inversing transformations.",
    "type": "class",
    "file_path": "kornia\\kornia\\augmentation\\container\\ops.py",
    "ast_data": "ClassDef name:SequentialOpsInterface FunctionDef name:get_instance_module_param arg:cls arg:param arguments arg arg If BoolOp Call Call Assign Raise Call Return return:yes FunctionDef name:get_sequential_module_param arg:cls arg:param arguments arg arg If BoolOp Call Call Assign Raise Call Return return:yes FunctionDef name:transform arg:cls arg:input arg:module arg:param arg:extra_args arguments arg arg arg arg arg Raise FunctionDef name:inverse arg:cls arg:input arg:module arg:param arg:extra_args arguments arg arg arg arg arg Raise"
  },
  {
    "library": "scikit-learn",
    "name": "predict_proba",
    "source_code": "def predict_proba(self, X):\n    check_is_fitted(self)\n    latent_mean, latent_var = self.latent_mean_and_variance(X)\n    alpha = 1 / (2 * latent_var)\n    gamma = LAMBDAS * latent_mean\n    integrals = np.sqrt(np.pi / alpha) * erf(gamma * np.sqrt(alpha / (alpha + LAMBDAS ** 2))) / (2 * np.sqrt(latent_var * 2 * np.pi))\n    pi_star = (COEFS * integrals).sum(axis=0) + 0.5 * COEFS.sum()\n    return np.vstack((1 - pi_star, pi_star)).T",
    "docstring": "Return probability estimates for the test vector X. Parameters ---------- X : array-like of shape (n_samples, n_features) or list of object Query points where the GP is evaluated for classification. Returns ------- C : array-like of shape (n_samples, n_classes) Returns the probability of the samples for each class in the model. The columns correspond to the classes in sorted order, as they appear in the attribute ``.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\gaussian_process\\_gpc.py",
    "ast_data": "FunctionDef name:predict_proba arg:self arg:X arguments arg arg Call Assign Call Assign Assign Assign Call Call Call Call Assign Call Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "counts",
    "source_code": "def counts(self, *, denoise: bool=False) -> int:\n    stats = self.stmt_exclusive_stats\n    return (stats.denoise() if denoise else stats).sum()",
    "docstring": "Returns the total number of instructions executed. See for an explanation of the arg.",
    "type": "method",
    "file_path": "pytorch\\torch\\utils\\benchmark\\utils\\valgrind_wrapper\\timer_interface.py",
    "ast_data": "FunctionDef name:counts arg:self arguments arg arg Assign Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "assert_no_leak_if_all_possibly_except_one",
    "source_code": "@trace.trace_wrapper\ndef assert_no_leak_if_all_possibly_except_one(self):\n    self._python_memory_checker.assert_no_leak_if_all_possibly_except_one()",
    "docstring": "Raises an exception if a leak is detected. This algorithm classifies a series of allocations as a leak if it's the same type(Python) or it happens at the same stack trace(C++) at every snapshot, but possibly except one snapshot.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\memory_checker.py",
    "ast_data": "FunctionDef name:assert_no_leak_if_all_possibly_except_one arg:self arguments arg Call"
  },
  {
    "library": "pytorch",
    "name": "_get_observer_from_activation_post_process",
    "source_code": "def _get_observer_from_activation_post_process(activation_post_process: Union[ObserverBase, FakeQuantizeBase]) -> ObserverBase:\n    if isinstance(activation_post_process, ObserverBase):\n        return activation_post_process\n    else:\n        assert isinstance(activation_post_process, FakeQuantizeBase)\n        return activation_post_process.activation_post_process",
    "docstring": "If is an observer, return the observer. If is a fake quantize, return the internal observer.",
    "type": "function",
    "file_path": "pytorch\\torch\\ao\\quantization\\fx\\utils.py",
    "ast_data": "FunctionDef name:_get_observer_from_activation_post_process arg:activation_post_process arguments arg If Call Return return:yes Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_matches_version",
    "source_code": "def _matches_version(actual_version, required_version):\n    if actual_version is None:\n        return False\n    actual_version = actual_version.strip()\n    required_version = required_version.strip()\n    return actual_version.startswith(required_version)",
    "docstring": "Checks whether some version meets the requirements. All elements of the required_version need to be present in the actual_version. required_version actual_version result ----------------------------------------- 1 1.1 True 1.2 1 False 1.2 1.3 False 1 True Args: required_version: The version specified by the user. actual_version: The version detected from the CUDA installation. Returns: Whether the actual version matches the required one.",
    "type": "function",
    "file_path": "tensorflow\\third_party\\xla\\third_party\\gpus\\find_cuda_config.py",
    "ast_data": "FunctionDef name:_matches_version arg:actual_version arg:required_version arguments arg arg If Compare Return return:yes Assign Call Assign Call Return return:yes Call"
  },
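The check reduces to a whitespace-stripped prefix test. A minimal standalone re-creation (names are illustrative, not from the TensorFlow script):

```python
# The required version matches when the detected version string starts with it.
def matches_version(actual_version, required_version):
    if actual_version is None:
        return False
    return actual_version.strip().startswith(required_version.strip())

print(matches_version("11.2.152", "11.2"))  # True
print(matches_version("11.2", "11.20"))     # False -- "11.2" does not start with "11.20"
```

Note the prefix semantics: a required "1.2" would also accept an actual "1.20", which is the trade-off of string-prefix matching over parsed version components.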
  {
    "library": "pytorch",
    "name": "produce_guards_expression",
    "source_code": "def produce_guards_expression(self, placeholders: Sequence[Union[SymInt, FakeTensor]], *, guards: Optional[list[ShapeGuard]]=None, ignore_static: bool=True) -> Optional[str]:\n    from torch._dynamo.source import LocalSource\n    arg_names = [f't{i}' for i in range(len(placeholders))]\n    produced_guards = self.produce_guards(placeholders, [LocalSource(a) for a in arg_names], guards=guards, ignore_static=ignore_static)\n    if produced_guards:\n        return ' and '.join(produced_guards)\n    return None",
    "docstring": "Expected to be used with evaluate_guards_expression(). Produces the guards for the given placeholders and returns a string expression to be evaluated by evaluate_guards_expression given concrete values for the placeholders.",
    "type": "method",
    "file_path": "pytorch\\torch\\fx\\experimental\\symbolic_shapes.py",
    "ast_data": "FunctionDef name:produce_guards_expression arg:self arg:placeholders arguments arg arg arg arg Assign Call Call Assign Call Call If Return return:yes Call Return return:no"
  },
  {
    "library": "matplotlib",
    "name": "get_dash_joinstyle",
    "source_code": "def get_dash_joinstyle(self):\n    return self._dashjoinstyle.name",
    "docstring": "Return the for dashed lines. See also .",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\lines.py",
    "ast_data": "FunctionDef name:get_dash_joinstyle arg:self arguments arg Return return:yes"
  },
  {
    "library": "django",
    "name": "y",
    "source_code": "@property\ndef y(self):\n    return self._cs.getOrdinate(1, 0)",
    "docstring": "Return the Y component of the Point.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\geos\\point.py",
    "ast_data": "FunctionDef name:y arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "run",
    "source_code": "def run():\n    if not _SMCLI_INPUTS.value and (not _SMCLI_INPUT_EXPRS.value) and (not _SMCLI_INPUT_EXAMPLES.value):\n        raise AttributeError('At least one of --inputs, --input_exprs or --input_examples must be required')\n    tensor_key_feed_dict = load_inputs_from_input_arg_string(_SMCLI_INPUTS.value, _SMCLI_INPUT_EXPRS.value, _SMCLI_INPUT_EXAMPLES.value)\n    run_saved_model_with_feed_dict(_SMCLI_DIR.value, _SMCLI_TAG_SET.value, _SMCLI_SIGNATURE_DEF.value, tensor_key_feed_dict, _SMCLI_OUTDIR.value, _SMCLI_OVERWRITE.value, worker=_SMCLI_WORKER.value, init_tpu=_SMCLI_INIT_TPU.value, use_tfrt=_SMCLI_USE_TFRT.value, tf_debug=_SMCLI_TF_DEBUG.value)",
    "docstring": "Function triggered by run command. Raises: AttributeError: An error when neither --inputs nor --input_exprs is passed to run command.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\tools\\saved_model_cli.py",
    "ast_data": "FunctionDef name:run arguments If BoolOp Raise Call Assign Call Call"
  },
  {
    "library": "pytorch",
    "name": "_track_obj",
    "source_code": "def _track_obj(self, item: Any, variable: VariableTracker, mutation_type_cls=ValueMutationExisting):\n    if id(item) in self.id_to_variable:\n        raise AssertionError(f'{variable} is already tracked for mutation. This could be because you are not using VariableBuilder to construct the variable tracker. Source of new object: {variable.source}. Source of previously tracked object: {self.id_to_variable[id(item)].source}.')\n    variable.mutation_type = mutation_type_cls()\n    self.id_to_variable[id(item)] = variable\n    self.keepalive.append(item)\n    return variable",
    "docstring": "Start tracking an existing or new variable for mutation",
    "type": "method",
    "file_path": "pytorch\\torch\\_dynamo\\side_effects.py",
    "ast_data": "FunctionDef name:_track_obj arg:self arg:item arg:variable arg:mutation_type_cls arguments arg arg arg arg If Compare Call Raise Call Call Assign Call Assign Call Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "get_sketch_params",
    "source_code": "def get_sketch_params(self):\n    return self._sketch",
    "docstring": "Return the sketch parameters for the artist. Returns ------- tuple or A 3-tuple with the following elements: * `None` if no sketch parameters were set.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\backend_bases.py",
    "ast_data": "FunctionDef name:get_sketch_params arg:self arguments arg Return return:yes"
  },
  {
    "library": "django",
    "name": "password_changed",
    "source_code": "def password_changed(password, user=None, password_validators=None):\n    if password_validators is None:\n        password_validators = get_default_password_validators()\n    for validator in password_validators:\n        password_changed = getattr(validator, 'password_changed', lambda *a: None)\n        password_changed(password, user)",
    "docstring": "Inform all validators that have implemented a password_changed() method that the password has been changed.",
    "type": "function",
    "file_path": "django\\django\\contrib\\auth\\password_validation.py",
    "ast_data": "FunctionDef name:password_changed arg:password arg:user arg:password_validators arguments arg arg arg If Compare Assign Call For Assign Call arguments arg Call"
  },
  {
    "library": "pytorch",
    "name": "apply",
    "source_code": "def apply(self, fn: Callable[[nn.Module], None]) -> 'FullyShardedDataParallel':\n    uninitialized = self._is_root is None\n    self._assert_state(TrainingState.IDLE)\n    with _unshard_params_for_summon(self, self, writeback=True, rank0_only=False, offload_to_cpu=False, with_grads=False):\n        ret = super().apply(fn)\n    if uninitialized and self._is_root:\n        for module in traversal_utils._get_fsdp_states(self):\n            module._reset_lazy_init()\n    return ret",
    "docstring": "Apply `nn-init-docModule` -> None): function to be applied to each submodule Returns: Module: self",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\fsdp\\fully_sharded_data_parallel.py",
    "ast_data": "FunctionDef name:apply arg:self arg:fn arguments arg arg Assign Compare Call With Call Assign Call Call If BoolOp For Call Call Return return:yes"
  },
  {
    "library": "numpy",
    "name": "leggrid3d",
    "source_code": "def leggrid3d(x, y, z, c):\n    return pu._gridnd(legval, c, x, y, z)",
    "docstring": "Evaluate a 3-D Legendre series on the Cartesian product of x, y, and z. This function returns the values: .. math:: p(a,b,c) = \\sum_{i,j,k} c_{i,j,k} * L_i(a) * L_j(b) * L_k(c) where the points `axbyczxyzxyzxyzccxyzxyzcxy`. See Also -------- legval, legval2d, leggrid2d, legval3d",
    "type": "function",
    "file_path": "numpy\\numpy\\polynomial\\legendre.py",
    "ast_data": "FunctionDef name:leggrid3d arg:x arg:y arg:z arg:c arguments arg arg arg arg Return return:yes Call"
  },
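A quick sanity check of the tensor-product formula above: with only c[0,0,0] set, the series is L_0(a) * L_0(b) * L_0(c) = 1 on the whole grid. This uses NumPy's public `numpy.polynomial.legendre.leggrid3d`.

```python
import numpy as np
from numpy.polynomial.legendre import leggrid3d

c = np.zeros((2, 2, 2))
c[0, 0, 0] = 1.0                       # only the constant term L_0*L_0*L_0
x = y = z = np.array([-1.0, 0.0, 1.0])
vals = leggrid3d(x, y, z, c)
print(vals.shape)              # (3, 3, 3) -- Cartesian product grid
print(np.allclose(vals, 1.0))  # True, since L_0 is identically 1
```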
  {
    "library": "tensorflow",
    "name": "AggregateIndexedSlicesGradients",
    "source_code": "def AggregateIndexedSlicesGradients(grads):\n    if len(grads) < 1:\n        return None\n    if len(grads) == 1:\n        return grads[0]\n    grads = [g for g in grads if g is not None]\n    if any((isinstance(g, tensor_lib.Tensor) for g in grads)):\n        return math_ops.add_n(grads)\n    grads = math_ops._as_indexed_slices_list(grads)\n    grads = [FlattenNestedIndexedSlices(x) for x in grads]\n    concat_grad = indexed_slices.IndexedSlices(array_ops.concat([x.values for x in grads], axis=0), array_ops.concat([x.indices for x in grads], axis=0), grads[0].dense_shape)\n    return concat_grad",
    "docstring": "Aggregates gradients containing s.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\backprop_util.py",
    "ast_data": "FunctionDef name:AggregateIndexedSlicesGradients arg:grads arguments arg If Compare Call Return return:no If Compare Call Return return:yes Assign Compare If Call Call Return return:yes Call Assign Call Assign Call Assign Call Call Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "set_alpha",
    "source_code": "def set_alpha(self, alpha):\n    artist.Artist._set_alpha_for_array(self, alpha)\n    self._set_facecolor(self._original_facecolor)\n    self._set_edgecolor(self._original_edgecolor)\n    self._set_hatchcolor(self._original_hatchcolor)",
    "docstring": "Set the transparency of the collection. Parameters ---------- alpha : float or array of float or None If not None, *alpha* values must be between 0 and 1, inclusive. If an array is provided, its length must match the number of elements in the collection. Masked values and nans are not supported.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\collections.py",
    "ast_data": "FunctionDef name:set_alpha arg:self arg:alpha arguments arg arg Call Call Call Call"
  },
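A usage sketch of per-element alpha on a collection (the array form requires Matplotlib >= 3.4; the file name is illustrative):

```python
# Give each scatter point its own transparency via an alpha array whose
# length matches the number of elements in the collection.
import numpy as np
import matplotlib.pyplot as plt

xs = np.arange(10)
coll = plt.scatter(xs, xs**2)
coll.set_alpha(np.linspace(0.1, 1.0, 10))  # fade points in, one alpha each
plt.savefig("scatter_alpha.png")
```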
  {
    "library": "pytorch",
    "name": "_Symmetric",
    "source_code": "class _Symmetric(_Square):\n\n    def check(self, value):\n        square_check = super().check(value)\n        if not square_check.all():\n            return square_check\n        return torch.isclose(value, value.mT, atol=1e-06).all(-2).all(-1)",
    "docstring": "Constrain to Symmetric square matrices.",
    "type": "class",
    "file_path": "pytorch\\torch\\distributions\\constraints.py",
    "ast_data": "ClassDef name:_Symmetric FunctionDef name:check arg:self arg:value arguments arg arg Assign Call Call If Call Return return:yes Return return:yes Call Call Call"
  },
  {
    "library": "django",
    "name": "_unicode_ci_compare",
    "source_code": "def _unicode_ci_compare(s1, s2):\n    return unicodedata.normalize('NFKC', s1).casefold() == unicodedata.normalize('NFKC', s2).casefold()",
    "docstring": "Perform case-insensitive comparison of two identifiers, using the recommended algorithm from Unicode Technical Report 36, section 2.11.2(B)(2).",
    "type": "function",
    "file_path": "django\\django\\contrib\\auth\\forms.py",
    "ast_data": "FunctionDef name:_unicode_ci_compare arg:s1 arg:s2 arguments arg arg Return return:yes Compare Call Call Call Call"
  },
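The comparison is just NFKC normalization plus casefolding; here is a standalone illustration (the function name is a stand-in for Django's private helper):

```python
# Identifiers that are visually equivalent in different Unicode forms
# compare equal after NFKC normalization and casefolding.
import unicodedata

def unicode_ci_compare(s1, s2):
    return (
        unicodedata.normalize("NFKC", s1).casefold()
        == unicodedata.normalize("NFKC", s2).casefold()
    )

# "ﬁ" (U+FB01 ligature) normalizes to "fi"; casefold erases letter case.
print(unicode_ci_compare("ﬁle", "FILE"))        # True
print(unicode_ci_compare("straße", "STRASSE"))  # True -- casefold maps ß to ss
```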
  {
    "library": "sphinx",
    "name": "SearchLanguage",
    "source_code": "class SearchLanguage:\n    lang: str = ''\n    language_name: str = ''\n    stopwords: Set[str] = frozenset()\n    js_splitter_code: str = ''\n    js_stemmer_rawcode: str = ''\n    js_stemmer_code = '\\n/**\\n * Dummy stemmer for languages without stemming rules.\\n */\\nvar Stemmer = function () {\\n  this.stemWord = function (w) {\\n    return w;\\n  };\\n};\\n'\n    _word_re = re.compile('\\\\w+')\n\n    def __init__(self, options: dict[str, str]) -> None:\n        self.options = options\n\n    def split(self, input: str) -> list[str]:\n        return self._word_re.findall(input)\n\n    def stem(self, word: str) -> str:\n        return word\n\n    def word_filter(self, word: str) -> bool:\n        return not word.isdigit() and word not in self.stopwords",
    "docstring": "This class is the base class for search natural language preprocessors. If you want to add support for a new language, you should override the methods of this class. You should override class property too (e.g. 'en', 'fr' and so on). .. attribute:: stopwords This is a set of stop words of the target language. Default is empty. This word is used for building index and embedded in JS. .. attribute:: js_splitter_code Return splitter function of JavaScript version. The function should be named as `` method. This string is embedded as-is in searchtools.js. This class is used to preprocess search word which Sphinx HTML readers type, before searching index. Default implementation does nothing.",
    "type": "class",
    "file_path": "sphinx\\sphinx\\search\\__init__.py",
    "ast_data": "ClassDef name:SearchLanguage Call Assign Assign Call FunctionDef name:__init__ arg:self arg:options arguments arg arg Assign FunctionDef name:split arg:self arg:input arguments arg arg Return return:yes Call FunctionDef name:stem arg:self arg:word arguments arg arg Return return:yes FunctionDef name:word_filter arg:self arg:word arguments arg arg Return return:yes BoolOp Call Compare"
  },
  {
    "library": "django",
    "name": "TagHelperNode",
    "source_code": "class TagHelperNode(Node):\n\n    def __init__(self, func, takes_context, args, kwargs):\n        self.func = func\n        self.takes_context = takes_context\n        self.args = args\n        self.kwargs = kwargs\n\n    def get_resolved_arguments(self, context):\n        resolved_args = [var.resolve(context) for var in self.args]\n        if self.takes_context:\n            resolved_args = [context, *resolved_args]\n        resolved_kwargs = {k: v.resolve(context) for k, v in self.kwargs.items()}\n        return (resolved_args, resolved_kwargs)",
    "docstring": "Base class for tag helper nodes such as SimpleNode and InclusionNode. Manages the positional and keyword arguments to be passed to the decorated function.",
    "type": "class",
    "file_path": "django\\django\\template\\library.py",
    "ast_data": "ClassDef name:TagHelperNode FunctionDef name:__init__ arg:self arg:func arg:takes_context arg:args arg:kwargs arguments arg arg arg arg arg Assign Assign Assign Assign FunctionDef name:get_resolved_arguments arg:self arg:context arguments arg arg Assign Call If Assign Assign Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "create_unspecified_symbol",
    "source_code": "@record_shapeenv_event()\ndef create_unspecified_symbol(self, val: Union[int, SymInt, float, SymFloat], source: Source, dynamic_dim: DimDynamic=DimDynamic.DUCK, constraint_dim: DimConstraint=None, symbolic_context: Optional[StatelessSymbolicContext]=None) -> sympy.Expr:\n    return self.create_symbol(val, source, dynamic_dim, constraint_dim, positive=None, do_not_specialize_zero_one=True, symbolic_context=symbolic_context)",
    "docstring": "Create a symbol with an unspecified value Compared to standard symbols we do not assume the value is positive, nor do we specialze on zero or one values.",
    "type": "method",
    "file_path": "pytorch\\torch\\fx\\experimental\\symbolic_shapes.py",
    "ast_data": "FunctionDef name:create_unspecified_symbol arg:self arg:val arg:source arg:dynamic_dim arg:constraint_dim arg:symbolic_context arguments arg arg arg arg arg arg Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "_reformat_reports_for_visualizer",
    "source_code": "def _reformat_reports_for_visualizer(self) -> OrderedDict:\n    module_fqns_to_features: dict[str, dict] = {}\n    for report_name in self._generated_reports:\n        module_info = self._generated_reports[report_name]\n        for module_fqn in module_info:\n            if module_fqn in module_fqns_to_features:\n                new_info: dict = module_info[module_fqn]\n                present_info: dict = module_fqns_to_features[module_fqn]\n                if self._is_same_info_for_same_key(new_info, present_info):\n                    module_fqns_to_features[module_fqn] = {**new_info, **present_info}\n                else:\n                    error_str = 'You have the same key with different values across detectors. '\n                    error_str += 'Someone incorrectly implemented a detector with conflicting keys to existing detectors.'\n                    raise ValueError(error_str)\n            else:\n                module_fqns_to_features[module_fqn] = module_info[module_fqn]\n    features_by_module: OrderedDict[str, dict] = OrderedDict()\n    for fqn, _module in self._model.named_modules():\n        if fqn in module_fqns_to_features:\n            features_by_module[fqn] = module_fqns_to_features[fqn]\n    return features_by_module",
    "docstring": "Takes the generated reports and reformats them into the format that is desired by the ModelReportVisualizer Returns an OrderedDict mapping module_fqns to their features",
    "type": "method",
    "file_path": "pytorch\\torch\\ao\\quantization\\fx\\_model_report\\model_report.py",
    "ast_data": "FunctionDef name:_reformat_reports_for_visualizer arg:self arguments arg For Assign For If Compare If Call Assign Assign Raise Call Assign Call For Call If Compare Assign Return return:yes"
  },
  {
    "library": "pandas",
    "name": "InvalidIndexError",
    "source_code": "class InvalidIndexError(Exception):\n    pass",
    "docstring": "Exception raised when attempting to use an invalid index key. This exception is triggered when a user attempts to access or manipulate data in a pandas DataFrame or Series using an index key that is not valid for the given object. This may occur in cases such as using a malformed slice, a mismatched key for a ``, or attempting to access an index element that does not exist. See Also -------- MultiIndex : A multi-level, or hierarchical, index object for pandas objects. Examples -------- >>> idx = pd.MultiIndex.from_product([[\"x\", \"y\"], [0, 1]]) >>> df = pd.DataFrame([[1, 1, 2, 2], [3, 3, 4, 4]], columns=idx) >>> df x y 0 1 0 1 0 1 1 2 2 1 3 3 4 4 >>> df[:, 0] Traceback (most recent call last): InvalidIndexError: (slice(None, None, None), 0)",
    "type": "class",
    "file_path": "pandas\\pandas\\errors\\__init__.py",
    "ast_data": "ClassDef name:InvalidIndexError"
  },
  {
    "library": "tensorflow",
    "name": "_get_gcc_major_version",
    "source_code": "def _get_gcc_major_version(path_to_gcc: str) -> int:\n    logging.info('Running echo __GNUC__ | %s -E -P -', path_to_gcc)\n    gcc_version_proc = subprocess.run([path_to_gcc, '-E', '-P', '-'], input='__GNUC__', check=True, capture_output=True, text=True)\n    major_version = int(gcc_version_proc.stdout)\n    logging.info('%s reports major version %s.', path_to_gcc, major_version)\n    return major_version",
    "docstring": "Gets the major version of the gcc at . Args: path_to_gcc: Path to a gcc executable Returns: The major version.",
    "type": "function",
    "file_path": "tensorflow\\third_party\\xla\\build_tools\\configure\\configure.py",
    "ast_data": "FunctionDef name:_get_gcc_major_version arg:path_to_gcc arguments arg Call Assign Call Assign Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "register_buffer",
    "source_code": "def register_buffer(self, name: str, tensor: Optional[Tensor], persistent: bool=True) -> None:\n    if persistent is False and isinstance(self, torch.jit.ScriptModule):\n        raise RuntimeError('ScriptModule does not support non-persistent buffers')\n    if '_buffers' not in self.__dict__:\n        raise AttributeError('cannot assign buffer before Module.__init__() call')\n    elif not isinstance(name, str):\n        raise TypeError(f'buffer name should be a string. Got {torch.typename(name)}')\n    elif '.' in name:\n        raise KeyError('buffer name can\\'t contain \".\"')\n    elif name == '':\n        raise KeyError('buffer name can\\'t be empty string \"\"')\n    elif hasattr(self, name) and name not in self._buffers:\n        raise KeyError(f\"attribute '{name}' already exists\")\n    elif tensor is not None and (not isinstance(tensor, torch.Tensor)):\n        raise TypeError(f\"cannot assign '{torch.typename(tensor)}' object to buffer '{name}' (torch Tensor or None required)\")\n    else:\n        for hook in _global_buffer_registration_hooks.values():\n            output = hook(self, name, tensor)\n            if output is not None:\n                tensor = output\n        self._buffers[name] = tensor\n        if persistent:\n            self._non_persistent_buffers_set.discard(name)\n        else:\n            self._non_persistent_buffers_set.add(name)",
    "docstring": "Add a buffer to the module. This is typically used to register a buffer that should not be considered a model parameter. For example, BatchNorm's `persistentstate_dictcudastate_dictstate_dict`. Example:: >>> # xdoctest: +SKIP(\"undefined vars\") >>> self.register_buffer('running_mean', torch.zeros(num_features))",
    "type": "method",
    "file_path": "pytorch\\torch\\nn\\modules\\module.py",
    "ast_data": "FunctionDef name:register_buffer arg:self arg:name arg:tensor arg:persistent arguments arg arg arg arg If BoolOp Compare Call Raise Call If Compare Raise Call If Call Raise Call Call If Compare Raise Call If Compare Raise Call If BoolOp Call Compare Raise Call If BoolOp Compare Call Raise Call Call For Call Assign Call If Compare Assign Assign If Call Call"
  },
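A usage sketch of the buffer semantics described above: buffers appear in `state_dict()` and move with the module, but they are not parameters, so optimizers ignore them. The module here is illustrative.

```python
# A buffer holds non-trainable state (a running mean) that is still
# saved/loaded with the model and moved by .to()/.cuda().
import torch
from torch import nn

class RunningMean(nn.Module):
    def __init__(self, num_features: int):
        super().__init__()
        self.register_buffer("running_mean", torch.zeros(num_features))

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        # Reassigning a tensor to an existing buffer name updates the buffer.
        self.running_mean = 0.9 * self.running_mean + 0.1 * x.mean(dim=0)
        return x - self.running_mean

m = RunningMean(4)
m(torch.randn(8, 4))
print(list(m.state_dict()))  # ['running_mean'] -- saved with the model
print(list(m.parameters()))  # [] -- invisible to optimizers
```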
  {
    "library": "pytorch",
    "name": "get_stage_module",
    "source_code": "def get_stage_module(self, stage_idx: int) -> torch.nn.Module:\n    if stage_idx < 0 or stage_idx >= self.num_stages:\n        raise ValueError(f'Invalid stage index {stage_idx}!')\n    return getattr(self.split_gm, f'submod_{stage_idx}')",
    "docstring": "Return a stage module corresponding to of the .",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\pipelining\\_IR.py",
    "ast_data": "FunctionDef name:get_stage_module arg:self arg:stage_idx arguments arg arg If BoolOp Compare Compare Raise Call Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "predict",
    "source_code": "def predict(self, X, return_std=False):\n    check_is_fitted(self)\n    n_samples = _num_samples(X)\n    y = np.full((n_samples, self.n_outputs_), self.constant_, dtype=np.array(self.constant_).dtype)\n    y_std = np.zeros((n_samples, self.n_outputs_))\n    if self.n_outputs_ == 1:\n        y = np.ravel(y)\n        y_std = np.ravel(y_std)\n    return (y, y_std) if return_std else y",
    "docstring": "Perform classification on test vectors X. Parameters ---------- X : array-like of shape (n_samples, n_features) Test data. return_std : bool, default=False Whether to return the standard deviation of posterior prediction. All zeros in this case. .. versionadded:: 0.20 Returns ------- y : array-like of shape (n_samples,) or (n_samples, n_outputs) Predicted target values for X. y_std : array-like of shape (n_samples,) or (n_samples, n_outputs) Standard deviation of predictive distribution of query points.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\dummy.py",
    "ast_data": "FunctionDef name:predict arg:self arg:X arg:return_std arguments arg arg arg Call Assign Call Assign Call Call Assign Call If Compare Assign Call Assign Call Return return:yes"
  },
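A usage sketch, assuming this is `DummyRegressor.predict` (the file path, `constant_`, and `return_std` handling suggest so): the standard deviation comes back as all zeros.

```python
# DummyRegressor predicts its learned constant; return_std=True also
# yields a zero standard deviation for every query point.
import numpy as np
from sklearn.dummy import DummyRegressor

X = np.arange(10).reshape(-1, 1)
y = np.linspace(0.0, 1.0, 10)
reg = DummyRegressor(strategy="mean").fit(X, y)
pred, std = reg.predict(X[:3], return_std=True)
print(pred)  # three copies of y.mean()
print(std)   # [0. 0. 0.]
```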
  {
    "library": "tensorflow",
    "name": "name_scope_v1",
    "source_code": "@tf_export(v1=['name_scope'])\nclass name_scope_v1(contextlib.AbstractContextManager[Optional[str]]):\n    __slots__ = ['_name', '_name_scope']\n\n    @property\n    def name(self):\n        return self._name\n\n    def __init__(self, name, default_name=None, values=None) -> None:\n        self._name_scope = name_scope(name, default_name, values, skip_on_eager=False)\n        self._name = default_name if name is None else name\n\n    def __enter__(self) -> Optional[str]:\n        return self._name_scope.__enter__()\n\n    def __exit__(self, *exc_info) -> Optional[bool]:\n        return self._name_scope.__exit__(*exc_info)",
    "docstring": "A context manager for use when defining a Python op. This context manager validates that the given are from the same graph, makes that graph the default graph, and pushes a name scope in that graph (see for more details on that). For example, to define a new Python op called :",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\ops.py",
    "ast_data": "ClassDef name:name_scope_v1 Assign FunctionDef name:name arg:self arguments arg Return return:yes FunctionDef name:__init__ arg:self arg:name arg:default_name arg:values arguments arg arg arg arg Assign Call Assign Compare FunctionDef name:__enter__ arg:self arguments arg Return return:yes Call FunctionDef name:__exit__ arg:self arguments arg arg Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "rewrap",
    "source_code": "def rewrap(decorator_func, previous_target, new_target):\n    cur = decorator_func\n    innermost_decorator = None\n    target = None\n    while _has_tf_decorator_attr(cur):\n        innermost_decorator = cur\n        target = getattr(cur, '_tf_decorator')\n        if target.decorated_target is previous_target:\n            break\n        cur = target.decorated_target\n        assert cur is not None\n    if innermost_decorator is None:\n        assert decorator_func is previous_target\n        return new_target\n    target.decorated_target = new_target\n    if inspect.ismethod(innermost_decorator):\n        if hasattr(innermost_decorator, '__func__'):\n            innermost_decorator.__func__.__wrapped__ = new_target\n        elif hasattr(innermost_decorator, 'im_func'):\n            innermost_decorator.im_func.__wrapped__ = new_target\n        else:\n            innermost_decorator.__wrapped__ = new_target\n    else:\n        innermost_decorator.__wrapped__ = new_target\n    return decorator_func",
    "docstring": "Injects a new target into a function built by make_decorator. This function allows replacing a function wrapped by , assuming the decorator that wraps the function is written as described below. The decorator function must use instead of the wrapped function that is normally used: Example: # Instead of this: def simple_parametrized_wrapper(*args, **kwds): return wrapped_fn(*args, **kwds) tf_decorator.make_decorator(simple_parametrized_wrapper, wrapped_fn) # Write this: def simple_parametrized_wrapper(*args, **kwds): return simple_parametrized_wrapper.__wrapped__(*args, **kwds) tf_decorator.make_decorator(simple_parametrized_wrapper, wrapped_fn) Note that this process modifies decorator_func. Args: decorator_func: Callable returned by . previous_target: Callable that needs to be replaced. new_target: Callable to replace previous_target with. Returns: The updated decorator. If decorator_func is not a tf_decorator, new_target is returned.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\util\\tf_decorator.py",
    "ast_data": "FunctionDef name:rewrap arg:decorator_func arg:previous_target arg:new_target arguments arg arg arg Assign Assign Assign While Call Assign Assign Call If Compare Assign Compare If Compare Compare Return return:yes Assign If Call If Call Assign If Call Assign Assign Assign Return return:yes"
  },
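A sketch of the `__wrapped__` dispatch pattern the docstring describes; `tf_decorator` is an internal TensorFlow module, and the `make_decorator(target, decorator_func)` argument order is an assumption here:

```python
from tensorflow.python.util import tf_decorator

def wrapped_fn(x):
    return x + 1

def wrapper(*args, **kwargs):
    # Dispatch through __wrapped__ so rewrap() can swap the target later.
    return wrapper.__wrapped__(*args, **kwargs)

decorated = tf_decorator.make_decorator(wrapped_fn, wrapper)

def new_target(x):
    return x * 10

# Replace wrapped_fn with new_target without rebuilding the decorator.
decorated = tf_decorator.rewrap(decorated, wrapped_fn, new_target)
```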
  {
    "library": "pytorch",
    "name": "_extended_shape",
    "source_code": "def _extended_shape(self, sample_shape: _size=torch.Size()) -> torch.Size:\n    if not isinstance(sample_shape, torch.Size):\n        sample_shape = torch.Size(sample_shape)\n    return torch.Size(sample_shape + self._batch_shape + self._event_shape)",
    "docstring": "Returns the size of the sample returned by the distribution, given a . Note, that the batch and event shapes of a distribution instance are fixed at the time of construction. If this is empty, the returned shape is upcast to (1,). Args: sample_shape (torch.Size): the size of the sample to be drawn.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributions\\distribution.py",
    "ast_data": "FunctionDef name:_extended_shape arg:self arg:sample_shape arguments arg arg Call If Call Assign Call Return return:yes Call"
  },
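The shape arithmetic performed by `_extended_shape` is observable through any public distribution; a quick check:

```python
import torch
from torch.distributions import Normal

# batch_shape=(2, 3), event_shape=() for a factorized Normal.
d = Normal(torch.zeros(2, 3), torch.ones(2, 3))
s = d.sample(torch.Size((5,)))
# sample_shape + batch_shape + event_shape, as _extended_shape concatenates them.
assert s.shape == torch.Size((5, 2, 3))
```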
  {
    "library": "matplotlib",
    "name": "get_bbox_patch",
    "source_code": "def get_bbox_patch(self):\n    return self._bbox_patch",
    "docstring": "Return the bbox Patch, or None if the is not made. For more details see .",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\text.py",
    "ast_data": "FunctionDef name:get_bbox_patch arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "determinant",
    "source_code": "def determinant(self, name='det'):\n    if self.is_square is False:\n        raise NotImplementedError('Determinant not implemented for an operator that is expected to not be square.')\n    with self._name_scope(name):\n        return self._determinant()",
    "docstring": "Determinant for every batch member. Args: name: A name for this . Returns: with shape and same as . Raises: NotImplementedError: If is .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\linalg\\linear_operator.py",
    "ast_data": "FunctionDef name:determinant arg:self arg:name arguments arg arg If Compare Raise Call With Call Return return:yes Call"
  },
  {
    "library": "django",
    "name": "CommandParser",
    "source_code": "class CommandParser(ArgumentParser):\n\n    def __init__(self, *, missing_args_message=None, called_from_command_line=None, **kwargs):\n        self.missing_args_message = missing_args_message\n        self.called_from_command_line = called_from_command_line\n        super().__init__(**kwargs)\n\n    def parse_args(self, args=None, namespace=None):\n        if self.missing_args_message and (not (args or any((not arg.startswith('-') for arg in args)))):\n            self.error(self.missing_args_message)\n        return super().parse_args(args, namespace)\n\n    def error(self, message):\n        if self.called_from_command_line:\n            super().error(message)\n        else:\n            raise CommandError('Error: %s' % message)\n\n    def add_subparsers(self, **kwargs):\n        parser_class = kwargs.get('parser_class', type(self))\n        if issubclass(parser_class, CommandParser):\n            kwargs['parser_class'] = partial(parser_class, called_from_command_line=self.called_from_command_line)\n        return super().add_subparsers(**kwargs)",
    "docstring": "Customized ArgumentParser class to improve some error messages and prevent SystemExit in several occasions, as SystemExit is unacceptable when a command is called programmatically.",
    "type": "class",
    "file_path": "django\\django\\core\\management\\base.py",
    "ast_data": "ClassDef name:CommandParser FunctionDef name:__init__ arg:self arguments arg arg arg arg Assign Assign Call Call FunctionDef name:parse_args arg:self arg:args arg:namespace arguments arg arg arg If BoolOp BoolOp Call Call Call Return return:yes Call Call FunctionDef name:error arg:self arg:message arguments arg arg If Call Call Raise Call FunctionDef name:add_subparsers arg:self arguments arg arg Assign Call Call If Call Assign Call Return return:yes Call Call"
  },
  {
    "library": "django",
    "name": "get_distance",
    "source_code": "def get_distance(self, f, value, lookup_type):\n    if not value:\n        return []\n    value = value[0]\n    if isinstance(value, Distance):\n        if f.geodetic(self.connection):\n            if lookup_type == 'dwithin':\n                raise ValueError('Only numeric values of degree units are allowed on geographic DWithin queries.')\n            dist_param = value.m\n        else:\n            dist_param = getattr(value, Distance.unit_attname(f.units_name(self.connection)))\n    else:\n        dist_param = value\n    return [dist_param]",
    "docstring": "Return the distance parameters for the given geometry field, lookup value, and lookup type.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\db\\backends\\spatialite\\operations.py",
    "ast_data": "FunctionDef name:get_distance arg:self arg:f arg:value arg:lookup_type arguments arg arg arg arg If Return return:no Assign If Call If Call If Compare Raise Call Assign Assign Call Call Call Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, node_def, op, message, *args):\n    super(CancelledError, self).__init__(node_def, op, message, CANCELLED, *args)",
    "docstring": "Creates a .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\errors_impl.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:node_def arg:op arg:message arguments arg arg arg arg arg Call Call"
  },
  {
    "library": "django",
    "name": "unref_alias",
    "source_code": "def unref_alias(self, alias, amount=1):\n    self.alias_refcount[alias] -= amount",
    "docstring": "Decreases the reference count for this alias.",
    "type": "method",
    "file_path": "django\\django\\db\\models\\sql\\query.py",
    "ast_data": "FunctionDef name:unref_alias arg:self arg:alias arg:amount arguments arg arg arg"
  },
  {
    "library": "sphinx",
    "name": "PyCurrentModule",
    "source_code": "class PyCurrentModule(SphinxDirective):\n    has_content = False\n    required_arguments = 1\n    optional_arguments = 0\n    final_argument_whitespace = False\n    option_spec: ClassVar[OptionSpec] = {}\n\n    def run(self) -> list[Node]:\n        modname = self.arguments[0].strip()\n        if modname == 'None':\n            self.env.ref_context.pop('py:module', None)\n        else:\n            self.env.ref_context['py:module'] = modname\n        return []",
    "docstring": "This directive is just to tell Sphinx that we're documenting stuff in module foo, but links to module foo won't lead here.",
    "type": "class",
    "file_path": "sphinx\\sphinx\\domains\\python\\__init__.py",
    "ast_data": "ClassDef name:PyCurrentModule Assign Assign Assign Assign FunctionDef name:run arg:self arguments arg Assign Call If Compare Call Assign Return return:no"
  },
  {
    "library": "matplotlib",
    "name": "tight_layout",
    "source_code": "def tight_layout(self, *, pad=1.08, h_pad=None, w_pad=None, rect=None):\n    engine = TightLayoutEngine(pad=pad, h_pad=h_pad, w_pad=w_pad, rect=rect)\n    try:\n        previous_engine = self.get_layout_engine()\n        self.set_layout_engine(engine)\n        engine.execute(self)\n        if previous_engine is not None and (not isinstance(previous_engine, (TightLayoutEngine, PlaceHolderLayoutEngine))):\n            _api.warn_external('The figure layout has changed to tight')\n    finally:\n        self.set_layout_engine('none')",
    "docstring": "Adjust the padding between and around subplots. To exclude an artist on the Axes from the bounding box calculation that determines the subplot parameters (i.e. legend, or annotation), set `` for that artist. Parameters ---------- pad : float, default: 1.08 Padding between the figure edge and the edges of subplots, as a fraction of the font size. h_pad, w_pad : float, default: *pad* Padding (height/width) between edges of adjacent subplots, as a fraction of the font size. rect : tuple (left, bottom, right, top), default: (0, 0, 1, 1) A rectangle in normalized figure coordinates into which the whole subplots area (including labels) will fit. See Also -------- .Figure.set_layout_engine .pyplot.tight_layout",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\figure.py",
    "ast_data": "FunctionDef name:tight_layout arg:self arguments arg arg arg arg arg Assign Call Try Assign Call Call Call If BoolOp Compare Call Call Call"
  },
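Typical use of the `tight_layout` entry above; the figure contents and padding values are arbitrary:

```python
import matplotlib
matplotlib.use("Agg")  # headless backend for the example
import matplotlib.pyplot as plt

fig, axs = plt.subplots(2, 2)
for ax in axs.flat:
    ax.set_title("panel")
# One-shot layout pass; per the source above, the layout engine is
# reset to 'none' afterwards, so later draws do not re-run it.
fig.tight_layout(pad=1.08, h_pad=0.5, w_pad=0.5)
fig.savefig("grid.png")
```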
  {
    "library": "pytorch",
    "name": "data_ptr",
    "source_code": "def data_ptr(self) -> int:\n    return self._data_ptr",
    "docstring": "NB: returns the data ptr even if the storage has expired",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\cudagraph_trees.py",
    "ast_data": "FunctionDef name:data_ptr arg:self arguments arg Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "close",
    "source_code": "def close(self):\n    self._file.write(b'\\\\end{document}\\\\n')\n    if self._n_figures > 0:\n        self._run_latex()\n    self._file.close()",
    "docstring": "Finalize this object, running LaTeX in a temporary directory and moving the final pdf file to *filename*.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\backends\\backend_pgf.py",
    "ast_data": "FunctionDef name:close arg:self arguments arg Call If Compare Call Call"
  },
  {
    "library": "pytorch",
    "name": "python_version",
    "source_code": "def python_version(self):\n    python_version_path = '.data/python_version'\n    return self.zip_reader.get_record(python_version_path).decode('utf-8').strip() if self.zip_reader.has_record(python_version_path) else None",
    "docstring": "Returns the version of python that was used to create this package. Note: this function is experimental and not Forward Compatible. The plan is to move this into a lock file later on. Returns: :class: a python version e.g. 3.8.9 or None if no version was stored with this package",
    "type": "method",
    "file_path": "pytorch\\torch\\package\\package_importer.py",
    "ast_data": "FunctionDef name:python_version arg:self arguments arg Assign Return return:yes Call Call Call Call"
  },
  {
    "library": "scikit-learn",
    "name": "partial_fit",
    "source_code": "@_fit_context(prefer_skip_nested_validation=True)\ndef partial_fit(self, X, y):\n    if not hasattr(self, 'coef_'):\n        self._more_validate_params(for_partial_fit=True)\n    lr = 'pa1' if self.loss == 'epsilon_insensitive' else 'pa2'\n    return self._partial_fit(X, y, alpha=1.0, C=self.C, loss='epsilon_insensitive', learning_rate=lr, max_iter=1, sample_weight=None, coef_init=None, intercept_init=None)",
    "docstring": "Fit linear model with Passive Aggressive algorithm. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) Subset of training data. y : numpy array of shape [n_samples] Subset of target values. Returns ------- self : object Fitted estimator.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\linear_model\\_passive_aggressive.py",
    "ast_data": "FunctionDef name:partial_fit arg:self arg:X arg:y arguments arg arg arg If Call Call Assign Compare Return return:yes Call Call"
  },
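A streaming-fit sketch for the `partial_fit` entry above; the synthetic data and batch size are illustrative:

```python
import numpy as np
from sklearn.linear_model import PassiveAggressiveRegressor

rng = np.random.default_rng(0)
est = PassiveAggressiveRegressor()
for _ in range(5):  # feed mini-batches as they arrive
    X = rng.normal(size=(32, 4))
    y = X @ np.array([1.0, -2.0, 0.5, 0.0])
    est.partial_fit(X, y)  # one pass (max_iter=1) per call, per the source
```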
  {
    "library": "django",
    "name": "num_points",
    "source_code": "@property\ndef num_points(self):\n    return self.num_coords",
    "docstring": "Return the number points, or coordinates, in the Geometry.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\geos\\geometry.py",
    "ast_data": "FunctionDef name:num_points arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "get_inputs_outputs",
    "source_code": "def get_inputs_outputs(signature_def):\n    inputs_tensor_info = signature_def.inputs\n    outputs_tensor_info = signature_def.outputs\n\n    def gather_names(tensor_info):\n        return [tensor_info[key].name for key in tensor_info]\n    inputs = gather_names(inputs_tensor_info)\n    outputs = gather_names(outputs_tensor_info)\n    return (inputs, outputs)",
    "docstring": "Get inputs and outputs from SignatureDef. Args: signature_def: SignatureDef in the meta_graph_def for conversion. Returns: The inputs and outputs in the graph for conversion.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\lite\\python\\convert_saved_model.py",
    "ast_data": "FunctionDef name:get_inputs_outputs arg:signature_def arguments arg Assign Assign FunctionDef name:gather_names arg:tensor_info arguments arg Return return:yes Assign Call Assign Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "clone",
    "source_code": "def clone(self) -> 'inference_mode':\n    return self.__class__(self.mode)",
    "docstring": "Create a copy of this class",
    "type": "method",
    "file_path": "pytorch\\torch\\autograd\\grad_mode.py",
    "ast_data": "FunctionDef name:clone arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "create_small_matmul_savedmodel",
    "source_code": "def create_small_matmul_savedmodel(out_dir):\n    root = autotrackable.AutoTrackable()\n    root.f = def_function.function(lambda x, y: math_ops.matmul(x, y), input_signature=[tensor_spec.TensorSpec([3, 5], dtypes.float32), tensor_spec.TensorSpec([5, 4], dtypes.float32)])\n    root.f(x=array_ops.zeros((3, 5)), y=array_ops.zeros((5, 4)))\n    save_dir = os.path.join(out_dir, 'x_matmul_y_small')\n    save.save(root, save_dir, root.f)\n    with open(os.path.join(save_dir, 'variables', 'variables.index'), 'w'):\n        pass",
    "docstring": "Create a SavedModel that performs a small matmul.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\tools\\make_aot_compile_models.py",
    "ast_data": "FunctionDef name:create_small_matmul_savedmodel arg:out_dir arguments arg Assign Call Assign Call arguments arg arg Call Call Call Call Call Call Assign Call Call With Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_static_check_for_same_dimensions",
    "source_code": "def _static_check_for_same_dimensions(operators):\n    if len(operators) < 2:\n        return\n    domain_dimensions = [(op.name, tensor_shape.dimension_value(op.domain_dimension)) for op in operators if tensor_shape.dimension_value(op.domain_dimension) is not None]\n    if len(set((value for name, value in domain_dimensions))) > 1:\n        raise ValueError(f'All `operators` must have the same `domain_dimension`. Received: {domain_dimensions}.')\n    range_dimensions = [(op.name, tensor_shape.dimension_value(op.range_dimension)) for op in operators if tensor_shape.dimension_value(op.range_dimension) is not None]\n    if len(set((value for name, value in range_dimensions))) > 1:\n        raise ValueError(f'All operators must have the same `range_dimension`. Received: {range_dimensions}.')",
    "docstring": "ValueError if operators determined to have different dimensions.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\linalg\\linear_operator_addition.py",
    "ast_data": "FunctionDef name:_static_check_for_same_dimensions arg:operators arguments arg If Compare Call Return return:no Assign Call Compare Call If Compare Call Call Raise Call Assign Call Compare Call If Compare Call Call Raise Call"
  },
  {
    "library": "tensorflow",
    "name": "is_generator_or_sequence",
    "source_code": "def is_generator_or_sequence(x):\n    builtin_iterators = (str, list, tuple, dict, set, frozenset)\n    if isinstance(x, (tensor.Tensor, np.ndarray) + builtin_iterators):\n        return False\n    return tf_inspect.isgenerator(x) or isinstance(x, Sequence) or isinstance(x, typing.Iterator)",
    "docstring": "Check if is a Keras generator type.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\utils\\data_utils.py",
    "ast_data": "FunctionDef name:is_generator_or_sequence arg:x arguments arg Assign If Call Return return:yes Return return:yes BoolOp Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "scope_name",
    "source_code": "def scope_name():\n    return context().scope_name",
    "docstring": "Name of the current scope.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\context.py",
    "ast_data": "FunctionDef name:scope_name arguments Return return:yes Call"
  },
  {
    "library": "sphinx",
    "name": "ThemeError",
    "source_code": "class ThemeError(SphinxError):\n    category = 'Theme error'",
    "docstring": "Theme error.",
    "type": "class",
    "file_path": "sphinx\\sphinx\\errors.py",
    "ast_data": "ClassDef name:ThemeError Assign"
  },
  {
    "library": "sphinx",
    "name": "is_builtin_classmethod_like",
    "source_code": "def is_builtin_classmethod_like(obj: Any, cls: Any=None, name: str | None=None) -> bool:\n    if is_classmethod_descriptor(obj, cls, name):\n        return True\n    if isbuiltin(obj) and getattr(obj, '__self__', None) is not None and isclass(obj.__self__):\n        return True\n    if cls and name:\n        sentinel = object()\n        for basecls in getmro(cls):\n            meth = basecls.__dict__.get(name, sentinel)\n            if meth is not sentinel:\n                return is_classmethod_descriptor(meth, None, None) or (isbuiltin(meth) and getattr(meth, '__self__', None) is not None and isclass(meth.__self__))\n    return False",
    "docstring": "Check if the object looks like a class method implemented in C. This is equivalent to test that *obj* is a class method descriptor or is a built-in object with a `` satisfies those properties for some parent class in *cls* MRO.",
    "type": "function",
    "file_path": "sphinx\\sphinx\\util\\inspect.py",
    "ast_data": "FunctionDef name:is_builtin_classmethod_like arg:obj arg:cls arg:name arguments arg arg arg If Call Return return:yes If BoolOp Call Compare Call Call Return return:yes If BoolOp Assign Call For Call Assign Call If Compare Return return:yes BoolOp Call BoolOp Call Compare Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "update_renames_v2",
    "source_code": "def update_renames_v2(output_file_path):\n    function_renames = collect_function_renames()\n    constant_renames = collect_constant_renames()\n    all_renames = function_renames.union(constant_renames)\n    manual_renames = all_renames_v2.manual_symbol_renames\n    rename_lines = [get_rename_line(name, canonical_name) for name, canonical_name in all_renames if 'tf.' + name not in manual_renames]\n    renames_file_text = '%srenames = {\\n%s\\n}\\n' % (_FILE_HEADER, ',\\n'.join(sorted(rename_lines)))\n    file_io.write_string_to_file(output_file_path, renames_file_text)",
    "docstring": "Writes a Python dictionary mapping deprecated to canonical API names. Args: output_file_path: File path to write output to. Any existing contents would be replaced.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\tools\\compatibility\\update\\generate_v2_renames_map.py",
    "ast_data": "FunctionDef name:update_renames_v2 arg:output_file_path arguments arg Assign Call Assign Call Assign Call Assign Assign Call Compare Assign Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "update_checkpoint_state",
    "source_code": "@deprecation.deprecated(date=None, instructions='Use `tf.train.CheckpointManager` to manage checkpoints rather than manually editing the Checkpoint proto.')\n@tf_export(v1=['train.update_checkpoint_state'])\ndef update_checkpoint_state(save_dir, model_checkpoint_path, all_model_checkpoint_paths=None, latest_filename=None, all_model_checkpoint_timestamps=None, last_preserved_timestamp=None):\n    update_checkpoint_state_internal(save_dir=save_dir, model_checkpoint_path=model_checkpoint_path, all_model_checkpoint_paths=all_model_checkpoint_paths, latest_filename=latest_filename, save_relative_paths=False, all_model_checkpoint_timestamps=all_model_checkpoint_timestamps, last_preserved_timestamp=last_preserved_timestamp)",
    "docstring": "Updates the content of the 'checkpoint' file. This updates the checkpoint file containing a CheckpointState proto. Args: save_dir: Directory where the model was saved. model_checkpoint_path: The checkpoint file. all_model_checkpoint_paths: List of strings. Paths to all not-yet-deleted checkpoints, sorted from oldest to newest. If this is a non-empty list, the last element must be equal to model_checkpoint_path. These paths are also saved in the CheckpointState proto. latest_filename: Optional name of the checkpoint file. Default to 'checkpoint'. all_model_checkpoint_timestamps: Optional list of timestamps (floats, seconds since the Epoch) indicating when the checkpoints in were created. last_preserved_timestamp: A float, indicating the number of seconds since the Epoch when the last preserved checkpoint was written, e.g. due to a parameter (see for an implementation). Raises: RuntimeError: If any of the model checkpoint paths conflict with the file containing CheckpointSate.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\checkpoint\\checkpoint_management.py",
    "ast_data": "FunctionDef name:update_checkpoint_state arg:save_dir arg:model_checkpoint_path arg:all_model_checkpoint_paths arg:latest_filename arg:all_model_checkpoint_timestamps arg:last_preserved_timestamp arguments arg arg arg arg arg arg Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "_import_kernel",
    "source_code": "def _import_kernel(self, code: str, kernel_name: str) -> CachingAutotuner:\n    module_code = '\\n'.join([self.prologue, code])\n    mod = PyCodeCache.load(module_code)\n    kernel = getattr(mod, kernel_name)\n    if not isinstance(kernel, CachingAutotuner):\n        raise NotImplementedError(textwrap.dedent(f'\\n                Unsupported type for kernel {kernel_name}: {type(kernel)}.\\n                FX conversion only supports Triton kernels.\\n            '))\n    return kernel",
    "docstring": "Imports a kernel from source, possibly autotuning block parameters.",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\codegen\\wrapper_fxir.py",
    "ast_data": "FunctionDef name:_import_kernel arg:self arg:code arg:kernel_name arguments arg arg arg Assign Call Assign Call Assign Call If Call Raise Call Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "CachedMetricsHelper",
    "source_code": "class CachedMetricsHelper:\n\n    def __init__(self) -> None:\n        self.cached_metrics = {}\n        for metric in get_metric_fields():\n            self.cached_metrics[metric] = globals()[metric]\n\n    def get_deltas(self) -> CachedMetricsDeltas:\n        delta_metrics = {}\n        for metric in get_metric_fields():\n            delta_metrics[metric] = globals()[metric] - self.cached_metrics[metric]\n        return CachedMetricsDeltas(**delta_metrics)\n\n    @staticmethod\n    def apply_deltas(delta: CachedMetricsDeltas) -> None:\n        for metric in get_metric_fields():\n            globals()[metric] += getattr(delta, metric)",
    "docstring": "A helper class to help calculate and apply counter deltas for those metrics we want to save with cache entries (e.g., FxGraphCache) and apply on a cache hit.",
    "type": "class",
    "file_path": "pytorch\\torch\\_inductor\\metrics.py",
    "ast_data": "ClassDef name:CachedMetricsHelper FunctionDef name:__init__ arg:self arguments arg Assign For Call Assign Call FunctionDef name:get_deltas arg:self arguments arg Assign For Call Assign Call Return return:yes Call FunctionDef name:apply_deltas arg:delta arguments arg For Call Call Call"
  },
  {
    "library": "django",
    "name": "Node",
    "source_code": "@total_ordering\nclass Node:\n\n    def __init__(self, key):\n        self.key = key\n        self.children = set()\n        self.parents = set()\n\n    def __eq__(self, other):\n        return self.key == other\n\n    def __lt__(self, other):\n        return self.key < other\n\n    def __hash__(self):\n        return hash(self.key)\n\n    def __getitem__(self, item):\n        return self.key[item]\n\n    def __str__(self):\n        return str(self.key)\n\n    def __repr__(self):\n        return '<%s: (%r, %r)>' % (self.__class__.__name__, self.key[0], self.key[1])\n\n    def add_child(self, child):\n        self.children.add(child)\n\n    def add_parent(self, parent):\n        self.parents.add(parent)",
    "docstring": "A single node in the migration graph. Contains direct links to adjacent nodes in either direction.",
    "type": "class",
    "file_path": "django\\django\\db\\migrations\\graph.py",
    "ast_data": "ClassDef name:Node FunctionDef name:__init__ arg:self arg:key arguments arg arg Assign Assign Call Assign Call FunctionDef name:__eq__ arg:self arg:other arguments arg arg Return return:yes Compare FunctionDef name:__lt__ arg:self arg:other arguments arg arg Return return:yes Compare FunctionDef name:__hash__ arg:self arguments arg Return return:yes Call FunctionDef name:__getitem__ arg:self arg:item arguments arg arg Return return:yes FunctionDef name:__str__ arg:self arguments arg Return return:yes Call FunctionDef name:__repr__ arg:self arguments arg Return return:yes FunctionDef name:add_child arg:self arg:child arguments arg arg Call FunctionDef name:add_parent arg:self arg:parent arguments arg arg Call"
  },
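The migration-graph `Node` compares, hashes, and indexes via its key, so it interoperates with plain tuples; a small check (the app and migration names are made up):

```python
from django.db.migrations.graph import Node

a = Node(("app", "0001_initial"))
b = Node(("app", "0002_change"))
a.add_child(b)
b.add_parent(a)
assert a == ("app", "0001_initial")  # __eq__ delegates to the key
assert a[0] == "app"                 # __getitem__ indexes into the key
```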
  {
    "library": "scikit-learn",
    "name": "staged_score",
    "source_code": "def staged_score(self, X, y, sample_weight=None):\n    X = self._check_X(X)\n    for y_pred in self.staged_predict(X):\n        if is_classifier(self):\n            yield accuracy_score(y, y_pred, sample_weight=sample_weight)\n        else:\n            yield r2_score(y, y_pred, sample_weight=sample_weight)",
    "docstring": "Return staged scores for X, y. This generator method yields the ensemble score after each iteration of boosting and therefore allows monitoring, such as to determine the score on a test set after each boost. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) The training input samples. Sparse matrix can be CSC, CSR, COO, DOK, or LIL. COO, DOK, and LIL are converted to CSR. y : array-like of shape (n_samples,) Labels for X. sample_weight : array-like of shape (n_samples,), default=None Sample weights. Yields ------ z : float",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\ensemble\\_weight_boosting.py",
    "ast_data": "FunctionDef name:staged_score arg:self arg:X arg:y arg:sample_weight arguments arg arg arg arg Assign Call For Call If Call Call Call"
  },
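Generic use of `staged_score` with a boosted ensemble, as the docstring suggests for monitoring; the dataset here is synthetic:

```python
from sklearn.datasets import make_classification
from sklearn.ensemble import AdaBoostClassifier
from sklearn.model_selection import train_test_split

X, y = make_classification(random_state=0)
X_tr, X_te, y_tr, y_te = train_test_split(X, y, random_state=0)
clf = AdaBoostClassifier(n_estimators=20, random_state=0).fit(X_tr, y_tr)
# One accuracy value per boosting iteration; handy for early-stopping curves.
test_curve = list(clf.staged_score(X_te, y_te))
```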
  {
    "library": "django",
    "name": "_get_ext_ring",
    "source_code": "def _get_ext_ring(self):\n    return self[0]",
    "docstring": "Get the exterior ring of the Polygon.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\geos\\polygon.py",
    "ast_data": "FunctionDef name:_get_ext_ring arg:self arguments arg Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "get_width_char",
    "source_code": "def get_width_char(self, c, isord=False):\n    if not isord:\n        c = ord(c)\n    return self._metrics[c].width",
    "docstring": "Get the width of the character from the character metric WX field.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\_afm.py",
    "ast_data": "FunctionDef name:get_width_char arg:self arg:c arg:isord arguments arg arg arg If Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "core",
    "source_code": "@tf_export(v1=['tpu.core'])\ndef core(num: int) -> Text:\n    return 'device:TPU_REPLICATED_CORE:{}'.format(num)",
    "docstring": "Returns the device name for a core in a replicated TPU computation. Args: num: the virtual core number within each replica to which operators should be assigned. Returns: A device name, suitable for passing to .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\tpu\\tpu_name_util.py",
    "ast_data": "FunctionDef name:core arg:num arguments arg Return return:yes Call Call"
  },
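The helper is a pure string formatter, so its behavior is easy to show; accessible via the v1 export:

```python
import tensorflow as tf

# Device string for virtual core 1 of each replica, suitable for tf.device().
print(tf.compat.v1.tpu.core(1))  # device:TPU_REPLICATED_CORE:1
```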
  {
    "library": "tensorflow",
    "name": "_While",
    "source_code": "class _While(_FunctionCaller):\n\n    def __init__(self, node, function, enclosing_graph):\n        super(_While, self).__init__(node, function, enclosing_graph, first_function_input=0, type_attribute='T', function_attributes=['body', 'cond'])\n\n    def convert_variable_to_constant(self, incoming_edge, tensor_data):\n        super(_While, self).convert_variable_to_constant(incoming_edge, tensor_data)\n        node = self.converted_self()\n        if node.node.attr['output_shapes'].list.shape:\n            node.node.attr['output_shapes'].list.shape[incoming_edge.destination.index].CopyFrom(tensor_shape_pb2.TensorShapeProto(dim=[tensor_shape_pb2.TensorShapeProto.Dim(size=dim) for dim in tensor_data.numpy.shape]))\n        body_name = self._node.attr['body'].func.name\n        body = self._enclosing_graph.functions[body_name].converted_self().function\n        body.signature.output_arg[incoming_edge.destination.index].type = tensor_data.dtype",
    "docstring": "Specialization of _Node to While-like operations.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\convert_to_constants.py",
    "ast_data": "ClassDef name:_While FunctionDef name:__init__ arg:self arg:node arg:function arg:enclosing_graph arguments arg arg arg arg Call Call FunctionDef name:convert_variable_to_constant arg:self arg:incoming_edge arg:tensor_data arguments arg arg arg Call Call Assign Call If Call Call Call Assign Assign Call Assign"
  },
  {
    "library": "seaborn",
    "name": "_check_var_list_or_boolean",
    "source_code": "def _check_var_list_or_boolean(self, param: str, grouping_vars: Any) -> None:\n    value = getattr(self, param)\n    if not (isinstance(value, bool) or (isinstance(value, list) and all((isinstance(v, str) for v in value)))):\n        param_name = f'{self.__class__.__name__}.{param}'\n        raise TypeError(f'{param_name} must be a boolean or list of strings.')\n    self._check_grouping_vars(param, grouping_vars, stacklevel=3)",
    "docstring": "Do input checks on grouping parameters.",
    "type": "method",
    "file_path": "seaborn\\seaborn\\_stats\\density.py",
    "ast_data": "FunctionDef name:_check_var_list_or_boolean arg:self arg:param arg:grouping_vars arguments arg arg arg Assign Call If BoolOp Call BoolOp Call Call Call Assign Raise Call Call"
  },
  {
    "library": "pytorch",
    "name": "template",
    "source_code": "def template(num_stages, num_warps, triton_meta, num_consumer_groups=0, num_buffers_warp_spec=0, filename=None, inductor_meta=None):\n    config_args = {'num_stages': num_stages, 'num_warps': num_warps}\n    if HAS_WARP_SPEC:\n        config_args.update({'num_consumer_groups': num_consumer_groups, 'num_buffers_warp_spec': num_buffers_warp_spec})\n    return cached_autotune(None, [triton.Config({}, **config_args)], triton_meta=triton_meta, inductor_meta=inductor_meta, heuristic_type=HeuristicType.TEMPLATE, filename=filename)",
    "docstring": "Compile a triton template",
    "type": "function",
    "file_path": "pytorch\\torch\\_inductor\\runtime\\triton_heuristics.py",
    "ast_data": "FunctionDef name:template arg:num_stages arg:num_warps arg:triton_meta arg:num_consumer_groups arg:num_buffers_warp_spec arg:filename arg:inductor_meta arguments arg arg arg arg arg arg arg Assign If Call Return return:yes Call Call"
  },
  {
    "library": "scipy",
    "name": "_process_parameters",
    "source_code": "def _process_parameters(self, dim):\n    if dim is None or not np.isscalar(dim) or dim < 1 or (dim != int(dim)):\n        raise ValueError('Dimension of vector must be specified, and must be an integer greater than 0.')\n    return int(dim)",
    "docstring": "Dimension N must be specified; it cannot be inferred.",
    "type": "method",
    "file_path": "scipy\\scipy\\stats\\_multivariate.py",
    "ast_data": "FunctionDef name:_process_parameters arg:self arg:dim arguments arg arg If BoolOp Compare Call Compare Compare Call Raise Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "update_non_slot",
    "source_code": "def update_non_slot(self, colocate_with, fn, args=(), kwargs=None, group=True):\n    _require_cross_replica_or_default_context_extended(self)\n    if kwargs is None:\n        kwargs = {}\n    fn = autograph.tf_convert(fn, autograph_ctx.control_status_ctx(), convert_by_default=False)\n    with self._container_strategy().scope():\n        return self._update_non_slot(colocate_with, fn, args, kwargs, group)",
    "docstring": "Runs on devices. Used to update non-slot variables. DEPRECATED: TF 1.x ONLY. Args: colocate_with: Devices returned by . fn: Function to execute. args: Tuple or list. Positional arguments to pass to . kwargs: Dict with keyword arguments to pass to . group: Boolean. Defaults to True. If False, the return value will be unwrapped. Returns: Return value of , possibly merged across devices.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\distribute_lib.py",
    "ast_data": "FunctionDef name:update_non_slot arg:self arg:colocate_with arg:fn arg:args arg:kwargs arg:group arguments arg arg arg arg arg arg Call If Compare Assign Assign Call Call With Call Call Return return:yes Call"
  },
  {
    "library": "numpy",
    "name": "all",
    "source_code": "def all(self, axis=None, out=None):\n    return N.ndarray.all(self, axis, out, keepdims=True)._collapse(axis)",
    "docstring": "Test whether all matrix elements along a given axis evaluate to True. Parameters ---------- See for complete descriptions See Also -------- numpy.all Notes ----- This is the same as , but it returns a object. Examples -------- >>> x = np.matrix(np.arange(12).reshape((3,4))); x matrix([[ 0, 1, 2, 3], [ 4, 5, 6, 7], [ 8, 9, 10, 11]]) >>> y = x[0]; y matrix([[0, 1, 2, 3]]) >>> (x == y) matrix([[ True, True, True, True], [False, False, False, False], [False, False, False, False]]) >>> (x == y).all() False >>> (x == y).all(0) matrix([[False, False, False, False]]) >>> (x == y).all(1) matrix([[ True], [False], [False]])",
    "type": "method",
    "file_path": "numpy\\numpy\\matrixlib\\defmatrix.py",
    "ast_data": "FunctionDef name:all arg:self arg:axis arg:out arguments arg arg arg Return return:yes Call Call"
  },
  {
    "library": "django",
    "name": "for_update_sql",
    "source_code": "def for_update_sql(self, nowait=False, skip_locked=False, of=(), no_key=False):\n    return 'FOR%s UPDATE%s%s%s' % (' NO KEY' if no_key else '', ' OF %s' % ', '.join(of) if of else '', ' NOWAIT' if nowait else '', ' SKIP LOCKED' if skip_locked else '')",
    "docstring": "Return the FOR UPDATE SQL clause to lock rows for an update operation.",
    "type": "method",
    "file_path": "django\\django\\db\\backends\\base\\operations.py",
    "ast_data": "FunctionDef name:for_update_sql arg:self arg:nowait arg:skip_locked arg:of arg:no_key arguments arg arg arg arg arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_logical_equal",
    "source_code": "def _logical_equal(x, y):\n    x_ = _static_value(x)\n    y_ = _static_value(y)\n    if x_ is None or y_ is None:\n        return math_ops.equal(x, y)\n    return constant_op.constant(np.array_equal(x_, y_))",
    "docstring": "Convenience function which attempts to statically compute .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\distributions\\transformed_distribution.py",
    "ast_data": "FunctionDef name:_logical_equal arg:x arg:y arguments arg arg Assign Call Assign Call If BoolOp Compare Compare Return return:yes Call Return return:yes Call Call"
  },
  {
    "library": "scipy",
    "name": "hfftn",
    "source_code": "@_dispatch\ndef hfftn(x, s=None, axes=None, norm=None, overwrite_x=False, workers=None, *, plan=None):\n    return (Dispatchable(x, np.ndarray),)",
    "docstring": "Compute the N-D FFT of Hermitian symmetric complex input, i.e., a signal with a real spectrum. This function computes the N-D discrete Fourier Transform for a Hermitian symmetric complex input over any number of axes in an M-D array by means of the Fast Fourier Transform (FFT). In other words, `irfftssslen(s)sfftxfft~scipy.fft.fftaxessxsssssaxesaxesxhfftns` assumes an even output length in the final transformation axis. When performing the final complex to real transformation, the Hermitian symmetry requires that the last imaginary component along that axis must be 0 and so it is ignored. To avoid losing information, the correct length of the real input *must* be given. Examples -------- >>> import scipy.fft >>> import numpy as np >>> x = np.ones((3, 2, 2)) >>> scipy.fft.hfftn(x) array([[[12., 0.], [ 0., 0.]], [[ 0., 0.], [ 0., 0.]], [[ 0., 0.], [ 0., 0.]]])",
    "type": "function",
    "file_path": "scipy\\scipy\\fft\\_basic.py",
    "ast_data": "FunctionDef name:hfftn arg:x arg:s arg:axes arg:norm arg:overwrite_x arg:workers arguments arg arg arg arg arg arg arg Return return:yes Call"
  },
  {
    "library": "django",
    "name": "get_languages_for_item",
    "source_code": "def get_languages_for_item(self, item):\n    return self._languages()",
    "docstring": "Languages for which this item is displayed.",
    "type": "method",
    "file_path": "django\\django\\contrib\\sitemaps\\__init__.py",
    "ast_data": "FunctionDef name:get_languages_for_item arg:self arg:item arguments arg arg Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "trigger_tool",
    "source_code": "def trigger_tool(self, name):\n    self.toolmanager.trigger_tool(name, sender=self)",
    "docstring": "Trigger the tool. Parameters ---------- name : str Name (id) of the tool triggered from within the container.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\backend_bases.py",
    "ast_data": "FunctionDef name:trigger_tool arg:self arg:name arguments arg arg Call"
  },
  {
    "library": "tensorflow",
    "name": "__call__",
    "source_code": "def __call__(self, inputs, state, scope=None):\n    return self._call_wrapped_cell(inputs, state, cell_call_fn=self.cell.__call__, scope=scope)",
    "docstring": "Runs the RNN cell step computation. We assume that the wrapped RNNCell is being built within its method. We directly use the wrapped cell's in the overridden wrapper method. This allows to use the wrapped cell and the non-wrapped cell equivalently when using . Args: inputs: A tensor with wrapped cell's input. state: A tensor or tuple of tensors with wrapped cell's state. scope: VariableScope for the subgraph created in the wrapped cells' . Returns: A pair containing: - Output: A tensor with cell's output. - New state: A tensor or tuple of tensors with new wrapped cell's state.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\layers\\legacy_rnn\\rnn_cell_impl.py",
    "ast_data": "FunctionDef name:__call__ arg:self arg:inputs arg:state arg:scope arguments arg arg arg arg Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "transform",
    "source_code": "def transform(self, K, copy=True):\n    check_is_fitted(self)\n    xp, _ = get_namespace(K)\n    K = validate_data(self, K, copy=copy, force_writeable=True, dtype=_array_api.supported_float_dtypes(xp), reset=False)\n    K_pred_cols = (xp.sum(K, axis=1) / self.K_fit_rows_.shape[0])[:, None]\n    K -= self.K_fit_rows_\n    K -= K_pred_cols\n    K += self.K_fit_all_\n    return K",
    "docstring": "Center kernel matrix. Parameters ---------- K : ndarray of shape (n_samples1, n_samples2) Kernel matrix. copy : bool, default=True Set to False to perform inplace computation. Returns ------- K_new : ndarray of shape (n_samples1, n_samples2) Returns the instance itself.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\preprocessing\\_data.py",
    "ast_data": "FunctionDef name:transform arg:self arg:K arg:copy arguments arg arg arg Call Assign Call Assign Call Call Assign Call Return return:yes"
  },
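End-to-end use of the centering transform above; the linear kernel on random data is just for demonstration:

```python
import numpy as np
from sklearn.metrics.pairwise import pairwise_kernels
from sklearn.preprocessing import KernelCenterer

X = np.random.RandomState(0).normal(size=(10, 3))
K = pairwise_kernels(X, metric="linear")
K_centered = KernelCenterer().fit(K).transform(K)
# Centering in feature space zeroes the kernel's row and column means.
assert np.allclose(K_centered.mean(axis=0), 0.0)
```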
  {
    "library": "matplotlib",
    "name": "reset",
    "source_code": "def reset(self):\n    if np.any(self.val != self.valinit):\n        self.set_val(self.valinit)",
    "docstring": "Reset the slider to the initial value.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\widgets.py",
    "ast_data": "FunctionDef name:reset arg:self arguments arg If Call Compare Call"
  },
  {
    "library": "kornia",
    "name": "perform_keep_shape_video",
    "source_code": "def perform_keep_shape_video(f: Callable[..., Tensor]) -> Callable[..., Tensor]:\n\n    @wraps(f)\n    def _wrapper(input: Tensor, *args: Any, **kwargs: Any) -> Tensor:\n        if not isinstance(input, Tensor):\n            raise TypeError(f'Input input type is not a Tensor. Got {type(input)}')\n        if input.numel() == 0:\n            raise ValueError('Invalid input tensor, it is empty.')\n        input_shape = input.shape\n        input = _to_bcdhw(input)\n        output = f(input, *args, **kwargs)\n        if len(input_shape) == 4:\n            output = output[0]\n        if len(input_shape) == 3:\n            output = output[0, 0]\n        if len(input_shape) > 5:\n            output = output.view(*input_shape[:-4] + output.shape[-4:])\n        return output\n    return _wrapper",
    "docstring": "Apply to an image of arbitrary leading dimensions . It works by first viewing the image as , applying the function and re-viewing the image as original shape.",
    "type": "function",
    "file_path": "kornia\\kornia\\utils\\image.py",
    "ast_data": "FunctionDef name:perform_keep_shape_video arg:f arguments arg FunctionDef name:_wrapper arg:input arguments arg arg arg If Call Raise Call Call If Compare Call Raise Call Assign Assign Call Assign Call If Compare Call Assign If Compare Call Assign If Compare Call Assign Call Return return:yes Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "name",
    "source_code": "@property\ndef name(self):\n    return self.key",
    "docstring": "See base class.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\feature_column\\feature_column_v2.py",
    "ast_data": "FunctionDef name:name arg:self arguments arg Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "_get_param_names",
    "source_code": "def _get_param_names(self, *, method, return_alias, ignore_self_request):\n    res = set()\n    if self._self_request and (not ignore_self_request):\n        res = res.union(self._self_request._get_param_names(method=method, return_alias=return_alias))\n    for name, route_mapping in self._route_mappings.items():\n        for caller, callee in route_mapping.mapping:\n            if caller == method:\n                res = res.union(route_mapping.router._get_param_names(method=callee, return_alias=True, ignore_self_request=False))\n    return res",
    "docstring": "Get names of all metadata that can be consumed or routed by specified method. This method returns the names of all metadata, even the `selfselfself._self_request_route_params` has no effect. Returns ------- names : set of str A set of strings with the names of all parameters.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\utils\\_metadata_requests.py",
    "ast_data": "FunctionDef name:_get_param_names arg:self arguments arg arg arg arg Assign Call If BoolOp Assign Call Call For Call For If Compare Assign Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "__len__",
    "source_code": "def __len__(self) -> int:\n    return len(self._stack)",
    "docstring": "Return number of items on the stack, and used for truth-value testing.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\traceable_stack.py",
    "ast_data": "FunctionDef name:__len__ arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "cherrypy",
    "name": "start",
    "source_code": "def start(self):\n    if not self.httpserver:\n        self.httpserver, self.bind_addr = self.httpserver_from_self()\n    super(Server, self).start()",
    "docstring": "Start the HTTP server.",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\_cpserver.py",
    "ast_data": "FunctionDef name:start arg:self arguments arg If Assign Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "make_contiguous_for_perm",
    "source_code": "def make_contiguous_for_perm(t: torch.Tensor, perm: list[int]) -> torch.Tensor:\n    inv_perm = [0] * len(perm)\n    for i, p in enumerate(perm):\n        inv_perm[p] = i\n    return t.permute(perm).contiguous().permute(inv_perm)",
    "docstring": "Restride such that is contiguous.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\_symmetric_memory\\__init__.py",
    "ast_data": "FunctionDef name:make_contiguous_for_perm arg:t arg:perm arguments arg arg Assign Call For Call Assign Return return:yes Call Call Call"
  },
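The helper above is small enough to verify standalone (the function body is copied verbatim from the entry):

```python
import torch

def make_contiguous_for_perm(t: torch.Tensor, perm: list[int]) -> torch.Tensor:
    inv_perm = [0] * len(perm)
    for i, p in enumerate(perm):
        inv_perm[p] = i  # invert the permutation to restore dimension order
    return t.permute(perm).contiguous().permute(inv_perm)

t = torch.arange(24).reshape(2, 3, 4)
out = make_contiguous_for_perm(t, [2, 0, 1])
assert out.shape == t.shape
assert out.permute(2, 0, 1).is_contiguous()  # restrided as promised
```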
  {
    "library": "matplotlib",
    "name": "set_rmin",
    "source_code": "def set_rmin(self, rmin):\n    self.viewLim.y0 = rmin",
    "docstring": "Set the inner radial limit. Parameters ---------- rmin : float",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\projections\\polar.py",
    "ast_data": "FunctionDef name:set_rmin arg:self arg:rmin arguments arg arg Assign"
  },
  {
    "library": "pytorch",
    "name": "make_triton_contiguous",
    "source_code": "def make_triton_contiguous(t):\n    if min(t.stride()) > 1:\n        return t.contiguous()\n    else:\n        return t",
    "docstring": "Return input as a triton-contiguous tensor. A triton-contiguous tensor is defined as a tensor that has strides with minimal value smaller than or equal to 1. While triton kernels support triton-non-contiguous tensors (all strides being greater than 1) arguments, a considerable slow-down occurs because tensor data is copied element-wise rather than chunk-wise. Zero strides is assumed to not have this defect.",
    "type": "function",
    "file_path": "pytorch\\torch\\sparse\\_triton_ops.py",
    "ast_data": "FunctionDef name:make_triton_contiguous arg:t arguments arg If Compare Call Call Return return:yes Call Return return:yes"
  },
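A stride demonstration of the rule above (the function body is copied verbatim from the entry; the sliced tensor is chosen so that every stride exceeds 1):

```python
import torch

def make_triton_contiguous(t):
    # Copy only when every stride exceeds 1; a stride <= 1 in some
    # dimension already permits chunk-wise access.
    if min(t.stride()) > 1:
        return t.contiguous()
    return t

t = torch.arange(16.0).reshape(4, 4)[::2, ::2]  # strides (8, 2): all > 1
out = make_triton_contiguous(t)
assert out.stride() == (2, 1)  # copied to a contiguous layout
```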
  {
    "library": "pytorch",
    "name": "_transform_scalar_arithmetic",
    "source_code": "def _transform_scalar_arithmetic(gm: torch.fx.GraphModule, node: torch.fx.Node):\n    to_standard_op = {'mul': torch.ops.aten.mul.Scalar, 'add': torch.ops.aten.add.Scalar}\n    assert isinstance(node.target, torch._ops.OpOverload)\n    opname, args = (node.target._opname, node.args)\n    op_res_node = gm.graph.call_function(to_standard_op[opname], args)\n    return (op_res_node, _SCALE, _ZERO_POINT)",
    "docstring": "Transform scalar overload for basic arithmetic.",
    "type": "function",
    "file_path": "pytorch\\torch\\_export\\passes\\replace_quantized_ops_with_standard_ops_pass.py",
    "ast_data": "FunctionDef name:_transform_scalar_arithmetic arg:gm arg:node arguments arg arg Assign Call Assign Assign Call Return return:yes"
  },
  {
    "library": "cryptography",
    "name": "curve",
    "source_code": "@property\n@abc.abstractmethod\ndef curve(self) -> EllipticCurve:\n    pass",
    "docstring": "The EllipticCurve that this key is on.",
    "type": "method",
    "file_path": "cryptography\\src\\cryptography\\hazmat\\primitives\\asymmetric\\ec.py",
    "ast_data": "FunctionDef name:curve arg:self arguments arg"
  },
  {
    "library": "pytorch",
    "name": "diff",
    "source_code": "def diff(self, other):\n    r = self.dynamo_guards.difference(other.dynamo_guards)\n    if len(r) == 0:\n        return None\n    return r",
    "docstring": "Produces a delta against another GuardsCheckpointState. Returns None if no delta is found, otherwise, return a set() of mismatched Guard type objects.",
    "type": "method",
    "file_path": "pytorch\\torch\\_guards.py",
    "ast_data": "FunctionDef name:diff arg:self arg:other arguments arg arg Assign Call If Compare Call Return return:no Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "predict_on_batch",
    "source_code": "def predict_on_batch(self, x):\n    self._check_call_args('predict_on_batch')\n    _disallow_inside_tf_function('predict_on_batch')\n    with self.distribute_strategy.scope():\n        iterator = data_adapter.single_batch_iterator(self.distribute_strategy, x)\n        self.predict_function = self.make_predict_function()\n        outputs = self.predict_function(iterator)\n    return tf_utils.sync_to_numpy_or_python_type(outputs)",
    "docstring": "Returns predictions for a single batch of samples. Args: x: Input data. It could be: - A Numpy array (or array-like), or a list of arrays (in case the model has multiple inputs). - A TensorFlow tensor, or a list of tensors (in case the model has multiple inputs). Returns: Numpy array(s) of predictions. Raises: RuntimeError: If is wrapped in . ValueError: In case of mismatch between given number of inputs and expectations of the model.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\training.py",
    "ast_data": "FunctionDef name:predict_on_batch arg:self arg:x arguments arg arg Call Call With Call Assign Call Assign Call Assign Call Return return:yes Call"
  },
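Typical use of `predict_on_batch` with a toy model; layer sizes are arbitrary:

```python
import numpy as np
import tensorflow as tf

model = tf.keras.Sequential([tf.keras.Input(shape=(4,)),
                             tf.keras.layers.Dense(2)])
# Single forward pass: none of the batching/callback machinery of predict().
preds = model.predict_on_batch(np.zeros((8, 4), dtype=np.float32))
assert preds.shape == (8, 2)
```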
  {
    "library": "django",
    "name": "MinimumLengthValidator",
    "source_code": "class MinimumLengthValidator:\n\n    def __init__(self, min_length=8):\n        self.min_length = min_length\n\n    def validate(self, password, user=None):\n        if len(password) < self.min_length:\n            raise ValidationError(self.get_error_message(), code='password_too_short', params={'min_length': self.min_length})\n\n    def get_error_message(self):\n        return ngettext('This password is too short. It must contain at least %d character.', 'This password is too short. It must contain at least %d characters.', self.min_length) % self.min_length\n\n    def get_help_text(self):\n        return ngettext('Your password must contain at least %(min_length)d character.', 'Your password must contain at least %(min_length)d characters.', self.min_length) % {'min_length': self.min_length}",
    "docstring": "Validate that the password is of a minimum length.",
    "type": "class",
    "file_path": "django\\django\\contrib\\auth\\password_validation.py",
    "ast_data": "ClassDef name:MinimumLengthValidator FunctionDef name:__init__ arg:self arg:min_length arguments arg arg Assign FunctionDef name:validate arg:self arg:password arg:user arguments arg arg arg If Compare Call Raise Call Call FunctionDef name:get_error_message arg:self arguments arg Return return:yes Call FunctionDef name:get_help_text arg:self arguments arg Return return:yes Call"
  },
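A usage sketch for the validator above, assuming a minimally configured Django settings module so translation lookups resolve:

```python
import django
from django.conf import settings

settings.configure(USE_I18N=False)  # minimal settings; assumption for the demo
django.setup()

from django.contrib.auth.password_validation import MinimumLengthValidator
from django.core.exceptions import ValidationError

validator = MinimumLengthValidator(min_length=10)
try:
    validator.validate("short")
except ValidationError as err:
    print(err.messages)  # ['This password is too short. ...']
validator.validate("long enough password")  # passes silently
```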
  {
    "library": "pytorch",
    "name": "create_onnx_friendly_decomposition_table",
    "source_code": "def create_onnx_friendly_decomposition_table(onnx_registered_ops: set[_registration.TorchOp]) -> dict[_registration.TorchOp, Callable]:\n    decomposition_table: dict[_registration.TorchOp, Callable] = {}\n    for op_overload, decomp_fn in itertools.chain(torch.export.default_decompositions().items(), torch._decomp.decomposition_table.items()):\n        if op_overload in onnx_registered_ops:\n            continue\n        if not hasattr(op_overload, '_schema'):\n            continue\n        if op_overload in decomposition_table:\n            continue\n        decomposition_table[op_overload] = decomp_fn\n    return decomposition_table",
    "docstring": "This function creates a dictionary of op overloads and their decomposition functions for ops that do not have ONNX symbolic functions. If an op already has an ONNX symbolic function, its decomposition function is excluded from the table. The decomposition table is a subset of PyTorch's built-in aten-to-aten decomposition. Args: onnx_registered_ops: All ops that have an ONNX decomposition implemented. Returns: Dict[torch._ops.OperatorBase, Callable]: A dictionary that maps op overloads to their corresponding decomposition functions.",
    "type": "function",
    "file_path": "pytorch\\torch\\onnx\\_internal\\exporter\\_decomp.py",
    "ast_data": "FunctionDef name:create_onnx_friendly_decomposition_table arg:onnx_registered_ops arguments arg For Call Call Call Call If Compare If Call If Compare Assign Return return:yes"
  },
  {
    "library": "kornia",
    "name": "vec_like",
    "source_code": "def vec_like(n: int, tensor: Tensor, shared_memory: bool=False) -> Tensor:\n    if n <= 0:\n        raise AssertionError(type(n), n)\n    if len(tensor.shape) < 1:\n        raise AssertionError(tensor.shape)\n    vec = zeros(n, 1, device=tensor.device, dtype=tensor.dtype)\n    return vec[None].expand(tensor.shape[0], n, 1) if shared_memory else vec[None].repeat(tensor.shape[0], 1, 1)",
    "docstring": "Return a 2-D tensor with a vector containing zeros with the same batch size as the input. Args: n: the number of rows :math:. tensor: image tensor that will determine the batch size of the output matrix. The expected shape is :math:. shared_memory: when set, all samples in the batch will share the same memory. Returns: The vector with the same batch size as the input :math:. Notes: When the dimension to expand is of size 1, using torch.expand(...) yields the same tensor as torch.repeat(...) without using extra memory. Thus, when the tensor obtained by this method will be later assigned - use this method with shared_memory=False, otherwise, prefer using it with shared_memory=True.",
    "type": "function",
    "file_path": "kornia\\kornia\\utils\\misc.py",
    "ast_data": "FunctionDef name:vec_like arg:n arg:tensor arg:shared_memory arguments arg arg arg If Compare Raise Call Call If Compare Call Raise Call Assign Call Return return:yes Call Call"
  },
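A short usage check, assuming `vec_like` is exported from `kornia.utils` as its file path suggests:

```python
import torch
from kornia.utils import vec_like

imgs = torch.rand(4, 3, 8, 8)  # batch of 4 images
v = vec_like(3, imgs)          # independent zeros per sample
assert v.shape == (4, 3, 1)
v_shared = vec_like(3, imgs, shared_memory=True)  # expanded view, shared storage
```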
  {
    "library": "pandas",
    "name": "_validation_error_message",
    "source_code": "def _validation_error_message(self, value, allow_listlike: bool=False) -> str:\n    if hasattr(value, 'dtype') and getattr(value, 'ndim', 0) > 0:\n        msg_got = f'{value.dtype} array'\n    else:\n        msg_got = f\"'{type(value).__name__}'\"\n    if allow_listlike:\n        msg = f\"value should be a '{self._scalar_type.__name__}', 'NaT', or array of those. Got {msg_got} instead.\"\n    else:\n        msg = f\"value should be a '{self._scalar_type.__name__}' or 'NaT'. Got {msg_got} instead.\"\n    return msg",
    "docstring": "Construct an exception message on validation error. Some methods allow only scalar inputs, while others allow either scalar or listlike. Parameters ---------- allow_listlike: bool, default False Returns ------- str",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\arrays\\datetimelike.py",
    "ast_data": "FunctionDef name:_validation_error_message arg:self arg:value arg:allow_listlike arguments arg arg arg If BoolOp Call Compare Call Assign Assign Call If Assign Assign Return return:yes"
  },
  {
    "library": "numpy",
    "name": "_universal_flags",
    "source_code": "def _universal_flags(self, cmd):\n    if not sys.platform == 'darwin':\n        return []\n    arch_flags = []\n    c_archs = self._c_arch_flags()\n    if 'i386' in c_archs:\n        c_archs[c_archs.index('i386')] = 'i686'\n    for arch in ['ppc', 'i686', 'x86_64', 'ppc64', 's390x']:\n        if _can_target(cmd, arch) and arch in c_archs:\n            arch_flags.extend(['-arch', arch])\n    return arch_flags",
    "docstring": "Return a list of -arch flags for every supported architecture.",
    "type": "method",
    "file_path": "numpy\\numpy\\distutils\\fcompiler\\gnu.py",
    "ast_data": "FunctionDef name:_universal_flags arg:self arg:cmd arguments arg arg If Compare Return return:no Assign Assign Call If Compare Assign Call For If BoolOp Call Compare Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "ConvBn1d",
    "source_code": "class ConvBn1d(_FusedModule):\n\n    def __init__(self, conv, bn):\n        assert type_before_parametrizations(conv) == Conv1d and type_before_parametrizations(bn) == BatchNorm1d, f'Incorrect types for input modules{type_before_parametrizations(conv)}{type_before_parametrizations(bn)}'\n        super().__init__(conv, bn)",
    "docstring": "This is a sequential container which calls the Conv 1d and Batch Norm 1d modules. During quantization this will be replaced with the corresponding fused module.",
    "type": "class",
    "file_path": "pytorch\\torch\\ao\\nn\\intrinsic\\modules\\fused.py",
    "ast_data": "ClassDef name:ConvBn1d FunctionDef name:__init__ arg:self arg:conv arg:bn arguments arg arg arg BoolOp Compare Call Compare Call Call Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "get_top_nodes",
    "source_code": "def get_top_nodes(partition: Partition) -> list[Node]:\n    top_nodes: list[Node] = []\n    for node in partition.nodes:\n        if node.op in {'placeholder', 'get_attr'}:\n            continue\n        input_nodes: dict[Node, None] = {}\n        map_arg(node.args, input_nodes.setdefault)\n        map_arg(node.kwargs, input_nodes.setdefault)\n        if not any((n in partition.nodes and n.op not in {'placeholder', 'get_attr'} for n in input_nodes)):\n            top_nodes.append(node)\n    return top_nodes",
    "docstring": "Given a partition, return a list of nodes on the top bfs level",
    "type": "function",
    "file_path": "pytorch\\torch\\fx\\experimental\\partitioner_utils.py",
    "ast_data": "FunctionDef name:get_top_nodes arg:partition arguments arg For If Compare Call Call If Call BoolOp Compare Compare Call Return return:yes"
  },
  {
    "library": "kornia",
    "name": "RgbToHsv",
    "source_code": "class RgbToHsv(Module):\n    ONNX_DEFAULT_INPUTSHAPE: ClassVar[list[int]] = [-1, 3, -1, -1]\n    ONNX_DEFAULT_OUTPUTSHAPE: ClassVar[list[int]] = [-1, 3, -1, -1]\n\n    def __init__(self, eps: float=1e-06) -> None:\n        super().__init__()\n        self.eps = eps\n\n    def forward(self, image: torch.Tensor) -> torch.Tensor:\n        return rgb_to_hsv(image, self.eps)",
    "docstring": "Convert an image from RGB to HSV. The image data is assumed to be in the range of (0, 1). Args: eps: scalar to enforce numarical stability. Returns: HSV version of the image. Shape: - image: :math: - output: :math: Example: >>> input = torch.rand(2, 3, 4, 5) >>> hsv = RgbToHsv() >>> output = hsv(input) # 2x3x4x5",
    "type": "class",
    "file_path": "kornia\\kornia\\color\\hsv.py",
    "ast_data": "ClassDef name:RgbToHsv FunctionDef name:__init__ arg:self arg:eps arguments arg arg Call Call Assign FunctionDef name:forward arg:self arg:image arguments arg arg Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "holidays",
    "source_code": "def holidays(self, start=None, end=None, return_name: bool=False):\n    if self.rules is None:\n        raise Exception(f'Holiday Calendar {self.name} does not have any rules specified')\n    if start is None:\n        start = AbstractHolidayCalendar.start_date\n    if end is None:\n        end = AbstractHolidayCalendar.end_date\n    start = Timestamp(start)\n    end = Timestamp(end)\n    if self._cache is None or start < self._cache[0] or end > self._cache[1]:\n        pre_holidays = [rule.dates(start, end, return_name=True) for rule in self.rules]\n        if pre_holidays:\n            holidays = concat(pre_holidays)\n        else:\n            holidays = Series(index=DatetimeIndex([]), dtype=object)\n        self._cache = (start, end, holidays.sort_index())\n    holidays = self._cache[2]\n    holidays = holidays[start:end]\n    if return_name:\n        return holidays\n    else:\n        return holidays.index",
    "docstring": "Returns a curve with holidays between start_date and end_date Parameters ---------- start : starting date, datetime-like, optional end : ending date, datetime-like, optional return_name : bool, optional If True, return a series that has dates and holiday names. False will only return a DatetimeIndex of dates. Returns ------- DatetimeIndex of holidays",
    "type": "method",
    "file_path": "pandas\\pandas\\tseries\\holiday.py",
    "ast_data": "FunctionDef name:holidays arg:self arg:start arg:end arg:return_name arguments arg arg arg arg If Compare Raise Call If Compare Assign If Compare Assign Assign Call Assign Call If BoolOp Compare Compare Compare Assign Call If Assign Call Assign Call Call Assign Call Assign Assign If Return return:yes Return return:yes"
  },
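As a usage sketch for `holidays` above, the built-in `USFederalHolidayCalendar` subclass exercises the same method:

```python
from pandas.tseries.holiday import USFederalHolidayCalendar

cal = USFederalHolidayCalendar()
# DatetimeIndex of dates by default
print(cal.holidays(start="2024-01-01", end="2024-12-31"))
# Series mapping dates to holiday names when return_name=True
print(cal.holidays(start="2024-01-01", end="2024-12-31", return_name=True))
```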
  {
    "library": "numpy",
    "name": "quote_args",
    "source_code": "def quote_args(args):\n    import warnings\n    warnings.warn('\"quote_args\" is deprecated.', DeprecationWarning, stacklevel=2)\n    args = list(args)\n    for i in range(len(args)):\n        a = args[i]\n        if ' ' in a and a[0] not in '\"\\'':\n            args[i] = '\"%s\"' % a\n    return args",
    "docstring": "Quote list of arguments. .. deprecated:: 1.22.",
    "type": "function",
    "file_path": "numpy\\numpy\\distutils\\misc_util.py",
    "ast_data": "FunctionDef name:quote_args arg:args arguments arg Call Assign Call For Call Call Assign If BoolOp Compare Compare Assign Return return:yes"
  },
  {
    "library": "sphinx",
    "name": "isattributedescriptor",
    "source_code": "def isattributedescriptor(obj: Any) -> bool:\n    if inspect.isdatadescriptor(obj):\n        return True\n    if isdescriptor(obj):\n        unwrapped = unwrap(obj)\n        if isfunction(unwrapped) or isbuiltin(unwrapped) or ismethod(unwrapped):\n            return False\n        if is_cython_function_or_method(unwrapped):\n            return False\n        if isclass(unwrapped):\n            return False\n        if isinstance(unwrapped, _DESCRIPTOR_LIKE):\n            return False\n        return type(unwrapped).__name__ != 'instancemethod'\n    return False",
    "docstring": "Check if the object is an attribute-like descriptor.",
    "type": "function",
    "file_path": "sphinx\\sphinx\\util\\inspect.py",
    "ast_data": "FunctionDef name:isattributedescriptor arg:obj arguments arg If Call Return return:yes If Call Assign Call If BoolOp Call Call Call Return return:yes If Call Return return:yes If Call Return return:yes If Call Return return:yes Return return:yes Compare Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "is_ok",
    "source_code": "def is_ok(self):\n    self._check_status()\n    return math_ops.equal(self._error_code, constant_op.constant(0, dtype=dtypes.int64))",
    "docstring": "Returns True if RPC is successful, otherwise returns False. This call will block for RPC result.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\experimental\\rpc\\rpc_ops.py",
    "ast_data": "FunctionDef name:is_ok arg:self arguments arg Call Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "block_diag",
    "source_code": "def block_diag(*tensors: list[TensorLikeType]) -> TensorLikeType:\n    return _block_diag_iterable(tensors)",
    "docstring": "This is used as an input to PythonRefInfo. expects arguments splatted, but expects only one argument that is a list of Tensors.",
    "type": "function",
    "file_path": "pytorch\\torch\\_refs\\__init__.py",
    "ast_data": "FunctionDef name:block_diag arguments arg Return return:yes Call"
  },
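The public op this reference mirrors can be exercised directly; a short sketch:

```python
import torch

a = torch.ones(2, 2)
b = torch.eye(3)
# torch.block_diag takes splatted tensors and places them on the diagonal
print(torch.block_diag(a, b).shape)  # torch.Size([5, 5])
```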
  {
    "library": "pytorch",
    "name": "get_printoptions",
    "source_code": "def get_printoptions() -> dict[str, Any]:\n    return dataclasses.asdict(PRINT_OPTS)",
    "docstring": "Gets the current options for printing, as a dictionary that can be passed as `` to set_printoptions().",
    "type": "function",
    "file_path": "pytorch\\torch\\_tensor_str.py",
    "ast_data": "FunctionDef name:get_printoptions arguments Return return:yes Call"
  },
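A round-trip sketch for `get_printoptions`, importing it from the module named in the entry's file_path (whether it is also re-exported at top level may vary by version):

```python
import torch
from torch._tensor_str import get_printoptions  # module per the entry's file_path

saved = get_printoptions()              # e.g. {'precision': 4, 'threshold': 1000, ...}
torch.set_printoptions(precision=2)
print(torch.tensor([3.14159]))          # tensor([3.14])
torch.set_printoptions(**saved)         # the returned dict round-trips as **kwargs
```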
  {
    "library": "scikit-learn",
    "name": "update_params",
    "source_code": "def update_params(self, params, grads):\n    updates = self._get_updates(grads)\n    for param, update in zip((p for p in params), updates):\n        param += update",
    "docstring": "Update parameters with given gradients Parameters ---------- params : list of length = len(coefs_) + len(intercepts_) The concatenated list containing coefs_ and intercepts_ in MLP model. Used for initializing velocities and updating params grads : list of length = len(params) Containing gradients with respect to coefs_ and intercepts_ in MLP model. So length should be aligned with params",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\neural_network\\_stochastic_optimizers.py",
    "ast_data": "FunctionDef name:update_params arg:self arg:params arg:grads arguments arg arg arg Assign Call For Call"
  },
  {
    "library": "pandas",
    "name": "_reconstruct_data",
    "source_code": "def _reconstruct_data(values: ArrayLikeT, dtype: DtypeObj, original: AnyArrayLike) -> ArrayLikeT:\n    if isinstance(values, ABCExtensionArray) and values.dtype == dtype:\n        return values\n    if not isinstance(dtype, np.dtype):\n        cls = dtype.construct_array_type()\n        values = cls._from_sequence(values, dtype=dtype)\n    else:\n        values = values.astype(dtype, copy=False)\n    return values",
    "docstring": "reverse of _ensure_data Parameters ---------- values : np.ndarray or ExtensionArray dtype : np.dtype or ExtensionDtype original : AnyArrayLike Returns ------- ExtensionArray or np.ndarray",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\algorithms.py",
    "ast_data": "FunctionDef name:_reconstruct_data arg:values arg:dtype arg:original arguments arg arg arg If BoolOp Call Compare Return return:yes If Call Assign Call Assign Call Assign Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_build_ddp_param_buckets",
    "source_code": "def _build_ddp_param_buckets(self) -> None:\n    for bucket_assignments in self._bucket_assignments_per_rank:\n        for bucket_assignment in bucket_assignments.values():\n            params = bucket_assignment.parameters\n            bucket_size = 0\n            dtype = None\n            for param in params:\n                assert _is_trainable(param), 'Model parameter corresponding to a gradient in a DDP bucket should require a gradient'\n                bucket_size += param.numel()\n                dtype = param.dtype\n            assert bucket_size > 0, 'Empty bucket'\n            tensor = torch.empty(bucket_size, dtype=dtype, device=bucket_assignment.device)\n            offset = 0\n            for param in params:\n                offset_next = offset + param.numel()\n                tensor[offset:offset_next].copy_(param.data.flatten())\n                param.data = tensor[offset:offset_next].view_as(param.data)\n                offset = offset_next\n            bucket_assignment.tensor = tensor",
    "docstring": "Build the DDP bucket with parameters assigned to this rank. For each DDP bucket with parameters assigned to this rank, flattens the data of those parameters into a single tensor and saves the tensor to the `_DDPBucketAssignmentDistributedDataParallel` guarantees that the parameters corresponding to a gradient bucket have the same device and the same dtype.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\optim\\zero_redundancy_optimizer.py",
    "ast_data": "FunctionDef name:_build_ddp_param_buckets arg:self arguments arg For For Call Assign Assign Assign For Call Call Assign Compare Assign Call Assign For Assign Call Call Call Assign Call Assign Assign"
  },
  {
    "library": "tensorflow",
    "name": "_set_xla_sharding",
    "source_code": "def _set_xla_sharding(self, xla_sharding):\n    if self._variable_read and (not context.executing_eagerly()):\n        logging.warning(\"This variable (%s) has already been read (ie. a ReadVariableOp has already been generated) and a new XlaShardingOp using this sharding will not be created unless it is read again. If that's not possible, please set the XLA sharding before reading the variable.\", self.name)\n    self._xla_sharding = xla_sharding",
    "docstring": "Annotates this with . will be used to create an whenever a is created. Args: xla_sharding: The xla.OpSharding proto to annotate this ResourceVariable with.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\resource_variable_ops.py",
    "ast_data": "FunctionDef name:_set_xla_sharding arg:self arg:xla_sharding arguments arg arg If BoolOp Call Call Assign"
  },
  {
    "library": "sphinx",
    "name": "add_source_suffix",
    "source_code": "def add_source_suffix(self, suffix: str, filetype: str, override: bool=False) -> None:\n    self.registry.add_source_suffix(suffix, filetype, override=override)",
    "docstring": "Register a suffix of source files. Same as :confval:. The users can override this using the config setting. :param override: If false, do not install it the same suffix is already installed. If true, unconditionally install the suffix. .. versionadded:: 1.8",
    "type": "method",
    "file_path": "sphinx\\sphinx\\application.py",
    "ast_data": "FunctionDef name:add_source_suffix arg:self arg:suffix arg:filetype arg:override arguments arg arg arg arg Call"
  },
  {
    "library": "numpy",
    "name": "__mul__",
    "source_code": "def __mul__(self, other):\n    if self._delegate_binop(other):\n        return NotImplemented\n    return multiply(self, other)",
    "docstring": "Multiply self by other, and return a new masked array.",
    "type": "method",
    "file_path": "numpy\\numpy\\ma\\core.py",
    "ast_data": "FunctionDef name:__mul__ arg:self arg:other arguments arg arg If Call Return return:yes Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_FresnelSinGrad",
    "source_code": "@ops.RegisterGradient('FresnelSin')\ndef _FresnelSinGrad(op: ops.Operation, grad):\n    x = op.inputs[0]\n    with ops.control_dependencies([grad]):\n        return grad * math_ops.sin(np.pi / 2.0 * math_ops.square(x))",
    "docstring": "Compute gradient of fresnel_sin(x) with respect to its argument.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\math_grad.py",
    "ast_data": "FunctionDef name:_FresnelSinGrad arg:op arg:grad arguments arg arg Assign With Call Return return:yes Call Call Call"
  },
  {
    "library": "kornia",
    "name": "dtype",
    "source_code": "@property\ndef dtype(self) -> torch.dtype:\n    return self.data.dtype",
    "docstring": "Return the image data type.",
    "type": "method",
    "file_path": "kornia\\kornia\\image\\image.py",
    "ast_data": "FunctionDef name:dtype arg:self arguments arg Return return:yes"
  },
  {
    "library": "pandas",
    "name": "_create_table_builder",
    "source_code": "def _create_table_builder(self) -> _SeriesTableBuilder:\n    if self.verbose or self.verbose is None:\n        return _SeriesTableBuilderVerbose(info=self.info, with_counts=self.show_counts)\n    else:\n        return _SeriesTableBuilderNonVerbose(info=self.info)",
    "docstring": "Create instance of table builder based on verbosity.",
    "type": "method",
    "file_path": "pandas\\pandas\\io\\formats\\info.py",
    "ast_data": "FunctionDef name:_create_table_builder arg:self arguments arg If BoolOp Compare Return return:yes Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_TensorDataset",
    "source_code": "class _TensorDataset(dataset_ops.DatasetSource):\n\n    def __init__(self, element, name=None):\n        element = structure.normalize_element(element)\n        self._structure = structure.type_spec_from_value(element)\n        self._tensors = structure.to_tensor_list(self._structure, element)\n        self._name = name\n        variant_tensor = gen_dataset_ops.tensor_dataset(self._tensors, output_shapes=structure.get_flat_tensor_shapes(self._structure), metadata=self._metadata.SerializeToString())\n        super().__init__(variant_tensor)\n\n    @property\n    def element_spec(self):\n        return self._structure",
    "docstring": "A with a single element.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\data\\ops\\from_tensors_op.py",
    "ast_data": "ClassDef name:_TensorDataset FunctionDef name:__init__ arg:self arg:element arg:name arguments arg arg arg Assign Call Assign Call Assign Call Assign Assign Call Call Call Call Call FunctionDef name:element_spec arg:self arguments arg Return return:yes"
  },
  {
    "library": "pandas",
    "name": "_write_bytes",
    "source_code": "def _write_bytes(self, value: bytes) -> None:\n    self.handles.handle.write(value)",
    "docstring": "Helper to assert file is open before writing.",
    "type": "method",
    "file_path": "pandas\\pandas\\io\\stata.py",
    "ast_data": "FunctionDef name:_write_bytes arg:self arg:value arguments arg arg Call"
  },
  {
    "library": "tensorflow",
    "name": "not_",
    "source_code": "def not_(a):\n    if tensor_util.is_tf_type(a):\n        return _tf_not(a)\n    return _py_not(a)",
    "docstring": "Functional form of \"not\".",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\autograph\\operators\\logical.py",
    "ast_data": "FunctionDef name:not_ arg:a arguments arg If Call Return return:yes Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "map_only",
    "source_code": "def map_only(type_or_types_or_pred: Union[TypeAny, Callable[[Any], bool]], /) -> MapOnlyFn[FnAny[Any]]:\n    if isinstance(type_or_types_or_pred, (type, tuple)) or (sys.version_info >= (3, 10) and isinstance(type_or_types_or_pred, types.UnionType)):\n\n        def pred(x: Any) -> bool:\n            return isinstance(x, type_or_types_or_pred)\n    elif callable(type_or_types_or_pred):\n        pred = type_or_types_or_pred\n    else:\n        raise TypeError('Argument must be a type, a tuple of types, or a callable.')\n\n    def wrapper(func: Callable[[T], Any]) -> Callable[[Any], Any]:\n\n        @functools.wraps(func)\n        def wrapped(x: T) -> Any:\n            if pred(x):\n                return func(x)\n            return x\n        return wrapped\n    return wrapper",
    "docstring": "Suppose you are writing a tree_map over tensors, leaving everything else unchanged. Ordinarily you would have to write: def go(t): if isinstance(t, Tensor): return ... else: return t With this function, you only need to write: @map_only(Tensor) def go(t): return ... You can also directly use 'tree_map_only'",
    "type": "function",
    "file_path": "pytorch\\torch\\utils\\_cxx_pytree.py",
    "ast_data": "FunctionDef name:map_only arguments arg If BoolOp Call BoolOp Compare Call FunctionDef name:pred arg:x arguments arg Return return:yes Call If Call Assign Raise Call FunctionDef name:wrapper arg:func arguments arg FunctionDef name:wrapped arg:x arguments arg If Call Return return:yes Call Return return:yes Call Return return:yes Return return:yes"
  },
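A sketch of the decorator pattern described in the docstring, using the eager `torch.utils._pytree` mirror of this helper (the `_cxx_pytree` variant requires the optree backend):

```python
import torch
from torch.utils._pytree import tree_map_only  # same semantics as map_only + tree_map

tree = {"w": torch.ones(2), "step": 3}
out = tree_map_only(torch.Tensor, lambda t: t * 2, tree)
print(out)  # the Tensor leaf is doubled; the int leaf passes through unchanged
```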
  {
    "library": "scikit-learn",
    "name": "predict_proba",
    "source_code": "def predict_proba(self, X):\n    raw_predictions = self._raw_predict(X)\n    return self._loss.predict_proba(raw_predictions)",
    "docstring": "Predict class probabilities for X. Parameters ---------- X : array-like, shape (n_samples, n_features) The input samples. Returns ------- p : ndarray, shape (n_samples, n_classes) The class probabilities of the input samples.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\ensemble\\_hist_gradient_boosting\\gradient_boosting.py",
    "ast_data": "FunctionDef name:predict_proba arg:self arg:X arguments arg arg Assign Call Return return:yes Call"
  },
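A usage sketch for the estimator this method belongs to:

```python
from sklearn.datasets import make_classification
from sklearn.ensemble import HistGradientBoostingClassifier

X, y = make_classification(n_samples=200, random_state=0)
clf = HistGradientBoostingClassifier(max_iter=20).fit(X, y)
proba = clf.predict_proba(X[:5])
print(proba.shape)  # (5, 2); each row sums to 1
```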
  {
    "library": "cherrypy",
    "name": "expired",
    "source_code": "def expired(self):\n    return datetime.datetime.now(datetime.timezone.utc) >= self.expiration",
    "docstring": "Check whether the timer has expired.",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\lib\\locking.py",
    "ast_data": "FunctionDef name:expired arg:self arguments arg Return return:yes Compare Call"
  },
  {
    "library": "cherrypy",
    "name": "connect",
    "source_code": "def connect(self, name, route, controller, **kwargs):\n    self.controllers[name] = controller\n    self.mapper.connect(name, route, controller=name, **kwargs)",
    "docstring": "Mount an HTTP handler into the router.",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\_cpdispatch.py",
    "ast_data": "FunctionDef name:connect arg:self arg:name arg:route arg:controller arguments arg arg arg arg arg Assign Call"
  },
  {
    "library": "tensorflow",
    "name": "unbatch",
    "source_code": "@deprecation.deprecated(None, 'Use `tf.data.Dataset.unbatch()`.')\n@tf_export('data.experimental.unbatch')\ndef unbatch():\n\n    def _apply_fn(dataset):\n        return dataset.unbatch()\n    return _apply_fn",
    "docstring": "Splits elements of a dataset into multiple elements on the batch dimension. For example, if elements of the dataset are shaped , where may vary for each input element, then for each element in the dataset, the unbatched dataset will contain consecutive elements of shape . Returns: A transformation function, which can be passed to .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\data\\experimental\\ops\\batching.py",
    "ast_data": "FunctionDef name:unbatch arguments FunctionDef name:_apply_fn arg:dataset arguments arg Return return:yes Call Return return:yes Call Call"
  },
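Since the wrapper above is deprecated in favor of `tf.data.Dataset.unbatch()`, a sketch of the recommended form:

```python
import tensorflow as tf

ds = tf.data.Dataset.from_tensor_slices(tf.range(6)).batch(2)  # elements of shape [2]
flat = ds.unbatch()                                            # elements of shape []
print([int(e) for e in flat])  # [0, 1, 2, 3, 4, 5]
```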
  {
    "library": "tensorflow",
    "name": "DistributedVarOp",
    "source_code": "class DistributedVarOp(object):\n\n    def __init__(self, name, graph, traceback, typ):\n        self.name = name\n        self.graph = graph\n        self.traceback = traceback\n        self.type = typ\n\n    def __eq__(self, o):\n        if not isinstance(o, self.__class__):\n            raise NotImplementedError\n        return self.name == o.name and self.graph == o.graph and (self.traceback == o.traceback) and (self.type == o.type)\n\n    def __hash__(self):\n        return hash((self.name, self.graph, tuple(self.traceback), self.type))",
    "docstring": "A class that looks like .",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\values.py",
    "ast_data": "ClassDef name:DistributedVarOp FunctionDef name:__init__ arg:self arg:name arg:graph arg:traceback arg:typ arguments arg arg arg arg arg Assign Assign Assign Assign FunctionDef name:__eq__ arg:self arg:o arguments arg arg If Call Raise Return return:yes BoolOp Compare Compare Compare Compare FunctionDef name:__hash__ arg:self arguments arg Return return:yes Call Call"
  },
  {
    "library": "sphinx",
    "name": "copy_translation_js",
    "source_code": "def copy_translation_js(self) -> None:\n    if (js_file := self._get_translations_js()):\n        copyfile(js_file, self._static_dir / 'translations.js', force=True)",
    "docstring": "Copy a JavaScript file for translations.",
    "type": "method",
    "file_path": "sphinx\\sphinx\\builders\\html\\__init__.py",
    "ast_data": "FunctionDef name:copy_translation_js arg:self arguments arg If Call Call"
  },
  {
    "library": "pytorch",
    "name": "Buffer",
    "source_code": "class Buffer(torch.Tensor, metaclass=_BufferMeta):\n\n    def __new__(cls, data=None, *, persistent=True):\n        if data is None:\n            data = torch.empty(0)\n        t = data.detach().requires_grad_(data.requires_grad)\n        t.persistent = persistent\n        t._is_buffer = True\n        return t\n    __torch_function__ = _disabled_torch_function_impl",
    "docstring": "A kind of Tensor that should not be considered a model parameter. For example, BatchNorm's `~torch.TensorModule~torch.nn.Module.buffers~torch.nn.Module.register_bufferstate_dict`",
    "type": "class",
    "file_path": "pytorch\\torch\\nn\\parameter.py",
    "ast_data": "ClassDef name:Buffer FunctionDef name:__new__ arg:cls arg:data arguments arg arg arg If Compare Assign Call Assign Call Call Assign Assign Return return:yes Assign"
  },
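A registration sketch, assuming a PyTorch recent enough to expose `torch.nn.Buffer` (the class above):

```python
import torch
import torch.nn as nn  # nn.Buffer availability assumed (recent PyTorch)

class RunningMean(nn.Module):
    def __init__(self):
        super().__init__()
        # assigning a Buffer attribute registers it like register_buffer would
        self.mean = nn.Buffer(torch.zeros(3), persistent=True)

m = RunningMean()
print(list(dict(m.named_buffers())))  # ['mean']
print("mean" in m.state_dict())       # True: persistent buffers are saved
```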
  {
    "library": "pytorch",
    "name": "sparsity",
    "source_code": "def sparsity(self) -> float:\n    total_size = self.numel()\n    computed_blocks = self.kv_num_blocks.sum()\n    if self.full_kv_num_blocks is not None:\n        computed_blocks += self.full_kv_num_blocks.sum()\n    computed_size = computed_blocks.item() * self.BLOCK_SIZE[0] * self.BLOCK_SIZE[1]\n    dense_ratio = computed_size / total_size\n    return 100 * (1 - dense_ratio)",
    "docstring": "Computes the percentage of blocks that are sparse (i.e. not computed)",
    "type": "method",
    "file_path": "pytorch\\torch\\nn\\attention\\flex_attention.py",
    "ast_data": "FunctionDef name:sparsity arg:self arguments arg Assign Call Assign Call If Compare Call Assign Call Assign Return return:yes"
  },
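A sketch of reading this metric off a `BlockMask`, assuming a torch version that ships `flex_attention` so `create_block_mask` is available:

```python
import torch
from torch.nn.attention.flex_attention import create_block_mask  # assumes torch >= 2.5

def causal(b, h, q_idx, kv_idx):
    return q_idx >= kv_idx

# build a causal block mask on CPU and report the fraction of skipped blocks
mask = create_block_mask(causal, B=1, H=1, Q_LEN=512, KV_LEN=512, device="cpu")
print(f"{mask.sparsity():.1f}% of blocks are not computed")
```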
  {
    "library": "matplotlib",
    "name": "set_edgecolor",
    "source_code": "def set_edgecolor(self, color):\n    self._shared_setter('edgecolor', color)",
    "docstring": "Set the edge color of the rectangle and the connectors. Parameters ---------- color : :mpltype: or None",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\inset.py",
    "ast_data": "FunctionDef name:set_edgecolor arg:self arg:color arguments arg arg Call"
  },
  {
    "library": "pytorch",
    "name": "get_num_bytes",
    "source_code": "def get_num_bytes(t: torch.Tensor) -> int:\n    num_bytes = t.untyped_storage().nbytes()\n    mem_consumed = math.ceil(num_bytes / _PYTORCH_MIN_ALLOCATE) * _PYTORCH_MIN_ALLOCATE\n    return mem_consumed",
    "docstring": "Calculates the memory consumption of a tensor. Args: t (torch.Tensor): The input tensor. Returns: int: The memory consumption of the tensor in bytes.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\_tools\\runtime_estimator.py",
    "ast_data": "FunctionDef name:get_num_bytes arg:t arguments arg Assign Call Call Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_wrap_define_function",
    "source_code": "def _wrap_define_function(original_function):\n\n    def wrapper(*args, **kwargs):\n        has_old_names = False\n        for old_name, new_name in _RENAMED_ARGUMENTS.items():\n            if old_name in kwargs:\n                has_old_names = True\n                value = kwargs.pop(old_name)\n                kwargs[new_name] = value\n        if has_old_names:\n            _logging.warning('Use of the keyword argument names (flag_name, default_value, docstring) is deprecated, please use (name, default, help) instead.')\n        return original_function(*args, **kwargs)\n    return tf_decorator.make_decorator(original_function, wrapper)",
    "docstring": "Wraps absl.flags's define functions so tf.flags accepts old names.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\platform\\flags.py",
    "ast_data": "FunctionDef name:_wrap_define_function arg:original_function arguments arg FunctionDef name:wrapper arguments arg arg Assign For Call If Compare Assign Assign Call Assign If Call Return return:yes Call Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "_trim_zeros_float",
    "source_code": "def _trim_zeros_float(str_floats: ArrayLike | list[str], decimal: str='.') -> list[str]:\n    trimmed = str_floats\n    number_regex = re.compile(f'^\\\\s*[\\\\+-]?[0-9]+\\\\{decimal}[0-9]*$')\n\n    def is_number_with_decimal(x) -> bool:\n        return re.match(number_regex, x) is not None\n\n    def should_trim(values: ArrayLike | list[str]) -> bool:\n        numbers = [x for x in values if is_number_with_decimal(x)]\n        return len(numbers) > 0 and all((x.endswith('0') for x in numbers))\n    while should_trim(trimmed):\n        trimmed = [x[:-1] if is_number_with_decimal(x) else x for x in trimmed]\n    result = [x + '0' if is_number_with_decimal(x) and x.endswith(decimal) else x for x in trimmed]\n    return result",
    "docstring": "Trims the maximum number of trailing zeros equally from all numbers containing decimals, leaving just one if necessary.",
    "type": "function",
    "file_path": "pandas\\pandas\\io\\formats\\format.py",
    "ast_data": "FunctionDef name:_trim_zeros_float arg:str_floats arg:decimal arguments arg arg Assign Assign Call FunctionDef name:is_number_with_decimal arg:x arguments arg Return return:yes Compare Call FunctionDef name:should_trim arg:values arguments arg Assign Call Return return:yes BoolOp Compare Call Call Call While Call Assign Call Assign BoolOp Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "relu6",
    "source_code": "@register_decomposition(aten.relu6)\n@_inplace_wrapper\n@out_wrapper()\ndef relu6(a: TensorLikeType, inplace: bool=False) -> TensorLikeType:\n    if inplace:\n        raise NotImplementedError\n    return torch.nn.functional.hardtanh(a, 0, 6)",
    "docstring": "Reference implementation of torch.nn.functional.relu6",
    "type": "function",
    "file_path": "pytorch\\torch\\_refs\\nn\\functional\\__init__.py",
    "ast_data": "FunctionDef name:relu6 arg:a arg:inplace arguments arg arg If Raise Return return:yes Call Call Call"
  },
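The reference delegates to `hardtanh(a, 0, 6)`; a quick check of the equivalence via the public API:

```python
import torch
import torch.nn.functional as F

x = torch.tensor([-1.0, 3.0, 8.0])
print(F.relu6(x))               # tensor([0., 3., 6.])
print(F.hardtanh(x, 0.0, 6.0))  # identical clamp, as the reference implements
```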
  {
    "library": "scipy",
    "name": "logcdf",
    "source_code": "def logcdf(self, k, *args, **kwds):\n    args, loc, _ = self._parse_args(*args, **kwds)\n    k, loc = map(asarray, (k, loc))\n    args = tuple(map(asarray, args))\n    _a, _b = self._get_support(*args)\n    k = asarray(k - loc)\n    cond0 = self._argcheck(*args)\n    cond1 = (k >= _a) & (k < _b)\n    cond2 = k >= _b\n    cond = cond0 & cond1\n    output = empty(shape(cond), 'd')\n    output.fill(-inf)\n    place(output, 1 - cond0 + np.isnan(k), self.badvalue)\n    place(output, cond2 * (cond0 == cond0), 0.0)\n    if np.any(cond):\n        goodargs = argsreduce(cond, *(k,) + args)\n        place(output, cond, self._logcdf(*goodargs))\n    if output.ndim == 0:\n        return output[()]\n    return output",
    "docstring": "Log of the cumulative distribution function at k of the given RV. Parameters ---------- k : array_like, int Quantiles. arg1, arg2, arg3,... : array_like The shape parameter(s) for the distribution (see docstring of the instance object for more information). loc : array_like, optional Location parameter (default=0). Returns ------- logcdf : array_like Log of the cumulative distribution function evaluated at k.",
    "type": "method",
    "file_path": "scipy\\scipy\\stats\\_distn_infrastructure.py",
    "ast_data": "FunctionDef name:logcdf arg:self arg:k arguments arg arg arg arg Assign Call Assign Call Assign Call Call Assign Call Assign Call Assign Call Assign Compare Compare Assign Compare Assign Assign Call Call Call Call Call Call Compare If Call Assign Call Call Call If Compare Return return:yes Return return:yes"
  },
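A usage sketch with a discrete distribution; the log form is more stable than taking `np.log` of the CDF in the tails:

```python
import numpy as np
from scipy import stats

k = np.arange(5)
print(stats.poisson.logcdf(k, mu=3))  # log CDF of Poisson(3) at k = 0..4
np.testing.assert_allclose(
    stats.poisson.logcdf(k, 3), np.log(stats.poisson.cdf(k, 3))
)
```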
  {
    "library": "scikit-learn",
    "name": "add",
    "source_code": "def add(self, *, method_mapping, **objs):\n    method_mapping = deepcopy(method_mapping)\n    for name, obj in objs.items():\n        self._route_mappings[name] = RouterMappingPair(mapping=method_mapping, router=get_routing_for_object(obj))\n    return self",
    "docstring": "Add named objects with their corresponding method mapping. Parameters ---------- method_mapping : MethodMapping The mapping between the child and the parent's methods. **objs : dict A dictionary of objects from which metadata is extracted by calling :func: on them. Returns ------- self : MetadataRouter Returns .",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\utils\\_metadata_requests.py",
    "ast_data": "FunctionDef name:add arg:self arguments arg arg arg Assign Call For Call Assign Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, full_name=None, full_shape=None, var_offset=None, var_shape=None, save_slice_info_def=None, import_scope=None):\n    if save_slice_info_def:\n        assert isinstance(save_slice_info_def, variable_pb2.SaveSliceInfoDef)\n        self.full_name = ops.prepend_name_scope(save_slice_info_def.full_name, import_scope=import_scope)\n        self.full_shape = list(save_slice_info_def.full_shape)\n        self.var_offset = list(save_slice_info_def.var_offset)\n        self.var_shape = list(save_slice_info_def.var_shape)\n    else:\n        self.full_name = full_name\n        self.full_shape = full_shape\n        self.var_offset = var_offset\n        self.var_shape = var_shape",
    "docstring": "Create a . Args: full_name: Name of the full variable of which this is a slice. full_shape: Shape of the full variable, as a list of int. var_offset: Offset of this into the full variable, as a list of int. var_shape: Shape of this , as a list of int. save_slice_info_def: protocol buffer. If not , recreates the SaveSliceInfo object its contents. and other arguments are mutually exclusive. import_scope: Optional . Name scope to add. Only used when initializing from protocol buffer.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\variables.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:full_name arg:full_shape arg:var_offset arg:var_shape arg:save_slice_info_def arg:import_scope arguments arg arg arg arg arg arg arg If Call Assign Call Assign Call Assign Call Assign Call Assign Assign Assign Assign"
  },
  {
    "library": "django",
    "name": "_store",
    "source_code": "def _store(self, messages, response, remove_oldest=True, *args, **kwargs):\n    unstored_messages = []\n    serialized_messages = MessagePartSerializer().dumps(messages)\n    encoded_data = self._encode_parts(serialized_messages)\n    if self.max_cookie_size:\n        cookie = SimpleCookie()\n\n        def is_too_large_for_cookie(data):\n            return data and len(cookie.value_encode(data)[1]) > self.max_cookie_size\n\n        def compute_msg(some_serialized_msg):\n            return self._encode_parts([*some_serialized_msg, self.not_finished_json], encode_empty=True)\n        if is_too_large_for_cookie(encoded_data):\n            if remove_oldest:\n                idx = bisect_keep_right(serialized_messages, fn=lambda m: is_too_large_for_cookie(compute_msg(m)))\n                unstored_messages = messages[:idx]\n                encoded_data = compute_msg(serialized_messages[idx:])\n            else:\n                idx = bisect_keep_left(serialized_messages, fn=lambda m: is_too_large_for_cookie(compute_msg(m)))\n                unstored_messages = messages[idx:]\n                encoded_data = compute_msg(serialized_messages[:idx])\n    self._update_cookie(encoded_data, response)\n    return unstored_messages",
    "docstring": "Store the messages to a cookie and return a list of any messages which could not be stored. If the encoded data is larger than ``, remove messages until the data fits (these are the messages which are returned), and add the not_finished sentinel value to indicate as much.",
    "type": "method",
    "file_path": "django\\django\\contrib\\messages\\storage\\cookie.py",
    "ast_data": "FunctionDef name:_store arg:self arg:messages arg:response arg:remove_oldest arguments arg arg arg arg arg arg Assign Assign Call Call Assign Call If Assign Call FunctionDef name:is_too_large_for_cookie arg:data arguments arg Return return:yes BoolOp Compare Call Call FunctionDef name:compute_msg arg:some_serialized_msg arguments arg Return return:yes Call If Call If Assign Call arguments arg Call Call Assign Assign Call Assign Call arguments arg Call Call Assign Assign Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "set_custom_trace_id_callback",
    "source_code": "def set_custom_trace_id_callback(self, callback):\n    self.custom_trace_id_callback = callback",
    "docstring": "Sets a callback to be called when a new trace ID is generated.",
    "type": "method",
    "file_path": "pytorch\\torch\\profiler\\profiler.py",
    "ast_data": "FunctionDef name:set_custom_trace_id_callback arg:self arg:callback arguments arg arg Assign"
  },
  {
    "library": "cryptography",
    "name": "key_size",
    "source_code": "@property\n@abc.abstractmethod\ndef key_size(self) -> int:\n    pass",
    "docstring": "The bit length of the public modulus.",
    "type": "method",
    "file_path": "cryptography\\src\\cryptography\\hazmat\\primitives\\asymmetric\\rsa.py",
    "ast_data": "FunctionDef name:key_size arg:self arguments arg"
  },
  {
    "library": "tensorflow",
    "name": "_is_ref_dtype",
    "source_code": "@property\ndef _is_ref_dtype(self):\n    return self._type_enum > 100",
    "docstring": "Returns if this represents a reference type.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\dtypes.py",
    "ast_data": "FunctionDef name:_is_ref_dtype arg:self arguments arg Return return:yes Compare"
  },
  {
    "library": "django",
    "name": "get_paginate_by",
    "source_code": "def get_paginate_by(self, queryset):\n    return self.paginate_by",
    "docstring": "Get the number of items to paginate by, or `` for no pagination.",
    "type": "method",
    "file_path": "django\\django\\views\\generic\\list.py",
    "ast_data": "FunctionDef name:get_paginate_by arg:self arg:queryset arguments arg arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "load_cache_artifacts",
    "source_code": "def load_cache_artifacts(serialized_artifacts: bytes) -> Optional['CacheInfo']:\n    from ._cache import CacheArtifactManager, CacheInfo\n    return CacheArtifactManager.deserialize(serialized_artifacts)",
    "docstring": "Hot loads cache artifacts that were previously serialized via save_cache_artifacts Example: # From a previous invocation artifacts = torch.compiler.save_cache_artifacts() torch.compiler.load_cache_artifacts(artifacts[0])",
    "type": "function",
    "file_path": "pytorch\\torch\\compiler\\__init__.py",
    "ast_data": "FunctionDef name:load_cache_artifacts arg:serialized_artifacts arguments arg Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "context",
    "source_code": "@contextlib.contextmanager\ndef context(style, after_reset=False):\n    with mpl.rc_context():\n        if after_reset:\n            mpl.rcdefaults()\n        use(style)\n        yield",
    "docstring": "Context manager for using style settings temporarily. Parameters ---------- style : str, dict, Path or list A style specification. Valid options are: str - One of the style names in (a builtin style or a style installed in the user library path). - A dotted name of the form \"package.style_name\"; in that case, \"package\" should be an importable Python package name, e.g. at `.rc_params_from_filematplotlib.rcParams.rc_params_from_file`. list A list of style specifiers (str, Path or dict), which are applied from first to last in the list. after_reset : bool If True, apply style after resetting settings to their defaults; otherwise, apply style on top of the current settings.",
    "type": "function",
    "file_path": "matplotlib\\lib\\matplotlib\\style\\core.py",
    "ast_data": "FunctionDef name:context arg:style arg:after_reset arguments arg arg With Call If Call Call"
  },
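A usage sketch of the context manager:

```python
import matplotlib.pyplot as plt
import matplotlib.style

with matplotlib.style.context("ggplot", after_reset=True):
    plt.plot([1, 2, 3])  # drawn with ggplot defaults only
# on exit the previous rcParams are restored by the enclosing rc_context
```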
  {
    "library": "pytorch",
    "name": "ExplainWithBackend",
    "source_code": "class ExplainWithBackend:\n\n    def __init__(self, backend) -> None:\n        from .registry import lookup_backend\n        self.backend = lookup_backend(backend)\n        self.graphs = []\n        self.op_count = 0\n        self.break_reasons = []\n\n    def __call__(self, gm: torch.fx.GraphModule, example_inputs):\n        gm, self.graphs, self.op_count, _, self.break_reasons = _explain_graph_detail(gm, self.graphs, self.op_count, [], self.break_reasons)\n        return self.backend(gm, example_inputs)\n\n    def output(self) -> ExplainOutput:\n        graph_count = len(self.graphs)\n        output = ExplainOutput(self.graphs, graph_count, graph_count - 1, self.break_reasons, self.op_count)\n        return output",
    "docstring": "This class is intended to be used as a backend for . It is composable with other backends. When used in this way, it accumulates information about graph breaks, ops, and other info and provides a string representation summarizing this information. Attributes: backend (str): The name of the backend to use for optimization. graphs (list): A list of the graphs captured by TorchDynamo. op_count (int): The total number of operations in all optimized graphs. break_reasons (list): A list of graph break reasons with stack traces. Example Usage: def fn(x): x = torch.sigmoid(x) return x torch._dynamo.reset() eb = ExplainWithBackend(\"inductor\") optimized_fn = torch.compile(fn, backend=eb) result = optimized_fn(torch.randn(5)) print(eb.output())",
    "type": "class",
    "file_path": "pytorch\\torch\\_dynamo\\backends\\debugging.py",
    "ast_data": "ClassDef name:ExplainWithBackend FunctionDef name:__init__ arg:self arg:backend arguments arg arg Assign Call Assign Assign Assign FunctionDef name:__call__ arg:self arg:gm arg:example_inputs arguments arg arg arg Assign Call Return return:yes Call FunctionDef name:output arg:self arguments arg Assign Call Assign Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "verify_ninja_availability",
    "source_code": "def verify_ninja_availability():\n    if not is_ninja_available():\n        raise RuntimeError('Ninja is required to load C++ extensions (pip install ninja to get it)')",
    "docstring": "Raise `ninja `_ build system is not available on the system, does nothing otherwise.",
    "type": "function",
    "file_path": "pytorch\\torch\\utils\\cpp_extension.py",
    "ast_data": "FunctionDef name:verify_ninja_availability arguments If Call Raise Call"
  },
  {
    "library": "pytorch",
    "name": "softplus",
    "source_code": "@register_decomposition(aten.softplus)\n@_inplace_wrapper\n@out_wrapper()\n@elementwise_type_promotion_wrapper(type_promoting_args=('a',), type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT)\ndef softplus(a: TensorLikeType, beta: Optional[NumberType]=None, threshold: NumberType=20, inplace: bool=False) -> TensorLikeType:\n    if inplace:\n        raise NotImplementedError\n    rhs: TensorLikeType\n    if beta is not None:\n        python_type = utils.dtype_to_type(a.dtype)\n        if not utils.is_weakly_lesser_type(type(beta), python_type):\n            msg = f'beta argument of type {type(beta)} cannot be safely cast to type {python_type}!'\n            raise ValueError(msg)\n        scaled_input = a * beta\n        rhs = torch.true_divide(torch.log1p(torch.exp(scaled_input)), beta)\n    else:\n        scaled_input = a\n        rhs = torch.log1p(torch.exp(scaled_input))\n    return torch.where(scaled_input > threshold, a, rhs)",
    "docstring": "Reference implementation of torch.nn.functional.softplus",
    "type": "function",
    "file_path": "pytorch\\torch\\_refs\\nn\\functional\\__init__.py",
    "ast_data": "FunctionDef name:softplus arg:a arg:beta arg:threshold arg:inplace arguments arg arg arg arg If Raise If Compare Assign Call If Call Call Assign Call Raise Call Assign Assign Call Call Call Assign Assign Call Call Return return:yes Call Compare Call Call Call"
  },
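A sketch of the `beta`/`threshold` behavior described above, via the public functional API:

```python
import torch
import torch.nn.functional as F

x = torch.tensor([-2.0, 0.0, 25.0])
# where beta * x > threshold, the input is passed through unchanged
print(F.softplus(x, beta=2.0, threshold=20.0))  # last element stays 25.0
```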
  {
    "library": "pytorch",
    "name": "set_preserved_attributes",
    "source_code": "def set_preserved_attributes(self, attributes: list[str]) -> ConvertCustomConfig:\n    self.preserved_attributes = attributes\n    return self",
    "docstring": "Set the names of the attributes that will persist in the graph module even if they are not used in the model's `` method.",
    "type": "method",
    "file_path": "pytorch\\torch\\ao\\quantization\\fx\\custom_config.py",
    "ast_data": "FunctionDef name:set_preserved_attributes arg:self arg:attributes arguments arg arg Assign Return return:yes"
  },
  {
    "library": "scipy",
    "name": "c2c",
    "source_code": "def c2c(forward, x, n=None, axis=-1, norm=None, overwrite_x=False, workers=None, *, plan=None):\n    if plan is not None:\n        raise NotImplementedError('Passing a precomputed plan is not yet supported by scipy.fft functions')\n    tmp = _asfarray(x)\n    overwrite_x = overwrite_x or _datacopied(tmp, x)\n    norm = _normalization(norm, forward)\n    workers = _workers(workers)\n    if n is not None:\n        tmp, copied = _fix_shape_1d(tmp, n, axis)\n        overwrite_x = overwrite_x or copied\n    elif tmp.shape[axis] < 1:\n        message = f'invalid number of data points ({tmp.shape[axis]}) specified'\n        raise ValueError(message)\n    out = tmp if overwrite_x and tmp.dtype.kind == 'c' else None\n    return pfft.c2c(tmp, (axis,), forward, norm, out, workers)",
    "docstring": "Return discrete Fourier transform of real or complex sequence.",
    "type": "function",
    "file_path": "scipy\\scipy\\fft\\_pocketfft\\basic.py",
    "ast_data": "FunctionDef name:c2c arg:forward arg:x arg:n arg:axis arg:norm arg:overwrite_x arg:workers arguments arg arg arg arg arg arg arg arg If Compare Raise Call Assign Call Assign BoolOp Call Assign Call Assign Call If Compare Assign Call Assign BoolOp If Compare Assign Raise Call Assign BoolOp Compare Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "AsinhScale",
    "source_code": "class AsinhScale(ScaleBase):\n    name = 'asinh'\n    auto_tick_multipliers = {3: (2,), 4: (2,), 5: (2,), 8: (2, 4), 10: (2, 5), 16: (2, 4, 8), 64: (4, 16), 1024: (256, 512)}\n\n    def __init__(self, axis, *, linear_width=1.0, base=10, subs='auto', **kwargs):\n        super().__init__(axis)\n        self._transform = AsinhTransform(linear_width)\n        self._base = int(base)\n        if subs == 'auto':\n            self._subs = self.auto_tick_multipliers.get(self._base)\n        else:\n            self._subs = subs\n    linear_width = property(lambda self: self._transform.linear_width)\n\n    def get_transform(self):\n        return self._transform\n\n    def set_default_locators_and_formatters(self, axis):\n        axis.set(major_locator=AsinhLocator(self.linear_width, base=self._base), minor_locator=AsinhLocator(self.linear_width, base=self._base, subs=self._subs), minor_formatter=NullFormatter())\n        if self._base > 1:\n            axis.set_major_formatter(LogFormatterSciNotation(self._base))\n        else:\n            axis.set_major_formatter('{x:.3g}')",
    "docstring": "A quasi-logarithmic scale based on the inverse hyperbolic sine (asinh) For values close to zero, this is essentially a linear scale, but for large magnitude values (either positive or negative) it is asymptotically logarithmic. The transition between these linear and logarithmic regimes is smooth, and has no discontinuities in the function gradient in contrast to the (\"symlog\") scale. Specifically, the transformation of an axis coordinate :math: is :math: where :math: is the effective width of the linear region of the transformation. In that region, the transformation is :math:. For large values of :math: the transformation behaves as :math:. .. note:: This API is provisional and may be revised in the future based on early user feedback.",
    "type": "class",
    "file_path": "matplotlib\\lib\\matplotlib\\scale.py",
    "ast_data": "ClassDef name:AsinhScale Assign Assign FunctionDef name:__init__ arg:self arg:axis arguments arg arg arg arg arg arg Call Call Assign Call Assign Call If Compare Assign Call Assign Assign Call arguments arg FunctionDef name:get_transform arg:self arguments arg Return return:yes FunctionDef name:set_default_locators_and_formatters arg:self arg:axis arguments arg arg Call Call Call Call If Compare Call Call Call"
  },
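A usage sketch, assuming a Matplotlib version in which the "asinh" scale is registered (3.6+):

```python
import numpy as np
import matplotlib.pyplot as plt  # "asinh" scale assumed available (mpl >= 3.6)

x = np.linspace(-1000, 1000, 1001)
fig, ax = plt.subplots()
ax.plot(x, x)
ax.set_yscale("asinh", linear_width=10)  # linear near 0, log-like for |y| >> 10
```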
  {
    "library": "matplotlib",
    "name": "_get_draw_artists",
    "source_code": "def _get_draw_artists(self, renderer):\n    artists = self.get_children()\n    artists.remove(self.patch)\n    artists = sorted((artist for artist in artists if not artist.get_animated()), key=lambda artist: artist.get_zorder())\n    for ax in self._localaxes:\n        locator = ax.get_axes_locator()\n        ax.apply_aspect(locator(ax, renderer) if locator else None)\n        for child in ax.get_children():\n            if hasattr(child, 'apply_aspect'):\n                locator = child.get_axes_locator()\n                child.apply_aspect(locator(child, renderer) if locator else None)\n    return artists",
    "docstring": "Also runs apply_aspect",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\figure.py",
    "ast_data": "FunctionDef name:_get_draw_artists arg:self arg:renderer arguments arg arg Assign Call Call Assign Call Call arguments arg Call For Assign Call Call Call For Call If Call Assign Call Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "GetContainingWhileContext",
    "source_code": "def GetContainingWhileContext(ctxt, stop_ctxt=None):\n    while ctxt:\n        if ctxt.IsWhileContext() or ctxt == stop_ctxt:\n            return ctxt\n        ctxt = ctxt.outer_context\n    return None",
    "docstring": "Returns the first ancestor WhileContext of . Returns if is a WhileContext, or None if is not in a while loop. Args: ctxt: ControlFlowContext stop_ctxt: ControlFlowContext, optional. If provided, the search will end if it sees stop_ctxt. Returns: if is a WhileContext, the most nested WhileContext containing , or None if is not in a while loop. If is not , this returns if it matches in its traversal.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\control_flow_util.py",
    "ast_data": "FunctionDef name:GetContainingWhileContext arg:ctxt arg:stop_ctxt arguments arg arg While If BoolOp Call Compare Return return:yes Assign Return return:no"
  },
  {
    "library": "pandas",
    "name": "standardize_mapping",
    "source_code": "def standardize_mapping(into):\n    if not inspect.isclass(into):\n        if isinstance(into, defaultdict):\n            return partial(defaultdict, into.default_factory)\n        into = type(into)\n    if not issubclass(into, abc.Mapping):\n        raise TypeError(f'unsupported type: {into}')\n    if into == defaultdict:\n        raise TypeError('to_dict() only accepts initialized defaultdicts')\n    return into",
    "docstring": "Helper function to standardize a supplied mapping. Parameters ---------- into : instance or subclass of collections.abc.Mapping Must be a class, an initialized collections.defaultdict, or an instance of a collections.abc.Mapping subclass. Returns ------- mapping : a collections.abc.Mapping subclass or other constructor a callable object that can accept an iterator to create the desired Mapping. See Also -------- DataFrame.to_dict Series.to_dict",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\common.py",
    "ast_data": "FunctionDef name:standardize_mapping arg:into arguments arg If Call If Call Return return:yes Call Assign Call If Call Raise Call If Compare Raise Call Return return:yes"
  },
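The helper backs the public `into=` argument of `to_dict`; a sketch of the behaviors it standardizes:

```python
from collections import OrderedDict, defaultdict
import pandas as pd

df = pd.DataFrame({"a": [1, 2]})
print(df.to_dict(into=OrderedDict))        # a Mapping subclass is instantiated
print(df.to_dict(into=defaultdict(list)))  # defaultdicts must come pre-initialized
```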
  {
    "library": "pytorch",
    "name": "_get_active_ddp_module",
    "source_code": "@classmethod\ndef _get_active_ddp_module(cls):\n    return cls._active_ddp_module",
    "docstring": "requires DDP's status and module for cooperative optimization.",
    "type": "method",
    "file_path": "pytorch\\torch\\nn\\parallel\\distributed.py",
    "ast_data": "FunctionDef name:_get_active_ddp_module arg:cls arguments arg Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "_forward_pass_fast",
    "source_code": "def _forward_pass_fast(self, X, check_input=True):\n    if check_input:\n        X = validate_data(self, X, accept_sparse=['csr', 'csc'], reset=False)\n    activation = X\n    hidden_activation = ACTIVATIONS[self.activation]\n    for i in range(self.n_layers_ - 1):\n        activation = safe_sparse_dot(activation, self.coefs_[i])\n        activation += self.intercepts_[i]\n        if i != self.n_layers_ - 2:\n            hidden_activation(activation)\n    output_activation = ACTIVATIONS[self.out_activation_]\n    output_activation(activation)\n    return activation",
    "docstring": "Predict using the trained model This is the same as _forward_pass but does not record the activations of all layers and only returns the last layer's activation. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) The input data. check_input : bool, default=True Perform input data validation or not. Returns ------- y_pred : ndarray of shape (n_samples,) or (n_samples, n_outputs) The decision function of the samples for each class in the model.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\neural_network\\_multilayer_perceptron.py",
    "ast_data": "FunctionDef name:_forward_pass_fast arg:self arg:X arg:check_input arguments arg arg arg If Assign Call Assign Assign For Call Assign Call If Compare Call Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_decompose_indexed_slices",
    "source_code": "def _decompose_indexed_slices(self, indexed_slices):\n    per_var_indices, partition_assignments = self._decompose_indices(indexed_slices.indices)\n    per_var_values = data_flow_ops.dynamic_partition(indexed_slices.values, partition_assignments, len(self._variables))\n    return [indexed_slices_lib.IndexedSlices(values=per_var_values[i], indices=per_var_indices[i]) for i in range(len(self._variables))]",
    "docstring": "Decompose a global into a list of per-variable ones.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\sharded_variable.py",
    "ast_data": "FunctionDef name:_decompose_indexed_slices arg:self arg:indexed_slices arguments arg arg Assign Call Assign Call Call Return return:yes Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "code",
    "source_code": "@property\ndef code(self):\n    return self.forward.code",
    "docstring": "Return a pretty-printed representation (as valid Python syntax) of the internal graph for the `inspecting-code` for details.",
    "type": "method",
    "file_path": "pytorch\\torch\\jit\\_script.py",
    "ast_data": "FunctionDef name:code arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "wait_for_other_workers",
    "source_code": "def wait_for_other_workers():\n    return dc_context.get_current_worker_context().wait_for_other_workers()",
    "docstring": "Waits for other workers to reach the same call to this method.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\multi_worker_util.py",
    "ast_data": "FunctionDef name:wait_for_other_workers arguments Return return:yes Call Call"
  },
  {
    "library": "matplotlib",
    "name": "set_rasterization_zorder",
    "source_code": "def set_rasterization_zorder(self, z):\n    self._rasterization_zorder = z\n    self.stale = True",
    "docstring": "Set the zorder threshold for rasterization for vector graphics output. All artists with a zorder below the given value will be rasterized if they support rasterization. This setting is ignored for pixel-based output. See also :doc:. Parameters ---------- z : float or None The zorder below which artists are rasterized. If `` rasterization based on zorder is deactivated.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\axes\\_base.py",
    "ast_data": "FunctionDef name:set_rasterization_zorder arg:self arg:z arguments arg arg Assign Assign"
  },
  {
    "library": "pandas",
    "name": "_element",
    "source_code": "def _element(html_element: str, html_class: str | None, value: Any, is_visible: bool, **kwargs) -> dict:\n    if 'display_value' not in kwargs or kwargs['display_value'] is None:\n        kwargs['display_value'] = value\n    return {'type': html_element, 'value': value, 'class': html_class, 'is_visible': is_visible, **kwargs}",
    "docstring": "Template to return container with information for a or element.",
    "type": "function",
    "file_path": "pandas\\pandas\\io\\formats\\style_render.py",
    "ast_data": "FunctionDef name:_element arg:html_element arg:html_class arg:value arg:is_visible arguments arg arg arg arg arg If BoolOp Compare Compare Assign Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "find_matched_symbols",
    "source_code": "def find_matched_symbols(symbols_regex: re.Pattern[str], test_globs: list[str]=CPP_TEST_GLOBS) -> set[str]:\n    matched_symbols = set()\n    for cpp_test_glob in test_globs:\n        for test_file in REPO_ROOT.glob(cpp_test_glob):\n            with open(test_file) as tf:\n                for test_file_line in tf:\n                    test_file_line = test_file_line.strip()\n                    if test_file_line.startswith(('//', '#')) or test_file_line == '':\n                        continue\n                    matches = re.findall(symbols_regex, test_file_line)\n                    for m in matches:\n                        if m != '':\n                            matched_symbols.add(m)\n    return matched_symbols",
    "docstring": "Goes through all lines not starting with // in the cpp files and accumulates a list of matches with the symbols_regex. Note that we expect symbols_regex to be sorted in reverse alphabetical order to allow superset regexes to get matched.",
    "type": "function",
    "file_path": "pytorch\\tools\\linter\\adapters\\header_only_linter.py",
    "ast_data": "FunctionDef name:find_matched_symbols arg:symbols_regex arg:test_globs arguments arg arg Assign Call For For Call With Call For Assign Call If BoolOp Call Compare Assign Call For If Compare Call Return return:yes"
  },
  {
    "library": "kornia",
    "name": "LongestMaxSize",
    "source_code": "class LongestMaxSize(Resize):\n\n    def __init__(self, max_size: int, resample: Union[str, int, Resample]=Resample.BILINEAR.name, align_corners: bool=True, p: float=1.0) -> None:\n        super().__init__(size=max_size, side='long', resample=resample, align_corners=align_corners, p=p)",
    "docstring": "Rescale an image so that maximum side is equal to max_size, keeping the aspect ratio of the initial image. Args: max_size: maximum size of the image after the transformation.",
    "type": "class",
    "file_path": "kornia\\kornia\\augmentation\\_2d\\geometric\\resize.py",
    "ast_data": "ClassDef name:LongestMaxSize FunctionDef name:__init__ arg:self arg:max_size arg:resample arg:align_corners arg:p arguments arg arg arg arg arg Call Call"
  },
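As a sketch of the `side='long'` rule that LongestMaxSize delegates to Resize (a simplified illustration, not Kornia's implementation):

```python
def longest_max_size(h: int, w: int, max_size: int) -> tuple[int, int]:
    # scale so the longest side lands exactly on max_size, preserving aspect ratio
    scale = max_size / max(h, w)
    return round(h * scale), round(w * scale)

print(longest_max_size(480, 640, 320))  # (240, 320)
```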
  {
    "library": "pytorch",
    "name": "will_fusion_create_cycle",
    "source_code": "def will_fusion_create_cycle(self, node1: BaseSchedulerNode, node2: BaseSchedulerNode) -> bool:\n    visited = OrderedSet[FusedSchedulerNode]()\n\n    def found_path(node: BaseSchedulerNode) -> bool:\n        if isinstance(node, FusedSchedulerNode) and node not in visited:\n            visited.add(node)\n            if node.get_operation_names().issubset(combined_ancestors):\n                return False\n            else:\n                return bool(combined_names & node.ancestors) or any((found_path(self.name_to_fused_node[n]) for n in node.ancestors - combined_ancestors))\n        return False\n    combined_names = node1.get_operation_names()._dict.keys() | node2.get_operation_names()._dict.keys()\n    combined_ancestors = (node1.ancestors._dict.keys() | node2.ancestors._dict.keys()) - combined_names\n    cycle = any((found_path(self.name_to_fused_node[n]) for n in combined_ancestors))\n    if cycle:\n        WhyNoFuse(node1, node2)('will create cycle')\n    return cycle",
    "docstring": "Finds whether there's a path from node1 to node2 (or vice-versa) caused indirectly by other fusions.",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\scheduler.py",
    "ast_data": "FunctionDef name:will_fusion_create_cycle arg:self arg:node1 arg:node2 arguments arg arg arg Assign Call FunctionDef name:found_path arg:node arguments arg If BoolOp Call Compare Call If Call Call Return return:yes Return return:yes BoolOp Call Call Call Return return:yes Assign Call Call Call Call Assign Call Call Assign Call Call If Call Call Return return:yes"
  },
  {
    "library": "pandas",
    "name": "is_platform_linux",
    "source_code": "def is_platform_linux() -> bool:\n    return sys.platform == 'linux'",
    "docstring": "Checking if the running platform is linux. Returns ------- bool True if the running platform is linux.",
    "type": "function",
    "file_path": "pandas\\pandas\\compat\\__init__.py",
    "ast_data": "FunctionDef name:is_platform_linux arguments Return return:yes Compare"
  },
  {
    "library": "matplotlib",
    "name": "Node",
    "source_code": "class Node:\n\n    def __init__(self) -> None:\n        self.size = 0\n\n    def __repr__(self) -> str:\n        return type(self).__name__\n\n    def get_kerning(self, next: Node | None) -> float:\n        return 0.0\n\n    def shrink(self) -> None:\n        self.size += 1\n\n    def render(self, output: Output, x: float, y: float) -> None:\n        pass",
    "docstring": "A node in the TeX box model.",
    "type": "class",
    "file_path": "matplotlib\\lib\\matplotlib\\_mathtext.py",
    "ast_data": "ClassDef name:Node FunctionDef name:__init__ arg:self arguments arg Assign FunctionDef name:__repr__ arg:self arguments arg Return return:yes Call FunctionDef name:get_kerning arg:self arg:next arguments arg arg Return return:yes FunctionDef name:shrink arg:self arguments arg FunctionDef name:render arg:self arg:output arg:x arg:y arguments arg arg arg arg"
  },
  {
    "library": "scikit-learn",
    "name": "_check_X_y",
    "source_code": "def _check_X_y(self, X, y, reset=True):\n    return validate_data(self, X, y, accept_sparse='csr', reset=reset)",
    "docstring": "Validate X and y in fit methods.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\naive_bayes.py",
    "ast_data": "FunctionDef name:_check_X_y arg:self arg:X arg:y arg:reset arguments arg arg arg arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "block_stmt",
    "source_code": "def block_stmt(stmt: str, indent: int=0) -> str:\n    block_size = 100\n    loop_count = number // block_size\n    if loop_count == 1:\n        loop_count = 0\n    remainder = number - block_size * loop_count\n    blocked_stmt = ''\n    if loop_count:\n        unrolled_stmts = textwrap.indent('\\n'.join([stmt] * block_size), ' ' * 4)\n        blocked_stmt += f'for _ in range({loop_count}):\\n{unrolled_stmts}\\n'\n    if remainder:\n        blocked_stmt += '\\n'.join([stmt] * remainder)\n    return textwrap.indent(blocked_stmt, ' ' * indent)",
    "docstring": "Partially unroll benchmark loop. The naive template looks something like: \"for _ in range({number}): {stmt}\" However a loop in Python is surprisingly expensive, and significantly increases the number of background Python instructions. So instead we partially unroll the loops, with a block size of 100 chosen to keep the instruction overhead from low while also not ballooning the size of the generated file.",
    "type": "method",
    "file_path": "pytorch\\torch\\utils\\benchmark\\utils\\valgrind_wrapper\\timer_interface.py",
    "ast_data": "FunctionDef name:block_stmt arg:stmt arg:indent arguments arg arg Assign Assign If Compare Assign Assign Assign If Assign Call Call If Call Return return:yes Call"
  },
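To see the unrolling arithmetic concretely, here is a standalone variant of the function above with `number` passed explicitly (the original reads it from an enclosing scope):

```python
import textwrap

def block_stmt(stmt: str, number: int, indent: int = 0) -> str:
    block_size = 100
    loop_count = number // block_size
    if loop_count == 1:
        loop_count = 0  # a single block is cheaper fully unrolled
    remainder = number - block_size * loop_count
    blocked = ''
    if loop_count:
        unrolled = textwrap.indent('\n'.join([stmt] * block_size), ' ' * 4)
        blocked += f'for _ in range({loop_count}):\n{unrolled}\n'
    if remainder:
        blocked += '\n'.join([stmt] * remainder)
    return textwrap.indent(blocked, ' ' * indent)

src = block_stmt('x += 1', number=250)
# 250 executions = 2 loop iterations over 100 unrolled copies + 50 literal copies
print(src.count('x += 1'))  # 150
```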
  {
    "library": "pytorch",
    "name": "_split_tensor",
    "source_code": "def _split_tensor(self, tensor: torch.Tensor, num_chunks: int, *, with_padding: bool=True, contiguous: bool=True) -> tuple[list[torch.Tensor], list[int]]:\n    assert self.dim <= tensor.ndim, f'Sharding dim {self.dim} greater than tensor ndim {tensor.ndim}'\n    tensor_list = list(torch.chunk(tensor, num_chunks, dim=self.dim))\n    tensor_list = fill_empty_tensor_to_shards(tensor_list, self.dim, num_chunks - len(tensor_list))\n    full_chunk_size = (tensor.size(self.dim) + num_chunks - 1) // num_chunks\n    shard_list: list[torch.Tensor] = []\n    pad_sizes: list[int] = []\n    for shard in tensor_list:\n        if with_padding:\n            pad_size = full_chunk_size - shard.size(self.dim)\n            shard = pad_tensor(shard, self.dim, pad_size)\n            pad_sizes.append(pad_size)\n        if contiguous:\n            shard = shard.contiguous()\n        shard_list.append(shard)\n    return (shard_list, pad_sizes)",
    "docstring": "This function uses torch.chunk to split a tensor into num_chunks shards along the Shard placement dimension, and return a list of shards with their pad sizes. Keyword args: with_padding (bool, optional): when True, we pad the tensor on the last few ranks before calling the collectives (i.e. scatter/all_gather, etc.). This is because collectives usually require equal size tensor inputs",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\tensor\\placement_types.py",
    "ast_data": "FunctionDef name:_split_tensor arg:self arg:tensor arg:num_chunks arguments arg arg arg arg arg Compare Assign Call Call Assign Call Call Assign Call For If Assign Call Assign Call Call If Assign Call Call Return return:yes"
  },
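The chunk-then-pad pattern in isolation, using plain torch calls in place of the module's `pad_tensor` / `fill_empty_tensor_to_shards` helpers (an illustrative sketch):

```python
import torch
import torch.nn.functional as F

def split_with_padding(t: torch.Tensor, num_chunks: int, dim: int = 0):
    shards = list(torch.chunk(t, num_chunks, dim=dim))
    full = (t.size(dim) + num_chunks - 1) // num_chunks  # ceil division
    padded, pad_sizes = [], []
    for shard in shards:
        pad = full - shard.size(dim)
        if pad:
            # F.pad's spec runs from the last dim backwards; pad the end of `dim`
            spec = [0, 0] * (t.ndim - dim - 1) + [0, pad]
            shard = F.pad(shard, spec)
        padded.append(shard.contiguous())
        pad_sizes.append(pad)
    return padded, pad_sizes

shards, pads = split_with_padding(torch.arange(10), num_chunks=4)
print([s.tolist() for s in shards], pads)
# four shards of size 3; the last shard [9] is padded with two zeros
```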
  {
    "library": "tensorflow",
    "name": "_as_graph_element",
    "source_code": "def _as_graph_element(obj):\n    conv_fn = getattr(obj, '_as_graph_element', None)\n    if conv_fn and callable(conv_fn):\n        return conv_fn()\n    return None",
    "docstring": "Convert to a graph element if possible, otherwise return . Args: obj: Object to convert. Returns: The result of if that method is available; otherwise .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\ops.py",
    "ast_data": "FunctionDef name:_as_graph_element arg:obj arguments arg Assign Call If BoolOp Call Return return:yes Call Return return:no"
  },
  {
    "library": "tensorflow",
    "name": "tf_record_random_reader",
    "source_code": "def tf_record_random_reader(path):\n    return _pywrap_record_io.RandomRecordReader(path)",
    "docstring": "Creates a reader that allows random-access reads from a TFRecords file. The created reader object has the following method: - , which returns a tuple of , where is the TFRecord read at the offset, and is the ending offset of the read record. The method throws a if data is corrupted at the given offset. The method throws if the offset is out of range for the TFRecords file. Usage example: Args: path: The path to the TFRecords file. Returns: An object that supports random-access reading of the serialized TFRecords. Raises: IOError: If cannot be opened for reading.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\lib\\io\\tf_record.py",
    "ast_data": "FunctionDef name:tf_record_random_reader arg:path arguments arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_BesselK0Grad",
    "source_code": "@ops.RegisterGradient('BesselK0')\ndef _BesselK0Grad(op: ops.Operation, grad):\n    x = op.inputs[0]\n    with ops.control_dependencies([grad]):\n        partial_x = -special_math_ops.bessel_k1(x)\n        return grad * partial_x",
    "docstring": "Compute gradient of bessel_k0(x) with respect to its argument.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\math_grad.py",
    "ast_data": "FunctionDef name:_BesselK0Grad arg:op arg:grad arguments arg arg Assign With Call Assign Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "tree_flatten",
    "source_code": "def tree_flatten(tree: PyTree, is_leaf: Optional[Callable[[PyTree], bool]]=None) -> tuple[list[Any], TreeSpec]:\n\n    def helper(node: PyTree, leaves: list[Any]) -> TreeSpec:\n        if tree_is_leaf(node, is_leaf=is_leaf):\n            leaves.append(node)\n            return _LEAF_SPEC\n        node_type = _get_node_type(node)\n        flatten_fn = SUPPORTED_NODES[node_type].flatten_fn\n        children, context = flatten_fn(node)\n        subspecs = [helper(child, leaves) for child in children]\n        return TreeSpec(node_type, context, subspecs)\n    leaves: list[Any] = []\n    treespec = helper(tree, leaves)\n    return (leaves, treespec)",
    "docstring": "Flattens a pytree into a list of values and a TreeSpec that can be used to reconstruct the pytree.",
    "type": "function",
    "file_path": "pytorch\\torch\\utils\\_pytree.py",
    "ast_data": "FunctionDef name:tree_flatten arg:tree arg:is_leaf arguments arg arg FunctionDef name:helper arg:node arg:leaves arguments arg arg If Call Call Return return:yes Assign Call Assign Assign Call Assign Call Return return:yes Call Assign Call Return return:yes"
  },
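Usage of the flatten/unflatten pair (the round trip is the point of returning a TreeSpec):

```python
from torch.utils._pytree import tree_flatten, tree_unflatten

tree = {'a': [1, 2], 'b': (3, {'c': 4})}
leaves, spec = tree_flatten(tree)
print(leaves)                                # [1, 2, 3, 4]
print(tree_unflatten(leaves, spec) == tree)  # True: the spec rebuilds the structure
```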
  {
    "library": "matplotlib",
    "name": "get_extents",
    "source_code": "def get_extents(self):\n    return self.get_path().get_extents(self.get_transform())",
    "docstring": "Return the 's axis-aligned extents as a .",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\patches.py",
    "ast_data": "FunctionDef name:get_extents arg:self arguments arg Return return:yes Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "is_strictly_increasing",
    "source_code": "@tf_export('math.is_strictly_increasing', v1=['math.is_strictly_increasing', 'debugging.is_strictly_increasing', 'is_strictly_increasing'])\n@dispatch.add_dispatch_support\n@deprecation.deprecated_endpoints('debugging.is_strictly_increasing', 'is_strictly_increasing')\ndef is_strictly_increasing(x, name=None):\n    with ops.name_scope(name, 'is_strictly_increasing', [x]):\n        diff = _get_results_for_monotonic_comparison(x, math_ops.greater)\n        return math_ops.reduce_all(diff)",
    "docstring": "Returns if is strictly increasing. Elements of are compared in row-major order. The tensor is strictly increasing if for every adjacent pair we have TensorTensorTruexx` is not a numeric tensor.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\check_ops.py",
    "ast_data": "FunctionDef name:is_strictly_increasing arg:x arg:name arguments arg arg With Call Assign Call Return return:yes Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "common_reduction_strategy",
    "source_code": "def common_reduction_strategy(input_strategy: OpStrategy, reduce_dims: list[int], keep_dim: bool=False, reduction_linear: bool=True, reduction_op: ReductionOpType='sum') -> OpStrategy:\n    reduction_strategy = OpStrategy([])\n    for strtg in input_strategy.strategies:\n        if not reduction_linear:\n            input_placements = replicate_reduction_dims(strtg.output_spec.placements, reduce_dims)\n        else:\n            input_placements = strtg.output_spec.placements\n        input_spec = DTensorSpec(mesh=input_strategy.mesh, placements=input_placements, tensor_meta=strtg.output_spec.tensor_meta)\n        reduce_dims_map = _infer_reduce_dims_map(reduce_dims, input_spec.ndim, keep_dim)\n        out_placements = map_placements_after_reduction(input_spec.placements, reduce_dims, reduce_dims_map, reduction_op)\n        redistribute_cost = [generate_redistribute_costs(input_strategy, input_spec)]\n        reduction_strategy.strategies.append(PlacementStrategy(output_specs=DTensorSpec(mesh=input_strategy.mesh, placements=out_placements), input_specs=(input_spec,), redistribute_cost=redistribute_cost))\n    return reduction_strategy",
    "docstring": "reduction_linear means that the reduction follows this rule: f([f(a), f(b)]) = f([a, b]) reduction linear should be super set of linearity.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\tensor\\_ops\\_math_ops.py",
    "ast_data": "FunctionDef name:common_reduction_strategy arg:input_strategy arg:reduce_dims arg:keep_dim arg:reduction_linear arg:reduction_op arguments arg arg arg arg arg Assign Call For If Assign Call Assign Assign Call Assign Call Assign Call Assign Call Call Call Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "set_y",
    "source_code": "def set_y(self, y):\n    self._y0 = y\n    self.stale = True",
    "docstring": "Set the bottom coordinate of the rectangle.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\patches.py",
    "ast_data": "FunctionDef name:set_y arg:self arg:y arguments arg arg Assign Assign"
  },
  {
    "library": "django",
    "name": "x",
    "source_code": "@property\ndef x(self):\n    return capi.getx(self.ptr, 0)",
    "docstring": "Return the X coordinate for this Point.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\gdal\\geometries.py",
    "ast_data": "FunctionDef name:x arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "cherrypy",
    "name": "update",
    "source_code": "def update(self, config):\n    self._apply(Parser.load(config))",
    "docstring": "Update self from a dict, file, or filename.",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\lib\\reprconf.py",
    "ast_data": "FunctionDef name:update arg:self arg:config arguments arg arg Call Call"
  },
  {
    "library": "scikit-learn",
    "name": "predict",
    "source_code": "def predict(self, X):\n    return super().predict(X)",
    "docstring": "Perform classification on an array of test vectors X. The predicted class C for each sample in X is returned. Parameters ---------- X : array-like of shape (n_samples, n_features) Vector to be scored, where is the number of samples and is the number of features. Returns ------- C : ndarray of shape (n_samples,) Estimated probabilities.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\discriminant_analysis.py",
    "ast_data": "FunctionDef name:predict arg:self arg:X arguments arg arg Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_as_graph_element",
    "source_code": "def _as_graph_element(self):\n    return self._graph_element",
    "docstring": "Conversion function for Graph.as_graph_element().",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\resource_variable_ops.py",
    "ast_data": "FunctionDef name:_as_graph_element arg:self arguments arg Return return:yes"
  },
  {
    "library": "scipy",
    "name": "cityblock",
    "source_code": "def cityblock(u, v, w=None):\n    u = _validate_vector(u)\n    v = _validate_vector(v)\n    l1_diff = abs(u - v)\n    if w is not None:\n        w = _validate_weights(w)\n        l1_diff = w * l1_diff\n    return l1_diff.sum()",
    "docstring": "Compute the City Block (Manhattan) distance. Computes the Manhattan distance between two 1-D arrays and , which is defined as .. math:: \\sum_i {\\left| u_i - v_i \\right|}. Parameters ---------- u : (N,) array_like Input array. v : (N,) array_like Input array. w : (N,) array_like, optional The weights for each value in and . Default is None, which gives each value a weight of 1.0 Returns ------- cityblock : double The City Block (Manhattan) distance between vectors and . Examples -------- >>> from scipy.spatial import distance >>> distance.cityblock([1, 0, 0], [0, 1, 0]) 2 >>> distance.cityblock([1, 0, 0], [0, 2, 0]) 3 >>> distance.cityblock([1, 0, 0], [1, 1, 0]) 1",
    "type": "function",
    "file_path": "scipy\\scipy\\spatial\\distance.py",
    "ast_data": "FunctionDef name:cityblock arg:u arg:v arg:w arguments arg arg arg Assign Call Assign Call Assign Call If Compare Assign Call Assign Return return:yes Call"
  },
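Stripped of validation, the function is a weighted L1 norm; a NumPy equivalent for comparison:

```python
import numpy as np

def manhattan(u, v, w=None):
    d = np.abs(np.asarray(u, dtype=float) - np.asarray(v, dtype=float))
    return float((np.asarray(w) * d).sum()) if w is not None else float(d.sum())

print(manhattan([1, 0, 0], [0, 2, 0]))               # 3.0
print(manhattan([1, 0, 0], [0, 2, 0], w=[1, 2, 1]))  # 5.0: weight 2 doubles axis 1
```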
  {
    "library": "scipy",
    "name": "perm",
    "source_code": "def perm(N, k, exact=False):\n    if exact:\n        N = np.squeeze(N)[()]\n        k = np.squeeze(k)[()]\n        if not (isscalar(N) and isscalar(k)):\n            raise ValueError('`N` and `k` must be scalar integers with `exact=True`.')\n        floor_N, floor_k = (int(N), int(k))\n        non_integral = not (floor_N == N and floor_k == k)\n        if non_integral:\n            raise ValueError('Non-integer `N` and `k` with `exact=True` is not supported.')\n        if k > N or N < 0 or k < 0:\n            return 0\n        val = 1\n        for i in range(floor_N - floor_k + 1, floor_N + 1):\n            val *= i\n        return val\n    else:\n        k, N = (asarray(k), asarray(N))\n        cond = (k <= N) & (N >= 0) & (k >= 0)\n        vals = poch(N - k + 1, k)\n        if isinstance(vals, np.ndarray):\n            vals[~cond] = 0\n        elif not cond:\n            vals = np.float64(0)\n        return vals",
    "docstring": "Permutations of N things taken k at a time, i.e., k-permutations of N. It's also known as \"partial permutations\". Parameters ---------- N : int, ndarray Number of things. k : int, ndarray Number of elements taken. exact : bool, optional If `Nkpoch`. Returns ------- val : int, ndarray The number of k-permutations of N. Notes ----- - Array arguments accepted only for exact=False case. - If k > N, N >> import numpy as np >>> from scipy.special import perm >>> k = np.array([3, 4]) >>> n = np.array([10, 10]) >>> perm(n, k) array([ 720., 5040.]) >>> perm(10, 3, exact=True) 720",
    "type": "function",
    "file_path": "scipy\\scipy\\special\\_basic.py",
    "ast_data": "FunctionDef name:perm arg:N arg:k arg:exact arguments arg arg arg If Assign Call Assign Call If BoolOp Call Call Raise Call Assign Call Call Assign BoolOp Compare Compare If Raise Call If BoolOp Compare Compare Compare Return return:yes Assign For Call Return return:yes Assign Call Call Assign Compare Compare Compare Assign Call If Call Assign If Assign Call Return return:yes"
  },
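The `exact=True` branch is the falling factorial N * (N-1) * ... * (N-k+1); a minimal standalone version, cross-checked against `math.perm`:

```python
import math

def perm_exact(N: int, k: int) -> int:
    if k > N or N < 0 or k < 0:
        return 0
    val = 1
    for i in range(N - k + 1, N + 1):  # multiply the k largest factors
        val *= i
    return val

print(perm_exact(10, 3), math.perm(10, 3))  # 720 720
```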
  {
    "library": "tensorflow",
    "name": "_mean_flops",
    "source_code": "@ops.RegisterStatistics('Mean', 'flops')\ndef _mean_flops(graph, node):\n    return _reduction_op_flops(graph, node, reduce_flops=1, finalize_flops=1)",
    "docstring": "Compute flops for Mean operation.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\profiler\\internal\\flops_registry.py",
    "ast_data": "FunctionDef name:_mean_flops arg:graph arg:node arguments arg arg Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, wrapped_list):\n    self._non_append_mutation_value = False\n    self._external_modification_value = False\n    super().__init__(wrapped_list)\n    self._last_wrapped_list_snapshot = list(self._storage)",
    "docstring": "Construct a new list wrapper. Args: wrapped_list: The initial value of the data structure. A shallow copy may be maintained for error checking. itself should not be modified directly after constructing the , and if changes are detected the will throw an exception on save.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\trackable\\data_structures.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:wrapped_list arguments arg arg Assign Assign Call Call Assign Call"
  },
  {
    "library": "scipy",
    "name": "_kpp",
    "source_code": "def _kpp(data, k, rng, xp):\n    ndim = len(data.shape)\n    if ndim == 1:\n        data = data[:, None]\n    dims = data.shape[1]\n    init = xp.empty((int(k), dims))\n    for i in range(k):\n        if i == 0:\n            data_idx = rng_integers(rng, data.shape[0])\n        else:\n            D2 = cdist(init[:i, :], data, metric='sqeuclidean').min(axis=0)\n            probs = D2 / D2.sum()\n            cumprobs = probs.cumsum()\n            r = rng.uniform()\n            cumprobs = np.asarray(cumprobs)\n            data_idx = int(np.searchsorted(cumprobs, r))\n        init = xpx.at(init)[i, :].set(data[data_idx, :])\n    if ndim == 1:\n        init = init[:, 0]\n    return init",
    "docstring": "Picks k points in the data based on the kmeans++ method. Parameters ---------- data : ndarray Expect a rank 1 or 2 array. Rank 1 is assumed to describe 1-D data, rank 2 multidimensional data, in which case one row is one observation. k : int Number of samples to generate. rng : or Random number generator. Returns ------- init : ndarray A 'k' by 'N' containing the initial centroids. References ---------- .. [1] D. Arthur and S. Vassilvitskii, \"k-means++: the advantages of careful seeding\", Proceedings of the Eighteenth Annual ACM-SIAM Symposium on Discrete Algorithms, 2007.",
    "type": "function",
    "file_path": "scipy\\scipy\\cluster\\vq.py",
    "ast_data": "FunctionDef name:_kpp arg:data arg:k arg:rng arg:xp arguments arg arg arg arg Assign Call If Compare Assign Assign Assign Call Call For Call If Compare Assign Call Assign Call Call Assign Call Assign Call Assign Call Assign Call Assign Call Call Assign Call Call If Compare Assign Return return:yes"
  },
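The same k-means++ rule in plain NumPy, with `rng.choice` standing in for the manual cumulative-probability search (an illustrative sketch, not SciPy's array-API-aware code):

```python
import numpy as np

def kpp(data: np.ndarray, k: int, rng: np.random.Generator) -> np.ndarray:
    init = np.empty((k, data.shape[1]))
    for i in range(k):
        if i == 0:
            idx = rng.integers(data.shape[0])  # first centroid: uniform
        else:
            # squared distance to the nearest centroid chosen so far
            d2 = ((data[:, None, :] - init[None, :i, :]) ** 2).sum(-1).min(axis=1)
            idx = rng.choice(data.shape[0], p=d2 / d2.sum())
        init[i] = data[idx]
    return init

rng = np.random.default_rng(0)
print(kpp(rng.normal(size=(200, 2)), 3, rng).shape)  # (3, 2)
```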
  {
    "library": "tensorflow",
    "name": "RegisterKL",
    "source_code": "@tf_export(v1=['distributions.RegisterKL'])\nclass RegisterKL:\n\n    @deprecation.deprecated('2019-01-01', 'The TensorFlow Distributions library has moved to TensorFlow Probability (https://github.com/tensorflow/probability). You should update all references to use `tfp.distributions` instead of `tf.distributions`.', warn_once=True)\n    def __init__(self, dist_cls_a, dist_cls_b):\n        self._key = (dist_cls_a, dist_cls_b)\n\n    def __call__(self, kl_fn):\n        if not callable(kl_fn):\n            raise TypeError('kl_fn must be callable, received: %s' % kl_fn)\n        if self._key in _DIVERGENCES:\n            raise ValueError('KL(%s || %s) has already been registered to: %s' % (self._key[0].__name__, self._key[1].__name__, _DIVERGENCES[self._key]))\n        _DIVERGENCES[self._key] = kl_fn\n        return kl_fn",
    "docstring": "Decorator to register a KL divergence implementation function. Usage: @distributions.RegisterKL(distributions.Normal, distributions.Normal) def _kl_normal_mvn(norm_a, norm_b): # Return KL(norm_a || norm_b)",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\distributions\\kullback_leibler.py",
    "ast_data": "ClassDef name:RegisterKL FunctionDef name:__init__ arg:self arg:dist_cls_a arg:dist_cls_b arguments arg arg arg Assign Call FunctionDef name:__call__ arg:self arg:kl_fn arguments arg arg If Call Raise Call If Compare Raise Call Assign Return return:yes Call"
  },
  {
    "library": "django",
    "name": "MultipleObjectsReturned",
    "source_code": "class MultipleObjectsReturned(Exception):\n    pass",
    "docstring": "The query returned multiple objects when only one was expected.",
    "type": "class",
    "file_path": "django\\django\\core\\exceptions.py",
    "ast_data": "ClassDef name:MultipleObjectsReturned"
  },
  {
    "library": "tensorflow",
    "name": "dim_size_dtype",
    "source_code": "@property\ndef dim_size_dtype(self):\n    return self._inner_dim_sizes.dtype",
    "docstring": "DType used by this shape for dimension sizes.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\ragged_tensor_shape.py",
    "ast_data": "FunctionDef name:dim_size_dtype arg:self arguments arg Return return:yes"
  },
  {
    "library": "pandas",
    "name": "concat",
    "source_code": "def concat(self, other: Styler) -> Styler:\n    if not isinstance(other, Styler):\n        raise TypeError('`other` must be of type `Styler`')\n    if not self.data.columns.equals(other.data.columns):\n        raise ValueError('`other.data` must have same columns as `Styler.data`')\n    if not self.data.index.nlevels == other.data.index.nlevels:\n        raise ValueError('number of index levels must be same in `other` as in `Styler`. See documentation for suggestions.')\n    self.concatenated.append(other)\n    return self",
    "docstring": "Append another Styler to combine the output into a single table. .. versionadded:: 1.5.0 Parameters ---------- other : Styler The other Styler object which has already been styled and formatted. The data for this Styler must have the same columns as the original, and the number of index levels must also be the same to render correctly. Returns ------- Styler Instance of class with specified Styler appended. See Also -------- Styler.clear : Reset the ``, with placeholder levels. >>> df = pd.DataFrame( ... [[1], [2]], index=pd.MultiIndex.from_product([[0], [1, 2]]) ... ) >>> descriptors = df.agg([\"sum\"]) >>> descriptors.index = pd.MultiIndex.from_product([[\"\"], descriptors.index]) >>> df.style.concat(descriptors.style) # doctest: +SKIP",
    "type": "method",
    "file_path": "pandas\\pandas\\io\\formats\\style.py",
    "ast_data": "FunctionDef name:concat arg:self arg:other arguments arg arg If Call Raise Call If Call Raise Call If Compare Raise Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "on_train_end",
    "source_code": "@doc_controls.for_subclass_implementers\ndef on_train_end(self, logs=None):\n    pass",
    "docstring": "Called at the end of training. Subclasses should override for any actions to run. Args: logs: Dict. Currently the output of the last call to is passed to this argument for this method but that may change in the future.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\callbacks.py",
    "ast_data": "FunctionDef name:on_train_end arg:self arg:logs arguments arg arg"
  },
  {
    "library": "scikit-learn",
    "name": "meta_namespace",
    "source_code": "def meta_namespace(*arrays: Array | complex | None, xp: ModuleType | None=None) -> ModuleType:\n    xp = array_namespace(*arrays) if xp is None else xp\n    if not is_dask_namespace(xp):\n        return xp\n    metas = [cast(Array | None, getattr(a, '_meta', None)) for a in arrays]\n    return array_namespace(*metas)",
    "docstring": "Get the namespace of Dask chunks. On all other backends, just return the namespace of the arrays. Parameters ---------- *arrays : Array | int | float | complex | bool | None Input arrays. xp : array_namespace, optional The standard-compatible namespace for the input arrays. Default: infer. Returns ------- array_namespace If xp is Dask, the namespace of the Dask chunks; otherwise, the namespace of the arrays.",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\externals\\array_api_extra\\_lib\\_utils\\_helpers.py",
    "ast_data": "FunctionDef name:meta_namespace arguments arg arg Assign Compare Call If Call Return return:yes Assign Call Call Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "transform",
    "source_code": "def transform(self, X):\n    check_is_fitted(self)\n    X = self._check_test_data(X)\n    return self._transform(X)",
    "docstring": "Transform X to a cluster-distance space. In the new space, each dimension is the distance to the cluster centers. Note that even if X is sparse, the array returned by will typically be dense. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) New data to transform. Returns ------- X_new : ndarray of shape (n_samples, n_clusters) X transformed in the new space.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\cluster\\_kmeans.py",
    "ast_data": "FunctionDef name:transform arg:self arg:X arguments arg arg Call Assign Call Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "fit",
    "source_code": "@_fit_context(prefer_skip_nested_validation=True)\ndef fit(self, X, y=None):\n    return self._fit(X, partial=False)",
    "docstring": "Build a CF Tree for the input data. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) Input data. y : Ignored Not used, present here for API consistency by convention. Returns ------- self Fitted estimator.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\cluster\\_birch.py",
    "ast_data": "FunctionDef name:fit arg:self arg:X arg:y arguments arg arg arg Return return:yes Call Call"
  },
  {
    "library": "pandas",
    "name": "read_sas",
    "source_code": "@doc(decompression_options=_shared_docs['decompression_options'] % 'filepath_or_buffer')\ndef read_sas(filepath_or_buffer: FilePath | ReadBuffer[bytes], *, format: str | None=None, index: Hashable | None=None, encoding: str | None=None, chunksize: int | None=None, iterator: bool=False, compression: CompressionOptions='infer') -> DataFrame | SASReader:\n    if format is None:\n        buffer_error_msg = 'If this is a buffer object rather than a string name, you must specify a format string'\n        filepath_or_buffer = stringify_path(filepath_or_buffer)\n        if not isinstance(filepath_or_buffer, str):\n            raise ValueError(buffer_error_msg)\n        fname = filepath_or_buffer.lower()\n        if '.xpt' in fname:\n            format = 'xport'\n        elif '.sas7bdat' in fname:\n            format = 'sas7bdat'\n        else:\n            raise ValueError(f'unable to infer format of SAS file from filename: {fname!r}')\n    reader: SASReader\n    if format.lower() == 'xport':\n        from pandas.io.sas.sas_xport import XportReader\n        reader = XportReader(filepath_or_buffer, index=index, encoding=encoding, chunksize=chunksize, compression=compression)\n    elif format.lower() == 'sas7bdat':\n        from pandas.io.sas.sas7bdat import SAS7BDATReader\n        reader = SAS7BDATReader(filepath_or_buffer, index=index, encoding=encoding, chunksize=chunksize, compression=compression)\n    else:\n        raise ValueError('unknown SAS format')\n    if iterator or chunksize:\n        return reader\n    with reader:\n        return reader.read()",
    "docstring": "Read SAS files stored as either XPORT or SAS7BDAT format files. Parameters ---------- filepath_or_buffer : str, path object, or file-like object String, path object (implementing `chunksize` lines at a time, returns iterator. iterator : bool, defaults to False If True, returns an iterator for reading the file incrementally. {decompression_options} Returns ------- DataFrame, SAS7BDATReader, or XportReader DataFrame if iterator=False and chunksize=None, else SAS7BDATReader or XportReader, file format is inferred from file extension. See Also -------- read_csv : Read a comma-separated values (csv) file into a pandas DataFrame. read_excel : Read an Excel file into a pandas DataFrame. read_spss : Read an SPSS file into a pandas DataFrame. read_orc : Load an ORC object into a pandas DataFrame. read_feather : Load a feather-format object into a pandas DataFrame. Examples -------- >>> df = pd.read_sas(\"sas_data.sas7bdat\") # doctest: +SKIP",
    "type": "function",
    "file_path": "pandas\\pandas\\io\\sas\\sasreader.py",
    "ast_data": "FunctionDef name:read_sas arg:filepath_or_buffer arguments arg arg arg arg arg arg arg If Compare Assign Assign Call If Call Raise Call Assign Call If Compare Assign If Compare Assign Raise Call If Compare Call Assign Call If Compare Call Assign Call Raise Call If BoolOp Return return:yes With Return return:yes Call Call"
  },
  {
    "library": "matplotlib",
    "name": "AxisInfo",
    "source_code": "class AxisInfo:\n\n    def __init__(self, majloc=None, minloc=None, majfmt=None, minfmt=None, label=None, default_limits=None):\n        self.majloc = majloc\n        self.minloc = minloc\n        self.majfmt = majfmt\n        self.minfmt = minfmt\n        self.label = label\n        self.default_limits = default_limits",
    "docstring": "Information to support default axis labeling, tick labeling, and limits. An instance of this class must be returned by .",
    "type": "class",
    "file_path": "matplotlib\\lib\\matplotlib\\units.py",
    "ast_data": "ClassDef name:AxisInfo FunctionDef name:__init__ arg:self arg:majloc arg:minloc arg:majfmt arg:minfmt arg:label arg:default_limits arguments arg arg arg arg arg arg arg Assign Assign Assign Assign Assign Assign"
  },
  {
    "library": "scikit-learn",
    "name": "n_dims",
    "source_code": "@property\ndef n_dims(self):\n    return self.theta.shape[0]",
    "docstring": "Returns the number of non-fixed hyperparameters of the kernel.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\gaussian_process\\kernels.py",
    "ast_data": "FunctionDef name:n_dims arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "value",
    "source_code": "def value(self):\n    with c_api_util.tf_buffer() as buffer_:\n        pywrap_tfe.TFE_MonitoringSamplerCellValue(self._cell, buffer_)\n        proto_data = pywrap_tf_session.TF_GetBuffer(buffer_)\n    histogram_proto = summary_pb2.HistogramProto()\n    histogram_proto.ParseFromString(compat.as_bytes(proto_data))\n    return histogram_proto",
    "docstring": "Retrieves the current distribution of samples. Returns: A HistogramProto describing the distribution of samples.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\monitoring.py",
    "ast_data": "FunctionDef name:value arg:self arguments arg With Call Call Assign Call Assign Call Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_handle_complex",
    "source_code": "def _handle_complex(tensor):\n    return torch.view_as_real(tensor) if not isinstance(tensor, torch.nn.UninitializedParameter) and tensor.is_complex() else tensor",
    "docstring": "Returns a real view of a tensor if complex dtype else just the tensor need to check if a UninitializedParameter because otherwise checking is_complex is an error for a LazyModule",
    "type": "function",
    "file_path": "pytorch\\torch\\_utils.py",
    "ast_data": "FunctionDef name:_handle_complex arg:tensor arguments arg Return return:yes BoolOp Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "GradientTape",
    "source_code": "class GradientTape(object):\n\n    def __init__(self, persistent=False):\n        self._c_tape = _tape.Tape(persistent)\n        ctx = context_stack.get_default()\n        self._tape_context = _tape.TapeContext(ctx, self._c_tape, gradient_registry.get_global_registry())\n        self._ctx_manager = None\n\n    def watch(self, t):\n        self._c_tape.Watch(t)\n\n    def gradient(self, targets, sources, output_gradients=None):\n        ctx = context_stack.get_default()\n        flat_targets = nest.flatten(targets)\n        flat_sources = nest.flatten(sources)\n        out_grads = self._c_tape.ComputeGradient(ctx, flat_targets, flat_sources, output_gradients or [])\n        return nest.pack_sequence_as(sources, out_grads)\n\n    def __enter__(self):\n        self._ctx_manager = context_stack.set_default(self._tape_context)\n        self._ctx_manager.__enter__()\n        return self\n\n    def __exit__(self, typ, value, traceback):\n        self._ctx_manager.__exit__(typ, value, traceback)\n        self._ctx_manager = None",
    "docstring": "GradientTape using the unified API.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\experimental\\tape.py",
    "ast_data": "ClassDef name:GradientTape FunctionDef name:__init__ arg:self arg:persistent arguments arg arg Assign Call Assign Call Assign Call Call Assign FunctionDef name:watch arg:self arg:t arguments arg arg Call FunctionDef name:gradient arg:self arg:targets arg:sources arg:output_gradients arguments arg arg arg arg Assign Call Assign Call Assign Call Assign Call BoolOp Return return:yes Call FunctionDef name:__enter__ arg:self arguments arg Assign Call Call Return return:yes FunctionDef name:__exit__ arg:self arg:typ arg:value arg:traceback arguments arg arg arg arg Call Assign"
  },
  {
    "library": "tensorflow",
    "name": "_parse_func_attr_value",
    "source_code": "def _parse_func_attr_value(key, value):\n    if isinstance(value, attr_value_pb2.AttrValue):\n        return value\n    elif isinstance(value, bool):\n        return attr_value_pb2.AttrValue(b=value)\n    elif isinstance(value, int):\n        return attr_value_pb2.AttrValue(i=value)\n    elif isinstance(value, float):\n        return attr_value_pb2.AttrValue(f=value)\n    elif isinstance(value, (str, bytes)):\n        return attr_value_pb2.AttrValue(s=compat.as_bytes(value))\n    elif isinstance(value, list):\n        list_value = attr_value_pb2.AttrValue.ListValue()\n        for v in value:\n            if isinstance(v, bool):\n                list_value.b.append(v)\n            elif isinstance(v, int):\n                list_value.i.append(v)\n            elif isinstance(v, float):\n                list_value.f.append(v)\n            elif isinstance(v, (str, bytes)):\n                list_value.s.append(compat.as_bytes(v))\n            else:\n                raise ValueError(f'Attributes for {key} must be bool, int, float, or string. Got {type(v)}.')\n        return attr_value_pb2.AttrValue(list=list_value)\n    else:\n        raise ValueError(f'Attribute {key} must be bool, int, float, string, list, or AttrValue. Got {type(value)}.')",
    "docstring": "Converts a python object to an attr_value_pb2.AttrValue object.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\polymorphic_function\\attributes.py",
    "ast_data": "FunctionDef name:_parse_func_attr_value arg:key arg:value arguments arg arg If Call Return return:yes If Call Return return:yes Call If Call Return return:yes Call If Call Return return:yes Call If Call Return return:yes Call Call If Call Assign Call For If Call Call If Call Call If Call Call If Call Call Call Raise Call Call Return return:yes Call Raise Call Call"
  },
  {
    "library": "matplotlib",
    "name": "currency",
    "source_code": "def currency(x, pos):\n    if x >= 1000000.0:\n        s = f'${x * 1e-06:1.1f}M'\n    else:\n        s = f'${x * 0.001:1.0f}K'\n    return s",
    "docstring": "The two arguments are the value and tick position",
    "type": "function",
    "file_path": "matplotlib\\galleries\\tutorials\\lifecycle.py",
    "ast_data": "FunctionDef name:currency arg:x arg:pos arguments arg arg If Compare Assign Assign Return return:yes"
  },
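The (value, tick position) signature exists so the function can be wrapped in a FuncFormatter; typical use on an axis:

```python
import matplotlib.pyplot as plt
from matplotlib.ticker import FuncFormatter

def currency(x, pos):
    return f'${x * 1e-06:1.1f}M' if x >= 1e6 else f'${x * 0.001:1.0f}K'

fig, ax = plt.subplots()
ax.bar(['a', 'b'], [450_000, 2_300_000])
ax.yaxis.set_major_formatter(FuncFormatter(currency))  # ticks render as $500K, $2.0M
```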
  {
    "library": "tensorflow",
    "name": "on_batch_begin",
    "source_code": "@doc_controls.for_subclass_implementers\n@generic_utils.default\ndef on_batch_begin(self, batch, logs=None):\n    pass",
    "docstring": "A backwards compatibility alias for .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\callbacks.py",
    "ast_data": "FunctionDef name:on_batch_begin arg:self arg:batch arg:logs arguments arg arg arg"
  },
  {
    "library": "matplotlib",
    "name": "add_artist",
    "source_code": "def add_artist(self, artist, clip=False):\n    artist.set_figure(self)\n    self.artists.append(artist)\n    artist._remove_method = self.artists.remove\n    if not artist.is_transform_set():\n        artist.set_transform(self.transSubfigure)\n    if clip and artist.get_clip_path() is None:\n        artist.set_clip_path(self.patch)\n    self.stale = True\n    return artist",
    "docstring": "Add an to the figure. Usually artists are added to objects using ; this method can be used in the rare cases where one needs to add artists directly to the figure instead. Parameters ---------- artist : The artist to add to the figure. If the added artist has no transform previously set, its transform will be set to `~matplotlib.artist.Artist` The added artist.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\figure.py",
    "ast_data": "FunctionDef name:add_artist arg:self arg:artist arg:clip arguments arg arg arg Call Call Assign If Call Call If BoolOp Compare Call Call Assign Return return:yes"
  },
  {
    "library": "scipy",
    "name": "_fit_edges_polyfit",
    "source_code": "def _fit_edges_polyfit(x, window_length, polyorder, deriv, delta, axis, y):\n    halflen = window_length // 2\n    _fit_edge(x, 0, window_length, 0, halflen, axis, polyorder, deriv, delta, y)\n    n = x.shape[axis]\n    _fit_edge(x, n - window_length, n, n - halflen, n, axis, polyorder, deriv, delta, y)",
    "docstring": "Use polynomial interpolation of x at the low and high ends of the axis to fill in the halflen values in y. This function just calls _fit_edge twice, once for each end of the axis.",
    "type": "function",
    "file_path": "scipy\\scipy\\signal\\_savitzky_golay.py",
    "ast_data": "FunctionDef name:_fit_edges_polyfit arg:x arg:window_length arg:polyorder arg:deriv arg:delta arg:axis arg:y arguments arg arg arg arg arg arg arg Assign Call Assign Call"
  },
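Each `_fit_edge` call amounts to fitting a polynomial over one window and evaluating its derivative across the half-window; a simplified sketch of the left edge only (hypothetical helper, not SciPy's `_fit_edge`):

```python
import numpy as np

def fit_left_edge(x, window_length, polyorder, deriv, delta):
    t = np.arange(window_length)
    coeffs = np.polynomial.polynomial.polyfit(t, x[:window_length], polyorder)
    poly = np.polynomial.Polynomial(coeffs).deriv(deriv)
    halflen = window_length // 2
    return poly(np.arange(halflen)) / delta ** deriv

x = np.arange(20.0) ** 2                 # exactly quadratic data
print(fit_left_edge(x, 9, 2, 1, 1.0))    # first derivative 2*t -> [0. 2. 4. 6.]
```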
  {
    "library": "django",
    "name": "work_path",
    "source_code": "@cached_property\ndef work_path(self):\n    if not self.is_templatized:\n        return self.path\n    filename = f'{self.translatable.file}.py'\n    return os.path.join(self.translatable.dirpath, filename)",
    "docstring": "Path to a file which is being fed into GNU gettext pipeline. This may be either a translatable or its preprocessed version.",
    "type": "method",
    "file_path": "django\\django\\core\\management\\commands\\makemessages.py",
    "ast_data": "FunctionDef name:work_path arg:self arguments arg If Return return:yes Assign Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "NoUpdateSpec",
    "source_code": "class NoUpdateSpec(APIChangeSpec):\n\n    def __init__(self):\n        self.function_handle = {}\n        self.function_reorders = {}\n        self.function_keyword_renames = {}\n        self.symbol_renames = {}\n        self.function_warnings = {}\n        self.change_to_function = {}\n        self.module_deprecations = {}\n        self.function_transformers = {}\n        self.import_renames = {}",
    "docstring": "A specification of an API change which doesn't change anything.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\tools\\compatibility\\ast_edits.py",
    "ast_data": "ClassDef name:NoUpdateSpec FunctionDef name:__init__ arg:self arguments arg Assign Assign Assign Assign Assign Assign Assign Assign Assign"
  },
  {
    "library": "tensorflow",
    "name": "split",
    "source_code": "def split(self) -> tuple[Sequence[Union[message.Message, bytes]], chunk_pb2.ChunkedMessage]:\n    if self._parent_splitter:\n        raise ValueError(\"A child ComposableSplitter's `split` method should not be called directly, since it inherit chunks from a parent object. Please call the parent's `split()` method instead.\")\n    assert self._chunks is not None\n    assert self._chunked_message is not None\n    if not self._built:\n        self.build_chunks()\n        self._fix_chunks()\n        self._built = True\n    return (self._chunks, self._chunked_message)",
    "docstring": "Splits a proto message into a Sequence of protos/bytes.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\tools\\proto_splitter\\split.py",
    "ast_data": "FunctionDef name:split arg:self arguments arg If Raise Call Compare Compare If Call Call Assign Return return:yes"
  },
  {
    "library": "numpy",
    "name": "_parse_gufunc_signature",
    "source_code": "def _parse_gufunc_signature(signature):\n    signature = re.sub('\\\\s+', '', signature)\n    if not re.match(_SIGNATURE, signature):\n        raise ValueError(f'not a valid gufunc signature: {signature}')\n    return tuple(([tuple(re.findall(_DIMENSION_NAME, arg)) for arg in re.findall(_ARGUMENT, arg_list)] for arg_list in signature.split('->')))",
    "docstring": "Parse string signatures for a generalized universal function. Arguments --------- signature : string Generalized universal function signature, e.g., ``. Returns ------- Tuple of input and output core dimensions parsed from the signature, each of the form List[Tuple[str, ...]].",
    "type": "function",
    "file_path": "numpy\\numpy\\lib\\_function_base_impl.py",
    "ast_data": "FunctionDef name:_parse_gufunc_signature arg:signature arguments arg Assign Call If Call Raise Call Return return:yes Call Call Call Call Call"
  },
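A self-contained version of the parser, with the module-level regex constants (`_SIGNATURE`, `_ARGUMENT`, `_DIMENSION_NAME`) reproduced locally as assumptions:

```python
import re

DIM = r'\w+'
ARG = rf'\(({DIM}(?:,{DIM})*)?\)'
SIG = rf'^{ARG}(?:,{ARG})*->{ARG}(?:,{ARG})*$'

def parse_signature(signature: str):
    signature = re.sub(r'\s+', '', signature)
    if not re.match(SIG, signature):
        raise ValueError(f'not a valid gufunc signature: {signature}')
    return tuple(
        [tuple(re.findall(DIM, arg)) for arg in re.findall(ARG, arg_list)]
        for arg_list in signature.split('->')
    )

print(parse_signature('(m,n),(n,p)->(m,p)'))
# ([('m', 'n'), ('n', 'p')], [('m', 'p')])
```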
  {
    "library": "pytorch",
    "name": "_get_serializable_data_groups",
    "source_code": "def _get_serializable_data_groups(self):\n    data_groups: dict[str, Any] = defaultdict()\n    for name, config in self.data_groups.items():\n        new_config = {key: value for key, value in config.items() if key not in ['hook', 'layer']}\n        data_groups[name] = new_config\n    return data_groups",
    "docstring": "Exclude hook and layer from the config keys before serializing TODO: Might have to treat functions (reduce_fn, mask_fn etc) in a different manner while serializing. For time-being, functions are treated the same way as other attributes",
    "type": "method",
    "file_path": "pytorch\\torch\\ao\\pruning\\_experimental\\activation_sparsifier\\activation_sparsifier.py",
    "ast_data": "FunctionDef name:_get_serializable_data_groups arg:self arguments arg Call For Call Assign Call Compare Assign Return return:yes"
  },
  {
    "library": "scipy",
    "name": "sorted_samples",
    "source_code": "def sorted_samples(self):\n    self.Ind_sorted = np.argsort(self.C, axis=0)\n    self.Xs = self.C[self.Ind_sorted]\n    return (self.Ind_sorted, self.Xs)",
    "docstring": "Find indexes of the sorted sampling points",
    "type": "method",
    "file_path": "scipy\\scipy\\optimize\\_shgo.py",
    "ast_data": "FunctionDef name:sorted_samples arg:self arguments arg Assign Call Assign Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "get_width",
    "source_code": "def get_width(self):\n    return self._width",
    "docstring": "Return the width of the rectangle.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\patches.py",
    "ast_data": "FunctionDef name:get_width arg:self arguments arg Return return:yes"
  },
  {
    "library": "scipy",
    "name": "_condition_3_13",
    "source_code": "def _condition_3_13(A_1_norm, n0, m_max, ell):\n    p_max = _compute_p_max(m_max)\n    a = 2 * ell * p_max * (p_max + 3)\n    b = _theta[m_max] / float(n0 * m_max)\n    return A_1_norm <= a * b",
    "docstring": "A helper function for the _expm_multiply_* functions. Parameters ---------- A_1_norm : float The precomputed 1-norm of A. n0 : int Number of columns in the _expm_multiply_* B matrix. m_max : int A value related to a bound. ell : int The number of columns used in the 1-norm approximation. This is usually taken to be small, maybe between 1 and 5. Returns ------- value : bool Indicates whether or not the condition has been met. Notes ----- This is condition (3.13) in Al-Mohy and Higham (2011).",
    "type": "function",
    "file_path": "scipy\\scipy\\sparse\\linalg\\_expm_multiply.py",
    "ast_data": "FunctionDef name:_condition_3_13 arg:A_1_norm arg:n0 arg:m_max arg:ell arguments arg arg arg arg Assign Call Assign Assign Call Return return:yes Compare"
  },
  {
    "library": "tensorflow",
    "name": "_get_and_write_pystate_feed_additions",
    "source_code": "def _get_and_write_pystate_feed_additions(pystate_trackables: List[_TrackableData], cache: Union[Dict[base.Trackable, Any], None], object_graph_proto=None) -> Tuple[Dict[base.Trackable, Any], Dict[base.Trackable, Any]]:\n    serialized_tensors = object_identity.ObjectIdentityDictionary()\n    feed_additions = {}\n    for td in pystate_trackables:\n        trackable = td.object_to_save\n        checkpoint_key = trackable_utils.checkpoint_key(td.object_name, python_state.PYTHON_STATE)\n        if trackable in cache:\n            save_string = cache[td.object_to_save][python_state.PYTHON_STATE]\n        else:\n            with ops.device('/cpu:0'):\n                save_string = constant_op.constant('', dtype=dtypes.string)\n                cache[trackable] = {python_state.PYTHON_STATE: save_string}\n        with ops.init_scope():\n            value = trackable.serialize()\n        feed_additions[save_string] = value\n        serialized_tensors[trackable] = {checkpoint_key: save_string}\n        object_graph_proto.nodes[td.node_id].attributes.add(name=python_state.PYTHON_STATE, checkpoint_key=checkpoint_key, full_name=util.get_full_name(trackable))\n    return (serialized_tensors, feed_additions)",
    "docstring": "Gets feed additions needed for checkpointing Python State.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\checkpoint\\save_util.py",
    "ast_data": "FunctionDef name:_get_and_write_pystate_feed_additions arg:pystate_trackables arg:cache arg:object_graph_proto arguments arg arg arg Assign Call Assign For Assign Assign Call If Compare Assign With Call Assign Call Assign With Call Assign Call Assign Assign Call Call Return return:yes"
  },
  {
    "library": "scipy",
    "name": "cc_diff",
    "source_code": "def cc_diff(x, a, b, period=None, _cache=_cache):\n    if isinstance(_cache, threading.local):\n        if not hasattr(_cache, 'cc_diff_cache'):\n            _cache.cc_diff_cache = {}\n        _cache = _cache.cc_diff_cache\n    tmp = asarray(x)\n    if iscomplexobj(tmp):\n        return cc_diff(tmp.real, a, b, period, _cache) + 1j * cc_diff(tmp.imag, a, b, period, _cache)\n    if period is not None:\n        a = a * 2 * pi / period\n        b = b * 2 * pi / period\n    n = len(x)\n    omega = _cache.get((n, a, b))\n    if omega is None:\n        if len(_cache) > 20:\n            while _cache:\n                _cache.popitem()\n\n        def kernel(k, a=a, b=b):\n            return cosh(a * k) / cosh(b * k)\n        omega = convolve.init_convolution_kernel(n, kernel)\n        _cache[n, a, b] = omega\n    overwrite_x = _datacopied(tmp, x)\n    return convolve.convolve(tmp, omega, overwrite_x=overwrite_x)",
    "docstring": "Return (a,b)-cosh/cosh pseudo-derivative of a periodic sequence. If x_j and y_j are Fourier coefficients of periodic functions x and y, respectively, then:: y_j = cosh(j*a*2*pi/period)/cosh(j*b*2*pi/period) * x_j Parameters ---------- x : array_like The array to take the pseudo-derivative from. a,b : float Defines the parameters of the sinh/sinh pseudo-differential operator. period : float, optional The period of the sequence x. Default is `x`",
    "type": "function",
    "file_path": "scipy\\scipy\\fftpack\\_pseudo_diffs.py",
    "ast_data": "FunctionDef name:cc_diff arg:x arg:a arg:b arg:period arg:_cache arguments arg arg arg arg arg If Call If Call Assign Assign Assign Call If Call Return return:yes Call Call If Compare Assign Assign Assign Call Assign Call If Compare If Compare Call While Call FunctionDef name:kernel arg:k arg:a arg:b arguments arg arg arg Return return:yes Call Call Assign Call Assign Assign Call Return return:yes Call"
  },
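A diagnostic reimplementation of the filter with numpy.fft (the real code builds a convolution kernel via fftpack instead): scale the j-th Fourier coefficient by cosh(a*j)/cosh(b*j).

```python
import numpy as np

def cc_diff_fft(x, a, b):
    n = len(x)
    j = np.fft.rfftfreq(n, d=1.0 / n)   # harmonic numbers 0, 1, 2, ...
    X = np.fft.rfft(x)
    return np.fft.irfft(X * np.cosh(a * j) / np.cosh(b * j), n)

t = np.linspace(0, 2 * np.pi, 64, endpoint=False)
x = np.cos(3 * t)
# a pure harmonic is simply rescaled by cosh(3a)/cosh(3b)
print(np.allclose(cc_diff_fft(x, 0.5, 1.0),
                  np.cosh(1.5) / np.cosh(3.0) * x))  # True
```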
  {
    "library": "pytorch",
    "name": "clone",
    "source_code": "def clone(self, body: LoopBody):\n    copy = LoopBodyBlock.__new__(LoopBodyBlock)\n    copy.__dict__.update({**self.__dict__, 'body': body})\n    return copy",
    "docstring": "Shallow copy with a new parent LoopBody",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\loop_body.py",
    "ast_data": "FunctionDef name:clone arg:self arg:body arguments arg arg Assign Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_infer_matching_dtype",
    "source_code": "def _infer_matching_dtype(tensors, dtype_hierarchy):\n    assert all((t.dtype in dtype_hierarchy for t in tensors))\n    inferred_dtype = max([t.dtype for t in tensors], key=dtype_hierarchy.index)\n    return [math_ops.cast(t, inferred_dtype) for t in tensors]",
    "docstring": "Infers a matching dtype for tensors, and casts them to that dtype.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\ragged_math_ops.py",
    "ast_data": "FunctionDef name:_infer_matching_dtype arg:tensors arg:dtype_hierarchy arguments arg arg Call Compare Assign Call Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "PinballLoss",
    "source_code": "class PinballLoss(BaseLoss):\n    differentiable = False\n    need_update_leaves_values = True\n\n    def __init__(self, sample_weight=None, quantile=0.5):\n        check_scalar(quantile, 'quantile', target_type=numbers.Real, min_val=0, max_val=1, include_boundaries='neither')\n        super().__init__(closs=CyPinballLoss(quantile=float(quantile)), link=IdentityLink())\n        self.approx_hessian = True\n        self.constant_hessian = sample_weight is None\n\n    def fit_intercept_only(self, y_true, sample_weight=None):\n        if sample_weight is None:\n            return np.percentile(y_true, 100 * self.closs.quantile, axis=0)\n        else:\n            return _weighted_percentile(y_true, sample_weight, 100 * self.closs.quantile)",
    "docstring": "Quantile loss aka pinball loss, for regression. Domain: y_true and y_pred all real numbers quantile in (0, 1) Link: y_pred = raw_prediction For a given sample x_i, the pinball loss is defined as:: loss(x_i) = rho_{quantile}(y_true_i - raw_prediction_i) rho_{quantile}(u) = u * (quantile - 1_{u= 0 Note: 2 * PinballLoss(quantile=0.5) equals AbsoluteError(). Note that the exact hessian = 0 almost everywhere (except at one point, therefore differentiable = False). Optimization routines like in HGBT, however, need a hessian > 0. Therefore, we assign 1. Additional Attributes --------------------- quantile : float The quantile level of the quantile to be estimated. Must be in range (0, 1).",
    "type": "class",
    "file_path": "scikit-learn\\sklearn\\_loss\\loss.py",
    "ast_data": "ClassDef name:PinballLoss Assign Assign FunctionDef name:__init__ arg:self arg:sample_weight arg:quantile arguments arg arg arg Call Call Call Call Call Call Assign Assign Compare FunctionDef name:fit_intercept_only arg:self arg:y_true arg:sample_weight arguments arg arg arg If Compare Return return:yes Call Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "pipe",
    "source_code": "@final\n@doc(klass=_shared_doc_kwargs['klass'])\ndef pipe(self, func: Callable[Concatenate[Self, P], T] | tuple[Callable[..., T], str], *args: Any, **kwargs: Any) -> T:\n    return common.pipe(self.copy(deep=False), func, *args, **kwargs)",
    "docstring": "Apply chainable functions that expect Series or DataFrames. Parameters ---------- func : function Function to apply to the {klass}. `~pandas.Series` in the second argument: >>> def subtract_national_insurance(rate, df, rate_increase): ... new_rate = rate + rate_increase ... return df * (1 - new_rate) >>> ( ... df.pipe(subtract_federal_tax) ... .pipe(subtract_state_tax, rate=0.12) ... .pipe( ... (subtract_national_insurance, \"df\"), rate=0.05, rate_increase=0.02 ... ) ... ) Salary Others 0 5892.48 736.56 1 6997.32 NaN 2 3682.80 1473.12",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\generic.py",
    "ast_data": "FunctionDef name:pipe arg:self arg:func arguments arg arg arg arg Return return:yes Call Call Call"
  },
  {
    "library": "pygame",
    "name": "poll",
    "source_code": "def poll(self):\n    _check_init()\n    self._check_open()\n    result = self._input.Poll()\n    if result == _pypm.TRUE:\n        return True\n    if result == _pypm.FALSE:\n        return False\n    err_text = _pypm.GetErrorText(result)\n    raise MidiException((result, err_text))",
    "docstring": "returns true if there's data, or false if not. Input.poll(): return Bool raises a MidiException on error.",
    "type": "method",
    "file_path": "pygame\\src_py\\midi.py",
    "ast_data": "FunctionDef name:poll arg:self arguments arg Call Call Assign Call If Compare Return return:yes If Compare Return return:yes Assign Call Raise Call"
  },
  {
    "library": "django",
    "name": "max_name_length",
    "source_code": "def max_name_length(self):\n    return 63",
    "docstring": "Return the maximum length of an identifier. The maximum length of an identifier is 63 by default, but can be changed by recompiling PostgreSQL after editing the NAMEDATALEN macro in src/include/pg_config_manual.h. This implementation returns 63, but can be overridden by a custom database backend that inherits most of its behavior from this one.",
    "type": "method",
    "file_path": "django\\django\\db\\backends\\postgresql\\operations.py",
    "ast_data": "FunctionDef name:max_name_length arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "optimize_for_inference",
    "source_code": "def optimize_for_inference(input_graph_def: graph_pb2.GraphDef, input_node_names: Sequence[str], output_node_names: Sequence[str], placeholder_type_enum: int, toco_compatible: bool=False, placeholder_to_const_names=None) -> graph_pb2.GraphDef:\n    ensure_graph_is_valid(input_graph_def)\n    optimized_graph_def = input_graph_def\n    optimized_graph_def = convert_placeholder_to_const(optimized_graph_def, placeholder_to_const_names)\n    optimized_graph_def = strip_unused_lib.strip_unused(optimized_graph_def, input_node_names, output_node_names, placeholder_type_enum)\n    optimized_graph_def = graph_util.remove_training_nodes(optimized_graph_def, output_node_names)\n    optimized_graph_def = fuse_decomposed_batch_norm(optimized_graph_def)\n    optimized_graph_def = fold_batch_norms(optimized_graph_def)\n    if not toco_compatible:\n        optimized_graph_def = fuse_resize_and_conv(optimized_graph_def, output_node_names)\n    ensure_graph_is_valid(optimized_graph_def)\n    return optimized_graph_def",
    "docstring": "Applies a series of inference optimizations on the input graph. Args: input_graph_def: A GraphDef containing a training model. input_node_names: A list of names of the nodes that are fed inputs during inference. output_node_names: A list of names of the nodes that produce the final results. placeholder_type_enum: The AttrValue enum for the placeholder data type, or a list that specifies one value per input node name. toco_compatible: Boolean, if True, only runs optimizations that result in TOCO compatible graph operations (default=False). placeholder_to_const_names: A list of names of the PlaceholderWithDefault nodes to be converted to Constant. Returns: An optimized version of the input graph.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\tools\\optimize_for_inference_lib.py",
    "ast_data": "FunctionDef name:optimize_for_inference arg:input_graph_def arg:input_node_names arg:output_node_names arg:placeholder_type_enum arg:toco_compatible arg:placeholder_to_const_names arguments arg arg arg arg arg arg Call Assign Assign Call Assign Call Assign Call Assign Call Assign Call If Assign Call Call Return return:yes"
  },
  {
    "library": "django",
    "name": "_get_new_session_key",
    "source_code": "def _get_new_session_key(self):\n    while True:\n        session_key = get_random_string(32, VALID_KEY_CHARS)\n        if not self.exists(session_key):\n            return session_key",
    "docstring": "Return session key that isn't being used.",
    "type": "method",
    "file_path": "django\\django\\contrib\\sessions\\backends\\base.py",
    "ast_data": "FunctionDef name:_get_new_session_key arg:self arguments arg While Assign Call If Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "CalcProduct",
    "source_code": "class CalcProduct(Constraint):\n\n    def __init__(self, start, end, flattened, dims_to_flatten):\n        assert isinstance(dims_to_flatten, list)\n        assert isinstance(flattened, TVar)\n        assert isinstance(start, int)\n        assert isinstance(end, int)\n        self.start = start\n        self.end = end\n        self.dims_to_flatten = dims_to_flatten\n        self.flattened = flattened\n\n    def __eq__(self, other):\n        if isinstance(other, CalcProduct):\n            return self.start == other.start and self.end == other.end and (self.dims_to_flatten == other.dims_to_flatten) and (self.flattened == other.flattened)\n        else:\n            return False\n\n    def __repr__(self):\n        return f'{self.flattened} = CalcProduct({self.start}, {self.end}, {self.dims_to_flatten})'",
    "docstring": "Given correct dimensions, calculate the product for flatten accounting for Dyn",
    "type": "class",
    "file_path": "pytorch\\torch\\fx\\experimental\\migrate_gradual_types\\constraint.py",
    "ast_data": "ClassDef name:CalcProduct FunctionDef name:__init__ arg:self arg:start arg:end arg:flattened arg:dims_to_flatten arguments arg arg arg arg arg Call Call Call Call Assign Assign Assign Assign FunctionDef name:__eq__ arg:self arg:other arguments arg arg If Call Return return:yes BoolOp Compare Compare Compare Compare Return return:yes FunctionDef name:__repr__ arg:self arguments arg Return return:yes"
  },
  {
    "library": "django",
    "name": "visible_fields",
    "source_code": "def visible_fields(self):\n    return [field for field in self if not field.is_hidden]",
    "docstring": "Return a list of BoundField objects that aren't hidden fields. The opposite of the hidden_fields() method.",
    "type": "method",
    "file_path": "django\\django\\forms\\forms.py",
    "ast_data": "FunctionDef name:visible_fields arg:self arguments arg Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "sharex",
    "source_code": "def sharex(self, other):\n    _api.check_isinstance(_AxesBase, other=other)\n    if self._sharex is not None and other is not self._sharex:\n        raise ValueError('x-axis is already shared')\n    self._shared_axes['x'].join(self, other)\n    self._sharex = other\n    self.xaxis.major = other.xaxis.major\n    self.xaxis.minor = other.xaxis.minor\n    x0, x1 = other.get_xlim()\n    self.set_xlim(x0, x1, emit=False, auto=other.get_autoscalex_on())\n    self.xaxis._scale = other.xaxis._scale",
    "docstring": "Share the x-axis with *other*. This is equivalent to passing `` when constructing the Axes, and cannot be used if the x-axis is already being shared with another Axes. Note that it is not possible to unshare axes.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\axes\\_base.py",
    "ast_data": "FunctionDef name:sharex arg:self arg:other arguments arg arg Call If BoolOp Compare Compare Raise Call Call Assign Assign Assign Assign Call Call Call Assign"
  },
  {
    "library": "numpy",
    "name": "make_config_py",
    "source_code": "def make_config_py(self, name='__config__'):\n    self.py_modules.append((self.name, name, generate_config_py))",
    "docstring": "Generate package __config__.py file containing system_info information used during building the package. This file is installed to the package installation directory.",
    "type": "method",
    "file_path": "numpy\\numpy\\distutils\\misc_util.py",
    "ast_data": "FunctionDef name:make_config_py arg:self arg:name arguments arg arg Call"
  },
  {
    "library": "cherrypy",
    "name": "encode_header_item",
    "source_code": "@classmethod\ndef encode_header_item(cls, item):\n    if isinstance(item, str):\n        item = cls.encode(item)\n    return item.translate(header_translate_table, header_translate_deletechars)",
    "docstring": "Encode an HTTP header for sending over the wire.",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\lib\\httputil.py",
    "ast_data": "FunctionDef name:encode_header_item arg:cls arg:item arguments arg arg If Call Assign Call Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "__call__",
    "source_code": "def __call__(self, df):\n    if not hasattr(df, 'iloc'):\n        raise ValueError('make_column_selector can only be applied to pandas dataframes')\n    df_row = df.iloc[:1]\n    if self.dtype_include is not None or self.dtype_exclude is not None:\n        df_row = df_row.select_dtypes(include=self.dtype_include, exclude=self.dtype_exclude)\n    cols = df_row.columns\n    if self.pattern is not None:\n        cols = cols[cols.str.contains(self.pattern, regex=True)]\n    return cols.tolist()",
    "docstring": "Callable for column selection to be used by a :class:. Parameters ---------- df : dataframe of shape (n_features, n_samples) DataFrame to select columns from.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\compose\\_column_transformer.py",
    "ast_data": "FunctionDef name:__call__ arg:self arg:df arguments arg arg If Call Raise Call Assign If BoolOp Compare Compare Assign Call Assign If Compare Assign Call Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "__init__",
    "source_code": "def __init__(self, angleA=0, angleB=0, armA=None, armB=None, rad=0.0):\n    self.angleA = angleA\n    self.angleB = angleB\n    self.armA = armA\n    self.armB = armB\n    self.rad = rad",
    "docstring": "Parameters ---------- angleA : float Starting angle of the path. angleB : float Ending angle of the path. armA : float or None Length of the starting arm. armB : float or None Length of the ending arm. rad : float Rounding radius of the edges.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\patches.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:angleA arg:angleB arg:armA arg:armB arg:rad arguments arg arg arg arg arg arg Assign Assign Assign Assign Assign"
  },
  {
    "library": "tensorflow",
    "name": "before_iteration",
    "source_code": "def before_iteration(self):\n    if self.check_inefficient_unroll and self.iterations > INEFFICIENT_UNROLL_MIN_ITERATIONS:\n        self.ops_before_iteration = self._get_ops()\n        self.check_op_count_after_iteration = True",
    "docstring": "Called before each iteration in a Python loop.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\autograph\\operators\\control_flow.py",
    "ast_data": "FunctionDef name:before_iteration arg:self arguments arg If BoolOp Compare Assign Call Assign"
  },
  {
    "library": "tensorflow",
    "name": "_forward",
    "source_code": "def _forward(self, x):\n    raise NotImplementedError('forward not implemented.')",
    "docstring": "Subclass implementation for public function.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\distributions\\bijector_impl.py",
    "ast_data": "FunctionDef name:_forward arg:self arg:x arguments arg arg Raise Call"
  },
  {
    "library": "pytorch",
    "name": "wait",
    "source_code": "def wait(future):\n    return torch._C.wait(future)",
    "docstring": "Force completion of a asynchronous task, returning the result of the task. See :func: for docs and examples. Args: future (torch.jit.Future[T]): an asynchronous task reference, created through Returns: : the return value of the completed task",
    "type": "function",
    "file_path": "pytorch\\torch\\jit\\_async.py",
    "ast_data": "FunctionDef name:wait arg:future arguments arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "DisableBreakpoints",
    "source_code": "class DisableBreakpoints:\n\n    def __enter__(self) -> None:\n        target = get_target()\n        if target.DisableAllBreakpoints() is False:\n            print('[-] error: failed to disable all breakpoints.')\n\n    def __exit__(self, etype: Any, evalue: Any, tb: Any) -> None:\n        target = get_target()\n        if target.EnableAllBreakpoints() is False:\n            print('[-] error: failed to enable all breakpoints.')",
    "docstring": "Context-manager to temporarily disable all lldb breakpoints, useful if there is a risk to hit one during the evaluation of one of our custom commands",
    "type": "class",
    "file_path": "pytorch\\tools\\lldb\\pytorch_lldb.py",
    "ast_data": "ClassDef name:DisableBreakpoints FunctionDef name:__enter__ arg:self arguments arg Assign Call If Compare Call Call FunctionDef name:__exit__ arg:self arg:etype arg:evalue arg:tb arguments arg arg arg arg Assign Call If Compare Call Call"
  },
  {
    "library": "pytorch",
    "name": "annotate_orig_fx_with_snodes",
    "source_code": "def annotate_orig_fx_with_snodes(gm: torch.fx.GraphModule, snodes: SchedulerNodeList) -> None:\n    node_name_to_buf_name: dict[str, str] = {}\n    update_orig_fx_node_name_to_buf_name(snodes, node_name_to_buf_name)\n    if node_name_to_buf_name is None:\n        return\n    node_name_to_buf_meta = get_node_name_to_buf_meta(node_name_to_buf_name)\n    for node in gm.graph.nodes:\n        if node.name in node_name_to_buf_meta:\n            node.meta['buf_meta'] = node_name_to_buf_meta.get(node.name)",
    "docstring": "Creates a FX Graph from a list of SchedulerNode objects.",
    "type": "function",
    "file_path": "pytorch\\torch\\_inductor\\debug.py",
    "ast_data": "FunctionDef name:annotate_orig_fx_with_snodes arg:gm arg:snodes arguments arg arg Call If Compare Return return:no Assign Call For If Compare Assign Call"
  },
  {
    "library": "matplotlib",
    "name": "get_radii",
    "source_code": "def get_radii(self):\n    return (self.a, self.b)",
    "docstring": "Return the semi-major and semi-minor radii of the annulus.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\patches.py",
    "ast_data": "FunctionDef name:get_radii arg:self arguments arg Return return:yes"
  },
  {
    "library": "pandas",
    "name": "check_for_ordered",
    "source_code": "def check_for_ordered(self, op) -> None:\n    if not self.ordered:\n        raise TypeError(f'Categorical is not ordered for operation {op}\\nyou can use .as_ordered() to change the Categorical to an ordered one\\n')",
    "docstring": "assert that we are ordered",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\arrays\\categorical.py",
    "ast_data": "FunctionDef name:check_for_ordered arg:self arg:op arguments arg arg If Raise Call"
  },
  {
    "library": "scipy",
    "name": "change_dimensions",
    "source_code": "def change_dimensions(self, ndim):\n    if self.change_dimensionality:\n        self._dimensions = ndim\n    else:\n        raise ValueError('dimensionality cannot be changed for thisproblem')",
    "docstring": "Changes the dimensionality of the benchmark problem The dimensionality will only be changed if the problem is suitable Parameters ---------- ndim : int The new dimensionality for the problem.",
    "type": "method",
    "file_path": "scipy\\benchmarks\\benchmarks\\go_benchmark_functions\\go_benchmark.py",
    "ast_data": "FunctionDef name:change_dimensions arg:self arg:ndim arguments arg arg If Assign Raise Call"
  },
  {
    "library": "matplotlib",
    "name": "depth",
    "source_code": "@property\ndef depth(self):\n    return 1",
    "docstring": "Return the number of transforms which have been chained together to form this Transform instance. .. note:: For the special case of a Composite transform, the maximum depth of the two is returned.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\transforms.py",
    "ast_data": "FunctionDef name:depth arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "compile",
    "source_code": "def compile(self, run_eagerly=None, steps_per_execution=None):\n    if steps_per_execution is None:\n        steps_per_execution = 1\n    self._configure_steps_per_execution(steps_per_execution)\n    if run_eagerly is None:\n        run_eagerly = self.dynamic\n    self._run_eagerly = run_eagerly\n    self._is_compiled = True",
    "docstring": "Configures the layer for . Arguments: run_eagerly: Bool. Defaults to . If , this 's logic will not be wrapped in a . Recommended to leave this as unless your cannot be run inside a . steps_per_execution: Int. Defaults to 1. The number of batches to run during each call. Running multiple batches inside a single call can greatly improve performance on TPUs or small models with a large Python overhead.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\base_preprocessing_layer.py",
    "ast_data": "FunctionDef name:compile arg:self arg:run_eagerly arg:steps_per_execution arguments arg arg arg If Compare Assign Call If Compare Assign Assign Assign"
  },
  {
    "library": "tensorflow",
    "name": "serialize_feature_columns",
    "source_code": "def serialize_feature_columns(feature_columns):\n    return [serialize_feature_column(fc) for fc in feature_columns]",
    "docstring": "Serializes a list of FeatureColumns. Returns a list of Keras-style config dicts that represent the input FeatureColumns and can be used with for reconstructing the original columns. Args: feature_columns: A list of FeatureColumns. Returns: Keras serialization for the list of FeatureColumns. Raises: ValueError if called with input that is not a list of FeatureColumns.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\feature_column\\serialization.py",
    "ast_data": "FunctionDef name:serialize_feature_columns arg:feature_columns arguments arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "create_results_comparison",
    "source_code": "def create_results_comparison(results_grouped) -> Any:\n    results_comparison = {}\n    for subgraph_name, subgraph_results in results_grouped.items():\n        candidates = {}\n        for subgraph_inner_name, subgraph_inner_result in subgraph_results.items():\n            if subgraph_inner_name == '0':\n                continue\n            cmp_raw = subgraph_inner_result['comparisons']\n            cmp_raw_tensor = torch.stack(cmp_raw)\n            candidates[subgraph_inner_name] = {'qconfig_str': subgraph_inner_result['qconfig_str'], 'comparison_fn_name': subgraph_inner_result['comparison_fn_name'], 'cmp_raw': cmp_raw_tensor, 'cmp_mean': torch.mean(cmp_raw_tensor)}\n        results_comparison[subgraph_name] = {'ref_node_name': subgraph_results['0']['ref_node_name'], 'ref_node_target_type': subgraph_results['0']['ref_node_target_type'], 'fqn': subgraph_results['0']['fqn'], 'candidates': candidates}\n    return results_comparison",
    "docstring": "Input: { 'subgraph_0': { '0': { 'ref_node_name': '...', 'ref_node_target_type': ..., 'values': [torch.tensor(...), ...], 'qconfig_str': '', 'comparisons': [], 'comparison_fn_name': '', 'fqn': '...', }, '1': { 'ref_node_name': '...', 'ref_node_target_type': ..., 'values': [torch.tensor(...), ...], 'qconfig_str': '...', 'comparisons': [torch.tensor(...), ...], 'comparison_fn_name': 'sqnr', 'fqn': '...', }, }, } Output: { 'subgraph_0': { 'ref_node_name': '...', 'ref_node_target_type': '...', 'fqn': '...', 'candidates': { '1': { 'qconfig_str': ..., 'comparison_fn_name': 'sqnr', 'cmp_raw': [..., ...], 'cmp_mean': ..., }, ..., }, }, }",
    "type": "function",
    "file_path": "pytorch\\torch\\ao\\ns\\fx\\n_shadows_utils.py",
    "ast_data": "FunctionDef name:create_results_comparison arg:results_grouped arguments arg Assign For Call Assign For Call If Compare Assign Assign Call Assign Call Assign Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "get_math_fontfamily",
    "source_code": "def get_math_fontfamily(self):\n    return self._fontproperties.get_math_fontfamily()",
    "docstring": "Return the font family name for math text rendered by Matplotlib. The default value is :rc:. See Also -------- set_math_fontfamily",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\text.py",
    "ast_data": "FunctionDef name:get_math_fontfamily arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "ragged_rank",
    "source_code": "@property\ndef ragged_rank(self):\n    values_is_ragged = isinstance(self._values, RaggedTensor)\n    return self._values.ragged_rank + 1 if values_is_ragged else 1",
    "docstring": "The number of times the RaggedTensor's flat_values is partitioned. Examples: >>> values = tf.ragged.constant([[1, 2, 3], [4], [5, 6], [7, 8, 9, 10]]) >>> values.ragged_rank 1 >>> rt = tf.RaggedTensor.from_uniform_row_length(values, 2) >>> rt.ragged_rank 2 Returns: A Python indicating the number of times the underlying Tensor has been partitioned to add a new dimension. I.e., .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\ragged_tensor.py",
    "ast_data": "FunctionDef name:ragged_rank arg:self arguments arg Assign Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_OptimStateKey",
    "source_code": "class _OptimStateKey(NamedTuple):\n    unflat_param_names: tuple[str, ...]\n    is_fsdp_managed: bool",
    "docstring": "This represents an optimizer state key that may be used commonly across ranks. It is based on the unflattened parameter names rather than parameter IDs to make it independent of each rank's own optimizer construction.",
    "type": "class",
    "file_path": "pytorch\\torch\\distributed\\fsdp\\_optim_utils.py",
    "ast_data": "ClassDef name:_OptimStateKey"
  },
  {
    "library": "pytorch",
    "name": "min_x_blocks_sub_kernel",
    "source_code": "def min_x_blocks_sub_kernel(self, sub_kernel: TritonKernel, num: int) -> None:\n    min_x_blocks: Union[int, str] = 0\n    x_numels: Union[int, str] = 0\n    for tree in sub_kernel.range_trees:\n        simplified_tree_numel = V.graph.sizevars.simplify(tree.numel)\n        if tree.prefix == 'x':\n            if isinstance(simplified_tree_numel, (Integer, int)):\n                x_numels = int(simplified_tree_numel)\n            else:\n                x_numels = f'{tree.prefix}numel_{num}'\n            if sub_kernel.no_x_dim:\n                min_x_blocks = x_numels\n                x_numels = -min_x_blocks if isinstance(x_numels, int) else '-' + cast(str, x_numels)\n            elif isinstance(simplified_tree_numel, (Integer, int)):\n                x_numels = int(simplified_tree_numel)\n            else:\n                x_numels = f'{tree.prefix}numel_{num}'\n    self.min_x_blocks_list.append(min_x_blocks)\n    self.x_numels_list.append(x_numels)",
    "docstring": "Kernels with no_x_dim being true has no tunable XBLOCK. They have a fixed number of X blocks. Grid calculation needs to make sure that they are assigned with enough number of blocks.",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\codegen\\triton_combo_kernel.py",
    "ast_data": "FunctionDef name:min_x_blocks_sub_kernel arg:self arg:sub_kernel arg:num arguments arg arg arg For Assign Call If Compare If Call Assign Call Assign If Assign Assign Call Call If Call Assign Call Assign Call Call"
  },
  {
    "library": "matplotlib",
    "name": "_array_perimeter",
    "source_code": "def _array_perimeter(arr):\n    forward = np.s_[0:-1]\n    backward = np.s_[-1:0:-1]\n    return np.concatenate((arr[0, forward], arr[forward, -1], arr[-1, backward], arr[backward, 0]))",
    "docstring": "Get the elements on the perimeter of *arr*. Parameters ---------- arr : ndarray, shape (M, N) The input array. Returns ------- ndarray, shape (2*(M - 1) + 2*(N - 1),) The elements on the perimeter of the array:: [arr[0, 0], ..., arr[0, -1], ..., arr[-1, -1], ..., arr[-1, 0], ...] Examples -------- >>> i, j = np.ogrid[:3, :4] >>> a = i*10 + j >>> a array([[ 0, 1, 2, 3], [10, 11, 12, 13], [20, 21, 22, 23]]) >>> _array_perimeter(a) array([ 0, 1, 2, 3, 13, 23, 22, 21, 20, 10])",
    "type": "function",
    "file_path": "matplotlib\\lib\\matplotlib\\cbook.py",
    "ast_data": "FunctionDef name:_array_perimeter arg:arr arguments arg Assign Assign Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_RaiseNoGradWrtInitialLoopValError",
    "source_code": "def _RaiseNoGradWrtInitialLoopValError(op: ops.Operation, from_ops: list[ops.Operation], xs_set):\n    target_op = None\n    queue = collections.deque([op])\n    visited = set()\n    while queue:\n        curr_op = queue.popleft()\n        if curr_op in visited:\n            continue\n        visited.add(curr_op)\n        if curr_op in from_ops:\n            target_op = curr_op\n            break\n        queue.extend((t.op for t in _NonEagerInputs(curr_op, xs_set)))\n    assert target_op\n    raise ValueError(f\"Cannot compute gradient inside while loop with respect to op '{target_op.name}'. We do not support taking the gradient wrt or through the initial value of a loop variable. Gradients can be computed through loop invariants or wrt the input parameters to the loop body.\")",
    "docstring": "Raises an error if we backprop through a loop var.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\gradients_util.py",
    "ast_data": "FunctionDef name:_RaiseNoGradWrtInitialLoopValError arg:op arg:from_ops arg:xs_set arguments arg arg arg Assign Assign Call Assign Call While Assign Call If Compare Call If Compare Assign Call Call Raise Call"
  },
  {
    "library": "pytorch",
    "name": "inverse_shape",
    "source_code": "def inverse_shape(self, shape):\n    return shape",
    "docstring": "Infers the shapes of the inverse computation, given the output shape. Defaults to preserving shape.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributions\\transforms.py",
    "ast_data": "FunctionDef name:inverse_shape arg:self arg:shape arguments arg arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "DelayReplaceLine",
    "source_code": "class DelayReplaceLine(DeferredLineBase):\n\n    def __init__(self, key: str, value_fn: Callable[[], str], line: str):\n        super().__init__(line)\n        self.key = key\n        self.value_fn = value_fn\n\n    def __call__(self) -> str:\n        return self.line.replace(self.key, self.value_fn())\n\n    def _new_line(self, line: str) -> DelayReplaceLine:\n        return DelayReplaceLine(self.key, self.value_fn, line)",
    "docstring": "At end of codegen call",
    "type": "class",
    "file_path": "pytorch\\torch\\_inductor\\utils.py",
    "ast_data": "ClassDef name:DelayReplaceLine FunctionDef name:__init__ arg:self arg:key arg:value_fn arg:line arguments arg arg arg arg Call Call Assign Assign FunctionDef name:__call__ arg:self arguments arg Return return:yes Call Call FunctionDef name:_new_line arg:self arg:line arguments arg arg Return return:yes Call"
  },
  {
    "library": "authlib",
    "name": "validate_acr",
    "source_code": "def validate_acr(self):\n    return self._validate_claim_value('acr')",
    "docstring": "OPTIONAL. Authentication Context Class Reference. String specifying an Authentication Context Class Reference value that identifies the Authentication Context Class that the authentication performed satisfied. The value \"0\" indicates the End-User authentication did not meet the requirements of _ level 1. Authentication using a long-lived browser cookie, for instance, is one example where the use of \"level 0\" is appropriate. Authentications with level 0 SHOULD NOT be used to authorize access to any resource of any monetary value. An absolute URI or an _ registered name SHOULD be used as the acr value; registered names MUST NOT be used with a different meaning than that which is registered. Parties using this claim will need to agree upon the meanings of the values used, which may be context-specific. The acr value is a case sensitive string. .. _: .. _:",
    "type": "method",
    "file_path": "authlib\\authlib\\oidc\\core\\claims.py",
    "ast_data": "FunctionDef name:validate_acr arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_finish",
    "source_code": "def _finish(self, update_ops, name_scope):\n    return control_flow_ops.group(*update_ops, name=name_scope)",
    "docstring": "Do what is needed to finish the update. This is called with the using the \"name\" that users have chosen for the application of gradients. Args: update_ops: List of objects to update variables. This list contains the values returned by the and calls. name_scope: String. Name to use for the returned operation. Returns: The operation to apply updates.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\training\\optimizer.py",
    "ast_data": "FunctionDef name:_finish arg:self arg:update_ops arg:name_scope arguments arg arg arg Return return:yes Call"
  },
  {
    "library": "django",
    "name": "MD5PasswordHasher",
    "source_code": "class MD5PasswordHasher(BasePasswordHasher):\n    algorithm = 'md5'\n\n    def encode(self, password, salt):\n        self._check_encode_args(password, salt)\n        hash = hashlib.md5((salt + password).encode()).hexdigest()\n        return '%s$%s$%s' % (self.algorithm, salt, hash)\n\n    def decode(self, encoded):\n        algorithm, salt, hash = encoded.split('$', 2)\n        assert algorithm == self.algorithm\n        return {'algorithm': algorithm, 'hash': hash, 'salt': salt}\n\n    def verify(self, password, encoded):\n        decoded = self.decode(encoded)\n        encoded_2 = self.encode(password, decoded['salt'])\n        return constant_time_compare(encoded, encoded_2)\n\n    def safe_summary(self, encoded):\n        decoded = self.decode(encoded)\n        return {_('algorithm'): decoded['algorithm'], _('salt'): mask_hash(decoded['salt'], show=2), _('hash'): mask_hash(decoded['hash'])}\n\n    def must_update(self, encoded):\n        decoded = self.decode(encoded)\n        return must_update_salt(decoded['salt'], self.salt_entropy)\n\n    def harden_runtime(self, password, encoded):\n        pass",
    "docstring": "The Salted MD5 password hashing algorithm (not recommended)",
    "type": "class",
    "file_path": "django\\django\\contrib\\auth\\hashers.py",
    "ast_data": "ClassDef name:MD5PasswordHasher Assign FunctionDef name:encode arg:self arg:password arg:salt arguments arg arg arg Call Assign Call Call Call Return return:yes FunctionDef name:decode arg:self arg:encoded arguments arg arg Assign Call Compare Return return:yes FunctionDef name:verify arg:self arg:password arg:encoded arguments arg arg arg Assign Call Assign Call Return return:yes Call FunctionDef name:safe_summary arg:self arg:encoded arguments arg arg Assign Call Return return:yes Call Call Call Call Call FunctionDef name:must_update arg:self arg:encoded arguments arg arg Assign Call Return return:yes Call FunctionDef name:harden_runtime arg:self arg:password arg:encoded arguments arg arg arg"
  },
  {
    "library": "pygame",
    "name": "size",
    "source_code": "def size(self, text):\n    return self.get_rect(text).size",
    "docstring": "size(text) -> (width, height) determine the amount of space needed to render text",
    "type": "method",
    "file_path": "pygame\\src_py\\ftfont.py",
    "ast_data": "FunctionDef name:size arg:self arg:text arguments arg arg Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "nanargmin",
    "source_code": "def nanargmin(values: np.ndarray, *, axis: AxisInt | None=None, skipna: bool=True, mask: npt.NDArray[np.bool_] | None=None) -> int | np.ndarray:\n    values, mask = _get_values(values, True, fill_value_typ='+inf', mask=mask)\n    result = values.argmin(axis)\n    result = _maybe_arg_null_out(result, axis, mask, skipna)\n    return result",
    "docstring": "Parameters ---------- values : ndarray axis : int, optional skipna : bool, default True mask : ndarray[bool], optional nan-mask if known Returns ------- result : int or ndarray[int] The index/indices of min value in specified axis or -1 in the NA case Examples -------- >>> from pandas.core import nanops >>> arr = np.array([1, 2, 3, np.nan, 4]) >>> nanops.nanargmin(arr) np.int64(0) >>> arr = np.array(range(12), dtype=np.float64).reshape(4, 3) >>> arr[2:, 0] = np.nan >>> arr array([[ 0., 1., 2.], [ 3., 4., 5.], [nan, 7., 8.], [nan, 10., 11.]]) >>> nanops.nanargmin(arr, axis=1) array([0, 0, 1, 1])",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\nanops.py",
    "ast_data": "FunctionDef name:nanargmin arg:values arguments arg arg arg arg Assign Call Assign Call Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "on_epoch_end",
    "source_code": "def on_epoch_end(self):\n    pass",
    "docstring": "A hook called after each epoch.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\data_adapter.py",
    "ast_data": "FunctionDef name:on_epoch_end arg:self arguments arg"
  },
  {
    "library": "authlib",
    "name": "check_grant_type",
    "source_code": "def check_grant_type(self, grant_type):\n    raise NotImplementedError()",
    "docstring": "Validate if the client can handle the given grant_type. There are four grant types defined by RFC6749: * authorization_code * implicit * client_credentials * password For instance, there is a `` column in your client:: def check_grant_type(self, grant_type): return grant_type in self.grant_types :param grant_type: the requested grant_type string. :return: bool",
    "type": "method",
    "file_path": "authlib\\authlib\\oauth2\\rfc6749\\models.py",
    "ast_data": "FunctionDef name:check_grant_type arg:self arg:grant_type arguments arg arg Raise Call"
  },
  {
    "library": "pytorch",
    "name": "_direct_serialization_deserialize",
    "source_code": "def _direct_serialization_deserialize(body, nodes):\n\n    class DummyModule(torch.nn.Module):\n\n        def __init__(self, body):\n            super().__init__()\n            self.__dict__.update(body)\n    dummy = DummyModule(body)\n    return fx.GraphModule(dummy, nodes.to_graph())",
    "docstring": "Custom method for serialization. DO AS I SAY -- NOT AS I DO. This violates the principle that GraphModules serialize via code export & re-tracing. We allow for this here because **PIPE STAGES SHOULD NOT BE PERSISTED TO DISK -- THIS IS ONLY FOR TRANSMISSION VIA RPC**. Persisting these instances to disk will expose internal implementation details of and related data structures and is NOT advised.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\pipelining\\_IR.py",
    "ast_data": "FunctionDef name:_direct_serialization_deserialize arg:body arg:nodes arguments arg arg ClassDef name:DummyModule FunctionDef name:__init__ arg:self arg:body arguments arg arg Call Call Call Assign Call Return return:yes Call Call"
  },
  {
    "library": "matplotlib",
    "name": "mpl_disconnect",
    "source_code": "def mpl_disconnect(self, cid):\n    self.callbacks.disconnect(cid)",
    "docstring": "Disconnect the callback with id *cid*. Examples -------- :: cid = canvas.mpl_connect('button_press_event', on_press) # ... later canvas.mpl_disconnect(cid)",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\backend_bases.py",
    "ast_data": "FunctionDef name:mpl_disconnect arg:self arg:cid arguments arg arg Call"
  },
  {
    "library": "scipy",
    "name": "_indenter",
    "source_code": "def _indenter(s, n=0):\n    split = s.split('\\n')\n    indent = ' ' * n\n    return ('\\n' + indent).join(split)",
    "docstring": "Ensures that lines after the first are indented by the specified amount",
    "type": "function",
    "file_path": "scipy\\scipy\\_lib\\_util.py",
    "ast_data": "FunctionDef name:_indenter arg:s arg:n arguments arg arg Assign Call Assign Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "__str__",
    "source_code": "def __str__(self) -> str:\n    placeholder_names: list[str] = []\n    maybe_return_typename: list[str] = ['']\n    node_strs = [node.format_node(placeholder_names) for node in self.nodes]\n    param_str = ', '.join(placeholder_names)\n    s = f'graph({param_str}){maybe_return_typename[0]}:'\n    for node_str in node_strs:\n        if node_str:\n            s += '\\n    ' + node_str\n    return s",
    "docstring": "Return a human-readable (not machine-readable) string representation of this Graph",
    "type": "method",
    "file_path": "pytorch\\torch\\fx\\graph.py",
    "ast_data": "FunctionDef name:__str__ arg:self arguments arg Assign Call Assign Call Assign For If Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "extra_repr",
    "source_code": "@torch.jit.export\ndef extra_repr(self):\n    return f'fake_quant_enabled={self.fake_quant_enabled}, observer_enabled={self.observer_enabled}, scale={self.scale}, zero_point={self.zero_point}, dtype={self.dtype}, quant_min={self.activation_post_process.quant_min}, quant_max={self.activation_post_process.quant_max}, qscheme={self.qscheme}'",
    "docstring": "Define a string representation of the object's attributes.",
    "type": "method",
    "file_path": "pytorch\\torch\\ao\\quantization\\fake_quantize.py",
    "ast_data": "FunctionDef name:extra_repr arg:self arguments arg Return return:yes"
  },
  {
    "library": "pandas",
    "name": "_is_homogeneous_type",
    "source_code": "@property\ndef _is_homogeneous_type(self) -> bool:\n    return len({block.values.dtype for block in self._mgr.blocks}) <= 1",
    "docstring": "Whether all the columns in a DataFrame have the same type. Returns ------- bool Examples -------- >>> DataFrame({\"A\": [1, 2], \"B\": [3, 4]})._is_homogeneous_type True >>> DataFrame({\"A\": [1, 2], \"B\": [3.0, 4.0]})._is_homogeneous_type False Items with the same type but different sizes are considered different types. >>> DataFrame( ... { ... \"A\": np.array([1, 2], dtype=np.int32), ... \"B\": np.array([1, 2], dtype=np.int64), ... } ... )._is_homogeneous_type False",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\frame.py",
    "ast_data": "FunctionDef name:_is_homogeneous_type arg:self arguments arg Return return:yes Compare Call"
  },
  {
    "library": "pandas",
    "name": "_read_axes",
    "source_code": "def _read_axes(self, where, start: int | None=None, stop: int | None=None) -> list[tuple[np.ndarray, np.ndarray] | tuple[Index, Index]]:\n    selection = Selection(self, where=where, start=start, stop=stop)\n    values = selection.select()\n    results = []\n    for a in self.axes:\n        a.set_info(self.info)\n        res = a.convert(values, nan_rep=self.nan_rep, encoding=self.encoding, errors=self.errors)\n        results.append(res)\n    return results",
    "docstring": "Create the axes sniffed from the table. Parameters ---------- where : ??? start : int or None, default None stop : int or None, default None Returns ------- List[Tuple[index_values, column_values]]",
    "type": "method",
    "file_path": "pandas\\pandas\\io\\pytables.py",
    "ast_data": "FunctionDef name:_read_axes arg:self arg:where arg:start arg:stop arguments arg arg arg arg Assign Call Assign Call Assign For Call Assign Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "UnknownVariable",
    "source_code": "class UnknownVariable(VariableTracker):\n    pass",
    "docstring": "It could be anything!",
    "type": "class",
    "file_path": "pytorch\\torch\\_dynamo\\variables\\misc.py",
    "ast_data": "ClassDef name:UnknownVariable"
  },
  {
    "library": "tensorflow",
    "name": "assert_rank_v2",
    "source_code": "@tf_export('debugging.assert_rank', 'assert_rank', v1=[])\n@dispatch.add_dispatch_support\ndef assert_rank_v2(x, rank, message=None, name=None):\n    return assert_rank(x=x, rank=rank, message=message, name=name)",
    "docstring": "Assert that has rank equal to . This Op checks that the rank of is equal to . If has a different rank, , as well as the shape of are printed, and is raised. Args: x: . rank: Scalar integer . message: A string to prefix to the default message. name: A name for this operation (optional). Defaults to \"assert_rank\". Returns: Op raising unless has specified rank. If static checks determine has correct rank, a is returned. This can be used with inside of s to block followup computation until the check has executed. @compatibility(eager) returns None @end_compatibility Raises: InvalidArgumentError: if the check can be performed immediately and does not have rank . The check can be performed immediately during eager execution or if the shape of is statically known.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\check_ops.py",
    "ast_data": "FunctionDef name:assert_rank_v2 arg:x arg:rank arg:message arg:name arguments arg arg arg arg Return return:yes Call Call"
  },
  {
    "library": "scipy",
    "name": "assert_array_almost_equal",
    "source_code": "def assert_array_almost_equal(actual, desired, decimal=6, *args, **kwds):\n    rtol, atol = (0, 1.5 * 10 ** (-decimal))\n    return xp_assert_close(actual, desired, *args, atol=atol, rtol=rtol, check_dtype=False, check_shape=False, **kwds)",
    "docstring": "Backwards compatible replacement. In new code, use xp_assert_close instead.",
    "type": "function",
    "file_path": "scipy\\scipy\\_lib\\_array_api_no_0d.py",
    "ast_data": "FunctionDef name:assert_array_almost_equal arg:actual arg:desired arg:decimal arguments arg arg arg arg arg Assign Return return:yes Call"
  },
  {
    "library": "django",
    "name": "find",
    "source_code": "def find(self, path, find_all=False, **kwargs):\n    raise NotImplementedError('subclasses of BaseFinder must provide a find() method')",
    "docstring": "Given a relative file path, find an absolute file path. If the `` parameter is False (default) return only the first found file path; if True, return a list of all found files paths.",
    "type": "method",
    "file_path": "django\\django\\contrib\\staticfiles\\finders.py",
    "ast_data": "FunctionDef name:find arg:self arg:path arg:find_all arguments arg arg arg arg Raise Call"
  },
  {
    "library": "tensorflow",
    "name": "pow",
    "source_code": "@dispatch.add_dispatch_support\n@doc_controls.do_not_generate_docs\ndef pow(x, a):\n    return math_ops.pow(x, a)",
    "docstring": "Element-wise exponentiation. Args: x: Tensor or variable. a: Python integer. Returns: A tensor.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\backend.py",
    "ast_data": "FunctionDef name:pow arg:x arg:a arguments arg arg Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "DeckkersAarts",
    "source_code": "class DeckkersAarts(Benchmark):\n\n    def __init__(self, dimensions=2):\n        Benchmark.__init__(self, dimensions)\n        self._bounds = list(zip([-20.0] * self.N, [20.0] * self.N))\n        self.custom_bounds = ([-1, 1], [14, 16])\n        self.global_optimum = [[0.0, 14.9451209]]\n        self.fglob = -24776.518342168\n\n    def fun(self, x, *args):\n        self.nfev += 1\n        return 100000.0 * x[0] ** 2 + x[1] ** 2 - (x[0] ** 2 + x[1] ** 2) ** 2 + 1e-05 * (x[0] ** 2 + x[1] ** 2) ** 4",
    "docstring": "Deckkers-Aarts objective function. This class defines the Deckkers-Aarts [1]_ global optimization problem. This is a multimodal minimization problem defined as follows: .. math:: f_{\\text{DeckkersAarts}}(x) = 10^5x_1^2 + x_2^2 - (x_1^2 + x_2^2)^2 + 10^{-5}(x_1^2 + x_2^2)^4 with :math: for :math:. *Global optimum*: :math: for :math: .. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions For Global Optimization Problems Int. Journal of Mathematical Modelling and Numerical Optimisation, 2013, 4, 150-194. TODO: jamil solution and global minimum are slightly wrong.",
    "type": "class",
    "file_path": "scipy\\benchmarks\\benchmarks\\go_benchmark_functions\\go_funcs_D.py",
    "ast_data": "ClassDef name:DeckkersAarts FunctionDef name:__init__ arg:self arg:dimensions arguments arg arg Call Assign Call Call Assign Assign Assign FunctionDef name:fun arg:self arg:x arguments arg arg arg Return return:yes"
  },
  {
    "library": "sphinx",
    "name": "get_refnodes",
    "source_code": "def get_refnodes(self, doctree: Node, result: list[dict[str, Any]]) -> list[dict[str, Any]]:\n    if isinstance(doctree, nodes.reference) and doctree.get('refuri'):\n        refuri = doctree['refuri']\n        if refuri.startswith(('http://', 'https://', 'irc:', 'mailto:')):\n            return result\n        classes = doctree.parent.attributes['classes']\n        for level in range(8, 0, -1):\n            if self.toctree_template % level in classes:\n                result.append({'level': level, 'refuri': html.escape(refuri), 'text': ssp(html.escape(doctree.astext()))})\n                break\n    elif isinstance(doctree, nodes.Element):\n        for elem in doctree:\n            result = self.get_refnodes(elem, result)\n    return result",
    "docstring": "Collect section titles, their depth in the toc and the refuri.",
    "type": "method",
    "file_path": "sphinx\\sphinx\\builders\\_epub_base.py",
    "ast_data": "FunctionDef name:get_refnodes arg:self arg:doctree arg:result arguments arg arg arg If BoolOp Call Call Assign If Call Return return:yes Assign For Call If Compare Call Call Call Call Call If Call For Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, from_logits=False, label_smoothing=0, axis=-1, reduction=losses_utils.ReductionV2.AUTO, name='categorical_crossentropy'):\n    super().__init__(categorical_crossentropy, name=name, reduction=reduction, from_logits=from_logits, label_smoothing=label_smoothing, axis=axis)",
    "docstring": "Initializes instance. Args: from_logits: Whether is expected to be a logits tensor. By default, we assume that encodes a probability distribution. label_smoothing: Float in [0, 1]. When > 0, label values are smoothed, meaning the confidence on label values are relaxed. For example, if , use for non-target labels and for target labels. axis: The axis along which to compute crossentropy (the features axis). Defaults to -1. reduction: Type of to apply to loss. Default value is . indicates that the reduction option will be determined by the usage context. For almost all cases this defaults to . When used with , outside of built-in training loops such as and , using or will raise an error. Please see this custom training [tutorial]( for more details. name: Optional name for the instance. Defaults to 'categorical_crossentropy'.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\losses.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:from_logits arg:label_smoothing arg:axis arg:reduction arg:name arguments arg arg arg arg arg arg Call Call"
  },
  {
    "library": "numpy",
    "name": "legval3d",
    "source_code": "def legval3d(x, y, z, c):\n    return pu._valnd(legval, c, x, y, z)",
    "docstring": "Evaluate a 3-D Legendre series at points (x, y, z). This function returns the values: .. math:: p(x,y,z) = \\sum_{i,j,k} c_{i,j,k} * L_i(x) * L_j(y) * L_k(z) The parameters , , and are converted to arrays only if they are tuples or a lists, otherwise they are treated as a scalars and they must have the same shape after conversion. In either case, either , , and or their elements must support multiplication and addition both with themselves and with the elements of . If has fewer than 3 dimensions, ones are implicitly appended to its shape to make it 3-D. The shape of the result will be c.shape[3:] + x.shape. Parameters ---------- x, y, z : array_like, compatible object The three dimensional series is evaluated at the points `xyzxyzcxyz`. See Also -------- legval, legval2d, leggrid2d, leggrid3d",
    "type": "function",
    "file_path": "numpy\\numpy\\polynomial\\legendre.py",
    "ast_data": "FunctionDef name:legval3d arg:x arg:y arg:z arg:c arguments arg arg arg arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "increment_toplevel",
    "source_code": "@staticmethod\ndef increment_toplevel(key: str, value: int=1, log_level: CompileEventLogLevel=CompileEventLogLevel.COMPILATION_METRIC):\n    chromium_log = get_chromium_event_logger()\n    top_event = chromium_log.get_outermost_event()\n    if top_event is None:\n        raise RuntimeError('No toplevel event active. Please only call this function within a metrics context/dynamo_timed.')\n    CompileEventLogger.increment(top_event, log_level, key, value)",
    "docstring": "Increments a value on the toplevel metric. By default, logs to metric.",
    "type": "method",
    "file_path": "pytorch\\torch\\_dynamo\\utils.py",
    "ast_data": "FunctionDef name:increment_toplevel arg:key arg:value arg:log_level arguments arg arg arg Assign Call Assign Call If Compare Raise Call Call"
  },
  {
    "library": "cryptography",
    "name": "public_bytes",
    "source_code": "@abc.abstractmethod\ndef public_bytes(self, encoding: _serialization.Encoding, format: _serialization.PublicFormat) -> bytes:\n    pass",
    "docstring": "Returns the key serialized as bytes.",
    "type": "method",
    "file_path": "cryptography\\src\\cryptography\\hazmat\\primitives\\asymmetric\\dh.py",
    "ast_data": "FunctionDef name:public_bytes arg:self arg:encoding arg:format arguments arg arg arg"
  },
  {
    "library": "django",
    "name": "normalize_eols",
    "source_code": "def normalize_eols(raw_contents):\n    lines_list = raw_contents.splitlines()\n    if lines_list and lines_list[-1]:\n        lines_list.append('')\n    return '\\n'.join(lines_list)",
    "docstring": "Take a block of raw text that will be passed through str.splitlines() to get universal newlines treatment. Return the resulting block of text with normalized EOL sequences ready to be written to disk using current platform's native EOLs.",
    "type": "function",
    "file_path": "django\\django\\core\\management\\commands\\makemessages.py",
    "ast_data": "FunctionDef name:normalize_eols arg:raw_contents arguments arg Assign Call If BoolOp Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "SubprocessTimeoutError",
    "source_code": "@tf_export('__internal__.distribute.multi_process_runner.SubprocessTimeoutError', v1=[])\nclass SubprocessTimeoutError(RuntimeError):\n\n    def __init__(self, msg, mpr_result):\n        super(SubprocessTimeoutError, self).__init__(msg)\n        self.mpr_result = mpr_result",
    "docstring": "An error that indicates there is at least one subprocess timing out. When this is raised, a namedtuple object representing the multi-process run result can be retrieved by 's attribute. See for more information.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\multi_process_runner.py",
    "ast_data": "ClassDef name:SubprocessTimeoutError FunctionDef name:__init__ arg:self arg:msg arg:mpr_result arguments arg arg arg Call Call Assign Call"
  },
  {
    "library": "django",
    "name": "units_func",
    "source_code": "def units_func(f):\n    return double_output(f, [c_void_p, POINTER(c_char_p)], strarg=True)",
    "docstring": "Create a ctypes function prototype for OSR units functions, e.g., OSRGetAngularUnits, OSRGetLinearUnits.",
    "type": "function",
    "file_path": "django\\django\\contrib\\gis\\gdal\\prototypes\\srs.py",
    "ast_data": "FunctionDef name:units_func arg:f arguments arg Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "strategy",
    "source_code": "@property\ndef strategy(self):\n    return self._strategy",
    "docstring": "Returns the associated with the .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\coordinator\\cluster_coordinator.py",
    "ast_data": "FunctionDef name:strategy arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "reason",
    "source_code": "@staticmethod\ndef reason(op_idx, details):\n    return '%d %s' % (op_idx, details)",
    "docstring": "Returns reason why the Op at op_idx is traced or not.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\tpu\\tensor_tracer.py",
    "ast_data": "FunctionDef name:reason arg:op_idx arg:details arguments arg arg Return return:yes"
  },
  {
    "library": "sphinx",
    "name": "_toc_entry_name",
    "source_code": "def _toc_entry_name(self, sig_node: desc_signature) -> str:\n    return ''",
    "docstring": "Returns the text of the table of contents entry for the object. This function is called once, in :py:meth:, to set the name for the table of contents entry (a special attribute `_object_hierarchy_parts!PyObject._toc_entry_namehandle_signature()` method.",
    "type": "method",
    "file_path": "sphinx\\sphinx\\directives\\__init__.py",
    "ast_data": "FunctionDef name:_toc_entry_name arg:self arg:sig_node arguments arg arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "visit_Subscript",
    "source_code": "def visit_Subscript(self, node):\n    s = node.slice\n    if anno.hasanno(node, anno.Basic.QN):\n        qn = anno.getanno(node, anno.Basic.QN)\n        if isinstance(node.ctx, gast.Load):\n            self.reads.add(qn)\n    elif isinstance(s, (gast.Tuple, gast.Slice)):\n        if anno.hasanno(node.value, anno.Basic.QN):\n            self.complex_reads.add(anno.getanno(node.value, anno.Basic.QN))\n    value_qn = anno.getanno(node.value, anno.Basic.QN, None)\n    if value_qn in self.exclude:\n        node.value = self.generic_visit(node.value)\n    else:\n        node.value = self.visit(node.value)\n    node.slice = self.visit(s)\n    return node",
    "docstring": "Visits nodes with subscript in the AST.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\gradient_input_output_exclusions.py",
    "ast_data": "FunctionDef name:visit_Subscript arg:self arg:node arguments arg arg Assign If Call Assign Call If Call Call If Call If Call Call Call Assign Call If Compare Assign Call Assign Call Assign Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "mark_dynamic",
    "source_code": "@forbid_in_graph\ndef mark_dynamic(t, index, *, min=None, max=None):\n    if is_traceable_wrapper_subclass(t):\n        _apply_func_to_inner_tensors_of_same_dim(mark_dynamic, t, index, min=min, max=max)\n    if isinstance(index, int):\n        if not hasattr(t, '_dynamo_dynamic_indices'):\n            t._dynamo_dynamic_indices = set()\n            t._dynamo_dynamic_range = set()\n        t._dynamo_dynamic_indices.add(index)\n        t._dynamo_dynamic_range.add(_DimRange(index, min, max))\n        return\n    assert isinstance(index, (list, tuple))\n    for i in index:\n        mark_dynamic(t, i, min=min, max=max)",
    "docstring": "Mark a tensor as having a dynamic dim and set corresponding min and max range for the dim. [Note - on the state of mark_dynamic] The behavior of having a dynamic dimension on a tensor is governed by a few factors: 1) torch._dynamo.config dynamic_shapes True or False. a) dynamic_shapes=True - dynamic_shapes must be True for mark_dynamic to work. a) dynamic_shapes=False - This config will raise an exception when used in conjunction with mark_dynamic. We will eventually support this. 2) If the dimension is fully constrained - as in, it does not allow more than a single value in both eager (torch.compile, torch._dynamo.optimize) mode and export mode (torch._dynamo.export), we will raise an error 3) If the dimension is partially constrained - allowing at least 2 values but not the full unbounded range of shapes, in eager we will pass it through, but export will raise an error. 4) Attempts to trace this function will explicitly raise. As such, all calls to mark_dynamic must be made before torch.compile.",
    "type": "function",
    "file_path": "pytorch\\torch\\_dynamo\\decorators.py",
    "ast_data": "FunctionDef name:mark_dynamic arg:t arg:index arguments arg arg arg arg If Call Call If Call If Call Assign Call Assign Call Call Call Call Return return:no Call For Call"
  },
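To make the `mark_dynamic` entry above concrete, here is a minimal usage sketch; the function `fn` and the bounds are illustrative assumptions, not part of the source record.

```python
# Hedged sketch: mark a batch dimension as dynamic before compiling.
import torch

def fn(x):
    return x * 2

x = torch.randn(8, 16)
# Dim 0 may vary between 2 and 64; must be called before torch.compile runs fn.
torch._dynamo.mark_dynamic(x, 0, min=2, max=64)
compiled_fn = torch.compile(fn)
print(compiled_fn(x).shape)  # torch.Size([8, 16])
```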
  {
    "library": "pytorch",
    "name": "reify",
    "source_code": "@abstractmethod\ndef reify(self, envs: dict[int, dict[str, str]]) -> LogsDest:\n    pass",
    "docstring": "Given the environment variables, builds destination of log files for each of the local ranks. Envs parameter contains env variables dict for each of the local ranks, where entries are defined in: :func:.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\elastic\\multiprocessing\\api.py",
    "ast_data": "FunctionDef name:reify arg:self arg:envs arguments arg arg"
  },
  {
    "library": "scipy",
    "name": "bmat",
    "source_code": "def bmat(blocks, format=None, dtype=None):\n    blocks = np.asarray(blocks, dtype='object')\n    if any((isinstance(b, sparray) for b in blocks.flat)):\n        return _block(blocks, format, dtype)\n    else:\n        return _block(blocks, format, dtype, return_spmatrix=True)",
    "docstring": "Build a sparse array or matrix from sparse sub-blocks Note: is preferred over `block_arrayblocks`. See Also -------- block_array Examples -------- >>> from scipy.sparse import coo_array, bmat >>> A = coo_array([[1, 2], [3, 4]]) >>> B = coo_array([[5], [6]]) >>> C = coo_array([[7]]) >>> bmat([[A, B], [None, C]]).toarray() array([[1, 2, 5], [3, 4, 6], [0, 0, 7]]) >>> bmat([[A, None], [None, C]]).toarray() array([[1, 2, 0], [3, 4, 0], [0, 0, 7]])",
    "type": "function",
    "file_path": "scipy\\scipy\\sparse\\_construct.py",
    "ast_data": "FunctionDef name:bmat arg:blocks arg:format arg:dtype arguments arg arg arg Assign Call If Call Call Return return:yes Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_make_finalize_func",
    "source_code": "def _make_finalize_func(self, finalize_func):\n    self._finalize_func = structured_function.StructuredFunctionWrapper(finalize_func, self._transformation_name(), input_structure=self._state_structure)",
    "docstring": "Make wrapping defun for finalize_func.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\data\\experimental\\ops\\grouping.py",
    "ast_data": "FunctionDef name:_make_finalize_func arg:self arg:finalize_func arguments arg arg Assign Call Call"
  },
  {
    "library": "sphinx",
    "name": "add_html_theme",
    "source_code": "def add_html_theme(self, name: str, theme_path: str | os.PathLike[str]) -> None:\n    logger.debug('[app] adding HTML theme: %r, %r', name, theme_path)\n    self.registry.add_html_theme(name, theme_path)",
    "docstring": "Register a HTML Theme. The *name* is a name of theme, and *theme_path* is a full path to the theme (refs: :ref:). .. versionadded:: 1.6",
    "type": "method",
    "file_path": "sphinx\\sphinx\\application.py",
    "ast_data": "FunctionDef name:add_html_theme arg:self arg:name arg:theme_path arguments arg arg arg Call Call"
  },
  {
    "library": "tensorflow",
    "name": "false_negatives_at_thresholds",
    "source_code": "@tf_export(v1=['metrics.false_negatives_at_thresholds'])\ndef false_negatives_at_thresholds(labels, predictions, thresholds, weights=None, metrics_collections=None, updates_collections=None, name=None):\n    if context.executing_eagerly():\n        raise RuntimeError('tf.metrics.false_negatives_at_thresholds is not supported when eager execution is enabled.')\n    with variable_scope.variable_scope(name, 'false_negatives', (predictions, labels, weights)):\n        values, update_ops = _confusion_matrix_at_thresholds(labels, predictions, thresholds, weights=weights, includes=('fn',))\n        fn_value = _aggregate_variable(values['fn'], metrics_collections)\n        if updates_collections:\n            ops.add_to_collections(updates_collections, update_ops['fn'])\n        return (fn_value, update_ops['fn'])",
    "docstring": "Computes false negatives at provided threshold values. If is , weights default to 1. Use weights of 0 to mask values. Args: labels: A whose shape matches . Will be cast to . predictions: A floating point of arbitrary shape and whose values are in the range . thresholds: A python list or tuple of float thresholds in . weights: Optional whose rank is either 0, or the same rank as , and must be broadcastable to (i.e., all dimensions must be either , or the same as the corresponding dimension). metrics_collections: An optional list of collections that should be added to. updates_collections: An optional list of collections that should be added to. name: An optional variable_scope name. Returns: false_negatives: A float of shape . update_op: An operation that updates the variable and returns its current value. Raises: ValueError: If and have mismatched shapes, or if is not and its shape doesn't match , or if either or are not a list or tuple. RuntimeError: If eager execution is enabled.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\metrics_impl.py",
    "ast_data": "FunctionDef name:false_negatives_at_thresholds arg:labels arg:predictions arg:thresholds arg:weights arg:metrics_collections arg:updates_collections arg:name arguments arg arg arg arg arg arg arg If Call Raise Call With Call Assign Call Assign Call If Call Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "Adjiman",
    "source_code": "class Adjiman(Benchmark):\n\n    def __init__(self, dimensions=2):\n        Benchmark.__init__(self, dimensions)\n        self._bounds = ([-1.0, 2.0], [-1.0, 1.0])\n        self.global_optimum = [[2.0, 0.10578]]\n        self.fglob = -2.02180678\n\n    def fun(self, x, *args):\n        self.nfev += 1\n        return cos(x[0]) * sin(x[1]) - x[0] / (x[1] ** 2 + 1)",
    "docstring": "Adjiman objective function. The Adjiman [1]_ global optimization problem is a multimodal minimization problem defined as follows: .. math:: f_{\\text{Adjiman}}(x) = \\cos(x_1)\\sin(x_2) - \\frac{x_1}{(x_2^2 + 1)} with, :math: and :math:. *Global optimum*: :math: for :math: .. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions For Global Optimization Problems Int. Journal of Mathematical Modelling and Numerical Optimisation, 2013, 4, 150-194.",
    "type": "class",
    "file_path": "scipy\\benchmarks\\benchmarks\\go_benchmark_functions\\go_funcs_A.py",
    "ast_data": "ClassDef name:Adjiman FunctionDef name:__init__ arg:self arg:dimensions arguments arg arg Call Assign Assign Assign FunctionDef name:fun arg:self arg:x arguments arg arg arg Return return:yes Call Call"
  },
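A quick numerical check of the Adjiman entry above: evaluating the objective at the reported optimum reproduces `fglob` from the class definition (a plain-Python restatement, not the benchmark harness).

```python
# Evaluate the Adjiman objective at its reported global optimum.
from math import cos, sin

def adjiman(x):
    return cos(x[0]) * sin(x[1]) - x[0] / (x[1] ** 2 + 1)

print(adjiman([2.0, 0.10578]))  # approx -2.02180678, matching fglob
```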
  {
    "library": "pytorch",
    "name": "SubgraphTemplate",
    "source_code": "class SubgraphTemplate(KernelTemplate):\n    index_counter = itertools.count()\n\n    def __init__(self, name: str, make_fx_graph: Callable[..., Any]):\n        self.name = f'{name}_{next(SubgraphTemplate.index_counter)}'\n        self.make_fx_graph = make_fx_graph\n\n    def generate(self, input_nodes: list[Buffer], layout: Layout, **kwargs: Any) -> SubgraphChoiceCaller:\n        return SubgraphChoiceCaller(name=self.name, input_nodes=input_nodes, layout=layout, description='', make_fx_graph=self.make_fx_graph)",
    "docstring": "A template for subgraph evaluation to be used in autotuning. This class allows creating customized subgraphs that can be appended as choices during the autotuning process, enabling the selection of optimal implementations for complex operations.",
    "type": "class",
    "file_path": "pytorch\\torch\\_inductor\\codegen\\subgraph.py",
    "ast_data": "ClassDef name:SubgraphTemplate Assign Call FunctionDef name:__init__ arg:self arg:name arg:make_fx_graph arguments arg arg arg Assign Call Assign FunctionDef name:generate arg:self arg:input_nodes arg:layout arguments arg arg arg arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_div_python2",
    "source_code": "def _div_python2(x, y, name=None):\n    with ops.name_scope(name, 'div', [x, y]) as name:\n        x = ops.convert_to_tensor(x, name='x')\n        y = ops.convert_to_tensor(y, name='y', dtype=x.dtype.base_dtype)\n        x_dtype = x.dtype.base_dtype\n        y_dtype = y.dtype.base_dtype\n        if x_dtype != y_dtype:\n            raise TypeError(f'`x` and `y` must have the same dtype, got {x_dtype!r} != {y_dtype!r}.')\n        if x_dtype.is_floating or x_dtype.is_complex:\n            return gen_math_ops.real_div(x, y, name=name)\n        else:\n            return gen_math_ops.floor_div(x, y, name=name)",
    "docstring": "Divide two values using Python 2 semantics. Used for Tensor.__div__. Args: x: numerator of real numeric type. y: denominator of real numeric type. name: A name for the operation (optional). Returns: returns the quotient of x and y.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\math_ops.py",
    "ast_data": "FunctionDef name:_div_python2 arg:x arg:y arg:name arguments arg arg arg With Call Assign Call Assign Call Assign Assign If Compare Raise Call If BoolOp Return return:yes Call Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "idelete",
    "source_code": "def idelete(self, indexer) -> BlockManager:\n    is_deleted = np.zeros(self.shape[0], dtype=np.bool_)\n    is_deleted[indexer] = True\n    taker = (~is_deleted).nonzero()[0]\n    nbs = self._slice_take_blocks_ax0(taker, only_slice=True, ref_inplace_op=True)\n    new_columns = self.items[~is_deleted]\n    axes = [new_columns, self.axes[1]]\n    return type(self)(tuple(nbs), axes, verify_integrity=False)",
    "docstring": "Delete selected locations, returning a new BlockManager.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\internals\\managers.py",
    "ast_data": "FunctionDef name:idelete arg:self arg:indexer arguments arg arg Assign Call Assign Assign Call Assign Call Assign Assign Return return:yes Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "no_automatic_dependency_tracking_scope",
    "source_code": "@tf_contextlib.contextmanager\ndef no_automatic_dependency_tracking_scope(obj):\n    previous_value = getattr(obj, '_setattr_tracking', True)\n    obj._setattr_tracking = False\n    try:\n        yield\n    finally:\n        obj._setattr_tracking = previous_value",
    "docstring": "A context that disables automatic dependency tracking when assigning attrs. Objects that inherit from Autotrackable automatically creates dependencies to trackable objects through attribute assignments, and wraps data structures (lists or dicts) with trackable classes. This scope may be used to temporarily disable this behavior. This works similar to the decorator . Example usage: Args: obj: A trackable object. Yields: a scope in which the object doesn't track dependencies.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\saving\\saved_model\\utils.py",
    "ast_data": "FunctionDef name:no_automatic_dependency_tracking_scope arg:obj arguments arg Assign Call Assign Try Assign"
  },
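The docstring above lost its usage example in extraction; the following is a plausible reconstruction of the pattern it describes. The attribute names `arr1`/`arr2` are illustrative, and the import path mirrors the (internal) file_path recorded above.

```python
# Sketch: attribute assignment inside the scope is not tracked.
import tensorflow as tf
from tensorflow.python.keras.saving.saved_model.utils import (
    no_automatic_dependency_tracking_scope,
)

model = tf.keras.Model()
model.arr1 = []  # tracked: wrapped in a trackable ListWrapper
with no_automatic_dependency_tracking_scope(model):
    model.arr2 = []  # untracked: stays a plain Python list
```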
  {
    "library": "pandas",
    "name": "doc",
    "source_code": "def doc(*docstrings: None | str | Callable, **params: object) -> Callable[[F], F]:\n\n    def decorator(decorated: F) -> F:\n        docstring_components: list[str | Callable] = []\n        if decorated.__doc__:\n            docstring_components.append(dedent(decorated.__doc__))\n        for docstring in docstrings:\n            if docstring is None:\n                continue\n            if hasattr(docstring, '_docstring_components'):\n                docstring_components.extend(docstring._docstring_components)\n            elif isinstance(docstring, str) or docstring.__doc__:\n                docstring_components.append(docstring)\n        params_applied = [component.format(**params) if isinstance(component, str) and len(params) > 0 else component for component in docstring_components]\n        decorated.__doc__ = ''.join([component if isinstance(component, str) else dedent(component.__doc__ or '') for component in params_applied])\n        decorated._docstring_components = docstring_components\n        return decorated\n    return decorator",
    "docstring": "A decorator to take docstring templates, concatenate them and perform string substitution on them. This decorator will add a variable \"_docstring_components\" to the wrapped callable to keep track the original docstring template for potential usage. If it should be consider as a template, it will be saved as a string. Otherwise, it will be saved as callable, and later user __doc__ and dedent to get docstring. Parameters ---------- *docstrings : None, str, or callable The string / docstring / docstring template to be appended in order after default docstring under callable. **params The string which would be used to format docstring template.",
    "type": "function",
    "file_path": "pandas\\pandas\\util\\_decorators.py",
    "ast_data": "FunctionDef name:doc arguments arg arg FunctionDef name:decorator arg:decorated arguments arg If Call Call For If Compare If Call Call If BoolOp Call Call Assign BoolOp Call Compare Call Call Assign Call Call Call BoolOp Assign Return return:yes Return return:yes"
  },
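As a small illustration of the `doc` decorator above, template placeholders in the wrapped function's docstring are filled from `**params`; the function `head` and the `klass` parameter are invented for the example.

```python
# Sketch: substituting a template parameter into a docstring.
from pandas.util._decorators import doc

@doc(klass="Series")
def head(n=5):
    """Return the first {klass} rows."""

print(head.__doc__)  # Return the first Series rows.
```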
  {
    "library": "django",
    "name": "defer",
    "source_code": "def defer(self, *fields):\n    self._not_support_combined_queries('defer')\n    if self._fields is not None:\n        raise TypeError('Cannot call defer() after .values() or .values_list()')\n    clone = self._chain()\n    if fields == (None,):\n        clone.query.clear_deferred_loading()\n    else:\n        clone.query.add_deferred_loading(fields)\n    return clone",
    "docstring": "Defer the loading of data for certain fields until they are accessed. Add the set of deferred fields to any existing set of deferred fields. The only exception to this is if None is passed in as the only parameter, in which case remove all deferrals.",
    "type": "method",
    "file_path": "django\\django\\db\\models\\query.py",
    "ast_data": "FunctionDef name:defer arg:self arguments arg arg Call If Compare Raise Call Assign Call If Compare Call Call Return return:yes"
  },
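A brief usage sketch for `defer` above; `Entry` is a hypothetical model and the snippet assumes a configured Django project, so it is illustrative rather than standalone-runnable.

```python
# Sketch: deferred loading on a queryset (Entry is a hypothetical model).
entries = Entry.objects.defer("body")   # SELECT skips `body`; it loads lazily on access
entries = entries.defer("headline")     # deferrals accumulate across calls
entries = entries.defer(None)           # passing None clears all deferrals
```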
  {
    "library": "pandas",
    "name": "axes",
    "source_code": "@property\ndef axes(self) -> list[Index]:\n    return [self.index, self.columns]",
    "docstring": "Return a list representing the axes of the DataFrame. It has the row axis labels and column axis labels as the only members. They are returned in that order. See Also -------- DataFrame.index: The index (row labels) of the DataFrame. DataFrame.columns: The column labels of the DataFrame. Examples -------- >>> df = pd.DataFrame({\"col1\": [1, 2], \"col2\": [3, 4]}) >>> df.axes [RangeIndex(start=0, stop=2, step=1), Index(['col1', 'col2'], dtype='object')]",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\frame.py",
    "ast_data": "FunctionDef name:axes arg:self arguments arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "lazy_compile",
    "source_code": "def lazy_compile(**compile_kwargs):\n\n    def decorate_fn(fn):\n\n        @functools.wraps(fn)\n        def compile_hook(*args, **kwargs):\n            compiled_fn = torch.compile(fn, **compile_kwargs)\n            globals()[fn.__name__] = functools.wraps(fn)(compiled_fn)\n            return compiled_fn(*args, **kwargs)\n        return compile_hook\n    return decorate_fn",
    "docstring": "Lazily wrap a function with torch.compile on the first call This avoids eagerly importing dynamo.",
    "type": "function",
    "file_path": "pytorch\\torch\\utils\\_content_store.py",
    "ast_data": "FunctionDef name:lazy_compile arguments arg FunctionDef name:decorate_fn arg:fn arguments arg FunctionDef name:compile_hook arguments arg arg Assign Call Assign Call Call Call Return return:yes Call Call Return return:yes Return return:yes"
  },
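A usage sketch for `lazy_compile` above; note the module is private, so the import path may change, and the `dynamic=True` kwarg is just one example of a `torch.compile` option being forwarded.

```python
# Sketch: compilation is deferred to the first invocation.
import torch
from torch.utils._content_store import lazy_compile  # private module

@lazy_compile(dynamic=True)
def double(x):
    return x * 2

# torch.compile runs only when the function is first invoked.
print(double(torch.ones(3)))  # tensor([2., 2., 2.])
```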
  {
    "library": "tensorflow",
    "name": "model_input_signature",
    "source_code": "def model_input_signature(model, keep_original_batch_size=False):\n    if hasattr(model, 'save_spec'):\n        input_specs = model.save_spec(dynamic_batch=not keep_original_batch_size)\n        if input_specs is None:\n            return None\n        input_specs = input_specs[0][0]\n    else:\n        input_specs = model._get_save_spec(dynamic_batch=not keep_original_batch_size)\n        if input_specs is None:\n            return None\n    input_specs = _enforce_names_consistency(input_specs)\n    if isinstance(input_specs, collections_abc.Sequence) and len(input_specs) == 1:\n        return input_specs\n    else:\n        return [input_specs]",
    "docstring": "Inspect model to get its input signature. The model's input signature is a list with a single (possibly-nested) object. This is due to the Keras-enforced restriction that tensor inputs must be passed in as the first argument. For example, a model with input {'feature1': , 'feature2': } will have input signature: [{'feature1': TensorSpec, 'feature2': TensorSpec}] Args: model: Keras Model object. keep_original_batch_size: A boolean indicating whether we want to keep using the original batch size or set it to None. Default is , which means that the batch dim of the returned input signature will always be set to . Returns: A list containing either a single TensorSpec or an object with nested TensorSpecs. This list does not contain the argument.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\lite\\python\\tflite_keras_util.py",
    "ast_data": "FunctionDef name:model_input_signature arg:model arg:keep_original_batch_size arguments arg arg If Call Assign Call If Compare Return return:no Assign Assign Call If Compare Return return:no Assign Call If BoolOp Call Compare Call Return return:yes Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "embedding_lookup",
    "source_code": "def embedding_lookup(self, features: Any, weights: Optional[Any]=None) -> Any:\n    raise NotImplementedError",
    "docstring": "Lookup the embedding table using the input features.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\tpu\\tpu_embedding_base.py",
    "ast_data": "FunctionDef name:embedding_lookup arg:self arg:features arg:weights arguments arg arg arg Raise"
  },
  {
    "library": "matplotlib",
    "name": "_get_aspect_ratio",
    "source_code": "def _get_aspect_ratio(self):\n    figure_size = self.get_figure().get_size_inches()\n    ll, ur = self.get_position() * figure_size\n    width, height = ur - ll\n    return height / (width * self.get_data_ratio())",
    "docstring": "Convenience method to calculate the aspect ratio of the Axes in the display coordinate system.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\axes\\_axes.py",
    "ast_data": "FunctionDef name:_get_aspect_ratio arg:self arguments arg Assign Call Call Assign Call Assign Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "_populate_tensor_meta",
    "source_code": "def _populate_tensor_meta(node: Node, output_spec: OutputSpecType) -> None:\n    if isinstance(node.meta['val'], Sequence):\n        assert isinstance(output_spec, Sequence)\n        for spec, fake_tensor in zip(output_spec, node.meta['val']):\n            assert spec is not None\n            spec.tensor_meta = TensorMeta(shape=fake_tensor.shape, stride=fake_tensor.stride(), dtype=fake_tensor.dtype)\n    else:\n        assert isinstance(output_spec, DTensorSpec)\n        output_spec.tensor_meta = TensorMeta(shape=node.meta['val'].shape, stride=node.meta['val'].stride(), dtype=node.meta['val'].dtype)",
    "docstring": "Util function to populate tensor meta of output_spec based on node metadata.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\tensor\\experimental\\_tp_transform.py",
    "ast_data": "FunctionDef name:_populate_tensor_meta arg:node arg:output_spec arguments arg arg If Call Call For Call Compare Assign Call Call Call Assign Call Call"
  },
  {
    "library": "django",
    "name": "_prepare_cursor",
    "source_code": "def _prepare_cursor(self, cursor):\n    self.validate_thread_sharing()\n    if self.queries_logged:\n        wrapped_cursor = self.make_debug_cursor(cursor)\n    else:\n        wrapped_cursor = self.make_cursor(cursor)\n    return wrapped_cursor",
    "docstring": "Validate the connection is usable and perform database cursor wrapping.",
    "type": "method",
    "file_path": "django\\django\\db\\backends\\base\\base.py",
    "ast_data": "FunctionDef name:_prepare_cursor arg:self arg:cursor arguments arg arg Call If Assign Call Assign Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_postprocess_flash_output",
    "source_code": "def _postprocess_flash_output(inpt_tensor: torch.Tensor, og_size: int) -> torch.Tensor:\n    if inpt_tensor.size(-1) != og_size:\n        return inpt_tensor[..., :og_size]\n    return inpt_tensor",
    "docstring": "Handles the unpad of the last dimension",
    "type": "function",
    "file_path": "pytorch\\torch\\nn\\attention\\_utils.py",
    "ast_data": "FunctionDef name:_postprocess_flash_output arg:inpt_tensor arg:og_size arguments arg arg If Compare Call Return return:yes Return return:yes"
  },
  {
    "library": "django",
    "name": "__init__",
    "source_code": "def __init__(self, templates=None):\n    self._templates = templates\n    self._engines = {}",
    "docstring": "templates is an optional list of template engine definitions (structured like settings.TEMPLATES).",
    "type": "method",
    "file_path": "django\\django\\template\\utils.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:templates arguments arg arg Assign Assign"
  },
  {
    "library": "pytorch",
    "name": "get_elapsed_time_ms",
    "source_code": "def get_elapsed_time_ms(start_time_in_seconds: float):\n    end_time = time.time()\n    return int((end_time - start_time_in_seconds) * 1000)",
    "docstring": "Return the elapsed time in millis from the given start time.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\elastic\\metrics\\api.py",
    "ast_data": "FunctionDef name:get_elapsed_time_ms arg:start_time_in_seconds arguments arg Assign Call Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "fit",
    "source_code": "@_fit_context(prefer_skip_nested_validation=False)\ndef fit(self, X, y, **fit_params):\n    if y is None:\n        raise ValueError(f'This {self.__class__.__name__} estimator requires y to be passed, but the target y is None.')\n    y = check_array(y, input_name='y', accept_sparse=False, ensure_all_finite=True, ensure_2d=False, dtype='numeric', allow_nd=True)\n    self._training_dim = y.ndim\n    if y.ndim == 1:\n        y_2d = y.reshape(-1, 1)\n    else:\n        y_2d = y\n    self._fit_transformer(y_2d)\n    y_trans = self.transformer_.transform(y_2d)\n    if y_trans.ndim == 2 and y_trans.shape[1] == 1:\n        y_trans = y_trans.squeeze(axis=1)\n    self.regressor_ = self._get_regressor(get_clone=True)\n    if _routing_enabled():\n        routed_params = process_routing(self, 'fit', **fit_params)\n    else:\n        routed_params = Bunch(regressor=Bunch(fit=fit_params))\n    self.regressor_.fit(X, y_trans, **routed_params.regressor.fit)\n    if hasattr(self.regressor_, 'feature_names_in_'):\n        self.feature_names_in_ = self.regressor_.feature_names_in_\n    return self",
    "docstring": "Fit the model according to the given training data. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) Training vector, where is the number of samples and is the number of features. y : array-like of shape (n_samples,) Target values. **fit_params : dict - If (default): Parameters directly passed to the method of the underlying regressor. - If : Parameters safely routed to the method of the underlying regressor. .. versionchanged:: 1.6 See :ref: for more details. Returns ------- self : object Fitted estimator.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\compose\\_target.py",
    "ast_data": "FunctionDef name:fit arg:self arg:X arg:y arguments arg arg arg arg If Compare Raise Call Assign Call Assign If Compare Assign Call Assign Call Assign Call If BoolOp Compare Compare Assign Call Assign Call If Call Assign Call Assign Call Call Call If Call Assign Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "graph_placeholder",
    "source_code": "def graph_placeholder(dtype, shape, name=None):\n    dtype = dtype.base_dtype\n    dtype_value = attr_value_pb2.AttrValue(type=dtype.as_datatype_enum)\n    if isinstance(shape, (list, tuple)):\n        shape = tensor_shape.TensorShape(shape)\n    shape = attr_value_pb2.AttrValue(shape=shape.as_proto())\n    g = ops.get_default_graph()\n    attrs = {'dtype': dtype_value, 'shape': shape}\n    op = g._create_op_internal('Placeholder', [], [dtype], input_types=[], attrs=attrs, name=name)\n    result, = op.outputs\n    if op_callbacks.should_invoke_op_callbacks():\n        callback_outputs = op_callbacks.invoke_op_callbacks('Placeholder', tuple(), attrs, tuple(op.outputs), op_name=name, graph=g)\n        if callback_outputs is not None:\n            result, = callback_outputs\n    return result",
    "docstring": "Graph-only version of tf.compat.v1.placeholder(), for internal use only.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\graph_only_ops.py",
    "ast_data": "FunctionDef name:graph_placeholder arg:dtype arg:shape arg:name arguments arg arg arg Assign Assign Call If Call Assign Call Assign Call Call Assign Call Assign Assign Call Assign If Call Assign Call Call Call If Compare Assign Return return:yes"
  },
  {
    "library": "kornia",
    "name": "forward",
    "source_code": "def forward(self, patch_src: Tensor, src_homo_dst: Optional[Tensor]=None) -> Tensor:\n    _warped_grid = self._warped_grid\n    if src_homo_dst is not None:\n        warped_patch = homography_warp(patch_src, src_homo_dst, (self.height, self.width), mode=self.mode, padding_mode=self.padding_mode, align_corners=self.align_corners, normalized_coordinates=self.normalized_coordinates)\n    elif _warped_grid is not None:\n        if not _warped_grid.device == patch_src.device:\n            raise TypeError(f'Patch and warped grid must be on the same device. Got patch.device: {patch_src.device} warped_grid.device: {_warped_grid.device}. Whether recall precompute_warp_grid() with the correct device for the homograhy or change the patch device.')\n        warped_patch = F.grid_sample(patch_src, _warped_grid, mode=self.mode, padding_mode=self.padding_mode, align_corners=self.align_corners)\n    else:\n        raise RuntimeError('Unknown warping. If homographies are not provided                                 they must be preset using the method:                                 precompute_warp_grid().')\n    return warped_patch",
    "docstring": "Warp a tensor from source into reference frame. Args: patch_src: The tensor to warp. src_homo_dst: The homography or stack of homographies from destination to source. The homography assumes normalized coordinates [-1, 1] if normalized_coordinates is True. Return: Patch sampled at locations from source to destination. Shape: - Input: :math: and :math: - Output: :math: Example: >>> input = torch.rand(1, 3, 32, 32) >>> homography = torch.eye(3).view(1, 3, 3) >>> warper = HomographyWarper(32, 32) >>> # without precomputing the warp >>> output = warper(input, homography) # NxCxHxW >>> # precomputing the warp >>> warper.precompute_warp_grid(homography) >>> output = warper(input) # NxCxHxW",
    "type": "method",
    "file_path": "kornia\\kornia\\geometry\\transform\\homography_warper.py",
    "ast_data": "FunctionDef name:forward arg:self arg:patch_src arg:src_homo_dst arguments arg arg arg Assign If Compare Assign Call If Compare If Compare Raise Call Assign Call Raise Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "tree_structure",
    "source_code": "def tree_structure(tree: PyTree, is_leaf: Optional[Callable[[PyTree], bool]]=None) -> TreeSpec:\n    return optree.tree_structure(tree, is_leaf=is_leaf, none_is_leaf=True, namespace='torch')",
    "docstring": "Get the treespec for a pytree. See also :func:. >>> tree = {\"b\": (2, [3, 4]), \"a\": 1, \"c\": None, \"d\": 5} >>> tree_structure(tree) PyTreeSpec({'b': (*, [*, *]), 'a': *, 'c': *, 'd': *}, NoneIsLeaf, namespace='torch') >>> tree_structure(1) PyTreeSpec(*, NoneIsLeaf, namespace='torch') >>> tree_structure(None) PyTreeSpec(*, NoneIsLeaf, namespace='torch') Args: tree (pytree): A pytree to flatten. is_leaf (callable, optional): An extra leaf predicate function that will be called at each flattening step. The function should have a single argument with signature `True`, the whole subtree being treated as a leaf. Otherwise, the default pytree registry will be used to determine a node is a leaf or not. If the function is not specified, the default pytree registry will be used. Returns: A treespec object representing the structure of the pytree.",
    "type": "function",
    "file_path": "pytorch\\torch\\utils\\_cxx_pytree.py",
    "ast_data": "FunctionDef name:tree_structure arg:tree arg:is_leaf arguments arg arg Return return:yes Call"
  },
  {
    "library": "kornia",
    "name": "gradient",
    "source_code": "def gradient(tensor: torch.Tensor, kernel: torch.Tensor, structuring_element: Optional[torch.Tensor]=None, origin: Optional[List[int]]=None, border_type: str='geodesic', border_value: float=0.0, max_val: float=10000.0, engine: str='unfold') -> torch.Tensor:\n    return dilation(tensor, kernel=kernel, structuring_element=structuring_element, origin=origin, border_type=border_type, border_value=border_value, max_val=max_val, engine=engine) - erosion(tensor, kernel=kernel, structuring_element=structuring_element, origin=origin, border_type=border_type, border_value=border_value, max_val=max_val, engine=engine)",
    "docstring": "Return the morphological gradient of an image. .. image:: _static/img/gradient.png That means, (dilation - erosion) applying the same kernel in each channel. The kernel must have 2 dimensions. Args: tensor: Image with shape :math:. kernel: Positions of non-infinite elements of a flat structuring element. Non-zero values give the set of neighbors of the center over which the operation is applied. Its shape is :math:. For full structural elements use torch.ones_like(structural_element). structuring_element: Structuring element used for the grayscale dilation. It may be a non-flat structuring element. origin: Origin of the structuring element. Default is None and uses the center of the structuring element as origin (rounding towards zero). border_type: It determines how the image borders are handled, where `(B, C, H, W)here `__. Example: >>> tensor = torch.rand(1, 3, 5, 5) >>> kernel = torch.ones(3, 3) >>> gradient_img = gradient(tensor, kernel)",
    "type": "function",
    "file_path": "kornia\\kornia\\morphology\\morphology.py",
    "ast_data": "FunctionDef name:gradient arg:tensor arg:kernel arg:structuring_element arg:origin arg:border_type arg:border_value arg:max_val arg:engine arguments arg arg arg arg arg arg arg arg Return return:yes Call Call"
  },
  {
    "library": "scikit-learn",
    "name": "TransformerTags",
    "source_code": "@dataclass(slots=True)\nclass TransformerTags:\n    preserves_dtype: list[str] = field(default_factory=lambda: ['float64'])",
    "docstring": "Tags for the transformer. Parameters ---------- preserves_dtype : list[str], default=[\"float64\"] Applies only on transformers. It corresponds to the data types which will be preserved such that is the same as after calling . If this list is empty, then the transformer is not expected to preserve the data type. The first value in the list is considered as the default data type, corresponding to the data type of the output when the input data type is not going to be preserved.",
    "type": "class",
    "file_path": "scikit-learn\\sklearn\\utils\\_tags.py",
    "ast_data": "ClassDef name:TransformerTags Call arguments Call"
  },
  {
    "library": "cherrypy",
    "name": "_ThreadData",
    "source_code": "class _ThreadData(_local):\n    pass",
    "docstring": "A container for thread-specific data.",
    "type": "class",
    "file_path": "cherrypy\\cherrypy\\__init__.py",
    "ast_data": "ClassDef name:_ThreadData"
  },
  {
    "library": "tensorflow",
    "name": "_assert_ranks_condition",
    "source_code": "def _assert_ranks_condition(x, ranks, static_condition, dynamic_condition, data, summarize):\n    for rank in ranks:\n        assert_type(rank, dtypes.int32)\n    ranks_static = tuple([tensor_util.constant_value(rank) for rank in ranks])\n    if not any((r is None for r in ranks_static)):\n        for rank_static in ranks_static:\n            if rank_static.ndim != 0:\n                raise ValueError('Rank must be a scalar.')\n        x_rank_static = x.get_shape().ndims\n        if x_rank_static is not None:\n            if not static_condition(x_rank_static, ranks_static):\n                raise ValueError('Static rank condition failed', x_rank_static, ranks_static)\n            return control_flow_ops.no_op(name='static_checks_determined_all_ok')\n    condition = dynamic_condition(array_ops.rank(x), ranks)\n    for rank, rank_static in zip(ranks, ranks_static):\n        if rank_static is None:\n            this_data = ['Rank must be a scalar. Received rank: ', rank]\n            rank_check = assert_rank(rank, 0, data=this_data)\n            condition = control_flow_ops.with_dependencies([rank_check], condition)\n    return control_flow_assert.Assert(condition, data, summarize=summarize)",
    "docstring": "Assert has a rank that satisfies a given condition. Args: x: Numeric . ranks: Scalar . static_condition: A python function that takes and returns if the condition is satisfied, otherwise. dynamic_condition: An that takes [actual_rank, given_ranks] and return if the condition is satisfied, otherwise. data: The tensors to print out if the condition is false. Defaults to error message and first few entries of . summarize: Print this many entries of each tensor. Returns: Op raising if fails dynamic_condition. Raises: ValueError: If static checks determine fails static_condition.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\check_ops.py",
    "ast_data": "FunctionDef name:_assert_ranks_condition arg:x arg:ranks arg:static_condition arg:dynamic_condition arg:data arg:summarize arguments arg arg arg arg arg arg For Call Assign Call Call If Call Compare For If Compare Raise Call Assign Call If Compare If Call Raise Call Return return:yes Call Assign Call Call For Call If Compare Assign Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_prepare_output_masks",
    "source_code": "def _prepare_output_masks(self):\n    return [getattr(x, '_keras_mask', None) for x in self.outputs]",
    "docstring": "Returns masks corresponding to model outputs.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\training_v1.py",
    "ast_data": "FunctionDef name:_prepare_output_masks arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "_unflatten_sparse_tensors",
    "source_code": "def _unflatten_sparse_tensors(flat, tensors):\n    flat_indices, flat_values = flat\n    indices = torch._C._nn.unflatten_dense_tensors(flat_indices, [torch.Tensor._indices(t) for t in tensors])\n    values = torch._C._nn.unflatten_dense_tensors(flat_values, [torch.Tensor._values(t) for t in tensors])\n    outputs = []\n    for t, i, v in zip(tensors, indices, values):\n        outputs.append(t.new(i, v, t.size()))\n    return tuple(outputs)",
    "docstring": "View flat buffer (containing indices and values) using the sizes of tensors. Assume that tensors are of same sparse type, and that flat is given by _flatten_sparse_tensors. Args: flat (tuple(Tensor, Tensor)): flattened indices and values of sparse tensors to unflatten. tensors (Iterable[Tensor]): sparse tensors whose sizes will be used to unflatten flat. Returns: Unflattened sparse tensors with sizes same as tensors and values from flat.",
    "type": "function",
    "file_path": "pytorch\\torch\\_utils.py",
    "ast_data": "FunctionDef name:_unflatten_sparse_tensors arg:flat arg:tensors arguments arg arg Assign Assign Call Call Assign Call Call Assign For Call Call Call Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_IgammaGrad",
    "source_code": "@ops.RegisterGradient('Igamma')\ndef _IgammaGrad(op: ops.Operation, grad):\n    a = op.inputs[0]\n    x = op.inputs[1]\n    sa = array_ops.shape(a)\n    sx = array_ops.shape(x)\n    ra, rx = gen_array_ops.broadcast_gradient_args(sa, sx)\n    with ops.control_dependencies([grad]):\n        partial_a = gen_math_ops.igamma_grad_a(a, x)\n        partial_x = math_ops.exp(-x + (a - 1) * math_ops.log(x) - math_ops.lgamma(a))\n        return (array_ops.reshape(math_ops.reduce_sum(partial_a * grad, ra), sa), array_ops.reshape(math_ops.reduce_sum(partial_x * grad, rx), sx))",
    "docstring": "Returns gradient of igamma(a, x) with respect to a and x.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\math_grad.py",
    "ast_data": "FunctionDef name:_IgammaGrad arg:op arg:grad arguments arg arg Assign Assign Assign Call Assign Call Assign Call With Call Assign Call Assign Call Call Call Return return:yes Call Call Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "new_cond_branch",
    "source_code": "def new_cond_branch(self, section_id):\n    assert section_id in self.cond_leaves\n    if section_id in self.cond_entry:\n        self.cond_leaves[section_id].append(self.leaves)\n        self.leaves = self.cond_entry[section_id]\n    else:\n        self.cond_entry[section_id] = self.leaves",
    "docstring": "Begins a new branch in a cond section.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\autograph\\pyct\\cfg.py",
    "ast_data": "FunctionDef name:new_cond_branch arg:self arg:section_id arguments arg arg Compare If Compare Call Assign Assign"
  },
  {
    "library": "django",
    "name": "BaseDetailView",
    "source_code": "class BaseDetailView(SingleObjectMixin, View):\n\n    def get(self, request, *args, **kwargs):\n        self.object = self.get_object()\n        context = self.get_context_data(object=self.object)\n        return self.render_to_response(context)",
    "docstring": "Base view for displaying a single object. This requires subclassing to provide a response mixin.",
    "type": "class",
    "file_path": "django\\django\\views\\generic\\detail.py",
    "ast_data": "ClassDef name:BaseDetailView FunctionDef name:get arg:self arg:request arguments arg arg arg arg Assign Call Assign Call Return return:yes Call"
  },
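As the BaseDetailView docstring says, subclasses must supply a response mixin or an equivalent `render_to_response`; the following hedged sketch uses a hypothetical `Article` model and returns JSON instead of a template.

```python
# Sketch: supplying render_to_response directly instead of a template mixin.
from django.http import JsonResponse
from django.views.generic.detail import BaseDetailView

class ArticleJSONView(BaseDetailView):
    # model = Article  # hypothetical model backing get_object()

    def render_to_response(self, context):
        obj = context["object"]
        return JsonResponse({"pk": obj.pk, "repr": str(obj)})
```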
  {
    "library": "tensorflow",
    "name": "forward",
    "source_code": "def forward(self, x, name='forward'):\n    return self._call_forward(x, name)",
    "docstring": "Returns the forward evaluation, i.e., X = g(Y). Args: x: . The input to the \"forward\" evaluation. name: The name to give this op. Returns: . Raises: TypeError: if is specified and is not . NotImplementedError: if is not implemented.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\distributions\\bijector_impl.py",
    "ast_data": "FunctionDef name:forward arg:self arg:x arg:name arguments arg arg arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "add",
    "source_code": "def add(self, datum, location_ids):\n    node_name = datum.node_exec_stats.node_name\n    if node_name in self._node_name_to_sample:\n        sample = self._node_name_to_sample[node_name]\n        sample.location_id.extend(location_ids)\n    else:\n        sample = profile_pb2.Sample()\n        sample.value.extend([0, 0, 0])\n        label = sample.label.add()\n        label.key = self._string_table.index_of('node_name')\n        label.str = self._string_table.index_of(node_name)\n        label = sample.label.add()\n        label.key = self._string_table.index_of('op_type')\n        label.str = self._string_table.index_of(datum.op_type)\n        self._node_name_to_sample[node_name] = sample\n    sample.value[0] += 1\n    sample.value[1] += datum.node_exec_stats.all_end_rel_micros\n    sample.value[2] += datum.node_exec_stats.op_end_rel_micros - datum.node_exec_stats.op_start_rel_micros",
    "docstring": "Adds a sample data point. Args: datum: to add a sample for. location_ids: List of numberic location ids for this sample.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\profiler\\pprof_profiler.py",
    "ast_data": "FunctionDef name:add arg:self arg:datum arg:location_ids arguments arg arg arg Assign If Compare Assign Call Assign Call Call Assign Call Assign Call Assign Call Assign Call Assign Call Assign Call Assign"
  },
  {
    "library": "tensorflow",
    "name": "get_min_max_value",
    "source_code": "def get_min_max_value(self) -> tuple[float, float]:\n    freq_max_idx = np.argmax(self._hist_freq)\n    return self._get_min_max_value_by_expanding_range(freq_max_idx)",
    "docstring": "Finds min and max starting from the index of the max frequency. The HistogramMseMaxFrequency method starts from the bin with the highest frequency and expands the range to both sides. This performs well when data is well spread on both sides of the max frequency. Returns: (min_value, max_value): Min and max calculated using method to expand the range based on max frequency.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\compiler\\mlir\\quantization\\tensorflow\\calibrator\\calibration_algorithm.py",
    "ast_data": "FunctionDef name:get_min_max_value arg:self arguments arg Assign Call Return return:yes Call"
  },
  {
    "library": "cherrypy",
    "name": "fullvalue",
    "source_code": "def fullvalue(self):\n    if self.file:\n        self.file.seek(0)\n        value = self.file.read()\n        self.file.seek(0)\n    else:\n        value = self.value\n    value = self.decode_entity(value)\n    return value",
    "docstring": "Return this entity as a string, whether stored in a file or not.",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\_cpreqbody.py",
    "ast_data": "FunctionDef name:fullvalue arg:self arguments arg If Call Assign Call Call Assign Assign Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "__init__",
    "source_code": "def __init__(self, cost_class: float=1, cost_bbox: float=1, cost_giou: float=1):\n    super().__init__()\n    self.cost_class = cost_class\n    self.cost_bbox = cost_bbox\n    self.cost_giou = cost_giou\n    assert cost_class != 0 or cost_bbox != 0 or cost_giou != 0, 'all costs cant be 0'",
    "docstring": "Creates the matcher Params: cost_class: This is the relative weight of the classification error in the matching cost cost_bbox: This is the relative weight of the L1 error of the bounding box coordinates in the matching cost cost_giou: This is the relative weight of the giou loss of the bounding box in the matching cost",
    "type": "method",
    "file_path": "pytorch\\benchmarks\\functional_autograd_benchmark\\torchvision_models.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:cost_class arg:cost_bbox arg:cost_giou arguments arg arg arg arg Call Call Assign Assign Assign BoolOp Compare Compare Compare"
  },
  {
    "library": "numpy",
    "name": "find_duplicate",
    "source_code": "@set_module('numpy.rec')\ndef find_duplicate(list):\n    return [item for item, counts in Counter(list).items() if counts > 1]",
    "docstring": "Find duplication in a list, return a list of duplicated elements",
    "type": "function",
    "file_path": "numpy\\numpy\\_core\\records.py",
    "ast_data": "FunctionDef name:find_duplicate arg:list arguments arg Return return:yes Call Call Compare Call"
  },
  {
    "library": "tensorflow",
    "name": "OnSessionInitAction",
    "source_code": "class OnSessionInitAction:\n    PROCEED = 'proceed'\n    REMOTE_INSTR_LOOP = 'remote_instr_loop'",
    "docstring": "Enum-like values for possible action to take on session init.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\debug\\wrappers\\framework.py",
    "ast_data": "ClassDef name:OnSessionInitAction Assign Assign"
  },
  {
    "library": "pytorch",
    "name": "ops",
    "source_code": "@property\ndef ops(self) -> OpsHandler[Any]:\n    return _ops._get_handler()",
    "docstring": "The operator handler specific to the current codegen task",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\virtualized.py",
    "ast_data": "FunctionDef name:ops arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "incomplete_size",
    "source_code": "def incomplete_size(self, name=None):\n    if name is None:\n        name = '%s_incomplete_size' % self._name\n    return self._incomplete_size_fn(shared_name=self._name, name=name, dtypes=self._dtypes, capacity=self._capacity, memory_limit=self._memory_limit)",
    "docstring": "Returns the number of incomplete elements in the staging area. Args: name: A name for the operation (optional) Returns: The created op",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\data_flow_ops.py",
    "ast_data": "FunctionDef name:incomplete_size arg:self arg:name arguments arg arg If Compare Assign Return return:yes Call"
  },
  {
    "library": "kornia",
    "name": "inverse",
    "source_code": "def inverse(self) -> Se2:\n    r_inv: So2 = self.r.inverse()\n    _t = -1 * self.t\n    if isinstance(_t, int):\n        raise TypeError('Unexpected integer from `-1 * translation`')\n    return Se2(r_inv, r_inv * _t)",
    "docstring": "Return the inverse transformation. Example: >>> s = Se2(So2.identity(1), torch.ones(1,2)) >>> s_inv = s.inverse() >>> s_inv.r Parameter containing: tensor([1.+0.j], requires_grad=True) >>> s_inv.t Parameter containing: tensor([[-1., -1.]], requires_grad=True)",
    "type": "method",
    "file_path": "kornia\\kornia\\geometry\\liegroup\\se2.py",
    "ast_data": "FunctionDef name:inverse arg:self arguments arg Call Assign If Call Raise Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "_has_none_qconfig",
    "source_code": "def _has_none_qconfig(node: Argument, node_name_to_qconfig: dict[str, QConfigAny]) -> bool:\n    return isinstance(node, Node) and node.name in node_name_to_qconfig and (node_name_to_qconfig[node.name] is None)",
    "docstring": "Check if a node has a qconfig of None, i.e. user requested to not quantize the node",
    "type": "function",
    "file_path": "pytorch\\torch\\ao\\quantization\\fx\\convert.py",
    "ast_data": "FunctionDef name:_has_none_qconfig arg:node arg:node_name_to_qconfig arguments arg arg Return return:yes BoolOp Call Compare Compare"
  },
  {
    "library": "matplotlib",
    "name": "control_points",
    "source_code": "@property\ndef control_points(self):\n    return self._cpoints",
    "docstring": "The control points of the curve.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\bezier.py",
    "ast_data": "FunctionDef name:control_points arg:self arguments arg Return return:yes"
  },
  {
    "library": "django",
    "name": "get_path_info",
    "source_code": "def get_path_info(environ):\n    path_info = get_bytes_from_wsgi(environ, 'PATH_INFO', '/')\n    return repercent_broken_unicode(path_info).decode()",
    "docstring": "Return the HTTP request's PATH_INFO as a string.",
    "type": "function",
    "file_path": "django\\django\\core\\handlers\\wsgi.py",
    "ast_data": "FunctionDef name:get_path_info arg:environ arguments arg Assign Call Return return:yes Call Call"
  },
  {
    "library": "scikit-learn",
    "name": "KeyValTupleParam",
    "source_code": "class KeyValTupleParam(KeyValTuple):\n    pass",
    "docstring": "Dummy class for correctly rendering key-value tuples from parameters.",
    "type": "class",
    "file_path": "scikit-learn\\sklearn\\utils\\_pprint.py",
    "ast_data": "ClassDef name:KeyValTupleParam"
  },
  {
    "library": "tensorflow",
    "name": "custom_getter",
    "source_code": "def custom_getter(getter, name, *args, **kwargs):\n    partitioner = kwargs.get('partitioner', None)\n    if partitioner is not None:\n        kwargs['partitioner'] = None\n        logging.warning('Partitioned variables are not supported on TPU. Got `partitioner` that is %s for variable %s. Setting `partitioner` to `None`.', partitioner, name)\n    if saved_custom_getter is None:\n        return getter(name, *args, **kwargs)\n    else:\n        return saved_custom_getter(getter, name, *args, **kwargs)",
    "docstring": "Variables on TPU have a few restrictions.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\tpu\\tpu.py",
    "ast_data": "FunctionDef name:custom_getter arg:getter arg:name arguments arg arg arg arg Assign Call If Compare Assign Call If Compare Return return:yes Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "record_untuned_is_enabled",
    "source_code": "def record_untuned_is_enabled() -> bool:\n    return torch._C._cuda_record_untuned_is_enabled()",
    "docstring": "Returns whether TunableOp operations are recorded for offline tuning.",
    "type": "function",
    "file_path": "pytorch\\torch\\cuda\\tunable.py",
    "ast_data": "FunctionDef name:record_untuned_is_enabled arguments Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "__init__",
    "source_code": "def __init__(self, base=10.0, subs=(1.0,), *, numticks=None):\n    if numticks is None:\n        if mpl.rcParams['_internal.classic_mode']:\n            numticks = 15\n        else:\n            numticks = 'auto'\n    self._base = float(base)\n    self._set_subs(subs)\n    self.numticks = numticks",
    "docstring": "Parameters ---------- base : float, default: 10.0 The base of the log used, so major ticks are placed at `~.axis.Axis.get_tick_space`, but otherwise falls back to 9.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\ticker.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:base arg:subs arguments arg arg arg arg If Compare If Assign Assign Assign Call Call Assign"
  },
  {
    "library": "matplotlib",
    "name": "get_widths",
    "source_code": "def get_widths(self):\n    return self._widths * 2",
    "docstring": "Get the lengths of the first axes (e.g., major axis).",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\collections.py",
    "ast_data": "FunctionDef name:get_widths arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "clip_by_average_norm",
    "source_code": "@deprecation.deprecated(date=None, instructions='clip_by_average_norm is deprecated in TensorFlow 2.0. Please use clip_by_norm(t, clip_norm * tf.cast(tf.size(t), tf.float32), name) instead.')\n@tf_export(v1=['clip_by_average_norm'])\n@dispatch.add_dispatch_support\ndef clip_by_average_norm(t, clip_norm, name=None):\n    with ops.name_scope(name, 'clip_by_average_norm', [t, clip_norm]) as name:\n        t = ops.convert_to_tensor(t, name='t')\n        n_element = math_ops.cast(array_ops.size(t), dtypes.float32)\n        l2norm_inv = math_ops.rsqrt(math_ops.reduce_sum(t * t, math_ops.range(array_ops.rank(t))))\n        tclip = array_ops.identity(t * clip_norm * math_ops.minimum(l2norm_inv * n_element, constant_op.constant(1.0) / clip_norm), name=name)\n    return tclip",
    "docstring": "Clips tensor values to a maximum average L2-norm. Given a tensor , and a maximum clip value , this operation normalizes so that its average L2-norm is less than or equal to . Specifically, if the average L2-norm is already less than or equal to , then is not modified. If the average L2-norm is greater than , then this operation returns a tensor of the same type and shape as with its values set to: In this case, the average L2-norm of the output tensor is . This operation is typically used to clip gradients before applying them with an optimizer. Args: t: A . clip_norm: A 0-D (scalar) > 0. A maximum clipping value. name: A name for the operation (optional). Returns: A clipped .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\clip_ops.py",
    "ast_data": "FunctionDef name:clip_by_average_norm arg:t arg:clip_norm arg:name arguments arg arg arg With Call Assign Call Assign Call Call Assign Call Call Call Call Assign Call Call Call Return return:yes Call Call"
  },
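The deprecation decorator above spells out the replacement; here is a minimal sketch of the suggested rewrite.

```python
# Sketch: the clip_by_norm equivalent named in the deprecation message.
import tensorflow as tf

t = tf.random.normal([4, 4])
clip_norm = 1.0
clipped = tf.clip_by_norm(t, clip_norm * tf.cast(tf.size(t), tf.float32))
print(clipped.shape)  # (4, 4)
```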
  {
    "library": "scrapy",
    "name": "InactiveStreamClosed",
    "source_code": "class InactiveStreamClosed(ConnectionClosed):\n\n    def __init__(self, request: Request) -> None:\n        self.request = request\n\n    def __str__(self) -> str:\n        return f'InactiveStreamClosed: Connection was closed without sending the request {self.request!r}'",
    "docstring": "Connection was closed without sending request headers of the stream. This happens when a stream is waiting for other streams to close and connection is lost.",
    "type": "class",
    "file_path": "scrapy\\scrapy\\core\\http2\\stream.py",
    "ast_data": "ClassDef name:InactiveStreamClosed FunctionDef name:__init__ arg:self arg:request arguments arg arg Assign FunctionDef name:__str__ arg:self arguments arg Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "set_sketch_params",
    "source_code": "def set_sketch_params(self, scale=None, length=None, randomness=None):\n    self._sketch = None if scale is None else (scale, length or 128.0, randomness or 16.0)",
    "docstring": "Set the sketch parameters. Parameters ---------- scale : float, optional The amplitude of the wiggle perpendicular to the source line, in pixels. If scale is , or not provided, no sketch filter will be provided. length : float, default: 128 The length of the wiggle along the line, in pixels. randomness : float, default: 16 The scale factor by which the length is shrunken or expanded.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\backend_bases.py",
    "ast_data": "FunctionDef name:set_sketch_params arg:self arg:scale arg:length arg:randomness arguments arg arg arg arg Assign Compare BoolOp BoolOp"
  },
  {
    "library": "pandas",
    "name": "set_default_names",
    "source_code": "def set_default_names(data):\n    if com.all_not_none(*data.index.names):\n        nms = data.index.names\n        if len(nms) == 1 and data.index.name == 'index':\n            warnings.warn(\"Index name of 'index' is not round-trippable.\", stacklevel=find_stack_level())\n        elif len(nms) > 1 and any((x.startswith('level_') for x in nms)):\n            warnings.warn(\"Index names beginning with 'level_' are not round-trippable.\", stacklevel=find_stack_level())\n        return data\n    data = data.copy(deep=False)\n    if data.index.nlevels > 1:\n        data.index.names = com.fill_missing_names(data.index.names)\n    else:\n        data.index.name = data.index.name or 'index'\n    return data",
    "docstring": "Sets index names to 'index' for regular, or 'level_x' for Multi",
    "type": "function",
    "file_path": "pandas\\pandas\\io\\json\\_table_schema.py",
    "ast_data": "FunctionDef name:set_default_names arg:data arguments arg If Call Assign If BoolOp Compare Call Compare Call Call If BoolOp Compare Call Call Call Call Call Return return:yes Assign Call If Compare Assign Call Assign BoolOp Return return:yes"
  },
  {
    "library": "django",
    "name": "handle_default_options",
    "source_code": "def handle_default_options(options):\n    if options.settings:\n        os.environ['DJANGO_SETTINGS_MODULE'] = options.settings\n    if options.pythonpath:\n        sys.path.insert(0, options.pythonpath)",
    "docstring": "Include any default options that all commands should accept here so that ManagementUtility can handle them before searching for user commands.",
    "type": "function",
    "file_path": "django\\django\\core\\management\\base.py",
    "ast_data": "FunctionDef name:handle_default_options arg:options arguments arg If Assign If Call"
  },
  {
    "library": "tensorflow",
    "name": "row_splits_dtype",
    "source_code": "@property\ndef row_splits_dtype(self):\n    return self._row_splits_dtype",
    "docstring": "The of the RaggedTensor's . Examples: >>> rt = tf.ragged.constant([[1, 2, 3], [4]], row_splits_dtype=tf.int64) >>> tf.type_spec_from_value(rt).row_splits_dtype tf.int64 Returns: A for the RaggedTensor's tensor. One of or .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\ragged_tensor.py",
    "ast_data": "FunctionDef name:row_splits_dtype arg:self arguments arg Return return:yes"
  },
  {
    "library": "scipy",
    "name": "subset_size",
    "source_code": "def subset_size(self, x):\n    return self._sizes[self[x]]",
    "docstring": "Get the size of the subset containing . Note that this method is faster than `x`.",
    "type": "method",
    "file_path": "scipy\\scipy\\_lib\\_disjoint_set.py",
    "ast_data": "FunctionDef name:subset_size arg:self arg:x arguments arg arg Return return:yes"
  },
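For context, this method is exposed on SciPy's public `DisjointSet`. A short usage sketch, assuming a recent SciPy where `subset_size` is available:

```python
from scipy.cluster.hierarchy import DisjointSet

ds = DisjointSet([1, 2, 3, 4])
ds.merge(1, 2)
ds.merge(3, 4)
print(ds.subset_size(2))   # 2, read directly off an internal size field
print(len(ds.subset(2)))   # same answer, but materializes the whole subset first
```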
  {
    "library": "tensorflow",
    "name": "tracing_enabled",
    "source_code": "def tracing_enabled():\n    return _thread_local_data.enable_call_tracing",
    "docstring": "Whether to add extra traces to the queue.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\saving\\saved_model\\save_impl.py",
    "ast_data": "FunctionDef name:tracing_enabled arguments Return return:yes"
  },
  {
    "library": "django",
    "name": "unset_installed_apps",
    "source_code": "def unset_installed_apps(self):\n    self.app_configs = self.stored_app_configs.pop()\n    self.apps_ready = self.models_ready = self.ready = True\n    self.clear_cache()",
    "docstring": "Cancel a previous call to set_installed_apps().",
    "type": "method",
    "file_path": "django\\django\\apps\\registry.py",
    "ast_data": "FunctionDef name:unset_installed_apps arg:self arguments arg Assign Call Assign Call"
  },
  {
    "library": "tensorflow",
    "name": "output_shape",
    "source_code": "@property\ndef output_shape(self):\n    if not self._inbound_nodes:\n        raise AttributeError('The layer has never been called and thus has no defined output shape.')\n    all_output_shapes = set([str(node.output_shapes) for node in self._inbound_nodes])\n    if len(all_output_shapes) == 1:\n        return self._inbound_nodes[0].output_shapes\n    else:\n        raise AttributeError('The layer \"%s\" has multiple inbound nodes, with different output shapes. Hence the notion of \"output shape\" is ill-defined for the layer. Use `get_output_shape_at(node_index)` instead.' % self.name)",
    "docstring": "Retrieves the output shape(s) of a layer. Only applicable if the layer has one output, or if all outputs have the same shape. Returns: Output shape, as an integer shape tuple (or list of shape tuples, one tuple per output tensor). Raises: AttributeError: if the layer has no defined output shape. RuntimeError: if called in Eager mode.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\base_layer_v1.py",
    "ast_data": "FunctionDef name:output_shape arg:self arguments arg If Raise Call Assign Call Call If Compare Call Return return:yes Raise Call"
  },
  {
    "library": "numpy",
    "name": "variable",
    "source_code": "@property\ndef variable(self):\n    return self._variable",
    "docstring": "The name of the polynomial variable",
    "type": "method",
    "file_path": "numpy\\numpy\\lib\\_polynomial_impl.py",
    "ast_data": "FunctionDef name:variable arg:self arguments arg Return return:yes"
  },
  {
    "library": "numpy",
    "name": "cache_flush",
    "source_code": "def cache_flush(self):\n    if not self._cache_path:\n        return\n    self.dist_log('write cache to path ->', self._cache_path)\n    cdict = self.__dict__.copy()\n    for attr in self.__dict__.keys():\n        if re.match(self._cache_ignore, attr):\n            cdict.pop(attr)\n    d = os.path.dirname(self._cache_path)\n    if not os.path.exists(d):\n        os.makedirs(d)\n    repr_dict = pprint.pformat(cdict, compact=True)\n    with open(self._cache_path, 'w') as f:\n        f.write(textwrap.dedent(\"            # AUTOGENERATED DON'T EDIT\\n            # Please make changes to the code generator             (distutils/ccompiler_opt.py)\\n            hash = {}\\n            data = \\\\\\n            \").format(self._cache_hash))\n        f.write(repr_dict)",
    "docstring": "Force update the cache.",
    "type": "method",
    "file_path": "numpy\\numpy\\distutils\\ccompiler_opt.py",
    "ast_data": "FunctionDef name:cache_flush arg:self arguments arg If Return return:no Call Assign Call For Call If Call Call Assign Call If Call Call Assign Call With Call Call Call Call Call"
  },
  {
    "library": "matplotlib",
    "name": "get_majorticklocs",
    "source_code": "def get_majorticklocs(self):\n    return self.major.locator()",
    "docstring": "Return this Axis' major tick locations in data coordinates.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\axis.py",
    "ast_data": "FunctionDef name:get_majorticklocs arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "cryptography",
    "name": "add_extension",
    "source_code": "def add_extension(self, extval: ExtensionType, critical: bool) -> CertificateRevocationListBuilder:\n    if not isinstance(extval, ExtensionType):\n        raise TypeError('extension must be an ExtensionType')\n    extension = Extension(extval.oid, critical, extval)\n    _reject_duplicate_extension(extension, self._extensions)\n    return CertificateRevocationListBuilder(self._issuer_name, self._last_update, self._next_update, [*self._extensions, extension], self._revoked_certificates)",
    "docstring": "Adds an X.509 extension to the certificate revocation list.",
    "type": "method",
    "file_path": "cryptography\\src\\cryptography\\x509\\base.py",
    "ast_data": "FunctionDef name:add_extension arg:self arg:extval arg:critical arguments arg arg arg If Call Raise Call Assign Call Call Return return:yes Call"
  },
  {
    "library": "kornia",
    "name": "RandomBoxBlur",
    "source_code": "class RandomBoxBlur(IntensityAugmentationBase2D):\n\n    def __init__(self, kernel_size: Tuple[int, int]=(3, 3), border_type: str='reflect', normalized: bool=True, same_on_batch: bool=False, p: float=0.5, keepdim: bool=False) -> None:\n        super().__init__(p=p, same_on_batch=same_on_batch, p_batch=1.0, keepdim=keepdim)\n        self.flags = {'kernel_size': kernel_size, 'border_type': border_type, 'normalized': normalized}\n\n    def apply_transform(self, input: Tensor, params: Dict[str, Tensor], flags: Dict[str, Any], transform: Optional[Tensor]=None) -> Tensor:\n        return box_blur(input, flags['kernel_size'], flags['border_type'], flags['normalized'])",
    "docstring": "Add random blur with a box filter to an image tensor. .. image:: _static/img/RandomBoxBlur.png Args: kernel_size: the blurring kernel size. border_type: the padding mode to be applied before convolving. The expected modes are: `kornia.filters.box_blur`. Examples: >>> img = torch.ones(1, 1, 24, 24) >>> out = RandomBoxBlur((7, 7))(img) >>> out.shape torch.Size([1, 1, 24, 24]) To apply the exact augmenation again, you may take the advantage of the previous parameter state: >>> input = torch.randn(1, 3, 32, 32) >>> aug = RandomBoxBlur((7, 7), p=1.) >>> (aug(input) == aug(input, params=aug._params)).all() tensor(True)",
    "type": "class",
    "file_path": "kornia\\kornia\\augmentation\\_2d\\intensity\\box_blur.py",
    "ast_data": "ClassDef name:RandomBoxBlur FunctionDef name:__init__ arg:self arg:kernel_size arg:border_type arg:normalized arg:same_on_batch arg:p arg:keepdim arguments arg arg arg arg arg arg arg Call Call Assign FunctionDef name:apply_transform arg:self arg:input arg:params arg:flags arg:transform arguments arg arg arg arg arg Return return:yes Call"
  },
  {
    "library": "cherrypy",
    "name": "redirect",
    "source_code": "def redirect(url='', internal=True, debug=False):\n    if debug:\n        cherrypy.log('Redirecting %sto: %s' % ({True: 'internal ', False: ''}[internal], url), 'TOOLS.REDIRECT')\n    if internal:\n        raise cherrypy.InternalRedirect(url)\n    else:\n        raise cherrypy.HTTPRedirect(url)",
    "docstring": "Raise InternalRedirect or HTTPRedirect to the given url.",
    "type": "function",
    "file_path": "cherrypy\\cherrypy\\lib\\cptools.py",
    "ast_data": "FunctionDef name:redirect arg:url arg:internal arg:debug arguments arg arg arg If Call If Raise Call Raise Call"
  },
  {
    "library": "pandas",
    "name": "_get_colors_from_colormap",
    "source_code": "def _get_colors_from_colormap(colormap: str | Colormap, num_colors: int) -> list[Color]:\n    cmap = _get_cmap_instance(colormap)\n    return [cmap(num) for num in np.linspace(0, 1, num=num_colors)]",
    "docstring": "Get colors from colormap.",
    "type": "function",
    "file_path": "pandas\\pandas\\plotting\\_matplotlib\\style.py",
    "ast_data": "FunctionDef name:_get_colors_from_colormap arg:colormap arg:num_colors arguments arg arg Assign Call Return return:yes Call Call"
  },
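The same sampling idea can be sketched directly against matplotlib's public colormap registry (this is an illustration, not the pandas-internal API):

```python
import numpy as np
import matplotlib

# sample five evenly spaced RGBA colors from a named colormap
cmap = matplotlib.colormaps["viridis"]
colors = [cmap(x) for x in np.linspace(0, 1, num=5)]
print(colors[0])  # RGBA 4-tuple at the dark end of viridis
```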
  {
    "library": "pytorch",
    "name": "patch_object",
    "source_code": "def patch_object(obj: object, name: str, value: object) -> object:\n    if isinstance(obj, ConfigModule):\n        return obj.patch(name, value)\n    return mock.patch.object(obj, name, value)",
    "docstring": "Workaround issue with ConfigModule",
    "type": "function",
    "file_path": "pytorch\\torch\\utils\\_config_module.py",
    "ast_data": "FunctionDef name:patch_object arg:obj arg:name arg:value arguments arg arg arg If Call Return return:yes Call Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "rfft",
    "source_code": "def rfft(x, n=None, axis=-1, overwrite_x=False):\n    return _pocketfft.rfft_fftpack(x, n, axis, None, overwrite_x)",
    "docstring": "Discrete Fourier transform of a real sequence. Parameters ---------- x : array_like, real-valued The data to transform. n : int, optional Defines the length of the Fourier transform. If is not specified (the default) then `xxscipy.fft.rfft`. Examples -------- >>> from scipy.fftpack import fft, rfft >>> a = [9, -9, 1, 3] >>> fft(a) array([ 4. +0.j, 8.+12.j, 16. +0.j, 8.-12.j]) >>> rfft(a) array([ 4., 8., 12., 16.])",
    "type": "function",
    "file_path": "scipy\\scipy\\fftpack\\_basic.py",
    "ast_data": "FunctionDef name:rfft arg:x arg:n arg:axis arg:overwrite_x arguments arg arg arg arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "bias_add",
    "source_code": "@tf_export('nn.bias_add')\n@dispatch.add_dispatch_support\ndef bias_add(value, bias, data_format=None, name=None):\n    with ops.name_scope(name, 'BiasAdd', [value, bias]) as name:\n        if data_format is not None:\n            if data_format.startswith('NC'):\n                data_format = 'NCHW'\n            elif data_format.startswith('N') and data_format.endswith('C'):\n                data_format = 'NHWC'\n            else:\n                raise ValueError(f'`data_format` must be of the form `N...C` or `NC...`. Received: data_format={data_format}')\n        if not context.executing_eagerly():\n            value = ops.convert_to_tensor(value, name='input')\n            bias = ops.convert_to_tensor(bias, dtype=value.dtype, name='bias')\n        return gen_nn_ops.bias_add(value, bias, data_format=data_format, name=name)",
    "docstring": "Adds to . This is (mostly) a special case of where is restricted to 1-D. Broadcasting is supported, so may have any number of dimensions. Unlike , the type of is allowed to differ from in the case where both types are quantized. Args: value: A with type , , , , , , , , or . bias: A 1-D with size matching the channel dimension of . Must be the same type as unless is a quantized type, in which case a different quantized type may be used. data_format: A string. 'N...C' and 'NC...' are supported. If (the default) is specified then 'N..C' is assumed. name: A name for the operation (optional). Returns: A with the same type as . Raises: ValueError if data format is unrecognized, if has less than two dimensions when is 'N..C'/ or has less then three dimensions when is , if does not have exactly one dimension (is a vector), or if the size of does not match the size of the channel dimension of .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\nn_ops.py",
    "ast_data": "FunctionDef name:bias_add arg:value arg:bias arg:data_format arg:name arguments arg arg arg arg With Call If Compare If Call Assign If BoolOp Call Call Assign Raise Call If Call Assign Call Assign Call Return return:yes Call Call"
  },
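The core of the operation is a broadcast add over the channel dimension; here is a minimal NumPy sketch of the 'N...C' case:

```python
import numpy as np

value = np.zeros((2, 3, 4), dtype=np.float32)            # N...C layout, C == 4
bias = np.array([1.0, 2.0, 3.0, 4.0], dtype=np.float32)  # 1-D, size matches C
out = value + bias   # bias broadcasts along the last (channel) axis
print(out[0, 0])     # [1. 2. 3. 4.]
```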
  {
    "library": "pandas",
    "name": "__dlpack_device__",
    "source_code": "def __dlpack_device__(self) -> tuple[DlpackDeviceType, int | None]:\n    return (DlpackDeviceType.CPU, None)",
    "docstring": "Device type and device ID for where the data in the buffer resides.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\interchange\\buffer.py",
    "ast_data": "FunctionDef name:__dlpack_device__ arg:self arguments arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_element_size",
    "source_code": "def _element_size(dtype):\n    if not isinstance(dtype, torch.dtype):\n        raise RuntimeError(f'expected torch.dtype, but got {type(dtype)}')\n    if dtype.is_complex:\n        return torch.finfo(dtype).bits >> 2\n    elif dtype.is_floating_point:\n        return torch.finfo(dtype).bits >> 3\n    elif dtype == torch.bool:\n        return 1\n    else:\n        return torch.iinfo(dtype).bits >> 3",
    "docstring": "Returns the element size for a dtype, in bytes",
    "type": "function",
    "file_path": "pytorch\\torch\\_utils.py",
    "ast_data": "FunctionDef name:_element_size arg:dtype arguments arg If Call Raise Call Call If Return return:yes Call If Return return:yes Call If Compare Return return:yes Return return:yes Call"
  },
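The bit-shift arithmetic above can be checked against concrete dtypes. A sketch, assuming that `torch.finfo` of a complex dtype reports the bits of its real component (which is why the complex branch shifts by 2 instead of 3):

```python
import torch

assert torch.finfo(torch.float32).bits >> 3 == 4    # float32   -> 4 bytes
assert torch.iinfo(torch.int16).bits >> 3 == 2      # int16     -> 2 bytes
assert torch.finfo(torch.complex64).bits >> 2 == 8  # complex64 -> 8 bytes
```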
  {
    "library": "django",
    "name": "get_child_arguments",
    "source_code": "def get_child_arguments():\n    import __main__\n    py_script = Path(sys.argv[0])\n    exe_entrypoint = py_script.with_suffix('.exe')\n    args = [sys.executable] + ['-W%s' % o for o in sys.warnoptions]\n    if sys.implementation.name in ('cpython', 'pypy'):\n        args.extend((f'-X{key}' if value is True else f'-X{key}={value}' for key, value in sys._xoptions.items()))\n    if getattr(__main__, '__spec__', None) is not None and (not exe_entrypoint.exists()):\n        spec = __main__.__spec__\n        if (spec.name == '__main__' or spec.name.endswith('.__main__')) and spec.parent:\n            name = spec.parent\n        else:\n            name = spec.name\n        args += ['-m', name]\n        args += sys.argv[1:]\n    elif not py_script.exists():\n        if exe_entrypoint.exists():\n            return [exe_entrypoint, *sys.argv[1:]]\n        script_entrypoint = py_script.with_name('%s-script.py' % py_script.name)\n        if script_entrypoint.exists():\n            return [*args, script_entrypoint, *sys.argv[1:]]\n        raise RuntimeError('Script %s does not exist.' % py_script)\n    else:\n        args += sys.argv\n    return args",
    "docstring": "Return the executable. This contains a workaround for Windows if the executable is reported to not have the .exe extension which can cause bugs on reloading.",
    "type": "function",
    "file_path": "django\\django\\utils\\autoreload.py",
    "ast_data": "FunctionDef name:get_child_arguments arguments Assign Call Assign Call Assign If Compare Call Compare Call If BoolOp Compare Call Call Assign If BoolOp BoolOp Compare Call Assign Assign If Call If Call Return return:yes Assign Call If Call Return return:yes Raise Call Return return:yes"
  },
  {
    "library": "pandas",
    "name": "is_float_dtype",
    "source_code": "def is_float_dtype(arr_or_dtype) -> bool:\n    return _is_dtype_type(arr_or_dtype, classes(np.floating)) or _is_dtype(arr_or_dtype, lambda typ: isinstance(typ, ExtensionDtype) and typ.kind in 'f')",
    "docstring": "Check whether the provided array or dtype is of a float dtype. The function checks for floating-point data types, which represent real numbers that may have fractional components. Parameters ---------- arr_or_dtype : array-like or dtype The array or dtype to check. Returns ------- boolean Whether or not the array or dtype is of a float dtype. See Also -------- api.types.is_numeric_dtype : Check whether the provided array or dtype is of a numeric dtype. api.types.is_integer_dtype : Check whether the provided array or dtype is of an integer dtype. api.types.is_object_dtype : Check whether an array-like or dtype is of the object dtype. Examples -------- >>> from pandas.api.types import is_float_dtype >>> is_float_dtype(str) False >>> is_float_dtype(int) False >>> is_float_dtype(float) True >>> is_float_dtype(np.array([\"a\", \"b\"])) False >>> is_float_dtype(pd.Series([1, 2])) False >>> is_float_dtype(pd.Index([1, 2.0])) True",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\dtypes\\common.py",
    "ast_data": "FunctionDef name:is_float_dtype arg:arr_or_dtype arguments arg Return return:yes BoolOp Call Call Call arguments arg BoolOp Call Compare"
  },
  {
    "library": "django",
    "name": "get_template",
    "source_code": "def get_template(self, template_name):\n    raise NotImplementedError('subclasses of BaseEngine must provide a get_template() method')",
    "docstring": "Load and return a template for the given name. Raise TemplateDoesNotExist if no such template exists.",
    "type": "method",
    "file_path": "django\\django\\template\\backends\\base.py",
    "ast_data": "FunctionDef name:get_template arg:self arg:template_name arguments arg arg Raise Call"
  },
  {
    "library": "matplotlib",
    "name": "_draw_polygon",
    "source_code": "def _draw_polygon(self):\n    self._draw_polygon_without_update()\n    self.update()",
    "docstring": "Redraw the polygon based on the new vertex positions.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\widgets.py",
    "ast_data": "FunctionDef name:_draw_polygon arg:self arguments arg Call Call"
  },
  {
    "library": "matplotlib",
    "name": "_register_style",
    "source_code": "def _register_style(style_list, cls=None, *, name=None):\n    if cls is None:\n        return functools.partial(_register_style, style_list, name=name)\n    style_list[name or cls.__name__.lower()] = cls\n    return cls",
    "docstring": "Class decorator that stashes a class in a (style) dictionary.",
    "type": "function",
    "file_path": "matplotlib\\lib\\matplotlib\\patches.py",
    "ast_data": "FunctionDef name:_register_style arg:style_list arg:cls arguments arg arg arg If Compare Return return:yes Call Assign BoolOp Call Return return:yes"
  },
  {
    "library": "django",
    "name": "extend",
    "source_code": "def extend(self, vals):\n    self[len(self):] = vals",
    "docstring": "Standard list extend method",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\geos\\mutable_list.py",
    "ast_data": "FunctionDef name:extend arg:self arg:vals arguments arg arg Assign Call"
  },
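The one-liner above relies on Python's slice assignment; the same trick works on a plain list:

```python
vals = [1, 2]
vals[len(vals):] = [3, 4]   # assign into the empty tail slice == extend
print(vals)                 # [1, 2, 3, 4]
```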
  {
    "library": "scipy",
    "name": "SphereBivariateSpline",
    "source_code": "class SphereBivariateSpline(_BivariateSplineBase):\n\n    def __call__(self, theta, phi, dtheta=0, dphi=0, grid=True):\n        theta = np.asarray(theta)\n        phi = np.asarray(phi)\n        if theta.size > 0 and (theta.min() < 0.0 or theta.max() > np.pi):\n            raise ValueError('requested theta out of bounds.')\n        return _BivariateSplineBase.__call__(self, theta, phi, dx=dtheta, dy=dphi, grid=grid)\n\n    def ev(self, theta, phi, dtheta=0, dphi=0):\n        return self.__call__(theta, phi, dtheta=dtheta, dphi=dphi, grid=False)",
    "docstring": "Bivariate spline s(x,y) of degrees 3 on a sphere, calculated from a given set of data points (theta,phi,r). .. versionadded:: 0.11.0 See Also -------- bisplrep : a function to find a bivariate B-spline representation of a surface bisplev : a function to evaluate a bivariate B-spline and its derivatives UnivariateSpline : a smooth univariate spline to fit a given set of data points. SmoothBivariateSpline : a smoothing bivariate spline through the given points LSQUnivariateSpline : a univariate spline using weighted least-squares fitting",
    "type": "class",
    "file_path": "scipy\\scipy\\interpolate\\_fitpack2.py",
    "ast_data": "ClassDef name:SphereBivariateSpline FunctionDef name:__call__ arg:self arg:theta arg:phi arg:dtheta arg:dphi arg:grid arguments arg arg arg arg arg arg Assign Call Assign Call If BoolOp Compare BoolOp Compare Call Compare Call Raise Call Return return:yes Call FunctionDef name:ev arg:self arg:theta arg:phi arg:dtheta arg:dphi arguments arg arg arg arg arg Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "_repr_png_",
    "source_code": "def _repr_png_(self):\n    X = np.tile(np.linspace(0, 1, _REPR_PNG_SIZE[0]), (_REPR_PNG_SIZE[1], 1))\n    pixels = self(X, bytes=True)\n    png_bytes = io.BytesIO()\n    title = self.name + ' colormap'\n    author = f'Matplotlib v{mpl.__version__}, https://matplotlib.org'\n    pnginfo = PngInfo()\n    pnginfo.add_text('Title', title)\n    pnginfo.add_text('Description', title)\n    pnginfo.add_text('Author', author)\n    pnginfo.add_text('Software', author)\n    Image.fromarray(pixels).save(png_bytes, format='png', pnginfo=pnginfo)\n    return png_bytes.getvalue()",
    "docstring": "Generate a PNG representation of the Colormap.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\colors.py",
    "ast_data": "FunctionDef name:_repr_png_ arg:self arguments arg Assign Call Call Assign Call Assign Call Assign Assign Assign Call Call Call Call Call Call Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "average",
    "source_code": "def average(self, var):\n    return self._averages.get(var.ref(), None)",
    "docstring": "Returns the holding the average of . Args: var: A object. Returns: A object or if the moving average of is not maintained.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\training\\moving_averages.py",
    "ast_data": "FunctionDef name:average arg:self arg:var arguments arg arg Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "indexing",
    "source_code": "def indexing(self, index: sympy.Expr, *, dense_indexing=False, copy_shape=None, override_mask=None, block_ptr=False):\n    return super().indexing(index, dense_indexing=False, copy_shape=self.template_out, override_mask=self.template_mask, block_ptr=block_ptr)",
    "docstring": "Override the default indexing to use our custom mask and force dense indexing.",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\select_algorithm.py",
    "ast_data": "FunctionDef name:indexing arg:self arg:index arguments arg arg arg arg arg arg Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "op",
    "source_code": "@property\ndef op(self) -> ops.Operation:\n    return self._variable.op",
    "docstring": "The of this variable.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\ref_variable.py",
    "ast_data": "FunctionDef name:op arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_batch_reduce_destination",
    "source_code": "def _batch_reduce_destination(x):\n    if isinstance(x, tensor_lib.Tensor):\n        return x.device\n    else:\n        return x",
    "docstring": "Returns the destinations for batch all-reduce.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\distribute_lib.py",
    "ast_data": "FunctionDef name:_batch_reduce_destination arg:x arguments arg If Call Return return:yes Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "SequentialSampler",
    "source_code": "class SequentialSampler(Sampler[int]):\n    data_source: Sized\n\n    def __init__(self, data_source: Sized) -> None:\n        self.data_source = data_source\n\n    def __iter__(self) -> Iterator[int]:\n        return iter(range(len(self.data_source)))\n\n    def __len__(self) -> int:\n        return len(self.data_source)",
    "docstring": "Samples elements sequentially, always in the same order. Args: data_source (Dataset): dataset to sample from",
    "type": "class",
    "file_path": "pytorch\\torch\\utils\\data\\sampler.py",
    "ast_data": "ClassDef name:SequentialSampler FunctionDef name:__init__ arg:self arg:data_source arguments arg arg Assign FunctionDef name:__iter__ arg:self arguments arg Return return:yes Call Call Call FunctionDef name:__len__ arg:self arguments arg Return return:yes Call"
  },
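A short usage sketch against the public API:

```python
import torch
from torch.utils.data import SequentialSampler, TensorDataset

dataset = TensorDataset(torch.arange(4))
sampler = SequentialSampler(dataset)
print(list(sampler))   # [0, 1, 2, 3] -- indices, always in the same order
```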
  {
    "library": "tensorflow",
    "name": "_assert_valid_dtypes",
    "source_code": "def _assert_valid_dtypes(self, tensors):\n    valid_dtypes = self._valid_dtypes()\n    for t in tensors:\n        dtype = t.dtype.base_dtype\n        if dtype not in valid_dtypes:\n            raise ValueError('Invalid type %r for %s, expected: %s.' % (dtype, t.name, [v for v in valid_dtypes]))",
    "docstring": "Asserts tensors are all valid types (see ). Args: tensors: Tensors to check. Raises: ValueError: If any tensor is not a valid type.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\training\\optimizer.py",
    "ast_data": "FunctionDef name:_assert_valid_dtypes arg:self arg:tensors arguments arg arg Assign Call For Assign If Compare Raise Call"
  },
  {
    "library": "pytorch",
    "name": "_patch_wrapped_functions",
    "source_code": "def _patch_wrapped_functions(patcher: _Patcher):\n    for (_, name), frame_dict in _wrapped_fns_to_patch.copy().items():\n        if name not in frame_dict and hasattr(builtins, name):\n            orig_fn = getattr(builtins, name)\n        else:\n            orig_fn = frame_dict[name]\n        patcher.patch(frame_dict, name, _create_wrapped_func(orig_fn))\n    for cls, name in _wrapped_methods_to_patch:\n        patcher.patch_method(cls, name, _create_wrapped_method(cls, name))",
    "docstring": "Go through `_create_wrapped_func` wrapper.",
    "type": "function",
    "file_path": "pytorch\\torch\\fx\\_symbolic_trace.py",
    "ast_data": "FunctionDef name:_patch_wrapped_functions arg:patcher arguments arg For Call Call If BoolOp Compare Call Assign Call Assign Call Call For Call Call"
  },
  {
    "library": "pandas",
    "name": "extend_blocks",
    "source_code": "def extend_blocks(result, blocks=None) -> list[Block]:\n    if blocks is None:\n        blocks = []\n    if isinstance(result, list):\n        for r in result:\n            if isinstance(r, list):\n                blocks.extend(r)\n            else:\n                blocks.append(r)\n    else:\n        assert isinstance(result, Block), type(result)\n        blocks.append(result)\n    return blocks",
    "docstring": "return a new extended blocks, given the result",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\internals\\blocks.py",
    "ast_data": "FunctionDef name:extend_blocks arg:result arg:blocks arguments arg arg If Compare Assign If Call For If Call Call Call Call Call Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "autoscale_None",
    "source_code": "def autoscale_None(self, A):\n    if A is None:\n        raise TypeError('You must first set_array for mappable')\n    self.norm.autoscale_None(A)",
    "docstring": "Autoscale the scalar limits on the norm instance using the current array, changing only limits that are None",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\colorizer.py",
    "ast_data": "FunctionDef name:autoscale_None arg:self arg:A arguments arg arg If Compare Raise Call Call"
  },
  {
    "library": "django",
    "name": "keep_lazy",
    "source_code": "def keep_lazy(*resultclasses):\n    if not resultclasses:\n        raise TypeError('You must pass at least one argument to keep_lazy().')\n\n    def decorator(func):\n        lazy_func = lazy(func, *resultclasses)\n\n        @wraps(func)\n        def wrapper(*args, **kwargs):\n            if any((isinstance(arg, Promise) for arg in itertools.chain(args, kwargs.values()))):\n                return lazy_func(*args, **kwargs)\n            return func(*args, **kwargs)\n        return wrapper\n    return decorator",
    "docstring": "A decorator that allows a function to be called with one or more lazy arguments. If none of the args are lazy, the function is evaluated immediately, otherwise a __proxy__ is returned that will evaluate the function when needed.",
    "type": "function",
    "file_path": "django\\django\\utils\\functional.py",
    "ast_data": "FunctionDef name:keep_lazy arguments arg If Raise Call FunctionDef name:decorator arg:func arguments arg Assign Call FunctionDef name:wrapper arguments arg arg If Call Call Call Call Return return:yes Call Return return:yes Call Call Return return:yes Return return:yes"
  },
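Typical usage mirrors the documented pattern: decorate a string utility so lazily translated arguments stay lazy (the utility function here is hypothetical):

```python
from django.utils.functional import keep_lazy
from django.utils.translation import gettext_lazy as _

@keep_lazy(str)
def shout(s):
    return s.upper()   # hypothetical string utility

result = shout(_("hello"))  # lazy arg: returns a __proxy__, evaluated on str(result)
plain = shout("hello")      # no lazy args: evaluated immediately -> 'HELLO'
```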
  {
    "library": "scikit-learn",
    "name": "predict_proba",
    "source_code": "@available_if(_estimator_has('predict_proba'))\ndef predict_proba(self, X):\n    _check_is_fitted(self)\n    estimator = getattr(self, 'estimator_', self.estimator)\n    return estimator.predict_proba(X)",
    "docstring": "Predict class probabilities for using the fitted estimator. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) Training vectors, where is the number of samples and is the number of features. Returns ------- probabilities : ndarray of shape (n_samples, n_classes) The class probabilities of the input samples.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\model_selection\\_classification_threshold.py",
    "ast_data": "FunctionDef name:predict_proba arg:self arg:X arguments arg arg Call Assign Call Return return:yes Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "_padding_can_be_fused",
    "source_code": "def _padding_can_be_fused():\n    current_node = V.graph.current_node\n    if current_node is None:\n        return True\n    users = tuple(current_node.users)\n    if len(users) == 1 and users[0].target in (aten.mm.default, aten.addmm.default):\n        return False\n    return True",
    "docstring": "Conservatively check if padding can be fused with downstream op. 1. if the downstream op is a sum, then there is little benefit to do inplace padding 2. if the downstream op is a matmul, doing inplace padding can save membw.",
    "type": "function",
    "file_path": "pytorch\\torch\\_inductor\\lowering.py",
    "ast_data": "FunctionDef name:_padding_can_be_fused arguments Assign If Compare Return return:yes Assign Call If BoolOp Compare Call Compare Return return:yes Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_convert_variables_to_tensors",
    "source_code": "def _convert_variables_to_tensors(self):\n    components = self._type_spec._to_components(self)\n    tensor_components = variable_utils.convert_variables_to_tensors(components)\n    return self._type_spec._from_components(tensor_components)",
    "docstring": "Recursively converts ResourceVariables in the LinearOperator to Tensors. The usage of violates the contract of , since it is called on a different nested structure (one containing only s) than specifies (one that may contain s). Since 's method just passes the contents of the nested structure to to rebuild the operator, and any that may be instantiated with may also be instantiated with s, this usage is valid. Returns: tensor_operator: with all internal Variables converted to Tensors.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\linalg\\linear_operator.py",
    "ast_data": "FunctionDef name:_convert_variables_to_tensors arg:self arguments arg Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "Thunk",
    "source_code": "class Thunk(Generic[R]):\n    f: Optional[Callable[[], R]]\n    r: Optional[R]\n    __slots__ = ['f', 'r']\n\n    def __init__(self, f: Callable[[], R]):\n        self.f = f\n        self.r = None\n\n    def force(self) -> R:\n        if self.f is None:\n            return self.r\n        self.r = self.f()\n        self.f = None\n        return self.r",
    "docstring": "A simple lazy evaluation implementation that lets you delay execution of a function. It properly handles releasing the function once it is forced.",
    "type": "class",
    "file_path": "pytorch\\torch\\utils\\_thunk.py",
    "ast_data": "ClassDef name:Thunk Assign FunctionDef name:__init__ arg:self arg:f arguments arg arg Assign Assign FunctionDef name:force arg:self arguments arg If Compare Return return:yes Assign Call Assign Return return:yes"
  },
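A brief usage sketch (note the module is private, per the file path above):

```python
from torch.utils._thunk import Thunk

def expensive():
    print("computing...")
    return 42

t = Thunk(expensive)   # nothing runs yet
print(t.force())       # prints "computing..." then 42
print(t.force())       # 42 again; the callable was released after the first force
```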
  {
    "library": "django",
    "name": "geos",
    "source_code": "def geos(self, query):\n    from django.contrib.gis.geos import Point\n    return Point(self.lon_lat(query), srid=4326)",
    "docstring": "Return a GEOS Point object for the given query.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\geoip2.py",
    "ast_data": "FunctionDef name:geos arg:self arg:query arguments arg arg Return return:yes Call Call"
  },
  {
    "library": "django",
    "name": "arg_byref",
    "source_code": "def arg_byref(args, offset=-1):\n    return args[offset]._obj.value",
    "docstring": "Return the pointer argument's by-reference value.",
    "type": "function",
    "file_path": "django\\django\\contrib\\gis\\gdal\\prototypes\\errcheck.py",
    "ast_data": "FunctionDef name:arg_byref arg:args arg:offset arguments arg arg Return return:yes"
  },
  {
    "library": "pandas",
    "name": "describe_categorical",
    "source_code": "@property\ndef describe_categorical(self):\n    if not self.dtype[0] == DtypeKind.CATEGORICAL:\n        raise TypeError('describe_categorical only works on a column with categorical dtype!')\n    return {'is_ordered': self._col.cat.ordered, 'is_dictionary': True, 'categories': PandasColumn(pd.Series(self._col.cat.categories))}",
    "docstring": "If the dtype is categorical, there are two options: - There are only values in the data buffer. - There is a separate non-categorical Column encoding for categorical values. Raises TypeError if the dtype is not categorical Content of returned dict: - \"is_ordered\" : bool, whether the ordering of dictionary indices is semantically meaningful. - \"is_dictionary\" : bool, whether a dictionary-style mapping of categorical values to other objects exists - \"categories\" : Column representing the (implicit) mapping of indices to category values (e.g. an array of cat1, cat2, ...). None if not a dictionary-style categorical.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\interchange\\column.py",
    "ast_data": "FunctionDef name:describe_categorical arg:self arguments arg If Compare Raise Call Return return:yes Call Call"
  },
  {
    "library": "pandas",
    "name": "asof_locs",
    "source_code": "def asof_locs(self, where: Index, mask: npt.NDArray[np.bool_]) -> npt.NDArray[np.intp]:\n    locs = self._values[mask].searchsorted(where._values, side='right')\n    locs = np.where(locs > 0, locs - 1, 0)\n    result = np.arange(len(self), dtype=np.intp)[mask].take(locs)\n    first_value = self._values[mask.argmax()]\n    result[(locs == 0) & (where._values < first_value)] = -1\n    return result",
    "docstring": "Return the locations (indices) of labels in the index. As in the :meth:, if the label (a particular entry in `pandas.Index.asof` to ignore certain values in the index during calculation. >>> mask[1] = False >>> idx.asof_locs(where, mask) array([-1, 0, 0])",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\indexes\\base.py",
    "ast_data": "FunctionDef name:asof_locs arg:self arg:where arg:mask arguments arg arg arg Assign Call Assign Call Compare Assign Call Call Call Assign Call Assign Compare Compare Return return:yes"
  },
  {
    "library": "django",
    "name": "save",
    "source_code": "def save(self, commit=True):\n    if self.errors:\n        raise ValueError(\"The %s could not be %s because the data didn't validate.\" % (self.instance._meta.object_name, 'created' if self.instance._state.adding else 'changed'))\n    if commit:\n        self.instance.save()\n        self._save_m2m()\n    else:\n        self.save_m2m = self._save_m2m\n    return self.instance",
    "docstring": "Save this form's self.instance object if commit=True. Otherwise, add a save_m2m() method to the form which can be called after the instance is saved manually at a later time. Return the model instance.",
    "type": "method",
    "file_path": "django\\django\\forms\\models.py",
    "ast_data": "FunctionDef name:save arg:self arg:commit arguments arg arg If Raise Call If Call Call Assign Return return:yes"
  },
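The commit=False branch enables the familiar two-step pattern (the model form and field names here are hypothetical):

```python
form = ArticleForm(request.POST)        # hypothetical ModelForm
if form.is_valid():
    article = form.save(commit=False)   # instance built but not written to the DB
    article.author = request.user       # fill fields the form does not cover
    article.save()
    form.save_m2m()                     # only exists because commit=False was used
```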
  {
    "library": "scikit-learn",
    "name": "get_covariance",
    "source_code": "def get_covariance(self):\n    check_is_fitted(self)\n    cov = np.dot(self.components_.T, self.components_)\n    cov.flat[::len(cov) + 1] += self.noise_variance_\n    return cov",
    "docstring": "Compute data covariance with the FactorAnalysis model. `` Returns ------- cov : ndarray of shape (n_features, n_features) Estimated covariance of data.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\decomposition\\_factor_analysis.py",
    "ast_data": "FunctionDef name:get_covariance arg:self arguments arg Call Assign Call Call Return return:yes"
  },
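The covariance model can be reconstructed by hand and checked against the method (a sketch):

```python
import numpy as np
from sklearn.decomposition import FactorAnalysis

X = np.random.RandomState(0).randn(100, 5)
fa = FactorAnalysis(n_components=2, random_state=0).fit(X)

# cov = components_.T @ components_ + diag(noise_variance_)
cov = fa.components_.T @ fa.components_ + np.diag(fa.noise_variance_)
assert np.allclose(cov, fa.get_covariance())
```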
  {
    "library": "pytorch",
    "name": "register_state_dict_post_hook",
    "source_code": "def register_state_dict_post_hook(self, hook: Callable[['Optimizer', StateDict], Optional[StateDict]], prepend: bool=False) -> RemovableHandle:\n    handle = hooks.RemovableHandle(self._optimizer_state_dict_post_hooks)\n    self._optimizer_state_dict_post_hooks[handle.id] = hook\n    if prepend:\n        self._optimizer_state_dict_post_hooks.move_to_end(handle.id, last=False)\n    return handle",
    "docstring": "Register a state dict post-hook which will be called after :meth: is called. It should have the following signature:: hook(optimizer, state_dict) -> state_dict or None The hook will be called with arguments `torch.utils.hooks.RemoveableHandle`",
    "type": "method",
    "file_path": "pytorch\\torch\\optim\\optimizer.py",
    "ast_data": "FunctionDef name:register_state_dict_post_hook arg:self arg:hook arg:prepend arguments arg arg arg Assign Call Assign If Call Return return:yes"
  },
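A minimal usage sketch (the hook body and extra key are hypothetical):

```python
import torch

param = torch.nn.Parameter(torch.zeros(1))
opt = torch.optim.SGD([param], lr=0.1)

def add_marker(optimizer, state_dict):
    state_dict["_marker"] = 1   # hypothetical post-processing of the state dict
    return state_dict

handle = opt.register_state_dict_post_hook(add_marker)
sd = opt.state_dict()       # hook runs after the state dict is built
assert sd["_marker"] == 1
handle.remove()             # unregister via the returned RemovableHandle
```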
  {
    "library": "tensorflow",
    "name": "OriginInfo",
    "source_code": "class OriginInfo(collections.namedtuple('OriginInfo', ('loc', 'function_name', 'source_code_line', 'comment'))):\n\n    def as_frame(self):\n        return (self.loc.filename, self.loc.lineno, self.function_name, self.source_code_line)\n\n    def __repr__(self):\n        if self.loc.filename:\n            return '{}:{}:{}'.format(os.path.split(self.loc.filename)[1], self.loc.lineno, self.loc.col_offset)\n        return '<no file>:{}:{}'.format(self.loc.lineno, self.loc.col_offset)",
    "docstring": "Container for information about the source code before conversion. Attributes: loc: Location function_name: Optional[Text] source_code_line: Text comment: Optional[Text]",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\autograph\\pyct\\origin_info.py",
    "ast_data": "ClassDef name:OriginInfo Call FunctionDef name:as_frame arg:self arguments arg Return return:yes FunctionDef name:__repr__ arg:self arguments arg If Return return:yes Call Call Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "set_positions",
    "source_code": "def set_positions(self, positions):\n    if positions is None:\n        positions = []\n    if np.ndim(positions) != 1:\n        raise ValueError('positions must be one-dimensional')\n    lineoffset = self.get_lineoffset()\n    linelength = self.get_linelength()\n    pos_idx = 0 if self.is_horizontal() else 1\n    segments = np.empty((len(positions), 2, 2))\n    segments[:, :, pos_idx] = np.sort(positions)[:, None]\n    segments[:, 0, 1 - pos_idx] = lineoffset + linelength / 2\n    segments[:, 1, 1 - pos_idx] = lineoffset - linelength / 2\n    self.set_segments(segments)",
    "docstring": "Set the positions of the events.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\collections.py",
    "ast_data": "FunctionDef name:set_positions arg:self arg:positions arguments arg arg If Compare Assign If Compare Call Raise Call Assign Call Assign Call Assign Call Assign Call Call Assign Call Assign Assign Call"
  },
  {
    "library": "kornia",
    "name": "Points2D",
    "source_code": "class Points2D:\n\n    def __init__(self, points_2d: Tensor, camera_ids: List[int]) -> None:\n        self._points_2d = points_2d\n        self._camera_ids = camera_ids\n\n    @property\n    def points_2d(self) -> Tensor:\n        return self._points_2d\n\n    @property\n    def camera_ids(self) -> List[int]:\n        return self._camera_ids",
    "docstring": "A class to hold ray 2d pixel coordinates and a camera id for each. Args: points_2d: tensor with ray pixel coordinates (the coordinates in the image plane that correspond to the ray):math: camera_ids: list of camera ids for each pixel coordinates: List[int]",
    "type": "class",
    "file_path": "kornia\\kornia\\nerf\\samplers.py",
    "ast_data": "ClassDef name:Points2D FunctionDef name:__init__ arg:self arg:points_2d arg:camera_ids arguments arg arg arg Assign Assign FunctionDef name:points_2d arg:self arguments arg Return return:yes FunctionDef name:camera_ids arg:self arguments arg Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "_rescale",
    "source_code": "def _rescale(self):\n    for widget in self.winfo_children():\n        if isinstance(widget, (tk.Button, tk.Checkbutton)):\n            if hasattr(widget, '_image_file'):\n                NavigationToolbar2Tk._set_image_for_button(self, widget)\n            else:\n                pass\n        elif isinstance(widget, tk.Frame):\n            widget.configure(height='18p')\n            widget.pack_configure(padx='3p')\n        elif isinstance(widget, tk.Label):\n            pass\n        else:\n            _log.warning('Unknown child class %s', widget.winfo_class)\n    self._label_font.configure(size=10)",
    "docstring": "Scale all children of the toolbar to current DPI setting. Before this is called, the Tk scaling setting will have been updated to match the new DPI. Tk widgets do not update for changes to scaling, but all measurements made after the change will match the new scaling. Thus this function re-applies all the same sizes in points, which Tk will scale correctly to pixels.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\backends\\_backend_tk.py",
    "ast_data": "FunctionDef name:_rescale arg:self arguments arg For Call If Call If Call Call If Call Call Call If Call Call Call"
  },
  {
    "library": "matplotlib",
    "name": "ConstrainedLayoutEngine",
    "source_code": "class ConstrainedLayoutEngine(LayoutEngine):\n    _adjust_compatible = False\n    _colorbar_gridspec = False\n\n    def __init__(self, *, h_pad=None, w_pad=None, hspace=None, wspace=None, rect=(0, 0, 1, 1), compress=False, **kwargs):\n        super().__init__(**kwargs)\n        self.set(w_pad=mpl.rcParams['figure.constrained_layout.w_pad'], h_pad=mpl.rcParams['figure.constrained_layout.h_pad'], wspace=mpl.rcParams['figure.constrained_layout.wspace'], hspace=mpl.rcParams['figure.constrained_layout.hspace'], rect=(0, 0, 1, 1))\n        self.set(w_pad=w_pad, h_pad=h_pad, wspace=wspace, hspace=hspace, rect=rect)\n        self._compress = compress\n\n    def execute(self, fig):\n        width, height = fig.get_size_inches()\n        w_pad = self._params['w_pad'] / width\n        h_pad = self._params['h_pad'] / height\n        return do_constrained_layout(fig, w_pad=w_pad, h_pad=h_pad, wspace=self._params['wspace'], hspace=self._params['hspace'], rect=self._params['rect'], compress=self._compress)\n\n    def set(self, *, h_pad=None, w_pad=None, hspace=None, wspace=None, rect=None):\n        for td in self.set.__kwdefaults__:\n            if locals()[td] is not None:\n                self._params[td] = locals()[td]",
    "docstring": "Implements the `constrainedlayout_guide` for details.",
    "type": "class",
    "file_path": "matplotlib\\lib\\matplotlib\\layout_engine.py",
    "ast_data": "ClassDef name:ConstrainedLayoutEngine Assign Assign FunctionDef name:__init__ arg:self arguments arg arg arg arg arg arg arg arg Call Call Call Call Assign FunctionDef name:execute arg:self arg:fig arguments arg arg Assign Call Assign Assign Return return:yes Call FunctionDef name:set arg:self arguments arg arg arg arg arg arg For If Compare Call Assign Call"
  },
  {
    "library": "tensorflow",
    "name": "_distribution_strategy_scope",
    "source_code": "@contextlib.contextmanager\ndef _distribution_strategy_scope(self):\n    if self._distribution_strategy and (not distribute_lib.has_strategy()):\n        with self._distribution_strategy.scope():\n            yield self._distribution_strategy.scope()\n    else:\n        yield",
    "docstring": "Returns the this optimizer was created under.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\optimizer_v2\\optimizer_v2.py",
    "ast_data": "FunctionDef name:_distribution_strategy_scope arg:self arguments arg If BoolOp Call With Call Call"
  },
  {
    "library": "tensorflow",
    "name": "APIAnalysisSpec",
    "source_code": "class APIAnalysisSpec:\n    pass",
    "docstring": "This class defines how s should be generated. It specifies how to map imports and symbols to s. This class must provide the following fields: * : maps function names to s * : maps imports represented as (full module name, alias) tuples to s notifications) For an example, see .",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\tools\\compatibility\\ast_edits.py",
    "ast_data": "ClassDef name:APIAnalysisSpec"
  },
  {
    "library": "pandas",
    "name": "_trim_zeros_single_float",
    "source_code": "def _trim_zeros_single_float(str_float: str) -> str:\n    str_float = str_float.rstrip('0')\n    if str_float.endswith('.'):\n        str_float += '0'\n    return str_float",
    "docstring": "Trims trailing zeros after a decimal point, leaving just one if necessary.",
    "type": "function",
    "file_path": "pandas\\pandas\\io\\formats\\format.py",
    "ast_data": "FunctionDef name:_trim_zeros_single_float arg:str_float arguments arg Assign Call If Call Return return:yes"
  },
  {
    "library": "kornia",
    "name": "project_points_z1",
    "source_code": "def project_points_z1(points_in_camera: Tensor) -> Tensor:\n    KORNIA_CHECK_SHAPE(points_in_camera, ['*', '3'])\n    return points_in_camera[..., :2] / points_in_camera[..., 2:3]",
    "docstring": "Project one or more points from the camera frame into the canonical z=1 plane through perspective division. .. math:: \\begin{bmatrix} u \\\\ v \\\\ w \\end{bmatrix} = \\begin{bmatrix} x \\\\ y \\\\ z \\end{bmatrix} / z .. note:: This function has a precondition that the points are in front of the camera, i.e. z > 0. If this is not the case, the points will be projected to the canonical plane, but the resulting points will be behind the camera and causing numerical issues for z == 0. Args: points_in_camera: Tensor representing the points to project with shape (..., 3). Returns: Tensor representing the projected points with shape (..., 2). Example: >>> points = torch.tensor([1., 2., 3.]) >>> project_points_z1(points) tensor([0.3333, 0.6667])",
    "type": "function",
    "file_path": "kornia\\kornia\\geometry\\camera\\projection_z1.py",
    "ast_data": "FunctionDef name:project_points_z1 arg:points_in_camera arguments arg Call Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "_gs_decorrelation",
    "source_code": "def _gs_decorrelation(w, W, j):\n    w -= np.linalg.multi_dot([w, W[:j].T, W[:j]])\n    return w",
    "docstring": "Orthonormalize w wrt the first j rows of W. Parameters ---------- w : ndarray of shape (n,) Array to be orthogonalized W : ndarray of shape (p, n) Null space definition j : int < p The no of (from the first) rows of Null space W wrt which w is orthogonalized. Notes ----- Assumes that W is orthogonal w changed in place",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\decomposition\\_fastica.py",
    "ast_data": "FunctionDef name:_gs_decorrelation arg:w arg:W arg:j arguments arg arg arg Call Return return:yes"
  },
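The update is a single projection-removal step; a NumPy sketch verifying the orthogonality it produces:

```python
import numpy as np

rng = np.random.default_rng(0)
W = np.linalg.qr(rng.standard_normal((3, 3)))[0]  # rows are orthonormal
w = rng.standard_normal(3)
j = 2

# subtract the projections of w onto the first j rows of W
w = w - np.linalg.multi_dot([w, W[:j].T, W[:j]])
print(W[:j] @ w)   # ~[0, 0]: w is now orthogonal to those rows
```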
  {
    "library": "django",
    "name": "only",
    "source_code": "def only(self, *fields):\n    self._not_support_combined_queries('only')\n    if self._fields is not None:\n        raise TypeError('Cannot call only() after .values() or .values_list()')\n    if fields == (None,):\n        raise TypeError('Cannot pass None as an argument to only().')\n    for field in fields:\n        field = field.split(LOOKUP_SEP, 1)[0]\n        if field in self.query._filtered_relations:\n            raise ValueError('only() is not supported with FilteredRelation.')\n    clone = self._chain()\n    clone.query.add_immediate_loading(fields)\n    return clone",
    "docstring": "Essentially, the opposite of defer(). Only the fields passed into this method and that are not already specified as deferred are loaded immediately when the queryset is evaluated.",
    "type": "method",
    "file_path": "django\\django\\db\\models\\query.py",
    "ast_data": "FunctionDef name:only arg:self arguments arg arg Call If Compare Raise Call If Compare Raise Call For Assign Call If Compare Raise Call Assign Call Call Return return:yes"
  },
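A typical call looks like the following (the Entry model and its fields are hypothetical):

```python
entries = Entry.objects.only("headline", "pub_date")
first = entries[0]
first.headline   # loaded immediately with the queryset
first.body       # deferred: triggers an extra query on access
```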
  {
    "library": "pandas",
    "name": "AppendableMultiSeriesTable",
    "source_code": "class AppendableMultiSeriesTable(AppendableSeriesTable):\n    pandas_kind = 'series_table'\n    table_type = 'appendable_multiseries'\n\n    def write(self, obj, **kwargs) -> None:\n        name = obj.name or 'values'\n        newobj, self.levels = self.validate_multiindex(obj)\n        assert isinstance(self.levels, list)\n        cols = list(self.levels)\n        cols.append(name)\n        newobj.columns = Index(cols)\n        super().write(obj=newobj, **kwargs)",
    "docstring": "support the new appendable table formats",
    "type": "class",
    "file_path": "pandas\\pandas\\io\\pytables.py",
    "ast_data": "ClassDef name:AppendableMultiSeriesTable Assign Assign FunctionDef name:write arg:self arg:obj arguments arg arg arg Assign BoolOp Assign Call Call Assign Call Call Assign Call Call Call"
  },
  {
    "library": "numpy",
    "name": "as_apply",
    "source_code": "def as_apply(func, *args, **kwargs):\n    return Expr(Op.APPLY, (func, tuple(map(as_expr, args)), {k: as_expr(v) for k, v in kwargs.items()}))",
    "docstring": "Return object as APPLY expression (function call, constructor, etc.)",
    "type": "function",
    "file_path": "numpy\\numpy\\f2py\\symbolic.py",
    "ast_data": "FunctionDef name:as_apply arg:func arguments arg arg arg Return return:yes Call Call Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "transpose_input",
    "source_code": "def transpose_input(from_cudnn):\n    order = 'F' if from_cudnn else 'C'\n\n    def transform(kernel):\n        return kernel.T.reshape(kernel.shape, order=order)\n    return transform",
    "docstring": "Makes a function that transforms input kernels from/to CuDNN format. It keeps the shape, but changes between the layout (Fortran/C). Eg.: It can be passed to . Args: from_cudnn: if source weights are in CuDNN format, if they're in plain Keras format. Returns: Function that converts input kernel to the other format.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\saving\\hdf5_format.py",
    "ast_data": "FunctionDef name:transpose_input arg:from_cudnn arguments arg Assign FunctionDef name:transform arg:kernel arguments arg Return return:yes Call Return return:yes"
  },
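The shape-preserving layout change is easiest to see on a tiny NumPy kernel; this sketch reproduces the from-CuDNN direction:

```python
import numpy as np

def transform_from_cudnn(kernel):
    # transpose, then reinterpret in Fortran order while keeping the shape
    return kernel.T.reshape(kernel.shape, order="F")

k = np.array([[0, 1, 2],
              [3, 4, 5]])
print(transform_from_cudnn(k))   # [[0 2 4]
                                 #  [1 3 5]]
```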
  {
    "library": "scikit-learn",
    "name": "fit",
    "source_code": "@_fit_context(prefer_skip_nested_validation=True)\ndef fit(self, X, y=None):\n    validate_data(self, X, accept_sparse='csr')\n    return self",
    "docstring": "Only validates estimator's parameters. This method allows to: (i) validate the estimator's parameters and (ii) be consistent with the scikit-learn transformer API. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) The data to estimate the normalization parameters. y : Ignored Not used, present here for API consistency by convention. Returns ------- self : object Fitted transformer.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\preprocessing\\_data.py",
    "ast_data": "FunctionDef name:fit arg:self arg:X arg:y arguments arg arg arg Call Return return:yes Call"
  },
  {
    "library": "django",
    "name": "getquoted",
    "source_code": "def getquoted(self):\n    if self.is_geometry:\n        return b'%s(%s)' % (b'ST_GeogFromWKB' if self.geography else b'ST_GeomFromEWKB', sql.quote(self.ewkb).encode())\n    else:\n        return b\"'%s'::raster\" % self.ewkb.hex().encode()",
    "docstring": "Return a properly quoted string for use in PostgreSQL/PostGIS.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\db\\backends\\postgis\\adapter.py",
    "ast_data": "FunctionDef name:getquoted arg:self arguments arg If Return return:yes Call Call Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "prepare_softmax_replacement",
    "source_code": "def prepare_softmax_replacement(x, dim):\n    from torch._inductor.inductor_prims import prepare_softmax_online\n    xmax, xsum = prepare_softmax_online(x, dim)\n    xsub = x - xmax\n    return (xmax, xsum, xsub, xsub.exp())",
    "docstring": "Return xsub since otherwise log-softmax can not be matched due to a use of this intermediate node. Same reason to return xsub.exp() for softmax.",
    "type": "function",
    "file_path": "pytorch\\torch\\_inductor\\fx_passes\\post_grad.py",
    "ast_data": "FunctionDef name:prepare_softmax_replacement arg:x arg:dim arguments arg arg Assign Call Assign Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "mark_inset",
    "source_code": "@_docstring.interpd\ndef mark_inset(parent_axes, inset_axes, loc1, loc2, **kwargs):\n    rect = _TransformedBboxWithCallback(inset_axes.viewLim, parent_axes.transData, callback=parent_axes._unstale_viewLim)\n    kwargs.setdefault('fill', bool({'fc', 'facecolor', 'color'}.intersection(kwargs)))\n    pp = BboxPatch(rect, **kwargs)\n    parent_axes.add_patch(pp)\n    p1 = BboxConnector(inset_axes.bbox, rect, loc1=loc1, **kwargs)\n    inset_axes.add_patch(p1)\n    p1.set_clip_on(False)\n    p2 = BboxConnector(inset_axes.bbox, rect, loc1=loc2, **kwargs)\n    inset_axes.add_patch(p2)\n    p2.set_clip_on(False)\n    return (pp, p1, p2)",
    "docstring": "Draw a box to mark the location of an area represented by an inset axes. This function draws a box in *parent_axes* at the bounding box of *inset_axes*, and shows a connection with the inset axes by drawing lines at the corners, giving a \"zoomed in\" effect. Parameters ---------- parent_axes : Axes which contains the area of the inset axes. inset_axes : The inset axes. loc1, loc2 : {1, 2, 3, 4} Corners to use for connecting the inset axes and the area in the parent axes. **kwargs Patch properties for the lines and box drawn: %(Patch:kwdoc)s Returns ------- pp : The patch drawn to represent the area of the inset axes. p1, p2 : The patches connecting two corners of the inset axes and its area.",
    "type": "function",
    "file_path": "matplotlib\\lib\\mpl_toolkits\\axes_grid1\\inset_locator.py",
    "ast_data": "FunctionDef name:mark_inset arg:parent_axes arg:inset_axes arg:loc1 arg:loc2 arguments arg arg arg arg arg Assign Call Call Call Call Assign Call Call Assign Call Call Call Assign Call Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "serialize_hoo_outputs",
    "source_code": "def serialize_hoo_outputs(self, node: torch.fx.Node) -> list[Argument]:\n    meta_val = node.meta['val']\n    if isinstance(meta_val, tuple):\n        outputs = []\n        for i, element_meta_val in enumerate(meta_val):\n            user_node = self._output_node_at_index(node, i)\n            if isinstance(element_meta_val, list):\n                assert user_node is not None\n                tensors = []\n                for j, m in enumerate(element_meta_val):\n                    if not isinstance(m, torch.Tensor):\n                        raise SerializeError(f'Serialize list output with type {type(m)} nyi')\n                    name = self._output_node_name_at_index(user_node, j)\n                    tensors.append(self.serialize_tensor_output(name, m))\n                outputs.append(Argument.create(as_tensors=tensors))\n            else:\n                name = user_node.name if user_node is not None else f'{node.name}_unused_{i}'\n                outputs.append(self.serialize_output(name, element_meta_val))\n        return outputs\n    else:\n        return [self.serialize_output(node.name, meta_val)]",
    "docstring": "For serializing HOO outputs since HOOs do not have a schema.",
    "type": "method",
    "file_path": "pytorch\\torch\\_export\\serde\\serialize.py",
    "ast_data": "FunctionDef name:serialize_hoo_outputs arg:self arg:node arguments arg arg Assign If Call Assign For Call Assign Call If Call Compare Assign For Call If Call Raise Call Call Assign Call Call Call Call Call Assign Compare Call Call Return return:yes Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "_fire_reducer_autograd_hook",
    "source_code": "def _fire_reducer_autograd_hook(self, idx, *unused):\n    self.reducer._autograd_hook(idx)",
    "docstring": "Fire the reducer's autograd hook to allreduce params in a Reducer bucket. Note that this is only used during mixed precision training as the Reducer's hooks installed during construction time would not be called as we're working in the low precision parameter setting.",
    "type": "method",
    "file_path": "pytorch\\torch\\nn\\parallel\\distributed.py",
    "ast_data": "FunctionDef name:_fire_reducer_autograd_hook arg:self arg:idx arguments arg arg arg Call"
  },
  {
    "library": "scipy",
    "name": "_expm_multiply_interval_core_2",
    "source_code": "def _expm_multiply_interval_core_2(A, X, h, mu, m_star, s, q, tol):\n    d = q // s\n    j = q // d\n    r = q - d * j\n    input_shape = X.shape[1:]\n    K_shape = (m_star + 1,) + input_shape\n    K = np.empty(K_shape, dtype=X.dtype)\n    for i in range(j + 1):\n        Z = X[i * d]\n        K[0] = Z\n        high_p = 0\n        if i < j:\n            effective_d = d\n        else:\n            effective_d = r\n        for k in range(1, effective_d + 1):\n            F = K[0]\n            c1 = _exact_inf_norm(F)\n            for p in range(1, m_star + 1):\n                if p == high_p + 1:\n                    K[p] = h * A.dot(K[p - 1]) / float(p)\n                    high_p = p\n                coeff = float(pow(k, p))\n                F = F + coeff * K[p]\n                inf_norm_K_p_1 = _exact_inf_norm(K[p])\n                c2 = coeff * inf_norm_K_p_1\n                if c1 + c2 <= tol * _exact_inf_norm(F):\n                    break\n                c1 = c2\n            X[k + i * d] = np.exp(k * h * mu) * F\n    return (X, 2)",
    "docstring": "A helper function, for the case q > s and q % s > 0.",
    "type": "function",
    "file_path": "scipy\\scipy\\sparse\\linalg\\_expm_multiply.py",
    "ast_data": "FunctionDef name:_expm_multiply_interval_core_2 arg:A arg:X arg:h arg:mu arg:m_star arg:s arg:q arg:tol arguments arg arg arg arg arg arg arg arg Assign Assign Assign Assign Assign Assign Call For Call Assign Assign Assign If Compare Assign Assign For Call Assign Assign Call For Call If Compare Assign Call Call Assign Assign Call Call Assign Assign Call Assign If Compare Call Assign Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "on_value_event",
    "source_code": "def on_value_event(self, event):\n    raise NotImplementedError('on_value_event() is not implemented in the base servicer class')",
    "docstring": "Callback for Event proto received through the gRPC stream. This Event proto carries a Tensor in its summary.value[0] field. Args: event: The Event proto from the stream to be processed.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\debug\\lib\\grpc_debug_server.py",
    "ast_data": "FunctionDef name:on_value_event arg:self arg:event arguments arg arg Raise Call"
  },
  {
    "library": "pytorch",
    "name": "get_growth_interval",
    "source_code": "def get_growth_interval(self) -> int:\n    return self._growth_interval",
    "docstring": "Return a Python int containing the growth interval.",
    "type": "method",
    "file_path": "pytorch\\torch\\amp\\grad_scaler.py",
    "ast_data": "FunctionDef name:get_growth_interval arg:self arguments arg Return return:yes"
  },
  {
    "library": "kornia",
    "name": "euclidean_distance",
    "source_code": "def euclidean_distance(x: Tensor, y: Tensor, keepdim: bool=False, eps: float=1e-06) -> Tensor:\n    KORNIA_CHECK_SHAPE(x, ['*', 'N'])\n    KORNIA_CHECK_SHAPE(y, ['*', 'N'])\n    return (x - y + eps).pow(2).sum(-1, keepdim).sqrt()",
    "docstring": "Compute the Euclidean distance between two set of n-dimensional points. More: Args: x: first set of points of shape :math:. y: second set of points of shape :math:. keepdim: whether to keep the dimension after reduction. eps: small value to have numerical stability.",
    "type": "function",
    "file_path": "kornia\\kornia\\geometry\\linalg.py",
    "ast_data": "FunctionDef name:euclidean_distance arg:x arg:y arg:keepdim arg:eps arguments arg arg arg arg Call Call Return return:yes Call Call Call"
  },
  {
    "library": "scikit-learn",
    "name": "_find_smallest_angle",
    "source_code": "def _find_smallest_angle(query, vectors):\n    abs_cosine = np.abs(query.dot(vectors))\n    index = np.argmax(abs_cosine)\n    return index",
    "docstring": "Find the column of vectors that is most aligned with the query. Both query and the columns of vectors must have their l2 norm equal to 1. Parameters ---------- query : ndarray of shape (n_samples,) Normalized query vector. vectors : ndarray of shape (n_samples, n_features) Vectors to which we compare query, as columns. Must be normalized.",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\linear_model\\_ridge.py",
    "ast_data": "FunctionDef name:_find_smallest_angle arg:query arg:vectors arguments arg arg Assign Call Call Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "unwrap",
    "source_code": "@doc_controls.do_not_doc_inheritable\n@deprecated(None, 'use `experimental_local_results` instead.')\ndef unwrap(self, value):\n    return self._extended._local_results(value)",
    "docstring": "Returns the list of all local per-replica values contained in . DEPRECATED: Please use instead. Note: This only returns values on the workers initiated by this client. When using a like , each worker will be its own client, and this function will only return values computed on that worker. Args: value: A value returned by , , or a variable created in . Returns: A tuple of values contained in . If represents a single value, this returns",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\distribute_lib.py",
    "ast_data": "FunctionDef name:unwrap arg:self arg:value arguments arg arg Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "reset",
    "source_code": "@abc.abstractmethod\ndef reset(self, checkpoint_id: Union[str, os.PathLike, None]=None) -> None:\n    ...",
    "docstring": "Calls to indicates a brand new checkpoint read is going to happen. A checkpoint_id may be present if users set the checkpoint_id for this checkpoint read. The meaning of the checkpiont_id is storage-dependent. It can be a path to a folder/file or a key for a key-value storage. Args: checkpoint_id (Union[str, os.PathLike, None]): The ID of this checkpoint instance. The meaning of the checkpoint_id depends on the storage. It can be a path to a folder or to a file. It can also be a key if the storage is more like a key-value store. (Default: ``)",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\checkpoint\\storage.py",
    "ast_data": "FunctionDef name:reset arg:self arg:checkpoint_id arguments arg arg"
  },
  {
    "library": "numpy",
    "name": "join",
    "source_code": "def join(self, seq):\n    return join(self, seq)",
    "docstring": "Return a string which is the concatenation of the strings in the sequence . See Also -------- char.join",
    "type": "method",
    "file_path": "numpy\\numpy\\_core\\defchararray.py",
    "ast_data": "FunctionDef name:join arg:self arg:seq arguments arg arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "ConvBnReLU1d",
    "source_code": "class ConvBnReLU1d(ConvBn1d):\n    _FLOAT_MODULE: ClassVar[type[nn.Module]] = nni.ConvBnReLU1d\n    _FLOAT_CONV_MODULE: ClassVar[type[nn.Conv1d]] = nn.Conv1d\n    _FLOAT_BN_MODULE: ClassVar[type[nn.BatchNorm1d]] = nn.BatchNorm1d\n    _FLOAT_RELU_MODULE: ClassVar[Optional[type[nn.Module]]] = nn.ReLU\n    _FUSED_FLOAT_MODULE: ClassVar[Optional[type[nn.Module]]] = nni.ConvReLU1d\n\n    def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, groups=1, bias=None, padding_mode='zeros', eps=1e-05, momentum=0.1, freeze_bn=False, qconfig=None):\n        super().__init__(in_channels, out_channels, kernel_size, stride, padding, dilation, groups, bias, padding_mode, eps, momentum, freeze_bn, qconfig)\n\n    def forward(self, input):\n        return F.relu(self._forward(input))\n\n    @classmethod\n    def from_float(cls, mod, use_precomputed_fake_quant=False):\n        return super().from_float(mod, use_precomputed_fake_quant)",
    "docstring": "A ConvBnReLU1d module is a module fused from Conv1d, BatchNorm1d and ReLU, attached with FakeQuantize modules for weight, used in quantization aware training. We combined the interface of :class: and :class: and :class:. Similar to , with FakeQuantize modules initialized to default. Attributes: weight_fake_quant: fake quant module for weight",
    "type": "class",
    "file_path": "pytorch\\torch\\ao\\nn\\intrinsic\\qat\\modules\\conv_fused.py",
    "ast_data": "ClassDef name:ConvBnReLU1d FunctionDef name:__init__ arg:self arg:in_channels arg:out_channels arg:kernel_size arg:stride arg:padding arg:dilation arg:groups arg:bias arg:padding_mode arg:eps arg:momentum arg:freeze_bn arg:qconfig arguments arg arg arg arg arg arg arg arg arg arg arg arg arg arg Call Call FunctionDef name:forward arg:self arg:input arguments arg arg Return return:yes Call Call FunctionDef name:from_float arg:cls arg:mod arg:use_precomputed_fake_quant arguments arg arg arg Return return:yes Call Call"
  },
  {
    "library": "scipy",
    "name": "bode",
    "source_code": "def bode(self, w=None, n=100):\n    return bode(self, w=w, n=n)",
    "docstring": "Calculate Bode magnitude and phase data of a continuous-time system. Returns a 3-tuple containing arrays of frequencies [rad/s], magnitude [dB] and phase [deg]. See for details. Examples -------- >>> from scipy import signal >>> import matplotlib.pyplot as plt >>> sys = signal.TransferFunction([1], [1, 1]) >>> w, mag, phase = sys.bode() >>> plt.figure() >>> plt.semilogx(w, mag) # Bode magnitude plot >>> plt.figure() >>> plt.semilogx(w, phase) # Bode phase plot >>> plt.show()",
    "type": "method",
    "file_path": "scipy\\scipy\\signal\\_ltisys.py",
    "ast_data": "FunctionDef name:bode arg:self arg:w arg:n arguments arg arg arg Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "_fill_empty_info",
    "source_code": "def _fill_empty_info(self) -> None:\n    self.add_object_type_line()\n    self.add_index_range_line()\n    self._lines.append(f'Empty {type(self.data).__name__}\\n')",
    "docstring": "Add lines to the info table, pertaining to empty dataframe.",
    "type": "method",
    "file_path": "pandas\\pandas\\io\\formats\\info.py",
    "ast_data": "FunctionDef name:_fill_empty_info arg:self arguments arg Call Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "_load_boolean_flag",
    "source_code": "def _load_boolean_flag(name: str, *, this_will: str, deprecated: bool=False, default: bool=False) -> bool:\n    undefined = os.getenv(name) is None\n    state = os.getenv(name) == '1'\n    if state:\n        if deprecated:\n            logger.error('Experimental flag %s is deprecated. Please remove it from your environment.', name)\n        else:\n            logger.warning('Experimental flag %s is enabled. This will %s.', name, this_will)\n    if undefined:\n        state = default\n    return state",
    "docstring": "Load a boolean flag from environment variable. Args: name: The name of the environment variable. this_will: A string that describes what this flag will do. deprecated: Whether this flag is deprecated. default: The default value if envvar not defined.",
    "type": "function",
    "file_path": "pytorch\\torch\\onnx\\_flags.py",
    "ast_data": "FunctionDef name:_load_boolean_flag arg:name arguments arg arg arg arg Assign Compare Call Assign Compare Call If If Call Call If Assign Return return:yes"
  },
  {
    "library": "cherrypy",
    "name": "format_exc",
    "source_code": "def format_exc(exc=None):\n    try:\n        if exc is None:\n            exc = _exc_info()\n        if exc == (None, None, None):\n            return ''\n        import traceback\n        return ''.join(traceback.format_exception(*exc))\n    finally:\n        del exc",
    "docstring": "Return exc (or sys.exc_info if None), formatted.",
    "type": "function",
    "file_path": "cherrypy\\cherrypy\\_cperror.py",
    "ast_data": "FunctionDef name:format_exc arg:exc arguments arg Try If Compare Assign Call If Compare Return return:yes Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_to_string",
    "source_code": "def _to_string(s):\n    if isinstance(s, _six.binary_type):\n        return s.decode('utf-8')\n    return s",
    "docstring": "Decode s if it is a sequence of bytes.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\compiler\\tensorrt\\trt_convert.py",
    "ast_data": "FunctionDef name:_to_string arg:s arguments arg If Call Return return:yes Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "experimental_from_proto",
    "source_code": "@classmethod\ndef experimental_from_proto(cls, proto: struct_pb2.BoundedTensorSpecProto) -> 'BoundedTensorSpec':\n    return BoundedTensorSpec(shape=tensor_shape.TensorShape.experimental_from_proto(proto.shape), dtype=proto.dtype, minimum=tensor_util.MakeNdarray(proto.minimum), maximum=tensor_util.MakeNdarray(proto.maximum), name=proto.name if proto.name else None)",
    "docstring": "Returns a BoundedTensorSpec instance based on the serialized proto.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\tensor.py",
    "ast_data": "FunctionDef name:experimental_from_proto arg:cls arg:proto arguments arg arg Return return:yes Call Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "AutotuneArgs",
    "source_code": "@dataclasses.dataclass\nclass AutotuneArgs:\n    triton: BenchmarkTensors\n    extern: BenchmarkTensors\n    expected: Optional[torch.Tensor] = None\n\n    def get_benchmark_tensors(self, extern=False) -> BenchmarkTensors:\n        bench_tensors = self.extern if extern else self.triton\n        return bench_tensors\n\n    @classmethod\n    def from_choice_args(cls, example_inputs: list[torch.Tensor], example_inputs_extern: list[torch.Tensor], out: torch.Tensor, out_extern: torch.Tensor, expected: Optional[torch.Tensor]=None) -> Self:\n        return cls(triton=BenchmarkTensors(example_inputs, out), extern=BenchmarkTensors(example_inputs_extern, out_extern), expected=expected)\n\n    def verify(self, **kwargs):\n        torch.testing.assert_close(self.extern.output_tensor, self.expected, **kwargs)",
    "docstring": "During autotuning, we need to pass the same inputs to all choices. Note: Since we typically have a mix of external choices and triton choices, we create two lists of inputs for the same underlying buffers: - External inputs (for aten kernels): Include offset for sliced tensors - Triton inputs: Use base pointer for sliced tensors, without offset",
    "type": "class",
    "file_path": "pytorch\\torch\\_inductor\\select_algorithm.py",
    "ast_data": "ClassDef name:AutotuneArgs FunctionDef name:get_benchmark_tensors arg:self arg:extern arguments arg arg Assign Return return:yes FunctionDef name:from_choice_args arg:cls arg:example_inputs arg:example_inputs_extern arg:out arg:out_extern arg:expected arguments arg arg arg arg arg arg Return return:yes Call Call Call FunctionDef name:verify arg:self arguments arg arg Call"
  },
  {
    "library": "pandas",
    "name": "_transform_doc",
    "source_code": "def _transform_doc(self) -> etree._XSLTResultTree:\n    from lxml.etree import XSLT\n    transformer = XSLT(self.xsl_doc)\n    new_doc = transformer(self.xml_doc)\n    return new_doc",
    "docstring": "Transform original tree using stylesheet. This method will transform original xml using XSLT script into am ideally flatter xml document for easier parsing and migration to Data Frame.",
    "type": "method",
    "file_path": "pandas\\pandas\\io\\xml.py",
    "ast_data": "FunctionDef name:_transform_doc arg:self arguments arg Assign Call Assign Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "get_benchmark_tensors",
    "source_code": "def get_benchmark_tensors(self, extern=False) -> BenchmarkTensors:\n    bench_tensors = self.extern if extern else self.triton\n    return bench_tensors",
    "docstring": "Returns the inputs and output tensors for a given choice.",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\select_algorithm.py",
    "ast_data": "FunctionDef name:get_benchmark_tensors arg:self arg:extern arguments arg arg Assign Return return:yes"
  },
  {
    "library": "numpy",
    "name": "hermpow",
    "source_code": "def hermpow(c, pow, maxpower=16):\n    return pu._pow(hermmul, c, pow, maxpower)",
    "docstring": "Raise a Hermite series to a power. Returns the Hermite series raised to the power . The argument is a sequence of coefficients ordered from low to high. i.e., [1,2,3] is the series `` Parameters ---------- c : array_like 1-D array of Hermite series coefficients ordered from low to high. pow : integer Power to which the series will be raised maxpower : integer, optional Maximum power allowed. This is mainly to limit growth of the series to unmanageable size. Default is 16 Returns ------- coef : ndarray Hermite series of power. See Also -------- hermadd, hermsub, hermmulx, hermmul, hermdiv Examples -------- >>> from numpy.polynomial.hermite import hermpow >>> hermpow([1, 2, 3], 2) array([81., 52., 82., 12., 9.])",
    "type": "function",
    "file_path": "numpy\\numpy\\polynomial\\hermite.py",
    "ast_data": "FunctionDef name:hermpow arg:c arg:pow arg:maxpower arguments arg arg arg Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "restore",
    "source_code": "def restore(self):\n    pass",
    "docstring": "Restore the graphics context from the stack - needed only for backends that save graphics contexts on a stack.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\backend_bases.py",
    "ast_data": "FunctionDef name:restore arg:self arguments arg"
  },
  {
    "library": "pandas",
    "name": "dtypes",
    "source_code": "@property\n@abstractmethod\ndef dtypes(self) -> Iterable[Dtype]:\n    pass",
    "docstring": "Dtypes. Returns ------- dtypes : sequence Dtype of each of the DataFrame's columns (or one series column).",
    "type": "method",
    "file_path": "pandas\\pandas\\io\\formats\\info.py",
    "ast_data": "FunctionDef name:dtypes arg:self arguments arg"
  },
  {
    "library": "tensorflow",
    "name": "close",
    "source_code": "def close(self):\n    with ops.control_dependencies([self.flush()]):\n        with ops.device('cpu:0'):\n            return gen_summary_ops.close_summary_writer(self._resource)",
    "docstring": "See .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\summary_ops_v2.py",
    "ast_data": "FunctionDef name:close arg:self arguments arg With Call Call With Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "should_partition",
    "source_code": "def should_partition(self, node: BaseSchedulerNode) -> bool:\n    if isinstance(node, FusedSchedulerNode):\n        return any((self.should_partition(snode) for snode in node.snodes))\n    if not node.is_gpu():\n        return True\n    if node.node is None:\n        return True\n    if isinstance(node.node, ir.DeviceCopy):\n        return True\n    if isinstance(node.node, ir.Conditional):\n        return True\n    if getattr(node.node, 'unbacked_bindings', None):\n        return True\n    if is_cudagraph_unsafe_op(node.node):\n        return True\n    return False",
    "docstring": "Return True if we should partition the inductor graph on this node",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\scheduler.py",
    "ast_data": "FunctionDef name:should_partition arg:self arg:node arguments arg arg If Call Return return:yes Call Call If Call Return return:yes If Compare Return return:yes If Call Return return:yes If Call Return return:yes If Call Return return:yes If Call Return return:yes Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, max_shard_bytes, max_shards=None, bytes_per_string=16):\n    if max_shard_bytes < 1:\n        raise ValueError(f'Argument `max_shard_bytes` must be positive. Received {max_shard_bytes}')\n    if max_shards and max_shards < 1:\n        raise ValueError(f'Argument `max_shards` must be positive. Received {max_shards}')\n    if bytes_per_string < 1:\n        raise ValueError(f'Argument `bytes_per_string` must be positive. Received: {bytes_per_string}')\n    self._max_shard_bytes = max_shard_bytes\n    self._max_shards = max_shards\n    self._bytes_per_string = bytes_per_string",
    "docstring": "Creates a new . Args: max_shard_bytes: The maximum size any given shard is allowed to be. max_shards: The maximum number of shards in created taking precedence over . bytes_per_string: If the partition value is of type string, this provides an estimate of how large each string is.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\sharded_variable.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:max_shard_bytes arg:max_shards arg:bytes_per_string arguments arg arg arg arg If Compare Raise Call If BoolOp Compare Raise Call If Compare Raise Call Assign Assign Assign"
  },
  {
    "library": "tensorflow",
    "name": "export_constant",
    "source_code": "def export_constant(self, module_name: str, name: str) -> None:\n    module = sys.modules[module_name]\n    api_constants_attr = API_ATTRS[self._api_name].constants\n    api_constants_attr_v1 = API_ATTRS_V1[self._api_name].constants\n    if not hasattr(module, api_constants_attr):\n        setattr(module, api_constants_attr, [])\n    getattr(module, api_constants_attr).append((self._names, name))\n    if not hasattr(module, api_constants_attr_v1):\n        setattr(module, api_constants_attr_v1, [])\n    getattr(module, api_constants_attr_v1).append((self._names_v1, name))",
    "docstring": "Store export information for constants/string literals. Export information is stored in the module where constants/string literals are defined. e.g. Args: module_name: (string) Name of the module to store constant at. name: (string) Current constant name.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\util\\tf_export.py",
    "ast_data": "FunctionDef name:export_constant arg:self arg:module_name arg:name arguments arg arg arg Assign Assign Assign If Call Call Call Call If Call Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_dimensions_compatible",
    "source_code": "@staticmethod\ndef _dimensions_compatible(nrows, nvals, uniform_row_length):\n    nrows = tensor_shape.dimension_value(nrows[0])\n    nvals = tensor_shape.dimension_value(nvals[0])\n    ncols = tensor_shape.dimension_value(uniform_row_length[0])\n    if nrows == 0 and nvals not in (0, None):\n        return False\n    if ncols == 0 and nvals not in (0, None):\n        return False\n    if ncols is not None and nvals is not None:\n        if ncols != 0 and nvals % ncols != 0:\n            return False\n        if nrows is not None and nvals != ncols * nrows:\n            return False\n    return True",
    "docstring": "Returns true if the given dimensions are compatible.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\row_partition.py",
    "ast_data": "FunctionDef name:_dimensions_compatible arg:nrows arg:nvals arg:uniform_row_length arguments arg arg arg Assign Call Assign Call Assign Call If BoolOp Compare Compare Return return:yes If BoolOp Compare Compare Return return:yes If BoolOp Compare Compare If BoolOp Compare Compare Return return:yes If BoolOp Compare Compare Return return:yes Return return:yes"
  },
  {
    "library": "scipy",
    "name": "initialize",
    "source_code": "def initialize(self, n, approx_type):\n    self.first_iteration = True\n    self.n = n\n    self.approx_type = approx_type\n    if approx_type not in ('hess', 'inv_hess'):\n        raise ValueError(\"`approx_type` must be 'hess' or 'inv_hess'.\")\n    if self.approx_type == 'hess':\n        self.B = np.eye(n, dtype=float)\n    else:\n        self.H = np.eye(n, dtype=float)",
    "docstring": "Initialize internal matrix. Allocate internal memory for storing and updating the Hessian or its inverse. Parameters ---------- n : int Problem dimension. approx_type : {'hess', 'inv_hess'} Selects either the Hessian or the inverse Hessian. When set to 'hess' the Hessian will be stored and updated. When set to 'inv_hess' its inverse will be used instead.",
    "type": "method",
    "file_path": "scipy\\scipy\\optimize\\_hessian_update_strategy.py",
    "ast_data": "FunctionDef name:initialize arg:self arg:n arg:approx_type arguments arg arg arg Assign Assign Assign If Compare Raise Call If Compare Assign Call Assign Call"
  },
  {
    "library": "tensorflow",
    "name": "_call",
    "source_code": "def _call(sig, *inputs, **kwargs):\n    if len(inputs) != len(sig.input_arg):\n        raise ValueError(f'Expected {len(sig.input_arg):d} arguments, got {len(inputs):d}.')\n    name = kwargs.pop('name', None)\n    g = ops.get_default_graph()\n    func_name = sig.name\n    if name is None:\n        name = func_name\n    attrs = _parse_kwargs_as_attrs(func_name, **kwargs)\n    output_types = [dtypes.DType(x.type) for x in sig.output_arg]\n    op = g._create_op_internal(func_name, list(inputs), output_types, name=name, attrs=attrs, op_def=sig)\n    if op.outputs:\n        if len(op.outputs) == 1:\n            ret = op.outputs[0]\n        else:\n            ret = tuple(op.outputs)\n    else:\n        ret = op\n    return (ret, op)",
    "docstring": "Adds a node calling a function. This adds a op to the default graph that calls the function of signature , passing the tensors in as arguments. It returns the outputs of the call, which are one or more tensors. is OpDefArg.a object. You can pass an optional keyword parameter to name the added operation. You can pass an optional keyword parameter to instruct the runtime not to inline the function body into the call site. Args: sig: OpDefArg. The signature of the function. *inputs: arguments to the function. **kwargs: Optional keyword arguments. Can only contain 'name' or 'noinline'. Returns: A 2-element tuple. First element: a Tensor if the function returns a single value; a list of Tensors if the function returns multiple value; the Operation if the function returns no values. Second element: the Operation. Raises: ValueError: if the arguments are invalid.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\function.py",
    "ast_data": "FunctionDef name:_call arg:sig arguments arg arg arg If Compare Call Call Raise Call Call Call Assign Call Assign Call Assign If Compare Assign Assign Call Assign Call Assign Call Call If If Compare Call Assign Assign Call Assign Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "_update_selection_poly",
    "source_code": "def _update_selection_poly(self, vmin, vmax):\n    verts = self.poly.xy\n    if self.orientation == 'vertical':\n        verts[0] = verts[4] = (0.25, vmin)\n        verts[1] = (0.25, vmax)\n        verts[2] = (0.75, vmax)\n        verts[3] = (0.75, vmin)\n    else:\n        verts[0] = verts[4] = (vmin, 0.25)\n        verts[1] = (vmin, 0.75)\n        verts[2] = (vmax, 0.75)\n        verts[3] = (vmax, 0.25)",
    "docstring": "Update the vertices of the *self.poly* slider in-place to cover the data range *vmin*, *vmax*.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\widgets.py",
    "ast_data": "FunctionDef name:_update_selection_poly arg:self arg:vmin arg:vmax arguments arg arg arg Assign If Compare Assign Assign Assign Assign Assign Assign Assign Assign"
  },
  {
    "library": "pytorch",
    "name": "dequantize_per_tensor_tensor",
    "source_code": "@impl(quantized_decomposed_lib, 'dequantize_per_tensor.tensor', 'CompositeExplicitAutograd')\ndef dequantize_per_tensor_tensor(input: torch.Tensor, scale: torch.Tensor, zero_point: torch.Tensor, quant_min: int, quant_max: int, dtype: torch.dtype, *, out_dtype: Optional[torch.dtype]=None) -> torch.Tensor:\n    assert zero_point.numel() == 1, f'Expecting zero_point tensor to be one element, but received : {zero_point.numel()}'\n    assert scale.numel() == 1, f'Expecting scale tensor to be one element, but received : {scale.numel()}'\n    return dequantize_per_tensor(input, scale.item(), zero_point.item(), quant_min, quant_max, dtype, out_dtype=out_dtype)",
    "docstring": "Affine dequantization for the Tensor using the same quantization parameters to map from quantized values to floating point values Same as but scale and zero_point are Scalar Tensor instead of scalar values",
    "type": "function",
    "file_path": "pytorch\\torch\\ao\\quantization\\fx\\_decomposed.py",
    "ast_data": "FunctionDef name:dequantize_per_tensor_tensor arg:input arg:scale arg:zero_point arg:quant_min arg:quant_max arg:dtype arguments arg arg arg arg arg arg arg Compare Call Call Compare Call Call Return return:yes Call Call Call Call"
  },
  {
    "library": "matplotlib",
    "name": "demo",
    "source_code": "@staticmethod\ndef demo():\n    import matplotlib.pyplot as plt\n    fig = plt.figure(figsize=(4, 1.2))\n    ax = fig.add_axes([0, 0, 1, 0.8])\n    ax.set_title('Cap style')\n    for x, style in enumerate(['butt', 'round', 'projecting']):\n        ax.text(x + 0.25, 0.85, style, ha='center')\n        xx = [x, x + 0.5]\n        yy = [0, 0]\n        ax.plot(xx, yy, lw=12, color='tab:blue', solid_capstyle=style)\n        ax.plot(xx, yy, lw=1, color='black')\n        ax.plot(xx, yy, 'o', color='tab:red', markersize=3)\n    ax.set_ylim(-0.5, 1.5)\n    ax.set_axis_off()\n    fig.show()",
    "docstring": "Demonstrate how each CapStyle looks for a thick line segment.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\_enums.py",
    "ast_data": "FunctionDef name:demo arguments Assign Call Assign Call Call For Call Call Assign Assign Call Call Call Call Call Call"
  },
  {
    "library": "sphinx",
    "name": "footnote_spot",
    "source_code": "def footnote_spot(tree: nodes.document) -> tuple[Element, int]:\n    fns = list(tree.findall(nodes.footnote))\n    if fns:\n        fn = fns[-1]\n        return (fn.parent, fn.parent.index(fn) + 1)\n    for node in tree.findall(nodes.rubric):\n        if len(node) == 1 and node.astext() == FOOTNOTES_RUBRIC_NAME:\n            return (node.parent, node.parent.index(node) + 1)\n    doc = next(tree.findall(nodes.document))\n    rub = nodes.rubric()\n    rub.append(nodes.Text(FOOTNOTES_RUBRIC_NAME))\n    doc.append(rub)\n    return (doc, doc.index(rub) + 1)",
    "docstring": "Find or create a spot to place footnotes. The function returns the tuple (parent, index).",
    "type": "method",
    "file_path": "sphinx\\sphinx\\builders\\_epub_base.py",
    "ast_data": "FunctionDef name:footnote_spot arg:tree arguments arg Assign Call Call If Assign Return return:yes Call For Call If BoolOp Compare Call Compare Call Return return:yes Call Assign Call Call Assign Call Call Call Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "ordering",
    "source_code": "def ordering(signatures):\n    signatures = list(map(tuple, signatures))\n    edges = [(a, b) for a in signatures for b in signatures if edge(a, b)]\n    edges = groupby(first, edges)\n    for s in signatures:\n        if s not in edges:\n            edges[s] = []\n    edges = {k: [b for a, b in v] for k, v in edges.items()}\n    return _toposort(edges)",
    "docstring": "A sane ordering of signatures to check, first to last Topological sort of edges as given by ``",
    "type": "function",
    "file_path": "pytorch\\torch\\fx\\experimental\\unification\\match.py",
    "ast_data": "FunctionDef name:ordering arg:signatures arguments arg Assign Call Call Assign Call Assign Call For If Compare Assign Assign Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "call_module",
    "source_code": "def call_module(args, *, version=4, module, Tout, Sout, platforms=(), function_list=(), has_token_input_output=False, disabled_checks=()):\n    res = gen_xla_ops.xla_call_module(args, version=version, module=module, dim_args_spec=(), Tout=Tout, Sout=Sout, platforms=platforms, function_list=function_list, has_token_input_output=has_token_input_output, disabled_checks=disabled_checks)\n    if isinstance(res, ops.Operation):\n        res = ()\n    return res",
    "docstring": "See documentation for the XlaCallModule op.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\compiler\\tf2xla\\python\\xla.py",
    "ast_data": "FunctionDef name:call_module arg:args arguments arg arg arg arg arg arg arg arg arg Assign Call If Call Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "get_zeros_dtype",
    "source_code": "def get_zeros_dtype(t):\n    if t.dtype == dtypes.resource:\n        handle_data = resource_variable_ops.get_eager_safe_handle_data(t)\n        if handle_data is None or not handle_data.is_set or len(handle_data.shape_and_type) != 1:\n            raise ValueError('Internal error: Tried to take gradients (or similar) of a variable without handle data:\\n%s' % str(t))\n        return handle_data.shape_and_type[0].dtype\n    return t.dtype",
    "docstring": "Return the dtype for the default gradient for a Tensor.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\default_gradient.py",
    "ast_data": "FunctionDef name:get_zeros_dtype arg:t arguments arg If Compare Assign Call If BoolOp Compare Compare Call Raise Call Call Return return:yes Return return:yes"
  },
  {
    "library": "pandas",
    "name": "_unstack",
    "source_code": "def _unstack(self, unstacker, fill_value, new_placement: npt.NDArray[np.intp], needs_masking: npt.NDArray[np.bool_]):\n    new_values, mask = unstacker.get_new_values(self.values.T, fill_value=fill_value)\n    mask = mask.any(0)\n    new_values = new_values.T[mask]\n    new_placement = new_placement[mask]\n    bp = BlockPlacement(new_placement)\n    blocks = [new_block_2d(new_values, placement=bp)]\n    return (blocks, mask)",
    "docstring": "Return a list of unstacked blocks of self Parameters ---------- unstacker : reshape._Unstacker fill_value : int Only used in ExtensionBlock._unstack new_placement : np.ndarray[np.intp] allow_fill : bool needs_masking : np.ndarray[bool] Returns ------- blocks : list of Block New blocks of unstacked values. mask : array-like of bool The mask of columns of we should keep.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\internals\\blocks.py",
    "ast_data": "FunctionDef name:_unstack arg:self arg:unstacker arg:fill_value arg:new_placement arg:needs_masking arguments arg arg arg arg arg Assign Call Assign Call Assign Assign Assign Call Assign Call Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "GenericKernelMixin",
    "source_code": "class GenericKernelMixin:\n\n    @property\n    def requires_vector_input(self):\n        return False",
    "docstring": "Mixin for kernels which operate on generic objects such as variable- length sequences, trees, and graphs. .. versionadded:: 0.22",
    "type": "class",
    "file_path": "scikit-learn\\sklearn\\gaussian_process\\kernels.py",
    "ast_data": "ClassDef name:GenericKernelMixin FunctionDef name:requires_vector_input arg:self arguments arg Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "data",
    "source_code": "def data(self, text):\n    self.__data.append(text)",
    "docstring": "Add character data to the output stream. Parameters ---------- text : str Character data.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\backends\\backend_svg.py",
    "ast_data": "FunctionDef name:data arg:self arg:text arguments arg arg Call"
  },
  {
    "library": "scipy",
    "name": "Dolan",
    "source_code": "class Dolan(Benchmark):\n\n    def __init__(self, dimensions=5):\n        Benchmark.__init__(self, dimensions)\n        self._bounds = list(zip([-100.0] * self.N, [100.0] * self.N))\n        self.global_optimum = [[-74.10522498, 44.33511286, 6.21069214, 18.42772233, -16.5839403]]\n        self.fglob = 0\n\n    def fun(self, x, *args):\n        self.nfev += 1\n        return abs((x[0] + 1.7 * x[1]) * sin(x[0]) - 1.5 * x[2] - 0.1 * x[3] * cos(x[3] + x[4] - x[0]) + 0.2 * x[4] ** 2 - x[1] - 1)",
    "docstring": "Dolan objective function. This class defines the Dolan [1]_ global optimization problem. This is a multimodal minimization problem defined as follows: .. math:: f_{\\text{Dolan}}(x) = \\lvert (x_1 + 1.7 x_2)\\sin(x_1) - 1.5 x_3 - 0.1 x_4\\cos(x_5 + x_5 - x_1) + 0.2 x_5^2 - x_2 - 1 \\rvert with :math: for :math:. *Global optimum*: :math: for :math: .. [1] Gavana, A. Global Optimization Benchmarks and AMPGO retrieved 2015 TODO Jamil equation is missing the absolute brackets around the entire expression.",
    "type": "class",
    "file_path": "scipy\\benchmarks\\benchmarks\\go_benchmark_functions\\go_funcs_D.py",
    "ast_data": "ClassDef name:Dolan FunctionDef name:__init__ arg:self arg:dimensions arguments arg arg Call Assign Call Call Assign Assign FunctionDef name:fun arg:self arg:x arguments arg arg arg Return return:yes Call Call Call"
  },
  {
    "library": "scikit-learn",
    "name": "partial_fit",
    "source_code": "@_fit_context(prefer_skip_nested_validation=True)\ndef partial_fit(self, X, y=None, sample_weight=None):\n    if not hasattr(self, 'coef_'):\n        self._more_validate_params(for_partial_fit=True)\n    alpha = self.nu / 2\n    return self._partial_fit(X, alpha, C=1.0, loss=self.loss, learning_rate=self.learning_rate, max_iter=1, sample_weight=sample_weight, coef_init=None, offset_init=None)",
    "docstring": "Fit linear One-Class SVM with Stochastic Gradient Descent. Parameters ---------- X : {array-like, sparse matrix}, shape (n_samples, n_features) Subset of the training data. y : Ignored Not used, present for API consistency by convention. sample_weight : array-like, shape (n_samples,), optional Weights applied to individual samples. If not provided, uniform weights are assumed. Returns ------- self : object Returns a fitted instance of self.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\linear_model\\_stochastic_gradient.py",
    "ast_data": "FunctionDef name:partial_fit arg:self arg:X arg:y arg:sample_weight arguments arg arg arg arg If Call Call Assign Return return:yes Call Call"
  },
  {
    "library": "pandas",
    "name": "_putmask",
    "source_code": "def _putmask(self, mask: npt.NDArray[np.bool_], value) -> None:\n    value = self._validate_setitem_value(value)\n    np.putmask(self._ndarray, mask, value)",
    "docstring": "Analogue to np.putmask(self, mask, value) Parameters ---------- mask : np.ndarray[bool] value : scalar or listlike Raises ------ TypeError If value cannot be cast to self.dtype.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\arrays\\_mixins.py",
    "ast_data": "FunctionDef name:_putmask arg:self arg:mask arg:value arguments arg arg arg Assign Call Call"
  },
  {
    "library": "django",
    "name": "NamedValuesListIterable",
    "source_code": "class NamedValuesListIterable(ValuesListIterable):\n\n    def __iter__(self):\n        queryset = self.queryset\n        if queryset._fields:\n            names = queryset._fields\n        else:\n            query = queryset.query\n            names = [*query.extra_select, *query.values_select, *query.annotation_select]\n        tuple_class = create_namedtuple_class(*names)\n        new = tuple.__new__\n        for row in super().__iter__():\n            yield new(tuple_class, row)",
    "docstring": "Iterable returned by QuerySet.values_list(named=True) that yields a namedtuple for each row.",
    "type": "class",
    "file_path": "django\\django\\db\\models\\query.py",
    "ast_data": "ClassDef name:NamedValuesListIterable FunctionDef name:__iter__ arg:self arguments arg Assign If Assign Assign Assign Assign Call Assign For Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "ismethod",
    "source_code": "def ismethod(func):\n    if hasattr(inspect, 'signature'):\n        signature = inspect.signature(func)\n        return signature.parameters.get('self', None) is not None\n    else:\n        spec = inspect.getfullargspec(func)\n        return spec and spec.args and (spec.args[0] == 'self')",
    "docstring": "Is func a method? Note that this has to work as the method is defined but before the class is defined. At this stage methods look like functions.",
    "type": "function",
    "file_path": "pytorch\\torch\\fx\\experimental\\unification\\multipledispatch\\core.py",
    "ast_data": "FunctionDef name:ismethod arg:func arguments arg If Call Assign Call Return return:yes Compare Call Assign Call Return return:yes BoolOp Compare"
  },
  {
    "library": "tensorflow",
    "name": "prefer_static_rank",
    "source_code": "def prefer_static_rank(x):\n    return prefer_static_value(array_ops.rank(x))",
    "docstring": "Return static rank of tensor if available, else . Args: x: (already converted). Returns: Numpy array (if static rank is obtainable), else .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\distributions\\util.py",
    "ast_data": "FunctionDef name:prefer_static_rank arg:x arguments arg Return return:yes Call Call"
  },
  {
    "library": "django",
    "name": "is_nullable",
    "source_code": "def is_nullable(self, field):\n    return field.null or (field.empty_strings_allowed and connections[DEFAULT_DB_ALIAS].features.interprets_empty_strings_as_nulls)",
    "docstring": "Check if the given field should be treated as nullable. Some backends treat '' as null and Django treats such fields as nullable for those backends. In such situations field.null can be False even if we should treat the field as nullable.",
    "type": "method",
    "file_path": "django\\django\\db\\models\\sql\\query.py",
    "ast_data": "FunctionDef name:is_nullable arg:self arg:field arguments arg arg Return return:yes BoolOp BoolOp"
  },
  {
    "library": "pytorch",
    "name": "_full_pre_state_dict_hook",
    "source_code": "@no_type_check\ndef _full_pre_state_dict_hook(fsdp_state: _FSDPState, module: nn.Module, *args, **kwargs) -> None:\n    if getattr(fsdp_state, '_device_mesh', False):\n        _mesh_resources.get_root_mesh(fsdp_state._device_mesh)\n    _common_pre_state_dict_hook(module, fsdp_state)\n    _common_unshard_pre_state_dict_hook(module, fsdp_state, offload_to_cpu=fsdp_state._state_dict_config.offload_to_cpu, rank0_only=cast(FullStateDictConfig, fsdp_state._state_dict_config).rank0_only)",
    "docstring": "Hook that runs before model.state_dict() is called. pre-state_dict hook is not actually supported by ``.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\fsdp\\_state_dict_utils.py",
    "ast_data": "FunctionDef name:_full_pre_state_dict_hook arg:fsdp_state arg:module arguments arg arg arg arg If Call Call Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "_is_compiled",
    "source_code": "def _is_compiled() -> bool:\n    return torch._C._mtia_isBuilt()",
    "docstring": "Return true if compiled with MTIA support.",
    "type": "function",
    "file_path": "pytorch\\torch\\mtia\\__init__.py",
    "ast_data": "FunctionDef name:_is_compiled arguments Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "_read_uint16",
    "source_code": "def _read_uint16(f):\n    return np.uint16(struct.unpack('>H', f.read(4)[2:4])[0])",
    "docstring": "Read an unsigned 16-bit integer",
    "type": "function",
    "file_path": "scipy\\scipy\\io\\_idl.py",
    "ast_data": "FunctionDef name:_read_uint16 arg:f arguments arg Return return:yes Call Call Call"
  },
  {
    "library": "kornia",
    "name": "create_meshgrid",
    "source_code": "def create_meshgrid(height: int, width: int, normalized_coordinates: bool=True, device: Optional[torch.device]=None, dtype: Optional[torch.dtype]=None) -> Tensor:\n    xs: Tensor = torch.linspace(0, width - 1, width, device=device, dtype=dtype)\n    ys: Tensor = torch.linspace(0, height - 1, height, device=device, dtype=dtype)\n    if normalized_coordinates:\n        xs = (xs / (width - 1) - 0.5) * 2\n        ys = (ys / (height - 1) - 0.5) * 2\n    base_grid: Tensor = stack(torch_meshgrid([xs, ys], indexing='ij'), dim=-1)\n    return base_grid.permute(1, 0, 2).unsqueeze(0)",
    "docstring": "Generate a coordinate grid for an image. When the flag `[-1,1]torch.nn.functional.grid_sample[-1,1]torch.nn.functional.grid_sample(1, H, W, 2)`. Example: >>> create_meshgrid(2, 2) tensor([[[[-1., -1.], [ 1., -1.]], [[-1., 1.], [ 1., 1.]]]]) >>> create_meshgrid(2, 2, normalized_coordinates=False) tensor([[[[0., 0.], [1., 0.]], [[0., 1.], [1., 1.]]]])",
    "type": "function",
    "file_path": "kornia\\kornia\\utils\\grid.py",
    "ast_data": "FunctionDef name:create_meshgrid arg:height arg:width arg:normalized_coordinates arg:device arg:dtype arguments arg arg arg arg arg Call Call If Assign Assign Call Call Return return:yes Call Call"
  },
  {
    "library": "scipy",
    "name": "fill_value",
    "source_code": "@property\ndef fill_value(self):\n    return self._fill_value_orig",
    "docstring": "The fill value.",
    "type": "method",
    "file_path": "scipy\\scipy\\interpolate\\_interpolate.py",
    "ast_data": "FunctionDef name:fill_value arg:self arguments arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "build_ArmComputeLibrary",
    "source_code": "def build_ArmComputeLibrary() -> None:\n    print('Building Arm Compute Library')\n    acl_build_flags = ['debug=0', 'neon=1', 'opencl=0', 'os=linux', 'openmp=1', 'cppthreads=0', 'arch=armv8a', 'multi_isa=1', 'fixed_format_kernels=1', 'build=native']\n    acl_install_dir = '/acl'\n    acl_checkout_dir = os.getenv('ACL_SOURCE_DIR', 'ComputeLibrary')\n    if os.path.isdir(acl_install_dir):\n        shutil.rmtree(acl_install_dir)\n    if not os.path.isdir(acl_checkout_dir) or not len(os.listdir(acl_checkout_dir)):\n        check_call(['git', 'clone', 'https://github.com/ARM-software/ComputeLibrary.git', '-b', 'v25.02', '--depth', '1', '--shallow-submodules'])\n    check_call(['scons', 'Werror=1', f'-j{os.cpu_count()}'] + acl_build_flags, cwd=acl_checkout_dir)\n    for d in ['arm_compute', 'include', 'utils', 'support', 'src', 'build']:\n        shutil.copytree(f'{acl_checkout_dir}/{d}', f'{acl_install_dir}/{d}')",
    "docstring": "Using ArmComputeLibrary for aarch64 PyTorch",
    "type": "function",
    "file_path": "pytorch\\.ci\\aarch64_linux\\aarch64_wheel_ci_build.py",
    "ast_data": "FunctionDef name:build_ArmComputeLibrary arguments Call Assign Assign Assign Call If Call Call If BoolOp Call Call Call Call Call Call For Call"
  },
  {
    "library": "seaborn",
    "name": "map_dataframe",
    "source_code": "def map_dataframe(self, func, *args, **kwargs):\n    kw_color = kwargs.pop('color', None)\n    for (row_i, col_j, hue_k), data_ijk in self.facet_data():\n        if not data_ijk.values.size:\n            continue\n        modify_state = not str(func.__module__).startswith('seaborn')\n        ax = self.facet_axis(row_i, col_j, modify_state)\n        kwargs['color'] = self._facet_color(hue_k, kw_color)\n        for kw, val_list in self.hue_kws.items():\n            kwargs[kw] = val_list[hue_k]\n        if self._hue_var is not None:\n            kwargs['label'] = self.hue_names[hue_k]\n        if self._dropna:\n            data_ijk = data_ijk.dropna()\n        kwargs['data'] = data_ijk\n        self._facet_plot(func, ax, args, kwargs)\n    axis_labels = [kwargs.get('x', None), kwargs.get('y', None)]\n    for i, val in enumerate(args[:2]):\n        axis_labels[i] = val\n    self._finalize_grid(axis_labels)\n    return self",
    "docstring": "Like `datamapcolorhuelabelfunc` in the order the variables are specified in the call. kwargs : keyword arguments All keyword arguments are passed to the plotting function. Returns ------- self : object Returns self.",
    "type": "method",
    "file_path": "seaborn\\seaborn\\axisgrid.py",
    "ast_data": "FunctionDef name:map_dataframe arg:self arg:func arguments arg arg arg arg Assign Call For Call If Assign Call Call Assign Call Assign Call For Call Assign If Compare Assign If Assign Call Assign Call Assign Call Call For Call Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_ZetaGrad",
    "source_code": "@ops.RegisterGradient('Zeta')\ndef _ZetaGrad(op: ops.Operation, grad):\n    x = op.inputs[0]\n    q = op.inputs[1]\n    sx = array_ops.shape(x)\n    sq = array_ops.shape(q)\n    unused_rx, rq = gen_array_ops.broadcast_gradient_args(sx, sq)\n    with ops.control_dependencies([grad]):\n        x = math_ops.conj(x)\n        q = math_ops.conj(q)\n        partial_q = -x * math_ops.zeta(x + 1, q)\n        return (None, array_ops.reshape(math_ops.reduce_sum(partial_q * grad, rq), sq))",
    "docstring": "Returns gradient of zeta(x, q) with respect to x and q.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\math_grad.py",
    "ast_data": "FunctionDef name:_ZetaGrad arg:op arg:grad arguments arg arg Assign Assign Assign Call Assign Call Assign Call With Call Assign Call Assign Call Assign Call Return return:yes Call Call Call"
  },
  {
    "library": "matplotlib",
    "name": "Container",
    "source_code": "class Container(tuple):\n\n    def __repr__(self):\n        return f'<{type(self).__name__} object of {len(self)} artists>'\n\n    def __new__(cls, *args, **kwargs):\n        return tuple.__new__(cls, args[0])\n\n    def __init__(self, kl, label=None):\n        self._callbacks = cbook.CallbackRegistry(signals=['pchanged'])\n        self._remove_method = None\n        self._label = str(label) if label is not None else None\n\n    def remove(self):\n        for c in cbook.flatten(self, scalarp=lambda x: isinstance(x, Artist)):\n            if c is not None:\n                c.remove()\n        if self._remove_method:\n            self._remove_method(self)\n\n    def get_children(self):\n        return [child for child in cbook.flatten(self) if child is not None]\n    get_label = Artist.get_label\n    set_label = Artist.set_label\n    add_callback = Artist.add_callback\n    remove_callback = Artist.remove_callback\n    pchanged = Artist.pchanged",
    "docstring": "Base class for containers. Containers are classes that collect semantically related Artists such as the bars of a bar plot.",
    "type": "class",
    "file_path": "matplotlib\\lib\\matplotlib\\container.py",
    "ast_data": "ClassDef name:Container FunctionDef name:__repr__ arg:self arguments arg Return return:yes Call Call FunctionDef name:__new__ arg:cls arguments arg arg arg Return return:yes Call FunctionDef name:__init__ arg:self arg:kl arg:label arguments arg arg arg Assign Call Assign Assign Compare Call FunctionDef name:remove arg:self arguments arg For Call arguments arg Call If Compare Call If Call FunctionDef name:get_children arg:self arguments arg Return return:yes Call Compare Assign Assign Assign Assign Assign"
  },
  {
    "library": "pytorch",
    "name": "AdaptiveMaxPool1d",
    "source_code": "class AdaptiveMaxPool1d(_AdaptiveMaxPoolNd):\n    output_size: _size_1_t\n\n    def forward(self, input: Tensor):\n        return F.adaptive_max_pool1d(input, self.output_size, self.return_indices)",
    "docstring": "Applies a 1D adaptive max pooling over an input signal composed of several input planes. The output size is :math:, for any input size. The number of output features is equal to the number of input planes. Args: output_size: the target output size :math:. return_indices: if `(N, C, L_{in})(C, L_{in})(N, C, L_{out})(C, L_{out})L_{out}=\\text{output\\_size}`. Examples: >>> # target output size of 5 >>> m = nn.AdaptiveMaxPool1d(5) >>> input = torch.randn(1, 64, 8) >>> output = m(input)",
    "type": "class",
    "file_path": "pytorch\\torch\\nn\\modules\\pooling.py",
    "ast_data": "ClassDef name:AdaptiveMaxPool1d FunctionDef name:forward arg:self arg:input arguments arg arg Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "set_height_ratios",
    "source_code": "def set_height_ratios(self, height_ratios):\n    if height_ratios is None:\n        height_ratios = [1] * self._nrows\n    elif len(height_ratios) != self._nrows:\n        raise ValueError('Expected the given number of height ratios to match the number of rows of the grid')\n    self._row_height_ratios = height_ratios",
    "docstring": "Set the relative heights of the rows. *height_ratios* must be of length *nrows*. Each row gets a relative height of ``.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\gridspec.py",
    "ast_data": "FunctionDef name:set_height_ratios arg:self arg:height_ratios arguments arg arg If Compare Assign If Compare Call Raise Call Assign"
  },
  {
    "library": "tensorflow",
    "name": "_is_ast_str",
    "source_code": "def _is_ast_str(node):\n    allowed_types = [ast.Str]\n    if hasattr(ast, 'Bytes'):\n        allowed_types += [ast.Bytes]\n    if hasattr(ast, 'JoinedStr'):\n        allowed_types += [ast.JoinedStr]\n    if hasattr(ast, 'FormattedValue'):\n        allowed_types += [ast.FormattedValue]\n    return isinstance(node, allowed_types)",
    "docstring": "Determine whether this node represents a string.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\tools\\compatibility\\tf_upgrade_v2.py",
    "ast_data": "FunctionDef name:_is_ast_str arg:node arguments arg Assign If Call If Call If Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "_Checkpointer",
    "source_code": "class _Checkpointer:\n\n    def __init__(self, storage_writer: StorageWriter, storage_reader: StorageReader, *, process_group: Optional[dist.ProcessGroup]=None, coordinator_rank: int=0, no_dist: bool=False, load_planner: Optional[LoadPlanner]=None, save_planner: Optional[SavePlanner]=None):\n        self.storage_writer = storage_writer\n        self.storage_reader = storage_reader\n        self.process_group = process_group\n        self.coordinator_rank = coordinator_rank\n        self.no_dist = no_dist\n        self.load_planner = load_planner\n        self.save_planner = save_planner\n\n    def save(self, state_dict: STATE_DICT_TYPE) -> Metadata:\n        return saver.save(state_dict, self.storage_writer, process_group=self.process_group, coordinator_rank=self.coordinator_rank, no_dist=self.no_dist, planner=self.save_planner)\n\n    def async_save(self, state_dict: STATE_DICT_TYPE) -> Future:\n        return saver.async_save(state_dict, storage_writer=self.storage_writer, process_group=self.process_group, planner=self.save_planner)\n\n    def load(self, state_dict: dict[str, Any]) -> None:\n        loader.load(state_dict, storage_reader=self.storage_reader, process_group=self.process_group, planner=self.load_planner)",
    "docstring": "This base class specefies a high level API for saving and loading distributed 's. It provides an abstraction over the low-level APIs provided by :py:mod:, essentially calling :py:meth: and :py:meth: with the provided storage readers and writers. .. warning:: This feature is experimental and subject to removal/change.",
    "type": "class",
    "file_path": "pytorch\\torch\\distributed\\checkpoint\\_checkpointer.py",
    "ast_data": "ClassDef name:_Checkpointer FunctionDef name:__init__ arg:self arg:storage_writer arg:storage_reader arguments arg arg arg arg arg arg arg arg Assign Assign Assign Assign Assign Assign Assign FunctionDef name:save arg:self arg:state_dict arguments arg arg Return return:yes Call FunctionDef name:async_save arg:self arg:state_dict arguments arg arg Return return:yes Call FunctionDef name:load arg:self arg:state_dict arguments arg arg Call"
  },
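A hedged round-trip sketch for the experimental `_Checkpointer` above, assuming the public `FileSystemWriter`/`FileSystemReader` storage classes and a single process (`no_dist=True`); the scratch directory is illustrative.

```python
import torch
from torch.distributed.checkpoint import FileSystemWriter, FileSystemReader
from torch.distributed.checkpoint._checkpointer import _Checkpointer

ckpt_dir = "/tmp/ckpt_demo"  # hypothetical scratch directory
ckpt = _Checkpointer(
    storage_writer=FileSystemWriter(ckpt_dir),
    storage_reader=FileSystemReader(ckpt_dir),
    no_dist=True,  # single-process save/load; no process group required
)
ckpt.save({"weight": torch.ones(4)})
restored = {"weight": torch.zeros(4)}
ckpt.load(restored)        # loads in place into the provided state_dict
print(restored["weight"])  # tensor([1., 1., 1., 1.])
```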
  {
    "library": "pytorch",
    "name": "install_dims",
    "source_code": "def install_dims(self, var, dims, offset, is_store):\n    if var not in self.buffer_dimensions:\n        self.buffer_dimensions[var] = dims\n        self.buffer_offsets[var] = offset\n        return True\n    if self.buffer_offsets[var] != offset or len(self.buffer_dimensions[var]) != len(dims):\n        return False\n    if is_store:\n        return self.buffer_dimensions[var] == dims\n    for old, new in zip(self.buffer_dimensions[var], dims):\n        if old.stride != new.stride:\n            return False\n        if old.size != new.size or old.expr != new.expr:\n            old.size = V.graph.sizevars.evaluate_max(old.size, new.size)\n            old.expr = None\n    return True",
    "docstring": "Try to set self.buffer_dimensions[var], return True on success",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\codegen\\halide.py",
    "ast_data": "FunctionDef name:install_dims arg:self arg:var arg:dims arg:offset arg:is_store arguments arg arg arg arg arg If Compare Assign Assign Return return:yes If BoolOp Compare Compare Call Call Return return:yes If Return return:yes Compare For Call If Compare Return return:yes If BoolOp Compare Compare Assign Call Assign Return return:yes"
  },
  {
    "library": "numpy",
    "name": "flatten",
    "source_code": "def flatten(self, order='C'):\n    return N.ndarray.flatten(self, order=order)",
    "docstring": "Return a flattened copy of the matrix. All elements of the matrix are placed into a single row. Parameters ---------- order : {'C', 'F', 'A', 'K'}, optional 'C' means to flatten in row-major (C-style) order. 'F' means to flatten in column-major (Fortran-style) order. 'A' means to flatten in column-major order if is Fortran *contiguous* in memory, row-major order otherwise. 'K' means to flatten in the order the elements occur in memory. The default is 'C'. Returns ------- y : matrix A copy of the matrix, flattened to a matrix where is the number of elements in the original matrix. See Also -------- ravel : Return a flattened array. flat : A 1-D flat iterator over the matrix. Examples -------- >>> m = np.matrix([[1,2], [3,4]]) >>> m.flatten() matrix([[1, 2, 3, 4]]) >>> m.flatten('F') matrix([[1, 3, 2, 4]])",
    "type": "method",
    "file_path": "numpy\\numpy\\matrixlib\\defmatrix.py",
    "ast_data": "FunctionDef name:flatten arg:self arg:order arguments arg arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "register_backend",
    "source_code": "def register_backend(backend_name, construct_rpc_backend_options_handler, init_backend_handler):\n    global BackendType\n    if backend_registered(backend_name):\n        raise RuntimeError(f'RPC backend {backend_name}: already registered')\n    existing_enum_dict = {member.name: member.value for member in BackendType}\n    extended_enum_dict = dict({backend_name: BackendValue(construct_rpc_backend_options_handler=construct_rpc_backend_options_handler, init_backend_handler=init_backend_handler)}, **existing_enum_dict)\n    BackendType = enum.Enum(value='BackendType', names=extended_enum_dict)\n    BackendType.__repr__ = _backend_type_repr\n    if BackendType.__doc__:\n        BackendType.__doc__ = _backend_type_doc\n    return BackendType[backend_name]",
    "docstring": "Registers a new RPC backend. Args: backend_name (str): backend string to identify the handler. construct_rpc_backend_options_handler (function): Handler that is invoked when rpc_backend.construct_rpc_backend_options(**dict) is called. init_backend_handler (function): Handler that is invoked when the function is called with a backend. This returns the agent.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\rpc\\backend_registry.py",
    "ast_data": "FunctionDef name:register_backend arg:backend_name arg:construct_rpc_backend_options_handler arg:init_backend_handler arguments arg arg arg If Call Raise Call Assign Assign Call Call Assign Call Assign If Assign Return return:yes"
  },
  {
    "library": "scipy",
    "name": "entropy",
    "source_code": "def entropy(self, mean=None, cov=1):\n    dim, mean, cov_object = self._process_parameters(mean, cov)\n    return 0.5 * (cov_object.rank * (_LOG_2PI + 1) + cov_object.log_pdet)",
    "docstring": "Compute the differential entropy of the multivariate normal. Parameters ---------- %(_mvn_doc_default_callparams)s Returns ------- h : scalar Entropy of the multivariate normal distribution Notes ----- %(_mvn_doc_callparams_note)s",
    "type": "method",
    "file_path": "scipy\\scipy\\stats\\_multivariate.py",
    "ast_data": "FunctionDef name:entropy arg:self arg:mean arg:cov arguments arg arg arg Assign Call Return return:yes"
  },
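The closed form in the method above is easy to check numerically: for a k-dimensional normal the differential entropy is 0.5 * (k * (log(2*pi) + 1) + log|Sigma|).

```python
import numpy as np
from scipy.stats import multivariate_normal

# Standard bivariate normal: rank k = 2, log|Sigma| = 0
h = multivariate_normal(mean=np.zeros(2), cov=np.eye(2)).entropy()
print(np.isclose(h, 0.5 * 2 * (np.log(2 * np.pi) + 1)))  # True
```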
  {
    "library": "numpy",
    "name": "close",
    "source_code": "def close(self):\n    if self.zip is not None:\n        self.zip.close()\n        self.zip = None\n    if self.fid is not None:\n        self.fid.close()\n        self.fid = None\n    self.f = None",
    "docstring": "Close the file.",
    "type": "method",
    "file_path": "numpy\\numpy\\lib\\_npyio_impl.py",
    "ast_data": "FunctionDef name:close arg:self arguments arg If Compare Call Assign If Compare Call Assign Assign"
  },
  {
    "library": "pytorch",
    "name": "_register_subclass_spec_proxy_in_tracer",
    "source_code": "def _register_subclass_spec_proxy_in_tracer(tracer, name, spec):\n    fx_name = name + '0'\n    if hasattr(tracer.root, fx_name):\n        assert getattr(tracer.root, fx_name) == spec\n        return tracer.create_proxy('get_attr', fx_name, (), {})\n    qualname = tracer.get_fresh_qualname(name)\n    setattr(tracer.root, qualname, spec)\n    return tracer.create_proxy('get_attr', qualname, (), {})",
    "docstring": "This is a wrapper utility method on top of tracer to cache the already registered subclass spec attribute. This is useful because Subclass.__init__ will be same for each subclass. By default, fx will create multiple attributes/proxies for given attribute.",
    "type": "function",
    "file_path": "pytorch\\torch\\_export\\wrappers.py",
    "ast_data": "FunctionDef name:_register_subclass_spec_proxy_in_tracer arg:tracer arg:name arg:spec arguments arg arg arg Assign If Call Compare Call Return return:yes Call Assign Call Call Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "StrMethodFormatter",
    "source_code": "class StrMethodFormatter(Formatter):\n\n    def __init__(self, fmt):\n        self.fmt = fmt\n\n    def __call__(self, x, pos=None):\n        return _UnicodeMinusFormat().format(self.fmt, x=x, pos=pos)",
    "docstring": "Use a new-style format string (as used by ) to format the tick. The field used for the tick value must be labeled *x* and the field used for the tick position must be labeled *pos*. The formatter will respect :rc: when formatting negative numeric values. It is typically unnecessary to explicitly construct objects, as directly accepts the format string itself.",
    "type": "class",
    "file_path": "matplotlib\\lib\\matplotlib\\ticker.py",
    "ast_data": "ClassDef name:StrMethodFormatter FunctionDef name:__init__ arg:self arg:fmt arguments arg arg Assign FunctionDef name:__call__ arg:self arg:x arg:pos arguments arg arg arg Return return:yes Call Call"
  },
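A small sketch of the formatter above; as the docstring notes, the axis setter also accepts the format string directly.

```python
import matplotlib
matplotlib.use("Agg")  # headless backend for the example
import matplotlib.pyplot as plt
from matplotlib.ticker import StrMethodFormatter

fig, ax = plt.subplots()
ax.plot(range(5))
ax.yaxis.set_major_formatter(StrMethodFormatter("{x:.1f}"))  # 0.0, 1.0, ...
# Equivalent shorthand: ax.yaxis.set_major_formatter("{x:.1f}")
```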
  {
    "library": "tensorflow",
    "name": "_on_write_update_replica",
    "source_code": "def _on_write_update_replica(var, update_fn, value, **kwargs):\n    if var.aggregation == vs.VariableAggregation.NONE:\n        return update_fn(var._get_on_device_or_primary(), value, **kwargs)\n    if not distribute_lib.get_strategy().extended._use_merge_call():\n        if var.aggregation == vs.VariableAggregation.MEAN and (not var.dtype.is_floating) and tensor_util.is_tf_type(value):\n            raise ValueError('Cannot update non-float variables with tf.VariableAggregation.MEAN aggregation in replica context. Either change the variable dtype to float or update it in cross-replica context.')\n        aggregated_value = apply_aggregation_replica_context(value, var.aggregation, var)\n        values_util.mark_as_unsaveable()\n        return distribute_lib.get_replica_context()._update(var, update_fn, args=(aggregated_value,), kwargs=kwargs, group=True)\n    else:\n\n        def merge_fn(strategy, value, **kwargs):\n            if var.aggregation == vs.VariableAggregation.MEAN and (not var.dtype.is_floating) and isinstance(value, PerReplica):\n                raise ValueError('Cannot update non-float variables with tf.VariableAggregation.MEAN aggregation in replica context. Either change the variable dtype to float or update it in cross-replica context.')\n            assert strategy == var.distribute_strategy\n            v = values_util.apply_aggregation(strategy, value, var.aggregation, var)\n            return var._update_cross_replica(update_fn, v, **kwargs)\n        return distribute_lib.get_replica_context().merge_call(merge_fn, args=(value,), kwargs=kwargs)",
    "docstring": "Updates variables with ON_WRITE synchronization in replica context.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\values.py",
    "ast_data": "FunctionDef name:_on_write_update_replica arg:var arg:update_fn arg:value arguments arg arg arg arg If Compare Return return:yes Call Call If Call Call If BoolOp Compare Call Raise Call Assign Call Call Return return:yes Call Call FunctionDef name:merge_fn arg:strategy arg:value arguments arg arg arg If BoolOp Compare Call Raise Call Compare Assign Call Return return:yes Call Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "get_output_mask_at",
    "source_code": "@doc_controls.do_not_doc_inheritable\ndef get_output_mask_at(self, node_index):\n    output = self.get_output_at(node_index)\n    if isinstance(output, list):\n        return [getattr(x, '_keras_mask', None) for x in output]\n    else:\n        return getattr(output, '_keras_mask', None)",
    "docstring": "Retrieves the output mask tensor(s) of a layer at a given node. Args: node_index: Integer, index of the node from which to retrieve the attribute. E.g. will correspond to the first time the layer was called. Returns: A mask tensor (or list of tensors if the layer has multiple outputs).",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\base_layer.py",
    "ast_data": "FunctionDef name:get_output_mask_at arg:self arg:node_index arguments arg arg Assign Call If Call Return return:yes Call Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "paint",
    "source_code": "def paint(self):\n    return Op.paint_path(self.fill(), self.stroke())",
    "docstring": "Return the appropriate pdf operator to cause the path to be stroked, filled, or both.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\backends\\backend_pdf.py",
    "ast_data": "FunctionDef name:paint arg:self arguments arg Return return:yes Call Call Call"
  },
  {
    "library": "matplotlib",
    "name": "composite_transform_factory",
    "source_code": "def composite_transform_factory(a, b):\n    if isinstance(a, IdentityTransform):\n        return b\n    elif isinstance(b, IdentityTransform):\n        return a\n    elif isinstance(a, Affine2D) and isinstance(b, Affine2D):\n        return CompositeAffine2D(a, b)\n    return CompositeGenericTransform(a, b)",
    "docstring": "Create a new composite transform that is the result of applying transform a then transform b. Shortcut versions of the blended transform are provided for the case where both child transforms are affine, or one or the other is the identity transform. Composite transforms may also be created using the '+' operator, e.g.:: c = a + b",
    "type": "function",
    "file_path": "matplotlib\\lib\\matplotlib\\transforms.py",
    "ast_data": "FunctionDef name:composite_transform_factory arg:a arg:b arguments arg arg If Call Return return:yes If Call Return return:yes If BoolOp Call Call Return return:yes Call Return return:yes Call"
  },
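The shortcut rules above can be observed directly: composing with the identity returns the other operand unchanged, and the `+` operator routes through the same factory.

```python
from matplotlib.transforms import (Affine2D, IdentityTransform,
                                   composite_transform_factory)

a = Affine2D().scale(2.0)
print(composite_transform_factory(a, IdentityTransform()) is a)  # True

c = a + Affine2D().translate(1.0, 0.0)  # scale first, then translate
print(c.transform([[1.0, 1.0]]))        # [[3. 2.]]
```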
  {
    "library": "pytorch",
    "name": "PReLU",
    "source_code": "class PReLU(Module):\n    __constants__ = ['num_parameters']\n    num_parameters: int\n\n    def __init__(self, num_parameters: int=1, init: float=0.25, device=None, dtype=None) -> None:\n        factory_kwargs = {'device': device, 'dtype': dtype}\n        self.num_parameters = num_parameters\n        super().__init__()\n        self.init = init\n        self.weight = Parameter(torch.empty(num_parameters, **factory_kwargs))\n        self.reset_parameters()\n\n    def reset_parameters(self):\n        torch.nn.init.constant_(self.weight, self.init)\n\n    def forward(self, input: Tensor) -> Tensor:\n        return F.prelu(input, self.weight)\n\n    def extra_repr(self) -> str:\n        return f'num_parameters={self.num_parameters}'",
    "docstring": "Applies the element-wise PReLU function. .. math:: \\text{PReLU}(x) = \\max(0,x) + a * \\min(0,x) or .. math:: \\text{PReLU}(x) = \\begin{cases} x, & \\text{ if } x \\ge 0 \\\\ ax, & \\text{ otherwise } \\end{cases} Here :math: is a learnable parameter. When called without arguments, uses a single parameter :math: across all input channels. If called with , a separate :math: is used for each input channel. .. note:: weight decay should not be used when learning :math: for good performance. .. note:: Channel dim is the 2nd dim of input. When input has dims >> m = nn.PReLU() >>> input = torch.randn(2) >>> output = m(input)",
    "type": "class",
    "file_path": "pytorch\\torch\\nn\\modules\\activation.py",
    "ast_data": "ClassDef name:PReLU Assign FunctionDef name:__init__ arg:self arg:num_parameters arg:init arg:device arg:dtype arguments arg arg arg arg arg Assign Assign Call Call Assign Assign Call Call Call FunctionDef name:reset_parameters arg:self arguments arg Call FunctionDef name:forward arg:self arg:input arguments arg arg Return return:yes Call FunctionDef name:extra_repr arg:self arguments arg Return return:yes"
  },
  {
    "library": "authlib",
    "name": "create_authorization_url",
    "source_code": "def create_authorization_url(self, url, request_token=None, **kwargs):\n    kwargs['oauth_token'] = request_token or self.auth.token\n    if self.auth.redirect_uri:\n        kwargs['oauth_callback'] = self.auth.redirect_uri\n    return add_params_to_uri(url, kwargs.items())",
    "docstring": "Create an authorization URL by appending request_token and optional kwargs to url. This is the second step in the OAuth 1 workflow. The user should be redirected to this authorization URL, grant access to you, and then be redirected back to you. The redirection back can either be specified during client registration or by supplying a callback URI per request. :param url: The authorization endpoint URL. :param request_token: The previously obtained request token. :param kwargs: Optional parameters to append to the URL. :returns: The authorization URL with new parameters embedded.",
    "type": "method",
    "file_path": "authlib\\authlib\\oauth1\\client.py",
    "ast_data": "FunctionDef name:create_authorization_url arg:self arg:url arg:request_token arguments arg arg arg arg Assign BoolOp If Assign Return return:yes Call Call"
  },
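A sketch under stated assumptions: it uses authlib's requests integration (`OAuth1Session`, which mixes in the client above) with placeholder credentials and a hypothetical provider endpoint; building the URL itself makes no network call.

```python
from authlib.integrations.requests_client import OAuth1Session

client = OAuth1Session(
    client_id="demo-key",                  # hypothetical consumer key
    client_secret="demo-secret",           # hypothetical consumer secret
    redirect_uri="https://example.com/cb",
)
url = client.create_authorization_url(
    "https://provider.example/oauth/authorize",  # hypothetical endpoint
    request_token="request-token-from-step-1",
)
print(url)  # ...?oauth_token=...&oauth_callback=...
```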
  {
    "library": "tensorflow",
    "name": "_tensor_t",
    "source_code": "def _tensor_t(self):\n    return self.transpose()",
    "docstring": "Returns a Tensor which is the transpose of this Tensor.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\numpy_ops\\np_math_ops.py",
    "ast_data": "FunctionDef name:_tensor_t arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "cherrypy",
    "name": "__enter__",
    "source_code": "def __enter__(self):\n    cherrypy.serving.request.toolmaps[self.namespace] = map = {}\n\n    def populate(k, v):\n        toolname, arg = k.split('.', 1)\n        bucket = map.setdefault(toolname, {})\n        bucket[arg] = v\n    return populate",
    "docstring": "Populate request.toolmaps from tools specified in config.",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\_cptools.py",
    "ast_data": "FunctionDef name:__enter__ arg:self arguments arg Assign FunctionDef name:populate arg:k arg:v arguments arg arg Assign Call Assign Call Assign Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "DAGNode",
    "source_code": "class DAGNode:\n\n    def __init__(self, submodule_node: Node, input_nodes: list[Node], output_nodes: list[Node], logical_device_ids: list[int], size_bytes: int) -> None:\n        self.submodule_node: Node = submodule_node\n        self.input_nodes: list[Node] = input_nodes\n        self.output_nodes: list[Node] = output_nodes\n        self.logical_device_ids: list[int] = logical_device_ids\n        self.size_bytes = size_bytes\n\n    def __str__(self) -> str:\n        return str(self.submodule_node)",
    "docstring": "DAGNode class maintains useful information for a partition (submodule), and its input submodules and output submodules.",
    "type": "class",
    "file_path": "pytorch\\torch\\fx\\experimental\\accelerator_partitioner.py",
    "ast_data": "ClassDef name:DAGNode FunctionDef name:__init__ arg:self arg:submodule_node arg:input_nodes arg:output_nodes arg:logical_device_ids arg:size_bytes arguments arg arg arg arg arg arg Assign FunctionDef name:__str__ arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "sync_to_numpy_or_python_type",
    "source_code": "def sync_to_numpy_or_python_type(tensors):\n    if isinstance(tensors, coordinator_lib.RemoteValue):\n        return tensors.fetch()\n\n    def _to_single_numpy_or_python_type(t):\n        if isinstance(t, tensor_lib.Tensor):\n            x = t.numpy()\n            return x.item() if np.ndim(x) == 0 else x\n        return t\n    return nest.map_structure(_to_single_numpy_or_python_type, tensors)",
    "docstring": "Syncs and converts a structure of s to arrays or Python scalar types. For each tensor, it calls . If the result is a scalar value, it converts it to a Python type, such as a float or int, by calling . Numpy scalars are converted, as Python types are often more convenient to deal with. This is especially useful for bfloat16 Numpy scalars, which don't support as many operations as other Numpy values. Async strategies (such as and ) are forced to sync during this process. Args: tensors: A structure of tensors. Returns: , but scalar tensors are converted to Python types and non-scalar tensors are converted to Numpy arrays.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\utils\\tf_utils.py",
    "ast_data": "FunctionDef name:sync_to_numpy_or_python_type arg:tensors arguments arg If Call Return return:yes Call FunctionDef name:_to_single_numpy_or_python_type arg:t arguments arg If Call Assign Call Return return:yes Compare Call Call Return return:yes Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "__reduce__",
    "source_code": "def __reduce__(self):\n    python_code = self._real_recompile()\n    dict_without_graph = self.__dict__.copy()\n    import_block = _format_import_block(python_code.globals, sys_importer)\n    del dict_without_graph['_graph']\n    return (reduce_graph_module, (dict_without_graph, import_block))",
    "docstring": "Follow GraphModule.__reduce__ but call 'self._real_recompile' rather than 'self.recompile' since for a _LazyGraphModule, self.recompile just mark the need of recompilation and does not return the PythonCode object.",
    "type": "method",
    "file_path": "pytorch\\torch\\fx\\_lazy_graph_module.py",
    "ast_data": "FunctionDef name:__reduce__ arg:self arguments arg Assign Call Assign Call Assign Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "augment_model_with_bundled_inputs",
    "source_code": "def augment_model_with_bundled_inputs(model: torch.jit.ScriptModule, inputs: Optional[Sequence[tuple[Any, ...]]]=None, _receive_inflate_expr: Optional[list[str]]=None, info: Optional[list[str]]=None, skip_size_check=False) -> None:\n    if not isinstance(model, torch.jit.ScriptModule):\n        raise Exception('Only ScriptModule is supported.')\n    forward: Callable = model.forward\n    if not hasattr(forward, '__name__'):\n        forward.__name__ = 'forward'\n    augment_many_model_functions_with_bundled_inputs(model, inputs={forward: inputs}, _receive_inflate_expr=_receive_inflate_expr, info={forward: info} if info else None, skip_size_check=skip_size_check)",
    "docstring": "Add bundled sample inputs to a model for the forward function. Models with bundled inputs can be invoked in a uniform manner by benchmarking and code coverage tools. Augmented models will support the following methods: Returns a list of tuples suitable for passing to the model like Equivalent to , but slightly easier to call from C++. Returns a dictionary mapping function names to a metadata dictionary. This nested dictionary maps preset strings like: 'get_inputs_function_name' -> the name of a function attribute in this model that can be run to get back a list of inputs corresponding to that function. 'info' -> the user provided extra information about the bundled inputs Inputs can be specified in one of two ways: - The model can define . If the user chooses this method inputs should be None - is a list of inputs of form List[Tuple[Any, ...]]. A list of tuples where the elements of each tuple are the args that make up one input.",
    "type": "function",
    "file_path": "pytorch\\torch\\utils\\bundled_inputs.py",
    "ast_data": "FunctionDef name:augment_model_with_bundled_inputs arg:model arg:inputs arg:_receive_inflate_expr arg:info arg:skip_size_check arguments arg arg arg arg arg If Call Raise Call If Call Assign Call"
  },
  {
    "library": "pytorch",
    "name": "_adjust_attributes_of_avg_pool",
    "source_code": "def _adjust_attributes_of_avg_pool(expand_size: int, kernel_size: Sequence[int] | int, stride: Sequence[int] | int, padding: Sequence[int] | int) -> tuple[Sequence[int], Sequence[int], Sequence[int]]:\n    if isinstance(kernel_size, int):\n        kernel_shape = [kernel_size] * expand_size\n    else:\n        kernel_shape = kernel_size\n    if isinstance(padding, int):\n        pads = [padding] * expand_size * 2\n    elif len(padding) == 1:\n        pads = padding * expand_size * 2\n    elif len(padding) == 2:\n        pads = padding * expand_size\n    else:\n        pads = padding * 2\n    if isinstance(stride, int):\n        strides = [stride] * expand_size\n    elif not stride:\n        strides = kernel_shape\n    else:\n        strides = stride\n    return (kernel_shape, strides, pads)",
    "docstring": "Adjust attributes of avg_pool to match ONNX specification.",
    "type": "function",
    "file_path": "pytorch\\torch\\onnx\\symbolic_opset10.py",
    "ast_data": "FunctionDef name:_adjust_attributes_of_avg_pool arg:expand_size arg:kernel_size arg:stride arg:padding arguments arg arg arg arg If Call Assign Assign If Call Assign If Compare Call Assign If Compare Call Assign Assign If Call Assign If Assign Assign Return return:yes"
  },
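A quick check of the expansion rules above (a sketch; the helper is private to `torch.onnx.symbolic_opset10`, so the import path may shift between releases): scalar kernels broadcast to `expand_size` dims, integer padding doubles into ONNX begin/end pads, and an empty stride falls back to the kernel shape.

```python
from torch.onnx.symbolic_opset10 import _adjust_attributes_of_avg_pool

kernel, strides, pads = _adjust_attributes_of_avg_pool(
    expand_size=2, kernel_size=3, stride=[], padding=1
)
print(kernel)   # [3, 3]
print(strides)  # [3, 3]  (empty stride -> kernel shape)
print(pads)     # [1, 1, 1, 1]
```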
  {
    "library": "scipy",
    "name": "Sargan",
    "source_code": "class Sargan(Benchmark):\n    change_dimensionality = True\n\n    def __init__(self, dimensions=2):\n        Benchmark.__init__(self, dimensions)\n        self._bounds = list(zip([-100.0] * self.N, [100.0] * self.N))\n        self.custom_bounds = [(-5, 5), (-5, 5)]\n        self.global_optimum = [[0.0 for _ in range(self.N)]]\n        self.fglob = 0.0\n\n    def fun(self, x, *args):\n        self.nfev += 1\n        x0 = x[:-1]\n        x1 = roll(x, -1)[:-1]\n        return sum(self.N * (x ** 2 + 0.4 * sum(x0 * x1)))",
    "docstring": "Sargan objective function. This class defines the Sargan [1]_ global optimization problem. This is a multimodal minimization problem defined as follows: .. math:: f_{\\text{Sargan}}(x) = \\sum_{i=1}^{n} n \\left (x_i^2 + 0.4 \\sum_{i \\neq j}^{n} x_ix_j \\right) Here, :math: represents the number of dimensions and :math: for :math:. *Global optimum*: :math: for :math: for :math: .. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions For Global Optimization Problems Int. Journal of Mathematical Modelling and Numerical Optimisation, 2013, 4, 150-194.",
    "type": "class",
    "file_path": "scipy\\benchmarks\\benchmarks\\go_benchmark_functions\\go_funcs_S.py",
    "ast_data": "ClassDef name:Sargan Assign FunctionDef name:__init__ arg:self arg:dimensions arguments arg arg Call Assign Call Call Assign Assign Call Assign FunctionDef name:fun arg:self arg:x arguments arg arg arg Assign Assign Call Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "split",
    "source_code": "def split(self, value, lengths, name=None):\n    value = ops.convert_to_tensor(value, preferred_dtype=self._dtype, name='value')\n    _check_dtypes(value, self._dtype)\n    lengths = ops.convert_to_tensor(lengths)\n    sum_lengths = math_ops.reduce_sum(lengths)\n    if lengths.shape.ndims != 1:\n        raise errors_impl.InvalidArgumentError(None, None, 'Expected lengths to be a vector, received shape: %s ' % lengths.shape.as_list())\n    elif value.shape.ndims == 0:\n        raise errors_impl.InvalidArgumentError(None, None, 'Expected value to be at least a vector, but received shape: %s ' % value.shape.as_list())\n    elif sum_lengths.numpy() != value.shape.as_list()[0]:\n        raise errors_impl.InvalidArgumentError(None, None, \"Expected sum of lengths to be equal to values.shape[0], but sum of lengths is %d and value's shape is: %s \" % (sum_lengths.numpy(), value.shape.as_list()))\n    elif not self._dynamic_size and lengths.shape[0] != len(self._tensor_array):\n        raise errors_impl.InvalidArgumentError(None, None, \"TensorArray's size is not equal to the size of lengths (%d vs. %d), and the TensorArray is not marked as dynamically resizeable.\" % (len(self._tensor_array), lengths.shape[0]))\n    else:\n        self._tensor_array = array_ops.split(value, lengths, name=name)\n        return self.parent()",
    "docstring": "See TensorArray.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\tensor_array_ops.py",
    "ast_data": "FunctionDef name:split arg:self arg:value arg:lengths arg:name arguments arg arg arg arg Assign Call Call Assign Call Assign Call If Compare Raise Call Call If Compare Raise Call Call If Compare Call Call Raise Call Call Call If BoolOp Compare Call Raise Call Call Assign Call Return return:yes Call"
  },
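Through the public `tf.TensorArray` API, the split above distributes `value` along axis 0 according to `lengths`; a minimal eager-mode sketch:

```python
import tensorflow as tf

ta = tf.TensorArray(tf.float32, size=2)
ta = ta.split(tf.constant([1.0, 2.0, 3.0]), lengths=[1, 2])
print(ta.read(0).numpy())  # [1.]
print(ta.read(1).numpy())  # [2. 3.]
```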
  {
    "library": "scipy",
    "name": "cdf",
    "source_code": "def cdf(self, k):\n    pmfs = self.build_u_freqs_array(np.max(k))\n    cdfs = np.cumsum(pmfs)\n    return cdfs[k]",
    "docstring": "Cumulative distribution function",
    "type": "method",
    "file_path": "scipy\\scipy\\stats\\_mannwhitneyu.py",
    "ast_data": "FunctionDef name:cdf arg:self arg:k arguments arg arg Assign Call Call Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "read_tensor_tracer_event_file",
    "source_code": "def read_tensor_tracer_event_file(event_file):\n    step_occurrence_count = collections.defaultdict(int)\n    step_occurrence_list = []\n    for trace_event in summary_iterator.summary_iterator(event_file):\n        if not trace_event.HasField('summary'):\n            continue\n        if len(trace_event.summary.value) != 1:\n            raise ValueError('Single step contains %d summary values, expected 1.' % len(trace_event.summary.value))\n        step = trace_event.step\n        step_occurrence_count[step] += 1\n        occurrence_idx = step_occurrence_count[step] - 1\n        occurrence_size = len(step_occurrence_list)\n        if occurrence_idx == occurrence_size:\n            new_occurrence = collections.defaultdict(dict)\n            step_occurrence_list.append(new_occurrence)\n        elif occurrence_idx > occurrence_size:\n            raise ValueError('Unexpected: occurrence_idx (%d) > occurrence_size (%d)' % (occurrence_idx, occurrence_size))\n        tensor_value = trace_event.summary.value[0]\n        tensor_name = tensor_value.tag\n        real_shape = [d.size for d in tensor_value.tensor.tensor_shape.dim]\n        tensor_content = np.frombuffer(tensor_value.tensor.tensor_content, dtypes.DType(tensor_value.tensor.dtype).as_numpy_dtype()).reshape(real_shape)\n        step_occurrence_list[occurrence_idx][step][tensor_name] = tensor_content\n    return step_occurrence_list",
    "docstring": "Reads the event file written by tensor tracer. This can be used to read the full tensors written into binary event files by by TensorTracer with trace_mode=full_tensor_summary. Example usage: result_dict_list = tensor_tracer.read_tensor_tracer_event_file( event_file_path) for result_dict in result_dict_list: for step, tensor_dict in result_dict.items(): for tensor_name, full_tensor_content in tensor_dict.items(): logging.info(tensor_name, full_tensor_content) Args: event_file: Path to the event file that contains only tensor tracer events. Returns: A list of event dictionaries, each of which with the form: {step_number: {tensor_name: tensor_content}}. This is a list instead of a single event dictionary because it is possible that an event file may have multiple event traces, each of them covering the same step ranges. Raises: ValueError: If an unexpected trace is found.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\tpu\\tensor_tracer.py",
    "ast_data": "FunctionDef name:read_tensor_tracer_event_file arg:event_file arguments arg Assign Call Assign For Call If Call If Compare Call Raise Call Call Assign Assign Assign Call If Compare Assign Call Call If Compare Raise Call Assign Assign Assign Assign Call Call Call Call Assign Return return:yes"
  },
  {
    "library": "seaborn",
    "name": "_percentile_interval",
    "source_code": "def _percentile_interval(data, width):\n    edge = (100 - width) / 2\n    percentiles = (edge, 100 - edge)\n    return np.nanpercentile(data, percentiles)",
    "docstring": "Return a percentile interval from data of a given width.",
    "type": "function",
    "file_path": "seaborn\\seaborn\\_statistics.py",
    "ast_data": "FunctionDef name:_percentile_interval arg:data arg:width arguments arg arg Assign Assign Return return:yes Call"
  },
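Since the helper above is private to seaborn, here is a self-contained re-implementation mirroring its two lines, with a worked value:

```python
import numpy as np

def percentile_interval(data, width):
    # width=50 -> the (25, 75) percentiles; NaNs are ignored
    edge = (100 - width) / 2
    return np.nanpercentile(data, (edge, 100 - edge))

print(percentile_interval(np.arange(101.0), 50))  # [25. 75.]
```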
  {
    "library": "scikit-learn",
    "name": "partial_fit",
    "source_code": "@_fit_context(prefer_skip_nested_validation=True)\ndef partial_fit(self, X, y, classes=None):\n    if not hasattr(self, 'classes_'):\n        self._more_validate_params(for_partial_fit=True)\n        if self.class_weight == 'balanced':\n            raise ValueError(\"class_weight 'balanced' is not supported for partial_fit. For 'balanced' weights, use `sklearn.utils.compute_class_weight` with `class_weight='balanced'`. In place of y you can use a large enough subset of the full training set target to properly estimate the class frequency distributions. Pass the resulting weights as the class_weight parameter.\")\n    lr = 'pa1' if self.loss == 'hinge' else 'pa2'\n    return self._partial_fit(X, y, alpha=1.0, C=self.C, loss='hinge', learning_rate=lr, max_iter=1, classes=classes, sample_weight=None, coef_init=None, intercept_init=None)",
    "docstring": "Fit linear model with Passive Aggressive algorithm. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) Subset of the training data. y : array-like of shape (n_samples,) Subset of the target values. classes : ndarray of shape (n_classes,) Classes across all calls to partial_fit. Can be obtained by via , where y_all is the target vector of the entire dataset. This argument is required for the first call to partial_fit and can be omitted in the subsequent calls. Note that y doesn't need to contain all labels in . Returns ------- self : object Fitted estimator.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\linear_model\\_passive_aggressive.py",
    "ast_data": "FunctionDef name:partial_fit arg:self arg:X arg:y arg:classes arguments arg arg arg arg If Call Call If Compare Raise Call Assign Compare Return return:yes Call Call"
  },
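Usage sketch for the incremental API above: `classes` is required on the first call and may be omitted afterwards (toy data for illustration).

```python
import numpy as np
from sklearn.linear_model import PassiveAggressiveClassifier

X, y = np.array([[0.0], [1.0], [2.0], [3.0]]), np.array([0, 0, 1, 1])
clf = PassiveAggressiveClassifier(random_state=0)
clf.partial_fit(X[:2], y[:2], classes=np.unique(y))  # first call: classes needed
clf.partial_fit(X[2:], y[2:])                        # later calls: omitted
print(clf.predict([[2.5]]))
```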
  {
    "library": "django",
    "name": "name",
    "source_code": "@property\ndef name(self):\n    if self.projected:\n        return self.attr_value('PROJCS')\n    elif self.geographic:\n        return self.attr_value('GEOGCS')\n    elif self.local:\n        return self.attr_value('LOCAL_CS')\n    else:\n        return None",
    "docstring": "Return the name of this Spatial Reference.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\gdal\\srs.py",
    "ast_data": "FunctionDef name:name arg:self arguments arg If Return return:yes Call If Return return:yes Call If Return return:yes Call Return return:no"
  },
  {
    "library": "django",
    "name": "ask_rename",
    "source_code": "def ask_rename(self, model_name, old_name, new_name, field_instance):\n    return self.defaults.get('ask_rename', False)",
    "docstring": "Was this field really renamed?",
    "type": "method",
    "file_path": "django\\django\\db\\migrations\\questioner.py",
    "ast_data": "FunctionDef name:ask_rename arg:self arg:model_name arg:old_name arg:new_name arg:field_instance arguments arg arg arg arg arg Return return:yes Call"
  },
  {
    "library": "django",
    "name": "_generate_cache_header_key",
    "source_code": "def _generate_cache_header_key(key_prefix, request):\n    url = md5(request.build_absolute_uri().encode('ascii'), usedforsecurity=False)\n    cache_key = 'views.decorators.cache.cache_header.%s.%s' % (key_prefix, url.hexdigest())\n    return _i18n_cache_key_suffix(request, cache_key)",
    "docstring": "Return a cache key for the header cache.",
    "type": "function",
    "file_path": "django\\django\\utils\\cache.py",
    "ast_data": "FunctionDef name:_generate_cache_header_key arg:key_prefix arg:request arguments arg arg Assign Call Call Call Assign Call Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "_inverse_binarize_thresholding",
    "source_code": "def _inverse_binarize_thresholding(y, output_type, classes, threshold):\n    if output_type == 'binary' and y.ndim == 2 and (y.shape[1] > 2):\n        raise ValueError(\"output_type='binary', but y.shape = {0}\".format(y.shape))\n    if output_type != 'binary' and y.shape[1] != len(classes):\n        raise ValueError('The number of class is not equal to the number of dimension of y.')\n    classes = np.asarray(classes)\n    if sp.issparse(y):\n        if threshold > 0:\n            if y.format not in ('csr', 'csc'):\n                y = y.tocsr()\n            y.data = np.array(y.data > threshold, dtype=int)\n            y.eliminate_zeros()\n        else:\n            y = np.array(y.toarray() > threshold, dtype=int)\n    else:\n        y = np.array(y > threshold, dtype=int)\n    if output_type == 'binary':\n        if sp.issparse(y):\n            y = y.toarray()\n        if y.ndim == 2 and y.shape[1] == 2:\n            return classes[y[:, 1]]\n        elif len(classes) == 1:\n            return np.repeat(classes[0], len(y))\n        else:\n            return classes[y.ravel()]\n    elif output_type == 'multilabel-indicator':\n        return y\n    else:\n        raise ValueError('{0} format is not supported'.format(output_type))",
    "docstring": "Inverse label binarization transformation using thresholding.",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\preprocessing\\_label.py",
    "ast_data": "FunctionDef name:_inverse_binarize_thresholding arg:y arg:output_type arg:classes arg:threshold arguments arg arg arg arg If BoolOp Compare Compare Compare Raise Call Call If BoolOp Compare Compare Call Raise Call Assign Call If Call If Compare If Compare Assign Call Assign Call Compare Call Assign Call Compare Call Assign Call Compare If Compare If Call Assign Call If BoolOp Compare Compare Return return:yes If Compare Call Return return:yes Call Call Return return:yes Call If Compare Return return:yes Raise Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_maybe_assert_valid_concentration",
    "source_code": "def _maybe_assert_valid_concentration(self, concentration, validate_args):\n    if not validate_args:\n        return concentration\n    return control_flow_ops.with_dependencies([check_ops.assert_positive(concentration, message='Concentration parameter must be positive.'), check_ops.assert_rank_at_least(concentration, 1, message='Concentration parameter must have >=1 dimensions.'), check_ops.assert_less(1, array_ops.shape(concentration)[-1], message='Concentration parameter must have event_size >= 2.')], concentration)",
    "docstring": "Checks the validity of the concentration parameter.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\distributions\\dirichlet.py",
    "ast_data": "FunctionDef name:_maybe_assert_valid_concentration arg:self arg:concentration arg:validate_args arguments arg arg arg If Return return:yes Return return:yes Call Call Call Call Call"
  },
  {
    "library": "django",
    "name": "color_interp",
    "source_code": "def color_interp(self, as_string=False):\n    color = capi.get_band_color_interp(self._ptr)\n    if as_string:\n        color = GDAL_COLOR_TYPES[color]\n    return color",
    "docstring": "Return the GDAL color interpretation for this band.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\gdal\\raster\\band.py",
    "ast_data": "FunctionDef name:color_interp arg:self arg:as_string arguments arg arg Assign Call If Assign Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "determine_grid",
    "source_code": "def determine_grid(grid: TritonGrid, example_grid: Optional[TritonGrid]=None):\n    if wrapper is None or callable(grid):\n        return (grid, grid)\n    sympy_grid = tuple((_convert_to_sympy_expr(g) for g in grid))\n    if not example_grid:\n        example_grid = sympy_grid\n    return (wrapper.codegen_python_shape_tuple(sympy_grid), wrapper.codegen_python_shape_tuple(tuple((wrapper.generate_example_arg_value(g, type(g)) for g in example_grid))) if config.triton.autotune_at_compile_time else None)",
    "docstring": "This function return a tuple of two values: the first one is for the real grid which is used in the generated code; the second one is an example grid with concreate values which is used in the autotune block to run the generated kernels at compile time.",
    "type": "function",
    "file_path": "pytorch\\torch\\_inductor\\codegen\\wrapper.py",
    "ast_data": "FunctionDef name:determine_grid arg:grid arg:example_grid arguments arg arg If BoolOp Compare Call Return return:yes Assign Call Call If Assign Return return:yes Call Call Call Call Call"
  },
  {
    "library": "pandas",
    "name": "memory_usage",
    "source_code": "def memory_usage(self, deep: bool=False) -> int:\n    return self.nbytes",
    "docstring": "Memory usage of my values Parameters ---------- deep : bool Introspect the data deeply, interrogate dtypes for system-level memory consumption Returns ------- bytes used Notes ----- Memory usage does not include memory consumed by elements that are not components of the array if deep=False See Also -------- numpy.ndarray.nbytes",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\indexes\\range.py",
    "ast_data": "FunctionDef name:memory_usage arg:self arg:deep arguments arg arg Return return:yes"
  },
  {
    "library": "pygame",
    "name": "switch_layer",
    "source_code": "def switch_layer(self, layer1_nr, layer2_nr):\n    sprites1 = self.remove_sprites_of_layer(layer1_nr)\n    for spr in self.get_sprites_from_layer(layer2_nr):\n        self.change_layer(spr, layer1_nr)\n    self.add(*sprites1, layer=layer2_nr)",
    "docstring": "switch the sprites from layer1_nr to layer2_nr LayeredUpdates.switch_layer(layer1_nr, layer2_nr): return None The layers number must exist. This method does not check for the existence of the given layers.",
    "type": "method",
    "file_path": "pygame\\src_py\\sprite.py",
    "ast_data": "FunctionDef name:switch_layer arg:self arg:layer1_nr arg:layer2_nr arguments arg arg arg Assign Call For Call Call Call"
  },
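A minimal sketch of the layer swap above using `LayeredUpdates` (bare `Sprite` objects suffice, since nothing is drawn):

```python
import pygame

group = pygame.sprite.LayeredUpdates()
s1, s2 = pygame.sprite.Sprite(), pygame.sprite.Sprite()
group.add(s1, layer=1)
group.add(s2, layer=2)
group.switch_layer(1, 2)  # sprites trade layers
print(group.get_layer_of_sprite(s1), group.get_layer_of_sprite(s2))  # 2 1
```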
  {
    "library": "pandas",
    "name": "_quantile",
    "source_code": "def _quantile(self, qs: npt.NDArray[np.float64], interpolation: str) -> BaseMaskedArray:\n    res = quantile_with_mask(self._data, mask=self._mask, fill_value=np.nan, qs=qs, interpolation=interpolation)\n    if self._hasna:\n        if self.ndim == 2:\n            raise NotImplementedError\n        if self.isna().all():\n            out_mask = np.ones(res.shape, dtype=bool)\n            if is_integer_dtype(self.dtype):\n                res = np.zeros(res.shape, dtype=self.dtype.numpy_dtype)\n        else:\n            out_mask = np.zeros(res.shape, dtype=bool)\n    else:\n        out_mask = np.zeros(res.shape, dtype=bool)\n    return self._maybe_mask_result(res, mask=out_mask)",
    "docstring": "Dispatch to quantile_with_mask, needed because we do not have _from_factorized. Notes ----- We assume that all impacted cases are 1D-only.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\arrays\\masked.py",
    "ast_data": "FunctionDef name:_quantile arg:self arg:qs arg:interpolation arguments arg arg arg Assign Call If If Compare Raise If Call Call Assign Call If Call Assign Call Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "SqlDatasetV1",
    "source_code": "@tf_export(v1=['data.experimental.SqlDataset'])\nclass SqlDatasetV1(dataset_ops.DatasetV1Adapter):\n\n    @functools.wraps(SqlDatasetV2.__init__)\n    def __init__(self, driver_name, data_source_name, query, output_types):\n        wrapped = SqlDatasetV2(driver_name, data_source_name, query, output_types)\n        super(SqlDatasetV1, self).__init__(wrapped)",
    "docstring": "A consisting of the results from a SQL query.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\data\\experimental\\ops\\readers.py",
    "ast_data": "ClassDef name:SqlDatasetV1 FunctionDef name:__init__ arg:self arg:driver_name arg:data_source_name arg:query arg:output_types arguments arg arg arg arg arg Assign Call Call Call Call Call"
  },
  {
    "library": "matplotlib",
    "name": "contiguous_regions",
    "source_code": "def contiguous_regions(mask):\n    mask = np.asarray(mask, dtype=bool)\n    if not mask.size:\n        return []\n    idx, = np.nonzero(mask[:-1] != mask[1:])\n    idx += 1\n    idx = idx.tolist()\n    if mask[0]:\n        idx = [0] + idx\n    if mask[-1]:\n        idx.append(len(mask))\n    return list(zip(idx[::2], idx[1::2]))",
    "docstring": "Return a list of (ind0, ind1) such that `` is True and we cover all such regions.",
    "type": "function",
    "file_path": "matplotlib\\lib\\matplotlib\\cbook.py",
    "ast_data": "FunctionDef name:contiguous_regions arg:mask arguments arg Assign Call If Return return:no Assign Call Compare Assign Call If Assign If Call Call Return return:yes Call Call"
  },
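A worked example of the function above: changes in the mask mark region boundaries, and the endpoints are patched in when the mask starts or ends True.

```python
import numpy as np
from matplotlib.cbook import contiguous_regions

mask = np.array([True, True, False, True, False, False, True])
print(contiguous_regions(mask))  # [(0, 2), (3, 4), (6, 7)]
```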
  {
    "library": "scipy",
    "name": "matrix_normal_frozen",
    "source_code": "class matrix_normal_frozen(multi_rv_frozen):\n\n    def __init__(self, mean=None, rowcov=1, colcov=1, seed=None):\n        self._dist = matrix_normal_gen(seed)\n        self.dims, self.mean, self.rowcov, self.colcov = self._dist._process_parameters(mean, rowcov, colcov)\n        self.rowpsd = _PSD(self.rowcov, allow_singular=False)\n        self.colpsd = _PSD(self.colcov, allow_singular=False)\n\n    def logpdf(self, X):\n        X = self._dist._process_quantiles(X, self.dims)\n        out = self._dist._logpdf(self.dims, X, self.mean, self.rowpsd.U, self.rowpsd.log_pdet, self.colpsd.U, self.colpsd.log_pdet)\n        return _squeeze_output(out)\n\n    def pdf(self, X):\n        return np.exp(self.logpdf(X))\n\n    def rvs(self, size=1, random_state=None):\n        return self._dist.rvs(self.mean, self.rowcov, self.colcov, size, random_state)\n\n    def entropy(self):\n        return self._dist._entropy(self.dims, self.rowpsd.log_pdet, self.colpsd.log_pdet)",
    "docstring": "Create a frozen matrix normal distribution. Parameters ---------- %(_matnorm_doc_default_callparams)s seed : {None, int, , }, optional If is the singleton is used. If is an int, a new `seedNone`. Examples -------- >>> import numpy as np >>> from scipy.stats import matrix_normal >>> distn = matrix_normal(mean=np.zeros((3,3))) >>> X = distn.rvs(); X array([[-0.02976962, 0.93339138, -0.09663178], [ 0.67405524, 0.28250467, -0.93308929], [-0.31144782, 0.74535536, 1.30412916]]) >>> distn.pdf(X) 2.5160642368346784e-05 >>> distn.logpdf(X) -10.590229595124615",
    "type": "class",
    "file_path": "scipy\\scipy\\stats\\_multivariate.py",
    "ast_data": "ClassDef name:matrix_normal_frozen FunctionDef name:__init__ arg:self arg:mean arg:rowcov arg:colcov arg:seed arguments arg arg arg arg arg Assign Call Assign Call Assign Call Assign Call FunctionDef name:logpdf arg:self arg:X arguments arg arg Assign Call Assign Call Return return:yes Call FunctionDef name:pdf arg:self arg:X arguments arg arg Return return:yes Call Call FunctionDef name:rvs arg:self arg:size arg:random_state arguments arg arg arg Return return:yes Call FunctionDef name:entropy arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "name",
    "source_code": "@property\ndef name(self):\n    if context.executing_eagerly():\n        return self._name\n    return self._barrier_ref.op.name",
    "docstring": "The name of the underlying barrier.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\data_flow_ops.py",
    "ast_data": "FunctionDef name:name arg:self arguments arg If Call Return return:yes Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "movedim",
    "source_code": "def movedim(input: TensorLikeType, source: Union[int, DimsSequenceType], destination: Union[int, DimsSequenceType]) -> TensorLikeType:\n    if type(source) is int:\n        source = (source,)\n    if type(destination) is int:\n        destination = (destination,)\n    torch._check(len(source) == len(destination), lambda: f'movedim: Invalid source or destination dims: source ({list(source)} dims) should contain the same number of dims as destination ({list(destination)} dims)')\n    rank = input.ndim\n    ss = tuple(utils.canonicalize_dims(rank=rank, indices=source))\n    ds = tuple(utils.canonicalize_dims(rank=rank, indices=destination))\n    sss = set(ss)\n    dss = set(ds)\n    torch._check(len(ss) == len(sss), lambda: f'movedim: repeated dim in `source` ({list(source)})')\n    torch._check(len(ds) == len(dss), lambda: f'movedim: repeated dim in `destination` ({list(destination)})')\n    m = dict(zip(ds, ss))\n    dims = []\n    si = 0\n    for di in range(rank):\n        s = m.get(di)\n        if s is not None:\n            dims.append(s)\n        else:\n            while si in sss:\n                si += 1\n            dims.append(si)\n            si += 1\n    result = torch.permute(input, tuple(dims))\n    return result",
    "docstring": "Reference implementation of torch.movedim",
    "type": "function",
    "file_path": "pytorch\\torch\\_refs\\__init__.py",
    "ast_data": "FunctionDef name:movedim arg:input arg:source arg:destination arguments arg arg arg If Compare Call Assign If Compare Call Assign Call Compare Call Call arguments Call Call Assign Assign Call Call Assign Call Call Assign Call Assign Call Call Compare Call Call arguments Call Call Compare Call Call arguments Call Assign Call Call Assign Assign For Call Assign Call If Compare Call While Compare Call Assign Call Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "add_gridspec",
    "source_code": "def add_gridspec(self, nrows=1, ncols=1, **kwargs):\n    _ = kwargs.pop('figure', None)\n    gs = GridSpec(nrows=nrows, ncols=ncols, figure=self, **kwargs)\n    return gs",
    "docstring": "Low-level API for creating a that has this figure as a parent. This is a low-level API, allowing you to create a gridspec and subsequently add subplots based on the gridspec. Most users do not need that freedom and should use the higher-level methods or . Parameters ---------- nrows : int, default: 1 Number of rows in grid. ncols : int, default: 1 Number of columns in grid. Returns ------- Other Parameters ---------------- **kwargs Keyword arguments are passed to . See Also -------- matplotlib.pyplot.subplots Examples -------- Adding a subplot that spans two rows:: fig = plt.figure() gs = fig.add_gridspec(2, 2) ax1 = fig.add_subplot(gs[0, 0]) ax2 = fig.add_subplot(gs[1, 0]) # spans two rows: ax3 = fig.add_subplot(gs[:, 1])",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\figure.py",
    "ast_data": "FunctionDef name:add_gridspec arg:self arg:nrows arg:ncols arguments arg arg arg arg Assign Call Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "update_last_triggered_step",
    "source_code": "def update_last_triggered_step(self, step):\n    raise NotImplementedError",
    "docstring": "Update the last triggered time and step number. Args: step: The current step. Returns: A pair , where is the number of seconds between the current trigger and the last one (a float), and is the number of steps between the current trigger and the last one. Both values will be set to on the first trigger.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\training\\basic_session_run_hooks.py",
    "ast_data": "FunctionDef name:update_last_triggered_step arg:self arg:step arguments arg arg Raise"
  },
  {
    "library": "pytorch",
    "name": "try_end_curr_recording",
    "source_code": "def try_end_curr_recording(self, function_id: FunctionID) -> None:\n    assert self.in_recording\n    assert self.current_node is not None\n    if self.can_start_new_generation():\n        self.dealloc_current_path_weakrefs()\n        self.clear_current_path_state_and_set_to_none()\n        return\n    if self.current_node.all_outputs_are_dead():\n        self.clear_current_path_state_and_set_to_none()\n        return\n    self.check_warn_on_unable_to_start_executing(function_id)",
    "docstring": "Check if the current recording can be terminated, either because all outputs of the previously recorded node are dead or because it was executed in a different generation. Will set current_node to None and in_recording to False if successful.",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\cudagraph_trees.py",
    "ast_data": "FunctionDef name:try_end_curr_recording arg:self arg:function_id arguments arg arg Compare If Call Call Call Return return:no If Call Call Return return:no Call"
  },
  {
    "library": "scipy",
    "name": "Mishra07",
    "source_code": "class Mishra07(Benchmark):\n    change_dimensionality = True\n\n    def __init__(self, dimensions=2):\n        Benchmark.__init__(self, dimensions)\n        self._bounds = list(zip([-10.0] * self.N, [10.0] * self.N))\n        self.custom_bounds = [(-2, 2), (-2, 2)]\n        self.global_optimum = [[sqrt(self.N) for i in range(self.N)]]\n        self.fglob = 0.0\n\n    def fun(self, x, *args):\n        self.nfev += 1\n        return (prod(x) - factorial(self.N)) ** 2.0",
    "docstring": "Mishra 7 objective function. This class defines the Mishra 7 [1]_ global optimization problem. This is a multimodal minimization problem defined as follows: .. math:: f_{\\text{Mishra07}}(x) = \\left [\\prod_{i=1}^{n} x_i - n! \\right]^2 Here, :math: represents the number of dimensions and :math: for :math:. *Global optimum*: :math: for :math: for :math: .. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions For Global Optimization Problems Int. Journal of Mathematical Modelling and Numerical Optimisation, 2013, 4, 150-194.",
    "type": "class",
    "file_path": "scipy\\benchmarks\\benchmarks\\go_benchmark_functions\\go_funcs_M.py",
    "ast_data": "ClassDef name:Mishra07 Assign FunctionDef name:__init__ arg:self arg:dimensions arguments arg arg Call Assign Call Call Assign Assign Call Call Assign FunctionDef name:fun arg:self arg:x arguments arg arg arg Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "time_to_readable_str",
    "source_code": "def time_to_readable_str(value_us, force_time_unit=None):\n    if not value_us:\n        return '0'\n    if force_time_unit:\n        if force_time_unit not in TIME_UNITS:\n            raise ValueError('Invalid time unit: %s' % force_time_unit)\n        order = TIME_UNITS.index(force_time_unit)\n        time_unit = force_time_unit\n        return '{:.10g}{}'.format(value_us / math.pow(10.0, 3 * order), time_unit)\n    else:\n        order = min(len(TIME_UNITS) - 1, int(math.log(value_us, 10) / 3))\n        time_unit = TIME_UNITS[order]\n        return '{:.3g}{}'.format(value_us / math.pow(10.0, 3 * order), time_unit)",
    "docstring": "Convert time value to human-readable string. Args: value_us: time value in microseconds. force_time_unit: force the output to use the specified time unit. Must be in TIME_UNITS. Returns: Human-readable string representation of the time value. Raises: ValueError: if force_time_unit value is not in TIME_UNITS.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\debug\\cli\\cli_shared.py",
    "ast_data": "FunctionDef name:time_to_readable_str arg:value_us arg:force_time_unit arguments arg arg If Return return:yes If If Compare Raise Call Assign Call Assign Return return:yes Call Call Assign Call Call Call Call Assign Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "_test_mode",
    "source_code": "@contextmanager\ndef _test_mode() -> Generator[None, None, None]:\n    global _is_test_mode\n    prev = _is_test_mode\n    try:\n        _is_test_mode = True\n        yield\n    finally:\n        _is_test_mode = prev",
    "docstring": "Forces `` namespace to use fallback implementations. The context manager is not thread safe.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\_symmetric_memory\\__init__.py",
    "ast_data": "FunctionDef name:_test_mode arguments Assign Try Assign Assign"
  },
  {
    "library": "pytorch",
    "name": "initial_seed",
    "source_code": "def initial_seed() -> int:\n    return default_generator.initial_seed()",
    "docstring": "Returns the initial seed for generating random numbers as a Python . .. note:: The returned seed is for the default generator on CPU only.",
    "type": "function",
    "file_path": "pytorch\\torch\\random.py",
    "ast_data": "FunctionDef name:initial_seed arguments Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "make_function_type",
    "source_code": "def make_function_type(python_function, input_signature):\n    _validate_signature(input_signature)\n    function_type = function_type_lib.FunctionType.from_callable(python_function)\n    default_values = function_type_lib.FunctionType.get_default_values(python_function)\n    if input_signature is not None:\n        input_signature = tuple(input_signature)\n        function_type = function_type_lib.add_type_constraints(function_type, input_signature, default_values)\n    return (function_type, default_values)",
    "docstring": "Generates a FunctionType for python_function.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\polymorphic_function\\function_type_utils.py",
    "ast_data": "FunctionDef name:make_function_type arg:python_function arg:input_signature arguments arg arg Call Assign Call Assign Call If Compare Assign Call Assign Call Return return:yes"
  },
  {
    "library": "django",
    "name": "firstof",
    "source_code": "@register.tag\ndef firstof(parser, token):\n    bits = token.split_contents()[1:]\n    asvar = None\n    if not bits:\n        raise TemplateSyntaxError(\"'firstof' statement requires at least one argument\")\n    if len(bits) >= 2 and bits[-2] == 'as':\n        asvar = bits[-1]\n        bits = bits[:-2]\n    return FirstOfNode([parser.compile_filter(bit) for bit in bits], asvar)",
    "docstring": "Output the first variable passed that is not False. Output nothing if all the passed variables are False. Sample usage:: {% firstof var1 var2 var3 as myvar %} This is equivalent to:: {% if var1 %} {{ var1 }} {% elif var2 %} {{ var2 }} {% elif var3 %} {{ var3 }} {% endif %} but much cleaner! You can also use a literal string as a fallback value in case all passed variables are False:: {% firstof var1 var2 var3 \"fallback value\" %} If you want to disable auto-escaping of variables you can use:: {% autoescape off %} {% firstof var1 var2 var3 \"fallback value\" %} {% autoescape %} Or if only some variables should be escaped, you can use:: {% firstof var1 var2|safe var3 \"fallback value\"|safe %}",
    "type": "function",
    "file_path": "django\\django\\template\\defaulttags.py",
    "ast_data": "FunctionDef name:firstof arg:parser arg:token arguments arg arg Assign Call Assign If Raise Call If BoolOp Compare Call Compare Assign Assign Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "get_weights",
    "source_code": "def get_weights(self):\n    strategy = self._distribution_strategy or self._compile_time_distribution_strategy\n    if strategy:\n        with strategy.scope():\n            return base_layer.Layer.get_weights(self)\n    return base_layer.Layer.get_weights(self)",
    "docstring": "Retrieves the weights of the model. Returns: A flat list of Numpy arrays.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\training_v1.py",
    "ast_data": "FunctionDef name:get_weights arg:self arguments arg Assign BoolOp If With Call Return return:yes Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "stop_gradient",
    "source_code": "@dispatch.add_dispatch_support\n@doc_controls.do_not_generate_docs\ndef stop_gradient(variables):\n    if isinstance(variables, (list, tuple)):\n        return map(array_ops.stop_gradient, variables)\n    return array_ops.stop_gradient(variables)",
    "docstring": "Returns but with zero gradient w.r.t. every other variable. Args: variables: Tensor or list of tensors to consider constant with respect to any other variable. Returns: A single tensor or a list of tensors (depending on the passed argument) that has no gradient with respect to any other variable.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\backend.py",
    "ast_data": "FunctionDef name:stop_gradient arg:variables arguments arg If Call Return return:yes Call Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "estimate_spectral_norm_diff",
    "source_code": "def estimate_spectral_norm_diff(A, B, its=20, rng=None):\n    from scipy.sparse.linalg import aslinearoperator\n    rng = np.random.default_rng(rng)\n    A = aslinearoperator(A)\n    B = aslinearoperator(B)\n    if _is_real(A):\n        return _backend.idd_diffsnorm(A, B, its=its, rng=rng)\n    else:\n        return _backend.idz_diffsnorm(A, B, its=its, rng=rng)",
    "docstring": "Estimate spectral norm of the difference of two matrices by the randomized power method. .. This function automatically detects the matrix data type and calls the appropriate backend. For details, see :func: and :func:. Parameters ---------- A : :class: First matrix given as a :class: with the and methods (to apply the matrix and its adjoint). B : :class: Second matrix given as a :class: with the and methods (to apply the matrix and its adjoint). its : int, optional Number of power method iterations. rng : , optional Pseudorandom number generator state. When is None, a new is created using entropy from the operating system. Types other than are passed to to instantiate a `rand`, the argument is ignored. Returns ------- float Spectral norm estimate of matrix difference.",
    "type": "function",
    "file_path": "scipy\\scipy\\linalg\\interpolative.py",
    "ast_data": "FunctionDef name:estimate_spectral_norm_diff arg:A arg:B arg:its arg:rng arguments arg arg arg arg Assign Call Assign Call Assign Call If Call Return return:yes Call Return return:yes Call"
  },
  {
    "library": "cherrypy",
    "name": "__init__",
    "source_code": "def __init__(self, fp, headers, boundary):\n    Entity.__init__(self, fp, headers)\n    self.boundary = boundary\n    self.file = None\n    self.value = None",
    "docstring": "Initialize an entity part.",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\_cpreqbody.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:fp arg:headers arg:boundary arguments arg arg arg arg Call Assign Assign Assign"
  },
  {
    "library": "django",
    "name": "disable_action",
    "source_code": "def disable_action(self, name):\n    del self._actions[name]",
    "docstring": "Disable a globally-registered action. Raise KeyError for invalid names.",
    "type": "method",
    "file_path": "django\\django\\contrib\\admin\\sites.py",
    "ast_data": "FunctionDef name:disable_action arg:self arg:name arguments arg arg"
  },
  {
    "library": "cherrypy",
    "name": "get_ha1_dict_plain",
    "source_code": "def get_ha1_dict_plain(user_password_dict):\n\n    def get_ha1(realm, username):\n        password = user_password_dict.get(username)\n        if password:\n            return md5_hex('%s:%s:%s' % (username, realm, password))\n        return None\n    return get_ha1",
    "docstring": "Return a get_ha1 function which obtains a plaintext password. user_password_dict is a dictionary of the form: {username : password}. If you want a simple dictionary-based authentication scheme, with plaintext passwords, use get_ha1_dict_plain(my_userpass_dict) as the value for the get_ha1 argument to digest_auth().",
    "type": "function",
    "file_path": "cherrypy\\cherrypy\\lib\\auth_digest.py",
    "ast_data": "FunctionDef name:get_ha1_dict_plain arg:user_password_dict arguments arg FunctionDef name:get_ha1 arg:realm arg:username arguments arg arg Assign Call If Return return:yes Call Return return:no Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "deserialize",
    "source_code": "def deserialize(self, stamp_token, serialized_proto):\n    return gen_boosted_trees_ops.boosted_trees_deserialize_ensemble(self.resource_handle, stamp_token, serialized_proto)",
    "docstring": "Deserialize the input proto and resets the ensemble from it. Args: stamp_token: int64 scalar Tensor to denote the stamp of the resource. serialized_proto: string scalar Tensor of the serialized proto. Returns: Operation (for dependencies).",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\boosted_trees_ops.py",
    "ast_data": "FunctionDef name:deserialize arg:self arg:stamp_token arg:serialized_proto arguments arg arg arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "MergeKwargsIntoArgsInputStep",
    "source_code": "class MergeKwargsIntoArgsInputStep(InputAdaptStep):\n\n    def apply(self, model_args: Sequence[Any], model_kwargs: Mapping[str, Any], model: torch.nn.Module | Callable | torch_export.ExportedProgram | None=None) -> tuple[Sequence[Any], Mapping[str, Any]]:\n        return (tuple(model_args) + tuple(model_kwargs.values()), {})",
    "docstring": "Merge the input kwargs into the input args.",
    "type": "class",
    "file_path": "pytorch\\torch\\onnx\\_internal\\io_adapter.py",
    "ast_data": "ClassDef name:MergeKwargsIntoArgsInputStep FunctionDef name:apply arg:self arg:model_args arg:model_kwargs arg:model arguments arg arg arg arg Return return:yes Call Call Call"
  },
  {
    "library": "django",
    "name": "__init__",
    "source_code": "def __init__(self, klass, field, load_func=None):\n    self._klass = klass\n    self._load_func = load_func or klass\n    super().__init__(field)",
    "docstring": "Initialize on the given Geometry or Raster class (not an instance) and the corresponding field.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\db\\models\\proxy.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:klass arg:field arg:load_func arguments arg arg arg arg Assign Assign BoolOp Call Call"
  },
  {
    "library": "pytorch",
    "name": "default_matching",
    "source_code": "@compatibility(is_backward_compatible=False)\ndef default_matching(name: str, target_version: int) -> str:\n    return name",
    "docstring": "Default matching method",
    "type": "function",
    "file_path": "pytorch\\torch\\fx\\passes\\param_fetch.py",
    "ast_data": "FunctionDef name:default_matching arg:name arg:target_version arguments arg arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "read",
    "source_code": "def read(self, n=-1):\n    self._preread_check()\n    if n == -1:\n        length = self.size() - self.tell()\n    else:\n        length = n\n    return self._prepare_value(self._read_buf.read(length))",
    "docstring": "Returns the contents of a file as a string. Starts reading from current position in file. Args: n: Read bytes if . If , reads to end of file. Returns: bytes of the file (or whole file) in bytes mode or bytes of the string if in string (regular) mode.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\lib\\io\\file_io.py",
    "ast_data": "FunctionDef name:read arg:self arg:n arguments arg arg Call If Compare Assign Call Call Assign Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_with_precomputed_nvals",
    "source_code": "def _with_precomputed_nvals(self):\n    return RowPartition(row_splits=self.row_splits(), row_lengths=self._row_lengths, value_rowids=self._value_rowids, nrows=self._nrows, nvals=self.nvals(), uniform_row_length=self._uniform_row_length, internal=_row_partition_factory_key)",
    "docstring": "Returns a copy of with precomputed.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\row_partition.py",
    "ast_data": "FunctionDef name:_with_precomputed_nvals arg:self arguments arg Return return:yes Call Call Call"
  },
  {
    "library": "scipy",
    "name": "set_matlab_compatible",
    "source_code": "def set_matlab_compatible(self):\n    self.mat_dtype = True\n    self.squeeze_me = False\n    self.chars_as_strings = False",
    "docstring": "Sets options to return arrays as MATLAB loads them",
    "type": "method",
    "file_path": "scipy\\scipy\\io\\matlab\\_miobase.py",
    "ast_data": "FunctionDef name:set_matlab_compatible arg:self arguments arg Assign Assign Assign"
  },
  {
    "library": "tensorflow",
    "name": "find_function_to_export",
    "source_code": "def find_function_to_export(saveable_view):\n    children = saveable_view.list_children(saveable_view.root)\n    possible_signatures = []\n    for name, child in children:\n        if not isinstance(child, (def_function.Function, defun.ConcreteFunction)):\n            continue\n        if name == DEFAULT_SIGNATURE_ATTR:\n            return child\n        concrete = _get_signature(child)\n        if concrete is not None and _valid_signature(concrete):\n            possible_signatures.append(concrete)\n    if len(possible_signatures) == 1:\n        single_function = possible_signatures[0]\n        signature = _get_signature(single_function)\n        if signature and _valid_signature(signature):\n            return signature\n    return None",
    "docstring": "Function to export, None if no suitable function was found.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\saved_model\\signature_serialization.py",
    "ast_data": "FunctionDef name:find_function_to_export arg:saveable_view arguments arg Assign Call Assign For If Call If Compare Return return:yes Assign Call If BoolOp Compare Call Call If Compare Call Assign Assign Call If BoolOp Call Return return:yes Return return:no"
  },
  {
    "library": "cherrypy",
    "name": "set_response",
    "source_code": "def set_response(self):\n    response = cherrypy.serving.response\n    clean_headers(self.code)\n    response.status = self.status\n    tb = None\n    if cherrypy.serving.request.show_tracebacks:\n        tb = format_exc()\n    response.headers.pop('Content-Length', None)\n    content = self.get_error_page(self.status, traceback=tb, message=self._message)\n    response.body = content\n    _be_ie_unfriendly(self.code)",
    "docstring": "Modify ``. Modifies status, headers, and body. CherryPy uses this internally, but you can also use it to create an HTTPError object and set its output without *raising* the exception.",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\_cperror.py",
    "ast_data": "FunctionDef name:set_response arg:self arguments arg Assign Call Assign Assign If Assign Call Call Assign Call Assign Call"
  },
  {
    "library": "tensorflow",
    "name": "_write_profile",
    "source_code": "def _write_profile(self, filename):\n    print_mdl.WriteProfile(filename)",
    "docstring": "Writes the profile to a file.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\profiler\\model_analyzer.py",
    "ast_data": "FunctionDef name:_write_profile arg:self arg:filename arguments arg arg Call"
  },
  {
    "library": "pytorch",
    "name": "LSTM",
    "source_code": "class LSTM(torch.ao.nn.quantizable.LSTM):\n    _FLOAT_MODULE = torch.ao.nn.quantizable.LSTM\n\n    def _get_name(self):\n        return 'QuantizedLSTM'\n\n    @classmethod\n    def from_float(cls, *args, **kwargs):\n        raise NotImplementedError('It looks like you are trying to convert a non-observed LSTM module. Please, see the examples on quantizable LSTMs.')\n\n    @classmethod\n    def from_observed(cls, other):\n        assert isinstance(other, cls._FLOAT_MODULE)\n        converted = torch.ao.quantization.convert(other, inplace=False, remove_qconfig=True)\n        converted.__class__ = cls\n        return converted",
    "docstring": "A quantized long short-term memory (LSTM). For the description and the argument types, please, refer to :class: Attributes: layers : instances of the .. note:: To access the weights and biases, you need to access them per layer. See examples in :class: Examples:: >>> # xdoctest: +SKIP >>> custom_module_config = { ... 'float_to_observed_custom_module_class': { ... nn.LSTM: nn.quantizable.LSTM, ... }, ... 'observed_to_quantized_custom_module_class': { ... nn.quantizable.LSTM: nn.quantized.LSTM, ... } ... } >>> tq.prepare(model, prepare_custom_module_class=custom_module_config) >>> tq.convert(model, convert_custom_module_class=custom_module_config)",
    "type": "class",
    "file_path": "pytorch\\torch\\ao\\nn\\quantized\\modules\\rnn.py",
    "ast_data": "ClassDef name:LSTM Assign FunctionDef name:_get_name arg:self arguments arg Return return:yes FunctionDef name:from_float arg:cls arguments arg arg arg Raise Call FunctionDef name:from_observed arg:cls arg:other arguments arg arg Call Assign Call Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "enumerate",
    "source_code": "def enumerate(self, start=0, name=None) -> 'DatasetV2':\n    max_value = np.iinfo(dtypes.int64.as_numpy_dtype).max\n    range_dataset = Dataset.range(start, max_value, name=name)\n    range_dataset = _apply_rewrite(range_dataset, 'replicate_on_split')\n    return Dataset.zip((range_dataset, self), name=name)",
    "docstring": "Enumerates the elements of this dataset. It is similar to python's . >>> dataset = tf.data.Dataset.from_tensor_slices([1, 2, 3]) >>> dataset = dataset.enumerate(start=5) >>> for pos, element in dataset.as_numpy_iterator(): ... print(tuple((pos.item(), element.item()))) (5, 1) (6, 2) (7, 3) >>> # The (nested) structure of the input dataset determines the >>> # structure of elements in the resulting dataset. >>> dataset = tf.data.Dataset.from_tensor_slices([(7, 8), (9, 10)]) >>> dataset = dataset.enumerate() >>> for pos, element in dataset.as_numpy_iterator(): ... print(tuple((pos.item(), element))) (0, array([7, 8], dtype=int32)) (1, array([ 9, 10], dtype=int32)) Args: start: A scalar , representing the start value for enumeration. name: Optional. A name for the tf.data operations used by . Returns: A new with the transformation applied as described above.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\data\\ops\\dataset_ops.py",
    "ast_data": "FunctionDef name:enumerate arg:self arg:start arg:name arguments arg arg arg Assign Call Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "set_url",
    "source_code": "def set_url(self, url):\n    self._url = url",
    "docstring": "Set the url for links in compatible backends.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\backend_bases.py",
    "ast_data": "FunctionDef name:set_url arg:self arg:url arguments arg arg Assign"
  },
  {
    "library": "tensorflow",
    "name": "get_func_graph_output",
    "source_code": "def get_func_graph_output(t):\n    for output in tensor.graph.outputs:\n        if output is t:\n            return t\n    identity_op = t.consumers()[0]\n    if identity_op.type == 'Identity' and any((identity_op.outputs[0] is t for t in tensor.graph.outputs)):\n        return identity_op.outputs[0]\n    return None",
    "docstring": "Returns t or Identity(t) whichever exists in graph outputs else None.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\while_v2.py",
    "ast_data": "FunctionDef name:get_func_graph_output arg:t arguments arg For If Compare Return return:yes Assign Call If BoolOp Compare Call Compare Return return:yes Return return:no"
  },
  {
    "library": "django",
    "name": "count",
    "source_code": "def count(self, val):\n    count = 0\n    for i in self:\n        if val == i:\n            count += 1\n    return count",
    "docstring": "Standard list count method",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\geos\\mutable_list.py",
    "ast_data": "FunctionDef name:count arg:self arg:val arguments arg arg Assign For If Compare Return return:yes"
  },
  {
    "library": "scipy",
    "name": "Exponential",
    "source_code": "class Exponential(Benchmark):\n    change_dimensionality = True\n\n    def __init__(self, dimensions=2):\n        Benchmark.__init__(self, dimensions)\n        self._bounds = list(zip([-1.0] * self.N, [1.0] * self.N))\n        self.global_optimum = [[0.0 for _ in range(self.N)]]\n        self.fglob = -1.0\n\n    def fun(self, x, *args):\n        self.nfev += 1\n        return -exp(-0.5 * sum(x ** 2.0))",
    "docstring": "Exponential [1] objective function. This class defines the Exponential global optimization problem. This is a multimodal minimization problem defined as follows: .. math:: f_{\\text{Exponential}}(x) = -e^{-0.5 \\sum_{i=1}^n x_i^2} Here, :math: represents the number of dimensions and :math: for :math:. *Global optimum*: :math: for :math: for :math: .. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions For Global Optimization Problems Int. Journal of Mathematical Modelling and Numerical Optimisation, 2013, 4, 150-194. TODO Jamil are missing a minus sign on fglob",
    "type": "class",
    "file_path": "scipy\\benchmarks\\benchmarks\\go_benchmark_functions\\go_funcs_E.py",
    "ast_data": "ClassDef name:Exponential Assign FunctionDef name:__init__ arg:self arg:dimensions arguments arg arg Call Assign Call Call Assign Call Assign FunctionDef name:fun arg:self arg:x arguments arg arg arg Return return:yes Call Call"
  },
  {
    "library": "matplotlib",
    "name": "flag",
    "source_code": "def flag() -> None:\n    set_cmap('flag')",
    "docstring": "Set the colormap to 'flag'. This changes the default colormap as well as the colormap of the current image if there is one. See `` for more information.",
    "type": "function",
    "file_path": "matplotlib\\lib\\matplotlib\\pyplot.py",
    "ast_data": "FunctionDef name:flag arguments Call"
  },
  {
    "library": "pytorch",
    "name": "load_state_dict",
    "source_code": "@override\ndef load_state_dict(self, state_dict: dict[str, Any]) -> None:\n    _schedulers = state_dict.pop('_schedulers')\n    self.__dict__.update(state_dict)\n    state_dict['_schedulers'] = _schedulers\n    for idx, s in enumerate(_schedulers):\n        self._schedulers[idx].load_state_dict(s)",
    "docstring": "Load the scheduler's state. Args: state_dict (dict): scheduler state. Should be an object returned from a call to :meth:.",
    "type": "method",
    "file_path": "pytorch\\torch\\optim\\lr_scheduler.py",
    "ast_data": "FunctionDef name:load_state_dict arg:self arg:state_dict arguments arg arg Assign Call Call Assign For Call Call"
  },
  {
    "library": "scipy",
    "name": "FortranEOFError",
    "source_code": "class FortranEOFError(TypeError, OSError):\n    pass",
    "docstring": "Indicates that the file ended properly. This error descends from TypeError because the code used to raise TypeError (and this was the only way to know that the file had ended) so users might have ``.",
    "type": "class",
    "file_path": "scipy\\scipy\\io\\_fortran.py",
    "ast_data": "ClassDef name:FortranEOFError"
  },
  {
    "library": "tensorflow",
    "name": "CompositeTensorGradientProtocol",
    "source_code": "@runtime_checkable\nclass CompositeTensorGradientProtocol(Protocol):\n    __composite_gradient__: CompositeTensorGradient",
    "docstring": "Protocol for adding gradient support to CompositeTensors.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\composite_tensor_gradient.py",
    "ast_data": "ClassDef name:CompositeTensorGradientProtocol"
  },
  {
    "library": "pytorch",
    "name": "maybe_disable_thunkify",
    "source_code": "@contextmanager\ndef maybe_disable_thunkify() -> Generator[None, None, None]:\n    proxy_mode = get_proxy_mode()\n    if proxy_mode is not None:\n        with _enable_thunkify(proxy_mode.tracer, enable=False):\n            yield\n    else:\n        yield",
    "docstring": "Within a context, disable thunkification. See :func: for more details. This is helpful if you have a wrapper function which you want to enable thunkification on, but in some segment on the inside (say, the original user function), you want to disable thunkification as you know it is not needed there.",
    "type": "function",
    "file_path": "pytorch\\torch\\fx\\experimental\\proxy_tensor.py",
    "ast_data": "FunctionDef name:maybe_disable_thunkify arguments Assign Call If Compare With Call"
  },
  {
    "library": "pandas",
    "name": "_gotitem",
    "source_code": "def _gotitem(self, key, ndim: int, subset=None):\n    grouper = self._grouper\n    if subset is None:\n        subset = self.obj\n        if key is not None:\n            subset = subset[key]\n        else:\n            assert subset.ndim == 1\n    if ndim == 1:\n        assert subset.ndim == 1\n    grouped = get_groupby(subset, by=None, grouper=grouper, group_keys=self.group_keys)\n    return grouped",
    "docstring": "Sub-classes to define. Return a sliced object. Parameters ---------- key : string / list of selections ndim : {1, 2} requested ndim of result subset : object, default None subset to act on",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\resample.py",
    "ast_data": "FunctionDef name:_gotitem arg:self arg:key arg:ndim arg:subset arguments arg arg arg arg Assign If Compare Assign If Compare Assign Compare If Compare Compare Assign Call Return return:yes"
  },
  {
    "library": "kornia",
    "name": "to_mask",
    "source_code": "def to_mask(self, height: int, width: int) -> torch.Tensor:\n    if self._data.requires_grad:\n        raise RuntimeError(\"Boxes.to_tensor isn't differentiable. Please, create boxes from tensors with `requires_grad=False`.\")\n    if self._is_batched:\n        mask = torch.zeros((self._data.shape[0], self._data.shape[1], height, width), dtype=self.dtype, device=self.device)\n    else:\n        mask = zeros((self._data.shape[0], height, width), dtype=self.dtype, device=self.device)\n    clipped_boxes_xyxy = cast(torch.Tensor, self.to_tensor('xyxy', as_padded_sequence=True))\n    clipped_boxes_xyxy[..., ::2].clamp_(0, width)\n    clipped_boxes_xyxy[..., 1::2].clamp_(0, height)\n    for mask_channel, box_xyxy in zip(mask.view(-1, height, width), clipped_boxes_xyxy.view(-1, 4).round().int()):\n        mask_channel[box_xyxy[1]:box_xyxy[3], box_xyxy[0]:box_xyxy[2]] = 1\n    return mask",
    "docstring": "Convert 2D boxes to masks. Covered area is 1 and the remaining is 0. Args: height: height of the masked image/images. width: width of the masked image/images. Returns: the output mask tensor, shape of :math: or :math: and dtype of :func: (it can be any floating point dtype). Note: It is currently non-differentiable. Examples: >>> boxes = Boxes(torch.tensor([[ # Equivalent to boxes = Boxes.from_tensor([[1,1,4,3]]) ... [1., 1.], ... [4., 1.], ... [4., 3.], ... [1., 3.], ... ]])) # 1x4x2 >>> boxes.to_mask(5, 5) tensor([[[0., 0., 0., 0., 0.], [0., 1., 1., 1., 1.], [0., 1., 1., 1., 1.], [0., 1., 1., 1., 1.], [0., 0., 0., 0., 0.]]])",
    "type": "method",
    "file_path": "kornia\\kornia\\geometry\\boxes.py",
    "ast_data": "FunctionDef name:to_mask arg:self arg:height arg:width arguments arg arg arg If Raise Call If Assign Call Assign Call Assign Call Call Call Call For Call Call Call Call Call Assign Return return:yes"
  },
  {
    "library": "scipy",
    "name": "to_zpk",
    "source_code": "def to_zpk(self):\n    return ZerosPolesGain(*tf2zpk(self.num, self.den), **self._dt_dict)",
    "docstring": "Convert system representation to . Returns ------- sys : instance of Zeros, poles, gain representation of the current system",
    "type": "method",
    "file_path": "scipy\\scipy\\signal\\_ltisys.py",
    "ast_data": "FunctionDef name:to_zpk arg:self arguments arg Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "register_loss_scale_wrapper",
    "source_code": "@tf_export('__internal__.mixed_precision.register_loss_scale_wrapper', v1=[])\ndef register_loss_scale_wrapper(optimizer_cls, wrapper_fn, wrapper_cls=None):\n    _REGISTERED_WRAPPER_OPTIMIZER_CLS[optimizer_cls] = (wrapper_fn, wrapper_cls or wrapper_fn)",
    "docstring": "Registers a loss scale optimizer wrapper. automatically wraps an optimizer with an optimizer wrapper that performs loss scaling. This function registers a triple that is used by , where is called to create a instance that wraps an instance. Args: optimizer_cls: A base optimizer class, e.g. . wrapper_fn: A function that takes in arguments \"optimizer\" and \"loss_scale\", and returns a loss scale optimizer of type \"wrapper_cls\" that wraps \"optimizer\". wrapper_cls: A loss scale optimizer class. Defaults to , in which case should be a loss scale optimizer class whose constructor takes in arguments \"optimizer\" and \"loss_scale\".",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\training\\experimental\\mixed_precision.py",
    "ast_data": "FunctionDef name:register_loss_scale_wrapper arg:optimizer_cls arg:wrapper_fn arg:wrapper_cls arguments arg arg arg Assign BoolOp Call"
  },
  {
    "library": "django",
    "name": "generate_altered_options",
    "source_code": "def generate_altered_options(self):\n    models_to_check = self.kept_model_keys.union(self.kept_proxy_keys, self.kept_unmanaged_keys, self.old_unmanaged_keys & self.new_model_keys, self.old_model_keys & self.new_unmanaged_keys)\n    for app_label, model_name in sorted(models_to_check):\n        old_model_name = self.renamed_models.get((app_label, model_name), model_name)\n        old_model_state = self.from_state.models[app_label, old_model_name]\n        new_model_state = self.to_state.models[app_label, model_name]\n        old_options = {key: value for key, value in old_model_state.options.items() if key in AlterModelOptions.ALTER_OPTION_KEYS}\n        new_options = {key: value for key, value in new_model_state.options.items() if key in AlterModelOptions.ALTER_OPTION_KEYS}\n        if old_options != new_options:\n            self.add_operation(app_label, operations.AlterModelOptions(name=model_name, options=new_options))",
    "docstring": "Work out if any non-schema-affecting options have changed and make an operation to represent them in state changes (in case Python code in migrations needs them).",
    "type": "method",
    "file_path": "django\\django\\db\\migrations\\autodetector.py",
    "ast_data": "FunctionDef name:generate_altered_options arg:self arguments arg Assign Call For Call Assign Call Assign Assign Assign Call Compare Assign Call Compare If Compare Call Call"
  },
  {
    "library": "pytorch",
    "name": "_iterate_state_dict",
    "source_code": "def _iterate_state_dict(iter_object: Any, dtensor_func: Callable, sharded_tensor_func: Callable, tensor_func: Callable):\n    if isinstance(iter_object, DTensor):\n        return dtensor_func(iter_object)\n    elif isinstance(iter_object, ShardedTensor):\n        return sharded_tensor_func(iter_object)\n    elif isinstance(iter_object, torch.Tensor):\n        return tensor_func(iter_object)\n    elif isinstance(iter_object, (int, float, str, bytes, io.BytesIO)) or iter_object is None:\n        return iter_object\n    elif isinstance(iter_object, dict):\n        for key, value in iter_object.items():\n            iter_object[key] = _iterate_state_dict(value, dtensor_func, sharded_tensor_func, tensor_func)\n        return iter_object\n    elif isinstance(iter_object, (list, tuple)):\n        ret = [_iterate_state_dict(v, dtensor_func, sharded_tensor_func, tensor_func) for v in iter_object]\n        if isinstance(iter_object, tuple):\n            ret = tuple(ret)\n        return ret",
    "docstring": "Iterate through the state dict, applying the given functions to each tensor type and update the state dict in place. Args: iter_object (Any): the target state_dict. sharded_tensor_func (Callable): the function to apply to ShardedTensor dtensor_func (Callable): the function to apply to DTensor tensor_func (Callable): the function to apply to Tensor # TODO: let state_dict_util._iterate_state_dict() to support in place option so we don't need to have two versions of _iterate_state_dict.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\checkpoint\\planner_helpers.py",
    "ast_data": "FunctionDef name:_iterate_state_dict arg:iter_object arg:dtensor_func arg:sharded_tensor_func arg:tensor_func arguments arg arg arg arg If Call Return return:yes Call If Call Return return:yes Call If Call Return return:yes Call If BoolOp Call Compare Return return:yes If Call For Call Assign Call Return return:yes If Call Assign Call If Call Assign Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_update_losses",
    "source_code": "def _update_losses(self, stages, losses):\n    if not isinstance(stages, list):\n        stages = [stages]\n    contains_last_stage = any((stage.is_last for stage in stages))\n    if contains_last_stage and losses is not None:\n        if len(self._internal_losses) != self._n_microbatches:\n            raise RuntimeError(f'Expecting {self._n_microbatches} losses but got {len(self._internal_losses)}')\n        losses.clear()\n        losses.extend(self._internal_losses)\n    self._internal_losses.clear()",
    "docstring": "Update the losses to those in the internal state",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\pipelining\\schedules.py",
    "ast_data": "FunctionDef name:_update_losses arg:self arg:stages arg:losses arguments arg arg arg If Call Assign Assign Call If BoolOp Compare If Compare Call Raise Call Call Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "promote",
    "source_code": "def promote(self, source_path, new_name):\n    if not isinstance(new_name, str):\n        raise ValueError('new_name is not a string')\n    if not isinstance(source_path, (list, tuple)):\n        raise ValueError('source_path must be a list or tuple')\n    if len(source_path) < 2:\n        raise ValueError('source_path must have length at least two')\n    grandparent_path = source_path[:-2]\n    new_field = self._promote_helper(source_path, grandparent_path)\n    new_path = grandparent_path + (new_name,)\n    return self.with_updates({new_path: new_field})",
    "docstring": "Promotes a field, merging dimensions between grandparent and parent. >>> d = [ ... {'docs': [{'tokens':[1, 2]}, {'tokens':[3]}]}, ... {'docs': [{'tokens':[7]}]}] >>> st = tf.experimental.StructuredTensor.from_pyval(d) >>> st2 =st.promote(('docs','tokens'), 'docs_tokens') >>> st2[0]['docs_tokens'] >>> st2[1]['docs_tokens'] Args: source_path: the path of the field or substructure to promote; must have length at least 2. new_name: the name of the new field (must be a string). Returns: a modified structured tensor with the new field as a child of the grandparent of the source_path. Raises: ValueError: if source_path is not a list or a tuple or has a length less than two, or new_name is not a string, or the rank of source_path is unknown and it is needed.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\structured\\structured_tensor.py",
    "ast_data": "FunctionDef name:promote arg:self arg:source_path arg:new_name arguments arg arg arg If Call Raise Call If Call Raise Call If Compare Call Raise Call Assign Assign Call Assign Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "parents",
    "source_code": "@abc.abstractproperty\ndef parents(self):\n    pass",
    "docstring": "Returns a list of immediate raw feature and FeatureColumn dependencies. For example: # For the following feature columns a = numeric_column('f1') c = crossed_column(a, 'f2') # The expected parents are: a.parents = ['f1'] c.parents = [a, 'f2']",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\feature_column\\feature_column_v2_types.py",
    "ast_data": "FunctionDef name:parents arg:self arguments arg"
  },
  {
    "library": "numpy",
    "name": "_parse_possible_contraction",
    "source_code": "def _parse_possible_contraction(positions, input_sets, output_set, idx_dict, memory_limit, path_cost, naive_cost):\n    contract = _find_contraction(positions, input_sets, output_set)\n    idx_result, new_input_sets, idx_removed, idx_contract = contract\n    new_size = _compute_size_by_dict(idx_result, idx_dict)\n    if new_size > memory_limit:\n        return None\n    old_sizes = (_compute_size_by_dict(input_sets[p], idx_dict) for p in positions)\n    removed_size = sum(old_sizes) - new_size\n    cost = _flop_count(idx_contract, idx_removed, len(positions), idx_dict)\n    sort = (-removed_size, cost)\n    if path_cost + cost > naive_cost:\n        return None\n    return [sort, positions, new_input_sets]",
    "docstring": "Compute the cost (removed size + flops) and resultant indices for performing the contraction specified by ``. Parameters ---------- positions : tuple of int The locations of the proposed tensors to contract. input_sets : list of sets The indices found on each tensors. output_set : set The output indices of the expression. idx_dict : dict Mapping of each index to its size. memory_limit : int The total allowed size for an intermediary tensor. path_cost : int The contraction cost so far. naive_cost : int The cost of the unoptimized expression. Returns ------- cost : (int, int) A tuple containing the size of any indices removed, and the flop cost. positions : tuple of int The locations of the proposed tensors to contract. new_input_sets : list of sets The resulting new list of indices if this proposed contraction is performed.",
    "type": "function",
    "file_path": "numpy\\numpy\\_core\\einsumfunc.py",
    "ast_data": "FunctionDef name:_parse_possible_contraction arg:positions arg:input_sets arg:output_set arg:idx_dict arg:memory_limit arg:path_cost arg:naive_cost arguments arg arg arg arg arg arg arg Assign Call Assign Assign Call If Compare Return return:no Assign Call Assign Call Assign Call Call Assign If Compare Return return:no Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "OS",
    "source_code": "class OS(ArgparseableEnum):\n    LINUX = enum.auto()\n    DARWIN = enum.auto()\n    WINDOWS = enum.auto()",
    "docstring": "Modeled after the values returned by .",
    "type": "class",
    "file_path": "tensorflow\\third_party\\xla\\build_tools\\configure\\configure.py",
    "ast_data": "ClassDef name:OS Assign Call Assign Call Assign Call"
  },
  {
    "library": "scipy",
    "name": "define_parameters",
    "source_code": "def define_parameters(self, *parameters):\n    new_symbols = {param.name: param.symbol for param in parameters}\n    self.symbols.update(new_symbols)",
    "docstring": "Records any parameters used to define the endpoints of the domain. Adds the keyword name of each parameter and its text representation to the attribute as key:value pairs. For instance, a parameter may be passed into to a distribution's initializer using the keyword , and the corresponding string representation may be '\\log(a)'. To form the text representation of the domain for use in documentation, the _Domain object needs to map from the keyword name used in the code to the string representation. Returns None, but updates the attribute. Parameters ---------- *parameters : _Parameter objects Parameters that may define the endpoints of the domain.",
    "type": "method",
    "file_path": "scipy\\scipy\\stats\\_distribution_infrastructure.py",
    "ast_data": "FunctionDef name:define_parameters arg:self arguments arg arg Assign Call"
  },
  {
    "library": "pandas",
    "name": "grouped_reduce",
    "source_code": "def grouped_reduce(self, func: Callable) -> Self:\n    result_blocks: list[Block] = []\n    for blk in self.blocks:\n        if blk.is_object:\n            for sb in blk._split():\n                applied = sb.apply(func)\n                result_blocks = extend_blocks(applied, result_blocks)\n        else:\n            applied = blk.apply(func)\n            result_blocks = extend_blocks(applied, result_blocks)\n    if len(result_blocks) == 0:\n        nrows = 0\n    else:\n        nrows = result_blocks[0].values.shape[-1]\n    index = default_index(nrows)\n    return type(self).from_blocks(result_blocks, [self.axes[0], index])",
    "docstring": "Apply grouped reduction function blockwise, returning a new BlockManager. Parameters ---------- func : grouped reduction function Returns ------- BlockManager",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\internals\\managers.py",
    "ast_data": "FunctionDef name:grouped_reduce arg:self arg:func arguments arg arg For If For Call Assign Call Assign Call Assign Call Assign Call If Compare Call Assign Assign Assign Call Return return:yes Call Call"
  },
  {
    "library": "django",
    "name": "__init__",
    "source_code": "def __init__(self, routers=None):\n    self._routers = routers",
    "docstring": "If routers is not specified, default to settings.DATABASE_ROUTERS.",
    "type": "method",
    "file_path": "django\\django\\db\\utils.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:routers arguments arg arg Assign"
  },
  {
    "library": "numpy",
    "name": "polymul",
    "source_code": "def polymul(c1, c2):\n    [c1, c2] = pu.as_series([c1, c2])\n    ret = np.convolve(c1, c2)\n    return pu.trimseq(ret)",
    "docstring": "Multiply one polynomial by another. Returns the product of two polynomials * . The arguments are sequences of coefficients, from lowest order term to highest, e.g., [1,2,3] represents the polynomial `` Parameters ---------- c1, c2 : array_like 1-D arrays of coefficients representing a polynomial, relative to the \"standard\" basis, and ordered from lowest order term to highest. Returns ------- out : ndarray Of the coefficients of their product. See Also -------- polyadd, polysub, polymulx, polydiv, polypow Examples -------- >>> from numpy.polynomial import polynomial as P >>> c1 = (1, 2, 3) >>> c2 = (3, 2, 1) >>> P.polymul(c1, c2) array([ 3., 8., 14., 8., 3.])",
    "type": "function",
    "file_path": "numpy\\numpy\\polynomial\\polynomial.py",
    "ast_data": "FunctionDef name:polymul arg:c1 arg:c2 arguments arg arg Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "set_data",
    "source_code": "def set_data(self, t, f1, f2, *, where=None):\n    t, f1, f2 = self.axes._fill_between_process_units(self.t_direction, self._f_direction, t, f1, f2)\n    verts = self._make_verts(t, f1, f2, where)\n    self.set_verts(verts)",
    "docstring": "Set new values for the two bounding curves. Parameters ---------- t : array-like The ``. Note that this definition implies that an isolated *True* value between two *False* values in *where* will not result in filling. Both sides of the *True* position remain unfilled due to the adjacent *False* values. See Also -------- .PolyCollection.set_verts, .Line2D.set_data",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\collections.py",
    "ast_data": "FunctionDef name:set_data arg:self arg:t arg:f1 arg:f2 arguments arg arg arg arg arg Assign Call Assign Call Call"
  },
  {
    "library": "tensorflow",
    "name": "device_name_map",
    "source_code": "def device_name_map(self):\n    return {device_id: self._device_by_id[device_id].device_name for device_id in self._device_by_id}",
    "docstring": "Get a map mapping device IDs to device names.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\debug\\lib\\debug_events_reader.py",
    "ast_data": "FunctionDef name:device_name_map arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "predict",
    "source_code": "def predict(self, model, x, batch_size=None, verbose=0, steps=None, callbacks=None, **kwargs):\n    dist_utils.validate_inputs(x=x, y=None)\n    batch_size, steps = dist_utils.process_batch_and_step_size(model._distribution_strategy, x, batch_size, steps, ModeKeys.PREDICT)\n    batch_size = model._validate_or_infer_batch_size(batch_size, steps, x)\n    dataset = model._distribution_standardize_user_data(x, batch_size=batch_size, allow_partial_batch=True)\n    if backend.is_tpu_strategy(model._distribution_strategy):\n        steps = training_utils_v1.infer_steps_for_dataset(model, dataset, steps, steps_name='steps')\n        if steps is None:\n            raise ValueError('Number of steps could not be inferred from the data, please pass the steps argument.')\n        if not context.executing_eagerly():\n            return experimental_tpu_predict_loop(model, dataset, verbose=verbose, steps=steps, callbacks=callbacks)\n    return training_arrays_v1.predict_loop(model, dataset, batch_size=batch_size, verbose=verbose, steps=steps, callbacks=callbacks)",
    "docstring": "Predict loop for Distribution Strategies.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\training_distributed_v1.py",
    "ast_data": "FunctionDef name:predict arg:self arg:model arg:x arg:batch_size arg:verbose arg:steps arg:callbacks arguments arg arg arg arg arg arg arg arg Call Assign Call Assign Call Assign Call If Call Assign Call If Compare Raise Call If Call Return return:yes Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_use_merge_call",
    "source_code": "def _use_merge_call(self):\n    return True",
    "docstring": "Whether to use merge-calls inside the distributed strategy.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\distribute_lib.py",
    "ast_data": "FunctionDef name:_use_merge_call arg:self arguments arg Return return:yes"
  },
  {
    "library": "django",
    "name": "__xor__",
    "source_code": "def __xor__(self, other):\n    return self.sym_difference(other)",
    "docstring": "Return the symmetric difference of this Geometry and the other.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\geos\\geometry.py",
    "ast_data": "FunctionDef name:__xor__ arg:self arg:other arguments arg arg Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "decorate",
    "source_code": "def decorate(f):\n    if vcs not in HANDLERS:\n        HANDLERS[vcs] = {}\n    HANDLERS[vcs][method] = f\n    return f",
    "docstring": "Store f in HANDLERS[vcs][method].",
    "type": "function",
    "file_path": "pandas\\pandas\\_version.py",
    "ast_data": "FunctionDef name:decorate arg:f arguments arg If Compare Assign Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_DepthwiseConv2dNativeBackpropInputGrad",
    "source_code": "@ops.RegisterGradient('DepthwiseConv2dNativeBackpropInput')\ndef _DepthwiseConv2dNativeBackpropInputGrad(op: ops.Operation, grad):\n    return [None, gen_nn_ops.depthwise_conv2d_native_backprop_filter(grad, array_ops.shape(op.inputs[1]), op.inputs[2], dilations=op.get_attr('dilations'), strides=op.get_attr('strides'), padding=op.get_attr('padding'), explicit_paddings=op.get_attr('explicit_paddings'), data_format=op.get_attr('data_format')), gen_nn_ops.depthwise_conv2d_native(grad, op.inputs[1], dilations=op.get_attr('dilations'), strides=op.get_attr('strides'), padding=op.get_attr('padding'), explicit_paddings=op.get_attr('explicit_paddings'), data_format=op.get_attr('data_format'))]",
    "docstring": "The derivatives for deconvolution. Args: op: the Deconvolution op. grad: the tensor representing the gradient w.r.t. the output Returns: the gradients w.r.t. the input and the filter",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\nn_grad.py",
    "ast_data": "FunctionDef name:_DepthwiseConv2dNativeBackpropInputGrad arg:op arg:grad arguments arg arg Return return:yes Call Call Call Call Call Call Call Call Call Call Call Call Call Call"
  },
  {
    "library": "cherrypy",
    "name": "index",
    "source_code": "@cherrypy.expose\ndef index(self):\n    return '\\n            <a href=\"./remi\">Remi Delon</a><br/>\\n            <a href=\"./hendrik\">Hendrik Mans</a><br/>\\n            <a href=\"./lorenzo\">Lorenzo Lamas</a><br/>\\n        '",
    "docstring": "Produce HTTP response body of the users app index URI.",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\tutorial\\tut06_default_method.py",
    "ast_data": "FunctionDef name:index arg:self arguments arg Return return:yes"
  },
  {
    "library": "seaborn",
    "name": "fit_lowess",
    "source_code": "def fit_lowess(self):\n    from statsmodels.nonparametric.smoothers_lowess import lowess\n    grid, yhat = lowess(self.y, self.x).T\n    return (grid, yhat)",
    "docstring": "Fit a locally-weighted regression, which returns its own grid.",
    "type": "method",
    "file_path": "seaborn\\seaborn\\regression.py",
    "ast_data": "FunctionDef name:fit_lowess arg:self arguments arg Assign Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_is_torchpackage_dummy",
    "source_code": "def _is_torchpackage_dummy(self, module):\n    if not getattr(module, '__torch_package__', False):\n        return False\n    if not hasattr(module, '__path__'):\n        return False\n    if not hasattr(module, '__file__'):\n        return True\n    return module.__file__ is None",
    "docstring": "Returns true iff this module is an empty PackageNode in a torch.package. If you intern but never use in your code, then will be an empty module with no source. This can break cases where we are trying to re-package an object after adding a real dependency on , since OrderedImportere will resolve to the dummy package and stop there. See:",
    "type": "method",
    "file_path": "pytorch\\torch\\package\\importer.py",
    "ast_data": "FunctionDef name:_is_torchpackage_dummy arg:self arg:module arguments arg arg If Call Return return:yes If Call Return return:yes If Call Return return:yes Return return:yes Compare"
  },
  {
    "library": "pytorch",
    "name": "set_closed",
    "source_code": "@abstractmethod\ndef set_closed(self):\n    pass",
    "docstring": "Mark the rendezvous as closed.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\elastic\\rendezvous\\api.py",
    "ast_data": "FunctionDef name:set_closed arg:self arguments arg"
  },
  {
    "library": "django",
    "name": "extent",
    "source_code": "@property\ndef extent(self):\n    return self.envelope.tuple",
    "docstring": "Return the envelope as a 4-tuple, instead of as an Envelope object.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\gdal\\geometries.py",
    "ast_data": "FunctionDef name:extent arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "ListWrapper",
    "source_code": "class ListWrapper(object):\n\n    def __init__(self, list_to_wrap):\n        self._list = list_to_wrap\n\n    def as_list(self):\n        return self._list",
    "docstring": "A wrapper for lists to be treated as elements for .",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\utils\\tf_utils.py",
    "ast_data": "ClassDef name:ListWrapper FunctionDef name:__init__ arg:self arg:list_to_wrap arguments arg arg Assign FunctionDef name:as_list arg:self arguments arg Return return:yes"
  },
  {
    "library": "seaborn",
    "name": "handle_data_source",
    "source_code": "def handle_data_source(data: object) -> pd.DataFrame | Mapping | None:\n    if isinstance(data, pd.DataFrame) or hasattr(data, '__dataframe__'):\n        data = convert_dataframe_to_pandas(data)\n    elif data is not None and (not isinstance(data, Mapping)):\n        err = f'Data source must be a DataFrame or Mapping, not {type(data)!r}.'\n        raise TypeError(err)\n    return data",
    "docstring": "Convert the data source object to a common union representation.",
    "type": "function",
    "file_path": "seaborn\\seaborn\\_core\\data.py",
    "ast_data": "FunctionDef name:handle_data_source arg:data arguments arg If BoolOp Call Call Assign Call If BoolOp Compare Call Assign Call Raise Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "proj_transform",
    "source_code": "def proj_transform(xs, ys, zs, M):\n    vec = _vec_pad_ones(xs, ys, zs)\n    return _proj_transform_vec(vec, M)",
    "docstring": "Transform the points by the projection matrix *M*.",
    "type": "function",
    "file_path": "matplotlib\\lib\\mpl_toolkits\\mplot3d\\proj3d.py",
    "ast_data": "FunctionDef name:proj_transform arg:xs arg:ys arg:zs arg:M arguments arg arg arg arg Assign Call Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "fit",
    "source_code": "def fit(self, X, y):\n    super().fit(X, y)\n    self.x_scores_ = self._x_scores\n    self.y_scores_ = self._y_scores\n    return self",
    "docstring": "Fit model to data. Parameters ---------- X : array-like of shape (n_samples, n_features) Training vectors, where is the number of samples and is the number of predictors. y : array-like of shape (n_samples,) or (n_samples, n_targets) Target vectors, where is the number of samples and is the number of response variables. Returns ------- self : object Fitted model.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\cross_decomposition\\_pls.py",
    "ast_data": "FunctionDef name:fit arg:self arg:X arg:y arguments arg arg arg Call Call Assign Assign Return return:yes"
  },
  {
    "library": "pandas",
    "name": "__dataframe__",
    "source_code": "@abstractmethod\ndef __dataframe__(self, nan_as_null: bool=False, allow_copy: bool=True):\n    pass",
    "docstring": "Construct a new interchange object, potentially changing the parameters.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\interchange\\dataframe_protocol.py",
    "ast_data": "FunctionDef name:__dataframe__ arg:self arg:nan_as_null arg:allow_copy arguments arg arg arg"
  },
  {
    "library": "pandas",
    "name": "_iset_split_block",
    "source_code": "def _iset_split_block(self, blkno_l: int, blk_locs: np.ndarray | list[int], value: ArrayLike | None=None, refs: BlockValuesRefs | None=None) -> None:\n    blk = self.blocks[blkno_l]\n    if self._blklocs is None:\n        self._rebuild_blknos_and_blklocs()\n    nbs_tup = tuple(blk.delete(blk_locs))\n    if value is not None:\n        locs = blk.mgr_locs.as_array[blk_locs]\n        first_nb = new_block_2d(value, BlockPlacement(locs), refs=refs)\n    else:\n        first_nb = nbs_tup[0]\n        nbs_tup = tuple(nbs_tup[1:])\n    nr_blocks = len(self.blocks)\n    blocks_tup = self.blocks[:blkno_l] + (first_nb,) + self.blocks[blkno_l + 1:] + nbs_tup\n    self.blocks = blocks_tup\n    if not nbs_tup and value is not None:\n        return\n    self._blklocs[first_nb.mgr_locs.indexer] = np.arange(len(first_nb))\n    for i, nb in enumerate(nbs_tup):\n        self._blklocs[nb.mgr_locs.indexer] = np.arange(len(nb))\n        self._blknos[nb.mgr_locs.indexer] = i + nr_blocks",
    "docstring": "Removes columns from a block by splitting the block. Avoids copying the whole block through slicing and updates the manager after determining the new block structure. Optionally adds a new block, otherwise has to be done by the caller. Parameters ---------- blkno_l: The block number to operate on, relevant for updating the manager blk_locs: The locations of our block that should be deleted. value: The value to set as a replacement. refs: The reference tracking object of the value to set.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\internals\\managers.py",
    "ast_data": "FunctionDef name:_iset_split_block arg:self arg:blkno_l arg:blk_locs arg:value arg:refs arguments arg arg arg arg arg Assign If Compare Call Assign Call Call If Compare Assign Assign Call Call Assign Assign Call Assign Call Assign Assign If BoolOp Compare Return return:no Assign Call Call For Call Assign Call Call Assign"
  },
  {
    "library": "pytorch",
    "name": "forward",
    "source_code": "def forward(self, x: torch.Tensor, output_size: Optional[list[int]]=None) -> torch.Tensor:\n    assert isinstance(self.padding, tuple)\n    output_padding = self._output_padding(input, output_size, self.stride, self.padding, self.kernel_size, self.dilation)\n    weight_quant_dequant = self.get_weight()\n    result = F.conv_transpose3d(x, weight_quant_dequant, self.bias, self.stride, self.padding, output_padding, self.groups, self.dilation)\n    return result",
    "docstring": "we have: w(float) -- quant - dequant x(float) ------------- F.convTranspose3d --- In the full model, we will see w(float) -- quant - *dequant x -- quant --- *dequant -- *F.convTranspose3d --- *quant - dequant and the backend should be able to fuse the ops with into a quantized conv3d",
    "type": "method",
    "file_path": "pytorch\\torch\\ao\\nn\\quantized\\reference\\modules\\conv.py",
    "ast_data": "FunctionDef name:forward arg:self arg:x arg:output_size arguments arg arg arg Call Assign Call Assign Call Assign Call Return return:yes"
  },
  {
    "library": "pandas",
    "name": "memory_usage",
    "source_code": "def memory_usage(self, index: bool=True, deep: bool=False) -> Series:\n    result = self._constructor_sliced([c.memory_usage(index=False, deep=deep) for col, c in self.items()], index=self.columns, dtype=np.intp)\n    if index:\n        index_memory_usage = self._constructor_sliced(self.index.memory_usage(deep=deep), index=['Index'])\n        result = index_memory_usage._append(result)\n    return result",
    "docstring": "Return the memory usage of each column in bytes. The memory usage can optionally include the contribution of the index and elements of dtype. This value is displayed in by default. This can be suppressed by setting `objectFrequently Asked Questions object` dtype columns is ignored by default: >>> df.memory_usage(deep=True) Index 128 int64 40000 float64 40000 complex128 80000 object 180000 bool 5000 dtype: int64 Use a Categorical for efficient storage of an object-dtype column with many repeated values. >>> df[\"object\"].astype(\"category\").memory_usage(deep=True) 5136",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\frame.py",
    "ast_data": "FunctionDef name:memory_usage arg:self arg:index arg:deep arguments arg arg arg Assign Call Call Call If Assign Call Call Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_linop_cholesky",
    "source_code": "def _linop_cholesky(self) -> linear_operator.LinearOperator:\n\n    def _is_llt_product(self):\n        if len(self.operators) != 2:\n            return False\n        if not linear_operator_util.is_aat_form(self.operators):\n            return False\n        return isinstance(self.operators[0], linear_operator_lower_triangular.LinearOperatorLowerTriangular)\n    if not _is_llt_product(self):\n        return linear_operator_lower_triangular.LinearOperatorLowerTriangular(linalg_ops.cholesky(self.to_dense()), is_non_singular=True, is_self_adjoint=False, is_square=True)\n    left_op = self.operators[0]\n    if left_op.is_positive_definite:\n        return left_op\n    diag_sign = array_ops.expand_dims(math_ops.sign(left_op.diag_part()), axis=-2)\n    return linear_operator_lower_triangular.LinearOperatorLowerTriangular(tril=left_op.tril / diag_sign, is_non_singular=left_op.is_non_singular, is_self_adjoint=left_op.is_self_adjoint, is_positive_definite=True if left_op.is_positive_definite else None, is_square=True)",
    "docstring": "Computes Cholesky(LinearOperatorComposition).",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\linalg\\linear_operator_composition.py",
    "ast_data": "FunctionDef name:_linop_cholesky arg:self arguments arg FunctionDef name:_is_llt_product arg:self arguments arg If Compare Call Return return:yes If Call Return return:yes Return return:yes Call If Call Return return:yes Call Call Call Assign If Return return:yes Assign Call Call Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "prepare_loss_weights",
    "source_code": "def prepare_loss_weights(training_endpoints, loss_weights=None):\n    if loss_weights is None:\n        for e in training_endpoints:\n            e.loss_weight = 1.0\n    elif isinstance(loss_weights, collections.abc.Mapping):\n        generic_utils.check_for_unexpected_keys('loss_weights', loss_weights, [e.output_name for e in training_endpoints])\n        for e in training_endpoints:\n            e.loss_weight = loss_weights.get(e.output_name, 1.0)\n    elif isinstance(loss_weights, list):\n        if len(loss_weights) != len(training_endpoints):\n            raise ValueError('When passing a list as loss_weights, it should have one entry per model output. The model has ' + str(len(training_endpoints)) + ' outputs, but you passed loss_weights=' + str(loss_weights))\n        for w, e in zip(loss_weights, training_endpoints):\n            e.loss_weight = w\n    else:\n        raise TypeError('Could not interpret loss_weights argument: ' + str(loss_weights) + ' - expected a list of dicts.')",
    "docstring": "Converts loss weights to a list of loss weights. The result loss weights will be populated on the training endpoint. Args: training_endpoints: List of model training endpoints. loss_weights: Optional list or dictionary specifying scalar coefficients (Python floats) to weight the loss contributions of different model outputs. The loss value that will be minimized by the model will then be the *weighted sum* of all individual losses, weighted by the coefficients. If a list, it is expected to have a 1:1 mapping to the model's outputs. If a dict, it is expected to map output names (strings) to scalar coefficients. Raises: ValueError: If loss weight is a dict with key not in model output names, or if loss is a list with len not equal to model outputs.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\training_utils_v1.py",
    "ast_data": "FunctionDef name:prepare_loss_weights arg:training_endpoints arg:loss_weights arguments arg arg If Compare For Assign If Call Call For Assign Call If Call If Compare Call Call Raise Call Call Call Call For Call Assign Raise Call Call"
  },
  {
    "library": "tensorflow",
    "name": "gather_nd",
    "source_code": "def gather_nd(self, indices, name=None):\n    val = self._variable.gather_nd(indices, name=name)\n    return math_ops.cast(val, self._cast_dtype)",
    "docstring": "Gather slices of the variable into a Tensor.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\mixed_precision\\autocast_variable.py",
    "ast_data": "FunctionDef name:gather_nd arg:self arg:indices arg:name arguments arg arg arg Assign Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "ReLU6",
    "source_code": "class ReLU6(Hardtanh):\n\n    def __init__(self, inplace: bool=False):\n        super().__init__(0.0, 6.0, inplace)\n\n    def extra_repr(self) -> str:\n        inplace_str = 'inplace=True' if self.inplace else ''\n        return inplace_str",
    "docstring": "Applies the ReLU6 function element-wise. .. math:: \\text{ReLU6}(x) = \\min(\\max(0,x), 6) Args: inplace: can optionally do the operation in-place. Default: `(*)*(*)`, same shape as the input. .. image:: ../scripts/activation_images/ReLU6.png Examples:: >>> m = nn.ReLU6() >>> input = torch.randn(2) >>> output = m(input)",
    "type": "class",
    "file_path": "pytorch\\torch\\nn\\modules\\activation.py",
    "ast_data": "ClassDef name:ReLU6 FunctionDef name:__init__ arg:self arg:inplace arguments arg arg Call Call FunctionDef name:extra_repr arg:self arguments arg Assign Return return:yes"
  },
  {
    "library": "kornia",
    "name": "GemanMcclureLoss",
    "source_code": "class GemanMcclureLoss(Module):\n\n    def __init__(self, reduction: str='none') -> None:\n        super().__init__()\n        self.reduction = reduction\n\n    def forward(self, img1: Tensor, img2: Tensor) -> Tensor:\n        return geman_mcclure_loss(img1=img1, img2=img2, reduction=self.reduction)",
    "docstring": "Criterion that computes the Geman-McClure loss [2]. According to [1], we compute the Geman-McClure loss as follows: .. math:: \\text{WL}(x, y) = \\frac{2 (x - y)^{2}}{(x - y)^{2} + 4} Where: - :math: is the prediction. - :math: is the target to be regressed to. Reference: [1] [2] Bayesian image analysis: An application to single photon emission tomography, Geman and McClure, 1985 Args: reduction: Specifies the reduction to apply to the output: `(*)`. - img2: the target tensor with the same shape as img1. Example: >>> criterion = GemanMcclureLoss(reduction=\"mean\") >>> img1 = torch.randn(2, 3, 32, 2107, requires_grad=True) >>> img2 = torch.randn(2, 3, 32, 2107) >>> output = criterion(img1, img2) >>> output.backward()",
    "type": "class",
    "file_path": "kornia\\kornia\\losses\\geman_mcclure.py",
    "ast_data": "ClassDef name:GemanMcclureLoss FunctionDef name:__init__ arg:self arg:reduction arguments arg arg Call Call Assign FunctionDef name:forward arg:self arg:img1 arg:img2 arguments arg arg arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "build_tensor_info_from_op",
    "source_code": "def build_tensor_info_from_op(op):\n    if context.executing_eagerly():\n        raise RuntimeError('`build_tensor_info_from_op` is not supported in eager execution.')\n    return meta_graph_pb2.TensorInfo(dtype=types_pb2.DT_INVALID, tensor_shape=tensor_shape.unknown_shape().as_proto(), name=op.name)",
    "docstring": "Utility function to build TensorInfo proto from an Op. Note that this function should be used with caution. It is strictly restricted to TensorFlow internal use-cases only. Please make sure you do need it before using it. This utility function overloads the TensorInfo proto by setting the name to the Op's name, dtype to DT_INVALID and tensor_shape as None. One typical usage is for the Op of the call site for the defunned function: Args: op: An Op whose name is used to build the TensorInfo. The name that points to the Op could be fetched at run time in the Loader session. Returns: A TensorInfo protocol buffer constructed based on the supplied argument. Raises: RuntimeError: If eager execution is enabled.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\saved_model\\utils_impl.py",
    "ast_data": "FunctionDef name:build_tensor_info_from_op arg:op arguments arg If Call Raise Call Return return:yes Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "output_classes",
    "source_code": "@property\ndef output_classes(self):\n    return nest.map_structure(lambda component_spec: component_spec._to_legacy_output_classes(), self._element_spec)",
    "docstring": "Returns the class of each component of an element of this iterator. The expected values are and . Returns: A nested structure of Python objects corresponding to each component of an element of this dataset.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\input_lib.py",
    "ast_data": "FunctionDef name:output_classes arg:self arguments arg Return return:yes Call arguments arg Call"
  },
  {
    "library": "matplotlib",
    "name": "locked_x0",
    "source_code": "@property\ndef locked_x0(self):\n    if self._locked_points.mask[0, 0]:\n        return None\n    else:\n        return self._locked_points[0, 0]",
    "docstring": "float or None: The value used for the locked x0.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\transforms.py",
    "ast_data": "FunctionDef name:locked_x0 arg:self arguments arg If Return return:no Return return:yes"
  },
  {
    "library": "cherrypy",
    "name": "proc_time",
    "source_code": "def proc_time(s):\n    return time.time() - s['Start Time']",
    "docstring": "Compute current HTTP request processing time.",
    "type": "function",
    "file_path": "cherrypy\\cherrypy\\lib\\cpstats.py",
    "ast_data": "FunctionDef name:proc_time arg:s arguments arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "CheckpointSaverListener",
    "source_code": "@tf_export(v1=['train.CheckpointSaverListener'])\nclass CheckpointSaverListener:\n\n    def begin(self):\n        pass\n\n    def before_save(self, session, global_step_value):\n        pass\n\n    def after_save(self, session, global_step_value):\n        pass\n\n    def end(self, session, global_step_value):\n        pass",
    "docstring": "Interface for listeners that take action before or after checkpoint save. triggers only in steps when is triggered, and provides callbacks at the following points: - before using the session - before each call to - after each call to - at the end of session To use a listener, implement a class and pass the listener to a , as in this example: A may simply take some action after every checkpoint save. It is also possible for the listener to use its own schedule to act less frequently, e.g. based on global_step_value. In this case, implementors should implement the method to handle actions related to the last checkpoint save. But the listener should not act twice if already handled this last checkpoint save. A can request training to be stopped, by returning True in . Please note that, in replicated distributed training setting, only should use this behavior. Otherwise each worker will do their own evaluation, which may be wasteful of resources.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\training\\basic_session_run_hooks.py",
    "ast_data": "ClassDef name:CheckpointSaverListener FunctionDef name:begin arg:self arguments arg FunctionDef name:before_save arg:self arg:session arg:global_step_value arguments arg arg arg FunctionDef name:after_save arg:self arg:session arg:global_step_value arguments arg arg arg FunctionDef name:end arg:self arg:session arg:global_step_value arguments arg arg arg Call"
  },
  {
    "library": "tensorflow",
    "name": "elu",
    "source_code": "@dispatch.add_dispatch_support\n@doc_controls.do_not_generate_docs\ndef elu(x, alpha=1.0):\n    res = nn.elu(x)\n    if alpha == 1:\n        return res\n    else:\n        return array_ops.where_v2(x > 0, res, alpha * res)",
    "docstring": "Exponential linear unit. Args: x: A tensor or variable to compute the activation function for. alpha: A scalar, slope of negative section. Returns: A tensor.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\backend.py",
    "ast_data": "FunctionDef name:elu arg:x arg:alpha arguments arg arg Assign Call If Compare Return return:yes Return return:yes Call Compare"
  },
  {
    "library": "tensorflow",
    "name": "set_number_of_shards",
    "source_code": "def set_number_of_shards(self, number_of_shards):\n    for policy in self._sharding_policies:\n        policy.set_number_of_shards(number_of_shards)\n        policy.set_number_of_partitions(self._number_of_partitions)\n    self._validate()",
    "docstring": "Sets the number of shards to use for the InfeedQueue. Args: number_of_shards: number of ways to shard the InfeedQueue. Raises: ValueError: if number_of_shards is not > 0; or the policies have been frozen and number_of_shards was already set to something else.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\tpu\\tpu_feed.py",
    "ast_data": "FunctionDef name:set_number_of_shards arg:self arg:number_of_shards arguments arg arg For Call Call Call"
  },
  {
    "library": "numpy",
    "name": "logn",
    "source_code": "@set_module('numpy.lib.scimath')\n@array_function_dispatch(_logn_dispatcher)\ndef logn(n, x):\n    x = _fix_real_lt_zero(x)\n    n = _fix_real_lt_zero(n)\n    return nx.log(x) / nx.log(n)",
    "docstring": "Take log base n of x. If contains negative inputs, the answer is computed and returned in the complex domain. Parameters ---------- n : array_like The integer base(s) in which the log is taken. x : array_like The value(s) whose log base is (are) required. Returns ------- out : ndarray or scalar The log base of the value(s). If was a scalar, so is , otherwise an array is returned. Examples -------- >>> import numpy as np >>> np.set_printoptions(precision=4) >>> np.emath.logn(2, [4, 8]) array([2., 3.]) >>> np.emath.logn(2, [-4, -8, 8]) array([2.+4.5324j, 3.+4.5324j, 3.+0.j ])",
    "type": "function",
    "file_path": "numpy\\numpy\\lib\\_scimath_impl.py",
    "ast_data": "FunctionDef name:logn arg:n arg:x arguments arg arg Assign Call Assign Call Return return:yes Call Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_get_and_update_output_shapes_from_input",
    "source_code": "def _get_and_update_output_shapes_from_input(self, per_replica_input_shapes: Optional[List[TensorShape]]=None, per_replica_batch_size: Optional[int]=None):\n    per_replica_output_shapes = None\n    if per_replica_batch_size and per_replica_input_shapes is None:\n        logging.warning('per_replica_batch_size argument will be deprecated, please specify all the input shapes using per_replica_input_shapes argument.')\n        per_replica_output_shapes = self._get_output_shapes_from_batch_size(per_replica_batch_size)\n    if per_replica_input_shapes is not None:\n        if isinstance(per_replica_input_shapes, int):\n            logging.warning('Passing batch size to per_replica_input_shapes argument will be deprecated, please specify all the input shapes using per_replica_input_shapes argument.')\n            per_replica_output_shapes = self._get_output_shapes_from_batch_size(per_replica_input_shapes)\n        else:\n            nest.assert_same_structure(nest.flatten(per_replica_input_shapes), nest.flatten(self._feature_config))\n            per_replica_input_shapes = nest.flatten(per_replica_input_shapes)\n            per_replica_output_shapes = self._get_output_shapes_from_input_shapes(per_replica_input_shapes)\n    if per_replica_output_shapes is not None:\n        self._check_output_shapes(per_replica_output_shapes)\n        self._update_output_shapes(per_replica_output_shapes)\n    self._check_output_shapes_fully_defined()",
    "docstring": "Get and update the per replica output shapes from the input.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\tpu\\tpu_embedding_v2.py",
    "ast_data": "FunctionDef name:_get_and_update_output_shapes_from_input arg:self arg:per_replica_input_shapes arg:per_replica_batch_size arguments arg arg arg Assign If BoolOp Compare Call Assign Call If Compare If Call Call Assign Call Call Call Call Assign Call Assign Call If Compare Call Call Call"
  },
  {
    "library": "django",
    "name": "get_ancestor_link",
    "source_code": "def get_ancestor_link(self, ancestor):\n    if ancestor in self.parents:\n        return self.parents[ancestor]\n    for parent in self.parents:\n        parent_link = parent._meta.get_ancestor_link(ancestor)\n        if parent_link:\n            return self.parents[parent] or parent_link",
    "docstring": "Return the field on the current model which points to the given \"ancestor\". This is possible an indirect link (a pointer to a parent model, which points, eventually, to the ancestor). Used when constructing table joins for model inheritance. Return None if the model isn't an ancestor of this one.",
    "type": "method",
    "file_path": "django\\django\\db\\models\\options.py",
    "ast_data": "FunctionDef name:get_ancestor_link arg:self arg:ancestor arguments arg arg If Compare Return return:yes For Assign Call If Return return:yes BoolOp"
  },
  {
    "library": "scrapy",
    "name": "get_oldest",
    "source_code": "def get_oldest(class_name: str) -> Any:\n    for cls, wdict in live_refs.items():\n        if cls.__name__ == class_name:\n            if not wdict:\n                break\n            return min(wdict.items(), key=itemgetter(1))[0]\n    return None",
    "docstring": "Get the oldest object for a specific class name",
    "type": "function",
    "file_path": "scrapy\\scrapy\\utils\\trackref.py",
    "ast_data": "FunctionDef name:get_oldest arg:class_name arguments arg For Call If Compare If Return return:yes Call Call Call Return return:no"
  },
  {
    "library": "tensorflow",
    "name": "assert_nontrivial_match",
    "source_code": "@abc.abstractmethod\ndef assert_nontrivial_match(self):\n    pass",
    "docstring": "Raises an exception if only the root object matched.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\checkpoint\\checkpoint.py",
    "ast_data": "FunctionDef name:assert_nontrivial_match arg:self arguments arg"
  },
  {
    "library": "tensorflow",
    "name": "_get_parser",
    "source_code": "def _get_parser(use_v2_converter):\n    parser = argparse.ArgumentParser(description='Command line tool to run TensorFlow Lite Converter.')\n    parser.add_argument('--output_file', type=str, help='Full filepath of the output file.', required=True)\n    if use_v2_converter:\n        _get_tf2_flags(parser)\n    else:\n        _get_tf1_flags(parser)\n    parser.add_argument('--experimental_new_converter', action=_ParseBooleanFlag, nargs='?', default=True, help='Experimental flag, subject to change. Enables MLIR-based conversion instead of TOCO conversion. (default True)')\n    parser.add_argument('--experimental_new_quantizer', action=_ParseBooleanFlag, nargs='?', help='Experimental flag, subject to change. Enables MLIR-based quantizer instead of flatbuffer conversion. (default True)')\n    return parser",
    "docstring": "Returns an ArgumentParser for tflite_convert. Args: use_v2_converter: Indicates which converter to return. Return: ArgumentParser.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\lite\\python\\tflite_convert.py",
    "ast_data": "FunctionDef name:_get_parser arg:use_v2_converter arguments arg Assign Call Call If Call Call Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "If",
    "source_code": "def If(cond, inputs, then_branch, else_branch, name=None):\n    if isinstance(then_branch, function._DefinedFunction):\n        tlist = [_.type for _ in then_branch.definition.signature.output_arg]\n        return gen_functional_ops._if(cond, inputs, tlist, then_branch, else_branch, name=name)\n    then_out = then_branch.structured_outputs\n    else_out = else_branch.structured_outputs\n    nest.assert_same_structure(then_out, else_out, expand_composites=True)\n    tlist = nest.flatten(then_branch.output_dtypes)\n    ret = gen_functional_ops._if(cond, inputs, tlist, then_branch, else_branch, name=name)\n    return nest.pack_sequence_as(then_out, ret, expand_composites=True)",
    "docstring": "output = Cond(inputs) ? then_branch(inputs) : else_branch(inputs). Args: cond: A . A scalar. If the scalar is not a boolean, the scalar is converted to a boolean according to the following rule: if the scalar is a numerical value, non-zero means True and zero means False; if the scalar is a string, non-empty means True and empty means False. inputs: A list of input tensors. then_branch: A function takes 'inputs' and returns a list of tensors, whose types are the same as what else_branch returns. else_branch: A function takes 'inputs' and returns a list of tensors. whose types are the same as what then_branch returns. name: A name for the operation (optional). Returns: A list of tensors returned by either then_branch(inputs) or else_branch(inputs).",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\functional_ops.py",
    "ast_data": "FunctionDef name:If arg:cond arg:inputs arg:then_branch arg:else_branch arg:name arguments arg arg arg arg arg If Call Assign Return return:yes Call Assign Assign Call Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "_transpose_vectorized",
    "source_code": "def _transpose_vectorized(M):\n    return np.transpose(M, [0, 2, 1])",
    "docstring": "Transposition of an array of matrices *M*.",
    "type": "function",
    "file_path": "matplotlib\\lib\\matplotlib\\tri\\_triinterpolate.py",
    "ast_data": "FunctionDef name:_transpose_vectorized arg:M arguments arg Return return:yes Call"
  },
  {
    "library": "cryptography",
    "name": "key_size",
    "source_code": "@property\n@abc.abstractmethod\ndef key_size(self) -> int:\n    pass",
    "docstring": "The bit length of the prime modulus.",
    "type": "method",
    "file_path": "cryptography\\src\\cryptography\\hazmat\\primitives\\asymmetric\\dh.py",
    "ast_data": "FunctionDef name:key_size arg:self arguments arg"
  },
  {
    "library": "pytorch",
    "name": "_create_value_mapping",
    "source_code": "def _create_value_mapping(graph: ir.Graph) -> dict[str, ir.Value]:\n    values: dict[str, ir.Value] = {}\n    values.update(graph.initializers)\n    for input in graph.inputs:\n        if not input.name:\n            continue\n        values[input.name] = input\n    for node in graph:\n        for value in node.outputs:\n            if not value.name:\n                continue\n            values[value.name] = value\n    return values",
    "docstring": "Return a dictionary mapping names to values in the graph. The mapping does not include values from subgraphs. Args: graph: The graph to extract the mapping from. Returns: A dictionary mapping names to values.",
    "type": "function",
    "file_path": "pytorch\\torch\\onnx\\_internal\\exporter\\_onnx_program.py",
    "ast_data": "FunctionDef name:_create_value_mapping arg:graph arguments arg Call For If Assign For For If Assign Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "key_get",
    "source_code": "def key_get(obj: Any, kp: KeyPath) -> Any:\n    raise NotImplementedError('KeyPaths are not yet supported in cxx_pytree.')",
    "docstring": "Given an object and a key path, return the value at the key path.",
    "type": "function",
    "file_path": "pytorch\\torch\\utils\\_cxx_pytree.py",
    "ast_data": "FunctionDef name:key_get arg:obj arg:kp arguments arg arg Raise Call"
  },
  {
    "library": "pytorch",
    "name": "pretty_print_tree",
    "source_code": "def pretty_print_tree(self):\n    GraphInfoPrettyPrinter(self).pretty_print()",
    "docstring": "Pretty print tree. Each node represents a subgraph, showing the number of nodes in the subgraph and a check mark if the subgraph has output mismatch between torch and ONNX. The id of the subgraph is shown under the node. The object for any subgraph can be retrieved by calling . Example:: ==================================== Tree: ===================================== 5 X __2 X __1 ✓ id: | id: 0 | id: 00 | | | |__1 X (aten::relu) | id: 01 | |__3 X __1 ✓ id: 1 | id: 10 | |__2 X __1 X (aten::relu) id: 11 | id: 110 | |__1 ✓ id: 111 =========================== Mismatch leaf subgraphs: =========================== ['01', '110'] ============================= Mismatch node kinds: ============================= {'aten::relu': 2}",
    "type": "method",
    "file_path": "pytorch\\torch\\onnx\\verification.py",
    "ast_data": "FunctionDef name:pretty_print_tree arg:self arguments arg Call Call"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, replacements):\n    self.replacements = replacements\n    self.in_replacements = False\n    self.preserved_annos = {anno.Basic.DIRECTIVES, anno.Basic.EXTRA_LOOP_TEST, anno.Basic.ORIGIN, anno.Basic.SKIP_PROCESSING, anno.Static.ORIG_DEFINITIONS, 'function_context_name'}",
    "docstring": "Create a new ReplaceTransformer. Args: replacements: A mapping from placeholder names to (lists of) AST nodes that these placeholders will be replaced by.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\autograph\\pyct\\templates.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:replacements arguments arg arg Assign Assign Assign"
  },
  {
    "library": "tensorflow",
    "name": "select_replica_mirrored",
    "source_code": "def select_replica_mirrored(replica_id, structured):\n    assert_mirrored(structured)\n    return select_replica(replica_id, structured)",
    "docstring": "Specialize a nest of regular & mirrored values for one replica.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\distribute_utils.py",
    "ast_data": "FunctionDef name:select_replica_mirrored arg:replica_id arg:structured arguments arg arg Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_GatherGrad",
    "source_code": "@ops.RegisterGradient('Gather')\ndef _GatherGrad(op: ops.Operation, grad):\n    params = op.inputs[0]\n    with ops.colocate_with(params):\n        params_shape = array_ops.shape(params)\n    indices = op.inputs[1]\n    size = array_ops.expand_dims(array_ops.size(indices), 0)\n    values_shape = array_ops.concat([size, params_shape[1:]], 0)\n    values = array_ops.reshape(_IndexedSlicesToTensorNoWarning(grad), values_shape)\n    indices = array_ops.reshape(indices, size)\n    return [indexed_slices_lib.IndexedSlices(values, indices, params_shape), None]",
    "docstring": "Gradient for Gather op.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\array_grad.py",
    "ast_data": "FunctionDef name:_GatherGrad arg:op arg:grad arguments arg arg Assign With Call Assign Call Assign Assign Call Call Assign Call Assign Call Call Assign Call Return return:yes Call Call"
  },
  {
    "library": "matplotlib",
    "name": "add_patch",
    "source_code": "def add_patch(self, p):\n    _api.check_isinstance(mpatches.Patch, p=p)\n    self._set_artist_props(p)\n    if p.get_clip_path() is None:\n        p.set_clip_path(self.patch)\n    self._update_patch_limits(p)\n    self._children.append(p)\n    p._remove_method = self._children.remove\n    return p",
    "docstring": "Add a to the Axes; return the patch.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\axes\\_base.py",
    "ast_data": "FunctionDef name:add_patch arg:self arg:p arguments arg arg Call Call If Compare Call Call Call Call Assign Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "fix_extended_args",
    "source_code": "def fix_extended_args(instructions: list[Instruction]) -> int:\n    output: list[Instruction] = []\n\n    def maybe_pop_n(n):\n        for _ in range(n):\n            if output and output[-1].opcode == dis.EXTENDED_ARG:\n                output.pop()\n    for inst in instructions:\n        if inst.opcode == dis.EXTENDED_ARG:\n            inst.arg = 0\n        elif inst.arg and inst.arg > 16777215:\n            maybe_pop_n(3)\n            output.append(create_instruction('EXTENDED_ARG', arg=inst.arg >> 24))\n            output.append(create_instruction('EXTENDED_ARG', arg=inst.arg >> 16))\n            output.append(create_instruction('EXTENDED_ARG', arg=inst.arg >> 8))\n        elif inst.arg and inst.arg > 65535:\n            maybe_pop_n(2)\n            output.append(create_instruction('EXTENDED_ARG', arg=inst.arg >> 16))\n            output.append(create_instruction('EXTENDED_ARG', arg=inst.arg >> 8))\n        elif inst.arg and inst.arg > 255:\n            maybe_pop_n(1)\n            output.append(create_instruction('EXTENDED_ARG', arg=inst.arg >> 8))\n        output.append(inst)\n    added = len(output) - len(instructions)\n    assert added >= 0\n    instructions[:] = output\n    return added",
    "docstring": "Fill in correct argvals for EXTENDED_ARG ops",
    "type": "function",
    "file_path": "pytorch\\torch\\_dynamo\\bytecode_transformation.py",
    "ast_data": "FunctionDef name:fix_extended_args arg:instructions arguments arg FunctionDef name:maybe_pop_n arg:n arguments arg For Call If BoolOp Compare Call For If Compare Assign If BoolOp Compare Call Call Call Call Call Call Call If BoolOp Compare Call Call Call Call Call If BoolOp Compare Call Call Call Call Assign Call Call Compare Assign Return return:yes"
  },
  {
    "library": "pandas",
    "name": "_get_nearest_indexer",
    "source_code": "@final\ndef _get_nearest_indexer(self, target: Index, limit: int | None, tolerance) -> npt.NDArray[np.intp]:\n    if not len(self):\n        return self._get_fill_indexer(target, 'pad')\n    left_indexer = self.get_indexer(target, 'pad', limit=limit)\n    right_indexer = self.get_indexer(target, 'backfill', limit=limit)\n    left_distances = self._difference_compat(target, left_indexer)\n    right_distances = self._difference_compat(target, right_indexer)\n    op = operator.lt if self.is_monotonic_increasing else operator.le\n    indexer = np.where(op(left_distances, right_distances) | (right_indexer == -1), left_indexer, right_indexer)\n    if tolerance is not None:\n        indexer = self._filter_indexer_tolerance(target, indexer, tolerance)\n    return indexer",
    "docstring": "Get the indexer for the nearest index labels; requires an index with values that can be subtracted from each other (e.g., not strings or tuples).",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\indexes\\base.py",
    "ast_data": "FunctionDef name:_get_nearest_indexer arg:self arg:target arg:limit arg:tolerance arguments arg arg arg arg If Call Return return:yes Call Assign Call Assign Call Assign Call Assign Call Assign Assign Call Call Compare If Compare Assign Call Return return:yes"
  },
  {
    "library": "pandas",
    "name": "_validate_datetimelike_monotonic",
    "source_code": "def _validate_datetimelike_monotonic(self) -> None:\n    if self._on.hasnans:\n        self._raise_monotonic_error('values must not have NaT')\n    for group_indices in self._grouper.indices.values():\n        group_on = self._on.take(group_indices)\n        if not (group_on.is_monotonic_increasing or group_on.is_monotonic_decreasing):\n            on = 'index' if self.on is None else self.on\n            raise ValueError(f'Each group within {on} must be monotonic. Sort the values in {on} first.')",
    "docstring": "Validate that each group in self._on is monotonic",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\window\\rolling.py",
    "ast_data": "FunctionDef name:_validate_datetimelike_monotonic arg:self arguments arg If Call For Call Assign Call If BoolOp Assign Compare Raise Call"
  },
  {
    "library": "tensorflow",
    "name": "_BiasAddGradV1",
    "source_code": "@ops.RegisterGradient('BiasAddV1')\ndef _BiasAddGradV1(unused_bias_op: ops.Operation, received_grad):\n    reduction_dim_tensor = math_ops.range(array_ops.rank(received_grad) - 1)\n    return (received_grad, math_ops.reduce_sum(received_grad, reduction_dim_tensor))",
    "docstring": "Return the gradients for the 2 inputs of bias_op. The first input of unused_bias_op is the tensor t, and its gradient is just the gradient the unused_bias_op received. The second input of unused_bias_op is the bias vector which has one fewer dimension than \"received_grad\" (the batch dimension.) Its gradient is the received gradient Summed on the batch dimension, which is the first dimension. Args: unused_bias_op: The BiasOp for which we need to generate gradients. received_grad: Tensor. The gradients passed to the BiasOp. Returns: Two tensors, the first one for the \"tensor\" input of the BiasOp, the second one for the \"bias\" input of the BiasOp.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\nn_grad.py",
    "ast_data": "FunctionDef name:_BiasAddGradV1 arg:unused_bias_op arg:received_grad arguments arg arg Assign Call Call Return return:yes Call Call"
  },
  {
    "library": "matplotlib",
    "name": "disconnect",
    "source_code": "def disconnect(self, cid):\n    self._observers.disconnect(cid)",
    "docstring": "Remove the observer with connection id *cid*.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\widgets.py",
    "ast_data": "FunctionDef name:disconnect arg:self arg:cid arguments arg arg Call"
  },
  {
    "library": "pytorch",
    "name": "_compute_edge_sizes",
    "source_code": "def _compute_edge_sizes(n_fft, window_size):\n    left = (n_fft - window_size) // 2\n    right = n_fft - left - window_size\n    return (left, right)",
    "docstring": "Helper function to compute the sizes of the edges (left and right) of a given window centered within an FFT size.",
    "type": "function",
    "file_path": "pytorch\\torch\\onnx\\symbolic_opset17.py",
    "ast_data": "FunctionDef name:_compute_edge_sizes arg:n_fft arg:window_size arguments arg arg Assign Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "set_callback_parameters",
    "source_code": "def set_callback_parameters(callback_list, model, do_validation=False, batch_size=None, epochs=None, steps_per_epoch=None, samples=None, verbose=1, mode=ModeKeys.TRAIN):\n    metric_names = model.metrics_names\n    for cbk in callback_list:\n        if isinstance(cbk, (BaseLogger, ProgbarLogger)):\n            cbk.stateful_metrics = metric_names[1:]\n    callback_metrics = []\n    if mode != ModeKeys.PREDICT:\n        callback_metrics = copy.copy(metric_names)\n        if do_validation:\n            callback_metrics += ['val_' + n for n in metric_names]\n    callback_params = {'batch_size': batch_size, 'epochs': epochs, 'steps': steps_per_epoch, 'samples': samples, 'verbose': verbose, 'do_validation': do_validation, 'metrics': callback_metrics}\n    callback_list.set_params(callback_params)",
    "docstring": "Sets callback parameters. Args: callback_list: CallbackList instance. model: Model being trained. do_validation: Whether or not validation loop will be run. batch_size: Number of samples per batch. epochs: Number of epoch to train. steps_per_epoch: Number of batches to run per training epoch. samples: Number of training samples. verbose: int, 0 or 1. Keras logging verbosity to pass to ProgbarLogger. mode: String. One of ModeKeys.TRAIN, ModeKeys.TEST, or ModeKeys.PREDICT. Which loop mode to configure callbacks for.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\callbacks.py",
    "ast_data": "FunctionDef name:set_callback_parameters arg:callback_list arg:model arg:do_validation arg:batch_size arg:epochs arg:steps_per_epoch arg:samples arg:verbose arg:mode arguments arg arg arg arg arg arg arg arg arg Assign For If Call Assign Assign If Compare Assign Call If Assign Call"
  },
  {
    "library": "tensorflow",
    "name": "_ensure_node_in_anf",
    "source_code": "def _ensure_node_in_anf(self, parent, field, node):\n    if node is None:\n        return node\n    if _is_trivial(node):\n        return node\n    if isinstance(node, list):\n        return [self._ensure_node_in_anf(parent, field, n) for n in node]\n    if isinstance(node, gast.keyword):\n        node.value = self._ensure_node_in_anf(parent, field, node.value)\n        return node\n    if isinstance(node, (gast.Starred, gast.withitem, gast.slice)):\n        return self._ensure_fields_in_anf(node, parent, field)\n    if self._should_transform(parent, field, node):\n        return self._do_transform_node(node)\n    else:\n        return node",
    "docstring": "Puts in A-normal form, by replacing it with a variable if needed. The exact definition of A-normal form is given by the configuration. The parent and the incoming field name are only needed because the configuration may be context-dependent. Args: parent: An AST node, the parent of . field: The field name under which is the child of . node: An AST node, potentially to be replaced with a variable reference. Returns: node: An AST node; the argument if transformation was not necessary, or the new variable reference if it was.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\autograph\\pyct\\common_transformers\\anf.py",
    "ast_data": "FunctionDef name:_ensure_node_in_anf arg:self arg:parent arg:field arg:node arguments arg arg arg arg If Compare Return return:yes If Call Return return:yes If Call Return return:yes Call If Call Assign Call Return return:yes If Call Return return:yes Call If Call Return return:yes Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_allocate_pids",
    "source_code": "def _allocate_pids(self) -> None:\n    self._allocators_pid = self._alloc_pid()\n    self._chrome_trace.emit_pid('Allocators', self._allocators_pid)\n    for dev_stats in self._step_stats.dev_stats:\n        device_pid = self._alloc_pid()\n        self._device_pids[dev_stats.device] = device_pid\n        tensors_pid = self._alloc_pid()\n        self._tensor_pids[dev_stats.device] = tensors_pid\n        self._chrome_trace.emit_pid(dev_stats.device + ' Compute', device_pid)\n        self._chrome_trace.emit_pid(dev_stats.device + ' Tensors', tensors_pid)",
    "docstring": "Allocate fake process ids for each device in the step_stats_pb2.StepStats.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\client\\timeline.py",
    "ast_data": "FunctionDef name:_allocate_pids arg:self arguments arg Assign Call Call For Assign Call Assign Assign Call Assign Call Call"
  },
  {
    "library": "tensorflow",
    "name": "broadcast_dynamic_shape",
    "source_code": "def broadcast_dynamic_shape(shape_x, shape_y):\n    if not isinstance(shape_x, RaggedTensorDynamicShape):\n        raise TypeError('shape_x must be a RaggedTensorDynamicShape')\n    if not isinstance(shape_y, RaggedTensorDynamicShape):\n        raise TypeError('shape_y must be a RaggedTensorDynamicShape')\n    if shape_x.rank is None or shape_y.rank is None:\n        raise ValueError('Unable to broadcast: unknown rank')\n    broadcast_rank = max(shape_x.rank, shape_y.rank)\n    shape_x = shape_x.broadcast_to_rank(broadcast_rank)\n    shape_y = shape_y.broadcast_to_rank(broadcast_rank)\n    for axis in range(broadcast_rank):\n        shape_x = shape_x.broadcast_dimension(axis, shape_y.dimension_size(axis))\n        shape_y = shape_y.broadcast_dimension(axis, shape_x.dimension_size(axis))\n    return shape_x",
    "docstring": "Returns the shape formed by broadcasting two shapes to be compatible. Args: shape_x: A shape_y: A Returns: A . Raises: ValueError: If and are not broadcast-compatible.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\ragged_tensor_shape.py",
    "ast_data": "FunctionDef name:broadcast_dynamic_shape arg:shape_x arg:shape_y arguments arg arg If Call Raise Call If Call Raise Call If BoolOp Compare Compare Raise Call Assign Call Assign Call Assign Call For Call Assign Call Call Assign Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "query",
    "source_code": "def query(self) -> bool:\n    return super().query()",
    "docstring": "Check if all the work submitted has been completed. Returns: A boolean indicating if all kernels in this stream are completed.",
    "type": "method",
    "file_path": "pytorch\\torch\\xpu\\streams.py",
    "ast_data": "FunctionDef name:query arg:self arguments arg Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "_detect_fake_mode",
    "source_code": "def _detect_fake_mode(self) -> fake_tensor.FakeTensorMode | None:\n    fake_tensors = [node.meta.get('val') for node in self.module.graph.nodes]\n    with unset_fake_temporarily():\n        return torch._dynamo.utils.detect_fake_mode(fake_tensors)",
    "docstring": "Detect fake mode from the graph. Scan through all nodes in graph and their meta['val'] to detect fake mode.",
    "type": "method",
    "file_path": "pytorch\\torch\\onnx\\_internal\\fx\\_pass.py",
    "ast_data": "FunctionDef name:_detect_fake_mode arg:self arguments arg Assign Call With Call Return return:yes Call"
  },
  {
    "library": "django",
    "name": "get_formsets_with_inlines",
    "source_code": "def get_formsets_with_inlines(self, request, obj=None):\n    for inline in self.get_inline_instances(request, obj):\n        yield (inline.get_formset(request, obj), inline)",
    "docstring": "Yield formsets and the corresponding inlines.",
    "type": "method",
    "file_path": "django\\django\\contrib\\admin\\options.py",
    "ast_data": "FunctionDef name:get_formsets_with_inlines arg:self arg:request arg:obj arguments arg arg arg For Call Call"
  },
  {
    "library": "pandas",
    "name": "is_true_slices",
    "source_code": "def is_true_slices(line: abc.Iterable) -> abc.Generator[bool, None, None]:\n    for k in line:\n        yield (isinstance(k, slice) and (not is_null_slice(k)))",
    "docstring": "Find non-trivial slices in \"line\": yields a bool.",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\common.py",
    "ast_data": "FunctionDef name:is_true_slices arg:line arguments arg For BoolOp Call Call"
  },
  {
    "library": "scipy",
    "name": "fprime",
    "source_code": "def fprime(self, x, *args):\n    if self.vals is None or x != self.x:\n        self(x, *args)\n    return self.vals[1]",
    "docstring": "Calculate f' or use a cached value if available",
    "type": "method",
    "file_path": "scipy\\scipy\\optimize\\_root_scalar.py",
    "ast_data": "FunctionDef name:fprime arg:self arg:x arguments arg arg arg If BoolOp Compare Compare Call Return return:yes"
  },
  {
    "library": "django",
    "name": "j",
    "source_code": "def j(self):\n    return self.data.day",
    "docstring": "Day of the month without leading zeros; i.e. '1' to '31'",
    "type": "method",
    "file_path": "django\\django\\utils\\dateformat.py",
    "ast_data": "FunctionDef name:j arg:self arguments arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "clip_grad_value_",
    "source_code": "@_no_grad\ndef clip_grad_value_(parameters: _tensor_or_tensors, clip_value: float, foreach: Optional[bool]=None) -> None:\n    if isinstance(parameters, torch.Tensor):\n        parameters = [parameters]\n    clip_value = float(clip_value)\n    grads = [p.grad for p in parameters if p.grad is not None]\n    grouped_grads = _group_tensors_by_device_and_dtype([grads])\n    for (device, _), ([grads], _) in grouped_grads.items():\n        if foreach is None and _has_foreach_support(cast(list[Tensor], grads), device=device) or (foreach and _device_has_foreach_support(device)):\n            torch._foreach_clamp_min_(cast(list[Tensor], grads), -clip_value)\n            torch._foreach_clamp_max_(cast(list[Tensor], grads), clip_value)\n        elif foreach:\n            raise RuntimeError(f\"foreach=True was passed, but can't use the foreach API on {device.type} tensors\")\n        else:\n            for grad in grads:\n                cast(Tensor, grad).clamp_(min=-clip_value, max=clip_value)",
    "docstring": "Clip the gradients of an iterable of parameters at specified value. Gradients are modified in-place. Args: parameters (Iterable[Tensor] or Tensor): an iterable of Tensors or a single Tensor that will have gradients normalized clip_value (float): maximum allowed value of the gradients. The gradients are clipped in the range :math: foreach (bool, optional): use the faster foreach-based implementation If ``",
    "type": "function",
    "file_path": "pytorch\\torch\\nn\\utils\\clip_grad.py",
    "ast_data": "FunctionDef name:clip_grad_value_ arg:parameters arg:clip_value arg:foreach arguments arg arg arg If Call Assign Assign Call Assign Compare Assign Call For Call If BoolOp BoolOp Compare Call Call BoolOp Call Call Call Call Call If Raise Call For Call Call"
  },
  {
    "library": "tensorflow",
    "name": "compiler_ir_generator",
    "source_code": "def compiler_ir_generator(stage='hlo', device_name=None, platform_name=None):\n    if device_name is not None:\n        if platform_name is not None:\n            raise ValueError('device_name and platform_name cannot be provided at the same time.')\n        warnings.warn('device_name is being deprecated. Use platform_name.')\n    device_name = compiler_ir.maybe_get_device_name(device_name)\n    res_bytes = context.context().get_compiler_ir(device_name=device_name, platform_name=platform_name, function_name=fn_name, flat_args=list(filtered_flat_args), captured_inputs=concrete_fn.captured_inputs, stage=stage)\n    if stage in ('stablehlo_serialized', 'hlo_serialized', 'optimized_hlo_serialized', 'optimized_hlo_proto_serialized'):\n        return res_bytes\n    else:\n        return res_bytes.decode('utf-8')",
    "docstring": "Gets the compiler IR bytes. Args: stage: The exported stage for the given function. device_name: The name of the device with the form as \"/job:localhost/replica:0/task:0/device:CPU:0\", \"/device:TPU:0\" etc. When this is used, actual device is used for getting the compiler IR. platform_name: The name of the platform, e.g. \"TPU\". See the comment in in . Returns: The compiler IR bytes.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\polymorphic_function\\polymorphic_function.py",
    "ast_data": "FunctionDef name:compiler_ir_generator arg:stage arg:device_name arg:platform_name arguments arg arg arg If Compare If Compare Raise Call Call Assign Call Assign Call Call Call If Compare Return return:yes Return return:yes Call"
  },
  {
    "library": "cherrypy",
    "name": "__init__",
    "source_code": "def __init__(self):\n    self.clear()\n    t = threading.Thread(target=self.expire_cache, name='expire_cache')\n    self.expiration_thread = t\n    t.daemon = True\n    t.start()",
    "docstring": "Initialize in-memory cache store.",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\lib\\caching.py",
    "ast_data": "FunctionDef name:__init__ arg:self arguments arg Call Assign Call Assign Assign Call"
  },
  {
    "library": "scikit-learn",
    "name": "predict",
    "source_code": "def predict(self, X):\n    check_is_fitted(self)\n    return np.average(self._predict(X), axis=1, weights=self._weights_not_none)",
    "docstring": "Predict regression target for X. The predicted regression target of an input sample is computed as the mean predicted regression targets of the estimators in the ensemble. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) The input samples. Returns ------- y : ndarray of shape (n_samples,) The predicted values.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\ensemble\\_voting.py",
    "ast_data": "FunctionDef name:predict arg:self arg:X arguments arg arg Call Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "visit_Assert",
    "source_code": "def visit_Assert(self, node):\n    n = ast.parse('torch._assert()', mode='eval')\n    assert isinstance(n, ast.Expression)\n    call_node = n.body\n    assert isinstance(call_node, ast.Call)\n    msg = node.msg if node.msg else ast.Constant(value='', kind=None)\n    call_node.args = [node.test, msg]\n    expr_wrapper = ast.Expr(value=call_node)\n    return ast.copy_location(expr_wrapper, node)",
    "docstring": "Swap out the Assert node (Python's ) with a callsite to the symbolically-traceable torch._assert function",
    "type": "method",
    "file_path": "pytorch\\torch\\fx\\experimental\\rewriter.py",
    "ast_data": "FunctionDef name:visit_Assert arg:self arg:node arguments arg arg Assign Call Call Assign Call Assign Call Assign Assign Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_gather_saveables_for_checkpoint",
    "source_code": "def _gather_saveables_for_checkpoint(self):\n\n    def _saveable_factory(name=self.name):\n        saveables = []\n        dims = len(self._variables[0].shape)\n        var_offset = [0 for _ in range(dims)]\n        for v in self._variables:\n            save_slice_info = variables_lib.Variable.SaveSliceInfo(full_name=self.name, full_shape=self.shape.as_list(), var_offset=copy.copy(var_offset), var_shape=v.shape.as_list())\n            saveables.append(saveable_object_util.ResourceVariableSaveable(v, save_slice_info.spec, name))\n            var_offset[0] += int(v.shape[0])\n        return saveables\n    return {trackable.VARIABLE_VALUE_KEY: _saveable_factory}",
    "docstring": "Return a for each shard. See .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\sharded_variable.py",
    "ast_data": "FunctionDef name:_gather_saveables_for_checkpoint arg:self arguments arg FunctionDef name:_saveable_factory arg:name arguments arg Assign Assign Call Assign Call For Assign Call Call Call Call Call Call Call Return return:yes Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_dispatch_opset_version",
    "source_code": "def _dispatch_opset_version(target: OpsetVersion, registered_opsets: Collection[OpsetVersion]) -> Optional[OpsetVersion]:\n    if not registered_opsets:\n        return None\n    descending_registered_versions = sorted(registered_opsets, reverse=True)\n    if target >= _constants.ONNX_BASE_OPSET:\n        for version in descending_registered_versions:\n            if version <= target:\n                return version\n        return None\n    for version in reversed(descending_registered_versions):\n        if target <= version <= _constants.ONNX_BASE_OPSET:\n            return version\n    return None",
    "docstring": "Finds the registered opset given a target opset version and the available opsets. Args: target: The target opset version. registered_opsets: The available opsets. Returns: The registered opset version.",
    "type": "function",
    "file_path": "pytorch\\torch\\onnx\\_internal\\registration.py",
    "ast_data": "FunctionDef name:_dispatch_opset_version arg:target arg:registered_opsets arguments arg arg If Return return:no Assign Call If Compare For If Compare Return return:yes Return return:no For Call If Compare Return return:yes Return return:no"
  },
  {
    "library": "tensorflow",
    "name": "_id",
    "source_code": "@property\ndef _id(self) -> int:\n    return self._id_value",
    "docstring": "The unique integer id of this operation.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\ops.py",
    "ast_data": "FunctionDef name:_id arg:self arguments arg Return return:yes"
  },
  {
    "library": "pandas",
    "name": "_slice_axis_for_step",
    "source_code": "def _slice_axis_for_step(self, index: Index, result: Sized | None=None) -> Index:\n    return index if result is None or len(result) == len(index) else index[::self.step]",
    "docstring": "Slices the index for a given result and the preset step.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\window\\rolling.py",
    "ast_data": "FunctionDef name:_slice_axis_for_step arg:self arg:index arg:result arguments arg arg arg Return return:yes BoolOp Compare Compare Call Call"
  },
  {
    "library": "pytorch",
    "name": "_calculate_scale",
    "source_code": "def _calculate_scale(head_dim_size: int, scale: Optional[float]) -> float:\n    if scale is not None:\n        return scale\n    return 1.0 / math.sqrt(head_dim_size)",
    "docstring": "For FlashAttention we pad the head dimension to be a multiple of 8 so we need to scale the output by the original head size and not the padded.",
    "type": "function",
    "file_path": "pytorch\\torch\\nn\\attention\\_utils.py",
    "ast_data": "FunctionDef name:_calculate_scale arg:head_dim_size arg:scale arguments arg arg If Compare Return return:yes Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "from_proto",
    "source_code": "@staticmethod\ndef from_proto(saver_def, import_scope=None):\n    return Saver(saver_def=saver_def, name=import_scope)",
    "docstring": "Returns a object created from . Args: saver_def: a protocol buffer. import_scope: Optional . Name scope to use. Returns: A built from saver_def.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\training\\saver.py",
    "ast_data": "FunctionDef name:from_proto arg:saver_def arg:import_scope arguments arg arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "__init__",
    "source_code": "def __init__(self, rdzv_impl: 'EtcdRendezvous', local_addr: Optional[str]):\n    self._rdzv_impl = rdzv_impl\n    self._local_addr = local_addr",
    "docstring": "Args: rdzv_impl: the implementation of the rendezvous local_addr: the local address of the current node",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\elastic\\rendezvous\\etcd_rendezvous.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:rdzv_impl arg:local_addr arguments arg arg arg Assign Assign"
  },
  {
    "library": "tensorflow",
    "name": "DatasetV1",
    "source_code": "@tf_export(v1=['__internal__.types.data.Dataset'])\nclass DatasetV1(DatasetV2, abc.ABC):\n    pass",
    "docstring": "Represents the TensorFlow 1 type .",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\types\\data.py",
    "ast_data": "ClassDef name:DatasetV1 Call"
  },
  {
    "library": "numpy",
    "name": "_hist_bin_sturges",
    "source_code": "def _hist_bin_sturges(x, range):\n    del range\n    return _ptp(x) / (np.log2(x.size) + 1.0)",
    "docstring": "Sturges histogram bin estimator. A very simplistic estimator based on the assumption of normality of the data. This estimator has poor performance for non-normal data, which becomes especially obvious for large data sets. The estimate depends only on size of the data. Parameters ---------- x : array_like Input data that is to be histogrammed, trimmed to range. May not be empty. Returns ------- h : An estimate of the optimal bin width for the given data.",
    "type": "function",
    "file_path": "numpy\\numpy\\lib\\_histograms_impl.py",
    "ast_data": "FunctionDef name:_hist_bin_sturges arg:x arg:range arguments arg arg Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_set_shape",
    "source_code": "def _set_shape(self, shape):\n    shape = tensor_shape.as_shape(shape)\n    if shape.rank is None:\n        return\n    shape = shape.as_list()\n    if shape[0] is not None:\n        self._row_partition._row_splits.set_shape(shape[0] + 1)\n    dtype = self._row_partition.dtype\n    for i, partition in enumerate(self._nested_row_partitions):\n        size = shape[i + 1]\n        if size is not None:\n            if partition._uniform_row_length is not None:\n                old_row_length = tensor_util.constant_value(partition._uniform_row_length)\n                if old_row_length is not None:\n                    if size == old_row_length:\n                        continue\n                    else:\n                        raise ValueError(f'Inconsistent size for axis {i + 1}: {old_row_length} vs. {size}.')\n            partition._uniform_row_length = ops.convert_to_tensor(size, dtype)\n            if partition._nrows is None:\n                partition._nrows = array_ops.size(partition._row_splits, out_type=dtype) - 1\n    if hasattr(self.flat_values, 'set_shape'):\n        flat_shape = tensor_shape.as_shape([None] + shape[self.ragged_rank + 1:])\n        self.flat_values.set_shape(flat_shape)",
    "docstring": "Updates the static shape of to be . * If a dimension of has known rank, and is encoded via partitioning, then this will update the corresponding partition to define and . * If a dimension of has a known rank, and is encoded as one of the dimensions, then will be used to update its shape. Warning: Using this method to assert an incorrect shape for a RaggedTensor (i.e., one that's not consistent with its actual shape) can cause segmentation faults and very difficult-to-diagnose behavior. Only use this method if you are certain that the shape is correct. Args: shape: specifying the shape for this .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\ragged_tensor.py",
    "ast_data": "FunctionDef name:_set_shape arg:self arg:shape arguments arg arg Assign Call If Compare Return return:no Assign Call If Compare Call Assign For Call Assign If Compare If Compare Assign Call If Compare If Compare Raise Call Assign Call If Compare Assign Call If Call Assign Call Call"
  },
  {
    "library": "tensorflow",
    "name": "__enter__",
    "source_code": "def __enter__(self):\n    return self",
    "docstring": "Make usable with \"with\" statement.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\lib\\io\\file_io.py",
    "ast_data": "FunctionDef name:__enter__ arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "unpack_inputs",
    "source_code": "def unpack_inputs(self, bound_parameters: inspect.BoundArguments) -> List[core.Tensor]:\n    flat = []\n    for p in self._sorted_parameters:\n        flat.extend(p.type_constraint.to_tensors(bound_parameters.arguments[p.name]))\n    dealiased_inputs = []\n    ids_used = set()\n    for tensor, input_type in zip(flat, self.flat_inputs):\n        alias_id = input_type._alias_id()\n        if alias_id is None or alias_id not in ids_used:\n            dealiased_inputs.append(tensor)\n        if alias_id is not None:\n            ids_used.add(alias_id)\n    return dealiased_inputs",
    "docstring": "Unpacks python arguments to flat tensor inputs accepted by this type.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\core\\function\\polymorphism\\function_type.py",
    "ast_data": "FunctionDef name:unpack_inputs arg:self arg:bound_parameters arguments arg arg Assign For Call Call Assign Assign Call For Call Assign Call If BoolOp Compare Compare Call If Compare Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "log_event_start",
    "source_code": "def log_event_start(self, event_name: str, time_ns: int, metadata: dict[str, Any], log_pt2_compile_event: bool=False, compile_id: Optional[CompileId]=None) -> None:\n    compile_id = compile_id or torch._guards.CompileContext.current_compile_id()\n    metadata['compile_id'] = str(compile_id)\n    self._log_timed_event(event_name, time_ns, 'B', metadata)\n    self.get_stack().append(event_name)\n    self.add_event_data(event_name, **metadata)\n    if log_pt2_compile_event:\n        self.get_pt2_compile_substack().append(event_name)",
    "docstring": "Logs the start of a single event. :param str event_name Name of event to appear in trace :param time_ns Timestamp in nanoseconds :param metadata: Any extra metadata associated with this event :param log_pt2_compile_event: If True, log to pt2_compile_events :param compile_id: Explicit compile_id (rather than using the current context)",
    "type": "method",
    "file_path": "pytorch\\torch\\_dynamo\\utils.py",
    "ast_data": "FunctionDef name:log_event_start arg:self arg:event_name arg:time_ns arg:metadata arg:log_pt2_compile_event arg:compile_id arguments arg arg arg arg arg arg Assign BoolOp Call Assign Call Call Call Call Call If Call Call"
  },
  {
    "library": "tensorflow",
    "name": "inferred_steps",
    "source_code": "@property\ndef inferred_steps(self):\n    return self._inferred_steps",
    "docstring": "The inferred steps per epoch of the created . This will be in the case where: (1) A of unknown cardinality was passed to the , and (2) was not provided, and (3) The first epoch of iteration has not yet completed. Returns: The inferred steps per epoch of the created .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\data_adapter.py",
    "ast_data": "FunctionDef name:inferred_steps arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "isClose",
    "source_code": "def isClose(x, y, relative_tolerance):\n    if math.isnan(x) or math.isnan(y):\n        return math.isnan(x) == math.isnan(y)\n    if math.isinf(x) or math.isinf(y):\n        return x == y\n    return abs(x - y) <= relative_tolerance * max(abs(x), abs(y))",
    "docstring": "Returns True if x is close to y given the relative tolerance or if x and y are both inf, both -inf, or both NaNs. This function does not distinguish between signalling and non-signalling NaN. Args: x: float value to be compared y: float value to be compared relative_tolerance: float. The allowable difference between the two values being compared is determined by multiplying the relative tolerance by the maximum of the two values. If this is not provided, then all floats are compared using string comparison.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\util\\protobuf\\compare.py",
    "ast_data": "FunctionDef name:isClose arg:x arg:y arg:relative_tolerance arguments arg arg arg If BoolOp Call Call Return return:yes Compare Call Call If BoolOp Call Call Return return:yes Compare Return return:yes Compare Call Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "get_unsafe_globals_in_checkpoint",
    "source_code": "def get_unsafe_globals_in_checkpoint(f: FileLike) -> list[str]:\n    default_safe_globals_strings = set(_weights_only_unpickler._get_allowed_globals().keys())\n    user_safe_global_strings = set(_weights_only_unpickler._get_user_allowed_globals().keys())\n    safe_global_strings = default_safe_globals_strings.union(user_safe_global_strings)\n    with _open_file_like(f, 'rb') as opened_file:\n        if not _is_zipfile(opened_file):\n            raise ValueError('Expected input to be a checkpoint returned by torch.save')\n        with _open_zipfile_reader(opened_file) as zip_file:\n            if _is_torchscript_zip(zip_file):\n                raise ValueError('Expected input to be a checkpoint returned by torch.save but got a torchscript checkpoint')\n            data_file = io.BytesIO(zip_file.get_record('data.pkl'))\n            all_globals = _weights_only_unpickler.get_globals_in_pkl(data_file)\n            return list(all_globals.difference(safe_global_strings))",
    "docstring": "Returns a list of strings of functions/classes in a `add_safe_globalssafe_globals`.",
    "type": "function",
    "file_path": "pytorch\\torch\\serialization.py",
    "ast_data": "FunctionDef name:get_unsafe_globals_in_checkpoint arg:f arguments arg Assign Call Call Call Assign Call Call Call Assign Call With Call If Call Raise Call With Call If Call Raise Call Assign Call Call Assign Call Return return:yes Call Call"
  },
  {
    "library": "django",
    "name": "insert",
    "source_code": "def insert(self, index, val):\n    if not isinstance(index, int):\n        raise TypeError('%s is not a legal index' % index)\n    self[index:index] = [val]",
    "docstring": "Standard list insert method",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\geos\\mutable_list.py",
    "ast_data": "FunctionDef name:insert arg:self arg:index arg:val arguments arg arg arg If Call Raise Call Assign"
  },
  {
    "library": "tensorflow",
    "name": "_restore_slot_variable",
    "source_code": "def _restore_slot_variable(self, slot_name, variable, slot_variable):\n    variable_key = _var_key(variable)\n    deferred_restorations = self._deferred_slot_restorations.get(slot_name, {}).pop(variable_key, [])\n    deferred_restorations.sort(key=lambda position: position.restore_uid, reverse=True)\n    for checkpoint_position in deferred_restorations:\n        checkpoint_position.restore(slot_variable)",
    "docstring": "Restore a newly created slot variable's value.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\training\\optimizer.py",
    "ast_data": "FunctionDef name:_restore_slot_variable arg:self arg:slot_name arg:variable arg:slot_variable arguments arg arg arg arg Assign Call Assign Call Call Call arguments arg For Call"
  },
  {
    "library": "scipy",
    "name": "dt",
    "source_code": "@property\ndef dt(self):\n    return self._dt",
    "docstring": "Return the sampling time of the system.",
    "type": "method",
    "file_path": "scipy\\scipy\\signal\\_ltisys.py",
    "ast_data": "FunctionDef name:dt arg:self arguments arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_get_names_from_handle_indices",
    "source_code": "def _get_names_from_handle_indices(self, handle_indices: tuple[int, ...]) -> list[list[str]]:\n    fqns: list[list[str]] = []\n    for index in handle_indices:\n        if index is None or index < 0 or index >= len(self.all_handles):\n            continue\n        handle = self.all_handles[index]\n        flat_param = handle.flat_param\n        fqns.append(self.param_to_fqn[flat_param])\n    return fqns",
    "docstring": "Returns a list of FQNs for each handle in ``. If a handle index is invalid, then its FQNs are omitted from the returned list.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\fsdp\\_exec_order_utils.py",
    "ast_data": "FunctionDef name:_get_names_from_handle_indices arg:self arg:handle_indices arguments arg arg For If BoolOp Compare Compare Compare Call Assign Assign Call Return return:yes"
  },
  {
    "library": "pandas",
    "name": "_split_line",
    "source_code": "def _split_line(s: str, parts):\n    out = {}\n    start = 0\n    for name, length in parts:\n        out[name] = s[start:start + length].strip()\n        start += length\n    del out['_']\n    return out",
    "docstring": "Parameters ---------- s: str Fixed-length string to split parts: list of (name, length) pairs Used to break up string, name '_' will be filtered from output. Returns ------- Dict of name:contents of string at given location.",
    "type": "function",
    "file_path": "pandas\\pandas\\io\\sas\\sas_xport.py",
    "ast_data": "FunctionDef name:_split_line arg:s arg:parts arguments arg arg Assign Assign For Assign Call Return return:yes"
  },
  {
    "library": "django",
    "name": "reset_cache",
    "source_code": "@receiver(setting_changed)\ndef reset_cache(*, setting, **kwargs):\n    if setting in ('LANGUAGES', 'LANGUAGE_CODE'):\n        check_for_language.cache_clear()\n        get_languages.cache_clear()\n        get_supported_language_variant.cache_clear()",
    "docstring": "Reset global state when LANGUAGES setting has been changed, as some languages should no longer be accepted.",
    "type": "function",
    "file_path": "django\\django\\utils\\translation\\trans_real.py",
    "ast_data": "FunctionDef name:reset_cache arguments arg arg If Compare Call Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "get_executorch_backend_config",
    "source_code": "def get_executorch_backend_config() -> BackendConfig:\n    return BackendConfig('executorch').set_backend_pattern_configs(_get_linear_configs()).set_backend_pattern_configs(_get_conv_configs()).set_backend_pattern_configs(_get_binary_ops_configs()).set_backend_pattern_configs(_get_share_qparams_ops_configs()).set_backend_pattern_configs(_get_bn_configs()).set_backend_pattern_configs(_get_cat_configs()).set_backend_pattern_configs(_get_embedding_op_configs())",
    "docstring": "Return the for backends PyTorch lowers to through the Executorch stack.",
    "type": "function",
    "file_path": "pytorch\\torch\\ao\\quantization\\backend_config\\executorch.py",
    "ast_data": "FunctionDef name:get_executorch_backend_config arguments Return return:yes Call Call Call Call Call Call Call Call Call Call Call Call Call Call Call"
  },
  {
    "library": "cherrypy",
    "name": "stop",
    "source_code": "def stop(self):\n    self.fcgiserver._keepGoing = False\n    self.fcgiserver._threadPool.maxSpare = self.fcgiserver._threadPool._idleCount\n    self.ready = False",
    "docstring": "Stop the HTTP server.",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\process\\servers.py",
    "ast_data": "FunctionDef name:stop arg:self arguments arg Assign Assign Assign"
  },
  {
    "library": "pytorch",
    "name": "_deprecate_warnings",
    "source_code": "def _deprecate_warnings(func_name: str, extra_msg: str) -> None:\n    if not is_torchdynamo_compiling():\n        warnings.warn(f'{func_name} is deprecated and will be removed soon. {extra_msg}', FutureWarning, stacklevel=3)",
    "docstring": "Inject common validation logics for funcs via this decorator. Include verifying that input needs to be either a :class: or :class: and only 1D :class: is passed in.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\tensor\\parallel\\_utils.py",
    "ast_data": "FunctionDef name:_deprecate_warnings arg:func_name arg:extra_msg arguments arg arg If Call Call"
  },
  {
    "library": "authlib",
    "name": "userinfo",
    "source_code": "def userinfo(self, **kwargs):\n    metadata = self.load_server_metadata()\n    resp = self.get(metadata['userinfo_endpoint'], **kwargs)\n    resp.raise_for_status()\n    data = resp.json()\n    return UserInfo(data)",
    "docstring": "Fetch user info from ``.",
    "type": "method",
    "file_path": "authlib\\authlib\\integrations\\base_client\\sync_openid.py",
    "ast_data": "FunctionDef name:userinfo arg:self arguments arg arg Assign Call Assign Call Call Assign Call Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "is_array_api_strict_namespace",
    "source_code": "def is_array_api_strict_namespace(xp: Namespace) -> bool:\n    return xp.__name__ == 'array_api_strict'",
    "docstring": "Returns True if is an array-api-strict namespace. See Also -------- array_namespace is_numpy_namespace is_cupy_namespace is_torch_namespace is_ndonnx_namespace is_dask_namespace is_jax_namespace is_pydata_sparse_namespace",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\externals\\array_api_compat\\common\\_helpers.py",
    "ast_data": "FunctionDef name:is_array_api_strict_namespace arg:xp arguments arg Return return:yes Compare"
  },
  {
    "library": "django",
    "name": "_get",
    "source_code": "def _get(self, *args, **kwargs):\n    data = self.request.COOKIES.get(self.cookie_name)\n    messages = self._decode(data)\n    all_retrieved = not (messages and messages[-1] == self.not_finished)\n    if messages and (not all_retrieved):\n        messages.pop()\n    return (messages, all_retrieved)",
    "docstring": "Retrieve a list of messages from the messages cookie. If the not_finished sentinel value is found at the end of the message list, remove it and return a result indicating that not all messages were retrieved by this storage.",
    "type": "method",
    "file_path": "django\\django\\contrib\\messages\\storage\\cookie.py",
    "ast_data": "FunctionDef name:_get arg:self arguments arg arg arg Assign Call Assign Call Assign BoolOp Compare If BoolOp Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "set_ybound",
    "source_code": "def set_ybound(self, lower=None, upper=None):\n    if upper is None and np.iterable(lower):\n        lower, upper = lower\n    old_lower, old_upper = self.get_ybound()\n    if lower is None:\n        lower = old_lower\n    if upper is None:\n        upper = old_upper\n    self.set_ylim(sorted((lower, upper), reverse=bool(self.yaxis_inverted())), auto=None)",
    "docstring": "Set the lower and upper numerical bounds of the y-axis. This method will honor axis inversion regardless of parameter order. It will not change the autoscaling setting (). Parameters ---------- lower, upper : float or None The lower and upper bounds. If *None*, the respective axis bound is not modified. .. ACCEPTS: (lower: float, upper: float) See Also -------- get_ybound get_ylim, set_ylim invert_yaxis, yaxis_inverted",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\axes\\_base.py",
    "ast_data": "FunctionDef name:set_ybound arg:self arg:lower arg:upper arguments arg arg arg If BoolOp Compare Call Assign Assign Call If Compare Assign If Compare Assign Call Call Call Call"
  },
  {
    "library": "scikit-learn",
    "name": "argsort",
    "source_code": "def argsort(x: Array, /, *, axis: int=-1, descending: py_bool=False, stable: py_bool=True) -> Array:\n    x, restore = _ensure_single_chunk(x, axis)\n    meta_xp = array_namespace(x._meta)\n    dtype = meta_xp.argsort(x._meta).dtype\n    meta = meta_xp.astype(x._meta, dtype)\n    x = da.map_blocks(meta_xp.argsort, x, axis=axis, meta=meta, dtype=dtype, descending=descending, stable=stable)\n    return restore(x)",
    "docstring": "Array API compatibility layer around the lack of argsort() in Dask. See the corresponding documentation in the array library and/or the array API specification for more details. Warnings -------- This function temporarily rechunks the array along into a single chunk. This can be extremely inefficient and can lead to out-of-memory errors.",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\externals\\array_api_compat\\dask\\array\\_aliases.py",
    "ast_data": "FunctionDef name:argsort arguments arg arg arg arg Assign Call Assign Call Assign Call Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_WeakTensorIterator",
    "source_code": "class _WeakTensorIterator(object):\n    __slots__ = ['_weak_tensor', '_index', '_limit']\n\n    def __init__(self, weak_tensor, dim0):\n        self._weak_tensor = weak_tensor\n        self._index = 0\n        self._limit = dim0\n\n    def __iter__(self):\n        return self\n\n    def __next__(self):\n        if self._index == self._limit:\n            raise StopIteration\n        result = WeakTensor.from_tensor(self._weak_tensor.tensor[self._index])\n        self._index += 1\n        return result",
    "docstring": "Iterates over the leading dim of a WeakTensor. Performs no error checks.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\weak_tensor.py",
    "ast_data": "ClassDef name:_WeakTensorIterator Assign FunctionDef name:__init__ arg:self arg:weak_tensor arg:dim0 arguments arg arg arg Assign Assign Assign FunctionDef name:__iter__ arg:self arguments arg Return return:yes FunctionDef name:__next__ arg:self arguments arg If Compare Raise Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "check_labels_file_header",
    "source_code": "def check_labels_file_header(filename):\n    with tf.gfile.Open(filename, 'rb') as f:\n        magic = read32(f)\n        read32(f)\n        if magic != 2049:\n            raise ValueError('Invalid magic number %d in MNIST file %s' % (magic, f.name))",
    "docstring": "Validate that filename corresponds to labels for the MNIST dataset.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\lite\\tutorials\\dataset.py",
    "ast_data": "FunctionDef name:check_labels_file_header arg:filename arguments arg With Call Assign Call Call If Compare Raise Call"
  },
  {
    "library": "kornia",
    "name": "clear_state",
    "source_code": "def clear_state(self) -> None:\n    self._params = None",
    "docstring": "Reset self._params state to None.",
    "type": "method",
    "file_path": "kornia\\kornia\\augmentation\\container\\base.py",
    "ast_data": "FunctionDef name:clear_state arg:self arguments arg Assign"
  },
  {
    "library": "tensorflow",
    "name": "HeNormal",
    "source_code": "class HeNormal(VarianceScaling):\n\n    def __init__(self, seed=None):\n        super(HeNormal, self).__init__(scale=2.0, mode='fan_in', distribution='truncated_normal', seed=seed)\n\n    def get_config(self):\n        return {'seed': self.seed}",
    "docstring": "He normal initializer. Also available via the shortcut function . It draws samples from a truncated normal distribution centered on 0 with where is the number of input units in the weight tensor. Examples: >>> # Standalone usage: >>> initializer = tf.keras.initializers.HeNormal() >>> values = initializer(shape=(2, 2)) >>> # Usage in a Keras layer: >>> initializer = tf.keras.initializers.HeNormal() >>> layer = tf.keras.layers.Dense(3, kernel_initializer=initializer) Args: seed: A Python integer. An initializer created with a given seed will always produce the same random tensor for a given shape and dtype. References: [He et al., 2015]( # pylint: disable=line-too-long ([pdf](",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\initializers\\initializers_v2.py",
    "ast_data": "ClassDef name:HeNormal FunctionDef name:__init__ arg:self arg:seed arguments arg arg Call Call FunctionDef name:get_config arg:self arguments arg Return return:yes"
  },
  {
    "library": "pandas",
    "name": "get_option",
    "source_code": "def get_option(pat: str) -> Any:\n    key = _get_single_key(pat)\n    root, k = _get_root(key)\n    return root[k]",
    "docstring": "Retrieve the value of the specified option. This method allows users to query the current value of a given option in the pandas configuration system. Options control various display, performance, and behavior-related settings within pandas. Parameters ---------- pat : str Regexp which should match a single option. .. warning:: Partial matches are supported for convenience, but unless you use the full option name (e.g. x.y.z.option_name), your code may break in future versions if new options with similar names are introduced. Returns ------- Any The value of the option. Raises ------ OptionError : if no such option exists See Also -------- set_option : Set the value of the specified option or options. reset_option : Reset one or more options to their default value. describe_option : Print the description for one or more registered options. Notes ----- For all available options, please view the :ref: or use ``. Examples -------- >>> pd.get_option(\"display.max_columns\") # doctest: +SKIP 4",
    "type": "function",
    "file_path": "pandas\\pandas\\_config\\config.py",
    "ast_data": "FunctionDef name:get_option arg:pat arguments arg Assign Call Assign Call Return return:yes"
  },
  {
    "library": "pandas",
    "name": "map",
    "source_code": "@Substitution(subset=subset_args)\ndef map(self, func: Callable, subset: Subset | None=None, **kwargs) -> Styler:\n    self._todo.append((lambda instance: instance._map, (func, subset), kwargs))\n    return self",
    "docstring": "Apply a CSS-styling function elementwise. Updates the HTML representation with the result. Parameters ---------- func : function `Table Visualization `_ user guide for more details.",
    "type": "method",
    "file_path": "pandas\\pandas\\io\\formats\\style.py",
    "ast_data": "FunctionDef name:map arg:self arg:func arg:subset arguments arg arg arg arg Call arguments arg Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "delaxes",
    "source_code": "def delaxes(ax: matplotlib.axes.Axes | None=None) -> None:\n    if ax is None:\n        ax = gca()\n    ax.remove()",
    "docstring": "Remove an (defaulting to the current Axes) from its figure.",
    "type": "function",
    "file_path": "matplotlib\\lib\\matplotlib\\pyplot.py",
    "ast_data": "FunctionDef name:delaxes arg:ax arguments arg If Compare Assign Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_convert_to_dataset_creator",
    "source_code": "def _convert_to_dataset_creator(self, x, y, **kwargs):\n\n    def _dataset_fn(input_context):\n        del input_context\n        data_adapter_cls = select_data_adapter(x, y)\n        return data_adapter_cls(x=x, y=y, **kwargs).get_dataset()\n    if isinstance(x, _get_tensor_types()) and isinstance(y, _get_tensor_types()):\n        return dataset_creator.DatasetCreator(_dataset_fn)\n    else:\n        raise NotImplementedError('Only `tf.keras.utils.experimental.DatasetCreator`, `tf.Tensor`, numpy arrays and pandas dataframes are supported types at this time.')",
    "docstring": "Converts non-tf.data.Dataset to instances.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\data_adapter.py",
    "ast_data": "FunctionDef name:_convert_to_dataset_creator arg:self arg:x arg:y arguments arg arg arg arg FunctionDef name:_dataset_fn arg:input_context arguments arg Assign Call Return return:yes Call Call If BoolOp Call Call Call Call Return return:yes Call Raise Call"
  },
  {
    "library": "pytorch",
    "name": "_restart_workers",
    "source_code": "@prof\ndef _restart_workers(self, worker_group: WorkerGroup) -> None:\n    role = worker_group.spec.role\n    logger.info('[%s] Stopping worker group', role)\n    self._stop_workers(worker_group)\n    worker_group.state = WorkerState.STOPPED\n    self._initialize_workers(worker_group)",
    "docstring": "Restart (stops, rendezvous, starts) all local workers in the group.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\elastic\\agent\\server\\api.py",
    "ast_data": "FunctionDef name:_restart_workers arg:self arg:worker_group arguments arg arg Assign Call Call Assign Call"
  },
  {
    "library": "tensorflow",
    "name": "call_function",
    "source_code": "def call_function(args=None, kwargs=None, tracing_options=None):\n    if not tracing_options:\n        tracing_options = TracingOptions()\n    args = args if args else ()\n    kwargs = kwargs if kwargs else {}\n    function = trace_function(args=args, kwargs=kwargs, tracing_options=tracing_options)\n    bound_args = function.function_type.bind(*args, **kwargs)\n    flat_inputs = function.function_type.unpack_inputs(bound_args)\n    return function._call_flat(flat_inputs, captured_inputs=function.captured_inputs)",
    "docstring": "Traces a function for args and kwargs and calls it after.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\polymorphic_function\\tracing_compilation.py",
    "ast_data": "FunctionDef name:call_function arg:args arg:kwargs arg:tracing_options arguments arg arg arg If Assign Call Assign Assign Assign Call Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "numpy",
    "name": "ComplexWarning",
    "source_code": "class ComplexWarning(RuntimeWarning):\n    pass",
    "docstring": "The warning raised when casting a complex dtype to a real dtype. As implemented, casting a complex number to a real discards its imaginary part, but this behavior may not be what the user actually wants.",
    "type": "class",
    "file_path": "numpy\\numpy\\exceptions.py",
    "ast_data": "ClassDef name:ComplexWarning"
  },
  {
    "library": "scipy",
    "name": "mean",
    "source_code": "def mean(self, alpha, n):\n    a, Sa, n = _dirichlet_multinomial_check_parameters(alpha, n)\n    n, Sa = (n[..., np.newaxis], Sa[..., np.newaxis])\n    return n * a / Sa",
    "docstring": "Mean of a Dirichlet multinomial distribution. Parameters ---------- %(_dirichlet_mn_doc_default_callparams)s Returns ------- out: ndarray Mean of a Dirichlet multinomial distribution.",
    "type": "method",
    "file_path": "scipy\\scipy\\stats\\_multivariate.py",
    "ast_data": "FunctionDef name:mean arg:self arg:alpha arg:n arguments arg arg arg Assign Call Assign Return return:yes"
  },
  {
    "library": "kornia",
    "name": "matrix",
    "source_code": "def matrix(self) -> Tensor:\n    return quaternion_to_rotation_matrix(self.data)",
    "docstring": "Convert the quaternion to a rotation matrix of shape :math:. Example: >>> q = Quaternion.identity() >>> m = q.matrix() >>> m tensor([[1., 0., 0.], [0., 1., 0.], [0., 0., 1.]], grad_fn=)",
    "type": "method",
    "file_path": "kornia\\kornia\\geometry\\quaternion.py",
    "ast_data": "FunctionDef name:matrix arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "_shutdown",
    "source_code": "@abc.abstractmethod\ndef _shutdown(self, death_sig: signal.Signals=signal.SIGTERM) -> None:\n    raise NotImplementedError",
    "docstring": "Clean up any resources that were allocated during the agent's work. Args: death_sig: Signal to send to the child process, SIGTERM is default",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\elastic\\agent\\server\\api.py",
    "ast_data": "FunctionDef name:_shutdown arg:self arg:death_sig arguments arg arg Raise"
  },
  {
    "library": "cryptography",
    "name": "private_bytes",
    "source_code": "@abc.abstractmethod\ndef private_bytes(self, encoding: _serialization.Encoding, format: _serialization.PrivateFormat, encryption_algorithm: _serialization.KeySerializationEncryption) -> bytes:\n    pass",
    "docstring": "Returns the key serialized as bytes.",
    "type": "method",
    "file_path": "cryptography\\src\\cryptography\\hazmat\\primitives\\asymmetric\\ec.py",
    "ast_data": "FunctionDef name:private_bytes arg:self arg:encoding arg:format arg:encryption_algorithm arguments arg arg arg arg"
  },
  {
    "library": "pytorch",
    "name": "JoinHook",
    "source_code": "class JoinHook:\n\n    def main_hook(self) -> None:\n        pass\n\n    def post_hook(self, is_last_joiner: bool) -> None:\n        pass",
    "docstring": "This defines a join hook, which provides two entry points in the join context manager. Entry points : a main hook, which is called repeatedly while there exists a non-joined process, and a post-hook, which is called once all processes have joined. To implement a join hook for the generic join context manager, define a class that inherits from :class: and override `` as appropriate.",
    "type": "class",
    "file_path": "pytorch\\torch\\distributed\\algorithms\\join.py",
    "ast_data": "ClassDef name:JoinHook FunctionDef name:main_hook arg:self arguments arg FunctionDef name:post_hook arg:self arg:is_last_joiner arguments arg arg"
  },
  {
    "library": "kornia",
    "name": "shift_rgb",
    "source_code": "def shift_rgb(image: Tensor, r_shift: Tensor, g_shift: Tensor, b_shift: Tensor) -> Tensor:\n    KORNIA_CHECK_IS_TENSOR(image)\n    KORNIA_CHECK_IS_COLOR(image, f'with shape {image.shape}')\n    shifts = [r_shift, g_shift, b_shift]\n    shifted = (image + stack(shifts, dim=1).view(-1, 3, 1, 1).to(image)).clamp_(min=0, max=1)\n    return shifted",
    "docstring": "Shift rgb channels. Shift each image's channel by either r_shift for red, g_shift for green and b_shift for blue channels.",
    "type": "function",
    "file_path": "kornia\\kornia\\enhance\\shift_rgb.py",
    "ast_data": "FunctionDef name:shift_rgb arg:image arg:r_shift arg:g_shift arg:b_shift arguments arg arg arg arg Call Call Assign Assign Call Call Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "ref",
    "source_code": "def ref(self):\n    return object_identity.Reference(self)",
    "docstring": "Returns a hashable reference object to this KerasTensor. The primary use case for this API is to put KerasTensors in a set/dictionary. We can't put tensors in a set/dictionary as is not available and tensor equality () is supposed to produce a tensor representing if the two inputs are equal. See the documentation of for more info.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\keras_tensor.py",
    "ast_data": "FunctionDef name:ref arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "django",
    "name": "get_handler",
    "source_code": "def get_handler(self, *args, **options):\n    handler = super().get_handler(*args, **options)\n    use_static_handler = options['use_static_handler']\n    insecure_serving = options['insecure_serving']\n    if use_static_handler and (settings.DEBUG or insecure_serving):\n        return StaticFilesHandler(handler)\n    return handler",
    "docstring": "Return the static files serving handler wrapping the default handler, if static files should be served. Otherwise return the default handler.",
    "type": "method",
    "file_path": "django\\django\\contrib\\staticfiles\\management\\commands\\runserver.py",
    "ast_data": "FunctionDef name:get_handler arg:self arguments arg arg arg Assign Call Call Assign Assign If BoolOp BoolOp Return return:yes Call Return return:yes"
  },
  {
    "library": "scipy",
    "name": "__call__",
    "source_code": "def __call__(self, mean=None, cov=1, allow_singular=False, seed=None, **kwds):\n    return multivariate_normal_frozen(mean, cov, allow_singular=allow_singular, seed=seed, **kwds)",
    "docstring": "Create a frozen multivariate normal distribution. See for more information.",
    "type": "method",
    "file_path": "scipy\\scipy\\stats\\_multivariate.py",
    "ast_data": "FunctionDef name:__call__ arg:self arg:mean arg:cov arg:allow_singular arg:seed arguments arg arg arg arg arg arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "to_fullargspec",
    "source_code": "def to_fullargspec(function_type: function_type_lib.FunctionType, default_values: Dict[str, Any]) -> inspect.FullArgSpec:\n    args = []\n    varargs = None\n    varkw = None\n    defaults = []\n    kwonlyargs = []\n    kwonlydefaults = {}\n    for parameter in function_type.parameters.values():\n        if parameter.kind in [inspect.Parameter.POSITIONAL_ONLY, inspect.Parameter.POSITIONAL_OR_KEYWORD]:\n            args.append(parameter.name)\n            if parameter.default is not inspect.Parameter.empty:\n                defaults.append(default_values[parameter.name])\n        elif parameter.kind is inspect.Parameter.KEYWORD_ONLY:\n            kwonlyargs.append(parameter.name)\n            if parameter.default is not inspect.Parameter.empty:\n                kwonlydefaults[parameter.name] = default_values[parameter.name]\n        elif parameter.kind is inspect.Parameter.VAR_POSITIONAL:\n            varargs = parameter.name\n        elif parameter.kind is inspect.Parameter.VAR_KEYWORD:\n            varkw = parameter.name\n    return inspect.FullArgSpec(args, varargs, varkw, tuple(defaults) if defaults else None, kwonlyargs, kwonlydefaults if kwonlydefaults else None, annotations={})",
    "docstring": "Generates backwards compatible FullArgSpec from FunctionType.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\polymorphic_function\\function_type_utils.py",
    "ast_data": "FunctionDef name:to_fullargspec arg:function_type arg:default_values arguments arg arg Assign Assign Assign Assign Assign Assign For Call If Compare Call If Compare Call If Compare Call If Compare Assign If Compare Assign If Compare Assign Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "histogram_raw",
    "source_code": "def histogram_raw(name, min, max, num, sum, sum_squares, bucket_limits, bucket_counts):\n    hist = HistogramProto(min=min, max=max, num=num, sum=sum, sum_squares=sum_squares, bucket_limit=bucket_limits, bucket=bucket_counts)\n    return Summary(value=[Summary.Value(tag=name, histo=hist)])",
    "docstring": "Output a protocol buffer with a histogram. The generated []( has one summary value containing a histogram for . Args: name: A name for the generated node. Will also serve as a series name in TensorBoard. min: A float or int min value max: A float or int max value num: Int number of values sum: Float or int sum of all values sum_squares: Float or int sum of squares for all values bucket_limits: A numeric with upper value per bucket bucket_counts: A numeric with number of values per bucket Returns: A scalar of type . The serialized protocol buffer.",
    "type": "function",
    "file_path": "pytorch\\torch\\utils\\tensorboard\\summary.py",
    "ast_data": "FunctionDef name:histogram_raw arg:name arg:min arg:max arg:num arg:sum arg:sum_squares arg:bucket_limits arg:bucket_counts arguments arg arg arg arg arg arg arg arg Assign Call Return return:yes Call Call"
  },
  {
    "library": "scipy",
    "name": "mfft",
    "source_code": "@property\ndef mfft(self) -> int:\n    return self._mfft",
    "docstring": "Length of input for the FFT used - may be larger than window length . If not set, defaults to the window length . See Also -------- f_pts: Number of points along the frequency axis. f: Frequencies values of the STFT. m_num: Number of samples in window . ShortTimeFFT: Class this property belongs to.",
    "type": "method",
    "file_path": "scipy\\scipy\\signal\\_short_time_fft.py",
    "ast_data": "FunctionDef name:mfft arg:self arguments arg Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "set_xy",
    "source_code": "def set_xy(self, xy):\n    self._x0, self._y0 = xy\n    self.stale = True",
    "docstring": "Set the left and bottom coordinates of the rectangle. Parameters ---------- xy : (float, float)",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\patches.py",
    "ast_data": "FunctionDef name:set_xy arg:self arg:xy arguments arg arg Assign Assign"
  },
  {
    "library": "tensorflow",
    "name": "_run_conversion",
    "source_code": "def _run_conversion(self, meta_graph_def):\n    grappler_session_config = config_pb2.ConfigProto()\n    custom_rewriter_config = _get_tensorrt_rewriter_config(conversion_params=self._conversion_params._replace(allow_build_at_runtime=True), is_dynamic_op=True, max_batch_size=None, disable_non_trt_optimizers=self._test_only_disable_non_trt_optimizers, use_implicit_batch=not self._use_dynamic_shape, profile_strategy=self._profile_strategy)\n    grappler_session_config.graph_options.rewrite_options.CopyFrom(custom_rewriter_config)\n    return tf_optimizer.OptimizeGraph(grappler_session_config, meta_graph_def, graph_id=b'tf_graph')",
    "docstring": "Run Grappler's OptimizeGraph() tool to convert the graph. Args: meta_graph_def: the MetaGraphDef instance to run the optimizations on. Returns: The optimized GraphDef.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\compiler\\tensorrt\\trt_convert.py",
    "ast_data": "FunctionDef name:_run_conversion arg:self arg:meta_graph_def arguments arg arg Assign Call Assign Call Call Call Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "_get_counts",
    "source_code": "def _get_counts(values, uniques):\n    if values.dtype.kind in 'OU':\n        counter = _NaNCounter(values)\n        output = np.zeros(len(uniques), dtype=np.int64)\n        for i, item in enumerate(uniques):\n            with suppress(KeyError):\n                output[i] = counter[item]\n        return output\n    unique_values, counts = _unique_np(values, return_counts=True)\n    uniques_in_values = np.isin(uniques, unique_values, assume_unique=True)\n    if np.isnan(unique_values[-1]) and np.isnan(uniques[-1]):\n        uniques_in_values[-1] = True\n    unique_valid_indices = np.searchsorted(unique_values, uniques[uniques_in_values])\n    output = np.zeros_like(uniques, dtype=np.int64)\n    output[uniques_in_values] = counts[unique_valid_indices]\n    return output",
    "docstring": "Get the count of each of the in . The counts will use the order passed in by . For non-object dtypes, is assumed to be sorted and is at the end.",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\utils\\_encode.py",
    "ast_data": "FunctionDef name:_get_counts arg:values arg:uniques arguments arg arg If Compare Assign Call Assign Call Call For Call With Call Assign Return return:yes Assign Call Assign Call If BoolOp Call Call Assign Assign Call Assign Call Assign Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "is_aten",
    "source_code": "def is_aten(domain: str) -> bool:\n    return domain == 'aten'",
    "docstring": "Check if the domain is official.",
    "type": "function",
    "file_path": "pytorch\\torch\\onnx\\_internal\\jit_utils.py",
    "ast_data": "FunctionDef name:is_aten arg:domain arguments arg Return return:yes Compare"
  },
  {
    "library": "matplotlib",
    "name": "get_animated",
    "source_code": "def get_animated(self):\n    return self._animated",
    "docstring": "Return whether the artist is animated.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\artist.py",
    "ast_data": "FunctionDef name:get_animated arg:self arguments arg Return return:yes"
  },
  {
    "library": "kornia",
    "name": "create_dedode_default",
    "source_code": "@classmethod\ndef create_dedode_default(cls, generator_type: str='C4', steerer_order: int=8) -> Module:\n    descriptor_dim = 256\n    if generator_type == 'C4':\n        generator = torch.block_diag(*(torch.tensor([[0.0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1], [1, 0, 0, 0]]) for _ in range(descriptor_dim // 4)))\n        return cls(generator).eval()\n    elif generator_type == 'SO2':\n        lie_generator = torch.block_diag(torch.zeros([descriptor_dim - 12 * (descriptor_dim // 14), descriptor_dim - 12 * (descriptor_dim // 14)]), *(torch.tensor([[0.0, j], [-j, 0]]) for j in range(1, 7) for _ in range(descriptor_dim // 14)))\n        generator = torch.matrix_exp(2 * 3.14159 / steerer_order * lie_generator)\n        return cls(generator).eval()\n    else:\n        raise ValueError",
    "docstring": "Create a steerer for pretrained DeDoDe descriptors int the \"C-setting\" from the paper where descriptors were trained for fixed steerers. Args: generator_type: The type of steerer generator. One of 'C4', 'SO2', default is 'C4'. These can be used with the DeDoDe descriptors in Kornia with C4 or SO2 in the name respectively (so called C-setting steerers). steerer_order: The discretisation order for SO2-steerers (NOT used for C4-steerers). Returns: The pretrained model.",
    "type": "method",
    "file_path": "kornia\\kornia\\feature\\steerers.py",
    "ast_data": "FunctionDef name:create_dedode_default arg:cls arg:generator_type arg:steerer_order arguments arg arg arg Assign If Compare Assign Call Call Call Return return:yes Call Call If Compare Assign Call Call Call Call Call Assign Call Return return:yes Call Call Raise"
  },
  {
    "library": "pytorch",
    "name": "TypedExpr",
    "source_code": "@dataclass\nclass TypedExpr:\n    expr: _ExprType\n    dtype: torch.dtype\n\n    def is_constant(self):\n        return _is_constant(self.expr)\n\n    def __post_init__(self):\n        if _is_constant(self.expr):\n            self.expr = dtype_to_type(self.dtype)(self.expr)",
    "docstring": "A SymPy expression with associated type",
    "type": "class",
    "file_path": "pytorch\\torch\\_inductor\\index_propagation.py",
    "ast_data": "ClassDef name:TypedExpr FunctionDef name:is_constant arg:self arguments arg Return return:yes Call FunctionDef name:__post_init__ arg:self arguments arg If Call Assign Call Call"
  },
  {
    "library": "kornia",
    "name": "intrinsics_like",
    "source_code": "def intrinsics_like(focal: float, input: Tensor) -> Tensor:\n    if len(input.shape) != 4:\n        raise AssertionError(input.shape)\n    if focal <= 0:\n        raise AssertionError(focal)\n    _, _, H, W = input.shape\n    intrinsics = eye_like(3, input)\n    intrinsics[..., 0, 0] *= focal\n    intrinsics[..., 1, 1] *= focal\n    intrinsics[..., 0, 2] += 1.0 * W / 2\n    intrinsics[..., 1, 2] += 1.0 * H / 2\n    return intrinsics",
    "docstring": "Return a 3x3 intrinsics matrix, with same size as the input. The center of projection will be based in the input image size. Args: focal: the focal length for the camera matrix. input: image tensor that will determine the batch size and image height and width. It is assumed to be a tensor in the shape of :math:. Returns: The camera matrix with the shape of :math:.",
    "type": "function",
    "file_path": "kornia\\kornia\\geometry\\epipolar\\projection.py",
    "ast_data": "FunctionDef name:intrinsics_like arg:focal arg:input arguments arg arg If Compare Call Raise Call If Compare Raise Call Assign Assign Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_conv_bn_add_relu_extra_inputs_getter_right",
    "source_code": "def _conv_bn_add_relu_extra_inputs_getter_right(pattern):\n    _relu, add_pattern = pattern\n    _, extra_input, _bn_conv = add_pattern\n    return [extra_input]",
    "docstring": "get inputs pattern for extra inputs, inputs for root node are assumed to be copied over from root node to the fused node",
    "type": "function",
    "file_path": "pytorch\\torch\\ao\\quantization\\backend_config\\onednn.py",
    "ast_data": "FunctionDef name:_conv_bn_add_relu_extra_inputs_getter_right arg:pattern arguments arg Assign Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_get_handle_reader",
    "source_code": "def _get_handle_reader(graph, handle, dtype):\n    graph_key = TensorHandle._get_reader_key(handle)\n    result = graph._handle_readers.get(graph_key)\n    if result is None:\n        handle_device = TensorHandle._get_device_name(handle)\n        with graph.as_default(), graph.device(handle_device):\n            holder = array_ops.placeholder(dtypes.string)\n            _register_handle_feeder(holder.graph, holder, dtype)\n            reader = gen_data_flow_ops.get_session_tensor(holder, dtype)\n        result = (holder, reader)\n        graph._handle_readers[graph_key] = result\n    return result",
    "docstring": "Return a read subgraph for this handle.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\session_ops.py",
    "ast_data": "FunctionDef name:_get_handle_reader arg:graph arg:handle arg:dtype arguments arg arg arg Assign Call Assign Call If Compare Assign Call With Call Call Assign Call Call Assign Call Assign Assign Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "ExecutableNotFoundError",
    "source_code": "class ExecutableNotFoundError(FileNotFoundError):\n    pass",
    "docstring": "Error raised when an executable that Matplotlib optionally depends on can't be found.",
    "type": "class",
    "file_path": "matplotlib\\lib\\matplotlib\\__init__.py",
    "ast_data": "ClassDef name:ExecutableNotFoundError"
  },
  {
    "library": "django",
    "name": "results_iter",
    "source_code": "def results_iter(self, results=None, tuple_expected=False, chunked_fetch=False, chunk_size=GET_ITERATOR_CHUNK_SIZE):\n    if results is None:\n        results = self.execute_sql(MULTI, chunked_fetch=chunked_fetch, chunk_size=chunk_size)\n    fields = [s[0] for s in self.select[0:self.col_count]]\n    converters = self.get_converters(fields)\n    rows = chain.from_iterable(results)\n    if converters:\n        rows = self.apply_converters(rows, converters)\n    if self.has_composite_fields(fields):\n        rows = self.composite_fields_to_tuples(rows, fields)\n    if tuple_expected:\n        rows = map(tuple, rows)\n    return rows",
    "docstring": "Return an iterator over the results from executing this query.",
    "type": "method",
    "file_path": "django\\django\\db\\models\\sql\\compiler.py",
    "ast_data": "FunctionDef name:results_iter arg:self arg:results arg:tuple_expected arg:chunked_fetch arg:chunk_size arguments arg arg arg arg arg If Compare Assign Call Assign Assign Call Assign Call If Assign Call If Call Assign Call If Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_reshape_if_necessary",
    "source_code": "def _reshape_if_necessary(tensor, new_shape):\n    new_shape = tuple((-1 if x is None else x for x in new_shape))\n    cur_shape = tuple((x.value for x in tensor.shape.dims))\n    if len(new_shape) == len(cur_shape) and all((not isinstance(d1, tensor_lib.Tensor) and (d0 == d1 or d1 == -1) for d0, d1 in zip(cur_shape, new_shape))):\n        return tensor\n    else:\n        return array_ops.reshape(tensor, new_shape)",
    "docstring": "Like reshape(), but avoids creating a new tensor if possible.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\special_math_ops.py",
    "ast_data": "FunctionDef name:_reshape_if_necessary arg:tensor arg:new_shape arguments arg arg Assign Call Compare Assign Call If BoolOp Compare Call Call Call BoolOp Call BoolOp Compare Compare Call Return return:yes Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "noop_hook",
    "source_code": "def noop_hook(_: Any, bucket: GradBucket) -> torch.futures.Future[torch.Tensor]:\n    fut: torch.futures.Future[torch.Tensor] = torch.futures.Future()\n    fut.set_result(bucket.buffer())\n    return fut",
    "docstring": "Return a future that wraps the input, so it is a no-op that does not incur any communication overheads. This hook should **only** be used for headroom analysis of allreduce optimization, instead of the normal gradient synchronization. For example, if only less than 10% speedup of training time can be observed after this hook is registered, it usually implies that allreduce is not a performance bottleneck for this case. Such instrumentation can be particularly useful if GPU traces cannot be easily retrieved or the trace analysis is complicated some factors such as the overlap between allreduce and computation or the desynchronization across ranks. Example:: >>> # xdoctest: +SKIP >>> ddp_model.register_comm_hook(None, noop_hook)",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\algorithms\\ddp_comm_hooks\\debugging_hooks.py",
    "ast_data": "FunctionDef name:noop_hook arg:_ arg:bucket arguments arg arg Call Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "UnliftableError",
    "source_code": "class UnliftableError(Exception):\n    ag_pass_through = True",
    "docstring": "Raised if a Tensor cannot be lifted from the graph.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\op_selector.py",
    "ast_data": "ClassDef name:UnliftableError Assign"
  },
  {
    "library": "tensorflow",
    "name": "insert",
    "source_code": "def insert(self, func):\n    token = self._next_unique_token()\n    self._funcs[token] = func\n    return token",
    "docstring": "Registers and returns a unique token for this entry.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\script_ops.py",
    "ast_data": "FunctionDef name:insert arg:self arg:func arguments arg arg Assign Call Assign Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "fsdp_modules",
    "source_code": "@staticmethod\ndef fsdp_modules(module: nn.Module, root_only: bool=False) -> list['FullyShardedDataParallel']:\n    if root_only:\n        return _get_fsdp_root_states(module)\n    return traversal_utils._get_fsdp_states(module)",
    "docstring": "Return all nested FSDP instances. This possibly includes ``.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\fsdp\\fully_sharded_data_parallel.py",
    "ast_data": "FunctionDef name:fsdp_modules arg:module arg:root_only arguments arg arg If Return return:yes Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "parse_single_example",
    "source_code": "@tf_export(v1=['io.parse_single_example', 'parse_single_example'])\n@dispatch.add_dispatch_support\ndef parse_single_example(serialized, features, name=None, example_names=None):\n    return parse_single_example_v2(serialized, features, example_names, name)",
    "docstring": "Parses a single proto. Similar to , except: For dense tensors, the returned is identical to the output of , except there is no batch dimension, the output shape is the same as the shape given in . For s, the first (batch) column of the indices matrix is removed (the indices matrix is a column vector), the values vector is unchanged, and the first () entry of the shape vector is removed (it is now a single element vector). One might see performance advantages by batching protos with instead of using this function directly. Args: serialized: A scalar string Tensor, a single serialized Example. features: A mapping of feature keys to or values. name: A name for this operation (optional). example_names: (Optional) A scalar string Tensor, the associated name. Returns: A mapping feature keys to and values. Raises: ValueError: if any feature is invalid.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\parsing_ops.py",
    "ast_data": "FunctionDef name:parse_single_example arg:serialized arg:features arg:name arg:example_names arguments arg arg arg arg Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "total_count",
    "source_code": "@property\ndef total_count(self):\n    return self._total_count",
    "docstring": "Number of trials used to construct a sample.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\distributions\\multinomial.py",
    "ast_data": "FunctionDef name:total_count arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_add_collection_def",
    "source_code": "@staticmethod\ndef _add_collection_def(meta_graph_def, key, export_scope=None):\n    meta_graph.add_collection_def(meta_graph_def, key, export_scope=export_scope)",
    "docstring": "Adds a collection to MetaGraphDef protocol buffer. Args: meta_graph_def: MetaGraphDef protocol buffer. key: One of the GraphKeys or user-defined string. export_scope: Optional . Name scope to remove.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\training\\saver.py",
    "ast_data": "FunctionDef name:_add_collection_def arg:meta_graph_def arg:key arg:export_scope arguments arg arg arg Call"
  },
  {
    "library": "numpy",
    "name": "enabled",
    "source_code": "def enabled(self):\n    return self._enabled",
    "docstring": "Is the use of the display value enabled?",
    "type": "method",
    "file_path": "numpy\\numpy\\ma\\core.py",
    "ast_data": "FunctionDef name:enabled arg:self arguments arg Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "get_affine",
    "source_code": "def get_affine(self):\n    return IdentityTransform()",
    "docstring": "Get the affine part of this transform.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\transforms.py",
    "ast_data": "FunctionDef name:get_affine arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "authlib",
    "name": "validate_token_request",
    "source_code": "def validate_token_request(self, request):\n    if not request.client_id:\n        raise MissingRequiredParameterError('oauth_consumer_key')\n    client = self._get_client(request)\n    if not client:\n        raise InvalidClientError()\n    if not request.token:\n        raise MissingRequiredParameterError('oauth_token')\n    token = self.get_temporary_credential(request)\n    if not token:\n        raise InvalidTokenError()\n    verifier = request.oauth_params.get('oauth_verifier')\n    if not verifier:\n        raise MissingRequiredParameterError('oauth_verifier')\n    if not token.check_verifier(verifier):\n        raise InvalidRequestError('Invalid \"oauth_verifier\"')\n    request.credential = token\n    self.validate_timestamp_and_nonce(request)\n    self.validate_oauth_signature(request)\n    return request",
    "docstring": "Validate request for issuing token.",
    "type": "method",
    "file_path": "authlib\\authlib\\oauth1\\rfc5849\\authorization_server.py",
    "ast_data": "FunctionDef name:validate_token_request arg:self arg:request arguments arg arg If Raise Call Assign Call If Raise Call If Raise Call Assign Call If Raise Call Assign Call If Raise Call If Call Raise Call Assign Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "convert_lstm_weights",
    "source_code": "def convert_lstm_weights(weights, from_cudnn=True):\n    kernels = transform_kernels(weights[0], transpose_input(from_cudnn), n_gates)\n    recurrent_kernels = transform_kernels(weights[1], lambda k: k.T, n_gates)\n    if from_cudnn:\n        biases = np.sum(np.split(weights[2], 2, axis=0), axis=0)\n    else:\n        biases = np.tile(0.5 * weights[2], 2)\n    return [kernels, recurrent_kernels, biases]",
    "docstring": "Converts the weights between CuDNNLSTM and LSTM. Args: weights: Original weights. from_cudnn: Indicates whether original weights are from CuDNN layer. Returns: Updated weights compatible with LSTM.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\saving\\hdf5_format.py",
    "ast_data": "FunctionDef name:convert_lstm_weights arg:weights arg:from_cudnn arguments arg arg Assign Call Call Assign Call arguments arg If Assign Call Call Assign Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_OptimizerHookState",
    "source_code": "class _OptimizerHookState:\n    __slots__ = ['functional_optimizer', 'params_to_optimize']\n\n    def __init__(self, functional_optim, params=None):\n        self.functional_optimizer = functional_optim\n        self._check_valid_functional_optim()\n        self._set_params_to_optimize(params)\n\n    def _set_params_to_optimize(self, params):\n        if params is not None:\n            self.params_to_optimize = set(params)\n\n    def _check_valid_functional_optim(self):\n        if not hasattr(self.functional_optimizer, _FUNCTIONAL_OPTIM_STEP_METHOD_NAME):\n            raise ValueError(f'Class {type(self.functional_optimizer)} must implement method {_FUNCTIONAL_OPTIM_STEP_METHOD_NAME}.')",
    "docstring": "Holds state for running optimizer in-line after DDP communication hook. Currently contains only optimizer class which must have a method .",
    "type": "class",
    "file_path": "pytorch\\torch\\distributed\\algorithms\\ddp_comm_hooks\\optimizer_overlap_hooks.py",
    "ast_data": "ClassDef name:_OptimizerHookState Assign FunctionDef name:__init__ arg:self arg:functional_optim arg:params arguments arg arg arg Assign Call Call FunctionDef name:_set_params_to_optimize arg:self arg:params arguments arg arg If Compare Assign Call FunctionDef name:_check_valid_functional_optim arg:self arguments arg If Call Raise Call Call"
  },
  {
    "library": "pytorch",
    "name": "__init__",
    "source_code": "def __init__(self, writer: 'SummaryWriter') -> None:\n    self._writer = writer",
    "docstring": "Constructs the ``.",
    "type": "method",
    "file_path": "pytorch\\torch\\monitor\\__init__.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:writer arguments arg arg Assign"
  },
  {
    "library": "matplotlib",
    "name": "_set_scale",
    "source_code": "def _set_scale(self, scale, **kwargs):\n    self.long_axis._set_axes_scale(scale, **kwargs)",
    "docstring": "Set the colorbar long axis scale. Parameters ---------- scale : {\"linear\", \"log\", \"symlog\", \"logit\", ...} or The axis scale type to apply. **kwargs Different keyword arguments are accepted, depending on the scale. See the respective class keyword arguments: - - - - - - Notes ----- By default, Matplotlib supports the above-mentioned scales. Additionally, custom scales may be registered using . These scales can then also be used here.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\colorbar.py",
    "ast_data": "FunctionDef name:_set_scale arg:self arg:scale arguments arg arg arg Call"
  },
  {
    "library": "pytorch",
    "name": "LazyBatchNorm3d",
    "source_code": "class LazyBatchNorm3d(_LazyNormBase, _BatchNorm):\n    cls_to_become = BatchNorm3d\n\n    def _check_input_dim(self, input):\n        if input.dim() != 5:\n            raise ValueError(f'expected 5D input (got {input.dim()}D input)')",
    "docstring": "A :class: module with lazy initialization. Lazy initialization is done for the `BatchNorm3dweightbiasrunning_meanrunning_vartorch.nn.modules.lazy.LazyModuleMixinrunning_meanrunning_var`",
    "type": "class",
    "file_path": "pytorch\\torch\\nn\\modules\\batchnorm.py",
    "ast_data": "ClassDef name:LazyBatchNorm3d Assign FunctionDef name:_check_input_dim arg:self arg:input arguments arg arg If Compare Call Raise Call Call"
  },
  {
    "library": "tensorflow",
    "name": "get_layer",
    "source_code": "def get_layer(self, name=None, index=None):\n    if index is not None and name is not None:\n        raise ValueError('Provide only a layer name or a layer index.')\n    if index is not None:\n        if len(self.layers) <= index:\n            raise ValueError('Was asked to retrieve layer at index ' + str(index) + ' but model only has ' + str(len(self.layers)) + ' layers.')\n        else:\n            return self.layers[index]\n    if name is not None:\n        for layer in self.layers:\n            if layer.name == name:\n                return layer\n        raise ValueError('No such layer: ' + name + '.')\n    raise ValueError('Provide either a layer name or layer index.')",
    "docstring": "Retrieves a layer based on either its name (unique) or index. If and are both provided, will take precedence. Indices are based on order of horizontal graph traversal (bottom-up). Args: name: String, name of layer. index: Integer, index of layer. Returns: A layer instance. Raises: ValueError: In case of invalid layer name or index.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\training.py",
    "ast_data": "FunctionDef name:get_layer arg:self arg:name arg:index arguments arg arg arg If BoolOp Compare Compare Raise Call If Compare If Compare Call Raise Call Call Call Call Return return:yes If Compare For If Compare Return return:yes Raise Call Raise Call"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, device):\n    super(OneDeviceStrategy, self).__init__(OneDeviceExtended(self, device))\n    distribute_lib.distribution_strategy_gauge.get_cell('V2').set('OneDeviceStrategy')",
    "docstring": "Creates a . Args: device: Device string identifier for the device on which the variables should be placed. See class docs for more details on how the device is used. Examples: \"/cpu:0\", \"/gpu:0\", \"/device:CPU:0\", \"/device:GPU:0\"",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\one_device_strategy.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:device arguments arg arg Call Call Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "add_resource",
    "source_code": "def add_resource(self, feature_column, name, resource):\n    del feature_column, name, resource\n    raise NotImplementedError('StateManager.add_resource')",
    "docstring": "Creates a new resource. Resources can be things such as tables, variables, trackables, etc. Args: feature_column: A object this resource corresponds to. name: Name of the resource. resource: The resource. Returns: The created resource.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\feature_column\\feature_column_v2.py",
    "ast_data": "FunctionDef name:add_resource arg:self arg:feature_column arg:name arg:resource arguments arg arg arg arg Raise Call"
  },
  {
    "library": "scipy",
    "name": "_first_nonnan",
    "source_code": "def _first_nonnan(a, axis):\n    k = _argmin(np.isnan(a), axis=axis, keepdims=True)\n    return np.take_along_axis(a, k, axis=axis)",
    "docstring": "Return the first non-nan value along the given axis. If a slice is all nan, nan is returned for that slice. The shape of the return value corresponds to ``. Examples -------- >>> import numpy as np >>> nan = np.nan >>> a = np.array([[ 3., 3., nan, 3.], [ 1., nan, 2., 4.], [nan, nan, 9., -1.], [nan, 5., 4., 3.], [ 2., 2., 2., 2.], [nan, nan, nan, nan]]) >>> _first_nonnan(a, axis=0) array([[3., 3., 2., 3.]]) >>> _first_nonnan(a, axis=1) array([[ 3.], [ 1.], [ 9.], [ 5.], [ 2.], [nan]])",
    "type": "function",
    "file_path": "scipy\\scipy\\_lib\\_util.py",
    "ast_data": "FunctionDef name:_first_nonnan arg:a arg:axis arguments arg arg Assign Call Call Return return:yes Call"
  },
  {
    "library": "django",
    "name": "BaseArchiveIndexView",
    "source_code": "class BaseArchiveIndexView(BaseDateListView):\n    context_object_name = 'latest'\n\n    def get_dated_items(self):\n        qs = self.get_dated_queryset()\n        date_list = self.get_date_list(qs, ordering='DESC')\n        if not date_list:\n            qs = qs.none()\n        return (date_list, qs, {})",
    "docstring": "Base view for archives of date-based items. This requires subclassing to provide a response mixin.",
    "type": "class",
    "file_path": "django\\django\\views\\generic\\dates.py",
    "ast_data": "ClassDef name:BaseArchiveIndexView Assign FunctionDef name:get_dated_items arg:self arguments arg Assign Call Assign Call If Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "variables",
    "source_code": "@property\n@doc_controls.do_not_generate_docs\ndef variables(self):\n    return self.weights",
    "docstring": "Returns the list of all layer variables/weights. Alias of . Note: This will not track the weights of nested that are not themselves Keras layers. Returns: A list of variables.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\base_layer.py",
    "ast_data": "FunctionDef name:variables arg:self arguments arg Return return:yes"
  },
  {
    "library": "scipy",
    "name": "trimmed_mean_ci",
    "source_code": "def trimmed_mean_ci(data, limits=(0.2, 0.2), inclusive=(True, True), alpha=0.05, axis=None):\n    data = ma.array(data, copy=False)\n    trimmed = mstats.trimr(data, limits=limits, inclusive=inclusive, axis=axis)\n    tmean = trimmed.mean(axis)\n    tstde = mstats.trimmed_stde(data, limits=limits, inclusive=inclusive, axis=axis)\n    df = trimmed.count(axis) - 1\n    tppf = t.ppf(1 - alpha / 2.0, df)\n    return np.array((tmean - tppf * tstde, tmean + tppf * tstde))",
    "docstring": "Selected confidence interval of the trimmed mean along the given axis. Parameters ---------- data : array_like Input data. limits : {None, tuple}, optional None or a two item tuple. Tuple of the percentages to cut on each side of the array, with respect to the number of unmasked data, as floats between 0. and 1. If `data`. Defaults to None. Returns ------- trimmed_mean_ci : (2,) ndarray The lower and upper confidence intervals of the trimmed data.",
    "type": "function",
    "file_path": "scipy\\scipy\\stats\\_mstats_extras.py",
    "ast_data": "FunctionDef name:trimmed_mean_ci arg:data arg:limits arg:inclusive arg:alpha arg:axis arguments arg arg arg arg arg Assign Call Assign Call Assign Call Assign Call Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "set_device_index",
    "source_code": "def set_device_index(device: _device_t, /) -> None:\n    device_index = _get_device_index(device)\n    torch._C._accelerator_setDeviceIndex(device_index)",
    "docstring": "Set the current device index to a given device. Args: device (:class:, str, int): a given device that must match the current :ref: device type. .. note:: This function is a no-op if this device index is negative.",
    "type": "function",
    "file_path": "pytorch\\torch\\accelerator\\__init__.py",
    "ast_data": "FunctionDef name:set_device_index arguments arg Assign Call Call"
  },
  {
    "library": "scikit-learn",
    "name": "predict",
    "source_code": "def predict(self, X):\n    check_is_fitted(self)\n    X = self._check_X(X)\n    jll = self._joint_log_likelihood(X)\n    return self.classes_[np.argmax(jll, axis=1)]",
    "docstring": "Perform classification on an array of test vectors X. Parameters ---------- X : array-like of shape (n_samples, n_features) The input samples. Returns ------- C : ndarray of shape (n_samples,) Predicted target values for X.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\naive_bayes.py",
    "ast_data": "FunctionDef name:predict arg:self arg:X arguments arg arg Call Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "check_default_value",
    "source_code": "def check_default_value(shape, default_value, dtype, key):\n    if default_value is None:\n        return None\n    if isinstance(default_value, int):\n        return _create_tuple(shape, default_value)\n    if isinstance(default_value, float) and dtype.is_floating:\n        return _create_tuple(shape, default_value)\n    if callable(getattr(default_value, 'tolist', None)):\n        default_value = default_value.tolist()\n    if nest.is_nested(default_value):\n        if not _is_shape_and_default_value_compatible(default_value, shape):\n            raise ValueError('The shape of default_value must be equal to given shape. default_value: {}, shape: {}, key: {}'.format(default_value, shape, key))\n        is_list_all_int = all((isinstance(v, int) for v in nest.flatten(default_value)))\n        is_list_has_float = any((isinstance(v, float) for v in nest.flatten(default_value)))\n        if is_list_all_int:\n            return _as_tuple(default_value)\n        if is_list_has_float and dtype.is_floating:\n            return _as_tuple(default_value)\n    raise TypeError('default_value must be compatible with dtype. default_value: {}, dtype: {}, key: {}'.format(default_value, dtype, key))",
    "docstring": "Returns default value as tuple if it's valid, otherwise raises errors. This function verifies that is compatible with both and . If it is not compatible, it raises an error. If it is compatible, it casts default_value to a tuple and returns it. is used only for error message. Args: shape: An iterable of integers specifies the shape of the . default_value: If a single value is provided, the same value will be applied as the default value for every item. If an iterable of values is provided, the shape of the should be equal to the given . dtype: defines the type of values. Default value is . Must be a non-quantized, real integer or floating point type. key: Column name, used only for error messages. Returns: A tuple which will be used as default value. Raises: TypeError: if is an iterable but not compatible with TypeError: if is not compatible with . ValueError: if is not convertible to .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\feature_column\\utils.py",
    "ast_data": "FunctionDef name:check_default_value arg:shape arg:default_value arg:dtype arg:key arguments arg arg arg arg If Compare Return return:no If Call Return return:yes Call If BoolOp Call Return return:yes Call If Call Call Assign Call If Call If Call Raise Call Call Assign Call Call Call Assign Call Call Call If Return return:yes Call If BoolOp Return return:yes Call Raise Call Call"
  },
  {
    "library": "pytorch",
    "name": "_get_device_index",
    "source_code": "def _get_device_index(device: Any, optional: bool=False, allow_cpu: bool=False) -> int:\n    if isinstance(device, int):\n        return device\n    if isinstance(device, str):\n        device = torch.device(device)\n    if isinstance(device, torch.device):\n        if allow_cpu:\n            if device.type not in ['mtia', 'cpu']:\n                raise ValueError(f'Expected a mtia or cpu device, but got: {device}')\n        elif device.type != 'mtia':\n            raise ValueError(f'Expected a mtia device, but got: {device}')\n    if not torch.jit.is_scripting():\n        if isinstance(device, torch.mtia.device):\n            return device.idx\n    return _torch_get_device_index(device, optional, allow_cpu)",
    "docstring": "Get the device index from :attr:, which can be a torch.device object, a Python integer, or `deviceoptionalallow_cpudevicedeviceoptional`.",
    "type": "function",
    "file_path": "pytorch\\torch\\mtia\\_utils.py",
    "ast_data": "FunctionDef name:_get_device_index arg:device arg:optional arg:allow_cpu arguments arg arg arg If Call Return return:yes If Call Assign Call If Call If If Compare Raise Call If Compare Raise Call If Call If Call Return return:yes Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "__dlpack_device__",
    "source_code": "@abstractmethod\ndef __dlpack_device__(self) -> tuple[DlpackDeviceType, int | None]:\n    pass",
    "docstring": "Device type and device ID for where the data in the buffer resides. Uses device type codes matching DLPack. Note: must be implemented even if `` is not.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\interchange\\dataframe_protocol.py",
    "ast_data": "FunctionDef name:__dlpack_device__ arg:self arguments arg"
  },
  {
    "library": "scikit-learn",
    "name": "_mask_edges_weights",
    "source_code": "def _mask_edges_weights(mask, edges, weights=None):\n    inds = np.arange(mask.size)\n    inds = inds[mask.ravel()]\n    ind_mask = np.logical_and(np.isin(edges[0], inds), np.isin(edges[1], inds))\n    edges = edges[:, ind_mask]\n    if weights is not None:\n        weights = weights[ind_mask]\n    if len(edges.ravel()):\n        maxval = edges.max()\n    else:\n        maxval = 0\n    order = np.searchsorted(np.flatnonzero(mask), np.arange(maxval + 1))\n    edges = order[edges]\n    if weights is None:\n        return edges\n    else:\n        return (edges, weights)",
    "docstring": "Apply a mask to edges (weighted or not)",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\feature_extraction\\image.py",
    "ast_data": "FunctionDef name:_mask_edges_weights arg:mask arg:edges arg:weights arguments arg arg arg Assign Call Assign Call Assign Call Call Call Assign If Compare Assign If Call Call Assign Call Assign Assign Call Call Call Assign If Compare Return return:yes Return return:yes"
  },
  {
    "library": "numpy",
    "name": "identity",
    "source_code": "@classmethod\ndef identity(cls, domain=None, window=None, symbol='x'):\n    if domain is None:\n        domain = cls.domain\n    if window is None:\n        window = cls.window\n    off, scl = pu.mapparms(window, domain)\n    coef = cls._line(off, scl)\n    return cls(coef, domain, window, symbol)",
    "docstring": "Identity function. If `` are the endpoints of the window. If None is given then the class window is used. The default is None. symbol : str, optional Symbol representing the independent variable. Default is 'x'. Returns ------- new_series : series Series of representing the identity.",
    "type": "method",
    "file_path": "numpy\\numpy\\polynomial\\_polybase.py",
    "ast_data": "FunctionDef name:identity arg:cls arg:domain arg:window arg:symbol arguments arg arg arg arg If Compare Assign If Compare Assign Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_configure",
    "source_code": "def _configure(self, session_config=None, cluster_spec=None, task_type=None, task_id=None):\n    if cluster_spec:\n        cluster_resolver = cluster_resolver_lib.SimpleClusterResolver(cluster_spec=multi_worker_util.normalize_cluster_spec(cluster_spec), task_type=task_type, task_id=task_id, num_accelerators={self._local_device_type: self._num_devices_per_worker}, rpc_layer=self._rpc_layer)\n        self._initialize_multi_worker(cluster_resolver)\n        assert isinstance(self._cross_device_ops, cross_device_ops_lib.CollectiveAllReduce)\n    if session_config:\n        session_config.CopyFrom(self._update_config_proto(session_config))",
    "docstring": "Configures the object. Args: session_config: a cluster_spec: a dict, ClusterDef or ClusterSpec object specifying the cluster configurations. task_type: the current task type, such as \"worker\". task_id: the current task id. Raises: ValueError: if is not in the .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\collective_all_reduce_strategy.py",
    "ast_data": "FunctionDef name:_configure arg:self arg:session_config arg:cluster_spec arg:task_type arg:task_id arguments arg arg arg arg arg If Assign Call Call Call Call If Call Call"
  },
  {
    "library": "django",
    "name": "timezone_name",
    "source_code": "@cached_property\ndef timezone_name(self):\n    if not settings.USE_TZ:\n        return settings.TIME_ZONE\n    elif self.settings_dict['TIME_ZONE'] is None:\n        return 'UTC'\n    else:\n        return self.settings_dict['TIME_ZONE']",
    "docstring": "Name of the time zone of the database connection.",
    "type": "method",
    "file_path": "django\\django\\db\\backends\\base\\base.py",
    "ast_data": "FunctionDef name:timezone_name arg:self arguments arg If Return return:yes If Compare Return return:yes Return return:yes"
  },
  {
    "library": "pandas",
    "name": "parse",
    "source_code": "def parse(self, declarations_str: str) -> Iterator[tuple[str, str]]:\n    for decl in declarations_str.split(';'):\n        if not decl.strip():\n            continue\n        prop, sep, val = decl.partition(':')\n        prop = prop.strip().lower()\n        val = val.strip().lower()\n        if sep:\n            yield (prop, val)\n        else:\n            warnings.warn(f'Ill-formatted attribute: expected a colon in {decl!r}', CSSWarning, stacklevel=find_stack_level())",
    "docstring": "Generates (prop, value) pairs from declarations. In a future version may generate parsed tokens from tinycss/tinycss2 Parameters ---------- declarations_str : str",
    "type": "method",
    "file_path": "pandas\\pandas\\io\\formats\\css.py",
    "ast_data": "FunctionDef name:parse arg:self arg:declarations_str arguments arg arg For Call If Call Assign Call Assign Call Call Assign Call Call If Call Call"
  },
  {
    "library": "pytorch",
    "name": "transform_get_item_tensor",
    "source_code": "@register_transformation_rule(GetItemTensor)\ndef transform_get_item_tensor(constraint, counter):\n    assert isinstance(constraint.index_tuple, tuple)\n    dims, counter = gen_tensor_dims(constraint.tensor_size, counter)\n    nat_constraints = gen_nat_constraints(dims)\n    none_c = constraint.index_tuple.count(None)\n    resulting_tensor_dims = (none_c + len(dims)) * [None]\n    dim_index = 0\n    for i in range(len(constraint.index_tuple)):\n        if constraint.index_tuple[i] is None:\n            resulting_tensor_dims[i] = 1\n        elif constraint.index_tuple[i] == slice(None, None, None):\n            pass\n        else:\n            raise NotImplementedError('Method not yet implemented')\n    dim_index = 0\n    for i in range(len(resulting_tensor_dims)):\n        if resulting_tensor_dims[i] is None:\n            resulting_tensor_dims[i] = dims[dim_index]\n            dim_index += 1\n    is_valid_index = valid_index_tensor(constraint.index_tuple, dims)\n    if len(resulting_tensor_dims) > 4:\n        return (F(), counter)\n    else:\n        constraints = [BinConstraintT(constraint.input_var, TensorType(dims), op_eq), BinConstraintT(constraint.res, TensorType(resulting_tensor_dims), op_eq), *nat_constraints, is_valid_index]\n        return (Conj(constraints), counter)",
    "docstring": "When the index is a tuple, then the output will be a tensor TODO: we have to check if this is the case for all HF models The cases we are covering here are a tuple with one of: - slice with default argument - None None appends 1 to the input tensor dimensions so each occurrence of 'None' increases the rank by 1 slice with default arguments does not change the rank",
    "type": "function",
    "file_path": "pytorch\\torch\\fx\\experimental\\migrate_gradual_types\\constraint_transformation.py",
    "ast_data": "FunctionDef name:transform_get_item_tensor arg:constraint arg:counter arguments arg arg Call Assign Call Assign Call Assign Call Assign Call Assign For Call Call If Compare Assign If Compare Call Raise Call Assign For Call Call If Compare Assign Assign Call If Compare Call Return return:yes Call Assign Call Call Call Call Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_ComplexAbsGrad",
    "source_code": "@ops.RegisterGradient('ComplexAbs')\ndef _ComplexAbsGrad(op: ops.Operation, grad):\n    return math_ops.div_no_nan(math_ops.complex(grad, array_ops.zeros_like(grad)) * op.inputs[0], math_ops.complex(op.outputs[0], array_ops.zeros_like(op.outputs[0])))",
    "docstring": "Returns the gradient of ComplexAbs.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\math_grad.py",
    "ast_data": "FunctionDef name:_ComplexAbsGrad arg:op arg:grad arguments arg arg Return return:yes Call Call Call Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "is_macos_or_newer",
    "source_code": "@_lru_cache\ndef is_macos_or_newer(major: int, minor: int) -> bool:\n    return torch._C._mps_is_on_macos_or_newer(major, minor)",
    "docstring": "Return a bool indicating whether MPS is running on given MacOS or newer.",
    "type": "function",
    "file_path": "pytorch\\torch\\backends\\mps\\__init__.py",
    "ast_data": "FunctionDef name:is_macos_or_newer arg:major arg:minor arguments arg arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "get_stream_from_external",
    "source_code": "def get_stream_from_external(data_ptr: int, device: Optional[_device_t]=None) -> Stream:\n    _lazy_init()\n    streamdata = torch._C._xpu_getStreamFromExternal(data_ptr, _get_device_index(device, optional=True))\n    return Stream(stream_id=streamdata[0], device_index=streamdata[1], device_type=streamdata[2])",
    "docstring": "Return a :class: from an external SYCL queue. This function is used to wrap SYCL queue created in other libraries in order to facilitate data exchange and multi-library interactions. .. note:: This function doesn't manage the queue life-cycle, it is the user responsibility to keep the referenced queue alive while this returned stream is being used. The different SYCL queue pointers will result in distinct :class: objects, even if the SYCL queues they dereference are equivalent. Args: data_ptr(int): Integer representation of the value passed externally. device(torch.device or int, optional): the device where the queue was originally created. It is the user responsibility to ensure the device is specified correctly.",
    "type": "function",
    "file_path": "pytorch\\torch\\xpu\\__init__.py",
    "ast_data": "FunctionDef name:get_stream_from_external arg:data_ptr arg:device arguments arg arg Call Assign Call Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_grappler_config",
    "source_code": "def _grappler_config(self, optimizers=None):\n    if not optimizers:\n        optimizers = []\n    if not self.experimental_new_converter:\n        optimizers.append('constfold')\n    is_only_flex_enabled = set([OpsSet.SELECT_TF_OPS]) == set(self.target_spec.supported_ops)\n    if is_only_flex_enabled:\n        optimizers.append('layout')\n    return _get_grappler_config(optimizers)",
    "docstring": "Creates a tf.compat.v1.ConfigProto for configuring Grappler. Args: optimizers: List of strings that represents the list of optimizers. Returns: tf.ConfigProto.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\lite\\python\\lite.py",
    "ast_data": "FunctionDef name:_grappler_config arg:self arg:optimizers arguments arg arg If Assign If Call Assign Compare Call Call If Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "make_logs",
    "source_code": "def make_logs(model, logs, outputs, mode, prefix=''):\n    metric_names = model.metrics_names\n    if mode in {ModeKeys.TRAIN, ModeKeys.TEST} and metric_names:\n        for label, output in zip(metric_names, outputs):\n            logs[prefix + label] = output\n    else:\n        logs['outputs'] = outputs\n    return logs",
    "docstring": "Computes logs for sending to methods.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\callbacks.py",
    "ast_data": "FunctionDef name:make_logs arg:model arg:logs arg:outputs arg:mode arg:prefix arguments arg arg arg arg arg Assign If BoolOp Compare For Call Assign Assign Return return:yes"
  },
  {
    "library": "django",
    "name": "h",
    "source_code": "def h(self):\n    return '%02d' % (self.data.hour % 12 or 12)",
    "docstring": "Hour, 12-hour format; i.e. '01' to '12'",
    "type": "method",
    "file_path": "django\\django\\utils\\dateformat.py",
    "ast_data": "FunctionDef name:h arg:self arguments arg Return return:yes BoolOp"
  },
  {
    "library": "pytorch",
    "name": "scale",
    "source_code": "def scale(self, outputs: Union[torch.Tensor, Iterable[torch.Tensor]]) -> Union[torch.Tensor, Iterable[torch.Tensor]]:\n    if not self._enabled:\n        return outputs\n    if isinstance(outputs, torch.Tensor):\n        if self._scale is None:\n            self._lazy_init_scale_growth_tracker(outputs.device)\n        assert self._scale is not None\n        return outputs * self._scale.to(device=outputs.device, non_blocking=True)\n    stash: list[_MultiDeviceReplicator] = []\n\n    def apply_scale(val: Union[torch.Tensor, Iterable[torch.Tensor]]):\n        if isinstance(val, torch.Tensor):\n            if len(stash) == 0:\n                if self._scale is None:\n                    self._lazy_init_scale_growth_tracker(val.device)\n                assert self._scale is not None\n                stash.append(_MultiDeviceReplicator(self._scale))\n            return val * stash[0].get(val.device)\n        if isinstance(val, abc.Iterable):\n            iterable = map(apply_scale, val)\n            if isinstance(val, (list, tuple)):\n                return type(val)(iterable)\n            return iterable\n        raise ValueError('outputs must be a Tensor or an iterable of Tensors')\n    return apply_scale(outputs)",
    "docstring": "Multiplies ('scales') a tensor or list of tensors by the scale factor. Returns scaled outputs. If this instance of :class: is not enabled, outputs are returned unmodified. Args: outputs (Tensor or iterable of Tensors): Outputs to scale.",
    "type": "method",
    "file_path": "pytorch\\torch\\amp\\grad_scaler.py",
    "ast_data": "FunctionDef name:scale arg:self arg:outputs arguments arg arg If Return return:yes If Call If Compare Call Compare Return return:yes Call FunctionDef name:apply_scale arg:val arguments arg If Call If Compare Call If Compare Call Compare Call Call Return return:yes Call If Call Assign Call If Call Return return:yes Call Call Return return:yes Raise Call Return return:yes Call"
  },
  {
    "library": "cherrypy",
    "name": "_get_threading_ident",
    "source_code": "def _get_threading_ident():\n    if sys.version_info >= (3, 3):\n        return threading.get_ident()\n    return threading._get_ident()",
    "docstring": "Discover the current thread identifier.",
    "type": "function",
    "file_path": "cherrypy\\cherrypy\\lib\\cpstats.py",
    "ast_data": "FunctionDef name:_get_threading_ident arguments If Compare Return return:yes Call Return return:yes Call"
  },
  {
    "library": "pygame",
    "name": "_clip_and_draw_horizline",
    "source_code": "def _clip_and_draw_horizline(surf, color, x_from, in_y, x_to):\n    clip = surf.get_clip()\n    if in_y < clip.y or in_y >= clip.y + clip.h:\n        return\n    x_from = max(x_from, clip.x)\n    x_to = min(x_to, clip.x + clip.w - 1)\n    if x_to < clip.x or x_from >= clip.x + clip.w:\n        return\n    _drawhorzline(surf, color, x_from, in_y, x_to)",
    "docstring": "draw clipped horizontal line.",
    "type": "function",
    "file_path": "pygame\\src_py\\draw_py.py",
    "ast_data": "FunctionDef name:_clip_and_draw_horizline arg:surf arg:color arg:x_from arg:in_y arg:x_to arguments arg arg arg arg arg Assign Call If BoolOp Compare Compare Return return:no Assign Call Assign Call If BoolOp Compare Compare Return return:no Call"
  },
  {
    "library": "pytorch",
    "name": "get_comm_latency_between",
    "source_code": "def get_comm_latency_between(parent_partition: Partition, child_partition: Partition, transfer_rate_bytes_per_sec: float):\n    if parent_partition.logical_device_ids != [] and child_partition.logical_device_ids != [] and (parent_partition.logical_device_ids == child_partition.logical_device_ids):\n        return 0.0\n    comm_size = 0\n    visited_nodes = set()\n    for node in child_partition.nodes:\n        input_nodes: dict[Node, None] = {}\n        map_arg(node.args, input_nodes.setdefault)\n        map_arg(node.kwargs, input_nodes.setdefault)\n        for n in input_nodes:\n            if n in parent_partition.nodes and n not in visited_nodes:\n                size_bytes = getattr(n, 'size_bytes', None)\n                if size_bytes is not None:\n                    comm_size += size_bytes.output_size\n                visited_nodes.add(n)\n    return comm_size / transfer_rate_bytes_per_sec",
    "docstring": "Given two partitions (parent and child), calculate the communication latency between the two.",
    "type": "function",
    "file_path": "pytorch\\torch\\fx\\experimental\\partitioner_utils.py",
    "ast_data": "FunctionDef name:get_comm_latency_between arg:parent_partition arg:child_partition arg:transfer_rate_bytes_per_sec arguments arg arg arg If BoolOp Compare Compare Compare Return return:yes Assign Assign Call For Call Call For If BoolOp Compare Compare Assign Call If Compare Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_use_composite_impl",
    "source_code": "def _use_composite_impl(fast, tensor_shape):\n    if fast is False:\n        return False\n    batch_shape = tensor_shape[:-2]\n    matrix_shape = tensor_shape[-2:]\n    if not tensor_shape.is_fully_defined():\n        return True\n    tensor_size = tensor_shape.num_elements() * matrix.dtype.size\n    is_io_bound = batch_shape.num_elements() > np.min(matrix_shape)\n    L2_CACHE_SIZE_GUESSTIMATE = 256000\n    if tensor_size > L2_CACHE_SIZE_GUESSTIMATE and is_io_bound:\n        return False\n    else:\n        return True",
    "docstring": "Determines whether to use the composite or specialized CPU kernel. When the total size of the tensor is larger than the cache size and the batch size is large compared to the smallest matrix dimension, then the composite implementation is inefficient since it has to read the entire tensor from memory multiple times. In this case we fall back to the original CPU kernel, which does all the computational steps on each matrix separately. Only fast mode is supported by the composite impl, so is returned if is . Args: fast: bool indicating if fast mode in the solver was requested. tensor_shape: The shape of the tensor. Returns: True if the composite impl should be used. False otherwise.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\linalg_ops.py",
    "ast_data": "FunctionDef name:_use_composite_impl arg:fast arg:tensor_shape arguments arg arg If Compare Return return:yes Assign Assign If Call Return return:yes Assign Call Assign Compare Call Call Assign If BoolOp Compare Return return:yes Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_maybe_assert_valid_sample",
    "source_code": "def _maybe_assert_valid_sample(self, x):\n    if not self.validate_args:\n        return x\n    return control_flow_ops.with_dependencies([check_ops.assert_positive(x, message='sample must be positive'), check_ops.assert_less(x, array_ops.ones([], self.dtype), message='sample must be less than `1`.')], x)",
    "docstring": "Checks the validity of a sample.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\distributions\\beta.py",
    "ast_data": "FunctionDef name:_maybe_assert_valid_sample arg:self arg:x arguments arg arg If Return return:yes Return return:yes Call Call Call Call"
  },
  {
    "library": "scikit-learn",
    "name": "load_digits",
    "source_code": "@validate_params({'n_class': [Interval(Integral, 1, 10, closed='both')], 'return_X_y': ['boolean'], 'as_frame': ['boolean']}, prefer_skip_nested_validation=True)\ndef load_digits(*, n_class=10, return_X_y=False, as_frame=False):\n    data, fdescr = load_gzip_compressed_csv_data(data_file_name='digits.csv.gz', descr_file_name='digits.rst', delimiter=',')\n    target = data[:, -1].astype(int, copy=False)\n    flat_data = data[:, :-1]\n    images = flat_data.view()\n    images.shape = (-1, 8, 8)\n    if n_class < 10:\n        idx = target < n_class\n        flat_data, target = (flat_data[idx], target[idx])\n        images = images[idx]\n    feature_names = ['pixel_{}_{}'.format(row_idx, col_idx) for row_idx in range(8) for col_idx in range(8)]\n    frame = None\n    target_columns = ['target']\n    if as_frame:\n        frame, flat_data, target = _convert_data_dataframe('load_digits', flat_data, target, feature_names, target_columns)\n    if return_X_y:\n        return (flat_data, target)\n    return Bunch(data=flat_data, target=target, frame=frame, feature_names=feature_names, target_names=np.arange(10), images=images, DESCR=fdescr)",
    "docstring": "Load and return the digits dataset (classification). Each datapoint is a 8x8 image of a digit. ================= ============== Classes 10 Samples per class ~180 Samples total 1797 Dimensionality 64 Features integers 0-16 ================= ============== This is a copy of the test set of the UCI ML hand-written digits datasets Read more in the :ref:. Parameters ---------- n_class : int, default=10 The number of classes to return. Between 0 and 10. return_X_y : bool, default=False If True, returns `datatargetreturn_X_ydatatarget~sklearn.utils.Bunchas_frame=Truedataas_frame=Truetargetas_frame=Truedatatargetas_frame=TrueXy` a series. .. versionadded:: 0.18 Examples -------- To load the data and visualize the images:: >>> from sklearn.datasets import load_digits >>> digits = load_digits() >>> print(digits.data.shape) (1797, 64) >>> import matplotlib.pyplot as plt >>> plt.matshow(digits.images[0], cmap=\"gray\") >>> plt.show()",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\datasets\\_base.py",
    "ast_data": "FunctionDef name:load_digits arguments arg arg arg Assign Call Assign Call Assign Assign Call Assign If Compare Assign Compare Assign Assign Assign Call Call Call Assign Assign If Assign Call If Return return:yes Return return:yes Call Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "install_global_by_id",
    "source_code": "def install_global_by_id(self, prefix, value) -> str:\n    name = f'{prefix}_{id(value)}_c{self.compile_id}'\n    if name in self.installed_globals:\n        return name\n    self.install_global_unsafe(name, value)\n    return name",
    "docstring": "Installs a global if it hasn't been installed already. This is determined by (prefix, id(value)) pair. Returns the name of the newly installed global.",
    "type": "method",
    "file_path": "pytorch\\torch\\_dynamo\\output_graph.py",
    "ast_data": "FunctionDef name:install_global_by_id arg:self arg:prefix arg:value arguments arg arg arg Assign Call If Compare Return return:yes Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "Activation",
    "source_code": "class Activation(Layer):\n\n    def __init__(self, activation, **kwargs):\n        super(Activation, self).__init__(**kwargs)\n        self.supports_masking = True\n        self.activation = activations.get(activation)\n\n    def call(self, inputs):\n        return self.activation(inputs)\n\n    def compute_output_shape(self, input_shape):\n        return input_shape\n\n    def get_config(self):\n        config = {'activation': activations.serialize(self.activation)}\n        base_config = super(Activation, self).get_config()\n        return dict(list(base_config.items()) + list(config.items()))",
    "docstring": "Applies an activation function to an output. Args: activation: Activation function, such as , or string name of built-in activation function, such as \"relu\". Usage: >>> layer = tf.keras.layers.Activation('relu') >>> output = layer([-3.0, -1.0, 0.0, 2.0]) >>> list(output.numpy()) [0.0, 0.0, 0.0, 2.0] >>> layer = tf.keras.layers.Activation(tf.nn.relu) >>> output = layer([-3.0, -1.0, 0.0, 2.0]) >>> list(output.numpy()) [0.0, 0.0, 0.0, 2.0] Input shape: Arbitrary. Use the keyword argument (tuple of integers, does not include the batch axis) when using this layer as the first layer in a model. Output shape: Same shape as input.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\layers\\core.py",
    "ast_data": "ClassDef name:Activation FunctionDef name:__init__ arg:self arg:activation arguments arg arg arg Call Call Assign Assign Call FunctionDef name:call arg:self arg:inputs arguments arg arg Return return:yes Call FunctionDef name:compute_output_shape arg:self arg:input_shape arguments arg arg Return return:yes FunctionDef name:get_config arg:self arguments arg Assign Call Assign Call Call Return return:yes Call Call Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "parents",
    "source_code": "@property\ndef parents(self):\n    return [self.key]",
    "docstring": "See 'FeatureColumn` base class.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\feature_column\\feature_column_v2.py",
    "ast_data": "FunctionDef name:parents arg:self arguments arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_register_state_dict_hook",
    "source_code": "def _register_state_dict_hook(self, hook):\n    if getattr(hook, '_from_public_api', False):\n        raise RuntimeError('Cannot register the same function as the state dict post hook that was previously registered via register_state_dict_post_hook')\n    handle = RemovableHandle(self._state_dict_hooks)\n    self._state_dict_hooks[handle.id] = hook\n    return handle",
    "docstring": "Register a post-hook for the :meth: method. It should have the following signature:: hook(module, state_dict, prefix, local_metadata) -> None or state_dict The registered hooks can modify the `~nn.Module.state_dict` is called from.",
    "type": "method",
    "file_path": "pytorch\\torch\\nn\\modules\\module.py",
    "ast_data": "FunctionDef name:_register_state_dict_hook arg:self arg:hook arguments arg arg If Call Raise Call Assign Call Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "CategoricalColumn",
    "source_code": "class CategoricalColumn(fc_types.FeatureColumn):\n    IdWeightPair = collections.namedtuple('IdWeightPair', ('id_tensor', 'weight_tensor'))\n\n    @abc.abstractproperty\n    def num_buckets(self):\n        pass\n\n    @abc.abstractmethod\n    def get_sparse_tensors(self, transformation_cache, state_manager):\n        pass",
    "docstring": "Represents a categorical feature. A categorical feature typically handled with a of IDs.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\feature_column\\feature_column_v2.py",
    "ast_data": "ClassDef name:CategoricalColumn Assign Call FunctionDef name:num_buckets arg:self arguments arg FunctionDef name:get_sparse_tensors arg:self arg:transformation_cache arg:state_manager arguments arg arg arg"
  },
  {
    "library": "django",
    "name": "OverlapsLeftLookup",
    "source_code": "@BaseSpatialField.register_lookup\nclass OverlapsLeftLookup(GISLookup):\n    lookup_name = 'overlaps_left'",
    "docstring": "The overlaps_left operator returns true if A's bounding box overlaps or is to the left of B's bounding box.",
    "type": "class",
    "file_path": "django\\django\\contrib\\gis\\db\\models\\lookups.py",
    "ast_data": "ClassDef name:OverlapsLeftLookup Assign"
  },
  {
    "library": "matplotlib",
    "name": "_get_unit",
    "source_code": "def _get_unit(self):\n    return 1",
    "docstring": "Return how many days a unit of the locator is; used for intelligent autoscaling.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\dates.py",
    "ast_data": "FunctionDef name:_get_unit arg:self arguments arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "is_node_output_tensor",
    "source_code": "@compatibility(is_backward_compatible=False)\ndef is_node_output_tensor(node: torch.fx.Node) -> bool:\n    type_ = node.meta.get('type', None)\n    return type_ is not None and issubclass(type_, torch.Tensor)",
    "docstring": "Checks if the node output produces a Tensor or not. NOTE: This requires to run on the containing fx graph before calling this function. This is because it works by checking the metadata on the node. This metadata is produced by the .",
    "type": "function",
    "file_path": "pytorch\\torch\\fx\\passes\\tools_common.py",
    "ast_data": "FunctionDef name:is_node_output_tensor arg:node arguments arg Assign Call Return return:yes BoolOp Compare Call Call"
  },
  {
    "library": "tensorflow",
    "name": "argmax",
    "source_code": "@dispatch.add_dispatch_support\n@doc_controls.do_not_generate_docs\ndef argmax(x, axis=-1):\n    return math_ops.argmax(x, axis)",
    "docstring": "Returns the index of the maximum value along an axis. Args: x: Tensor or variable. axis: axis along which to perform the reduction. Returns: A tensor.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\backend.py",
    "ast_data": "FunctionDef name:argmax arg:x arg:axis arguments arg arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "fulltype_list_to_product",
    "source_code": "def fulltype_list_to_product(fulltype_list):\n    return full_type_pb2.FullTypeDef(type_id=full_type_pb2.TFT_PRODUCT, args=fulltype_list)",
    "docstring": "Convert a list of FullType Def into a single FullType Def.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\type_utils.py",
    "ast_data": "FunctionDef name:fulltype_list_to_product arg:fulltype_list arguments arg Return return:yes Call"
  },
  {
    "library": "numpy",
    "name": "_dtypeortype",
    "source_code": "@classmethod\ndef _dtypeortype(cls, dtype):\n    if dtype.type == np.datetime64:\n        return dtype\n    return dtype.type",
    "docstring": "Returns dtype for datetime64 and type of dtype otherwise.",
    "type": "method",
    "file_path": "numpy\\numpy\\lib\\_iotools.py",
    "ast_data": "FunctionDef name:_dtypeortype arg:cls arg:dtype arguments arg arg If Compare Return return:yes Return return:yes"
  },
  {
    "library": "scipy",
    "name": "Michalewicz",
    "source_code": "class Michalewicz(Benchmark):\n\n    def __init__(self, dimensions=2):\n        Benchmark.__init__(self, dimensions)\n        self._bounds = list(zip([0.0] * self.N, [pi] * self.N))\n        self.global_optimum = [[2.20290555, 1.570796]]\n        self.fglob = -1.8013\n\n    def fun(self, x, *args):\n        self.nfev += 1\n        m = 10.0\n        i = arange(1, self.N + 1)\n        return -sum(sin(x) * sin(i * x ** 2 / pi) ** (2 * m))",
    "docstring": "Michalewicz objective function. This class defines the Michalewicz [1]_ global optimization problem. This is a multimodal minimization problem defined as follows: .. math:: f_{\\text{Michalewicz}}(x) = - \\sum_{i=1}^{2} \\sin\\left(x_i\\right) \\sin^{2 m}\\left(\\frac{i x_i^{2}}{\\pi}\\right) Where, in this exercise, :math:. with :math: for :math:. *Global optimum*: :math: for :math: .. [1] Adorio, E. MVF - \"Multivariate Test Functions Library in C for Unconstrained Global Optimization\", 2005 TODO: could change dimensionality, but global minimum might change.",
    "type": "class",
    "file_path": "scipy\\benchmarks\\benchmarks\\go_benchmark_functions\\go_funcs_M.py",
    "ast_data": "ClassDef name:Michalewicz FunctionDef name:__init__ arg:self arg:dimensions arguments arg arg Call Assign Call Call Assign Assign FunctionDef name:fun arg:self arg:x arguments arg arg arg Assign Assign Call Return return:yes Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "get_process_group",
    "source_code": "@staticmethod\ndef get_process_group(func, args) -> ProcessGroup:\n    if func in CollectiveOp.PG_ARG_1:\n        return ProcessGroup.unbox(args[1])\n    if func in CollectiveOp.PG_ARG_2:\n        return ProcessGroup.unbox(args[2])\n    if func in CollectiveOp.PG_ARG_3:\n        return _resolve_process_group(args[2])\n    if func in CollectiveOp.PG_ARG_4:\n        return _resolve_process_group(args[3])\n    raise TypeError(f'Func {func} not found in {collective_ops}')",
    "docstring": "Retrieve the process group for collective operations, except .",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\_tools\\fake_collectives.py",
    "ast_data": "FunctionDef name:get_process_group arg:func arg:args arguments arg arg If Compare Return return:yes Call If Compare Return return:yes Call If Compare Return return:yes Call If Compare Return return:yes Call Raise Call"
  },
  {
    "library": "scikit-learn",
    "name": "main",
    "source_code": "def main(wheel_dirname):\n    if not op.exists(VCOMP140_SRC_PATH):\n        raise ValueError(f'Could not find {VCOMP140_SRC_PATH}.')\n    if not op.exists(MSVCP140_SRC_PATH):\n        raise ValueError(f'Could not find {MSVCP140_SRC_PATH}.')\n    if not op.isdir(wheel_dirname):\n        raise RuntimeError(f'Could not find {wheel_dirname} file.')\n    vcomp140_dll_filename = op.basename(VCOMP140_SRC_PATH)\n    msvcp140_dll_filename = op.basename(MSVCP140_SRC_PATH)\n    target_folder = op.join(wheel_dirname, TARGET_FOLDER)\n    distributor_init = op.join(wheel_dirname, DISTRIBUTOR_INIT)\n    if not op.exists(target_folder):\n        os.mkdir(target_folder)\n    print(f'Copying {VCOMP140_SRC_PATH} to {target_folder}.')\n    shutil.copy2(VCOMP140_SRC_PATH, target_folder)\n    print(f'Copying {MSVCP140_SRC_PATH} to {target_folder}.')\n    shutil.copy2(MSVCP140_SRC_PATH, target_folder)\n    print(\"Generating the '_distributor_init.py' file.\")\n    make_distributor_init_64_bits(distributor_init, vcomp140_dll_filename, msvcp140_dll_filename)",
    "docstring": "Embed vcomp140.dll and msvcp140.dll.",
    "type": "function",
    "file_path": "scikit-learn\\build_tools\\github\\vendor.py",
    "ast_data": "FunctionDef name:main arg:wheel_dirname arguments arg If Call Raise Call If Call Raise Call If Call Raise Call Assign Call Assign Call Assign Call Assign Call If Call Call Call Call Call Call Call Call"
  },
  {
    "library": "matplotlib",
    "name": "tricontourf",
    "source_code": "@_preprocess_data()\ndef tricontourf(self, *args, zdir='z', offset=None, axlim_clip=False, **kwargs):\n    had_data = self.has_data()\n    tri, args, kwargs = Triangulation.get_from_args_and_kwargs(*args, **kwargs)\n    X = tri.x\n    Y = tri.y\n    if 'Z' in kwargs:\n        Z = kwargs.pop('Z')\n    else:\n        Z, *args = args\n    jX, jY, jZ = art3d.rotate_axes(X, Y, Z, zdir)\n    tri = Triangulation(jX, jY, tri.triangles, tri.mask)\n    cset = super().tricontourf(tri, jZ, *args, **kwargs)\n    levels = self._add_contourf_set(cset, zdir, offset, axlim_clip)\n    self._auto_scale_contourf(X, Y, Z, zdir, levels, had_data)\n    return cset",
    "docstring": "Create a 3D filled contour plot. .. note:: This method currently produces incorrect output due to a longstanding bug in 3D PolyCollection rendering. Parameters ---------- X, Y, Z : array-like Input data. See for supported data shapes. zdir : {'x', 'y', 'z'}, default: 'z' The direction to use. offset : float, optional If specified, plot a projection of the contour lines at this position in a plane normal to zdir. axlim_clip : bool, default: False Whether to hide lines with a vertex outside the axes view limits. .. versionadded:: 3.10 data : indexable object, optional DATA_PARAMETER_PLACEHOLDER *args, **kwargs Other arguments are forwarded to . Returns ------- matplotlib.tri._tricontour.TriContourSet",
    "type": "method",
    "file_path": "matplotlib\\lib\\mpl_toolkits\\mplot3d\\axes3d.py",
    "ast_data": "FunctionDef name:tricontourf arg:self arguments arg arg arg arg arg arg Assign Call Assign Call Assign Assign If Compare Assign Call Assign Assign Call Assign Call Assign Call Call Assign Call Call Return return:yes Call"
  },
  {
    "library": "numpy",
    "name": "lineType",
    "source_code": "def lineType(line):\n    if isBlank(line):\n        return COMMENT\n    elif isLabel(line):\n        return STATEMENT\n    elif isComment(line):\n        return COMMENT\n    elif isContinuation(line):\n        return CONTINUATION\n    else:\n        return STATEMENT",
    "docstring": "Return the type of a line of Fortran code.",
    "type": "function",
    "file_path": "numpy\\numpy\\linalg\\lapack_lite\\fortran.py",
    "ast_data": "FunctionDef name:lineType arg:line arguments arg If Call Return return:yes If Call Return return:yes If Call Return return:yes If Call Return return:yes Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "predict_log_proba",
    "source_code": "def predict_log_proba(self, X):\n    y_prob = self.predict_proba(X)\n    return np.log(y_prob, out=y_prob)",
    "docstring": "Return the log of probability estimates. Parameters ---------- X : ndarray of shape (n_samples, n_features) The input data. Returns ------- log_y_prob : ndarray of shape (n_samples, n_classes) The predicted log-probability of the sample for each class in the model, where classes are ordered as they are in . Equivalent to .",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\neural_network\\_multilayer_perceptron.py",
    "ast_data": "FunctionDef name:predict_log_proba arg:self arg:X arguments arg arg Assign Call Return return:yes Call"
  },
  {
    "library": "numpy",
    "name": "vecdot",
    "source_code": "@array_function_dispatch(_vecdot_dispatcher)\ndef vecdot(x1, x2, /, *, axis=-1):\n    return _core_vecdot(x1, x2, axis=axis)",
    "docstring": "Computes the vector dot product. This function is restricted to arguments compatible with the Array API, contrary to :func:. Let :math: be a vector in `\\mathbf{b}\\overline{a_i}a_i`. Returns ------- output : ndarray The vector dot product of the input. See Also -------- numpy.vecdot Examples -------- Get the projected size along a given normal for an array of vectors. >>> v = np.array([[0., 5., 0.], [0., 0., 10.], [0., 6., 8.]]) >>> n = np.array([0., 0.6, 0.8]) >>> np.linalg.vecdot(v, n) array([ 3., 8., 10.])",
    "type": "function",
    "file_path": "numpy\\numpy\\linalg\\_linalg.py",
    "ast_data": "FunctionDef name:vecdot arguments arg arg arg Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "get_debug_dir",
    "source_code": "def get_debug_dir(export_dir):\n    return file_io.join(compat.as_text(export_dir), compat.as_text(constants.DEBUG_DIRECTORY))",
    "docstring": "Returns path to the debug sub-directory in the SavedModel.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\saved_model\\path_helpers.py",
    "ast_data": "FunctionDef name:get_debug_dir arg:export_dir arguments arg Return return:yes Call Call Call"
  },
  {
    "library": "sphinx",
    "name": "acks",
    "source_code": "class acks(nodes.Element):\n    pass",
    "docstring": "Special node for \"acks\" lists.",
    "type": "class",
    "file_path": "sphinx\\sphinx\\addnodes.py",
    "ast_data": "ClassDef name:acks"
  },
  {
    "library": "tensorflow",
    "name": "with_node_names",
    "source_code": "def with_node_names(self, start_name_regexes=None, show_name_regexes=None, hide_name_regexes=None, trim_name_regexes=None):\n    if start_name_regexes is not None:\n        self._options['start_name_regexes'] = copy.copy(start_name_regexes)\n    if show_name_regexes is not None:\n        self._options['show_name_regexes'] = copy.copy(show_name_regexes)\n    if hide_name_regexes is not None:\n        self._options['hide_name_regexes'] = copy.copy(hide_name_regexes)\n    if trim_name_regexes is not None:\n        self._options['trim_name_regexes'] = copy.copy(trim_name_regexes)\n    return self",
    "docstring": "Regular expressions used to select profiler nodes to display. After 'with_accounted_types' is evaluated, 'with_node_names' are evaluated as follows: For a profile data structure, profiler first finds the profiler nodes matching 'start_name_regexes', and starts displaying profiler nodes from there. Then, if a node matches 'show_name_regexes' and doesn't match 'hide_name_regexes', it's displayed. If a node matches 'trim_name_regexes', profiler stops further searching that branch. Args: start_name_regexes: list of node name regexes to start displaying. show_name_regexes: list of node names regexes to display. hide_name_regexes: list of node_names regexes that should be hidden. trim_name_regexes: list of node name regexes from where to stop. Returns: self",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\profiler\\option_builder.py",
    "ast_data": "FunctionDef name:with_node_names arg:self arg:start_name_regexes arg:show_name_regexes arg:hide_name_regexes arg:trim_name_regexes arguments arg arg arg arg arg If Compare Assign Call If Compare Assign Call If Compare Assign Call If Compare Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "tracking_metadata",
    "source_code": "@property\ndef tracking_metadata(self):\n    return json_utils.Encoder().encode(self.python_properties)",
    "docstring": "String stored in metadata field in the SavedModel proto. Returns: A serialized JSON storing information necessary for recreating this layer.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\saving\\saved_model\\base_serialization.py",
    "ast_data": "FunctionDef name:tracking_metadata arg:self arguments arg Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "get_rng_state",
    "source_code": "def get_rng_state(device: Union[int, str, torch.device]='cuda') -> Tensor:\n    _lazy_init()\n    if isinstance(device, str):\n        device = torch.device(device)\n    elif isinstance(device, int):\n        device = torch.device('cuda', device)\n    idx = device.index\n    if idx is None:\n        idx = current_device()\n    default_generator = torch.cuda.default_generators[idx]\n    return default_generator.get_state()",
    "docstring": "Return the random number generator state of the specified GPU as a ByteTensor. Args: device (torch.device or int, optional): The device to return the RNG state of. Default: ``, the current CUDA device). .. warning:: This function eagerly initializes CUDA.",
    "type": "function",
    "file_path": "pytorch\\torch\\cuda\\random.py",
    "ast_data": "FunctionDef name:get_rng_state arg:device arguments arg Call If Call Assign Call If Call Assign Call Assign If Compare Assign Call Assign Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "_fit_infrequent_category_mapping",
    "source_code": "def _fit_infrequent_category_mapping(self, n_samples, category_counts, missing_indices):\n    if missing_indices:\n        category_counts_ = []\n        for feature_idx, count in enumerate(category_counts):\n            if feature_idx in missing_indices:\n                category_counts_.append(np.delete(count, missing_indices[feature_idx]))\n            else:\n                category_counts_.append(count)\n    else:\n        category_counts_ = category_counts\n    self._infrequent_indices = [self._identify_infrequent(category_count, n_samples, col_idx) for col_idx, category_count in enumerate(category_counts_)]\n    self._default_to_infrequent_mappings = []\n    for feature_idx, infreq_idx in enumerate(self._infrequent_indices):\n        cats = self.categories_[feature_idx]\n        if infreq_idx is None:\n            self._default_to_infrequent_mappings.append(None)\n            continue\n        n_cats = len(cats)\n        if feature_idx in missing_indices:\n            n_cats -= 1\n        mapping = np.empty(n_cats, dtype=np.int64)\n        n_infrequent_cats = infreq_idx.size\n        n_frequent_cats = n_cats - n_infrequent_cats\n        mapping[infreq_idx] = n_frequent_cats\n        frequent_indices = np.setdiff1d(np.arange(n_cats), infreq_idx)\n        mapping[frequent_indices] = np.arange(n_frequent_cats)\n        self._default_to_infrequent_mappings.append(mapping)",
    "docstring": "Fit infrequent categories. Defines the private attribute: . For feature , defines the mapping from the integer encoding returned by into infrequent categories. If is None, there were no infrequent categories in the training set. For example if categories 0, 2 and 4 were frequent, while categories 1, 3, 5 were infrequent for feature 7, then these categories are mapped to a single output: Defines private attribute: . is an array of indices such that are all the infrequent category labels. If the feature has no infrequent categories is None. .. versionadded:: 1.1 Parameters ---------- n_samples : int Number of samples in training set. category_counts: list of ndarray is the category counts corresponding to . missing_indices : dict Dict mapping from feature_idx to category index with a missing value.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\preprocessing\\_encoders.py",
    "ast_data": "FunctionDef name:_fit_infrequent_category_mapping arg:self arg:n_samples arg:category_counts arg:missing_indices arguments arg arg arg arg If Assign For Call If Compare Call Call Call Assign Assign Call Call Assign For Call Assign If Compare Call Assign Call If Compare Assign Call Assign Assign Assign Assign Call Call Assign Call Call"
  },
  {
    "library": "django",
    "name": "SystemCheckError",
    "source_code": "class SystemCheckError(CommandError):\n    pass",
    "docstring": "The system check framework detected unrecoverable errors.",
    "type": "class",
    "file_path": "django\\django\\core\\management\\base.py",
    "ast_data": "ClassDef name:SystemCheckError"
  },
  {
    "library": "tensorflow",
    "name": "_ensure_safe",
    "source_code": "def _ensure_safe(self):\n    if not self._safe_to_run():\n        raise RuntimeError('There is at least 1 reference to internal data\\n      in the interpreter in the form of a numpy array or slice. Be sure to\\n      only hold the function returned from tensor() if you are using raw\\n      data access.')",
    "docstring": "Makes sure no numpy arrays pointing to internal buffers are active. This should be called from any function that will call a function on _interpreter that may reallocate memory e.g. invoke(), ... Raises: RuntimeError: If there exist numpy objects pointing to internal memory then we throw.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\lite\\python\\interpreter.py",
    "ast_data": "FunctionDef name:_ensure_safe arg:self arguments arg If Call Raise Call"
  },
  {
    "library": "django",
    "name": "_get_elided_page_range",
    "source_code": "def _get_elided_page_range(self, number, num_pages, page_range, on_each_side=3, on_ends=2):\n    if num_pages <= (on_each_side + on_ends) * 2:\n        for page in page_range:\n            yield page\n        return\n    if number > 1 + on_each_side + on_ends + 1:\n        for page in range(1, on_ends + 1):\n            yield page\n        yield self.ELLIPSIS\n        for page in range(number - on_each_side, number + 1):\n            yield page\n    else:\n        for page in range(1, number + 1):\n            yield page\n    if number < num_pages - on_each_side - on_ends - 1:\n        for page in range(number + 1, number + on_each_side + 1):\n            yield page\n        yield self.ELLIPSIS\n        for page in range(num_pages - on_ends + 1, num_pages + 1):\n            yield page\n    else:\n        for page in range(number + 1, num_pages + 1):\n            yield page",
    "docstring": "Return a 1-based range of pages with some values elided. If the page range is larger than a given size, the whole range is not provided and a compact form is returned instead, e.g. for a paginator with 50 pages, if page 43 were the current page, the output, with the default arguments, would be: 1, 2, …, 40, 41, 42, 43, 44, 45, 46, …, 49, 50.",
    "type": "method",
    "file_path": "django\\django\\core\\paginator.py",
    "ast_data": "FunctionDef name:_get_elided_page_range arg:self arg:number arg:num_pages arg:page_range arg:on_each_side arg:on_ends arguments arg arg arg arg arg arg If Compare For Return return:no If Compare For Call For Call For Call If Compare For Call For Call For Call"
  },
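A minimal standalone sketch of the elision behavior documented above; the `elided_pages` helper is hypothetical and only mirrors the generator's logic (Django exposes this publicly via `Paginator.get_elided_page_range`):

```python
# Hypothetical re-creation of the elision logic above, for illustration only.
ELLIPSIS = "…"

def elided_pages(number, num_pages, on_each_side=3, on_ends=2):
    # Small ranges are emitted whole; larger ones are elided around `number`.
    if num_pages <= (on_each_side + on_ends) * 2:
        yield from range(1, num_pages + 1)
        return
    if number > 1 + on_each_side + on_ends + 1:
        yield from range(1, on_ends + 1)
        yield ELLIPSIS
        yield from range(number - on_each_side, number + 1)
    else:
        yield from range(1, number + 1)
    if number < num_pages - on_each_side - on_ends - 1:
        yield from range(number + 1, number + on_each_side + 1)
        yield ELLIPSIS
        yield from range(num_pages - on_ends + 1, num_pages + 1)
    else:
        yield from range(number + 1, num_pages + 1)

# 50 pages with page 43 current gives the docstring's example output:
# [1, 2, '…', 40, 41, 42, 43, 44, 45, 46, '…', 49, 50]
print(list(elided_pages(43, 50)))
```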
  {
    "library": "pytorch",
    "name": "process_outputs",
    "source_code": "def process_outputs(self, outputs: Any) -> Any:\n    return outputs",
    "docstring": "Transforms the outputs of the graph to be identical to the codegen. See `` for more details.",
    "type": "method",
    "file_path": "pytorch\\torch\\fx\\graph.py",
    "ast_data": "FunctionDef name:process_outputs arg:self arg:outputs arguments arg arg Return return:yes"
  },
  {
    "library": "scipy",
    "name": "_safely_castable_to_int",
    "source_code": "def _safely_castable_to_int(dt):\n    int_size = np.dtype(int).itemsize\n    safe = np.issubdtype(dt, np.signedinteger) and dt.itemsize <= int_size or (np.issubdtype(dt, np.unsignedinteger) and dt.itemsize < int_size)\n    return safe",
    "docstring": "Test whether the NumPy data type can be safely cast to an int.",
    "type": "function",
    "file_path": "scipy\\scipy\\ndimage\\_measurements.py",
    "ast_data": "FunctionDef name:_safely_castable_to_int arg:dt arguments arg Assign Call Assign BoolOp BoolOp Call Compare BoolOp Call Compare Return return:yes"
  },
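A quick check of the castability rule above; this assumes a 64-bit platform where `np.dtype(int).itemsize == 8`, so signed dtypes up to the native int width are safe while unsigned dtypes must be strictly narrower:

```python
import numpy as np

int_size = np.dtype(int).itemsize  # typically 8 on 64-bit platforms
for dt in (np.int8, np.int64, np.uint32, np.uint64):
    dt = np.dtype(dt)
    safe = (np.issubdtype(dt, np.signedinteger) and dt.itemsize <= int_size) or \
           (np.issubdtype(dt, np.unsignedinteger) and dt.itemsize < int_size)
    print(dt, safe)  # int8 True, int64 True, uint32 True, uint64 False
```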
  {
    "library": "tensorflow",
    "name": "map_fn_v2",
    "source_code": "@tf_export('map_fn', v1=[])\n@deprecation.deprecated_arg_values(None, 'back_prop=False is deprecated. Consider using tf.stop_gradient instead.\\nInstead of:\\nresults = tf.map_fn(fn, elems, back_prop=False)\\nUse:\\nresults = tf.nest.map_structure(tf.stop_gradient, tf.map_fn(fn, elems))', warn_once=True, back_prop=False)\n@deprecation.deprecated_args(None, 'Use fn_output_signature instead', 'dtype')\ndef map_fn_v2(fn, elems, dtype=None, parallel_iterations=None, back_prop=True, swap_memory=False, infer_shape=True, name=None, fn_output_signature=None):\n    if fn_output_signature is None:\n        fn_output_signature = dtype\n    return map_fn(fn=fn, elems=elems, fn_output_signature=fn_output_signature, parallel_iterations=parallel_iterations, back_prop=back_prop, swap_memory=swap_memory, infer_shape=infer_shape, name=name)",
    "docstring": "Transform by applying to each element unstacked on axis 0.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\map_fn.py",
    "ast_data": "FunctionDef name:map_fn_v2 arg:fn arg:elems arg:dtype arg:parallel_iterations arg:back_prop arg:swap_memory arg:infer_shape arg:name arg:fn_output_signature arguments arg arg arg arg arg arg arg arg arg If Compare Assign Return return:yes Call Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "multiply_no_nan",
    "source_code": "@tf_export('math.multiply_no_nan')\n@dispatch.register_binary_elementwise_api\n@dispatch.add_dispatch_support\ndef multiply_no_nan(x, y, name=None):\n    with ops.name_scope(name, 'multiply_no_nan', [x, y]) as name:\n        x = ops.convert_to_tensor(x, name='x')\n        y = ops.convert_to_tensor(y, name='y', dtype=x.dtype.base_dtype)\n        x_dtype = x.dtype.base_dtype\n        y_dtype = y.dtype.base_dtype\n        if x_dtype != y_dtype:\n            raise TypeError(f'`x` and `y` must have the same dtype, got {x_dtype!r} != {y_dtype!r}')\n        return gen_math_ops.mul_no_nan(x, y, name=name)",
    "docstring": "Computes the product of x and y and returns 0 if the y is zero, even if x is NaN or infinite. Note this is noncommutative: if y is NaN or infinite and x is 0, the result will be NaN. Args: x: A . Must be one of the following types: , . y: A whose dtype is compatible with . name: A name for the operation (optional). Returns: The element-wise value of the x times y.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\math_ops.py",
    "ast_data": "FunctionDef name:multiply_no_nan arg:x arg:y arg:name arguments arg arg arg With Call Assign Call Assign Call Assign Assign If Compare Raise Call Return return:yes Call Call"
  },
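The noncommutative corner case called out in the docstring is easy to see numerically; a small sketch (assuming a working TensorFlow install):

```python
import numpy as np
import tensorflow as tf

x = tf.constant([np.nan, 2.0, np.inf])
y = tf.constant([0.0, 3.0, 0.0])
# A zero in y zeroes out NaN/inf in x ...
print(tf.math.multiply_no_nan(x, y).numpy())  # [0. 6. 0.]
# ... but NaN/inf in y against a zero in x still yields NaN.
print(tf.math.multiply_no_nan(y, x).numpy())  # [nan 6. nan]
```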
  {
    "library": "tensorflow",
    "name": "as_numpy_dtype",
    "source_code": "@property\ndef as_numpy_dtype(self):\n    return _TF_TO_NP[self._type_enum]",
    "docstring": "Returns a Python object based on this .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\dtypes.py",
    "ast_data": "FunctionDef name:as_numpy_dtype arg:self arguments arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "requires_grad_",
    "source_code": "def requires_grad_(self, requires_grad: bool=True) -> Self:\n    for p in self.parameters():\n        p.requires_grad_(requires_grad)\n    return self",
    "docstring": "Change if autograd should record operations on parameters in this module. This method sets the parameters' :attr: attributes in-place. This method is helpful for freezing part of the module for finetuning or training parts of a model individually (e.g., GAN training). See :ref: for a comparison between and several similar mechanisms that may be confused with it. Args: requires_grad (bool): whether autograd should record operations on parameters in this module. Default: ``. Returns: Module: self",
    "type": "method",
    "file_path": "pytorch\\torch\\nn\\modules\\module.py",
    "ast_data": "FunctionDef name:requires_grad_ arg:self arg:requires_grad arguments arg arg For Call Call Return return:yes"
  },
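The freezing-for-finetuning use case mentioned in the docstring, as a minimal sketch (the two-layer model is illustrative only):

```python
import torch.nn as nn

model = nn.Sequential(nn.Linear(4, 8), nn.Linear(8, 2))
model[0].requires_grad_(False)  # freeze the first layer in-place

print([p.requires_grad for p in model[0].parameters()])  # [False, False]
print([p.requires_grad for p in model[1].parameters()])  # [True, True]
```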
  {
    "library": "pytorch",
    "name": "_get_aten_graph_module_for_pattern",
    "source_code": "def _get_aten_graph_module_for_pattern(pattern: Callable, example_inputs: tuple[Any, ...], is_cuda: bool=False, **kwargs) -> GraphModule:\n    if is_cuda:\n        example_inputs = tuple([x.cuda() if isinstance(x, torch.Tensor) else x for x in example_inputs])\n    aten_pattern = torch.export.export_for_training(pattern, example_inputs, kwargs, strict=True).module()\n    aten_pattern.graph.eliminate_dead_code()\n    aten_pattern.recompile()\n    for node in aten_pattern.graph.nodes:\n        if node.op == 'call_function' and node.target == torch.ops.aten.copy_.default and (len(node.users) == 0):\n            aten_pattern.graph.erase_node(node)\n    aten_pattern.graph.eliminate_dead_code()\n    aten_pattern.recompile()\n    return aten_pattern",
    "docstring": "Convert the pattern to an FX graph with decomposed aten ops.",
    "type": "function",
    "file_path": "pytorch\\torch\\ao\\quantization\\pt2e\\utils.py",
    "ast_data": "FunctionDef name:_get_aten_graph_module_for_pattern arg:pattern arg:example_inputs arg:is_cuda arguments arg arg arg arg If Assign Call Call Call Assign Call Call Call Call For If BoolOp Compare Compare Compare Call Call Call Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "copy",
    "source_code": "def copy(self):\n    return self.__copy__()",
    "docstring": "Return a copy of the colormap.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\colors.py",
    "ast_data": "FunctionDef name:copy arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "pygame",
    "name": "get_ascent",
    "source_code": "def get_ascent(self):\n    return self.get_sized_ascender()",
    "docstring": "get_ascent() -> int get the ascent of the font",
    "type": "method",
    "file_path": "pygame\\src_py\\ftfont.py",
    "ast_data": "FunctionDef name:get_ascent arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "closest",
    "source_code": "def closest(self, x, y):\n    pts = np.column_stack([self.x, self.y])\n    pts = self.ax.transData.transform(pts)\n    diff = pts - [x, y]\n    dist = np.hypot(*diff.T)\n    min_index = np.argmin(dist)\n    return (min_index, dist[min_index])",
    "docstring": "Return index and pixel distance to closest index.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\widgets.py",
    "ast_data": "FunctionDef name:closest arg:self arg:x arg:y arguments arg arg arg Assign Call Assign Call Assign Assign Call Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "list_source",
    "source_code": "def list_source(self, args, screen_info=None):\n    del screen_info\n    parsed = self._arg_parsers['list_source'].parse_args(args)\n    source_list = source_utils.list_source_files_against_dump(self._debug_dump, path_regex_allowlist=parsed.path_filter, node_name_regex_allowlist=parsed.node_name_filter)\n    top_lines = [RL('List of source files that created nodes in this run', 'bold')]\n    if parsed.path_filter:\n        top_lines.append(RL('File path regex filter: \"%s\"' % parsed.path_filter))\n    if parsed.node_name_filter:\n        top_lines.append(RL('Node name regex filter: \"%s\"' % parsed.node_name_filter))\n    top_lines.append(RL())\n    output = debugger_cli_common.rich_text_lines_from_rich_line_list(top_lines)\n    if not source_list:\n        output.append('[No source file information.]')\n        return output\n    output.extend(self._make_source_table([item for item in source_list if not item[1]], False))\n    output.extend(self._make_source_table([item for item in source_list if item[1]], True))\n    _add_main_menu(output, node_name=None)\n    return output",
    "docstring": "List Python source files that constructed nodes and tensors.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\debug\\cli\\analyzer_cli.py",
    "ast_data": "FunctionDef name:list_source arg:self arg:args arg:screen_info arguments arg arg arg Assign Call Assign Call Assign Call If Call Call If Call Call Call Call Assign Call If Call Return return:yes Call Call Call Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "memory_stats",
    "source_code": "def memory_stats(device: 'Device'=None) -> dict[str, Any]:\n    result = []\n\n    def _recurse_add_to_result(prefix, obj):\n        if isinstance(obj, dict):\n            if len(prefix) > 0:\n                prefix += '.'\n            for k, v in obj.items():\n                _recurse_add_to_result(prefix + k, v)\n        else:\n            result.append((prefix, obj))\n    stats = memory_stats_as_nested_dict(device=device)\n    _recurse_add_to_result('', stats)\n    result.sort()\n    return collections.OrderedDict(result)",
    "docstring": "Return a dictionary of CUDA memory allocator statistics for a given device. The return value of this function is a dictionary of statistics, each of which is a non-negative integer. Core statistics: - `, some stats are not meaningful, and are always reported as zero.",
    "type": "function",
    "file_path": "pytorch\\torch\\cuda\\memory.py",
    "ast_data": "FunctionDef name:memory_stats arg:device arguments arg Assign FunctionDef name:_recurse_add_to_result arg:prefix arg:obj arguments arg arg If Call If Compare Call For Call Call Call Assign Call Call Call Return return:yes Call"
  },
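The flat dotted-key layout comes from `_recurse_add_to_result` walking the nested stats dict; the same transform, shown standalone (the `nested` sample data is illustrative):

```python
import collections

def flatten(obj, prefix=""):
    # Recursively turn nested dicts into (dotted-key, value) pairs.
    out = []
    if isinstance(obj, dict):
        for k, v in obj.items():
            out.extend(flatten(v, prefix + "." + k if prefix else k))
    else:
        out.append((prefix, obj))
    return out

nested = {"allocated_bytes": {"all": {"current": 0, "peak": 0}}}
print(collections.OrderedDict(sorted(flatten(nested))))
# OrderedDict([('allocated_bytes.all.current', 0), ('allocated_bytes.all.peak', 0)])
```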
  {
    "library": "kornia",
    "name": "render_view",
    "source_code": "def render_view(self, camera: PinholeCamera) -> Tensor:\n    rays: Ray = self._create_rays(camera)\n    with torch_inference_mode():\n        rgb_model = self._nerf_model(rays.origin, rays.direction)\n    rgb_image = rgb_model.view(self._image_size[0], self._image_size[1], 3)\n    return rgb_image",
    "docstring": "Render a novel synthesis view of a trained NeRF model for given camera. Args: camera: camera for image rendering: PinholeCamera. Returns: Rendered image with shape :math:.",
    "type": "method",
    "file_path": "kornia\\kornia\\nerf\\nerf_model.py",
    "ast_data": "FunctionDef name:render_view arg:self arg:camera arguments arg arg Call With Call Assign Call Assign Call Return return:yes"
  },
  {
    "library": "seaborn",
    "name": "_bottom_axes",
    "source_code": "@property\ndef _bottom_axes(self):\n    if self._col_wrap is None:\n        return self.axes[-1, :].flat\n    else:\n        axes = []\n        n_empty = self._nrow * self._ncol - self._n_facets\n        for i, ax in enumerate(self.axes):\n            append = i >= self._ncol * (self._nrow - 1) or i >= self._ncol * (self._nrow - 1) - n_empty\n            if append:\n                axes.append(ax)\n        return np.array(axes, object).flat",
    "docstring": "Return a flat array of the bottom row of axes.",
    "type": "method",
    "file_path": "seaborn\\seaborn\\axisgrid.py",
    "ast_data": "FunctionDef name:_bottom_axes arg:self arguments arg If Compare Return return:yes Assign Assign For Call Assign BoolOp Compare Compare If Call Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "metadata",
    "source_code": "@property\n@abstractmethod\ndef metadata(self) -> dict[str, Any]:\n    pass",
    "docstring": "The metadata for the column. See for more details.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\interchange\\dataframe_protocol.py",
    "ast_data": "FunctionDef name:metadata arg:self arguments arg"
  },
  {
    "library": "pandas",
    "name": "unit",
    "source_code": "@property\ndef unit(self) -> str_type:\n    return self._unit",
    "docstring": "The precision of the datetime data. See Also -------- DatetimeTZDtype.tz : Retrieves the timezone. Examples -------- >>> from zoneinfo import ZoneInfo >>> dtype = pd.DatetimeTZDtype(tz=ZoneInfo(\"America/Los_Angeles\")) >>> dtype.unit 'ns'",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\dtypes\\dtypes.py",
    "ast_data": "FunctionDef name:unit arg:self arguments arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "is_running",
    "source_code": "@staticmethod\ndef is_running(state: 'WorkerState') -> bool:\n    return state in {WorkerState.HEALTHY, WorkerState.UNHEALTHY}",
    "docstring": "Return the state of the Worker. Returns: True if the worker state represents workers still running (e.g. that the process exists but not necessarily healthy).",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\elastic\\agent\\server\\api.py",
    "ast_data": "FunctionDef name:is_running arg:state arguments arg Return return:yes Compare"
  },
  {
    "library": "scikit-learn",
    "name": "log_loss",
    "source_code": "def log_loss(y_true, y_prob, sample_weight=None):\n    eps = np.finfo(y_prob.dtype).eps\n    y_prob = np.clip(y_prob, eps, 1 - eps)\n    if y_prob.shape[1] == 1:\n        y_prob = np.append(1 - y_prob, y_prob, axis=1)\n    if y_true.shape[1] == 1:\n        y_true = np.append(1 - y_true, y_true, axis=1)\n    return -np.average(xlogy(y_true, y_prob), weights=sample_weight, axis=0).sum()",
    "docstring": "Compute Logistic loss for classification. Parameters ---------- y_true : array-like or label indicator matrix Ground truth (correct) labels. y_prob : array-like of float, shape = (n_samples, n_classes) Predicted probabilities, as returned by a classifier's predict_proba method. sample_weight : array-like of shape (n_samples,), default=None Sample weights. Returns ------- loss : float The degree to which the samples are correctly predicted.",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\neural_network\\_base.py",
    "ast_data": "FunctionDef name:log_loss arg:y_true arg:y_prob arg:sample_weight arguments arg arg arg Assign Call Assign Call If Compare Assign Call If Compare Assign Call Return return:yes Call Call Call"
  },
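A hand-worked check of the computation above (assumes NumPy and SciPy; the single-column inputs exercise the expansion branches):

```python
import numpy as np
from scipy.special import xlogy

y_true = np.array([[1.0], [0.0]])          # binary labels, one column
y_prob = np.array([[0.9], [0.2]])          # predicted P(class 1)
y_true2 = np.hstack([1 - y_true, y_true])  # expand to two columns, as log_loss does
y_prob2 = np.hstack([1 - y_prob, y_prob])
loss = -np.average(xlogy(y_true2, y_prob2), axis=0).sum()
print(loss)  # -(log(0.9) + log(0.8)) / 2 ≈ 0.1643
```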
  {
    "library": "scikit-learn",
    "name": "__call__",
    "source_code": "def __call__(self, X, Y=None, eval_gradient=False):\n    if eval_gradient:\n        K1, K1_gradient = self.k1(X, Y, eval_gradient=True)\n        K2, K2_gradient = self.k2(X, Y, eval_gradient=True)\n        return (K1 * K2, np.dstack((K1_gradient * K2[:, :, np.newaxis], K2_gradient * K1[:, :, np.newaxis])))\n    else:\n        return self.k1(X, Y) * self.k2(X, Y)",
    "docstring": "Return the kernel k(X, Y) and optionally its gradient. Parameters ---------- X : array-like of shape (n_samples_X, n_features) or list of object Left argument of the returned kernel k(X, Y) Y : array-like of shape (n_samples_Y, n_features) or list of object, default=None Right argument of the returned kernel k(X, Y). If None, k(X, X) is evaluated instead. eval_gradient : bool, default=False Determines whether the gradient with respect to the log of the kernel hyperparameter is computed. Returns ------- K : ndarray of shape (n_samples_X, n_samples_Y) Kernel k(X, Y) K_gradient : ndarray of shape (n_samples_X, n_samples_X, n_dims), optional The gradient of the kernel k(X, X) with respect to the log of the hyperparameter of the kernel. Only returned when is True.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\gaussian_process\\kernels.py",
    "ast_data": "FunctionDef name:__call__ arg:self arg:X arg:Y arg:eval_gradient arguments arg arg arg arg If Assign Call Assign Call Return return:yes Call Return return:yes Call Call"
  },
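This `__call__` is what backs the `*` operator on scikit-learn kernels; a short sketch of a product kernel and its stacked gradient (assumes scikit-learn is installed):

```python
import numpy as np
from sklearn.gaussian_process.kernels import RBF, ConstantKernel

k = ConstantKernel(2.0) * RBF(length_scale=1.0)  # a Product kernel
X = np.array([[0.0], [1.0]])
K, K_grad = k(X, eval_gradient=True)
print(K.shape, K_grad.shape)  # (2, 2) (2, 2, 2): one gradient slice per hyperparameter
```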
  {
    "library": "pytorch",
    "name": "_WireProtocolOutput",
    "source_code": "@dataclass\nclass _WireProtocolOutput:\n    graph: OutputCode\n    metrics: CachedMetricsDeltas\n    logs: list[logging.LogRecord]\n    warning_replay: Optional[list[warnings.WarningMessage]]\n    shape_env: Optional[torch.fx.experimental.symbolic_shapes.ShapeEnv]\n\n    def serialize(self) -> _WireProtocolPickledOutput:\n        from torch.fx._graph_pickler import GraphPickler\n        if isinstance(self.graph, CompiledFxGraph):\n            self.graph.prepare_for_serialization()\n        return _WireProtocolPickledOutput(GraphPickler.dumps(self))",
    "docstring": "For _SerializedFxCompile - encapsulates all the data being transferred (returned) back from the child to the parent.",
    "type": "class",
    "file_path": "pytorch\\torch\\_inductor\\compile_fx_ext.py",
    "ast_data": "ClassDef name:_WireProtocolOutput FunctionDef name:serialize arg:self arguments arg If Call Call Return return:yes Call Call"
  },
  {
    "library": "matplotlib",
    "name": "interval_contains",
    "source_code": "def interval_contains(interval, val):\n    a, b = interval\n    if a > b:\n        a, b = (b, a)\n    return a <= val <= b",
    "docstring": "Check, inclusively, whether an interval includes a given value. Parameters ---------- interval : (float, float) The endpoints of the interval. val : float Value to check is within interval. Returns ------- bool Whether *val* is within the *interval*.",
    "type": "function",
    "file_path": "matplotlib\\lib\\matplotlib\\transforms.py",
    "ast_data": "FunctionDef name:interval_contains arg:interval arg:val arguments arg arg Assign If Compare Assign Return return:yes Compare"
  },
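Two properties worth noting: the endpoints are normalized, so interval order doesn't matter, and the comparison is inclusive. A quick demo (assumes matplotlib is installed):

```python
from matplotlib.transforms import interval_contains

print(interval_contains((0, 1), 1))    # True: endpoints are included
print(interval_contains((1, 0), 0.5))  # True: a reversed interval is normalized
print(interval_contains((0, 1), 1.1))  # False
```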
  {
    "library": "scrapy",
    "name": "get_spider_middleware",
    "source_code": "def get_spider_middleware(self, cls: type[_T]) -> _T | None:\n    if not self.engine:\n        raise RuntimeError('Crawler.get_spider_middleware() can only be called after the crawl engine has been created.')\n    return self._get_component(cls, self.engine.scraper.spidermw.middlewares)",
    "docstring": "Return the run-time instance of a :ref: of the specified class or a subclass, or `engine_startedspider_opened`.",
    "type": "method",
    "file_path": "scrapy\\scrapy\\crawler.py",
    "ast_data": "FunctionDef name:get_spider_middleware arg:self arg:cls arguments arg arg If Raise Call Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "segment_hits",
    "source_code": "def segment_hits(cx, cy, x, y, radius):\n    if len(x) <= 1:\n        res, = np.nonzero((cx - x) ** 2 + (cy - y) ** 2 <= radius ** 2)\n        return res\n    xr, yr = (x[:-1], y[:-1])\n    dx, dy = (x[1:] - xr, y[1:] - yr)\n    Lnorm_sq = dx ** 2 + dy ** 2\n    u = ((cx - xr) * dx + (cy - yr) * dy) / Lnorm_sq\n    candidates = (u >= 0) & (u <= 1)\n    point_hits = (cx - x) ** 2 + (cy - y) ** 2 <= radius ** 2\n    candidates = candidates & ~(point_hits[:-1] | point_hits[1:])\n    px, py = (xr + u * dx, yr + u * dy)\n    line_hits = (cx - px) ** 2 + (cy - py) ** 2 <= radius ** 2\n    line_hits = line_hits & candidates\n    points, = point_hits.ravel().nonzero()\n    lines, = line_hits.ravel().nonzero()\n    return np.concatenate((points, lines))",
    "docstring": "Return the indices of the segments in the polyline with coordinates (*cx*, *cy*) that are within a distance *radius* of the point (*x*, *y*).",
    "type": "function",
    "file_path": "matplotlib\\lib\\matplotlib\\lines.py",
    "ast_data": "FunctionDef name:segment_hits arg:cx arg:cy arg:x arg:y arg:radius arguments arg arg arg arg arg If Compare Call Assign Call Compare Return return:yes Assign Assign Assign Assign Assign Compare Compare Assign Compare Assign Assign Assign Compare Assign Assign Call Call Assign Call Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "update_state",
    "source_code": "def update_state(self, y_true, y_pred, sample_weight=None):\n    return metrics_utils.update_confusion_matrix_variables({metrics_utils.ConfusionMatrix.TRUE_POSITIVES: self.true_positives, metrics_utils.ConfusionMatrix.FALSE_POSITIVES: self.false_positives}, y_true, y_pred, thresholds=self.thresholds, thresholds_distributed_evenly=self._thresholds_distributed_evenly, top_k=self.top_k, class_id=self.class_id, sample_weight=sample_weight)",
    "docstring": "Accumulates true positive and false positive statistics. Args: y_true: The ground truth values, with the same dimensions as . Will be cast to . y_pred: The predicted values. Each element must be in the range . sample_weight: Optional weighting of each example. Defaults to 1. Can be a whose rank is either 0, or the same rank as , and must be broadcastable to . Returns: Update op.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\metrics.py",
    "ast_data": "FunctionDef name:update_state arg:self arg:y_true arg:y_pred arg:sample_weight arguments arg arg arg arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "dims",
    "source_code": "@property\ndef dims(self):\n    if self._dims is None:\n        return None\n    return [as_dimension(d) for d in self._dims]",
    "docstring": "Deprecated. Returns list of dimensions for this shape. Suggest instead. Returns: A list containing s, or None if the shape is unspecified.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\tensor_shape.py",
    "ast_data": "FunctionDef name:dims arg:self arguments arg If Compare Return return:no Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "replace",
    "source_code": "@final\ndef replace(self, to_replace, value, inplace: bool=False, mask: npt.NDArray[np.bool_] | None=None) -> list[Block]:\n    values = self.values\n    if not self._can_hold_element(to_replace):\n        return [self.copy(deep=False)]\n    if mask is None:\n        mask = missing.mask_missing(values, to_replace)\n    if not mask.any():\n        return [self.copy(deep=False)]\n    elif self._can_hold_element(value) or (self.dtype == 'string' and is_re(value)):\n        blk = self._maybe_copy(inplace)\n        putmask_inplace(blk.values, mask, value)\n        return [blk]\n    elif self.ndim == 1 or self.shape[0] == 1:\n        if value is None or value is NA:\n            blk = self.astype(np.dtype(object))\n        else:\n            blk = self.coerce_to_target_dtype(value, raise_on_upcast=False)\n        return blk.replace(to_replace=to_replace, value=value, inplace=True, mask=mask)\n    else:\n        blocks = []\n        for i, nb in enumerate(self._split()):\n            blocks.extend(type(self).replace(nb, to_replace=to_replace, value=value, inplace=True, mask=mask[i:i + 1]))\n        return blocks",
    "docstring": "replace the to_replace value with value, possible to create new blocks here this is just a call to putmask.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\internals\\blocks.py",
    "ast_data": "FunctionDef name:replace arg:self arg:to_replace arg:value arg:inplace arg:mask arguments arg arg arg arg arg Assign If Call Return return:yes Call If Compare Assign Call If Call Return return:yes Call If BoolOp Call BoolOp Compare Call Assign Call Call Return return:yes If BoolOp Compare Compare If BoolOp Compare Compare Assign Call Call Assign Call Return return:yes Call Assign For Call Call Call Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "index_expanded_dims_and_copy_",
    "source_code": "def index_expanded_dims_and_copy_(dst: torch.Tensor, src: torch.Tensor, expanded_dims: list[int]) -> None:\n    dst = index_expanded_dims(dst, expanded_dims)\n    src = index_expanded_dims(src, expanded_dims)\n    dst.copy_(src)",
    "docstring": "Index into expanded dimensions of both dst and src then copy_",
    "type": "function",
    "file_path": "pytorch\\torch\\_inductor\\compile_fx.py",
    "ast_data": "FunctionDef name:index_expanded_dims_and_copy_ arg:dst arg:src arg:expanded_dims arguments arg arg arg Assign Call Assign Call Call"
  },
  {
    "library": "scipy",
    "name": "MultiModal",
    "source_code": "class MultiModal(Benchmark):\n    change_dimensionality = True\n\n    def __init__(self, dimensions=2):\n        Benchmark.__init__(self, dimensions)\n        self._bounds = list(zip([-10.0] * self.N, [10.0] * self.N))\n        self.custom_bounds = [(-5, 5), (-5, 5)]\n        self.global_optimum = [[0.0 for _ in range(self.N)]]\n        self.fglob = 0.0\n\n    def fun(self, x, *args):\n        self.nfev += 1\n        return sum(abs(x)) * prod(abs(x))",
    "docstring": "MultiModal objective function. This class defines the MultiModal global optimization problem. This is a multimodal minimization problem defined as follows: .. math:: f_{\\text{MultiModal}}(x) = \\left( \\sum_{i=1}^n \\lvert x_i \\rvert \\right) \\left( \\prod_{i=1}^n \\lvert x_i \\rvert \\right) Here, :math: represents the number of dimensions and :math: for :math:. *Global optimum*: :math: for :math: for :math: .. [1] Gavana, A. Global Optimization Benchmarks and AMPGO retrieved 2015",
    "type": "class",
    "file_path": "scipy\\benchmarks\\benchmarks\\go_benchmark_functions\\go_funcs_M.py",
    "ast_data": "ClassDef name:MultiModal Assign FunctionDef name:__init__ arg:self arg:dimensions arguments arg arg Call Assign Call Call Assign Assign Call Assign FunctionDef name:fun arg:self arg:x arguments arg arg arg Return return:yes Call Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "pick_loop_order",
    "source_code": "def pick_loop_order(stride_lengths: list[list[int]], sizes: Sequence[sympy.Expr], priority_idx: tuple[int, ...]=()) -> list[int]:\n\n    @functools.cmp_to_key\n    def index_cmp(a: int, b: int) -> int:\n        if sizes[a] == 1 or sizes[b] == 1:\n            return cmp(sizes[a] == 1, sizes[b] == 1)\n        stride_len_a = [abs(sl[a]) for sl in stride_lengths]\n        stride_len_b = [abs(sl[b]) for sl in stride_lengths]\n        a_first = sum((sl_b == 0 or sl_a < sl_b for sl_a, sl_b in zip(stride_len_a, stride_len_b)))\n        b_first = sum((sl_a == 0 or sl_b < sl_a for sl_a, sl_b in zip(stride_len_a, stride_len_b)))\n        if a_first > b_first:\n            return -1\n        if b_first > a_first:\n            return 1\n        return cmp(b, a)\n    order = list(reversed(range(len(stride_lengths[0]))))\n    if len(priority_idx) > 0:\n        stride_lengths = [stride_lengths[pi] for pi in priority_idx]\n    if config.pick_loop_orders:\n        order.sort(key=index_cmp)\n    return order",
    "docstring": "A heuristic to decide loop iteration orders. This has not been well tuned and may be something we should autotune.",
    "type": "function",
    "file_path": "pytorch\\torch\\_inductor\\scheduler.py",
    "ast_data": "FunctionDef name:pick_loop_order arg:stride_lengths arg:sizes arg:priority_idx arguments arg arg arg FunctionDef name:index_cmp arg:a arg:b arguments arg arg If BoolOp Compare Compare Return return:yes Call Compare Compare Assign Call Assign Call Assign Call BoolOp Compare Compare Call Assign Call BoolOp Compare Compare Call If Compare Return return:yes If Compare Return return:yes Return return:yes Call Assign Call Call Call Call If Compare Call Assign If Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "ConvBn2d",
    "source_code": "class ConvBn2d(_FusedModule):\n\n    def __init__(self, conv, bn):\n        assert type_before_parametrizations(conv) == Conv2d and type_before_parametrizations(bn) == BatchNorm2d, f'Incorrect types for input modules{type_before_parametrizations(conv)}{type_before_parametrizations(bn)}'\n        super().__init__(conv, bn)",
    "docstring": "This is a sequential container which calls the Conv 2d and Batch Norm 2d modules. During quantization this will be replaced with the corresponding fused module.",
    "type": "class",
    "file_path": "pytorch\\torch\\ao\\nn\\intrinsic\\modules\\fused.py",
    "ast_data": "ClassDef name:ConvBn2d FunctionDef name:__init__ arg:self arg:conv arg:bn arguments arg arg arg BoolOp Compare Call Compare Call Call Call Call Call"
  },
  {
    "library": "scrapy",
    "name": "overridden_settings",
    "source_code": "def overridden_settings(settings: Mapping[_SettingsKeyT, Any]) -> Iterable[tuple[str, Any]]:\n    for name, defvalue in iter_default_settings():\n        value = settings[name]\n        if not isinstance(defvalue, dict) and value != defvalue:\n            yield (name, value)",
    "docstring": "Return an iterable of the settings that have been overridden",
    "type": "function",
    "file_path": "scrapy\\scrapy\\settings\\__init__.py",
    "ast_data": "FunctionDef name:overridden_settings arg:settings arguments arg For Call Assign If BoolOp Call Compare"
  },
  {
    "library": "tensorflow",
    "name": "filter_distributed_callbacks",
    "source_code": "def filter_distributed_callbacks(callbacks_list, model):\n    if not model._in_multi_worker_mode():\n        raise ValueError('filter_distributed_callbacks() should only be called when Keras is in multi worker mode.')\n    callbacks_list = callbacks_list or []\n    if not [c for c in callbacks_list if isinstance(c, callbacks.ModelCheckpoint)]:\n        logging.warning('ModelCheckpoint callback is not provided. Workers will need to restart training if any fails.')\n    if callbacks_list is None or is_current_worker_chief():\n        return callbacks_list\n    return [callback for callback in callbacks_list if not callback._chief_worker_only]",
    "docstring": "Filter Callbacks based on the worker context when running multi-worker. Args: callbacks_list: A list of instances. model: Keras model instance. Returns: The list of instances that should be run on this worker.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\distribute\\distributed_training_utils_v1.py",
    "ast_data": "FunctionDef name:filter_distributed_callbacks arg:callbacks_list arg:model arguments arg arg If Call Raise Call Assign BoolOp If Call Call If BoolOp Compare Call Return return:yes Return return:yes"
  },
  {
    "library": "scipy",
    "name": "MatrixRankWarning",
    "source_code": "class MatrixRankWarning(UserWarning):\n    pass",
    "docstring": "Warning for exactly singular matrices.",
    "type": "class",
    "file_path": "scipy\\scipy\\sparse\\linalg\\_dsolve\\linsolve.py",
    "ast_data": "ClassDef name:MatrixRankWarning"
  },
  {
    "library": "pytorch",
    "name": "register_dispatch_func",
    "source_code": "def register_dispatch_func(aten_ops):\n\n    def wrapper(func):\n        for aten_op in aten_ops:\n            _MASKEDTENSOR_DISPATCH_TABLE[aten_op] = partial(func, aten_op)\n    return wrapper",
    "docstring": "Used for registering a new __torch_dispatch__ function to MaskedTensor Called via _MASKEDTENSOR_DISPATCH_TABLE The code to register a new function looks like: @register_dispatch_func(list_of_ops) def foo(func, *args, **kwargs):",
    "type": "function",
    "file_path": "pytorch\\torch\\masked\\maskedtensor\\_ops_refs.py",
    "ast_data": "FunctionDef name:register_dispatch_func arg:aten_ops arguments arg FunctionDef name:wrapper arg:func arguments arg For Assign Call Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "fit",
    "source_code": "@_fit_context(prefer_skip_nested_validation=True)\ndef fit(self, X, y, coef_init=None, intercept_init=None, sample_weight=None):\n    self._more_validate_params()\n    return self._fit(X, y, alpha=self.alpha, C=1.0, loss=self.loss, learning_rate=self.learning_rate, coef_init=coef_init, intercept_init=intercept_init, sample_weight=sample_weight)",
    "docstring": "Fit linear model with Stochastic Gradient Descent. Parameters ---------- X : {array-like, sparse matrix}, shape (n_samples, n_features) Training data. y : ndarray of shape (n_samples,) Target values. coef_init : ndarray of shape (n_features,), default=None The initial coefficients to warm-start the optimization. intercept_init : ndarray of shape (1,), default=None The initial intercept to warm-start the optimization. sample_weight : array-like, shape (n_samples,), default=None Weights applied to individual samples (1. for unweighted). Returns ------- self : object Fitted estimator.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\linear_model\\_stochastic_gradient.py",
    "ast_data": "FunctionDef name:fit arg:self arg:X arg:y arg:coef_init arg:intercept_init arg:sample_weight arguments arg arg arg arg arg arg Call Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "placeholder_arguments",
    "source_code": "def placeholder_arguments(self, placeholder_context: trace.PlaceholderContext) -> inspect.BoundArguments:\n    arguments = collections.OrderedDict()\n    for parameter in self.parameters.values():\n        if parameter.kind in {Parameter.VAR_POSITIONAL, Parameter.VAR_KEYWORD}:\n            raise ValueError('Can not generate placeholder values for variable length function type.')\n        if not parameter.type_constraint:\n            raise ValueError('Can not generate placeholder value for partially defined function type.')\n        placeholder_context.update_naming_scope(parameter.name)\n        arguments[parameter.name] = parameter.type_constraint.placeholder_value(placeholder_context)\n    return inspect.BoundArguments(self, arguments)",
    "docstring": "Returns BoundArguments of values that can be used for tracing.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\core\\function\\polymorphism\\function_type.py",
    "ast_data": "FunctionDef name:placeholder_arguments arg:self arg:placeholder_context arguments arg arg Assign Call For Call If Compare Raise Call If Raise Call Call Assign Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "func",
    "source_code": "@property\ndef func(self):\n    return self._func",
    "docstring": "Returns the func given to this Template.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\template.py",
    "ast_data": "FunctionDef name:func arg:self arguments arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "add_custom_scalars",
    "source_code": "def add_custom_scalars(self, layout):\n    torch._C._log_api_usage_once('tensorboard.logging.add_custom_scalars')\n    self._get_file_writer().add_summary(custom_scalars(layout))",
    "docstring": "Create special chart by collecting charts tags in 'scalars'. NOTE: This function can only be called once for each SummaryWriter() object. Because it only provides metadata to tensorboard, the function can be called before or after the training loop. Args: layout (dict): {categoryName: *charts*}, where *charts* is also a dictionary {chartName: *ListOfProperties*}. The first element in *ListOfProperties* is the chart's type (one of **Multiline** or **Margin**) and the second element should be a list containing the tags you have used in add_scalar function, which will be collected into the new chart. Examples:: layout = {'Taiwan':{'twse':['Multiline',['twse/0050', 'twse/2330']]}, 'USA':{ 'dow':['Margin', ['dow/aaa', 'dow/bbb', 'dow/ccc']], 'nasdaq':['Margin', ['nasdaq/aaa', 'nasdaq/bbb', 'nasdaq/ccc']]}} writer.add_custom_scalars(layout)",
    "type": "method",
    "file_path": "pytorch\\torch\\utils\\tensorboard\\writer.py",
    "ast_data": "FunctionDef name:add_custom_scalars arg:self arg:layout arguments arg arg Call Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_wrap_user_constructor",
    "source_code": "def _wrap_user_constructor(cls):\n    user_constructor = cls.__init__\n\n    def wrapped_init(self, *args, **kwargs):\n        self.__dict__[_IN_CONSTRUCTOR] = True\n        user_constructor(self, *args, **kwargs)\n        del self.__dict__[_IN_CONSTRUCTOR]\n        self._tf_extension_type_convert_fields()\n        self.__validate__()\n    cls.__init__ = tf_decorator.make_decorator(user_constructor, wrapped_init)",
    "docstring": "Wraps a user-defined constructor for tf.ExtensionType subclass .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\extension_type.py",
    "ast_data": "FunctionDef name:_wrap_user_constructor arg:cls arguments arg Assign FunctionDef name:wrapped_init arg:self arguments arg arg arg Assign Call Call Call Assign Call"
  },
  {
    "library": "cherrypy",
    "name": "on_check",
    "source_code": "def on_check(self, username):\n    pass",
    "docstring": "Process a successful check event. :param username: The checked user name. :type username: str",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\lib\\cptools.py",
    "ast_data": "FunctionDef name:on_check arg:self arg:username arguments arg arg"
  },
  {
    "library": "tensorflow",
    "name": "tracing_scope",
    "source_code": "@tf_contextlib.contextmanager\ndef tracing_scope():\n    previous_value = _thread_local_data.enable_call_tracing\n    previous_queue = _thread_local_data.trace_queue\n    try:\n        _thread_local_data.enable_call_tracing = True\n        _thread_local_data.trace_queue = []\n        yield\n    finally:\n        while _thread_local_data.trace_queue:\n            fn, args, kwargs, training = _thread_local_data.trace_queue.pop()\n            if training is not None:\n                with K.deprecated_internal_learning_phase_scope(training):\n                    fn.get_concrete_function(*args, **kwargs)\n            else:\n                fn.get_concrete_function(*args, **kwargs)\n        _thread_local_data.trace_queue = previous_queue\n        _thread_local_data.enable_call_tracing = previous_value",
    "docstring": "Enables tracing scope.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\saving\\saved_model\\save_impl.py",
    "ast_data": "FunctionDef name:tracing_scope arguments Assign Assign Try Assign Assign While Assign Call If Compare With Call Call Call Assign Assign"
  },
  {
    "library": "pandas",
    "name": "_pprint_dict",
    "source_code": "def _pprint_dict(seq: Mapping, _nest_lvl: int=0, max_seq_items: int | None=None, **kwds: Any) -> str:\n    fmt = '{{{things}}}'\n    pairs = []\n    pfmt = '{key}: {val}'\n    if max_seq_items is False:\n        nitems = len(seq)\n    else:\n        nitems = max_seq_items or get_option('max_seq_items') or len(seq)\n    for k, v in list(seq.items())[:nitems]:\n        pairs.append(pfmt.format(key=pprint_thing(k, _nest_lvl + 1, max_seq_items=max_seq_items, **kwds), val=pprint_thing(v, _nest_lvl + 1, max_seq_items=max_seq_items, **kwds)))\n    if nitems < len(seq):\n        return fmt.format(things=', '.join(pairs) + ', ...')\n    else:\n        return fmt.format(things=', '.join(pairs))",
    "docstring": "internal. pprinter for iterables. you should probably use pprint_thing() rather than calling this directly.",
    "type": "function",
    "file_path": "pandas\\pandas\\io\\formats\\printing.py",
    "ast_data": "FunctionDef name:_pprint_dict arg:seq arg:_nest_lvl arg:max_seq_items arguments arg arg arg arg Assign Assign Assign If Compare Assign Call Assign BoolOp Call Call For Call Call Call Call Call Call If Compare Call Return return:yes Call Call Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "initialize_or_restore",
    "source_code": "def initialize_or_restore(self, session=None):\n    if context.executing_eagerly():\n        return\n    if session is None:\n        session = get_session()\n    all_objects = util.list_objects(self._object_graph_view)\n    already_initialized_objects = object_identity.ObjectIdentitySet(self._checkpoint.object_by_proto_id.values())\n    initializers_for_non_restored_variables = [c.initializer for c in all_objects if hasattr(c, 'initializer') and c not in already_initialized_objects and (getattr(c, '_update_uid', self._checkpoint.restore_uid - 1) < self._checkpoint.restore_uid)]\n    self.run_restore_ops(session=session)\n    session.run(initializers_for_non_restored_variables)",
    "docstring": "Run operations to initialize or restore objects in the dependency graph. Any objects in the dependency graph which have initializers but are not in the checkpoint will have those initializers run, unless those variables are being restored by a later call to . This method has a sibling in which instead initializes variables. That type is returned if no checkpoint is specified in . Args: session: The session to run init/restore ops in. If , uses the default session.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\checkpoint\\checkpoint.py",
    "ast_data": "FunctionDef name:initialize_or_restore arg:self arg:session arguments arg arg If Call Return return:no If Compare Assign Call Assign Call Assign Call Call Assign BoolOp Call Compare Compare Call Call Call"
  },
  {
    "library": "pandas",
    "name": "is_monotonic_decreasing",
    "source_code": "@cache_readonly\ndef is_monotonic_decreasing(self) -> bool:\n    return self[::-1].is_monotonic_increasing",
    "docstring": "Return a boolean if the values are equal or decreasing.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\indexes\\multi.py",
    "ast_data": "FunctionDef name:is_monotonic_decreasing arg:self arguments arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "process_paired_modules_list_to_name",
    "source_code": "def process_paired_modules_list_to_name(model, paired_modules_list):\n    for group in paired_modules_list:\n        for i, item in enumerate(group):\n            if isinstance(item, torch.nn.Module):\n                group[i] = get_name_by_module(model, item)\n            elif not isinstance(item, str):\n                raise TypeError('item must be a nn.Module or a string')\n    return paired_modules_list",
    "docstring": "Processes a list of paired modules to a list of names of paired modules.",
    "type": "function",
    "file_path": "pytorch\\torch\\ao\\quantization\\_equalize.py",
    "ast_data": "FunctionDef name:process_paired_modules_list_to_name arg:model arg:paired_modules_list arguments arg arg For For Call If Call Assign Call If Call Raise Call Return return:yes"
  },
  {
    "library": "django",
    "name": "_make_single_date_lookup",
    "source_code": "def _make_single_date_lookup(self, date):\n    date_field = self.get_date_field()\n    if self.uses_datetime_field:\n        since = self._make_date_lookup_arg(date)\n        until = self._make_date_lookup_arg(date + datetime.timedelta(days=1))\n        return {'%s__gte' % date_field: since, '%s__lt' % date_field: until}\n    else:\n        return {date_field: date}",
    "docstring": "Get the lookup kwargs for filtering on a single date. If the date field is a DateTimeField, we can't just filter on date_field=date because that doesn't take the time into account.",
    "type": "method",
    "file_path": "django\\django\\views\\generic\\dates.py",
    "ast_data": "FunctionDef name:_make_single_date_lookup arg:self arg:date arguments arg arg Assign Call If Assign Call Assign Call Call Return return:yes Return return:yes"
  },
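The half-open [date, date + 1 day) range built above, reproduced standalone (the "created" field name is illustrative; Django's `_make_date_lookup_arg` also handles timezone-aware conversion, which this sketch skips):

```python
import datetime

date = datetime.date(2024, 5, 1)
since = datetime.datetime.combine(date, datetime.time.min)
until = datetime.datetime.combine(date + datetime.timedelta(days=1), datetime.time.min)
lookup = {"created__gte": since, "created__lt": until}
print(lookup)
# {'created__gte': datetime.datetime(2024, 5, 1, 0, 0),
#  'created__lt': datetime.datetime(2024, 5, 2, 0, 0)}
```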
  {
    "library": "pytorch",
    "name": "info_dict",
    "source_code": "def info_dict(self) -> dict[str, Union[PrimitiveInfoType, list[PrimitiveInfoType]]]:\n    if self.info_kwargs is not None and 'op' in self.info_kwargs:\n        op: Any = self.info_kwargs['op']\n        return {'backend': 'CUDA', 'op_type': type(op).__name__, 'op_conf_name': str(op.configuration_name()), 'op_arch': str(op.arch), 'tile_shape': str(op.tile_description.tile_shape), 'epilogue_schedule': str(op.epilogue_schedule), 'kernel_schedule': str(op.kernel_schedule), 'element_accumulator': str(op.accumulator_type()), 'op_name': str(op.procedural_name()), 'instruction_shape': str(op.tile_description.math_instruction.instruction_shape), 'swizzle': str(self.info_kwargs['swizzle'])}\n    else:\n        return {'backend': 'CUDA', 'op_type': 'unknown'}",
    "docstring": "Information returned here is logged to the autotune log file when that is enabled.",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\codegen\\cuda\\cuda_kernel.py",
    "ast_data": "FunctionDef name:info_dict arg:self arguments arg If BoolOp Compare Compare Return return:yes Call Call Call Call Call Call Call Call Call Call Call Call Call Return return:yes"
  },
  {
    "library": "pandas",
    "name": "fill_binop",
    "source_code": "def fill_binop(left, right, fill_value):\n    if fill_value is not None:\n        left_mask = isna(left)\n        right_mask = isna(right)\n        mask = left_mask ^ right_mask\n        if left_mask.any():\n            left = left.copy()\n            left[left_mask & mask] = fill_value\n        if right_mask.any():\n            right = right.copy()\n            right[right_mask & mask] = fill_value\n    return (left, right)",
    "docstring": "If a non-None fill_value is given, replace null entries in left and right with this value, but only in positions where _one_ of left/right is null, not both. Parameters ---------- left : array-like right : array-like fill_value : object Returns ------- left : array-like right : array-like Notes ----- Makes copies if fill_value is not None and NAs are present.",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\ops\\array_ops.py",
    "ast_data": "FunctionDef name:fill_binop arg:left arg:right arg:fill_value arguments arg arg arg If Compare Assign Call Assign Call Assign If Call Assign Call Assign If Call Assign Call Assign Return return:yes"
  },
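A quick check of the one-sided-fill semantics, reimplemented inline with pandas (the fill value 0.0 is arbitrary): only positions where exactly one operand is null are filled; the both-null position stays NaN on both sides.

```python
import numpy as np
import pandas as pd

left = pd.Series([1.0, np.nan, 3.0, np.nan])
right = pd.Series([np.nan, 2.0, 30.0, np.nan])

mask = left.isna() ^ right.isna()          # exactly one side null
left_filled = left.copy()
left_filled[left.isna() & mask] = 0.0
right_filled = right.copy()
right_filled[right.isna() & mask] = 0.0

print(left_filled.tolist())   # [1.0, 0.0, 3.0, nan]
print(right_filled.tolist())  # [0.0, 2.0, 30.0, nan]
```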
  {
    "library": "scikit-learn",
    "name": "__getitem__",
    "source_code": "def __getitem__(self, idx: SetIndex, /) -> Self:\n    if self._idx is not _undef:\n        msg = 'Index has already been set'\n        raise ValueError(msg)\n    return type(self)(self._x, idx)",
    "docstring": "Allow for the alternate syntax `` and feels more intuitive coming from the JAX documentation.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\externals\\array_api_extra\\_lib\\_at.py",
    "ast_data": "FunctionDef name:__getitem__ arguments arg arg If Compare Assign Raise Call Return return:yes Call Call"
  },
  {
    "library": "matplotlib",
    "name": "tick_top",
    "source_code": "def tick_top(self):\n    label = True\n    if 'label1On' in self._major_tick_kw:\n        label = self._major_tick_kw['label1On'] or self._major_tick_kw['label2On']\n    self.set_ticks_position('top')\n    self.set_tick_params(which='both', labeltop=label)",
    "docstring": "Move ticks and ticklabels (if present) to the top of the Axes.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\axis.py",
    "ast_data": "FunctionDef name:tick_top arg:self arguments arg Assign If Compare Assign BoolOp Call Call"
  },
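Typical usage of the method above through the public axis API; a minimal sketch using the non-interactive Agg backend.

```python
import matplotlib
matplotlib.use("Agg")  # headless backend for the sketch
import matplotlib.pyplot as plt

fig, ax = plt.subplots()
ax.plot([0, 1, 2], [0, 1, 4])
ax.xaxis.tick_top()  # ticks and ticklabels move to the top of the Axes
fig.savefig("tick_top_demo.png")
```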
  {
    "library": "kornia",
    "name": "_dct_8x8",
    "source_code": "def _dct_8x8(input: Tensor) -> Tensor:\n    dtype: Dtype = input.dtype\n    device: Device = input.device\n    index: Tensor = torch.arange(8, dtype=dtype, device=device)\n    x, y, u, v = torch.meshgrid(index, index, index, index)\n    dct_tensor: Tensor = ((2.0 * x + 1.0) * u * pi / 16.0).cos() * ((2.0 * y + 1.0) * v * pi / 16.0).cos()\n    alpha: Tensor = torch.ones(8, dtype=dtype, device=device)\n    alpha[0] = 1.0 / 2 ** 0.5\n    dct_scale: Tensor = torch.einsum('i, j -> ij', alpha, alpha) * 0.25\n    output: Tensor = dct_scale[None, None] * torch.tensordot(input - 128.0, dct_tensor)\n    return output",
    "docstring": "Perform an 8 x 8 discrete cosine transform. Args: input (Tensor): Patched input tensor of the shape :math:. Returns: output (Tensor): DCT output tensor of the shape :math:.",
    "type": "function",
    "file_path": "kornia\\kornia\\enhance\\jpeg.py",
    "ast_data": "FunctionDef name:_dct_8x8 arg:input arguments arg Call Assign Call Call Call Call Assign Call Call Return return:yes"
  },
  {
    "library": "django",
    "name": "parse_name",
    "source_code": "def parse_name(self, fixture_name):\n    if fixture_name == READ_STDIN:\n        if not self.format:\n            raise CommandError('--format must be specified when reading from stdin.')\n        return (READ_STDIN, self.format, 'stdin')\n    parts = fixture_name.rsplit('.', 2)\n    if len(parts) > 1 and parts[-1] in self.compression_formats:\n        cmp_fmt = parts[-1]\n        parts = parts[:-1]\n    else:\n        cmp_fmt = None\n    if len(parts) > 1:\n        if parts[-1] in self.serialization_formats:\n            ser_fmt = parts[-1]\n            parts = parts[:-1]\n        else:\n            raise CommandError(\"Problem installing fixture '%s': %s is not a known serialization format.\" % ('.'.join(parts[:-1]), parts[-1]))\n    else:\n        ser_fmt = None\n    name = '.'.join(parts)\n    return (name, ser_fmt, cmp_fmt)",
    "docstring": "Split fixture name in name, serialization format, compression format.",
    "type": "method",
    "file_path": "django\\django\\core\\management\\commands\\loaddata.py",
    "ast_data": "FunctionDef name:parse_name arg:self arg:fixture_name arguments arg arg If Compare If Raise Call Return return:yes Assign Call If BoolOp Compare Call Compare Assign Assign Assign If Compare Call If Compare Assign Assign Raise Call Call Assign Assign Call Return return:yes"
  },
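The rsplit-based decomposition above, traced by hand for a compressed JSON fixture; the format memberships below assume 'gz' and 'json' are in the registered compression and serialization format lists, which is the loaddata default.

```python
name = "mydata.json.gz"
parts = name.rsplit(".", 2)
print(parts)  # ['mydata', 'json', 'gz']
# 'gz' is a known compression format  -> cmp_fmt = 'gz', drop it from parts
# 'json' is a known serialization format -> ser_fmt = 'json', drop it too
# what remains is joined back into the fixture name: 'mydata'
```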
  {
    "library": "tensorflow",
    "name": "VariableSynchronization",
    "source_code": "@tf_export('VariableSynchronization')\nclass VariableSynchronization(enum.Enum):\n    AUTO = 0\n    NONE = 1\n    ON_WRITE = 2\n    ON_READ = 3",
    "docstring": "Indicates when a distributed variable will be synced. * : Indicates that the synchronization will be determined by the current (eg. With this would be ). * : Indicates that there will only be one copy of the variable, so there is no need to sync. * : Indicates that the variable will be updated across devices every time it is written. * : Indicates that the variable will be aggregated across devices when it is read (eg. when checkpointing or when evaluating an op that uses the variable). Example: >>> temp_grad=[tf.Variable([0.], trainable=False, ... synchronization=tf.VariableSynchronization.ON_READ, ... aggregation=tf.VariableAggregation.MEAN ... )]",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\variables.py",
    "ast_data": "ClassDef name:VariableSynchronization Assign Assign Assign Assign Call"
  },
  {
    "library": "pytorch",
    "name": "_validate_qmin_qmax",
    "source_code": "@torch.jit.export\ndef _validate_qmin_qmax(self, quant_min: int, quant_max: int) -> None:\n    assert quant_min <= 0 <= quant_max, 'Used-specified quantization range must include 0.'\n    assert quant_min < quant_max, 'qmin must be strictly less than qmax for user-specified quantization range.'",
    "docstring": "Validates that the user-specified quantization range is properly initialized and within the given bound supported by the observer dtype. To accommodate lower-bit quantization with respect to the existing torch.qint8 and torch.quint8 datatypes, the user can choose to use dynamic quantization range by passing in a tuple of initial qmin and qmax values. One use case is these customized qmin and qmax values are used to calculate static estimates of the scale and zero point for aggressive lower-bit fake quantization. These estimates are compared against parameters learned through backpropagation. The related literatures for scale and zero point via backpropagation are as follows: Learned Step Size Quantization: Trained Quantization Thresholds:",
    "type": "method",
    "file_path": "pytorch\\torch\\ao\\quantization\\observer.py",
    "ast_data": "FunctionDef name:_validate_qmin_qmax arg:self arg:quant_min arg:quant_max arguments arg arg arg Compare Compare"
  },
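The two invariants above, exercised standalone (a sketch; the real method is a `@torch.jit.export`'d member of the observer class).

```python
def validate_qmin_qmax(quant_min: int, quant_max: int) -> None:
    # the range must straddle zero and be non-degenerate
    assert quant_min <= 0 <= quant_max, "User-specified quantization range must include 0."
    assert quant_min < quant_max, "qmin must be strictly less than qmax."

validate_qmin_qmax(-128, 127)  # torch.qint8-style range: ok
validate_qmin_qmax(0, 15)      # 4-bit unsigned range: ok (0 is included)
try:
    validate_qmin_qmax(1, 7)   # excludes 0 -> rejected
except AssertionError as e:
    print(e)
```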
  {
    "library": "django",
    "name": "postgis_full_version",
    "source_code": "def postgis_full_version(self):\n    return self._get_postgis_func('postgis_full_version')",
    "docstring": "Return PostGIS version number and compile-time options.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\db\\backends\\postgis\\operations.py",
    "ast_data": "FunctionDef name:postgis_full_version arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "scrapy",
    "name": "load",
    "source_code": "def load(self, spider_name: str) -> type[Spider]:\n    pass",
    "docstring": "Return the Spider class for the given spider name. If the spider name is not found, it must raise a KeyError.",
    "type": "method",
    "file_path": "scrapy\\scrapy\\spiderloader.py",
    "ast_data": "FunctionDef name:load arg:self arg:spider_name arguments arg arg"
  },
  {
    "library": "numpy",
    "name": "getDependencies",
    "source_code": "def getDependencies(filename):\n    external_pat = re.compile('^\\\\s*EXTERNAL\\\\s', re.I)\n    routines = []\n    with open(filename) as fo:\n        for lineno, line in fortranSourceLines(fo):\n            m = external_pat.match(line)\n            if m:\n                names = line[m.end():].strip().split(',')\n                names = [n.strip().lower() for n in names]\n                names = [n for n in names if n]\n                routines.extend(names)\n    return routines",
    "docstring": "For a Fortran source file, return a list of routines declared as EXTERNAL in it.",
    "type": "function",
    "file_path": "numpy\\numpy\\linalg\\lapack_lite\\fortran.py",
    "ast_data": "FunctionDef name:getDependencies arg:filename arguments arg Assign Call Assign With Call For Call Assign Call If Assign Call Call Call Assign Call Call Assign Call Return return:yes"
  },
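The EXTERNAL-scan logic above, reduced to a self-contained sketch; the real function iterates via `fortranSourceLines`, which also handles Fortran continuation lines, and that part is skipped here.

```python
import re

external_pat = re.compile(r"^\s*EXTERNAL\s", re.I)
source = """      EXTERNAL dgemm, dtrsm
      external lsame
      CALL dgemm(1)"""

routines = []
for line in source.splitlines():
    m = external_pat.match(line)
    if m:
        # everything after the EXTERNAL keyword is a comma-separated name list
        names = [n.strip().lower() for n in line[m.end():].split(",") if n.strip()]
        routines.extend(names)
print(routines)  # ['dgemm', 'dtrsm', 'lsame']
```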
  {
    "library": "tensorflow",
    "name": "get_variables_dir",
    "source_code": "def get_variables_dir(export_dir):\n    return file_io.join(compat.as_text(export_dir), compat.as_text(constants.VARIABLES_DIRECTORY))",
    "docstring": "Return variables sub-directory in the SavedModel.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\saved_model\\path_helpers.py",
    "ast_data": "FunctionDef name:get_variables_dir arg:export_dir arguments arg Return return:yes Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_resource_capture_helper",
    "source_code": "def _resource_capture_helper(self, tensor):\n    assert tensor.dtype == dtypes.resource\n    forward_graph_input_names = [t.name for t in self._forward_graph.inputs]\n    forward_graph_name_to_opdef = {op.name: op.node_def for op in self._forward_graph.get_operations()}\n    index = util.resource_input_index(tensor.name, forward_graph_input_names, forward_graph_name_to_opdef, self._forward_graph._functions)\n    input_placeholder = self._forward_graph.inputs[index]\n    tensor_in_outer_graph = self._forward_graph._while.inputs[index]\n    assert input_placeholder.dtype == dtypes.resource\n    assert tensor_in_outer_graph.dtype == dtypes.resource\n    if index != util.resource_input_index(self._forward_graph.outputs[index].name, forward_graph_input_names, forward_graph_name_to_opdef, self._forward_graph._functions):\n        raise AssertionError(f'Resource tensors must be loop invariants {tensor_in_outer_graph}')\n    self._indirect_captures[ops.tensor_id(tensor)] = self.capture(tensor_in_outer_graph)\n    return self._indirect_captures[ops.tensor_id(tensor)]",
    "docstring": "Returns the captured resource tensor. Resource-type tensors are not accumulated. If a resource tensor exists in the loop body it must either be a loop input or an output of a nested While op inside the loop body which had captured the external resource. Args: tensor: the external resource Tensor to be captured. Returns: Tensor in this graph.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\while_v2.py",
    "ast_data": "FunctionDef name:_resource_capture_helper arg:self arg:tensor arguments arg arg Compare Assign Assign Call Assign Call Assign Assign Compare Compare If Compare Call Raise Call Assign Call Call Return return:yes Call"
  },
  {
    "library": "seaborn",
    "name": "_repr_html_",
    "source_code": "def _repr_html_(self):\n    import base64\n    png_bytes = self._repr_png_()\n    png_base64 = base64.b64encode(png_bytes).decode('ascii')\n    return '<img ' + 'alt=\"' + self.name + ' color map\" ' + 'title=\"' + self.name + '\"' + 'src=\"data:image/png;base64,' + png_base64 + '\">'",
    "docstring": "Generate an HTML representation of the Colormap.",
    "type": "function",
    "file_path": "seaborn\\seaborn\\palettes.py",
    "ast_data": "FunctionDef name:_repr_html_ arg:self arguments arg Assign Call Assign Call Call Return return:yes"
  },
  {
    "library": "numpy",
    "name": "process_tempita",
    "source_code": "def process_tempita(fromfile, outfile=None):\n    if outfile is None:\n        outfile = os.path.splitext(fromfile)[0]\n    from_filename = tempita.Template.from_filename\n    template = from_filename(fromfile, encoding=sys.getdefaultencoding())\n    content = template.substitute()\n    with open(outfile, 'w') as f:\n        f.write(content)",
    "docstring": "Process tempita templated file and write out the result. The template file is expected to end in or : E.g. processing generates .",
    "type": "function",
    "file_path": "numpy\\numpy\\_build_utils\\tempita.py",
    "ast_data": "FunctionDef name:process_tempita arg:fromfile arg:outfile arguments arg arg If Compare Assign Call Assign Assign Call Call Assign Call With Call Call"
  },
  {
    "library": "numpy",
    "name": "swapaxes",
    "source_code": "@array_function_dispatch(_swapaxes_dispatcher)\ndef swapaxes(a, axis1, axis2):\n    return _wrapfunc(a, 'swapaxes', axis1, axis2)",
    "docstring": "Interchange two axes of an array. Parameters ---------- a : array_like Input array. axis1 : int First axis. axis2 : int Second axis. Returns ------- a_swapped : ndarray For NumPy >= 1.10.0, if is an ndarray, then a view of is returned; otherwise a new array is created. For earlier NumPy versions a view of is returned only if the order of the axes is changed, otherwise the input array is returned. Examples -------- >>> import numpy as np >>> x = np.array([[1,2,3]]) >>> np.swapaxes(x,0,1) array([[1], [2], [3]]) >>> x = np.array([[[0,1],[2,3]],[[4,5],[6,7]]]) >>> x array([[[0, 1], [2, 3]], [[4, 5], [6, 7]]]) >>> np.swapaxes(x,0,2) array([[[0, 4], [2, 6]], [[1, 5], [3, 7]]])",
    "type": "function",
    "file_path": "numpy\\numpy\\_core\\fromnumeric.py",
    "ast_data": "FunctionDef name:swapaxes arg:a arg:axis1 arg:axis2 arguments arg arg arg Return return:yes Call Call"
  },
  {
    "library": "scipy",
    "name": "Brent",
    "source_code": "class Brent(Benchmark):\n\n    def __init__(self, dimensions=2):\n        Benchmark.__init__(self, dimensions)\n        self._bounds = list(zip([-10.0] * self.N, [10.0] * self.N))\n        self.custom_bounds = ([-10, 2], [-10, 2])\n        self.global_optimum = [[-10.0, -10.0]]\n        self.fglob = 0.0\n\n    def fun(self, x, *args):\n        self.nfev += 1\n        return (x[0] + 10.0) ** 2.0 + (x[1] + 10.0) ** 2.0 + exp(-x[0] ** 2.0 - x[1] ** 2.0)",
    "docstring": "Brent objective function. The Brent [1]_ global optimization problem is a multimodal minimization problem defined as follows: .. math:: f_{\\text{Brent}}(x) = (x_1 + 10)^2 + (x_2 + 10)^2 + e^{(-x_1^2 -x_2^2)} with :math: for :math:. *Global optimum*: :math: for :math: .. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions For Global Optimization Problems Int. Journal of Mathematical Modelling and Numerical Optimisation, 2013, 4, 150-194. TODO solution is different to Jamil#24",
    "type": "class",
    "file_path": "scipy\\benchmarks\\benchmarks\\go_benchmark_functions\\go_funcs_B.py",
    "ast_data": "ClassDef name:Brent FunctionDef name:__init__ arg:self arg:dimensions arguments arg arg Call Assign Call Call Assign Assign Assign FunctionDef name:fun arg:self arg:x arguments arg arg arg Return return:yes Call"
  },
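Evaluating the objective at the reported optimum confirms the fglob value: the two quadratic terms vanish at (-10, -10) and the exponential term is exp(-200), which is numerically negligible.

```python
import numpy as np

def brent(x):
    # same expression as `fun` above, minus the Benchmark scaffolding
    return (x[0] + 10.0) ** 2 + (x[1] + 10.0) ** 2 + np.exp(-x[0] ** 2 - x[1] ** 2)

print(brent([-10.0, -10.0]))  # ~1.4e-87, i.e. fglob = 0.0 to machine precision
```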
  {
    "library": "scipy",
    "name": "_ldl_construct_tri_factor",
    "source_code": "def _ldl_construct_tri_factor(lu, swap_vec, pivs, lower=True):\n    n = lu.shape[0]\n    perm = arange(n)\n    rs, re, ri = (n - 1, -1, -1) if lower else (0, n, 1)\n    for ind in range(rs, re, ri):\n        s_ind = swap_vec[ind]\n        if s_ind != ind:\n            col_s = ind if lower else 0\n            col_e = n if lower else ind + 1\n            if pivs[ind] == (0 if lower else 2):\n                col_s += -1 if lower else 0\n                col_e += 0 if lower else 1\n            lu[[s_ind, ind], col_s:col_e] = lu[[ind, s_ind], col_s:col_e]\n            perm[[s_ind, ind]] = perm[[ind, s_ind]]\n    return (lu, argsort(perm))",
    "docstring": "Helper function to construct explicit outer factors of LDL factorization. If lower is True the permuted factors are multiplied as L(1)*L(2)*...*L(k). Otherwise, the permuted factors are multiplied as L(k)*...*L(2)*L(1). See LAPACK documentation for more details. Parameters ---------- lu : ndarray The triangular array that is extracted from LAPACK routine call with ones on the diagonals. swap_vec : ndarray The array that defines the row swapping indices. If the kth entry is m then rows k,m are swapped. Notice that the mth entry is not necessarily k to avoid undoing the swapping. pivs : ndarray The array that defines the block diagonal structure returned by _ldl_sanitize_ipiv(). lower : bool, optional The boolean to switch between lower and upper triangular structure. Returns ------- lu : ndarray The square outer factor which satisfies the L * D * L.T = A perm : ndarray The permutation vector that brings the lu to the triangular form Notes ----- Note that the original argument \"lu\" is overwritten.",
    "type": "function",
    "file_path": "scipy\\scipy\\linalg\\_decomp_ldl.py",
    "ast_data": "FunctionDef name:_ldl_construct_tri_factor arg:lu arg:swap_vec arg:pivs arg:lower arguments arg arg arg arg Assign Assign Call Assign For Call Assign If Compare Assign Assign If Compare Assign Assign Return return:yes Call"
  },
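The public entry point that uses this helper is `scipy.linalg.ldl`; a short round-trip check showing that the returned (row-permuted) factor reproduces the matrix as L D L^T.

```python
import numpy as np
from scipy.linalg import ldl

a = np.array([[ 2.0, -1.0,  3.0],
              [-1.0,  2.0,  0.0],
              [ 3.0,  0.0,  1.0]])
lu, d, perm = ldl(a, lower=True)
# lu[perm] is lower triangular; the factorization reproduces a
print(np.allclose(lu @ d @ lu.T, a))  # True
```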
  {
    "library": "django",
    "name": "UploadFileException",
    "source_code": "class UploadFileException(Exception):\n    pass",
    "docstring": "Any error having to do with uploading files.",
    "type": "class",
    "file_path": "django\\django\\core\\files\\uploadhandler.py",
    "ast_data": "ClassDef name:UploadFileException"
  },
  {
    "library": "django",
    "name": "check_string",
    "source_code": "def check_string(result, func, cargs, offset=-1, str_result=False):\n    if str_result:\n        ptr = result\n        if not ptr:\n            s = None\n        else:\n            s = string_at(result)\n    else:\n        check_err(result)\n        ptr = ptr_byref(cargs, offset)\n        s = ptr.value\n    if ptr:\n        lgdal.VSIFree(ptr)\n    return s",
    "docstring": "Check the string output returned from the given function, and free the string pointer allocated by OGR. The keyword may be used when the result is the string pointer, otherwise the OGR error code is assumed. The keyword may be used to extract the string pointer passed in by-reference at the given slice offset in the function arguments.",
    "type": "function",
    "file_path": "django\\django\\contrib\\gis\\gdal\\prototypes\\errcheck.py",
    "ast_data": "FunctionDef name:check_string arg:result arg:func arg:cargs arg:offset arg:str_result arguments arg arg arg arg arg If Assign If Assign Assign Call Call Assign Call Assign If Call Return return:yes"
  },
  {
    "library": "scipy",
    "name": "__getnewargs_ex__",
    "source_code": "def __getnewargs_ex__(self):\n    return (_tuple(self), self.__dict__)",
    "docstring": "Return self as a plain tuple. Used by copy and pickle.",
    "type": "function",
    "file_path": "scipy\\scipy\\_lib\\_bunch.py",
    "ast_data": "FunctionDef name:__getnewargs_ex__ arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "Angle3",
    "source_code": "@_register_style(_style_list)\nclass Angle3(_Base):\n\n    def __init__(self, angleA=90, angleB=0):\n        self.angleA = angleA\n        self.angleB = angleB\n\n    def connect(self, posA, posB):\n        x1, y1 = posA\n        x2, y2 = posB\n        cosA = math.cos(math.radians(self.angleA))\n        sinA = math.sin(math.radians(self.angleA))\n        cosB = math.cos(math.radians(self.angleB))\n        sinB = math.sin(math.radians(self.angleB))\n        cx, cy = get_intersection(x1, y1, cosA, sinA, x2, y2, cosB, sinB)\n        vertices = [(x1, y1), (cx, cy), (x2, y2)]\n        codes = [Path.MOVETO, Path.CURVE3, Path.CURVE3]\n        return Path(vertices, codes)",
    "docstring": "Creates a simple quadratic Bézier curve between two points. The middle control point is placed at the intersecting point of two lines which cross the start and end point, and have a slope of *angleA* and *angleB*, respectively.",
    "type": "class",
    "file_path": "matplotlib\\lib\\matplotlib\\patches.py",
    "ast_data": "ClassDef name:Angle3 FunctionDef name:__init__ arg:self arg:angleA arg:angleB arguments arg arg arg Assign Assign FunctionDef name:connect arg:self arg:posA arg:posB arguments arg arg arg Assign Assign Assign Call Call Assign Call Call Assign Call Call Assign Call Call Assign Call Assign Assign Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "metrics",
    "source_code": "@tf.function\ndef metrics(expected_box_encodings, expected_scores, actual_box_encodings, actual_scores):\n    squashed_expected_scores = tf.math.divide(1.0, 1.0 + tf.math.exp(-expected_scores))\n    squashed_actual_scores = tf.math.divide(1.0, 1.0 + tf.math.exp(-actual_scores))\n    kld_metric = kl_divergence.symmetric_kl_divergence(expected_scores, actual_scores)\n    high_scoring_indices = tf.math.logical_or(tf.math.greater(squashed_expected_scores, 0.1), tf.math.greater(squashed_actual_scores, 0.1))\n    high_scoring_actual_boxes = tf.where(condition=tf.broadcast_to(input=high_scoring_indices, shape=tf.shape(actual_box_encodings)), x=actual_box_encodings, y=expected_box_encodings)\n    box_diff = high_scoring_actual_boxes - expected_box_encodings\n    box_squared_diff = tf.math.pow(box_diff, 2)\n    box_mse = tf.divide(tf.math.reduce_sum(box_squared_diff), tf.math.maximum(tf.math.count_nonzero(high_scoring_indices, dtype=tf.float32), 1.0))\n    ok = tf.logical_and(kld_metric < 0.1, box_mse < 0.01)\n    return [kld_metric, box_mse, ok]",
    "docstring": "Calculate metrics from expected and actual blazeface outputs. Args: expected_box_encodings: box encodings from model expected_scores: classifications from model actual_box_encodings: golden box encodings actual_scores: golden classifications Returns: two-item list with classification error and localization error",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\lite\\experimental\\acceleration\\mini_benchmark\\metrics\\blazeface_metrics.py",
    "ast_data": "FunctionDef name:metrics arg:expected_box_encodings arg:expected_scores arg:actual_box_encodings arg:actual_scores arguments arg arg arg arg Assign Call Call Assign Call Call Assign Call Assign Call Call Call Assign Call Call Call Assign Assign Call Assign Call Call Call Call Assign Call Compare Compare Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_is_variant_with_internal_stacking",
    "source_code": "def _is_variant_with_internal_stacking(t):\n    type_id = _variant_type_id(t)\n    return type_id in _INTERNAL_STACKING_TYPE_IDS",
    "docstring": "Identifies variant tensors which pfor always maintains as scalars. For these, the pfor tensor is recorded as \"stacked\" if the content of the variant tensor (e.g. the elements of a TensorList) are all stacked. Args: t: A tensor to identify. Returns: True if is a TensorList/Optional, False not, None if unknown.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\parallel_for\\pfor.py",
    "ast_data": "FunctionDef name:_is_variant_with_internal_stacking arg:t arguments arg Assign Call Return return:yes Compare"
  },
  {
    "library": "tensorflow",
    "name": "_XlaScope",
    "source_code": "class _XlaScope(object):\n\n    def __init__(self, count, depth):\n        self.count = count\n        self.depth = depth",
    "docstring": "Keeps track of previous XLA scope calls, and depth of current call.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\compiler\\xla\\jit.py",
    "ast_data": "ClassDef name:_XlaScope FunctionDef name:__init__ arg:self arg:count arg:depth arguments arg arg arg Assign Assign"
  },
  {
    "library": "cherrypy",
    "name": "get_ranges",
    "source_code": "def get_ranges(headervalue, content_length):\n    if not headervalue:\n        return None\n    result = []\n    bytesunit, byteranges = headervalue.split('=', 1)\n    for brange in byteranges.split(','):\n        start, stop = [x.strip() for x in brange.split('-', 1)]\n        if start:\n            if not stop:\n                stop = content_length - 1\n            start, stop = (int(start), int(stop))\n            if start >= content_length:\n                continue\n            if stop < start:\n                return None\n            result.append((start, stop + 1))\n        else:\n            if not stop:\n                return None\n            if int(stop) > content_length:\n                result.append((0, content_length))\n            else:\n                result.append((content_length - int(stop), content_length))\n    return result",
    "docstring": "Return a list of (start, stop) indices from a Range header, or None. Each (start, stop) tuple will be composed of two ints, which are suitable for use in a slicing operation. That is, the header \"Range: bytes=3-6\", if applied against a Python string, is requesting resource[3:7]. This function will return the list [(3, 7)]. If this function returns an empty list, you should return HTTP 416.",
    "type": "function",
    "file_path": "cherrypy\\cherrypy\\lib\\httputil.py",
    "ast_data": "FunctionDef name:get_ranges arg:headervalue arg:content_length arguments arg arg If Return return:no Assign Assign Call For Call Assign Call Call If If Assign Assign Call Call If Compare If Compare Return return:no Call If Return return:no If Compare Call Call Call Call Return return:yes"
  },
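A few calls matching the docstring's slicing semantics, assuming cherrypy is installed (the function lives in `cherrypy.lib.httputil`); the expected results follow directly from the code above.

```python
from cherrypy.lib.httputil import get_ranges

print(get_ranges("bytes=3-6", content_length=10))  # [(3, 7)]  -> resource[3:7]
print(get_ranges("bytes=-4", content_length=10))   # [(6, 10)] -> last four bytes
print(get_ranges("bytes=8-2", content_length=10))  # None      -> stop < start is unsatisfiable
print(get_ranges("bytes=12-", content_length=10))  # []        -> start past EOF; caller should send 416
```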
  {
    "library": "pytorch",
    "name": "WriteItem",
    "source_code": "@dataclass(frozen=True)\nclass WriteItem:\n    index: MetadataIndex\n    type: WriteItemType\n    bytes_io_data: Optional[BytesIOWriteData] = None\n    tensor_data: Optional[TensorWriteData] = None\n\n    def tensor_storage_size(self) -> Optional[int]:\n        if self.tensor_data is None:\n            return None\n        numels = reduce(operator.mul, self.tensor_data.size, 1)\n        dtype_size = torch._utils._element_size(self.tensor_data.properties.dtype)\n        return numels * dtype_size",
    "docstring": "Dataclass which holds information about what needs to be written to storage.",
    "type": "class",
    "file_path": "pytorch\\torch\\distributed\\checkpoint\\planner.py",
    "ast_data": "ClassDef name:WriteItem FunctionDef name:tensor_storage_size arg:self arguments arg If Compare Return return:no Assign Call Assign Call Return return:yes Call"
  },
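The storage-size arithmetic in `tensor_storage_size`, reproduced with a public element-size query instead of the private `torch._utils._element_size` helper; the shape and dtype here are hypothetical.

```python
import operator
from functools import reduce
import torch

size = (4, 8, 16)      # hypothetical tensor shape
dtype = torch.float32

numels = reduce(operator.mul, size, 1)                    # 512 elements
dtype_size = torch.empty((), dtype=dtype).element_size()  # 4 bytes for float32
print(numels * dtype_size)                                # 2048 bytes
```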
  {
    "library": "matplotlib",
    "name": "__init__",
    "source_code": "def __init__(self, widths, heights, angles, *, units='points', **kwargs):\n    super().__init__(**kwargs)\n    self.set_widths(widths)\n    self.set_heights(heights)\n    self.set_angles(angles)\n    self._units = units\n    self.set_transform(transforms.IdentityTransform())\n    self._transforms = np.empty((0, 3, 3))\n    self._paths = [mpath.Path.unit_circle()]",
    "docstring": "Parameters ---------- widths : array-like The lengths of the first axes (e.g., major axis lengths). heights : array-like The lengths of second axes. angles : array-like The angles of the first axes, degrees CCW from the x-axis. units : {'points', 'inches', 'dots', 'width', 'height', 'x', 'y', 'xy'} The units in which majors and minors are given; 'width' and 'height' refer to the dimensions of the axes, while 'x' and 'y' refer to the *offsets* data units. 'xy' differs from all others in that the angle as plotted varies with the aspect ratio, and equals the specified angle only when the aspect ratio is unity. Hence it behaves the same as the with `Collection`.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\collections.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:widths arg:heights arg:angles arguments arg arg arg arg arg arg Call Call Call Call Call Assign Call Call Assign Call Assign Call"
  },
  {
    "library": "kornia",
    "name": "trans_x",
    "source_code": "@classmethod\ndef trans_x(cls, x: Tensor) -> Se2:\n    zs = zeros_like(x)\n    return cls.trans(x, zs)",
    "docstring": "Construct a x-axis translation. Args: x: the x-axis translation.",
    "type": "method",
    "file_path": "kornia\\kornia\\geometry\\liegroup\\se2.py",
    "ast_data": "FunctionDef name:trans_x arg:cls arg:x arguments arg arg Assign Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_PolygammaGrad",
    "source_code": "@ops.RegisterGradient('Polygamma')\ndef _PolygammaGrad(op: ops.Operation, grad):\n    n = op.inputs[0]\n    x = op.inputs[1]\n    sn = array_ops.shape(n)\n    sx = array_ops.shape(x)\n    unused_rn, rx = gen_array_ops.broadcast_gradient_args(sn, sx)\n    with ops.control_dependencies([grad]):\n        n = math_ops.conj(n)\n        x = math_ops.conj(x)\n        partial_x = math_ops.polygamma(n + 1, x)\n        return (None, array_ops.reshape(math_ops.reduce_sum(partial_x * grad, rx), sx))",
    "docstring": "Returns gradient of psi(n, x) with respect to n and x.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\math_grad.py",
    "ast_data": "FunctionDef name:_PolygammaGrad arg:op arg:grad arguments arg arg Assign Assign Assign Call Assign Call Assign Call With Call Assign Call Assign Call Assign Call Return return:yes Call Call Call"
  },
  {
    "library": "matplotlib",
    "name": "ToolManagerMessageEvent",
    "source_code": "class ToolManagerMessageEvent:\n\n    def __init__(self, name, sender, message):\n        self.name = name\n        self.sender = sender\n        self.message = message",
    "docstring": "Event carrying messages from toolmanager. Messages usually get displayed to the user by the toolbar.",
    "type": "class",
    "file_path": "matplotlib\\lib\\matplotlib\\backend_managers.py",
    "ast_data": "ClassDef name:ToolManagerMessageEvent FunctionDef name:__init__ arg:self arg:name arg:sender arg:message arguments arg arg arg arg Assign Assign Assign"
  },
  {
    "library": "matplotlib",
    "name": "__init__",
    "source_code": "def __init__(self, interval=None, callbacks=None):\n    self.callbacks = [] if callbacks is None else callbacks.copy()\n    self.interval = 1000 if interval is None else interval\n    self.single_shot = False",
    "docstring": "Parameters ---------- interval : int, default: 1000ms The time between timer events in milliseconds. Will be stored as `~.TimerBase.add_callback~.TimerBase.remove_callback` can be used.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\backend_bases.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:interval arg:callbacks arguments arg arg arg Assign Compare Call Assign Compare Assign"
  },
  {
    "library": "kornia",
    "name": "spatial_gradient",
    "source_code": "def spatial_gradient(input: Tensor, mode: str='sobel', order: int=1, normalized: bool=True) -> Tensor:\n    KORNIA_CHECK_IS_TENSOR(input)\n    KORNIA_CHECK_SHAPE(input, ['B', 'C', 'H', 'W'])\n    kernel = get_spatial_gradient_kernel2d(mode, order, device=input.device, dtype=input.dtype)\n    if normalized:\n        kernel = normalize_kernel2d(kernel)\n    b, c, h, w = input.shape\n    tmp_kernel = kernel[:, None, ...]\n    spatial_pad = [kernel.size(1) // 2, kernel.size(1) // 2, kernel.size(2) // 2, kernel.size(2) // 2]\n    out_channels: int = 3 if order == 2 else 2\n    padded_inp: Tensor = pad(input.reshape(b * c, 1, h, w), spatial_pad, 'replicate')\n    out = F.conv2d(padded_inp, tmp_kernel, groups=1, padding=0, stride=1)\n    return out.reshape(b, c, out_channels, h, w)",
    "docstring": "Compute the first order image derivative in both x and y using a Sobel operator. .. image:: _static/img/spatial_gradient.png Args: input: input image tensor with shape :math:. mode: derivatives modality, can be: or . order: the order of the derivatives. normalized: whether the output is normalized. Return: the derivatives of the input feature map. with shape :math:. .. note:: See a working example __. Examples: >>> input = torch.rand(1, 3, 4, 4) >>> output = spatial_gradient(input) # 1x3x2x4x4 >>> output.shape torch.Size([1, 3, 2, 4, 4])",
    "type": "function",
    "file_path": "kornia\\kornia\\filters\\sobel.py",
    "ast_data": "FunctionDef name:spatial_gradient arg:input arg:mode arg:order arg:normalized arguments arg arg arg arg Call Call Assign Call If Assign Call Assign Assign Assign Call Call Call Call Compare Call Call Assign Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "maximum",
    "source_code": "def maximum(self, seq: list[Union[int, str]]) -> Union[int, str]:\n    items = self._constant_fold(max, seq)\n    if len(items) <= 1:\n        return items[0]\n    if self.mode == 'python':\n        return f'max({', '.join(map(str, items))})'\n    return functools.reduce(lambda x, y: f'std::max({x}, {y})', items)",
    "docstring": "Codegen for max function with constant folding, constants are represented as int",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\runtime\\triton_heuristics.py",
    "ast_data": "FunctionDef name:maximum arg:self arg:seq arguments arg arg Assign Call If Compare Call Return return:yes If Compare Return return:yes Call Call Return return:yes Call arguments arg arg"
  },
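A sketch of the same codegen with the constant folding inlined: the real `_constant_fold` lives on the class, so the fold over int literals is reimplemented directly here.

```python
import functools
from typing import Union

def codegen_max(items: list[Union[int, str]], mode: str = "python") -> Union[int, str]:
    # collapse the int constants with max(), keep the symbolic names
    consts = [x for x in items if isinstance(x, int)]
    syms = [x for x in items if not isinstance(x, int)]
    folded = ([max(consts)] if consts else []) + syms
    if len(folded) == 1:
        return folded[0]
    if mode == "python":
        return f"max({', '.join(map(str, folded))})"
    return functools.reduce(lambda x, y: f"std::max({x}, {y})", folded)

print(codegen_max([4, "xnumel", 7]))               # max(7, xnumel)
print(codegen_max([4, "xnumel", 7], mode="cpp"))   # std::max(7, xnumel)
print(codegen_max([4, 7]))                         # 7 (fully folded)
```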
  {
    "library": "tensorflow",
    "name": "_string_split_transformer",
    "source_code": "def _string_split_transformer(parent, node, full_name, name, logs):\n    for i, kw in enumerate(node.keywords):\n        if kw.arg == 'skip_empty':\n            if _is_ast_false(kw.value):\n                logs.append((ast_edits.INFO, node.lineno, node.col_offset, 'removed argument skip_empty for tf.string_split.'))\n                node.keywords.pop(i)\n                break\n            else:\n                return _rename_to_compat_v1(node, full_name, logs, \"tf.string_split's replacement no longer takes the skip_empty argument.\")\n    found_sep = False\n    for i, kw in enumerate(node.keywords):\n        if kw.arg == 'sep':\n            found_sep = True\n            if isinstance(kw.value, ast.Str):\n                if kw.value.s == '':\n                    node = _rename_func(node, full_name, 'tf.strings.bytes_split', logs, 'Splitting bytes is not handled by tf.strings.bytes_split().')\n                    node.keywords.pop(i)\n            else:\n                return _rename_to_compat_v1(node, full_name, logs, \"The semantics for tf.string_split's sep parameter have changed when sep is the empty string; but sep is not a string literal, so we can't tell if it's an empty string.\")\n    if not found_sep:\n        return _rename_to_compat_v1(node, full_name, logs, \"The semantics for tf.string_split's sep parameter have changed when sep unspecified: it now splits on all whitespace, not just the space character.\")\n    return _string_split_rtype_transformer(parent, node, full_name, name, logs)",
    "docstring": "Update tf.string_split arguments: skip_empty, sep, result_type, source.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\tools\\compatibility\\tf_upgrade_v2.py",
    "ast_data": "FunctionDef name:_string_split_transformer arg:parent arg:node arg:full_name arg:name arg:logs arguments arg arg arg arg arg For Call If Compare If Call Call Call Return return:yes Call Assign For Call If Compare Assign If Call If Compare Assign Call Call Return return:yes Call If Return return:yes Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_ObjectIdentityWrapper",
    "source_code": "class _ObjectIdentityWrapper(object):\n    __slots__ = ['_wrapped', '__weakref__']\n\n    def __init__(self, wrapped):\n        self._wrapped = wrapped\n\n    @property\n    def unwrapped(self):\n        return self._wrapped\n\n    def _assert_type(self, other):\n        if not isinstance(other, _ObjectIdentityWrapper):\n            raise TypeError('Cannot compare wrapped object with unwrapped object')\n\n    def __lt__(self, other):\n        self._assert_type(other)\n        return id(self._wrapped) < id(other._wrapped)\n\n    def __gt__(self, other):\n        self._assert_type(other)\n        return id(self._wrapped) > id(other._wrapped)\n\n    def __eq__(self, other):\n        if other is None:\n            return False\n        self._assert_type(other)\n        return self._wrapped is other._wrapped\n\n    def __ne__(self, other):\n        return not self.__eq__(other)\n\n    def __hash__(self):\n        return id(self._wrapped)\n\n    def __repr__(self):\n        return '<{} wrapping {!r}>'.format(type(self).__name__, self._wrapped)",
    "docstring": "Wraps an object, mapping __eq__ on wrapper to \"is\" on wrapped. Since __eq__ is based on object identity, it's safe to also define __hash__ based on object ids. This lets us add unhashable types like trackable _ListWrapper objects to object-identity collections.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\utils\\object_identity.py",
    "ast_data": "ClassDef name:_ObjectIdentityWrapper Assign FunctionDef name:__init__ arg:self arg:wrapped arguments arg arg Assign FunctionDef name:unwrapped arg:self arguments arg Return return:yes FunctionDef name:_assert_type arg:self arg:other arguments arg arg If Call Raise Call FunctionDef name:__lt__ arg:self arg:other arguments arg arg Call Return return:yes Compare Call Call FunctionDef name:__gt__ arg:self arg:other arguments arg arg Call Return return:yes Compare Call Call FunctionDef name:__eq__ arg:self arg:other arguments arg arg If Compare Return return:yes Call Return return:yes Compare FunctionDef name:__ne__ arg:self arg:other arguments arg arg Return return:yes Call FunctionDef name:__hash__ arg:self arguments arg Return return:yes Call FunctionDef name:__repr__ arg:self arguments arg Return return:yes Call Call"
  },
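A compact demonstration of the identity-vs-equality distinction the wrapper provides; `IdWrap` below is a stripped-down stand-in, not the TensorFlow class.

```python
class IdWrap:
    def __init__(self, obj):
        self.obj = obj
    def __eq__(self, other):
        return isinstance(other, IdWrap) and self.obj is other.obj
    def __hash__(self):
        # hashing by id lets unhashable objects live in sets/dicts
        return id(self.obj)

a, b = [1, 2], [1, 2]   # equal by value, distinct by identity; lists are unhashable
s = {IdWrap(a), IdWrap(b)}
print(len(s))                  # 2 -- identity, not value, decides membership
print(IdWrap(a) == IdWrap(a))  # True -- same underlying object
print(a == b, a is b)          # True False
```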
  {
    "library": "seaborn",
    "name": "dim_ratios",
    "source_code": "def dim_ratios(self, colors, dendrogram_ratio, colors_ratio):\n    ratios = [dendrogram_ratio]\n    if colors is not None:\n        if np.ndim(colors) > 2:\n            n_colors = len(colors)\n        else:\n            n_colors = 1\n        ratios += [n_colors * colors_ratio]\n    ratios.append(1 - sum(ratios))\n    return ratios",
    "docstring": "Get the proportions of the figure taken up by each axes.",
    "type": "method",
    "file_path": "seaborn\\seaborn\\matrix.py",
    "ast_data": "FunctionDef name:dim_ratios arg:self arg:colors arg:dendrogram_ratio arg:colors_ratio arguments arg arg arg arg Assign If Compare If Compare Call Assign Call Assign Call Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "_parse_args",
    "source_code": "def _parse_args(*args, caller_name='function'):\n    X = Y = C = None\n    nargs = len(args)\n    if nargs == 2:\n        U, V = np.atleast_1d(*args)\n    elif nargs == 3:\n        U, V, C = np.atleast_1d(*args)\n    elif nargs == 4:\n        X, Y, U, V = np.atleast_1d(*args)\n    elif nargs == 5:\n        X, Y, U, V, C = np.atleast_1d(*args)\n    else:\n        raise _api.nargs_error(caller_name, takes='from 2 to 5', given=nargs)\n    nr, nc = (1, U.shape[0]) if U.ndim == 1 else U.shape\n    if X is not None:\n        X = X.ravel()\n        Y = Y.ravel()\n        if len(X) == nc and len(Y) == nr:\n            X, Y = (a.ravel() for a in np.meshgrid(X, Y))\n        elif len(X) != len(Y):\n            raise ValueError(f'X and Y must be the same size, but X.size is {X.size} and Y.size is {Y.size}.')\n    else:\n        indexgrid = np.meshgrid(np.arange(nc), np.arange(nr))\n        X, Y = (np.ravel(a) for a in indexgrid)\n    return (X, Y, U, V, C)",
    "docstring": "Helper function to parse positional parameters for colored vector plots. This is currently used for Quiver and Barbs. Parameters ---------- *args : list list of 2-5 arguments. Depending on their number they are parsed to:: U, V U, V, C X, Y, U, V X, Y, U, V, C caller_name : str Name of the calling method (used in error messages).",
    "type": "function",
    "file_path": "matplotlib\\lib\\matplotlib\\quiver.py",
    "ast_data": "FunctionDef name:_parse_args arguments arg arg Assign Assign Call If Compare Assign Call If Compare Assign Call If Compare Assign Call If Compare Assign Call Raise Call Assign Compare If Compare Assign Call Assign Call If BoolOp Compare Call Compare Call Assign Call Call If Compare Call Call Raise Call Assign Call Call Call Assign Call Return return:yes"
  },
  {
    "library": "pandas",
    "name": "format_dateaxis",
    "source_code": "def format_dateaxis(subplot, freq: BaseOffset, index: DatetimeIndex | PeriodIndex) -> None:\n    import matplotlib.pyplot as plt\n    if isinstance(index, ABCPeriodIndex):\n        majlocator = TimeSeries_DateLocator(freq, dynamic_mode=True, minor_locator=False, plot_obj=subplot)\n        minlocator = TimeSeries_DateLocator(freq, dynamic_mode=True, minor_locator=True, plot_obj=subplot)\n        subplot.xaxis.set_major_locator(majlocator)\n        subplot.xaxis.set_minor_locator(minlocator)\n        majformatter = TimeSeries_DateFormatter(freq, dynamic_mode=True, minor_locator=False, plot_obj=subplot)\n        minformatter = TimeSeries_DateFormatter(freq, dynamic_mode=True, minor_locator=True, plot_obj=subplot)\n        subplot.xaxis.set_major_formatter(majformatter)\n        subplot.xaxis.set_minor_formatter(minformatter)\n        subplot.format_coord = functools.partial(_format_coord, freq)\n    elif isinstance(index, ABCTimedeltaIndex):\n        subplot.xaxis.set_major_formatter(TimeSeries_TimedeltaFormatter())\n    else:\n        raise TypeError('index type not supported')\n    plt.draw_if_interactive()",
    "docstring": "Pretty-formats the date axis (x-axis). Major and minor ticks are automatically set for the frequency of the current underlying series. As the dynamic mode is activated by default, changing the limits of the x axis will intelligently change the positions of the ticks.",
    "type": "function",
    "file_path": "pandas\\pandas\\plotting\\_matplotlib\\timeseries.py",
    "ast_data": "FunctionDef name:format_dateaxis arg:subplot arg:freq arg:index arguments arg arg arg If Call Assign Call Assign Call Call Call Assign Call Assign Call Call Call Assign Call If Call Call Call Raise Call Call"
  },
  {
    "library": "pandas",
    "name": "maybe_mangle_lambdas",
    "source_code": "def maybe_mangle_lambdas(agg_spec: Any) -> Any:\n    is_dict = is_dict_like(agg_spec)\n    if not (is_dict or is_list_like(agg_spec)):\n        return agg_spec\n    mangled_aggspec = type(agg_spec)()\n    if is_dict:\n        for key, aggfuncs in agg_spec.items():\n            if is_list_like(aggfuncs) and (not is_dict_like(aggfuncs)):\n                mangled_aggfuncs = _managle_lambda_list(aggfuncs)\n            else:\n                mangled_aggfuncs = aggfuncs\n            mangled_aggspec[key] = mangled_aggfuncs\n    else:\n        mangled_aggspec = _managle_lambda_list(agg_spec)\n    return mangled_aggspec",
    "docstring": "Make new lambdas with unique names. Parameters ---------- agg_spec : Any An argument to GroupBy.agg. Non-dict-like are pass through as is. For dict-like a new spec is returned with name-mangled lambdas. Returns ------- mangled : Any Same type as the input. Examples -------- >>> maybe_mangle_lambdas(\"sum\") 'sum' >>> maybe_mangle_lambdas([lambda: 1, lambda: 2]) # doctest: +SKIP [, .f(*args, **kwargs)>]",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\apply.py",
    "ast_data": "FunctionDef name:maybe_mangle_lambdas arg:agg_spec arguments arg Assign Call If BoolOp Call Return return:yes Assign Call Call If For Call If BoolOp Call Call Assign Call Assign Assign Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "disable_eager_execution",
    "source_code": "@tf_export(v1=['disable_eager_execution'])\ndef disable_eager_execution() -> None:\n    _api_usage_gauge.get_cell().set(False)\n    logging.vlog(1, 'Disabling eager execution')\n    context.default_execution_mode = context.GRAPH_MODE\n    c = context.context_safe()\n    if c is not None:\n        c._thread_local_data.is_eager = False",
    "docstring": "Disables eager execution. This function can only be called before any Graphs, Ops, or Tensors have been created. @compatibility(TF2) This function is not necessary if you are using TF2. Eager execution is enabled by default. If you want to use Graph mode please consider [tf.function]( @end_compatibility",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\ops.py",
    "ast_data": "FunctionDef name:disable_eager_execution arguments Call Call Call Assign Assign Call If Compare Assign Call"
  },
  {
    "library": "scikit-learn",
    "name": "split",
    "source_code": "def split(self, X, y=None, groups=None):\n    return super().split(X, y, groups)",
    "docstring": "Generate indices to split data into training and test set. Parameters ---------- X : array-like of shape (n_samples, n_features) Training data, where is the number of samples and is the number of features. y : array-like of shape (n_samples,), default=None The target variable for supervised learning problems. groups : array-like of shape (n_samples,) Group labels for the samples used while splitting the dataset into train/test set. Yields ------ train : ndarray The training set indices for that split. test : ndarray The testing set indices for that split.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\model_selection\\_split.py",
    "ast_data": "FunctionDef name:split arg:self arg:X arg:y arg:groups arguments arg arg arg arg Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "deserialize",
    "source_code": "def deserialize(config, custom_objects=None):\n    populate_deserializable_objects()\n    return generic_utils.deserialize_keras_object(config, module_objects=LOCAL.ALL_OBJECTS, custom_objects=custom_objects, printable_module_name='layer')",
    "docstring": "Instantiates a layer from a config dictionary. Args: config: dict of the form {'class_name': str, 'config': dict} custom_objects: dict mapping class names (or function names) of custom (non-Keras) objects to class/functions Returns: Layer instance (may be Model, Sequential, Network, Layer...)",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\layers\\serialization.py",
    "ast_data": "FunctionDef name:deserialize arg:config arg:custom_objects arguments arg arg Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "remove_temp_dirpath",
    "source_code": "def remove_temp_dirpath(dirpath, strategy):\n    if strategy is None:\n        strategy = distribute_lib.get_strategy()\n    if strategy is None:\n        return\n    if strategy.extended._in_multi_worker_mode() and (not strategy.extended.should_checkpoint):\n        file_io.delete_recursively(_get_temp_dir(dirpath, strategy))",
    "docstring": "Removes the temp path after writing is finished. Args: dirpath: Original dirpath that would be used without distribution. strategy: The tf.distribute strategy object currently used.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\distribute\\distributed_file_utils.py",
    "ast_data": "FunctionDef name:remove_temp_dirpath arg:dirpath arg:strategy arguments arg arg If Compare Assign Call If Compare Return return:no If BoolOp Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "get_target_type_str",
    "source_code": "def get_target_type_str(node: Node, gm: GraphModule) -> str:\n    target_type = ''\n    if node.op in ('call_function', 'call_method'):\n        target_type = torch.typename(node.target)\n    elif node.op == 'call_module':\n        assert isinstance(node.target, str)\n        target_mod = getattr_from_fqn(gm, node.target)\n        target_type = torch.typename(target_mod)\n    return target_type",
    "docstring": "Returns a string representation of the type of the function or module pointed to by this node, or '' for other node types.",
    "type": "function",
    "file_path": "pytorch\\torch\\ao\\ns\\fx\\utils.py",
    "ast_data": "FunctionDef name:get_target_type_str arg:node arg:gm arguments arg arg Assign If Compare Assign Call If Compare Call Assign Call Assign Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "step",
    "source_code": "def step(self, closure: Optional[Callable[[], float]]=None) -> Optional[float]:\n    return self._optimizer.step(closure=closure)",
    "docstring": "Perform a single optimization step. This will call :meth: on the wrapped optimizer.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\optim\\named_optimizer.py",
    "ast_data": "FunctionDef name:step arg:self arg:closure arguments arg arg Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "update",
    "source_code": "def update(self, data):\n    data = np.atleast_1d(np.array(data, dtype=object))\n    convertible = True\n    for val in OrderedDict.fromkeys(data):\n        _api.check_isinstance((str, bytes), value=val)\n        if convertible:\n            convertible = self._str_is_convertible(val)\n        if val not in self._mapping:\n            self._mapping[val] = next(self._counter)\n    if data.size and convertible:\n        _log.info('Using categorical units to plot a list of strings that are all parsable as floats or dates. If these strings should be plotted as numbers, cast to the appropriate data type before plotting.')",
    "docstring": "Map new values to integer identifiers. Parameters ---------- data : iterable of str or bytes Raises ------ TypeError If elements in *data* are neither str nor bytes.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\category.py",
    "ast_data": "FunctionDef name:update arg:self arg:data arguments arg arg Assign Call Call Assign For Call Call If Assign Call If Compare Assign Call If BoolOp Call"
  },
  {
    "library": "pytorch",
    "name": "wrap_backend_debug",
    "source_code": "def wrap_backend_debug(unconfigured_compiler_fn, compiler_name: str):\n    return WrapBackendDebug(unconfigured_compiler_fn, compiler_name)",
    "docstring": "A minifier decorator that wraps the TorchDynamo produced Fx graph modules. As opposed to wrap_compiler_debug, this wrapper intercepts at the TorchDynamo produced Fx Graph Module. This makes it backend-agnostic to some level, e.g., it is useful for minifying issues related to Aot Autograd tracing. If an error is found, we minify and save the minified repro in repro.tar.gz.",
    "type": "function",
    "file_path": "pytorch\\torch\\_dynamo\\repro\\after_dynamo.py",
    "ast_data": "FunctionDef name:wrap_backend_debug arg:unconfigured_compiler_fn arg:compiler_name arguments arg arg Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "_check_params_groups_deprecation",
    "source_code": "def _check_params_groups_deprecation(fit_params, params, groups, version):\n    if params is not None and fit_params is not None:\n        raise ValueError(f'`params` and `fit_params` cannot both be provided. Pass parameters via `params`. `fit_params` is deprecated and will be removed in version {version}.')\n    elif fit_params is not None:\n        warnings.warn('`fit_params` is deprecated and will be removed in version {version}. Pass parameters via `params` instead.', FutureWarning)\n        params = fit_params\n    params = {} if params is None else params\n    _check_groups_routing_disabled(groups)\n    return params",
    "docstring": "A helper function to check deprecations on and . # TODO(SLEP6): To be removed when set_config(enable_metadata_routing=False) is not # possible.",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\model_selection\\_validation.py",
    "ast_data": "FunctionDef name:_check_params_groups_deprecation arg:fit_params arg:params arg:groups arg:version arguments arg arg arg arg If BoolOp Compare Compare Raise Call If Compare Call Assign Assign Compare Call Return return:yes"
  },
  {
    "library": "pandas",
    "name": "convert_missing_indexer",
    "source_code": "def convert_missing_indexer(indexer):\n    if isinstance(indexer, dict):\n        indexer = indexer['key']\n        if isinstance(indexer, bool):\n            raise KeyError('cannot use a single bool to index into setitem')\n        return (indexer, True)\n    return (indexer, False)",
    "docstring": "Reverse convert a missing indexer, which is a dict return the scalar indexer and a boolean indicating if we converted",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\indexing.py",
    "ast_data": "FunctionDef name:convert_missing_indexer arg:indexer arguments arg If Call Assign If Call Raise Call Return return:yes Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "record",
    "source_code": "def record(self) -> None:\n    torch._C._mps_recordEvent(self.__eventId)",
    "docstring": "Records the event in the default stream.",
    "type": "method",
    "file_path": "pytorch\\torch\\mps\\event.py",
    "ast_data": "FunctionDef name:record arg:self arguments arg Call"
  },
  {
    "library": "tensorflow",
    "name": "HardwareFeature",
    "source_code": "@tf_export('tpu.experimental.HardwareFeature')\nclass HardwareFeature(object):\n\n    def __init__(self, tpu_hardware_feature_proto):\n        self.tpu_hardware_feature_proto = tpu_hardware_feature_proto\n\n    class EmbeddingFeature(enum.Enum):\n        UNSUPPORTED = 'UNSUPPORTED'\n        V1 = 'V1'\n        V2 = 'V2'\n\n    @classmethod\n    def _embedding_feature_proto_to_string(cls, embedding_feature_proto):\n        embedding_feature_proto_to_string_map = {topology_pb2.TPUHardwareFeature.EmbeddingFeature.UNSUPPORTED: HardwareFeature.EmbeddingFeature.UNSUPPORTED, topology_pb2.TPUHardwareFeature.EmbeddingFeature.V1: HardwareFeature.EmbeddingFeature.V1, topology_pb2.TPUHardwareFeature.EmbeddingFeature.V2: HardwareFeature.EmbeddingFeature.V2}\n        return embedding_feature_proto_to_string_map.get(embedding_feature_proto, HardwareFeature.EmbeddingFeature.UNSUPPORTED)\n\n    @property\n    def embedding_feature(self):\n        return HardwareFeature._embedding_feature_proto_to_string(self.tpu_hardware_feature_proto.embedding_feature)\n\n    @property\n    def num_embedding_devices_per_chip(self):\n        return self.tpu_hardware_feature_proto.num_embedding_devices_per_chip",
    "docstring": "class holds all the feature info about the TPU.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\tpu\\tpu_hardware_feature.py",
    "ast_data": "ClassDef name:HardwareFeature FunctionDef name:__init__ arg:self arg:tpu_hardware_feature_proto arguments arg arg Assign ClassDef name:EmbeddingFeature Assign Assign Assign FunctionDef name:_embedding_feature_proto_to_string arg:cls arg:embedding_feature_proto arguments arg arg Assign Return return:yes Call FunctionDef name:embedding_feature arg:self arguments arg Return return:yes Call FunctionDef name:num_embedding_devices_per_chip arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "numpy",
    "name": "right_shift",
    "source_code": "def right_shift(a, n):\n    m = getmask(a)\n    if m is nomask:\n        d = umath.right_shift(filled(a), n)\n        return masked_array(d)\n    else:\n        d = umath.right_shift(filled(a, 0), n)\n        return masked_array(d, mask=m)",
    "docstring": "Shift the bits of an integer to the right. This is the masked array version of , for details see that function. See Also -------- numpy.right_shift Examples -------- >>> import numpy as np >>> import numpy.ma as ma >>> x = [11, 3, 8, 1] >>> mask = [0, 0, 0, 1] >>> masked_x = ma.masked_array(x, mask) >>> masked_x masked_array(data=[11, 3, 8, --], mask=[False, False, False, True], fill_value=999999) >>> ma.right_shift(masked_x,1) masked_array(data=[5, 1, 4, --], mask=[False, False, False, True], fill_value=999999)",
    "type": "function",
    "file_path": "numpy\\numpy\\ma\\core.py",
    "ast_data": "FunctionDef name:right_shift arg:a arg:n arguments arg arg Assign Call If Compare Assign Call Call Return return:yes Call Assign Call Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "resolve_bytes",
    "source_code": "def resolve_bytes(self, read_item: ReadItem) -> io.BytesIO:\n    raise NotImplementedError('LoadPlanner.resolve_bytes is not implemented')",
    "docstring": "Return the BytesIO to be used by the StorageReader to load . The BytesIO should alias with one on the underlying state_dict as StorageReader will replace its contents.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\checkpoint\\planner.py",
    "ast_data": "FunctionDef name:resolve_bytes arg:self arg:read_item arguments arg arg Raise Call"
  },
  {
    "library": "django",
    "name": "get_context_data",
    "source_code": "def get_context_data(self, **kwargs):\n    return {'obj': kwargs.get('item'), 'site': kwargs.get('site')}",
    "docstring": "Return a dictionary to use as extra context if either `` are used. Default implementation preserves the old behavior of using {'obj': item, 'site': current_site} as the context.",
    "type": "method",
    "file_path": "django\\django\\contrib\\syndication\\views.py",
    "ast_data": "FunctionDef name:get_context_data arg:self arguments arg arg Return return:yes Call Call"
  },
  {
    "library": "pandas",
    "name": "expanding",
    "source_code": "@final\ndef expanding(self, min_periods: int=1, method: str='single') -> ExpandingGroupby:\n    from pandas.core.window import ExpandingGroupby\n    return ExpandingGroupby(self._selected_obj, min_periods=min_periods, method=method, _grouper=self._grouper)",
    "docstring": "Return an expanding grouper, providing expanding functionality per group. Parameters ---------- min_periods : int, default 1 Minimum number of observations in window required to have a value; otherwise, result is `` in the method call. Returns ------- pandas.api.typing.ExpandingGroupby An object that supports expanding transformations over each group. See Also -------- Series.expanding : Expanding transformations for Series. DataFrame.expanding : Expanding transformations for DataFrames. Series.groupby : Apply a function groupby to a Series. DataFrame.groupby : Apply a function groupby. Examples -------- >>> df = pd.DataFrame( ... { ... \"Class\": [\"A\", \"A\", \"A\", \"B\", \"B\", \"B\"], ... \"Value\": [10, 20, 30, 40, 50, 60], ... } ... ) >>> df Class Value 0 A 10 1 A 20 2 A 30 3 B 40 4 B 50 5 B 60 >>> df.groupby(\"Class\").expanding().mean() Value Class A 0 10.0 1 15.0 2 20.0 B 3 40.0 4 45.0 5 50.0",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\groupby\\groupby.py",
    "ast_data": "FunctionDef name:expanding arg:self arg:min_periods arg:method arguments arg arg arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "embedding_inference_rule",
    "source_code": "@register_inference_rule(torch.nn.modules.sparse.Embedding)\ndef embedding_inference_rule(n: Node, module_instance, symbols, constraints, counter):\n    assert isinstance(n.args[0], Node)\n    return gen_embedding_rules(n, symbols, module_instance.embedding_dim, counter)",
    "docstring": "The output shape differs from the input shape in the last dimension",
    "type": "function",
    "file_path": "pytorch\\torch\\fx\\experimental\\migrate_gradual_types\\constraint_generator.py",
    "ast_data": "FunctionDef name:embedding_inference_rule arg:n arg:module_instance arg:symbols arg:constraints arg:counter arguments arg arg arg arg arg Call Return return:yes Call Call"
  },
  {
    "library": "scipy",
    "name": "var",
    "source_code": "def var(self, mu=0, lmbda=1, a=1, b=1):\n    invalid, args = self._process_shapes(mu, lmbda, a, b)\n    mu, lmbda, a, b = args\n    invalid_x = invalid | ~(a > 1)\n    invalid_s2 = invalid | ~(a > 2)\n    var_x = b / ((a - 1) * lmbda)\n    var_s2 = b ** 2 / ((a - 1) ** 2 * (a - 2))\n    var_x, var_s2 = (np.asarray(var_x), np.asarray(var_s2))\n    var_x[invalid_x] = np.nan\n    var_s2[invalid_s2] = np.nan\n    return (var_x[()], var_s2[()])",
    "docstring": "The variance of the distribution. Parameters ---------- mu, lmbda, a, b : array_like, optional Shape parameters. and must be greater than zero, and must be greater than two. Returns ------- x, s2 : ndarray The variance of the distribution.",
    "type": "method",
    "file_path": "scipy\\scipy\\stats\\_multivariate.py",
    "ast_data": "FunctionDef name:var arg:self arg:mu arg:lmbda arg:a arg:b arguments arg arg arg arg arg Assign Call Assign Assign Compare Assign Compare Assign Assign Assign Call Call Assign Assign Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "maybe_enable_thunkify",
    "source_code": "@contextmanager\ndef maybe_enable_thunkify() -> Generator[None, None, None]:\n    proxy_mode = get_proxy_mode()\n    if proxy_mode is not None:\n        with _enable_thunkify(proxy_mode.tracer):\n            yield\n    else:\n        yield",
    "docstring": "Within this context manager, if you are doing make_fx tracing, we will thunkify all SymNode compute and avoid tracing it into the graph unless it is actually needed. You should prefer to avoid using this as much as possible, as lazy evaluation of SymNode tracing can lead to long chains of thunks which will stack overflow if you evaluate them. However, this is currently sometimes necessary as there are buggy parts of PT2 which will fail with \"s0 is not tracked with proxy\" error due to insufficient tracing of SymNode computation.",
    "type": "function",
    "file_path": "pytorch\\torch\\fx\\experimental\\proxy_tensor.py",
    "ast_data": "FunctionDef name:maybe_enable_thunkify arguments Assign Call If Compare With Call"
  },
  {
    "library": "pytorch",
    "name": "generate_detector_report",
    "source_code": "def generate_detector_report(self, model: nn.Module) -> tuple[str, dict[str, Any]]:\n    per_channel_info = self._detect_per_channel_helper(model)\n    further_optims_str = f'Further Optimizations for backend {self.backend_chosen}: \\n'\n    optimizations_possible = False\n    for fqn in per_channel_info:\n        fqn_dict = per_channel_info[fqn]\n        if fqn_dict[self.PER_CHAN_SUPPORTED_KEY] and (not fqn_dict[self.PER_CHAN_USED_KEY]):\n            optimizations_possible = True\n            further_optims_str += f'Module {fqn} can be configured to use per_channel quantization.\\n'\n    if optimizations_possible:\n        further_optims_str += 'To use per_channel quantization, make sure the qconfig has a per_channel weight observer.'\n    else:\n        further_optims_str += 'No further per_channel optimizations possible.'\n    return (further_optims_str, per_channel_info)",
    "docstring": "Checks if any Linear or Conv layers in the model utilize per_channel quantization. Only Linear and Conv layers can use per_channel as of now so only these two are currently checked. Looks at q_config format and backend to determine if per_channel can be utilized. Uses the DEFAULT_BACKEND_PER_CHANNEL_SUPPORTED_MODULES structure to determine support Args: model: The prepared and calibrated model we want to check if using per_channel Returns a tuple with two elements: String report of potential actions to improve model (if per_channel quantization is available in backend) Dictionary mapping per_channel quantizable elements to: whether per_channel quantization is supported by the backend if it is being utilized in the current model",
    "type": "method",
    "file_path": "pytorch\\torch\\ao\\quantization\\fx\\_model_report\\detector.py",
    "ast_data": "FunctionDef name:generate_detector_report arg:self arg:model arguments arg arg Assign Call Assign Assign For Assign If BoolOp Assign If Return return:yes"
  },
  {
    "library": "scipy",
    "name": "_funm_multiply_krylov_arnoldi",
    "source_code": "def _funm_multiply_krylov_arnoldi(A, b, bnorm, V, H, m):\n    dotprod = np.vdot if np.iscomplexobj(b) else np.dot\n    norm_tol = np.finfo(b.dtype.char).eps ** 2\n    V[:, 0] = b / bnorm\n    for k in range(0, m):\n        V[:, k + 1] = A.dot(V[:, k])\n        for i in range(0, k + 1):\n            H[i, k] = dotprod(V[:, i], V[:, k + 1])\n            V[:, k + 1] = V[:, k + 1] - H[i, k] * V[:, i]\n        H[k + 1, k] = norm(V[:, k + 1])\n        if H[k + 1, k] < norm_tol:\n            return (True, k)\n        V[:, k + 1] = V[:, k + 1] / H[k + 1, k]\n    return (False, m)",
    "docstring": "The Arnoldi iteration for constructing the basis V and the projection H = V * A V for the Krylov subspace Km(A, b) of order m. Parameters ---------- A : transposable linear operator The operator whose matrix function is of interest. b : ndarray The vector b to multiply the f(A) with. V : ndarray The n x (m + 1) matrix whose columns determines the basis for Krylov subspace Km(A, b). H : ndarray A (m + 1) x m upper Hessenberg matrix representing the projection of A onto Km(A, b). m : int The order of the Krylov subspace. Returns ------- breakdown : bool Indicate if the Arnoldi broke down or not iter : int Returns the last valid iteration.",
    "type": "function",
    "file_path": "scipy\\scipy\\sparse\\linalg\\_funm_multiply_krylov.py",
    "ast_data": "FunctionDef name:_funm_multiply_krylov_arnoldi arg:A arg:b arg:bnorm arg:V arg:H arg:m arguments arg arg arg arg arg arg Assign Call Assign Call Assign For Call Assign Call For Call Assign Call Assign Assign Call If Compare Return return:yes Assign Return return:yes"
  },
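A minimal NumPy sketch of the same Arnoldi loop, useful for seeing the projection identity from the docstring in action. The matrix, vector, and sizes below are made up for illustration; this mirrors the routine above rather than reproducing SciPy internals.

```python
import numpy as np
from numpy.linalg import norm

rng = np.random.default_rng(0)
n, m = 8, 4
A = rng.standard_normal((n, n))
A = A + A.T                      # symmetric, so H should be (nearly) tridiagonal
b = rng.standard_normal(n)

V = np.zeros((n, m + 1))         # orthonormal basis of Km(A, b)
H = np.zeros((m + 1, m))         # upper Hessenberg projection
V[:, 0] = b / norm(b)
for k in range(m):
    w = A @ V[:, k]
    for i in range(k + 1):       # modified Gram-Schmidt orthogonalization
        H[i, k] = V[:, i] @ w
        w -= H[i, k] * V[:, i]
    H[k + 1, k] = norm(w)
    V[:, k + 1] = w / H[k + 1, k]

# The docstring's identity: H_m = V_m^* A V_m on the Krylov subspace
assert np.allclose(H[:m, :m], V[:, :m].T @ A @ V[:, :m], atol=1e-10)
```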
  {
    "library": "django",
    "name": "_apply_rel_filters",
    "source_code": "def _apply_rel_filters(self, queryset):\n    queryset._add_hints(instance=self.instance)\n    if self._db:\n        queryset = queryset.using(self._db)\n    queryset._defer_next_filter = True\n    return queryset._next_is_sticky().filter(**self.core_filters)",
    "docstring": "Filter the queryset for the instance this manager is bound to.",
    "type": "method",
    "file_path": "django\\django\\db\\models\\fields\\related_descriptors.py",
    "ast_data": "FunctionDef name:_apply_rel_filters arg:self arg:queryset arguments arg arg Call If Assign Call Assign Return return:yes Call Call"
  },
  {
    "library": "authlib",
    "name": "create_authorization_url",
    "source_code": "def create_authorization_url(self, url, state=None, code_verifier=None, **kwargs):\n    if state is None:\n        state = generate_token()\n    response_type = self.metadata.get('response_type', 'code')\n    response_type = kwargs.pop('response_type', response_type)\n    if 'redirect_uri' not in kwargs:\n        kwargs['redirect_uri'] = self.redirect_uri\n    if 'scope' not in kwargs:\n        kwargs['scope'] = self.scope\n    if code_verifier and response_type == 'code' and (self.code_challenge_method == 'S256'):\n        kwargs['code_challenge'] = create_s256_code_challenge(code_verifier)\n        kwargs['code_challenge_method'] = self.code_challenge_method\n    for k in self.EXTRA_AUTHORIZE_PARAMS:\n        if k not in kwargs and k in self.metadata:\n            kwargs[k] = self.metadata[k]\n    uri = prepare_grant_uri(url, client_id=self.client_id, response_type=response_type, state=state, **kwargs)\n    return (uri, state)",
    "docstring": "Generate an authorization URL and state. :param url: Authorization endpoint url, must be HTTPS. :param state: An optional state string for CSRF protection. If not given it will be generated for you. :param code_verifier: An optional code_verifier for code challenge. :param kwargs: Extra parameters to include. :return: authorization_url, state",
    "type": "method",
    "file_path": "authlib\\authlib\\oauth2\\client.py",
    "ast_data": "FunctionDef name:create_authorization_url arg:self arg:url arg:state arg:code_verifier arguments arg arg arg arg arg If Compare Assign Call Assign Call Assign Call If Compare Assign If Compare Assign If BoolOp Compare Compare Assign Call Assign For If BoolOp Compare Compare Assign Assign Call Return return:yes"
  },
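A hedged usage sketch for the method above via authlib's requests-based client; the client id, endpoint URLs, and scope are placeholders, not values from the source.

```python
from authlib.integrations.requests_client import OAuth2Session
from authlib.common.security import generate_token

client = OAuth2Session(
    client_id="example-client-id",                    # placeholder
    redirect_uri="https://app.example.com/callback",  # placeholder
    scope="openid profile",
    code_challenge_method="S256",
)
code_verifier = generate_token(48)
uri, state = client.create_authorization_url(
    "https://auth.example.com/authorize", code_verifier=code_verifier
)
# `uri` carries response_type, state, redirect_uri, scope, and the S256
# code_challenge derived from `code_verifier`; keep `state` for the callback.
print(uri, state)
```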
  {
    "library": "cherrypy",
    "name": "check_username_and_password",
    "source_code": "def check_username_and_password(self, username, password):\n    pass",
    "docstring": "Assert the login credentials. :param username: A user name sent from the login form. :type username: str :param password: A pass word sent from the login form. :type password: str :returns: A non-empty error string if the authentication fails. :rtype: str",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\lib\\cptools.py",
    "ast_data": "FunctionDef name:check_username_and_password arg:self arg:username arg:password arguments arg arg arg"
  },
  {
    "library": "tensorflow",
    "name": "dispatch",
    "source_code": "def dispatch(self, request: function_type.FunctionType) -> Optional[function_type.FunctionType]:\n    if request in self._dispatch_table:\n        return request\n    if request in self._dispatch_cache:\n        result = self._dispatch_cache.pop(request)\n        self._dispatch_cache[request] = result\n        return result\n    most_specific_supertype = None\n    for other in self._dispatch_table:\n        if request.is_supertype_of(other):\n            if most_specific_supertype is None or other.is_supertype_of(most_specific_supertype):\n                most_specific_supertype = other\n    self._cache_dispatch(request, most_specific_supertype)\n    return most_specific_supertype",
    "docstring": "Returns the most specific supertype target if it exists in the table.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\core\\function\\polymorphism\\type_dispatch.py",
    "ast_data": "FunctionDef name:dispatch arg:self arg:request arguments arg arg If Compare Return return:yes If Compare Assign Call Assign Return return:yes Assign For If Call If BoolOp Compare Call Assign Call Return return:yes"
  },
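A small self-contained sketch of the most-specific-supertype lookup that `dispatch` performs, restated with Python classes where "is_supertype_of" becomes `issubclass` in the opposite direction (the caching is omitted; names here are illustrative).

```python
def dispatch(request, table):
    if request in table:
        return request                       # exact match wins
    best = None
    for other in table:
        if issubclass(request, other):       # `other` is a supertype of `request`
            if best is None or issubclass(other, best):
                best = other                 # keep the most specific one
    return best

class A: pass
class B(A): pass
class C(B): pass

assert dispatch(C, [A, B]) is B   # B is more specific than A
assert dispatch(B, [A, B]) is B   # exact match
assert dispatch(A, [B]) is None   # no supertype registered
```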
  {
    "library": "tensorflow",
    "name": "run_restore_ops",
    "source_code": "def run_restore_ops(self, session=None):\n    raise AssertionError('No checkpoint specified, so no restore ops are available (save_path=None to Saver.restore).')",
    "docstring": "For consistency with . Use for initializing if no checkpoint was passed to and restoring otherwise. Args: session: Not used.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\checkpoint\\checkpoint.py",
    "ast_data": "FunctionDef name:run_restore_ops arg:self arg:session arguments arg arg Raise Call"
  },
  {
    "library": "django",
    "name": "_check_list_select_related",
    "source_code": "def _check_list_select_related(self, obj):\n    if not isinstance(obj.list_select_related, (bool, list, tuple)):\n        return must_be('a boolean, tuple or list', option='list_select_related', obj=obj, id='admin.E117')\n    else:\n        return []",
    "docstring": "Check that list_select_related is a boolean, a list or a tuple.",
    "type": "method",
    "file_path": "django\\django\\contrib\\admin\\checks.py",
    "ast_data": "FunctionDef name:_check_list_select_related arg:self arg:obj arguments arg arg If Call Return return:yes Call Return return:no"
  },
  {
    "library": "tensorflow",
    "name": "benchmark_map_fusion",
    "source_code": "def benchmark_map_fusion(self):\n    chain_lengths = [0, 1, 2, 5, 10, 20, 50]\n    for chain_length in chain_lengths:\n        self._benchmark_map_fusion(chain_length=chain_length, optimize_dataset=False)\n        self._benchmark_map_fusion(chain_length=chain_length, optimize_dataset=True)",
    "docstring": "Evaluates performance map of fusion.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\data\\experimental\\benchmarks\\optimize_benchmark.py",
    "ast_data": "FunctionDef name:benchmark_map_fusion arg:self arguments arg Assign For Call Call"
  },
  {
    "library": "numpy",
    "name": "convolve",
    "source_code": "def convolve(a, v, mode='full', propagate_mask=True):\n    return _convolve_or_correlate(np.convolve, a, v, mode, propagate_mask)",
    "docstring": "Returns the discrete, linear convolution of two one-dimensional sequences. Parameters ---------- a, v : array_like Input sequences. mode : {'valid', 'same', 'full'}, optional Refer to the docstring. propagate_mask : bool If True, then if any masked element is included in the sum for a result element, then the result is masked. If False, then the result element is only masked if no non-masked cells contribute towards it Returns ------- out : MaskedArray Discrete, linear convolution of and . See Also -------- numpy.convolve : Equivalent function in the top-level NumPy module.",
    "type": "function",
    "file_path": "numpy\\numpy\\ma\\core.py",
    "ast_data": "FunctionDef name:convolve arg:a arg:v arg:mode arg:propagate_mask arguments arg arg arg arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "is_namedtuple",
    "source_code": "def is_namedtuple(obj: Union[object, type]) -> bool:\n    cls = obj if isinstance(obj, type) else type(obj)\n    return is_namedtuple_class(cls)",
    "docstring": "Return whether the object is an instance of namedtuple or a subclass of namedtuple.",
    "type": "function",
    "file_path": "pytorch\\torch\\utils\\_pytree.py",
    "ast_data": "FunctionDef name:is_namedtuple arg:obj arguments arg Assign Call Call Return return:yes Call"
  },
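A quick usage check of the helper above (requires torch); it accepts both namedtuple classes and instances, while rejecting plain tuples.

```python
from collections import namedtuple
from torch.utils._pytree import is_namedtuple

Point = namedtuple("Point", ["x", "y"])
assert is_namedtuple(Point)          # the class itself
assert is_namedtuple(Point(1, 2))    # an instance
assert not is_namedtuple((1, 2))     # a plain tuple is not a namedtuple
```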
  {
    "library": "scikit-learn",
    "name": "_transform_one",
    "source_code": "def _transform_one(transformer, X, y, weight, params):\n    res = transformer.transform(X, **params.transform)\n    if weight is None:\n        return res\n    return res * weight",
    "docstring": "Call transform and apply weight to output. Parameters ---------- transformer : estimator Estimator to be used for transformation. X : {array-like, sparse matrix} of shape (n_samples, n_features) Input data to be transformed. y : ndarray of shape (n_samples,) Ignored. weight : float Weight to be applied to the output of the transformation. params : dict Parameters to be passed to the transformer's ``.",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\pipeline.py",
    "ast_data": "FunctionDef name:_transform_one arg:transformer arg:X arg:y arg:weight arg:params arguments arg arg arg arg arg Assign Call If Compare Return return:yes Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_find_user_frame",
    "source_code": "def _find_user_frame(self):\n    frame = inspect.currentframe()\n    pt_files = ['torch/fx/proxy.py', 'torch/fx/_symbolic_trace.py', 'torch/fx/experimental/proxy_tensor.py', 'torch/_ops.py', 'torch/_tensor.py', 'torch/utils/_python_dispatch.py', 'torch/_prims_common/wrappers.py', 'torch/_refs/__init__.py', 'torch/_refs/nn/functional/__init__.py', 'torch/utils/_stats.py']\n    while frame:\n        frame = frame.f_back\n        if frame and all((not frame.f_code.co_filename.endswith(file) for file in pt_files)):\n            break\n    if not frame:\n        return None\n    return frame",
    "docstring": "Find the Python stack frame executing the user code during symbolic tracing.",
    "type": "method",
    "file_path": "pytorch\\torch\\fx\\proxy.py",
    "ast_data": "FunctionDef name:_find_user_frame arg:self arguments arg Assign Call Assign While Assign If BoolOp Call Call If Return return:no Return return:yes"
  },
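A stripped-down sketch of the frame-walking idea: climb `f_back` until the code object's filename falls outside a skip list of library files. The skip list here is a toy stand-in for the `pt_files` list above.

```python
import inspect

LIB_FILES = ("proxy.py", "_symbolic_trace.py")  # illustrative skip list

def find_user_frame():
    frame = inspect.currentframe()
    while frame:
        frame = frame.f_back
        if frame and not frame.f_code.co_filename.endswith(LIB_FILES):
            return frame                # first frame outside "library" files
    return None

def user_code():
    return find_user_frame()

frame = user_code()
assert frame is not None and frame.f_code.co_name == "user_code"
```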
  {
    "library": "scipy",
    "name": "Target",
    "source_code": "class Target:\n\n    def __init__(self, start, end):\n        self.start = int(start)\n        self.end = int(end)\n        self.targets = []",
    "docstring": "Represents a single line read for a .ninja_log file. Start and end times are milliseconds.",
    "type": "class",
    "file_path": "scipy\\tools\\ninjatracing.py",
    "ast_data": "ClassDef name:Target FunctionDef name:__init__ arg:self arg:start arg:end arguments arg arg arg Assign Call Assign Call Assign"
  },
  {
    "library": "kornia",
    "name": "denormalize_laf",
    "source_code": "def denormalize_laf(LAF: Tensor, images: Tensor) -> Tensor:\n    KORNIA_CHECK_LAF(LAF)\n    _, _, h, w = images.size()\n    wf = float(w - 1)\n    hf = float(h - 1)\n    min_size = min(hf, wf)\n    coef = torch.ones(1, 1, 2, 3, dtype=LAF.dtype, device=LAF.device) * min_size\n    coef[0, 0, 0, 2] = wf\n    coef[0, 0, 1, 2] = hf\n    return coef.expand_as(LAF) * LAF",
    "docstring": "De-normalize LAFs from scale to image scale. The convention is that center of 5-pixel image (coordinates from 0 to 4) is 2, and not 2.5. B,N,H,W = images.size() MIN_SIZE = min(H - 1, W -1) [a11 a21 x] [a21 a22 y] becomes [a11*MIN_SIZE a21*MIN_SIZE x*(W-1)] [a21*MIN_SIZE a22*MIN_SIZE y*(W-1)] Args: LAF: :math: images: :math: Returns: the denormalized LAF: :math:, scale in pixels",
    "type": "function",
    "file_path": "kornia\\kornia\\feature\\laf.py",
    "ast_data": "FunctionDef name:denormalize_laf arg:LAF arg:images arguments arg arg Call Assign Call Assign Call Assign Call Assign Call Assign Call Assign Assign Return return:yes Call"
  },
  {
    "library": "django",
    "name": "related_query_name",
    "source_code": "def related_query_name(self):\n    return self.remote_field.related_query_name or self.remote_field.related_name or self.opts.model_name",
    "docstring": "Define the name that can be used to identify this related object in a table-spanning query.",
    "type": "method",
    "file_path": "django\\django\\db\\models\\fields\\related.py",
    "ast_data": "FunctionDef name:related_query_name arg:self arguments arg Return return:yes BoolOp"
  },
  {
    "library": "tensorflow",
    "name": "_get_return_value",
    "source_code": "def _get_return_value(self, tensors, indices):\n    tensors = self._create_device_transfers(tensors)\n    for output, i in zip(tensors, indices):\n        output.set_shape(self._shapes[i])\n    if self._names:\n        return {self._names[i]: t for t, i in zip(tensors, indices)}\n    return tensors",
    "docstring": "Return the value to return from a get op. If the staging area has names, return a dictionary with the names as keys. Otherwise return either a single tensor or a list of tensors depending on the length of . Args: tensors: List of tensors from the get op. indices: Indices of associated names and shapes Returns: A single tensor, a list of tensors, or a dictionary of tensors.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\data_flow_ops.py",
    "ast_data": "FunctionDef name:_get_return_value arg:self arg:tensors arg:indices arguments arg arg arg Assign Call For Call Call If Return return:yes Call Return return:yes"
  },
  {
    "library": "pandas",
    "name": "_disabled",
    "source_code": "def _disabled(self, *args, **kwargs) -> NoReturn:\n    raise TypeError(f\"'{type(self).__name__}' does not support mutable operations.\")",
    "docstring": "This method will not function because object is immutable.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\indexes\\frozen.py",
    "ast_data": "FunctionDef name:_disabled arg:self arguments arg arg arg Raise Call Call"
  },
  {
    "library": "matplotlib",
    "name": "SymmetricalLogScale",
    "source_code": "class SymmetricalLogScale(ScaleBase):\n    name = 'symlog'\n\n    def __init__(self, axis, *, base=10, linthresh=2, subs=None, linscale=1):\n        self._transform = SymmetricalLogTransform(base, linthresh, linscale)\n        self.subs = subs\n    base = property(lambda self: self._transform.base)\n    linthresh = property(lambda self: self._transform.linthresh)\n    linscale = property(lambda self: self._transform.linscale)\n\n    def set_default_locators_and_formatters(self, axis):\n        axis.set_major_locator(SymmetricalLogLocator(self.get_transform()))\n        axis.set_major_formatter(LogFormatterSciNotation(self.base))\n        axis.set_minor_locator(SymmetricalLogLocator(self.get_transform(), self.subs))\n        axis.set_minor_formatter(NullFormatter())\n\n    def get_transform(self):\n        return self._transform",
    "docstring": "The symmetrical logarithmic scale is logarithmic in both the positive and negative directions from the origin. Since the values close to zero tend toward infinity, there is a need to have a range around zero that is linear. The parameter *linthresh* allows the user to specify the size of this range (-*linthresh*, *linthresh*). See :doc: for a detailed description. Parameters ---------- base : float, default: 10 The base of the logarithm. linthresh : float, default: 2 Defines the range `` to be stretched relative to the logarithmic range. Its value is the number of decades to use for each half of the linear range. For example, when *linscale* == 1.0 (the default), the space used for the positive and negative halves of the linear range will be equal to one decade in the logarithmic range.",
    "type": "class",
    "file_path": "matplotlib\\lib\\matplotlib\\scale.py",
    "ast_data": "ClassDef name:SymmetricalLogScale Assign FunctionDef name:__init__ arg:self arg:axis arguments arg arg arg arg arg arg Assign Call Assign Assign Call arguments arg Assign Call arguments arg Assign Call arguments arg FunctionDef name:set_default_locators_and_formatters arg:self arg:axis arguments arg arg Call Call Call Call Call Call Call Call Call Call FunctionDef name:get_transform arg:self arguments arg Return return:yes"
  },
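A short usage sketch of the scale class above through the pyplot API, with illustrative data: values inside (-linthresh, linthresh) are spaced linearly, the rest logarithmically.

```python
import matplotlib.pyplot as plt
import numpy as np

x = np.linspace(-50, 50, 500)
fig, ax = plt.subplots()
ax.plot(x, x)
ax.set_yscale("symlog", linthresh=2, linscale=1)  # linear in (-2, 2)
ax.set_title("symlog: linear near zero, logarithmic elsewhere")
plt.show()
```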
  {
    "library": "cherrypy",
    "name": "serve",
    "source_code": "def serve(path=None, port=8080):\n    if profile is None or pstats is None:\n        msg = \"Your installation of Python does not have a profile module. If you're on Debian, try `sudo apt-get install python-profiler`. See http://www.cherrypy.org/wiki/ProfilingOnDebian for details.\"\n        warnings.warn(msg)\n    cherrypy.config.update({'server.socket_port': int(port), 'server.thread_pool': 10, 'environment': 'production'})\n    cherrypy.quickstart(Profiler(path))",
    "docstring": "Serve the web app with profiler activated.",
    "type": "function",
    "file_path": "cherrypy\\cherrypy\\lib\\profiler.py",
    "ast_data": "FunctionDef name:serve arg:path arg:port arguments arg arg If BoolOp Compare Compare Assign Call Call Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "TFAPIChangeSpec",
    "source_code": "class TFAPIChangeSpec(ast_edits.APIChangeSpec):\n\n    def __init__(self):\n        self.function_keyword_renames = {}\n        self.symbol_renames = {}\n        self.change_to_function = {}\n        self.function_reorders = {}\n        self.function_warnings = {}\n        self.function_transformers = {}\n        self.module_deprecations = module_deprecations_v2.MODULE_DEPRECATIONS\n        for symbol, replacement in all_renames_v2.addons_symbol_mappings.items():\n            warning = (ast_edits.WARNING, '(Manual edit required) `{}` has been migrated to `{}` in TensorFlow Addons. The API spec may have changed during the migration. Please see https://github.com/tensorflow/addons for more info.'.format(symbol, replacement))\n            self.function_warnings[symbol] = warning\n        self.import_renames = {'tensorflow': ast_edits.ImportRename('tensorflow.compat.v1', excluded_prefixes=['tensorflow.contrib', 'tensorflow.flags', 'tensorflow.compat', 'tensorflow.compat.v1', 'tensorflow.compat.v2', 'tensorflow.google'])}\n        self.max_submodule_depth = 2",
    "docstring": "List of maps that describe what changed in the API.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\tools\\compatibility\\tf_upgrade_v2_safety.py",
    "ast_data": "ClassDef name:TFAPIChangeSpec FunctionDef name:__init__ arg:self arguments arg Assign Assign Assign Assign Assign Assign Assign For Call Assign Call Assign Assign Call Assign"
  },
  {
    "library": "pytorch",
    "name": "edge",
    "source_code": "def edge(a, b, tie_breaker=hash):\n    if supercedes(a, b):\n        if supercedes(b, a):\n            return tie_breaker(a) > tie_breaker(b)\n        else:\n            return True\n    return False",
    "docstring": "A should be checked before B Tie broken by tie_breaker, defaults to ``",
    "type": "function",
    "file_path": "pytorch\\torch\\fx\\experimental\\unification\\match.py",
    "ast_data": "FunctionDef name:edge arg:a arg:b arg:tie_breaker arguments arg arg arg If Call If Call Return return:yes Compare Call Call Return return:yes Return return:yes"
  },
  {
    "library": "django",
    "name": "using",
    "source_code": "def using(self, alias):\n    clone = self._chain()\n    clone._db = alias\n    return clone",
    "docstring": "Select which database this QuerySet should execute against.",
    "type": "method",
    "file_path": "django\\django\\db\\models\\query.py",
    "ast_data": "FunctionDef name:using arg:self arg:alias arguments arg arg Assign Call Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_push_writer",
    "source_code": "def _push_writer(self, writer, step):\n    if self.update_freq == 'epoch':\n        return\n    should_record = lambda: math_ops.equal(step % self.update_freq, 0)\n    summary_context = (writer.as_default(step.value()), summary_ops_v2.record_if(should_record))\n    self._prev_summary_state.append(summary_context)\n    summary_context[0].__enter__()\n    summary_context[1].__enter__()",
    "docstring": "Sets the default writer for custom batch-level summaries.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\callbacks.py",
    "ast_data": "FunctionDef name:_push_writer arg:self arg:writer arg:step arguments arg arg arg If Compare Return return:no Assign arguments Call Assign Call Call Call Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "run_benchmark",
    "source_code": "def run_benchmark(self, dataset, num_elements, iters=1, warmup=True, apply_default_optimizations=False, session_config=None):\n    options = options_lib.Options()\n    options.experimental_optimization.apply_default_optimizations = apply_default_optimizations\n    dataset = dataset.with_options(options)\n    dataset = dataset.skip(num_elements - 1)\n    if context.executing_eagerly():\n        median_duration = self._run_eager_benchmark(iterable=dataset, iters=iters, warmup=warmup)\n        return median_duration / float(num_elements)\n    iterator = dataset_ops.make_initializable_iterator(dataset)\n    next_element = iterator.get_next()\n    op = nest.flatten(next_element)[0].op\n    median_duration = self._run_graph_benchmark(iterable=op, iters=iters, warmup=warmup, session_config=session_config, initializer=iterator.initializer)\n    return median_duration / float(num_elements)",
    "docstring": "Benchmarks the dataset. Runs the dataset times. In each iteration, the benchmark measures the time it takes to go through elements of the dataset. Args: dataset: Dataset to benchmark. num_elements: Number of dataset elements to iterate through each benchmark iteration. iters: Number of times to repeat the timing. warmup: If true, warms up the session caches by running an untimed run. apply_default_optimizations: Determines whether default optimizations should be applied. session_config: A ConfigProto protocol buffer with configuration options for the session. Applicable only for benchmarking in graph mode. Returns: A float, representing the per-element wall time of the dataset in seconds. This is the median time (with respect to ) it takes for the dataset to go through elements, divided by",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\data\\benchmarks\\benchmark_base.py",
    "ast_data": "FunctionDef name:run_benchmark arg:self arg:dataset arg:num_elements arg:iters arg:warmup arg:apply_default_optimizations arg:session_config arguments arg arg arg arg arg arg arg Assign Call Assign Assign Call Assign Call If Call Assign Call Return return:yes Call Assign Call Assign Call Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "seaborn",
    "name": "_matrix_mask",
    "source_code": "def _matrix_mask(data, mask):\n    if mask is None:\n        mask = np.zeros(data.shape, bool)\n    if isinstance(mask, pd.DataFrame):\n        if not mask.index.equals(data.index) and mask.columns.equals(data.columns):\n            err = 'Mask must have the same index and columns as data.'\n            raise ValueError(err)\n    elif hasattr(mask, '__array__'):\n        mask = np.asarray(mask)\n        if mask.shape != data.shape:\n            raise ValueError('Mask must have the same shape as data.')\n        mask = pd.DataFrame(mask, index=data.index, columns=data.columns, dtype=bool)\n    mask = mask | pd.isnull(data)\n    return mask",
    "docstring": "Ensure that data and mask are compatible and add missing values. Values will be plotted for cells where `` can be an array or a DataFrame.",
    "type": "function",
    "file_path": "seaborn\\seaborn\\matrix.py",
    "ast_data": "FunctionDef name:_matrix_mask arg:data arg:mask arguments arg arg If Compare Assign Call If Call If BoolOp Call Call Assign Raise Call If Call Assign Call If Compare Raise Call Assign Call Assign Call Return return:yes"
  },
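The key rule in the helper above is the final OR: an explicit mask is combined with the missing-value mask, so NaN cells are always masked. A tiny illustration with made-up data:

```python
import numpy as np
import pandas as pd

data = pd.DataFrame([[1.0, np.nan], [3.0, 4.0]], columns=["a", "b"])
mask = pd.DataFrame([[True, False], [False, False]],
                    index=data.index, columns=data.columns)

combined = mask | pd.isnull(data)   # NaN at (0, "b") becomes masked too
assert combined.values.tolist() == [[True, True], [False, False]]
```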
  {
    "library": "matplotlib",
    "name": "_get_xdg_cache_dir",
    "source_code": "def _get_xdg_cache_dir():\n    cache_dir = os.environ.get('XDG_CACHE_HOME')\n    if not cache_dir:\n        cache_dir = os.path.expanduser('~/.cache')\n        if cache_dir.startswith('~/'):\n            return None\n    return Path(cache_dir, 'matplotlib')",
    "docstring": "Return the XDG cache directory. See",
    "type": "function",
    "file_path": "matplotlib\\tools\\cache_zenodo_svg.py",
    "ast_data": "FunctionDef name:_get_xdg_cache_dir arguments Assign Call If Assign Call If Call Return return:no Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "estimators_samples_",
    "source_code": "@property\ndef estimators_samples_(self):\n    return [sample_indices for _, sample_indices in self._get_estimators_indices()]",
    "docstring": "The subset of drawn samples for each base estimator. Returns a dynamically generated list of indices identifying the samples used for fitting each member of the ensemble, i.e., the in-bag samples. Note: the list is re-created at each call to the property in order to reduce the object memory footprint by not storing the sampling data. Thus fetching the property may be slower than expected.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\ensemble\\_bagging.py",
    "ast_data": "FunctionDef name:estimators_samples_ arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "_reset_lazy_init",
    "source_code": "def _reset_lazy_init(self) -> None:\n    self._is_root: Optional[bool] = None",
    "docstring": "Reset instance so :func: will run on the next forward.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\fsdp\\fully_sharded_data_parallel.py",
    "ast_data": "FunctionDef name:_reset_lazy_init arg:self arguments arg"
  },
  {
    "library": "pytorch",
    "name": "compute_self_time",
    "source_code": "def compute_self_time(self):\n    assert self.profile.kineto_results is not None\n    stack = deque(self.profile.kineto_results.experimental_event_tree())\n    while stack:\n        curr_event = stack.pop()\n        self_time = curr_event.duration_time_ns\n        for child_event in curr_event.children:\n            self_time -= child_event.duration_time_ns\n            stack.append(child_event)\n        assert EventKey(curr_event) not in self.metrics, f'Duplicate id: {curr_event.id}, {curr_event.name}'\n        self.metrics[EventKey(curr_event)] = EventMetrics(self_time_ns=self_time)\n        self.metrics[EventKey(curr_event)].duration_time_ns = curr_event.duration_time_ns",
    "docstring": "Computes event's self time(total time - time in child ops).",
    "type": "method",
    "file_path": "pytorch\\torch\\profiler\\_utils.py",
    "ast_data": "FunctionDef name:compute_self_time arg:self arguments arg Compare Assign Call Call While Assign Call Assign For Call Compare Call Assign Call Call Assign Call"
  },
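The traversal above computes self time as an event's duration minus its direct children's durations. A toy reproduction on a hand-built event tree (the Event class and numbers are invented for illustration):

```python
from collections import deque

class Event:
    def __init__(self, name, duration, children=()):
        self.name, self.duration, self.children = name, duration, list(children)

root = Event("forward", 100, [Event("matmul", 60, [Event("copy", 10)]),
                              Event("relu", 15)])

self_time = {}
stack = deque([root])
while stack:
    ev = stack.pop()
    t = ev.duration
    for child in ev.children:
        t -= child.duration          # subtract time spent in child ops
        stack.append(child)
    self_time[ev.name] = t

assert self_time == {"forward": 25, "matmul": 50, "copy": 10, "relu": 15}
```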
  {
    "library": "matplotlib",
    "name": "get_tool",
    "source_code": "def get_tool(self, name, warn=True):\n    if isinstance(name, backend_tools.ToolBase) and name.name in self._tools:\n        return name\n    if name not in self._tools:\n        if warn:\n            _api.warn_external(f'ToolManager does not control tool {name!r}')\n        return None\n    return self._tools[name]",
    "docstring": "Return the tool object with the given name. For convenience, this passes tool objects through. Parameters ---------- name : str or Name of the tool, or the tool itself. warn : bool, default: True Whether a warning should be emitted it no tool with the given name exists. Returns ------- or None The tool or None if no tool with the given name exists.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\backend_managers.py",
    "ast_data": "FunctionDef name:get_tool arg:self arg:name arg:warn arguments arg arg arg If BoolOp Call Compare Return return:yes If Compare If Call Return return:no Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "__setitem__",
    "source_code": "def __setitem__(self, key, value):\n    self._check_self_external_modification()\n    self._maybe_initialize_trackable()\n    no_dep = isinstance(value, NoDependency)\n    if isinstance(key, str):\n        value = self._track_value(value, name=key)\n    else:\n        value = wrap_or_unwrap(value)\n        if not no_dep and isinstance(value, base.Trackable):\n            self._self_non_string_key = True\n    self.__wrapped__[key] = value\n    self._update_snapshot()",
    "docstring": "Allow any modifications, but possibly mark the wrapper as unsaveable.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\trackable\\data_structures.py",
    "ast_data": "FunctionDef name:__setitem__ arg:self arg:key arg:value arguments arg arg arg Call Call Assign Call If Call Assign Call Assign Call If BoolOp Call Assign Assign Call"
  },
  {
    "library": "scikit-learn",
    "name": "fit",
    "source_code": "@_fit_context(prefer_skip_nested_validation=False)\ndef fit(self, X, y, **fit_params):\n    _raise_for_params(fit_params, self, 'fit', allow=['sample_weight'])\n    y_type = type_of_target(y, input_name='y')\n    if y_type in ('unknown', 'continuous'):\n        raise ValueError(f'Unknown label type: {y_type}. Maybe you are trying to fit a classifier, which expects discrete classes on a regression target with continuous values.')\n    elif y_type not in ('binary', 'multiclass'):\n        raise NotImplementedError(f'{self.__class__.__name__} only supports binary or multiclass classification. Multilabel and multi-output classification are not supported.')\n    self.le_ = LabelEncoder().fit(y)\n    self.classes_ = self.le_.classes_\n    transformed_y = self.le_.transform(y)\n    return super().fit(X, transformed_y, **fit_params)",
    "docstring": "Fit the estimators. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) Training vectors, where is the number of samples and is the number of features. y : array-like of shape (n_samples,) Target values. **fit_params : dict Parameters to pass to the underlying estimators. .. versionadded:: 1.5 Only available if , which can be set by using `Metadata Routing User Guide ` for more details. Returns ------- self : object Returns the instance itself.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\ensemble\\_voting.py",
    "ast_data": "FunctionDef name:fit arg:self arg:X arg:y arguments arg arg arg arg Call Assign Call If Compare Raise Call If Compare Raise Call Assign Call Call Assign Assign Call Return return:yes Call Call Call"
  },
  {
    "library": "django",
    "name": "has_output",
    "source_code": "def has_output(self):\n    raise NotImplementedError('subclasses of ListFilter must provide a has_output() method')",
    "docstring": "Return True if some choices would be output for this filter.",
    "type": "method",
    "file_path": "django\\django\\contrib\\admin\\filters.py",
    "ast_data": "FunctionDef name:has_output arg:self arguments arg Raise Call"
  },
  {
    "library": "matplotlib",
    "name": "__init__",
    "source_code": "def __init__(self, pad=0.3):\n    self.pad = pad",
    "docstring": "Parameters ---------- pad : float, default: 0.3 The amount of padding around the original box.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\patches.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:pad arguments arg arg Assign"
  },
  {
    "library": "matplotlib",
    "name": "_set_mathtext_path",
    "source_code": "def _set_mathtext_path(self):\n    from matplotlib.text import TextPath\n    text = TextPath(xy=(0, 0), s=self.get_marker(), usetex=mpl.rcParams['text.usetex'])\n    if len(text.vertices) == 0:\n        return\n    bbox = text.get_extents()\n    max_dim = max(bbox.width, bbox.height)\n    self._transform = Affine2D().translate(-bbox.xmin + 0.5 * -bbox.width, -bbox.ymin + 0.5 * -bbox.height).scale(1.0 / max_dim)\n    self._path = text\n    self._snap = False",
    "docstring": "Draw mathtext markers '$...$' using object. Submitted by tcb",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\markers.py",
    "ast_data": "FunctionDef name:_set_mathtext_path arg:self arguments arg Assign Call Call If Compare Call Return return:no Assign Call Assign Call Assign Call Call Call Assign Assign"
  },
  {
    "library": "pytorch",
    "name": "LogSigmoid",
    "source_code": "class LogSigmoid(Module):\n\n    def forward(self, input: Tensor) -> Tensor:\n        return F.logsigmoid(input)",
    "docstring": "Applies the Logsigmoid function element-wise. .. math:: \\text{LogSigmoid}(x) = \\log\\left(\\frac{ 1 }{ 1 + \\exp(-x)}\\right) Shape: - Input: :math:, where :math: means any number of dimensions. - Output: :math:, same shape as the input. .. image:: ../scripts/activation_images/LogSigmoid.png Examples:: >>> m = nn.LogSigmoid() >>> input = torch.randn(2) >>> output = m(input)",
    "type": "class",
    "file_path": "pytorch\\torch\\nn\\modules\\activation.py",
    "ast_data": "ClassDef name:LogSigmoid FunctionDef name:forward arg:self arg:input arguments arg arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_slice_single_param",
    "source_code": "def _slice_single_param(param, param_ndims_to_matrix_ndims, slices, batch_shape):\n    param = _broadcast_parameter_with_batch_shape(param, param_ndims_to_matrix_ndims, array_ops.ones_like(batch_shape))\n    if hasattr(param, 'batch_shape_tensor'):\n        param_batch_shape = param.batch_shape_tensor()\n    else:\n        param_batch_shape = array_ops.shape(param)\n    param_batch_rank = array_ops.size(param_batch_shape)\n    param_batch_shape = param_batch_shape[:param_batch_rank - param_ndims_to_matrix_ndims]\n    if tensor_util.constant_value(array_ops.size(batch_shape)) != 0 and tensor_util.constant_value(array_ops.size(param_batch_shape)) == 0:\n        return param\n    param_slices = _sanitize_slices(slices, intended_shape=batch_shape, deficient_shape=param_batch_shape)\n    if param_ndims_to_matrix_ndims > 0:\n        if Ellipsis not in [slc for slc in slices if not tensor_util.is_tensor(slc)]:\n            param_slices.append(Ellipsis)\n        param_slices += [slice(None)] * param_ndims_to_matrix_ndims\n    return param.__getitem__(tuple(param_slices))",
    "docstring": "Slices into the batch shape of a single parameter. Args: param: The original parameter to slice; either a or an object with batch shape (LinearOperator). param_ndims_to_matrix_ndims: number of right-most dimensions used for inferring matrix shape of the . For non-Tensor parameters, this is the number of this param's batch dimensions used by the matrix shape of the parent object. slices: iterable of slices received by . batch_shape: The parameterized object's batch shape . Returns: new_param: Instance of the same type as , batch-sliced according to .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\linalg\\slicing.py",
    "ast_data": "FunctionDef name:_slice_single_param arg:param arg:param_ndims_to_matrix_ndims arg:slices arg:batch_shape arguments arg arg arg arg Assign Call Call If Call Assign Call Assign Call Assign Call Assign If BoolOp Compare Call Call Compare Call Call Return return:yes Assign Call If Compare If Compare Call Call Call Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "set_optimizer_experimental_options",
    "source_code": "def set_optimizer_experimental_options(self, options):\n    self._optimizer_experimental_options.update(options)\n    self._thread_local_data.function_call_options = None",
    "docstring": "Set experimental options for the optimizer. Args: options: Dictionary of options to modify",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\context.py",
    "ast_data": "FunctionDef name:set_optimizer_experimental_options arg:self arg:options arguments arg arg Call Assign"
  },
  {
    "library": "pytorch",
    "name": "_ShardParamInfo",
    "source_code": "class _ShardParamInfo(NamedTuple):\n    in_shard: bool\n    offset_in_shard: Optional[int]\n    numel_in_shard: Optional[int]\n    intra_param_start_idx: Optional[int]\n    intra_param_end_idx: Optional[int]",
    "docstring": "Shard-related information for an original parameter.",
    "type": "class",
    "file_path": "pytorch\\torch\\distributed\\fsdp\\_flat_param.py",
    "ast_data": "ClassDef name:_ShardParamInfo"
  },
  {
    "library": "authlib",
    "name": "get_server_metadata",
    "source_code": "def get_server_metadata(self) -> dict:\n    return {}",
    "docstring": "Return server metadata which includes supported grant types, response types and etc. When the `True`, all clients require that authorization requests use request objects, and an error will be returned when the authorization request payload is passed in the request body or query string:: class JWTAuthenticationRequest(rfc9101.JWTAuthenticationRequest): def get_server_metadata(self): return { \"issuer\": ..., \"authorization_endpoint\": ..., \"require_signed_request_object\": ..., }",
    "type": "method",
    "file_path": "authlib\\authlib\\oauth2\\rfc9101\\authorization_server.py",
    "ast_data": "FunctionDef name:get_server_metadata arg:self arguments arg Return return:no"
  },
  {
    "library": "matplotlib",
    "name": "get_fignums",
    "source_code": "def get_fignums() -> list[int]:\n    return sorted(_pylab_helpers.Gcf.figs)",
    "docstring": "Return a list of existing figure numbers.",
    "type": "function",
    "file_path": "matplotlib\\lib\\matplotlib\\pyplot.py",
    "ast_data": "FunctionDef name:get_fignums arguments Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "_is_constant_holder",
    "source_code": "def _is_constant_holder(spec: 'TreeSpec') -> bool:\n    return isinstance(spec.context, ConstantNode)",
    "docstring": "Checks if the spec is from a pytree registered with register_constant",
    "type": "function",
    "file_path": "pytorch\\torch\\utils\\_pytree.py",
    "ast_data": "FunctionDef name:_is_constant_holder arg:spec arguments arg Return return:yes Call"
  },
  {
    "library": "numpy",
    "name": "basis",
    "source_code": "@classmethod\ndef basis(cls, deg, domain=None, window=None, symbol='x'):\n    if domain is None:\n        domain = cls.domain\n    if window is None:\n        window = cls.window\n    ideg = int(deg)\n    if ideg != deg or ideg < 0:\n        raise ValueError('deg must be non-negative integer')\n    return cls([0] * ideg + [1], domain, window, symbol)",
    "docstring": "Series basis polynomial of degree . Returns the series representing the basis polynomial of degree . Parameters ---------- deg : int Degree of the basis polynomial for the series. Must be >= 0. domain : {None, array_like}, optional If given, the array must be of the form `deg` term set to one and all others zero.",
    "type": "method",
    "file_path": "numpy\\numpy\\polynomial\\_polybase.py",
    "ast_data": "FunctionDef name:basis arg:cls arg:deg arg:domain arg:window arg:symbol arguments arg arg arg arg arg If Compare Assign If Compare Assign Assign Call If BoolOp Compare Compare Raise Call Return return:yes Call"
  },
  {
    "library": "django",
    "name": "execute",
    "source_code": "def execute(self, *args, **options):\n    if options['force_color'] and options['no_color']:\n        raise CommandError(\"The --no-color and --force-color options can't be used together.\")\n    if options['force_color']:\n        self.style = color_style(force_color=True)\n    elif options['no_color']:\n        self.style = no_style()\n        self.stderr.style_func = None\n    if options.get('stdout'):\n        self.stdout = OutputWrapper(options['stdout'])\n    if options.get('stderr'):\n        self.stderr = OutputWrapper(options['stderr'])\n    if self.requires_system_checks and (not options['skip_checks']):\n        check_kwargs = self.get_check_kwargs(options)\n        self.check(**check_kwargs)\n    if self.requires_migrations_checks:\n        self.check_migrations()\n    output = self.handle(*args, **options)\n    if output:\n        if self.output_transaction:\n            connection = connections[options.get('database', DEFAULT_DB_ALIAS)]\n            output = '%s\\n%s\\n%s' % (self.style.SQL_KEYWORD(connection.ops.start_transaction_sql()), output, self.style.SQL_KEYWORD(connection.ops.end_transaction_sql()))\n        self.stdout.write(output)\n    return output",
    "docstring": "Try to execute this command, performing system checks if needed (as controlled by the `` attribute, except if force-skipped).",
    "type": "method",
    "file_path": "django\\django\\core\\management\\base.py",
    "ast_data": "FunctionDef name:execute arg:self arguments arg arg arg If BoolOp Raise Call If Assign Call If Assign Call Assign If Call Assign Call If Call Assign Call If BoolOp Assign Call Call If Call Assign Call If If Assign Call Assign Call Call Call Call Call Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "_init_raw_predictions",
    "source_code": "def _init_raw_predictions(X, estimator, loss, use_predict_proba):\n    if use_predict_proba:\n        predictions = estimator.predict_proba(X)\n        if not loss.is_multiclass:\n            predictions = predictions[:, 1]\n        eps = np.finfo(np.float32).eps\n        predictions = np.clip(predictions, eps, 1 - eps, dtype=np.float64)\n    else:\n        predictions = estimator.predict(X).astype(np.float64)\n    if predictions.ndim == 1:\n        return loss.link.link(predictions).reshape(-1, 1)\n    else:\n        return loss.link.link(predictions)",
    "docstring": "Return the initial raw predictions. Parameters ---------- X : ndarray of shape (n_samples, n_features) The data array. estimator : object The estimator to use to compute the predictions. loss : BaseLoss An instance of a loss function class. use_predict_proba : bool Whether estimator.predict_proba is used instead of estimator.predict. Returns ------- raw_predictions : ndarray of shape (n_samples, K) The initial raw predictions. K is equal to 1 for binary classification and regression, and equal to the number of classes for multiclass classification. `` is casted into float64.",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\ensemble\\_gb.py",
    "ast_data": "FunctionDef name:_init_raw_predictions arg:X arg:estimator arg:loss arg:use_predict_proba arguments arg arg arg arg If Assign Call If Assign Assign Call Assign Call Assign Call Call If Compare Return return:yes Call Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "_get_filtered_data",
    "source_code": "def _get_filtered_data(self, feature_filter: str, module_fqn_filter: str) -> OrderedDict[str, Any]:\n    filtered_dict: OrderedDict[str, Any] = OrdDict()\n    for module_fqn in self.generated_reports:\n        if module_fqn_filter == '' or module_fqn_filter in module_fqn:\n            filtered_dict[module_fqn] = {}\n            module_reports = self.generated_reports[module_fqn]\n            for feature_name in module_reports:\n                if feature_filter == '' or feature_filter in feature_name:\n                    filtered_dict[module_fqn][feature_name] = module_reports[feature_name]\n    return filtered_dict",
    "docstring": "Filters the data and returns it in the same ordered dictionary format so the relevant views can be displayed. Args: feature_filter (str): The feature filter, if we want to filter the set of data to only include a certain set of features that include feature_filter If feature = \"\", then we do not filter based on any features module_fqn_filter (str): The filter on prefix for the module fqn. All modules that have fqn with this prefix will be included If module_fqn_filter = \"\" we do not filter based on module fqn, and include all modules First, the data is filtered based on module_fqn, and then filtered based on feature Returns an OrderedDict (sorted in order of model) mapping: module_fqns -> feature_names -> values",
    "type": "method",
    "file_path": "pytorch\\torch\\ao\\quantization\\fx\\_model_report\\model_report_visualizer.py",
    "ast_data": "FunctionDef name:_get_filtered_data arg:self arg:feature_filter arg:module_fqn_filter arguments arg arg arg Call For If BoolOp Compare Compare Assign Assign For If BoolOp Compare Compare Assign Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "_log_dirichlet_norm",
    "source_code": "def _log_dirichlet_norm(dirichlet_concentration):\n    return gammaln(np.sum(dirichlet_concentration)) - np.sum(gammaln(dirichlet_concentration))",
    "docstring": "Compute the log of the Dirichlet distribution normalization term. Parameters ---------- dirichlet_concentration : array-like of shape (n_samples,) The parameters values of the Dirichlet distribution. Returns ------- log_dirichlet_norm : float The log normalization of the Dirichlet distribution.",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\mixture\\_bayesian_mixture.py",
    "ast_data": "FunctionDef name:_log_dirichlet_norm arg:dirichlet_concentration arguments arg Return return:yes Call Call Call Call"
  },
  {
    "library": "kornia",
    "name": "_to_float32",
    "source_code": "def _to_float32(image: Tensor) -> Tensor:\n    KORNIA_CHECK(image.dtype == torch.uint8)\n    return image.float() / 255.0",
    "docstring": "Convert an image tensor to float32.",
    "type": "function",
    "file_path": "kornia\\kornia\\io\\io.py",
    "ast_data": "FunctionDef name:_to_float32 arg:image arguments arg Call Compare Return return:yes Call"
  },
  {
    "library": "django",
    "name": "ChoicesType",
    "source_code": "class ChoicesType(EnumType):\n\n    def __new__(metacls, classname, bases, classdict, **kwds):\n        labels = []\n        for key in classdict._member_names:\n            value = classdict[key]\n            if isinstance(value, (list, tuple)) and len(value) > 1 and isinstance(value[-1], (Promise, str)):\n                *value, label = value\n                value = tuple(value)\n            else:\n                label = key.replace('_', ' ').title()\n            labels.append(label)\n            dict.__setitem__(classdict, key, value)\n        cls = super().__new__(metacls, classname, bases, classdict, **kwds)\n        for member, label in zip(cls.__members__.values(), labels):\n            member._label_ = label\n        return enum.unique(cls)\n\n    @property\n    def names(cls):\n        empty = ['__empty__'] if hasattr(cls, '__empty__') else []\n        return empty + [member.name for member in cls]\n\n    @property\n    def choices(cls):\n        empty = [(None, cls.__empty__)] if hasattr(cls, '__empty__') else []\n        return empty + [(member.value, member.label) for member in cls]\n\n    @property\n    def labels(cls):\n        return [label for _, label in cls.choices]\n\n    @property\n    def values(cls):\n        return [value for value, _ in cls.choices]",
    "docstring": "A metaclass for creating a enum choices.",
    "type": "class",
    "file_path": "django\\django\\db\\models\\enums.py",
    "ast_data": "ClassDef name:ChoicesType FunctionDef name:__new__ arg:metacls arg:classname arg:bases arg:classdict arguments arg arg arg arg arg Assign For Assign If BoolOp Call Compare Call Call Assign Assign Call Assign Call Call Call Call Assign Call Call For Call Call Assign Return return:yes Call FunctionDef name:names arg:cls arguments arg Assign Call Return return:yes FunctionDef name:choices arg:cls arguments arg Assign Call Return return:yes FunctionDef name:labels arg:cls arguments arg Return return:yes FunctionDef name:values arg:cls arguments arg Return return:yes"
  },
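The metaclass above is most easily seen through Django's `TextChoices`, which uses it; note the auto-generated label for the member declared without one.

```python
from django.db.models.enums import TextChoices

class Suit(TextChoices):
    DIAMOND = "D", "Diamond"
    SPADE = "S"                      # label derived from the name: "Spade"

assert Suit.choices == [("D", "Diamond"), ("S", "Spade")]
assert Suit.labels == ["Diamond", "Spade"]
assert Suit.SPADE.label == "Spade"
```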
  {
    "library": "kornia",
    "name": "vee",
    "source_code": "@staticmethod\ndef vee(omega: Tensor) -> Tensor:\n    check_se2_omega_shape(omega)\n    upsilon = omega[..., 2, :2]\n    theta = So2.vee(omega[..., :2, :2])\n    return concatenate((upsilon, theta[..., None]), -1)",
    "docstring": "Convert elements from lie algebra to vector space. Args: omega: 3x3-matrix representing lie algebra of shape :math:. Returns: vector of shape :math:. Example: >>> v = torch.ones(3) >>> omega_hat = Se2.hat(v) >>> Se2.vee(omega_hat) tensor([1., 1., 1.])",
    "type": "method",
    "file_path": "kornia\\kornia\\geometry\\liegroup\\se2.py",
    "ast_data": "FunctionDef name:vee arg:omega arguments arg Call Assign Assign Call Return return:yes Call"
  },
  {
    "library": "sphinx",
    "name": "canon_path",
    "source_code": "def canon_path(native_path: str | os.PathLike[str], /) -> str:\n    return os.fspath(native_path).replace(os.path.sep, SEP)",
    "docstring": "Return path in OS-independent form",
    "type": "function",
    "file_path": "sphinx\\sphinx\\util\\osutil.py",
    "ast_data": "FunctionDef name:canon_path arguments arg Return return:yes Call Call"
  },
  {
    "library": "scipy",
    "name": "_wrap_callback",
    "source_code": "def _wrap_callback(callback, method=None):\n    if callback is None or method in {'tnc', 'slsqp', 'cobyla', 'cobyqa'}:\n        return callback\n    sig = inspect.signature(callback)\n    if set(sig.parameters) == {'intermediate_result'}:\n\n        def wrapped_callback(res):\n            return callback(intermediate_result=res)\n    elif method == 'trust-constr':\n\n        def wrapped_callback(res):\n            return callback(np.copy(res.x), res)\n    elif method == 'differential_evolution':\n\n        def wrapped_callback(res):\n            return callback(np.copy(res.x), res.convergence)\n    else:\n\n        def wrapped_callback(res):\n            return callback(np.copy(res.x))\n    wrapped_callback.stop_iteration = False\n    return wrapped_callback",
    "docstring": "Wrap a user-provided callback so that attributes can be attached.",
    "type": "function",
    "file_path": "scipy\\scipy\\optimize\\_optimize.py",
    "ast_data": "FunctionDef name:_wrap_callback arg:callback arg:method arguments arg arg If BoolOp Compare Compare Return return:yes Assign Call If Compare Call FunctionDef name:wrapped_callback arg:res arguments arg Return return:yes Call If Compare FunctionDef name:wrapped_callback arg:res arguments arg Return return:yes Call Call If Compare FunctionDef name:wrapped_callback arg:res arguments arg Return return:yes Call Call FunctionDef name:wrapped_callback arg:res arguments arg Return return:yes Call Call Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "build_results",
    "source_code": "def build_results(self, session, tensor_values):\n    full_values = []\n    assert len(self._final_fetches) == len(tensor_values)\n    i = 0\n    j = 0\n    for is_op in self._ops:\n        if is_op:\n            full_values.append(None)\n        else:\n            if self._fetches[i].ref() in self._feed_handles:\n                value = self._feed_handles[self._fetches[i].ref()].eval()\n            else:\n                value = self._feeds.get(self._fetches[i].ref())\n            if value is None:\n                value = tensor_values[j]\n                j += 1\n            dtype = self._fetch_handles.get(self._fetches[i].ref())\n            if dtype:\n                full_values.append(session_ops.TensorHandle(value, dtype, session))\n            else:\n                full_values.append(value)\n            i += 1\n    assert j == len(tensor_values)\n    return self._fetch_mapper.build_results(full_values)",
    "docstring": "Build results matching the original fetch shape. must be a list of the same length as the one returned by , and holding the requested fetch values. This method builds a struct with the same shape as the original passed to the constructor, in which the fetches are replaced by their fetched value. Args: session: The enclosing session. Used for tensor handles. tensor_values: List of values matching the list returned by fetches(). Returns: A structure of the same shape as the original argument but containing tensors or None (for fetched ops).",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\client\\session.py",
    "ast_data": "FunctionDef name:build_results arg:self arg:session arg:tensor_values arguments arg arg arg Assign Compare Call Call Assign Assign For If Call If Compare Call Assign Call Call Assign Call Call If Compare Assign Assign Call Call If Call Call Call Compare Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "inplace_wrapper",
    "source_code": "def inplace_wrapper(fn: Callable) -> Callable:\n\n    @wraps(fn)\n    def wrapped_fn(gm):\n        fn(gm)\n        return gm\n    return wrapped_fn",
    "docstring": "Convenience wrapper for passes which modify an object inplace. This wrapper makes them return the modified object instead. Args: fn (Callable[Object, Any]) Returns: wrapped_fn (Callable[Object, Object])",
    "type": "function",
    "file_path": "pytorch\\torch\\fx\\passes\\pass_manager.py",
    "ast_data": "FunctionDef name:inplace_wrapper arg:fn arguments arg FunctionDef name:wrapped_fn arg:gm arguments arg Call Return return:yes Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "WriteStackFrameWithId",
    "source_code": "def WriteStackFrameWithId(self, stack_frame_with_id):\n    debug_event = debug_event_pb2.DebugEvent(stack_frame_with_id=stack_frame_with_id)\n    self._EnsureTimestampAdded(debug_event)\n    _pywrap_debug_events_writer.WriteStackFrameWithId(self._dump_root, debug_event)",
    "docstring": "Write a StackFrameWithId proto with the writer. Args: stack_frame_with_id: A StackFrameWithId proto, describing the content a stack frame involved in the execution of the debugged TensorFlow program.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\debug\\lib\\debug_events_writer.py",
    "ast_data": "FunctionDef name:WriteStackFrameWithId arg:self arg:stack_frame_with_id arguments arg arg Assign Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_freeze_concrete_function",
    "source_code": "@convert_phase(Component.PREPARE_TF_MODEL, SubComponent.FREEZE_CONCRETE_FUNCTION)\ndef _freeze_concrete_function(self):\n    if len(self._funcs) == 0:\n        raise ValueError('No ConcreteFunction is specified.')\n    if len(self._funcs) > 1:\n        raise ValueError('This converter can only convert a single ConcreteFunction. Converting multiple functions is under development.')\n    frozen_func, graph_def = _convert_to_constants.convert_variables_to_constants_v2_as_graph(self._funcs[0], lower_control_flow=False)\n    input_tensors = [tensor for tensor in frozen_func.inputs if tensor.dtype != _dtypes.resource]\n    output_tensors = frozen_func.outputs\n    return (graph_def, input_tensors, output_tensors, frozen_func)",
    "docstring": "Convert the given ConcreteFunction to frozen graph. Returns: graph_def: The frozen GraphDef. input_tensors: List of input tensors. output_tensors: List of output tensors. frozen_func: The frozen ConcreteFunction. Raises: ValueError: none or multiple ConcreteFunctions provided.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\lite\\python\\lite.py",
    "ast_data": "FunctionDef name:_freeze_concrete_function arg:self arguments arg If Compare Call Raise Call If Compare Call Raise Call Assign Call Assign Compare Assign Return return:yes Call"
  },
  {
    "library": "kornia",
    "name": "__call__",
    "source_code": "def __call__(self, *inputs: Any, input_names_to_handle: Optional[list[Any]]=None, output_type: str='tensor', **kwargs: Any) -> Any:\n    if not self._disable_features:\n        decorated_forward = self.convert_input_output(input_names_to_handle=input_names_to_handle, output_type=output_type)(super().__call__)\n        _output_image = decorated_forward(*inputs, **kwargs)\n        if output_type == 'tensor':\n            self._output_image = self._detach_tensor_to_cpu(_output_image)\n        else:\n            self._output_image = _output_image\n    else:\n        _output_image = super().__call__(*inputs, **kwargs)\n    return _output_image",
    "docstring": "Overwrite the __call__ function to handle various inputs. Args: inputs: Inputs to operate on. input_names_to_handle: List of input names to convert, if None, handle all inputs. output_type: Desired output type ('tensor', 'numpy', or 'pil'). kwargs: Additional arguments. Returns: Callable: Decorated function with converted input and output types.",
    "type": "method",
    "file_path": "kornia\\kornia\\core\\module.py",
    "ast_data": "FunctionDef name:__call__ arg:self arguments arg arg arg arg arg If Assign Call Call Call Assign Call If Compare Assign Call Assign Assign Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "is_symbolic_tensor",
    "source_code": "def is_symbolic_tensor(tensor):\n    if isinstance(tensor, tensor_lib.Tensor):\n        return hasattr(tensor, 'graph')\n    elif is_extension_type(tensor):\n        component_tensors = nest.flatten(tensor, expand_composites=True)\n        return any((hasattr(t, 'graph') for t in component_tensors))\n    elif isinstance(tensor, variables.Variable):\n        return getattr(tensor, '_keras_history', False) or not context.executing_eagerly()\n    elif isinstance(tensor, tuple(_user_convertible_tensor_types)):\n        tensor = ops.convert_to_tensor_or_composite(tensor)\n        return is_symbolic_tensor(tensor)\n    else:\n        return False",
    "docstring": "Returns whether a tensor is symbolic (from a TF graph) or an eager tensor. A Variable can be seen as either: it is considered symbolic when we are in a graph scope, and eager when we are in an eager scope. Args: tensor: A tensor instance to test. Returns: True for symbolic tensors, False for eager tensors.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\utils\\tf_utils.py",
    "ast_data": "FunctionDef name:is_symbolic_tensor arg:tensor arguments arg If Call Return return:yes Call If Call Assign Call Return return:yes Call Call If Call Return return:yes BoolOp Call Call If Call Call Assign Call Return return:yes Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_embedding_lookup_for_sparse_tensor",
    "source_code": "def _embedding_lookup_for_sparse_tensor(self, inp: sparse_tensor.SparseTensor, weight: Optional[sparse_tensor.SparseTensor], table: tf_variables.Variable, feature: tpu_embedding_v2_utils.FeatureConfig) -> tensor.Tensor:\n\n    def sparse_to_dense_computation(inp, weight):\n        if weight is None:\n            weight = sparse_tensor.SparseTensor(inp.indices, array_ops.ones_like(inp.values, dtype=dtypes.float32), dense_shape=inp.dense_shape)\n        inp = sparse_ops.sparse_tensor_to_dense(inp)\n        weight = sparse_ops.sparse_tensor_to_dense(weight)\n        return (inp, weight)\n    inp, weight = tpu_replication.outside_compilation(sparse_to_dense_computation, inp=inp, weight=weight)\n    embeddings = embedding_ops.embedding_lookup_v2(table, inp)\n    weight = array_ops.expand_dims(weight, -1)\n    embeddings *= weight\n    if not feature.output_shape and feature.max_sequence_length > 0:\n        embeddings = self._pad_or_truncate_with_sequence_length(embeddings, feature.max_sequence_length)\n    else:\n        embeddings = self._apply_combiner_to_embeddings(embeddings, weight, feature.table.combiner)\n    return embeddings",
    "docstring": "Embedding lookup for sparse tensor based on its feature config. Args: inp: a single SparseTensor input. weight: None or SparseTensor which has the same shape of the input. table: a table variable. feature: a feature config. Returns: Embedding lookup result.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\tpu\\tpu_embedding_v1.py",
    "ast_data": "FunctionDef name:_embedding_lookup_for_sparse_tensor arg:self arg:inp arg:weight arg:table arg:feature arguments arg arg arg arg arg FunctionDef name:sparse_to_dense_computation arg:inp arg:weight arguments arg arg If Compare Assign Call Call Assign Call Assign Call Return return:yes Assign Call Assign Call Assign Call If BoolOp Compare Assign Call Assign Call Return return:yes"
  },
  {
    "library": "django",
    "name": "DisallowedHost",
    "source_code": "class DisallowedHost(SuspiciousOperation):\n    pass",
    "docstring": "HTTP_HOST header contains invalid value",
    "type": "class",
    "file_path": "django\\django\\core\\exceptions.py",
    "ast_data": "ClassDef name:DisallowedHost"
  },
  {
    "library": "tensorflow",
    "name": "_TransformedFnCache",
    "source_code": "class _TransformedFnCache(object):\n    __slots__ = ('_cache',)\n\n    def __init__(self):\n        self._cache = weakref.WeakKeyDictionary()\n\n    def _get_key(self, entity):\n        raise NotImplementedError('subclasses must override')\n\n    def has(self, entity, subkey):\n        key = self._get_key(entity)\n        parent = self._cache.get(key, None)\n        if parent is None:\n            return False\n        return subkey in parent\n\n    def __getitem__(self, entity):\n        key = self._get_key(entity)\n        parent = self._cache.get(key, None)\n        if parent is None:\n            self._cache[key] = parent = {}\n        return parent\n\n    def __len__(self):\n        return len(self._cache)",
    "docstring": "Generic hierarchical cache for transformed functions. The keys are soft references (i.e. they are discarded when the key is destroyed) created from the source function by . The subkeys are strong references and can be any value. Typically they identify different kinds of transformation.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\autograph\\pyct\\cache.py",
    "ast_data": "ClassDef name:_TransformedFnCache Assign FunctionDef name:__init__ arg:self arguments arg Assign Call FunctionDef name:_get_key arg:self arg:entity arguments arg arg Raise Call FunctionDef name:has arg:self arg:entity arg:subkey arguments arg arg arg Assign Call Assign Call If Compare Return return:yes Return return:yes Compare FunctionDef name:__getitem__ arg:self arg:entity arguments arg arg Assign Call Assign Call If Compare Assign Return return:yes FunctionDef name:__len__ arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "django",
    "name": "_get_weekday",
    "source_code": "def _get_weekday(self, date):\n    week_format = self.get_week_format()\n    if week_format in {'%W', '%V'}:\n        return date.weekday()\n    elif week_format == '%U':\n        return (date.weekday() + 1) % 7\n    else:\n        raise ValueError('unknown week format: %s' % week_format)",
    "docstring": "Return the weekday for a given date. The first day according to the week format is 0 and the last day is 6.",
    "type": "method",
    "file_path": "django\\django\\views\\generic\\dates.py",
    "ast_data": "FunctionDef name:_get_weekday arg:self arg:date arguments arg arg Assign Call If Compare Return return:yes Call If Compare Return return:yes Call Raise Call"
  },
  {
    "library": "tensorflow",
    "name": "assert_variables_initialized",
    "source_code": "@tf_export(v1=['assert_variables_initialized'])\n@tf_should_use.should_use_result\ndef assert_variables_initialized(var_list=None):\n    if var_list is None:\n        var_list = global_variables() + local_variables()\n    if not var_list:\n        var_list = []\n        for op in ops.get_default_graph().get_operations():\n            if op.type in ['Variable', 'VariableV2', 'AutoReloadVariable']:\n                var_list.append(op.outputs[0])\n    if not var_list:\n        return None\n    else:\n        ranks = []\n        for var in var_list:\n            with ops.colocate_with(var.op):\n                ranks.append(array_ops.rank_internal(var, optimize=False))\n        if len(ranks) == 1:\n            return ranks[0]\n        else:\n            return array_ops_stack.stack(ranks)",
    "docstring": "Returns an Op to check if variables are initialized. NOTE: This function is obsolete and will be removed in 6 months. Please change your implementation to use . When run, the returned Op will raise the exception if any of the variables has not yet been initialized. Note: This function is implemented by trying to fetch the values of the variables. If one of the variables is not initialized a message may be logged by the C++ runtime. This is expected. Args: var_list: List of objects to check. Defaults to the value of Returns: An Op, or None if there are no variables.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\variables.py",
    "ast_data": "FunctionDef name:assert_variables_initialized arg:var_list arguments arg If Compare Assign Call Call If Assign For Call Call If Compare Call If Return return:no Assign For With Call Call Call If Compare Call Return return:yes Return return:yes Call Call"
  },
  {
    "library": "kornia",
    "name": "integral_tensor",
    "source_code": "def integral_tensor(input: Tensor, dim: Optional[Tuple[int, ...]]=None) -> Tensor:\n    KORNIA_CHECK_SHAPE(input, ['*', 'D'])\n    if dim is None:\n        dim = (-1,)\n    KORNIA_CHECK(len(dim) > 0, 'dim must be a non-empty tuple.')\n    KORNIA_CHECK(len(dim) <= len(input.shape), 'dim must be a tuple of length <= input.shape.')\n    output = input\n    for i in dim:\n        output = output.cumsum(i)\n    return output",
    "docstring": "Calculate integral of the input tensor. The algorithm computes the integral image by summing over the specified dimensions. In case dim is specified, the contained dimensions must be unique and sorted in ascending order and not exceed the number of dimensions of the input tensor. Args: input: the input tensor with shape :math:. Where D is the number of dimensions. dim: the dimension to be summed. Returns: Integral tensor for the input tensor with shape :math:. Examples: >>> input = torch.ones(3, 5) >>> output = integral_tensor(input, (-2, -1)) >>> output tensor([[ 1., 2., 3., 4., 5.], [ 2., 4., 6., 8., 10.], [ 3., 6., 9., 12., 15.]])",
    "type": "function",
    "file_path": "kornia\\kornia\\enhance\\integral.py",
    "ast_data": "FunctionDef name:integral_tensor arg:input arg:dim arguments arg arg Call If Compare Assign Call Compare Call Call Compare Call Call Assign For Assign Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "dequantize_per_channel",
    "source_code": "@impl(quantized_decomposed_lib, 'dequantize_per_channel', 'CompositeExplicitAutograd')\ndef dequantize_per_channel(input: torch.Tensor, scales: torch.Tensor, zero_points: Optional[torch.Tensor], axis: int, quant_min: int, quant_max: int, dtype: torch.dtype, *, out_dtype: Optional[torch.dtype]=None) -> torch.Tensor:\n    assert input.dtype == dtype, f'Expecting input to have dtype {dtype}, but got dtype: {input.dtype}'\n    if out_dtype is None:\n        out_dtype = torch.float32\n    assert axis < input.dim(), f'Expecting axis to be < {input.dim()}'\n    _quant_min_max_bounds_check(quant_min, quant_max, dtype)\n    input, permute_axis_list = _permute_to_axis_zero(input, axis)\n    new_shape = [1] * input.dim()\n    new_shape[0] = scales.shape[0]\n    scales = scales.view(new_shape)\n    if zero_points is not None:\n        res = (input - zero_points.view(new_shape)) * scales\n    else:\n        res = input * scales\n    res = res.to(out_dtype)\n    out = res.permute(tuple(permute_axis_list))\n    return out",
    "docstring": "Affine per channel dequantization for the Tensor using the same quantization parameters for each channel/axis to map from quantized values to floating point values Args: input (torch.Tensor): Tensor with dtype matching argument, e.g. (), it is a per channel quantized Tensor if combined with quantization parameter in the argument of this function (scales/zero_points/axis) scales (torch.Tensor): a list of scale quantization parameter for affine quantization, one per channel zero_points (torch.Tensor): a list of zero_point quantization parameter for affine quantization, one per channel quant_min (int): minimum quantized value for output Tensor (not used in computation, reserved for pattern matching) quant_max (int): maximum quantized value for output Tensor (not used in computation, reserved for pattern matching) dtype (torch.dtype): requested dtype for output Tensor (not used in computation, reserved for pattern matching) out_dtype (torch.dtype?): optional dtype for output Tensor Returns: dequantized float32 Tensor",
    "type": "function",
    "file_path": "pytorch\\torch\\ao\\quantization\\fx\\_decomposed.py",
    "ast_data": "FunctionDef name:dequantize_per_channel arg:input arg:scales arg:zero_points arg:axis arg:quant_min arg:quant_max arg:dtype arguments arg arg arg arg arg arg arg arg Compare If Compare Assign Compare Call Call Call Assign Call Assign Call Assign Assign Call If Compare Assign Call Assign Assign Call Assign Call Call Return return:yes Call"
  },
  {
    "library": "pygame",
    "name": "__call__",
    "source_code": "def __call__(self, left, right):\n    ratio = self.ratio\n    leftrect = left.rect\n    width = leftrect.width\n    height = leftrect.height\n    leftrect = leftrect.inflate(width * ratio - width, height * ratio - height)\n    rightrect = right.rect\n    width = rightrect.width\n    height = rightrect.height\n    rightrect = rightrect.inflate(width * ratio - width, height * ratio - height)\n    return leftrect.colliderect(rightrect)",
    "docstring": "detect collision between two sprites using scaled rects pygame.sprite.collide_rect_ratio(ratio)(left, right): return bool Tests for collision between two sprites. Uses the pygame.Rect colliderect function to calculate the collision after scaling the rects by the stored ratio. Sprites must have \"rect\" attributes.",
    "type": "method",
    "file_path": "pygame\\src_py\\sprite.py",
    "ast_data": "FunctionDef name:__call__ arg:self arg:left arg:right arguments arg arg arg Assign Assign Assign Assign Assign Call Assign Assign Assign Assign Call Return return:yes Call"
  },
  {
    "library": "numpy",
    "name": "_void_scalar_to_string",
    "source_code": "def _void_scalar_to_string(x, is_repr=True):\n    options = format_options.get().copy()\n    if options['legacy'] <= 125:\n        return StructuredVoidFormat.from_data(array(x), **options)(x)\n    if options.get('formatter') is None:\n        options['formatter'] = {}\n    options['formatter'].setdefault('float_kind', str)\n    val_repr = StructuredVoidFormat.from_data(array(x), **options)(x)\n    if not is_repr:\n        return val_repr\n    cls = type(x)\n    cls_fqn = cls.__module__.replace('numpy', 'np') + '.' + cls.__name__\n    void_dtype = np.dtype((np.void, x.dtype))\n    return f'{cls_fqn}({val_repr}, dtype={void_dtype!s})'",
    "docstring": "Implements the repr for structured-void scalars. It is called from the scalartypes.c.src code, and is placed here because it uses the elementwise formatters defined above.",
    "type": "function",
    "file_path": "numpy\\numpy\\_core\\arrayprint.py",
    "ast_data": "FunctionDef name:_void_scalar_to_string arg:x arg:is_repr arguments arg arg Assign Call Call If Compare Return return:yes Call Call Call If Compare Call Assign Call Assign Call Call Call If Return return:yes Assign Call Assign Call Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "stack_inputs",
    "source_code": "def stack_inputs(self, stack_indices=None, tile_variants=False):\n    if stack_indices is None:\n        stack_indices = range(len(self._inputs))\n    length = self.pfor.loop_len_vector\n    for i in stack_indices:\n        inp = self._inputs[i]\n        is_variant = inp.t.dtype == dtypes.variant\n        if not inp.is_stacked:\n            self._inputs[i] = _stack(inp.t, length)\n            if tile_variants and is_variant:\n                self._inputs[i] = wrap(_tile_variant_with_length(self._inputs[i].t, length), True)\n        elif not tile_variants and is_variant:\n            self._inputs[i] = wrap(_untile_variant(self._inputs[i].t), True)",
    "docstring": "Stacks unstacked inputs at . Args: stack_indices: indices of inputs at which stacking is done. If None, stacking is done at all indices. tile_variants: If True, affected indices which have a variant dtype will be tiled after this operation to match the expected shape of a vectorized tensor. Variants generally need to be un-tiled when they are inputs to operations and tiled when returned.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\parallel_for\\pfor.py",
    "ast_data": "FunctionDef name:stack_inputs arg:self arg:stack_indices arg:tile_variants arguments arg arg arg If Compare Assign Call Call Assign For Assign Assign Compare If Assign Call If BoolOp Assign Call Call If BoolOp Assign Call Call"
  },
  {
    "library": "pytorch",
    "name": "get",
    "source_code": "def get(self, o: int) -> int:\n    return o // self.divisor",
    "docstring": "Divide object by divisor",
    "type": "method",
    "file_path": "pytorch\\torch\\fx\\experimental\\symbolic_shapes.py",
    "ast_data": "FunctionDef name:get arg:self arg:o arguments arg arg Return return:yes"
  },
  {
    "library": "scipy",
    "name": "find_position",
    "source_code": "def find_position(string, index, last_index, last_pos):\n    lines = string.count('\\n', last_index, index)\n    if lines > 0:\n        column = index - string.rfind('\\n', last_index, index)\n    else:\n        column = last_pos[1] + (index - last_index)\n    return (last_pos[0] + lines, column)",
    "docstring": "Given a string and index, return (line, column)",
    "type": "function",
    "file_path": "scipy\\scipy\\_build_utils\\tempita\\_tempita.py",
    "ast_data": "FunctionDef name:find_position arg:string arg:index arg:last_index arg:last_pos arguments arg arg arg arg Assign Call If Compare Assign Call Assign Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "transmute",
    "source_code": "def transmute(self, path, mutation_size, linewidth):\n    raise NotImplementedError('Derived must override')",
    "docstring": "The transmute method is the very core of the ArrowStyle class and must be overridden in the subclasses. It receives the *path* object along which the arrow will be drawn, and the *mutation_size*, with which the arrow head etc. will be scaled. The *linewidth* may be used to adjust the path so that it does not pass beyond the given points. It returns a tuple of a instance and a boolean. The boolean value indicate whether the path can be filled or not. The return value can also be a list of paths and list of booleans of the same length.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\patches.py",
    "ast_data": "FunctionDef name:transmute arg:self arg:path arg:mutation_size arg:linewidth arguments arg arg arg arg Raise Call"
  },
  {
    "library": "pytorch",
    "name": "_map_tensor_from_recv_info",
    "source_code": "def _map_tensor_from_recv_info(self, recv_infos: tuple[InputInfo, ...]):\n\n    def get_recv_tensor(info):\n        if isinstance(info, _RecvInfo):\n            return info.buffer\n        else:\n            raise AssertionError(f'Expected _RecvInfo but got {type(info)}')\n    return map_aggregate(cast(Argument, recv_infos), get_recv_tensor)",
    "docstring": "Map tensors from recv infos to a list.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\pipelining\\stage.py",
    "ast_data": "FunctionDef name:_map_tensor_from_recv_info arg:self arg:recv_infos arguments arg arg FunctionDef name:get_recv_tensor arg:info arguments arg If Call Return return:yes Raise Call Call Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "can_use_flash_attention",
    "source_code": "def can_use_flash_attention(params: SDPAParams, debug: bool=False) -> bool:\n    return torch._C._can_use_flash_attention(params, debug)",
    "docstring": "Check if FlashAttention can be utilized in scaled_dot_product_attention. Args: params: An instance of SDPAParams containing the tensors for query, key, value, an optional attention mask, dropout rate, and a flag indicating if the attention is causal. debug: Whether to logging.warn debug information as to why FlashAttention could not be run. Defaults to False. Returns: True if FlashAttention can be used with the given parameters; otherwise, False. Note: This function is dependent on a CUDA-enabled build of PyTorch. It will return False in non-CUDA environments.",
    "type": "function",
    "file_path": "pytorch\\torch\\backends\\cuda\\__init__.py",
    "ast_data": "FunctionDef name:can_use_flash_attention arg:params arg:debug arguments arg arg Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "get_metadata_routing",
    "source_code": "def get_metadata_routing(self):\n    raise NotImplementedError(f'{self.__class__.__name__} has not implemented metadata routing yet.')",
    "docstring": "Raise . This estimator does not support metadata routing yet.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\utils\\_metadata_requests.py",
    "ast_data": "FunctionDef name:get_metadata_routing arg:self arguments arg Raise Call"
  },
  {
    "library": "pytorch",
    "name": "trace_joint_graph",
    "source_code": "def trace_joint_graph(fn, fw_inputs, fw_outputs):\n    from torch._functorch.aot_autograd import create_joint\n    dummy_aot_config = get_dummy_aot_autograd_config()\n\n    def joint_fn(*primals_and_tangents):\n        primals = primals_and_tangents[:len(fw_inputs)]\n        tangents = primals_and_tangents[len(fw_inputs):]\n        fw_outs, grads = create_joint(prepare_fw_with_masks(fn), aot_config=dummy_aot_config)(primals, tangents)\n        maybe_clone = clone_outputs_aliasing_inputs(primals_and_tangents)\n        return pytree.tree_map(maybe_clone, tuple(grads + list(fw_outs)))\n    primals = list(fw_inputs)\n    tangents = [_from_fun(out) for out in fw_outputs]\n    joint_operands = primals + tangents\n    return _maybe_reenter_make_fx(joint_fn)(*joint_operands)",
    "docstring": "Naively trace out a joint graph. This simplifies the reconstruction of joint graph in the min-cut partitioner later on.",
    "type": "function",
    "file_path": "pytorch\\torch\\_higher_order_ops\\invoke_subgraph.py",
    "ast_data": "FunctionDef name:trace_joint_graph arg:fn arg:fw_inputs arg:fw_outputs arguments arg arg arg Assign Call FunctionDef name:joint_fn arguments arg Assign Call Assign Call Assign Call Call Call Assign Call Return return:yes Call Call Call Assign Call Assign Call Assign Return return:yes Call Call"
  },
  {
    "library": "matplotlib",
    "name": "get_fontsize",
    "source_code": "def get_fontsize(self):\n    return self.prop.get_size_in_points()",
    "docstring": "Return the fontsize in points.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\offsetbox.py",
    "ast_data": "FunctionDef name:get_fontsize arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "loads",
    "source_code": "@staticmethod\ndef loads(data: bytes, fake_mode: FakeTensorMode) -> object:\n    state = _UnpickleState(fake_mode)\n    with io.BytesIO(data) as stream:\n        unpickler = _GraphUnpickler(stream, state)\n        return unpickler.load()",
    "docstring": "Unpickle an object.",
    "type": "method",
    "file_path": "pytorch\\torch\\fx\\_graph_pickler.py",
    "ast_data": "FunctionDef name:loads arg:data arg:fake_mode arguments arg arg Assign Call With Call Assign Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "sparse_maximum",
    "source_code": "@tf_export('sparse.maximum', v1=['sparse.maximum', 'sparse_maximum'])\n@deprecation.deprecated_endpoints('sparse_maximum')\ndef sparse_maximum(sp_a, sp_b, name=None):\n    with ops.name_scope(name, 'SparseSparseMaximum', [sp_a.indices, sp_a.values, sp_b.indices, sp_b.values]) as name:\n        out_indices, out_values = gen_sparse_ops.sparse_sparse_maximum(sp_a.indices, sp_a.values, sp_a.dense_shape, sp_b.indices, sp_b.values, sp_b.dense_shape, name=name)\n    return sparse_tensor.SparseTensor(out_indices, out_values, sp_a.dense_shape)",
    "docstring": "Returns the element-wise max of two SparseTensors. Assumes the two SparseTensors have the same shape, i.e., no broadcasting. Example: >>> sp_zero = tf.sparse.SparseTensor([[0]], [0], [7]) >>> sp_one = tf.sparse.SparseTensor([[1]], [1], [7]) >>> res = tf.sparse.maximum(sp_zero, sp_one) >>> res.indices >>> res.values >>> res.dense_shape The reduction version of this elementwise operation is Args: sp_a: a operand whose dtype is real, and indices lexicographically ordered. sp_b: the other operand with the same requirements (and the same shape). name: optional name of the operation. Returns: output: the output SparseTensor.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\sparse_ops.py",
    "ast_data": "FunctionDef name:sparse_maximum arg:sp_a arg:sp_b arg:name arguments arg arg arg With Call Assign Call Return return:yes Call Call Call"
  },
  {
    "library": "django",
    "name": "get_models",
    "source_code": "@functools.cache\ndef get_models(self, include_auto_created=False, include_swapped=False):\n    self.check_models_ready()\n    result = []\n    for app_config in self.app_configs.values():\n        result.extend(app_config.get_models(include_auto_created, include_swapped))\n    return result",
    "docstring": "Return a list of all installed models. By default, the following models aren't included: - auto-created models for many-to-many relations without an explicit intermediate table, - models that have been swapped out. Set the corresponding keyword argument to True to include such models.",
    "type": "method",
    "file_path": "django\\django\\apps\\registry.py",
    "ast_data": "FunctionDef name:get_models arg:self arg:include_auto_created arg:include_swapped arguments arg arg arg Call Assign For Call Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_toposort",
    "source_code": "def _toposort(edges):\n    incoming_edges = reverse_dict(edges)\n    incoming_edges = {k: set(val) for k, val in incoming_edges.items()}\n    S = {v for v in edges if v not in incoming_edges}\n    L = []\n    while S:\n        n = S.pop()\n        L.append(n)\n        for m in edges.get(n, ()):\n            assert n in incoming_edges[m]\n            incoming_edges[m].remove(n)\n            if not incoming_edges[m]:\n                S.add(m)\n    if any((incoming_edges.get(v, None) for v in edges)):\n        raise ValueError('Input has cycles')\n    return L",
    "docstring": "Topological sort algorithm by Kahn [1] - O(nodes + vertices) inputs: edges - a dict of the form {a: {b, c}} where b and c depend on a outputs: L - an ordered list of nodes that satisfy the dependencies of edges >>> # xdoctest: +SKIP >>> _toposort({1: (2, 3), 2: (3,)}) [1, 2, 3] Closely follows the wikipedia page [2] [1] Kahn, Arthur B. (1962), \"Topological sorting of large networks\", Communications of the ACM [2]",
    "type": "function",
    "file_path": "pytorch\\torch\\fx\\experimental\\unification\\utils.py",
    "ast_data": "FunctionDef name:_toposort arg:edges arguments arg Assign Call Assign Call Call Assign Compare Assign While Assign Call Call For Call Compare Call If Call If Call Call Raise Call Return return:yes"
  },
  {
    "library": "scipy",
    "name": "_banded_jac_wrapper",
    "source_code": "def _banded_jac_wrapper(jacfunc, ml, jac_params):\n\n    def jac_wrapper(t, y):\n        jac = asarray(jacfunc(t, y, *jac_params))\n        padded_jac = vstack((jac, zeros((ml, jac.shape[1]))))\n        return padded_jac\n    return jac_wrapper",
    "docstring": "Wrap a banded Jacobian function with a function that pads the Jacobian with rows of zeros.",
    "type": "function",
    "file_path": "scipy\\scipy\\integrate\\_ode.py",
    "ast_data": "FunctionDef name:_banded_jac_wrapper arg:jacfunc arg:ml arg:jac_params arguments arg arg arg FunctionDef name:jac_wrapper arg:t arg:y arguments arg arg Assign Call Call Assign Call Call Return return:yes Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "InstanceProperty",
    "source_code": "class InstanceProperty(Layer):\n\n    @trackable.no_automatic_dependency_tracking\n    def __init__(self, attr_name, **kwargs):\n        self.attr_name = attr_name\n        if 'name' not in kwargs:\n            kwargs['name'] = K.unique_object_name('input.' + self.attr_name, zero_based=True, avoid_observed_names=True)\n        kwargs['autocast'] = False\n        self._must_restore_from_config = True\n        super(InstanceProperty, self).__init__(**kwargs)\n        self._preserve_input_structure_in_config = True\n\n    def call(self, obj):\n        return getattr(obj, self.attr_name)\n\n    def get_config(self):\n        config = {'attr_name': self.attr_name}\n        base_config = super(InstanceProperty, self).get_config()\n        return dict(list(base_config.items()) + list(config.items()))\n\n    @classmethod\n    def from_config(cls, config, custom_objects=None):\n        return cls(**config)",
    "docstring": "Wraps an instance property access (e.g. ) in a Keras Layer. This layer takes an attribute name in the constructor and, when called on input tensor returns . KerasTensors specialized for specific extension types use it to represent instance property accesses on the represented object in the case where the property needs to be dynamically accessed as opposed to being statically computed from the typespec, e.g. x = keras.Input(..., ragged=True) out = x.flat_values",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\layers\\core.py",
    "ast_data": "ClassDef name:InstanceProperty FunctionDef name:__init__ arg:self arg:attr_name arguments arg arg arg Assign If Compare Assign Call Assign Assign Call Call Assign FunctionDef name:call arg:self arg:obj arguments arg arg Return return:yes Call FunctionDef name:get_config arg:self arguments arg Assign Assign Call Call Return return:yes Call Call Call Call Call FunctionDef name:from_config arg:cls arg:config arg:custom_objects arguments arg arg arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_wrap_unconditional_loss",
    "source_code": "def _wrap_unconditional_loss(loss_fn, index):\n    fn = loss_fn.args[0] if isinstance(loss_fn, functools.partial) else loss_fn\n    if isinstance(fn, def_function.Function):\n        return fn\n    else:\n        return def_function.Function(fn, 'loss_fn_{}'.format(index), input_signature=[])",
    "docstring": "Wraps callable/unconditional loss, returning a serializable function.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\saving\\saved_model\\save_impl.py",
    "ast_data": "FunctionDef name:_wrap_unconditional_loss arg:loss_fn arg:index arguments arg arg Assign Call If Call Return return:yes Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "raw_session",
    "source_code": "def raw_session(self):\n    return self._tf_sess()",
    "docstring": "Returns underlying object.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\training\\monitored_session.py",
    "ast_data": "FunctionDef name:raw_session arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "add_graph",
    "source_code": "def add_graph(self, graph_profile, walltime=None):\n    graph = graph_profile[0]\n    stepstats = graph_profile[1]\n    event = event_pb2.Event(graph_def=graph.SerializeToString())\n    self.add_event(event, None, walltime)\n    trm = event_pb2.TaggedRunMetadata(tag='step1', run_metadata=stepstats.SerializeToString())\n    event = event_pb2.Event(tagged_run_metadata=trm)\n    self.add_event(event, None, walltime)",
    "docstring": "Add a and step stats protocol buffer to the event file. Args: graph_profile: A and step stats protocol buffer. walltime: float. Optional walltime to override the default (current) walltime (from time.time()) seconds after epoch",
    "type": "method",
    "file_path": "pytorch\\torch\\utils\\tensorboard\\writer.py",
    "ast_data": "FunctionDef name:add_graph arg:self arg:graph_profile arg:walltime arguments arg arg arg Assign Assign Assign Call Call Call Assign Call Call Assign Call Call"
  },
  {
    "library": "tensorflow",
    "name": "clear_signature_defs",
    "source_code": "def clear_signature_defs(tflite_model):\n    model = tflite_model\n    if not isinstance(tflite_model, bytearray):\n        model = bytearray(tflite_model)\n    return signature_def_util.ClearSignatureDefs(model)",
    "docstring": "Clears SignatureDefs from the Metadata of a TfLite flatbuffer buffer. Args: tflite_model: TFLite model buffer to remove signature_defs. Returns: buffer: A TFLite model binary identical to model buffer with no SignatureDef metadata. Raises: ValueError: tflite_model buffer does not contain a valid TFLite model.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\lite\\tools\\signature\\signature_def_utils.py",
    "ast_data": "FunctionDef name:clear_signature_defs arg:tflite_model arguments arg Assign If Call Assign Call Return return:yes Call"
  },
  {
    "library": "numpy",
    "name": "copy",
    "source_code": "def copy(self, *args, **kwargs):\n    return self",
    "docstring": "Copy is a no-op on the maskedconstant, as it is a scalar",
    "type": "method",
    "file_path": "numpy\\numpy\\ma\\core.py",
    "ast_data": "FunctionDef name:copy arg:self arguments arg arg arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "concat_map_codegen",
    "source_code": "def concat_map_codegen(func: Callable[[NativeFunction], Sequence[str]], xs: Iterable[NativeFunctionsGroup | NativeFunction], ops_list: list[OperatorName]=full_codegen) -> Iterator[str]:\n    for x in xs:\n        fs = list(x.functions()) if isinstance(x, NativeFunctionsGroup) else [x]\n        for f in fs:\n            if f.func.name in ops_list:\n                yield from func(f)",
    "docstring": "We code-gen for the functional variant, which is all we need for IR classes/lowerings/shape inferences, but we only code-gen additional entries for the inplace variant for the native functions.",
    "type": "function",
    "file_path": "pytorch\\torchgen\\gen_lazy_tensor.py",
    "ast_data": "FunctionDef name:concat_map_codegen arg:func arg:xs arg:ops_list arguments arg arg arg For Assign Call Call Call For If Compare Call"
  },
  {
    "library": "pytorch",
    "name": "KeywordArg",
    "source_code": "class KeywordArg(PatternExpr):\n\n    def __init__(self, name: str) -> None:\n        super().__init__()\n        self.name = name\n\n    def __repr__(self) -> str:\n        return f'KeywordArg({self.name!r})'\n\n    def _match(self, node: NodeOrConstant, ctx: MatchContext) -> MatchResult:\n        return Match(ctx, self, kwargs={self.name: node})\n\n    def pattern_eq(self, other: Any) -> bool:\n        other = typing.cast(Self, other)\n        return super().pattern_eq(other) and self.name == other.name",
    "docstring": "Capture a kwarg which will become an input to the handler.",
    "type": "class",
    "file_path": "pytorch\\torch\\_inductor\\pattern_matcher.py",
    "ast_data": "ClassDef name:KeywordArg FunctionDef name:__init__ arg:self arg:name arguments arg arg Call Call Assign FunctionDef name:__repr__ arg:self arguments arg Return return:yes FunctionDef name:_match arg:self arg:node arg:ctx arguments arg arg arg Return return:yes Call FunctionDef name:pattern_eq arg:self arg:other arguments arg arg Assign Call Return return:yes BoolOp Call Call Compare"
  },
  {
    "library": "scipy",
    "name": "_get_mwu_z",
    "source_code": "def _get_mwu_z(U, n1, n2, t, axis=0, continuity=True):\n    mu = n1 * n2 / 2\n    n = n1 + n2\n    tie_term = (t ** 3 - t).sum(axis=-1)\n    s = np.sqrt(n1 * n2 / 12 * (n + 1 - tie_term / (n * (n - 1))))\n    numerator = U - mu\n    if continuity:\n        numerator -= 0.5\n    with np.errstate(divide='ignore', invalid='ignore'):\n        z = numerator / s\n    return z",
    "docstring": "Standardized MWU statistic",
    "type": "function",
    "file_path": "scipy\\scipy\\stats\\_mannwhitneyu.py",
    "ast_data": "FunctionDef name:_get_mwu_z arg:U arg:n1 arg:n2 arg:t arg:axis arg:continuity arguments arg arg arg arg arg arg Assign Assign Assign Call Assign Call Assign If With Call Assign Return return:yes"
  },
  {
    "library": "django",
    "name": "pop",
    "source_code": "def pop(self, index=-1):\n    result = self[index]\n    del self[index]\n    return result",
    "docstring": "Standard list pop method",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\geos\\mutable_list.py",
    "ast_data": "FunctionDef name:pop arg:self arg:index arguments arg arg Assign Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "key",
    "source_code": "@staticmethod\ndef key(kernel_src: str):\n    return code_hash(kernel_src, extra=torch_key())",
    "docstring": "Generates a cache key given a triton kernel's full source code. This source includes the inductor meta, compilation metadata, the kernel itself, etc. should be the exact string passed to async_compile.triton()'s first argument.",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\async_compile.py",
    "ast_data": "FunctionDef name:key arg:kernel_src arguments arg Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "initialize",
    "source_code": "@deprecated(None, \"Use the iterator's `initializer` property instead.\")\ndef initialize(self):\n    if eager_context.executing_eagerly():\n        self._iterator = self._dataset.make_one_shot_iterator()\n        return []\n    else:\n        return [self._iterator.initializer]",
    "docstring": "Initialize underlying iterators. Returns: A list of any initializer ops that should be run.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\distribute_lib.py",
    "ast_data": "FunctionDef name:initialize arg:self arguments arg If Call Assign Call Return return:no Return return:yes Call"
  },
  {
    "library": "kornia",
    "name": "_adapted_beta",
    "source_code": "def _adapted_beta(shape: Union[Tuple[int, ...], torch.Size], a: Union[float, Tensor], b: Union[float, Tensor], same_on_batch: bool=False) -> Tensor:\n    device, dtype = _extract_device_dtype([a if isinstance(a, Tensor) else None, b if isinstance(b, Tensor) else None])\n    a = as_tensor(a, device=device, dtype=dtype)\n    b = as_tensor(b, device=device, dtype=dtype)\n    dist = Beta(a, b, validate_args=False)\n    return _adapted_rsampling(shape, dist, same_on_batch)",
    "docstring": "Sample from a beta sampling function that accepts 'same_on_batch'. If same_on_batch is True, all values generated will be exactly same given a batch_size (shape[0]). By default, same_on_batch is set to False. By default, sampling happens on the default device and dtype. If a/b is a tensor, sampling will happen in the same device/dtype as a/b tensor.",
    "type": "function",
    "file_path": "kornia\\kornia\\augmentation\\utils\\helpers.py",
    "ast_data": "FunctionDef name:_adapted_beta arg:shape arg:a arg:b arg:same_on_batch arguments arg arg arg arg Assign Call Call Call Assign Call Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "total_concentration",
    "source_code": "@property\ndef total_concentration(self):\n    return self._total_concentration",
    "docstring": "Sum of last dim of concentration parameter.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\distributions\\dirichlet_multinomial.py",
    "ast_data": "FunctionDef name:total_concentration arg:self arguments arg Return return:yes"
  },
  {
    "library": "django",
    "name": "tick",
    "source_code": "def tick(self):\n    raise NotImplementedError('subclasses must implement tick().')",
    "docstring": "This generator is called in a loop from run_loop. It's important that the method takes care of pausing or otherwise waiting for a period of time. This split between run_loop() and tick() is to improve the testability of the reloader implementations by decoupling the work they do from the loop.",
    "type": "method",
    "file_path": "django\\django\\utils\\autoreload.py",
    "ast_data": "FunctionDef name:tick arg:self arguments arg Raise Call"
  },
  {
    "library": "matplotlib",
    "name": "set_widths",
    "source_code": "def set_widths(self, widths):\n    self._widths = 0.5 * np.asarray(widths).ravel()\n    self.stale = True",
    "docstring": "Set the lengths of the first axes (e.g., major axis).",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\collections.py",
    "ast_data": "FunctionDef name:set_widths arg:self arg:widths arguments arg arg Assign Call Call Assign"
  },
  {
    "library": "tensorflow",
    "name": "_kl_categorical_categorical",
    "source_code": "@kullback_leibler.RegisterKL(Categorical, Categorical)\ndef _kl_categorical_categorical(a, b, name=None):\n    with ops.name_scope(name, 'kl_categorical_categorical', values=[a.logits, b.logits]):\n        delta_log_probs1 = nn_ops.log_softmax(a.logits) - nn_ops.log_softmax(b.logits)\n        return math_ops.reduce_sum(nn_ops.softmax(a.logits) * delta_log_probs1, axis=-1)",
    "docstring": "Calculate the batched KL divergence KL(a || b) with a and b Categorical. Args: a: instance of a Categorical distribution object. b: instance of a Categorical distribution object. name: (optional) Name to use for created operations. default is \"kl_categorical_categorical\". Returns: Batchwise KL(a || b)",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\distributions\\categorical.py",
    "ast_data": "FunctionDef name:_kl_categorical_categorical arg:a arg:b arg:name arguments arg arg arg With Call Assign Call Call Return return:yes Call Call Call"
  },
  {
    "library": "django",
    "name": "add_prefix",
    "source_code": "def add_prefix(self, field_name):\n    return '%s-%s' % (self.prefix, field_name) if self.prefix else field_name",
    "docstring": "Return the field name with a prefix appended, if this Form has a prefix set. Subclasses may wish to override.",
    "type": "method",
    "file_path": "django\\django\\forms\\forms.py",
    "ast_data": "FunctionDef name:add_prefix arg:self arg:field_name arguments arg arg Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "_safe_assign",
    "source_code": "def _safe_assign(X, values, *, row_indexer=None, column_indexer=None):\n    row_indexer = slice(None, None, None) if row_indexer is None else row_indexer\n    column_indexer = slice(None, None, None) if column_indexer is None else column_indexer\n    if hasattr(X, 'iloc'):\n        with warnings.catch_warnings():\n            warnings.simplefilter('ignore', FutureWarning)\n            X.iloc[row_indexer, column_indexer] = values\n    else:\n        X[row_indexer, column_indexer] = values",
    "docstring": "Safe assignment to a numpy array, sparse matrix, or pandas dataframe. Parameters ---------- X : {ndarray, sparse-matrix, dataframe} Array to be modified. It is expected to be 2-dimensional. values : ndarray The values to be assigned to . row_indexer : array-like, dtype={int, bool}, default=None A 1-dimensional array to select the rows of interest. If , all rows are selected. column_indexer : array-like, dtype={int, bool}, default=None A 1-dimensional array to select the columns of interest. If , all columns are selected.",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\utils\\_indexing.py",
    "ast_data": "FunctionDef name:_safe_assign arg:X arg:values arguments arg arg arg arg Assign Compare Call Assign Compare Call If Call With Call Call Assign Assign"
  },
  {
    "library": "kornia",
    "name": "Lambda",
    "source_code": "class Lambda(Module):\n\n    def __init__(self, func: Callable[..., Tensor]) -> None:\n        super().__init__()\n        if not callable(func):\n            raise TypeError(f'Argument lambd should be callable, got {type(func).__name__!r}')\n        self.func = func\n\n    def forward(self, img: Tensor, *args: Any, **kwargs: Any) -> Tensor:\n        return self.func(img, *args, **kwargs)",
    "docstring": "Applies user-defined lambda as a transform. Args: func: Callable function. Returns: The output of the user-defined lambda. Example: >>> import kornia >>> x = torch.rand(1, 3, 5, 5) >>> f = Lambda(lambda x: kornia.color.rgb_to_grayscale(x)) >>> f(x).shape torch.Size([1, 1, 5, 5])",
    "type": "class",
    "file_path": "kornia\\kornia\\contrib\\lambda_module.py",
    "ast_data": "ClassDef name:Lambda FunctionDef name:__init__ arg:self arg:func arguments arg arg Call Call If Call Raise Call Call Assign FunctionDef name:forward arg:self arg:img arguments arg arg arg arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "captured_inputs",
    "source_code": "@property\ndef captured_inputs(self):\n    self._create_definition_if_needed()\n    return self._extra_inputs",
    "docstring": "Returns the list of implicitly captured inputs.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\function.py",
    "ast_data": "FunctionDef name:captured_inputs arg:self arguments arg Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "GRUCell",
    "source_code": "class GRUCell(RNNCellBase):\n\n    def __init__(self, input_size, hidden_size, bias=True, dtype=torch.qint8):\n        super().__init__(input_size, hidden_size, bias, num_chunks=3, dtype=dtype)\n\n    def _get_name(self):\n        return 'DynamicQuantizedGRUCell'\n\n    def forward(self, input: Tensor, hx: Optional[Tensor]=None) -> Tensor:\n        self.check_forward_input(input)\n        if hx is None:\n            hx = torch.zeros(input.size(0), self.hidden_size, dtype=input.dtype, device=input.device)\n        self.check_forward_hidden(input, hx, '')\n        return torch.ops.quantized.quantized_gru_cell_dynamic(input, hx, self._packed_weight_ih, self._packed_weight_hh, self.bias_ih, self.bias_hh)\n\n    @classmethod\n    def from_float(cls, mod, use_precomputed_fake_quant=False):\n        return super().from_float(mod, use_precomputed_fake_quant=use_precomputed_fake_quant)",
    "docstring": "A gated recurrent unit (GRU) cell A dynamic quantized GRUCell module with floating point tensor as inputs and outputs. Weights are quantized to 8 bits. We adopt the same interface as , please see for documentation. Examples:: >>> # xdoctest: +SKIP >>> rnn = nn.GRUCell(10, 20) >>> input = torch.randn(6, 3, 10) >>> hx = torch.randn(3, 20) >>> output = [] >>> for i in range(6): ... hx = rnn(input[i], hx) ... output.append(hx)",
    "type": "class",
    "file_path": "pytorch\\torch\\ao\\nn\\quantized\\dynamic\\modules\\rnn.py",
    "ast_data": "ClassDef name:GRUCell FunctionDef name:__init__ arg:self arg:input_size arg:hidden_size arg:bias arg:dtype arguments arg arg arg arg arg Call Call FunctionDef name:_get_name arg:self arguments arg Return return:yes FunctionDef name:forward arg:self arg:input arg:hx arguments arg arg arg Call If Compare Assign Call Call Call Return return:yes Call FunctionDef name:from_float arg:cls arg:mod arg:use_precomputed_fake_quant arguments arg arg arg Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "map_on_gpu",
    "source_code": "def map_on_gpu(map_func):\n\n    def _apply_fn(dataset):\n        return _MapOnGpuDataset(dataset, map_func)\n    return _apply_fn",
    "docstring": "Maps across the elements of this dataset. NOTE: This is a highly experimental version of that runs on GPU. It must be used after applying the transformation with a GPU device argument. Args: map_func: A function mapping a nested structure of tensors (having shapes and types defined by and ) to another nested structure of tensors. Returns: A transformation function, which can be passed to .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\data\\experimental\\ops\\prefetching_ops.py",
    "ast_data": "FunctionDef name:map_on_gpu arg:map_func arguments arg FunctionDef name:_apply_fn arg:dataset arguments arg Return return:yes Call Return return:yes"
  },
  {
    "library": "pandas",
    "name": "__contains__",
    "source_code": "def __contains__(self, key) -> bool:\n    if is_valid_na_for_dtype(key, self.categories.dtype):\n        return bool(self.isna().any())\n    return contains(self, key, container=self._codes)",
    "docstring": "Returns True if is in this Categorical.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\arrays\\categorical.py",
    "ast_data": "FunctionDef name:__contains__ arg:self arg:key arguments arg arg If Call Return return:yes Call Call Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "entropy",
    "source_code": "def entropy(self) -> Tensor:\n    raise NotImplementedError",
    "docstring": "Returns entropy of distribution, batched over batch_shape. Returns: Tensor of shape batch_shape.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributions\\distribution.py",
    "ast_data": "FunctionDef name:entropy arg:self arguments arg Raise"
  },
  {
    "library": "pytorch",
    "name": "_get_root_modules",
    "source_code": "def _get_root_modules(modules: list[nn.Module]) -> list[nn.Module]:\n    root_modules: list[nn.Module] = []\n    module_to_modules: dict[nn.Module, set[nn.Module]] = {module: set(module.modules()) for module in modules}\n    for candidate_module in modules:\n        is_root_module = True\n        for module, _modules in module_to_modules.items():\n            is_child_module = candidate_module is not module and candidate_module in _modules\n            if is_child_module:\n                is_root_module = False\n                break\n        if is_root_module:\n            root_modules.append(candidate_module)\n    return root_modules",
    "docstring": "Returns the modules in ``.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\utils.py",
    "ast_data": "FunctionDef name:_get_root_modules arg:modules arguments arg Call Call For Assign For Call Assign BoolOp Compare Compare If Assign If Call Return return:yes"
  },
  {
    "library": "kornia",
    "name": "Sobel",
    "source_code": "class Sobel(Module):\n\n    def __init__(self, normalized: bool=True, eps: float=1e-06) -> None:\n        super().__init__()\n        self.normalized: bool = normalized\n        self.eps: float = eps\n\n    def __repr__(self) -> str:\n        return f'{self.__class__.__name__}(normalized={self.normalized})'\n\n    def forward(self, input: Tensor) -> Tensor:\n        return sobel(input, self.normalized, self.eps)",
    "docstring": "Compute the Sobel operator and returns the magnitude per channel. Args: normalized: if True, L1 norm of the kernel is set to 1. eps: regularization number to avoid NaN during backprop. Return: the sobel edge gradient magnitudes map. Shape: - Input: :math: - Output: :math: Examples: >>> input = torch.rand(1, 3, 4, 4) >>> output = Sobel()(input) # 1x3x4x4",
    "type": "class",
    "file_path": "kornia\\kornia\\filters\\sobel.py",
    "ast_data": "ClassDef name:Sobel FunctionDef name:__init__ arg:self arg:normalized arg:eps arguments arg arg arg Call Call FunctionDef name:__repr__ arg:self arguments arg Return return:yes FunctionDef name:forward arg:self arg:input arguments arg arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "@deprecation.deprecated(None, 'Queue-based input pipelines have been replaced by `tf.data`. Use `tf.data.TFRecordDataset`.')\ndef __init__(self, name=None, options=None):\n    compression_type = python_io.TFRecordOptions.get_compression_type_string(options)\n    rr = gen_io_ops.tf_record_reader_v2(name=name, compression_type=compression_type)\n    super(TFRecordReader, self).__init__(rr)",
    "docstring": "Create a TFRecordReader. Args: name: A name for the operation (optional). options: A TFRecordOptions object (optional).",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\io_ops.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:name arg:options arguments arg arg arg Assign Call Assign Call Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "sparse_read",
    "source_code": "def sparse_read(self, indices, name=None):\n    val = self._variable.sparse_read(indices, name=name)\n    return math_ops.cast(val, self._cast_dtype)",
    "docstring": "Reads the value of this variable sparsely, using .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\mixed_precision\\autocast_variable.py",
    "ast_data": "FunctionDef name:sparse_read arg:self arg:indices arg:name arguments arg arg arg Assign Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_SparseDenseCwiseMulGrad",
    "source_code": "@ops.RegisterGradient('SparseDenseCwiseMul')\ndef _SparseDenseCwiseMulGrad(op: ops.Operation, grad):\n    return _SparseDenseCwiseMulOrDivGrad(op, grad, True)",
    "docstring": "Gradients for SparseDenseCwiseMul.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\sparse_grad.py",
    "ast_data": "FunctionDef name:_SparseDenseCwiseMulGrad arg:op arg:grad arguments arg arg Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "get_keystr",
    "source_code": "def get_keystr(key_path: KeyPath) -> str:\n    args_kwargs_key_path = key_path[0]\n    assert isinstance(args_kwargs_key_path, SequenceKey)\n    if args_kwargs_key_path.idx == 0:\n        return f'*args{keystr(key_path[1:])}'\n    else:\n        kwarg_key = key_path[1]\n        assert isinstance(kwarg_key, MappingKey)\n        name = str(kwarg_key)[1:-1]\n        return f'{name}{keystr(key_path[2:])}'",
    "docstring": "For a given index into the flat_args, return a human readable string describing how to access it, e.g. \"*args[\"foo\"][0].bar\"",
    "type": "function",
    "file_path": "pytorch\\torch\\_export\\utils.py",
    "ast_data": "FunctionDef name:get_keystr arg:key_path arguments arg Assign Call If Compare Return return:yes Call Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "fillna",
    "source_code": "def fillna(self, value):\n    raise NotImplementedError('fillna is not defined for MultiIndex')",
    "docstring": "fillna is not implemented for MultiIndex",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\indexes\\multi.py",
    "ast_data": "FunctionDef name:fillna arg:self arg:value arguments arg arg Raise Call"
  },
  {
    "library": "pandas",
    "name": "set_closed",
    "source_code": "def set_closed(self, closed: IntervalClosedType) -> Self:\n    if closed not in VALID_CLOSED:\n        msg = f\"invalid option for 'closed': {closed}\"\n        raise ValueError(msg)\n    left, right = (self._left, self._right)\n    dtype = IntervalDtype(left.dtype, closed=closed)\n    return self._simple_new(left, right, dtype=dtype)",
    "docstring": "Return an identical IntervalArray closed on the specified side. Parameters ---------- closed : {'left', 'right', 'both', 'neither'} Whether the intervals are closed on the left-side, right-side, both or neither. Returns ------- IntervalArray A new IntervalArray with the specified side closures. See Also -------- IntervalArray.closed : Returns inclusive side of the Interval. arrays.IntervalArray.closed : Returns inclusive side of the IntervalArray. Examples -------- >>> index = pd.arrays.IntervalArray.from_breaks(range(4)) >>> index [(0, 1], (1, 2], (2, 3]] Length: 3, dtype: interval[int64, right] >>> index.set_closed(\"both\") [[0, 1], [1, 2], [2, 3]] Length: 3, dtype: interval[int64, both]",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\arrays\\interval.py",
    "ast_data": "FunctionDef name:set_closed arg:self arg:closed arguments arg arg If Compare Assign Raise Call Assign Assign Call Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "highlight_null",
    "source_code": "@Substitution(subset=subset_args, props=properties_args, color=coloring_args.format(default='red'))\ndef highlight_null(self, color: str='red', subset: Subset | None=None, props: str | None=None) -> Styler:\n\n    def f(data: DataFrame, props: str) -> np.ndarray:\n        return np.where(pd.isna(data).to_numpy(), props, '')\n    if props is None:\n        props = f'background-color: {color};'\n    return self.apply(f, axis=None, subset=subset, props=props)",
    "docstring": "Highlight missing values with a style. Parameters ---------- %(color)s .. versionadded:: 1.5.0 %(subset)s %(props)s .. versionadded:: 1.3.0 Returns ------- Styler Instance of class where null values are highlighted with given style. See Also -------- Styler.highlight_max: Highlight the maximum with a style. Styler.highlight_min: Highlight the minimum with a style. Styler.highlight_between: Highlight a defined range with a style. Styler.highlight_quantile: Highlight values defined by a quantile with a style. Examples -------- >>> df = pd.DataFrame({\"A\": [1, 2], \"B\": [3, np.nan]}) >>> df.style.highlight_null(color=\"yellow\") # doctest: +SKIP Please see: _ for more examples.",
    "type": "method",
    "file_path": "pandas\\pandas\\io\\formats\\style.py",
    "ast_data": "FunctionDef name:highlight_null arg:self arg:color arg:subset arg:props arguments arg arg arg arg FunctionDef name:f arg:data arg:props arguments arg arg Return return:yes Call Call Call If Compare Assign Return return:yes Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_get_output_tensors",
    "source_code": "def _get_output_tensors(self, interpreter: _interpreter.Interpreter) -> List[np.ndarray]:\n    outputs = []\n    for output_detail in interpreter.get_output_details():\n        tensor = interpreter.get_tensor(output_detail['index'])\n        if output_detail['dtype'] == np.int8:\n            quant_params = _get_quant_params(output_detail)\n            if quant_params:\n                scale, zero_point = quant_params\n                tensor = ((tensor.astype(np.float32) - zero_point) * scale).astype(np.float32)\n        outputs.append(tensor)\n    return outputs",
    "docstring": "Returns output tensors of given TFLite model Interpreter. Args: interpreter: a tf.lite.Interpreter object with allocated tensors. Returns: a list of numpy arrays representing output tensor results.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\lite\\tools\\optimize\\debugging\\python\\debugger.py",
    "ast_data": "FunctionDef name:_get_output_tensors arg:self arg:interpreter arguments arg arg Assign For Call Assign Call If Compare Assign Call If Assign Assign Call Call Call Return return:yes"
  },
  {
    "library": "scipy",
    "name": "isspmatrix_coo",
    "source_code": "def isspmatrix_coo(x):\n    return isinstance(x, coo_matrix)",
    "docstring": "Is of coo_matrix type? Parameters ---------- x object to check for being a coo matrix Returns ------- bool True if is a coo matrix, False otherwise Examples -------- >>> from scipy.sparse import coo_array, coo_matrix, csr_matrix, isspmatrix_coo >>> isspmatrix_coo(coo_matrix([[5]])) True >>> isspmatrix_coo(coo_array([[5]])) False >>> isspmatrix_coo(csr_matrix([[5]])) False",
    "type": "function",
    "file_path": "scipy\\scipy\\sparse\\_coo.py",
    "ast_data": "FunctionDef name:isspmatrix_coo arg:x arguments arg Return return:yes Call"
  },
  {
    "library": "kornia",
    "name": "InverseDepthSmoothnessLoss",
    "source_code": "class InverseDepthSmoothnessLoss(nn.Module):\n\n    def forward(self, idepth: torch.Tensor, image: torch.Tensor) -> torch.Tensor:\n        return inverse_depth_smoothness_loss(idepth, image)",
    "docstring": "Criterion that computes image-aware inverse depth smoothness loss. .. math:: \\text{loss} = \\left | \\partial_x d_{ij} \\right | e^{-\\left \\| \\partial_x I_{ij} \\right \\|} + \\left | \\partial_y d_{ij} \\right | e^{-\\left \\| \\partial_y I_{ij} \\right \\|} Shape: - Inverse Depth: :math: - Image: :math: - Output: scalar Examples: >>> idepth = torch.rand(1, 1, 4, 5) >>> image = torch.rand(1, 3, 4, 5) >>> smooth = InverseDepthSmoothnessLoss() >>> loss = smooth(idepth, image)",
    "type": "class",
    "file_path": "kornia\\kornia\\losses\\depth_smooth.py",
    "ast_data": "ClassDef name:InverseDepthSmoothnessLoss FunctionDef name:forward arg:self arg:idepth arg:image arguments arg arg arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "handle",
    "source_code": "@property\ndef handle(self):\n    return self._implementation.handle",
    "docstring": "The reference to the TensorArray.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\tensor_array_ops.py",
    "ast_data": "FunctionDef name:handle arg:self arguments arg Return return:yes"
  },
  {
    "library": "django",
    "name": "__delattr__",
    "source_code": "def __delattr__(self, name):\n    super().__delattr__(name)\n    self.__dict__.pop(name, None)",
    "docstring": "Delete a setting and clear it from cache if needed.",
    "type": "method",
    "file_path": "django\\django\\conf\\__init__.py",
    "ast_data": "FunctionDef name:__delattr__ arg:self arg:name arguments arg arg Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "stride1_for_last_dim",
    "source_code": "def stride1_for_last_dim(self, result_for_complex_expression: bool=True) -> bool:\n    if len(self.var_names) == 0:\n        return True\n    terms = self.index.args if isinstance(self.index, sympy.Add) else [self.index]\n    last_sym = self.var_names[-1]\n    for term in terms:\n        if term == last_sym:\n            return True\n        if isinstance(term, sympy.Mul) and len(term.args) == 2 and (term.args[1] == last_sym) and isinstance(term.args[0], (int, sympy.Integer)) and (term.args[0] > 1):\n            return False\n    return result_for_complex_expression",
    "docstring": "Whether the stride for the last dimension is 1.",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\dependencies.py",
    "ast_data": "FunctionDef name:stride1_for_last_dim arg:self arg:result_for_complex_expression arguments arg arg If Compare Call Return return:yes Assign Call Assign For If Compare Return return:yes If BoolOp Call Compare Call Compare Call Compare Return return:yes Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "get_fill_order",
    "source_code": "def get_fill_order(seq: Sequence[Union[int, torch.SymInt, Expr]], shape_env: Optional[ShapeEnv]=None) -> Sequence[int]:\n    if shape_env is None:\n        sorted_idx: Sequence[int] = argsort(seq)\n    else:\n        sorted_idx = argsort_sym(shape_env, seq)\n    return sorted_idx",
    "docstring": "Convert strides to fill order (argsort)",
    "type": "function",
    "file_path": "pytorch\\torch\\_inductor\\ir.py",
    "ast_data": "FunctionDef name:get_fill_order arg:seq arg:shape_env arguments arg arg If Compare Call Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_variable_call",
    "source_code": "@classmethod\ndef _variable_call(cls, *args, **kwargs):\n    return None",
    "docstring": "Override to be a no-op to avoid metaclass creating ResourceVariables.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\ps_values.py",
    "ast_data": "FunctionDef name:_variable_call arg:cls arguments arg arg arg Return return:no"
  },
  {
    "library": "tensorflow",
    "name": "__call__",
    "source_code": "def __call__(self, shape, dtype=None, **kwargs):\n    _validate_kwargs(self.__class__.__name__, kwargs)\n    dtype = _get_dtype(dtype)\n    if not dtype.is_numpy_compatible or dtype == dtypes.string:\n        raise ValueError('Expected numeric or boolean dtype, got %s.' % dtype)\n    if _PARTITION_SHAPE in kwargs:\n        shape = kwargs[_PARTITION_SHAPE]\n    return array_ops.zeros(shape, dtype)",
    "docstring": "Returns a tensor object initialized as specified by the initializer. Args: shape: Shape of the tensor. dtype: Optional dtype of the tensor. Only numeric or boolean dtypes are supported. If not specified, is used, which default to unless you configured it otherwise (via ). **kwargs: Additional keyword arguments.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\initializers\\initializers_v2.py",
    "ast_data": "FunctionDef name:__call__ arg:self arg:shape arg:dtype arguments arg arg arg arg Call Assign Call If BoolOp Compare Raise Call If Compare Assign Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "validate_shape",
    "source_code": "def validate_shape(shape: ShapeType):\n    assert isinstance(shape, Sequence), type(shape)\n    for l in shape:\n        validate_dim_length(l)",
    "docstring": "Validates that a sequence represents a valid shape.",
    "type": "function",
    "file_path": "pytorch\\torch\\_prims_common\\__init__.py",
    "ast_data": "FunctionDef name:validate_shape arg:shape arguments arg Call Call For Call"
  },
  {
    "library": "tensorflow",
    "name": "device_name",
    "source_code": "@tf_export('experimental.dtensor.device_name', v1=[])\ndef device_name() -> str:\n    return _dtensor_device().name",
    "docstring": "Returns the singleton DTensor device's name. This function can be used in the following way:",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\dtensor\\python\\api.py",
    "ast_data": "FunctionDef name:device_name arguments Return return:yes Call Call"
  },
  {
    "library": "pandas",
    "name": "format_timedelta_ticks",
    "source_code": "@staticmethod\ndef format_timedelta_ticks(x, pos, n_decimals: int) -> str:\n    s, ns = divmod(x, 10 ** 9)\n    m, s = divmod(s, 60)\n    h, m = divmod(m, 60)\n    d, h = divmod(h, 24)\n    decimals = int(ns * 10 ** (n_decimals - 9))\n    s = f'{int(h):02d}:{int(m):02d}:{int(s):02d}'\n    if n_decimals > 0:\n        s += f'.{decimals:0{n_decimals}d}'\n    if d != 0:\n        s = f'{int(d):d} days {s}'\n    return s",
    "docstring": "Convert seconds to 'D days HH:MM:SS.F'",
    "type": "method",
    "file_path": "pandas\\pandas\\plotting\\_matplotlib\\converter.py",
    "ast_data": "FunctionDef name:format_timedelta_ticks arg:x arg:pos arg:n_decimals arguments arg arg arg Assign Call Assign Call Assign Call Assign Call Assign Call Assign Call Call Call If Compare If Compare Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "write_object_proto_for_resource_variable",
    "source_code": "def write_object_proto_for_resource_variable(resource_variable, proto, options, enforce_naming=True):\n    proto.variable.SetInParent()\n    if enforce_naming and (not resource_variable.name.endswith(':0')):\n        raise ValueError(f\"Cowardly refusing to save variable {resource_variable.name} because of unexpected suffix in the name (expected ':0')which won't be restored.\")\n    proto.variable.name = tensor_module.get_op_name(resource_variable.name)\n    proto.variable.trainable = resource_variable.trainable\n    proto.variable.dtype = resource_variable.dtype.as_datatype_enum\n    proto.variable.synchronization = resource_variable.synchronization.value\n    proto.variable.aggregation = resource_variable.aggregation.value\n    proto.variable.shape.CopyFrom(resource_variable.shape.as_proto())\n    if options.experimental_variable_policy._save_variable_devices():\n        if hasattr(resource_variable, 'device'):\n            proto.variable.device = resource_variable.device",
    "docstring": "Writes additional information of the variable into the SavedObject proto. This allows users to define a to provide extra information of the variable to the SavedObject. For example, DistributedVariable class would fill in components in the distributed context. Args: resource_variable: A or that has the information to be saved into the proto. proto: proto to update. options: A instance that configures save behavior. enforce_naming: A bool determining whether to check that names end in the expected string ':0'",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\resource_variable_ops.py",
    "ast_data": "FunctionDef name:write_object_proto_for_resource_variable arg:resource_variable arg:proto arg:options arg:enforce_naming arguments arg arg arg arg Call If BoolOp Call Raise Call Assign Call Assign Assign Assign Assign Call Call If Call If Call Assign"
  },
  {
    "library": "pandas",
    "name": "_apply",
    "source_code": "@no_type_check\ndef _apply(self, f, *args, **kwargs):\n\n    def func(x):\n        x = self._resampler_cls(x, timegrouper=self._timegrouper, gpr_index=self.ax)\n        if isinstance(f, str):\n            return getattr(x, f)(**kwargs)\n        return x.apply(f, *args, **kwargs)\n    result = self._groupby.apply(func)\n    if isinstance(result, ABCDataFrame) and len(result) == 0 and (not isinstance(result.index, PeriodIndex)):\n        result = result.set_index(_asfreq_compat(self.obj.index[:0], freq=self.freq), append=True)\n    return self._wrap_result(result)",
    "docstring": "Dispatch to _upsample; we are stripping all of the _upsample kwargs and performing the original function call on the grouped object.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\resample.py",
    "ast_data": "FunctionDef name:_apply arg:self arg:f arguments arg arg arg arg FunctionDef name:func arg:x arguments arg Assign Call If Call Return return:yes Call Call Return return:yes Call Assign Call If BoolOp Call Compare Call Call Assign Call Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "random_uniform",
    "source_code": "@dispatch.add_dispatch_support\n@doc_controls.do_not_generate_docs\ndef random_uniform(shape, minval=0.0, maxval=1.0, dtype=None, seed=None):\n    if dtype is None:\n        dtype = floatx()\n    if seed is None:\n        seed = np.random.randint(10000000.0)\n    return random_ops.random_uniform(shape, minval=minval, maxval=maxval, dtype=dtype, seed=seed)",
    "docstring": "Returns a tensor with uniform distribution of values. Args: shape: A tuple of integers, the shape of tensor to create. minval: A float, lower boundary of the uniform distribution to draw samples. maxval: A float, upper boundary of the uniform distribution to draw samples. dtype: String, dtype of returned tensor. seed: Integer, random seed. Returns: A tensor. Example: >>> random_uniform_tensor = tf.keras.backend.random_uniform(shape=(2,3), ... minval=0.0, maxval=1.0) >>> random_uniform_tensor",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\backend.py",
    "ast_data": "FunctionDef name:random_uniform arg:shape arg:minval arg:maxval arg:dtype arg:seed arguments arg arg arg arg arg If Compare Assign Call If Compare Assign Call Return return:yes Call"
  },
  {
    "library": "django",
    "name": "std_call",
    "source_code": "def std_call(func):\n    if os.name == 'nt':\n        return lwingdal[func]\n    else:\n        return lgdal[func]",
    "docstring": "Return the correct STDCALL function for certain OSR routines on Win32 platforms.",
    "type": "function",
    "file_path": "django\\django\\contrib\\gis\\gdal\\libgdal.py",
    "ast_data": "FunctionDef name:std_call arg:func arguments arg If Compare Return return:yes Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "UserDefinedTupleVariable",
    "source_code": "class UserDefinedTupleVariable(UserDefinedObjectVariable):\n    _nonvar_fields = UserDefinedObjectVariable._nonvar_fields\n\n    def __init__(self, value, **kwargs):\n        super().__init__(value, **kwargs)\n        self._tuple_vt = None\n\n    def set_underlying_tuple_vt(self, tuple_vt):\n        self._tuple_vt = tuple_vt\n\n    @staticmethod\n    def create(value, tuple_vt, **kwargs):\n        result = UserDefinedTupleVariable(value, **kwargs)\n        result.set_underlying_tuple_vt(tuple_vt)\n        return result\n\n    def call_method(self, tx, name, args: 'list[VariableTracker]', kwargs: 'dict[str, VariableTracker]') -> 'VariableTracker':\n        assert self._tuple_vt is not None\n        method = self._maybe_get_baseclass_method(name)\n        if method in tuple_methods:\n            return self._tuple_vt.call_method(tx, name, args, kwargs)\n        return super().call_method(tx, name, args, kwargs)\n\n    def unpack_var_sequence(self, tx):\n        assert self._tuple_vt is not None\n        if type(self.value).__iter__ is tuple.__iter__:\n            return self._tuple_vt.unpack_var_sequence(tx)\n        raise NotImplementedError",
    "docstring": "Represents user defined objects that are subclasses of tuple. Internally, it uses a TupleVariable to represent the tuple part of the variable tracker. For everything else, it falls back to UserDefinedObjectVariable.",
    "type": "class",
    "file_path": "pytorch\\torch\\_dynamo\\variables\\user_defined.py",
    "ast_data": "ClassDef name:UserDefinedTupleVariable Assign FunctionDef name:__init__ arg:self arg:value arguments arg arg arg Call Call Assign FunctionDef name:set_underlying_tuple_vt arg:self arg:tuple_vt arguments arg arg Assign FunctionDef name:create arg:value arg:tuple_vt arguments arg arg arg Assign Call Call Return return:yes FunctionDef name:call_method arg:self arg:tx arg:name arg:args arg:kwargs arguments arg arg arg arg arg Compare Assign Call If Compare Return return:yes Call Return return:yes Call Call FunctionDef name:unpack_var_sequence arg:self arg:tx arguments arg arg Compare If Compare Call Return return:yes Call Raise"
  },
  {
    "library": "pytorch",
    "name": "_validate_fwd_outputs",
    "source_code": "def _validate_fwd_outputs(self, outputs: tuple[torch.Tensor, ...]):\n    expected_tensors_meta = self.get_outputs_meta()\n    validate_tensors_metadata(f'Stage {self.stage_index} forward outputs', expected_tensors_meta, outputs)",
    "docstring": "Raises a RuntimeError if this stage produces an output of unexpected shape/dtype. Most likely, this could be cause either by incorrect user specification of output shapes, or becuase shape inference was done on the original model but then at runtime the model is wrapped with something like mixed precision which changes output dtype.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\pipelining\\stage.py",
    "ast_data": "FunctionDef name:_validate_fwd_outputs arg:self arg:outputs arguments arg arg Assign Call Call"
  },
  {
    "library": "scikit-learn",
    "name": "fit",
    "source_code": "def fit(self, X, y=None):\n    self.fit_transform(X)\n    return self",
    "docstring": "Fit the model from data in X. Parameters ---------- X : array-like of shape (n_samples, n_features) Training vector, where is the number of samples and is the number of features. y : Ignored Not used, present for API consistency by convention. Returns ------- self : object Returns the instance itself.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\decomposition\\_dict_learning.py",
    "ast_data": "FunctionDef name:fit arg:self arg:X arg:y arguments arg arg arg Call Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "split",
    "source_code": "def split(self, X, y=None, groups=None):\n    return super().split(X, y, groups)",
    "docstring": "Generate indices to split data into training and test set. Parameters ---------- X : array-like of shape (n_samples, n_features) Training data, where is the number of samples and is the number of features. y : array-like of shape (n_samples,), default=None The target variable for supervised learning problems. groups : array-like of shape (n_samples,) Group labels for the samples used while splitting the dataset into train/test set. Yields ------ train : ndarray The training set indices for that split. test : ndarray The testing set indices for that split.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\model_selection\\_split.py",
    "ast_data": "FunctionDef name:split arg:self arg:X arg:y arg:groups arguments arg arg arg arg Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "weights",
    "source_code": "@property\ndef weights(self):\n    return self._dedup_weights(self._undeduplicated_weights)",
    "docstring": "Returns the list of all layer variables/weights. Note: This will not track the weights of nested that are not themselves Keras layers. Returns: A list of variables.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\training.py",
    "ast_data": "FunctionDef name:weights arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "div",
    "source_code": "@register_decomposition(aten.div)\n@out_wrapper()\ndef div(a: Union[TensorLikeType, NumberType], b: Union[TensorLikeType, NumberType], *, rounding_mode: Optional[str]=None):\n    if rounding_mode is None:\n        return true_divide(a, b)\n    elif rounding_mode == 'trunc':\n        return trunc_divide(a, b)\n    elif rounding_mode == 'floor':\n        return floor_divide(a, b)\n    else:\n        msg = f\"div expected rounding_mode to be one of None, 'trunc', or 'floor' but found {rounding_mode}.\"\n        raise ValueError(msg)",
    "docstring": "Reference implementation of torch.div",
    "type": "function",
    "file_path": "pytorch\\torch\\_refs\\__init__.py",
    "ast_data": "FunctionDef name:div arg:a arg:b arguments arg arg arg If Compare Return return:yes Call If Compare Return return:yes Call If Compare Return return:yes Call Assign Raise Call Call Call"
  },
  {
    "library": "scipy",
    "name": "set_solout",
    "source_code": "def set_solout(self, solout):\n    if self._integrator.supports_solout:\n        self._integrator.set_solout(solout)\n        if self._y is not None:\n            self._integrator.reset(len(self._y), self.jac is not None)\n    else:\n        raise ValueError('selected integrator does not support solout, choose another one')",
    "docstring": "Set callable to be called at every successful integration step. Parameters ---------- solout : callable `` solout should return -1 to stop integration otherwise it should return None or 0",
    "type": "method",
    "file_path": "scipy\\scipy\\integrate\\_ode.py",
    "ast_data": "FunctionDef name:set_solout arg:self arg:solout arguments arg arg If Call If Compare Call Call Compare Raise Call"
  },
  {
    "library": "tensorflow",
    "name": "unpack",
    "source_code": "def unpack(tensor):\n    parallel_device = _all_parallel_devices.get(tensor.device, None)\n    if parallel_device is None:\n        raise ValueError('{} is not a parallel device'.format(tensor.device))\n    return parallel_device.unpack(tensor)",
    "docstring": "Finds 's parallel device and unpacks its components.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\parallel_device\\parallel_device.py",
    "ast_data": "FunctionDef name:unpack arg:tensor arguments arg Assign Call If Compare Raise Call Call Return return:yes Call"
  },
  {
    "library": "django",
    "name": "format_value",
    "source_code": "def format_value(self, value):\n    if value is None and self.allow_multiple_selected:\n        return []\n    if not isinstance(value, (tuple, list)):\n        value = [value]\n    return [str(v) if v is not None else '' for v in value]",
    "docstring": "Return selected values as a list.",
    "type": "method",
    "file_path": "django\\django\\forms\\widgets.py",
    "ast_data": "FunctionDef name:format_value arg:self arg:value arguments arg arg If BoolOp Compare Return return:no If Call Assign Return return:yes Compare Call"
  },
  {
    "library": "django",
    "name": "TruncWeek",
    "source_code": "class TruncWeek(TruncBase):\n    kind = 'week'",
    "docstring": "Truncate to midnight on the Monday of the week.",
    "type": "class",
    "file_path": "django\\django\\db\\models\\functions\\datetime.py",
    "ast_data": "ClassDef name:TruncWeek Assign"
  },
  {
    "library": "tensorflow",
    "name": "create_edges",
    "source_code": "def create_edges(self):\n    super(_FunctionCaller, self).create_edges()\n    for attr_name in self._function_attributes:\n        attr = self._node.attr[attr_name]\n        if attr.HasField('func'):\n            function = self._enclosing_graph.functions[attr.func.name]\n            for index in range(len(self._node.input) - self._first_function_input):\n                self.add_outgoing_edge(_Edge(_EndPoint(self, index + self._first_function_input), _EndPoint(function, index)))\n        elif attr.HasField('list'):\n            for func in attr.list.func:\n                function = self._enclosing_graph.functions[func.name]\n                for index in range(len(self._node.input) - self._first_function_input):\n                    self.add_outgoing_edge(_Edge(_EndPoint(self, index + self._first_function_input), _EndPoint(function, index)))",
    "docstring": "Creates edges related to a function caller. Edges from a function caller to its called functions are always edges from _inputs_ to _inputs_: a FunctionDef input is given by the caller, based on its own inputs.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\convert_to_constants.py",
    "ast_data": "FunctionDef name:create_edges arg:self arguments arg Call Call For Assign If Call Assign For Call Call Call Call Call Call If Call For Assign For Call Call Call Call Call Call"
  },
  {
    "library": "matplotlib",
    "name": "reposition_axes",
    "source_code": "def reposition_axes(layoutgrids, fig, renderer, *, w_pad=0, h_pad=0, hspace=0, wspace=0):\n    trans_fig_to_subfig = fig.transFigure - fig.transSubfigure\n    for sfig in fig.subfigs:\n        bbox = layoutgrids[sfig].get_outer_bbox()\n        sfig._redo_transform_rel_fig(bbox=bbox.transformed(trans_fig_to_subfig))\n        reposition_axes(layoutgrids, sfig, renderer, w_pad=w_pad, h_pad=h_pad, wspace=wspace, hspace=hspace)\n    for ax in fig._localaxes:\n        if ax.get_subplotspec() is None or not ax.get_in_layout():\n            continue\n        ss = ax.get_subplotspec()\n        gs = ss.get_gridspec()\n        if gs not in layoutgrids:\n            return\n        bbox = layoutgrids[gs].get_inner_bbox(rows=ss.rowspan, cols=ss.colspan)\n        newbbox = trans_fig_to_subfig.transform_bbox(bbox)\n        ax._set_position(newbbox)\n        offset = {'left': 0, 'right': 0, 'bottom': 0, 'top': 0}\n        for nn, cbax in enumerate(ax._colorbars[::-1]):\n            if ax == cbax._colorbar_info['parents'][0]:\n                reposition_colorbar(layoutgrids, cbax, renderer, offset=offset)",
    "docstring": "Reposition all the Axes based on the new inner bounding box.",
    "type": "function",
    "file_path": "matplotlib\\lib\\matplotlib\\_constrained_layout.py",
    "ast_data": "FunctionDef name:reposition_axes arg:layoutgrids arg:fig arg:renderer arguments arg arg arg arg arg arg arg Assign For Assign Call Call Call Call For If BoolOp Compare Call Call Assign Call Assign Call If Compare Return return:no Assign Call Assign Call Call Assign For Call If Compare Call"
  },
  {
    "library": "tensorflow",
    "name": "_is_shared_object",
    "source_code": "def _is_shared_object(filename):\n    if platform.system() == 'Linux':\n        if filename.endswith('.so'):\n            return True\n        else:\n            index = filename.rfind('.so.')\n            if index == -1:\n                return False\n            else:\n                return filename[index + 4].isdecimal()\n    elif platform.system() == 'Darwin':\n        return filename.endswith('.dylib')\n    elif platform.system() == 'Windows':\n        return filename.endswith('.dll')\n    else:\n        return False",
    "docstring": "Check the file to see if it is a shared object, only using extension.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\load_library.py",
    "ast_data": "FunctionDef name:_is_shared_object arg:filename arguments arg If Compare Call If Call Return return:yes Assign Call If Compare Return return:yes Return return:yes Call If Compare Call Return return:yes Call If Compare Call Return return:yes Call Return return:yes"
  },
  {
    "library": "scipy",
    "name": "Corana",
    "source_code": "class Corana(Benchmark):\n\n    def __init__(self, dimensions=4):\n        Benchmark.__init__(self, dimensions)\n        self._bounds = list(zip([-5.0] * self.N, [5.0] * self.N))\n        self.global_optimum = [[0 for _ in range(self.N)]]\n        self.fglob = 0.0\n\n    def fun(self, x, *args):\n        self.nfev += 1\n        d = [1.0, 1000.0, 10.0, 100.0]\n        r = 0\n        for j in range(4):\n            zj = floor(abs(x[j] / 0.2) + 0.49999) * sign(x[j]) * 0.2\n            if abs(x[j] - zj) < 0.05:\n                r += 0.15 * (zj - 0.05 * sign(zj)) ** 2 * d[j]\n            else:\n                r += d[j] * x[j] * x[j]\n        return r",
    "docstring": "Corana objective function. This class defines the Corana [1]_ global optimization problem. This is a multimodal minimization problem defined as follows: .. math:: f_{\\text{Corana}}(x) = \\begin{cases} \\sum_{i=1}^n 0.15 d_i [z_i - 0.05\\textrm{sgn}(z_i)]^2 & \\textrm{if }|x_i-z_i| < 0.05 \\\\ d_ix_i^2 & \\textrm{otherwise}\\end{cases} Where, in this exercise: .. math:: z_i = 0.2 \\lfloor |x_i/s_i|+0.49999\\rfloor\\textrm{sgn}(x_i), d_i=(1,1000,10,100, ...) with :math: for :math:. *Global optimum*: :math: for :math: for :math: ..[1] Gavana, A. Global Optimization Benchmarks and AMPGO retrieved 2015",
    "type": "class",
    "file_path": "scipy\\benchmarks\\benchmarks\\go_benchmark_functions\\go_funcs_C.py",
    "ast_data": "ClassDef name:Corana FunctionDef name:__init__ arg:self arg:dimensions arguments arg arg Call Assign Call Call Assign Call Assign FunctionDef name:fun arg:self arg:x arguments arg arg arg Assign Assign For Call Assign Call Call Call If Compare Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "pre_export_passes",
    "source_code": "@abc.abstractmethod\ndef pre_export_passes(self, options: ResolvedExportOptions, original_model: torch.nn.Module | Callable, fx_module: torch.fx.GraphModule, fx_module_args: Sequence[Any]):\n    ...",
    "docstring": "Applies pre-export passes to the FX graph. Pre-export passes are FX-to-FX graph transformations that make the graph more palatable for the FX-to-ONNX conversion. For example, it can be used to flatten model input/output, add explicit casts to the graph, replace/decompose operators, functionalize the graph, etc.",
    "type": "method",
    "file_path": "pytorch\\torch\\onnx\\_internal\\_exporter_legacy.py",
    "ast_data": "FunctionDef name:pre_export_passes arg:self arg:options arg:original_model arg:fx_module arg:fx_module_args arguments arg arg arg arg arg"
  },
  {
    "library": "pytorch",
    "name": "save_storage",
    "source_code": "def save_storage(self, storage: Storage, offset: int=0) -> None:\n    assert self.handle is not None, 'Cannot save data to a file that is not registered.'\n    torch._C._gds_save_storage(self.handle, storage, offset)",
    "docstring": "Saves data from the storage into the file. This is a wrapper around ``. Args: storage (Storage): Storage to save data from. offset (int, optional): Offset into the file to start saving to. (Default: 0)",
    "type": "method",
    "file_path": "pytorch\\torch\\cuda\\gds.py",
    "ast_data": "FunctionDef name:save_storage arg:self arg:storage arg:offset arguments arg arg arg Compare Call"
  },
  {
    "library": "pytorch",
    "name": "benchmark_gpu",
    "source_code": "@time_and_count\ndef benchmark_gpu(self: Self, _callable: Callable[[], Any], **kwargs: Any) -> float:\n    do_bench_params = inspect.signature(self.triton_do_bench).parameters\n    for kwarg in list(kwargs.keys()):\n        if kwarg not in do_bench_params:\n            del kwargs[kwarg]\n    if 'quantiles' in kwargs:\n        return self.triton_do_bench(_callable, **kwargs)[0]\n    elif 'return_mode' in kwargs:\n        return self.triton_do_bench(_callable, **kwargs)\n    return self.triton_do_bench(_callable, **kwargs, return_mode='median')",
    "docstring": "Benchmark the GPU callable, , and return the runtime, in milliseconds. Arguments: - _callable: The GPU callable to benchmark. Keyword Arguments: - quantiles: Optionally, a tuple of floats denoting the requested quantiles. - return_mode: Optionally, the requested return mode. Currently, Triton's supports min, max, mean, and median return modes. - **kwargs: Additional kwargs passed to Triton's . Returns: - The runtime of , in milliseconds. If is specified, this is the first requested quantile. Else, if is specified, this is the requested return mode. Otherwise, this is the median.",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\runtime\\benchmarking.py",
    "ast_data": "FunctionDef name:benchmark_gpu arg:self arg:_callable arguments arg arg arg Assign Call For Call Call If Compare If Compare Return return:yes Call If Compare Return return:yes Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "PartialRender",
    "source_code": "class PartialRender:\n\n    def __init__(self, code, replacement_hooks) -> None:\n        super().__init__()\n        self.code = code\n        self.replacement_hooks = replacement_hooks\n\n    def finalize_hook(self, hook_key: str, strict=True) -> None:\n        if hook_key not in self.replacement_hooks:\n            if strict:\n                raise RuntimeError(f'{hook_key} not registered in self.replacement_hooks')\n            else:\n                return\n        assert self.replacement_hooks[hook_key] is not None, 'hook_key can only be called once'\n        self.code = self.code.replace(hook_key, self.replacement_hooks[hook_key]())\n        self.replacement_hooks[hook_key] = None\n\n    def finalize_all(self) -> str:\n        for key, fn in self.replacement_hooks.items():\n            self.code = self.code.replace(key, fn())\n        return self.code",
    "docstring": "Some parts of a template need to be generated at the end, but inserted into the template at the start. This allows doing a bunch of replacements after the initial render.",
    "type": "class",
    "file_path": "pytorch\\torch\\_inductor\\select_algorithm.py",
    "ast_data": "ClassDef name:PartialRender FunctionDef name:__init__ arg:self arg:code arg:replacement_hooks arguments arg arg arg Call Call Assign Assign FunctionDef name:finalize_hook arg:self arg:hook_key arg:strict arguments arg arg arg If Compare If Raise Call Return return:no Compare Assign Call Call Assign FunctionDef name:finalize_all arg:self arguments arg For Call Assign Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_StaticDim",
    "source_code": "class _StaticDim(Dim):\n\n    def __init__(self, value: int):\n        self.__name__ = str(value)\n        self.value = value\n\n    @property\n    def min(self):\n        return self.value\n\n    @property\n    def max(self):\n        return self.value",
    "docstring": "Class for static :func: types. This class is only for setting and checking static dim constraints, and the user should never interact with it.",
    "type": "class",
    "file_path": "pytorch\\torch\\export\\dynamic_shapes.py",
    "ast_data": "ClassDef name:_StaticDim FunctionDef name:__init__ arg:self arg:value arguments arg arg Assign Call Assign FunctionDef name:min arg:self arguments arg Return return:yes FunctionDef name:max arg:self arguments arg Return return:yes"
  },
  {
    "library": "pandas",
    "name": "_render",
    "source_code": "def _render(self, sparse_index: bool, sparse_columns: bool, max_rows: int | None=None, max_cols: int | None=None, blank: str=''):\n    self._compute()\n    dxs = []\n    ctx_len = len(self.index)\n    for i, concatenated in enumerate(self.concatenated):\n        concatenated.hide_index_ = self.hide_index_\n        concatenated.hidden_columns = self.hidden_columns\n        foot = f'{self.css['foot']}{i}'\n        concatenated.css = {**self.css, 'data': f'{foot}_data', 'row_heading': f'{foot}_row_heading', 'row': f'{foot}_row', 'foot': f'{foot}_foot'}\n        dx = concatenated._render(sparse_index, sparse_columns, max_rows, max_cols, blank)\n        dxs.append(dx)\n        for (r, c), v in concatenated.ctx.items():\n            self.ctx[r + ctx_len, c] = v\n        for (r, c), v in concatenated.ctx_index.items():\n            self.ctx_index[r + ctx_len, c] = v\n        ctx_len += len(concatenated.index)\n    d = self._translate(sparse_index, sparse_columns, max_rows, max_cols, blank, dxs)\n    return d",
    "docstring": "Computes and applies styles and then generates the general render dicts. Also extends the and attributes with those of concatenated stylers for use within",
    "type": "method",
    "file_path": "pandas\\pandas\\io\\formats\\style_render.py",
    "ast_data": "FunctionDef name:_render arg:self arg:sparse_index arg:sparse_columns arg:max_rows arg:max_cols arg:blank arguments arg arg arg arg arg arg Call Assign Assign Call For Call Assign Assign Assign Assign Assign Call Call For Call Assign For Call Assign Call Assign Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "compile_shader",
    "source_code": "def compile_shader(source: str):\n    from pathlib import Path\n    from torch.utils._cpp_embed_headers import _embed_headers\n    if not hasattr(torch._C, '_mps_compileShader'):\n        raise RuntimeError('MPS is not available')\n    source = _embed_headers([l + '\\n' for l in source.split('\\n')], [Path(__file__).parent.parent / 'include'], set())\n    return torch._C._mps_compileShader(source)",
    "docstring": "Compiles compute shader from source and allows one to invoke kernels defined there from the comfort of Python runtime Example:: >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_MPS) >>> lib = torch.mps.compile_shader( ... \"kernel void full(device float* out, constant float& val, uint idx [[thread_position_in_grid]]) { out[idx] = val; }\" ... ) >>> x = torch.zeros(16, device=\"mps\") >>> lib.full(x, 3.14)",
    "type": "function",
    "file_path": "pytorch\\torch\\mps\\__init__.py",
    "ast_data": "FunctionDef name:compile_shader arg:source arguments arg If Call Raise Call Assign Call Call Call Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "SharedQuantizationSpec",
    "source_code": "@dataclass(eq=True, frozen=True)\nclass SharedQuantizationSpec(QuantizationSpecBase):\n    edge_or_node: EdgeOrNode",
    "docstring": "Quantization spec for the Tensors whose quantization parameters are shared with other Tensors",
    "type": "class",
    "file_path": "pytorch\\torch\\ao\\quantization\\quantizer\\quantizer.py",
    "ast_data": "ClassDef name:SharedQuantizationSpec Call"
  },
  {
    "library": "scipy",
    "name": "_penalized_nnlf",
    "source_code": "def _penalized_nnlf(self, theta, x):\n    loc, scale, args = self._unpack_loc_scale(theta)\n    if not self._argcheck(*args) or scale <= 0:\n        return inf\n    if isinstance(x, CensoredData):\n        x = (x - loc) / scale\n        n_log_scale = (len(x) - x.num_censored()) * log(scale)\n    else:\n        x = (x - loc) / scale\n        n_log_scale = len(x) * log(scale)\n    return self._nnlf_and_penalty(x, args) + n_log_scale",
    "docstring": "Penalized negative loglikelihood function. i.e., - sum (log pdf(x, theta), axis=0) + penalty where theta are the parameters (including loc and scale)",
    "type": "method",
    "file_path": "scipy\\scipy\\stats\\_distn_infrastructure.py",
    "ast_data": "FunctionDef name:_penalized_nnlf arg:self arg:theta arg:x arguments arg arg arg Assign Call If BoolOp Call Compare Return return:yes If Call Assign Assign Call Call Call Assign Assign Call Call Return return:yes Call"
  },
  {
    "library": "sphinx",
    "name": "JSConstructor",
    "source_code": "class JSConstructor(JSCallable):\n    allow_nesting = True\n\n    def get_display_prefix(self) -> list[Node]:\n        return [addnodes.desc_sig_keyword('class', 'class'), addnodes.desc_sig_space()]",
    "docstring": "Like a callable but with a different prefix.",
    "type": "class",
    "file_path": "sphinx\\sphinx\\domains\\javascript.py",
    "ast_data": "ClassDef name:JSConstructor Assign FunctionDef name:get_display_prefix arg:self arguments arg Return return:yes Call Call"
  },
  {
    "library": "scipy",
    "name": "derivatives",
    "source_code": "def derivatives(self, x):\n    with FITPACK_LOCK:\n        return _fitpack_impl.spalde(x, self._eval_args)",
    "docstring": "Return all derivatives of the spline at the point x. Parameters ---------- x : float The point to evaluate the derivatives at. Returns ------- der : ndarray, shape(k+1,) Derivatives of the orders 0 to k. Examples -------- >>> import numpy as np >>> from scipy.interpolate import UnivariateSpline >>> x = np.linspace(0, 3, 11) >>> y = x**2 >>> spl = UnivariateSpline(x, y) >>> spl.derivatives(1.5) array([2.25, 3.0, 2.0, 0])",
    "type": "method",
    "file_path": "scipy\\scipy\\interpolate\\_fitpack2.py",
    "ast_data": "FunctionDef name:derivatives arg:self arg:x arguments arg arg With Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "empty_cache",
    "source_code": "def empty_cache() -> None:\n    if is_initialized():\n        torch._C._cuda_emptyCache()",
    "docstring": "Release all unoccupied cached memory currently held by the caching allocator so that those can be used in other GPU application and visible in . .. note:: :func: doesn't increase the amount of GPU memory available for PyTorch. However, it may help reduce fragmentation of GPU memory in certain cases. See :ref: for more details about GPU memory management.",
    "type": "function",
    "file_path": "pytorch\\torch\\cuda\\memory.py",
    "ast_data": "FunctionDef name:empty_cache arguments If Call Call"
  },
  {
    "library": "pytorch",
    "name": "slice_nd",
    "source_code": "def slice_nd(self, node, ranges: list[tuple[Any, Any]]) -> ir.ReinterpretView:\n    assert len(ranges) == len(node.get_size()), f'ranges={ranges!r}, node={node!r}'\n    sliced = wrap_with_tensorbox(node)\n    for dim, _range in enumerate(ranges):\n        if len(_range) == 0:\n            continue\n        assert len(_range) == 2\n        start, end = parse_expr_with_index_symbols(_range)\n        sliced = L.slice_(sliced, dim, start, end, clamp=False)\n    assert isinstance(sliced.data, ir.ReinterpretView), sliced.data\n    return sliced.data",
    "docstring": "Slice the given node with a list of ranges (start and end) corresponding to its dims. The dim is not sliced if the corresponding range is empty.",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\codegen\\cpp_template_kernel.py",
    "ast_data": "FunctionDef name:slice_nd arg:self arg:node arg:ranges arguments arg arg arg Compare Call Call Call Assign Call For Call If Compare Call Compare Call Assign Call Assign Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "sparse_read",
    "source_code": "def sparse_read(self, indices, name=None):\n    per_var_indices, _ = self._decompose_indices(indices)\n    result = []\n    for i, v in enumerate(self._variables):\n        new_name = None\n        if name is not None:\n            new_name = '{}/part_{}'.format(name, i)\n        result.append(v.sparse_read(per_var_indices[i], name=new_name))\n    return array_ops.concat(result, axis=0)",
    "docstring": "Implements tf.Variable.sparse_read.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\sharded_variable.py",
    "ast_data": "FunctionDef name:sparse_read arg:self arg:indices arg:name arguments arg arg arg Assign Call Assign For Call Assign If Compare Assign Call Call Call Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "inferred_type",
    "source_code": "@cache_readonly\ndef inferred_type(self) -> str_t:\n    return lib.infer_dtype(self._values, skipna=False)",
    "docstring": "Return a string of the type inferred from the values. See Also -------- Index.dtype : Return the dtype object of the underlying data. Examples -------- >>> idx = pd.Index([1, 2, 3]) >>> idx Index([1, 2, 3], dtype='int64') >>> idx.inferred_type 'integer'",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\indexes\\base.py",
    "ast_data": "FunctionDef name:inferred_type arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "EntityInfo",
    "source_code": "class EntityInfo(collections.namedtuple('EntityInfo', ('name', 'source_code', 'source_file', 'future_features', 'namespace'))):\n    pass",
    "docstring": "Contains information about a Python entity. Immutable. Examples of entities include functions and classes. Attributes: name: The name that identifies this entity. source_code: The entity's source code. source_file: The entity's source file. future_features: Tuple[Text], the future features that this entity was compiled with. See namespace: Dict[str, ], containing symbols visible to the entity (excluding parameters).",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\autograph\\pyct\\transformer.py",
    "ast_data": "ClassDef name:EntityInfo Call"
  },
  {
    "library": "pytorch",
    "name": "get_pr_info",
    "source_code": "@cache\ndef get_pr_info(github_repo: str, github_token: str, pr_number: int) -> dict[str, Any]:\n    github_api = f'https://api.github.com/repos/{github_repo}'\n    headers = {'Accept': 'application/vnd.github.v3+json', 'Authorization': f'token {github_token}'}\n    json_response: dict[str, Any] = download_json(url=f'{github_api}/issues/{pr_number}', headers=headers)\n    if not json_response:\n        log.warning(f'Failed to get the labels for #{pr_number}')\n        return {}\n    return json_response",
    "docstring": "Dynamically get PR information",
    "type": "function",
    "file_path": "pytorch\\.github\\scripts\\runner_determinator.py",
    "ast_data": "FunctionDef name:get_pr_info arg:github_repo arg:github_token arg:pr_number arguments arg arg arg Assign Assign Call If Call Return return:no Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "_use_interchange_protocol",
    "source_code": "def _use_interchange_protocol(X):\n    return not _is_pandas_df(X) and hasattr(X, '__dataframe__')",
    "docstring": "Use interchange protocol for non-pandas dataframes that follow the protocol. Note: at this point we chose not to use the interchange API on pandas dataframe to ensure strict behavioral backward compatibility with older versions of scikit-learn.",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\utils\\validation.py",
    "ast_data": "FunctionDef name:_use_interchange_protocol arg:X arguments arg Return return:yes BoolOp Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_is_tensor_shape_match",
    "source_code": "def _is_tensor_shape_match(self, shape_a: TensorShape, shape_b: TensorShape) -> bool:\n    for s_a, s_b in zip(shape_a.as_list(), shape_b.as_list()):\n        if s_a and s_b and (s_a != s_b):\n            return False\n    return True",
    "docstring": "Check if shape b matches with shape a.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\tpu\\tpu_embedding_v2.py",
    "ast_data": "FunctionDef name:_is_tensor_shape_match arg:self arg:shape_a arg:shape_b arguments arg arg arg For Call Call Call If BoolOp Compare Return return:yes Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "update_list_args",
    "source_code": "def update_list_args(self, tx: 'InstructionTranslator', args, kwargs, py_args, py_kwargs):\n    for arg, py_arg in zip(args, py_args):\n        if isinstance(arg, ListVariable):\n            assert isinstance(py_arg, list), 'py_arg should be a list in optimizer variable'\n            for i, val in enumerate(py_arg):\n                tx.output.side_effects.mutation(arg)\n                if isinstance(val, torch.Tensor):\n                    arg.items.append(self.wrap_tensor(tx, val))\n                else:\n                    source = arg.source and GetItemSource(arg.source, i)\n                    arg.items.append(VariableTracker.build(tx, val, source))",
    "docstring": "Update the args and kwargs to the traced optimizer call",
    "type": "method",
    "file_path": "pytorch\\torch\\_dynamo\\variables\\optimizer.py",
    "ast_data": "FunctionDef name:update_list_args arg:self arg:tx arg:args arg:kwargs arg:py_args arg:py_kwargs arguments arg arg arg arg arg arg For Call If Call Call For Call Call If Call Call Call Assign BoolOp Call Call Call"
  },
  {
    "library": "scipy",
    "name": "Sphere",
    "source_code": "class Sphere(Benchmark):\n    change_dimensionality = True\n\n    def __init__(self, dimensions=2):\n        Benchmark.__init__(self, dimensions)\n        self._bounds = list(zip([-5.12] * self.N, [5.12] * self.N))\n        self.global_optimum = [[0 for _ in range(self.N)]]\n        self.fglob = 0.0\n\n    def fun(self, x, *args):\n        self.nfev += 1\n        return sum(x ** 2)",
    "docstring": "Sphere objective function. This class defines the Sphere [1]_ global optimization problem. This is a multimodal minimization problem defined as follows: .. math:: f_{\\text{Sphere}}(x) = \\sum_{i=1}^{n} x_i^2 Here, :math: represents the number of dimensions and :math: for :math:. *Global optimum*: :math: for :math: for :math: .. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions For Global Optimization Problems Int. Journal of Mathematical Modelling and Numerical Optimisation, 2013, 4, 150-194. TODO Jamil has stupid limits",
    "type": "class",
    "file_path": "scipy\\benchmarks\\benchmarks\\go_benchmark_functions\\go_funcs_S.py",
    "ast_data": "ClassDef name:Sphere Assign FunctionDef name:__init__ arg:self arg:dimensions arguments arg arg Call Assign Call Call Assign Call Assign FunctionDef name:fun arg:self arg:x arguments arg arg arg Return return:yes Call"
  },
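The benchmark above can be reproduced with SciPy's public optimizers; a quick check that the global optimum at the origin is found within the stated bounds:

```python
import numpy as np
from scipy.optimize import differential_evolution

def sphere(x):
    return np.sum(x ** 2)

bounds = [(-5.12, 5.12)] * 2   # same box constraints as the benchmark class
result = differential_evolution(sphere, bounds, seed=0)
print(result.x, result.fun)    # x near [0, 0], f near 0.0
```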
  {
    "library": "kornia",
    "name": "get_pascal_kernel_1d",
    "source_code": "def get_pascal_kernel_1d(kernel_size: int, norm: bool=False, *, device: Optional[Device]=None, dtype: Optional[Dtype]=None) -> Tensor:\n    pre: list[float] = []\n    cur: list[float] = []\n    for i in range(kernel_size):\n        cur = [1.0] * (i + 1)\n        for j in range(1, i // 2 + 1):\n            value = pre[j - 1] + pre[j]\n            cur[j] = value\n            if i != 2 * j:\n                cur[-j - 1] = value\n        pre = cur\n    out = tensor(cur, device=device, dtype=dtype)\n    if norm:\n        out = out / out.sum()\n    return out",
    "docstring": "Generate Yang Hui triangle (Pascal's triangle) by a given number. Args: kernel_size: height and width of the kernel. norm: if to normalize the kernel or not. Default: False. device: tensor device desired to create the kernel dtype: tensor dtype desired to create the kernel Returns: kernel shaped as :math: Examples: >>> get_pascal_kernel_1d(1) tensor([1.]) >>> get_pascal_kernel_1d(2) tensor([1., 1.]) >>> get_pascal_kernel_1d(3) tensor([1., 2., 1.]) >>> get_pascal_kernel_1d(4) tensor([1., 3., 3., 1.]) >>> get_pascal_kernel_1d(5) tensor([1., 4., 6., 4., 1.]) >>> get_pascal_kernel_1d(6) tensor([ 1., 5., 10., 10., 5., 1.])",
    "type": "function",
    "file_path": "kornia\\kornia\\filters\\kernels.py",
    "ast_data": "FunctionDef name:get_pascal_kernel_1d arg:kernel_size arg:norm arguments arg arg arg arg For Call Assign For Call Assign Assign If Compare Assign Assign Assign Call If Assign Call Return return:yes"
  },
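The rows produced by the loop above are binomial coefficients, so an equivalent one-liner with `math.comb` makes a handy cross-check (a sketch, not kornia API):

```python
from math import comb

def pascal_row(kernel_size: int) -> list[int]:
    # Row (kernel_size - 1) of Pascal's triangle: C(n, 0) ... C(n, n)
    return [comb(kernel_size - 1, j) for j in range(kernel_size)]

assert pascal_row(5) == [1, 4, 6, 4, 1]
assert pascal_row(6) == [1, 5, 10, 10, 5, 1]
```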
  {
    "library": "pytorch",
    "name": "vars_and_sizes",
    "source_code": "def vars_and_sizes(self, index: sympy.Expr) -> tuple[list[sympy.Symbol], list[sympy.Expr]]:\n\n    def get_sort_key(x: IterationRangesEntry) -> tuple[int, bool]:\n        divisor_hint = V.graph.sizevars.size_hint(x.divisor, fallback=config.unbacked_symint_fallback)\n        length_is_one_hint = V.graph.sizevars.size_hint(x.length, fallback=config.unbacked_symint_fallback) == 1\n        return (divisor_hint, not length_is_one_hint)\n    nodes = [V.kernel.range_tree_nodes.get(s) for s in index.free_symbols]\n    nodes = [n for n in nodes if n and n.prefix == self.prefix]\n    nodes.sort(key=lambda x: get_sort_key(x))\n    divisor = sympy.S.One\n    index_vars = []\n    sizes = []\n\n    def add(node):\n        nonlocal divisor\n        index_vars.append(node.symbol())\n        sizes.append(node.length)\n        divisor = divisor * node.length\n    for node in nodes:\n        if not V.graph.sizevars.statically_known_equals(node.divisor, divisor):\n            add(self.lookup(divisor, FloorDiv(node.divisor, divisor)))\n            divisor = node.divisor\n        add(node)\n    if not V.graph.sizevars.statically_known_equals(self.numel, divisor):\n        add(self.lookup(divisor, FloorDiv(self.numel, divisor)))\n    return ([*reversed(index_vars)], [*reversed(sizes)])",
    "docstring": "Figure out vars from this tree used in index",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\codegen\\simd.py",
    "ast_data": "FunctionDef name:vars_and_sizes arg:self arg:index arguments arg arg FunctionDef name:get_sort_key arg:x arguments arg Assign Call Assign Compare Call Return return:yes Assign Call Assign BoolOp Compare Call arguments arg Call Assign Assign Assign FunctionDef name:add arg:node arguments arg Call Call Call Assign For If Call Call Call Call Assign Call If Call Call Call Call Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "__call__",
    "source_code": "def __call__(self, f: _T) -> _T:\n    _stats_registry.register(f, self._op_type + ',' + self._statistic_type)\n    return f",
    "docstring": "Registers \"f\" as the statistics function for \"op_type\".",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\ops.py",
    "ast_data": "FunctionDef name:__call__ arg:self arg:f arguments arg arg Call Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "_joint_log_likelihood",
    "source_code": "@abstractmethod\ndef _joint_log_likelihood(self, X):\n    pass",
    "docstring": "Compute the unnormalized posterior log probability of X I.e. `` for all rows x of X, as an array-like of shape (n_samples, n_classes). Public methods predict, predict_proba, predict_log_proba, and predict_joint_log_proba pass the input through _check_X before handing it over to _joint_log_likelihood. The term \"joint log likelihood\" is used interchangibly with \"joint log probability\".",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\naive_bayes.py",
    "ast_data": "FunctionDef name:_joint_log_likelihood arg:self arg:X arguments arg arg"
  },
  {
    "library": "scipy",
    "name": "upcast_scalar",
    "source_code": "def upcast_scalar(dtype, scalar):\n    return (np.array([0], dtype=dtype) * scalar).dtype",
    "docstring": "Determine data type for binary operation between an array of type and a scalar.",
    "type": "function",
    "file_path": "scipy\\scipy\\sparse\\_sputils.py",
    "ast_data": "FunctionDef name:upcast_scalar arg:dtype arg:scalar arguments arg arg Return return:yes Call"
  },
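The one-liner works by building a tiny probe array and letting NumPy's own promotion rules decide the result dtype:

```python
import numpy as np

print((np.array([0], dtype=np.int8) * 2.5).dtype)   # float64
print((np.array([0], dtype=np.float32) * 2).dtype)  # float32 (int scalar is 'weak')
```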
  {
    "library": "pytorch",
    "name": "_generate_kernel_call_helper",
    "source_code": "def _generate_kernel_call_helper(self, kernel_name: str, call_args, *, device=None, triton=True, arg_types=None, raw_keys=None, raw_args=None, triton_meta=None, graph_name='', original_fxnode_name=None):\n    assert arg_types is not None and len(call_args) == len(arg_types), f'Mismatch call_args and arg_types in generate_kernel_call:\\ncall_args: {call_args}\\narg_types: {arg_types}'\n    new_args = []\n    for idx, arg in enumerate(call_args):\n        if '*' in arg_types[idx]:\n            new_args.append(f'({arg_types[idx]})({arg}.data_ptr())')\n        else:\n            new_args.append(arg)\n    debug_printer_manager = V.graph.wrapper_code.debug_printer\n    debug_printer_manager.set_printer_args(call_args, kernel_name, None, None, 'cpp')\n    with debug_printer_manager:\n        self.writeline(self.wrap_kernel_call(kernel_name, new_args))",
    "docstring": "Generates kernel call code. triton: Defines whether the GPU backend uses Triton for codegen. Otherwise it uses the CUDA language for codegen. Only valid when cuda == True.",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\codegen\\cpp_wrapper_cpu.py",
    "ast_data": "FunctionDef name:_generate_kernel_call_helper arg:self arg:kernel_name arg:call_args arguments arg arg arg arg arg arg arg arg arg arg arg BoolOp Compare Compare Call Call Assign For Call If Compare Call Call Assign Call With Call Call"
  },
  {
    "library": "django",
    "name": "execute",
    "source_code": "def execute(self, sql, params=()):\n    if not self.collect_sql and self.connection.in_atomic_block and (not self.connection.features.can_rollback_ddl):\n        raise TransactionManagementError(\"Executing DDL statements while in a transaction on databases that can't perform a rollback is prohibited.\")\n    sql = str(sql)\n    logger.debug('%s; (params %r)', sql, params, extra={'params': params, 'sql': sql})\n    if self.collect_sql:\n        ending = '' if sql.rstrip().endswith(';') else ';'\n        if params is not None:\n            self.collected_sql.append(sql % tuple(map(self.quote_value, params)) + ending)\n        else:\n            self.collected_sql.append(sql + ending)\n    else:\n        with self.connection.cursor() as cursor:\n            cursor.execute(sql, params)",
    "docstring": "Execute the given SQL statement, with optional parameters.",
    "type": "method",
    "file_path": "django\\django\\db\\backends\\base\\schema.py",
    "ast_data": "FunctionDef name:execute arg:self arg:sql arg:params arguments arg arg arg If BoolOp Raise Call Assign Call Call If Assign Call Call If Compare Call Call Call Call With Call Call"
  },
  {
    "library": "tensorflow",
    "name": "size",
    "source_code": "def size(self, name=None):\n    if not self._dynamic_size and self._size is not None:\n        return ops.convert_to_tensor(self._size, dtype=dtypes.int32)\n    else:\n        return gen_data_flow_ops.tensor_array_size_v3(handle=self._handle, flow_in=self.flow, name=name)",
    "docstring": "See TensorArray.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\tensor_array_ops.py",
    "ast_data": "FunctionDef name:size arg:self arg:name arguments arg arg If BoolOp Compare Return return:yes Call Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "output",
    "source_code": "def output(self, U, T, X0=None):\n    return lsim(self, U, T, X0=X0)",
    "docstring": "Return the response of a continuous-time system to input . See for details.",
    "type": "method",
    "file_path": "scipy\\scipy\\signal\\_ltisys.py",
    "ast_data": "FunctionDef name:output arg:self arg:U arg:T arg:X0 arguments arg arg arg arg Return return:yes Call"
  },
  {
    "library": "django",
    "name": "add_root_elements",
    "source_code": "def add_root_elements(self, handler):\n    pass",
    "docstring": "Add elements in the root (i.e. feed/channel) element. Called from write().",
    "type": "method",
    "file_path": "django\\django\\utils\\feedgenerator.py",
    "ast_data": "FunctionDef name:add_root_elements arg:self arg:handler arguments arg arg"
  },
  {
    "library": "scrapy",
    "name": "stop",
    "source_code": "async def stop(self) -> None:\n    await deferred_to_future(self._stop())",
    "docstring": "Stops simultaneously all the crawling jobs taking place. Completes when they all have ended.",
    "type": "method",
    "file_path": "scrapy\\scrapy\\crawler.py",
    "ast_data": "AsyncFunctionDef name:stop arg:self arguments arg Call Call"
  },
  {
    "library": "kornia",
    "name": "Transformer",
    "source_code": "class Transformer(Module):\n\n    def __init__(self, dim: int, depth: int, heads: int, dim_head: int, mlp_dim: int, dropout: float=0.0) -> None:\n        super().__init__()\n        self.layers = nn.ModuleList([])\n        for _ in range(depth):\n            self.layers.append(nn.ModuleList([PreNorm(dim, Attention(dim, heads, dim_head, dropout)), PreNorm(dim, FeedForward(dim, mlp_dim, dropout))]))\n\n    def forward(self, x: Tensor) -> Tensor:\n        for attn, ff in self.layers:\n            x = attn(x) + x\n            x = ff(x) + x\n        return x",
    "docstring": "Transformer block described in ViT. Paper: Based on: Args: dim: input dimension. depth: depth for transformer block. heads: number of heads in multi-head attention layer. dim_head: head size. mlp_dim: dimension of the FeedForward layer. dropout: dropout ratio, defaults to 0.",
    "type": "class",
    "file_path": "kornia\\kornia\\contrib\\vit_mobile.py",
    "ast_data": "ClassDef name:Transformer FunctionDef name:__init__ arg:self arg:dim arg:depth arg:heads arg:dim_head arg:mlp_dim arg:dropout arguments arg arg arg arg arg arg arg Call Call Assign Call For Call Call Call Call Call Call Call FunctionDef name:forward arg:self arg:x arguments arg arg For Assign Call Assign Call Return return:yes"
  },
  {
    "library": "pandas",
    "name": "option_context",
    "source_code": "@contextmanager\ndef option_context(*args) -> Generator[None]:\n    if len(args) % 2 != 0 or len(args) < 2:\n        raise ValueError('Provide an even amount of arguments as option_context(pat, val, pat, val...).')\n    ops = tuple(zip(args[::2], args[1::2]))\n    try:\n        undo = tuple(((pat, get_option(pat)) for pat, val in ops))\n        for pat, val in ops:\n            set_option(pat, val)\n        yield\n    finally:\n        for pat, val in undo:\n            set_option(pat, val)",
    "docstring": "Context manager to temporarily set options in a `User Guide `. Examples -------- >>> from pandas import option_context >>> with option_context(\"display.max_rows\", 10, \"display.max_columns\", 5): ... pass",
    "type": "function",
    "file_path": "pandas\\pandas\\_config\\config.py",
    "ast_data": "FunctionDef name:option_context arguments arg If BoolOp Compare Call Compare Call Raise Call Assign Call Call Try Assign Call Call For Call For Call"
  },
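Typical usage through the public pandas API; options are restored when the block exits, even on error:

```python
import pandas as pd

df = pd.DataFrame({"a": range(100)})
with pd.option_context("display.max_rows", 5):
    print(df)                              # truncated display inside the block
print(pd.get_option("display.max_rows"))  # back to the prior value (60 by default)
```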
  {
    "library": "pytorch",
    "name": "all_reduce",
    "source_code": "def all_reduce(tensor, op=ReduceOp.SUM, group=group.WORLD):\n    return _AllReduce.apply(op, group, tensor)",
    "docstring": "Reduces the tensor data across all machines in such a way that all get the final result. After the call the returned tensor is going to be bitwise identical in all processes. Arguments: tensor (Tensor): Input of the collective. op (optional): One of the values from `` enum. Specifies an operation used for element-wise reductions. group (ProcessGroup, optional): The process group to work on. Returns: Tensor: Output of the collective",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\nn\\functional.py",
    "ast_data": "FunctionDef name:all_reduce arg:tensor arg:op arg:group arguments arg arg arg Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "symiirorder1",
    "source_code": "def symiirorder1(signal, c0, z1, precision=-1.0):\n    xp = array_namespace(signal)\n    signal = xp_promote(signal, force_floating=True, xp=xp)\n    signal = np.asarray(signal)\n    if abs(z1) >= 1:\n        raise ValueError('|z1| must be less than 1.0')\n    if signal.ndim > 2:\n        raise ValueError('Input must be 1D or 2D')\n    squeeze_dim = False\n    if signal.ndim == 1:\n        signal = signal[None, :]\n        squeeze_dim = True\n    y0 = symiirorder1_ic(signal, z1, precision)\n    b = np.ones(1, dtype=signal.dtype)\n    a = np.r_[1, -z1]\n    a = a.astype(signal.dtype)\n    zii = y0 * z1\n    y1, _ = lfilter(b, a, axis_slice(signal, 1), zi=zii)\n    y1 = np.c_[y0, y1]\n    b = np.asarray([c0], dtype=signal.dtype)\n    out_last = -c0 / (z1 - 1.0) * axis_slice(y1, -1)\n    zii = out_last * z1\n    out, _ = lfilter(b, a, axis_slice(y1, -2, step=-1), zi=zii)\n    out = np.c_[axis_reverse(out), out_last]\n    if squeeze_dim:\n        out = out[0]\n    return xp.asarray(out)",
    "docstring": "Implement a smoothing IIR filter with mirror-symmetric boundary conditions using a cascade of first-order sections. The second section uses a reversed sequence. This implements a system with the following transfer function and mirror-symmetric boundary conditions:: c0 H(z) = --------------------- (1-z1/z) (1 - z1 z) The resulting signal will have mirror symmetric boundary conditions as well. Parameters ---------- signal : ndarray The input signal. If 2D, then the filter will be applied in a batched fashion across the last axis. c0, z1 : scalar Parameters in the transfer function. precision : Specifies the precision for calculating initial conditions of the recursive filter based on mirror-symmetric input. Returns ------- output : ndarray The filtered signal.",
    "type": "function",
    "file_path": "scipy\\scipy\\signal\\_spline_filters.py",
    "ast_data": "FunctionDef name:symiirorder1 arg:signal arg:c0 arg:z1 arg:precision arguments arg arg arg arg Assign Call Assign Call Assign Call If Compare Call Raise Call If Compare Raise Call Assign If Compare Assign Assign Assign Call Assign Call Assign Assign Call Assign Assign Call Call Assign Assign Call Assign Call Assign Assign Call Call Assign Call If Assign Return return:yes Call"
  },
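A short smoothing run through the public `scipy.signal` entry point; per the validation above, |z1| must be below 1:

```python
import numpy as np
from scipy.signal import symiirorder1

rng = np.random.default_rng(0)
sig = np.cos(np.linspace(0, np.pi, 64)) + 0.05 * rng.standard_normal(64)
smoothed = symiirorder1(sig, 0.5, 0.25)   # c0=0.5, z1=0.25
print(smoothed.shape)                     # (64,)
```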
  {
    "library": "matplotlib",
    "name": "set_thetalim",
    "source_code": "def set_thetalim(self, *args, **kwargs):\n    orig_lim = self.get_xlim()\n    if 'thetamin' in kwargs:\n        kwargs['xmin'] = np.deg2rad(kwargs.pop('thetamin'))\n    if 'thetamax' in kwargs:\n        kwargs['xmax'] = np.deg2rad(kwargs.pop('thetamax'))\n    new_min, new_max = self.set_xlim(*args, **kwargs)\n    if abs(new_max - new_min) > 2 * np.pi:\n        self.set_xlim(orig_lim)\n        raise ValueError('The angle range must be less than a full circle')\n    return tuple(np.rad2deg((new_min, new_max)))",
    "docstring": "Set the minimum and maximum theta values. Can take the following signatures: - `[0, 2\\pi]` to have an axis symmetric around 0. A ValueError is raised if the absolute angle difference is larger than a full circle.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\projections\\polar.py",
    "ast_data": "FunctionDef name:set_thetalim arg:self arguments arg arg arg Assign Call If Compare Assign Call Call If Compare Assign Call Call Assign Call If Compare Call Call Raise Call Return return:yes Call Call"
  },
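Both signatures in the docstring map onto the same limits; a quick polar-axes example:

```python
import numpy as np
import matplotlib.pyplot as plt

ax = plt.subplot(projection="polar")
ax.set_thetalim(-np.pi / 2, np.pi / 2)       # radians, symmetric around 0
ax.set_thetalim(thetamin=-90, thetamax=90)   # equivalent, in degrees
plt.savefig("half_polar.png")
```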
  {
    "library": "tensorflow",
    "name": "_step",
    "source_code": "def _step(time, output_ta_t, *states):\n    current_input = tuple((ta.read(time) for ta in input_ta))\n    current_input = nest.pack_sequence_as(inputs, current_input)\n    output, new_states = step_function(current_input, tuple(states) + tuple(constants))\n    flat_state = nest.flatten(states)\n    flat_new_state = nest.flatten(new_states)\n    for state, new_state in zip(flat_state, flat_new_state):\n        if isinstance(new_state, tensor_lib.Tensor):\n            new_state.set_shape(state.shape)\n    flat_output = nest.flatten(output)\n    output_ta_t = tuple((ta.write(time, out) for ta, out in zip(output_ta_t, flat_output)))\n    new_states = nest.pack_sequence_as(initial_states, flat_new_state)\n    return (time + 1, output_ta_t) + tuple(new_states)",
    "docstring": "RNN step function. Args: time: Current timestep value. output_ta_t: TensorArray. *states: List of states. Returns: Tuple:",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\backend.py",
    "ast_data": "FunctionDef name:_step arg:time arg:output_ta_t arguments arg arg arg Assign Call Call Assign Call Assign Call Call Call Assign Call Assign Call For Call If Call Call Assign Call Assign Call Call Call Assign Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_get_input_shape_for_tensor",
    "source_code": "def _get_input_shape_for_tensor(self, tensor, feature, per_replica, path) -> TensorShape:\n    shape = tensor.shape.as_list()\n    if len(shape) < 1:\n        raise ValueError('Only rank 1 and above dense tensor is supported, find rank {} sparse tensor for input {}'.format(len(shape), path))\n    if len(shape) > 1 and shape[-1] != 1:\n        raise ValueError('Rank 2 or above dense tensor should have last dimension as 1 as the last dimension will always be reduced. Instead got dense tensor as shape {}'.format(shape))\n    if self._num_cores_per_replica and per_replica:\n        shape[0] = shape[0] // self._num_cores_per_replica\n    return TensorShape(shape)",
    "docstring": "Get the input shape for the dense tensor.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\tpu\\tpu_embedding_v2.py",
    "ast_data": "FunctionDef name:_get_input_shape_for_tensor arg:self arg:tensor arg:feature arg:per_replica arg:path arguments arg arg arg arg arg Assign Call If Compare Call Raise Call Call Call If BoolOp Compare Call Compare Raise Call Call If BoolOp Assign Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "_getitem_iterable",
    "source_code": "def _getitem_iterable(self, key, axis: AxisInt):\n    self._validate_key(key, axis)\n    keyarr, indexer = self._get_listlike_indexer(key, axis)\n    return self.obj._reindex_with_indexers({axis: [keyarr, indexer]}, allow_dups=True)",
    "docstring": "Index current object with an iterable collection of keys. Parameters ---------- key : iterable Targeted labels. axis : int Dimension on which the indexing is being made. Raises ------ KeyError If no key was found. Will change in the future to raise if not all keys were found. Returns ------- scalar, DataFrame, or Series: indexed value(s).",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\indexing.py",
    "ast_data": "FunctionDef name:_getitem_iterable arg:self arg:key arg:axis arguments arg arg arg Call Assign Call Return return:yes Call"
  },
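This private helper backs list-of-labels indexing through the public `.loc` accessor:

```python
import pandas as pd

s = pd.Series([10, 20, 30], index=["a", "b", "c"])
print(s.loc[["a", "c"]])   # reindex with the listed labels
# s.loc[["a", "z"]]        # would raise KeyError: "z" is not in the index
```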
  {
    "library": "tensorflow",
    "name": "total_concentration",
    "source_code": "@property\ndef total_concentration(self):\n    return self._total_concentration",
    "docstring": "Sum of last dim of concentration parameter.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\distributions\\dirichlet.py",
    "ast_data": "FunctionDef name:total_concentration arg:self arguments arg Return return:yes"
  },
  {
    "library": "kornia",
    "name": "get_diff_kernel2d_2nd_order",
    "source_code": "def get_diff_kernel2d_2nd_order(*, device: Optional[Device]=None, dtype: Optional[Dtype]=None) -> Tensor:\n    gxx = tensor([[0.0, 0.0, 0.0], [1.0, -2.0, 1.0], [0.0, 0.0, 0.0]], device=device, dtype=dtype)\n    gyy = gxx.transpose(0, 1)\n    gxy = tensor([[-1.0, 0.0, 1.0], [0.0, 0.0, 0.0], [1.0, 0.0, -1.0]], device=device, dtype=dtype)\n    return stack([gxx, gxy, gyy])",
    "docstring": "Return 2nd order gradient for diff operator.",
    "type": "function",
    "file_path": "kornia\\kornia\\filters\\kernels.py",
    "ast_data": "FunctionDef name:get_diff_kernel2d_2nd_order arguments arg arg Assign Call Assign Call Assign Call Return return:yes Call"
  },
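The three stacked kernels can be applied with a plain convolution to get f_xx, f_xy, f_yy maps; a sketch assuming the `kornia.filters.kernels` import path from the entry above:

```python
import torch
import torch.nn.functional as F
from kornia.filters.kernels import get_diff_kernel2d_2nd_order

kernels = get_diff_kernel2d_2nd_order()           # shape (3, 3, 3): gxx, gxy, gyy
img = torch.rand(1, 1, 8, 8)
out = F.conv2d(img, kernels[:, None], padding=1)  # one channel per derivative
print(out.shape)                                  # torch.Size([1, 3, 8, 8])
```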
  {
    "library": "scipy",
    "name": "Problem18",
    "source_code": "class Problem18(Benchmark):\n\n    def __init__(self, dimensions=1):\n        Benchmark.__init__(self, dimensions)\n        self._bounds = [(0.0, 6.0)]\n        self.global_optimum = 2\n        self.fglob = 0\n\n    def fun(self, x, *args):\n        self.nfev += 1\n        x = x[0]\n        if x <= 3:\n            return (x - 2.0) ** 2.0\n        return 2 * log(x - 2.0) + 1",
    "docstring": "Univariate Problem18 objective function. This class defines the Univariate Problem18 global optimization problem. This is a multimodal minimization problem defined as follows: .. math:: f_{\\text{Problem18}}(x) = \\begin{cases}(x-2)^2 & \\textrm{if} \\hspace{5pt} x \\leq 3 \\\\ 2\\log(x-2)+1&\\textrm{otherwise}\\end{cases} Bound constraints: :math: .. figure:: figures/Problem18.png :alt: Univariate Problem18 function :align: center **Univariate Problem18 function** *Global optimum*: :math: for :math:",
    "type": "class",
    "file_path": "scipy\\benchmarks\\benchmarks\\go_benchmark_functions\\go_funcs_univariate.py",
    "ast_data": "ClassDef name:Problem18 FunctionDef name:__init__ arg:self arg:dimensions arguments arg arg Call Assign Assign Assign FunctionDef name:fun arg:self arg:x arguments arg arg arg Assign If Compare Return return:yes Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "transform",
    "source_code": "def transform(self, X):\n    X = check_array(X, dtype=[X_DTYPE], ensure_all_finite=False)\n    check_is_fitted(self)\n    if X.shape[1] != self.n_bins_non_missing_.shape[0]:\n        raise ValueError('This estimator was fitted with {} features but {} got passed to transform()'.format(self.n_bins_non_missing_.shape[0], X.shape[1]))\n    n_threads = _openmp_effective_n_threads(self.n_threads)\n    binned = np.zeros_like(X, dtype=X_BINNED_DTYPE, order='F')\n    _map_to_bins(X, self.bin_thresholds_, self.is_categorical_, self.missing_values_bin_idx_, n_threads, binned)\n    return binned",
    "docstring": "Bin data X. Missing values will be mapped to the last bin. For categorical features, the mapping will be incorrect for unknown categories. Since the BinMapper is given known_categories of the entire training data (i.e. before the call to train_test_split() in case of early-stopping), this never happens. Parameters ---------- X : array-like of shape (n_samples, n_features) The data to bin. Returns ------- X_binned : array-like of shape (n_samples, n_features) The binned data (fortran-aligned).",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\ensemble\\_hist_gradient_boosting\\binning.py",
    "ast_data": "FunctionDef name:transform arg:self arg:X arguments arg arg Assign Call Call If Compare Raise Call Call Assign Call Assign Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_replace_tensors_by_numpy_ndarrays",
    "source_code": "def _replace_tensors_by_numpy_ndarrays(repr_ds_map: rd.RepresentativeDatasetMapping) -> None:\n    with session.Session() as sess:\n        for signature_def_key in repr_ds_map:\n            ds = repr_ds_map[signature_def_key]\n            repr_ds_map[signature_def_key] = rd.replace_tensors_by_numpy_ndarrays(ds, sess)",
    "docstring": "Replaces tf.Tensors by their evaluated numpy arrays. This assumes that tf.Tensors in representative samples are created in the default Graph. It will raise an error if tensors are created in a different graph. Args: repr_ds_map: SignatureDef key -> RepresentativeDataset mapping.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\compiler\\mlir\\quantization\\tensorflow\\python\\py_function_lib.py",
    "ast_data": "FunctionDef name:_replace_tensors_by_numpy_ndarrays arg:repr_ds_map arguments arg With Call For Assign Assign Call"
  },
  {
    "library": "sphinx",
    "name": "Index",
    "source_code": "class Index(ABC):\n    name: str\n    localname: str\n    shortname: str | None = None\n\n    def __init__(self, domain: Domain) -> None:\n        if not self.name or self.localname is None:\n            msg = f'Index subclass {self.__class__.__name__} has no valid name or localname'\n            raise SphinxError(msg)\n        self.domain = domain\n\n    @abstractmethod\n    def generate(self, docnames: Iterable[str] | None=None) -> tuple[list[tuple[str, list[IndexEntry]]], bool]:\n        raise NotImplementedError",
    "docstring": "An Index is the description for a domain-specific index. To add an index to a domain, subclass Index, overriding the three name attributes: * is an identifier used for generating file names. It is also used for a hyperlink target for the index. Therefore, users can refer the index page using `py-modindex``localnameshortnamegenerateindices~sphinx.application.Sphinx.add_index_to_domainref` role.",
    "type": "class",
    "file_path": "sphinx\\sphinx\\domains\\_index.py",
    "ast_data": "ClassDef name:Index FunctionDef name:__init__ arg:self arg:domain arguments arg arg If BoolOp Compare Assign Raise Call Assign FunctionDef name:generate arg:self arg:docnames arguments arg arg Raise"
  },
  {
    "library": "pytorch",
    "name": "_check_graph_diff",
    "source_code": "def _check_graph_diff(model: torch.nn.Module | torch.jit.ScriptModule, test_input_groups: Sequence[tuple[tuple[Any, ...], Mapping[str, Any]]], export_options: _experimental.ExportOptions, model_to_graph_func: Callable[[torch.nn.Module, tuple[Any, ...], Mapping[str, Any], _experimental.ExportOptions], _C.Graph]) -> str:\n    if len(test_input_groups) < 2:\n        raise ValueError('Need at least two groups of test inputs to compare.')\n    ref_jit_graph = None\n    for args, kwargs in test_input_groups:\n        jit_graph = model_to_graph_func(model, args, kwargs, export_options)\n        if ref_jit_graph is None:\n            ref_jit_graph = jit_graph\n            continue\n        graph_diff_report = _GraphDiff(ref_jit_graph, jit_graph).diff_report()\n        if graph_diff_report:\n            return graph_diff_report\n    return ''",
    "docstring": "Check if graph produced by is the same across . Args: model: See :func:. test_input_groups: See :func:. export_options: See :func:. model_to_graph_func: A function to convert a PyTorch model to a JIT IR graph. Returns: graph_diff_report (str): A string representation of the graph difference.",
    "type": "function",
    "file_path": "pytorch\\torch\\onnx\\verification.py",
    "ast_data": "FunctionDef name:_check_graph_diff arg:model arg:test_input_groups arg:export_options arg:model_to_graph_func arguments arg arg arg arg If Compare Call Raise Call Assign For Assign Call If Compare Assign Assign Call Call If Return return:yes Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "report_download_progress",
    "source_code": "def report_download_progress(chunk_number: int, chunk_size: int, file_size: int) -> None:\n    if file_size != -1:\n        percent = min(1, chunk_number * chunk_size / file_size)\n        bar = '#' * int(64 * percent)\n        sys.stdout.write(f'\\r0% |{bar:<64}| {int(percent * 100)}%')",
    "docstring": "Pretty printer for file download progress.",
    "type": "function",
    "file_path": "pytorch\\tools\\linter\\adapters\\s3_init.py",
    "ast_data": "FunctionDef name:report_download_progress arg:chunk_number arg:chunk_size arg:file_size arguments arg arg arg If Compare Assign Call Assign Call Call Call"
  },
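The three-argument signature matches the `reporthook` contract of `urllib.request.urlretrieve`, so the printer can be plugged in directly (the URL below is a placeholder):

```python
import sys
from urllib.request import urlretrieve

def report(chunk_number: int, chunk_size: int, file_size: int) -> None:
    if file_size != -1:
        percent = min(1, chunk_number * chunk_size / file_size)
        bar = "#" * int(64 * percent)
        sys.stdout.write(f"\r0% |{bar:<64}| {int(percent * 100)}%")

# placeholder URL; any downloadable file works
urlretrieve("https://example.com/file.bin", "file.bin", reporthook=report)
```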
  {
    "library": "kornia",
    "name": "quaternion",
    "source_code": "@property\ndef quaternion(self) -> Quaternion:\n    return self._rotation.q",
    "docstring": "Return the underlying rotation(Quaternion).",
    "type": "method",
    "file_path": "kornia\\kornia\\geometry\\liegroup\\se3.py",
    "ast_data": "FunctionDef name:quaternion arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "GetGradState",
    "source_code": "def GetGradState(self, op: ops.Operation, before):\n    if before and util.IsLoopExit(op):\n        forward_ctxt = op._get_control_flow_context()\n        forward_ctxt = forward_ctxt.outer_context\n        if forward_ctxt:\n            forward_ctxt = forward_ctxt.GetWhileContext()\n    else:\n        forward_ctxt = util.GetWhileContext(op)\n    if forward_ctxt:\n        return self._map.get(forward_ctxt)\n    return None",
    "docstring": "Return the grad state for this op if it's in a forward loop context.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\control_flow_state.py",
    "ast_data": "FunctionDef name:GetGradState arg:self arg:op arg:before arguments arg arg arg If BoolOp Call Assign Call Assign If Assign Call Assign Call If Return return:yes Call Return return:no"
  },
  {
    "library": "pytorch",
    "name": "step",
    "source_code": "@_use_grad_for_differentiable\ndef step(self, closure=None):\n    self._cuda_graph_capture_health_check()\n    loss = None\n    if closure is not None:\n        with torch.enable_grad():\n            loss = closure()\n    for group in self.param_groups:\n        params_with_grad: list[Tensor] = []\n        grads: list[Tensor] = []\n        exp_avgs: list[Tensor] = []\n        exp_infs: list[Tensor] = []\n        state_steps: list[Tensor] = []\n        beta1, beta2 = group['betas']\n        eps = group['eps']\n        lr = group['lr']\n        weight_decay = group['weight_decay']\n        foreach = group['foreach']\n        maximize = group['maximize']\n        differentiable = group['differentiable']\n        capturable = group['capturable']\n        has_complex = self._init_group(group, params_with_grad, grads, exp_avgs, exp_infs, state_steps)\n        adamax(params_with_grad, grads, exp_avgs, exp_infs, state_steps, eps=eps, beta1=beta1, beta2=beta2, lr=lr, weight_decay=weight_decay, foreach=foreach, maximize=maximize, differentiable=differentiable, capturable=capturable, has_complex=has_complex)\n    return loss",
    "docstring": "Performs a single optimization step. Args: closure (Callable, optional): A closure that reevaluates the model and returns the loss.",
    "type": "method",
    "file_path": "pytorch\\torch\\optim\\adamax.py",
    "ast_data": "FunctionDef name:step arg:self arg:closure arguments arg arg Call Assign If Compare With Call Assign Call For Assign Assign Assign Assign Assign Assign Assign Assign Assign Call Call Return return:yes"
  },
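Standard usage through the public optimizer API, including the optional closure form that `step()` supports:

```python
import torch

model = torch.nn.Linear(4, 2)
opt = torch.optim.Adamax(model.parameters(), lr=2e-3)

# plain form: backward first, then step
loss = model(torch.randn(8, 4)).pow(2).mean()
opt.zero_grad()
loss.backward()
opt.step()

# closure form: step() re-evaluates the model and returns the loss
def closure():
    opt.zero_grad()
    out = model(torch.randn(8, 4)).pow(2).mean()
    out.backward()
    return out

loss = opt.step(closure)
```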
  {
    "library": "kornia",
    "name": "MotionBlur",
    "source_code": "class MotionBlur(Module):\n\n    def __init__(self, kernel_size: int, angle: float, direction: float, border_type: str='constant', mode: str='nearest') -> None:\n        super().__init__()\n        self.kernel_size = kernel_size\n        self.angle = angle\n        self.direction = direction\n        self.border_type = border_type\n        self.mode = mode\n\n    def __repr__(self) -> str:\n        return f'{self.__class__.__name__} (kernel_size={self.kernel_size}, angle={self.angle}, direction={self.direction}, border_type={self.border_type})'\n\n    def forward(self, x: Tensor) -> Tensor:\n        return motion_blur(x, self.kernel_size, self.angle, self.direction, self.border_type)",
    "docstring": "Blur 2D images (4D tensor) using the motion filter. Args: kernel_size: motion kernel width and height. It should be odd and positive. angle: angle of the motion blur in degrees (anti-clockwise rotation). direction: forward/backward direction of the motion blur. Lower values towards -1.0 will point the motion blur towards the back (with angle provided via angle), while higher values towards 1.0 will point the motion blur forward. A value of 0.0 leads to a uniformly (but still angled) motion blur. border_type: the padding mode to be applied before convolving. The expected modes are: `(B, C, H, W)(B, C, H, W)` Examples: >>> input = torch.rand(2, 4, 5, 7) >>> motion_blur = MotionBlur(3, 35., 0.5) >>> output = motion_blur(input) # 2x4x5x7",
    "type": "class",
    "file_path": "kornia\\kornia\\filters\\motion.py",
    "ast_data": "ClassDef name:MotionBlur FunctionDef name:__init__ arg:self arg:kernel_size arg:angle arg:direction arg:border_type arg:mode arguments arg arg arg arg arg arg Call Call Assign Assign Assign Assign Assign FunctionDef name:__repr__ arg:self arguments arg Return return:yes FunctionDef name:forward arg:self arg:x arguments arg arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "stride_ordered_for_memory_format",
    "source_code": "@staticmethod\ndef stride_ordered_for_memory_format(sizes, memory_format):\n    if memory_format == torch.channels_last:\n        return FlexibleLayout.stride_ordered(sizes, NHWC_STRIDE_ORDER)\n    elif memory_format == torch.channels_last_3d:\n        return FlexibleLayout.stride_ordered(sizes, NHWDC_STRIDE_ORDER)\n    elif memory_format == torch.contiguous_format:\n        return FlexibleLayout.contiguous_strides(sizes)\n    else:\n        log.debug('stride_ordered_for_memory_format, unsuppored memory_format: %s', memory_format)\n        raise NotImplementedError",
    "docstring": "Create a stride based on a memory format. Memory format is translasted into a stride order, so channels_last is the same as: FlexibleLayout.stride_ordered(sizes, [3, 0, 2, 1]) This interface does not support memory_format which should be used to deduce a format from another source",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\ir.py",
    "ast_data": "FunctionDef name:stride_ordered_for_memory_format arg:sizes arg:memory_format arguments arg arg If Compare Return return:yes Call If Compare Return return:yes Call If Compare Return return:yes Call Call Raise"
  },
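The stride orders named above correspond to what PyTorch itself produces for the matching memory formats; for example, channels-last on an NCHW-shaped tensor puts the channel stride at 1:

```python
import torch

x = torch.empty(2, 3, 4, 5).to(memory_format=torch.channels_last)
print(x.stride())   # (60, 1, 15, 3): channels vary fastest (NHWC order)
```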
  {
    "library": "tensorflow",
    "name": "_ragged_tensor_from_variant_grad",
    "source_code": "@ops.RegisterGradient('RaggedTensorFromVariant')\ndef _ragged_tensor_from_variant_grad(op, *grads):\n    variant_rank = op.inputs[0].shape.rank\n    if variant_rank == 0:\n        batched_input = False\n    elif variant_rank == 1:\n        batched_input = True\n    elif variant_rank is None:\n        batched_input = op.get_attr('output_ragged_rank') > 0\n    else:\n        raise ValueError('Unable to compute gradient: RaggedTensorToVariant can currently only generate 0D or 1D output.')\n    return [gen_ragged_conversion_ops.ragged_tensor_to_variant(rt_nested_splits=op.outputs[:-1], rt_dense_values=grads[-1], batched_input=batched_input)]",
    "docstring": "Gradient for RaggedTensorFromVariant op.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\ragged_conversion_ops.py",
    "ast_data": "FunctionDef name:_ragged_tensor_from_variant_grad arg:op arguments arg arg Assign If Compare Assign If Compare Assign If Compare Assign Compare Call Raise Call Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "aggregate_and_return_name_for_input",
    "source_code": "def aggregate_and_return_name_for_input(self, out_graphdef):\n    flattened = self.flatten_nodes()\n    if self.aggregation == OpHint.AGGREGATE_FIRST or self.aggregation == OpHint.AGGREGATE_LAST:\n        assert len(flattened) == 1\n    if len(flattened) == 1 and self.aggregation != OpHint.AGGREGATE_STACK:\n        return _tensor_name_base(flattened[0].name)\n    else:\n        new_node = _node_def_pb2.NodeDef()\n        new_node.op = 'Pack'\n        new_node.name = 'OpHintStack-%s' % flattened[0].name\n        new_node.attr['N'].i = len(flattened)\n        new_node.attr['T'].type = flattened[0].attr['T'].type\n        for discrete in flattened:\n            new_node.input.append(_tensor_name_base(discrete.name))\n        out_graphdef.node.extend([new_node])\n        return new_node.name",
    "docstring": "This adds the nodes to out_graphdef and returns an aggregated output. In particular, if you have 4 inputs to a hint stub, this will be the node that you can use as an output. I.e. you have 4 timesteps from a static rnn, then a fused UnidirectionalLSTM will expect 1 input with all 4 time steps. So here we make a pack and return the output name of that pack. Args: out_graphdef: A graphdef that is ready to have this input added. Returns: The name of a pack that aggregates this node.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\lite\\python\\op_hint.py",
    "ast_data": "FunctionDef name:aggregate_and_return_name_for_input arg:self arg:out_graphdef arguments arg arg Assign Call If BoolOp Compare Compare Compare Call If BoolOp Compare Call Compare Return return:yes Call Assign Call Assign Assign Assign Call Assign For Call Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "convert_n_shadows_model",
    "source_code": "def convert_n_shadows_model(model: GraphModule, custom_convert_fn: Optional[Callable]=None, custom_convert_kwargs: Optional[dict[str, Any]]=None) -> GraphModule:\n    for node in model.graph.nodes:\n        if node.name.startswith(SHADOW_WRAPPER_NODE_NAME_PREFIX):\n            orig_mod = getattr(model, node.name)\n            if custom_convert_fn is None:\n                converted_mod = torch.ao.quantization.quantize_fx.convert_fx(orig_mod)\n            else:\n                if custom_convert_kwargs is None:\n                    custom_convert_kwargs = {}\n                converted_mod = custom_convert_fn(orig_mod, **custom_convert_kwargs)\n            setattr(model, node.name, converted_mod)\n    return model",
    "docstring": "Given a model from , runs on each shadow submodule.",
    "type": "function",
    "file_path": "pytorch\\torch\\ao\\ns\\_numeric_suite_fx.py",
    "ast_data": "FunctionDef name:convert_n_shadows_model arg:model arg:custom_convert_fn arg:custom_convert_kwargs arguments arg arg arg For If Call Assign Call If Compare Assign Call If Compare Assign Assign Call Call Return return:yes"
  },
  {
    "library": "numpy",
    "name": "legmulx",
    "source_code": "def legmulx(c):\n    [c] = pu.as_series([c])\n    if len(c) == 1 and c[0] == 0:\n        return c\n    prd = np.empty(len(c) + 1, dtype=c.dtype)\n    prd[0] = c[0] * 0\n    prd[1] = c[0]\n    for i in range(1, len(c)):\n        j = i + 1\n        k = i - 1\n        s = i + j\n        prd[j] = c[i] * j / s\n        prd[k] += c[i] * i / s\n    return prd",
    "docstring": "Multiply a Legendre series by x. Multiply the Legendre series by x, where x is the independent variable. Parameters ---------- c : array_like 1-D array of Legendre series coefficients ordered from low to high. Returns ------- out : ndarray Array representing the result of the multiplication. See Also -------- legadd, legsub, legmul, legdiv, legpow Notes ----- The multiplication uses the recursion relationship for Legendre polynomials in the form .. math:: xP_i(x) = ((i + 1)*P_{i + 1}(x) + i*P_{i - 1}(x))/(2i + 1) Examples -------- >>> from numpy.polynomial import legendre as L >>> L.legmulx([1,2,3]) array([ 0.66666667, 2.2, 1.33333333, 1.8]) # may vary",
    "type": "function",
    "file_path": "numpy\\numpy\\polynomial\\legendre.py",
    "ast_data": "FunctionDef name:legmulx arg:c arguments arg Assign Call If BoolOp Compare Call Compare Return return:yes Assign Call Call Assign Assign For Call Call Assign Assign Assign Assign Return return:yes"
  },
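The recursion is exercised directly by the public API; the docstring example reproduced:

```python
from numpy.polynomial import legendre as L

# (P_0 + 2*P_1 + 3*P_2) * x, re-expressed in the Legendre basis
print(L.legmulx([1, 2, 3]))
# [0.66666667 2.2        1.33333333 1.8       ]
```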
  {
    "library": "pytorch",
    "name": "create_backend",
    "source_code": "def create_backend(params: RendezvousParameters) -> tuple[EtcdRendezvousBackend, Store]:\n    client = _create_etcd_client(params)\n    backend = EtcdRendezvousBackend(client, params.run_id, key_prefix='/torch/elastic/rendezvous')\n    store = EtcdStore(client, '/torch/elastic/store')\n    return (backend, store)",
    "docstring": "Create a new :py:class: from the specified parameters. +--------------+-----------------------------------------------------------+ | Parameter | Description | +==============+===========================================================+ | read_timeout | The read timeout, in seconds, for etcd operations. | | | Defaults to 60 seconds. | +--------------+-----------------------------------------------------------+ | protocol | The protocol to use to communicate with etcd. Valid | | | values are \"http\" and \"https\". Defaults to \"http\". | +--------------+-----------------------------------------------------------+ | ssl_cert | The path to the SSL client certificate to use along with | | | HTTPS. Defaults to ``. | +--------------+-----------------------------------------------------------+",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\elastic\\rendezvous\\etcd_rendezvous_backend.py",
    "ast_data": "FunctionDef name:create_backend arg:params arguments arg Assign Call Assign Call Assign Call Return return:yes"
  },
  {
    "library": "scipy",
    "name": "ttest_1samp",
    "source_code": "def ttest_1samp(a, popmean, axis=0, alternative='two-sided'):\n    a, axis = _chk_asarray(a, axis)\n    if a.size == 0:\n        return (np.nan, np.nan)\n    x = a.mean(axis=axis)\n    v = a.var(axis=axis, ddof=1)\n    n = a.count(axis=axis)\n    df = ma.asanyarray(n - 1.0)\n    svar = (n - 1.0) * v / df\n    with np.errstate(divide='ignore', invalid='ignore'):\n        t = (x - popmean) / ma.sqrt(svar / n)\n    t, prob = _ttest_finish(df, t, alternative)\n    return Ttest_1sampResult(t, prob)",
    "docstring": "Calculates the T-test for the mean of ONE group of scores. Parameters ---------- a : array_like sample observation popmean : float or array_like expected value in null hypothesis, if array_like than it must have the same shape as excluding the axis dimension axis : int or None, optional Axis along which to compute test. If None, compute over the whole array . alternative : {'two-sided', 'less', 'greater'}, optional Defines the alternative hypothesis. The following options are available (default is 'two-sided'): * 'two-sided': the mean of the underlying distribution of the sample is different than the given population mean () * 'less': the mean of the underlying distribution of the sample is less than the given population mean () * 'greater': the mean of the underlying distribution of the sample is greater than the given population mean () .. versionadded:: 1.7.0 Returns ------- statistic : float or array t-statistic pvalue : float or array The p-value Notes ----- For more details on , see .",
    "type": "function",
    "file_path": "scipy\\scipy\\stats\\_mstats_basic.py",
    "ast_data": "FunctionDef name:ttest_1samp arg:a arg:popmean arg:axis arg:alternative arguments arg arg arg arg Assign Call If Compare Return return:yes Assign Call Assign Call Assign Call Assign Call Assign With Call Assign Call Assign Call Return return:yes Call"
  },
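A quick call through the public masked-stats interface; masked entries are excluded from the count used in the statistic:

```python
import numpy as np
from scipy.stats import mstats

rng = np.random.default_rng(0)
sample = np.ma.masked_invalid(rng.normal(0.5, 1.0, 50))
t, p = mstats.ttest_1samp(sample, popmean=0.0)
print(float(t), float(p))   # t well above 0 for this shifted sample
```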
  {
    "library": "sphinx",
    "name": "make_footnote_ref",
    "source_code": "def make_footnote_ref(doc: nodes.document, label: str) -> nodes.footnote_reference:\n    footnote_ref = nodes.footnote_reference('[#]_')\n    footnote_ref.append(nodes.Text(label))\n    doc.note_autofootnote_ref(footnote_ref)\n    return footnote_ref",
    "docstring": "Create a footnote_reference node with children",
    "type": "method",
    "file_path": "sphinx\\sphinx\\builders\\_epub_base.py",
    "ast_data": "FunctionDef name:make_footnote_ref arg:doc arg:label arguments arg arg Assign Call Call Call Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "get_figlabels",
    "source_code": "def get_figlabels() -> list[Any]:\n    managers = _pylab_helpers.Gcf.get_all_fig_managers()\n    managers.sort(key=lambda m: m.num)\n    return [m.canvas.figure.get_label() for m in managers]",
    "docstring": "Return a list of existing figure labels.",
    "type": "function",
    "file_path": "matplotlib\\lib\\matplotlib\\pyplot.py",
    "ast_data": "FunctionDef name:get_figlabels arguments Assign Call Call arguments arg Return return:yes Call"
  },
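Figure labels come from the `num` argument when it is a string; a minimal check:

```python
import matplotlib.pyplot as plt

plt.figure("losses")    # a string num doubles as the figure label
plt.figure("metrics")
print(plt.get_figlabels())   # ['losses', 'metrics']
```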
  {
    "library": "pytorch",
    "name": "_sharded_pre_state_dict_hook",
    "source_code": "def _sharded_pre_state_dict_hook(fsdp_state: _FSDPState, module: nn.Module, *args, **kwargs) -> None:\n    if _has_fsdp_params(fsdp_state, module) and (not _module_handle(fsdp_state, module).uses_sharded_strategy):\n        raise RuntimeError('``sharded_state_dict`` can only be used when parameters are flatten and sharded.')\n    _common_pre_state_dict_hook(module, fsdp_state)\n    _common_unshard_pre_state_dict_hook(module, fsdp_state, offload_to_cpu=False, rank0_only=False)",
    "docstring": "Hook that runs before model.state_dict() is called. Check `` for the detail.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\fsdp\\_state_dict_utils.py",
    "ast_data": "FunctionDef name:_sharded_pre_state_dict_hook arg:fsdp_state arg:module arguments arg arg arg arg If BoolOp Call Call Raise Call Call Call"
  },
  {
    "library": "numpy",
    "name": "compress_cols",
    "source_code": "def compress_cols(a):\n    a = asarray(a)\n    if a.ndim != 2:\n        raise NotImplementedError('compress_cols works for 2D arrays only.')\n    return compress_rowcols(a, 1)",
    "docstring": "Suppress whole columns of a 2-D array that contain masked values. This is equivalent to `compress_rowcolsxmasknomask`. Must be a 2D array. Returns ------- compressed_array : ndarray The compressed array. See Also -------- compress_rowcols Examples -------- >>> import numpy as np >>> a = np.ma.array(np.arange(9).reshape(3, 3), mask=[[1, 0, 0], ... [1, 0, 0], ... [0, 0, 0]]) >>> np.ma.compress_cols(a) array([[1, 2], [4, 5], [7, 8]])",
    "type": "function",
    "file_path": "numpy\\numpy\\ma\\extras.py",
    "ast_data": "FunctionDef name:compress_cols arg:a arguments arg Assign Call If Compare Raise Call Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "_callf",
    "source_code": "def _callf(self, x, error=True):\n    fx = self.f(x, *self.args)\n    self.function_calls += 1\n    if not np.isfinite(fx) and error:\n        raise ValueError(f'Invalid function value: f({x:f}) -> {fx} ')\n    return fx",
    "docstring": "Call the user-supplied function, update book-keeping",
    "type": "method",
    "file_path": "scipy\\scipy\\optimize\\_zeros_py.py",
    "ast_data": "FunctionDef name:_callf arg:self arg:x arg:error arguments arg arg arg Assign Call If BoolOp Call Raise Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "as_standardized",
    "source_code": "def as_standardized(self) -> 'CallgrindStats':\n\n    def strip(stats: FunctionCounts) -> FunctionCounts:\n        transforms = (('^.+build/\\\\.\\\\./', 'build/../'), ('^.+/' + re.escape('build/aten/'), 'build/aten/'), ('^.+/' + re.escape('Python/'), 'Python/'), ('^.+/' + re.escape('Objects/'), 'Objects/'), ('\\\\s\\\\[.+\\\\]$', ''))\n        for before, after in transforms:\n            stats = stats.transform(lambda fn: re.sub(before, after, fn))\n        return stats\n    return CallgrindStats(task_spec=self.task_spec, number_per_run=self.number_per_run, built_with_debug_symbols=self.built_with_debug_symbols, baseline_inclusive_stats=strip(self.baseline_inclusive_stats), baseline_exclusive_stats=strip(self.baseline_exclusive_stats), stmt_inclusive_stats=strip(self.stmt_inclusive_stats), stmt_exclusive_stats=strip(self.stmt_exclusive_stats), stmt_callgrind_out=None)",
    "docstring": "Strip library names and some prefixes from function strings. When comparing two different sets of instruction counts, on stumbling block can be path prefixes. Callgrind includes the full filepath when reporting a function (as it should). However, this can cause issues when diffing profiles. If a key component such as Python or PyTorch was built in separate locations in the two profiles, which can result in something resembling:: 23234231 /tmp/first_build_dir/thing.c:foo(...) 9823794 /tmp/first_build_dir/thing.c:bar(...) ... 53453 .../aten/src/Aten/...:function_that_actually_changed(...) ... -9823794 /tmp/second_build_dir/thing.c:bar(...) -23234231 /tmp/second_build_dir/thing.c:foo(...) Stripping prefixes can ameliorate this issue by regularizing the strings and causing better cancellation of equivalent call sites when diffing.",
    "type": "method",
    "file_path": "pytorch\\torch\\utils\\benchmark\\utils\\valgrind_wrapper\\timer_interface.py",
    "ast_data": "FunctionDef name:as_standardized arg:self arguments arg FunctionDef name:strip arg:stats arguments arg Assign Call Call Call For Assign Call arguments arg Call Return return:yes Return return:yes Call Call Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "statically_known_leq",
    "source_code": "def statically_known_leq(self, left: Expr, right: Union[Expr, int]) -> bool:\n    expr = left <= right\n    return self.is_expr_static_and_true(expr)",
    "docstring": "Returns a bool indicating if it is sound to optimize as if left is less than or equal to right.",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\sizevars.py",
    "ast_data": "FunctionDef name:statically_known_leq arg:self arg:left arg:right arguments arg arg arg Assign Compare Return return:yes Call"
  },
  {
    "library": "django",
    "name": "suggest_name",
    "source_code": "def suggest_name(self):\n    if self.initial:\n        return 'initial'\n    raw_fragments = [op.migration_name_fragment for op in self.operations]\n    fragments = [re.sub('\\\\W+', '_', name) for name in raw_fragments if name]\n    if not fragments or len(fragments) != len(self.operations):\n        return 'auto_%s' % get_migration_name_timestamp()\n    name = fragments[0]\n    for fragment in fragments[1:]:\n        new_name = f'{name}_{fragment}'\n        if len(new_name) > 52:\n            name = f'{name}_and_more'\n            break\n        name = new_name\n    return name",
    "docstring": "Suggest a name for the operations this migration might represent. Names are not guaranteed to be unique, but put some effort into the fallback name to avoid VCS conflicts if possible.",
    "type": "method",
    "file_path": "django\\django\\db\\migrations\\migration.py",
    "ast_data": "FunctionDef name:suggest_name arg:self arguments arg If Return return:yes Assign Assign Call If BoolOp Compare Call Call Return return:yes Call Assign For Assign If Compare Call Assign Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "Reducer",
    "source_code": "@tf_export('data.experimental.Reducer')\nclass Reducer:\n\n    def __init__(self, init_func, reduce_func, finalize_func):\n        self._init_func = init_func\n        self._reduce_func = reduce_func\n        self._finalize_func = finalize_func\n\n    @property\n    def init_func(self):\n        return self._init_func\n\n    @property\n    def reduce_func(self):\n        return self._reduce_func\n\n    @property\n    def finalize_func(self):\n        return self._finalize_func",
    "docstring": "A reducer is used for reducing a set of elements. A reducer is represented as a tuple of the three functions: - init_func - to define initial value: key => initial state - reducer_func - operation to perform on values with same key: (old state, input) => new state - finalize_func - value to return in the end: state => result For example,",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\data\\experimental\\ops\\grouping.py",
    "ast_data": "ClassDef name:Reducer FunctionDef name:__init__ arg:self arg:init_func arg:reduce_func arg:finalize_func arguments arg arg arg arg Assign Assign Assign FunctionDef name:init_func arg:self arguments arg Return return:yes FunctionDef name:reduce_func arg:self arguments arg Return return:yes FunctionDef name:finalize_func arg:self arguments arg Return return:yes Call"
  },
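The usage sketch referenced in the docstring, pairing a Reducer with `group_by_reducer` to sum elements by parity (dtypes kept at int64 to match `Dataset.range`):

```python
import tensorflow as tf

reducer = tf.data.experimental.Reducer(
    init_func=lambda _: tf.constant(0, tf.int64),
    reduce_func=lambda state, value: state + value,
    finalize_func=lambda state: state,
)
ds = tf.data.Dataset.range(10).apply(
    tf.data.experimental.group_by_reducer(lambda x: x % 2, reducer)
)
print(list(ds.as_numpy_iterator()))   # e.g. [20, 25] (even sum, odd sum)
```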
  {
    "library": "pytorch",
    "name": "ExportOptions",
    "source_code": "@deprecated('torch.onnx.dynamo_export is deprecated since 2.7.0. Please use torch.onnx.export(..., dynamo=True) instead.')\nclass ExportOptions:\n\n    def __init__(self, *, dynamic_shapes: bool | None=None):\n        self.dynamic_shapes: bool | None = dynamic_shapes",
    "docstring": "Options for dynamo_export. .. deprecated:: 2.7 Please use ``, all input shapes are considered static.",
    "type": "class",
    "file_path": "pytorch\\torch\\onnx\\__init__.py",
    "ast_data": "ClassDef name:ExportOptions FunctionDef name:__init__ arg:self arguments arg arg Call"
  },
  {
    "library": "django",
    "name": "string_output",
    "source_code": "def string_output(func, argtypes, offset=-1, str_result=False, decoding=None):\n    func.argtypes = argtypes\n    if str_result:\n        func.restype = gdal_char_p\n    else:\n        func.restype = c_int\n\n    def _check_str(result, func, cargs):\n        res = check_string(result, func, cargs, offset=offset, str_result=str_result)\n        if res and decoding:\n            res = res.decode(decoding)\n        return res\n    func.errcheck = _check_str\n    return func",
    "docstring": "Generate a ctypes prototype for the given function with the given argument types that returns a string from a GDAL pointer. The flag indicates whether the allocated pointer should be freed via the GDAL library routine VSIFree -- but only applies only when is True.",
    "type": "function",
    "file_path": "django\\django\\contrib\\gis\\gdal\\prototypes\\generation.py",
    "ast_data": "FunctionDef name:string_output arg:func arg:argtypes arg:offset arg:str_result arg:decoding arguments arg arg arg arg arg Assign If Assign Assign FunctionDef name:_check_str arg:result arg:func arg:cargs arguments arg arg arg Assign Call If BoolOp Assign Call Return return:yes Assign Return return:yes"
  },
  {
    "library": "sphinx",
    "name": "write",
    "source_code": "@final\ndef write(self, build_docnames: Iterable[str] | None, updated_docnames: Iterable[str], method: Literal['all', 'specific', 'update']='update') -> None:\n    env = self.env\n    self.events.emit('write-started', self)\n    if build_docnames is None or build_docnames == ['__all__']:\n        build_docnames = env.found_docs\n    if method == 'update':\n        docnames = set(build_docnames) | set(updated_docnames)\n    else:\n        docnames = set(build_docnames)\n    if docnames:\n        logger.debug(__('docnames to write: %s'), ', '.join(sorted(docnames)))\n    else:\n        logger.debug(__('no docnames to write!'))\n    docnames |= {toc_docname for docname in docnames for toc_docname in env.files_to_rebuild.get(docname, ()) if toc_docname in env.found_docs}\n    env.toctree_includes = dict(sorted(env.toctree_includes.items()))\n    with progress_message(__('preparing documents')):\n        self.prepare_writing(docnames)\n    with progress_message(__('copying assets'), nonl=False):\n        self.copy_assets()\n    if docnames:\n        self.write_documents(docnames)",
    "docstring": "Write builder specific output files.",
    "type": "method",
    "file_path": "sphinx\\sphinx\\builders\\__init__.py",
    "ast_data": "FunctionDef name:write arg:self arg:build_docnames arg:updated_docnames arg:method arguments arg arg arg arg Assign Call If BoolOp Compare Compare Assign If Compare Assign Call Call Assign Call If Call Call Call Call Call Call Call Compare Assign Call Call Call With Call Call Call With Call Call Call If Call"
  },
  {
    "library": "pytorch",
    "name": "_copy_state_dict",
    "source_code": "@torch.no_grad()\ndef _copy_state_dict(state_dict: dict[str, Any], copy_state_dict: dict[str, Any], non_blocking: bool=False, type_check: bool=True) -> dict[str, Any]:\n    return _iterate_state_dict(state_dict, _identity_func, _identity_func, _identity_func, pg=None, device=None, cpu_offload=False, ranks_only=(), companion_obj=copy_state_dict, type_check=type_check, non_blocking=non_blocking)",
    "docstring": "Copies all tensors in a given state dict into a different state_dict with the same structure. Additionally, a copied state dict with the same value references is returned. Editing the keys on this state dict will not affect the passed in copy_state_dict (but the value references are the same). .. warning:: It is expected by this function that state_dict and copy_state_dict share the same structure and data types. .. warning:: The current supported data types are torch.Tensor, DTensor, int, float, str, list, dict, None. Args: state_dict (Dict[str, Any]): the target state_dict. copy_state_dict (Dict[str, Any]): The state dict we are copying into. This state_dict must have exactly the same structure as the source . non_blocking: (bool): Whether copy ops should be performed asynchronously type_check (bool): check if the instance data type is a supported type that can be saved by DCP. The current supported data types are torch.Tensor, DTensor, int, float, str, list, dict, None. Returns: State Dict copy",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\_state_dict_utils.py",
    "ast_data": "FunctionDef name:_copy_state_dict arg:state_dict arg:copy_state_dict arg:non_blocking arg:type_check arguments arg arg arg arg Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "get_output_details",
    "source_code": "def get_output_details(self):\n    return [self._get_tensor_details(i, subgraph_index=0) for i in self._interpreter.OutputIndices()]",
    "docstring": "Gets model output tensor details. Returns: A list in which each item is a dictionary with details about an output tensor. The dictionary contains the same fields as described for .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\lite\\python\\interpreter.py",
    "ast_data": "FunctionDef name:get_output_details arg:self arguments arg Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, name: str, object_id: int, timestamp: int, pid: int, allocator: str, num_bytes: int) -> None:\n    self._name = name\n    self._pid = pid\n    self._object_id = object_id\n    self._create_time = timestamp\n    self._allocator = allocator\n    self._num_bytes = num_bytes\n    self._ref_times = []\n    self._unref_times = []",
    "docstring": "Creates an object to track tensor references. This class is not thread safe and is intended only for internal use by the 'Timeline' class in this file. Args: name: The name of the Tensor as a string. object_id: Chrome Trace object identifier assigned for this Tensor. timestamp: The creation timestamp of this event as a long integer. pid: Process identifier of the associated device, as an integer. allocator: Name of the allocator used to create the Tensor. num_bytes: Number of bytes allocated (long integer). Returns: A 'TensorTracker' object.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\client\\timeline.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:name arg:object_id arg:timestamp arg:pid arg:allocator arg:num_bytes arguments arg arg arg arg arg arg arg Assign Assign Assign Assign Assign Assign Assign Assign"
  },
  {
    "library": "pytorch",
    "name": "_record_allocation",
    "source_code": "def _record_allocation(self, buffer: CodegenBuffer, node: torch.fx.Node) -> None:\n    assert node not in self.buffer_to_node\n    self.buffer_to_node[buffer.get_name()] = node",
    "docstring": "Updates the symbol table to record that an Inductor buffer maps to the result of an FX node.",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\codegen\\wrapper_fxir.py",
    "ast_data": "FunctionDef name:_record_allocation arg:self arg:buffer arg:node arguments arg arg arg Compare Assign Call"
  },
  {
    "library": "kornia",
    "name": "__call__",
    "source_code": "def __call__(self, *inputs: np.ndarray) -> list[np.ndarray]:\n    ort_inputs = self._session.get_inputs()\n    ort_input_values = {ort_inputs[i].name: inputs[i] for i in range(len(ort_inputs))}\n    outputs = self._session.run(None, ort_input_values)\n    return outputs",
    "docstring": "Perform inference using the combined ONNX model. Args: *inputs: Inputs to the ONNX model. The number of inputs must match the expected inputs of the session. Returns: list: The outputs from the ONNX model inference.",
    "type": "method",
    "file_path": "kornia\\kornia\\core\\mixin\\onnx.py",
    "ast_data": "FunctionDef name:__call__ arg:self arguments arg arg Assign Call Assign Call Call Assign Call Return return:yes"
  },
  {
    "library": "scipy",
    "name": "write_header",
    "source_code": "def write_header(self, name, shape, P=miDOUBLE, T=mxFULL_CLASS, imagf=0):\n    header = np.empty((), mdtypes_template['header'])\n    M = not SYS_LITTLE_ENDIAN\n    O = 0\n    header['mopt'] = M * 1000 + O * 100 + P * 10 + T\n    header['mrows'] = shape[0]\n    header['ncols'] = shape[1]\n    header['imagf'] = imagf\n    header['namlen'] = len(name) + 1\n    self.write_bytes(header)\n    data = name + '\\x00'\n    self.write_string(data.encode('latin1'))",
    "docstring": "Write header for given data options Parameters ---------- name : str name of variable shape : sequence Shape of array as it will be read in matlab P : int, optional code for mat4 data type, one of `` imagf : int, optional flag indicating complex",
    "type": "method",
    "file_path": "scipy\\scipy\\io\\matlab\\_mio4.py",
    "ast_data": "FunctionDef name:write_header arg:self arg:name arg:shape arg:P arg:T arg:imagf arguments arg arg arg arg arg arg Assign Call Assign Assign Assign Assign Assign Assign Assign Call Call Assign Call Call"
  },
  {
    "library": "seaborn",
    "name": "_define_bin_edges",
    "source_code": "def _define_bin_edges(self, x, weights, bins, binwidth, binrange, discrete):\n    if binrange is None:\n        start, stop = (x.min(), x.max())\n    else:\n        start, stop = binrange\n    if discrete:\n        bin_edges = np.arange(start - 0.5, stop + 1.5)\n    elif binwidth is not None:\n        step = binwidth\n        bin_edges = np.arange(start, stop + step, step)\n        if bin_edges.max() < stop or len(bin_edges) < 2:\n            bin_edges = np.append(bin_edges, bin_edges.max() + step)\n    else:\n        bin_edges = np.histogram_bin_edges(x, bins, binrange, weights)\n    return bin_edges",
    "docstring": "Inner function that takes bin parameters as arguments.",
    "type": "method",
    "file_path": "seaborn\\seaborn\\_statistics.py",
    "ast_data": "FunctionDef name:_define_bin_edges arg:self arg:x arg:weights arg:bins arg:binwidth arg:binrange arg:discrete arguments arg arg arg arg arg arg arg If Compare Assign Call Call Assign If Assign Call If Compare Assign Assign Call If BoolOp Compare Call Compare Call Assign Call Call Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_from_mesh",
    "source_code": "@classmethod\ndef _from_mesh(cls, mesh: _pywrap_dtensor_device.Mesh):\n    return cls._new_object(mesh=mesh)",
    "docstring": "Creates a copy from an existing pywrap mesh object.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\dtensor\\python\\layout.py",
    "ast_data": "FunctionDef name:_from_mesh arg:cls arg:mesh arguments arg arg Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "subset",
    "source_code": "def subset(self, x):\n    if x not in self._indices:\n        raise KeyError(x)\n    result = [x]\n    nxt = self._nbrs[x]\n    while self._indices[nxt] != self._indices[x]:\n        result.append(nxt)\n        nxt = self._nbrs[nxt]\n    return set(result)",
    "docstring": "Get the subset containing . Parameters ---------- x : hashable object Input element. Returns ------- result : set Subset containing .",
    "type": "method",
    "file_path": "scipy\\scipy\\_lib\\_disjoint_set.py",
    "ast_data": "FunctionDef name:subset arg:self arg:x arguments arg arg If Compare Raise Call Assign Assign While Compare Call Assign Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "_validate_n_bins",
    "source_code": "def _validate_n_bins(self, n_features):\n    orig_bins = self.n_bins\n    if isinstance(orig_bins, Integral):\n        return np.full(n_features, orig_bins, dtype=int)\n    n_bins = check_array(orig_bins, dtype=int, copy=True, ensure_2d=False)\n    if n_bins.ndim > 1 or n_bins.shape[0] != n_features:\n        raise ValueError('n_bins must be a scalar or array of shape (n_features,).')\n    bad_nbins_value = (n_bins < 2) | (n_bins != orig_bins)\n    violating_indices = np.where(bad_nbins_value)[0]\n    if violating_indices.shape[0] > 0:\n        indices = ', '.join((str(i) for i in violating_indices))\n        raise ValueError('{} received an invalid number of bins at indices {}. Number of bins must be at least 2, and must be an int.'.format(KBinsDiscretizer.__name__, indices))\n    return n_bins",
    "docstring": "Returns n_bins_, the number of bins per feature.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\preprocessing\\_discretization.py",
    "ast_data": "FunctionDef name:_validate_n_bins arg:self arg:n_features arguments arg arg Assign If Call Return return:yes Call Assign Call If BoolOp Compare Compare Raise Call Assign Compare Compare Assign Call If Compare Assign Call Call Raise Call Call Return return:yes"
  },
  {
    "library": "django",
    "name": "check_field_type",
    "source_code": "def check_field_type(self, field, field_type):\n    errors = []\n    if field_type.startswith('varchar') and field.unique and (field.max_length is None or int(field.max_length) > 255):\n        errors.append(checks.Warning('%s may not allow unique CharFields to have a max_length > 255.' % self.connection.display_name, obj=field, hint='See: https://docs.djangoproject.com/en/%s/ref/databases/#mysql-character-fields' % get_docs_version(), id='mysql.W003'))\n    if field.db_index and field_type.lower() in self.connection._limited_data_types:\n        errors.append(checks.Warning('%s does not support a database index on %s columns.' % (self.connection.display_name, field_type), hint=\"An index won't be created. Silence this warning if you don't care about it.\", obj=field, id='fields.W162'))\n    return errors",
    "docstring": "MySQL has the following field length restriction: No character (varchar) fields can have a length exceeding 255 characters if they have a unique index on them. MySQL doesn't support a database index on some data types.",
    "type": "method",
    "file_path": "django\\django\\db\\backends\\mysql\\validation.py",
    "ast_data": "FunctionDef name:check_field_type arg:self arg:field arg:field_type arguments arg arg arg Assign If BoolOp Call BoolOp Compare Compare Call Call Call Call If BoolOp Compare Call Call Call Return return:yes"
  },
  {
    "library": "scipy",
    "name": "scoreatpercentile",
    "source_code": "def scoreatpercentile(a, per, limit=(), interpolation_method='fraction', axis=None):\n    a = np.asarray(a)\n    if a.size == 0:\n        if np.isscalar(per):\n            return np.nan\n        else:\n            return np.full(np.asarray(per).shape, np.nan, dtype=np.float64)\n    if limit:\n        a = a[(limit[0] <= a) & (a <= limit[1])]\n    sorted_ = np.sort(a, axis=axis)\n    if axis is None:\n        axis = 0\n    return _compute_qth_percentile(sorted_, per, interpolation_method, axis)",
    "docstring": "Calculate the score at a given percentile of the input sequence. For example, the score at `interpolationlimitaijanumpy.percentilescoreatpercentilenumpy.percentile` for users that have numpy >= 1.9. Examples -------- >>> import numpy as np >>> from scipy import stats >>> a = np.arange(100) >>> stats.scoreatpercentile(a, 50) 49.5",
    "type": "function",
    "file_path": "scipy\\scipy\\stats\\_stats_py.py",
    "ast_data": "FunctionDef name:scoreatpercentile arg:a arg:per arg:limit arg:interpolation_method arg:axis arguments arg arg arg arg arg Assign Call If Compare If Call Return return:yes Return return:yes Call Call If Assign Compare Compare Assign Call If Compare Assign Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "seed",
    "source_code": "@property\ndef seed(self) -> Optional[int]:\n    return self._seed",
    "docstring": "The graph-level random seed of this graph.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\ops.py",
    "ast_data": "FunctionDef name:seed arg:self arguments arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_compare_save_plans",
    "source_code": "def _compare_save_plans(plan: SavePlan, other_plan: SavePlan) -> bool:\n    if plan.usable != other_plan.usable:\n        return False\n    if len(plan.items) != len(other_plan.items):\n        return False\n    for plan_item, other_plan_item in zip(plan.items, other_plan.items):\n        if plan_item.type != other_plan_item.type:\n            return False\n        plan_metadata_index = plan_item.index\n        other_plan_metadata_index = other_plan_item.index\n        if plan_metadata_index.fqn != other_plan_metadata_index.fqn or plan_metadata_index.offset != other_plan_metadata_index.offset or plan_metadata_index.index != other_plan_metadata_index.index:\n            return False\n        tensor_data = plan_item.tensor_data\n        other_tensor_data = other_plan_item.tensor_data\n        if tensor_data and (not other_tensor_data) or (not tensor_data and other_tensor_data):\n            return False\n        if tensor_data and other_tensor_data:\n            if tensor_data.size != other_tensor_data.size:\n                return False\n            chunk = tensor_data.chunk\n            other_chunk = other_tensor_data.chunk\n            if chunk and (not other_chunk) or (not chunk and other_chunk):\n                return False\n            if chunk and other_chunk:\n                if chunk.offsets != other_chunk.offsets or chunk.sizes != other_chunk.sizes:\n                    return False\n    return True",
    "docstring": "Compare the two Save plans and return True if they are equal. Args: plan (SavePlan): First SavePlan to compare. other_plan (SavePlan): Second SavePlan to compare. Returns: True if the two plans are equal, False otherwise.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\checkpoint\\planner_helpers.py",
    "ast_data": "FunctionDef name:_compare_save_plans arg:plan arg:other_plan arguments arg arg If Compare Return return:yes If Compare Call Call Return return:yes For Call If Compare Return return:yes Assign Assign If BoolOp Compare Compare Compare Return return:yes Assign Assign If BoolOp BoolOp BoolOp Return return:yes If BoolOp If Compare Return return:yes Assign Assign If BoolOp BoolOp BoolOp Return return:yes If BoolOp If BoolOp Compare Compare Return return:yes Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "eye",
    "source_code": "@register_decomposition(aten.eye)\n@out_wrapper()\ndef eye(n: int, m: Optional[int]=None, *, dtype: Optional[torch.dtype]=None, layout: torch.layout=torch.strided, device: Optional[DeviceLikeType]=None, pin_memory: bool=False, requires_grad: bool=False) -> TensorLikeType:\n    if m is None:\n        m = n\n    torch._check(n >= 0, lambda: f'n must be greater or equal to 0, got {n}')\n    torch._check(m >= 0, lambda: f'm must be greater or equal to 0, got {m}')\n    range_n = torch.arange(n, dtype=torch.int64, device=device, requires_grad=False)\n    range_m = torch.arange(m, dtype=torch.int64, device=device, requires_grad=False)\n    cond = range_n.unsqueeze(-1) == range_m\n    if dtype is torch.bool:\n        return cond\n    else:\n        one = torch.ones((1,), dtype=dtype, layout=layout, device=device, pin_memory=pin_memory, requires_grad=False)\n        return torch.where(cond, one, 0)",
    "docstring": "Reference implementation of torch.eye",
    "type": "function",
    "file_path": "pytorch\\torch\\_refs\\__init__.py",
    "ast_data": "FunctionDef name:eye arg:n arg:m arguments arg arg arg arg arg arg arg If Compare Assign Call Compare arguments Call Compare arguments Assign Call Assign Call Assign Compare Call If Compare Return return:yes Assign Call Return return:yes Call Call Call"
  },
  {
    "library": "sphinx",
    "name": "desc_inline",
    "source_code": "class desc_inline(_desc_classes_injector, nodes.Inline, nodes.TextElement):\n    classes = ['sig', 'sig-inline']\n\n    def __init__(self, domain: str, *args: Any, **kwargs: Any) -> None:\n        super().__init__(*args, **kwargs, domain=domain)\n        self['classes'].append(domain)",
    "docstring": "Node for a signature fragment in inline text. This is for example used for roles like :rst:role:. This node always has the classes ``, and the name of the domain it belongs to.",
    "type": "class",
    "file_path": "sphinx\\sphinx\\addnodes.py",
    "ast_data": "ClassDef name:desc_inline Assign FunctionDef name:__init__ arg:self arg:domain arguments arg arg arg arg Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "_is_vnni_supported",
    "source_code": "def _is_vnni_supported() -> bool:\n    return torch._C._cpu._is_avx512_vnni_supported()",
    "docstring": "Returns a bool indicating if CPU supports VNNI.",
    "type": "function",
    "file_path": "pytorch\\torch\\cpu\\__init__.py",
    "ast_data": "FunctionDef name:_is_vnni_supported arguments Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "_qat_conv_bn_pattern_no_conv_bias",
    "source_code": "def _qat_conv_bn_pattern_no_conv_bias(x: torch.Tensor, conv_weight: torch.Tensor, conv_bias: torch.Tensor, bn_weight: torch.Tensor, bn_bias: torch.Tensor, bn_running_mean: torch.Tensor, bn_running_var: torch.Tensor) -> torch.Tensor:\n    bn_eps = 1e-05\n    running_std = torch.sqrt(bn_running_var + bn_eps)\n    scale_factor = bn_weight / running_std\n    weight_shape = [1] * len(conv_weight.shape)\n    weight_in_channel_axis = 1 if _is_conv_transpose_fn(conv_fn) else 0\n    weight_shape[weight_in_channel_axis] = -1\n    bias_shape = [1] * len(conv_weight.shape)\n    bias_shape[1] = -1\n    scaled_weight = conv_weight * scale_factor.reshape(weight_shape)\n    x = conv_fn(x, scaled_weight, None)\n    x = x / scale_factor.reshape(bias_shape)\n    x = F.batch_norm(x, bn_running_mean, bn_running_var, bn_weight, bn_bias, training=True, eps=bn_eps)\n    return x",
    "docstring": "Same as , but handles the case with no conv bias.",
    "type": "function",
    "file_path": "pytorch\\torch\\ao\\quantization\\pt2e\\qat_utils.py",
    "ast_data": "FunctionDef name:_qat_conv_bn_pattern_no_conv_bias arg:x arg:conv_weight arg:conv_bias arg:bn_weight arg:bn_bias arg:bn_running_mean arg:bn_running_var arguments arg arg arg arg arg arg arg Assign Assign Call Assign Assign Call Assign Call Assign Assign Call Assign Assign Call Assign Call Assign Call Assign Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "get_per_process_memory_fraction",
    "source_code": "def get_per_process_memory_fraction(device: 'Device'=None) -> float:\n    _lazy_init()\n    if device is None:\n        device = torch.cuda.current_device()\n    device = _get_device_index(device)\n    return torch._C._cuda_getMemoryFraction(device)",
    "docstring": "Get memory fraction for a process. Args: device (torch.device or int, optional): selected device. If it is `` the default CUDA device is used. Returns: memory fraction, in range 0~1. Allowed memory equals total_memory * fraction.",
    "type": "function",
    "file_path": "pytorch\\torch\\cuda\\memory.py",
    "ast_data": "FunctionDef name:get_per_process_memory_fraction arg:device arguments arg Call If Compare Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "WayburnSeader01",
    "source_code": "class WayburnSeader01(Benchmark):\n\n    def __init__(self, dimensions=2):\n        Benchmark.__init__(self, dimensions)\n        self._bounds = list(zip([-5.0] * self.N, [5.0] * self.N))\n        self.custom_bounds = ([-2, 2], [-2, 2])\n        self.global_optimum = [[1.0, 2.0]]\n        self.fglob = 0.0\n\n    def fun(self, x, *args):\n        self.nfev += 1\n        return (x[0] ** 6 + x[1] ** 4 - 17) ** 2 + (2 * x[0] + x[1] - 4) ** 2",
    "docstring": "Wayburn and Seader 1 objective function. This class defines the Wayburn and Seader 1 [1]_ global optimization problem. This is a unimodal minimization problem defined as follows: .. math:: f_{\\text{WayburnSeader01}}(x) = (x_1^6 + x_2^4 - 17)^2 + (2x_1 + x_2 - 4)^2 with :math: for :math:. *Global optimum*: :math: for :math: .. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions For Global Optimization Problems Int. Journal of Mathematical Modelling and Numerical Optimisation, 2013, 4, 150-194.",
    "type": "class",
    "file_path": "scipy\\benchmarks\\benchmarks\\go_benchmark_functions\\go_funcs_W.py",
    "ast_data": "ClassDef name:WayburnSeader01 FunctionDef name:__init__ arg:self arg:dimensions arguments arg arg Call Assign Call Call Assign Assign Assign FunctionDef name:fun arg:self arg:x arguments arg arg arg Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "entropy",
    "source_code": "@validate_params({'labels': ['array-like']}, prefer_skip_nested_validation=True)\ndef entropy(labels):\n    xp, is_array_api_compliant, device_ = get_namespace_and_device(labels)\n    labels_len = labels.shape[0] if is_array_api_compliant else len(labels)\n    if labels_len == 0:\n        return 1.0\n    pi = xp.astype(xp.unique_counts(labels)[1], _max_precision_float_dtype(xp, device_))\n    if pi.size == 1:\n        return 0.0\n    pi_sum = xp.sum(pi)\n    return float(-xp.sum(pi / pi_sum * (xp.log(pi) - log(pi_sum))))",
    "docstring": "Calculate the entropy for a labeling. Parameters ---------- labels : array-like of shape (n_samples,), dtype=int The labels. Returns ------- entropy : float The entropy for a labeling. Notes ----- The logarithm used is the natural logarithm (base-e).",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\metrics\\cluster\\_supervised.py",
    "ast_data": "FunctionDef name:entropy arg:labels arguments arg Assign Call Assign Call If Compare Return return:yes Assign Call Call Call If Compare Return return:yes Assign Call Return return:yes Call Call Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "get_input_details",
    "source_code": "def get_input_details(self):\n    return [self._get_tensor_details(i, subgraph_index=0) for i in self._interpreter.InputIndices()]",
    "docstring": "Gets model input tensor details. Returns: A list in which each item is a dictionary with details about an input tensor. Each dictionary contains the following fields that describe the tensor: + : The tensor name. + : The tensor index in the interpreter. + : The shape of the tensor. + : Same as for models with known/fixed shapes. If any dimension sizes are unknown, they are indicated with . + : The numpy data type (such as or ). + : Deprecated, use . This field only works for per-tensor quantization, whereas works in all cases. + : A dictionary of parameters used to quantize the tensor: ~ : List of scales (one if per-tensor quantization). ~ : List of zero_points (one if per-tensor quantization). ~ : Specifies the dimension of per-axis quantization, in the case of multiple scales/zero_points. + : A dictionary of parameters used to encode a sparse tensor. This is empty if the tensor is dense.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\lite\\python\\interpreter.py",
    "ast_data": "FunctionDef name:get_input_details arg:self arguments arg Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "seed",
    "source_code": "def seed() -> None:\n\n    def cb():\n        idx = current_device()\n        default_generator = torch.xpu.default_generators[idx]\n        default_generator.seed()\n    _lazy_call(cb)",
    "docstring": "Set the seed for generating random numbers to a random number for the current GPU. It's safe to call this function if XPU is not available; in that case, it is silently ignored. .. warning:: If you are working with a multi-GPU model, this function will only initialize the seed on one GPU. To initialize all GPUs, use :func:.",
    "type": "function",
    "file_path": "pytorch\\torch\\xpu\\random.py",
    "ast_data": "FunctionDef name:seed arguments FunctionDef name:cb arguments Assign Call Assign Call Call"
  },
  {
    "library": "tensorflow",
    "name": "yield_flat_up_to",
    "source_code": "def yield_flat_up_to(modality, shallow_tree, input_tree, is_nested_fn, path=()):\n    if modality == Modality.CORE:\n        yield from _tf_core_yield_flat_up_to(shallow_tree, input_tree, is_nested_fn, path)\n    elif modality == Modality.DATA:\n        yield from _tf_data_yield_flat_up_to(shallow_tree, input_tree)\n    else:\n        raise ValueError('Unknown modality used {} for nested structure'.format(modality))",
    "docstring": "Yields (path, value) pairs of input_tree flattened up to shallow_tree. - For Modality.CORE: See comments for _tf_core_yield_flat_up_to() below - For Modality.DATA: See comments for _tf_data_yield_flat_up_to() below Args: modality: enum value of supported modality [Modality.CORE or Modality.DATA] shallow_tree: Nested structure. Traverse no further than its leaf nodes. input_tree: Nested structure. Return the paths and values from this tree. Must have the same upper structure as shallow_tree. is_nested_fn: Arg valid for Modality.CORE only. Function used to test if a value should be treated as a nested structure. path: Arg valid for Modality.CORE only. Tuple. Optional argument, only used when recursing. The path from the root of the original shallow_tree, down to the root of the shallow_tree arg of this recursive call. Yields: Pairs of (path, value), where path the tuple path of a leaf node in shallow_tree, and value is the value of the corresponding node in input_tree.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\util\\nest_util.py",
    "ast_data": "FunctionDef name:yield_flat_up_to arg:modality arg:shallow_tree arg:input_tree arg:is_nested_fn arg:path arguments arg arg arg arg arg If Compare Call If Compare Call Raise Call Call"
  },
  {
    "library": "pytorch",
    "name": "get_lr",
    "source_code": "@override\ndef get_lr(self) -> list[float]:\n    _warn_get_lr_called_within_step(self)\n    if not self._is_initial:\n        return [group['lr'] * lmbda(self.last_epoch) for lmbda, group in zip(self.lr_lambdas, self.optimizer.param_groups)]\n    else:\n        return [group['lr'] for group in self.optimizer.param_groups]",
    "docstring": "Compute the learning rate of each parameter group.",
    "type": "method",
    "file_path": "pytorch\\torch\\optim\\lr_scheduler.py",
    "ast_data": "FunctionDef name:get_lr arg:self arguments arg Call If Return return:yes Call Call Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "fit_intercept_only",
    "source_code": "def fit_intercept_only(self, y_true, sample_weight=None):\n    if sample_weight is None:\n        return np.percentile(y_true, 100 * self.closs.quantile, axis=0)\n    else:\n        return _weighted_percentile(y_true, sample_weight, 100 * self.closs.quantile)",
    "docstring": "Compute raw_prediction of an intercept-only model. This is the weighted median of the target, i.e. over the samples axis=0.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\_loss\\loss.py",
    "ast_data": "FunctionDef name:fit_intercept_only arg:self arg:y_true arg:sample_weight arguments arg arg arg If Compare Return return:yes Call Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "RotatedEllipse02",
    "source_code": "class RotatedEllipse02(Benchmark):\n\n    def __init__(self, dimensions=2):\n        Benchmark.__init__(self, dimensions)\n        self._bounds = list(zip([-500.0] * self.N, [500.0] * self.N))\n        self.custom_bounds = ([-2.0, 2.0], [-2.0, 2.0])\n        self.global_optimum = [[0.0, 0.0]]\n        self.fglob = 0.0\n\n    def fun(self, x, *args):\n        self.nfev += 1\n        return x[0] ** 2.0 - x[0] * x[1] + x[1] ** 2.0",
    "docstring": "Rotated Ellipse 2 objective function. This class defines the Rotated Ellipse 2 [1]_ global optimization problem. This is a unimodal minimization problem defined as follows: .. math:: f_{\\text{RotatedEllipse02}}(x) = x_1^2 - x_1 x_2 + x_2^2 with :math: for :math:. *Global optimum*: :math: for :math: .. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions For Global Optimization Problems Int. Journal of Mathematical Modelling and Numerical Optimisation, 2013, 4, 150-194.",
    "type": "class",
    "file_path": "scipy\\benchmarks\\benchmarks\\go_benchmark_functions\\go_funcs_R.py",
    "ast_data": "ClassDef name:RotatedEllipse02 FunctionDef name:__init__ arg:self arg:dimensions arguments arg arg Call Assign Call Call Assign Assign Assign FunctionDef name:fun arg:self arg:x arguments arg arg arg Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "_validate_style_kwargs",
    "source_code": "def _validate_style_kwargs(default_style_kwargs, user_style_kwargs):\n    invalid_to_valid_kw = {'ls': 'linestyle', 'c': 'color', 'ec': 'edgecolor', 'fc': 'facecolor', 'lw': 'linewidth', 'mec': 'markeredgecolor', 'mfcalt': 'markerfacecoloralt', 'ms': 'markersize', 'mew': 'markeredgewidth', 'mfc': 'markerfacecolor', 'aa': 'antialiased', 'ds': 'drawstyle', 'font': 'fontproperties', 'family': 'fontfamily', 'name': 'fontname', 'size': 'fontsize', 'stretch': 'fontstretch', 'style': 'fontstyle', 'variant': 'fontvariant', 'weight': 'fontweight', 'ha': 'horizontalalignment', 'va': 'verticalalignment', 'ma': 'multialignment'}\n    for invalid_key, valid_key in invalid_to_valid_kw.items():\n        if invalid_key in user_style_kwargs and valid_key in user_style_kwargs:\n            raise TypeError(f'Got both {invalid_key} and {valid_key}, which are aliases of one another')\n    valid_style_kwargs = default_style_kwargs.copy()\n    for key in user_style_kwargs.keys():\n        if key in invalid_to_valid_kw:\n            valid_style_kwargs[invalid_to_valid_kw[key]] = user_style_kwargs[key]\n        else:\n            valid_style_kwargs[key] = user_style_kwargs[key]\n    return valid_style_kwargs",
    "docstring": "Create valid style kwargs by avoiding Matplotlib alias errors. Matplotlib raises an error when, for example, 'color' and 'c', or 'linestyle' and 'ls', are specified together. To avoid this, we automatically keep only the one specified by the user and raise an error if the user specifies both. Parameters ---------- default_style_kwargs : dict The Matplotlib style kwargs used by default in the scikit-learn display. user_style_kwargs : dict The user-defined Matplotlib style kwargs. Returns ------- valid_style_kwargs : dict The validated style kwargs taking into account both default and user-defined Matplotlib style kwargs.",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\utils\\_plotting.py",
    "ast_data": "FunctionDef name:_validate_style_kwargs arg:default_style_kwargs arg:user_style_kwargs arguments arg arg Assign For Call If BoolOp Compare Compare Raise Call Assign Call For Call If Compare Assign Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "reduce_per_replica",
    "source_code": "def reduce_per_replica(values, strategy, reduction='first'):\n\n    def _reduce(v):\n        if reduction == 'concat' and _collective_all_reduce_multi_worker(strategy):\n            return _multi_worker_concat(v, strategy)\n        if not _is_per_replica_instance(v):\n            return v\n        elif reduction == 'first':\n            return strategy.unwrap(v)[0]\n        elif reduction == 'concat':\n            if _is_tpu_multi_host(strategy):\n                return _tpu_multi_host_concat(v, strategy)\n            else:\n                return concat(strategy.unwrap(v))\n        else:\n            raise ValueError('`reduction` must be \"first\" or \"concat\".')\n    return nest.map_structure(_reduce, values)",
    "docstring": "Reduce PerReplica objects. Args: values: Structure of objects or s. s are returned as-is. strategy: object. reduction: One of 'first', 'concat'. Returns: Structure of s.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\training.py",
    "ast_data": "FunctionDef name:reduce_per_replica arg:values arg:strategy arg:reduction arguments arg arg arg FunctionDef name:_reduce arg:v arguments arg If BoolOp Compare Call Return return:yes Call If Call Return return:yes If Compare Return return:yes Call If Compare If Call Return return:yes Call Return return:yes Call Call Raise Call Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "cov",
    "source_code": "def cov(self, alpha, n):\n    a, Sa, n = _dirichlet_multinomial_check_parameters(alpha, n)\n    var = dirichlet_multinomial.var(a, n)\n    n, Sa = (n[..., np.newaxis, np.newaxis], Sa[..., np.newaxis, np.newaxis])\n    aiaj = a[..., :, np.newaxis] * a[..., np.newaxis, :]\n    cov = -n * aiaj / Sa ** 2 * (n + Sa) / (1 + Sa)\n    ii = np.arange(cov.shape[-1])\n    cov[..., ii, ii] = var\n    return cov",
    "docstring": "Covariance matrix of a Dirichlet multinomial distribution. Parameters ---------- %(_dirichlet_mn_doc_default_callparams)s Returns ------- out : array_like The covariance matrix of the distribution.",
    "type": "method",
    "file_path": "scipy\\scipy\\stats\\_multivariate.py",
    "ast_data": "FunctionDef name:cov arg:self arg:alpha arg:n arguments arg arg arg Assign Call Assign Call Assign Assign Assign Assign Call Assign Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_compute_norm",
    "source_code": "def _compute_norm(t, n, dim):\n    dims = list(range(t.dim()))\n    if dim < 0:\n        dim = dims[dim]\n    dims.remove(dim)\n    norm = torch.norm(t, p=n, dim=dims)\n    return norm",
    "docstring": "Compute the L_n-norm of a tensor along all dimensions except for the specified dimension. The L_n-norm will be computed across all entries in tensor along all dimension except for the one identified by dim. Example: if is of shape, say, 3x2x4 and dim=2 (the last dim), then norm will have Size [4], and each entry will represent the -norm computed using the 3x2=6 entries for each of the 4 channels. Args: t (torch.Tensor): tensor representing the parameter to prune n (int, float, inf, -inf, 'fro', 'nuc'): See documentation of valid entries for argument p in torch.norm dim (int): dim identifying the channels to prune Returns: norm (torch.Tensor): L_n norm computed across all dimensions except for . By construction, .",
    "type": "function",
    "file_path": "pytorch\\torch\\nn\\utils\\prune.py",
    "ast_data": "FunctionDef name:_compute_norm arg:t arg:n arg:dim arguments arg arg arg Assign Call Call Call If Compare Assign Call Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_variable_creator_scope",
    "source_code": "@tf_contextlib.contextmanager\ndef _variable_creator_scope(self, creator, priority=100) -> Iterator[None]:\n    old = self._variable_creator_stack\n    new = list(old)\n    new.append((priority, creator))\n    new.sort(key=lambda item: item[0])\n    self._thread_local._variable_creator_stack = new\n    try:\n        yield\n    finally:\n        if self._thread_local._variable_creator_stack is not new:\n            raise RuntimeError('Exiting variable_creator_scope without proper nesting.')\n        self._thread_local._variable_creator_stack = old",
    "docstring": "Scope which defines a variable creation function. Args: creator: A callable taking and . See the docstring. priority: Creators with a higher are called first. Within the same priority, creators are called inner-to-outer. Yields: is a context manager with a side effect, but doesn't return a value. Raises: RuntimeError: If variable creator scopes are not properly nested.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\ops.py",
    "ast_data": "FunctionDef name:_variable_creator_scope arg:self arg:creator arg:priority arguments arg arg arg Assign Assign Call Call Call arguments arg Assign Try If Compare Raise Call Assign"
  },
  {
    "library": "pytorch",
    "name": "get_hook_stubs",
    "source_code": "def get_hook_stubs(nn_module):\n    check_module_initialized(nn_module)\n    hook_map: dict = {}\n    hook_stubs = []\n    for hook in nn_module._forward_hooks.values():\n        if hook.__name__ in hook_map:\n            if id(hook) != id(hook_map[hook.__name__]):\n                raise RuntimeError(f\"Hook '{hook.__name__}' on {type(nn_module).__name__} has at least two different python definitions. Please use unique names for all hooks.\")\n        else:\n            hook_map[hook.__name__] = hook\n        hook_stubs.append(make_stub(hook, hook.__name__))\n    pre_hook_stubs = []\n    for pre_hook in nn_module._forward_pre_hooks.values():\n        if pre_hook.__name__ in hook_map:\n            if id(pre_hook) != id(hook_map[pre_hook.__name__]):\n                raise RuntimeError(f\"Pre-hook '{pre_hook.__name__}' on {type(nn_module).__name__} has at least two different python definitions. Please use unique names for all hooks.\")\n        else:\n            hook_map[pre_hook.__name__] = pre_hook\n        pre_hook_stubs.append(make_stub(pre_hook, pre_hook.__name__))\n    return (hook_stubs, pre_hook_stubs)",
    "docstring": "Return forward hook and pre_hook ScriptModuleStubs.",
    "type": "function",
    "file_path": "pytorch\\torch\\jit\\_recursive.py",
    "ast_data": "FunctionDef name:get_hook_stubs arg:nn_module arguments arg Call Assign For Call If Compare If Compare Call Call Raise Call Call Assign Call Call Assign For Call If Compare If Compare Call Call Raise Call Call Assign Call Call Return return:yes"
  },
  {
    "library": "scipy",
    "name": "time_matrix_conversion",
    "source_code": "def time_matrix_conversion(self, num_rotations):\n    Rotation.from_matrix(self.rotations.as_matrix())",
    "docstring": "Time converting rotation from and to matrices",
    "type": "method",
    "file_path": "scipy\\benchmarks\\benchmarks\\spatial.py",
    "ast_data": "FunctionDef name:time_matrix_conversion arg:self arg:num_rotations arguments arg arg Call Call"
  },
  {
    "library": "kornia",
    "name": "inverse",
    "source_code": "def inverse(self, input: Tensor, params: Optional[List[PatchParamItem]]=None, extra_args: Optional[Dict[str, Any]]=None) -> Tensor:\n    if self.is_intensity_only():\n        return input\n    raise NotImplementedError('PatchSequential inverse cannot be used with geometric transformations.')",
    "docstring": "Inverse transformation. Used to inverse a tensor according to the performed transformation by a forward pass, or with respect to provided parameters.",
    "type": "method",
    "file_path": "kornia\\kornia\\augmentation\\container\\patch.py",
    "ast_data": "FunctionDef name:inverse arg:self arg:input arg:params arg:extra_args arguments arg arg arg arg If Call Return return:yes Raise Call"
  },
  {
    "library": "tensorflow",
    "name": "loop_exits",
    "source_code": "@property\ndef loop_exits(self):\n    return self._loop_exits",
    "docstring": "The list of exit tensors for loop variables.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\control_flow_ops.py",
    "ast_data": "FunctionDef name:loop_exits arg:self arguments arg Return return:yes"
  },
  {
    "library": "scipy",
    "name": "get_state",
    "source_code": "def get_state():\n    return _uarray.get_state()",
    "docstring": "Returns an opaque object containing the current state of all the backends. Can be used for synchronization between threads/processes. See Also -------- set_state Sets the state returned by this function.",
    "type": "function",
    "file_path": "scipy\\scipy\\_lib\\_uarray\\_backend.py",
    "ast_data": "FunctionDef name:get_state arguments Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "contains",
    "source_code": "def contains(self, mouseevent):\n    if self._different_canvas(mouseevent) or not self.get_visible() or self._renderer is None:\n        return (False, {})\n    bbox = Text.get_window_extent(self)\n    inside = bbox.x0 <= mouseevent.x <= bbox.x1 and bbox.y0 <= mouseevent.y <= bbox.y1\n    cattr = {}\n    if self._bbox_patch:\n        patch_inside, patch_cattr = self._bbox_patch.contains(mouseevent)\n        inside = inside or patch_inside\n        cattr['bbox_patch'] = patch_cattr\n    return (inside, cattr)",
    "docstring": "Return whether the mouse event occurred inside the axis-aligned bounding-box of the text.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\text.py",
    "ast_data": "FunctionDef name:contains arg:self arg:mouseevent arguments arg arg If BoolOp Call Call Compare Return return:yes Assign Call Assign BoolOp Compare Compare Assign If Assign Call Assign BoolOp Assign Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "__init__",
    "source_code": "def __init__(self, axes, xref=None, yref=None):\n    self._axes = axes\n    if xref is None:\n        self._xref = Size.AxesX(axes)\n    else:\n        self._xref = xref\n    if yref is None:\n        self._yref = Size.AxesY(axes)\n    else:\n        self._yref = yref\n    super().__init__(fig=axes.get_figure(), pos=None, horizontal=[self._xref], vertical=[self._yref], aspect=None, anchor='C')",
    "docstring": "Parameters ---------- axes : :class: xref yref",
    "type": "method",
    "file_path": "matplotlib\\lib\\mpl_toolkits\\axes_grid1\\axes_divider.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:axes arg:xref arg:yref arguments arg arg arg arg Assign If Compare Assign Call Assign If Compare Assign Call Assign Call Call Call"
  },
  {
    "library": "scikit-learn",
    "name": "UnsetMetadataPassedError",
    "source_code": "class UnsetMetadataPassedError(ValueError):\n\n    def __init__(self, *, message, unrequested_params, routed_params):\n        super().__init__(message)\n        self.unrequested_params = unrequested_params\n        self.routed_params = routed_params",
    "docstring": "Exception class to raise if a metadata is passed which is not explicitly requested (metadata=True) or not requested (metadata=False). .. versionadded:: 1.3 Parameters ---------- message : str The message unrequested_params : dict A dictionary of parameters and their values which are provided but not requested. routed_params : dict A dictionary of routed parameters.",
    "type": "class",
    "file_path": "scikit-learn\\sklearn\\exceptions.py",
    "ast_data": "ClassDef name:UnsetMetadataPassedError FunctionDef name:__init__ arg:self arguments arg arg arg arg Call Call Assign Assign"
  },
  {
    "library": "matplotlib",
    "name": "get_interpolation_stage",
    "source_code": "def get_interpolation_stage(self):\n    return self._interpolation_stage",
    "docstring": "Return when interpolation happens during the transform to RGBA. One of 'data', 'rgba', 'auto'.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\image.py",
    "ast_data": "FunctionDef name:get_interpolation_stage arg:self arguments arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "generate_non_native_lazy_ir_nodes",
    "source_code": "def generate_non_native_lazy_ir_nodes(non_native: list[dict[str, Any]], gen_lazy_ir: GenLazyIR) -> list[str]:\n    nodes = []\n    for op in non_native:\n        properties = LazyIrProperties('ShapeCache', 'CanBeReused', 'LowerDeclOnly')\n        for p in op.get('properties', []):\n            setattr(properties, p, True)\n        schema = LazyIrSchema(FunctionSchema.parse(op['func']), properties, symint=True)\n        schema.opkind = op.get('opkind')\n        nodes.append(gen_lazy_ir.gen(schema)[0])\n    return nodes",
    "docstring": "Generate the non-native lazy IR node classes",
    "type": "function",
    "file_path": "pytorch\\torchgen\\dest\\lazy_ir.py",
    "ast_data": "FunctionDef name:generate_non_native_lazy_ir_nodes arg:non_native arg:gen_lazy_ir arguments arg arg Assign For Assign Call For Call Call Assign Call Call Assign Call Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "get_score_mod",
    "source_code": "def get_score_mod(self, score_mod: Optional[_score_mod_signature]) -> _score_mod_signature:\n    if score_mod is None:\n        score_mod = _identity\n\n    def new_score_mod(score: torch.Tensor, b: torch.Tensor, h: torch.Tensor, q_idx: torch.Tensor, physical_kv_idx: torch.Tensor):\n        physical_kv_block = physical_kv_idx // self.page_size\n        physical_kv_offset = physical_kv_idx % self.page_size\n        logical_block_idx = self.physical_to_logical[b, physical_kv_block]\n        logical_kv_idx = logical_block_idx * self.page_size + physical_kv_offset\n        return torch.where(logical_block_idx >= 0, score_mod(score, b, h, q_idx, logical_kv_idx), float('-inf'))\n    return new_score_mod",
    "docstring": "Converts a score_mod based on mapping from the physical block index to the logical block index. Args: score_mod (_score_mod_signature): score_mod based on the logical block index.",
    "type": "method",
    "file_path": "pytorch\\torch\\nn\\attention\\experimental\\_paged_attention.py",
    "ast_data": "FunctionDef name:get_score_mod arg:self arg:score_mod arguments arg arg If Compare Assign FunctionDef name:new_score_mod arg:score arg:b arg:h arg:q_idx arg:physical_kv_idx arguments arg arg arg arg arg Assign Assign Assign Assign Return return:yes Call Compare Call Call Return return:yes"
  },
  {
    "library": "sphinx",
    "name": "ASTIdAttribute",
    "source_code": "class ASTIdAttribute(ASTAttribute):\n\n    def __init__(self, id: str) -> None:\n        self.id = id\n\n    def __eq__(self, other: object) -> bool:\n        if not isinstance(other, ASTIdAttribute):\n            return NotImplemented\n        return self.id == other.id\n\n    def __hash__(self) -> int:\n        return hash(self.id)\n\n    def _stringify(self, transform: StringifyTransform) -> str:\n        return self.id\n\n    def describe_signature(self, signode: TextElement) -> None:\n        signode.append(nodes.Text(self.id))",
    "docstring": "For simple attributes defined by the user.",
    "type": "class",
    "file_path": "sphinx\\sphinx\\util\\cfamily.py",
    "ast_data": "ClassDef name:ASTIdAttribute FunctionDef name:__init__ arg:self arg:id arguments arg arg Assign FunctionDef name:__eq__ arg:self arg:other arguments arg arg If Call Return return:yes Return return:yes Compare FunctionDef name:__hash__ arg:self arguments arg Return return:yes Call FunctionDef name:_stringify arg:self arg:transform arguments arg arg Return return:yes FunctionDef name:describe_signature arg:self arg:signode arguments arg arg Call Call"
  },
  {
    "library": "sphinx",
    "name": "process_doc",
    "source_code": "def process_doc(self, app: Sphinx, doctree: nodes.document) -> None:\n    raise NotImplementedError",
    "docstring": "Process a document and gather specific data from it. This method is called after the document is read. .. seealso:: :event:",
    "type": "method",
    "file_path": "sphinx\\sphinx\\environment\\collectors\\__init__.py",
    "ast_data": "FunctionDef name:process_doc arg:self arg:app arg:doctree arguments arg arg arg Raise"
  },
  {
    "library": "scrapy",
    "name": "normkey",
    "source_code": "def normkey(self, key: AnyStr) -> bytes:\n    return self._tobytes(key.title())",
    "docstring": "Normalize key to bytes",
    "type": "method",
    "file_path": "scrapy\\scrapy\\http\\headers.py",
    "ast_data": "FunctionDef name:normkey arg:self arg:key arguments arg arg Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_get_tflite_converter",
    "source_code": "def _get_tflite_converter(flags):\n    input_arrays = _parse_array(flags.input_arrays)\n    input_shapes = None\n    if flags.input_shapes:\n        input_shapes_list = [_parse_array(shape, type_fn=int) for shape in flags.input_shapes.split(':')]\n        input_shapes = dict(list(zip(input_arrays, input_shapes_list)))\n    output_arrays = _parse_array(flags.output_arrays)\n    converter_kwargs = {'input_arrays': input_arrays, 'input_shapes': input_shapes, 'output_arrays': output_arrays}\n    if flags.graph_def_file:\n        converter_fn = lite.TFLiteConverter.from_frozen_graph\n        converter_kwargs['graph_def_file'] = flags.graph_def_file\n    elif flags.saved_model_dir:\n        converter_fn = lite.TFLiteConverter.from_saved_model\n        converter_kwargs['saved_model_dir'] = flags.saved_model_dir\n        converter_kwargs['tag_set'] = _parse_set(flags.saved_model_tag_set)\n        converter_kwargs['signature_key'] = flags.saved_model_signature_key\n    elif flags.keras_model_file:\n        converter_fn = lite.TFLiteConverter.from_keras_model_file\n        converter_kwargs['model_file'] = flags.keras_model_file\n    else:\n        raise ValueError('--graph_def_file, --saved_model_dir, or --keras_model_file must be specified.')\n    return converter_fn(**converter_kwargs)",
    "docstring": "Makes a TFLiteConverter object based on the flags provided. Args: flags: argparse.Namespace object containing TFLite flags. Returns: TFLiteConverter object. Raises: ValueError: Invalid flags.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\lite\\python\\tflite_convert.py",
    "ast_data": "FunctionDef name:_get_tflite_converter arg:flags arguments arg Assign Call Assign If Assign Call Call Assign Call Call Call Assign Call Assign If Assign Assign If Assign Assign Assign Call Assign If Assign Assign Raise Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_merge_call",
    "source_code": "def _merge_call(self, merge_fn, args, kwargs):\n    _push_per_thread_mode(_CrossReplicaThreadMode(self._strategy))\n    try:\n        return merge_fn(self._strategy, *args, **kwargs)\n    finally:\n        _pop_per_thread_mode()",
    "docstring": "Default implementation for single replica.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\distribute_lib.py",
    "ast_data": "FunctionDef name:_merge_call arg:self arg:merge_fn arg:args arg:kwargs arguments arg arg arg arg Call Call Try Return return:yes Call Call"
  },
  {
    "library": "pandas",
    "name": "headers",
    "source_code": "@property\n@abstractmethod\ndef headers(self) -> Sequence[str]:\n    pass",
    "docstring": "Headers names of the columns in verbose table.",
    "type": "method",
    "file_path": "pandas\\pandas\\io\\formats\\info.py",
    "ast_data": "FunctionDef name:headers arg:self arguments arg"
  },
  {
    "library": "cherrypy",
    "name": "__lt__",
    "source_code": "def __lt__(self, other):\n    return self.priority < other.priority",
    "docstring": "Check if this hook's priority is lower than the other's. :param other: Another object to compare priority with. :type other: Hook :returns: Whether the other Hook's priority is higher than this one's. :rtype: bool Hooks sort by priority, ascending, such that hooks of lower priority are run first.",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\_cprequest.py",
    "ast_data": "FunctionDef name:__lt__ arg:self arg:other arguments arg arg Return return:yes Compare"
  },
  {
    "library": "kornia",
    "name": "get_projective_transform",
    "source_code": "def get_projective_transform(center: Tensor, angles: Tensor, scales: Tensor) -> Tensor:\n    if not (len(center.shape) == 2 and center.shape[-1] == 3):\n        raise AssertionError(center.shape)\n    if not (len(angles.shape) == 2 and angles.shape[-1] == 3):\n        raise AssertionError(angles.shape)\n    if center.device != angles.device:\n        raise AssertionError(center.device, angles.device)\n    if center.dtype != angles.dtype:\n        raise AssertionError(center.dtype, angles.dtype)\n    axis_angle_rad: Tensor = deg2rad(angles)\n    rmat: Tensor = axis_angle_to_rotation_matrix(axis_angle_rad)\n    scaling_matrix: Tensor = eye_like(3, rmat)\n    scaling_matrix = scaling_matrix * scales.unsqueeze(dim=1)\n    rmat = rmat @ scaling_matrix.to(rmat)\n    from_origin_mat = eye_like(4, rmat, shared_memory=False)\n    from_origin_mat[..., :3, -1] += center\n    to_origin_mat = from_origin_mat.clone()\n    to_origin_mat = _torch_inverse_cast(from_origin_mat)\n    proj_mat = projection_from_Rt(rmat, torch.zeros_like(center)[..., None])\n    proj_mat = convert_affinematrix_to_homography3d(proj_mat)\n    proj_mat = from_origin_mat @ proj_mat @ to_origin_mat\n    return proj_mat[..., :3, :]",
    "docstring": "Calculate the projection matrix for a 3D rotation. .. warning:: This API signature it is experimental and might suffer some changes in the future. The function computes the projection matrix given the center and angles per axis. Args: center: center of the rotation (x,y,z) in the source with shape :math:. angles: axis angle vector containing the rotation angles in degrees in the form of (rx, ry, rz) with shape :math:. Internally it calls Rodrigues to compute the rotation matrix from axis-angle. scales: scale factor for x-y-z-directions with shape :math:. Returns: the projection matrix of 3D rotation with shape :math:. .. note:: This function is often used in conjunction with :func:.",
    "type": "function",
    "file_path": "kornia\\kornia\\geometry\\transform\\imgwarp.py",
    "ast_data": "FunctionDef name:get_projective_transform arg:center arg:angles arg:scales arguments arg arg arg If BoolOp Compare Call Compare Raise Call If BoolOp Compare Call Compare Raise Call If Compare Raise Call If Compare Raise Call Call Call Call Assign Call Assign Call Assign Call Assign Call Assign Call Assign Call Call Assign Call Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "array_like_impl",
    "source_code": "def array_like_impl(array_fn, array_like_fn, tensor, dtype, name, optimize=True, layout=None):\n    if not tensor_util.is_tf_type(tensor):\n        tensor = ops.convert_to_tensor(tensor, name='tensor')\n    tensor_shape = tensor.shape\n    tensor_dtype = tensor.dtype\n    if context.executing_eagerly():\n        if dtype is not None and dtype != tensor_dtype:\n            return array_fn(shape_internal(tensor, optimize=optimize), dtype=dtype, name=name, layout=layout)\n        return d_api.call_with_layout(array_like_fn, layout, tensor, name=name)\n    if optimize and tensor_shape.is_fully_defined() and (tensor_dtype != dtypes.variant):\n        return array_fn(tensor_shape, dtype=dtype or tensor_dtype, name=name, layout=layout)\n    if dtype is not None and dtype != tensor_dtype and (dtype != dtypes.variant):\n        return array_fn(shape_internal(tensor, optimize=optimize), dtype=dtype, name=name, layout=layout)\n    return d_api.call_with_layout(array_like_fn, layout, tensor, name=name)",
    "docstring": "Internal implementation for ones_like and zeros_like API calls.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\array_ops.py",
    "ast_data": "FunctionDef name:array_like_impl arg:array_fn arg:array_like_fn arg:tensor arg:dtype arg:name arg:optimize arg:layout arguments arg arg arg arg arg arg arg If Call Assign Call Assign Assign If Call If BoolOp Compare Compare Return return:yes Call Call Return return:yes Call If BoolOp Call Compare Return return:yes Call BoolOp If BoolOp Compare Compare Compare Return return:yes Call Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "get_compatible_func",
    "source_code": "def get_compatible_func(op, func):\n    op_signature = _remove_annotation(tf_inspect.signature(op))\n    func_signature = _remove_annotation(tf_inspect.signature(func))\n    if op_signature == func_signature:\n        return func\n    op_pos_names = _get_required_param_names(op_signature)\n    func_pos_names = _get_required_param_names(func_signature)\n    if op_pos_names != func_pos_names:\n        raise AssertionError(f\"The decorated function's non-default arguments must be identical to that of the overridden op. func has {func_pos_names}. op has {op_pos_names}.\")\n    func_missing_params = {}\n    for name in set(op_signature.parameters.keys()) - set(func_signature.parameters.keys()):\n        p = op_signature.parameters[name]\n        if p.default is p.empty:\n            raise AssertionError(f\"The decorated function's signature must implement all of the non-default arguments of the overridden op. Argument `{name}` is unimplemented.\")\n        func_missing_params[name] = p\n\n    def compatible_func(*args, **kwargs):\n        bound = op_signature.bind(*args, **kwargs)\n        for name, param in func_missing_params.items():\n            if name not in bound.arguments:\n                continue\n            value = bound.arguments.pop(name)\n            if value is not param.default:\n                raise AssertionError(f'Dispatched op is called with argument `{name}` set to a non-default value, which is not supported by the decorated function')\n        return func(*bound.args, **bound.kwargs)\n    return compatible_func",
    "docstring": "Returns a compatible function. Args: op: a callable with whose signature the returned function is compatible. func: a callable which is called by the returned function. Returns: a compatible function, which conducts the actions of but can be called like , given that: - the list of required arguments in and are the same. - there is no override of the default arguments of that are not supported by .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\util\\dispatch.py",
    "ast_data": "FunctionDef name:get_compatible_func arg:op arg:func arguments arg arg Assign Call Call Assign Call Call If Compare Return return:yes Assign Call Assign Call If Compare Raise Call Assign For Call Call Call Call Assign If Compare Raise Call Assign FunctionDef name:compatible_func arguments arg arg Assign Call For Call If Compare Assign Call If Compare Raise Call Return return:yes Call Return return:yes"
  },
  {
    "library": "kornia",
    "name": "essential_from_fundamental",
    "source_code": "def essential_from_fundamental(F_mat: torch.Tensor, K1: torch.Tensor, K2: torch.Tensor) -> torch.Tensor:\n    KORNIA_CHECK_SHAPE(F_mat, ['*', '3', '3'])\n    KORNIA_CHECK_SHAPE(K1, ['*', '3', '3'])\n    KORNIA_CHECK_SHAPE(K2, ['*', '3', '3'])\n    return K2.transpose(-2, -1) @ F_mat @ K1",
    "docstring": "Get Essential matrix from Fundamental and Camera matrices. Uses the method from Hartley/Zisserman 9.6 pag 257 (formula 9.12). Args: F_mat: The fundamental matrix with shape of :math:. K1: The camera matrix from first camera with shape :math:. K2: The camera matrix from second camera with shape :math:. Returns: The essential matrix with shape :math:.",
    "type": "function",
    "file_path": "kornia\\kornia\\geometry\\epipolar\\essential.py",
    "ast_data": "FunctionDef name:essential_from_fundamental arg:F_mat arg:K1 arg:K2 arguments arg arg arg Call Call Call Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "check_answer",
    "source_code": "def check_answer(self, x, ftol):\n    if self.lb is not None and np.any(x < self.lb) or (self.ub is not None and np.any(x > self.ub)):\n        return False\n    f = np.sum(self.fun(x) ** 2)\n    return f < (1 + ftol) * self.fopt",
    "docstring": "Check if yields the objective value close enough to the optimal value. Parameters ---------- x : ndarray, shape (n,) The point to test. ftol : float Maximum allowed relative error in the objective function value. Returns ------- bool Whether is optimal enough. If violates bounds constraints then False is returned.",
    "type": "method",
    "file_path": "scipy\\benchmarks\\benchmarks\\lsq_problems.py",
    "ast_data": "FunctionDef name:check_answer arg:self arg:x arg:ftol arguments arg arg arg If BoolOp BoolOp Compare Call Compare BoolOp Compare Call Compare Return return:yes Assign Call Call Return return:yes Compare"
  },
  {
    "library": "numpy",
    "name": "ArithOp",
    "source_code": "class ArithOp(Enum):\n    POS = 1\n    NEG = 2\n    ADD = 3\n    SUB = 4\n    MUL = 5\n    DIV = 6\n    POW = 7",
    "docstring": "Used in Op.APPLY expression to specify the function part.",
    "type": "class",
    "file_path": "numpy\\numpy\\f2py\\symbolic.py",
    "ast_data": "ClassDef name:ArithOp Assign Assign Assign Assign Assign Assign Assign"
  },
  {
    "library": "pytorch",
    "name": "FakeSparsity",
    "source_code": "class FakeSparsity(nn.Module):\n\n    def __init__(self, mask):\n        super().__init__()\n        self.register_buffer('mask', mask)\n\n    def forward(self, x):\n        assert self.mask.shape == x.shape\n        return self.mask * x\n\n    def state_dict(self, *args, **kwargs):\n        return {}",
    "docstring": "Parametrization for the weights. Should be attached to the 'weight' or any other parameter that requires a mask applied to it. Note:: Once the mask is passed, the variable should not change the id. The contents of the mask can change, but the mask reference itself should not.",
    "type": "class",
    "file_path": "pytorch\\torch\\ao\\pruning\\sparsifier\\utils.py",
    "ast_data": "ClassDef name:FakeSparsity FunctionDef name:__init__ arg:self arg:mask arguments arg arg Call Call Call FunctionDef name:forward arg:self arg:x arguments arg arg Compare Return return:yes FunctionDef name:state_dict arg:self arguments arg arg arg Return return:no"
  },
  {
    "library": "tensorflow",
    "name": "convert_op_hints_to_stubs",
    "source_code": "@_tf_export(v1=['lite.experimental.convert_op_hints_to_stubs'])\n@_deprecation.deprecated(None, 'Please follow instructions under https://www.tensorflow.org/lite/convert/operation_fusion for operationfusion in tflite.')\ndef convert_op_hints_to_stubs(session=None, graph_def=None, write_callback=lambda graph_def, comments: None):\n    if session is not None and graph_def is not None:\n        raise ValueError('Provide only one of session and graph_def.')\n    if session is not None:\n        return _convert_op_hints_to_stubs_helper(session.graph_def, write_callback)\n    elif graph_def is not None:\n        return _convert_op_hints_to_stubs_helper(graph_def, write_callback)\n    else:\n        raise ValueError('Must specify session or graph_def as input.')",
    "docstring": "Converts a graphdef with LiteOp hints into stub operations. This is used to prepare for toco conversion of complex intrinsic usages. Note: only one of session or graph_def should be used, not both. Args: session: A TensorFlow session that contains the graph to convert. graph_def: A graph def that we should convert. write_callback: A function pointer that can be used to write intermediate steps of graph transformation (optional). Returns: A new graphdef with all ops contained in OpHints being replaced by a single op call with the right parameters. Raises: ValueError: If both session and graph_def are provided.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\lite\\python\\op_hint.py",
    "ast_data": "FunctionDef name:convert_op_hints_to_stubs arg:session arg:graph_def arg:write_callback arguments arg arg arg arguments arg arg If BoolOp Compare Compare Raise Call If Compare Return return:yes Call If Compare Return return:yes Call Raise Call Call Call"
  },
  {
    "library": "scikit-learn",
    "name": "partial_fit",
    "source_code": "@_fit_context(prefer_skip_nested_validation=True)\ndef partial_fit(self, X, y, classes=None, sample_weight=None):\n    first_call = not hasattr(self, 'classes_')\n    X, y = self._check_X_y(X, y, reset=first_call)\n    _, n_features = X.shape\n    if _check_partial_fit_first_call(self, classes):\n        n_classes = len(classes)\n        self._init_counters(n_classes, n_features)\n    Y = label_binarize(y, classes=self.classes_)\n    if Y.shape[1] == 1:\n        if len(self.classes_) == 2:\n            Y = np.concatenate((1 - Y, Y), axis=1)\n        else:\n            Y = np.ones_like(Y)\n    if X.shape[0] != Y.shape[0]:\n        msg = 'X.shape[0]=%d and y.shape[0]=%d are incompatible.'\n        raise ValueError(msg % (X.shape[0], y.shape[0]))\n    Y = Y.astype(np.float64, copy=False)\n    if sample_weight is not None:\n        sample_weight = _check_sample_weight(sample_weight, X)\n        sample_weight = np.atleast_2d(sample_weight)\n        Y *= sample_weight.T\n    class_prior = self.class_prior\n    self._count(X, Y)\n    alpha = self._check_alpha()\n    self._update_feature_log_prob(alpha)\n    self._update_class_log_prior(class_prior=class_prior)\n    return self",
    "docstring": "Incremental fit on a batch of samples. This method is expected to be called several times consecutively on different chunks of a dataset so as to implement out-of-core or online learning. This is especially useful when the whole dataset is too big to fit in memory at once. This method has some performance overhead hence it is better to call partial_fit on chunks of data that are as large as possible (as long as fitting in the memory budget) to hide the overhead. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) Training vectors, where is the number of samples and is the number of features. y : array-like of shape (n_samples,) Target values. classes : array-like of shape (n_classes,), default=None List of all the classes that can possibly appear in the y vector. Must be provided at the first call to partial_fit, can be omitted in subsequent calls. sample_weight : array-like of shape (n_samples,), default=None Weights applied to individual samples (1. for unweighted). Returns ------- self : object Returns the instance itself.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\naive_bayes.py",
    "ast_data": "FunctionDef name:partial_fit arg:self arg:X arg:y arg:classes arg:sample_weight arguments arg arg arg arg arg Assign Call Assign Call Assign If Call Assign Call Call Assign Call If Compare If Compare Call Assign Call Assign Call If Compare Assign Raise Call Assign Call If Compare Assign Call Assign Call Assign Call Assign Call Call Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "check_trace_mode",
    "source_code": "@staticmethod\ndef check_trace_mode(device_type, trace_mode):\n    if trace_mode == tensor_tracer_flags.TRACE_MODE_FULL_TENSOR_SUMMARY:\n        if device_type != _DEVICE_TYPE_TPU:\n            raise ValueError('Device_type \"%s\" is not yet supported for trace mode \"%s\"' % (device_type, trace_mode))",
    "docstring": "Checks if the given trace mode work on the given device type. Args: device_type: Device type, TPU, GPU, CPU. trace_mode: Tensor tracer trace mode. Raises: ValueError: If the given trace mode is not supported for the device.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\tpu\\tensor_tracer.py",
    "ast_data": "FunctionDef name:check_trace_mode arg:device_type arg:trace_mode arguments arg arg If Compare If Compare Raise Call"
  },
  {
    "library": "scipy",
    "name": "get_result",
    "source_code": "def get_result(a, b, c, z, group):\n    expected, observed = (mp_hyp2f1(a, b, c, z), hyp2f1(a, b, c, z))\n    if np.isnan(observed) and np.isnan(expected) or expected == observed:\n        relative_error = 0.0\n        absolute_error = 0.0\n    elif np.isnan(observed):\n        relative_error = float('inf')\n        absolute_error = float('inf')\n    else:\n        absolute_error = abs(expected - observed)\n        relative_error = absolute_error / abs(expected)\n    return (a, b, c, z, abs(z), get_region(z), group, expected, observed, relative_error, absolute_error)",
    "docstring": "Get results for given parameter and value combination.",
    "type": "function",
    "file_path": "scipy\\scipy\\special\\_precompute\\hyp2f1_data.py",
    "ast_data": "FunctionDef name:get_result arg:a arg:b arg:c arg:z arg:group arguments arg arg arg arg arg Assign Call Call If BoolOp BoolOp Call Call Compare Assign Assign If Call Assign Call Assign Call Assign Call Assign Call Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "AddWhileContext",
    "source_code": "def AddWhileContext(self, op, between_op_list, between_ops):\n    forward_ctxt = util.GetWhileContext(op)\n    grad_state = self._map.get(forward_ctxt)\n    if grad_state is None:\n        outer_forward_ctxt = forward_ctxt.outer_context\n        if outer_forward_ctxt:\n            outer_forward_ctxt = outer_forward_ctxt.GetWhileContext()\n        outer_grad_state = None\n        if outer_forward_ctxt:\n            outer_grad_state = self._map.get(outer_forward_ctxt)\n        grad_state = _GradLoopState(forward_ctxt, outer_grad_state)\n        self._map[forward_ctxt] = grad_state\n        for loop_exit in grad_state.forward_loop_exits:\n            if loop_exit.op not in between_ops:\n                between_ops.add(loop_exit.op)\n                between_op_list.append(loop_exit.op)",
    "docstring": "Add the grad state for the while loop that op belongs to. Note that op is an Exit, and this method must be called in the control flow context where gradients() is called. Note that this method modifies and .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\control_flow_state.py",
    "ast_data": "FunctionDef name:AddWhileContext arg:self arg:op arg:between_op_list arg:between_ops arguments arg arg arg arg Assign Call Assign Call If Compare Assign If Assign Call Assign If Assign Call Assign Call Assign For If Compare Call Call"
  },
  {
    "library": "tensorflow",
    "name": "get_feature_key_name",
    "source_code": "def get_feature_key_name(self):\n    raise NotImplementedError('not impl')",
    "docstring": "Returns the feature key name in the features dict.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\tpu\\feature_column.py",
    "ast_data": "FunctionDef name:get_feature_key_name arg:self arguments arg Raise Call"
  },
  {
    "library": "django",
    "name": "Z",
    "source_code": "def Z(self):\n    if self.timezone is None:\n        return ''\n    offset = self.timezone.utcoffset(self.data)\n    return offset.days * 86400 + offset.seconds",
    "docstring": "Time zone offset in seconds (i.e. '-43200' to '43200'). The offset for timezones west of UTC is always negative, and for those east of UTC is always positive. If timezone information is not available, return an empty string.",
    "type": "method",
    "file_path": "django\\django\\utils\\dateformat.py",
    "ast_data": "FunctionDef name:Z arg:self arguments arg If Compare Return return:yes Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_shuffle_forward_outputs",
    "source_code": "def _shuffle_forward_outputs(self, forward_wrapper):\n\n    def _index_map(original):\n        if original < self._num_inference_outputs:\n            return original\n        if original >= len(forward_wrapper.outputs):\n            return original - len(forward_wrapper.outputs) + self._num_inference_outputs\n        return original + len(forward_wrapper.output_tangents)\n    output_indices = nest.map_structure(_index_map, forward_wrapper.output_indices)\n    forward_wrapper.graph.outputs = forward_wrapper.outputs[:self._num_inference_outputs] + forward_wrapper.output_tangents + forward_wrapper.outputs[self._num_inference_outputs:]\n    return forward_wrapper._replace(output_indices=output_indices)",
    "docstring": "Reorders function outputs so captures are last.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\polymorphic_function\\concrete_function.py",
    "ast_data": "FunctionDef name:_shuffle_forward_outputs arg:self arg:forward_wrapper arguments arg arg FunctionDef name:_index_map arg:original arguments arg If Compare Return return:yes If Compare Call Return return:yes Call Return return:yes Call Assign Call Assign Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "sparse_tensor_to_dense",
    "source_code": "@tf_export('sparse.to_dense', v1=['sparse.to_dense', 'sparse_tensor_to_dense'])\n@deprecation.deprecated_endpoints('sparse_tensor_to_dense')\ndef sparse_tensor_to_dense(sp_input, default_value=None, validate_indices=True, name=None):\n    sp_input = _convert_to_sparse_tensor(sp_input)\n    if default_value is None:\n        default_value = array_ops.zeros([], dtype=sp_input.dtype)\n    return gen_sparse_ops.sparse_to_dense(sp_input.indices, sp_input.dense_shape, sp_input.values, default_value=default_value, validate_indices=validate_indices, name=name)",
    "docstring": "Converts a into a dense tensor. For this sparse tensor with three non-empty values: >>> sp_input = tf.sparse.SparseTensor( ... dense_shape=[3, 5], ... values=[7, 8, 9], ... indices =[[0, 1], ... [0, 3], ... [2, 0]]) The output will be a dense tensor with values: >>> tf.sparse.to_dense(sp_input).numpy() array([[0, 7, 0, 8, 0], [0, 0, 0, 0, 0], [9, 0, 0, 0, 0]], dtype=int32) Note: Indices must be without repeats. This is only tested if is . Args: sp_input: The input . default_value: Scalar value to set for indices not specified in . Defaults to zero. validate_indices: A boolean value. If , indices are checked to make sure they are sorted in lexicographic order and that there are no repeats. name: A name prefix for the returned tensors (optional). Returns: A dense tensor with shape and values specified by the non-empty values in . Indices not in are assigned . Raises: TypeError: If is not a .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\sparse_ops.py",
    "ast_data": "FunctionDef name:sparse_tensor_to_dense arg:sp_input arg:default_value arg:validate_indices arg:name arguments arg arg arg arg Assign Call If Compare Assign Call Return return:yes Call Call Call"
  },
  {
    "library": "scipy",
    "name": "isrec",
    "source_code": "def isrec(self):\n    return bool(self.data.shape) and (not self._shape[0])",
    "docstring": "Returns whether the variable has a record dimension or not. A record dimension is a dimension along which additional data could be easily appended in the netcdf data structure without much rewriting of the data file. This attribute is a read-only property of the .",
    "type": "method",
    "file_path": "scipy\\scipy\\io\\_netcdf.py",
    "ast_data": "FunctionDef name:isrec arg:self arguments arg Return return:yes BoolOp Call"
  },
  {
    "library": "tensorflow",
    "name": "_remove_redundant_quantize_ops",
    "source_code": "def _remove_redundant_quantize_ops(model):\n    if not model.signatureDefs:\n        _remove_redundant_quantize_ops_per_subgraph(model, 0, -1)\n        return\n    for signature_index, signature_def in enumerate(model.signatureDefs):\n        _remove_redundant_quantize_ops_per_subgraph(model, signature_def.subgraphIndex, signature_index)",
    "docstring": "Finds back to back quantize ops and remove the first quantize op.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\lite\\python\\util.py",
    "ast_data": "FunctionDef name:_remove_redundant_quantize_ops arg:model arguments arg If Call Return return:no For Call Call"
  },
  {
    "library": "scipy",
    "name": "set_shape",
    "source_code": "def set_shape(self, shape):\n    new_self = self.reshape(shape, copy=False).asformat(self.format)\n    self.__dict__ = new_self.__dict__",
    "docstring": "Set the shape of the matrix in-place",
    "type": "method",
    "file_path": "scipy\\scipy\\sparse\\_matrix.py",
    "ast_data": "FunctionDef name:set_shape arg:self arg:shape arguments arg arg Assign Call Call Assign"
  },
  {
    "library": "tensorflow",
    "name": "_Overdetermined",
    "source_code": "def _Overdetermined(op: ops.Operation, grad):\n    a = op.inputs[0]\n    b = op.inputs[1]\n    x = op.outputs[0]\n    l2_regularizer = math_ops.cast(op.inputs[2], a.dtype.base_dtype)\n    chol = linalg_ops._RegularizedGramianCholesky(a, l2_regularizer=l2_regularizer, first_kind=True)\n    z = linalg_ops.cholesky_solve(chol, grad)\n    xzt = math_ops.matmul(x, z, adjoint_b=True)\n    zx_sym = xzt + array_ops.matrix_transpose(xzt)\n    grad_a = -math_ops.matmul(a, zx_sym) + math_ops.matmul(b, z, adjoint_b=True)\n    grad_b = math_ops.matmul(a, z)\n    return (grad_a, grad_b, None)",
    "docstring": "Gradients for the overdetermined case of MatrixSolveLs. This is the backprop for the solution to the normal equations of the first kind: X = F(A, B) = (A^T * A + lambda * I)^{-1} * A^T * B which solve the least squares problem min ||A * X - B||_F^2 + lambda ||X||_F^2.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\linalg_grad.py",
    "ast_data": "FunctionDef name:_Overdetermined arg:op arg:grad arguments arg arg Assign Assign Assign Assign Call Assign Call Assign Call Assign Call Assign Call Assign Call Call Assign Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "TensorInfo",
    "source_code": "@dataclass\nclass TensorInfo:\n    allocation_stack_trace: Optional[traceback.StackSummary]\n    reads: list[Access] = field(default_factory=list)\n    write: Optional[Access] = None",
    "docstring": "Stores information about a single tensor and recent accesses to it. Args: allocation_stack_trace: the stack summary object captured during tensor allocation. Can be `` if the allocation wasn't caught by CSAN. reads: list of read accesses to the tensor that were performed since the last write. write: the last write access to the tensor.",
    "type": "class",
    "file_path": "pytorch\\torch\\cuda\\_sanitizer.py",
    "ast_data": "ClassDef name:TensorInfo Call"
  },
  {
    "library": "tensorflow",
    "name": "_popitem",
    "source_code": "def _popitem(self, indices=None, name=None):\n    if name is None:\n        name = '%s_get_nokey' % self._name\n    indices, dtypes = self._get_indices_and_dtypes(indices)\n    with ops.colocate_with(self._coloc_op):\n        key, result = self._popitem_fn(shared_name=self._name, indices=indices, dtypes=dtypes, name=name, capacity=self._capacity, memory_limit=self._memory_limit)\n    key = self._create_device_transfers(key)[0]\n    result = self._get_return_value(result, indices)\n    return (key, result)",
    "docstring": "If the staging area is ordered, the (key, value) with the smallest key will be returned. Otherwise, a random (key, value) will be returned. If the staging area is empty when this operation executes, it will block until there is an element to dequeue. Args: key: Key associated with the required data indices: Partial list of tensors to retrieve (optional). A list of integer or string indices. String indices are only valid if the Staging Area has names associated with it. name: A name for the operation (optional) Returns: The created op",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\data_flow_ops.py",
    "ast_data": "FunctionDef name:_popitem arg:self arg:indices arg:name arguments arg arg arg If Compare Assign Assign Call With Call Assign Call Assign Call Assign Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "rounding_regularization",
    "source_code": "def rounding_regularization(self, V: torch.Tensor, curr_iter: int) -> torch.Tensor:\n    assert curr_iter < self.max_iter, 'Current iteration strictly les sthan max iteration'\n    if curr_iter < self.warm_start * self.max_iter:\n        return torch.tensor(0.0)\n    else:\n        start_beta, end_beta = self.beta_range\n        warm_start_end_iter = self.warm_start * self.max_iter\n        rel_iter = (curr_iter - warm_start_end_iter) / (self.max_iter - warm_start_end_iter)\n        beta = end_beta + 0.5 * (start_beta - end_beta) * (1 + np.cos(rel_iter * np.pi))\n        h_alpha = torch.clamp(torch.sigmoid(V) * (ADAROUND_ZETA - ADAROUND_GAMMA) + ADAROUND_GAMMA, min=0, max=1)\n        inner_term = torch.add(2 * h_alpha, -1).abs().pow(beta)\n        regularization_term = torch.add(1, -inner_term).sum()\n        return regularization_term * self.reg_param",
    "docstring": "Major logics copied from official Adaround Implementation. Apply rounding regularization to the input tensor V.",
    "type": "method",
    "file_path": "pytorch\\torch\\ao\\quantization\\experimental\\adaround_loss.py",
    "ast_data": "FunctionDef name:rounding_regularization arg:self arg:V arg:curr_iter arguments arg arg arg Compare If Compare Return return:yes Call Assign Assign Assign Assign Call Assign Call Call Assign Call Call Call Assign Call Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "_mouse_move",
    "source_code": "def _mouse_move(self, event):\n    if self._xypress:\n        x, y = (event.x, event.y)\n        lastx, lasty, a, ind, view = self._xypress[0]\n        (x1, y1), (x2, y2) = np.clip([[lastx, lasty], [x, y]], a.bbox.min, a.bbox.max)\n        if self._zoom_mode == 'x':\n            y1, y2 = a.bbox.intervaly\n        elif self._zoom_mode == 'y':\n            x1, x2 = a.bbox.intervalx\n        self.toolmanager.trigger_tool('rubberband', self, data=(x1, y1, x2, y2))",
    "docstring": "Callback for mouse moves in zoom-to-rectangle mode.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\backend_tools.py",
    "ast_data": "FunctionDef name:_mouse_move arg:self arg:event arguments arg arg If Assign Assign Assign Call If Compare Assign If Compare Assign Call"
  },
  {
    "library": "pygame",
    "name": "init",
    "source_code": "def init():\n    global _ft_init\n    if not pygame.display.get_init():\n        raise error('video system not initialized')\n    register_quit(_quit_hook)\n    _ft_init = True",
    "docstring": "init() -> None initialize pygame.fastevent",
    "type": "function",
    "file_path": "pygame\\src_py\\fastevent.py",
    "ast_data": "FunctionDef name:init arguments If Call Raise Call Call Assign"
  },
  {
    "library": "scipy",
    "name": "DiagBroyden",
    "source_code": "class DiagBroyden(GenericBroyden):\n\n    def __init__(self, alpha=None):\n        GenericBroyden.__init__(self)\n        self.alpha = alpha\n\n    def setup(self, x, F, func):\n        GenericBroyden.setup(self, x, F, func)\n        self.d = np.full((self.shape[0],), 1 / self.alpha, dtype=self.dtype)\n\n    def solve(self, f, tol=0):\n        return -f / self.d\n\n    def matvec(self, f):\n        return -f * self.d\n\n    def rsolve(self, f, tol=0):\n        return -f / self.d.conj()\n\n    def rmatvec(self, f):\n        return -f * self.d.conj()\n\n    def todense(self):\n        return np.diag(-self.d)\n\n    def _update(self, x, f, dx, df, dx_norm, df_norm):\n        self.d -= (df + self.d * dx) * dx / dx_norm ** 2",
    "docstring": "Find a root of a function, using diagonal Broyden Jacobian approximation. The Jacobian approximation is derived from previous iterations, by retaining only the diagonal of Broyden matrices. .. warning:: This algorithm may be useful for specific problems, but whether it will work may depend strongly on the problem. Parameters ---------- %(params_basic)s alpha : float, optional Initial guess for the Jacobian is (-1/alpha). %(params_extra)s See Also -------- root : Interface to root finding algorithms for multivariate functions. See `` in particular. Examples -------- The following functions define a system of nonlinear equations >>> def fun(x): ... return [x[0] + 0.5 * (x[0] - x[1])**3 - 1.0, ... 0.5 * (x[1] - x[0])**3 + x[1]] A solution can be obtained as follows. >>> from scipy import optimize >>> sol = optimize.diagbroyden(fun, [0, 0]) >>> sol array([0.84116403, 0.15883384])",
    "type": "class",
    "file_path": "scipy\\scipy\\optimize\\_nonlin.py",
    "ast_data": "ClassDef name:DiagBroyden FunctionDef name:__init__ arg:self arg:alpha arguments arg arg Call Assign FunctionDef name:setup arg:self arg:x arg:F arg:func arguments arg arg arg arg Call Assign Call FunctionDef name:solve arg:self arg:f arg:tol arguments arg arg arg Return return:yes FunctionDef name:matvec arg:self arg:f arguments arg arg Return return:yes FunctionDef name:rsolve arg:self arg:f arg:tol arguments arg arg arg Return return:yes Call FunctionDef name:rmatvec arg:self arg:f arguments arg arg Return return:yes Call FunctionDef name:todense arg:self arguments arg Return return:yes Call FunctionDef name:_update arg:self arg:x arg:f arg:dx arg:df arg:dx_norm arg:df_norm arguments arg arg arg arg arg arg arg"
  },
  {
    "library": "pytorch",
    "name": "from_opschema",
    "source_code": "@classmethod\ndef from_opschema(cls, opschema: onnx.defs.OpSchema) -> OpSignature:\n    type_constraints = {constraint.type_param_str: TypeConstraintParam(name=constraint.type_param_str, allowed_types={_get_type_from_str(type_str) for type_str in constraint.allowed_type_strs}, description=constraint.description) for constraint in opschema.type_constraints}\n    params = [_convert_formal_parameter(param, type_constraints) for param in opschema.inputs]\n    for param in opschema.attributes.values():\n        default_attr = ir.serde.deserialize_attribute(param.default_value) if param.default_value is not None else None\n        if default_attr is not None:\n            default_attr.name = param.name\n        params.append(AttributeParameter(name=param.name, type=ir.AttributeType(param.type), required=param.required, default=default_attr))\n    outputs = [_convert_formal_parameter(param, type_constraints) for param in opschema.outputs]\n    return cls(domain=opschema.domain, name=opschema.name, overload='', params=params, outputs=outputs, opset_version=opschema.since_version)",
    "docstring": "Produce an OpSignature from an ONNX Opschema.",
    "type": "method",
    "file_path": "pytorch\\torch\\onnx\\_internal\\exporter\\_schemas.py",
    "ast_data": "FunctionDef name:from_opschema arg:cls arg:opschema arguments arg arg Assign Call Call Assign Call For Call Assign Compare Call If Compare Assign Call Call Call Assign Call Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "_extract_submatrices",
    "source_code": "def _extract_submatrices(M, block_indices, block_size, axis):\n    assert block_indices.ndim == 1\n    assert axis in [0, 1]\n    r, c = M.shape\n    if axis == 0:\n        sh = [block_indices.shape[0], block_size, c]\n    else:\n        sh = [block_indices.shape[0], r, block_size]\n    dt = M.dtype\n    M_res = np.empty(sh, dtype=dt)\n    if axis == 0:\n        for ir in range(block_size):\n            M_res[:, ir, :] = M[block_indices * block_size + ir, :]\n    else:\n        for ic in range(block_size):\n            M_res[:, :, ic] = M[:, block_indices * block_size + ic]\n    return M_res",
    "docstring": "Extract selected blocks of a matrices *M* depending on parameters *block_indices* and *block_size*. Returns the array of extracted matrices *Mres* so that :: M_res[..., ir, :] = M[(block_indices*block_size+ir), :]",
    "type": "function",
    "file_path": "matplotlib\\lib\\matplotlib\\tri\\_triinterpolate.py",
    "ast_data": "FunctionDef name:_extract_submatrices arg:M arg:block_indices arg:block_size arg:axis arguments arg arg arg arg Compare Compare Assign If Compare Assign Assign Assign Assign Call If Compare For Call Assign For Call Assign Return return:yes"
  },
  {
    "library": "numpy",
    "name": "AxisError",
    "source_code": "class AxisError(ValueError, IndexError):\n    __slots__ = ('_msg', 'axis', 'ndim')\n\n    def __init__(self, axis, ndim=None, msg_prefix=None):\n        if ndim is msg_prefix is None:\n            self._msg = axis\n            self.axis = None\n            self.ndim = None\n        else:\n            self._msg = msg_prefix\n            self.axis = axis\n            self.ndim = ndim\n\n    def __str__(self):\n        axis = self.axis\n        ndim = self.ndim\n        if axis is ndim is None:\n            return self._msg\n        else:\n            msg = f'axis {axis} is out of bounds for array of dimension {ndim}'\n            if self._msg is not None:\n                msg = f'{self._msg}: {msg}'\n            return msg",
    "docstring": "Axis supplied was invalid. This is raised whenever an `ValueErrorIndexErrorndim` if a custom exception message was provided. .. versionadded:: 1.22 Examples -------- >>> import numpy as np >>> array_1d = np.arange(10) >>> np.cumsum(array_1d, axis=1) Traceback (most recent call last): ... numpy.exceptions.AxisError: axis 1 is out of bounds for array of dimension 1 Negative axes are preserved: >>> np.cumsum(array_1d, axis=-2) Traceback (most recent call last): ... numpy.exceptions.AxisError: axis -2 is out of bounds for array of dimension 1 The class constructor generally takes the axis and arrays' dimensionality as arguments: >>> print(np.exceptions.AxisError(2, 1, msg_prefix='error')) error: axis 2 is out of bounds for array of dimension 1 Alternatively, a custom exception message can be passed: >>> print(np.exceptions.AxisError('Custom error message')) Custom error message",
    "type": "class",
    "file_path": "numpy\\numpy\\exceptions.py",
    "ast_data": "ClassDef name:AxisError Assign FunctionDef name:__init__ arg:self arg:axis arg:ndim arg:msg_prefix arguments arg arg arg arg If Compare Assign Assign Assign Assign Assign Assign FunctionDef name:__str__ arg:self arguments arg Assign Assign If Compare Return return:yes Assign If Compare Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "num_buckets",
    "source_code": "@property\ndef num_buckets(self):\n    return self.categorical_column.num_buckets",
    "docstring": "Returns number of buckets in this sparse feature.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\feature_column\\feature_column_v2.py",
    "ast_data": "FunctionDef name:num_buckets arg:self arguments arg Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "execute",
    "source_code": "def execute(self, fig):\n    return",
    "docstring": "Do nothing.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\layout_engine.py",
    "ast_data": "FunctionDef name:execute arg:self arg:fig arguments arg arg Return return:no"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, persistent=False, watch_accessed_variables=True):\n    self._tape = None\n    self._persistent = persistent\n    self._watch_accessed_variables = watch_accessed_variables\n    self._watched_variables = ()\n    self._recording = False",
    "docstring": "Creates a new GradientTape. Args: persistent: Boolean controlling whether a persistent gradient tape is created. False by default, which means at most one call can be made to the gradient() method on this object. watch_accessed_variables: Boolean controlling whether the tape will automatically any (trainable) variables accessed while the tape is active. Defaults to True meaning gradients can be requested from any result computed in the tape derived from reading a trainable . If False users must explicitly any s they want to request gradients from.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\backprop.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:persistent arg:watch_accessed_variables arguments arg arg arg Assign Assign Assign Assign Assign"
  },
  {
    "library": "tensorflow",
    "name": "_resolve_own_rank",
    "source_code": "def _resolve_own_rank(self):\n    return int(_get_slurm_var('PROCID'))",
    "docstring": "Returns the rank of the current task in range [0, num_tasks).",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\cluster_resolver\\slurm_cluster_resolver.py",
    "ast_data": "FunctionDef name:_resolve_own_rank arg:self arguments arg Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "num_workers",
    "source_code": "@property\ndef num_workers(self):\n    return self._num_workers",
    "docstring": "Returns number of workers in the cluster, including chief.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\distribute_coordinator.py",
    "ast_data": "FunctionDef name:num_workers arg:self arguments arg Return return:yes"
  },
  {
    "library": "scrapy",
    "name": "from_crawler",
    "source_code": "@classmethod\n@abstractmethod\ndef from_crawler(cls, crawler: Crawler, robotstxt_body: bytes) -> Self:\n    pass",
    "docstring": "Parse the content of a robots.txt_ file as bytes. This must be a class method. It must return a new instance of the parser backend. :param crawler: crawler which made the request :type crawler: :class: instance :param robotstxt_body: content of a robots.txt_ file. :type robotstxt_body: bytes",
    "type": "method",
    "file_path": "scrapy\\scrapy\\robotstxt.py",
    "ast_data": "FunctionDef name:from_crawler arg:cls arg:crawler arg:robotstxt_body arguments arg arg arg"
  },
  {
    "library": "scrapy",
    "name": "MarshalItemExporter",
    "source_code": "class MarshalItemExporter(BaseItemExporter):\n\n    def __init__(self, file: BytesIO, **kwargs: Any):\n        super().__init__(**kwargs)\n        self.file: BytesIO = file\n\n    def export_item(self, item: Any) -> None:\n        marshal.dump(dict(self._get_serialized_fields(item)), self.file)",
    "docstring": "Exports items in a Python-specific binary format (see :mod:). :param file: The file-like object to use for exporting the data. Its `bytes~io.BytesIO` object, etc)",
    "type": "class",
    "file_path": "scrapy\\scrapy\\exporters.py",
    "ast_data": "ClassDef name:MarshalItemExporter FunctionDef name:__init__ arg:self arg:file arguments arg arg arg Call Call FunctionDef name:export_item arg:self arg:item arguments arg arg Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "set_image_data_format",
    "source_code": "def set_image_data_format(data_format):\n    global _IMAGE_DATA_FORMAT\n    if data_format not in {'channels_last', 'channels_first'}:\n        raise ValueError('Unknown data_format: ' + str(data_format))\n    _IMAGE_DATA_FORMAT = str(data_format)",
    "docstring": "Sets the value of the image data format convention. Args: data_format: string. or . Example: >>> tf.keras.backend.image_data_format() 'channels_last' >>> tf.keras.backend.set_image_data_format('channels_first') >>> tf.keras.backend.image_data_format() 'channels_first' >>> tf.keras.backend.set_image_data_format('channels_last') Raises: ValueError: In case of invalid value.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\backend_config.py",
    "ast_data": "FunctionDef name:set_image_data_format arg:data_format arguments arg If Compare Raise Call Call Assign Call"
  },
  {
    "library": "cherrypy",
    "name": "handle_exception",
    "source_code": "def handle_exception(self):\n    self._exceptions.append(sys.exc_info()[1])",
    "docstring": "Append the current exception to self.",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\process\\wspbus.py",
    "ast_data": "FunctionDef name:handle_exception arg:self arguments arg Call Call"
  },
  {
    "library": "tensorflow",
    "name": "RegressionOutput",
    "source_code": "class RegressionOutput(ExportOutput):\n\n    def __init__(self, value):\n        if not (isinstance(value, tensor.Tensor) and value.dtype.is_floating):\n            raise ValueError('Regression output value must be a float32 Tensor; got {}'.format(value))\n        self._value = value\n\n    @property\n    def value(self):\n        return self._value\n\n    def as_signature_def(self, receiver_tensors):\n        if len(receiver_tensors) != 1:\n            raise ValueError('Regression input must be a single string Tensor; got {}'.format(receiver_tensors))\n        (_, examples), = receiver_tensors.items()\n        if dtypes.as_dtype(examples.dtype) != dtypes.string:\n            raise ValueError('Regression input must be a single string Tensor; got {}'.format(receiver_tensors))\n        return signature_def_utils.regression_signature_def(examples, self.value)",
    "docstring": "Represents the output of a regression head.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\saving\\utils_v1\\export_output.py",
    "ast_data": "ClassDef name:RegressionOutput FunctionDef name:__init__ arg:self arg:value arguments arg arg If BoolOp Call Raise Call Call Assign FunctionDef name:value arg:self arguments arg Return return:yes FunctionDef name:as_signature_def arg:self arg:receiver_tensors arguments arg arg If Compare Call Raise Call Call Assign Call If Compare Call Raise Call Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "set_up_storage_reader",
    "source_code": "def set_up_storage_reader(self, metadata: Metadata, is_coordinator: bool) -> None:\n    self.is_coordinator = is_coordinator\n    if self.is_coordinator:\n        assert dist.get_rank() == self.coordinator_rank\n    assert self.checkpoint_id is not None",
    "docstring": "Implementation of the StorageReader method",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\checkpoint\\format_utils.py",
    "ast_data": "FunctionDef name:set_up_storage_reader arg:self arg:metadata arg:is_coordinator arguments arg arg arg Assign If Compare Call Compare"
  },
  {
    "library": "pytorch",
    "name": "set_non_traceable_module_names",
    "source_code": "def set_non_traceable_module_names(self, module_names: list[str]) -> PrepareCustomConfig:\n    self.non_traceable_module_names = module_names\n    return self",
    "docstring": "Set the modules that are not symbolically traceable, identified by name.",
    "type": "method",
    "file_path": "pytorch\\torch\\ao\\quantization\\fx\\custom_config.py",
    "ast_data": "FunctionDef name:set_non_traceable_module_names arg:self arg:module_names arguments arg arg Assign Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "CondOpArgsMismatchError",
    "source_code": "class CondOpArgsMismatchError(ArgsMismatchError):\n\n    def __init__(self, msg: str) -> None:\n        super().__init__(msg)",
    "docstring": "Internal error from cond() due to arguments mismatch.",
    "type": "class",
    "file_path": "pytorch\\torch\\_dynamo\\exc.py",
    "ast_data": "ClassDef name:CondOpArgsMismatchError FunctionDef name:__init__ arg:self arg:msg arguments arg arg Call Call"
  },
  {
    "library": "pandas",
    "name": "is_array_like",
    "source_code": "def is_array_like(obj: object) -> bool:\n    return is_list_like(obj) and hasattr(obj, 'dtype')",
    "docstring": "Check if the object is array-like. For an object to be considered array-like, it must be list-like and have a attribute. Parameters ---------- obj : The object to check Returns ------- is_array_like : bool Whether has array-like properties. Examples -------- >>> is_array_like(np.array([1, 2, 3])) True >>> is_array_like(pd.Series([\"a\", \"b\"])) True >>> is_array_like(pd.Index([\"2016-01-01\"])) True >>> is_array_like([1, 2, 3]) False >>> is_array_like((\"a\", \"b\")) False",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\dtypes\\inference.py",
    "ast_data": "FunctionDef name:is_array_like arg:obj arguments arg Return return:yes BoolOp Call Call"
  },
  {
    "library": "pytorch",
    "name": "_get_symmetric_qnnpack_qconfig_mapping",
    "source_code": "def _get_symmetric_qnnpack_qconfig_mapping() -> QConfigMapping:\n    default_qconfig = default_symmetric_qnnpack_qconfig\n    return _get_default_qconfig_mapping_with_default_qconfig(False, 'qnnpack', default_qconfig)",
    "docstring": "Return a QConfigMapping that uses as the default QConfig.",
    "type": "function",
    "file_path": "pytorch\\torch\\ao\\quantization\\qconfig_mapping.py",
    "ast_data": "FunctionDef name:_get_symmetric_qnnpack_qconfig_mapping arguments Assign Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "predict",
    "source_code": "def predict(self, X, **predict_params):\n    check_is_fitted(self)\n    if _routing_enabled():\n        routed_params = process_routing(self, 'predict', **predict_params)\n    else:\n        routed_params = Bunch(regressor=Bunch(predict=predict_params))\n    pred = self.regressor_.predict(X, **routed_params.regressor.predict)\n    if pred.ndim == 1:\n        pred_trans = self.transformer_.inverse_transform(pred.reshape(-1, 1))\n    else:\n        pred_trans = self.transformer_.inverse_transform(pred)\n    if self._training_dim == 1 and pred_trans.ndim == 2 and (pred_trans.shape[1] == 1):\n        pred_trans = pred_trans.squeeze(axis=1)\n    return pred_trans",
    "docstring": "Predict using the base regressor, applying inverse. The regressor is used to predict and the or is applied before returning the prediction. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) Samples. **predict_params : dict of str -> object - If (default): Parameters directly passed to the method of the underlying regressor. - If : Parameters safely routed to the method of the underlying regressor. .. versionchanged:: 1.6 See :ref: for more details. Returns ------- y_hat : ndarray of shape (n_samples,) Predicted values.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\compose\\_target.py",
    "ast_data": "FunctionDef name:predict arg:self arg:X arguments arg arg arg Call If Call Assign Call Assign Call Call Assign Call If Compare Assign Call Call Assign Call If BoolOp Compare Compare Compare Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_functions",
    "source_code": "def _functions(self) -> list[StructuredFunctionWrapper]:\n    return []",
    "docstring": "Returns a list of functions associated with this dataset. Returns: A list of objects.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\data\\ops\\dataset_ops.py",
    "ast_data": "FunctionDef name:_functions arg:self arguments arg Return return:no"
  },
  {
    "library": "pytorch",
    "name": "load_state_dict",
    "source_code": "def load_state_dict(self, state_dict: Mapping[str, Any]):\n    raise NotImplementedError('ShardedOptimizer load_state_dict not implemented yet!')",
    "docstring": "Loads the ShardedOptimizer state. Args: state_dict (dict): ShardedOptimizer state. Should be an object returned from a call to :meth:.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\_shard\\sharded_optim\\api.py",
    "ast_data": "FunctionDef name:load_state_dict arg:self arg:state_dict arguments arg arg Raise Call"
  },
  {
    "library": "pytorch",
    "name": "amin",
    "source_code": "@_apply_docstring_templates\ndef amin(input: Union[Tensor, MaskedTensor], dim: DimOrDims=None, *, keepdim: Optional[bool]=False, dtype: Optional[DType]=None, mask: Optional[Tensor]=None) -> Tensor:\n    if dtype is None:\n        dtype = input.dtype\n    mask_input = _combine_input_and_mask(amin, input, mask)\n    dim_ = _canonical_dim(dim, mask_input.ndim)\n    if mask_input.layout == torch.strided:\n        return torch.amin(mask_input, dim_, bool(keepdim)).to(dtype=dtype)\n    elif mask_input.layout == torch.sparse_coo:\n        if mask is None:\n            raise ValueError('masked amax expects explicit mask for sparse_coo tensor input')\n        return _sparse_coo_scatter_reduction_helper(torch.amin, mask_input, dim_, bool(keepdim), dtype)\n    elif mask_input.layout == torch.sparse_csr:\n        if mask is None:\n            raise ValueError('masked amin expects explicit mask for sparse_csr tensor input')\n        return _sparse_csr_segment_reduction_helper(torch.amin, mask_input, dim_, bool(keepdim), dtype)\n    else:\n        raise ValueError(f'masked amin expects strided, sparse_coo or sparse_csr tensor (got {mask_input.layout} tensor)')",
    "docstring": "{reduction_signature} {reduction_descr} {reduction_identity_dtype} {reduction_args} {reduction_example}",
    "type": "function",
    "file_path": "pytorch\\torch\\masked\\_ops.py",
    "ast_data": "FunctionDef name:amin arg:input arg:dim arguments arg arg arg arg arg If Compare Assign Assign Call Assign Call If Compare Return return:yes Call Call Call If Compare If Compare Raise Call Return return:yes Call Call If Compare If Compare Raise Call Return return:yes Call Call Raise Call"
  },
  {
    "library": "tensorflow",
    "name": "__debug_string__",
    "source_code": "def __debug_string__(self):\n    lines = []\n    to_process = [(self, 0)]\n    while to_process:\n        dataset, depth = to_process.pop()\n        lines.append('-' * 2 * depth + repr(dataset))\n        to_process.extend([(ds, depth + 1) for ds in dataset._inputs()])\n    return '\\n'.join(lines)",
    "docstring": "Returns a string showing the type of the dataset and its inputs. This string is intended only for debugging purposes, and may change without warning.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\data\\ops\\dataset_ops.py",
    "ast_data": "FunctionDef name:__debug_string__ arg:self arguments arg Assign Assign While Assign Call Call Call Call Call Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "PyperclipException",
    "source_code": "class PyperclipException(RuntimeError):\n    pass",
    "docstring": "Exception raised when clipboard functionality is unsupported. Raised by ``.",
    "type": "class",
    "file_path": "pandas\\pandas\\errors\\__init__.py",
    "ast_data": "ClassDef name:PyperclipException"
  },
  {
    "library": "tensorflow",
    "name": "local_variables",
    "source_code": "@property\ndef local_variables(self):\n    if self._variables_created:\n        return ops.get_collection(ops.GraphKeys.LOCAL_VARIABLES, self.variable_scope_name)\n    else:\n        return []",
    "docstring": "Returns the list of global variables created by the Template.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\template.py",
    "ast_data": "FunctionDef name:local_variables arg:self arguments arg If Return return:yes Call Return return:no"
  },
  {
    "library": "pytorch",
    "name": "check",
    "source_code": "@deprecated('`torch._prims_common.check` is deprecated and will be removed in the future. Please use `torch._check*` functions instead.', category=FutureWarning)\ndef check(b: bool, s: Callable[[], str], exc_type: type[Exception]=RuntimeError) -> None:\n    torch._check_with(exc_type, b, s)",
    "docstring": "Helper function for raising an error_type (default: RuntimeError) if a boolean condition fails. Error message is a callable producing a string (to avoid wasting time string formatting in non-error case, and also to make it easier for torchdynamo to trace.) .. note:: This function is planned for removal in the future. Please use functions instead.",
    "type": "function",
    "file_path": "pytorch\\torch\\_prims_common\\__init__.py",
    "ast_data": "FunctionDef name:check arg:b arg:s arg:exc_type arguments arg arg arg Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_validate_input",
    "source_code": "def _validate_input(flattened_layouts: Sequence[layout_lib.Layout], flattened_elem_spec: Sequence[tensor_spec.TensorSpec], dataset_already_batched: bool):\n    if not flattened_elem_spec:\n        raise ValueError('Expected input element spec of at least one element, was empty.')\n    first_elem_shape = flattened_elem_spec[0].shape\n    for layout, elem_spec in zip(flattened_layouts, flattened_elem_spec):\n        if elem_spec.shape.rank is None:\n            raise ValueError('Dataset element shape must have a valid rank, got spec %s.' % elem_spec)\n        expected_rank = elem_spec.shape.rank\n        if not dataset_already_batched:\n            expected_rank += 1\n        if layout.rank != expected_rank:\n            raise ValueError('Expected layout with rank %d for element spec %s, got layout %s. Check that the dataset is not batched before passing to DTensorDataset.' % (expected_rank, elem_spec, layout.sharding_specs))\n        if dataset_already_batched:\n            batch_dim_size = first_elem_shape.as_list()[0]\n            if batch_dim_size is None:\n                raise ValueError('Size of batch dimension of element spec %s is None. Ensure drop_remainder=True when batching the dataset.' % elem_spec)\n            if elem_spec.shape.as_list()[0] != batch_dim_size:\n                raise ValueError('Size of batch dimension of element spec %s does not match expected size %d.' % (elem_spec, batch_dim_size))",
    "docstring": "Checks that the dataset's layouts and element specs are compatible. Args: flattened_layouts: the flattened list of layouts used to distribute the dataset. flattened_elem_spec: the flattened list of element specs used in the dataset's components. dataset_already_batched: whether the dataset to be validated is already batched. Raises: ValueError: if the dataset's inputs are incompatible.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\dtensor\\python\\input_util.py",
    "ast_data": "FunctionDef name:_validate_input arg:flattened_layouts arg:flattened_elem_spec arg:dataset_already_batched arguments arg arg arg If Raise Call Assign For Call If Compare Raise Call Assign If If Compare Raise Call If Assign Call If Compare Raise Call If Compare Call Raise Call"
  },
  {
    "library": "tensorflow",
    "name": "assert_is_compatible_with",
    "source_code": "def assert_is_compatible_with(self, other):\n    if not self.is_compatible_with(other):\n        raise ValueError('Dimensions %s and %s are not compatible' % (self, other))",
    "docstring": "Raises an exception if is not compatible with this Dimension. Args: other: Another Dimension. Raises: ValueError: If and are not compatible (see is_compatible_with).",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\tensor_shape.py",
    "ast_data": "FunctionDef name:assert_is_compatible_with arg:self arg:other arguments arg arg If Call Raise Call"
  },
  {
    "library": "matplotlib",
    "name": "_normalize_shortcut",
    "source_code": "def _normalize_shortcut(self, key):\n    special = {'backspace': 'BackSpace', 'pagedown': 'Page_Down', 'pageup': 'Page_Up', 'scroll_lock': 'Scroll_Lock'}\n    parts = key.split('+')\n    mods = ['<' + mod + '>' for mod in parts[:-1]]\n    key = parts[-1]\n    if key in special:\n        key = special[key]\n    elif len(key) > 1:\n        key = key.capitalize()\n    elif key.isupper():\n        mods += ['<shift>']\n    return ''.join(mods) + key",
    "docstring": "Convert Matplotlib key presses to GTK+ accelerator identifiers. Related to .",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\backends\\backend_gtk3.py",
    "ast_data": "FunctionDef name:_normalize_shortcut arg:self arg:key arguments arg arg Assign Assign Call Assign Assign If Compare Assign If Compare Call Assign Call If Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "OpCodeMapper",
    "source_code": "class OpCodeMapper:\n\n    def __init__(self, data):\n        self.code_to_name = {}\n        for idx, d in enumerate(data['operator_codes']):\n            self.code_to_name[idx] = BuiltinCodeToName(d['builtin_code'])\n            if self.code_to_name[idx] == 'CUSTOM':\n                self.code_to_name[idx] = NameListToString(d['custom_code'])\n\n    def __call__(self, x):\n        if x not in self.code_to_name:\n            s = '<UNKNOWN>'\n        else:\n            s = self.code_to_name[x]\n        return '%s (%d)' % (s, x)",
    "docstring": "Maps an opcode index to an op name.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\lite\\tools\\visualize.py",
    "ast_data": "ClassDef name:OpCodeMapper FunctionDef name:__init__ arg:self arg:data arguments arg arg Assign For Call Assign Call If Compare Assign Call FunctionDef name:__call__ arg:self arg:x arguments arg arg If Compare Assign Assign Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "PathPatchEffect",
    "source_code": "class PathPatchEffect(AbstractPathEffect):\n\n    def __init__(self, offset=(0, 0), **kwargs):\n        super().__init__(offset=offset)\n        self.patch = mpatches.PathPatch([], **kwargs)\n\n    def draw_path(self, renderer, gc, tpath, affine, rgbFace):\n        self.patch._path = tpath\n        self.patch.set_transform(affine + self._offset_transform(renderer))\n        self.patch.set_clip_box(gc.get_clip_rectangle())\n        clip_path = gc.get_clip_path()\n        if clip_path and self.patch.get_clip_path() is None:\n            self.patch.set_clip_path(*clip_path)\n        self.patch.draw(renderer)",
    "docstring": "Draws a instance whose Path comes from the original PathEffect artist.",
    "type": "class",
    "file_path": "matplotlib\\lib\\matplotlib\\patheffects.py",
    "ast_data": "ClassDef name:PathPatchEffect FunctionDef name:__init__ arg:self arg:offset arguments arg arg arg Call Call Assign Call FunctionDef name:draw_path arg:self arg:renderer arg:gc arg:tpath arg:affine arg:rgbFace arguments arg arg arg arg arg arg Assign Call Call Call Call Assign Call If BoolOp Compare Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "from_nested_row_lengths",
    "source_code": "@classmethod\n@dispatch.add_dispatch_support\ndef from_nested_row_lengths(cls, flat_values, nested_row_lengths, name=None, validate=True):\n    if not isinstance(validate, bool):\n        raise TypeError(f'Argument `validate` must have type bool. Received {validate}.')\n    if isinstance(nested_row_lengths, tensor_lib.Tensor):\n        raise TypeError(f'Argument `nested_row_lengths` must be a list of Tensors. Received {nested_row_lengths}.')\n    with ops.name_scope(name, 'RaggedFromNestedRowlengths', [flat_values] + list(nested_row_lengths)):\n        result = flat_values\n        for lengths in reversed(nested_row_lengths):\n            result = cls.from_row_lengths(result, lengths, validate=validate)\n        return result",
    "docstring": "Creates a from a nested list of tensors. Equivalent to: Args: flat_values: A potentially ragged tensor. nested_row_lengths: A list of 1-D integer tensors. The th tensor is used as the for the th ragged dimension. name: A name prefix for the RaggedTensor (optional). validate: If true, then use assertions to check that the arguments form a valid . Note: these assertions incur a runtime cost, since they must be checked for each tensor value. Returns: A (or if is empty).",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\ragged_tensor.py",
    "ast_data": "FunctionDef name:from_nested_row_lengths arg:cls arg:flat_values arg:nested_row_lengths arg:name arg:validate arguments arg arg arg arg arg If Call Raise Call If Call Raise Call With Call Call Assign For Call Assign Call Return return:yes"
  },
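  A short usage sketch for the entry above, assuming TensorFlow 2.x is installed (`from_nested_row_lengths` is a public `tf.RaggedTensor` classmethod). Each list of lengths partitions the level below it.

```python
import tensorflow as tf

# [2, 0, 3] splits the 5 flat values into rows [1, 2], [], [3, 4, 5];
# [1, 2] then groups those 3 rows into 2 outer rows.
rt = tf.RaggedTensor.from_nested_row_lengths(
    flat_values=tf.constant([1, 2, 3, 4, 5]),
    nested_row_lengths=([1, 2], [2, 0, 3]))
print(rt)  # <tf.RaggedTensor [[[1, 2]], [[], [3, 4, 5]]]>
```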
  {
    "library": "tensorflow",
    "name": "_rank_ignoring_leading_dims_with_size_1",
    "source_code": "def _rank_ignoring_leading_dims_with_size_1(value):\n    if value.shape.rank is not None:\n        ndims = value.shape.rank\n        for dim in value.shape.dims:\n            if dim.value == 1:\n                ndims -= 1\n            elif dim.value is None:\n                ndims = None\n                break\n            else:\n                break\n        if ndims is not None:\n            return ndims\n    shape = array_ops.shape(value)\n    dim_is_one = math_ops.cast(math_ops.equal(shape, 1), dtypes.int32)\n    leading_ones = math_ops.cumprod(dim_is_one)\n    num_leading_ones = math_ops.reduce_sum(leading_ones)\n    return array_ops.rank(value) - num_leading_ones",
    "docstring": "Returns , ignoring any leading dimensions with size 1.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\ragged_conversion_ops.py",
    "ast_data": "FunctionDef name:_rank_ignoring_leading_dims_with_size_1 arg:value arguments arg If Compare Assign For If Compare If Compare Assign If Compare Return return:yes Assign Call Assign Call Call Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "DynamicReduce2DBench",
    "source_code": "class DynamicReduce2DBench(benchmark.DynamicShape, Reduce2DBench):\n\n    def __init__(self, mode, device, dtype, red_dim, dim0, dim1):\n        benchmark.DynamicShape.__init__(self)\n        Reduce2DBench.__init__(self, mode, device, dtype, red_dim, dim0, dim1)\n\n    def instantiate_input(self):\n        dim0, dim1 = self.rand_shape([self.dim0, self.dim1])\n        self.inputs = [self.randn([dim0, dim1], device=self.device, dtype=self.dtype, requires_grad=self.requires_grad)]\n\n    @staticmethod\n    def module():\n        return 'dynamicreduce2d'",
    "docstring": "A benchmark class to validate 2 dimensional reduction performance. Only a simple add is fused to induce the fuser and isolate reduction perf.",
    "type": "class",
    "file_path": "pytorch\\benchmarks\\tensorexpr\\reduction.py",
    "ast_data": "ClassDef name:DynamicReduce2DBench FunctionDef name:__init__ arg:self arg:mode arg:device arg:dtype arg:red_dim arg:dim0 arg:dim1 arguments arg arg arg arg arg arg arg Call Call FunctionDef name:instantiate_input arg:self arguments arg Assign Call Assign Call FunctionDef name:module arguments Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "register_layer",
    "source_code": "def register_layer(self, layer: nn.Module, aggregate_fn=None, reduce_fn=None, mask_fn=None, features=None, feature_dim=None, **sparse_config):\n    name = module_to_fqn(self.model, layer)\n    assert name is not None, 'layer not found in the model'\n    if name in self.data_groups:\n        warnings.warn('layer already attached to the sparsifier, deregistering the layer and registering with new config')\n        self.unregister_layer(name=name)\n    local_args = copy.deepcopy(self.defaults)\n    update_dict = {'aggregate_fn': aggregate_fn, 'reduce_fn': reduce_fn, 'mask_fn': mask_fn, 'features': features, 'feature_dim': feature_dim, 'layer': layer}\n    local_args.update(((arg, val) for arg, val in update_dict.items() if val is not None))\n    local_args['sparse_config'].update(sparse_config)\n    self._safe_rail_checks(local_args)\n    self.data_groups[name] = local_args\n    agg_hook = layer.register_forward_pre_hook(self._aggregate_hook(name=name))\n    self.state[name]['mask'] = None\n    self.data_groups[name]['hook'] = agg_hook\n    self.data_groups[name]['hook_state'] = 'aggregate'",
    "docstring": "Registers a layer for sparsification. The layer should be part of self.model. Specifically, registers a pre-forward hook to the layer. The hook will apply the aggregate_fn and store the aggregated activations that is input over each step. Note:: - There is no need to pass in the name of the layer as it is automatically computed as per the fqn convention. - All the functions (fn) passed as argument will be called at a dim, feature level.",
    "type": "method",
    "file_path": "pytorch\\torch\\ao\\pruning\\_experimental\\activation_sparsifier\\activation_sparsifier.py",
    "ast_data": "FunctionDef name:register_layer arg:self arg:layer arg:aggregate_fn arg:reduce_fn arg:mask_fn arg:features arg:feature_dim arguments arg arg arg arg arg arg arg arg Assign Call Compare If Compare Call Call Assign Call Assign Call Call Compare Call Call Assign Assign Call Call Assign Assign Assign"
  },
  {
    "library": "pytorch",
    "name": "as_proxy",
    "source_code": "def as_proxy(self):\n    return self.value.type.__name__",
    "docstring": "Similar to how numpy dtype descriptors (e.g. np.float32 ) are handled by NumpyVariable: np.dtype() objects are serialized as strings, torch._numpy wrappers will normalize to the torch dtype. This also handles unsupported things nicely (i.e. structured arrays and object arrays).",
    "type": "method",
    "file_path": "pytorch\\torch\\_dynamo\\variables\\misc.py",
    "ast_data": "FunctionDef name:as_proxy arg:self arguments arg Return return:yes"
  },
  {
    "library": "scrapy",
    "name": "strip_url",
    "source_code": "def strip_url(self, url: str, origin_only: bool=False) -> str | None:\n    if not url:\n        return None\n    return strip_url(url, strip_credentials=True, strip_fragment=True, strip_default_port=True, origin_only=origin_only)",
    "docstring": "If url is null, return no referrer. If url's scheme is a local scheme, then return no referrer. Set url's username to the empty string. Set url's password to null. Set url's fragment to null. If the origin-only flag is true, then: Set url's path to null. Set url's query to null. Return url.",
    "type": "method",
    "file_path": "scrapy\\scrapy\\spidermiddlewares\\referer.py",
    "ast_data": "FunctionDef name:strip_url arg:self arg:url arg:origin_only arguments arg arg arg If Return return:no Return return:yes Call"
  },
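  The method above delegates to the module-level `scrapy.utils.url.strip_url`. A quick illustration of the two modes it uses, assuming Scrapy is installed:

```python
from scrapy.utils.url import strip_url

url = "https://user:secret@example.com:443/path?q=1#frag"

# Referrer mode: credentials, fragment and the default port are removed.
print(strip_url(url, strip_credentials=True, strip_fragment=True,
                strip_default_port=True))
# https://example.com/path?q=1

# Origin-only mode additionally drops the path and query.
print(strip_url(url, origin_only=True))
# https://example.com/
```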
  {
    "library": "pandas",
    "name": "MultiIndexUInt16Engine",
    "source_code": "class MultiIndexUInt16Engine(libindex.BaseMultiIndexCodesEngine, libindex.UInt16Engine):\n    _base = libindex.UInt16Engine\n    _codes_dtype = 'uint16'",
    "docstring": "Manages a MultiIndex by mapping label combinations to positive integers. The number of possible label combinations must not overflow the 16 bits integers.",
    "type": "class",
    "file_path": "pandas\\pandas\\core\\indexes\\multi.py",
    "ast_data": "ClassDef name:MultiIndexUInt16Engine Assign Assign"
  },
  {
    "library": "tensorflow",
    "name": "SessionRunArgs",
    "source_code": "@tf_export(v1=['train.SessionRunArgs'])\nclass SessionRunArgs(collections.namedtuple('SessionRunArgs', ['fetches', 'feed_dict', 'options'])):\n\n    def __new__(cls, fetches, feed_dict=None, options=None):\n        return super(SessionRunArgs, cls).__new__(cls, fetches, feed_dict, options)",
    "docstring": "Represents arguments to be added to a call. Args: fetches: Exactly like the 'fetches' argument to Session.Run(). Can be a single tensor or op, a list of 'fetches' or a dictionary of fetches. For example: fetches = global_step_tensor fetches = [train_op, summary_op, global_step_tensor] fetches = {'step': global_step_tensor, 'summ': summary_op} Note that this can recurse as expected: fetches = {'step': global_step_tensor, 'ops': [train_op, check_nan_op]} feed_dict: Exactly like the argument to options: Exactly like the argument to , i.e., a config_pb2.RunOptions proto.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\training\\session_run_hook.py",
    "ast_data": "ClassDef name:SessionRunArgs Call FunctionDef name:__new__ arg:cls arg:fetches arg:feed_dict arg:options arguments arg arg arg arg Return return:yes Call Call Call"
  },
  {
    "library": "scipy",
    "name": "_onenormest_m1_power",
    "source_code": "def _onenormest_m1_power(A, p, t=2, itmax=5, compute_v=False, compute_w=False):\n    return onenormest(_MatrixM1PowerOperator(A, p), t=t, itmax=itmax, compute_v=compute_v, compute_w=compute_w)",
    "docstring": "Efficiently estimate the 1-norm of (A - I)^p. Parameters ---------- A : ndarray Matrix whose 1-norm of a power is to be computed. p : int Non-negative integer power. t : int, optional A positive parameter controlling the tradeoff between accuracy versus time and memory usage. Larger values take longer and use more memory but give more accurate output. itmax : int, optional Use at most this many iterations. compute_v : bool, optional Request a norm-maximizing linear operator input vector if True. compute_w : bool, optional Request a norm-maximizing linear operator output vector if True. Returns ------- est : float An underestimate of the 1-norm of the sparse matrix. v : ndarray, optional The vector such that ||Av||_1 == est*||v||_1. It can be thought of as an input to the linear operator that gives an output with particularly large norm. w : ndarray, optional The vector Av which has relatively large 1-norm. It can be thought of as an output of the linear operator that is relatively large in norm compared to the input.",
    "type": "function",
    "file_path": "scipy\\scipy\\linalg\\_matfuncs_inv_ssq.py",
    "ast_data": "FunctionDef name:_onenormest_m1_power arg:A arg:p arg:t arg:itmax arg:compute_v arg:compute_w arguments arg arg arg arg arg arg Return return:yes Call Call"
  },
  {
    "library": "pandas",
    "name": "empty",
    "source_code": "@final\n@property\ndef empty(self) -> bool:\n    return not self.size",
    "docstring": "Indicator whether Index is empty. An Index is considered empty if it has no elements. This property can be useful for quickly checking the state of an Index, especially in data processing and analysis workflows where handling of empty datasets might be required. Returns ------- bool If Index is empty, return True, if not return False. See Also -------- Index.size : Return the number of elements in the underlying data. Examples -------- >>> idx = pd.Index([1, 2, 3]) >>> idx Index([1, 2, 3], dtype='int64') >>> idx.empty False >>> idx_empty = pd.Index([]) >>> idx_empty Index([], dtype='object') >>> idx_empty.empty True If we only have NaNs in our DataFrame, it is not considered empty! >>> idx = pd.Index([np.nan, np.nan]) >>> idx Index([nan, nan], dtype='float64') >>> idx.empty False",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\base.py",
    "ast_data": "FunctionDef name:empty arg:self arguments arg Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "_StrongRef",
    "source_code": "class _StrongRef:\n\n    def __init__(self, obj):\n        self._obj = obj\n\n    def __call__(self):\n        return self._obj\n\n    def __eq__(self, other):\n        return isinstance(other, _StrongRef) and self._obj == other._obj\n\n    def __hash__(self):\n        return hash(self._obj)",
    "docstring": "Wrapper similar to a weakref, but keeping a strong reference to the object.",
    "type": "class",
    "file_path": "matplotlib\\lib\\matplotlib\\cbook.py",
    "ast_data": "ClassDef name:_StrongRef FunctionDef name:__init__ arg:self arg:obj arguments arg arg Assign FunctionDef name:__call__ arg:self arguments arg Return return:yes FunctionDef name:__eq__ arg:self arg:other arguments arg arg Return return:yes BoolOp Call Compare FunctionDef name:__hash__ arg:self arguments arg Return return:yes Call"
  },
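  The point of `_StrongRef` is to give a plain object the same call-to-dereference interface as `weakref.ref`, so caller code can treat both uniformly. A small illustration, assuming the private class is importable from `matplotlib.cbook` (it may move between releases):

```python
from matplotlib.cbook import _StrongRef  # private API; location may change
import weakref

class Target:
    pass

obj = Target()
strong = _StrongRef(obj)
print(strong() is obj)                     # True: call to dereference, like weakref.ref
print(_StrongRef(obj) == _StrongRef(obj))  # True: equality compares the wrapped objects
```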
  {
    "library": "tensorflow",
    "name": "_load_source_files",
    "source_code": "def _load_source_files(self):\n    source_files_iter = self._reader.source_files_iterator()\n    for debug_event, offset in source_files_iter:\n        source_file = debug_event.source_file\n        self._host_name_file_path_to_offset[source_file.host_name, source_file.file_path] = offset",
    "docstring": "Incrementally read the .source_files DebugEvent file.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\debug\\lib\\debug_events_reader.py",
    "ast_data": "FunctionDef name:_load_source_files arg:self arguments arg Assign Call For Assign Assign"
  },
  {
    "library": "matplotlib",
    "name": "get_right_margin_bbox",
    "source_code": "def get_right_margin_bbox(self, rows=0, cols=0):\n    rows = np.atleast_1d(rows)\n    cols = np.atleast_1d(cols)\n    bbox = Bbox.from_extents(self.rights[cols[-1]].value() - self.margins['right'][cols[-1]].value() - self.margins['rightcb'][cols[-1]].value(), self.bottoms[rows[-1]].value(), self.rights[cols[-1]].value() - self.margins['rightcb'][cols[-1]].value(), self.tops[rows[0]].value())\n    return bbox",
    "docstring": "Return the left margin bounding box of the subplot specs given by rows and cols. rows and cols can be spans.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\_layoutgrid.py",
    "ast_data": "FunctionDef name:get_right_margin_bbox arg:self arg:rows arg:cols arguments arg arg arg Assign Call Assign Call Assign Call Call Call Call Call Call Call Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "markerObject",
    "source_code": "def markerObject(self, path, trans, fill, stroke, lw, joinstyle, capstyle):\n    pathops = self.pathOperations(path, trans, simplify=False)\n    key = (tuple(pathops), bool(fill), bool(stroke), joinstyle, capstyle)\n    result = self.markers.get(key)\n    if result is None:\n        name = Name('M%d' % len(self.markers))\n        ob = self.reserveObject('marker %d' % len(self.markers))\n        bbox = path.get_extents(trans)\n        self.markers[key] = [name, ob, bbox, lw]\n    else:\n        if result[-1] < lw:\n            result[-1] = lw\n        name = result[0]\n    return name",
    "docstring": "Return name of a marker XObject representing the given path.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\backends\\backend_pdf.py",
    "ast_data": "FunctionDef name:markerObject arg:self arg:path arg:trans arg:fill arg:stroke arg:lw arg:joinstyle arg:capstyle arguments arg arg arg arg arg arg arg arg Assign Call Assign Call Call Call Assign Call If Compare Assign Call Call Assign Call Call Assign Call Assign If Compare Assign Assign Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "_locate",
    "source_code": "def _locate(self, x):\n    if isinstance(self.norm, (colors.NoNorm, colors.BoundaryNorm)):\n        b = self._boundaries\n        xn = x\n    else:\n        b = self.norm(self._boundaries, clip=False).filled()\n        xn = self.norm(x, clip=False).filled()\n    bunique = b[self._inside]\n    yunique = self._y\n    z = np.interp(xn, bunique, yunique)\n    return z",
    "docstring": "Given a set of color data values, return their corresponding colorbar data coordinates.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\colorbar.py",
    "ast_data": "FunctionDef name:_locate arg:self arg:x arguments arg arg If Call Assign Assign Assign Call Call Assign Call Call Assign Assign Assign Call Return return:yes"
  },
  {
    "library": "scipy",
    "name": "fclusterdata",
    "source_code": "@xp_capabilities(cpu_only=True, reason='Cython code', jax_jit=False, allow_dask_compute=True)\ndef fclusterdata(X, t, criterion='inconsistent', metric='euclidean', depth=2, method='single', R=None):\n    xp = array_namespace(X)\n    X = _asarray(X, order='C', dtype=xp.float64, xp=xp)\n    if X.ndim != 2:\n        raise TypeError('The observation matrix X must be an n by m array.')\n    Y = distance.pdist(X, metric=metric)\n    Z = linkage(Y, method=method)\n    if R is None:\n        R = inconsistent(Z, d=depth)\n    else:\n        R = _asarray(R, order='C', xp=xp)\n    T = fcluster(Z, criterion=criterion, depth=depth, R=R, t=t)\n    return T",
    "docstring": "Cluster observation data using a given metric. Clusters the original observations in the n-by-m data matrix X (n observations in m dimensions), using the euclidean distance metric to calculate distances between original observations, performs hierarchical clustering using the single linkage algorithm, and forms flat clusters using the inconsistency method with as the cut-off threshold. A 1-D array `fclusterinconsistentlinkagescipy.spatial.distance.pdistscipy.cluster.hierarchy.fcluster`, and the default settings) is four clusters with three data points each.",
    "type": "function",
    "file_path": "scipy\\scipy\\cluster\\hierarchy.py",
    "ast_data": "FunctionDef name:fclusterdata arg:X arg:t arg:criterion arg:metric arg:depth arg:method arg:R arguments arg arg arg arg arg arg arg Assign Call Assign Call If Compare Raise Call Assign Call Assign Call If Compare Assign Call Assign Call Assign Call Return return:yes Call"
  },
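  A runnable sketch of the entry above on synthetic data: three well-separated blobs, clustered with the public `scipy.cluster.hierarchy.fclusterdata` using the `maxclust` criterion (a valid alternative to the default `inconsistent`).

```python
import numpy as np
from scipy.cluster.hierarchy import fclusterdata

rng = np.random.default_rng(0)
# Three well-separated blobs of 5 points each in 2-D.
X = np.vstack([rng.normal(loc, 0.1, size=(5, 2)) for loc in (0.0, 5.0, 10.0)])

# Ask for exactly 3 flat clusters via criterion='maxclust'.
labels = fclusterdata(X, t=3, criterion='maxclust', method='single')
print(labels)  # e.g. [1 1 1 1 1 2 2 2 2 2 3 3 3 3 3]
```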
  {
    "library": "pytorch",
    "name": "get_logdir",
    "source_code": "def get_logdir(self):\n    return self.event_writer.get_logdir()",
    "docstring": "Return the directory where event file will be written.",
    "type": "method",
    "file_path": "pytorch\\torch\\utils\\tensorboard\\writer.py",
    "ast_data": "FunctionDef name:get_logdir arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "_on_mouse_wheel",
    "source_code": "def _on_mouse_wheel(self, event):\n    x, y = self._mpl_coords(event)\n    step = event.LinesPerAction * event.WheelRotation / event.WheelDelta\n    event.Skip()\n    if wx.Platform == '__WXMAC__':\n        if not hasattr(self, '_skipwheelevent'):\n            self._skipwheelevent = True\n        elif self._skipwheelevent:\n            self._skipwheelevent = False\n            return\n        else:\n            self._skipwheelevent = True\n    MouseEvent('scroll_event', self, x, y, step=step, modifiers=self._mpl_modifiers(event), guiEvent=event)._process()",
    "docstring": "Translate mouse wheel events into matplotlib events",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\backends\\backend_wx.py",
    "ast_data": "FunctionDef name:_on_mouse_wheel arg:self arg:event arguments arg arg Assign Call Assign Call If Compare If Call Assign If Assign Return return:no Assign Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_SparseSoftmaxCrossEntropyWithLogitsGrad",
    "source_code": "@ops.RegisterGradient('SparseSoftmaxCrossEntropyWithLogits')\ndef _SparseSoftmaxCrossEntropyWithLogitsGrad(op: ops.Operation, grad_loss, grad_grad):\n    softmax_grad = op.outputs[1]\n    grad = _BroadcastMul(grad_loss, softmax_grad)\n    logits = op.inputs[0]\n    if grad_grad is not None and (not getattr(grad_grad, '_is_zeros_tensor', False)):\n        softmax = gen_nn_ops.softmax(logits)\n        grad += (grad_grad - array_ops.squeeze(math_ops.matmul(array_ops.expand_dims(grad_grad, 1), array_ops.expand_dims(softmax, 2)), axis=1)) * softmax\n    return (grad, None)",
    "docstring": "Gradient function for SparseSoftmaxCrossEntropyWithLogits.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\nn_grad.py",
    "ast_data": "FunctionDef name:_SparseSoftmaxCrossEntropyWithLogitsGrad arg:op arg:grad_loss arg:grad_grad arguments arg arg arg Assign Assign Call Assign If BoolOp Compare Call Assign Call Call Call Call Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "celu",
    "source_code": "@register_decomposition(aten.celu)\n@_inplace_wrapper\n@out_wrapper()\n@elementwise_type_promotion_wrapper(type_promoting_args=('a',), type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT)\ndef celu(a: TensorLikeType, alpha: Optional[NumberType]=None, inplace: bool=False) -> TensorLikeType:\n    if inplace:\n        raise NotImplementedError\n    rhs: TensorLikeType\n    if alpha is not None:\n        python_type = utils.dtype_to_type(a.dtype)\n        if not utils.is_weakly_lesser_type(type(alpha), python_type):\n            msg = f'alpha argument of type {type(alpha)} cannot be safely cast to type {python_type}!'\n            raise ValueError(msg)\n        rhs = alpha * torch.expm1(torch.true_divide(a, alpha))\n    else:\n        rhs = torch.expm1(a)\n    return torch.where(a > 0, a, rhs)",
    "docstring": "Reference implementation of torch.nn.functional.celu",
    "type": "function",
    "file_path": "pytorch\\torch\\_refs\\nn\\functional\\__init__.py",
    "ast_data": "FunctionDef name:celu arg:a arg:alpha arg:inplace arguments arg arg arg If Raise If Compare Assign Call If Call Call Assign Call Raise Call Assign Call Call Assign Call Return return:yes Call Compare Call Call Call"
  },
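  The reference above implements celu(x) = max(0, x) + min(0, alpha * (expm1(x / alpha))). A small check against the public `torch.nn.functional.celu`, assuming PyTorch is installed:

```python
import torch
import torch.nn.functional as F

x = torch.tensor([-2.0, -0.5, 0.0, 1.5])
alpha = 0.75

# celu(x) = max(0, x) + min(0, alpha * (exp(x / alpha) - 1));
# clamp(..., min=0) is the max term, clamp(..., max=0) is the min term.
manual = torch.clamp(x, min=0) + torch.clamp(alpha * torch.expm1(x / alpha), max=0)
print(torch.allclose(F.celu(x, alpha=alpha), manual))  # True
```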
  {
    "library": "pytorch",
    "name": "_BypassDispatchCache",
    "source_code": "@dataclass_slots\n@dataclass(frozen=True)\nclass _BypassDispatchCache(Exception):\n    reason: str",
    "docstring": "Signals cases that should skip FakeTensor caching.",
    "type": "class",
    "file_path": "pytorch\\torch\\_subclasses\\fake_tensor.py",
    "ast_data": "ClassDef name:_BypassDispatchCache Call"
  },
  {
    "library": "pandas",
    "name": "DeepChainMap",
    "source_code": "class DeepChainMap(ChainMap[_KT, _VT]):\n\n    def __setitem__(self, key: _KT, value: _VT) -> None:\n        for mapping in self.maps:\n            if key in mapping:\n                mapping[key] = value\n                return\n        self.maps[0][key] = value\n\n    def __delitem__(self, key: _KT) -> None:\n        for mapping in self.maps:\n            if key in mapping:\n                del mapping[key]\n                return\n        raise KeyError(key)",
    "docstring": "Variant of ChainMap that allows direct updates to inner scopes. Only works when all passed mapping are mutable.",
    "type": "class",
    "file_path": "pandas\\pandas\\core\\computation\\scope.py",
    "ast_data": "ClassDef name:DeepChainMap FunctionDef name:__setitem__ arg:self arg:key arg:value arguments arg arg arg For If Compare Assign Return return:no Assign FunctionDef name:__delitem__ arg:self arg:key arguments arg arg For If Compare Return return:no Raise Call"
  },
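  The behavior is easy to demonstrate with a standalone, non-generic sketch built on `collections.ChainMap` (pandas' version above adds type parameters but the write semantics are the same): writes go to the first mapping that already holds the key, instead of always hitting `maps[0]`.

```python
from collections import ChainMap

class DeepChainMap(ChainMap):
    """Sketch: writes update the first mapping that already holds the key."""
    def __setitem__(self, key, value):
        for mapping in self.maps:
            if key in mapping:
                mapping[key] = value
                return
        self.maps[0][key] = value

outer, inner = {'y': 2}, {'x': 1}
d = DeepChainMap(outer, inner)
d['x'] = 10      # updates inner, where 'x' already lives
d['z'] = 3       # new key lands in the first map (outer)
print(inner)     # {'x': 10}
print(outer)     # {'y': 2, 'z': 3}
```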
  {
    "library": "seaborn",
    "name": "position_candidates",
    "source_code": "def position_candidates(self, xyr_i, neighbors):\n    candidates = [xyr_i]\n    x_i, y_i, r_i = xyr_i\n    left_first = True\n    for x_j, y_j, r_j in neighbors:\n        dy = y_i - y_j\n        dx = np.sqrt(max((r_i + r_j) ** 2 - dy ** 2, 0)) * 1.05\n        cl, cr = ((x_j - dx, y_i, r_i), (x_j + dx, y_i, r_i))\n        if left_first:\n            new_candidates = [cl, cr]\n        else:\n            new_candidates = [cr, cl]\n        candidates.extend(new_candidates)\n        left_first = not left_first\n    return np.array(candidates)",
    "docstring": "Return a list of coordinates that might be valid by adjusting x.",
    "type": "method",
    "file_path": "seaborn\\seaborn\\categorical.py",
    "ast_data": "FunctionDef name:position_candidates arg:self arg:xyr_i arg:neighbors arguments arg arg arg Assign Assign Assign For Assign Assign Call Call Assign If Assign Assign Call Assign Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "_skip_annotate",
    "source_code": "def _skip_annotate(nodes: list[Node], filter_fn: Optional[FilterFn]=None) -> bool:\n    if _is_any_annotated(nodes):\n        return True\n    if filter_fn and filter_fn(nodes):\n        return False\n    return True",
    "docstring": "Determine whether to skip annotation for a list of nodes.",
    "type": "function",
    "file_path": "pytorch\\torch\\ao\\quantization\\quantizer\\x86_inductor_quantizer.py",
    "ast_data": "FunctionDef name:_skip_annotate arg:nodes arg:filter_fn arguments arg arg If Call Return return:yes If BoolOp Call Return return:yes Return return:yes"
  },
  {
    "library": "kornia",
    "name": "transform_tensor",
    "source_code": "def transform_tensor(self, input: Tensor, *, shape: Optional[Tensor]=None, match_channel: bool=True) -> Tensor:\n    _validate_input_dtype(input, accepted_dtypes=[float16, float32, float64])\n    if shape is None:\n        return _transform_input3d(input)\n    else:\n        return _transform_input3d_by_shape(input, reference_shape=shape, match_channel=match_channel)",
    "docstring": "Convert any incoming (D, H, W), (C, D, H, W) and (B, C, D, H, W) into (B, C, D, H, W).",
    "type": "method",
    "file_path": "kornia\\kornia\\augmentation\\_3d\\base.py",
    "ast_data": "FunctionDef name:transform_tensor arg:self arg:input arguments arg arg arg arg Call If Compare Return return:yes Call Return return:yes Call"
  },
  {
    "library": "kornia",
    "name": "make_samplers",
    "source_code": "def make_samplers(self, device: torch.device, dtype: torch.dtype) -> None:\n    amount = _range_bound(self.amount, 'amount').to(device, dtype)\n    salt_and_pepper = _range_bound(self.salt_and_pepper, 'salt_and_pepper').to(device, dtype)\n    self.amount_sampler = UniformDistribution(amount[0], amount[1], validate_args=False)\n    self.salt_and_pepper_sampler = UniformDistribution(salt_and_pepper[0], salt_and_pepper[1], validate_args=False)",
    "docstring": "Create samplers for generating random noise parameters.",
    "type": "method",
    "file_path": "kornia\\kornia\\augmentation\\random_generator\\_2d\\salt_pepper_noise.py",
    "ast_data": "FunctionDef name:make_samplers arg:self arg:device arg:dtype arguments arg arg arg Assign Call Call Assign Call Call Assign Call Assign Call"
  },
  {
    "library": "pytorch",
    "name": "additional_globals",
    "source_code": "def additional_globals(self) -> list[tuple[str, Any]]:\n    return []",
    "docstring": "If your codegen uses extra global values, add tuples of (identifier,reference to the value) here. For example, return ['List', typing.List] if you need `` in the global context.",
    "type": "method",
    "file_path": "pytorch\\torch\\fx\\graph.py",
    "ast_data": "FunctionDef name:additional_globals arg:self arguments arg Return return:no"
  },
  {
    "library": "django",
    "name": "legend_tag",
    "source_code": "def legend_tag(self, contents=None, attrs=None, label_suffix=None):\n    return self.label_tag(contents, attrs, label_suffix, tag='legend')",
    "docstring": "Wrap the given contents in a , if the field has an ID attribute. Contents should be mark_safe'd to avoid HTML escaping. If contents aren't given, use the field's HTML-escaped label. If attrs are given, use them as HTML attributes on the tag. label_suffix overrides the form's label_suffix.",
    "type": "method",
    "file_path": "django\\django\\forms\\boundfield.py",
    "ast_data": "FunctionDef name:legend_tag arg:self arg:contents arg:attrs arg:label_suffix arguments arg arg arg arg Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "reduce",
    "source_code": "def reduce(self, func: Callable) -> Self:\n    assert self.ndim == 2\n    res_blocks: list[Block] = []\n    for blk in self.blocks:\n        nbs = blk.reduce(func)\n        res_blocks.extend(nbs)\n    index = Index([None])\n    new_mgr = type(self).from_blocks(res_blocks, [self.items, index])\n    return new_mgr",
    "docstring": "Apply reduction function blockwise, returning a single-row BlockManager. Parameters ---------- func : reduction function Returns ------- BlockManager",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\internals\\managers.py",
    "ast_data": "FunctionDef name:reduce arg:self arg:func arguments arg arg Compare For Assign Call Call Assign Call Assign Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "serialize_many_sparse_tensors",
    "source_code": "def serialize_many_sparse_tensors(tensors):\n    ret = nest.pack_sequence_as(tensors, [sparse_ops.serialize_many_sparse(tensor, out_type=dtypes.variant) if sparse_tensor.is_sparse(tensor) else tensor for tensor in nest.flatten(tensors)])\n    return ret",
    "docstring": "Serializes many sparse tensors into a batch. Args: tensors: a tensor structure to serialize. Returns: with any sparse tensors replaced by the serialized batch.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\data\\util\\sparse.py",
    "ast_data": "FunctionDef name:serialize_many_sparse_tensors arg:tensors arguments arg Assign Call Call Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "wrap_storage_backend",
    "source_code": "@property\ndef wrap_storage_backend(self: torch.storage._StorageBase) -> bool:\n    return self.device.type == custom_backend_name",
    "docstring": "Return the internal :class:.",
    "type": "function",
    "file_path": "pytorch\\torch\\utils\\backend_registration.py",
    "ast_data": "FunctionDef name:wrap_storage_backend arg:self arguments arg Return return:yes Compare"
  },
  {
    "library": "tensorflow",
    "name": "_resolve_task_configuration",
    "source_code": "def _resolve_task_configuration(self):\n    hostlist = self._resolve_hostlist()\n    tasks_per_node = expand_tasks_per_node(_get_slurm_var('STEP_TASKS_PER_NODE'))\n    return {host: num_tasks for host, num_tasks in zip(hostlist, tasks_per_node)}",
    "docstring": "Creates a mapping of hostnames to the number of tasks allocated on it. Reads the SLURM environment to determine the nodes involved in the current job step and number of tasks running on each node. Returns a dictionary mapping each hostname to the number of tasks.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\cluster_resolver\\slurm_cluster_resolver.py",
    "ast_data": "FunctionDef name:_resolve_task_configuration arg:self arguments arg Assign Call Assign Call Call Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "get_path",
    "source_code": "def get_path(self):\n    return self._path",
    "docstring": "Get the of the polygon.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\patches.py",
    "ast_data": "FunctionDef name:get_path arg:self arguments arg Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "get_top_margin_bbox",
    "source_code": "def get_top_margin_bbox(self, rows=0, cols=0):\n    rows = np.atleast_1d(rows)\n    cols = np.atleast_1d(cols)\n    bbox = Bbox.from_extents(self.lefts[cols[0]].value(), self.tops[rows[0]].value() - self.margins['topcb'][rows[0]].value(), self.rights[cols[-1]].value(), self.tops[rows[0]].value() - self.margins['topcb'][rows[0]].value() - self.margins['top'][rows[0]].value())\n    return bbox",
    "docstring": "Return the left margin bounding box of the subplot specs given by rows and cols. rows and cols can be spans.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\_layoutgrid.py",
    "ast_data": "FunctionDef name:get_top_margin_bbox arg:self arg:rows arg:cols arguments arg arg arg Assign Call Assign Call Assign Call Call Call Call Call Call Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "determine_observer_insert_points",
    "source_code": "@abstractmethod\ndef determine_observer_insert_points(self, model) -> dict:\n    pass",
    "docstring": "Args model (nn.Module or subclass): model to find observer insertion points Returns a Dict mapping from unique observer fqns (where we want to insert them) to a Dict. This dict maps string keys to detector specific information",
    "type": "method",
    "file_path": "pytorch\\torch\\ao\\quantization\\fx\\_model_report\\detector.py",
    "ast_data": "FunctionDef name:determine_observer_insert_points arg:self arg:model arguments arg arg"
  },
  {
    "library": "pytorch",
    "name": "nn_module_get_all_hooks",
    "source_code": "def nn_module_get_all_hooks(mod, check_forward_hooks=False, check_backward_hooks=False, check_state_dict_hooks=False):\n    hook_dicts_to_check = []\n    check_all_hooks = not check_forward_hooks and (not check_backward_hooks) and (not check_state_dict_hooks)\n    if check_forward_hooks or check_all_hooks:\n        hook_dicts_to_check.extend(forward_hook_names)\n    if check_backward_hooks or check_all_hooks:\n        hook_dicts_to_check.extend(backward_hook_names)\n    if check_state_dict_hooks:\n        hook_dicts_to_check.extend(state_dict_hook_names)\n    all_hooks = []\n    for hook_dict_name in hook_dicts_to_check:\n        hooks = getattr(mod, hook_dict_name, [])\n        for hook_name in hooks:\n            hook = hooks[hook_name]\n            all_hooks.append(hook)\n    return all_hooks",
    "docstring": "Sometimes its useful to differentiate between types of hooks such as forward/backward/pre hooks executed during module.__call__, and state_dict hooks which are executed separately.",
    "type": "function",
    "file_path": "pytorch\\torch\\_dynamo\\utils.py",
    "ast_data": "FunctionDef name:nn_module_get_all_hooks arg:mod arg:check_forward_hooks arg:check_backward_hooks arg:check_state_dict_hooks arguments arg arg arg arg Assign Assign BoolOp If BoolOp Call If BoolOp Call If Call Assign For Assign Call For Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_tensor_shape_tensor_conversion_function",
    "source_code": "def _tensor_shape_tensor_conversion_function(s, dtype=None, name=None, as_ref=False):\n    _ = as_ref\n    if not s.is_fully_defined():\n        raise ValueError(f'Cannot convert a partially known TensorShape {s} to a Tensor.')\n    s_list = s.as_list()\n    int64_value = 0\n    for dim in s_list:\n        if dim >= 2 ** 31:\n            int64_value = dim\n            break\n    if dtype is not None:\n        if dtype not in (dtypes.int32, dtypes.int64):\n            raise TypeError(f'Cannot convert TensorShape {s} to dtype {dtype}. Allowed dtypes are tf.int32 and tf.int64.')\n        if dtype == dtypes.int32 and int64_value:\n            raise ValueError(f'Cannot convert TensorShape {s} to dtype int32; a dimension is too large. Consider using tf.int64.')\n    else:\n        dtype = dtypes.int64 if int64_value else dtypes.int32\n    if name is None:\n        name = 'shape_as_tensor'\n    return constant(s_list, dtype=dtype, name=name)",
    "docstring": "Function to convert TensorShape to Tensor.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\constant_op.py",
    "ast_data": "FunctionDef name:_tensor_shape_tensor_conversion_function arg:s arg:dtype arg:name arg:as_ref arguments arg arg arg arg Assign If Call Raise Call Assign Call Assign For If Compare Assign If Compare If Compare Raise Call If BoolOp Compare Raise Call Assign If Compare Assign Return return:yes Call"
  },
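  The conversion function above is what makes a fully defined `tf.TensorShape` usable anywhere a tensor is expected. A short demonstration, assuming TensorFlow 2.x:

```python
import tensorflow as tf

shape = tf.TensorShape([2, 3])
t = tf.convert_to_tensor(shape)     # routed through the conversion function above
print(t.dtype, t.numpy())           # <dtype: 'int32'> [2 3]

# A partially known shape cannot be converted, per the ValueError in the source:
# tf.convert_to_tensor(tf.TensorShape([None, 3]))  # raises ValueError
```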
  {
    "library": "tensorflow",
    "name": "broadcast_send_v2",
    "source_code": "def broadcast_send_v2(t, group_size, group_key, instance_key, communication_hint='auto', timeout=0):\n    return gen_collective_ops.collective_bcast_send_v2(t, group_size=group_size, group_key=group_key, instance_key=instance_key, communication_hint=communication_hint.lower(), timeout_seconds=timeout)",
    "docstring": "Broadcasts one tensor to a group of others, across devices. Args: t: the tensor to be sent. group_size: an int32 tensor. One plus the number of receiving tensors, i.e. the total number of devices participating. Each tensor must reside on a different device. group_key: an int32 tensor identifying the group of devices. instance_key: an int32 tensor identifying the participating group of Ops. communication_hint: preferred collective communication. The implementation may fall back to another mechanism. Options include , , and . timeout: If set to a non zero, set a completion timeout to detect staleness. If the timer goes off, a DeadlineExceededError is raised. The timeout value in seconds. This feature is experimental. Returns: An Op implementing the distributed broadcast send.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\collective_ops.py",
    "ast_data": "FunctionDef name:broadcast_send_v2 arg:t arg:group_size arg:group_key arg:instance_key arg:communication_hint arg:timeout arguments arg arg arg arg arg arg Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_unwrap_or_tile",
    "source_code": "def _unwrap_or_tile(self, wrapped_tensor):\n    output, is_stacked = (wrapped_tensor.t, wrapped_tensor.is_stacked)\n    if is_stacked:\n        return output\n    else:\n        return _stack(output, self._loop_len_vector).t",
    "docstring": "Given a wrapped tensor, unwrap if stacked. Otherwise, tiles it.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\parallel_for\\pfor.py",
    "ast_data": "FunctionDef name:_unwrap_or_tile arg:self arg:wrapped_tensor arguments arg arg Assign If Return return:yes Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "get_markeredgecolor",
    "source_code": "def get_markeredgecolor(self):\n    mec = self._markeredgecolor\n    if cbook._str_equal(mec, 'auto'):\n        if mpl.rcParams['_internal.classic_mode']:\n            if self._marker.get_marker() in ('.', ','):\n                return self._color\n            if self._marker.is_filled() and self._marker.get_fillstyle() != 'none':\n                return 'k'\n        return self._color\n    else:\n        return mec",
    "docstring": "Return the marker edge color. See also .",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\lines.py",
    "ast_data": "FunctionDef name:get_markeredgecolor arg:self arguments arg Assign If Call If If Compare Call Return return:yes If BoolOp Call Compare Call Return return:yes Return return:yes Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_optimize",
    "source_code": "def _optimize(rebuild_ctx: Callable[[], Union[OptimizeContext, _NullDecorator]], backend='inductor', *, nopython=False, guard_export_fn=None, guard_fail_fn=None, guard_filter_fn=None, disable=False, dynamic=None) -> Union[OptimizeContext, _NullDecorator]:\n    check_if_dynamo_supported()\n    check_for_incompatible_configs()\n    hooks = Hooks(guard_export_fn=guard_export_fn, guard_fail_fn=guard_fail_fn, guard_filter_fn=guard_filter_fn)\n    torch._C._log_api_usage_once('torch._dynamo.optimize')\n    if disable or os.environ.get('TORCHDYNAMO_DISABLE', '') == '1' or (not justknobs_check('pytorch/compiler:enable_dynamo')):\n        return _NullDecorator()\n    if nopython:\n        return optimize_assert(backend, dynamic=dynamic, hooks=hooks, rebuild_ctx=rebuild_ctx)\n    backend = get_compiler_fn(backend)\n    backend_ctx_ctor = getattr(backend, 'backend_ctx_ctor', null_context)\n    return _optimize_catch_errors(convert_frame.convert_frame(backend, hooks=hooks), hooks, backend_ctx_ctor, dynamic=dynamic, compiler_config=backend.get_compiler_config() if hasattr(backend, 'get_compiler_config') else None, rebuild_ctx=rebuild_ctx)",
    "docstring": "The main entrypoint of TorchDynamo. Do graph capture and call backend() to optimize extracted graphs. Args: backend: One of the two things: - Either, a function/callable taking a torch.fx.GraphModule and example_inputs and returning a python callable that runs the graph faster. One can also provide additional context for the backend, like torch.jit.fuser(\"fuser2\"), by setting the backend_ctx_ctor attribute. See AOTAutogradMemoryEfficientFusionWithContext for the usage. - Or, a string backend name in nopython: If True, graph breaks will be errors and there will be a single whole-program graph. disable: If True, turn this decorator into a no-op dynamic: If True, upfront compile as dynamic a kernel as possible. If False, disable all dynamic shapes support (always specialize). If None, automatically detect when sizes vary and generate dynamic kernels upon recompile. Example Usage:: @torch._dynamo.optimize() def toy_example(a, b): ...",
    "type": "function",
    "file_path": "pytorch\\torch\\_dynamo\\eval_frame.py",
    "ast_data": "FunctionDef name:_optimize arg:rebuild_ctx arg:backend arguments arg arg arg arg arg arg arg arg Call Call Assign Call Call If BoolOp Compare Call Call Return return:yes Call If Return return:yes Call Assign Call Assign Call Return return:yes Call Call Call Call"
  },
  {
    "library": "scipy",
    "name": "fs",
    "source_code": "@property\ndef fs(self) -> float:\n    return self._fs",
    "docstring": "Sampling frequency of input signal and of the window. The sampling frequency is the inverse of the sampling interval . A ``). ShortTimeFFT: Class this property belongs to.",
    "type": "method",
    "file_path": "scipy\\scipy\\signal\\_short_time_fft.py",
    "ast_data": "FunctionDef name:fs arg:self arguments arg Return return:yes"
  },
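  A quick look at the property above, assuming a recent SciPy (`ShortTimeFFT` was added in SciPy 1.12):

```python
from scipy.signal import ShortTimeFFT
from scipy.signal.windows import gaussian

win = gaussian(50, std=8, sym=True)          # analysis window
SFT = ShortTimeFFT(win, hop=10, fs=1000.0)   # 1 kHz sampling rate
print(SFT.fs)   # 1000.0
print(SFT.T)    # 0.001  -- the sampling interval, i.e. 1 / fs
```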
  {
    "library": "pytorch",
    "name": "RendezvousConnectionError",
    "source_code": "class RendezvousConnectionError(RendezvousError):\n    pass",
    "docstring": "Raised when the connection to a rendezvous backend has failed.",
    "type": "class",
    "file_path": "pytorch\\torch\\distributed\\elastic\\rendezvous\\api.py",
    "ast_data": "ClassDef name:RendezvousConnectionError"
  },
  {
    "library": "pytorch",
    "name": "increment",
    "source_code": "def increment(self, metric: str, value: int) -> None:\n    if self._level == 0:\n        raise RuntimeError(f'Cannot increment {metric} outside of a MetricsContext')\n    if metric not in self._metrics:\n        self._metrics[metric] = 0\n    self._metrics[metric] += value",
    "docstring": "Increment a metric by a given amount.",
    "type": "method",
    "file_path": "pytorch\\torch\\_dynamo\\metrics_context.py",
    "ast_data": "FunctionDef name:increment arg:self arg:metric arg:value arguments arg arg arg If Compare Raise Call If Compare Assign"
  },
  {
    "library": "matplotlib",
    "name": "__init__",
    "source_code": "def __init__(self, head_length=0.4, head_width=0.4, tail_width=0.4):\n    self.head_length, self.head_width, self.tail_width = (head_length, head_width, tail_width)\n    super().__init__()",
    "docstring": "Parameters ---------- head_length : float, default: 0.4 Length of the arrow head. head_width : float, default: 0.4 Width of the arrow head. tail_width : float, default: 0.4 Width of the arrow tail.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\patches.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:head_length arg:head_width arg:tail_width arguments arg arg arg arg Assign Call Call"
  },
  {
    "library": "tensorflow",
    "name": "concat",
    "source_code": "@dispatch.dispatch_for_types(array_ops.concat, StructuredTensor)\ndef concat(values, axis, name: str='concat'):\n    if name is None:\n        name = 'concat'\n    _assert_concat_compatible_structured_tensors(values)\n\n    def leaf_op(values):\n        return array_ops.concat(values, axis)\n    axis = array_ops.get_positive_axis(axis, values[0].rank)\n    with ops.name_scope(name, 'StructuredConcat', values):\n        return _extend_op(values, leaf_op)",
    "docstring": "tf.concat for structured tensors. Does not support (yet) checks on illegal axis values, et cetera. Args: values: a sequence of StructuredTensors. axis: an axis to concatenate upon. name: the name of the op(s). Returns: the params reorganized according to indices.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\structured\\structured_array_ops.py",
    "ast_data": "FunctionDef name:concat arg:values arg:axis arg:name arguments arg arg arg If Compare Assign Call FunctionDef name:leaf_op arg:values arguments arg Return return:yes Call Assign Call With Call Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_deserialization_dependencies",
    "source_code": "def _deserialization_dependencies(self, children):\n    return children",
    "docstring": "Returns concrete functions which must be loaded before this object.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\polymorphic_function\\polymorphic_function.py",
    "ast_data": "FunctionDef name:_deserialization_dependencies arg:self arg:children arguments arg arg Return return:yes"
  },
  {
    "library": "django",
    "name": "force_bytes",
    "source_code": "def force_bytes(s, encoding='utf-8', strings_only=False, errors='strict'):\n    if isinstance(s, bytes):\n        if encoding == 'utf-8':\n            return s\n        else:\n            return s.decode('utf-8', errors).encode(encoding, errors)\n    if strings_only and is_protected_type(s):\n        return s\n    if isinstance(s, memoryview):\n        return bytes(s)\n    return str(s).encode(encoding, errors)",
    "docstring": "Similar to smart_bytes, except that lazy instances are resolved to strings, rather than kept as lazy objects. If strings_only is True, don't convert (some) non-string-like objects.",
    "type": "function",
    "file_path": "django\\django\\utils\\encoding.py",
    "ast_data": "FunctionDef name:force_bytes arg:s arg:encoding arg:strings_only arg:errors arguments arg arg arg arg If Call If Compare Return return:yes Return return:yes Call Call If BoolOp Call Return return:yes If Call Return return:yes Call Return return:yes Call Call"
  },
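  `force_bytes` is a public Django utility and needs no configured settings, so the entry above is easy to exercise directly (assuming Django is installed):

```python
from django.utils.encoding import force_bytes

print(force_bytes("café"))                 # b'caf\xc3\xa9' -- str encoded as UTF-8
print(force_bytes(b"raw"))                 # b'raw' -- bytes returned unchanged for utf-8
print(force_bytes(42))                     # b'42' -- non-strings go through str()
print(force_bytes(42, strings_only=True))  # 42 -- ints are a "protected" type, left as-is
```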
  {
    "library": "pandas",
    "name": "memory_usage_bytes",
    "source_code": "@property\n@abstractmethod\ndef memory_usage_bytes(self) -> int:\n    pass",
    "docstring": "Memory usage in bytes. Returns ------- memory_usage_bytes : int Object's total memory usage in bytes.",
    "type": "method",
    "file_path": "pandas\\pandas\\io\\formats\\info.py",
    "ast_data": "FunctionDef name:memory_usage_bytes arg:self arguments arg"
  },
  {
    "library": "numpy",
    "name": "_multi_dot",
    "source_code": "def _multi_dot(arrays, order, i, j, out=None):\n    if i == j:\n        assert out is None\n        return arrays[i]\n    else:\n        return dot(_multi_dot(arrays, order, i, order[i, j]), _multi_dot(arrays, order, order[i, j] + 1, j), out=out)",
    "docstring": "Actually do the multiplication with the given order.",
    "type": "function",
    "file_path": "numpy\\numpy\\linalg\\_linalg.py",
    "ast_data": "FunctionDef name:_multi_dot arg:arrays arg:order arg:i arg:j arg:out arguments arg arg arg arg arg If Compare Compare Return return:yes Return return:yes Call Call Call"
  },
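  `_multi_dot` is the internal recursion behind the public `numpy.linalg.multi_dot`, which first computes an optimal parenthesization (the `order` table) and then multiplies in that order. A demonstration via the public API:

```python
import numpy as np

rng = np.random.default_rng(0)
A = rng.random((10, 100))
B = rng.random((100, 1000))
C = rng.random((1000, 5))

# multi_dot picks the cheapest parenthesization; for these shapes that is
# A @ (B @ C): ~505k scalar multiplies versus ~1.05M for (A @ B) @ C.
out = np.linalg.multi_dot([A, B, C])
print(out.shape)                    # (10, 5)
print(np.allclose(out, A @ B @ C))  # True
```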
  {
    "library": "django",
    "name": "_check_readonly_fields",
    "source_code": "def _check_readonly_fields(self, obj):\n    if obj.readonly_fields == ():\n        return []\n    elif not isinstance(obj.readonly_fields, (list, tuple)):\n        return must_be('a list or tuple', option='readonly_fields', obj=obj, id='admin.E034')\n    else:\n        return list(chain.from_iterable((self._check_readonly_fields_item(obj, field_name, 'readonly_fields[%d]' % index) for index, field_name in enumerate(obj.readonly_fields))))",
    "docstring": "Check that readonly_fields refers to proper attribute or field.",
    "type": "method",
    "file_path": "django\\django\\contrib\\admin\\checks.py",
    "ast_data": "FunctionDef name:_check_readonly_fields arg:self arg:obj arguments arg arg If Compare Return return:no If Call Return return:yes Call Return return:yes Call Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "log",
    "source_code": "@dispatch.add_dispatch_support\n@doc_controls.do_not_generate_docs\ndef log(x):\n    return math_ops.log(x)",
    "docstring": "Element-wise log. Args: x: Tensor or variable. Returns: A tensor.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\backend.py",
    "ast_data": "FunctionDef name:log arg:x arguments arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "_register_sharded_op_on_local_shards",
    "source_code": "def _register_sharded_op_on_local_shards(op, early_stop_func=None, extra_check=None, customized_func=None):\n\n    @_sharded_op_impl(op)\n    @_sharded_op_common(op, early_stop_func, extra_check)\n    def sharded_tensor_op_on_local_shards(types, args=(), kwargs=None, pg=None):\n        st = args[0]\n        st_metadata = st.metadata()\n        local_shards = st.local_shards()\n        local_shards_new = []\n        if customized_func:\n            local_shards_new, st_metadata = customized_func(args, kwargs, pg)\n        else:\n            for local_shard in local_shards:\n                args = (local_shard.tensor, *args[1:])\n                local_shards_new.append(Shard(op(*args, **kwargs), local_shard.metadata))\n        return ShardedTensor._init_from_local_shards_and_global_metadata(local_shards_new, st_metadata, process_group=pg, init_rrefs=st._init_rrefs, sharding_spec=st.sharding_spec())",
    "docstring": "Handles `` dispatch.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\_shard\\sharded_tensor\\_ops\\_common.py",
    "ast_data": "FunctionDef name:_register_sharded_op_on_local_shards arg:op arg:early_stop_func arg:extra_check arg:customized_func arguments arg arg arg arg FunctionDef name:sharded_tensor_op_on_local_shards arg:types arg:args arg:kwargs arg:pg arguments arg arg arg arg Assign Assign Call Assign Call Assign If Assign Call For Assign Call Call Call Return return:yes Call Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "manual_variable_initialization",
    "source_code": "@doc_controls.do_not_generate_docs\ndef manual_variable_initialization(value):\n    global _MANUAL_VAR_INIT\n    _MANUAL_VAR_INIT = value",
    "docstring": "Sets the manual variable initialization flag. This boolean flag determines whether variables should be initialized as they are instantiated (default), or if the user should handle the initialization (e.g. via ). Args: value: Python boolean.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\backend.py",
    "ast_data": "FunctionDef name:manual_variable_initialization arg:value arguments arg Assign"
  },
  {
    "library": "pytorch",
    "name": "to_dict",
    "source_code": "def to_dict(self) -> dict[str, Union[int, float, str, list[int], list[float], list[str]]]:\n    attrs: dict[str, Union[int, float, str, list[int], list[float], list[str]]] = {}\n    for i, key in enumerate(self.attr_keys):\n        attr_type = self.attr_types[i]\n        if attr_type == _INT_TYPE:\n            attrs[key] = self.attr_ints[self.attr_pos[i][0]]\n        elif attr_type == _FLOAT_TYPE:\n            attrs[key] = self.attr_floats[self.attr_pos[i][0]]\n        elif attr_type == _STRING_TYPE:\n            attrs[key] = self.attr_strs[self.attr_pos[i][0]]\n        elif attr_type == _FLOAT_SEQ_TYPE:\n            attrs[key] = self.attr_floats[self.attr_pos[i][0]:self.attr_pos[i][1]]\n        elif attr_type == _INT_SEQ_TYPE:\n            attrs[key] = self.attr_ints[self.attr_pos[i][0]:self.attr_pos[i][1]]\n        elif attr_type == _STRING_SEQ_TYPE:\n            attrs[key] = self.attr_strs[self.attr_pos[i][0]:self.attr_pos[i][1]]\n        else:\n            raise ValueError(f'Unsupported attribute type: {attr_type}')\n    return attrs",
    "docstring": "Convert the encoded attributes back to a dictionary for creating an ONNX node.",
    "type": "method",
    "file_path": "pytorch\\torch\\onnx\\ops\\_symbolic_impl.py",
    "ast_data": "FunctionDef name:to_dict arg:self arguments arg For Call Assign If Compare Assign If Compare Assign If Compare Assign If Compare Assign If Compare Assign If Compare Assign Raise Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "get_units",
    "source_code": "def get_units(self):\n    return self.units",
    "docstring": "Return the units for axis.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\axis.py",
    "ast_data": "FunctionDef name:get_units arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "batch_get_value",
    "source_code": "@dispatch.add_dispatch_support\n@doc_controls.do_not_generate_docs\ndef batch_get_value(tensors):\n    if context.executing_eagerly():\n        return [x.numpy() for x in tensors]\n    elif ops.inside_function():\n        raise RuntimeError('Cannot get value inside Tensorflow graph function.')\n    if tensors:\n        return get_session(tensors).run(tensors)\n    else:\n        return []",
    "docstring": "Returns the value of more than one tensor variable. Args: tensors: list of ops to run. Returns: A list of Numpy arrays. Raises: RuntimeError: If this method is called inside defun.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\backend.py",
    "ast_data": "FunctionDef name:batch_get_value arg:tensors arguments arg If Call Return return:yes Call If Call Raise Call If Return return:yes Call Call Return return:no"
  },
  {
    "library": "django",
    "name": "exists",
    "source_code": "def exists(self, name):\n    raise NotImplementedError('subclasses of Storage must provide an exists() method')",
    "docstring": "Return True if a file referenced by the given name already exists in the storage system, or False if the name is available for a new file.",
    "type": "method",
    "file_path": "django\\django\\core\\files\\storage\\base.py",
    "ast_data": "FunctionDef name:exists arg:self arg:name arguments arg arg Raise Call"
  },
  {
    "library": "django",
    "name": "recipients",
    "source_code": "def recipients(self):\n    return [email for email in self.to + self.cc + self.bcc if email]",
    "docstring": "Return a list of all recipients of the email (includes direct addressees as well as Cc and Bcc entries).",
    "type": "method",
    "file_path": "django\\django\\core\\mail\\message.py",
    "ast_data": "FunctionDef name:recipients arg:self arguments arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "PassBase",
    "source_code": "@compatibility(is_backward_compatible=False)\nclass PassBase(abc.ABC):\n\n    def __call__(self, graph_module: GraphModule) -> Optional[PassResult]:\n        self.requires(graph_module)\n        res = self.call(graph_module)\n        self.ensures(graph_module)\n        return res\n\n    @abc.abstractmethod\n    def call(self, graph_module: GraphModule) -> Optional[PassResult]:\n        pass\n\n    def requires(self, graph_module: GraphModule) -> None:\n        pass\n\n    def ensures(self, graph_module: GraphModule) -> None:\n        pass",
    "docstring": "Base interface for implementing passes. It is required to implement the function so that we can directly pass instances of the Pass directly to the PassManager and call them as a function. We can directly pass an instance of a class implementing this interface into the PassManager's attribute.",
    "type": "class",
    "file_path": "pytorch\\torch\\fx\\passes\\infra\\pass_base.py",
    "ast_data": "ClassDef name:PassBase FunctionDef name:__call__ arg:self arg:graph_module arguments arg arg Call Assign Call Call Return return:yes FunctionDef name:call arg:self arg:graph_module arguments arg arg FunctionDef name:requires arg:self arg:graph_module arguments arg arg FunctionDef name:ensures arg:self arg:graph_module arguments arg arg Call"
  },
  {
    "library": "tensorflow",
    "name": "_reconstruct_non_debug_graph_def",
    "source_code": "def _reconstruct_non_debug_graph_def(self):\n    if self._non_debug_graph_def:\n        return\n    self._non_debug_graph_def = graph_pb2.GraphDef()\n    for node in self._debug_graph_def.node:\n        if is_copy_node(node.name) or is_debug_node(node.name):\n            continue\n        new_node = self._non_debug_graph_def.node.add()\n        new_node.CopyFrom(node)\n        del new_node.input[:]\n        for inp in self._node_inputs[node.name]:\n            new_node.input.append(inp)\n        for ctrl_inp in self._node_ctrl_inputs[node.name]:\n            new_node.input.append('^' + ctrl_inp)",
    "docstring": "Reconstruct non-debug GraphDef. Non-debug GraphDef means the original GraphDef without the Copy* and Debug nodes inserted by the debugger.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\debug\\lib\\debug_graphs.py",
    "ast_data": "FunctionDef name:_reconstruct_non_debug_graph_def arg:self arguments arg If Return return:no Assign Call For If BoolOp Call Call Assign Call Call For Call For Call"
  },
  {
    "library": "tensorflow",
    "name": "EnterGradientColocation",
    "source_code": "def EnterGradientColocation(self, op: ops.Operation, gradient_uid):\n    if self._outer_context:\n        self._outer_context.EnterGradientColocation(op, gradient_uid)",
    "docstring": "Start building a gradient colocated with an op.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\control_flow_ops.py",
    "ast_data": "FunctionDef name:EnterGradientColocation arg:self arg:op arg:gradient_uid arguments arg arg arg If Call"
  },
  {
    "library": "tensorflow",
    "name": "SliceTransformer",
    "source_code": "class SliceTransformer(converter.Base):\n\n    def _process_single_assignment(self, target, value):\n        if not isinstance(target, gast.Subscript):\n            return None\n        s = target.slice\n        if isinstance(s, (gast.Tuple, gast.Slice)):\n            return None\n        template = '\\n      target = ag__.set_item(target, key, item)\\n    '\n        return templates.replace(template, target=target.value, key=target.slice, item=value)\n\n    def visit_Assign(self, node):\n        node = self.generic_visit(node)\n        if len(node.targets) != 1:\n            raise NotImplementedError('multiple assignment')\n        replacement = self._process_single_assignment(node.targets[0], node.value)\n        if replacement is not None:\n            return replacement\n        return node\n\n    def visit_Subscript(self, node):\n        node = self.generic_visit(node)\n        s = node.slice\n        if isinstance(s, (gast.Tuple, gast.Slice)):\n            return node\n        if not isinstance(node.ctx, gast.Load):\n            return node\n        dtype = self.get_definition_directive(node.value, directives.set_element_type, 'dtype', default=templates.replace_as_expression('None'))\n        template = '\\n      ag__.get_item(\\n          target,\\n          key,\\n          opts=ag__.GetItemOpts(element_dtype=dtype))\\n    '\n        return templates.replace_as_expression(template, target=node.value, key=s, dtype=dtype)",
    "docstring": "Converts slicing operations to their TF counterpart. Currently, relying on the default slice operator that Tensor uses is insufficient, because TensorArray and tensor lists use dedicated index read and write functions.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\autograph\\converters\\slices.py",
    "ast_data": "ClassDef name:SliceTransformer FunctionDef name:_process_single_assignment arg:self arg:target arg:value arguments arg arg arg If Call Return return:no Assign If Call Return return:no Assign Return return:yes Call FunctionDef name:visit_Assign arg:self arg:node arguments arg arg Assign Call If Compare Call Raise Call Assign Call If Compare Return return:yes Return return:yes FunctionDef name:visit_Subscript arg:self arg:node arguments arg arg Assign Call Assign If Call Return return:yes If Call Return return:yes Assign Call Call Assign Return return:yes Call"
  },
  {
    "library": "django",
    "name": "parse_header_parameters",
    "source_code": "def parse_header_parameters(line, max_length=MAX_HEADER_LENGTH):\n    if max_length is not None and line and (len(line) > max_length):\n        raise ValueError('Unable to parse header parameters (value too long).')\n    m = Message()\n    m['content-type'] = line\n    params = m.get_params()\n    pdict = {}\n    key = params.pop(0)[0].lower()\n    for name, value in params:\n        if not name:\n            continue\n        if isinstance(value, tuple):\n            value = collapse_rfc2231_value(value)\n        pdict[name] = value\n    return (key, pdict)",
    "docstring": "Parse a Content-type like header. Return the main content-type and a dictionary of options. If is longer than , is raised.",
    "type": "function",
    "file_path": "django\\django\\utils\\http.py",
    "ast_data": "FunctionDef name:parse_header_parameters arg:line arg:max_length arguments arg arg If BoolOp Compare Compare Call Raise Call Assign Call Assign Assign Call Assign Assign Call Call For If If Call Assign Call Assign Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "button_press_handler",
    "source_code": "def button_press_handler(event, canvas=None, toolbar=None):\n    if canvas is None:\n        canvas = event.canvas\n    if toolbar is None:\n        toolbar = canvas.toolbar\n    if toolbar is not None:\n        button_name = str(MouseButton(event.button))\n        if button_name in rcParams['keymap.back']:\n            toolbar.back()\n        elif button_name in rcParams['keymap.forward']:\n            toolbar.forward()",
    "docstring": "The default Matplotlib button actions for extra mouse buttons. Parameters are as for , except that *event* is a .",
    "type": "function",
    "file_path": "matplotlib\\lib\\matplotlib\\backend_bases.py",
    "ast_data": "FunctionDef name:button_press_handler arg:event arg:canvas arg:toolbar arguments arg arg arg If Compare Assign If Compare Assign If Compare Assign Call Call If Compare Call If Compare Call"
  },
  {
    "library": "tensorflow",
    "name": "enable_v2_tensorshape",
    "source_code": "@tf_export(v1=['enable_v2_tensorshape'])\ndef enable_v2_tensorshape():\n    global _TENSORSHAPE_V2_OVERRIDE\n    _TENSORSHAPE_V2_OVERRIDE = True\n    logging.vlog(1, 'Enabling v2 tensorshape')\n    _api_usage_gauge.get_cell().set(True)",
    "docstring": "In TensorFlow 2.0, iterating over a TensorShape instance returns values. This enables the new behavior. Concretely, returned a Dimension instance in V1, but it V2 it returns either an integer, or None. Examples:",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\tensor_shape.py",
    "ast_data": "FunctionDef name:enable_v2_tensorshape arguments Assign Call Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "from_dim_map",
    "source_code": "@classmethod\ndef from_dim_map(cls, mesh: DeviceMesh, dim_map: list[int], sums: list[int], tensor_meta: Optional[TensorMeta]=None) -> 'DTensorSpec':\n    placements: list[Placement] = [Replicate() for _ in range(mesh.ndim)]\n    for s in sums:\n        placements[s] = Partial()\n    for i, m in enumerate(dim_map):\n        if m >= 0:\n            placement = placements[m]\n            if placement.is_shard():\n                placement = cast(Shard, placement)\n                raise RuntimeError(f\"DeviceMesh dimension cann't be mapped to two dimension of the same tensor: {i} and {placement.dim}\")\n            elif placement.is_partial():\n                raise RuntimeError(f'DeviceMesh dimension {m} cannot be both shard and partial!')\n            placements[m] = Shard(i)\n    return cls(mesh, tuple(placements), tensor_meta=tensor_meta)",
    "docstring": "Construct a DTensorSpec from dim_map list and pending sum. Args: mesh (class:): device mesh to be used in the DTensorSpec dim_map (List[int]): a list of integer that represents sharding on each tensor dimension, see property doc for details sums (List[int]): a list of integer that represents the dist tensor have pending sum on which device mesh dimension. tensor meta (TensorMeta): DTensor metadata Return: a class: object",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\tensor\\_dtensor_spec.py",
    "ast_data": "FunctionDef name:from_dim_map arg:cls arg:mesh arg:dim_map arg:sums arg:tensor_meta arguments arg arg arg arg arg Call Call For Assign Call For Call If Compare Assign If Call Assign Call Raise Call If Call Raise Call Assign Call Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "is_exception_branch",
    "source_code": "def is_exception_branch(branch: str) -> bool:\n    return branch.split('/')[0] in {'main', 'nightly', 'release', 'landchecks'}",
    "docstring": "Branches that get opted out of experiments by default, until they're explicitly enabled.",
    "type": "function",
    "file_path": "pytorch\\.github\\scripts\\runner_determinator.py",
    "ast_data": "FunctionDef name:is_exception_branch arg:branch arguments arg Return return:yes Compare Call"
  },
  {
    "library": "tensorflow",
    "name": "_GetGrads",
    "source_code": "def _GetGrads(grads, op: ops.Operation):\n    if op in grads:\n        return grads[op]\n    else:\n        return [[] for _ in range(len(op.outputs))]",
    "docstring": "Gets all gradients for op.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\gradients_util.py",
    "ast_data": "FunctionDef name:_GetGrads arg:grads arg:op arguments arg arg If Compare Return return:yes Return return:yes Call Call"
  },
  {
    "library": "django",
    "name": "add_distinct_fields",
    "source_code": "def add_distinct_fields(self, *field_names):\n    self.distinct_fields = field_names\n    self.distinct = True",
    "docstring": "Add and resolve the given fields to the query's \"distinct on\" clause.",
    "type": "method",
    "file_path": "django\\django\\db\\models\\sql\\query.py",
    "ast_data": "FunctionDef name:add_distinct_fields arg:self arguments arg arg Assign Assign"
  },
  {
    "library": "tensorflow",
    "name": "_pow_flops",
    "source_code": "@ops.RegisterStatistics('Pow', 'flops')\ndef _pow_flops(graph, node):\n    return _binary_per_element_op_flops(graph, node)",
    "docstring": "Compute flops for Pow operation.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\profiler\\internal\\flops_registry.py",
    "ast_data": "FunctionDef name:_pow_flops arg:graph arg:node arguments arg arg Return return:yes Call Call"
  },
  {
    "library": "kornia",
    "name": "project_points_orthographic",
    "source_code": "def project_points_orthographic(points_in_camera: Tensor) -> Tensor:\n    KORNIA_CHECK_SHAPE(points_in_camera, ['*', '3'])\n    return points_in_camera[..., :2]",
    "docstring": "Project points from the camera frame into the canonical z=1 plane through orthographic projection. .. math:: \\begin{bmatrix} u \\\\ v \\end{bmatrix} = \\begin{bmatrix} x \\\\ y \\\\ z \\end{bmatrix} Args: points_in_camera: Tensor representing the points to project. Returns: Tensor representing the projected points. Example: >>> points = torch.tensor([1., 2., 3.]) >>> project_points_orthographic(points) tensor([1., 2.])",
    "type": "function",
    "file_path": "kornia\\kornia\\geometry\\camera\\projection_orthographic.py",
    "ast_data": "FunctionDef name:project_points_orthographic arg:points_in_camera arguments arg Call Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "_is_pairwise_metric",
    "source_code": "def _is_pairwise_metric(estimator):\n    metric = getattr(estimator, 'metric', None)\n    return bool(metric == 'precomputed')",
    "docstring": "Returns True if estimator accepts pairwise metric. Parameters ---------- estimator : object Estimator object to test. Returns ------- out : bool True if _pairwise is set to True and False otherwise.",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\utils\\estimator_checks.py",
    "ast_data": "FunctionDef name:_is_pairwise_metric arg:estimator arguments arg Assign Call Return return:yes Call Compare"
  },
  {
    "library": "tensorflow",
    "name": "_add_trackable_child",
    "source_code": "def _add_trackable_child(self, name, value):\n    self._track_trackable(value, name, overwrite=True)",
    "docstring": "Restores a connection between trackables when loading from SavedModel. SavedModel stores both the object metadata and its list of children. When loading, this function is used along with to load objects from the SavedModel: First, all saved objects are created with . After that is complete, the children are connected using . **Example** , and Keras layers use to track children. This is why users can call , and the variable will be automatically saved to the checkpoint. The implementation of this method for the listed objects is: Args: name: The name of the connection between the parent and child . value: The child object.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\trackable\\base.py",
    "ast_data": "FunctionDef name:_add_trackable_child arg:self arg:name arg:value arguments arg arg arg Call"
  },
  {
    "library": "tensorflow",
    "name": "_OptionalImpl",
    "source_code": "class _OptionalImpl(Optional):\n\n    def __init__(self, variant_tensor, element_spec):\n        super().__init__()\n        self._variant_tensor = variant_tensor\n        self._element_spec = element_spec\n\n    def has_value(self, name=None):\n        with ops.colocate_with(self._variant_tensor):\n            return gen_optional_ops.optional_has_value(self._variant_tensor, name=name)\n\n    def get_value(self, name=None):\n        with ops.name_scope(name, 'OptionalGetValue', [self._variant_tensor]) as scope:\n            with ops.colocate_with(self._variant_tensor):\n                result = gen_optional_ops.optional_get_value(self._variant_tensor, name=scope, output_types=structure.get_flat_tensor_types(self._element_spec), output_shapes=structure.get_flat_tensor_shapes(self._element_spec))\n            return structure.from_tensor_list(self._element_spec, result)\n\n    @property\n    def element_spec(self):\n        return self._element_spec\n\n    @property\n    def _type_spec(self):\n        return OptionalSpec.from_value(self)",
    "docstring": "Concrete implementation of . NOTE(mrry): This implementation is kept private, to avoid defining in the public API.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\data\\ops\\optional_ops.py",
    "ast_data": "ClassDef name:_OptionalImpl FunctionDef name:__init__ arg:self arg:variant_tensor arg:element_spec arguments arg arg arg Call Call Assign Assign FunctionDef name:has_value arg:self arg:name arguments arg arg With Call Return return:yes Call FunctionDef name:get_value arg:self arg:name arguments arg arg With Call With Call Assign Call Call Call Return return:yes Call FunctionDef name:element_spec arg:self arguments arg Return return:yes FunctionDef name:_type_spec arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "numpy",
    "name": "startswith",
    "source_code": "def startswith(self, prefix, start=0, end=None):\n    return startswith(self, prefix, start, end)",
    "docstring": "Returns a boolean array which is where the string element in starts with , otherwise . See Also -------- char.startswith",
    "type": "method",
    "file_path": "numpy\\numpy\\_core\\defchararray.py",
    "ast_data": "FunctionDef name:startswith arg:self arg:prefix arg:start arg:end arguments arg arg arg arg Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "seq_id",
    "source_code": "def seq_id():\n    return '%06d' % next(_layoutboxobjnum)",
    "docstring": "Generate a short sequential id for layoutbox objects.",
    "type": "function",
    "file_path": "matplotlib\\lib\\matplotlib\\_layoutgrid.py",
    "ast_data": "FunctionDef name:seq_id arguments Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "NumPyBackend",
    "source_code": "class NumPyBackend:\n    __ua_domain__ = 'numpy.scipy.fft'\n\n    @staticmethod\n    def __ua_function__(method, args, kwargs):\n        kwargs.pop('overwrite_x', None)\n        fn = getattr(np.fft, method.__name__, None)\n        return NotImplemented if fn is None else fn(*args, **kwargs)",
    "docstring": "Backend that uses numpy.fft",
    "type": "class",
    "file_path": "scipy\\scipy\\fft\\_debug_backends.py",
    "ast_data": "ClassDef name:NumPyBackend Assign FunctionDef name:__ua_function__ arg:method arg:args arg:kwargs arguments arg arg arg Call Assign Call Return return:yes Compare Call"
  },
  {
    "library": "django",
    "name": "login_not_required",
    "source_code": "def login_not_required(view_func):\n    view_func.login_required = False\n    return view_func",
    "docstring": "Decorator for views that allows access to unauthenticated requests.",
    "type": "function",
    "file_path": "django\\django\\contrib\\auth\\decorators.py",
    "ast_data": "FunctionDef name:login_not_required arg:view_func arguments arg Assign Return return:yes"
  },
  {
    "library": "scipy",
    "name": "draw",
    "source_code": "def draw(self, n, type_, min, max, squeezed_base_shape, rng=None):\n    rng = np.random.default_rng(rng)\n\n    def ints(*args, **kwargs):\n        return rng.integers(*args, **kwargs, endpoint=True)\n    uniform = rng.uniform if isinstance(self, _RealInterval) else ints\n    min_nn, max_nn = (min.copy(), max.copy())\n    i = np.isnan(min_nn) | np.isnan(max_nn)\n    min_nn[i] = 0\n    max_nn[i] = 1\n    shape = (n,) + squeezed_base_shape\n    if type_ == 'in':\n        z = uniform(min_nn, max_nn, size=shape)\n    elif type_ == 'on':\n        z_on_shape = shape\n        z = np.ones(z_on_shape)\n        i = rng.random(size=n) < 0.5\n        z[i] = min\n        z[~i] = max\n    elif type_ == 'out':\n        z = min_nn - uniform(1, 5, size=shape)\n        zr = max_nn + uniform(1, 5, size=shape)\n        i = rng.random(size=n) < 0.5\n        z[i] = zr[i]\n    elif type_ == 'nan':\n        z = np.full(shape, np.nan)\n    return z",
    "docstring": "Draw random values from the domain. Parameters ---------- n : int The number of values to be drawn from the domain. type_ : str A string indicating whether the values are - strictly within the domain ('in'), - at one of the two endpoints ('on'), - strictly outside the domain ('out'), or - NaN ('nan'). min, max : ndarray The endpoints of the domain. squeezed_based_shape : tuple of ints See _RealParameter.draw. rng : np.Generator The Generator used for drawing random values.",
    "type": "method",
    "file_path": "scipy\\scipy\\stats\\_distribution_infrastructure.py",
    "ast_data": "FunctionDef name:draw arg:self arg:n arg:type_ arg:min arg:max arg:squeezed_base_shape arg:rng arguments arg arg arg arg arg arg arg Assign Call FunctionDef name:ints arguments arg arg Return return:yes Call Assign Call Assign Call Call Assign Call Call Assign Assign Assign If Compare Assign Call If Compare Assign Assign Call Assign Compare Call Assign Assign If Compare Assign Call Assign Call Assign Compare Call Assign If Compare Assign Call Return return:yes"
  },
  {
    "library": "authlib",
    "name": "get_server_metadata",
    "source_code": "def get_server_metadata(self):\n    raise NotImplementedError()",
    "docstring": "Return server metadata which includes supported grant types, response types and etc.",
    "type": "method",
    "file_path": "authlib\\authlib\\oauth2\\rfc7591\\endpoint.py",
    "ast_data": "FunctionDef name:get_server_metadata arg:self arguments arg Raise Call"
  },
  {
    "library": "scikit-learn",
    "name": "_estimator_has",
    "source_code": "def _estimator_has(attr):\n\n    def check(self):\n        getattr(self.estimator, attr)\n        return True\n    return check",
    "docstring": "Check that final_estimator has . Used together with .",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\frozen\\_frozen.py",
    "ast_data": "FunctionDef name:_estimator_has arg:attr arguments arg FunctionDef name:check arg:self arguments arg Call Return return:yes Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "is_satisfied_by",
    "source_code": "@abstractmethod\ndef is_satisfied_by(self, val):\n    pass",
    "docstring": "Whether or not a value satisfies the constraint. Parameters ---------- val : object The value to check. Returns ------- is_satisfied : bool Whether or not the constraint is satisfied by this value.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\utils\\_param_validation.py",
    "ast_data": "FunctionDef name:is_satisfied_by arg:self arg:val arguments arg arg"
  },
  {
    "library": "scipy",
    "name": "ks_1samp",
    "source_code": "@_rename_parameter('mode', 'method')\ndef ks_1samp(x, cdf, args=(), alternative='two-sided', method='auto'):\n    alternative = {'t': 'two-sided', 'g': 'greater', 'l': 'less'}.get(alternative.lower()[0], alternative)\n    return scipy.stats._stats_py.ks_1samp(x, cdf, args=args, alternative=alternative, method=method)",
    "docstring": "Computes the Kolmogorov-Smirnov test on one sample of masked values. Missing values in are discarded. Parameters ---------- x : array_like a 1-D array of observations of random variables. cdf : str or callable If a string, it should be the name of a distribution in . If a callable, that callable is used to calculate the cdf. args : tuple, sequence, optional Distribution parameters, used if is a string. alternative : {'two-sided', 'less', 'greater'}, optional Indicates the alternative hypothesis. Default is 'two-sided'. method : {'auto', 'exact', 'asymp'}, optional Defines the method used for calculating the p-value. The following options are available (default is 'auto'): * 'auto' : use 'exact' for small size arrays, 'asymp' for large * 'exact' : use approximation to exact distribution of test statistic * 'asymp' : use asymptotic distribution of test statistic Returns ------- d : float Value of the Kolmogorov Smirnov test p : float Corresponding p-value.",
    "type": "function",
    "file_path": "scipy\\scipy\\stats\\_mstats_basic.py",
    "ast_data": "FunctionDef name:ks_1samp arg:x arg:cdf arg:args arg:alternative arg:method arguments arg arg arg arg arg Assign Call Call Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_create_uninitialized_mirrored_tpu_variables",
    "source_code": "def _create_uninitialized_mirrored_tpu_variables(**kwargs):\n    if kwargs.get('initial_value', None) is None:\n        return _create_mirrored_tpu_variables(**kwargs)\n    value_list = []\n    initial_value = None\n    for i, d in enumerate(devices):\n        with ops.device(d):\n            if i == 0:\n                initial_value = kwargs.get('initial_value', None)\n                with maybe_init_scope():\n                    if initial_value is not None:\n                        if callable(initial_value):\n                            initial_value = initial_value()\n                        initial_value = ops.convert_to_tensor(initial_value, dtype=kwargs.get('dtype', None))\n            if i > 0:\n                var0name = value_list[0].name.split(':')[0]\n                kwargs['name'] = '%s/replica_%d/' % (var0name, i)\n            kwargs['initial_value'] = initial_value\n            if kwargs.get('dtype', None) is None:\n                kwargs['dtype'] = kwargs['initial_value'].dtype\n            if kwargs.get('shape', None) is None:\n                kwargs['shape'] = kwargs['initial_value'].shape\n            with context.device_policy(context.DEVICE_PLACEMENT_SILENT):\n                v = uninitialized_variable_creator(**kwargs)\n            assert not isinstance(v, tpu_values.TPUMirroredVariable)\n            value_list.append(v)\n    return value_list",
    "docstring": "Returns a list of s. The list contains s and can be used to initialize a . Args: **kwargs: the keyword arguments for creating a variable",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\tpu_strategy.py",
    "ast_data": "FunctionDef name:_create_uninitialized_mirrored_tpu_variables arguments arg If Compare Call Return return:yes Call Assign Assign For Call With Call If Compare Assign Call With Call If Compare If Call Assign Call Assign Call Call If Compare Assign Call Assign Assign If Compare Call Assign If Compare Call Assign With Call Assign Call Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "get_default_session",
    "source_code": "@tf_export(v1=['get_default_session'])\ndef get_default_session():\n    return _default_session_stack.get_default()",
    "docstring": "Returns the default session for the current thread. The returned will be the innermost session on which a or context has been entered. NOTE: The default session is a property of the current thread. If you create a new thread, and wish to use the default session in that thread, you must explicitly add a in that thread's function. Returns: The default being used in the current thread.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\stack.py",
    "ast_data": "FunctionDef name:get_default_session arguments Return return:yes Call Call"
  },
  {
    "library": "scipy",
    "name": "tukeylambda_kurtosis",
    "source_code": "def tukeylambda_kurtosis(lam):\n    lam = np.asarray(lam)\n    shp = lam.shape\n    lam = np.atleast_1d(lam).astype(np.float64)\n    threshold = 0.055\n    low_mask = lam < -0.25\n    negqrtr_mask = lam == -0.25\n    small_mask = np.abs(lam) < threshold\n    reg_mask = ~(low_mask | negqrtr_mask | small_mask)\n    small = lam[small_mask]\n    reg = lam[reg_mask]\n    k = np.empty_like(lam)\n    k[low_mask] = np.nan\n    k[negqrtr_mask] = np.inf\n    if small.size > 0:\n        k[small_mask] = _tukeylambda_kurt_p(small) / _tukeylambda_kurt_q(small)\n    if reg.size > 0:\n        numer = 1.0 / (4 * reg + 1) - 4 * beta(3 * reg + 1, reg + 1) + 3 * beta(2 * reg + 1, 2 * reg + 1)\n        denom = 2 * (1.0 / (2 * reg + 1) - beta(reg + 1, reg + 1)) ** 2\n        k[reg_mask] = numer / denom - 3\n    k.shape = shp\n    return k",
    "docstring": "Kurtosis of the Tukey Lambda distribution. Parameters ---------- lam : array_like The lambda values at which to compute the variance. Returns ------- v : ndarray The variance. For lam < -0.25, the variance is not defined, so np.nan is returned. For lam = 0.25, np.inf is returned.",
    "type": "function",
    "file_path": "scipy\\scipy\\stats\\_tukeylambda_stats.py",
    "ast_data": "FunctionDef name:tukeylambda_kurtosis arg:lam arguments arg Assign Call Assign Assign Call Call Assign Assign Compare Assign Compare Assign Compare Call Assign Assign Assign Assign Call Assign Assign If Compare Assign Call Call If Compare Assign Call Call Assign Call Assign Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "batch_gather",
    "source_code": "@dispatch.dispatch_for_api(array_ops.batch_gather)\ndef batch_gather(params: ragged_tensor.RaggedOrDense, indices: ragged_tensor.RaggedOrDense, name=None):\n    return ragged_gather_ops.gather(params, indices, batch_dims=-1, name=name)",
    "docstring": "Gathers slices from according to with batch dims. This operation is similar to , but it assumes that the leading dimensions of and are batch dimensions, and performs a gather within each batch. In particular, when using this operation with batch dimensions : * has shape * has shape . * has shape . * Args: params: A potentially ragged tensor with shape (, ). indices: A potentially ragged tensor with shape (). name: A name for the operation (optional). Returns: A potentially ragged tensor with shape . . #### Example: >>> params = tf.ragged.constant([['a', 'b', 'c'], ['d'], [], ['e']]) >>> indices = tf.ragged.constant([[1, 2, 0], [], [], [0, 0]]) >>> tf.compat.v1.batch_gather(params, indices)",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\ragged_batch_gather_ops.py",
    "ast_data": "FunctionDef name:batch_gather arg:params arg:indices arg:name arguments arg arg arg Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "_all_gather_dtensor",
    "source_code": "def _all_gather_dtensor(tensor: DTensor, root_mesh: Optional[DeviceMesh]) -> torch.Tensor:\n    assert root_mesh == tensor.device_mesh, 'The device mesh of a tensor should be a root mesh.'\n    placements = list(copy.deepcopy(tensor.placements))\n    placements[-1] = Replicate()\n    tensor = tensor.redistribute(device_mesh=tensor.device_mesh, placements=placements)\n    return tensor.to_local()",
    "docstring": "All gather a DTensor in its sharded dimension and return the local tensor.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\fsdp\\_shard_utils.py",
    "ast_data": "FunctionDef name:_all_gather_dtensor arg:tensor arg:root_mesh arguments arg arg Compare Assign Call Call Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "cdf",
    "source_code": "def cdf(self, x, *args, **kwds):\n    args, loc, scale = self._parse_args(*args, **kwds)\n    x, loc, scale = map(asarray, (x, loc, scale))\n    args = tuple(map(asarray, args))\n    _a, _b = self._get_support(*args)\n    dtyp = np.promote_types(x.dtype, np.float64)\n    x = np.asarray((x - loc) / scale, dtype=dtyp)\n    cond0 = self._argcheck(*args) & (scale > 0)\n    cond1 = self._open_support_mask(x, *args) & (scale > 0)\n    cond2 = (x >= np.asarray(_b)) & cond0\n    cond = cond0 & cond1\n    output = zeros(shape(cond), dtyp)\n    place(output, 1 - cond0 + np.isnan(x), self.badvalue)\n    place(output, cond2, 1.0)\n    if np.any(cond):\n        goodargs = argsreduce(cond, *(x,) + args)\n        place(output, cond, self._cdf(*goodargs))\n    if output.ndim == 0:\n        return output[()]\n    return output",
    "docstring": "Cumulative distribution function of the given RV. Parameters ---------- x : array_like quantiles arg1, arg2, arg3,... : array_like The shape parameter(s) for the distribution (see docstring of the instance object for more information) loc : array_like, optional location parameter (default=0) scale : array_like, optional scale parameter (default=1) Returns ------- cdf : ndarray Cumulative distribution function evaluated at",
    "type": "method",
    "file_path": "scipy\\scipy\\stats\\_distn_infrastructure.py",
    "ast_data": "FunctionDef name:cdf arg:self arg:x arguments arg arg arg arg Assign Call Assign Call Assign Call Call Assign Call Assign Call Assign Call Assign Call Compare Assign Call Compare Assign Compare Call Assign Assign Call Call Call Call Call If Call Assign Call Call Call If Compare Return return:yes Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "remove",
    "source_code": "def remove(self, module):\n    assert self._tensor_name is not None, f'Module {module} has to be pruned            before pruning can be removed'\n    weight = self.apply_mask(module)\n    if hasattr(module, self._tensor_name):\n        delattr(module, self._tensor_name)\n    orig = module._parameters[self._tensor_name + '_orig']\n    orig.data = weight.data\n    del module._parameters[self._tensor_name + '_orig']\n    del module._buffers[self._tensor_name + '_mask']\n    setattr(module, self._tensor_name, orig)",
    "docstring": "Remove the pruning reparameterization from a module. The pruned parameter named `` is removed from the buffers. Note: Pruning itself is NOT undone or reversed!",
    "type": "method",
    "file_path": "pytorch\\torch\\nn\\utils\\prune.py",
    "ast_data": "FunctionDef name:remove arg:self arg:module arguments arg arg Compare Assign Call If Call Call Assign Assign Call"
  },
  {
    "library": "django",
    "name": "VariableWrapper",
    "source_code": "class VariableWrapper:\n\n    def __init__(self, var):\n        self.var = var\n\n    def bind_parameter(self, cursor):\n        return self.var\n\n    def __getattr__(self, key):\n        return getattr(self.var, key)\n\n    def __setattr__(self, key, value):\n        if key == 'var':\n            self.__dict__[key] = value\n        else:\n            setattr(self.var, key, value)",
    "docstring": "An adapter class for cursor variables that prevents the wrapped object from being converted into a string when used to instantiate an OracleParam. This can be used generally for any other object that should be passed into Cursor.execute as-is.",
    "type": "class",
    "file_path": "django\\django\\db\\backends\\oracle\\base.py",
    "ast_data": "ClassDef name:VariableWrapper FunctionDef name:__init__ arg:self arg:var arguments arg arg Assign FunctionDef name:bind_parameter arg:self arg:cursor arguments arg arg Return return:yes FunctionDef name:__getattr__ arg:self arg:key arguments arg arg Return return:yes Call FunctionDef name:__setattr__ arg:self arg:key arg:value arguments arg arg arg If Compare Assign Call"
  },
  {
    "library": "kornia",
    "name": "contrast",
    "source_code": "def contrast(min_mag: float, max_mag: float) -> OperationBase:\n    return Contrast(None, 1.0, magnitude_range=(min_mag, max_mag))",
    "docstring": "Return contrast op.",
    "type": "function",
    "file_path": "kornia\\kornia\\augmentation\\auto\\rand_augment\\ops.py",
    "ast_data": "FunctionDef name:contrast arg:min_mag arg:max_mag arguments arg arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "update_arg",
    "source_code": "@compatibility(is_backward_compatible=True)\ndef update_arg(self, idx: int, arg: Argument) -> None:\n    args = list(self.args)\n    args[idx] = arg\n    self.args = tuple(args)",
    "docstring": "Update an existing positional argument to contain the new value ``",
    "type": "method",
    "file_path": "pytorch\\torch\\fx\\node.py",
    "ast_data": "FunctionDef name:update_arg arg:self arg:idx arg:arg arguments arg arg arg Assign Call Assign Assign Call Call"
  },
  {
    "library": "scipy",
    "name": "ksone_gen",
    "source_code": "class ksone_gen(rv_continuous):\n\n    def _argcheck(self, n):\n        return (n >= 1) & (n == np.round(n))\n\n    def _shape_info(self):\n        return [_ShapeInfo('n', True, (1, np.inf), (True, False))]\n\n    def _pdf(self, x, n):\n        return -scu._smirnovp(n, x)\n\n    def _cdf(self, x, n):\n        return scu._smirnovc(n, x)\n\n    def _sf(self, x, n):\n        return sc.smirnov(n, x)\n\n    def _ppf(self, q, n):\n        return scu._smirnovci(n, q)\n\n    def _isf(self, q, n):\n        return sc.smirnovi(n, q)",
    "docstring": "Kolmogorov-Smirnov one-sided test statistic distribution. This is the distribution of the one-sided Kolmogorov-Smirnov (KS) statistics :math: and :math: for a finite sample size `D_n^+D_n^-FF_nksonenF`: >>> vals = ksone.ppf([0.001, 0.5, 0.999], n) >>> np.allclose([0.001, 0.5, 0.999], ksone.cdf(vals, n)) True",
    "type": "class",
    "file_path": "scipy\\scipy\\stats\\_continuous_distns.py",
    "ast_data": "ClassDef name:ksone_gen FunctionDef name:_argcheck arg:self arg:n arguments arg arg Return return:yes Compare Compare Call FunctionDef name:_shape_info arg:self arguments arg Return return:yes Call FunctionDef name:_pdf arg:self arg:x arg:n arguments arg arg arg Return return:yes Call FunctionDef name:_cdf arg:self arg:x arg:n arguments arg arg arg Return return:yes Call FunctionDef name:_sf arg:self arg:x arg:n arguments arg arg arg Return return:yes Call FunctionDef name:_ppf arg:self arg:q arg:n arguments arg arg arg Return return:yes Call FunctionDef name:_isf arg:self arg:q arg:n arguments arg arg arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "input_shape",
    "source_code": "@property\n@doc_controls.do_not_doc_inheritable\ndef input_shape(self):\n    if not self._inbound_nodes:\n        raise AttributeError('The layer has never been called and thus has no defined input shape.')\n    all_input_shapes = set([str(node.input_shapes) for node in self._inbound_nodes])\n    if len(all_input_shapes) == 1:\n        return self._inbound_nodes[0].input_shapes\n    else:\n        raise AttributeError('The layer \"' + str(self.name) + ' has multiple inbound nodes, with different input shapes. Hence the notion of \"input shape\" is ill-defined for the layer. Use `get_input_shape_at(node_index)` instead.')",
    "docstring": "Retrieves the input shape(s) of a layer. Only applicable if the layer has exactly one input, i.e. if it is connected to one incoming layer, or if all inputs have the same shape. Returns: Input shape, as an integer shape tuple (or list of shape tuples, one tuple per input tensor). Raises: AttributeError: if the layer has no defined input_shape. RuntimeError: if called in Eager mode.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\base_layer.py",
    "ast_data": "FunctionDef name:input_shape arg:self arguments arg If Raise Call Assign Call Call If Compare Call Return return:yes Raise Call Call"
  },
  {
    "library": "scrapy",
    "name": "download",
    "source_code": "@inlineCallbacks\ndef download(self, request: Request) -> Generator[Deferred[Any], Any, Response]:\n    if self.spider is None:\n        raise RuntimeError(f'No open spider to crawl: {request}')\n    try:\n        response_or_request = (yield self._download(request))\n    finally:\n        assert self._slot is not None\n        self._slot.remove_request(request)\n    if isinstance(response_or_request, Request):\n        return (yield self.download(response_or_request))\n    return response_or_request",
    "docstring": "Return a Deferred which fires with a Response as result, only downloader middlewares are applied",
    "type": "method",
    "file_path": "scrapy\\scrapy\\core\\engine.py",
    "ast_data": "FunctionDef name:download arg:self arg:request arguments arg arg If Compare Raise Call Try Assign Call Compare Call If Call Return return:yes Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "rank",
    "source_code": "@dispatch.dispatch_for_api(array_ops.rank)\ndef rank(input: ragged_tensor.Ragged, name=None):\n    with ops.name_scope(name, 'RaggedRank', [input]) as name:\n        if not ragged_tensor.is_ragged(input):\n            return array_ops.rank(input, name)\n        return input.ragged_rank + array_ops.rank(input.flat_values)",
    "docstring": "Returns the rank of a RaggedTensor. Returns a 0-D representing the rank of . #### Example: >>> # shape of tensor 't' is [2, None, None] >>> t = tf.ragged.constant([[[1], [2, 2]], [[3, 3, 3], [4, 4, 4, 4]]]) >>> tf.rank(t).numpy().item() 3 Args: input: A name: A name for the operation (optional). Returns: A of type .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\ragged_array_ops.py",
    "ast_data": "FunctionDef name:rank arg:input arg:name arguments arg arg With Call If Call Return return:yes Call Return return:yes Call Call"
  },
  {
    "library": "numpy",
    "name": "get_flags_opt",
    "source_code": "def get_flags_opt(self):\n    return []",
    "docstring": "List of architecture independent compiler flags.",
    "type": "method",
    "file_path": "numpy\\numpy\\distutils\\fcompiler\\__init__.py",
    "ast_data": "FunctionDef name:get_flags_opt arg:self arguments arg Return return:no"
  },
  {
    "library": "scipy",
    "name": "besselap",
    "source_code": "def besselap(N, norm='phase'):\n    if abs(int(N)) != N:\n        raise ValueError('Filter order must be a nonnegative integer')\n    N = int(N)\n    if N == 0:\n        p = []\n        k = 1\n    else:\n        p = 1 / _bessel_zeros(N)\n        a_last = _falling_factorial(2 * N, N) // 2 ** N\n        if norm in ('delay', 'mag'):\n            k = a_last\n            if norm == 'mag':\n                norm_factor = _norm_factor(p, k)\n                p /= norm_factor\n                k = norm_factor ** (-N) * a_last\n        elif norm == 'phase':\n            p *= 10 ** (-math.log10(a_last) / N)\n            k = 1\n        else:\n            raise ValueError('normalization not understood')\n    return (asarray([]), asarray(p, dtype=complex), float(k))",
    "docstring": "Return (z,p,k) for analog prototype of an Nth-order Bessel filter. Parameters ---------- N : int The order of the filter. norm : {'phase', 'delay', 'mag'}, optional Frequency normalization: `Wn1105.095710.1145/363067.363115` .. [6] Miller and Bohn, \"A Bessel Filter Crossover, and Its Relation to Others\", RaneNote 147, 1998,",
    "type": "function",
    "file_path": "scipy\\scipy\\signal\\_filter_design.py",
    "ast_data": "FunctionDef name:besselap arg:N arg:norm arguments arg arg If Compare Call Call Raise Call Assign Call If Compare Assign Assign Assign Call Assign Call If Compare Assign If Compare Assign Call Assign If Compare Call Assign Raise Call Return return:yes Call Call Call"
  },
  {
    "library": "cherrypy",
    "name": "process_multipart_form_data",
    "source_code": "def process_multipart_form_data(entity):\n    process_multipart(entity)\n    kept_parts = []\n    for part in entity.parts:\n        if part.name is None:\n            kept_parts.append(part)\n        else:\n            if part.filename is None:\n                value = part.fullvalue()\n            else:\n                value = part\n            if part.name in entity.params:\n                if not isinstance(entity.params[part.name], list):\n                    entity.params[part.name] = [entity.params[part.name]]\n                entity.params[part.name].append(value)\n            else:\n                entity.params[part.name] = value\n    entity.parts = kept_parts",
    "docstring": "Read ``.",
    "type": "function",
    "file_path": "cherrypy\\cherrypy\\_cpreqbody.py",
    "ast_data": "FunctionDef name:process_multipart_form_data arg:entity arguments arg Call Assign For If Compare Call If Compare Assign Call Assign If Compare If Call Assign Call Assign Assign"
  },
  {
    "library": "tensorflow",
    "name": "_categorical_column_with_hash_bucket",
    "source_code": "def _categorical_column_with_hash_bucket(key, hash_bucket_size, dtype=dtypes.string):\n    if hash_bucket_size is None:\n        raise ValueError('hash_bucket_size must be set. key: {}'.format(key))\n    if hash_bucket_size < 1:\n        raise ValueError('hash_bucket_size must be at least 1. hash_bucket_size: {}, key: {}'.format(hash_bucket_size, key))\n    fc_utils.assert_key_is_string(key)\n    fc_utils.assert_string_or_int(dtype, prefix='column_name: {}'.format(key))\n    return _HashedCategoricalColumn(key, hash_bucket_size, dtype)",
    "docstring": "Represents sparse feature where ids are set by hashing. Use this when your sparse features are in string or integer format, and you want to distribute your inputs into a finite number of buckets by hashing. output_id = Hash(input_feature_string) % bucket_size for string type input. For int type input, the value is converted to its string representation first and then hashed by the same formula. For input dictionary , is either or . If , missing values can be represented by for int and for string, which will be dropped by this feature column. Example: Args: key: A unique string identifying the input feature. It is used as the column name and the dictionary key for feature parsing configs, feature objects, and feature columns. hash_bucket_size: An int > 1. The number of buckets. dtype: The type of features. Only string and integer types are supported. Returns: A . Raises: ValueError: is not greater than 1. ValueError: is neither string nor integer.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\feature_column\\feature_column.py",
    "ast_data": "FunctionDef name:_categorical_column_with_hash_bucket arg:key arg:hash_bucket_size arg:dtype arguments arg arg arg If Compare Raise Call Call If Compare Raise Call Call Call Call Call Return return:yes Call"
  },
  {
    "library": "cherrypy",
    "name": "_respond_401",
    "source_code": "def _respond_401(realm, key, accept_charset, debug, **kwargs):\n    header = www_authenticate(realm, key, accept_charset=accept_charset, **kwargs)\n    if debug:\n        TRACE(header)\n    cherrypy.serving.response.headers['WWW-Authenticate'] = header\n    raise cherrypy.HTTPError(401, 'You are not authorized to access that resource')",
    "docstring": "Respond with 401 status and a WWW-Authenticate header.",
    "type": "function",
    "file_path": "cherrypy\\cherrypy\\lib\\auth_digest.py",
    "ast_data": "FunctionDef name:_respond_401 arg:realm arg:key arg:accept_charset arg:debug arguments arg arg arg arg arg Assign Call If Call Assign Raise Call"
  },
  {
    "library": "tensorflow",
    "name": "remove_function",
    "source_code": "def remove_function(name):\n    context().remove_function(name)",
    "docstring": "Remove a function from the context.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\context.py",
    "ast_data": "FunctionDef name:remove_function arg:name arguments arg Call Call"
  },
  {
    "library": "scipy",
    "name": "moment",
    "source_code": "def moment(a, moment=1, axis=0):\n    a, axis = _chk_asarray(a, axis)\n    if a.size == 0:\n        moment_shape = list(a.shape)\n        del moment_shape[axis]\n        dtype = a.dtype.type if a.dtype.kind in 'fc' else np.float64\n        out_shape = moment_shape if np.isscalar(moment) else [len(moment)] + moment_shape\n        if len(out_shape) == 0:\n            return dtype(np.nan)\n        else:\n            return ma.array(np.full(out_shape, np.nan, dtype=dtype))\n    if not np.isscalar(moment):\n        mean = a.mean(axis, keepdims=True)\n        mmnt = [_moment(a, i, axis, mean=mean) for i in moment]\n        return ma.array(mmnt)\n    else:\n        return _moment(a, moment, axis)",
    "docstring": "Calculates the nth moment about the mean for a sample. Parameters ---------- a : array_like data moment : int, optional order of central moment that is returned axis : int or None, optional Axis along which the central moment is computed. Default is 0. If None, compute over the whole array . Returns ------- n-th central moment : ndarray or float The appropriate moment along the given axis or over all values if axis is None. The denominator for the moment calculation is the number of observations, no degrees of freedom correction is done. Notes ----- For more details about , see .",
    "type": "function",
    "file_path": "scipy\\scipy\\stats\\_mstats_basic.py",
    "ast_data": "FunctionDef name:moment arg:a arg:moment arg:axis arguments arg arg arg Assign Call If Compare Assign Call Assign Compare Assign Call Call If Compare Call Return return:yes Call Return return:yes Call Call If Call Assign Call Assign Call Return return:yes Call Return return:yes Call"
  },
  {
    "library": "django",
    "name": "escape_uri_path",
    "source_code": "def escape_uri_path(path):\n    return quote(path, safe=\"/:@&+$,-_.!~*'()\")",
    "docstring": "Escape the unsafe characters from the path portion of a Uniform Resource Identifier (URI).",
    "type": "function",
    "file_path": "django\\django\\utils\\encoding.py",
    "ast_data": "FunctionDef name:escape_uri_path arg:path arguments arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "is_graphable",
    "source_code": "def is_graphable(val) -> bool:\n    return isinstance(val, torch.fx.node.base_types)",
    "docstring": "Definition: a graphable type is a type that that is an acceptable input/output type to a FX node.",
    "type": "function",
    "file_path": "pytorch\\torch\\_higher_order_ops\\flat_apply.py",
    "ast_data": "FunctionDef name:is_graphable arg:val arguments arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "dump_dag",
    "source_code": "def dump_dag(self, module_with_submodules: GraphModule) -> DAG:\n    dag = DAG()\n    for node in module_with_submodules.graph.nodes:\n        if node.op == 'output':\n            break\n        if node.op in {'placeholder', 'get_attr'}:\n            continue\n        if node.target == operator.__getitem__:\n            continue\n        input_nodes: dict[Node, None] = {}\n        map_arg(node.args, input_nodes.setdefault)\n        map_arg(node.kwargs, input_nodes.setdefault)\n        if len(node.users) > 1:\n            output_nodes = list(node.users)\n        else:\n            output_nodes = [node]\n        partition_id = int(node.name.rsplit('_', 1)[-1])\n        device_ids = self.partitions[partition_id].logical_device_ids\n        size_bytes = self.partitions[partition_id].used_mem_bytes\n        dag.create_node(node, list(input_nodes), output_nodes, device_ids, size_bytes)\n    return dag",
    "docstring": "Return the dag structure and the new fx module with submodules.",
    "type": "method",
    "file_path": "pytorch\\torch\\fx\\experimental\\accelerator_partitioner.py",
    "ast_data": "FunctionDef name:dump_dag arg:self arg:module_with_submodules arguments arg arg Assign Call For If Compare If Compare If Compare Call Call If Compare Call Assign Call Assign Assign Call Call Assign Assign Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "Sum",
    "source_code": "class Sum(Reduce):\n\n    def __init__(self, name='sum', dtype=None):\n        super(Sum, self).__init__(reduction=metrics_utils.Reduction.SUM, name=name, dtype=dtype)",
    "docstring": "Computes the (weighted) sum of the given values. For example, if values is [1, 3, 5, 7] then the sum is 16. If the weights were specified as [1, 1, 0, 0] then the sum would be 4. This metric creates one variable, , that is used to compute the sum of . This is ultimately returned as . If is , weights default to 1. Use of 0 to mask values. Args: name: (Optional) string name of the metric instance. dtype: (Optional) data type of the metric result. Standalone usage: >>> m = tf.keras.metrics.Sum() >>> m.update_state([1, 3, 5, 7]) >>> m.result().numpy() 16.0 Usage with API:",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\metrics.py",
    "ast_data": "ClassDef name:Sum FunctionDef name:__init__ arg:self arg:name arg:dtype arguments arg arg arg Call Call"
  },
  {
    "library": "tensorflow",
    "name": "WithValuesCompositeTensorGradient",
    "source_code": "class WithValuesCompositeTensorGradient(CompositeTensorGradient):\n\n    def get_gradient_components(self, value):\n        return value.values\n\n    def replace_gradient_components(self, value, component_grads):\n        return value.with_values(component_grads)",
    "docstring": "CompositeTensorGradient based on and .",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\composite_tensor_gradient.py",
    "ast_data": "ClassDef name:WithValuesCompositeTensorGradient FunctionDef name:get_gradient_components arg:self arg:value arguments arg arg Return return:yes FunctionDef name:replace_gradient_components arg:self arg:value arg:component_grads arguments arg arg arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "quantize_jit",
    "source_code": "def quantize_jit(model, qconfig_dict, run_fn, run_args, inplace=False, debug=False):\n    torch._C._log_api_usage_once('quantization_api.quantize_jit.quantize_jit')\n    return _quantize_jit(model, qconfig_dict, run_fn, run_args, inplace, debug, quant_type=QuantType.STATIC)",
    "docstring": "Quantize the input float TorchScript model with post training static quantization. First it will prepare the model for calibration, then it calls which will run the calibration step, after that we will convert the model to a quantized model. Args: : input float TorchScript model : qconfig_dict is a dictionary with names of sub modules as key and qconfig for that module as value, empty key means the qconfig will be applied to whole model unless it's overwritten by more specific configurations, the qconfig for each module is either found in the dictionary or fallback to the qconfig of parent module. Right now qconfig_dict is the only way to configure how the model is quantized, and it is done in the granularity of module, that is, we only support one type of qconfig for each torch.nn.Module, and the qconfig for sub module will override the qconfig for parent module, empty string means global configuration. : a calibration function for calibrating the prepared model : positional arguments for : carry out model transformations in-place, the original module is mutated : flag for producing a debug friendly model (preserve weight attribute) Return: Quantized TorchSciprt model. Example:",
    "type": "function",
    "file_path": "pytorch\\torch\\ao\\quantization\\quantize_jit.py",
    "ast_data": "FunctionDef name:quantize_jit arg:model arg:qconfig_dict arg:run_fn arg:run_args arg:inplace arg:debug arguments arg arg arg arg arg arg Call Return return:yes Call"
  },
  {
    "library": "django",
    "name": "to_python",
    "source_code": "def to_python(self, value):\n    if value in (True, 'True', 'true', '1'):\n        return True\n    elif value in (False, 'False', 'false', '0'):\n        return False\n    else:\n        return None",
    "docstring": "Explicitly check for the string 'True' and 'False', which is what a hidden field will submit for True and False, for 'true' and 'false', which are likely to be returned by JavaScript serializations of forms, and for '1' and '0', which is what a RadioField will submit. Unlike the Booleanfield, this field must check for True because it doesn't use the bool() function.",
    "type": "method",
    "file_path": "django\\django\\forms\\fields.py",
    "ast_data": "FunctionDef name:to_python arg:self arg:value arguments arg arg If Compare Return return:yes If Compare Return return:yes Return return:no"
  },
  {
    "library": "pytorch",
    "name": "gather",
    "source_code": "def gather(outputs: Any, target_device: Union[int, torch.device], dim: int=0) -> Any:\n\n    def gather_map(outputs):\n        out = outputs[0]\n        if isinstance(out, torch.Tensor):\n            return Gather.apply(target_device, dim, *outputs)\n        if out is None:\n            return None\n        if isinstance(out, dict):\n            if not all((len(out) == len(d) for d in outputs)):\n                raise ValueError('All dicts must have the same number of keys')\n            return type(out)(((k, gather_map([d[k] for d in outputs])) for k in out))\n        if _is_namedtuple(out):\n            return type(out)._make(map(gather_map, zip(*outputs)))\n        return type(out)(map(gather_map, zip(*outputs)))\n    try:\n        res = gather_map(outputs)\n    finally:\n        gather_map = None\n    return res",
    "docstring": "Gather tensors from different GPUs on a specified device. This function is useful for gathering the results of a distributed computation. It takes a sequence of objects, one for each GPU, and returns a single object on the specified device. Args: outputs (Any): A sequence of objects (potentially tensors) to gather. target_device (Union[int, torch.device]): The device to gather the tensors to. Use 'cpu' for CPU to avoid a deprecation warning. dim (int, optional): The dimension along which to gather. Default: 0. Returns: Any: A gathered object (potentially tensor) on the specified device.",
    "type": "function",
    "file_path": "pytorch\\torch\\nn\\parallel\\scatter_gather.py",
    "ast_data": "FunctionDef name:gather arg:outputs arg:target_device arg:dim arguments arg arg arg FunctionDef name:gather_map arg:outputs arguments arg Assign If Call Return return:yes Call If Compare Return return:no If Call If Call Compare Call Call Raise Call Return return:yes Call Call Call If Call Return return:yes Call Call Call Call Return return:yes Call Call Call Call Try Assign Call Assign Return return:yes"
  },
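A usage sketch for `gather`, assuming at least two CUDA devices are available (without them the guarded block simply does not run):

```python
import torch
from torch.nn.parallel.scatter_gather import gather

if torch.cuda.device_count() >= 2:
    outputs = [torch.ones(2, 3, device='cuda:0'),
               2 * torch.ones(2, 3, device='cuda:1')]
    # Concatenates the per-GPU results along dim 0 on the target device.
    merged = gather(outputs, target_device=0, dim=0)
    print(merged.shape, merged.device)  # torch.Size([4, 3]) cuda:0
```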
  {
    "library": "pytorch",
    "name": "_LowerTriangular",
    "source_code": "class _LowerTriangular(Constraint):\n    event_dim = 2\n\n    def check(self, value):\n        value_tril = value.tril()\n        return (value_tril == value).view(value.shape[:-2] + (-1,)).min(-1)[0]",
    "docstring": "Constrain to lower-triangular square matrices.",
    "type": "class",
    "file_path": "pytorch\\torch\\distributions\\constraints.py",
    "ast_data": "ClassDef name:_LowerTriangular Assign FunctionDef name:check arg:self arg:value arguments arg arg Assign Call Return return:yes Call Call Compare"
  },
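The constraint is exposed publicly as `torch.distributions.constraints.lower_triangular`; a quick check of the behavior described above:

```python
import torch
from torch.distributions import constraints

m = torch.tril(torch.randn(3, 3))  # lower-triangular by construction
print(constraints.lower_triangular.check(m))                 # tensor(True)
print(constraints.lower_triangular.check(torch.ones(3, 3)))  # tensor(False)
```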
  {
    "library": "matplotlib",
    "name": "_revert",
    "source_code": "def _revert(self, path, first_action=Path.LINETO):\n    reverse_path = []\n    next_code = first_action\n    for code, position in path[::-1]:\n        reverse_path.append((next_code, position))\n        next_code = code\n    return reverse_path",
    "docstring": "A path is not simply reversible by path[::-1] since the code specifies an action to take from the **previous** point.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\sankey.py",
    "ast_data": "FunctionDef name:_revert arg:self arg:path arg:first_action arguments arg arg arg Assign Assign For Call Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_has_nchw_support",
    "source_code": "def _has_nchw_support():\n    explicitly_on_cpu = _is_current_explicit_device('CPU')\n    gpus_available = bool(_get_available_gpus())\n    return not explicitly_on_cpu and gpus_available",
    "docstring": "Check whether the current scope supports NCHW ops. TensorFlow does not support NCHW on CPU. Therefore we check if we are not explicitly put on CPU, and have GPUs available. In this case there will be soft-placing on the GPU device. Returns: bool: if the current scope device placement would support nchw",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\backend.py",
    "ast_data": "FunctionDef name:_has_nchw_support arguments Assign Call Assign Call Call Return return:yes BoolOp"
  },
  {
    "library": "pytorch",
    "name": "__init__",
    "source_code": "def __init__(self, namespace: str, op_name: str, promote_args_positions: Sequence[int], promote_kwargs_names: Sequence[str], promotion_kind: _prims_common.ELEMENTWISE_TYPE_PROMOTION_KIND):\n    super().__init__(namespace, op_name)\n    self.promote_args_positions = promote_args_positions\n    self.promote_kwargs_names = promote_kwargs_names\n    self.promotion_kind = promotion_kind",
    "docstring": "Constructs a TypePromotionRule for elementwise operators. Args: namespace: Namespace of the op. E.g. 'aten' in 'torch.ops.aten.add'. op_name: Name of the op. E.g. 'add' in 'torch.ops.aten.add'. promote_args_positions: Positions of args to promote. promote_kwargs_names: Names of kwargs to promote. promotion_kind: Type promotion kind. Refer to [_prims_common.elementwise_dtypes]( for detail. # noqa: B950",
    "type": "method",
    "file_path": "pytorch\\torch\\onnx\\_internal\\fx\\passes\\type_promotion.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:namespace arg:op_name arg:promote_args_positions arg:promote_kwargs_names arg:promotion_kind arguments arg arg arg arg arg arg Call Call Assign Assign Assign"
  },
  {
    "library": "sphinx",
    "name": "dedent_docstring",
    "source_code": "def dedent_docstring(s: str) -> str:\n\n    def dummy() -> None:\n        pass\n    dummy.__doc__ = s\n    docstring = inspect.getdoc(dummy)\n    if docstring:\n        return docstring.lstrip('\\r\\n').rstrip('\\r\\n')\n    else:\n        return ''",
    "docstring": "Remove common leading indentation from docstring.",
    "type": "function",
    "file_path": "sphinx\\sphinx\\pycode\\parser.py",
    "ast_data": "FunctionDef name:dedent_docstring arg:s arguments arg FunctionDef name:dummy arguments Assign Assign Call If Return return:yes Call Call Return return:yes"
  },
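The dedent trick above delegates to `inspect.getdoc`, which strips the common leading indentation; a small standalone demonstration:

```python
import inspect

def dummy():
    pass

dummy.__doc__ = """
    First line.
        More deeply indented.
    Back to the common level.
"""
# inspect.getdoc removes the common leading indentation, which is the
# mechanism dedent_docstring relies on.
print(inspect.getdoc(dummy))
```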
  {
    "library": "tensorflow",
    "name": "state",
    "source_code": "def state(self):\n    return self._get_tpu_property('state')",
    "docstring": "Return state of the TPU.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\tpu\\client\\client.py",
    "ast_data": "FunctionDef name:state arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "check_existence",
    "source_code": "def check_existence(filename):\n    if not os.path.exists(filename):\n        raise RuntimeError('%s not found. Are you under the TensorFlow source root directory?' % filename)",
    "docstring": "Check the existence of file or dir.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\tools\\ci_build\\update_version.py",
    "ast_data": "FunctionDef name:check_existence arg:filename arguments arg If Call Raise Call"
  },
  {
    "library": "pytorch",
    "name": "max_memory_allocated",
    "source_code": "def max_memory_allocated(device: _device_t=None) -> int:\n    return memory_stats(device=device).get('allocated_bytes.all.peak', 0)",
    "docstring": "Return the maximum GPU memory occupied by tensors in bytes for a given device. By default, this returns the peak allocated memory since the beginning of this program. :func: can be used to reset the starting point in tracking this metric. For example, these two functions can measure the peak allocated memory usage of each iteration in a training loop. Args: device (torch.device or int or str, optional): selected device. Returns statistic for the current device, given by :func:, if :attr: is `` (default).",
    "type": "function",
    "file_path": "pytorch\\torch\\xpu\\memory.py",
    "ast_data": "FunctionDef name:max_memory_allocated arg:device arguments arg Return return:yes Call Call"
  },
  {
    "library": "matplotlib",
    "name": "update_units",
    "source_code": "def update_units(self, data):\n    if not self._converter_is_explicit:\n        converter = munits.registry.get_converter(data)\n    else:\n        converter = self._converter\n    if converter is None:\n        return False\n    neednew = self._converter != converter\n    self._set_converter(converter)\n    default = self._converter.default_units(data, self)\n    if default is not None and self.units is None:\n        self.set_units(default)\n    elif neednew:\n        self._update_axisinfo()\n    self.stale = True\n    return True",
    "docstring": "Introspect *data* for units converter and update the `` instance if necessary. Return *True* if *data* is registered for unit conversion.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\axis.py",
    "ast_data": "FunctionDef name:update_units arg:self arg:data arguments arg arg If Assign Call Assign If Compare Return return:yes Assign Compare Call Assign Call If BoolOp Compare Compare Call If Call Assign Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "_update_feature_log_prob",
    "source_code": "def _update_feature_log_prob(self, alpha):\n    smoothed_fc = self.feature_count_ + alpha\n    smoothed_cc = self.class_count_ + alpha * 2\n    self.feature_log_prob_ = np.log(smoothed_fc) - np.log(smoothed_cc.reshape(-1, 1))",
    "docstring": "Apply smoothing to raw counts and recompute log probabilities",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\naive_bayes.py",
    "ast_data": "FunctionDef name:_update_feature_log_prob arg:self arg:alpha arguments arg arg Assign Assign Assign Call Call Call"
  },
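The smoothing step above can be reproduced with plain NumPy; a toy sketch with hypothetical counts (2 classes, 2 binary features) showing why no probability collapses to 0 or 1:

```python
import numpy as np

feature_count = np.array([[3.0, 0.0], [1.0, 2.0]])  # per-class feature counts
class_count = np.array([4.0, 3.0])
alpha = 1.0  # Laplace/Lidstone smoothing strength

smoothed_fc = feature_count + alpha
smoothed_cc = class_count + alpha * 2
feature_log_prob = np.log(smoothed_fc) - np.log(smoothed_cc.reshape(-1, 1))
# Smoothed P(feature=1 | class): strictly inside (0, 1) despite the zero count.
print(np.exp(feature_log_prob))
```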
  {
    "library": "scipy",
    "name": "_evaluate",
    "source_code": "def _evaluate(self, x):\n    raise NotImplementedError()",
    "docstring": "Actually evaluate the value of the interpolator.",
    "type": "method",
    "file_path": "scipy\\scipy\\interpolate\\_polyint.py",
    "ast_data": "FunctionDef name:_evaluate arg:self arg:x arguments arg arg Raise Call"
  },
  {
    "library": "scikit-learn",
    "name": "transform",
    "source_code": "def transform(self, y):\n    check_is_fitted(self)\n    y_is_multilabel = type_of_target(y).startswith('multilabel')\n    if y_is_multilabel and (not self.y_type_.startswith('multilabel')):\n        raise ValueError('The object was not fitted with multilabel input.')\n    return label_binarize(y, classes=self.classes_, pos_label=self.pos_label, neg_label=self.neg_label, sparse_output=self.sparse_output)",
    "docstring": "Transform multi-class labels to binary labels. The output of transform is sometimes referred to by some authors as the 1-of-K coding scheme. Parameters ---------- y : {array, sparse matrix} of shape (n_samples,) or (n_samples, n_classes) Target values. The 2-d matrix should only contain 0 and 1, represents multilabel classification. Sparse matrix can be CSR, CSC, COO, DOK, or LIL. Returns ------- Y : {ndarray, sparse matrix} of shape (n_samples, n_classes) Shape will be (n_samples, 1) for binary problems. Sparse matrix will be of CSR format.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\preprocessing\\_label.py",
    "ast_data": "FunctionDef name:transform arg:self arg:y arguments arg arg Call Assign Call Call If BoolOp Call Raise Call Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "_box_pa",
    "source_code": "@classmethod\ndef _box_pa(cls, value, pa_type: pa.DataType | None=None) -> pa.Array | pa.ChunkedArray | pa.Scalar:\n    if isinstance(value, pa.Scalar) or not is_list_like(value):\n        return cls._box_pa_scalar(value, pa_type)\n    return cls._box_pa_array(value, pa_type)",
    "docstring": "Box value into a pyarrow Array, ChunkedArray or Scalar. Parameters ---------- value : any pa_type : pa.DataType | None Returns ------- pa.Array or pa.ChunkedArray or pa.Scalar",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\arrays\\arrow\\array.py",
    "ast_data": "FunctionDef name:_box_pa arg:cls arg:value arg:pa_type arguments arg arg arg If BoolOp Call Call Return return:yes Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_tf_tensor_get_item",
    "source_code": "def _tf_tensor_get_item(target, i):\n    return target[i]",
    "docstring": "Overload of get_item that stages a Tensor (not Tensor list) read.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\autograph\\operators\\slices.py",
    "ast_data": "FunctionDef name:_tf_tensor_get_item arg:target arg:i arguments arg arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_get_new_group_key",
    "source_code": "def _get_new_group_key(self, devices):\n    new_key = self._group_key\n    self._group_key += 1\n    self._instance_key_table[new_key] = {}\n    for device in devices:\n        self._instance_key_table[new_key][device] = INSTANCE_KEY_START_NUMBER\n    return new_key",
    "docstring": "Returns a new group key. The caller should store and reuse the same group key for the same set of devices. Calling this method always returns a new group key. This method is not thread-safe. Args: devices: a list of canonical device strings in a collective group. Returns: a new group key.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\cross_device_utils.py",
    "ast_data": "FunctionDef name:_get_new_group_key arg:self arg:devices arguments arg arg Assign Assign For Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "make_raw_assign_fn",
    "source_code": "def make_raw_assign_fn(raw_assign_fn, use_handle=True):\n\n    def assign_fn(var, value, use_locking=False, name=None, read_value=True):\n        del use_locking\n        handle = var.handle if use_handle else var\n        with _maybe_enter_graph(handle), _maybe_on_device(var):\n            op = raw_assign_fn(handle, ops.convert_to_tensor(value, dtype=var.dtype), name=name)\n            with ops.control_dependencies([op]):\n                if read_value:\n                    return var._read_variable_op() if use_handle else var.read_value()\n                else:\n                    return op\n    return assign_fn",
    "docstring": "Wrap with the proper graph context and device scope. Args: raw_assign_fn: the function to be wrapped. use_handle: if True, the will be applied to the handle of a variable; otherwise it will be applied to the variable itself. Returns: The wrapped function.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\tpu_util.py",
    "ast_data": "FunctionDef name:make_raw_assign_fn arg:raw_assign_fn arg:use_handle arguments arg arg FunctionDef name:assign_fn arg:var arg:value arg:use_locking arg:name arg:read_value arguments arg arg arg arg arg Assign With Call Call Assign Call Call With Call If Return return:yes Call Call Return return:yes Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "register_artifact",
    "source_code": "def register_artifact(setting_name, description, visible=False, off_by_default=False, log_format=None):\n    log_registry.register_artifact_name(setting_name, description, visible, off_by_default, log_format)",
    "docstring": "Enables an artifact to be controlled by the env var and user API with name Args: setting_name: the shorthand name used in the env var and user API description: A description of what this outputs visible: Whether it gets suggested to users by default off_by_default: whether this artifact should be logged when the ancestor loggers are enabled at level DEBUG",
    "type": "function",
    "file_path": "pytorch\\torch\\_logging\\_internal.py",
    "ast_data": "FunctionDef name:register_artifact arg:setting_name arg:description arg:visible arg:off_by_default arg:log_format arguments arg arg arg arg arg Call"
  },
  {
    "library": "pytorch",
    "name": "_use_unsharded_flat_param",
    "source_code": "def _use_unsharded_flat_param(self, padded_unsharded_flat_param: torch.Tensor) -> None:\n    unsharded_size = self.flat_param._unpadded_unsharded_size\n    flat_param_part = padded_unsharded_flat_param[:unsharded_size.numel()]\n    self.flat_param.data = flat_param_part\n    in_forward = self._training_state == HandleTrainingState.FORWARD\n    in_pre_backward = self._training_state == HandleTrainingState.BACKWARD_PRE\n    if self._use_orig_params:\n        if self._skipped_use_sharded_views and in_pre_backward:\n            return\n        self._use_unsharded_views(as_params=not in_forward and (not in_pre_backward))\n    elif in_forward:\n        self._use_unsharded_views(as_params=False)",
    "docstring": "Switch to use the *unpadded* unsharded flat parameter. This is a view into the *padded* unsharded flat parameter.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\fsdp\\_flat_param.py",
    "ast_data": "FunctionDef name:_use_unsharded_flat_param arg:self arg:padded_unsharded_flat_param arguments arg arg Assign Assign Call Assign Assign Compare Assign Compare If If BoolOp Return return:no Call BoolOp If Call"
  },
  {
    "library": "django",
    "name": "ref_alias",
    "source_code": "def ref_alias(self, alias):\n    self.alias_refcount[alias] += 1",
    "docstring": "Increases the reference count for this alias.",
    "type": "method",
    "file_path": "django\\django\\db\\models\\sql\\query.py",
    "ast_data": "FunctionDef name:ref_alias arg:self arg:alias arguments arg arg"
  },
  {
    "library": "pytorch",
    "name": "storage_metadata",
    "source_code": "def storage_metadata(self) -> TensorStorageMetadata:\n    return self._storage_meta",
    "docstring": "Returns a :class: object corresponding to the metadata for the local tensor on current rank",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\tensor\\_shards_wrapper.py",
    "ast_data": "FunctionDef name:storage_metadata arg:self arguments arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "construct_disabled_join_config",
    "source_code": "@staticmethod\ndef construct_disabled_join_config():\n    return _JoinConfig(enable=False, throw_on_early_termination=False, is_first_joinable=False)",
    "docstring": "Return a :class: instance indicating that join-related logic should be disabled. e.g. if the caller is not in a join context manager.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\algorithms\\join.py",
    "ast_data": "FunctionDef name:construct_disabled_join_config arguments Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "_vasicek_entropy",
    "source_code": "def _vasicek_entropy(X, m, *, xp):\n    n = X.shape[-1]\n    X = _pad_along_last_axis(X, m, xp=xp)\n    differences = X[..., 2 * m:] - X[..., :-2 * m]\n    logs = xp.log(n / (2 * m) * differences)\n    return xp.mean(logs, axis=-1)",
    "docstring": "Compute the Vasicek estimator as described in [6] Eq. 1.3.",
    "type": "function",
    "file_path": "scipy\\scipy\\stats\\_entropy.py",
    "ast_data": "FunctionDef name:_vasicek_entropy arg:X arg:m arguments arg arg arg Assign Assign Call Assign Assign Call Return return:yes Call"
  },
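The Vasicek estimator is exposed publicly through `scipy.stats.differential_entropy`; a quick comparison against the exact entropy of a standard normal:

```python
import numpy as np
from scipy import stats

rng = np.random.default_rng(0)
x = rng.standard_normal(1000)
h = stats.differential_entropy(x, method='vasicek')
# Exact differential entropy of N(0, 1) is 0.5 * ln(2*pi*e) ~ 1.4189.
print(h, 0.5 * np.log(2 * np.pi * np.e))
```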
  {
    "library": "pandas",
    "name": "_set_ticklabels",
    "source_code": "def _set_ticklabels(ax: Axes, labels: list[str], is_vertical: bool, **kwargs) -> None:\n    ticks = ax.get_xticks() if is_vertical else ax.get_yticks()\n    if len(ticks) != len(labels):\n        i, remainder = divmod(len(ticks), len(labels))\n        if Version(mpl.__version__) < Version('3.10'):\n            assert remainder == 0, remainder\n        labels *= i\n    if is_vertical:\n        ax.set_xticklabels(labels, **kwargs)\n    else:\n        ax.set_yticklabels(labels, **kwargs)",
    "docstring": "Set the tick labels of a given axis. Due to we need to handle the case of repeated ticks (due to ) and thus we duplicate the number of labels.",
    "type": "function",
    "file_path": "pandas\\pandas\\plotting\\_matplotlib\\boxplot.py",
    "ast_data": "FunctionDef name:_set_ticklabels arg:ax arg:labels arg:is_vertical arguments arg arg arg arg Assign Call Call If Compare Call Call Assign Call Call Call If Compare Call Call Compare If Call Call"
  },
  {
    "library": "tensorflow",
    "name": "iterator_full_type_from_spec",
    "source_code": "def iterator_full_type_from_spec(element_spec):\n    args = fulltypes_for_flat_tensors(element_spec)\n    return full_type_pb2.FullTypeDef(type_id=full_type_pb2.TFT_PRODUCT, args=[full_type_pb2.FullTypeDef(type_id=full_type_pb2.TFT_ITERATOR, args=[full_type_pb2.FullTypeDef(type_id=full_type_pb2.TFT_PRODUCT, args=args)])])",
    "docstring": "Returns a FullTypeDef for an iterator for the elements. Args: element_spec: A nested structure of objects representing the element type specification. Returns: A FullTypeDef for an iterator for the element tensor representation.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\type_utils.py",
    "ast_data": "FunctionDef name:iterator_full_type_from_spec arg:element_spec arguments arg Assign Call Return return:yes Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "wheel",
    "source_code": "def wheel(self, *args: str, **popen_kwargs: Any) -> subprocess.CompletedProcess[str]:\n    return self.python('-m', 'wheel', *args, **popen_kwargs)",
    "docstring": "Run a wheel command in the virtual environment.",
    "type": "method",
    "file_path": "pytorch\\tools\\nightly.py",
    "ast_data": "FunctionDef name:wheel arg:self arguments arg arg arg Return return:yes Call"
  },
  {
    "library": "numpy",
    "name": "legval",
    "source_code": "def legval(x, c, tensor=True):\n    c = np.array(c, ndmin=1, copy=None)\n    if c.dtype.char in '?bBhHiIlLqQpP':\n        c = c.astype(np.double)\n    if isinstance(x, (tuple, list)):\n        x = np.asarray(x)\n    if isinstance(x, np.ndarray) and tensor:\n        c = c.reshape(c.shape + (1,) * x.ndim)\n    if len(c) == 1:\n        c0 = c[0]\n        c1 = 0\n    elif len(c) == 2:\n        c0 = c[0]\n        c1 = c[1]\n    else:\n        nd = len(c)\n        c0 = c[-2]\n        c1 = c[-1]\n        for i in range(3, len(c) + 1):\n            tmp = c0\n            nd = nd - 1\n            c0 = c[-i] - c1 * ((nd - 1) / nd)\n            c1 = tmp + c1 * x * ((2 * nd - 1) / nd)\n    return c0 + c1 * x",
    "docstring": "Evaluate a Legendre series at points x. If is of length `xxccxctensortensortensorxxcccxcxxcc` is multidimensional. The default value is True. Returns ------- values : ndarray, algebra_like The shape of the return value is described above. See Also -------- legval2d, leggrid2d, legval3d, leggrid3d Notes ----- The evaluation uses Clenshaw recursion, aka synthetic division.",
    "type": "function",
    "file_path": "numpy\\numpy\\polynomial\\legendre.py",
    "ast_data": "FunctionDef name:legval arg:x arg:c arg:tensor arguments arg arg arg Assign Call If Compare Assign Call If Call Assign Call If BoolOp Call Assign Call If Compare Call Assign Assign If Compare Call Assign Assign Assign Call Assign Assign For Call Call Assign Assign Assign Assign Return return:yes"
  },
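A short worked example of the Clenshaw evaluation above, using the public `numpy.polynomial.legendre.legval`:

```python
import numpy as np
from numpy.polynomial.legendre import legval

# c = [1, 2, 3] encodes 1*L_0(x) + 2*L_1(x) + 3*L_2(x).
x = np.array([0.0, 0.5, 1.0])
print(legval(x, [1, 2, 3]))  # [-0.5, 1.625, 6.0]
# At x = 1 every Legendre polynomial equals 1, so the last value is 1+2+3.
```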
  {
    "library": "tensorflow",
    "name": "stateless_random_normal",
    "source_code": "@tf_export('random.stateless_normal')\n@dispatch.add_dispatch_support\ndef stateless_random_normal(shape, seed, mean=0.0, stddev=1.0, dtype=dtypes.float32, name=None, alg='auto_select'):\n    with ops.name_scope(name, 'stateless_random_normal', [shape, seed, mean, stddev]) as name:\n        shape = shape_util.shape_tensor(shape)\n        mean = ops.convert_to_tensor(mean, dtype=dtype, name='mean')\n        stddev = ops.convert_to_tensor(stddev, dtype=dtype, name='stddev')\n        key, counter, alg = random_ops_util.get_key_counter_alg(seed, alg)\n        rnd = gen_stateless_random_ops_v2.stateless_random_normal_v2(shape, key=key, counter=counter, dtype=dtype, alg=alg)\n        result = math_ops.add(rnd * stddev, mean, name=name)\n        shape_util.maybe_set_static_shape(result, shape)\n        return result",
    "docstring": "Outputs deterministic pseudorandom values from a normal distribution. This is a stateless version of : if run twice with the same seeds and shapes, it will produce the same pseudorandom numbers. The output is consistent across multiple runs on the same hardware (and between CPU and GPU), but may change between versions of TensorFlow or on non-CPU/GPU hardware. Args: shape: A 1-D integer Tensor or Python array. The shape of the output tensor. seed: A shape [2] Tensor, the seed to the random number generator. Must have dtype or . (When using XLA, only is allowed.) mean: A 0-D Tensor or Python value of type . The mean of the normal distribution. stddev: A 0-D Tensor or Python value of type . The standard deviation of the normal distribution. dtype: The float type of the output: , , , . Defaults to . name: A name for the operation (optional). alg: The RNG algorithm used to generate the random numbers. See for a detailed explanation. Returns: A tensor of the specified shape filled with random normal values.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\stateless_random_ops.py",
    "ast_data": "FunctionDef name:stateless_random_normal arg:shape arg:seed arg:mean arg:stddev arg:dtype arg:name arg:alg arguments arg arg arg arg arg arg arg With Call Assign Call Assign Call Assign Call Assign Call Assign Call Assign Call Call Return return:yes Call"
  },
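The determinism property described above is easy to check with the public `tf.random.stateless_normal`:

```python
import tensorflow as tf

seed = tf.constant([1, 2], dtype=tf.int32)
a = tf.random.stateless_normal(shape=[3], seed=seed, mean=0.0, stddev=1.0)
b = tf.random.stateless_normal(shape=[3], seed=seed)
# Same seed and shape -> bit-identical draws.
print(tf.reduce_all(a == b).numpy())  # True
```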
  {
    "library": "scikit-learn",
    "name": "_infer_dimension",
    "source_code": "def _infer_dimension(spectrum, n_samples):\n    xp, _ = get_namespace(spectrum)\n    ll = xp.empty_like(spectrum)\n    ll[0] = -xp.inf\n    for rank in range(1, spectrum.shape[0]):\n        ll[rank] = _assess_dimension(spectrum, rank, n_samples)\n    return xp.argmax(ll)",
    "docstring": "Infers the dimension of a dataset with a given spectrum. The returned value will be in [1, n_features - 1].",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\decomposition\\_pca.py",
    "ast_data": "FunctionDef name:_infer_dimension arg:spectrum arg:n_samples arguments arg arg Assign Call Assign Call Assign For Call Assign Call Return return:yes Call"
  },
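This dimension inference backs scikit-learn's `PCA(n_components='mle')`; a small sketch with synthetic low-rank data:

```python
import numpy as np
from sklearn.decomposition import PCA

rng = np.random.RandomState(0)
# ~2 meaningful directions embedded in 5 features plus small isotropic noise.
X = rng.randn(500, 2) @ rng.randn(2, 5) + 0.05 * rng.randn(500, 5)
pca = PCA(n_components='mle').fit(X)
print(pca.n_components_)  # the MLE picks out the low-rank structure
```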
  {
    "library": "tensorflow",
    "name": "_TensorArrayGatherGrad",
    "source_code": "@ops.RegisterGradient('TensorArrayGather')\n@ops.RegisterGradient('TensorArrayGatherV2')\n@ops.RegisterGradient('TensorArrayGatherV3')\ndef _TensorArrayGatherGrad(op: ops.Operation, grad):\n    handle = op.inputs[0]\n    indices = op.inputs[1]\n    flow = op.inputs[2]\n    dtype = op.get_attr('dtype')\n    grad_source = _GetGradSource(grad)\n    g = tensor_array_ops.TensorArray(dtype=dtype, handle=handle, flow=flow, colocate_with_first_write_call=False).grad(source=grad_source, flow=flow)\n    u_g = g.scatter(indices, grad)\n    return [None, None, u_g.flow]",
    "docstring": "Gradient for TensorArrayGather. Args: op: Forward TensorArrayGather op. grad: Gradient to TensorArrayGather. Returns: A flow , which can be used in control dependencies to force the write of to the gradient .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\tensor_array_grad.py",
    "ast_data": "FunctionDef name:_TensorArrayGatherGrad arg:op arg:grad arguments arg arg Assign Assign Assign Assign Call Assign Call Assign Call Call Assign Call Return return:yes Call Call Call"
  },
  {
    "library": "scipy",
    "name": "_make_interp_per_full_matr",
    "source_code": "def _make_interp_per_full_matr(x, y, t, k):\n    x, y, t = map(np.asarray, (x, y, t))\n    n = x.size\n    matr = np.zeros((n + k - 1, n + k - 1))\n    for i in range(k - 1):\n        bb = _dierckx.evaluate_all_bspl(t, k, x[0], k, i + 1)\n        matr[i, :k + 1] += bb\n        bb = _dierckx.evaluate_all_bspl(t, k, x[-1], n + k - 1, i + 1)[:-1]\n        matr[i, -k:] -= bb\n    for i in range(n):\n        xval = x[i]\n        if xval == t[k]:\n            left = k\n        else:\n            left = np.searchsorted(t, xval) - 1\n        bb = _dierckx.evaluate_all_bspl(t, k, xval, left)\n        matr[i + k - 1, left - k:left + 1] = bb\n    b = np.r_[[0] * (k - 1), y]\n    c = solve(matr, b)\n    return c",
    "docstring": "Returns a solution of a system for B-spline interpolation with periodic boundary conditions. First `` is supposed to be taken on circle.",
    "type": "function",
    "file_path": "scipy\\scipy\\interpolate\\_bsplines.py",
    "ast_data": "FunctionDef name:_make_interp_per_full_matr arg:x arg:y arg:t arg:k arguments arg arg arg arg Assign Call Assign Assign Call For Call Assign Call Assign Call For Call Assign If Compare Assign Assign Call Assign Call Assign Assign Assign Call Return return:yes"
  },
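The public entry point for this periodic system is `scipy.interpolate.make_interp_spline` with `bc_type='periodic'`; a minimal sketch:

```python
import numpy as np
from scipy.interpolate import make_interp_spline

x = np.linspace(0, 2 * np.pi, 10)
y = np.sin(x)
y[-1] = y[0]  # periodic interpolation requires matching endpoint values
spl = make_interp_spline(x, y, k=3, bc_type='periodic')
# The first k-1 derivatives agree at the two boundary points.
print(spl(0.0, nu=1), spl(2 * np.pi, nu=1))
```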
  {
    "library": "tensorflow",
    "name": "get_gcc_version",
    "source_code": "def get_gcc_version():\n    key = 'gcc_ver'\n    out, err = run_shell_cmd(cmds_all[PLATFORM.lower()][key])\n    if err and FLAGS.debug:\n        print('Error in detecting GCC version:\\n %s' % str(err))\n    return out.strip(b'\\n')",
    "docstring": "Retrieves version of GCC detected. Returns: String that is the version of GCC. e.g. '7.3.0'",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\tools\\tensorflow_builder\\config_detector\\config_detector.py",
    "ast_data": "FunctionDef name:get_gcc_version arguments Assign Assign Call Call If BoolOp Call Call Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "_convert_datetimes",
    "source_code": "def _convert_datetimes(sas_datetimes: pd.Series, unit: str) -> pd.Series:\n    td = (_sas_origin - _unix_origin).as_unit('s')\n    if unit == 's':\n        millis = cast_from_unit_vectorized(sas_datetimes._values, unit='s', out_unit='ms')\n        dt64ms = millis.view('M8[ms]') + td\n        return pd.Series(dt64ms, index=sas_datetimes.index, copy=False)\n    else:\n        vals = np.array(sas_datetimes, dtype='M8[D]') + td\n        return pd.Series(vals, dtype='M8[s]', index=sas_datetimes.index, copy=False)",
    "docstring": "Convert to Timestamp if possible, otherwise to datetime.datetime. SAS float64 lacks precision for more than ms resolution so the fit to datetime.datetime is ok. Parameters ---------- sas_datetimes : {Series, Sequence[float]} Dates or datetimes in SAS unit : {'d', 's'} \"d\" if the floats represent dates, \"s\" for datetimes Returns ------- Series Series of datetime64 dtype or datetime.datetime.",
    "type": "function",
    "file_path": "pandas\\pandas\\io\\sas\\sas7bdat.py",
    "ast_data": "FunctionDef name:_convert_datetimes arg:sas_datetimes arg:unit arguments arg arg Assign Call If Compare Assign Call Assign Call Return return:yes Call Assign Call Return return:yes Call"
  },
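The epoch arithmetic behind `_convert_datetimes` can be reproduced directly; a sketch showing the constant offset between the SAS and Unix origins:

```python
import pandas as pd

# SAS stores datetimes as seconds since 1960-01-01; pandas counts from
# the Unix origin 1970-01-01, hence the fixed offset added below.
sas_seconds = pd.Series([0.0, 86400.0])
offset = pd.Timestamp('1960-01-01') - pd.Timestamp('1970-01-01')
print(pd.to_datetime(sas_seconds, unit='s') + offset)
# 1960-01-01 00:00:00 and 1960-01-02 00:00:00
```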
  {
    "library": "tensorflow",
    "name": "_for_each_trt_node",
    "source_code": "def _for_each_trt_node(self, graph_def, fn):\n    for node in graph_def.node:\n        if node.op == _TRT_ENGINE_OP_NAME:\n            fn(node)\n    for func in graph_def.library.function:\n        for node in func.node_def:\n            if node.op == _TRT_ENGINE_OP_NAME:\n                fn(node)",
    "docstring": "Helper method to manipulate all TRTEngineOps in a GraphDef.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\compiler\\tensorrt\\trt_convert.py",
    "ast_data": "FunctionDef name:_for_each_trt_node arg:self arg:graph_def arg:fn arguments arg arg arg For If Compare Call For For If Compare Call"
  },
  {
    "library": "pytorch",
    "name": "get_graph_hash",
    "source_code": "def get_graph_hash(tensors):\n    return torch._C._lazy._get_graph_hash(tensors)",
    "docstring": "Return the graph hash for the passed in lazy tensors",
    "type": "function",
    "file_path": "pytorch\\torch\\_lazy\\computation.py",
    "ast_data": "FunctionDef name:get_graph_hash arg:tensors arguments arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "choose_from_datasets",
    "source_code": "@staticmethod\ndef choose_from_datasets(datasets, choice_dataset, stop_on_empty_dataset=True) -> 'DatasetV2':\n    from tensorflow.python.data.ops import choose_from_datasets_op\n    return choose_from_datasets_op._choose_from_datasets(datasets, choice_dataset, stop_on_empty_dataset)",
    "docstring": "Creates a dataset that deterministically chooses elements from . For example, given the following datasets: The elements of will be: Args: datasets: A non-empty list of objects with compatible structure. choice_dataset: A of scalar tensors between and . stop_on_empty_dataset: If , selection stops if it encounters an empty dataset. If , it skips empty datasets. It is recommended to set it to . Otherwise, the selected elements start off as the user intends, but may change as input datasets become empty. This can be difficult to detect since the dataset starts off looking correct. Defaults to . Returns: A new with the transformation applied as described above. Raises: TypeError: If or has the wrong type. ValueError: If is empty.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\data\\ops\\dataset_ops.py",
    "ast_data": "FunctionDef name:choose_from_datasets arg:datasets arg:choice_dataset arg:stop_on_empty_dataset arguments arg arg arg Return return:yes Call"
  },
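A usage sketch of `tf.data.Dataset.choose_from_datasets`, interleaving three repeating datasets under an index dataset:

```python
import tensorflow as tf

datasets = [tf.data.Dataset.from_tensors(t).repeat() for t in ('a', 'b', 'c')]
choice = tf.data.Dataset.range(3).repeat(2)  # yields 0, 1, 2, 0, 1, 2
result = tf.data.Dataset.choose_from_datasets(datasets, choice)
print([e.numpy() for e in result])  # [b'a', b'b', b'c', b'a', b'b', b'c']
```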
  {
    "library": "django",
    "name": "local",
    "source_code": "@property\ndef local(self):\n    return self.srs.local",
    "docstring": "Is this Spatial Reference local?",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\db\\backends\\base\\models.py",
    "ast_data": "FunctionDef name:local arg:self arguments arg Return return:yes"
  },
  {
    "library": "numpy",
    "name": "_gcd",
    "source_code": "def _gcd(a, b):\n    if not (math.isfinite(a) and math.isfinite(b)):\n        raise ValueError(f'Can only find greatest common divisor of finite arguments, found \"{a}\" and \"{b}\"')\n    while b:\n        a, b = (b, a % b)\n    return a",
    "docstring": "Calculate the greatest common divisor of a and b",
    "type": "function",
    "file_path": "numpy\\numpy\\_core\\_internal.py",
    "ast_data": "FunctionDef name:_gcd arg:a arg:b arguments arg arg If BoolOp Call Call Raise Call While Assign Return return:yes"
  },
  {
    "library": "numpy",
    "name": "as_deref",
    "source_code": "def as_deref(expr):\n    return Expr(Op.DEREF, expr)",
    "docstring": "Return object as dereferencing expression.",
    "type": "function",
    "file_path": "numpy\\numpy\\f2py\\symbolic.py",
    "ast_data": "FunctionDef name:as_deref arg:expr arguments arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_next_csv_row",
    "source_code": "def _next_csv_row(filenames, num_cols, field_delim, use_quote_delim, header, file_io_fn):\n    for fn in filenames:\n        with file_io_fn(fn) as f:\n            rdr = csv.reader(f, delimiter=field_delim, quoting=csv.QUOTE_MINIMAL if use_quote_delim else csv.QUOTE_NONE)\n            row_num = 1\n            if header:\n                next(rdr)\n                row_num += 1\n            for csv_row in rdr:\n                if len(csv_row) != num_cols:\n                    raise ValueError(f'Problem inferring types: CSV row {row_num} has {len(csv_row)} number of fields. Expected: {num_cols}.')\n                row_num += 1\n                yield csv_row",
    "docstring": "Generator that yields rows of CSV file(s) in order.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\data\\experimental\\ops\\readers.py",
    "ast_data": "FunctionDef name:_next_csv_row arg:filenames arg:num_cols arg:field_delim arg:use_quote_delim arg:header arg:file_io_fn arguments arg arg arg arg arg arg For With Call Assign Call Assign If Call For If Compare Call Raise Call Call"
  },
  {
    "library": "tensorflow",
    "name": "max",
    "source_code": "@property\ndef max(self):\n    if self.is_quantized or self.base_dtype in (bool, string, complex64, complex128):\n        raise TypeError(f'Cannot find maximum value of {self} with {('quantized type' if self.is_quantized else 'type')} {self.base_dtype}.')\n    try:\n        return ml_dtypes.finfo(self.as_numpy_dtype).max\n    except:\n        try:\n            return ml_dtypes.iinfo(self.as_numpy_dtype).max\n        except:\n            raise TypeError(f'Cannot find maximum value of {self}.')",
    "docstring": "Returns the maximum representable value in this data type. Raises: TypeError: if this is a non-numeric, unordered, or quantized type.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\dtypes.py",
    "ast_data": "FunctionDef name:max arg:self arguments arg If BoolOp Compare Raise Call Try Return return:yes Call ExceptHandler Try Return return:yes Call ExceptHandler Raise Call"
  },
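A quick illustration of the `max` property on TensorFlow dtypes:

```python
import tensorflow as tf

print(tf.float32.max)  # 3.4028235e+38
print(tf.int8.max)     # 127
# tf.string.max would raise TypeError: strings are unordered.
```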
  {
    "library": "matplotlib",
    "name": "fontName",
    "source_code": "def fontName(self, fontprop):\n    if isinstance(fontprop, str):\n        filenames = [fontprop]\n    elif mpl.rcParams['pdf.use14corefonts']:\n        filenames = _fontManager._find_fonts_by_props(fontprop, fontext='afm', directory=RendererPdf._afm_font_dir)\n    else:\n        filenames = _fontManager._find_fonts_by_props(fontprop)\n    first_Fx = None\n    for fname in filenames:\n        Fx = self._fontNames.get(fname)\n        if not first_Fx:\n            first_Fx = Fx\n        if Fx is None:\n            Fx = next(self._internal_font_seq)\n            self._fontNames[fname] = Fx\n            _log.debug('Assigning font %s = %r', Fx, fname)\n            if not first_Fx:\n                first_Fx = Fx\n    return first_Fx",
    "docstring": "Select a font based on fontprop and return a name suitable for ``. If fontprop is a string, it will be interpreted as the filename of the font.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\backends\\backend_pdf.py",
    "ast_data": "FunctionDef name:fontName arg:self arg:fontprop arguments arg arg If Call Assign If Assign Call Assign Call Assign For Assign Call If Assign If Compare Assign Call Assign Call If Assign Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "is_cupy_namespace",
    "source_code": "@lru_cache(100)\ndef is_cupy_namespace(xp: Namespace) -> bool:\n    return xp.__name__ in {'cupy', _compat_module_name() + '.cupy'}",
    "docstring": "Returns True if is a CuPy namespace. This includes both CuPy itself and the version wrapped by array-api-compat. See Also -------- array_namespace is_numpy_namespace is_torch_namespace is_ndonnx_namespace is_dask_namespace is_jax_namespace is_pydata_sparse_namespace is_array_api_strict_namespace",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\externals\\array_api_compat\\common\\_helpers.py",
    "ast_data": "FunctionDef name:is_cupy_namespace arg:xp arguments arg Return return:yes Compare Call Call"
  },
  {
    "library": "django",
    "name": "deleted_forms",
    "source_code": "@property\ndef deleted_forms(self):\n    if not self.is_valid() or not self.can_delete:\n        return []\n    if not hasattr(self, '_deleted_form_indexes'):\n        self._deleted_form_indexes = []\n        for i, form in enumerate(self.forms):\n            if i >= self.initial_form_count() and (not form.has_changed()):\n                continue\n            if self._should_delete_form(form):\n                self._deleted_form_indexes.append(i)\n    return [self.forms[i] for i in self._deleted_form_indexes]",
    "docstring": "Return a list of forms that have been marked for deletion.",
    "type": "method",
    "file_path": "django\\django\\forms\\formsets.py",
    "ast_data": "FunctionDef name:deleted_forms arg:self arguments arg If BoolOp Call Return return:no If Call Assign For Call If BoolOp Compare Call Call If Call Call Return return:yes"
  },
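A standalone sketch of `deleted_forms` with a formset built via `formset_factory(can_delete=True)`; `settings.configure()` is a minimal setup assumed only for illustration:

```python
import django
from django.conf import settings

settings.configure()  # minimal standalone configuration for illustration
django.setup()

from django import forms

class ItemForm(forms.Form):
    name = forms.CharField()

ItemFormSet = forms.formset_factory(ItemForm, can_delete=True)
data = {
    'form-TOTAL_FORMS': '2', 'form-INITIAL_FORMS': '2',
    'form-0-name': 'keep', 'form-0-DELETE': '',
    'form-1-name': 'drop', 'form-1-DELETE': 'on',
}
formset = ItemFormSet(data)
assert formset.is_valid()
print([f.cleaned_data['name'] for f in formset.deleted_forms])  # ['drop']
```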
  {
    "library": "scikit-learn",
    "name": "predict_log_proba",
    "source_code": "def predict_log_proba(self, X):\n    check_is_fitted(self)\n    X = self._check_X(X)\n    jll = self._joint_log_likelihood(X)\n    log_prob_x = logsumexp(jll, axis=1)\n    return jll - np.atleast_2d(log_prob_x).T",
    "docstring": "Return log-probability estimates for the test vector X. Parameters ---------- X : array-like of shape (n_samples, n_features) The input samples. Returns ------- C : array-like of shape (n_samples, n_classes) Returns the log-probability of the samples for each class in the model. The columns correspond to the classes in sorted order, as they appear in the attribute :term:.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\naive_bayes.py",
    "ast_data": "FunctionDef name:predict_log_proba arg:self arg:X arguments arg arg Call Assign Call Assign Call Assign Call Return return:yes Call"
  },
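A small sketch of `predict_log_proba` via `GaussianNB`, checking that exponentiated rows are normalized as the logsumexp subtraction above guarantees:

```python
import numpy as np
from sklearn.naive_bayes import GaussianNB

X = np.array([[-2.0], [-1.5], [1.5], [2.0]])
y = np.array([0, 0, 1, 1])
clf = GaussianNB().fit(X, y)
log_proba = clf.predict_log_proba(np.array([[-1.0], [1.0]]))
print(np.exp(log_proba).sum(axis=1))  # each row exponentiates and sums to 1
```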
  {
    "library": "kornia",
    "name": "apply_transform_box",
    "source_code": "def apply_transform_box(self, input: Boxes, params: Dict[str, Tensor], flags: Dict[str, Any], transform: Optional[Tensor]=None) -> Boxes:\n    return input",
    "docstring": "Process masks corresponding to the inputs that are transformed.",
    "type": "method",
    "file_path": "kornia\\kornia\\augmentation\\_2d\\geometric\\elastic_transform.py",
    "ast_data": "FunctionDef name:apply_transform_box arg:self arg:input arg:params arg:flags arg:transform arguments arg arg arg arg arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_handle_col_wise_sharding",
    "source_code": "def _handle_col_wise_sharding(input, world_size, weight, local_shard, max_norm, norm_type, padding_idx, pg):\n    gathered_inputs = all_gather(input, group=pg)\n    if max_norm is not None:\n        local_shard = _handle_max_norm_col_wise(max_norm, norm_type, local_shard, input, world_size, gathered_inputs, pg)\n    output = _handle_col_wise_sharding_base(torch.nn.functional.embedding, len(input.size()), input, world_size, weight, local_shard, pg, gathered_inputs, padding_idx=padding_idx)\n    return (output, local_shard)",
    "docstring": "Entry-point function to handle the logic of col-wise sharding of weight for embedding. (Detailed explanations of the logic can be found in the comment for sharded_embedding.) Args: input: list of ID used for lookup and aggregation. world_size: number of ranks. weight: sharded weight tensor. local_shard: col-wise shared local weight used for lookup. max_norm: If given, each embedding vector with norm larger than max_norm is renormalized to have norm max_norm. Note: this will modify weight in-place. norm_type: The p in the p-norm to compute for the max_norm option. padding_idx: If specified, the entries at padding_idx do not contribute to the gradient; therefore, the embedding vector at padding_idx is not updated during training, i.e. it remains as a fixed \"pad\". pg: process group. Returns: final result of lookup.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\_shard\\sharding_spec\\chunk_sharding_spec_ops\\embedding.py",
    "ast_data": "FunctionDef name:_handle_col_wise_sharding arg:input arg:world_size arg:weight arg:local_shard arg:max_norm arg:norm_type arg:padding_idx arg:pg arguments arg arg arg arg arg arg arg arg Assign Call If Compare Assign Call Assign Call Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "wrap_module_to",
    "source_code": "def wrap_module_to(self: torch.nn.utils.rnn.PackedSequence, *args, **kwargs) -> torch.nn.utils.rnn.PackedSequence:\n    ex = torch.tensor((), dtype=self.data.dtype, device=self.data.device).to(*args, **kwargs)\n    if ex.device.type == custom_backend_name:\n        return self.to(*args, **kwargs)\n    kwargs.update({'device': custom_backend_name})\n    return self.to(*args, **kwargs)",
    "docstring": "Move all model parameters and buffers to the custom device. This also makes associated parameters and buffers different objects. So it should be called before constructing optimizer if the module will live on device while being optimized. .. note:: This method modifies the module in-place. Args: device (int, optional): if specified, all parameters will be copied to that device",
    "type": "function",
    "file_path": "pytorch\\torch\\utils\\backend_registration.py",
    "ast_data": "FunctionDef name:wrap_module_to arg:self arguments arg arg arg Assign Call Call If Compare Return return:yes Call Call Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "get_frame_on",
    "source_code": "def get_frame_on(self):\n    return self._frameon",
    "docstring": "Get whether the Axes rectangle patch is drawn.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\axes\\_base.py",
    "ast_data": "FunctionDef name:get_frame_on arg:self arguments arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "constexpr_next_power_of_2",
    "source_code": "@triton_builtin\ndef constexpr_next_power_of_2(n: tl.constexpr, *, _builder: object=None) -> tl.constexpr:\n    assert isinstance(n, tl.constexpr)\n    return tl.constexpr(triton.next_power_of_2(n.value))",
    "docstring": "A version triton.next_power_of_two that can be used within a kernel on constants.",
    "type": "function",
    "file_path": "pytorch\\torch\\_inductor\\runtime\\triton_helpers.py",
    "ast_data": "FunctionDef name:constexpr_next_power_of_2 arg:n arguments arg arg Call Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_get_func",
    "source_code": "def _get_func(self):\n    if self._obj_func is not None:\n        return self._obj_func\n    else:\n        return self._func",
    "docstring": "Returns decorated function object. For a class method, use self._obj_func to provide instance.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\lite\\python\\authoring\\authoring.py",
    "ast_data": "FunctionDef name:_get_func arg:self arguments arg If Compare Return return:yes Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_eager_reshape",
    "source_code": "def _eager_reshape(tensor, shape, ctx):\n    attr_t = tensor._datatype_enum()\n    attr_tshape, (shape,) = execute.args_to_matching_eager([shape], ctx, [dtypes.int32, dtypes.int64], dtypes.int32)\n    inputs_flat = [tensor, shape]\n    attrs = ('T', attr_t, 'Tshape', attr_tshape)\n    [result] = execute.execute(b'Reshape', 1, inputs=inputs_flat, attrs=attrs, ctx=ctx)\n    return result",
    "docstring": "Eager-only version of Reshape op; requires tensor is an eager Tensor.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\constant_op.py",
    "ast_data": "FunctionDef name:_eager_reshape arg:tensor arg:shape arg:ctx arguments arg arg arg Assign Call Assign Call Assign Assign Assign Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "extract_type",
    "source_code": "def extract_type(self, ty: str) -> str:\n    if ty[0] == '*':\n        return 'O'\n    elif ty == 'nvTmaDesc':\n        raise NotImplementedError('nvTmaDesc kernels are not yet supported')\n    return StaticallyLaunchedCudaKernel.type_mappings()[ty]",
    "docstring": "Takes a triton type from CompiledKernel.signature and converts it into a single char encoding. _StaticCudaLauncher will switch on this char to figure out what type the underlying value should be passed to the triton kernel as.",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\runtime\\static_cuda_launcher.py",
    "ast_data": "FunctionDef name:extract_type arg:self arg:ty arguments arg arg If Compare Return return:yes If Compare Raise Call Return return:yes Call"
  },
  {
    "library": "cryptography",
    "name": "finalize",
    "source_code": "@abc.abstractmethod\ndef finalize(self) -> bytes:\n    pass",
    "docstring": "Returns the results of processing the final block as bytes.",
    "type": "method",
    "file_path": "cryptography\\src\\cryptography\\hazmat\\primitives\\ciphers\\base.py",
    "ast_data": "FunctionDef name:finalize arg:self arguments arg"
  },
  {
    "library": "pytorch",
    "name": "cuda",
    "source_code": "def cuda(self, device=None, non_blocking=False) -> Union[_StorageBase, TypedStorage]:\n    device2 = torch.device('cuda', device) if device else torch.device('cuda')\n    return self.to(device=device2, non_blocking=non_blocking)",
    "docstring": "Returns a copy of this object in CUDA memory. If this object is already in CUDA memory and on the correct device, then no copy is performed and the original object is returned. Args: device (int): The destination GPU id. Defaults to the current device. non_blocking (bool): If `` and the source is in pinned memory, the copy will be asynchronous with respect to the host. Otherwise, the argument has no effect.",
    "type": "method",
    "file_path": "pytorch\\torch\\storage.py",
    "ast_data": "FunctionDef name:cuda arg:self arg:device arg:non_blocking arguments arg arg arg Assign Call Call Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "dtypes",
    "source_code": "@property\ndef dtypes(self) -> Iterable[Dtype]:\n    return self.data.dtypes",
    "docstring": "Dtypes. Returns ------- dtypes Dtype of each of the DataFrame's columns.",
    "type": "method",
    "file_path": "pandas\\pandas\\io\\formats\\info.py",
    "ast_data": "FunctionDef name:dtypes arg:self arguments arg Return return:yes"
  },
  {
    "library": "scipy",
    "name": "_van_es_entropy",
    "source_code": "def _van_es_entropy(X, m, *, xp):\n    n = X.shape[-1]\n    difference = X[..., m:] - X[..., :-m]\n    term1 = 1 / (n - m) * xp.sum(xp.log((n + 1) / m * difference), axis=-1)\n    k = xp.arange(m, n + 1, dtype=term1.dtype)\n    return term1 + xp.sum(1 / k) + math.log(m) - math.log(n + 1)",
    "docstring": "Compute the van Es estimator as described in [6].",
    "type": "function",
    "file_path": "scipy\\scipy\\stats\\_entropy.py",
    "ast_data": "FunctionDef name:_van_es_entropy arg:X arg:m arguments arg arg arg Assign Assign Assign Call Call Assign Call Return return:yes Call Call Call"
  },
  {
    "library": "scikit-learn",
    "name": "init_zero_coef",
    "source_code": "def init_zero_coef(self, X, dtype=None):\n    n_features = X.shape[1]\n    n_classes = self.base_loss.n_classes\n    if self.fit_intercept:\n        n_dof = n_features + 1\n    else:\n        n_dof = n_features\n    if self.base_loss.is_multiclass:\n        coef = np.zeros_like(X, shape=(n_classes, n_dof), dtype=dtype, order='F')\n    else:\n        coef = np.zeros_like(X, shape=n_dof, dtype=dtype)\n    return coef",
    "docstring": "Allocate coef of correct shape with zeros. Parameters: ----------- X : {array-like, sparse matrix} of shape (n_samples, n_features) Training data. dtype : data-type, default=None Overrides the data type of coef. With dtype=None, coef will have the same dtype as X. Returns ------- coef : ndarray of shape (n_dof,) or (n_classes, n_dof) Coefficients of a linear model.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\linear_model\\_linear_loss.py",
    "ast_data": "FunctionDef name:init_zero_coef arg:self arg:X arg:dtype arguments arg arg arg Assign Assign If Assign Assign If Assign Call Assign Call Return return:yes"
  },
  {
    "library": "scipy",
    "name": "Mishra06",
    "source_code": "class Mishra06(Benchmark):\n\n    def __init__(self, dimensions=2):\n        Benchmark.__init__(self, dimensions)\n        self._bounds = list(zip([-10.0] * self.N, [10.0] * self.N))\n        self.global_optimum = [[2.88631, 1.82326]]\n        self.fglob = -2.28395\n\n    def fun(self, x, *args):\n        self.nfev += 1\n        a = 0.1 * ((x[0] - 1) ** 2 + (x[1] - 1) ** 2)\n        u = (cos(x[0]) + cos(x[1])) ** 2\n        v = (sin(x[0]) + sin(x[1])) ** 2\n        return a - log((sin(u) ** 2 - cos(v) ** 2 + x[0]) ** 2)",
    "docstring": "Mishra 6 objective function. This class defines the Mishra 6 [1]_ global optimization problem. This is a multimodal minimization problem defined as follows: .. math:: f_{\\text{Mishra06}}(x) = -\\log{\\left [ \\sin^2 ((\\cos(x_1) + \\cos(x_2))^2) - \\cos^2 ((\\sin(x_1) + \\sin(x_2))^2) + x_1 \\right ]^2} + 0.01 \\left[(x_1 -1)^2 + (x_2 - 1)^2 \\right] with :math: for :math:. *Global optimum*: :math: for :math: .. [1] Mishra, S. Global Optimization by Differential Evolution and Particle Swarm Methods: Evaluation on Some Benchmark Functions. Munich Personal RePEc Archive, 2006, 1005 TODO line 397",
    "type": "class",
    "file_path": "scipy\\benchmarks\\benchmarks\\go_benchmark_functions\\go_funcs_M.py",
    "ast_data": "ClassDef name:Mishra06 FunctionDef name:__init__ arg:self arg:dimensions arguments arg arg Call Assign Call Call Assign Assign FunctionDef name:fun arg:self arg:x arguments arg arg arg Assign Assign Call Call Assign Call Call Return return:yes Call Call Call"
  },
  {
    "library": "matplotlib",
    "name": "set_text",
    "source_code": "def set_text(self, s):\n    self._text.set_text(s)\n    self.stale = True",
    "docstring": "Set the text of this area as a string.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\offsetbox.py",
    "ast_data": "FunctionDef name:set_text arg:self arg:s arguments arg arg Call Assign"
  },
  {
    "library": "tensorflow",
    "name": "as_graph_def",
    "source_code": "def as_graph_def(self, from_version=None, add_shapes=False, use_pybind11_proto=False) -> graph_pb2.GraphDef:\n    if is_oss:\n        use_pybind11_proto = False\n    result, _ = self._as_graph_def(from_version, add_shapes, use_pybind11_proto=use_pybind11_proto)\n    return result",
    "docstring": "Returns a serialized representation of this graph. The serialized can be imported into another (using ) or used with the . This method is thread-safe. Args: from_version: Optional. If this is set, returns a containing only the nodes that were added to this graph since its property had the given value. add_shapes: If true, adds an \"_output_shapes\" list attr to each node with the inferred shapes of each of its outputs. use_pybind11_proto: If true, If true, uses the c++ pybind11_proto api to get the GraphDef proto directly from c++, instead of through a TF buffer. See for reference. Returns: A []( protocol buffer. Raises: ValueError: If the would be too large.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\ops.py",
    "ast_data": "FunctionDef name:as_graph_def arg:self arg:from_version arg:add_shapes arg:use_pybind11_proto arguments arg arg arg arg If Assign Assign Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "ToolToggleBase",
    "source_code": "class ToolToggleBase(ToolBase):\n    radio_group = None\n    \"\\n    Attribute to group 'radio' like tools (mutually exclusive).\\n\\n    `str` that identifies the group or **None** if not belonging to a group.\\n    \"\n    cursor = None\n    'Cursor to use when the tool is active.'\n    default_toggled = False\n    'Default of toggled state.'\n\n    def __init__(self, *args, **kwargs):\n        self._toggled = kwargs.pop('toggled', self.default_toggled)\n        super().__init__(*args, **kwargs)\n\n    def trigger(self, sender, event, data=None):\n        if self._toggled:\n            self.disable(event)\n        else:\n            self.enable(event)\n        self._toggled = not self._toggled\n\n    def enable(self, event=None):\n        pass\n\n    def disable(self, event=None):\n        pass\n\n    @property\n    def toggled(self):\n        return self._toggled\n\n    def set_figure(self, figure):\n        toggled = self.toggled\n        if toggled:\n            if self.figure:\n                self.trigger(self, None)\n            else:\n                self._toggled = False\n        super().set_figure(figure)\n        if toggled:\n            if figure:\n                self.trigger(self, None)\n            else:\n                self._toggled = True",
    "docstring": "Toggleable tool. Every time it is triggered, it switches between enable and disable. Parameters ---------- `toggled` if present and True, sets the initial state of the Tool Arbitrary keyword arguments to be consumed by the Tool",
    "type": "class",
    "file_path": "matplotlib\\lib\\matplotlib\\backend_tools.py",
    "ast_data": "ClassDef name:ToolToggleBase Assign Assign Assign FunctionDef name:__init__ arg:self arguments arg arg arg Assign Call Call Call FunctionDef name:trigger arg:self arg:sender arg:event arg:data arguments arg arg arg arg If Call Call Assign FunctionDef name:enable arg:self arg:event arguments arg arg FunctionDef name:disable arg:self arg:event arguments arg arg FunctionDef name:toggled arg:self arguments arg Return return:yes FunctionDef name:set_figure arg:self arg:figure arguments arg arg Assign If If Call Assign Call Call If If Call Assign"
  },
  {
    "library": "matplotlib",
    "name": "hist2d",
    "source_code": "@_api.make_keyword_only('3.10', 'range')\n@_preprocess_data(replace_names=['x', 'y', 'weights'])\n@_docstring.interpd\ndef hist2d(self, x, y, bins=10, range=None, density=False, weights=None, cmin=None, cmax=None, **kwargs):\n    h, xedges, yedges = np.histogram2d(x, y, bins=bins, range=range, density=density, weights=weights)\n    if cmin is not None:\n        h[h < cmin] = None\n    if cmax is not None:\n        h[h > cmax] = None\n    pc = self.pcolormesh(xedges, yedges, h.T, **kwargs)\n    self.set_xlim(xedges[0], xedges[-1])\n    self.set_ylim(yedges[0], yedges[-1])\n    return (h, xedges, yedges, pc)",
    "docstring": "Make a 2D histogram plot. Parameters ---------- x, y : array-like, shape (n, ) Input values bins : None or int or [int, int] or array-like or [array, array] The bin specification: - If int, the number of bins for the two dimensions (`~.Axes.hist~.Axes.pcolormesh~.matplotlib.collections.QuadMesh~.Axes.pcolormesh~matplotlib.collections.QuadMesh.colors.LogNorm.colors.PowerNorm`.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\axes\\_axes.py",
    "ast_data": "FunctionDef name:hist2d arg:self arg:x arg:y arg:bins arg:range arg:density arg:weights arg:cmin arg:cmax arguments arg arg arg arg arg arg arg arg arg arg Assign Call If Compare Assign Compare If Compare Assign Compare Assign Call Call Call Return return:yes Call Call"
  },
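A minimal `hist2d` usage sketch, including the logarithmic color scale mentioned in the notes (a `.colors.LogNorm` instance passed as *norm*):

```python
# 2D histogram of correlated Gaussian samples with a log color scale.
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import LogNorm

rng = np.random.default_rng(0)
x = rng.standard_normal(10_000)
y = x + 0.5 * rng.standard_normal(10_000)

fig, ax = plt.subplots()
h, xedges, yedges, im = ax.hist2d(x, y, bins=50, norm=LogNorm())
fig.colorbar(im, ax=ax)  # `im` is the QuadMesh returned as the 4th value
```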
  {
    "library": "pytorch",
    "name": "weight_norm",
    "source_code": "def weight_norm(module: Module, name: str='weight', dim: int=0):\n    _weight_norm = _WeightNorm(dim)\n    parametrize.register_parametrization(module, name, _weight_norm, unsafe=True)\n\n    def _weight_norm_compat_hook(state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs):\n        g_key = f'{prefix}{name}_g'\n        v_key = f'{prefix}{name}_v'\n        if g_key in state_dict and v_key in state_dict:\n            original0 = state_dict.pop(g_key)\n            original1 = state_dict.pop(v_key)\n            state_dict[f'{prefix}parametrizations.{name}.original0'] = original0\n            state_dict[f'{prefix}parametrizations.{name}.original1'] = original1\n    module._register_load_state_dict_pre_hook(_weight_norm_compat_hook)\n    return module",
    "docstring": "Apply weight normalization to a parameter in the given module. .. math:: \\mathbf{w} = g \\dfrac{\\mathbf{v}}{\\|\\mathbf{v}\\|} Weight normalization is a reparameterization that decouples the magnitude of a weight tensor from its direction. This replaces the parameter specified by :attr: with two parameters: one specifying the magnitude and one specifying the direction. By default, with ``. See Args: module (Module): containing module name (str, optional): name of weight parameter dim (int, optional): dimension over which to compute the norm Returns: The original module with the weight norm hook Example:: >>> m = weight_norm(nn.Linear(20, 40), name='weight') >>> m ParametrizedLinear( in_features=20, out_features=40, bias=True (parametrizations): ModuleDict( (weight): ParametrizationList( (0): _WeightNorm() ) ) ) >>> m.parametrizations.weight.original0.size() torch.Size([40, 1]) >>> m.parametrizations.weight.original1.size() torch.Size([40, 20])",
    "type": "function",
    "file_path": "pytorch\\torch\\nn\\utils\\parametrizations.py",
    "ast_data": "FunctionDef name:weight_norm arg:module arg:name arg:dim arguments arg arg arg Assign Call Call FunctionDef name:_weight_norm_compat_hook arg:state_dict arg:prefix arg:local_metadata arg:strict arg:missing_keys arg:unexpected_keys arg:error_msgs arguments arg arg arg arg arg arg arg Assign Assign If BoolOp Compare Compare Assign Call Assign Call Assign Assign Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "codegen_template",
    "source_code": "def codegen_template(self, template_node: BaseSchedulerNode, epilogue_nodes: Sequence[BaseSchedulerNode], prologue_nodes: Sequence[BaseSchedulerNode]) -> Optional[str]:\n    raise NotImplementedError",
    "docstring": "Given a template node, generate a kernel. This function is only available for triton now. If the third-party backend behaves as a sub-class of TritonScheduling, it can override it or reuse it.",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\scheduler.py",
    "ast_data": "FunctionDef name:codegen_template arg:self arg:template_node arg:epilogue_nodes arg:prologue_nodes arguments arg arg arg arg Raise"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, op, specs, name):\n    self.op = op\n    self.specs = specs\n    self.name = name",
    "docstring": "Creates a object. Args: op: the \"producer\" object that this class wraps; it produces a list of tensors to save. E.g., a \"Variable\" object saving its backing tensor. specs: a list of SaveSpec, each element of which describes one tensor to save under this object. All Tensors must be on the same device. name: the name to save the object under.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\training\\saving\\saveable_object.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:op arg:specs arg:name arguments arg arg arg arg Assign Assign Assign"
  },
  {
    "library": "matplotlib",
    "name": "_rgb_to_rgba",
    "source_code": "def _rgb_to_rgba(A):\n    rgba = np.zeros((A.shape[0], A.shape[1], 4), dtype=A.dtype)\n    rgba[:, :, :3] = A\n    if rgba.dtype == np.uint8:\n        rgba[:, :, 3] = 255\n    else:\n        rgba[:, :, 3] = 1.0\n    return rgba",
    "docstring": "Convert an RGB image to RGBA, as required by the image resample C++ extension.",
    "type": "function",
    "file_path": "matplotlib\\lib\\matplotlib\\image.py",
    "ast_data": "FunctionDef name:_rgb_to_rgba arg:A arguments arg Assign Call Assign If Compare Assign Assign Return return:yes"
  },
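Since `_rgb_to_rgba` is private, here is a standalone sketch of the same padding rule (alpha = 255 for uint8 images, 1.0 for float images), not a matplotlib API:

```python
# Append an opaque alpha channel, preserving the input dtype.
import numpy as np

def rgb_to_rgba(A):
    rgba = np.zeros((A.shape[0], A.shape[1], 4), dtype=A.dtype)
    rgba[..., :3] = A
    rgba[..., 3] = 255 if rgba.dtype == np.uint8 else 1.0
    return rgba

print(rgb_to_rgba(np.zeros((2, 2, 3), np.uint8))[..., 3])    # all 255
print(rgb_to_rgba(np.zeros((2, 2, 3), np.float32))[..., 3])  # all 1.0
```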
  {
    "library": "tensorflow",
    "name": "_orthogonal_matrix",
    "source_code": "def _orthogonal_matrix(self, n):\n    a = random_ops.random_normal([n, n], dtype=self.dtype, seed=self.seed)\n    if self.seed:\n        self.seed += 1\n    q, r = gen_linalg_ops.qr(a)\n    d = array_ops.diag_part(r)\n    q *= math_ops.sign(d)\n    return q",
    "docstring": "Construct an n x n orthogonal matrix. Args: n: Dimension. Returns: A n x n orthogonal matrix.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\init_ops.py",
    "ast_data": "FunctionDef name:_orthogonal_matrix arg:self arg:n arguments arg arg Assign Call If Assign Call Assign Call Call Return return:yes"
  },
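The same QR-with-sign-correction construction, sketched in plain NumPy for illustration (the `orthogonal_matrix` helper is hypothetical, not TensorFlow API):

```python
# Draw a Gaussian matrix, take its QR decomposition, and flip column signs
# so diag(R) is positive; this removes the sign ambiguity of QR and yields
# an orthogonal matrix distributed according to the Haar measure.
import numpy as np

def orthogonal_matrix(n, rng=None):
    rng = np.random.default_rng(rng)
    a = rng.standard_normal((n, n))
    q, r = np.linalg.qr(a)
    q *= np.sign(np.diag(r))  # make diag(R) positive
    return q

q = orthogonal_matrix(4, rng=0)
print(np.allclose(q @ q.T, np.eye(4)))  # True
```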
  {
    "library": "pytorch",
    "name": "allocate_output",
    "source_code": "def allocate_output(self, block: Allocation):\n    pools = self.get_pools(block)\n    if pools and config.memory_pool in ('outputs', 'combined'):\n        pools[-1].allocate_at_end(block)\n    else:\n        block.mark_allocated()\n        pools.append(AllocationPool(block.device, TemporalSplit([block]), can_expand=config.memory_pool == 'combined'))",
    "docstring": "Outputs get different pools so memory gets freed properly",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\codegen\\memory_planning.py",
    "ast_data": "FunctionDef name:allocate_output arg:self arg:block arguments arg arg Assign Call If BoolOp Compare Call Call Call Call Call Compare"
  },
  {
    "library": "django",
    "name": "_url",
    "source_code": "def _url(self, hashed_name_func, name, force=False, hashed_files=None):\n    if settings.DEBUG and (not force):\n        hashed_name, fragment = (name, '')\n    else:\n        clean_name, fragment = urldefrag(name)\n        if urlsplit(clean_name).path.endswith('/'):\n            hashed_name = name\n        else:\n            args = (clean_name,)\n            if hashed_files is not None:\n                args += (hashed_files,)\n            hashed_name = hashed_name_func(*args)\n    final_url = super().url(hashed_name)\n    query_fragment = '?#' in name\n    if fragment or query_fragment:\n        urlparts = list(urlsplit(final_url))\n        if fragment and (not urlparts[4]):\n            urlparts[4] = fragment\n        if query_fragment and (not urlparts[3]):\n            urlparts[2] += '?'\n        final_url = urlunsplit(urlparts)\n    return unquote(final_url)",
    "docstring": "Return the non-hashed URL in DEBUG mode.",
    "type": "method",
    "file_path": "django\\django\\contrib\\staticfiles\\storage.py",
    "ast_data": "FunctionDef name:_url arg:self arg:hashed_name_func arg:name arg:force arg:hashed_files arguments arg arg arg arg arg If BoolOp Assign Assign Call If Call Call Assign Assign If Compare Assign Call Assign Call Call Assign Compare If BoolOp Assign Call Call If BoolOp Assign If BoolOp Assign Call Return return:yes Call"
  },
  {
    "library": "cherrypy",
    "name": "tox_after_run_commands",
    "source_code": "@impl\ndef tox_after_run_commands(tox_env: ToxEnv) -> None:\n    if tox_env.name == 'build-dists' and IS_GITHUB_ACTIONS_RUNTIME:\n        _log_debug_after_run_commands('Computing and storing the base64 representation of the combined dists SHA-256 hash in GHA...')\n        dists_dir_path = Path(__file__).parent / 'dist'\n        emulated_sha256sum_output = '\\n'.join((_produce_sha256sum_line(artifact_path) for artifact_path in dists_dir_path.glob('*')))\n        emulated_base64_w0_output = b64encode(emulated_sha256sum_output.encode()).decode()\n        with Path(environ['GITHUB_OUTPUT']).open(encoding=UNICODE_ENCODING, mode=FILE_APPEND_MODE) as outputs_file:\n            print(f'combined-dists-base64-encoded-sha256-hash={emulated_base64_w0_output!s}', file=outputs_file)",
    "docstring": "Compute combined dists hash post build-dists under GHA. :param tox_env: A tox environment object.",
    "type": "function",
    "file_path": "cherrypy\\toxfile.py",
    "ast_data": "FunctionDef name:tox_after_run_commands arg:tox_env arguments arg If BoolOp Compare Call Assign Call Assign Call Call Call Assign Call Call Call With Call Call Call"
  },
  {
    "library": "sphinx",
    "name": "cache",
    "source_code": "@property\ndef cache(self) -> dict[InventoryURI, InventoryCacheEntry]:\n    return self.env.intersphinx_cache",
    "docstring": "Intersphinx cache. - Key is the URI of the remote inventory. - Element one is the key given in the Sphinx :confval:. - Element two is a time value for cache invalidation, an integer. - Element three is the loaded remote inventory of type :class:.",
    "type": "method",
    "file_path": "sphinx\\sphinx\\ext\\intersphinx\\_shared.py",
    "ast_data": "FunctionDef name:cache arg:self arguments arg Return return:yes"
  },
  {
    "library": "pandas",
    "name": "inferred_type",
    "source_code": "@property\ndef inferred_type(self) -> str:\n    return 'interval'",
    "docstring": "Return a string of the type inferred from the values",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\indexes\\interval.py",
    "ast_data": "FunctionDef name:inferred_type arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_check_not_finalized",
    "source_code": "def _check_not_finalized(self) -> None:\n    if self._finalized:\n        raise RuntimeError('Graph is finalized and cannot be modified.')",
    "docstring": "Check if the graph is finalized. Raises: RuntimeError: If the graph finalized.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\ops.py",
    "ast_data": "FunctionDef name:_check_not_finalized arg:self arguments arg If Raise Call"
  },
  {
    "library": "matplotlib",
    "name": "update",
    "source_code": "def update(self, props):\n    return self._update_props(props, '{cls.__name__!r} object has no property {prop_name!r}')",
    "docstring": "Update this artist's properties from the dict *props*. Parameters ---------- props : dict",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\artist.py",
    "ast_data": "FunctionDef name:update arg:self arg:props arguments arg arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "InvalidSymbolNameError",
    "source_code": "class InvalidSymbolNameError(Exception):\n    pass",
    "docstring": "Raised when trying to export symbol as an invalid or unallowed name.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\util\\tf_export.py",
    "ast_data": "ClassDef name:InvalidSymbolNameError"
  },
  {
    "library": "scipy",
    "name": "Ursem04",
    "source_code": "class Ursem04(Benchmark):\n\n    def __init__(self, dimensions=2):\n        Benchmark.__init__(self, dimensions)\n        self._bounds = list(zip([-2.0] * self.N, [2.0] * self.N))\n        self.global_optimum = [[0.0 for _ in range(self.N)]]\n        self.fglob = -1.5\n\n    def fun(self, x, *args):\n        self.nfev += 1\n        return -3 * sin(0.5 * pi * x[0] + 0.5 * pi) * (2 - sqrt(x[0] ** 2 + x[1] ** 2)) / 4",
    "docstring": "Ursem 4 objective function. This class defines the Ursem 4 [1]_ global optimization problem. This is a multimodal minimization problem defined as follows: .. math:: f_{\\text{Ursem04}}(x) = -3 \\sin(0.5 \\pi x_1 + 0.5 \\pi) \\frac{2 - \\sqrt{x_1^2 + x_2 ^ 2}}{4} with :math: for :math:. *Global optimum*: :math: for :math: for :math: .. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions For Global Optimization Problems Int. Journal of Mathematical Modelling and Numerical Optimisation, 2013, 4, 150-194.",
    "type": "class",
    "file_path": "scipy\\benchmarks\\benchmarks\\go_benchmark_functions\\go_funcs_U.py",
    "ast_data": "ClassDef name:Ursem04 FunctionDef name:__init__ arg:self arg:dimensions arguments arg arg Call Assign Call Call Assign Call Assign FunctionDef name:fun arg:self arg:x arguments arg arg arg Return return:yes Call Call"
  },
  {
    "library": "numpy",
    "name": "hermecompanion",
    "source_code": "def hermecompanion(c):\n    [c] = pu.as_series([c])\n    if len(c) < 2:\n        raise ValueError('Series must have maximum degree of at least 1.')\n    if len(c) == 2:\n        return np.array([[-c[0] / c[1]]])\n    n = len(c) - 1\n    mat = np.zeros((n, n), dtype=c.dtype)\n    scl = np.hstack((1.0, 1.0 / np.sqrt(np.arange(n - 1, 0, -1))))\n    scl = np.multiply.accumulate(scl)[::-1]\n    top = mat.reshape(-1)[1::n + 1]\n    bot = mat.reshape(-1)[n::n + 1]\n    top[...] = np.sqrt(np.arange(1, n))\n    bot[...] = top\n    mat[:, -1] -= scl * c[:-1] / c[-1]\n    return mat",
    "docstring": "Return the scaled companion matrix of c. The basis polynomials are scaled so that the companion matrix is symmetric when is an HermiteE basis polynomial. This provides better eigenvalue estimates than the unscaled case and for basis polynomials the eigenvalues are guaranteed to be real if is used to obtain them. Parameters ---------- c : array_like 1-D array of HermiteE series coefficients ordered from low to high degree. Returns ------- mat : ndarray Scaled companion matrix of dimensions (deg, deg).",
    "type": "function",
    "file_path": "numpy\\numpy\\polynomial\\hermite_e.py",
    "ast_data": "FunctionDef name:hermecompanion arg:c arguments arg Assign Call If Compare Call Raise Call If Compare Call Return return:yes Call Assign Call Assign Call Assign Call Call Call Assign Call Assign Call Assign Call Assign Call Call Assign Return return:yes"
  },
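A usage sketch: the eigenvalues of the scaled companion matrix are the roots of the HermiteE series, and since the matrix is symmetric, `eigvalsh` applies. Here He_2(x) = x^2 - 1, with roots ±1:

```python
# Roots of an HermiteE series via the symmetric companion matrix.
import numpy as np
from numpy.polynomial import hermite_e as He

c = [0, 0, 1]                      # coefficients of He_2, low to high degree
mat = He.hermecompanion(c)
print(np.linalg.eigvalsh(mat))     # approx. [-1.,  1.]
```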
  {
    "library": "django",
    "name": "ResultList",
    "source_code": "class ResultList(list):\n\n    def __init__(self, form, *items):\n        self.form = form\n        super().__init__(*items)",
    "docstring": "Wrapper class used to return items in a list_editable changelist, annotated with the form object for error reporting purposes. Needed to maintain backwards compatibility with existing admin templates.",
    "type": "class",
    "file_path": "django\\django\\contrib\\admin\\templatetags\\admin_list.py",
    "ast_data": "ClassDef name:ResultList FunctionDef name:__init__ arg:self arg:form arguments arg arg arg Assign Call Call"
  },
  {
    "library": "cherrypy",
    "name": "wait",
    "source_code": "def wait(self):\n    while not getattr(self.httpserver, 'ready', False):\n        if self.interrupt:\n            raise self.interrupt\n        time.sleep(0.1)\n    if os.environ.get('LISTEN_PID', None):\n        return\n    if not isinstance(self.bind_addr, tuple):\n        return\n    with _safe_wait(*self.bound_addr):\n        portend.occupied(*self.bound_addr, timeout=Timeouts.occupied)",
    "docstring": "Wait until the HTTP server is ready to receive requests.",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\process\\servers.py",
    "ast_data": "FunctionDef name:wait arg:self arguments arg While Call If Raise Call If Call Return return:no If Call Return return:no With Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_compile_weights_loss_and_weighted_metrics",
    "source_code": "@trackable.no_automatic_dependency_tracking\ndef _compile_weights_loss_and_weighted_metrics(self, sample_weights=None):\n    with backend.get_graph().as_default():\n        if sample_weights is not None:\n            self._update_sample_weight_modes(sample_weights)\n        self._prepare_sample_weights(sample_weights)\n        masks = self._prepare_output_masks()\n        self._handle_metrics(self.outputs, targets=self._targets, skip_target_masks=self._prepare_skip_target_masks(), sample_weights=self.sample_weights, masks=masks, return_weighted_metrics=True)\n        self.total_loss = self._prepare_total_loss(masks)",
    "docstring": "Compiles the model loss and weighted metric sub-graphs. This may be used to set graph tensors as sample weights (instead of creating placeholders). Args: sample_weights: List of tensors to use as the sample weights. Must be the same length as the number of outputs. If left as , placeholders are used instead.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\training_v1.py",
    "ast_data": "FunctionDef name:_compile_weights_loss_and_weighted_metrics arg:self arg:sample_weights arguments arg arg With Call Call If Compare Call Call Assign Call Call Call Assign Call"
  },
  {
    "library": "tensorflow",
    "name": "to_tensor_shape",
    "source_code": "def to_tensor_shape(spec):\n    if spec.ndim is None and spec.shape is None:\n        return tensor_shape.TensorShape(None)\n    elif spec.shape is not None:\n        return tensor_shape.TensorShape(spec.shape)\n    else:\n        shape = [None] * spec.ndim\n        for a in spec.axes:\n            shape[a] = spec.axes[a]\n        return tensor_shape.TensorShape(shape)",
    "docstring": "Returns a tf.TensorShape object that matches the shape specifications. If the InputSpec's shape or ndim is defined, this method will return a fully or partially-known shape. Otherwise, the returned TensorShape is None. Args: spec: an InputSpec object. Returns: a tf.TensorShape object",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\input_spec.py",
    "ast_data": "FunctionDef name:to_tensor_shape arg:spec arguments arg If BoolOp Compare Compare Return return:yes Call If Compare Return return:yes Call Assign For Assign Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "_clear",
    "source_code": "def _clear(self, event):\n    if self.ignore(event) or self.canvas.is_saving():\n        return\n    self._background = self.canvas.copy_from_bbox(self.ax.bbox)\n    self.ax.draw_artist(self._checks)",
    "docstring": "Internal event handler to clear the buttons.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\widgets.py",
    "ast_data": "FunctionDef name:_clear arg:self arg:event arguments arg arg If BoolOp Call Call Return return:no Assign Call Call"
  },
  {
    "library": "numpy",
    "name": "add_newdoc",
    "source_code": "def add_newdoc(place, obj, doc, warn_on_python=True):\n    new = getattr(__import__(place, globals(), {}, [obj]), obj)\n    if isinstance(doc, str):\n        if '${ARRAY_FUNCTION_LIKE}' in doc:\n            doc = overrides.get_array_function_like_doc(new, doc)\n        _add_docstring(new, doc.strip(), warn_on_python)\n    elif isinstance(doc, tuple):\n        attr, docstring = doc\n        _add_docstring(getattr(new, attr), docstring.strip(), warn_on_python)\n    elif isinstance(doc, list):\n        for attr, docstring in doc:\n            _add_docstring(getattr(new, attr), docstring.strip(), warn_on_python)",
    "docstring": "Add documentation to an existing object, typically one defined in C The purpose is to allow easier editing of the docstrings without requiring a re-compile. This exists primarily for internal use within numpy itself. Parameters ---------- place : str The absolute name of the module to import from obj : str or None The name of the object to add documentation to, typically a class or function name. doc : {str, Tuple[str, str], List[Tuple[str, str]]} If a string, the documentation to apply to If a tuple, then the first element is interpreted as an attribute of and the second as the docstring to apply - `UserWarningobjPyTypeObjectPyType_ReadyPy_INCREF` on the str and losing the reference, so the str will never be released If possible it should be avoided.",
    "type": "function",
    "file_path": "numpy\\numpy\\_core\\function_base.py",
    "ast_data": "FunctionDef name:add_newdoc arg:place arg:obj arg:doc arg:warn_on_python arguments arg arg arg arg Assign Call Call Call If Call If Compare Assign Call Call Call If Call Assign Call Call Call If Call For Call Call Call"
  },
  {
    "library": "matplotlib",
    "name": "bubble",
    "source_code": "def bubble(self, a):\n    if a not in self._axes:\n        raise ValueError('Axes has not been added yet')\n    self._axes[a] = next(self._counter)",
    "docstring": "Move an Axes, which must already exist in the stack, to the top.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\figure.py",
    "ast_data": "FunctionDef name:bubble arg:self arg:a arguments arg arg If Compare Raise Call Assign Call"
  },
  {
    "library": "tensorflow",
    "name": "PaddingConfig",
    "source_code": "class PaddingConfig:\n    __slots__ = ('dimensions',)\n\n    def __init__(self):\n        self.dimensions = []",
    "docstring": "Python representation of a xla.PaddingConfig protobuf.",
    "type": "class",
    "file_path": "tensorflow\\third_party\\xla\\xla\\python\\xla_client.py",
    "ast_data": "ClassDef name:PaddingConfig Assign FunctionDef name:__init__ arg:self arguments arg Assign"
  },
  {
    "library": "pytorch",
    "name": "_LeafNode",
    "source_code": "class _LeafNode(_IRNode):\n\n    def __init__(self, node: torch.fx.Node, is_exported_program: bool=False):\n        self._node = node\n        self._stack_meta = _module_stack_meta_from_node(node, is_exported_program=is_exported_program)\n\n    @property\n    def fx_op(self) -> str:\n        return self._node.op\n\n    @property\n    def fx_node(self) -> torch.fx.Node:\n        return self._node\n\n    @property\n    def stack_meta(self) -> _ModuleStackMeta:\n        return self._stack_meta\n\n    @property\n    def stack_trace(self) -> str | None:\n        return self.fx_node.meta.get('stack_trace')\n\n    def __str__(self) -> str:\n        return f'LeafNode({self._node})'",
    "docstring": "Representing a single fx.Node.",
    "type": "class",
    "file_path": "pytorch\\torch\\onnx\\_internal\\fx\\passes\\modularization.py",
    "ast_data": "ClassDef name:_LeafNode FunctionDef name:__init__ arg:self arg:node arg:is_exported_program arguments arg arg arg Assign Assign Call FunctionDef name:fx_op arg:self arguments arg Return return:yes FunctionDef name:fx_node arg:self arguments arg Return return:yes FunctionDef name:stack_meta arg:self arguments arg Return return:yes FunctionDef name:stack_trace arg:self arguments arg Return return:yes Call FunctionDef name:__str__ arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_IsProtoClass",
    "source_code": "def _IsProtoClass(obj):\n    return isinstance(obj, type) and issubclass(obj, message.Message)",
    "docstring": "Returns whether the passed obj is a Protocol Buffer class.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\tools\\api\\lib\\python_object_to_proto_visitor.py",
    "ast_data": "FunctionDef name:_IsProtoClass arg:obj arguments arg Return return:yes BoolOp Call Call"
  },
  {
    "library": "django",
    "name": "_get_non_gfk_field",
    "source_code": "def _get_non_gfk_field(opts, name):\n    field = opts.get_field(name)\n    if field.is_relation and (field.many_to_one and (not field.related_model) or field.one_to_many):\n        raise FieldDoesNotExist()\n    if field.is_relation and (not field.many_to_many) and hasattr(field, 'attname') and (field.attname == name):\n        raise FieldIsAForeignKeyColumnName()\n    return field",
    "docstring": "For historical reasons, the admin app relies on GenericForeignKeys as being \"not found\" by get_field(). This could likely be cleaned up. Reverse relations should also be excluded as these aren't attributes of the model (rather something like ).",
    "type": "function",
    "file_path": "django\\django\\contrib\\admin\\utils.py",
    "ast_data": "FunctionDef name:_get_non_gfk_field arg:opts arg:name arguments arg arg Assign Call If BoolOp BoolOp BoolOp Raise Call If BoolOp Call Compare Raise Call Return return:yes"
  },
  {
    "library": "pandas",
    "name": "consolidate",
    "source_code": "def consolidate(self) -> Self:\n    if self.is_consolidated():\n        return self\n    bm = type(self)(self.blocks, self.axes, verify_integrity=False)\n    bm._is_consolidated = False\n    bm._consolidate_inplace()\n    return bm",
    "docstring": "Join together blocks having same dtype Returns ------- y : BlockManager",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\internals\\managers.py",
    "ast_data": "FunctionDef name:consolidate arg:self arguments arg If Call Return return:yes Assign Call Call Assign Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "can_fuse_horizontal",
    "source_code": "def can_fuse_horizontal(self, node1: BaseSchedulerNode, node2: BaseSchedulerNode) -> bool:\n    raise NotImplementedError",
    "docstring": "Check whether node1 and node2 can be horizontally fused or not.",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\scheduler.py",
    "ast_data": "FunctionDef name:can_fuse_horizontal arg:self arg:node1 arg:node2 arguments arg arg arg Raise"
  },
  {
    "library": "tensorflow",
    "name": "_compute_useful_frames",
    "source_code": "def _compute_useful_frames(tb, num):\n    defining_frame_index = _find_index_of_defining_frame(tb)\n    innermost_excluded = min(defining_frame_index + 2 + 1, len(tb))\n    outermost_included = max(innermost_excluded - num, 0)\n    return tb[outermost_included:innermost_excluded]",
    "docstring": "Return a list of frames, which form a 'useful' stack. Starting from the defining frame to the outermost one, this method computes the contiguous portion of the 'useful' stack trace and returns the selected frames. Args: tb: A list of traceback frames (as from Operation.traceback). num: total number of frames to return. Returns: A list of frames.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\error_interpolation.py",
    "ast_data": "FunctionDef name:_compute_useful_frames arg:tb arg:num arguments arg arg Assign Call Assign Call Call Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, value, indices=None, name=None):\n    del name\n    super(CSRSparseMatrix, self).__init__()\n    if isinstance(value, sparse_tensor.SparseTensor):\n        if indices is not None:\n            raise ValueError('indices must be None if value is a SparseTensor.')\n        self._dtype = value.dtype\n        self._csr_matrix = sm_ops.sparse_tensor_to_csr_sparse_matrix(indices=value.indices, values=value.values, dense_shape=value.dense_shape)\n    else:\n        value = ops.convert_to_tensor(value)\n        self._dtype = value.dtype\n        if indices is not None:\n            indices = ops.convert_to_tensor(indices, dtype=dtypes.int64)\n        else:\n            indices = array_ops.stop_gradient(array_ops.where(value))\n        self._csr_matrix = sm_ops.dense_to_csr_sparse_matrix(value, indices)\n    if self._eager_mode:\n        self._csr_matrix._handle_data = _make_handle_data(value)",
    "docstring": "Construct a CSRSparseMatrix from a dense matrix or SparseTensor. Args: value: A dense or Tensor or . indices: The nonzero indices of (if is not a ). name: Optional op name. Raises: ValueError: if is a and is not .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\linalg\\sparse\\sparse_csr_matrix_ops.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:value arg:indices arg:name arguments arg arg arg arg Call Call If Call If Compare Raise Call Assign Assign Call Assign Call Assign If Compare Assign Call Assign Call Call Assign Call If Assign Call"
  },
  {
    "library": "tensorflow",
    "name": "fresnel_cos",
    "source_code": "@tf_export('math.special.fresnel_cos')\n@dispatch.register_unary_elementwise_api\n@dispatch.add_dispatch_support\ndef fresnel_cos(x, name=None):\n    with ops.name_scope(name, 'fresnel_cos', [x]):\n        return gen_special_math_ops.fresnel_cos(x)",
    "docstring": "Computes Fresnel's cosine integral of element-wise. The Fresnel cosine integral is defined as the integral of from to , with the domain of definition all real numbers. The Fresnel cosine integral is odd. >>> tf.math.special.fresnel_cos([-1., -0.1, 0.1, 1.]).numpy() array([-0.7798934 , -0.09999753, 0.09999753, 0.7798934 ], dtype=float32) This implementation is based off of the Cephes math library. Args: x: A or . Must be one of the following types: , . name: A name for the operation (optional). Returns: A or , respectively. Has the same type as . @compatibility(scipy) Equivalent to scipy.special.fresnel second output. @end_compatibility",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\special_math_ops.py",
    "ast_data": "FunctionDef name:fresnel_cos arg:x arg:name arguments arg arg With Call Return return:yes Call Call"
  },
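A quick check of the odd symmetry stated above (C(-x) = -C(x)), through the public `tf.math.special.fresnel_cos` entry point:

```python
# The outputs for -x are the negatives of those for x.
import tensorflow as tf

x = tf.constant([-1.0, -0.1, 0.1, 1.0])
print(tf.math.special.fresnel_cos(x).numpy())
# approx. [-0.7798934, -0.09999753, 0.09999753, 0.7798934]
```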
  {
    "library": "numpy",
    "name": "manifest_rc",
    "source_code": "def manifest_rc(name, type='dll'):\n    if type == 'dll':\n        rctype = 2\n    elif type == 'exe':\n        rctype = 1\n    else:\n        raise ValueError('Type %s not supported' % type)\n    return '#include \"winuser.h\"\\n%d RT_MANIFEST %s' % (rctype, name)",
    "docstring": "Return the rc file used to generate the res file which will be embedded as manifest for given manifest file name, of given type ('dll' or 'exe'). Parameters ---------- name : str name of the manifest file to embed type : str {'dll', 'exe'} type of the binary which will embed the manifest",
    "type": "function",
    "file_path": "numpy\\numpy\\distutils\\mingw32ccompiler.py",
    "ast_data": "FunctionDef name:manifest_rc arg:name arg:type arguments arg arg If Compare Assign If Compare Assign Raise Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "load_function_name",
    "source_code": "def load_function_name(self, fn_name, push_null, num_on_stack=0):\n    output = []\n    if push_null and sys.version_info >= (3, 11):\n        output.extend(add_push_null(self.create_load_global(fn_name, add=True)))\n        if num_on_stack > 0:\n            output.extend([*self.rot_n(num_on_stack + 2), *self.rot_n(num_on_stack + 2)])\n    else:\n        output.extend([self.create_load_global(fn_name, add=True), *self.rot_n(num_on_stack + 1)])\n    return output",
    "docstring": "Load the global fn_name on the stack num_on_stack down",
    "type": "method",
    "file_path": "pytorch\\torch\\_dynamo\\codegen.py",
    "ast_data": "FunctionDef name:load_function_name arg:self arg:fn_name arg:push_null arg:num_on_stack arguments arg arg arg arg Assign If BoolOp Compare Call Call Call If Compare Call Call Call Call Call Call Return return:yes"
  },
  {
    "library": "django",
    "name": "buffer",
    "source_code": "def buffer(self, width, quadsegs=8):\n    return self._topology(capi.geos_buffer(self.ptr, width, quadsegs))",
    "docstring": "Return a geometry that represents all points whose distance from this Geometry is less than or equal to distance. Calculations are in the Spatial Reference System of this Geometry. The optional third parameter sets the number of segment used to approximate a quarter circle (defaults to 8). (Text from PostGIS documentation at ch. 6.1.3)",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\geos\\geometry.py",
    "ast_data": "FunctionDef name:buffer arg:self arg:width arg:quadsegs arguments arg arg arg Return return:yes Call Call"
  },
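A usage sketch with GeoDjango's public GEOS API (requires the GEOS library to be installed); with 8 segments per quarter circle the buffer's area approximates pi*r^2 from below:

```python
# Buffer a point by 2 units; the result is a polygon approximating a circle.
from django.contrib.gis.geos import Point

pnt = Point(0, 0)
poly = pnt.buffer(2.0, quadsegs=8)
print(poly.area)  # close to pi * 2**2 ~ 12.57 (slightly less, 32-gon)
```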
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, function_name, unique_function_id, node_name_prefix, attr_name, level=1, children_inputs_mappings=None):\n    self._function_name = function_name\n    self._unique_function_id = unique_function_id\n    self._next_global_index = 0\n    self._used_global_indices = set()\n    self._tag_to_global_index = {}\n    self._tag_to_next_sort_index = {}\n    self._node_name_prefix = node_name_prefix\n    self._attr_name = attr_name\n    self._level = level\n    self._children_inputs_mappings = children_inputs_mappings",
    "docstring": "Initialize ophint argument. Args: function_name: Name of the function that this tracks arguments for. unique_function_id: UUID of function that this tracks arguments for. node_name_prefix: How identities that are created are named. attr_name: Name of attribute to use to store the index for this hint. i.e. FUNCTION_INPUT_INDEX or FUNCTION_OUTPUT_INDEX level: Hierarchical level of the Ophint node, a number. children_inputs_mappings: Inputs/Outputs mapping for children hints.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\lite\\python\\op_hint.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:function_name arg:unique_function_id arg:node_name_prefix arg:attr_name arg:level arg:children_inputs_mappings arguments arg arg arg arg arg arg arg Assign Assign Assign Assign Call Assign Assign Assign Assign Assign Assign"
  },
  {
    "library": "scikit-learn",
    "name": "_pairwise_similarity",
    "source_code": "def _pairwise_similarity(a, b, similarity):\n    a_rows, a_cols, b_rows, b_cols = _check_rows_and_columns(a, b)\n    n_a = a_rows.shape[0]\n    n_b = b_rows.shape[0]\n    result = np.array([[similarity(a_rows[i], a_cols[i], b_rows[j], b_cols[j]) for j in range(n_b)] for i in range(n_a)])\n    return result",
    "docstring": "Computes pairwise similarity matrix. result[i, j] is the Jaccard coefficient of a's bicluster i and b's bicluster j.",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\metrics\\cluster\\_bicluster.py",
    "ast_data": "FunctionDef name:_pairwise_similarity arg:a arg:b arg:similarity arguments arg arg arg Assign Call Assign Assign Assign Call Call Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "record_operation_backprop_only",
    "source_code": "def record_operation_backprop_only(op_type, output_tensors, input_tensors, backward_function):\n    pywrap_tfe.TFE_Py_TapeSetRecordOperationBackprop(op_type, output_tensors, input_tensors, backward_function)",
    "docstring": "Records the operation on all backward tapes in the stack.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\record.py",
    "ast_data": "FunctionDef name:record_operation_backprop_only arg:op_type arg:output_tensors arg:input_tensors arg:backward_function arguments arg arg arg arg Call"
  },
  {
    "library": "scipy",
    "name": "kerp_zeros",
    "source_code": "def kerp_zeros(nt):\n    if not isscalar(nt) or floor(nt) != nt or nt <= 0:\n        raise ValueError('nt must be positive integer scalar.')\n    return _specfun.klvnzo(nt, 7)",
    "docstring": "Compute nt zeros of the derivative of the Kelvin function ker. Parameters ---------- nt : int Number of zeros to compute. Must be positive. Returns ------- ndarray First zeros of the derivative of the Kelvin function. See Also -------- ker, kerp References ---------- .. [1] Zhang, Shanjie and Jin, Jianming. \"Computation of Special Functions\", John Wiley and Sons, 1996.",
    "type": "function",
    "file_path": "scipy\\scipy\\special\\_basic.py",
    "ast_data": "FunctionDef name:kerp_zeros arg:nt arguments arg If BoolOp Call Compare Call Compare Raise Call Return return:yes Call"
  },
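A minimal usage sketch via the public `scipy.special.kerp_zeros` entry point:

```python
# First three zeros of ker'(x), returned as a 1-D array.
from scipy.special import kerp_zeros

print(kerp_zeros(3))
```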
  {
    "library": "kornia",
    "name": "opening",
    "source_code": "def opening(tensor: torch.Tensor, kernel: torch.Tensor, structuring_element: Optional[torch.Tensor]=None, origin: Optional[List[int]]=None, border_type: str='geodesic', border_value: float=0.0, max_val: float=10000.0, engine: str='unfold') -> torch.Tensor:\n    if not isinstance(tensor, torch.Tensor):\n        raise TypeError(f'Input type is not a torch.Tensor. Got {type(tensor)}')\n    if len(tensor.shape) != 4:\n        raise ValueError(f'Input size must have 4 dimensions. Got {tensor.dim()}')\n    if not isinstance(kernel, torch.Tensor):\n        raise TypeError(f'Kernel type is not a torch.Tensor. Got {type(kernel)}')\n    if len(kernel.shape) != 2:\n        raise ValueError(f'Kernel size must have 2 dimensions. Got {kernel.dim()}')\n    return dilation(erosion(tensor, kernel=kernel, structuring_element=structuring_element, origin=origin, border_type=border_type, border_value=border_value, max_val=max_val, engine=engine), kernel=kernel, structuring_element=structuring_element, origin=origin, border_type=border_type, border_value=border_value, max_val=max_val, engine=engine)",
    "docstring": "Return the opened image, (that means, dilation after an erosion) applying the same kernel in each channel. .. image:: _static/img/opening.png The kernel must have 2 dimensions. Args: tensor: Image with shape :math:. kernel: Positions of non-infinite elements of a flat structuring element. Non-zero values give the set of neighbors of the center over which the operation is applied. Its shape is :math:. For full structural elements use torch.ones_like(structural_element). structuring_element: Structuring element used for the grayscale dilation. It may be a non-flat structuring element. origin: Origin of the structuring element. Default: `(B, C, H, W)here `__. Example: >>> tensor = torch.rand(1, 3, 5, 5) >>> kernel = torch.ones(3, 3) >>> opened_img = opening(tensor, kernel)",
    "type": "function",
    "file_path": "kornia\\kornia\\morphology\\morphology.py",
    "ast_data": "FunctionDef name:opening arg:tensor arg:kernel arg:structuring_element arg:origin arg:border_type arg:border_value arg:max_val arg:engine arguments arg arg arg arg arg arg arg arg If Call Raise Call Call If Compare Call Raise Call Call If Call Raise Call Call If Compare Call Raise Call Call Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_TensorScatterMaxGrad",
    "source_code": "@ops.RegisterGradient('TensorScatterMax')\ndef _TensorScatterMaxGrad(op: ops.Operation, grad):\n    return _TensorScatterMinOrMaxGrad(op, grad)",
    "docstring": "Gradient for TensorScatterMax op.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\array_grad.py",
    "ast_data": "FunctionDef name:_TensorScatterMaxGrad arg:op arg:grad arguments arg arg Return return:yes Call Call"
  },
  {
    "library": "django",
    "name": "int_output",
    "source_code": "def int_output(func, argtypes, errcheck=None):\n    func.argtypes = argtypes\n    func.restype = c_int\n    if errcheck:\n        func.errcheck = errcheck\n    return func",
    "docstring": "Generate a ctypes function that returns an integer value.",
    "type": "function",
    "file_path": "django\\django\\contrib\\gis\\gdal\\prototypes\\generation.py",
    "ast_data": "FunctionDef name:int_output arg:func arg:argtypes arg:errcheck arguments arg arg arg Assign Assign If Assign Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "DispatchKeyset_summary",
    "source_code": "def DispatchKeyset_summary(valobj: Any, internal_dict: Any, options: Any) -> str:\n    with DisableBreakpoints():\n        target = get_target()\n        keyset = valobj.GetName()\n        result = target.EvaluateExpression(f'torch::gdb::dispatch_keyset_string({keyset})')\n        str_result = str(result)\n        str_result = str_result[str_result.find('\"') + 1:-1]\n        return str_result",
    "docstring": "Print human readable representation of c10::DispatchKeyset",
    "type": "function",
    "file_path": "pytorch\\tools\\lldb\\pytorch_lldb.py",
    "ast_data": "FunctionDef name:DispatchKeyset_summary arg:valobj arg:internal_dict arg:options arguments arg arg arg With Call Assign Call Assign Call Assign Call Assign Call Assign Call Return return:yes"
  },
  {
    "library": "django",
    "name": "max_y",
    "source_code": "@property\ndef max_y(self):\n    return self._envelope.MaxY",
    "docstring": "Return the value of the maximum Y coordinate.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\gdal\\envelope.py",
    "ast_data": "FunctionDef name:max_y arg:self arguments arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "new_empty_strided",
    "source_code": "@register_decomposition(aten.new_empty_strided)\n@out_wrapper()\ndef new_empty_strided(a: TensorLikeType, size: ShapeType, stride: StrideType, *, dtype: Optional[torch.dtype]=None, layout: Optional[torch.layout]=None, device: Optional[DeviceLikeType]=None, pin_memory: bool=False) -> TensorLikeType:\n    dtype = a.dtype if dtype is None else dtype\n    layout = a.layout if layout is None else layout\n    device = a.device if device is None else device\n    return torch.empty_strided(size, stride, dtype=dtype, device=device, pin_memory=pin_memory, layout=layout)",
    "docstring": "Reference implementation of torch.Tensor.new_empty_strided",
    "type": "function",
    "file_path": "pytorch\\torch\\_refs\\__init__.py",
    "ast_data": "FunctionDef name:new_empty_strided arg:a arg:size arg:stride arguments arg arg arg arg arg arg arg Assign Compare Assign Compare Assign Compare Return return:yes Call Call Call"
  },
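A usage sketch showing the dtype inheritance described above: when `dtype`, `layout`, and `device` are left as `None`, they default to those of the source tensor:

```python
# new_empty_strided inherits dtype/device from the source tensor by default.
import torch

a = torch.zeros(2, 3, dtype=torch.float64)
b = a.new_empty_strided((2, 3), (3, 1))
print(b.dtype, b.stride())  # torch.float64 (3, 1)
```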
  {
    "library": "pytorch",
    "name": "_output_mean",
    "source_code": "def _output_mean(self):\n    self.mean = self.output.mean()",
    "docstring": "TODO (mingzhe): it is not necessary to sum up everything by myself, torch.autograd.backward do take a gradient tensor. By default, it is the same shape as your output tensor, with all 1s. Mathematically, it is the same as if the output is summed together. So we should be able to get ride of this method. dummy function for gradient calculation",
    "type": "method",
    "file_path": "pytorch\\benchmarks\\operator_benchmark\\benchmark_pytorch.py",
    "ast_data": "FunctionDef name:_output_mean arg:self arguments arg Assign Call"
  },
  {
    "library": "tensorflow",
    "name": "_GetMaxSizeFromNestedMaximumIterations",
    "source_code": "def _GetMaxSizeFromNestedMaximumIterations(value, while_ctxt):\n    value_name = value.name\n    curr_ctxt = ops.get_default_graph()._get_control_flow_context()\n    curr_ctxt_name = curr_ctxt.name if curr_ctxt is not None else ''\n    max_size = constant_op.constant(1)\n    while while_ctxt not in (None, curr_ctxt):\n        max_iter = while_ctxt.maximum_iterations\n        if max_iter is None:\n            raise ValueError(\"Cannot create a gradient accumulator for tensor '%s' inside XLA while_loop because maximum_iterations was not passed to the tf.while_loop call ('%s').\" % (value_name, while_ctxt.name))\n        max_iter_ctxt = max_iter.op._get_control_flow_context()\n        if util.IsContainingContext(curr_ctxt, max_iter_ctxt):\n            max_size *= max_iter\n        else:\n            const_max_iter = tensor_util.constant_value(max_iter)\n            if const_max_iter is None:\n                raise ValueError(\"Cannot create a gradient accumulator for tensor '%s' inside XLA while_loop. maximum_iterations tensor '%s' for while_loop context '%s' must be statically known (e.g. a constant value or known shape dimension), or be defined at or outside the while loop context '%s' (currently defined in '%s').\" % (value_name, max_iter.name, while_ctxt.name, curr_ctxt_name, max_iter_ctxt.name))\n            max_size *= const_max_iter\n        while_ctxt = util.GetContainingWhileContext(while_ctxt.outer_context, stop_ctxt=curr_ctxt)\n    return max_size",
    "docstring": "Calculate a max_size for use by stack ops inside an XLA while_loop. Args: value: The value inside the while_loop forward context. Used for printing error messages. while_ctxt: The forward context inside which value resides. This does not always match the value's immediate context, as may be inside e.g. a cond context inside the while_loop. Returns: A tensor containing the to feed to a Stack initializer. Raises: ValueError: If is nested inside a that either lacks a parameter, or the parameter: - is inside a that is a parent of the calling context, and - cannot be evaluated at graph build time to a constant.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\control_flow_state.py",
    "ast_data": "FunctionDef name:_GetMaxSizeFromNestedMaximumIterations arg:value arg:while_ctxt arguments arg arg Assign Assign Call Call Assign Compare Assign Call While Compare Assign If Compare Raise Call Assign Call If Call Assign Call If Compare Raise Call Assign Call Return return:yes"
  },
  {
    "library": "cherrypy",
    "name": "signal_child",
    "source_code": "def signal_child(service, command):\n    if command == 'stop':\n        win32serviceutil.StopService(service)\n    elif command == 'restart':\n        win32serviceutil.RestartService(service)\n    else:\n        win32serviceutil.ControlService(service, control_codes[command])",
    "docstring": "Send a control command to a service.",
    "type": "function",
    "file_path": "cherrypy\\cherrypy\\process\\win32.py",
    "ast_data": "FunctionDef name:signal_child arg:service arg:command arguments arg arg If Compare Call If Compare Call Call"
  },
  {
    "library": "pytorch",
    "name": "config_list",
    "source_code": "def config_list(**configs):\n    generated_configs = []\n    reserved_names = ['attrs', 'attr_names', 'tags']\n    if any((attr not in configs for attr in reserved_names)):\n        raise ValueError('Missing attrs in configs')\n    _validate(configs)\n    cross_configs = None\n    if 'cross_product_configs' in configs:\n        cross_configs = cross_product_configs(**configs['cross_product_configs'])\n    for inputs in configs['attrs']:\n        tmp_result = [{configs['attr_names'][i]: input_value} for i, input_value in enumerate(inputs)]\n        tmp_result.append({'tags': '_'.join(configs['tags'])})\n        if cross_configs:\n            generated_configs += [tmp_result + list(config) for config in cross_configs]\n        else:\n            generated_configs.append(tmp_result)\n    return generated_configs",
    "docstring": "Generate configs based on the list of input shapes. This function will take input shapes specified in a list from user. Besides that, all other parameters will be cross producted first and each of the generated list will be merged with the input shapes list. Reserved Args: attr_names(reserved): a list of names for input shapes. attrs(reserved): a list of values for each input shape. corss_product: a dictionary of attributes which will be cross producted with the input shapes. tags(reserved): a tag used to filter inputs. Here is an example: attrs = [ [1, 2], [4, 5], ], attr_names = ['M', 'N'], cross_product_configs={ 'device': ['cpu', 'cuda'], }, we will generate [[{'M': 1}, {'N' : 2}, {'device' : 'cpu'}], [{'M': 1}, {'N' : 2}, {'device' : 'cuda'}], [{'M': 4}, {'N' : 5}, {'device' : 'cpu'}], [{'M': 4}, {'N' : 5}, {'device' : 'cuda'}]]",
    "type": "function",
    "file_path": "pytorch\\benchmarks\\operator_benchmark\\benchmark_utils.py",
    "ast_data": "FunctionDef name:config_list arguments arg Assign Assign If Call Compare Raise Call Call Assign If Compare Assign Call For Assign Call Call Call If Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "transform_all_constraints_trace_time",
    "source_code": "def transform_all_constraints_trace_time(tracer_root, graph, node, counter=0):\n    dimension_dict = {}\n    generator = ConstraintGenerator(tracer_root, graph)\n    new_constraints, counter = generator.generate_constraints(counter)\n    condition_constraint = new_constraints.conjucts[-1]\n    new_constraints.conjucts = new_constraints.conjucts[:-1]\n    new_constraints, counter = iterate_till_fixed_point(new_constraints, counter)\n    assert isinstance(condition_constraint.lhs, BVar)\n    assert is_bool_expr(condition_constraint.rhs)\n    condition_constraint_rhs = condition_constraint.rhs\n    condition_constraint_rhs, counter = iterate_till_fixed_point(condition_constraint_rhs, counter)\n    transformed, counter = transform_to_z3(new_constraints, counter, dimension_dict)\n    transformed_condition_constraint, counter = transform_to_z3(condition_constraint_rhs, counter, dimension_dict)\n    negation_transformed_condition_constraint = z3.Not(transformed_condition_constraint)\n    return (z3.And([transformed, transformed_condition_constraint]), z3.And([transformed, negation_transformed_condition_constraint]))",
    "docstring": "Takes a node and a graph and generates two sets of constraints. One set constraints the node's constraints and another set constraints the negation of the node's constraints Args: tracer_root: the root for getting the module instances graph: the graph so far in the tracing process node: node that represents a conditional counter: variable tracking Returns: Two sets of constraints. One with a conjunction with the the conditional constraint and the other with a conjunction with its negation.",
    "type": "function",
    "file_path": "pytorch\\torch\\fx\\experimental\\migrate_gradual_types\\transform_to_z3.py",
    "ast_data": "FunctionDef name:transform_all_constraints_trace_time arg:tracer_root arg:graph arg:node arg:counter arguments arg arg arg arg Assign Assign Call Assign Call Assign Assign Assign Call Call Call Assign Assign Call Assign Call Assign Call Assign Call Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "leaky_relu",
    "source_code": "def leaky_relu(input: Tensor, negative_slope: float=0.01, inplace: bool=False) -> Tensor:\n    if has_torch_function_unary(input):\n        return handle_torch_function(leaky_relu, (input,), input, negative_slope=negative_slope, inplace=inplace)\n    if inplace:\n        result = torch._C._nn.leaky_relu_(input, negative_slope)\n    else:\n        result = torch._C._nn.leaky_relu(input, negative_slope)\n    return result",
    "docstring": "leaky_relu(input, negative_slope=0.01, inplace=False) -> Tensor Applies element-wise, :math: See :class: for more details.",
    "type": "function",
    "file_path": "pytorch\\torch\\nn\\functional.py",
    "ast_data": "FunctionDef name:leaky_relu arg:input arg:negative_slope arg:inplace arguments arg arg arg If Call Return return:yes Call If Assign Call Assign Call Return return:yes"
  },
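A quick usage sketch: positive inputs pass through unchanged, and negative inputs are scaled by `negative_slope`:

```python
# leaky_relu(x) = max(0, x) + negative_slope * min(0, x)
import torch
import torch.nn.functional as F

x = torch.tensor([-2.0, 0.0, 3.0])
print(F.leaky_relu(x, negative_slope=0.1))  # tensor([-0.2000, 0.0000, 3.0000])
```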
  {
    "library": "pytorch",
    "name": "_MkldnnConvNd",
    "source_code": "class _MkldnnConvNd(torch.jit.ScriptModule):\n    __constants__ = ['stride', 'padding', 'dilation', 'groups']\n\n    def __init__(self, dense_module):\n        super().__init__()\n        self.stride = dense_module.stride\n        self.padding = dense_module.padding\n        self.dilation = dense_module.dilation\n        self.groups = dense_module.groups\n        if dense_module.bias is not None:\n            self.register_buffer('bias', dense_module.bias.to_mkldnn())\n        else:\n            self.register_buffer('bias', torch.zeros([dense_module.weight.size(0)], dtype=torch.float).to_mkldnn())\n\n    @torch.jit.script_method\n    def __getstate__(self):\n        return (self.weight.to_dense(), self.bias.to_dense(), self.training)\n\n    @torch.jit.script_method\n    def forward(self, x):\n        return torch.mkldnn_convolution(x, self.weight, self.bias, self.padding, self.stride, self.dilation, self.groups)",
    "docstring": "Common base of MkldnnConv1d and MkldnnConv2d.",
    "type": "class",
    "file_path": "pytorch\\torch\\utils\\mkldnn.py",
    "ast_data": "ClassDef name:_MkldnnConvNd Assign FunctionDef name:__init__ arg:self arg:dense_module arguments arg arg Call Call Assign Assign Assign Assign If Compare Call Call Call Call Call Call FunctionDef name:__getstate__ arg:self arguments arg Return return:yes Call Call FunctionDef name:forward arg:self arg:x arguments arg arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, logdir, max_queue=10, flush_secs=120, filename_suffix=None):\n    self._logdir = str(logdir)\n    gfile.MakeDirs(self._logdir)\n    self._max_queue = max_queue\n    self._flush_secs = flush_secs\n    self._flush_complete = threading.Event()\n    self._flush_sentinel = object()\n    self._close_sentinel = object()\n    self._ev_writer = _pywrap_events_writer.EventsWriter(compat.as_bytes(os.path.join(self._logdir, 'events')))\n    if filename_suffix:\n        self._ev_writer.InitWithSuffix(compat.as_bytes(filename_suffix))\n    self._initialize()\n    self._closed = False",
    "docstring": "Creates a and an event file to write to. On construction the summary writer creates a new event file in . This event file will contain protocol buffers, which are written to disk via the add_event method. The other arguments to the constructor control the asynchronous writes to the event file: * : How often, in seconds, to flush the added summaries and events to disk. * : Maximum number of summaries or events pending to be written to disk before one of the 'add' calls block. Args: logdir: A string. Directory where event file will be written. max_queue: Integer. Size of the queue for pending events and summaries. flush_secs: Number. How often, in seconds, to flush the pending events and summaries to disk. filename_suffix: A string. Every event file's name is suffixed with .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\summary\\writer\\event_file_writer.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:logdir arg:max_queue arg:flush_secs arg:filename_suffix arguments arg arg arg arg arg Assign Call Call Assign Assign Assign Call Assign Call Assign Call Assign Call Call Call If Call Call Call Assign"
  },
  {
    "library": "matplotlib",
    "name": "view_limits",
    "source_code": "def view_limits(self, vmin, vmax):\n    return mtransforms.nonsingular(vmin, vmax)",
    "docstring": "Select a scale for the range from vmin to vmax. Subclasses should override this method to change locator behaviour.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\ticker.py",
    "ast_data": "FunctionDef name:view_limits arg:self arg:vmin arg:vmax arguments arg arg arg Return return:yes Call"
  },
  {
    "library": "django",
    "name": "deep_deconstruct",
    "source_code": "def deep_deconstruct(self, obj):\n    if isinstance(obj, list):\n        return [self.deep_deconstruct(value) for value in obj]\n    elif isinstance(obj, tuple):\n        return tuple((self.deep_deconstruct(value) for value in obj))\n    elif isinstance(obj, dict):\n        return {key: self.deep_deconstruct(value) for key, value in obj.items()}\n    elif isinstance(obj, functools.partial):\n        return (obj.func, self.deep_deconstruct(obj.args), self.deep_deconstruct(obj.keywords))\n    elif isinstance(obj, COMPILED_REGEX_TYPE):\n        return RegexObject(obj)\n    elif isinstance(obj, type):\n        return obj\n    elif hasattr(obj, 'deconstruct'):\n        deconstructed = obj.deconstruct()\n        if isinstance(obj, models.Field):\n            deconstructed = deconstructed[1:]\n        path, args, kwargs = deconstructed\n        return (path, [self.deep_deconstruct(value) for value in args], {key: self.deep_deconstruct(value) for key, value in kwargs.items()})\n    else:\n        return obj",
    "docstring": "Recursive deconstruction for a field and its arguments. Used for full comparison for rename/alter; sometimes a single-level deconstruction will not compare correctly.",
    "type": "method",
    "file_path": "django\\django\\db\\migrations\\autodetector.py",
    "ast_data": "FunctionDef name:deep_deconstruct arg:self arg:obj arguments arg arg If Call Return return:yes Call If Call Return return:yes Call Call If Call Return return:yes Call Call If Call Return return:yes Call Call If Call Return return:yes Call If Call Return return:yes If Call Assign Call If Call Assign Assign Return return:yes Call Call Call Return return:yes"
  },
  {
    "library": "kornia",
    "name": "normalize_transformation",
    "source_code": "def normalize_transformation(M: Tensor, eps: float=1e-08) -> Tensor:\n    if len(M.shape) < 2:\n        raise AssertionError(M.shape)\n    norm_val: Tensor = M[..., -1:, -1:]\n    return where(norm_val.abs() > eps, M / (norm_val + eps), M)",
    "docstring": "Normalize a given transformation matrix. The function trakes the transformation matrix and normalize so that the value in the last row and column is one. Args: M: The transformation to be normalized of any shape with a minimum size of 2x2. eps: small value to avoid unstabilities during the backpropagation. Returns: the normalized transformation matrix with same shape as the input.",
    "type": "function",
    "file_path": "kornia\\kornia\\geometry\\epipolar\\fundamental.py",
    "ast_data": "FunctionDef name:normalize_transformation arg:M arg:eps arguments arg arg If Compare Call Raise Call Return return:yes Call Compare Call"
  },
  {
    "library": "uvicorn",
    "name": "ws_handler",
    "source_code": "async def ws_handler(self, protocol: WebSocketServerProtocol, path: str) -> Any:\n    self.handshake_completed_event.set()\n    await self.wait_closed()",
    "docstring": "This is the main handler function for the 'websockets' implementation to call into. We just wait for close then return, and instead allow 'send' and 'receive' events to drive the flow.",
    "type": "method",
    "file_path": "uvicorn\\uvicorn\\protocols\\websockets\\websockets_impl.py",
    "ast_data": "AsyncFunctionDef name:ws_handler arg:self arg:protocol arg:path arguments arg arg arg Call Call"
  },
  {
    "library": "django",
    "name": "__setitem__",
    "source_code": "def __setitem__(self, index, val):\n    if isinstance(index, slice):\n        self._set_slice(index, val)\n    else:\n        index = self._checkindex(index)\n        self._check_allowed((val,))\n        self._set_single(index, val)",
    "docstring": "Set the item(s) at the specified index/slice.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\geos\\mutable_list.py",
    "ast_data": "FunctionDef name:__setitem__ arg:self arg:index arg:val arguments arg arg arg If Call Call Assign Call Call Call"
  },
  {
    "library": "scikit-learn",
    "name": "is_dask_array",
    "source_code": "def is_dask_array(x: object) -> TypeIs[da.Array]:\n    cls = cast(Hashable, type(x))\n    return _issubclass_fast(cls, 'dask.array', 'Array')",
    "docstring": "Return True if is a dask.array Array. This function does not import dask if it has not already been imported and is therefore cheap to use. See Also -------- array_namespace is_array_api_obj is_numpy_array is_cupy_array is_torch_array is_ndonnx_array is_jax_array is_pydata_sparse_array",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\externals\\array_api_compat\\common\\_helpers.py",
    "ast_data": "FunctionDef name:is_dask_array arg:x arguments arg Assign Call Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "apply",
    "source_code": "@classmethod\ndef apply(cls, module, name, mask):\n    return super().apply(module, name, mask=mask)",
    "docstring": "Add pruning on the fly and reparametrization of a tensor. Adds the forward pre-hook that enables pruning on the fly and the reparametrization of a tensor in terms of the original tensor and the pruning mask. Args: module (nn.Module): module containing the tensor to prune name (str): parameter name within `` on which pruning will act.",
    "type": "method",
    "file_path": "pytorch\\torch\\nn\\utils\\prune.py",
    "ast_data": "FunctionDef name:apply arg:cls arg:module arg:name arg:mask arguments arg arg arg arg Return return:yes Call Call"
  },
  {
    "library": "scipy",
    "name": "__init__",
    "source_code": "def __init__(self, histogram, *args, density=None, **kwargs):\n    self._histogram = histogram\n    self._density = density\n    if len(histogram) != 2:\n        raise ValueError('Expected length 2 for parameter histogram')\n    self._hpdf = np.asarray(histogram[0])\n    self._hbins = np.asarray(histogram[1])\n    if len(self._hpdf) + 1 != len(self._hbins):\n        raise ValueError('Number of elements in histogram content and histogram boundaries do not match, expected n and n+1.')\n    self._hbin_widths = self._hbins[1:] - self._hbins[:-1]\n    bins_vary = not np.allclose(self._hbin_widths, self._hbin_widths[0])\n    if density is None and bins_vary:\n        message = 'Bin widths are not constant. Assuming `density=True`.Specify `density` explicitly to silence this warning.'\n        warnings.warn(message, RuntimeWarning, stacklevel=2)\n        density = True\n    elif not density:\n        self._hpdf = self._hpdf / self._hbin_widths\n    self._hpdf = self._hpdf / float(np.sum(self._hpdf * self._hbin_widths))\n    self._hcdf = np.cumsum(self._hpdf * self._hbin_widths)\n    self._hpdf = np.hstack([0.0, self._hpdf, 0.0])\n    self._hcdf = np.hstack([0.0, self._hcdf])\n    kwargs['a'] = self.a = self._hbins[0]\n    kwargs['b'] = self.b = self._hbins[-1]\n    super().__init__(*args, **kwargs)",
    "docstring": "Create a new distribution using the given histogram Parameters ---------- histogram : tuple of array_like Tuple containing two array_like objects. The first containing the content of n bins, the second containing the (n+1) bin boundaries. In particular, the return value of np.histogram is accepted. density : bool, optional If False, assumes the histogram is proportional to counts per bin; otherwise, assumes it is proportional to a density. For constant bin widths, these are equivalent. If None (default), sets `density` explicitly to silence the warning.",
    "type": "method",
    "file_path": "scipy\\scipy\\stats\\_continuous_distns.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:histogram arguments arg arg arg arg arg Assign Assign If Compare Call Raise Call Assign Call Assign Call If Compare Call Call Raise Call Assign Assign Call If BoolOp Compare Assign Call Assign If Assign Assign Call Call Assign Call Assign Call Assign Call Assign Assign Call Call"
  },
  {
    "library": "scikit-learn",
    "name": "fit",
    "source_code": "def fit(self, X, Y, sample_weight=None, **fit_params):\n    super().fit(X, Y, sample_weight=sample_weight, **fit_params)\n    self.classes_ = [estimator.classes_ for estimator in self.estimators_]\n    return self",
    "docstring": "Fit the model to data matrix X and targets Y. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) The input data. Y : array-like of shape (n_samples, n_classes) The target values. sample_weight : array-like of shape (n_samples,), default=None Sample weights. If , then samples are equally weighted. Only supported if the underlying classifier supports sample weights. **fit_params : dict of string -> object Parameters passed to the `` method of each step. .. versionadded:: 0.23 Returns ------- self : object Returns a fitted instance.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\multioutput.py",
    "ast_data": "FunctionDef name:fit arg:self arg:X arg:Y arg:sample_weight arguments arg arg arg arg arg Call Call Assign Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "set_patch_circle",
    "source_code": "def set_patch_circle(self, center, radius):\n    self._patch_type = 'circle'\n    self._center = center\n    self._width = radius * 2\n    self._height = radius * 2\n    self.set_transform(self.axes.transAxes)\n    self.stale = True",
    "docstring": "Set the spine to be circular.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\spines.py",
    "ast_data": "FunctionDef name:set_patch_circle arg:self arg:center arg:radius arguments arg arg arg Assign Assign Assign Assign Call Assign"
  },
  {
    "library": "matplotlib",
    "name": "__init__",
    "source_code": "def __init__(self, nbins=None, **kwargs):\n    if nbins is not None:\n        kwargs['nbins'] = nbins\n    self.set_params(**{**self.default_params, **kwargs})",
    "docstring": "Parameters ---------- nbins : int or 'auto', default: 10 Maximum number of intervals; one less than max number of ticks. If the string 'auto', the number of bins will be automatically determined based on the length of the axis. steps : array-like, optional Sequence of acceptable tick multiples, starting with 1 and ending with 10. For example, if `axes.autolimit_mode` is 'round_numbers'). Removing such ticks is mostly useful for stacked or ganged plots, where the upper tick of an Axes overlaps with the lower tick of the axes above it. min_n_ticks : int, default: 2 Relax *nbins* and *integer* constraints if necessary to obtain this minimum number of ticks.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\ticker.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:nbins arguments arg arg arg If Compare Assign Call"
  },
  {
    "library": "django",
    "name": "django",
    "source_code": "@property\ndef django(self):\n    s = self.name.replace('25D', '')\n    if s in ('LinearRing', 'None'):\n        return None\n    elif s == 'Unknown':\n        s = 'Geometry'\n    elif s == 'PointZ':\n        s = 'Point'\n    return s + 'Field'",
    "docstring": "Return the Django GeometryField for this OGR Type.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\gdal\\geomtype.py",
    "ast_data": "FunctionDef name:django arg:self arguments arg Assign Call If Compare Return return:no If Compare Assign If Compare Assign Return return:yes"
  },
  {
    "library": "pandas",
    "name": "_maybe_squeeze_arg",
    "source_code": "def _maybe_squeeze_arg(self, arg):\n    if isinstance(arg, (np.ndarray, ExtensionArray)) and arg.ndim == self.values.ndim + 1:\n        assert arg.shape[1] == 1\n        arg = arg[:, 0]\n    elif isinstance(arg, ABCDataFrame):\n        assert arg.shape[1] == 1\n        arg = arg._ixs(0, axis=1)._values\n    return arg",
    "docstring": "If necessary, squeeze a (N, 1) ndarray to (N,)",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\internals\\blocks.py",
    "ast_data": "FunctionDef name:_maybe_squeeze_arg arg:self arg:arg arguments arg arg If BoolOp Call Compare Compare Assign If Call Compare Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "size",
    "source_code": "@property\ndef size(self):\n    return sum((len(self._dump_tensor_data[device_name]) for device_name in self._dump_tensor_data))",
    "docstring": "Total number of dumped tensors in the dump root directory. Returns: () The total number of dumped tensors in the dump root directory.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\debug\\lib\\debug_data.py",
    "ast_data": "FunctionDef name:size arg:self arguments arg Return return:yes Call Call"
  },
  {
    "library": "kornia",
    "name": "PlanckianJitterGenerator",
    "source_code": "class PlanckianJitterGenerator(RandomGeneratorBase):\n\n    def __init__(self, domain: List[float]) -> None:\n        super().__init__()\n        self.domain = domain\n\n    def make_samplers(self, device: torch.device, dtype: torch.dtype) -> None:\n        idx_range = _range_bound(self.domain, 'idx_range', device=device, dtype=dtype)\n        _joint_range_check(idx_range, 'idx_range', (0, self.domain[1]))\n        self.pl_idx_dist = UniformDistribution(idx_range[0], idx_range[1], validate_args=False)\n\n    def forward(self, batch_shape: Tuple[int, ...], same_on_batch: bool=False) -> Dict[str, torch.Tensor]:\n        batch_size = batch_shape[0]\n        _common_param_check(batch_size, same_on_batch)\n        pl_idx = _adapted_rsampling((batch_size,), self.pl_idx_dist, same_on_batch)\n        return {'idx': pl_idx.long()}",
    "docstring": "Generate random planckian jitter parameters for a batch of images.",
    "type": "class",
    "file_path": "kornia\\kornia\\augmentation\\random_generator\\_2d\\planckian_jitter.py",
    "ast_data": "ClassDef name:PlanckianJitterGenerator FunctionDef name:__init__ arg:self arg:domain arguments arg arg Call Call Assign FunctionDef name:make_samplers arg:self arg:device arg:dtype arguments arg arg arg Assign Call Call Assign Call FunctionDef name:forward arg:self arg:batch_shape arg:same_on_batch arguments arg arg arg Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "_rescale_data",
    "source_code": "def _rescale_data(X, y, sample_weight, inplace=False):\n    xp, _ = get_namespace(X, y, sample_weight)\n    n_samples = X.shape[0]\n    sample_weight_sqrt = xp.sqrt(sample_weight)\n    if sp.issparse(X) or sp.issparse(y):\n        sw_matrix = sparse.dia_matrix((sample_weight_sqrt, 0), shape=(n_samples, n_samples))\n    if sp.issparse(X):\n        X = safe_sparse_dot(sw_matrix, X)\n    elif inplace:\n        X *= sample_weight_sqrt[:, None]\n    else:\n        X = X * sample_weight_sqrt[:, None]\n    if sp.issparse(y):\n        y = safe_sparse_dot(sw_matrix, y)\n    elif inplace:\n        if y.ndim == 1:\n            y *= sample_weight_sqrt\n        else:\n            y *= sample_weight_sqrt[:, None]\n    elif y.ndim == 1:\n        y = y * sample_weight_sqrt\n    else:\n        y = y * sample_weight_sqrt[:, None]\n    return (X, y, sample_weight_sqrt)",
    "docstring": "Rescale data sample-wise by square root of sample_weight. For many linear models, this enables easy support for sample_weight because (y - X w)' S (y - X w) with S = diag(sample_weight) becomes ||y_rescaled - X_rescaled w||_2^2 when setting y_rescaled = sqrt(S) y X_rescaled = sqrt(S) X Returns ------- X_rescaled : {array-like, sparse matrix} y_rescaled : {array-like, sparse matrix}",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\linear_model\\_base.py",
    "ast_data": "FunctionDef name:_rescale_data arg:X arg:y arg:sample_weight arg:inplace arguments arg arg arg arg Assign Call Assign Assign Call If BoolOp Call Call Assign Call If Call Assign Call If Assign If Call Assign Call If If Compare If Compare Assign Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "identity",
    "source_code": "def identity(self):\n    flow = array_ops.identity(self._flow)\n    return build_ta_with_new_flow(self, flow)",
    "docstring": "See TensorArray.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\tensor_array_ops.py",
    "ast_data": "FunctionDef name:identity arg:self arguments arg Assign Call Return return:yes Call"
  },
  {
    "library": "django",
    "name": "color_style",
    "source_code": "def color_style(force_color=False):\n    if not force_color and (not supports_color()):\n        return no_style()\n    return make_style(os.environ.get('DJANGO_COLORS', ''))",
    "docstring": "Return a Style object from the Django color scheme.",
    "type": "function",
    "file_path": "django\\django\\core\\management\\color.py",
    "ast_data": "FunctionDef name:color_style arg:force_color arguments arg If BoolOp Call Return return:yes Call Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "write",
    "source_code": "def write(self, filename: str, **kwargs) -> None:\n    with open(filename, mode='w', encoding='utf-8') as f:\n        json.dump({'docs': [d._asdict() for d in sorted(self.docs)], 'symbols': [s._asdict() for s in sorted(self.symbols)]}, f, **kwargs)",
    "docstring": "Writes exports to filename.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\tools\\api\\generator2\\shared\\exported_api.py",
    "ast_data": "FunctionDef name:write arg:self arg:filename arguments arg arg arg With Call Call Call Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "deserialize",
    "source_code": "def deserialize(name, custom_objects=None):\n    return deserialize_keras_object(name, module_objects=globals(), custom_objects=custom_objects, printable_module_name='loss function')",
    "docstring": "Deserializes a serialized loss class/function instance. Args: name: Loss configuration. custom_objects: Optional dictionary mapping names (strings) to custom objects (classes and functions) to be considered during deserialization. Returns: A Keras instance or a loss function.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\losses.py",
    "ast_data": "FunctionDef name:deserialize arg:name arg:custom_objects arguments arg arg Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "sub",
    "source_code": "@_onnx_symbolic('aten::sub')\ndef sub(g: jit_utils.GraphContext, self, other, alpha=None):\n    if alpha and symbolic_helper._scalar(symbolic_helper._maybe_get_scalar(alpha)) != 1:\n        other = g.op('Mul', other, alpha)\n    return g.op('Sub', self, other)",
    "docstring": "Consumes sub function and returns the corresponding ONNX operator. This function is not meant to be called directly by the user. Args: g (GraphContext): The graph context. self (Tensor): The first operand. other (Tensor): The second operand. alpha (Optional[Tensor]): A scaling factor to apply to the second operand. If is not provided, it defaults to 1. Returns: ONNX operator",
    "type": "function",
    "file_path": "pytorch\\torch\\onnx\\symbolic_opset9.py",
    "ast_data": "FunctionDef name:sub arg:g arg:self arg:other arg:alpha arguments arg arg arg arg If BoolOp Compare Call Call Assign Call Return return:yes Call Call"
  },
  {
    "library": "scipy",
    "name": "getformat",
    "source_code": "def getformat(self):\n    return self.format",
    "docstring": "Matrix storage format",
    "type": "method",
    "file_path": "scipy\\scipy\\sparse\\_matrix.py",
    "ast_data": "FunctionDef name:getformat arg:self arguments arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_in_graph_bw_hooks",
    "source_code": "def _in_graph_bw_hooks(bw_state: BackwardState):\n    return torch.utils.hooks.BackwardHook(None, (functools.partial(trace_wrapped, fn=call_module_hooks_from_backward_state, bw_state=bw_state, hooks_name=user_hooks_name, module_name=module_name),), (functools.partial(trace_wrapped, fn=call_module_hooks_from_backward_state, bw_state=bw_state, hooks_name=user_pre_hooks_name, module_name=module_name),))",
    "docstring": "Rather than installing the user hooks in the graph (which don't survive AotAutograd), we install hooks that will call trace_wrapped in the backward pass that CompiledAutograd can turn into actual hook calls.",
    "type": "method",
    "file_path": "pytorch\\torch\\_dynamo\\variables\\distributed.py",
    "ast_data": "FunctionDef name:_in_graph_bw_hooks arg:bw_state arguments arg Return return:yes Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "RendezvousInfo",
    "source_code": "class RendezvousInfo:\n\n    def __init__(self, store: Store, rank: int, world_size: int, bootstrap_store_info: RendezvousStoreInfo):\n        self._store = store\n        self._rank = rank\n        self._world_size = world_size\n        self._bootstrap_store_info = bootstrap_store_info\n\n    @property\n    def store(self) -> Store:\n        return self._store\n\n    @property\n    def rank(self) -> int:\n        return self._rank\n\n    @property\n    def world_size(self) -> int:\n        return self._world_size\n\n    @property\n    def bootstrap_store_info(self) -> Optional[RendezvousStoreInfo]:\n        return self._bootstrap_store_info",
    "docstring": "Holds the information about the rendezvous.",
    "type": "class",
    "file_path": "pytorch\\torch\\distributed\\elastic\\rendezvous\\api.py",
    "ast_data": "ClassDef name:RendezvousInfo FunctionDef name:__init__ arg:self arg:store arg:rank arg:world_size arg:bootstrap_store_info arguments arg arg arg arg arg Assign Assign Assign Assign FunctionDef name:store arg:self arguments arg Return return:yes FunctionDef name:rank arg:self arguments arg Return return:yes FunctionDef name:world_size arg:self arguments arg Return return:yes FunctionDef name:bootstrap_store_info arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "parameter_devices",
    "source_code": "@property\ndef parameter_devices(self):\n    raise NotImplementedError('must be implemented in descendants')",
    "docstring": "Returns the tuple of all devices used to place variables.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\distribute_lib.py",
    "ast_data": "FunctionDef name:parameter_devices arg:self arguments arg Raise Call"
  },
  {
    "library": "pytorch",
    "name": "python_type",
    "source_code": "def python_type(self):\n    return self.__variable.python_type()",
    "docstring": "Returns what type(v) would have returned for the variable at compile time.",
    "type": "method",
    "file_path": "pytorch\\torch\\_dynamo\\comptime.py",
    "ast_data": "FunctionDef name:python_type arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "get_valid_values",
    "source_code": "def get_valid_values(self, attr):\n    name = 'set_%s' % attr\n    if not hasattr(self.o, name):\n        raise AttributeError(f'{self.o} has no function {name}')\n    func = getattr(self.o, name)\n    if hasattr(func, '_kwarg_doc'):\n        return func._kwarg_doc\n    docstring = inspect.getdoc(func)\n    if docstring is None:\n        return 'unknown'\n    if docstring.startswith('Alias for '):\n        return None\n    match = self._get_valid_values_regex.search(docstring)\n    if match is not None:\n        return re.sub('\\n *', ' ', match.group(1))\n    param_name = func.__code__.co_varnames[1]\n    match = re.search(f'(?m)^ *\\\\*?{param_name} : (.+)', docstring)\n    if match:\n        return match.group(1)\n    return 'unknown'",
    "docstring": "Get the legal arguments for the setter associated with *attr*. This is done by querying the docstring of the setter for a line that begins with \"ACCEPTS:\" or \".. ACCEPTS:\", and then by looking for a numpydoc-style documentation for the setter's first argument.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\artist.py",
    "ast_data": "FunctionDef name:get_valid_values arg:self arg:attr arguments arg arg Assign If Call Raise Call Assign Call If Call Return return:yes Assign Call If Compare Return return:yes If Call Return return:no Assign Call If Compare Return return:yes Call Call Assign Assign Call If Return return:yes Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "get",
    "source_code": "def get(self, object_id):\n    if object_id is None:\n        return\n    return self._obj_ids_to_obj.get(object_id)",
    "docstring": "Given a shared object ID, returns a previously instantiated object. Args: object_id: shared object ID to use when attempting to find already-loaded object. Returns: The object, if we've seen this ID before. Else, .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\utils\\generic_utils.py",
    "ast_data": "FunctionDef name:get arg:self arg:object_id arguments arg arg If Compare Return return:no Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "shape",
    "source_code": "@property\ndef shape(self):\n    return self._type_spec._shape",
    "docstring": "Returns the symbolically inferred for this Keras output.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\keras_tensor.py",
    "ast_data": "FunctionDef name:shape arg:self arguments arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "maybe_disable_graph_partition",
    "source_code": "def maybe_disable_graph_partition(cpp_wrapper: bool, aot_mode: bool) -> AbstractContextManager[None, None]:\n    if cpp_wrapper or aot_mode:\n        return config.patch(graph_partition=False)\n    else:\n        return contextlib.nullcontext()",
    "docstring": "graph partition does not support cpp_wrapper and aot_mode yet.",
    "type": "function",
    "file_path": "pytorch\\torch\\_inductor\\compile_fx.py",
    "ast_data": "FunctionDef name:maybe_disable_graph_partition arg:cpp_wrapper arg:aot_mode arguments arg arg If BoolOp Return return:yes Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "__new__",
    "source_code": "def __new__(cls, *args, **kwargs):\n    orig_cls = cls.__mro__[2]\n    self = orig_cls.__new__(orig_cls, *args, **kwargs)\n    self.__init__(*args, **kwargs)\n    return self",
    "docstring": "Override `` to remove the FSDP class and directly construct the original class for cases like indexing into a container module.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\fsdp\\_fully_shard\\_fully_shard.py",
    "ast_data": "FunctionDef name:__new__ arg:cls arguments arg arg arg Assign Assign Call Call Return return:yes"
  },
  {
    "library": "scipy",
    "name": "get_variables",
    "source_code": "def get_variables(self, variable_names=None):\n    if isinstance(variable_names, str):\n        variable_names = [variable_names]\n    elif variable_names is not None:\n        variable_names = list(variable_names)\n    self.mat_stream.seek(0)\n    self.initialize_read()\n    mdict = {}\n    while not self.end_of_stream():\n        hdr, next_position = self.read_var_header()\n        name = 'None' if hdr.name is None else hdr.name.decode('latin1')\n        if variable_names is not None and name not in variable_names:\n            self.mat_stream.seek(next_position)\n            continue\n        mdict[name] = self.read_var_array(hdr)\n        self.mat_stream.seek(next_position)\n        if variable_names is not None:\n            variable_names.remove(name)\n            if len(variable_names) == 0:\n                break\n    return mdict",
    "docstring": "get variables from stream as dictionary Parameters ---------- variable_names : None or str or sequence of str, optional variable name, or sequence of variable names to get from Mat file / file stream. If None, then get all variables in file.",
    "type": "method",
    "file_path": "scipy\\scipy\\io\\matlab\\_mio4.py",
    "ast_data": "FunctionDef name:get_variables arg:self arg:variable_names arguments arg arg If Call Assign If Compare Assign Call Call Call Assign While Call Assign Call Assign Compare Call If BoolOp Compare Compare Call Assign Call Call If Compare Call If Compare Call Return return:yes"
  },
  {
    "library": "kornia",
    "name": "bgr_to_rgba",
    "source_code": "def bgr_to_rgba(image: Tensor, alpha_val: Union[float, Tensor]) -> Tensor:\n    if not isinstance(image, Tensor):\n        raise TypeError(f'Input type is not a Tensor. Got {type(image)}')\n    if len(image.shape) < 3 or image.shape[-3] != 3:\n        raise ValueError(f'Input size must have a shape of (*, 3, H, W).Got {image.shape}')\n    if not isinstance(alpha_val, (float, Tensor)):\n        raise TypeError(f'alpha_val type is not a float or Tensor. Got {type(alpha_val)}')\n    x_rgb: Tensor = bgr_to_rgb(image)\n    return rgb_to_rgba(x_rgb, alpha_val)",
    "docstring": "Convert an image from BGR to RGBA. Args: image: BGR Image to be converted to RGBA of shape :math:. alpha_val: A float number for the alpha value or a tensor of shape :math:. Returns: RGBA version of the image with shape :math:. .. note:: The current functionality is NOT supported by Torchscript. Example: >>> input = torch.rand(2, 3, 4, 5) >>> output = bgr_to_rgba(input, 1.) # 2x4x4x5",
    "type": "function",
    "file_path": "kornia\\kornia\\color\\rgb.py",
    "ast_data": "FunctionDef name:bgr_to_rgba arg:image arg:alpha_val arguments arg arg If Call Raise Call Call If BoolOp Compare Call Compare Raise Call If Call Raise Call Call Call Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "chisquare",
    "source_code": "@xp_capabilities(jax_jit=False, allow_dask_compute=True)\n@_axis_nan_policy_factory(Power_divergenceResult, paired=True, n_samples=_pd_nsamples, too_small=-1)\ndef chisquare(f_obs, f_exp=None, ddof=0, axis=0, *, sum_check=True):\n    return _power_divergence(f_obs, f_exp=f_exp, ddof=ddof, axis=axis, lambda_='pearson', sum_check=sum_check)",
    "docstring": "Perform Pearson's chi-squared test. Pearson's chi-squared test [1]_ is a goodness-of-fit test for a multinomial distribution with given probabilities; that is, it assesses the null hypothesis that the observed frequencies (counts) are obtained by independent sampling of *N* observations from a categorical distribution with given expected frequencies. Parameters ---------- f_obs : array_like Observed frequencies in each category. f_exp : array_like, optional Expected frequencies in each category. By default, the categories are assumed to be equally likely. ddof : int, optional \"Delta degrees of freedom\": adjustment to the degrees of freedom for the p-value. The p-value is computed using a chi-squared distribution with `ddoff_obsf_expf_obsaxisf_obsf_expddofstatistichypothesis_chisquaref_obsf_expf_obsddofddoff_obsf_expf_obsf_expf_obsf_exphypothesis_chisquare`.",
    "type": "function",
    "file_path": "scipy\\scipy\\stats\\_stats_py.py",
    "ast_data": "FunctionDef name:chisquare arg:f_obs arg:f_exp arg:ddof arg:axis arguments arg arg arg arg arg Return return:yes Call Call Call"
  },
  {
    "library": "django",
    "name": "check_expression_support",
    "source_code": "def check_expression_support(self, expression):\n    pass",
    "docstring": "Check that the backend supports the provided expression. This is used on specific backends to rule out known expressions that have problematic or nonexistent implementations. If the expression has a known problem, the backend should raise NotSupportedError.",
    "type": "method",
    "file_path": "django\\django\\db\\backends\\base\\operations.py",
    "ast_data": "FunctionDef name:check_expression_support arg:self arg:expression arguments arg arg"
  },
  {
    "library": "tensorflow",
    "name": "batch_scatter_update",
    "source_code": "def batch_scatter_update(self, sparse_delta, use_locking=False, name=None):\n    if not isinstance(sparse_delta, indexed_slices.IndexedSlices):\n        raise TypeError(f'Argument `sparse_delta` must be a `tf.IndexedSlices`. Received arg: {sparse_delta}')\n    return self._lazy_read(state_ops.batch_scatter_update(self, sparse_delta.indices, sparse_delta.values, use_locking=use_locking, name=name))",
    "docstring": "Assigns to this variable batch-wise. Analogous to . This assumes that this variable and the sparse_delta IndexedSlices have a series of leading dimensions that are the same for all of them, and the updates are performed on the last dimension of indices. In other words, the dimensions should be the following: where And the operation performed can be expressed as: When sparse_delta.indices is a 1D tensor, this operation is equivalent to . To avoid this operation one can looping over the first of the variable and using on the subtensors that result of slicing the first dimension. This is a valid option for , but less efficient than this implementation. Args: sparse_delta: to be assigned to this variable. use_locking: If , use locking during the operation. name: the name of the operation. Returns: The updated variable. Raises: TypeError: if is not an .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\resource_variable_ops.py",
    "ast_data": "FunctionDef name:batch_scatter_update arg:self arg:sparse_delta arg:use_locking arg:name arguments arg arg arg arg If Call Raise Call Return return:yes Call Call"
  },
  {
    "library": "matplotlib",
    "name": "Normal",
    "source_code": "class Normal(AbstractPathEffect):\n    pass",
    "docstring": "The \"identity\" PathEffect. The Normal PathEffect's sole purpose is to draw the original artist with no special path effect.",
    "type": "class",
    "file_path": "matplotlib\\lib\\matplotlib\\patheffects.py",
    "ast_data": "ClassDef name:Normal"
  },
  {
    "library": "pytorch",
    "name": "uuid",
    "source_code": "@abstractmethod\ndef uuid(self) -> Optional[Any]:\n    pass",
    "docstring": "Return an ID to uniquely identify your custom pass implementation. Return None to skip inductor code caching entirely.",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\custom_graph_pass.py",
    "ast_data": "FunctionDef name:uuid arg:self arguments arg"
  },
  {
    "library": "pytorch",
    "name": "get_fastpath_enabled",
    "source_code": "def get_fastpath_enabled() -> bool:\n    if not torch.jit.is_scripting():\n        return _is_fastpath_enabled\n    return True",
    "docstring": "Returns whether fast path for TransformerEncoder and MultiHeadAttention is enabled, or `` unless all conditions on inputs are met.",
    "type": "function",
    "file_path": "pytorch\\torch\\backends\\mha\\__init__.py",
    "ast_data": "FunctionDef name:get_fastpath_enabled arguments If Call Return return:yes Return return:yes"
  },
  {
    "library": "sphinx",
    "name": "ViewcodeAnchorTransform",
    "source_code": "class ViewcodeAnchorTransform(SphinxPostTransform):\n    default_priority = 100\n\n    def run(self, **kwargs: Any) -> None:\n        if is_supported_builder(self.app.builder):\n            self.convert_viewcode_anchors()\n        else:\n            self.remove_viewcode_anchors()\n\n    def convert_viewcode_anchors(self) -> None:\n        for node in self.document.findall(viewcode_anchor):\n            anchor = nodes.inline('', _('[source]'), classes=['viewcode-link'])\n            refnode = make_refnode(self.app.builder, node['refdoc'], node['reftarget'], node['refid'], anchor)\n            node.replace_self(refnode)\n\n    def remove_viewcode_anchors(self) -> None:\n        for node in list(self.document.findall(viewcode_anchor)):\n            node.parent.remove(node)",
    "docstring": "Convert or remove viewcode_anchor nodes depends on builder.",
    "type": "class",
    "file_path": "sphinx\\sphinx\\ext\\viewcode.py",
    "ast_data": "ClassDef name:ViewcodeAnchorTransform Assign FunctionDef name:run arg:self arguments arg arg If Call Call Call FunctionDef name:convert_viewcode_anchors arg:self arguments arg For Call Assign Call Call Assign Call Call FunctionDef name:remove_viewcode_anchors arg:self arguments arg For Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "wait_for_broadcasts",
    "source_code": "def wait_for_broadcasts(self) -> None:\n    assert len(self.broadcast_handles) == self.num_bucket_assignments, f'Missing at least one broadcast handle on rank {dist.get_rank()}'\n    _ = [x.wait() for x in self.broadcast_handles]\n    self.broadcast_handles.clear()",
    "docstring": "Wait for all parameter broadcasts. This function should be called once all broadcasts have been scheduled, meaning `` in preparation for the next iteration.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\optim\\zero_redundancy_optimizer.py",
    "ast_data": "FunctionDef name:wait_for_broadcasts arg:self arguments arg Compare Call Call Assign Call Call"
  },
  {
    "library": "cryptography",
    "name": "rsa_crt_dmp1",
    "source_code": "def rsa_crt_dmp1(private_exponent: int, p: int) -> int:\n    return private_exponent % (p - 1)",
    "docstring": "Compute the CRT private_exponent % (p - 1) value from the RSA private_exponent (d) and p.",
    "type": "function",
    "file_path": "cryptography\\src\\cryptography\\hazmat\\primitives\\asymmetric\\rsa.py",
    "ast_data": "FunctionDef name:rsa_crt_dmp1 arg:private_exponent arg:p arguments arg arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "row_limits",
    "source_code": "def row_limits(self, name=None):\n    with ops.name_scope(name, 'RaggedRowLimits', [self]):\n        return self._row_partition.row_limits()",
    "docstring": "Returns the limit indices for rows in this ragged tensor. These indices specify where the values for each row end in . is equal to . Args: name: A name prefix for the returned tensor (optional). Returns: A 1-D integer Tensor with shape . The returned tensor is nonnegative, and is sorted in ascending order. #### Example: >>> rt = tf.ragged.constant([[3, 1, 4, 1], [], [5, 9, 2], [6], []]) >>> print(rt.values) tf.Tensor([3 1 4 1 5 9 2 6], shape=(8,), dtype=int32) >>> print(rt.row_limits()) # indices of row limits in rt.values tf.Tensor([4 4 7 8 8], shape=(5,), dtype=int64)",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\ragged_tensor.py",
    "ast_data": "FunctionDef name:row_limits arg:self arg:name arguments arg arg With Call Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "weighted",
    "source_code": "@lazy_cython\ndef weighted(y):\n    return linkage(y, method='weighted', metric='euclidean')",
    "docstring": "Perform weighted/WPGMA linkage on the condensed distance matrix. See for more information on the return structure and algorithm. Parameters ---------- y : ndarray The upper triangular of the distance matrix. The result of `linkagescipy.cluster.hierarchy.linkagescipy.cluster.hierarchy.fclusterscipy.cluster.hierarchy.dendrogram` can be used to generate a plot of the dendrogram.",
    "type": "function",
    "file_path": "scipy\\scipy\\cluster\\hierarchy.py",
    "ast_data": "FunctionDef name:weighted arg:y arguments arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "_normalize",
    "source_code": "def _normalize(a: Tensor, norm_dims: DimsType, eps: float) -> tuple[Tensor, Tensor, Tensor]:\n    norm_dims = utils.canonicalize_dims(a.ndim, norm_dims)\n    computation_dtype = utils.get_computation_dtype(a.dtype)\n    a_acc = _maybe_convert_to_dtype(a, computation_dtype)\n    assert isinstance(a_acc, TensorLike)\n    biased_var, mean = torch.var_mean(a_acc, dim=norm_dims, unbiased=False, keepdim=True)\n    rstd = torch.rsqrt(biased_var + eps)\n    out = (a_acc - mean) * rstd\n    return (out, mean, rstd)",
    "docstring": "Computes mean and 1/std of a tensor along norm_dims. Used as a helper function for normalization layers. Args: a (Tensor): input tensor norm_dims (DimsType): dimensions to normalize over eps (float): epsilon for numerical stability Returns: out (Tensor): normalized tensor. mean (Tensor): mean of the tensor along norm_dims. rstd (Tensor): 1/std of the tensor along norm_dims.",
    "type": "function",
    "file_path": "pytorch\\torch\\_refs\\__init__.py",
    "ast_data": "FunctionDef name:_normalize arg:a arg:norm_dims arg:eps arguments arg arg arg Assign Call Assign Call Assign Call Call Assign Call Assign Call Assign Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "adaptive_avg_pool3d",
    "source_code": "def adaptive_avg_pool3d(input: Tensor, output_size: BroadcastingList2[int]) -> Tensor:\n    if not input.is_quantized:\n        raise ValueError(\"Input to 'quantized.functional.adaptive_avg_pool3d' must be quantized!\")\n    return torch.nn.functional.adaptive_avg_pool3d(input, output_size)",
    "docstring": "Applies a 3D adaptive average pooling over a quantized input signal composed of several quantized input planes. .. note:: The input quantization parameters propagate to the output. See :class: for details and output shape. Args: output_size: the target output size (single integer or double-integer tuple)",
    "type": "function",
    "file_path": "pytorch\\torch\\ao\\nn\\quantized\\functional.py",
    "ast_data": "FunctionDef name:adaptive_avg_pool3d arg:input arg:output_size arguments arg arg If Raise Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "serialize_sparse",
    "source_code": "@tf_export(v1=['io.serialize_sparse', 'serialize_sparse'])\n@dispatch.add_dispatch_support\n@deprecation.deprecated_endpoints('serialize_sparse')\ndef serialize_sparse(sp_input, name=None, out_type=dtypes.string):\n    return serialize_sparse_v2(sp_input, out_type, name)",
    "docstring": "Serialize a into a 3-vector (1-D ) object. Args: sp_input: The input . name: A name prefix for the returned tensors (optional). out_type: The to use for serialization. Returns: A 3-vector (1-D ), with each column representing the serialized 's indices, values, and shape (respectively). Raises: TypeError: If is not a .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\sparse_ops.py",
    "ast_data": "FunctionDef name:serialize_sparse arg:sp_input arg:name arg:out_type arguments arg arg arg Return return:yes Call Call Call"
  },
  {
    "library": "kornia",
    "name": "t",
    "source_code": "@property\ndef t(self) -> Vector3 | Tensor:\n    return self._translation",
    "docstring": "Return the underlying translation vector of shape :math:.",
    "type": "method",
    "file_path": "kornia\\kornia\\geometry\\liegroup\\se3.py",
    "ast_data": "FunctionDef name:t arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "all_to_all_v3",
    "source_code": "def all_to_all_v3(communicator, t, group_assignment=None, timeout_seconds=None):\n    if group_assignment is None:\n        group_assignment = []\n    return gen_collective_ops.collective_all_to_all_v3(communicator=communicator, input=t, group_assignment=group_assignment, timeout_seconds=timeout_seconds)",
    "docstring": "Exchanges tensors mutually. Args: communicator: the resource returned from . t: a . The first dimension should have the length as the size of the group. is sent to within the group. group_assignment: Optional int32 with shape [num_groups, num_ranks_per_group]. represents the ranks in the subgroup. timeout_seconds: If set to a non zero, set a completion timeout to detect staleness. If the timer goes off, a DeadlineExceededError is raised. The timeout value in seconds. This feature is experimental. Returns: a . is sent from within the group.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\collective_ops.py",
    "ast_data": "FunctionDef name:all_to_all_v3 arg:communicator arg:t arg:group_assignment arg:timeout_seconds arguments arg arg arg arg If Compare Assign Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "merge_from",
    "source_code": "def merge_from(self, dev):\n    self.job, self.replica, self.task, self.device_type, self.device_index = self._get_combined_properties(dev)",
    "docstring": "Merge the properties of \"dev\" into this . Note: Will be removed in TensorFlow 2.x since DeviceSpecs will become immutable. Args: dev: a .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\device_spec.py",
    "ast_data": "FunctionDef name:merge_from arg:self arg:dev arguments arg arg Assign Call"
  },
  {
    "library": "scikit-learn",
    "name": "available_if",
    "source_code": "def available_if(check):\n    return lambda fn: _AvailableIfDescriptor(fn, check, attribute_name=fn.__name__)",
    "docstring": "An attribute that is available only if check returns a truthy value. Parameters ---------- check : callable When passed the object with the decorated method, this should return a truthy value if the attribute is available, and either return False or raise an AttributeError if not available. Returns ------- callable Callable makes the decorated method available if returns a truthy value, otherwise the decorated method is unavailable. Examples -------- >>> from sklearn.utils.metaestimators import available_if >>> class HelloIfEven: ... def __init__(self, x): ... self.x = x ... ... def _x_is_even(self): ... return self.x % 2 == 0 ... ... @available_if(_x_is_even) ... def say_hello(self): ... print(\"Hello\") ... >>> obj = HelloIfEven(1) >>> hasattr(obj, \"say_hello\") False >>> obj.x = 2 >>> hasattr(obj, \"say_hello\") True >>> obj.say_hello() Hello",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\utils\\_available_if.py",
    "ast_data": "FunctionDef name:available_if arg:check arguments arg Return return:yes arguments arg Call"
  },
  {
    "library": "tensorflow",
    "name": "enable_graph_collection",
    "source_code": "def enable_graph_collection():\n    context().enable_graph_collection()",
    "docstring": "Enables graph collection of executed functions. To retrieve the accumulated graphs call context.export_run_metadata() and to stop collecting graphs call context.disable_graph_collection().",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\context.py",
    "ast_data": "FunctionDef name:enable_graph_collection arguments Call Call"
  },
  {
    "library": "cherrypy",
    "name": "__lt__",
    "source_code": "def __lt__(self, other):\n    if self.qvalue == other.qvalue:\n        return str(self) < str(other)\n    else:\n        return self.qvalue < other.qvalue",
    "docstring": "Check if this header qvalue is less than the other. This method uses string comparison as the second factor.",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\lib\\httputil.py",
    "ast_data": "FunctionDef name:__lt__ arg:self arg:other arguments arg arg If Compare Return return:yes Compare Call Call Return return:yes Compare"
  },
  {
    "library": "tensorflow",
    "name": "make_initializable_iterator",
    "source_code": "@deprecation.deprecated(None, 'This is a deprecated API that should only be used in TF 1 graph mode and legacy TF 2 graph mode available through `tf.compat.v1`. In all other situations -- namely, eager mode and inside `tf.function` -- you can consume dataset elements using `for elem in dataset: ...` or by explicitly creating iterator via `iterator = iter(dataset)` and fetching its elements via `values = next(iterator)`. Furthermore, this API is not available in TF 2. During the transition from TF 1 to TF 2 you can use `tf.compat.v1.data.make_initializable_iterator(dataset)` to create a TF 1 graph mode style iterator for a dataset created through TF 2 APIs. Note that this should be a transient state of your code base as there are in general no guarantees about the interoperability of TF 1 and TF 2 code.')\ndef make_initializable_iterator(self, shared_name=None) -> iterator_ops.Iterator:\n    return self._make_initializable_iterator(shared_name)",
    "docstring": "Creates an iterator for elements of this dataset. Note: The returned iterator will be in an uninitialized state, and you must run the operation before using it: Args: shared_name: (Optional.) If non-empty, the returned iterator will be shared under the given name across multiple sessions that share the same devices (e.g. when using a remote server). Returns: A for elements of this dataset. Raises: RuntimeError: If eager execution is enabled.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\data\\ops\\dataset_ops.py",
    "ast_data": "FunctionDef name:make_initializable_iterator arg:self arg:shared_name arguments arg arg Return return:yes Call Call"
  },
  {
    "library": "seaborn",
    "name": "Offset",
    "source_code": "class Offset(IntervalProperty):\n    _default_range = (0, 5)\n    _legend = False",
    "docstring": "Offset for edge-aligned text, in point units.",
    "type": "class",
    "file_path": "seaborn\\seaborn\\_core\\properties.py",
    "ast_data": "ClassDef name:Offset Assign Assign"
  },
  {
    "library": "django",
    "name": "get_format_modules",
    "source_code": "def get_format_modules(lang=None):\n    if lang is None:\n        lang = get_language()\n    if lang not in _format_modules_cache:\n        _format_modules_cache[lang] = list(iter_format_modules(lang, settings.FORMAT_MODULE_PATH))\n    return _format_modules_cache[lang]",
    "docstring": "Return a list of the format modules found.",
    "type": "function",
    "file_path": "django\\django\\utils\\formats.py",
    "ast_data": "FunctionDef name:get_format_modules arg:lang arguments arg If Compare Assign Call If Compare Assign Call Call Return return:yes"
  },
  {
    "library": "numpy",
    "name": "coeffs",
    "source_code": "@property\ndef coeffs(self):\n    return self._coeffs",
    "docstring": "The polynomial coefficients",
    "type": "method",
    "file_path": "numpy\\numpy\\lib\\_polynomial_impl.py",
    "ast_data": "FunctionDef name:coeffs arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_write_graph_section",
    "source_code": "def _write_graph_section(self, graph_order):\n    self._write_report('%s %s\\n' % (_MARKER_SECTION_BEGIN, _SECTION_NAME_GRAPH))\n    self._write_report('%s %s\\n' % (_FIELD_NAME_TOPOLOGICAL_SORT_SUCCEED, not graph_order.contains_cycle))\n    l = list(graph_order.topological_order_or_cycle)\n    for i in range(0, len(l)):\n        self._write_report('%d \"%s\"\\n' % (i, l[i].name))\n    self._write_report('%s %s\\n' % (_MARKER_SECTION_END, _SECTION_NAME_GRAPH))",
    "docstring": "Writes the graph section of the report.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\tpu\\tensor_tracer_report.py",
    "ast_data": "FunctionDef name:_write_graph_section arg:self arg:graph_order arguments arg arg Call Call Assign Call For Call Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_scoped_subscribe",
    "source_code": "def _scoped_subscribe(tensor, side_effects, control_cache):\n    with ops.device(tensor.device):\n        with _preserve_control_flow_context(tensor):\n            return _subscribe(tensor, side_effects, control_cache)",
    "docstring": "Helper method that subscribes a single tensor to a list of side_effects. This is a thin wrapper around and ensures that the side effect ops are added within the same device and control flow context of the subscribed tensor. Args: tensor: The to be subscribed. side_effects: List of side_effect functions, see subscribe for details. control_cache: helper to get control_outputs faster. Returns: The modified replacement to the passed in tensor which triggers the side effects or the given tensor, if it was already been subscribed.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\subscribe.py",
    "ast_data": "FunctionDef name:_scoped_subscribe arg:tensor arg:side_effects arg:control_cache arguments arg arg arg With Call With Call Return return:yes Call"
  },
  {
    "library": "django",
    "name": "list",
    "source_code": "def list(self, ignore_patterns):\n    for path in utils.get_files(self.storage, ignore_patterns):\n        yield (path, self.storage)",
    "docstring": "List all files of the storage.",
    "type": "method",
    "file_path": "django\\django\\contrib\\staticfiles\\finders.py",
    "ast_data": "FunctionDef name:list arg:self arg:ignore_patterns arguments arg arg For Call"
  },
  {
    "library": "scrapy",
    "name": "_defer_sleep",
    "source_code": "def _defer_sleep() -> Deferred[None]:\n    from twisted.internet import reactor\n    d: Deferred[None] = Deferred()\n    reactor.callLater(_DEFER_DELAY, d.callback, None)\n    return d",
    "docstring": "Like `` but doesn't call any real callbacks.",
    "type": "function",
    "file_path": "scrapy\\scrapy\\utils\\defer.py",
    "ast_data": "FunctionDef name:_defer_sleep arguments Call Call Return return:yes"
  },
  {
    "library": "django",
    "name": "savepoint_commit",
    "source_code": "def savepoint_commit(sid, using=None):\n    get_connection(using).savepoint_commit(sid)",
    "docstring": "Commit the most recent savepoint (if one exists). Do nothing if savepoints are not supported.",
    "type": "function",
    "file_path": "django\\django\\db\\transaction.py",
    "ast_data": "FunctionDef name:savepoint_commit arg:sid arg:using arguments arg arg Call Call"
  },
  {
    "library": "tensorflow",
    "name": "AbstractCheckpointAdapter",
    "source_code": "class AbstractCheckpointAdapter(abc.ABC):\n\n    @classmethod\n    @abc.abstractmethod\n    def create_from_checkpoint(cls, path: str):\n        pass\n\n    @abc.abstractmethod\n    def is_applicable(self, trackable: base.Trackable) -> bool:\n        pass\n\n    @abc.abstractmethod\n    def get_reshard_callback(self, name: str) -> Optional[ReshardCallback]:\n        pass\n\n    def maybe_reshard(self, name: str) -> tuple[str, Optional[ReshardCallback]]:\n        callback = self.get_reshard_callback(name)\n        if callback is None:\n            return (name, None)\n        if callback.object_name():\n            return (callback.object_name(), callback)\n        return (name, callback)",
    "docstring": "Abstract API for checkpoint adapter. This is an experimental API that specifies how checkpoint restore should be adapted for specific trackable objects.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\checkpoint\\checkpoint_adapter.py",
    "ast_data": "ClassDef name:AbstractCheckpointAdapter FunctionDef name:create_from_checkpoint arg:cls arg:path arguments arg arg FunctionDef name:is_applicable arg:self arg:trackable arguments arg arg FunctionDef name:get_reshard_callback arg:self arg:name arguments arg arg FunctionDef name:maybe_reshard arg:self arg:name arguments arg arg Assign Call If Compare Return return:yes If Call Return return:yes Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "container",
    "source_code": "@property\ndef container(self):\n    if self._function is not None:\n        return self._function.function\n    return self._enclosing_graph.graph_def",
    "docstring": "The node container (either a graph or a function).",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\convert_to_constants.py",
    "ast_data": "FunctionDef name:container arg:self arguments arg If Compare Return return:yes Return return:yes"
  },
  {
    "library": "scipy",
    "name": "SpecialFunctionWarning",
    "source_code": "class SpecialFunctionWarning(Warning):\n    pass",
    "docstring": "Warning that can be emitted by special functions.",
    "type": "class",
    "file_path": "scipy\\scipy\\special\\_sf_error.py",
    "ast_data": "ClassDef name:SpecialFunctionWarning"
  },
  {
    "library": "pandas",
    "name": "get_strcols",
    "source_code": "def get_strcols(self) -> list[list[str]]:\n    strcols = self._get_strcols_without_index()\n    if self.index:\n        str_index = self._get_formatted_index(self.tr_frame)\n        strcols.insert(0, str_index)\n    return strcols",
    "docstring": "Render a DataFrame to a list of columns (as lists of strings).",
    "type": "method",
    "file_path": "pandas\\pandas\\io\\formats\\format.py",
    "ast_data": "FunctionDef name:get_strcols arg:self arguments arg Assign Call If Assign Call Call Return return:yes"
  },
  {
    "library": "scrapy",
    "name": "open_in_browser",
    "source_code": "def open_in_browser(response: TextResponse, _openfunc: Callable[[str], Any]=webbrowser.open) -> Any:\n    from scrapy.http import HtmlResponse, TextResponse\n    body = response.body\n    if isinstance(response, HtmlResponse):\n        if b'<base' not in body:\n            _remove_html_comments(body)\n            repl = f'\\\\0<base href=\"{response.url}\">'\n            body = re.sub(b'<head(?:[^<>]*?>)', to_bytes(repl), body, count=1)\n        ext = '.html'\n    elif isinstance(response, TextResponse):\n        ext = '.txt'\n    else:\n        raise TypeError(f'Unsupported response type: {response.__class__.__name__}')\n    fd, fname = tempfile.mkstemp(ext)\n    os.write(fd, body)\n    os.close(fd)\n    return _openfunc(f'file://{fname}')",
    "docstring": "Open *response* in a local web browser, adjusting the _ for external links to work, e.g. so that images and styles are displayed. .. _base tag: For example: .. code-block:: python from scrapy.utils.response import open_in_browser def parse_details(self, response): if \"item name\" not in response.body: open_in_browser(response)",
    "type": "function",
    "file_path": "scrapy\\scrapy\\utils\\response.py",
    "ast_data": "FunctionDef name:open_in_browser arg:response arg:_openfunc arguments arg arg Assign If Call If Compare Call Assign Assign Call Call Assign If Call Assign Raise Call Assign Call Call Call Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "Step2",
    "source_code": "class Step2(Benchmark):\n    change_dimensionality = True\n\n    def __init__(self, dimensions=2):\n        Benchmark.__init__(self, dimensions)\n        self._bounds = list(zip([-100.0] * self.N, [100.0] * self.N))\n        self.custom_bounds = ([-5, 5], [-5, 5])\n        self.global_optimum = [[0.5 for _ in range(self.N)]]\n        self.fglob = 0.5\n\n    def fun(self, x, *args):\n        self.nfev += 1\n        return sum((floor(x) + 0.5) ** 2.0)",
    "docstring": "Step objective function. This class defines the Step 2 [1]_ global optimization problem. This is a multimodal minimization problem defined as follows: .. math:: f_{\\text{Step}}(x) = \\sum_{i=1}^{n} \\left ( \\lfloor x_i + 0.5 \\rfloor \\right )^2 Here, :math: represents the number of dimensions and :math: for :math:. *Global optimum*: :math: for :math: for :math: .. [1] Gavana, A. Global Optimization Benchmarks and AMPGO retrieved 2015",
    "type": "class",
    "file_path": "scipy\\benchmarks\\benchmarks\\go_benchmark_functions\\go_funcs_S.py",
    "ast_data": "ClassDef name:Step2 Assign FunctionDef name:__init__ arg:self arg:dimensions arguments arg arg Call Assign Call Call Assign Assign Call Assign FunctionDef name:fun arg:self arg:x arguments arg arg arg Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "_validate_fwd_input",
    "source_code": "def _validate_fwd_input(self, args, kwargs):\n    if self.is_first:\n        expected_args = self.args_recv_info[0]\n    else:\n        return\n    if len(kwargs):\n        return\n    expected_tensors_meta = [e.meta if isinstance(e, _RootArgPlaceholder) else e.buffer for e in expected_args]\n    validate_tensors_metadata(f'Stage {self.stage_index} forward inputs', expected_tensors_meta, args)",
    "docstring": "Raises a RuntimeError if shapes of input args/kwargs do not match the shapes configured for this stage.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\pipelining\\stage.py",
    "ast_data": "FunctionDef name:_validate_fwd_input arg:self arg:args arg:kwargs arguments arg arg arg If Assign Return return:no If Call Return return:no Assign Call Call"
  },
  {
    "library": "kornia",
    "name": "from_name",
    "source_code": "@staticmethod\ndef from_name(name: str) -> Sam:\n    if name in ['vit_b', 'vit_l', 'vit_h', 'mobile_sam']:\n        return Sam.from_config(SamConfig(name))\n    else:\n        raise ValueError(f'Invalid SAM model name: {name}')",
    "docstring": "Build/load the SAM model based on it's name. Args: name: The name of the SAM model. Valid names are: - 'vit_b' - 'vit_l' - 'vit_h' - 'mobile_sam' Returns: The respective SAM model",
    "type": "method",
    "file_path": "kornia\\kornia\\contrib\\models\\sam\\model.py",
    "ast_data": "FunctionDef name:from_name arg:name arguments arg If Compare Return return:yes Call Call Raise Call"
  },
  {
    "library": "matplotlib",
    "name": "mutatedy",
    "source_code": "def mutatedy(self):\n    return self._points[0, 1] != self._points_orig[0, 1] or self._points[1, 1] != self._points_orig[1, 1]",
    "docstring": "Return whether the y-limits have changed since init.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\transforms.py",
    "ast_data": "FunctionDef name:mutatedy arg:self arguments arg Return return:yes BoolOp Compare Compare"
  },
  {
    "library": "pandas",
    "name": "is_view",
    "source_code": "@property\ndef is_view(self) -> bool:\n    return self.values._ndarray.base is not None",
    "docstring": "return a boolean if I am possibly a view",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\internals\\blocks.py",
    "ast_data": "FunctionDef name:is_view arg:self arguments arg Return return:yes Compare"
  },
  {
    "library": "scipy",
    "name": "_init_nd_shape_and_axes",
    "source_code": "def _init_nd_shape_and_axes(x, shape, axes):\n    noshape = shape is None\n    noaxes = axes is None\n    if not noaxes:\n        axes = _iterable_of_int(axes, 'axes')\n        axes = [a + x.ndim if a < 0 else a for a in axes]\n        if any((a >= x.ndim or a < 0 for a in axes)):\n            raise ValueError('axes exceeds dimensionality of input')\n        if len(set(axes)) != len(axes):\n            raise ValueError('all axes must be unique')\n    if not noshape:\n        shape = _iterable_of_int(shape, 'shape')\n        if axes and len(axes) != len(shape):\n            raise ValueError('when given, axes and shape arguments have to be of the same length')\n        if noaxes:\n            if len(shape) > x.ndim:\n                raise ValueError('shape requires more axes than are present')\n            axes = range(x.ndim - len(shape), x.ndim)\n        shape = [x.shape[a] if s == -1 else s for s, a in zip(shape, axes)]\n    elif noaxes:\n        shape = list(x.shape)\n        axes = range(x.ndim)\n    else:\n        shape = [x.shape[a] for a in axes]\n    if any((s < 1 for s in shape)):\n        raise ValueError(f'invalid number of data points ({shape}) specified')\n    return (tuple(shape), list(axes))",
    "docstring": "Handle shape and axes arguments for N-D transforms. Returns the shape and axes in a standard form, taking into account negative values and checking for various potential errors. Parameters ---------- x : ndarray The input array. shape : int or array_like of ints or None The shape of the result. If both and (see below) are None, is `shapeaxesshapeshapex` is used. axes : int or array_like of ints or None Axes along which the calculation is computed. The default is over all axes. Negative indices are automatically converted to their positive counterparts. Returns ------- shape : tuple The shape of the result as a tuple of integers. axes : list Axes along which the calculation is computed, as a list of integers.",
    "type": "function",
    "file_path": "scipy\\scipy\\fft\\_pocketfft\\helper.py",
    "ast_data": "FunctionDef name:_init_nd_shape_and_axes arg:x arg:shape arg:axes arguments arg arg arg Assign Compare Assign Compare If Assign Call Assign Compare If Call BoolOp Compare Compare Raise Call If Compare Call Call Call Raise Call If Assign Call If BoolOp Compare Call Call Raise Call If If Compare Call Raise Call Assign Call Call Assign Compare Call If Assign Call Assign Call Assign If Call Compare Raise Call Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "flatten",
    "source_code": "@doc_controls.do_not_doc_inheritable\ndef flatten(self):\n    return super().flatten()",
    "docstring": "See tf.types.experimental.TraceType base class.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\dtypes.py",
    "ast_data": "FunctionDef name:flatten arg:self arguments arg Return return:yes Call Call"
  },
  {
    "library": "scipy",
    "name": "_MultilinearModel",
    "source_code": "class _MultilinearModel(Model):\n\n    def __init__(self):\n        super().__init__(_lin_fcn, fjacb=_lin_fjb, fjacd=_lin_fjd, estimate=_lin_est, meta={'name': 'Arbitrary-dimensional Linear', 'equ': 'y = B_0 + Sum[i=1..m, B_i * x_i]', 'TeXequ': '$y=\\\\beta_0 + \\\\sum_{i=1}^m \\\\beta_i x_i$'})",
    "docstring": "Arbitrary-dimensional linear model This model is defined by :math: Examples -------- We can calculate orthogonal distance regression with an arbitrary dimensional linear model: >>> from scipy import odr >>> import numpy as np >>> x = np.linspace(0.0, 5.0) >>> y = 10.0 + 5.0 * x >>> data = odr.Data(x, y) >>> odr_obj = odr.ODR(data, odr.multilinear) >>> output = odr_obj.run() >>> print(output.beta) [10. 5.]",
    "type": "class",
    "file_path": "scipy\\scipy\\odr\\_models.py",
    "ast_data": "ClassDef name:_MultilinearModel FunctionDef name:__init__ arg:self arguments arg Call Call"
  },
  {
    "library": "scipy",
    "name": "_nearest_real_complex_idx",
    "source_code": "def _nearest_real_complex_idx(fro, to, which):\n    assert which in ('real', 'complex', 'any')\n    order = np.argsort(np.abs(fro - to))\n    if which == 'any':\n        return order[0]\n    else:\n        mask = np.isreal(fro[order])\n        if which == 'complex':\n            mask = ~mask\n        return order[np.nonzero(mask)[0][0]]",
    "docstring": "Get the next closest real or complex element based on distance",
    "type": "function",
    "file_path": "scipy\\scipy\\signal\\_filter_design.py",
    "ast_data": "FunctionDef name:_nearest_real_complex_idx arg:fro arg:to arg:which arguments arg arg arg Compare Assign Call Call If Compare Return return:yes Assign Call If Compare Assign Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "_combine_masks",
    "source_code": "def _combine_masks(method, t, mask):\n    new_mask = mask\n    new_mask = new_mask.to(dtype=t.dtype)\n    if method.PRUNING_TYPE == 'unstructured':\n        slc = mask == 1\n    elif method.PRUNING_TYPE == 'structured':\n        if not hasattr(method, 'dim'):\n            raise AttributeError('Pruning methods of PRUNING_TYPE \"structured\" need to have the attribute `dim` defined.')\n        n_dims = t.dim()\n        dim = method.dim\n        if dim < 0:\n            dim = n_dims + dim\n        if dim < 0:\n            raise IndexError(f'Index is out of bounds for tensor with dimensions {n_dims}')\n        keep_channel = mask.sum(dim=[d for d in range(n_dims) if d != dim]) != 0\n        slc = [slice(None)] * n_dims\n        slc[dim] = keep_channel\n    elif method.PRUNING_TYPE == 'global':\n        n_dims = len(t.shape)\n        slc = [slice(None)] * n_dims\n    else:\n        raise ValueError(f'Unrecognized PRUNING_TYPE {method.PRUNING_TYPE}')\n    partial_mask = method.compute_mask(t[slc], default_mask=mask[slc])\n    new_mask[slc] = partial_mask.to(dtype=new_mask.dtype)\n    return new_mask",
    "docstring": "Combine the masks from all pruning methods and returns a new mask. Args: method (a BasePruningMethod subclass): pruning method currently being applied. t (torch.Tensor): tensor representing the parameter to prune (of same dimensions as mask). mask (torch.Tensor): mask from previous pruning iteration Returns: new_mask (torch.Tensor): new mask that combines the effects of the old mask and the new mask from the current pruning method (of same dimensions as mask and t).",
    "type": "method",
    "file_path": "pytorch\\torch\\nn\\utils\\prune.py",
    "ast_data": "FunctionDef name:_combine_masks arg:method arg:t arg:mask arguments arg arg arg Assign Assign Call If Compare Assign Compare If Compare If Call Raise Call Assign Call Assign If Compare Assign If Compare Raise Call Assign Compare Call Call Compare Assign Call Assign If Compare Assign Call Assign Call Raise Call Assign Call Assign Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "adafactor",
    "source_code": "@_disable_dynamo_if_unsupported(single_tensor_fn=_single_tensor_adafactor)\ndef adafactor(params: list[Tensor], grads: list[Tensor], row_vars: list[Optional[Tensor]], col_vars: list[Optional[Tensor]], variances: list[Optional[Tensor]], state_steps: list[Tensor], foreach: Optional[bool]=None, grad_scale: Optional[Tensor]=None, found_inf: Optional[Tensor]=None, has_complex: bool=False, *, d: float, lr: Union[float, Tensor], beta2_decay: float, weight_decay: float, eps1: float, eps2: float, maximize: bool):\n    if not torch.compiler.is_compiling() and (not all((isinstance(t, torch.Tensor) for t in state_steps))):\n        raise RuntimeError('`state_steps` argument must contain a list of singleton tensors')\n    if foreach:\n        func = _multi_tensor_adafactor\n    else:\n        func = _single_tensor_adafactor\n    func(params, grads, row_vars, col_vars, variances, state_steps, d=d, lr=lr, beta2_decay=beta2_decay, weight_decay=weight_decay, eps1=eps1, eps2=eps2, maximize=maximize, grad_scale=grad_scale, found_inf=found_inf, has_complex=has_complex)",
    "docstring": "Functional API that performs Adafactor algorithm computation. See :class: for details.",
    "type": "function",
    "file_path": "pytorch\\torch\\optim\\_adafactor.py",
    "ast_data": "FunctionDef name:adafactor arg:params arg:grads arg:row_vars arg:col_vars arg:variances arg:state_steps arg:foreach arg:grad_scale arg:found_inf arg:has_complex arguments arg arg arg arg arg arg arg arg arg arg arg arg arg arg arg arg arg If BoolOp Call Call Call Raise Call If Assign Assign Call Call"
  },
  {
    "library": "kornia",
    "name": "forward",
    "source_code": "def forward(self, query: Tensor, reference_points: Tensor, value: Tensor, value_spatial_shapes: list[tuple[int, int]]) -> Tensor:\n    N, Lenq, _ = query.shape\n    _, Len_v, _ = value.shape\n    sampling_offsets = self.sampling_offsets(query).reshape(N, Lenq, self.num_heads, self.num_levels, self.num_points, 2)\n    attention_weights = self.attention_weights(query).reshape(N, Lenq, self.num_heads, self.num_levels * self.num_points)\n    attention_weights = attention_weights.softmax(-1).reshape(N, Lenq, self.num_heads, self.num_levels, self.num_points)\n    reference_points_cxcy = reference_points[:, :, None, :, None, :2]\n    reference_points_wh = reference_points[:, :, None, :, None, 2:]\n    sampling_locations = reference_points_cxcy + sampling_offsets / self.num_points * reference_points_wh * 0.5\n    value_buf = self.value_proj(value).reshape(N, Len_v, self.num_heads, self.head_dim)\n    out = _deformable_attention_kernel(value_buf, value_spatial_shapes, sampling_locations, attention_weights)\n    out = self.output_proj(out)\n    return out",
    "docstring": "Run forward. Args: query: shape (N, Lq, C) reference_points: shape (N, Lq, n_levels, 4) value: shape (N, Lv, C) value_spatial_shapes: [(H0, W0), (H1, W1), ...] Returns: output, shape (N, Lq, C)",
    "type": "method",
    "file_path": "kornia\\kornia\\contrib\\models\\rt_detr\\architecture\\rtdetr_head.py",
    "ast_data": "FunctionDef name:forward arg:self arg:query arg:reference_points arg:value arg:value_spatial_shapes arguments arg arg arg arg arg Assign Assign Assign Call Call Assign Call Call Assign Call Call Assign Assign Assign Assign Call Call Assign Call Assign Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "deserialize",
    "source_code": "def deserialize(self, constants: CompiledFxGraphConstants) -> _WireProtocolOutput:\n    from torch.fx._graph_pickler import GraphPickler\n    fake_mode = _current_fake_mode()\n    result = GraphPickler.loads(self.value, fake_mode)\n    assert isinstance(result, _WireProtocolOutput)\n    if isinstance(result.graph, CompiledFxGraph):\n        result.graph.after_deserialization(constants)\n    return result",
    "docstring": "Turn this streamable object back into a _WireProtocolOutput.",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\compile_fx_ext.py",
    "ast_data": "FunctionDef name:deserialize arg:self arg:constants arguments arg arg Assign Call Assign Call Call If Call Call Return return:yes"
  },
  {
    "library": "cherrypy",
    "name": "update",
    "source_code": "def update(self, config):\n    _if_filename_register_autoreload(config)\n    super(Config, self).update(config)",
    "docstring": "Update self from a dict, file or filename.",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\_cpconfig.py",
    "ast_data": "FunctionDef name:update arg:self arg:config arguments arg arg Call Call Call"
  },
  {
    "library": "numpy",
    "name": "variables",
    "source_code": "def variables(self):\n    return list(self._raw_data.keys())",
    "docstring": "Return the list of variable names. Parameters ---------- None Returns ------- names : list of str The names of all variables in the instance.",
    "type": "method",
    "file_path": "numpy\\numpy\\distutils\\npy_pkg_config.py",
    "ast_data": "FunctionDef name:variables arg:self arguments arg Return return:yes Call Call"
  },
  {
    "library": "scipy",
    "name": "_restrict_to_keys",
    "source_code": "def _restrict_to_keys(dictionary, goodkeys):\n    existingkeys = set(dictionary)\n    for key in existingkeys - set(goodkeys):\n        dictionary.pop(key, None)",
    "docstring": "Remove keys from dictionary if not in goodkeys - inplace",
    "type": "method",
    "file_path": "scipy\\scipy\\optimize\\_shgo.py",
    "ast_data": "FunctionDef name:_restrict_to_keys arg:dictionary arg:goodkeys arguments arg arg Assign Call For Call Call"
  },
  {
    "library": "pytorch",
    "name": "placeholder",
    "source_code": "@compatibility(is_backward_compatible=True)\ndef placeholder(self, name: str, type_expr: Optional[Any]=None, default_value: Any=inspect.Signature.empty) -> Node:\n    args = () if default_value is inspect.Signature.empty else (default_value,)\n    return self.create_node('placeholder', name, args=args, type_expr=type_expr)",
    "docstring": "Insert a `Noneinspect.Signature.empty`.",
    "type": "method",
    "file_path": "pytorch\\torch\\fx\\graph.py",
    "ast_data": "FunctionDef name:placeholder arg:self arg:name arg:type_expr arg:default_value arguments arg arg arg arg Assign Compare Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "set_preserved_attributes",
    "source_code": "def set_preserved_attributes(self, attributes: list[str]) -> FuseCustomConfig:\n    self.preserved_attributes = attributes\n    return self",
    "docstring": "Set the names of the attributes that will persist in the graph module even if they are not used in the model's `` method.",
    "type": "method",
    "file_path": "pytorch\\torch\\ao\\quantization\\fx\\custom_config.py",
    "ast_data": "FunctionDef name:set_preserved_attributes arg:self arg:attributes arguments arg arg Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "network_endpoints",
    "source_code": "def network_endpoints(self):\n    if not self._use_api:\n        return list(_environment_var_to_network_endpoints(self._tpu))\n    response = self._fetch_cloud_tpu_metadata()\n    if response.get('state') != 'READY':\n        raise RuntimeError('TPU \"%s\" is not yet ready; state: \"%s\"' % (self._tpu, response.get('state')))\n    if 'networkEndpoints' in response:\n        return response['networkEndpoints']\n    else:\n        return [{'ipAddress': response['ipAddress'], 'port': response['port']}]",
    "docstring": "Return a list of tpu endpoints.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\tpu\\client\\client.py",
    "ast_data": "FunctionDef name:network_endpoints arg:self arguments arg If Return return:yes Call Call Assign Call If Compare Call Raise Call Call If Compare Return return:yes Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "determine_observer_insert_points",
    "source_code": "def determine_observer_insert_points(self, model: nn.Module) -> dict:\n    return {}",
    "docstring": "There is no observers inserted for the PerChannelDetector. Returns an empty dictionary since no observers are added or needed",
    "type": "method",
    "file_path": "pytorch\\torch\\ao\\quantization\\fx\\_model_report\\detector.py",
    "ast_data": "FunctionDef name:determine_observer_insert_points arg:self arg:model arguments arg arg Return return:no"
  },
  {
    "library": "tensorflow",
    "name": "_init_from_args",
    "source_code": "def _init_from_args(self, maximum_iterations, parallel_iterations, back_prop, swap_memory, name):\n    if not isinstance(parallel_iterations, int) or parallel_iterations <= 0:\n        raise ValueError(\"'parallel_iterations' must be a positive integer: %s\" % parallel_iterations)\n    self._name = ops.get_default_graph().unique_name(name)\n    self._maximum_iterations = maximum_iterations\n    self._parallel_iterations = parallel_iterations\n    self._back_prop = back_prop\n    self._swap_memory = swap_memory\n    self._pivot_for_pred = None\n    self._pivot_for_body = None\n    self._pivot = None\n    self._loop_exits = []\n    self._loop_enters = []\n    self._graph = ops.get_default_graph()",
    "docstring": "Creates a new from arguments. Args: maximum_iterations: Optional upper bound on number of loop iterations. parallel_iterations: The number of iterations allowed to run in parallel. back_prop: Whether backprop is enabled for this while loop. swap_memory: Whether GPU-CPU memory swap is enabled for this loop. name: Optional name prefix for the returned tensors. Raises: ValueError: If has invalid value.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\control_flow_ops.py",
    "ast_data": "FunctionDef name:_init_from_args arg:self arg:maximum_iterations arg:parallel_iterations arg:back_prop arg:swap_memory arg:name arguments arg arg arg arg arg arg If BoolOp Call Compare Raise Call Assign Call Call Assign Assign Assign Assign Assign Assign Assign Assign Assign Assign Call"
  },
  {
    "library": "scipy",
    "name": "__repr__",
    "source_code": "def __repr__(self):\n    return f'{self.__class__.__name__}(\\n{repr(self.zeros)},\\n{repr(self.poles)},\\n{repr(self.gain)},\\ndt: {repr(self.dt)}\\n)'",
    "docstring": "Return representation of the system.",
    "type": "method",
    "file_path": "scipy\\scipy\\signal\\_ltisys.py",
    "ast_data": "FunctionDef name:__repr__ arg:self arguments arg Return return:yes Call Call Call Call"
  },
  {
    "library": "matplotlib",
    "name": "__call__",
    "source_code": "def __call__(self, value, clip=None):\n    result, is_scalar = self.process_value(value)\n    self.autoscale_None(result)\n    if not self.vmin <= self.vcenter <= self.vmax:\n        raise ValueError('vmin, vcenter, vmax must increase monotonically')\n    result = np.ma.masked_array(np.interp(result, [self.vmin, self.vcenter, self.vmax], [0, 0.5, 1], left=-np.inf, right=np.inf), mask=np.ma.getmask(result))\n    if is_scalar:\n        result = np.atleast_1d(result)[0]\n    return result",
    "docstring": "Map value to the interval [0, 1]. The *clip* argument is unused.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\colors.py",
    "ast_data": "FunctionDef name:__call__ arg:self arg:value arg:clip arguments arg arg arg Assign Call Call If Compare Raise Call Assign Call Call Call If Assign Call Return return:yes"
  },
  {
    "library": "scipy",
    "name": "_single_zpksos",
    "source_code": "def _single_zpksos(z, p, k):\n    sos = np.zeros(6)\n    b, a = zpk2tf(z, p, k)\n    sos[3 - len(b):3] = b\n    sos[6 - len(a):6] = a\n    return sos",
    "docstring": "Create one second-order section from up to two zeros and poles",
    "type": "function",
    "file_path": "scipy\\scipy\\signal\\_filter_design.py",
    "ast_data": "FunctionDef name:_single_zpksos arg:z arg:p arg:k arguments arg arg arg Assign Call Assign Call Assign Call Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "LocalResourceRestoreContext",
    "source_code": "class LocalResourceRestoreContext(object):\n\n    def __init__(self, instance):\n        self.instance = instance",
    "docstring": "Class holding information of a distributed instance, e.g. StaticHashTable. Pairing use with context manager allows operations under this context manager to conveniently gets information of a component of the (and other restored distributed if we're supporting their distribution in the future), instead of looking it up from the mapping of the worker-to-resource handle. This is especially useful when we know which instance the operations should execute with and the mapping is not available yet.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\ps_values.py",
    "ast_data": "ClassDef name:LocalResourceRestoreContext FunctionDef name:__init__ arg:self arg:instance arguments arg arg Assign"
  },
  {
    "library": "tensorflow",
    "name": "aggregate_and_return_name_for_output",
    "source_code": "def aggregate_and_return_name_for_output(self, fused_op_name, output_index, out_graphdef):\n    flattened = self.flatten_nodes()\n    if self.aggregation == OpHint.AGGREGATE_FIRST or self.aggregation == OpHint.AGGREGATE_LAST:\n        assert len(flattened) == 1\n    if len(flattened) == 1 and self.aggregation != OpHint.AGGREGATE_STACK:\n        temp_op = _LiteSingleOperand(flattened[0])\n        return temp_op.aggregate_and_return_name_for_output(fused_op_name, output_index, out_graphdef)\n    else:\n        stack_node = _node_def_pb2.NodeDef()\n        stack_node.op = 'Unpack'\n        stack_node.name = 'OpHintUnstack-%s' % flattened[0].name\n        stack_node.attr['num'].i = len(flattened)\n        output_type = flattened[0].attr['T'].type\n        stack_node.attr['T'].type = output_type\n        stack_node.input.append(_tensorflow_output_name(fused_op_name, output_index))\n        out_graphdef.node.extend([stack_node])\n        for idx, discrete in enumerate(flattened):\n            output_node = _copy.deepcopy(discrete)\n            del output_node.input[:]\n            output_node.input.append(_tensorflow_output_name(stack_node.name, idx))\n            out_graphdef.node.extend([output_node])\n        return output_type",
    "docstring": "This adds to all the unaggregated outputs. I.e. we are outputting from a fused stub, but we need to make it compatible with the unfused original graph so we insert an unpack. Ideally in a later stage the unpack -> pack sequences will be removed. Args: fused_op_name: The name of the stub we are in the process of fusing. output_index: The output output_index this object represents. out_graphdef: The graphdef we are in the process of buildings Returns: The type of the aggregated output (so we can finish building the stub op).",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\lite\\python\\op_hint.py",
    "ast_data": "FunctionDef name:aggregate_and_return_name_for_output arg:self arg:fused_op_name arg:output_index arg:out_graphdef arguments arg arg arg arg Assign Call If BoolOp Compare Compare Compare Call If BoolOp Compare Call Compare Assign Call Return return:yes Call Assign Call Assign Assign Assign Call Assign Assign Call Call Call For Call Assign Call Call Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_add_device_to_stack",
    "source_code": "def _add_device_to_stack(self, device_name_or_function, offset=0) -> _UserDeviceSpec:\n    total_offset = 1 + offset\n    spec = _UserDeviceSpec(device_name_or_function)\n    self._device_function_stack.push_obj(spec, offset=total_offset)\n    return spec",
    "docstring": "Add device to stack manually, separate from a context manager.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\ops.py",
    "ast_data": "FunctionDef name:_add_device_to_stack arg:self arg:device_name_or_function arg:offset arguments arg arg arg Assign Assign Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "CompiledBackward",
    "source_code": "@dataclass\nclass CompiledBackward(GenericCompiledBackward[CompiledFxGraph], FxGraphCacheLoadable):\n\n    def _is_backward(self) -> bool:\n        return True",
    "docstring": "Cacheable entry for a forward function",
    "type": "class",
    "file_path": "pytorch\\torch\\_functorch\\_aot_autograd\\autograd_cache.py",
    "ast_data": "ClassDef name:CompiledBackward FunctionDef name:_is_backward arg:self arguments arg Return return:yes"
  },
  {
    "library": "authlib",
    "name": "get_acr",
    "source_code": "def get_acr(self) -> str:\n    return None",
    "docstring": "Get the \"acr\" (Authentication Method Class) value of the authorization code object.",
    "type": "method",
    "file_path": "authlib\\authlib\\oidc\\core\\models.py",
    "ast_data": "FunctionDef name:get_acr arg:self arguments arg Return return:no"
  },
  {
    "library": "matplotlib",
    "name": "start_filter",
    "source_code": "def start_filter(self):\n    self._filter_renderers.append(self._renderer)\n    self._renderer = _RendererAgg(int(self.width), int(self.height), self.dpi)\n    self._update_methods()",
    "docstring": "Start filtering. It simply creates a new canvas (the old one is saved).",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\backends\\backend_agg.py",
    "ast_data": "FunctionDef name:start_filter arg:self arguments arg Call Assign Call Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "size",
    "source_code": "def size(self, name=None):\n    return self._implementation.size(name=name)",
    "docstring": "Return the size of the TensorArray.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\tensor_array_ops.py",
    "ast_data": "FunctionDef name:size arg:self arg:name arguments arg arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "bench",
    "source_code": "def bench(self, launcher, *args, with_profiler=False, **kwargs):\n    if not self.custom_kernel and launcher.n_spills > self.inductor_meta.get('spill_threshold', 16):\n        log.debug('Skip config %s because of register spilling: %d', launcher.config, launcher.n_spills)\n        return float('inf')\n    device_interface = self.get_device_interface()\n    stream = device_interface.get_raw_stream(device_interface.current_device())\n    cpu_copies = self.copy_args_to_cpu_if_needed(*args, **kwargs)\n\n    def kernel_call():\n        cloned_args, cloned_kwargs = self.maybe_clone_args(cpu_copies, *args, **kwargs)\n        self.reset_to_zero_args(*args, **kwargs)\n        args_with_constexprs = self._get_args_with_constexprs(cloned_args, launcher)\n        launcher(*args_with_constexprs, **cloned_kwargs, stream=stream)\n        self.restore_args_from_cpu(cpu_copies)\n    if with_profiler:\n        from torch._inductor.utils import do_bench_using_profiling\n        return do_bench_using_profiling(kernel_call, warmup=10, rep=40)\n    if self.device_props.type == 'cpu':\n        return benchmarker.benchmark_cpu(kernel_call)\n    return benchmarker.benchmark_gpu(kernel_call, rep=40)",
    "docstring": "Measure the performance of a given launcher",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\runtime\\triton_heuristics.py",
    "ast_data": "FunctionDef name:bench arg:self arg:launcher arguments arg arg arg arg arg If BoolOp Compare Call Call Return return:yes Call Assign Call Assign Call Call Assign Call FunctionDef name:kernel_call arguments Assign Call Call Assign Call Call Call If Return return:yes Call If Compare Return return:yes Call Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "rvs",
    "source_code": "def rvs(self, size=1):\n    size1d = tuple(np.atleast_1d(size))\n    N = np.prod(size1d)\n    x = np.zeros(N)\n    simulated, i = (0, 1)\n    while simulated < N:\n        k = N - simulated\n        u1 = self._umax * self._rng.uniform(size=k)\n        v1 = self._rng.uniform(self._vmin, self._vmax, size=k)\n        rvs = v1 / u1 + self._c\n        accept = u1 ** 2 <= self._pdf(rvs)\n        num_accept = np.sum(accept)\n        if num_accept > 0:\n            x[simulated:simulated + num_accept] = rvs[accept]\n            simulated += num_accept\n        if simulated == 0 and i * N >= 50000:\n            msg = f'Not a single random variate could be generated in {i * N} attempts. The ratio of uniforms method does not appear to work for the provided parameters. Please check the pdf and the bounds.'\n            raise RuntimeError(msg)\n        i += 1\n    return np.reshape(x, size1d)",
    "docstring": "Sampling of random variates Parameters ---------- size : int or tuple of ints, optional Number of random variates to be generated (default is 1). Returns ------- rvs : ndarray The random variates distributed according to the probability distribution defined by the pdf.",
    "type": "method",
    "file_path": "scipy\\scipy\\stats\\_sampling.py",
    "ast_data": "FunctionDef name:rvs arg:self arg:size arguments arg arg Assign Call Call Assign Call Assign Call Assign While Compare Assign Assign Call Assign Call Assign Assign Compare Call Assign Call If Compare Assign If BoolOp Compare Compare Assign Raise Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "unfold",
    "source_code": "def unfold(input: Tensor, kernel_size: BroadcastingList2[int], dilation: BroadcastingList2[int]=1, padding: BroadcastingList2[int]=0, stride: BroadcastingList2[int]=1) -> Tensor:\n    if has_torch_function_unary(input):\n        return handle_torch_function(unfold, (input,), input, kernel_size, dilation=dilation, padding=padding, stride=stride)\n    return torch._C._nn.im2col(input, _pair(kernel_size), _pair(dilation), _pair(padding), _pair(stride))",
    "docstring": "Extract sliding local blocks from a batched input tensor. .. warning:: Currently, only 4-D input tensors (batched image-like tensors) are supported. .. warning:: More than one element of the unfolded tensor may refer to a single memory location. As a result, in-place operations (especially ones that are vectorized) may result in incorrect behavior. If you need to write to the tensor, please clone it first. See :class: for details",
    "type": "function",
    "file_path": "pytorch\\torch\\nn\\functional.py",
    "ast_data": "FunctionDef name:unfold arg:input arg:kernel_size arg:dilation arg:padding arg:stride arguments arg arg arg arg arg If Call Return return:yes Call Return return:yes Call Call Call Call Call"
  },
  {
    "library": "matplotlib",
    "name": "set_file",
    "source_code": "def set_file(self, file):\n    self._file = os.fspath(file) if file is not None else None",
    "docstring": "Set the filename of the fontfile to use. In this case, all other properties will be ignored.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\font_manager.py",
    "ast_data": "FunctionDef name:set_file arg:self arg:file arguments arg arg Assign Compare Call"
  },
  {
    "library": "scikit-learn",
    "name": "get_metadata_routing",
    "source_code": "def get_metadata_routing(self):\n    router = MetadataRouter(owner=self.__class__.__name__).add(regressor=self._get_regressor(), method_mapping=MethodMapping().add(caller='fit', callee='fit').add(caller='predict', callee='predict'))\n    return router",
    "docstring": "Get metadata routing of this object. Please check :ref: on how the routing mechanism works. .. versionadded:: 1.6 Returns ------- routing : MetadataRouter A :class: encapsulating routing information.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\compose\\_target.py",
    "ast_data": "FunctionDef name:get_metadata_routing arg:self arguments arg Assign Call Call Call Call Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_convert_fx_arg_to_onnx_arg",
    "source_code": "def _convert_fx_arg_to_onnx_arg(arg, node_name_to_values: dict[str, ir.Value | Sequence[ir.Value]], node_name_to_local_functions: dict[str, ir.Function]) -> Any:\n    if arg is None:\n        return None\n    if hasattr(arg, 'name'):\n        if isinstance(arg, torch.fx.Node) and arg.target == operator.getitem:\n            source = arg.all_input_nodes[0]\n            source_outputs = node_name_to_values[source.name]\n            if isinstance(source_outputs, Sequence):\n                return _handle_getitem_node(arg, node_name_to_values)\n            else:\n                pass\n        if isinstance(arg, torch.fx.Node) and arg.op == 'get_attr':\n            return node_name_to_local_functions[arg.name]\n        return node_name_to_values[arg.name]\n    if isinstance(arg, (list, tuple)):\n        return [_convert_fx_arg_to_onnx_arg(elem, node_name_to_values, node_name_to_local_functions) for elem in arg]\n    if isinstance(arg, (torch.device, torch.memory_format, torch.layout)):\n        return str(arg)\n    if isinstance(arg, torch.dtype):\n        return torch_dtype_to_onnx_dtype(arg)\n    return arg",
    "docstring": "Convert an FX argument to an ONNX compatible argument. This function - Converts a torch dtype to an integer - Converts a torch device/memory_format/layout to a string - Converts a torch.fx.Node to an ir.Value - Converts a sequence of torch.fx.Node to a sequence of ir.Value - Converts a get_attr node to an ir.Function",
    "type": "function",
    "file_path": "pytorch\\torch\\onnx\\_internal\\exporter\\_core.py",
    "ast_data": "FunctionDef name:_convert_fx_arg_to_onnx_arg arg:arg arg:node_name_to_values arg:node_name_to_local_functions arguments arg arg arg If Compare Return return:no If Call If BoolOp Call Compare Assign Assign If Call Return return:yes Call If BoolOp Call Compare Return return:yes Return return:yes If Call Return return:yes Call If Call Return return:yes Call If Call Return return:yes Call Return return:yes"
  },
  {
    "library": "pandas",
    "name": "invert",
    "source_code": "def invert(self) -> Self:\n    if self.filter is not None:\n        self.filter = (self.filter[0], self.generate_filter_op(invert=True), self.filter[2])\n    return self",
    "docstring": "invert the filter",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\computation\\pytables.py",
    "ast_data": "FunctionDef name:invert arg:self arguments arg If Compare Assign Call Return return:yes"
  },
  {
    "library": "scipy",
    "name": "pre_func_eval",
    "source_code": "def pre_func_eval(work):\n    n = work.terms\n    h = work.h[:, xp.newaxis]\n    c = work.fac\n    d = c ** 0.5\n    if work.nit == 0:\n        hc = h / c ** xp.arange(n, dtype=work.dtype)\n        hc = xp.concat((-xp.flip(hc, axis=-1), hc), axis=-1)\n    else:\n        hc = xp.concat((-h, h), axis=-1) / c ** (n - 1)\n    if work.nit == 0:\n        hr = h / d ** xp.arange(2 * n, dtype=work.dtype)\n    else:\n        hr = xp.concat((h, h / d), axis=-1) / c ** (n - 1)\n    n_new = 2 * n if work.nit == 0 else 2\n    x_eval = xp.zeros((work.hdir.shape[0], n_new), dtype=work.dtype)\n    il, ic, ir = (work.il, work.ic, work.ir)\n    x_eval = xpx.at(x_eval)[ir].set(work.x[ir][:, xp.newaxis] + hr[ir])\n    x_eval = xpx.at(x_eval)[ic].set(work.x[ic][:, xp.newaxis] + hc[ic])\n    x_eval = xpx.at(x_eval)[il].set(work.x[il][:, xp.newaxis] - hr[il])\n    return x_eval",
    "docstring": "Determine the abscissae at which the function needs to be evaluated. See for a description of the stencil (pattern of the abscissae). In the first iteration, there is only one stored function value in , , so we need to evaluate at new points. In subsequent iterations, we evaluate at two new points. Note that is always flattened into a 1D array after broadcasting with all , so we add a new axis at the end and evaluate all point in one call to the function. For improvement: - Consider measuring the step size actually taken, since `hx` is too big to resolve the step. - We could probably save some work if there are no central difference steps or no one-sided steps.",
    "type": "function",
    "file_path": "scipy\\scipy\\differentiate\\_differentiate.py",
    "ast_data": "FunctionDef name:pre_func_eval arg:work arguments arg Assign Assign Assign Assign If Compare Assign Call Assign Call Call Assign Call If Compare Assign Call Assign Call Assign Compare Assign Call Assign Assign Call Call Assign Call Call Assign Call Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "AsinhNorm",
    "source_code": "@make_norm_from_scale(scale.AsinhScale, init=lambda linear_width=1, vmin=None, vmax=None, clip=False: None)\nclass AsinhNorm(Normalize):\n\n    @property\n    def linear_width(self):\n        return self._scale.linear_width\n\n    @linear_width.setter\n    def linear_width(self, value):\n        self._scale.linear_width = value",
    "docstring": "The inverse hyperbolic sine scale is approximately linear near the origin, but becomes logarithmic for larger positive or negative values. Unlike the , the transition between these linear and logarithmic regions is smooth, which may reduce the risk of visual artifacts. .. note:: This API is provisional and may be revised in the future based on early user feedback. Parameters ---------- linear_width : float, default: 1 The effective width of the linear region, beyond which the transformation becomes asymptotically logarithmic",
    "type": "class",
    "file_path": "matplotlib\\lib\\matplotlib\\colors.py",
    "ast_data": "ClassDef name:AsinhNorm FunctionDef name:linear_width arg:self arguments arg Return return:yes FunctionDef name:linear_width arg:self arg:value arguments arg arg Assign Call arguments arg arg arg arg"
  },
  {
    "library": "numpy",
    "name": "hermweight",
    "source_code": "def hermweight(x):\n    w = np.exp(-x ** 2)\n    return w",
    "docstring": "Weight function of the Hermite polynomials. The weight function is :math: and the interval of integration is :math:. the Hermite polynomials are orthogonal, but not normalized, with respect to this weight function. Parameters ---------- x : array_like Values at which the weight function will be computed. Returns ------- w : ndarray The weight function at . Examples -------- >>> import numpy as np >>> from numpy.polynomial.hermite import hermweight >>> x = np.arange(-2, 2) >>> hermweight(x) array([0.01831564, 0.36787944, 1. , 0.36787944])",
    "type": "function",
    "file_path": "numpy\\numpy\\polynomial\\hermite.py",
    "ast_data": "FunctionDef name:hermweight arg:x arguments arg Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_get_latest_eval_step_value",
    "source_code": "def _get_latest_eval_step_value(update_ops):\n    if isinstance(update_ops, dict):\n        update_ops = list(update_ops.values())\n    with ops.control_dependencies(update_ops):\n        return array_ops.identity(_get_or_create_eval_step().read_value())",
    "docstring": "Gets the eval step value after running . Args: update_ops: A list of or a dictionary of names to , which are run before reading the eval step value. Returns: A representing the value for the evaluation step.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\training\\evaluation.py",
    "ast_data": "FunctionDef name:_get_latest_eval_step_value arg:update_ops arguments arg If Call Assign Call Call With Call Return return:yes Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "_get_ready",
    "source_code": "@staticmethod\ndef _get_ready():\n    return 'ready'",
    "docstring": "No-op function to help mark when the subprocess pool is ready.",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\async_compile.py",
    "ast_data": "FunctionDef name:_get_ready arguments Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "restore_global_state",
    "source_code": "@contextlib.contextmanager\ndef restore_global_state(self):\n    prior_global_state = self.tracing_context.global_context.copy_graphstate()\n    current_global_state: dict[str, tuple[Any, bool]] = {}\n    self.save_global_state(out=current_global_state)\n    try:\n        self.tracing_context.global_context.restore_graphstate(prior_global_state)\n        yield\n    finally:\n        self.tracing_context.global_context.restore_graphstate(GlobalContextCheckpointState(current_global_state))",
    "docstring": "Momentarily restores the global state to what it was prior to tracing the current output",
    "type": "method",
    "file_path": "pytorch\\torch\\_dynamo\\output_graph.py",
    "ast_data": "FunctionDef name:restore_global_state arg:self arguments arg Assign Call Call Try Call Call Call"
  },
  {
    "library": "django",
    "name": "inclusion_tag",
    "source_code": "def inclusion_tag(self, filename, func=None, takes_context=None, name=None):\n\n    def dec(func):\n        params, varargs, varkw, defaults, kwonly, kwonly_defaults, _ = getfullargspec(unwrap(func))\n        function_name = name or func.__name__\n\n        @wraps(func)\n        def compile_func(parser, token):\n            bits = token.split_contents()[1:]\n            args, kwargs = parse_bits(parser, bits, params, varargs, varkw, defaults, kwonly, kwonly_defaults, takes_context, function_name)\n            return InclusionNode(func, takes_context, args, kwargs, filename)\n        self.tag(function_name, compile_func)\n        return func\n    return dec",
    "docstring": "Register a callable as an inclusion tag: @register.inclusion_tag('results.html') def show_results(poll): choices = poll.choice_set.all() return {'choices': choices}",
    "type": "method",
    "file_path": "django\\django\\template\\library.py",
    "ast_data": "FunctionDef name:inclusion_tag arg:self arg:filename arg:func arg:takes_context arg:name arguments arg arg arg arg arg FunctionDef name:dec arg:func arguments arg Assign Call Call Assign BoolOp FunctionDef name:compile_func arg:parser arg:token arguments arg arg Assign Call Assign Call Return return:yes Call Call Call Return return:yes Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "check_subgraphs_connected",
    "source_code": "@compatibility(is_backward_compatible=False)\ndef check_subgraphs_connected(subgraph1: SourcePartition, subgraph2: SourcePartition) -> bool:\n    for node in reversed(subgraph1.nodes):\n        for user in node.users.keys():\n            if user in subgraph2.nodes:\n                return True\n    return False",
    "docstring": "Given two subgraphs A and B (in the form of a list of nodes), checks if A has nodes connecting to at least one node in B -- aka there exists a node in B that uses a node in A (not the other way around).",
    "type": "function",
    "file_path": "pytorch\\torch\\fx\\passes\\utils\\source_matcher_utils.py",
    "ast_data": "FunctionDef name:check_subgraphs_connected arg:subgraph1 arg:subgraph2 arguments arg arg For Call For Call If Compare Return return:yes Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "alias_inplace_update",
    "source_code": "@deprecation.deprecated(None, 'Prefer tf.tensor_scatter_nd_update, which offers the same functionality with well-defined read-write semantics.')\ndef alias_inplace_update(x, i, v):\n    return _inplace_helper(x, i, v, gen_array_ops.inplace_update)",
    "docstring": "Applies an inplace update on input x at index i with value v. Aliases x. If i is None, x and v must be the same shape. Computes x = v; If i is a scalar, x has a rank 1 higher than v's. Computes x[i, :] = v; Otherwise, x and v must have the same rank. Computes x[i, :] = v; Args: x: A Tensor. i: None, a scalar or a vector. v: A Tensor. Returns: Returns x.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\inplace_ops.py",
    "ast_data": "FunctionDef name:alias_inplace_update arg:x arg:i arg:v arguments arg arg arg Return return:yes Call Call"
  },
  {
    "library": "django",
    "name": "get_search_fields",
    "source_code": "def get_search_fields(self, request):\n    return self.search_fields",
    "docstring": "Return a sequence containing the fields to be searched whenever somebody submits a search query.",
    "type": "method",
    "file_path": "django\\django\\contrib\\admin\\options.py",
    "ast_data": "FunctionDef name:get_search_fields arg:self arg:request arguments arg arg Return return:yes"
  },
  {
    "library": "pandas",
    "name": "_handle_anti_join",
    "source_code": "@final\ndef _handle_anti_join(self, join_index: Index, left_indexer: npt.NDArray[np.intp] | None, right_indexer: npt.NDArray[np.intp] | None) -> tuple[Index, npt.NDArray[np.intp] | None, npt.NDArray[np.intp] | None]:\n    if left_indexer is None:\n        left_indexer = np.arange(len(self.left))\n    if right_indexer is None:\n        right_indexer = np.arange(len(self.right))\n    assert self.how in {'left', 'right'}\n    if self.how == 'left':\n        filt = right_indexer == -1\n    else:\n        filt = left_indexer == -1\n    join_index = join_index[filt]\n    left_indexer = left_indexer[filt]\n    right_indexer = right_indexer[filt]\n    return (join_index, left_indexer, right_indexer)",
    "docstring": "Handle anti join by returning the correct join index and indexers Parameters ---------- join_index : Index join index left_indexer : np.ndarray[np.intp] or None left indexer right_indexer : np.ndarray[np.intp] or None right indexer Returns ------- Index, np.ndarray[np.intp] or None, np.ndarray[np.intp] or None",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\reshape\\merge.py",
    "ast_data": "FunctionDef name:_handle_anti_join arg:self arg:join_index arg:left_indexer arg:right_indexer arguments arg arg arg arg If Compare Assign Call Call If Compare Assign Call Call Compare If Compare Assign Compare Assign Compare Assign Assign Assign Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "predict",
    "source_code": "@available_if(_estimator_has('predict'))\ndef predict(self, X, **params):\n    check_is_fitted(self)\n    _raise_for_params(params, self, 'predict')\n    if _routing_enabled():\n        routed_params = process_routing(self, 'predict', **params)\n    else:\n        routed_params = Bunch(estimator=Bunch(predict={}))\n    X = validate_data(self, X, accept_sparse=True, ensure_all_finite=False, reset=False)\n    return self.estimator_.predict(X, **routed_params.estimator.predict)",
    "docstring": "Predict the classes of . Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) Array representing the data. **params : dict of str -> object Parameters to pass to the underlying estimator's `enable_metadata_routing=TrueMetadata Routing User Guide ` for more details. Returns ------- y : ndarray of shape (n_samples,) Array with predicted labels.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\semi_supervised\\_self_training.py",
    "ast_data": "FunctionDef name:predict arg:self arg:X arguments arg arg arg Call Call If Call Assign Call Assign Call Call Assign Call Return return:yes Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "create_triton_kernel",
    "source_code": "@staticmethod\ndef create_triton_kernel(tiling: dict[str, sympy.Expr], features: SIMDKernelFeatures, optimize_mask: bool) -> TritonKernel:\n    return TritonKernel(tiling, features=features, pid_cache={'tl.program_id(0)': 'pid_offset'}, optimize_mask=optimize_mask, override_cooperative_reduction=False)",
    "docstring": "Only allow optimize_mask=True when 1) sequential dispatch is used, 2) numels except x dimension are the same for each sub kernel.",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\codegen\\triton_combo_kernel.py",
    "ast_data": "FunctionDef name:create_triton_kernel arg:tiling arg:features arg:optimize_mask arguments arg arg arg Return return:yes Call"
  },
  {
    "library": "django",
    "name": "get_warning_for_invalid_pattern",
    "source_code": "def get_warning_for_invalid_pattern(pattern):\n    if isinstance(pattern, str):\n        hint = \"Try removing the string '{}'. The list of urlpatterns should not have a prefix string as the first element.\".format(pattern)\n    elif isinstance(pattern, tuple):\n        hint = 'Try using path() instead of a tuple.'\n    else:\n        hint = None\n    return [Error('Your URL pattern {!r} is invalid. Ensure that urlpatterns is a list of path() and/or re_path() instances.'.format(pattern), hint=hint, id='urls.E004')]",
    "docstring": "Return a list containing a warning that the pattern is invalid. describe_pattern() cannot be used here, because we cannot rely on the urlpattern having regex or name attributes.",
    "type": "function",
    "file_path": "django\\django\\core\\checks\\urls.py",
    "ast_data": "FunctionDef name:get_warning_for_invalid_pattern arg:pattern arguments arg If Call Assign Call If Call Assign Assign Return return:yes Call Call"
  },
  {
    "library": "django",
    "name": "get_app_configs",
    "source_code": "def get_app_configs(self):\n    self.check_apps_ready()\n    return self.app_configs.values()",
    "docstring": "Import applications and return an iterable of app configs.",
    "type": "method",
    "file_path": "django\\django\\apps\\registry.py",
    "ast_data": "FunctionDef name:get_app_configs arg:self arguments arg Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_op_in_graph_mode",
    "source_code": "def _op_in_graph_mode(tensor):\n    if context.executing_eagerly():\n        return tensor\n    return tensor.op",
    "docstring": "Returns the tensor's op in graph mode, or the tensor in eager mode. This is useful because sometimes an op is needed in graph mode instead of a tensor. In eager mode, there are no ops. Args: tensor: A tensor. Returns: The tensor's op in graph mode. The tensor in eager mode.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\training\\experimental\\loss_scale.py",
    "ast_data": "FunctionDef name:_op_in_graph_mode arg:tensor arguments arg If Call Return return:yes Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "get_expired_timers",
    "source_code": "@abc.abstractmethod\ndef get_expired_timers(self, deadline: float) -> dict[str, list[TimerRequest]]:\n    pass",
    "docstring": "Returns all expired timers for each worker_id. An expired timer is a timer for which the expiration_time is less than or equal to the provided deadline.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\elastic\\timer\\api.py",
    "ast_data": "FunctionDef name:get_expired_timers arg:self arg:deadline arguments arg arg"
  },
  {
    "library": "tensorflow",
    "name": "replace_gradient_components",
    "source_code": "@abc.abstractmethod\ndef replace_gradient_components(self, value, component_grads):\n    raise NotImplementedError(f'{type(self).__name__}.replace_gradient_components()')",
    "docstring": "Replaces the gradient components in with . Args: value: A value with its gradient components compatible with . component_grads: A nested structure of or or (for unconnected gradients). Returns: A copy of , where the components that should be included in gradients have been replaced by ; or (if includes ).",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\composite_tensor_gradient.py",
    "ast_data": "FunctionDef name:replace_gradient_components arg:self arg:value arg:component_grads arguments arg arg arg Raise Call Call"
  },
  {
    "library": "pygame",
    "name": "collide_rect",
    "source_code": "def collide_rect(left, right):\n    return left.rect.colliderect(right.rect)",
    "docstring": "collision detection between two sprites, using rects. pygame.sprite.collide_rect(left, right): return bool Tests for collision between two sprites. Uses the pygame.Rect colliderect function to calculate the collision. It is intended to be passed as a collided callback function to the *collide functions. Sprites must have \"rect\" attributes. New in pygame 1.8.0",
    "type": "function",
    "file_path": "pygame\\src_py\\sprite.py",
    "ast_data": "FunctionDef name:collide_rect arg:left arg:right arguments arg arg Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "TimerMac",
    "source_code": "class TimerMac(_macosx.Timer, TimerBase):\n    pass",
    "docstring": "Subclass of using CFRunLoop timer events.",
    "type": "class",
    "file_path": "matplotlib\\lib\\matplotlib\\backends\\backend_macosx.py",
    "ast_data": "ClassDef name:TimerMac"
  },
  {
    "library": "tensorflow",
    "name": "replace_composites_with_components",
    "source_code": "def replace_composites_with_components(structure):\n    if isinstance(structure, CompositeTensor):\n        return replace_composites_with_components(structure._type_spec._to_components(structure))\n    elif not nest.is_nested(structure):\n        return structure\n    else:\n        return nest.map_structure(replace_composites_with_components, structure, expand_composites=False)",
    "docstring": "Recursively replaces CompositeTensors with their components. Args: structure: A -compatible structure, possibly containing composite tensors. Returns: A copy of , where each composite tensor has been replaced by its components. The result will contain no composite tensors. Note that returns the same value as .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\composite_tensor.py",
    "ast_data": "FunctionDef name:replace_composites_with_components arg:structure arguments arg If Call Return return:yes Call Call If Call Return return:yes Return return:yes Call"
  },
  {
    "library": "django",
    "name": "_text_chars",
    "source_code": "def _text_chars(self, length, truncate, text):\n    truncate_len = calculate_truncate_chars_length(length, truncate)\n    s_len = 0\n    end_index = None\n    for i, char in enumerate(text):\n        if unicodedata.combining(char):\n            continue\n        s_len += 1\n        if end_index is None and s_len > truncate_len:\n            end_index = i\n        if s_len > length:\n            return add_truncation_text(text[:end_index or 0], truncate)\n    return text",
    "docstring": "Truncate a string after a certain number of chars.",
    "type": "method",
    "file_path": "django\\django\\utils\\text.py",
    "ast_data": "FunctionDef name:_text_chars arg:self arg:length arg:truncate arg:text arguments arg arg arg arg Assign Call Assign Assign For Call If Call If BoolOp Compare Compare Assign If Compare Return return:yes Call BoolOp Return return:yes"
  },
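`_text_chars` is private; a sketch of its behavior through the public `Truncator.chars` wrapper (the sample string and the standalone `settings.configure()` call are assumptions to make the snippet runnable):

```python
from django.conf import settings

settings.configure()  # minimal standalone setup so translation machinery works

from django.utils.text import Truncator

t = Truncator("The quick brown fox jumped over the lazy dog.")
# The truncation text counts toward the limit, so 9 chars + the ellipsis:
print(t.chars(10))                  # 'The quick…'
print(t.chars(12, truncate="..."))  # 'The quick...'
```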
  {
    "library": "scipy",
    "name": "sos2tf",
    "source_code": "def sos2tf(sos):\n    xp = array_namespace(sos)\n    sos = xp.asarray(sos)\n    result_type = sos.dtype\n    if xp.isdtype(result_type, 'integral'):\n        result_type = xp_default_dtype(xp)\n    b = xp.asarray([1], dtype=result_type)\n    a = xp.asarray([1], dtype=result_type)\n    n_sections = sos.shape[0]\n    for section in range(n_sections):\n        b = _pu.polymul(b, sos[section, :3], xp=xp)\n        a = _pu.polymul(a, sos[section, 3:], xp=xp)\n    return (b, a)",
    "docstring": "Return a single transfer function from a series of second-order sections Parameters ---------- sos : array_like Array of second-order filter coefficients, must have shape `sosfilt` for the SOS filter format specification. Returns ------- b : ndarray Numerator polynomial coefficients. a : ndarray Denominator polynomial coefficients. Notes ----- .. versionadded:: 0.16.0 Examples -------- Find the polynomial representation of an elliptic filter using its 'sos' (second-order sections) format. >>> from scipy.signal import sos2tf >>> from scipy import signal >>> sos = signal.ellip(1, 0.001, 50, 0.1, output='sos') >>> sos2tf(sos) ( array([0.91256522, 0.91256522, 0. ]), array([1. , 0.82513043, 0. ]))",
    "type": "function",
    "file_path": "scipy\\scipy\\signal\\_filter_design.py",
    "ast_data": "FunctionDef name:sos2tf arg:sos arguments arg Assign Call Assign Call Assign If Call Assign Call Assign Call Assign Call Assign For Call Assign Call Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "experimental_run",
    "source_code": "@deprecated(None, 'This method is not available in TF 2.x. Please switch to using `run` instead.')\ndef experimental_run(self, fn, input_iterator=None):\n    return super(StrategyV1, self).experimental_run(fn, input_iterator)",
    "docstring": "Runs ops in on each replica, with inputs from . DEPRECATED: This method is not available in TF 2.x. Please switch to using instead. When eager execution is enabled, executes ops specified by on each replica. Otherwise, builds a graph to execute the ops on each replica. Each replica will take a single, different input from the inputs provided by one call on the input iterator. may call to access members such as . IMPORTANT: Depending on the implementation being used, and whether eager execution is enabled, may be called one or more times (once for each replica). Args: fn: The function to run. The inputs to the function must match the outputs of . The output must be a of s. input_iterator: (Optional) input iterator from which the inputs are taken. Returns: Merged return value of across replicas. The structure of the return value is the same as the return value from . Each element in the structure can either be (if the values are unsynchronized), (if the values are kept in sync), or (if running on a single replica).",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\distribute_lib.py",
    "ast_data": "FunctionDef name:experimental_run arg:self arg:fn arg:input_iterator arguments arg arg arg Return return:yes Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "add_histogram",
    "source_code": "def add_histogram(self, tag, values, global_step=None, bins='tensorflow', walltime=None, max_bins=None):\n    torch._C._log_api_usage_once('tensorboard.logging.add_histogram')\n    if isinstance(bins, str) and bins == 'tensorflow':\n        bins = self.default_bins\n    self._get_file_writer().add_summary(histogram(tag, values, bins, max_bins=max_bins), global_step, walltime)",
    "docstring": "Add histogram to summary. Args: tag (str): Data identifier values (torch.Tensor, numpy.ndarray, or string/blobname): Values to build histogram global_step (int): Global step value to record bins (str): One of {'tensorflow','auto', 'fd', ...}. This determines how the bins are made. You can find other options in: walltime (float): Optional override default walltime (time.time()) seconds after epoch of event Examples:: from torch.utils.tensorboard import SummaryWriter import numpy as np writer = SummaryWriter() for i in range(10): x = np.random.random(1000) writer.add_histogram('distribution centers', x + i, i) writer.close() Expected result: .. image:: _static/img/tensorboard/add_histogram.png :scale: 50 %",
    "type": "method",
    "file_path": "pytorch\\torch\\utils\\tensorboard\\writer.py",
    "ast_data": "FunctionDef name:add_histogram arg:self arg:tag arg:values arg:global_step arg:bins arg:walltime arg:max_bins arguments arg arg arg arg arg arg arg Call If BoolOp Call Compare Assign Call Call Call"
  },
  {
    "library": "scikit-learn",
    "name": "inverse_transform",
    "source_code": "def inverse_transform(self, X):\n    check_is_fitted(self)\n    X = self._check_inputs(X, in_fit=False, accept_sparse_negative=True, copy=self.copy)\n    return self._transform(X, inverse=True)",
    "docstring": "Back-projection to the original space. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) The data used to scale along the features axis. If a sparse matrix is provided, it will be converted into a sparse `ignore_implicit_zeros` is False. Returns ------- X_original : {ndarray, sparse matrix} of (n_samples, n_features) The projected data.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\preprocessing\\_data.py",
    "ast_data": "FunctionDef name:inverse_transform arg:self arg:X arguments arg arg Call Assign Call Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "_encode_comment",
    "source_code": "def _encode_comment(self, s=''):\n    if s:\n        return '%s %s' % (_TK_COMMENT, s)\n    else:\n        return '%s' % _TK_COMMENT",
    "docstring": "(INTERNAL) Encodes a comment line. Comments are single line strings starting, obligatorily, with the `` is None, this method will simply return an empty comment. :param s: (OPTIONAL) string. :return: a string with the encoded comment line.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\externals\\_arff.py",
    "ast_data": "FunctionDef name:_encode_comment arg:self arg:s arguments arg arg If Return return:yes Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "parse_cluster_spec",
    "source_code": "def parse_cluster_spec(cluster_spec, cluster, verbose=False):\n    job_strings = cluster_spec.split(',')\n    if not cluster_spec:\n        raise ValueError('Empty cluster_spec string')\n    for job_string in job_strings:\n        job_def = cluster.job.add()\n        if job_string.count('|') != 1:\n            raise ValueError(\"Not exactly one instance of '|' in cluster_spec\")\n        job_name = job_string.split('|')[0]\n        if not job_name:\n            raise ValueError('Empty job_name in cluster_spec')\n        job_def.name = job_name\n        if verbose:\n            logging.info('Added job named \"%s\"', job_name)\n        job_tasks = job_string.split('|')[1].split(';')\n        for i in range(len(job_tasks)):\n            if not job_tasks[i]:\n                raise ValueError('Empty task string at position %d' % i)\n            job_def.tasks[i] = job_tasks[i]\n            if verbose:\n                logging.info('  Added task \"%s\" to job \"%s\"', job_tasks[i], job_name)",
    "docstring": "Parse content of cluster_spec string and inject info into cluster protobuf. Args: cluster_spec: cluster specification string, e.g., \"local|localhost:2222;localhost:2223\" cluster: cluster protobuf. verbose: If verbose logging is requested. Raises: ValueError: if the cluster_spec string is invalid.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\debug\\lib\\grpc_tensorflow_server.py",
    "ast_data": "FunctionDef name:parse_cluster_spec arg:cluster_spec arg:cluster arg:verbose arguments arg arg arg Assign Call If Raise Call For Assign Call If Compare Call Raise Call Assign Call If Raise Call Assign If Call Assign Call Call For Call Call If Raise Call Assign If Call"
  },
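A sketch of the grammar this parser accepts; the import path follows the entry's file_path but is an internal debug module, so treat it as an assumption:

```python
from tensorflow.core.protobuf import cluster_pb2
from tensorflow.python.debug.lib.grpc_tensorflow_server import parse_cluster_spec

cluster = cluster_pb2.ClusterDef()
# Jobs are comma-separated; each job is "<name>|<task0>;<task1>;...".
parse_cluster_spec("local|localhost:2222;localhost:2223", cluster, verbose=True)
print(cluster.job[0].name)         # "local"
print(dict(cluster.job[0].tasks))  # {0: 'localhost:2222', 1: 'localhost:2223'}
```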
  {
    "library": "tensorflow",
    "name": "record_if",
    "source_code": "@tf_export('summary.record_if', v1=[])\n@tf_contextlib.contextmanager\ndef record_if(condition):\n    old = _summary_state.is_recording\n    try:\n        _summary_state.is_recording = condition\n        yield\n    finally:\n        _summary_state.is_recording = old",
    "docstring": "Sets summary recording on or off per the provided boolean value. The provided value can be a python boolean, a scalar boolean Tensor, or or a callable providing such a value; if a callable is passed it will be invoked on-demand to determine whether summary writing will occur. Note that when calling record_if() in an eager mode context, if you intend to provide a varying condition like , you must wrap this in a callable to avoid immediate eager evaluation of the condition. In particular, using a callable is the only way to have your condition evaluated as part of the traced body of an @tf.function that is invoked from within the context. Args: condition: can be True, False, a bool Tensor, or a callable providing such. Yields: Returns a context manager that sets this value on enter and restores the previous value on exit.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\summary_ops_v2.py",
    "ast_data": "FunctionDef name:record_if arg:condition arguments arg Assign Try Assign Assign Call"
  },
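A sketch of the callable-condition pattern the docstring insists on (the log directory and step cadence are assumptions):

```python
import tensorflow as tf

writer = tf.summary.create_file_writer("/tmp/tf_logs")
step = tf.Variable(0, dtype=tf.int64)

with writer.as_default():
    # The lambda defers evaluation: a bare `step % 100 == 0` would be
    # evaluated once, eagerly, instead of at each summary write.
    with tf.summary.record_if(lambda: step % 100 == 0):
        tf.summary.scalar("loss", 0.5, step=step)
```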
  {
    "library": "tensorflow",
    "name": "_make_replicated_models_with_cloning",
    "source_code": "def _make_replicated_models_with_cloning(model, mode):\n    strategy = model._distribution_strategy\n    if model._compile_distribution:\n        clone_model_on_replicas(model, strategy, mode)\n    else:\n        _build_distributed_network(model, strategy, mode)",
    "docstring": "Build models on each replica.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\distribute\\distributed_training_utils_v1.py",
    "ast_data": "FunctionDef name:_make_replicated_models_with_cloning arg:model arg:mode arguments arg arg Assign If Call Call"
  },
  {
    "library": "matplotlib",
    "name": "ToolCursorPosition",
    "source_code": "class ToolCursorPosition(ToolBase):\n\n    def __init__(self, *args, **kwargs):\n        self._id_drag = None\n        super().__init__(*args, **kwargs)\n\n    def set_figure(self, figure):\n        if self._id_drag:\n            self.canvas.mpl_disconnect(self._id_drag)\n        super().set_figure(figure)\n        if figure:\n            self._id_drag = self.canvas.mpl_connect('motion_notify_event', self.send_message)\n\n    def send_message(self, event):\n        if self.toolmanager.messagelock.locked():\n            return\n        from matplotlib.backend_bases import NavigationToolbar2\n        message = NavigationToolbar2._mouse_event_to_message(event)\n        self.toolmanager.message_event(message, self)",
    "docstring": "Send message with the current pointer position. This tool runs in the background reporting the position of the cursor.",
    "type": "class",
    "file_path": "matplotlib\\lib\\matplotlib\\backend_tools.py",
    "ast_data": "ClassDef name:ToolCursorPosition FunctionDef name:__init__ arg:self arguments arg arg arg Assign Call Call FunctionDef name:set_figure arg:self arg:figure arguments arg arg If Call Call Call If Assign Call FunctionDef name:send_message arg:self arg:event arguments arg arg If Call Return return:no Assign Call Call"
  },
  {
    "library": "pytorch",
    "name": "ExportDynamoConfig",
    "source_code": "@dataclasses.dataclass\nclass ExportDynamoConfig:\n    allow_rnn: bool = True\n    reorderable_logging_functions: set[Callable] = dataclasses.field(default_factory=set)\n    do_not_emit_runtime_asserts: bool = True\n    specialize_int: bool = True\n    specialize_float: bool = True\n    assume_static_by_default: bool = False\n    automatic_dynamic_shapes: bool = False\n    capture_dynamic_output_shape_ops: bool = True\n    capture_scalar_outputs: bool = True\n    prefer_deferred_runtime_asserts_over_guards: bool = False",
    "docstring": "Manage Export-specific configurations of Dynamo.",
    "type": "class",
    "file_path": "pytorch\\torch\\export\\_trace.py",
    "ast_data": "ClassDef name:ExportDynamoConfig Call"
  },
  {
    "library": "django",
    "name": "get_fields",
    "source_code": "def get_fields(self, field_name):\n    if field_name not in self.fields:\n        raise GDALException('invalid field name: %s' % field_name)\n    return [feat.get(field_name) for feat in self]",
    "docstring": "Return a list containing the given field name for every Feature in the Layer.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\gdal\\layer.py",
    "ast_data": "FunctionDef name:get_fields arg:self arg:field_name arguments arg arg If Compare Raise Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "final_reduction",
    "source_code": "def final_reduction(buffer, value: str, result_type: Optional[str]) -> str:\n    use_helper = reduction_type in ('any', 'max', 'min', 'prod')\n    module = 'triton_helpers' if use_helper else 'tl'\n    value = self.reduction_collapse_dims(buffer, value, dtype)\n    if reduction_type in ('max', 'min'):\n        value = self.reduction_resize(f'{module}.{reduction_type}2({value}, {dim})')\n    else:\n        value = self.reduction_resize(f'{module}.{reduction_type}({value}, {dim})')\n    if result_type is not None:\n        value = f'{value}.to({result_type})'\n    return value",
    "docstring": "Helper to generate a reduction call, e.g. tl.sum.",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\codegen\\triton.py",
    "ast_data": "FunctionDef name:final_reduction arg:buffer arg:value arg:result_type arguments arg arg arg Assign Compare Assign Assign Call If Compare Assign Call Assign Call If Compare Assign Return return:yes"
  },
  {
    "library": "cherrypy",
    "name": "subscribe",
    "source_code": "def subscribe(self):\n    for channel in self.bus.listeners:\n        method = getattr(self, channel, None)\n        if method is not None:\n            self.bus.subscribe(channel, method)",
    "docstring": "Register this object as a (multi-channel) listener on the bus.",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\process\\plugins.py",
    "ast_data": "FunctionDef name:subscribe arg:self arguments arg For Assign Call If Compare Call"
  },
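Since `subscribe` matches method names against bus channels, a small plugin sketch shows the effect; the `Greeter` class is an illustrative assumption:

```python
import cherrypy
from cherrypy.process.plugins import SimplePlugin

class Greeter(SimplePlugin):
    # Methods named after bus channels ('start', 'stop', ...) are
    # auto-registered as listeners by subscribe().
    def start(self):
        self.bus.log("Greeter started")

    def stop(self):
        self.bus.log("Greeter stopped")

Greeter(cherrypy.engine).subscribe()
```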
  {
    "library": "django",
    "name": "__iter__",
    "source_code": "def __iter__(self):\n    self._fetch_all()\n    return iter(self._result_cache)",
    "docstring": "The queryset iterator protocol uses three nested iterators in the default case: 1. sql.compiler.execute_sql() - Returns 100 rows at time (constants.GET_ITERATOR_CHUNK_SIZE) using cursor.fetchmany(). This part is responsible for doing some column masking, and returning the rows in chunks. 2. sql.compiler.results_iter() - Returns one row at time. At this point the rows are still just tuples. In some cases the return values are converted to Python values at this location. 3. self.iterator() - Responsible for turning the rows into model objects.",
    "type": "method",
    "file_path": "django\\django\\db\\models\\query.py",
    "ast_data": "FunctionDef name:__iter__ arg:self arguments arg Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_check_stop",
    "source_code": "def _check_stop(self):\n    return False",
    "docstring": "Hook for subclasses to provide their own stop condition. Returns: True if the session should stop, False otherwise.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\training\\monitored_session.py",
    "ast_data": "FunctionDef name:_check_stop arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "add_event",
    "source_code": "def add_event(self, event):\n    self._warn_if_event_writer_is_closed()\n    self.event_writer.add_event(event)",
    "docstring": "Adds an event to the event file. Args: event: An protocol buffer.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\summary\\writer\\writer.py",
    "ast_data": "FunctionDef name:add_event arg:self arg:event arguments arg arg Call Call"
  },
  {
    "library": "scipy",
    "name": "var",
    "source_code": "def var(self, df, scale):\n    dim, df, scale = self._process_parameters(df, scale)\n    out = self._var(dim, df, scale)\n    return _squeeze_output(out)",
    "docstring": "Variance of the Wishart distribution. Parameters ---------- %(_doc_default_callparams)s Returns ------- var : float The variance of the distribution",
    "type": "method",
    "file_path": "scipy\\scipy\\stats\\_multivariate.py",
    "ast_data": "FunctionDef name:var arg:self arg:df arg:scale arguments arg arg arg Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "compiler_ir_generator",
    "source_code": "def compiler_ir_generator(stage='hlo', device_name=None, platform_name=None):\n    if device_name is not None:\n        if platform_name is not None:\n            raise ValueError('device_name and platform_name cannot be provided at the same time.')\n        warnings.warn('device_name is being deprecated. Use platform_name.')\n    device_name = maybe_get_device_name(device_name)\n    res_bytes = context.context().get_compiler_ir(device_name=device_name, platform_name=platform_name, function_name=fn_name, flat_args=filtered_flat_specs, captured_inputs=concrete_fn.captured_inputs, stage=stage)\n    if stage in ('stablehlo_serialized', 'hlo_serialized', 'optimized_hlo_serialized', 'optimized_hlo_proto_serialized'):\n        return res_bytes\n    else:\n        return res_bytes.decode('utf-8')",
    "docstring": "Gets the compiler IR bytes. Args: stage: The exported stage for the given function. device_name: The name of the device with the form as \"/job:localhost/replica:0/task:0/device:CPU:0\", \"/device:TPU:0\" etc. When this is used, actual device is needed for getting the compiler IR. platform_name: The name of the platform, e.g. \"TPU\". See the comment in in . Returns: The compiler IR bytes.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\polymorphic_function\\compiler_ir.py",
    "ast_data": "FunctionDef name:compiler_ir_generator arg:stage arg:device_name arg:platform_name arguments arg arg arg If Compare If Compare Raise Call Call Assign Call Assign Call Call If Compare Return return:yes Return return:yes Call"
  },
  {
    "library": "cherrypy",
    "name": "request_namespace",
    "source_code": "def request_namespace(k, v):\n    if k[:5] == 'body.':\n        setattr(cherrypy.serving.request.body, k[5:], v)\n    else:\n        setattr(cherrypy.serving.request, k, v)",
    "docstring": "Attach request attributes declared in config.",
    "type": "function",
    "file_path": "cherrypy\\cherrypy\\_cprequest.py",
    "ast_data": "FunctionDef name:request_namespace arg:k arg:v arguments arg arg If Compare Call Call"
  },
  {
    "library": "pytorch",
    "name": "raise_if_triton_unavailable",
    "source_code": "@classmethod\ndef raise_if_triton_unavailable(cls, device: torch.types.Device=None) -> None:\n    if not cls.is_triton_capable():\n        raise RuntimeError('This device is not capable of supporting Triton')",
    "docstring": "Raises a with the appropriate human-readable instructions to resolve the issue if Triton is not available for the given device, or the default device if is . The caller should ensure the presence of the 'triton' package before calling this method.",
    "type": "method",
    "file_path": "pytorch\\torch\\_dynamo\\device_interface.py",
    "ast_data": "FunctionDef name:raise_if_triton_unavailable arg:cls arg:device arguments arg arg If Call Raise Call"
  },
  {
    "library": "tensorflow",
    "name": "get_cudnn_version",
    "source_code": "def get_cudnn_version():\n    key = 'cudnn_ver'\n    cmds = cmds_all[PLATFORM.lower()][key]\n    out, err = run_shell_cmd(cmds[0])\n    if err and FLAGS.debug:\n        print('Error in finding `cudnn.h`:\\n %s' % str(err))\n    if len(out.split(b' ')) > 1:\n        cmd = cmds[0] + ' | ' + cmds[1]\n        out_re, err_re = run_shell_cmd(cmd)\n        if err_re and FLAGS.debug:\n            print('Error in detecting cuDNN version:\\n %s' % str(err_re))\n        return out_re.strip(b'\\n')\n    else:\n        return",
    "docstring": "Retrieves the version of cuDNN library detected. Returns: String that is the version of cuDNN library detected. e.g. '7.5.0'",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\tools\\tensorflow_builder\\config_detector\\config_detector.py",
    "ast_data": "FunctionDef name:get_cudnn_version arguments Assign Assign Call Assign Call If BoolOp Call Call If Compare Call Call Assign Assign Call If BoolOp Call Call Return return:yes Call Return return:no"
  },
  {
    "library": "django",
    "name": "_non_pk_concrete_field_names",
    "source_code": "@cached_property\ndef _non_pk_concrete_field_names(self):\n    names = []\n    all_pk_fields = set(self.pk_fields)\n    for parent in self.all_parents:\n        all_pk_fields.update(parent._meta.pk_fields)\n    for field in self.concrete_fields:\n        if field not in all_pk_fields:\n            names.append(field.name)\n            if field.name != field.attname:\n                names.append(field.attname)\n    return frozenset(names)",
    "docstring": "Return a set of the non-pk concrete field names defined on the model.",
    "type": "method",
    "file_path": "django\\django\\db\\models\\options.py",
    "ast_data": "FunctionDef name:_non_pk_concrete_field_names arg:self arguments arg Assign Assign Call For Call For If Compare Call If Compare Call Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "integrate_box_1d",
    "source_code": "def integrate_box_1d(self, low, high):\n    if self.d != 1:\n        raise ValueError('integrate_box_1d() only handles 1D pdfs')\n    stdev = ravel(sqrt(self.covariance))[0]\n    normalized_low = ravel((low - self.dataset) / stdev)\n    normalized_high = ravel((high - self.dataset) / stdev)\n    delta = special.ndtr(normalized_high) - special.ndtr(normalized_low)\n    value = np_vecdot(self.weights, delta)\n    return value",
    "docstring": "Computes the integral of a 1D pdf between two bounds. Parameters ---------- low : scalar Lower bound of integration. high : scalar Upper bound of integration. Returns ------- value : scalar The result of the integral. Raises ------ ValueError If the KDE is over more than one dimension.",
    "type": "method",
    "file_path": "scipy\\scipy\\stats\\_kde.py",
    "ast_data": "FunctionDef name:integrate_box_1d arg:self arg:low arg:high arguments arg arg arg If Compare Raise Call Assign Call Call Assign Call Assign Call Assign Call Call Assign Call Return return:yes"
  },
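A quick check of `integrate_box_1d` against a known value; the standard-normal sample is an assumption:

```python
import numpy as np
from scipy.stats import gaussian_kde

rng = np.random.default_rng(0)
kde = gaussian_kde(rng.standard_normal(2000))

# Mass assigned to [-1, 1]; for a standard normal this is about 0.683.
print(kde.integrate_box_1d(-1.0, 1.0))
```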
  {
    "library": "pytorch",
    "name": "dim_map",
    "source_code": "@property\ndef dim_map(self) -> list[int]:\n    r = [-1] * self.ndim\n    for i, placement in enumerate(self.placements):\n        if placement.is_shard():\n            shard_dim = cast(Shard, placement).dim\n            if r[shard_dim] > -1:\n                raise ValueError(f'Tensor dim {shard_dim} is already sharded on mesh dim {r[shard_dim]}, DTensor operator implementation does not support things like hybrid sharding strategies yet (i.e. [Shard(0), Shard(0)])')\n            r[shard_dim] = i\n    return r",
    "docstring": "dim_map is a property we derive from of the distributed tensor. It simply return a list of ints where dim_map[i] denotes the sharding mapping to the mesh dimension, and len(dim_map) == dist_tensor.ndim dim_map[i] = -1: means tensor dim i replicate on mesh dim_map[i] = j: means tensor dim i shard on mesh dim j For example, we have a dist tensor that have the shape of [18, 20, 30], and device_mesh([0, 1, 2, 3]), placements: [Shard(1)], the dim_map of this placement would be: [-1, 0, -1]. This representation is pretty helpful during sharding propagation where we could know exactly each tensor dimension is sharded or not. Note that if placements contains , we have to explicitly deal with it, so that when we create a DTensorSpec with dim_map, we could properly record the pending sums.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\tensor\\_dtensor_spec.py",
    "ast_data": "FunctionDef name:dim_map arg:self arguments arg Assign For Call If Call Assign Call If Compare Raise Call Assign Return return:yes"
  },
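A dependency-free sketch of the derivation the docstring describes, with placements modeled as an optional shard dim per mesh dim (the names are hypothetical, not the DTensor API):

```python
def derive_dim_map(ndim, shard_dims):
    """shard_dims[i] is the tensor dim sharded on mesh dim i, or None."""
    r = [-1] * ndim
    for mesh_dim, tensor_dim in enumerate(shard_dims):
        if tensor_dim is not None:
            if r[tensor_dim] > -1:
                raise ValueError(f"tensor dim {tensor_dim} already sharded")
            r[tensor_dim] = mesh_dim
    return r

# Shape [18, 20, 30] with placements [Shard(1)] -> [-1, 0, -1], matching
# the example in the docstring above.
print(derive_dim_map(3, [1]))
```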
  {
    "library": "scipy",
    "name": "OdrStop",
    "source_code": "class OdrStop(Exception):\n    pass",
    "docstring": "Exception stopping fitting. You can raise this exception in your objective function to tell to stop fitting.",
    "type": "class",
    "file_path": "scipy\\scipy\\odr\\_odrpack.py",
    "ast_data": "ClassDef name:OdrStop"
  },
  {
    "library": "pytorch",
    "name": "find_device_based_on_size",
    "source_code": "def find_device_based_on_size(node) -> Device:\n    mem_size_needed = get_extra_size_of(node, set())\n    device = Device('', -1, -1)\n    for d in self.devices:\n        if d not in occupied_devices and d.available_mem_bytes >= mem_size_needed:\n            device = d\n            break\n    if device.available_mem_bytes < 0:\n        raise RuntimeError(str(node) + 'is too large to fit any device')\n    occupied_devices.append(device)\n    return device",
    "docstring": "Given a node, this function is to find a logical device that could fit the node.",
    "type": "method",
    "file_path": "pytorch\\torch\\fx\\experimental\\accelerator_partitioner.py",
    "ast_data": "FunctionDef name:find_device_based_on_size arg:node arguments arg Assign Call Call Assign Call For If BoolOp Compare Compare Assign If Compare Raise Call Call Call Return return:yes"
  },
  {
    "library": "numpy",
    "name": "Laguerre",
    "source_code": "class Laguerre(ABCPolyBase):\n    _add = staticmethod(lagadd)\n    _sub = staticmethod(lagsub)\n    _mul = staticmethod(lagmul)\n    _div = staticmethod(lagdiv)\n    _pow = staticmethod(lagpow)\n    _val = staticmethod(lagval)\n    _int = staticmethod(lagint)\n    _der = staticmethod(lagder)\n    _fit = staticmethod(lagfit)\n    _line = staticmethod(lagline)\n    _roots = staticmethod(lagroots)\n    _fromroots = staticmethod(lagfromroots)\n    domain = np.array(lagdomain)\n    window = np.array(lagdomain)\n    basis_name = 'L'",
    "docstring": "A Laguerre series class. The Laguerre class provides the standard Python numerical methods '+', '-', '*', '//', '%', 'divmod', '**', and '()' as well as the attributes and methods listed below. Parameters ---------- coef : array_like Laguerre coefficients in order of increasing degree, i.e, `domain` for its use. The default value is [0., 1.]. symbol : str, optional Symbol used to represent the independent variable in string representations of the polynomial expression, e.g. for printing. The symbol must be a valid Python identifier. Default value is 'x'. .. versionadded:: 1.24",
    "type": "class",
    "file_path": "numpy\\numpy\\polynomial\\laguerre.py",
    "ast_data": "ClassDef name:Laguerre Assign Call Assign Call Assign Call Assign Call Assign Call Assign Call Assign Call Assign Call Assign Call Assign Call Assign Call Assign Call Assign Call Assign Call Assign"
  },
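A small usage sketch of the coefficient convention described above:

```python
from numpy.polynomial import Laguerre

# Coefficients in increasing degree: 1*L_0(x) + 2*L_1(x) + 3*L_2(x).
p = Laguerre([1, 2, 3])
print(p(0.0))        # L_n(0) == 1 for every n, so this evaluates to 6.0
print((p + p).coef)  # standard arithmetic works: [2. 4. 6.]
```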
  {
    "library": "scipy",
    "name": "multi_rv_generic",
    "source_code": "class multi_rv_generic:\n\n    def __init__(self, seed=None):\n        super().__init__()\n        self._random_state = check_random_state(seed)\n\n    @property\n    def random_state(self):\n        return self._random_state\n\n    @random_state.setter\n    def random_state(self, seed):\n        self._random_state = check_random_state(seed)\n\n    def _get_random_state(self, random_state):\n        if random_state is not None:\n            return check_random_state(random_state)\n        else:\n            return self._random_state",
    "docstring": "Class which encapsulates common functionality between all multivariate distributions.",
    "type": "class",
    "file_path": "scipy\\scipy\\stats\\_multivariate.py",
    "ast_data": "ClassDef name:multi_rv_generic FunctionDef name:__init__ arg:self arg:seed arguments arg arg Call Call Assign Call FunctionDef name:random_state arg:self arguments arg Return return:yes FunctionDef name:random_state arg:self arg:seed arguments arg arg Assign Call FunctionDef name:_get_random_state arg:self arg:random_state arguments arg arg If Compare Return return:yes Call Return return:yes"
  },
  {
    "library": "django",
    "name": "do_with",
    "source_code": "@register.tag('with')\ndef do_with(parser, token):\n    bits = token.split_contents()\n    remaining_bits = bits[1:]\n    extra_context = token_kwargs(remaining_bits, parser, support_legacy=True)\n    if not extra_context:\n        raise TemplateSyntaxError('%r expected at least one variable assignment' % bits[0])\n    if remaining_bits:\n        raise TemplateSyntaxError('%r received an invalid token: %r' % (bits[0], remaining_bits[0]))\n    nodelist = parser.parse(('endwith',))\n    parser.delete_first_token()\n    return WithNode(None, None, nodelist, extra_context=extra_context)",
    "docstring": "Add one or more values to the context (inside of this block) for caching and easy access. For example:: {% with total=person.some_sql_method %} {{ total }} object{{ total|pluralize }} {% endwith %} Multiple values can be added to the context:: {% with foo=1 bar=2 %} ... {% endwith %} The legacy format of `` is still accepted.",
    "type": "function",
    "file_path": "django\\django\\template\\defaulttags.py",
    "ast_data": "FunctionDef name:do_with arg:parser arg:token arguments arg arg Assign Call Assign Assign Call If Raise Call If Raise Call Assign Call Call Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_zeros_slot",
    "source_code": "def _zeros_slot(self, var, slot_name, op_name):\n    named_slots = self._slot_dict(slot_name)\n    if _var_key(var) not in named_slots:\n        new_slot_variable = slot_creator.create_zeros_slot(var, op_name, copy_xla_sharding=True)\n        self._restore_slot_variable(slot_name=slot_name, variable=var, slot_variable=new_slot_variable)\n        named_slots[_var_key(var)] = new_slot_variable\n    return named_slots[_var_key(var)]",
    "docstring": "Find or create a slot initialized with 0.0. Args: var: A object. slot_name: Name for the slot. op_name: Name to use when scoping the Variable that needs to be created for the slot. Returns: A object.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\training\\optimizer.py",
    "ast_data": "FunctionDef name:_zeros_slot arg:self arg:var arg:slot_name arg:op_name arguments arg arg arg arg Assign Call If Compare Call Assign Call Call Assign Call Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "compress_csr",
    "source_code": "def compress_csr(self):\n    _, unique, indices = np.unique(self.m * self.rows + self.cols, return_index=True, return_inverse=True)\n    self.rows = self.rows[unique]\n    self.cols = self.cols[unique]\n    self.vals = np.bincount(indices, weights=self.vals)",
    "docstring": "Compress rows, cols, vals / summing duplicates. Sort for csr format.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\tri\\_triinterpolate.py",
    "ast_data": "FunctionDef name:compress_csr arg:self arguments arg Assign Call Assign Assign Assign Call"
  },
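A standalone sketch of the duplicate-summing trick `compress_csr` uses: `np.unique` keys the flattened row*m + col index, and `np.bincount` sums values per key. The tiny arrays are assumptions:

```python
import numpy as np

m = 4  # number of columns
rows = np.array([0, 0, 1])
cols = np.array([2, 2, 3])  # entry (0, 2) appears twice
vals = np.array([1.0, 2.0, 5.0])

_, unique, indices = np.unique(m * rows + cols,
                               return_index=True, return_inverse=True)
print(rows[unique], cols[unique])          # [0 1] [2 3]
print(np.bincount(indices, weights=vals))  # [3. 5.] -- duplicates summed
```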
  {
    "library": "pytorch",
    "name": "register_handle",
    "source_code": "def register_handle(self) -> None:\n    assert self.handle is None, 'Cannot register a handle that is already registered.'\n    self.handle = torch._C._gds_register_handle(self.fd)",
    "docstring": "Registers file descriptor to cuFile Driver. This is a wrapper around ``.",
    "type": "method",
    "file_path": "pytorch\\torch\\cuda\\gds.py",
    "ast_data": "FunctionDef name:register_handle arg:self arguments arg Compare Assign Call"
  },
  {
    "library": "sphinx",
    "name": "process_field_xref",
    "source_code": "def process_field_xref(self, pnode: pending_xref) -> None:\n    pass",
    "docstring": "Process a pending xref created in a doc field. For example, attach information about the current scope.",
    "type": "method",
    "file_path": "sphinx\\sphinx\\domains\\__init__.py",
    "ast_data": "FunctionDef name:process_field_xref arg:self arg:pnode arguments arg arg"
  },
  {
    "library": "pytorch",
    "name": "_export_operator_list",
    "source_code": "def _export_operator_list(module: LiteScriptModule):\n    return torch._C._export_operator_list(module._c)",
    "docstring": "Return a set of root operator names (with overload name) that are used by any method in this mobile module.",
    "type": "function",
    "file_path": "pytorch\\torch\\jit\\mobile\\__init__.py",
    "ast_data": "FunctionDef name:_export_operator_list arg:module arguments arg Return return:yes Call"
  },
  {
    "library": "sphinx",
    "name": "convert_dvi_to_svg",
    "source_code": "def convert_dvi_to_svg(dvipath: Path, builder: Builder, out_path: Path) -> int | None:\n    name = 'dvisvgm'\n    command = [builder.config.imgmath_dvisvgm, '-o', out_path]\n    command.extend(builder.config.imgmath_dvisvgm_args)\n    command.append(dvipath)\n    _stdout, stderr = convert_dvi_to_image(command, name)\n    depth = None\n    if builder.config.imgmath_use_preview:\n        for line in stderr.splitlines():\n            matched = depthsvg_re.match(line)\n            if matched:\n                depth = round(float(matched.group(1)) * 100 / 72.27)\n                write_svg_depth(out_path, depth)\n                break\n    return depth",
    "docstring": "Convert DVI file to SVG image.",
    "type": "function",
    "file_path": "sphinx\\sphinx\\ext\\imgmath.py",
    "ast_data": "FunctionDef name:convert_dvi_to_svg arg:dvipath arg:builder arg:out_path arguments arg arg arg Assign Assign Call Call Assign Call Assign If For Call Assign Call If Assign Call Call Call Call Return return:yes"
  },
  {
    "library": "seaborn",
    "name": "axes",
    "source_code": "@property\ndef axes(self):\n    return self._axes",
    "docstring": "An array of the :class: objects in the grid.",
    "type": "method",
    "file_path": "seaborn\\seaborn\\axisgrid.py",
    "ast_data": "FunctionDef name:axes arg:self arguments arg Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "_parallel_predict_log_proba",
    "source_code": "def _parallel_predict_log_proba(estimators, estimators_features, X, n_classes, params):\n    n_samples = X.shape[0]\n    log_proba = np.empty((n_samples, n_classes))\n    log_proba.fill(-np.inf)\n    all_classes = np.arange(n_classes, dtype=int)\n    for estimator, features in zip(estimators, estimators_features):\n        log_proba_estimator = estimator.predict_log_proba(X[:, features], **params)\n        if n_classes == len(estimator.classes_):\n            log_proba = np.logaddexp(log_proba, log_proba_estimator)\n        else:\n            log_proba[:, estimator.classes_] = np.logaddexp(log_proba[:, estimator.classes_], log_proba_estimator[:, range(len(estimator.classes_))])\n            missing = np.setdiff1d(all_classes, estimator.classes_)\n            log_proba[:, missing] = np.logaddexp(log_proba[:, missing], -np.inf)\n    return log_proba",
    "docstring": "Private function used to compute log probabilities within a job.",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\ensemble\\_bagging.py",
    "ast_data": "FunctionDef name:_parallel_predict_log_proba arg:estimators arg:estimators_features arg:X arg:n_classes arg:params arguments arg arg arg arg arg Assign Assign Call Call Assign Call For Call Assign Call If Compare Call Assign Call Assign Call Call Call Assign Call Assign Call Return return:yes"
  },
  {
    "library": "kornia",
    "name": "batch_size",
    "source_code": "@property\ndef batch_size(self) -> int:\n    return self.intrinsics.shape[0]",
    "docstring": "Return the batch size of the storage. Returns: scalar with the batch size.",
    "type": "method",
    "file_path": "kornia\\kornia\\geometry\\camera\\pinhole.py",
    "ast_data": "FunctionDef name:batch_size arg:self arguments arg Return return:yes"
  },
  {
    "library": "scipy",
    "name": "_with_data",
    "source_code": "def _with_data(self, data, copy=True):\n    if copy:\n        return self.__class__((data, self.indices.copy(), self.indptr.copy()), shape=self.shape, dtype=data.dtype)\n    else:\n        return self.__class__((data, self.indices, self.indptr), shape=self.shape, dtype=data.dtype)",
    "docstring": "Returns a matrix with the same sparsity structure as self, but with different data. By default the structure arrays (i.e. .indptr and .indices) are copied.",
    "type": "method",
    "file_path": "scipy\\scipy\\sparse\\_bsr.py",
    "ast_data": "FunctionDef name:_with_data arg:self arg:data arg:copy arguments arg arg arg If Return return:yes Call Call Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "batch",
    "source_code": "def batch(self, spec, batch_size):\n\n    def batch_field(f):\n        if isinstance(f, type_spec.BatchableTypeSpec):\n            return f.__batch_encoder__.batch(f, batch_size)\n        elif isinstance(f, tensor_shape.TensorShape):\n            return [batch_size] + f\n        else:\n            return f\n    fields = tuple(spec.__dict__.items())\n    batched_fields = nest.map_structure(batch_field, fields)\n    return _create_object_from_type_and_dict(type(spec), batched_fields)",
    "docstring": "Returns the TypeSpec representing a batch of values described by . The default definition returns a that is equal to , except that an outer axis with size is added to every nested and field. Subclasses may override this default definition, when necessary. Args: spec: The for an individual value. batch_size: An indicating the number of values that are batched together, or if the batch size is not known. Returns: A for a batch of values.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\extension_type.py",
    "ast_data": "FunctionDef name:batch arg:self arg:spec arg:batch_size arguments arg arg arg FunctionDef name:batch_field arg:f arguments arg If Call Return return:yes Call If Call Return return:yes Return return:yes Assign Call Call Assign Call Return return:yes Call Call"
  },
  {
    "library": "pandas",
    "name": "__array__",
    "source_code": "def __array__(self, dtype: npt.DTypeLike | None=None, copy: bool | None=None) -> np.ndarray:\n    values = self._values\n    if copy is None:\n        arr = np.asarray(values, dtype=dtype)\n    else:\n        arr = np.array(values, dtype=dtype, copy=copy)\n    if copy is True:\n        return arr\n    if copy is False or astype_is_view(values.dtype, arr.dtype):\n        arr = arr.view()\n        arr.flags.writeable = False\n    return arr",
    "docstring": "Return the values as a NumPy array. Users should not call this directly. Rather, it is invoked by :func: and :func:. Parameters ---------- dtype : str or numpy.dtype, optional The dtype to use for the resulting NumPy array. By default, the dtype is inferred from the data. copy : bool or None, optional See :func:. Returns ------- numpy.ndarray The values in the series converted to a :class: with the specified . See Also -------- array : Create a new array from data. Series.array : Zero-copy view to the array backing the Series. Series.to_numpy : Series method for similar behavior. Examples -------- >>> ser = pd.Series([1, 2, 3]) >>> np.asarray(ser) array([1, 2, 3]) For timezone-aware data, the timezones may be retained with `` >>> np.asarray(tzser, dtype=\"datetime64[ns]\") # doctest: +ELLIPSIS array(['1999-12-31T23:00:00.000000000', ...], dtype='datetime64[ns]')",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\series.py",
    "ast_data": "FunctionDef name:__array__ arg:self arg:dtype arg:copy arguments arg arg arg Assign If Compare Assign Call Assign Call If Compare Return return:yes If BoolOp Compare Call Assign Call Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "MultiDeviceIteratorSpec",
    "source_code": "class MultiDeviceIteratorSpec(type_spec.TypeSpec):\n    __slots__ = ['_devices', '_source_device', '_element_spec']\n\n    def __init__(self, devices, source_device, element_spec):\n        self._devices = devices\n        self._source_device = source_device\n        self._element_spec = element_spec\n\n    @property\n    def value_type(self):\n        return OwnedMultiDeviceIterator\n\n    def _serialize(self):\n        return (tuple(self._devices), self._source_device, self._element_spec)\n\n    @property\n    def _component_specs(self):\n        specs = [tensor_spec.TensorSpec([], dtypes.resource)]\n        for _ in range(len(self._devices)):\n            specs.append(iterator_ops.IteratorSpec(self._element_spec))\n        return specs\n\n    def _to_components(self, value):\n        c = [value._multi_device_iterator_resource]\n        c.extend(value._device_iterators)\n        return c\n\n    def _from_components(self, components):\n        return OwnedMultiDeviceIterator(dataset=None, devices=self._devices, source_device=self._source_device, components=components, element_spec=self._element_spec)\n\n    @staticmethod\n    def from_value(value):\n        return MultiDeviceIteratorSpec(value._devices, value._source_device, value.element_spec)",
    "docstring": "Type specification for .",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\data\\ops\\multi_device_iterator_ops.py",
    "ast_data": "ClassDef name:MultiDeviceIteratorSpec Assign FunctionDef name:__init__ arg:self arg:devices arg:source_device arg:element_spec arguments arg arg arg arg Assign Assign Assign FunctionDef name:value_type arg:self arguments arg Return return:yes FunctionDef name:_serialize arg:self arguments arg Return return:yes Call FunctionDef name:_component_specs arg:self arguments arg Assign Call For Call Call Call Call Return return:yes FunctionDef name:_to_components arg:self arg:value arguments arg arg Assign Call Return return:yes FunctionDef name:_from_components arg:self arg:components arguments arg arg Return return:yes Call FunctionDef name:from_value arg:value arguments arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "benchmark_combo_kernel",
    "source_code": "def benchmark_combo_kernel(self, node_list: Sequence[BaseSchedulerNode]) -> tuple[float, float, list[Optional[str]]]:\n    raise NotImplementedError",
    "docstring": "Benchmark the list of nodes to combine and return the execution time and memory copy time in milliseconds on randomly generated inputs.",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\scheduler.py",
    "ast_data": "FunctionDef name:benchmark_combo_kernel arg:self arg:node_list arguments arg arg Raise"
  },
  {
    "library": "tensorflow",
    "name": "_maybe_get_static_event_ndims",
    "source_code": "def _maybe_get_static_event_ndims(self, event_ndims):\n    event_ndims_ = distribution_util.maybe_get_static_value(event_ndims)\n    if isinstance(event_ndims_, (np.generic, np.ndarray)):\n        if event_ndims_.dtype not in (np.int32, np.int64):\n            raise ValueError('Expected integer dtype, got dtype {}'.format(event_ndims_.dtype))\n        if isinstance(event_ndims_, np.ndarray) and len(event_ndims_.shape):\n            raise ValueError('Expected a scalar integer, got {}'.format(event_ndims_))\n        event_ndims_ = int(event_ndims_)\n    return event_ndims_",
    "docstring": "Helper which returns tries to return an integer static value.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\distributions\\bijector_impl.py",
    "ast_data": "FunctionDef name:_maybe_get_static_event_ndims arg:self arg:event_ndims arguments arg arg Assign Call If Call If Compare Raise Call Call If BoolOp Call Call Raise Call Call Assign Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "set_xdata",
    "source_code": "def set_xdata(self, x):\n    if not np.iterable(x):\n        raise RuntimeError('x must be a sequence')\n    self._xorig = copy.copy(x)\n    self._invalidx = True\n    self.stale = True",
    "docstring": "Set the data array for x. Parameters ---------- x : 1D array See Also -------- set_data set_ydata",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\lines.py",
    "ast_data": "FunctionDef name:set_xdata arg:self arg:x arguments arg arg If Call Raise Call Assign Call Assign Assign"
  },
  {
    "library": "pytorch",
    "name": "get_rank",
    "source_code": "def get_rank(group: Optional[ProcessGroup]=None) -> int:\n    if _rank_not_in_group(group):\n        return -1\n    default_pg = _get_default_group()\n    if group is None or group is GroupMember.WORLD:\n        return default_pg.rank()\n    return get_group_rank(group, default_pg.rank())",
    "docstring": "Return the rank of the current process in the provided ``. Args: group (ProcessGroup, optional): The process group to work on. If None, the default process group will be used. Returns: The rank of the process group -1, if not part of the group",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\distributed_c10d.py",
    "ast_data": "FunctionDef name:get_rank arg:group arguments arg If Call Return return:yes Assign Call If BoolOp Compare Compare Return return:yes Call Return return:yes Call Call"
  },
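A single-process sketch of `get_rank` with the gloo backend; the address and port are assumptions:

```python
import os
import torch.distributed as dist

os.environ.setdefault("MASTER_ADDR", "127.0.0.1")
os.environ.setdefault("MASTER_PORT", "29500")
dist.init_process_group("gloo", rank=0, world_size=1)

print(dist.get_rank())  # 0: rank within the default (WORLD) group
dist.destroy_process_group()
```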
  {
    "library": "pytorch",
    "name": "L1Loss",
    "source_code": "class L1Loss(_Loss):\n    __constants__ = ['reduction']\n\n    def __init__(self, size_average=None, reduce=None, reduction: str='mean') -> None:\n        super().__init__(size_average, reduce, reduction)\n\n    def forward(self, input: Tensor, target: Tensor) -> Tensor:\n        return F.l1_loss(input, target, reduction=self.reduction)",
    "docstring": "Creates a criterion that measures the mean absolute error (MAE) between each element in the input :math: and target :math:. The unreduced (i.e. with :attr: set to `Nreductionmean';}\\\\ \\operatorname{sum}(L), & \\text{if reduction} = \\text{xyNNNreductionsize_averagereducereductionsize_averagereducesize_averagesize_averagereducereduction(*)*(*)reduction(*)`, same shape as the input. Examples: >>> loss = nn.L1Loss() >>> input = torch.randn(3, 5, requires_grad=True) >>> target = torch.randn(3, 5) >>> output = loss(input, target) >>> output.backward()",
    "type": "class",
    "file_path": "pytorch\\torch\\nn\\modules\\loss.py",
    "ast_data": "ClassDef name:L1Loss Assign FunctionDef name:__init__ arg:self arg:size_average arg:reduce arg:reduction arguments arg arg arg arg Call Call FunctionDef name:forward arg:self arg:input arg:target arguments arg arg arg Return return:yes Call"
  },
  {
    "library": "numpy",
    "name": "as_term_coeff",
    "source_code": "def as_term_coeff(obj):\n    if isinstance(obj, Expr):\n        obj = normalize(obj)\n        if obj.op is Op.INTEGER:\n            return (as_integer(1, obj.data[1]), obj.data[0])\n        if obj.op is Op.REAL:\n            return (as_real(1, obj.data[1]), obj.data[0])\n        if obj.op is Op.TERMS:\n            if len(obj.data) == 1:\n                (term, coeff), = obj.data.items()\n                return (term, coeff)\n        if obj.op is Op.APPLY and obj.data[0] is ArithOp.DIV:\n            t, c = as_term_coeff(obj.data[1][0])\n            return (as_apply(ArithOp.DIV, t, obj.data[1][1]), c)\n        return (obj, 1)\n    raise OpError(f'cannot convert {type(obj)} to term and coeff')",
    "docstring": "Return expression as term-coefficient pair.",
    "type": "function",
    "file_path": "numpy\\numpy\\f2py\\symbolic.py",
    "ast_data": "FunctionDef name:as_term_coeff arg:obj arguments arg If Call Assign Call If Compare Return return:yes Call If Compare Return return:yes Call If Compare If Compare Call Assign Call Return return:yes If BoolOp Compare Compare Assign Call Return return:yes Call Return return:yes Raise Call Call"
  },
  {
    "library": "scikit-learn",
    "name": "_is_arraylike_not_scalar",
    "source_code": "def _is_arraylike_not_scalar(array):\n    return _is_arraylike(array) and (not np.isscalar(array))",
    "docstring": "Return True if array is array-like and not a scalar",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\utils\\validation.py",
    "ast_data": "FunctionDef name:_is_arraylike_not_scalar arg:array arguments arg Return return:yes BoolOp Call Call"
  },
  {
    "library": "tensorflow",
    "name": "call_options",
    "source_code": "def call_options(self):\n    return ConversionOptions(recursive=self.recursive, user_requested=False, internal_convert_user_code=self.recursive, optional_features=self.optional_features)",
    "docstring": "Returns the corresponding options to be used for recursive conversion.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\autograph\\core\\converter.py",
    "ast_data": "FunctionDef name:call_options arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "django",
    "name": "can_fast_delete",
    "source_code": "def can_fast_delete(self, *args, **kwargs):\n    return False",
    "docstring": "We always want to load the objects into memory so that we can display them to the user in confirm page.",
    "type": "method",
    "file_path": "django\\django\\contrib\\admin\\utils.py",
    "ast_data": "FunctionDef name:can_fast_delete arg:self arguments arg arg arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_SparseDenseCwiseMulOrDivGrad",
    "source_code": "def _SparseDenseCwiseMulOrDivGrad(op: ops.Operation, grad, is_mul):\n    x_indices = op.inputs[0]\n    x_shape = op.inputs[2]\n    y = op.inputs[3]\n    y_shape = math_ops.cast(array_ops.shape(y), dtypes.int64)\n    num_added_dims = array_ops.expand_dims(array_ops.size(x_shape) - array_ops.size(y_shape), 0)\n    augmented_y_shape = array_ops.concat([array_ops.ones(num_added_dims, ops.dtypes.int64), y_shape], 0)\n    scaling = x_shape // augmented_y_shape\n    scaled_indices = x_indices // scaling\n    scaled_indices = array_ops.slice(scaled_indices, array_ops.concat([[0], num_added_dims], 0), [-1, -1])\n    dense_vals = array_ops.gather_nd(y, scaled_indices)\n    if is_mul:\n        dx = grad * dense_vals\n        dy_val = grad * op.inputs[1]\n    else:\n        dx = grad / dense_vals\n        dy_val = grad * (-op.inputs[1] / math_ops.square(dense_vals))\n    dy = sparse_ops.sparse_add(array_ops.zeros_like(y), sparse_tensor.SparseTensor(scaled_indices, dy_val, y_shape))\n    return (None, dx, None, dy)",
    "docstring": "Common code for SparseDenseCwise{Mul,Div} gradients.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\sparse_grad.py",
    "ast_data": "FunctionDef name:_SparseDenseCwiseMulOrDivGrad arg:op arg:grad arg:is_mul arguments arg arg arg Assign Assign Assign Assign Call Call Assign Call Call Call Assign Call Call Assign Assign Assign Call Call Assign Call If Assign Assign Assign Assign Call Assign Call Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "for_fetch",
    "source_code": "@staticmethod\ndef for_fetch(fetch):\n    if fetch is None:\n        raise TypeError(f'Argument `fetch` = {fetch} has invalid type \"{type(fetch).__name__}\". Cannot be None')\n    elif isinstance(fetch, (list, tuple)):\n        return _ListFetchMapper(fetch)\n    elif isinstance(fetch, collections_abc.Mapping):\n        return _DictFetchMapper(fetch)\n    elif _is_attrs_instance(fetch):\n        return _AttrsFetchMapper(fetch)\n    else:\n        for tensor_type, fetch_fn, _, _ in _REGISTERED_EXPANSIONS:\n            if isinstance(fetch, tensor_type):\n                fetches, contraction_fn = fetch_fn(fetch)\n                return _ElementFetchMapper(fetches, contraction_fn)\n    raise TypeError(f'Argument `fetch` = {fetch} has invalid type \"{type(fetch).__name__}\"')",
    "docstring": "Creates fetch mapper that handles the structure of . The default graph must be the one from which we want to fetch values when this function is called. Args: fetch: An arbitrary fetch structure: singleton, list, tuple, namedtuple, or dict. Returns: An instance of a subclass of that handles the shape.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\client\\session.py",
    "ast_data": "FunctionDef name:for_fetch arg:fetch arguments arg If Compare Raise Call Call If Call Return return:yes Call If Call Return return:yes Call If Call Return return:yes Call For If Call Assign Call Return return:yes Call Raise Call Call"
  },
  {
    "library": "matplotlib",
    "name": "__call__",
    "source_code": "def __call__(self):\n    return self._elements[self._pos] if self._elements else None",
    "docstring": "Return the current element, or None.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\cbook.py",
    "ast_data": "FunctionDef name:__call__ arg:self arguments arg Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "_joint_log_likelihood",
    "source_code": "def _joint_log_likelihood(self, X):\n    jll = safe_sparse_dot(X, self.feature_log_prob_.T)\n    if len(self.classes_) == 1:\n        jll += self.class_log_prior_\n    return jll",
    "docstring": "Calculate the class scores for the samples in X.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\naive_bayes.py",
    "ast_data": "FunctionDef name:_joint_log_likelihood arg:self arg:X arguments arg arg Assign Call If Compare Call Return return:yes"
  },
  {
    "library": "django",
    "name": "NothingNode",
    "source_code": "class NothingNode:\n    contains_aggregate = False\n    contains_over_clause = False\n\n    def as_sql(self, compiler=None, connection=None):\n        raise EmptyResultSet",
    "docstring": "A node that matches nothing.",
    "type": "class",
    "file_path": "django\\django\\db\\models\\sql\\where.py",
    "ast_data": "ClassDef name:NothingNode Assign Assign FunctionDef name:as_sql arg:self arg:compiler arg:connection arguments arg arg arg Raise"
  },
  {
    "library": "sphinx",
    "name": "convert_source_suffix",
    "source_code": "def convert_source_suffix(app: Sphinx, config: Config) -> None:\n    source_suffix = config.source_suffix\n    if isinstance(source_suffix, str):\n        config.source_suffix = {source_suffix: 'restructuredtext'}\n        logger.info(__('Converting `source_suffix = %r` to `source_suffix = %r`.'), source_suffix, config.source_suffix)\n    elif isinstance(source_suffix, list | tuple):\n        config.source_suffix = dict.fromkeys(source_suffix, 'restructuredtext')\n        logger.info(__('Converting `source_suffix = %r` to `source_suffix = %r`.'), source_suffix, config.source_suffix)\n    elif not isinstance(source_suffix, dict):\n        msg = __(\"The config value `source_suffix' expects a dictionary, a string, or a list of strings. Got `%r' instead (type %s).\")\n        raise ConfigError(msg % (source_suffix, type(source_suffix)))",
    "docstring": "Convert old styled source_suffix to new styled one. * old style: str or list * new style: a dict which maps from fileext to filetype",
    "type": "function",
    "file_path": "sphinx\\sphinx\\config.py",
    "ast_data": "FunctionDef name:convert_source_suffix arg:app arg:config arguments arg arg Assign If Call Assign Call Call If Call Assign Call Call Call If Call Assign Call Raise Call Call"
  },
  {
    "library": "django",
    "name": "token_kwargs",
    "source_code": "def token_kwargs(bits, parser, support_legacy=False):\n    if not bits:\n        return {}\n    match = kwarg_re.match(bits[0])\n    kwarg_format = match and match[1]\n    if not kwarg_format:\n        if not support_legacy:\n            return {}\n        if len(bits) < 3 or bits[1] != 'as':\n            return {}\n    kwargs = {}\n    while bits:\n        if kwarg_format:\n            match = kwarg_re.match(bits[0])\n            if not match or not match[1]:\n                return kwargs\n            key, value = match.groups()\n            del bits[:1]\n        else:\n            if len(bits) < 3 or bits[1] != 'as':\n                return kwargs\n            key, value = (bits[2], bits[0])\n            del bits[:3]\n        kwargs[key] = parser.compile_filter(value)\n        if bits and (not kwarg_format):\n            if bits[0] != 'and':\n                return kwargs\n            del bits[:1]\n    return kwargs",
    "docstring": "Parse token keyword arguments and return a dictionary of the arguments retrieved from the `bitssupport_legacy` to be keyword arguments, so return the dictionary as soon as an invalid argument format is reached.",
    "type": "function",
    "file_path": "django\\django\\template\\base.py",
    "ast_data": "FunctionDef name:token_kwargs arg:bits arg:parser arg:support_legacy arguments arg arg arg If Return return:no Assign Call Assign BoolOp If If Return return:no If BoolOp Compare Call Compare Return return:no Assign While If Assign Call If BoolOp Return return:yes Assign Call If BoolOp Compare Call Compare Return return:yes Assign Assign Call If BoolOp If Compare Return return:yes Return return:yes"
  },
  {
    "library": "cryptography",
    "name": "_SSHFormatSKEd25519",
    "source_code": "class _SSHFormatSKEd25519:\n\n    def load_public(self, data: memoryview) -> tuple[ed25519.Ed25519PublicKey, memoryview]:\n        public_key, data = _lookup_kformat(_SSH_ED25519).load_public(data)\n        _, data = load_application(data)\n        return (public_key, data)\n\n    def get_public(self, data: memoryview) -> typing.NoReturn:\n        raise UnsupportedAlgorithm('sk-ssh-ed25519 private keys cannot be loaded')",
    "docstring": "The format of a sk-ssh-ed25519@openssh.com public key is: string \"sk-ssh-ed25519@openssh.com\" string public key string application (user-specified, but typically \"ssh:\")",
    "type": "class",
    "file_path": "cryptography\\src\\cryptography\\hazmat\\primitives\\serialization\\ssh.py",
    "ast_data": "ClassDef name:_SSHFormatSKEd25519 FunctionDef name:load_public arg:self arg:data arguments arg arg Assign Call Call Assign Call Return return:yes FunctionDef name:get_public arg:self arg:data arguments arg arg Raise Call"
  },
  {
    "library": "pytorch",
    "name": "nnmodule_has_hooks",
    "source_code": "def nnmodule_has_hooks(mod, check_forward_hooks=False, check_backward_hooks=False, check_state_dict_hooks=False):\n    hooks = nn_module_get_all_hooks(mod, check_forward_hooks=check_forward_hooks, check_backward_hooks=check_backward_hooks, check_state_dict_hooks=check_state_dict_hooks)\n    return bool(hooks)",
    "docstring": "Helper function to check if a module has any hooks attached to it.",
    "type": "function",
    "file_path": "pytorch\\torch\\_dynamo\\utils.py",
    "ast_data": "FunctionDef name:nnmodule_has_hooks arg:mod arg:check_forward_hooks arg:check_backward_hooks arg:check_state_dict_hooks arguments arg arg arg arg Assign Call Return return:yes Call"
  },
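  {
    "editor_note": "Usage sketch added by the editor, not part of the original corpus: checking a module for hooks with the helper above. This is a private Dynamo utility; the import path follows the record's file_path (torch/_dynamo/utils.py) and may change between PyTorch versions.",
    "example_code": "import torch.nn as nn\nfrom torch._dynamo.utils import nnmodule_has_hooks\n\nmod = nn.Linear(4, 2)\nprint(nnmodule_has_hooks(mod, check_forward_hooks=True))  # False: no hooks yet\nmod.register_forward_hook(lambda m, args, out: None)  # attach a no-op forward hook\nprint(nnmodule_has_hooks(mod, check_forward_hooks=True))  # True",
    "type": "usage_example"
  },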
  {
    "library": "tensorflow",
    "name": "AddBackpropLoopCounter",
    "source_code": "def AddBackpropLoopCounter(self, count, outer_grad_state):\n    in_separate_functions = count.graph is not ops.get_default_graph()\n    if in_separate_functions:\n        count = array_ops.identity(count)\n    else:\n        one = constant_op.constant(1, name='b_count')\n    self.Enter()\n    self.AddName(count.name)\n    enter_count = _Enter(count, self._name, is_constant=False, parallel_iterations=self._parallel_iterations, name='b_count')\n    self.loop_enters.append(enter_count)\n    merge_count = merge([enter_count, enter_count])[0]\n    self._pivot_for_pred = merge_count\n    if in_separate_functions:\n        one = constant_op.constant(1, name='b_count')\n    pred = math_ops.greater_equal(merge_count, one)\n    self._pivot = loop_cond(pred, name='b_count')\n    switch_count = switch(merge_count, self._pivot)\n    index = math_ops.subtract(switch_count[1], one)\n    self._pivot_for_body = index\n    next_count = _NextIteration(index)\n    merge_count.op._update_input(1, next_count)\n    final_zero = exit(switch_count[0], name='b_count')\n    self.loop_exits.append(final_zero)\n    if outer_grad_state is not None:\n        outer_grad_state.grad_sync._add_control_input(final_zero.op)\n    self.ExitResult([final_zero])\n    self.Exit()\n    return next_count",
    "docstring": "Add the backprop loop that controls the iterations. This is added to the backprop loop. It is used to control the loop termination of the backprop loop. Called in the outer context of this grad context. The pseudocode is: Note that a control dependency is added to to ensure the correct execution order of stack pop ops. Args: count: The number of iterations for backprop. outer_grad_state: The outer grad state. None if not nested. Returns: The loop index.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\control_flow_ops.py",
    "ast_data": "FunctionDef name:AddBackpropLoopCounter arg:self arg:count arg:outer_grad_state arguments arg arg arg Assign Compare Call If Assign Call Assign Call Call Call Assign Call Call Assign Call Assign If Assign Call Assign Call Assign Call Assign Call Assign Call Assign Assign Call Call Assign Call Call If Compare Call Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_transform_features",
    "source_code": "def _transform_features(features, feature_columns):\n    feature_columns = _normalize_feature_columns(feature_columns)\n    outputs = {}\n    with ops.name_scope(None, default_name='transform_features', values=features.values()):\n        builder = _LazyBuilder(features)\n        for column in sorted(feature_columns, key=lambda x: x.name):\n            with ops.name_scope(None, default_name=column.name):\n                outputs[column] = builder.get(column)\n    return outputs",
    "docstring": "Returns transformed features based on features columns passed in. Please note that most probably you would not need to use this function. Please check and to see whether they will satisfy your use case or not. Example: Args: features: A mapping from key to tensors. s look up via these keys. For example will look at 'price' key in this dict. Values can be a or a depends on corresponding . feature_columns: An iterable containing all the s. Returns: A mapping to and values.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\feature_column\\feature_column.py",
    "ast_data": "FunctionDef name:_transform_features arg:features arg:feature_columns arguments arg arg Assign Call Assign With Call Call Assign Call For Call arguments arg With Call Assign Call Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "_iter",
    "source_code": "def _iter(self, fitted, column_as_labels, skip_drop, skip_empty_columns):\n    if fitted:\n        transformers = self.transformers_\n    else:\n        transformers = [(name, trans, column) for (name, trans, _), column in zip(self.transformers, self._columns)]\n        if self._remainder[2]:\n            transformers = chain(transformers, [self._remainder])\n    get_weight = (self.transformer_weights or {}).get\n    for name, trans, columns in transformers:\n        if skip_drop and trans == 'drop':\n            continue\n        if skip_empty_columns and _is_empty_column_selection(columns):\n            continue\n        if column_as_labels:\n            columns_is_scalar = np.isscalar(columns)\n            indices = self._transformer_to_input_indices[name]\n            columns = self.feature_names_in_[indices]\n            if columns_is_scalar:\n                columns = columns[0]\n        yield (name, trans, columns, get_weight(name))",
    "docstring": "Generate (name, trans, columns, weight) tuples. Parameters ---------- fitted : bool If True, use the fitted transformers (`` is already fitted. skip_drop : bool If True, 'drop' transformers are filtered out. skip_empty_columns : bool If True, transformers with empty selected columns are filtered out. Yields ------ A generator of tuples containing: - name : the name of the transformer - transformer : the transformer object - columns : the columns for that transformer - weight : the weight of the transformer",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\compose\\_column_transformer.py",
    "ast_data": "FunctionDef name:_iter arg:self arg:fitted arg:column_as_labels arg:skip_drop arg:skip_empty_columns arguments arg arg arg arg arg If Assign Assign Call If Assign Call Assign BoolOp For If BoolOp Compare If BoolOp Call If Assign Call Assign Assign If Assign Call"
  },
  {
    "library": "seaborn",
    "name": "lineplot",
    "source_code": "def lineplot(self, ax, kws):\n    grid, yhat, err_bands = self.fit_regression(ax)\n    edges = (grid[0], grid[-1])\n    fill_color = kws['color']\n    lw = kws.pop('lw', mpl.rcParams['lines.linewidth'] * 1.5)\n    kws.setdefault('linewidth', lw)\n    line, = ax.plot(grid, yhat, **kws)\n    if not self.truncate:\n        line.sticky_edges.x[:] = edges\n    if err_bands is not None:\n        ax.fill_between(grid, *err_bands, facecolor=fill_color, alpha=0.15)",
    "docstring": "Draw the model.",
    "type": "method",
    "file_path": "seaborn\\seaborn\\regression.py",
    "ast_data": "FunctionDef name:lineplot arg:self arg:ax arg:kws arguments arg arg arg Assign Call Assign Assign Assign Call Call Assign Call If Assign If Compare Call"
  },
  {
    "library": "tensorflow",
    "name": "_parse_slices",
    "source_code": "def _parse_slices(slicing_string):\n    parsed = []\n    for slice_string in slicing_string[1:-1].split(','):\n        indices = slice_string.split(':')\n        if len(indices) == 1:\n            parsed.append(int(indices[0].strip()))\n        elif 2 <= len(indices) <= 3:\n            parsed.append(slice(*[int(index.strip()) if index.strip() else None for index in indices]))\n        else:\n            raise ValueError('Invalid tensor-slicing string.')\n    return tuple(parsed)",
    "docstring": "Construct a tuple of slices from the slicing string. The string must be a valid slicing string. Args: slicing_string: (str) Input slicing string to be parsed. Returns: tuple(slice1, slice2, ...) Raises: ValueError: If tensor_slicing is not a valid numpy ndarray slicing str.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\debug\\cli\\command_parser.py",
    "ast_data": "FunctionDef name:_parse_slices arg:slicing_string arguments arg Assign For Call Assign Call If Compare Call Call Call Call If Compare Call Call Call Call Call Call Raise Call Return return:yes Call"
  },
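  {
    "editor_note": "Illustration added by the editor, not part of the original corpus: expected inputs and outputs of the private `_parse_slices` helper above, assuming the function from the record is in scope.",
    "example_code": "print(_parse_slices('[0:2, 4]'))   # (slice(0, 2, None), 4)\nprint(_parse_slices('[:, -1]'))    # (slice(None, None, None), -1)\nprint(_parse_slices('[1:10:2]'))   # (slice(1, 10, 2),)",
    "type": "usage_example"
  },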
  {
    "library": "scipy",
    "name": "MonteCarloMethod",
    "source_code": "@dataclass\nclass MonteCarloMethod(ResamplingMethod):\n    rvs: object = None\n    rng: object = None\n\n    def __init__(self, n_resamples=9999, batch=None, rvs=None, rng=None):\n        if rvs is not None and rng is not None:\n            message = 'Use of `rvs` and `rng` are mutually exclusive.'\n            raise ValueError(message)\n        self.n_resamples = n_resamples\n        self.batch = batch\n        self.rvs = rvs\n        self.rng = rng\n\n    def _asdict(self):\n        return dict(n_resamples=self.n_resamples, batch=self.batch, rvs=self.rvs, rng=self.rng)",
    "docstring": "Configuration information for a Monte Carlo hypothesis test. Instances of this class can be passed into the parameter of some hypothesis test functions to perform a Monte Carlo version of the hypothesis tests. Attributes ---------- n_resamples : int, optional The number of Monte Carlo samples to draw. Default is 9999. batch : int, optional The number of Monte Carlo samples to process in each vectorized call to the statistic. Batch sizes >>1 tend to be faster when the statistic is vectorized, but memory usage scales linearly with the batch size. Default is `rvsrvsrvsMonteCarloMethodscipy.stats.pearsonrnumpy.random.Generatorrngnumpy.random.Generatornumpy.random.Generatornumpy.random.default_rng`.",
    "type": "class",
    "file_path": "scipy\\scipy\\stats\\_resampling.py",
    "ast_data": "ClassDef name:MonteCarloMethod FunctionDef name:__init__ arg:self arg:n_resamples arg:batch arg:rvs arg:rng arguments arg arg arg arg arg If BoolOp Compare Compare Assign Raise Call Assign Assign Assign Assign FunctionDef name:_asdict arg:self arguments arg Return return:yes Call"
  },
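  {
    "editor_note": "Usage sketch added by the editor, not part of the original corpus: wiring `MonteCarloMethod` into `scipy.stats.pearsonr` via its `method` parameter (supported since SciPy 1.11). The sample data and seed are illustrative assumptions.",
    "example_code": "import numpy as np\nfrom scipy import stats\n\nrng = np.random.default_rng(0)\nx = rng.normal(size=50)\ny = x + rng.normal(size=50)\n\n# One variate-generating callable per sample; each must accept a size kwarg.\nmethod = stats.MonteCarloMethod(rvs=(rng.normal, rng.normal), n_resamples=999)\nres = stats.pearsonr(x, y, method=method)  # p-value estimated by Monte Carlo\nprint(res.statistic, res.pvalue)",
    "type": "usage_example"
  },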
  {
    "library": "kornia",
    "name": "NormalsToRgb255",
    "source_code": "class NormalsToRgb255(Module):\n\n    def forward(self, image: Tensor) -> Tensor:\n        return normals_to_rgb255(image)",
    "docstring": "Convert surface normals to RGB [0, 255] for visualization purposes. Returns: RGB version of the image. Shape: - image: :math: - output: :math: Example: >>> input = torch.rand(2, 3, 4, 5) >>> rgb = NormalsToRgb255() >>> output = rgb(input) # 2x3x4x5",
    "type": "class",
    "file_path": "kornia\\kornia\\color\\rgb.py",
    "ast_data": "ClassDef name:NormalsToRgb255 FunctionDef name:forward arg:self arg:image arguments arg arg Return return:yes Call"
  },
  {
    "library": "kornia",
    "name": "validate_tensor",
    "source_code": "def validate_tensor(self, input: Tensor) -> None:\n    raise NotImplementedError",
    "docstring": "Check if the input tensor is formatted as expected.",
    "type": "method",
    "file_path": "kornia\\kornia\\augmentation\\base.py",
    "ast_data": "FunctionDef name:validate_tensor arg:self arg:input arguments arg arg Raise"
  },
  {
    "library": "pytorch",
    "name": "forward",
    "source_code": "@torch.no_grad()\ndef forward(self, outputs, targets):\n    bs, num_queries = outputs['pred_logits'].shape[:2]\n    out_prob = outputs['pred_logits'].flatten(0, 1).softmax(-1)\n    out_bbox = outputs['pred_boxes'].flatten(0, 1)\n    tgt_ids = torch.cat([v['labels'] for v in targets])\n    tgt_bbox = torch.cat([v['boxes'] for v in targets])\n    cost_class = -out_prob[:, tgt_ids]\n    cost_bbox = torch.cdist(out_bbox, tgt_bbox, p=1)\n    cost_giou = -generalized_box_iou(box_cxcywh_to_xyxy(out_bbox), box_cxcywh_to_xyxy(tgt_bbox))\n    C = self.cost_bbox * cost_bbox + self.cost_class * cost_class + self.cost_giou * cost_giou\n    C = C.view(bs, num_queries, -1).cpu()\n    sizes = [len(v['boxes']) for v in targets]\n    if not scipy_available:\n        raise RuntimeError(\"The 'detr' model requires scipy to run. Please make sure you have it installed if you enable the 'detr' model.\")\n    indices = [linear_sum_assignment(c[i]) for i, c in enumerate(C.split(sizes, -1))]\n    return [(torch.as_tensor(i, dtype=torch.int64), torch.as_tensor(j, dtype=torch.int64)) for i, j in indices]",
    "docstring": "Performs the matching Params: outputs: This is a dict that contains at least these entries: \"pred_logits\": Tensor of dim [batch_size, num_queries, num_classes] with the classification logits \"pred_boxes\": Tensor of dim [batch_size, num_queries, 4] with the predicted box coordinates targets: This is a list of targets (len(targets) = batch_size), where each target is a dict containing: \"labels\": Tensor of dim [num_target_boxes] (where num_target_boxes is the number of ground-truth objects in the target) containing the class labels \"boxes\": Tensor of dim [num_target_boxes, 4] containing the target box coordinates Returns: A list of size batch_size, containing tuples of (index_i, index_j) where: - index_i is the indices of the selected predictions (in order) - index_j is the indices of the corresponding selected targets (in order) For each batch element, it holds: len(index_i) = len(index_j) = min(num_queries, num_target_boxes)",
    "type": "method",
    "file_path": "pytorch\\benchmarks\\functional_autograd_benchmark\\torchvision_models.py",
    "ast_data": "FunctionDef name:forward arg:self arg:outputs arg:targets arguments arg arg arg Assign Assign Call Call Assign Call Assign Call Assign Call Assign Assign Call Assign Call Call Call Assign Assign Call Call Assign Call If Raise Call Assign Call Call Call Return return:yes Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "conv3d",
    "source_code": "@dispatch.add_dispatch_support\n@doc_controls.do_not_generate_docs\ndef conv3d(x, kernel, strides=(1, 1, 1), padding='valid', data_format=None, dilation_rate=(1, 1, 1)):\n    if data_format is None:\n        data_format = image_data_format()\n    if data_format not in {'channels_first', 'channels_last'}:\n        raise ValueError('Unknown data_format: ' + str(data_format))\n    x, tf_data_format = _preprocess_conv3d_input(x, data_format)\n    padding = _preprocess_padding(padding)\n    x = nn.convolution(input=x, filter=kernel, dilation_rate=dilation_rate, strides=strides, padding=padding, data_format=tf_data_format)\n    if data_format == 'channels_first' and tf_data_format == 'NDHWC':\n        x = array_ops.transpose(x, (0, 4, 1, 2, 3))\n    return x",
    "docstring": "3D convolution. Args: x: Tensor or variable. kernel: kernel tensor. strides: strides tuple. padding: string, or . data_format: string, or . dilation_rate: tuple of 3 integers. Returns: A tensor, result of 3D convolution. Raises: ValueError: if is neither or .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\backend.py",
    "ast_data": "FunctionDef name:conv3d arg:x arg:kernel arg:strides arg:padding arg:data_format arg:dilation_rate arguments arg arg arg arg arg arg If Compare Assign Call If Compare Raise Call Call Assign Call Assign Call Assign Call If BoolOp Compare Compare Assign Call Return return:yes"
  },
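  {
    "editor_note": "Usage sketch added by the editor, not part of the original corpus: calling the backend `conv3d` above. The import path mirrors the record's file_path (tensorflow/python/keras/backend.py); this module is internal legacy Keras and may be unavailable in newer TensorFlow releases.",
    "example_code": "import tensorflow as tf\nfrom tensorflow.python.keras import backend as K\n\nx = tf.random.normal((1, 8, 8, 8, 3))       # NDHWC input, channels_last\nkernel = tf.random.normal((3, 3, 3, 3, 4))  # depth, height, width, in_ch, out_ch\ny = K.conv3d(x, kernel, padding='same', data_format='channels_last')\nprint(y.shape)  # (1, 8, 8, 8, 4)",
    "type": "usage_example"
  },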
  {
    "library": "numpy",
    "name": "_delimited_splitter",
    "source_code": "def _delimited_splitter(self, line):\n    if self.comments is not None:\n        line = line.split(self.comments)[0]\n    line = line.strip(' \\r\\n')\n    if not line:\n        return []\n    return line.split(self.delimiter)",
    "docstring": "Chop off comments, strip, and split at delimiter.",
    "type": "method",
    "file_path": "numpy\\numpy\\lib\\_iotools.py",
    "ast_data": "FunctionDef name:_delimited_splitter arg:self arg:line arguments arg arg If Compare Assign Call Assign Call If Return return:no Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "get_worker_info",
    "source_code": "def get_worker_info() -> Optional[WorkerInfo]:\n    return _worker_info",
    "docstring": "Returns the information about the current :class: iterator worker process. When called in a worker, this returns an object guaranteed to have the following attributes: * :attr:: the current worker id. * :attr:: the total number of workers. * :attr:: the random seed set for the current worker. This value is determined by main process RNG and the worker id. See :class:'s documentation for more details. * :attr:: the copy of the dataset object in **this** process. Note that this will be a different object in a different process than the one in the main process. When called in the main process, this returns `worker_init_fn~torch.utils.data.DataLoader` to seed other libraries used in dataset code.",
    "type": "function",
    "file_path": "pytorch\\torch\\utils\\data\\_utils\\worker.py",
    "ast_data": "FunctionDef name:get_worker_info arguments Return return:yes"
  },
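  {
    "editor_note": "Usage sketch added by the editor, not part of the original corpus: reading `get_worker_info()` inside a Dataset so each item records which worker produced it. The `Squares` dataset is an illustrative stand-in.",
    "example_code": "from torch.utils.data import DataLoader, Dataset, get_worker_info\n\nclass Squares(Dataset):\n    def __len__(self):\n        return 8\n\n    def __getitem__(self, i):\n        info = get_worker_info()  # None in the main process, WorkerInfo in a worker\n        wid = -1 if info is None else info.id\n        return i * i, wid\n\nif __name__ == '__main__':\n    for sq, wid in DataLoader(Squares(), num_workers=2):\n        print(sq.item(), wid.item())",
    "type": "usage_example"
  },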
  {
    "library": "cryptography",
    "name": "algorithm",
    "source_code": "@property\n@abc.abstractmethod\ndef algorithm(self) -> asym_utils.Prehashed | hashes.HashAlgorithm:\n    pass",
    "docstring": "The digest algorithm used with this signature.",
    "type": "method",
    "file_path": "cryptography\\src\\cryptography\\hazmat\\primitives\\asymmetric\\ec.py",
    "ast_data": "FunctionDef name:algorithm arg:self arguments arg"
  },
  {
    "library": "pytorch",
    "name": "_extract_dist_info",
    "source_code": "def _extract_dist_info(self) -> None:\n    process_group = None\n    device = None\n    for joinable in self._joinables:\n        if process_group is None:\n            process_group = joinable.join_process_group\n        elif process_group != joinable.join_process_group:\n            raise ValueError('Using join context manager with multiple process groups')\n        if device is None:\n            device = joinable.join_device\n    self._process_group = process_group\n    self._rank = dist.get_rank(self._process_group)\n    self._device = device",
    "docstring": "Extract the process group and device information from the joinables. If there are multiple joinables, then the context manager uses the first specified device. Preconditions: `` objects.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\algorithms\\join.py",
    "ast_data": "FunctionDef name:_extract_dist_info arg:self arguments arg Assign Assign For If Compare Assign If Compare Raise Call If Compare Assign Assign Assign Call Assign"
  },
  {
    "library": "scikit-learn",
    "name": "StationaryKernelMixin",
    "source_code": "class StationaryKernelMixin:\n\n    def is_stationary(self):\n        return True",
    "docstring": "Mixin for kernels which are stationary: k(X, Y)= f(X-Y). .. versionadded:: 0.18",
    "type": "class",
    "file_path": "scikit-learn\\sklearn\\gaussian_process\\kernels.py",
    "ast_data": "ClassDef name:StationaryKernelMixin FunctionDef name:is_stationary arg:self arguments arg Return return:yes"
  },
  {
    "library": "scipy",
    "name": "ThermistorResistance",
    "source_code": "class ThermistorResistance(LSQBenchmarkProblem):\n    INITIAL_GUESSES = [np.array([0.02, 4000.0, 250.0])]\n\n    def __init__(self, x0_ind):\n        super().__init__(3, 16, 87.94585, x0_ind)\n        self.t = 5 + 45 * (1 + np.arange(self.m, dtype=float))\n        self.y = np.array([34780.0, 28610.0, 23650.0, 19630.0, 16370.0, 13720.0, 11540.0, 9744.0, 8261.0, 7030.0, 6005.0, 5147.0, 4427.0, 3820.0, 3307.0, 2872.0])\n\n    def fun(self, x):\n        return x[0] * np.exp(x[1] / (self.t + x[2])) - self.y\n\n    def jac(self, x):\n        J = np.empty((self.m, self.n))\n        e = np.exp(x[1] / (self.t + x[2]))\n        J[:, 0] = e\n        J[:, 1] = x[0] / (self.t + x[2]) * e\n        J[:, 2] = -x[0] * x[1] * (self.t + x[2]) ** (-2) * e\n        return J",
    "docstring": "The problem of fitting thermistor parameters to data, [1]_. Number of variables --- 3, number of residuals --- 16, no bounds. .. [1] Brett M. Averick et al. \"The MINPACK-2 Test Problem Collection\", p. 28",
    "type": "class",
    "file_path": "scipy\\benchmarks\\benchmarks\\lsq_problems.py",
    "ast_data": "ClassDef name:ThermistorResistance Assign Call FunctionDef name:__init__ arg:self arg:x0_ind arguments arg arg Call Call Assign Call Assign Call FunctionDef name:fun arg:self arg:x arguments arg arg Return return:yes Call FunctionDef name:jac arg:self arg:x arguments arg arg Assign Call Assign Call Assign Assign Assign Return return:yes"
  },
  {
    "library": "sphinx",
    "name": "append_epilog",
    "source_code": "def append_epilog(content: StringList, epilog: str) -> None:\n    if epilog:\n        if len(content) > 0:\n            source, lineno = content.info(-1)\n            lineno = cast('int', lineno)\n        else:\n            source = '<generated>'\n            lineno = 0\n        content.append('', source, lineno + 1)\n        for lineno, line in enumerate(epilog.splitlines()):\n            content.append(line, '<rst_epilog>', lineno)",
    "docstring": "Append a string to content body as epilog.",
    "type": "function",
    "file_path": "sphinx\\sphinx\\util\\rst.py",
    "ast_data": "FunctionDef name:append_epilog arg:content arg:epilog arguments arg arg If If Compare Call Assign Call Assign Call Assign Assign Call For Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "add_pr_curve_raw",
    "source_code": "def add_pr_curve_raw(self, tag, true_positive_counts, false_positive_counts, true_negative_counts, false_negative_counts, precision, recall, global_step=None, num_thresholds=127, weights=None, walltime=None):\n    torch._C._log_api_usage_once('tensorboard.logging.add_pr_curve_raw')\n    self._get_file_writer().add_summary(pr_curve_raw(tag, true_positive_counts, false_positive_counts, true_negative_counts, false_negative_counts, precision, recall, num_thresholds, weights), global_step, walltime)",
    "docstring": "Add precision recall curve with raw data. Args: tag (str): Data identifier true_positive_counts (torch.Tensor, numpy.ndarray, or string/blobname): true positive counts false_positive_counts (torch.Tensor, numpy.ndarray, or string/blobname): false positive counts true_negative_counts (torch.Tensor, numpy.ndarray, or string/blobname): true negative counts false_negative_counts (torch.Tensor, numpy.ndarray, or string/blobname): false negative counts precision (torch.Tensor, numpy.ndarray, or string/blobname): precision recall (torch.Tensor, numpy.ndarray, or string/blobname): recall global_step (int): Global step value to record num_thresholds (int): Number of thresholds used to draw the curve. walltime (float): Optional override default walltime (time.time()) seconds after epoch of event see:",
    "type": "method",
    "file_path": "pytorch\\torch\\utils\\tensorboard\\writer.py",
    "ast_data": "FunctionDef name:add_pr_curve_raw arg:self arg:tag arg:true_positive_counts arg:false_positive_counts arg:true_negative_counts arg:false_negative_counts arg:precision arg:recall arg:global_step arg:num_thresholds arg:weights arg:walltime arguments arg arg arg arg arg arg arg arg arg arg arg arg Call Call Call Call"
  },
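  {
    "editor_note": "Usage sketch added by the editor, not part of the original corpus: logging a raw PR curve where every array holds one value per threshold bucket, so their length equals num_thresholds. The counts are made-up illustrative numbers.",
    "example_code": "import numpy as np\nfrom torch.utils.tensorboard import SummaryWriter\n\n# One entry per threshold bucket; precision/recall must be consistent with the counts.\ntp = np.array([75, 64, 21, 5, 0])\nfp = np.array([150, 105, 18, 0, 0])\ntn = np.array([0, 45, 132, 150, 150])\nfn = np.array([0, 11, 54, 70, 75])\nprecision = tp / np.maximum(tp + fp, 1)\nrecall = tp / np.maximum(tp + fn, 1)\n\nwriter = SummaryWriter()\nwriter.add_pr_curve_raw('pr', tp, fp, tn, fn, precision, recall, num_thresholds=5)\nwriter.close()",
    "type": "usage_example"
  },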
  {
    "library": "django",
    "name": "max",
    "source_code": "@property\ndef max(self):\n    return self.statistics()[1]",
    "docstring": "Return the maximum pixel value for this band.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\gdal\\raster\\band.py",
    "ast_data": "FunctionDef name:max arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "_preprocess_for_cut",
    "source_code": "def _preprocess_for_cut(x) -> Index:\n    ndim = getattr(x, 'ndim', None)\n    if ndim is None:\n        x = np.asarray(x)\n    if x.ndim != 1:\n        raise ValueError('Input array must be 1 dimensional')\n    return Index(x)",
    "docstring": "handles preprocessing for cut where we convert passed input to array, strip the index information and store it separately",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\reshape\\tile.py",
    "ast_data": "FunctionDef name:_preprocess_for_cut arg:x arguments arg Assign Call If Compare Assign Call If Compare Raise Call Return return:yes Call"
  },
  {
    "library": "django",
    "name": "safe_join",
    "source_code": "def safe_join(base, *paths):\n    final_path = abspath(join(base, *paths))\n    base_path = abspath(base)\n    if not normcase(final_path).startswith(normcase(base_path + sep)) and normcase(final_path) != normcase(base_path) and (dirname(normcase(base_path)) != normcase(base_path)):\n        raise SuspiciousFileOperation('The joined path ({}) is located outside of the base path component ({})'.format(final_path, base_path))\n    return final_path",
    "docstring": "Join one or more path components to the base path component intelligently. Return a normalized, absolute version of the final path. Raise SuspiciousFileOperation if the final path isn't located inside of the base path component.",
    "type": "function",
    "file_path": "django\\django\\utils\\_os.py",
    "ast_data": "FunctionDef name:safe_join arg:base arguments arg arg Assign Call Call Assign Call If BoolOp Call Call Call Compare Call Call Compare Call Call Call Raise Call Call Return return:yes"
  },
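  {
    "editor_note": "Usage sketch added by the editor, not part of the original corpus: joining under a base path and catching a traversal attempt. POSIX-style paths are assumed, and `django.utils._os` is a private module.",
    "example_code": "from django.core.exceptions import SuspiciousFileOperation\nfrom django.utils._os import safe_join\n\nprint(safe_join('/srv/media', 'avatars', 'u1.png'))  # /srv/media/avatars/u1.png\ntry:\n    safe_join('/srv/media', '..', 'secrets.txt')  # resolves outside the base\nexcept SuspiciousFileOperation as exc:\n    print(exc)",
    "type": "usage_example"
  },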
  {
    "library": "tensorflow",
    "name": "copy_tensor",
    "source_code": "def copy_tensor(composite_tensor_obj):\n    if isinstance(composite_tensor_obj, input_lib.DistributedIterator):\n        return composite_tensor_obj\n    with ops.device('/job:%s' % context.get_server_def().job_name):\n        return array_ops.identity(composite_tensor_obj)",
    "docstring": "Copy a remote tensor to local (coordinator).",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\coordinator\\values.py",
    "ast_data": "FunctionDef name:copy_tensor arg:composite_tensor_obj arguments arg If Call Return return:yes With Call Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_ConversionContext",
    "source_code": "class _ConversionContext(enum.Enum):\n    VALUE = 1\n    SPEC = 2\n    DEFAULT = 3",
    "docstring": "Enum to indicate what kind of value is being converted. Used by and and their helper methods.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\extension_type_field.py",
    "ast_data": "ClassDef name:_ConversionContext Assign Assign Assign"
  },
  {
    "library": "tensorflow",
    "name": "save",
    "source_code": "def save(self, representative_dataset: RepresentativeDatasetMapping) -> Mapping[str, _RepresentativeDatasetFile]:\n    raise NotImplementedError('Method \"save\" is not implemented.')",
    "docstring": "Saves the representative dataset. Args: representative_dataset: RepresentativeDatasetMapping which is a signature_def_key -> representative dataset mapping.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\compiler\\mlir\\quantization\\tensorflow\\python\\representative_dataset.py",
    "ast_data": "FunctionDef name:save arg:self arg:representative_dataset arguments arg arg Raise Call"
  },
  {
    "library": "sphinx",
    "name": "_make_id",
    "source_code": "def _make_id(string: str) -> str:\n    id = string.translate(_non_id_translate_digraphs)\n    id = id.translate(_non_id_translate)\n    id = unicodedata.normalize('NFKD', id).encode('ascii', 'ignore').decode('ascii')\n    id = _non_id_chars.sub('-', ' '.join(id.split()))\n    id = _non_id_at_ends.sub('', id)\n    return str(id)",
    "docstring": "Convert into an identifier and return it. This function is a modified version of `` of docutils-0.16. Changes: * Allow to use capital alphabet characters * Allow to use dots (\".\") and underscores (\"_\") for an identifier without a leading character. # Author: David Goodger # Maintainer: docutils-develop@lists.sourceforge.net # Copyright: This module has been placed in the public domain.",
    "type": "function",
    "file_path": "sphinx\\sphinx\\util\\nodes.py",
    "ast_data": "FunctionDef name:_make_id arg:string arguments arg Assign Call Assign Call Assign Call Call Call Assign Call Call Call Assign Call Return return:yes Call"
  },
  {
    "library": "django",
    "name": "hasz",
    "source_code": "@property\ndef hasz(self):\n    return self._z",
    "docstring": "Return whether this coordinate sequence is 3D. This property value is inherited from the parent Geometry.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\geos\\coordseq.py",
    "ast_data": "FunctionDef name:hasz arg:self arguments arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "SharedResource",
    "source_code": "class SharedResource:\n\n    def __init__(self, is_debug_mode: bool=False) -> None:\n        self._data_list: list[UsageData] = []\n        self._data_errors: list[str] = []\n        self._data_logs: list[str] = []\n        self._lock = threading.Lock()\n\n    def get_and_reset(self) -> tuple[list[UsageData], list[str], list[str]]:\n        copy_data = []\n        copy_errors = []\n        copy_logs = []\n        with self._lock:\n            copy_data = copy.deepcopy(self._data_list)\n            copy_errors = copy.deepcopy(self._data_errors)\n            copy_logs = copy.deepcopy(self._data_logs)\n            self._data_list.clear()\n            self._data_errors.clear()\n            self._data_logs.clear()\n        return (copy_data, copy_errors, copy_logs)\n\n    def add_data(self, data: UsageData) -> None:\n        with self._lock:\n            self._data_list.append(data)\n\n    def add_error(self, error: Exception) -> None:\n        with self._lock:\n            self._data_errors.append(str(error))\n\n    def add_log(self, log: str) -> None:\n        with self._lock:\n            print('here log')\n            self._data_logs.append(log)",
    "docstring": "thread-safe utils for shared resources used in both worker processor and main processor during UsageLogger. It collects the usage data or errors from the worker processor, and output the aggregated data or errors to the main processor for logging.",
    "type": "class",
    "file_path": "pytorch\\tools\\stats\\monitor.py",
    "ast_data": "ClassDef name:SharedResource FunctionDef name:__init__ arg:self arg:is_debug_mode arguments arg arg Assign Call FunctionDef name:get_and_reset arg:self arguments arg Assign Assign Assign With Assign Call Assign Call Assign Call Call Call Call Return return:yes FunctionDef name:add_data arg:self arg:data arguments arg arg With Call FunctionDef name:add_error arg:self arg:error arguments arg arg With Call Call FunctionDef name:add_log arg:self arg:log arguments arg arg With Call Call"
  },
  {
    "library": "matplotlib",
    "name": "PatchCollection",
    "source_code": "class PatchCollection(Collection):\n\n    def __init__(self, patches, *, match_original=False, **kwargs):\n        if match_original:\n\n            def determine_facecolor(patch):\n                if patch.get_fill():\n                    return patch.get_facecolor()\n                return [0, 0, 0, 0]\n            kwargs['facecolors'] = [determine_facecolor(p) for p in patches]\n            kwargs['edgecolors'] = [p.get_edgecolor() for p in patches]\n            kwargs['linewidths'] = [p.get_linewidth() for p in patches]\n            kwargs['linestyles'] = [p.get_linestyle() for p in patches]\n            kwargs['antialiaseds'] = [p.get_antialiased() for p in patches]\n        super().__init__(**kwargs)\n        self.set_paths(patches)\n\n    def set_paths(self, patches):\n        paths = [p.get_transform().transform_path(p.get_path()) for p in patches]\n        self._paths = paths",
    "docstring": "A generic collection of patches. PatchCollection draws faster than a large number of equivalent individual Patches. It also makes it easier to assign a colormap to a heterogeneous collection of patches.",
    "type": "class",
    "file_path": "matplotlib\\lib\\matplotlib\\collections.py",
    "ast_data": "ClassDef name:PatchCollection FunctionDef name:__init__ arg:self arg:patches arguments arg arg arg arg If FunctionDef name:determine_facecolor arg:patch arguments arg If Call Return return:yes Call Return return:yes Assign Call Assign Call Assign Call Assign Call Assign Call Call Call Call FunctionDef name:set_paths arg:self arg:patches arguments arg arg Assign Call Call Call Assign"
  },
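  {
    "editor_note": "Usage sketch added by the editor, not part of the original corpus: mapping one scalar per patch through a colormap, which is the main convenience PatchCollection adds over drawing individual patches.",
    "example_code": "import numpy as np\nimport matplotlib.pyplot as plt\nfrom matplotlib.collections import PatchCollection\nfrom matplotlib.patches import Circle\n\nfig, ax = plt.subplots()\npatches = [Circle((i, i), radius=0.3) for i in range(5)]\ncol = PatchCollection(patches, cmap='viridis')\ncol.set_array(np.arange(5))  # one scalar per patch, mapped through the colormap\nax.add_collection(col)\nax.set_xlim(-1, 5)\nax.set_ylim(-1, 5)\nplt.show()",
    "type": "usage_example"
  },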
  {
    "library": "tensorflow",
    "name": "update_state",
    "source_code": "def update_state(self, data):\n    raise NotImplementedError",
    "docstring": "Accumulates statistics for the preprocessing layer. Arguments: data: A mini-batch of inputs to the layer.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\base_preprocessing_layer.py",
    "ast_data": "FunctionDef name:update_state arg:self arg:data arguments arg arg Raise"
  },
  {
    "library": "scipy",
    "name": "splprep",
    "source_code": "def splprep(x, w=None, u=None, ub=None, ue=None, k=3, task=0, s=None, t=None, full_output=0, nest=None, per=0, quiet=1):\n    res = _impl.splprep(x, w, u, ub, ue, k, task, s, t, full_output, nest, per, quiet)\n    return res",
    "docstring": "Find the B-spline representation of an N-D curve. .. legacy:: function Specifically, we recommend using in new code. Given a list of N rank-1 arrays, , which represent a curve in N-dimensional space parametrized by , find a smooth approximating spline curve g(). Uses the FORTRAN routine parcur from FITPACK. Parameters ---------- x : array_like A list of sample vector arrays representing the curve. w : array_like, optional Strictly positive rank-1 array of weights the same length as . The weights are used in computing the weighted least-squares spline fit. If the errors in the values have standard-deviation given by the vector d, then should be 1/d. Default is `x[i]x[i-1]k`, is generated automatically. Now plot the result: >>> import matplotlib.pyplot as plt >>> fig, ax = plt.subplots() >>> ax.plot(x, y, 'ro') >>> ax.plot(new_points[0], new_points[1], 'r-') >>> plt.show()",
    "type": "function",
    "file_path": "scipy\\scipy\\interpolate\\_fitpack_py.py",
    "ast_data": "FunctionDef name:splprep arg:x arg:w arg:u arg:ub arg:ue arg:k arg:task arg:s arg:t arg:full_output arg:nest arg:per arg:quiet arguments arg arg arg arg arg arg arg arg arg arg arg arg arg Assign Call Return return:yes"
  },
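  {
    "editor_note": "Usage sketch added by the editor, not part of the original corpus: fitting a parametric spline to points on a circle and evaluating it with `splev`; this produces the `new_points` used in the plotting snippet at the end of the docstring above.",
    "example_code": "import numpy as np\nfrom scipy.interpolate import splprep, splev\n\ntheta = np.linspace(0, 2 * np.pi, 25)\nx, y = np.cos(theta), np.sin(theta)\ntck, u = splprep([x, y], s=0)                    # interpolating parametric spline\nnew_points = splev(np.linspace(0, 1, 200), tck)  # 200 points along the fitted curve",
    "type": "usage_example"
  },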
  {
    "library": "numpy",
    "name": "pyrex_sources",
    "source_code": "def pyrex_sources(self, sources, extension):\n    new_sources = []\n    ext_name = extension.name.split('.')[-1]\n    for source in sources:\n        base, ext = os.path.splitext(source)\n        if ext == '.pyx':\n            target_file = self.generate_a_pyrex_source(base, ext_name, source, extension)\n            new_sources.append(target_file)\n        else:\n            new_sources.append(source)\n    return new_sources",
    "docstring": "Pyrex not supported; this remains for Cython support (see below)",
    "type": "method",
    "file_path": "numpy\\numpy\\distutils\\command\\build_src.py",
    "ast_data": "FunctionDef name:pyrex_sources arg:self arg:sources arg:extension arguments arg arg arg Assign Assign Call For Assign Call If Compare Assign Call Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_lower_get_tensor_info_op",
    "source_code": "def _lower_get_tensor_info_op(model: GraphModule):\n    for n in model.graph.nodes:\n        if not is_get_tensor_info_node(n):\n            continue\n        maybe_dq = n.args[0]\n        if maybe_dq.op != 'call_method' or maybe_dq.target != 'dequantize':\n            continue\n        args = list(n.args)\n        args[0] = n.args[0].args[0]\n        n.args = tuple(args)",
    "docstring": "Modified the graph of the model inplace, to skip extra dequantize op before the general tensor shape ops when possible",
    "type": "function",
    "file_path": "pytorch\\torch\\ao\\quantization\\fx\\_lower_to_native_backend.py",
    "ast_data": "FunctionDef name:_lower_get_tensor_info_op arg:model arguments arg For If Call Assign If BoolOp Compare Compare Assign Call Assign Assign Call"
  },
  {
    "library": "kornia",
    "name": "project",
    "source_code": "def project(self, point_3d: Tensor) -> Tensor:\n    P = self.intrinsics @ self.extrinsics\n    return convert_points_from_homogeneous(transform_points(P, point_3d))",
    "docstring": "Project a 3d point in world coordinates onto the 2d camera plane. Args: point_3d: tensor containing the 3d points to be projected to the camera plane. The shape of the tensor can be :math:. Returns: tensor of (u, v) cam coordinates with shape :math:. Example: >>> _ = torch.manual_seed(0) >>> X = torch.rand(1, 3) >>> K = torch.eye(4)[None] >>> E = torch.eye(4)[None] >>> h = torch.ones(1) >>> w = torch.ones(1) >>> pinhole = kornia.geometry.camera.PinholeCamera(K, E, h, w) >>> pinhole.project(X) tensor([[5.6088, 8.6827]])",
    "type": "method",
    "file_path": "kornia\\kornia\\geometry\\camera\\pinhole.py",
    "ast_data": "FunctionDef name:project arg:self arg:point_3d arguments arg arg Assign Return return:yes Call Call"
  },
  {
    "library": "scipy",
    "name": "tosparse",
    "source_code": "def tosparse(self):\n    N = len(self.grid_shape)\n    p = np.prod(self.grid_shape)\n    L = dia_array((p, p), dtype=np.int8)\n    for i in range(N):\n        dim = self.grid_shape[i]\n        data = np.ones([3, dim], dtype=np.int8)\n        data[1, :] *= -2\n        if self.boundary_conditions == 'neumann':\n            data[1, 0] = -1\n            data[1, -1] = -1\n        L_i = dia_array((data, [-1, 0, 1]), shape=(dim, dim), dtype=np.int8)\n        if self.boundary_conditions == 'periodic':\n            t = dia_array((dim, dim), dtype=np.int8)\n            t.setdiag([1], k=-dim + 1)\n            t.setdiag([1], k=dim - 1)\n            L_i += t\n        for j in range(i):\n            L_i = kron(eye_array(self.grid_shape[j], dtype=np.int8), L_i)\n        for j in range(i + 1, N):\n            L_i = kron(L_i, eye_array(self.grid_shape[j], dtype=np.int8))\n        L += L_i\n    return L.astype(self.dtype)",
    "docstring": "Constructs a sparse array from the Laplacian data. The returned sparse array format is dependent on the selected boundary conditions. Returns ------- L : scipy.sparse.sparray The shape is ``.",
    "type": "method",
    "file_path": "scipy\\scipy\\sparse\\linalg\\_special_sparse_arrays.py",
    "ast_data": "FunctionDef name:tosparse arg:self arguments arg Assign Call Assign Call Assign Call For Call Assign Assign Call If Compare Assign Assign Assign Call If Compare Assign Call Call Call For Call Assign Call Call For Call Assign Call Call Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "sokalmichener",
    "source_code": "@_deprecated_sokalmichener\ndef sokalmichener(u, v, w=None):\n    u = _validate_vector(u)\n    v = _validate_vector(v)\n    if w is not None:\n        w = _validate_weights(w)\n    nff, nft, ntf, ntt = _nbool_correspond_all(u, v, w=w)\n    return float(2.0 * (ntf + nft)) / float(ntt + nff + 2.0 * (ntf + nft))",
    "docstring": "Compute the Sokal-Michener dissimilarity between two boolean 1-D arrays. .. deprecated:: 1.15.0 This function is deprecated and will be removed in SciPy 1.17.0. Replace usage of `uvc_{ij}\\mathtt{u[k]} = i\\mathtt{v[k]} = jk >> from scipy.spatial import distance >>> distance.sokalmichener([1, 0, 0], [0, 1, 0]) 0.8 >>> distance.sokalmichener([1, 0, 0], [1, 1, 0]) 0.5 >>> distance.sokalmichener([1, 0, 0], [2, 0, 0]) -1.0",
    "type": "function",
    "file_path": "scipy\\scipy\\spatial\\distance.py",
    "ast_data": "FunctionDef name:sokalmichener arg:u arg:v arg:w arguments arg arg arg Assign Call Assign Call If Compare Assign Call Assign Call Return return:yes Call Call"
  },
  {
    "library": "kornia",
    "name": "identity_matrix",
    "source_code": "def identity_matrix(self, input: Tensor) -> Tensor:\n    return eye_like(3, input)",
    "docstring": "Return identity matrix.",
    "type": "method",
    "file_path": "kornia\\kornia\\augmentation\\auto\\operations\\policy.py",
    "ast_data": "FunctionDef name:identity_matrix arg:self arg:input arguments arg arg Return return:yes Call"
  },
  {
    "library": "pygame",
    "name": "copy",
    "source_code": "def copy(self):\n    return self.__class__(self.sprites())",
    "docstring": "copy a group with all the same sprites Group.copy(): return Group Returns a copy of the group that is an instance of the same class and has the same sprites in it.",
    "type": "method",
    "file_path": "pygame\\src_py\\sprite.py",
    "ast_data": "FunctionDef name:copy arg:self arguments arg Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "ChunkStorageMetadata",
    "source_code": "@dataclass\nclass ChunkStorageMetadata:\n    offsets: torch.Size\n    sizes: torch.Size",
    "docstring": "Each chunk is expected to have the same properties of the TensorStorageMetadata that includes it.",
    "type": "class",
    "file_path": "pytorch\\torch\\distributed\\checkpoint\\metadata.py",
    "ast_data": "ClassDef name:ChunkStorageMetadata"
  },
  {
    "library": "tensorflow",
    "name": "_ragged_gather_grad",
    "source_code": "@ops.RegisterGradient('RaggedGather')\ndef _ragged_gather_grad(op, *grads):\n    param_nested_splits = op.inputs[:-2]\n    param_inner_values = op.inputs[-2]\n    indices = op.inputs[-1]\n    grad_inner_values = grads[-1]\n    combined_splits = param_nested_splits[0]\n    for row_splits in param_nested_splits[1:]:\n        combined_splits = array_ops.gather(row_splits, combined_splits)\n    flat_indices = array_ops.reshape(indices, [-1])\n    grad_indices = ragged_math_ops.range(array_ops.gather(combined_splits, flat_indices), array_ops.gather(combined_splits[1:], flat_indices)).values\n    param_inner_values_grad = indexed_slices.IndexedSlices(values=grad_inner_values, indices=grad_indices, dense_shape=array_ops.shape(param_inner_values))\n    return [None for _ in param_nested_splits] + [param_inner_values_grad, None]",
    "docstring": "Gradient for RaggedGather op.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\ragged_gather_ops.py",
    "ast_data": "FunctionDef name:_ragged_gather_grad arg:op arguments arg arg Assign Assign Assign Assign Assign For Assign Call Assign Call Assign Call Call Call Assign Call Call Return return:yes Call"
  },
  {
    "library": "django",
    "name": "is_django_path",
    "source_code": "def is_django_path(path):\n    return Path(django.__file__).parent in Path(path).parents",
    "docstring": "Return True if the given file path is nested under Django.",
    "type": "function",
    "file_path": "django\\django\\utils\\autoreload.py",
    "ast_data": "FunctionDef name:is_django_path arg:path arguments arg Return return:yes Compare Call Call"
  },
  {
    "library": "pandas",
    "name": "period_range",
    "source_code": "def period_range(start=None, end=None, periods: int | None=None, freq=None, name: Hashable | None=None) -> PeriodIndex:\n    if com.count_not_none(start, end, periods) != 2:\n        raise ValueError('Of the three parameters: start, end, and periods, exactly two must be specified')\n    if freq is None and (not isinstance(start, Period) and (not isinstance(end, Period))):\n        freq = 'D'\n    data, freq = PeriodArray._generate_range(start, end, periods, freq)\n    dtype = PeriodDtype(freq)\n    data = PeriodArray(data, dtype=dtype)\n    return PeriodIndex(data, name=name)",
    "docstring": "Return a fixed frequency PeriodIndex. The day (calendar) is the default frequency. Parameters ---------- start : str, datetime, date, pandas.Timestamp, or period-like, default None Left bound for generating periods. end : str, datetime, date, pandas.Timestamp, or period-like, default None Right bound for generating periods. periods : int, default None Number of periods to generate. freq : str or DateOffset, optional Frequency alias. By default the freq is taken from or if those are Period objects. Otherwise, the default is `this link` constructor. >>> pd.period_range( ... start=pd.Period(\"2017Q1\", freq=\"Q\"), ... end=pd.Period(\"2017Q2\", freq=\"Q\"), ... freq=\"M\", ... ) PeriodIndex(['2017-03', '2017-04', '2017-05', '2017-06'], dtype='period[M]')",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\indexes\\period.py",
    "ast_data": "FunctionDef name:period_range arg:start arg:end arg:periods arg:freq arg:name arguments arg arg arg arg arg If Compare Call Raise Call If BoolOp Compare BoolOp Call Call Assign Assign Call Assign Call Assign Call Return return:yes Call"
  },
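  {
    "editor_note": "Usage sketch added by the editor, not part of the original corpus: the two-of-three rule from the docstring above, specifying start and periods and taking the frequency from the alias.",
    "example_code": "import pandas as pd\n\nidx = pd.period_range(start='2024-01', periods=3, freq='M')\nprint(idx)  # PeriodIndex(['2024-01', '2024-02', '2024-03'], dtype='period[M]')",
    "type": "usage_example"
  },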
  {
    "library": "pytorch",
    "name": "SubprocException",
    "source_code": "class SubprocException(Exception):\n\n    def __init__(self, details: str) -> None:\n        super().__init__(f'An exception occurred in a subprocess:\\n\\n{details}')",
    "docstring": "Thrown when a job in a subprocess raises an Exception.",
    "type": "class",
    "file_path": "pytorch\\torch\\_inductor\\compile_worker\\subproc_pool.py",
    "ast_data": "ClassDef name:SubprocException FunctionDef name:__init__ arg:self arg:details arguments arg arg Call Call"
  },
  {
    "library": "authlib",
    "name": "deserialize",
    "source_code": "def deserialize(self, obj, key, decode=None, sender_key=None):\n    if isinstance(obj, dict):\n        return self.deserialize_json(obj, key, decode, sender_key)\n    obj = to_bytes(obj)\n    if obj.startswith(b'{') and obj.endswith(b'}'):\n        return self.deserialize_json(obj, key, decode, sender_key)\n    return self.deserialize_compact(obj, key, decode, sender_key)",
    "docstring": "Extract a JWE Serialization. It supports both compact and JSON serialization. :param obj: JWE compact serialization as bytes or JWE JSON serialization as dict or str :param key: Private key used to decrypt payload (optionally can be a tuple of kid and essentially key) :param decode: Function to decode payload data :param sender_key: Sender's public key in case JWEAlgorithmWithTagAwareKeyAgreement is used :return: dict with and keys",
    "type": "method",
    "file_path": "authlib\\authlib\\jose\\rfc7516\\jwe.py",
    "ast_data": "FunctionDef name:deserialize arg:self arg:obj arg:key arg:decode arg:sender_key arguments arg arg arg arg arg If Call Return return:yes Call Assign Call If BoolOp Call Call Return return:yes Call Return return:yes Call"
  },
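  {
    "editor_note": "Usage sketch added by the editor, not part of the original corpus: a compact-serialization round trip through `JsonWebEncryption`, where `deserialize` detects the compact form automatically. The 32-byte key and alg/enc pair are illustrative, and a cryptography backend must be installed.",
    "example_code": "from authlib.jose import JsonWebEncryption\n\njwe = JsonWebEncryption()\nkey = b'0' * 32  # 256-bit key for A256KW key wrapping (illustrative)\ntoken = jwe.serialize_compact({'alg': 'A256KW', 'enc': 'A256GCM'}, b'hello', key)\ndata = jwe.deserialize(token, key)  # compact form is detected automatically\nprint(data['header'], data['payload'])  # {...} b'hello'",
    "type": "usage_example"
  },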
  {
    "library": "pytorch",
    "name": "MemoryEstimate",
    "source_code": "@dataclasses.dataclass\nclass MemoryEstimate:\n    reads: dict[str, OrderedSet[MemoryDep]] = dataclasses.field(default_factory=functools.partial(collections.defaultdict, OrderedSet))\n    writes: dict[str, OrderedSet[MemoryDep]] = dataclasses.field(default_factory=functools.partial(collections.defaultdict, OrderedSet))\n\n    def remove(self, name: str) -> None:\n        self.reads.pop(name, None)\n        self.writes.pop(name, None)\n\n    def __bool__(self) -> bool:\n        return bool(self.reads or self.writes)\n\n    def __repr__(self) -> str:\n        return f'MemoryEstimate(\\n            reads={[*itertools.chain.from_iterable(self.reads.values())]!r},\\n            writes={[*itertools.chain.from_iterable(self.writes.values())]!r}\\n        )'",
    "docstring": "Tracks the memory usage of a single loop in the generated kernel",
    "type": "class",
    "file_path": "pytorch\\torch\\_inductor\\codegen\\simd_kernel_features.py",
    "ast_data": "ClassDef name:MemoryEstimate Call Call Call Call FunctionDef name:remove arg:self arg:name arguments arg arg Call Call FunctionDef name:__bool__ arg:self arguments arg Return return:yes Call BoolOp FunctionDef name:__repr__ arg:self arguments arg Return return:yes Call Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "freeze",
    "source_code": "def freeze(dynamo_gm: torch.fx.GraphModule, aot_autograd_gm: torch.fx.GraphModule, example_inputs: list[torch._subclasses.FakeTensor]) -> tuple[torch.fx.GraphModule, list[int]]:\n    with enter_freezing():\n        return _freeze(dynamo_gm, aot_autograd_gm, example_inputs)",
    "docstring": "Inlines parameters that are not mutated into constants and optimizes the graph through constant propagation and other techniques. If enabled, the function also discards the original parameters of the module for memory efficiency. Assumes that this function is run in dynamo tracing post aot_autograd. Args: dynamo_gm (torch.fx.GraphModule): The Dynamo constructed GraphModule. aot_autograd_gm (torch.fx.GraphModule): The aot_autograd constructed GraphModule to be frozen. example_inputs (List[torch.Tensor]): A list of example input tensors to be used in the freezing process. Returns: Tuple[torch.fx.GraphModule, List[int]]: A tuple containing the frozen GraphModule and a list of indices of the inputs that were preserved (not turned into constants).",
    "type": "function",
    "file_path": "pytorch\\torch\\_inductor\\freezing.py",
    "ast_data": "FunctionDef name:freeze arg:dynamo_gm arg:aot_autograd_gm arg:example_inputs arguments arg arg arg With Call Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "__init__",
    "source_code": "def __init__(self, byhour=None, interval=1, tz=None):\n    if byhour is None:\n        byhour = range(24)\n    rule = rrulewrapper(HOURLY, byhour=byhour, interval=interval, byminute=0, bysecond=0)\n    super().__init__(rule, tz=tz)",
    "docstring": "Parameters ---------- byhour : int or list of int, default: all hours Ticks will be placed on every hour in *byhour*. Default is `~datetime.tzinfotimezonedateutil.tz`.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\dates.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:byhour arg:interval arg:tz arguments arg arg arg arg If Compare Assign Call Assign Call Call Call"
  },
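  {
    "editor_note": "Usage sketch added by the editor, not part of the original corpus: the record above is inferred to be `HourLocator.__init__` from matplotlib.dates (it builds an HOURLY rrule); this places a tick every sixth hour over a two-day span.",
    "example_code": "import datetime as dt\nimport matplotlib.pyplot as plt\nimport matplotlib.dates as mdates\n\ntimes = [dt.datetime(2024, 1, 1) + dt.timedelta(hours=h) for h in range(48)]\nfig, ax = plt.subplots()\nax.plot(times, range(48))\nax.xaxis.set_major_locator(mdates.HourLocator(byhour=range(0, 24, 6)))  # every 6 hours\nax.xaxis.set_major_formatter(mdates.DateFormatter('%H:%M'))\nplt.show()",
    "type": "usage_example"
  },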
  {
    "library": "tensorflow",
    "name": "_set_read_only_resource_inputs_attr",
    "source_code": "def _set_read_only_resource_inputs_attr(op, branch_graphs):\n    read_only_indices = set(range(len(op.inputs) - 1))\n    for branch_graph in branch_graphs:\n        assert len(branch_graph.inputs) == len(op.inputs) - 1, 'should never happen'\n        if not read_only_indices:\n            break\n        branch_read_only_indices = acd.get_read_only_resource_input_indices_graph(branch_graph)\n        read_only_indices = read_only_indices.intersection(branch_read_only_indices)\n    read_only_indices = [i + 1 for i in read_only_indices]\n    ops.set_int_list_attr(op, acd.READ_ONLY_RESOURCE_INPUTS_ATTR, sorted(read_only_indices))",
    "docstring": "Sets the list of resource inputs which are read-only. This is used by AutomaticControlDependencies. Args: op: If or Case Operation. branch_graphs: List of branch FuncGraphs.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\cond_v2.py",
    "ast_data": "FunctionDef name:_set_read_only_resource_inputs_attr arg:op arg:branch_graphs arguments arg arg Assign Call Call Call For Compare Call Call If Assign Call Assign Call Assign Call Call"
  },
  {
    "library": "pytorch",
    "name": "_register_post_backward_final_callback",
    "source_code": "@no_type_check\ndef _register_post_backward_final_callback(state: _FSDPState, module: nn.Module) -> None:\n    _p_assert(state._is_root, 'Only the root FSDP instance should register the post-backward callback')\n    if state._post_backward_callback_queued:\n        return\n    _assert_in_training_states(state, [TrainingState.IDLE])\n    if not torch.distributed._functional_collectives.is_torchdynamo_compiling():\n        state._post_backward_callback_queued = True\n        Variable._execution_engine.queue_callback(functools.partial(_post_backward_final_callback, state, module))",
    "docstring": "Registers the post-backward final callback that runs at the end of the backward pass. This should be called from the root FSDP instance at the beginning of the pre-backward.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\fsdp\\_runtime_utils.py",
    "ast_data": "FunctionDef name:_register_post_backward_final_callback arg:state arg:module arguments arg arg Call If Return return:no Call If Call Assign Call Call"
  },
  {
    "library": "matplotlib",
    "name": "is_delim",
    "source_code": "def is_delim(self):\n    return False",
    "docstring": "Is this a delimiter token?",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\_type1font.py",
    "ast_data": "FunctionDef name:is_delim arg:self arguments arg Return return:yes"
  },
  {
    "library": "pandas",
    "name": "putmask_inplace",
    "source_code": "def putmask_inplace(values: ArrayLike, mask: npt.NDArray[np.bool_], value: Any) -> None:\n    if not isinstance(values, np.ndarray) or (values.dtype == object and (not lib.is_scalar(value))) or (isinstance(value, np.ndarray) and (not np.can_cast(value.dtype, values.dtype))):\n        if is_list_like(value) and len(value) == len(values):\n            values[mask] = value[mask]\n        else:\n            values[mask] = value\n    else:\n        np.putmask(values, mask, value)",
    "docstring": "ExtensionArray-compatible implementation of np.putmask. The main difference is we do not handle repeating or truncating like numpy. Parameters ---------- values: np.ndarray or ExtensionArray mask : np.ndarray[bool] We assume extract_bool_array has already been called. value : Any",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\array_algos\\putmask.py",
    "ast_data": "FunctionDef name:putmask_inplace arg:values arg:mask arg:value arguments arg arg arg If BoolOp Call BoolOp Compare Call BoolOp Call Call If BoolOp Call Compare Call Call Assign Assign Call"
  },
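A minimal standalone sketch of the masking semantics described in the `putmask_inplace` entry above, using plain NumPy rather than the pandas internals; the array values are illustrative only:

```python
import numpy as np

values = np.array([1.0, 2.0, 3.0, 4.0])
mask = np.array([True, False, True, False])

# Scalar replacement: same as the np.putmask fast path above.
np.putmask(values, mask, 0.0)
print(values)  # [0. 2. 0. 4.]

# List-like replacement of matching length: the aligned-assignment branch,
# values[mask] = value[mask], with no numpy-style repeating or truncating.
values = np.array([1.0, 2.0, 3.0, 4.0])
repl = np.array([10.0, 20.0, 30.0, 40.0])
values[mask] = repl[mask]
print(values)  # [10.  2. 30.  4.]
```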
  {
    "library": "django",
    "name": "dims",
    "source_code": "@property\ndef dims(self):\n    return capi.cs_getdims(self.ptr, byref(c_uint()))",
    "docstring": "Return the dimensions of this coordinate sequence.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\geos\\coordseq.py",
    "ast_data": "FunctionDef name:dims arg:self arguments arg Return return:yes Call Call Call"
  },
  {
    "library": "numpy",
    "name": "new_ccompiler_opt",
    "source_code": "def new_ccompiler_opt(compiler, dispatch_hpath, **kwargs):\n    opt = CCompilerOpt(compiler, **kwargs)\n    if not os.path.exists(dispatch_hpath) or not opt.is_cached():\n        opt.generate_dispatch_header(dispatch_hpath)\n    return opt",
    "docstring": "Create a new instance of 'CCompilerOpt' and generate the dispatch header which contains the #definitions and headers of platform-specific instruction-sets for the enabled CPU baseline and dispatch-able features. Parameters ---------- compiler : CCompiler instance dispatch_hpath : str path of the dispatch header **kwargs: passed as-is to Returns ------- new instance of CCompilerOpt",
    "type": "function",
    "file_path": "numpy\\numpy\\distutils\\ccompiler_opt.py",
    "ast_data": "FunctionDef name:new_ccompiler_opt arg:compiler arg:dispatch_hpath arguments arg arg arg Assign Call If BoolOp Call Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_filter_backed_symints",
    "source_code": "@classmethod\ndef _filter_backed_symints(cls: type[GuardedCache[T]], inputs: Sequence[InputType]) -> list[torch.SymInt]:\n    return [s for s in inputs if isinstance(s, torch.SymInt) and has_hint(s)]",
    "docstring": "Get the backed SymInt objects from the input list. Note that we can never have guards that depend on unbacked symint.",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\codecache.py",
    "ast_data": "FunctionDef name:_filter_backed_symints arg:cls arg:inputs arguments arg arg Return return:yes BoolOp Call Call"
  },
  {
    "library": "django",
    "name": "_if_match_passes",
    "source_code": "def _if_match_passes(target_etag, etags):\n    if not target_etag:\n        return False\n    elif etags == ['*']:\n        return True\n    elif target_etag.startswith('W/'):\n        return False\n    else:\n        return target_etag in etags",
    "docstring": "Test the If-Match comparison as defined in RFC 9110 Section 13.1.1.",
    "type": "function",
    "file_path": "django\\django\\utils\\cache.py",
    "ast_data": "FunctionDef name:_if_match_passes arg:target_etag arg:etags arguments arg arg If Return return:yes If Compare Return return:yes If Call Return return:yes Return return:yes Compare"
  },
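A quick self-check of the truth table implied by the If-Match rules above; the function body is copied from the entry, and the assertions are illustrative:

```python
def _if_match_passes(target_etag, etags):
    # No ETag on the resource: a conditional match can never succeed.
    if not target_etag:
        return False
    elif etags == ['*']:
        return True
    elif target_etag.startswith('W/'):
        # RFC 9110 requires strong comparison; weak validators never match.
        return False
    else:
        return target_etag in etags

assert _if_match_passes('"abc"', ['*'])
assert _if_match_passes('"abc"', ['"abc"', '"def"'])
assert not _if_match_passes('W/"abc"', ['W/"abc"'])
assert not _if_match_passes(None, ['"abc"'])
```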
  {
    "library": "kornia",
    "name": "forward",
    "source_code": "def forward(self, imgs: Union[Tensor, List[Tensor]]) -> Tuple[Tensor, Tensor]:\n    resized_imgs: list[Tensor] = []\n    iters = len(imgs) if isinstance(imgs, list) else imgs.shape[0]\n    original_sizes = imgs[0].new_zeros((iters, 2))\n    for i in range(iters):\n        img = imgs[i]\n        original_sizes[i, 0] = img.shape[-2]\n        original_sizes[i, 1] = img.shape[-1]\n        resized_imgs.append(resize(img[None], size=self.size, interpolation=self.interpolation_mode))\n    return (concatenate(resized_imgs), original_sizes)",
    "docstring": "Run forward. Returns: resized_imgs: resized images in a batch. original_sizes: the original image sizes of (height, width).",
    "type": "method",
    "file_path": "kornia\\kornia\\models\\utils.py",
    "ast_data": "FunctionDef name:forward arg:self arg:imgs arguments arg arg Assign Call Call Assign Call For Call Assign Assign Assign Call Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_revive_setter",
    "source_code": "def _revive_setter(layer, name, value):\n    if name in PUBLIC_ATTRIBUTES:\n        if isinstance(value, trackable.Trackable):\n            layer._track_trackable(value, name=name)\n        layer._serialized_attributes[name] = value\n    elif isinstance(layer, functional_lib.Functional) and re.match('^layer(_with_weights)?-[\\\\d+]', name) is not None:\n        layer._track_trackable(value, name, overwrite=True)\n    elif getattr(layer, name, None) is not None:\n        pass\n    else:\n        setattr(layer, name, value)",
    "docstring": "Setter function that saves some attributes to separate dictionary.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\saving\\saved_model\\load.py",
    "ast_data": "FunctionDef name:_revive_setter arg:layer arg:name arg:value arguments arg arg arg If Compare If Call Call Assign If BoolOp Call Compare Call Call If Compare Call Call"
  },
  {
    "library": "matplotlib",
    "name": "_get_patch_verts",
    "source_code": "def _get_patch_verts(patch):\n    trans = patch.get_patch_transform()\n    path = patch.get_path()\n    polygons = path.to_polygons(trans)\n    return polygons[0] if len(polygons) else np.array([])",
    "docstring": "Return a list of vertices for the path of a patch.",
    "type": "function",
    "file_path": "matplotlib\\lib\\mpl_toolkits\\mplot3d\\art3d.py",
    "ast_data": "FunctionDef name:_get_patch_verts arg:patch arguments arg Assign Call Assign Call Assign Call Return return:yes Call Call"
  },
  {
    "library": "matplotlib",
    "name": "_resize_sequence",
    "source_code": "def _resize_sequence(seq, N):\n    num_elements = len(seq)\n    if N == num_elements:\n        return seq\n    elif N < num_elements:\n        return seq[:N]\n    else:\n        return list(itertools.islice(itertools.cycle(seq), N))",
    "docstring": "Trim the given sequence to exactly N elements. If there are more elements in the sequence, cut it. If there are less elements in the sequence, repeat them. Implementation detail: We maintain type stability for the output for N len(seq); this was good enough for the present use cases but is not a fixed design decision.",
    "type": "function",
    "file_path": "matplotlib\\lib\\matplotlib\\cbook.py",
    "ast_data": "FunctionDef name:_resize_sequence arg:seq arg:N arguments arg arg Assign Call If Compare Return return:yes If Compare Return return:yes Return return:yes Call Call Call"
  },
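The trim-or-cycle behavior of `_resize_sequence` is easiest to see on a small input; this sketch reuses the function body from the entry with illustrative calls:

```python
import itertools

def _resize_sequence(seq, N):
    num_elements = len(seq)
    if N == num_elements:
        return seq
    elif N < num_elements:
        return seq[:N]  # type-stable truncation
    # Repeat the sequence until exactly N elements are produced.
    return list(itertools.islice(itertools.cycle(seq), N))

print(_resize_sequence(['r', 'g', 'b'], 2))  # ['r', 'g']
print(_resize_sequence(['r', 'g', 'b'], 5))  # ['r', 'g', 'b', 'r', 'g']
```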
  {
    "library": "matplotlib",
    "name": "__array__",
    "source_code": "def __array__(self, *args, **kwargs):\n    return self.get_affine().get_matrix()",
    "docstring": "Array interface to get at this Transform's affine matrix.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\transforms.py",
    "ast_data": "FunctionDef name:__array__ arg:self arguments arg arg arg Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "stop",
    "source_code": "def stop(self):\n    if self._execution_trace_running:\n        _disable_execution_trace_observer()\n        self._execution_trace_running = False",
    "docstring": "Stops to capture.",
    "type": "method",
    "file_path": "pytorch\\torch\\profiler\\profiler.py",
    "ast_data": "FunctionDef name:stop arg:self arguments arg If Call Assign"
  },
  {
    "library": "tensorflow",
    "name": "_PruneSparseTensor",
    "source_code": "def _PruneSparseTensor(unpruned, pruned_pattern):\n    pruned_indices = sparse_ops.sparse_reshape(pruned_pattern, shape=(-1,)).indices[..., 0]\n    unpruned_indices = sparse_ops.sparse_reshape(unpruned, shape=(-1,)).indices[..., 0]\n    best_match = array_ops.searchsorted(unpruned_indices, pruned_indices)\n    keep_indices = array_ops.gather(best_match, array_ops.where(math_ops.equal(array_ops.gather(unpruned_indices, best_match), pruned_indices)))\n    return (array_ops.gather_nd(unpruned.indices, keep_indices), array_ops.gather_nd(unpruned.values, keep_indices), pruned_pattern.dense_shape)",
    "docstring": "Helper function to prune COO sparse tensor. Given two sparse tensors 'unpruned' and 'pruned_pattern', generates another sparse tensor with indices and values fron 'unpruned' only if its indices also occur in pruned_pattern. Args: unpruned: COO matrix with unpruned indices pruned_pattern: COO matrix with pruned pattern. TODO(tabakg): This is far from optimal. Consider a C++ implementation. Returns: Indices, values, and dense_shape of the pruned matrix.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\linalg\\sparse\\sparse_csr_matrix_grad.py",
    "ast_data": "FunctionDef name:_PruneSparseTensor arg:unpruned arg:pruned_pattern arguments arg arg Assign Call Assign Call Assign Call Assign Call Call Call Call Return return:yes Call Call"
  },
  {
    "library": "django",
    "name": "_num_plurals",
    "source_code": "@property\ndef _num_plurals(self):\n    match = re.search('nplurals=\\\\s*(\\\\d+)', self._plural_string or '')\n    if match:\n        return int(match[1])\n    return 2",
    "docstring": "Return the number of plurals for this catalog language, or 2 if no plural string is available.",
    "type": "method",
    "file_path": "django\\django\\views\\i18n.py",
    "ast_data": "FunctionDef name:_num_plurals arg:self arguments arg Assign Call BoolOp If Return return:yes Call Return return:yes"
  },
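A standalone version of the same regex lookup, rewritten as a plain function for illustration (the real code is a property reading self._plural_string); the sample Plural-Forms header is hypothetical:

```python
import re

def num_plurals(plural_string):
    # e.g. 'nplurals=3; plural=...' from a gettext Plural-Forms header
    match = re.search(r'nplurals=\s*(\d+)', plural_string or '')
    return int(match[1]) if match else 2

print(num_plurals('nplurals=3; plural=(n==1 ? 0 : n in (2,3,4) ? 1 : 2);'))  # 3
print(num_plurals(None))  # 2 -- the documented fallback
```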
  {
    "library": "pandas",
    "name": "_concat_same_type",
    "source_code": "@classmethod\ndef _concat_same_type(cls, to_concat: Sequence[Self]) -> Self:\n    raise AbstractMethodError(cls)",
    "docstring": "Concatenate multiple array of this dtype. Parameters ---------- to_concat : sequence of this type An array of the same dtype to concatenate. Returns ------- ExtensionArray See Also -------- api.extensions.ExtensionArray._explode : Transform each element of list-like to a row. api.extensions.ExtensionArray._formatter : Formatting function for scalar values. api.extensions.ExtensionArray._from_factorized : Reconstruct an ExtensionArray after factorization. Examples -------- >>> arr1 = pd.array([1, 2, 3]) >>> arr2 = pd.array([4, 5, 6]) >>> pd.arrays.IntegerArray._concat_same_type([arr1, arr2]) [1, 2, 3, 4, 5, 6] Length: 6, dtype: Int64",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\arrays\\base.py",
    "ast_data": "FunctionDef name:_concat_same_type arg:cls arg:to_concat arguments arg arg Raise Call"
  },
  {
    "library": "pytorch",
    "name": "get_gencode_flags",
    "source_code": "def get_gencode_flags() -> str:\n    arch_list = get_arch_list()\n    if len(arch_list) == 0:\n        return ''\n    return f'-device {','.join((arch for arch in arch_list))}'",
    "docstring": "Return XPU AOT(ahead-of-time) build flags this library was compiled with.",
    "type": "function",
    "file_path": "pytorch\\torch\\xpu\\__init__.py",
    "ast_data": "FunctionDef name:get_gencode_flags arguments Assign Call If Compare Call Return return:yes Return return:yes Call"
  },
  {
    "library": "django",
    "name": "is_measured",
    "source_code": "@property\ndef is_measured(self):\n    return capi.is_measured(self.ptr)",
    "docstring": "Return True if the geometry has M coordinates.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\gdal\\geometries.py",
    "ast_data": "FunctionDef name:is_measured arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "reset",
    "source_code": "@staticmethod\ndef reset(target, containers=None, config=None):\n    if target is not None:\n        target = compat.as_bytes(target)\n    if containers is not None:\n        containers = [compat.as_bytes(c) for c in containers]\n    else:\n        containers = []\n    tf_session.TF_Reset(target, containers, config)",
    "docstring": "Resets resource containers on , and close all connected sessions. A resource container is distributed across all workers in the same cluster as . When a resource container on is reset, resources associated with that container will be cleared. In particular, all Variables in the container will become undefined: they lose their values and shapes. NOTE: (i) reset() is currently only implemented for distributed sessions. (ii) Any sessions on the master named by will be closed. If no resource containers are provided, all containers are reset. Args: target: The execution engine to connect to. containers: A list of resource container name strings, or if all of all the containers are to be reset. config: (Optional.) Protocol buffer with configuration options. Raises: tf.errors.OpError: Or one of its subclasses if an error occurs while resetting containers.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\client\\session.py",
    "ast_data": "FunctionDef name:reset arg:target arg:containers arg:config arguments arg arg arg If Compare Assign Call If Compare Assign Call Assign Call"
  },
  {
    "library": "scipy",
    "name": "argmin",
    "source_code": "def argmin(self, axis=None, out=None, *, explicit=False):\n    return self._argminmax(axis, out, np.argmin, np.less, explicit)",
    "docstring": "Return indices of minimum elements along an axis. By default, implicit zero elements are taken into account. If there are several minimum values, the index of the first occurrence is returned. If is set, only explicitly stored elements will be considered. Parameters ---------- axis : {-2, -1, 0, 1, None}, optional Axis along which the argmin is computed. If None (default), index of the minimum element in the flatten data is returned. out : None, optional This argument is in the signature *solely* for NumPy compatibility reasons. Do not pass in anything except for the default value, as this argument is not used. explicit : {False, True} optional (default: False) When set to True, only explicitly stored elements will be considered. If axis is not None and an axis has no stored elements, argmin is undefined, so the index `axis` is 1.",
    "type": "method",
    "file_path": "scipy\\scipy\\sparse\\_data.py",
    "ast_data": "FunctionDef name:argmin arg:self arg:axis arg:out arguments arg arg arg arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "PerReplica",
    "source_code": "@tf_export('types.experimental.distributed.PerReplica', v1=[])\nclass PerReplica(DistributedValues):\n    pass",
    "docstring": "Holds a distributed value: a map from replica id to unsynchronized values. values exist on the worker devices, with a different value for each replica. They can be produced many ways, often by iterating through a distributed dataset returned by and . They are also the typical result returned by .",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\types\\distribute.py",
    "ast_data": "ClassDef name:PerReplica Call"
  },
  {
    "library": "authlib",
    "name": "validate_subject_types_supported",
    "source_code": "def validate_subject_types_supported(self):\n    values = self.get('subject_types_supported')\n    if values is None:\n        raise ValueError('\"subject_types_supported\" is required')\n    if not isinstance(values, list):\n        raise ValueError('\"subject_types_supported\" MUST be JSON array')\n    valid_types = {'pairwise', 'public'}\n    if not valid_types.issuperset(set(values)):\n        raise ValueError('\"subject_types_supported\" contains invalid values')",
    "docstring": "REQUIRED. JSON array containing a list of the Subject Identifier types that this OP supports. Valid types include pairwise and public.",
    "type": "method",
    "file_path": "authlib\\authlib\\oidc\\discovery\\models.py",
    "ast_data": "FunctionDef name:validate_subject_types_supported arg:self arguments arg Assign Call If Compare Raise Call If Call Raise Call Assign If Call Call Raise Call"
  },
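The same three checks, sketched as a free function over a plain value rather than the Authlib model's self.get(...) accessor; the call values are illustrative:

```python
def validate_subject_types_supported(values):
    # REQUIRED metadata entry; must be a JSON array drawn from the two
    # valid Subject Identifier types.
    if values is None:
        raise ValueError('"subject_types_supported" is required')
    if not isinstance(values, list):
        raise ValueError('"subject_types_supported" MUST be JSON array')
    if not {'pairwise', 'public'}.issuperset(set(values)):
        raise ValueError('"subject_types_supported" contains invalid values')

validate_subject_types_supported(['pairwise', 'public'])  # passes silently
try:
    validate_subject_types_supported(['anonymous'])
except ValueError as exc:
    print(exc)  # "subject_types_supported" contains invalid values
```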
  {
    "library": "scikit-learn",
    "name": "_prepare_data",
    "source_code": "def _prepare_data(self, X, y, sample_weight, solver):\n    accept_sparse = _get_valid_accept_sparse(sparse.issparse(X), solver)\n    X, y = validate_data(self, X, y, accept_sparse=accept_sparse, multi_output=True, y_numeric=False, force_writeable=True)\n    self._label_binarizer = LabelBinarizer(pos_label=1, neg_label=-1)\n    Y = self._label_binarizer.fit_transform(y)\n    if not self._label_binarizer.y_type_.startswith('multilabel'):\n        y = column_or_1d(y, warn=True)\n    sample_weight = _check_sample_weight(sample_weight, X, dtype=X.dtype)\n    if self.class_weight:\n        sample_weight = sample_weight * compute_sample_weight(self.class_weight, y)\n    return (X, y, sample_weight, Y)",
    "docstring": "Validate and and binarize . Parameters ---------- X : {ndarray, sparse matrix} of shape (n_samples, n_features) Training data. y : ndarray of shape (n_samples,) Target values. sample_weight : float or ndarray of shape (n_samples,), default=None Individual weights for each sample. If given a float, every sample will have the same weight. solver : str The solver used in to know which sparse format to support. Returns ------- X : {ndarray, sparse matrix} of shape (n_samples, n_features) Validated training data. y : ndarray of shape (n_samples,) Validated target values. sample_weight : ndarray of shape (n_samples,) Validated sample weights. Y : ndarray of shape (n_samples, n_classes) The binarized version of .",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\linear_model\\_ridge.py",
    "ast_data": "FunctionDef name:_prepare_data arg:self arg:X arg:y arg:sample_weight arg:solver arguments arg arg arg arg arg Assign Call Call Assign Call Assign Call Assign Call If Call Assign Call Assign Call If Assign Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_check_type",
    "source_code": "def _check_type(cond, message=None):\n    _check_with(TypeError, cond, message)",
    "docstring": "Throws error containing an optional message if the specified condition is False. Error type: `bool`",
    "type": "function",
    "file_path": "pytorch\\torch\\__init__.py",
    "ast_data": "FunctionDef name:_check_type arg:cond arg:message arguments arg arg Call"
  },
  {
    "library": "cryptography",
    "name": "public_bytes",
    "source_code": "@abc.abstractmethod\ndef public_bytes(self, encoding: _serialization.Encoding, format: _serialization.PublicFormat) -> bytes:\n    pass",
    "docstring": "Returns the key serialized as bytes.",
    "type": "method",
    "file_path": "cryptography\\src\\cryptography\\hazmat\\primitives\\asymmetric\\rsa.py",
    "ast_data": "FunctionDef name:public_bytes arg:self arg:encoding arg:format arguments arg arg arg"
  },
  {
    "library": "pytorch",
    "name": "_define_gemm_instance",
    "source_code": "def _define_gemm_instance(self, op: GemmOperation, evt_name: Optional[str]=None) -> tuple[str, str]:\n    assert cutlass_utils.try_import_cutlass()\n    import cutlass_library.library as cutlass_lib\n    from .cutlass_lib_extensions import gemm_operation_extensions as gemm_extensions\n    emitter = gemm_extensions.EmitGemmUniversal3xInstanceWithEVT(evt_name=evt_name)\n    if not hasattr(op, 'epilogue_functor') or not isinstance(op.epilogue_functor, enum.Enum):\n        op = copy.deepcopy(op)\n        op.epilogue_functor = cutlass_lib.EpilogueFunctor.LinearCombination\n    op_def = emitter.emit(op)\n    pattern = re.compile('\\\\s*struct\\\\s(.*?)\\\\s:')\n    decl = [line for line in op_def.split('\\n') if 'struct ' in line][-1]\n    match = pattern.match(decl)\n    if match is None:\n        raise RuntimeError('Invalid Gemm config: \\n' + op_def)\n    op_type = match.groups()[0]\n    if op.gemm_kind == cutlass_lib.GemmKind.Universal3x:\n        op_def += f'\\n  using {op_type}_device_type = cutlass::gemm::device::GemmUniversalAdapter<{op_type}>;\\n'\n        op_type = f'{op_type}_device_type'\n    return (op_def, op_type)",
    "docstring": "Defines and renders the Cutlass / CUDA C++ code for a given GEMM operation instance. This function uses the Cutlass library to generate key parts of the codegen process. General Matrix Multiply forms a core part of a number of scientific applications, so this efficient and adaptable implementation is crucial. Args: op (cutlass_library.gemm_op.GemmOperation): This is the core GEMM operation that we are defining and rendering. Returns: Tuple[str, str]: A tuple where the first part is a string that constitutes the defined GEMM operation in C++ code (render) and the second part is the string that specifies the operation type.",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\codegen\\cuda\\gemm_template.py",
    "ast_data": "FunctionDef name:_define_gemm_instance arg:self arg:op arg:evt_name arguments arg arg arg Call Assign Call If BoolOp Call Call Assign Call Assign Assign Call Assign Call Assign Call Compare Assign Call If Compare Raise Call Assign Call If Compare Assign Return return:yes"
  },
  {
    "library": "pandas",
    "name": "check_ndim",
    "source_code": "def check_ndim(values, placement: BlockPlacement, ndim: int) -> None:\n    if values.ndim > ndim:\n        raise ValueError(f'Wrong number of dimensions. values.ndim > ndim [{values.ndim} > {ndim}]')\n    if not is_1d_only_ea_dtype(values.dtype):\n        if values.ndim != ndim:\n            raise ValueError(f'Wrong number of dimensions. values.ndim != ndim [{values.ndim} != {ndim}]')\n        if len(placement) != len(values):\n            raise ValueError(f'Wrong number of items passed {len(values)}, placement implies {len(placement)}')\n    elif ndim == 2 and len(placement) != 1:\n        raise ValueError('need to split')",
    "docstring": "ndim inference and validation. Validates that values.ndim and ndim are consistent. Validates that len(values) and len(placement) are consistent. Parameters ---------- values : array-like placement : BlockPlacement ndim : int Raises ------ ValueError : the number of dimensions do not match",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\internals\\blocks.py",
    "ast_data": "FunctionDef name:check_ndim arg:values arg:placement arg:ndim arguments arg arg arg If Compare Raise Call If Call If Compare Raise Call If Compare Call Call Raise Call Call Call If BoolOp Compare Compare Call Raise Call"
  },
  {
    "library": "seaborn",
    "name": "var_levels",
    "source_code": "@property\ndef var_levels(self):\n    for var in self.variables:\n        if (map_obj := getattr(self, f'_{var}_map', None)) is not None:\n            self._var_levels[var] = map_obj.levels\n    return self._var_levels",
    "docstring": "Property interface to ordered list of variables levels. Each time it's accessed, it updates the var_levels dictionary with the list of levels in the current semantic mappers. But it also allows the dictionary to persist, so it can be used to set levels by a key. This is used to track the list of col/row levels using an attached FacetGrid object, but it's kind of messy and ideally fixed by improving the faceting logic so it interfaces better with the modern approach to tracking plot variables.",
    "type": "method",
    "file_path": "seaborn\\seaborn\\_base.py",
    "ast_data": "FunctionDef name:var_levels arg:self arguments arg For If Compare Call Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, node_def, op, message, *args):\n    super(NotFoundError, self).__init__(node_def, op, message, NOT_FOUND, *args)",
    "docstring": "Creates a .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\errors_impl.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:node_def arg:op arg:message arguments arg arg arg arg arg Call Call"
  },
  {
    "library": "django",
    "name": "DTDForbidden",
    "source_code": "class DTDForbidden(DefusedXmlException):\n\n    def __init__(self, name, sysid, pubid):\n        super().__init__()\n        self.name = name\n        self.sysid = sysid\n        self.pubid = pubid\n\n    def __str__(self):\n        tpl = \"DTDForbidden(name='{}', system_id={!r}, public_id={!r})\"\n        return tpl.format(self.name, self.sysid, self.pubid)",
    "docstring": "Document type definition is forbidden.",
    "type": "class",
    "file_path": "django\\django\\core\\serializers\\xml_serializer.py",
    "ast_data": "ClassDef name:DTDForbidden FunctionDef name:__init__ arg:self arg:name arg:sysid arg:pubid arguments arg arg arg arg Call Call Assign Assign Assign FunctionDef name:__str__ arg:self arguments arg Assign Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_add_function",
    "source_code": "def _add_function(self, function) -> None:\n    self._check_not_finalized()\n    name = function.name\n    if getattr(function, 'grad_func_name', None) and getattr(function, 'python_grad_func', None):\n        raise ValueError('Gradient defined twice for function %s' % name)\n    with self._c_graph.get() as c_graph:\n        with function._c_func.get() as func:\n            if getattr(function, '_grad_func', None):\n                with function._grad_func._c_func.get() as gradient:\n                    pywrap_tf_session.TF_GraphCopyFunction(c_graph, func, gradient)\n            else:\n                pywrap_tf_session.TF_GraphCopyFunction(c_graph, func, None)\n    self._functions[compat.as_str(name)] = function\n    if self._graph_def_versions.min_consumer < 12:\n        self._graph_def_versions.min_consumer = 12",
    "docstring": "Adds a function to the graph. After the function has been added, you can call to the function by passing the function name in place of an op name to . Args: function: A object. Raises: ValueError: if another function is defined with the same name.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\ops.py",
    "ast_data": "FunctionDef name:_add_function arg:self arg:function arguments arg arg Call Assign If BoolOp Call Call Raise Call With Call With Call If Call With Call Call Call Assign Call If Compare Assign"
  },
  {
    "library": "tensorflow",
    "name": "activity_regularizer",
    "source_code": "@property\ndef activity_regularizer(self):\n    return self._activity_regularizer",
    "docstring": "Optional regularizer function for the output of this layer.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\base_layer.py",
    "ast_data": "FunctionDef name:activity_regularizer arg:self arguments arg Return return:yes"
  },
  {
    "library": "authlib",
    "name": "OpenIDCode",
    "source_code": "class OpenIDCode(OpenIDToken):\n\n    def __init__(self, require_nonce=False):\n        self.require_nonce = require_nonce\n\n    def exists_nonce(self, nonce, request):\n        raise NotImplementedError()\n\n    def validate_openid_authorization_request(self, grant, redirect_uri):\n        validate_nonce(grant.request, self.exists_nonce, self.require_nonce)\n\n    def __call__(self, grant):\n        grant.register_hook('after_create_token_response', self.process_token)\n        if is_openid_scope(grant.request.payload.scope):\n            grant.register_hook('after_validate_authorization_request_payload', self.validate_openid_authorization_request)\n            grant.register_hook('after_validate_consent_request', validate_request_prompt)",
    "docstring": "An extension from OpenID Connect for \"grant_type=code\" request. Developers MUST implement the missing methods:: class MyOpenIDCode(OpenIDCode): def get_jwt_config(self, grant): return {...} def exists_nonce(self, nonce, request): return check_if_nonce_in_cache(request.payload.client_id, nonce) def generate_user_info(self, user, scope): return {...} The register this extension with AuthorizationCodeGrant:: authorization_server.register_grant( AuthorizationCodeGrant, extensions=[MyOpenIDCode()] )",
    "type": "class",
    "file_path": "authlib\\authlib\\oidc\\core\\grants\\code.py",
    "ast_data": "ClassDef name:OpenIDCode FunctionDef name:__init__ arg:self arg:require_nonce arguments arg arg Assign FunctionDef name:exists_nonce arg:self arg:nonce arg:request arguments arg arg arg Raise Call FunctionDef name:validate_openid_authorization_request arg:self arg:grant arg:redirect_uri arguments arg arg arg Call FunctionDef name:__call__ arg:self arg:grant arguments arg arg Call If Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "register_revived_type",
    "source_code": "@tf_export('__internal__.saved_model.load.register_revived_type', v1=[])\ndef register_revived_type(identifier, predicate, versions):\n    versions.sort(key=lambda reg: reg.version, reverse=True)\n    if not versions:\n        raise AssertionError('Need at least one version of a registered type.')\n    version_numbers = set()\n    for registration in versions:\n        registration.identifier = identifier\n        if registration.version in version_numbers:\n            raise AssertionError(f'Got multiple registrations with version {registration.version} for type {identifier}.')\n        version_numbers.add(registration.version)\n    _REVIVED_TYPE_REGISTRY[identifier] = (predicate, versions)\n    _TYPE_IDENTIFIERS.append(identifier)",
    "docstring": "Register a type for revived objects. Args: identifier: A unique string identifying this class of objects. predicate: A Boolean predicate for this registration. Takes a trackable object as an argument. If True, may be used to save and restore the object. versions: A list of objects.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\saved_model\\revived_types.py",
    "ast_data": "FunctionDef name:register_revived_type arg:identifier arg:predicate arg:versions arguments arg arg arg Call arguments arg If Raise Call Assign Call For Assign If Compare Raise Call Call Assign Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_init_saver",
    "source_code": "def _init_saver(self, saver=USE_DEFAULT):\n    if saver is Supervisor.USE_DEFAULT:\n        saver = self._get_first_op_from_collection(ops.GraphKeys.SAVERS)\n        if saver is None and variables.global_variables():\n            saver = saver_mod.Saver()\n            ops.add_to_collection(ops.GraphKeys.SAVERS, saver)\n    self._saver = saver",
    "docstring": "Initializes saver. Args: saver: A object. If set to USE_DEFAULT, create one that saves all the variables.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\training\\supervisor.py",
    "ast_data": "FunctionDef name:_init_saver arg:self arg:saver arguments arg arg If Compare Assign Call If BoolOp Compare Call Assign Call Call Assign"
  },
  {
    "library": "pandas",
    "name": "describe_timestamp_1d",
    "source_code": "def describe_timestamp_1d(data: Series, percentiles: Sequence[float]) -> Series:\n    from pandas import Series\n    formatted_percentiles = format_percentiles(percentiles)\n    stat_index = ['count', 'mean', 'min'] + formatted_percentiles + ['max']\n    d = [data.count(), data.mean(), data.min()] + data.quantile(percentiles).tolist() + [data.max()]\n    return Series(d, index=stat_index, name=data.name)",
    "docstring": "Describe series containing datetime64 dtype. Parameters ---------- data : Series Series to be described. percentiles : list-like of numbers The percentiles to include in the output.",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\methods\\describe.py",
    "ast_data": "FunctionDef name:describe_timestamp_1d arg:data arg:percentiles arguments arg arg Assign Call Assign Assign Call Call Call Call Call Call Return return:yes Call"
  },
  {
    "library": "django",
    "name": "next_char",
    "source_code": "def next_char(input_iter):\n    for ch in input_iter:\n        if ch != '\\\\':\n            yield (ch, False)\n            continue\n        ch = next(input_iter)\n        representative = ESCAPE_MAPPINGS.get(ch, ch)\n        if representative is None:\n            continue\n        yield (representative, True)",
    "docstring": "An iterator that yields the next character from \"pattern_iter\", respecting escape sequences. An escaped character is replaced by a representative of its class (e.g. \\w -> \"x\"). If the escaped character is one that is skipped, it is not returned (the next character is returned instead). Yield the next character, along with a boolean indicating whether it is a raw (unescaped) character or not.",
    "type": "function",
    "file_path": "django\\django\\utils\\regex_helper.py",
    "ast_data": "FunctionDef name:next_char arg:input_iter arguments arg For If Compare Assign Call Assign Call If Compare"
  },
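To see the (char, is_escaped) pairs that next_char yields, here is the generator with a minimal stand-in for Django's ESCAPE_MAPPINGS (only the 'w' and 'b' entries are reproduced; the full table is larger):

```python
ESCAPE_MAPPINGS = {'w': 'x', 'b': None}  # subset of Django's table

def next_char(input_iter):
    for ch in input_iter:
        if ch != '\\':
            yield (ch, False)
            continue
        ch = next(input_iter)
        representative = ESCAPE_MAPPINGS.get(ch, ch)
        if representative is None:  # skipped escape: emit nothing
            continue
        yield (representative, True)

# '\w' becomes its class representative 'x'; '\b' is skipped entirely.
print(list(next_char(iter(r'a\w\bc'))))  # [('a', False), ('x', True), ('c', False)]
```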
  {
    "library": "scikit-learn",
    "name": "is_ndonnx_namespace",
    "source_code": "def is_ndonnx_namespace(xp: Namespace) -> bool:\n    return xp.__name__ == 'ndonnx'",
    "docstring": "Returns True if is an NDONNX namespace. See Also -------- array_namespace is_numpy_namespace is_cupy_namespace is_torch_namespace is_dask_namespace is_jax_namespace is_pydata_sparse_namespace is_array_api_strict_namespace",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\externals\\array_api_compat\\common\\_helpers.py",
    "ast_data": "FunctionDef name:is_ndonnx_namespace arg:xp arguments arg Return return:yes Compare"
  },
  {
    "library": "tensorflow",
    "name": "__radd__",
    "source_code": "def __radd__(self, other):\n    return self + other",
    "docstring": "Returns the sum of and . Args: other: Another Dimension, or a value accepted by . Returns: A Dimension whose value is the sum of and .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\tensor_shape.py",
    "ast_data": "FunctionDef name:__radd__ arg:self arg:other arguments arg arg Return return:yes"
  },
  {
    "library": "numpy",
    "name": "parse_flags",
    "source_code": "def parse_flags(line):\n    d = {'include_dirs': [], 'library_dirs': [], 'libraries': [], 'macros': [], 'ignored': []}\n    flags = (' ' + line).split(' -')\n    for flag in flags:\n        flag = '-' + flag\n        if len(flag) > 0:\n            if flag.startswith('-I'):\n                d['include_dirs'].append(flag[2:].strip())\n            elif flag.startswith('-L'):\n                d['library_dirs'].append(flag[2:].strip())\n            elif flag.startswith('-l'):\n                d['libraries'].append(flag[2:].strip())\n            elif flag.startswith('-D'):\n                d['macros'].append(flag[2:].strip())\n            else:\n                d['ignored'].append(flag)\n    return d",
    "docstring": "Parse a line from a config file containing compile flags. Parameters ---------- line : str A single line containing one or more compile flags. Returns ------- d : dict Dictionary of parsed flags, split into relevant categories. These categories are the keys of : * 'include_dirs' * 'library_dirs' * 'libraries' * 'macros' * 'ignored'",
    "type": "function",
    "file_path": "numpy\\numpy\\distutils\\npy_pkg_config.py",
    "ast_data": "FunctionDef name:parse_flags arg:line arguments arg Assign Assign Call For Assign If Compare Call If Call Call Call If Call Call Call If Call Call Call If Call Call Call Call Return return:yes"
  },
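Feeding parse_flags a representative line shows how each -I/-L/-l/-D flag is routed; note that the leading empty split token surfaces as a bare '-' in 'ignored'. The import assumes numpy.distutils is still available in the installed NumPy; the flag line itself is made up:

```python
from numpy.distutils.npy_pkg_config import parse_flags

d = parse_flags('-I/usr/local/include -L/usr/local/lib -lblas -DNDEBUG -O2')
print(d['include_dirs'])  # ['/usr/local/include']
print(d['library_dirs'])  # ['/usr/local/lib']
print(d['libraries'])     # ['blas']
print(d['macros'])        # ['NDEBUG']
print(d['ignored'])       # ['-', '-O2']
```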
  {
    "library": "tensorflow",
    "name": "mesh_split",
    "source_code": "def mesh_split(tensor, device_mesh, tensor_split_dims_mapping, use_sharding_op=False, manual_mesh_dims=None, unspecified_dims=None):\n    sharding = mesh_split_sharding(device_mesh, tensor_split_dims_mapping, manual_mesh_dims)\n    return sharding.apply_to_tensor(tensor, use_sharding_op=use_sharding_op, unspecified_dims=unspecified_dims or [])",
    "docstring": "Returns a tensor that is split along multiple dimensions in a device mesh. Args: tensor: A tf.Tensor to split. device_mesh: An np.ndarray describing the topology of the device mesh and each element is the ID of the device in the topology. tensor_split_dims_mapping: A list of integers that map each tensor axis to the device mesh axis along which it is sharded. Its length is the tensor rank, and tensor_split_dims_mapping[i] is device mesh axis for tensor dimension i. Use -1 for tensor dimensions that are not sharded. use_sharding_op: If true, adds a sharding op to set the sharding. manual_mesh_dims: An optional list of mesh dims for manual subgroups. unspecified_dims: An optional list of dimensions unspecified. Raises: ValueError: The number of tensor split dimensions is larger than device mesh rank.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\compiler\\xla\\experimental\\xla_sharding.py",
    "ast_data": "FunctionDef name:mesh_split arg:tensor arg:device_mesh arg:tensor_split_dims_mapping arg:use_sharding_op arg:manual_mesh_dims arg:unspecified_dims arguments arg arg arg arg arg arg Assign Call Return return:yes Call BoolOp"
  },
  {
    "library": "kornia",
    "name": "forward",
    "source_code": "def forward(self, img: Tensor) -> Dict[str, Any]:\n    KORNIA_CHECK_SHAPE(img, ['B', '1', 'H', 'W'])\n    outputs = {}\n    net_outputs = self.model(img)\n    outputs['junction_heatmap'] = net_outputs['junctions']\n    outputs['line_heatmap'] = net_outputs['heatmap']\n    lines = []\n    for junc_prob, heatmap in zip(net_outputs['junctions'], net_outputs['heatmap']):\n        junctions = prob_to_junctions(junc_prob, self.grid_size, self.junc_detect_thresh, self.max_num_junctions)\n        line_map, junctions, _ = self.line_detector.detect(junctions, heatmap)\n        lines.append(line_map_to_segments(junctions, line_map))\n    outputs['line_segments'] = lines\n    return outputs",
    "docstring": "Run forward. Args: img: batched images with shape :math:. Returns: line_segments: list of N line segments in each of the B images :math:. junction_heatmap: raw junction heatmap of shape :math:. line_heatmap: raw line heatmap of shape :math:.",
    "type": "method",
    "file_path": "kornia\\kornia\\feature\\sold2\\sold2_detector.py",
    "ast_data": "FunctionDef name:forward arg:self arg:img arguments arg arg Call Assign Assign Call Assign Assign Assign For Call Assign Call Assign Call Call Call Assign Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "process_inputs",
    "source_code": "@compatibility(is_backward_compatible=False)\ndef process_inputs(self, *args):\n    return self._codegen.process_inputs(*args)",
    "docstring": "Processes args so that they can be passed to the FX graph.",
    "type": "method",
    "file_path": "pytorch\\torch\\fx\\graph.py",
    "ast_data": "FunctionDef name:process_inputs arg:self arguments arg arg Return return:yes Call Call"
  },
  {
    "library": "cherrypy",
    "name": "_setup",
    "source_code": "def _setup(self):\n    conf = self._merged_args()\n    p = conf.pop('priority', None)\n    if p is None:\n        p = getattr(self.callable, 'priority', self._priority)\n    cherrypy.serving.request.hooks.attach(self._point, self.callable, priority=p, **conf)",
    "docstring": "Wire this tool into ``. The standard CherryPy request object will automatically call this method when the tool is \"turned on\" in config.",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\_cptools.py",
    "ast_data": "FunctionDef name:_setup arg:self arguments arg Assign Call Assign Call If Compare Assign Call Call"
  },
  {
    "library": "pytorch",
    "name": "_get_unpadded_shard",
    "source_code": "@staticmethod\ndef _get_unpadded_shard(tensor: Tensor, rank: int, world_size: int) -> tuple[Tensor, int]:\n    chunks = torch.flatten(tensor).chunk(world_size) if _is_truly_contiguous(tensor) else tensor.as_strided((tensor.numel(),), (1,)).chunk(world_size)\n    if len(chunks) < rank + 1:\n        chunk = chunks[0].new_empty(0)\n    else:\n        chunk = chunks[rank]\n    numel_to_pad = chunks[0].numel() - chunk.numel()\n    assert numel_to_pad >= 0, \"Chunk's size should be at most the first chunk's size\"\n    return (chunk, numel_to_pad)",
    "docstring": "Return the unpadded shard of `` is already flattened or may be viewed in the flattened shape (which is true in the expected usage), then this method does not allocate any new tensor memory.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\fsdp\\_flat_param.py",
    "ast_data": "FunctionDef name:_get_unpadded_shard arg:tensor arg:rank arg:world_size arguments arg arg arg Assign Call Call Call Call Call Call If Compare Call Assign Call Assign Assign Call Call Compare Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "array",
    "source_code": "@tf_export.tf_export('experimental.numpy.array', v1=[])\n@np_utils.np_doc_only('array')\ndef array(val, dtype=None, copy=True, ndmin=0):\n    if dtype:\n        dtype = np_utils.result_type(dtype)\n    return _array_internal(val, dtype, copy, ndmin)",
    "docstring": "Since Tensors are immutable, a copy is made only if val is placed on a different device than the current one. Even if is False, a new Tensor may need to be built to satisfy and . This is used only if is an ndarray or a Tensor.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\numpy_ops\\np_array_ops.py",
    "ast_data": "FunctionDef name:array arg:val arg:dtype arg:copy arg:ndmin arguments arg arg arg arg If Assign Call Return return:yes Call Call Call"
  },
  {
    "library": "scrapy",
    "name": "process_chain_both",
    "source_code": "def process_chain_both(callbacks: Iterable[Callable[Concatenate[_T, _P], Any]], errbacks: Iterable[Callable[Concatenate[Failure, _P], Any]], input: Any, *a: _P.args, **kw: _P.kwargs) -> Deferred:\n    warnings.warn('process_chain_both() is deprecated and will be removed in a future Scrapy version.', ScrapyDeprecationWarning, stacklevel=2)\n    d: Deferred = Deferred()\n    for cb, eb in zip(callbacks, errbacks):\n        d.addCallback(cb, *a, **kw)\n        d.addErrback(eb, *a, **kw)\n    if isinstance(input, failure.Failure):\n        d.errback(input)\n    else:\n        d.callback(input)\n    return d",
    "docstring": "Return a Deferred built by chaining the given callbacks and errbacks",
    "type": "function",
    "file_path": "scrapy\\scrapy\\utils\\defer.py",
    "ast_data": "FunctionDef name:process_chain_both arg:callbacks arg:errbacks arg:input arguments arg arg arg arg arg Call Call For Call Call Call If Call Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "register_db_case",
    "source_code": "def register_db_case(case: ExportCase) -> None:\n    if case.name in _EXAMPLE_CASES:\n        if case.name not in _EXAMPLE_CONFLICT_CASES:\n            _EXAMPLE_CONFLICT_CASES[case.name] = [_EXAMPLE_CASES[case.name]]\n        _EXAMPLE_CONFLICT_CASES[case.name].append(case)\n        return\n    _EXAMPLE_CASES[case.name] = case",
    "docstring": "Registers a user provided ExportCase into example bank.",
    "type": "function",
    "file_path": "pytorch\\torch\\_export\\db\\case.py",
    "ast_data": "FunctionDef name:register_db_case arg:case arguments arg If Compare If Compare Assign Call Return return:no Assign"
  },
  {
    "library": "authlib",
    "name": "query_token",
    "source_code": "def query_token(self, token_string, token_type_hint):\n    raise NotImplementedError()",
    "docstring": "Get the token from database/storage by the given token string. Developers should implement this method:: def query_token(self, token_string, token_type_hint): if token_type_hint == 'access_token': return Token.query_by_access_token(token_string) if token_type_hint == 'refresh_token': return Token.query_by_refresh_token(token_string) return Token.query_by_access_token(token_string) or Token.query_by_refresh_token(token_string)",
    "type": "method",
    "file_path": "authlib\\authlib\\oauth2\\rfc7009\\revocation.py",
    "ast_data": "FunctionDef name:query_token arg:self arg:token_string arg:token_type_hint arguments arg arg arg Raise Call"
  },
  {
    "library": "pandas",
    "name": "_supports_2d",
    "source_code": "@property\ndef _supports_2d(self) -> bool:\n    return False",
    "docstring": "Do ExtensionArrays with this dtype support 2D arrays? Historically ExtensionArrays were limited to 1D. By returning True here, authors can indicate that their arrays support 2D instances. This can improve performance in some cases, particularly operations with . Arrays that support 2D values should: - implement Array.reshape - subclass the Dim2CompatTests in tests.extension.base - _concat_same_type should support keyword - _reduce and reductions should support keyword",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\dtypes\\base.py",
    "ast_data": "FunctionDef name:_supports_2d arg:self arguments arg Return return:yes"
  },
  {
    "library": "scipy",
    "name": "wrap_single_convertor_instance",
    "source_code": "def wrap_single_convertor_instance(convert_single):\n\n    @functools.wraps(convert_single)\n    def __ua_convert__(self, dispatchables, coerce):\n        converted = []\n        for d in dispatchables:\n            c = convert_single(self, d.value, d.type, coerce and d.coercible)\n            if c is NotImplemented:\n                return NotImplemented\n            converted.append(c)\n        return converted\n    return __ua_convert__",
    "docstring": "Wraps a ``, the operation is assumed to be undefined. Accepts a signature of (value, type, coerce).",
    "type": "function",
    "file_path": "scipy\\scipy\\_lib\\_uarray\\_backend.py",
    "ast_data": "FunctionDef name:wrap_single_convertor_instance arg:convert_single arguments arg FunctionDef name:__ua_convert__ arg:self arg:dispatchables arg:coerce arguments arg arg arg Assign For Assign Call BoolOp If Compare Return return:yes Call Return return:yes Call Return return:yes"
  },
  {
    "library": "scipy",
    "name": "logentropy",
    "source_code": "@abstractmethod\ndef logentropy(self, *, method):\n    raise NotImplementedError()",
    "docstring": "Logarithm of the differential entropy In terms of probability density function :math: and support :math:, the differential entropy (or simply \"entropy\") of a continuous random variable :math: is: .. math:: h(X) = - \\int_{\\chi} f(x) \\log f(x) dx The definition for a discrete random variable is analogous, with the PMF replacing the PDF and a sum over the support replacing the integral. computes the logarithm of the differential entropy (\"log-entropy\"), :math:, but it may be numerically favorable compared to the naive implementation (computing :math: then taking the logarithm). Parameters ---------- method : {None, 'formula', 'logexp', 'quadrature} The strategy used to evaluate the log-entropy. By default (`methodmethod\\pinp.pi`. >>> X = stats.Uniform(a=-.1, b=.1) >>> X.entropy(), X.logentropy() (-1.6094379124341007, (0.4758849953271105+3.141592653589793j))",
    "type": "method",
    "file_path": "scipy\\scipy\\stats\\_probability_distribution.py",
    "ast_data": "FunctionDef name:logentropy arg:self arguments arg arg Raise Call"
  },
  {
    "library": "matplotlib",
    "name": "set_gid",
    "source_code": "def set_gid(self, id):\n    self._gid = id",
    "docstring": "Set the id.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\backend_bases.py",
    "ast_data": "FunctionDef name:set_gid arg:self arg:id arguments arg arg Assign"
  },
  {
    "library": "matplotlib",
    "name": "disconnect",
    "source_code": "def disconnect(self):\n    for disconnector in self._disconnectors:\n        disconnector()",
    "docstring": "Disconnect the callbacks.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\offsetbox.py",
    "ast_data": "FunctionDef name:disconnect arg:self arguments arg For Call"
  },
  {
    "library": "seaborn",
    "name": "save",
    "source_code": "def save(self, loc, **kwargs) -> Plot:\n    with theme_context(self._theme_with_defaults()):\n        self._plot().save(loc, **kwargs)\n    return self",
    "docstring": "Compile the plot and write it to a buffer or file on disk. Parameters ---------- loc : str, path, or buffer Location on disk to save the figure, or a buffer to write into. kwargs Other keyword arguments are passed through to :meth:.",
    "type": "method",
    "file_path": "seaborn\\seaborn\\_core\\plot.py",
    "ast_data": "FunctionDef name:save arg:self arg:loc arguments arg arg arg With Call Call Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "upcast_acc_dtype",
    "source_code": "def upcast_acc_dtype(dtype: torch.dtype) -> torch.dtype:\n    if is_integer_dtype(dtype) and dtype.is_signed and (dtype.itemsize <= 4):\n        return torch.int32\n    return upcast_compute_type(dtype)",
    "docstring": "Implicit upcasts used for Triton reduction types",
    "type": "function",
    "file_path": "pytorch\\torch\\_inductor\\codegen\\triton.py",
    "ast_data": "FunctionDef name:upcast_acc_dtype arg:dtype arguments arg If BoolOp Call Compare Return return:yes Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "convert_variable_to_constant",
    "source_code": "def convert_variable_to_constant(self, incoming_edge, tensor_data):\n    index = incoming_edge.destination.index\n    for edge in self.outgoing_edges:\n        if edge.source.index == index:\n            edge.destination.convertible.convert_variable_to_constant(edge, tensor_data)\n    function = self.converted_self().function\n    function.signature.input_arg[index].type = tensor_data.dtype\n    if '_input_shapes' in function.attr:\n        function.attr['_input_shapes'].list.shape[index].unknown_rank = True\n        del function.attr['_input_shapes'].list.shape[index].dim[:]\n    arg_attrs = function.arg_attr[index].attr\n    if '_output_shapes' in arg_attrs:\n        arg_attrs['_output_shapes'].list.shape[0].unknown_rank = True\n        del arg_attrs['_output_shapes'].list.shape[0].dim[:]",
    "docstring": "Converts one function argument into a constant. Args: incoming_edge: The edge into the argument to be converted. tensor_data: The constant value.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\convert_to_constants.py",
    "ast_data": "FunctionDef name:convert_variable_to_constant arg:self arg:incoming_edge arg:tensor_data arguments arg arg arg Assign For If Compare Call Assign Call Assign If Compare Assign Assign If Compare Assign"
  },
  {
    "library": "matplotlib",
    "name": "set_snap",
    "source_code": "def set_snap(self, snap):\n    self._snap = snap\n    self.stale = True",
    "docstring": "Set the snapping behavior. Snapping aligns positions with the pixel grid, which results in clearer images. For example, if a black line of 1px width was defined at a position in between two pixels, the resulting image would contain the interpolated value of that line in the pixel grid, which would be a grey value on both adjacent pixel positions. In contrast, snapping will move the line to the nearest integer pixel value, so that the resulting image will really contain a 1px wide black line. Snapping is currently only supported by the Agg and MacOSX backends. Parameters ---------- snap : bool or None Possible values: - *True*: Snap vertices to the nearest pixel center. - *False*: Do not modify vertex positions. - *None*: (auto) If the path contains only rectilinear line segments, round to the nearest pixel center.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\artist.py",
    "ast_data": "FunctionDef name:set_snap arg:self arg:snap arguments arg arg Assign Assign"
  },
  {
    "library": "django",
    "name": "name",
    "source_code": "@property\ndef name(self):\n    name = capi.get_fd_name(self._ldefn)\n    return force_str(name, self._ds.encoding, strings_only=True)",
    "docstring": "Return the name of this layer in the Data Source.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\gdal\\layer.py",
    "ast_data": "FunctionDef name:name arg:self arguments arg Assign Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "_setup_mixed_precision_params",
    "source_code": "def _setup_mixed_precision_params(mixed_precision_config, root_module):\n    for param in root_module.parameters():\n        if hasattr(param, '_ddp_ignored') and param._ddp_ignored:\n            continue\n        if not hasattr(param, '_mp_param'):\n            param._mp_param = torch.zeros_like(param, device=param.device, dtype=mixed_precision_config.param_dtype, requires_grad=param.requires_grad)\n            _free_storage(param._mp_param)\n            param._fp_param = param.data",
    "docstring": "Create and free storage for the mixed precision parameters.",
    "type": "function",
    "file_path": "pytorch\\torch\\nn\\parallel\\distributed.py",
    "ast_data": "FunctionDef name:_setup_mixed_precision_params arg:mixed_precision_config arg:root_module arguments arg arg For Call If BoolOp Call If Call Assign Call Call Assign"
  },
  {
    "library": "django",
    "name": "add",
    "source_code": "def add(self, data, conn_type):\n    if self.connector != conn_type:\n        obj = self.copy()\n        self.connector = conn_type\n        self.children = [obj, data]\n        return data\n    elif isinstance(data, Node) and (not data.negated) and (data.connector == conn_type or len(data) == 1):\n        self.children.extend(data.children)\n        return self\n    else:\n        self.children.append(data)\n        return data",
    "docstring": "Combine this tree and the data represented by data using the connector conn_type. The combine is done by squashing the node other away if possible. This tree (self) will never be pushed to a child node of the combined tree, nor will the connector or negated properties change. Return a node which can be used in place of data regardless if the node other got squashed or not.",
    "type": "method",
    "file_path": "django\\django\\utils\\tree.py",
    "ast_data": "FunctionDef name:add arg:self arg:data arg:conn_type arguments arg arg arg If Compare Assign Call Assign Assign Return return:yes If BoolOp Call BoolOp Compare Compare Call Call Return return:yes Call Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "transform",
    "source_code": "def transform(self, X):\n    check_is_fitted(self)\n    add_one = self.mode == 'distance'\n    return self.kneighbors_graph(X, mode=self.mode, n_neighbors=self.n_neighbors + add_one)",
    "docstring": "Compute the (weighted) graph of Neighbors for points in X. Parameters ---------- X : array-like of shape (n_samples_transform, n_features) Sample data. Returns ------- Xt : sparse matrix of shape (n_samples_transform, n_samples_fit) Xt[i, j] is assigned the weight of edge that connects i to j. Only the neighbors have an explicit value. The diagonal is always explicit. The matrix is of CSR format.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\neighbors\\_graph.py",
    "ast_data": "FunctionDef name:transform arg:self arg:X arguments arg arg Call Assign Compare Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "_apply_params",
    "source_code": "def _apply_params(*args, **kwargs):\n\n    def _apply(fn):\n        return fn(*args, **kwargs)\n    return _apply",
    "docstring": "Returns a decorator that calls the decorated (higher-order) function with the given parameters.",
    "type": "function",
    "file_path": "pytorch\\torch\\onnx\\symbolic_helper.py",
    "ast_data": "FunctionDef name:_apply_params arguments arg arg FunctionDef name:_apply arg:fn arguments arg Return return:yes Call Return return:yes"
  },
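A usage sketch for _apply_params: the returned decorator immediately applies the stored arguments, so the decorated name is bound to the call's result. The power function here is hypothetical:

```python
def _apply_params(*args, **kwargs):
    def _apply(fn):
        return fn(*args, **kwargs)
    return _apply

@_apply_params(3, exponent=2)
def power(base, exponent):
    return base ** exponent

print(power)  # 9 -- 'power' now holds the result of power(3, exponent=2)
```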
  {
    "library": "kornia",
    "name": "frame_src",
    "source_code": "@property\ndef frame_src(self) -> str:\n    return self._frame_src",
    "docstring": "Name of the source frame.",
    "type": "method",
    "file_path": "kornia\\kornia\\geometry\\pose.py",
    "ast_data": "FunctionDef name:frame_src arg:self arguments arg Return return:yes"
  },
  {
    "library": "kornia",
    "name": "_is_valid_arg",
    "source_code": "def _is_valid_arg(self, arg: Any) -> bool:\n    if isinstance(arg, (str,)) and os.path.exists(arg):\n        return True\n    if isinstance(arg, (Tensor,)):\n        return True\n    if isinstance(arg, (np.ndarray,)):\n        return True\n    if isinstance(arg, Image.Image):\n        return True\n    return False",
    "docstring": "Check if the argument is a valid type for conversion. Args: arg: The argument to check. Returns: bool: True if valid, False otherwise.",
    "type": "method",
    "file_path": "kornia\\kornia\\core\\mixin\\image_module.py",
    "ast_data": "FunctionDef name:_is_valid_arg arg:self arg:arg arguments arg arg If BoolOp Call Call Return return:yes If Call Return return:yes If Call Return return:yes If Call Return return:yes Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "CrossTrainerCache",
    "source_code": "@tf_export('data.experimental.service.CrossTrainerCache')\nclass CrossTrainerCache:\n\n    def __init__(self, trainer_id):\n        if not trainer_id:\n            raise ValueError('tf.data service cross-trainer cache requires a non-empty trainer ID.')\n        self.trainer_id = trainer_id\n\n    def _to_proto(self) -> data_service_pb2.CrossTrainerCacheOptions:\n        return data_service_pb2.CrossTrainerCacheOptions(trainer_id=self.trainer_id)",
    "docstring": "Options related to the tf.data service cross trainer cache. This is used to enable cross-trainer cache when distributing a dataset. For example: For more details, refer to",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\data\\experimental\\ops\\data_service_ops.py",
    "ast_data": "ClassDef name:CrossTrainerCache FunctionDef name:__init__ arg:self arg:trainer_id arguments arg arg If Raise Call Assign FunctionDef name:_to_proto arg:self arguments arg Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "upcast",
    "source_code": "def upcast(func):\n\n    @functools.wraps(func)\n    def wrapped(tensor, *args, **kwds):\n        target_dtype = _dtypes_impl.default_dtypes().complex_dtype if tensor.is_complex() else _dtypes_impl.default_dtypes().float_dtype\n        tensor = _util.cast_if_needed(tensor, target_dtype)\n        return func(tensor, *args, **kwds)\n    return wrapped",
    "docstring": "NumPy fft casts inputs to 64 bit and *returns 64-bit results*.",
    "type": "function",
    "file_path": "pytorch\\torch\\_numpy\\fft.py",
    "ast_data": "FunctionDef name:upcast arg:func arguments arg FunctionDef name:wrapped arg:tensor arguments arg arg arg Assign Call Call Call Assign Call Return return:yes Call Call Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "_UnsupportedGroupCVMixin",
    "source_code": "class _UnsupportedGroupCVMixin:\n\n    def split(self, X, y=None, groups=None):\n        if groups is not None:\n            warnings.warn(f'The groups parameter is ignored by {self.__class__.__name__}', UserWarning)\n        return super().split(X, y, groups=groups)",
    "docstring": "Mixin for splitters that do not support Groups.",
    "type": "class",
    "file_path": "scikit-learn\\sklearn\\model_selection\\_split.py",
    "ast_data": "ClassDef name:_UnsupportedGroupCVMixin FunctionDef name:split arg:self arg:X arg:y arg:groups arguments arg arg arg arg If Compare Call Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "clear_gradient_debuggers",
    "source_code": "def clear_gradient_debuggers():\n    _gradient_debuggers.clear()",
    "docstring": "Clear all globally registered gradient debuggers.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\debug\\lib\\debug_gradients.py",
    "ast_data": "FunctionDef name:clear_gradient_debuggers arguments Call"
  },
  {
    "library": "tensorflow",
    "name": "log_softmax",
    "source_code": "@tf_export(v1=['nn.log_softmax', 'math.log_softmax'])\n@dispatch.register_unary_elementwise_api\n@dispatch.add_dispatch_support\n@deprecation.deprecated_args(None, 'dim is deprecated, use axis instead', 'dim')\ndef log_softmax(logits, axis=None, name=None, dim=None):\n    axis = deprecation.deprecated_argument_lookup('axis', axis, 'dim', dim)\n    if axis is None:\n        axis = -1\n    return _wrap_2d_function(logits, gen_nn_ops.log_softmax, axis, name)",
    "docstring": "Computes log softmax activations. For each batch and class we have logsoftmax = logits - log(reduce_sum(exp(logits), axis)) Args: logits: A non-empty . Must be one of the following types: , , . axis: The dimension softmax would be performed on. The default is -1 which indicates the last dimension. name: A name for the operation (optional). dim: Deprecated alias for . Returns: A . Has the same type as . Same shape as . Raises: InvalidArgumentError: if is empty or is beyond the last dimension of .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\nn_ops.py",
    "ast_data": "FunctionDef name:log_softmax arg:logits arg:axis arg:name arg:dim arguments arg arg arg arg Assign Call If Compare Assign Return return:yes Call Call Call"
  },
  {
    "library": "scipy",
    "name": "rvs",
    "source_code": "def rvs(self, mu=0, lmbda=1, a=1, b=1, size=None, random_state=None):\n    random_state = self._get_random_state(random_state)\n    s2 = invgamma(a, scale=b).rvs(size=size, random_state=random_state)\n    scale = (s2 / lmbda) ** 0.5\n    x = norm(loc=mu, scale=scale).rvs(size=size, random_state=random_state)\n    dtype = np.result_type(1.0, mu, lmbda, a, b)\n    return (x.astype(dtype), s2.astype(dtype))",
    "docstring": "Draw random samples from the distribution. Parameters ---------- mu, lmbda, a, b : array_like, optional Shape parameters. , , and must be greater than zero. size : int or tuple of ints, optional Shape of samples to draw. random_state : {None, int, np.random.RandomState, np.random.Generator}, optional Used for drawing random variates. If is , the singleton is used. If is an int, a new `random_staterandom_stateNone`. Returns ------- x, s2 : ndarray Random variates.",
    "type": "method",
    "file_path": "scipy\\scipy\\stats\\_multivariate.py",
    "ast_data": "FunctionDef name:rvs arg:self arg:mu arg:lmbda arg:a arg:b arg:size arg:random_state arguments arg arg arg arg arg arg arg Assign Call Assign Call Call Assign Assign Call Call Assign Call Return return:yes Call Call"
  },
  {
    "library": "matplotlib",
    "name": "properties",
    "source_code": "def properties(self):\n    return ArtistInspector(self).properties()",
    "docstring": "Return a dictionary of all the properties of the artist.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\artist.py",
    "ast_data": "FunctionDef name:properties arg:self arguments arg Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_to_variant",
    "source_code": "def _to_variant(self, batched_input=False, name=None):\n    with ops.name_scope(name, 'RaggedToVariant', [self, batched_input]):\n        return gen_ragged_conversion_ops.ragged_tensor_to_variant(self.nested_row_splits, self.flat_values, batched_input, name)",
    "docstring": "Converts this into a Tensor. If is , then the is unbatched along the zero-th dimension, each component is encoded into a scalar Tensor, and these are stacked to return a 1-D Tensor. If is , then the is encoded as is and a scalar Tensor is returned. Example: >>> rt = tf.ragged.constant([[[0]], [[1]], [[2]]]) >>> rt._to_variant().shape.as_list() [] >>> rt._to_variant(batched_input=True).shape.as_list() [3] Args: batched_input: If , the is unbatched and converted to a vector. Set to by default. name: A name prefix for the returned tensors (optional). Returns: A Tensor that encodes this .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\ragged_tensor.py",
    "ast_data": "FunctionDef name:_to_variant arg:self arg:batched_input arg:name arguments arg arg arg With Call Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "setup",
    "source_code": "def setup(self, fig, outfile, dpi=None, frame_prefix=None):\n    Path(outfile).parent.resolve(strict=True)\n    self.fig = fig\n    self.outfile = outfile\n    if dpi is None:\n        dpi = self.fig.dpi\n    self.dpi = dpi\n    self._adjust_frame_size()\n    if frame_prefix is None:\n        self._tmpdir = TemporaryDirectory()\n        self.temp_prefix = str(Path(self._tmpdir.name, 'tmp'))\n    else:\n        self._tmpdir = None\n        self.temp_prefix = frame_prefix\n    self._frame_counter = 0\n    self._temp_paths = list()\n    self.fname_format_str = '%s%%07d.%s'",
    "docstring": "Setup for writing the movie file. Parameters ---------- fig : The figure to grab the rendered frames from. outfile : str The filename of the resulting movie file. dpi : float, default: `finish`; if not *None*, no temporary files are deleted.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\animation.py",
    "ast_data": "FunctionDef name:setup arg:self arg:fig arg:outfile arg:dpi arg:frame_prefix arguments arg arg arg arg arg Call Call Assign Assign If Compare Assign Assign Call If Compare Assign Call Assign Call Call Assign Assign Assign Assign Call Assign"
  },
  {
    "library": "numpy",
    "name": "argmax",
    "source_code": "@array_function_dispatch(_argmax_dispatcher)\ndef argmax(a, axis=None, out=None, *, keepdims=np._NoValue):\n    kwds = {'keepdims': keepdims} if keepdims is not np._NoValue else {}\n    return _wrapfunc(a, 'argmax', axis=axis, out=out, **kwds)",
    "docstring": "Returns the indices of the maximum values along an axis. Parameters ---------- a : array_like Input array. axis : int, optional By default, the index is into the flattened array, otherwise along the specified axis. out : array, optional If provided, the result will be inserted into this array. It should be of the appropriate shape and dtype. keepdims : bool, optional If this is set to True, the axes which are reduced are left in the result as dimensions with size one. With this option, the result will broadcast correctly against the array. .. versionadded:: 1.22.0 Returns ------- index_array : ndarray of ints Array of indices into the array. It has the same shape as `axiskeepdimsaxiskeepdimsTrue`, >>> x = np.arange(24).reshape((2, 3, 4)) >>> res = np.argmax(x, axis=1, keepdims=True) >>> res.shape (2, 1, 4)",
    "type": "function",
    "file_path": "numpy\\numpy\\_core\\fromnumeric.py",
    "ast_data": "FunctionDef name:argmax arg:a arg:axis arg:out arguments arg arg arg arg Assign Compare Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "__call__",
    "source_code": "@abc.abstractmethod\ndef __call__(self):\n    pass",
    "docstring": "Returns the current loss scale as a scalar tensor.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\training\\experimental\\loss_scale.py",
    "ast_data": "FunctionDef name:__call__ arg:self arguments arg"
  },
  {
    "library": "scipy",
    "name": "_matvec",
    "source_code": "def _matvec(self, x):\n    s, y, n_corrs, rho = (self.sk, self.yk, self.n_corrs, self.rho)\n    q = np.array(x, dtype=self.dtype, copy=True)\n    if q.ndim == 2 and q.shape[1] == 1:\n        q = q.reshape(-1)\n    alpha = np.empty(n_corrs)\n    for i in range(n_corrs - 1, -1, -1):\n        alpha[i] = rho[i] * np.dot(s[i], q)\n        q = q - alpha[i] * y[i]\n    r = q\n    for i in range(n_corrs):\n        beta = rho[i] * np.dot(y[i], r)\n        r = r + s[i] * (alpha[i] - beta)\n    return r",
    "docstring": "Efficient matrix-vector multiply with the BFGS matrices. This calculation is described in Section (4) of [1]. Parameters ---------- x : ndarray An array with shape (n,) or (n,1). Returns ------- y : ndarray The matrix-vector product",
    "type": "method",
    "file_path": "scipy\\scipy\\optimize\\_lbfgsb_py.py",
    "ast_data": "FunctionDef name:_matvec arg:self arg:x arguments arg arg Assign Assign Call If BoolOp Compare Compare Assign Call Assign Call For Call Assign Call Assign Assign For Call Assign Call Assign Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "GraphID",
    "source_code": "@dataclasses.dataclass(frozen=True)\nclass GraphID:\n    id: int",
    "docstring": "Unique counter of a cuda graph recording",
    "type": "class",
    "file_path": "pytorch\\torch\\_inductor\\cudagraph_trees.py",
    "ast_data": "ClassDef name:GraphID Call"
  },
  {
    "library": "tensorflow",
    "name": "maybe_reshard",
    "source_code": "def maybe_reshard(self, name: str) -> tuple[str, Optional[ReshardCallback]]:\n    callback = self.get_reshard_callback(name)\n    if callback is None:\n        return (name, None)\n    if callback.object_name():\n        return (callback.object_name(), callback)\n    return (name, callback)",
    "docstring": "Returns the updated name and ReshardCallback applicable to it.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\checkpoint\\checkpoint_adapter.py",
    "ast_data": "FunctionDef name:maybe_reshard arg:self arg:name arguments arg arg Assign Call If Compare Return return:yes If Call Return return:yes Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, termination_watcher_fn=None, exit_fn=None, grace_period=None, save_fn=None):\n    self.termination_watcher_fn = termination_watcher_fn\n    self.exit_fn = exit_fn\n    self.grace_period = grace_period\n    self.save_fn = save_fn",
    "docstring": "Creates a object. Args: termination_watcher_fn: a function to execute repeatedly that returns if a preemption signal is available and False otherwise. The function cannot block until a preemption signal is available, which prevents proper cleanup of the program. A change is **NOT** recommended for users on Google Borg or Google Cloud Platform. exit_fn: a function to execute after a checkpoint is saved and before the preemption happens. Usually, it should be in the form of , where varies by platform. A change is **NOT** recommended for users on Google Borg. Users on Google Cloud Platform may configure it to use a customized . grace_period: the length of time between receiving a preemption signal and the actual preemption. A change is **NOT** recommended for users on Google Borg, Google Cloud Platform, or users with a short grace period. save_fn: an optional function letting you configure how to save a checkpoint. This is useful if you'd like to pass extra argument to or . By default, if not configured, the API will save checkpoint without extra arguments.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\failure_handling\\failure_handling.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:termination_watcher_fn arg:exit_fn arg:grace_period arg:save_fn arguments arg arg arg arg arg Assign Assign Assign Assign"
  },
  {
    "library": "tensorflow",
    "name": "_num_slices_in_dimension",
    "source_code": "def _num_slices_in_dimension(self, axis: int) -> Optional[int]:\n    if not isinstance(axis, int):\n        raise TypeError('axis must be an integer')\n    axis = array_ops.get_positive_axis(axis, self.rank, ndims_name='rank')\n    if axis == 0:\n        return self._dimension(0)\n    if axis <= self.num_row_partitions:\n        return self._row_partitions[axis - 1].nvals\n    remainder = axis - (self.num_row_partitions - 1)\n    head_inner_shape = self._static_inner_shape[:remainder]\n    return head_inner_shape.num_elements()",
    "docstring": "The total size of a dimension (like nvals). This is a static version of DynamicRaggedShape._num_slices_in_dimension() Example: Args: axis: the last dimension to include. Returns: the number of values in a dimension.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\dynamic_ragged_shape.py",
    "ast_data": "FunctionDef name:_num_slices_in_dimension arg:self arg:axis arguments arg arg If Call Raise Call Assign Call If Compare Return return:yes Call If Compare Return return:yes Assign Assign Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "_find_line_box_crossings",
    "source_code": "def _find_line_box_crossings(xys, bbox):\n    crossings = []\n    dxys = xys[1:] - xys[:-1]\n    for sl in [slice(None), slice(None, None, -1)]:\n        us, vs = xys.T[sl]\n        dus, dvs = dxys.T[sl]\n        umin, vmin = bbox.min[sl]\n        umax, vmax = bbox.max[sl]\n        for u0, inside in [(umin, us > umin), (umax, us < umax)]:\n            cross = []\n            idxs, = (inside[:-1] ^ inside[1:]).nonzero()\n            vv = vs[idxs] + (u0 - us[idxs]) * dvs[idxs] / dus[idxs]\n            crossings.append([((u0, v)[sl], np.degrees(np.arctan2(*dxy[::-1]))) for v, dxy in zip(vv, dxys[idxs]) if vmin <= v <= vmax])\n    return crossings",
    "docstring": "Find the points where a polyline crosses a bbox, and the crossing angles. Parameters ---------- xys : (N, 2) array The polyline coordinates. bbox : The bounding box. Returns ------- list of ((float, float), float) Four separate lists of crossings, for the left, right, bottom, and top sides of the bbox, respectively. For each list, the entries are the `` of the crossing, where an angle of 0 means that the polyline is moving to the right at the crossing point. The entries are computed by linearly interpolating at each crossing between the nearest points on either side of the bbox edges.",
    "type": "function",
    "file_path": "matplotlib\\lib\\mpl_toolkits\\axisartist\\grid_finder.py",
    "ast_data": "FunctionDef name:_find_line_box_crossings arg:xys arg:bbox arguments arg arg Assign Assign For Call Call Assign Assign Assign Assign For Compare Compare Assign Assign Call Assign Call Call Call Call Compare Return return:yes"
  },
  {
    "library": "pandas",
    "name": "nbytes",
    "source_code": "@property\ndef nbytes(self) -> int:\n    return self._values.nbytes",
    "docstring": "Return the number of bytes in the underlying data. See Also -------- Series.ndim : Number of dimensions of the underlying data. Series.size : Return the number of elements in the underlying data. Examples -------- For Series: >>> s = pd.Series([\"Ant\", \"Bear\", \"Cow\"]) >>> s 0 Ant 1 Bear 2 Cow dtype: object >>> s.nbytes 24 For Index: >>> idx = pd.Index([1, 2, 3]) >>> idx Index([1, 2, 3], dtype='int64') >>> idx.nbytes 24",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\base.py",
    "ast_data": "FunctionDef name:nbytes arg:self arguments arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "linear",
    "source_code": "def linear(input: Tensor, weight: Tensor, bias: Optional[Tensor]=None, scale: Optional[float]=None, zero_point: Optional[int]=None) -> Tensor:\n    if scale is None:\n        scale = input.q_scale()\n    if zero_point is None:\n        zero_point = input.q_zero_point()\n    _packed_params = torch.ops.quantized.linear_prepack(weight, bias)\n    return torch.ops.quantized.linear(input, _packed_params, scale, zero_point)",
    "docstring": "Applies a linear transformation to the incoming quantized data: :math:. See :class: .. note:: Current implementation packs weights on every call, which has penalty on performance. If you want to avoid the overhead, use :class:. Args: input (Tensor): Quantized input of type weight (Tensor): Quantized weight of type bias (Tensor): None or fp32 bias of type scale (double): output scale. If None, derived from the input scale zero_point (long): output zero point. If None, derived from the input zero_point Shape: - Input: :math: where means any number of additional dimensions - Weight: :math: - Bias: :math: - Output: :math:",
    "type": "function",
    "file_path": "pytorch\\torch\\ao\\nn\\quantized\\functional.py",
    "ast_data": "FunctionDef name:linear arg:input arg:weight arg:bias arg:scale arg:zero_point arguments arg arg arg arg arg If Compare Assign Call If Compare Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "django",
    "name": "__add__",
    "source_code": "def __add__(self, other):\n    return self.__class__([*self, *other])",
    "docstring": "add another list-like object",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\geos\\mutable_list.py",
    "ast_data": "FunctionDef name:__add__ arg:self arg:other arguments arg arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_get_available_gpus",
    "source_code": "def _get_available_gpus():\n    if ops.executing_eagerly_outside_functions():\n        return [d.name for d in config.list_logical_devices('GPU')]\n    global _LOCAL_DEVICES\n    if _LOCAL_DEVICES is None:\n        _LOCAL_DEVICES = get_session().list_devices()\n    return [x.name for x in _LOCAL_DEVICES if x.device_type == 'GPU']",
    "docstring": "Get a list of available GPU devices (formatted as strings). Returns: A list of available GPU devices.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\backend.py",
    "ast_data": "FunctionDef name:_get_available_gpus arguments If Call Return return:yes Call If Compare Assign Call Call Return return:yes Compare"
  },
  {
    "library": "pygame",
    "name": "get_packages",
    "source_code": "def get_packages(arch: str) -> list:\n    deps = ['{}-SDL2', '{}-SDL2_ttf', '{}-SDL2_image', '{}-SDL2_mixer', '{}-portmidi', '{}-libpng', '{}-libjpeg-turbo', '{}-libtiff', '{}-zlib', '{}-libwebp', '{}-libvorbis', '{}-libogg', '{}-flac', '{}-libmodplug', '{}-mpg123', '{}-opus', '{}-opusfile', '{}-freetype', '{}-python-build', '{}-python-installer', '{}-python-setuptools', '{}-python-wheel', '{}-python-pip', '{}-python-numpy', '{}-python-sphinx', '{}-cmake', '{}-cc', '{}-cython']\n    full_arch_names = {'clang32': 'mingw-w64-clang-i686', 'clang64': 'mingw-w64-clang-x86_64', 'mingw32': 'mingw-w64-i686', 'mingw64': 'mingw-w64-x86_64', 'ucrt64': 'mingw-w64-ucrt-x86_64', 'clangarm64': 'mingw-w64-clang-aarch64'}\n    return [x.format(full_arch_names[arch]) for x in deps]",
    "docstring": "Returns a list of package names formatted with the specific architecture prefix. :param arch: The architecture identifier string, e.g., \"mingw64\", \"clang32\", etc. It is used to select the appropriate prefix for package names. :return: A list of fully formatted package names based on the given architecture. Example: If the 'arch' parameter is \"mingw32\", the return value will be a list like: [ 'mingw-w64-i686-SDL2', 'mingw-w64-i686-SDL2_ttf', 'mingw-w64-i686-SDL2_image', ... ]",
    "type": "function",
    "file_path": "pygame\\buildconfig\\download_msys2_prebuilt.py",
    "ast_data": "FunctionDef name:get_packages arg:arch arguments arg Assign Assign Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "_log_traced_frames",
    "source_code": "@atexit.register\ndef _log_traced_frames():\n    msg = '\\n'.join(dynamo_tls.traced_frame_infos)\n    msg = textwrap.indent(msg, '  * ')\n    msg = f'TorchDynamo attempted to trace the following frames: [\\n{msg}\\n]'\n    log.info(msg)",
    "docstring": "At program exit, log all of the frames Dynamo has attempted to trace from, excluding the continuation frames generated by Dynamo.",
    "type": "function",
    "file_path": "pytorch\\torch\\_dynamo\\eval_frame.py",
    "ast_data": "FunctionDef name:_log_traced_frames arguments Assign Call Assign Call Assign Call"
  },
  {
    "library": "matplotlib",
    "name": "_get_compressed_triangulation",
    "source_code": "def _get_compressed_triangulation(self):\n    tri_mask = self._triangulation.mask\n    compressed_triangles = self._triangulation.get_masked_triangles()\n    ntri = self._triangulation.triangles.shape[0]\n    if tri_mask is not None:\n        tri_renum = self._total_to_compress_renum(~tri_mask)\n    else:\n        tri_renum = np.arange(ntri, dtype=np.int32)\n    valid_node = np.bincount(np.ravel(compressed_triangles), minlength=self._triangulation.x.size) != 0\n    compressed_x = self._triangulation.x[valid_node]\n    compressed_y = self._triangulation.y[valid_node]\n    node_renum = self._total_to_compress_renum(valid_node)\n    compressed_triangles = node_renum[compressed_triangles]\n    return (compressed_triangles, compressed_x, compressed_y, tri_renum, node_renum)",
    "docstring": "Compress (if masked) the encapsulated triangulation. Returns minimal-length triangles array (*compressed_triangles*) and coordinates arrays (*compressed_x*, *compressed_y*) that can still describe the unmasked triangles of the encapsulated triangulation. Returns ------- compressed_triangles : array-like the returned compressed triangulation triangles compressed_x : array-like the returned compressed triangulation 1st coordinate compressed_y : array-like the returned compressed triangulation 2nd coordinate tri_renum : int array renumbering table to translate the triangle numbers from the encapsulated triangulation into the new (compressed) renumbering. -1 for masked triangles (deleted from *compressed_triangles*). node_renum : int array renumbering table to translate the point numbers from the encapsulated triangulation into the new (compressed) renumbering. -1 for unused points (i.e. those deleted from *compressed_x* and *compressed_y*).",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\tri\\_tritools.py",
    "ast_data": "FunctionDef name:_get_compressed_triangulation arg:self arguments arg Assign Assign Call Assign If Compare Assign Call Assign Call Assign Compare Call Call Assign Assign Assign Call Assign Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "_set_lims",
    "source_code": "def _set_lims(self):\n    if self._orientation == 'x':\n        lims = self._parent.get_xlim()\n        set_lim = self.set_xlim\n    else:\n        lims = self._parent.get_ylim()\n        set_lim = self.set_ylim\n    order = lims[0] < lims[1]\n    lims = self._functions[0](np.array(lims))\n    neworder = lims[0] < lims[1]\n    if neworder != order:\n        lims = lims[::-1]\n    set_lim(lims)",
    "docstring": "Set the limits based on parent limits and the convert method between the parent and this secondary Axes.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\axes\\_secondary_axes.py",
    "ast_data": "FunctionDef name:_set_lims arg:self arguments arg If Compare Assign Call Assign Assign Call Assign Assign Compare Assign Call Call Assign Compare If Compare Assign Call"
  },
  {
    "library": "pytorch",
    "name": "atleast_3d",
    "source_code": "def atleast_3d(*tensors):\n    if has_torch_function(tensors):\n        return handle_torch_function(atleast_3d, tensors, *tensors)\n    if len(tensors) == 1:\n        tensors = tensors[0]\n    return _VF.atleast_3d(tensors)",
    "docstring": "Returns a 3-dimensional view of each input tensor with zero dimensions. Input tensors with three or more dimensions are returned as-is. Args: input (Tensor or list of Tensors) Returns: output (Tensor or tuple of Tensors) Example: >>> x = torch.tensor(0.5) >>> x tensor(0.5000) >>> torch.atleast_3d(x) tensor([[[0.5000]]]) >>> y = torch.arange(4).view(2, 2) >>> y tensor([[0, 1], [2, 3]]) >>> torch.atleast_3d(y) tensor([[[0], [1]], [[2], [3]]]) >>> x = torch.tensor(1).view(1, 1, 1) >>> x tensor([[[1]]]) >>> torch.atleast_3d(x) tensor([[[1]]]) >>> x = torch.tensor(0.5) >>> y = torch.tensor(1.0) >>> torch.atleast_3d((x, y)) (tensor([[[0.5000]]]), tensor([[[1.]]]))",
    "type": "function",
    "file_path": "pytorch\\torch\\functional.py",
    "ast_data": "FunctionDef name:atleast_3d arguments arg If Call Return return:yes Call If Compare Call Assign Return return:yes Call"
  },
  {
    "library": "pygame",
    "name": "__init__",
    "source_code": "def __init__(self, ratio):\n    self.ratio = ratio",
    "docstring": "creates a new collide_circle_ratio callable instance The given ratio is expected to be a floating point value used to scale the underlying sprite radius before checking for collisions. When the ratio is ratio=1.0, then it behaves exactly like the collide_circle method.",
    "type": "method",
    "file_path": "pygame\\src_py\\sprite.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:ratio arguments arg arg Assign"
  },
  {
    "library": "pytorch",
    "name": "T",
    "source_code": "class T(Constraint):\n\n    def __init__(self) -> None:\n        pass\n\n    def __eq__(self, other):\n        return isinstance(other, T)\n\n    def __repr__(self):\n        return 'True'",
    "docstring": "True",
    "type": "class",
    "file_path": "pytorch\\torch\\fx\\experimental\\migrate_gradual_types\\constraint.py",
    "ast_data": "ClassDef name:T FunctionDef name:__init__ arg:self arguments arg FunctionDef name:__eq__ arg:self arg:other arguments arg arg Return return:yes Call FunctionDef name:__repr__ arg:self arguments arg Return return:yes"
  },
  {
    "library": "cherrypy",
    "name": "__init__",
    "source_code": "def __init__(self, point, callable, name=None, priority=50):\n    self._point = point\n    self.callable = callable\n    self._name = name\n    self._priority = priority\n    self.__doc__ = self.callable.__doc__\n    self._setargs()",
    "docstring": "Initialize a CherryPy tool instance.",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\_cptools.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:point arg:callable arg:name arg:priority arguments arg arg arg arg arg Assign Assign Assign Assign Assign Call"
  },
  {
    "library": "tensorflow",
    "name": "_dense_var_to_tensor",
    "source_code": "def _dense_var_to_tensor(self, dtype=None, name=None, as_ref=False):\n    if tpu_util.enclosing_tpu_context() is None:\n        return self.read_value()\n    else:\n        return self._read_variable_op()",
    "docstring": "Converts a variable to a tensor.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\tpu_replicated_variable.py",
    "ast_data": "FunctionDef name:_dense_var_to_tensor arg:self arg:dtype arg:name arg:as_ref arguments arg arg arg arg If Compare Call Return return:yes Call Return return:yes Call"
  },
  {
    "library": "numpy",
    "name": "chebweight",
    "source_code": "def chebweight(x):\n    w = 1.0 / (np.sqrt(1.0 + x) * np.sqrt(1.0 - x))\n    return w",
    "docstring": "The weight function of the Chebyshev polynomials. The weight function is :math: and the interval of integration is :math:. The Chebyshev polynomials are orthogonal, but not normalized, with respect to this weight function. Parameters ---------- x : array_like Values at which the weight function will be computed. Returns ------- w : ndarray The weight function at .",
    "type": "function",
    "file_path": "numpy\\numpy\\polynomial\\chebyshev.py",
    "ast_data": "FunctionDef name:chebweight arg:x arguments arg Assign Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "get_len",
    "source_code": "def get_len(pattern):\n    len = 0\n    if isinstance(pattern, tuple):\n        for item in pattern:\n            len += get_len(item)\n    else:\n        len += 1\n    return len",
    "docstring": "this will calculate the length of the pattern by counting all the entries in the pattern. this will make sure (nn.ReLU, (nn.BatchNorm, nn.Conv2d)) comes before (nn.BatchNorm, nn.Conv2d) so that we can match the former first",
    "type": "function",
    "file_path": "pytorch\\torch\\ao\\quantization\\fx\\pattern_utils.py",
    "ast_data": "FunctionDef name:get_len arg:pattern arguments arg Assign If Call For Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "Trace",
    "source_code": "@tf_export('profiler.experimental.Trace', v1=[])\nclass Trace(object):\n\n    def __init__(self, name, **kwargs):\n        if enabled:\n            self._traceme = _pywrap_traceme.TraceMe(name, **kwargs)\n        else:\n            self._traceme = None\n\n    def __enter__(self):\n        return self\n\n    def set_metadata(self, **kwargs):\n        if self._traceme and kwargs:\n            self._traceme.SetMetadata(**kwargs)\n\n    def __exit__(self, exc_type, exc_val, exc_tb):\n        if self._traceme:\n            self._traceme.Stop()",
    "docstring": "Context manager that generates a trace event in the profiler. A trace event will start when entering the context, and stop and save the result to the profiler when exiting the context. Open TensorBoard Profile tab and choose trace viewer to view the trace event in the timeline. Trace events are created only when the profiler is enabled. More information on how to use the profiler can be found at Example usage:",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\profiler\\trace.py",
    "ast_data": "ClassDef name:Trace FunctionDef name:__init__ arg:self arg:name arguments arg arg arg If Assign Call Assign FunctionDef name:__enter__ arg:self arguments arg Return return:yes FunctionDef name:set_metadata arg:self arguments arg arg If BoolOp Call FunctionDef name:__exit__ arg:self arg:exc_type arg:exc_val arg:exc_tb arguments arg arg arg arg If Call Call"
  },
  {
    "library": "sphinx",
    "name": "is_longtable",
    "source_code": "def is_longtable(self) -> bool:\n    return self.row > 30 or 'longtable' in self.classes",
    "docstring": "True if and only if table uses longtable environment.",
    "type": "method",
    "file_path": "sphinx\\sphinx\\writers\\latex.py",
    "ast_data": "FunctionDef name:is_longtable arg:self arguments arg Return return:yes BoolOp Compare Compare"
  },
  {
    "library": "scikit-learn",
    "name": "GridSearchBenchmark",
    "source_code": "class GridSearchBenchmark(Predictor, Estimator, Benchmark):\n    timeout = 20000\n    param_names = ['n_jobs']\n    params = (Benchmark.n_jobs_vals,)\n\n    def setup_cache(self):\n        super().setup_cache()\n\n    def make_data(self, params):\n        data = _synth_classification_dataset(n_samples=10000, n_features=100)\n        return data\n\n    def make_estimator(self, params):\n        n_jobs, = params\n        clf = RandomForestClassifier(random_state=0)\n        if Benchmark.data_size == 'large':\n            n_estimators_list = [10, 25, 50, 100, 500]\n            max_depth_list = [5, 10, None]\n            max_features_list = [0.1, 0.4, 0.8, 1.0]\n        else:\n            n_estimators_list = [10, 25, 50]\n            max_depth_list = [5, 10]\n            max_features_list = [0.1, 0.4, 0.8]\n        param_grid = {'n_estimators': n_estimators_list, 'max_depth': max_depth_list, 'max_features': max_features_list}\n        estimator = GridSearchCV(clf, param_grid, n_jobs=n_jobs, cv=4)\n        return estimator\n\n    def make_scorers(self):\n        make_gen_classif_scorers(self)",
    "docstring": "Benchmarks for GridSearch.",
    "type": "class",
    "file_path": "scikit-learn\\asv_benchmarks\\benchmarks\\model_selection.py",
    "ast_data": "ClassDef name:GridSearchBenchmark Assign Assign Assign FunctionDef name:setup_cache arg:self arguments arg Call Call FunctionDef name:make_data arg:self arg:params arguments arg arg Assign Call Return return:yes FunctionDef name:make_estimator arg:self arg:params arguments arg arg Assign Assign Call If Compare Assign Assign Assign Assign Assign Assign Assign Assign Call Return return:yes FunctionDef name:make_scorers arg:self arguments arg Call"
  },
  {
    "library": "scipy",
    "name": "subsets",
    "source_code": "def subsets(self):\n    result = []\n    visited = set()\n    for x in self:\n        if x not in visited:\n            xset = self.subset(x)\n            visited.update(xset)\n            result.append(xset)\n    return result",
    "docstring": "Get all the subsets in the disjoint set. Returns ------- result : list Subsets in the disjoint set.",
    "type": "method",
    "file_path": "scipy\\scipy\\_lib\\_disjoint_set.py",
    "ast_data": "FunctionDef name:subsets arg:self arguments arg Assign Assign Call For If Compare Assign Call Call Call Return return:yes"
  },
  {
    "library": "django",
    "name": "complex_filter",
    "source_code": "def complex_filter(self, filter_obj):\n    if isinstance(filter_obj, Q):\n        clone = self._chain()\n        clone.query.add_q(filter_obj)\n        return clone\n    else:\n        return self._filter_or_exclude(False, args=(), kwargs=filter_obj)",
    "docstring": "Return a new QuerySet instance with filter_obj added to the filters. filter_obj can be a Q object or a dictionary of keyword lookup arguments. This exists to support framework features such as 'limit_choices_to', and usually it will be more natural to use other methods.",
    "type": "method",
    "file_path": "django\\django\\db\\models\\query.py",
    "ast_data": "FunctionDef name:complex_filter arg:self arg:filter_obj arguments arg arg If Call Assign Call Call Return return:yes Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "close",
    "source_code": "def close(self):\n    if not self._closed:\n        self.flush()\n        self._session.run(self._close_op)\n        self._closed = True",
    "docstring": "Flushes the event file to disk and close the file. Call this method when you do not need the summary writer anymore.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\summary\\writer\\event_file_writer_v2.py",
    "ast_data": "FunctionDef name:close arg:self arguments arg If Call Call Assign"
  },
  {
    "library": "pytorch",
    "name": "attr_probs",
    "source_code": "def attr_probs(**probs):\n    return probs",
    "docstring": "return the inputs in a dictionary",
    "type": "function",
    "file_path": "pytorch\\benchmarks\\operator_benchmark\\benchmark_utils.py",
    "ast_data": "FunctionDef name:attr_probs arguments arg Return return:yes"
  },
  {
    "library": "django",
    "name": "safe",
    "source_code": "@register.filter(is_safe=True)\n@stringfilter\ndef safe(value):\n    return mark_safe(value)",
    "docstring": "Mark the value as a string that should not be auto-escaped.",
    "type": "function",
    "file_path": "django\\django\\template\\defaultfilters.py",
    "ast_data": "FunctionDef name:safe arg:value arguments arg Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "_get_tensor_methods",
    "source_code": "@functools.lru_cache(None)\ndef _get_tensor_methods() -> set[Callable]:\n    overridable_funcs = get_overridable_functions()\n    methods = set(overridable_funcs[torch.Tensor])\n    return methods",
    "docstring": "Returns a set of the overridable methods on ``",
    "type": "function",
    "file_path": "pytorch\\torch\\overrides.py",
    "ast_data": "FunctionDef name:_get_tensor_methods arguments Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "Maximum",
    "source_code": "class Maximum(_Merge):\n\n    def _merge_function(self, inputs):\n        output = inputs[0]\n        for i in range(1, len(inputs)):\n            output = math_ops.maximum(output, inputs[i])\n        return output",
    "docstring": "Layer that computes the maximum (element-wise) a list of inputs. It takes as input a list of tensors, all of the same shape, and returns a single tensor (also of the same shape). >>> tf.keras.layers.Maximum()([np.arange(5).reshape(5, 1), ... np.arange(5, 10).reshape(5, 1)]) >>> x1 = tf.keras.layers.Dense(8)(np.arange(10).reshape(5, 2)) >>> x2 = tf.keras.layers.Dense(8)(np.arange(10, 20).reshape(5, 2)) >>> maxed = tf.keras.layers.Maximum()([x1, x2]) >>> maxed.shape TensorShape([5, 8])",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\layers\\merge.py",
    "ast_data": "ClassDef name:Maximum FunctionDef name:_merge_function arg:self arg:inputs arguments arg arg Assign For Call Call Assign Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "__create_chunk_list__",
    "source_code": "def __create_chunk_list__(self) -> list[object]:\n    raise NotImplementedError('_Checkpointable._create_chunk_list is not implemented')",
    "docstring": "Return a list of based on object's contents.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\_checkpointable.py",
    "ast_data": "FunctionDef name:__create_chunk_list__ arg:self arguments arg Raise Call"
  },
  {
    "library": "scikit-learn",
    "name": "_lazy_apply_wrapper",
    "source_code": "def _lazy_apply_wrapper(func: Callable[..., Array | ArrayLike | Sequence[Array | ArrayLike]], as_numpy: bool, multi_output: bool, xp: ModuleType) -> Callable[..., tuple[Array, ...]]:\n\n    @wraps(func)\n    def wrapper(*args: Array | complex | None, **kwargs: Any) -> tuple[Array, ...]:\n        args_list = []\n        device = None\n        for arg in args:\n            if arg is not None and (not is_python_scalar(arg)):\n                if device is None:\n                    device = _compat.device(arg)\n                if as_numpy:\n                    import numpy as np\n                    arg = cast(Array, np.asarray(arg))\n            args_list.append(arg)\n        assert device is not None\n        out = func(*args_list, **kwargs)\n        if multi_output:\n            assert isinstance(out, Sequence)\n            return tuple((xp.asarray(o, device=device) for o in out))\n        return (xp.asarray(out, device=device),)\n    return wrapper",
    "docstring": "Helper of . Given a function that accepts one or more arrays as positional arguments and returns a single array-like or a sequence of array-likes, return a function that accepts the same number of Array API arrays and always returns a tuple of Array API array. Any keyword arguments are passed through verbatim to the wrapped function.",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\externals\\array_api_extra\\_lib\\_lazy.py",
    "ast_data": "FunctionDef name:_lazy_apply_wrapper arg:func arg:as_numpy arg:multi_output arg:xp arguments arg arg arg arg FunctionDef name:wrapper arguments arg arg Assign Assign For If BoolOp Compare Call If Compare Assign Call If Assign Call Call Call Compare Assign Call If Call Return return:yes Call Call Return return:yes Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "mgpu_tune_gemm_in_file",
    "source_code": "def mgpu_tune_gemm_in_file(filename_pattern: str, num_gpus: int) -> None:\n    unique_gemm_entries = _gather_unique_untuned_gemm_from_files(filename_pattern)\n    total_gpus = torch.cuda.device_count()\n    assert 1 <= num_gpus <= total_gpus\n    mp_context = mp.get_context('spawn')\n    futures = []\n    flush_results = []\n    h = 0\n    with concurrent.futures.ProcessPoolExecutor(max_workers=num_gpus, mp_context=mp_context, initializer=_check_tuning_assertions) as executor:\n        for line in unique_gemm_entries:\n            future = executor.submit(_process_single_offline_gemm, line, h)\n            futures.append(future)\n            h = (h + 1) % num_gpus\n        for future in concurrent.futures.as_completed(futures):\n            future.result()\n        for g in range(num_gpus):\n            flush_result = executor.submit(write_file)\n            flush_results.append(flush_result)\n        for flush_result in concurrent.futures.as_completed(flush_results):\n            flush_result.result()\n    torch.cuda.synchronize()\n    _gather_tunableop_results()",
    "docstring": "Process one or more files and distribute work over one or more GPUs.",
    "type": "function",
    "file_path": "pytorch\\torch\\cuda\\tunable.py",
    "ast_data": "FunctionDef name:mgpu_tune_gemm_in_file arg:filename_pattern arg:num_gpus arguments arg arg Assign Call Assign Call Compare Assign Call Assign Assign Assign With Call For Assign Call Call Assign For Call Call For Call Assign Call Call For Call Call Call Call"
  },
  {
    "library": "sphinx",
    "name": "resolve_reference_in_inventory",
    "source_code": "def resolve_reference_in_inventory(env: BuildEnvironment, inv_name: InventoryName, node: pending_xref, contnode: TextElement) -> nodes.reference | None:\n    assert inventory_exists(env, inv_name)\n    return _resolve_reference(inv_name, env.domains, InventoryAdapter(env).named_inventory[inv_name], False, frozenset(env.config.intersphinx_disabled_reftypes), node, contnode)",
    "docstring": "Attempt to resolve a missing reference via intersphinx references. Resolution is tried in the given inventory with the target as is. Requires ``.",
    "type": "function",
    "file_path": "sphinx\\sphinx\\ext\\intersphinx\\_resolve.py",
    "ast_data": "FunctionDef name:resolve_reference_in_inventory arg:env arg:inv_name arg:node arg:contnode arguments arg arg arg arg Call Return return:yes Call Call Call"
  },
  {
    "library": "pandas",
    "name": "density",
    "source_code": "@property\ndef density(self) -> float:\n    tmp = np.mean([column.array.density for _, column in self._parent.items()])\n    return tmp",
    "docstring": "Ratio of non-sparse points to total (dense) data points. See Also -------- DataFrame.sparse.from_spmatrix : Create a new DataFrame from a scipy sparse matrix. Examples -------- >>> df = pd.DataFrame({\"A\": pd.arrays.SparseArray([0, 1, 0, 1])}) >>> df.sparse.density np.float64(0.5)",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\arrays\\sparse\\accessor.py",
    "ast_data": "FunctionDef name:density arg:self arguments arg Assign Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "handle",
    "source_code": "def handle(self, op, args, kwargs):\n    pass",
    "docstring": "Handle the specified operation with the specified arguments.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\util\\dispatch.py",
    "ast_data": "FunctionDef name:handle arg:self arg:op arg:args arg:kwargs arguments arg arg arg arg"
  },
  {
    "library": "numpy",
    "name": "check_api_version",
    "source_code": "def check_api_version(apiversion):\n    curapi_hash, api_hash = get_api_versions(apiversion)\n    if not curapi_hash == api_hash:\n        msg = f'API mismatch detected, the C API version numbers have to be updated. Current C api version is {apiversion}, with checksum {curapi_hash}, but recorded checksum in _core/codegen_dir/cversions.txt is {api_hash}. If functions were added in the C API, you have to update C_API_VERSION in {__file__}.'\n        raise MismatchCAPIError(msg)",
    "docstring": "Emits a MismatchCAPIWarning if the C API version needs updating.",
    "type": "function",
    "file_path": "numpy\\numpy\\_core\\code_generators\\verify_c_api_version.py",
    "ast_data": "FunctionDef name:check_api_version arg:apiversion arguments arg Assign Call If Compare Assign Raise Call"
  },
  {
    "library": "django",
    "name": "path",
    "source_code": "def path(self, name):\n    raise NotImplementedError(\"This backend doesn't support absolute paths.\")",
    "docstring": "Return a local filesystem path where the file can be retrieved using Python's built-in open() function. Storage systems that can't be accessed using open() should *not* implement this method.",
    "type": "method",
    "file_path": "django\\django\\core\\files\\storage\\base.py",
    "ast_data": "FunctionDef name:path arg:self arg:name arguments arg arg Raise Call"
  },
  {
    "library": "tensorflow",
    "name": "_name_based_attribute_restore",
    "source_code": "def _name_based_attribute_restore(self, checkpoint):\n    self._self_name_based_restores.add(checkpoint)\n    if self._self_update_uid < checkpoint.restore_uid:\n        checkpoint.eager_restore(self)\n        self._self_update_uid = checkpoint.restore_uid",
    "docstring": "Restore the object's attributes from a name-based checkpoint.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\trackable\\base.py",
    "ast_data": "FunctionDef name:_name_based_attribute_restore arg:self arg:checkpoint arguments arg arg Call If Compare Call Assign"
  },
  {
    "library": "django",
    "name": "fetch_returned_insert_rows",
    "source_code": "def fetch_returned_insert_rows(self, cursor):\n    return cursor.fetchall()",
    "docstring": "Given a cursor object that has just performed an INSERT...RETURNING statement into a table, return the list of returned data.",
    "type": "method",
    "file_path": "django\\django\\db\\backends\\sqlite3\\operations.py",
    "ast_data": "FunctionDef name:fetch_returned_insert_rows arg:self arg:cursor arguments arg arg Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "NewFunction02",
    "source_code": "class NewFunction02(Benchmark):\n\n    def __init__(self, dimensions=2):\n        Benchmark.__init__(self, dimensions)\n        self._bounds = list(zip([-10.0] * self.N, [10.0] * self.N))\n        self.global_optimum = [[-9.94114736324, -9.99997128772]]\n        self.fglob = -0.199409030092\n\n    def fun(self, x, *args):\n        self.nfev += 1\n        return abs(sin(sqrt(abs(x[0] ** 2 + x[1])))) ** 0.5 + 0.01 * (x[0] + x[1])",
    "docstring": "NewFunction02 objective function. This class defines the NewFunction02 global optimization problem. This is a multimodal minimization problem defined as follows: .. math:: f_{\\text{NewFunction02}}(x) = \\left | {\\sin\\left(\\sqrt{\\lvert{x_{1}^{2} + x_{2}}\\rvert}\\right)} \\right |^{0.5} + (x_{1} + x_{2})/100 with :math: for :math:. *Global optimum*: :math: for :math: .. [1] Mishra, S. Global Optimization by Differential Evolution and Particle Swarm Methods: Evaluation on Some Benchmark Functions. Munich Personal RePEc Archive, 2006, 1005 TODO Line 368 TODO WARNING, minimum value is estimated from running many optimisations and choosing the best.",
    "type": "class",
    "file_path": "scipy\\benchmarks\\benchmarks\\go_benchmark_functions\\go_funcs_N.py",
    "ast_data": "ClassDef name:NewFunction02 FunctionDef name:__init__ arg:self arg:dimensions arguments arg arg Call Assign Call Call Assign Assign FunctionDef name:fun arg:self arg:x arguments arg arg arg Return return:yes Call Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_get_colocated_node_name",
    "source_code": "def _get_colocated_node_name(colocated_node_name):\n    colocated_node_decoded = colocated_node_name.decode('utf-8')\n    if colocated_node_decoded.startswith('loc:@'):\n        return colocated_node_decoded[5:]\n    return colocated_node_decoded",
    "docstring": "Decodes colocated node name and returns it without loc:@ prepended.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\graph_util_impl.py",
    "ast_data": "FunctionDef name:_get_colocated_node_name arg:colocated_node_name arguments arg Assign Call If Call Return return:yes Return return:yes"
  },
  {
    "library": "kornia",
    "name": "sampson_epipolar_distance",
    "source_code": "def sampson_epipolar_distance(pts1: Tensor, pts2: Tensor, Fm: Tensor, squared: bool=True, eps: float=1e-08) -> Tensor:\n    if not isinstance(Fm, Tensor):\n        raise TypeError(f'Fm type is not a torch.Tensor. Got {type(Fm)}')\n    if len(Fm.shape) < 3 or not Fm.shape[-2:] == (3, 3):\n        raise ValueError(f'Fm must be a (*, 3, 3) tensor. Got {Fm.shape}')\n    if pts1.shape[-1] == 2:\n        pts1 = convert_points_to_homogeneous(pts1)\n    if pts2.shape[-1] == 2:\n        pts2 = convert_points_to_homogeneous(pts2)\n    F_t: Tensor = Fm.transpose(dim0=-2, dim1=-1)\n    line1_in_2: Tensor = pts1 @ F_t\n    line2_in_1: Tensor = pts2 @ Fm\n    numerator: Tensor = (pts2 * line1_in_2).sum(dim=-1).pow(2)\n    denominator: Tensor = line1_in_2[..., :2].norm(2, dim=-1).pow(2) + line2_in_1[..., :2].norm(2, dim=-1).pow(2)\n    out: Tensor = numerator / denominator\n    if squared:\n        return out\n    return (out + eps).sqrt()",
    "docstring": "Return Sampson distance for correspondences given the fundamental matrix. Args: pts1: correspondences from the left images with shape :math:. If they are not homogeneous, converted automatically. pts2: correspondences from the right images with shape :math:. If they are not homogeneous, converted automatically. Fm: Fundamental matrices with shape :math:. Called Fm to avoid ambiguity with torch.nn.functional. squared: if True (default), the squared distance is returned. eps: Small constant for safe sqrt. Returns: the computed Sampson distance with shape :math:.",
    "type": "function",
    "file_path": "kornia\\kornia\\geometry\\epipolar\\_metrics.py",
    "ast_data": "FunctionDef name:sampson_epipolar_distance arg:pts1 arg:pts2 arg:Fm arg:squared arg:eps arguments arg arg arg arg arg If Call Raise Call Call If BoolOp Compare Call Compare Raise Call If Compare Assign Call If Compare Assign Call Call Call Call Call Call Call Call If Return return:yes Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "ASTEdgePattern",
    "source_code": "class ASTEdgePattern(collections.namedtuple('ASTEdgePattern', ['parent', 'field', 'child'])):\n    __slots__ = ()\n\n    def matches(self, parent, field, child):\n        if self.parent is ANY or isinstance(parent, self.parent):\n            pass\n        else:\n            return False\n        if self.field is ANY or field == self.field:\n            pass\n        else:\n            return False\n        return self.child is ANY or isinstance(child, self.child)",
    "docstring": "A pattern defining a type of AST edge. This consists of three components: - The type of the parent node, checked with isinstance, - The name of the field, checked with string equality, and - The type of the child node, also checked with isinstance. If all three match, the whole pattern is considered to match. In all three slots, the special value is treated as \"match anything\". The internal nodes are produced from the library rather than the standard module, which may affect checks.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\autograph\\pyct\\common_transformers\\anf.py",
    "ast_data": "ClassDef name:ASTEdgePattern Call Assign FunctionDef name:matches arg:self arg:parent arg:field arg:child arguments arg arg arg arg If BoolOp Compare Call Return return:yes If BoolOp Compare Compare Return return:yes Return return:yes BoolOp Compare Call"
  },
  {
    "library": "pytorch",
    "name": "_create_value_mapping",
    "source_code": "def _create_value_mapping(graph: ir.Graph) -> dict[str, ir.Value]:\n    values: dict[str, ir.Value] = {}\n    values.update(graph.initializers)\n    for input in graph.inputs:\n        if not input.name:\n            continue\n        values[input.name] = input\n    for node in graph:\n        for value in node.outputs:\n            if not value.name:\n                continue\n            values[value.name] = value\n    return values",
    "docstring": "Return a dictionary mapping names to values in the graph. The mapping does not include values from subgraphs. Args: graph: The graph to extract the mapping from. Returns: A dictionary mapping names to values.",
    "type": "function",
    "file_path": "pytorch\\torch\\onnx\\_internal\\exporter\\_verification.py",
    "ast_data": "FunctionDef name:_create_value_mapping arg:graph arguments arg Call For If Assign For For If Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_ReduceGradientArg",
    "source_code": "def _ReduceGradientArg(grad, shape_axes_must_reduce):\n    shape, axes, must_reduce = shape_axes_must_reduce\n    if grad is not None and must_reduce:\n        grad = math_ops.reduce_sum(grad, axes, keepdims=True)\n        grad = array_ops.reshape(grad, shape)\n    return grad",
    "docstring": "Reduces gradients of one of the arguments of a broadcasting binary op.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\math_grad.py",
    "ast_data": "FunctionDef name:_ReduceGradientArg arg:grad arg:shape_axes_must_reduce arguments arg arg Assign If BoolOp Compare Assign Call Assign Call Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "_transform",
    "source_code": "def _transform(self, X):\n    return euclidean_distances(X, self.cluster_centers_)",
    "docstring": "Guts of transform method; no input validation.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\cluster\\_kmeans.py",
    "ast_data": "FunctionDef name:_transform arg:self arg:X arguments arg arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "update_mask",
    "source_code": "def update_mask(self, padding_mask, dataset_batch):\n    original_batch_size = self.get_real_batch_size(dataset_batch)\n    missing_count = self.padded_batch_size - original_batch_size\n    mask = backend.concatenate([array_ops.ones(original_batch_size), array_ops.zeros(missing_count)], axis=0)\n    return backend.concatenate([padding_mask, mask], axis=0)",
    "docstring": "Calculate and cache the amount of padding required for a batch.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\partial_batch_padding_handler.py",
    "ast_data": "FunctionDef name:update_mask arg:self arg:padding_mask arg:dataset_batch arguments arg arg arg Assign Call Assign Assign Call Call Call Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "get_knots",
    "source_code": "def get_knots(self):\n    return self.tck[:2]",
    "docstring": "Return a tuple (tx,ty) where tx,ty contain knots positions of the spline with respect to x-, y-variable, respectively. The position of interior and additional knots are given as t[k+1:-k-1] and t[:k+1]=b, t[-k-1:]=e, respectively.",
    "type": "method",
    "file_path": "scipy\\scipy\\interpolate\\_fitpack2.py",
    "ast_data": "FunctionDef name:get_knots arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "IndexedSlicesCompositeTensorGradient",
    "source_code": "class IndexedSlicesCompositeTensorGradient(composite_tensor_gradient.CompositeTensorGradient):\n\n    def get_gradient_components(self, value):\n        return value\n\n    def replace_gradient_components(self, value, component_grads):\n        return component_grads",
    "docstring": "CompositeTensorGradient for IndexedSlices.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\indexed_slices.py",
    "ast_data": "ClassDef name:IndexedSlicesCompositeTensorGradient FunctionDef name:get_gradient_components arg:self arg:value arguments arg arg Return return:yes FunctionDef name:replace_gradient_components arg:self arg:value arg:component_grads arguments arg arg arg Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "le",
    "source_code": "def le(self, x):\n    d, m = divmod(x, self.step)\n    if self.closeto(m / self.step, 1):\n        return d + 1\n    return d",
    "docstring": "Return the largest n: n*step <= x.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\ticker.py",
    "ast_data": "FunctionDef name:le arg:self arg:x arguments arg arg Assign Call If Call Return return:yes Return return:yes"
  },
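A standalone sketch of the `le` logic with a stand-in `closeto` tolerance check (the real class supplies its own), showing the floating-point nudge:

```python
# Find the largest n with n*step <= x, nudging up when floating-point
# error leaves the remainder a hair short of a full step.
def closeto(ms, edge, tol=1e-10):
    return abs(ms - edge) < tol

def le(x, step):
    d, m = divmod(x, step)
    if closeto(m / step, 1):
        return d + 1
    return d

print(le(0.9999999999999999, 0.1))  # 10.0, despite the rounding error
print(le(0.95, 0.1))                # 9.0
```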
  {
    "library": "pytorch",
    "name": "_retrieve_recv_grads",
    "source_code": "def _retrieve_recv_grads(self, bwd_chunk_id: int):\n    recv_infos = self.grad_recv_info[bwd_chunk_id]\n    grads = self._map_tensor_from_recv_info(recv_infos)\n    return grads",
    "docstring": "Retrieve the gradients received for the current stage during backward.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\pipelining\\stage.py",
    "ast_data": "FunctionDef name:_retrieve_recv_grads arg:self arg:bwd_chunk_id arguments arg arg Assign Assign Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "Placement",
    "source_code": "class Placement:\n\n    def is_shard(self, dim: Optional[int]=None) -> bool:\n        is_shard_instance = isinstance(self, Shard)\n        if dim is not None and is_shard_instance:\n            return cast(Shard, self).dim == dim\n        else:\n            return is_shard_instance\n\n    def is_replicate(self) -> bool:\n        return isinstance(self, Replicate)\n\n    def is_partial(self) -> bool:\n        return isinstance(self, Partial)",
    "docstring": "The base class for the Placement type, where it describes how a DTensor is placed onto the ``. This class is not meant to be used directly, mainly served as a typing stub.",
    "type": "class",
    "file_path": "pytorch\\torch\\distributed\\tensor\\placement_types.py",
    "ast_data": "ClassDef name:Placement FunctionDef name:is_shard arg:self arg:dim arguments arg arg Assign Call If BoolOp Compare Return return:yes Compare Call Return return:yes FunctionDef name:is_replicate arg:self arguments arg Return return:yes Call FunctionDef name:is_partial arg:self arguments arg Return return:yes Call"
  },
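Assuming a PyTorch build where `torch.distributed.tensor.placement_types` exposes the concrete `Shard`, `Replicate`, and `Partial` types, the predicates can be exercised directly:

```python
# Exercising the Placement predicates with the concrete placement types.
from torch.distributed.tensor.placement_types import Shard, Replicate, Partial

print(Shard(0).is_shard())         # True
print(Shard(0).is_shard(dim=0))    # True
print(Shard(0).is_shard(dim=1))    # False: sharded, but on a different dim
print(Replicate().is_replicate())  # True
print(Partial().is_partial())      # True
```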
  {
    "library": "cherrypy",
    "name": "access_file",
    "source_code": "@property\ndef access_file(self):\n    h = self._get_builtin_handler(self.access_log, 'file')\n    if h:\n        return h.baseFilename\n    return ''",
    "docstring": "The filename for self.access_log. If you set this to a string, it'll add the appropriate FileHandler for you. If you set it to ``, it will remove the handler.",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\_cplogging.py",
    "ast_data": "FunctionDef name:access_file arg:self arguments arg Assign Call If Return return:yes Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "try_end_curr_execution",
    "source_code": "def try_end_curr_execution(self) -> None:\n    assert not self.in_recording\n    if self.current_node is None:\n        return\n    if self.can_start_new_generation():\n        self.clear_current_path_state_and_set_to_none()\n        return\n    if self.current_node.all_outputs_are_dead():\n        self.clear_current_path_state_and_set_to_none()",
    "docstring": "Check if the current executing node can be terminated, either because all outputs of the previously executed node are dead or because it was executed in a different generation. Will set current_node to None if successful.",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\cudagraph_trees.py",
    "ast_data": "FunctionDef name:try_end_curr_execution arg:self arguments arg If Compare Return return:no If Call Call Return return:no If Call Call"
  },
  {
    "library": "scipy",
    "name": "HessianUpdateStrategy",
    "source_code": "class HessianUpdateStrategy:\n\n    def initialize(self, n, approx_type):\n        raise NotImplementedError('The method ``initialize(n, approx_type)`` is not implemented.')\n\n    def update(self, delta_x, delta_grad):\n        raise NotImplementedError('The method ``update(delta_x, delta_grad)`` is not implemented.')\n\n    def dot(self, p):\n        raise NotImplementedError('The method ``dot(p)`` is not implemented.')\n\n    def get_matrix(self):\n        raise NotImplementedError('The method ``get_matrix(p)`` is not implemented.')\n\n    def __matmul__(self, p):\n        return self.dot(p)",
    "docstring": "Interface for implementing Hessian update strategies. Many optimization methods make use of Hessian (or inverse Hessian) approximations, such as the quasi-Newton methods BFGS, SR1, L-BFGS. Some of these approximations, however, do not actually need to store the entire matrix or can compute the internal matrix product with a given vector in a very efficiently manner. This class serves as an abstract interface between the optimization algorithm and the quasi-Newton update strategies, giving freedom of implementation to store and update the internal matrix as efficiently as possible. Different choices of initialization and update procedure will result in different quasi-Newton strategies. Four methods should be implemented in derived classes: `` and used by the compatible solvers to approximate the Hessian (or inverse Hessian) used by the optimization algorithms.",
    "type": "class",
    "file_path": "scipy\\scipy\\optimize\\_hessian_update_strategy.py",
    "ast_data": "ClassDef name:HessianUpdateStrategy FunctionDef name:initialize arg:self arg:n arg:approx_type arguments arg arg arg Raise Call FunctionDef name:update arg:self arg:delta_x arg:delta_grad arguments arg arg arg Raise Call FunctionDef name:dot arg:self arg:p arguments arg arg Raise Call FunctionDef name:get_matrix arg:self arguments arg Raise Call FunctionDef name:__matmul__ arg:self arg:p arguments arg arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "get_element_from_tensor_info",
    "source_code": "def get_element_from_tensor_info(tensor_info, graph=None, import_scope=None):\n    graph = graph or ops.get_default_graph()\n    return graph.as_graph_element(ops.prepend_name_scope(tensor_info.name, import_scope=import_scope))",
    "docstring": "Returns the element in the graph described by a TensorInfo proto. Args: tensor_info: A TensorInfo proto describing an Op or Tensor by name. graph: The tf.Graph in which tensors are looked up. If None, the current default graph is used. import_scope: If not None, names in are prefixed with this string before lookup. Returns: Op or tensor in described by . Raises: KeyError: If does not correspond to an op or tensor in",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\saved_model\\utils_impl.py",
    "ast_data": "FunctionDef name:get_element_from_tensor_info arg:tensor_info arg:graph arg:import_scope arguments arg arg arg Assign BoolOp Call Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "in_defun",
    "source_code": "def in_defun():\n    if context.executing_eagerly():\n        return False\n    graph = ops.get_default_graph()\n    while isinstance(graph, CondBranchFuncGraph) or isinstance(graph, WhileBodyFuncGraph) or isinstance(graph, WhileCondFuncGraph):\n        graph = graph.outer_graph\n    return isinstance(graph, FuncGraph)",
    "docstring": "Returns if the current graph is, or is nested in, a defun.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\control_flow_util_v2.py",
    "ast_data": "FunctionDef name:in_defun arguments If Call Return return:yes Assign Call While BoolOp Call Call Call Assign Return return:yes Call"
  },
  {
    "library": "django",
    "name": "RenameMethodsBase",
    "source_code": "class RenameMethodsBase(type):\n    renamed_methods = ()\n\n    def __new__(cls, name, bases, attrs):\n        new_class = super().__new__(cls, name, bases, attrs)\n        for base in inspect.getmro(new_class):\n            class_name = base.__name__\n            for renamed_method in cls.renamed_methods:\n                old_method_name = renamed_method[0]\n                old_method = base.__dict__.get(old_method_name)\n                new_method_name = renamed_method[1]\n                new_method = base.__dict__.get(new_method_name)\n                deprecation_warning = renamed_method[2]\n                wrapper = warn_about_renamed_method(class_name, *renamed_method)\n                if not new_method and old_method:\n                    warnings.warn('`%s.%s` method should be renamed `%s`.' % (class_name, old_method_name, new_method_name), deprecation_warning, 2)\n                    setattr(base, new_method_name, old_method)\n                    setattr(base, old_method_name, wrapper(old_method))\n                if not old_method and new_method:\n                    setattr(base, old_method_name, wrapper(new_method))\n        return new_class",
    "docstring": "Handles the deprecation paths when renaming a method. It does the following: 1) Define the new method if missing and complain about it. 2) Define the old method if missing. 3) Complain whenever an old method is called. See #15363 for more details.",
    "type": "class",
    "file_path": "django\\django\\utils\\deprecation.py",
    "ast_data": "ClassDef name:RenameMethodsBase Assign FunctionDef name:__new__ arg:cls arg:name arg:bases arg:attrs arguments arg arg arg arg Assign Call Call For Call Assign For Assign Assign Call Assign Assign Call Assign Assign Call If BoolOp Call Call Call Call If BoolOp Call Call Return return:yes"
  },
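A hedged usage sketch of the metaclass (assumes Django is installed): `RenameFetchMethod`, `Client`, and the method names are invented for illustration, but the `renamed_methods` triple format follows the code above:

```python
# Declare the (old name, new name, warning class) triples once on a
# metaclass subclass; RenameMethodsBase wires up the deprecation shims.
import warnings
from django.utils.deprecation import RenameMethodsBase

class RenameFetchMethod(RenameMethodsBase):
    renamed_methods = (
        ('fetch', 'get', DeprecationWarning),  # hypothetical rename
    )

class Client(metaclass=RenameFetchMethod):
    def get(self):
        return 'data'

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter('always')
    Client().fetch()  # calls get() and emits a DeprecationWarning
    print(caught[0].category)  # <class 'DeprecationWarning'>
```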
  {
    "library": "matplotlib",
    "name": "wedge",
    "source_code": "@classmethod\ndef wedge(cls, theta1, theta2, n=None):\n    return cls.arc(theta1, theta2, n, True)",
    "docstring": "Return a for the unit circle wedge from angles *theta1* to *theta2* (in degrees). *theta2* is unwrapped to produce the shortest wedge within 360 degrees. That is, if *theta2* > *theta1* + 360, the wedge will be from *theta1* to *theta2* - 360 and not a full circle plus some extra overlap. If *n* is provided, it is the number of spline segments to make. If *n* is not provided, the number of spline segments is determined based on the delta between *theta1* and *theta2*. See for the reference on the approximation used.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\path.py",
    "ast_data": "FunctionDef name:wedge arg:cls arg:theta1 arg:theta2 arg:n arguments arg arg arg arg Return return:yes Call"
  },
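Using the public matplotlib API, a 90-degree unit-circle wedge can be built and drawn like this (a sketch; the plotting scaffolding is incidental):

```python
# Build a wedge path from 0 to 90 degrees and render it as a patch.
import matplotlib.pyplot as plt
from matplotlib.path import Path
from matplotlib.patches import PathPatch

wedge_path = Path.wedge(0, 90)  # unit-circle wedge, angles in degrees
fig, ax = plt.subplots()
ax.add_patch(PathPatch(wedge_path, facecolor='tab:blue', alpha=0.5))
ax.set_xlim(-1.1, 1.1)
ax.set_ylim(-1.1, 1.1)
ax.set_aspect('equal')
plt.show()
```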
  {
    "library": "kornia",
    "name": "SamConfig",
    "source_code": "@dataclass\nclass SamConfig:\n    model_type: Optional[str | int | SamModelType] = None\n    checkpoint: Optional[str] = None\n    pretrained: bool = False\n    encoder_embed_dim: Optional[int] = None\n    encoder_depth: Optional[int] = None\n    encoder_num_heads: Optional[int] = None\n    encoder_global_attn_indexes: Optional[tuple[int, ...]] = None",
    "docstring": "Encapsulate the Config to build a SAM model. Args: model_type: the available models are: - 0, 'vit_h' or :func: - 1, 'vit_l' or :func: - 2, 'vit_b' or :func: - 3, 'mobile_sam', or :func: checkpoint: URL or a path for a file with the weights of the model encoder_embed_dim: Patch embedding dimension. encoder_depth: Depth of ViT. encoder_num_heads: Number of attention heads in each ViT block. encoder_global_attn_indexes: Encoder indexes for blocks using global attention.",
    "type": "class",
    "file_path": "kornia\\kornia\\contrib\\models\\sam\\model.py",
    "ast_data": "ClassDef name:SamConfig"
  },
  {
    "library": "django",
    "name": "_plural_string",
    "source_code": "@property\ndef _plural_string(self):\n    if '' in self.translation._catalog:\n        for line in self.translation._catalog[''].split('\\n'):\n            if line.startswith('Plural-Forms:'):\n                return line.split(':', 1)[1].strip()\n    return None",
    "docstring": "Return the plural string (including nplurals) for this catalog language, or None if no plural string is available.",
    "type": "method",
    "file_path": "django\\django\\views\\i18n.py",
    "ast_data": "FunctionDef name:_plural_string arg:self arguments arg If Compare For Call If Call Return return:yes Call Call Return return:no"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self):\n    self._last_step_outputs = {}\n    self._last_step_outputs_reduce_ops = {}\n    self._non_tensor_outputs = {}",
    "docstring": "Initialize an output context. Returns: A context object.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\input_lib.py",
    "ast_data": "FunctionDef name:__init__ arg:self arguments arg Assign Assign Assign"
  },
  {
    "library": "matplotlib",
    "name": "get_transform",
    "source_code": "def get_transform(self):\n    raise NotImplementedError()",
    "docstring": "Return the object associated with this scale.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\scale.py",
    "ast_data": "FunctionDef name:get_transform arg:self arguments arg Raise Call"
  },
  {
    "library": "tensorflow",
    "name": "_is_closed",
    "source_code": "def _is_closed(self):\n    return self._coordinated_creator.tf_sess is None",
    "docstring": "Return True if the monitored session is closed. For tests only. Returns: A boolean.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\training\\monitored_session.py",
    "ast_data": "FunctionDef name:_is_closed arg:self arguments arg Return return:yes Compare"
  },
  {
    "library": "tensorflow",
    "name": "_check_element_shape",
    "source_code": "def _check_element_shape(self, shape):\n    if not shape.is_compatible_with(self.element_shape):\n        raise ValueError('Inconsistent shapes: saw %s but expected %s ' % (shape, self.element_shape))\n    if self._infer_shape:\n        self._element_shape[0] = self.element_shape.merge_with(shape)",
    "docstring": "Changes the element shape of the array given a shape to merge with. Args: shape: A object to merge with. Raises: ValueError: if the provided shape is incompatible with the current element shape of the .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\tensor_array_ops.py",
    "ast_data": "FunctionDef name:_check_element_shape arg:self arg:shape arguments arg arg If Call Raise Call If Assign Call"
  },
  {
    "library": "kornia",
    "name": "get_keypoint",
    "source_code": "def get_keypoint(self, keypoint: FaceKeypoint) -> torch.Tensor:\n    if keypoint == FaceKeypoint.EYE_LEFT:\n        out = self._data[..., (4, 5)]\n    elif keypoint == FaceKeypoint.EYE_RIGHT:\n        out = self._data[..., (6, 7)]\n    elif keypoint == FaceKeypoint.NOSE:\n        out = self._data[..., (8, 9)]\n    elif keypoint == FaceKeypoint.MOUTH_LEFT:\n        out = self._data[..., (10, 11)]\n    elif keypoint == FaceKeypoint.MOUTH_RIGHT:\n        out = self._data[..., (12, 13)]\n    else:\n        raise ValueError(f'Not valid keypoint type. Got: {keypoint}.')\n    return out",
    "docstring": "Get the [x y] position of a given facial keypoint. Args: keypoint: the keypoint type to return the position.",
    "type": "method",
    "file_path": "kornia\\kornia\\contrib\\face_detection.py",
    "ast_data": "FunctionDef name:get_keypoint arg:self arg:keypoint arguments arg arg If Compare Assign If Compare Assign If Compare Assign If Compare Assign If Compare Assign Raise Call Return return:yes"
  },
  {
    "library": "django",
    "name": "get_group_permissions",
    "source_code": "def get_group_permissions(self, obj=None):\n    return _user_get_permissions(self, obj, 'group')",
    "docstring": "Return a list of permission strings that this user has through their groups. Query all available auth backends. If an object is passed in, return only permissions matching this object.",
    "type": "method",
    "file_path": "django\\django\\contrib\\auth\\models.py",
    "ast_data": "FunctionDef name:get_group_permissions arg:self arg:obj arguments arg arg Return return:yes Call"
  },
  {
    "library": "authlib",
    "name": "validate_token",
    "source_code": "def validate_token(self, token, scopes, request):\n    if not token:\n        raise InvalidTokenError(realm=self.realm, extra_attributes=self.extra_attributes)\n    if token.is_expired():\n        raise InvalidTokenError(realm=self.realm, extra_attributes=self.extra_attributes)\n    if token.is_revoked():\n        raise InvalidTokenError(realm=self.realm, extra_attributes=self.extra_attributes)\n    if self.scope_insufficient(token.get_scope(), scopes):\n        raise InsufficientScopeError()",
    "docstring": "Check if token is active and matches the requested scopes.",
    "type": "method",
    "file_path": "authlib\\authlib\\oauth2\\rfc6750\\validator.py",
    "ast_data": "FunctionDef name:validate_token arg:self arg:token arg:scopes arg:request arguments arg arg arg arg If Raise Call If Call Raise Call If Call Raise Call If Call Call Raise Call"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, on_ui_exit=None, config=None):\n    self._on_ui_exit = on_ui_exit\n    self._command_handler_registry = debugger_cli_common.CommandHandlerRegistry()\n    self._tab_completion_registry = debugger_cli_common.TabCompletionRegistry()\n    self._tab_completion_registry.register_tab_comp_context([''], self.CLI_EXIT_COMMANDS + [debugger_cli_common.CommandHandlerRegistry.HELP_COMMAND] + debugger_cli_common.CommandHandlerRegistry.HELP_COMMAND_ALIASES)\n    self._config = config or cli_config.CLIConfig()\n    self._config_argparser = argparse.ArgumentParser(description='config command', usage=argparse.SUPPRESS)\n    subparsers = self._config_argparser.add_subparsers()\n    set_parser = subparsers.add_parser('set')\n    set_parser.add_argument('property_name', type=str)\n    set_parser.add_argument('property_value', type=str)\n    set_parser = subparsers.add_parser('show')\n    self.register_command_handler('config', self._config_command_handler, self._config_argparser.format_help(), prefix_aliases=['cfg'])",
    "docstring": "Constructor of the base class. Args: on_ui_exit: () the callback to be called when the UI exits. config: An instance of carrying user-facing configurations.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\debug\\cli\\base_ui.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:on_ui_exit arg:config arguments arg arg arg Assign Assign Call Assign Call Call Assign BoolOp Call Assign Call Assign Call Assign Call Call Call Assign Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "push_tape",
    "source_code": "def push_tape(tape):\n    pywrap_tfe.TFE_Py_TapeSetAdd(tape._tape)",
    "docstring": "Pushes an existing tape onto the tape stack.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\tape.py",
    "ast_data": "FunctionDef name:push_tape arg:tape arguments arg Call"
  },
  {
    "library": "tensorflow",
    "name": "_BatchNormWithGlobalNormalizationGrad",
    "source_code": "@ops.RegisterGradient('BatchNormWithGlobalNormalization')\ndef _BatchNormWithGlobalNormalizationGrad(op: ops.Operation, grad):\n    dx, dm, dv, db, dg = gen_nn_ops.batch_norm_with_global_normalization_grad(op.inputs[0], op.inputs[1], op.inputs[2], op.inputs[4], grad, op.get_attr('variance_epsilon'), op.get_attr('scale_after_normalization'))\n    return (dx, dm, dv, db, dg)",
    "docstring": "Return the gradients for the 5 inputs of BatchNormWithGlobalNormalization. We do not backprop anything for the mean and var intentionally as they are not being trained with backprop in the operation. Args: op: The BatchNormOp for which we need to generate gradients. grad: Tensor. The gradients passed to the BatchNormOp. Returns: dx: Backprop for input, which is (grad * (g * rsqrt(v + epsilon))) dm: Backprop for mean, which is sum_over_rest(grad * g) * (-1 / rsqrt(v + epsilon)) dv: Backprop for variance, which is sum_over_rest(grad * g * (x - m)) * (-1/2) * (v + epsilon) ^ (-3/2) db: Backprop for beta, which is grad reduced in all except the last dimension. dg: Backprop for gamma, which is (grad * ((x - m) * rsqrt(v + epsilon)))",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\nn_grad.py",
    "ast_data": "FunctionDef name:_BatchNormWithGlobalNormalizationGrad arg:op arg:grad arguments arg arg Assign Call Call Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "_Storage",
    "source_code": "@dataclasses.dataclass\nclass _Storage:\n    ptr: int\n    allocation_id: int\n\n    def __repr__(self) -> str:\n        return f'{hex(self.ptr):>18} ({self.allocation_id})'\n\n    def __eq__(self, other: object) -> bool:\n        return isinstance(other, _Storage) and self.allocation_id == other.allocation_id\n\n    def __hash__(self) -> int:\n        return hash(self.allocation_id)",
    "docstring": "Bundle storage pointer and id. All profiling logic should use , however it is useful to print storage pointers for debugging and unit tests sometimes look up values using the storage data pointer of a live Tensor.",
    "type": "class",
    "file_path": "pytorch\\torch\\profiler\\_memory_profiler.py",
    "ast_data": "ClassDef name:_Storage FunctionDef name:__repr__ arg:self arguments arg Return return:yes Call FunctionDef name:__eq__ arg:self arg:other arguments arg arg Return return:yes BoolOp Call Compare FunctionDef name:__hash__ arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "django",
    "name": "count_active_tables",
    "source_code": "def count_active_tables(self):\n    return len([1 for count in self.alias_refcount.values() if count])",
    "docstring": "Return the number of tables in this query with a non-zero reference count. After execution, the reference counts are zeroed, so tables added in compiler will not be seen by this method.",
    "type": "method",
    "file_path": "django\\django\\db\\models\\sql\\query.py",
    "ast_data": "FunctionDef name:count_active_tables arg:self arguments arg Return return:yes Call Call"
  },
  {
    "library": "kornia",
    "name": "from_dlpack",
    "source_code": "@classmethod\ndef from_dlpack(cls, data: DLPack) -> Image:\n    _data: Tensor = from_dlpack(data)\n    pixel_format = PixelFormat(color_space=ColorSpace.RGB, bit_depth=_data.element_size() * 8)\n    layout = ImageLayout(image_size=ImageSize(height=_data.shape[1], width=_data.shape[2]), channels=_data.shape[0], channels_order=ChannelsOrder.CHANNELS_FIRST)\n    return cls(_data, pixel_format, layout)",
    "docstring": "Construct an image tensor from a DLPack capsule. Args: data: a DLPack capsule from numpy, tvm or jax. Example: >>> x = np.ones((4, 5, 3)) >>> img = Image.from_dlpack(x.__dlpack__())",
    "type": "method",
    "file_path": "kornia\\kornia\\image\\image.py",
    "ast_data": "FunctionDef name:from_dlpack arg:cls arg:data arguments arg arg Call Assign Call Call Assign Call Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_test_or_class_decorator",
    "source_code": "def _test_or_class_decorator(test_or_class, single_method_decorator):\n\n    def _decorate_test_or_class(obj):\n        if isinstance(obj, collections.abc.Iterable):\n            return itertools.chain.from_iterable((single_method_decorator(method) for method in obj))\n        if isinstance(obj, type):\n            cls = obj\n            for name, value in cls.__dict__.copy().items():\n                if callable(value) and name.startswith(unittest.TestLoader.testMethodPrefix):\n                    setattr(cls, name, single_method_decorator(value))\n            cls = type(cls).__new__(type(cls), cls.__name__, cls.__bases__, cls.__dict__.copy())\n            return cls\n        return single_method_decorator(obj)\n    if test_or_class is not None:\n        return _decorate_test_or_class(test_or_class)\n    return _decorate_test_or_class",
    "docstring": "Decorate a test or class with a decorator intended for one method. If the test_or_class is a class: This will apply the decorator to all test methods in the class. If the test_or_class is an iterable of already-parameterized test cases: This will apply the decorator to all the cases, and then flatten the resulting cross-product of test cases. This allows stacking the Keras parameterized decorators w/ each other, and to apply them to test methods that have already been marked with an absl parameterized decorator. Otherwise, treat the obj as a single method and apply the decorator directly. Args: test_or_class: A test method (that may have already been decorated with a parameterized decorator, or a test class that extends keras_parameterized.TestCase single_method_decorator: A parameterized decorator intended for a single test method. Returns: The decorated result.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\keras_parameterized.py",
    "ast_data": "FunctionDef name:_test_or_class_decorator arg:test_or_class arg:single_method_decorator arguments arg arg FunctionDef name:_decorate_test_or_class arg:obj arguments arg If Call Return return:yes Call Call If Call Assign For Call Call If BoolOp Call Call Call Call Assign Call Call Call Call Return return:yes Return return:yes Call If Compare Return return:yes Call Return return:yes"
  },
  {
    "library": "authlib",
    "name": "get_auth_time",
    "source_code": "def get_auth_time(self):\n    raise NotImplementedError()",
    "docstring": "Get \"auth_time\" value of the authorization code object.",
    "type": "method",
    "file_path": "authlib\\authlib\\oidc\\core\\models.py",
    "ast_data": "FunctionDef name:get_auth_time arg:self arguments arg Raise Call"
  },
  {
    "library": "scipy",
    "name": "reconstruct_matrix_from_id",
    "source_code": "def reconstruct_matrix_from_id(B, idx, proj):\n    if _is_real(B):\n        return _backend.idd_reconid(B, idx, proj)\n    else:\n        return _backend.idz_reconid(B, idx, proj)",
    "docstring": "Reconstruct matrix from its ID. A matrix with skeleton matrix and ID indices and coefficients and , respectively, can be reconstructed as:: numpy.hstack([B, numpy.dot(B, proj)])[:,numpy.argsort(idx)] See also :func: and :func:. .. This function automatically detects the matrix data type and calls the appropriate backend. For details, see :func: and :func:. Parameters ---------- B : :class: Skeleton matrix. idx : :class: Column index array. proj : :class: Interpolation coefficients. Returns ------- :class: Reconstructed matrix.",
    "type": "function",
    "file_path": "scipy\\scipy\\linalg\\interpolative.py",
    "ast_data": "FunctionDef name:reconstruct_matrix_from_id arg:B arg:idx arg:proj arguments arg arg arg If Call Return return:yes Call Return return:yes Call"
  },
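A hedged demo of the reconstruction identity quoted in the docstring, via the public `scipy.linalg.interpolative` API on a small exactly-low-rank matrix (assumes `interp_decomp` with an integer rank returns `(idx, proj)`):

```python
# Rank-k interpolative decomposition, then reconstruction from the ID.
import numpy as np
import scipy.linalg.interpolative as sli

rng = np.random.default_rng(0)
A = rng.standard_normal((20, 4)) @ rng.standard_normal((4, 30))  # rank 4

k = 4
idx, proj = sli.interp_decomp(A, k)   # column indices + coefficients
B = A[:, idx[:k]]                     # skeleton columns
A_rec = sli.reconstruct_matrix_from_id(B, idx, proj)
print(np.allclose(A, A_rec))          # True: exact rank-k, so exact up to roundoff
```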
  {
    "library": "tensorflow",
    "name": "_experimental_initialize_system",
    "source_code": "def _experimental_initialize_system(self):\n    tpu_cluster_resolver_lib.initialize_tpu_system(self._tpu_cluster_resolver)",
    "docstring": "Experimental method added to be used by Estimator. This is a private method only to be used by Estimator. Other frameworks should directly be calling",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\tpu_strategy.py",
    "ast_data": "FunctionDef name:_experimental_initialize_system arg:self arguments arg Call"
  },
  {
    "library": "pytorch",
    "name": "guard_cooperative_store",
    "source_code": "def guard_cooperative_store(self, name, buffer):\n    idx = self.cooperative_reduction_workspace_cache.increment_store_count()\n    buffer.writeline(DeferredLine(name, f'if rsplit_id == ({idx} % RSPLIT):'))\n    return buffer.indent()",
    "docstring": "For cooperative reductions only one thread block should write out the result. We rotate which thread block does each write for better parallelism",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\codegen\\triton.py",
    "ast_data": "FunctionDef name:guard_cooperative_store arg:self arg:name arg:buffer arguments arg arg arg Assign Call Call Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_get_internal_key",
    "source_code": "def _get_internal_key(self, key):\n    if is_train(key):\n        return KerasModeKeys.TRAIN\n    if is_eval(key):\n        return KerasModeKeys.TEST\n    if is_predict(key):\n        return KerasModeKeys.PREDICT\n    raise ValueError('Invalid mode key: {}.'.format(key))",
    "docstring": "Return keys used for the internal dictionary.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\saved_model\\model_utils\\mode_keys.py",
    "ast_data": "FunctionDef name:_get_internal_key arg:self arg:key arguments arg arg If Call Return return:yes If Call Return return:yes If Call Return return:yes Raise Call Call"
  },
  {
    "library": "matplotlib",
    "name": "StrCategoryFormatter",
    "source_code": "class StrCategoryFormatter(ticker.Formatter):\n\n    def __init__(self, units_mapping):\n        self._units = units_mapping\n\n    def __call__(self, x, pos=None):\n        return self.format_ticks([x])[0]\n\n    def format_ticks(self, values):\n        r_mapping = {v: self._text(k) for k, v in self._units.items()}\n        return [r_mapping.get(round(val), '') for val in values]\n\n    @staticmethod\n    def _text(value):\n        if isinstance(value, bytes):\n            value = value.decode(encoding='utf-8')\n        elif not isinstance(value, str):\n            value = str(value)\n        return value",
    "docstring": "String representation of the data at every tick.",
    "type": "class",
    "file_path": "matplotlib\\lib\\matplotlib\\category.py",
    "ast_data": "ClassDef name:StrCategoryFormatter FunctionDef name:__init__ arg:self arg:units_mapping arguments arg arg Assign FunctionDef name:__call__ arg:self arg:x arg:pos arguments arg arg arg Return return:yes Call FunctionDef name:format_ticks arg:self arg:values arguments arg arg Assign Call Call Return return:yes Call Call FunctionDef name:_text arg:value arguments arg If Call Assign Call If Call Assign Call Return return:yes"
  },
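The formatter can be exercised directly with a hand-built `units_mapping`; normally matplotlib constructs this mapping itself when plotting string categories:

```python
# Map tick positions back to category labels; bytes keys are decoded,
# and positions outside the mapping format as the empty string.
from matplotlib.category import StrCategoryFormatter

fmt = StrCategoryFormatter({'apple': 0, 'banana': 1, b'cherry': 2})
print(fmt.format_ticks([0, 1, 2, 3]))  # ['apple', 'banana', 'cherry', '']
print(fmt(1))                          # 'banana'
```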
  {
    "library": "pytorch",
    "name": "make_runtime_safe",
    "source_code": "def make_runtime_safe(self):\n    assert self.traced_tangent_metas is None\n\n    def extract_metadata(t):\n        if isinstance(t, torch.Tensor) and is_traceable_wrapper_subclass(t):\n            inner_tensors, flatten_spec = t.__tensor_flatten__()\n            return (inner_tensors, flatten_spec)\n        else:\n            return None\n    self.traced_tangent_metas = [extract_metadata(t) for t in self.traced_tangents]\n    self.traced_tangents = []\n    new_output_info = []\n    for out in self.output_info:\n        if config.view_replay_for_aliased_outputs:\n            new_out = out\n        else:\n            new_out = dataclasses.replace(out, functional_tensor=None)\n        new_output_info.append(new_out)\n    self.output_info = new_output_info\n    for inp_meta in self.subclass_inp_meta:\n        if isinstance(inp_meta, SubclassCreationMeta):\n            inp_meta.make_runtime_safe()\n    for inp_meta in self.subclass_fw_graph_out_meta:\n        if isinstance(inp_meta, SubclassCreationMeta):\n            inp_meta.make_runtime_safe()\n    for inp_meta in self.subclass_tangent_meta:\n        if isinstance(inp_meta, SubclassCreationMeta):\n            inp_meta.make_runtime_safe()",
    "docstring": "There are various fields in ViewAndMutationMeta that aren't serializable. This function is called after all tracing is completed to simplify certain fields in the metadata so that they can be safely cached. Doing so may lose information (in the case of traced_tangents), but none of the information is needed at runtime.",
    "type": "method",
    "file_path": "pytorch\\torch\\_functorch\\_aot_autograd\\schemas.py",
    "ast_data": "FunctionDef name:make_runtime_safe arg:self arguments arg Compare FunctionDef name:extract_metadata arg:t arguments arg If BoolOp Call Call Assign Call Return return:yes Return return:no Assign Call Assign Assign For If Assign Assign Call Call Assign For If Call Call For If Call Call For If Call Call"
  },
  {
    "library": "scipy",
    "name": "jac",
    "source_code": "@property\ndef jac(self):\n    if self._g is None:\n        self._g = self._jac(self._x)\n    return self._g",
    "docstring": "Value of Jacobian of objective function at current iteration.",
    "type": "method",
    "file_path": "scipy\\scipy\\optimize\\_trustregion.py",
    "ast_data": "FunctionDef name:jac arg:self arguments arg If Compare Assign Call Return return:yes"
  },
  {
    "library": "kornia",
    "name": "get_pruning_mask",
    "source_code": "def get_pruning_mask(self, confidences: Tensor, scores: Tensor, layer_index: int) -> Tensor:\n    keep = scores > 1 - self.conf.width_confidence\n    if confidences is not None:\n        keep |= confidences <= self.confidence_thresholds[layer_index]\n    return keep",
    "docstring": "Mask points which should be removed.",
    "type": "method",
    "file_path": "kornia\\kornia\\feature\\lightglue.py",
    "ast_data": "FunctionDef name:get_pruning_mask arg:self arg:confidences arg:scores arg:layer_index arguments arg arg arg arg Assign Compare If Compare Compare Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "positions",
    "source_code": "@property\ndef positions(self):\n    method = 'get_xdata' if self.direction == 'horizontal' else 'get_ydata'\n    return [getattr(line, method)()[0] for line in self.artists]",
    "docstring": "Positions of the handle in data coordinates.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\widgets.py",
    "ast_data": "FunctionDef name:positions arg:self arguments arg Assign Compare Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "transform_var",
    "source_code": "def transform_var(tensor, counter, dimension_dict):\n    if isinstance(tensor, TensorType):\n        res = []\n        for t in tensor.__args__:\n            transformed, counter = transform_dimension(t, counter, dimension_dict)\n            res.append(transformed)\n        assert len(res) <= 4\n        if len(tensor.__args__) == 1:\n            return (tensor_type.tensor1(res[0]), counter)\n        elif len(tensor.__args__) == 2:\n            return (tensor_type.tensor2(res[0], res[1]), counter)\n        elif len(tensor.__args__) == 3:\n            return (tensor_type.tensor3(res[0], res[1], res[2]), counter)\n        elif len(tensor.__args__) == 4:\n            return (tensor_type.tensor4(res[0], res[1], res[2], res[3]), counter)\n    elif tensor == Dyn:\n        return (z3_dyn, counter)\n    elif isinstance(tensor, TVar):\n        return (z3.Const(tensor.tvar, tensor_type), counter)",
    "docstring": "Transforms tensor variables to a format understood by z3 Args: tensor: Tensor variable or a tensor type potentially with variable dimensions Returns: Transformed variable to a z3 format",
    "type": "function",
    "file_path": "pytorch\\torch\\fx\\experimental\\migrate_gradual_types\\transform_to_z3.py",
    "ast_data": "FunctionDef name:transform_var arg:tensor arg:counter arg:dimension_dict arguments arg arg arg If Call Assign For Assign Call Call Compare Call If Compare Call Return return:yes Call If Compare Call Return return:yes Call If Compare Call Return return:yes Call If Compare Call Return return:yes Call If Compare Return return:yes If Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "_Policy",
    "source_code": "class _Policy(ABC):\n\n    @abstractmethod\n    def _run_policy(self, root_module: nn.Module, ignored_modules: set[nn.Module], root_kwargs: dict[str, Any]) -> dict[nn.Module, dict[str, Any]]:\n        ...",
    "docstring": "This defines an abstract base class that represents a policy for applying a module-level API.",
    "type": "class",
    "file_path": "pytorch\\torch\\distributed\\fsdp\\wrap.py",
    "ast_data": "ClassDef name:_Policy FunctionDef name:_run_policy arg:self arg:root_module arg:ignored_modules arg:root_kwargs arguments arg arg arg arg"
  },
  {
    "library": "tensorflow",
    "name": "_dirty",
    "source_code": "@property\ndef _dirty(self):\n    return self._self_external_modification or self._self_non_string_key",
    "docstring": "Check if there has already been a mutation which prevents saving.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\trackable\\data_structures.py",
    "ast_data": "FunctionDef name:_dirty arg:self arguments arg Return return:yes BoolOp"
  },
  {
    "library": "scikit-learn",
    "name": "_iter_indices",
    "source_code": "def _iter_indices(self, X, y=None, groups=None):\n    n_samples = _num_samples(X)\n    n_train, n_test = _validate_shuffle_split(n_samples, self.test_size, self.train_size, default_test_size=self._default_test_size)\n    rng = check_random_state(self.random_state)\n    for i in range(self.n_splits):\n        permutation = rng.permutation(n_samples)\n        ind_test = permutation[:n_test]\n        ind_train = permutation[n_test:n_test + n_train]\n        yield (ind_train, ind_test)",
    "docstring": "Generate (train, test) indices",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\model_selection\\_split.py",
    "ast_data": "FunctionDef name:_iter_indices arg:self arg:X arg:y arg:groups arguments arg arg arg arg Assign Call Assign Call Assign Call For Call Assign Call Assign Assign"
  },
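The same index generation is reachable through the public `ShuffleSplit` API, which drives `_iter_indices` under the hood:

```python
# Each split draws a fresh random permutation, then slices off the
# test indices followed by the train indices.
import numpy as np
from sklearn.model_selection import ShuffleSplit

X = np.arange(10).reshape(-1, 1)
splitter = ShuffleSplit(n_splits=2, test_size=0.3, random_state=0)
for train_idx, test_idx in splitter.split(X):
    print(len(train_idx), len(test_idx))  # 7 3, for each of the 2 splits
```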
  {
    "library": "scipy",
    "name": "k_max",
    "source_code": "def k_max(self, n: int) -> int:\n    return self._post_padding(n)[0]",
    "docstring": "First sample index after signal end not touched by a time slice. - 1 is the largest sample index of the slice - 1 for a given input signal of samples. A detailed example is provided in the :ref: section of the :ref:. Parameters ---------- n : int Number of samples of input signal (must be ≥ half of the window length). See Also -------- k_min: The smallest possible signal index. p_min: The smallest possible slice index. p_max: Index of first non-overlapping upper time slice. p_num: Number of time slices, i.e., - . p_range: Determine and validate slice index range. ShortTimeFFT: Class this method belongs to.",
    "type": "method",
    "file_path": "scipy\\scipy\\signal\\_short_time_fft.py",
    "ast_data": "FunctionDef name:k_max arg:self arg:n arguments arg arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "ParamModuleInfo",
    "source_code": "@dataclass\nclass ParamModuleInfo:\n    module: nn.Module\n    param_name: str\n    shared_modules: list[nn.Module] = field(default_factory=list)\n    shared_param_names: list[str] = field(default_factory=list)",
    "docstring": "For a parameter, this stores the module and the parameter name to be able to do a parameter swap via ``. We additionally save shared modules and shared parameter names to update them accordingly.",
    "type": "class",
    "file_path": "pytorch\\torch\\distributed\\fsdp\\_fully_shard\\_fsdp_param.py",
    "ast_data": "ClassDef name:ParamModuleInfo Call Call"
  },
  {
    "library": "scipy",
    "name": "_resolve_ufunc",
    "source_code": "def _resolve_ufunc(self, **kwargs):\n    if isinstance(self._ufunc_or_ufuncs, np.ufunc):\n        return self._ufunc_or_ufuncs\n    ufunc_key = self._key(**kwargs)\n    return self._ufunc_or_ufuncs[ufunc_key]",
    "docstring": "Resolve to a ufunc based on keyword arguments.",
    "type": "method",
    "file_path": "scipy\\scipy\\special\\_multiufuncs.py",
    "ast_data": "FunctionDef name:_resolve_ufunc arg:self arguments arg arg If Call Return return:yes Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_fft",
    "source_code": "def _fft(self, x):\n    x_complex = _to_complex(x)\n    return _FFT_OP[self.block_depth](x_complex)",
    "docstring": "FFT along the last self.block_depth dimensions of x. Args: x: with floating or complex . Should be in the form returned by self._vectorize_then_blockify. Returns: with .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\linalg\\linear_operator_circulant.py",
    "ast_data": "FunctionDef name:_fft arg:self arg:x arguments arg arg Assign Call Return return:yes Call"
  },
  {
    "library": "cryptography",
    "name": "public_key",
    "source_code": "@abc.abstractmethod\ndef public_key(self) -> DHPublicKey:\n    pass",
    "docstring": "The DHPublicKey associated with this private key.",
    "type": "method",
    "file_path": "cryptography\\src\\cryptography\\hazmat\\primitives\\asymmetric\\dh.py",
    "ast_data": "FunctionDef name:public_key arg:self arguments arg"
  },
  {
    "library": "scipy",
    "name": "_transpose",
    "source_code": "def _transpose(self):\n    return _TransposedLinearOperator(self)",
    "docstring": "Default implementation of _transpose; defers to rmatvec + conj",
    "type": "method",
    "file_path": "scipy\\scipy\\sparse\\linalg\\_interface.py",
    "ast_data": "FunctionDef name:_transpose arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "l2_penalty",
    "source_code": "def l2_penalty(self, weights, l2_reg_strength):\n    norm2_w = weights @ weights if weights.ndim == 1 else squared_norm(weights)\n    return 0.5 * l2_reg_strength * norm2_w",
    "docstring": "Compute L2 penalty term l2_reg_strength/2 *||w||_2^2.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\linear_model\\_linear_loss.py",
    "ast_data": "FunctionDef name:l2_penalty arg:self arg:weights arg:l2_reg_strength arguments arg arg arg Assign Compare Call Return return:yes"
  },
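A quick numeric check of the formula `l2_reg_strength/2 * ||w||_2^2` for a 1-D weight vector (for 2-D weights the method falls back to a squared Frobenius norm):

```python
# Verify the L2 penalty arithmetic on a tiny weight vector.
import numpy as np

w = np.array([1.0, -2.0, 2.0])
l2_reg_strength = 0.5
penalty = 0.5 * l2_reg_strength * (w @ w)
print(penalty)  # 0.25 * (1 + 4 + 4) = 2.25
```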
  {
    "library": "matplotlib",
    "name": "findSystemFonts",
    "source_code": "def findSystemFonts(fontpaths=None, fontext='ttf'):\n    fontfiles = set()\n    fontexts = get_fontext_synonyms(fontext)\n    if fontpaths is None:\n        if sys.platform == 'win32':\n            installed_fonts = _get_win32_installed_fonts()\n            fontpaths = []\n        else:\n            installed_fonts = _get_fontconfig_fonts()\n            if sys.platform == 'darwin':\n                installed_fonts += _get_macos_fonts()\n                fontpaths = [*X11FontDirectories, *OSXFontDirectories]\n            else:\n                fontpaths = X11FontDirectories\n        fontfiles.update((str(path) for path in installed_fonts if path.suffix.lower()[1:] in fontexts))\n    elif isinstance(fontpaths, str):\n        fontpaths = [fontpaths]\n    for path in fontpaths:\n        fontfiles.update(map(os.path.abspath, list_fonts(path, fontexts)))\n    return [fname for fname in fontfiles if os.path.exists(fname)]",
    "docstring": "Search for fonts in the specified font paths. If no paths are given, will use a standard set of system paths, as well as the list of fonts tracked by fontconfig if fontconfig is installed and available. A list of TrueType fonts are returned by default with AFM fonts as an option.",
    "type": "function",
    "file_path": "matplotlib\\lib\\matplotlib\\font_manager.py",
    "ast_data": "FunctionDef name:findSystemFonts arg:fontpaths arg:fontext arguments arg arg Assign Call Assign Call If Compare If Compare Assign Call Assign Assign Call If Compare Call Assign Assign Call Call Compare Call If Call Assign For Call Call Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "dump_backend_state",
    "source_code": "def dump_backend_state(gm, args, compiler_name, check_accuracy=False):\n    assert NNModuleToString.can_convert_to_string(gm)\n    return dump_backend_repro_as_file(gm, args, compiler_name, check_accuracy)",
    "docstring": "Dumps the dynamo graph to repro the issue. 1) It tries to convert Fx GraphModule to a string. If we can, it writes to a repro.py file. 2) If we can't convert Fx GraphModule to a string, we use to_folder to save the module and save a tar file.",
    "type": "function",
    "file_path": "pytorch\\torch\\_dynamo\\repro\\after_dynamo.py",
    "ast_data": "FunctionDef name:dump_backend_state arg:gm arg:args arg:compiler_name arg:check_accuracy arguments arg arg arg arg Call Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "__init__",
    "source_code": "def __init__(self, canvas, parent=None, coordinates=True):\n    QtWidgets.QToolBar.__init__(self, parent)\n    self.setAllowedAreas(QtCore.Qt.ToolBarArea(_to_int(QtCore.Qt.ToolBarArea.TopToolBarArea) | _to_int(QtCore.Qt.ToolBarArea.BottomToolBarArea)))\n    self.coordinates = coordinates\n    self._actions = {}\n    self._subplot_dialog = None\n    for text, tooltip_text, image_file, callback in self.toolitems:\n        if text is None:\n            self.addSeparator()\n        else:\n            slot = getattr(self, callback)\n            slot = functools.wraps(slot)(functools.partial(slot))\n            slot = QtCore.Slot()(slot)\n            a = self.addAction(self._icon(image_file + '.png'), text, slot)\n            self._actions[callback] = a\n            if callback in ['zoom', 'pan']:\n                a.setCheckable(True)\n            if tooltip_text is not None:\n                a.setToolTip(tooltip_text)\n    if self.coordinates:\n        self.locLabel = QtWidgets.QLabel('', self)\n        self.locLabel.setAlignment(QtCore.Qt.AlignmentFlag(_to_int(QtCore.Qt.AlignmentFlag.AlignRight) | _to_int(QtCore.Qt.AlignmentFlag.AlignVCenter)))\n        self.locLabel.setSizePolicy(QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Policy.Expanding, QtWidgets.QSizePolicy.Policy.Ignored))\n        labelAction = self.addWidget(self.locLabel)\n        labelAction.setVisible(True)\n    NavigationToolbar2.__init__(self, canvas)",
    "docstring": "coordinates: should we show the coordinates on the right?",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\backends\\backend_qt.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:canvas arg:parent arg:coordinates arguments arg arg arg arg Call Call Call Call Call Assign Assign Assign For If Compare Call Assign Call Assign Call Call Call Assign Call Call Assign Call Call Assign If Compare Call If Compare Call If Assign Call Call Call Call Call Call Call Assign Call Call Call"
  },
  {
    "library": "pandas",
    "name": "is_timedelta64_dtype",
    "source_code": "def is_timedelta64_dtype(arr_or_dtype) -> bool:\n    if isinstance(arr_or_dtype, np.dtype):\n        return arr_or_dtype.kind == 'm'\n    return _is_dtype_type(arr_or_dtype, classes(np.timedelta64))",
    "docstring": "Check whether an array-like or dtype is of the timedelta64 dtype. Parameters ---------- arr_or_dtype : array-like or dtype The array-like or dtype to check. Returns ------- boolean Whether or not the array-like or dtype is of the timedelta64 dtype. See Also -------- api.types.is_timedelta64_ns_dtype : Check whether the provided array or dtype is of the timedelta64[ns] dtype. api.types.is_period_dtype : Check whether an array-like or dtype is of the Period dtype. Examples -------- >>> from pandas.core.dtypes.common import is_timedelta64_dtype >>> is_timedelta64_dtype(object) False >>> is_timedelta64_dtype(np.timedelta64) True >>> is_timedelta64_dtype([1, 2, 3]) False >>> is_timedelta64_dtype(pd.Series([], dtype=\"timedelta64[ns]\")) True >>> is_timedelta64_dtype(\"0 days\") False",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\dtypes\\common.py",
    "ast_data": "FunctionDef name:is_timedelta64_dtype arg:arr_or_dtype arguments arg If Call Return return:yes Compare Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "register",
    "source_code": "def register(self, method_name: str, func: Union[def_function.Function, tf_function.ConcreteFunction]):\n    if isinstance(func, def_function.Function):\n        if func.function_spec.arg_names:\n            if func.input_signature is None:\n                raise ValueError('Input signature not specified for the function.')\n        concrete_fn = func.get_concrete_function()\n        gen_rpc_ops.rpc_server_register(self._server_handle, method_name=method_name, captured_inputs=concrete_fn.captured_inputs, input_specs=get_input_specs_from_function(concrete_fn), output_specs=get_output_specs_from_function(concrete_fn), f=concrete_fn)\n    elif isinstance(func, tf_function.ConcreteFunction):\n        gen_rpc_ops.rpc_server_register(self._server_handle, method_name=method_name, captured_inputs=func.captured_inputs, input_specs=get_input_specs_from_function(func), output_specs=get_output_specs_from_function(func), f=func)\n    else:\n        raise ValueError('Only TF functions are supported with Register method')",
    "docstring": "Method for registering functions.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\experimental\\rpc\\rpc_ops.py",
    "ast_data": "FunctionDef name:register arg:self arg:method_name arg:func arguments arg arg arg If Call If If Compare Raise Call Assign Call Call Call Call If Call Call Call Call Raise Call"
  },
  {
    "library": "tensorflow",
    "name": "globals_in_original_context",
    "source_code": "def globals_in_original_context(caller_fn_scope):\n    return _find_originating_frame(caller_fn_scope, innermost=True).f_globals",
    "docstring": "Executes the locals function in the context of a specified function.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\autograph\\operators\\py_builtins.py",
    "ast_data": "FunctionDef name:globals_in_original_context arg:caller_fn_scope arguments arg Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "__call__",
    "source_code": "def __call__(self, x, pos=None):\n    ax_min, ax_max = self.axis.get_view_interval()\n    display_range = abs(ax_max - ax_min)\n    return self.fix_minus(self.format_pct(x, display_range))",
    "docstring": "Format the tick as a percentage with the appropriate scaling.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\ticker.py",
    "ast_data": "FunctionDef name:__call__ arg:self arg:x arg:pos arguments arg arg arg Assign Call Assign Call Return return:yes Call Call"
  },
  {
    "library": "numpy",
    "name": "_compute_virtual_index",
    "source_code": "def _compute_virtual_index(n, quantiles, alpha: float, beta: float):\n    return n * quantiles + (alpha + quantiles * (1 - alpha - beta)) - 1",
    "docstring": "Compute the floating point indexes of an array for the linear interpolation of quantiles. n : array_like The sample sizes. quantiles : array_like The quantiles values. alpha : float A constant used to correct the index computed. beta : float A constant used to correct the index computed. alpha and beta values depend on the chosen method (see quantile documentation) Reference: Hyndman&Fan paper \"Sample Quantiles in Statistical Packages\", DOI: 10.1080/00031305.1996.10473566",
    "type": "function",
    "file_path": "numpy\\numpy\\lib\\_function_base_impl.py",
    "ast_data": "FunctionDef name:_compute_virtual_index arg:n arg:quantiles arg:alpha arg:beta arguments arg arg arg arg Return return:yes"
  },
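A worked check: with `alpha = beta = 1` (Hyndman & Fan's definition 7, NumPy's default "linear" method) the formula collapses to `(n - 1) * quantiles`:

```python
# Median of 11 samples: the virtual index lands exactly on sample 5.
n, q, alpha, beta = 11, 0.5, 1.0, 1.0
virtual_index = n * q + (alpha + q * (1 - alpha - beta)) - 1
print(virtual_index)                  # 5.0
print(virtual_index == (n - 1) * q)   # True
```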
  {
    "library": "tensorflow",
    "name": "_greater_equal_flops",
    "source_code": "@ops.RegisterStatistics('GreaterEqual', 'flops')\ndef _greater_equal_flops(graph, node):\n    return _binary_per_element_op_flops(graph, node)",
    "docstring": "Compute flops for GreaterEqual operation.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\profiler\\internal\\flops_registry.py",
    "ast_data": "FunctionDef name:_greater_equal_flops arg:graph arg:node arguments arg arg Return return:yes Call Call"
  },
  {
    "library": "numpy",
    "name": "_mrreconstruct",
    "source_code": "def _mrreconstruct(subtype, baseclass, baseshape, basetype):\n    _data = np.ndarray.__new__(baseclass, baseshape, basetype).view(subtype)\n    _mask = np.ndarray.__new__(np.ndarray, baseshape, 'b1')\n    return subtype.__new__(subtype, _data, mask=_mask, dtype=basetype)",
    "docstring": "Build a new MaskedArray from the information stored in a pickle.",
    "type": "function",
    "file_path": "numpy\\numpy\\ma\\mrecords.py",
    "ast_data": "FunctionDef name:_mrreconstruct arg:subtype arg:baseclass arg:baseshape arg:basetype arguments arg arg arg arg Assign Call Call Assign Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "_resolve_parameter_dtypes",
    "source_code": "def _resolve_parameter_dtypes(signature: _schemas.OpSignature, named_inputs: Mapping[str, AllowedArgType]) -> Mapping[_schemas.TypeConstraintParam, ir.TypeProtocol]:\n    type_binding = {}\n    for name, arg in named_inputs.items():\n        param = signature.params_map[name]\n        assert isinstance(param, _schemas.Parameter), f'Expected Parameter, got {type(param)}'\n        if isinstance(arg, (int, float, bool, str, Sequence, torch.Tensor)):\n            continue\n        elif isinstance(arg, ir.Value):\n            if arg.type is None:\n                continue\n            assert arg.type is not None, f'Expected type to be set for {arg}'\n            type_binding[param.type_constraint] = arg.type\n    return type_binding",
    "docstring": "Determine which parameter takes which type. Handle non-tensor input corner cases and type promotion. Requires: All ir.Value in name_inputs should have type set. Their type should be compatible with the type_constraint of the corresponding parameter in the signature. Args: signature: The OpSignature for the node. named_inputs: The mapping of parameter names to their arguments. Returns: A mapping of Constraint names to ir.TypeProtocol.",
    "type": "function",
    "file_path": "pytorch\\torch\\onnx\\_internal\\exporter\\_building.py",
    "ast_data": "FunctionDef name:_resolve_parameter_dtypes arg:signature arg:named_inputs arguments arg arg Assign For Call Assign Call Call If Call If Call If Compare Compare Assign Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "__init__",
    "source_code": "def __init__(self, vcenter, vmin=None, vmax=None):\n    super().__init__(vmin=vmin, vmax=vmax)\n    self._vcenter = vcenter\n    if vcenter is not None and vmax is not None and (vcenter >= vmax):\n        raise ValueError('vmin, vcenter, and vmax must be in ascending order')\n    if vcenter is not None and vmin is not None and (vcenter <= vmin):\n        raise ValueError('vmin, vcenter, and vmax must be in ascending order')",
    "docstring": "Normalize data with a set center. Useful when mapping data with an unequal rates of change around a conceptual center, e.g., data that range from -2 to 4, with 0 as the midpoint. Parameters ---------- vcenter : float The data value that defines `` in the normalization. Defaults to the max value of the dataset. Examples -------- This maps data value -4000 to 0., 0 to 0.5, and +10000 to 1.0; data between is linearly interpolated:: >>> import matplotlib.colors as mcolors >>> offset = mcolors.TwoSlopeNorm(vmin=-4000., ... vcenter=0., vmax=10000) >>> data = [-4000., -2000., 0., 2500., 5000., 7500., 10000.] >>> offset(data) array([0., 0.25, 0.5, 0.625, 0.75, 0.875, 1.0])",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\colors.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:vcenter arg:vmin arg:vmax arguments arg arg arg arg Call Call Assign If BoolOp Compare Compare Compare Raise Call If BoolOp Compare Compare Compare Raise Call"
  },
  {
    "library": "pandas",
    "name": "trim_front",
    "source_code": "def trim_front(strings: list[str]) -> list[str]:\n    if not strings:\n        return strings\n    smallest_leading_space = min((len(x) - len(x.lstrip()) for x in strings))\n    if smallest_leading_space > 0:\n        strings = [x[smallest_leading_space:] for x in strings]\n    return strings",
    "docstring": "Trims leading spaces evenly among all strings. Examples -------- >>> trim_front([\" a\", \" b\"]) ['a', 'b'] >>> trim_front([\" a\", \" \"]) ['a', '']",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\indexes\\base.py",
    "ast_data": "FunctionDef name:trim_front arg:strings arguments arg If Return return:yes Assign Call Call Call Call If Compare Assign Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "minorticks_off",
    "source_code": "def minorticks_off(self):\n    self._minorlocator = ticker.NullLocator()\n    self.long_axis.set_minor_locator(self._minorlocator)",
    "docstring": "Turn the minor ticks of the colorbar off.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\colorbar.py",
    "ast_data": "FunctionDef name:minorticks_off arg:self arguments arg Assign Call Call"
  },
  {
    "library": "pytorch",
    "name": "_FSDPRefType",
    "source_code": "class _FSDPRefType(_RefType):\n    SHARDED_PARAM = 'Sharded Param'\n    UNSHARDED_PARAM = 'Unsharded Param'\n    BUFFER = 'Buffer'\n    SHARDED_GRAD = 'Sharded Grad'\n    UNSHARDED_GRAD = 'Unsharded Grad'\n    ACT = 'Activation'\n    TEMP = 'Temp'\n    ALL_GATHER = 'All Gather'\n    REDUCE_SCATTER = 'Reduce Scatter'\n    OPT = 'OptState'\n    INP = 'Inputs'",
    "docstring": "Enumerates categories of memory usage in FSDP modules, including parameters, gradients, activations, and optimizer states. Attributes: SHARDED_PARAM (str): Memory usage of sharded parameters. UNSHARDED_PARAM (str): Memory usage of unsharded parameters. SHARDED_GRAD (str): Memory usage of sharded gradients corresponding to the sharded parameters. UNSHARDED_GRAD (str): Memory usage of unsharded gradients corresponding to the unsharded parameters. ACT (str): Memory usage of activations and tensors from forward and AC recomputation. TEMP (str): Memory usage of temporary tensors during the backward pass including gradients of activations. ALL_GATHER (str): Memory usage of all_gather output tensor. REDUCE_SCATTER (str): Memory usage of reduce_scatter input tensor. OPT (str): Memory usage of tensors storing optimizer states. INP (str): Memory usage of input tensors.",
    "type": "class",
    "file_path": "pytorch\\torch\\distributed\\_tools\\fsdp2_mem_tracker.py",
    "ast_data": "ClassDef name:_FSDPRefType Assign Assign Assign Assign Assign Assign Assign Assign Assign Assign Assign"
  },
  {
    "library": "pytorch",
    "name": "shard_full_optim_state_dict",
    "source_code": "@staticmethod\ndef shard_full_optim_state_dict(full_optim_state_dict: dict[str, Any], model: torch.nn.Module, optim_input: Optional[Union[list[dict[str, Any]], Iterable[torch.nn.Parameter]]]=None, optim: Optional[torch.optim.Optimizer]=None) -> dict[str, Any]:\n    FullyShardedDataParallel._warn_legacy_optim_state_dict('shard_full_optim_state_dict', 'optim_state_dict_to_load', stacklevel=2)\n    return FullyShardedDataParallel._optim_state_dict_to_load_impl(optim_state_dict=full_optim_state_dict, model=model, optim_input=optim_input, optim=optim, full_state_dict=True, is_named_optimizer=False)",
    "docstring": "Shard a full optimizer state-dict. Remaps the state in `full_optim_state_dictshard_full_optim_state_dictscatter_full_optim_state_dictFullyShardedDataParallellist`) Returns: Dict[str, Any]: The full optimizer state dict now remapped to flattened parameters instead of unflattened parameters and restricted to only include this rank's part of the optimizer state.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\fsdp\\fully_sharded_data_parallel.py",
    "ast_data": "FunctionDef name:shard_full_optim_state_dict arg:full_optim_state_dict arg:model arg:optim_input arg:optim arguments arg arg arg arg Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "_flatten_non_tensor_optim_state",
    "source_code": "def _flatten_non_tensor_optim_state(state_name: str, non_tensors: list[Any], unflat_param_names: list[str]) -> Any:\n    non_none_non_tensors = [nt for nt in non_tensors if nt is not None]\n    non_tensor_set = set(non_tensors)\n    if len(non_none_non_tensors) != len(non_tensors) or len(non_tensor_set) != 1:\n        raise ValueError(f'All unflattened parameters comprising a single flat parameter must have scalar state with the same value and dtype but got values {non_tensor_set} for state {state_name} and  unflattened parameter names {unflat_param_names}')\n    non_tensor = next(iter(non_tensor_set))\n    return non_tensor",
    "docstring": "Flattens the non-tensor optimizer state given by the values `_flatten_zero_dim_tensor_optim_statelist`.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\fsdp\\_optim_utils.py",
    "ast_data": "FunctionDef name:_flatten_non_tensor_optim_state arg:state_name arg:non_tensors arg:unflat_param_names arguments arg arg arg Assign Compare Assign Call If BoolOp Compare Call Call Compare Call Raise Call Assign Call Call Return return:yes"
  },
  {
    "library": "cherrypy",
    "name": "redirect",
    "source_code": "def redirect(self, url):\n    raise cherrypy.HTTPRedirect(url)",
    "docstring": "Perform an HTTP redirect to the given URL.",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\_cpdispatch.py",
    "ast_data": "FunctionDef name:redirect arg:self arg:url arguments arg arg Raise Call"
  },
  {
    "library": "pytorch",
    "name": "_parent_name",
    "source_code": "def _parent_name(target):\n    r = target.rsplit('.', 1)\n    if len(r) == 1:\n        return ('', r[0])\n    else:\n        return (r[0], r[1])",
    "docstring": "Turn 'foo.bar' into ['foo', 'bar']",
    "type": "function",
    "file_path": "pytorch\\torch\\ao\\quantization\\utils.py",
    "ast_data": "FunctionDef name:_parent_name arg:target arguments arg Assign Call If Compare Call Return return:yes Return return:yes"
  },
  {
    "library": "pygame",
    "name": "find_freetype",
    "source_code": "def find_freetype():\n    pkg_config = DependencyProg('FREETYPE', 'FREETYPE_CONFIG', 'pkg-config freetype2', '2.0', ['freetype2'], '--modversion')\n    if pkg_config.found:\n        return pkg_config\n    freetype_config = DependencyProg('FREETYPE', 'FREETYPE_CONFIG', 'freetype-config', '2.0', ['freetype'], '--ftversion')\n    if freetype_config.found:\n        return freetype_config\n    return pkg_config",
    "docstring": "modern freetype uses pkg-config. However, some older systems don't have that.",
    "type": "function",
    "file_path": "pygame\\buildconfig\\config_darwin.py",
    "ast_data": "FunctionDef name:find_freetype arguments Assign Call If Return return:yes Assign Call If Return return:yes Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "used_dims_from_index",
    "source_code": "def used_dims_from_index(self, index: sympy.Expr):\n    used_dims = OrderedSet[sympy.Symbol]()\n    for sym in index.free_symbols:\n        assert isinstance(sym, sympy.Symbol)\n        if symbol_is_type(sym, SymT.TMP):\n            cse_var = self.lookup_cse_var(sym.name)\n            assert isinstance(cse_var, HalideCSEVariable) and cse_var.used_dims is not None\n            used_dims.update(cse_var.used_dims)\n        elif symbol_is_type(sym, SymT.HALIDE):\n            used_dims.add(sym)\n        elif symbol_is_type(sym, (SymT.UNBACKED_INT, SymT.SIZE, SymT.PRECOMPUTED_SIZE, SymT.INDEX)):\n            pass\n        else:\n            raise NotImplementedError(f'unhandled symbol {sym}')\n    return self.sort_used_dims(used_dims)",
    "docstring": "Detect which range trees are used to populate HalideCSEVariable.used_dims",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\codegen\\halide.py",
    "ast_data": "FunctionDef name:used_dims_from_index arg:self arg:index arguments arg arg Assign Call For Call If Call Assign Call BoolOp Call Compare Call If Call Call If Call Raise Call Return return:yes Call"
  },
  {
    "library": "scrapy",
    "name": "set_in_component_priority_dict",
    "source_code": "def set_in_component_priority_dict(self, name: _SettingsKeyT, cls: type, priority: int | None) -> None:\n    component_priority_dict = self.getdict(name)\n    for cls_or_path in tuple(component_priority_dict):\n        if not isinstance(cls_or_path, str):\n            continue\n        _cls = load_object(cls_or_path)\n        if _cls == cls:\n            del component_priority_dict[cls_or_path]\n    component_priority_dict[cls] = priority\n    self.set(name, component_priority_dict, self.getpriority(name) or 0)",
    "docstring": "Set the *cls* component in the *name* :ref: setting with *priority*. If *cls* already exists, its value is updated. If *cls* was present as an import string, even more than once, those keys are dropped and replaced by *cls*. This change is applied regardless of the priority of the *name* setting. The setting priority is not affected by this change either.",
    "type": "method",
    "file_path": "scrapy\\scrapy\\settings\\__init__.py",
    "ast_data": "FunctionDef name:set_in_component_priority_dict arg:self arg:name arg:cls arg:priority arguments arg arg arg arg Assign Call For Call If Call Assign Call If Compare Assign Call BoolOp Call"
  },
  {
    "library": "django",
    "name": "L",
    "source_code": "def L(self):\n    return calendar.isleap(self.data.year)",
    "docstring": "Boolean for whether it is a leap year; i.e. True or False",
    "type": "method",
    "file_path": "django\\django\\utils\\dateformat.py",
    "ast_data": "FunctionDef name:L arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "_has_conv_bias_filter",
    "source_code": "def _has_conv_bias_filter(match: 'InternalMatch', original_graph: Graph, pattern_graph: Graph) -> bool:\n    for n in match.nodes_map.values():\n        if _is_conv_or_conv_transpose_node(n):\n            return len(n.args) > 2 and n.args[2] is not None\n    raise ValueError('Could not find conv node in matched conv + bn pattern')",
    "docstring": "Match filter for the subgraph rewriter that returns True if the conv node in the original graph has bias.",
    "type": "function",
    "file_path": "pytorch\\torch\\ao\\quantization\\pt2e\\qat_utils.py",
    "ast_data": "FunctionDef name:_has_conv_bias_filter arg:match arg:original_graph arg:pattern_graph arguments arg arg arg For Call If Call Return return:yes BoolOp Compare Call Compare Raise Call"
  },
  {
    "library": "django",
    "name": "ngettext",
    "source_code": "def ngettext(singular, plural, number):\n    return do_ntranslate(singular, plural, number, 'ngettext')",
    "docstring": "Return a string of the translation of either the singular or plural, based on the number.",
    "type": "function",
    "file_path": "django\\django\\utils\\translation\\trans_real.py",
    "ast_data": "FunctionDef name:ngettext arg:singular arg:plural arg:number arguments arg arg arg Return return:yes Call"
  },
  {
    "library": "kornia",
    "name": "ColorSpace",
    "source_code": "class ColorSpace(Enum):\n    UNKNOWN = 0\n    GRAY = 1\n    RGB = 2\n    BGR = 3",
    "docstring": "Enum that represents the color space of an image.",
    "type": "class",
    "file_path": "kornia\\kornia\\image\\base.py",
    "ast_data": "ClassDef name:ColorSpace Assign Assign Assign Assign"
  },
  {
    "library": "scipy",
    "name": "aps15_f",
    "source_code": "def aps15_f(x, n):\n    if x < 0:\n        return -0.859\n    if x > 2 * 0.001 / (1 + n):\n        return np.e - 1.859\n    return np.exp((n + 1) * x / 2 * 1000) - 1.859",
    "docstring": "piecewise linear, constant outside of [0, 0.002/(1+n)]",
    "type": "function",
    "file_path": "scipy\\scipy\\optimize\\_tstutils.py",
    "ast_data": "FunctionDef name:aps15_f arg:x arg:n arguments arg arg If Compare Return return:yes If Compare Return return:yes Return return:yes Call"
  },
  {
    "library": "sphinx",
    "name": "add_autodocumenter",
    "source_code": "def add_autodocumenter(self, cls: type[Documenter], override: bool=False) -> None:\n    logger.debug('[app] adding autodocumenter: %r', cls)\n    from sphinx.ext.autodoc.directive import AutodocDirective\n    self.registry.add_documenter(cls.objtype, cls)\n    self.add_directive('auto' + cls.objtype, AutodocDirective, override=override)",
    "docstring": "Register a new documenter class for the autodoc extension. Add *cls* as a new documenter class for the :mod: extension. It must be a subclass of :class:. This allows auto-documenting new types of objects. See the source of the autodoc module for examples on how to subclass :class:. If *override* is True, the given *cls* is forcedly installed even if a documenter having the same name is already installed. See :ref:. .. versionadded:: 0.6 .. versionchanged:: 2.2 Add *override* keyword.",
    "type": "method",
    "file_path": "sphinx\\sphinx\\application.py",
    "ast_data": "FunctionDef name:add_autodocumenter arg:self arg:cls arg:override arguments arg arg arg Call Call Call"
  },
  {
    "library": "numpy",
    "name": "good",
    "source_code": "def good(self, msg, *args):\n    if WARN >= self.threshold:\n        if args:\n            print(green_text(msg % _fix_args(args)))\n        else:\n            print(green_text(msg))\n        sys.stdout.flush()",
    "docstring": "If we log WARN messages, log this message as a 'nice' anti-warn message.",
    "type": "method",
    "file_path": "numpy\\numpy\\distutils\\log.py",
    "ast_data": "FunctionDef name:good arg:self arg:msg arguments arg arg arg If Compare If Call Call Call Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_map_subgraph_network",
    "source_code": "def _map_subgraph_network(inputs, outputs):\n    if not ops.executing_eagerly_outside_functions():\n        base_layer_utils.create_keras_history(outputs)\n    _, nodes_by_depth, layers, _ = _map_graph_network(inputs, outputs)\n    return (nest.flatten([nodes for nodes in nodes_by_depth.values()]), layers)",
    "docstring": "Returns the nodes and layers in the topology from to . Args: inputs: List of input tensors. outputs: List of output tensors. Returns: A tuple of List{Node] and List[Layer].",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\functional.py",
    "ast_data": "FunctionDef name:_map_subgraph_network arg:inputs arg:outputs arguments arg arg If Call Call Assign Call Return return:yes Call Call"
  },
  {
    "library": "django",
    "name": "i18n_patterns",
    "source_code": "def i18n_patterns(*urls, prefix_default_language=True):\n    if not settings.USE_I18N:\n        return list(urls)\n    return [URLResolver(LocalePrefixPattern(prefix_default_language=prefix_default_language), list(urls))]",
    "docstring": "Add the language code prefix to every URL pattern within this function. This may only be used in the root URLconf, not in an included URLconf.",
    "type": "function",
    "file_path": "django\\django\\conf\\urls\\i18n.py",
    "ast_data": "FunctionDef name:i18n_patterns arguments arg arg If Return return:yes Call Return return:yes Call Call Call"
  },
  {
    "library": "cherrypy",
    "name": "from_fp",
    "source_code": "@classmethod\ndef from_fp(cls, fp, boundary):\n    headers = cls.read_headers(fp)\n    return cls(fp, headers, boundary)",
    "docstring": "Initialize an entity part from a file handle.",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\_cpreqbody.py",
    "ast_data": "FunctionDef name:from_fp arg:cls arg:fp arg:boundary arguments arg arg arg Assign Call Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "__init__",
    "source_code": "def __init__(self, output):\n    self._output_type = _api.check_getitem({'path': 'vector', 'agg': 'raster', 'macosx': 'raster'}, output=output.lower())",
    "docstring": "Create a MathTextParser for the given backend *output*. Parameters ---------- output : {\"path\", \"agg\"} Whether to return a (\"path\") or a (\"agg\", or its synonym \"macosx\").",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\mathtext.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:output arguments arg arg Assign Call Call"
  },
  {
    "library": "scipy",
    "name": "_default_response_times",
    "source_code": "def _default_response_times(A, n):\n    vals = linalg.eigvals(A)\n    r = min(abs(real(vals)))\n    if r == 0.0:\n        r = 1.0\n    tc = 1.0 / r\n    t = linspace(0.0, 7 * tc, n)\n    return t",
    "docstring": "Compute a reasonable set of time samples for the response time. This function is used by and to compute the response time when the argument to the function is None. Parameters ---------- A : array_like The system matrix, which is square. n : int The number of time samples to generate. Returns ------- t : ndarray The 1-D array of length of time samples at which the response is to be computed.",
    "type": "function",
    "file_path": "scipy\\scipy\\signal\\_ltisys.py",
    "ast_data": "FunctionDef name:_default_response_times arg:A arg:n arguments arg arg Assign Call Assign Call Call Call If Compare Assign Assign Assign Call Return return:yes"
  },
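  The heuristic in the code above: the slowest system mode (smallest |Re(eigenvalue)| of A) sets a time constant tc = 1/r, and the response is sampled over seven time constants. A self-contained sketch, assuming a hypothetical stable 2x2 system:

  ```python
  import numpy as np
  from scipy import linalg

  # Hypothetical stable system: modes decay like exp(-t) and exp(-5t).
  A = np.array([[-1.0, 0.0],
                [0.0, -5.0]])
  vals = linalg.eigvals(A)
  r = min(abs(np.real(vals)))        # slowest decay rate -> 1.0
  tc = 1.0 / r                       # dominant time constant
  t = np.linspace(0.0, 7 * tc, 100)  # sample across ~7 time constants
  print(t[0], t[-1])                 # 0.0 7.0
  ```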
  {
    "library": "pandas",
    "name": "reindex",
    "source_code": "@doc(klass=_shared_doc_kwargs['klass'], optional_reindex='')\ndef reindex(self, labels=None, *, index=None, columns=None, axis: Axis | None=None, method: ReindexMethod | None=None, copy: bool | lib.NoDefault=lib.no_default, level: Level | None=None, fill_value: Scalar | None=np.nan, limit: int | None=None, tolerance=None) -> Self:\n    self._check_copy_deprecation(copy)\n    if index is not None and columns is not None and (labels is not None):\n        raise TypeError(\"Cannot specify all of 'labels', 'index', 'columns'.\")\n    elif index is not None or columns is not None:\n        if axis is not None:\n            raise TypeError(\"Cannot specify both 'axis' and any of 'index' or 'columns'\")\n        if labels is not None:\n            if index is not None:\n                columns = labels\n            else:\n                index = labels\n    elif axis and self._get_axis_number(axis) == 1:\n        columns = labels\n    else:\n        index = labels\n    axes: dict[Literal['index', 'columns'], Any] = {'index': index, 'columns': columns}\n    method = clean_reindex_fill_method(method)\n    if all((self._get_axis(axis_name).identical(ax) for axis_name, ax in axes.items() if ax is not None)):\n        return self.copy(deep=False)\n    if self._needs_reindex_multi(axes, method, level):\n        return self._reindex_multi(axes, fill_value)\n    return self._reindex_axes(axes, level, limit, tolerance, method, fill_value).__finalize__(self, method='reindex')",
    "docstring": "Conform {klass} to new index with optional filling logic. Places NA/NaN in locations having no value in the previous index. A new object is produced unless the new index is equivalent to the current one and `copyCopy-on-Write copycopycopyuser guide ` for more.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\generic.py",
    "ast_data": "FunctionDef name:reindex arg:self arg:labels arguments arg arg arg arg arg arg arg arg arg arg arg Call If BoolOp Compare Compare Compare Raise Call If BoolOp Compare Compare If Compare Raise Call If Compare If Compare Assign Assign If BoolOp Compare Call Assign Assign Assign Call If Call Call Call Call Compare Return return:yes Call If Call Return return:yes Call Return return:yes Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "kl_div",
    "source_code": "def kl_div(input: Tensor, target: Tensor, size_average: Optional[bool]=None, reduce: Optional[bool]=None, reduction: str='mean', log_target: bool=False) -> Tensor:\n    if has_torch_function_variadic(input, target):\n        return handle_torch_function(kl_div, (input, target), input, target, size_average=size_average, reduce=reduce, reduction=reduction, log_target=log_target)\n    if size_average is not None or reduce is not None:\n        reduction_enum = _Reduction.legacy_get_enum(size_average, reduce)\n    else:\n        if reduction == 'mean':\n            warnings.warn(\"reduction: 'mean' divides the total loss by both the batch size and the support size.'batchmean' divides only by the batch size, and aligns with the KL div math definition.'mean' will be changed to behave the same as 'batchmean' in the next major release.\")\n        if reduction == 'batchmean':\n            reduction_enum = _Reduction.get_enum('sum')\n        else:\n            reduction_enum = _Reduction.get_enum(reduction)\n    reduced = torch.kl_div(input, target, reduction_enum, log_target=log_target)\n    if reduction == 'batchmean' and input.dim() != 0:\n        reduced = reduced / input.size()[0]\n    return reduced",
    "docstring": "Compute the KL Divergence loss. Refer - The __ See :class: for details. Args: input: Tensor of arbitrary shape in log-probabilities. target: Tensor of the same shape as input. See :attr: for the target's interpretation. size_average (bool, optional): Deprecated (see :attr:). reduce (bool, optional): Deprecated (see :attr:). reduction (str, optional): Specifies the reduction to apply to the output: `size_averagereducereductionreductionreduction` which aligns with KL math definition.",
    "type": "function",
    "file_path": "pytorch\\torch\\nn\\functional.py",
    "ast_data": "FunctionDef name:kl_div arg:input arg:target arg:size_average arg:reduce arg:reduction arg:log_target arguments arg arg arg arg arg arg If Call Return return:yes Call If BoolOp Compare Compare Assign Call If Compare Call If Compare Assign Call Assign Call Assign Call If BoolOp Compare Compare Call Assign Call Return return:yes"
  },
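  A small usage sketch of the reduction behavior handled in the code above: with reduction='batchmean' the summed divergence is divided by the batch size, matching the KL definition (random inputs here are illustrative):

  ```python
  import torch
  import torch.nn.functional as F

  log_probs = F.log_softmax(torch.randn(4, 10), dim=1)  # input in log space
  target = F.softmax(torch.randn(4, 10), dim=1)         # target as probabilities

  loss_sum = F.kl_div(log_probs, target, reduction="sum")
  loss_bm = F.kl_div(log_probs, target, reduction="batchmean")
  print(torch.isclose(loss_bm, loss_sum / 4))  # tensor(True): sum / batch size
  ```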
  {
    "library": "matplotlib",
    "name": "add_callback",
    "source_code": "def add_callback(self, func, *args, **kwargs):\n    self.callbacks.append((func, args, kwargs))\n    return func",
    "docstring": "Register *func* to be called by timer when the event fires. Any additional arguments provided will be passed to *func*. This function returns *func*, which makes it possible to use it as a decorator.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\backend_bases.py",
    "ast_data": "FunctionDef name:add_callback arg:self arg:func arguments arg arg arg arg Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_move_non_persistent_buffers_to_tensor_constants",
    "source_code": "def _move_non_persistent_buffers_to_tensor_constants(orig_mod: torch.nn.Module, graph_signature: ExportGraphSignature, constants: dict[str, _ConstantAttributeType]) -> None:\n    for spec in graph_signature.input_specs:\n        if spec.kind == InputKind.BUFFER and (not spec.persistent):\n            assert spec.target is not None\n            assert spec.target not in constants\n            constants[spec.target] = orig_mod.get_buffer(spec.target)",
    "docstring": "Moves non-persistent buffers to tensor constants.",
    "type": "function",
    "file_path": "pytorch\\torch\\export\\_trace.py",
    "ast_data": "FunctionDef name:_move_non_persistent_buffers_to_tensor_constants arg:orig_mod arg:graph_signature arg:constants arguments arg arg arg For If BoolOp Compare Compare Compare Assign Call"
  },
  {
    "library": "scipy",
    "name": "uniform_filter1d",
    "source_code": "@_ni_docstrings.docfiller\ndef uniform_filter1d(input, size, axis=-1, output=None, mode='reflect', cval=0.0, origin=0):\n    input = np.asarray(input)\n    axis = normalize_axis_index(axis, input.ndim)\n    if size < 1:\n        raise RuntimeError('incorrect filter size')\n    complex_output = input.dtype.kind == 'c'\n    output = _ni_support._get_output(output, input, complex_output=complex_output)\n    if size // 2 + origin < 0 or size // 2 + origin >= size:\n        raise ValueError('invalid origin')\n    mode = _ni_support._extend_mode_to_code(mode)\n    if not complex_output:\n        _nd_image.uniform_filter1d(input, size, axis, output, mode, cval, origin)\n    else:\n        _nd_image.uniform_filter1d(input.real, size, axis, output.real, mode, np.real(cval), origin)\n        _nd_image.uniform_filter1d(input.imag, size, axis, output.imag, mode, np.imag(cval), origin)\n    return output",
    "docstring": "Calculate a 1-D uniform filter along the given axis. The lines of the array along the given axis are filtered with a uniform filter of given size. Parameters ---------- %(input)s size : int length of uniform filter %(axis)s %(output)s %(mode_reflect)s %(cval)s %(origin)s Returns ------- result : ndarray Filtered array. Has same shape as . Examples -------- >>> from scipy.ndimage import uniform_filter1d >>> uniform_filter1d([2, 8, 0, 4, 1, 9, 9, 0], size=3) array([4, 3, 4, 1, 4, 6, 6, 3])",
    "type": "function",
    "file_path": "scipy\\scipy\\ndimage\\_filters.py",
    "ast_data": "FunctionDef name:uniform_filter1d arg:input arg:size arg:axis arg:output arg:mode arg:cval arg:origin arguments arg arg arg arg arg arg arg Assign Call Assign Call If Compare Raise Call Assign Compare Assign Call If BoolOp Compare Compare Raise Call Assign Call If Call Call Call Call Call Return return:yes"
  },
  {
    "library": "pygame",
    "name": "get_descent",
    "source_code": "def get_descent(self):\n    return self.get_sized_descender()",
    "docstring": "get_descent() -> int get the descent of the font",
    "type": "method",
    "file_path": "pygame\\src_py\\ftfont.py",
    "ast_data": "FunctionDef name:get_descent arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "seaborn",
    "name": "_err_kws_backcompat",
    "source_code": "def _err_kws_backcompat(self, err_kws, errcolor, errwidth, capsize):\n\n    def deprecate_err_param(name, key, val):\n        if val is deprecated:\n            return\n        suggest = f\"err_kws={{'{key}': {val!r}}}\"\n        msg = f'\\n\\nThe `{name}` parameter is deprecated. And will be removed in v0.15.0. Pass `{suggest}` instead.\\n'\n        warnings.warn(msg, FutureWarning, stacklevel=4)\n        err_kws[key] = val\n    if errcolor is not None:\n        deprecate_err_param('errcolor', 'color', errcolor)\n    deprecate_err_param('errwidth', 'linewidth', errwidth)\n    if capsize is None:\n        capsize = 0\n        msg = '\\n\\nPassing `capsize=None` is deprecated and will be removed in v0.15.0. Pass `capsize=0` to disable caps.\\n'\n        warnings.warn(msg, FutureWarning, stacklevel=3)\n    return (err_kws, capsize)",
    "docstring": "Provide two cycles where existing signature-level err_kws are handled.",
    "type": "method",
    "file_path": "seaborn\\seaborn\\categorical.py",
    "ast_data": "FunctionDef name:_err_kws_backcompat arg:self arg:err_kws arg:errcolor arg:errwidth arg:capsize arguments arg arg arg arg arg FunctionDef name:deprecate_err_param arg:name arg:key arg:val arguments arg arg arg If Compare Return return:no Assign Assign Call Assign If Compare Call Call If Compare Assign Assign Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "get_path",
    "source_code": "def get_path(hatchpattern, density=6):\n    density = int(density)\n    patterns = [hatch_type(hatchpattern, density) for hatch_type in _hatch_types]\n    num_vertices = sum([pattern.num_vertices for pattern in patterns])\n    if num_vertices == 0:\n        return Path(np.empty((0, 2)))\n    vertices = np.empty((num_vertices, 2))\n    codes = np.empty(num_vertices, Path.code_type)\n    cursor = 0\n    for pattern in patterns:\n        if pattern.num_vertices != 0:\n            vertices_chunk = vertices[cursor:cursor + pattern.num_vertices]\n            codes_chunk = codes[cursor:cursor + pattern.num_vertices]\n            pattern.set_vertices_and_codes(vertices_chunk, codes_chunk)\n            cursor += pattern.num_vertices\n    return Path(vertices, codes)",
    "docstring": "Given a hatch specifier, *hatchpattern*, generates Path to render the hatch in a unit square. *density* is the number of lines per unit square.",
    "type": "function",
    "file_path": "matplotlib\\lib\\matplotlib\\hatch.py",
    "ast_data": "FunctionDef name:get_path arg:hatchpattern arg:density arguments arg arg Assign Call Assign Call Assign Call If Compare Return return:yes Call Call Assign Call Assign Call Assign For If Compare Assign Assign Call Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "_create_table_builder",
    "source_code": "@abstractmethod\ndef _create_table_builder(self) -> _TableBuilderAbstract:\n    pass",
    "docstring": "Create instance of table builder.",
    "type": "method",
    "file_path": "pandas\\pandas\\io\\formats\\info.py",
    "ast_data": "FunctionDef name:_create_table_builder arg:self arguments arg"
  },
  {
    "library": "pytorch",
    "name": "get_device_name",
    "source_code": "def get_device_name(device: Optional[_device_t]=None) -> str:\n    return get_device_properties(device).name",
    "docstring": "Get the name of a device. Args: device (torch.device or int or str, optional): device for which to return the name. This function is a no-op if this argument is a negative integer. It uses the current device, given by :func:, if :attr: is `` (default). Returns: str: the name of the device",
    "type": "function",
    "file_path": "pytorch\\torch\\cuda\\__init__.py",
    "ast_data": "FunctionDef name:get_device_name arg:device arguments arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "get_field_tag",
    "source_code": "def get_field_tag(proto: message.Message, fields: FieldTypes) -> Sequence[chunk_pb2.FieldIndex]:\n    field_tags = []\n    for _, field_desc, map_key, list_index in _walk_fields(proto, fields):\n        field_tags.append(chunk_pb2.FieldIndex(field=field_desc.number))\n        if map_key is not None:\n            key_type = field_desc.message_type.fields_by_name['key'].type\n            field_tags.append(chunk_pb2.FieldIndex(map_key=_map_key_proto(key_type, map_key)))\n        elif list_index is not None:\n            field_tags.append(chunk_pb2.FieldIndex(index=list_index))\n    return field_tags",
    "docstring": "Generates FieldIndex proto for a nested field within a proto. Args: proto: Parent proto of any message type. fields: List of string/int/map key fields, e.g. [\"nodes\", \"attr\", \"value\"] can represent . Returns: A list of FieldIndex protos with the same length as .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\tools\\proto_splitter\\util.py",
    "ast_data": "FunctionDef name:get_field_tag arg:proto arg:fields arguments arg arg Assign For Call Call Call If Compare Assign Call Call Call If Compare Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "get_output_dtype",
    "source_code": "def get_output_dtype(node: torch.fx.Node) -> Optional[torch.dtype]:\n    if node.target == 'load':\n        assert len(node.args) == 3\n        return V.graph.get_dtype(node.args[1])\n    elif node.target in ['to_dtype', 'constant', 'index_expr']:\n        return node.args[-1]\n    elif node.target == 'to_dtype_bitcast':\n        return node.args[2]\n    else:\n        return None",
    "docstring": "Get output dtype for nodes that may produce lowp fp dt",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\codegen\\cpp.py",
    "ast_data": "FunctionDef name:get_output_dtype arg:node arguments arg If Compare Compare Call Return return:yes Call If Compare Return return:yes If Compare Return return:yes Return return:no"
  },
  {
    "library": "tensorflow",
    "name": "InstanceMethod",
    "source_code": "class InstanceMethod(InstanceProperty):\n\n    def call(self, obj, args, kwargs):\n        method = getattr(obj, self.attr_name)\n        return method(*args, **kwargs)",
    "docstring": "Wraps an instance method access (e.g. in a Keras Layer. This layer takes an attribute name in the constructor and, when called on input tensor with additional arguments and returns . KerasTensors specialized for specific extension types use it to represent dynamic instance method calls on the represented object, e.g. x = keras.Input(..., ragged=True) new_values = keras.Input(...) out = x.with_values(new_values)",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\layers\\core.py",
    "ast_data": "ClassDef name:InstanceMethod FunctionDef name:call arg:self arg:obj arg:args arg:kwargs arguments arg arg arg arg Assign Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "insert",
    "source_code": "def insert(self, keys, values, name=None):\n    with ops.name_scope(name, '%s_lookup_table_insert' % self.name, [self.resource_handle, keys, values]):\n        keys = ops.convert_to_tensor(keys, self._key_dtype, name='keys')\n        values = ops.convert_to_tensor(values, self._value_dtype, name='values')\n        with ops.colocate_with(self.resource_handle):\n            op = gen_lookup_ops.lookup_table_insert_v2(self.resource_handle, keys, values)\n    return op",
    "docstring": "Associates with . Args: keys: Keys to insert. Can be a tensor of any shape. Must match the table's key type. values: Values to be associated with keys. Must be a tensor of the same shape as and match the table's value type. name: A name for the operation (optional). Returns: The created Operation. Raises: TypeError: when or doesn't match the table data types.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\lookup_ops.py",
    "ast_data": "FunctionDef name:insert arg:self arg:keys arg:values arg:name arguments arg arg arg arg With Call Assign Call Assign Call With Call Assign Call Return return:yes"
  },
  {
    "library": "django",
    "name": "check_constraints",
    "source_code": "def check_constraints(self, table_names=None):\n    pass",
    "docstring": "Backends can override this method if they can apply constraint checking (e.g. via \"SET CONSTRAINTS ALL IMMEDIATE\"). Should raise an IntegrityError if any invalid foreign key references are encountered.",
    "type": "method",
    "file_path": "django\\django\\db\\backends\\base\\base.py",
    "ast_data": "FunctionDef name:check_constraints arg:self arg:table_names arguments arg arg"
  },
  {
    "library": "cherrypy",
    "name": "close",
    "source_code": "def close(self):\n    if is_closable_iterator(self._iterator):\n        self._iterator.close()",
    "docstring": "Close the underlying byte stream.",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\lib\\encoding.py",
    "ast_data": "FunctionDef name:close arg:self arguments arg If Call Call"
  },
  {
    "library": "pytorch",
    "name": "warn_on_static_input_change",
    "source_code": "def warn_on_static_input_change(input_states):\n    for input, traced_input in zip(input_states[0], input_states[1]):\n        if isinstance(input, dict):\n            if list(input.keys()) != list(traced_input.keys()):\n                warning = 'We detected that you are modifying a dictionary that is an input to your model. Note that dictionaries are allowed as inputs in ONNX but they should be handled with care. Usages of dictionaries is not recommended, and should not be used except for configuration use. Also note that the order and values of the keys must remain the same. '\n                warnings.warn(warning)\n        elif isinstance(input, str):\n            if input != traced_input:\n                warning = 'The model seems to have string inputs/outputs. Note that strings will not appear as inputs/outputs of the ONNX graph. '\n                warnings.warn(warning)",
    "docstring": "Warns that changes to input dictionaries and strings won't take effect in the traced ONNX graph. We accept dictionaries and strings as ONNX inputs, but they should be only for configuration use. we detect here if these inputs are modified, and if so we warn the user that the changes won't take effect in the traced ONNX graph.",
    "type": "function",
    "file_path": "pytorch\\torch\\onnx\\utils.py",
    "ast_data": "FunctionDef name:warn_on_static_input_change arg:input_states arguments arg For Call If Call If Compare Call Call Call Call Assign Call If Call If Compare Assign Call"
  },
  {
    "library": "pytorch",
    "name": "set_kernel_enabled",
    "source_code": "@contextmanager\ndef set_kernel_enabled(self, device_type: str, enabled: bool=True):\n    action = 'enable' if enabled else 'disable'\n    originally_disabled = device_type in self._disabled_kernel\n    if device_type not in self._backend_fns:\n        log.warning('Attempted to %s kernel for %s but no kernel was registered for this device type.', action, device_type)\n    if not enabled:\n        if originally_disabled:\n            log.warning('Attempted to disable kernel for %s but it was already disabled.', device_type)\n        else:\n            self._disabled_kernel.add(device_type)\n    elif not originally_disabled:\n        log.warning('Attempted to enable kernel for  %s but it was already enabled.', device_type)\n    else:\n        self._disabled_kernel.remove(device_type)\n    try:\n        yield\n    finally:\n        if originally_disabled:\n            self._disabled_kernel.add(device_type)\n        else:\n            self._disabled_kernel.discard(device_type)",
    "docstring": "Disable or re-enable an already registered kernel for this custom operator. If the kernel is already disabled/enabled, this is a no-op. Note: If a kernel is first disabled and then registered, it is disabled until enabled again. Args: device_type (str): The device type to disable/enable the kernel for. disable (bool): Whether to disable or enable the kernel. Example: >>> inp = torch.randn(1) >>> >>> # define custom op . >>> @custom_op(\"mylib::f\", mutates_args=()) >>> def f(x: Tensor) -> Tensor: >>> return torch.zeros(1) >>> >>> print(f(inp)) # tensor([0.]), default kernel >>> >>> @f.register_kernel(\"cpu\") >>> def _(x): >>> return torch.ones(1) >>> >>> print(f(inp)) # tensor([1.]), CPU kernel >>> >>> # temporarily disable the CPU kernel >>> with f.set_kernel_enabled(\"cpu\", enabled = False): >>> print(f(inp)) # tensor([0.]) with CPU kernel disabled",
    "type": "method",
    "file_path": "pytorch\\torch\\_library\\custom_ops.py",
    "ast_data": "FunctionDef name:set_kernel_enabled arg:self arg:device_type arg:enabled arguments arg arg arg Assign Assign Compare If Compare Call If If Call Call If Call Call Try If Call Call"
  },
  {
    "library": "sphinx",
    "name": "process_doc",
    "source_code": "def process_doc(self, app: Sphinx, doctree: nodes.document) -> None:\n    cwd = Path.cwd()\n    deps = doctree.settings.record_dependencies\n    if not deps:\n        return\n    for dep in deps.list:\n        if isinstance(dep, bytes):\n            dep = dep.decode(fs_encoding)\n        relpath = _relative_path(cwd / dep, app.srcdir)\n        app.env.note_dependency(relpath)",
    "docstring": "Process docutils-generated dependency info.",
    "type": "method",
    "file_path": "sphinx\\sphinx\\environment\\collectors\\dependencies.py",
    "ast_data": "FunctionDef name:process_doc arg:self arg:app arg:doctree arguments arg arg arg Assign Call Assign If Return return:no For If Call Assign Call Assign Call Call"
  },
  {
    "library": "cryptography",
    "name": "private_bytes",
    "source_code": "@abc.abstractmethod\ndef private_bytes(self, encoding: _serialization.Encoding, format: _serialization.PrivateFormat, encryption_algorithm: _serialization.KeySerializationEncryption) -> bytes:\n    pass",
    "docstring": "Returns the key serialized as bytes.",
    "type": "method",
    "file_path": "cryptography\\src\\cryptography\\hazmat\\primitives\\asymmetric\\rsa.py",
    "ast_data": "FunctionDef name:private_bytes arg:self arg:encoding arg:format arg:encryption_algorithm arguments arg arg arg arg"
  },
  {
    "library": "kornia",
    "name": "update",
    "source_code": "def update(self, image: Tensor) -> None:\n    if not (image.ndim == 4 and image.shape[0] == 1) and (not image.ndim == 3):\n        raise ValueError(f'Input tensor must be of shape (1, 3, H, W) or (3, H, W). Got {image.shape}')\n    if image.ndim == 3:\n        image = image.unsqueeze(0)\n    detections_raw: Union[Tensor, list[Tensor]] = self.detector(image)\n    detections = detections_raw[0].cpu().numpy()\n    detections = np.array([detections[:, 2], detections[:, 3], detections[:, 2] + detections[:, 4], detections[:, 3] + detections[:, 5], detections[:, 1], detections[:, 0]]).T\n    if detections.shape[0] == 0:\n        detections = np.empty((0, 6))\n    frame_raw = (tensor_to_image(image) * 255).astype(np.uint8)\n    return self.tracker.update(detections, frame_raw)",
    "docstring": "Update the tracker with a new image. Args: image: The input image.",
    "type": "method",
    "file_path": "kornia\\kornia\\models\\tracking\\boxmot_tracker.py",
    "ast_data": "FunctionDef name:update arg:self arg:image arguments arg arg If BoolOp BoolOp Compare Compare Compare Raise Call If Compare Assign Call Call Assign Call Call Assign Call If Compare Assign Call Assign Call Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "FastGFile",
    "source_code": "@tf_export(v1=['gfile.FastGFile'])\nclass FastGFile(_FileIO):\n\n    @deprecated(None, 'Use tf.gfile.GFile.')\n    def __init__(self, name, mode='r'):\n        super(FastGFile, self).__init__(name=name, mode=mode)",
    "docstring": "File I/O wrappers without thread locking. Note, that this is somewhat like builtin Python file I/O, but there are semantic differences to make it more efficient for some backing filesystems. For example, a write mode file will not be opened until the first write call (to minimize RPC invocations in network filesystems).",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\platform\\gfile.py",
    "ast_data": "ClassDef name:FastGFile FunctionDef name:__init__ arg:self arg:name arg:mode arguments arg arg arg Call Call Call Call"
  },
  {
    "library": "pandas",
    "name": "nested_data_to_arrays",
    "source_code": "def nested_data_to_arrays(data: Sequence, columns: Index | None, index: Index | None, dtype: DtypeObj | None) -> tuple[list[ArrayLike], Index, Index]:\n    if is_named_tuple(data[0]) and columns is None:\n        columns = ensure_index(data[0]._fields)\n    arrays, columns = to_arrays(data, columns, dtype=dtype)\n    columns = ensure_index(columns)\n    if index is None:\n        if isinstance(data[0], ABCSeries):\n            index = _get_names_from_index(data)\n        else:\n            index = default_index(len(data))\n    return (arrays, columns, index)",
    "docstring": "Convert a single sequence of arrays to multiple arrays.",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\internals\\construction.py",
    "ast_data": "FunctionDef name:nested_data_to_arrays arg:data arg:columns arg:index arg:dtype arguments arg arg arg arg If BoolOp Call Compare Assign Call Assign Call Assign Call If Compare If Call Assign Call Assign Call Call Return return:yes"
  },
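  The named-tuple branch in the helper above is what lets pandas infer columns from record fields. A public-API illustration (plain DataFrame construction, not a call to the private helper):

  ```python
  from collections import namedtuple

  import pandas as pd

  Point = namedtuple("Point", ["x", "y"])
  df = pd.DataFrame([Point(1, 2), Point(3, 4)])
  print(list(df.columns))  # ['x', 'y'] -- inferred from the namedtuple _fields
  ```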
  {
    "library": "cryptography",
    "name": "tweak",
    "source_code": "@property\n@abc.abstractmethod\ndef tweak(self) -> utils.Buffer:\n    pass",
    "docstring": "The value of the tweak for this mode as bytes.",
    "type": "method",
    "file_path": "cryptography\\src\\cryptography\\hazmat\\primitives\\ciphers\\modes.py",
    "ast_data": "FunctionDef name:tweak arg:self arguments arg"
  },
  {
    "library": "tensorflow",
    "name": "task_type",
    "source_code": "@property\ndef task_type(self):\n    return getattr(self, '_task_type', None)",
    "docstring": "Returns the task type this indicates. In TensorFlow distributed environment, each job may have an applicable task type. Valid task types in TensorFlow include 'chief': a worker that is designated with more responsibility, 'worker': a regular worker for training/evaluation, 'ps': a parameter server, or 'evaluator': an evaluator that evaluates the checkpoints for metrics. See [Multi-worker configuration]( for more information about 'chief' and 'worker' task type, which are most commonly used. Having access to such information is useful when user needs to run specific code according to task types. For example, Returns if such information is not available or is not applicable in the current distributed environment, such as training with . For more information, please see 's class doc.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\cluster_resolver\\cluster_resolver.py",
    "ast_data": "FunctionDef name:task_type arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "LogicalDeviceConfiguration",
    "source_code": "@tf_export('config.LogicalDeviceConfiguration', 'config.experimental.VirtualDeviceConfiguration')\nclass LogicalDeviceConfiguration(collections.namedtuple('LogicalDeviceConfiguration', ['memory_limit', 'experimental_priority', 'experimental_device_ordinal'])):\n\n    def __new__(cls, memory_limit=None, experimental_priority=None, experimental_device_ordinal=None):\n        return super().__new__(cls, memory_limit, experimental_priority, experimental_device_ordinal)",
    "docstring": "Configuration class for a logical devices. The class specifies the parameters to configure a as it is initialized to a during runtime initialization. Not all fields are valid for all device types. See and for usage examples. Fields: memory_limit: (optional) Maximum memory (in MB) to allocate on the virtual device. Currently only supported for GPUs. experimental_priority: (optional) Priority to assign to a virtual device. Lower values have higher priorities and 0 is the default. Within a physical GPU, the GPU scheduler will prioritize ops on virtual devices with higher priority. Currently only supported for Nvidia GPUs. experimental_device_ordinal: (optional) Ordinal number to order the virtual device. LogicalDevice with lower ordinal number will receive a lower device id. Physical device id and location in the list is used to break ties. Currently only supported for Nvidia GPUs.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\context.py",
    "ast_data": "ClassDef name:LogicalDeviceConfiguration Call FunctionDef name:__new__ arg:cls arg:memory_limit arg:experimental_priority arg:experimental_device_ordinal arguments arg arg arg arg Return return:yes Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "maybe_realign_inputs",
    "source_code": "def maybe_realign_inputs(ran_cudagraphs: BoxedBool, compiled_graph: CompiledFxGraph, inputs_to_check: Sequence[int]) -> None:\n    if not ran_cudagraphs:\n        assert compiled_graph.current_callable is not None\n        new_callable = align_inputs_from_check_idxs(compiled_graph.current_callable, inputs_to_check)\n        if new_callable is not compiled_graph.current_callable:\n            compiled_graph.current_callable = new_callable",
    "docstring": "Realigns input strides from inputs_to_check if we didn't end up running cudagraphs. Mutates if cudagraphs was run. Otherwise, does nothing.",
    "type": "function",
    "file_path": "pytorch\\torch\\_inductor\\output_code.py",
    "ast_data": "FunctionDef name:maybe_realign_inputs arg:ran_cudagraphs arg:compiled_graph arg:inputs_to_check arguments arg arg arg If Compare Assign Call If Compare Assign"
  },
  {
    "library": "kornia",
    "name": "unproject",
    "source_code": "def unproject(self, points: Vector2, depth: Tensor) -> Vector3:\n    return self.projection.unproject(self.distortion.undistort(self.params, points), depth)",
    "docstring": "Unprojects 2D points from camera plane to 3D. Args: points: Vector2 representing 2D points. depth: Depth of the points. Returns: Vector3 representing the unprojected 3D points. Example: >>> points = Vector2(torch.Tensor([1.0, 1.0])) >>> cam = CameraModel(ImageSize(480, 640), CameraModelType.PINHOLE, torch.Tensor([328., 328., 320., 240.])) >>> cam.unproject(points, torch.Tensor([1.0])) x: tensor([-0.9726]) y: tensor([-0.7287]) z: tensor([1.])",
    "type": "method",
    "file_path": "kornia\\kornia\\sensors\\camera\\camera_model.py",
    "ast_data": "FunctionDef name:unproject arg:self arg:points arg:depth arguments arg arg arg Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "_pid_namespace_link",
    "source_code": "def _pid_namespace_link(pid: Optional[int]=None) -> str:\n    PID_NAMESPACE_PATH = '/proc/{}/ns/pid'\n    pid = pid or os.getpid()\n    return os.readlink(PID_NAMESPACE_PATH.format(pid))",
    "docstring": "Returns the link to the process's namespace, example: pid:[4026531836]",
    "type": "function",
    "file_path": "pytorch\\torch\\utils\\_strobelight\\cli_function_profiler.py",
    "ast_data": "FunctionDef name:_pid_namespace_link arg:pid arguments arg Assign Assign BoolOp Call Return return:yes Call Call"
  },
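  The readlink call above is Linux-specific; a minimal sketch of what it resolves to (assumes a /proc filesystem, so it will fail on macOS or Windows):

  ```python
  import os

  # Linux-only: /proc/<pid>/ns/pid is a symlink naming the PID namespace.
  pid = os.getpid()
  print(os.readlink(f"/proc/{pid}/ns/pid"))  # e.g. 'pid:[4026531836]'
  ```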
  {
    "library": "tensorflow",
    "name": "_targets",
    "source_code": "@property\ndef _targets(self):\n    return [e.training_target.target for e in self._training_endpoints if e.has_training_target()]",
    "docstring": "The output target tensors for the model.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\training_v1.py",
    "ast_data": "FunctionDef name:_targets arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "_create_importances_bunch",
    "source_code": "def _create_importances_bunch(baseline_score, permuted_score):\n    importances = baseline_score - permuted_score\n    return Bunch(importances_mean=np.mean(importances, axis=1), importances_std=np.std(importances, axis=1), importances=importances)",
    "docstring": "Compute the importances as the decrease in score. Parameters ---------- baseline_score : ndarray of shape (n_features,) The baseline score without permutation. permuted_score : ndarray of shape (n_features, n_repeats) The permuted scores for the repetitions. Returns ------- importances : :class: Dictionary-like object, with the following attributes. importances_mean : ndarray, shape (n_features, ) Mean of feature importance over . importances_std : ndarray, shape (n_features, ) Standard deviation over . importances : ndarray, shape (n_features, n_repeats) Raw permutation importance scores.",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\inspection\\_permutation_importance.py",
    "ast_data": "FunctionDef name:_create_importances_bunch arg:baseline_score arg:permuted_score arguments arg arg Assign Return return:yes Call Call Call"
  },
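  The arithmetic above is just a broadcasted decrease in score. A standalone numpy sketch with hypothetical scores, using a scalar baseline (which broadcasts over the (n_features, n_repeats) permuted scores):

  ```python
  import numpy as np

  baseline_score = 0.90                      # score without permutation
  permuted_score = np.array([[0.80, 0.82],
                             [0.84, 0.83],
                             [0.69, 0.71]])  # shape (n_features, n_repeats)

  importances = baseline_score - permuted_score  # decrease in score per repeat
  print(importances.mean(axis=1))  # ~[0.09, 0.065, 0.20] -> importances_mean
  print(importances.std(axis=1))   # per-feature spread   -> importances_std
  ```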
  {
    "library": "tensorflow",
    "name": "Initializer",
    "source_code": "class Initializer(object):\n\n    def __call__(self, shape, dtype=None, **kwargs):\n        raise NotImplementedError\n\n    def get_config(self):\n        return {}\n\n    @classmethod\n    def from_config(cls, config):\n        config.pop('dtype', None)\n        return cls(**config)",
    "docstring": "Initializer base class: all Keras initializers inherit from this class. Initializers should implement a method with the following signature: Optionally, you an also implement the method and the class method in order to support serialization -- just like with any Keras object. Here's a simple example: a random normal initializer. Note that we don't have to implement in the example above since the constructor arguments of the class the keys in the config returned by are the same. In this case, the default works fine.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\initializers\\initializers_v2.py",
    "ast_data": "ClassDef name:Initializer FunctionDef name:__call__ arg:self arg:shape arg:dtype arguments arg arg arg arg Raise FunctionDef name:get_config arg:self arguments arg Return return:no FunctionDef name:from_config arg:cls arg:config arguments arg arg Call Return return:yes Call"
  },
  {
    "library": "sphinx",
    "name": "SerialTasks",
    "source_code": "class SerialTasks:\n\n    def __init__(self, nproc: int=1) -> None:\n        pass\n\n    def add_task(self, task_func: Callable[[Any], Any] | Callable[[], Any], arg: Any=None, result_func: Callable[[Any], Any] | None=None) -> None:\n        if arg is not None:\n            res = task_func(arg)\n        else:\n            res = task_func()\n        if result_func:\n            result_func(res)\n\n    def join(self) -> None:\n        pass",
    "docstring": "Has the same interface as ParallelTasks, but executes tasks directly.",
    "type": "class",
    "file_path": "sphinx\\sphinx\\util\\parallel.py",
    "ast_data": "ClassDef name:SerialTasks FunctionDef name:__init__ arg:self arg:nproc arguments arg arg FunctionDef name:add_task arg:self arg:task_func arg:arg arg:result_func arguments arg arg arg arg If Compare Assign Call Assign Call If Call FunctionDef name:join arg:self arguments arg"
  },
  {
    "library": "pytorch",
    "name": "set_output_quantized_indexes",
    "source_code": "def set_output_quantized_indexes(self, indexes: list[int]) -> PrepareCustomConfig:\n    self.output_quantized_indexes = indexes\n    return self",
    "docstring": "Set the indexes of the outputs of the graph that should be quantized. Outputs are otherwise assumed to be in fp32 by default instead.",
    "type": "method",
    "file_path": "pytorch\\torch\\ao\\quantization\\fx\\custom_config.py",
    "ast_data": "FunctionDef name:set_output_quantized_indexes arg:self arg:indexes arguments arg arg Assign Return return:yes"
  },
  {
    "library": "django",
    "name": "skip_default",
    "source_code": "def skip_default(self, field):\n    return False",
    "docstring": "Some backends don't accept default values for certain columns types (i.e. MySQL longtext and longblob).",
    "type": "method",
    "file_path": "django\\django\\db\\backends\\base\\schema.py",
    "ast_data": "FunctionDef name:skip_default arg:self arg:field arguments arg arg Return return:yes"
  },
  {
    "library": "pandas",
    "name": "is_ci_environment",
    "source_code": "def is_ci_environment() -> bool:\n    return os.environ.get('PANDAS_CI', '0') == '1'",
    "docstring": "Checking if running in a continuous integration environment by checking the PANDAS_CI environment variable. Returns ------- bool True if the running in a continuous integration environment.",
    "type": "function",
    "file_path": "pandas\\pandas\\compat\\__init__.py",
    "ast_data": "FunctionDef name:is_ci_environment arguments Return return:yes Compare Call"
  },
  {
    "library": "pytorch",
    "name": "expand_dims",
    "source_code": "def expand_dims(a: TensorLikeType, dimensions: DimsSequenceType, ndim=None) -> TensorLikeType:\n    if ndim is not None:\n        dims = sorted(utils.canonicalize_dims(ndim, dimensions))\n    else:\n        dims = sorted(utils.canonicalize_dims(a.ndim, dimensions))\n    if len(set(dims)) != len(dims):\n        msg = f'Received duplicate dimensions to expand in {str(dimensions)}'\n        raise ValueError(msg)\n    new_shape = list(a.shape)\n    for idx in dims:\n        new_shape.insert(idx, 1)\n    broadcast_dimensions = [idx for idx in range(len(new_shape)) if idx not in dimensions]\n    return broadcast_in_dim(a, new_shape, broadcast_dimensions)",
    "docstring": "Creates a view of a with a.ndim + len(dimensions) dimensions, with new dimensions of length one at the dimensions specified by dimensions.",
    "type": "function",
    "file_path": "pytorch\\torch\\_prims\\__init__.py",
    "ast_data": "FunctionDef name:expand_dims arg:a arg:dimensions arg:ndim arguments arg arg arg If Compare Assign Call Call Assign Call Call If Compare Call Call Call Assign Call Raise Call Assign Call For Call Assign Call Call Compare Return return:yes Call"
  },
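The shape rule is easiest to see with public ops; a sketch using `torch.unsqueeze` as a stand-in for the private prim:

```python
import torch

a = torch.ones(2, 3)
# Inserting length-1 dims at positions 0 and 2 yields a.ndim + 2 dimensions.
b = a.unsqueeze(0).unsqueeze(2)
assert b.shape == (1, 2, 1, 3)
```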
  {
    "library": "tensorflow",
    "name": "_tf_core_flatten",
    "source_code": "def _tf_core_flatten(structure, expand_composites=False):\n    if structure is None:\n        return [None]\n    expand_composites = bool(expand_composites)\n    return _pywrap_utils.Flatten(structure, expand_composites)",
    "docstring": "See comments for flatten() in tensorflow/python/util/nest.py.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\util\\nest_util.py",
    "ast_data": "FunctionDef name:_tf_core_flatten arg:structure arg:expand_composites arguments arg arg If Compare Return return:no Assign Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_ensure_same_dataset_graph",
    "source_code": "def _ensure_same_dataset_graph(dataset):\n    current_graph = ops.get_default_graph()\n    bfs_q = queue.Queue()\n    bfs_q.put(dataset)\n    visited = []\n    while not bfs_q.empty():\n        ds = bfs_q.get()\n        visited.append(ds)\n        ds_graph = ds._graph\n        if current_graph != ds_graph:\n            raise ValueError(f'The graph {current_graph} of the iterator is different from the graph {ds_graph} the dataset: {ds._variant_tensor} was created in. Make sure that the dataset is created in the same graph as the iterator.')\n        for input_ds in ds._inputs():\n            if input_ds not in visited:\n                bfs_q.put(input_ds)",
    "docstring": "Walks the dataset graph to ensure all datasets come from the same graph.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\data\\ops\\dataset_ops.py",
    "ast_data": "FunctionDef name:_ensure_same_dataset_graph arg:dataset arguments arg Assign Call Assign Call Call Assign While Call Assign Call Call Assign If Compare Raise Call For Call If Compare Call"
  },
  {
    "library": "pytorch",
    "name": "macros",
    "source_code": "class macros:\n    local_rank = '${local_rank}'\n\n    @staticmethod\n    def substitute(args: list[Any], local_rank: str) -> list[str]:\n        args_sub = []\n        for arg in args:\n            if isinstance(arg, str):\n                sub = Template(arg).safe_substitute(local_rank=local_rank)\n                args_sub.append(sub)\n            else:\n                args_sub.append(arg)\n        return args_sub",
    "docstring": "Defines simple macros for caffe2.distributed.launch cmd args substitution",
    "type": "class",
    "file_path": "pytorch\\torch\\distributed\\elastic\\utils\\api.py",
    "ast_data": "ClassDef name:macros Assign FunctionDef name:substitute arg:args arg:local_rank arguments arg arg Assign For If Call Assign Call Call Call Call Return return:yes"
  },
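A usage sketch of `macros.substitute`; the argument list is illustrative:

```python
from torch.distributed.elastic.utils.api import macros

args = ["--local-rank", macros.local_rank, 8080]
# Only string arguments are templated; non-strings pass through untouched.
print(macros.substitute(args, "3"))  # ['--local-rank', '3', 8080]
```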
  {
    "library": "tensorflow",
    "name": "squared_hinge",
    "source_code": "@dispatch.add_dispatch_support\ndef squared_hinge(y_true, y_pred):\n    y_pred = tensor_conversion.convert_to_tensor_v2_with_dispatch(y_pred)\n    y_true = math_ops.cast(y_true, y_pred.dtype)\n    y_true = _maybe_convert_labels(y_true)\n    return backend.mean(math_ops.square(math_ops.maximum(1.0 - y_true * y_pred, 0.0)), axis=-1)",
    "docstring": "Computes the squared hinge loss between and . Standalone usage: >>> y_true = np.random.choice([-1, 1], size=(2, 3)) >>> y_pred = np.random.random(size=(2, 3)) >>> loss = tf.keras.losses.squared_hinge(y_true, y_pred) >>> assert loss.shape == (2,) >>> assert np.array_equal( ... loss.numpy(), ... np.mean(np.square(np.maximum(1. - y_true * y_pred, 0.)), axis=-1)) Args: y_true: The ground truth values. values are expected to be -1 or 1. If binary (0 or 1) labels are provided we will convert them to -1 or 1. shape = . y_pred: The predicted values. shape = . Returns: Squared hinge loss values. shape = .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\losses.py",
    "ast_data": "FunctionDef name:squared_hinge arg:y_true arg:y_pred arguments arg arg Assign Call Assign Call Assign Call Return return:yes Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "name",
    "source_code": "@property\ndef name(self):\n    return self.key",
    "docstring": "See base class.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\feature_column\\feature_column_v2.py",
    "ast_data": "FunctionDef name:name arg:self arguments arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "swap_tensors",
    "source_code": "def swap_tensors(self, names: Iterable[str], values: Iterable[torch.Tensor], allow_missing: bool=False) -> list[torch.Tensor]:\n    if not isinstance(names, (list, tuple)):\n        names = list(names)\n    if not isinstance(values, (list, tuple)):\n        values = list(values)\n    assert len(names) == len(values), 'names and values must have the same length'\n    return [self.swap_tensor(name, value, allow_missing=allow_missing) for name, value in zip(names, values)]",
    "docstring": "Swap the attributes specified by the given paths to values. For example, to swap the attributes mod.layer1.conv1.weight and mod.layer1.conv1.bias, use accessor.swap_tensors([\"layer1.conv1.weight\", \"layer1.conv1.bias\"], [weight, bias])",
    "type": "method",
    "file_path": "pytorch\\torch\\nn\\utils\\_named_member_accessor.py",
    "ast_data": "FunctionDef name:swap_tensors arg:self arg:names arg:values arg:allow_missing arguments arg arg arg arg If Call Assign Call If Call Assign Call Compare Call Call Return return:yes Call Call"
  },
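A runnable sketch of the docstring's example via `NamedMemberAccessor` (defined in the same file); the module and replacement tensors are illustrative:

```python
import torch
from torch.nn.utils._named_member_accessor import NamedMemberAccessor

mod = torch.nn.Sequential(torch.nn.Linear(4, 4))
accessor = NamedMemberAccessor(mod)
weight, bias = torch.zeros(4, 4), torch.zeros(4)
# Returns the original tensors that were swapped out.
old = accessor.swap_tensors(["0.weight", "0.bias"], [weight, bias])
```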
  {
    "library": "numpy",
    "name": "pprint",
    "source_code": "def pprint(self):\n    names = self.dtype.names\n    maxlen = max((len(name) for name in names))\n    fmt = '%% %ds: %%s' % maxlen\n    rows = [fmt % (name, getattr(self, name)) for name in names]\n    return '\\n'.join(rows)",
    "docstring": "Pretty-print all fields.",
    "type": "method",
    "file_path": "numpy\\numpy\\_core\\records.py",
    "ast_data": "FunctionDef name:pprint arg:self arguments arg Assign Assign Call Call Assign Assign Call Return return:yes Call"
  },
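`pprint` is available on individual record scalars; a small usage sketch:

```python
import numpy as np

rec = np.rec.array([(1, 2.0)], dtype=[("x", "i4"), ("y", "f8")])
print(rec[0].pprint())
# x: 1
# y: 2.0
```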
  {
    "library": "cherrypy",
    "name": "__repr__",
    "source_code": "def __repr__(self):\n    cls = self.__class__\n    return '%s.%s(points=%r)' % (cls.__module__, cls.__name__, list(self))",
    "docstring": "Render a string representation of :class:.",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\_cprequest.py",
    "ast_data": "FunctionDef name:__repr__ arg:self arguments arg Assign Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "_get_bn_configs",
    "source_code": "def _get_bn_configs(dtype_configs: list[DTypeConfig]) -> list[BackendPatternConfig]:\n    bn_configs = []\n    bn_to_fused_bn = {torch.nn.BatchNorm2d: nni.BNReLU2d, torch.nn.BatchNorm3d: nni.BNReLU3d}\n    for bn in bn_to_fused_bn.keys():\n        fused_bn = bn_to_fused_bn[bn]\n        bn_configs.append(BackendPatternConfig((bn, nn.ReLU)).set_dtype_configs(dtype_configs).set_fuser_method(_sequential_wrapper2(fused_bn)).set_fused_module(fused_bn))\n        bn_configs.append(BackendPatternConfig((bn, F.relu)).set_dtype_configs(dtype_configs).set_fuser_method(_sequential_wrapper2(fused_bn)).set_fused_module(fused_bn))\n        bn_configs.append(BackendPatternConfig(bn).set_observation_type(ObservationType.OUTPUT_USE_DIFFERENT_OBSERVER_AS_INPUT).set_dtype_configs(dtype_configs))\n    for fused_bn in bn_to_fused_bn.values():\n        bn_configs.append(BackendPatternConfig(fused_bn).set_observation_type(ObservationType.OUTPUT_USE_DIFFERENT_OBSERVER_AS_INPUT).set_dtype_configs(dtype_configs))\n    return bn_configs",
    "docstring": "Get configs related to batchnorm.",
    "type": "function",
    "file_path": "pytorch\\torch\\ao\\quantization\\backend_config\\_common_operator_config_utils.py",
    "ast_data": "FunctionDef name:_get_bn_configs arg:dtype_configs arguments arg Assign Assign For Call Assign Call Call Call Call Call Call Call Call Call Call Call Call Call Call Call Call For Call Call Call Call Call Return return:yes"
  },
  {
    "library": "scipy",
    "name": "toms748",
    "source_code": "def toms748(f, a, b, args=(), k=1, xtol=_xtol, rtol=_rtol, maxiter=_iter, full_output=False, disp=True):\n    if xtol <= 0:\n        raise ValueError(f'xtol too small ({xtol:g} <= 0)')\n    if rtol < _rtol / 4:\n        raise ValueError(f'rtol too small ({rtol:g} < {_rtol / 4:g})')\n    maxiter = operator.index(maxiter)\n    if maxiter < 1:\n        raise ValueError('maxiter must be greater than 0')\n    if not np.isfinite(a):\n        raise ValueError(f'a is not finite {a}')\n    if not np.isfinite(b):\n        raise ValueError(f'b is not finite {b}')\n    if a >= b:\n        raise ValueError(f'a and b are not an interval [{a}, {b}]')\n    if not k >= 1:\n        raise ValueError(f'k too small ({k} < 1)')\n    if not isinstance(args, tuple):\n        args = (args,)\n    f = _wrap_nan_raise(f)\n    solver = TOMS748Solver()\n    result = solver.solve(f, a, b, args=args, k=k, xtol=xtol, rtol=rtol, maxiter=maxiter, disp=disp)\n    x, function_calls, iterations, flag = result\n    return _results_select(full_output, (x, function_calls, iterations, flag), 'toms748')",
    "docstring": "Find a root using TOMS Algorithm 748 method. Implements the Algorithm 748 method of Alefeld, Potro and Shi to find a root of the function on the interval `f(b)ff(a)f(b)ffmaxiterfull_outputfull_outputxrRootResultsRootResultsfRootResultsffk`abs(x - x0) >> def f(x): ... return (x**3 - 1) # only one real root at x = 1 >>> from scipy import optimize >>> root, results = optimize.toms748(f, 0, 2, full_output=True) >>> root 1.0 >>> results converged: True flag: converged function_calls: 11 iterations: 5 root: 1.0 method: toms748",
    "type": "function",
    "file_path": "scipy\\scipy\\optimize\\_zeros_py.py",
    "ast_data": "FunctionDef name:toms748 arg:f arg:a arg:b arg:args arg:k arg:xtol arg:rtol arg:maxiter arg:full_output arg:disp arguments arg arg arg arg arg arg arg arg arg arg If Compare Raise Call If Compare Raise Call Assign Call If Compare Raise Call If Call Raise Call If Call Raise Call If Compare Raise Call If Compare Raise Call If Call Assign Assign Call Assign Call Assign Call Assign Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "nargs_error",
    "source_code": "def nargs_error(name, takes, given):\n    return TypeError(f'{name}() takes {takes} positional arguments but {given} were given')",
    "docstring": "Generate a TypeError to be raised by function calls with wrong arity.",
    "type": "function",
    "file_path": "matplotlib\\lib\\matplotlib\\_api\\__init__.py",
    "ast_data": "FunctionDef name:nargs_error arg:name arg:takes arg:given arguments arg arg arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "enter_loop_section",
    "source_code": "def enter_loop_section(self, section_id, entry_node):\n    assert section_id not in self.section_entry\n    assert section_id not in self.continues\n    self.continues[section_id] = set()\n    node = self.add_ordinary_node(entry_node)\n    self.section_entry[section_id] = node",
    "docstring": "Enters a loop section. Loop sections define an entry node. The end of the section always flows back to the entry node. These admit continue jump nodes which also flow to the entry node. Args: section_id: Hashable, the same node that will be used in calls to the ast_node arg passed to add_continue_node entry_node: ast.AST, the entry node into the loop (e.g. the test node for while loops)",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\autograph\\pyct\\cfg.py",
    "ast_data": "FunctionDef name:enter_loop_section arg:self arg:section_id arg:entry_node arguments arg arg arg Compare Compare Assign Call Assign Call Assign"
  },
  {
    "library": "tensorflow",
    "name": "reset_state",
    "source_code": "def reset_state(self):\n    if self._built:\n        metrics = self._metrics_in_order\n    else:\n        metrics = nest.flatten(self._user_metrics) + nest.flatten(self._user_weighted_metrics)\n    for metric_obj in metrics:\n        if isinstance(metric_obj, metrics_mod.Metric):\n            metric_obj.reset_state()",
    "docstring": "Resets the state of all s in this container.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\compile_utils.py",
    "ast_data": "FunctionDef name:reset_state arg:self arguments arg If Assign Assign Call Call For If Call Call"
  },
  {
    "library": "tensorflow",
    "name": "is_tf_type",
    "source_code": "@tf_export('is_tensor')\ndef is_tf_type(x):\n    return isinstance(x, tf_type_classes)",
    "docstring": "Checks whether is a TF-native type that can be passed to many TF ops. Use to differentiate types that can ingested by TensorFlow ops without any conversion (e.g., , , and ) from types that need to be converted into tensors before they are ingested (e.g., numpy and Python scalars). For example, in the following code block: we check to make sure that is a tensor (and convert it if not) before accessing its and . (But note that not all TensorFlow native types have shapes or dtypes; is an example of a TensorFlow native type that has neither shape nor dtype.) Args: x: A python object to check. Returns: if is a TensorFlow-native type.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\tensor_util.py",
    "ast_data": "FunctionDef name:is_tf_type arg:x arguments arg Return return:yes Call Call"
  },
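Usage of the public `tf.is_tensor` export shown above:

```python
import tensorflow as tf

print(tf.is_tensor(tf.constant([1.0])))  # True: already a TF-native type
print(tf.is_tensor([1.0]))               # False: a plain list needs conversion
```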
  {
    "library": "pytorch",
    "name": "argumenttype_evalue_convert",
    "source_code": "def argumenttype_evalue_convert(self, t: Type, arg_name: str, *, mutable: bool=False) -> tuple[str, CType, list[str], list[str]]:\n    ctype = self.argument_type_gen(t, mutable=mutable, binds=arg_name).type\n    if isinstance(t, BaseType):\n        out_name = f'{arg_name}_base'\n        code, decl = self._gen_code_base_type(arg_name=arg_name, out_name=out_name, ctype=ctype)\n    elif isinstance(t, OptionalType):\n        out_name = f'{arg_name}_opt_out'\n        code, decl = self._gen_code_optional_type(arg_name=arg_name, out_name=out_name, t=t, ctype=ctype)\n    elif isinstance(t, ListType):\n        out_name = f'{arg_name}_list_out'\n        code, decl = self._gen_code_list_type(arg_name=arg_name, out_name=out_name, t=t, ctype=ctype)\n    else:\n        raise Exception(f'Cannot handle type {t}. arg_name: {arg_name}')\n    return (out_name, ctype, code, decl)",
    "docstring": "Takes in the type, name and mutability corresponding to an argument, and generates a tuple of: (1) the C++ code necessary to unbox the argument (2) A Binding corresponding to the newly created unboxed variable, including variable name and its CType :param t: a of an argument :param arg_name: argument name :param mutable: boolean for whether this argument type is mutable :return: unboxed result",
    "type": "method",
    "file_path": "pytorch\\torchgen\\executorch\\api\\unboxing.py",
    "ast_data": "FunctionDef name:argumenttype_evalue_convert arg:self arg:t arg:arg_name arguments arg arg arg arg Assign Call If Call Assign Assign Call If Call Assign Assign Call If Call Assign Assign Call Raise Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "StrictMinMaxConstraint",
    "source_code": "@dataclass(frozen=True)\nclass StrictMinMaxConstraint(Constraint):\n    vr: ValueRanges\n\n    def render(self, source: Source) -> str:\n        return f'{self.vr.lower} <= {source.name()} <= {self.vr.upper}'",
    "docstring": "For clients: the size at this dimension must be within 'vr' (which specifies a lower and upper bound, inclusive-inclusive) AND it must be non-negative and should not be 0 or 1 (but see NB below). For backends: there must not be any guards on this dimension which are not implied by the given lower and upper bound. Regardless of the lower bound, the backend can assume the size is non-negative and that it is not 0 or 1. An unbounded StrictMinMaxConstraint can be thought of as a strict version of \"RelaxedUnspecConstraint\". NB: Export will often unsoundly assume that a graph works for 0/1, even though at trace time we assumed size is not 0 or 1. The idea is that if we produce a graph that works for a range of values, it will be OK for N=0/1 too.",
    "type": "class",
    "file_path": "pytorch\\torch\\fx\\experimental\\symbolic_shapes.py",
    "ast_data": "ClassDef name:StrictMinMaxConstraint FunctionDef name:render arg:self arg:source arguments arg arg Return return:yes Call Call"
  },
  {
    "library": "kornia",
    "name": "_read_random_apply",
    "source_code": "def _read_random_apply(self, random_apply: Union[int, bool, Tuple[int, int]], max_length: int) -> Union[Tuple[int, int], bool]:\n    if isinstance(random_apply, (bool,)) and random_apply is False:\n        random_apply = False\n    elif isinstance(random_apply, (bool,)) and random_apply is True:\n        random_apply = (max_length, max_length + 1)\n    elif isinstance(random_apply, (int,)):\n        random_apply = (random_apply, random_apply + 1)\n    elif isinstance(random_apply, (tuple,)) and len(random_apply) == 2 and isinstance(random_apply[0], (int,)) and isinstance(random_apply[1], (int,)):\n        random_apply = (random_apply[0], random_apply[1] + 1)\n    elif isinstance(random_apply, (tuple,)) and len(random_apply) == 1 and isinstance(random_apply[0], (int,)):\n        random_apply = (random_apply[0], max_length + 1)\n    else:\n        raise ValueError(f'Non-readable random_apply. Got {random_apply}.')\n    if random_apply is not False and (not (isinstance(random_apply, (tuple,)) and len(random_apply) == 2 and isinstance(random_apply[0], (int,)) and isinstance(random_apply[0], (int,)))):\n        raise AssertionError(f'Expect a tuple of (int, int). Got {random_apply}.')\n    return random_apply",
    "docstring": "Process the scenarios for random apply.",
    "type": "method",
    "file_path": "kornia\\kornia\\augmentation\\container\\image.py",
    "ast_data": "FunctionDef name:_read_random_apply arg:self arg:random_apply arg:max_length arguments arg arg arg If BoolOp Call Compare Assign If BoolOp Call Compare Assign If Call Assign If BoolOp Call Compare Call Call Call Assign If BoolOp Call Compare Call Call Assign Raise Call If BoolOp Compare BoolOp Call Compare Call Call Call Raise Call Return return:yes"
  },
  {
    "library": "authlib",
    "name": "create_revocation_endpoint",
    "source_code": "def create_revocation_endpoint(session, token_model):\n    from authlib.oauth2.rfc7009 import RevocationEndpoint\n    query_token = create_query_token_func(session, token_model)\n\n    class _RevocationEndpoint(RevocationEndpoint):\n\n        def query_token(self, token, token_type_hint):\n            return query_token(token, token_type_hint)\n\n        def revoke_token(self, token, request):\n            now = int(time.time())\n            hint = request.form.get('token_type_hint')\n            token.access_token_revoked_at = now\n            if hint != 'access_token':\n                token.refresh_token_revoked_at = now\n            session.add(token)\n            session.commit()\n    return _RevocationEndpoint",
    "docstring": "Create a revocation endpoint class with SQLAlchemy session and token model. :param session: SQLAlchemy session :param token_model: Token model class",
    "type": "function",
    "file_path": "authlib\\authlib\\integrations\\sqla_oauth2\\functions.py",
    "ast_data": "FunctionDef name:create_revocation_endpoint arg:session arg:token_model arguments arg arg Assign Call ClassDef name:_RevocationEndpoint FunctionDef name:query_token arg:self arg:token arg:token_type_hint arguments arg arg arg Return return:yes Call FunctionDef name:revoke_token arg:self arg:token arg:request arguments arg arg arg Assign Call Call Assign Call Assign If Compare Assign Call Call Return return:yes"
  },
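A hedged wiring sketch; `session`, `Token`, and `server` (an Authlib `AuthorizationServer`) are assumed to exist in the host application:

```python
# Build the endpoint class bound to this session/model, then register it.
RevocationEndpoint = create_revocation_endpoint(session, Token)
server.register_endpoint(RevocationEndpoint)
```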
  {
    "library": "sphinx",
    "name": "_key_func_2",
    "source_code": "def _key_func_2(entry: tuple[str, _IndexEntryTargets]) -> str:\n    key = unicodedata.normalize('NFD', entry[0].lower())\n    key = key.removeprefix('\\u200f')\n    if key[0:1].isalpha() or key.startswith('_'):\n        key = chr(127) + key\n    return key",
    "docstring": "Sort the sub-index entries",
    "type": "function",
    "file_path": "sphinx\\sphinx\\environment\\adapters\\indexentries.py",
    "ast_data": "FunctionDef name:_key_func_2 arg:entry arguments arg Assign Call Call Assign Call If BoolOp Call Call Assign Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_enter_unshard_params_ctx",
    "source_code": "@no_type_check\ndef _enter_unshard_params_ctx(module: nn.Module, fsdp_state: _FSDPState, writeback: bool=False, rank0_only: bool=False, offload_to_cpu: bool=False, with_grads: bool=False) -> None:\n    assert module not in fsdp_state._unshard_params_ctx, 'Entering the ``_unshard_fsdp_state_params`` context but _unshard_params_ctx[module] is not None.'\n    fsdp_state._unshard_params_ctx[module] = _unshard_fsdp_state_params(module, fsdp_state, writeback=writeback, rank0_only=rank0_only, offload_to_cpu=offload_to_cpu, with_grads=with_grads)\n    fsdp_state._unshard_params_ctx[module].__enter__()",
    "docstring": "state_dict hooks cannot use the pure context call as the checkpoint flow requires to enter the context in the pre-hook but leave the context in the post-hook. This API enters the context of ``.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\fsdp\\_state_dict_utils.py",
    "ast_data": "FunctionDef name:_enter_unshard_params_ctx arg:module arg:fsdp_state arg:writeback arg:rank0_only arg:offload_to_cpu arg:with_grads arguments arg arg arg arg arg arg Compare Assign Call Call"
  },
  {
    "library": "tensorflow",
    "name": "collect_previous_mask",
    "source_code": "def collect_previous_mask(input_tensors):\n\n    def _collect_previous_mask(x):\n        return getattr(x, '_keras_mask', None)\n    return nest.map_structure(_collect_previous_mask, input_tensors)",
    "docstring": "Retrieves the output mask(s) of the previous node. Args: input_tensors: An arbitrary structure of Tensors. Returns: A mask tensor or list of mask tensors.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\base_layer_utils.py",
    "ast_data": "FunctionDef name:collect_previous_mask arg:input_tensors arguments arg FunctionDef name:_collect_previous_mask arg:x arguments arg Return return:yes Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, initial_learning_rate, decay_steps, alpha=0.0, name=None):\n    super(CosineDecay, self).__init__()\n    self.initial_learning_rate = initial_learning_rate\n    self.decay_steps = decay_steps\n    self.alpha = alpha\n    self.name = name",
    "docstring": "Applies cosine decay to the learning rate. Args: initial_learning_rate: A scalar or Tensor or a Python number. The initial learning rate. decay_steps: A scalar or or a Python number. Number of steps to decay over. alpha: A scalar or Tensor or a Python number. Minimum learning rate value as a fraction of initial_learning_rate. name: String. Optional name of the operation. Defaults to 'CosineDecay'.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\optimizer_v2\\learning_rate_schedule.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:initial_learning_rate arg:decay_steps arg:alpha arg:name arguments arg arg arg arg arg Call Call Assign Assign Assign Assign"
  },
  {
    "library": "pytorch",
    "name": "_has_tma_epilogue",
    "source_code": "@staticmethod\ndef _has_tma_epilogue(op: 'cutlass_library.gemm_op.GemmOperation') -> bool:\n    assert cutlass_utils.try_import_cutlass()\n    import cutlass_library.library as cutlass_lib\n    result = False\n    if op.gemm_kind == cutlass_lib.GemmKind.Universal3x:\n        epilogue_schedule_str = str(op.epilogue_schedule).split('.')[-1]\n        result = epilogue_schedule_str.lower().startswith('tma')\n    return result",
    "docstring": "Helper method: Determine whether a given Cutlass GEMM op has a TMA Epilogue",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\codegen\\cuda\\gemm_template.py",
    "ast_data": "FunctionDef name:_has_tma_epilogue arg:op arguments arg Call Assign If Compare Assign Call Call Assign Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "flex_attention_grid",
    "source_code": "@SymbolicGridFn\ndef flex_attention_grid(batch_size, q_heads, num_queries, d_model, meta, *, cdiv):\n    return (cdiv(num_queries, meta['BLOCK_M']), batch_size * q_heads, 1)",
    "docstring": "How is this kernel parallelized? We create a grid of (batch_size * num_heads, ceil_div(n_queries, query_block_size), 1) Each block is responsible for iterating over blocks of keys and values calculating the final attention output.",
    "type": "function",
    "file_path": "pytorch\\torch\\_inductor\\kernel\\flex_attention.py",
    "ast_data": "FunctionDef name:flex_attention_grid arg:batch_size arg:q_heads arg:num_queries arg:d_model arg:meta arguments arg arg arg arg arg arg Return return:yes Call"
  },
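Illustrative grid arithmetic for the launch geometry described above; the sizes are made up:

```python
def cdiv(a, b):
    # Ceiling division, matching the cdiv helper the grid function receives.
    return -(a // -b)

batch_size, q_heads, num_queries = 4, 8, 1000
meta = {"BLOCK_M": 128}
grid = (cdiv(num_queries, meta["BLOCK_M"]), batch_size * q_heads, 1)
assert grid == (8, 32, 1)
```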
  {
    "library": "tensorflow",
    "name": "_remove_annotation",
    "source_code": "def _remove_annotation(sig):\n    parameters = [p.replace(annotation=p.empty) for p in sig.parameters.values()]\n    return sig.replace(parameters=parameters, return_annotation=sig.empty)",
    "docstring": "Removes annotation from a python Signature.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\util\\dispatch.py",
    "ast_data": "FunctionDef name:_remove_annotation arg:sig arguments arg Assign Call Call Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "set",
    "source_code": "def set(self, *, pad=None, w_pad=None, h_pad=None, rect=None):\n    for td in self.set.__kwdefaults__:\n        if locals()[td] is not None:\n            self._params[td] = locals()[td]",
    "docstring": "Set the pads for tight_layout. Parameters ---------- pad : float Padding between the figure edge and the edges of subplots, as a fraction of the font size. w_pad, h_pad : float Padding (width/height) between edges of adjacent subplots. Defaults to *pad*. rect : tuple (left, bottom, right, top) rectangle in normalized figure coordinates that the subplots (including labels) will fit into.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\layout_engine.py",
    "ast_data": "FunctionDef name:set arg:self arguments arg arg arg arg arg For If Compare Call Assign Call"
  },
  {
    "library": "scrapy",
    "name": "replace",
    "source_code": "def replace(self, *args: Any, cls: type[Response] | None=None, **kwargs: Any) -> Response:\n    for x in self.attributes:\n        kwargs.setdefault(x, getattr(self, x))\n    if cls is None:\n        cls = self.__class__\n    return cls(*args, **kwargs)",
    "docstring": "Create a new Response with the same attributes except for those given new values",
    "type": "method",
    "file_path": "scrapy\\scrapy\\http\\response\\__init__.py",
    "ast_data": "FunctionDef name:replace arg:self arguments arg arg arg arg For Call Call If Compare Assign Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "count_prefix",
    "source_code": "def count_prefix(self, prefix: str) -> int:\n    return sum((1 for record in self.archive_file.get_all_written_records() if record.startswith(prefix)))",
    "docstring": "Count the number of records that start with a given prefix.",
    "type": "method",
    "file_path": "pytorch\\torch\\export\\pt2_archive\\_package.py",
    "ast_data": "FunctionDef name:count_prefix arg:self arg:prefix arguments arg arg Return return:yes Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "is_buffer",
    "source_code": "def is_buffer(program: 'ExportedProgram', node: torch.fx.Node) -> bool:\n    return node.name in program.graph_signature.inputs_to_buffers",
    "docstring": "Checks if the given node is a buffer within the exported program",
    "type": "function",
    "file_path": "pytorch\\torch\\_export\\utils.py",
    "ast_data": "FunctionDef name:is_buffer arg:program arg:node arguments arg arg Return return:yes Compare"
  },
  {
    "library": "matplotlib",
    "name": "HandlerLine2DCompound",
    "source_code": "class HandlerLine2DCompound(HandlerNpoints):\n\n    def create_artists(self, legend, orig_handle, xdescent, ydescent, width, height, fontsize, trans):\n        xdata, xdata_marker = self.get_xdata(legend, xdescent, ydescent, width, height, fontsize)\n        ydata = np.full_like(xdata, (height - ydescent) / 2)\n        legline = Line2D(xdata, ydata)\n        self.update_prop(legline, orig_handle, legend)\n        legline.set_drawstyle('default')\n        legline.set_marker('')\n        legline_marker = Line2D(xdata_marker, ydata[:len(xdata_marker)])\n        self.update_prop(legline_marker, orig_handle, legend)\n        legline_marker.set_linestyle('None')\n        if legend.markerscale != 1:\n            newsz = legline_marker.get_markersize() * legend.markerscale\n            legline_marker.set_markersize(newsz)\n        legline._legmarker = legline_marker\n        legline.set_transform(trans)\n        legline_marker.set_transform(trans)\n        return [legline, legline_marker]",
    "docstring": "Original handler for instances, that relies on combining a line-only with a marker-only artist. May be deprecated in the future.",
    "type": "class",
    "file_path": "matplotlib\\lib\\matplotlib\\legend_handler.py",
    "ast_data": "ClassDef name:HandlerLine2DCompound FunctionDef name:create_artists arg:self arg:legend arg:orig_handle arg:xdescent arg:ydescent arg:width arg:height arg:fontsize arg:trans arguments arg arg arg arg arg arg arg arg arg Assign Call Assign Call Assign Call Call Call Call Assign Call Call Call Call If Compare Assign Call Call Assign Call Call Return return:yes"
  },
  {
    "library": "numpy",
    "name": "_vectorize_call",
    "source_code": "def _vectorize_call(self, func, args):\n    if self.signature is not None:\n        res = self._vectorize_call_with_signature(func, args)\n    elif not args:\n        res = func()\n    else:\n        args = [asanyarray(a, dtype=object) for a in args]\n        ufunc, otypes = self._get_ufunc_and_otypes(func=func, args=args)\n        outputs = ufunc(*args, out=...)\n        if ufunc.nout == 1:\n            res = asanyarray(outputs, dtype=otypes[0])\n        else:\n            res = tuple((asanyarray(x, dtype=t) for x, t in zip(outputs, otypes)))\n    return res",
    "docstring": "Vectorized call to over positional .",
    "type": "method",
    "file_path": "numpy\\numpy\\lib\\_function_base_impl.py",
    "ast_data": "FunctionDef name:_vectorize_call arg:self arg:func arg:args arguments arg arg arg If Compare Assign Call If Assign Call Assign Call Assign Call Assign Call If Compare Assign Call Assign Call Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_DTensorIteratorSpec",
    "source_code": "class _DTensorIteratorSpec(iterator_ops.IteratorSpec):\n    __slots__ = ['_global_element_spec', '_layouts_str']\n\n    def __init__(self, global_element_spec: tensor_spec.TensorSpec, layouts_str: Any):\n        super().__init__(global_element_spec)\n        self._global_element_spec = global_element_spec\n        self._layouts_str = layouts_str\n\n    @property\n    def value_type(self):\n        return _DTensorIterator\n\n    def _serialize(self):\n        return (self._global_element_spec, self._layouts_str)\n\n    @property\n    def _component_specs(self):\n        return (tensor_spec.TensorSpec([], dtypes.resource),)\n\n    def _to_components(self, value):\n        return (value._iterator_resource_dtensor,)\n\n    def _from_components(self, components):\n        layouts = nest.map_structure(layout_lib.Layout.from_string, self._layouts_str)\n        return _DTensorIterator(dtensor_components=components, global_element_spec=self._global_element_spec, layouts=layouts)\n\n    @classmethod\n    def from_value(cls, value):\n        return cls(value._global_element_spec, value._layouts_str)",
    "docstring": "Type specification for .",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\dtensor\\python\\input_util.py",
    "ast_data": "ClassDef name:_DTensorIteratorSpec Assign FunctionDef name:__init__ arg:self arg:global_element_spec arg:layouts_str arguments arg arg arg Call Call Assign Assign FunctionDef name:value_type arg:self arguments arg Return return:yes FunctionDef name:_serialize arg:self arguments arg Return return:yes FunctionDef name:_component_specs arg:self arguments arg Return return:yes Call FunctionDef name:_to_components arg:self arg:value arguments arg arg Return return:yes FunctionDef name:_from_components arg:self arg:components arguments arg arg Assign Call Return return:yes Call FunctionDef name:from_value arg:cls arg:value arguments arg arg Return return:yes Call"
  },
  {
    "library": "django",
    "name": "get_script_name",
    "source_code": "def get_script_name(environ):\n    if settings.FORCE_SCRIPT_NAME is not None:\n        return settings.FORCE_SCRIPT_NAME\n    script_url = get_bytes_from_wsgi(environ, 'SCRIPT_URL', '') or get_bytes_from_wsgi(environ, 'REDIRECT_URL', '')\n    if script_url:\n        if b'//' in script_url:\n            script_url = _slashes_re.sub(b'/', script_url)\n        path_info = get_bytes_from_wsgi(environ, 'PATH_INFO', '')\n        script_name = script_url.removesuffix(path_info)\n    else:\n        script_name = get_bytes_from_wsgi(environ, 'SCRIPT_NAME', '')\n    return script_name.decode()",
    "docstring": "Return the equivalent of the HTTP request's SCRIPT_NAME environment variable. If Apache mod_rewrite is used, return what would have been the script name prior to any rewriting (so it's the script name as seen from the client's perspective), unless the FORCE_SCRIPT_NAME setting is set (to anything).",
    "type": "function",
    "file_path": "django\\django\\core\\handlers\\wsgi.py",
    "ast_data": "FunctionDef name:get_script_name arg:environ arguments arg If Compare Return return:yes Assign BoolOp Call Call If If Compare Assign Call Assign Call Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "django",
    "name": "_has_any_perms_for_target_model",
    "source_code": "def _has_any_perms_for_target_model(self, request, perms):\n    opts = self.opts\n    for field in opts.fields:\n        if field.remote_field and field.remote_field.model != self.parent_model:\n            opts = field.remote_field.model._meta\n            break\n    return any((request.user.has_perm('%s.%s' % (opts.app_label, get_permission_codename(perm, opts))) for perm in perms))",
    "docstring": "This method is called only when the ModelAdmin's model is for an ManyToManyField's implicit through model (if self.opts.auto_created). Return True if the user has any of the given permissions ('add', 'change', etc.) for the model that points to the through model.",
    "type": "method",
    "file_path": "django\\django\\contrib\\admin\\options.py",
    "ast_data": "FunctionDef name:_has_any_perms_for_target_model arg:self arg:request arg:perms arguments arg arg arg Assign For If BoolOp Compare Assign Return return:yes Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "_op_context",
    "source_code": "@contextmanager\ndef _op_context(self, op: TargetType) -> Generator[None, None, None]:\n    previous = self._current_op\n    self._current_op = op\n    try:\n        yield\n    finally:\n        self._current_op = previous",
    "docstring": "Set which op is being processed in call function to know if we can mutate buffers",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\subgraph_lowering.py",
    "ast_data": "FunctionDef name:_op_context arg:self arg:op arguments arg arg Assign Assign Try Assign"
  },
  {
    "library": "tensorflow",
    "name": "_get_output_shapes_from_batch_size",
    "source_code": "def _get_output_shapes_from_batch_size(self, per_replica_batch_size):\n    output_shapes = []\n    for feature in nest.flatten(self._feature_config):\n        if not feature.output_shape and feature.max_sequence_length > 0:\n            output_shapes.append(TensorShape([per_replica_batch_size, feature.max_sequence_length]))\n        else:\n            output_shapes.append(TensorShape(per_replica_batch_size))\n    return output_shapes",
    "docstring": "Get the output shapes from the batch size.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\tpu\\tpu_embedding_v2.py",
    "ast_data": "FunctionDef name:_get_output_shapes_from_batch_size arg:self arg:per_replica_batch_size arguments arg arg Assign For Call If BoolOp Compare Call Call Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "getqualifiedname",
    "source_code": "def getqualifiedname(namespace, object_, max_depth=5, visited=None):\n    if visited is None:\n        visited = set()\n    namespace = dict(namespace)\n    for name in namespace:\n        if object_ is namespace[name]:\n            return name\n    parent = tf_inspect.getmodule(object_)\n    if parent is not None and parent is not object_ and (parent is not namespace):\n        parent_name = getqualifiedname(namespace, parent, max_depth=0, visited=visited)\n        if parent_name is not None:\n            name_in_parent = getqualifiedname(parent.__dict__, object_, max_depth=0, visited=visited)\n            assert name_in_parent is not None, 'An object should always be found in its owner module'\n            return '{}.{}'.format(parent_name, name_in_parent)\n    if max_depth:\n        for name in namespace.keys():\n            value = namespace[name]\n            if tf_inspect.ismodule(value) and id(value) not in visited:\n                visited.add(id(value))\n                name_in_module = getqualifiedname(value.__dict__, object_, max_depth - 1, visited)\n                if name_in_module is not None:\n                    return '{}.{}'.format(name, name_in_module)\n    return None",
    "docstring": "Returns the name by which a value can be referred to in a given namespace. If the object defines a parent module, the function attempts to use it to locate the object. This function will recurse inside modules, but it will not search objects for attributes. The recursion depth is controlled by max_depth. Args: namespace: Dict[str, Any], the namespace to search into. object_: Any, the value to search. max_depth: Optional[int], a limit to the recursion depth when searching inside modules. visited: Optional[Set[int]], ID of modules to avoid visiting. Returns: Union[str, None], the fully-qualified name that resolves to the value o, or None if it couldn't be found.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\autograph\\pyct\\inspect_utils.py",
    "ast_data": "FunctionDef name:getqualifiedname arg:namespace arg:object_ arg:max_depth arg:visited arguments arg arg arg arg If Compare Assign Call Assign Call For If Compare Return return:yes Assign Call If BoolOp Compare Compare Compare Assign Call If Compare Assign Call Compare Return return:yes Call If For Call Assign If BoolOp Call Compare Call Call Call Assign Call If Compare Return return:yes Call Return return:no"
  },
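A usage sketch resolving a value to its dotted name within a namespace:

```python
import math
from tensorflow.python.autograph.pyct.inspect_utils import getqualifiedname

# math.cos is located via its owner module, yielding a qualified name.
print(getqualifiedname({"math": math}, math.cos))  # 'math.cos'
```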
  {
    "library": "pytorch",
    "name": "extract",
    "source_code": "@staticmethod\ndef extract(*, script=False, cpp=False, skip=0):\n    import torch._C._profiler\n    if script or cpp:\n        assert skip == 0, 'skip with script/cpp NYI'\n    return CapturedTraceback(torch._C._profiler.gather_traceback(python=True, script=script, cpp=cpp), 0 if script or cpp else skip + 1)",
    "docstring": "Like traceback.extract_stack(), but faster (approximately 20x faster); it is fast enough that you can unconditionally log stacks this way as part of normal execution. It returns a torch._C._profiler.CapturedTraceback object that must be formatted specially with format_captured_tb. By default, this only reports Python backtraces (like extract_stack). You can set the script/cpp kwargs to also turn on TorchScript/C++ trace reporting.",
    "type": "method",
    "file_path": "pytorch\\torch\\utils\\_traceback.py",
    "ast_data": "FunctionDef name:extract arguments arg arg arg If BoolOp Compare Return return:yes Call Call BoolOp"
  },
  {
    "library": "pytorch",
    "name": "get_state",
    "source_code": "def get_state(self) -> Optional[tuple[bytes, Token]]:\n    base64_state: bytes = self._call_store('get', self._key)\n    return self._decode_state(base64_state)",
    "docstring": "See base class.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\elastic\\rendezvous\\c10d_rendezvous_backend.py",
    "ast_data": "FunctionDef name:get_state arg:self arguments arg Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "is_function",
    "source_code": "def is_function(fname, graph):\n    if context.executing_eagerly():\n        return context.context().has_function(fname)\n    else:\n        while graph is not None:\n            if graph._is_function(fname):\n                return True\n            if hasattr(graph, 'outer_graph'):\n                graph = graph.outer_graph\n            else:\n                return False",
    "docstring": "Checks for a function definition with in the current context.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\function_def_to_graph.py",
    "ast_data": "FunctionDef name:is_function arg:fname arg:graph arguments arg arg If Call Return return:yes Call Call While Compare If Call Return return:yes If Call Assign Return return:yes"
  },
  {
    "library": "django",
    "name": "ensure_connection",
    "source_code": "@async_unsafe\ndef ensure_connection(self):\n    if self.connection is None:\n        if self.in_atomic_block and self.closed_in_transaction:\n            raise ProgrammingError('Cannot open a new connection in an atomic block.')\n        with self.wrap_database_errors:\n            self.connect()",
    "docstring": "Guarantee that a connection to the database is established.",
    "type": "method",
    "file_path": "django\\django\\db\\backends\\base\\base.py",
    "ast_data": "FunctionDef name:ensure_connection arg:self arguments arg If Compare If BoolOp Raise Call With Call"
  },
  {
    "library": "tensorflow",
    "name": "predict_function",
    "source_code": "def predict_function(iterator):\n    outputs = step_function(self, iterator)\n    for _ in math_ops.range(self._steps_per_execution - 1):\n        directives.set_loop_options(shape_invariants=[(t, tf_utils.get_tensor_spec(t, dynamic_batch=True).shape) for t in nest.flatten(outputs)])\n        step_outputs = step_function(self, iterator)\n        outputs = nest.map_structure(lambda t1, t2: concat([t1, t2]), outputs, step_outputs)\n    return outputs",
    "docstring": "Runs an evaluation execution with multiple steps.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\training.py",
    "ast_data": "FunctionDef name:predict_function arg:iterator arguments arg Assign Call For Call Call Call Call Assign Call Assign Call arguments arg arg Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "append_stacktrace_summary",
    "source_code": "def append_stacktrace_summary(node: Node):\n    nonlocal prev_stacktrace\n    if node.op not in {'placeholder', 'output'}:\n        stack_trace = node.stack_trace\n        if stack_trace:\n            if stack_trace != prev_stacktrace:\n                prev_stacktrace = stack_trace\n                if (parsed_stack_trace := _parse_stack_trace(stack_trace)):\n                    summary_str = parsed_stack_trace.get_summary_str()\n                else:\n                    summary_str = ''\n                body.append(f'\\n {dim(f'\n        elif prev_stacktrace != '':\n            prev_stacktrace = ''\n            no_stacktrace_msg = '# No stacktrace found for following nodes'\n            body.append(f'\\n{dim(no_stacktrace_msg)}\\n')",
    "docstring": "Append a summary of the stacktrace to the generated code. This is useful for debugging.",
    "type": "method",
    "file_path": "pytorch\\torch\\fx\\graph.py",
    "ast_data": "FunctionDef name:append_stacktrace_summary arg:node arguments arg If Compare Assign If If Compare Assign If Call Assign Call Assign Call Call If Compare Assign Assign Call Call"
  },
  {
    "library": "pygame",
    "name": "write_short",
    "source_code": "def write_short(self, status, data1=0, data2=0):\n    _check_init()\n    self._check_open()\n    self._output.WriteShort(status, data1, data2)",
    "docstring": "write_short(status ) Output.write_short(status) Output.write_short(status, data1 = 0, data2 = 0) output MIDI information of 3 bytes or less. data fields are optional status byte could be: 0xc0 = program change 0x90 = note on etc. data bytes are optional and assumed 0 if omitted example: note 65 on with velocity 100 write_short(0x90,65,100)",
    "type": "method",
    "file_path": "pygame\\src_py\\midi.py",
    "ast_data": "FunctionDef name:write_short arg:self arg:status arg:data1 arg:data2 arguments arg arg arg arg Call Call Call"
  },
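The docstring's own example, made runnable (an available MIDI output device is assumed):

```python
import pygame.midi

pygame.midi.init()
out = pygame.midi.Output(pygame.midi.get_default_output_id())
out.write_short(0x90, 65, 100)  # note 65 on with velocity 100 (0x90 = note on)
```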
  {
    "library": "authlib",
    "name": "validate_op_tos_uri",
    "source_code": "def validate_op_tos_uri(self):\n    value = self.get('op_tos_uri')\n    if value and (not is_valid_url(value)):\n        raise ValueError('\"op_tos_uri\" MUST be a URL')",
    "docstring": "OPTIONAL. URL that the authorization server provides to the person registering the client to read about the authorization server's terms of service. The registration process SHOULD display this URL to the person registering the client if it is given. As described in Section 5, despite the identifier \"op_tos_uri\", appearing to be OpenID-specific, its usage in this specification is actually referring to a general OAuth 2.0 feature that is not specific to OpenID Connect.",
    "type": "method",
    "file_path": "authlib\\authlib\\oauth2\\rfc8414\\models.py",
    "ast_data": "FunctionDef name:validate_op_tos_uri arg:self arguments arg Assign Call If BoolOp Call Raise Call"
  },
  {
    "library": "tensorflow",
    "name": "device_coordinates",
    "source_code": "@property\ndef device_coordinates(self):\n    return self._device_coordinates",
    "docstring": "Describes the mapping from TPU devices to topology coordinates. Returns: A rank 3 int32 array with shape . is the number of tasks in the TPU cluster, is the number of TPU devices per task, and is the number of axes in the TPU cluster topology. Each entry gives the -th coordinate in the topology of a task/device pair. TPU topologies are 4-dimensional, with dimensions .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\tpu\\topology.py",
    "ast_data": "FunctionDef name:device_coordinates arg:self arguments arg Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "partial_fit",
    "source_code": "@_fit_context(prefer_skip_nested_validation=True)\ndef partial_fit(self, X, y=None):\n    first_time = not hasattr(self, 'components_')\n    X = self._check_non_neg_array(X, reset_n_features=first_time, whom='LatentDirichletAllocation.partial_fit')\n    n_samples, n_features = X.shape\n    batch_size = self.batch_size\n    if first_time:\n        self._init_latent_vars(n_features, dtype=X.dtype)\n    if n_features != self.components_.shape[1]:\n        raise ValueError('The provided data has %d dimensions while the model was trained with feature size %d.' % (n_features, self.components_.shape[1]))\n    n_jobs = effective_n_jobs(self.n_jobs)\n    with Parallel(n_jobs=n_jobs, verbose=max(0, self.verbose - 1)) as parallel:\n        for idx_slice in gen_batches(n_samples, batch_size):\n            self._em_step(X[idx_slice, :], total_samples=self.total_samples, batch_update=False, parallel=parallel)\n    return self",
    "docstring": "Online VB with Mini-Batch update. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) Document word matrix. y : Ignored Not used, present here for API consistency by convention. Returns ------- self Partially fitted estimator.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\decomposition\\_lda.py",
    "ast_data": "FunctionDef name:partial_fit arg:self arg:X arg:y arguments arg arg arg Assign Call Assign Call Assign Assign If Call If Compare Raise Call Assign Call With Call Call For Call Call Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "MathDirective",
    "source_code": "class MathDirective(Directive):\n    has_content = True\n    required_arguments = 0\n    optional_arguments = 0\n    final_argument_whitespace = False\n    option_spec = {'fontset': fontset_choice, 'fontsize': validate_float_or_None}\n\n    def run(self):\n        latex = ''.join(self.content)\n        node = latex_math(self.block_text)\n        node['latex'] = latex\n        node['fontset'] = self.options.get('fontset', 'cm')\n        node['fontsize'] = self.options.get('fontsize', setup.app.config.mathmpl_fontsize)\n        return [node]",
    "docstring": "The `` directive, as documented in the module's docstring.",
    "type": "class",
    "file_path": "matplotlib\\lib\\matplotlib\\sphinxext\\mathmpl.py",
    "ast_data": "ClassDef name:MathDirective Assign Assign Assign Assign Assign FunctionDef name:run arg:self arguments arg Assign Call Assign Call Assign Assign Call Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "save_exported_model",
    "source_code": "def save_exported_model(self, dst_saved_model_path: str, exported_model_serialized: bytes, src_saved_model_path: str, tags: set[str], serialized_signature_def_map: dict[str, bytes]) -> Optional[bool]:\n    exported_model = exported_model_pb2.ExportedModel.FromString(exported_model_serialized)\n    signature_def_map = {}\n    for key, serialized_signature_def in serialized_signature_def_map.items():\n        signature_def_map[key] = meta_graph_pb2.SignatureDef.FromString(serialized_signature_def)\n    return _call_and_return_none_on_error(func=functools.partial(_save_model_and_copy_assets, exported_model, src_saved_model_path, dst_saved_model_path, signature_def_map, tags), error_msg=f'Failed to save model \"{dst_saved_model_path}\", signature_def_map: {signature_def_map}, tags: {tags}.')",
    "docstring": "Saves to as a SavedModel. Args: dst_saved_model_path: Destination path to save the exported model. exported_model_serialized: Exported model to export as SavedModel. src_saved_model_path: Path to the source SavedModel. This will be used to copy the asset files to . tags: Tags to attach to the saved MetaGraphDef. serialized_signature_def_map: Signature key -> serialized SignatureDef. Returns: upon successful execution. when an error is raised internally.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\compiler\\mlir\\quantization\\tensorflow\\python\\py_function_lib.py",
    "ast_data": "FunctionDef name:save_exported_model arg:self arg:dst_saved_model_path arg:exported_model_serialized arg:src_saved_model_path arg:tags arg:serialized_signature_def_map arguments arg arg arg arg arg arg Assign Call Assign For Call Assign Call Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "assign_add",
    "source_code": "def assign_add(self, var, value, use_locking=False, name=None, read_value=True):\n    with distribute_lib.enter_or_assert_strategy(var.distribute_strategy):\n        if distribute_lib.in_cross_replica_context() and (not values_util.in_replica_update_context()):\n            values_util.mark_as_unsaveable()\n            return values_util.on_read_assign_add_cross_replica(var, value, read_value=read_value)\n        else:\n            return values_util.on_write_assign_add(var, value, use_locking=use_locking, name=name, read_value=read_value)",
    "docstring": "Adds a value to this variable.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\values.py",
    "ast_data": "FunctionDef name:assign_add arg:self arg:var arg:value arg:use_locking arg:name arg:read_value arguments arg arg arg arg arg arg With Call If BoolOp Call Call Call Return return:yes Call Return return:yes Call"
  },
  {
    "library": "cherrypy",
    "name": "__init__",
    "source_code": "def __init__(self, path=None):\n    if not path:\n        path = os.path.join(os.path.dirname(__file__), 'profile')\n    self.path = path\n    if not os.path.exists(path):\n        os.makedirs(path)",
    "docstring": "Prepare the profiling app resources.",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\lib\\profiler.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:path arguments arg arg If Assign Call Call Assign If Call Call"
  },
  {
    "library": "tensorflow",
    "name": "handle_call_time_value",
    "source_code": "def handle_call_time_value(self):\n\n    def closure():\n        dispatch_context = coordinator_context.get_current_dispatch_context()\n        if dispatch_context:\n            remote_value = self._per_worker_vars._values[dispatch_context.worker_index]\n            ret = dispatch_context.maybe_get_remote_value(remote_value)\n            return ret.handle\n        else:\n            return self._coordinator_instance.handle\n    return (closure, PerWorkerVariableSpec(value=self._coordinator_instance.handle))",
    "docstring": "Returns a closure to run for a handle at call time and its spec. This function is called in self.handle to create a placeholder which returns a handle on some worker or on the coordinator.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\ps_values.py",
    "ast_data": "FunctionDef name:handle_call_time_value arg:self arguments arg FunctionDef name:closure arguments Assign Call If Assign Assign Call Return return:yes Return return:yes Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "_predict",
    "source_code": "def _predict(self, X, check_input=True):\n    y_pred = self._forward_pass_fast(X, check_input=check_input)\n    if self.n_outputs_ == 1:\n        y_pred = y_pred.ravel()\n    return self._label_binarizer.inverse_transform(y_pred)",
    "docstring": "Private predict method with optional input validation",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\neural_network\\_multilayer_perceptron.py",
    "ast_data": "FunctionDef name:_predict arg:self arg:X arg:check_input arguments arg arg arg Assign Call If Compare Assign Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "prim_tolist",
    "source_code": "@_onnx_symbolic('prim::tolist')\ndef prim_tolist(g: jit_utils.GraphContext, input, dim_val, elem_ty_val):\n    dim = symbolic_helper._maybe_get_const(dim_val, 'i')\n    if dim > 1:\n        return symbolic_helper._unimplemented('prim::tolist', 'dim_val > 1', input)\n    return input",
    "docstring": "tolist is currently supported only for 1D input tensors. dim_val and elem_ty_val represent dimension and type annotations that need to match dimension and type of the input tensor.",
    "type": "function",
    "file_path": "pytorch\\torch\\onnx\\symbolic_opset9.py",
    "ast_data": "FunctionDef name:prim_tolist arg:g arg:input arg:dim_val arg:elem_ty_val arguments arg arg arg arg Assign Call If Compare Return return:yes Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "enumerate_dataset",
    "source_code": "@deprecation.deprecated(None, 'Use `tf.data.Dataset.enumerate()`.')\n@tf_export('data.experimental.enumerate_dataset')\ndef enumerate_dataset(start=0):\n\n    def _apply_fn(dataset):\n        return dataset.enumerate(start)\n    return _apply_fn",
    "docstring": "A transformation that enumerates the elements of a dataset. It is similar to python's . For example: Args: start: A scalar , representing the start value for enumeration. Returns: A transformation function, which can be passed to .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\data\\experimental\\ops\\enumerate_ops.py",
    "ast_data": "FunctionDef name:enumerate_dataset arg:start arguments arg FunctionDef name:_apply_fn arg:dataset arguments arg Return return:yes Call Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "extra_repr",
    "source_code": "def extra_repr(self) -> str:\n    return ''",
    "docstring": "Return the extra representation of the module. To print customized extra information, you should re-implement this method in your own modules. Both single-line and multi-line strings are acceptable.",
    "type": "method",
    "file_path": "pytorch\\torch\\nn\\modules\\module.py",
    "ast_data": "FunctionDef name:extra_repr arg:self arguments arg Return return:yes"
  },
  {
    "library": "pandas",
    "name": "_preparse",
    "source_code": "def _preparse(source: str, f=_compose(_replace_locals, _replace_booleans, _rewrite_assign, clean_backtick_quoted_toks)) -> str:\n    assert callable(f), 'f must be callable'\n    return tokenize.untokenize((f(x) for x in tokenize_string(source)))",
    "docstring": "Compose a collection of tokenization functions. Parameters ---------- source : str A Python source code string f : callable This takes a tuple of (toknum, tokval) as its argument and returns a tuple with the same structure but possibly different elements. Defaults to the composition of `f` is a string.",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\computation\\expr.py",
    "ast_data": "FunctionDef name:_preparse arg:source arg:f arguments arg arg Call Call Return return:yes Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "__call__",
    "source_code": "def __call__(self, token, device, args):\n    func = self.get(token, None)\n    if func is None:\n        raise ValueError(f'Could not find callback with key={token} in the registry.')\n    if isinstance(func, EagerFunc):\n        return func(device, token, args)\n    else:\n        ret = func(*args)\n        if isinstance(ret, bytes):\n            ret = [ret]\n        if isinstance(ret, (tuple, list)):\n            return [self._convert(x) for x in ret]\n        else:\n            return self._convert(ret)",
    "docstring": "Calls the registered function for with args. Args: token: A key into this identifying which function to call. device: Name of the device on which outputs of 's corresponding operation should be placed. Used iff the function registered for is an EagerPyFunc. args: The arguments to pass to the function registered for . Returns: The output of the function registered for . Raises: ValueError: if no function is registered for .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\script_ops.py",
    "ast_data": "FunctionDef name:__call__ arg:self arg:token arg:device arg:args arguments arg arg arg arg Assign Call If Compare Raise Call If Call Return return:yes Call Assign Call If Call Assign If Call Return return:yes Call Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "__init__",
    "source_code": "def __init__(self, width, significand, min=None, repeat=None):\n    self.width = width\n    self.significand = significand\n    self.repeat = repeat\n    self.min = min",
    "docstring": "Parameters ---------- width : int number of characters taken by the string (includes space).",
    "type": "method",
    "file_path": "scipy\\scipy\\io\\_harwell_boeing\\_fortran_format_parser.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:width arg:significand arg:min arg:repeat arguments arg arg arg arg arg Assign Assign Assign Assign"
  },
  {
    "library": "pytorch",
    "name": "get_static_quant_module_class",
    "source_code": "def get_static_quant_module_class(float_module_class: Callable, additional_static_quant_mapping: Optional[dict[Callable, Any]]=None, is_reference: bool=False) -> Any:\n    if additional_static_quant_mapping is None:\n        additional_static_quant_mapping = {}\n    all_mappings = get_combined_dict(DEFAULT_REFERENCE_STATIC_QUANT_MODULE_MAPPINGS if is_reference else DEFAULT_STATIC_QUANT_MODULE_MAPPINGS, additional_static_quant_mapping)\n    static_quant_module_class = all_mappings.get(float_module_class, None)\n    assert static_quant_module_class is not None, f'Floating point module class {str(float_module_class)}' + ' does not have a corresponding quantized module class'\n    return copy.deepcopy(static_quant_module_class)",
    "docstring": "n Get the statically quantized module class corresponding to the floating point module class",
    "type": "function",
    "file_path": "pytorch\\torch\\ao\\quantization\\quantization_mappings.py",
    "ast_data": "FunctionDef name:get_static_quant_module_class arg:float_module_class arg:additional_static_quant_mapping arg:is_reference arguments arg arg arg If Compare Assign Assign Call Assign Call Compare Call Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "IterInv",
    "source_code": "class IterInv(LinearOperator):\n\n    def __init__(self, M, ifunc=gmres_loose, tol=0):\n        self.M = M\n        if hasattr(M, 'dtype'):\n            self.dtype = M.dtype\n        else:\n            x = np.zeros(M.shape[1])\n            self.dtype = (M * x).dtype\n        self.shape = M.shape\n        if tol <= 0:\n            tol = 2 * np.finfo(self.dtype).eps\n        self.ifunc = ifunc\n        self.tol = tol\n\n    def _matvec(self, x):\n        b, info = self.ifunc(self.M, x, tol=self.tol)\n        if info != 0:\n            raise ValueError(f'Error in inverting M: function {self.ifunc.__name__} did not converge (info = {info}).')\n        return b",
    "docstring": "IterInv: helper class to repeatedly solve M*x=b using an iterative method.",
    "type": "class",
    "file_path": "scipy\\scipy\\sparse\\linalg\\_eigen\\arpack\\arpack.py",
    "ast_data": "ClassDef name:IterInv FunctionDef name:__init__ arg:self arg:M arg:ifunc arg:tol arguments arg arg arg arg Assign If Call Assign Assign Call Assign Assign If Compare Assign Call Assign Assign FunctionDef name:_matvec arg:self arg:x arguments arg arg Assign Call If Compare Raise Call Return return:yes"
  },
  {
    "library": "seaborn",
    "name": "refline",
    "source_code": "def refline(self, *, x=None, y=None, color='.5', linestyle='--', **line_kws):\n    line_kws['color'] = color\n    line_kws['linestyle'] = linestyle\n    if x is not None:\n        self.map(plt.axvline, x=x, **line_kws)\n    if y is not None:\n        self.map(plt.axhline, y=y, **line_kws)\n    return self",
    "docstring": "Add a reference line(s) to each facet. Parameters ---------- x, y : numeric Value(s) to draw the line(s) at. color : :mod: Specifies the color of the reference line(s). Pass `matplotlib.axes.Axes.axvlinematplotlib.axes.Axes.axhlineFacetGrid` for easy method chaining.",
    "type": "method",
    "file_path": "seaborn\\seaborn\\axisgrid.py",
    "ast_data": "FunctionDef name:refline arg:self arguments arg arg arg arg arg arg Assign Assign If Compare Call If Compare Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "Registry",
    "source_code": "class Registry(object):\n    __slots__ = ['_name', '_registry']\n\n    def __init__(self, name):\n        self._name = name\n        self._registry = {}\n\n    def register(self, candidate, name=None):\n        if not name:\n            name = candidate.__name__\n        if name in self._registry:\n            frame = self._registry[name][_LOCATION_TAG]\n            raise KeyError(\"Registering two %s with name '%s'! (Previous registration was in %s %s:%d)\" % (self._name, name, frame.name, frame.filename, frame.lineno))\n        logging.vlog(1, 'Registering %s (%s) in %s.', name, candidate, self._name)\n        stack = traceback.extract_stack(limit=3)\n        stack_index = min(2, len(stack) - 1)\n        if stack_index >= 0:\n            location_tag = stack[stack_index]\n        else:\n            location_tag = ('UNKNOWN', 'UNKNOWN', 'UNKNOWN', 'UNKNOWN', 'UNKNOWN')\n        self._registry[name] = {_TYPE_TAG: candidate, _LOCATION_TAG: location_tag}\n\n    def list(self):\n        return self._registry.keys()\n\n    def lookup(self, name):\n        name = compat.as_str(name)\n        if name in self._registry:\n            return self._registry[name][_TYPE_TAG]\n        else:\n            raise LookupError('%s registry has no entry for: %s' % (self._name, name))",
    "docstring": "Provides a registry for saving objects.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\registry.py",
    "ast_data": "ClassDef name:Registry Assign FunctionDef name:__init__ arg:self arg:name arguments arg arg Assign Assign FunctionDef name:register arg:self arg:candidate arg:name arguments arg arg arg If Assign If Compare Assign Raise Call Call Assign Call Assign Call Call If Compare Assign Assign Assign FunctionDef name:list arg:self arguments arg Return return:yes Call FunctionDef name:lookup arg:self arg:name arguments arg arg Assign Call If Compare Return return:yes Raise Call"
  },
  {
    "library": "django",
    "name": "User",
    "source_code": "class User(AbstractUser):\n\n    class Meta(AbstractUser.Meta):\n        swappable = 'AUTH_USER_MODEL'",
    "docstring": "Users within the Django authentication system are represented by this model. Username and password are required. Other fields are optional.",
    "type": "class",
    "file_path": "django\\django\\contrib\\auth\\models.py",
    "ast_data": "ClassDef name:User ClassDef name:Meta Assign"
  },
  {
    "library": "pytorch",
    "name": "_deregister_pytree_node",
    "source_code": "def _deregister_pytree_node(cls: type[Any]) -> None:\n    with _NODE_REGISTRY_LOCK:\n        del SUPPORTED_NODES[cls]\n        node_def = SUPPORTED_SERIALIZED_TYPES[cls]\n        del SERIALIZED_TYPE_TO_PYTHON_TYPE[node_def.serialized_type_name]\n        del SUPPORTED_SERIALIZED_TYPES[cls]\n        CONSTANT_NODES.discard(cls)",
    "docstring": "This is an internal function that is used to deregister a pytree node type for the Python pytree only. This should be only used inside PyTorch.",
    "type": "function",
    "file_path": "pytorch\\torch\\utils\\_pytree.py",
    "ast_data": "FunctionDef name:_deregister_pytree_node arg:cls arguments arg With Assign Call"
  },
  {
    "library": "django",
    "name": "GeometryCollection",
    "source_code": "class GeometryCollection(OGRGeometry):\n\n    def __getitem__(self, index):\n        if 0 <= index < self.geom_count:\n            return OGRGeometry(capi.clone_geom(capi.get_geom_ref(self.ptr, index)), self.srs)\n        else:\n            raise IndexError('Index out of range when accessing geometry in a collection: %s.' % index)\n\n    def __len__(self):\n        return self.geom_count\n\n    def add(self, geom):\n        if isinstance(geom, OGRGeometry):\n            if isinstance(geom, self.__class__):\n                for g in geom:\n                    capi.add_geom(self.ptr, g.ptr)\n            else:\n                capi.add_geom(self.ptr, geom.ptr)\n        elif isinstance(geom, str):\n            tmp = OGRGeometry(geom)\n            capi.add_geom(self.ptr, tmp.ptr)\n        else:\n            raise GDALException('Must add an OGRGeometry.')\n\n    @property\n    def point_count(self):\n        return sum((self[i].point_count for i in range(self.geom_count)))\n\n    @property\n    def tuple(self):\n        return tuple((self[i].tuple for i in range(self.geom_count)))\n    coords = tuple",
    "docstring": "The Geometry Collection class.",
    "type": "class",
    "file_path": "django\\django\\contrib\\gis\\gdal\\geometries.py",
    "ast_data": "ClassDef name:GeometryCollection FunctionDef name:__getitem__ arg:self arg:index arguments arg arg If Compare Return return:yes Call Call Call Raise Call FunctionDef name:__len__ arg:self arguments arg Return return:yes FunctionDef name:add arg:self arg:geom arguments arg arg If Call If Call For Call Call If Call Assign Call Call Raise Call FunctionDef name:point_count arg:self arguments arg Return return:yes Call Call FunctionDef name:tuple arg:self arguments arg Return return:yes Call Call Assign"
  },
  {
    "library": "tensorflow",
    "name": "_load",
    "source_code": "@abc.abstractmethod\ndef _load(self) -> Callable[..., ops.Operation]:\n    raise NotImplementedError",
    "docstring": "Returns the load function for the optimizer.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\tpu\\tpu_embedding_v2_utils.py",
    "ast_data": "FunctionDef name:_load arg:self arguments arg Raise"
  },
  {
    "library": "matplotlib",
    "name": "set_box_aspect",
    "source_code": "def set_box_aspect(self, aspect=None):\n    axs = {*self._twinned_axes.get_siblings(self), *self._twinned_axes.get_siblings(self)}\n    if aspect is not None:\n        aspect = float(aspect)\n        for ax in axs:\n            ax.set_adjustable('datalim')\n    for ax in axs:\n        ax._box_aspect = aspect\n        ax.stale = True",
    "docstring": "Set the Axes box aspect, i.e. the ratio of height to width. This defines the aspect of the Axes in figure space and is not to be confused with the data aspect (see ). Parameters ---------- aspect : float or None Changes the physical dimensions of the Axes, such that the ratio of the Axes height to the Axes width in physical units is equal to *aspect*. Defining a box aspect will change the *adjustable* property to 'datalim' (see ). *None* will disable a fixed box aspect so that height and width of the Axes are chosen independently. See Also -------- matplotlib.axes.Axes.set_aspect for a description of aspect handling.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\axes\\_base.py",
    "ast_data": "FunctionDef name:set_box_aspect arg:self arg:aspect arguments arg arg Assign Call Call If Compare Assign Call For Call For Assign Assign"
  },
  {
    "library": "scikit-learn",
    "name": "_center_scale_xy",
    "source_code": "def _center_scale_xy(X, y, scale=True):\n    x_mean = X.mean(axis=0)\n    X -= x_mean\n    y_mean = y.mean(axis=0)\n    y -= y_mean\n    if scale:\n        x_std = X.std(axis=0, ddof=1)\n        x_std[x_std == 0.0] = 1.0\n        X /= x_std\n        y_std = y.std(axis=0, ddof=1)\n        y_std[y_std == 0.0] = 1.0\n        y /= y_std\n    else:\n        x_std = np.ones(X.shape[1])\n        y_std = np.ones(y.shape[1])\n    return (X, y, x_mean, y_mean, x_std, y_std)",
    "docstring": "Center X, y and scale if the scale parameter==True Returns ------- X, y, x_mean, y_mean, x_std, y_std",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\cross_decomposition\\_pls.py",
    "ast_data": "FunctionDef name:_center_scale_xy arg:X arg:y arg:scale arguments arg arg arg Assign Call Assign Call If Assign Call Assign Compare Assign Call Assign Compare Assign Call Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_LoopCondGrad",
    "source_code": "@ops.RegisterGradient('LoopCond')\ndef _LoopCondGrad(_):\n    return None",
    "docstring": "Stop backprop for the predicate of a while loop.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\control_flow_grad.py",
    "ast_data": "FunctionDef name:_LoopCondGrad arg:_ arguments arg Return return:no Call"
  },
  {
    "library": "scipy",
    "name": "_bootstrap_resample",
    "source_code": "def _bootstrap_resample(sample, n_resamples=None, rng=None):\n    n = sample.shape[-1]\n    i = rng_integers(rng, 0, n, (n_resamples, n))\n    resamples = sample[..., i]\n    return resamples",
    "docstring": "Bootstrap resample the sample.",
    "type": "function",
    "file_path": "scipy\\scipy\\stats\\_resampling.py",
    "ast_data": "FunctionDef name:_bootstrap_resample arg:sample arg:n_resamples arg:rng arguments arg arg arg Assign Assign Call Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, outputs):\n    self._outputs = self._wrap_and_check_outputs(outputs, self._SINGLE_OUTPUT_DEFAULT_NAME, error_label='Prediction')",
    "docstring": "Constructor for PredictOutput. Args: outputs: A or a dict of string to representing the predictions. Raises: ValueError: if the outputs is not dict, or any of its keys are not strings, or any of its values are not s.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\saved_model\\model_utils\\export_output.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:outputs arguments arg arg Assign Call"
  },
  {
    "library": "kornia",
    "name": "BoxBlur",
    "source_code": "class BoxBlur(Module):\n\n    def __init__(self, kernel_size: tuple[int, int] | int, border_type: str='reflect', separable: bool=False) -> None:\n        super().__init__()\n        self.kernel_size = kernel_size\n        self.border_type = border_type\n        self.separable = separable\n        if separable:\n            ky, kx = _unpack_2d_ks(self.kernel_size)\n            self.register_buffer('kernel_y', get_box_kernel1d(ky))\n            self.register_buffer('kernel_x', get_box_kernel1d(kx))\n            self.kernel_y: Tensor\n            self.kernel_x: Tensor\n        else:\n            self.register_buffer('kernel', get_box_kernel2d(kernel_size))\n            self.kernel: Tensor\n\n    def __repr__(self) -> str:\n        return f'{self.__class__.__name__}(kernel_size={self.kernel_size}, border_type={self.border_type}, separable={self.separable})'\n\n    def forward(self, input: Tensor) -> Tensor:\n        KORNIA_CHECK_IS_TENSOR(input)\n        if self.separable:\n            return filter2d_separable(input, self.kernel_x, self.kernel_y, self.border_type)\n        return filter2d(input, self.kernel, self.border_type)",
    "docstring": "Blur an image using the box filter. The function smooths an image using the kernel: .. math:: K = \\frac{1}{\\text{kernel_size}_x * \\text{kernel_size}_y} \\begin{bmatrix} 1 & 1 & 1 & \\cdots & 1 & 1 \\\\ 1 & 1 & 1 & \\cdots & 1 & 1 \\\\ \\vdots & \\vdots & \\vdots & \\ddots & \\vdots & \\vdots \\\\ 1 & 1 & 1 & \\cdots & 1 & 1 \\\\ \\end{bmatrix} Args: kernel_size: the blurring kernel size. border_type: the padding mode to be applied before convolving. The expected modes are: `(B, C, H, W)(B, C, H, W)` Example: >>> input = torch.rand(2, 4, 5, 7) >>> blur = BoxBlur((3, 3)) >>> output = blur(input) # 2x4x5x7 >>> output.shape torch.Size([2, 4, 5, 7])",
    "type": "class",
    "file_path": "kornia\\kornia\\filters\\blur.py",
    "ast_data": "ClassDef name:BoxBlur FunctionDef name:__init__ arg:self arg:kernel_size arg:border_type arg:separable arguments arg arg arg arg Call Call Assign Assign Assign If Assign Call Call Call Call Call Call Call FunctionDef name:__repr__ arg:self arguments arg Return return:yes FunctionDef name:forward arg:self arg:input arguments arg arg Call If Return return:yes Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "create_time",
    "source_code": "@property\ndef create_time(self) -> int:\n    return self._create_time",
    "docstring": "Timestamp when this tensor was created (long integer).",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\client\\timeline.py",
    "ast_data": "FunctionDef name:create_time arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_in_multi_worker_mode",
    "source_code": "def _in_multi_worker_mode(self):\n    return False",
    "docstring": "Whether this strategy indicates working in multi-worker settings.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\tpu_strategy.py",
    "ast_data": "FunctionDef name:_in_multi_worker_mode arg:self arguments arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "maybe_get_next_equalization_scale",
    "source_code": "def maybe_get_next_equalization_scale(node: Node, modules: dict[str, nn.Module]) -> Optional[torch.Tensor]:\n    next_inp_eq_obs = maybe_get_next_input_eq_obs(node, modules)\n    if next_inp_eq_obs:\n        if next_inp_eq_obs.equalization_scale.nelement() == 1 and next_inp_eq_obs.equalization_scale == torch.tensor(1):\n            return None\n        return next_inp_eq_obs.equalization_scale\n    return None",
    "docstring": "If the next next node is an InputEqualizationObserver then we want to return its equalization scale, else we return 1 This is used in the case where there are two connecting linear layers: linear1 -> LinearOutObs -> InputEqObs -> linear2 In this case, the node given is linear1 and we want to locate the InputEqObs.",
    "type": "function",
    "file_path": "pytorch\\torch\\ao\\quantization\\fx\\_equalize.py",
    "ast_data": "FunctionDef name:maybe_get_next_equalization_scale arg:node arg:modules arguments arg arg Assign Call If If BoolOp Compare Call Compare Call Return return:no Return return:yes Return return:no"
  },
  {
    "library": "scikit-learn",
    "name": "score",
    "source_code": "def score(self, X, y=None):\n    return self.score_samples(X).mean()",
    "docstring": "Compute the per-sample average log-likelihood of the given data X. Parameters ---------- X : array-like of shape (n_samples, n_dimensions) List of n_features-dimensional data points. Each row corresponds to a single data point. y : Ignored Not used, present for API consistency by convention. Returns ------- log_likelihood : float Log-likelihood of under the Gaussian mixture model.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\mixture\\_base.py",
    "ast_data": "FunctionDef name:score arg:self arg:X arg:y arguments arg arg arg Return return:yes Call Call"
  },
  {
    "library": "sphinx",
    "name": "PseudoXMLBuilder",
    "source_code": "class PseudoXMLBuilder(XMLBuilder):\n    name = 'pseudoxml'\n    format = 'pseudoxml'\n    epilog = __('The pseudo-XML files are in %(outdir)s.')\n    out_suffix = '.pseudoxml'\n    _writer_class = PseudoXMLWriter",
    "docstring": "Builds pseudo-XML for display purposes.",
    "type": "class",
    "file_path": "sphinx\\sphinx\\builders\\xml.py",
    "ast_data": "ClassDef name:PseudoXMLBuilder Assign Assign Assign Call Assign Assign"
  },
  {
    "library": "pandas",
    "name": "ensure_decoded",
    "source_code": "def ensure_decoded(s) -> str:\n    if isinstance(s, (np.bytes_, bytes)):\n        s = s.decode(get_option('display.encoding'))\n    return s",
    "docstring": "If we have bytes, decode them to unicode.",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\computation\\common.py",
    "ast_data": "FunctionDef name:ensure_decoded arg:s arguments arg If Call Assign Call Call Return return:yes"
  },
  {
    "library": "scipy",
    "name": "Price02",
    "source_code": "class Price02(Benchmark):\n\n    def __init__(self, dimensions=2):\n        Benchmark.__init__(self, dimensions)\n        self._bounds = list(zip([-10.0] * self.N, [10.0] * self.N))\n        self.global_optimum = [[0.0, 0.0]]\n        self.fglob = 0.9\n\n    def fun(self, x, *args):\n        self.nfev += 1\n        return 1.0 + sum(sin(x) ** 2) - 0.1 * exp(-x[0] ** 2.0 - x[1] ** 2.0)",
    "docstring": "Price 2 objective function. This class defines the Price 2 [1]_ global optimization problem. This is a multimodal minimization problem defined as follows: .. math:: f_{\\text{Price02}}(x) = 1 + \\sin^2(x_1) + \\sin^2(x_2) - 0.1e^{(-x_1^2 - x_2^2)} with :math: for :math:. *Global optimum*: :math: for :math: .. [1] Price, W. A controlled random search procedure for global optimisation Computer Journal, 1977, 20, 367-370",
    "type": "class",
    "file_path": "scipy\\benchmarks\\benchmarks\\go_benchmark_functions\\go_funcs_P.py",
    "ast_data": "ClassDef name:Price02 FunctionDef name:__init__ arg:self arg:dimensions arguments arg arg Call Assign Call Call Assign Assign FunctionDef name:fun arg:self arg:x arguments arg arg arg Return return:yes Call Call Call"
  },
  {
    "library": "numpy",
    "name": "ipython",
    "source_code": "@click.command(context_settings={'ignore_unknown_options': True})\n@click.argument('ipython_args', metavar='', nargs=-1)\n@meson.build_dir_option\ndef ipython(*, ipython_args, build_dir):\n    env = os.environ\n    env['PYTHONWARNINGS'] = env.get('PYTHONWARNINGS', 'all')\n    ctx = click.get_current_context()\n    ctx.invoke(build)\n    ppath = meson._set_pythonpath(build_dir)\n    print(f'💻 Launching IPython with PYTHONPATH=\"{ppath}\"')\n    preimport = \"import numpy as np; print(f'\\\\nPreimported NumPy {np.__version__} as np')\"\n    spin.util.run(['ipython', '--ignore-cwd', f'--TerminalIPythonApp.exec_lines={preimport}'] + list(ipython_args))",
    "docstring": "💻 Launch IPython shell with PYTHONPATH set OPTIONS are passed through directly to IPython, e.g.: spin ipython -i myscript.py",
    "type": "function",
    "file_path": "numpy\\.spin\\cmds.py",
    "ast_data": "FunctionDef name:ipython arguments arg arg Assign Assign Call Assign Call Call Assign Call Call Assign Call Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_subscribe_extend",
    "source_code": "def _subscribe_extend(tensor, side_effects):\n    assert len(tensor.op.inputs) == 1, 'Op {} must only have one input'.format(tensor.op.name)\n    source_tensor = tensor.op.inputs[0]\n    outs = []\n    name_scope = source_tensor.op.name + '/subscription/'\n    with ops.name_scope(name_scope):\n        for s in side_effects:\n            outs += s(source_tensor)\n    out_ops = [out.op if isinstance(out, tensor_lib.Tensor) else out for out in outs]\n    tensor.op._add_control_inputs(out_ops)\n    return tensor",
    "docstring": "Helper method to extend the list of side_effects for a subscribed tensor. Args: tensor: A as returned by subscribe(). side_effects: List of side_effect functions, see subscribe for details. Returns: The given subscribed tensor (for API consistency).",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\subscribe.py",
    "ast_data": "FunctionDef name:_subscribe_extend arg:tensor arg:side_effects arguments arg arg Compare Call Call Assign Assign Assign With Call For Call Assign Call Call Return return:yes"
  },
  {
    "library": "cherrypy",
    "name": "__init__",
    "source_code": "def __init__(self, newhandler, point='before_handler', name=None, priority=50):\n    self.newhandler = newhandler\n    self._point = point\n    self._name = name\n    self._priority = priority",
    "docstring": "Initialize a handler wrapper tool.",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\_cptools.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:newhandler arg:point arg:name arg:priority arguments arg arg arg arg arg Assign Assign Assign Assign"
  },
  {
    "library": "scikit-learn",
    "name": "_fit",
    "source_code": "def _fit(self, v_pos, rng):\n    h_pos = self._mean_hiddens(v_pos)\n    v_neg = self._sample_visibles(self.h_samples_, rng)\n    h_neg = self._mean_hiddens(v_neg)\n    lr = float(self.learning_rate) / v_pos.shape[0]\n    update = safe_sparse_dot(v_pos.T, h_pos, dense_output=True).T\n    update -= np.dot(h_neg.T, v_neg)\n    self.components_ += lr * update\n    self.intercept_hidden_ += lr * (h_pos.sum(axis=0) - h_neg.sum(axis=0))\n    self.intercept_visible_ += lr * (np.asarray(v_pos.sum(axis=0)).squeeze() - v_neg.sum(axis=0))\n    h_neg[rng.uniform(size=h_neg.shape) < h_neg] = 1.0\n    self.h_samples_ = np.floor(h_neg, h_neg)",
    "docstring": "Inner fit for one mini-batch. Adjust the parameters to maximize the likelihood of v using Stochastic Maximum Likelihood (SML). Parameters ---------- v_pos : ndarray of shape (n_samples, n_features) The data to use for training. rng : RandomState instance Random number generator to use for sampling.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\neural_network\\_rbm.py",
    "ast_data": "FunctionDef name:_fit arg:self arg:v_pos arg:rng arguments arg arg arg Assign Call Assign Call Assign Call Assign Call Assign Call Call Call Call Call Call Call Call Assign Compare Call Assign Call"
  },
  {
    "library": "scikit-learn",
    "name": "_transform_X_ordinal",
    "source_code": "def _transform_X_ordinal(self, X_out, X_ordinal, X_unknown_mask, row_indices, encodings, target_mean):\n    if self.target_type_ == 'multiclass':\n        n_classes = len(self.classes_)\n        for e_idx, encoding in enumerate(encodings):\n            feat_idx = e_idx // n_classes\n            mean_idx = e_idx % n_classes\n            X_out[row_indices, e_idx] = encoding[X_ordinal[row_indices, feat_idx]]\n            X_out[X_unknown_mask[:, feat_idx], e_idx] = target_mean[mean_idx]\n    else:\n        for e_idx, encoding in enumerate(encodings):\n            X_out[row_indices, e_idx] = encoding[X_ordinal[row_indices, e_idx]]\n            X_out[X_unknown_mask[:, e_idx], e_idx] = target_mean",
    "docstring": "Transform X_ordinal using encodings. In the multiclass case, and have column (axis=1) size , while has length of size . deals with this by repeating feature indices by E.g., for 3 features, 2 classes: 0,0,1,1,2,2 Additionally, is of shape (,) so cycles through 0 to - 1, times.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\preprocessing\\_target_encoder.py",
    "ast_data": "FunctionDef name:_transform_X_ordinal arg:self arg:X_out arg:X_ordinal arg:X_unknown_mask arg:row_indices arg:encodings arg:target_mean arguments arg arg arg arg arg arg arg If Compare Assign Call For Call Assign Assign Assign Assign For Call Assign Assign"
  },
  {
    "library": "pandas",
    "name": "MultiIndexUInt32Engine",
    "source_code": "class MultiIndexUInt32Engine(libindex.BaseMultiIndexCodesEngine, libindex.UInt32Engine):\n    _base = libindex.UInt32Engine\n    _codes_dtype = 'uint32'",
    "docstring": "Manages a MultiIndex by mapping label combinations to positive integers. The number of possible label combinations must not overflow the 32 bits integers.",
    "type": "class",
    "file_path": "pandas\\pandas\\core\\indexes\\multi.py",
    "ast_data": "ClassDef name:MultiIndexUInt32Engine Assign Assign"
  },
  {
    "library": "tensorflow",
    "name": "_BaseDistribution",
    "source_code": "class _BaseDistribution(metaclass=abc.ABCMeta):\n    pass",
    "docstring": "Abstract base class needed for resolving subclass hierarchy.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\distributions\\distribution.py",
    "ast_data": "ClassDef name:_BaseDistribution"
  },
  {
    "library": "matplotlib",
    "name": "append_axes",
    "source_code": "def append_axes(self, position, size, pad=None, *, axes_class=None, **kwargs):\n    create_axes, pack_start = _api.check_getitem({'left': (self.new_horizontal, True), 'right': (self.new_horizontal, False), 'bottom': (self.new_vertical, True), 'top': (self.new_vertical, False)}, position=position)\n    ax = create_axes(size, pad, pack_start=pack_start, axes_class=axes_class, **kwargs)\n    self._fig.add_axes(ax)\n    return ax",
    "docstring": "Add a new axes on a given side of the main axes. Parameters ---------- position : {\"left\", \"right\", \"bottom\", \"top\"} Where the new axes is positioned relative to the main axes. size : :mod: or float or str The axes width or height. float or str arguments are interpreted as `~mpl_toolkits.axes_grid1.axes_sizefigure.subplot.wspacefigure.subplot.hspace~.axes.Axes`, optional The type of the new axes. Defaults to the type of the main axes. **kwargs All extra keywords arguments are passed to the created axes.",
    "type": "method",
    "file_path": "matplotlib\\lib\\mpl_toolkits\\axes_grid1\\axes_divider.py",
    "ast_data": "FunctionDef name:append_axes arg:self arg:position arg:size arg:pad arguments arg arg arg arg arg arg Assign Call Assign Call Call Return return:yes"
  },
  {
    "library": "numpy",
    "name": "try_dispatch",
    "source_code": "def try_dispatch(self, sources, src_dir=None, ccompiler=None, **kwargs):\n    to_compile = {}\n    baseline_flags = self.cpu_baseline_flags()\n    include_dirs = kwargs.setdefault('include_dirs', [])\n    for src in sources:\n        output_dir = os.path.dirname(src)\n        if src_dir:\n            if not output_dir.startswith(src_dir):\n                output_dir = os.path.join(src_dir, output_dir)\n            if output_dir not in include_dirs:\n                include_dirs.append(output_dir)\n        has_baseline, targets, extra_flags = self.parse_targets(src)\n        nochange = self._generate_config(output_dir, src, targets, has_baseline)\n        for tar in targets:\n            tar_src = self._wrap_target(output_dir, src, tar, nochange=nochange)\n            flags = tuple(extra_flags + self.feature_flags(tar))\n            to_compile.setdefault(flags, []).append(tar_src)\n        if has_baseline:\n            flags = tuple(extra_flags + baseline_flags)\n            to_compile.setdefault(flags, []).append(src)\n        self.sources_status[src] = (has_baseline, targets)\n    objects = []\n    for flags, srcs in to_compile.items():\n        objects += self.dist_compile(srcs, list(flags), ccompiler=ccompiler, **kwargs)\n    return objects",
    "docstring": "Compile one or more dispatch-able sources and generates object files, also generates abstract C config headers and macros that used later for the final runtime dispatching process. The mechanism behind it is to takes each source file that specified in 'sources' and branching it into several files depend on special configuration statements that must be declared in the top of each source which contains targeted CPU features, then it compiles every branched source with the proper compiler flags. Parameters ---------- sources : list Must be a list of dispatch-able sources file paths, and configuration statements must be declared inside each file. src_dir : str Path of parent directory for the generated headers and wrapped sources. If None(default) the files will generated in-place. ccompiler : CCompiler Distutils instance to be used for compilation. If None (default), the provided instance during the initialization will be used instead. **kwargs : any Arguments to pass on to the Returns ------- list : generated object files Raises ------ CompileError Raises by on compiling failure. DistutilsError Some errors during checking the sanity of configuration statements. See Also -------- parse_targets : Parsing the configuration statements of dispatch-able sources.",
    "type": "method",
    "file_path": "numpy\\numpy\\distutils\\ccompiler_opt.py",
    "ast_data": "FunctionDef name:try_dispatch arg:self arg:sources arg:src_dir arg:ccompiler arguments arg arg arg arg arg Assign Assign Call Assign Call For Assign Call If If Call Assign Call If Compare Call Assign Call Assign Call For Assign Call Assign Call Call Call Call If Assign Call Call Call Assign Assign For Call Call Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "set_default_intervals",
    "source_code": "def set_default_intervals(self):\n    pass",
    "docstring": "Set the default limits for the axis data and view interval if they have not been not mutated yet.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\axis.py",
    "ast_data": "FunctionDef name:set_default_intervals arg:self arguments arg"
  },
  {
    "library": "matplotlib",
    "name": "cool",
    "source_code": "def cool() -> None:\n    set_cmap('cool')",
    "docstring": "Set the colormap to 'cool'. This changes the default colormap as well as the colormap of the current image if there is one. See `` for more information.",
    "type": "function",
    "file_path": "matplotlib\\lib\\matplotlib\\pyplot.py",
    "ast_data": "FunctionDef name:cool arguments Call"
  },
  {
    "library": "pytorch",
    "name": "manual_seed",
    "source_code": "def manual_seed(seed: int) -> None:\n    seed = int(seed)\n\n    def cb():\n        idx = current_device()\n        default_generator = torch.cuda.default_generators[idx]\n        default_generator.manual_seed(seed)\n    _lazy_call(cb, seed=True)",
    "docstring": "Set the seed for generating random numbers for the current GPU. It's safe to call this function if CUDA is not available; in that case, it is silently ignored. Args: seed (int): The desired seed. .. warning:: If you are working with a multi-GPU model, this function is insufficient to get determinism. To seed all GPUs, use :func:.",
    "type": "function",
    "file_path": "pytorch\\torch\\cuda\\random.py",
    "ast_data": "FunctionDef name:manual_seed arg:seed arguments arg Assign Call FunctionDef name:cb arguments Assign Call Assign Call Call"
  },
  {
    "library": "scipy",
    "name": "__rmul__",
    "source_code": "def __rmul__(self, other):\n    if not self._check_binop_other(other) or isinstance(other, StateSpace):\n        return NotImplemented\n    a = self.A\n    b = self.B\n    c = np.dot(other, self.C)\n    d = np.dot(other, self.D)\n    common_dtype = np.result_type(a.dtype, b.dtype, c.dtype, d.dtype)\n    return StateSpace(np.asarray(a, dtype=common_dtype), np.asarray(b, dtype=common_dtype), np.asarray(c, dtype=common_dtype), np.asarray(d, dtype=common_dtype), **self._dt_dict)",
    "docstring": "Pre-multiply a scalar or matrix (but not StateSpace)",
    "type": "method",
    "file_path": "scipy\\scipy\\signal\\_ltisys.py",
    "ast_data": "FunctionDef name:__rmul__ arg:self arg:other arguments arg arg If BoolOp Call Call Return return:yes Assign Assign Assign Call Assign Call Assign Call Return return:yes Call Call Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "apply",
    "source_code": "def apply(self, model_outputs: Any, model: torch.nn.Module | Callable | torch_export.ExportedProgram | None=None) -> Any:\n    return [torch.view_as_real(output.resolve_conj()) if isinstance(output, torch.Tensor) and torch.is_complex(output) else output for output in model_outputs]",
    "docstring": "Convert float tensors to complex tensors. Args: model_output: The model output. model: The PyTorch model. Returns: A tuple of the model output.",
    "type": "method",
    "file_path": "pytorch\\torch\\onnx\\_internal\\io_adapter.py",
    "ast_data": "FunctionDef name:apply arg:self arg:model_outputs arg:model arguments arg arg arg Return return:yes BoolOp Call Call Call Call"
  },
  {
    "library": "django",
    "name": "validate",
    "source_code": "def validate(self, value):\n    if self.required and (not value):\n        raise ValidationError(self.error_messages['required'], code='required')\n    for val in value:\n        if not self.valid_value(val):\n            raise ValidationError(self.error_messages['invalid_choice'], code='invalid_choice', params={'value': val})",
    "docstring": "Validate that the input is a list or tuple.",
    "type": "method",
    "file_path": "django\\django\\forms\\fields.py",
    "ast_data": "FunctionDef name:validate arg:self arg:value arguments arg arg If BoolOp Raise Call For If Call Raise Call"
  },
  {
    "library": "pytorch",
    "name": "_VirtualizedSerializer",
    "source_code": "@dataclass\nclass _VirtualizedSerializer:\n    aot_compilation: Any = None\n    choices: Any = None\n    local_buffer_context: Any = None\n    ops: Any = None\n    kernel: Any = None\n    current_node: Any = None\n\n    @classmethod\n    def serialize(cls) -> _VirtualizedSerializer:\n        kwargs = {}\n        for f in dataclasses.fields(cls):\n            kwargs[f.name] = getattr(V, f.name)\n        return _VirtualizedSerializer(**kwargs)\n\n    def patch(self) -> _VirtualizedSerializerContextManager:\n        return _VirtualizedSerializerContextManager(self)",
    "docstring": "This handles the data for serializing Virtualized.",
    "type": "class",
    "file_path": "pytorch\\torch\\_inductor\\compile_fx_ext.py",
    "ast_data": "ClassDef name:_VirtualizedSerializer FunctionDef name:serialize arg:cls arguments arg Assign For Call Assign Call Return return:yes Call FunctionDef name:patch arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "_check_overlap_initialized",
    "source_code": "def _check_overlap_initialized(self):\n    if self._overlap_with_ddp and self._overlap_info.status != _OverlapStatus.INITIALIZED:\n        raise RuntimeError('This method should not be called until this ZeroRedundancyOptimizer instance has been fully initialized')",
    "docstring": "Check the delayed initialization depending on the value of `_init_zero_for_overlap_init_zero_for_overlap` has not been called.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\optim\\zero_redundancy_optimizer.py",
    "ast_data": "FunctionDef name:_check_overlap_initialized arg:self arguments arg If BoolOp Compare Raise Call"
  },
  {
    "library": "scikit-learn",
    "name": "build_tokenizer",
    "source_code": "def build_tokenizer(self):\n    if self.tokenizer is not None:\n        return self.tokenizer\n    token_pattern = re.compile(self.token_pattern)\n    if token_pattern.groups > 1:\n        raise ValueError('More than 1 capturing group in token pattern. Only a single group should be captured.')\n    return token_pattern.findall",
    "docstring": "Return a function that splits a string into a sequence of tokens. Returns ------- tokenizer: callable A function to split a string into a sequence of tokens.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\feature_extraction\\text.py",
    "ast_data": "FunctionDef name:build_tokenizer arg:self arguments arg If Compare Return return:yes Assign Call If Compare Raise Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "LinearBn1d",
    "source_code": "class LinearBn1d(_FusedModule):\n\n    def __init__(self, linear, bn):\n        assert type_before_parametrizations(linear) == Linear and type_before_parametrizations(bn) == BatchNorm1d, f'Incorrect types for input modules{type_before_parametrizations(linear)}{type_before_parametrizations(bn)}'\n        super().__init__(linear, bn)",
    "docstring": "This is a sequential container which calls the Linear and BatchNorm1d modules. During quantization this will be replaced with the corresponding fused module.",
    "type": "class",
    "file_path": "pytorch\\torch\\ao\\nn\\intrinsic\\modules\\fused.py",
    "ast_data": "ClassDef name:LinearBn1d FunctionDef name:__init__ arg:self arg:linear arg:bn arguments arg arg arg BoolOp Compare Call Compare Call Call Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_maybe_initialize_trackable",
    "source_code": "@no_automatic_dependency_tracking\ndef _maybe_initialize_trackable(self):\n    if hasattr(self, '_self_unconditional_checkpoint_dependencies'):\n        return\n    self._self_unconditional_checkpoint_dependencies = []\n    self._self_unconditional_dependency_names = {}\n    self._self_unconditional_deferred_dependencies = {}\n    if hasattr(self, '_self_update_uid'):\n        raise AssertionError('Internal error: the object had an update UID set before its initialization code was run.')\n    self._self_update_uid = -1\n    self._self_name_based_restores = set()\n    self._self_saveable_object_factories = {}",
    "docstring": "Initialize dependency management. Not __init__, since most objects will forget to call it.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\trackable\\base.py",
    "ast_data": "FunctionDef name:_maybe_initialize_trackable arg:self arguments arg If Call Return return:no Assign Assign Assign If Call Raise Call Assign Assign Call Assign"
  },
  {
    "library": "scipy",
    "name": "polynomial_vector",
    "source_code": "def polynomial_vector(x, powers, out):\n    for i in range(powers.shape[0]):\n        out[i] = np.prod(x ** powers[i])",
    "docstring": "Evaluate monomials, with exponents from , at the point .",
    "type": "function",
    "file_path": "scipy\\scipy\\interpolate\\_rbfinterp_pythran.py",
    "ast_data": "FunctionDef name:polynomial_vector arg:x arg:powers arg:out arguments arg arg arg For Call Assign Call"
  },
  {
    "library": "matplotlib",
    "name": "_array_patch_perimeters",
    "source_code": "def _array_patch_perimeters(x, rstride, cstride):\n    assert rstride > 0 and cstride > 0\n    assert (x.shape[0] - 1) % rstride == 0\n    assert (x.shape[1] - 1) % cstride == 0\n    top = _unfold(x[:-1:rstride, :-1], 1, cstride, cstride)\n    bottom = _unfold(x[rstride::rstride, 1:], 1, cstride, cstride)[..., ::-1]\n    right = _unfold(x[:-1, cstride::cstride], 0, rstride, rstride)\n    left = _unfold(x[1:, :-1:cstride], 0, rstride, rstride)[..., ::-1]\n    return np.concatenate((top, right, bottom, left), axis=2).reshape(-1, 2 * (rstride + cstride))",
    "docstring": "Extract perimeters of patches from *arr*. Extracted patches are of size (*rstride* + 1) x (*cstride* + 1) and share perimeters with their neighbors. The ordering of the vertices matches that returned by ``. Parameters ---------- x : ndarray, shape (N, M) Input array rstride : int Vertical (row) stride between corresponding elements of each patch cstride : int Horizontal (column) stride between corresponding elements of each patch Returns ------- ndarray, shape (N/rstride * M/cstride, 2 * (rstride + cstride))",
    "type": "function",
    "file_path": "matplotlib\\lib\\matplotlib\\cbook.py",
    "ast_data": "FunctionDef name:_array_patch_perimeters arg:x arg:rstride arg:cstride arguments arg arg arg BoolOp Compare Compare Compare Compare Assign Call Assign Call Assign Call Assign Call Return return:yes Call Call"
  },
  {
    "library": "scipy",
    "name": "sosfiltfilt",
    "source_code": "def sosfiltfilt(sos, x, axis=-1, padtype='odd', padlen=None):\n    xp = array_namespace(sos, x)\n    sos, n_sections = _validate_sos(sos)\n    x = _validate_x(x)\n    ntaps = 2 * n_sections + 1\n    ntaps -= min((sos[:, 2] == 0).sum(), (sos[:, 5] == 0).sum())\n    edge, ext = _validate_pad(padtype, padlen, x, axis, ntaps=ntaps)\n    zi = sosfilt_zi(sos)\n    zi_shape = [1] * x.ndim\n    zi_shape[axis] = 2\n    zi.shape = [n_sections] + zi_shape\n    x_0 = axis_slice(ext, stop=1, axis=axis)\n    y, zf = sosfilt(sos, ext, axis=axis, zi=zi * x_0)\n    y_0 = axis_slice(y, start=-1, axis=axis)\n    y, zf = sosfilt(sos, axis_reverse(y, axis=axis), axis=axis, zi=zi * y_0)\n    y = axis_reverse(y, axis=axis)\n    if edge > 0:\n        y = axis_slice(y, start=edge, stop=-edge, axis=axis)\n    return xp.asarray(y)",
    "docstring": "A forward-backward digital filter using cascaded second-order sections. See for more complete information about this method. Parameters ---------- sos : array_like Array of second-order filter coefficients, must have shape `xpadtypexaxispadlenfiltfiltscipy.signalxsosfiltxyy2` has a significant phase delay. >>> plt.plot(t, x, alpha=0.5, label='x(t)') >>> plt.plot(t, y, label='y(t)') >>> plt.plot(t, y2, label='y2(t)') >>> plt.legend(framealpha=1, shadow=True) >>> plt.grid(alpha=0.25) >>> plt.xlabel('t') >>> plt.show()",
    "type": "function",
    "file_path": "scipy\\scipy\\signal\\_signaltools.py",
    "ast_data": "FunctionDef name:sosfiltfilt arg:sos arg:x arg:axis arg:padtype arg:padlen arguments arg arg arg arg arg Assign Call Assign Call Assign Call Assign Call Call Compare Call Compare Assign Call Assign Call Assign Assign Assign Assign Call Assign Call Assign Call Assign Call Call Assign Call If Compare Assign Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "num_ran",
    "source_code": "def num_ran(self) -> int:\n    ret = len(self._vals)\n    for status in self._vals.values():\n        if status == Status.SKIPPED:\n            ret -= 1\n    return ret",
    "docstring": "Returns how many combos actually ran (weren't skipped).",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\fuzzer.py",
    "ast_data": "FunctionDef name:num_ran arg:self arguments arg Assign Call For Call If Compare Return return:yes"
  },
  {
    "library": "pandas",
    "name": "_validate_datetimelike_monotonic",
    "source_code": "def _validate_datetimelike_monotonic(self) -> None:\n    if self._on.hasnans:\n        self._raise_monotonic_error('values must not have NaT')\n    if not (self._on.is_monotonic_increasing or self._on.is_monotonic_decreasing):\n        self._raise_monotonic_error('values must be monotonic')",
    "docstring": "Validate self._on is monotonic (increasing or decreasing) and has no NaT values for frequency windows.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\window\\rolling.py",
    "ast_data": "FunctionDef name:_validate_datetimelike_monotonic arg:self arguments arg If Call If BoolOp Call"
  },
  {
    "library": "pytorch",
    "name": "mode",
    "source_code": "@property\ndef mode(self) -> Tensor:\n    raise NotImplementedError(f'{self.__class__} does not implement mode')",
    "docstring": "Returns the mode of the distribution.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributions\\distribution.py",
    "ast_data": "FunctionDef name:mode arg:self arguments arg Raise Call"
  },
  {
    "library": "tensorflow",
    "name": "rotate_transpose",
    "source_code": "def rotate_transpose(x, shift, name='rotate_transpose'):\n    with ops.name_scope(name, values=[x, shift]):\n        x = ops.convert_to_tensor(x, name='x')\n        shift = ops.convert_to_tensor(shift, name='shift')\n        check_ops.assert_integer(shift)\n        shift_value_static = tensor_util.constant_value(shift)\n        ndims = x.get_shape().ndims\n        if ndims is not None and shift_value_static is not None:\n            if ndims < 2:\n                return x\n            shift_value_static = np.sign(shift_value_static) * (abs(shift_value_static) % ndims)\n            if shift_value_static == 0:\n                return x\n            perm = np.roll(np.arange(ndims), shift_value_static)\n            return array_ops.transpose(x, perm=perm)\n        else:\n            ndims = array_ops.rank(x)\n            shift = array_ops.where_v2(math_ops.less(shift, 0), math_ops.mod(-shift, ndims), ndims - math_ops.mod(shift, ndims))\n            first = math_ops.range(0, shift)\n            last = math_ops.range(shift, ndims)\n            perm = array_ops.concat([last, first], 0)\n            return array_ops.transpose(x, perm=perm)",
    "docstring": "Circularly moves dims left or right. Effectively identical to: When additional graph-runtime checks are performed. These checks entail moving data from to GPU to CPU. Example: Args: x: . shift: . Number of dimensions to transpose left (shift0). name: Python . The name to give this op. Returns: rotated_x: Input with dimensions circularly rotated by shift. Raises: TypeError: if shift is not integer type.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\distributions\\util.py",
    "ast_data": "FunctionDef name:rotate_transpose arg:x arg:shift arg:name arguments arg arg arg With Call Assign Call Assign Call Call Assign Call Assign Call If BoolOp Compare Compare If Compare Return return:yes Assign Call Call If Compare Return return:yes Assign Call Call Return return:yes Call Assign Call Assign Call Call Call Call Assign Call Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "set_closed",
    "source_code": "def set_closed(self, closed):\n    if self._closed == bool(closed):\n        return\n    self._closed = bool(closed)\n    self.set_xy(self.get_xy())\n    self.stale = True",
    "docstring": "Set whether the polygon is closed. Parameters ---------- closed : bool True if the polygon is closed",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\patches.py",
    "ast_data": "FunctionDef name:set_closed arg:self arg:closed arguments arg arg If Compare Call Return return:no Assign Call Call Call Assign"
  },
  {
    "library": "tensorflow",
    "name": "__iter__",
    "source_code": "def __iter__(self):\n    return iter(self.read_value())",
    "docstring": "When executing eagerly, iterates over the value of the variable.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\variables.py",
    "ast_data": "FunctionDef name:__iter__ arg:self arguments arg Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "update_restore_inputs",
    "source_code": "def update_restore_inputs(self, checkpoint_key: str, shape_and_slice_spec: str) -> tuple[Sequence[str], Sequence[str]]:\n    keys = []\n    slices = []\n    logging.info('Updating restore v2 inputs for %s: %s', checkpoint_key, shape_and_slice_spec)\n    for i, layout in enumerate(self._to_shard_layout):\n        sub_checkpoint_key = checkpoint_key.replace(self._main_checkpoint_name, self._checkpoint_local_names[i])\n        logging.info('Will read sub key %s: %s', sub_checkpoint_key, layout.unsharded_shape)\n        keys.append(sub_checkpoint_key)\n        slices.append(_shard_info_str(layout.unsharded_shape, trackable_base.ShardInfo(offset=[0, 0], shape=layout.unsharded_shape)))\n    return (keys, slices)",
    "docstring": "Updates checkpoint key and slice spec acorrding to the resharding plan. Args: checkpoint_key: The input checkpoint key to be read. shape_and_slice_spec: The shape and slice spec of the checkpoint key to be read. Returns: A tuple of (keys, slices) that should be passed to restore_v2 inorder to reshard according to the resharding plan. The restored tensors from restore_v2 op will usually be passed to reshard method of this class to get the final resharded value.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\tpu\\tpu_embedding_v3_checkpoint_adapter.py",
    "ast_data": "FunctionDef name:update_restore_inputs arg:self arg:checkpoint_key arg:shape_and_slice_spec arguments arg arg arg Assign Assign Call For Call Assign Call Call Call Call Call Call Return return:yes"
  },
  {
    "library": "scipy",
    "name": "smoke_tutorials",
    "source_code": "@click.command()\n@click.argument('pytest_args', nargs=-1, metavar='PYTEST-ARGS', required=False)\n@click.option('--tests', '-t', default=None, multiple=True, metavar='TESTS', help='Specify *rst files to smoke test')\n@click.option('--verbose', '-v', default=False, is_flag=True, help='verbosity')\n@meson.build_dir_option\n@click.pass_context\ndef smoke_tutorials(ctx, pytest_args, tests, verbose, build_dir, *args, **kwargs):\n    click.secho('Invoking `build` prior to running tests for tutorials:', bold=True, fg='bright_green')\n    ctx.invoke(build)\n    meson._set_pythonpath(build_dir)\n    cmd = ['pytest']\n    if tests:\n        cmd += list(tests)\n    else:\n        cmd += ['doc/source/tutorial', '--doctest-glob=*rst']\n    if verbose:\n        cmd += ['-v']\n    extra_argv = list(pytest_args[:]) if pytest_args else []\n    if extra_argv and extra_argv[0] == '--':\n        extra_argv = extra_argv[1:]\n    cmd += extra_argv\n    cmd_str = ' '.join(cmd)\n    click.secho(cmd_str, bold=True, fg='bright_blue')\n    util.run(cmd)",
    "docstring": "🔧 Run doctests of user-facing rst tutorials. To test all tutorials in the scipy doc/source/tutorial directory, use spin smoke-tutorials To run tests on a specific RST file: \b spin smoke-tutorials doc/source/reference/stats.rst spin smoke-tutorials -t doc/source/reference/stats.rst \b Note: ----- \b - This command only runs doctests and skips everything under tests/ - This command only doctests public objects: those which are accessible from the top-level file.",
    "type": "function",
    "file_path": "scipy\\.spin\\cmds.py",
    "ast_data": "FunctionDef name:smoke_tutorials arg:ctx arg:pytest_args arg:tests arg:verbose arg:build_dir arguments arg arg arg arg arg arg arg Call Call Call Assign If Call If Assign Call If BoolOp Compare Assign Assign Call Call Call Call Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "max_over_ndim",
    "source_code": "def max_over_ndim(input, axis_list, keepdim=False):\n    axis_list.sort(reverse=True)\n    for axis in axis_list:\n        input, _ = input.max(axis, keepdim)\n    return input",
    "docstring": "Apply 'torch.max' over the given axes.",
    "type": "function",
    "file_path": "pytorch\\torch\\ao\\quantization\\_equalize.py",
    "ast_data": "FunctionDef name:max_over_ndim arg:input arg:axis_list arg:keepdim arguments arg arg arg Call For Assign Call Return return:yes"
  },
  {
    "library": "django",
    "name": "get_table_list",
    "source_code": "def get_table_list(self, cursor):\n    raise NotImplementedError('subclasses of BaseDatabaseIntrospection may require a get_table_list() method')",
    "docstring": "Return an unsorted list of TableInfo named tuples of all tables and views that exist in the database.",
    "type": "method",
    "file_path": "django\\django\\db\\backends\\base\\introspection.py",
    "ast_data": "FunctionDef name:get_table_list arg:self arg:cursor arguments arg arg Raise Call"
  },
  {
    "library": "cryptography",
    "name": "encode_public",
    "source_code": "def encode_public(self, public_key: ed25519.Ed25519PublicKey, f_pub: _FragList) -> None:\n    raw_public_key = public_key.public_bytes(Encoding.Raw, PublicFormat.Raw)\n    f_pub.put_sshstr(raw_public_key)",
    "docstring": "Write Ed25519 public key",
    "type": "method",
    "file_path": "cryptography\\src\\cryptography\\hazmat\\primitives\\serialization\\ssh.py",
    "ast_data": "FunctionDef name:encode_public arg:self arg:public_key arg:f_pub arguments arg arg arg Assign Call Call"
  },
  {
    "library": "tensorflow",
    "name": "disable_v2_behavior",
    "source_code": "@tf_export(v1=['disable_v2_behavior'])\ndef disable_v2_behavior():\n    _v2_behavior_usage_gauge.get_cell('disable').set(True)\n    tf2.disable()\n    ops.disable_eager_execution()\n    tensor_shape.disable_v2_tensorshape()\n    resource_variables_toggle.disable_resource_variables()\n    tensor.disable_tensor_equality()\n    control_flow_v2_toggles.disable_control_flow_v2()\n    for v2_disabler_name in _DATA_V2_CALLBACKS.list():\n        v2_disabler = _DATA_V2_CALLBACKS.lookup(v2_disabler_name)\n        v2_disabler()",
    "docstring": "Disables TensorFlow 2.x behaviors. This function can be called at the beginning of the program (before , or other structures have been created, and before devices have been initialized. It switches all global behaviors that are different between TensorFlow 1.x and 2.x to behave as intended for 1.x. User can call this function to disable 2.x behavior during complex migrations. @compatibility(TF2) Using this function indicates that your software is not compatible with eager execution and in TF2. To migrate to TF2, rewrite your code to be compatible with eager execution. Please refer to the [migration guide] ( for additional resource on the topic. @end_compatibility",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\compat\\v2_compat.py",
    "ast_data": "FunctionDef name:disable_v2_behavior arguments Call Call Call Call Call Call Call Call For Call Assign Call Call Call"
  },
  {
    "library": "scipy",
    "name": "upcast_char",
    "source_code": "def upcast_char(*args):\n    t = _upcast_memo.get(args)\n    if t is not None:\n        return t\n    t = upcast(*map(np.dtype, args))\n    _upcast_memo[args] = t\n    return t",
    "docstring": "Same as but taking dtype.char as input (faster).",
    "type": "function",
    "file_path": "scipy\\scipy\\sparse\\_sputils.py",
    "ast_data": "FunctionDef name:upcast_char arguments arg Assign Call If Compare Return return:yes Assign Call Call Assign Return return:yes"
  },
  {
    "library": "scipy",
    "name": "BadCoefficients",
    "source_code": "class BadCoefficients(UserWarning):\n    pass",
    "docstring": "Warning about badly conditioned filter coefficients",
    "type": "class",
    "file_path": "scipy\\scipy\\signal\\_filter_design.py",
    "ast_data": "ClassDef name:BadCoefficients"
  },
  {
    "library": "tensorflow",
    "name": "tfdbg_file_version",
    "source_code": "def tfdbg_file_version(self):\n    return self._file_version",
    "docstring": "Get the tfdbg file format version.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\debug\\lib\\debug_events_reader.py",
    "ast_data": "FunctionDef name:tfdbg_file_version arg:self arguments arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "track_node_mutations",
    "source_code": "def track_node_mutations(self, node: Node, flat_args_kwargs: list[Any], id_to_initial_version: dict[int, int]) -> None:\n    mutated_arg_positions = OrderedSet[int]()\n    for i, arg in enumerate(flat_args_kwargs):\n        val_id = id(arg)\n        if val_id in id_to_initial_version and id_to_initial_version[val_id] != arg._version:\n            mutated_arg_positions.add(i)\n    if mutated_arg_positions:\n        self.node_to_mutated_arg_positions[node] = mutated_arg_positions",
    "docstring": "This function tracks which argument positions are mutated by the given node. Subgraph HOP does not support input mutations today so we will skip regions which have inputs that are mutated.",
    "type": "method",
    "file_path": "pytorch\\torch\\_dynamo\\graph_region_tracker.py",
    "ast_data": "FunctionDef name:track_node_mutations arg:self arg:node arg:flat_args_kwargs arg:id_to_initial_version arguments arg arg arg arg Assign Call For Call Assign Call If BoolOp Compare Compare Call If Assign"
  },
  {
    "library": "cherrypy",
    "name": "args",
    "source_code": "@property\ndef args(self):\n    return cherrypy.serving.request.args",
    "docstring": "The ordered args should be accessible from post dispatch hooks.",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\_cpdispatch.py",
    "ast_data": "FunctionDef name:args arg:self arguments arg Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "set_data",
    "source_code": "def set_data(self, positions):\n    method = 'set_xdata' if self.direction == 'horizontal' else 'set_ydata'\n    for line, p in zip(self.artists, positions):\n        getattr(line, method)([p, p])",
    "docstring": "Set x- or y-positions of handles, depending on if the lines are vertical or horizontal. Parameters ---------- positions : tuple of length 2 Set the positions of the handle in data coordinates",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\widgets.py",
    "ast_data": "FunctionDef name:set_data arg:self arg:positions arguments arg arg Assign Compare For Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_pad_or_truncate_with_sequence_length",
    "source_code": "def _pad_or_truncate_with_sequence_length(self, embeddings: tensor.Tensor, sequence_length: int) -> tensor.Tensor:\n    original_sequence_length = embeddings.shape[1]\n    if original_sequence_length > sequence_length:\n        embeddings = array_ops.slice(embeddings, begin=[0, 0, 0], size=[-1, sequence_length, -1])\n    else:\n        embeddings = array_ops.pad(embeddings, paddings=[[0, 0], [0, sequence_length - original_sequence_length], [0, 0]])\n    return embeddings",
    "docstring": "Pad or truncate the embedding lookup result based on the sequence length. Args: embeddings: A rank 3 Tensor of the embedding lookup result. sequence_length: number of the max sequence length set in the feature config. Returns: A Tensor with second last axis padded or truncated.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\tpu\\tpu_embedding_v1.py",
    "ast_data": "FunctionDef name:_pad_or_truncate_with_sequence_length arg:self arg:embeddings arg:sequence_length arguments arg arg arg Assign If Compare Assign Call Assign Call Return return:yes"
  },
  {
    "library": "django",
    "name": "translation",
    "source_code": "def translation(language):\n    if language not in _translations:\n        _translations[language] = DjangoTranslation(language)\n    return _translations[language]",
    "docstring": "Return a translation object in the default 'django' domain.",
    "type": "function",
    "file_path": "django\\django\\utils\\translation\\trans_real.py",
    "ast_data": "FunctionDef name:translation arg:language arguments arg If Compare Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_safe_to_run",
    "source_code": "def _safe_to_run(self):\n    return sys.getrefcount(self._interpreter) == 2",
    "docstring": "Returns true if there exist no numpy array buffers. This means it is safe to run tflite calls that may destroy internally allocated memory. This works, because in the wrapper.cc we have made the numpy base be the self._interpreter.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\lite\\python\\interpreter.py",
    "ast_data": "FunctionDef name:_safe_to_run arg:self arguments arg Return return:yes Compare Call"
  },
  {
    "library": "scikit-learn",
    "name": "predict_proba",
    "source_code": "def predict_proba(self, raw_prediction):\n    if raw_prediction.ndim == 2 and raw_prediction.shape[1] == 1:\n        raw_prediction = raw_prediction.squeeze(1)\n    proba = np.empty((raw_prediction.shape[0], 2), dtype=raw_prediction.dtype)\n    proba[:, 1] = self.link.inverse(raw_prediction)\n    proba[:, 0] = 1 - proba[:, 1]\n    return proba",
    "docstring": "Predict probabilities. Parameters ---------- raw_prediction : array of shape (n_samples,) or (n_samples, 1) Raw prediction values (in link space). Returns ------- proba : array of shape (n_samples, 2) Element-wise class probabilities.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\_loss\\loss.py",
    "ast_data": "FunctionDef name:predict_proba arg:self arg:raw_prediction arguments arg arg If BoolOp Compare Compare Assign Call Assign Call Assign Call Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_get_kind_name",
    "source_code": "def _get_kind_name(item):\n    if isinstance(item, (str, bytes)):\n        kind = 'bytes_list'\n    elif isinstance(item, int):\n        kind = 'int64_list'\n    elif isinstance(item, float):\n        kind = 'float_list'\n    elif isinstance(item, Any):\n        kind = 'any_list'\n    else:\n        kind = 'node_list'\n    return kind",
    "docstring": "Returns the kind name in CollectionDef. Args: item: A data item. Returns: The string representation of the kind in CollectionDef.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\meta_graph.py",
    "ast_data": "FunctionDef name:_get_kind_name arg:item arguments arg If Call Assign If Call Assign If Call Assign If Call Assign Assign Return return:yes"
  },
  {
    "library": "pandas",
    "name": "decorate_axes",
    "source_code": "def decorate_axes(ax: Axes, freq: BaseOffset) -> None:\n    if not hasattr(ax, '_plot_data'):\n        ax._plot_data = []\n    ax.freq = freq\n    xaxis = ax.get_xaxis()\n    xaxis.freq = freq",
    "docstring": "Initialize axes for time-series plotting",
    "type": "function",
    "file_path": "pandas\\pandas\\plotting\\_matplotlib\\timeseries.py",
    "ast_data": "FunctionDef name:decorate_axes arg:ax arg:freq arguments arg arg If Call Assign Assign Assign Call Assign"
  },
  {
    "library": "kornia",
    "name": "find_homography_lines_dlt_iterated",
    "source_code": "def find_homography_lines_dlt_iterated(ls1: Tensor, ls2: Tensor, weights: Tensor, soft_inl_th: float=4.0, n_iter: int=5) -> Tensor:\n    H: Tensor = find_homography_lines_dlt(ls1, ls2, weights)\n    for _ in range(n_iter - 1):\n        errors: Tensor = line_segment_transfer_error_one_way(ls1, ls2, H, False)\n        weights_new: Tensor = torch.exp(-errors / (2.0 * soft_inl_th ** 2))\n        H = find_homography_lines_dlt(ls1, ls2, weights_new)\n    return H",
    "docstring": "Compute the homography matrix using the iteratively-reweighted least squares (IRWLS) from line segments. The linear system is solved by using the Reweighted Least Squares Solution for the 4 line segments algorithm. Args: ls1: A set of line segments in the first image with a tensor shape :math:. ls2: A set of line segments in the second image with a tensor shape :math:. weights: Tensor containing the weights per point correspondence with a shape of :math:. Used for the first iteration of the IRWLS. soft_inl_th: Soft inlier threshold used for weight calculation. n_iter: number of iterations. Returns: the computed homography matrix with shape :math:.",
    "type": "function",
    "file_path": "kornia\\kornia\\geometry\\homography.py",
    "ast_data": "FunctionDef name:find_homography_lines_dlt_iterated arg:ls1 arg:ls2 arg:weights arg:soft_inl_th arg:n_iter arguments arg arg arg arg arg Call For Call Call Call Assign Call Return return:yes"
  },
  {
    "library": "django",
    "name": "total_form_count",
    "source_code": "def total_form_count(self):\n    if self.is_bound:\n        return min(self.management_form.cleaned_data[TOTAL_FORM_COUNT], self.absolute_max)\n    else:\n        initial_forms = self.initial_form_count()\n        total_forms = max(initial_forms, self.min_num) + self.extra\n        if initial_forms > self.max_num >= 0:\n            total_forms = initial_forms\n        elif total_forms > self.max_num >= 0:\n            total_forms = self.max_num\n    return total_forms",
    "docstring": "Return the total number of forms in this FormSet.",
    "type": "method",
    "file_path": "django\\django\\forms\\formsets.py",
    "ast_data": "FunctionDef name:total_form_count arg:self arguments arg If Return return:yes Call Assign Call Assign Call If Compare Assign If Compare Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "get_optimizer_jit",
    "source_code": "@tf_export('config.optimizer.get_jit')\ndef get_optimizer_jit() -> str:\n    if context.context().optimizer_jit:\n        return 'autoclustering'\n    return ''",
    "docstring": "Returns JIT compilation configuration for code inside . Possible return values: - if [autoclustering]( is enabled - when no default compilation is applied.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\config.py",
    "ast_data": "FunctionDef name:get_optimizer_jit arguments If Call Return return:yes Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_get_tensor_details",
    "source_code": "def _get_tensor_details(self, tensor_index, subgraph_index):\n    tensor_index = int(tensor_index)\n    subgraph_index = int(subgraph_index)\n    tensor_name = self._interpreter.TensorName(tensor_index, subgraph_index)\n    tensor_size = self._interpreter.TensorSize(tensor_index, subgraph_index)\n    tensor_size_signature = self._interpreter.TensorSizeSignature(tensor_index, subgraph_index)\n    tensor_type = self._interpreter.TensorType(tensor_index, subgraph_index)\n    tensor_quantization = self._interpreter.TensorQuantization(tensor_index, subgraph_index)\n    tensor_quantization_params = self._interpreter.TensorQuantizationParameters(tensor_index, subgraph_index)\n    tensor_sparsity_params = self._interpreter.TensorSparsityParameters(tensor_index, subgraph_index)\n    if not tensor_type:\n        raise ValueError('Could not get tensor details')\n    details = {'name': tensor_name, 'index': tensor_index, 'shape': tensor_size, 'shape_signature': tensor_size_signature, 'dtype': tensor_type, 'quantization': tensor_quantization, 'quantization_parameters': {'scales': tensor_quantization_params[0], 'zero_points': tensor_quantization_params[1], 'quantized_dimension': tensor_quantization_params[2]}, 'sparsity_parameters': tensor_sparsity_params}\n    return details",
    "docstring": "Gets tensor details. Args: tensor_index: Tensor index of tensor to query. subgraph_index: Index of the subgraph. Returns: A dictionary containing the following fields of the tensor: 'name': The tensor name. 'index': The tensor index in the subgraph. 'shape': The shape of the tensor. 'quantization': Deprecated, use 'quantization_parameters'. This field only works for per-tensor quantization, whereas 'quantization_parameters' work in all cases. 'quantization_parameters': The parameters used to quantize the tensor: 'scales': List of scales (one if per-tensor quantization) 'zero_points': List of zero_points (one if per-tensor quantization) 'quantized_dimension': Specifies the dimension of per-axis quantization, in the case of multiple scales/zero_points. Raises: ValueError: If tensor_index is invalid.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\lite\\python\\interpreter.py",
    "ast_data": "FunctionDef name:_get_tensor_details arg:self arg:tensor_index arg:subgraph_index arguments arg arg arg Assign Call Assign Call Assign Call Assign Call Assign Call Assign Call Assign Call Assign Call Assign Call If Raise Call Assign Return return:yes"
  },
  {
    "library": "pandas",
    "name": "ptr",
    "source_code": "@property\ndef ptr(self) -> int:\n    return self._x.__array_interface__['data'][0]",
    "docstring": "Pointer to start of the buffer as an integer.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\interchange\\buffer.py",
    "ast_data": "FunctionDef name:ptr arg:self arguments arg Return return:yes"
  },
  {
    "library": "kornia",
    "name": "add_weighted",
    "source_code": "def add_weighted(src1: Tensor, alpha: Union[float, Tensor], src2: Tensor, beta: Union[float, Tensor], gamma: Union[float, Tensor]) -> Tensor:\n    KORNIA_CHECK_IS_TENSOR(src1)\n    KORNIA_CHECK_IS_TENSOR(src2)\n    KORNIA_CHECK(src1.shape == src2.shape, f'src1 and src2 have different shapes. Got {src1.shape} and {src2.shape}')\n    if isinstance(alpha, Tensor):\n        KORNIA_CHECK(src1.shape == alpha.shape, 'alpha has a different shape than src.')\n    else:\n        alpha = tensor(alpha, dtype=src1.dtype, device=src1.device)\n    if isinstance(beta, Tensor):\n        KORNIA_CHECK(src1.shape == beta.shape, 'beta has a different shape than src.')\n    else:\n        beta = tensor(beta, dtype=src1.dtype, device=src1.device)\n    if isinstance(gamma, Tensor):\n        KORNIA_CHECK(src1.shape == gamma.shape, 'gamma has a different shape than src.')\n    else:\n        gamma = tensor(gamma, dtype=src1.dtype, device=src1.device)\n    return src1 * alpha + src2 * beta + gamma",
    "docstring": "Calculate the weighted sum of two Tensors. .. image:: _static/img/add_weighted.png The function calculates the weighted sum of two Tensors as follows: .. math:: out = src1 * alpha + src2 * beta + gamma Args: src1: Tensor with an arbitrary shape, equal to shape of src2. alpha: weight of the src1 elements as Union[float, Tensor]. src2: Tensor with an arbitrary shape, equal to shape of src1. beta: weight of the src2 elements as Union[float, Tensor]. gamma: scalar added to each sum as Union[float, Tensor]. Returns: Weighted Tensor with shape equal to src1 and src2 shapes. Example: >>> input1 = torch.rand(1, 1, 5, 5) >>> input2 = torch.rand(1, 1, 5, 5) >>> output = add_weighted(input1, 0.5, input2, 0.5, 1.0) >>> output.shape torch.Size([1, 1, 5, 5]) Notes: Tensor alpha/beta/gamma have to be with shape broadcastable to src1 and src2 shapes.",
    "type": "function",
    "file_path": "kornia\\kornia\\enhance\\core.py",
    "ast_data": "FunctionDef name:add_weighted arg:src1 arg:alpha arg:src2 arg:beta arg:gamma arguments arg arg arg arg arg Call Call Call Compare If Call Call Compare Assign Call If Call Call Compare Assign Call If Call Call Compare Assign Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "set_input_quantized_indexes",
    "source_code": "def set_input_quantized_indexes(self, indexes: list[int]) -> PrepareCustomConfig:\n    self.input_quantized_indexes = indexes\n    return self",
    "docstring": "Set the indexes of the inputs of the graph that should be quantized. Inputs are otherwise assumed to be in fp32 by default instead.",
    "type": "method",
    "file_path": "pytorch\\torch\\ao\\quantization\\fx\\custom_config.py",
    "ast_data": "FunctionDef name:set_input_quantized_indexes arg:self arg:indexes arguments arg arg Assign Return return:yes"
  },
  {
    "library": "scipy",
    "name": "_as2d",
    "source_code": "def _as2d(ar):\n    if ar.ndim == 2:\n        return ar\n    else:\n        aux = np.asarray(ar)\n        aux.shape = (ar.shape[0], 1)\n        return aux",
    "docstring": "If the input array is 2D return it, if it is 1D, append a dimension, making it a column vector.",
    "type": "function",
    "file_path": "scipy\\scipy\\sparse\\linalg\\_eigen\\lobpcg\\lobpcg.py",
    "ast_data": "FunctionDef name:_as2d arg:ar arguments arg If Compare Return return:yes Assign Call Assign Return return:yes"
  },
  {
    "library": "numpy",
    "name": "NumericNotFoundError",
    "source_code": "class NumericNotFoundError(NotFoundError):\n    pass",
    "docstring": "Numeric ( module not found. Get it from above location, install it, and retry setup.py.",
    "type": "class",
    "file_path": "numpy\\numpy\\distutils\\system_info.py",
    "ast_data": "ClassDef name:NumericNotFoundError"
  },
  {
    "library": "pandas",
    "name": "set_properties",
    "source_code": "@Substitution(subset=subset_args)\ndef set_properties(self, subset: Subset | None=None, **kwargs) -> Styler:\n    values = ''.join([f'{p}: {v};' for p, v in kwargs.items()])\n    return self.map(lambda x: values, subset=subset)",
    "docstring": "Set defined CSS-properties to each Styler.mapTable Visualization `_ user guide for more details.",
    "type": "method",
    "file_path": "pandas\\pandas\\io\\formats\\style.py",
    "ast_data": "FunctionDef name:set_properties arg:self arg:subset arguments arg arg arg Assign Call Call Return return:yes Call arguments arg Call"
  },
  {
    "library": "tensorflow",
    "name": "_get_promo_mode_enum",
    "source_code": "def _get_promo_mode_enum(dtype_conversion_mode) -> PromoMode:\n    if dtype_conversion_mode == 'off':\n        return PromoMode.OFF\n    if dtype_conversion_mode == 'legacy':\n        return PromoMode.LEGACY\n    elif dtype_conversion_mode == 'safe':\n        return PromoMode.SAFE\n    elif dtype_conversion_mode == 'all':\n        return PromoMode.ALL\n    else:\n        raise ValueError(f\"The provided promotion mode {dtype_conversion_mode} does not exist. Make sure the provided dtype conversion mode is one of the followings: 'off', 'legacy', 'safe' or 'all'.\")",
    "docstring": "Returns the corresponding PromoMode enum value from string.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\ops.py",
    "ast_data": "FunctionDef name:_get_promo_mode_enum arg:dtype_conversion_mode arguments arg If Compare Return return:yes If Compare Return return:yes If Compare Return return:yes If Compare Return return:yes Raise Call"
  },
  {
    "library": "scikit-learn",
    "name": "_is_arraylike",
    "source_code": "def _is_arraylike(x):\n    if sp.issparse(x):\n        return False\n    return hasattr(x, '__len__') or hasattr(x, 'shape') or hasattr(x, '__array__')",
    "docstring": "Returns whether the input is array-like.",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\utils\\validation.py",
    "ast_data": "FunctionDef name:_is_arraylike arg:x arguments arg If Call Return return:yes Return return:yes BoolOp Call Call Call"
  },
  {
    "library": "matplotlib",
    "name": "_ensure_cmap",
    "source_code": "def _ensure_cmap(cmap):\n    if isinstance(cmap, colors.Colormap):\n        return cmap\n    cmap_name = mpl._val_or_rc(cmap, 'image.cmap')\n    if cmap_name not in _colormaps:\n        _api.check_in_list(sorted(_colormaps), cmap=cmap_name)\n    return mpl.colormaps[cmap_name]",
    "docstring": "Ensure that we have a object. For internal use to preserve type stability of errors. Parameters ---------- cmap : None, str, Colormap - if a , return it - if a string, look it up in mpl.colormaps - if None, look up the default color map in mpl.colormaps Returns ------- Colormap",
    "type": "function",
    "file_path": "matplotlib\\lib\\matplotlib\\cm.py",
    "ast_data": "FunctionDef name:_ensure_cmap arg:cmap arguments arg If Call Return return:yes Assign Call If Compare Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "set_preserved_attributes",
    "source_code": "def set_preserved_attributes(self, attributes: list[str]) -> PrepareCustomConfig:\n    self.preserved_attributes = attributes\n    return self",
    "docstring": "Set the names of the attributes that will persist in the graph module even if they are not used in the model's `` method.",
    "type": "method",
    "file_path": "pytorch\\torch\\ao\\quantization\\fx\\custom_config.py",
    "ast_data": "FunctionDef name:set_preserved_attributes arg:self arg:attributes arguments arg arg Assign Return return:yes"
  },
  {
    "library": "scipy",
    "name": "_split_subregion",
    "source_code": "def _split_subregion(a, b, xp, split_at=None):\n    xp = array_namespace(a, b)\n    if split_at is None:\n        split_at = (a + b) / 2\n    left = [xp.stack((a[i], split_at[i])) for i in range(a.shape[0])]\n    right = [xp.stack((split_at[i], b[i])) for i in range(b.shape[0])]\n    a_sub = _cartesian_product(left)\n    b_sub = _cartesian_product(right)\n    for i in range(a_sub.shape[0]):\n        yield (a_sub[i, ...], b_sub[i, ...])",
    "docstring": "Given the coordinates of a region like a=[0, 0] and b=[1, 1], yield the coordinates of all subregions, which in this case would be:: ([0, 0], [1/2, 1/2]), ([0, 1/2], [1/2, 1]), ([1/2, 0], [1, 1/2]), ([1/2, 1/2], [1, 1])",
    "type": "function",
    "file_path": "scipy\\scipy\\integrate\\_rules\\_base.py",
    "ast_data": "FunctionDef name:_split_subregion arg:a arg:b arg:xp arg:split_at arguments arg arg arg arg Assign Call If Compare Assign Assign Call Call Assign Call Call Assign Call Assign Call For Call"
  },
  {
    "library": "numpy",
    "name": "write_array_header_1_0",
    "source_code": "@set_module('numpy.lib.format')\ndef write_array_header_1_0(fp, d):\n    _write_array_header(fp, d, (1, 0))",
    "docstring": "Write the header for an array using the 1.0 format. Parameters ---------- fp : filelike object d : dict This has the appropriate entries for writing its string representation to the header of the file.",
    "type": "function",
    "file_path": "numpy\\numpy\\lib\\_format_impl.py",
    "ast_data": "FunctionDef name:write_array_header_1_0 arg:fp arg:d arguments arg arg Call Call"
  },
  {
    "library": "tensorflow",
    "name": "GrpcDebugWrapperSession",
    "source_code": "class GrpcDebugWrapperSession(framework.NonInteractiveDebugWrapperSession):\n\n    def __init__(self, sess, grpc_debug_server_addresses, watch_fn=None, thread_name_filter=None):\n        framework.NonInteractiveDebugWrapperSession.__init__(self, sess, watch_fn=watch_fn, thread_name_filter=thread_name_filter)\n        if isinstance(grpc_debug_server_addresses, str):\n            self._grpc_debug_server_urls = [self._normalize_grpc_url(grpc_debug_server_addresses)]\n        elif isinstance(grpc_debug_server_addresses, list):\n            self._grpc_debug_server_urls = []\n            for address in grpc_debug_server_addresses:\n                if not isinstance(address, str):\n                    raise TypeError('Expected type str in list grpc_debug_server_addresses, received type %s' % type(address))\n                self._grpc_debug_server_urls.append(self._normalize_grpc_url(address))\n        else:\n            raise TypeError('Expected type str or list in grpc_debug_server_addresses, received type %s' % type(grpc_debug_server_addresses))\n\n    def prepare_run_debug_urls(self, fetches, feed_dict):\n        return self._grpc_debug_server_urls\n\n    def _normalize_grpc_url(self, address):\n        return common.GRPC_URL_PREFIX + address if not address.startswith(common.GRPC_URL_PREFIX) else address",
    "docstring": "Debug Session wrapper that send debug data to gRPC stream(s).",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\debug\\wrappers\\grpc_wrapper.py",
    "ast_data": "ClassDef name:GrpcDebugWrapperSession FunctionDef name:__init__ arg:self arg:sess arg:grpc_debug_server_addresses arg:watch_fn arg:thread_name_filter arguments arg arg arg arg arg Call If Call Assign Call If Call Assign For If Call Raise Call Call Call Call Raise Call Call FunctionDef name:prepare_run_debug_urls arg:self arg:fetches arg:feed_dict arguments arg arg arg Return return:yes FunctionDef name:_normalize_grpc_url arg:self arg:address arguments arg arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "extend",
    "source_code": "def extend(self, values: Iterable[Any]) -> Self:\n    if not isinstance(values, container_abcs.Iterable) or isinstance(values, torch.Tensor):\n        raise TypeError('ParameterList.extend should be called with an iterable, but got ' + type(values).__name__)\n    for value in values:\n        self.append(value)\n    return self",
    "docstring": "Append values from a Python iterable to the end of the list. Args: values (iterable): iterable of values to append",
    "type": "method",
    "file_path": "pytorch\\torch\\nn\\modules\\container.py",
    "ast_data": "FunctionDef name:extend arg:self arg:values arguments arg arg If BoolOp Call Call Raise Call Call For Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "_repr_html_",
    "source_code": "def _repr_html_(self):\n    fmt = mpl.rcParams['animation.html']\n    if fmt == 'html5':\n        return self.to_html5_video()\n    elif fmt == 'jshtml':\n        return self.to_jshtml()",
    "docstring": "IPython display hook for rendering.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\animation.py",
    "ast_data": "FunctionDef name:_repr_html_ arg:self arguments arg Assign If Compare Return return:yes Call If Compare Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "_kl_divergence",
    "source_code": "def _kl_divergence(params, P, degrees_of_freedom, n_samples, n_components, skip_num_points=0, compute_error=True):\n    X_embedded = params.reshape(n_samples, n_components)\n    dist = pdist(X_embedded, 'sqeuclidean')\n    dist /= degrees_of_freedom\n    dist += 1.0\n    dist **= (degrees_of_freedom + 1.0) / -2.0\n    Q = np.maximum(dist / (2.0 * np.sum(dist)), MACHINE_EPSILON)\n    if compute_error:\n        kl_divergence = 2.0 * np.dot(P, np.log(np.maximum(P, MACHINE_EPSILON) / Q))\n    else:\n        kl_divergence = np.nan\n    grad = np.ndarray((n_samples, n_components), dtype=params.dtype)\n    PQd = squareform((P - Q) * dist)\n    for i in range(skip_num_points, n_samples):\n        grad[i] = np.dot(np.ravel(PQd[i], order='K'), X_embedded[i] - X_embedded)\n    grad = grad.ravel()\n    c = 2.0 * (degrees_of_freedom + 1.0) / degrees_of_freedom\n    grad *= c\n    return (kl_divergence, grad)",
    "docstring": "t-SNE objective function: gradient of the KL divergence of p_ijs and q_ijs and the absolute error. Parameters ---------- params : ndarray of shape (n_params,) Unraveled embedding. P : ndarray of shape (n_samples * (n_samples-1) / 2,) Condensed joint probability matrix. degrees_of_freedom : int Degrees of freedom of the Student's-t distribution. n_samples : int Number of samples. n_components : int Dimension of the embedded space. skip_num_points : int, default=0 This does not compute the gradient for points with indices below . This is useful when computing transforms of new data where you'd like to keep the old data fixed. compute_error: bool, default=True If False, the kl_divergence is not computed and returns NaN. Returns ------- kl_divergence : float Kullback-Leibler divergence of p_ij and q_ij. grad : ndarray of shape (n_params,) Unraveled gradient of the Kullback-Leibler divergence with respect to the embedding.",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\manifold\\_t_sne.py",
    "ast_data": "FunctionDef name:_kl_divergence arg:params arg:P arg:degrees_of_freedom arg:n_samples arg:n_components arg:skip_num_points arg:compute_error arguments arg arg arg arg arg arg arg Assign Call Assign Call Assign Call Call If Assign Call Call Call Assign Assign Call Assign Call For Call Assign Call Call Assign Call Assign Return return:yes"
  },
  {
    "library": "django",
    "name": "clean",
    "source_code": "def clean(self, value):\n    super().clean(value)\n    for field in self.fields:\n        value = field.clean(value)\n    return value",
    "docstring": "Validate the given value against all of self.fields, which is a list of Field instances.",
    "type": "method",
    "file_path": "django\\django\\forms\\fields.py",
    "ast_data": "FunctionDef name:clean arg:self arg:value arguments arg arg Call Call For Assign Call Return return:yes"
  },
  {
    "library": "cherrypy",
    "name": "sysfiles",
    "source_code": "def sysfiles(self):\n    search_mod_names = filter(re.compile(self.match).match, list(sys.modules.keys()))\n    mods = map(sys.modules.get, search_mod_names)\n    return set(filter(None, map(self._file_for_module, mods)))",
    "docstring": "Return a Set of sys.modules filenames to monitor.",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\process\\plugins.py",
    "ast_data": "FunctionDef name:sysfiles arg:self arguments arg Assign Call Call Call Call Assign Call Return return:yes Call Call Call"
  },
  {
    "library": "cherrypy",
    "name": "__len__",
    "source_code": "def __len__(self):\n    raise NotImplementedError",
    "docstring": "Return the number of active sessions.",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\lib\\sessions.py",
    "ast_data": "FunctionDef name:__len__ arg:self arguments arg Raise"
  },
  {
    "library": "tensorflow",
    "name": "_set_control_flow_context",
    "source_code": "def _set_control_flow_context(self, ctx) -> None:\n    self._control_flow_context = ctx",
    "docstring": "Sets the current control flow context. Args: ctx: a context object.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\ops.py",
    "ast_data": "FunctionDef name:_set_control_flow_context arg:self arg:ctx arguments arg arg Assign"
  },
  {
    "library": "matplotlib",
    "name": "tricontourf",
    "source_code": "@_docstring.Substitution(func='tricontourf', type='regions')\n@_docstring.interpd\ndef tricontourf(ax, *args, **kwargs):\n    kwargs['filled'] = True\n    return TriContourSet(ax, *args, **kwargs)",
    "docstring": "%(_tricontour_doc)s hatches : list[str], optional A list of crosshatch patterns to use on the filled areas. If None, no hatching will be added to the contour. Notes ----- fills intervals that are closed at the top; that is, for boundaries *z1* and *z2*, the filled region is:: z1 < Z <= z2 except for the lowest interval, which is closed on both sides (i.e. it includes the lowest value).",
    "type": "function",
    "file_path": "matplotlib\\lib\\matplotlib\\tri\\_tricontour.py",
    "ast_data": "FunctionDef name:tricontourf arg:ax arguments arg arg arg Assign Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "softplus",
    "source_code": "@tf_export('math.softplus', 'nn.softplus', v1=['math.softplus', 'nn.softplus'])\n@dispatch.register_unary_elementwise_api\n@dispatch.add_dispatch_support\ndef softplus(features, name=None):\n    return gen_nn_ops.softplus(features, name)",
    "docstring": "Computes elementwise softplus: . is a smooth approximation of . Like , always takes on positive values. Example: >>> import tensorflow as tf >>> tf.math.softplus(tf.range(0, 2, dtype=tf.float32)).numpy() array([0.6931472, 1.3132616], dtype=float32) Args: features: name: Optional: name to associate with this operation. Returns:",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\math_ops.py",
    "ast_data": "FunctionDef name:softplus arg:features arg:name arguments arg arg Return return:yes Call Call"
  },
  {
    "library": "django",
    "name": "adapt_datetimefield_value",
    "source_code": "def adapt_datetimefield_value(self, value):\n    if value is None:\n        return None\n    if timezone.is_aware(value):\n        if settings.USE_TZ:\n            value = timezone.make_naive(value, self.connection.timezone)\n        else:\n            raise ValueError('Oracle backend does not support timezone-aware datetimes when USE_TZ is False.')\n    return Oracle_datetime.from_datetime(value)",
    "docstring": "Transform a datetime value to an object compatible with what is expected by the backend driver for datetime columns. If naive datetime is passed assumes that is in UTC. Normally Django models.DateTimeField makes sure that if USE_TZ is True passed datetime is timezone aware.",
    "type": "method",
    "file_path": "django\\django\\db\\backends\\oracle\\operations.py",
    "ast_data": "FunctionDef name:adapt_datetimefield_value arg:self arg:value arguments arg arg If Compare Return return:no If Call If Assign Call Raise Call Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "BarAB",
    "source_code": "@_register_style(_style_list, name='|-|')\nclass BarAB(_Curve):\n    arrow = '|-|'\n\n    def __init__(self, widthA=1.0, angleA=0, widthB=1.0, angleB=0):\n        super().__init__(widthA=widthA, lengthA=0, angleA=angleA, widthB=widthB, lengthB=0, angleB=angleB)",
    "docstring": "An arrow with vertical bars `` at both ends.",
    "type": "class",
    "file_path": "matplotlib\\lib\\matplotlib\\patches.py",
    "ast_data": "ClassDef name:BarAB Assign FunctionDef name:__init__ arg:self arg:widthA arg:angleA arg:widthB arg:angleB arguments arg arg arg arg arg Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_IsotonicRegressionGrad",
    "source_code": "@ops.RegisterGradient('IsotonicRegression')\ndef _IsotonicRegressionGrad(op: ops.Operation, grad_output, grad_segments):\n    del grad_segments\n    segments = op.outputs[1]\n    return _MeanAggregator(grad_output, segments)",
    "docstring": "Gradient for the isotonic regression function. Args: op: The IsotonicRegression tensorflow op. grad_output: Tensor of incoming gradients with respect to the output. grad_segments: Tensor of incoming gradients with respect to the segments. Returns: A tensor, same size as with the gradient with respect to the input.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\nn_grad.py",
    "ast_data": "FunctionDef name:_IsotonicRegressionGrad arg:op arg:grad_output arg:grad_segments arguments arg arg arg Assign Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "manual_seed",
    "source_code": "def manual_seed(seed: int) -> None:\n    if not torch._C._has_mps:\n        return\n    seed = int(seed)\n    _get_default_mps_generator().manual_seed(seed)",
    "docstring": "Sets the seed for generating random numbers. Args: seed (int): The desired seed.",
    "type": "function",
    "file_path": "pytorch\\torch\\mps\\__init__.py",
    "ast_data": "FunctionDef name:manual_seed arg:seed arguments arg If Return return:no Assign Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_is_none_or_undef",
    "source_code": "def _is_none_or_undef(value):\n    return value is None or isinstance(value, variables.UndefinedReturnValue) or isinstance(value, variables.Undefined)",
    "docstring": "Tests whether a value is None or undefined. AutoGraph represents undefined symbols using special objects of type Undefined or UndefinedReturnValue. Args: value: value to test Returns: Boolean",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\autograph\\operators\\control_flow.py",
    "ast_data": "FunctionDef name:_is_none_or_undef arg:value arguments arg Return return:yes BoolOp Compare Call Call"
  },
  {
    "library": "matplotlib",
    "name": "set_rasterized",
    "source_code": "def set_rasterized(self, rasterized):\n    supports_rasterization = getattr(self.draw, '_supports_rasterization', False)\n    if rasterized and (not supports_rasterization):\n        _api.warn_external(f\"Rasterization of '{self}' will be ignored\")\n    self._rasterized = rasterized",
    "docstring": "Force rasterized (bitmap) drawing for vector graphics output. Rasterized drawing is not supported by all artists. If you try to enable this on an artist that does not support it, the command has no effect and a warning will be issued. This setting is ignored for pixel-based output. See also :doc:. Parameters ---------- rasterized : bool",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\artist.py",
    "ast_data": "FunctionDef name:set_rasterized arg:self arg:rasterized arguments arg arg Assign Call If BoolOp Call Assign"
  },
  {
    "library": "kornia",
    "name": "create_spiral_path",
    "source_code": "def create_spiral_path(cameras: PinholeCamera, rad: float, num_views: int, num_circles: int) -> PinholeCamera:\n    mean_center = cameras.translation_vector.mean(0, False).squeeze(-1)\n    device = cameras.intrinsics.device\n    t = torch.linspace(0, 2 * math.pi * num_circles, num_views, device=device)\n    cos_t = cos(t) * rad\n    sin_t = -sin(t) * rad\n    sin_05t = -sin(0.5 * t) * rad\n    translation_vector = torch.unsqueeze(mean_center, dim=0) + stack((cos_t, sin_t, sin_05t)).permute((1, 0))\n    mean_intrinsics = cameras.intrinsics.mean(0, True).repeat(num_views, 1, 1)\n    mean_extrinsics = cameras.extrinsics.mean(0, True).repeat(num_views, 1, 1)\n    extrinsics = mean_extrinsics\n    extrinsics[:, :3, 3] = translation_vector\n    height = torch.tensor([cameras.height[0]] * num_views, device=device)\n    width = torch.tensor([cameras.width[0]] * num_views, device=device)\n    return PinholeCamera(mean_intrinsics, extrinsics, height, width)",
    "docstring": "Create a PinholeCamera object with cameras that follow a spiral path. Used for novel view synthesis for face facing models. Args: cameras: Scene cameras used to train the NeRF model: PinholeCamera rad: Spiral radius: float num_views: Number of created cameras: int num_circles: Number of spiral circles: int",
    "type": "function",
    "file_path": "kornia\\kornia\\nerf\\camera_utils.py",
    "ast_data": "FunctionDef name:create_spiral_path arg:cameras arg:rad arg:num_views arg:num_circles arguments arg arg arg arg Assign Call Call Assign Assign Call Assign Call Assign Call Assign Call Assign Call Call Call Assign Call Call Assign Call Call Assign Assign Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "django",
    "name": "InsertUnnest",
    "source_code": "class InsertUnnest(list):\n\n    def __str__(self):\n        return 'UNNEST(%s)' % ', '.join(self)",
    "docstring": "Sentinel value to signal DatabaseOperations.bulk_insert_sql() that the UNNEST strategy should be used for the bulk insert.",
    "type": "class",
    "file_path": "django\\django\\db\\backends\\postgresql\\compiler.py",
    "ast_data": "ClassDef name:InsertUnnest FunctionDef name:__str__ arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "can_use_efficient_attention",
    "source_code": "def can_use_efficient_attention(params: SDPAParams, debug: bool=False) -> bool:\n    return torch._C._can_use_mem_efficient_attention(params, debug)",
    "docstring": "Check if efficient_attention can be utilized in scaled_dot_product_attention. Args: params: An instance of SDPAParams containing the tensors for query, key, value, an optional attention mask, dropout rate, and a flag indicating if the attention is causal. debug: Whether to logging.warn with information as to why efficient_attention could not be run. Defaults to False. Returns: True if efficient_attention can be used with the given parameters; otherwise, False. Note: This function is dependent on a CUDA-enabled build of PyTorch. It will return False in non-CUDA environments.",
    "type": "function",
    "file_path": "pytorch\\torch\\backends\\cuda\\__init__.py",
    "ast_data": "FunctionDef name:can_use_efficient_attention arg:params arg:debug arguments arg arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "all_min",
    "source_code": "def all_min(tensors):\n    return _apply_all_reduce('min', tensors)",
    "docstring": "Returns a list of tensors with the all-reduce min across . The computation is done with an all-reduce operation, so if only some of the returned tensors are evaluated then the computation will hang. Args: tensors: The input tensors across which to reduce; must be assigned to GPU devices. Returns: List of tensors, each with the minimum of the input tensors, where tensor i has the same device as .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\nccl_ops.py",
    "ast_data": "FunctionDef name:all_min arg:tensors arguments arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "set_is_last_backward",
    "source_code": "def set_is_last_backward(self, is_last_backward: bool) -> None:\n    state = self._get_fsdp_state()\n    state._state_ctx.is_last_backward = is_last_backward",
    "docstring": "Sets whether the next backward is the last one. On the last backward, FSDP waits on pending gradient reduction and clears internal data data structures for backward prefetching. This can be useful for microbatching.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\fsdp\\_fully_shard\\_fully_shard.py",
    "ast_data": "FunctionDef name:set_is_last_backward arg:self arg:is_last_backward arguments arg arg Assign Call Assign"
  },
  {
    "library": "scipy",
    "name": "boxcox_normplot",
    "source_code": "def boxcox_normplot(x, la, lb, plot=None, N=80):\n    return _normplot('boxcox', x, la, lb, plot, N)",
    "docstring": "Compute parameters for a Box-Cox normality plot, optionally show it. A Box-Cox normality plot shows graphically what the best transformation parameter is to use in to obtain a distribution that is close to normal. Parameters ---------- x : array_like Input array. la, lb : scalar The lower and upper bounds for the `boxcoxplotmatplotlib.pyplotlalbprobplotxplotboxcox_normplotprobplot` and plot it in the same plot: >>> _, maxlog = stats.boxcox(x) >>> ax.axvline(maxlog, color='r') >>> plt.show()",
    "type": "function",
    "file_path": "scipy\\scipy\\stats\\_morestats.py",
    "ast_data": "FunctionDef name:boxcox_normplot arg:x arg:la arg:lb arg:plot arg:N arguments arg arg arg arg arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "get_process_exit_code",
    "source_code": "def get_process_exit_code(self, task_type, task_id):\n    with self._process_lock:\n        p = self._processes[task_type, task_id]\n    return p.exitcode if p else None",
    "docstring": "Returns the subprocess exit code given the task type and task id. Args: task_type: The task type. task_id: The task id. Returns: The subprocess exit code; if the subprocess has not exited yet. Raises: KeyError: If the corresponding subprocess is not found with and .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\multi_process_runner.py",
    "ast_data": "FunctionDef name:get_process_exit_code arg:self arg:task_type arg:task_id arguments arg arg arg With Assign Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_ignore_module",
    "source_code": "def _ignore_module(module: nn.Module, ignored_params: set[nn.Parameter], ignore_decision: dict[nn.Module, bool]) -> bool:\n    if module in ignore_decision:\n        return ignore_decision[module]\n    if len(list(module.buffers(recurse=False))) > 0:\n        ignore_decision[module] = False\n        return False\n    for _, param in module.named_parameters(recurse=False):\n        if param not in ignored_params:\n            ignore_decision[module] = False\n            return False\n    for child in list(module.children()):\n        ignore_child = _ignore_module(child, ignored_params, ignore_decision)\n        if not ignore_child:\n            ignore_decision[module] = False\n            return False\n    ignore_decision[module] = True\n    return True",
    "docstring": "Decide if it is safe to ignore a module for applying fully_shard.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\fsdp\\_fully_shard\\_fsdp_init.py",
    "ast_data": "FunctionDef name:_ignore_module arg:module arg:ignored_params arg:ignore_decision arguments arg arg arg If Compare Return return:yes If Compare Call Call Call Assign Return return:yes For Call If Compare Assign Return return:yes For Call Call Assign Call If Assign Return return:yes Assign Return return:yes"
  },
  {
    "library": "kornia",
    "name": "JPEGGenerator",
    "source_code": "class JPEGGenerator(RandomGeneratorBase):\n\n    def __init__(self, jpeg_quality: Union[Tensor, float, Tuple[float, float], List[float]]=50.0) -> None:\n        super().__init__()\n        self.jpeg_quality: Union[Tensor, float, Tuple[float, float], List[float]] = jpeg_quality\n\n    def __repr__(self) -> str:\n        return f'RandomJPEG quality={self.jpeg_quality}'\n\n    def make_samplers(self, device: torch.device, dtype: torch.dtype) -> None:\n        jpeg_quality = _range_bound(self.jpeg_quality, 'jpeg_quality', center=50.0, bounds=(1, 100), device=device, dtype=dtype)\n        _joint_range_check(jpeg_quality, 'jpeg_quality', (1, 100))\n        self.jpeg_quality_sampler = UniformDistribution(jpeg_quality[0], jpeg_quality[1], validate_args=False)\n\n    def forward(self, batch_shape: Tuple[int, ...], same_on_batch: bool=False) -> Dict[str, Tensor]:\n        batch_size = batch_shape[0]\n        _common_param_check(batch_size, same_on_batch)\n        _device, _dtype = _extract_device_dtype([self.jpeg_quality])\n        jpeg_quality_value = _adapted_rsampling((batch_size,), self.jpeg_quality_sampler, same_on_batch)\n        return {'jpeg_quality': jpeg_quality_value.to(device=_device, dtype=_dtype)}",
    "docstring": "Generate random JPEG augmentation parameters for a batch. Args: jpeg_quality: The RandomJPEG quality to apply Returns: A dict of parameters to be passed for transformation. - jpeg_quality: element-wise contrast factors with a shape of (B,). Note: The generated random numbers are not reproducible across different devices and dtypes. By default, the parameters will be generated on CPU in float32. This can be changed by calling ``.",
    "type": "class",
    "file_path": "kornia\\kornia\\augmentation\\random_generator\\_2d\\jpeg.py",
    "ast_data": "ClassDef name:JPEGGenerator FunctionDef name:__init__ arg:self arg:jpeg_quality arguments arg arg Call Call FunctionDef name:__repr__ arg:self arguments arg Return return:yes FunctionDef name:make_samplers arg:self arg:device arg:dtype arguments arg arg arg Assign Call Call Assign Call FunctionDef name:forward arg:self arg:batch_shape arg:same_on_batch arguments arg arg arg Assign Call Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "cherrypy",
    "name": "__init__",
    "source_code": "def __init__(self, coverage, root=None):\n    self.coverage = coverage\n    if root is None:\n        root = os.path.dirname(cherrypy.__file__)\n    self.root = root",
    "docstring": "Initialize the coverage stats application.",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\lib\\covercp.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:coverage arg:root arguments arg arg arg Assign If Compare Assign Call Assign"
  },
  {
    "library": "cryptography",
    "name": "verify",
    "source_code": "@abc.abstractmethod\ndef verify(self, signature: Buffer, data: Buffer, algorithm: asym_utils.Prehashed | hashes.HashAlgorithm) -> None:\n    pass",
    "docstring": "Verifies the signature of the data.",
    "type": "method",
    "file_path": "cryptography\\src\\cryptography\\hazmat\\primitives\\asymmetric\\dsa.py",
    "ast_data": "FunctionDef name:verify arg:self arg:signature arg:data arg:algorithm arguments arg arg arg arg"
  },
  {
    "library": "tensorflow",
    "name": "GetProtos",
    "source_code": "def GetProtos(self):\n    return self._protos",
    "docstring": "Return the list of protos stored.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\tools\\api\\lib\\python_object_to_proto_visitor.py",
    "ast_data": "FunctionDef name:GetProtos arg:self arguments arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "TanhTransform",
    "source_code": "class TanhTransform(Transform):\n    domain = constraints.real\n    codomain = constraints.interval(-1.0, 1.0)\n    bijective = True\n    sign = +1\n\n    def __eq__(self, other):\n        return isinstance(other, TanhTransform)\n\n    def _call(self, x):\n        return x.tanh()\n\n    def _inverse(self, y):\n        return torch.atanh(y)\n\n    def log_abs_det_jacobian(self, x, y):\n        return 2.0 * (math.log(2.0) - x - softplus(-2.0 * x))",
    "docstring": "Transform via the mapping :math:. It is equivalent to .. code-block:: python ComposeTransform( [ AffineTransform(0.0, 2.0), SigmoidTransform(), AffineTransform(-1.0, 2.0), ] ) However this might not be numerically stable, thus it is recommended to use instead. Note that one should use when it comes to values.",
    "type": "class",
    "file_path": "pytorch\\torch\\distributions\\transforms.py",
    "ast_data": "ClassDef name:TanhTransform Assign Assign Call Assign Assign FunctionDef name:__eq__ arg:self arg:other arguments arg arg Return return:yes Call FunctionDef name:_call arg:self arg:x arguments arg arg Return return:yes Call FunctionDef name:_inverse arg:self arg:y arguments arg arg Return return:yes Call FunctionDef name:log_abs_det_jacobian arg:self arg:x arg:y arguments arg arg arg Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "_parse_kernel_fn_code",
    "source_code": "def _parse_kernel_fn_code(kernel_module_code: str) -> str:\n    from .codecache import PyCodeCache\n    from .wrapper_benchmark import get_triton_kernel\n    mod = PyCodeCache.load(kernel_module_code)\n    kernel = get_triton_kernel(mod)\n    return inspect.getsource(kernel.fn.fn)",
    "docstring": "The kernel_module_code is the python module that contains kernel function code. kernel function is the proper triton kernel function annotated with @triton.jit",
    "type": "function",
    "file_path": "pytorch\\torch\\_inductor\\metrics.py",
    "ast_data": "FunctionDef name:_parse_kernel_fn_code arg:kernel_module_code arguments arg Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "get_chunk_sharding_params",
    "source_code": "def get_chunk_sharding_params(sharding_dim_size, world_size, spec, rank):\n    split_size = get_split_size(sharding_dim_size, world_size)\n    current_offsets = 0\n    start_pos = current_offsets\n    for idx, placement in enumerate(spec.placements):\n        chunk_size = get_chunked_dim_size(sharding_dim_size, split_size, idx)\n        if rank == placement.rank():\n            start_pos = current_offsets\n            break\n        current_offsets += chunk_size\n    return (start_pos, chunk_size)",
    "docstring": "Generate the start pos and offset length for the current rank for chunk sharding. Args: sharding_dim_size(int): The dimension length which we shard on. world_size(int): number of ranks. spec (:class:): sharding spec. rank(int): # of cuda process. Returns: start_pos(int): start position of sharded tensor on the given rank. chunk_size(int): chunk size of sharded tensor on the given rank.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\_shard\\sharding_spec\\_internals.py",
    "ast_data": "FunctionDef name:get_chunk_sharding_params arg:sharding_dim_size arg:world_size arg:spec arg:rank arguments arg arg arg arg Assign Call Assign Assign For Call Assign Call If Compare Call Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "nth_element",
    "source_code": "def nth_element(input, n, reverse=False, name=None):\n    return gen_nn_ops.nth_element(input, n, reverse=reverse, name=name)",
    "docstring": "Finds values of the -th smallest value for the last dimension. Note that n is zero-indexed. If the input is a vector (rank-1), finds the entries which is the nth-smallest value in the vector and outputs their values as scalar tensor. For matrices (resp. higher rank input), computes the entries which is the nth-smallest value in each row (resp. vector along the last dimension). Thus, values.shape = input.shape[:-1] Args: input: 1-D or higher with last dimension at least . n: A of type . 0-D. Position of sorted vector to select along the last dimension (along each row for matrices). Valid range of n is reverse: An optional . Defaults to . When set to True, find the nth-largest value in the vector and vice versa. name: A name for the operation (optional). Returns: A . Has the same type as . The -th order statistic along each last dimensional slice.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\nn_ops.py",
    "ast_data": "FunctionDef name:nth_element arg:input arg:n arg:reverse arg:name arguments arg arg arg arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "cudagraphs_inner",
    "source_code": "def cudagraphs_inner(model, inputs, copy_outputs=True, copy_inputs=True):\n    assert isinstance(inputs, (list, tuple))\n    if copy_inputs:\n        static_inputs = [torch.zeros_like(x) for x in inputs]\n    else:\n        static_inputs = list(inputs)\n    torch.cuda.synchronize()\n    stream = torch.cuda.Stream()\n    stream.wait_stream(torch.cuda.current_stream())\n    with torch.cuda.stream(stream):\n        model(*inputs)\n    stream.synchronize()\n    torch.cuda.current_stream().wait_stream(stream)\n    torch.cuda.synchronize()\n    graph = torch.cuda.CUDAGraph()\n    with torch.cuda.graph(graph, stream=stream):\n        static_outputs = model(*static_inputs)\n    if not isinstance(static_outputs, (list, tuple)):\n        static_outputs = (static_outputs,)\n\n    def run(*new_inputs):\n        assert len(static_inputs) == len(new_inputs)\n        if copy_inputs:\n            for dst, src in zip(static_inputs, new_inputs):\n                dst.copy_(src)\n        graph.replay()\n        if copy_outputs:\n            return [x.clone() for x in static_outputs]\n        else:\n            return static_outputs\n    return run",
    "docstring": "This isn't registered as a backend, but is used in some benchmarks",
    "type": "function",
    "file_path": "pytorch\\torch\\_dynamo\\backends\\cudagraphs.py",
    "ast_data": "FunctionDef name:cudagraphs_inner arg:model arg:inputs arg:copy_outputs arg:copy_inputs arguments arg arg arg arg Call If Assign Call Assign Call Call Assign Call Call Call With Call Call Call Call Call Call Assign Call With Call Assign Call If Call Assign FunctionDef name:run arguments arg Compare Call Call If For Call Call Call If Return return:yes Call Return return:yes Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "size",
    "source_code": "@tf_export(v1=['size'])\n@dispatch.add_dispatch_support\ndef size(input, name=None, out_type=None):\n    if out_type is None:\n        if flags.config().tf_shape_default_int64.value():\n            out_type = dtypes.int64\n        else:\n            out_type = dtypes.int32\n    return size_internal(input, name, optimize=True, out_type=out_type)",
    "docstring": "Returns the size of a tensor. Returns a 0-D representing the number of elements in of type . Defaults to tf.int32. For example: Args: input: A or . name: A name for the operation (optional). out_type: (Optional) The specified non-quantized numeric output type of the operation. Defaults to . (Note: there is an experimental flag, that changes the default to . This is an unsupported, experimental setting that causes known breakages.) Returns: A of type . Defaults to . @compatibility(numpy) Equivalent to np.size() @end_compatibility",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\array_ops.py",
    "ast_data": "FunctionDef name:size arg:input arg:name arg:out_type arguments arg arg arg If Compare If Call Call Assign Assign Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, graph, fetches, feeds, feed_handles=None):\n    with graph.as_default():\n        self._fetch_mapper = _FetchMapper.for_fetch(fetches)\n    self._fetches = []\n    self._targets = []\n    self._feeds = feeds\n    self._feed_handles = feed_handles or {}\n    self._ops = []\n    self._fetch_handles = {}\n    for fetch in self._fetch_mapper.unique_fetches():\n        if isinstance(fetch, ops.Operation):\n            self._assert_fetchable(graph, fetch)\n            self._targets.append(fetch)\n            self._ops.append(True)\n        else:\n            self._assert_fetchable(graph, fetch.op)\n            self._fetches.append(fetch)\n            self._ops.append(False)\n        if isinstance(fetch, tensor.Tensor) and (fetch.op.type == 'GetSessionHandle' or fetch.op.type == 'GetSessionHandleV2'):\n            self._fetch_handles[fetch.ref()] = fetch.op.inputs[0].dtype\n    self._final_fetches = [x for x in self._fetches if x.ref() not in feeds]",
    "docstring": "Creates a fetch handler. Args: graph: Graph of the fetches. Used to check for fetchability and to convert all fetches to tensors or ops as needed. fetches: An arbitrary fetch structure: singleton, list, tuple, namedtuple, or dict. feeds: A feed dict where keys are Tensors. feed_handles: A dict from feed Tensors to TensorHandle objects used as direct feeds.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\client\\session.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:graph arg:fetches arg:feeds arg:feed_handles arguments arg arg arg arg arg With Call Assign Call Assign Assign Assign Assign BoolOp Assign Assign For Call If Call Call Call Call Call Call Call If BoolOp Call BoolOp Compare Compare Assign Call Assign Compare Call"
  },
  {
    "library": "pytorch",
    "name": "_step_microbatches",
    "source_code": "@abstractmethod\ndef _step_microbatches(self, arg_mbs: Optional[list]=None, kwarg_mbs: Optional[list]=None, target_mbs: Optional[list]=None, losses: Optional[list]=None):\n    raise NotImplementedError",
    "docstring": "Run one iteration of the pipeline schedule with list of microbatches. Will go through all the microbatches according to the schedule implementation. Args: microbatches: list of microbatch args.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\pipelining\\schedules.py",
    "ast_data": "FunctionDef name:_step_microbatches arg:self arg:arg_mbs arg:kwarg_mbs arg:target_mbs arg:losses arguments arg arg arg arg arg Raise"
  },
  {
    "library": "scikit-learn",
    "name": "get_config",
    "source_code": "def get_config():\n    return _get_threadlocal_config().copy()",
    "docstring": "Retrieve current values for configuration set by :func:. Returns ------- config : dict Keys are parameter names that can be passed to :func:. See Also -------- config_context : Context manager for global scikit-learn configuration. set_config : Set global scikit-learn configuration. Examples -------- >>> import sklearn >>> config = sklearn.get_config() >>> config.keys() dict_keys([...])",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\_config.py",
    "ast_data": "FunctionDef name:get_config arguments Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "FixedShardsPartitioner",
    "source_code": "@tf_export('distribute.experimental.partitioners.FixedShardsPartitioner', v1=[])\nclass FixedShardsPartitioner(Partitioner):\n\n    def __init__(self, num_shards):\n        self._num_shards = num_shards\n\n    def __call__(self, shape, dtype, axis=0):\n        del dtype\n        result = [1] * len(shape)\n        result[axis] = min(self._num_shards, shape.dims[axis].value)\n        return result",
    "docstring": "Partitioner that allocates a fixed number of shards. Examples: >>> # standalone usage: >>> partitioner = FixedShardsPartitioner(num_shards=2) >>> partitions = partitioner(tf.TensorShape([10, 3]), tf.float32) >>> [2, 1] >>> >>> # use in ParameterServerStrategy >>> # strategy = tf.distribute.experimental.ParameterServerStrategy( >>> # cluster_resolver=cluster_resolver, variable_partitioner=partitioner)",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\sharded_variable.py",
    "ast_data": "ClassDef name:FixedShardsPartitioner FunctionDef name:__init__ arg:self arg:num_shards arguments arg arg Assign FunctionDef name:__call__ arg:self arg:shape arg:dtype arg:axis arguments arg arg arg arg Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "set_linewidth",
    "source_code": "def set_linewidth(self, w):\n    w = float(w)\n    if self._linewidth != w:\n        self.stale = True\n    self._linewidth = w\n    self._dash_pattern = _scale_dashes(*self._unscaled_dash_pattern, w)",
    "docstring": "Set the line width in points. Parameters ---------- w : float Line width, in points.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\lines.py",
    "ast_data": "FunctionDef name:set_linewidth arg:self arg:w arguments arg arg Assign Call If Compare Assign Assign Assign Call"
  },
  {
    "library": "matplotlib",
    "name": "set_in_layout",
    "source_code": "def set_in_layout(self, in_layout):\n    self._in_layout = in_layout",
    "docstring": "Set if artist is to be included in layout calculations, E.g. :ref:, , and ``. Parameters ---------- in_layout : bool",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\artist.py",
    "ast_data": "FunctionDef name:set_in_layout arg:self arg:in_layout arguments arg arg Assign"
  },
  {
    "library": "pytorch",
    "name": "SerializableAOTDispatchCompiler",
    "source_code": "class SerializableAOTDispatchCompiler(AOTDispatchCompiler):\n\n    def __init__(self, output_code_ty: type[TOutputCode], compiler_fn: Callable[[torch.fx.GraphModule, Sequence[InputType]], TOutputCode]):\n        self.output_code_ty = output_code_ty\n        self.compiler_fn = compiler_fn\n\n    def __call__(self, gm: torch.fx.GraphModule, example_inputs: Sequence[InputType]) -> OutputCode:\n        return self.compiler_fn(gm, example_inputs)",
    "docstring": "Represents an AOTDispatchCompiler that returns an OutputCode, and is therefore cacheable. SerializableAOTDispatchCompiler always return an OutputCode. A _CompileFxCallable usually gets converted into an AOTDispatchCompiler after binding all of the kwargs in _CompileFxKwargs.",
    "type": "class",
    "file_path": "pytorch\\torch\\_functorch\\aot_autograd.py",
    "ast_data": "ClassDef name:SerializableAOTDispatchCompiler FunctionDef name:__init__ arg:self arg:output_code_ty arg:compiler_fn arguments arg arg arg Assign Assign FunctionDef name:__call__ arg:self arg:gm arg:example_inputs arguments arg arg arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "_join_rocm_home",
    "source_code": "def _join_rocm_home(*paths) -> str:\n    if ROCM_HOME is None:\n        raise OSError('ROCM_HOME environment variable is not set. Please set it to your ROCm install root.')\n    return os.path.join(ROCM_HOME, *paths)",
    "docstring": "Join paths with ROCM_HOME, or raises an error if it ROCM_HOME is not set. This is basically a lazy way of raising an error for missing $ROCM_HOME only once we need to get any ROCm-specific path.",
    "type": "function",
    "file_path": "pytorch\\torch\\utils\\cpp_extension.py",
    "ast_data": "FunctionDef name:_join_rocm_home arguments arg If Compare Raise Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "flatten",
    "source_code": "def flatten(inputs, name=None, data_format='channels_last'):\n    warnings.warn('`tf.layers.flatten` is deprecated and will be removed in a future version. Please use `tf.keras.layers.Flatten` instead.')\n    layer = Flatten(name=name, data_format=data_format)\n    return layer.apply(inputs)",
    "docstring": "Flattens an input tensor while preserving the batch axis (axis 0). Args: inputs: Tensor input. name: The name of the layer (string). data_format: A string, one of (default) or . The ordering of the dimensions in the inputs. corresponds to inputs with shape while corresponds to inputs with shape . Returns: Reshaped tensor. Examples:",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\legacy_tf_layers\\core.py",
    "ast_data": "FunctionDef name:flatten arg:inputs arg:name arg:data_format arguments arg arg arg Call Assign Call Return return:yes Call"
  },
  {
    "library": "cherrypy",
    "name": "handle",
    "source_code": "def handle(self, record):\n    pass",
    "docstring": "Handle a log record doing no-op.",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\_cplogging.py",
    "ast_data": "FunctionDef name:handle arg:self arg:record arguments arg arg"
  },
  {
    "library": "tensorflow",
    "name": "from_sparse",
    "source_code": "@classmethod\n@dispatch.add_dispatch_support\ndef from_sparse(cls, st_input, name=None, row_splits_dtype=dtypes.int64):\n    row_splits_dtype = dtypes.as_dtype(row_splits_dtype)\n    if not sparse_tensor.is_sparse(st_input):\n        raise TypeError(f'Argument `st_input` must be of type SparseTensor, but is of type {type(st_input).__name__}.')\n    with ops.name_scope(name, 'RaggedFromSparse', [st_input]):\n        st_input = sparse_tensor.convert_to_tensor_or_sparse_tensor(st_input, name='st_input')\n        if st_input.dense_shape.shape.ndims is None:\n            static_rank_from_dense_shape = None\n        else:\n            static_rank_from_dense_shape = st_input.dense_shape.shape.dims[0].value\n        if st_input.indices.shape.ndims is None:\n            static_rank_from_indices = None\n        else:\n            static_rank_from_indices = st_input.indices.shape.dims[1].value\n        if static_rank_from_dense_shape != 2 and static_rank_from_indices != 2:\n            raise ValueError('rank(st_input) must be 2.')\n        with ops.control_dependencies(_assert_sparse_indices_are_ragged_right(st_input.indices)):\n            segment_ids = math_ops.cast(st_input.indices[:, 0], row_splits_dtype)\n            num_segments = math_ops.cast(st_input.dense_shape[0], row_splits_dtype)\n            return cls.from_value_rowids(st_input.values, segment_ids, num_segments, validate=False)",
    "docstring": "Converts a 2D to a . Each row of the will contain the explicit values from the same row in . must be ragged-right. If not it is not ragged-right, then an error will be generated. Example: >>> indices = [[0, 0], [0, 1], [0, 2], [1, 0], [3, 0]] >>> st = tf.sparse.SparseTensor(indices=indices, ... values=[1, 2, 3, 4, 5], ... dense_shape=[4, 3]) >>> tf.RaggedTensor.from_sparse(st).to_list() [[1, 2, 3], [4], [], [5]] Currently, only two-dimensional are supported. Args: st_input: The sparse tensor to convert. Must have rank 2. name: A name prefix for the returned tensors (optional). row_splits_dtype: for the returned 's tensor. One of or . Returns: A with the same values as . . . Raises: ValueError: If the number of dimensions in is not known statically, or is not two.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\ragged_tensor.py",
    "ast_data": "FunctionDef name:from_sparse arg:cls arg:st_input arg:name arg:row_splits_dtype arguments arg arg arg arg Assign Call If Call Raise Call Call With Call Assign Call If Compare Assign Assign If Compare Assign Assign If BoolOp Compare Compare Raise Call With Call Call Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "django",
    "name": "execute_from_command_line",
    "source_code": "def execute_from_command_line(argv=None):\n    utility = ManagementUtility(argv)\n    utility.execute()",
    "docstring": "Run a ManagementUtility.",
    "type": "function",
    "file_path": "django\\django\\core\\management\\__init__.py",
    "ast_data": "FunctionDef name:execute_from_command_line arg:argv arguments arg Assign Call Call"
  },
  {
    "library": "pandas",
    "name": "_nanquantile",
    "source_code": "def _nanquantile(values: np.ndarray, qs: npt.NDArray[np.float64], *, na_value, mask: npt.NDArray[np.bool_], interpolation: str):\n    if values.dtype.kind in 'mM':\n        result = _nanquantile(values.view('i8'), qs=qs, na_value=na_value.view('i8'), mask=mask, interpolation=interpolation)\n        return result.astype(values.dtype)\n    if mask.any():\n        assert mask.shape == values.shape\n        result = [_nanquantile_1d(val, m, qs, na_value, interpolation=interpolation) for val, m in zip(list(values), list(mask))]\n        if values.dtype.kind == 'f':\n            result = np.asarray(result, dtype=values.dtype).T\n        else:\n            result = np.asarray(result).T\n            if result.dtype != values.dtype and (not mask.all()) and (result == result.astype(values.dtype, copy=False)).all():\n                result = result.astype(values.dtype, copy=False)\n        return result\n    else:\n        return np.quantile(values, qs, axis=1, method=interpolation)",
    "docstring": "Wrapper for np.quantile that skips missing values. Parameters ---------- values : np.ndarray[ndim=2] over which to find quantiles qs : np.ndarray[float64] of quantile indices to find na_value : scalar value to return for empty or all-null values mask : np.ndarray[bool] locations in values that should be considered missing interpolation : str Returns ------- quantiles : scalar or array",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\array_algos\\quantile.py",
    "ast_data": "FunctionDef name:_nanquantile arg:values arg:qs arguments arg arg arg arg arg If Compare Assign Call Call Call Return return:yes Call If Call Compare Assign Call Call Call Call If Compare Assign Call Assign Call If BoolOp Compare Call Call Compare Call Assign Call Return return:yes Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "update_hash_with_array",
    "source_code": "def update_hash_with_array(hash_value, int_array):\n    if int_array is not None:\n        for i in int_array:\n            hash_value = update_hash_with_primitive_value(hash_value, i)\n    return hash_value",
    "docstring": "Update the hash value using a TFLite int array. Args: hash_value (int): The current hash value. int_array: A TFLite int array to incorporate into the hash. Returns: int: The updated hash value.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\lite\\python\\util.py",
    "ast_data": "FunctionDef name:update_hash_with_array arg:hash_value arg:int_array arguments arg arg If Compare For Assign Call Return return:yes"
  },
  {
    "library": "pandas",
    "name": "to_dense",
    "source_code": "def to_dense(self) -> np.ndarray:\n    return np.asarray(self, dtype=self.sp_values.dtype)",
    "docstring": "Convert SparseArray to a NumPy array. Returns ------- arr : NumPy array",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\arrays\\sparse\\array.py",
    "ast_data": "FunctionDef name:to_dense arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "is_timedelta64_ns_dtype",
    "source_code": "def is_timedelta64_ns_dtype(arr_or_dtype) -> bool:\n    return _is_dtype(arr_or_dtype, lambda dtype: dtype == TD64NS_DTYPE)",
    "docstring": "Check whether the provided array or dtype is of the timedelta64[ns] dtype. This is a very specific dtype, so generic ones like will return False if passed into this function. Parameters ---------- arr_or_dtype : array-like or dtype The array or dtype to check. Returns ------- boolean Whether or not the array or dtype is of the timedelta64[ns] dtype. See Also -------- api.types.is_timedelta64_dtype: Check whether an array-like or dtype is of the timedelta64 dtype. Examples -------- >>> from pandas.core.dtypes.common import is_timedelta64_ns_dtype >>> is_timedelta64_ns_dtype(np.dtype(\"m8[ns]\")) True >>> is_timedelta64_ns_dtype(np.dtype(\"m8[ps]\")) # Wrong frequency False >>> is_timedelta64_ns_dtype(np.array([1, 2], dtype=\"m8[ns]\")) True >>> is_timedelta64_ns_dtype(np.array([1, 2], dtype=np.timedelta64)) False",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\dtypes\\common.py",
    "ast_data": "FunctionDef name:is_timedelta64_ns_dtype arg:arr_or_dtype arguments arg Return return:yes Call arguments arg Compare"
  },
  {
    "library": "tensorflow",
    "name": "get_embedding_table_size",
    "source_code": "def get_embedding_table_size(self):\n    return (self.categorical_column._num_buckets, self.dimension)",
    "docstring": "Returns num_ids and width.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\tpu\\feature_column_v2.py",
    "ast_data": "FunctionDef name:get_embedding_table_size arg:self arguments arg Return return:yes"
  },
  {
    "library": "kornia",
    "name": "prob_to_junctions",
    "source_code": "def prob_to_junctions(prob: Tensor, dist: float, prob_thresh: float=0.01, top_k: int=0) -> Tensor:\n    junctions = stack(where(prob >= prob_thresh), -1).float()\n    if len(junctions) == 0:\n        return junctions\n    boxes = concatenate([junctions - dist / 2, junctions + dist / 2], 1)\n    scores = prob[prob >= prob_thresh]\n    remainings = nms(boxes, scores, 0.001)\n    junctions = junctions[remainings]\n    if top_k > 0:\n        k = min(len(junctions), top_k)\n        junctions = junctions[:k]\n    return junctions",
    "docstring": "Extract junctions from a probability map, apply NMS, and extract the top k candidates.",
    "type": "function",
    "file_path": "kornia\\kornia\\feature\\sold2\\sold2_detector.py",
    "ast_data": "FunctionDef name:prob_to_junctions arg:prob arg:dist arg:prob_thresh arg:top_k arguments arg arg arg arg Assign Call Call Call Compare If Compare Call Return return:yes Assign Call Assign Compare Assign Call Assign If Compare Assign Call Call Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "constant",
    "source_code": "@dispatch.add_dispatch_support\n@doc_controls.do_not_generate_docs\ndef constant(value, dtype=None, shape=None, name=None):\n    if dtype is None:\n        dtype = floatx()\n    return constant_op.constant(value, dtype=dtype, shape=shape, name=name)",
    "docstring": "Creates a constant tensor. Args: value: A constant value (or list) dtype: The type of the elements of the resulting tensor. shape: Optional dimensions of resulting tensor. name: Optional name for the tensor. Returns: A Constant Tensor.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\backend.py",
    "ast_data": "FunctionDef name:constant arg:value arg:dtype arg:shape arg:name arguments arg arg arg arg If Compare Assign Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "tf_buffer",
    "source_code": "@tf_contextlib.contextmanager\ndef tf_buffer(data=None):\n    if data:\n        buf = c_api.TF_NewBufferFromString(compat.as_bytes(data))\n    else:\n        buf = c_api.TF_NewBuffer()\n    try:\n        yield buf\n    finally:\n        c_api.TF_DeleteBuffer(buf)",
    "docstring": "Context manager that creates and deletes TF_Buffer. Example usage: with tf_buffer() as buf: # get serialized graph def into buf ... proto_data = c_api.TF_GetBuffer(buf) graph_def.ParseFromString(compat.as_bytes(proto_data)) # buf has been deleted with tf_buffer(some_string) as buf: c_api.TF_SomeFunction(buf) # buf has been deleted Args: data: An optional , , or object. If not None, the yielded buffer will contain this data. Yields: Created TF_Buffer",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\c_api_util.py",
    "ast_data": "FunctionDef name:tf_buffer arg:data arguments arg If Assign Call Call Assign Call Try Call"
  },
  {
    "library": "tensorflow",
    "name": "TFDataServiceConfig",
    "source_code": "@dataclasses.dataclass\nclass TFDataServiceConfig:\n    dispatcher_address: str\n    job_name: str",
    "docstring": "Specifies the tf.data service configuration to use. Attributes: dispatcher_address: a string specifying the address of the tf.data service dispatcher server. job_name: a non-empty string identifying the shared job that will be created on tf.data service to process this dataset.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\dtensor\\python\\input_util.py",
    "ast_data": "ClassDef name:TFDataServiceConfig"
  },
  {
    "library": "matplotlib",
    "name": "get_data_3d",
    "source_code": "def get_data_3d(self):\n    return self._verts3d",
    "docstring": "Get the current data Returns ------- verts3d : length-3 tuple or array-like The current data as a tuple or array-like.",
    "type": "method",
    "file_path": "matplotlib\\lib\\mpl_toolkits\\mplot3d\\art3d.py",
    "ast_data": "FunctionDef name:get_data_3d arg:self arguments arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "get_class_properties",
    "source_code": "def get_class_properties(cls, self_name):\n    props = inspect.getmembers(cls, predicate=lambda m: isinstance(m, property))\n    unused_properties = getattr(cls, '__jit_unused_properties__', [])\n    properties = []\n    for prop in props:\n        if prop[0] not in unused_properties and (not should_drop(prop[1].fget)):\n            getter = get_jit_def(prop[1].fget, f'__{prop[0]}_getter', self_name=self_name)\n            setter = get_jit_def(prop[1].fset, f'__{prop[0]}_setter', self_name=self_name) if prop[1].fset else None\n            properties.append(Property(getter.range(), Ident(getter.range(), prop[0]), getter, setter))\n    return properties",
    "docstring": "Get a list of Property objects representing the properties of a class. Args: cls: The class to get properties of. self_name: The name of the class that the properties should belong to. Returns: A list of Property objects corresponding to the properties of cls. Property here refers to the subclass of TreeView.",
    "type": "function",
    "file_path": "pytorch\\torch\\jit\\frontend.py",
    "ast_data": "FunctionDef name:get_class_properties arg:cls arg:self_name arguments arg arg Assign Call arguments arg Call Assign Call Assign For If BoolOp Compare Call Assign Call Assign Call Call Call Call Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_Upgrade0To1",
    "source_code": "def _Upgrade0To1(self, data):\n    subgraph = {}\n    for key_to_promote in ['tensors', 'operators', 'inputs', 'outputs']:\n        subgraph[key_to_promote] = data[key_to_promote]\n        del data[key_to_promote]\n    data['subgraphs'] = [subgraph]",
    "docstring": "Upgrade data from Version 0 to Version 1. Changes: Added subgraphs (which contains a subset of formally global entries). Args: data: Dictionary representing the TensorFlow lite data to be upgraded. This will be modified in-place to be an upgraded version.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\lite\\schema\\upgrade_schema.py",
    "ast_data": "FunctionDef name:_Upgrade0To1 arg:self arg:data arguments arg arg Assign For Assign Assign"
  },
  {
    "library": "cryptography",
    "name": "key_size",
    "source_code": "@property\n@abc.abstractmethod\ndef key_size(self) -> int:\n    pass",
    "docstring": "The bit length of the prime modulus.",
    "type": "method",
    "file_path": "cryptography\\src\\cryptography\\hazmat\\primitives\\asymmetric\\dsa.py",
    "ast_data": "FunctionDef name:key_size arg:self arguments arg"
  },
  {
    "library": "tensorflow",
    "name": "_implements_test_batch_hooks",
    "source_code": "def _implements_test_batch_hooks(self):\n    return not generic_utils.is_default(self.on_test_batch_begin) or not generic_utils.is_default(self.on_test_batch_end)",
    "docstring": "Determines if this Callback should be called for each test batch.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\callbacks.py",
    "ast_data": "FunctionDef name:_implements_test_batch_hooks arg:self arguments arg Return return:yes BoolOp Call Call"
  },
  {
    "library": "tensorflow",
    "name": "apply_gradients",
    "source_code": "def apply_gradients(self, grads_and_vars, global_step=None, name=None):\n    summed_grads_and_vars = []\n    for grad, var in grads_and_vars:\n        if grad is None:\n            summed_grads_and_vars.append((grad, var))\n        else:\n            with ops.colocate_with(grad):\n                summed_grads_and_vars.append((tpu_ops.cross_replica_sum(grad, self._group_assignment), var))\n    return self._opt.apply_gradients(summed_grads_and_vars, global_step, name)",
    "docstring": "Apply gradients to variables. Calls tpu_ops.cross_replica_sum() to sum gradient contributions across replicas, and then applies the real optimizer. Args: grads_and_vars: List of (gradient, variable) pairs as returned by compute_gradients(). global_step: Optional Variable to increment by one after the variables have been updated. name: Optional name for the returned operation. Default to the name passed to the Optimizer constructor. Returns: An that applies the gradients. If was not None, that operation also increments . Raises: ValueError: If the grads_and_vars is malformed.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\tpu\\tpu_optimizer.py",
    "ast_data": "FunctionDef name:apply_gradients arg:self arg:grads_and_vars arg:global_step arg:name arguments arg arg arg arg Assign For If Compare Call With Call Call Call Return return:yes Call"
  },
  {
    "library": "sphinx",
    "name": "_copy_except__document",
    "source_code": "def _copy_except__document(el: Element) -> Element:\n    newnode = object.__new__(el.__class__)\n    newnode.children = []\n    newnode.rawsource = el.rawsource\n    newnode.tagname = el.tagname\n    newnode.attributes = {k: v if k not in {'ids', 'classes', 'names', 'dupnames', 'backrefs'} else v[:] for k, v in el.attributes.items()}\n    newnode.line = el.line\n    newnode.source = el.source\n    return newnode",
    "docstring": "Monkey-patch to not copy the `` attribute. See:",
    "type": "function",
    "file_path": "sphinx\\sphinx\\util\\nodes.py",
    "ast_data": "FunctionDef name:_copy_except__document arg:el arguments arg Assign Call Assign Assign Assign Assign Compare Call Assign Assign Return return:yes"
  },
  {
    "library": "scipy",
    "name": "_VectorJacWrapper",
    "source_code": "class _VectorJacWrapper:\n\n    def __init__(self, jac, fun=None, finite_diff_options=None, sparse_jacobian=None):\n        self.fun = fun\n        self.jac = jac\n        self.finite_diff_options = finite_diff_options\n        self.sparse_jacobian = sparse_jacobian\n        self.njev = 0\n        self.nfev = 0\n\n    def __call__(self, x, f0=None, **kwds):\n        if callable(self.jac):\n            J = self.jac(x)\n            self.njev += 1\n        elif self.jac in FD_METHODS:\n            J, dct = approx_derivative(self.fun, x, f0=f0, **self.finite_diff_options)\n            self.nfev += dct['nfev']\n        if self.sparse_jacobian:\n            return sps.csr_array(J)\n        elif sps.issparse(J):\n            return J.toarray()\n        elif isinstance(J, LinearOperator):\n            return J\n        else:\n            return np.atleast_2d(J)",
    "docstring": "Wrapper class for Jacobian calculation",
    "type": "class",
    "file_path": "scipy\\scipy\\optimize\\_differentiable_functions.py",
    "ast_data": "ClassDef name:_VectorJacWrapper FunctionDef name:__init__ arg:self arg:jac arg:fun arg:finite_diff_options arg:sparse_jacobian arguments arg arg arg arg arg Assign Assign Assign Assign Assign Assign FunctionDef name:__call__ arg:self arg:x arg:f0 arguments arg arg arg arg If Call Assign Call If Compare Assign Call If Return return:yes Call If Call Return return:yes Call If Call Return return:yes Return return:yes Call"
  },
  {
    "library": "numpy",
    "name": "DJBFFTNotFoundError",
    "source_code": "class DJBFFTNotFoundError(NotFoundError):\n    pass",
    "docstring": "DJBFFT ( libraries not found. Directories to search for the libraries can be specified in the numpy/distutils/site.cfg file (section [djbfft]) or by setting the DJBFFT environment variable.",
    "type": "class",
    "file_path": "numpy\\numpy\\distutils\\system_info.py",
    "ast_data": "ClassDef name:DJBFFTNotFoundError"
  },
  {
    "library": "django",
    "name": "units",
    "source_code": "@property\ndef units(self):\n    if self.projected or self.local:\n        return (self.linear_units, self.linear_name)\n    elif self.geographic:\n        return (self.angular_units, self.angular_name)\n    else:\n        return (None, None)",
    "docstring": "Return a tuple of the units and the name.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\db\\backends\\base\\models.py",
    "ast_data": "FunctionDef name:units arg:self arguments arg If BoolOp Return return:yes If Return return:yes Return return:no"
  },
  {
    "library": "tensorflow",
    "name": "has_weights",
    "source_code": "def has_weights(obj):\n    has_weight = hasattr(type(obj), 'trainable_weights') and hasattr(type(obj), 'non_trainable_weights')\n    return has_weight and (not isinstance(obj, type))",
    "docstring": "Implicit check for Layer-like objects.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\trackable\\layer_utils.py",
    "ast_data": "FunctionDef name:has_weights arg:obj arguments arg Assign BoolOp Call Call Call Call Return return:yes BoolOp Call"
  },
  {
    "library": "scipy",
    "name": "Deb01",
    "source_code": "class Deb01(Benchmark):\n    change_dimensionality = True\n\n    def __init__(self, dimensions=2):\n        Benchmark.__init__(self, dimensions)\n        self._bounds = list(zip([-1.0] * self.N, [1.0] * self.N))\n        self.global_optimum = [[0.3, -0.3]]\n        self.fglob = -1.0\n\n    def fun(self, x, *args):\n        self.nfev += 1\n        return -(1.0 / self.N) * sum(sin(5 * pi * x) ** 6.0)",
    "docstring": "Deb 1 objective function. This class defines the Deb 1 [1]_ global optimization problem. This is a multimodal minimization problem defined as follows: .. math:: f_{\\text{Deb01}}(x) = - \\frac{1}{N} \\sum_{i=1}^n \\sin^6(5 \\pi x_i) Here, :math: represents the number of dimensions and :math: for :math:. *Global optimum*: :math:. The number of global minima is :math: that are evenly spaced in the function landscape, where :math: represents the dimension of the problem. .. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions For Global Optimization Problems Int. Journal of Mathematical Modelling and Numerical Optimisation, 2013, 4, 150-194.",
    "type": "class",
    "file_path": "scipy\\benchmarks\\benchmarks\\go_benchmark_functions\\go_funcs_D.py",
    "ast_data": "ClassDef name:Deb01 Assign FunctionDef name:__init__ arg:self arg:dimensions arguments arg arg Call Assign Call Call Assign Assign FunctionDef name:fun arg:self arg:x arguments arg arg arg Return return:yes Call Call"
  },
  {
    "library": "numpy",
    "name": "Methods0DBoolComplex",
    "source_code": "class Methods0DBoolComplex(Benchmark):\n    params = [['__bool__', '__complex__'], TYPES1]\n    param_names = ['methods', 'npdtypes']\n    timeout = 10\n\n    def setup(self, methname, npdtypes):\n        self.xarg = np.array(3, dtype=npdtypes)\n\n    def time_ndarray__0d__(self, methname, npdtypes):\n        meth = getattr(self.xarg, methname)\n        meth()",
    "docstring": "Zero dimension array methods",
    "type": "class",
    "file_path": "numpy\\benchmarks\\benchmarks\\bench_ufunc.py",
    "ast_data": "ClassDef name:Methods0DBoolComplex Assign Assign Assign FunctionDef name:setup arg:self arg:methname arg:npdtypes arguments arg arg arg Assign Call FunctionDef name:time_ndarray__0d__ arg:self arg:methname arg:npdtypes arguments arg arg arg Assign Call Call"
  },
  {
    "library": "tensorflow",
    "name": "clone_model_on_replicas",
    "source_code": "def clone_model_on_replicas(model, strategy, mode, inputs=None, targets=None):\n    with backend.get_graph().as_default(), strategy.scope():\n        distributed_model = strategy.extended.call_for_each_replica(_clone_and_build_model, args=(model, mode, inputs, targets))\n        set_distributed_model(model, mode, distributed_model)\n    if mode == ModeKeys.TRAIN:\n        model._make_callback_model(distributed_model)",
    "docstring": "Create a cloned model on each replica.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\distribute\\distributed_training_utils_v1.py",
    "ast_data": "FunctionDef name:clone_model_on_replicas arg:model arg:strategy arg:mode arg:inputs arg:targets arguments arg arg arg arg arg With Call Call Call Assign Call Call If Compare Call"
  },
  {
    "library": "pytorch",
    "name": "conv3x3",
    "source_code": "def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1):\n    return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, padding=dilation, groups=groups, bias=False, dilation=dilation)",
    "docstring": "3x3 convolution with padding",
    "type": "function",
    "file_path": "pytorch\\benchmarks\\functional_autograd_benchmark\\torchvision_models.py",
    "ast_data": "FunctionDef name:conv3x3 arg:in_planes arg:out_planes arg:stride arg:groups arg:dilation arguments arg arg arg arg arg Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "factorized",
    "source_code": "def factorized(A):\n    if is_pydata_spmatrix(A):\n        A = A.to_scipy_sparse().tocsc()\n    if not hasattr(useUmfpack, 'u'):\n        useUmfpack.u = not noScikit\n    if useUmfpack.u:\n        if noScikit:\n            raise RuntimeError('Scikits.umfpack not installed.')\n        if not (issparse(A) and A.format == 'csc'):\n            A = csc_array(A)\n            warn('splu converted its input to CSC format', SparseEfficiencyWarning, stacklevel=2)\n        A = A._asfptype()\n        if A.dtype.char not in 'dD':\n            raise ValueError('convert matrix data to double, please, using .astype(), or set linsolve.useUmfpack.u = False')\n        umf_family, A = _get_umf_family(A)\n        umf = umfpack.UmfpackContext(umf_family)\n        umf.numeric(A)\n\n        def solve(b):\n            with np.errstate(divide='ignore', invalid='ignore'):\n                result = umf.solve(umfpack.UMFPACK_A, A, b, autoTranspose=True)\n            return result\n        return solve\n    else:\n        return splu(A).solve",
    "docstring": "Return a function for solving a sparse linear system, with A pre-factorized. Parameters ---------- A : (N, N) array_like Input. A in CSC format is most efficient. A CSR format matrix will be converted to CSC before factorization. Returns ------- solve : callable To solve the linear system of equations given in , the callable should be passed an ndarray of shape (N,). Examples -------- >>> import numpy as np >>> from scipy.sparse.linalg import factorized >>> from scipy.sparse import csc_array >>> A = np.array([[ 3. , 2. , -1. ], ... [ 2. , -2. , 4. ], ... [-1. , 0.5, -1. ]]) >>> solve = factorized(csc_array(A)) # Makes LU decomposition. >>> rhs1 = np.array([1, -2, 0]) >>> solve(rhs1) # Uses the LU factors. array([ 1., -2., -2.])",
    "type": "function",
    "file_path": "scipy\\scipy\\sparse\\linalg\\_dsolve\\linsolve.py",
    "ast_data": "FunctionDef name:factorized arg:A arguments arg If Call Assign Call Call If Call Assign If If Raise Call If BoolOp Call Compare Assign Call Call Assign Call If Compare Raise Call Assign Call Assign Call Call FunctionDef name:solve arg:b arguments arg With Call Assign Call Return return:yes Return return:yes Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_expand_cmakedefines",
    "source_code": "def _expand_cmakedefines(line, cmake_vars):\n    match = _CMAKE_DEFINE_REGEX.match(line)\n    if match:\n        name = match.group(1)\n        suffix = match.group(2) or ''\n        if name in cmake_vars:\n            return '#define {}{}\\n'.format(name, _expand_variables(suffix, cmake_vars))\n        else:\n            return '/* #undef {} */\\n'.format(name)\n    match = _CMAKE_DEFINE01_REGEX.match(line)\n    if match:\n        name = match.group(1)\n        value = cmake_vars.get(name, '0')\n        return '#define {} {}\\n'.format(name, value)\n    return _expand_variables(line, cmake_vars)",
    "docstring": "Expands #cmakedefine declarations, using a dictionary 'cmake_vars'.",
    "type": "function",
    "file_path": "tensorflow\\third_party\\xla\\third_party\\llvm_openmp\\expand_cmake_vars.py",
    "ast_data": "FunctionDef name:_expand_cmakedefines arg:line arg:cmake_vars arguments arg arg Assign Call If Assign Call Assign BoolOp Call If Compare Return return:yes Call Call Return return:yes Call Assign Call If Assign Call Assign Call Return return:yes Call Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "get_Hrot_from_J",
    "source_code": "def get_Hrot_from_J(self, J, return_area=False):\n    J_inv = _safe_inv22_vectorized(J)\n    Ji00 = J_inv[:, 0, 0]\n    Ji11 = J_inv[:, 1, 1]\n    Ji10 = J_inv[:, 1, 0]\n    Ji01 = J_inv[:, 0, 1]\n    H_rot = _to_matrix_vectorized([[Ji00 * Ji00, Ji10 * Ji10, Ji00 * Ji10], [Ji01 * Ji01, Ji11 * Ji11, Ji01 * Ji11], [2 * Ji00 * Ji01, 2 * Ji11 * Ji10, Ji00 * Ji11 + Ji10 * Ji01]])\n    if not return_area:\n        return H_rot\n    else:\n        area = 0.5 * (J[:, 0, 0] * J[:, 1, 1] - J[:, 0, 1] * J[:, 1, 0])\n        return (H_rot, area)",
    "docstring": "Parameters ---------- *J* is a (N x 2 x 2) array of jacobian matrices (jacobian matrix at triangle first apex) Returns ------- Returns H_rot used to rotate Hessian from local basis of first apex, to global coordinates. if *return_area* is True, returns also the triangle area (0.5*det(J))",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\tri\\_triinterpolate.py",
    "ast_data": "FunctionDef name:get_Hrot_from_J arg:self arg:J arg:return_area arguments arg arg arg Assign Call Assign Assign Assign Assign Assign Call If Return return:yes Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "get",
    "source_code": "def get(identifier):\n    if isinstance(identifier, (Optimizer, optimizer_v2.OptimizerV2)):\n        return identifier\n    elif isinstance(identifier, tf_optimizer_module.Optimizer):\n        opt = TFOptimizer(identifier)\n        backend.track_tf_optimizer(opt)\n        return opt\n    elif isinstance(identifier, dict):\n        return deserialize(identifier)\n    elif isinstance(identifier, str):\n        config = {'class_name': str(identifier), 'config': {}}\n        return deserialize(config)\n    else:\n        raise ValueError('Could not interpret optimizer identifier: {}'.format(identifier))",
    "docstring": "Retrieves a Keras Optimizer instance. Args: identifier: Optimizer identifier, one of - String: name of an optimizer - Dictionary: configuration dictionary. - Keras Optimizer instance (it will be returned unchanged). - TensorFlow Optimizer instance (it will be wrapped as a Keras Optimizer). Returns: A Keras Optimizer instance. Raises: ValueError: If cannot be interpreted.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\optimizers.py",
    "ast_data": "FunctionDef name:get arg:identifier arguments arg If Call Return return:yes If Call Assign Call Call Return return:yes If Call Return return:yes Call If Call Assign Call Return return:yes Call Raise Call Call"
  },
  {
    "library": "pandas",
    "name": "validate_kwargs",
    "source_code": "def validate_kwargs(fname, kwargs, compat_args) -> None:\n    kwds = kwargs.copy()\n    _check_for_invalid_keys(fname, kwargs, compat_args)\n    _check_for_default_values(fname, kwds, compat_args)",
    "docstring": "Checks whether parameters passed to the **kwargs argument in a function are valid parameters as specified in and whether or not they are set to their default values. Parameters ---------- fname : str The name of the function being passed the parameter kwargs : dict The parameter passed into compat_args: dict A dictionary of keys that is allowed to have and their associated default values Raises ------ TypeError if contains keys not in ValueError if contains keys in that do not map to the default values specified in",
    "type": "function",
    "file_path": "pandas\\pandas\\util\\_validators.py",
    "ast_data": "FunctionDef name:validate_kwargs arg:fname arg:kwargs arg:compat_args arguments arg arg arg Assign Call Call Call"
  },
  {
    "library": "kornia",
    "name": "_valid_keypoints",
    "source_code": "def _valid_keypoints(self, keypoints: Keypoints | Tensor, labels: Tensor) -> Keypoints:\n    KORNIA_CHECK_SHAPE(keypoints.data, ['K', 'N', '2'])\n    KORNIA_CHECK_SHAPE(labels.data, ['K', 'N'])\n    KORNIA_CHECK(keypoints.shape[0] == labels.shape[0], 'The keypoints and labels should have the same batch size')\n    if isinstance(keypoints, Tensor):\n        keypoints = Keypoints.from_tensor(keypoints)\n    return keypoints",
    "docstring": "Validate the keypoints shape and ensure to be a Keypoints.",
    "type": "method",
    "file_path": "kornia\\kornia\\contrib\\visual_prompter.py",
    "ast_data": "FunctionDef name:_valid_keypoints arg:self arg:keypoints arg:labels arguments arg arg arg Call Call Call Compare If Call Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, save_steps=None, save_secs=None, output_dir=None, summary_writer=None, scaffold=None, summary_op=None):\n    if scaffold is None and summary_op is None or (scaffold is not None and summary_op is not None):\n        raise ValueError('Exactly one of scaffold or summary_op must be provided.')\n    self._summary_op = summary_op\n    self._summary_writer = summary_writer\n    self._output_dir = output_dir\n    self._scaffold = scaffold\n    self._timer = SecondOrStepTimer(every_secs=save_secs, every_steps=save_steps)",
    "docstring": "Initializes a . Args: save_steps: , save summaries every N steps. Exactly one of and should be set. save_secs: , save summaries every N seconds. output_dir: , the directory to save the summaries to. Only used if no is supplied. summary_writer: . If and an was passed, one will be created accordingly. scaffold: to get summary_op if it's not provided. summary_op: of type containing the serialized protocol buffer or a list of . They are most likely an output by TF summary methods like or . It can be passed in as one tensor; if more than one, they must be passed in as a list. Raises: ValueError: Exactly one of scaffold or summary_op should be set.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\training\\basic_session_run_hooks.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:save_steps arg:save_secs arg:output_dir arg:summary_writer arg:scaffold arg:summary_op arguments arg arg arg arg arg arg arg If BoolOp BoolOp Compare Compare BoolOp Compare Compare Raise Call Assign Assign Assign Assign Assign Call"
  },
  {
    "library": "tensorflow",
    "name": "sufficient_statistics_v2",
    "source_code": "@tf_export('nn.sufficient_statistics', v1=[])\n@dispatch.add_dispatch_support\ndef sufficient_statistics_v2(x, axes, shift=None, keepdims=False, name=None):\n    return sufficient_statistics(x=x, axes=axes, shift=shift, keep_dims=keepdims, name=name)",
    "docstring": "Calculate the sufficient statistics for the mean and variance of . These sufficient statistics are computed using the one pass algorithm on an input that's optionally shifted. See: Args: x: A . axes: Array of ints. Axes along which to compute mean and variance. shift: A containing the value by which to shift the data for numerical stability, or if no shift is to be performed. A shift close to the true mean provides the most numerically stable results. keepdims: produce statistics with the same dimensionality as the input. name: Name used to scope the operations that compute the sufficient stats. Returns: Four objects of the same type as : * the count (number of elements to average over). * the (possibly shifted) sum of the elements in the array. * the (possibly shifted) sum of squares of the elements in the array. * the shift by which the mean must be corrected or None if is None.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\nn_impl.py",
    "ast_data": "FunctionDef name:sufficient_statistics_v2 arg:x arg:axes arg:shift arg:keepdims arg:name arguments arg arg arg arg arg Return return:yes Call Call"
  },
  {
    "library": "scrapy",
    "name": "rel_has_nofollow",
    "source_code": "def rel_has_nofollow(rel: str | None) -> bool:\n    return rel is not None and 'nofollow' in rel.replace(',', ' ').split()",
    "docstring": "Return True if link rel attribute has nofollow type",
    "type": "function",
    "file_path": "scrapy\\scrapy\\utils\\misc.py",
    "ast_data": "FunctionDef name:rel_has_nofollow arg:rel arguments arg Return return:yes BoolOp Compare Compare Call Call"
  },
  {
    "library": "pygame",
    "name": "initsysfonts",
    "source_code": "def initsysfonts():\n    global is_init\n    if is_init:\n        return\n    if sys.platform == 'win32':\n        fonts = initsysfonts_win32()\n    elif sys.platform == 'darwin':\n        fonts = initsysfonts_darwin()\n    else:\n        fonts = initsysfonts_unix()\n    Sysfonts.update(fonts)\n    create_aliases()\n    is_init = True",
    "docstring": "Initialise the sysfont module, called once. Locates the installed fonts and creates some aliases for common font categories. Has different initialisation functions for different platforms.",
    "type": "function",
    "file_path": "pygame\\src_py\\sysfont.py",
    "ast_data": "FunctionDef name:initsysfonts arguments If Return return:no If Compare Assign Call If Compare Assign Call Assign Call Call Call Assign"
  },
  {
    "library": "pytorch",
    "name": "from_dynamo_produced_raw_meta",
    "source_code": "@classmethod\ndef from_dynamo_produced_raw_meta(cls, raw_meta: _DYNAMO_NN_MODULE_META_TYPE) -> _ModuleMeta:\n    module_name, (_qualified_name, module_class) = raw_meta\n    return _ModuleMeta(module_name.split('@')[0], module_class, raw_meta)",
    "docstring": "Create a module meta from raw meta produced by FX dynamo tracer.",
    "type": "method",
    "file_path": "pytorch\\torch\\onnx\\_internal\\fx\\passes\\modularization.py",
    "ast_data": "FunctionDef name:from_dynamo_produced_raw_meta arg:cls arg:raw_meta arguments arg arg Assign Return return:yes Call Call"
  },
  {
    "library": "pandas",
    "name": "nunique",
    "source_code": "def nunique(self, dropna: bool=True) -> Series | DataFrame:\n    ids = self._grouper.ids\n    ngroups = self._grouper.ngroups\n    val = self.obj._values\n    codes, uniques = algorithms.factorize(val, use_na_sentinel=dropna, sort=False)\n    if self._grouper.has_dropped_na:\n        mask = ids >= 0\n        ids = ids[mask]\n        codes = codes[mask]\n    group_index = get_group_index(labels=[ids, codes], shape=(ngroups, len(uniques)), sort=False, xnull=dropna)\n    if dropna:\n        mask = group_index >= 0\n        if (~mask).any():\n            ids = ids[mask]\n            group_index = group_index[mask]\n    mask = duplicated(group_index, 'first')\n    res = np.bincount(ids[~mask], minlength=ngroups)\n    res = ensure_int64(res)\n    ri = self._grouper.result_index\n    result: Series | DataFrame = self.obj._constructor(res, index=ri, name=self.obj.name)\n    if not self.as_index:\n        result = self._insert_inaxis_grouper(result)\n        result.index = default_index(len(result))\n    return result",
    "docstring": "Return number of unique elements in the group. Parameters ---------- dropna : bool, default True Don't include NaN in the counts. Returns ------- Series Number of unique values within each group. See Also -------- core.resample.Resampler.nunique : Method nunique for Resampler. Examples -------- >>> lst = [\"a\", \"a\", \"b\", \"b\"] >>> ser = pd.Series([1, 2, 3, 3], index=lst) >>> ser a 1 a 2 b 3 b 3 dtype: int64 >>> ser.groupby(level=0).nunique() a 2 b 1 dtype: int64",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\groupby\\generic.py",
    "ast_data": "FunctionDef name:nunique arg:self arg:dropna arguments arg arg Assign Assign Assign Assign Call If Assign Compare Assign Assign Assign Call Call If Assign Compare If Call Assign Assign Assign Call Assign Call Assign Call Assign Call If Assign Call Assign Call Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "set_theta_direction",
    "source_code": "def set_theta_direction(self, direction):\n    mtx = self._direction.get_matrix()\n    if direction in ('clockwise', -1):\n        mtx[0, 0] = -1\n    elif direction in ('counterclockwise', 'anticlockwise', 1):\n        mtx[0, 0] = 1\n    else:\n        _api.check_in_list([-1, 1, 'clockwise', 'counterclockwise', 'anticlockwise'], direction=direction)\n    self._direction.invalidate()",
    "docstring": "Set the direction in which theta increases. clockwise, -1: Theta increases in the clockwise direction counterclockwise, anticlockwise, 1: Theta increases in the counterclockwise direction",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\projections\\polar.py",
    "ast_data": "FunctionDef name:set_theta_direction arg:self arg:direction arguments arg arg Assign Call If Compare Assign If Compare Assign Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_kernel_constraint",
    "source_code": "def _kernel_constraint(self, kernel):\n    padding = backend.constant([[1, 1], [1, 1]], dtype='int32')\n    kernel_shape = backend.shape(kernel)[0]\n    start = backend.cast(kernel_shape / 2, 'int32')\n    kernel_new = backend.switch(backend.cast(math_ops.floormod(kernel_shape, 2), 'bool'), lambda: kernel[start - 1:start, start - 1:start], lambda: kernel[start - 1:start, start - 1:start] + backend.zeros((2, 2), dtype=kernel.dtype))\n    index = backend.switch(backend.cast(math_ops.floormod(kernel_shape, 2), 'bool'), lambda: backend.constant(0, dtype='int32'), lambda: backend.constant(1, dtype='int32'))\n    while_condition = lambda index, *args: backend.less(index, start)\n\n    def body_fn(i, array):\n        return (i + 1, array_ops.pad(array, padding, constant_values=kernel[start + i, start + i]))\n    _, kernel_new = while_loop.while_loop(while_condition, body_fn, [index, kernel_new], shape_invariants=[index.get_shape(), tensor_shape.TensorShape([None, None])])\n    return kernel_new",
    "docstring": "Radially constraints a kernel with shape (height, width, channels).",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\constraints.py",
    "ast_data": "FunctionDef name:_kernel_constraint arg:self arg:kernel arguments arg arg Assign Call Assign Call Assign Call Assign Call Call Call arguments arguments Call Assign Call Call Call arguments Call arguments Call Assign arguments arg arg Call FunctionDef name:body_fn arg:i arg:array arguments arg arg Return return:yes Call Assign Call Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_WrapperModule",
    "source_code": "class _WrapperModule(torch.nn.Module):\n\n    def __init__(self, fn):\n        super().__init__()\n        self.fn = fn\n\n    def forward(self, *args, **kwargs):\n        return self.fn(*args, **kwargs)",
    "docstring": "Class to wrap a callable in an :class:. Use this if you are trying to export a callable.",
    "type": "class",
    "file_path": "pytorch\\torch\\ao\\quantization\\pt2e\\export_utils.py",
    "ast_data": "ClassDef name:_WrapperModule FunctionDef name:__init__ arg:self arg:fn arguments arg arg Call Call Assign FunctionDef name:forward arg:self arguments arg arg arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "_call_symbolic_op",
    "source_code": "def _call_symbolic_op(op_type: str, domain: str, args: Sequence[ir.Value | None], kwargs: dict[str, int | float | str | bool | list[int] | list[float] | list[str]], dtypes: Sequence[int], version: int | None, metadata_props: dict[str, str] | None) -> Sequence[ir.Value]:\n    assert _core.current_tracer is not None\n    tracer = _core.current_tracer\n    inputs = list(args)\n    for input in reversed(inputs):\n        if input is not None:\n            break\n        inputs.pop()\n    attributes = [attr for attr in ir_convenience.convert_attributes(kwargs) if attr.value is not None]\n    tracer.nodes.append((node := ir.Node(domain, op_type, inputs=inputs, attributes=attributes, num_outputs=len(dtypes), version=version, metadata_props=metadata_props)))\n    for value, dtype in zip(node.outputs, dtypes):\n        value.dtype = ir.DataType(dtype)\n    return node.outputs",
    "docstring": "Call an operator with the given arguments and keyword arguments. Arguments are always inputs, while keyword arguments are attributes.",
    "type": "function",
    "file_path": "pytorch\\torch\\onnx\\_internal\\exporter\\_torchlib\\ops\\symbolic.py",
    "ast_data": "FunctionDef name:_call_symbolic_op arg:op_type arg:domain arg:args arg:kwargs arg:dtypes arg:version arg:metadata_props arguments arg arg arg arg arg arg arg Compare Assign Assign Call For Call If Compare Call Assign Call Compare Call Call Call For Call Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_get_metric_histogram",
    "source_code": "def _get_metric_histogram(histogram_proto):\n    ret = dict()\n    ret['min'] = histogram_proto.min\n    ret['max'] = histogram_proto.max\n    ret['num'] = histogram_proto.num\n    ret['sum'] = histogram_proto.sum\n    bucket_limits = histogram_proto.bucket_limit\n    bucket_vals = histogram_proto.bucket\n    ret['histogram'] = {}\n    bucket_limits.insert(0, 0)\n    for lb, ub, val in zip(bucket_limits[:-1], bucket_limits[1:], bucket_vals):\n        ret['histogram'][lb, ub] = val\n    return ret",
    "docstring": "Convert a histogram proto into a dict. Args: histogram_proto: a proto containing a Sampler metric's result histogram. Returns: A dict containing summary statistics and the raw histogram values.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\coordinator\\metric_utils.py",
    "ast_data": "FunctionDef name:_get_metric_histogram arg:histogram_proto arguments arg Assign Call Assign Assign Assign Assign Assign Assign Assign Call For Call Assign Return return:yes"
  },
  {
    "library": "cherrypy",
    "name": "namespace_handler",
    "source_code": "def namespace_handler(self, k, v):\n    if k == 'pipeline':\n        self.pipeline.extend(v)\n    elif k == 'response_class':\n        self.response_class = v\n    else:\n        name, arg = k.split('.', 1)\n        bucket = self.config.setdefault(name, {})\n        bucket[arg] = v",
    "docstring": "Config handler for the 'wsgi' namespace.",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\_cpwsgi.py",
    "ast_data": "FunctionDef name:namespace_handler arg:self arg:k arg:v arguments arg arg arg If Compare Call If Compare Assign Assign Call Assign Call Assign"
  },
  {
    "library": "pandas",
    "name": "na_value_for_dtype",
    "source_code": "def na_value_for_dtype(dtype: DtypeObj, compat: bool=True):\n    if isinstance(dtype, ExtensionDtype):\n        return dtype.na_value\n    elif dtype.kind in 'mM':\n        unit = np.datetime_data(dtype)[0]\n        return dtype.type('NaT', unit)\n    elif dtype.kind in 'fc':\n        return np.nan\n    elif dtype.kind in 'iu':\n        if compat:\n            return 0\n        return np.nan\n    elif dtype.kind == 'b':\n        if compat:\n            return False\n        return np.nan\n    return np.nan",
    "docstring": "Return a dtype compat na value Parameters ---------- dtype : string / dtype compat : bool, default True Returns ------- np.dtype or a pandas dtype Examples -------- >>> na_value_for_dtype(np.dtype(\"int64\")) 0 >>> na_value_for_dtype(np.dtype(\"int64\"), compat=False) nan >>> na_value_for_dtype(np.dtype(\"float64\")) nan >>> na_value_for_dtype(np.dtype(\"complex128\")) nan >>> na_value_for_dtype(np.dtype(\"bool\")) False >>> na_value_for_dtype(np.dtype(\"datetime64[ns]\")) np.datetime64('NaT')",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\dtypes\\missing.py",
    "ast_data": "FunctionDef name:na_value_for_dtype arg:dtype arg:compat arguments arg arg If Call Return return:yes If Compare Assign Call Return return:yes Call If Compare Return return:yes If Compare If Return return:yes Return return:yes If Compare If Return return:yes Return return:yes Return return:yes"
  },
  {
    "library": "pygame",
    "name": "update",
    "source_code": "def update(self, *args, **kwargs):\n    pass",
    "docstring": "method to control sprite behavior Sprite.update(*args, **kwargs): The default implementation of this method does nothing; it's just a convenient \"hook\" that you can override. This method is called by Group.update() with whatever arguments you give it. There is no need to use this method if not using the convenience method by the same name in the Group class.",
    "type": "method",
    "file_path": "pygame\\src_py\\sprite.py",
    "ast_data": "FunctionDef name:update arg:self arguments arg arg arg"
  },
  {
    "library": "django",
    "name": "SplitDateTimeWidget",
    "source_code": "class SplitDateTimeWidget(MultiWidget):\n    supports_microseconds = False\n    template_name = 'django/forms/widgets/splitdatetime.html'\n\n    def __init__(self, attrs=None, date_format=None, time_format=None, date_attrs=None, time_attrs=None):\n        widgets = (DateInput(attrs=attrs if date_attrs is None else date_attrs, format=date_format), TimeInput(attrs=attrs if time_attrs is None else time_attrs, format=time_format))\n        super().__init__(widgets)\n\n    def decompress(self, value):\n        if value:\n            value = to_current_timezone(value)\n            return [value.date(), value.time()]\n        return [None, None]",
    "docstring": "A widget that splits datetime input into two boxes.",
    "type": "class",
    "file_path": "django\\django\\forms\\widgets.py",
    "ast_data": "ClassDef name:SplitDateTimeWidget Assign Assign FunctionDef name:__init__ arg:self arg:attrs arg:date_format arg:time_format arg:date_attrs arg:time_attrs arguments arg arg arg arg arg arg Assign Call Compare Call Compare Call Call FunctionDef name:decompress arg:self arg:value arguments arg arg If Assign Call Return return:yes Call Call Return return:no"
  },
  {
    "library": "scipy",
    "name": "_onenormest_matrix_power",
    "source_code": "def _onenormest_matrix_power(A, p, t=2, itmax=5, compute_v=False, compute_w=False, structure=None):\n    return scipy.sparse.linalg.onenormest(MatrixPowerOperator(A, p, structure=structure))",
    "docstring": "Efficiently estimate the 1-norm of A^p. Parameters ---------- A : ndarray Matrix whose 1-norm of a power is to be computed. p : int Non-negative integer power. t : int, optional A positive parameter controlling the tradeoff between accuracy versus time and memory usage. Larger values take longer and use more memory but give more accurate output. itmax : int, optional Use at most this many iterations. compute_v : bool, optional Request a norm-maximizing linear operator input vector if True. compute_w : bool, optional Request a norm-maximizing linear operator output vector if True. Returns ------- est : float An underestimate of the 1-norm of the sparse arrays. v : ndarray, optional The vector such that ||Av||_1 == est*||v||_1. It can be thought of as an input to the linear operator that gives an output with particularly large norm. w : ndarray, optional The vector Av which has relatively large 1-norm. It can be thought of as an output of the linear operator that is relatively large in norm compared to the input.",
    "type": "function",
    "file_path": "scipy\\scipy\\sparse\\linalg\\_matfuncs.py",
    "ast_data": "FunctionDef name:_onenormest_matrix_power arg:A arg:p arg:t arg:itmax arg:compute_v arg:compute_w arg:structure arguments arg arg arg arg arg arg arg Return return:yes Call Call"
  },
  {
    "library": "kornia",
    "name": "y",
    "source_code": "@property\ndef y(self) -> Tensor:\n    return self.data[..., 2]",
    "docstring": "Return the :math: with shape :math:.",
    "type": "method",
    "file_path": "kornia\\kornia\\geometry\\quaternion.py",
    "ast_data": "FunctionDef name:y arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "cross",
    "source_code": "@tf_export('ragged.cross')\n@dispatch.add_dispatch_support\ndef cross(inputs, name=None):\n    return _cross_internal(inputs=inputs, hashed_output=False, name=name)",
    "docstring": "Generates feature cross from a list of tensors. The input tensors must have , and must all have the same number of rows. The result is a with the same number of rows as the inputs, where contains a list of all combinations of values formed by taking a single value from each input's corresponding row (). Values are combined by joining their strings with '_X_'. E.g.: >>> tf.ragged.cross([tf.ragged.constant([['a'], ['b', 'c']]), ... tf.ragged.constant([['d'], ['e']]), ... tf.ragged.constant([['f'], ['g']])]) Args: inputs: A list of or or . name: Optional name for the op. Returns: A 2D of type .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\ragged_array_ops.py",
    "ast_data": "FunctionDef name:cross arg:inputs arg:name arguments arg arg Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "do_encode",
    "source_code": "def do_encode(self, bounded_tensor_spec_value, encode_fn):\n    encoded_bounded_tensor_spec = struct_pb2.StructuredValue()\n    encoded_bounded_tensor_spec.bounded_tensor_spec_value.CopyFrom(struct_pb2.BoundedTensorSpecProto(shape=encode_fn(bounded_tensor_spec_value.shape).tensor_shape_value, dtype=encode_fn(bounded_tensor_spec_value.dtype).tensor_dtype_value, name=bounded_tensor_spec_value.name, minimum=tensor_util.make_tensor_proto(bounded_tensor_spec_value.minimum), maximum=tensor_util.make_tensor_proto(bounded_tensor_spec_value.maximum)))\n    return encoded_bounded_tensor_spec",
    "docstring": "Returns an encoded proto for the given .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\tensor.py",
    "ast_data": "FunctionDef name:do_encode arg:self arg:bounded_tensor_spec_value arg:encode_fn arguments arg arg arg Assign Call Call Call Call Call Call Call Return return:yes"
  },
  {
    "library": "django",
    "name": "add_index",
    "source_code": "def add_index(self, model, index):\n    if index.contains_expressions and (not self.connection.features.supports_expression_indexes):\n        return None\n    self.execute(index.create_sql(model, self), params=None)",
    "docstring": "Add an index on a model.",
    "type": "method",
    "file_path": "django\\django\\db\\backends\\base\\schema.py",
    "ast_data": "FunctionDef name:add_index arg:self arg:model arg:index arguments arg arg arg If BoolOp Return return:no Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_NotOkStatusException",
    "source_code": "class _NotOkStatusException(Exception):\n\n    def __init__(self, message, code, payloads):\n        super(_NotOkStatusException, self).__init__()\n        self.message = message\n        self.code = code\n        self.payloads = payloads\n\n    def __str__(self):\n        e = _status_to_exception(self)\n        return '%s: %s' % (e.__class__.__name__, e)",
    "docstring": "Exception class to handle not ok Status.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\core.py",
    "ast_data": "ClassDef name:_NotOkStatusException FunctionDef name:__init__ arg:self arg:message arg:code arg:payloads arguments arg arg arg arg Call Call Assign Assign Assign FunctionDef name:__str__ arg:self arguments arg Assign Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "all_gather_tensor",
    "source_code": "def all_gather_tensor(self: torch.Tensor, gather_dim: int, group: RANK_TYPES, tag: str='') -> torch.Tensor:\n    assert self.is_contiguous()\n    group_name = _resolve_group_name(group, tag)\n    group_size = c10d._get_group_size_by_name(group_name)\n    tensor = torch.ops._c10d_functional.all_gather_into_tensor(self, group_size, group_name)\n    res = _maybe_wrap_tensor(tensor)\n    if gather_dim != 0:\n        if isinstance(res, AsyncCollectiveTensor):\n            res = res.wait()\n        res = torch.cat(torch.chunk(res, group_size, dim=0), dim=gather_dim)\n    return res",
    "docstring": "Gather tensor data across from all machines and concatenate over ``. Note that it currently only supports gather_dim = 0. The input tensor is left unmodified. Group can be one of: List[int]: ranks participating in the collective. List[List[int]]: 2D mesh of ranks taking part of this collective in MPMD. ProcessGroup: Will perform a collective using the ranks and tag of the PG. DeviceMesh: Do a SPMD collective over all ranks of the mesh (DeviceMesh, int): Do a MPMD collective over one dimension of the DeviceMesh :: N.B. If you pass a PG or a 1D list to perform a MPMD collective, the compiler won't be able to recover that information and perform collective algebraic optimization. Use other forms of input for that.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\_functional_collectives.py",
    "ast_data": "FunctionDef name:all_gather_tensor arg:self arg:gather_dim arg:group arg:tag arguments arg arg arg arg Call Assign Call Assign Call Assign Call Assign Call If Compare If Call Assign Call Assign Call Call Return return:yes"
  },
  {
    "library": "kornia",
    "name": "_batch_polygons",
    "source_code": "def _batch_polygons(polygons: List[Tensor]) -> Tensor:\n    B, N = (len(polygons), len(max(polygons, key=len)))\n    batched_polygons = torch.zeros(B, N, 2, dtype=polygons[0].dtype, device=polygons[0].device)\n    for b, p in enumerate(polygons):\n        batched_polygons[b] = torch.cat((p, p[-1:].expand(N - len(p), 2))) if len(p) < N else p\n    return batched_polygons",
    "docstring": "Convert a List of variable length polygons into a fixed size tensor. Works by repeating the last element in the tensor. Args: polygons: List of variable length polygons of shape [N_1 x 2, N_2 x 2, ..., N_B x 2]. B is the batch size, N_i is the number of points, 2 is (x, y). Returns: A fixed size tensor of shape (B, N, 2) where N = max_i(N_i)",
    "type": "function",
    "file_path": "kornia\\kornia\\utils\\draw.py",
    "ast_data": "FunctionDef name:_batch_polygons arg:polygons arguments arg Assign Call Call Call Assign Call For Call Assign Compare Call Call Call Call Return return:yes"
  },
  {
    "library": "numpy",
    "name": "_get_coefficients",
    "source_code": "def _get_coefficients(self, other):\n    if isinstance(other, ABCPolyBase):\n        if not isinstance(other, self.__class__):\n            raise TypeError('Polynomial types differ')\n        elif not np.all(self.domain == other.domain):\n            raise TypeError('Domains differ')\n        elif not np.all(self.window == other.window):\n            raise TypeError('Windows differ')\n        elif self.symbol != other.symbol:\n            raise ValueError('Polynomial symbols differ')\n        return other.coef\n    return other",
    "docstring": "Interpret other as polynomial coefficients. The argument is checked to see if it is of the same class as self with identical domain and window. If so, return its coefficients, otherwise return . Parameters ---------- other : anything Object to be checked. Returns ------- coef The coefficients of if it is a compatible instance, of ABCPolyBase, otherwise . Raises ------ TypeError When is an incompatible instance of ABCPolyBase.",
    "type": "method",
    "file_path": "numpy\\numpy\\polynomial\\_polybase.py",
    "ast_data": "FunctionDef name:_get_coefficients arg:self arg:other arguments arg arg If Call If Call Raise Call If Call Compare Raise Call If Call Compare Raise Call If Compare Raise Call Return return:yes Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "batch_shuffle",
    "source_code": "def batch_shuffle(index_array, batch_size):\n    batch_count = int(len(index_array) / batch_size)\n    last_batch = index_array[batch_count * batch_size:]\n    index_array = index_array[:batch_count * batch_size]\n    index_array = index_array.reshape((batch_count, batch_size))\n    np.random.shuffle(index_array)\n    index_array = index_array.flatten()\n    return np.append(index_array, last_batch)",
    "docstring": "Shuffles an array in a batch-wise fashion. Useful for shuffling HDF5 arrays (where one cannot access arbitrary indices). Args: index_array: array of indices to be shuffled. batch_size: integer. Returns: The array, shuffled in a batch-wise fashion.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\training_utils_v1.py",
    "ast_data": "FunctionDef name:batch_shuffle arg:index_array arg:batch_size arguments arg arg Assign Call Call Assign Assign Assign Call Call Assign Call Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "_update",
    "source_code": "def _update(self, event):\n    if self.ignore(event) or event.button != 1:\n        return\n    if event.name == 'button_press_event' and self.ax.contains(event)[0]:\n        self.drag_active = True\n        event.canvas.grab_mouse(self.ax)\n    if not self.drag_active:\n        return\n    if event.name == 'button_release_event' or (event.name == 'button_press_event' and (not self.ax.contains(event)[0])):\n        self.drag_active = False\n        event.canvas.release_mouse(self.ax)\n        self._active_handle = None\n        return\n    xdata, ydata = self._get_data_coords(event)\n    handle_index = np.argmin(np.abs([h.get_xdata()[0] - xdata for h in self._handles] if self.orientation == 'horizontal' else [h.get_ydata()[0] - ydata for h in self._handles]))\n    handle = self._handles[handle_index]\n    if handle is not self._active_handle:\n        self._active_handle = handle\n    self._update_val_from_pos(xdata if self.orientation == 'horizontal' else ydata)",
    "docstring": "Update the slider position.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\widgets.py",
    "ast_data": "FunctionDef name:_update arg:self arg:event arguments arg arg If BoolOp Call Compare Return return:no If BoolOp Compare Call Assign Call If Return return:no If BoolOp Compare BoolOp Compare Call Assign Call Assign Return return:no Assign Call Assign Call Call Compare Call Call Assign If Compare Assign Call Compare"
  },
  {
    "library": "tensorflow",
    "name": "_collect_leaf_level_keys",
    "source_code": "def _collect_leaf_level_keys(cross):\n    leaf_level_keys = []\n    for k in cross.keys:\n        if isinstance(k, CrossedColumn):\n            leaf_level_keys.extend(_collect_leaf_level_keys(k))\n        else:\n            leaf_level_keys.append(k)\n    return leaf_level_keys",
    "docstring": "Collects base keys by expanding all nested crosses. Args: cross: A . Returns: A list of strings or instances.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\feature_column\\feature_column_v2.py",
    "ast_data": "FunctionDef name:_collect_leaf_level_keys arg:cross arguments arg Assign For If Call Call Call Call Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "predict_proba",
    "source_code": "def predict_proba(self, X):\n    return np.exp(self.predict_log_proba(X))",
    "docstring": "Estimate class probabilities. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) Input data. Returns ------- y_proba : ndarray of shape (n_samples, n_classes) Probability estimate of the sample for each class in the model, where classes are ordered as they are in .",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\discriminant_analysis.py",
    "ast_data": "FunctionDef name:predict_proba arg:self arg:X arguments arg arg Return return:yes Call Call"
  },
  {
    "library": "kornia",
    "name": "BgrToRgb",
    "source_code": "class BgrToRgb(Module):\n    ONNX_DEFAULT_INPUTSHAPE: ClassVar[list[int]] = [-1, 3, -1, -1]\n    ONNX_DEFAULT_OUTPUTSHAPE: ClassVar[list[int]] = [-1, 3, -1, -1]\n\n    def forward(self, image: Tensor) -> Tensor:\n        return bgr_to_rgb(image)",
    "docstring": "Convert image from BGR to RGB. The image data is assumed to be in the range of (0, 1). Returns: RGB version of the image. Shape: - image: :math: - output: :math: Example: >>> input = torch.rand(2, 3, 4, 5) >>> rgb = BgrToRgb() >>> output = rgb(input) # 2x3x4x5",
    "type": "class",
    "file_path": "kornia\\kornia\\color\\rgb.py",
    "ast_data": "ClassDef name:BgrToRgb FunctionDef name:forward arg:self arg:image arguments arg arg Return return:yes Call"
  },
  {
    "library": "django",
    "name": "check_url_namespaces_unique",
    "source_code": "@register(Tags.urls)\ndef check_url_namespaces_unique(app_configs, **kwargs):\n    if not getattr(settings, 'ROOT_URLCONF', None):\n        return []\n    from django.urls import get_resolver\n    resolver = get_resolver()\n    all_namespaces = _load_all_namespaces(resolver)\n    counter = Counter(all_namespaces)\n    non_unique_namespaces = [n for n, count in counter.items() if count > 1]\n    errors = []\n    for namespace in non_unique_namespaces:\n        errors.append(Warning(\"URL namespace '{}' isn't unique. You may not be able to reverse all URLs in this namespace\".format(namespace), id='urls.W005'))\n    return errors",
    "docstring": "Warn if URL namespaces used in applications aren't unique.",
    "type": "function",
    "file_path": "django\\django\\core\\checks\\urls.py",
    "ast_data": "FunctionDef name:check_url_namespaces_unique arg:app_configs arguments arg arg If Call Return return:no Assign Call Assign Call Assign Call Assign Call Compare Assign For Call Call Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "cluster_spec",
    "source_code": "def cluster_spec(self):\n    if self._tpu != 'local':\n        network_endpoints = self._cloud_tpu_client.network_endpoints()\n        worker_list = ['%s:%s' % (endpoint['ipAddress'], endpoint['port']) for endpoint in network_endpoints]\n        cluster_spec = {self.task_type: worker_list}\n        if self._coordinator_address:\n            cluster_spec[self._coordinator_name] = [self._coordinator_address]\n        return server_lib.ClusterSpec(cluster_spec)\n    else:\n        return server_lib.ClusterSpec({})",
    "docstring": "Returns a ClusterSpec object based on the latest TPU information. We retrieve the information from the GCE APIs every time this method is called. Returns: A ClusterSpec containing host information returned from Cloud TPUs, or None. Raises: RuntimeError: If the provided TPU is not healthy.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\cluster_resolver\\tpu\\tpu_cluster_resolver.py",
    "ast_data": "FunctionDef name:cluster_spec arg:self arguments arg If Compare Assign Call Assign Assign If Assign Return return:yes Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_TreeEnsembleSavable",
    "source_code": "class _TreeEnsembleSavable(saver.BaseSaverBuilder.SaveableObject):\n\n    def __init__(self, resource_handle, create_op, name):\n        stamp_token, serialized = gen_boosted_trees_ops.boosted_trees_serialize_ensemble(resource_handle)\n        slice_spec = ''\n        specs = [saver.BaseSaverBuilder.SaveSpec(stamp_token, slice_spec, name + '_stamp'), saver.BaseSaverBuilder.SaveSpec(serialized, slice_spec, name + '_serialized')]\n        super(_TreeEnsembleSavable, self).__init__(resource_handle, specs, name)\n        self.resource_handle = resource_handle\n        self._create_op = create_op\n\n    def restore(self, restored_tensors, unused_restored_shapes):\n        with ops.control_dependencies([self._create_op]):\n            return gen_boosted_trees_ops.boosted_trees_deserialize_ensemble(self.resource_handle, stamp_token=restored_tensors[0], tree_ensemble_serialized=restored_tensors[1])",
    "docstring": "SaveableObject implementation for TreeEnsemble.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\boosted_trees_ops.py",
    "ast_data": "ClassDef name:_TreeEnsembleSavable FunctionDef name:__init__ arg:self arg:resource_handle arg:create_op arg:name arguments arg arg arg arg Assign Call Assign Assign Call Call Call Call Assign Assign FunctionDef name:restore arg:self arg:restored_tensors arg:unused_restored_shapes arguments arg arg arg With Call Return return:yes Call"
  },
  {
    "library": "numpy",
    "name": "compress",
    "source_code": "def compress(self, condition, axis=None, out=None):\n    _data, _mask = (self._data, self._mask)\n    condition = np.asarray(condition)\n    _new = _data.compress(condition, axis=axis, out=out).view(type(self))\n    _new._update_from(self)\n    if _mask is not nomask:\n        _new._mask = _mask.compress(condition, axis=axis)\n    return _new",
    "docstring": "Return where condition is `~ma.MaskedArray~ma.MaskedArraycompressedcompresscompressed` does not. Examples -------- >>> import numpy as np >>> x = np.ma.array([[1,2,3],[4,5,6],[7,8,9]], mask=[0] + [1,0]*4) >>> x masked_array( data=[[1, --, 3], [--, 5, --], [7, --, 9]], mask=[[False, True, False], [ True, False, True], [False, True, False]], fill_value=999999) >>> x.compress([1, 0, 1]) masked_array(data=[1, 3], mask=[False, False], fill_value=999999) >>> x.compress([1, 0, 1], axis=1) masked_array( data=[[1, 3], [--, --], [7, 9]], mask=[[False, False], [ True, True], [False, False]], fill_value=999999)",
    "type": "method",
    "file_path": "numpy\\numpy\\ma\\core.py",
    "ast_data": "FunctionDef name:compress arg:self arg:condition arg:axis arg:out arguments arg arg arg arg Assign Assign Call Assign Call Call Call Call If Compare Assign Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "FileBackedGraphModule",
    "source_code": "@dataclasses.dataclass\nclass FileBackedGraphModule:\n    gm: GraphModule\n    compiled_fn: Callable[..., Any]\n\n    def __post_init__(self) -> None:\n        self.tempfile = tempfile.NamedTemporaryFile(mode='w+', suffix='.py', delete=False)\n        atexit.register(os.remove, self.tempfile.name)\n        with self.tempfile as f:\n            f.write(self.value)\n\n    @property\n    def __file__(self) -> str:\n        return self.tempfile.name\n\n    def call(self, args: list[Any]) -> Any:\n        return self.compiled_fn(*args)\n\n    @property\n    def value(self) -> str:\n        return self.gm.code",
    "docstring": "Output of FX wrapper codegen. Exposes the same methods as ModuleType, but these map back to a GraphModule instead of Python source.",
    "type": "class",
    "file_path": "pytorch\\torch\\_inductor\\codegen\\common.py",
    "ast_data": "ClassDef name:FileBackedGraphModule FunctionDef name:__post_init__ arg:self arguments arg Assign Call Call With Call FunctionDef name:__file__ arg:self arguments arg Return return:yes FunctionDef name:call arg:self arg:args arguments arg arg Return return:yes Call FunctionDef name:value arg:self arguments arg Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "Reference",
    "source_code": "class Reference:\n\n    def __init__(self, id):\n        self.id = id\n\n    def __repr__(self):\n        return '<Reference %d>' % self.id\n\n    def pdfRepr(self):\n        return b'%d 0 R' % self.id\n\n    def write(self, contents, file):\n        write = file.write\n        write(b'%d 0 obj\\n' % self.id)\n        write(pdfRepr(contents))\n        write(b'\\nendobj\\n')",
    "docstring": "PDF reference object. Use PdfFile.reserveObject() to create References.",
    "type": "class",
    "file_path": "matplotlib\\lib\\matplotlib\\backends\\backend_pdf.py",
    "ast_data": "ClassDef name:Reference FunctionDef name:__init__ arg:self arg:id arguments arg arg Assign FunctionDef name:__repr__ arg:self arguments arg Return return:yes FunctionDef name:pdfRepr arg:self arguments arg Return return:yes FunctionDef name:write arg:self arg:contents arg:file arguments arg arg arg Assign Call Call Call Call"
  },
  {
    "library": "kornia",
    "name": "pixel2cam",
    "source_code": "def pixel2cam(depth: Tensor, intrinsics_inv: Tensor, pixel_coords: Tensor) -> Tensor:\n    if not len(depth.shape) == 4 and depth.shape[1] == 1:\n        raise ValueError(f'Input depth has to be in the shape of Bx1xHxW. Got {depth.shape}')\n    if not len(intrinsics_inv.shape) == 3:\n        raise ValueError(f'Input intrinsics_inv has to be in the shape of Bx4x4. Got {intrinsics_inv.shape}')\n    if not len(pixel_coords.shape) == 4 and pixel_coords.shape[3] == 3:\n        raise ValueError(f'Input pixel_coords has to be in the shape of BxHxWx3. Got {intrinsics_inv.shape}')\n    cam_coords: Tensor = transform_points(intrinsics_inv[:, None], pixel_coords)\n    return cam_coords * depth.permute(0, 2, 3, 1)",
    "docstring": "Transform coordinates in the pixel frame to the camera frame. Args: depth: the source depth maps. Shape must be Bx1xHxW. intrinsics_inv: the inverse intrinsics camera matrix. Shape must be Bx4x4. pixel_coords: the grid with (u, v, 1) pixel coordinates. Shape must be BxHxWx3. Returns: tensor of shape BxHxWx3 with (x, y, z) cam coordinates.",
    "type": "function",
    "file_path": "kornia\\kornia\\geometry\\camera\\pinhole.py",
    "ast_data": "FunctionDef name:pixel2cam arg:depth arg:intrinsics_inv arg:pixel_coords arguments arg arg arg If BoolOp Compare Call Compare Raise Call If Compare Call Raise Call If BoolOp Compare Call Compare Raise Call Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "_get_kill_signal",
    "source_code": "def _get_kill_signal() -> signal.Signals:\n    if IS_WINDOWS:\n        return signal.CTRL_C_EVENT\n    else:\n        return signal.SIGKILL",
    "docstring": "Get the kill signal. SIGKILL for unix, CTRL_C_EVENT for windows.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\elastic\\multiprocessing\\api.py",
    "ast_data": "FunctionDef name:_get_kill_signal arguments If Return return:yes Return return:yes"
  },
  {
    "library": "scipy",
    "name": "isshape",
    "source_code": "def isshape(x, nonneg=False, *, allow_nd=(2,)) -> bool:\n    ndim = len(x)\n    if ndim not in allow_nd:\n        return False\n    for d in x:\n        if not isintlike(d):\n            return False\n        if nonneg and d < 0:\n            return False\n    return True",
    "docstring": "Is x a valid tuple of dimensions? If nonneg, also checks that the dimensions are non-negative. Shapes of length in the tuple allow_nd are allowed.",
    "type": "function",
    "file_path": "scipy\\scipy\\sparse\\_sputils.py",
    "ast_data": "FunctionDef name:isshape arg:x arg:nonneg arguments arg arg arg Assign Call If Compare Return return:yes For If Call Return return:yes If BoolOp Compare Return return:yes Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "count_params",
    "source_code": "@doc_controls.do_not_generate_docs\ndef count_params(x):\n    return np.prod(x.shape.as_list())",
    "docstring": "Returns the static number of elements in a variable or tensor. Args: x: Variable or tensor. Returns: Integer, the number of scalars in . Example: >>> kvar = tf.keras.backend.zeros((2,3)) >>> tf.keras.backend.count_params(kvar) 6 >>> tf.keras.backend.eval(kvar) array([[0., 0., 0.], [0., 0., 0.]], dtype=float32)",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\backend.py",
    "ast_data": "FunctionDef name:count_params arg:x arguments arg Return return:yes Call Call"
  },
  {
    "library": "scipy",
    "name": "phase_shift",
    "source_code": "@property\ndef phase_shift(self) -> int | None:\n    return self._phase_shift",
    "docstring": "If set, add linear phase / * to each FFT slice of frequency . Shifting (more precisely ) an -point FFT input by samples results in a multiplication of the output by `delta_fphase_shiftmfft`. See Also -------- delta_f: Width of the frequency bins of the STFT. f: Frequencies values of the STFT. mfft: Length of input for the FFT used ShortTimeFFT: Class this property belongs to.",
    "type": "method",
    "file_path": "scipy\\scipy\\signal\\_short_time_fft.py",
    "ast_data": "FunctionDef name:phase_shift arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_shard_info_str",
    "source_code": "def _shard_info_str(shape, shard_info) -> str:\n    full_shape_str = ' '.join(('%d' % d for d in shape)) + ' '\n    slice_spec = ':'.join(('%d,%d' % (o, s) for o, s in zip(shard_info.offset, shard_info.shape)))\n    return full_shape_str + slice_spec",
    "docstring": "Created shape and shard_info string.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\tpu\\tpu_embedding_v3_checkpoint_adapter.py",
    "ast_data": "FunctionDef name:_shard_info_str arg:shape arg:shard_info arguments arg arg Assign Call Assign Call Call Return return:yes"
  },
  {
    "library": "django",
    "name": "projected",
    "source_code": "@property\ndef projected(self):\n    return self.srs.projected",
    "docstring": "Is this Spatial Reference projected?",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\db\\backends\\base\\models.py",
    "ast_data": "FunctionDef name:projected arg:self arguments arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_insert_copy_of_subgraph_a_after_input_node_c",
    "source_code": "def _insert_copy_of_subgraph_a_after_input_node_c(input_node_c: Union[Node, list[Node]], input_node_c_2: Optional[Union[Node, list[Node]]], subgraph_a: NSSubgraph, gm_a: GraphModule, gm_b: GraphModule, node_name_prefix: str) -> Node:\n    assert isinstance(input_node_c, (Node, list))\n    nodes_of_a = [subgraph_a.end_node]\n    cur_node = subgraph_a.end_node\n    while cur_node != subgraph_a.start_node:\n        cur_node = get_normalized_nth_input(cur_node, gm_a, 0)\n        nodes_of_a.insert(0, cur_node)\n    cur_node_a = nodes_of_a[0]\n    cur_node_c = _insert_copy_of_node_a_after_input_node_c(input_node_c, input_node_c_2, cur_node_a, gm_a, gm_b, node_name_prefix)\n    for cur_idx_a in range(1, len(nodes_of_a)):\n        cur_node_a = nodes_of_a[cur_idx_a]\n        prev_node_c = cur_node_c\n        cur_node_c = _insert_copy_of_node_a_after_input_node_c(prev_node_c, None, cur_node_a, gm_a, gm_b, node_name_prefix)\n    return cur_node_c",
    "docstring": "TODO(before land): real docblock",
    "type": "function",
    "file_path": "pytorch\\torch\\ao\\ns\\fx\\graph_passes.py",
    "ast_data": "FunctionDef name:_insert_copy_of_subgraph_a_after_input_node_c arg:input_node_c arg:input_node_c_2 arg:subgraph_a arg:gm_a arg:gm_b arg:node_name_prefix arguments arg arg arg arg arg arg Call Assign Assign While Compare Assign Call Call Assign Assign Call For Call Call Assign Assign Assign Call Return return:yes"
  },
  {
    "library": "seaborn",
    "name": "_fit",
    "source_code": "def _fit(self, fit_data, weights=None):\n    fit_kws = {'bw_method': self.bw_method}\n    if weights is not None:\n        fit_kws['weights'] = weights\n    kde = gaussian_kde(fit_data, **fit_kws)\n    kde.set_bandwidth(kde.factor * self.bw_adjust)\n    return kde",
    "docstring": "Fit the scipy kde while adding bw_adjust logic and version check.",
    "type": "method",
    "file_path": "seaborn\\seaborn\\_statistics.py",
    "ast_data": "FunctionDef name:_fit arg:self arg:fit_data arg:weights arguments arg arg arg Assign If Compare Assign Assign Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "session_creator",
    "source_code": "def session_creator(self, scaffold=None, config=None, checkpoint_dir=None, checkpoint_filename_with_path=None, max_wait_secs=7200):\n    if config:\n        session_config = copy.deepcopy(config)\n        session_config.MergeFrom(self._session_config)\n    else:\n        session_config = self._session_config\n    if not self._strategy or self._strategy.extended.experimental_should_init:\n        logging.info('Creating chief session creator with config: %r', config)\n        return monitored_session.ChiefSessionCreator(scaffold, master=self.master_target, config=session_config, checkpoint_dir=checkpoint_dir, checkpoint_filename_with_path=checkpoint_filename_with_path)\n    else:\n        logging.info('Creating worker session creator with config: %r', config)\n        return monitored_session.WorkerSessionCreator(scaffold, master=self.master_target, config=session_config, max_wait_secs=max_wait_secs)",
    "docstring": "Returns a session creator. The returned session creator will be configured with the correct master target and session configs. It will also run either init ops or ready ops by querying the object when is called on it. Args: scaffold: A used for gathering or building supportive ops. If not specified a default one is created. It's used to finalize the graph. config: proto used to configure the session. checkpoint_dir: A string. Optional path to a directory where to restore variables. checkpoint_filename_with_path: Full file name path to the checkpoint file. Only one of or can be specified. max_wait_secs: Maximum time to wait for the session to become available. Returns: a descendant of SessionCreator.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\distribute_coordinator.py",
    "ast_data": "FunctionDef name:session_creator arg:self arg:scaffold arg:config arg:checkpoint_dir arg:checkpoint_filename_with_path arg:max_wait_secs arguments arg arg arg arg arg arg If Assign Call Call Assign If BoolOp Call Return return:yes Call Call Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "logcdf",
    "source_code": "def logcdf(self, x, *args, **kwds):\n    args, loc, scale = self._parse_args(*args, **kwds)\n    x, loc, scale = map(asarray, (x, loc, scale))\n    args = tuple(map(asarray, args))\n    _a, _b = self._get_support(*args)\n    dtyp = np.promote_types(x.dtype, np.float64)\n    x = np.asarray((x - loc) / scale, dtype=dtyp)\n    cond0 = self._argcheck(*args) & (scale > 0)\n    cond1 = self._open_support_mask(x, *args) & (scale > 0)\n    cond2 = (x >= _b) & cond0\n    cond = cond0 & cond1\n    output = empty(shape(cond), dtyp)\n    output.fill(-inf)\n    place(output, (1 - cond0) * (cond1 == cond1) + np.isnan(x), self.badvalue)\n    place(output, cond2, 0.0)\n    if np.any(cond):\n        goodargs = argsreduce(cond, *(x,) + args)\n        place(output, cond, self._logcdf(*goodargs))\n    if output.ndim == 0:\n        return output[()]\n    return output",
    "docstring": "Log of the cumulative distribution function at x of the given RV. Parameters ---------- x : array_like quantiles arg1, arg2, arg3,... : array_like The shape parameter(s) for the distribution (see docstring of the instance object for more information) loc : array_like, optional location parameter (default=0) scale : array_like, optional scale parameter (default=1) Returns ------- logcdf : array_like Log of the cumulative distribution function evaluated at x",
    "type": "method",
    "file_path": "scipy\\scipy\\stats\\_distn_infrastructure.py",
    "ast_data": "FunctionDef name:logcdf arg:self arg:x arguments arg arg arg arg Assign Call Assign Call Assign Call Call Assign Call Assign Call Assign Call Assign Call Compare Assign Call Compare Assign Compare Assign Assign Call Call Call Call Compare Call Call If Call Assign Call Call Call If Compare Return return:yes Return return:yes"
  },
  {
    "library": "django",
    "name": "BadSerializer",
    "source_code": "class BadSerializer:\n    internal_use_only = False\n\n    def __init__(self, exception):\n        self.exception = exception\n\n    def __call__(self, *args, **kwargs):\n        raise self.exception",
    "docstring": "Stub serializer to hold exception raised during registration This allows the serializer registration to cache serializers and if there is an error raised in the process of creating a serializer it will be raised and passed along to the caller when the serializer is used.",
    "type": "class",
    "file_path": "django\\django\\core\\serializers\\__init__.py",
    "ast_data": "ClassDef name:BadSerializer Assign FunctionDef name:__init__ arg:self arg:exception arguments arg arg Assign FunctionDef name:__call__ arg:self arguments arg arg arg Raise"
  },
  {
    "library": "tensorflow",
    "name": "inputs",
    "source_code": "@property\ndef inputs(self) -> Sequence[tensor_lib.Tensor]:\n    if self._inputs_val is None:\n        self._inputs_val = tuple((self.graph._get_tensor_by_tf_output(i) for i in pywrap_tf_session.GetOperationInputs(self._c_op)))\n    return self._inputs_val",
    "docstring": "The sequence of objects representing the data inputs of this op.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\ops.py",
    "ast_data": "FunctionDef name:inputs arg:self arguments arg If Compare Assign Call Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "__tf_flatten__",
    "source_code": "def __tf_flatten__(self):\n    pass",
    "docstring": "Flatten current object into (metadata, components). Returns: A of (metadata, components), where - metadata is a custom Python object that stands for the static config of the current object, which is supposed to be fixed and not affected by data transformation. - components is a that contains the modifiable fields of the current object. Implementation Note: - This method should not invoke any TensorFlow ops. - This method only needs to flatten the current level. If current object has an attribute that also need custom flattening, nest functions (such as ) will utilize this method to do recursive flattening. - Components must be a , not a",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\util\\custom_nest_protocol.py",
    "ast_data": "FunctionDef name:__tf_flatten__ arg:self arguments arg"
  },
  {
    "library": "scipy",
    "name": "getValue",
    "source_code": "def getValue(self):\n    return self.data.item()",
    "docstring": "Retrieve a scalar value from a of length one. Raises ------ ValueError If the netcdf variable is an array of length greater than one, this exception will be raised.",
    "type": "method",
    "file_path": "scipy\\scipy\\io\\_netcdf.py",
    "ast_data": "FunctionDef name:getValue arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "blockwise_all",
    "source_code": "def blockwise_all(left: BlockManager, right: BlockManager, op) -> bool:\n    for info in _iter_block_pairs(left, right):\n        res = op(info.lvals, info.rvals)\n        if not res:\n            return False\n    return True",
    "docstring": "Blockwise reduction.",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\internals\\ops.py",
    "ast_data": "FunctionDef name:blockwise_all arg:left arg:right arg:op arguments arg arg arg For Call Assign Call If Return return:yes Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "from_qualified_name",
    "source_code": "@classmethod\ndef from_qualified_name(cls, qualified_name: str) -> OpName:\n    namespace, opname_overload = qualified_name.split('::')\n    op_name, *overload = opname_overload.split('.', 1)\n    overload = overload[0] if overload else 'default'\n    return cls(namespace, op_name, overload)",
    "docstring": "When the name is ::[.]",
    "type": "method",
    "file_path": "pytorch\\torch\\onnx\\_internal\\fx\\registration.py",
    "ast_data": "FunctionDef name:from_qualified_name arg:cls arg:qualified_name arguments arg arg Assign Call Assign Call Assign Return return:yes Call"
  },
  {
    "library": "cherrypy",
    "name": "H",
    "source_code": "def H(s):\n    return md5_hex(s)",
    "docstring": "Return an `` HEX hash.",
    "type": "function",
    "file_path": "cherrypy\\cherrypy\\lib\\auth_digest.py",
    "ast_data": "FunctionDef name:H arg:s arguments arg Return return:yes Call"
  },
  {
    "library": "cherrypy",
    "name": "unsubscribe",
    "source_code": "def unsubscribe(self):\n    self.bus.unsubscribe('start', self.start)\n    self.bus.unsubscribe('stop', self.stop)",
    "docstring": "Unsubcribe control methods to the bus lifecycle events.",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\process\\servers.py",
    "ast_data": "FunctionDef name:unsubscribe arg:self arguments arg Call Call"
  },
  {
    "library": "pytorch",
    "name": "LinearReLU",
    "source_code": "class LinearReLU(nnq.Linear):\n    _FLOAT_MODULE = nni.LinearReLU\n\n    def __init__(self, in_features, out_features, bias=True, dtype=torch.qint8):\n        super().__init__(in_features, out_features, bias, dtype)\n\n    def forward(self, x: torch.Tensor) -> torch.Tensor:\n        return torch.ops.quantized.linear_relu(x, self._packed_params._packed_params, self.scale, self.zero_point)\n\n    def _get_name(self):\n        return 'QuantizedLinearReLU'\n\n    @classmethod\n    def from_float(cls, mod, use_precomputed_fake_quant=False):\n        return super().from_float(mod, use_precomputed_fake_quant)\n\n    @classmethod\n    def from_reference(cls, ref_linear_relu, output_scale, output_zero_point):\n        return super().from_reference(ref_linear_relu[0], output_scale, output_zero_point)",
    "docstring": "A LinearReLU module fused from Linear and ReLU modules We adopt the same interface as :class:. Attributes: Same as torch.ao.nn.quantized.Linear Examples:: >>> # xdoctest: +SKIP >>> m = nn.intrinsic.LinearReLU(20, 30) >>> input = torch.randn(128, 20) >>> output = m(input) >>> print(output.size()) torch.Size([128, 30])",
    "type": "class",
    "file_path": "pytorch\\torch\\ao\\nn\\intrinsic\\quantized\\modules\\linear_relu.py",
    "ast_data": "ClassDef name:LinearReLU Assign FunctionDef name:__init__ arg:self arg:in_features arg:out_features arg:bias arg:dtype arguments arg arg arg arg arg Call Call FunctionDef name:forward arg:self arg:x arguments arg arg Return return:yes Call FunctionDef name:_get_name arg:self arguments arg Return return:yes FunctionDef name:from_float arg:cls arg:mod arg:use_precomputed_fake_quant arguments arg arg arg Return return:yes Call Call FunctionDef name:from_reference arg:cls arg:ref_linear_relu arg:output_scale arg:output_zero_point arguments arg arg arg arg Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "set_rng_state",
    "source_code": "def set_rng_state(new_state: Tensor, device: Union[int, str, torch.device]='cuda') -> None:\n    if not is_initialized():\n        with torch._C._DisableFuncTorch():\n            new_state = new_state.clone(memory_format=torch.contiguous_format)\n    if isinstance(device, str):\n        device = torch.device(device)\n    elif isinstance(device, int):\n        device = torch.device('cuda', device)\n\n    def cb():\n        idx = device.index\n        if idx is None:\n            idx = current_device()\n        default_generator = torch.cuda.default_generators[idx]\n        default_generator.set_state(new_state)\n    _lazy_call(cb)",
    "docstring": "Set the random number generator state of the specified GPU. Args: new_state (torch.ByteTensor): The desired state device (torch.device or int, optional): The device to set the RNG state. Default: ``, the current CUDA device).",
    "type": "function",
    "file_path": "pytorch\\torch\\cuda\\random.py",
    "ast_data": "FunctionDef name:set_rng_state arg:new_state arg:device arguments arg arg If Call With Call Assign Call If Call Assign Call If Call Assign Call FunctionDef name:cb arguments Assign If Compare Assign Call Assign Call Call"
  },
  {
    "library": "tensorflow",
    "name": "__call__",
    "source_code": "def __call__(self, shape, dtype=dtypes.float32, **kwargs):\n    self._validate_kwargs(kwargs)\n    dtype = _assert_float_dtype(dtype)\n    if _PARTITION_SHAPE in kwargs:\n        shape = kwargs[_PARTITION_SHAPE]\n    return self._random_generator.random_normal(shape, self.mean, self.stddev, dtype)",
    "docstring": "Returns a tensor object initialized as specified by the initializer. Args: shape: Shape of the tensor. dtype: Optional dtype of the tensor. Only floating point types are supported. **kwargs: Additional keyword arguments. Raises: ValueError: If the dtype is not floating point",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\init_ops_v2.py",
    "ast_data": "FunctionDef name:__call__ arg:self arg:shape arg:dtype arguments arg arg arg arg Call Assign Call If Compare Assign Return return:yes Call"
  },
  {
    "library": "kornia",
    "name": "DropPath",
    "source_code": "class DropPath(Module):\n\n    def __init__(self, drop_prob: float=0.0, scale_by_keep: bool=True) -> None:\n        super().__init__()\n        self.drop_prob = drop_prob\n        self.scale_by_keep = scale_by_keep\n\n    def forward(self, x: Tensor) -> Tensor:\n        if self.drop_prob == 0.0 or not self.training:\n            return x\n        keep_prob = 1 - self.drop_prob\n        shape = (x.shape[0],) + (1,) * (x.ndim - 1)\n        random_tensor = x.new_empty(shape).bernoulli_(keep_prob)\n        if keep_prob > 0.0 and self.scale_by_keep:\n            random_tensor.div_(keep_prob)\n        return x * random_tensor",
    "docstring": "Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).",
    "type": "class",
    "file_path": "kornia\\kornia\\contrib\\models\\common.py",
    "ast_data": "ClassDef name:DropPath FunctionDef name:__init__ arg:self arg:drop_prob arg:scale_by_keep arguments arg arg arg Call Call Assign Assign FunctionDef name:forward arg:self arg:x arguments arg arg If BoolOp Compare Return return:yes Assign Assign Assign Call Call If BoolOp Compare Call Return return:yes"
  },
  {
    "library": "scipy",
    "name": "RosenbrockModified",
    "source_code": "class RosenbrockModified(Benchmark):\n\n    def __init__(self, dimensions=2):\n        Benchmark.__init__(self, dimensions)\n        self._bounds = list(zip([-2.0] * self.N, [2.0] * self.N))\n        self.custom_bounds = ([-1.0, 0.5], [-1.0, 1.0])\n        self.global_optimum = [[-0.90955374, -0.95057172]]\n        self.fglob = 34.040243106640844\n\n    def fun(self, x, *args):\n        self.nfev += 1\n        a = 74 + 100.0 * (x[1] - x[0] ** 2) ** 2 + (1 - x[0]) ** 2\n        a -= 400 * exp(-((x[0] + 1.0) ** 2 + (x[1] + 1.0) ** 2) / 0.1)\n        return a",
    "docstring": "Modified Rosenbrock objective function. This class defines the Modified Rosenbrock [1]_ global optimization problem. This is a multimodal minimization problem defined as follows: .. math:: f_{\\text{RosenbrockModified}}(x) = 74 + 100(x_2 - x_1^2)^2 + (1 - x_1)^2 - 400 e^{-\\frac{(x_1+1)^2 + (x_2 + 1)^2}{0.1}} Here, :math: represents the number of dimensions and :math: for :math:. *Global optimum*: :math: for :math: .. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions For Global Optimization Problems Int. Journal of Mathematical Modelling and Numerical Optimisation, 2013, 4, 150-194. TODO: We have different global minimum compared to Jamil #106. This is possibly because of the (1-x) term is using the wrong parameter.",
    "type": "class",
    "file_path": "scipy\\benchmarks\\benchmarks\\go_benchmark_functions\\go_funcs_R.py",
    "ast_data": "ClassDef name:RosenbrockModified FunctionDef name:__init__ arg:self arg:dimensions arguments arg arg Call Assign Call Call Assign Assign Assign FunctionDef name:fun arg:self arg:x arguments arg arg arg Assign Call Return return:yes"
  },
  {
    "library": "scipy",
    "name": "build_quadratic_1d",
    "source_code": "def build_quadratic_1d(J, g, s, diag=None, s0=None):\n    v = J.dot(s)\n    a = np.dot(v, v)\n    if diag is not None:\n        a += np.dot(s * diag, s)\n    a *= 0.5\n    b = np.dot(g, s)\n    if s0 is not None:\n        u = J.dot(s0)\n        b += np.dot(u, v)\n        c = 0.5 * np.dot(u, u) + np.dot(g, s0)\n        if diag is not None:\n            b += np.dot(s0 * diag, s)\n            c += 0.5 * np.dot(s0 * diag, s0)\n        return (a, b, c)\n    else:\n        return (a, b)",
    "docstring": "Parameterize a multivariate quadratic function along a line. The resulting univariate quadratic function is given as follows:: f(t) = 0.5 * (s0 + s*t).T * (J.T*J + diag) * (s0 + s*t) + g.T * (s0 + s*t) Parameters ---------- J : ndarray, sparse array or LinearOperator shape (m, n) Jacobian matrix, affects the quadratic term. g : ndarray, shape (n,) Gradient, defines the linear term. s : ndarray, shape (n,) Direction vector of a line. diag : None or ndarray with shape (n,), optional Addition diagonal part, affects the quadratic term. If None, assumed to be 0. s0 : None or ndarray with shape (n,), optional Initial point. If None, assumed to be 0. Returns ------- a : float Coefficient for t**2. b : float Coefficient for t. c : float Free term. Returned only if is provided.",
    "type": "function",
    "file_path": "scipy\\scipy\\optimize\\_lsq\\common.py",
    "ast_data": "FunctionDef name:build_quadratic_1d arg:J arg:g arg:s arg:diag arg:s0 arguments arg arg arg arg arg Assign Call Assign Call If Compare Call Assign Call If Compare Assign Call Call Assign Call Call If Compare Call Call Return return:yes Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "__init__",
    "source_code": "def __init__(self, bounds=None, inset_ax=None, zorder=None, **kwargs):\n    if bounds is None and inset_ax is None:\n        raise ValueError('At least one of bounds or inset_ax must be supplied')\n    self._inset_ax = inset_ax\n    if bounds is None:\n        self._auto_update_bounds = True\n        bounds = self._bounds_from_inset_ax()\n    else:\n        self._auto_update_bounds = False\n    x, y, width, height = bounds\n    self._rectangle = Rectangle((x, y), width, height, clip_on=False, **kwargs)\n    self._connectors = []\n    super().__init__()\n    self.set_zorder(zorder)\n    for prop in _shared_properties:\n        setattr(self, f'_{prop}', artist.getp(self._rectangle, prop))",
    "docstring": "Parameters ---------- bounds : [x0, y0, width, height], optional Lower-left corner of rectangle to be marked, and its width and height. If not set, the bounds will be calculated from the data limits of inset_ax, which must be supplied. inset_ax : , optional An optional inset Axes to draw connecting lines to. Two lines are drawn connecting the indicator box to the inset Axes on corners chosen so as to not overlap with the indicator box. zorder : float, default: 4.99 Drawing order of the rectangle and connector lines. The default, 4.99, is just below the default level of inset Axes. **kwargs Other keyword arguments are passed on to the patch.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\inset.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:bounds arg:inset_ax arg:zorder arguments arg arg arg arg arg If BoolOp Compare Compare Raise Call Assign If Compare Assign Assign Call Assign Assign Assign Call Assign Call Call Call For Call Call"
  },
  {
    "library": "kornia",
    "name": "invert",
    "source_code": "def invert(min_mag: float, max_mag: float) -> OperationBase:\n    return Invert(1.0)",
    "docstring": "Return invert op.",
    "type": "function",
    "file_path": "kornia\\kornia\\augmentation\\auto\\rand_augment\\ops.py",
    "ast_data": "FunctionDef name:invert arg:min_mag arg:max_mag arguments arg arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "generate_disj",
    "source_code": "@register_transformation_rule(Disj)\ndef generate_disj(constraint, counter):\n    new = []\n    for c in constraint.disjuncts:\n        new_c, counter = transform_constraint(c, counter)\n        new.append(new_c)\n    return (Disj(new), counter)",
    "docstring": "Transform disjunctions",
    "type": "function",
    "file_path": "pytorch\\torch\\fx\\experimental\\migrate_gradual_types\\constraint_transformation.py",
    "ast_data": "FunctionDef name:generate_disj arg:constraint arg:counter arguments arg arg Assign For Assign Call Call Return return:yes Call Call"
  },
  {
    "library": "scikit-learn",
    "name": "get_chunk_n_rows",
    "source_code": "def get_chunk_n_rows(row_bytes, *, max_n_rows=None, working_memory=None):\n    if working_memory is None:\n        working_memory = get_config()['working_memory']\n    chunk_n_rows = int(working_memory * 2 ** 20 // row_bytes)\n    if max_n_rows is not None:\n        chunk_n_rows = min(chunk_n_rows, max_n_rows)\n    if chunk_n_rows < 1:\n        warnings.warn('Could not adhere to working_memory config. Currently %.0fMiB, %.0fMiB required.' % (working_memory, np.ceil(row_bytes * 2 ** (-20))))\n        chunk_n_rows = 1\n    return chunk_n_rows",
    "docstring": "Calculate how many rows can be processed within . Parameters ---------- row_bytes : int The expected number of bytes of memory that will be consumed during the processing of each row. max_n_rows : int, default=None The maximum return value. working_memory : int or float, default=None The number of rows to fit inside this number of MiB will be returned. When None (default), the value of `working_memoryrow_bytes exceeds MiB.",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\utils\\_chunking.py",
    "ast_data": "FunctionDef name:get_chunk_n_rows arg:row_bytes arguments arg arg arg If Compare Assign Call Assign Call If Compare Assign Call If Compare Call Call Assign Return return:yes"
  },
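As a worked example of the arithmetic above (the `sklearn.utils._chunking` module is private, so the import path is version-dependent):

```python
from sklearn.utils._chunking import get_chunk_n_rows  # private module

# A float64 row with 1000 columns costs 8000 bytes; with a 64 MiB budget:
# 64 * 2**20 // 8000 == 8388 rows per chunk.
print(get_chunk_n_rows(row_bytes=8000, working_memory=64))  # 8388

# max_n_rows caps the result.
print(get_chunk_n_rows(row_bytes=8000, max_n_rows=1000, working_memory=64))  # 1000
```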
  {
    "library": "tensorflow",
    "name": "load_ast",
    "source_code": "def load_ast(nodes, indentation='  ', include_source_map=False, delete_on_exit=True):\n    if not isinstance(nodes, (list, tuple)):\n        nodes = (nodes,)\n    source = parser.unparse(nodes, indentation=indentation)\n    module, _ = load_source(source, delete_on_exit)\n    if include_source_map:\n        source_map = origin_info.create_source_map(nodes, source, module.__file__)\n    else:\n        source_map = None\n    return (module, source, source_map)",
    "docstring": "Loads the given AST as a Python module. Compiling the AST code this way ensures that the source code is readable by e.g. or . Args: nodes: Union[ast.AST, Iterable[ast.AST]], the code to compile, as an AST object. indentation: Text, the string to use for indentation. include_source_map: bool, whether return a source map. delete_on_exit: bool, whether to delete the temporary file used for compilation on exit. Returns: Tuple[module, Text, Dict[LineLocation, OriginInfo]], containing: the module containing the unparsed nodes, the source code corresponding to nodes, and the source map. Is include_source_map is False, the source map will be None.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\autograph\\pyct\\loader.py",
    "ast_data": "FunctionDef name:load_ast arg:nodes arg:indentation arg:include_source_map arg:delete_on_exit arguments arg arg arg arg If Call Assign Assign Call Assign Call If Assign Call Assign Return return:yes"
  },
  {
    "library": "numpy",
    "name": "_DomainSafeDivide",
    "source_code": "class _DomainSafeDivide:\n\n    def __init__(self, tolerance=None):\n        self.tolerance = tolerance\n\n    def __call__(self, a, b):\n        if self.tolerance is None:\n            self.tolerance = np.finfo(float).tiny\n        a, b = (np.asarray(a), np.asarray(b))\n        with np.errstate(all='ignore'):\n            return umath.absolute(a) * self.tolerance >= umath.absolute(b)",
    "docstring": "Define a domain for safe division.",
    "type": "class",
    "file_path": "numpy\\numpy\\ma\\core.py",
    "ast_data": "ClassDef name:_DomainSafeDivide FunctionDef name:__init__ arg:self arg:tolerance arguments arg arg Assign FunctionDef name:__call__ arg:self arg:a arg:b arguments arg arg arg If Compare Assign Call Assign Call Call With Call Return return:yes Compare Call Call"
  },
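The domain callable flags denominator entries where the quotient would overflow, and `np.ma` masks those positions. A standalone sketch of the same test:

```python
import numpy as np

tiny = np.finfo(float).tiny  # ~2.2e-308
a = np.array([1.0, 1.0, 1e300])
b = np.array([2.0, 1e-310, 1e-20])

# True where a / b is unsafe, i.e. |a| * tiny >= |b|
unsafe = np.abs(a) * tiny >= np.abs(b)
print(unsafe)  # [False  True  True]

# np.ma.divide masks exactly these positions:
print(np.ma.divide(a, b))  # [0.5 -- --]
```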
  {
    "library": "numpy",
    "name": "fromrecords",
    "source_code": "def fromrecords(reclist, dtype=None, shape=None, formats=None, names=None, titles=None, aligned=False, byteorder=None, fill_value=None, mask=ma.nomask):\n    _mask = getattr(reclist, '_mask', None)\n    if isinstance(reclist, np.ndarray):\n        if isinstance(reclist, ma.MaskedArray):\n            reclist = reclist.filled().view(np.ndarray)\n        if dtype is None:\n            dtype = reclist.dtype\n        reclist = reclist.tolist()\n    mrec = np.rec.fromrecords(reclist, dtype=dtype, shape=shape, formats=formats, names=names, titles=titles, aligned=aligned, byteorder=byteorder).view(mrecarray)\n    if fill_value is not None:\n        mrec.fill_value = fill_value\n    if mask is not ma.nomask:\n        mask = np.asarray(mask)\n        maskrecordlength = len(mask.dtype)\n        if maskrecordlength:\n            mrec._mask.flat = mask\n        elif mask.ndim == 2:\n            mrec._mask.flat = [tuple(m) for m in mask]\n        else:\n            mrec.__setmask__(mask)\n    if _mask is not None:\n        mrec._mask[:] = _mask\n    return mrec",
    "docstring": "Creates a MaskedRecords from a list of records. Parameters ---------- reclist : sequence A list of records. Each element of the sequence is first converted to a masked array if needed. If a 2D array is passed as argument, it is processed line by line dtype : {None, dtype}, optional Data type descriptor. shape : {None,int}, optional Number of records. If None, `` is defined from the shape of the first array in the list. formats : {None, sequence}, optional Sequence of formats for each individual field. If None, the formats will be autodetected by inspecting the fields and selecting the highest dtype possible. names : {None, sequence}, optional Sequence of the names of each field. fill_value : {None, sequence}, optional Sequence of data to be used as filling values. mask : {nomask, sequence}, optional. External mask to apply on the data. Notes ----- Lists of tuples should be preferred over lists of lists for faster processing.",
    "type": "function",
    "file_path": "numpy\\numpy\\ma\\mrecords.py",
    "ast_data": "FunctionDef name:fromrecords arg:reclist arg:dtype arg:shape arg:formats arg:names arg:titles arg:aligned arg:byteorder arg:fill_value arg:mask arguments arg arg arg arg arg arg arg arg arg arg Assign Call If Call If Call Assign Call Call If Compare Assign Assign Call Assign Call Call If Compare Assign If Compare Assign Call Assign Call If Assign If Compare Assign Call Call If Compare Assign Return return:yes"
  },
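A small usage sketch, creating a masked record array with a per-field mask (the field names and data are illustrative):

```python
from numpy.ma import mrecords

rec = mrecords.fromrecords(
    [(1, 'a'), (2, 'b'), (3, 'c')],
    names='num,letter',
    mask=[(False, False), (True, False), (False, True)],
)
print(rec.num)     # [1 -- 3]
print(rec.letter)  # masked in the last row
```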
  {
    "library": "tensorflow",
    "name": "update_xla_tsl_imports",
    "source_code": "def update_xla_tsl_imports(srcs_dir: str) -> None:\n    replace_inplace(srcs_dir, 'from tsl', 'from tensorflow.tsl')\n    replace_inplace(srcs_dir, 'from local_xla.xla', 'from tensorflow.compiler.xla')\n    replace_inplace(srcs_dir, 'from xla', 'from tensorflow.compiler.xla')",
    "docstring": "Workaround for TSL and XLA vendoring.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\tools\\pip_package\\build_pip_package.py",
    "ast_data": "FunctionDef name:update_xla_tsl_imports arg:srcs_dir arguments arg Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "distributions_and_v1_and_v2_optimizers",
    "source_code": "def distributions_and_v1_and_v2_optimizers():\n    return combinations.combine(distribution=[strategy_combinations_base.one_device_strategy, strategy_combinations_base.mirrored_strategy_with_gpu_and_cpu, strategy_combinations_base.mirrored_strategy_with_two_gpus, strategy_combinations_base.mirrored_strategy_with_two_gpus_no_merge_call], optimizer_fn=optimizers_v1_and_v2)",
    "docstring": "A common set of combination with DistributionStrategies and Optimizers.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\distribute\\optimizer_combinations.py",
    "ast_data": "FunctionDef name:distributions_and_v1_and_v2_optimizers arguments Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "_get_getitem_freq",
    "source_code": "def _get_getitem_freq(self, key) -> BaseOffset | None:\n    is_period = isinstance(self.dtype, PeriodDtype)\n    if is_period:\n        freq = self.freq\n    elif self.ndim != 1:\n        freq = None\n    else:\n        key = check_array_indexer(self, key)\n        freq = None\n        if isinstance(key, slice):\n            if self.freq is not None and key.step is not None:\n                freq = key.step * self.freq\n            else:\n                freq = self.freq\n        elif key is Ellipsis:\n            freq = self.freq\n        elif com.is_bool_indexer(key):\n            new_key = lib.maybe_booleans_to_slice(key.view(np.uint8))\n            if isinstance(new_key, slice):\n                return self._get_getitem_freq(new_key)\n    return freq",
    "docstring": "Find the attribute to assign to the result of a __getitem__ lookup.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\arrays\\datetimelike.py",
    "ast_data": "FunctionDef name:_get_getitem_freq arg:self arg:key arguments arg arg Assign Call If Assign If Compare Assign Assign Call Assign If Call If BoolOp Compare Compare Assign Assign If Compare Assign If Call Assign Call Call If Call Return return:yes Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_get_op_resolver_id",
    "source_code": "def _get_op_resolver_id(op_resolver_type=OpResolverType.AUTO):\n    return {OpResolverType.AUTO: 1, OpResolverType.BUILTIN: 1, OpResolverType.BUILTIN_REF: 2, OpResolverType.BUILTIN_WITHOUT_DEFAULT_DELEGATES: 3}.get(op_resolver_type, None)",
    "docstring": "Get a integer identifier for the op resolver.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\lite\\python\\interpreter.py",
    "ast_data": "FunctionDef name:_get_op_resolver_id arg:op_resolver_type arguments arg Return return:yes Call"
  },
  {
    "library": "kornia",
    "name": "float",
    "source_code": "def float(self) -> Image:\n    self._data = self.data.float()\n    return self",
    "docstring": "Return the image as float.",
    "type": "method",
    "file_path": "kornia\\kornia\\image\\image.py",
    "ast_data": "FunctionDef name:float arg:self arguments arg Assign Call Return return:yes"
  },
  {
    "library": "scipy",
    "name": "_linprog_rs",
    "source_code": "def _linprog_rs(c, c0, A, b, x0, callback, postsolve_args, maxiter=5000, tol=1e-12, disp=False, maxupdate=10, mast=False, pivot='mrc', **unknown_options):\n    _check_unknown_options(unknown_options)\n    messages = ['Optimization terminated successfully.', 'Iteration limit reached.', 'The problem appears infeasible, as the phase one auxiliary problem terminated successfully with a residual of {0:.1e}, greater than the tolerance {1} required for the solution to be considered feasible. Consider increasing the tolerance to be greater than {0:.1e}. If this tolerance is unacceptably large, the problem is likely infeasible.', 'The problem is unbounded, as the simplex algorithm found a basic feasible solution from which there is a direction with negative reduced cost in which all decision variables increase.', \"Numerical difficulties encountered; consider trying method='interior-point'.\", 'Problems with no constraints are trivially solved; please turn presolve on.', 'The guess x0 cannot be converted to a basic feasible solution. ']\n    if A.size == 0:\n        return (np.zeros(c.shape), 5, messages[5], 0)\n    x, basis, A, b, residual, status, iteration = _phase_one(A, b, x0, callback, postsolve_args, maxiter, tol, disp, maxupdate, mast, pivot)\n    if status == 0:\n        x, basis, status, iteration = _phase_two(c, A, x, basis, callback, postsolve_args, maxiter, tol, disp, maxupdate, mast, pivot, iteration)\n    return (x, status, messages[status].format(residual, tol), iteration)",
    "docstring": "Solve the following linear programming problem via a two-phase revised simplex algorithm.:: minimize: c @ x subject to: A @ x == b 0 <= x < oo User-facing documentation is in _linprog_doc.py. Parameters ---------- c : 1-D array Coefficients of the linear objective function to be minimized. c0 : float Constant term in objective function due to fixed (and eliminated) variables. (Currently unused.) A : 2-D array 2-D array which, when matrix-multiplied by `scipy.optimize.OptimizeResultunknown_options` is non-empty a warning is issued listing all unused options. Returns ------- x : 1-D array Solution vector. status : int An integer representing the exit status of the optimization:: 0 : Optimization terminated successfully 1 : Iteration limit reached 2 : Problem appears to be infeasible 3 : Problem appears to be unbounded 4 : Numerical difficulties encountered 5 : No constraints; turn presolve on 6 : Guess x0 cannot be converted to a basic feasible solution message : str A string descriptor of the exit status of the optimization. iteration : int The number of iterations taken to solve the problem.",
    "type": "function",
    "file_path": "scipy\\scipy\\optimize\\_linprog_rs.py",
    "ast_data": "FunctionDef name:_linprog_rs arg:c arg:c0 arg:A arg:b arg:x0 arg:callback arg:postsolve_args arg:maxiter arg:tol arg:disp arg:maxupdate arg:mast arg:pivot arguments arg arg arg arg arg arg arg arg arg arg arg arg arg arg Call Assign If Compare Return return:yes Call Assign Call If Compare Assign Call Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "fit_transform",
    "source_code": "@_fit_context(prefer_skip_nested_validation=True)\ndef fit_transform(self, X, y=None):\n    imputer_mask = self._fit(X, y)\n    if self.features_.size < self._n_features:\n        imputer_mask = imputer_mask[:, self.features_]\n    return imputer_mask",
    "docstring": "Generate missing values indicator for . Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) The input data to complete. y : Ignored Not used, present for API consistency by convention. Returns ------- Xt : {ndarray, sparse matrix} of shape (n_samples, n_features) or (n_samples, n_features_with_missing) The missing indicator for input data. The data type of will be boolean.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\impute\\_base.py",
    "ast_data": "FunctionDef name:fit_transform arg:self arg:X arg:y arguments arg arg arg Assign Call If Compare Assign Return return:yes Call"
  },
  {
    "library": "django",
    "name": "geos_version",
    "source_code": "def geos_version():\n    return lgeos.GEOSversion()",
    "docstring": "Return the string version of the GEOS library.",
    "type": "function",
    "file_path": "django\\django\\contrib\\gis\\geos\\libgeos.py",
    "ast_data": "FunctionDef name:geos_version arguments Return return:yes Call"
  },
  {
    "library": "django",
    "name": "get_available_name",
    "source_code": "def get_available_name(self, name, max_length=None):\n    name = str(name).replace('\\\\', '/')\n    dir_name, file_name = os.path.split(name)\n    if '..' in pathlib.PurePath(dir_name).parts:\n        raise SuspiciousFileOperation(\"Detected path traversal attempt in '%s'\" % dir_name)\n    validate_file_name(file_name)\n    file_ext = ''.join(pathlib.PurePath(file_name).suffixes)\n    file_root = file_name.removesuffix(file_ext)\n    while not self.is_name_available(name, max_length=max_length):\n        name = os.path.join(dir_name, self.get_alternative_name(file_root, file_ext))\n        if max_length is None:\n            continue\n        truncation = len(name) - max_length\n        if truncation > 0:\n            file_root = file_root[:-truncation]\n            if not file_root:\n                raise SuspiciousFileOperation('Storage can not find an available filename for \"%s\". Please make sure that the corresponding file field allows sufficient \"max_length\".' % name)\n            name = os.path.join(dir_name, self.get_alternative_name(file_root, file_ext))\n    return name",
    "docstring": "Return a filename that's free on the target storage system and available for new content to be written to.",
    "type": "method",
    "file_path": "django\\django\\core\\files\\storage\\base.py",
    "ast_data": "FunctionDef name:get_available_name arg:self arg:name arg:max_length arguments arg arg arg Assign Call Call Assign Call If Compare Call Raise Call Call Assign Call Call Assign Call While Call Assign Call Call If Compare Assign Call If Compare Assign If Raise Call Assign Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_DynType",
    "source_code": "class _DynType:\n\n    def __init__(self) -> None:\n        self.__name__ = '_DynType'\n\n    def __eq__(self, other):\n        return isinstance(other, self.__class__)\n\n    def __str__(self):\n        return 'Dyn'\n\n    def __repr__(self):\n        return 'Dyn'",
    "docstring": "_DynType defines a type which stands for the absence of type information.",
    "type": "class",
    "file_path": "pytorch\\torch\\fx\\tensor_type.py",
    "ast_data": "ClassDef name:_DynType FunctionDef name:__init__ arg:self arguments arg Assign FunctionDef name:__eq__ arg:self arg:other arguments arg arg Return return:yes Call FunctionDef name:__str__ arg:self arguments arg Return return:yes FunctionDef name:__repr__ arg:self arguments arg Return return:yes"
  },
  {
    "library": "scipy",
    "name": "construct_lcb_simplicial",
    "source_code": "def construct_lcb_simplicial(self, v_min):\n    cbounds = [[x_b_i[0], x_b_i[1]] for x_b_i in self.bounds]\n    for vn in v_min.nn:\n        for i, x_i in enumerate(vn.x_a):\n            if x_i < v_min.x_a[i] and x_i > cbounds[i][0]:\n                cbounds[i][0] = x_i\n            if x_i > v_min.x_a[i] and x_i < cbounds[i][1]:\n                cbounds[i][1] = x_i\n    if self.disp:\n        logging.info(f'cbounds found for v_min.x_a = {v_min.x_a}')\n        logging.info(f'cbounds = {cbounds}')\n    return cbounds",
    "docstring": "Construct locally (approximately) convex bounds Parameters ---------- v_min : Vertex object The minimizer vertex Returns ------- cbounds : list of lists List of size dimension with length-2 list of bounds for each dimension.",
    "type": "method",
    "file_path": "scipy\\scipy\\optimize\\_shgo.py",
    "ast_data": "FunctionDef name:construct_lcb_simplicial arg:self arg:v_min arguments arg arg Assign For For Call If BoolOp Compare Compare Assign If BoolOp Compare Compare Assign If Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_py_lazy_and",
    "source_code": "def _py_lazy_and(cond, b):\n    return cond and b()",
    "docstring": "Lazy-eval equivalent of \"and\" in Python.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\autograph\\operators\\logical.py",
    "ast_data": "FunctionDef name:_py_lazy_and arg:cond arg:b arguments arg arg Return return:yes BoolOp Call"
  },
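The second operand is a zero-argument callable, so it is only evaluated when `cond` is truthy, matching Python's short-circuit `and`. Reproducing the helper locally to demonstrate:

```python
def _py_lazy_and(cond, b):
    return cond and b()

def expensive():
    print('evaluated')
    return True

print(_py_lazy_and(False, expensive))  # False; 'evaluated' never prints
print(_py_lazy_and(True, expensive))   # prints 'evaluated', then True
```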
  {
    "library": "matplotlib",
    "name": "draw",
    "source_code": "def draw(self):\n    renderer = RendererTemplate(self.figure.dpi)\n    self.figure.draw(renderer)",
    "docstring": "Draw the figure using the renderer. It is important that this method actually walk the artist tree even if not output is produced because this will trigger deferred work (like computing limits auto-limits and tick values) that users may want access to before saving to disk.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\backends\\backend_template.py",
    "ast_data": "FunctionDef name:draw arg:self arguments arg Assign Call Call"
  },
  {
    "library": "pytorch",
    "name": "from_local",
    "source_code": "@staticmethod\ndef from_local(local_tensor: torch.Tensor, device_mesh: Optional[DeviceMesh]=None, placements: Optional[Sequence[Placement]]=None, *, run_check: bool=False, shape: Optional[torch.Size]=None, stride: Optional[tuple[int, ...]]=None) -> 'DTensor':\n    device_mesh = device_mesh or _mesh_resources.get_current_mesh()\n    device_type = device_mesh.device_type\n    if device_type != local_tensor.device.type and (not local_tensor.is_meta):\n        local_tensor = local_tensor.to(device_type)\n    if placements is None:\n        placements = [Replicate() for _ in range(device_mesh.ndim)]\n    else:\n        placements = list(placements)\n        for idx, placement in enumerate(placements):\n            if placement.is_shard():\n                placement = cast(Shard, placement)\n                if placement.dim < 0:\n                    placements[idx] = Shard(placement.dim + local_tensor.ndim)\n    return _FromTorchTensor.apply(local_tensor, device_mesh, tuple(placements), run_check, shape, stride)",
    "docstring": "Create a :class: from a local torch.Tensor on each rank according to the `DeviceMeshPlacementReplicatelocal_tensorDTensorrequires_gradDTensorlocal_tensor` requires_grad or not.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\tensor\\_api.py",
    "ast_data": "FunctionDef name:from_local arg:local_tensor arg:device_mesh arg:placements arguments arg arg arg arg arg arg Assign BoolOp Call Assign If BoolOp Compare Assign Call If Compare Assign Call Call Assign Call For Call If Call Assign Call If Compare Assign Call Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "validate_config",
    "source_code": "def validate_config(config):\n    return isinstance(config, dict) and _LAYER_UNDEFINED_CONFIG_KEY not in config",
    "docstring": "Determines whether config appears to be a valid layer config.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\utils\\generic_utils.py",
    "ast_data": "FunctionDef name:validate_config arg:config arguments arg Return return:yes BoolOp Call Compare"
  },
  {
    "library": "tensorflow",
    "name": "_get_trackable_parent_error_string",
    "source_code": "def _get_trackable_parent_error_string(capture):\n    parent = getattr(capture, '_parent_trackable', None)\n    if parent is not None:\n        return f'Trackable referencing this tensor = {parent()}'\n    trackable_referrers = []\n    for primary_referrer in gc.get_referrers(capture):\n        if isinstance(primary_referrer, trackable.Trackable):\n            trackable_referrers.append(primary_referrer)\n        for secondary_referrer in gc.get_referrers(primary_referrer):\n            if isinstance(secondary_referrer, trackable.Trackable):\n                trackable_referrers.append(secondary_referrer)\n    return 'Trackable Python objects referring to this tensor (from gc.get_referrers, limited to two hops) = [\\n\\t\\t{}]'.format('\\n\\t\\t'.join([repr(obj) for obj in trackable_referrers]))",
    "docstring": "Gets error string with the capture's parent object.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\polymorphic_function\\saved_model_exported_concrete.py",
    "ast_data": "FunctionDef name:_get_trackable_parent_error_string arg:capture arguments arg Assign Call If Compare Return return:yes Call Assign For Call If Call Call For Call If Call Call Return return:yes Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "InputAdapter",
    "source_code": "class InputAdapter:\n\n    def __init__(self, steps: list[InputAdaptStep] | None=None):\n        self._steps = steps or []\n\n    def append_step(self, step: InputAdaptStep) -> None:\n        self._steps.append(step)\n\n    def apply(self, *model_args, model: torch.nn.Module | Callable | torch_export.ExportedProgram | None=None, **model_kwargs) -> Sequence[int | float | bool | str | torch.Tensor | torch.dtype | None]:\n        args: Sequence[Any] = model_args\n        kwargs: Mapping[str, Any] = model_kwargs\n        for step in self._steps:\n            args, kwargs = step.apply(args, kwargs, model=model)\n        assert not kwargs\n        return args",
    "docstring": "A class that adapts the PyTorch model inputs to exported ONNX model inputs format.",
    "type": "class",
    "file_path": "pytorch\\torch\\onnx\\_internal\\io_adapter.py",
    "ast_data": "ClassDef name:InputAdapter FunctionDef name:__init__ arg:self arg:steps arguments arg arg Assign BoolOp FunctionDef name:append_step arg:self arg:step arguments arg arg Call FunctionDef name:apply arg:self arguments arg arg arg arg For Assign Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "to_dict",
    "source_code": "def to_dict(self) -> dict[str, Any]:\n    return {NAME_DICT_KEY: self.name, CONFIGS_DICT_KEY: [c.to_dict() for c in self.configs]}",
    "docstring": "Convert this `~torch.ao.quantization.backend_config.BackendConfig.from_dict`.",
    "type": "method",
    "file_path": "pytorch\\torch\\ao\\quantization\\backend_config\\backend_config.py",
    "ast_data": "FunctionDef name:to_dict arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "isetitem",
    "source_code": "def isetitem(self, loc, value) -> None:\n    if isinstance(value, DataFrame):\n        if is_integer(loc):\n            loc = [loc]\n        if len(loc) != len(value.columns):\n            raise ValueError(f'Got {len(loc)} positions but value has {len(value.columns)} columns.')\n        for i, idx in enumerate(loc):\n            arraylike, refs = self._sanitize_column(value.iloc[:, i])\n            self._iset_item_mgr(idx, arraylike, inplace=False, refs=refs)\n        return\n    arraylike, refs = self._sanitize_column(value)\n    self._iset_item_mgr(loc, arraylike, inplace=False, refs=refs)",
    "docstring": "Set the given value in the column with position . This is a positional analogue to ``. Examples -------- >>> df = pd.DataFrame({\"A\": [1, 2], \"B\": [3, 4]}) >>> df.isetitem(1, [5, 6]) >>> df A B 0 1 5 1 2 6",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\frame.py",
    "ast_data": "FunctionDef name:isetitem arg:self arg:loc arg:value arguments arg arg arg If Call If Call Assign If Compare Call Call Raise Call Call Call For Call Assign Call Call Return return:no Assign Call Call"
  },
  {
    "library": "pytorch",
    "name": "reorder_pre_hook_nodes_to_schedule_asap",
    "source_code": "def reorder_pre_hook_nodes_to_schedule_asap(self):\n    for node in self.fx_tracer.graph.find_nodes(op='call_function', target=call_hook):\n        if node.kwargs.get('hook_type', None) != 'pre_hook':\n            continue\n        getitem_node = node.args[0]\n        input_nodes = self.get_all_nodes(node.args[1])\n        to_remove = []\n        to_append = []\n        hook_block = [node]\n        for n in input_nodes:\n            if n.op == 'call_function' and n.target == operator.getitem:\n                to_append.append(n.args[0])\n                to_remove.append(n)\n                hook_block.append(n)\n        for a, b in zip(to_remove, to_append):\n            input_nodes.remove(a)\n            input_nodes.append(b)\n        arg = max(input_nodes)\n        if arg is not node.prev and (not self.is_placeholder(arg)):\n            arg.append(getitem_node)\n            for n in hook_block:\n                getitem_node.append(n)",
    "docstring": "In this function, we schedule the pre hooks as soon as possible. This does not match eager behavior (schedule pre hook right before its registered node), but it can make acc grad be scheduled properly when the pre hooks are registered to them. After reordering acc grad node, we will reorder the pre hooks again to mimic eager behavior.",
    "type": "method",
    "file_path": "pytorch\\torch\\_dynamo\\compiled_autograd.py",
    "ast_data": "FunctionDef name:reorder_pre_hook_nodes_to_schedule_asap arg:self arguments arg For Call If Compare Call Assign Assign Call Assign Assign Assign For If BoolOp Compare Compare Call Call Call For Call Call Call Assign Call If BoolOp Compare Call Call For Call"
  },
  {
    "library": "django",
    "name": "setOrdinate",
    "source_code": "def setOrdinate(self, dimension, index, value):\n    self._checkindex(index)\n    self._checkdim(dimension)\n    capi.cs_setordinate(self.ptr, index, dimension, value)",
    "docstring": "Set the value for the given dimension and index.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\geos\\coordseq.py",
    "ast_data": "FunctionDef name:setOrdinate arg:self arg:dimension arg:index arg:value arguments arg arg arg arg Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "BuildCondBranch",
    "source_code": "def BuildCondBranch(self, fn):\n    pre_summaries = ops.get_collection(ops.GraphKeys._SUMMARY_COLLECTION)\n    original_result = fn()\n    post_summaries = ops.get_collection(ops.GraphKeys._SUMMARY_COLLECTION)\n    if len(post_summaries) > len(pre_summaries):\n        new_summaries = post_summaries[len(pre_summaries):]\n        summary_ref = ops.get_collection_ref(ops.GraphKeys._SUMMARY_COLLECTION)\n        summary_ref[:] = pre_summaries\n        with ops.control_dependencies(new_summaries):\n            if original_result is None:\n                return (no_op(), None)\n            elif not isinstance(original_result, ops.Operation):\n                original_result = variable_utils.convert_variables_to_tensors(original_result)\n                original_result = nest.map_structure(array_ops.identity, original_result, expand_composites=True)\n    if original_result is None:\n        return (None, None)\n    original_result = variable_utils.convert_variables_to_tensors(original_result)\n    result = nest.map_structure(self._BuildCondTensor, original_result, expand_composites=True)\n    if not isinstance(result, (list, _basetuple)):\n        result = [result]\n    return (original_result, result)",
    "docstring": "Add the subgraph defined by fn() to the graph.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\control_flow_ops.py",
    "ast_data": "FunctionDef name:BuildCondBranch arg:self arg:fn arguments arg arg Assign Call Assign Call Assign Call If Compare Call Call Assign Call Assign Call Assign With Call If Compare Return return:yes Call If Call Assign Call Assign Call If Compare Return return:no Assign Call Assign Call If Call Assign Return return:yes"
  },
  {
    "library": "pandas",
    "name": "generate_numba_apply_func",
    "source_code": "@functools.cache\ndef generate_numba_apply_func(func: Callable[..., Scalar], nopython: bool, nogil: bool, parallel: bool):\n    numba_func = jit_user_function(func)\n    if TYPE_CHECKING:\n        import numba\n    else:\n        numba = import_optional_dependency('numba')\n\n    @numba.jit(nopython=nopython, nogil=nogil, parallel=parallel)\n    def roll_apply(values: np.ndarray, begin: np.ndarray, end: np.ndarray, minimum_periods: int, *args: Any) -> np.ndarray:\n        result = np.empty(len(begin))\n        for i in numba.prange(len(result)):\n            start = begin[i]\n            stop = end[i]\n            window = values[start:stop]\n            count_nan = np.sum(np.isnan(window))\n            if len(window) - count_nan >= minimum_periods:\n                result[i] = numba_func(window, *args)\n            else:\n                result[i] = np.nan\n        return result\n    return roll_apply",
    "docstring": "Generate a numba jitted apply function specified by values from engine_kwargs. 1. jit the user's function 2. Return a rolling apply function with the jitted function inline Configurations specified in engine_kwargs apply to both the user's function _AND_ the rolling apply function. Parameters ---------- func : function function to be applied to each window and will be JITed nopython : bool nopython to be passed into numba.jit nogil : bool nogil to be passed into numba.jit parallel : bool parallel to be passed into numba.jit Returns ------- Numba function",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\window\\numba_.py",
    "ast_data": "FunctionDef name:generate_numba_apply_func arg:func arg:nopython arg:nogil arg:parallel arguments arg arg arg arg Assign Call If Assign Call FunctionDef name:roll_apply arg:values arg:begin arg:end arg:minimum_periods arguments arg arg arg arg arg Assign Call Call For Call Call Assign Assign Assign Assign Call Call If Compare Call Assign Call Assign Return return:yes Call Return return:yes"
  },
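This generator sits behind the public `engine='numba'` path of rolling `.apply`. A usage sketch (requires the optional numba dependency to be installed):

```python
import numpy as np
import pandas as pd

s = pd.Series(np.arange(10, dtype='float64'))

out = s.rolling(window=3).apply(
    lambda window: window.mean(),
    raw=True,  # the numba engine requires raw ndarray windows
    engine='numba',
    engine_kwargs={'nopython': True, 'nogil': False, 'parallel': False},
)
print(out.tail())
```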
  {
    "library": "tensorflow",
    "name": "rank",
    "source_code": "@property\ndef rank(self):\n    inner_ndims = tensor_shape.dimension_value(self._inner_dim_sizes.shape[0])\n    if inner_ndims is None:\n        return None\n    else:\n        return len(self._partitioned_dim_sizes) + inner_ndims",
    "docstring": "The number of dimensions in this shape, or None if unknown.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\ragged_tensor_shape.py",
    "ast_data": "FunctionDef name:rank arg:self arguments arg Assign Call If Compare Return return:no Return return:yes Call"
  },
  {
    "library": "numpy",
    "name": "getdoc",
    "source_code": "def getdoc(self):\n    npfunc = getattr(np, self.__name__, None)\n    doc = getattr(npfunc, '__doc__', None)\n    if doc:\n        sig = ma.get_object_signature(npfunc)\n        doc = ma.doc_note(doc, 'The function is applied to both the _data and the _mask, if any.')\n        if sig:\n            sig = self.__name__ + sig + '\\n\\n'\n        return sig + doc\n    return",
    "docstring": "Retrieve the docstring and signature from the function. The `` attribute of the function is used as the docstring for the new masked array version of the function. A note on application of the function to the mask is appended. Parameters ---------- None",
    "type": "method",
    "file_path": "numpy\\numpy\\ma\\extras.py",
    "ast_data": "FunctionDef name:getdoc arg:self arguments arg Assign Call Assign Call If Assign Call Assign Call If Assign Return return:yes Return return:no"
  },
  {
    "library": "numpy",
    "name": "random",
    "source_code": "@staticmethod\n@memoize\ndef random(size, dtype, rnd):\n    arr = np.arange(size, dtype=dtype)\n    rnd = np.random.RandomState(1792364059)\n    np.random.shuffle(arr)\n    rnd.shuffle(arr)\n    return arr",
    "docstring": "Returns a randomly-shuffled array.",
    "type": "method",
    "file_path": "numpy\\benchmarks\\benchmarks\\bench_function_base.py",
    "ast_data": "FunctionDef name:random arg:size arg:dtype arg:rnd arguments arg arg arg Assign Call Assign Call Call Call Return return:yes"
  },
  {
    "library": "numpy",
    "name": "typename",
    "source_code": "@set_module('numpy')\ndef typename(char):\n    return _namefromtype[char]",
    "docstring": "Return a description for the given data type code. Parameters ---------- char : str Data type code. Returns ------- out : str Description of the input data type code. See Also -------- dtype Examples -------- >>> import numpy as np >>> typechars = ['S1', '?', 'B', 'D', 'G', 'F', 'I', 'H', 'L', 'O', 'Q', ... 'S', 'U', 'V', 'b', 'd', 'g', 'f', 'i', 'h', 'l', 'q'] >>> for typechar in typechars: ... print(typechar, ' : ', np.typename(typechar)) ... S1 : character ? : bool B : unsigned char D : complex double precision G : complex long double precision F : complex single precision I : unsigned integer H : unsigned short L : unsigned long integer O : object Q : unsigned long long integer S : string U : unicode V : void b : signed char d : double precision g : long precision f : single precision i : integer h : short l : long integer q : long long integer",
    "type": "function",
    "file_path": "numpy\\numpy\\lib\\_type_check_impl.py",
    "ast_data": "FunctionDef name:typename arg:char arguments arg Return return:yes Call"
  },
  {
    "library": "sphinx",
    "name": "_is_unpack_form",
    "source_code": "def _is_unpack_form(obj: Any) -> bool:\n    return typing.get_origin(obj) is typing.Unpack",
    "docstring": "Check if the object is :class: or equivalent.",
    "type": "function",
    "file_path": "sphinx\\sphinx\\util\\typing.py",
    "ast_data": "FunctionDef name:_is_unpack_form arg:obj arguments arg Return return:yes Compare Call"
  },
  {
    "library": "tensorflow",
    "name": "_check_tril",
    "source_code": "def _check_tril(self, tril):\n    if tril.shape.ndims is not None and tril.shape.ndims < 2:\n        raise ValueError('Argument tril must have at least 2 dimensions.  Found: %s' % tril)",
    "docstring": "Static check of the argument.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\linalg\\linear_operator_lower_triangular.py",
    "ast_data": "FunctionDef name:_check_tril arg:self arg:tril arguments arg arg If BoolOp Compare Compare Raise Call"
  },
  {
    "library": "matplotlib",
    "name": "set_size",
    "source_code": "def set_size(self, size):\n    self._size = size\n    self._invalid = True",
    "docstring": "Set the text size.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\textpath.py",
    "ast_data": "FunctionDef name:set_size arg:self arg:size arguments arg arg Assign Assign"
  },
  {
    "library": "pytorch",
    "name": "default_stream",
    "source_code": "def default_stream(device: Optional[_device_t]=None) -> Stream:\n    _lazy_init()\n    streamdata = torch._C._cuda_getDefaultStream(_get_device_index(device, optional=True))\n    return Stream(stream_id=streamdata[0], device_index=streamdata[1], device_type=streamdata[2])",
    "docstring": "Return the default :class: for a given device. Args: device (torch.device or int, optional): selected device. Returns the default :class: for the current device, given by :func:, if :attr: is `` (default).",
    "type": "function",
    "file_path": "pytorch\\torch\\cuda\\__init__.py",
    "ast_data": "FunctionDef name:default_stream arg:device arguments arg Call Assign Call Call Return return:yes Call"
  },
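A guarded usage sketch (requires a CUDA-capable build and device):

```python
import torch

if torch.cuda.is_available():
    s = torch.cuda.default_stream()    # default stream of the current device
    s0 = torch.cuda.default_stream(0)  # or an explicit device index
    # With no custom stream active, the current stream is the default one.
    print(s == torch.cuda.current_stream())  # True
```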
  {
    "library": "pytorch",
    "name": "loss_cardinality",
    "source_code": "@torch.no_grad()\ndef loss_cardinality(self, outputs, targets, indices, num_boxes):\n    pred_logits = outputs['pred_logits']\n    device = pred_logits.device\n    tgt_lengths = torch.as_tensor([len(v['labels']) for v in targets], device=device)\n    card_pred = (pred_logits.argmax(-1) != pred_logits.shape[-1] - 1).sum(1)\n    card_err = F.l1_loss(card_pred.float(), tgt_lengths.float())\n    losses = {'cardinality_error': card_err}\n    return losses",
    "docstring": "Compute the cardinality error, ie the absolute error in the number of predicted non-empty boxes This is not really a loss, it is intended for logging purposes only. It doesn't propagate gradients",
    "type": "method",
    "file_path": "pytorch\\benchmarks\\functional_autograd_benchmark\\torchvision_models.py",
    "ast_data": "FunctionDef name:loss_cardinality arg:self arg:outputs arg:targets arg:indices arg:num_boxes arguments arg arg arg arg arg Assign Assign Assign Call Call Assign Call Compare Call Assign Call Call Call Assign Return return:yes Call"
  },
  {
    "library": "scrapy",
    "name": "getfloat",
    "source_code": "def getfloat(self, name: _SettingsKeyT, default: float=0.0) -> float:\n    return float(self.get(name, default))",
    "docstring": "Get a setting value as a float. :param name: the setting name :type name: str :param default: the value to return if no setting is found :type default: object",
    "type": "method",
    "file_path": "scrapy\\scrapy\\settings\\__init__.py",
    "ast_data": "FunctionDef name:getfloat arg:self arg:name arg:default arguments arg arg arg Return return:yes Call Call"
  },
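A short usage sketch with an in-memory `Settings` object (the setting names are illustrative):

```python
from scrapy.settings import Settings

settings = Settings({'DOWNLOAD_DELAY': '0.25'})
print(settings.getfloat('DOWNLOAD_DELAY'))    # 0.25
print(settings.getfloat('MISSING_KEY', 1.5))  # 1.5, the supplied default
```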
  {
    "library": "scikit-learn",
    "name": "_finalize_leaf",
    "source_code": "def _finalize_leaf(self, node):\n    node.is_leaf = True\n    self.finalized_leaves.append(node)",
    "docstring": "Make node a leaf of the tree being grown.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\ensemble\\_hist_gradient_boosting\\grower.py",
    "ast_data": "FunctionDef name:_finalize_leaf arg:self arg:node arguments arg arg Assign Call"
  },
  {
    "library": "scipy",
    "name": "_rvs",
    "source_code": "def _rvs(self, low, high, size=None, random_state=None):\n    if np.asarray(low).size == 1 and np.asarray(high).size == 1:\n        return rng_integers(random_state, low, high, size=size)\n    if size is not None:\n        low = np.broadcast_to(low, size)\n        high = np.broadcast_to(high, size)\n    randint = np.vectorize(partial(rng_integers, random_state), otypes=[np.dtype(int)])\n    return randint(low, high)",
    "docstring": "An array of *size* random integers >= ``.",
    "type": "method",
    "file_path": "scipy\\scipy\\stats\\_discrete_distns.py",
    "ast_data": "FunctionDef name:_rvs arg:self arg:low arg:high arg:size arg:random_state arguments arg arg arg arg arg If BoolOp Compare Call Compare Call Return return:yes Call If Compare Assign Call Assign Call Assign Call Call Call Return return:yes Call"
  },
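Both branches are reachable through the public `scipy.stats.randint` distribution: scalar bounds take the fast single-call path, array bounds the broadcast/vectorize path. A sketch:

```python
import numpy as np
from scipy import stats

rng = np.random.default_rng(123)

# Scalar low/high: single rng_integers call.
print(stats.randint.rvs(0, 10, size=5, random_state=rng))

# Array low/high: broadcast, then vectorized per-element sampling.
print(stats.randint.rvs([0, 100], [10, 200], random_state=rng))
```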
  {
    "library": "matplotlib",
    "name": "get_sized_alternatives_for_symbol",
    "source_code": "def get_sized_alternatives_for_symbol(self, fontname: str, sym: str) -> list[tuple[str, str]]:\n    return [(fontname, sym)]",
    "docstring": "Override if your font provides multiple sizes of the same symbol. Should return a list of symbols matching *sym* in various sizes. The expression renderer will select the most appropriate size for a given situation from this list.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\_mathtext.py",
    "ast_data": "FunctionDef name:get_sized_alternatives_for_symbol arg:self arg:fontname arg:sym arguments arg arg arg Return return:yes"
  },
  {
    "library": "django",
    "name": "add",
    "source_code": "def add(self, geom):\n    if isinstance(geom, OGRGeometry):\n        if isinstance(geom, self.__class__):\n            for g in geom:\n                capi.add_geom(self.ptr, g.ptr)\n        else:\n            capi.add_geom(self.ptr, geom.ptr)\n    elif isinstance(geom, str):\n        tmp = OGRGeometry(geom)\n        capi.add_geom(self.ptr, tmp.ptr)\n    else:\n        raise GDALException('Must add an OGRGeometry.')",
    "docstring": "Add the geometry to this Geometry Collection.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\gdal\\geometries.py",
    "ast_data": "FunctionDef name:add arg:self arg:geom arguments arg arg If Call If Call For Call Call If Call Assign Call Call Raise Call"
  },
  {
    "library": "tensorflow",
    "name": "histogram",
    "source_code": "def histogram(name, tensor, family=None, step=None):\n\n    def function(tag, scope):\n        return gen_summary_ops.write_histogram_summary(_summary_state.writer._resource, _choose_step(step), tag, array_ops.identity(tensor), name=scope)\n    return summary_writer_function(name, tensor, function, family=family)",
    "docstring": "Writes a histogram summary if possible.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\summary_ops_v2.py",
    "ast_data": "FunctionDef name:histogram arg:name arg:tensor arg:family arg:step arguments arg arg arg arg FunctionDef name:function arg:tag arg:scope arguments arg arg Return return:yes Call Call Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "is_registered",
    "source_code": "def is_registered(self, target: TorchOp) -> bool:\n    return bool(self.get_decomps(target))",
    "docstring": "Returns whether the given op is registered: torch.ops.... Args: target: The PyTorch node callable target. Returns: True if the given op is registered, otherwise False.",
    "type": "method",
    "file_path": "pytorch\\torch\\onnx\\_internal\\exporter\\_registration.py",
    "ast_data": "FunctionDef name:is_registered arg:self arg:target arguments arg arg Return return:yes Call Call"
  },
  {
    "library": "matplotlib",
    "name": "get_alt_path",
    "source_code": "def get_alt_path(self):\n    return self._alt_path",
    "docstring": "Return a for the alternate part of the marker. For unfilled markers, this is *None*; for filled markers, this is the area to be drawn with *markerfacecoloralt*.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\markers.py",
    "ast_data": "FunctionDef name:get_alt_path arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "replicated",
    "source_code": "@classmethod\ndef replicated(cls, mesh: Mesh, rank: int) -> 'Layout':\n    return cls._new_object(mesh=mesh, rank=rank)",
    "docstring": "Returns a replicated layout of rank .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\dtensor\\python\\layout.py",
    "ast_data": "FunctionDef name:replicated arg:cls arg:mesh arg:rank arguments arg arg arg Return return:yes Call"
  },
  {
    "library": "django",
    "name": "ExtractWeek",
    "source_code": "class ExtractWeek(Extract):\n    lookup_name = 'week'",
    "docstring": "Return 1-52 or 53, based on ISO-8601, i.e., Monday is the first of the week.",
    "type": "class",
    "file_path": "django\\django\\db\\models\\functions\\datetime.py",
    "ast_data": "ClassDef name:ExtractWeek Assign"
  },
  {
    "library": "matplotlib",
    "name": "LogitScale",
    "source_code": "class LogitScale(ScaleBase):\n    name = 'logit'\n\n    def __init__(self, axis, nonpositive='mask', *, one_half='\\\\frac{1}{2}', use_overline=False):\n        self._transform = LogitTransform(nonpositive)\n        self._use_overline = use_overline\n        self._one_half = one_half\n\n    def get_transform(self):\n        return self._transform\n\n    def set_default_locators_and_formatters(self, axis):\n        axis.set_major_locator(LogitLocator())\n        axis.set_major_formatter(LogitFormatter(one_half=self._one_half, use_overline=self._use_overline))\n        axis.set_minor_locator(LogitLocator(minor=True))\n        axis.set_minor_formatter(LogitFormatter(minor=True, one_half=self._one_half, use_overline=self._use_overline))\n\n    def limit_range_for_scale(self, vmin, vmax, minpos):\n        if not np.isfinite(minpos):\n            minpos = 1e-07\n        return (minpos if vmin <= 0 else vmin, 1 - minpos if vmax >= 1 else vmax)",
    "docstring": "Logit scale for data between zero and one, both excluded. This scale is similar to a log scale close to zero and to one, and almost linear around 0.5. It maps the interval ]0, 1[ onto ]-infty, +infty[.",
    "type": "class",
    "file_path": "matplotlib\\lib\\matplotlib\\scale.py",
    "ast_data": "ClassDef name:LogitScale Assign FunctionDef name:__init__ arg:self arg:axis arg:nonpositive arguments arg arg arg arg arg Assign Call Assign Assign FunctionDef name:get_transform arg:self arguments arg Return return:yes FunctionDef name:set_default_locators_and_formatters arg:self arg:axis arguments arg arg Call Call Call Call Call Call Call Call FunctionDef name:limit_range_for_scale arg:self arg:vmin arg:vmax arg:minpos arguments arg arg arg arg If Call Assign Return return:yes Compare Compare"
  },
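The scale is registered under the name 'logit', so it is normally reached through `set_xscale`/`set_yscale`; keyword arguments are forwarded to this constructor. A sketch:

```python
import numpy as np
import matplotlib.pyplot as plt

x = np.linspace(0.01, 0.99, 99)
fig, ax = plt.subplots()
ax.plot(x, x)
ax.set_yscale('logit')  # forwards kwargs such as use_overline=True
fig.savefig('logit_demo.png')
```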
  {
    "library": "tensorflow",
    "name": "_MulNoNanGrad",
    "source_code": "@ops.RegisterGradient('MulNoNan')\ndef _MulNoNanGrad(op: ops.Operation, grad):\n    x = op.inputs[0]\n    y = op.inputs[1]\n    if isinstance(grad, tensor.Tensor) and _ShapesFullySpecifiedAndEqual(x, y, grad):\n        return (gen_math_ops.mul_no_nan(grad, y), gen_math_ops.mul_no_nan(x, grad))\n    assert x.dtype.base_dtype == y.dtype.base_dtype, (x.dtype, ' vs. ', y.dtype)\n    gx = gen_math_ops.mul_no_nan(grad, y)\n    gy = gen_math_ops.mul_no_nan(x, grad)\n    return _ReduceGradientArgs(x, y, gx, gy)",
    "docstring": "The gradient of scalar multiplication with NaN-suppression.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\math_grad.py",
    "ast_data": "FunctionDef name:_MulNoNanGrad arg:op arg:grad arguments arg arg Assign Assign If BoolOp Call Call Return return:yes Call Call Compare Assign Call Assign Call Return return:yes Call Call"
  },
  {
    "library": "pandas",
    "name": "autocorr",
    "source_code": "def autocorr(self, lag: int=1) -> float:\n    return self.corr(cast(Series, self.shift(lag)))",
    "docstring": "Compute the lag-N autocorrelation. This method computes the Pearson correlation between the Series and its shifted self. Parameters ---------- lag : int, default 1 Number of lags to apply before performing autocorrelation. Returns ------- float The Pearson correlation between self and self.shift(lag). See Also -------- Series.corr : Compute the correlation between two Series. Series.shift : Shift index by desired number of periods. DataFrame.corr : Compute pairwise correlation of columns. DataFrame.corrwith : Compute pairwise correlation between rows or columns of two DataFrame objects. Notes ----- If the Pearson correlation is not well defined return 'NaN'. Examples -------- >>> s = pd.Series([0.25, 0.5, 0.2, -0.05]) >>> s.autocorr() # doctest: +ELLIPSIS 0.10355... >>> s.autocorr(lag=2) # doctest: +ELLIPSIS -0.99999... If the Pearson correlation is not well defined, then 'NaN' is returned. >>> s = pd.Series([1, 0, 0, 0]) >>> s.autocorr() nan",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\series.py",
    "ast_data": "FunctionDef name:autocorr arg:self arg:lag arguments arg arg Return return:yes Call Call Call"
  },
  {
    "library": "pandas",
    "name": "_get_buffer",
    "source_code": "@contextmanager\ndef _get_buffer(buf: FilePath | WriteBuffer[str] | None, encoding: str | None=None) -> Generator[WriteBuffer[str]] | Generator[StringIO]:\n    if buf is not None:\n        buf = stringify_path(buf)\n    else:\n        buf = StringIO()\n    if encoding is None:\n        encoding = 'utf-8'\n    elif not isinstance(buf, str):\n        raise ValueError('buf is not a file name and encoding is specified.')\n    if hasattr(buf, 'write'):\n        yield buf\n    elif isinstance(buf, str):\n        check_parent_directory(str(buf))\n        with open(buf, 'w', encoding=encoding, newline='') as f:\n            yield f\n    else:\n        raise TypeError('buf is not a file name and it has no write method')",
    "docstring": "Context manager to open, yield and close buffer for filenames or Path-like objects, otherwise yield buf unchanged.",
    "type": "function",
    "file_path": "pandas\\pandas\\io\\formats\\format.py",
    "ast_data": "FunctionDef name:_get_buffer arg:buf arg:encoding arguments arg arg If Compare Assign Call Assign Call If Compare Assign If Call Raise Call If Call If Call Call Call With Call Raise Call"
  },
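A minimal sketch of the same open-or-passthrough pattern (the name `open_or_passthrough` is hypothetical, not pandas API):

```python
from contextlib import contextmanager
from io import StringIO

@contextmanager
def open_or_passthrough(buf=None, encoding='utf-8'):
    # None -> fresh in-memory buffer; open file-like -> passed through;
    # path string -> opened and closed by the context manager.
    if buf is None:
        yield StringIO()
    elif hasattr(buf, 'write'):
        yield buf
    else:
        with open(buf, 'w', encoding=encoding, newline='') as f:
            yield f

with open_or_passthrough() as b:
    b.write('hello')
    print(b.getvalue())  # 'hello'
```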
  {
    "library": "tensorflow",
    "name": "__enter__",
    "source_code": "def __enter__(self):\n    self._ctx_manager = context_stack.set_default(self._tape_context)\n    self._ctx_manager.__enter__()\n    return self",
    "docstring": "Enters a context inside which operations are recorded on this tape.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\experimental\\tape.py",
    "ast_data": "FunctionDef name:__enter__ arg:self arguments arg Assign Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "load",
    "source_code": "@classmethod\ndef load(cls, source_code: str, dst_file_ext: str) -> tuple[DLLWrapper, str, str]:\n    if dst_file_ext != 'so':\n        raise RuntimeError(f'Only support loading a .so file for now. Requested file extension: {dst_file_ext}. Source code: {source_code}')\n    dst_file_path, hash_key, source_code_path = cls.compile(source_code, dst_file_ext)\n    return (DLLWrapper(dst_file_path), hash_key, source_code_path)",
    "docstring": "Compiles source code and loads the generated .so file. Returns a tuple of DLLWrapper, hash_key, source_code_path",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\codecache.py",
    "ast_data": "FunctionDef name:load arg:cls arg:source_code arg:dst_file_ext arguments arg arg arg If Compare Raise Call Assign Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "__init__",
    "source_code": "def __init__(self, size: int, element_size: int, device: torch.device, reftype: _RefType) -> None:\n    self.size = size\n    self.element_size = element_size\n    self.reftype = reftype\n    self.device = device\n    self.mem_consumed = self._calculate_mem_consumed()",
    "docstring": "Initializes the `` object with tensor storage properties. Args: size (int): The number of elements in the tensor storage. element_size (int): The size of each element in the tensor storage. device (torch.device): The device on which the tensor is allocated. reftype (_RefType): The reference type of the tensor.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\_tools\\mem_tracker.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:size arg:element_size arg:device arg:reftype arguments arg arg arg arg arg Assign Assign Assign Assign Assign Call"
  },
  {
    "library": "pytorch",
    "name": "OnnxExporterError",
    "source_code": "class OnnxExporterError(RuntimeError):\n    pass",
    "docstring": "Errors raised by the ONNX exporter. This is the base class for all exporter errors.",
    "type": "class",
    "file_path": "pytorch\\torch\\onnx\\errors.py",
    "ast_data": "ClassDef name:OnnxExporterError"
  },
  {
    "library": "django",
    "name": "clear",
    "source_code": "def clear(self):\n    for fname in self._list_cache_files():\n        self._delete(fname)",
    "docstring": "Remove all the cache files.",
    "type": "method",
    "file_path": "django\\django\\core\\cache\\backends\\filebased.py",
    "ast_data": "FunctionDef name:clear arg:self arguments arg For Call Call"
  },
  {
    "library": "numpy",
    "name": "ptp",
    "source_code": "def ptp(self, axis=None, out=None):\n    return N.ptp(self, axis, out)._align(axis)",
    "docstring": "Peak-to-peak (maximum - minimum) value along the given axis. Refer to for full documentation. See Also -------- numpy.ptp Notes ----- Same as , except, where that would return an object, this returns a object. Examples -------- >>> x = np.matrix(np.arange(12).reshape((3,4))); x matrix([[ 0, 1, 2, 3], [ 4, 5, 6, 7], [ 8, 9, 10, 11]]) >>> x.ptp() 11 >>> x.ptp(0) matrix([[8, 8, 8, 8]]) >>> x.ptp(1) matrix([[3], [3], [3]])",
    "type": "method",
    "file_path": "numpy\\numpy\\matrixlib\\defmatrix.py",
    "ast_data": "FunctionDef name:ptp arg:self arg:axis arg:out arguments arg arg arg Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "is_in_onnx_export",
    "source_code": "def is_in_onnx_export() -> bool:\n    from torch.onnx._globals import GLOBALS\n    from torch.onnx._internal.exporter import _flags\n    return GLOBALS.in_onnx_export or _flags._is_onnx_exporting",
    "docstring": "Returns whether it is in the middle of ONNX export.",
    "type": "function",
    "file_path": "pytorch\\torch\\onnx\\__init__.py",
    "ast_data": "FunctionDef name:is_in_onnx_export arguments Return return:yes BoolOp"
  },
  {
    "library": "tensorflow",
    "name": "local_variables",
    "source_code": "@tf_export(v1=['local_variables'])\ndef local_variables(scope=None):\n    return ops.get_collection(ops.GraphKeys.LOCAL_VARIABLES, scope)",
    "docstring": "Returns local variables. Local variables - per process variables, usually not saved/restored to checkpoint and used for temporary or intermediate values. For example, they can be used as counters for metrics computation or number of epochs this machine has read data. The function automatically adds the new variable to . This convenience function returns the contents of that collection. An alternative to local variables are global variables. See Args: scope: (Optional.) A string. If supplied, the resulting list is filtered to include only items whose attribute matches using . Items without a attribute are never returned if a scope is supplied. The choice of means that a without special tokens filters by prefix. Returns: A list of local objects.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\variables.py",
    "ast_data": "FunctionDef name:local_variables arg:scope arguments arg Return return:yes Call Call"
  },
  {
    "library": "scikit-learn",
    "name": "is_stationary",
    "source_code": "def is_stationary(self):\n    return True",
    "docstring": "Returns whether the kernel is stationary.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\gaussian_process\\kernels.py",
    "ast_data": "FunctionDef name:is_stationary arg:self arguments arg Return return:yes"
  },
  {
    "library": "django",
    "name": "NullBooleanField",
    "source_code": "class NullBooleanField(BooleanField):\n    widget = NullBooleanSelect\n\n    def to_python(self, value):\n        if value in (True, 'True', 'true', '1'):\n            return True\n        elif value in (False, 'False', 'false', '0'):\n            return False\n        else:\n            return None\n\n    def validate(self, value):\n        pass",
    "docstring": "A field whose valid values are None, True, and False. Clean invalid values to None.",
    "type": "class",
    "file_path": "django\\django\\forms\\fields.py",
    "ast_data": "ClassDef name:NullBooleanField Assign FunctionDef name:to_python arg:self arg:value arguments arg arg If Compare Return return:yes If Compare Return return:yes Return return:no FunctionDef name:validate arg:self arg:value arguments arg arg"
  },
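A quick sanity check of the cleaning behavior documented above; a minimal sketch assuming only that Django is importable (no settings are needed for `to_python` itself).

```python
# Minimal check of NullBooleanField.to_python semantics (values per the source above).
from django.forms.fields import NullBooleanField

f = NullBooleanField(required=False)
assert f.to_python("true") is True    # truthy tokens -> True
assert f.to_python("0") is False      # falsy tokens -> False
assert f.to_python("maybe") is None   # anything else cleans to None
```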
  {
    "library": "pytorch",
    "name": "arg_tree_leaves",
    "source_code": "def arg_tree_leaves(*args: PyTree, **kwargs: PyTree) -> list[Any]:\n    leaves: list[Any] = []\n    for a in args:\n        leaves.extend(tree_iter(a))\n    for a in kwargs.values():\n        leaves.extend(tree_iter(a))\n    return leaves",
    "docstring": "Get a flat list of arguments to this function A slightly faster version of tree_leaves((args, kwargs))",
    "type": "function",
    "file_path": "pytorch\\torch\\utils\\_pytree.py",
    "ast_data": "FunctionDef name:arg_tree_leaves arguments arg arg For Call Call For Call Call Call Return return:yes"
  },
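A short usage sketch of the equivalence stated in the docstring above; `torch.utils._pytree` is a private module, so this is illustrative only.

```python
# arg_tree_leaves(*args, **kwargs) yields the same leaves as tree_leaves((args, kwargs)).
import torch.utils._pytree as pytree

args = ([1, 2], {"a": 3})
kwargs = {"b": (4, 5)}
flat = pytree.arg_tree_leaves(*args, **kwargs)
assert flat == pytree.tree_leaves((args, kwargs))
print(flat)  # [1, 2, 3, 4, 5]
```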
  {
    "library": "matplotlib",
    "name": "_get_arrow_wedge",
    "source_code": "def _get_arrow_wedge(self, x0, y0, x1, y1, head_dist, cos_t, sin_t, linewidth):\n    dx, dy = (x0 - x1, y0 - y1)\n    cp_distance = np.hypot(dx, dy)\n    pad_projected = 0.5 * linewidth / sin_t\n    if cp_distance == 0:\n        cp_distance = 1\n    ddx = pad_projected * dx / cp_distance\n    ddy = pad_projected * dy / cp_distance\n    dx = dx / cp_distance * head_dist\n    dy = dy / cp_distance * head_dist\n    dx1, dy1 = (cos_t * dx + sin_t * dy, -sin_t * dx + cos_t * dy)\n    dx2, dy2 = (cos_t * dx - sin_t * dy, sin_t * dx + cos_t * dy)\n    vertices_arrow = [(x1 + ddx + dx1, y1 + ddy + dy1), (x1 + ddx, y1 + ddy), (x1 + ddx + dx2, y1 + ddy + dy2)]\n    codes_arrow = [Path.MOVETO, Path.LINETO, Path.LINETO]\n    return (vertices_arrow, codes_arrow, ddx, ddy)",
    "docstring": "Return the paths for arrow heads. Since arrow lines are drawn with capstyle=projected, The arrow goes beyond the desired point. This method also returns the amount of the path to be shrunken so that it does not overshoot.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\patches.py",
    "ast_data": "FunctionDef name:_get_arrow_wedge arg:self arg:x0 arg:y0 arg:x1 arg:y1 arg:head_dist arg:cos_t arg:sin_t arg:linewidth arguments arg arg arg arg arg arg arg arg arg Assign Assign Call Assign If Compare Assign Assign Assign Assign Assign Assign Assign Assign Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "run_metadata_graphs",
    "source_code": "def run_metadata_graphs(name, data, step=None):\n    summary_metadata = summary_pb2.SummaryMetadata()\n    summary_metadata.plugin_data.plugin_name = 'graph_run_metadata_graph'\n    summary_metadata.plugin_data.content = b'1'\n    data = config_pb2.RunMetadata(function_graphs=data.function_graphs, partition_graphs=data.partition_graphs)\n    with summary_scope(name, 'graph_run_metadata_graph_summary', [data, step]) as (tag, _):\n        with ops.device('cpu:0'):\n            tensor = constant_op.constant(data.SerializeToString(), dtype=dtypes.string)\n        return write(tag=tag, tensor=tensor, step=step, metadata=summary_metadata)",
    "docstring": "Writes graphs from a RunMetadata summary. Args: name: A name for this summary. The summary tag used for TensorBoard will be this name prefixed by any active name scopes. data: A RunMetadata proto to write. step: Explicit -castable monotonic step value for this summary. If omitted, this defaults to , which must not be None. Returns: True on success, or false if no summary was written because no default summary writer was available. Raises: ValueError: if a default writer exists, but no step was provided and is None.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\summary_ops_v2.py",
    "ast_data": "FunctionDef name:run_metadata_graphs arg:name arg:data arg:step arguments arg arg arg Assign Call Assign Assign Assign Call With Call With Call Assign Call Call Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "findobj",
    "source_code": "def findobj(self, match=None, include_self=True):\n    if match is None:\n\n        def matchfunc(x):\n            return True\n    elif isinstance(match, type) and issubclass(match, Artist):\n\n        def matchfunc(x):\n            return isinstance(x, match)\n    elif callable(match):\n        matchfunc = match\n    else:\n        raise ValueError('match must be None, a matplotlib.artist.Artist subclass, or a callable')\n    artists = reduce(operator.iadd, [c.findobj(matchfunc) for c in self.get_children()], [])\n    if include_self and matchfunc(self):\n        artists.append(self)\n    return artists",
    "docstring": "Find artist objects. Recursively find all instances contained in the artist. Parameters ---------- match A filter criterion for the matches. This can be - *None*: Return all objects contained in artist. - A function with signature `.Line2D.Artist`",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\artist.py",
    "ast_data": "FunctionDef name:findobj arg:self arg:match arg:include_self arguments arg arg arg If Compare FunctionDef name:matchfunc arg:x arguments arg Return return:yes If BoolOp Call Call FunctionDef name:matchfunc arg:x arguments arg Return return:yes Call If Call Assign Raise Call Assign Call Call Call If BoolOp Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "before_run",
    "source_code": "def before_run(self, run_context):\n    return None",
    "docstring": "Called before each call to run(). You can return from this call a object indicating ops or tensors to add to the upcoming call. These ops/tensors will be run together with the ops/tensors originally passed to the original run() call. The run args you return can also contain feeds to be added to the run() call. The argument is a that provides information about the upcoming call: the originally requested op/tensors, the TensorFlow Session. At this point graph is finalized and you can not add ops. Args: run_context: A object. Returns: None or a object.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\training\\session_run_hook.py",
    "ast_data": "FunctionDef name:before_run arg:self arg:run_context arguments arg arg Return return:no"
  },
  {
    "library": "matplotlib",
    "name": "mutatedx",
    "source_code": "def mutatedx(self):\n    return self._points[0, 0] != self._points_orig[0, 0] or self._points[1, 0] != self._points_orig[1, 0]",
    "docstring": "Return whether the x-limits have changed since init.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\transforms.py",
    "ast_data": "FunctionDef name:mutatedx arg:self arguments arg Return return:yes BoolOp Compare Compare"
  },
  {
    "library": "pytorch",
    "name": "type_check",
    "source_code": "def type_check(self):\n    graph = self.traced.graph\n    for n in graph.nodes:\n        self.type_check_node(n)\n    return True",
    "docstring": "A gradual type checker for graphs Effect: every node's field type will be populated with a type after type-checking is done",
    "type": "method",
    "file_path": "pytorch\\torch\\fx\\experimental\\graph_gradual_typechecker.py",
    "ast_data": "FunctionDef name:type_check arg:self arguments arg Assign For Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "pack_tangents",
    "source_code": "def pack_tangents(tensors):\n    return TangentInfo(*pywrap_tfe.TFE_Py_PackJVPs(tensors))",
    "docstring": "Packs forward accumulator state into a TangentInfo tuple. Args: tensors: A flat list of Tensors to pack forward accumulator state for. Returns: A tuple of (indices, tangents): indices: A sequence of sequences of two-element tuples. Each forward accumulator is represented as a sequence of tuples with (primal_index, jvp_index). Both integers index into the concatenated array. tangents: A flat list of Tensors. Best interpreted as a sequence to be appended to .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\forwardprop_util.py",
    "ast_data": "FunctionDef name:pack_tangents arg:tensors arguments arg Return return:yes Call Call"
  },
  {
    "library": "cryptography",
    "name": "__eq__",
    "source_code": "@abc.abstractmethod\ndef __eq__(self, other: object) -> bool:\n    pass",
    "docstring": "Checks equality.",
    "type": "method",
    "file_path": "cryptography\\src\\cryptography\\hazmat\\primitives\\asymmetric\\ed25519.py",
    "ast_data": "FunctionDef name:__eq__ arg:self arg:other arguments arg arg"
  },
  {
    "library": "pandas",
    "name": "at",
    "source_code": "@property\ndef at(self) -> _AtIndexer:\n    return _AtIndexer('at', self)",
    "docstring": "Access a single value for a row/column label pair. Similar to `Fast scalar value getting and setting ` for more details. Examples -------- >>> df = pd.DataFrame( ... [[0, 2, 3], [0, 4, 1], [10, 20, 30]], ... index=[4, 5, 6], ... columns=[\"A\", \"B\", \"C\"], ... ) >>> df A B C 4 0 2 3 5 0 4 1 6 10 20 30 Get value at specified row/column pair >>> df.at[4, \"B\"] np.int64(2) Set value at specified row/column pair >>> df.at[4, \"B\"] = 10 >>> df.at[4, \"B\"] np.int64(10) Get value within a Series >>> df.loc[5].at[\"B\"] np.int64(4)",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\indexing.py",
    "ast_data": "FunctionDef name:at arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "wrapped_toco_convert",
    "source_code": "def wrapped_toco_convert(model_flags_str, toco_flags_str, input_data_str):\n    return _pywrap_toco_api.TocoConvert(model_flags_str, toco_flags_str, input_data_str, False)",
    "docstring": "Wraps TocoConvert with lazy loader.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\lite\\python\\wrap_toco.py",
    "ast_data": "FunctionDef name:wrapped_toco_convert arg:model_flags_str arg:toco_flags_str arg:input_data_str arguments arg arg arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "most_specific_common_supertype",
    "source_code": "def most_specific_common_supertype(self, others: Sequence['Parameter']) -> Optional['Parameter']:\n    if not self.type_constraint or any((not other.type_constraint for other in others)):\n        raise TypeError('Can not determine relationship between partially specified types.')\n    for other in others:\n        if (self.name, self.kind, self.optional) != (other.name, other.kind, other.optional):\n            return None\n    supertyped_constraint = self.type_constraint.most_specific_common_supertype([other.type_constraint for other in others])\n    if supertyped_constraint:\n        return Parameter(self.name, self.kind, self.optional, supertyped_constraint)\n    else:\n        return None",
    "docstring": "Returns a common supertype (if exists).",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\core\\function\\polymorphism\\function_type.py",
    "ast_data": "FunctionDef name:most_specific_common_supertype arg:self arg:others arguments arg arg If BoolOp Call Raise Call For If Compare Return return:no Assign Call If Return return:yes Call Return return:no"
  },
  {
    "library": "tensorflow",
    "name": "relu",
    "source_code": "@dispatch.add_dispatch_support\n@doc_controls.do_not_generate_docs\ndef relu(x, alpha=0.0, max_value=None, threshold=0):\n    dtype = getattr(x, 'dtype', floatx())\n    if alpha != 0.0:\n        if max_value is None and threshold == 0:\n            return nn.leaky_relu(x, alpha=alpha)\n        if threshold != 0:\n            negative_part = nn.relu(-x + threshold)\n        else:\n            negative_part = nn.relu(-x)\n    clip_max = max_value is not None\n    if threshold != 0:\n        x = x * math_ops.cast(math_ops.greater(x, threshold), dtype=dtype)\n    elif max_value == 6:\n        x = nn.relu6(x)\n        clip_max = False\n    else:\n        x = nn.relu(x)\n    if clip_max:\n        max_value = _constant_to_tensor(max_value, x.dtype.base_dtype)\n        zero = _constant_to_tensor(0, x.dtype.base_dtype)\n        x = clip_ops.clip_by_value(x, zero, max_value)\n    if alpha != 0.0:\n        alpha = _to_tensor(alpha, x.dtype.base_dtype)\n        x -= alpha * negative_part\n    return x",
    "docstring": "Rectified linear unit. With default values, it returns element-wise . Otherwise, it follows: for , for , otherwise. Args: x: A tensor or variable. alpha: A scalar, slope of negative section (default=). max_value: float. Saturation threshold. threshold: float. Threshold value for thresholded activation. Returns: A tensor.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\backend.py",
    "ast_data": "FunctionDef name:relu arg:x arg:alpha arg:max_value arg:threshold arguments arg arg arg arg Assign Call Call If Compare If BoolOp Compare Compare Return return:yes Call If Compare Assign Call Assign Call Assign Compare If Compare Assign Call Call If Compare Assign Call Assign Assign Call If Assign Call Assign Call Assign Call If Compare Assign Call Return return:yes"
  },
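The piecewise definition in the docstring can be restated in plain NumPy; the sketch below is a reference implementation of that formula, not the Keras code path:

```python
# Reference implementation of the docstring's piecewise relu:
#   f(x) = max_value                 for x >= max_value
#   f(x) = x                         for threshold <= x < max_value
#   f(x) = alpha * (x - threshold)   otherwise
import numpy as np

def relu_ref(x, alpha=0.0, max_value=None, threshold=0.0):
    x = np.asarray(x, dtype=float)
    out = np.where(x >= threshold, x, alpha * (x - threshold))
    if max_value is not None:
        out = np.minimum(out, max_value)
    return out

print(relu_ref([-2.0, 0.5, 8.0], alpha=0.1, max_value=6.0))  # [-0.2  0.5  6. ]
```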
  {
    "library": "pytorch",
    "name": "gen_greatest_upper_bound",
    "source_code": "def gen_greatest_upper_bound(constraint: TGreatestUpperBound, counter: int):\n    all_constraints = []\n    for i in range(1, MAX_TENSOR_RANK + 1):\n        c = []\n        dims1, counter = gen_tensor_dims(i, counter)\n        c1tensor = TensorType(dims1)\n        dims2, counter = gen_tensor_dims(i, counter)\n        c2tensor = TensorType(dims2)\n        dims3, counter = gen_tensor_dims(i, counter)\n        c3tensor = TensorType(dims3)\n        c += [BinConstraintT(constraint.rhs1, c1tensor, op_eq), BinConstraintT(constraint.rhs2, c2tensor, op_eq), BinConstraintT(constraint.res, c3tensor, op_eq)] + gen_nat_constraints(dims1 + dims2 + dims3)\n        assert len(c3tensor.__args__) == len(c1tensor.__args__) == len(c2tensor.__args__)\n        for i in range(len(c3tensor.__args__)):\n            c.append(DGreatestUpperBound(c3tensor.__args__[i], c1tensor.__args__[i], c2tensor.__args__[i]))\n        all_constraints.append(Conj(c))\n    return (all_constraints, counter)",
    "docstring": "Args: constraint: Greatest upper bound on tensors counter: variable tracking Returns: A set of equality constraints and DGreatestUpperBound constraints",
    "type": "function",
    "file_path": "pytorch\\torch\\fx\\experimental\\migrate_gradual_types\\constraint_transformation.py",
    "ast_data": "FunctionDef name:gen_greatest_upper_bound arg:constraint arg:counter arguments arg arg Assign For Call Assign Assign Call Assign Call Assign Call Assign Call Assign Call Assign Call Call Call Call Call Compare Call Call Call For Call Call Call Call Call Call Return return:yes"
  },
  {
    "library": "pandas",
    "name": "__getitem__",
    "source_code": "def __getitem__(self, c: str):\n    for a in self.axes:\n        if c == a.name:\n            return a\n    return None",
    "docstring": "return the axis for c",
    "type": "method",
    "file_path": "pandas\\pandas\\io\\pytables.py",
    "ast_data": "FunctionDef name:__getitem__ arg:self arg:c arguments arg arg For If Compare Return return:yes Return return:no"
  },
  {
    "library": "scipy",
    "name": "integral",
    "source_code": "def integral(self, a, b):\n    with FITPACK_LOCK:\n        return _fitpack_impl.splint(a, b, self._eval_args)",
    "docstring": "Return definite integral of the spline between two given points. Parameters ---------- a : float Lower limit of integration. b : float Upper limit of integration. Returns ------- integral : float The value of the definite integral of the spline between limits. Examples -------- >>> import numpy as np >>> from scipy.interpolate import UnivariateSpline >>> x = np.linspace(0, 3, 11) >>> y = x**2 >>> spl = UnivariateSpline(x, y) >>> spl.integral(0, 3) 9.0 which agrees with :math: between the limits of 0 and 3. A caveat is that this routine assumes the spline to be zero outside of the data limits: >>> spl.integral(-1, 4) 9.0 >>> spl.integral(-1, 0) 0.0",
    "type": "method",
    "file_path": "scipy\\scipy\\interpolate\\_fitpack2.py",
    "ast_data": "FunctionDef name:integral arg:self arg:a arg:b arguments arg arg arg With Return return:yes Call"
  },
  {
    "library": "django",
    "name": "__sub__",
    "source_code": "def __sub__(self, other):\n    return self.difference(other)",
    "docstring": "Return the difference this Geometry and the other.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\geos\\geometry.py",
    "ast_data": "FunctionDef name:__sub__ arg:self arg:other arguments arg arg Return return:yes Call"
  },
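A usage sketch of the operator overload above; it assumes a working GEOS install, since `django.contrib.gis.geos` binds to the GEOS C library.

```python
# `a - b` delegates to a.difference(b), per the method above (requires GEOS).
from django.contrib.gis.geos import Polygon

a = Polygon(((0, 0), (0, 2), (2, 2), (2, 0), (0, 0)))   # 2x2 square
b = Polygon(((1, 0), (1, 2), (3, 2), (3, 0), (1, 0)))   # overlaps its right half
diff = a - b
print(diff.area)  # 2.0: the strip of `a` not covered by `b`
```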
  {
    "library": "tensorflow",
    "name": "emit_flow_end",
    "source_code": "def emit_flow_end(self, name: str, timestamp: int, pid: int, tid: int, flow_id: int) -> None:\n    event = self._create_event('t', 'DataFlow', name, pid, tid, timestamp)\n    event['id'] = flow_id\n    self._events.append(event)",
    "docstring": "Adds a flow end event to the trace. When matched with a flow start event (with the same 'flow_id') this will cause the trace viewer to draw an arrow between the start and end events. Args: name: The event name as a string. timestamp: The timestamp of this event as a long integer. pid: Identifier of the process generating this event as an integer. tid: Identifier of the thread generating this event as an integer. flow_id: Identifier of the flow as an integer.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\client\\timeline.py",
    "ast_data": "FunctionDef name:emit_flow_end arg:self arg:name arg:timestamp arg:pid arg:tid arg:flow_id arguments arg arg arg arg arg arg Assign Call Assign Call"
  },
  {
    "library": "pytorch",
    "name": "TritonKernelArtifact",
    "source_code": "@dataclasses.dataclass(frozen=True)\nclass TritonKernelArtifact:\n    filename: str\n    payload: bytes = dataclasses.field(repr=False)",
    "docstring": "Artifact for an individual kernel converted to bytes. Bytes could be a cubin, json, ttir, or ttgir.",
    "type": "class",
    "file_path": "pytorch\\torch\\_inductor\\triton_bundler.py",
    "ast_data": "ClassDef name:TritonKernelArtifact Call Call"
  },
  {
    "library": "tensorflow",
    "name": "validate_dataset_input",
    "source_code": "def validate_dataset_input(x, y, sample_weight, validation_split=None):\n    if y is not None:\n        raise ValueError('You passed a dataset or dataset iterator (%s) as input `x` to your model. In that case, you should not specify a target (`y`) argument, since the dataset or dataset iterator generates both input data and target data. Received: %s' % (x, y))\n    if sample_weight is not None:\n        raise ValueError('`sample_weight` argument is not supported when input `x` is a dataset or a dataset iterator. Instead, youcan provide sample_weight as the third element  of yourdataset, i.e. (inputs, targets, sample_weight). Received: x=%s, sample_weight=%s' % (x, sample_weight))\n    if validation_split is not None and validation_split != 0.0:\n        raise ValueError('`validation_split` argument is not supported when input `x` is a dataset or a dataset iterator. Received: x=%s, validation_split=%f' % (x, validation_split))",
    "docstring": "Validates user input arguments when a dataset iterator is passed. Args: x: Input data. A dataset or iterator. y: Target data. It could be either Numpy array(s) or TensorFlow tensor(s). Expected to be when is a dataset iterator. sample_weight: An optional sample-weight array passed by the user to weight the importance of each sample in . Expected to be when is a dataset iterator validation_split: Float between 0 and 1. Fraction of the training data to be used as validation data. Expected to be when is a dataset iterator. Raises: ValueError: if argument or or are provided by user.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\training_utils_v1.py",
    "ast_data": "FunctionDef name:validate_dataset_input arg:x arg:y arg:sample_weight arg:validation_split arguments arg arg arg arg If Compare Raise Call If Compare Raise Call If BoolOp Compare Compare Raise Call"
  },
  {
    "library": "pytorch",
    "name": "_make_var_reduction_prim",
    "source_code": "def _make_var_reduction_prim(name: str, impl_aten, doc):\n    return _make_prim(schema=f'{name}(Tensor inp, int[]? dims, float? correction=1, *, ScalarType? output_dtype=None) -> Tensor', meta=_var_reduction_meta, impl_aten=impl_aten, return_type=RETURN_TYPE.NEW, doc=doc)",
    "docstring": "Creates a reduction prim.",
    "type": "function",
    "file_path": "pytorch\\torch\\_prims\\__init__.py",
    "ast_data": "FunctionDef name:_make_var_reduction_prim arg:name arg:impl_aten arg:doc arguments arg arg arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "generate_fx",
    "source_code": "@abc.abstractmethod\ndef generate_fx(self, options: ResolvedExportOptions, model: torch.nn.Module | Callable, model_args: Sequence[Any], model_kwargs: Mapping[str, Any]) -> torch.fx.GraphModule:\n    ...",
    "docstring": "Analyzes user `` and generates a FX graph. Args: options: The export options. model: The user model. model_args: The model's positional input arguments. model_kwargs: The model's keyword input arguments. Returns: The generated FX Graph.",
    "type": "method",
    "file_path": "pytorch\\torch\\onnx\\_internal\\_exporter_legacy.py",
    "ast_data": "FunctionDef name:generate_fx arg:self arg:options arg:model arg:model_args arg:model_kwargs arguments arg arg arg arg arg"
  },
  {
    "library": "pytorch",
    "name": "bitshift_mul",
    "source_code": "def bitshift_mul(self, weight_val, r):\n    product = 0\n    idx = len(weight_val) - 1\n    place = 0\n    while idx >= 0:\n        block = weight_val[idx]\n        block = block[::-1]\n        curr_block_result = 0\n        for ele in block:\n            if int(ele):\n                curr_block_result += r << place\n            place += 1\n        idx -= 1\n        product += curr_block_result\n    return product",
    "docstring": "Compute multiplication of weight_val * r using bitshifting method discussed in APoT paper: Args: weight_val: list of binary digits representing APoT quantized weight value r: int representing uniformly quantized activation value",
    "type": "method",
    "file_path": "pytorch\\torch\\ao\\quantization\\experimental\\linear.py",
    "ast_data": "FunctionDef name:bitshift_mul arg:self arg:weight_val arg:r arguments arg arg arg Assign Assign Call Assign While Compare Assign Assign Assign For If Call Return return:yes"
  },
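A standalone re-statement of the shift-and-add loop above, useful for checking the arithmetic; the block layout (most significant block first, digits most significant first within a block) is taken from the original code.

```python
# Shift-and-add multiplication: for every set bit at position p, add r << p.
def bitshift_mul(weight_val, r):
    product, place = 0, 0
    for block in reversed(weight_val):        # least significant block first
        for ele in reversed(block):           # least significant digit first
            if int(ele):
                product += r << place
            place += 1
    return product

# "101" encodes 5, so the product with r=3 is 15.
assert bitshift_mul([["1", "0", "1"]], 3) == 15
```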
  {
    "library": "scipy",
    "name": "PermFunction02",
    "source_code": "class PermFunction02(Benchmark):\n    change_dimensionality = True\n\n    def __init__(self, dimensions=2):\n        Benchmark.__init__(self, dimensions)\n        self._bounds = list(zip([-self.N] * self.N, [self.N + 1] * self.N))\n        self.custom_bounds = ([0, 1.5], [0, 1.0])\n        self.global_optimum = [1.0 / arange(1, self.N + 1)]\n        self.fglob = 0.0\n\n    def fun(self, x, *args):\n        self.nfev += 1\n        b = 10\n        k = atleast_2d(arange(self.N) + 1).T\n        j = atleast_2d(arange(self.N) + 1)\n        s = (j + b) * (x ** k - (1.0 / j) ** k)\n        return sum(sum(s, axis=1) ** 2)",
    "docstring": "PermFunction 2 objective function. This class defines the Perm Function 2 [1]_ global optimization problem. This is a multimodal minimization problem defined as follows: .. math:: f_{\\text{PermFunction02}}(x) = \\sum_{k=1}^n \\left\\{ \\sum_{j=1}^n (j + \\beta) \\left[ \\left(x_j^k - {\\frac{1}{j}}^{k} \\right ) \\right] \\right\\}^2 Here, :math: represents the number of dimensions and :math: for :math:. *Global optimum*: :math: for :math: for :math: .. [1] Mishra, S. Global Optimization by Differential Evolution and Particle Swarm Methods: Evaluation on Some Benchmark Functions. Munich Personal RePEc Archive, 2006, 1005 TODO: line 582",
    "type": "class",
    "file_path": "scipy\\benchmarks\\benchmarks\\go_benchmark_functions\\go_funcs_P.py",
    "ast_data": "ClassDef name:PermFunction02 Assign FunctionDef name:__init__ arg:self arg:dimensions arguments arg arg Call Assign Call Call Assign Assign Call Assign FunctionDef name:fun arg:self arg:x arguments arg arg arg Assign Assign Call Call Assign Call Call Assign Return return:yes Call Call"
  },
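The objective can be checked against its known optimum with a few lines of NumPy; this re-states the `fun` method above outside the benchmark harness (beta fixed to 10, as in the code):

```python
# Perm Function 2: f(x) = sum_k ( sum_j (j + b) * (x_j**k - (1/j)**k) )**2
import numpy as np

def perm02(x, b=10.0):
    n = len(x)
    k = np.arange(1, n + 1)[:, None]   # outer sum index, as a column
    j = np.arange(1, n + 1)[None, :]   # inner sum index, as a row
    s = (j + b) * (np.asarray(x)[None, :] ** k - (1.0 / j) ** k)
    return float(np.sum(np.sum(s, axis=1) ** 2))

x_opt = 1.0 / np.arange(1, 5)          # global optimum x_j = 1/j
print(perm02(x_opt))                   # ~0.0
```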
  {
    "library": "pytorch",
    "name": "enable_math_sdp",
    "source_code": "def enable_math_sdp(enabled: bool):\n    torch._C._set_sdp_use_math(enabled)",
    "docstring": ".. warning:: This flag is beta and subject to change. Enables or disables math scaled dot product attention.",
    "type": "function",
    "file_path": "pytorch\\torch\\backends\\cuda\\__init__.py",
    "ast_data": "FunctionDef name:enable_math_sdp arg:enabled arguments arg Call"
  },
  {
    "library": "tensorflow",
    "name": "parent_frame_arguments",
    "source_code": "def parent_frame_arguments():\n    arg_names, variable_arg_name, keyword_arg_name, local_vars = tf_inspect._inspect.getargvalues(tf_inspect._inspect.stack()[1][0])\n    local_vars.pop(variable_arg_name, {})\n    keyword_args = local_vars.pop(keyword_arg_name, {})\n    final_args = {}\n    for arg_name in arg_names:\n        final_args[arg_name] = local_vars.pop(arg_name)\n    final_args.update(keyword_args)\n    return final_args",
    "docstring": "Returns parent frame arguments. When called inside a function, returns a dictionary with the caller's function arguments. These are positional arguments and keyword arguments (**kwargs), while variable arguments (*varargs) are excluded. When called at global scope, this will return an empty dictionary, since there are no arguments. WARNING: If caller function argument names are overloaded before invoking this method, then values will reflect the overloaded value. For this reason, we recommend calling at the beginning of the function.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\distributions\\util.py",
    "ast_data": "FunctionDef name:parent_frame_arguments arguments Assign Call Call Call Assign Call Assign For Assign Call Call Return return:yes"
  },
  {
    "library": "django",
    "name": "get_admin_log",
    "source_code": "@register.tag\ndef get_admin_log(parser, token):\n    tokens = token.contents.split()\n    if len(tokens) < 4:\n        raise template.TemplateSyntaxError(\"'get_admin_log' statements require two arguments\")\n    if not tokens[1].isdigit():\n        raise template.TemplateSyntaxError(\"First argument to 'get_admin_log' must be an integer\")\n    if tokens[2] != 'as':\n        raise template.TemplateSyntaxError(\"Second argument to 'get_admin_log' must be 'as'\")\n    if len(tokens) > 4:\n        if tokens[4] != 'for_user':\n            raise template.TemplateSyntaxError(\"Fourth argument to 'get_admin_log' must be 'for_user'\")\n    return AdminLogNode(limit=tokens[1], varname=tokens[3], user=tokens[5] if len(tokens) > 5 else None)",
    "docstring": "Populate a template variable with the admin log for the given criteria. Usage:: {% get_admin_log [limit] as [varname] for_user [context_var_with_user_obj] %} Examples:: {% get_admin_log 10 as admin_log for_user 23 %} {% get_admin_log 10 as admin_log for_user user %} {% get_admin_log 10 as admin_log %} Note that `` can be a hard-coded integer (user ID) or the name of a template context variable containing the user object whose ID you want.",
    "type": "function",
    "file_path": "django\\django\\contrib\\admin\\templatetags\\log.py",
    "ast_data": "FunctionDef name:get_admin_log arg:parser arg:token arguments arg arg Assign Call If Compare Call Raise Call If Call Raise Call If Compare Raise Call If Compare Call If Compare Raise Call Return return:yes Call Compare Call"
  },
  {
    "library": "matplotlib",
    "name": "__init__",
    "source_code": "def __init__(self, artist, ref_coord, unit='points'):\n    self._artist = artist\n    x, y = ref_coord\n    self._ref_coord = (x, y)\n    self.set_unit(unit)",
    "docstring": "Parameters ---------- artist : or or The object to compute the offset from. ref_coord : (float, float) If *artist* is an or , this values is the location to of the offset origin in fractions of the *artist* bounding box. If *artist* is a transform, the offset origin is the transform applied to this value. unit : {'points, 'pixels'}, default: 'points' The screen units to use (pixels or points) for the offset input.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\text.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:artist arg:ref_coord arg:unit arguments arg arg arg arg Assign Assign Assign Call"
  },
  {
    "library": "sphinx",
    "name": "validate_html_logo",
    "source_code": "def validate_html_logo(app: Sphinx, config: Config) -> None:\n    if config.html_logo and (not (app.confdir / config.html_logo).is_file()) and (not is_url(config.html_logo)):\n        logger.warning(__('logo file %r does not exist'), config.html_logo)\n        config.html_logo = None",
    "docstring": "Check html_logo setting.",
    "type": "function",
    "file_path": "sphinx\\sphinx\\builders\\html\\__init__.py",
    "ast_data": "FunctionDef name:validate_html_logo arg:app arg:config arguments arg arg If BoolOp Call Call Call Call Assign"
  },
  {
    "library": "pytorch",
    "name": "register_load_state_dict_pre_hook",
    "source_code": "def register_load_state_dict_pre_hook(self, hook: Callable[['Optimizer', StateDict], Optional[StateDict]], prepend: bool=False) -> RemovableHandle:\n    handle = hooks.RemovableHandle(self._optimizer_load_state_dict_pre_hooks)\n    self._optimizer_load_state_dict_pre_hooks[handle.id] = hook\n    if prepend:\n        self._optimizer_load_state_dict_pre_hooks.move_to_end(handle.id, last=False)\n    return handle",
    "docstring": "Register a load_state_dict pre-hook which will be called before :meth: is called. It should have the following signature:: hook(optimizer, state_dict) -> state_dict or None The `torch.utils.hooks.RemoveableHandle`",
    "type": "method",
    "file_path": "pytorch\\torch\\optim\\optimizer.py",
    "ast_data": "FunctionDef name:register_load_state_dict_pre_hook arg:self arg:hook arg:prepend arguments arg arg arg Assign Call Assign If Call Return return:yes"
  },
  {
    "library": "scipy",
    "name": "interval",
    "source_code": "def interval(self, confidence, *args, **kwds):\n    alpha = confidence\n    alpha = asarray(alpha)\n    if np.any((alpha > 1) | (alpha < 0)):\n        raise ValueError('alpha must be between 0 and 1 inclusive')\n    q1 = (1.0 - alpha) / 2\n    q2 = (1.0 + alpha) / 2\n    a = self.ppf(q1, *args, **kwds)\n    b = self.ppf(q2, *args, **kwds)\n    return (a, b)",
    "docstring": "Confidence interval with equal areas around the median. Parameters ---------- confidence : array_like of float Probability that an rv will be drawn from the returned range. Each value should be in the range [0, 1]. arg1, arg2, ... : array_like The shape parameter(s) for the distribution (see docstring of the instance object for more information). loc : array_like, optional location parameter, Default is 0. scale : array_like, optional scale parameter, Default is 1. Returns ------- a, b : ndarray of float end-points of range that contain `` (usually strictly less).",
    "type": "method",
    "file_path": "scipy\\scipy\\stats\\_distn_infrastructure.py",
    "ast_data": "FunctionDef name:interval arg:self arg:confidence arguments arg arg arg arg Assign Assign Call If Call Compare Compare Raise Call Assign Assign Assign Call Assign Call Return return:yes"
  },
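Because the interval is built from `ppf` at the symmetric tail probabilities `(1 - confidence) / 2` and `(1 + confidence) / 2`, it can be verified directly against `ppf`:

```python
# interval(confidence) == (ppf(q1), ppf(q2)) with q1, q2 the symmetric tails.
from scipy import stats

lo, hi = stats.norm.interval(0.95)
assert abs(lo - stats.norm.ppf(0.025)) < 1e-12
assert abs(hi - stats.norm.ppf(0.975)) < 1e-12
print(lo, hi)  # approximately -1.96, 1.96
```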
  {
    "library": "numpy",
    "name": "geomspace",
    "source_code": "@array_function_dispatch(_geomspace_dispatcher)\ndef geomspace(start, stop, num=50, endpoint=True, dtype=None, axis=0):\n    start = asanyarray(start)\n    stop = asanyarray(stop)\n    if _nx.any(start == 0) or _nx.any(stop == 0):\n        raise ValueError('Geometric sequence cannot include zero')\n    dt = result_type(start, stop, float(num), _nx.zeros((), dtype))\n    if dtype is None:\n        dtype = dt\n    else:\n        dtype = _nx.dtype(dtype)\n    start = start.astype(dt, copy=True)\n    stop = stop.astype(dt, copy=True)\n    out_sign = _nx.sign(start)\n    start /= out_sign\n    stop = stop / out_sign\n    log_start = _nx.log10(start)\n    log_stop = _nx.log10(stop)\n    result = logspace(log_start, log_stop, num=num, endpoint=endpoint, base=10.0, dtype=dt)\n    if num > 0:\n        result[0] = start\n        if num > 1 and endpoint:\n            result[-1] = stop\n    result *= out_sign\n    if axis != 0:\n        result = _nx.moveaxis(result, 0, axis)\n    return result.astype(dtype, copy=False)",
    "docstring": "Return numbers spaced evenly on a log scale (a geometric progression). This is similar to , but with endpoints specified directly. Each output sample is a constant multiple of the previous. Parameters ---------- start : array_like The starting value of the sequence. stop : array_like The final value of the sequence, unless is False. In that case, `numstopdtypestartstopfloatnumhow-to-partitionendpoint` parameter: >>> import matplotlib.pyplot as plt >>> N = 10 >>> y = np.zeros(N) >>> plt.semilogx(np.geomspace(1, 1000, N, endpoint=True), y + 1, 'o') [] >>> plt.semilogx(np.geomspace(1, 1000, N, endpoint=False), y + 2, 'o') [] >>> plt.axis([0.5, 2000, 0, 3]) [0.5, 2000, 0, 3] >>> plt.grid(True, color='0.7', linestyle='-', which='both', axis='both') >>> plt.show()",
    "type": "function",
    "file_path": "numpy\\numpy\\_core\\function_base.py",
    "ast_data": "FunctionDef name:geomspace arg:start arg:stop arg:num arg:endpoint arg:dtype arg:axis arguments arg arg arg arg arg arg Assign Call Assign Call If BoolOp Call Compare Call Compare Raise Call Assign Call Call Call If Compare Assign Assign Call Assign Call Assign Call Assign Call Assign Assign Call Assign Call Assign Call If Compare Assign If BoolOp Compare Assign If Compare Assign Call Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "benchmark_simple_fn",
    "source_code": "def benchmark_simple_fn(args, config, module_config, module_type, result):\n    print(f'Benchmarking {module_type.__name__}')\n    f_name = module_config.pt_fn.__name__ + ':Num Operands=' + str(module_config.num_params)\n    graph_mode_str = 'Graph mode' + ':' + str(module_config.graph_mode)\n    result_key = ','.join((f_name, graph_mode_str))\n    module = WrapperModule(module_type, module_config, args.debug, args.save)\n    latency_per_iter_ms = benchmark_module(config, module, args.use_throughput_benchmark)\n    result[result_key] = latency_per_iter_ms",
    "docstring": "Benchmarks a PyTorch traceable function specified in the config. Instantiates a wrapper object that wraps the object of module_type and runs the forward method using benchmark_module. Args: config: contains number of warmup and benchmark iterations. module_config: module_config which contains op, number of parameters that op takes and whether graph mode is enabled or not. module_type: Type of the module to be wrapped. e.g. SimpleAddModule for add op. result: dictionary instance to be populated with the benchmark result (latency per iter).",
    "type": "function",
    "file_path": "pytorch\\benchmarks\\framework_overhead_benchmark\\framework_overhead_benchmark.py",
    "ast_data": "FunctionDef name:benchmark_simple_fn arg:args arg:config arg:module_config arg:module_type arg:result arguments arg arg arg arg arg Call Assign Call Assign Call Assign Call Assign Call Assign Call Assign"
  },
  {
    "library": "pytorch",
    "name": "reorder_tensor_pre_hook_nodes",
    "source_code": "def reorder_tensor_pre_hook_nodes(self):\n    for node in self.fx_tracer.graph.find_nodes(op='call_function', target=call_hook):\n        if node.kwargs.get('hook_type', None) != 'tensor_pre_hook':\n            continue\n        getitem_node = node.args[0]\n        input_node = node.args[1]\n        if input_node is not node.prev and (not self.is_placeholder(input_node)):\n            input_node.append(getitem_node)\n            getitem_node.append(node)",
    "docstring": "Usage of AOTAutograd causes all the tensor_pre_hook nodes to get pushed to the end of the graph. This differs from eager mode, which schedules them as soon as possible. This pass attempts to reorder the graph to mimic eager behavior.",
    "type": "method",
    "file_path": "pytorch\\torch\\_dynamo\\compiled_autograd.py",
    "ast_data": "FunctionDef name:reorder_tensor_pre_hook_nodes arg:self arguments arg For Call If Compare Call Assign Assign If BoolOp Compare Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_init_profile_batch",
    "source_code": "def _init_profile_batch(self, profile_batch):\n    profile_batch_error_message = 'profile_batch must be a non-negative integer or 2-tuple of positive integers. A pair of positive integers signifies a range of batches to profile. Found: {}'.format(profile_batch)\n    if isinstance(profile_batch, str):\n        profile_batch = str(profile_batch).split(',')\n        profile_batch = nest.map_structure(int, profile_batch)\n    if isinstance(profile_batch, int):\n        self._start_batch = profile_batch\n        self._stop_batch = profile_batch\n    elif isinstance(profile_batch, (tuple, list)) and len(profile_batch) == 2:\n        self._start_batch, self._stop_batch = profile_batch\n    else:\n        raise ValueError(profile_batch_error_message)\n    if self._start_batch < 0 or self._stop_batch < self._start_batch:\n        raise ValueError(profile_batch_error_message)\n    self._profiler_started = False\n    if self._start_batch > 0:\n        self._start_profiler(logdir='')\n        self._stop_profiler(save=False)\n    self._is_tracing = False\n    self._should_trace = not (self._start_batch == 0 and self._stop_batch == 0)",
    "docstring": "Validate profile_batch value and set the range of batches to profile. Sets values of _start_batch and _stop_batch attributes, specifying the start and stop batch to profile. Setting disables profiling. Args: profile_batch: The range of batches to profile. Should be a non-negative integer or a comma separated string of pair of positive integers. A pair of positive integers signify a range of batches to profile. Raises: ValueError: If profile_batch is not an integer or a comma separated pair of positive integers.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\callbacks.py",
    "ast_data": "FunctionDef name:_init_profile_batch arg:self arg:profile_batch arguments arg arg Assign Call If Call Assign Call Call Assign Call If Call Assign Assign If BoolOp Call Compare Call Assign Raise Call If BoolOp Compare Compare Raise Call Assign If Compare Call Call Assign Assign BoolOp Compare Compare"
  },
  {
    "library": "tensorflow",
    "name": "_to_tensor_shape",
    "source_code": "def _to_tensor_shape(self):\n    alt = self\n    if alt._static_inner_shape.rank is None:\n        return tensor_shape.TensorShape(None)\n    if alt._static_inner_shape.rank == 0:\n        assert not alt._row_partitions\n        return alt._static_inner_shape\n    prefix = [alt._dimension(0)]\n    prefix.extend([rp.uniform_row_length for rp in alt._row_partitions])\n    suffix = alt._static_inner_shape[1:]\n    return tensor_shape.TensorShape(prefix) + suffix",
    "docstring": "Get a tensor shape corresponding to this type.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\dynamic_ragged_shape.py",
    "ast_data": "FunctionDef name:_to_tensor_shape arg:self arguments arg Assign If Compare Return return:yes Call If Compare Return return:yes Assign Call Call Assign Return return:yes Call"
  },
  {
    "library": "authlib",
    "name": "prepare",
    "source_code": "def prepare(self, method, uri, headers, body):\n    content_type = to_native(headers.get('Content-Type', ''))\n    if self.signature_type == SIGNATURE_TYPE_BODY:\n        content_type = CONTENT_TYPE_FORM_URLENCODED\n    elif not content_type and extract_params(body):\n        content_type = CONTENT_TYPE_FORM_URLENCODED\n    if CONTENT_TYPE_FORM_URLENCODED in content_type:\n        headers['Content-Type'] = CONTENT_TYPE_FORM_URLENCODED\n        uri, headers, body = self.sign(method, uri, headers, body)\n    elif self.force_include_body:\n        uri, headers, body = self.sign(method, uri, headers, body)\n    else:\n        uri, headers, _ = self.sign(method, uri, headers, b'')\n        body = b''\n    return (uri, headers, body)",
    "docstring": "Add OAuth parameters to the request. Parameters may be included from the body if the content-type is urlencoded, if no content type is set, a guess is made.",
    "type": "method",
    "file_path": "authlib\\authlib\\oauth1\\rfc5849\\client_auth.py",
    "ast_data": "FunctionDef name:prepare arg:self arg:method arg:uri arg:headers arg:body arguments arg arg arg arg arg Assign Call Call If Compare Assign If BoolOp Call Assign If Compare Assign Assign Call If Assign Call Assign Call Assign Return return:yes"
  },
  {
    "library": "django",
    "name": "getZ",
    "source_code": "def getZ(self, index):\n    return self.getOrdinate(2, index)",
    "docstring": "Get Z with the value at the given index.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\geos\\coordseq.py",
    "ast_data": "FunctionDef name:getZ arg:self arg:index arguments arg arg Return return:yes Call"
  },
  {
    "library": "django",
    "name": "DateMixin",
    "source_code": "class DateMixin:\n    date_field = None\n    allow_future = False\n\n    def get_date_field(self):\n        if self.date_field is None:\n            raise ImproperlyConfigured('%s.date_field is required.' % self.__class__.__name__)\n        return self.date_field\n\n    def get_allow_future(self):\n        return self.allow_future\n\n    @cached_property\n    def uses_datetime_field(self):\n        model = self.get_queryset().model if self.model is None else self.model\n        field = model._meta.get_field(self.get_date_field())\n        return isinstance(field, models.DateTimeField)\n\n    def _make_date_lookup_arg(self, value):\n        if self.uses_datetime_field:\n            value = datetime.datetime.combine(value, datetime.time.min)\n            if settings.USE_TZ:\n                value = timezone.make_aware(value)\n        return value\n\n    def _make_single_date_lookup(self, date):\n        date_field = self.get_date_field()\n        if self.uses_datetime_field:\n            since = self._make_date_lookup_arg(date)\n            until = self._make_date_lookup_arg(date + datetime.timedelta(days=1))\n            return {'%s__gte' % date_field: since, '%s__lt' % date_field: until}\n        else:\n            return {date_field: date}",
    "docstring": "Mixin class for views manipulating date-based data.",
    "type": "class",
    "file_path": "django\\django\\views\\generic\\dates.py",
    "ast_data": "ClassDef name:DateMixin Assign Assign FunctionDef name:get_date_field arg:self arguments arg If Compare Raise Call Return return:yes FunctionDef name:get_allow_future arg:self arguments arg Return return:yes FunctionDef name:uses_datetime_field arg:self arguments arg Assign Compare Call Assign Call Call Return return:yes Call FunctionDef name:_make_date_lookup_arg arg:self arg:value arguments arg arg If Assign Call If Assign Call Return return:yes FunctionDef name:_make_single_date_lookup arg:self arg:date arguments arg arg Assign Call If Assign Call Assign Call Call Return return:yes Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "transform",
    "source_code": "def transform(self, X):\n    check_is_fitted(self)\n    X = validate_data(self, X, accept_sparse=['csr', 'csc'], reset=False, dtype=[np.float64, np.float32])\n    return X @ self.components_.T",
    "docstring": "Project the data by using matrix product with the random matrix. Parameters ---------- X : {ndarray, sparse matrix} of shape (n_samples, n_features) The input data to project into a smaller dimensional space. Returns ------- X_new : ndarray of shape (n_samples, n_components) Projected array.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\random_projection.py",
    "ast_data": "FunctionDef name:transform arg:self arg:X arguments arg arg Call Assign Call Return return:yes"
  },
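A usage sketch showing that `transform` is exactly the matrix product with the fitted random matrix; `GaussianRandomProjection` is used here as a concrete fitted projector.

```python
# transform(X) is X @ components_.T, per the method above.
import numpy as np
from sklearn.random_projection import GaussianRandomProjection

rng = np.random.RandomState(0)
X = rng.randn(100, 1000)
proj = GaussianRandomProjection(n_components=50, random_state=0).fit(X)
X_new = proj.transform(X)
assert np.allclose(X_new, X @ proj.components_.T)
print(X_new.shape)  # (100, 50)
```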
  {
    "library": "scrapy",
    "name": "iter_all",
    "source_code": "def iter_all(class_name: str) -> Iterable[Any]:\n    for cls, wdict in live_refs.items():\n        if cls.__name__ == class_name:\n            return wdict.keys()\n    return []",
    "docstring": "Iterate over all objects of the same class by its class name",
    "type": "function",
    "file_path": "scrapy\\scrapy\\utils\\trackref.py",
    "ast_data": "FunctionDef name:iter_all arg:class_name arguments arg For Call If Compare Return return:yes Call Return return:no"
  },
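Classes opt in to this tracking by subclassing `scrapy.utils.trackref.object_ref`, which registers each live instance in `live_refs`; a minimal sketch:

```python
# iter_all("Page") enumerates live instances of any object_ref subclass named "Page".
from scrapy.utils.trackref import object_ref, iter_all

class Page(object_ref):
    def __init__(self, url):
        self.url = url

pages = [Page(f"https://example.com/{i}") for i in range(3)]
print(sorted(p.url for p in iter_all("Page")))
```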
  {
    "library": "tensorflow",
    "name": "can_decode",
    "source_code": "def can_decode(self, value):\n    if value.HasField('type_spec_value'):\n        type_spec_class_enum = value.type_spec_value.type_spec_class\n        return type_spec_class_enum == struct_pb2.TypeSpecProto.EXTENSION_TYPE_SPEC\n    return False",
    "docstring": "Returns true if can be decoded into a .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\extension_type.py",
    "ast_data": "FunctionDef name:can_decode arg:self arg:value arguments arg arg If Call Assign Return return:yes Compare Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "clear",
    "source_code": "def clear(self, figure):\n    if figure in self.views:\n        self.views[figure].clear()\n        self.positions[figure].clear()\n        self.home_views[figure].clear()\n        self.update_home_views()",
    "docstring": "Reset the Axes stack.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\backend_tools.py",
    "ast_data": "FunctionDef name:clear arg:self arg:figure arguments arg arg If Compare Call Call Call Call"
  },
  {
    "library": "sphinx",
    "name": "needs_arglist",
    "source_code": "def needs_arglist(self) -> bool:\n    return False",
    "docstring": "May return true if an empty argument list is to be generated even if the document contains none.",
    "type": "method",
    "file_path": "sphinx\\sphinx\\domains\\python\\_object.py",
    "ast_data": "FunctionDef name:needs_arglist arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "AllocationMaximum",
    "source_code": "class AllocationMaximum(collections.namedtuple('AllocationMaximum', ('timestamp', 'num_bytes', 'tensors'))):\n    pass",
    "docstring": "Stores the maximum allocation for a given allocator within the timelne. Parameters: timestamp: when this maximum was reached. num_bytes: the total memory used at this time. tensors: the set of tensors allocated at this time.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\client\\timeline.py",
    "ast_data": "ClassDef name:AllocationMaximum Call"
  },
  {
    "library": "pytorch",
    "name": "_get_type_from_str",
    "source_code": "def _get_type_from_str(type_str: str) -> ir.TensorType | ir.SequenceType | ir.OptionalType:\n    striped = type_str.rstrip(')')\n    type_parts = striped.split('(')\n    dtype = ir.DataType[type_parts[-1].upper()]\n    type_: ir.TypeProtocol = ir.TensorType(ir.DataType.UNDEFINED)\n    for type_part in reversed(type_parts[:-1]):\n        if type_part == 'tensor':\n            type_ = ir.TensorType(dtype)\n        elif type_part == 'seq':\n            type_ = ir.SequenceType(type_)\n        elif type_part == 'optional':\n            type_ = ir.OptionalType(type_)\n        else:\n            raise ValueError(f\"Unknown type part: '{type_part}' in type '{type_str}'\")\n    return type_",
    "docstring": "Converter a type_str from ONNX Opschema to ir.TypeProtocol. A type str has the form of \"tensor(float)\" or composite type like \"seq(tensor(float))\".",
    "type": "function",
    "file_path": "pytorch\\torch\\onnx\\_internal\\exporter\\_schemas.py",
    "ast_data": "FunctionDef name:_get_type_from_str arg:type_str arguments arg Assign Call Assign Call Assign Call Call For Call If Compare Assign Call If Compare Assign Call If Compare Assign Call Raise Call Return return:yes"
  },
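The parsing idea generalizes: split on `(`, take the innermost token as the dtype, then re-apply the wrappers from the inside out. A library-free sketch of that logic (plain tuples stand in for the `ir` type objects):

```python
# Peel "seq(" / "optional(" wrappers around a base "tensor(<dtype>)" string.
def parse_type(type_str: str):
    parts = type_str.rstrip(")").split("(")
    result = ("tensor", parts[-1])          # innermost token is the dtype
    for part in reversed(parts[:-2]):       # wrappers outside tensor(...)
        result = (part, result)
    return result

assert parse_type("tensor(float)") == ("tensor", "float")
assert parse_type("seq(tensor(float))") == ("seq", ("tensor", "float"))
assert parse_type("optional(seq(tensor(int64)))") == ("optional", ("seq", ("tensor", "int64")))
```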
  {
    "library": "scipy",
    "name": "Problem13",
    "source_code": "class Problem13(Benchmark):\n\n    def __init__(self, dimensions=1):\n        Benchmark.__init__(self, dimensions)\n        self._bounds = [(0.001, 0.99)]\n        self.global_optimum = 1.0 / sqrt(2)\n        self.fglob = -1.5874\n\n    def fun(self, x, *args):\n        self.nfev += 1\n        x = x[0]\n        return -x ** (2.0 / 3.0) - (1.0 - x ** 2) ** (1.0 / 3.0)",
    "docstring": "Univariate Problem13 objective function. This class defines the Univariate Problem13 global optimization problem. This is a multimodal minimization problem defined as follows: .. math:: f_{\\text{Problem13}}(x) = -x^{2/3} - (1 - x^2)^{1/3} Bound constraints: :math: .. figure:: figures/Problem13.png :alt: Univariate Problem13 function :align: center **Univariate Problem13 function** *Global optimum*: :math: for :math:",
    "type": "class",
    "file_path": "scipy\\benchmarks\\benchmarks\\go_benchmark_functions\\go_funcs_univariate.py",
    "ast_data": "ClassDef name:Problem13 FunctionDef name:__init__ arg:self arg:dimensions arguments arg arg Call Assign Assign Call Assign FunctionDef name:fun arg:self arg:x arguments arg arg arg Assign Return return:yes"
  },
  {
    "library": "kornia",
    "name": "__mul__",
    "source_code": "def __mul__(self, right: So2 | Tensor) -> So2 | Tensor:\n    z = self.z\n    if isinstance(right, So2):\n        return So2(z * right.z)\n    elif isinstance(right, (Vector2, Tensor)):\n        if isinstance(right, Tensor):\n            check_so2_t_shape(right)\n        x = right.data[..., 0]\n        y = right.data[..., 1]\n        real = z.real\n        imag = z.imag\n        out = stack((real * x - imag * y, imag * x + real * y), -1)\n        if isinstance(right, Tensor):\n            return out\n        else:\n            return Vector2(out)\n    else:\n        raise TypeError(f'Not So2 or Tensor type. Got: {type(right)}')",
    "docstring": "Perform a left-multiplication either rotation concatenation or point-transform. Args: right: the other So2 transformation. Return: The resulting So2 transformation.",
    "type": "method",
    "file_path": "kornia\\kornia\\geometry\\liegroup\\so2.py",
    "ast_data": "FunctionDef name:__mul__ arg:self arg:right arguments arg arg Assign If Call Return return:yes Call If Call If Call Call Assign Assign Assign Assign Assign Call If Call Return return:yes Return return:yes Call Raise Call Call"
  },
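The point-transform branch is just complex multiplication: with `z = cos t + i sin t`, the product `z * (x + iy)` has real part `cos t * x - sin t * y` and imaginary part `sin t * x + cos t * y`, matching the `stack` in the code above. A plain-Python check:

```python
# Rotating (1, 0) by 90 degrees via unit-complex multiplication.
import cmath, math

z = cmath.exp(1j * math.pi / 2)   # plays the role of So2's z
w = z * complex(1.0, 0.0)
print(round(w.real, 6), round(w.imag, 6))  # 0.0 1.0
```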
  {
    "library": "pytorch",
    "name": "input_iterable",
    "source_code": "@staticmethod\ndef input_iterable():\n    return False",
    "docstring": "A benchmark child class should return true if it utilizes the input iter arg",
    "type": "method",
    "file_path": "pytorch\\benchmarks\\tensorexpr\\benchmark.py",
    "ast_data": "FunctionDef name:input_iterable arguments Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_has_params",
    "source_code": "@property\ndef _has_params(self) -> bool:\n    return hasattr(self, '_handle') and self._handle is not None",
    "docstring": "Returns whether this FSDP instance manages any parameters.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\fsdp\\fully_sharded_data_parallel.py",
    "ast_data": "FunctionDef name:_has_params arg:self arguments arg Return return:yes BoolOp Call Compare"
  },
  {
    "library": "pytorch",
    "name": "generate_from_torch_refs",
    "source_code": "@classmethod\ndef generate_from_torch_refs(cls) -> set[ElementwiseTypePromotionRule]:\n    rule_set = set()\n    rule_set.update(cls._parse_torch_refs(_refs))\n    rule_set.update(cls._parse_torch_refs(_nn_refs))\n    rule_set.update(cls._parse_torch_refs(_linalg_refs))\n    rule_set.update(cls._parse_torch_refs(_special_refs))\n    rule_set.update(cls._parse_torch_refs(_functional_refs))\n    return rule_set",
    "docstring": "Parse type promotion rules from reference ops under torch._C._refs.",
    "type": "method",
    "file_path": "pytorch\\torch\\onnx\\_internal\\fx\\passes\\type_promotion.py",
    "ast_data": "FunctionDef name:generate_from_torch_refs arg:cls arguments arg Assign Call Call Call Call Call Call Call Call Call Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_wait_queue",
    "source_code": "def _wait_queue(self):\n    while True:\n        time.sleep(0.1)\n        if self.queue.unfinished_tasks == 0 or self.stop_signal.is_set():\n            return",
    "docstring": "Wait for the queue to be empty.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\utils\\data_utils.py",
    "ast_data": "FunctionDef name:_wait_queue arg:self arguments arg While Call If BoolOp Compare Call Return return:no"
  },
  {
    "library": "tensorflow",
    "name": "from_gather_index",
    "source_code": "@classmethod\ndef from_gather_index(cls, gather_index):\n    return _GatherLayerBroadcaster(gather_index)",
    "docstring": "Create a broadcaster from a gather_index.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\dynamic_ragged_shape.py",
    "ast_data": "FunctionDef name:from_gather_index arg:cls arg:gather_index arguments arg arg Return return:yes Call"
  },
  {
    "library": "kornia",
    "name": "Rotate",
    "source_code": "class Rotate(OperationBase):\n\n    def __init__(self, initial_magnitude: Optional[float]=15.0, initial_probability: float=0.5, magnitude_range: Tuple[float, float]=(0.0, 30.0), temperature: float=0.1, symmetric_megnitude: bool=True) -> None:\n        if symmetric_megnitude and magnitude_range[0] < 0:\n            raise ValueError(f'Lower bound of {self.__class__.__name__} is a symmetric operation. The lower bound must above 0. Got {magnitude_range[0]}.')\n        super().__init__(K.RandomRotation(magnitude_range, same_on_batch=False, p=initial_probability), initial_magnitude=[('degrees', initial_magnitude)], temperature=temperature, symmetric_megnitude=symmetric_megnitude)",
    "docstring": "Apply rotate operation. Args: initial_magnitude: the initial magnitude. initial_probability: the initial probability. If None, the augmentation will be randomly applied according to he augmentation sampling range. magnitude_range: the sampling range for random sampling and clamping the optimized magnitude. temperature: temperature for RelaxedBernoulli distribution used during training. symmetric_megnitude: if to randomly assign the magnitude as negative or not.",
    "type": "class",
    "file_path": "kornia\\kornia\\augmentation\\auto\\operations\\ops.py",
    "ast_data": "ClassDef name:Rotate FunctionDef name:__init__ arg:self arg:initial_magnitude arg:initial_probability arg:magnitude_range arg:temperature arg:symmetric_megnitude arguments arg arg arg arg arg arg If BoolOp Compare Raise Call Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "to_tensor",
    "source_code": "def to_tensor(self):\n    return self.tensor",
    "docstring": "Converts this 'WeakTensor' into a 'tf.Tensor'.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\weak_tensor.py",
    "ast_data": "FunctionDef name:to_tensor arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "rewrite_grad_indexed_slices",
    "source_code": "def rewrite_grad_indexed_slices(grads, body_grad_graph, loop_vars, forward_inputs):\n    inputs_with_grads = [t for g, t in zip(grads, forward_inputs) if g is not None]\n    structured_outputs = body_grad_graph.structured_outputs[3:]\n    for forward_input, output in zip(inputs_with_grads, structured_outputs):\n        if not isinstance(output, indexed_slices.IndexedSlices):\n            continue\n        if forward_input.dtype == dtypes.resource:\n            loop_vars = _rewrite_input_as_indexed_slices(body_grad_graph, output, forward_input, loop_vars)\n        else:\n            _rewrite_output_as_tensor(body_grad_graph, output)\n    return loop_vars",
    "docstring": "Handles special case of IndexedSlices returned from while gradient. Some gradient functions return IndexedSlices instead of a Tensor (e.g. the gradient of Gather ops). When this happens in the gradient of a while body, the resulting gradient body function will have mismatched inputs and outputs, since the input is a single Tensor, but the IndexedSlices gets unnested into three output Tensors. This function fixes this by rewriting the gradient body to have three inputs to match the three outputs, i.e., it effectively converts the input Tensor into an input IndexedSlices. It also returns new to reflect the new inputs. Args: grads: the input gradient Tensors to the while gradient computation. body_grad_graph: _WhileBodyGradFuncGraph. loop_vars: list of Tensors. The inputs to body_grad_graph. forward_inputs: list of Tensors. The (flat) inputs to the forward-pass While op. Returns: The new loop_vars to pass to body_grad_graph.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\while_v2_indexed_slices_rewriter.py",
    "ast_data": "FunctionDef name:rewrite_grad_indexed_slices arg:grads arg:body_grad_graph arg:loop_vars arg:forward_inputs arguments arg arg arg arg Assign Call Compare Assign For Call If Call If Compare Assign Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "get_split_size",
    "source_code": "def get_split_size(dim_size, chunks):\n    return (dim_size + chunks - 1) // chunks",
    "docstring": "Computes the split size inline with ``. Returns: An int indicating the split size to use.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\_shard\\sharding_spec\\_internals.py",
    "ast_data": "FunctionDef name:get_split_size arg:dim_size arg:chunks arguments arg arg Return return:yes"
  },
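A minimal sketch of the ceil-division split computed by `get_split_size` above; the sample values are illustrative only:

```python
def get_split_size(dim_size, chunks):
    # Integer ceil division: smallest split size that covers dim_size in `chunks` pieces.
    return (dim_size + chunks - 1) // chunks

assert get_split_size(10, 3) == 4  # splits of 4, 4, 2 cover 10 elements
assert get_split_size(9, 3) == 3   # evenly divisible case
```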
  {
    "library": "django",
    "name": "ur",
    "source_code": "@property\ndef ur(self):\n    return (self.max_x, self.max_y)",
    "docstring": "Return the upper-right coordinate.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\gdal\\envelope.py",
    "ast_data": "FunctionDef name:ur arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "mean_absolute_percentage_error",
    "source_code": "@dispatch.add_dispatch_support\ndef mean_absolute_percentage_error(y_true, y_pred):\n    y_pred = tensor_conversion.convert_to_tensor_v2_with_dispatch(y_pred)\n    y_true = math_ops.cast(y_true, y_pred.dtype)\n    diff = math_ops.abs((y_true - y_pred) / backend.maximum(math_ops.abs(y_true), backend.epsilon()))\n    return 100.0 * backend.mean(diff, axis=-1)",
    "docstring": "Computes the mean absolute percentage error between and . Standalone usage: >>> y_true = np.random.random(size=(2, 3)) >>> y_true = np.maximum(y_true, 1e-7) # Prevent division by zero >>> y_pred = np.random.random(size=(2, 3)) >>> loss = tf.keras.losses.mean_absolute_percentage_error(y_true, y_pred) >>> assert loss.shape == (2,) >>> assert np.array_equal( ... loss.numpy(), ... 100. * np.mean(np.abs((y_true - y_pred) / y_true), axis=-1)) Args: y_true: Ground truth values. shape = . y_pred: The predicted values. shape = . Returns: Mean absolute percentage error values. shape = .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\losses.py",
    "ast_data": "FunctionDef name:mean_absolute_percentage_error arg:y_true arg:y_pred arguments arg arg Assign Call Assign Call Assign Call Call Call Call Return return:yes Call"
  },
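A small NumPy sketch of the same MAPE formula, with the denominator clamped the way `backend.maximum(..., backend.epsilon())` does above; the helper name `mape` is hypothetical:

```python
import numpy as np

def mape(y_true, y_pred, eps=1e-7):
    # 100 * mean(|y_true - y_pred| / max(|y_true|, eps)) over the last axis.
    diff = np.abs((y_true - y_pred) / np.maximum(np.abs(y_true), eps))
    return 100.0 * np.mean(diff, axis=-1)

y_true = np.maximum(np.random.random((2, 3)), 1e-7)
y_pred = np.random.random((2, 3))
assert mape(y_true, y_pred).shape == (2,)
```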
  {
    "library": "pytorch",
    "name": "is_submodule",
    "source_code": "def is_submodule(name_descendant: str, name_ancestor: str) -> bool:\n    return name_ancestor + '.' in name_descendant",
    "docstring": "if name_descendant is a submodule of name_ancestor, but not the same",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\_tools\\ilp_utils.py",
    "ast_data": "FunctionDef name:is_submodule arg:name_descendant arg:name_ancestor arguments arg arg Return return:yes Compare"
  },
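Because `is_submodule` is a plain substring test on dotted names rather than a prefix check, it accepts any occurrence of the ancestor path; a short sketch of that behavior:

```python
def is_submodule(name_descendant: str, name_ancestor: str) -> bool:
    return name_ancestor + '.' in name_descendant

assert is_submodule('pkg.sub.mod', 'pkg.sub')    # proper descendant
assert not is_submodule('pkg.sub', 'pkg.sub')    # same module is excluded
assert is_submodule('a.pkg.sub.mod', 'pkg.sub')  # substring match, not anchored at the start
```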
  {
    "library": "authlib",
    "name": "validate_request_uris",
    "source_code": "def validate_request_uris(self):\n    self._validate_uri('request_uris')",
    "docstring": "Array of request_uri values that are pre-registered by the RP for use at the OP. These URLs MUST use the https scheme unless the target Request Object is signed in a way that is verifiable by the OP. Servers MAY cache the contents of the files referenced by these URIs and not retrieve them at the time they are used in a request. OPs can require that request_uri values used be pre-registered with the require_request_uri_registration discovery parameter. If the contents of the request file could ever change, these URI values SHOULD include the base64url-encoded SHA-256 hash value of the file contents referenced by the URI as the value of the URI fragment. If the fragment value used for a URI changes, that signals the server that its cached value for that URI with the old fragment value is no longer valid.",
    "type": "method",
    "file_path": "authlib\\authlib\\oidc\\registration\\claims.py",
    "ast_data": "FunctionDef name:validate_request_uris arg:self arguments arg Call"
  },
  {
    "library": "matplotlib",
    "name": "new_locator",
    "source_code": "def new_locator(self, ny, ny1=None):\n    return super().new_locator(0, ny, 0, ny1)",
    "docstring": "Create an axes locator callable for the specified cell. Parameters ---------- ny, ny1 : int Integers specifying the row-position of the cell. When *ny1* is None, a single *ny*-th row is specified. Otherwise, location of rows spanning between *ny* to *ny1* (but excluding *ny1*-th row) is specified.",
    "type": "method",
    "file_path": "matplotlib\\lib\\mpl_toolkits\\axes_grid1\\axes_divider.py",
    "ast_data": "FunctionDef name:new_locator arg:self arg:ny arg:ny1 arguments arg arg arg Return return:yes Call Call"
  },
  {
    "library": "scipy",
    "name": "__call__",
    "source_code": "def __call__(self, mu=None, kappa=1, seed=None):\n    return vonmises_fisher_frozen(mu, kappa, seed=seed)",
    "docstring": "Create a frozen von Mises-Fisher distribution. See for more information.",
    "type": "method",
    "file_path": "scipy\\scipy\\stats\\_multivariate.py",
    "ast_data": "FunctionDef name:__call__ arg:self arg:mu arg:kappa arg:seed arguments arg arg arg arg Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "get_joinstyle",
    "source_code": "@_docstring.interpd\ndef get_joinstyle(self):\n    return self._joinstyle.name if self._joinstyle else None",
    "docstring": "Return the join style for the collection (for all its elements). Returns ------- %(JoinStyle)s or None",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\collections.py",
    "ast_data": "FunctionDef name:get_joinstyle arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "finalize_objects",
    "source_code": "def finalize_objects(self):\n    layers_revived_from_config = []\n    layers_revived_from_saved_model = []\n    for node_id, (node, _) in self.loaded_nodes.items():\n        if not isinstance(node, base_layer.Layer) or node_id in self.model_layer_dependencies:\n            continue\n        self._unblock_model_reconstruction(node_id, node)\n        if isinstance(node, input_layer.InputLayer):\n            continue\n        elif isinstance(node, metrics.Metric):\n            continue\n        if isinstance(node, (RevivedLayer, RevivedInputLayer)):\n            layers_revived_from_saved_model.append(node)\n        else:\n            layers_revived_from_config.append(node)\n    _finalize_saved_model_layers(layers_revived_from_saved_model)\n    _finalize_config_layers(layers_revived_from_config)\n    self._reconstruct_all_models()",
    "docstring": "Finish setting up Keras objects. This function is executed after all objects and functions have been created. Call functions and losses are attached to each layer, and once all layers have been fully set up, graph networks are initialized. Subclassed models that are revived from the SavedModel are treated like layers, and have their call/loss functions attached here.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\saving\\saved_model\\load.py",
    "ast_data": "FunctionDef name:finalize_objects arg:self arguments arg Assign Assign For Call If BoolOp Call Compare Call If Call If Call If Call Call Call Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "is_lowered_module",
    "source_code": "def is_lowered_module(obj: Any) -> bool:\n    return type(obj).__name__ == LOWERED_BACKEND_MODULE_TYPE",
    "docstring": "This function is added to avoid using isinstance(obj, LoweredBackendModule) as it will import LoweredBackendModule, which may cause a circular import.",
    "type": "function",
    "file_path": "pytorch\\torch\\_higher_order_ops\\executorch_call_delegate.py",
    "ast_data": "FunctionDef name:is_lowered_module arg:obj arguments arg Return return:yes Compare Call"
  },
  {
    "library": "pytorch",
    "name": "ConvBnReLU2d",
    "source_code": "class ConvBnReLU2d(ConvBn2d):\n    _FLOAT_MODULE: ClassVar[type[nni.ConvBnReLU2d]] = nni.ConvBnReLU2d\n    _FLOAT_CONV_MODULE: ClassVar[type[nn.Conv2d]] = nn.Conv2d\n    _FLOAT_BN_MODULE: ClassVar[type[nn.BatchNorm2d]] = nn.BatchNorm2d\n    _FLOAT_RELU_MODULE: ClassVar[Optional[type[nn.Module]]] = nn.ReLU\n    _FUSED_FLOAT_MODULE: ClassVar[Optional[type[nni.ConvReLU2d]]] = nni.ConvReLU2d\n\n    def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, groups=1, bias=None, padding_mode='zeros', eps=1e-05, momentum=0.1, freeze_bn=False, qconfig=None):\n        super().__init__(in_channels, out_channels, kernel_size, stride, padding, dilation, groups, bias, padding_mode, eps, momentum, freeze_bn, qconfig)\n\n    def forward(self, input):\n        return F.relu(self._forward(input))\n\n    @classmethod\n    def from_float(cls, mod, use_precomputed_fake_quant=False):\n        return super().from_float(mod, use_precomputed_fake_quant)",
    "docstring": "A ConvBnReLU2d module is a module fused from Conv2d, BatchNorm2d and ReLU, attached with FakeQuantize modules for weight, used in quantization aware training. We combined the interface of :class: and :class: and :class:. Similar to , with FakeQuantize modules initialized to default. Attributes: weight_fake_quant: fake quant module for weight",
    "type": "class",
    "file_path": "pytorch\\torch\\ao\\nn\\intrinsic\\qat\\modules\\conv_fused.py",
    "ast_data": "ClassDef name:ConvBnReLU2d FunctionDef name:__init__ arg:self arg:in_channels arg:out_channels arg:kernel_size arg:stride arg:padding arg:dilation arg:groups arg:bias arg:padding_mode arg:eps arg:momentum arg:freeze_bn arg:qconfig arguments arg arg arg arg arg arg arg arg arg arg arg arg arg arg Call Call FunctionDef name:forward arg:self arg:input arguments arg arg Return return:yes Call Call FunctionDef name:from_float arg:cls arg:mod arg:use_precomputed_fake_quant arguments arg arg arg Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "__repr__",
    "source_code": "def __repr__(self):\n    return ','.join((f'{device}:{backend}' for device, backend in self.device_backend_map.items()))",
    "docstring": "Return all the device:backend pairs separated by commas.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\distributed_c10d.py",
    "ast_data": "FunctionDef name:__repr__ arg:self arguments arg Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "collective_permute",
    "source_code": "def collective_permute(x, source_target_pairs, name=None):\n    return gen_tpu_ops.collective_permute(x, source_target_pairs, name=name)",
    "docstring": "Permute the input tensor across replicas given source_target_pairs. For each source_target_pair , we send replica a's input to replica b. Each replica id must only appear once in the source column. Also it must only appear once in the target column. For the replica id not in the target column, this op returns a zero tensor with the same shape and dtype of the input x. For example, suppose there are 4 TPU instances: . Passing source_target_pairs= gets the outputs: . Args: x: The local tensor to be permuted. source_target_pairs: 2d int lists with shape [num_pairs, 2]. source_target_pairs[i][0] represents the source replica id and source_target_pairs[i][1] represents the target replica id. name: Optional op name. Returns: A which is permuted.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\tpu\\ops\\tpu_ops.py",
    "ast_data": "FunctionDef name:collective_permute arg:x arg:source_target_pairs arg:name arguments arg arg arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "get_input_spec",
    "source_code": "def get_input_spec(shape):\n    if isinstance(shape, tensor_shape.TensorShape):\n        input_spec_shape = shape.as_list()\n    else:\n        input_spec_shape = list(shape)\n    batch_index, time_step_index = (1, 0) if self.time_major else (0, 1)\n    if not self.stateful:\n        input_spec_shape[batch_index] = None\n    input_spec_shape[time_step_index] = None\n    return InputSpec(shape=tuple(input_spec_shape))",
    "docstring": "Convert input shape to InputSpec.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\layers\\recurrent.py",
    "ast_data": "FunctionDef name:get_input_spec arg:shape arguments arg If Call Assign Call Assign Call Assign If Assign Assign Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "_shard_orig_param_state",
    "source_code": "def _shard_orig_param_state(fsdp_param_info: FSDPParamInfo, fqn: str, optim_state: dict[str, Any]) -> dict[str, Any]:\n    if not optim_state:\n        return {}\n    fsdp_state = fsdp_param_info.state\n    flat_param = fsdp_param_info.handle.flat_param\n    param_idx = fsdp_param_info.param_indices[fqn]\n    shard_param_info = flat_param._shard_param_infos[param_idx]\n    optim_state = _gather_state_dict(optim_state, pg=fsdp_state.process_group, device=fsdp_state.compute_device)\n    if not shard_param_info.in_shard:\n        return {}\n    new_optim_state: dict[str, Any] = {}\n    intra_param_start_idx = shard_param_info.intra_param_start_idx\n    intra_param_end_idx = shard_param_info.intra_param_end_idx\n    for state_name, value in optim_state.items():\n        if torch.is_tensor(value) and value.dim() > 0 and (fsdp_state.sharding_strategy != ShardingStrategy.NO_SHARD):\n            value = value.flatten()[intra_param_start_idx:intra_param_end_idx + 1].clone()\n        new_optim_state[state_name] = value\n    return new_optim_state",
    "docstring": "Shard the optimizer state for the original parameter with the name `` is True.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\fsdp\\_optim_utils.py",
    "ast_data": "FunctionDef name:_shard_orig_param_state arg:fsdp_param_info arg:fqn arg:optim_state arguments arg arg arg If Return return:no Assign Assign Assign Assign Assign Call If Return return:no Assign Assign For Call If BoolOp Call Compare Call Compare Assign Call Call Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "TestOneInput",
    "source_code": "def TestOneInput(data):\n    fh = FuzzingHelper(data)\n    input_tensor = fh.get_random_numeric_tensor()\n    _ = tf.raw_ops.Abs(x=input_tensor)",
    "docstring": "Test randomized fuzzing input for tf.raw_ops.Abs.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\security\\fuzzing\\abs_fuzz.py",
    "ast_data": "FunctionDef name:TestOneInput arg:data arguments arg Assign Call Assign Call Assign Call"
  },
  {
    "library": "pandas",
    "name": "BaseExecutionEngine",
    "source_code": "class BaseExecutionEngine(abc.ABC):\n\n    @staticmethod\n    @abc.abstractmethod\n    def map(data: Series | DataFrame | np.ndarray, func: AggFuncType, args: tuple, kwargs: dict[str, Any], decorator: Callable | None, skip_na: bool):\n        pass\n\n    @staticmethod\n    @abc.abstractmethod\n    def apply(data: Series | DataFrame | np.ndarray, func: AggFuncType, args: tuple, kwargs: dict[str, Any], decorator: Callable, axis: Axis):\n        pass",
    "docstring": "Base class for execution engines for map and apply methods. An execution engine receives all the parameters of a call to ``, such as the data container, the function, etc. and takes care of running the execution. Supporting different engines allows functions to be JIT compiled, run in parallel, and others. Besides the default executor which simply runs the code with the Python interpreter and pandas.",
    "type": "class",
    "file_path": "pandas\\pandas\\core\\apply.py",
    "ast_data": "ClassDef name:BaseExecutionEngine FunctionDef name:map arg:data arg:func arg:args arg:kwargs arg:decorator arg:skip_na arguments arg arg arg arg arg arg FunctionDef name:apply arg:data arg:func arg:args arg:kwargs arg:decorator arg:axis arguments arg arg arg arg arg arg"
  },
  {
    "library": "matplotlib",
    "name": "sanitize_sequence",
    "source_code": "def sanitize_sequence(data):\n    return list(data) if isinstance(data, collections.abc.MappingView) else data",
    "docstring": "Convert dictview objects to list. Other inputs are returned unchanged.",
    "type": "function",
    "file_path": "matplotlib\\lib\\matplotlib\\cbook.py",
    "ast_data": "FunctionDef name:sanitize_sequence arg:data arguments arg Return return:yes Call Call"
  },
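A quick sketch of `sanitize_sequence` on a dict view versus an ordinary sequence; the sample data is illustrative:

```python
import collections.abc

def sanitize_sequence(data):
    # Materialize dict views (keys/values/items); pass everything else through.
    return list(data) if isinstance(data, collections.abc.MappingView) else data

d = {'a': 1, 'b': 2}
assert sanitize_sequence(d.keys()) == ['a', 'b']  # view -> list
assert sanitize_sequence((1, 2)) == (1, 2)        # unchanged
```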
  {
    "library": "matplotlib",
    "name": "fignum_exists",
    "source_code": "def fignum_exists(num: int | str) -> bool:\n    return _pylab_helpers.Gcf.has_fignum(num) if isinstance(num, int) else num in get_figlabels()",
    "docstring": "Return whether the figure with the given id exists. Parameters ---------- num : int or str A figure identifier. Returns ------- bool Whether or not a figure with id *num* exists.",
    "type": "function",
    "file_path": "matplotlib\\lib\\matplotlib\\pyplot.py",
    "ast_data": "FunctionDef name:fignum_exists arg:num arguments arg Return return:yes Call Call Compare Call"
  },
  {
    "library": "pytorch",
    "name": "where",
    "source_code": "def where(self, condition: T, input: T, other: T) -> T:\n    raise NotImplementedError",
    "docstring": "Computes torch.where: when condition is true, return input; otherwise return other.",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\ops_handler.py",
    "ast_data": "FunctionDef name:where arg:self arg:condition arg:input arg:other arguments arg arg arg arg Raise"
  },
  {
    "library": "sphinx",
    "name": "stable_hash",
    "source_code": "def stable_hash(obj: Any) -> str:\n    if isinstance(obj, dict):\n        obj = sorted(map(stable_hash, obj.items()))\n    if isinstance(obj, list | tuple | set | frozenset):\n        obj = sorted(map(stable_hash, obj))\n    elif isinstance(obj, type | types.FunctionType):\n        obj = f'{obj.__module__}.{obj.__qualname__}'\n    return hashlib.md5(str(obj).encode(), usedforsecurity=False).hexdigest()",
    "docstring": "Return a stable hash for a Python data structure. We can't just use the md5 of str(obj) as the order of collections may be random.",
    "type": "function",
    "file_path": "sphinx\\sphinx\\util\\_serialise.py",
    "ast_data": "FunctionDef name:stable_hash arg:obj arguments arg If Call Assign Call Call Call If Call Assign Call Call If Call Assign Return return:yes Call Call Call Call"
  },
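A self-contained sketch of the order-independence that `stable_hash` provides; the `list | tuple` union syntax from the source is rewritten as tuples so the sketch runs on older Pythons:

```python
import hashlib
import types
from typing import Any

def stable_hash(obj: Any) -> str:
    # Hash members individually and sort, so collection order cannot matter.
    if isinstance(obj, dict):
        obj = sorted(map(stable_hash, obj.items()))
    if isinstance(obj, (list, tuple, set, frozenset)):
        obj = sorted(map(stable_hash, obj))
    elif isinstance(obj, (type, types.FunctionType)):
        obj = f'{obj.__module__}.{obj.__qualname__}'
    return hashlib.md5(str(obj).encode(), usedforsecurity=False).hexdigest()

assert stable_hash({'b': 1, 'a': 2}) == stable_hash({'a': 2, 'b': 1})
assert stable_hash({3, 1, 2}) == stable_hash({1, 2, 3})
```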
  {
    "library": "pytorch",
    "name": "save_source_file",
    "source_code": "def save_source_file(self, module_name: str, file_or_directory: str, dependencies=True):\n    path = Path(file_or_directory)\n    if path.is_dir():\n        to_save = []\n        module_path = module_name.replace('.', '/')\n        for filename in path.glob('**/*.py'):\n            relative_path = filename.relative_to(path).as_posix()\n            archivename = module_path + '/' + relative_path\n            submodule_name = None\n            if filename.name == '__init__.py':\n                submodule_name = archivename[:-len('/__init__.py')].replace('/', '.')\n                is_package = True\n            else:\n                submodule_name = archivename[:-len('.py')].replace('/', '.')\n                is_package = False\n            to_save.append((submodule_name, _read_file(str(filename)), is_package, dependencies))\n        for item in to_save:\n            self.save_source_string(*item)\n    else:\n        is_package = path.name == '__init__.py'\n        self.save_source_string(module_name, _read_file(file_or_directory), is_package, dependencies)",
    "docstring": "Adds the local file system `save_source_file`, we scan the source for dependencies.",
    "type": "method",
    "file_path": "pytorch\\torch\\package\\package_exporter.py",
    "ast_data": "FunctionDef name:save_source_file arg:self arg:module_name arg:file_or_directory arg:dependencies arguments arg arg arg arg Assign Call If Call Assign Assign Call For Call Assign Call Call Assign Assign If Compare Assign Call Call Assign Assign Call Call Assign Call Call Call For Call Assign Compare Call Call"
  },
  {
    "library": "pytorch",
    "name": "LazyConvTranspose1d",
    "source_code": "class LazyConvTranspose1d(_LazyConvXdMixin, ConvTranspose1d):\n    cls_to_become = ConvTranspose1d\n\n    def __init__(self, out_channels: int, kernel_size: _size_1_t, stride: _size_1_t=1, padding: _size_1_t=0, output_padding: _size_1_t=0, groups: int=1, bias: bool=True, dilation: _size_1_t=1, padding_mode: str='zeros', device=None, dtype=None) -> None:\n        factory_kwargs = {'device': device, 'dtype': dtype}\n        super().__init__(0, 0, kernel_size, stride, padding, output_padding, groups, False, dilation, padding_mode, **factory_kwargs)\n        self.weight = UninitializedParameter(**factory_kwargs)\n        self.out_channels = out_channels\n        if bias:\n            self.bias = UninitializedParameter(**factory_kwargs)\n\n    def _get_num_spatial_dims(self) -> int:\n        return 1",
    "docstring": "A :class: module with lazy initialization of the `ConvTranspose1dweightbiastorch.nn.modules.lazy.LazyModuleMixintorch.nn.ConvTranspose1dtorch.nn.modules.lazy.LazyModuleMixin`",
    "type": "class",
    "file_path": "pytorch\\torch\\nn\\modules\\conv.py",
    "ast_data": "ClassDef name:LazyConvTranspose1d Assign FunctionDef name:__init__ arg:self arg:out_channels arg:kernel_size arg:stride arg:padding arg:output_padding arg:groups arg:bias arg:dilation arg:padding_mode arg:device arg:dtype arguments arg arg arg arg arg arg arg arg arg arg arg arg Assign Call Call Assign Call Assign If Assign Call FunctionDef name:_get_num_spatial_dims arg:self arguments arg Return return:yes"
  },
  {
    "library": "numpy",
    "name": "unix2dos",
    "source_code": "def unix2dos(file):\n    if os.path.isdir(file):\n        print(file, 'Directory!')\n        return\n    with open(file, 'rb') as fp:\n        data = fp.read()\n    if '\\x00' in data:\n        print(file, 'Binary!')\n        return\n    newdata = re.sub('\\r\\n', '\\n', data)\n    newdata = re.sub('\\n', '\\r\\n', newdata)\n    if newdata != data:\n        print('unix2dos:', file)\n        with open(file, 'wb') as f:\n            f.write(newdata)\n        return file\n    else:\n        print(file, 'ok')",
    "docstring": "Replace LF with CRLF in argument files. Print names of changed files.",
    "type": "function",
    "file_path": "numpy\\numpy\\distutils\\line_endings.py",
    "ast_data": "FunctionDef name:unix2dos arg:file arguments arg If Call Call Return return:no With Call Assign Call If Compare Call Return return:no Assign Call Assign Call If Compare Call With Call Call Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "get_window_extent",
    "source_code": "def get_window_extent(self, renderer=None, dpi=None):\n    if not self.get_visible():\n        return Bbox.unit()\n    fig = self.get_figure(root=True)\n    if dpi is None:\n        dpi = fig.dpi\n    if self.get_text() == '':\n        with cbook._setattr_cm(fig, dpi=dpi):\n            tx, ty = self._get_xy_display()\n            return Bbox.from_bounds(tx, ty, 0, 0)\n    if renderer is not None:\n        self._renderer = renderer\n    if self._renderer is None:\n        self._renderer = fig._get_renderer()\n    if self._renderer is None:\n        raise RuntimeError(\"Cannot get window extent of text w/o renderer. You likely want to call 'figure.draw_without_rendering()' first.\")\n    with cbook._setattr_cm(fig, dpi=dpi):\n        bbox, info, descent = self._get_layout(self._renderer)\n        x, y = self.get_unitless_position()\n        x, y = self.get_transform().transform((x, y))\n        bbox = bbox.translated(x, y)\n        return bbox",
    "docstring": "Return the bounding the text, in display units. In addition to being used internally, this is useful for specifying clickable regions in a png file on a web page. Parameters ---------- renderer : Renderer, optional A renderer is needed to compute the bounding box. If the artist has already been drawn, the renderer is cached; thus, it is only necessary to pass this argument when calling before the first draw. In practice, it is usually easier to trigger a draw first, e.g. by calling or `` (*not* the renderer dpi); should be set e.g. if to match regions with a figure saved with a custom dpi value.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\text.py",
    "ast_data": "FunctionDef name:get_window_extent arg:self arg:renderer arg:dpi arguments arg arg arg If Call Return return:yes Call Assign Call If Compare Assign If Compare Call With Call Assign Call Return return:yes Call If Compare Assign If Compare Assign Call If Compare Raise Call With Call Assign Call Assign Call Assign Call Call Assign Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "match",
    "source_code": "def match(self, graph: Graph) -> list[InternalMatch]:\n    internal_matches = super().match(graph)\n    for internal_match in internal_matches:\n        for k, n in self.name_node_map.items():\n            internal_match.name_node_map[k] = internal_match.nodes_map[n]\n    return internal_matches",
    "docstring": "The returned InternalMatch will have name_node_map populated with a map from node name (str) to the target node, e.g. {\"conv\": target_conv_ndoe, \"relu\": target_relu_node} this requires the pattern graph returns an additional output of node name to node, e.g. instead of: we should do: instead",
    "type": "method",
    "file_path": "pytorch\\torch\\fx\\passes\\utils\\matcher_with_name_node_map_utils.py",
    "ast_data": "FunctionDef name:match arg:self arg:graph arguments arg arg Assign Call Call For For Call Assign Return return:yes"
  },
  {
    "library": "pandas",
    "name": "_can_use_libjoin",
    "source_code": "@final\n@cache_readonly\ndef _can_use_libjoin(self) -> bool:\n    if not self.is_monotonic_increasing:\n        return False\n    if type(self) is Index:\n        return isinstance(self.dtype, np.dtype) or isinstance(self._values, (ArrowExtensionArray, BaseMaskedArray)) or (isinstance(self.dtype, StringDtype) and self.dtype.storage == 'python')\n    return not isinstance(self, (ABCIntervalIndex, ABCMultiIndex))",
    "docstring": "Whether we can use the fastpaths implemented in _libs.join. This is driven by whether (in monotonic increasing cases that are guaranteed not to have NAs) we can convert to a np.ndarray without making a copy. If we cannot, this negates the performance benefit of using libjoin.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\indexes\\base.py",
    "ast_data": "FunctionDef name:_can_use_libjoin arg:self arguments arg If Return return:yes If Compare Call Return return:yes BoolOp Call Call BoolOp Call Compare Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "handle",
    "source_code": "def handle(self, args, kwargs):\n    args = nest.map_structure(_slice_to_dict, args)\n    kwargs = nest.map_structure(_slice_to_dict, kwargs)\n    if any((isinstance(x, keras_tensor.KerasTensor) for x in nest.flatten([args, kwargs]))):\n        return SlicingOpLambda(self.op)(*args, **kwargs)\n    else:\n        return self.NOT_SUPPORTED",
    "docstring": "Handle the specified operation with the specified arguments.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\layers\\core.py",
    "ast_data": "FunctionDef name:handle arg:self arg:args arg:kwargs arguments arg arg arg Assign Call Assign Call If Call Call Call Return return:yes Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "validate_joint_graph",
    "source_code": "def validate_joint_graph(joint_graph: torch.fx.Graph):\n    for node in joint_graph.nodes:\n        if node.op == 'call_function' and node.target == torch.ops.flex_lib.zeros_and_scatter.default:\n            for user in node.users:\n                if user.op != 'output':\n                    raise NotImplementedError('Using multiple indexing operations on the same tensor that requires gradients in a score_mod function is not currently supported. This typically happens when indexing the same tensor multiple times, like:\\n\\n    def score_mod(score, b, h, q_idx, kv_idx):\\n        return score + bias[q_idx] + bias[kv_idx]  # bias used twice!\\n\\nA valid workaround is to clone() the tensors that will be indexed multiple times. For example:\\n\\n    bias1 = bias.clone()\\n    def score_mod(score, b, h, q_idx, kv_idx):\\n        return score + bias[q_idx] + bias1[kv_idx]\\n\\nNote that this solution will use additional memory.')\n    return",
    "docstring": "We do some pre lowering graph checks in order to raise nicer error messages",
    "type": "function",
    "file_path": "pytorch\\torch\\_inductor\\kernel\\flex_attention.py",
    "ast_data": "FunctionDef name:validate_joint_graph arg:joint_graph arguments arg For If BoolOp Compare Compare For If Compare Raise Call Return return:no"
  },
  {
    "library": "tensorflow",
    "name": "_broadcast_parameter_with_batch_shape",
    "source_code": "def _broadcast_parameter_with_batch_shape(param, param_ndims_to_matrix_ndims, batch_shape):\n    if hasattr(param, 'batch_shape_tensor'):\n        override_dict = {}\n        for name, ndims in param._experimental_parameter_ndims_to_matrix_ndims.items():\n            sub_param = getattr(param, name)\n            override_dict[name] = nest.map_structure_up_to(sub_param, functools.partial(_broadcast_parameter_with_batch_shape, batch_shape=batch_shape), sub_param, ndims)\n        parameters = dict(param.parameters, **override_dict)\n        return type(param)(**parameters)\n    base_shape = array_ops.concat([batch_shape, array_ops.ones([param_ndims_to_matrix_ndims], dtype=dtypes.int32)], axis=0)\n    return array_ops.broadcast_to(param, array_ops.broadcast_dynamic_shape(base_shape, array_ops.shape(param)))",
    "docstring": "Broadcasts with the given batch shape, recursively.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\linalg\\slicing.py",
    "ast_data": "FunctionDef name:_broadcast_parameter_with_batch_shape arg:param arg:param_ndims_to_matrix_ndims arg:batch_shape arguments arg arg arg If Call Assign For Call Assign Call Assign Call Call Assign Call Return return:yes Call Call Assign Call Call Return return:yes Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "_export_forward_backward",
    "source_code": "def _export_forward_backward(ep: torch.export.ExportedProgram, joint_loss_index: int=0) -> torch.export.ExportedProgram:\n    from torch._decomp import core_aten_decompositions\n    ep = _decompose_exported_program(ep, cia_to_decomp={}, python_decomp_table=core_aten_decompositions(), joint_loss_index=joint_loss_index, decompose_custom_triton_ops=False)\n    gm, new_graph_signature = _copy_graph_module_and_signature(ep)\n    _remove_detach_pass(gm, new_graph_signature)\n    return ep._update(gm, new_graph_signature)",
    "docstring": "WARNING: This API is highly unstable and will be subject to change in the future.",
    "type": "function",
    "file_path": "pytorch\\torch\\export\\experimental\\__init__.py",
    "ast_data": "FunctionDef name:_export_forward_backward arg:ep arg:joint_loss_index arguments arg arg Assign Call Call Assign Call Call Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "num2timedelta",
    "source_code": "def num2timedelta(x):\n    return _ordinalf_to_timedelta_np_vectorized(x).tolist()",
    "docstring": "Convert number of days to a object. If *x* is a sequence, a sequence of objects will be returned. Parameters ---------- x : float, sequence of floats Number of days. The fraction part represents hours, minutes, seconds. Returns ------- or list[]",
    "type": "function",
    "file_path": "matplotlib\\lib\\matplotlib\\dates.py",
    "ast_data": "FunctionDef name:num2timedelta arg:x arguments arg Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_broadcast_normalize_batch_in_training",
    "source_code": "def _broadcast_normalize_batch_in_training(x, gamma, beta, reduction_axes, epsilon=0.001):\n    mean, var = nn.moments(x, reduction_axes, None, None, False)\n    target_shape = []\n    for axis in range(ndim(x)):\n        if axis in reduction_axes:\n            target_shape.append(1)\n        else:\n            target_shape.append(array_ops.shape(x)[axis])\n    target_shape = array_ops_stack.stack(target_shape)\n    broadcast_mean = array_ops.reshape(mean, target_shape)\n    broadcast_var = array_ops.reshape(var, target_shape)\n    if gamma is None:\n        broadcast_gamma = None\n    else:\n        broadcast_gamma = array_ops.reshape(gamma, target_shape)\n    if beta is None:\n        broadcast_beta = None\n    else:\n        broadcast_beta = array_ops.reshape(beta, target_shape)\n    normed = nn.batch_normalization(x, broadcast_mean, broadcast_var, broadcast_beta, broadcast_gamma, epsilon)\n    return (normed, mean, var)",
    "docstring": "Non-fused, broadcast version of . Args: x: Input tensor or variable. gamma: Tensor by which to scale the input. beta: Tensor with which to center the input. reduction_axes: iterable of integers, axes over which to normalize. epsilon: Fuzz factor. Returns: A tuple length of 3, .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\backend.py",
    "ast_data": "FunctionDef name:_broadcast_normalize_batch_in_training arg:x arg:gamma arg:beta arg:reduction_axes arg:epsilon arguments arg arg arg arg arg Assign Call Assign For Call Call If Compare Call Call Call Assign Call Assign Call Assign Call If Compare Assign Assign Call If Compare Assign Assign Call Assign Call Return return:yes"
  },
  {
    "library": "cryptography",
    "name": "public_key",
    "source_code": "def public_key(self, key: CertificatePublicKeyTypes) -> CertificateBuilder:\n    if not isinstance(key, (dsa.DSAPublicKey, rsa.RSAPublicKey, ec.EllipticCurvePublicKey, ed25519.Ed25519PublicKey, ed448.Ed448PublicKey, x25519.X25519PublicKey, x448.X448PublicKey)):\n        raise TypeError('Expecting one of DSAPublicKey, RSAPublicKey, EllipticCurvePublicKey, Ed25519PublicKey, Ed448PublicKey, X25519PublicKey, or X448PublicKey.')\n    if self._public_key is not None:\n        raise ValueError('The public key may only be set once.')\n    return CertificateBuilder(self._issuer_name, self._subject_name, key, self._serial_number, self._not_valid_before, self._not_valid_after, self._extensions)",
    "docstring": "Sets the requestor's public key (as found in the signing request).",
    "type": "method",
    "file_path": "cryptography\\src\\cryptography\\x509\\base.py",
    "ast_data": "FunctionDef name:public_key arg:self arg:key arguments arg arg If Call Raise Call If Compare Raise Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "get_collection_ref",
    "source_code": "@tf_export(v1=['get_collection_ref'])\ndef get_collection_ref(key) -> list[Any]:\n    return get_default_graph().get_collection_ref(key)",
    "docstring": "Wrapper for using the default graph. See for more details. Args: key: The key for the collection. For example, the class contains many standard names for collections. Returns: The list of values in the collection with the given , or an empty list if no value has been added to that collection. Note that this returns the collection list itself, which can be modified in place to change the collection. @compatibility(eager) Collections are not supported when eager execution is enabled. @end_compatibility",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\ops.py",
    "ast_data": "FunctionDef name:get_collection_ref arg:key arguments arg Return return:yes Call Call Call"
  },
  {
    "library": "kornia",
    "name": "RgbToYcbcr",
    "source_code": "class RgbToYcbcr(Module):\n    ONNX_DEFAULT_INPUTSHAPE: ClassVar[list[int]] = [-1, 3, -1, -1]\n    ONNX_DEFAULT_OUTPUTSHAPE: ClassVar[list[int]] = [-1, 3, -1, -1]\n\n    def forward(self, image: Tensor) -> Tensor:\n        return rgb_to_ycbcr(image)",
    "docstring": "Convert an image from RGB to YCbCr. The image data is assumed to be in the range of (0, 1). Returns: YCbCr version of the image. Shape: - image: :math: - output: :math: Examples: >>> input = torch.rand(2, 3, 4, 5) >>> ycbcr = RgbToYcbcr() >>> output = ycbcr(input) # 2x3x4x5",
    "type": "class",
    "file_path": "kornia\\kornia\\color\\ycbcr.py",
    "ast_data": "ClassDef name:RgbToYcbcr FunctionDef name:forward arg:self arg:image arguments arg arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "fuse_conv_bn_jit",
    "source_code": "def fuse_conv_bn_jit(model, inplace=False):\n    torch._C._log_api_usage_once('quantization_api.quantize_jit.fuse_conv_bn_jit')\n    model_c = model._c\n    model_c = torch._C._jit_pass_fold_convbn(model_c)\n    if inplace:\n        model._reconstruct(model_c)\n    else:\n        model = wrap_cpp_module(model_c)\n    return model",
    "docstring": "Fuse conv - bn module Works for eval model only. Args: model: TorchScript model from scripting or tracing",
    "type": "function",
    "file_path": "pytorch\\torch\\ao\\quantization\\quantize_jit.py",
    "ast_data": "FunctionDef name:fuse_conv_bn_jit arg:model arg:inplace arguments arg arg Call Assign Assign Call If Call Assign Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "overlaps",
    "source_code": "def overlaps(self, other):\n    ax1, ay1, ax2, ay2 = self.extents\n    bx1, by1, bx2, by2 = other.extents\n    if ax2 < ax1:\n        ax2, ax1 = (ax1, ax2)\n    if ay2 < ay1:\n        ay2, ay1 = (ay1, ay2)\n    if bx2 < bx1:\n        bx2, bx1 = (bx1, bx2)\n    if by2 < by1:\n        by2, by1 = (by1, by2)\n    return ax1 <= bx2 and bx1 <= ax2 and (ay1 <= by2) and (by1 <= ay2)",
    "docstring": "Return whether this bounding box overlaps with the other bounding box. Parameters ---------- other :",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\transforms.py",
    "ast_data": "FunctionDef name:overlaps arg:self arg:other arguments arg arg Assign Assign If Compare Assign If Compare Assign If Compare Assign If Compare Assign Return return:yes BoolOp Compare Compare Compare Compare"
  },
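The extent normalization above makes `overlaps` robust to inverted axes; a hedged usage sketch with matplotlib's public `Bbox` API:

```python
from matplotlib.transforms import Bbox

a = Bbox.from_extents(0, 0, 2, 2)
b = Bbox.from_extents(1, 1, 3, 3)
c = Bbox.from_extents(5, 5, 6, 6)
assert a.overlaps(b)      # boxes share the region [1, 2] x [1, 2]
assert not a.overlaps(c)  # disjoint boxes
```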
  {
    "library": "django",
    "name": "__next__",
    "source_code": "def __next__(self):\n    raise NotImplementedError('subclasses of Deserializer must provide a __next__() method')",
    "docstring": "Iteration interface -- return the next item in the stream",
    "type": "method",
    "file_path": "django\\django\\core\\serializers\\base.py",
    "ast_data": "FunctionDef name:__next__ arg:self arguments arg Raise Call"
  },
  {
    "library": "scikit-learn",
    "name": "_e_step",
    "source_code": "def _e_step(self, X):\n    log_prob_norm, log_resp = self._estimate_log_prob_resp(X)\n    return (np.mean(log_prob_norm), log_resp)",
    "docstring": "E step. Parameters ---------- X : array-like of shape (n_samples, n_features) Returns ------- log_prob_norm : float Mean of the logarithms of the probabilities of each sample in X log_responsibility : array, shape (n_samples, n_components) Logarithm of the posterior probabilities (or responsibilities) of the point of each sample in X.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\mixture\\_base.py",
    "ast_data": "FunctionDef name:_e_step arg:self arg:X arguments arg arg Assign Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "build_dim_map",
    "source_code": "def build_dim_map(tensor):\n    return OrderedDict([(idx if name is None else name, name) for idx, name in enumerate(tensor.names)])",
    "docstring": "Returns a map of { dim: dim_name } where dim is a name if the dim is named and the dim index otherwise.",
    "type": "function",
    "file_path": "pytorch\\torch\\_namedtensor_internals.py",
    "ast_data": "FunctionDef name:build_dim_map arg:tensor arguments arg Return return:yes Call Compare Call"
  },
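A short named-tensor sketch of `build_dim_map`; named dims keep their name as the key, while unnamed dims fall back to their index:

```python
import torch
from collections import OrderedDict

def build_dim_map(tensor):
    return OrderedDict([(idx if name is None else name, name)
                        for idx, name in enumerate(tensor.names)])

t = torch.zeros(2, 3, names=('N', None))
assert build_dim_map(t) == OrderedDict([('N', 'N'), (1, None)])
```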
  {
    "library": "kornia",
    "name": "_init_random_ray_dataset",
    "source_code": "def _init_random_ray_dataset(self, num_img_rays: Tensor) -> None:\n    self._ray_sampler = RandomRaySampler(self._min_depth, self._max_depth, self._ndc, device=self._device, dtype=self._dtype)\n    self._ray_sampler.calc_ray_params(self._cameras, num_img_rays)",
    "docstring": "Initialize a random ray sampler and calculates dataset ray parameters. Args: num_img_rays: If not None, number of rays to randomly cast from each camers: math: .",
    "type": "method",
    "file_path": "kornia\\kornia\\nerf\\data_utils.py",
    "ast_data": "FunctionDef name:_init_random_ray_dataset arg:self arg:num_img_rays arguments arg arg Assign Call Call"
  },
  {
    "library": "numpy",
    "name": "get_flags_fix",
    "source_code": "def get_flags_fix(self):\n    return self._get_command_flags('compiler_fix')",
    "docstring": "List of Fortran 90 fixed format specific flags.",
    "type": "method",
    "file_path": "numpy\\numpy\\distutils\\fcompiler\\__init__.py",
    "ast_data": "FunctionDef name:get_flags_fix arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_tpu_system_device_name",
    "source_code": "def _tpu_system_device_name(job: Optional[Text]) -> Text:\n    if job is None:\n        return '/device:TPU_SYSTEM:0'\n    else:\n        return '/job:%s/device:TPU_SYSTEM:0' % job",
    "docstring": "Returns the device name for the TPU_SYSTEM device of .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\tpu\\tpu.py",
    "ast_data": "FunctionDef name:_tpu_system_device_name arg:job arguments arg If Compare Return return:yes Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "_set_dpi",
    "source_code": "def _set_dpi(self, dpi, forward=True):\n    if dpi == self._dpi:\n        return\n    self._dpi = dpi\n    self.dpi_scale_trans.clear().scale(dpi)\n    w, h = self.get_size_inches()\n    self.set_size_inches(w, h, forward=forward)",
    "docstring": "Parameters ---------- dpi : float forward : bool Passed on to",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\figure.py",
    "ast_data": "FunctionDef name:_set_dpi arg:self arg:dpi arg:forward arguments arg arg arg If Compare Return return:no Assign Call Call Assign Call Call"
  },
  {
    "library": "scipy",
    "name": "handle_events",
    "source_code": "def handle_events(sol, events, active_events, event_count, max_events, t_old, t):\n    roots = [solve_event_equation(events[event_index], sol, t_old, t) for event_index in active_events]\n    roots = np.asarray(roots)\n    if np.any(event_count[active_events] >= max_events[active_events]):\n        if t > t_old:\n            order = np.argsort(roots)\n        else:\n            order = np.argsort(-roots)\n        active_events = active_events[order]\n        roots = roots[order]\n        t = np.nonzero(event_count[active_events] >= max_events[active_events])[0][0]\n        active_events = active_events[:t + 1]\n        roots = roots[:t + 1]\n        terminate = True\n    else:\n        terminate = False\n    return (active_events, roots, terminate)",
    "docstring": "Helper function to handle events. Parameters ---------- sol : DenseOutput Function `t_oldtt_oldt` and before a possible termination. roots : ndarray Values of t at which events occurred. terminate : bool Whether a terminal event occurred.",
    "type": "function",
    "file_path": "scipy\\scipy\\integrate\\_ivp\\ivp.py",
    "ast_data": "FunctionDef name:handle_events arg:sol arg:events arg:active_events arg:event_count arg:max_events arg:t_old arg:t arguments arg arg arg arg arg arg arg Assign Call Assign Call If Call Compare If Compare Assign Call Assign Call Assign Assign Assign Call Compare Assign Assign Assign Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "sobol_sample",
    "source_code": "@tf_export('math.sobol_sample')\n@dispatch.add_dispatch_support\ndef sobol_sample(dim, num_results, skip=0, dtype=dtypes.float32, name=None):\n    with ops.name_scope(name, 'sobol', [dim, num_results, skip]):\n        return gen_math_ops.sobol_sample(dim, num_results, skip, dtype=dtype)",
    "docstring": "Generates points from the Sobol sequence. Creates a Sobol sequence with samples. Each sample has dimension . Skips the first samples. Args: dim: Positive scalar representing each sample's dimension. num_results: Positive scalar of dtype int32. The number of Sobol points to return in the output. skip: (Optional) Positive scalar of dtype int32. The number of initial points of the Sobol sequence to skip. Default value is 0. dtype: (Optional) The of the sample. One of: or . Defaults to . name: (Optional) Python name prefixed to ops created by this function. Returns: of samples from Sobol sequence with [num_results, dim].",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\math_ops.py",
    "ast_data": "FunctionDef name:sobol_sample arg:dim arg:num_results arg:skip arg:dtype arg:name arguments arg arg arg arg arg With Call Return return:yes Call Call"
  },
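A hedged usage sketch of the exported `tf.math.sobol_sample` wrapper described above:

```python
import tensorflow as tf

# Five 2-D quasi-random points; output shape is [num_results, dim].
points = tf.math.sobol_sample(dim=2, num_results=5)
assert points.shape == (5, 2)
```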
  {
    "library": "tensorflow",
    "name": "_create_dataset_reader",
    "source_code": "def _create_dataset_reader(dataset_creator, filenames, num_parallel_reads=None, name=None):\n\n    def read_one_file(filename):\n        filename = ops.convert_to_tensor(filename, dtypes.string, name='filename')\n        return dataset_creator(filename)\n    if num_parallel_reads is None:\n        return filenames.flat_map(read_one_file, name=name)\n    elif num_parallel_reads == dataset_ops.AUTOTUNE:\n        return filenames.interleave(read_one_file, num_parallel_calls=num_parallel_reads, name=name)\n    else:\n        return ParallelInterleaveDataset(filenames, read_one_file, cycle_length=num_parallel_reads, block_length=1, sloppy=False, buffer_output_elements=None, prefetch_input_elements=None, name=name)",
    "docstring": "Creates a dataset that reads the given files using the given reader. Args: dataset_creator: A function that takes in a single file name and returns a dataset. filenames: A containing one or more filenames. num_parallel_reads: The number of parallel reads we should do. name: (Optional.) A name for the tf.data operation. Returns: A that reads data from .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\data\\ops\\readers.py",
    "ast_data": "FunctionDef name:_create_dataset_reader arg:dataset_creator arg:filenames arg:num_parallel_reads arg:name arguments arg arg arg arg FunctionDef name:read_one_file arg:filename arguments arg Assign Call Return return:yes Call If Compare Return return:yes Call If Compare Return return:yes Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, worker_device_pairs, canonicalize_devices=True):\n    self._worker_device_pairs = worker_device_pairs\n    self._input_worker_devices = tuple((d for d, _ in self._worker_device_pairs))\n    self._canonicalize_devices = canonicalize_devices\n    if canonicalize_devices:\n        self._fed_devices = tuple((tuple((device_util.canonicalize(d) for d in f)) for _, f in self._worker_device_pairs))\n    else:\n        self._fed_devices = tuple((tuple((device_util.canonicalize_without_job_and_task(d) for d in f)) for _, f in self._worker_device_pairs))",
    "docstring": "Initialize an object. Args: worker_device_pairs: A sequence of pairs: . canonicalize_devices: Whether to canonicalize devices for workers fully or partially. If False, it will partially canonicalize devices by removing job and task.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\input_lib.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:worker_device_pairs arg:canonicalize_devices arguments arg arg arg Assign Assign Call Assign If Assign Call Call Call Assign Call Call Call"
  },
  {
    "library": "pandas",
    "name": "_dir_additions",
    "source_code": "def _dir_additions(self) -> set[str]:\n    return {accessor for accessor in self._accessors if hasattr(self, accessor)}",
    "docstring": "Add additional __dir__ for this object.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\accessor.py",
    "ast_data": "FunctionDef name:_dir_additions arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "mode",
    "source_code": "def mode(self, name='mode'):\n    with self._name_scope(name):\n        return self._mode()",
    "docstring": "Mode.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\distributions\\distribution.py",
    "ast_data": "FunctionDef name:mode arg:self arg:name arguments arg arg With Call Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "freqstr",
    "source_code": "@property\ndef freqstr(self) -> str | None:\n    if self.freq is None:\n        return None\n    return self.freq.freqstr",
    "docstring": "Return the frequency object as a string if it's set, otherwise None. See Also -------- DatetimeIndex.inferred_freq : Returns a string representing a frequency generated by infer_freq. Examples -------- For DatetimeIndex: >>> idx = pd.DatetimeIndex([\"1/1/2020 10:00:00+00:00\"], freq=\"D\") >>> idx.freqstr 'D' The frequency can be inferred if there are more than 2 points: >>> idx = pd.DatetimeIndex( ... [\"2018-01-01\", \"2018-01-03\", \"2018-01-05\"], freq=\"infer\" ... ) >>> idx.freqstr '2D' For PeriodIndex: >>> idx = pd.PeriodIndex([\"2023-1\", \"2023-2\", \"2023-3\"], freq=\"M\") >>> idx.freqstr 'M'",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\arrays\\datetimelike.py",
    "ast_data": "FunctionDef name:freqstr arg:self arguments arg If Compare Return return:no Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_save",
    "source_code": "def _save(filename, tensor_names, tensors, tensor_slices=None, name='save'):\n    if tensor_slices is None:\n        return gen_io_ops.save(filename, tensor_names, tensors, name=name)\n    else:\n        return gen_io_ops.save_slices(filename, tensor_names, tensor_slices, tensors, name=name)",
    "docstring": "Save a list of tensors to a file with given names. Example usage without slice info: Save(\"/foo/bar\", [\"w\", \"b\"], [w, b]) Example usage with slices: Save(\"/foo/bar\", [\"w\", \"w\"], [slice0, slice1], tensor_slices=[\"4 10 0,2:-\", \"4 10 2,2:-\"]) Args: filename: the file name of the sstable. tensor_names: a list of strings. tensors: the list of tensors to be saved. tensor_slices: Optional list of strings to specify the shape and slices of a larger virtual tensor that each tensor is a part of. If not specified each tensor is saved as a full slice. name: string. Optional name for the op. Requires: The length of tensors should match the size of tensor_names and of tensor_slices. Returns: An Operation that saves the tensors.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\io_ops.py",
    "ast_data": "FunctionDef name:_save arg:filename arg:tensor_names arg:tensors arg:tensor_slices arg:name arguments arg arg arg arg arg If Compare Return return:yes Call Return return:yes Call"
  },
  {
    "library": "sphinx",
    "name": "desc_optional",
    "source_code": "class desc_optional(nodes.Part, nodes.Inline, nodes.FixedTextElement):\n    child_text_separator = ', '\n\n    def astext(self) -> str:\n        return '[' + super().astext() + ']'",
    "docstring": "Node for marking optional parts of the parameter list.",
    "type": "class",
    "file_path": "sphinx\\sphinx\\addnodes.py",
    "ast_data": "ClassDef name:desc_optional Assign FunctionDef name:astext arg:self arguments arg Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "dfs_helper",
    "source_code": "def dfs_helper(partition: Partition, latency_so_far_sec: float) -> float:\n    latency_so_far_sec += partition_to_latency_mapping[partition].overall_latency_sec\n    if partition.children:\n        max_latency_sec = 0.0\n        for child in partition.children:\n            comm_latency_sec = get_comm_latency_between(partition, child, transfer_rate_bytes_per_sec)\n            new_latency_sec = dfs_helper(child, latency_so_far_sec + comm_latency_sec)\n            if new_latency_sec > max_latency_sec:\n                max_latency_sec = new_latency_sec\n        return max_latency_sec\n    return latency_so_far_sec",
    "docstring": "This function helps to recursively get the latency of a path of partitions",
    "type": "function",
    "file_path": "pytorch\\torch\\fx\\experimental\\partitioner_utils.py",
    "ast_data": "FunctionDef name:dfs_helper arg:partition arg:latency_so_far_sec arguments arg arg If Assign For Assign Call Assign Call If Compare Assign Return return:yes Return return:yes"
  },
  {
    "library": "django",
    "name": "render",
    "source_code": "def render(self, context):\n    pass",
    "docstring": "Return the node rendered as a string.",
    "type": "method",
    "file_path": "django\\django\\template\\base.py",
    "ast_data": "FunctionDef name:render arg:self arg:context arguments arg arg"
  },
  {
    "library": "tensorflow",
    "name": "size",
    "source_code": "def size(self, name=None):\n    if name is None:\n        name = '%s_size' % self._name\n    return gen_data_flow_ops.stage_size(name=name, shared_name=self._name, dtypes=self._dtypes, capacity=self._capacity, memory_limit=self._memory_limit)",
    "docstring": "Returns the number of elements in the staging area. Args: name: A name for the operation (optional) Returns: The created op",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\data_flow_ops.py",
    "ast_data": "FunctionDef name:size arg:self arg:name arguments arg arg If Compare Assign Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "_repr_html_inner",
    "source_code": "def _repr_html_inner(self):\n    return self._html_repr()",
    "docstring": "This function is returned by the @property to make TrueFalseget_config()[\"display\"]`.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\utils\\_repr_html\\base.py",
    "ast_data": "FunctionDef name:_repr_html_inner arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, graph, resolver, namespace, scope, closure_types):\n    super(Analyzer, self).__init__(graph)\n    self.resolver = resolver\n    self.namespace = namespace\n    self.scope = scope\n    self.closure_types = closure_types\n    context_types = {n: t for n, t in closure_types.items() if n not in scope.bound}\n    if context_types:\n        self.context_types = _TypeMap()\n        self.context_types.types = context_types\n    else:\n        self.context_types = None",
    "docstring": "Creates a new analyzer. Args: graph: cfg.Graph resolver: Resolver namespace: Dict[str, Any] scope: activity.Scope closure_types: Dict[QN, Set]",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\autograph\\pyct\\static_analysis\\type_inference.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:graph arg:resolver arg:namespace arg:scope arg:closure_types arguments arg arg arg arg arg arg Call Call Assign Assign Assign Assign Assign Call Compare If Assign Call Assign Assign"
  },
  {
    "library": "tensorflow",
    "name": "source_lines",
    "source_code": "def source_lines(self, host_name, file_path):\n    offset = self._host_name_file_path_to_offset[host_name, file_path]\n    return list(self._reader.read_source_files_event(offset).source_file.lines)",
    "docstring": "Read the line-by-line content of a source file. Args: host_name: Host name on which the source file is located. file_path: File path at which the source file is located. Returns: Lines of the source file as a of s.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\debug\\lib\\debug_events_reader.py",
    "ast_data": "FunctionDef name:source_lines arg:self arg:host_name arg:file_path arguments arg arg arg Assign Return return:yes Call Call"
  },
  {
    "library": "matplotlib",
    "name": "rotate_around",
    "source_code": "def rotate_around(self, x, y, theta):\n    return self.translate(-x, -y).rotate(theta).translate(x, y)",
    "docstring": "Add a rotation (in radians) around the point (x, y) in place. Returns *self*, so this method can easily be chained with more calls to :meth:, :meth:, :meth: and :meth:.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\transforms.py",
    "ast_data": "FunctionDef name:rotate_around arg:self arg:x arg:y arg:theta arguments arg arg arg arg Return return:yes Call Call Call"
  },
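The chaining described above is easy to demonstrate. A minimal sketch, assuming the public `matplotlib.transforms.Affine2D` API; the point and angle are illustrative:

```python
# Rotate the point (2, 1) by 90 degrees about (1, 1) using the chained
# translate -> rotate -> translate implementation shown above.
import numpy as np
from matplotlib.transforms import Affine2D

t = Affine2D().rotate_around(1.0, 1.0, np.pi / 2)
print(t.transform((2.0, 1.0)))  # approximately [1., 2.]
```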
  {
    "library": "scrapy",
    "name": "parallel",
    "source_code": "def parallel(iterable: Iterable[_T], count: int, callable: Callable[Concatenate[_T, _P], _T2], *args: _P.args, **named: _P.kwargs) -> Deferred[list[tuple[bool, Iterator[_T2]]]]:\n    coop = Cooperator()\n    work: Iterator[_T2] = (callable(elem, *args, **named) for elem in iterable)\n    return DeferredList([coop.coiterate(work) for _ in range(count)])",
    "docstring": "Execute a callable over the objects in the given iterable, in parallel, using no more than `` concurrent calls. Taken from:",
    "type": "function",
    "file_path": "scrapy\\scrapy\\utils\\defer.py",
    "ast_data": "FunctionDef name:parallel arg:iterable arg:count arg:callable arguments arg arg arg arg arg Assign Call Call Return return:yes Call Call Call"
  },
  {
    "library": "django",
    "name": "optimize_inner",
    "source_code": "def optimize_inner(self, operations, app_label):\n    new_operations = []\n    for i, operation in enumerate(operations):\n        right = True\n        for j, other in enumerate(operations[i + 1:]):\n            result = operation.reduce(other, app_label)\n            if isinstance(result, list):\n                in_between = operations[i + 1:i + j + 1]\n                if right:\n                    new_operations.extend(in_between)\n                    new_operations.extend(result)\n                elif all((op.reduce(other, app_label) is True for op in in_between)):\n                    new_operations.extend(result)\n                    new_operations.extend(in_between)\n                else:\n                    new_operations.append(operation)\n                    break\n                new_operations.extend(operations[i + j + 2:])\n                return new_operations\n            elif not result:\n                right = False\n        else:\n            new_operations.append(operation)\n    return new_operations",
    "docstring": "Inner optimization loop.",
    "type": "method",
    "file_path": "django\\django\\db\\migrations\\optimizer.py",
    "ast_data": "FunctionDef name:optimize_inner arg:self arg:operations arg:app_label arguments arg arg arg Assign For Call Assign For Call Assign Call If Call Assign If Call Call If Call Compare Call Call Call Call Call Return return:yes If Assign Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "stddev",
    "source_code": "@property\ndef stddev(self) -> Tensor:\n    return self.variance.sqrt()",
    "docstring": "Returns the standard deviation of the distribution.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributions\\distribution.py",
    "ast_data": "FunctionDef name:stddev arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "exit_cond_section",
    "source_code": "def exit_cond_section(self, section_id):\n    for split in self.cond_leaves[section_id]:\n        self.leaves |= split\n    del self.cond_entry[section_id]\n    del self.cond_leaves[section_id]",
    "docstring": "Exits a conditional section.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\autograph\\pyct\\cfg.py",
    "ast_data": "FunctionDef name:exit_cond_section arg:self arg:section_id arguments arg arg For"
  },
  {
    "library": "scikit-learn",
    "name": "_initialize",
    "source_code": "@abstractmethod\ndef _initialize(self, X, resp):\n    pass",
    "docstring": "Initialize the model parameters of the derived class. Parameters ---------- X : array-like of shape (n_samples, n_features) resp : array-like of shape (n_samples, n_components)",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\mixture\\_base.py",
    "ast_data": "FunctionDef name:_initialize arg:self arg:X arg:resp arguments arg arg arg"
  },
  {
    "library": "pandas",
    "name": "__len__",
    "source_code": "def __len__(self) -> int:\n    return len(self._data)",
    "docstring": "Return the length of the Index.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\indexes\\base.py",
    "ast_data": "FunctionDef name:__len__ arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_parse_variant_shapes_and_types",
    "source_code": "def _parse_variant_shapes_and_types(t):\n    shapes_and_types = _variant_handle_data(t)\n    if shapes_and_types is None or not shapes_and_types:\n        raise ValueError('Required handle data not set for {!r}'.format(t))\n    if shapes_and_types[0].type.type_id == full_type_pb2.TFT_ARRAY:\n        return shapes_and_types\n    elif shapes_and_types[0].type.type_id == full_type_pb2.TFT_UNSET:\n        return shapes_and_types\n    else:\n        raise ValueError('Attempted to stack a variant-dtype tensor with no type set ({!r})'.format(t))",
    "docstring": "Extracts shape and dtype information from a variant tensor .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\parallel_for\\pfor.py",
    "ast_data": "FunctionDef name:_parse_variant_shapes_and_types arg:t arguments arg Assign Call If BoolOp Compare Raise Call Call If Compare Return return:yes If Compare Return return:yes Raise Call Call"
  },
  {
    "library": "authlib",
    "name": "validate",
    "source_code": "def validate(self, now=None, leeway=0):\n    self._validate_essential_claims()\n    if now is None:\n        now = int(time.time())\n    self.validate_iss()\n    self.validate_sub()\n    self.validate_aud()\n    self.validate_exp(now, leeway)\n    self.validate_nbf(now, leeway)\n    self.validate_iat(now, leeway)\n    self.validate_jti()\n    for key in self.options.keys():\n        if key not in self.REGISTERED_CLAIMS:\n            self._validate_claim_value(key)",
    "docstring": "Validate everything in claims payload.",
    "type": "method",
    "file_path": "authlib\\authlib\\jose\\rfc7519\\claims.py",
    "ast_data": "FunctionDef name:validate arg:self arg:now arg:leeway arguments arg arg arg Call If Compare Assign Call Call Call Call Call Call Call Call Call For Call If Compare Call"
  },
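Since `validate` chains the individual claim validators, a round-trip example helps. A hedged sketch using Authlib's public `authlib.jose.jwt` helpers; the key, claims, and options are illustrative:

```python
# Encode a token, decode it with claim options, then run validate(),
# which checks exp/nbf/iat plus any configured claim values.
import time
from authlib.jose import jwt

key = "secret"
payload = {"iss": "me", "exp": int(time.time()) + 60}
token = jwt.encode({"alg": "HS256"}, payload, key)
claims = jwt.decode(token, key,
                    claims_options={"iss": {"essential": True, "value": "me"}})
claims.validate()  # raises on a missing or invalid claim; passes here
```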
  {
    "library": "tensorflow",
    "name": "DefaultInputIterator",
    "source_code": "class DefaultInputIterator(object):\n\n    def __init__(self, dataset):\n        self._dataset = dataset\n        if eager_context.executing_eagerly():\n            self._iterator = dataset_ops.make_one_shot_iterator(dataset)\n        else:\n            self._iterator = dataset_ops.make_initializable_iterator(dataset)\n\n    def get_next(self):\n        return self._iterator.get_next()\n\n    def get_next_as_optional(self):\n        return self._iterator.get_next_as_optional()\n\n    @deprecated(None, \"Use the iterator's `initializer` property instead.\")\n    def initialize(self):\n        if eager_context.executing_eagerly():\n            self._iterator = self._dataset.make_one_shot_iterator()\n            return []\n        else:\n            return [self._iterator.initializer]\n\n    @property\n    def initializer(self):\n        return self.initialize()",
    "docstring": "Default implementation of for default strategy.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\distribute_lib.py",
    "ast_data": "ClassDef name:DefaultInputIterator FunctionDef name:__init__ arg:self arg:dataset arguments arg arg Assign If Call Assign Call Assign Call FunctionDef name:get_next arg:self arguments arg Return return:yes Call FunctionDef name:get_next_as_optional arg:self arguments arg Return return:yes Call FunctionDef name:initialize arg:self arguments arg If Call Assign Call Return return:no Return return:yes Call FunctionDef name:initializer arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "disable_graph_collection",
    "source_code": "def disable_graph_collection():\n    context().disable_graph_collection()",
    "docstring": "Disables graph collection of executed functions.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\context.py",
    "ast_data": "FunctionDef name:disable_graph_collection arguments Call Call"
  },
  {
    "library": "sphinx",
    "name": "RSTParser",
    "source_code": "class RSTParser(docutils.parsers.rst.Parser, Parser):\n\n    def get_transforms(self) -> list[type[Transform]]:\n        transforms = super().get_transforms()\n        transforms.remove(SmartQuotes)\n        return transforms\n\n    def parse(self, inputstring: str | StringList, document: nodes.document) -> None:\n        self.setup_parse(inputstring, document)\n        self.statemachine = states.RSTStateMachine(state_classes=self.state_classes, initial_state=self.initial_state, debug=document.reporter.debug_flag)\n        if isinstance(inputstring, str):\n            lines = docutils.statemachine.string2lines(inputstring, tab_width=document.settings.tab_width, convert_whitespace=True)\n            inputlines = StringList(lines, document.current_source)\n        else:\n            inputlines = inputstring\n        self.decorate(inputlines)\n        self.statemachine.run(inputlines, document, inliner=self.inliner)\n        self.finish_parse()\n\n    def decorate(self, content: StringList) -> None:\n        prepend_prolog(content, self.config.rst_prolog)\n        append_epilog(content, self.config.rst_epilog)",
    "docstring": "A reST parser for Sphinx.",
    "type": "class",
    "file_path": "sphinx\\sphinx\\parsers.py",
    "ast_data": "ClassDef name:RSTParser FunctionDef name:get_transforms arg:self arguments arg Assign Call Call Call Return return:yes FunctionDef name:parse arg:self arg:inputstring arg:document arguments arg arg arg Call Assign Call If Call Assign Call Assign Call Assign Call Call Call FunctionDef name:decorate arg:self arg:content arguments arg arg Call Call"
  },
  {
    "library": "scikit-learn",
    "name": "get_feature_names_out",
    "source_code": "def get_feature_names_out(self, input_features=None):\n    check_is_fitted(self, attributes='n_features_in_')\n    input_features = _check_feature_names_in(self, input_features, generate_names=True)\n    est_name = self.__class__.__name__.lower()\n    names_list = [f'{est_name}_{name}_sqrt' for name in input_features]\n    for j in range(1, self.sample_steps):\n        cos_names = [f'{est_name}_{name}_cos{j}' for name in input_features]\n        sin_names = [f'{est_name}_{name}_sin{j}' for name in input_features]\n        names_list.extend(cos_names + sin_names)\n    return np.asarray(names_list, dtype=object)",
    "docstring": "Get output feature names for transformation. Parameters ---------- input_features : array-like of str or None, default=None Only used to validate feature names with the names seen in :meth:. Returns ------- feature_names_out : ndarray of str objects Transformed feature names.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\kernel_approximation.py",
    "ast_data": "FunctionDef name:get_feature_names_out arg:self arg:input_features arguments arg arg Call Assign Call Assign Call Assign For Call Assign Assign Call Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "kstest",
    "source_code": "@_rename_parameter('mode', 'method')\ndef kstest(data1, data2, args=(), alternative='two-sided', method='auto'):\n    return scipy.stats._stats_py.kstest(data1, data2, args, alternative=alternative, method=method)",
    "docstring": "Parameters ---------- data1 : array_like data2 : str, callable or array_like args : tuple, sequence, optional Distribution parameters, used if or are strings. alternative : str, as documented in stats.kstest method : str, as documented in stats.kstest Returns ------- tuple of (K-S statistic, probability)",
    "type": "function",
    "file_path": "scipy\\scipy\\stats\\_mstats_basic.py",
    "ast_data": "FunctionDef name:kstest arg:data1 arg:data2 arg:args arg:alternative arg:method arguments arg arg arg arg arg Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "get_sequence_dense_tensor",
    "source_code": "def get_sequence_dense_tensor(self, transformation_cache, state_manager):\n    if not isinstance(self.categorical_column, SequenceCategoricalColumn):\n        raise ValueError('In embedding_column: {}. categorical_column must be of type SequenceCategoricalColumn to use SequenceFeatures. Suggested fix: Use one of sequence_categorical_column_with_*. Given (type {}): {}'.format(self.name, type(self.categorical_column), self.categorical_column))\n    sparse_tensors = self.categorical_column.get_sparse_tensors(transformation_cache, state_manager)\n    dense_tensor = self._get_dense_tensor_internal(sparse_tensors, state_manager)\n    sequence_length = fc_utils.sequence_length_from_sparse_tensor(sparse_tensors.id_tensor)\n    return SequenceDenseColumn.TensorSequenceLengthPair(dense_tensor=dense_tensor, sequence_length=sequence_length)",
    "docstring": "See base class.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\feature_column\\feature_column_v2.py",
    "ast_data": "FunctionDef name:get_sequence_dense_tensor arg:self arg:transformation_cache arg:state_manager arguments arg arg arg If Call Raise Call Call Call Assign Call Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "post_tracker_issue_comment",
    "source_code": "def post_tracker_issue_comment(org: str, project: str, issue_num: int, pr_num: int, cherry_pick_pr: str, classification: str, fixes: str, dry_run: bool=False) -> list[dict[str, Any]]:\n    comment = '\\n'.join(('Link to landed trunk PR (if applicable):', f'* https://github.com/{org}/{project}/pull/{pr_num}', '', 'Link to release branch PR:', f'* {cherry_pick_pr}', '', 'Criteria Category:', ' - '.join((classification.capitalize(), fixes.capitalize()))))\n    return gh_post_pr_comment(org, project, issue_num, comment, dry_run)",
    "docstring": "Post a comment on the tracker issue (if any) to record the cherry pick",
    "type": "function",
    "file_path": "pytorch\\.github\\scripts\\cherry_pick.py",
    "ast_data": "FunctionDef name:post_tracker_issue_comment arg:org arg:project arg:issue_num arg:pr_num arg:cherry_pick_pr arg:classification arg:fixes arg:dry_run arguments arg arg arg arg arg arg arg arg Assign Call Call Call Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "sparse_segment_sum_v2",
    "source_code": "@tf_export('sparse.segment_sum', v1=[])\ndef sparse_segment_sum_v2(data, indices, segment_ids, num_segments=None, name=None, sparse_gradient=False):\n    return sparse_segment_sum(data, indices, segment_ids, name=name, num_segments=num_segments, sparse_gradient=sparse_gradient)",
    "docstring": "Computes the sum along sparse segments of a tensor. Read [the section on segmentation]( for an explanation of segments. Like , but can have rank less than 's first dimension, selecting a subset of dimension 0, specified by . is allowed to have missing ids, in which case the output will be zeros at those indices. In those cases is used to determine the size of the output. For example: Args: data: A with data that will be assembled in the output. indices: A 1-D with indices into . Has same rank as . segment_ids: A 1-D with indices into the output . Values should be sorted and can be repeated. num_segments: An optional int32 scalar. Indicates the size of the output . name: A name for the operation (optional). sparse_gradient: An optional . Defaults to . If , the gradient of this function will be sparse () instead of dense (). The sparse gradient will contain one non-zero row for each unique index in . Returns: A of the shape as data, except for dimension 0 which has size , the number of segments specified via or inferred for the last element in .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\math_ops.py",
    "ast_data": "FunctionDef name:sparse_segment_sum_v2 arg:data arg:indices arg:segment_ids arg:num_segments arg:name arg:sparse_gradient arguments arg arg arg arg arg arg Return return:yes Call Call"
  },
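A minimal sketch of the indices/segment_ids interplay described above, using the public `tf.sparse.segment_sum` export; the constants are illustrative:

```python
import tensorflow as tf

c = tf.constant([[1, 2, 3, 4], [-1, -2, -3, -4], [5, 6, 7, 8]])
# Select rows 0 and 1 via `indices`, then sum both into output segment 0.
out = tf.sparse.segment_sum(c, indices=tf.constant([0, 1]),
                            segment_ids=tf.constant([0, 0]))
print(out.numpy())  # [[0 0 0 0]]
```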
  {
    "library": "pytorch",
    "name": "__init__",
    "source_code": "def __init__(self, disjuncts):\n    self.disjuncts = disjuncts",
    "docstring": ":param disjuncts: Disjunction of constraints",
    "type": "method",
    "file_path": "pytorch\\torch\\fx\\experimental\\migrate_gradual_types\\constraint.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:disjuncts arguments arg arg Assign"
  },
  {
    "library": "tensorflow",
    "name": "_make_input_fn_iterator",
    "source_code": "def _make_input_fn_iterator(self, input_fn, replication_mode=distribute_lib.InputReplicationMode.PER_WORKER):\n    input_context = self._make_input_context()\n    return input_lib_v1.InputFunctionIterator(input_fn, self._input_workers, [input_context], self._container_strategy())",
    "docstring": "Distributes the input function to each local GPU.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\collective_all_reduce_strategy.py",
    "ast_data": "FunctionDef name:_make_input_fn_iterator arg:self arg:input_fn arg:replication_mode arguments arg arg arg Assign Call Return return:yes Call Call"
  },
  {
    "library": "pygame",
    "name": "WeakDirtySprite",
    "source_code": "class WeakDirtySprite(WeakSprite, DirtySprite):\n    pass",
    "docstring": "A subclass of WeakSprite and DirtySprite that combines the benefits of both classes.",
    "type": "class",
    "file_path": "pygame\\src_py\\sprite.py",
    "ast_data": "ClassDef name:WeakDirtySprite"
  },
  {
    "library": "tensorflow",
    "name": "set_signature_defs",
    "source_code": "def set_signature_defs(tflite_model, signature_def_map):\n    model = tflite_model\n    if not isinstance(tflite_model, bytearray):\n        model = bytearray(tflite_model)\n    serialized_signature_def_map = {k: v.SerializeToString() for k, v in signature_def_map.items()}\n    model_buffer = signature_def_util.SetSignatureDefMap(model, serialized_signature_def_map)\n    return model_buffer",
    "docstring": "Sets SignatureDefs to the Metadata of a TfLite flatbuffer buffer. Args: tflite_model: Binary TFLite model (bytes or bytes-like object) to which to add signature_def. signature_def_map: dict containing SignatureDefs to store in metadata. Returns: buffer: A TFLite model binary identical to model buffer with metadata field containing SignatureDef. Raises: ValueError: tflite_model buffer does not contain a valid TFLite model. signature_def_map is empty or does not contain a SignatureDef.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\lite\\tools\\signature\\signature_def_utils.py",
    "ast_data": "FunctionDef name:set_signature_defs arg:tflite_model arg:signature_def_map arguments arg arg Assign If Call Assign Call Assign Call Call Assign Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "snapshot",
    "source_code": "def snapshot() -> dict[str, Any]:\n    return torch._C._mtia_memorySnapshot()",
    "docstring": "Return a dictionary of MTIA memory allocator history",
    "type": "function",
    "file_path": "pytorch\\torch\\mtia\\__init__.py",
    "ast_data": "FunctionDef name:snapshot arguments Return return:yes Call"
  },
  {
    "library": "django",
    "name": "skip_default_on_alter",
    "source_code": "def skip_default_on_alter(self, field):\n    return False",
    "docstring": "Some backends don't accept default values for certain columns types (i.e. MySQL longtext and longblob) in the ALTER COLUMN statement.",
    "type": "method",
    "file_path": "django\\django\\db\\backends\\base\\schema.py",
    "ast_data": "FunctionDef name:skip_default_on_alter arg:self arg:field arguments arg arg Return return:yes"
  },
  {
    "library": "numpy",
    "name": "get_additional_deps",
    "source_code": "def get_additional_deps(self, file: MypyFile) -> list[tuple[int, str, int]]:\n    fullname = file.fullname\n    if fullname == 'numpy':\n        _override_imports(file, f'{_MODULE}._extended_precision', imports=[(v, v) for v in _EXTENDED_PRECISION_LIST])\n    elif fullname == 'numpy.ctypeslib':\n        _override_imports(file, 'ctypes', imports=[(_C_INTP, '_c_intp')])\n    return [(PRI_MED, fullname, -1)]",
    "docstring": "Handle all import-based overrides. * Import platform-specific extended-precision subclasses (*e.g.* and ). * Import the appropriate equivalent to .",
    "type": "method",
    "file_path": "numpy\\numpy\\typing\\mypy_plugin.py",
    "ast_data": "FunctionDef name:get_additional_deps arg:self arg:file arguments arg arg Assign If Compare Call If Compare Call Return return:yes"
  },
  {
    "library": "seaborn",
    "name": "set_ylabels",
    "source_code": "def set_ylabels(self, label=None, clear_inner=True, **kwargs):\n    if label is None:\n        label = self._y_var\n    for ax in self._left_axes:\n        ax.set_ylabel(label, **kwargs)\n    if clear_inner:\n        for ax in self._not_left_axes:\n            ax.set_ylabel('')\n    return self",
    "docstring": "Label the y axis on the left column of the grid.",
    "type": "method",
    "file_path": "seaborn\\seaborn\\axisgrid.py",
    "ast_data": "FunctionDef name:set_ylabels arg:self arg:label arg:clear_inner arguments arg arg arg arg If Compare Assign For Call If For Call Return return:yes"
  },
  {
    "library": "numpy",
    "name": "as_real",
    "source_code": "def as_real(obj, kind=4):\n    if isinstance(obj, int):\n        return Expr(Op.REAL, (float(obj), kind))\n    if isinstance(obj, float):\n        return Expr(Op.REAL, (obj, kind))\n    if isinstance(obj, Expr):\n        if obj.op is Op.REAL:\n            return obj\n        elif obj.op is Op.INTEGER:\n            return Expr(Op.REAL, (float(obj.data[0]), kind))\n    raise OpError(f'cannot convert {obj} to REAL constant')",
    "docstring": "Return object as REAL constant.",
    "type": "function",
    "file_path": "numpy\\numpy\\f2py\\symbolic.py",
    "ast_data": "FunctionDef name:as_real arg:obj arg:kind arguments arg arg If Call Return return:yes Call Call If Call Return return:yes Call If Call If Compare Return return:yes If Compare Return return:yes Call Call Raise Call"
  },
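The coercion rules above are easy to exercise directly. A small sketch; `as_real` is an internal helper of `numpy.f2py.symbolic`, so treat this as illustrative rather than a public API:

```python
from numpy.f2py.symbolic import as_integer, as_real

print(as_real(3))               # int -> REAL constant 3.0 (default kind=4)
print(as_real(1.5, kind=8))     # float -> REAL constant with kind 8
print(as_real(as_integer(2)))   # INTEGER expression promoted to REAL
```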
  {
    "library": "pytorch",
    "name": "_log_timed_event",
    "source_code": "def _log_timed_event(self, event_name: str, time_ns: int, phase: str, metadata: Optional[dict[str, Any]]=None) -> dict[str, Any]:\n    event = {'name': event_name, 'ts': time_ns / 1000, 'args': metadata, 'ph': phase, 'cat': 'dynamo_timed', 'tid': 0, 'pid': 0}\n    torch._logging.trace_structured('chromium_event', payload_fn=lambda: event, suppress_context=False, expect_trace_id=False)\n    record_chromium_event_internal(event)\n    return event",
    "docstring": "Logs a timed event in chromium format. See log_event_start, log_event_end, etc.",
    "type": "method",
    "file_path": "pytorch\\torch\\_dynamo\\utils.py",
    "ast_data": "FunctionDef name:_log_timed_event arg:self arg:event_name arg:time_ns arg:phase arg:metadata arguments arg arg arg arg arg Assign Call arguments Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "TrainingState",
    "source_code": "class TrainingState(Enum):\n    IDLE = auto()\n    FORWARD_BACKWARD = auto()\n    SUMMON_FULL_PARAMS = auto()",
    "docstring": "An enum that indicates the state of a ` instance.",
    "type": "class",
    "file_path": "pytorch\\torch\\distributed\\fsdp\\_common_utils.py",
    "ast_data": "ClassDef name:TrainingState Assign Call Assign Call Assign Call"
  },
  {
    "library": "tensorflow",
    "name": "trainable_variables",
    "source_code": "@tf_export(v1=['trainable_variables'])\ndef trainable_variables(scope=None):\n    return ops.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES, scope)",
    "docstring": "Returns all variables created with . When passed , the constructor automatically adds new variables to the graph collection . This convenience function returns the contents of that collection. @compatibility(TF2) Not compatible with eager execution and . In particular, Graph collections are deprecated in TF2. Instead please create a container for all your model state, including variables. You can then list all the trainable variables in your through the attribute. @end_compatibility Args: scope: (Optional.) A string. If supplied, the resulting list is filtered to include only items whose attribute matches using . Items without a attribute are never returned if a scope is supplied. The choice of means that a without special tokens filters by prefix. Returns: A list of Variable objects.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\variables.py",
    "ast_data": "FunctionDef name:trainable_variables arg:scope arguments arg Return return:yes Call Call"
  },
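Because this is a TF1 graph-collection API, it needs graph mode. A hedged sketch via `tf.compat.v1`; the variable names are illustrative:

```python
import tensorflow.compat.v1 as tf

tf.disable_eager_execution()
with tf.variable_scope("model"):
    w = tf.get_variable("w", shape=[2], trainable=True)
    b = tf.get_variable("b", shape=[2], trainable=False)  # excluded

# Scope filtering uses re.match, i.e. prefix matching for plain strings.
print([v.name for v in tf.trainable_variables(scope="model")])  # ['model/w:0']
```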
  {
    "library": "pytorch",
    "name": "_convert_formal_parameter",
    "source_code": "def _convert_formal_parameter(param: onnx.defs.OpSchema.FormalParameter, type_constraints: Mapping[str, TypeConstraintParam]) -> Parameter:\n    if param.type_str in type_constraints:\n        type_constraint = type_constraints[param.type_str]\n    else:\n        type_constraint = TypeConstraintParam(name=param.name, allowed_types={_get_type_from_str(param.type_str)})\n    return Parameter(name=param.name, type_constraint=type_constraint, required=param.option != onnx.defs.OpSchema.FormalParameterOption.Optional, variadic=param.option == onnx.defs.OpSchema.FormalParameterOption.Variadic)",
    "docstring": "Convert a formal parameter from ONNX Opschema to Parameter.",
    "type": "function",
    "file_path": "pytorch\\torch\\onnx\\_internal\\exporter\\_schemas.py",
    "ast_data": "FunctionDef name:_convert_formal_parameter arg:param arg:type_constraints arguments arg arg If Compare Assign Assign Call Call Return return:yes Call Compare Compare"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, devices=None, cross_device_ops=None, *, mesh=None):\n    self._validate_init_args(mesh, devices)\n    if not mesh:\n        mesh = self._build_mesh_from_device_list(devices)\n    extended = dtensor_strategy_extended.DTensorStrategyExtended(container_strategy=self, mesh=mesh)\n    super().__init__(extended)\n    self._mesh = mesh\n    self._devices = devices",
    "docstring": "Synchronous training across multiple replicas on one machine. Args: devices: a list of device strings, such as ['/gpu:0', '/gpu:1']. If both and are None, all the available GPU/TPU will be used. If no accelerators are found, CPU is used. cross_device_ops: optional, a descendant of . The value is ignored at the moment, and support will be added later. mesh: optional DTensor mesh for the computation. Note that either or should be provided, and not both. The mesh should be 1D, and will be used to split the input data among that dimension.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\experimental\\mirrored_strategy.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:devices arg:cross_device_ops arguments arg arg arg arg Call If Assign Call Assign Call Call Call Assign Assign"
  },
  {
    "library": "matplotlib",
    "name": "get_minor_formatter",
    "source_code": "def get_minor_formatter(self):\n    return self.minor.formatter",
    "docstring": "Get the formatter of the minor ticker.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\axis.py",
    "ast_data": "FunctionDef name:get_minor_formatter arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_compute_dx_and_dy",
    "source_code": "def _compute_dx_and_dy(x, y, y_shape):\n    with x.graph.as_default():\n        dy_orig = constant_op.constant(1.0, shape=y_shape, dtype=y.dtype)\n        dy = array_ops.identity(dy_orig)\n    grads = gradients.gradients(y, x, dy)\n    assert len(grads) == 1\n    return (grads[0], dy_orig)",
    "docstring": "Returns a node to compute gradient of y wrt x.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\gradient_checker.py",
    "ast_data": "FunctionDef name:_compute_dx_and_dy arg:x arg:y arg:y_shape arguments arg arg arg With Call Assign Call Assign Call Assign Call Compare Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "import_to_tensorboard",
    "source_code": "def import_to_tensorboard(model_dir, log_dir, tag_set):\n    with session.Session(graph=ops.Graph()) as sess:\n        input_graph_def = saved_model_utils.get_meta_graph_def(model_dir, tag_set).graph_def\n        importer.import_graph_def(input_graph_def)\n        pb_visual_writer = summary.FileWriter(log_dir)\n        pb_visual_writer.add_graph(sess.graph)\n        print('Model Imported. Visualize by running: tensorboard --logdir={}'.format(log_dir))",
    "docstring": "View an SavedModel as a graph in Tensorboard. Args: model_dir: The directory containing the SavedModel to import. log_dir: The location for the Tensorboard log to begin visualization from. tag_set: Group of tag(s) of the MetaGraphDef to load, in string format, separated by ','. For tag-set contains multiple tags, all tags must be passed in. Usage: Call this function with your SavedModel location and desired log directory. Launch Tensorboard by pointing it to the log directory. View your imported SavedModel as a graph.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\tools\\import_pb_to_tensorboard.py",
    "ast_data": "FunctionDef name:import_to_tensorboard arg:model_dir arg:log_dir arg:tag_set arguments arg arg arg With Call Call Assign Call Call Assign Call Call Call Call"
  },
  {
    "library": "matplotlib",
    "name": "make_compound_path_from_polys",
    "source_code": "@classmethod\ndef make_compound_path_from_polys(cls, XY):\n    numpolys, numsides, two = XY.shape\n    if two != 2:\n        raise ValueError(\"The third dimension of 'XY' must be 2\")\n    stride = numsides + 1\n    nverts = numpolys * stride\n    verts = np.zeros((nverts, 2))\n    codes = np.full(nverts, cls.LINETO, dtype=cls.code_type)\n    codes[0::stride] = cls.MOVETO\n    codes[numsides::stride] = cls.CLOSEPOLY\n    for i in range(numsides):\n        verts[i::stride] = XY[:, i]\n    return cls(verts, codes)",
    "docstring": "Make a compound object to draw a number of polygons with equal numbers of sides. .. plot:: gallery/misc/histogram_path.py Parameters ---------- XY : (numpolys, numsides, 2) array",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\path.py",
    "ast_data": "FunctionDef name:make_compound_path_from_polys arg:cls arg:XY arguments arg arg Assign If Compare Raise Call Assign Assign Assign Call Assign Call Assign Assign For Call Assign Return return:yes Call"
  },
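A short sketch of building one compound `Path` for several equal-sided polygons, per the classmethod above; the vertex data is illustrative:

```python
import numpy as np
from matplotlib.path import Path

tri = np.array([[0, 0], [1, 0], [0, 1]], dtype=float)
XY = np.stack([tri, tri + 2.0])  # shape (numpolys=2, numsides=3, 2)
path = Path.make_compound_path_from_polys(XY)
# One MOVETO, then LINETOs, then CLOSEPOLY per polygon, interleaved by stride.
print(path.vertices.shape)  # (8, 2)
print(path.codes)           # [ 1  2  2 79  1  2  2 79]
```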
  {
    "library": "pytorch",
    "name": "LogsSpecs",
    "source_code": "class LogsSpecs(ABC):\n\n    def __init__(self, log_dir: Optional[str]=None, redirects: Union[Std, dict[int, Std]]=Std.NONE, tee: Union[Std, dict[int, Std]]=Std.NONE, local_ranks_filter: Optional[set[int]]=None) -> None:\n        self._root_log_dir = log_dir\n        self._redirects = redirects\n        self._tee = tee\n        self._local_ranks_filter = local_ranks_filter\n\n    @abstractmethod\n    def reify(self, envs: dict[int, dict[str, str]]) -> LogsDest:\n        pass\n\n    @property\n    @abstractmethod\n    def root_log_dir(self) -> str:\n        pass",
    "docstring": "Defines logs processing and redirection for each worker process. Args: log_dir: Base directory where logs will be written. redirects: Streams to redirect to files. Pass a single `` enum to duplicate streams for all workers, or a mapping keyed by local_rank to selectively duplicate.",
    "type": "class",
    "file_path": "pytorch\\torch\\distributed\\elastic\\multiprocessing\\api.py",
    "ast_data": "ClassDef name:LogsSpecs FunctionDef name:__init__ arg:self arg:log_dir arg:redirects arg:tee arg:local_ranks_filter arguments arg arg arg arg arg Assign Assign Assign Assign FunctionDef name:reify arg:self arg:envs arguments arg arg FunctionDef name:root_log_dir arg:self arguments arg"
  },
  {
    "library": "scikit-learn",
    "name": "_get_weights",
    "source_code": "def _get_weights(dist, weights):\n    if weights in (None, 'uniform'):\n        return None\n    if weights == 'distance':\n        if dist.dtype is np.dtype(object):\n            for point_dist_i, point_dist in enumerate(dist):\n                if hasattr(point_dist, '__contains__') and 0.0 in point_dist:\n                    dist[point_dist_i] = point_dist == 0.0\n                else:\n                    dist[point_dist_i] = 1.0 / point_dist\n        else:\n            with np.errstate(divide='ignore'):\n                dist = 1.0 / dist\n            inf_mask = np.isinf(dist)\n            inf_row = np.any(inf_mask, axis=1)\n            dist[inf_row] = inf_mask[inf_row]\n        return dist\n    if callable(weights):\n        return weights(dist)",
    "docstring": "Get the weights from an array of distances and a parameter ``, then returns None.",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\neighbors\\_base.py",
    "ast_data": "FunctionDef name:_get_weights arg:dist arg:weights arguments arg arg If Compare Return return:no If Compare If Compare Call For Call If BoolOp Call Compare Assign Compare Assign With Call Assign Assign Call Assign Call Assign Return return:yes If Call Return return:yes Call"
  },
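The 'distance' branch above has a subtle rule: any row containing a zero distance turns into a boolean mask, so exact matches get all the weight. A standalone numpy sketch of that rule (the sklearn helper itself is private):

```python
import numpy as np

dist = np.array([[0.0, 2.0],
                 [1.0, 4.0]])
with np.errstate(divide="ignore"):
    w = 1.0 / dist
inf_mask = np.isinf(w)
inf_row = np.any(inf_mask, axis=1)
w[inf_row] = inf_mask[inf_row]  # rows with an exact match become one-hot
print(w)  # [[1.   0.  ] [1.   0.25]]
```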
  {
    "library": "scipy",
    "name": "stft",
    "source_code": "def stft(self, x: np.ndarray, p0: int | None=None, p1: int | None=None, *, k_offset: int=0, padding: PAD_TYPE='zeros', axis: int=-1) -> np.ndarray:\n    return self.stft_detrend(x, None, p0, p1, k_offset=k_offset, padding=padding, axis=axis)",
    "docstring": "Perform the short-time Fourier transform. A two-dimensional matrix with `f_ptsfwinffft_modep_minp_max(n)xxxxxaxisx~ShortTimeFFT.istftnscipy.signal.ShortTimeFFT`: Class this method belongs to.",
    "type": "method",
    "file_path": "scipy\\scipy\\signal\\_short_time_fft.py",
    "ast_data": "FunctionDef name:stft arg:self arg:x arg:p0 arg:p1 arguments arg arg arg arg arg arg arg Return return:yes Call"
  },
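Since `stft` just forwards to `stft_detrend`, usage is a one-liner once a `ShortTimeFFT` is built. A minimal sketch (requires SciPy with `scipy.signal.ShortTimeFFT`, i.e. 1.12+); window, hop, and test signal are illustrative:

```python
import numpy as np
from scipy.signal import ShortTimeFFT
from scipy.signal.windows import gaussian

win = gaussian(50, std=8, sym=True)
SFT = ShortTimeFFT(win, hop=10, fs=1000.0)

t = np.arange(1000) / 1000.0
x = np.sin(2 * np.pi * 100 * t)
S = SFT.stft(x)  # complex spectrogram: f_pts rows, one column per slice
print(S.shape, SFT.f[np.argmax(np.abs(S).sum(axis=1))])  # peak near 100 Hz
```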
  {
    "library": "pytorch",
    "name": "_get_modules",
    "source_code": "def _get_modules(self) -> set[nn.Module]:\n    return {pi.module for pi in self.flat_param._param_infos}.union({spi.module for spi in self.flat_param._shared_param_infos})",
    "docstring": "Return a :class: of the modules whose parameters are included in this handle's flat parameter.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\fsdp\\_flat_param.py",
    "ast_data": "FunctionDef name:_get_modules arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "list_linear_pointwise_strategy",
    "source_code": "def list_linear_pointwise_strategy(op_schema: OpSchema) -> StrategyType:\n    return list_pointwise_strategy(op_schema, linearity=True)",
    "docstring": "for each list op stratgy that supports linearity",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\tensor\\_ops\\_pointwise_ops.py",
    "ast_data": "FunctionDef name:list_linear_pointwise_strategy arg:op_schema arguments arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "from_float",
    "source_code": "@classmethod\ndef from_float(cls, mod, use_precomputed_fake_quant=False):\n    return _ConvNd.from_float(cls, mod, use_precomputed_fake_quant=use_precomputed_fake_quant)",
    "docstring": "Creates a quantized module from a float module or qparams_dict. Args: mod (Module): a float module, either produced by torch.ao.quantization utilities or provided by the user",
    "type": "method",
    "file_path": "pytorch\\torch\\ao\\nn\\quantized\\modules\\conv.py",
    "ast_data": "FunctionDef name:from_float arg:cls arg:mod arg:use_precomputed_fake_quant arguments arg arg arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "reset",
    "source_code": "def reset(self, checkpoint_id: Union[str, os.PathLike, None]=None) -> None:\n    self.checkpoint_id = checkpoint_id",
    "docstring": "Implementation of the StorageReader method",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\checkpoint\\format_utils.py",
    "ast_data": "FunctionDef name:reset arg:self arg:checkpoint_id arguments arg arg Assign"
  },
  {
    "library": "matplotlib",
    "name": "set_ticks",
    "source_code": "def set_ticks(self, ticks, labels=None, *, minor=False, **kwargs):\n    if labels is None and kwargs:\n        first_key = next(iter(kwargs))\n        raise ValueError(f\"Incorrect use of keyword argument {first_key!r}. Keyword arguments other than 'minor' modify the text labels and can only be used if 'labels' are passed as well.\")\n    result = self._set_tick_locations(ticks, minor=minor)\n    if labels is not None:\n        self.set_ticklabels(labels, minor=minor, **kwargs)\n    return result",
    "docstring": "Set this Axis' tick locations and optionally tick labels. If necessary, the view limits of the Axis are expanded so that all given ticks are visible. Parameters ---------- ticks : 1D array-like Array of tick locations (either floats or in axis units). The axis is replaced by a . Pass an empty list (`.Axis.set_major_formatter.FixedFormatter.Formatter.Text~.Axes.tick_params`. Notes ----- The mandatory expansion of the view limits is an intentional design choice to prevent the surprise of a non-visible tick. If you need other limits, you should set the limits explicitly after setting the ticks.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\axis.py",
    "ast_data": "FunctionDef name:set_ticks arg:self arg:ticks arg:labels arguments arg arg arg arg arg If BoolOp Compare Assign Call Call Raise Call Assign Call If Compare Call Return return:yes"
  },
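A brief sketch of the labels/kwargs contract described above, using the standard pyplot API; the data and labels are illustrative:

```python
import matplotlib
matplotlib.use("Agg")  # headless backend for the example
import matplotlib.pyplot as plt

fig, ax = plt.subplots()
ax.plot([0, 1, 2], [0, 1, 4])
# Text kwargs (e.g. rotation) are only allowed when labels are passed too.
ax.xaxis.set_ticks([0, 1, 2], labels=["lo", "mid", "hi"], rotation=45)
fig.savefig("ticks.png")
```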
  {
    "library": "matplotlib",
    "name": "_get_data_coords",
    "source_code": "def _get_data_coords(self, event):\n    return (event.xdata, event.ydata) if event.inaxes is self.ax else self.ax.transData.inverted().transform((event.x, event.y))",
    "docstring": "Return *event*'s data coordinates in this widget's Axes.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\widgets.py",
    "ast_data": "FunctionDef name:_get_data_coords arg:self arg:event arguments arg arg Return return:yes Compare Call Call"
  },
  {
    "library": "tensorflow",
    "name": "variables",
    "source_code": "@property\ndef variables(self):\n    if save_context.in_save_context():\n        return [self._vars[0]]\n    return self._vars",
    "docstring": "The list of .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\tpu_replicated_variable.py",
    "ast_data": "FunctionDef name:variables arg:self arguments arg If Call Return return:yes Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "is_tpu_strategy",
    "source_code": "def is_tpu_strategy(strategy):\n    return _is_tpu_strategy_class(strategy.__class__)",
    "docstring": "Returns whether input is a TPUStrategy instance or subclass instance.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\backend.py",
    "ast_data": "FunctionDef name:is_tpu_strategy arg:strategy arguments arg Return return:yes Call"
  },
  {
    "library": "scrapy",
    "name": "normvalue",
    "source_code": "def normvalue(self, value: _RawValueT | Iterable[_RawValueT]) -> list[bytes]:\n    _value: Iterable[_RawValueT]\n    if value is None:\n        _value = []\n    elif isinstance(value, (str, bytes)):\n        _value = [value]\n    elif hasattr(value, '__iter__'):\n        _value = value\n    else:\n        _value = [value]\n    return [self._tobytes(x) for x in _value]",
    "docstring": "Normalize values to bytes",
    "type": "method",
    "file_path": "scrapy\\scrapy\\http\\headers.py",
    "ast_data": "FunctionDef name:normvalue arg:self arg:value arguments arg arg If Compare Assign If Call Assign If Call Assign Assign Return return:yes Call"
  },
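The normalization above is what lets scrapy headers accept strings, bytes, or iterables interchangeably. A short usage sketch with `scrapy.http.Headers`:

```python
from scrapy.http import Headers

h = Headers()
h["X-Items"] = ["a", b"b"]   # mixed str/bytes -> list of bytes
h["X-One"] = "single"        # scalar -> one-element list
print(h.getlist("X-Items"))  # [b'a', b'b']
print(h["X-One"])            # b'single'
```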
  {
    "library": "scipy",
    "name": "SpecialFunctionError",
    "source_code": "class SpecialFunctionError(Exception):\n    pass",
    "docstring": "Exception that can be raised by special functions.",
    "type": "class",
    "file_path": "scipy\\scipy\\special\\_sf_error.py",
    "ast_data": "ClassDef name:SpecialFunctionError"
  },
  {
    "library": "scikit-learn",
    "name": "strip_newsgroup_header",
    "source_code": "def strip_newsgroup_header(text):\n    _before, _blankline, after = text.partition('\\n\\n')\n    return after",
    "docstring": "Given text in \"news\" format, strip the headers, by removing everything before the first blank line. Parameters ---------- text : str The text from which to remove the signature block.",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\datasets\\_twenty_newsgroups.py",
    "ast_data": "FunctionDef name:strip_newsgroup_header arg:text arguments arg Assign Call Return return:yes"
  },
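The helper above is a one-line use of `str.partition`. A usage sketch; note that the function lives in a private sklearn module (see `file_path`), so the import path may change:

```python
from sklearn.datasets._twenty_newsgroups import strip_newsgroup_header

msg = "From: a@example.com\nSubject: hello\n\nBody starts here."
print(strip_newsgroup_header(msg))  # 'Body starts here.'
```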
  {
    "library": "pytorch",
    "name": "exit_dual_level",
    "source_code": "def exit_dual_level(*, level=None):\n    global _current_level\n    if level is None:\n        level = _current_level\n    if level != _current_level:\n        raise RuntimeError('Trying to exit a forward AD level that was not the last one that was created. This is not supported.')\n    torch._C._exit_dual_level(level=level)\n    _current_level = level - 1",
    "docstring": "Exit a forward grad level. This function deletes all the gradients associated with this level. Only deleting the latest entered level is allowed. This function also updates the current level that is used by default by the other functions in this API.",
    "type": "function",
    "file_path": "pytorch\\torch\\autograd\\forward_ad.py",
    "ast_data": "FunctionDef name:exit_dual_level arguments arg If Compare Assign If Compare Raise Call Call Assign"
  },
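A small end-to-end sketch of forward-mode AD levels with the public `torch.autograd.forward_ad` API, entering a level explicitly and exiting it as described above:

```python
import torch
import torch.autograd.forward_ad as fwAD

level = fwAD.enter_dual_level()
x = fwAD.make_dual(torch.tensor(2.0), torch.tensor(1.0))  # primal, tangent
y = x * x
print(fwAD.unpack_dual(y).tangent)  # dy/dx at x=2 -> tensor(4.)
fwAD.exit_dual_level(level=level)   # drops gradients of the latest level
```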
  {
    "library": "scipy",
    "name": "canberra",
    "source_code": "def canberra(u, v, w=None):\n    u = _validate_vector(u)\n    v = _validate_vector(v, dtype=np.float64)\n    if w is not None:\n        w = _validate_weights(w)\n    with np.errstate(invalid='ignore'):\n        abs_uv = abs(u - v)\n        abs_u = abs(u)\n        abs_v = abs(v)\n        d = abs_uv / (abs_u + abs_v)\n        if w is not None:\n            d = w * d\n        d = np.nansum(d)\n    return d",
    "docstring": "Compute the Canberra distance between two 1-D arrays. The Canberra distance is defined as .. math:: d(u,v) = \\sum_i \\frac{|u_i-v_i|} {|u_i|+|v_i|}. Parameters ---------- u : (N,) array_like Input array. v : (N,) array_like Input array. w : (N,) array_like, optional The weights for each value in and . Default is None, which gives each value a weight of 1.0 Returns ------- canberra : double The Canberra distance between vectors and . Notes ----- When `` are 0 for given i, then the fraction 0/0 = 0 is used in the calculation. Examples -------- >>> from scipy.spatial import distance >>> distance.canberra([1, 0, 0], [0, 1, 0]) 2.0 >>> distance.canberra([1, 1, 0], [0, 1, 0]) 1.0",
    "type": "function",
    "file_path": "scipy\\scipy\\spatial\\distance.py",
    "ast_data": "FunctionDef name:canberra arg:u arg:v arg:w arguments arg arg arg Assign Call Assign Call If Compare Assign Call With Call Assign Call Assign Call Assign Call Assign If Compare Assign Assign Call Return return:yes"
  },
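The weighted form and the 0/0 = 0 convention documented above combine as follows; the weight vector here is illustrative:

```python
from scipy.spatial import distance

u, v, w = [1, 0, 0], [0, 1, 0], [2.0, 1.0, 1.0]
# Terms: 2*|1-0|/(1+0) + 1*|0-1|/(0+1) + (0/0 treated as 0) = 3.0
print(distance.canberra(u, v, w))
```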
  {
    "library": "tensorflow",
    "name": "get_next_as_optional",
    "source_code": "@deprecation.deprecated(None, 'Use `tf.data.Iterator.get_next_as_optional()` instead.')\n@tf_export('data.experimental.get_next_as_optional')\ndef get_next_as_optional(iterator):\n    return iterator.get_next_as_optional()",
    "docstring": "Returns a with the next element of the iterator. If the iterator has reached the end of the sequence, the returned will have no value. Args: iterator: A . Returns: A object which either contains the next element of the iterator (if it exists) or no value.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\data\\ops\\iterator_ops.py",
    "ast_data": "FunctionDef name:get_next_as_optional arg:iterator arguments arg Return return:yes Call Call Call"
  },
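Per the deprecation note, the same behavior is available directly on the iterator. A minimal sketch with `tf.data`:

```python
import tensorflow as tf

it = iter(tf.data.Dataset.range(1))
opt = it.get_next_as_optional()
print(opt.has_value().numpy(), opt.get_value().numpy())  # True 0
opt = it.get_next_as_optional()
print(opt.has_value().numpy())  # False: iterator exhausted
```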
  {
    "library": "pandas",
    "name": "setitem_inplace",
    "source_code": "def setitem_inplace(self, indexer, value) -> None:\n    if not self._has_no_reference(0):\n        self.blocks = (self._block.copy(),)\n        self._reset_cache()\n    arr = self.array\n    if isinstance(arr, np.ndarray):\n        value = np_can_hold_element(arr.dtype, value)\n    if isinstance(value, np.ndarray) and value.ndim == 1 and (len(value) == 1):\n        value = value[0, ...]\n    arr[indexer] = value",
    "docstring": "Set values with indexer. For SingleBlockManager, this backs s[indexer] = value This is an inplace version of , mutating the manager/values in place, not returning a new Manager (and Block), and thus never changing the dtype.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\internals\\managers.py",
    "ast_data": "FunctionDef name:setitem_inplace arg:self arg:indexer arg:value arguments arg arg arg If Call Assign Call Call Assign If Call Assign Call If BoolOp Call Compare Compare Call Assign Assign"
  },
  {
    "library": "scipy",
    "name": "read_relational_attribute",
    "source_code": "def read_relational_attribute(ofile, relational_attribute, i):\n    r_end_relational = re.compile('^@[Ee][Nn][Dd]\\\\s*' + relational_attribute.name + '\\\\s*$')\n    while not r_end_relational.match(i):\n        m = r_headerline.match(i)\n        if m:\n            isattr = r_attribute.match(i)\n            if isattr:\n                attr, i = tokenize_attribute(ofile, i)\n                relational_attribute.attributes.append(attr)\n            else:\n                raise ValueError(f'Error parsing line {i}')\n        else:\n            i = next(ofile)\n    i = next(ofile)\n    return i",
    "docstring": "Read the nested attributes of a relational attribute",
    "type": "function",
    "file_path": "scipy\\scipy\\io\\arff\\_arffread.py",
    "ast_data": "FunctionDef name:read_relational_attribute arg:ofile arg:relational_attribute arg:i arguments arg arg arg Assign Call While Call Assign Call If Assign Call If Assign Call Call Raise Call Assign Call Assign Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "validate_subgraph_output_types",
    "source_code": "def validate_subgraph_output_types(output: VariableTracker):\n    from . import TensorVariable\n    if (non_tensor_output := find_mismatched_vars(output, TensorVariable, allow_none=True)):\n        for out in non_tensor_output:\n            if isinstance(out, SymNodeVariable) and out.python_type() in (int, bool) or (isinstance(out, ConstantVariable) and out.python_type() in (int, bool)):\n                continue\n            unimplemented(f\"HigherOrderOperator body's output must consist of tensors or ints only but got {out.python_type()}\")",
    "docstring": "Verify that that the output of the subgraph is a tensor, int, bool, SymBool, or SymInt.",
    "type": "function",
    "file_path": "pytorch\\torch\\_dynamo\\variables\\higher_order_ops.py",
    "ast_data": "FunctionDef name:validate_subgraph_output_types arg:output arguments arg If Call For If BoolOp BoolOp Call Compare Call BoolOp Call Compare Call Call Call"
  },
  {
    "library": "scipy",
    "name": "T",
    "source_code": "@property\ndef T(self) -> float:\n    return 1 / self._fs",
    "docstring": "Sampling interval of input signal and of the window. A `n` samples. ShortTimeFFT: Class this property belongs to.",
    "type": "method",
    "file_path": "scipy\\scipy\\signal\\_short_time_fft.py",
    "ast_data": "FunctionDef name:T arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "argmax_v2",
    "source_code": "@tf_export('math.argmax', 'argmax', v1=[])\n@dispatch.add_dispatch_support\ndef argmax_v2(input, axis=None, output_type=dtypes.int64, name=None):\n    if axis is None:\n        axis = 0\n    return gen_math_ops.arg_max(input, axis, name=name, output_type=output_type)",
    "docstring": "Returns the index with the largest value across axes of a tensor. In case of identity returns the smallest index. For example: >>> A = tf.constant([2, 20, 30, 3, 6]) >>> tf.math.argmax(A) # A[2] is maximum in tensor A >>> B = tf.constant([[2, 20, 30, 3, 6], [3, 11, 16, 1, 8], ... [14, 45, 23, 5, 27]]) >>> tf.math.argmax(B, 0) >>> tf.math.argmax(B, 1) >>> C = tf.constant([0, 0, 0, 0]) >>> tf.math.argmax(C) # Returns smallest index in case of ties Args: input: A . axis: An integer, the axis to reduce across. Default to 0. output_type: An optional output dtype ( or ). Defaults to . name: An optional name for the operation. Returns: A of type .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\math_ops.py",
    "ast_data": "FunctionDef name:argmax_v2 arg:input arg:axis arg:output_type arg:name arguments arg arg arg arg If Compare Assign Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "num_replicas_in_sync",
    "source_code": "@property\ndef num_replicas_in_sync(self):\n    return self._num_replicas_in_sync",
    "docstring": "Returns the number of compute replicas in sync.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\distribute_lib.py",
    "ast_data": "FunctionDef name:num_replicas_in_sync arg:self arguments arg Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "set_default_locators_and_formatters",
    "source_code": "def set_default_locators_and_formatters(self, axis):\n    raise NotImplementedError()",
    "docstring": "Set the locators and formatters of *axis* to instances suitable for this scale.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\scale.py",
    "ast_data": "FunctionDef name:set_default_locators_and_formatters arg:self arg:axis arguments arg arg Raise Call"
  },
  {
    "library": "kornia",
    "name": "homography_warp3d",
    "source_code": "def homography_warp3d(patch_src: Tensor, src_homo_dst: Tensor, dsize: tuple[int, int, int], mode: str='bilinear', padding_mode: str='zeros', align_corners: bool=False, normalized_coordinates: bool=True) -> Tensor:\n    if not src_homo_dst.device == patch_src.device:\n        raise TypeError(f'Patch and homography must be on the same device. Got patch.device: {patch_src.device} src_H_dst.device: {src_homo_dst.device}.')\n    depth, height, width = dsize\n    grid = create_meshgrid3d(depth, height, width, normalized_coordinates=normalized_coordinates, device=patch_src.device)\n    warped_grid = warp_grid3d(grid, src_homo_dst)\n    return F.grid_sample(patch_src, warped_grid, mode=mode, padding_mode=padding_mode, align_corners=align_corners)",
    "docstring": "Warp image patches or tensors by normalized 3D homographies. Args: patch_src: The image or tensor to warp. Should be from source of shape :math:. src_homo_dst: The homography or stack of homographies from destination to source of shape :math:. dsize: The height and width of the image to warp. mode: interpolation mode to calculate output values ``. align_corners: interpolation flag. normalized_coordinates: Whether the homography assumes [-1, 1] normalized coordinates or not. Return: Patch sampled at locations from source to destination. Example: >>> input = torch.rand(1, 3, 32, 32) >>> homography = torch.eye(3).view(1, 3, 3) >>> output = homography_warp(input, homography, (32, 32))",
    "type": "function",
    "file_path": "kornia\\kornia\\geometry\\transform\\imgwarp.py",
    "ast_data": "FunctionDef name:homography_warp3d arg:patch_src arg:src_homo_dst arg:dsize arg:mode arg:padding_mode arg:align_corners arg:normalized_coordinates arguments arg arg arg arg arg arg arg If Compare Raise Call Assign Assign Call Assign Call Return return:yes Call"
  },
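A hedged identity-warp sketch of the function above; `align_corners=True` is passed so the normalized mesh grid and `grid_sample` agree and the volume is reproduced. The import path follows `file_path` and assumes recent kornia re-exports:

```python
import torch
from kornia.geometry.transform import homography_warp3d

vol = torch.rand(1, 1, 4, 8, 8)   # (N, C, D, H, W)
H = torch.eye(4).view(1, 4, 4)    # identity dst->src homography
out = homography_warp3d(vol, H, dsize=(4, 8, 8), align_corners=True)
print(torch.allclose(out, vol, atol=1e-5))  # True: identity warp
```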
  {
    "library": "scikit-learn",
    "name": "_sample_visibles",
    "source_code": "def _sample_visibles(self, h, rng):\n    p = np.dot(h, self.components_)\n    p += self.intercept_visible_\n    expit(p, out=p)\n    return rng.uniform(size=p.shape) < p",
    "docstring": "Sample from the distribution P(v|h). Parameters ---------- h : ndarray of shape (n_samples, n_components) Values of the hidden layer to sample from. rng : RandomState instance Random number generator to use. Returns ------- v : ndarray of shape (n_samples, n_features) Values of the visible layer.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\neural_network\\_rbm.py",
    "ast_data": "FunctionDef name:_sample_visibles arg:self arg:h arg:rng arguments arg arg arg Assign Call Call Return return:yes Compare Call"
  },
  {
    "library": "matplotlib",
    "name": "_unmultiplied_rgba8888_to_premultiplied_argb32",
    "source_code": "def _unmultiplied_rgba8888_to_premultiplied_argb32(rgba8888):\n    if sys.byteorder == 'little':\n        argb32 = np.take(rgba8888, [2, 1, 0, 3], axis=2)\n        rgb24 = argb32[..., :-1]\n        alpha8 = argb32[..., -1:]\n    else:\n        argb32 = np.take(rgba8888, [3, 0, 1, 2], axis=2)\n        alpha8 = argb32[..., :1]\n        rgb24 = argb32[..., 1:]\n    if alpha8.min() != 255:\n        np.multiply(rgb24, alpha8 / 255, out=rgb24, casting='unsafe')\n    return argb32",
    "docstring": "Convert an unmultiplied RGBA8888 buffer to a premultiplied ARGB32 buffer.",
    "type": "function",
    "file_path": "matplotlib\\lib\\matplotlib\\cbook.py",
    "ast_data": "FunctionDef name:_unmultiplied_rgba8888_to_premultiplied_argb32 arg:rgba8888 arguments arg If Compare Assign Call Assign Assign Assign Call Assign Assign If Compare Call Call Return return:yes"
  },
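The conversion above reorders channels for the platform byte order and, whenever any pixel is transparent, scales RGB by alpha/255. A minimal numpy sketch of just the premultiply step:

```python
import numpy as np

rgba = np.array([[[255, 0, 0, 128]]], dtype=np.uint8)  # half-transparent red
rgb = rgba[..., :3].astype(float)
alpha = rgba[..., 3:].astype(float)

premultiplied = (rgb * alpha / 255).astype(np.uint8)
print(premultiplied)   # [[[128 0 0]]] -- color scaled by coverage
```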
  {
    "library": "tensorflow",
    "name": "match_dtype_and_rank",
    "source_code": "def match_dtype_and_rank(y_t, y_p, sw):\n    if y_t.shape.rank == 1 and y_p.shape.rank == 2:\n        y_t = array_ops.expand_dims_v2(y_t, axis=-1)\n    if sw is not None:\n        if sw.shape.rank == 1 and y_p.shape.rank == 2:\n            sw = array_ops.expand_dims_v2(sw, axis=-1)\n    if y_t.dtype.is_floating and y_p.dtype.is_floating or (y_t.dtype.is_integer and y_p.dtype.is_integer):\n        y_t = math_ops.cast(y_t, y_p.dtype)\n    if sw is not None:\n        sw = math_ops.cast(sw, y_p.dtype)\n    return (y_t, y_p, sw)",
    "docstring": "Match dtype and rank of predictions.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\compile_utils.py",
    "ast_data": "FunctionDef name:match_dtype_and_rank arg:y_t arg:y_p arg:sw arguments arg arg arg If BoolOp Compare Compare Assign Call If Compare If BoolOp Compare Compare Assign Call If BoolOp BoolOp BoolOp Assign Call If Compare Assign Call Return return:yes"
  },
  {
    "library": "seaborn",
    "name": "Alpha",
    "source_code": "class Alpha(IntervalProperty):\n    _default_range = (0.3, 0.95)",
    "docstring": "Opacity of the color values for an arbitrary mark.",
    "type": "class",
    "file_path": "seaborn\\seaborn\\_core\\properties.py",
    "ast_data": "ClassDef name:Alpha Assign"
  },
  {
    "library": "scipy",
    "name": "Mishra08",
    "source_code": "class Mishra08(Benchmark):\n\n    def __init__(self, dimensions=2):\n        Benchmark.__init__(self, dimensions)\n        self._bounds = list(zip([-10.0] * self.N, [10.0] * self.N))\n        self.custom_bounds = [(1.0, 2.0), (-4.0, 1.0)]\n        self.global_optimum = [[2.0, -3.0]]\n        self.fglob = 0.0\n\n    def fun(self, x, *args):\n        self.nfev += 1\n        val = abs(x[0] ** 10 - 20 * x[0] ** 9 + 180 * x[0] ** 8 - 960 * x[0] ** 7 + 3360 * x[0] ** 6 - 8064 * x[0] ** 5 + 13340 * x[0] ** 4 - 15360 * x[0] ** 3 + 11520 * x[0] ** 2 - 5120 * x[0] + 2624)\n        val += abs(x[1] ** 4 + 12 * x[1] ** 3 + 54 * x[1] ** 2 + 108 * x[1] + 81)\n        return 0.001 * val ** 2",
    "docstring": "Mishra 8 objective function. This class defines the Mishra 8 [1]_ global optimization problem. This is a multimodal minimization problem defined as follows: .. math:: f_{\\text{Mishra08}}(x) = 0.001 \\left[\\lvert x_1^{10} - 20x_1^9 + 180x_1^8 - 960 x_1^7 + 3360x_1^6 - 8064x_1^5 + 13340x_1^4 - 15360x_1^3 + 11520x_1^2 - 5120x_1 + 2624 \\rvert \\lvert x_2^4 + 12x_2^3 + 54x_2^2 + 108x_2 + 81 \\rvert \\right]^2 with :math: for :math:. *Global optimum*: :math: for :math: .. [1] Mishra, S. Global Optimization by Differential Evolution and Particle Swarm Methods: Evaluation on Some Benchmark Functions. Munich Personal RePEc Archive, 2006, 1005 TODO Line 1065",
    "type": "class",
    "file_path": "scipy\\benchmarks\\benchmarks\\go_benchmark_functions\\go_funcs_M.py",
    "ast_data": "ClassDef name:Mishra08 FunctionDef name:__init__ arg:self arg:dimensions arguments arg arg Call Assign Call Call Assign Assign Assign FunctionDef name:fun arg:self arg:x arguments arg arg arg Assign Call Call Return return:yes"
  },
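A quick check that the benchmark's fun() is zero at its documented optimum (the sketch mirrors the code above, which sums the two absolute terms before squaring):

```python
import numpy as np

def mishra08(x):
    t1 = abs(x[0]**10 - 20*x[0]**9 + 180*x[0]**8 - 960*x[0]**7 + 3360*x[0]**6
             - 8064*x[0]**5 + 13340*x[0]**4 - 15360*x[0]**3 + 11520*x[0]**2
             - 5120*x[0] + 2624)
    t2 = abs(x[1]**4 + 12*x[1]**3 + 54*x[1]**2 + 108*x[1] + 81)
    return 0.001 * (t1 + t2) ** 2

print(mishra08(np.array([2.0, -3.0])))   # 0.0 at the global optimum
```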
  {
    "library": "scipy",
    "name": "max_distance_point",
    "source_code": "def max_distance_point(self, x, p=2.0):\n    return minkowski_distance(0, np.maximum(self.maxes - x, x - self.mins), p)",
    "docstring": "Return the maximum distance between input and points in the hyperrectangle. Parameters ---------- x : array_like Input array. p : float, optional Input.",
    "type": "method",
    "file_path": "scipy\\scipy\\spatial\\_kdtree.py",
    "ast_data": "FunctionDef name:max_distance_point arg:self arg:x arg:p arguments arg arg arg Return return:yes Call Call"
  },
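The method's logic in plain numpy: per axis, the farthest box point from x is whichever of (maxes - x) or (x - mins) is larger, and the result is that vector's p-norm. A sketch with a hypothetical 2-D box:

```python
import numpy as np

mins, maxes = np.array([0.0, 0.0]), np.array([1.0, 2.0])
x = np.array([0.25, 0.5])

per_axis = np.maximum(maxes - x, x - mins)   # [0.75, 1.5]
print(np.linalg.norm(per_axis))              # p=2 max distance, ~1.677
```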
  {
    "library": "tensorflow",
    "name": "Options",
    "source_code": "class Options(object):\n\n    def __init__(self, bytes_per_pack=0, timeout_seconds=None, implementation=CommunicationImplementation.AUTO):\n        if bytes_per_pack < 0:\n            raise ValueError(f'Argument `bytes_per_pack` must be >=0, Received {bytes_per_pack}.')\n        if isinstance(implementation, str):\n            implementation = CommunicationImplementation(implementation.upper())\n        if not isinstance(implementation, CommunicationImplementation):\n            raise ValueError('Argument `implementation` must be instance of `tf.distribute.experimental.CommunicationImplementation`.')\n        self.bytes_per_pack = bytes_per_pack\n        self.timeout_seconds = timeout_seconds\n        self.implementation = implementation\n    __init__.__doc__ = _OptionsExported.__init__.__doc__\n\n    def merge(self, options):\n        merged = copy.deepcopy(self)\n        if options is None:\n            return merged\n        if options.bytes_per_pack != 0:\n            merged.bytes_per_pack = options.bytes_per_pack\n        if options.timeout_seconds is not None:\n            merged.timeout_seconds = options.timeout_seconds\n        if options.implementation != CommunicationImplementation.AUTO:\n            merged.implementation = options.implementation\n        return merged\n\n    def __str__(self):\n        return f'Options(bytes_per_pack={self.bytes_per_pack},timeout_seconds={self.timeout_seconds}, implementation={self.implementation})'",
    "docstring": "Implementation of OptionsInterface.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\collective_util.py",
    "ast_data": "ClassDef name:Options FunctionDef name:__init__ arg:self arg:bytes_per_pack arg:timeout_seconds arg:implementation arguments arg arg arg arg If Compare Raise Call If Call Assign Call Call If Call Raise Call Assign Assign Assign Assign FunctionDef name:merge arg:self arg:options arguments arg arg Assign Call If Compare Return return:yes If Compare Assign If Compare Assign If Compare Assign Return return:yes FunctionDef name:__str__ arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_PendingCount",
    "source_code": "def _PendingCount(to_ops: list[ops.Operation], from_ops: list[ops.Operation], colocate_gradients_with_ops, func_graphs, xs_set):\n    reached_ops = set()\n    _MarkReachedOps(from_ops, reached_ops, func_graphs)\n    reachable_to_ops = set((op for op in to_ops if op in reached_ops))\n    between_ops = set()\n    between_op_list = []\n    queue = collections.deque()\n    queue.extend(to_ops)\n    while queue:\n        op = queue.popleft()\n        if op in reached_ops:\n            between_ops.add(op)\n            between_op_list.append(op)\n            reached_ops.remove(op)\n            for inp in _NonEagerInputs(op, xs_set):\n                queue.append(inp.op)\n    loop_state = control_flow_state.MaybeCreateControlFlowState(between_op_list, between_ops, colocate_gradients_with_ops)\n    pending_count = collections.defaultdict(int)\n    for op in between_op_list:\n        for x in _NonEagerInputs(op, xs_set):\n            if x.op in between_ops:\n                pending_count[x.op] += 1\n    return (reachable_to_ops, pending_count, loop_state)",
    "docstring": "Initialize the pending count for ops between two lists of Operations. 'pending_count[op]' indicates the number of backprop inputs to this operation. Args: to_ops: list of Operations. from_ops: list of Operations. colocate_gradients_with_ops: Python bool. See docstring of gradients(). func_graphs: list of FuncGraphs. This method will traverse through these functions if they capture from_ops or any reachable ops. This is useful if to_ops occur in a function and from_ops are in an outer function or graph. xs_set: ObjectIdentitySet of Tensors. Returns: A tuple containing: (1) the subset of to_ops reachable from from_ops by a path of zero or more backpropagatable tensors, (2) a mapping from operation to the number of backprop inputs to that op, and (3) a ControlFlowState object which is not None if the ops between from_ops and to_ops contain control flow loops.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\gradients_util.py",
    "ast_data": "FunctionDef name:_PendingCount arg:to_ops arg:from_ops arg:colocate_gradients_with_ops arg:func_graphs arg:xs_set arguments arg arg arg arg arg Assign Call Call Assign Call Compare Assign Call Assign Assign Call Call While Assign Call If Compare Call Call Call For Call Call Assign Call Assign Call For For Call If Compare Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "get_v1_names",
    "source_code": "def get_v1_names(symbol: Any) -> Sequence[str]:\n    names_v1 = []\n    tensorflow_api_attr_v1 = API_ATTRS_V1[TENSORFLOW_API_NAME].names\n    keras_api_attr_v1 = API_ATTRS_V1[KERAS_API_NAME].names\n    if not hasattr(symbol, '__dict__'):\n        return names_v1\n    if tensorflow_api_attr_v1 in symbol.__dict__:\n        names_v1.extend(getattr(symbol, tensorflow_api_attr_v1))\n    if keras_api_attr_v1 in symbol.__dict__:\n        names_v1.extend(getattr(symbol, keras_api_attr_v1))\n    return names_v1",
    "docstring": "Get a list of TF 1.* names for this symbol. Args: symbol: symbol to get API names for. Returns: List of all API names for this symbol.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\util\\tf_export.py",
    "ast_data": "FunctionDef name:get_v1_names arg:symbol arguments arg Assign Assign Assign If Call Return return:yes If Compare Call Call If Compare Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "FixedLenSequenceFeature",
    "source_code": "@tf_export('io.FixedLenSequenceFeature', v1=['io.FixedLenSequenceFeature', 'FixedLenSequenceFeature'])\nclass FixedLenSequenceFeature(collections.namedtuple('FixedLenSequenceFeature', ['shape', 'dtype', 'allow_missing', 'default_value'])):\n\n    def __new__(cls, shape, dtype, allow_missing=False, default_value=None):\n        return super(FixedLenSequenceFeature, cls).__new__(cls, shape, dtype, allow_missing, default_value)",
    "docstring": "Configuration for parsing a variable-length input feature into a . The resulting of parsing a single or has a static of and the specified . The resulting of parsing a many s has a static of and the specified . The entries in the from different will be padded with to the maximum length present in the . To treat a sparse input as dense, provide ; otherwise, the parse functions will fail on any examples missing this feature. Fields: shape: Shape of input data for dimension 2 and higher. First dimension is of variable length . dtype: Data type of input. allow_missing: Whether to allow this feature to be missing from a feature list item. Is available only for parsing not for parsing . default_value: Scalar value to be used to pad multiple s to their maximum length. Irrelevant for parsing a single or . Defaults to \"\" for dtype string and 0 otherwise (optional).",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\parsing_config.py",
    "ast_data": "ClassDef name:FixedLenSequenceFeature Call FunctionDef name:__new__ arg:cls arg:shape arg:dtype arg:allow_missing arg:default_value arguments arg arg arg arg arg Return return:yes Call Call Call"
  },
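A hedged usage sketch: parsing a batch where one example carries a shorter list, with allow_missing=True so the feature may be short or absent; rows are padded to the longest value present in the batch:

```python
import tensorflow as tf

examples = [
    tf.train.Example(features=tf.train.Features(feature={
        'ids': tf.train.Feature(int64_list=tf.train.Int64List(value=v))
    })).SerializeToString()
    for v in ([1, 2, 3], [4])
]
parsed = tf.io.parse_example(
    tf.constant(examples),
    {'ids': tf.io.FixedLenSequenceFeature([], tf.int64, allow_missing=True)})
print(parsed['ids'])   # shape (2, 3); the short row is padded with 0
```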
  {
    "library": "tensorflow",
    "name": "_substitute_impl_function_name_template",
    "source_code": "def _substitute_impl_function_name_template(module: str) -> str:\n    compiled_regex = re.compile('GenerateImplFunctionName\\\\(([\\\\w\\\\s]+)\\\\)')\n    while True:\n        func_match = re.search(compiled_regex, module)\n        if func_match is None:\n            break\n        text = func_match.group(1)\n        function_name = 'internal_{}_fn'.format(_format_snake_case_op_name(text))\n        module = re.sub(compiled_regex, function_name, module, count=1)\n    return module",
    "docstring": "Generates the op-specific implementation function name.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\compiler\\mlir\\quantization\\tensorflow\\gen_quantized_function_library.py",
    "ast_data": "FunctionDef name:_substitute_impl_function_name_template arg:module arguments arg Assign Call While Assign Call If Compare Assign Call Assign Call Call Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_deserialization_dependencies",
    "source_code": "def _deserialization_dependencies(self, children):\n    del children\n    return {}",
    "docstring": "Returns a dictionary containing that this object depends on. Dependencies define the order to serialize and deserialize objects in the SavedModel. For example: class A(Trackable): b = B() def _deserialization_dependencies(self, children): return {'b': self.b} class B(Trackable): pass We say that object depends on . Dependencies are guaranteed to be serialized and deserialized before the object depending on them. The following methods use dependencies: - [loading] SavedModel loads with the bottom-up approach, by first creating all objects in the order defined by the dependencies, then connecting the children. Unlike , this function does not define the . It only changes the order in which things are saved/loaded. Therefore, if there are dependencies that are not in the , saving will fail. Args: children: Dict returned from . Returns: A dictionary mapping names to .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\trackable\\base.py",
    "ast_data": "FunctionDef name:_deserialization_dependencies arg:self arg:children arguments arg arg Return return:no"
  },
  {
    "library": "pandas",
    "name": "sample",
    "source_code": "def sample(obj_len: int, size: int, replace: bool, weights: np.ndarray | None, random_state: np.random.RandomState | np.random.Generator) -> np.ndarray:\n    if weights is not None:\n        weight_sum = weights.sum()\n        if weight_sum != 0:\n            weights = weights / weight_sum\n        else:\n            raise ValueError('Invalid weights: weights sum to zero')\n    return random_state.choice(obj_len, size=size, replace=replace, p=weights).astype(np.intp, copy=False)",
    "docstring": "Randomly sample indices in . Parameters ---------- obj_len : int The length of the indices being considered size : int The number of values to choose replace : bool Allow or disallow sampling of the same row more than once. weights : np.ndarray[np.float64] or None If None, equal probability weighting, otherwise weights according to the vector normalized random_state: np.random.RandomState or np.random.Generator State used for the random sampling Returns ------- np.ndarray[np.intp]",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\sample.py",
    "ast_data": "FunctionDef name:sample arg:obj_len arg:size arg:replace arg:weights arg:random_state arguments arg arg arg arg arg If Compare Assign Call If Compare Assign Raise Call Return return:yes Call Call"
  },
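The normalization step above in isolation: weights are rescaled to sum to one before being passed as choice probabilities (an all-zero weight vector raises instead):

```python
import numpy as np

rng = np.random.default_rng(0)
weights = np.array([1.0, 3.0, 0.0, 1.0])
p = weights / weights.sum()   # as in sample(); a zero sum would raise

idx = rng.choice(4, size=10, replace=True, p=p).astype(np.intp)
print(idx)   # index 2 never appears; index 1 dominates (~60% of draws)
```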
  {
    "library": "django",
    "name": "T",
    "source_code": "def T(self):\n    if self.timezone is None:\n        return ''\n    return str(self.timezone.tzname(self.data))",
    "docstring": "Time zone of this machine; e.g. 'EST' or 'MDT'. If timezone information is not available, return an empty string.",
    "type": "method",
    "file_path": "django\\django\\utils\\dateformat.py",
    "ast_data": "FunctionDef name:T arg:self arguments arg If Compare Return return:yes Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_reset_build_compile_trackers",
    "source_code": "def _reset_build_compile_trackers(model):\n    model.built = False\n    model.inputs = None\n    model.outputs = None\n    model._is_compiled = False\n    if not ops.executing_eagerly_outside_functions():\n        model._v1_compile_was_called = False\n    model.optimizer = None",
    "docstring": "Reset state trackers for model. Note that we do not actually zero out attributes such as optimizer, but instead rely on the expectation that all of the attrs will be over-written on calling build/compile/etc. This is somewhat fragile, insofar as we check elsewhere for the presence of these attributes as evidence of having been built/compiled/etc. Pending a better way to do this, we reset key attributes here to allow building and compiling. Args: model: the model that is being reset",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\models.py",
    "ast_data": "FunctionDef name:_reset_build_compile_trackers arg:model arguments arg Assign Assign Assign Assign If Call Assign Assign"
  },
  {
    "library": "tensorflow",
    "name": "_broadcast_inner_dimension_to_uniform",
    "source_code": "def _broadcast_inner_dimension_to_uniform(self, axis, length):\n    dim_size = self.dimension_size(axis)\n    axis_in_inner_dims = axis - self.num_partitioned_dimensions\n    partitioned_sizes = self._partitioned_dim_sizes\n    inner_sizes = array_ops.concat([self._inner_dim_sizes[:axis_in_inner_dims], [array_ops.where(math_ops.equal(dim_size, 1), length, dim_size)], self._inner_dim_sizes[axis_in_inner_dims + 1:]], axis=0)\n    return RaggedTensorDynamicShape(partitioned_sizes, inner_sizes, self.dim_size_dtype)",
    "docstring": "Broadcasts the inner dimension to match .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\ragged_tensor_shape.py",
    "ast_data": "FunctionDef name:_broadcast_inner_dimension_to_uniform arg:self arg:axis arg:length arguments arg arg arg Assign Call Assign Assign Assign Call Call Call Return return:yes Call"
  },
  {
    "library": "sphinx",
    "name": "ReSTRole",
    "source_code": "class ReSTRole(ReSTMarkup):\n\n    def handle_signature(self, sig: str, signode: desc_signature) -> str:\n        desc_name = f':{sig}:'\n        signode['fullname'] = sig.strip()\n        signode += addnodes.desc_name(desc_name, desc_name)\n        return sig\n\n    def get_index_text(self, objectname: str, name: str) -> str:\n        return _('%s (role)') % name",
    "docstring": "Description of a reST role.",
    "type": "class",
    "file_path": "sphinx\\sphinx\\domains\\rst.py",
    "ast_data": "ClassDef name:ReSTRole FunctionDef name:handle_signature arg:self arg:sig arg:signode arguments arg arg arg Assign Assign Call Call Return return:yes FunctionDef name:get_index_text arg:self arg:objectname arg:name arguments arg arg arg Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "shift",
    "source_code": "@final\ndef shift(self, periods: int, fill_value: Any=None) -> list[Block]:\n    new_values = self.values.T.shift(periods=periods, fill_value=fill_value).T\n    return [self.make_block_same_class(new_values)]",
    "docstring": "Shift the block by . Dispatches to underlying ExtensionArray and re-boxes in an ExtensionBlock.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\internals\\blocks.py",
    "ast_data": "FunctionDef name:shift arg:self arg:periods arg:fill_value arguments arg arg arg Assign Call Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "onesided_fft",
    "source_code": "@property\ndef onesided_fft(self) -> bool:\n    return self.fft_mode in {'onesided', 'onesided2X'}",
    "docstring": "Return True if a one-sided FFT is used. Returns `fft_mode` is either 'onesided' or 'onesided2X'. See Also -------- fft_mode: Utilized FFT ('twosided', 'centered', 'onesided' or 'onesided2X') ShortTimeFFT: Class this property belongs to.",
    "type": "method",
    "file_path": "scipy\\scipy\\signal\\_short_time_fft.py",
    "ast_data": "FunctionDef name:onesided_fft arg:self arguments arg Return return:yes Compare"
  },
  {
    "library": "tensorflow",
    "name": "_rsqrt_grad_flops",
    "source_code": "@ops.RegisterStatistics('RsqrtGrad', 'flops')\ndef _rsqrt_grad_flops(graph, node):\n    return _binary_per_element_op_flops(graph, node, ops_per_element=4)",
    "docstring": "Compute flops for RsqrtGrad operation.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\profiler\\internal\\flops_registry.py",
    "ast_data": "FunctionDef name:_rsqrt_grad_flops arg:graph arg:node arguments arg arg Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "MenuItem",
    "source_code": "class MenuItem:\n\n    def __init__(self, caption, content, enabled=True):\n        self._caption = caption\n        self._content = content\n        self._enabled = enabled\n\n    @property\n    def caption(self):\n        return self._caption\n\n    @property\n    def type(self):\n        return self._node_type\n\n    @property\n    def content(self):\n        return self._content\n\n    def is_enabled(self):\n        return self._enabled\n\n    def disable(self):\n        self._enabled = False\n\n    def enable(self):\n        self._enabled = True",
    "docstring": "A class for an item in a text-based menu.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\debug\\cli\\debugger_cli_common.py",
    "ast_data": "ClassDef name:MenuItem FunctionDef name:__init__ arg:self arg:caption arg:content arg:enabled arguments arg arg arg arg Assign Assign Assign FunctionDef name:caption arg:self arguments arg Return return:yes FunctionDef name:type arg:self arguments arg Return return:yes FunctionDef name:content arg:self arguments arg Return return:yes FunctionDef name:is_enabled arg:self arguments arg Return return:yes FunctionDef name:disable arg:self arguments arg Assign FunctionDef name:enable arg:self arguments arg Assign"
  },
  {
    "library": "matplotlib",
    "name": "find_bezier_t_intersecting_with_closedpath",
    "source_code": "def find_bezier_t_intersecting_with_closedpath(bezier_point_at_t, inside_closedpath, t0=0.0, t1=1.0, tolerance=0.01):\n    start = bezier_point_at_t(t0)\n    end = bezier_point_at_t(t1)\n    start_inside = inside_closedpath(start)\n    end_inside = inside_closedpath(end)\n    if start_inside == end_inside and start != end:\n        raise NonIntersectingPathException('Both points are on the same side of the closed path')\n    while True:\n        if np.hypot(start[0] - end[0], start[1] - end[1]) < tolerance:\n            return (t0, t1)\n        middle_t = 0.5 * (t0 + t1)\n        middle = bezier_point_at_t(middle_t)\n        middle_inside = inside_closedpath(middle)\n        if start_inside ^ middle_inside:\n            t1 = middle_t\n            if end == middle:\n                return (t0, t1)\n            end = middle\n        else:\n            t0 = middle_t\n            if start == middle:\n                return (t0, t1)\n            start = middle\n            start_inside = middle_inside",
    "docstring": "Find the intersection of the Bézier curve with a closed path. The intersection point *t* is approximated by two parameters *t0*, *t1* such that *t0* tuple[float, float] inside_closedpath : callable A function returning True if a given point (x, y) is inside the closed path. It must have the signature:: inside_closedpath(point: tuple[float, float]) -> bool t0, t1 : float Start parameters for the search. tolerance : float Maximal allowed distance between the final points. Returns ------- t0, t1 : float The Bézier path parameters.",
    "type": "function",
    "file_path": "matplotlib\\lib\\matplotlib\\bezier.py",
    "ast_data": "FunctionDef name:find_bezier_t_intersecting_with_closedpath arg:bezier_point_at_t arg:inside_closedpath arg:t0 arg:t1 arg:tolerance arguments arg arg arg arg arg Assign Call Assign Call Assign Call Assign Call If BoolOp Compare Compare Raise Call While If Compare Call Return return:yes Assign Assign Call Assign Call If Assign If Compare Return return:yes Assign Assign If Compare Return return:yes Assign Assign"
  },
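A usage sketch of the bisection above: a straight segment standing in for the Bézier, intersected with the unit disk (both callables are hypothetical test inputs):

```python
import numpy as np
from matplotlib.bezier import find_bezier_t_intersecting_with_closedpath

line = lambda t: (2 * t, 0.0)                    # segment (0,0) -> (2,0)
inside_unit_disk = lambda p: np.hypot(*p) < 1.0  # "closed path" = unit disk

t0, t1 = find_bezier_t_intersecting_with_closedpath(
    line, inside_unit_disk, tolerance=1e-4)
print((t0 + t1) / 2)   # ~0.5: the segment crosses the circle at x = 1
```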
  {
    "library": "tensorflow",
    "name": "_build_shuffle_scatter",
    "source_code": "def _build_shuffle_scatter(reduced_shards, dst_devices):\n    num_devices = len(dst_devices)\n    out_tensors = []\n    for d in range(0, num_devices):\n        with ops.device(dst_devices[d]):\n            out_tensors.append(array_ops.concat(reduced_shards, 0))\n    return out_tensors",
    "docstring": "Build the scatter phase of shuffle all-reduce. Args: reduced_shards: list of fully reduced shards dst_devices: list of names of devices at which the fully-reduced value should be reconstituted. Returns: list of scattered tensors.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\v1\\all_reduce.py",
    "ast_data": "FunctionDef name:_build_shuffle_scatter arg:reduced_shards arg:dst_devices arguments arg arg Assign Call Assign For Call With Call Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "check_cache",
    "source_code": "def check_cache(cache: dict[str, Any], callback: Any=None) -> bool:\n    hit = True\n    for choice in choices:\n        choice_hash = choice.hash_key()\n        if choice_hash in cache.get(op, {}).get(inputs, {}).get(precision, {}):\n            timings[choice] = cache[op][inputs][precision][choice_hash]\n        else:\n            hit = False\n            break\n    if callback:\n        callback(cached=hit)\n    return hit",
    "docstring": "Check if contains data for all the choices",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\codecache.py",
    "ast_data": "FunctionDef name:check_cache arg:cache arg:callback arguments arg arg Assign For Assign Call If Compare Call Call Call Assign Assign If Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_wrap",
    "source_code": "def _wrap(f, reverse=False):\n\n    def _f(a, b):\n        if reverse:\n            a, b = (b, a)\n        if getattr(b, '__array_priority__', 0) > np_arrays.ndarray.__array_priority__:\n            return NotImplemented\n        return f(a, b)\n    return _f",
    "docstring": "Wraps binary ops so they can be added as operator overloads on ndarray.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\numpy_ops\\np_math_ops.py",
    "ast_data": "FunctionDef name:_wrap arg:f arg:reverse arguments arg arg FunctionDef name:_f arg:a arg:b arguments arg arg If Assign If Compare Call Return return:yes Return return:yes Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "parents",
    "source_code": "@property\ndef parents(self):\n    return [self.source_column]",
    "docstring": "See 'FeatureColumn` base class.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\feature_column\\feature_column_v2.py",
    "ast_data": "FunctionDef name:parents arg:self arguments arg Return return:yes"
  },
  {
    "library": "django",
    "name": "remove_constraint",
    "source_code": "def remove_constraint(self, model, constraint):\n    sql = constraint.remove_sql(model, self)\n    if sql:\n        self.execute(sql)",
    "docstring": "Remove a constraint from a model.",
    "type": "method",
    "file_path": "django\\django\\db\\backends\\base\\schema.py",
    "ast_data": "FunctionDef name:remove_constraint arg:self arg:model arg:constraint arguments arg arg arg Assign Call If Call"
  },
  {
    "library": "numpy",
    "name": "flatten_descr",
    "source_code": "def flatten_descr(ndtype):\n    names = ndtype.names\n    if names is None:\n        return (('', ndtype),)\n    else:\n        descr = []\n        for field in names:\n            typ, _ = ndtype.fields[field]\n            if typ.names is not None:\n                descr.extend(flatten_descr(typ))\n            else:\n                descr.append((field, typ))\n        return tuple(descr)",
    "docstring": "Flatten a structured data-type description. Examples -------- >>> import numpy as np >>> from numpy.lib import recfunctions as rfn >>> ndtype = np.dtype([('a', '>> rfn.flatten_descr(ndtype) (('a', dtype('int32')), ('ba', dtype('float64')), ('bb', dtype('int32')))",
    "type": "function",
    "file_path": "numpy\\numpy\\lib\\recfunctions.py",
    "ast_data": "FunctionDef name:flatten_descr arg:ndtype arguments arg Assign If Compare Return return:yes Assign For Assign If Compare Call Call Call Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "safe_mask",
    "source_code": "@validate_params({'X': ['array-like', 'sparse matrix'], 'mask': ['array-like']}, prefer_skip_nested_validation=True)\ndef safe_mask(X, mask):\n    mask = np.asarray(mask)\n    if np.issubdtype(mask.dtype, np.signedinteger):\n        return mask\n    if hasattr(X, 'toarray'):\n        ind = np.arange(mask.shape[0])\n        mask = ind[mask]\n    return mask",
    "docstring": "Return a mask which is safe to use on X. Parameters ---------- X : {array-like, sparse matrix} Data on which to apply mask. mask : array-like Mask to be used on X. Returns ------- mask : ndarray Array that is safe to use on X. Examples -------- >>> from sklearn.utils import safe_mask >>> from scipy.sparse import csr_matrix >>> data = csr_matrix([[1], [2], [3], [4], [5]]) >>> condition = [False, True, True, False, True] >>> mask = safe_mask(data, condition) >>> data[mask].toarray() array([[2], [3], [5]])",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\utils\\_mask.py",
    "ast_data": "FunctionDef name:safe_mask arg:X arg:mask arguments arg arg Assign Call If Call Return return:yes If Call Assign Call Assign Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "NamedAgg",
    "source_code": "@set_module('pandas')\nclass NamedAgg(NamedTuple):\n    column: Hashable\n    aggfunc: AggScalar",
    "docstring": "Helper for column specific aggregation with control over output column names. Subclass of typing.NamedTuple. Parameters ---------- column : Hashable Column label in the DataFrame to apply aggfunc. aggfunc : function or str Function to apply to the provided column. If string, the name of a built-in pandas function. See Also -------- DataFrame.groupby : Group DataFrame using a mapper or by a Series of columns. Examples -------- >>> df = pd.DataFrame({\"key\": [1, 1, 2], \"a\": [-1, 0, 1], 1: [10, 11, 12]}) >>> agg_a = pd.NamedAgg(column=\"a\", aggfunc=\"min\") >>> agg_1 = pd.NamedAgg(column=1, aggfunc=lambda x: np.mean(x)) >>> df.groupby(\"key\").agg(result_a=agg_a, result_1=agg_1) result_a result_1 key 1 -1 10.5 2 1 12.0",
    "type": "class",
    "file_path": "pandas\\pandas\\core\\groupby\\generic.py",
    "ast_data": "ClassDef name:NamedAgg Call"
  },
  {
    "library": "tensorflow",
    "name": "_is_tensor",
    "source_code": "def _is_tensor(x):\n    return isinstance(x, (tensor_lib.Tensor, variables.Variable))",
    "docstring": "Returns if is a symbolic tensor-like object. Args: x: A python object to check. Returns: if is a or , otherwise .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\image_ops_impl.py",
    "ast_data": "FunctionDef name:_is_tensor arg:x arguments arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "is_constant_jacobian",
    "source_code": "@property\ndef is_constant_jacobian(self):\n    return self._is_constant_jacobian",
    "docstring": "Returns true iff the Jacobian matrix is not a function of x. Note: Jacobian matrix is either constant for both forward and inverse or neither. Returns: is_constant_jacobian: Python .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\distributions\\bijector_impl.py",
    "ast_data": "FunctionDef name:is_constant_jacobian arg:self arguments arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "tolist",
    "source_code": "def tolist(self):\n    return list(self)",
    "docstring": "Return a list containing the elements of this storage.",
    "type": "method",
    "file_path": "pytorch\\torch\\storage.py",
    "ast_data": "FunctionDef name:tolist arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "conv_flop_count",
    "source_code": "def conv_flop_count(x_shape: list[int], w_shape: list[int], out_shape: list[int], transposed: bool=False) -> int:\n    batch_size = x_shape[0]\n    conv_shape = (x_shape if transposed else out_shape)[2:]\n    c_out, c_in, *filter_size = w_shape\n    \"\\n    General idea here is that for a regular conv, for each point in the output\\n    spatial dimension we convolve the filter with something (hence\\n    `prod(conv_shape) * prod(filter_size)` ops). Then, this gets multiplied by\\n    1. batch_size, 2. the cross product of input and weight channels.\\n\\n    For the transpose, it's not each point in the *output* spatial dimension but\\n    each point in the *input* spatial dimension.\\n    \"\n    flop = prod(conv_shape) * prod(filter_size) * batch_size * c_out * c_in * 2\n    return flop",
    "docstring": "Count flops for convolution. Note only multiplication is counted. Computation for bias are ignored. Flops for a transposed convolution are calculated as flops = (x_shape[2:] * prod(w_shape) * batch_size). Args: x_shape (list(int)): The input shape before convolution. w_shape (list(int)): The filter shape. out_shape (list(int)): The output shape after convolution. transposed (bool): is the convolution transposed Returns: int: the number of flops",
    "type": "function",
    "file_path": "pytorch\\torch\\utils\\flop_counter.py",
    "ast_data": "FunctionDef name:conv_flop_count arg:x_shape arg:w_shape arg:out_shape arg:transposed arguments arg arg arg arg Assign Assign Assign Assign Call Call Return return:yes"
  },
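Hand-checking the formula for a regular (non-transposed) conv: a 1x3x8x8 input with a 4x3x3x3 filter at stride 1, padding 1 gives a 1x4x8x8 output, so flops = prod(out spatial) * prod(filter) * batch * c_out * c_in * 2:

```python
from math import prod

x_shape, w_shape, out_shape = [1, 3, 8, 8], [4, 3, 3, 3], [1, 4, 8, 8]
c_out, c_in, *filt = w_shape
flops = prod(out_shape[2:]) * prod(filt) * x_shape[0] * c_out * c_in * 2
print(flops)   # 64 * 9 * 1 * 4 * 3 * 2 = 13824
```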
  {
    "library": "virtualenv",
    "name": "generate",
    "source_code": "@abstractmethod\ndef generate(self, creator):\n    raise NotImplementedError",
    "docstring": "Generate activate script for the given creator. :param creator: the creator (based of :class:) we used to create this virtual environment",
    "type": "method",
    "file_path": "virtualenv\\src\\virtualenv\\activation\\activator.py",
    "ast_data": "FunctionDef name:generate arg:self arg:creator arguments arg arg Raise"
  },
  {
    "library": "sphinx",
    "name": "is_singledispatch_method",
    "source_code": "def is_singledispatch_method(obj: Any) -> TypeIs[singledispatchmethod[Any]]:\n    return isinstance(obj, singledispatchmethod)",
    "docstring": "Check if the object is a :class:.",
    "type": "function",
    "file_path": "sphinx\\sphinx\\util\\inspect.py",
    "ast_data": "FunctionDef name:is_singledispatch_method arg:obj arguments arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "is_in_tf_function",
    "source_code": "def is_in_tf_function():\n    if not ops.executing_eagerly_outside_functions():\n        return False\n    if not ops.inside_function():\n        return False\n    if is_in_keras_graph():\n        return False\n    graph = ops.get_default_graph()\n    if getattr(graph, 'name', False) and graph.name.startswith('wrapped_function'):\n        return False\n    return True",
    "docstring": "Returns if inside of a tf.function.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\base_layer_utils.py",
    "ast_data": "FunctionDef name:is_in_tf_function arguments If Call Return return:yes If Call Return return:yes If Call Return return:yes Assign Call If BoolOp Call Call Return return:yes Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "watch_variable",
    "source_code": "def watch_variable(tape, variable):\n    variables = _variables_override(variable)\n    for var in variables:\n        pywrap_tfe.TFE_Py_TapeWatchVariable(tape._tape, var)\n        pywrap_tfe.TFE_Py_VariableWatcherVariableAccessed(var)",
    "docstring": "Marks this variable to be watched by the given tape.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\tape.py",
    "ast_data": "FunctionDef name:watch_variable arg:tape arg:variable arguments arg arg Assign Call For Call Call"
  },
  {
    "library": "django",
    "name": "change_list_object_tools_tag",
    "source_code": "@register.tag(name='change_list_object_tools')\ndef change_list_object_tools_tag(parser, token):\n    return InclusionAdminNode(parser, token, func=lambda context: context, template_name='change_list_object_tools.html')",
    "docstring": "Display the row of change list object tools.",
    "type": "function",
    "file_path": "django\\django\\contrib\\admin\\templatetags\\admin_list.py",
    "ast_data": "FunctionDef name:change_list_object_tools_tag arg:parser arg:token arguments arg arg Return return:yes Call arguments arg Call"
  },
  {
    "library": "tensorflow",
    "name": "Basic",
    "source_code": "class Basic(NoValue):\n    QN = 'Qualified name, as it appeared in the code. See qual_names.py.'\n    SKIP_PROCESSING = 'This node should be preserved as is and not processed any further.'\n    INDENT_BLOCK_REMAINDER = 'When a node is annotated with this, the remainder of the block should be indented below it. The annotation contains a tuple (new_body, name_map), where `new_body` is the new indented block and `name_map` allows renaming symbols.'\n    ORIGIN = 'Information about the source code that converted code originated from. See origin_information.py.'\n    DIRECTIVES = 'User directives associated with a statement or a variable. Typically, they affect the immediately-enclosing statement.'\n    EXTRA_LOOP_TEST = 'A special annotation containing additional test code to be executed in for loops.'",
    "docstring": "Container for basic annotation keys. The enum values are used strictly for documentation purposes.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\autograph\\pyct\\anno.py",
    "ast_data": "ClassDef name:Basic Assign Assign Assign Assign Assign Assign"
  },
  {
    "library": "django",
    "name": "clean",
    "source_code": "def clean(self):\n    pass",
    "docstring": "Hook for doing any extra model-wide validation after clean() has been called on every field by self.clean_fields. Any ValidationError raised by this method will not be associated with a particular field; it will have a special-case association with the field defined by NON_FIELD_ERRORS.",
    "type": "method",
    "file_path": "django\\django\\db\\models\\base.py",
    "ast_data": "FunctionDef name:clean arg:self arguments arg"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self):\n    self._dispatch_table = collections.OrderedDict()\n    self._dispatch_cache = collections.OrderedDict()",
    "docstring": "Creates a TypeDispatchTable object.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\core\\function\\polymorphism\\type_dispatch.py",
    "ast_data": "FunctionDef name:__init__ arg:self arguments arg Assign Call Assign Call"
  },
  {
    "library": "pandas",
    "name": "IndexingMixin",
    "source_code": "class IndexingMixin:\n\n    @property\n    def iloc(self) -> _iLocIndexer:\n        return _iLocIndexer('iloc', self)\n\n    @property\n    def loc(self) -> _LocIndexer:\n        return _LocIndexer('loc', self)\n\n    @property\n    def at(self) -> _AtIndexer:\n        return _AtIndexer('at', self)\n\n    @property\n    def iat(self) -> _iAtIndexer:\n        return _iAtIndexer('iat', self)",
    "docstring": "Mixin for adding .loc/.iloc/.at/.iat to Dataframes and Series.",
    "type": "class",
    "file_path": "pandas\\pandas\\core\\indexing.py",
    "ast_data": "ClassDef name:IndexingMixin FunctionDef name:iloc arg:self arguments arg Return return:yes Call FunctionDef name:loc arg:self arguments arg Return return:yes Call FunctionDef name:at arg:self arguments arg Return return:yes Call FunctionDef name:iat arg:self arguments arg Return return:yes Call"
  },
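The four indexers above, side by side on a tiny frame:

```python
import pandas as pd

df = pd.DataFrame({'a': [1, 2, 3]}, index=['x', 'y', 'z'])
print(df.loc['y', 'a'])   # label-based -> 2
print(df.iloc[1, 0])      # position-based -> 2
print(df.at['z', 'a'])    # fast scalar label access -> 3
print(df.iat[0, 0])       # fast scalar position access -> 1
```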
  {
    "library": "pytorch",
    "name": "reset_min_max_vals",
    "source_code": "@torch.jit.export\ndef reset_min_max_vals(self):\n    self.min_val.copy_(torch.tensor(float('inf')))\n    self.max_val.copy_(torch.tensor(float('-inf')))",
    "docstring": "Resets the min/max values.",
    "type": "method",
    "file_path": "pytorch\\torch\\ao\\quantization\\observer.py",
    "ast_data": "FunctionDef name:reset_min_max_vals arg:self arguments arg Call Call Call Call Call Call"
  },
  {
    "library": "django",
    "name": "linebreaksbr",
    "source_code": "@register.filter(is_safe=True, needs_autoescape=True)\n@stringfilter\ndef linebreaksbr(value, autoescape=True):\n    autoescape = autoescape and (not isinstance(value, SafeData))\n    value = normalize_newlines(value)\n    if autoescape:\n        value = escape(value)\n    return mark_safe(value.replace('\\n', '<br>'))",
    "docstring": "Convert all newlines in a piece of plain text to HTML line breaks (````).",
    "type": "function",
    "file_path": "django\\django\\template\\defaultfilters.py",
    "ast_data": "FunctionDef name:linebreaksbr arg:value arg:autoescape arguments arg arg Assign BoolOp Call Assign Call If Assign Call Return return:yes Call Call Call"
  },
  {
    "library": "django",
    "name": "AutoEscapeControlNode",
    "source_code": "class AutoEscapeControlNode(Node):\n\n    def __init__(self, setting, nodelist):\n        self.setting = setting\n        self.nodelist = nodelist\n\n    def render(self, context):\n        old_setting = context.autoescape\n        context.autoescape = self.setting\n        output = self.nodelist.render(context)\n        context.autoescape = old_setting\n        if self.setting:\n            return mark_safe(output)\n        else:\n            return output",
    "docstring": "Implement the actions of the autoescape tag.",
    "type": "class",
    "file_path": "django\\django\\template\\defaulttags.py",
    "ast_data": "ClassDef name:AutoEscapeControlNode FunctionDef name:__init__ arg:self arg:setting arg:nodelist arguments arg arg arg Assign Assign FunctionDef name:render arg:self arg:context arguments arg arg Assign Assign Assign Call Assign If Return return:yes Call Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "transform",
    "source_code": "def transform(self, X, y=None):\n    return super()._transform(X, self.dictionary)",
    "docstring": "Encode the data as a sparse combination of the dictionary atoms. Coding method is determined by the object parameter . Parameters ---------- X : ndarray of shape (n_samples, n_features) Training vector, where is the number of samples and is the number of features. y : Ignored Not used, present for API consistency by convention. Returns ------- X_new : ndarray of shape (n_samples, n_components) Transformed data.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\decomposition\\_dict_learning.py",
    "ast_data": "FunctionDef name:transform arg:self arg:X arg:y arguments arg arg arg Return return:yes Call Call"
  },
  {
    "library": "kornia",
    "name": "PyrUp",
    "source_code": "class PyrUp(Module):\n\n    def __init__(self, border_type: str='reflect', align_corners: bool=False) -> None:\n        super().__init__()\n        self.border_type: str = border_type\n        self.align_corners: bool = align_corners\n\n    def forward(self, input: Tensor) -> Tensor:\n        return pyrup(input, self.border_type, self.align_corners)",
    "docstring": "Upsample a tensor and then blurs it. Args: borde_type: the padding mode to be applied before convolving. The expected modes are: `(B, C, H, W)(B, C, H * 2, W * 2)` Examples: >>> input = torch.rand(1, 2, 4, 4) >>> output = PyrUp()(input) # 1x2x8x8",
    "type": "class",
    "file_path": "kornia\\kornia\\geometry\\transform\\pyramid.py",
    "ast_data": "ClassDef name:PyrUp FunctionDef name:__init__ arg:self arg:border_type arg:align_corners arguments arg arg arg Call Call FunctionDef name:forward arg:self arg:input arguments arg arg Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "relim",
    "source_code": "def relim(self, visible_only=False):\n    self.dataLim.ignore(True)\n    self.dataLim.set_points(mtransforms.Bbox.null().get_points())\n    self.ignore_existing_data_limits = True\n    for artist in self._children:\n        if not visible_only or artist.get_visible():\n            if isinstance(artist, mlines.Line2D):\n                self._update_line_limits(artist)\n            elif isinstance(artist, mpatches.Patch):\n                self._update_patch_limits(artist)\n            elif isinstance(artist, mimage.AxesImage):\n                self._update_image_limits(artist)",
    "docstring": "Recompute the data limits based on current artists. At present, instances are not supported. Parameters ---------- visible_only : bool, default: False Whether to exclude invisible artists.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\axes\\_base.py",
    "ast_data": "FunctionDef name:relim arg:self arg:visible_only arguments arg arg Call Call Call Call Assign For If BoolOp Call If Call Call If Call Call If Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_supported_signatures",
    "source_code": "def _supported_signatures(self):\n    return TT_SUMMARY_SIGNATURES",
    "docstring": "Returns a tuple of supported signatures.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\tpu\\tensor_tracer_flags.py",
    "ast_data": "FunctionDef name:_supported_signatures arg:self arguments arg Return return:yes"
  },
  {
    "library": "django",
    "name": "kml",
    "source_code": "@property\ndef kml(self):\n    inner_kml = ''.join(('<innerBoundaryIs>%s</innerBoundaryIs>' % self[i + 1].kml for i in range(self.num_interior_rings)))\n    return '<Polygon><outerBoundaryIs>%s</outerBoundaryIs>%s</Polygon>' % (self[0].kml, inner_kml)",
    "docstring": "Return the KML representation of this Polygon.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\geos\\polygon.py",
    "ast_data": "FunctionDef name:kml arg:self arguments arg Assign Call Call Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "transform",
    "source_code": "def transform(self, X):\n    check_is_fitted(self, ['feature_names_', 'vocabulary_'])\n    return self._transform(X, fitting=False)",
    "docstring": "Transform feature->value dicts to array or sparse matrix. Named features not encountered during fit or fit_transform will be silently ignored. Parameters ---------- X : Mapping or iterable over Mappings of shape (n_samples,) Dict(s) or Mapping(s) from feature names (arbitrary Python objects) to feature values (strings or convertible to dtype). Returns ------- Xa : {array, sparse matrix} Feature vectors; always 2-d.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\feature_extraction\\_dict_vectorizer.py",
    "ast_data": "FunctionDef name:transform arg:self arg:X arguments arg arg Call Return return:yes Call"
  },
  {
    "library": "django",
    "name": "get_related_models_recursive",
    "source_code": "def get_related_models_recursive(model):\n    seen = set()\n    queue = _get_related_models(model)\n    for rel_mod in queue:\n        rel_app_label, rel_model_name = (rel_mod._meta.app_label, rel_mod._meta.model_name)\n        if (rel_app_label, rel_model_name) in seen:\n            continue\n        seen.add((rel_app_label, rel_model_name))\n        queue.extend(_get_related_models(rel_mod))\n    return seen - {(model._meta.app_label, model._meta.model_name)}",
    "docstring": "Return all models that have a direct or indirect relationship to the given model. Relationships are either defined by explicit relational fields, like ForeignKey, ManyToManyField or OneToOneField, or by inheriting from another model (a superclass is related to its subclasses, but not vice versa). Note, however, that a model inheriting from a concrete model is also related to its superclass through the implicit *_ptr OneToOneField on the subclass.",
    "type": "function",
    "file_path": "django\\django\\db\\migrations\\state.py",
    "ast_data": "FunctionDef name:get_related_models_recursive arg:model arguments arg Assign Call Assign Call For Assign If Compare Call Call Call Return return:yes"
  },
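The traversal above, re-stated over a plain adjacency dict (the graph and names here are hypothetical; Django's version walks relational fields and inheritance links instead). The worklist is a list grown in place while it is iterated, exactly as in the original:

```python
related = {'author': ['book'], 'book': ['review', 'author'], 'review': []}

def related_recursive(model):
    seen = set()
    queue = list(related[model])      # BFS worklist, extended during the loop
    for node in queue:
        if node in seen:
            continue
        seen.add(node)
        queue.extend(related[node])
    return seen - {model}             # the model itself is excluded

print(related_recursive('author'))    # {'book', 'review'}
```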
  {
    "library": "tensorflow",
    "name": "has_non_trivial_reshard_callback",
    "source_code": "def has_non_trivial_reshard_callback(self) -> bool:\n    return not issubclass(checkpoint_adapter.ReshardCallback, type(self.callback))",
    "docstring": "Determine whether this value has a non-trivial resharding callback.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\checkpoint\\restore.py",
    "ast_data": "FunctionDef name:has_non_trivial_reshard_callback arg:self arguments arg Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "forward",
    "source_code": "def forward(self, x: torch.Tensor, output_size: Optional[list[int]]=None) -> torch.Tensor:\n    assert isinstance(self.padding, tuple)\n    output_padding = self._output_padding(input, output_size, self.stride, self.padding, self.kernel_size, self.dilation)\n    weight_quant_dequant = self.get_weight()\n    result = F.conv_transpose1d(x, weight_quant_dequant, self.bias, self.stride, self.padding, output_padding, self.groups, self.dilation)\n    return result",
    "docstring": "we have: w(float) -- quant - dequant x(float) ------------- F.convTranspose1d --- In the full model, we will see w(float) -- quant - *dequant x -- quant --- *dequant -- *F.convTranspose1d --- *quant - dequant and the backend should be able to fuse the ops with into a quantized conv1d",
    "type": "method",
    "file_path": "pytorch\\torch\\ao\\nn\\quantized\\reference\\modules\\conv.py",
    "ast_data": "FunctionDef name:forward arg:self arg:x arg:output_size arguments arg arg arg Call Assign Call Assign Call Assign Call Return return:yes"
  },
  {
    "library": "pygame",
    "name": "set_cursor",
    "source_code": "def set_cursor(*args):\n    cursor = Cursor(*args)\n    pygame.mouse._set_cursor(**{cursor.type: cursor.data})",
    "docstring": "set_cursor(pygame.cursors.Cursor OR args for a pygame.cursors.Cursor) -> None set the mouse cursor to a new cursor",
    "type": "function",
    "file_path": "pygame\\src_py\\cursors.py",
    "ast_data": "FunctionDef name:set_cursor arguments arg Assign Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_should_unpack",
    "source_code": "def _should_unpack(arg):\n    return type(arg) is tuple",
    "docstring": "Determines whether the caller needs to unpack the argument from a tuple. Args: arg: argument to check Returns: Indication of whether the caller needs to unpack the argument from a tuple.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\data\\ops\\structured_function.py",
    "ast_data": "FunctionDef name:_should_unpack arg:arg arguments arg Return return:yes Compare Call"
  },
  {
    "library": "authlib",
    "name": "decode",
    "source_code": "def decode(self, s, key, claims_cls=None, claims_options=None, claims_params=None):\n    if claims_cls is None:\n        claims_cls = JWTClaims\n    if callable(key):\n        load_key = key\n    else:\n        load_key = create_load_key(prepare_raw_key(key))\n    s = to_bytes(s)\n    dot_count = s.count(b'.')\n    if dot_count == 2:\n        data = self._jws.deserialize_compact(s, load_key, decode_payload)\n    elif dot_count == 4:\n        data = self._jwe.deserialize_compact(s, load_key, decode_payload)\n    else:\n        raise DecodeError('Invalid input segments length')\n    return claims_cls(data['payload'], data['header'], options=claims_options, params=claims_params)",
    "docstring": "Decode the JWT with the given key. This is similar with :meth:, except that it will raise BadSignatureError when signature doesn't match. :param s: text of JWT :param key: key used to verify the signature :param claims_cls: class to be used for JWT claims :param claims_options: parameters for claims_cls :param claims_params: parameters for claims_cls :return: claims_cls instance :raise: BadSignatureError",
    "type": "method",
    "file_path": "authlib\\authlib\\jose\\rfc7519\\jwt.py",
    "ast_data": "FunctionDef name:decode arg:self arg:s arg:key arg:claims_cls arg:claims_options arg:claims_params arguments arg arg arg arg arg arg If Compare Assign If Call Assign Assign Call Call Assign Call Assign Call If Compare Assign Call If Compare Assign Call Raise Call Return return:yes Call"
  },
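A round-trip sketch using authlib's JsonWebToken (assuming the standard authlib.jose API; a two-dot compact token takes the JWS path in the method above):

```python
from authlib.jose import JsonWebToken

jwt = JsonWebToken(['HS256'])
token = jwt.encode({'alg': 'HS256'}, {'iss': 'example'}, 'secret')
claims = jwt.decode(token, 'secret')   # token.count(b'.') == 2 -> JWS branch
claims.validate()                      # raises on invalid claims
print(claims['iss'])                   # 'example'
```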
  {
    "library": "pandas",
    "name": "_hash_pandas_object",
    "source_code": "def _hash_pandas_object(self, *, encoding: str, hash_key: str, categorize: bool) -> npt.NDArray[np.uint64]:\n    from pandas.core.util.hashing import hash_array\n    values = np.asarray(self.categories._values)\n    hashed = hash_array(values, encoding, hash_key, categorize=False)\n    mask = self.isna()\n    if len(hashed):\n        result = hashed.take(self._codes)\n    else:\n        result = np.zeros(len(mask), dtype='uint64')\n    if mask.any():\n        result[mask] = lib.u8max\n    return result",
    "docstring": "Hash a Categorical by hashing its categories, and then mapping the codes to the hashes. Parameters ---------- encoding : str hash_key : str categorize : bool Ignored for Categorical. Returns ------- np.ndarray[uint64]",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\arrays\\categorical.py",
    "ast_data": "FunctionDef name:_hash_pandas_object arg:self arguments arg arg arg arg Assign Call Assign Call Assign Call If Call Assign Call Assign Call Call If Call Assign Return return:yes"
  },
  {
    "library": "cherrypy",
    "name": "ntob",
    "source_code": "def ntob(n, encoding='ISO-8859-1'):\n    assert_native(n)\n    return n.encode(encoding)",
    "docstring": "Convert a native :class: to a :class: instance. The encoding can be changed to non-ASCII optionally.",
    "type": "function",
    "file_path": "cherrypy\\cherrypy\\_cpcompat.py",
    "ast_data": "FunctionDef name:ntob arg:n arg:encoding arguments arg arg Call Return return:yes Call"
  },
  {
    "library": "numpy",
    "name": "leggauss",
    "source_code": "def leggauss(deg):\n    ideg = pu._as_int(deg, 'deg')\n    if ideg <= 0:\n        raise ValueError('deg must be a positive integer')\n    c = np.array([0] * deg + [1])\n    m = legcompanion(c)\n    x = la.eigvalsh(m)\n    dy = legval(x, c)\n    df = legval(x, legder(c))\n    x -= dy / df\n    fm = legval(x, c[1:])\n    fm /= np.abs(fm).max()\n    df /= np.abs(df).max()\n    w = 1 / (fm * df)\n    w = (w + w[::-1]) / 2\n    x = (x - x[::-1]) / 2\n    w *= 2.0 / w.sum()\n    return (x, w)",
    "docstring": "Gauss-Legendre quadrature. Computes the sample points and weights for Gauss-Legendre quadrature. These sample points and weights will correctly integrate polynomials of degree :math: or less over the interval :math: with the weight function :math:. Parameters ---------- deg : int Number of sample points and weights. It must be >= 1. Returns ------- x : ndarray 1-D ndarray containing the sample points. y : ndarray 1-D ndarray containing the weights. Notes ----- The results have only been tested up to degree 100, higher degrees may be problematic. The weights are determined by using the fact that .. math:: w_k = c / (L'_n(x_k) * L_{n-1}(x_k)) where :math: is a constant independent of :math: and :math: is the k'th root of :math:, and then scaling the results to get the right value when integrating 1.",
    "type": "function",
    "file_path": "numpy\\numpy\\polynomial\\legendre.py",
    "ast_data": "FunctionDef name:leggauss arg:deg arguments arg Assign Call If Compare Raise Call Assign Call Assign Call Assign Call Assign Call Assign Call Call Assign Call Call Call Call Call Assign Assign Assign Call Return return:yes"
  },
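A quadrature sanity check: three Gauss-Legendre nodes integrate polynomials up to degree 5 exactly, e.g. the integral of x^4 over [-1, 1] is 2/5:

```python
import numpy as np

x, w = np.polynomial.legendre.leggauss(3)
print(np.sum(w * x**4))   # 0.4 == 2/5, exact for degree <= 2*3 - 1
```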
  {
    "library": "tensorflow",
    "name": "_group",
    "source_code": "def _group(self, value, name=None):\n    value = nest.flatten(self._local_results(value))\n    if len(value) != 1 or name is not None:\n        return control_flow_ops.group(value, name=name)\n    v, = value\n    if hasattr(v, 'op'):\n        v = v.op\n    return v",
    "docstring": "Implementation of .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\distribute_lib.py",
    "ast_data": "FunctionDef name:_group arg:self arg:value arg:name arguments arg arg arg Assign Call Call If BoolOp Compare Call Compare Return return:yes Call Assign If Call Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "ragged_to_dense",
    "source_code": "def ragged_to_dense(rt_input, default_value=None, shape=None):\n    return rt_input.to_tensor(default_value=default_value, shape=shape)",
    "docstring": "Create a dense tensor from a ragged tensor.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\ragged_conversion_ops.py",
    "ast_data": "FunctionDef name:ragged_to_dense arg:rt_input arg:default_value arg:shape arguments arg arg arg Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "_analyze",
    "source_code": "def _analyze(doc, analyzer=None, tokenizer=None, ngrams=None, preprocessor=None, decoder=None, stop_words=None):\n    if decoder is not None:\n        doc = decoder(doc)\n    if analyzer is not None:\n        doc = analyzer(doc)\n    else:\n        if preprocessor is not None:\n            doc = preprocessor(doc)\n        if tokenizer is not None:\n            doc = tokenizer(doc)\n        if ngrams is not None:\n            if stop_words is not None:\n                doc = ngrams(doc, stop_words)\n            else:\n                doc = ngrams(doc)\n    return doc",
    "docstring": "Chain together an optional series of text processing steps to go from a single document to ngrams, with or without tokenizing or preprocessing. If analyzer is used, only the decoder argument is used, as the analyzer is intended to replace the preprocessor, tokenizer, and ngrams steps. Parameters ---------- analyzer: callable, default=None tokenizer: callable, default=None ngrams: callable, default=None preprocessor: callable, default=None decoder: callable, default=None stop_words: list, default=None Returns ------- ngrams: list A sequence of tokens, possibly with pairs, triples, etc.",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\feature_extraction\\text.py",
    "ast_data": "FunctionDef name:_analyze arg:doc arg:analyzer arg:tokenizer arg:ngrams arg:preprocessor arg:decoder arg:stop_words arguments arg arg arg arg arg arg arg If Compare Assign Call If Compare Assign Call If Compare Assign Call If Compare Assign Call If Compare If Compare Assign Call Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "node_recipients",
    "source_code": "def node_recipients(self, node_name, is_control=False, device_name=None):\n    if not self._debug_graphs:\n        raise LookupError('Node recipients are not loaded from partition graphs yet.')\n    device_name = self._infer_device_name(device_name, node_name)\n    debug_graph = self._debug_graphs[device_name]\n    if is_control:\n        return debug_graph.node_ctrl_recipients[node_name]\n    else:\n        return debug_graph.node_recipients[node_name]",
    "docstring": "Get recipient of the given node's output according to partition graphs. Args: node_name: () name of the node. is_control: () whether control outputs, rather than non-control outputs, are to be returned. device_name: () name of the device. If there is only one device or if node_name exists on only one device, this argument is optional. Returns: ( of ) all inputs to the node, as a list of node names. Raises: LookupError: If node inputs and control inputs have not been loaded from partition graphs yet.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\debug\\lib\\debug_data.py",
    "ast_data": "FunctionDef name:node_recipients arg:self arg:node_name arg:is_control arg:device_name arguments arg arg arg arg If Raise Call Assign Call Assign If Return return:yes Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "reset_default_graph",
    "source_code": "@tf_export(v1=['reset_default_graph'])\ndef reset_default_graph() -> None:\n    if not _default_graph_stack.is_cleared():\n        raise AssertionError('Do not use tf.reset_default_graph() to clear nested graphs. If you need a cleared graph, exit the nesting and create a new graph.')\n    _default_graph_stack.reset()",
    "docstring": "Clears the default graph stack and resets the global default graph. NOTE: The default graph is a property of the current thread. This function applies only to the current thread. Calling this function while a or is active will result in undefined behavior. Using any previously created or objects after calling this function will result in undefined behavior. @compatibility(TF2) does not work with either eager execution or , and you should not invoke it directly. To migrate code that uses Graph-related functions to TF2, rewrite the code without them. See the [migration guide]( for more description about the behavior and semantic changes between Tensorflow 1 and Tensorflow 2. @end_compatibility Raises: AssertionError: If this function is called within a nested graph.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\ops.py",
    "ast_data": "FunctionDef name:reset_default_graph arguments If Call Raise Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "get_embedding_table_size",
    "source_code": "def get_embedding_table_size(self):\n    return (self.categorical_column._num_buckets, self.dimension)",
    "docstring": "Returns num_ids and width.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\tpu\\feature_column.py",
    "ast_data": "FunctionDef name:get_embedding_table_size arg:self arguments arg Return return:yes"
  },
  {
    "library": "authlib",
    "name": "query_device_credential",
    "source_code": "def query_device_credential(self, device_code):\n    raise NotImplementedError()",
    "docstring": "Get device credential from previously savings via ``. Developers MUST implement it in subclass:: def query_device_credential(self, device_code): return DeviceCredential.get(device_code) :param device_code: a string represent the code. :return: DeviceCredential instance",
    "type": "method",
    "file_path": "authlib\\authlib\\oauth2\\rfc8628\\device_code.py",
    "ast_data": "FunctionDef name:query_device_credential arg:self arg:device_code arguments arg arg Raise Call"
  },
  {
    "library": "pytorch",
    "name": "merge_changes",
    "source_code": "def merge_changes(self, repo: GitRepo, skip_mandatory_checks: bool=False, comment_id: Optional[int]=None, branch: Optional[str]=None, skip_all_rule_checks: bool=False) -> list['GitHubPR']:\n    branch_to_merge_into = self.default_branch() if branch is None else branch\n    if repo.current_branch() != branch_to_merge_into:\n        repo.checkout(branch_to_merge_into)\n    if not self.is_ghstack_pr():\n        msg = self.gen_commit_message()\n        pr_branch_name = f'__pull-request-{self.pr_num}__init__'\n        repo.fetch(self.last_commit()['oid'], pr_branch_name)\n        repo._run_git('merge', '--squash', pr_branch_name)\n        repo._run_git('commit', f'--author=\"{self.get_author()}\"', '-m', msg)\n        pulled_sha = repo.show_ref(pr_branch_name)\n        latest_pr_status = GitHubPR(self.org, self.project, self.pr_num)\n        if pulled_sha != latest_pr_status.last_commit()['oid']:\n            raise RuntimeError('PR has been updated since CI checks last passed. Please rerun the merge command.')\n        return []\n    else:\n        return self.merge_ghstack_into(repo, skip_mandatory_checks, comment_id=comment_id, skip_all_rule_checks=skip_all_rule_checks)",
    "docstring": ":param skip_all_rule_checks: If true, skips all rule checks, useful for dry-running merge locally",
    "type": "method",
    "file_path": "pytorch\\.github\\scripts\\trymerge.py",
    "ast_data": "FunctionDef name:merge_changes arg:self arg:repo arg:skip_mandatory_checks arg:comment_id arg:branch arg:skip_all_rule_checks arguments arg arg arg arg arg arg Assign Compare Call If Compare Call Call If Call Assign Call Assign Call Call Call Call Call Assign Call Assign Call If Compare Call Raise Call Return return:no Return return:yes Call"
  },
  {
    "library": "sphinx",
    "name": "render_partial",
    "source_code": "def render_partial(self, node: Node | None) -> dict[str, str]:\n    if node is None:\n        return {'fragment': ''}\n    doc = new_document('<partial node>')\n    doc.append(node)\n    self._publisher.set_source(doc)\n    self._publisher.publish()\n    return self._publisher.writer.parts",
    "docstring": "Utility: Render a lone doctree node.",
    "type": "method",
    "file_path": "sphinx\\sphinx\\builders\\html\\__init__.py",
    "ast_data": "FunctionDef name:render_partial arg:self arg:node arguments arg arg If Compare Return return:yes Assign Call Call Call Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "_replace_path",
    "source_code": "def _replace_path(self, source_class):\n    replace_dict = {'_base._AxesBase': 'Axes', '_axes.Axes': 'Axes'}\n    for key, value in replace_dict.items():\n        source_class = source_class.replace(key, value)\n    return source_class",
    "docstring": "Changes the full path to the public API path that is used in sphinx. This is needed for links to work.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\artist.py",
    "ast_data": "FunctionDef name:_replace_path arg:self arg:source_class arguments arg arg Assign For Call Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "extract_tensors_from_dataset",
    "source_code": "def extract_tensors_from_dataset(dataset):\n    iterator = get_iterator(dataset)\n    inputs, targets, sample_weight = unpack_iterator_input(iterator)\n    return (inputs, targets, sample_weight)",
    "docstring": "Extract a tuple of tensors from a dataset. Args: dataset: Dataset instance. Returns: Tuple of tensors . and entry may be None.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\training_utils_v1.py",
    "ast_data": "FunctionDef name:extract_tensors_from_dataset arg:dataset arguments arg Assign Call Assign Call Return return:yes"
  },
  {
    "library": "seaborn",
    "name": "_inverse",
    "source_code": "def _inverse(self, values):\n    return np.sqrt(values)",
    "docstring": "Invert areal values back to point diameter.",
    "type": "method",
    "file_path": "seaborn\\seaborn\\_core\\properties.py",
    "ast_data": "FunctionDef name:_inverse arg:self arg:values arguments arg arg Return return:yes Call"
  },
  {
    "library": "sphinx",
    "name": "SphinxDummyWriter",
    "source_code": "class SphinxDummyWriter(UnfilteredWriter):\n    supported = ('html',)\n\n    def translate(self) -> None:\n        pass",
    "docstring": "Dummy writer module used for generating doctree.",
    "type": "class",
    "file_path": "sphinx\\sphinx\\io.py",
    "ast_data": "ClassDef name:SphinxDummyWriter Assign FunctionDef name:translate arg:self arguments arg"
  },
  {
    "library": "kornia",
    "name": "visualize",
    "source_code": "def visualize(self, images: Union[Tensor, list[Tensor]], edge_maps: Optional[Union[Tensor, list[Tensor]]]=None, output_type: str='torch') -> Union[Tensor, list[Tensor], list[Image.Image]]:\n    if edge_maps is None:\n        edge_maps = self.forward(images)\n    output = []\n    for edge_map in edge_maps:\n        output.append(grayscale_to_rgb(edge_map)[0])\n    return self._tensor_to_type(output, output_type, is_batch=isinstance(images, Tensor))",
    "docstring": "Draw the edge detection results. Args: images: input tensor. edge_maps: detected edges. output_type: type of the output. Returns: output tensor.",
    "type": "method",
    "file_path": "kornia\\kornia\\models\\edge_detection\\base.py",
    "ast_data": "FunctionDef name:visualize arg:self arg:images arg:edge_maps arg:output_type arguments arg arg arg arg If Compare Assign Call Assign For Call Call Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "non_trainable_variables",
    "source_code": "@property\ndef non_trainable_variables(self):\n    return tuple(self._flatten(predicate=_is_non_trainable_variable, expand_composites=True))",
    "docstring": "Sequence of non-trainable variables owned by this module and its submodules. Note: this method uses reflection to find variables on the current instance and submodules. For performance reasons you may wish to cache the result of calling this method if you don't expect the return value to change. Returns: A sequence of variables for the current module (sorted by attribute name) followed by variables from all submodules recursively (breadth first).",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\module\\module.py",
    "ast_data": "FunctionDef name:non_trainable_variables arg:self arguments arg Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "_validate_device",
    "source_code": "def _validate_device(query: Tensor, key: Tensor, value: Tensor):\n    if query.device.type != 'cuda' and query.device.type != 'cpu' and (query.device.type != 'hpu'):\n        raise ValueError(f'FlexAttention is only supported on CUDA, CPU or HPU devices. Found input tensors on {query.device.type} device.')",
    "docstring": "TODO: Remove once non cuda/cpu devices support is added We only need to check query since we have already that q,k,v are on the same device",
    "type": "function",
    "file_path": "pytorch\\torch\\nn\\attention\\flex_attention.py",
    "ast_data": "FunctionDef name:_validate_device arg:query arg:key arg:value arguments arg arg arg If BoolOp Compare Compare Compare Raise Call"
  },
  {
    "library": "cryptography",
    "name": "_lookup_kformat",
    "source_code": "def _lookup_kformat(key_type: utils.Buffer):\n    if not isinstance(key_type, bytes):\n        key_type = memoryview(key_type).tobytes()\n    if key_type in _KEY_FORMATS:\n        return _KEY_FORMATS[key_type]\n    raise UnsupportedAlgorithm(f'Unsupported key type: {key_type!r}')",
    "docstring": "Return valid format or throw error",
    "type": "function",
    "file_path": "cryptography\\src\\cryptography\\hazmat\\primitives\\serialization\\ssh.py",
    "ast_data": "FunctionDef name:_lookup_kformat arg:key_type arguments arg If Call Assign Call Call If Compare Return return:yes Raise Call"
  },
  {
    "library": "pytorch",
    "name": "_get_targeting_node",
    "source_code": "def _get_targeting_node(self, prepared_fx_model: GraphModule, target_fqn: str) -> torch.fx.node.Node:\n    for node in prepared_fx_model.graph.nodes:\n        if node.target == target_fqn:\n            return node\n    parent_fqn_sep_index = target_fqn.rfind('.')\n    if parent_fqn_sep_index == -1:\n        raise ValueError(\"passed in target_fqn not found in graph's targets.\")\n    else:\n        return self._get_targeting_node(prepared_fx_model, target_fqn[:parent_fqn_sep_index])",
    "docstring": "Takes in a GraphModule and the target_fqn and finds the node whose target is this fqn. If it's not found, it means it is most likely inside a fused layer We just go one layer up in terms of the fqn we are searching for until we find parent node If we get to empty string, then we know that it doesn't exist The reason for the recursion is that if the model that we are looking for got fused, we will have module fqn as e.g. x.linear.0 but the graph will only have a node for the fused module, which would have fqn as x.linear so they will not match. To handle this, if we don't match, we then take off the last bit of the fqn e.g. x.linear.0 -> x.linear, or more generally foo.bar.baz -> foo.bar and search again, this will allow us to locate the correct module even in cases with fusion Args: prepared_fx_model (GraphModule): The prepared Fx GraphModule target_fqn (str): The fqn of the layer we are trying to target Returns the node object we are trying to add observers around",
    "type": "method",
    "file_path": "pytorch\\torch\\ao\\quantization\\fx\\_model_report\\detector.py",
    "ast_data": "FunctionDef name:_get_targeting_node arg:self arg:prepared_fx_model arg:target_fqn arguments arg arg arg For If Compare Return return:yes Assign Call If Compare Raise Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "AsyncMetricsContext",
    "source_code": "class AsyncMetricsContext(threading.local):\n\n    def __init__(self):\n        super().__init__()\n        self._in_async_metrics_context = False\n\n    def enter_async_metrics_context(self):\n        self._in_async_metrics_context = True\n\n    def exit_async_metrics_context(self):\n        self._in_async_metrics_context = False\n\n    def in_async_metrics_context(self):\n        return self._in_async_metrics_context",
    "docstring": "A context for controlling metrics recording when async checkpoint is used.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\checkpoint\\checkpoint_context.py",
    "ast_data": "ClassDef name:AsyncMetricsContext FunctionDef name:__init__ arg:self arguments arg Call Call Assign FunctionDef name:enter_async_metrics_context arg:self arguments arg Assign FunctionDef name:exit_async_metrics_context arg:self arguments arg Assign FunctionDef name:in_async_metrics_context arg:self arguments arg Return return:yes"
  },
  {
    "library": "django",
    "name": "spatial_aggregate_name",
    "source_code": "def spatial_aggregate_name(self, agg_name):\n    agg_name = 'unionagg' if agg_name.lower() == 'union' else agg_name.lower()\n    return getattr(self, agg_name)",
    "docstring": "Return the spatial aggregate SQL template and function for the given Aggregate instance.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\db\\backends\\spatialite\\operations.py",
    "ast_data": "FunctionDef name:spatial_aggregate_name arg:self arg:agg_name arguments arg arg Assign Compare Call Call Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "needs_i8_conversion",
    "source_code": "def needs_i8_conversion(dtype: DtypeObj | None) -> bool:\n    if isinstance(dtype, np.dtype):\n        return dtype.kind in 'mM'\n    return isinstance(dtype, (PeriodDtype, DatetimeTZDtype))",
    "docstring": "Check whether the dtype should be converted to int64. Dtype \"needs\" such a conversion if the dtype is of a datetime-like dtype Parameters ---------- dtype : np.dtype, ExtensionDtype, or None Returns ------- boolean Whether or not the dtype should be converted to int64. Examples -------- >>> needs_i8_conversion(str) False >>> needs_i8_conversion(np.int64) False >>> needs_i8_conversion(np.datetime64) False >>> needs_i8_conversion(np.dtype(np.datetime64)) True >>> needs_i8_conversion(np.array([\"a\", \"b\"])) False >>> needs_i8_conversion(pd.Series([1, 2])) False >>> needs_i8_conversion(pd.Series([], dtype=\"timedelta64[ns]\")) False >>> needs_i8_conversion(pd.DatetimeIndex([1, 2, 3], tz=\"US/Eastern\")) False >>> needs_i8_conversion(pd.DatetimeIndex([1, 2, 3], tz=\"US/Eastern\").dtype) True",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\dtypes\\common.py",
    "ast_data": "FunctionDef name:needs_i8_conversion arg:dtype arguments arg If Call Return return:yes Compare Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "_sha256",
    "source_code": "def _sha256(path):\n    sha256hash = hashlib.sha256()\n    chunk_size = 8192\n    with open(path, 'rb') as f:\n        while True:\n            buffer = f.read(chunk_size)\n            if not buffer:\n                break\n            sha256hash.update(buffer)\n    return sha256hash.hexdigest()",
    "docstring": "Calculate the sha256 hash of the file at path.",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\datasets\\_base.py",
    "ast_data": "FunctionDef name:_sha256 arg:path arguments arg Assign Call Assign With Call While Assign Call If Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "StateDictConfig",
    "source_code": "@dataclass\nclass StateDictConfig:\n    offload_to_cpu: bool = False",
    "docstring": "``)",
    "type": "class",
    "file_path": "pytorch\\torch\\distributed\\fsdp\\api.py",
    "ast_data": "ClassDef name:StateDictConfig"
  },
  {
    "library": "django",
    "name": "o",
    "source_code": "def o(self):\n    return self.data.isocalendar().year",
    "docstring": "ISO 8601 year number matching the ISO week number (W)",
    "type": "method",
    "file_path": "django\\django\\utils\\dateformat.py",
    "ast_data": "FunctionDef name:o arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "unique",
    "source_code": "def unique(lst):\n    seen = set()\n    new_lst = []\n    for item in lst:\n        if item in seen:\n            continue\n        seen.add(item)\n        new_lst.append(item)\n    return new_lst",
    "docstring": "Return a list without repeated entries (first occurrence is kept), preserving order.",
    "type": "function",
    "file_path": "scipy\\scipy\\special\\_generate_pyx.py",
    "ast_data": "FunctionDef name:unique arg:lst arguments arg Assign Call Assign For If Compare Call Call Return return:yes"
  },
  {
    "library": "sphinx",
    "name": "is_system_TypeVar",
    "source_code": "def is_system_TypeVar(typ: Any) -> bool:\n    modname = getattr(typ, '__module__', '')\n    return modname == 'typing' and isinstance(typ, typing.TypeVar)",
    "docstring": "Check *typ* is system defined TypeVar.",
    "type": "function",
    "file_path": "sphinx\\sphinx\\util\\typing.py",
    "ast_data": "FunctionDef name:is_system_TypeVar arg:typ arguments arg Assign Call Return return:yes BoolOp Compare Call"
  },
  {
    "library": "scikit-learn",
    "name": "_estimate_gaussian_parameters",
    "source_code": "def _estimate_gaussian_parameters(X, resp, reg_covar, covariance_type):\n    nk = resp.sum(axis=0) + 10 * np.finfo(resp.dtype).eps\n    means = np.dot(resp.T, X) / nk[:, np.newaxis]\n    covariances = {'full': _estimate_gaussian_covariances_full, 'tied': _estimate_gaussian_covariances_tied, 'diag': _estimate_gaussian_covariances_diag, 'spherical': _estimate_gaussian_covariances_spherical}[covariance_type](resp, X, nk, means, reg_covar)\n    return (nk, means, covariances)",
    "docstring": "Estimate the Gaussian distribution parameters. Parameters ---------- X : array-like of shape (n_samples, n_features) The input data array. resp : array-like of shape (n_samples, n_components) The responsibilities for each data sample in X. reg_covar : float The regularization added to the diagonal of the covariance matrices. covariance_type : {'full', 'tied', 'diag', 'spherical'} The type of precision matrices. Returns ------- nk : array-like of shape (n_components,) The numbers of data samples in the current components. means : array-like of shape (n_components, n_features) The centers of the current components. covariances : array-like The covariance matrix of the current components. The shape depends of the covariance_type.",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\mixture\\_gaussian_mixture.py",
    "ast_data": "FunctionDef name:_estimate_gaussian_parameters arg:X arg:resp arg:reg_covar arg:covariance_type arguments arg arg arg arg Assign Call Call Assign Call Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "visit_Import",
    "source_code": "def visit_Import(self, node: ast.Import) -> None:\n    for name in node.names:\n        if name.name == self._decorator_package:\n            if name.asname:\n                self._current_file_decorators.add(name.asname + '.' + self._decorator_symbol)\n            else:\n                _, module = self._decorator_package.rsplit('.', 1)\n                self._current_file_decorators.add(module + '.' + self._decorator_symbol)\n    self.generic_visit(node)",
    "docstring": "Identifies imports of decorator.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\tools\\api\\generator2\\extractor\\extractor.py",
    "ast_data": "FunctionDef name:visit_Import arg:self arg:node arguments arg arg For If Compare If Call Assign Call Call Call"
  },
  {
    "library": "django",
    "name": "postgis_proj_version",
    "source_code": "def postgis_proj_version(self):\n    return self._get_postgis_func('postgis_proj_version')",
    "docstring": "Return the version of the PROJ library used with PostGIS.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\db\\backends\\postgis\\operations.py",
    "ast_data": "FunctionDef name:postgis_proj_version arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "_soft_mask_state",
    "source_code": "def _soft_mask_state(self, smask):\n    state = self._soft_mask_states.get(smask, None)\n    if state is not None:\n        return state[0]\n    name = next(self._soft_mask_seq)\n    groupOb = self.reserveObject('transparency group for soft mask')\n    self._soft_mask_states[smask] = (name, {'Type': Name('ExtGState'), 'AIS': False, 'SMask': {'Type': Name('Mask'), 'S': Name('Luminosity'), 'BC': [1], 'G': groupOb}})\n    self._soft_mask_groups.append((groupOb, {'Type': Name('XObject'), 'Subtype': Name('Form'), 'FormType': 1, 'Group': {'S': Name('Transparency'), 'CS': Name('DeviceGray')}, 'Matrix': [1, 0, 0, 1, 0, 0], 'Resources': {'Shading': {'S': smask}}, 'BBox': [0, 0, 1, 1]}, [Name('S'), Op.shading]))\n    return name",
    "docstring": "Return an ExtGState that sets the soft mask to the given shading. Parameters ---------- smask : Reference Reference to a shading in DeviceGray color space, whose luminosity is to be used as the alpha channel. Returns ------- Name",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\backends\\backend_pdf.py",
    "ast_data": "FunctionDef name:_soft_mask_state arg:self arg:smask arguments arg arg Assign Call If Compare Return return:yes Assign Call Assign Call Assign Call Call Call Call Call Call Call Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "modify_model_io_type",
    "source_code": "def modify_model_io_type(model, inference_input_type=dtypes.float32, inference_output_type=dtypes.float32):\n    if inference_input_type == dtypes.float32 and inference_output_type == dtypes.float32:\n        return model\n    model_object = _convert_model_from_bytearray_to_object(model)\n    _modify_model_input_type(model_object, inference_input_type)\n    _modify_model_output_type(model_object, inference_output_type)\n    _remove_redundant_quantize_ops(model_object)\n    return _convert_model_from_object_to_bytearray(model_object)",
    "docstring": "Modify the input/output type of a tflite model. Args: model: A tflite model. inference_input_type: tf.DType representing modified input type. (default tf.float32. If model input is int8 quantized, it must be in {tf.float32, tf.int8,tf.uint8}, else if model input is int16 quantized, it must be in {tf.float32, tf.int16}, else it must be tf.float32) inference_output_type: tf.DType representing modified output type. (default tf.float32. If model output is int8 dequantized, it must be in {tf.float32, tf.int8,tf.uint8}, else if model output is int16 dequantized, it must be in {tf.float32, tf.int16}, else it must be tf.float32) Returns: A tflite model with modified input/output type. Raises: ValueError: If / is unsupported or a supported integer type is specified for a model whose input/output is not quantized/dequantized. RuntimeError: If the modification was unsuccessful.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\lite\\python\\util.py",
    "ast_data": "FunctionDef name:modify_model_io_type arg:model arg:inference_input_type arg:inference_output_type arguments arg arg arg If BoolOp Compare Compare Return return:yes Assign Call Call Call Call Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "staged_predict_proba",
    "source_code": "def staged_predict_proba(self, X):\n    n_classes = self.n_classes_\n    for decision in self.staged_decision_function(X):\n        yield self._compute_proba_from_decision(decision, n_classes)",
    "docstring": "Predict class probabilities for X. The predicted class probabilities of an input sample is computed as the weighted mean predicted class probabilities of the classifiers in the ensemble. This generator method yields the ensemble predicted class probabilities after each iteration of boosting and therefore allows monitoring, such as to determine the predicted class probabilities on a test set after each boost. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) The training input samples. Sparse matrix can be CSC, CSR, COO, DOK, or LIL. COO, DOK, and LIL are converted to CSR. Yields ------ p : generator of ndarray of shape (n_samples,) The class probabilities of the input samples. The order of outputs is the same of that of the :term: attribute.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\ensemble\\_weight_boosting.py",
    "ast_data": "FunctionDef name:staged_predict_proba arg:self arg:X arguments arg arg Assign For Call Call"
  },
  {
    "library": "tensorflow",
    "name": "name",
    "source_code": "@property\ndef name(self):\n    return self._name",
    "docstring": "The name of this ExponentialMovingAverage object.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\training\\moving_averages.py",
    "ast_data": "FunctionDef name:name arg:self arguments arg Return return:yes"
  },
  {
    "library": "seaborn",
    "name": "_define_bin_params",
    "source_code": "def _define_bin_params(self, data, orient, scale_type):\n    vals = data[orient]\n    weights = data.get('weight', None)\n    discrete = self.discrete or scale_type == 'nominal'\n    bin_edges = self._define_bin_edges(vals, weights, self.bins, self.binwidth, self.binrange, discrete)\n    if isinstance(self.bins, (str, int)):\n        n_bins = len(bin_edges) - 1\n        bin_range = (bin_edges.min(), bin_edges.max())\n        bin_kws = dict(bins=n_bins, range=bin_range)\n    else:\n        bin_kws = dict(bins=bin_edges)\n    return bin_kws",
    "docstring": "Given data, return numpy.histogram parameters to define bins.",
    "type": "method",
    "file_path": "seaborn\\seaborn\\_stats\\counting.py",
    "ast_data": "FunctionDef name:_define_bin_params arg:self arg:data arg:orient arg:scale_type arguments arg arg arg arg Assign Assign Call Assign BoolOp Compare Assign Call If Call Assign Call Assign Call Call Assign Call Assign Call Return return:yes"
  },
  {
    "library": "pygame",
    "name": "make_sound",
    "source_code": "def make_sound(array):\n    return mixer.Sound(array=array)",
    "docstring": "pygame.sndarray.make_sound(array): return Sound Convert an array into a Sound object. Create a new playable Sound object from an array. The mixer module must be initialized and the array format must be similar to the mixer audio format.",
    "type": "function",
    "file_path": "pygame\\src_py\\sndarray.py",
    "ast_data": "FunctionDef name:make_sound arg:array arguments arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "set",
    "source_code": "def set(self, value):\n    pywrap_tfe.TFE_MonitoringBoolGaugeCellSet(self._cell, value)",
    "docstring": "Atomically set the value. Args: value: bool value.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\monitoring.py",
    "ast_data": "FunctionDef name:set arg:self arg:value arguments arg arg Call"
  },
  {
    "library": "sphinx",
    "name": "versionmodified",
    "source_code": "class versionmodified(nodes.Admonition, nodes.TextElement):\n    pass",
    "docstring": "Node for version change entries. Currently used for \"versionadded\", \"versionchanged\", \"deprecated\" and \"versionremoved\" directives.",
    "type": "class",
    "file_path": "sphinx\\sphinx\\addnodes.py",
    "ast_data": "ClassDef name:versionmodified"
  },
  {
    "library": "tensorflow",
    "name": "clip",
    "source_code": "@dispatch.add_dispatch_support\n@doc_controls.do_not_generate_docs\ndef clip(x, min_value, max_value):\n    if isinstance(min_value, (int, float)) and isinstance(max_value, (int, float)):\n        if max_value < min_value:\n            max_value = min_value\n    if min_value is None:\n        min_value = -np.inf\n    if max_value is None:\n        max_value = np.inf\n    return clip_ops.clip_by_value(x, min_value, max_value)",
    "docstring": "Element-wise value clipping. Args: x: Tensor or variable. min_value: Python float, integer, or tensor. max_value: Python float, integer, or tensor. Returns: A tensor.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\backend.py",
    "ast_data": "FunctionDef name:clip arg:x arg:min_value arg:max_value arguments arg arg arg If BoolOp Call Call If Compare Assign If Compare Assign If Compare Assign Return return:yes Call"
  },
  {
    "library": "django",
    "name": "Length",
    "source_code": "class Length(Transform):\n    function = 'LENGTH'\n    lookup_name = 'length'\n    output_field = IntegerField()\n\n    def as_mysql(self, compiler, connection, **extra_context):\n        return super().as_sql(compiler, connection, function='CHAR_LENGTH', **extra_context)",
    "docstring": "Return the number of characters in the expression.",
    "type": "class",
    "file_path": "django\\django\\db\\models\\functions\\text.py",
    "ast_data": "ClassDef name:Length Assign Assign Assign Call FunctionDef name:as_mysql arg:self arg:compiler arg:connection arguments arg arg arg arg Return return:yes Call Call"
  },
  {
    "library": "pandas",
    "name": "construct_from_string",
    "source_code": "@classmethod\ndef construct_from_string(cls, string) -> Self:\n    if not isinstance(string, str):\n        raise TypeError(f\"'construct_from_string' expects a string, got {type(string)}\")\n    if string == 'string':\n        return cls()\n    elif string == 'str' and using_string_dtype():\n        return cls(na_value=np.nan)\n    elif string == 'string[python]':\n        return cls(storage='python')\n    elif string == 'string[pyarrow]':\n        return cls(storage='pyarrow')\n    elif string == 'string[pyarrow_numpy]':\n        return cls(storage='pyarrow_numpy')\n    else:\n        raise TypeError(f\"Cannot construct a '{cls.__name__}' from '{string}'\")",
    "docstring": "Construct a StringDtype from a string. Parameters ---------- string : str The type of the name. The storage type will be taking from . Valid options and their storage types are ========================== ============================================== string result storage ========================== ============================================== `` pyarrow ========================== ============================================== Returns ------- StringDtype Raise ----- TypeError If the string is not a valid option.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\arrays\\string_.py",
    "ast_data": "FunctionDef name:construct_from_string arg:cls arg:string arguments arg arg If Call Raise Call Call If Compare Return return:yes Call If BoolOp Compare Call Return return:yes Call If Compare Return return:yes Call If Compare Return return:yes Call If Compare Return return:yes Call Raise Call"
  },
  {
    "library": "tensorflow",
    "name": "queue_ref",
    "source_code": "@property\ndef queue_ref(self):\n    return self._queue_ref",
    "docstring": "The underlying queue reference.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\data_flow_ops.py",
    "ast_data": "FunctionDef name:queue_ref arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, variable: tf_variables.Variable, shard_id: int, num_shards: int, shard_dim: int, name: str):\n    self._shard_id = shard_id\n    self._variable = variable\n    var_offset = [0] * len(variable.shape)\n    var_offset[shard_dim] = shard_id * variable.shape[shard_dim]\n    fullshape = variable.shape.as_list()\n    fullshape[shard_dim] = num_shards * fullshape[shard_dim]\n    save_slice_info = tf_variables.Variable.SaveSliceInfo(full_name=name, full_shape=fullshape, var_offset=var_offset, var_shape=variable.shape.as_list())\n    spec = saveable_object.SaveSpec(tensor=variable.read_value, slice_spec=save_slice_info.spec, name=name, dtype=variable.dtype, device=variable.device)\n    super().__init__(variable.read_value, [spec], name)",
    "docstring": "Init TPUEmbeddingShardedSaveable.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\tpu\\tpu_embedding_v3.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:variable arg:shard_id arg:num_shards arg:shard_dim arg:name arguments arg arg arg arg arg arg Assign Assign Assign Call Assign Assign Call Assign Assign Call Call Assign Call Call Call"
  },
  {
    "library": "kornia",
    "name": "_transform_input3d_by_shape",
    "source_code": "def _transform_input3d_by_shape(input: Tensor, reference_shape: Tensor, match_channel: bool=True) -> Tensor:\n    B = reference_shape[-5] if len(reference_shape) >= 5 else None\n    C = reference_shape[-4] if len(reference_shape) >= 4 else None\n    if len(input.shape) == 3:\n        input = input.unsqueeze(0)\n    if len(input.shape) == 4 and B == input.shape[-4]:\n        input = input.unsqueeze(2)\n    if match_channel and C:\n        if not input.shape[-4] == C:\n            raise ValueError('The C dimension of tensor did not match with the reference tensor.')\n    elif match_channel and C is None:\n        raise ValueError('The reference tensor do not have a C dimension!')\n    return input",
    "docstring": "Reshape an input tensor to have the same dimensions as the reference_shape. Arguments: input: tensor to be transformed reference_shape: shape used as reference match_channel: if True, C_{src} == C_{ref}. otherwise, no constrain. C =1 by default",
    "type": "function",
    "file_path": "kornia\\kornia\\augmentation\\utils\\helpers.py",
    "ast_data": "FunctionDef name:_transform_input3d_by_shape arg:input arg:reference_shape arg:match_channel arguments arg arg arg Assign Compare Call Assign Compare Call If Compare Call Assign Call If BoolOp Compare Call Compare Assign Call If BoolOp If Compare Raise Call If BoolOp Compare Raise Call Return return:yes"
  },
  {
    "library": "pandas",
    "name": "_update",
    "source_code": "def _update(self, level: int) -> None:\n    sl = level + 1\n    stack = inspect.stack()\n    try:\n        self._get_vars(stack[:sl], scopes=['locals'])\n    finally:\n        del stack[:], stack",
    "docstring": "Update the current scope by going back levels. Parameters ---------- level : int",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\computation\\scope.py",
    "ast_data": "FunctionDef name:_update arg:self arg:level arguments arg arg Assign Assign Call Try Call"
  },
  {
    "library": "cherrypy",
    "name": "stop",
    "source_code": "def stop(self):\n    self.state = states.STOPPING\n    self.log('Bus STOPPING')\n    self.publish('stop')\n    self.state = states.STOPPED\n    self.log('Bus STOPPED')",
    "docstring": "Stop all services.",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\process\\wspbus.py",
    "ast_data": "FunctionDef name:stop arg:self arguments arg Assign Call Call Assign Call"
  },
  {
    "library": "tensorflow",
    "name": "_LazyLoader",
    "source_code": "class _LazyLoader(_types.ModuleType):\n\n    def __init__(self, local_name, parent_module_globals, name):\n        self._local_name = local_name\n        self._parent_module_globals = parent_module_globals\n        super(_LazyLoader, self).__init__(name)\n\n    def _load(self):\n        module = _importlib.import_module(self.__name__)\n        self._parent_module_globals[self._local_name] = module\n        self.__dict__.update(module.__dict__)\n        return module\n\n    def __getattr__(self, item):\n        module = self._load()\n        return getattr(module, item)\n\n    def __dir__(self):\n        module = self._load()\n        return dir(module)\n\n    def __reduce__(self):\n        return (__import__, (self.__name__,))",
    "docstring": "Lazily import a module so that we can forward it.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\virtual_root_template_v1.__init__.py",
    "ast_data": "ClassDef name:_LazyLoader FunctionDef name:__init__ arg:self arg:local_name arg:parent_module_globals arg:name arguments arg arg arg arg Assign Assign Call Call FunctionDef name:_load arg:self arguments arg Assign Call Assign Call Return return:yes FunctionDef name:__getattr__ arg:self arg:item arguments arg arg Assign Call Return return:yes Call FunctionDef name:__dir__ arg:self arguments arg Assign Call Return return:yes Call FunctionDef name:__reduce__ arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "sync_executors",
    "source_code": "def sync_executors(self):\n    if self._context_handle:\n        pywrap_tfe.TFE_ContextSyncExecutors(self._context_handle)\n    else:\n        raise ValueError('Context is not initialized.')",
    "docstring": "Sync both local executors and the ones on remote workers. In async execution mode, local function calls can return before the corresponding remote op/function execution requests are completed. Calling this method creates a synchronization barrier for remote executors. It only returns when all remote pending nodes are finished, potentially with errors if any remote executors are in error state. Raises: ValueError: if context is not initialized.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\context.py",
    "ast_data": "FunctionDef name:sync_executors arg:self arguments arg If Call Raise Call"
  },
  {
    "library": "matplotlib",
    "name": "has_data",
    "source_code": "def has_data(self):\n    return any((isinstance(a, (mcoll.Collection, mimage.AxesImage, mlines.Line2D, mpatches.Patch)) for a in self._children))",
    "docstring": "Return whether any artists have been added to the Axes. This should not be used to determine whether the *dataLim* need to be updated, and may not actually be useful for anything.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\axes\\_base.py",
    "ast_data": "FunctionDef name:has_data arg:self arguments arg Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "is_flash_attention_available",
    "source_code": "def is_flash_attention_available() -> bool:\n    return torch._C._is_flash_attention_available()",
    "docstring": "Check if PyTorch was built with FlashAttention for scaled_dot_product_attention. Returns: True if FlashAttention is built and available; otherwise, False. Note: This function is dependent on a CUDA-enabled build of PyTorch. It will return False in non-CUDA environments.",
    "type": "function",
    "file_path": "pytorch\\torch\\backends\\cuda\\__init__.py",
    "ast_data": "FunctionDef name:is_flash_attention_available arguments Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "StreamContext",
    "source_code": "class StreamContext:\n    cur_stream: Optional['torch.cuda.Stream']\n\n    def __init__(self, stream: Optional['torch.cuda.Stream']):\n        self.stream = stream\n        self.idx = _get_device_index(None, True)\n        if not torch.jit.is_scripting():\n            if self.idx is None:\n                self.idx = -1\n        self.src_prev_stream = None if not torch.jit.is_scripting() else torch.cuda.default_stream(None)\n        self.dst_prev_stream = None if not torch.jit.is_scripting() else torch.cuda.default_stream(None)\n\n    def __enter__(self):\n        cur_stream = self.stream\n        if cur_stream is None or self.idx == -1:\n            return\n        self.src_prev_stream = torch.cuda.current_stream(None)\n        if self.src_prev_stream.device != cur_stream.device:\n            with device(cur_stream.device):\n                self.dst_prev_stream = torch.cuda.current_stream(cur_stream.device)\n        torch.cuda.set_stream(cur_stream)\n\n    def __exit__(self, type: Any, value: Any, traceback: Any):\n        cur_stream = self.stream\n        if cur_stream is None or self.idx == -1:\n            return\n        if self.src_prev_stream.device != cur_stream.device:\n            torch.cuda.set_stream(self.dst_prev_stream)\n        torch.cuda.set_stream(self.src_prev_stream)",
    "docstring": "Context-manager that selects a given stream. All CUDA kernels queued within its context will be enqueued on a selected stream. Args: Stream (Stream): selected stream. This manager is a no-op if it's ``. .. note:: Streams are per-device.",
    "type": "class",
    "file_path": "pytorch\\torch\\cuda\\__init__.py",
    "ast_data": "ClassDef name:StreamContext FunctionDef name:__init__ arg:self arg:stream arguments arg arg Assign Assign Call If Call If Compare Assign Assign Call Call Assign Call Call FunctionDef name:__enter__ arg:self arguments arg Assign If BoolOp Compare Compare Return return:no Assign Call If Compare With Call Assign Call Call FunctionDef name:__exit__ arg:self arg:type arg:value arg:traceback arguments arg arg arg arg Assign If BoolOp Compare Compare Return return:no If Compare Call Call"
  },
  {
    "library": "tensorflow",
    "name": "get_config",
    "source_code": "def get_config(self):\n    from tensorflow.python.feature_column.serialization import serialize_feature_column\n    config = dict(zip(self._fields, self))\n    config['keys'] = tuple([serialize_feature_column(fc) for fc in self.keys])\n    return config",
    "docstring": "See 'FeatureColumn` base class.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\feature_column\\feature_column_v2.py",
    "ast_data": "FunctionDef name:get_config arg:self arguments arg Assign Call Call Assign Call Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "_val_or_rc",
    "source_code": "def _val_or_rc(val, rc_name):\n    return val if val is not None else rcParams[rc_name]",
    "docstring": "If *val* is None, return ``, otherwise return val.",
    "type": "function",
    "file_path": "matplotlib\\lib\\matplotlib\\__init__.py",
    "ast_data": "FunctionDef name:_val_or_rc arg:val arg:rc_name arguments arg arg Return return:yes Compare"
  },
  {
    "library": "django",
    "name": "Greatest",
    "source_code": "class Greatest(Func):\n    function = 'GREATEST'\n\n    def __init__(self, *expressions, **extra):\n        if len(expressions) < 2:\n            raise ValueError('Greatest must take at least two expressions')\n        super().__init__(*expressions, **extra)\n\n    def as_sqlite(self, compiler, connection, **extra_context):\n        return super().as_sqlite(compiler, connection, function='MAX', **extra_context)",
    "docstring": "Return the maximum expression. If any expression is null the return value is database-specific: On PostgreSQL, the maximum not-null expression is returned. On MySQL, Oracle, and SQLite, if any expression is null, null is returned.",
    "type": "class",
    "file_path": "django\\django\\db\\models\\functions\\comparison.py",
    "ast_data": "ClassDef name:Greatest Assign FunctionDef name:__init__ arg:self arguments arg arg arg If Compare Call Raise Call Call Call FunctionDef name:as_sqlite arg:self arg:compiler arg:connection arguments arg arg arg arg Return return:yes Call Call"
  },
  {
    "library": "authlib",
    "name": "prepare_form_encoded_body",
    "source_code": "def prepare_form_encoded_body(oauth_params, body):\n    return url_encode(_append_params(oauth_params, body))",
    "docstring": "Prepare the Form-Encoded Body. Per _ of the spec. .. _:",
    "type": "function",
    "file_path": "authlib\\authlib\\oauth1\\rfc5849\\parameters.py",
    "ast_data": "FunctionDef name:prepare_form_encoded_body arg:oauth_params arg:body arguments arg arg Return return:yes Call Call"
  },
  {
    "library": "kornia",
    "name": "vector_to_skew_symmetric_matrix",
    "source_code": "def vector_to_skew_symmetric_matrix(vec: Tensor) -> Tensor:\n    if vec.shape[-1] != 3 or len(vec.shape) > 2:\n        raise ValueError(f'Input vector must be of shape (B, 3) or (3,). Got {vec.shape}')\n    v1, v2, v3 = (vec[..., 0], vec[..., 1], vec[..., 2])\n    zeros = zeros_like(v1)\n    skew_symmetric_matrix = stack([stack([zeros, -v3, v2], dim=-1), stack([v3, zeros, -v1], dim=-1), stack([-v2, v1, zeros], dim=-1)], dim=-2)\n    return skew_symmetric_matrix",
    "docstring": "Convert a vector to a skew symmetric matrix. A vector :math: has a corresponding skew-symmetric matrix, which is of the form: .. math:: \\begin{bmatrix} 0 & -v3 & v2 \\\\ v3 & 0 & -v1 \\\\ -v2 & v1 & 0\\end{bmatrix} Args: vec: tensor of shape :math:. Returns: tensor of shape :math:. Example: >>> vec = torch.tensor([1.0, 2.0, 3.0]) >>> vector_to_skew_symmetric_matrix(vec) tensor([[ 0., -3., 2.], [ 3., 0., -1.], [-2., 1., 0.]])",
    "type": "function",
    "file_path": "kornia\\kornia\\geometry\\conversions.py",
    "ast_data": "FunctionDef name:vector_to_skew_symmetric_matrix arg:vec arguments arg If BoolOp Compare Compare Call Raise Call Assign Assign Call Assign Call Call Call Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "_normalize_shortcut",
    "source_code": "def _normalize_shortcut(self, key):\n    special = {'backspace': 'BackSpace', 'pagedown': 'Page_Down', 'pageup': 'Page_Up', 'scroll_lock': 'Scroll_Lock'}\n    parts = key.split('+')\n    mods = ['<' + mod + '>' for mod in parts[:-1]]\n    key = parts[-1]\n    if key in special:\n        key = special[key]\n    elif len(key) > 1:\n        key = key.capitalize()\n    elif key.isupper():\n        mods += ['<shift>']\n    return ''.join(mods) + key",
    "docstring": "Convert Matplotlib key presses to GTK+ accelerator identifiers. Related to .",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\backends\\backend_gtk4.py",
    "ast_data": "FunctionDef name:_normalize_shortcut arg:self arg:key arguments arg arg Assign Assign Call Assign Assign If Compare Assign If Compare Call Assign Call If Call Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "_combine_bounds",
    "source_code": "def _combine_bounds(name, user_bounds, shape_domain, integral):\n    user_bounds = np.atleast_1d(user_bounds)\n    if user_bounds[0] > user_bounds[1]:\n        message = f'There are no values for `{name}` on the interval {list(user_bounds)}.'\n        raise ValueError(message)\n    bounds = (max(user_bounds[0], shape_domain[0]), min(user_bounds[1], shape_domain[1]))\n    if integral and np.ceil(bounds[0]) > np.floor(bounds[1]):\n        message = f'There are no integer values for `{name}` on the interval defined by the user-provided bounds and the domain of the distribution.'\n        raise ValueError(message)\n    elif not integral and bounds[0] > bounds[1]:\n        message = f'There are no values for `{name}` on the interval defined by the user-provided bounds and the domain of the distribution.'\n        raise ValueError(message)\n    if not np.all(np.isfinite(bounds)):\n        message = f'The intersection of user-provided bounds for `{name}` and the domain of the distribution is not finite. Please provide finite bounds for shape `{name}` in `bounds`.'\n        raise ValueError(message)\n    return bounds",
    "docstring": "Intersection of user-defined bounds and distribution PDF/PMF domain",
    "type": "function",
    "file_path": "scipy\\scipy\\stats\\_fit.py",
    "ast_data": "FunctionDef name:_combine_bounds arg:name arg:user_bounds arg:shape_domain arg:integral arguments arg arg arg arg Assign Call If Compare Assign Call Raise Call Assign Call Call If BoolOp Compare Call Call Assign Raise Call If BoolOp Compare Assign Raise Call If Call Call Assign Raise Call Return return:yes"
  },
  {
    "library": "kornia",
    "name": "_infer_batch_shape",
    "source_code": "def _infer_batch_shape(input: Union[Tensor, Tuple[Tensor, Tensor]]) -> torch.Size:\n    if isinstance(input, tuple):\n        tensor = _transform_input(input[0])\n    else:\n        tensor = _transform_input(input)\n    return tensor.shape",
    "docstring": "Infer input shape. Input may be either (tensor,) or (tensor, transform_matrix)",
    "type": "function",
    "file_path": "kornia\\kornia\\augmentation\\utils\\helpers.py",
    "ast_data": "FunctionDef name:_infer_batch_shape arg:input arguments arg If Call Assign Call Assign Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "_get_legend_handles",
    "source_code": "def _get_legend_handles(axs, legend_handler_map=None):\n    handles_original = []\n    for ax in axs:\n        handles_original += [*(a for a in ax._children if isinstance(a, (Line2D, Patch, Collection, Text))), *ax.containers]\n        if hasattr(ax, 'parasites'):\n            for axx in ax.parasites:\n                handles_original += [*(a for a in axx._children if isinstance(a, (Line2D, Patch, Collection, Text))), *axx.containers]\n    handler_map = {**Legend.get_default_handler_map(), **(legend_handler_map or {})}\n    has_handler = Legend.get_legend_handler\n    for handle in handles_original:\n        label = handle.get_label()\n        if label != '_nolegend_' and has_handler(handler_map, handle):\n            yield handle\n        elif label and (not label.startswith('_')) and (not has_handler(handler_map, handle)):\n            _api.warn_external(f'Legend does not support handles for {type(handle).__name__} instances.\\nSee: https://matplotlib.org/stable/tutorials/intermediate/legend_guide.html#implementing-a-custom-legend-handler')\n            continue",
    "docstring": "Yield artists that can be used as handles in a legend.",
    "type": "function",
    "file_path": "matplotlib\\lib\\matplotlib\\legend.py",
    "ast_data": "FunctionDef name:_get_legend_handles arg:axs arg:legend_handler_map arguments arg arg Assign For Call If Call For Call Assign Call BoolOp Assign For Assign Call If BoolOp Compare Call If BoolOp Call Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "version",
    "source_code": "def version() -> Optional[int]:\n    if not _init():\n        return None\n    return __cusparselt_version",
    "docstring": "Return the version of cuSPARSELt",
    "type": "function",
    "file_path": "pytorch\\torch\\backends\\cusparselt\\__init__.py",
    "ast_data": "FunctionDef name:version arguments If Call Return return:no Return return:yes"
  },
  {
    "library": "cherrypy",
    "name": "_set_response",
    "source_code": "def _set_response(body):\n    response = cherrypy.response\n    response.status = '200 OK'\n    response.body = ntob(body, 'utf-8')\n    response.headers['Content-Type'] = 'text/xml'\n    response.headers['Content-Length'] = len(body)",
    "docstring": "Set up HTTP status, headers and body within CherryPy.",
    "type": "function",
    "file_path": "cherrypy\\cherrypy\\lib\\xmlrpcutil.py",
    "ast_data": "FunctionDef name:_set_response arg:body arguments arg Assign Assign Assign Call Assign Assign Call"
  },
  {
    "library": "cherrypy",
    "name": "__next__",
    "source_code": "def __next__(self):\n    chunk = self.input.read(self.chunkSize)\n    if chunk:\n        return chunk\n    else:\n        if hasattr(self.input, 'close'):\n            self.input.close()\n        raise StopIteration()",
    "docstring": "Return next chunk of file.",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\lib\\__init__.py",
    "ast_data": "FunctionDef name:__next__ arg:self arguments arg Assign Call If Return return:yes If Call Call Raise Call"
  },
  {
    "library": "cherrypy",
    "name": "__iter__",
    "source_code": "def __iter__(self):\n    return self",
    "docstring": "Set up the iterator.",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\_cpreqbody.py",
    "ast_data": "FunctionDef name:__iter__ arg:self arguments arg Return return:yes"
  },
  {
    "library": "scrapy",
    "name": "unique",
    "source_code": "def unique(list_: Iterable[_T], key: Callable[[_T], Any]=lambda x: x) -> list[_T]:\n    seen = set()\n    result: list[_T] = []\n    for item in list_:\n        seenkey = key(item)\n        if seenkey in seen:\n            continue\n        seen.add(seenkey)\n        result.append(item)\n    return result",
    "docstring": "efficient function to uniquify a list preserving item order",
    "type": "function",
    "file_path": "scrapy\\scrapy\\utils\\python.py",
    "ast_data": "FunctionDef name:unique arg:list_ arg:key arguments arg arg arguments arg Assign Call For Assign Call If Compare Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "run_script_path",
    "source_code": "def run_script_path(training_script: str, *training_script_args: str):\n    import runpy\n    import sys\n    sys.argv = [training_script] + [*training_script_args]\n    runpy.run_path(sys.argv[0], run_name='__main__')",
    "docstring": "Run the provided from within this interpreter. Usage:",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\run.py",
    "ast_data": "FunctionDef name:run_script_path arg:training_script arguments arg arg Assign Call"
  },
  {
    "library": "pandas",
    "name": "NoBufferPresent",
    "source_code": "class NoBufferPresent(Exception):\n    pass",
    "docstring": "Exception is raised in _get_data_buffer to signal that there is no requested buffer.",
    "type": "class",
    "file_path": "pandas\\pandas\\errors\\__init__.py",
    "ast_data": "ClassDef name:NoBufferPresent"
  },
  {
    "library": "tensorflow",
    "name": "register_resource",
    "source_code": "def register_resource(handle, create_op, is_initialized_op, is_shared=True):\n    resource = _Resource(handle, create_op, is_initialized_op)\n    if is_shared:\n        ops.add_to_collection(ops.GraphKeys.RESOURCES, resource)\n    else:\n        ops.add_to_collection(ops.GraphKeys.LOCAL_RESOURCES, resource)",
    "docstring": "Registers a resource into the appropriate collections. This makes the resource findable in either the shared or local resources collection. Args: handle: op which returns a handle for the resource. create_op: op which initializes the resource. is_initialized_op: op which returns a scalar boolean tensor of whether the resource has been initialized. is_shared: if True, the resource gets added to the shared resource collection; otherwise it gets added to the local resource collection.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\resources.py",
    "ast_data": "FunctionDef name:register_resource arg:handle arg:create_op arg:is_initialized_op arg:is_shared arguments arg arg arg arg Assign Call If Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_get_native_delegate_pointer",
    "source_code": "def _get_native_delegate_pointer(self):\n    return self._delegate_ptr",
    "docstring": "Returns the native TfLiteDelegate pointer. It is not safe to copy this pointer because it needs to be freed. Returns: TfLiteDelegate *",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\lite\\python\\interpreter.py",
    "ast_data": "FunctionDef name:_get_native_delegate_pointer arg:self arguments arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "check_keys",
    "source_code": "def check_keys(self, keys: Iterable[str]) -> tuple[list[str], list[str]]:\n    keys = set(keys)\n    valid_keys = {name for name, _ in self.named_tensors(remove_duplicate=False)}\n    missing_keys = valid_keys - keys\n    unexpected_keys = keys - valid_keys\n    return (sorted(missing_keys), sorted(unexpected_keys))",
    "docstring": "Check that the given keys are valid.",
    "type": "method",
    "file_path": "pytorch\\torch\\nn\\utils\\_named_member_accessor.py",
    "ast_data": "FunctionDef name:check_keys arg:self arg:keys arguments arg arg Assign Call Assign Call Assign Assign Return return:yes Call Call"
  },
  {
    "library": "sphinx",
    "name": "check_consistency",
    "source_code": "def check_consistency(self) -> None:\n    pass",
    "docstring": "Do consistency checks (**experimental**).",
    "type": "method",
    "file_path": "sphinx\\sphinx\\domains\\__init__.py",
    "ast_data": "FunctionDef name:check_consistency arg:self arguments arg"
  },
  {
    "library": "tensorflow",
    "name": "_recreate",
    "source_code": "def _recreate(self, proto, node_id, nodes):\n    registered_class = registration.get_registered_class(proto.registered_name)\n    if registered_class is None:\n        registered_class = _BUILT_IN_REGISTRATIONS.get(proto.WhichOneof('kind'))\n    dependencies = {}\n    for key, dep_node_id in self._get_node_dependencies(proto).items():\n        dependencies[key] = nodes[dep_node_id]\n    if registered_class:\n        obj = registered_class._deserialize_from_proto(proto=proto.serialized_user_proto, object_proto=proto, dependencies=dependencies, export_dir=self._export_dir, asset_file_def=self._asset_file_def, operation_attributes=self._operation_attributes)\n        if isinstance(obj, base.Trackable):\n            setter = type(obj)._add_trackable_child\n        else:\n            setter = setattr\n        return (obj, setter)\n    else:\n        return self._recreate_default(proto, node_id, dependencies)",
    "docstring": "Creates a Python object from a SavedObject protocol buffer. Args: proto: a SavedObject proto node_id: int, the index of this object in the SavedObjectGraph node list. nodes: dict mapping int node_ids -> created objects. Returns: The recreated object, and the set-attribute function for reconnecting the trackable children.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\saved_model\\load.py",
    "ast_data": "FunctionDef name:_recreate arg:self arg:proto arg:node_id arg:nodes arguments arg arg arg arg Assign Call If Compare Assign Call Call Assign For Call Call Assign If Assign Call If Call Assign Call Assign Return return:yes Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_get_grouped_variables",
    "source_code": "def _get_grouped_variables(vars_to_warm_start):\n    if isinstance(vars_to_warm_start, str) or vars_to_warm_start is None:\n        logging.info('Warm-starting variables only in TRAINABLE_VARIABLES.')\n        list_of_vars = ops.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES, scope=vars_to_warm_start)\n    elif isinstance(vars_to_warm_start, list):\n        if all((isinstance(v, str) for v in vars_to_warm_start)):\n            list_of_vars = []\n            for v in vars_to_warm_start:\n                list_of_vars += ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES, scope=v)\n        elif all((checkpoint_utils._is_variable(v) for v in vars_to_warm_start)):\n            list_of_vars = vars_to_warm_start\n        else:\n            raise ValueError('If `vars_to_warm_start` is a list, it must be all `Variable` or all `str`.  Given types are {}'.format([type(v) for v in vars_to_warm_start]))\n    else:\n        raise ValueError('`vars_to_warm_start must be a `list` or `str`.  Given type is {}'.format(type(vars_to_warm_start)))\n    grouped_variables = {}\n    for v in list_of_vars:\n        t = [v] if not isinstance(v, list) else v\n        var_name = _infer_var_name(t)\n        grouped_variables.setdefault(var_name, []).append(v)\n    return grouped_variables",
    "docstring": "Collects and groups (possibly partitioned) variables into a dictionary. The variables can be provided explicitly through vars_to_warm_start, or they are retrieved from collections (see below). Args: vars_to_warm_start: One of the following: - A regular expression (string) that captures which variables to warm-start (see tf.compat.v1.get_collection). This expression will only consider variables in the TRAINABLE_VARIABLES collection. - A list of strings, each representing a full variable name to warm-start. These will consider variables in GLOBAL_VARIABLES collection. - A list of Variables to warm-start. - , in which case all variables in TRAINABLE_VARIABLES will be used. Returns: A dictionary mapping variable names (strings) to lists of Variables. Raises: ValueError: If vars_to_warm_start is not a string, , a list of , or a list of strings.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\training\\warm_starting_util.py",
    "ast_data": "FunctionDef name:_get_grouped_variables arg:vars_to_warm_start arguments arg If BoolOp Call Compare Call Assign Call If Call If Call Call Assign For Call If Call Call Assign Raise Call Call Call Raise Call Call Call Assign For Assign Call Assign Call Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "IsLoopMerge",
    "source_code": "def IsLoopMerge(op):\n    if IsMerge(op):\n        ctxt = op._get_control_flow_context()\n        return ctxt is not None and ctxt.IsWhileContext() and (not IsCondMerge(op))\n    return False",
    "docstring": "Return true if is the Merge for a while loop.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\control_flow_util.py",
    "ast_data": "FunctionDef name:IsLoopMerge arg:op arguments arg If Call Assign Call Return return:yes BoolOp Compare Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "PythonState",
    "source_code": "@tf_export('train.experimental.PythonState')\nclass PythonState(base.Trackable, metaclass=abc.ABCMeta):\n\n    @abc.abstractmethod\n    def serialize(self):\n        pass\n\n    @abc.abstractmethod\n    def deserialize(self, string_value):\n        pass\n\n    def _serialize_to_tensors(self):\n        with ops.init_scope():\n            value = constant_op.constant(self.serialize(), dtype=dtypes.string)\n        return {PYTHON_STATE: value}",
    "docstring": "A mixin for putting Python state in an object-based checkpoint. This is an abstract class which allows extensions to TensorFlow's object-based checkpointing (see ). For example a wrapper for NumPy arrays: Instances of are checkpointable objects, and will be saved and restored from checkpoints along with TensorFlow state like variables.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\trackable\\python_state.py",
    "ast_data": "ClassDef name:PythonState FunctionDef name:serialize arg:self arguments arg FunctionDef name:deserialize arg:self arg:string_value arguments arg arg FunctionDef name:_serialize_to_tensors arg:self arguments arg With Call Assign Call Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "step",
    "source_code": "@_use_grad_for_differentiable\ndef step(self, closure=None):\n    self._cuda_graph_capture_health_check()\n    loss = None\n    if closure is not None:\n        with torch.enable_grad():\n            loss = closure()\n    for group in self.param_groups:\n        params_with_grad: list[Tensor] = []\n        grads: list[Tensor] = []\n        exp_avgs: list[Tensor] = []\n        exp_avg_sqs: list[Tensor] = []\n        max_exp_avg_sqs: list[Tensor] = []\n        state_steps: list[Tensor] = []\n        beta1, beta2 = group['betas']\n        has_complex = self._init_group(group, params_with_grad, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps)\n        adam(params_with_grad, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, amsgrad=group['amsgrad'], has_complex=has_complex, beta1=beta1, beta2=beta2, lr=group['lr'], weight_decay=group['weight_decay'], eps=group['eps'], maximize=group['maximize'], foreach=group['foreach'], capturable=group['capturable'], differentiable=group['differentiable'], fused=group['fused'], grad_scale=getattr(self, 'grad_scale', None), found_inf=getattr(self, 'found_inf', None), decoupled_weight_decay=group['decoupled_weight_decay'])\n    return loss",
    "docstring": "Perform a single optimization step. Args: closure (Callable, optional): A closure that reevaluates the model and returns the loss.",
    "type": "method",
    "file_path": "pytorch\\torch\\optim\\adam.py",
    "ast_data": "FunctionDef name:step arg:self arg:closure arguments arg arg Call Assign If Compare With Call Assign Call For Assign Assign Call Call Call Call Return return:yes"
  },
  {
    "library": "pandas",
    "name": "validate_putmask",
    "source_code": "def validate_putmask(values: ArrayLike | MultiIndex, mask: np.ndarray) -> tuple[npt.NDArray[np.bool_], bool]:\n    mask = extract_bool_array(mask)\n    if mask.shape != values.shape:\n        raise ValueError('putmask: mask and data must be the same size')\n    noop = not mask.any()\n    return (mask, noop)",
    "docstring": "Validate mask and check if this putmask operation is a no-op.",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\array_algos\\putmask.py",
    "ast_data": "FunctionDef name:validate_putmask arg:values arg:mask arguments arg arg Assign Call If Compare Raise Call Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "v",
    "source_code": "@property\ndef v(self):\n    return self._v",
    "docstring": "If this operator is , this is the .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\linalg\\linear_operator_low_rank_update.py",
    "ast_data": "FunctionDef name:v arg:self arguments arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "is_tf32_supported",
    "source_code": "def is_tf32_supported() -> bool:\n    if torch.version.hip:\n        prop_name = torch.cuda.get_device_properties().gcnArchName\n        archs = ('gfx94', 'gfx95')\n        for arch in archs:\n            if arch in prop_name:\n                return True\n        return False\n    return is_bf16_supported(including_emulation=False)",
    "docstring": "Return a bool indicating if the current CUDA/ROCm device supports dtype tf32.",
    "type": "function",
    "file_path": "pytorch\\torch\\cuda\\__init__.py",
    "ast_data": "FunctionDef name:is_tf32_supported arguments If Assign Call Assign For If Compare Return return:yes Return return:yes Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "loc",
    "source_code": "@property\ndef loc(self):\n    return self._loc",
    "docstring": "Locations of these Student's t distribution(s).",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\distributions\\student_t.py",
    "ast_data": "FunctionDef name:loc arg:self arguments arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "compile",
    "source_code": "def compile(*args, **kwargs):\n    return torch.compile(*args, **kwargs)",
    "docstring": "See :func: for details on the arguments for this function.",
    "type": "function",
    "file_path": "pytorch\\torch\\compiler\\__init__.py",
    "ast_data": "FunctionDef name:compile arguments arg arg Return return:yes Call"
  },
  {
    "library": "sphinx",
    "name": "hlistcol",
    "source_code": "class hlistcol(nodes.Element):\n    pass",
    "docstring": "Node for one column in a horizontal list.",
    "type": "class",
    "file_path": "sphinx\\sphinx\\addnodes.py",
    "ast_data": "ClassDef name:hlistcol"
  },
  {
    "library": "django",
    "name": "decr",
    "source_code": "def decr(self, key, delta=1, version=None):\n    return self.incr(key, -delta, version=version)",
    "docstring": "Subtract delta from value in the cache. If the key does not exist, raise a ValueError exception.",
    "type": "method",
    "file_path": "django\\django\\core\\cache\\backends\\base.py",
    "ast_data": "FunctionDef name:decr arg:self arg:key arg:delta arg:version arguments arg arg arg arg Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "set_fontproperties",
    "source_code": "def set_fontproperties(self, fp):\n    self._fontproperties = FontProperties._from_any(fp).copy()\n    self.stale = True",
    "docstring": "Set the font properties that control the text. Parameters ---------- fp : or or If a , it is interpreted as a fontconfig pattern parsed by . If a , it is interpreted as the absolute path to a font file.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\text.py",
    "ast_data": "FunctionDef name:set_fontproperties arg:self arg:fp arguments arg arg Assign Call Call Assign"
  },
  {
    "library": "kornia",
    "name": "get_box_kernel1d",
    "source_code": "def get_box_kernel1d(kernel_size: int, *, device: Optional[Device]=None, dtype: Optional[Dtype]=None) -> Tensor:\n    scale = tensor(1.0 / kernel_size, device=device, dtype=dtype)\n    return scale.expand(1, kernel_size)",
    "docstring": "Return a 1-D box filter. Args: kernel_size: the size of the kernel. device: the desired device of returned tensor. dtype: the desired data type of returned tensor. Returns: A tensor with shape :math:, filled with the value :math:.",
    "type": "function",
    "file_path": "kornia\\kornia\\filters\\kernels.py",
    "ast_data": "FunctionDef name:get_box_kernel1d arg:kernel_size arguments arg arg arg Assign Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "assertProtoEqual",
    "source_code": "def assertProtoEqual(self, a, b, check_initialized=True, normalize_numbers=False, msg=None, relative_tolerance=None):\n    pool = descriptor_pool.Default()\n    if isinstance(a, str):\n        a = text_format.Parse(a, b.__class__(), descriptor_pool=pool)\n    for pb in (a, b):\n        if check_initialized:\n            errors = pb.FindInitializationErrors()\n            if errors:\n                self.fail('Initialization errors: %s\\n%s' % (errors, pb))\n        if normalize_numbers:\n            NormalizeNumberFields(pb)\n    if relative_tolerance is not None:\n        checkFloatEqAndReplace(self, expected=b, actual=a, relative_tolerance=relative_tolerance)\n    a_str = text_format.MessageToString(a, descriptor_pool=pool)\n    b_str = text_format.MessageToString(b, descriptor_pool=pool)\n    if len(a_str) < 2 ** 16 and len(b_str) < 2 ** 16:\n        self.assertMultiLineEqual(a_str, b_str, msg=msg)\n    else:\n        diff = ''.join(difflib.unified_diff(a_str.splitlines(True), b_str.splitlines(True)))\n        if diff:\n            self.fail('%s :\\n%s' % (msg, diff))",
    "docstring": "Fails with a useful error if a and b aren't equal. Comparison of repeated fields matches the semantics of unittest.TestCase.assertEqual(), ie order and extra duplicates fields matter. Args: self: googletest.TestCase a: proto2 PB instance, or text string representing one. b: proto2 PB instance -- message.Message or subclass thereof. check_initialized: boolean, whether to fail if either a or b isn't initialized. normalize_numbers: boolean, whether to normalize types and precision of numbers before comparison. msg: if specified, is used as the error message on failure. relative_tolerance: float, relative tolerance. If this is not provided, then all floats are compared using string comparison otherwise, floating point comparisons are done using the relative tolerance provided.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\util\\protobuf\\compare.py",
    "ast_data": "FunctionDef name:assertProtoEqual arg:self arg:a arg:b arg:check_initialized arg:normalize_numbers arg:msg arg:relative_tolerance arguments arg arg arg arg arg arg arg Assign Call If Call Assign Call Call For If Assign Call If Call If Call If Compare Call Assign Call Assign Call If BoolOp Compare Call Compare Call Call Assign Call Call Call Call If Call"
  },
  {
    "library": "tensorflow",
    "name": "_MarkReachedOps",
    "source_code": "def _MarkReachedOps(from_ops, reached_ops, func_graphs):\n    queue = collections.deque()\n    queue.extend(from_ops)\n    while queue:\n        op = queue.popleft()\n        if op not in reached_ops:\n            reached_ops.add(op)\n            for output in op.outputs:\n                if backprop_util.IsTrainable(output):\n                    queue.extend(_Consumers(output, func_graphs))",
    "docstring": "Mark all ops reached from \"from_ops\". Args: from_ops: list of Operations. reached_ops: set of Operations. func_graphs: list of FuncGraphs. This method will traverse through these functions if they capture from_ops or any reachable ops.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\gradients_util.py",
    "ast_data": "FunctionDef name:_MarkReachedOps arg:from_ops arg:reached_ops arg:func_graphs arguments arg arg arg Assign Call Call While Assign Call If Compare Call For If Call Call Call"
  },
  {
    "library": "kornia",
    "name": "short2rgb",
    "source_code": "def short2rgb(short: str) -> str:\n    return SHORT2RGB_DICT[short]",
    "docstring": "Convert short to RGB code.",
    "type": "function",
    "file_path": "kornia\\kornia\\utils\\image_print.py",
    "ast_data": "FunctionDef name:short2rgb arg:short arguments arg Return return:yes"
  },
  {
    "library": "scipy",
    "name": "VertexVectorField",
    "source_code": "class VertexVectorField(VertexBase):\n\n    def __init__(self, x, sfield=None, vfield=None, field_args=(), vfield_args=(), g_cons=None, g_cons_args=(), nn=None, index=None):\n        super().__init__(x, nn=nn, index=index)\n        raise NotImplementedError('This class is still a work in progress')",
    "docstring": "Add homology properties of a scalar field f: R^n --> R^m associated with the geometry built from the VertexBase class.",
    "type": "class",
    "file_path": "scipy\\scipy\\optimize\\_shgo_lib\\_vertex.py",
    "ast_data": "ClassDef name:VertexVectorField FunctionDef name:__init__ arg:self arg:x arg:sfield arg:vfield arg:field_args arg:vfield_args arg:g_cons arg:g_cons_args arg:nn arg:index arguments arg arg arg arg arg arg arg arg arg arg Call Call Raise Call"
  },
  {
    "library": "matplotlib",
    "name": "_clear",
    "source_code": "def _clear(self):\n    self._position = None",
    "docstring": "Clear things directly related to the spine. In this way it is possible to avoid clearing the Axis as well when calling from library code where it is known that the Axis is cleared separately.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\spines.py",
    "ast_data": "FunctionDef name:_clear arg:self arguments arg Assign"
  },
  {
    "library": "pytorch",
    "name": "get_tensors_ts_device_data_node",
    "source_code": "def get_tensors_ts_device_data_node(tensors):\n    return torch._C._lazy_ts_backend._get_tensors_ts_device_data_node(tensors)",
    "docstring": "Return tensor ids and eager tensors for DeviceData nodes in the IR for the passed in lazy tensors. TODO: This API is currently ts backend specific. We are working on generalizing it to all backends including XLA.",
    "type": "function",
    "file_path": "pytorch\\torch\\_lazy\\computation.py",
    "ast_data": "FunctionDef name:get_tensors_ts_device_data_node arg:tensors arguments arg Return return:yes Call"
  },
  {
    "library": "numpy",
    "name": "__generator_ctor",
    "source_code": "def __generator_ctor(bit_generator_name='MT19937', bit_generator_ctor=__bit_generator_ctor):\n    if isinstance(bit_generator_name, BitGenerator):\n        return Generator(bit_generator_name)\n    return Generator(bit_generator_ctor(bit_generator_name))",
    "docstring": "Pickling helper function that returns a Generator object Parameters ---------- bit_generator_name : str or BitGenerator String containing the core BitGenerator's name or a BitGenerator instance bit_generator_ctor : callable, optional Callable function that takes bit_generator_name as its only argument and returns an instantized bit generator. Returns ------- rg : Generator Generator using the named core BitGenerator",
    "type": "function",
    "file_path": "numpy\\numpy\\random\\_pickle.py",
    "ast_data": "FunctionDef name:__generator_ctor arg:bit_generator_name arg:bit_generator_ctor arguments arg arg If Call Return return:yes Call Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "param_shapes",
    "source_code": "@classmethod\ndef param_shapes(cls, sample_shape, name='DistributionParamShapes'):\n    with ops.name_scope(name, values=[sample_shape]):\n        return cls._param_shapes(sample_shape)",
    "docstring": "Shapes of parameters given the desired shape of a call to . This is a class method that describes what key/value arguments are required to instantiate the given so that a particular shape is returned for that instance's call to . Subclasses should override class method . Args: sample_shape: or python list/tuple. Desired shape of a call to . name: name to prepend ops with. Returns: of parameter name to shapes.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\distributions\\distribution.py",
    "ast_data": "FunctionDef name:param_shapes arg:cls arg:sample_shape arg:name arguments arg arg arg With Call Return return:yes Call"
  },
  {
    "library": "kornia",
    "name": "_get_convex_edges",
    "source_code": "def _get_convex_edges(polygon: Tensor, h: int, w: int) -> Tuple[Tensor, Tensor]:\n    dtype = polygon.dtype\n    if not torch.allclose(polygon[..., -1, :], polygon[..., 0, :]):\n        polygon = torch.cat((polygon, polygon[..., :1, :]), dim=-2)\n    x_start, y_start = (polygon[..., :-1, 0], polygon[..., :-1, 1])\n    x_end, y_end = (polygon[..., 1:, 0], polygon[..., 1:, 1])\n    ys = torch.arange(h, device=polygon.device, dtype=dtype)\n    dx = ((x_end - x_start) / (y_end - y_start + 1e-12)).clamp(-w, w)\n    xs = (ys[..., :, None] - y_start[..., None, :]) * dx[..., None, :] + x_start[..., None, :]\n    valid_edges = (y_start[..., None, :] <= ys[..., :, None]).logical_and(ys[..., :, None] <= y_end[..., None, :])\n    valid_edges |= (y_start[..., None, :] >= ys[..., :, None]).logical_and(ys[..., :, None] >= y_end[..., None, :])\n    x_left_edges = xs.clone()\n    x_left_edges[~valid_edges] = w\n    x_right_edges = xs.clone()\n    x_right_edges[~valid_edges] = -1\n    x_left = x_left_edges.min(dim=-1).values\n    x_right = x_right_edges.max(dim=-1).values\n    return (x_left, x_right)",
    "docstring": "Get the left and right edges of a polygon for each y-coordinate y \\in [0, h). Args: polygon: represents polygons to draw in BxNx2 N is the number of points 2 is (x, y). h: bottom most coordinate (top coordinate is assumed to be 0) w: right most coordinate (left coordinate is assumed to be 0) Returns: The left and right edges of the polygon of shape (B,B).",
    "type": "function",
    "file_path": "kornia\\kornia\\utils\\draw.py",
    "ast_data": "FunctionDef name:_get_convex_edges arg:polygon arg:h arg:w arguments arg arg arg Assign If Call Assign Call Assign Assign Assign Call Assign Call Assign Assign Call Compare Compare Call Compare Compare Assign Call Assign Assign Call Assign Assign Call Assign Call Return return:yes"
  },
  {
    "library": "scipy",
    "name": "var",
    "source_code": "def var(self, *args, **kwds):\n    kwds['moments'] = 'v'\n    res = self.stats(*args, **kwds)\n    if isinstance(res, ndarray) and res.ndim == 0:\n        return res[()]\n    return res",
    "docstring": "Variance of the distribution. Parameters ---------- arg1, arg2, arg3,... : array_like The shape parameter(s) for the distribution (see docstring of the instance object for more information) loc : array_like, optional location parameter (default=0) scale : array_like, optional scale parameter (default=1) Returns ------- var : float the variance of the distribution",
    "type": "method",
    "file_path": "scipy\\scipy\\stats\\_distn_infrastructure.py",
    "ast_data": "FunctionDef name:var arg:self arguments arg arg arg Assign Assign Call If BoolOp Call Compare Return return:yes Return return:yes"
  },
  {
    "library": "pandas",
    "name": "_reindex_with_indexers",
    "source_code": "@final\ndef _reindex_with_indexers(self, reindexers, fill_value=None, allow_dups: bool=False) -> Self:\n    new_data = self._mgr\n    for axis in sorted(reindexers.keys()):\n        index, indexer = reindexers[axis]\n        baxis = self._get_block_manager_axis(axis)\n        if index is None:\n            continue\n        index = ensure_index(index)\n        if indexer is not None:\n            indexer = ensure_platform_int(indexer)\n        new_data = new_data.reindex_indexer(index, indexer, axis=baxis, fill_value=fill_value, allow_dups=allow_dups)\n    if new_data is self._mgr:\n        new_data = new_data.copy(deep=False)\n    return self._constructor_from_mgr(new_data, axes=new_data.axes).__finalize__(self)",
    "docstring": "allow_dups indicates an internal call here",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\generic.py",
    "ast_data": "FunctionDef name:_reindex_with_indexers arg:self arg:reindexers arg:fill_value arg:allow_dups arguments arg arg arg arg Assign For Call Call Assign Assign Call If Compare Assign Call If Compare Assign Call Assign Call If Compare Assign Call Return return:yes Call Call"
  },
  {
    "library": "django",
    "name": "no_limit_value",
    "source_code": "def no_limit_value(self):\n    raise NotImplementedError('subclasses of BaseDatabaseOperations may require a no_limit_value() method')",
    "docstring": "Return the value to use for the LIMIT when we are wanting \"LIMIT infinity\". Return None if the limit clause can be omitted in this case.",
    "type": "method",
    "file_path": "django\\django\\db\\backends\\base\\operations.py",
    "ast_data": "FunctionDef name:no_limit_value arg:self arguments arg Raise Call"
  },
  {
    "library": "kornia",
    "name": "fit_plane",
    "source_code": "def fit_plane(points: Vector3) -> Hyperplane:\n    if points.shape[-1] != 3:\n        raise TypeError('vector must be (*, 3)')\n    mean = points.mean(-2, True)\n    points_centered = points - mean\n    _, _, V = _torch_svd_cast(points_centered)\n    direction = V[..., :, -1]\n    origin = mean[..., 0, :]\n    return Hyperplane.from_vector(Vector3(direction), Vector3(origin))",
    "docstring": "Fit a plane from a set of points using SVD. Args: points: tensor containing a batch of sets of n-dimensional points. The expected shape of the tensor is :math:. Return: The computed hyperplane object.",
    "type": "function",
    "file_path": "kornia\\kornia\\geometry\\plane.py",
    "ast_data": "FunctionDef name:fit_plane arg:points arguments arg If Compare Raise Call Assign Call Assign Assign Call Assign Assign Return return:yes Call Call Call"
  },
  {
    "library": "numpy",
    "name": "center",
    "source_code": "@set_module('numpy.strings')\n@array_function_dispatch(_just_dispatcher)\ndef center(a, width, fillchar=' '):\n    width = np.asanyarray(width)\n    if not np.issubdtype(width.dtype, np.integer):\n        raise TypeError(f\"unsupported type {width.dtype} for operand 'width'\")\n    a = np.asanyarray(a)\n    fillchar = np.asanyarray(fillchar)\n    if np.any(str_len(fillchar) != 1):\n        raise TypeError('The fill character must be exactly one character long')\n    if np.result_type(a, fillchar).char == 'T':\n        return _center(a, width, fillchar)\n    fillchar = fillchar.astype(a.dtype, copy=False)\n    width = np.maximum(str_len(a), width)\n    out_dtype = f'{a.dtype.char}{width.max()}'\n    shape = np.broadcast_shapes(a.shape, width.shape, fillchar.shape)\n    out = np.empty_like(a, shape=shape, dtype=out_dtype)\n    return _center(a, width, fillchar, out=out)",
    "docstring": "Return a copy of with its elements centered in a string of length . Parameters ---------- a : array-like, with ``width >> import numpy as np >>> c = np.array(['a1b2','1b2a','b2a1','2a1b']); c array(['a1b2', '1b2a', 'b2a1', '2a1b'], dtype='>> np.strings.center(c, width=9) array([' a1b2 ', ' 1b2a ', ' b2a1 ', ' 2a1b '], dtype='>> np.strings.center(c, width=9, fillchar='*') array(['***a1b2**', '***1b2a**', '***b2a1**', '***2a1b**'], dtype='>> np.strings.center(c, width=1) array(['a1b2', '1b2a', 'b2a1', '2a1b'], dtype='<U4')",
    "type": "function",
    "file_path": "numpy\\numpy\\_core\\strings.py",
    "ast_data": "FunctionDef name:center arg:a arg:width arg:fillchar arguments arg arg arg Assign Call If Call Raise Call Assign Call Assign Call If Call Compare Call Raise Call If Compare Call Return return:yes Call Assign Call Assign Call Call Assign Call Assign Call Assign Call Return return:yes Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "make_test_data_loader",
    "source_code": "def make_test_data_loader(raw_data_file_path, processed_data_file):\n    test_data = CriteoDataset('kaggle', -1, 0.0, 'total', 'test', raw_data_file_path, processed_data_file, False, False)\n    test_loader = torch.utils.data.DataLoader(test_data, batch_size=16384, shuffle=False, num_workers=7, collate_fn=collate_wrapper_criteo_offset, pin_memory=False, drop_last=False)\n    return test_loader",
    "docstring": "Function to create dataset and dataloaders for the test dataset. Rewritten simpler version of from the dlrm_data_pytorch.py that makes the test dataset and dataloaders only for the ***kaggle criteo dataset***",
    "type": "function",
    "file_path": "pytorch\\torch\\ao\\pruning\\_experimental\\data_sparsifier\\benchmarks\\dlrm_utils.py",
    "ast_data": "FunctionDef name:make_test_data_loader arg:raw_data_file_path arg:processed_data_file arguments arg arg Assign Call Assign Call Return return:yes"
  },
  {
    "library": "kornia",
    "name": "scale_",
    "source_code": "def scale_(self, scale_factor: Union[float, Tensor]) -> 'PinholeCamera':\n    self.intrinsics[..., 0, 0] *= scale_factor\n    self.intrinsics[..., 1, 1] *= scale_factor\n    self.intrinsics[..., 0, 2] *= scale_factor\n    self.intrinsics[..., 1, 2] *= scale_factor\n    self.height *= scale_factor\n    self.width *= scale_factor\n    return self",
    "docstring": "Scale the pinhole model in-place. Args: scale_factor: a tensor with the scale factor. It has to be broadcastable with class members. The expected shape is :math: or :math:. Returns: the camera model with scaled parameters.",
    "type": "method",
    "file_path": "kornia\\kornia\\geometry\\camera\\pinhole.py",
    "ast_data": "FunctionDef name:scale_ arg:self arg:scale_factor arguments arg arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "array_to_int_csv",
    "source_code": "def array_to_int_csv(array_data):\n    flattened_array = array_data.flatten()\n    array_as_strings = [item.astype(int).astype(str) for item in flattened_array]\n    return ','.join(array_as_strings)",
    "docstring": "Converts all elements in a numerical array to a comma-separated string. Args: array_data: Numerical array to convert. Returns: String containing array values as integers, separated by commas.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\lite\\tools\\convert_image_to_csv.py",
    "ast_data": "FunctionDef name:array_to_int_csv arg:array_data arguments arg Assign Call Assign Call Call Return return:yes Call"
  },
  {
    "library": "django",
    "name": "geom_col_name",
    "source_code": "@classmethod\ndef geom_col_name(cls):\n    return 'f_geometry_column'",
    "docstring": "Return the name of the metadata column used to store the feature geometry column.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\db\\backends\\spatialite\\models.py",
    "ast_data": "FunctionDef name:geom_col_name arg:cls arguments arg Return return:yes"
  },
  {
    "library": "cherrypy",
    "name": "GeneratorDemo",
    "source_code": "class GeneratorDemo:\n\n    def header(self):\n        return '<html><body><h2>Generators rule!</h2>'\n\n    def footer(self):\n        return '</body></html>'\n\n    @cherrypy.expose\n    def index(self):\n        users = ['Remi', 'Carlos', 'Hendrik', 'Lorenzo Lamas']\n        yield self.header()\n        yield '<h3>List of users:</h3>'\n        for user in users:\n            yield ('%s<br/>' % user)\n        yield self.footer()",
    "docstring": "HTTP response streaming app.",
    "type": "class",
    "file_path": "cherrypy\\cherrypy\\tutorial\\tut08_generators_and_yield.py",
    "ast_data": "ClassDef name:GeneratorDemo FunctionDef name:header arg:self arguments arg Return return:yes FunctionDef name:footer arg:self arguments arg Return return:yes FunctionDef name:index arg:self arguments arg Assign Call For Call"
  },
  {
    "library": "kornia",
    "name": "get_closest_point_on_epipolar_line",
    "source_code": "def get_closest_point_on_epipolar_line(pts1: Tensor, pts2: Tensor, Fm: Tensor) -> Tensor:\n    if not isinstance(Fm, Tensor):\n        raise TypeError(f'Fm type is not a torch.Tensor. Got {type(Fm)}')\n    if len(Fm.shape) < 3 or not Fm.shape[-2:] == (3, 3):\n        raise ValueError(f'Fm must be a (*, 3, 3) tensor. Got {Fm.shape}')\n    if pts1.shape[-1] == 2:\n        pts1 = convert_points_to_homogeneous(pts1)\n    if pts2.shape[-1] == 2:\n        pts2 = convert_points_to_homogeneous(pts2)\n    line1in2 = compute_correspond_epilines(pts1, Fm)\n    perp = get_perpendicular(line1in2, pts2)\n    points1_in_2 = convert_points_from_homogeneous(line1in2.cross(perp, dim=2))\n    return points1_in_2",
    "docstring": "Return closest point on the epipolar line to the correspondence, given the fundamental matrix. Args: pts1: correspondences from the left images with shape :math:. If they are not homogeneous, converted automatically. pts2: correspondences from the right images with shape :math:. If they are not homogeneous, converted automatically. Fm: Fundamental matrices with shape :math:. Called Fm to avoid ambiguity with torch.nn.functional. Returns: point on epipolar line :math:.",
    "type": "function",
    "file_path": "kornia\\kornia\\geometry\\epipolar\\fundamental.py",
    "ast_data": "FunctionDef name:get_closest_point_on_epipolar_line arg:pts1 arg:pts2 arg:Fm arguments arg arg arg If Call Raise Call Call If BoolOp Compare Call Compare Raise Call If Compare Assign Call If Compare Assign Call Assign Call Assign Call Assign Call Call Return return:yes"
  },
  {
    "library": "django",
    "name": "check_models_ready",
    "source_code": "def check_models_ready(self):\n    if not self.models_ready:\n        raise AppRegistryNotReady(\"Models aren't loaded yet.\")",
    "docstring": "Raise an exception if all models haven't been imported yet.",
    "type": "method",
    "file_path": "django\\django\\apps\\registry.py",
    "ast_data": "FunctionDef name:check_models_ready arg:self arguments arg If Raise Call"
  },
  {
    "library": "tensorflow",
    "name": "calibrate_and_quantize",
    "source_code": "@convert_phase(Component.OPTIMIZE_TFLITE_MODEL, SubComponent.QUANTIZE_USING_DEPRECATED_QUANTIZER)\ndef calibrate_and_quantize(self, dataset_gen, input_type, output_type, allow_float, activations_type=dtypes.int8, bias_type=dtypes.int32, resize_input=True, disable_per_channel=False, disable_per_channel_quantization_for_dense_layers=False):\n    self._feed_tensors(dataset_gen, resize_input)\n    return self._calibrator.QuantizeModel(np.dtype(input_type.as_numpy_dtype()).num, np.dtype(output_type.as_numpy_dtype()).num, allow_float, np.dtype(activations_type.as_numpy_dtype()).num, np.dtype(bias_type.as_numpy_dtype()).num, disable_per_channel, disable_per_channel_quantization_for_dense_layers)",
    "docstring": "Calibrates the model with specified generator and then quantizes it. The input shapes of the calibrator are resized with the calibration data if is set. Returns: A quantized model. Args: dataset_gen: A generator that generates calibration samples. input_type: A tf.dtype representing the desired real-value input type. output_type: A tf.dtype representing the desired real-value output type. allow_float: A boolean. False if the resulting model cannot perform float computation, useful when targeting an integer-only backend. If False, an error will be thrown if an operation cannot be quantized, otherwise the model will fallback to float ops. activations_type: A tf.dtype representing the desired type for activations. bias_type: A tf.dtype representing the desired type for bias. resize_input: A boolean. True if the shape of the sample data is different from the input. disable_per_channel: A boolean. True if disabling per-channel quantization. disable_per_channel_quantization_for_dense_layers: A boolean. True if disabling per-channel quantization only in Dense layers.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\lite\\python\\optimize\\calibrator.py",
    "ast_data": "FunctionDef name:calibrate_and_quantize arg:self arg:dataset_gen arg:input_type arg:output_type arg:allow_float arg:activations_type arg:bias_type arg:resize_input arg:disable_per_channel arg:disable_per_channel_quantization_for_dense_layers arguments arg arg arg arg arg arg arg arg arg arg Call Return return:yes Call Call Call Call Call Call Call Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "ObjectIdentityWeakSet",
    "source_code": "class ObjectIdentityWeakSet(ObjectIdentitySet):\n    __slots__ = ()\n\n    def _wrap_key(self, key):\n        return _WeakObjectIdentityWrapper(key)\n\n    def __len__(self):\n        return len([_ for _ in self])\n\n    def __iter__(self):\n        keys = list(self._storage)\n        for key in keys:\n            unwrapped = key.unwrapped\n            if unwrapped is None:\n                self.discard(key)\n            else:\n                yield unwrapped",
    "docstring": "Like weakref.WeakSet, but compares objects with \"is\".",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\utils\\object_identity.py",
    "ast_data": "ClassDef name:ObjectIdentityWeakSet Assign FunctionDef name:_wrap_key arg:self arg:key arguments arg arg Return return:yes Call FunctionDef name:__len__ arg:self arguments arg Return return:yes Call FunctionDef name:__iter__ arg:self arguments arg Assign Call For Assign If Compare Call"
  },
  {
    "library": "scipy",
    "name": "_getcol",
    "source_code": "def _getcol(self, i):\n    if self.ndim == 1:\n        raise ValueError('getcol not provided for 1d arrays. Use indexing A[j]')\n    M, N = self.shape\n    i = int(i)\n    if i < 0:\n        i += N\n    if i < 0 or i >= N:\n        raise IndexError(f'index ({i}) out of range')\n    indptr, indices, data = get_csr_submatrix(M, N, self.indptr, self.indices, self.data, 0, M, i, i + 1)\n    return self.__class__((data, indices, indptr), shape=(M, 1), dtype=self.dtype, copy=False)",
    "docstring": "Returns a copy of column i. A (m x 1) sparse array (column vector).",
    "type": "method",
    "file_path": "scipy\\scipy\\sparse\\_csr.py",
    "ast_data": "FunctionDef name:_getcol arg:self arg:i arguments arg arg If Compare Raise Call Assign Assign Call If Compare If BoolOp Compare Compare Raise Call Assign Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "is_power_of_2",
    "source_code": "def is_power_of_2(n: int) -> bool:\n    return n > 0 and n & n - 1 == 0",
    "docstring": "Returns whether n = 2 ** m for some integer m.",
    "type": "function",
    "file_path": "pytorch\\torch\\_inductor\\runtime\\runtime_utils.py",
    "ast_data": "FunctionDef name:is_power_of_2 arg:n arguments arg Return return:yes BoolOp Compare Compare"
  },
  {
    "library": "pytorch",
    "name": "_has_unsupported_sympy_function",
    "source_code": "def _has_unsupported_sympy_function(self, expr: sympy.Basic) -> bool:\n    return expr.has(*self._unsupported_sympy_functions)",
    "docstring": "Tracks list of sympy.Functions the export solver doesn't know how to handle.",
    "type": "method",
    "file_path": "pytorch\\torch\\fx\\experimental\\symbolic_shapes.py",
    "ast_data": "FunctionDef name:_has_unsupported_sympy_function arg:self arg:expr arguments arg arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "__new__",
    "source_code": "def __new__(cls, op: Callable, tensor: torch.Tensor, peer: Optional[int]=None, group: Optional[ProcessGroup]=None, tag: int=0, group_peer: Optional[int]=None):\n    _check_op(op)\n    _check_single_tensor(tensor, 'tensor')\n    return object.__new__(cls)",
    "docstring": "Create and return a new instance of the class.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\distributed_c10d.py",
    "ast_data": "FunctionDef name:__new__ arg:cls arg:op arg:tensor arg:peer arg:group arg:tag arg:group_peer arguments arg arg arg arg arg arg arg Call Call Return return:yes Call"
  },
  {
    "library": "django",
    "name": "conditional_expression_supported_in_where_clause",
    "source_code": "def conditional_expression_supported_in_where_clause(self, expression):\n    if isinstance(expression, (Exists, Lookup, WhereNode)):\n        return True\n    if isinstance(expression, ExpressionWrapper) and expression.conditional:\n        return self.conditional_expression_supported_in_where_clause(expression.expression)\n    if isinstance(expression, RawSQL) and expression.conditional:\n        return True\n    return False",
    "docstring": "Oracle supports only EXISTS(...) or filters in the WHERE clause, others must be compared with True.",
    "type": "method",
    "file_path": "django\\django\\db\\backends\\oracle\\operations.py",
    "ast_data": "FunctionDef name:conditional_expression_supported_in_where_clause arg:self arg:expression arguments arg arg If Call Return return:yes If BoolOp Call Return return:yes Call If BoolOp Call Return return:yes Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "all_indices_partitioned",
    "source_code": "@property\ndef all_indices_partitioned(self):\n    return self._all_indices_partitioned",
    "docstring": "all_indices_partitioned property. Returns: True if we are inside a control flow construct and not all pfor iterations may be active.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\parallel_for\\pfor.py",
    "ast_data": "FunctionDef name:all_indices_partitioned arg:self arguments arg Return return:yes"
  },
  {
    "library": "scipy",
    "name": "XinSheYang01",
    "source_code": "class XinSheYang01(Benchmark):\n    change_dimensionality = True\n\n    def __init__(self, dimensions=2):\n        Benchmark.__init__(self, dimensions)\n        self._bounds = list(zip([-5.0] * self.N, [5.0] * self.N))\n        self.custom_bounds = ([-2, 2], [-2, 2])\n        self.global_optimum = [[0 for _ in range(self.N)]]\n        self.fglob = 0.0\n\n    def fun(self, x, *args):\n        self.nfev += 1\n        i = arange(1.0, self.N + 1.0)\n        return sum(np.random.random(self.N) * abs(x) ** i)",
    "docstring": "Xin-She Yang 1 objective function. This class defines the Xin-She Yang 1 [1]_ global optimization problem. This is a multimodal minimization problem defined as follows: .. math:: f_{\\text{XinSheYang01}}(x) = \\sum_{i=1}^{n} \\epsilon_i \\lvert x_i \\rvert^i The variable :math: is a random variable uniformly distributed in :math:. Here, :math: represents the number of dimensions and :math: for :math:. *Global optimum*: :math: for :math: for :math: .. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions For Global Optimization Problems Int. Journal of Mathematical Modelling and Numerical Optimisation, 2013, 4, 150-194.",
    "type": "class",
    "file_path": "scipy\\benchmarks\\benchmarks\\go_benchmark_functions\\go_funcs_X.py",
    "ast_data": "ClassDef name:XinSheYang01 Assign FunctionDef name:__init__ arg:self arg:dimensions arguments arg arg Call Assign Call Call Assign Assign Call Assign FunctionDef name:fun arg:self arg:x arguments arg arg arg Assign Call Return return:yes Call Call Call"
  },
  {
    "library": "matplotlib",
    "name": "Arc3",
    "source_code": "@_register_style(_style_list)\nclass Arc3(_Base):\n\n    def __init__(self, rad=0.0):\n        self.rad = rad\n\n    def connect(self, posA, posB):\n        x1, y1 = posA\n        x2, y2 = posB\n        x12, y12 = ((x1 + x2) / 2.0, (y1 + y2) / 2.0)\n        dx, dy = (x2 - x1, y2 - y1)\n        f = self.rad\n        cx, cy = (x12 + f * dy, y12 - f * dx)\n        vertices = [(x1, y1), (cx, cy), (x2, y2)]\n        codes = [Path.MOVETO, Path.CURVE3, Path.CURVE3]\n        return Path(vertices, codes)",
    "docstring": "Creates a simple quadratic Bézier curve between two points. The curve is created so that the middle control point (C1) is located at the same distance from the start (C0) and end points(C2) and the distance of the C1 to the line connecting C0-C2 is *rad* times the distance of C0-C2.",
    "type": "class",
    "file_path": "matplotlib\\lib\\matplotlib\\patches.py",
    "ast_data": "ClassDef name:Arc3 FunctionDef name:__init__ arg:self arg:rad arguments arg arg Assign FunctionDef name:connect arg:self arg:posA arg:posB arguments arg arg arg Assign Assign Assign Assign Assign Assign Assign Assign Return return:yes Call Call"
  },
  {
    "library": "matplotlib",
    "name": "_roll_to_vertical",
    "source_code": "def _roll_to_vertical(self, arr: 'np.typing.ArrayLike', reverse: bool=False) -> np.ndarray:\n    if reverse:\n        return np.roll(arr, (self._vertical_axis - 2) * -1)\n    else:\n        return np.roll(arr, self._vertical_axis - 2)",
    "docstring": "Roll arrays to match the different vertical axis. Parameters ---------- arr : ArrayLike Array to roll. reverse : bool, default: False Reverse the direction of the roll.",
    "type": "method",
    "file_path": "matplotlib\\lib\\mpl_toolkits\\mplot3d\\axes3d.py",
    "ast_data": "FunctionDef name:_roll_to_vertical arg:self arg:arr arg:reverse arguments arg arg arg If Return return:yes Call Return return:yes Call"
  },
  {
    "library": "authlib",
    "name": "get_client_id",
    "source_code": "def get_client_id(self):\n    raise NotImplementedError()",
    "docstring": "A method to return client_id of the client. For instance, the value in database is saved in a column called ``:: def get_client_id(self): return self.client_id :return: string",
    "type": "method",
    "file_path": "authlib\\authlib\\oauth2\\rfc6749\\models.py",
    "ast_data": "FunctionDef name:get_client_id arg:self arguments arg Raise Call"
  },
  {
    "library": "tensorflow",
    "name": "UndefinedReturnValue",
    "source_code": "class UndefinedReturnValue(object):\n    pass",
    "docstring": "Represents a return value that is undefined.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\autograph\\operators\\variables.py",
    "ast_data": "ClassDef name:UndefinedReturnValue"
  },
  {
    "library": "tensorflow",
    "name": "RNNSavedModelSaver",
    "source_code": "class RNNSavedModelSaver(LayerSavedModelSaver):\n\n    @property\n    def object_identifier(self):\n        return constants.RNN_LAYER_IDENTIFIER\n\n    def _get_serialized_attributes_internal(self, serialization_cache):\n        objects, functions = super(RNNSavedModelSaver, self)._get_serialized_attributes_internal(serialization_cache)\n        states = data_structures.wrap_or_unwrap(self.obj.states)\n        if isinstance(states, tuple):\n            states = data_structures.wrap_or_unwrap(list(states))\n        objects['states'] = states\n        return (objects, functions)",
    "docstring": "RNN layer serialization.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\saving\\saved_model\\layer_serialization.py",
    "ast_data": "ClassDef name:RNNSavedModelSaver FunctionDef name:object_identifier arg:self arguments arg Return return:yes FunctionDef name:_get_serialized_attributes_internal arg:self arg:serialization_cache arguments arg arg Assign Call Call Assign Call If Call Assign Call Call Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "assert_positive_definite",
    "source_code": "def assert_positive_definite(self, name='assert_positive_definite'):\n    with self._name_scope(name):\n        return self._assert_positive_definite()",
    "docstring": "Returns an that asserts this operator is positive definite. Here, positive definite means that the quadratic form has positive real part for all nonzero . Note that we do not require the operator to be self-adjoint to be positive definite. Args: name: A name to give this . Returns: An , that, when run, will raise an if the operator is not positive definite.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\linalg\\linear_operator.py",
    "ast_data": "FunctionDef name:assert_positive_definite arg:self arg:name arguments arg arg With Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "expand_groups_in_paired_modules_list",
    "source_code": "def expand_groups_in_paired_modules_list(paired_modules_list):\n    new_list = []\n    for group in paired_modules_list:\n        if len(group) == 1:\n            raise ValueError('Group must have at least two modules')\n        elif len(group) == 2:\n            new_list.append(group)\n        elif len(group) > 2:\n            new_list.extend(([group[i], group[i + 1]] for i in range(len(group) - 1)))\n    return new_list",
    "docstring": "Expands module pair groups larger than two into groups of two modules.",
    "type": "function",
    "file_path": "pytorch\\torch\\ao\\quantization\\_equalize.py",
    "ast_data": "FunctionDef name:expand_groups_in_paired_modules_list arg:paired_modules_list arguments arg Assign For If Compare Call Raise Call If Compare Call Call If Compare Call Call Call Call Return return:yes"
  },
  {
    "library": "pandas",
    "name": "_nanquantile_1d",
    "source_code": "def _nanquantile_1d(values: np.ndarray, mask: npt.NDArray[np.bool_], qs: npt.NDArray[np.float64], na_value: Scalar, interpolation: str) -> Scalar | np.ndarray:\n    values = values[~mask]\n    if len(values) == 0:\n        return np.full(len(qs), na_value)\n    return np.quantile(values, qs, method=interpolation)",
    "docstring": "Wrapper for np.quantile that skips missing values, specialized to 1-dimensional case. Parameters ---------- values : array over which to find quantiles mask : ndarray[bool] locations in values that should be considered missing qs : np.ndarray[float64] of quantile indices to find na_value : scalar value to return for empty or all-null values interpolation : str Returns ------- quantiles : scalar or array",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\array_algos\\quantile.py",
    "ast_data": "FunctionDef name:_nanquantile_1d arg:values arg:mask arg:qs arg:na_value arg:interpolation arguments arg arg arg arg arg Assign If Compare Call Return return:yes Call Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "AbsTransform",
    "source_code": "class AbsTransform(Transform):\n    domain = constraints.real\n    codomain = constraints.positive\n\n    def __eq__(self, other):\n        return isinstance(other, AbsTransform)\n\n    def _call(self, x):\n        return x.abs()\n\n    def _inverse(self, y):\n        return y",
    "docstring": "Transform via the mapping :math:.",
    "type": "class",
    "file_path": "pytorch\\torch\\distributions\\transforms.py",
    "ast_data": "ClassDef name:AbsTransform Assign Assign FunctionDef name:__eq__ arg:self arg:other arguments arg arg Return return:yes Call FunctionDef name:_call arg:self arg:x arguments arg arg Return return:yes Call FunctionDef name:_inverse arg:self arg:y arguments arg arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_filter_or_keep_complex",
    "source_code": "def _filter_or_keep_complex(self, node, default_and_custom_functions: list[registration.ONNXFunction]) -> list[registration.ONNXFunction]:\n    args_with_complex_dtype = [_is_arg_with_complex_dtype(arg) for arg in node.args]\n    if any(args_with_complex_dtype):\n        default_and_custom_functions = [func for func in default_and_custom_functions if func.is_complex]\n        if not default_and_custom_functions:\n            op_full_name = self._get_aten_name(node).qualified_name()\n            raise RuntimeError(f'Cannot find any COMPLEX symbolic function for {op_full_name}, which should be registered under {node.target}.')\n    else:\n        default_and_custom_functions = [func for func in default_and_custom_functions if not func.is_complex]\n        if not default_and_custom_functions:\n            op_full_name = self._get_aten_name(node).qualified_name()\n            raise RuntimeError(f'Can ONLY find COMPLEX symbolic function for {op_full_name}, which should be registered under {node.target}.')\n    return default_and_custom_functions",
    "docstring": "Filter the complex functions if the input has complex dtype.",
    "type": "method",
    "file_path": "pytorch\\torch\\onnx\\_internal\\fx\\onnxfunction_dispatcher.py",
    "ast_data": "FunctionDef name:_filter_or_keep_complex arg:self arg:node arg:default_and_custom_functions arguments arg arg arg Assign Call If Call Assign If Assign Call Call Raise Call Assign If Assign Call Call Raise Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "__init__",
    "source_code": "def __init__(self, aux_trans, extreme_finder=None, grid_locator1=None, grid_locator2=None, tick_formatter1=None, tick_formatter2=None):\n    super().__init__()\n    self._grid_info = None\n    self.grid_finder = GridFinder(aux_trans, extreme_finder, grid_locator1, grid_locator2, tick_formatter1, tick_formatter2)",
    "docstring": "Parameters ---------- aux_trans : or tuple[Callable, Callable] The transform from curved coordinates to rectilinear coordinate: either a instance (which provides also its inverse), or a pair of callables `` that define the transform and its inverse. The callables should have signature:: x_rect, y_rect = trans(x_curved, y_curved) x_curved, y_curved = inv_trans(x_rect, y_rect) extreme_finder grid_locator1, grid_locator2 Grid locators for each axis. tick_formatter1, tick_formatter2 Tick formatters for each axis.",
    "type": "method",
    "file_path": "matplotlib\\lib\\mpl_toolkits\\axisartist\\grid_helper_curvelinear.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:aux_trans arg:extreme_finder arg:grid_locator1 arg:grid_locator2 arg:tick_formatter1 arg:tick_formatter2 arguments arg arg arg arg arg arg arg Call Call Assign Assign Call"
  },
  {
    "library": "pytorch",
    "name": "enable_flash_sdp",
    "source_code": "def enable_flash_sdp(enabled: bool):\n    torch._C._set_sdp_use_flash(enabled)",
    "docstring": ".. warning:: This flag is beta and subject to change. Enables or disables flash scaled dot product attention.",
    "type": "function",
    "file_path": "pytorch\\torch\\backends\\cuda\\__init__.py",
    "ast_data": "FunctionDef name:enable_flash_sdp arg:enabled arguments arg Call"
  },
  {
    "library": "pytorch",
    "name": "_cxx_flags",
    "source_code": "def _cxx_flags() -> str:\n    return torch._C._cxx_flags()",
    "docstring": "Returns the CXX_FLAGS used when building PyTorch.",
    "type": "function",
    "file_path": "pytorch\\torch\\__config__.py",
    "ast_data": "FunctionDef name:_cxx_flags arguments Return return:yes Call"
  },
  {
    "library": "django",
    "name": "add_internal_dependencies",
    "source_code": "def add_internal_dependencies(self, key, migration):\n    for parent in migration.dependencies:\n        if parent[0] == key[0] and parent[1] != '__first__':\n            self.graph.add_dependency(migration, key, parent, skip_validation=True)",
    "docstring": "Internal dependencies need to be added first to ensure dependencies find the correct root node.",
    "type": "method",
    "file_path": "django\\django\\db\\migrations\\loader.py",
    "ast_data": "FunctionDef name:add_internal_dependencies arg:self arg:key arg:migration arguments arg arg arg For If BoolOp Compare Compare Call"
  },
  {
    "library": "numpy",
    "name": "_convolve_or_correlate",
    "source_code": "def _convolve_or_correlate(f, a, v, mode, propagate_mask):\n    if propagate_mask:\n        mask = f(getmaskarray(a), np.ones(np.shape(v), dtype=bool), mode=mode) | f(np.ones(np.shape(a), dtype=bool), getmaskarray(v), mode=mode)\n        data = f(getdata(a), getdata(v), mode=mode)\n    else:\n        mask = ~f(~getmaskarray(a), ~getmaskarray(v), mode=mode)\n        data = f(filled(a, 0), filled(v, 0), mode=mode)\n    return masked_array(data, mask=mask)",
    "docstring": "Helper function for ma.correlate and ma.convolve",
    "type": "function",
    "file_path": "numpy\\numpy\\ma\\core.py",
    "ast_data": "FunctionDef name:_convolve_or_correlate arg:f arg:a arg:v arg:mode arg:propagate_mask arguments arg arg arg arg arg If Assign Call Call Call Call Call Call Call Call Assign Call Call Call Assign Call Call Call Assign Call Call Call Return return:yes Call"
  },
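The public entry point for this helper is np.ma.convolve (and np.ma.correlate); the example below contrasts the two propagate_mask modes.

import numpy as np

a = np.ma.array([1.0, 2.0, 3.0], mask=[False, True, False])
v = np.ones(2)

# Any output element that touches a masked input becomes masked.
print(np.ma.convolve(a, v, propagate_mask=True))
# Masked inputs are treated as contributing nothing instead.
print(np.ma.convolve(a, v, propagate_mask=False))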
  {
    "library": "scipy",
    "name": "next_fast_len",
    "source_code": "def next_fast_len(target, real=False):\n    pass",
    "docstring": "Find the next fast size of input data to ``: >>> b = fft.fft(a, 131072)",
    "type": "function",
    "file_path": "scipy\\scipy\\fft\\_helper.py",
    "ast_data": "FunctionDef name:next_fast_len arg:target arg:real arguments arg arg"
  },
  {
    "library": "sphinx",
    "name": "SphinxContentsFilter",
    "source_code": "class SphinxContentsFilter(ContentsFilter):\n    visit_pending_xref = ContentsFilter.ignore_node_but_process_children\n\n    def visit_image(self, node: nodes.image) -> None:\n        raise nodes.SkipNode",
    "docstring": "Used with BuildEnvironment.add_toc_from() to discard cross-file links within table-of-contents link nodes.",
    "type": "class",
    "file_path": "sphinx\\sphinx\\transforms\\__init__.py",
    "ast_data": "ClassDef name:SphinxContentsFilter Assign FunctionDef name:visit_image arg:self arg:node arguments arg arg Raise"
  },
  {
    "library": "pytorch",
    "name": "RuntimeSchemaInfo",
    "source_code": "@dataclass\nclass RuntimeSchemaInfo:\n    static_argnum: int = 100\n    static_kwargkey: Optional[list[str]] = None\n    needs_pytree: bool = False",
    "docstring": "RuntimeSchemaInfo stores the operator schema related information for runtime (eager) execution. This is mainly used for two ways: 1. to generate hash for args to determine whether to re-run sharding prop or not 2. to determine if we need pytree",
    "type": "class",
    "file_path": "pytorch\\torch\\distributed\\tensor\\_op_schema.py",
    "ast_data": "ClassDef name:RuntimeSchemaInfo"
  },
  {
    "library": "pytorch",
    "name": "long",
    "source_code": "def long(self):\n    return self._to(torch.long)",
    "docstring": "Casts this storage to long type.",
    "type": "method",
    "file_path": "pytorch\\torch\\storage.py",
    "ast_data": "FunctionDef name:long arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "match",
    "source_code": "def match(self, pattern: PatternExpr, node: NodeOrConstant) -> MatchResult:\n    if pattern in self.pattern_to_node:\n        if self.pattern_to_node[pattern] == node:\n            return Match(self, pattern)\n        else:\n            return FailedMatch('repeated pattern differs')\n    m = pattern._match(node, self)\n    assert pattern not in self.pattern_to_node\n    self.pattern_to_node[pattern] = node if m else None\n    return m",
    "docstring": "wrapper to check reused nodes in patterns",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\pattern_matcher.py",
    "ast_data": "FunctionDef name:match arg:self arg:pattern arg:node arguments arg arg arg If Compare If Compare Return return:yes Call Return return:yes Call Assign Call Compare Assign Return return:yes"
  },
  {
    "library": "pandas",
    "name": "__len__",
    "source_code": "def __len__(self) -> int:\n    return len(self.index)",
    "docstring": "Returns length of info axis, but here we use the index.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\frame.py",
    "ast_data": "FunctionDef name:__len__ arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "warn_extraneous",
    "source_code": "def warn_extraneous(extraneous):\n    if extraneous:\n        warn(f'The following arguments have no effect for a chosen solver: {', '.join((f'`{x}`' for x in extraneous))}.', stacklevel=3)",
    "docstring": "Display a warning for extraneous keyword arguments. The initializer of each solver class is expected to collect keyword arguments that it doesn't understand and warn about them. This function prints a warning for each key in the supplied dictionary. Parameters ---------- extraneous : dict Extraneous keyword arguments",
    "type": "function",
    "file_path": "scipy\\scipy\\integrate\\_ivp\\common.py",
    "ast_data": "FunctionDef name:warn_extraneous arg:extraneous arguments arg If Call Call"
  },
  {
    "library": "tensorflow",
    "name": "get_v1_constants",
    "source_code": "def get_v1_constants(module: Any) -> Sequence[str]:\n    constants_v1 = []\n    tensorflow_constants_attr_v1 = API_ATTRS_V1[TENSORFLOW_API_NAME].constants\n    if hasattr(module, tensorflow_constants_attr_v1):\n        constants_v1.extend(getattr(module, tensorflow_constants_attr_v1))\n    return constants_v1",
    "docstring": "Get a list of TF 1.* constants in this module. Args: module: TensorFlow module. Returns: List of all API constants under the given module.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\util\\tf_export.py",
    "ast_data": "FunctionDef name:get_v1_constants arg:module arguments arg Assign Assign If Call Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_is_replicated_or_sharded_to_logical_cores",
    "source_code": "def _is_replicated_or_sharded_to_logical_cores(self):\n    return isinstance(self._primary, tpu_replicated_variable.TPUReplicatedVariable)",
    "docstring": "Returns whether each of the underlying variables is replicated or sharded to logical cores. If True, the handles of the underlying variables are not available outside a TPU context.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\tpu_values.py",
    "ast_data": "FunctionDef name:_is_replicated_or_sharded_to_logical_cores arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "dtypes",
    "source_code": "@property\ndef dtypes(self):\n    return self._dtypes",
    "docstring": "The list of dtypes for each component of a staging area element.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\data_flow_ops.py",
    "ast_data": "FunctionDef name:dtypes arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "clear",
    "source_code": "def clear(self) -> None:\n    self._dispatch_table.clear()\n    self._dispatch_cache.clear()",
    "docstring": "Deletes all targets in the table.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\core\\function\\polymorphism\\type_dispatch.py",
    "ast_data": "FunctionDef name:clear arg:self arguments arg Call Call"
  },
  {
    "library": "pytorch",
    "name": "_window_function_checks",
    "source_code": "def _window_function_checks(function_name: str, M: int, dtype: torch.dtype, layout: torch.layout) -> None:\n    if M < 0:\n        raise ValueError(f'{function_name} requires non-negative window length, got M={M}')\n    if layout is not torch.strided:\n        raise ValueError(f'{function_name} is implemented for strided tensors only, got: {layout}')\n    if dtype not in [torch.float32, torch.float64]:\n        raise ValueError(f'{function_name} expects float32 or float64 dtypes, got: {dtype}')",
    "docstring": "Performs common checks for all the defined windows. This function should be called before computing any window. Args: function_name (str): name of the window function. M (int): length of the window. dtype (:class:): the desired data type of returned tensor. layout (:class:): the desired layout of returned tensor.",
    "type": "function",
    "file_path": "pytorch\\torch\\signal\\windows\\windows.py",
    "ast_data": "FunctionDef name:_window_function_checks arg:function_name arg:M arg:dtype arg:layout arguments arg arg arg arg If Compare Raise Call If Compare Raise Call If Compare Raise Call"
  },
  {
    "library": "pytorch",
    "name": "schedule",
    "source_code": "def schedule(*, wait: int, warmup: int, active: int, repeat: int=0, skip_first: int=0, skip_first_wait: int=0) -> Callable:\n\n    def schedule_fn(step: int) -> ProfilerAction:\n        assert step >= 0\n        if step < skip_first:\n            return ProfilerAction.NONE\n        else:\n            step -= skip_first\n        if skip_first_wait != 0:\n            step += wait\n        num_steps = wait + warmup + active\n        if repeat > 0 and step / num_steps >= repeat:\n            return ProfilerAction.NONE\n        mod_step = step % num_steps\n        if mod_step < wait:\n            return ProfilerAction.NONE\n        elif mod_step < wait + warmup:\n            return ProfilerAction.WARMUP\n        else:\n            return ProfilerAction.RECORD if mod_step < num_steps - 1 else ProfilerAction.RECORD_AND_SAVE\n    assert wait >= 0 and warmup >= 0 and (active > 0) and (repeat >= 0) and (skip_first >= 0), 'Invalid profiler schedule arguments'\n    if warmup == 0:\n        warn(\"Profiler won't be using warmup, this can skew profiler results\")\n    return schedule_fn",
    "docstring": "Returns a callable that can be used as profiler `` is non-zero. All subsequent cycles will then wait 20 steps between the last active and warmup.",
    "type": "function",
    "file_path": "pytorch\\torch\\profiler\\profiler.py",
    "ast_data": "FunctionDef name:schedule arguments arg arg arg arg arg arg FunctionDef name:schedule_fn arg:step arguments arg Compare If Compare Return return:yes If Compare Assign If BoolOp Compare Compare Return return:yes Assign If Compare Return return:yes If Compare Return return:yes Return return:yes Compare BoolOp Compare Compare Compare Compare Compare If Compare Call Return return:yes"
  },
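Stepping a schedule by hand makes the cycle visible. With wait=1, warmup=1, active=2, repeat=1, skip_first=1, step 0 is skipped, one 4-step cycle runs, and everything after the single allowed cycle is NONE.

from torch.profiler import schedule, ProfilerAction

sched = schedule(wait=1, warmup=1, active=2, repeat=1, skip_first=1)
for step in range(6):
    print(step, sched(step))
# 0 ProfilerAction.NONE             (skip_first)
# 1 ProfilerAction.NONE             (wait)
# 2 ProfilerAction.WARMUP
# 3 ProfilerAction.RECORD
# 4 ProfilerAction.RECORD_AND_SAVE
# 5 ProfilerAction.NONE             (repeat=1 exhausted)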
  {
    "library": "pytorch",
    "name": "linear_check",
    "source_code": "def linear_check(tensor_type, module_instance):\n    if len(tensor_type.__args__) >= 2:\n        if is_consistent(module_instance.in_features, tensor_type.__args__[-1]):\n            new_type_args = list(tensor_type.__args__)\n            new_type_args[-1] = module_instance.out_features\n            return TensorType(tuple(new_type_args))\n        else:\n            raise TypeError(f'Inconsistent {module_instance.in_features} and {tensor_type.__args__[-1]} in {module_instance}')\n    else:\n        raise TypeError(f'Type {tensor_type} must have rank 2 or more.')",
    "docstring": "Checks that an input tensor type satisfies the conditions for linear operation and returns the output type based on in and out features given by module_instance",
    "type": "function",
    "file_path": "pytorch\\torch\\fx\\experimental\\graph_gradual_typechecker.py",
    "ast_data": "FunctionDef name:linear_check arg:tensor_type arg:module_instance arguments arg arg If Compare Call If Call Assign Call Assign Return return:yes Call Call Raise Call Raise Call"
  },
  {
    "library": "authlib",
    "name": "get_scope",
    "source_code": "def get_scope(self):\n    raise NotImplementedError()",
    "docstring": "A method to get scope of the authorization code. For instance, the column is called ``:: def get_scope(self): return self.scope :return: scope string",
    "type": "method",
    "file_path": "authlib\\authlib\\oauth2\\rfc6749\\models.py",
    "ast_data": "FunctionDef name:get_scope arg:self arguments arg Raise Call"
  },
  {
    "library": "tensorflow",
    "name": "update_state_wrapper",
    "source_code": "def update_state_wrapper(update_state_fn):\n\n    def decorated(metric_obj, *args, **kwargs):\n        strategy = distribute_lib.get_strategy()\n        for weight in metric_obj.weights:\n            if backend.is_tpu_strategy(strategy) and (not strategy.extended.variable_created_in_scope(weight)) and (not distribute_lib.in_cross_replica_context()):\n                raise ValueError('Trying to run metric.update_state in replica context when the metric was not created in TPUStrategy scope. Make sure the keras Metric is created in TPUstrategy scope. ')\n        with tf_utils.graph_context_for_symbolic_tensors(*args, **kwargs):\n            update_op = update_state_fn(*args, **kwargs)\n        if update_op is not None:\n            metric_obj.add_update(update_op)\n        return update_op\n    return tf_decorator.make_decorator(update_state_fn, decorated)",
    "docstring": "Decorator to wrap metric with . Args: update_state_fn: function that accumulates metric statistics. Returns: Decorated function that wraps with .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\utils\\metrics_utils.py",
    "ast_data": "FunctionDef name:update_state_wrapper arg:update_state_fn arguments arg FunctionDef name:decorated arg:metric_obj arguments arg arg arg Assign Call For If BoolOp Call Call Call Raise Call With Call Assign Call If Compare Call Return return:yes Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_ragged_fill_empty_rows_grad",
    "source_code": "@ops.RegisterGradient('RaggedFillEmptyRows')\ndef _ragged_fill_empty_rows_grad(op, unused_grad_output_indices, output_grad_values, unused_grad_empty_row_indicator, unused_grad_reverse_index_map):\n    reverse_index_map = op.outputs[3]\n    d_values, d_default_value = gen_ragged_array_ops.ragged_fill_empty_rows_grad(reverse_index_map=reverse_index_map, grad_values=output_grad_values)\n    return [None, d_values, None, d_default_value]",
    "docstring": "Gradients for RaggedFillEmptyRows.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\ragged_array_ops.py",
    "ast_data": "FunctionDef name:_ragged_fill_empty_rows_grad arg:op arg:unused_grad_output_indices arg:output_grad_values arg:unused_grad_empty_row_indicator arg:unused_grad_reverse_index_map arguments arg arg arg arg arg Assign Assign Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "TorchExportError",
    "source_code": "class TorchExportError(torch.onnx.errors.OnnxExporterError):\n    pass",
    "docstring": "Error during graph capturing using torch.export.",
    "type": "class",
    "file_path": "pytorch\\torch\\onnx\\_internal\\exporter\\_errors.py",
    "ast_data": "ClassDef name:TorchExportError"
  },
  {
    "library": "pytorch",
    "name": "get_stride_order",
    "source_code": "def get_stride_order(seq: Sequence[Union[int, torch.SymInt, Expr]], shape_env: Optional[ShapeEnv]=None) -> Sequence[int]:\n    sorted_idx: Sequence[int] = get_fill_order(seq, shape_env)\n    out = [0 for _ in range(len(seq))]\n    for i, elem in enumerate(sorted_idx):\n        out[elem] = i\n    return out",
    "docstring": "Convert strides to stride order",
    "type": "function",
    "file_path": "pytorch\\torch\\_inductor\\ir.py",
    "ast_data": "FunctionDef name:get_stride_order arg:seq arg:shape_env arguments arg arg Call Assign Call Call For Call Assign Return return:yes"
  },
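A pure-Python sketch of the conversion, assuming get_fill_order sorts dimension indices from smallest to largest stride; the result ranks each dimension by how fast it moves in memory.

def stride_order(strides):
    fill_order = sorted(range(len(strides)), key=lambda i: strides[i])
    out = [0] * len(strides)
    for rank, dim in enumerate(fill_order):
        out[dim] = rank
    return out

# A contiguous (2, 3, 4) tensor has strides (12, 4, 1); the innermost
# (fastest-moving) dimension gets order 0.
print(stride_order([12, 4, 1]))  # [2, 1, 0]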
  {
    "library": "tensorflow",
    "name": "is_adjoint_pair",
    "source_code": "def is_adjoint_pair(x, y):\n    if x is y:\n        if x.is_self_adjoint is False:\n            return False\n        if x.is_self_adjoint:\n            return True\n    return x.H is y or y.H is x",
    "docstring": "True iff x and y are adjoints of each other (by id, not entries).",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\linalg\\linear_operator_util.py",
    "ast_data": "FunctionDef name:is_adjoint_pair arg:x arg:y arguments arg arg If Compare If Compare Return return:yes If Return return:yes Return return:yes BoolOp Compare Compare"
  },
  {
    "library": "pytorch",
    "name": "is_triton_capable",
    "source_code": "@staticmethod\ndef is_triton_capable(device: torch.types.Device=None) -> bool:\n    return False",
    "docstring": "Returns True if the device has Triton support, False otherwise, even if the appropriate Triton backend is not available.",
    "type": "method",
    "file_path": "pytorch\\torch\\_dynamo\\device_interface.py",
    "ast_data": "FunctionDef name:is_triton_capable arg:device arguments arg Return return:yes"
  },
  {
    "library": "scipy",
    "name": "to_discrete",
    "source_code": "def to_discrete(self, dt, method='zoh', alpha=None):\n    raise NotImplementedError('to_discrete is not implemented for this system class.')",
    "docstring": "Return a discretized version of the current system. Parameters: See for details. Returns ------- sys: instance of",
    "type": "method",
    "file_path": "scipy\\scipy\\signal\\_ltisys.py",
    "ast_data": "FunctionDef name:to_discrete arg:self arg:dt arg:method arg:alpha arguments arg arg arg arg Raise Call"
  },
  {
    "library": "scipy",
    "name": "itilbert",
    "source_code": "def itilbert(x, h, period=None, _cache=_cache):\n    if isinstance(_cache, threading.local):\n        if not hasattr(_cache, 'itilbert_cache'):\n            _cache.itilbert_cache = {}\n        _cache = _cache.itilbert_cache\n    tmp = asarray(x)\n    if iscomplexobj(tmp):\n        return itilbert(tmp.real, h, period, _cache) + 1j * itilbert(tmp.imag, h, period, _cache)\n    if period is not None:\n        h = h * 2 * pi / period\n    n = len(x)\n    omega = _cache.get((n, h))\n    if omega is None:\n        if len(_cache) > 20:\n            while _cache:\n                _cache.popitem()\n\n        def kernel(k, h=h):\n            if k:\n                return -tanh(h * k)\n            return 0\n        omega = convolve.init_convolution_kernel(n, kernel, d=1)\n        _cache[n, h] = omega\n    overwrite_x = _datacopied(tmp, x)\n    return convolve.convolve(tmp, omega, swap_real_imag=1, overwrite_x=overwrite_x)",
    "docstring": "Return inverse h-Tilbert transform of a periodic sequence x. If `tilbert`.",
    "type": "function",
    "file_path": "scipy\\scipy\\fftpack\\_pseudo_diffs.py",
    "ast_data": "FunctionDef name:itilbert arg:x arg:h arg:period arg:_cache arguments arg arg arg arg If Call If Call Assign Assign Assign Call If Call Return return:yes Call Call If Compare Assign Assign Call Assign Call If Compare If Compare Call While Call FunctionDef name:kernel arg:k arg:h arguments arg arg If Return return:yes Call Return return:yes Assign Call Assign Assign Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "mem_get_info",
    "source_code": "def mem_get_info(device: 'Device'=None) -> tuple[int, int]:\n    if device is None:\n        device = torch.cuda.current_device()\n    device = _get_device_index(device, optional=True)\n    return torch.cuda.cudart().cudaMemGetInfo(device)",
    "docstring": "Return the global free and total GPU memory for a given device using cudaMemGetInfo. Args: device (torch.device or int or str, optional): selected device. Returns statistic for the current device, given by :func:, if :attr: is `cuda-memory-management` for more details about GPU memory management.",
    "type": "function",
    "file_path": "pytorch\\torch\\cuda\\memory.py",
    "ast_data": "FunctionDef name:mem_get_info arg:device arguments arg If Compare Assign Call Assign Call Return return:yes Call Call"
  },
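Typical usage, guarded so it only runs where CUDA is present:

import torch

if torch.cuda.is_available():
    free, total = torch.cuda.mem_get_info()  # defaults to the current device
    print(f'{free / 2**30:.2f} GiB free of {total / 2**30:.2f} GiB')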
  {
    "library": "authlib",
    "name": "exists_nonce",
    "source_code": "def exists_nonce(self, nonce, request):\n    raise NotImplementedError()",
    "docstring": "The nonce value MUST be unique across all requests with the same timestamp, client credentials, and token combinations. :param nonce: A string value of `` :param request: OAuth1Request instance :return: Boolean",
    "type": "method",
    "file_path": "authlib\\authlib\\oauth1\\rfc5849\\base_server.py",
    "ast_data": "FunctionDef name:exists_nonce arg:self arg:nonce arg:request arguments arg arg arg Raise Call"
  },
  {
    "library": "pandas",
    "name": "to_string",
    "source_code": "def to_string(self, buf: FilePath | WriteBuffer[str] | None=None, encoding: str | None=None, line_width: int | None=None) -> str | None:\n    from pandas.io.formats.string import StringFormatter\n    string_formatter = StringFormatter(self.fmt, line_width=line_width)\n    string = string_formatter.to_string()\n    return save_to_buffer(string, buf=buf, encoding=encoding)",
    "docstring": "Render a DataFrame to a console-friendly tabular output. Parameters ---------- buf : str, path object, file-like object, or None, default None String, path object (implementing `` function. If None, the result is returned as a string. encoding: str, default “utf-8” Set character encoding. line_width : int, optional Width to wrap a line in characters.",
    "type": "method",
    "file_path": "pandas\\pandas\\io\\formats\\format.py",
    "ast_data": "FunctionDef name:to_string arg:self arg:buf arg:encoding arg:line_width arguments arg arg arg arg Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "sphinx",
    "name": "_is_annotated_form",
    "source_code": "def _is_annotated_form(obj: Any) -> TypeIs[Annotated[Any, ...]]:\n    return typing.get_origin(obj) is typing.Annotated or str(obj).startswith('typing.Annotated')",
    "docstring": "Check if *obj* is an annotated type.",
    "type": "function",
    "file_path": "sphinx\\sphinx\\util\\typing.py",
    "ast_data": "FunctionDef name:_is_annotated_form arg:obj arguments arg Return return:yes BoolOp Compare Call Call Call"
  },
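Both conditions in the check can be exercised directly with the standard library:

import typing
from typing import Annotated

alias = Annotated[int, 'positive']
print(typing.get_origin(alias) is typing.Annotated)  # True
print(str(alias).startswith('typing.Annotated'))     # True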
  {
    "library": "pytorch",
    "name": "_make_alias",
    "source_code": "def _make_alias(fn, name):\n\n    def _fn(*args, **kwargs):\n        return fn(*args, **kwargs)\n    _fn.__name__ = name\n    _fn.__module__ = inspect.currentframe().f_back.f_globals['__name__']\n    return _fn",
    "docstring": "This function defines an alias of another function and sets its __name__ argument. It also sets its __module__ argument to the module of the caller. Note that when naively doing , we have that , and .",
    "type": "function",
    "file_path": "pytorch\\torch\\_refs\\__init__.py",
    "ast_data": "FunctionDef name:_make_alias arg:fn arg:name arguments arg arg FunctionDef name:_fn arguments arg arg Return return:yes Call Assign Assign Call Return return:yes"
  },
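A standalone demo (re-declaring the helper so it runs by itself) showing the difference from a naive alias assignment:

import inspect

def _make_alias(fn, name):
    def _fn(*args, **kwargs):
        return fn(*args, **kwargs)
    _fn.__name__ = name
    _fn.__module__ = inspect.currentframe().f_back.f_globals['__name__']
    return _fn

def add(a, b):
    return a + b

plus = _make_alias(add, 'plus')
naive = add
print(plus.__name__, plus(1, 2))  # plus 3
print(naive.__name__)             # add -- the naive alias keeps the old name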
  {
    "library": "scipy",
    "name": "__init__",
    "source_code": "def __init__(self, *system, **kwargs):\n    if isinstance(system[0], LinearTimeInvariant):\n        return\n    super().__init__(**kwargs)\n    self._num = None\n    self._den = None\n    self.num, self.den = normalize(*system)",
    "docstring": "Initialize the state space LTI system.",
    "type": "method",
    "file_path": "scipy\\scipy\\signal\\_ltisys.py",
    "ast_data": "FunctionDef name:__init__ arg:self arguments arg arg arg If Call Return return:no Call Call Assign Assign Assign Call"
  },
  {
    "library": "pandas",
    "name": "_get_pretty_string",
    "source_code": "def _get_pretty_string(obj) -> str:\n    sio = StringIO()\n    pprint.pprint(obj, stream=sio)\n    return sio.getvalue()",
    "docstring": "Return a prettier version of obj. Parameters ---------- obj : object Object to pretty print Returns ------- str Pretty print object repr",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\computation\\scope.py",
    "ast_data": "FunctionDef name:_get_pretty_string arg:obj arguments arg Assign Call Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "fullargspec_to_signature",
    "source_code": "def fullargspec_to_signature(fullargspec: inspect.FullArgSpec) -> inspect.Signature:\n    defaults = _make_default_values(fullargspec)\n    parameters = []\n    for arg in fullargspec.args:\n        parameters.append(inspect.Parameter(arg, inspect.Parameter.POSITIONAL_OR_KEYWORD, default=defaults.get(arg, inspect.Parameter.empty)))\n    if fullargspec.varargs is not None:\n        parameters.append(inspect.Parameter(fullargspec.varargs, inspect.Parameter.VAR_POSITIONAL))\n    for kwarg in fullargspec.kwonlyargs:\n        parameters.append(inspect.Parameter(kwarg, inspect.Parameter.KEYWORD_ONLY, default=defaults.get(kwarg, inspect.Parameter.empty)))\n    if fullargspec.varkw is not None:\n        parameters.append(inspect.Parameter(fullargspec.varkw, inspect.Parameter.VAR_KEYWORD))\n    return inspect.Signature(parameters)",
    "docstring": "Repackages fullargspec information into an equivalent inspect.Signature.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\util\\tf_decorator.py",
    "ast_data": "FunctionDef name:fullargspec_to_signature arg:fullargspec arguments arg Assign Call Assign For Call Call Call If Compare Call Call For Call Call Call If Compare Call Call Return return:yes Call"
  },
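The parameter-kind mapping the function performs mirrors what inspect.signature reports; this illustration shows the raw fullargspec next to the signature it should repackage into (the TF-internal function itself is not imported here). Positional args become POSITIONAL_OR_KEYWORD, *rest VAR_POSITIONAL, c KEYWORD_ONLY, and **kw VAR_KEYWORD.

import inspect

def example(a, b=2, *rest, c, **kw):
    pass

spec = inspect.getfullargspec(example)
print(spec.args, spec.varargs, spec.kwonlyargs, spec.varkw)
# ['a', 'b'] rest ['c'] kw
print(inspect.signature(example))  # (a, b=2, *rest, c, **kw)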
  {
    "library": "kornia",
    "name": "_blur_pool_by_kernel2d",
    "source_code": "def _blur_pool_by_kernel2d(input: Tensor, kernel: Tensor, stride: int) -> Tensor:\n    KORNIA_CHECK(len(kernel.shape) == 4 and kernel.shape[-2] == kernel.shape[-1], f'Invalid kernel shape. Expect CxC_(out, None)xNxN, Got {kernel.shape}')\n    padding = _compute_zero_padding((kernel.shape[-2], kernel.shape[-1]))\n    return F.conv2d(input, kernel, padding=padding, stride=stride, groups=input.shape[1])",
    "docstring": "Compute blur_pool by a given :math: kernel.",
    "type": "function",
    "file_path": "kornia\\kornia\\filters\\blur_pool.py",
    "ast_data": "FunctionDef name:_blur_pool_by_kernel2d arg:input arg:kernel arg:stride arguments arg arg arg Call BoolOp Compare Call Compare Assign Call Return return:yes Call"
  },
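The public wrapper around this helper is kornia.filters.blur_pool2d; a quick shape check, assuming the default stride of 2:

import torch
from kornia.filters import blur_pool2d

x = torch.rand(1, 3, 8, 8)
y = blur_pool2d(x, kernel_size=3)  # anti-aliased downsampling
print(y.shape)                     # torch.Size([1, 3, 4, 4])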
  {
    "library": "pytorch",
    "name": "_init_param_reduce_dtypes",
    "source_code": "def _init_param_reduce_dtypes(self, mp_param_dtype: Optional[torch.dtype], mp_reduce_dtype: Optional[torch.dtype]) -> None:\n    self._low_prec_param_dtype_specified = mp_param_dtype is not None\n    self._low_prec_reduce_dtype_specified = mp_reduce_dtype is not None\n    if self._low_prec_param_dtype_specified and (not self._low_prec_reduce_dtype_specified):\n        self._fwd_bwd_param_dtype = mp_param_dtype\n        self._reduce_dtype = self._fwd_bwd_param_dtype\n    else:\n        self._fwd_bwd_param_dtype = mp_param_dtype or self._orig_param_dtype\n        self._reduce_dtype = mp_reduce_dtype or self._orig_param_dtype\n    assert self._fwd_bwd_param_dtype is not None\n    assert self._reduce_dtype is not None",
    "docstring": "Initialize param and reduce dtypes. Precondition: ``, in which case we assume the gradient reduction dtype matches the forward/backward parameter dtype.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\fsdp\\_flat_param.py",
    "ast_data": "FunctionDef name:_init_param_reduce_dtypes arg:self arg:mp_param_dtype arg:mp_reduce_dtype arguments arg arg arg Assign Compare Assign Compare If BoolOp Assign Assign Assign BoolOp Assign BoolOp Compare Compare"
  },
  {
    "library": "matplotlib",
    "name": "__call__",
    "source_code": "def __call__(self):\n    dmin, dmax = self.axis.get_data_interval()\n    return self.tick_values(dmin, dmax)",
    "docstring": "Return the locations of the ticks",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\ticker.py",
    "ast_data": "FunctionDef name:__call__ arg:self arguments arg Assign Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "__init__",
    "source_code": "def __init__(self, request_queue: RequestQueue, max_interval: float, daemon: bool=True):\n    super().__init__()\n    self._request_queue = request_queue\n    self._max_interval = max_interval\n    self._daemon = daemon\n    self._watchdog_thread: Optional[threading.Thread] = None\n    self._stop_signaled = False",
    "docstring": ":param request_queue: Consumer `` :param max_interval: max time (in seconds) to wait for an item in the request_queue :param daemon: whether to run the watchdog thread as a daemon",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\elastic\\timer\\api.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:request_queue arg:max_interval arg:daemon arguments arg arg arg arg Call Call Assign Assign Assign Assign"
  },
  {
    "library": "tensorflow",
    "name": "__same_types",
    "source_code": "@staticmethod\ndef __same_types(a, b):\n    if nest.is_namedtuple(a) and nest.is_namedtuple(b):\n        return nest.same_namedtuples(a, b)\n    else:\n        return type(a) is type(b)",
    "docstring": "Returns whether a and b have the same type, up to namedtuple equivalence. Consistent with tf.nest.assert_same_structure(), two namedtuple types are considered the same iff they agree in their class name (without qualification by module name) and in their sequence of field names. This makes namedtuples recreated by nested_structure_coder compatible with their original Python definition. Args: a: a Python object. b: a Python object. Returns: A boolean that is true iff type(a) and type(b) are the same object or equivalent namedtuple types.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\type_spec.py",
    "ast_data": "FunctionDef name:__same_types arg:a arg:b arguments arg arg If BoolOp Call Call Return return:yes Call Return return:yes Compare Call Call"
  },
  {
    "library": "authlib",
    "name": "get_redirect_uri",
    "source_code": "def get_redirect_uri(self):\n    raise NotImplementedError()",
    "docstring": "A method to get authorization code's ``:: def get_redirect_uri(self): return self.redirect_uri :return: A URL string",
    "type": "method",
    "file_path": "authlib\\authlib\\oauth2\\rfc6749\\models.py",
    "ast_data": "FunctionDef name:get_redirect_uri arg:self arguments arg Raise Call"
  },
  {
    "library": "tensorflow",
    "name": "get_output_at",
    "source_code": "def get_output_at(self, node_index):\n    return self._get_node_attribute_at_index(node_index, 'output_tensors', 'output')",
    "docstring": "Retrieves the output tensor(s) of a layer at a given node. Args: node_index: Integer, index of the node from which to retrieve the attribute. E.g. will correspond to the first output node of the layer. Returns: A tensor (or list of tensors if the layer has multiple outputs). Raises: RuntimeError: If called in Eager mode.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\base_layer_v1.py",
    "ast_data": "FunctionDef name:get_output_at arg:self arg:node_index arguments arg arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "control_dependencies",
    "source_code": "def control_dependencies(self, control_inputs):\n    if control_inputs is None:\n        return super().control_dependencies(control_inputs)\n    filtered_control_inputs = []\n    for c in control_inputs:\n        if isinstance(c, indexed_slices.IndexedSlices) or (hasattr(c, '_handle') and hasattr(c, 'op')):\n            c = c.op\n        graph_element = ops._as_graph_element(c)\n        if graph_element is None:\n            graph_element = c\n        if graph_element is not None and getattr(graph_element, 'graph', None) is not self:\n            self._function_captures.control.add(graph_element)\n        else:\n            filtered_control_inputs.append(graph_element)\n    return super().control_dependencies(filtered_control_inputs)",
    "docstring": "Handles control dependencies. FuncGraph wraps Graph's control_dependencies logic by first filtering out any external tensors / operations and storing them in the graph's control_captures member. Any consumers of this function graph must then decide how to handle the control captures. Args: control_inputs: A list of or objects which must be executed or computed before running the operations defined in the context. Can also be to clear the control dependencies. Returns: A context manager that specifies control dependencies for all operations constructed within the context. Raises: TypeError: If is not a list of or objects.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\func_graph.py",
    "ast_data": "FunctionDef name:control_dependencies arg:self arg:control_inputs arguments arg arg If Compare Return return:yes Call Call Assign For If BoolOp Call BoolOp Call Call Assign Assign Call If Compare Assign If BoolOp Compare Compare Call Call Call Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "module_name",
    "source_code": "@property\ndef module_name(self) -> str:\n    return self._module_name",
    "docstring": "Name of the module. E.g. .",
    "type": "method",
    "file_path": "pytorch\\torch\\onnx\\_internal\\fx\\passes\\modularization.py",
    "ast_data": "FunctionDef name:module_name arg:self arguments arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "redistribute",
    "source_code": "def redistribute(self, device_mesh: Optional[DeviceMesh]=None, placements: Optional[Sequence[Placement]]=None, *, async_op: bool=False, forward_dtype: Optional[torch.dtype]=None, backward_dtype: Optional[torch.dtype]=None) -> 'DTensor':\n    device_mesh = device_mesh or self.device_mesh\n    if placements is None:\n        raise RuntimeError('placements is needed for redistribute!')\n    placements = list(placements)\n    for i, placement in enumerate(placements):\n        if placement.is_partial():\n            raise RuntimeError('Can not redistribute to Partial, redistributing to Partial is for internal use only!')\n        elif isinstance(placement, Shard) and placement.dim < 0:\n            placements[i] = Shard(placement.dim + self.ndim)\n    placements = tuple(placements)\n    return Redistribute.apply(self, device_mesh, placements, async_op, forward_dtype, backward_dtype)",
    "docstring": "`DeviceMeshPlacementDTensor` currently only supports redistributing DTensor on the same DeviceMesh, Please file an issue if you need to redistribute DTensor to different DeviceMesh.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\tensor\\_api.py",
    "ast_data": "FunctionDef name:redistribute arg:self arg:device_mesh arg:placements arguments arg arg arg arg arg arg Assign BoolOp If Compare Raise Call Assign Call For Call If Call Raise Call If BoolOp Call Compare Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "numpy",
    "name": "_insert_masked_print",
    "source_code": "def _insert_masked_print(self):\n    if masked_print_option.enabled():\n        mask = self._mask\n        if mask is nomask:\n            res = self._data\n        else:\n            data = self._data\n            print_width = self._print_width if self.ndim > 1 else self._print_width_1d\n            for axis in range(self.ndim):\n                if data.shape[axis] > print_width:\n                    ind = print_width // 2\n                    arr = np.split(data, (ind, -ind), axis=axis)\n                    data = np.concatenate((arr[0], arr[2]), axis=axis)\n                    arr = np.split(mask, (ind, -ind), axis=axis)\n                    mask = np.concatenate((arr[0], arr[2]), axis=axis)\n            rdtype = _replace_dtype_fields(self.dtype, 'O')\n            res = data.astype(rdtype)\n            _recursive_printoption(res, mask, masked_print_option)\n    else:\n        res = self.filled(self.fill_value)\n    return res",
    "docstring": "Replace masked values with masked_print_option, casting all innermost dtypes to object.",
    "type": "method",
    "file_path": "numpy\\numpy\\ma\\core.py",
    "ast_data": "FunctionDef name:_insert_masked_print arg:self arguments arg If Call Assign If Compare Assign Assign Assign Compare For Call If Compare Assign Assign Call Assign Call Assign Call Assign Call Assign Call Assign Call Call Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "InvalidArgumentError",
    "source_code": "@tf_export('errors.InvalidArgumentError')\nclass InvalidArgumentError(OpError):\n\n    def __init__(self, node_def, op, message, *args):\n        super(InvalidArgumentError, self).__init__(node_def, op, message, INVALID_ARGUMENT, *args)",
    "docstring": "Raised when an operation receives an invalid argument. This error is typically raised when an op receives mismatched arguments. Example: >>> tf.reshape([1, 2, 3], (2,)) Traceback (most recent call last): ... InvalidArgumentError: ...",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\errors_impl.py",
    "ast_data": "ClassDef name:InvalidArgumentError FunctionDef name:__init__ arg:self arg:node_def arg:op arg:message arguments arg arg arg arg arg Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_cumsum_flat_values_at_ragged_rank",
    "source_code": "def _cumsum_flat_values_at_ragged_rank(last_rp, flat_values, exclusive=False, reverse=False):\n    if not exclusive:\n        partial = _cumsum_flat_values_at_ragged_rank(last_rp, flat_values, exclusive=True, reverse=reverse)\n        return partial + flat_values\n    if reverse:\n        youngest_sibling = array_ops.gather(params=last_rp.row_splits(), indices=last_rp.value_rowids() + 1) - 1\n        new_flat_values = math_ops.cumsum(flat_values, exclusive=True, reverse=True)\n        initial_values = array_ops.gather(params=new_flat_values, indices=youngest_sibling)\n        return new_flat_values - initial_values\n    else:\n        eldest_sibling = array_ops.gather(params=last_rp.row_splits(), indices=last_rp.value_rowids())\n        new_flat_values = math_ops.cumsum(flat_values, exclusive=True)\n        initial_values = array_ops.gather(params=new_flat_values, indices=eldest_sibling)\n        return new_flat_values - initial_values",
    "docstring": "Calculate flat_values for math_ops.cumsum when axis==ragged_rank.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\ragged_math_ops.py",
    "ast_data": "FunctionDef name:_cumsum_flat_values_at_ragged_rank arg:last_rp arg:flat_values arg:exclusive arg:reverse arguments arg arg arg arg If Assign Call Return return:yes If Assign Call Call Call Assign Call Assign Call Return return:yes Assign Call Call Call Assign Call Assign Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "blocked",
    "source_code": "@contextlib.contextmanager\ndef blocked(self, *, signal=None):\n    orig = self.callbacks\n    try:\n        if signal is None:\n            self.callbacks = {}\n        else:\n            self.callbacks = {k: orig[k] for k in orig if k != signal}\n        yield\n    finally:\n        self.callbacks = orig",
    "docstring": "Block callback signals from being processed. A context manager to temporarily block/disable callback signals from being processed by the registered listeners. Parameters ---------- signal : str, optional The callback signal to block. The default is to block all signals.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\cbook.py",
    "ast_data": "FunctionDef name:blocked arg:self arguments arg arg Assign Try If Compare Assign Assign Compare Assign"
  },
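Usage with a CallbackRegistry: signals processed inside the context are silently dropped, and the original callbacks are restored on exit even if an exception occurs.

from matplotlib.cbook import CallbackRegistry

cbs = CallbackRegistry()
cbs.connect('changed', lambda: print('changed fired'))

cbs.process('changed')               # changed fired
with cbs.blocked(signal='changed'):
    cbs.process('changed')           # suppressed
cbs.process('changed')               # changed fired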
  {
    "library": "tensorflow",
    "name": "finalize",
    "source_code": "@abc.abstractmethod\ndef finalize(self):\n    raise NotImplementedError('Must be implemented in subclasses.')",
    "docstring": "Prepares the total results to be returned.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\training_utils_v1.py",
    "ast_data": "FunctionDef name:finalize arg:self arguments arg Raise Call"
  },
  {
    "library": "scipy",
    "name": "Branin01",
    "source_code": "class Branin01(Benchmark):\n\n    def __init__(self, dimensions=2):\n        Benchmark.__init__(self, dimensions)\n        self._bounds = [(-5.0, 10.0), (0.0, 15.0)]\n        self.global_optimum = [[-pi, 12.275], [pi, 2.275], [3 * pi, 2.475]]\n        self.fglob = 0.39788735772973816\n\n    def fun(self, x, *args):\n        self.nfev += 1\n        return (x[1] - 5.1 / (4 * pi ** 2) * x[0] ** 2 + 5 * x[0] / pi - 6) ** 2 + 10 * (1 - 1 / (8 * pi)) * cos(x[0]) + 10",
    "docstring": "Branin01 objective function. The Branin01 global optimization problem is a multimodal minimization problem defined as follows .. math:: f_{\\text{Branin01}}(x) = \\left(- 1.275 \\frac{x_1^{2}}{\\pi^{2}} + 5 \\frac{x_1}{\\pi} + x_2 -6\\right)^{2} + \\left(10 -\\frac{5}{4 \\pi} \\right) \\cos\\left(x_1\\right) + 10 with :math: *Global optimum*: :math: for :math: or :math: or :math: .. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions For Global Optimization Problems Int. Journal of Mathematical Modelling and Numerical Optimisation, 2013, 4, 150-194. TODO: Jamil#22, one of the solutions is different",
    "type": "class",
    "file_path": "scipy\\benchmarks\\benchmarks\\go_benchmark_functions\\go_funcs_B.py",
    "ast_data": "ClassDef name:Branin01 FunctionDef name:__init__ arg:self arg:dimensions arguments arg arg Call Assign Assign Assign FunctionDef name:fun arg:self arg:x arguments arg arg arg Return return:yes Call"
  },
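Evaluating the objective at the three listed optima confirms fglob; this standalone re-statement of fun uses only the math module.

from math import pi, cos

def branin01(x):
    return ((x[1] - 5.1 / (4 * pi ** 2) * x[0] ** 2 + 5 * x[0] / pi - 6) ** 2
            + 10 * (1 - 1 / (8 * pi)) * cos(x[0]) + 10)

for x in ([-pi, 12.275], [pi, 2.275], [3 * pi, 2.475]):
    print(branin01(x))  # ~0.39788735772973816 at each global optimum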
  {
    "library": "tensorflow",
    "name": "_get_session_manager",
    "source_code": "def _get_session_manager(self):\n    if self._session_manager:\n        return self._session_manager\n    self._session_manager = sm.SessionManager(local_init_op=self._scaffold.local_init_op, local_init_feed_dict=self._scaffold.local_init_feed_dict, ready_op=self._scaffold.ready_op, ready_for_local_init_op=self._scaffold.ready_for_local_init_op, graph=ops.get_default_graph())\n    return self._session_manager",
    "docstring": "Gets or creates a SessionManager.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\training\\monitored_session.py",
    "ast_data": "FunctionDef name:_get_session_manager arg:self arguments arg If Return return:yes Assign Call Call Return return:yes"
  },
  {
    "library": "scipy",
    "name": "_workers_wrapper",
    "source_code": "def _workers_wrapper(func):\n\n    @functools.wraps(func)\n    def inner(*args, **kwds):\n        kwargs = kwds.copy()\n        if 'workers' not in kwargs:\n            _workers = map\n        elif 'workers' in kwargs and kwargs['workers'] is None:\n            _workers = map\n        else:\n            _workers = kwargs['workers']\n        with MapWrapper(_workers) as mf:\n            kwargs['workers'] = mf\n            return func(*args, **kwargs)\n    return inner",
    "docstring": "Wrapper to deal with setup-cleanup of workers outside a user function via a ContextManager. It saves having to do the setup/tear down with within that function, which can be messy.",
    "type": "function",
    "file_path": "scipy\\scipy\\_lib\\_util.py",
    "ast_data": "FunctionDef name:_workers_wrapper arg:func arguments arg FunctionDef name:inner arguments arg arg Assign Call If Compare Assign If BoolOp Compare Compare Assign Assign With Call Assign Return return:yes Call Call Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "transform",
    "source_code": "def transform(self, X):\n    return self._transform(X)",
    "docstring": "Return class labels or probabilities for X for each estimator. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) Training vectors, where is the number of samples and is the number of features. Returns ------- y_preds : ndarray of shape (n_samples, n_estimators) or (n_samples, n_classes * n_estimators) Prediction outputs for each estimator.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\ensemble\\_stacking.py",
    "ast_data": "FunctionDef name:transform arg:self arg:X arguments arg arg Return return:yes Call"
  },
  {
    "library": "pygame",
    "name": "get_device_info",
    "source_code": "def get_device_info(an_id):\n    _check_init()\n    return _pypm.GetDeviceInfo(an_id)",
    "docstring": "returns information about a midi device pygame.midi.get_device_info(an_id): return (interf, name, input, output, opened) interf - a byte string describing the device interface, eg b'ALSA'. name - a byte string for the name of the device, eg b'Midi Through Port-0' input - 0, or 1 if the device is an input device. output - 0, or 1 if the device is an output device. opened - 0, or 1 if the device is opened. If the id is out of range, the function returns None.",
    "type": "function",
    "file_path": "pygame\\src_py\\midi.py",
    "ast_data": "FunctionDef name:get_device_info arg:an_id arguments arg Call Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "FigureMpl",
    "source_code": "class FigureMpl(Figure):\n    has_content = False\n    required_arguments = 1\n    optional_arguments = 2\n    final_argument_whitespace = False\n    option_spec = {'alt': directives.unchanged, 'height': directives.length_or_unitless, 'width': directives.length_or_percentage_or_unitless, 'scale': directives.nonnegative_int, 'align': Image.align, 'class': directives.class_option, 'caption': directives.unchanged, 'srcset': directives.unchanged}\n\n    def run(self):\n        image_node = figmplnode()\n        imagenm = self.arguments[0]\n        image_node['alt'] = self.options.get('alt', '')\n        image_node['align'] = self.options.get('align', None)\n        image_node['class'] = self.options.get('class', None)\n        image_node['width'] = self.options.get('width', None)\n        image_node['height'] = self.options.get('height', None)\n        image_node['scale'] = self.options.get('scale', None)\n        image_node['caption'] = self.options.get('caption', None)\n        image_node['uri'] = imagenm\n        image_node['srcset'] = self.options.get('srcset', None)\n        return [image_node]",
    "docstring": "Implements a directive to allow an optional hidpi image. Meant to be used with the *plot_srcset* configuration option in conf.py, and gets set in the TEMPLATE of plot_directive.py e.g.:: .. figure-mpl:: plot_directive/some_plots-1.png :alt: bar :srcset: plot_directive/some_plots-1.png, plot_directive/some_plots-1.2x.png 2.00x :class: plot-directive The resulting html (at ``):: where the subdirectory is included in the image name for uniqueness.",
    "type": "class",
    "file_path": "matplotlib\\lib\\matplotlib\\sphinxext\\figmpl_directive.py",
    "ast_data": "ClassDef name:FigureMpl Assign Assign Assign Assign Assign FunctionDef name:run arg:self arguments arg Assign Call Assign Assign Call Assign Call Assign Call Assign Call Assign Call Assign Call Assign Call Assign Assign Call Return return:yes"
  },
  {
    "library": "kornia",
    "name": "SemanticSegmentationTrainer",
    "source_code": "class SemanticSegmentationTrainer(Trainer):\n\n    def compute_metrics(self, *args: Tensor) -> Dict[str, float]:\n        if len(args) != 2:\n            raise AssertionError\n        out, target = args\n        iou = mean_iou(out.argmax(1), target, out.shape[1]).mean()\n        return {'iou': iou.item()}",
    "docstring": "Module to be used for semantic segmentation purposes. The module subclasses :py:class: and overrides the :py:func: function implementing IoU :py:func:. .. seealso:: Learn how to use this class in the following __.",
    "type": "class",
    "file_path": "kornia\\kornia\\x\\trainers.py",
    "ast_data": "ClassDef name:SemanticSegmentationTrainer FunctionDef name:compute_metrics arg:self arguments arg arg If Compare Call Raise Assign Assign Call Call Call Return return:yes Call"
  },
  {
    "library": "scrapy",
    "name": "get_extension",
    "source_code": "def get_extension(self, cls: type[_T]) -> _T | None:\n    if not self.extensions:\n        raise RuntimeError('Crawler.get_extension() can only be called after the extension manager has been created.')\n    return self._get_component(cls, self.extensions.middlewares)",
    "docstring": "Return the run-time instance of an :ref: of the specified class or a subclass, or `engine_startedspider_opened`.",
    "type": "method",
    "file_path": "scrapy\\scrapy\\crawler.py",
    "ast_data": "FunctionDef name:get_extension arg:self arg:cls arguments arg arg If Raise Call Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "start",
    "source_code": "def start(self, f, a, b, args=()):\n    self.function_calls = 0\n    self.iterations = 0\n    self.f = f\n    self.args = args\n    self.ab[:] = [a, b]\n    if not np.isfinite(a) or np.imag(a) != 0:\n        raise ValueError(f'Invalid x value: {a} ')\n    if not np.isfinite(b) or np.imag(b) != 0:\n        raise ValueError(f'Invalid x value: {b} ')\n    fa = self._callf(a)\n    if not np.isfinite(fa) or np.imag(fa) != 0:\n        raise ValueError(f'Invalid function value: f({a:f}) -> {fa} ')\n    if fa == 0:\n        return (_ECONVERGED, a)\n    fb = self._callf(b)\n    if not np.isfinite(fb) or np.imag(fb) != 0:\n        raise ValueError(f'Invalid function value: f({b:f}) -> {fb} ')\n    if fb == 0:\n        return (_ECONVERGED, b)\n    if np.sign(fb) * np.sign(fa) > 0:\n        raise ValueError(f'f(a) and f(b) must have different signs, but f({a:e})={fa:e}, f({b:e})={fb:e} ')\n    self.fab[:] = [fa, fb]\n    return (_EINPROGRESS, sum(self.ab) / 2.0)",
    "docstring": "Prepare for the iterations.",
    "type": "method",
    "file_path": "scipy\\scipy\\optimize\\_zeros_py.py",
    "ast_data": "FunctionDef name:start arg:self arg:f arg:a arg:b arg:args arguments arg arg arg arg arg Assign Assign Assign Assign Assign If BoolOp Call Compare Call Raise Call If BoolOp Call Compare Call Raise Call Assign Call If BoolOp Call Compare Call Raise Call If Compare Return return:yes Assign Call If BoolOp Call Compare Call Raise Call If Compare Return return:yes If Compare Call Call Raise Call Assign Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "duplicated",
    "source_code": "def duplicated(values: ArrayLike, keep: Literal['first', 'last', False]='first', mask: npt.NDArray[np.bool_] | None=None) -> npt.NDArray[np.bool_]:\n    values = _ensure_data(values)\n    return htable.duplicated(values, keep=keep, mask=mask)",
    "docstring": "Return boolean ndarray denoting duplicate values. Parameters ---------- values : np.ndarray or ExtensionArray Array over which to check for duplicate values. keep : {'first', 'last', False}, default 'first' - ``. mask : ndarray[bool], optional array indicating which elements to exclude from checking Returns ------- duplicated : ndarray[bool]",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\algorithms.py",
    "ast_data": "FunctionDef name:duplicated arg:values arg:keep arg:mask arguments arg arg arg Assign Call Return return:yes Call"
  },
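The user-facing behavior built on this helper is Series.duplicated; the example covers all three keep modes.

import pandas as pd

s = pd.Series([1, 1, 2, 2, 2, 3])
print(s.duplicated(keep='first').tolist())  # [False, True, False, True, True, False]
print(s.duplicated(keep='last').tolist())   # [True, False, True, True, False, False]
print(s.duplicated(keep=False).tolist())    # [True, True, True, True, True, False]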
  {
    "library": "django",
    "name": "contains",
    "source_code": "def contains(self, other):\n    return self._topology(capi.ogr_contains, other)",
    "docstring": "Return True if this geometry contains the other.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\gdal\\geometries.py",
    "ast_data": "FunctionDef name:contains arg:self arg:other arguments arg arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "save_op",
    "source_code": "def save_op(self, filename_tensor, saveables):\n    tensor_names = []\n    tensors = []\n    tensor_slices = []\n    for saveable in saveables:\n        for spec in saveable.specs:\n            tensor_names.append(spec.name)\n            tensors.append(spec.tensor)\n            tensor_slices.append(spec.slice_spec)\n    if self._write_version == saver_pb2.SaverDef.V1:\n        return io_ops._save(filename=filename_tensor, tensor_names=tensor_names, tensors=tensors, tensor_slices=tensor_slices)\n    elif self._write_version == saver_pb2.SaverDef.V2:\n        return io_ops.save_v2(filename_tensor, tensor_names, tensor_slices, tensors)\n    else:\n        raise RuntimeError('Unexpected write_version: ' + self._write_version)",
    "docstring": "Create an Op to save 'saveables'. This is intended to be overridden by subclasses that want to generate different Ops. Args: filename_tensor: String Tensor. saveables: A list of BaseSaverBuilder.SaveableObject objects. Returns: An Operation that save the variables. Raises: RuntimeError: (implementation detail) if \"self._write_version\" is an unexpected value.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\training\\saver.py",
    "ast_data": "FunctionDef name:save_op arg:self arg:filename_tensor arg:saveables arguments arg arg arg Assign Assign Assign For For Call Call Call If Compare Return return:yes Call If Compare Return return:yes Call Raise Call"
  },
  {
    "library": "django",
    "name": "from_string",
    "source_code": "def from_string(self, template_code):\n    return Template(template_code, engine=self)",
    "docstring": "Return a compiled Template object for the given template code, handling template inheritance recursively.",
    "type": "method",
    "file_path": "django\\django\\template\\engine.py",
    "ast_data": "FunctionDef name:from_string arg:self arg:template_code arguments arg arg Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "rsh",
    "source_code": "def rsh(data, points=None):\n    data = ma.array(data, copy=False)\n    if points is None:\n        points = data\n    else:\n        points = np.atleast_1d(np.asarray(points))\n    if data.ndim != 1:\n        raise AttributeError('The input array should be 1D only !')\n    n = data.count()\n    r = idealfourths(data, axis=None)\n    h = 1.2 * (r[-1] - r[0]) / n ** (1.0 / 5)\n    nhi = (data[:, None] <= points[None, :] + h).sum(0)\n    nlo = (data[:, None] < points[None, :] - h).sum(0)\n    return (nhi - nlo) / (2.0 * n * h)",
    "docstring": "Evaluates Rosenblatt's shifted histogram estimators for each data point. Rosenblatt's estimator is a centered finite-difference approximation to the derivative of the empirical cumulative distribution function. Parameters ---------- data : sequence Input data, should be 1-D. Masked values are ignored. points : sequence or None, optional Sequence of points where to evaluate Rosenblatt shifted histogram. If None, use the data.",
    "type": "function",
    "file_path": "scipy\\scipy\\stats\\_mstats_extras.py",
    "ast_data": "FunctionDef name:rsh arg:data arg:points arguments arg arg Assign Call If Compare Assign Assign Call Call If Compare Raise Call Assign Call Assign Call Assign Assign Call Compare Assign Call Compare Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_update_weak_tensor_patched_ops_in_dispatch_dict",
    "source_code": "def _update_weak_tensor_patched_ops_in_dispatch_dict(patched_op):\n    dispatch_dict = dispatch._TYPE_BASED_DISPATCH_SIGNATURES\n    unpatched_api = patched_op.__wrapped__\n    if unpatched_api in dispatch_dict:\n        dispatch_dict[patched_op] = dispatch_dict[unpatched_api]",
    "docstring": "Update dispatch dictionary to store WeakTensor patched op references. _TYPE_BASED_DISPATCH_SIGNATURES in dispatch.py stores mappings from op reference to all the dispatchers it's registered with. We need to update this dictionary to add a mapping from the patched-op reference to the signature dictionary the unpatched-op reference is mapped to. This ensures that dispatch can be reigstered and unregistered with monkey-patched ops.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\weak_tensor_ops.py",
    "ast_data": "FunctionDef name:_update_weak_tensor_patched_ops_in_dispatch_dict arg:patched_op arguments arg Assign Assign If Compare Assign"
  },
  {
    "library": "pygame",
    "name": "note_on",
    "source_code": "def note_on(self, note, velocity, channel=0):\n    if not 0 <= channel <= 15:\n        raise ValueError('Channel not between 0 and 15.')\n    self.write_short(144 + channel, note, velocity)",
    "docstring": "turns a midi note on. Note must be off. Output.note_on(note, velocity, channel=0) note is an integer from 0 to 127 velocity is an integer from 0 to 127 channel is an integer from 0 to 15 Turn a note on in the output stream. The note must already be off for this to work correctly.",
    "type": "method",
    "file_path": "pygame\\src_py\\midi.py",
    "ast_data": "FunctionDef name:note_on arg:self arg:note arg:velocity arg:channel arguments arg arg arg arg If Compare Raise Call Call"
  },
  {
    "library": "pytorch",
    "name": "base_python",
    "source_code": "def base_python(self, *args: str, **popen_kwargs: Any) -> subprocess.CompletedProcess[str]:\n    return self.python(*args, python=self.base_executable, **popen_kwargs)",
    "docstring": "Run a Python command in the base environment.",
    "type": "method",
    "file_path": "pytorch\\tools\\nightly.py",
    "ast_data": "FunctionDef name:base_python arg:self arguments arg arg arg Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "zero_ext",
    "source_code": "def zero_ext(x, n, axis=-1):\n    if n < 1:\n        return x\n    zeros_shape = list(x.shape)\n    zeros_shape[axis] = n\n    zeros = np.zeros(zeros_shape, dtype=x.dtype)\n    ext = np.concatenate((zeros, x, zeros), axis=axis)\n    return ext",
    "docstring": "Zero padding at the boundaries of an array Generate a new ndarray that is a zero-padded extension of along an axis. Parameters ---------- x : ndarray The array to be extended. n : int The number of elements by which to extend at each end of the axis. axis : int, optional The axis along which to extend . Default is -1. Examples -------- >>> import numpy as np >>> from scipy.signal._arraytools import zero_ext >>> a = np.array([[1, 2, 3, 4, 5], [0, 1, 4, 9, 16]]) >>> zero_ext(a, 2) array([[ 0, 0, 1, 2, 3, 4, 5, 0, 0], [ 0, 0, 0, 1, 4, 9, 16, 0, 0]])",
    "type": "function",
    "file_path": "scipy\\scipy\\signal\\_arraytools.py",
    "ast_data": "FunctionDef name:zero_ext arg:x arg:n arg:axis arguments arg arg arg If Compare Return return:yes Assign Call Assign Assign Call Assign Call Return return:yes"
  },
  {
    "library": "scipy",
    "name": "format",
    "source_code": "@property\ndef format(self) -> str:\n    return self._format",
    "docstring": "Format string for matrix.",
    "type": "method",
    "file_path": "scipy\\scipy\\sparse\\_base.py",
    "ast_data": "FunctionDef name:format arg:self arguments arg Return return:yes"
  },
  {
    "library": "numpy",
    "name": "masked_less_equal",
    "source_code": "def masked_less_equal(x, value, copy=True):\n    return masked_where(less_equal(x, value), x, copy=copy)",
    "docstring": "Mask an array where less than or equal to a given value. This function is a shortcut to `condition` = (x >> import numpy as np >>> import numpy.ma as ma >>> a = np.arange(4) >>> a array([0, 1, 2, 3]) >>> ma.masked_less_equal(a, 2) masked_array(data=[--, --, --, 3], mask=[ True, True, True, False], fill_value=999999)",
    "type": "function",
    "file_path": "numpy\\numpy\\ma\\core.py",
    "ast_data": "FunctionDef name:masked_less_equal arg:x arg:value arg:copy arguments arg arg arg Return return:yes Call Call"
  },
  {
    "library": "scikit-learn",
    "name": "_set_intercept",
    "source_code": "def _set_intercept(self, X_offset, y_offset, X_scale):\n    xp, _ = get_namespace(X_offset, y_offset, X_scale)\n    if self.fit_intercept:\n        coef_ = xp.astype(self.coef_, X_scale.dtype, copy=False)\n        coef_ = self.coef_ = xp.divide(coef_, X_scale)\n        if coef_.ndim == 1:\n            intercept_ = y_offset - X_offset @ coef_\n        else:\n            intercept_ = y_offset - X_offset @ coef_.T\n        self.intercept_ = intercept_\n    else:\n        self.intercept_ = 0.0",
    "docstring": "Set the intercept_",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\linear_model\\_base.py",
    "ast_data": "FunctionDef name:_set_intercept arg:self arg:X_offset arg:y_offset arg:X_scale arguments arg arg arg arg Assign Call If Assign Call Assign Call If Compare Assign Assign Assign Assign"
  },
  {
    "library": "matplotlib",
    "name": "get_label_position",
    "source_code": "def get_label_position(self):\n    return self._label_position",
    "docstring": "Get the label position. Returns ------- str : {'lower', 'upper', 'both', 'default', 'none'} The position of the axis label.",
    "type": "method",
    "file_path": "matplotlib\\lib\\mpl_toolkits\\mplot3d\\axis3d.py",
    "ast_data": "FunctionDef name:get_label_position arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_bfs_for_reachable_nodes",
    "source_code": "def _bfs_for_reachable_nodes(target_nodes, name_to_input_name):\n    nodes_to_keep = set()\n    next_to_visit = list(target_nodes)\n    while next_to_visit:\n        node = next_to_visit[0]\n        del next_to_visit[0]\n        if node in nodes_to_keep:\n            continue\n        nodes_to_keep.add(node)\n        if node in name_to_input_name:\n            next_to_visit += name_to_input_name[node]\n    return nodes_to_keep",
    "docstring": "Breadth first search for reachable nodes from target nodes.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\graph_util_impl.py",
    "ast_data": "FunctionDef name:_bfs_for_reachable_nodes arg:target_nodes arg:name_to_input_name arguments arg arg Assign Call Assign Call While Assign If Compare Call If Compare Return return:yes"
  },
  {
    "library": "django",
    "name": "has_view_permission",
    "source_code": "def has_view_permission(self, request, obj=None):\n    opts = self.opts\n    codename_view = get_permission_codename('view', opts)\n    codename_change = get_permission_codename('change', opts)\n    return request.user.has_perm('%s.%s' % (opts.app_label, codename_view)) or request.user.has_perm('%s.%s' % (opts.app_label, codename_change))",
    "docstring": "Return True if the given request has permission to view the given Django model instance. The default implementation doesn't examine the parameter. If overridden by the user in subclasses, it should return True if the given request has permission to view the model instance. If is None, it should return True if the request has permission to view any object of the given type.",
    "type": "method",
    "file_path": "django\\django\\contrib\\admin\\options.py",
    "ast_data": "FunctionDef name:has_view_permission arg:self arg:request arg:obj arguments arg arg arg Assign Assign Call Assign Call Return return:yes BoolOp Call Call"
  },
  {
    "library": "scikit-learn",
    "name": "SGDRegressorBenchmark",
    "source_code": "class SGDRegressorBenchmark(Predictor, Estimator, Benchmark):\n    param_names = ['representation']\n    params = (['dense', 'sparse'],)\n\n    def setup_cache(self):\n        super().setup_cache()\n\n    def make_data(self, params):\n        representation, = params\n        if representation == 'dense':\n            data = _synth_regression_dataset(n_samples=100000, n_features=200)\n        else:\n            data = _synth_regression_sparse_dataset(n_samples=100000, n_features=1000, density=0.01)\n        return data\n\n    def make_estimator(self, params):\n        representation, = params\n        max_iter = 60 if representation == 'dense' else 300\n        estimator = SGDRegressor(max_iter=max_iter, tol=None, random_state=0)\n        return estimator\n\n    def make_scorers(self):\n        make_gen_reg_scorers(self)",
    "docstring": "Benchmark for SGD",
    "type": "class",
    "file_path": "scikit-learn\\asv_benchmarks\\benchmarks\\linear_model.py",
    "ast_data": "ClassDef name:SGDRegressorBenchmark Assign Assign FunctionDef name:setup_cache arg:self arguments arg Call Call FunctionDef name:make_data arg:self arg:params arguments arg arg Assign If Compare Assign Call Assign Call Return return:yes FunctionDef name:make_estimator arg:self arg:params arguments arg arg Assign Assign Compare Assign Call Return return:yes FunctionDef name:make_scorers arg:self arguments arg Call"
  },
  {
    "library": "scipy",
    "name": "_lsq_solve_qr",
    "source_code": "def _lsq_solve_qr(x, y, t, k, w):\n    assert y.ndim == 2\n    y_w = y * w[:, None]\n    A, offset, nc = _dierckx.data_matrix(x, t, k, w)\n    _dierckx.qr_reduce(A, offset, nc, y_w)\n    c = _dierckx.fpback(A, nc, y_w)\n    return (A, y_w, c)",
    "docstring": "Solve for the LSQ spline coeffs given x, y and knots. is always 2D: for 1D data, the shape is `wx` value.",
    "type": "function",
    "file_path": "scipy\\scipy\\interpolate\\_bsplines.py",
    "ast_data": "FunctionDef name:_lsq_solve_qr arg:x arg:y arg:t arg:k arg:w arguments arg arg arg arg arg Compare Assign Assign Call Call Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_get_outer_context_id",
    "source_code": "def _get_outer_context_id(self, graph):\n    if hasattr(graph, 'outer_graph') and graph.outer_graph:\n        return self._get_context_id(graph.outer_graph)\n    else:\n        return None",
    "docstring": "Get the ID of the immediate outer context of the input graph. Args: graph: The graph (context) in question. Returns: If an outer context exists, the immediate outer context name as a string. If such as outer context does not exist (i.e., is itself outermost), .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\debug\\lib\\dumping_callback.py",
    "ast_data": "FunctionDef name:_get_outer_context_id arg:self arg:graph arguments arg arg If BoolOp Call Return return:yes Call Return return:no"
  },
  {
    "library": "tensorflow",
    "name": "WorkerSessionCreator",
    "source_code": "@tf_export(v1=['train.WorkerSessionCreator'])\nclass WorkerSessionCreator(SessionCreator):\n\n    def __init__(self, scaffold=None, master='', config=None, max_wait_secs=30 * 60):\n        self._scaffold = scaffold or Scaffold()\n        self._session_manager = None\n        self._master = master\n        self._config = config\n        self._max_wait_secs = max_wait_secs\n\n    def _get_session_manager(self):\n        if self._session_manager:\n            return self._session_manager\n        self._session_manager = sm.SessionManager(local_init_op=self._scaffold.local_init_op, local_init_feed_dict=self._scaffold.local_init_feed_dict, ready_op=self._scaffold.ready_op, ready_for_local_init_op=self._scaffold.ready_for_local_init_op, graph=ops.get_default_graph())\n        return self._session_manager\n\n    def create_session(self):\n        self._scaffold.finalize()\n        return self._get_session_manager().wait_for_session(self._master, config=self._config, max_wait_secs=self._max_wait_secs)",
    "docstring": "Creates a tf.compat.v1.Session for a worker.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\training\\monitored_session.py",
    "ast_data": "ClassDef name:WorkerSessionCreator FunctionDef name:__init__ arg:self arg:scaffold arg:master arg:config arg:max_wait_secs arguments arg arg arg arg arg Assign BoolOp Call Assign Assign Assign Assign FunctionDef name:_get_session_manager arg:self arguments arg If Return return:yes Assign Call Call Return return:yes FunctionDef name:create_session arg:self arguments arg Call Return return:yes Call Call Call"
  },
  {
    "library": "scrapy",
    "name": "SameOriginPolicy",
    "source_code": "class SameOriginPolicy(ReferrerPolicy):\n    name: str = POLICY_SAME_ORIGIN\n\n    def referrer(self, response_url: str, request_url: str) -> str | None:\n        if self.origin(response_url) == self.origin(request_url):\n            return self.stripped_referrer(response_url)\n        return None",
    "docstring": "The \"same-origin\" policy specifies that a full URL, stripped for use as a referrer, is sent as referrer information when making same-origin requests from a particular request client. Cross-origin requests, on the other hand, will contain no referrer information. A Referer HTTP header will not be sent.",
    "type": "class",
    "file_path": "scrapy\\scrapy\\spidermiddlewares\\referer.py",
    "ast_data": "ClassDef name:SameOriginPolicy FunctionDef name:referrer arg:self arg:response_url arg:request_url arguments arg arg arg If Compare Call Call Return return:yes Call Return return:no"
  },
  {
    "library": "tensorflow",
    "name": "should_use_v2",
    "source_code": "def should_use_v2():\n    if context.executing_eagerly():\n        return True\n    elif ops.executing_eagerly_outside_functions():\n        graph = ops.get_default_graph()\n        if getattr(graph, 'name', False) and graph.name.startswith('wrapped_function'):\n            return False\n        return True\n    else:\n        return False",
    "docstring": "Determine if v1 or v2 version should be used.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\utils\\version_utils.py",
    "ast_data": "FunctionDef name:should_use_v2 arguments If Call Return return:yes If Call Assign Call If BoolOp Call Call Return return:yes Return return:yes Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "get_function_group",
    "source_code": "def get_function_group(self, name: str) -> Optional[_SymbolicFunctionGroup]:\n    return self._registry.get(name)",
    "docstring": "Returns the function group for the given name.",
    "type": "method",
    "file_path": "pytorch\\torch\\onnx\\_internal\\registration.py",
    "ast_data": "FunctionDef name:get_function_group arg:self arg:name arguments arg arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "string_to_hash_bucket",
    "source_code": "@tf_export('strings.to_hash_bucket', v1=[])\n@dispatch.register_unary_elementwise_api\n@dispatch.add_dispatch_support\ndef string_to_hash_bucket(input, num_buckets, name=None):\n    return gen_string_ops.string_to_hash_bucket(input, num_buckets, name)",
    "docstring": "Converts each string in the input Tensor to its hash mod by a number of buckets. The hash function is deterministic on the content of the string within the process. Note that the hash function may change from time to time. This functionality will be deprecated and it's recommended to use or . Examples: >>> tf.strings.to_hash_bucket([\"Hello\", \"TensorFlow\", \"2.x\"], 3) Args: input: A of type . num_buckets: An that is . The number of buckets. name: A name for the operation (optional). Returns: A of type .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\string_ops.py",
    "ast_data": "FunctionDef name:string_to_hash_bucket arg:input arg:num_buckets arg:name arguments arg arg arg Return return:yes Call Call"
  },
  {
    "library": "scipy",
    "name": "roots_chebyt",
    "source_code": "def roots_chebyt(n, mu=False):\n    m = int(n)\n    if n < 1 or n != m:\n        raise ValueError('n must be a positive integer.')\n    x = _ufuncs._sinpi(np.arange(-m + 1, m, 2) / (2 * m))\n    w = np.full_like(x, pi / m)\n    if mu:\n        return (x, w, pi)\n    else:\n        return (x, w)",
    "docstring": "Gauss-Chebyshev (first kind) quadrature. Computes the sample points and weights for Gauss-Chebyshev quadrature. The sample points are the roots of the nth degree Chebyshev polynomial of the first kind, :math:. These sample points and weights correctly integrate polynomials of degree :math: or less over the interval :math: with weight function :math:. See 22.2.4 in [AS]_ for more details. Parameters ---------- n : int quadrature order mu : bool, optional If True, return the sum of the weights, optional. Returns ------- x : ndarray Sample points w : ndarray Weights mu : float Sum of the weights See Also -------- scipy.integrate.fixed_quad numpy.polynomial.chebyshev.chebgauss References ---------- .. [AS] Milton Abramowitz and Irene A. Stegun, eds. Handbook of Mathematical Functions with Formulas, Graphs, and Mathematical Tables. New York: Dover, 1972.",
    "type": "function",
    "file_path": "scipy\\scipy\\special\\_orthogonal.py",
    "ast_data": "FunctionDef name:roots_chebyt arg:n arg:mu arguments arg arg Assign Call If BoolOp Compare Compare Raise Call Assign Call Call Assign Call If Return return:yes Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "global_device_ids",
    "source_code": "def global_device_ids(self) -> np.ndarray:\n    return np.array(super().global_device_ids(), dtype=np.int64).reshape(self.shape())",
    "docstring": "Returns a global device list as an array.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\dtensor\\python\\layout.py",
    "ast_data": "FunctionDef name:global_device_ids arg:self arguments arg Return return:yes Call Call Call Call Call"
  },
  {
    "library": "scrapy",
    "name": "to_unicode_dict",
    "source_code": "def to_unicode_dict(self) -> CaseInsensitiveDict:\n    return CaseInsensitiveDict(((to_unicode(key, encoding=self.encoding), to_unicode(b','.join(value), encoding=self.encoding)) for key, value in self.items()))",
    "docstring": "Return headers as a CaseInsensitiveDict with str keys and str values. Multiple values are joined with ','.",
    "type": "method",
    "file_path": "scrapy\\scrapy\\http\\headers.py",
    "ast_data": "FunctionDef name:to_unicode_dict arg:self arguments arg Return return:yes Call Call Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_global_shuffle",
    "source_code": "def _global_shuffle(input_dataset: dataset_ops.DatasetV2, seed: Optional[Union[int, tensor.Tensor]]=None, reshuffle_each_iteration: bool=True, name: Optional[str]=None) -> dataset_ops.DatasetV2:\n    return _GlobalShuffleDataset(input_dataset, seed=seed, reshuffle_each_iteration=reshuffle_each_iteration, name=name)",
    "docstring": "Globally shuffles the elements of . The shuffling is done efficiently, without needing to buffer any additional data. To achieve this, the transformations preceding global_shuffle must all support random access. Requires that: - The shuffled dataset and all its input datasets support random access. - The input_dataset to have a known, finite cardinality. Users can use to specify the cardinality of a dataset if it cannot be determined at runtime. TODO(b/325112575): Move the API to dataset_ops.py. Args: input_dataset: The dataset to be shuffled. seed: An int or scalar to control the shuffle order. If , a random seed will be used. reshuffle_each_iteration: A boolean, which if True, indicates that a different shuffle order should be generated for each iteration of the dataset. (Defaults to .) name: (Optional.) A name for the tf.data operation. Returns: A new where elements are produced in a globally shuffled order. Raises: - InvalidArgumentError if the input dataset does not support random access, or it has infinite or unknown cardinality. - FailedPreconditionError for batching with .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\data\\experimental\\ops\\global_shuffle_op.py",
    "ast_data": "FunctionDef name:_global_shuffle arg:input_dataset arg:seed arg:reshuffle_each_iteration arg:name arguments arg arg arg arg Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "generate_blob",
    "source_code": "def generate_blob(self, gso_table: dict[str, tuple[int, int]]) -> bytes:\n    bio = BytesIO()\n    gso = bytes('GSO', 'ascii')\n    gso_type = struct.pack(self._byteorder + 'B', 130)\n    null = struct.pack(self._byteorder + 'B', 0)\n    v_type = self._byteorder + self._gso_v_type\n    o_type = self._byteorder + self._gso_o_type\n    len_type = self._byteorder + 'I'\n    for strl, vo in gso_table.items():\n        if vo == (0, 0):\n            continue\n        v, o = vo\n        bio.write(gso)\n        bio.write(struct.pack(v_type, v))\n        bio.write(struct.pack(o_type, o))\n        bio.write(gso_type)\n        utf8_string = bytes(strl, 'utf-8')\n        bio.write(struct.pack(len_type, len(utf8_string) + 1))\n        bio.write(utf8_string)\n        bio.write(null)\n    return bio.getvalue()",
    "docstring": "Generates the binary blob of GSOs that is written to the dta file. Parameters ---------- gso_table : dict Ordered dictionary (str, vo) Returns ------- gso : bytes Binary content of dta file to be placed between strl tags Notes ----- Output format depends on dta version. 117 uses two uint32s to express v and o while 118+ uses a uint32 for v and a uint64 for o.",
    "type": "method",
    "file_path": "pandas\\pandas\\io\\stata.py",
    "ast_data": "FunctionDef name:generate_blob arg:self arg:gso_table arguments arg arg Assign Call Assign Call Assign Call Assign Call Assign Assign Assign For Call If Compare Assign Call Call Call Call Call Call Assign Call Call Call Call Call Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_pack_tensors",
    "source_code": "def _pack_tensors(device_grads, num_packs=0):\n    if num_packs > 0:\n        tensor_packer = _ConcatAndSplitPacker(num_packs)\n        device_grad_packs = tensor_packer.pack(device_grads)\n    else:\n        tensor_packer = None\n        device_grad_packs = device_grads\n    return (device_grad_packs, tensor_packer)",
    "docstring": "Pack tensors if specified.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\cross_device_ops.py",
    "ast_data": "FunctionDef name:_pack_tensors arg:device_grads arg:num_packs arguments arg arg If Compare Assign Call Assign Call Assign Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_list_with_ellipsis_to_str",
    "source_code": "def _list_with_ellipsis_to_str(arr):\n    if not arr:\n        return '[]'\n    return '[' + _element_to_string(arr[0]) + _list_tail_with_ellipsis(arr[1:])",
    "docstring": "Print a list that might have ellipsis.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\dynamic_ragged_shape.py",
    "ast_data": "FunctionDef name:_list_with_ellipsis_to_str arg:arr arguments arg If Return return:yes Return return:yes Call Call"
  },
  {
    "library": "django",
    "name": "G",
    "source_code": "def G(self):\n    return self.data.hour",
    "docstring": "Hour, 24-hour format without leading zeros; i.e. '0' to '23'",
    "type": "method",
    "file_path": "django\\django\\utils\\dateformat.py",
    "ast_data": "FunctionDef name:G arg:self arguments arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "hinge_embedding_loss",
    "source_code": "def hinge_embedding_loss(input: Tensor, target: Tensor, margin: float=1.0, size_average: Optional[bool]=None, reduce: Optional[bool]=None, reduction: str='mean') -> Tensor:\n    if has_torch_function_variadic(input, target):\n        return handle_torch_function(hinge_embedding_loss, (input, target), input, target, margin=margin, size_average=size_average, reduce=reduce, reduction=reduction)\n    if size_average is not None or reduce is not None:\n        reduction_enum = _Reduction.legacy_get_enum(size_average, reduce)\n    else:\n        reduction_enum = _Reduction.get_enum(reduction)\n    return torch.hinge_embedding_loss(input, target, margin, reduction_enum)",
    "docstring": "Compute the hinge embedding loss. See :class: for details. Args: input (Tensor): Predicted values. target (Tensor): Ground truth values. margin (float, optional): Margin for hinge loss. Has a default value of 1. size_average (bool, optional): Deprecated (see :attr:). reduce (bool, optional): Deprecated (see :attr:). reduction (str, optional): Specifies the reduction to apply to the output: 'none' | 'mean' | 'sum'. 'mean': the mean of the output is taken. 'sum': the output will be summed. 'none': no reduction will be applied. Default: 'mean'. Returns: Tensor: Hinge embedding loss.",
    "type": "function",
    "file_path": "pytorch\\torch\\nn\\functional.py",
    "ast_data": "FunctionDef name:hinge_embedding_loss arg:input arg:target arg:margin arg:size_average arg:reduce arg:reduction arguments arg arg arg arg arg arg If Call Return return:yes Call If BoolOp Compare Compare Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "fix_minus",
    "source_code": "@staticmethod\ndef fix_minus(s):\n    return s.replace('-', '−') if mpl.rcParams['axes.unicode_minus'] else s",
    "docstring": "Some classes may want to replace a hyphen for minus with the proper Unicode symbol (U+2212) for typographical correctness. This is a helper method to perform such a replacement when it is enabled via :rc:.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\ticker.py",
    "ast_data": "FunctionDef name:fix_minus arg:s arguments arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "assoc",
    "source_code": "def assoc(d, key, value, factory=dict):\n    d2 = factory()\n    d2.update(d)\n    d2[key] = value\n    return d2",
    "docstring": "Return a new dict with new key value pair New dict has d[key] set to value. Does not modify the initial dictionary. >>> assoc({\"x\": 1}, \"x\", 2) {'x': 2} >>> assoc({\"x\": 1}, \"y\", 3) # doctest: +SKIP {'x': 1, 'y': 3}",
    "type": "function",
    "file_path": "pytorch\\torch\\fx\\experimental\\unification\\unification_tools.py",
    "ast_data": "FunctionDef name:assoc arg:d arg:key arg:value arg:factory arguments arg arg arg arg Assign Call Call Assign Return return:yes"
  },
  {
    "library": "scipy",
    "name": "_prepare_x",
    "source_code": "def _prepare_x(self, x):\n    x = _asarray_validated(x, check_finite=False, as_inexact=True)\n    x_shape = x.shape\n    return (x.ravel(), x_shape)",
    "docstring": "Reshape input x array to 1-D",
    "type": "method",
    "file_path": "scipy\\scipy\\interpolate\\_polyint.py",
    "ast_data": "FunctionDef name:_prepare_x arg:self arg:x arguments arg arg Assign Call Assign Return return:yes Call"
  },
  {
    "library": "django",
    "name": "title",
    "source_code": "@register.filter(is_safe=True)\n@stringfilter\ndef title(value):\n    t = re.sub(\"([a-z])'([A-Z])\", lambda m: m[0].lower(), value.title())\n    return re.sub('\\\\d([A-Z])', lambda m: m[0].lower(), t)",
    "docstring": "Convert a string into titlecase.",
    "type": "function",
    "file_path": "django\\django\\template\\defaultfilters.py",
    "ast_data": "FunctionDef name:title arg:value arguments arg Assign Call arguments arg Call Call Return return:yes Call arguments arg Call Call"
  },
  {
    "library": "tensorflow",
    "name": "replace_tensors_by_numpy_ndarrays",
    "source_code": "def replace_tensors_by_numpy_ndarrays(repr_ds: RepresentativeDataset, sess: session.Session) -> RepresentativeDataset:\n    new_repr_ds = []\n    for sample in repr_ds:\n        new_sample = {}\n        for input_key, input_data in sample.items():\n            if isinstance(input_data, core.Tensor):\n                input_data = input_data.eval(session=sess)\n            new_sample[input_key] = input_data\n        new_repr_ds.append(new_sample)\n    return new_repr_ds",
    "docstring": "Replaces tf.Tensors in samples by their evaluated numpy arrays. Note: This should be run in graph mode (default in TF1) only. Args: repr_ds: Representative dataset to replace the tf.Tensors with their evaluated values. is iterated through, so it may not be reusable (e.g. if it is a generator object). sess: Session instance used to evaluate tf.Tensors. Returns: The new representative dataset where each tf.Tensor is replaced by its evaluated numpy ndarrays.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\compiler\\mlir\\quantization\\tensorflow\\python\\representative_dataset.py",
    "ast_data": "FunctionDef name:replace_tensors_by_numpy_ndarrays arg:repr_ds arg:sess arguments arg arg Assign For Assign For Call If Call Assign Call Assign Call Return return:yes"
  },
  {
    "library": "django",
    "name": "WeekArchiveView",
    "source_code": "class WeekArchiveView(MultipleObjectTemplateResponseMixin, BaseWeekArchiveView):\n    template_name_suffix = '_archive_week'",
    "docstring": "List of objects published in a given week.",
    "type": "class",
    "file_path": "django\\django\\views\\generic\\dates.py",
    "ast_data": "ClassDef name:WeekArchiveView Assign"
  },
  {
    "library": "kornia",
    "name": "TotalVariation",
    "source_code": "class TotalVariation(Module):\n\n    def forward(self, img: Tensor) -> Tensor:\n        return total_variation(img)",
    "docstring": "Compute the Total Variation according to [1]. Shape: - Input: :math:. - Output: :math:. Examples: >>> tv = TotalVariation() >>> output = tv(torch.ones((2, 3, 4, 4), requires_grad=True)) >>> output.data tensor([[0., 0., 0.], [0., 0., 0.]]) >>> output.sum().backward() # grad can be implicitly created only for scalar outputs Reference: [1]",
    "type": "class",
    "file_path": "kornia\\kornia\\losses\\total_variation.py",
    "ast_data": "ClassDef name:TotalVariation FunctionDef name:forward arg:self arg:img arguments arg arg Return return:yes Call"
  },
  {
    "library": "numpy",
    "name": "rstrip",
    "source_code": "@set_module('numpy.strings')\ndef rstrip(a, chars=None):\n    if chars is None:\n        return _rstrip_whitespace(a)\n    return _rstrip_chars(a, chars)",
    "docstring": "For each element in , return a copy with the trailing characters removed. Parameters ---------- a : array-like, with `` dtype, depending on input types See Also -------- str.rstrip Examples -------- >>> import numpy as np >>> c = np.array(['aAaAaA', 'abBABba']) >>> c array(['aAaAaA', 'abBABba'], dtype='>> np.strings.rstrip(c, 'a') array(['aAaAaA', 'abBABb'], dtype='>> np.strings.rstrip(c, 'A') array(['aAaAa', 'abBABba'], dtype='<U7')",
    "type": "function",
    "file_path": "numpy\\numpy\\_core\\strings.py",
    "ast_data": "FunctionDef name:rstrip arg:a arg:chars arguments arg arg If Compare Return return:yes Call Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "toggle_collection_dynamic",
    "source_code": "def toggle_collection_dynamic(self, enabled: bool, activities: Iterable[ProfilerActivity]):\n    return _toggle_collection_dynamic(enabled, set(activities))",
    "docstring": "Toggles the collection of activities for the current profiler instance.",
    "type": "method",
    "file_path": "pytorch\\torch\\autograd\\profiler.py",
    "ast_data": "FunctionDef name:toggle_collection_dynamic arg:self arg:enabled arg:activities arguments arg arg arg Return return:yes Call Call"
  },
  {
    "library": "pandas",
    "name": "parse_tables",
    "source_code": "def parse_tables(self):\n    tables = self._parse_tables(self._build_doc(), self.match, self.attrs)\n    return (self._parse_thead_tbody_tfoot(table) for table in tables)",
    "docstring": "Parse and return all tables from the DOM. Returns ------- list of parsed (header, body, footer) tuples from tables.",
    "type": "method",
    "file_path": "pandas\\pandas\\io\\html.py",
    "ast_data": "FunctionDef name:parse_tables arg:self arguments arg Assign Call Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "type_spec_from_value",
    "source_code": "def type_spec_from_value(value):\n    if is_extension_type(value):\n        return value._type_spec\n    if hasattr(value, 'shape') and hasattr(value, 'dtype'):\n        return tensor_lib.TensorSpec(value.shape, value.dtype)\n    else:\n        return type_spec.type_spec_from_value(value)",
    "docstring": "Grab type_spec without converting array-likes to tensors.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\utils\\tf_utils.py",
    "ast_data": "FunctionDef name:type_spec_from_value arg:value arguments arg If Call Return return:yes If BoolOp Call Call Return return:yes Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "experimental_make_numpy_dataset",
    "source_code": "def experimental_make_numpy_dataset(self, numpy_input, session=None):\n    return self.extended.experimental_make_numpy_dataset(numpy_input, session=session)",
    "docstring": "Makes a tf.data.Dataset for input provided via a numpy array. This avoids adding as a large constant in the graph, and copies the data to the machine or machines that will be processing the input. Note that you will likely need to use tf.distribute.Strategy.experimental_distribute_dataset with the returned dataset to further distribute it with the strategy. Example: Args: numpy_input: A nest of NumPy input arrays that will be converted into a dataset. Note that lists of Numpy arrays are stacked, as that is normal behavior. session: (TensorFlow v1.x graph execution only) A session used for initialization. Returns: A representing .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\distribute_lib.py",
    "ast_data": "FunctionDef name:experimental_make_numpy_dataset arg:self arg:numpy_input arg:session arguments arg arg arg Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "_init",
    "source_code": "def _init(self):\n    raise NotImplementedError('Abstract class only')",
    "docstring": "Generate the lookup table, ``.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\colors.py",
    "ast_data": "FunctionDef name:_init arg:self arguments arg Raise Call"
  },
  {
    "library": "django",
    "name": "_cache",
    "source_code": "@cached_property\ndef _cache(self):\n    return self._class(self.client_servers, **self._options)",
    "docstring": "Implement transparent thread-safe access to a memcached client.",
    "type": "method",
    "file_path": "django\\django\\core\\cache\\backends\\memcached.py",
    "ast_data": "FunctionDef name:_cache arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "__init__",
    "source_code": "def __init__(self, pad=0.3):\n    self.pad = pad",
    "docstring": "Parameters ---------- pad : float, default: 0.3 The amount of padding around the original box.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\patches.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:pad arguments arg arg Assign"
  },
  {
    "library": "matplotlib",
    "name": "get_checked_labels",
    "source_code": "def get_checked_labels(self):\n    return [l.get_text() for l, box_checked in zip(self.labels, self.get_status()) if box_checked]",
    "docstring": "Return a list of labels currently checked by user.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\widgets.py",
    "ast_data": "FunctionDef name:get_checked_labels arg:self arguments arg Return return:yes Call Call Call"
  },
  {
    "library": "pandas",
    "name": "_update_inplace",
    "source_code": "@final\ndef _update_inplace(self, result) -> None:\n    self._mgr = result._mgr",
    "docstring": "Replace self internals with result. Parameters ---------- result : same type as self",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\generic.py",
    "ast_data": "FunctionDef name:_update_inplace arg:self arg:result arguments arg arg Assign"
  },
  {
    "library": "tensorflow",
    "name": "set_soft_device_placement",
    "source_code": "@tf_export('__internal__.eager_context.set_soft_device_placement', v1=[])\ndef set_soft_device_placement(enabled):\n    context().soft_device_placement = enabled",
    "docstring": "Set if soft device placements should be allowed. Args: enabled: Whether to enable soft device placement.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\context.py",
    "ast_data": "FunctionDef name:set_soft_device_placement arg:enabled arguments arg Assign Call Call"
  },
  {
    "library": "tensorflow",
    "name": "tensor",
    "source_code": "def tensor(self, tensor_index):\n    return lambda: self._interpreter.tensor(self._interpreter, tensor_index)",
    "docstring": "Returns function that gives a numpy view of the current tensor buffer. This allows reading and writing to these tensors w/o copies. This more closely mirrors the C++ Interpreter class interface's tensor() member, hence the name. Be careful not to hold these output references through calls to and . This function cannot be used to read intermediate results. Usage: Notice how this function avoids making a numpy array directly. This is because it is important to not hold actual numpy views to the data longer than necessary. If you do, then the interpreter can no longer be invoked, because it is possible the interpreter would resize and invalidate the referenced tensors. The NumPy API doesn't allow any mutability of the underlying buffers. WRONG: Args: tensor_index: Tensor index of tensor to get. This value can be gotten from the 'index' field in get_output_details. Returns: A function that can return a new numpy array pointing to the internal TFLite tensor state at any point. It is safe to hold the function forever, but it is not safe to hold the numpy array forever.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\lite\\python\\interpreter.py",
    "ast_data": "FunctionDef name:tensor arg:self arg:tensor_index arguments arg arg Return return:yes arguments Call"
  },
  {
    "library": "django",
    "name": "register",
    "source_code": "def register(self, model_or_iterable, admin_class=None, **options):\n    admin_class = admin_class or ModelAdmin\n    if isinstance(model_or_iterable, ModelBase):\n        model_or_iterable = [model_or_iterable]\n    for model in model_or_iterable:\n        if model._meta.abstract:\n            raise ImproperlyConfigured('The model %s is abstract, so it cannot be registered with admin.' % model.__name__)\n        if model._meta.is_composite_pk:\n            raise ImproperlyConfigured('The model %s has a composite primary key, so it cannot be registered with admin.' % model.__name__)\n        if self.is_registered(model):\n            registered_admin = str(self.get_model_admin(model))\n            msg = 'The model %s is already registered ' % model.__name__\n            if registered_admin.endswith('.ModelAdmin'):\n                msg += 'in app %r.' % registered_admin.removesuffix('.ModelAdmin')\n            else:\n                msg += 'with %r.' % registered_admin\n            raise AlreadyRegistered(msg)\n        if not model._meta.swapped:\n            if options:\n                options['__module__'] = __name__\n                admin_class = type('%sAdmin' % model.__name__, (admin_class,), options)\n            self._registry[model] = admin_class(model, self)",
    "docstring": "Register the given model(s) with the given admin class. The model(s) should be Model classes, not instances. If an admin class isn't given, use ModelAdmin (the default admin options). If keyword arguments are given -- e.g., list_display -- apply them as options to the admin class. If a model is already registered, raise AlreadyRegistered. If a model is abstract, raise ImproperlyConfigured.",
    "type": "method",
    "file_path": "django\\django\\contrib\\admin\\sites.py",
    "ast_data": "FunctionDef name:register arg:self arg:model_or_iterable arg:admin_class arguments arg arg arg arg Assign BoolOp If Call Assign For If Raise Call If Raise Call If Call Assign Call Call Assign If Call Call Raise Call If If Assign Assign Call Assign Call"
  },
  {
    "library": "numpy",
    "name": "__reduce__",
    "source_code": "def __reduce__(self):\n    return (self.__class__, ())",
    "docstring": "Override of MaskedArray's __reduce__.",
    "type": "method",
    "file_path": "numpy\\numpy\\ma\\core.py",
    "ast_data": "FunctionDef name:__reduce__ arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "parallel_stack",
    "source_code": "@tf_export('parallel_stack')\n@dispatch.add_dispatch_support\ndef parallel_stack(values, name='parallel_stack'):\n    if context.executing_eagerly():\n        raise RuntimeError('tf.parallel_stack() is not compatible with eager execution.')\n    with ops.name_scope(name):\n        value_t = ops.convert_to_tensor(values[0])\n        value_shape = ops.convert_to_tensor(value_t).get_shape()\n        output_shape = tensor_shape.TensorShape([len(values)])\n        output_shape = output_shape.concatenate(value_shape)\n        return gen_array_ops.parallel_concat([expand_dims(value, 0) for value in values], shape=output_shape)",
    "docstring": "Stacks a list of rank- tensors into one rank- tensor in parallel. Requires that the shape of inputs be known at graph construction time. Packs the list of tensors in into a tensor with rank one higher than each tensor in , by packing them along the first dimension. Given a list of length of tensors of shape ; the tensor will have the shape . For example: The difference between and is that requires all the inputs be computed before the operation will begin but doesn't require that the input shapes be known during graph construction. will copy pieces of the input into the output as they become available, in some situations this can provide a performance benefit. Unlike , does NOT support backpropagation. This is the opposite of unstack. The numpy equivalent is tf.parallel_stack([x, y, z]) = np.asarray([x, y, z]) @compatibility(eager) parallel_stack is not compatible with eager execution. @end_compatibility Args: values: A list of objects with the same shape and type. name: A name for this operation (optional). Returns: output: A stacked with the same type as . Raises: RuntimeError: if executed in eager mode.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\array_ops.py",
    "ast_data": "FunctionDef name:parallel_stack arg:values arg:name arguments arg arg If Call Raise Call With Call Assign Call Assign Call Call Assign Call Call Assign Call Return return:yes Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "find_file_paths",
    "source_code": "def find_file_paths(dir_paths: list[str], files_to_exclude: set[str]) -> set[str]:\n    paths: set[str] = set()\n    for dir_path in dir_paths:\n        all_files = os.listdir(dir_path)\n        python_files = {fname for fname in all_files if '.py' == fname[-3:]}\n        filter_files = {fname for fname in python_files if fname not in files_to_exclude}\n        paths.update({os.path.join(dir_path, fname) for fname in filter_files})\n    return paths",
    "docstring": "When given a path to a directory, returns the paths to the relevant files within it. This function does NOT recursive traverse to subdirectories.",
    "type": "function",
    "file_path": "pytorch\\torch\\utils\\data\\datapipes\\gen_pyi.py",
    "ast_data": "FunctionDef name:find_file_paths arg:dir_paths arg:files_to_exclude arguments arg arg Call For Assign Call Assign Compare Assign Compare Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "register_timers",
    "source_code": "@abc.abstractmethod\ndef register_timers(self, timer_requests: list[TimerRequest]) -> None:\n    pass",
    "docstring": "Processes the incoming timer requests and registers them with the server. The timer request can either be a acquire-timer or release-timer request. Timer requests with a negative expiration_time should be interpreted as a release-timer request.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\elastic\\timer\\api.py",
    "ast_data": "FunctionDef name:register_timers arg:self arg:timer_requests arguments arg arg"
  },
  {
    "library": "pandas",
    "name": "set_locale",
    "source_code": "@contextmanager\ndef set_locale(new_locale: str | tuple[str, str], lc_var: int=locale.LC_ALL) -> Generator[str | tuple[str, str]]:\n    current_locale = locale.setlocale(lc_var)\n    try:\n        locale.setlocale(lc_var, new_locale)\n        normalized_code, normalized_encoding = locale.getlocale()\n        if normalized_code is not None and normalized_encoding is not None:\n            yield f'{normalized_code}.{normalized_encoding}'\n        else:\n            yield new_locale\n    finally:\n        locale.setlocale(lc_var, current_locale)",
    "docstring": "Context manager for temporarily setting a locale. Parameters ---------- new_locale : str or tuple A string of the form .. For example to set the current locale to US English with a UTF8 encoding, you would pass \"en_US.UTF-8\". lc_var : int, default The category of the locale being set. Notes ----- This is useful when you want to run a particular block of code under a particular locale, without globally setting the locale. This probably isn't thread-safe.",
    "type": "function",
    "file_path": "pandas\\pandas\\_config\\localization.py",
    "ast_data": "FunctionDef name:set_locale arg:new_locale arg:lc_var arguments arg arg Assign Call Try Call Assign Call If BoolOp Compare Compare Call"
  },
  {
    "library": "pandas",
    "name": "ewm",
    "source_code": "@final\ndef ewm(self, com: float | None=None, span: float | None=None, halflife: float | str | Timedelta | None=None, alpha: float | None=None, min_periods: int | None=0, adjust: bool=True, ignore_na: bool=False, times: np.ndarray | Series | None=None, method: str='single') -> ExponentialMovingWindowGroupby:\n    from pandas.core.window import ExponentialMovingWindowGroupby\n    return ExponentialMovingWindowGroupby(self._selected_obj, com=com, span=span, halflife=halflife, alpha=alpha, min_periods=min_periods, adjust=adjust, ignore_na=ignore_na, times=times, method=method, _grouper=self._grouper)",
    "docstring": "Return an ewm grouper, providing ewm functionality per group. Parameters ---------- com : float, optional Specify decay in terms of center of mass. Alternative to ``. Returns ------- pandas.api.typing.ExponentialMovingWindowGroupby An object that supports exponentially weighted moving transformations over each group. See Also -------- Series.ewm : EWM transformations for Series. DataFrame.ewm : EWM transformations for DataFrames. Series.groupby : Apply a function groupby to a Series. DataFrame.groupby : Apply a function groupby. Examples -------- >>> df = pd.DataFrame( ... { ... \"Class\": [\"A\", \"A\", \"A\", \"B\", \"B\", \"B\"], ... \"Value\": [10, 20, 30, 40, 50, 60], ... } ... ) >>> df Class Value 0 A 10 1 A 20 2 A 30 3 B 40 4 B 50 5 B 60 >>> df.groupby(\"Class\").ewm(com=0.5).mean() Value Class A 0 10.000000 1 17.500000 2 26.153846 B 3 40.000000 4 47.500000 5 56.153846",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\groupby\\groupby.py",
    "ast_data": "FunctionDef name:ewm arg:self arg:com arg:span arg:halflife arg:alpha arg:min_periods arg:adjust arg:ignore_na arg:times arg:method arguments arg arg arg arg arg arg arg arg arg arg Return return:yes Call"
  },
  {
    "library": "cherrypy",
    "name": "start",
    "source_code": "def start(self):\n    if self.frequency > 0:\n        threadname = self.name or self.__class__.__name__\n        if self.thread is None:\n            self.thread = BackgroundTask(self.frequency, self.callback, bus=self.bus)\n            self.thread.name = threadname\n            self.thread.start()\n            self.bus.log('Started monitor thread %r.' % threadname)\n        else:\n            self.bus.log('Monitor thread %r already started.' % threadname)",
    "docstring": "Start our callback in its own background thread.",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\process\\plugins.py",
    "ast_data": "FunctionDef name:start arg:self arguments arg If Compare Assign BoolOp If Compare Assign Call Assign Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "getattr_from_fqn",
    "source_code": "def getattr_from_fqn(obj: Any, fqn: str) -> Any:\n    return functools.reduce(getattr, fqn.split('.'), obj)",
    "docstring": "Given an obj and a fqn such as \"foo.bar.baz\", returns gm.foo.bar.baz.",
    "type": "function",
    "file_path": "pytorch\\torch\\ao\\quantization\\utils.py",
    "ast_data": "FunctionDef name:getattr_from_fqn arg:obj arg:fqn arguments arg arg Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "current_device",
    "source_code": "def current_device() -> str:\n    return 'cpu'",
    "docstring": "Returns current device for cpu. Always 'cpu'. N.B. This function only exists to facilitate device-agnostic code",
    "type": "function",
    "file_path": "pytorch\\torch\\cpu\\__init__.py",
    "ast_data": "FunctionDef name:current_device arguments Return return:yes"
  },
  {
    "library": "scipy",
    "name": "_filter_ridge_lines",
    "source_code": "def _filter_ridge_lines(cwt, ridge_lines, window_size=None, min_length=None, min_snr=1, noise_perc=10):\n    num_points = cwt.shape[1]\n    if min_length is None:\n        min_length = np.ceil(cwt.shape[0] / 4)\n    if window_size is None:\n        window_size = np.ceil(num_points / 20)\n    window_size = int(window_size)\n    hf_window, odd = divmod(window_size, 2)\n    row_one = cwt[0, :]\n    noises = np.empty_like(row_one)\n    for ind, val in enumerate(row_one):\n        window_start = max(ind - hf_window, 0)\n        window_end = min(ind + hf_window + odd, num_points)\n        noises[ind] = scoreatpercentile(row_one[window_start:window_end], per=noise_perc)\n\n    def filt_func(line):\n        if len(line[0]) < min_length:\n            return False\n        snr = abs(cwt[line[0][0], line[1][0]] / noises[line[1][0]])\n        if snr < min_snr:\n            return False\n        return True\n    return list(filter(filt_func, ridge_lines))",
    "docstring": "Filter ridge lines according to prescribed criteria. Intended to be used for finding relative maxima. Parameters ---------- cwt : 2-D ndarray Continuous wavelet transform from which the were defined. ridge_lines : 1-D sequence Each element should contain 2 sequences, the rows and columns of the ridge line (respectively). window_size : int, optional Size of window to use to calculate noise floor. Default is `noise_percwindow_size10.1093/bioinformatics/btl355`",
    "type": "function",
    "file_path": "scipy\\scipy\\signal\\_peak_finding.py",
    "ast_data": "FunctionDef name:_filter_ridge_lines arg:cwt arg:ridge_lines arg:window_size arg:min_length arg:min_snr arg:noise_perc arguments arg arg arg arg arg arg Assign If Compare Assign Call If Compare Assign Call Assign Call Assign Call Assign Assign Call For Call Assign Call Assign Call Assign Call FunctionDef name:filt_func arg:line arguments arg If Compare Call Return return:yes Assign Call If Compare Return return:yes Return return:yes Return return:yes Call Call"
  },
  {
    "library": "scipy",
    "name": "_add_to_array",
    "source_code": "def _add_to_array(x_in, i_fixed, x_fixed):\n    i_free = ~i_fixed\n    if x_in.ndim == 2:\n        i_free = i_free[:, None] @ i_free[None, :]\n    x_out = np.zeros_like(i_free, dtype=x_in.dtype)\n    x_out[~i_free] = x_fixed\n    x_out[i_free] = x_in.ravel()\n    return x_out",
    "docstring": "Adds fixed variables back to an array",
    "type": "function",
    "file_path": "scipy\\scipy\\optimize\\_minimize.py",
    "ast_data": "FunctionDef name:_add_to_array arg:x_in arg:i_fixed arg:x_fixed arguments arg arg arg Assign If Compare Assign Assign Call Assign Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "can_handle",
    "source_code": "@staticmethod\ndef can_handle(x, y=None):\n    raise NotImplementedError",
    "docstring": "Whether the current DataAdapter could handle the input x and y. Structure wise, x and y can be single object, or list of objects if there multiple input/output, or dictionary of objects when the intput/output are named. Args: x: input features. y: target labels. Note that y could be None in the case of prediction. Returns: boolean",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\data_adapter.py",
    "ast_data": "FunctionDef name:can_handle arg:x arg:y arguments arg arg Raise"
  },
  {
    "library": "tensorflow",
    "name": "_infer_frame_shape",
    "source_code": "def _infer_frame_shape(signal, frame_length, frame_step, pad_end, axis):\n    frame_length = tensor_util.constant_value(frame_length)\n    frame_step = tensor_util.constant_value(frame_step)\n    axis = tensor_util.constant_value(axis)\n    if signal.shape.ndims is None:\n        return None\n    if axis is None:\n        return [None] * (signal.shape.ndims + 1)\n    signal_shape = signal.shape.as_list()\n    num_frames = None\n    frame_axis = signal_shape[axis]\n    outer_dimensions = signal_shape[:axis]\n    inner_dimensions = signal_shape[axis:][1:]\n    if signal_shape and frame_axis is not None:\n        if frame_step is not None and pad_end:\n            num_frames = max(0, -(-frame_axis // frame_step))\n        elif frame_step is not None and frame_length is not None:\n            assert not pad_end\n            num_frames = max(0, (frame_axis - frame_length + frame_step) // frame_step)\n    return outer_dimensions + [num_frames, frame_length] + inner_dimensions",
    "docstring": "Infers the shape of the return value of .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\signal\\shape_ops.py",
    "ast_data": "FunctionDef name:_infer_frame_shape arg:signal arg:frame_length arg:frame_step arg:pad_end arg:axis arguments arg arg arg arg arg Assign Call Assign Call Assign Call If Compare Return return:no If Compare Return return:yes Assign Call Assign Assign Assign Assign If BoolOp Compare If BoolOp Compare Assign Call If BoolOp Compare Compare Assign Call Return return:yes"
  },
  {
    "library": "numpy",
    "name": "_zseries_mul",
    "source_code": "def _zseries_mul(z1, z2):\n    return np.convolve(z1, z2)",
    "docstring": "Multiply two z-series. Multiply two z-series to produce a z-series. Parameters ---------- z1, z2 : 1-D ndarray The arrays must be 1-D but this is not checked. Returns ------- product : 1-D ndarray The product z-series. Notes ----- This is simply convolution. If symmetric/anti-symmetric z-series are denoted by S/A then the following rules apply: S*S, A*A -> S S*A, A*S -> A",
    "type": "function",
    "file_path": "numpy\\numpy\\polynomial\\chebyshev.py",
    "ast_data": "FunctionDef name:_zseries_mul arg:z1 arg:z2 arguments arg arg Return return:yes Call"
  },
  {
    "library": "kornia",
    "name": "_transform_input3d",
    "source_code": "def _transform_input3d(input: Tensor) -> Tensor:\n    if not torch.is_tensor(input):\n        raise TypeError(f'Input type is not a Tensor. Got {type(input)}')\n    if len(input.shape) not in [3, 4, 5]:\n        raise ValueError(f'Input size must have a shape of either (D, H, W), (C, D, H, W) or (*, C, D, H, W). Got {input.shape}')\n    if len(input.shape) == 3:\n        input = input.unsqueeze(0)\n    if len(input.shape) == 4:\n        input = input.unsqueeze(0)\n    return input",
    "docstring": "Reshape an input tensor to be (*, C, D, H, W). Accept either (D, H, W), (C, D, H, W) or (*, C, D, H, W). Args: input: Tensor Returns: Tensor",
    "type": "function",
    "file_path": "kornia\\kornia\\augmentation\\utils\\helpers.py",
    "ast_data": "FunctionDef name:_transform_input3d arg:input arguments arg If Call Raise Call Call If Compare Call Raise Call If Compare Call Assign Call If Compare Call Assign Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "reopen",
    "source_code": "def reopen(self):\n    self.event_writer.reopen()",
    "docstring": "Reopens the EventFileWriter. Can be called after to add more events in the same directory. The events will go into a new events file. Does nothing if the EventFileWriter was not closed.",
    "type": "method",
    "file_path": "pytorch\\torch\\utils\\tensorboard\\writer.py",
    "ast_data": "FunctionDef name:reopen arg:self arguments arg Call"
  },
  {
    "library": "tensorflow",
    "name": "_dense_var_to_tensor",
    "source_code": "def _dense_var_to_tensor(self, dtype=None, name=None, as_ref=False):\n    if tpu_util.enclosing_tpu_context() is None:\n        return super(TPUVariableMixin, self)._dense_var_to_tensor(dtype=dtype, name=name, as_ref=as_ref)\n    elif dtype is not None and dtype != self.dtype:\n        return math_ops.cast(self.read_value(), dtype)\n    else:\n        return self.handle if as_ref else self.read_value()",
    "docstring": "Converts a variable to a tensor.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\tpu_values.py",
    "ast_data": "FunctionDef name:_dense_var_to_tensor arg:self arg:dtype arg:name arg:as_ref arguments arg arg arg arg If Compare Call Return return:yes Call Call If BoolOp Compare Compare Return return:yes Call Call Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "line_collection_2d_to_3d",
    "source_code": "def line_collection_2d_to_3d(col, zs=0, zdir='z', axlim_clip=False):\n    segments3d = _paths_to_3d_segments(col.get_paths(), zs, zdir)\n    col.__class__ = Line3DCollection\n    col.set_segments(segments3d)\n    col._axlim_clip = axlim_clip",
    "docstring": "Convert a to a object.",
    "type": "function",
    "file_path": "matplotlib\\lib\\mpl_toolkits\\mplot3d\\art3d.py",
    "ast_data": "FunctionDef name:line_collection_2d_to_3d arg:col arg:zs arg:zdir arg:axlim_clip arguments arg arg arg arg Assign Call Call Assign Call Assign"
  },
  {
    "library": "pytorch",
    "name": "get_fresh_qualname",
    "source_code": "@compatibility(is_backward_compatible=True)\ndef get_fresh_qualname(self, prefix: str) -> str:\n    qualname = f'{prefix}0'\n    if not hasattr(self.root, qualname):\n        self._qualname_counter[prefix] = 0\n        return qualname\n    i = self._qualname_counter[prefix]\n    while True:\n        qualname = f'{prefix}{i}'\n        i += 1\n        if not hasattr(self.root, qualname):\n            break\n    self._qualname_counter[prefix] = i\n    return qualname",
    "docstring": "Gets a fresh name for a prefix and returns it. This function ensures that it will not clash with an existing attribute on the graph.",
    "type": "method",
    "file_path": "pytorch\\torch\\fx\\_symbolic_trace.py",
    "ast_data": "FunctionDef name:get_fresh_qualname arg:self arg:prefix arguments arg arg Assign If Call Assign Return return:yes Assign While Assign If Call Assign Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "transform",
    "source_code": "def transform(self, X):\n    check_is_fitted(self)\n    X = validate_data(self, X, reset=False)\n    return np.dot(X, self.components_.T)",
    "docstring": "Apply the learned transformation to the given data. Parameters ---------- X : array-like of shape (n_samples, n_features) Data samples. Returns ------- X_embedded: ndarray of shape (n_samples, n_components) The data samples transformed. Raises ------ NotFittedError If :meth: has not been called before.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\neighbors\\_nca.py",
    "ast_data": "FunctionDef name:transform arg:self arg:X arguments arg arg Call Assign Call Return return:yes Call"
  },
  {
    "library": "django",
    "name": "remove_field",
    "source_code": "def remove_field(self, model, field):\n    if field.many_to_many:\n        if field.remote_field.through._meta.auto_created:\n            self.delete_model(field.remote_field.through)\n    elif self.connection.features.can_alter_table_drop_column and (not field.primary_key) and (not field.unique) and (not field.db_index) and (not (field.remote_field and field.db_constraint)):\n        super().remove_field(model, field)\n    else:\n        if field.db_parameters(connection=self.connection)['type'] is None:\n            return\n        self._remake_table(model, delete_field=field)",
    "docstring": "Remove a field from a model. Usually involves deleting a column, but for M2Ms may involve deleting a table.",
    "type": "method",
    "file_path": "django\\django\\db\\backends\\sqlite3\\schema.py",
    "ast_data": "FunctionDef name:remove_field arg:self arg:model arg:field arguments arg arg arg If If Call If BoolOp BoolOp Call Call If Compare Call Return return:no Call"
  },
  {
    "library": "pytorch",
    "name": "get_config_line",
    "source_code": "def get_config_line(mod, k, v) -> str:\n    if importable_callable(v):\n        add_import(v)\n        return f'{mod}.{k} = {get_module_name(v, True)}{v.__name__}'\n    elif isinstance(v, (list, set)) and all((importable_callable(item) for item in v)):\n        for item in v:\n            add_import(item)\n        v_list = list_of_callables_to_string(v)\n        if isinstance(v, list):\n            return f'{mod}.{k} = {v_list}'\n        else:\n            return f'{mod}.{k} = {{ {', '.join(v_list)} }}'\n    else:\n        return f'{mod}.{k} = {v!r}'",
    "docstring": "Return a string version of the config line. Handle v when v is a callable, or a list/dict of callables. Add import statements for callables if necessary. We assume that the value of a single config won't be a mix of callables and non-callables. Example output: import logging import _warnings torch._dynamo.config.reorderable_logging_functions = { _warnings.warn, logging.warn, print }",
    "type": "method",
    "file_path": "pytorch\\torch\\utils\\_config_module.py",
    "ast_data": "FunctionDef name:get_config_line arg:mod arg:k arg:v arguments arg arg arg If Call Call Return return:yes Call If BoolOp Call Call Call For Call Assign Call If Call Return return:yes Return return:yes Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "SACStats",
    "source_code": "@dataclass\nclass SACStats:\n    func_names: list[str]\n    runtimes: list[float]\n    memory: list[int]\n    view_like_ops: list[int]\n    rand_ops: list[int]\n    saved_autograd_ops: list[int]\n    inplace_ops: list[tuple[int, int]]\n    force_store_random: bool",
    "docstring": "A class for storing Activation Checkpointing statistics corresponding to a module. Attributes: func_names (List[str]): List of operator names. runtimes (List[float]): List of operator runtimes in millliseconds. memory (List[int]): List of operator memory usage in bytes. view_like_ops (List[int]): Indices of view-like operators. rand_ops (List[int]): Indices of random operators. saved_autograd_ops (List[int]): Indices of operator results saved by autograd engine. inplace_ops (List[Tuple[int, int]]): Tuple of indices of op and its first parent for Inplace operators. force_store_random (bool): Whether to force store random operator results.",
    "type": "class",
    "file_path": "pytorch\\torch\\distributed\\_tools\\sac_estimator.py",
    "ast_data": "ClassDef name:SACStats"
  },
  {
    "library": "tensorflow",
    "name": "_show_all",
    "source_code": "def _show_all(saved_model_dir):\n    saved_model = saved_model_utils.read_saved_model(saved_model_dir)\n    for meta_graph_def in sorted(saved_model.meta_graphs, key=lambda meta_graph_def: list(meta_graph_def.meta_info_def.tags)):\n        tag_set = meta_graph_def.meta_info_def.tags\n        print(\"\\nMetaGraphDef with tag-set: '%s' contains the following SignatureDefs:\" % ', '.join(tag_set))\n        tag_set = ','.join(tag_set)\n        signature_def_map = meta_graph_def.signature_def\n        for signature_def_key in sorted(signature_def_map.keys()):\n            print(\"\\nsignature_def['\" + signature_def_key + \"']:\")\n            _show_inputs_outputs_mgd(meta_graph_def, signature_def_key, indent=1)\n        _show_ops_in_metagraph_mgd(meta_graph_def)\n    _show_defined_functions(saved_model_dir, saved_model.meta_graphs)",
    "docstring": "Prints tag-set, ops, SignatureDef, and Inputs/Outputs of SavedModel. Prints all tag-set, ops, SignatureDef and Inputs/Outputs information stored in SavedModel directory. Args: saved_model_dir: Directory containing the SavedModel to inspect.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\tools\\saved_model_cli.py",
    "ast_data": "FunctionDef name:_show_all arg:saved_model_dir arguments arg Assign Call For Call arguments arg Call Assign Call Call Assign Call Assign For Call Call Call Call Call Call"
  },
  {
    "library": "scipy",
    "name": "process_tempita",
    "source_code": "def process_tempita(fromfile, outfile=None):\n    from_filename = tempita.Template.from_filename\n    template = from_filename(fromfile, encoding=sys.getdefaultencoding())\n    content = template.substitute()\n    with open(outfile, 'w') as f:\n        f.write(content)",
    "docstring": "Process tempita templated file and write out the result. The template file is expected to end in or : E.g. processing generates .",
    "type": "function",
    "file_path": "scipy\\scipy\\_build_utils\\tempita.py",
    "ast_data": "FunctionDef name:process_tempita arg:fromfile arg:outfile arguments arg arg Assign Assign Call Call Assign Call With Call Call"
  },
  {
    "library": "matplotlib",
    "name": "get_cachedir",
    "source_code": "@_logged_cached('CACHEDIR=%s')\ndef get_cachedir():\n    return _get_config_or_cache_dir(_get_xdg_cache_dir)",
    "docstring": "Return the string path of the cache directory. The procedure used to find the directory is the same as for , except using `` instead.",
    "type": "function",
    "file_path": "matplotlib\\lib\\matplotlib\\__init__.py",
    "ast_data": "FunctionDef name:get_cachedir arguments Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "sparse_segment_sqrt_n",
    "source_code": "@tf_export(v1=['sparse.segment_sqrt_n', 'sparse_segment_sqrt_n'])\n@deprecation.deprecated_endpoints('sparse_segment_sqrt_n')\ndef sparse_segment_sqrt_n(data, indices, segment_ids, name=None, num_segments=None, sparse_gradient=False):\n    if num_segments is not None:\n        return gen_math_ops.sparse_segment_sqrt_n_with_num_segments(data=data, indices=indices, segment_ids=segment_ids, num_segments=num_segments, name=name, sparse_gradient=sparse_gradient)\n    else:\n        return gen_math_ops.sparse_segment_sqrt_n(data=data, indices=indices, segment_ids=segment_ids, name=name, sparse_gradient=sparse_gradient)",
    "docstring": "Computes the sum along sparse segments of a tensor divided by the sqrt(N). is the size of the segment being reduced. Args: data: A with data that will be assembled in the output. indices: A 1-D with indices into . Has same rank as . segment_ids: A 1-D with indices into the output . Values should be sorted and can be repeated. name: A name for the operation (optional). num_segments: An optional int32 scalar. Indicates the size of the output . sparse_gradient: An optional . Defaults to . If , the gradient of this function will be sparse (IndexedSlices) instead of dense (Tensor). Returns: A of the shape as data, except for dimension 0 which has size , the number of segments specified via or inferred for the last element in .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\math_ops.py",
    "ast_data": "FunctionDef name:sparse_segment_sqrt_n arg:data arg:indices arg:segment_ids arg:name arg:num_segments arg:sparse_gradient arguments arg arg arg arg arg arg If Compare Return return:yes Call Return return:yes Call Call Call"
  },
  {
    "library": "cherrypy",
    "name": "graceful",
    "source_code": "def graceful(self):\n    self.stop()\n    self.start()",
    "docstring": "Stop the callback's background task thread and restart it.",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\process\\plugins.py",
    "ast_data": "FunctionDef name:graceful arg:self arguments arg Call Call"
  },
  {
    "library": "sphinx",
    "name": "set_translator",
    "source_code": "def set_translator(self, name: str, translator_class: type[nodes.NodeVisitor], override: bool=False) -> None:\n    self.registry.add_translator(name, translator_class, override=override)",
    "docstring": "Register or override a Docutils translator class. This is used to register a custom output translator or to replace a builtin translator. This allows extensions to use a custom translator and define custom nodes for the translator (see :meth:). :param name: The name of the builder for the translator :param translator_class: A translator class :param override: If true, install the translator forcedly even if another translator is already installed as the same name .. versionadded:: 1.3 .. versionchanged:: 1.8 Add *override* keyword.",
    "type": "method",
    "file_path": "sphinx\\sphinx\\application.py",
    "ast_data": "FunctionDef name:set_translator arg:self arg:name arg:translator_class arg:override arguments arg arg arg arg Call"
  },
  {
    "library": "pandas",
    "name": "visit_Slice",
    "source_code": "def visit_Slice(self, node, **kwargs) -> slice:\n    lower = node.lower\n    if lower is not None:\n        lower = self.visit(lower).value\n    upper = node.upper\n    if upper is not None:\n        upper = self.visit(upper).value\n    step = node.step\n    if step is not None:\n        step = self.visit(step).value\n    return slice(lower, upper, step)",
    "docstring": "df.index[slice(4,6)]",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\computation\\expr.py",
    "ast_data": "FunctionDef name:visit_Slice arg:self arg:node arguments arg arg arg Assign If Compare Assign Call Assign If Compare Assign Call Assign If Compare Assign Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "_check_tuning_assertions",
    "source_code": "def _check_tuning_assertions() -> None:\n    if is_enabled() is False:\n        warnings.warn('TunableOp was disabled. Trying to enable now.')\n        enable(True)\n    assert is_enabled() is True\n    assert tuning_is_enabled() is True\n    assert record_untuned_is_enabled() is False",
    "docstring": "Helper function for multi-GPU tuning case. Need to check that TunableOp feature is enabled and that tuning is enabled.",
    "type": "function",
    "file_path": "pytorch\\torch\\cuda\\tunable.py",
    "ast_data": "FunctionDef name:_check_tuning_assertions arguments If Compare Call Call Call Compare Call Compare Call Compare Call"
  },
  {
    "library": "pytorch",
    "name": "_get_shape_env",
    "source_code": "@classmethod\ndef _get_shape_env(cls: type[GuardedCache[T]]) -> Optional[ShapeEnv]:\n    ctx = torch._guards.TracingContext.try_get()\n    if not ctx:\n        return None\n    return ctx.fake_mode.shape_env",
    "docstring": "Helper to get the shape env from the tracing context.",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\codecache.py",
    "ast_data": "FunctionDef name:_get_shape_env arg:cls arguments arg Assign Call If Return return:no Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "LiveRange",
    "source_code": "@dataclasses.dataclass\nclass LiveRange:\n    begin: float\n    end: float\n\n    def contains(self, other: LiveRange):\n        return self.begin <= other.begin and other.end <= self.end\n\n    def join(self, other: LiveRange):\n        return LiveRange(min(self.begin, other.begin), max(self.end, other.end))\n\n    def __len__(self):\n        return self.end - self.begin",
    "docstring": "A range where a given tensor is live. Begin and end are both counters representing points in the program of grouped memory operations. Begin is inclusive, end is exclusive. Invariant: begin <= end",
    "type": "class",
    "file_path": "pytorch\\torch\\_inductor\\codegen\\memory_planning.py",
    "ast_data": "ClassDef name:LiveRange FunctionDef name:contains arg:self arg:other arguments arg arg Return return:yes BoolOp Compare Compare FunctionDef name:join arg:self arg:other arguments arg arg Return return:yes Call Call Call FunctionDef name:__len__ arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "run_eval",
    "source_code": "def run_eval(interpreter, input_image):\n    input_details = interpreter.get_input_details()\n    output_details = interpreter.get_output_details()\n    input_image = np.reshape(input_image, input_details[0]['shape'])\n    interpreter.set_tensor(input_details[0]['index'], input_image)\n    interpreter.invoke()\n    output_data = interpreter.get_tensor(output_details[0]['index'])\n    output = np.squeeze(output_data)\n    return output",
    "docstring": "Performs evaluation for input image over specified model. Args: interpreter: TFLite interpreter initialized with model to execute. input_image: Image input to the model. Returns: output: output tensor of model being executed.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\lite\\tutorials\\mnist_tflite.py",
    "ast_data": "FunctionDef name:run_eval arg:interpreter arg:input_image arguments arg arg Assign Call Assign Call Assign Call Call Call Assign Call Assign Call Return return:yes"
  },
  {
    "library": "pandas",
    "name": "validate_metadata",
    "source_code": "def validate_metadata(self, handler: AppendableTable) -> None:\n    if self.meta == 'category':\n        new_metadata = self.metadata\n        cur_metadata = handler.read_metadata(self.cname)\n        if new_metadata is not None and cur_metadata is not None and (not array_equivalent(new_metadata, cur_metadata, strict_nan=True, dtype_equal=True)):\n            raise ValueError('cannot append a categorical with different categories to the existing')",
    "docstring": "validate that kind=category does not change the categories",
    "type": "method",
    "file_path": "pandas\\pandas\\io\\pytables.py",
    "ast_data": "FunctionDef name:validate_metadata arg:self arg:handler arguments arg arg If Compare Assign Assign Call If BoolOp Compare Compare Call Raise Call"
  },
  {
    "library": "tensorflow",
    "name": "validate_file",
    "source_code": "def validate_file(fpath, file_hash, algorithm='auto', chunk_size=65535):\n    hasher = _resolve_hasher(algorithm, file_hash)\n    if str(_hash_file(fpath, hasher, chunk_size)) == str(file_hash):\n        return True\n    else:\n        return False",
    "docstring": "Validates a file against a sha256 or md5 hash. Args: fpath: path to the file being validated file_hash: The expected hash string of the file. The sha256 and md5 hash algorithms are both supported. algorithm: Hash algorithm, one of 'auto', 'sha256', or 'md5'. The default 'auto' detects the hash algorithm in use. chunk_size: Bytes to read at a time, important for large files. Returns: Whether the file is valid",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\utils\\data_utils.py",
    "ast_data": "FunctionDef name:validate_file arg:fpath arg:file_hash arg:algorithm arg:chunk_size arguments arg arg arg arg Assign Call If Compare Call Call Call Return return:yes Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_tf_tensor_set_item",
    "source_code": "def _tf_tensor_set_item(target, i, x):\n    return gen_array_ops.tensor_scatter_update(target, ((i,),), (x,))",
    "docstring": "Overload of set_item that stages a Tensor scatter update.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\autograph\\operators\\slices.py",
    "ast_data": "FunctionDef name:_tf_tensor_set_item arg:target arg:i arg:x arguments arg arg arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "shutdown",
    "source_code": "def shutdown(self, wait: bool=True) -> None:\n    if self.alive():\n        TuningProcess.send(None, self.write_pipe)\n    if wait:\n        self.wait()",
    "docstring": "Signal the child process to shut down gracefully.",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\autotune_process.py",
    "ast_data": "FunctionDef name:shutdown arg:self arg:wait arguments arg arg If Call Call If Call"
  },
  {
    "library": "kornia",
    "name": "InRange",
    "source_code": "class InRange(Module):\n\n    def __init__(self, lower: Union[tuple[Any, ...], Tensor], upper: Union[tuple[Any, ...], Tensor], return_mask: bool=False) -> None:\n        super().__init__()\n        self.lower = lower\n        self.upper = upper\n        self.return_mask = return_mask\n\n    def forward(self, input: Tensor) -> Tensor:\n        return in_range(input, self.lower, self.upper, self.return_mask)",
    "docstring": "Create a module for applying lower and upper bounds to input tensors. Args: input: The input tensor to be filtered. lower: The lower bounds of the filter (inclusive). upper: The upper bounds of the filter (inclusive). return_mask: If is true, the filtered mask is returned, otherwise the filtered input image. Returns: A binary mask :math: of input indicating whether elements are within the range or filtered input image :math:. .. note:: View complete documentation in :func:. Examples: >>> rng = torch.manual_seed(1) >>> input = torch.rand(1, 3, 3, 3) >>> lower = (0.2, 0.3, 0.4) >>> upper = (0.8, 0.9, 1.0) >>> mask = InRange(lower, upper, return_mask=True)(input) >>> mask tensor([[[[1., 1., 0.], [0., 0., 0.], [0., 1., 1.]]]])",
    "type": "class",
    "file_path": "kornia\\kornia\\filters\\in_range.py",
    "ast_data": "ClassDef name:InRange FunctionDef name:__init__ arg:self arg:lower arg:upper arg:return_mask arguments arg arg arg arg Call Call Assign Assign Assign FunctionDef name:forward arg:self arg:input arguments arg arg Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "run",
    "source_code": "def run(self):\n    args = (self.model.fcn, self.beta0, self.data.y, self.data.x)\n    kwds = {'full_output': 1}\n    kwd_l = ['ifixx', 'ifixb', 'job', 'iprint', 'errfile', 'rptfile', 'ndigit', 'taufac', 'sstol', 'partol', 'maxit', 'stpb', 'stpd', 'sclb', 'scld', 'work', 'iwork']\n    if self.delta0 is not None and self.job // 10000 % 10 == 0:\n        self._gen_work()\n        d0 = np.ravel(self.delta0)\n        self.work[:len(d0)] = d0\n    if self.model.fjacb is not None:\n        kwds['fjacb'] = self.model.fjacb\n    if self.model.fjacd is not None:\n        kwds['fjacd'] = self.model.fjacd\n    if self.data.we is not None:\n        kwds['we'] = self.data.we\n    if self.data.wd is not None:\n        kwds['wd'] = self.data.wd\n    if self.model.extra_args is not None:\n        kwds['extra_args'] = self.model.extra_args\n    for attr in kwd_l:\n        obj = getattr(self, attr)\n        if obj is not None:\n            kwds[attr] = obj\n    with ODR_LOCK:\n        self.output = Output(odr(*args, **kwds))\n    return self.output",
    "docstring": "Run the fitting routine with all of the information given and with ``. Returns ------- output : Output instance This object is also assigned to the attribute .output .",
    "type": "method",
    "file_path": "scipy\\scipy\\odr\\_odrpack.py",
    "ast_data": "FunctionDef name:run arg:self arguments arg Assign Assign Assign If BoolOp Compare Compare Call Assign Call Assign Call If Compare Assign If Compare Assign If Compare Assign If Compare Assign If Compare Assign For Assign Call If Compare Assign With Assign Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "constraint",
    "source_code": "@property\ndef constraint(self):\n    return self._constraint",
    "docstring": "Returns the constraint function associated with this variable. Returns: The constraint function that was passed to the variable constructor. Can be if no constraint was passed.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\ref_variable.py",
    "ast_data": "FunctionDef name:constraint arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "parse_command",
    "source_code": "def parse_command(command):\n    command = command.strip()\n    if not command:\n        return []\n    brackets_intervals = [f.span() for f in _BRACKETS_PATTERN.finditer(command)]\n    quotes_intervals = [f.span() for f in _QUOTES_PATTERN.finditer(command)]\n    whitespaces_intervals = [f.span() for f in _WHITESPACE_PATTERN.finditer(command)]\n    if not whitespaces_intervals:\n        return [command]\n    arguments = []\n    idx0 = 0\n    for start, end in whitespaces_intervals + [(len(command), None)]:\n        if not any((interval[0] < start < interval[1] for interval in brackets_intervals + quotes_intervals)):\n            argument = command[idx0:start]\n            if argument.startswith('\"') and argument.endswith('\"') or (argument.startswith(\"'\") and argument.endswith(\"'\")):\n                argument = argument[1:-1]\n            arguments.append(argument)\n            idx0 = end\n    return arguments",
    "docstring": "Parse command string into a list of arguments. - Disregards whitespace inside double quotes and brackets. - Strips paired leading and trailing double quotes in arguments. - Splits the command at whitespace. Nested double quotes and brackets are not handled. Args: command: (str) Input command. Returns: (list of str) List of arguments.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\debug\\cli\\command_parser.py",
    "ast_data": "FunctionDef name:parse_command arg:command arguments arg Assign Call If Return return:no Assign Call Call Assign Call Call Assign Call Call If Return return:yes Assign Assign For Call If Call Compare Assign If BoolOp BoolOp Call Call BoolOp Call Call Assign Call Assign Return return:yes"
  },
  {
    "library": "scipy",
    "name": "cache_policy",
    "source_code": "@property\ndef cache_policy(self):\n    return self._cache_policy",
    "docstring": "{None, \"no_cache\"}: Specifies the extent to which intermediate results are cached. Left unspecified, intermediate results of some calculations (e.g. distribution support, moments, etc.) are cached to improve performance of future calculations. Pass `` to reduce memory reserved by the class instance.",
    "type": "method",
    "file_path": "scipy\\scipy\\stats\\_distribution_infrastructure.py",
    "ast_data": "FunctionDef name:cache_policy arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_add_signature",
    "source_code": "def _add_signature(self, name, concrete_function):\n    self._signatures[name] = concrete_function",
    "docstring": "Adds a signature to the _SignatureMap.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\saved_model\\signature_serialization.py",
    "ast_data": "FunctionDef name:_add_signature arg:self arg:name arg:concrete_function arguments arg arg arg Assign"
  },
  {
    "library": "django",
    "name": "_clone_test_db",
    "source_code": "def _clone_test_db(self, suffix, verbosity, keepdb=False):\n    raise NotImplementedError(\"The database backend doesn't support cloning databases. Disable the option to run tests in parallel processes.\")",
    "docstring": "Internal implementation - duplicate the test db tables.",
    "type": "method",
    "file_path": "django\\django\\db\\backends\\base\\creation.py",
    "ast_data": "FunctionDef name:_clone_test_db arg:self arg:suffix arg:verbosity arg:keepdb arguments arg arg arg arg Raise Call"
  },
  {
    "library": "tensorflow",
    "name": "_init_from_converter",
    "source_code": "def _init_from_converter(self, options: QuantizationDebugOptions, converter: TFLiteConverter, calibrated_model: Optional[bytes]=None, float_model: Optional[bytes]=None) -> None:\n    self.quant_model = convert.mlir_quantize(calibrated_model, disable_per_channel=converter._experimental_disable_per_channel, fully_quantize=options.fully_quantize, enable_numeric_verify=True, denylisted_ops=options.denylisted_ops, denylisted_nodes=options.denylisted_nodes)\n    self._quant_interpreter = _interpreter.Interpreter(model_content=self.quant_model)\n    self._float_interpreter = None\n    if float_model is not None:\n        self._float_interpreter = _interpreter.Interpreter(model_content=float_model)",
    "docstring": "Convert the model and apply options. Converts the quantized model and initializes a quantized model interpreter with the quantized model. Returns a float model interpreter if float model is provided. Args: options: a QuantizationDebugOptions object. converter: an initialized tf.lite.TFLiteConverter. calibrated_model: Calibrated model bytes. float_model: Float model bytes.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\lite\\tools\\optimize\\debugging\\python\\debugger.py",
    "ast_data": "FunctionDef name:_init_from_converter arg:self arg:options arg:converter arg:calibrated_model arg:float_model arguments arg arg arg arg arg Assign Call Assign Call Assign If Compare Assign Call"
  },
  {
    "library": "tensorflow",
    "name": "_reset_jit_compiler_flags",
    "source_code": "def _reset_jit_compiler_flags():\n    pywrap_tfe.TF_ResetJitCompilerFlags()",
    "docstring": "Clears and re-initializes the TF JIT compiler flags. Should only be used for testing.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\context.py",
    "ast_data": "FunctionDef name:_reset_jit_compiler_flags arguments Call"
  },
  {
    "library": "matplotlib",
    "name": "fully_contains",
    "source_code": "def fully_contains(self, x, y):\n    return self.fully_containsx(x) and self.fully_containsy(y)",
    "docstring": "Return whether `` is in the bounding box, but not on its edge.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\transforms.py",
    "ast_data": "FunctionDef name:fully_contains arg:self arg:x arg:y arguments arg arg arg Return return:yes BoolOp Call Call"
  },
  {
    "library": "django",
    "name": "db_returning_fields",
    "source_code": "@cached_property\ndef db_returning_fields(self):\n    return [field for field in self._get_fields(forward=True, reverse=False, include_parents=PROXY_PARENTS) if getattr(field, 'db_returning', False)]",
    "docstring": "Private API intended only to be used by Django itself. Fields to be returned after a database insert.",
    "type": "method",
    "file_path": "django\\django\\db\\models\\options.py",
    "ast_data": "FunctionDef name:db_returning_fields arg:self arguments arg Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "_index_to_param",
    "source_code": "@property\ndef _index_to_param(self) -> list[torch.Tensor]:\n    if len(self._index_to_param_cache) == 0:\n        self._index_to_param_cache = list(chain.from_iterable((g['params'] for g in self.param_groups)))\n    return self._index_to_param_cache",
    "docstring": "List mapping parameter indices in the global optimizer scheme to the actual params.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\optim\\zero_redundancy_optimizer.py",
    "ast_data": "FunctionDef name:_index_to_param arg:self arguments arg If Compare Call Assign Call Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "__init__",
    "source_code": "def __init__(self, pad=0.3, tooth_size=None):\n    self.pad = pad\n    self.tooth_size = tooth_size",
    "docstring": "Parameters ---------- pad : float, default: 0.3 The amount of padding around the original box. tooth_size : float, default: *pad*/2 Size of the sawtooth.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\patches.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:pad arg:tooth_size arguments arg arg arg Assign Assign"
  },
  {
    "library": "kornia",
    "name": "get_canny_nms_kernel",
    "source_code": "def get_canny_nms_kernel(device: Optional[Device]=None, dtype: Optional[Dtype]=None) -> Tensor:\n    return tensor([[[[0.0, 0.0, 0.0], [0.0, 1.0, -1.0], [0.0, 0.0, 0.0]]], [[[0.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, -1.0]]], [[[0.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, -1.0, 0.0]]], [[[0.0, 0.0, 0.0], [0.0, 1.0, 0.0], [-1.0, 0.0, 0.0]]], [[[0.0, 0.0, 0.0], [-1.0, 1.0, 0.0], [0.0, 0.0, 0.0]]], [[[-1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 0.0]]], [[[0.0, -1.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 0.0]]], [[[0.0, 0.0, -1.0], [0.0, 1.0, 0.0], [0.0, 0.0, 0.0]]]], device=device, dtype=dtype)",
    "docstring": "Return 3x3 kernels for the Canny Non-maximal suppression.",
    "type": "function",
    "file_path": "kornia\\kornia\\filters\\kernels.py",
    "ast_data": "FunctionDef name:get_canny_nms_kernel arg:device arg:dtype arguments arg arg Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "set_rlabel_position",
    "source_code": "def set_rlabel_position(self, value):\n    self._r_label_position.clear().translate(np.deg2rad(value), 0.0)",
    "docstring": "Update the theta position of the radius labels. Parameters ---------- value : number The angular position of the radius labels in degrees.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\projections\\polar.py",
    "ast_data": "FunctionDef name:set_rlabel_position arg:self arg:value arguments arg arg Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "update_converged_count",
    "source_code": "def update_converged_count(self):\n    prev_count = self.ivars['converged_count']\n    tol = self.fparams['tol']\n    A_norm = self.fvars['A_norm']\n    B_norm = self.fvars['B_norm']\n    E, X, R = (self.E, self.X, self.R)\n    rerr = torch.norm(R, 2, (0,)) / (torch.norm(X, 2, (0,)) * (A_norm + torch.abs(E[:X.shape[-1]]) * B_norm))\n    converged = rerr < tol\n    count = 0\n    for b in converged:\n        if not b:\n            break\n        count += 1\n    assert count >= prev_count, f'the number of converged eigenpairs (was {prev_count}, got {count}) cannot decrease'\n    self.ivars['converged_count'] = count\n    self.tvars['rerr'] = rerr\n    return count",
    "docstring": "Determine the number of converged eigenpairs using backward stable convergence criterion, see discussion in Sec 4.3 of [DuerschEtal2018]. Users may redefine this method for custom convergence criteria.",
    "type": "method",
    "file_path": "pytorch\\torch\\_lobpcg.py",
    "ast_data": "FunctionDef name:update_converged_count arg:self arguments arg Assign Assign Assign Assign Assign Assign Call Call Call Assign Compare Assign For If Compare Assign Assign Return return:yes"
  },
  {
    "library": "pandas",
    "name": "_objects_to_td64ns",
    "source_code": "def _objects_to_td64ns(data, unit=None, errors: DateTimeErrorChoices='raise') -> np.ndarray:\n    values = np.asarray(data, dtype=np.object_)\n    result = array_to_timedelta64(values, unit=unit, errors=errors)\n    return result.view('timedelta64[ns]')",
    "docstring": "Convert a object-dtyped or string-dtyped array into an timedelta64[ns]-dtyped array. Parameters ---------- data : ndarray or Index unit : str, default \"ns\" The timedelta unit to treat integers as multiples of. Must not be specified if the data contains a str. errors : {\"raise\", \"coerce\", \"ignore\"}, default \"raise\" How to handle elements that cannot be converted to timedelta64[ns]. See `pandas.to_timedeltaerrors=ignore` will not cause errors to be ignored; they are caught and subsequently ignored at a higher level.",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\arrays\\timedeltas.py",
    "ast_data": "FunctionDef name:_objects_to_td64ns arg:data arg:unit arg:errors arguments arg arg arg Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "_generate_unsampled_indices",
    "source_code": "def _generate_unsampled_indices(random_state, n_samples, n_samples_bootstrap):\n    sample_indices = _generate_sample_indices(random_state, n_samples, n_samples_bootstrap)\n    sample_counts = np.bincount(sample_indices, minlength=n_samples)\n    unsampled_mask = sample_counts == 0\n    indices_range = np.arange(n_samples)\n    unsampled_indices = indices_range[unsampled_mask]\n    return unsampled_indices",
    "docstring": "Private function used to forest._set_oob_score function.",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\ensemble\\_forest.py",
    "ast_data": "FunctionDef name:_generate_unsampled_indices arg:random_state arg:n_samples arg:n_samples_bootstrap arguments arg arg arg Assign Call Assign Call Assign Compare Assign Call Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "ShapeEquals",
    "source_code": "def ShapeEquals(tensor_proto, shape):\n    if not isinstance(tensor_proto, tensor_pb2.TensorProto):\n        raise TypeError(f'`tensor_proto` must be a tensor_pb2.TensorProto object, but got type {type(tensor_proto)}.')\n    if isinstance(shape, tensor_shape_pb2.TensorShapeProto):\n        shape = [d.size for d in shape.dim]\n    elif not isinstance(shape, (list, tuple)):\n        raise TypeError(f'`shape` must be a list or tuple, but got type {type(shape)}.')\n    tensor_shape_list = [d.size for d in tensor_proto.tensor_shape.dim]\n    return all((x == y for x, y in zip(tensor_shape_list, shape)))",
    "docstring": "Returns True if \"tensor_proto\" has the given \"shape\". Args: tensor_proto: A TensorProto. shape: A tensor shape, expressed as a TensorShape, list, or tuple. Returns: True if \"tensor_proto\" has the given \"shape\", otherwise False. Raises: TypeError: If \"tensor_proto\" is not a TensorProto, or shape is not a TensorShape, list, or tuple.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\tensor_util.py",
    "ast_data": "FunctionDef name:ShapeEquals arg:tensor_proto arg:shape arguments arg arg If Call Raise Call Call If Call Assign If Call Raise Call Call Assign Return return:yes Call Compare Call"
  },
  {
    "library": "authlib",
    "name": "verify",
    "source_code": "def verify(self, msg, sig, key):\n    raise NotImplementedError",
    "docstring": "Verify the signature of text msg with a public/verify key. :param msg: message bytes to be signed :param sig: result signature to be compared :param key: public key to verify the signature :return: boolean",
    "type": "method",
    "file_path": "authlib\\authlib\\jose\\rfc7515\\models.py",
    "ast_data": "FunctionDef name:verify arg:self arg:msg arg:sig arg:key arguments arg arg arg arg Raise"
  },
  {
    "library": "pytorch",
    "name": "guard_as_python_constant",
    "source_code": "def guard_as_python_constant(self):\n    return functools.partial(self.func.guard_as_python_constant(), *[v.guard_as_python_constant() for v in self.args], **{k: v.guard_as_python_constant() for k, v in self.keywords.items()})",
    "docstring": "Similar to as_python_constant(), but add ID_MATCH guards to try to force things to become constants",
    "type": "method",
    "file_path": "pytorch\\torch\\_dynamo\\variables\\functions.py",
    "ast_data": "FunctionDef name:guard_as_python_constant arg:self arguments arg Return return:yes Call Call Call Call Call"
  },
  {
    "library": "scipy",
    "name": "validate",
    "source_code": "def validate(self, arr, parameter_values):\n    arr = np.asarray(arr)\n    valid_dtype = None\n    if arr.dtype == np.float64 or arr.dtype == np.float32:\n        pass\n    elif arr.dtype == np.int32 or arr.dtype == np.int64:\n        arr = np.asarray(arr, dtype=np.float64)\n    elif np.issubdtype(arr.dtype, np.floating):\n        pass\n    elif np.issubdtype(arr.dtype, np.integer):\n        arr = np.asarray(arr, dtype=np.float64)\n    else:\n        message = f'Parameter `{self.name}` must be of real dtype.'\n        raise TypeError(message)\n    valid = self.domain.contains(arr, parameter_values)\n    valid = valid & valid_dtype if valid_dtype is not None else valid\n    return (arr[()], arr.dtype, valid)",
    "docstring": "Input validation/standardization of numerical values of a parameter. Checks whether elements of the argument are reals, ensuring that the dtype reflects this. Also produces a logical array that indicates which elements meet the requirements. Parameters ---------- arr : ndarray The argument array to be validated and standardized. parameter_values : dict Map of parameter names to parameter value arrays. Returns ------- arr : ndarray The argument array that has been validated and standardized (converted to an appropriate dtype, if necessary). dtype : NumPy dtype The appropriate floating point dtype of the parameter. valid : boolean ndarray Logical array indicating which elements are valid (True) and which are not (False). The arrays of all distribution parameters will be broadcasted, and elements for which any parameter value does not meet the requirements will be replaced with NaN.",
    "type": "method",
    "file_path": "scipy\\scipy\\stats\\_distribution_infrastructure.py",
    "ast_data": "FunctionDef name:validate arg:self arg:arr arg:parameter_values arguments arg arg arg Assign Call Assign If BoolOp Compare Compare If BoolOp Compare Compare Assign Call If Call If Call Assign Call Assign Raise Call Assign Call Assign Compare Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "__init__",
    "source_code": "def __init__(self, figure, width, height, dpi, vector_renderer, raster_renderer_class=None, bbox_inches_restore=None):\n    if raster_renderer_class is None:\n        raster_renderer_class = RendererAgg\n    self._raster_renderer_class = raster_renderer_class\n    self._width = width\n    self._height = height\n    self.dpi = dpi\n    self._vector_renderer = vector_renderer\n    self._raster_renderer = None\n    self.figure = figure\n    self._figdpi = figure.dpi\n    self._bbox_inches_restore = bbox_inches_restore\n    self._renderer = vector_renderer",
    "docstring": "Parameters ---------- figure : The figure instance. width : float The width of the canvas in logical units height : float The height of the canvas in logical units dpi : float The dpi of the canvas vector_renderer : An instance of a subclass of that will be used for the vector drawing. raster_renderer_class : The renderer class to use for the raster drawing. If not provided, this will use the Agg backend (which is currently the only viable option anyway.)",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\backends\\backend_mixed.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:figure arg:width arg:height arg:dpi arg:vector_renderer arg:raster_renderer_class arg:bbox_inches_restore arguments arg arg arg arg arg arg arg arg If Compare Assign Assign Assign Assign Assign Assign Assign Assign Assign Assign Assign"
  },
  {
    "library": "sphinx",
    "name": "smart_capwords",
    "source_code": "def smart_capwords(s: str, sep: str | None=None) -> str:\n    words = s.split(sep)\n    for i, word in enumerate(words):\n        if all((x.islower() for x in word)):\n            words[i] = word.capitalize()\n    return (sep or ' ').join(words)",
    "docstring": "Like string.capwords() but does not capitalize words that already contain a capital letter.",
    "type": "function",
    "file_path": "sphinx\\sphinx\\writers\\texinfo.py",
    "ast_data": "FunctionDef name:smart_capwords arg:s arg:sep arguments arg arg Assign Call For Call If Call Call Assign Call Return return:yes Call BoolOp"
  },
  {
    "library": "scipy",
    "name": "scaling",
    "source_code": "def scaling(self, z):\n    s = self.get_slack(z)\n    diag_elements = np.hstack((np.ones(self.n_vars), s))\n\n    def matvec(vec):\n        return diag_elements * vec\n    return LinearOperator((self.n_vars + self.n_ineq, self.n_vars + self.n_ineq), matvec)",
    "docstring": "Returns scaling vector. Given by: scaling = [ones(n_vars), s]",
    "type": "method",
    "file_path": "scipy\\scipy\\optimize\\_trustregion_constr\\tr_interior_point.py",
    "ast_data": "FunctionDef name:scaling arg:self arg:z arguments arg arg Assign Call Assign Call Call FunctionDef name:matvec arg:vec arguments arg Return return:yes Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "ndim",
    "source_code": "@final\n@property\ndef ndim(self) -> int:\n    return self._mgr.ndim",
    "docstring": "Return an int representing the number of axes / array dimensions. Return 1 if Series. Otherwise return 2 if DataFrame. See Also -------- numpy.ndarray.ndim : Number of array dimensions. Examples -------- >>> s = pd.Series({\"a\": 1, \"b\": 2, \"c\": 3}) >>> s.ndim 1 >>> df = pd.DataFrame({\"col1\": [1, 2], \"col2\": [3, 4]}) >>> df.ndim 2",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\generic.py",
    "ast_data": "FunctionDef name:ndim arg:self arguments arg Return return:yes"
  },
  {
    "library": "scipy",
    "name": "_ravel_coords",
    "source_code": "def _ravel_coords(coords, shape, order='C'):\n    if len(coords) == 1:\n        return coords[0]\n    if len(coords) == 2:\n        nrows, ncols = shape\n        row, col = coords\n        if order == 'C':\n            maxval = ncols * max(0, nrows - 1) + max(0, ncols - 1)\n            idx_dtype = get_index_dtype(maxval=maxval)\n            return np.multiply(ncols, row, dtype=idx_dtype) + col\n        elif order == 'F':\n            maxval = nrows * max(0, ncols - 1) + max(0, nrows - 1)\n            idx_dtype = get_index_dtype(maxval=maxval)\n            return np.multiply(nrows, col, dtype=idx_dtype) + row\n        else:\n            raise ValueError(\"'order' must be 'C' or 'F'\")\n    return np.ravel_multi_index(coords, shape, order=order)",
    "docstring": "Like np.ravel_multi_index, but avoids some overflow issues.",
    "type": "function",
    "file_path": "scipy\\scipy\\sparse\\_coo.py",
    "ast_data": "FunctionDef name:_ravel_coords arg:coords arg:shape arg:order arguments arg arg arg If Compare Call Return return:yes If Compare Call Assign Assign If Compare Assign Call Call Assign Call Return return:yes Call If Compare Assign Call Call Assign Call Return return:yes Call Raise Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "get_ema_multi_avg_fn",
    "source_code": "def get_ema_multi_avg_fn(decay=0.999):\n    if decay < 0.0 or decay > 1.0:\n        raise ValueError(f'Invalid decay value {decay} provided. Please provide a value in [0,1] range.')\n\n    @torch.no_grad()\n    def ema_update(ema_param_list: PARAM_LIST, current_param_list: PARAM_LIST, _):\n        if torch.is_floating_point(ema_param_list[0]) or torch.is_complex(ema_param_list[0]):\n            torch._foreach_lerp_(ema_param_list, current_param_list, 1 - decay)\n        else:\n            for p_ema, p_model in zip(ema_param_list, current_param_list):\n                p_ema.copy_(p_ema * decay + p_model * (1 - decay))\n    return ema_update",
    "docstring": "Get the function applying exponential moving average (EMA) across multiple params.",
    "type": "function",
    "file_path": "pytorch\\torch\\optim\\swa_utils.py",
    "ast_data": "FunctionDef name:get_ema_multi_avg_fn arg:decay arguments arg If BoolOp Compare Compare Raise Call FunctionDef name:ema_update arg:ema_param_list arg:current_param_list arg:_ arguments arg arg arg If BoolOp Call Call Call For Call Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "Step",
    "source_code": "class Step(object):\n\n    def __init__(self, distribution):\n        self._distribution = distribution\n\n    @property\n    def distribution(self):\n        return self._distribution\n\n    def initialize(self):\n        return []\n\n    def __call__(self):\n        raise NotImplementedError('must be implemented in descendants')",
    "docstring": "Interface for performing each step of a training algorithm.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\step_fn.py",
    "ast_data": "ClassDef name:Step FunctionDef name:__init__ arg:self arg:distribution arguments arg arg Assign FunctionDef name:distribution arg:self arguments arg Return return:yes FunctionDef name:initialize arg:self arguments arg Return return:no FunctionDef name:__call__ arg:self arguments arg Raise Call"
  },
  {
    "library": "scikit-learn",
    "name": "get_stop_words",
    "source_code": "def get_stop_words(self):\n    return _check_stop_list(self.stop_words)",
    "docstring": "Build or fetch the effective stop words list. Returns ------- stop_words: list or None A list of stop words.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\feature_extraction\\text.py",
    "ast_data": "FunctionDef name:get_stop_words arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_create_op",
    "source_code": "def _create_op(op_type, inputs, op_dtypes, attrs=None):\n    op = ops.get_default_graph().create_op(op_type, inputs, op_dtypes, attrs=attrs, compute_device=True)\n    flat_attrs = []\n    for a in attrs:\n        flat_attrs.append(str(a))\n        flat_attrs.append(op.get_attr(str(a)))\n    execute.record_gradient(op_type, op.inputs, tuple(flat_attrs), op.outputs[:])\n    return op",
    "docstring": "Utility to create an op.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\parallel_for\\pfor.py",
    "ast_data": "FunctionDef name:_create_op arg:op_type arg:inputs arg:op_dtypes arg:attrs arguments arg arg arg arg Assign Call Call Assign For Call Call Call Call Call Call Call Return return:yes"
  },
  {
    "library": "cryptography",
    "name": "copy",
    "source_code": "@abc.abstractmethod\ndef copy(self) -> HashContext:\n    pass",
    "docstring": "Return a HashContext that is a copy of the current context.",
    "type": "method",
    "file_path": "cryptography\\src\\cryptography\\hazmat\\primitives\\hashes.py",
    "ast_data": "FunctionDef name:copy arg:self arguments arg"
  },
  {
    "library": "pytorch",
    "name": "_atleast_float",
    "source_code": "def _atleast_float(dtype, other_dtype):\n    if dtype is None:\n        dtype = other_dtype\n    if not (dtype.is_floating_point or dtype.is_complex):\n        return _dtypes_impl.default_dtypes().float_dtype\n    return dtype",
    "docstring": "Return a dtype that is real or complex floating-point. For inputs that are boolean or integer dtypes, this returns the default float dtype; inputs that are complex get converted to the default complex dtype; real floating-point dtypes () get passed through unchanged",
    "type": "function",
    "file_path": "pytorch\\torch\\_numpy\\_reductions_impl.py",
    "ast_data": "FunctionDef name:_atleast_float arg:dtype arg:other_dtype arguments arg arg If Compare Assign If BoolOp Return return:yes Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "set_theta_zero_location",
    "source_code": "def set_theta_zero_location(self, loc, offset=0.0):\n    mapping = {'N': np.pi * 0.5, 'NW': np.pi * 0.75, 'W': np.pi, 'SW': np.pi * 1.25, 'S': np.pi * 1.5, 'SE': np.pi * 1.75, 'E': 0, 'NE': np.pi * 0.25}\n    return self.set_theta_offset(mapping[loc] + np.deg2rad(offset))",
    "docstring": "Set the location of theta's zero. This simply calls with the correct value in radians. Parameters ---------- loc : str May be one of \"N\", \"NW\", \"W\", \"SW\", \"S\", \"SE\", \"E\", or \"NE\". offset : float, default: 0 An offset in degrees to apply from the specified *loc*. **Note:** this offset is *always* applied counter-clockwise regardless of the direction setting.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\projections\\polar.py",
    "ast_data": "FunctionDef name:set_theta_zero_location arg:self arg:loc arg:offset arguments arg arg arg Assign Return return:yes Call Call"
  },
  {
    "library": "numpy",
    "name": "make_mask_descr",
    "source_code": "def make_mask_descr(ndtype):\n    return _replace_dtype_fields(ndtype, MaskType)",
    "docstring": "Construct a dtype description list from a given dtype. Returns a new dtype object, with the type of all fields in to a boolean type. Field names are not altered. Parameters ---------- ndtype : dtype The dtype to convert. Returns ------- result : dtype A dtype that looks like , the type of all fields is boolean. Examples -------- >>> import numpy as np >>> import numpy.ma as ma >>> dtype = np.dtype({'names':['foo', 'bar'], ... 'formats':[np.float32, np.int64]}) >>> dtype dtype([('foo', '>> ma.make_mask_descr(dtype) dtype([('foo', '|b1'), ('bar', '|b1')]) >>> ma.make_mask_descr(np.float32) dtype('bool')",
    "type": "function",
    "file_path": "numpy\\numpy\\ma\\core.py",
    "ast_data": "FunctionDef name:make_mask_descr arg:ndtype arguments arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "get_client",
    "source_code": "def get_client(self):\n    return etcd.Client(host=self._host, port=self._port, version_prefix='/v2', read_timeout=10)",
    "docstring": "Return an etcd client object that can be used to make requests to this server.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\elastic\\rendezvous\\etcd_server.py",
    "ast_data": "FunctionDef name:get_client arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "demangle",
    "source_code": "def demangle(self, mangled: str) -> str:\n    if mangled.startswith(self._mangle_parent + '.'):\n        return mangled.partition('.')[2]\n    return mangled",
    "docstring": "Note: This only demangles names that were mangled by this specific PackageMangler. It will pass through names created by a different PackageMangler instance.",
    "type": "method",
    "file_path": "pytorch\\torch\\package\\_mangling.py",
    "ast_data": "FunctionDef name:demangle arg:self arg:mangled arguments arg arg If Call Return return:yes Call Return return:yes"
  },
  {
    "library": "kornia",
    "name": "get_random_forward_sequence",
    "source_code": "def get_random_forward_sequence(self, with_mix: bool=True) -> Tuple[Iterator[Tuple[str, Module]], bool]:\n    if isinstance(self.random_apply, tuple):\n        num_samples = int(torch.randint(*self.random_apply, (1,)).item())\n    else:\n        raise TypeError(f'random apply should be a tuple. Gotcha {type(self.random_apply)}')\n    multinomial_weights = self.random_apply_weights.clone()\n    mix_indices = self.get_mix_augmentation_indices(self.named_children())\n    multinomial_weights[mix_indices] = 0\n    indices = torch.multinomial(multinomial_weights, num_samples, replacement=num_samples > multinomial_weights.sum().item())\n    mix_added = False\n    if with_mix and len(mix_indices) != 0:\n        if (torch.rand(1) < (len(mix_indices) + len(indices)) / len(self)).item():\n            indices[-1] = torch.multinomial((~multinomial_weights.bool()).float(), 1)\n            indices = indices[torch.randperm(len(indices))]\n            mix_added = True\n    return (self.get_children_by_indices(indices), mix_added)",
    "docstring": "Get a forward sequence when random apply is in need. Args: with_mix: if to require a mix augmentation for the sequence. Note: Mix augmentations (e.g. RandomMixUp) will be only applied once even in a random forward.",
    "type": "method",
    "file_path": "kornia\\kornia\\augmentation\\container\\image.py",
    "ast_data": "FunctionDef name:get_random_forward_sequence arg:self arg:with_mix arguments arg arg If Call Assign Call Call Call Raise Call Call Assign Call Assign Call Call Assign Assign Call Compare Call Call Assign If BoolOp Compare Call If Call Compare Call Call Call Call Assign Call Call Call Assign Call Call Assign Return return:yes Call"
  },
  {
    "library": "kornia",
    "name": "from_file",
    "source_code": "@classmethod\ndef from_file(cls, file_path: str | Path) -> Image:\n    data: Tensor = load_image(file_path, desired_type=ImageLoadType.RGB8, device='cpu')\n    pixel_format = PixelFormat(color_space=ColorSpace.RGB, bit_depth=data.element_size() * 8)\n    layout = ImageLayout(image_size=ImageSize(height=data.shape[1], width=data.shape[2]), channels=data.shape[0], channels_order=ChannelsOrder.CHANNELS_FIRST)\n    return cls(data, pixel_format, layout)",
    "docstring": "Construct an image tensor from a file. Args: file_path: the path to the file to read the image from.",
    "type": "method",
    "file_path": "kornia\\kornia\\image\\image.py",
    "ast_data": "FunctionDef name:from_file arg:cls arg:file_path arguments arg arg Call Assign Call Call Assign Call Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "add_checks",
    "source_code": "def add_checks(self, check: Callable) -> None:\n    sig = inspect.signature(check)\n    if len(list(sig.parameters.values())) != 1:\n        raise TypeError('PassManager check function should only take in one variable, a module')\n    setattr(self, 'check', check)",
    "docstring": "Adds a function which takes runs various checks on a given graph module. This function is run before and after each pass if the flag is enabled.",
    "type": "method",
    "file_path": "pytorch\\torch\\fx\\passes\\infra\\pass_manager.py",
    "ast_data": "FunctionDef name:add_checks arg:self arg:check arguments arg arg Assign Call If Compare Call Call Call Raise Call Call"
  },
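The validation in add_checks hinges on inspect.signature counting exactly one parameter. A small replay of that check outside PassManager:

```python
import inspect

# Replay of the one-parameter validation used by add_checks above.
def validate_check(check):
    sig = inspect.signature(check)
    if len(sig.parameters) != 1:
        raise TypeError(
            "PassManager check function should only take in one variable, a module"
        )
    return check

validate_check(lambda module: None)    # accepted: exactly one parameter
try:
    validate_check(lambda a, b: None)  # rejected: two parameters
except TypeError as exc:
    print(exc)
```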
  {
    "library": "numpy",
    "name": "angle",
    "source_code": "@array_function_dispatch(_angle_dispatcher)\ndef angle(z, deg=False):\n    z = asanyarray(z)\n    if issubclass(z.dtype.type, _nx.complexfloating):\n        zimag = z.imag\n        zreal = z.real\n    else:\n        zimag = 0\n        zreal = z\n    a = arctan2(zimag, zreal)\n    if deg:\n        a *= 180 / pi\n    return a",
    "docstring": "Return the angle of the complex argument. Parameters ---------- z : array_like A complex number or sequence of complex numbers. deg : bool, optional Return angle in degrees if True, radians if False (default). Returns ------- angle : ndarray or scalar The counterclockwise angle from the positive real axis on the complex plane in the range `arctan2arctan2` when the magnitude of the argument is zero. See example. Examples -------- >>> import numpy as np >>> np.angle([1.0, 1.0j, 1+1j]) # in radians array([ 0. , 1.57079633, 0.78539816]) # may vary >>> np.angle(1+1j, deg=True) # in degrees 45.0 >>> np.angle([0., -0., complex(0., -0.), complex(-0., -0.)]) # convention array([ 0. , 3.14159265, -0. , -3.14159265])",
    "type": "function",
    "file_path": "numpy\\numpy\\lib\\_function_base_impl.py",
    "ast_data": "FunctionDef name:angle arg:z arg:deg arguments arg arg Assign Call If Call Assign Assign Assign Assign Assign Call If Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_max_pool_flops",
    "source_code": "@ops.RegisterStatistics('MaxPool', 'flops')\ndef _max_pool_flops(graph, node):\n    return _pool_flops(graph, node)",
    "docstring": "Compute flops for MaxPool operation.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\profiler\\internal\\flops_registry.py",
    "ast_data": "FunctionDef name:_max_pool_flops arg:graph arg:node arguments arg arg Return return:yes Call Call"
  },
  {
    "library": "sphinx",
    "name": "__init__",
    "source_code": "def __init__(self, options: dict[str, str]) -> None:\n    self.options = options",
    "docstring": "Initialize the class with the options the user has given.",
    "type": "method",
    "file_path": "sphinx\\sphinx\\search\\__init__.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:options arguments arg arg Assign"
  },
  {
    "library": "matplotlib",
    "name": "get_aspect",
    "source_code": "def get_aspect(self):\n    return self._divider.get_aspect()",
    "docstring": "Return the aspect of the SubplotDivider.",
    "type": "method",
    "file_path": "matplotlib\\lib\\mpl_toolkits\\axes_grid1\\axes_grid.py",
    "ast_data": "FunctionDef name:get_aspect arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "kornia",
    "name": "kl_div_loss_2d",
    "source_code": "def kl_div_loss_2d(pred: Tensor, target: Tensor, reduction: str='mean') -> Tensor:\n    return _reduce_loss(_kl_div_2d(target, pred), reduction)",
    "docstring": "Calculate the Kullback-Leibler divergence loss between heatmaps. Args: pred: the input tensor with shape :math:. target: the target tensor with shape :math:. reduction: Specifies the reduction to apply to the output: ``: the output will be summed. Examples: >>> pred = torch.full((1, 1, 2, 4), 0.125) >>> loss = kl_div_loss_2d(pred, pred) >>> loss.item() 0.0",
    "type": "function",
    "file_path": "kornia\\kornia\\losses\\divergence.py",
    "ast_data": "FunctionDef name:kl_div_loss_2d arg:pred arg:target arg:reduction arguments arg arg arg Return return:yes Call Call"
  },
  {
    "library": "pandas",
    "name": "next_monday",
    "source_code": "def next_monday(dt: datetime) -> datetime:\n    if dt.weekday() == 5:\n        return dt + timedelta(2)\n    elif dt.weekday() == 6:\n        return dt + timedelta(1)\n    return dt",
    "docstring": "If holiday falls on Saturday, use following Monday instead; if holiday falls on Sunday, use Monday instead",
    "type": "function",
    "file_path": "pandas\\pandas\\tseries\\holiday.py",
    "ast_data": "FunctionDef name:next_monday arg:dt arguments arg If Compare Call Return return:yes Call If Compare Call Return return:yes Call Return return:yes"
  },
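A quick usage check of the weekend-shift rule above (dates chosen so that 2021-07-04 falls on a Sunday):

```python
from datetime import datetime
from pandas.tseries.holiday import next_monday

# 2021-07-04 was a Sunday, so the observed date shifts to Monday.
print(next_monday(datetime(2021, 7, 4)))  # 2021-07-05 00:00:00
# A weekday holiday passes through unchanged.
print(next_monday(datetime(2021, 7, 6)))  # 2021-07-06 00:00:00
```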
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, distributed_variables=None, name=None, **unused_kwargs):\n    if not ops.executing_eagerly_outside_functions():\n        raise ValueError('PackedDistributedVariable should be created in eager mode.')\n    if not distributed_variables:\n        raise ValueError('Expect a non-empty list of variables to pack.')\n    for i, var in enumerate(distributed_variables):\n        if not resource_variable_ops.is_resource_variable(var):\n            raise ValueError('Expect a list of ResourceVariables to pack, but the %d-th variable is %s' % (i, type(var)))\n    self._distributed_variables = distributed_variables\n    self._devices = [v.device for v in distributed_variables]\n    with ops.init_scope():\n        with ops.name_scope(name, 'Variable', skip_on_eager=False) as name:\n            handle = ops.pack_eager_tensors([var.handle for var in distributed_variables])\n            handle_name = ops.name_from_scope_name(name)\n            unique_id = '%s_%d' % (handle_name, ops.uid())\n            super(PackedDistributedVariable, self).__init__(trainable=distributed_variables[0].trainable, shape=distributed_variables[0].shape, dtype=distributed_variables[0].dtype, handle=handle, synchronization=distributed_variables[0].synchronization, constraint=distributed_variables[0].constraint, aggregation=distributed_variables[0].aggregation, distribute_strategy=distributed_variables[0]._distribute_strategy, name=name, unique_id=unique_id, handle_name=handle_name, graph_element=None, initial_value=None, initializer_op=None, is_initialized_op=None, cached_value=None, caching_device=None, is_distributed_variables=True)",
    "docstring": "Packs a list of variables which are distributed across devices. Args: distributed_variables: A list of distributed Variables to pack. name: Optional name for the variable. Defaults to and gets uniquified automatically.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\packed_distributed_variable.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:distributed_variables arg:name arguments arg arg arg arg If Call Raise Call If Raise Call For Call If Call Raise Call Call Assign Assign With Call With Call Assign Call Assign Call Assign Call Call Call"
  },
  {
    "library": "django",
    "name": "sanitize_strftime_format",
    "source_code": "@functools.lru_cache\ndef sanitize_strftime_format(fmt):\n    if datetime.date(1, 1, 1).strftime('%Y') == '0001':\n        return fmt\n    mapping = {'C': 2, 'F': 10, 'G': 4, 'Y': 4}\n    return re.sub('((?:^|[^%])(?:%%)*)%([CFGY])', lambda m: '%s%%0%s%s' % (m[1], mapping[m[2]], m[2]), fmt)",
    "docstring": "Ensure that certain specifiers are correctly padded with leading zeros. For years < 1000 specifiers %C, %F, %G, and %Y don't work as expected for strftime provided by glibc on Linux as they don't pad the year or century with leading zeros. Support for specifying the padding explicitly is available, however, which can be used to fix this issue. FreeBSD, macOS, and Windows do not support explicitly specifying the padding, but return four digit years (with leading zeros) as expected. This function checks whether the %Y produces a correctly padded string and, if not, makes the following substitutions: - %C → %02C - %F → %010F - %G → %04G - %Y → %04Y See for more details.",
    "type": "function",
    "file_path": "django\\django\\utils\\formats.py",
    "ast_data": "FunctionDef name:sanitize_strftime_format arg:fmt arguments arg If Compare Call Call Return return:yes Assign Return return:yes Call arguments arg"
  },
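The substitution in sanitize_strftime_format can be replayed standalone; note how the leading capture group leaves an escaped %%Y literal while a bare %Y gains explicit zero padding (the `fmt` string here is an arbitrary example, not taken from Django):

```python
import re

# Standalone replay of the padding substitution shown above.
mapping = {"C": 2, "F": 10, "G": 4, "Y": 4}
fmt = "%Y-%m-%d (%%Y stays literal)"
fixed = re.sub(
    r"((?:^|[^%])(?:%%)*)%([CFGY])",
    lambda m: "%s%%0%s%s" % (m[1], mapping[m[2]], m[2]),
    fmt,
)
print(fixed)  # %04Y-%m-%d (%%Y stays literal)
```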
  {
    "library": "matplotlib",
    "name": "can_composite",
    "source_code": "def can_composite(self):\n    trans = self.get_transform()\n    return self._interpolation != 'none' and trans.is_affine and trans.is_separable",
    "docstring": "Return whether the image can be composited with its neighbors.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\image.py",
    "ast_data": "FunctionDef name:can_composite arg:self arguments arg Assign Call Return return:yes BoolOp Compare"
  },
  {
    "library": "django",
    "name": "rename_table_references",
    "source_code": "def rename_table_references(self, old_table, new_table):\n    pass",
    "docstring": "Rename all references to the old_name to the new_table.",
    "type": "method",
    "file_path": "django\\django\\db\\backends\\ddl_references.py",
    "ast_data": "FunctionDef name:rename_table_references arg:self arg:old_table arg:new_table arguments arg arg arg"
  },
  {
    "library": "django",
    "name": "date_trunc_sql",
    "source_code": "def date_trunc_sql(self, lookup_type, sql, params, tzname=None):\n    raise NotImplementedError('subclasses of BaseDatabaseOperations may require a date_trunc_sql() method.')",
    "docstring": "Given a lookup_type of 'year', 'month', or 'day', return the SQL that truncates the given date or datetime field field_name to a date object with only the given specificity. If is provided, the given value is truncated in a specific timezone.",
    "type": "method",
    "file_path": "django\\django\\db\\backends\\base\\operations.py",
    "ast_data": "FunctionDef name:date_trunc_sql arg:self arg:lookup_type arg:sql arg:params arg:tzname arguments arg arg arg arg arg Raise Call"
  },
  {
    "library": "scikit-learn",
    "name": "_filter_filename",
    "source_code": "def _filter_filename(value, filter_dots=True):\n    value = unicodedata.normalize('NFKD', value).lower()\n    if filter_dots:\n        value = re.sub('[^\\\\w\\\\s-]+', '_', value)\n    else:\n        value = re.sub('[^.\\\\w\\\\s-]+', '_', value)\n    value = re.sub('[\\\\s-]+', '-', value)\n    return value.strip('-_.')",
    "docstring": "Derive a name that is safe to use as filename from the given string. Adapted from the function of django: Convert spaces or repeated dashes to single dashes. Replace characters that aren't alphanumerics, underscores, hyphens or dots by underscores. Convert to lowercase. Also strip leading and trailing whitespace, dashes, and underscores.",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\datasets\\_base.py",
    "ast_data": "FunctionDef name:_filter_filename arg:value arg:filter_dots arguments arg arg Assign Call Call If Assign Call Assign Call Assign Call Return return:yes Call"
  },
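A standalone replay of those sanitization steps, assuming a local copy of the function since the original is private to sklearn.datasets:

```python
import re
import unicodedata

# Copy of the sanitization pipeline above, runnable on its own.
def filter_filename(value, filter_dots=True):
    value = unicodedata.normalize("NFKD", value).lower()
    pattern = r"[^\w\s-]+" if filter_dots else r"[^.\w\s-]+"
    value = re.sub(pattern, "_", value)    # odd character runs -> underscores
    value = re.sub(r"[\s-]+", "-", value)  # whitespace/dash runs -> one dash
    return value.strip("-_.")

print(filter_filename("My Data Set (v2).csv"))         # my-data-set-_v2_csv
print(filter_filename("My Data Set (v2).csv", False))  # my-data-set-_v2_.csv
```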
  {
    "library": "tensorflow",
    "name": "variables",
    "source_code": "@property\ndef variables(self):\n    if save_context.in_save_context():\n        return [self._saving_variable]\n    return self._variables",
    "docstring": "The list of s that make up the shards of this object.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\sharded_variable.py",
    "ast_data": "FunctionDef name:variables arg:self arguments arg If Call Return return:yes Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "_extend_region",
    "source_code": "def _extend_region(steep_point, xward_point, start, min_samples):\n    n_samples = len(steep_point)\n    non_xward_points = 0\n    index = start\n    end = start\n    while index < n_samples:\n        if steep_point[index]:\n            non_xward_points = 0\n            end = index\n        elif not xward_point[index]:\n            non_xward_points += 1\n            if non_xward_points > min_samples:\n                break\n        else:\n            return end\n        index += 1\n    return end",
    "docstring": "Extend the area until it's maximal. It's the same function for both upward and downward reagions, depending on the given input parameters. Assuming: - steep_{upward/downward}: bool array indicating whether a point is a steep {upward/downward}; - upward/downward: bool array indicating whether a point is upward/downward; To extend an upward reagion, `` index.",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\cluster\\_optics.py",
    "ast_data": "FunctionDef name:_extend_region arg:steep_point arg:xward_point arg:start arg:min_samples arguments arg arg arg arg Assign Call Assign Assign Assign While Compare If Assign Assign If If Compare Return return:yes Return return:yes"
  },
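The loop in _extend_region is easiest to follow on toy boolean arrays; a self-contained replay (renamed extend_region for the demo):

```python
# Self-contained replay of the region-extension loop above on toy inputs.
def extend_region(steep_point, xward_point, start, min_samples):
    n_samples = len(steep_point)
    non_xward_points = 0
    index = end = start
    while index < n_samples:
        if steep_point[index]:
            non_xward_points = 0
            end = index                 # steep point: extend the region
        elif not xward_point[index]:
            non_xward_points += 1       # tolerated flat point
            if non_xward_points > min_samples:
                break                   # too many flat points in a row
        else:
            return end                  # opposite-direction point: stop
        index += 1
    return end

steep = [True, True, False, True]
xward = [True, True, False, True]
print(extend_region(steep, xward, start=0, min_samples=1))  # 3
```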
  {
    "library": "tensorflow",
    "name": "_compute_dtype",
    "source_code": "@property\ndef _compute_dtype(self):\n    return self._dtype_policy.compute_dtype",
    "docstring": "Deprecated alias of .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\base_layer.py",
    "ast_data": "FunctionDef name:_compute_dtype arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "do_encode",
    "source_code": "def do_encode(self, type_spec_value, encode_fn):\n    type_spec_class_name = type_spec_registry.get_name(type(type_spec_value))\n    type_spec_class = struct_pb2.TypeSpecProto.REGISTERED_TYPE_SPEC\n    warnings.warn('Encoding a StructuredValue with type %s; loading this StructuredValue will require that this type be imported and registered.' % type_spec_class_name)\n    type_state = type_spec_value._serialize()\n    num_flat_components = len(nest.flatten(type_spec_value._component_specs, expand_composites=True))\n    encoded_type_spec = struct_pb2.StructuredValue()\n    encoded_type_spec.type_spec_value.CopyFrom(struct_pb2.TypeSpecProto(type_spec_class=type_spec_class, type_state=encode_fn(type_state), type_spec_class_name=type_spec_class_name, num_flat_components=num_flat_components))\n    return encoded_type_spec",
    "docstring": "Returns an encoded proto for the given .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\saved_model\\nested_structure_coder.py",
    "ast_data": "FunctionDef name:do_encode arg:self arg:type_spec_value arg:encode_fn arguments arg arg arg Assign Call Call Assign Call Assign Call Assign Call Call Assign Call Call Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "DistributionParameter",
    "source_code": "class DistributionParameter(combinations_lib.ParameterModifier):\n\n    def modified_arguments(self, kwargs, requested_parameters):\n        use_var_policy = kwargs.get('use_var_policy', None)\n        distribution_arguments = {}\n        for k, v in kwargs.items():\n            if isinstance(v, NamedDistribution):\n                strategy = v.strategy\n                if use_var_policy:\n                    strategy.extended._use_var_policy = use_var_policy\n                distribution_arguments[k] = strategy\n        return distribution_arguments",
    "docstring": "Transforms arguments of type . Convert all arguments of type to the value of their property.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\combinations.py",
    "ast_data": "ClassDef name:DistributionParameter FunctionDef name:modified_arguments arg:self arg:kwargs arg:requested_parameters arguments arg arg arg Assign Call Assign For Call If Call Assign If Assign Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "is_chief",
    "source_code": "@property\ndef is_chief(self):\n    return self._is_chief_node",
    "docstring": "Returns whether the task is a chief node.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\distribute_coordinator.py",
    "ast_data": "FunctionDef name:is_chief arg:self arguments arg Return return:yes"
  },
  {
    "library": "cryptography",
    "name": "__copy__",
    "source_code": "@abc.abstractmethod\ndef __copy__(self) -> X25519PublicKey:\n    pass",
    "docstring": "Returns a copy.",
    "type": "method",
    "file_path": "cryptography\\src\\cryptography\\hazmat\\primitives\\asymmetric\\x25519.py",
    "ast_data": "FunctionDef name:__copy__ arg:self arguments arg"
  },
  {
    "library": "django",
    "name": "build_potfiles",
    "source_code": "def build_potfiles(self):\n    file_list = self.find_files('.')\n    self.remove_potfiles()\n    self.process_files(file_list)\n    potfiles = []\n    for path in self.locale_paths:\n        potfile = os.path.join(path, '%s.pot' % self.domain)\n        if not os.path.exists(potfile):\n            continue\n        args = ['msguniq', *self.msguniq_options, potfile]\n        msgs, errors, status = popen_wrapper(args)\n        if errors:\n            if status != STATUS_OK:\n                raise CommandError('errors happened while running msguniq\\n%s' % errors)\n            elif self.verbosity > 0:\n                self.stdout.write(errors)\n        msgs = normalize_eols(msgs)\n        with open(potfile, 'w', encoding='utf-8') as fp:\n            fp.write(msgs)\n        potfiles.append(potfile)\n    return potfiles",
    "docstring": "Build pot files and apply msguniq to them.",
    "type": "method",
    "file_path": "django\\django\\core\\management\\commands\\makemessages.py",
    "ast_data": "FunctionDef name:build_potfiles arg:self arguments arg Assign Call Call Call Assign For Assign Call If Call Assign Assign Call If If Compare Raise Call If Compare Call Assign Call With Call Call Call Return return:yes"
  },
  {
    "library": "django",
    "name": "Feed",
    "source_code": "class Feed(BaseFeed):\n    feed_type = GeoRSSFeed\n\n    def feed_extra_kwargs(self, obj):\n        return {'geometry': self._get_dynamic_attr('geometry', obj)}\n\n    def item_extra_kwargs(self, item):\n        return {'geometry': self._get_dynamic_attr('item_geometry', item)}",
    "docstring": "This is a subclass of the from . This allows users to define a and/or methods on their own subclasses so that geo-referenced information may placed in the feed.",
    "type": "class",
    "file_path": "django\\django\\contrib\\gis\\feeds.py",
    "ast_data": "ClassDef name:Feed Assign FunctionDef name:feed_extra_kwargs arg:self arg:obj arguments arg arg Return return:yes Call FunctionDef name:item_extra_kwargs arg:self arg:item arguments arg arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "inv",
    "source_code": "@property\ndef inv(self) -> 'Transform':\n    inv = None\n    if self._inv is not None:\n        inv = self._inv()\n    if inv is None:\n        inv = _InverseTransform(self)\n        self._inv = weakref.ref(inv)\n    return inv",
    "docstring": "Returns the inverse :class: of this transform. This should satisfy ``.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributions\\transforms.py",
    "ast_data": "FunctionDef name:inv arg:self arguments arg Assign If Compare Assign Call If Compare Assign Call Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "empty",
    "source_code": "@staticmethod\ndef empty(element_spec):\n    return _OptionalImpl(gen_optional_ops.optional_none(), element_spec)",
    "docstring": "Returns an that has no value. NOTE: This method takes an argument that defines the structure of the value that would be contained in the returned if it had a value. >>> optional = tf.experimental.Optional.empty( ... tf.TensorSpec(shape=(), dtype=tf.int32, name=None)) >>> print(optional.has_value()) tf.Tensor(False, shape=(), dtype=bool) Args: element_spec: A (nested) structure of objects matching the structure of an element of this optional. Returns: A with no value.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\data\\ops\\optional_ops.py",
    "ast_data": "FunctionDef name:empty arg:element_spec arguments arg Return return:yes Call Call"
  },
  {
    "library": "sphinx",
    "name": "isstaticmethod",
    "source_code": "def isstaticmethod(obj: Any, cls: Any=None, name: str | None=None) -> TypeIs[staticmethod[Any, Any]]:\n    if isinstance(obj, staticmethod):\n        return True\n    if cls and name:\n        sentinel = object()\n        for basecls in getattr(cls, '__mro__', [cls]):\n            meth = basecls.__dict__.get(name, sentinel)\n            if meth is not sentinel:\n                return isinstance(meth, staticmethod)\n    return False",
    "docstring": "Check if the object is a :class:.",
    "type": "function",
    "file_path": "sphinx\\sphinx\\util\\inspect.py",
    "ast_data": "FunctionDef name:isstaticmethod arg:obj arg:cls arg:name arguments arg arg arg If Call Return return:yes If BoolOp Assign Call For Call Assign Call If Compare Return return:yes Call Return return:yes"
  },
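The MRO walk matters because a subclass's __dict__ will not contain an inherited staticmethod; a quick demonstration of the lookup:

```python
# Demonstrates why isstaticmethod walks the MRO: the raw staticmethod
# object lives in the defining class's __dict__, not the subclass's.
class Base:
    @staticmethod
    def ping():
        return "pong"

class Child(Base):
    pass

sentinel = object()
for basecls in getattr(Child, "__mro__", [Child]):
    meth = basecls.__dict__.get("ping", sentinel)
    if meth is not sentinel:
        print(isinstance(meth, staticmethod))  # True (found on Base)
        break
```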
  {
    "library": "numpy",
    "name": "iscontiguous",
    "source_code": "def iscontiguous(self):\n    return self.flags['CONTIGUOUS']",
    "docstring": "Return a boolean indicating whether the data is contiguous. Parameters ---------- None Examples -------- >>> import numpy as np >>> x = np.ma.array([1, 2, 3]) >>> x.iscontiguous() True returns one of the flags of the masked array: >>> x.flags C_CONTIGUOUS : True F_CONTIGUOUS : True OWNDATA : False WRITEABLE : True ALIGNED : True WRITEBACKIFCOPY : False",
    "type": "method",
    "file_path": "numpy\\numpy\\ma\\core.py",
    "ast_data": "FunctionDef name:iscontiguous arg:self arguments arg Return return:yes"
  },
  {
    "library": "django",
    "name": "SimpleAdminConfig",
    "source_code": "class SimpleAdminConfig(AppConfig):\n    default_auto_field = 'django.db.models.AutoField'\n    default_site = 'django.contrib.admin.sites.AdminSite'\n    name = 'django.contrib.admin'\n    verbose_name = _('Administration')\n\n    def ready(self):\n        checks.register(check_dependencies, checks.Tags.admin)\n        checks.register(check_admin_app, checks.Tags.admin)",
    "docstring": "Simple AppConfig which does not do automatic discovery.",
    "type": "class",
    "file_path": "django\\django\\contrib\\admin\\apps.py",
    "ast_data": "ClassDef name:SimpleAdminConfig Assign Assign Assign Assign Call FunctionDef name:ready arg:self arguments arg Call Call"
  },
  {
    "library": "tensorflow",
    "name": "stateless_multinomial",
    "source_code": "@tf_export(v1=['random.stateless_multinomial'])\n@dispatch.add_dispatch_support\n@deprecation.deprecated(date=None, instructions='Use `tf.random.stateless_categorical` instead.')\ndef stateless_multinomial(logits, num_samples, seed, output_dtype=dtypes.int64, name=None):\n    with ops.name_scope(name, 'stateless_multinomial', [logits, seed]):\n        return stateless_multinomial_categorical_impl(logits, num_samples, output_dtype, seed)",
    "docstring": "Draws deterministic pseudorandom samples from a multinomial distribution. This is a stateless version of : if run twice with the same seeds and shapes, it will produce the same pseudorandom numbers. The output is consistent across multiple runs on the same hardware (and between CPU and GPU), but may change between versions of TensorFlow or on non-CPU/GPU hardware. Example: Args: logits: 2-D Tensor with shape . Each slice represents the unnormalized log-probabilities for all classes. num_samples: 0-D. Number of independent samples to draw for each row slice. seed: A shape [2] Tensor, the seed to the random number generator. Must have dtype or . (When using XLA, only is allowed.) output_dtype: The integer type of the output: or . Defaults to . name: Optional name for the operation. Returns: The drawn samples of shape .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\stateless_random_ops.py",
    "ast_data": "FunctionDef name:stateless_multinomial arg:logits arg:num_samples arg:seed arg:output_dtype arg:name arguments arg arg arg arg arg With Call Return return:yes Call Call Call"
  },
  {
    "library": "scrapy",
    "name": "write",
    "source_code": "def write(self, data: bytes) -> int:\n    return cast(int, self.head_plugin.write(data))",
    "docstring": "Uses all the declared plugins to process data first, then writes the processed data to target file. :param data: data passed to be written to target file :type data: bytes :return: returns number of bytes written :rtype: int",
    "type": "method",
    "file_path": "scrapy\\scrapy\\extensions\\postprocessing.py",
    "ast_data": "FunctionDef name:write arg:self arg:data arguments arg arg Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "ObjectIdentityDictionary",
    "source_code": "class ObjectIdentityDictionary(collections.abc.MutableMapping):\n    __slots__ = ['_storage']\n\n    def __init__(self):\n        self._storage = {}\n\n    def _wrap_key(self, key):\n        return _ObjectIdentityWrapper(key)\n\n    def __getitem__(self, key):\n        return self._storage[self._wrap_key(key)]\n\n    def __setitem__(self, key, value):\n        self._storage[self._wrap_key(key)] = value\n\n    def __delitem__(self, key):\n        del self._storage[self._wrap_key(key)]\n\n    def __len__(self):\n        return len(self._storage)\n\n    def __iter__(self):\n        for key in self._storage:\n            yield key.unwrapped\n\n    def __repr__(self):\n        return 'ObjectIdentityDictionary(%s)' % repr(self._storage)",
    "docstring": "A mutable mapping data structure which compares using \"is\". This is necessary because we have trackable objects (_ListWrapper) which have behavior identical to built-in Python lists (including being unhashable and comparing based on the equality of their contents by default).",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\utils\\object_identity.py",
    "ast_data": "ClassDef name:ObjectIdentityDictionary Assign FunctionDef name:__init__ arg:self arguments arg Assign FunctionDef name:_wrap_key arg:self arg:key arguments arg arg Return return:yes Call FunctionDef name:__getitem__ arg:self arg:key arguments arg arg Return return:yes Call FunctionDef name:__setitem__ arg:self arg:key arg:value arguments arg arg arg Assign Call FunctionDef name:__delitem__ arg:self arg:key arguments arg arg Call FunctionDef name:__len__ arg:self arguments arg Return return:yes Call FunctionDef name:__iter__ arg:self arguments arg For FunctionDef name:__repr__ arg:self arguments arg Return return:yes Call"
  },
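The docstring's point — equal-by-contents but distinct objects must stay distinct keys — can be shown with a minimal id()-based wrapper (IdKey is an illustrative stand-in for _ObjectIdentityWrapper):

```python
# IdKey is an illustrative stand-in for _ObjectIdentityWrapper: it hashes
# and compares on object identity rather than contents.
class IdKey:
    __slots__ = ("obj",)

    def __init__(self, obj):
        self.obj = obj

    def __hash__(self):
        return id(self.obj)

    def __eq__(self, other):
        return isinstance(other, IdKey) and self.obj is other.obj

a, b = [1, 2], [1, 2]          # equal contents, distinct (unhashable) objects
storage = {IdKey(a): "first", IdKey(b): "second"}
print(len(storage))            # 2: keyed by identity, not contents
print(storage[IdKey(a)])       # first
```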
  {
    "library": "pandas",
    "name": "write",
    "source_code": "def write(self, obj, **kwargs) -> None:\n    raise NotImplementedError('WORMTable needs to implement write')",
    "docstring": "write in a format that we can search later on (but cannot append to): write out the indices and the values using _write_array (e.g. a CArray) create an indexing table so that we can search",
    "type": "method",
    "file_path": "pandas\\pandas\\io\\pytables.py",
    "ast_data": "FunctionDef name:write arg:self arg:obj arguments arg arg arg Raise Call"
  },
  {
    "library": "numpy",
    "name": "chebadd",
    "source_code": "def chebadd(c1, c2):\n    return pu._add(c1, c2)",
    "docstring": "Add one Chebyshev series to another. Returns the sum of two Chebyshev series + . The arguments are sequences of coefficients ordered from lowest order term to highest, i.e., [1,2,3] represents the series ``. Parameters ---------- c1, c2 : array_like 1-D arrays of Chebyshev series coefficients ordered from low to high. Returns ------- out : ndarray Array representing the Chebyshev series of their sum. See Also -------- chebsub, chebmulx, chebmul, chebdiv, chebpow Notes ----- Unlike multiplication, division, etc., the sum of two Chebyshev series is a Chebyshev series (without having to \"reproject\" the result onto the basis set) so addition, just like that of \"standard\" polynomials, is simply \"component-wise.\" Examples -------- >>> from numpy.polynomial import chebyshev as C >>> c1 = (1,2,3) >>> c2 = (3,2,1) >>> C.chebadd(c1,c2) array([4., 4., 4.])",
    "type": "function",
    "file_path": "numpy\\numpy\\polynomial\\chebyshev.py",
    "ast_data": "FunctionDef name:chebadd arg:c1 arg:c2 arguments arg arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "normalize_start_end",
    "source_code": "@classmethod\ndef normalize_start_end(cls, x, dim, start, end):\n    sizevars = V.graph.sizevars\n    dim_size = x.get_size()[dim]\n    if any((free_unbacked_symbols(x) for x in (start, end, dim_size))):\n        min_func = sympy.Min\n        max_func = sympy.Max\n    else:\n        min_func = sizevars.evaluate_min\n        max_func = sizevars.evaluate_max\n\n    def clamp(x, lower, upper):\n        clamped_lower = x if sizevars.statically_known_geq(x, lower) else max_func(x, lower)\n        clamped_full = clamped_lower if sizevars.statically_known_leq(clamped_lower, upper) else min_func(clamped_lower, upper)\n        return clamped_full\n\n    def clamp_wrap(val, lower, upper, default):\n        if val is None:\n            return default\n        val = cls.handle_negative_index(val, dim_size)\n        return clamp(val, lower, upper)\n    start = clamp_wrap(start, 0, dim_size, 0)\n    end = clamp_wrap(end, start, dim_size, dim_size)\n    return (start, end)",
    "docstring": "Normalize start and end such that both are in the range [0, x.get_size()[dim]] and start <= end.",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\ir.py",
    "ast_data": "FunctionDef name:normalize_start_end arg:cls arg:x arg:dim arg:start arg:end arguments arg arg arg arg arg Assign Assign Call If Call Call Assign Assign Assign Assign FunctionDef name:clamp arg:x arg:lower arg:upper arguments arg arg arg Assign Call Call Assign Call Call Return return:yes FunctionDef name:clamp_wrap arg:val arg:lower arg:upper arg:default arguments arg arg arg arg If Compare Return return:yes Assign Call Return return:yes Call Assign Call Assign Call Return return:yes"
  },
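Stripped of sympy and the sizevars machinery, the clamping in normalize_start_end reduces to plain min/max with one round of negative-index wrapping; a sketch under that simplification:

```python
# Plain-Python sketch of the clamping above (no sympy/sizevars): wrap
# negative indices once, then force 0 <= start <= end <= dim_size.
def normalize_start_end(dim_size, start, end):
    def clamp_wrap(val, lower, upper, default):
        if val is None:
            return default
        if val < 0:
            val += dim_size            # handle_negative_index equivalent
        return max(lower, min(val, upper))

    start = clamp_wrap(start, 0, dim_size, 0)
    end = clamp_wrap(end, start, dim_size, dim_size)
    return start, end

print(normalize_start_end(10, -3, None))  # (7, 10)
print(normalize_start_end(10, 8, 4))      # (8, 8) -- end clamped up to start
```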
  {
    "library": "tensorflow",
    "name": "replace_capture_with_deferred_capture",
    "source_code": "def replace_capture_with_deferred_capture(self, tensor, closure, spec, placeholder=None, default_value=None):\n    capture_index = None\n    for i, capture in enumerate(self._captured_inputs):\n        if id(tensor) == id(capture):\n            capture_index = i\n            break\n    if placeholder is None:\n        if capture_index is None:\n            raise ValueError(f\"Did not find `tensor` argument {tensor} in the ConcreteFunction's captured inputs list, and did not receive a placeholder argument. Thus we're unable to infer the internal placeholder. \")\n        placeholder = self.inputs[-len(self._captured_inputs) + capture_index]\n    if not (spec.is_compatible_with(tensor) or spec.is_compatible_with(placeholder)):\n        raise ValueError(f\"Attempting to substitute closure with spec {spec} that's incompatible with the original capture {tensor} or the internal placeholder {placeholder}.\")\n    self._func_graph.replace_capture_with_deferred_capture(tensor=tensor, closure=closure, spec=spec, placeholder=placeholder, default_value=default_value)\n    if capture_index is not None:\n        self._captured_inputs[capture_index] = closure",
    "docstring": "Replaces existing capture with a deferred capture . This API replaces the capture from the concrete function's captured inputs list, and places the deferred capture in its spot so the order of captured inputs is preserved. This is important because the old and the new will have the same internal placeholder, which can be passed through the argument, or skipped, in which case we find the placeholder from internal inputs by indexing in the external captured inputs list. Thus, it is important that the new deferred capture has output spec (specified by the argument) compatible with the internal placeholder () and the original capture (). For example, Args: tensor: Tensor already captured. This should be listed in concrete_function.captured_inputs except when it's empty such as when the concrete function is restored from SavedModel. closure: function which takes no arguments, to be evaluated at function call time, returning a nest of tensors compatible with . spec: nest of TypeSpec for the value to capture. placeholder: optional. The internal placeholder corresponding to the captured and the new . default_value: optional value to use in environments that cannot safely evaluate closure.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\polymorphic_function\\concrete_function.py",
    "ast_data": "FunctionDef name:replace_capture_with_deferred_capture arg:self arg:tensor arg:closure arg:spec arg:placeholder arg:default_value arguments arg arg arg arg arg arg Assign For Call If Compare Call Call Assign If Compare If Compare Raise Call Assign Call If BoolOp Call Call Raise Call Call If Compare Assign"
  },
  {
    "library": "tensorflow",
    "name": "__mod__",
    "source_code": "def __mod__(self, other):\n    other = as_dimension(other)\n    if self._value is None or other.value is None:\n        return Dimension(None)\n    else:\n        return Dimension(self._value % other.value)",
    "docstring": "Returns modulo . Dimension modulo are computed as follows: Args: other: Another Dimension, or a value accepted by . Returns: A Dimension whose value is modulo .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\tensor_shape.py",
    "ast_data": "FunctionDef name:__mod__ arg:self arg:other arguments arg arg Assign Call If BoolOp Compare Compare Return return:yes Call Return return:yes Call"
  },
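The None-propagation pattern above (an unknown dimension stays unknown through arithmetic) in isolation:

```python
# Sketch of the unknown-dimension rule above: None propagates through
# modulo instead of raising.
def dim_mod(a, b):
    if a is None or b is None:
        return None
    return a % b

print(dim_mod(10, 3))    # 1
print(dim_mod(None, 3))  # None: an unknown dimension stays unknown
```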
  {
    "library": "pytorch",
    "name": "_init_zero_for_overlap",
    "source_code": "def _init_zero_for_overlap(self) -> None:\n    assert self._overlap_with_ddp, '`_init_zero_for_overlap()` should only be called when `overlap_with_ddp=True`'\n    self._overlap_info.status = _OverlapStatus.INITIALIZED\n    self._clear_cache()\n    self._partition_parameters(self._overlap_info.params_per_rank)\n    self._build_ddp_param_buckets()\n    self._init_local_optimizer()",
    "docstring": "Perform a delayed initialization of the local optimizer and the supporting data structures.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\optim\\zero_redundancy_optimizer.py",
    "ast_data": "FunctionDef name:_init_zero_for_overlap arg:self arguments arg Assign Call Call Call Call"
  },
  {
    "library": "django",
    "name": "get_context_data",
    "source_code": "def get_context_data(self, *, object_list=None, **kwargs):\n    queryset = object_list if object_list is not None else self.object_list\n    page_size = self.get_paginate_by(queryset)\n    context_object_name = self.get_context_object_name(queryset)\n    if page_size:\n        paginator, page, queryset, is_paginated = self.paginate_queryset(queryset, page_size)\n        context = {'paginator': paginator, 'page_obj': page, 'is_paginated': is_paginated, 'object_list': queryset}\n    else:\n        context = {'paginator': None, 'page_obj': None, 'is_paginated': False, 'object_list': queryset}\n    if context_object_name is not None:\n        context[context_object_name] = queryset\n    context.update(kwargs)\n    return super().get_context_data(**context)",
    "docstring": "Get the context for this view.",
    "type": "method",
    "file_path": "django\\django\\views\\generic\\list.py",
    "ast_data": "FunctionDef name:get_context_data arg:self arguments arg arg arg Assign Compare Assign Call Assign Call If Assign Call Assign Assign If Compare Assign Call Return return:yes Call Call"
  },
  {
    "library": "matplotlib",
    "name": "on_scroll",
    "source_code": "def on_scroll(self, event):\n    if not self.ignore(event):\n        self._on_scroll(event)",
    "docstring": "Mouse scroll event handler and validator.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\widgets.py",
    "ast_data": "FunctionDef name:on_scroll arg:self arg:event arguments arg arg If Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_clip",
    "source_code": "def _clip(params, ids, max_norm):\n\n    def _rank(x):\n        rank = ops.convert_to_tensor(x).get_shape().ndims\n        if rank:\n            return (rank, True)\n        else:\n            return (array_ops.rank(x), False)\n    if max_norm is None:\n        return params\n    ids_rank, ids_static = _rank(ids)\n    params_rank, params_static = _rank(params)\n    return clip_ops.clip_by_norm(params, max_norm, axes=list(range(ids_rank, params_rank)) if ids_static and params_static else math_ops.range(ids_rank, params_rank))",
    "docstring": "Helper function for _embedding_lookup_and_transform. This function optionally clips embeddings to an l2-norm of max_norm. Args: params: A of embeddings retrieved by . ids: The argument that was passed to . max_norm: If not , each embedding is clipped if its l2-norm is larger than this value. Returns: A with the same type as .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\embedding_ops.py",
    "ast_data": "FunctionDef name:_clip arg:params arg:ids arg:max_norm arguments arg arg arg FunctionDef name:_rank arg:x arguments arg Assign Call Call If Return return:yes Return return:yes Call If Compare Return return:yes Assign Call Assign Call Return return:yes Call BoolOp Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "guarantee_const",
    "source_code": "@tf_export('guarantee_const')\n@deprecation.deprecated(None, 'Not for public use.')\ndef guarantee_const(input, name=None):\n    return gen_array_ops.guarantee_const(input=input, name=name)",
    "docstring": "Promise to the TF runtime that the input tensor is a constant. The runtime is then free to make optimizations based on this. Returns the input tensor without modification. Args: input: A . name: A name for this operation. Returns: A . Has the same dtype as .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\array_ops.py",
    "ast_data": "FunctionDef name:guarantee_const arg:input arg:name arguments arg arg Return return:yes Call Call Call"
  },
  {
    "library": "numpy",
    "name": "get_flags_free",
    "source_code": "def get_flags_free(self):\n    return []",
    "docstring": "List of Fortran 90 free format specific flags.",
    "type": "method",
    "file_path": "numpy\\numpy\\distutils\\fcompiler\\__init__.py",
    "ast_data": "FunctionDef name:get_flags_free arg:self arguments arg Return return:no"
  },
  {
    "library": "pandas",
    "name": "round",
    "source_code": "def round(self, decimals: int=0, *args, **kwargs) -> Self:\n    return type(self)(pc.round(self._pa_array, ndigits=decimals))",
    "docstring": "Round each value in the array a to the given number of decimals. Parameters ---------- decimals : int, default 0 Number of decimal places to round to. If decimals is negative, it specifies the number of positions to the left of the decimal point. *args, **kwargs Additional arguments and keywords have no effect. Returns ------- ArrowExtensionArray Rounded values of the ArrowExtensionArray. See Also -------- DataFrame.round : Round values of a DataFrame. Series.round : Round values of a Series.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\arrays\\arrow\\array.py",
    "ast_data": "FunctionDef name:round arg:self arg:decimals arguments arg arg arg arg Return return:yes Call Call Call"
  },
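The method delegates straight to pyarrow.compute.round; the same kernel can be invoked directly on an Arrow array:

```python
import pyarrow as pa
import pyarrow.compute as pc

# Direct use of the pyarrow kernel that ArrowExtensionArray.round wraps.
arr = pa.array([1.234, 5.678, None])
print(pc.round(arr, ndigits=1))  # [1.2, 5.7, null]
```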
  {
    "library": "pandas",
    "name": "_repr_fits_vertical_",
    "source_code": "def _repr_fits_vertical_(self) -> bool:\n    max_rows = get_option('display.max_rows')\n    return len(self) <= max_rows",
    "docstring": "Check length against max_rows.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\frame.py",
    "ast_data": "FunctionDef name:_repr_fits_vertical_ arg:self arguments arg Assign Call Return return:yes Compare Call"
  },
  {
    "library": "scipy",
    "name": "_wrapped_func",
    "source_code": "def _wrapped_func(*fargs):\n    _wrapped_func.nfev += 1\n    return func(*fargs)",
    "docstring": "Wrapped to track the number of times the function has been called.",
    "type": "function",
    "file_path": "scipy\\scipy\\optimize\\_minpack_py.py",
    "ast_data": "FunctionDef name:_wrapped_func arguments arg Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "requires_vector_input",
    "source_code": "@property\ndef requires_vector_input(self):\n    return np.any([kernel.requires_vector_input for kernel in self.kernels])",
    "docstring": "Returns whether the kernel is defined on discrete structures.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\gaussian_process\\kernels.py",
    "ast_data": "FunctionDef name:requires_vector_input arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "try_evaluate_constant",
    "source_code": "def try_evaluate_constant(tensor):\n    with tensor.graph._c_graph.get() as c_graph:\n        return c_api.TF_TryEvaluateConstant_wrapper(c_graph, tensor._as_tf_output())",
    "docstring": "Evaluates a symbolic tensor as a constant. Args: tensor: a symbolic Tensor. Returns: ndarray if the evaluation succeeds, or None if it fails.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\tensor_util.py",
    "ast_data": "FunctionDef name:try_evaluate_constant arg:tensor arguments arg With Call Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "trace_save_and_restore",
    "source_code": "def trace_save_and_restore(obj):\n    legacy_name = saveable_compat.get_saveable_name(obj)\n    obj_save_fn = obj._serialize_to_tensors\n    obj_restore_fn = obj._restore_from_tensors\n    if isinstance(obj_save_fn, defun.ConcreteFunction):\n        concrete_save = obj_save_fn\n    else:\n\n        @def_function.function\n        def save_fn():\n            tensor_dict = obj_save_fn()\n            if any((isinstance(v, tensor_callable.Callable) for v in tensor_dict.values())):\n                raise NotImplementedError(f'Unable to export SavedModel with object of type {type(obj)} because it returns a Callable in `_serialize_to_tensors`. If you need this functionality please file a feature request.')\n            if legacy_name:\n                return {f'{legacy_name}{key}': value for key, value in tensor_dict.items()}\n            return tensor_dict\n        concrete_save = save_fn.get_concrete_function()\n    if isinstance(obj_restore_fn, defun.ConcreteFunction):\n        concrete_restore = obj_restore_fn\n    else:\n\n        @def_function.function\n        def restore_fn(restored_tensors):\n            if legacy_name:\n                restored_tensors = {key[len(legacy_name):]: value for key, value in restored_tensors.items()}\n            obj_restore_fn(restored_tensors)\n        concrete_restore = restore_fn.get_concrete_function(concrete_save.structured_outputs)\n    return (concrete_save, concrete_restore)",
    "docstring": "Traces serialize- and restore-from-tensors functions. Args: obj: A object. Returns: A concrete Function.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\saved_model\\tracing_utils.py",
    "ast_data": "FunctionDef name:trace_save_and_restore arg:obj arguments arg Assign Call Assign Assign If Call Assign FunctionDef name:save_fn arguments Assign Call If Call Call Call Raise Call Call If Return return:yes Call Return return:yes Assign Call If Call Assign FunctionDef name:restore_fn arg:restored_tensors arguments arg If Assign Call Call Call Assign Call Return return:yes"
  },
  {
    "library": "pandas",
    "name": "_maybe_unbox_datetimelike",
    "source_code": "def _maybe_unbox_datetimelike(value: Scalar, dtype: DtypeObj) -> Scalar:\n    if is_valid_na_for_dtype(value, dtype):\n        value = dtype.type('NaT', 'ns')\n    elif isinstance(value, Timestamp):\n        if value.tz is None:\n            value = value.to_datetime64()\n        elif not isinstance(dtype, DatetimeTZDtype):\n            raise TypeError('Cannot unbox tzaware Timestamp to tznaive dtype')\n    elif isinstance(value, Timedelta):\n        value = value.to_timedelta64()\n    _disallow_mismatched_datetimelike(value, dtype)\n    return value",
    "docstring": "Convert a Timedelta or Timestamp to timedelta64 or datetime64 for setting into a numpy array. Failing to unbox would risk dropping nanoseconds. Notes ----- Caller is responsible for checking dtype.kind in \"mM\"",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\dtypes\\cast.py",
    "ast_data": "FunctionDef name:_maybe_unbox_datetimelike arg:value arg:dtype arguments arg arg If Call Assign Call If Call If Compare Assign Call If Call Raise Call If Call Assign Call Call Return return:yes"
  },
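The nanosecond-preservation point is easy to verify: the unboxed numpy scalars keep ns precision that a plain datetime round-trip would drop:

```python
import pandas as pd

# The conversions used above keep nanosecond precision intact.
ts = pd.Timestamp("2021-01-01 00:00:00.000000001")
print(ts.to_datetime64())    # 2021-01-01T00:00:00.000000001

td = pd.Timedelta(nanoseconds=1)
print(td.to_timedelta64())   # 1 nanoseconds
```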
  {
    "library": "tensorflow",
    "name": "_check_num_rows_possibly_add_asserts",
    "source_code": "def _check_num_rows_possibly_add_asserts(self):\n    if self._assert_proper_shapes:\n        self._num_rows = control_flow_ops.with_dependencies([check_ops.assert_rank(self._num_rows, 0, message='Argument num_rows must be a 0-D Tensor.'), check_ops.assert_non_negative(self._num_rows, message='Argument num_rows must be non-negative.')], self._num_rows)\n    if not self._num_rows.dtype.is_integer:\n        raise TypeError('Argument num_rows must be integer type.  Found: %s' % self._num_rows)\n    num_rows_static = self._num_rows_static\n    if num_rows_static is None:\n        return\n    if num_rows_static.ndim != 0:\n        raise ValueError('Argument num_rows must be a 0-D Tensor.  Found: %s' % num_rows_static)\n    if num_rows_static < 0:\n        raise ValueError('Argument num_rows must be non-negative.  Found: %s' % num_rows_static)",
    "docstring": "Static check of init arg , possibly add asserts.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\linalg\\linear_operator_identity.py",
    "ast_data": "FunctionDef name:_check_num_rows_possibly_add_asserts arg:self arguments arg If Assign Call Call Call If Raise Call Assign If Compare Return return:no If Compare Raise Call If Compare Raise Call"
  },
  {
    "library": "pytorch",
    "name": "_PhantomRoot",
    "source_code": "@dataclasses.dataclass\nclass _PhantomRoot:\n    name: str\n    constraint_range: 'StrictMinMaxConstraint'\n    val: int",
    "docstring": "This represents the root of a derived Dim where the root does not directly specify the shape of any input dimension, but the derived Dim does. e.g., the input shapes 2*dim and dim + 1 are related via a \"phantom\" dim. The fields , , and carried by a phantom root help create a symbol for it. Any derived dims with this phantom root are backed by expressions over this symbol.",
    "type": "class",
    "file_path": "pytorch\\torch\\export\\dynamic_shapes.py",
    "ast_data": "ClassDef name:_PhantomRoot"
  },
  {
    "library": "pandas",
    "name": "__getitem__",
    "source_code": "def __getitem__(self, item: PositionalIndexer) -> Self | Any:\n    raise AbstractMethodError(self)",
    "docstring": "Select a subset of self. Parameters ---------- item : int, slice, or ndarray * int: The position in 'self' to get. * slice: A slice object, where 'start', 'stop', and 'step' are integers or None * ndarray: A 1-d boolean NumPy ndarray the same length as 'self' * list[int]: A list of int Returns ------- item : scalar or ExtensionArray Notes ----- For scalar `` is True.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\arrays\\base.py",
    "ast_data": "FunctionDef name:__getitem__ arg:self arg:item arguments arg arg Raise Call"
  },
  {
    "library": "django",
    "name": "localdate",
    "source_code": "def localdate(value=None, timezone=None):\n    return localtime(value, timezone).date()",
    "docstring": "Convert an aware datetime to local time and return the value's date. Only aware datetimes are allowed. When value is omitted, it defaults to now(). Local time is defined by the current time zone, unless another time zone is specified.",
    "type": "function",
    "file_path": "django\\django\\utils\\timezone.py",
    "ast_data": "FunctionDef name:localdate arg:value arg:timezone arguments arg arg Return return:yes Call Call"
  },
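A minimal stand-in for the conversion, assuming an explicit zoneinfo zone in place of Django's current-timezone machinery; note how the local date can differ from the UTC date:

```python
from datetime import datetime, timezone
from zoneinfo import ZoneInfo

# Stand-in for localdate() with an explicit zone instead of Django's
# get_current_timezone(): convert to local time, then take the date.
aware = datetime(2021, 12, 31, 23, 30, tzinfo=timezone.utc)
local = aware.astimezone(ZoneInfo("Australia/Sydney"))
print(local.date())  # 2022-01-01 -- already New Year's Day in Sydney
```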
  {
    "library": "kornia",
    "name": "to_tensorflow",
    "source_code": "def to_tensorflow() -> ModuleType:\n    return ivy.transpile(kornia, source='torch', target='tensorflow')",
    "docstring": "Convert Kornia to TensorFlow. Transpiles the Kornia library to TensorFlow using [ivy]( The transpilation process occurs lazily, so the transpilation on a given kornia function/class will only occur when it's called or instantiated for the first time. This will make any functions/classes slow when being used for the first time, but any subsequent uses should be as fast as expected. Return: The Kornia library transpiled to TensorFlow Example: .. highlight:: python .. code-block:: python import kornia tf_kornia = kornia.to_tensorflow() import tensorflow as tf input = tf.random.normal((2, 3, 4, 5)) gray = tf_kornia.color.gray.rgb_to_grayscale(input)",
    "type": "function",
    "file_path": "kornia\\kornia\\transpiler\\transpiler.py",
    "ast_data": "FunctionDef name:to_tensorflow arguments Return return:yes Call"
  },
  {
    "library": "seaborn",
    "name": "dogplot",
    "source_code": "def dogplot(*_, **__):\n    from urllib.request import urlopen\n    from io import BytesIO\n    url = 'https://github.com/mwaskom/seaborn-data/raw/master/png/img{}.png'\n    pic = np.random.randint(2, 7)\n    data = BytesIO(urlopen(url.format(pic)).read())\n    img = plt.imread(data)\n    f, ax = plt.subplots(figsize=(5, 5), dpi=100)\n    f.subplots_adjust(0, 0, 1, 1)\n    ax.imshow(img)\n    ax.set_axis_off()",
    "docstring": "Who's a good boy?",
    "type": "function",
    "file_path": "seaborn\\seaborn\\miscplot.py",
    "ast_data": "FunctionDef name:dogplot arguments arg arg Assign Assign Call Assign Call Call Call Call Assign Call Assign Call Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "NullContextVariable",
    "source_code": "class NullContextVariable(ContextWrappingVariable):\n\n    def __init__(self, target_values=None, **kwargs) -> None:\n        super().__init__(target_values=target_values, **kwargs)\n\n    def enter(self, tx):\n        return variables.ConstantVariable.create(None)\n\n    def exit(self, tx: 'InstructionTranslator', *args):\n        return variables.ConstantVariable.create(None)\n\n    def module_name(self):\n        return 'contextlib'\n\n    def fn_name(self):\n        return 'nullcontext'",
    "docstring": "This class represents Python contextlib.nullcontext.",
    "type": "class",
    "file_path": "pytorch\\torch\\_dynamo\\variables\\ctx_manager.py",
    "ast_data": "ClassDef name:NullContextVariable FunctionDef name:__init__ arg:self arg:target_values arguments arg arg arg Call Call FunctionDef name:enter arg:self arg:tx arguments arg arg Return return:yes Call FunctionDef name:exit arg:self arg:tx arguments arg arg arg Return return:yes Call FunctionDef name:module_name arg:self arguments arg Return return:yes FunctionDef name:fn_name arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "get_weights",
    "source_code": "def get_weights(self):\n    return backend.batch_get_value(self.weights)",
    "docstring": "Returns the current value of the weights of the optimizer. Returns: A list of numpy arrays.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\optimizer_v1.py",
    "ast_data": "FunctionDef name:get_weights arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "set_3d_properties",
    "source_code": "def set_3d_properties(self, z=0, zdir='z', axlim_clip=False):\n    self._z = z\n    self._dir_vec = get_dir_vector(zdir)\n    self._axlim_clip = axlim_clip\n    self.stale = True",
    "docstring": "Set the *z* position and direction of the text. Parameters ---------- z : float The z-position in 3D space. zdir : {'x', 'y', 'z', 3-tuple} The direction of the text. Default: 'z'. See for a description of the values. axlim_clip : bool, default: False Whether to hide text outside the axes view limits. .. versionadded:: 3.10",
    "type": "method",
    "file_path": "matplotlib\\lib\\mpl_toolkits\\mplot3d\\art3d.py",
    "ast_data": "FunctionDef name:set_3d_properties arg:self arg:z arg:zdir arg:axlim_clip arguments arg arg arg arg Assign Assign Call Assign Assign"
  },
  {
    "library": "pandas",
    "name": "book",
    "source_code": "@property\ndef book(self) -> Workbook:\n    return self._book",
    "docstring": "Book instance of class openpyxl.workbook.Workbook. This attribute can be used to access engine-specific features.",
    "type": "method",
    "file_path": "pandas\\pandas\\io\\excel\\_openpyxl.py",
    "ast_data": "FunctionDef name:book arg:self arguments arg Return return:yes"
  },
  {
    "library": "django",
    "name": "auto_id",
    "source_code": "@property\ndef auto_id(self):\n    auto_id = self.form.auto_id\n    if auto_id and '%s' in str(auto_id):\n        return auto_id % self.html_name\n    elif auto_id:\n        return self.html_name\n    return ''",
    "docstring": "Calculate and return the ID attribute for this BoundField, if the associated Form has specified auto_id. Return an empty string otherwise.",
    "type": "method",
    "file_path": "django\\django\\forms\\boundfield.py",
    "ast_data": "FunctionDef name:auto_id arg:self arguments arg Assign If BoolOp Compare Call Return return:yes If Return return:yes Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_save_and_write_assets",
    "source_code": "def _save_and_write_assets(self, meta_graph_def, assets_list=None):\n    write_fn = functools.partial(_add_asset_to_metagraph, meta_graph_def)\n    asset_filename_map = _maybe_save_assets(write_fn, assets_list)\n    if not asset_filename_map:\n        tf_logging.info('No assets to write.')\n        return\n    copy_assets_to_destination_dir(asset_filename_map, self._export_dir, self._saved_asset_files)",
    "docstring": "Saves asset to the meta graph and writes asset files to disk. Args: meta_graph_def: The meta graph def to which the assets will be added. assets_list: The list where the asset paths are setup.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\saved_model\\builder_impl.py",
    "ast_data": "FunctionDef name:_save_and_write_assets arg:self arg:meta_graph_def arg:assets_list arguments arg arg arg Assign Call Assign Call If Call Return return:no Call"
  },
  {
    "library": "matplotlib",
    "name": "CurveAB",
    "source_code": "@_register_style(_style_list, name='<->')\nclass CurveAB(_Curve):\n    arrow = '<->'",
    "docstring": "An arrow with heads both at the start and the end point.",
    "type": "class",
    "file_path": "matplotlib\\lib\\matplotlib\\patches.py",
    "ast_data": "ClassDef name:CurveAB Assign Call"
  },
  {
    "library": "tensorflow",
    "name": "_get_gradients",
    "source_code": "def _get_gradients(self, tape, loss, var_list, grad_loss=None):\n    grads = tape.gradient(loss, var_list, grad_loss)\n    return list(zip(grads, var_list))",
    "docstring": "Called in to compute gradients from loss.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\optimizer_v2\\optimizer_v2.py",
    "ast_data": "FunctionDef name:_get_gradients arg:self arg:tape arg:loss arg:var_list arg:grad_loss arguments arg arg arg arg arg Assign Call Return return:yes Call Call"
  },
  {
    "library": "pandas",
    "name": "_select_options",
    "source_code": "def _select_options(pat: str) -> list[str]:\n    if pat in _registered_options:\n        return [pat]\n    keys = sorted(_registered_options.keys())\n    if pat == 'all':\n        return keys\n    return [k for k in keys if re.search(pat, k, re.I)]",
    "docstring": "returns a list of keys matching if pat==\"all\", returns all registered options",
    "type": "function",
    "file_path": "pandas\\pandas\\_config\\config.py",
    "ast_data": "FunctionDef name:_select_options arg:pat arguments arg If Compare Return return:yes Assign Call Call If Compare Return return:yes Return return:yes Call"
  },
  {
    "library": "django",
    "name": "get_full_path_with_slash",
    "source_code": "def get_full_path_with_slash(self, request):\n    new_path = request.get_full_path(force_append_slash=True)\n    new_path = escape_leading_slashes(new_path)\n    if settings.DEBUG and request.method in ('DELETE', 'POST', 'PUT', 'PATCH'):\n        raise RuntimeError(\"You called this URL via %(method)s, but the URL doesn't end in a slash and you have APPEND_SLASH set. Django can't redirect to the slash URL while maintaining %(method)s data. Change your form to point to %(url)s (note the trailing slash), or set APPEND_SLASH=False in your Django settings.\" % {'method': request.method, 'url': request.get_host() + new_path})\n    return new_path",
    "docstring": "Return the full path of the request with a trailing slash appended. Raise a RuntimeError if settings.DEBUG is True and request.method is DELETE, POST, PUT, or PATCH.",
    "type": "method",
    "file_path": "django\\django\\middleware\\common.py",
    "ast_data": "FunctionDef name:get_full_path_with_slash arg:self arg:request arguments arg arg Assign Call Assign Call If BoolOp Compare Raise Call Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "Circle",
    "source_code": "class Circle(Ellipse):\n\n    def __str__(self):\n        pars = (self.center[0], self.center[1], self.radius)\n        fmt = 'Circle(xy=(%g, %g), radius=%g)'\n        return fmt % pars\n\n    @_docstring.interpd\n    def __init__(self, xy, radius=5, **kwargs):\n        super().__init__(xy, radius * 2, radius * 2, **kwargs)\n        self.radius = radius\n\n    def set_radius(self, radius):\n        self.width = self.height = 2 * radius\n        self.stale = True\n\n    def get_radius(self):\n        return self.width / 2.0\n    radius = property(get_radius, set_radius)",
    "docstring": "A circle patch.",
    "type": "class",
    "file_path": "matplotlib\\lib\\matplotlib\\patches.py",
    "ast_data": "ClassDef name:Circle FunctionDef name:__str__ arg:self arguments arg Assign Assign Return return:yes FunctionDef name:__init__ arg:self arg:xy arg:radius arguments arg arg arg arg Call Call Assign FunctionDef name:set_radius arg:self arg:radius arguments arg arg Assign Assign FunctionDef name:get_radius arg:self arguments arg Return return:yes Assign Call"
  },
  {
    "library": "scipy",
    "name": "get_coeffs",
    "source_code": "def get_coeffs(self):\n    return self.tck[2]",
    "docstring": "Return spline coefficients.",
    "type": "method",
    "file_path": "scipy\\scipy\\interpolate\\_fitpack2.py",
    "ast_data": "FunctionDef name:get_coeffs arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "checkpoints",
    "source_code": "@property\ndef checkpoints(self):\n    return list(self._maybe_delete.keys())",
    "docstring": "A list of managed checkpoints. Note that checkpoints saved due to will not show up in this list (to avoid ever-growing filename lists). Returns: A list of filenames, sorted from oldest to newest.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\checkpoint\\checkpoint_management.py",
    "ast_data": "FunctionDef name:checkpoints arg:self arguments arg Return return:yes Call Call"
  },
  {
    "library": "django",
    "name": "resolve_template",
    "source_code": "def resolve_template(self, template):\n    if isinstance(template, (list, tuple)):\n        return select_template(template, using=self.using)\n    elif isinstance(template, str):\n        return get_template(template, using=self.using)\n    else:\n        return template",
    "docstring": "Accept a template object, path-to-template, or list of paths.",
    "type": "method",
    "file_path": "django\\django\\template\\response.py",
    "ast_data": "FunctionDef name:resolve_template arg:self arg:template arguments arg arg If Call Return return:yes Call If Call Return return:yes Call Return return:yes"
  },
  {
    "library": "numpy",
    "name": "_fix_names",
    "source_code": "def _fix_names(field_spec):\n    names = field_spec['names']\n    for i, name in enumerate(names):\n        if name is not None:\n            continue\n        j = 0\n        while True:\n            name = f'f{j}'\n            if name not in names:\n                break\n            j = j + 1\n        names[i] = name",
    "docstring": "Replace names which are None with the next unused f%d name",
    "type": "function",
    "file_path": "numpy\\numpy\\_core\\_internal.py",
    "ast_data": "FunctionDef name:_fix_names arg:field_spec arguments arg Assign For Call If Compare Assign While Assign If Compare Assign Assign"
  },
  {
    "library": "numpy",
    "name": "isscalar",
    "source_code": "@set_module('numpy')\ndef isscalar(element):\n    return isinstance(element, generic) or type(element) in ScalarType or isinstance(element, numbers.Number)",
    "docstring": "Returns True if the type of is a scalar type. Parameters ---------- element : any Input argument, can be of any type and shape. Returns ------- val : bool True if is a scalar type, False if it is not. See Also -------- ndim : Get the number of dimensions of an array Notes ----- If you need a stricter way to identify a *numerical* scalar, use `gradienthistogrampathlib.PathExceptionre.compilematplotlib.figure.Figurelisttuple` | | sequence objects | | | +------------------------------------+---------------+-------------------+ Examples -------- >>> import numpy as np >>> np.isscalar(3.1) True >>> np.isscalar(np.array(3.1)) False >>> np.isscalar([3.1]) False >>> np.isscalar(False) True >>> np.isscalar('numpy') True NumPy supports PEP 3141 numbers: >>> from fractions import Fraction >>> np.isscalar(Fraction(5, 17)) True >>> from numbers import Number >>> np.isscalar(Number()) True",
    "type": "function",
    "file_path": "numpy\\numpy\\_core\\numeric.py",
    "ast_data": "FunctionDef name:isscalar arg:element arguments arg Return return:yes BoolOp Call Compare Call Call Call"
  },
  {
    "library": "scipy",
    "name": "_polynomial_matrix",
    "source_code": "def _polynomial_matrix(x, powers):\n    out = np.empty((x.shape[0], powers.shape[0]), dtype=float)\n    polynomial_matrix(x, powers, out)\n    return out",
    "docstring": "Return monomials, with exponents from , evaluated at .",
    "type": "function",
    "file_path": "scipy\\scipy\\interpolate\\_rbfinterp_pythran.py",
    "ast_data": "FunctionDef name:_polynomial_matrix arg:x arg:powers arguments arg arg Assign Call Call Return return:yes"
  },
  {
    "library": "scrapy",
    "name": "get_downloader_middleware",
    "source_code": "def get_downloader_middleware(self, cls: type[_T]) -> _T | None:\n    if not self.engine:\n        raise RuntimeError('Crawler.get_downloader_middleware() can only be called after the crawl engine has been created.')\n    return self._get_component(cls, self.engine.downloader.middleware.middlewares)",
    "docstring": "Return the run-time instance of a :ref: of the specified class or a subclass, or `engine_startedspider_opened`.",
    "type": "method",
    "file_path": "scrapy\\scrapy\\crawler.py",
    "ast_data": "FunctionDef name:get_downloader_middleware arg:self arg:cls arguments arg arg If Raise Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "validate_and_slice_inputs",
    "source_code": "def validate_and_slice_inputs(names_to_saveables):\n    saveables = []\n    seen_ops = object_identity.ObjectIdentitySet()\n    for name, op in sorted(names_to_saveables.items(), key=lambda x: x[0]):\n        for converted_saveable_object in saveable_objects_for_op(op, name):\n            _add_saveable(saveables, seen_ops, converted_saveable_object)\n    return saveables",
    "docstring": "Returns the variables and names that will be used for a Saver. Args: names_to_saveables: A dict (k, v) where k is the name of an operation and v is an operation to save or a BaseSaverBuilder.Saver. Returns: A list of SaveableObjects. Raises: TypeError: If any of the keys are not strings or any of the values are not one of Tensor or Variable or a trackable operation. ValueError: If the same operation is given in more than one value (this also applies to slices of SlicedVariables).",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\training\\saving\\saveable_object_util.py",
    "ast_data": "FunctionDef name:validate_and_slice_inputs arg:names_to_saveables arguments arg Assign Assign Call For Call Call arguments arg For Call Call Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "decision_function",
    "source_code": "@_available_if_base_estimator_has('decision_function')\ndef decision_function(self, X):\n    return self._get_predictions(X, output_method='decision_function')",
    "docstring": "Evaluate the decision_function of the models in the chain. Parameters ---------- X : array-like of shape (n_samples, n_features) The input data. Returns ------- Y_decision : array-like of shape (n_samples, n_classes) Returns the decision function of the sample for each model in the chain.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\multioutput.py",
    "ast_data": "FunctionDef name:decision_function arg:self arg:X arguments arg arg Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "index_of",
    "source_code": "def index_of(self, file_path, function_name, function_start_line):\n    function_key = (file_path, function_name, function_start_line)\n    if function_key in self._function_key_to_function:\n        return self._function_key_to_function[function_key].id\n    else:\n        function_index = len(self._function_key_to_function) + 1\n        function = profile_pb2.Function()\n        function.id = function_index\n        function.name = self._string_table.index_of(function_name)\n        function.filename = self._string_table.index_of(file_path)\n        function.start_line = function_start_line\n        self._function_key_to_function[function_key] = function\n        return function_index",
    "docstring": "Returns index of the function, adding the function if needed. Args: file_path: (string) Path to file where the function is defined. function_name: (string) Function name. function_start_line: (integer) Start line number of function definition. Returns: Function index.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\profiler\\pprof_profiler.py",
    "ast_data": "FunctionDef name:index_of arg:self arg:file_path arg:function_name arg:function_start_line arguments arg arg arg arg Assign If Compare Return return:yes Assign Call Assign Call Assign Assign Call Assign Call Assign Assign Return return:yes"
  },
  {
    "library": "pandas",
    "name": "time_stamp",
    "source_code": "@property\ndef time_stamp(self) -> str:\n    self._ensure_open()\n    return self._time_stamp",
    "docstring": "Return time stamp of Stata file.",
    "type": "method",
    "file_path": "pandas\\pandas\\io\\stata.py",
    "ast_data": "FunctionDef name:time_stamp arg:self arguments arg Call Return return:yes"
  },
  {
    "library": "django",
    "name": "contains",
    "source_code": "def contains(self, other):\n    return capi.geos_contains(self.ptr, other.ptr)",
    "docstring": "Return true if other.within(this) returns true.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\geos\\geometry.py",
    "ast_data": "FunctionDef name:contains arg:self arg:other arguments arg arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "_is_autocast_sub_mod",
    "source_code": "def _is_autocast_sub_mod(node: torch.fx.Node) -> bool:\n    if node.op == 'call_module':\n        assert isinstance(node.target, str)\n        subgm = getattr(node.graph.owning_module, node.target)\n        first_non_ph = nodes_first(subgm.graph.nodes, lambda node: node.op != 'placeholder')\n        if first_non_ph and first_non_ph.op == 'call_function' and (first_non_ph.target == torch.amp.autocast_mode._enter_autocast):\n            return True\n    return False",
    "docstring": "Check if the first non-placeholder node is .",
    "type": "function",
    "file_path": "pytorch\\torch\\_export\\passes\\replace_autocast_with_hop_pass.py",
    "ast_data": "FunctionDef name:_is_autocast_sub_mod arg:node arguments arg If Compare Call Assign Call Assign Call arguments arg Compare If BoolOp Compare Compare Return return:yes Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "info_dict",
    "source_code": "def info_dict(self) -> dict[str, Union[PrimitiveInfoType, list[PrimitiveInfoType]]]:\n    return {'backend': 'ROCm', 'name': self.name, **dict(self.info_kwargs['op'].dict_items())}",
    "docstring": "Information returned here is logged to the autotune log file when that is enabled.",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\codegen\\rocm\\rocm_kernel.py",
    "ast_data": "FunctionDef name:info_dict arg:self arguments arg Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "pre_unshard",
    "source_code": "def pre_unshard(self) -> bool:\n    if self._training_state == HandleTrainingState.SUMMON_FULL_PARAMS and self._skipped_use_sharded_views:\n        self._use_sharded_views()\n    ret = False\n    if self._use_orig_params and (not self._skip_writeback_check):\n        ret = self._writeback_orig_params()\n    if self.uses_sharded_strategy and (not self._offload_params) and (not self.needs_unshard()):\n        pass\n    elif self._uses_param_mixed_precision and (not self._force_full_precision):\n        self._use_low_precision_shard()\n        ret = True\n    elif self._offload_params and self.flat_param.device != self.device:\n        self.flat_param_to(self.device, non_blocking=True)\n        ret = True\n    self._check_on_compute_device(self.flat_param)\n    return ret",
    "docstring": "Return `` 's data is on the device for communication and is what should be all-gathered. This means that it matches the dtype of the expected unsharded parameter.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\fsdp\\_flat_param.py",
    "ast_data": "FunctionDef name:pre_unshard arg:self arguments arg If BoolOp Compare Call Assign If BoolOp Assign Call If BoolOp Call If BoolOp Call Assign If BoolOp Compare Call Assign Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "_unit_change_handler",
    "source_code": "def _unit_change_handler(self, axis_name, event=None):\n    if event is None:\n        return functools.partial(self._unit_change_handler, axis_name, event=object())\n    _api.check_in_list(self._axis_map, axis_name=axis_name)\n    for line in self.lines:\n        line.recache_always()\n    self.relim()\n    self._request_autoscale_view(axis_name)",
    "docstring": "Process axis units changes: requests updates to data and view limits.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\axes\\_base.py",
    "ast_data": "FunctionDef name:_unit_change_handler arg:self arg:axis_name arg:event arguments arg arg arg If Compare Return return:yes Call Call Call For Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "TensorSpec",
    "source_code": "class TensorSpec(object):\n    pass",
    "docstring": "Interface for internal isinstance checks to framework/tensor_spec.py. This helps to avoid circular dependencies.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\types\\internal.py",
    "ast_data": "ClassDef name:TensorSpec"
  },
  {
    "library": "pytorch",
    "name": "is_python_constant",
    "source_code": "def is_python_constant(self):\n    return self.__variable.is_python_constant()",
    "docstring": "Returns True if as_python_constant would succeed.",
    "type": "method",
    "file_path": "pytorch\\torch\\_dynamo\\comptime.py",
    "ast_data": "FunctionDef name:is_python_constant arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "is_normalized",
    "source_code": "@property\ndef is_normalized(self) -> bool:\n    return is_date_array_normalized(self.asi8, self.tz, reso=self._creso)",
    "docstring": "Returns True if all of the dates are at midnight (\"no time\")",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\arrays\\datetimes.py",
    "ast_data": "FunctionDef name:is_normalized arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "_remove_sentinel",
    "source_code": "def _remove_sentinel(samples, paired, sentinel):\n    if not paired:\n        return [sample[sample != sentinel] for sample in samples]\n    sentinels = samples[0] == sentinel\n    for sample in samples[1:]:\n        sentinels = sentinels | (sample == sentinel)\n    not_sentinels = ~sentinels\n    return [sample[not_sentinels] for sample in samples]",
    "docstring": "Remove sentinel values from paired or unpaired 1D samples",
    "type": "function",
    "file_path": "scipy\\scipy\\stats\\_axis_nan_policy.py",
    "ast_data": "FunctionDef name:_remove_sentinel arg:samples arg:paired arg:sentinel arguments arg arg arg If Return return:yes Compare Assign Compare For Assign Compare Assign Return return:yes"
  },
  {
    "library": "pygame",
    "name": "sprite",
    "source_code": "@property\ndef sprite(self):\n    return self._get_sprite()",
    "docstring": "Property for the single sprite contained in this group :return: The sprite.",
    "type": "method",
    "file_path": "pygame\\src_py\\sprite.py",
    "ast_data": "FunctionDef name:sprite arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "is_contiguous",
    "source_code": "def is_contiguous(a: TensorLikeType) -> bool:\n    from torch.fx.experimental.symbolic_shapes import guard_size_oblivious\n    if guard_size_oblivious(a.numel() < 2):\n        return True\n    expected_stride = 1\n    for x, y in reversed(tuple(zip(a.shape, a.stride()))):\n        if guard_size_oblivious(x == 1):\n            continue\n        if guard_size_oblivious(y != expected_stride):\n            return False\n        expected_stride = expected_stride * x\n    return True",
    "docstring": "Tests whether a tensor is contiguous or not. Tensors are contiguous when they have no elements, one element, or when they have \"nested\" strides.",
    "type": "function",
    "file_path": "pytorch\\torch\\_prims_common\\__init__.py",
    "ast_data": "FunctionDef name:is_contiguous arg:a arguments arg If Call Compare Call Return return:yes Assign For Call Call Call Call If Call Compare If Call Compare Return return:yes Assign Return return:yes"
  },
  {
    "library": "scrapy",
    "name": "HttpError",
    "source_code": "class HttpError(IgnoreRequest):\n\n    def __init__(self, response: Response, *args: Any, **kwargs: Any):\n        self.response = response\n        super().__init__(*args, **kwargs)",
    "docstring": "A non-200 response was filtered",
    "type": "class",
    "file_path": "scrapy\\scrapy\\spidermiddlewares\\httperror.py",
    "ast_data": "ClassDef name:HttpError FunctionDef name:__init__ arg:self arg:response arguments arg arg arg arg Assign Call Call"
  },
  {
    "library": "kornia",
    "name": "ARKitQTVecs_to_ColmapQTVecs",
    "source_code": "def ARKitQTVecs_to_ColmapQTVecs(qvec: Tensor, tvec: Tensor) -> tuple[Tensor, Tensor]:\n    Rcg = quaternion_to_rotation_matrix(qvec)\n    Rcv, Tcv = camtoworld_graphics_to_vision_Rt(Rcg, tvec)\n    R_colmap, t_colmap = camtoworld_to_worldtocam_Rt(Rcv, Tcv)\n    t_colmap = t_colmap.reshape(-1, 3, 1)\n    q_colmap = rotation_matrix_to_quaternion(R_colmap.contiguous())\n    return (q_colmap, t_colmap)",
    "docstring": "Convert output of Apple ARKit screen pose to the camera-to-world transformation, expected by Colmap. Both poses in quaternion representation. Args: qvec: ARKit rotation quaternion :math:, [x, y, z, w] format. tvec: translation vector :math:, [x, y, z] Returns: qvec: Colmap rotation quaternion :math:, [w, x, y, z] format. tvec: translation vector :math:, [x, y, z] Example: >>> q, t = tensor([0, 1, 0, 1.])[None], torch.ones(3).reshape(1, 3, 1) >>> ARKitQTVecs_to_ColmapQTVecs(q, t) (tensor([[0.7071, 0.0000, 0.7071, 0.0000]]), tensor([[[-1.0000], [-1.0000], [ 1.0000]]]))",
    "type": "function",
    "file_path": "kornia\\kornia\\geometry\\conversions.py",
    "ast_data": "FunctionDef name:ARKitQTVecs_to_ColmapQTVecs arg:qvec arg:tvec arguments arg arg Assign Call Assign Call Assign Call Assign Call Assign Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "register_lowering",
    "source_code": "def register_lowering(aten_fn, broadcast=False, type_promotion_kind: Optional[ELEMENTWISE_TYPE_PROMOTION_KIND]=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT, convert_input_to_bool=False) -> Callable[[Callable[_P, _T]], Callable[_P, _T]]:\n    return functools.partial(_register_lowering, aten_fn, broadcast=broadcast, type_promotion_kind=type_promotion_kind, convert_input_to_bool=convert_input_to_bool)",
    "docstring": "Shim to support decorator syntax.",
    "type": "function",
    "file_path": "pytorch\\torch\\_inductor\\lowering.py",
    "ast_data": "FunctionDef name:register_lowering arg:aten_fn arg:broadcast arg:type_promotion_kind arg:convert_input_to_bool arguments arg arg arg arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_set_optimizer",
    "source_code": "def _set_optimizer(self, optimizer):\n    if isinstance(optimizer, (list, tuple)):\n        self.optimizer = [optimizers.get(opt) for opt in optimizer]\n    else:\n        self.optimizer = optimizers.get(optimizer)\n    if isinstance(self._dtype_policy, policy.PolicyV1):\n        loss_scale = self._dtype_policy.loss_scale\n    elif self._dtype_policy.name == 'mixed_float16':\n        loss_scale = 'dynamic'\n    else:\n        loss_scale = None\n    if loss_scale is not None and (not isinstance(self.optimizer, loss_scale_optimizer.LossScaleOptimizer)):\n        if isinstance(self.optimizer, list):\n            raise ValueError('When a dtype policy with a loss scale is used, you can only pass a single optimizer. Using policy %s and got optimizers: %s' % self._dtype_policy, self.optimizer)\n        if not isinstance(self.optimizer, optimizer_v2.OptimizerV2):\n            raise ValueError('\"optimizer\" must be an instance of tf.keras.optimizers.Optimizer when a dype policy with a loss scale  used, but got: %s. Using policy: %s' % (self.optimizer, self._dtype_policy))\n        if loss_scale == 'dynamic':\n            self.optimizer = loss_scale_optimizer.LossScaleOptimizer(self.optimizer)\n        else:\n            self.optimizer = loss_scale_optimizer.LossScaleOptimizerV1(self.optimizer, loss_scale)",
    "docstring": "Sets self.optimizer. Sets self.optimizer to , potentially wrapping it with a LossScaleOptimizer. Args: optimizer: The optimizer(s) to assign to self.optimizer.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\training_v1.py",
    "ast_data": "FunctionDef name:_set_optimizer arg:self arg:optimizer arguments arg arg If Call Assign Call Assign Call If Call Assign If Compare Assign Assign If BoolOp Compare Call If Call Raise Call If Call Raise Call If Compare Assign Call Assign Call"
  },
  {
    "library": "tensorflow",
    "name": "from_saved_model",
    "source_code": "def from_saved_model(layer):\n    return layer.__module__.find('keras.saving.saved_model') != -1",
    "docstring": "Returns whether the layer is loaded from a SavedModel.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\base_layer_utils.py",
    "ast_data": "FunctionDef name:from_saved_model arg:layer arguments arg Return return:yes Compare Call"
  },
  {
    "library": "pytorch",
    "name": "get_outermost_event",
    "source_code": "def get_outermost_event(self) -> Optional[str]:\n    stack = self.get_stack()\n    return stack[0] if stack else None",
    "docstring": "Get the outermost event name (i.e. the longest running event) or None if the stack is empty.",
    "type": "method",
    "file_path": "pytorch\\torch\\_dynamo\\utils.py",
    "ast_data": "FunctionDef name:get_outermost_event arg:self arguments arg Assign Call Return return:yes"
  },
  {
    "library": "django",
    "name": "__iter__",
    "source_code": "def __iter__(self):\n    return iter(self.forms)",
    "docstring": "Yield the forms in the order they should be rendered.",
    "type": "method",
    "file_path": "django\\django\\forms\\formsets.py",
    "ast_data": "FunctionDef name:__iter__ arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "set_device_type",
    "source_code": "@staticmethod\ndef set_device_type(device: str='cuda'):\n    DefaultDeviceType._default_device_type = device",
    "docstring": "Set the default device type for checkpointing. Args: device (str): The device type to be set as default. Default is 'cuda'.",
    "type": "method",
    "file_path": "pytorch\\torch\\utils\\checkpoint.py",
    "ast_data": "FunctionDef name:set_device_type arg:device arguments arg Assign"
  },
  {
    "library": "tensorflow",
    "name": "tf_broadcast",
    "source_code": "def tf_broadcast(*args):\n    if len(args) <= 1:\n        return args\n    sh = array_ops.shape(args[0])\n    for arg in args[1:]:\n        sh = array_ops.broadcast_dynamic_shape(sh, array_ops.shape(arg))\n    return [array_ops.broadcast_to(arg, sh) for arg in args]",
    "docstring": "Broadcast tensors. Args: *args: a list of tensors whose shapes are broadcastable against each other. Returns: Tensors broadcasted to the common shape.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\numpy_ops\\np_utils.py",
    "ast_data": "FunctionDef name:tf_broadcast arguments arg If Compare Call Return return:yes Assign Call For Assign Call Call Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "InvalidComparison",
    "source_code": "class InvalidComparison(Exception):\n    pass",
    "docstring": "Exception is raised by _validate_comparison_value to indicate an invalid comparison. Notes ----- This is an internal error.",
    "type": "class",
    "file_path": "pandas\\pandas\\errors\\__init__.py",
    "ast_data": "ClassDef name:InvalidComparison"
  },
  {
    "library": "matplotlib",
    "name": "get_data_path",
    "source_code": "@_logged_cached('matplotlib data path: %s')\ndef get_data_path():\n    return str(Path(__file__).with_name('mpl-data'))",
    "docstring": "Return the path to Matplotlib data.",
    "type": "function",
    "file_path": "matplotlib\\lib\\matplotlib\\__init__.py",
    "ast_data": "FunctionDef name:get_data_path arguments Return return:yes Call Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "assign_group_v2",
    "source_code": "def assign_group_v2(group_assignment, device_index, base_key):\n    group_size, group_key = gen_collective_ops.collective_assign_group_v2(group_assignment=group_assignment, device_index=device_index, base_key=base_key)\n    return (group_size, group_key)",
    "docstring": "Assign group key based on group_assignment. Args: group_assignment: a 2 dimensional integer Tensor that encodes which devices belong to the same group. The values are indices of the devices within 0 to number of devices. device_index: integer for the index of the current device base_key: integer to offset the resulted group_key. The base key shall be unique for different values of group_assignment in the same tf.function. Notes: The device_index argument must be consistent with the index of the device of this Op in the device assignment list. The behavior of this Op is undefined if they are inconsistent. Returns: group_size, group_key: The group size and group key for the current device.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\collective_ops.py",
    "ast_data": "FunctionDef name:assign_group_v2 arg:group_assignment arg:device_index arg:base_key arguments arg arg arg Assign Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "items",
    "source_code": "@property\ndef items(self):\n    return self.unpack_var_sequence(tx=None)",
    "docstring": "Need this when adding a BaseListVariable and a ConstantVariable together. Happens in detectron2.",
    "type": "method",
    "file_path": "pytorch\\torch\\_dynamo\\variables\\constant.py",
    "ast_data": "FunctionDef name:items arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "default_pg",
    "source_code": "@property\ndef default_pg(self) -> Optional[ProcessGroup]:\n    return self._default_pg",
    "docstring": "Process group that includes all ranks of the cluster. This default ProcessGroup is used by c10d APIs when a ProcessGroup is needed but None is provided.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\distributed_c10d.py",
    "ast_data": "FunctionDef name:default_pg arg:self arguments arg Return return:yes"
  },
  {
    "library": "scipy",
    "name": "_rvs",
    "source_code": "def _rvs(self, n, shape, dim, df, C, random_state):\n    random_state = self._get_random_state(random_state)\n    A = self._inv_standard_rvs(n, shape, dim, df, random_state)\n    trsm = get_blas_funcs('trsm', (A,))\n    trmm = get_blas_funcs('trmm', (A,))\n    for index in np.ndindex(A.shape[:-2]):\n        if dim > 1:\n            CA = trsm(1.0, A[index], C, side=1, lower=True)\n            A[index] = trmm(1.0, CA, CA, side=1, lower=True, trans_a=True)\n        else:\n            A[index][0, 0] = (C[0, 0] / A[index][0, 0]) ** 2\n    return A",
    "docstring": "Draw random samples from an inverse Wishart distribution. Parameters ---------- n : integer Number of variates to generate shape : iterable Shape of the variates to generate dim : int Dimension of the scale matrix df : int Degrees of freedom C : ndarray Cholesky factorization of the scale matrix, lower triangular. %(_doc_random_state)s Notes ----- As this function does no argument checking, it should not be called directly; use 'rvs' instead.",
    "type": "method",
    "file_path": "scipy\\scipy\\stats\\_multivariate.py",
    "ast_data": "FunctionDef name:_rvs arg:self arg:n arg:shape arg:dim arg:df arg:C arg:random_state arguments arg arg arg arg arg arg arg Assign Call Assign Call Assign Call Assign Call For Call If Compare Assign Call Assign Call Assign Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_seed_custom_device",
    "source_code": "def _seed_custom_device(seed) -> None:\n    seed = int(seed)\n    custom_backend_name = torch._C._get_privateuse1_backend_name()\n    if hasattr(torch, custom_backend_name):\n        custom_device_mod = getattr(torch, custom_backend_name)\n        _bad_fork_name = '_is_in_bad_fork'\n        _seed_all_name = 'manual_seed_all'\n        if hasattr(custom_device_mod, _bad_fork_name) and hasattr(custom_device_mod, _seed_all_name):\n            if not getattr(custom_device_mod, _bad_fork_name)():\n                getattr(custom_device_mod, _seed_all_name)(seed)\n        else:\n            message = f\"Set seed for `{custom_backend_name}` device does not take effect, please add API's \"\n            message += f'`{_bad_fork_name}` and `{_seed_all_name}` to `{custom_backend_name}` device module.'\n            warnings.warn(message, UserWarning, stacklevel=3)",
    "docstring": "Sets the seed to generate random numbers for custom device. Args: seed (int): The desired seed. See [Note: support the custom device with privateuse1]",
    "type": "function",
    "file_path": "pytorch\\torch\\random.py",
    "ast_data": "FunctionDef name:_seed_custom_device arg:seed arguments arg Assign Call Assign Call If Call Assign Call Assign Assign If BoolOp Call Call If Call Call Call Call Assign Call"
  },
  {
    "library": "scikit-learn",
    "name": "transform",
    "source_code": "def transform(self, X, copy=True):\n    check_is_fitted(self)\n    X = validate_data(self, X, accept_sparse='csr', dtype=[np.float64, np.float32], copy=copy, reset=False)\n    if not sp.issparse(X):\n        X = sp.csr_matrix(X, dtype=X.dtype)\n    if self.sublinear_tf:\n        np.log(X.data, X.data)\n        X.data += 1.0\n    if hasattr(self, 'idf_'):\n        X.data *= self.idf_[X.indices]\n    if self.norm is not None:\n        X = normalize(X, norm=self.norm, copy=False)\n    return X",
    "docstring": "Transform a count matrix to a tf or tf-idf representation. Parameters ---------- X : sparse matrix of (n_samples, n_features) A matrix of term/token counts. copy : bool, default=True Whether to copy X and operate on the copy or perform in-place operations. will only be effective with CSR sparse matrix. Returns ------- vectors : sparse matrix of shape (n_samples, n_features) Tf-idf-weighted document-term matrix.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\feature_extraction\\text.py",
    "ast_data": "FunctionDef name:transform arg:self arg:X arg:copy arguments arg arg arg Call Assign Call If Call Assign Call If Call If Call If Compare Assign Call Return return:yes"
  },
  {
    "library": "pandas",
    "name": "_replace_booleans",
    "source_code": "def _replace_booleans(tok: tuple[int, str]) -> tuple[int, str]:\n    toknum, tokval = tok\n    if toknum == tokenize.OP:\n        if tokval == '&':\n            return (tokenize.NAME, 'and')\n        elif tokval == '|':\n            return (tokenize.NAME, 'or')\n        return (toknum, tokval)\n    return (toknum, tokval)",
    "docstring": "Replace `` so that bitwise precedence is changed to boolean precedence. Parameters ---------- tok : tuple of int, str ints correspond to the all caps constants in the tokenize module Returns ------- tuple of int, str Either the input or token or the replacement values",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\computation\\expr.py",
    "ast_data": "FunctionDef name:_replace_booleans arg:tok arguments arg Assign If Compare If Compare Return return:yes If Compare Return return:yes Return return:yes Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, session, run_with_hooks_fn):\n    self._session = session\n    self._run_with_hooks_fn = run_with_hooks_fn",
    "docstring": "Initializes the argument for a invocation. Args: session: An instance of . run_with_hooks_fn: A function for running fetches and hooks.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\training\\monitored_session.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:session arg:run_with_hooks_fn arguments arg arg arg Assign Assign"
  },
  {
    "library": "pytorch",
    "name": "global_unstructured",
    "source_code": "def global_unstructured(parameters, pruning_method, importance_scores=None, **kwargs):\n    if not isinstance(parameters, Iterable):\n        raise TypeError('global_unstructured(): parameters is not an Iterable')\n    importance_scores = importance_scores if importance_scores is not None else {}\n    if not isinstance(importance_scores, dict):\n        raise TypeError('global_unstructured(): importance_scores must be of type dict')\n    relevant_importance_scores = torch.nn.utils.parameters_to_vector([importance_scores.get((module, name), getattr(module, name)) for module, name in parameters])\n    default_mask = torch.nn.utils.parameters_to_vector([getattr(module, name + '_mask', torch.ones_like(getattr(module, name))) for module, name in parameters])\n    container = PruningContainer()\n    container._tensor_name = 'temp'\n    method = pruning_method(**kwargs)\n    method._tensor_name = 'temp'\n    if method.PRUNING_TYPE != 'unstructured':\n        raise TypeError(f'Only \"unstructured\" PRUNING_TYPE supported for the `pruning_method`. Found method {pruning_method} of type {method.PRUNING_TYPE}')\n    container.add_pruning_method(method)\n    final_mask = container.compute_mask(relevant_importance_scores, default_mask)\n    pointer = 0\n    for module, name in parameters:\n        param = getattr(module, name)\n        num_param = param.numel()\n        param_mask = final_mask[pointer:pointer + num_param].view_as(param)\n        custom_from_mask(module, name, mask=param_mask)\n        pointer += num_param",
    "docstring": "Globally prunes tensors corresponding to all parameters in `nn.Module` Note: Since global structured pruning doesn't make much sense unless the norm is normalized by the size of the parameter, we now limit the scope of global pruning to unstructured methods. Examples: >>> from torch.nn.utils import prune >>> from collections import OrderedDict >>> net = nn.Sequential(OrderedDict([ ... ('first', nn.Linear(10, 4)), ... ('second', nn.Linear(4, 1)), ... ])) >>> parameters_to_prune = ( ... (net.first, 'weight'), ... (net.second, 'weight'), ... ) >>> prune.global_unstructured( ... parameters_to_prune, ... pruning_method=prune.L1Unstructured, ... amount=10, ... ) >>> print(sum(torch.nn.utils.parameters_to_vector(net.buffers()) == 0)) tensor(10)",
    "type": "function",
    "file_path": "pytorch\\torch\\nn\\utils\\prune.py",
    "ast_data": "FunctionDef name:global_unstructured arg:parameters arg:pruning_method arg:importance_scores arguments arg arg arg arg If Call Raise Call Assign Compare If Call Raise Call Assign Call Call Call Assign Call Call Call Call Assign Call Assign Assign Call Assign If Compare Raise Call Call Assign Call Assign For Assign Call Assign Call Assign Call Call"
  },
  {
    "library": "django",
    "name": "handle_m2m_field",
    "source_code": "def handle_m2m_field(self, obj, field):\n    raise NotImplementedError('subclasses of Serializer must provide a handle_m2m_field() method')",
    "docstring": "Called to handle a ManyToManyField.",
    "type": "method",
    "file_path": "django\\django\\core\\serializers\\base.py",
    "ast_data": "FunctionDef name:handle_m2m_field arg:self arg:obj arg:field arguments arg arg arg Raise Call"
  },
  {
    "library": "authlib",
    "name": "generate_user_code",
    "source_code": "def generate_user_code(self):\n    if self.USER_CODE_TYPE == 'digital':\n        return create_digital_user_code()\n    return create_string_user_code()",
    "docstring": "A method to generate ``.",
    "type": "method",
    "file_path": "authlib\\authlib\\oauth2\\rfc8628\\endpoint.py",
    "ast_data": "FunctionDef name:generate_user_code arg:self arguments arg If Compare Return return:yes Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "device",
    "source_code": "@property\ndef device(self):\n    return self._vars[0].device",
    "docstring": "The device this variable is on.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\tpu_replicated_variable.py",
    "ast_data": "FunctionDef name:device arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_convert_to_ragged_tensor_values",
    "source_code": "def _convert_to_ragged_tensor_values(value):\n    if _is_supported_ragged_values_type(value):\n        return value\n    else:\n        return convert_to_tensor_or_ragged_tensor(value, name='values')",
    "docstring": "Converts value to supported RaggedTensor value. * If is an object of supported value type, then return it as-is. * Otherwise convert it to Tensor or RaggedTensor. Args: value: An object of , or registered RaggedTensor value types, or an object whose type has a registered conversion function. Returns: An object of , or registered RaggedTensor value types",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\ragged_tensor.py",
    "ast_data": "FunctionDef name:_convert_to_ragged_tensor_values arg:value arguments arg If Call Return return:yes Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "_init_optim_state",
    "source_code": "def _init_optim_state(optim: torch.optim.Optimizer) -> None:\n    if optim.state:\n        return\n    for param_group in optim.param_groups:\n        for param in param_group[_PARAMS]:\n            if param.grad is not None:\n                return\n    for param_group in optim.param_groups:\n        for param in param_group[_PARAMS]:\n            if param.requires_grad:\n                param.grad = torch.zeros_like(param)\n    lrs = []\n    for param_group in optim.param_groups:\n        if 'lr' in param_group:\n            lrs.append(param_group['lr'])\n            param_group['lr'] = torch.tensor(0.0) if isinstance(param_group['lr'], torch.Tensor) else 0.0\n    optim.step(closure=None)\n    for param_group in optim.param_groups:\n        if 'lr' in param_group:\n            param_group['lr'] = lrs.pop(0)\n    optim.zero_grad(set_to_none=True)",
    "docstring": "Initialize optim states by calling the step() with zero grads.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\checkpoint\\state_dict.py",
    "ast_data": "FunctionDef name:_init_optim_state arg:optim arguments arg If Return return:no For For If Compare Return return:no For For If Assign Call Assign For If Compare Call Assign Call Call Call For If Compare Assign Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_create_variables",
    "source_code": "def _create_variables(self, table: tpu_embedding_v2_utils.TableConfig, trainable: bool) -> Dict[Text, tf_variables.Variable]:\n    variable_shape = (table.vocabulary_size, table.dim)\n\n    def getter(name, shape, dtype, initializer, trainable):\n        del shape\n        initial_value = functools.partial(initializer, variable_shape, dtype=dtype)\n        return tf_variables.Variable(name=name, initial_value=initial_value, shape=variable_shape, dtype=dtype, trainable=trainable)\n\n    def variable_creator(name, initializer, trainable=True):\n        return self._add_variable_with_custom_getter(name=name, initializer=initializer, shape=variable_shape, dtype=dtypes.float32, getter=getter, trainable=trainable)\n    parameters = variable_creator(table.name, table.initializer, trainable=trainable)\n\n    def slot_creator(name, initializer):\n        return variable_creator(table.name + '/' + name, initializer, False)\n    if table.optimizer is not None:\n        slot_vars = table.optimizer._create_slots(parameters, slot_creator)\n    else:\n        slot_vars = {}\n    slot_vars['parameters'] = parameters\n    return slot_vars",
    "docstring": "Create all variables including table variables and slot variables.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\tpu\\tpu_embedding_base.py",
    "ast_data": "FunctionDef name:_create_variables arg:self arg:table arg:trainable arguments arg arg arg Assign FunctionDef name:getter arg:name arg:shape arg:dtype arg:initializer arg:trainable arguments arg arg arg arg arg Assign Call Return return:yes Call FunctionDef name:variable_creator arg:name arg:initializer arg:trainable arguments arg arg arg Return return:yes Call Assign Call FunctionDef name:slot_creator arg:name arg:initializer arguments arg arg Return return:yes Call If Compare Assign Call Assign Assign Return return:yes"
  },
  {
    "library": "django",
    "name": "local_concrete_fields",
    "source_code": "@cached_property\ndef local_concrete_fields(self):\n    return make_immutable_fields_list('local_concrete_fields', (f for f in self.local_fields if f.concrete))",
    "docstring": "Return a list of all concrete fields on the model. Private API intended only to be used by Django itself; get_fields() combined with filtering of field properties is the public API for obtaining this field list.",
    "type": "method",
    "file_path": "django\\django\\db\\models\\options.py",
    "ast_data": "FunctionDef name:local_concrete_fields arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "django",
    "name": "_prepare_field_lists",
    "source_code": "def _prepare_field_lists(self):\n    self.kept_model_keys = self.old_model_keys & self.new_model_keys\n    self.kept_proxy_keys = self.old_proxy_keys & self.new_proxy_keys\n    self.kept_unmanaged_keys = self.old_unmanaged_keys & self.new_unmanaged_keys\n    self.through_users = {}\n    self.old_field_keys = {(app_label, model_name, field_name) for app_label, model_name in self.kept_model_keys for field_name in self.from_state.models[app_label, self.renamed_models.get((app_label, model_name), model_name)].fields}\n    self.new_field_keys = {(app_label, model_name, field_name) for app_label, model_name in self.kept_model_keys for field_name in self.to_state.models[app_label, model_name].fields}",
    "docstring": "Prepare field lists and a list of the fields that used through models in the old state so dependencies can be made from the through model deletion to the field that uses it.",
    "type": "method",
    "file_path": "django\\django\\db\\migrations\\autodetector.py",
    "ast_data": "FunctionDef name:_prepare_field_lists arg:self arguments arg Assign Assign Assign Assign Assign Call Assign"
  },
  {
    "library": "tensorflow",
    "name": "get_enclosing_xla_context",
    "source_code": "@tf_export('__internal__.get_enclosing_xla_context', v1=[])\ndef get_enclosing_xla_context():\n    graph = ops.get_default_graph()\n    while graph is not None:\n        context_ = graph._get_control_flow_context()\n        while context_ is not None:\n            if isinstance(context_, XLAControlFlowContext):\n                return context_\n            context_ = context_.outer_context\n        graph = getattr(graph, 'outer_graph', None)\n    return None",
    "docstring": "Recursively find and return the XLAControlFlowContext.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\control_flow_ops.py",
    "ast_data": "FunctionDef name:get_enclosing_xla_context arguments Assign Call While Compare Assign Call While Compare If Call Return return:yes Assign Assign Call Return return:no Call"
  },
  {
    "library": "tensorflow",
    "name": "_right",
    "source_code": "def _right(operator):\n    return tf_decorator.make_decorator(operator, lambda y, x: operator(x, y))",
    "docstring": "Right-handed version of an operator: swap args x and y.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\ragged_operators.py",
    "ast_data": "FunctionDef name:_right arg:operator arguments arg Return return:yes Call arguments arg arg Call"
  },
  {
    "library": "pytorch",
    "name": "forward_transitive_closure",
    "source_code": "def forward_transitive_closure(self, src: str) -> set[str]:\n    result = set(src)\n    working_set = deque(src)\n    while len(working_set) > 0:\n        cur = working_set.popleft()\n        for n in self.successors(cur):\n            if n not in result:\n                result.add(n)\n                working_set.append(n)\n    return result",
    "docstring": "Returns a set of nodes that are reachable from src",
    "type": "method",
    "file_path": "pytorch\\torch\\package\\_digraph.py",
    "ast_data": "FunctionDef name:forward_transitive_closure arg:self arg:src arguments arg arg Assign Call Assign Call While Compare Call Assign Call For Call If Compare Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "transpose_w",
    "source_code": "def transpose_w(W: _T, trans_w: bool) -> _T:\n    if isinstance(W, ir.IRNode):\n        if trans_w:\n            if not isinstance(W, ir.TensorBox):\n                W = ir.TensorBox(W)\n            W = L.permute(W, [1, 0])\n    elif trans_w:\n        assert isinstance(W, torch.Tensor)\n        W = W.transpose(0, 1)\n    return W",
    "docstring": "Transpose W based on the trans_w flag.",
    "type": "function",
    "file_path": "pytorch\\torch\\_inductor\\codegen\\cpp_gemm_template.py",
    "ast_data": "FunctionDef name:transpose_w arg:W arg:trans_w arguments arg arg If Call If If Call Assign Call Assign Call If Call Assign Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "_redo_transform_rel_fig",
    "source_code": "def _redo_transform_rel_fig(self, bbox=None):\n    if bbox is not None:\n        self.bbox_relative.p0 = bbox.p0\n        self.bbox_relative.p1 = bbox.p1\n        return\n    gs = self._subplotspec.get_gridspec()\n    wr = np.asarray(gs.get_width_ratios())\n    hr = np.asarray(gs.get_height_ratios())\n    dx = wr[self._subplotspec.colspan].sum() / wr.sum()\n    dy = hr[self._subplotspec.rowspan].sum() / hr.sum()\n    x0 = wr[:self._subplotspec.colspan.start].sum() / wr.sum()\n    y0 = 1 - hr[:self._subplotspec.rowspan.stop].sum() / hr.sum()\n    self.bbox_relative.p0 = (x0, y0)\n    self.bbox_relative.p1 = (x0 + dx, y0 + dy)",
    "docstring": "Make the transSubfigure bbox relative to Figure transform. Parameters ---------- bbox : bbox or None If not None, then the bbox is used for relative bounding box. Otherwise, it is calculated from the subplotspec.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\figure.py",
    "ast_data": "FunctionDef name:_redo_transform_rel_fig arg:self arg:bbox arguments arg arg If Compare Assign Assign Return return:no Assign Call Assign Call Call Assign Call Call Assign Call Call Assign Call Call Assign Call Call Assign Call Call Assign Assign"
  },
  {
    "library": "kornia",
    "name": "_blend_one",
    "source_code": "def _blend_one(input1: Tensor, input2: Tensor, factor: Tensor) -> Tensor:\n    if not isinstance(input1, Tensor):\n        raise AssertionError(f'`input1` must be a tensor. Got {input1}.')\n    if not isinstance(input2, Tensor):\n        raise AssertionError(f'`input1` must be a tensor. Got {input2}.')\n    if isinstance(factor, Tensor) and len(factor.size()) != 0:\n        raise AssertionError(f'Factor shall be a float or single element tensor. Got {factor}.')\n    if factor == 0.0:\n        return input1\n    if factor == 1.0:\n        return input2\n    diff = (input2 - input1) * factor\n    res = input1 + diff\n    if factor > 0.0 and factor < 1.0:\n        return res\n    return torch.clamp(res, 0, 1)",
    "docstring": "Blend two images into one. Args: input1: image tensor with shapes like :math: or :math:. input2: image tensor with shapes like :math: or :math:. factor: factor 0-dim tensor. Returns: : image tensor with the batch in the zero position.",
    "type": "function",
    "file_path": "kornia\\kornia\\enhance\\adjust.py",
    "ast_data": "FunctionDef name:_blend_one arg:input1 arg:input2 arg:factor arguments arg arg arg If Call Raise Call If Call Raise Call If BoolOp Call Compare Call Call Raise Call If Compare Return return:yes If Compare Return return:yes Assign Assign If BoolOp Compare Compare Return return:yes Return return:yes Call"
  },
  {
    "library": "seaborn",
    "name": "_get_support",
    "source_code": "def _get_support(self, data: DataFrame, orient: str) -> ndarray:\n    if self.gridsize is None:\n        return data[orient].to_numpy()\n    kde = self._fit(data, orient)\n    bw = np.sqrt(kde.covariance.squeeze())\n    gridmin = data[orient].min() - bw * self.cut\n    gridmax = data[orient].max() + bw * self.cut\n    return np.linspace(gridmin, gridmax, self.gridsize)",
    "docstring": "Define the grid that the KDE will be evaluated on.",
    "type": "method",
    "file_path": "seaborn\\seaborn\\_stats\\density.py",
    "ast_data": "FunctionDef name:_get_support arg:self arg:data arg:orient arguments arg arg arg If Compare Return return:yes Call Assign Call Assign Call Call Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "_repr_png_",
    "source_code": "def _repr_png_(self):\n    if not self._isinit:\n        self._init()\n    pixels = self.lut\n    if pixels.shape[0] < _BIVAR_REPR_PNG_SIZE:\n        pixels = np.repeat(pixels, repeats=_BIVAR_REPR_PNG_SIZE // pixels.shape[0], axis=0)[:256, :]\n    if pixels.shape[1] < _BIVAR_REPR_PNG_SIZE:\n        pixels = np.repeat(pixels, repeats=_BIVAR_REPR_PNG_SIZE // pixels.shape[1], axis=1)[:, :256]\n    pixels = (pixels[::-1, :, :] * 255).astype(np.uint8)\n    png_bytes = io.BytesIO()\n    title = self.name + ' BivarColormap'\n    author = f'Matplotlib v{mpl.__version__}, https://matplotlib.org'\n    pnginfo = PngInfo()\n    pnginfo.add_text('Title', title)\n    pnginfo.add_text('Description', title)\n    pnginfo.add_text('Author', author)\n    pnginfo.add_text('Software', author)\n    Image.fromarray(pixels).save(png_bytes, format='png', pnginfo=pnginfo)\n    return png_bytes.getvalue()",
    "docstring": "Generate a PNG representation of the BivarColormap.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\colors.py",
    "ast_data": "FunctionDef name:_repr_png_ arg:self arguments arg If Call Assign If Compare Assign Call If Compare Assign Call Assign Call Assign Call Assign Assign Assign Call Call Call Call Call Call Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "_get_error_file_path",
    "source_code": "def _get_error_file_path(self) -> Optional[str]:\n    return os.environ.get('TORCHELASTIC_ERROR_FILE', None)",
    "docstring": "Return the error file path. May return `` to have the structured error be logged only.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\elastic\\multiprocessing\\errors\\error_handler.py",
    "ast_data": "FunctionDef name:_get_error_file_path arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "_quantile",
    "source_code": "def _quantile(self, qs: npt.NDArray[np.float64], interpolation: str) -> Self:\n    mask = np.asarray(self.isna())\n    arr = np.asarray(self)\n    fill_value = np.nan\n    res_values = quantile_with_mask(arr, mask, fill_value, qs, interpolation)\n    return type(self)._from_sequence(res_values)",
    "docstring": "Compute the quantiles of self for each quantile in . Parameters ---------- qs : np.ndarray[float64] interpolation: str Returns ------- same type as self",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\arrays\\base.py",
    "ast_data": "FunctionDef name:_quantile arg:self arg:qs arg:interpolation arguments arg arg arg Assign Call Call Assign Call Assign Assign Call Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "TracingOptions",
    "source_code": "@dataclasses.dataclass\nclass TracingOptions:\n    python_function: Callable[[Any], Any] = lambda *args, **kwargs: None\n    name: str = 'function'\n    polymorphic_type: Optional[function_type_lib.FunctionType] = None\n    default_values: Optional[Dict[str, Any]] = None\n    scope_type: ScopeType = ScopeType.NO_SCOPE\n    attributes: Optional[Dict[str, Any]] = None\n    autograph: bool = True\n    autograph_options: Optional[Tuple[Any, ...]] = None\n    reduce_retracing: bool = False\n    bind_graph_to_function: bool = False\n    function_cache: Optional[function_cache_lib.FunctionCache] = None\n    function_captures: Optional[capture_container.FunctionCaptures] = None\n    lock: Optional[threading.Lock] = None\n\n    def __post_init__(self):\n        if self.attributes:\n            for attribute in self.attributes:\n                if attribute not in attributes_lib.TRACING_COMPILATION_ALLOWLIST:\n                    raise ValueError(f'Tracing compilation does not support `{attribute}` as an attribute.')\n        if not self.polymorphic_type or self.default_values is None:\n            self.polymorphic_type = function_type_lib.FunctionType.from_callable(self.python_function)\n            self.default_values = function_type_lib.FunctionType.get_default_values(self.python_function)\n        self._input_signature = function_type_utils.to_input_signature(self.polymorphic_type)\n\n    @property\n    def is_pure(self):\n        return self.attributes and attributes_lib.IMPLEMENTS in self.attributes\n\n    @property\n    def input_signature(self):\n        return self._input_signature",
    "docstring": "Configuration options for tracing.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\polymorphic_function\\tracing_compilation.py",
    "ast_data": "ClassDef name:TracingOptions arguments arg arg FunctionDef name:__post_init__ arg:self arguments arg If For If Compare Raise Call If BoolOp Compare Assign Call Assign Call Assign Call FunctionDef name:is_pure arg:self arguments arg Return return:yes BoolOp Compare FunctionDef name:input_signature arg:self arguments arg Return return:yes"
  },
  {
    "library": "cryptography",
    "name": "__eq__",
    "source_code": "@abc.abstractmethod\ndef __eq__(self, other: object) -> bool:\n    pass",
    "docstring": "Checks equality.",
    "type": "method",
    "file_path": "cryptography\\src\\cryptography\\hazmat\\primitives\\asymmetric\\rsa.py",
    "ast_data": "FunctionDef name:__eq__ arg:self arg:other arguments arg arg"
  },
  {
    "library": "seaborn",
    "name": "__init__",
    "source_code": "def __init__(self, stat='proportion', complementary=False):\n    _check_argument('stat', ['count', 'percent', 'proportion'], stat)\n    self.stat = stat\n    self.complementary = complementary",
    "docstring": "Initialize the class with its parameters Parameters ---------- stat : {{\"proportion\", \"percent\", \"count\"}} Distribution statistic to compute. complementary : bool If True, use the complementary CDF (1 - CDF)",
    "type": "method",
    "file_path": "seaborn\\seaborn\\_statistics.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:stat arg:complementary arguments arg arg arg Call Assign Assign"
  },
  {
    "library": "scrapy",
    "name": "is_asyncio_reactor_installed",
    "source_code": "def is_asyncio_reactor_installed() -> bool:\n    if not is_reactor_installed():\n        raise RuntimeError('is_asyncio_reactor_installed() called without an installed reactor.')\n    from twisted.internet import reactor\n    return isinstance(reactor, asyncioreactor.AsyncioSelectorReactor)",
    "docstring": "Check whether the installed reactor is :class:. Raise a :exc: if no reactor is installed.",
    "type": "function",
    "file_path": "scrapy\\scrapy\\utils\\reactor.py",
    "ast_data": "FunctionDef name:is_asyncio_reactor_installed arguments If Call Raise Call Return return:yes Call"
  },
  {
    "library": "numpy",
    "name": "AliasedOptionError",
    "source_code": "class AliasedOptionError(DistutilsError):\n    pass",
    "docstring": "Aliases entries in config files should not be existing. In section '{section}' we found multiple appearances of options {options}.",
    "type": "class",
    "file_path": "numpy\\numpy\\distutils\\system_info.py",
    "ast_data": "ClassDef name:AliasedOptionError"
  },
  {
    "library": "pytorch",
    "name": "autograd_inlining",
    "source_code": "@property\ndef autograd_inlining(self) -> bool:\n    return self._autograd_inlining",
    "docstring": "Whether Autograd must be inlined.",
    "type": "method",
    "file_path": "pytorch\\torch\\onnx\\_globals.py",
    "ast_data": "FunctionDef name:autograd_inlining arg:self arguments arg Return return:yes"
  },
  {
    "library": "numpy",
    "name": "strseq",
    "source_code": "def strseq(object, convert, join=joinseq):\n    if type(object) in [list, tuple]:\n        return join([strseq(_o, convert, join) for _o in object])\n    else:\n        return convert(object)",
    "docstring": "Recursively walk a sequence, stringifying each element.",
    "type": "function",
    "file_path": "numpy\\numpy\\_utils\\_inspect.py",
    "ast_data": "FunctionDef name:strseq arg:object arg:convert arg:join arguments arg arg arg If Compare Call Return return:yes Call Call Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "set_semiminor",
    "source_code": "def set_semiminor(self, b):\n    self.b = float(b)\n    self._path = None\n    self.stale = True",
    "docstring": "Set the semi-minor axis *b* of the annulus. Parameters ---------- b : float",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\patches.py",
    "ast_data": "FunctionDef name:set_semiminor arg:self arg:b arguments arg arg Assign Call Assign Assign"
  },
  {
    "library": "pandas",
    "name": "apply",
    "source_code": "@final\ndef apply(self, func, **kwargs) -> list[Block]:\n    result = func(self.values, **kwargs)\n    result = maybe_coerce_values(result)\n    return self._split_op_result(result)",
    "docstring": "apply the function to my values; return a block if we are not one",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\internals\\blocks.py",
    "ast_data": "FunctionDef name:apply arg:self arg:func arguments arg arg arg Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "numpy",
    "name": "polyval2d",
    "source_code": "@array_function_dispatch(_polyval2d_dispatcher)\ndef polyval2d(x, y, c):\n    return pu._valnd(polyval, c, x, y)",
    "docstring": "Evaluate a 2-D polynomial at points (x, y). This function returns the value .. math:: p(x,y) = \\sum_{i,j} c_{i,j} * x^i * y^j The parameters and are converted to arrays only if they are tuples or a lists, otherwise they are treated as a scalars and they must have the same shape after conversion. In either case, either and or their elements must support multiplication and addition both with themselves and with the elements of . If has fewer than two dimensions, ones are implicitly appended to its shape to make it 2-D. The shape of the result will be c.shape[2:] + x.shape. Parameters ---------- x, y : array_like, compatible objects The two dimensional series is evaluated at the points `xyxycxy`. See Also -------- polyval, polygrid2d, polyval3d, polygrid3d Examples -------- >>> from numpy.polynomial import polynomial as P >>> c = ((1, 2, 3), (4, 5, 6)) >>> P.polyval2d(1, 1, c) 21.0",
    "type": "function",
    "file_path": "numpy\\numpy\\polynomial\\polynomial.py",
    "ast_data": "FunctionDef name:polyval2d arg:x arg:y arg:c arguments arg arg arg Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_doing_dynamic_loss_scaling",
    "source_code": "def _doing_dynamic_loss_scaling(self):\n    return isinstance(self._loss_scale, loss_scale_module.DynamicLossScale)",
    "docstring": "Check if dynamically manages the loss scale.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\training\\experimental\\loss_scale_optimizer.py",
    "ast_data": "FunctionDef name:_doing_dynamic_loss_scaling arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "offsets_in_rows",
    "source_code": "def offsets_in_rows(self):\n    return gen_ragged_math_ops.ragged_range(starts=constant_op.constant(0, self.dtype), limits=self.row_lengths(), deltas=constant_op.constant(1, self.dtype)).rt_dense_values",
    "docstring": "Return the offset of each value. RowPartition takes an array x and converts it into sublists. offsets[i] is the index of x[i] in its sublist. Given a shape, such as: [*,*,*],[*,*],[],[*,*] This returns: 0,1,2,0,1,0,1 Returns: an offset for every value.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\row_partition.py",
    "ast_data": "FunctionDef name:offsets_in_rows arg:self arguments arg Return return:yes Call Call Call Call"
  },
  {
    "library": "matplotlib",
    "name": "_check_gridspec_exists",
    "source_code": "@staticmethod\ndef _check_gridspec_exists(figure, nrows, ncols):\n    for ax in figure.get_axes():\n        gs = ax.get_gridspec()\n        if gs is not None:\n            if hasattr(gs, 'get_topmost_subplotspec'):\n                gs = gs.get_topmost_subplotspec().get_gridspec()\n            if gs.get_geometry() == (nrows, ncols):\n                return gs\n    return GridSpec(nrows, ncols, figure=figure)",
    "docstring": "Check if the figure already has a gridspec with these dimensions, or create a new one",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\gridspec.py",
    "ast_data": "FunctionDef name:_check_gridspec_exists arg:figure arg:nrows arg:ncols arguments arg arg arg For Call Assign Call If Compare If Call Assign Call Call If Compare Call Return return:yes Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "__array__",
    "source_code": "def __array__(self, dtype=None):\n    return numpy_compat.np_asarray(self.numpy(), dtype=dtype)",
    "docstring": "Allows direct conversion to a numpy array. >>> np.array(tf.Variable([1.0])) array([1.], dtype=float32) Returns: The variable value as a numpy array.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\resource_variable_ops.py",
    "ast_data": "FunctionDef name:__array__ arg:self arg:dtype arguments arg arg Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "get_device_capability",
    "source_code": "def get_device_capability(device: Optional[_device_t]=None) -> tuple[int, int]:\n    prop = get_device_properties(device)\n    return (prop.major, prop.minor)",
    "docstring": "Get the cuda capability of a device. Args: device (torch.device or int or str, optional): device for which to return the device capability. This function is a no-op if this argument is a negative integer. It uses the current device, given by :func:, if :attr: is `` (default). Returns: tuple(int, int): the major and minor cuda capability of the device",
    "type": "function",
    "file_path": "pytorch\\torch\\cuda\\__init__.py",
    "ast_data": "FunctionDef name:get_device_capability arg:device arguments arg Assign Call Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "Transformer",
    "source_code": "class Transformer(ABC):\n    if Benchmark.bench_transform:\n\n        def time_transform(self, *args):\n            self.estimator.transform(self.X)\n\n        def peakmem_transform(self, *args):\n            self.estimator.transform(self.X)\n        if Benchmark.base_commit is not None:\n\n            def track_same_transform(self, *args):\n                est_path = get_estimator_path(self, Benchmark.base_commit, args, True)\n                with est_path.open(mode='rb') as f:\n                    estimator_base = pickle.load(f)\n                X_val_t_base = estimator_base.transform(self.X_val)\n                X_val_t = self.estimator.transform(self.X_val)\n                return np.allclose(X_val_t_base, X_val_t)\n\n    @property\n    @abstractmethod\n    def params(self):\n        pass",
    "docstring": "Abstract base class for benchmarks of estimators implementing transform",
    "type": "class",
    "file_path": "scikit-learn\\asv_benchmarks\\benchmarks\\common.py",
    "ast_data": "ClassDef name:Transformer If FunctionDef name:time_transform arg:self arguments arg arg Call FunctionDef name:peakmem_transform arg:self arguments arg arg Call If Compare FunctionDef name:track_same_transform arg:self arguments arg arg Assign Call With Call Assign Call Assign Call Assign Call Return return:yes Call FunctionDef name:params arg:self arguments arg"
  },
  {
    "library": "pytorch",
    "name": "AttributeMutationExisting",
    "source_code": "class AttributeMutationExisting(AttributeMutation):\n\n    def __init__(self):\n        super().__init__(SourceType.Existing)",
    "docstring": "This case of VariableTracker.mutation_type marker indicates 1. Dynamo allows mutation on the value's attributes. 2. The value exists before Dynamo tracing started. For instance, Dynamo could model a pre-existing object with this marker, indicating that if we encounter mutations to this object, we need to buffer then re-apply those mutations after the graph runs, since the object might be used afterwards in Python.",
    "type": "class",
    "file_path": "pytorch\\torch\\_dynamo\\variables\\base.py",
    "ast_data": "ClassDef name:AttributeMutationExisting FunctionDef name:__init__ arg:self arguments arg Call Call"
  },
  {
    "library": "matplotlib",
    "name": "artists",
    "source_code": "@property\ndef artists(self):\n    handles_artists = getattr(self, '_handles_artists', ())\n    return (self._selection_artist,) + handles_artists",
    "docstring": "Tuple of the artists of the selector.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\widgets.py",
    "ast_data": "FunctionDef name:artists arg:self arguments arg Assign Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "numpy_random",
    "source_code": "def numpy_random(dtype, *shapes):\n    return np.random.rand(*shapes).astype(dtype)",
    "docstring": "Return a random numpy tensor of the provided dtype. Args: shapes: int or a sequence of ints to defining the shapes of the tensor dtype: use the dtypes from numpy ( Return: numpy tensor of dtype",
    "type": "function",
    "file_path": "pytorch\\benchmarks\\operator_benchmark\\benchmark_utils.py",
    "ast_data": "FunctionDef name:numpy_random arg:dtype arguments arg arg Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "find_partition_to_combine_based_on_size",
    "source_code": "def find_partition_to_combine_based_on_size(sorted_partitions: list[Partition], available_mem_bytes: int, partitions: list[Partition]) -> tuple[bool, list[Partition]]:\n    find_combination = False\n    smallest_partition = sorted_partitions.pop(0)\n    for p in sorted_partitions[::-1]:\n        if abs(smallest_partition.bfs_level - p.bfs_level) <= 1:\n            mem_bytes_needed = calculate_mem_bytes_needed(p, smallest_partition)\n            if mem_bytes_needed <= available_mem_bytes:\n                combine_two_partitions(p, smallest_partition, self.partitions)\n                partitions.remove(smallest_partition)\n                partitions.remove(p)\n                partitions.append(self.partitions[-1])\n                find_combination = True\n                break\n    return (find_combination, partitions)",
    "docstring": "step 1 in combine_partition_based_on_size()",
    "type": "method",
    "file_path": "pytorch\\torch\\fx\\experimental\\accelerator_partitioner.py",
    "ast_data": "FunctionDef name:find_partition_to_combine_based_on_size arg:sorted_partitions arg:available_mem_bytes arg:partitions arguments arg arg arg Assign Assign Call For If Compare Call Assign Call If Compare Call Call Call Call Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, reduction=losses_utils.ReductionV2.AUTO, name='mean_absolute_error'):\n    super().__init__(mean_absolute_error, name=name, reduction=reduction)",
    "docstring": "Initializes instance. Args: reduction: Type of to apply to loss. Default value is . indicates that the reduction option will be determined by the usage context. For almost all cases this defaults to . When used with , outside of built-in training loops such as and , using or will raise an error. Please see this custom training [tutorial]( for more details. name: Optional name for the instance. Defaults to 'mean_absolute_error'.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\losses.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:reduction arg:name arguments arg arg arg Call Call"
  },
  {
    "library": "sphinx",
    "name": "GlossarySorter",
    "source_code": "class GlossarySorter(SphinxTransform):\n    default_priority = 500\n\n    def apply(self, **kwargs: Any) -> None:\n        for glossary in self.document.findall(addnodes.glossary):\n            if glossary['sorted']:\n                definition_list = cast('nodes.definition_list', glossary[0])\n                definition_list[:] = sorted(definition_list, key=lambda item: unicodedata.normalize('NFD', cast('nodes.term', item)[0].astext().lower()))",
    "docstring": "Sort glossaries that have the `` flag.",
    "type": "class",
    "file_path": "sphinx\\sphinx\\transforms\\__init__.py",
    "ast_data": "ClassDef name:GlossarySorter Assign FunctionDef name:apply arg:self arguments arg arg For Call If Assign Call Assign Call arguments arg Call Call Call Call"
  },
  {
    "library": "pygame",
    "name": "RenderUpdates",
    "source_code": "class RenderUpdates(Group):\n\n    def draw(self, surface, bgsurf=None, special_flags=0):\n        surface_blit = surface.blit\n        dirty = self.lostsprites\n        self.lostsprites = []\n        dirty_append = dirty.append\n        for sprite in self.sprites():\n            old_rect = self.spritedict[sprite]\n            new_rect = surface_blit(sprite.image, sprite.rect, None, special_flags)\n            if old_rect:\n                if new_rect.colliderect(old_rect):\n                    dirty_append(new_rect.union(old_rect))\n                else:\n                    dirty_append(new_rect)\n                    dirty_append(old_rect)\n            else:\n                dirty_append(new_rect)\n            self.spritedict[sprite] = new_rect\n        return dirty",
    "docstring": "Group class that tracks dirty updates pygame.sprite.RenderUpdates(*sprites): return RenderUpdates This class is derived from pygame.sprite.Group(). It has an enhanced draw method that tracks the changed areas of the screen.",
    "type": "class",
    "file_path": "pygame\\src_py\\sprite.py",
    "ast_data": "ClassDef name:RenderUpdates FunctionDef name:draw arg:self arg:surface arg:bgsurf arg:special_flags arguments arg arg arg arg Assign Assign Assign Assign For Call Assign Assign Call If If Call Call Call Call Call Call Assign Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "TemporarilyPopInterpreterStackCtxManagerVariable",
    "source_code": "class TemporarilyPopInterpreterStackCtxManagerVariable(ContextWrappingVariable):\n\n    @staticmethod\n    def create(tx: 'InstructionTranslator', target_values, **kwargs):\n        return TemporarilyPopInterpreterStackCtxManagerVariable(target_values=target_values, initial_values=None, **kwargs)\n\n    def enter(self, tx):\n        self.saved = torch._C._functorch.pop_dynamic_layer_stack()\n        self.set_cleanup_hook(tx, lambda: torch._C._functorch.push_dynamic_layer_stack(self.saved))\n        self.proxy = tx.output.create_node('call_function', torch._C._functorch.pop_dynamic_layer_stack, (), {})\n        return variables.ConstantVariable.create(None)\n\n    def exit(self, tx: 'InstructionTranslator', *args):\n        self.cleanup()\n        tx.output.create_node('call_function', torch._C._functorch.push_dynamic_layer_stack, (self.proxy,), {})\n        return variables.ConstantVariable.create(None)",
    "docstring": "represents torch._functorch.pyfunction.temporarily_pop_interpreter_stack()",
    "type": "class",
    "file_path": "pytorch\\torch\\_dynamo\\variables\\ctx_manager.py",
    "ast_data": "ClassDef name:TemporarilyPopInterpreterStackCtxManagerVariable FunctionDef name:create arg:tx arg:target_values arguments arg arg arg Return return:yes Call FunctionDef name:enter arg:self arg:tx arguments arg arg Assign Call Call arguments Call Assign Call Return return:yes Call FunctionDef name:exit arg:self arg:tx arguments arg arg arg Call Call Return return:yes Call"
  },
  {
    "library": "cherrypy",
    "name": "get_namespaces",
    "source_code": "def get_namespaces(self):\n    s = extrapolate_statistics(logging.statistics)\n    for title, ns in sorted(s.items()):\n        scalars = []\n        collections = []\n        ns_fmt = self.formatting.get(title, {})\n        for k, v in sorted(ns.items()):\n            fmt = ns_fmt.get(k, {})\n            if isinstance(v, dict):\n                headers, subrows = self.get_dict_collection(v, fmt)\n                collections.append((k, ['ID'] + headers, subrows))\n            elif isinstance(v, (list, tuple)):\n                headers, subrows = self.get_list_collection(v, fmt)\n                collections.append((k, headers, subrows))\n            else:\n                format = ns_fmt.get(k, missing)\n                if format is None:\n                    continue\n                if hasattr(format, '__call__'):\n                    v = format(v)\n                elif format is not missing:\n                    v = format % v\n                scalars.append((k, v))\n        yield (title, scalars, collections)",
    "docstring": "Yield (title, scalars, collections) for each namespace.",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\lib\\cpstats.py",
    "ast_data": "FunctionDef name:get_namespaces arg:self arguments arg Assign Call For Call Call Assign Assign Assign Call For Call Call Assign Call If Call Assign Call Call If Call Assign Call Call Assign Call If Compare If Call Assign Call If Compare Assign Call"
  },
  {
    "library": "tensorflow",
    "name": "wait_for_stop",
    "source_code": "def wait_for_stop(self):\n    self._coord.wait_for_stop()",
    "docstring": "Block waiting for the coordinator to stop.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\training\\supervisor.py",
    "ast_data": "FunctionDef name:wait_for_stop arg:self arguments arg Call"
  },
  {
    "library": "scikit-learn",
    "name": "score",
    "source_code": "def score(self, X, y, **params):\n    check_is_fitted(self)\n    X = validate_data(self, X, ensure_all_finite=False, accept_sparse=True, reset=False)\n    _raise_for_params(params, self, 'score')\n    if _routing_enabled():\n        score_params = process_routing(self, 'score', **params).estimator['score']\n    else:\n        score_params = {}\n    return self.estimator_.score(X, y, **score_params)",
    "docstring": "Return the score of the prediction. This is a wrapper for . Parameters ---------- X : (array-like or sparse matrix} of shape (n_samples, n_features) Training data. y : array-like of shape (n_samples,) or (n_samples, n_targets) Target values. **params : dict Parameters routed to the method of the sub-estimator via the metadata routing API. .. versionadded:: 1.5 Only available if is set. See :ref: for more details. Returns ------- z : float Score of the prediction.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\linear_model\\_ransac.py",
    "ast_data": "FunctionDef name:score arg:self arg:X arg:y arguments arg arg arg arg Call Assign Call Call If Call Assign Call Assign Return return:yes Call"
  },
  {
    "library": "django",
    "name": "EmailBackend",
    "source_code": "class EmailBackend(BaseEmailBackend):\n\n    def __init__(self, *args, **kwargs):\n        super().__init__(*args, **kwargs)\n        if not hasattr(mail, 'outbox'):\n            mail.outbox = []\n\n    def send_messages(self, messages):\n        msg_count = 0\n        for message in messages:\n            message.message()\n            mail.outbox.append(copy.deepcopy(message))\n            msg_count += 1\n        return msg_count",
    "docstring": "An email backend for use during test sessions. The test connection stores email messages in a dummy outbox, rather than sending them out on the wire. The dummy outbox is accessible through the outbox instance attribute.",
    "type": "class",
    "file_path": "django\\django\\core\\mail\\backends\\locmem.py",
    "ast_data": "ClassDef name:EmailBackend FunctionDef name:__init__ arg:self arguments arg arg arg Call Call If Call Assign FunctionDef name:send_messages arg:self arg:messages arguments arg arg Assign For Call Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "expand",
    "source_code": "def expand(self, batch_shape: _size, _instance=None):\n    raise NotImplementedError",
    "docstring": "Returns a new distribution instance (or populates an existing instance provided by a derived class) with batch dimensions expanded to . This method calls :class: on the distribution's parameters. As such, this does not allocate new memory for the expanded distribution instance. Additionally, this does not repeat any args checking or parameter broadcasting in , when an instance is first created. Args: batch_shape (torch.Size): the desired expanded size. _instance: new instance provided by subclasses that need to override . Returns: New distribution instance with batch dimensions expanded to .",
    "type": "method",
    "file_path": "pytorch\\torch\\distributions\\distribution.py",
    "ast_data": "FunctionDef name:expand arg:self arg:batch_shape arg:_instance arguments arg arg arg Raise"
  },
  {
    "library": "scikit-learn",
    "name": "get_feature_names_out",
    "source_code": "def get_feature_names_out(self, input_features=None):\n    check_is_fitted(self, attributes='n_features_in_')\n    return _check_feature_names_in(self, input_features)",
    "docstring": "Get output feature names for transformation. Parameters ---------- input_features : array-like of str or None, default=None Input features. - If is , then is used as feature names in. If is not defined, then the following input feature names are generated: . - If is an array-like, then must match if is defined. Returns ------- feature_names_out : ndarray of str objects Same as input features.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\base.py",
    "ast_data": "FunctionDef name:get_feature_names_out arg:self arg:input_features arguments arg arg Call Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "Bukin04",
    "source_code": "class Bukin04(Benchmark):\n\n    def __init__(self, dimensions=2):\n        Benchmark.__init__(self, dimensions)\n        self._bounds = [(-15.0, -5.0), (-3.0, 3.0)]\n        self.global_optimum = [[-10.0, 0.0]]\n        self.fglob = 0.0\n\n    def fun(self, x, *args):\n        self.nfev += 1\n        return 100 * x[1] ** 2 + 0.01 * abs(x[0] + 10)",
    "docstring": "Bukin04 objective function. The Bukin04 [1]_ global optimization problem is a multimodal minimization problem defined as follows: .. math:: f_{\\text{Bukin04}}(x) = 100 x_2^{2} + 0.01 \\lvert{x_1 + 10} \\rvert with :math: *Global optimum*: :math: for :math: .. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions For Global Optimization Problems Int. Journal of Mathematical Modelling and Numerical Optimisation, 2013, 4, 150-194.",
    "type": "class",
    "file_path": "scipy\\benchmarks\\benchmarks\\go_benchmark_functions\\go_funcs_B.py",
    "ast_data": "ClassDef name:Bukin04 FunctionDef name:__init__ arg:self arg:dimensions arguments arg arg Call Assign Assign Assign FunctionDef name:fun arg:self arg:x arguments arg arg arg Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "press_zoom",
    "source_code": "def press_zoom(self, event):\n    if event.button not in [MouseButton.LEFT, MouseButton.RIGHT] or event.x is None or event.y is None:\n        return\n    axes = self._start_event_axes_interaction(event, method='zoom')\n    if not axes:\n        return\n    id_zoom = self.canvas.mpl_connect('motion_notify_event', self.drag_zoom)\n    parent_ax = axes[0]\n    if hasattr(parent_ax, '_colorbar'):\n        cbar = parent_ax._colorbar.orientation\n    else:\n        cbar = None\n    self._zoom_info = self._ZoomInfo(button=event.button, start_xy=(event.x, event.y), axes=axes, cid=id_zoom, cbar=cbar)",
    "docstring": "Callback for mouse button press in zoom to rect mode.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\backend_bases.py",
    "ast_data": "FunctionDef name:press_zoom arg:self arg:event arguments arg arg If BoolOp Compare Compare Compare Return return:no Assign Call If Return return:no Assign Call Assign If Call Assign Assign Assign Call"
  },
  {
    "library": "pytorch",
    "name": "_process_args",
    "source_code": "def _process_args(args, kwargs) -> tuple[torch.Tensor, ...]:\n    args = _flatten_inputs(args, kwargs)\n    args = _remove_none_from_inputs(args)\n    args = _remove_non_tensor(args)\n    args = _convert_complex_to_real_representation(args)\n    return args",
    "docstring": "Process input arguments for the ONNX model.",
    "type": "function",
    "file_path": "pytorch\\torch\\onnx\\_internal\\exporter\\_onnx_program.py",
    "ast_data": "FunctionDef name:_process_args arg:args arg:kwargs arguments arg arg Assign Call Assign Call Assign Call Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "build_shuffle_then_ring",
    "source_code": "def build_shuffle_then_ring(input_tensors, gather_devices, subdiv, red_n_op, red_op, un_op=None):\n\n    def upper_builder(tensors):\n        return build_ring_all_reduce(tensors, len(tensors), subdiv, [0], red_op, un_op)\n\n    def upper_level_f(tensors):\n        return _reduce_non_singleton(tensors, upper_builder, un_op)\n    return _build_shuffle_hybrid(input_tensors, gather_devices, red_n_op, upper_level_f)",
    "docstring": "Construct hybrid of Shuffle within workers, Ring across workers.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\v1\\all_reduce.py",
    "ast_data": "FunctionDef name:build_shuffle_then_ring arg:input_tensors arg:gather_devices arg:subdiv arg:red_n_op arg:red_op arg:un_op arguments arg arg arg arg arg arg FunctionDef name:upper_builder arg:tensors arguments arg Return return:yes Call Call FunctionDef name:upper_level_f arg:tensors arguments arg Return return:yes Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_gather_saveables_for_checkpoint",
    "source_code": "def _gather_saveables_for_checkpoint(self):\n\n    def _saveable_factory(name=self._common_name):\n        return _SyncOnReadSaveable(self, name)\n    return {trackable.VARIABLE_VALUE_KEY: _saveable_factory}",
    "docstring": "Overrides Trackable method. This allows both name-based and object-based save and restore of s. Returns: A dictionary mapping attribute names to factories.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\values.py",
    "ast_data": "FunctionDef name:_gather_saveables_for_checkpoint arg:self arguments arg FunctionDef name:_saveable_factory arg:name arguments arg Return return:yes Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "name",
    "source_code": "@property\ndef name(self) -> str:\n    return 'etcd-v2'",
    "docstring": "See base class.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\elastic\\rendezvous\\etcd_rendezvous_backend.py",
    "ast_data": "FunctionDef name:name arg:self arguments arg Return return:yes"
  },
  {
    "library": "pandas",
    "name": "get_versions_from_ci",
    "source_code": "def get_versions_from_ci(content: list[str]) -> tuple[dict[str, str], dict[str, str]]:\n    seen_required = False\n    seen_optional = False\n    seen_test = False\n    required_deps = {}\n    optional_deps = {}\n    for line in content:\n        if '# test dependencies' in line:\n            seen_test = True\n        elif seen_test and '- pytest>=' in line:\n            package, version = line.strip().split('>=')\n            package = package[2:]\n            optional_deps[package.casefold()] = version\n        elif '# required dependencies' in line:\n            seen_required = True\n        elif '# optional dependencies' in line:\n            seen_optional = True\n        elif '- pip:' in line:\n            continue\n        elif seen_required and line.strip():\n            if '==' in line:\n                package, version = line.strip().split('==', maxsplit=1)\n            else:\n                package, version = line.strip().split('=', maxsplit=1)\n            package = package.split()[-1]\n            if package in EXCLUDE_DEPS:\n                continue\n            if not seen_optional:\n                required_deps[package.casefold()] = version\n            else:\n                optional_deps[package.casefold()] = version\n    return (required_deps, optional_deps)",
    "docstring": "Min versions in CI job for testing all optional dependencies.",
    "type": "function",
    "file_path": "pandas\\scripts\\validate_min_versions_in_sync.py",
    "ast_data": "FunctionDef name:get_versions_from_ci arg:content arguments arg Assign Assign Assign Assign Assign For If Compare Assign If BoolOp Compare Assign Call Call Assign Assign Call If Compare Assign If Compare Assign If Compare If BoolOp Call If Compare Assign Call Call Assign Call Call Assign Call If Compare If Assign Call Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_select_training_loop",
    "source_code": "def _select_training_loop(self, inputs):\n    if isinstance(inputs, (iterator_ops.Iterator, iterator_ops.IteratorBase)):\n        raise ValueError('For performance reasons Keras `fit`, `evaluate` and`predict` accept tf.data `Datasets` as input but not iterators that have been manually generated from Datasets by users. Please directly pass in the original `Dataset` object instead of passing in `iter(dataset)`.')\n    if self._distribution_strategy:\n        if self._in_multi_worker_mode():\n            return training_distributed_v1.DistributionMultiWorkerTrainingLoop(training_distributed_v1.DistributionSingleWorkerTrainingLoop())\n        else:\n            return training_distributed_v1.DistributionSingleWorkerTrainingLoop()\n    if data_utils.is_generator_or_sequence(inputs):\n        return training_generator_v1.GeneratorOrSequenceTrainingLoop()\n    if training_utils_v1.is_eager_dataset_or_iterator(inputs):\n        return training_generator_v1.EagerDatasetOrIteratorTrainingLoop()\n    if self.run_eagerly:\n        return training_generator_v1.GeneratorLikeTrainingLoop()\n    else:\n        return training_arrays_v1.ArrayLikeTrainingLoop()",
    "docstring": "Select training loop for fit/eval/predict based on the inputs.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\training_v1.py",
    "ast_data": "FunctionDef name:_select_training_loop arg:self arg:inputs arguments arg arg If Call Raise Call If If Call Return return:yes Call Call Return return:yes Call If Call Return return:yes Call If Call Return return:yes Call If Return return:yes Call Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "get_projection_names",
    "source_code": "def get_projection_names(self):\n    return sorted(self._all_projection_types)",
    "docstring": "Return the names of all projections currently registered.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\projections\\__init__.py",
    "ast_data": "FunctionDef name:get_projection_names arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "check_extension",
    "source_code": "@classmethod\ndef check_extension(cls, ext: str) -> Literal[True]:\n    if ext.startswith('.'):\n        ext = ext[1:]\n    if not any((ext in extension for extension in cls._supported_extensions)):\n        raise ValueError(f\"Invalid extension for engine '{cls.engine}': '{ext}'\")\n    return True",
    "docstring": "checks that path's extension against the Writer's supported extensions. If it isn't supported, raises UnsupportedFiletypeError.",
    "type": "method",
    "file_path": "pandas\\pandas\\io\\excel\\_base.py",
    "ast_data": "FunctionDef name:check_extension arg:cls arg:ext arguments arg arg If Call Assign If Call Compare Raise Call Return return:yes"
  },
  {
    "library": "pandas",
    "name": "_BaseInfo",
    "source_code": "class _BaseInfo(ABC):\n    data: DataFrame | Series\n    memory_usage: bool | str\n\n    @property\n    @abstractmethod\n    def dtypes(self) -> Iterable[Dtype]:\n        pass\n\n    @property\n    @abstractmethod\n    def dtype_counts(self) -> Mapping[str, int]:\n        pass\n\n    @property\n    @abstractmethod\n    def non_null_counts(self) -> list[int] | Series:\n        pass\n\n    @property\n    @abstractmethod\n    def memory_usage_bytes(self) -> int:\n        pass\n\n    @property\n    def memory_usage_string(self) -> str:\n        return f'{_sizeof_fmt(self.memory_usage_bytes, self.size_qualifier)}\\n'\n\n    @property\n    def size_qualifier(self) -> str:\n        size_qualifier = ''\n        if self.memory_usage:\n            if self.memory_usage != 'deep':\n                if 'object' in self.dtype_counts or self.data.index._is_memory_usage_qualified:\n                    size_qualifier = '+'\n        return size_qualifier\n\n    @abstractmethod\n    def render(self, *, buf: WriteBuffer[str] | None, max_cols: int | None, verbose: bool | None, show_counts: bool | None) -> None:\n        pass",
    "docstring": "Base class for DataFrameInfo and SeriesInfo. Parameters ---------- data : DataFrame or Series Either dataframe or series. memory_usage : bool or str, optional If \"deep\", introspect the data deeply by interrogating object dtypes for system-level memory consumption, and include it in the returned values.",
    "type": "class",
    "file_path": "pandas\\pandas\\io\\formats\\info.py",
    "ast_data": "ClassDef name:_BaseInfo FunctionDef name:dtypes arg:self arguments arg FunctionDef name:dtype_counts arg:self arguments arg FunctionDef name:non_null_counts arg:self arguments arg FunctionDef name:memory_usage_bytes arg:self arguments arg FunctionDef name:memory_usage_string arg:self arguments arg Return return:yes Call FunctionDef name:size_qualifier arg:self arguments arg Assign If If Compare If BoolOp Compare Assign Return return:yes FunctionDef name:render arg:self arguments arg arg arg arg arg"
  },
  {
    "library": "pytorch",
    "name": "_reduce_graph_module",
    "source_code": "def _reduce_graph_module(self, gm: torch.fx.GraphModule) -> tuple[Any, tuple[dict[str, Any], str]]:\n    fn, (data, imports) = gm.__reduce__()\n    code = data['_code']\n    code = re.sub('kernel_idx = \\\\d+', '', code)\n    code = re.sub('constant_args_idx = \\\\d+', '', code)\n    data['_code'] = code\n    return (fn, (data, imports))",
    "docstring": "Custom reducer for graph module to handle irrelevant data for user defined triton kernels Essentially what we are doing here is a huge hack where user defined triton kernel contain a dynamo time side table and the arguments to the call_function are indicies into this side table. These arguments are not for hashing purposes since we included the source code into the cache key and the numbers are prone to give false negatives due to ordering.",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\codecache.py",
    "ast_data": "FunctionDef name:_reduce_graph_module arg:self arg:gm arguments arg arg Assign Call Assign Assign Call Assign Call Assign Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_write_to_json",
    "source_code": "def _write_to_json(self, output_dir: str) -> None:\n    records = []\n    for entry in self.results:\n        metric_name = entry[1]\n        value = entry[2]\n        if not metric_name or value is None:\n            continue\n        records.append({'benchmark': {'name': 'pr_time_benchmarks', 'mode': self.mode(), 'extra_info': {'is_dynamic': self.is_dynamic(), 'device': self.device(), 'description': self.description()}}, 'model': {'name': self.name(), 'type': self.category(), 'backend': self.backend()}, 'metric': {'name': metric_name, 'benchmark_values': [value]}})\n    with open(os.path.join(output_dir, f'{self.name()}.json'), 'w') as f:\n        json.dump(records, f)",
    "docstring": "Write the result into JSON format, so that it can be uploaded to the benchmark database to be displayed on OSS dashboard. The JSON format is defined at",
    "type": "method",
    "file_path": "pytorch\\benchmarks\\dynamo\\pr_time_benchmarks\\benchmarks\\benchmark_base.py",
    "ast_data": "FunctionDef name:_write_to_json arg:self arg:output_dir arguments arg arg Assign For Assign Assign If BoolOp Compare Call Call Call Call Call Call Call Call With Call Call Call Call"
  },
  {
    "library": "matplotlib",
    "name": "drag_pan",
    "source_code": "def drag_pan(self, event):\n    if event.buttons != {self._pan_info.button}:\n        self.release_pan(None)\n        return\n    for ax in self._pan_info.axes:\n        ax.drag_pan(self._pan_info.button, event.key, event.x, event.y)\n    self.canvas.draw_idle()",
    "docstring": "Callback for dragging in pan/zoom mode.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\backend_bases.py",
    "ast_data": "FunctionDef name:drag_pan arg:self arg:event arguments arg arg If Compare Call Return return:no For Call Call"
  },
  {
    "library": "pytorch",
    "name": "_check_single_tensor",
    "source_code": "def _check_single_tensor(param, param_name) -> None:\n    if not isinstance(param, torch.Tensor):\n        raise TypeError(f'Invalid function argument. Expected parameter `{param_name}` of type torch.Tensor\\n             but got {type(param)} instead.')",
    "docstring": "Check that the parameter `` is a single tensor.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\distributed_c10d.py",
    "ast_data": "FunctionDef name:_check_single_tensor arg:param arg:param_name arguments arg arg If Call Raise Call Call"
  },
  {
    "library": "matplotlib",
    "name": "update_datalim",
    "source_code": "def update_datalim(self, xys, **kwargs):\n    pass",
    "docstring": "Not implemented in .",
    "type": "method",
    "file_path": "matplotlib\\lib\\mpl_toolkits\\mplot3d\\axes3d.py",
    "ast_data": "FunctionDef name:update_datalim arg:self arg:xys arguments arg arg arg"
  },
  {
    "library": "matplotlib",
    "name": "get_vertical",
    "source_code": "def get_vertical(self):\n    return self._vertical",
    "docstring": "Return vertical sizes.",
    "type": "method",
    "file_path": "matplotlib\\lib\\mpl_toolkits\\axes_grid1\\axes_divider.py",
    "ast_data": "FunctionDef name:get_vertical arg:self arguments arg Return return:yes"
  },
  {
    "library": "sphinx",
    "name": "SphinxPostTransform",
    "source_code": "class SphinxPostTransform(SphinxTransform):\n    builders: tuple[str, ...] = ()\n    formats: tuple[str, ...] = ()\n\n    def apply(self, **kwargs: Any) -> None:\n        if self.is_supported():\n            self.run(**kwargs)\n\n    def is_supported(self) -> bool:\n        if self.builders and self.app.builder.name not in self.builders:\n            return False\n        return not self.formats or self.app.builder.format in self.formats\n\n    def run(self, **kwargs: Any) -> None:\n        raise NotImplementedError",
    "docstring": "A base class of post-transforms. Post transforms are invoked to modify the document to restructure it for outputting. They resolve references, convert images, do special transformation for each output formats and so on. This class helps to implement these post transforms.",
    "type": "class",
    "file_path": "sphinx\\sphinx\\transforms\\post_transforms\\__init__.py",
    "ast_data": "ClassDef name:SphinxPostTransform FunctionDef name:apply arg:self arguments arg arg If Call Call FunctionDef name:is_supported arg:self arguments arg If BoolOp Compare Return return:yes Return return:yes BoolOp Compare FunctionDef name:run arg:self arguments arg arg Raise"
  },
  {
    "library": "pytorch",
    "name": "_CustomBuiltin",
    "source_code": "class _CustomBuiltin(NamedTuple):\n    import_str: str\n    obj: Any",
    "docstring": "Additional objs that we add to every graph's globals. The repr() for some standard library objects is not valid Python code without an import. For common objects of this sort, we bundle them in the globals of every FX graph.",
    "type": "class",
    "file_path": "pytorch\\torch\\fx\\graph.py",
    "ast_data": "ClassDef name:_CustomBuiltin"
  },
  {
    "library": "pytorch",
    "name": "process_signature",
    "source_code": "def process_signature(line: str) -> list[str]:\n    tokens: list[str] = split_outside_bracket(line)\n    for i, token in enumerate(tokens):\n        tokens[i] = token.strip(' ')\n        if token == 'cls':\n            tokens[i] = 'self'\n        elif i > 0 and 'self' == tokens[i - 1] and (tokens[i][0] != '*'):\n            tokens[i] = ''\n        elif 'Callable =' in token:\n            head = token.rpartition('=')[0]\n            tokens[i] = head.strip(' ') + ' = ...'\n    tokens = [t for t in tokens if t != '']\n    return tokens",
    "docstring": "Clean up a given raw function signature. This includes removing the self-referential datapipe argument, default arguments of input functions, newlines, and spaces.",
    "type": "function",
    "file_path": "pytorch\\torch\\utils\\data\\datapipes\\gen_pyi.py",
    "ast_data": "FunctionDef name:process_signature arg:line arguments arg Call For Call Assign Call If Compare Assign If BoolOp Compare Compare Compare Assign If Compare Assign Call Assign Call Assign Compare Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "svd_timing",
    "source_code": "def svd_timing(X, n_comps, n_iter, n_oversamples, power_iteration_normalizer='auto', method=None):\n    print('... running SVD ...')\n    if method != 'fbpca':\n        gc.collect()\n        t0 = time()\n        U, mu, V = randomized_svd(X, n_comps, n_oversamples=n_oversamples, n_iter=n_iter, power_iteration_normalizer=power_iteration_normalizer, random_state=random_state, transpose=False)\n        call_time = time() - t0\n    else:\n        gc.collect()\n        t0 = time()\n        U, mu, V = fbpca.pca(X, n_comps, raw=True, n_iter=n_iter, l=n_oversamples + n_comps)\n        call_time = time() - t0\n    return (U, mu, V, call_time)",
    "docstring": "Measure time for decomposition",
    "type": "function",
    "file_path": "scikit-learn\\benchmarks\\bench_plot_randomized_svd.py",
    "ast_data": "FunctionDef name:svd_timing arg:X arg:n_comps arg:n_iter arg:n_oversamples arg:power_iteration_normalizer arg:method arguments arg arg arg arg arg arg Call If Compare Call Assign Call Assign Call Assign Call Call Assign Call Assign Call Assign Call Return return:yes"
  },
  {
    "library": "pandas",
    "name": "_slice",
    "source_code": "def _slice(self, slicer: slice | npt.NDArray[np.bool_] | npt.NDArray[np.intp]) -> ArrayLike:\n    return self.values[slicer]",
    "docstring": "return a slice of my values",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\internals\\blocks.py",
    "ast_data": "FunctionDef name:_slice arg:self arg:slicer arguments arg arg Return return:yes"
  },
  {
    "library": "scrapy",
    "name": "connectionMade",
    "source_code": "def connectionMade(self) -> None:\n    self.setTimeout(self.IDLE_TIMEOUT)\n    assert self.transport is not None\n    destination = self.transport.getPeer()\n    self.metadata['ip_address'] = ipaddress.ip_address(destination.host)\n    self.conn.initiate_connection()\n    self._write_to_transport()",
    "docstring": "Called by Twisted when the connection is established. We can start sending some data now: we should open with the connection preamble.",
    "type": "method",
    "file_path": "scrapy\\scrapy\\core\\http2\\protocol.py",
    "ast_data": "FunctionDef name:connectionMade arg:self arguments arg Call Compare Assign Call Assign Call Call Call"
  },
  {
    "library": "scikit-learn",
    "name": "is_pydata_sparse_array",
    "source_code": "def is_pydata_sparse_array(x: object) -> TypeIs[sparse.SparseArray]:\n    cls = cast(Hashable, type(x))\n    return _issubclass_fast(cls, 'sparse', 'SparseArray')",
    "docstring": "Return True if is an array from the package. This function does not import if it has not already been imported and is therefore cheap to use. See Also -------- array_namespace is_array_api_obj is_numpy_array is_cupy_array is_torch_array is_ndonnx_array is_dask_array is_jax_array",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\externals\\array_api_compat\\common\\_helpers.py",
    "ast_data": "FunctionDef name:is_pydata_sparse_array arg:x arguments arg Assign Call Call Return return:yes Call"
  },
  {
    "library": "authlib",
    "name": "get_username",
    "source_code": "def get_username(self, user_id: str) -> str:\n    return None",
    "docstring": "Returns an username from a user ID. Developers MAY re-implement this method:: def get_username(self, user_id): return User.get(id=user_id).username",
    "type": "method",
    "file_path": "authlib\\authlib\\oauth2\\rfc9068\\introspection.py",
    "ast_data": "FunctionDef name:get_username arg:self arg:user_id arguments arg arg Return return:no"
  },
  {
    "library": "pandas",
    "name": "dtypes",
    "source_code": "@property\ndef dtypes(self):\n    data = self._mgr.get_dtypes()\n    return self._constructor_sliced(data, index=self._info_axis, dtype=np.object_)",
    "docstring": "Return the dtypes in the DataFrame. This returns a Series with the data type of each column. The result's index is the original DataFrame's columns. Columns with mixed types are stored with the `the User Guide ` for more. Returns ------- pandas.Series The data type of each column. See Also -------- Series.dtypes : Return the dtype object of the underlying data. Examples -------- >>> df = pd.DataFrame( ... { ... \"float\": [1.0], ... \"int\": [1], ... \"datetime\": [pd.Timestamp(\"20180310\")], ... \"string\": [\"foo\"], ... } ... ) >>> df.dtypes float float64 int int64 datetime datetime64[s] string object dtype: object",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\generic.py",
    "ast_data": "FunctionDef name:dtypes arg:self arguments arg Assign Call Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "_str_escape",
    "source_code": "def _str_escape(x, escape):\n    if isinstance(x, str):\n        if escape == 'html':\n            return escape_html(x)\n        elif escape == 'latex':\n            return _escape_latex(x)\n        elif escape == 'latex-math':\n            return _escape_latex_math(x)\n        else:\n            raise ValueError(f\"`escape` only permitted in {{'html', 'latex', 'latex-math'}}, got {escape}\")\n    return x",
    "docstring": "if escaping: only use on str, else return input",
    "type": "function",
    "file_path": "pandas\\pandas\\io\\formats\\style_render.py",
    "ast_data": "FunctionDef name:_str_escape arg:x arg:escape arguments arg arg If Call If Compare Return return:yes Call If Compare Return return:yes Call If Compare Return return:yes Call Raise Call Return return:yes"
  },
  {
    "library": "django",
    "name": "check_arg_errcode",
    "source_code": "def check_arg_errcode(result, func, cargs, cpl=False):\n    check_err(arg_byref(cargs), cpl=cpl)\n    return result",
    "docstring": "The error code is returned in the last argument, by reference. Check its value with before returning the result.",
    "type": "function",
    "file_path": "django\\django\\contrib\\gis\\gdal\\prototypes\\errcheck.py",
    "ast_data": "FunctionDef name:check_arg_errcode arg:result arg:func arg:cargs arg:cpl arguments arg arg arg arg Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "LinearLeakyReLU",
    "source_code": "class LinearLeakyReLU(_FusedModule):\n\n    def __init__(self, linear, leaky_relu):\n        assert type(linear) == Linear and type(leaky_relu) == torch.nn.LeakyReLU, f'Incorrect types for input modules{type(linear)}{type(leaky_relu)}'\n        super().__init__(linear, leaky_relu)",
    "docstring": "This is a sequential container which calls the Linear and LeakyReLU modules. During quantization this will be replaced with the corresponding fused module.",
    "type": "class",
    "file_path": "pytorch\\torch\\ao\\nn\\intrinsic\\modules\\fused.py",
    "ast_data": "ClassDef name:LinearLeakyReLU FunctionDef name:__init__ arg:self arg:linear arg:leaky_relu arguments arg arg arg BoolOp Compare Call Compare Call Call Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "__call__",
    "source_code": "def __call__(self, shape, dtype=None, **kwargs):\n    _validate_kwargs(self.__class__.__name__, kwargs, support_partition=False)\n    dtype = _assert_float_dtype(_get_dtype(dtype))\n    if len(shape) < 2:\n        raise ValueError('The tensor to initialize must be at least two-dimensional')\n    num_rows = 1\n    for dim in shape[:-1]:\n        num_rows *= dim\n    num_cols = shape[-1]\n    flat_shape = (max(num_cols, num_rows), min(num_cols, num_rows))\n    a = self._random_generator.random_normal(flat_shape, dtype=dtype)\n    q, r = gen_linalg_ops.qr(a, full_matrices=False)\n    d = array_ops.tensor_diag_part(r)\n    q *= math_ops.sign(d)\n    if num_rows < num_cols:\n        q = array_ops.matrix_transpose(q)\n    return self.gain * array_ops.reshape(q, shape)",
    "docstring": "Returns a tensor object initialized to an orthogonal matrix. Args: shape: Shape of the tensor. dtype: Optional dtype of the tensor. Only floating point types are supported. If not specified, is used, which default to unless you configured it otherwise (via ) **kwargs: Additional keyword arguments.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\initializers\\initializers_v2.py",
    "ast_data": "FunctionDef name:__call__ arg:self arg:shape arg:dtype arguments arg arg arg arg Call Assign Call Call If Compare Call Raise Call Assign For Assign Assign Call Call Assign Call Assign Call Assign Call Call If Compare Assign Call Return return:yes Call"
  },
  {
    "library": "authlib",
    "name": "validate_response_modes_supported",
    "source_code": "def validate_response_modes_supported(self):\n    validate_array_value(self, 'response_modes_supported')",
    "docstring": "OPTIONAL. JSON array containing a list of the OAuth 2.0 \"response_mode\" values that this authorization server supports, as specified in \"OAuth 2.0 Multiple Response Type Encoding Practices\" [OAuth.Responses]. If omitted, the default is \"[\"query\", \"fragment\"]\". The response mode value \"form_post\" is also defined in \"OAuth 2.0 Form Post Response Mode\" [OAuth.Post].",
    "type": "method",
    "file_path": "authlib\\authlib\\oauth2\\rfc8414\\models.py",
    "ast_data": "FunctionDef name:validate_response_modes_supported arg:self arguments arg Call"
  },
  {
    "library": "pytorch",
    "name": "OptimStateKeyType",
    "source_code": "class OptimStateKeyType(Enum):\n    PARAM_NAME = auto()\n    PARAM_ID = auto()",
    "docstring": "Represents the type of key in an optimizer state-dict.",
    "type": "class",
    "file_path": "pytorch\\torch\\distributed\\fsdp\\fully_sharded_data_parallel.py",
    "ast_data": "ClassDef name:OptimStateKeyType Assign Call Assign Call"
  },
  {
    "library": "pytorch",
    "name": "_IterDataPipeMeta",
    "source_code": "class _IterDataPipeMeta(_DataPipeMeta):\n\n    def __new__(cls, name, bases, namespace, **kwargs):\n        if 'reset' in namespace:\n            reset_func = namespace['reset']\n\n            @functools.wraps(reset_func)\n            def conditional_reset(*args, **kwargs):\n                datapipe = args[0]\n                if datapipe._snapshot_state in (_SnapshotState.Iterating, _SnapshotState.NotStarted):\n                    datapipe._number_of_samples_yielded = 0\n                    datapipe._fast_forward_iterator = None\n                    reset_func(*args, **kwargs)\n                datapipe._snapshot_state = _SnapshotState.Iterating\n            namespace['reset'] = conditional_reset\n        if '__iter__' in namespace:\n            hook_iterator(namespace)\n        return super().__new__(cls, name, bases, namespace, **kwargs)",
    "docstring": "Metaclass for and inherits from . Add various functions for behaviors specific to .",
    "type": "class",
    "file_path": "pytorch\\torch\\utils\\data\\datapipes\\_typing.py",
    "ast_data": "ClassDef name:_IterDataPipeMeta FunctionDef name:__new__ arg:cls arg:name arg:bases arg:namespace arguments arg arg arg arg arg If Compare Assign FunctionDef name:conditional_reset arguments arg arg Assign If Compare Assign Assign Call Assign Call Assign If Compare Call Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "__init__",
    "source_code": "def __init__(self, generated_reports: OrderedDict[str, Any]):\n    self.generated_reports = generated_reports",
    "docstring": "Initializes the ModelReportVisualizer instance with the necessary reports. Args: generated_reports (Dict[str, Any]): The reports generated by the ModelReport class can also be a dictionary generated in another manner, as long as format is same",
    "type": "method",
    "file_path": "pytorch\\torch\\ao\\quantization\\fx\\_model_report\\model_report_visualizer.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:generated_reports arguments arg arg Assign"
  },
  {
    "library": "scipy",
    "name": "step",
    "source_code": "def step(self, f, jac, y0, t0, t1, f_params, jac_params):\n    raise NotImplementedError(f'{self.__class__.__name__} does not support step() method')",
    "docstring": "Make one integration step and return (y1,t1).",
    "type": "method",
    "file_path": "scipy\\scipy\\integrate\\_ode.py",
    "ast_data": "FunctionDef name:step arg:self arg:f arg:jac arg:y0 arg:t0 arg:t1 arg:f_params arg:jac_params arguments arg arg arg arg arg arg arg arg Raise Call"
  },
  {
    "library": "tensorflow",
    "name": "_add_fixed_len_feature",
    "source_code": "def _add_fixed_len_feature(self, key, feature):\n    if not feature.dtype:\n        raise ValueError(f'Missing type for feature {key}. Received feature={feature}.')\n    if feature.shape is None:\n        raise ValueError(f'Missing shape for feature {key}. Received feature={feature}.')\n    feature_tensor_shape = tensor_shape.as_shape(feature.shape)\n    if feature.shape and feature_tensor_shape.ndims and (feature_tensor_shape.dims[0].value is None):\n        raise ValueError(f'First dimension of shape for feature {key} unknown. Consider using FixedLenSequenceFeature. Received feature={feature}.')\n    if feature.shape is not None and (not feature_tensor_shape.is_fully_defined()):\n        raise ValueError(f'All dimensions of shape for feature {key} need to be known but received {feature.shape!s}.')\n    self.dense_keys.append(key)\n    self.dense_shapes.append(tensor_shape.as_shape(feature.shape))\n    self.dense_types.append(feature.dtype)\n    if feature.default_value is not None:\n        self.dense_defaults[key] = feature.default_value",
    "docstring": "Adds a FixedLenFeature.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\parsing_config.py",
    "ast_data": "FunctionDef name:_add_fixed_len_feature arg:self arg:key arg:feature arguments arg arg arg If Raise Call If Compare Raise Call Assign Call If BoolOp Compare Raise Call If BoolOp Compare Call Raise Call Call Call Call Call If Compare Assign"
  },
  {
    "library": "tensorflow",
    "name": "put",
    "source_code": "def put(self, values, name=None):\n    with ops.name_scope(name, '%s_put' % self._name, self._scope_vals(values)) as scope:\n        if not isinstance(values, (list, tuple, dict)):\n            values = [values]\n        indices = list(range(len(values)))\n        vals, _ = self._check_put_dtypes(values, indices)\n        with ops.colocate_with(self._coloc_op):\n            op = gen_data_flow_ops.stage(values=vals, shared_name=self._name, name=scope, capacity=self._capacity, memory_limit=self._memory_limit)\n        return op",
    "docstring": "Create an op that places a value into the staging area. This operation will block if the has reached its capacity. Args: values: A single tensor, a list or tuple of tensors, or a dictionary with tensor values. The number of elements must match the length of the list provided to the dtypes argument when creating the StagingArea. name: A name for the operation (optional). Returns: The created op. Raises: ValueError: If the number or type of inputs don't match the staging area.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\data_flow_ops.py",
    "ast_data": "FunctionDef name:put arg:self arg:values arg:name arguments arg arg arg With Call Call If Call Assign Assign Call Call Call Assign Call With Call Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "as_text",
    "source_code": "def as_text(bytes_or_text, encoding='utf-8'):\n    encoding = codecs.lookup(encoding).name\n    if isinstance(bytes_or_text, str):\n        return bytes_or_text\n    elif isinstance(bytes_or_text, bytes):\n        return bytes_or_text.decode(encoding)\n    else:\n        raise TypeError('Expected binary or unicode string, got %r' % bytes_or_text)",
    "docstring": "Converts any string-like python input types to unicode. Returns the input as a unicode string. Uses utf-8 encoding for text by default. Args: bytes_or_text: A , , or object. encoding: A string indicating the charset for decoding unicode. Returns: A (Python 2) or (Python 3) object. Raises: TypeError: If is not a binary or unicode string.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\util\\compat.py",
    "ast_data": "FunctionDef name:as_text arg:bytes_or_text arg:encoding arguments arg arg Assign Call If Call Return return:yes If Call Return return:yes Call Raise Call"
  },
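A quick self-contained demonstration of the conversion behavior described above, reusing the function's own logic:

```python
import codecs

def as_text(bytes_or_text, encoding='utf-8'):
    encoding = codecs.lookup(encoding).name  # normalize the encoding name
    if isinstance(bytes_or_text, str):
        return bytes_or_text
    if isinstance(bytes_or_text, bytes):
        return bytes_or_text.decode(encoding)
    raise TypeError('Expected binary or unicode string, got %r' % bytes_or_text)

assert as_text(b'caf\xc3\xa9') == 'café'  # bytes are decoded via utf-8
assert as_text('café') == 'café'          # str passes through unchanged
```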
  {
    "library": "pytorch",
    "name": "current_allocated_memory",
    "source_code": "def current_allocated_memory() -> int:\n    return torch._C._mps_currentAllocatedMemory()",
    "docstring": "Returns the current GPU memory occupied by tensors in bytes. .. note:: The returned size does not include cached allocations in memory pools of MPSAllocator.",
    "type": "function",
    "file_path": "pytorch\\torch\\mps\\__init__.py",
    "ast_data": "FunctionDef name:current_allocated_memory arguments Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "_get_submod_inputs",
    "source_code": "def _get_submod_inputs(self, main_module: torch.fx.GraphModule, submod_path: str) -> tuple[Tensors, Tensors]:\n    a_input = []\n    b_input = []\n    submodule = getattr(main_module, submod_path)\n    placeholders = [node.name for node in submodule.graph.nodes if node.op == 'placeholder']\n    if set(placeholders) <= self.a_outputs.keys():\n        for name in placeholders:\n            a_input.append(self.a_outputs[name])\n            b_input.append(self.b_outputs[name])\n    else:\n        if self.settings.accumulate_error:\n            print(f\"Can't find previous stored outputs named {placeholders}!\")\n\n        def get_inputs(self: torch.nn.Module, inputs: Any):\n            nonlocal a_input\n            a_input = inputs\n        handle = submodule.register_forward_pre_hook(get_inputs)\n        main_module(*self.sample_input)\n        handle.remove()\n        b_input = a_input\n    if not self.settings.accumulate_error:\n        return (a_input, a_input)\n    return (a_input, b_input)",
    "docstring": "Try get submodule inputs from stored outputs. If not found then use torch_glow.get_submod_inputs to get the inputs. If accumulate_error is False, use a_input for run_a() and run_b() otherwise use a_input for run_a and b_input for run_b. Args: main_module: Top-levlel fx module. submod_path: Path to the submodule we want to run and compare results. Returns: a_input: List of tensor(s) that will be used by run_a() as submodule inputs. b_input: List of tensor(s) that will be used by run_b() as submodule inputs.",
    "type": "method",
    "file_path": "pytorch\\torch\\fx\\passes\\net_min_base.py",
    "ast_data": "FunctionDef name:_get_submod_inputs arg:self arg:main_module arg:submod_path arguments arg arg arg Assign Assign Assign Call Assign Compare If Compare Call Call For Call Call If Call FunctionDef name:get_inputs arg:self arg:inputs arguments arg arg Assign Assign Call Call Call Assign If Return return:yes Return return:yes"
  },
  {
    "library": "kornia",
    "name": "create_meshgrid3d",
    "source_code": "def create_meshgrid3d(depth: int, height: int, width: int, normalized_coordinates: bool=True, device: Optional[torch.device]=None, dtype: Optional[torch.dtype]=None) -> Tensor:\n    xs: Tensor = torch.linspace(0, width - 1, width, device=device, dtype=dtype)\n    ys: Tensor = torch.linspace(0, height - 1, height, device=device, dtype=dtype)\n    zs: Tensor = torch.linspace(0, depth - 1, depth, device=device, dtype=dtype)\n    if normalized_coordinates:\n        xs = (xs / (width - 1) - 0.5) * 2\n        ys = (ys / (height - 1) - 0.5) * 2\n        zs = (zs / (depth - 1) - 0.5) * 2\n    base_grid = stack(torch_meshgrid([zs, xs, ys], indexing='ij'), dim=-1)\n    return base_grid.permute(0, 2, 1, 3).unsqueeze(0)",
    "docstring": "Generate a coordinate grid for an image. When the flag `[-1,1]torch.nn.functional.grid_sample[-1,1]torch.nn.functional.grid_sample(1, D, H, W, 3)`.",
    "type": "function",
    "file_path": "kornia\\kornia\\utils\\grid.py",
    "ast_data": "FunctionDef name:create_meshgrid3d arg:depth arg:height arg:width arg:normalized_coordinates arg:device arg:dtype arguments arg arg arg arg arg arg Call Call Call If Assign Assign Assign Assign Call Call Return return:yes Call Call"
  },
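A usage sketch, assuming kornia is installed; it checks the `(1, D, H, W, 3)` output shape and the normalized `[-1, 1]` coordinate range:

```python
import torch
from kornia.utils import create_meshgrid3d

grid = create_meshgrid3d(depth=2, height=3, width=4)
print(grid.shape)                            # torch.Size([1, 2, 3, 4, 3])
print(grid.min().item(), grid.max().item())  # -1.0 1.0 (normalized coords)
```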
  {
    "library": "django",
    "name": "_start_relational_field",
    "source_code": "def _start_relational_field(self, field):\n    self.indent(2)\n    self.xml.startElement('field', {'name': field.name, 'rel': field.remote_field.__class__.__name__, 'to': str(field.remote_field.model._meta)})",
    "docstring": "Output the element for relational fields.",
    "type": "method",
    "file_path": "django\\django\\core\\serializers\\xml_serializer.py",
    "ast_data": "FunctionDef name:_start_relational_field arg:self arg:field arguments arg arg Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "_exit_unshard_params_ctx",
    "source_code": "@no_type_check\ndef _exit_unshard_params_ctx(module: nn.Module, fsdp_state: _FSDPState) -> None:\n    fsdp_state._unshard_params_ctx[module].__exit__(None, None, None)\n    fsdp_state._unshard_params_ctx.pop(module)",
    "docstring": "A helper function to exit `` context.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\fsdp\\_state_dict_utils.py",
    "ast_data": "FunctionDef name:_exit_unshard_params_ctx arg:module arg:fsdp_state arguments arg arg Call Call"
  },
  {
    "library": "tensorflow",
    "name": "res_slice",
    "source_code": "def res_slice(self, ns, types_ns, node_or_slice, value, slice_):\n    raise NotImplementedError('subclasses must implement')",
    "docstring": "Resolves the return type of slice operation.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\autograph\\pyct\\static_analysis\\type_inference.py",
    "ast_data": "FunctionDef name:res_slice arg:self arg:ns arg:types_ns arg:node_or_slice arg:value arg:slice_ arguments arg arg arg arg arg arg Raise Call"
  },
  {
    "library": "django",
    "name": "get_linear_geometry",
    "source_code": "def get_linear_geometry(self):\n    return OGRGeometry(capi.get_linear_geom(self.ptr, 0, None))",
    "docstring": "Return a linear version of this geometry.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\gdal\\geometries.py",
    "ast_data": "FunctionDef name:get_linear_geometry arg:self arguments arg Return return:yes Call Call"
  },
  {
    "library": "scipy",
    "name": "ResamplingMethod",
    "source_code": "@dataclass\nclass ResamplingMethod:\n    n_resamples: int = 9999\n    batch: int = None",
    "docstring": "Configuration information for a statistical resampling method. Instances of this class can be passed into the parameter of some hypothesis test functions to perform a resampling or Monte Carlo version of the hypothesis test. Attributes ---------- n_resamples : int The number of resamples to perform or Monte Carlo samples to draw. batch : int, optional The number of resamples to process in each vectorized call to the statistic. Batch sizes >>1 tend to be faster when the statistic is vectorized, but memory usage scales linearly with the batch size. Default is ``, which processes all resamples in a single batch.",
    "type": "class",
    "file_path": "scipy\\scipy\\stats\\_resampling.py",
    "ast_data": "ClassDef name:ResamplingMethod"
  },
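Concrete subclasses such as `PermutationMethod` carry this configuration into hypothesis tests; a sketch assuming SciPy >= 1.11, where `pearsonr` accepts a `method` argument:

```python
import numpy as np
from scipy import stats

rng = np.random.default_rng(0)
x, y = rng.normal(size=20), rng.normal(size=20)

# n_resamples/batch come from the ResamplingMethod base dataclass.
method = stats.PermutationMethod(n_resamples=999)
res = stats.pearsonr(x, y, method=method)  # permutation-based p-value
print(res.statistic, res.pvalue)
```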
  {
    "library": "kornia",
    "name": "RandomTransplantation3D",
    "source_code": "class RandomTransplantation3D(RandomTransplantation, AugmentationBase3D):\n    pass",
    "docstring": "RandomTransplantation3D augmentation. 3D version of the :class: augmentation intended to be used with :class:. The interface is identical to the 2D version.",
    "type": "class",
    "file_path": "kornia\\kornia\\augmentation\\_3d\\mix\\transplantation.py",
    "ast_data": "ClassDef name:RandomTransplantation3D"
  },
  {
    "library": "scikit-learn",
    "name": "get_n_splits",
    "source_code": "def get_n_splits(self, X=None, y=None, groups=None):\n    return self.n_splits",
    "docstring": "Returns the number of splitting iterations in the cross-validator. Parameters ---------- X : object Always ignored, exists for compatibility. y : object Always ignored, exists for compatibility. groups : object Always ignored, exists for compatibility. Returns ------- n_splits : int Returns the number of splitting iterations in the cross-validator.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\model_selection\\_split.py",
    "ast_data": "FunctionDef name:get_n_splits arg:self arg:X arg:y arg:groups arguments arg arg arg arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "do_decode",
    "source_code": "def do_decode(self, value, decode_fn):\n    del decode_fn\n    tensor_proto = value.tensor_value\n    tensor = constant(tensor_util.MakeNdarray(tensor_proto))\n    return tensor",
    "docstring": "Returns the encoded by the proto .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\constant_op.py",
    "ast_data": "FunctionDef name:do_decode arg:self arg:value arg:decode_fn arguments arg arg arg Assign Assign Call Call Return return:yes"
  },
  {
    "library": "cherrypy",
    "name": "checkpassword_dict",
    "source_code": "def checkpassword_dict(user_password_dict):\n\n    def checkpassword(realm, user, password):\n        p = user_password_dict.get(user)\n        return p and p == password or False\n    return checkpassword",
    "docstring": "Check credentials against a dictionary. Returns a checkpassword function which checks credentials against a dictionary of the form: {username : password}. If you want a simple dictionary-based authentication scheme, use checkpassword_dict(my_credentials_dict) as the value for the checkpassword argument to basic_auth().",
    "type": "function",
    "file_path": "cherrypy\\cherrypy\\lib\\auth_basic.py",
    "ast_data": "FunctionDef name:checkpassword_dict arg:user_password_dict arguments arg FunctionDef name:checkpassword arg:realm arg:user arg:password arguments arg arg arg Assign Call Return return:yes BoolOp BoolOp Compare Return return:yes"
  },
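A wiring sketch for the dictionary-based checker with CherryPy's `auth_basic` tool (the app, realm, and credentials below are illustrative):

```python
import cherrypy
from cherrypy.lib.auth_basic import checkpassword_dict

class App:
    @cherrypy.expose
    def index(self):
        return 'secret'

conf = {'/': {
    'tools.auth_basic.on': True,
    'tools.auth_basic.realm': 'localhost',
    'tools.auth_basic.checkpassword': checkpassword_dict({'alice': 's3cret'}),
}}
# cherrypy.quickstart(App(), '/', conf)  # uncomment to serve with basic auth
```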
  {
    "library": "scrapy",
    "name": "open",
    "source_code": "def open(self, spider: Spider) -> Deferred[None] | None:\n    self.spider: Spider = spider\n    self.mqs: ScrapyPriorityQueue = self._mq()\n    self.dqs: ScrapyPriorityQueue | None = self._dq() if self.dqdir else None\n    return self.df.open()",
    "docstring": "(1) initialize the memory queue (2) initialize the disk queue if the `` method",
    "type": "method",
    "file_path": "scrapy\\scrapy\\core\\scheduler.py",
    "ast_data": "FunctionDef name:open arg:self arg:spider arguments arg arg Call Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "upsample_bilinear",
    "source_code": "def upsample_bilinear(input, size=None, scale_factor=None):\n    warnings.warn('`nn.functional.upsample_bilinear` is deprecated. Use `nn.functional.interpolate` instead.', stacklevel=2)\n    return interpolate(input, size, scale_factor, mode='bilinear', align_corners=True)",
    "docstring": "Upsamples the input, using bilinear upsampling. .. warning:: This function is deprecated in favor of :func:. This is equivalent with `upsample_trilinear` fo volumetric (5 dimensional) inputs. Args: input (Tensor): input size (int or Tuple[int, int]): output spatial size. scale_factor (int or Tuple[int, int]): multiplier for spatial size Note: {backward_reproducibility_note}",
    "type": "function",
    "file_path": "pytorch\\torch\\nn\\functional.py",
    "ast_data": "FunctionDef name:upsample_bilinear arg:input arg:size arg:scale_factor arguments arg arg arg Call Return return:yes Call"
  },
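Since the wrapper is deprecated, the `interpolate` call it forwards to is the recommended spelling; the two calls below produce identical results:

```python
import torch
import torch.nn.functional as F

x = torch.arange(16.0).reshape(1, 1, 4, 4)
old = F.upsample_bilinear(x, scale_factor=2)  # emits a deprecation warning
new = F.interpolate(x, scale_factor=2, mode='bilinear', align_corners=True)
assert torch.equal(old, new)
```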
  {
    "library": "scikit-learn",
    "name": "get_namespace",
    "source_code": "def get_namespace(*arrays, remove_none=True, remove_types=(str,), xp=None):\n    array_api_dispatch = get_config()['array_api_dispatch']\n    if not array_api_dispatch:\n        if xp is not None:\n            return (xp, False)\n        else:\n            return (np_compat, False)\n    if xp is not None:\n        return (xp, True)\n    arrays = _remove_non_arrays(*arrays, remove_none=remove_none, remove_types=remove_types)\n    if not arrays:\n        return (np_compat, False)\n    _check_array_api_dispatch(array_api_dispatch)\n    namespace, is_array_api_compliant = (array_api_compat.get_namespace(*arrays), True)\n    if namespace.__name__ == 'array_api_strict' and hasattr(namespace, 'set_array_api_strict_flags'):\n        namespace.set_array_api_strict_flags(api_version='2024.12')\n    return (namespace, is_array_api_compliant)",
    "docstring": "Get namespace of arrays. Introspect arguments and return their common Array API compatible namespace object, if any. Note that sparse arrays are filtered by default. See: If are regular numpy arrays, is returned instead. Namespace support is not enabled by default. To enabled it call: sklearn.set_config(array_api_dispatch=True) or: with sklearn.config_context(array_api_dispatch=True): # your code here Otherwise is always returned irrespective of the fact that arrays implement the protocol or not. Note that if no arrays pass the set filters, `arrays` are not arrays, the namespace defaults to the NumPy namespace. is_array_api_compliant : bool True if the arrays are containers that implement the array API spec (see Always False when array_api_dispatch=False.",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\utils\\_array_api.py",
    "ast_data": "FunctionDef name:get_namespace arguments arg arg arg arg Assign Call If If Compare Return return:yes Return return:yes If Compare Return return:yes Assign Call If Return return:yes Call Assign Call If BoolOp Compare Call Call Return return:yes"
  },
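A minimal sketch of the dispatch switch described above; with `array_api_dispatch` off (the default), the NumPy-compatible wrapper is returned and the compliance flag is False. Note `get_namespace` is a private sklearn utility:

```python
import numpy as np
from sklearn.utils._array_api import get_namespace

xp, is_compliant = get_namespace(np.asarray([1.0, 2.0]))
print(xp.__name__, is_compliant)  # NumPy-compatible wrapper namespace, False

# With dispatch enabled (requires the array-api-compat dependency):
# import sklearn
# with sklearn.config_context(array_api_dispatch=True):
#     xp, is_compliant = get_namespace(np.asarray([1.0, 2.0]))
```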
  {
    "library": "matplotlib",
    "name": "set_message",
    "source_code": "def set_message(self, s):\n    raise NotImplementedError",
    "docstring": "Display a message on the toolbar. Parameters ---------- s : str Message text.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\backend_bases.py",
    "ast_data": "FunctionDef name:set_message arg:self arg:s arguments arg arg Raise"
  },
  {
    "library": "pandas",
    "name": "get_default_engine",
    "source_code": "def get_default_engine(ext: str, mode: Literal['reader', 'writer']='reader') -> str:\n    _default_readers = {'xlsx': 'openpyxl', 'xlsm': 'openpyxl', 'xlsb': 'pyxlsb', 'xls': 'xlrd', 'ods': 'odf'}\n    _default_writers = {'xlsx': 'openpyxl', 'xlsm': 'openpyxl', 'xlsb': 'pyxlsb', 'ods': 'odf'}\n    assert mode in ['reader', 'writer']\n    if mode == 'writer':\n        xlsxwriter = import_optional_dependency('xlsxwriter', errors='warn')\n        if xlsxwriter:\n            _default_writers['xlsx'] = 'xlsxwriter'\n        return _default_writers[ext]\n    else:\n        return _default_readers[ext]",
    "docstring": "Return the default reader/writer for the given extension. Parameters ---------- ext : str The excel file extension for which to get the default engine. mode : str {'reader', 'writer'} Whether to get the default engine for reading or writing. Either 'reader' or 'writer' Returns ------- str The default engine for the extension.",
    "type": "function",
    "file_path": "pandas\\pandas\\io\\excel\\_util.py",
    "ast_data": "FunctionDef name:get_default_engine arg:ext arg:mode arguments arg arg Assign Assign Compare If Compare Assign Call If Assign Return return:yes Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "LMDBReader",
    "source_code": "@tf_export(v1=['LMDBReader'])\nclass LMDBReader(ReaderBase):\n\n    @deprecation.deprecated(None, 'Queue-based input pipelines have been replaced by `tf.data`. Use `tf.contrib.data.LMDBDataset`.')\n    def __init__(self, name=None, options=None):\n        del options\n        rr = gen_io_ops.lmdb_reader(name=name)\n        super(LMDBReader, self).__init__(rr)",
    "docstring": "A Reader that outputs the records from a LMDB file. See ReaderBase for supported methods. @compatibility(eager) Readers are not compatible with eager execution. Instead, please use to get data into your model. @end_compatibility",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\io_ops.py",
    "ast_data": "ClassDef name:LMDBReader FunctionDef name:__init__ arg:self arg:name arg:options arguments arg arg arg Assign Call Call Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_get_serialized_attributes_internal",
    "source_code": "def _get_serialized_attributes_internal(self, serialization_cache):\n    objects = save_impl.wrap_layer_objects(self.obj, serialization_cache)\n    functions = save_impl.wrap_layer_functions(self.obj, serialization_cache)\n    functions['_default_save_signature'] = None\n    return (objects, functions)",
    "docstring": "Returns dictionary of serialized attributes.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\saving\\saved_model\\layer_serialization.py",
    "ast_data": "FunctionDef name:_get_serialized_attributes_internal arg:self arg:serialization_cache arguments arg arg Assign Call Assign Call Assign Return return:yes"
  },
  {
    "library": "kornia",
    "name": "z",
    "source_code": "@property\ndef z(self) -> Tensor:\n    return self._z",
    "docstring": "Return the underlying data with shape :math:.",
    "type": "method",
    "file_path": "kornia\\kornia\\geometry\\liegroup\\so2.py",
    "ast_data": "FunctionDef name:z arg:self arguments arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "run_cached_graph",
    "source_code": "def run_cached_graph(hash_str, graph_inputs):\n    return torch._C._lazy_ts_backend._run_cached_graph(hash_str, graph_inputs)",
    "docstring": "Running the cached computation graph with the given inputs TODO: This API is currently ts backend specific. We are working on generalizing it to all backends including XLA.",
    "type": "function",
    "file_path": "pytorch\\torch\\_lazy\\computation.py",
    "ast_data": "FunctionDef name:run_cached_graph arg:hash_str arg:graph_inputs arguments arg arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "put_metric",
    "source_code": "def put_metric(metric_name: str, metric_value: int, metric_group: str='torchelastic'):\n    getStream(metric_group).add_value(metric_name, metric_value)",
    "docstring": "Publish a metric data point. Usage :: put_metric(\"metric_name\", 1) put_metric(\"metric_name\", 1, \"metric_group_name\")",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\elastic\\metrics\\api.py",
    "ast_data": "FunctionDef name:put_metric arg:metric_name arg:metric_value arg:metric_group arguments arg arg arg Call Call"
  },
  {
    "library": "tensorflow",
    "name": "enable_control_flow_v2",
    "source_code": "def enable_control_flow_v2():\n    global ENABLE_CONTROL_FLOW_V2\n    ENABLE_CONTROL_FLOW_V2 = True",
    "docstring": "Use control flow v2. Do not use this symbol. This will be removed.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\control_flow_util.py",
    "ast_data": "FunctionDef name:enable_control_flow_v2 arguments Assign"
  },
  {
    "library": "pytorch",
    "name": "load_state_dict",
    "source_code": "def load_state_dict(self, state_dict):\n    self.__dict__.update(state_dict)",
    "docstring": "Loads the schedulers state. Args: state_dict (dict): scheduler state. Should be an object returned from a call to :meth:.",
    "type": "method",
    "file_path": "pytorch\\torch\\ao\\pruning\\scheduler\\base_scheduler.py",
    "ast_data": "FunctionDef name:load_state_dict arg:self arg:state_dict arguments arg arg Call"
  },
  {
    "library": "tensorflow",
    "name": "_set_network_attributes_from_metadata",
    "source_code": "def _set_network_attributes_from_metadata(revived_obj):\n    with utils.no_automatic_dependency_tracking_scope(revived_obj):\n        metadata = revived_obj._serialized_attributes['metadata']\n        if metadata.get('dtype') is not None:\n            revived_obj._set_dtype_policy(metadata['dtype'])\n        revived_obj._trainable = metadata['trainable']",
    "docstring": "Sets attributes recorded in the metadata.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\saving\\saved_model\\load.py",
    "ast_data": "FunctionDef name:_set_network_attributes_from_metadata arg:revived_obj arguments arg With Call Assign If Compare Call Call Assign"
  },
  {
    "library": "scikit-learn",
    "name": "divide",
    "source_code": "def divide(self, y: Array | complex, /, copy: bool | None=None, xp: ModuleType | None=None) -> Array:\n    return self._op(_AtOp.DIVIDE, operator.itruediv, operator.truediv, y, copy=copy, xp=xp)",
    "docstring": "Apply `` and return the updated array.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\externals\\array_api_extra\\_lib\\_at.py",
    "ast_data": "FunctionDef name:divide arg:copy arg:xp arguments arg arg arg arg Return return:yes Call"
  },
  {
    "library": "numpy",
    "name": "_iszip",
    "source_code": "def _iszip(self, filename):\n    fname, ext = os.path.splitext(filename)\n    return ext in _file_openers.keys()",
    "docstring": "Test if the filename is a zip file by looking at the file extension.",
    "type": "method",
    "file_path": "numpy\\numpy\\lib\\_datasource.py",
    "ast_data": "FunctionDef name:_iszip arg:self arg:filename arguments arg arg Assign Call Return return:yes Compare Call"
  },
  {
    "library": "scipy",
    "name": "poles",
    "source_code": "@property\ndef poles(self):\n    return self._poles",
    "docstring": "Poles of the system.",
    "type": "method",
    "file_path": "scipy\\scipy\\signal\\_ltisys.py",
    "ast_data": "FunctionDef name:poles arg:self arguments arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "GenerateAndLoadResult",
    "source_code": "class GenerateAndLoadResult(NamedTuple):\n    mod: ModuleType\n    extra: str\n    input_call_args: tuple[str, ...]\n    prologue_supported_inputs: OrderedSet[str]\n    kernel_args_sizevars_keys: tuple[sympy.Expr]\n    kernel_options: dict[str, Any]",
    "docstring": "Return type of TritonTemplate.generate_and_load.",
    "type": "class",
    "file_path": "pytorch\\torch\\_inductor\\select_algorithm.py",
    "ast_data": "ClassDef name:GenerateAndLoadResult"
  },
  {
    "library": "tensorflow",
    "name": "values",
    "source_code": "@property\ndef values(self):\n    return self._values",
    "docstring": "A containing the values of the slices.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\indexed_slices.py",
    "ast_data": "FunctionDef name:values arg:self arguments arg Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "_get_routed_params_for_fit",
    "source_code": "def _get_routed_params_for_fit(self, params):\n    if _routing_enabled():\n        routed_params = process_routing(self, 'fit', **params)\n    else:\n        params = params.copy()\n        groups = params.pop('groups', None)\n        routed_params = Bunch(estimator=Bunch(fit=params), splitter=Bunch(split={'groups': groups}), scorer=Bunch(score={}))\n        if params.get('sample_weight') is not None and self._check_scorers_accept_sample_weight():\n            routed_params.scorer.score['sample_weight'] = params['sample_weight']\n    return routed_params",
    "docstring": "Get the parameters to be used for routing. This is a method instead of a snippet in ``.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\model_selection\\_search.py",
    "ast_data": "FunctionDef name:_get_routed_params_for_fit arg:self arg:params arguments arg arg If Call Assign Call Assign Call Assign Call Assign Call Call Call Call If BoolOp Compare Call Call Assign Return return:yes"
  },
  {
    "library": "django",
    "name": "model_unpickle",
    "source_code": "def model_unpickle(model_id):\n    if isinstance(model_id, tuple):\n        model = apps.get_model(*model_id)\n    else:\n        model = model_id\n    return model.__new__(model)",
    "docstring": "Used to unpickle Model subclasses with deferred fields.",
    "type": "function",
    "file_path": "django\\django\\db\\models\\base.py",
    "ast_data": "FunctionDef name:model_unpickle arg:model_id arguments arg If Call Assign Call Assign Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "duplicated",
    "source_code": "def duplicated(self, subset: Hashable | Iterable[Hashable] | None=None, keep: DropKeep='first') -> Series:\n    if self.empty:\n        return self._constructor_sliced(dtype=bool)\n\n    def f(vals) -> tuple[np.ndarray, int]:\n        labels, shape = algorithms.factorize(vals, size_hint=len(self))\n        return (labels.astype('i8'), len(shape))\n    if subset is None:\n        subset = self.columns\n    elif not np.iterable(subset) or isinstance(subset, str) or (isinstance(subset, tuple) and subset in self.columns):\n        subset = (subset,)\n    subset = cast(Sequence, subset)\n    diff = set(subset) - set(self.columns)\n    if diff:\n        raise KeyError(Index(diff))\n    if len(subset) == 1 and self.columns.is_unique:\n        result = self[next(iter(subset))].duplicated(keep)\n        result.name = None\n    else:\n        vals = (col.values for name, col in self.items() if name in subset)\n        labels, shape = map(list, zip(*map(f, vals)))\n        ids = get_group_index(labels, tuple(shape), sort=False, xnull=False)\n        result = self._constructor_sliced(duplicated(ids, keep), index=self.index)\n    return result.__finalize__(self, method='duplicated')",
    "docstring": "Return boolean Series denoting duplicate rows. Considering certain columns is optional. Parameters ---------- subset : column label or iterable of labels, optional Only consider certain columns for identifying duplicates, by default use all of the columns. keep : {'first', 'last', False}, default 'first' Determines which duplicates (if any) to mark. - ``. >>> df.duplicated(subset=[\"brand\"]) 0 False 1 True 2 False 3 True 4 True dtype: bool",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\frame.py",
    "ast_data": "FunctionDef name:duplicated arg:self arg:subset arg:keep arguments arg arg arg If Return return:yes Call FunctionDef name:f arg:vals arguments arg Assign Call Call Return return:yes Call Call If Compare Assign If BoolOp Call Call BoolOp Call Compare Assign Assign Call Assign Call Call If Raise Call Call If BoolOp Compare Call Assign Call Call Call Assign Assign Call Compare Assign Call Call Call Assign Call Call Assign Call Call Return return:yes Call"
  },
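The small frame below reproduces the docstring's `subset=["brand"]` example:

```python
import pandas as pd

df = pd.DataFrame({
    'brand': ['Yum Yum', 'Yum Yum', 'Indomie', 'Indomie', 'Indomie'],
    'style': ['cup', 'cup', 'cup', 'pack', 'pack'],
})
print(df.duplicated())                  # full-row duplicates, keep='first'
print(df.duplicated(subset=['brand']))  # False, True, False, True, True
```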
  {
    "library": "tensorflow",
    "name": "collect_graphs",
    "source_code": "@contextlib.contextmanager\ndef collect_graphs(optimized=True):\n    ctx = context()\n    ctx.enable_graph_collection()\n    try:\n        graphs = []\n        yield graphs\n        metadata = ctx.export_run_metadata()\n    finally:\n        ctx.disable_graph_collection()\n    for graph in metadata.function_graphs:\n        if optimized:\n            graphs.append(graph.post_optimization_graph)\n        else:\n            graphs.append(graph.pre_optimization_graph)",
    "docstring": "Collects a flat list of pre- or post-optimization graphs. The collected graphs include device placements, which can be useful for testing. Usage: Args: optimized: whether to collect optimized graphs or non-optimized graphs Yields: A list of GraphDefs, populated when the context manager exits.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\context.py",
    "ast_data": "FunctionDef name:collect_graphs arg:optimized arguments arg Assign Call Call Try Assign Assign Call Call For If Call Call"
  },
  {
    "library": "numpy",
    "name": "replaceDlamch",
    "source_code": "def replaceDlamch(source):\n\n    def repl(m):\n        s = m.group(1)\n        return {'E': 'EPSILON', 'P': 'PRECISION', 'S': 'SAFEMINIMUM', 'B': 'BASE'}[s[0]]\n    source = re.sub('dlamch_\\\\(\"(.*?)\"\\\\)', repl, source)\n    source = re.sub('^\\\\s+extern.*? dlamch_.*?;$(?m)', '', source)\n    return source",
    "docstring": "Replace dlamch_ calls with appropriate macros",
    "type": "function",
    "file_path": "numpy\\numpy\\linalg\\lapack_lite\\clapack_scrub.py",
    "ast_data": "FunctionDef name:replaceDlamch arg:source arguments arg FunctionDef name:repl arg:m arguments arg Assign Call Return return:yes Assign Call Assign Call Return return:yes"
  },
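A standalone demo of the substitution, assuming translated C source that calls LAPACK's `dlamch_`:

```python
import re

def repl(m):
    # The first letter of the dlamch argument selects the macro.
    return {'E': 'EPSILON', 'P': 'PRECISION',
            'S': 'SAFEMINIMUM', 'B': 'BASE'}[m.group(1)[0]]

src = 'eps = dlamch_("Epsilon");'
print(re.sub(r'dlamch_\("(.*?)"\)', repl, src))  # eps = EPSILON;
```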
  {
    "library": "authlib",
    "name": "request",
    "source_code": "def request(self, method, url, withhold_token=False, auth=None, **kwargs):\n    if self.default_timeout:\n        kwargs.setdefault('timeout', self.default_timeout)\n    if not withhold_token and auth is None:\n        auth = self.token_auth\n    return super().request(method, url, auth=auth, **kwargs)",
    "docstring": "Send request with auto refresh token feature.",
    "type": "method",
    "file_path": "authlib\\authlib\\integrations\\requests_client\\assertion_session.py",
    "ast_data": "FunctionDef name:request arg:self arg:method arg:url arg:withhold_token arg:auth arguments arg arg arg arg arg arg If Call If BoolOp Compare Assign Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "isfunction",
    "source_code": "def isfunction(object):\n    return _inspect.isfunction(tf_decorator.unwrap(object)[1])",
    "docstring": "TFDecorator-aware replacement for inspect.isfunction.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\util\\tf_inspect.py",
    "ast_data": "FunctionDef name:isfunction arg:object arguments arg Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_LiteOperand",
    "source_code": "class _LiteOperand:\n\n    def aggregate_and_return_name_for_input(self, out_graphdef):\n        del out_graphdef\n        raise RuntimeError('Unimplemented abstract method.')\n\n    def aggregate_and_return_name_for_output(self, fused_op_name, output_index, out_graphdef):\n        del fused_op_name, output_index, out_graphdef\n        raise RuntimeError('Unimplemented abstract method.')",
    "docstring": "Abstract operand for a tflite hint function._dynamic_rnn_loop. This is a base class that handles representing arguments to an OpHint. It also is able to serialize operands to the stubbed graph_def. Child classes are responsible for being able to store information about the hint identity operators. They are also responsible for knowing how to serialize to output graphdefs. Typically this will be implemented by holding one or more identity nodes that were previously discovered as hints.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\lite\\python\\op_hint.py",
    "ast_data": "ClassDef name:_LiteOperand FunctionDef name:aggregate_and_return_name_for_input arg:self arg:out_graphdef arguments arg arg Raise Call FunctionDef name:aggregate_and_return_name_for_output arg:self arg:fused_op_name arg:output_index arg:out_graphdef arguments arg arg arg arg Raise Call"
  },
  {
    "library": "django",
    "name": "post_process",
    "source_code": "def post_process(self, paths, dry_run=False, **options):\n    if dry_run:\n        return\n    hashed_files = {}\n    adjustable_paths = [path for path in paths if matches_patterns(path, self._patterns)]\n    processed_adjustable_paths = {}\n    for name, hashed_name, processed, _ in self._post_process(paths, adjustable_paths, hashed_files):\n        if name not in adjustable_paths or isinstance(processed, Exception):\n            yield (name, hashed_name, processed)\n        else:\n            processed_adjustable_paths[name] = (name, hashed_name, processed)\n    paths = {path: paths[path] for path in adjustable_paths}\n    unresolved_paths = []\n    for i in range(self.max_post_process_passes):\n        unresolved_paths = []\n        for name, hashed_name, processed, subst in self._post_process(paths, adjustable_paths, hashed_files):\n            processed_adjustable_paths[name] = (name, hashed_name, processed)\n            if subst:\n                unresolved_paths.append(name)\n        if not unresolved_paths:\n            break\n    if unresolved_paths:\n        problem_paths = ', '.join(sorted(unresolved_paths))\n        yield (problem_paths, None, RuntimeError('Max post-process passes exceeded.'))\n    self.hashed_files.update(hashed_files)\n    yield from processed_adjustable_paths.values()",
    "docstring": "Post process the given dictionary of files (called from collectstatic). Processing is actually two separate operations: 1. renaming files to include a hash of their content for cache-busting, and copying those files to the target storage. 2. adjusting files which contain references to other files so they refer to the cache-busting filenames. If either of these are performed on a file, then that file is considered post-processed.",
    "type": "method",
    "file_path": "django\\django\\contrib\\staticfiles\\storage.py",
    "ast_data": "FunctionDef name:post_process arg:self arg:paths arg:dry_run arguments arg arg arg arg If Return return:no Assign Assign Call Assign For Call If BoolOp Compare Call Assign Assign Assign For Call Assign For Call Assign If Call If If Assign Call Call Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_checkpoint_adapter",
    "source_code": "def _checkpoint_adapter(self, path: str):\n    del path\n    return None",
    "docstring": "Returns a checkpoint adapter for this object. Needs to be overridden if the requires adapter at restore. Override this method to define callbacks for checkpoint positions to be applied at restore time. Args: path: Checkpoint path. Returns: A subclass of AbstractCheckpointAdapter that defines callbacks at restore for this trackable.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\trackable\\base.py",
    "ast_data": "FunctionDef name:_checkpoint_adapter arg:self arg:path arguments arg arg Return return:no"
  },
  {
    "library": "scikit-learn",
    "name": "_n_features_out",
    "source_code": "@property\ndef _n_features_out(self):\n    return self.eigenvalues_.shape[0]",
    "docstring": "Number of transformed output features.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\decomposition\\_kernel_pca.py",
    "ast_data": "FunctionDef name:_n_features_out arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "parents",
    "source_code": "@property\ndef parents(self):\n    return [self.key]",
    "docstring": "See 'FeatureColumn` base class.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\feature_column\\sequence_feature_column.py",
    "ast_data": "FunctionDef name:parents arg:self arguments arg Return return:yes"
  },
  {
    "library": "scipy",
    "name": "newer",
    "source_code": "def newer(source, target):\n    if not os.path.exists(source):\n        raise ValueError(f\"file '{os.path.abspath(source)}' does not exist\")\n    if not os.path.exists(target):\n        return 1\n    mtime1 = os.stat(source)[ST_MTIME]\n    mtime2 = os.stat(target)[ST_MTIME]\n    return mtime1 > mtime2",
    "docstring": "Return true if 'source' exists and is more recently modified than 'target', or if 'source' exists and 'target' doesn't. Return false if both exist and 'target' is the same age or younger than 'source'.",
    "type": "function",
    "file_path": "scipy\\scipy\\sparse\\_generate_sparsetools.py",
    "ast_data": "FunctionDef name:newer arg:source arg:target arguments arg arg If Call Raise Call Call If Call Return return:yes Assign Call Assign Call Return return:yes Compare"
  },
  {
    "library": "pytorch",
    "name": "current_step",
    "source_code": "@classmethod\ndef current_step(cls) -> int:\n    return cls._current_step",
    "docstring": "Get the latest step for any requester",
    "type": "method",
    "file_path": "pytorch\\torch\\autograd\\profiler.py",
    "ast_data": "FunctionDef name:current_step arg:cls arguments arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "log_prob",
    "source_code": "def log_prob(self, value: Tensor) -> Tensor:\n    raise NotImplementedError",
    "docstring": "Returns the log of the probability density/mass function evaluated at . Args: value (Tensor):",
    "type": "method",
    "file_path": "pytorch\\torch\\distributions\\distribution.py",
    "ast_data": "FunctionDef name:log_prob arg:self arg:value arguments arg arg Raise"
  },
  {
    "library": "cherrypy",
    "name": "kwargs",
    "source_code": "@property\ndef kwargs(self):\n    kwargs = cherrypy.serving.request.params.copy()\n    if self._kwargs:\n        kwargs.update(self._kwargs)\n    return kwargs",
    "docstring": "Page handler keyword arguments. The returned value contains data merged in from ``.",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\_cpdispatch.py",
    "ast_data": "FunctionDef name:kwargs arg:self arguments arg Assign Call If Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "get_distrib",
    "source_code": "def get_distrib():\n    key = 'distrib'\n    out, err = run_shell_cmd(cmds_all[PLATFORM][key])\n    if err and FLAGS.debug:\n        print('Error in detecting distribution:\\n %s' % str(err))\n    return out.strip(b'\\n')",
    "docstring": "Retrieves distribution name of the operating system. Returns: String that is the name of distribution. e.g. 'Ubuntu'",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\tools\\tensorflow_builder\\config_detector\\config_detector.py",
    "ast_data": "FunctionDef name:get_distrib arguments Assign Assign Call If BoolOp Call Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_is_framework_filename",
    "source_code": "def _is_framework_filename(filename):\n    for pattern in _EXTERNAL_FILENAME_PATTERNS:\n        if pattern.search(filename):\n            return False\n    for pattern in _FRAMEWORK_FILENAME_PATTERNS:\n        if pattern.search(filename):\n            return True\n    for prefix in _FRAMEWORK_PATH_PREFIXES:\n        if filename.startswith(prefix):\n            return True\n    return False",
    "docstring": "Returns whether a filename should be considered a part of the framework. A file is part of the framework if it does not match a pattern in _EXTERNAL_FILENAME_PATTERNS and it either matches a pattern in _FRAMEWORK_FILENAME_PATTERNS or starts with a _FRAMEWORK_PATH_PREFIXES prefix. Args: filename: A filename string. Returns: Whether the filename should be considered to be internal to the TensorFlow framework for the purposes of reporting errors.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\error_interpolation.py",
    "ast_data": "FunctionDef name:_is_framework_filename arg:filename arguments arg For If Call Return return:yes For If Call Return return:yes For If Call Return return:yes Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "set_params",
    "source_code": "def set_params(self, **params):\n    if not params:\n        return self\n    valid_params = self.get_params(deep=True)\n    nested_params = defaultdict(dict)\n    for key, value in params.items():\n        key, delim, sub_key = key.partition('__')\n        if key not in valid_params:\n            local_valid_params = self._get_param_names()\n            raise ValueError(f'Invalid parameter {key!r} for estimator {self}. Valid parameters are: {local_valid_params!r}.')\n        if delim:\n            nested_params[key][sub_key] = value\n        else:\n            setattr(self, key, value)\n            valid_params[key] = value\n    for key, sub_params in nested_params.items():\n        valid_params[key].set_params(**sub_params)\n    return self",
    "docstring": "Set the parameters of this estimator. The method works on simple estimators as well as on nested objects (such as :class:). The latter have parameters of the form `` so that it's possible to update each component of a nested object. Parameters ---------- **params : dict Estimator parameters. Returns ------- self : estimator instance Estimator instance.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\base.py",
    "ast_data": "FunctionDef name:set_params arg:self arguments arg arg If Return return:yes Assign Call Assign Call For Call Assign Call If Compare Assign Call Raise Call If Assign Call Assign For Call Call Return return:yes"
  },
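The double-underscore routing is easiest to see with a nested estimator; a sketch using a standard pipeline:

```python
from sklearn.linear_model import LogisticRegression
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler

pipe = Pipeline([('scale', StandardScaler()), ('clf', LogisticRegression())])
pipe.set_params(clf__C=0.5)         # 'clf' is split off, 'C' goes to that step
print(pipe.get_params()['clf__C'])  # 0.5
```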
  {
    "library": "django",
    "name": "has_module_perms",
    "source_code": "def has_module_perms(self, app_label):\n    if self.is_active and self.is_superuser:\n        return True\n    return _user_has_module_perms(self, app_label)",
    "docstring": "Return True if the user has any permissions in the given app label. Use similar logic as has_perm(), above.",
    "type": "method",
    "file_path": "django\\django\\contrib\\auth\\models.py",
    "ast_data": "FunctionDef name:has_module_perms arg:self arg:app_label arguments arg arg If BoolOp Return return:yes Return return:yes Call"
  },
  {
    "library": "django",
    "name": "bisect_keep_right",
    "source_code": "def bisect_keep_right(a, fn):\n    lo = 0\n    hi = len(a)\n    while lo < hi:\n        mid = (lo + hi) // 2\n        if fn(a[mid:]):\n            lo = mid + 1\n        else:\n            hi = mid\n    return lo",
    "docstring": "Find the index of the first element from the end of the array that verifies the given condition. The function is applied from the pivot to the end of array.",
    "type": "function",
    "file_path": "django\\django\\contrib\\messages\\storage\\cookie.py",
    "ast_data": "FunctionDef name:bisect_keep_right arg:a arg:fn arguments arg arg Assign Assign Call While Compare Assign If Call Assign Assign Return return:yes"
  },
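The helper binary-searches over suffixes, assuming the predicate is monotone in suffix length (True means the suffix is still unacceptable, e.g. too large to fit in a cookie). A self-contained demo with a hypothetical size budget:

```python
def bisect_keep_right(a, fn):
    lo, hi = 0, len(a)
    while lo < hi:
        mid = (lo + hi) // 2
        if fn(a[mid:]):
            lo = mid + 1  # suffix still fails; keep fewer elements
        else:
            hi = mid
    return lo

items = ['aa', 'bb', 'cc', 'dd']
too_large = lambda tail: sum(len(s) for s in tail) > 4  # 4-char budget
i = bisect_keep_right(items, too_large)
print(items[i:])  # ['cc', 'dd'] -- the largest suffix within the budget
```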
  {
    "library": "cryptography",
    "name": "update",
    "source_code": "@abc.abstractmethod\ndef update(self, data: utils.Buffer) -> bytes:\n    pass",
    "docstring": "Pads the provided bytes and returns any available data as bytes.",
    "type": "method",
    "file_path": "cryptography\\src\\cryptography\\hazmat\\primitives\\padding.py",
    "ast_data": "FunctionDef name:update arg:self arg:data arguments arg arg"
  },
  {
    "library": "tensorflow",
    "name": "OnSessionInitRequest",
    "source_code": "class OnSessionInitRequest:\n\n    def __init__(self, sess):\n        _check_type(sess, (session.BaseSession, monitored_session.MonitoredSession))\n        self.session = sess",
    "docstring": "Request to an on-session-init callback. This callback is invoked during the __init__ call to a debug-wrapper session.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\debug\\wrappers\\framework.py",
    "ast_data": "ClassDef name:OnSessionInitRequest FunctionDef name:__init__ arg:self arg:sess arguments arg arg Call Assign"
  },
  {
    "library": "pytorch",
    "name": "_constrain_as_size",
    "source_code": "def _constrain_as_size(symbol, min: _Optional[builtins.int]=None, max: _Optional[builtins.int]=None):\n    torch.sym_constrain_range_for_size(symbol, min=min, max=max)",
    "docstring": "This indicates that a given int is size-like, and can be used in any context where a size is expected. You will typically use this when reading out integers from Tensors, e.g., max.item() or lengths.tolist() which then need to be used as tensor constructors. Providing these assertions to PyTorch can help resolve GuardOnDataDependentSymNode errors upon export, since we cannot guard on unbacked SymInts. This function has unusual semantics in some circumstances in framework code, we will treat this int as >= 2 (when we do a size-oblivious guard). This makes it easier to use the unbacked int in size contexts, as we will often attempt to guard on a size being zero/one (e.g., when computing the contiguity of a tensor, or testing if broadcasting can occur), which will not work on unbacked SymInts. However, if we conservatively assume that the size is not zero/one, we will end up with a graph that will still work even if the size is zero/one. For more details, see ```",
    "type": "function",
    "file_path": "pytorch\\torch\\__init__.py",
    "ast_data": "FunctionDef name:_constrain_as_size arg:symbol arg:min arg:max arguments arg arg arg Call"
  },
  {
    "library": "tensorflow",
    "name": "_add_n_flops",
    "source_code": "@ops.RegisterStatistics('AddN', 'flops')\ndef _add_n_flops(graph, node):\n    if not node.input:\n        return _zero_flops(graph, node)\n    in_shape = graph_util.tensor_shape_from_node_def_name(graph, node.input[0])\n    in_shape.assert_is_fully_defined()\n    return ops.OpStats('flops', in_shape.num_elements() * (len(node.input) - 1))",
    "docstring": "Compute flops for AddN operation.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\profiler\\internal\\flops_registry.py",
    "ast_data": "FunctionDef name:_add_n_flops arg:graph arg:node arguments arg arg If Return return:yes Call Assign Call Call Return return:yes Call Call Call Call"
  },
  {
    "library": "pandas",
    "name": "_select_data",
    "source_code": "def _select_data(self) -> DataFrame:\n    if self.include is None and self.exclude is None:\n        default_include: list[npt.DTypeLike] = [np.number, 'datetime']\n        data = self.obj.select_dtypes(include=default_include)\n        if len(data.columns) == 0:\n            data = self.obj\n    elif self.include == 'all':\n        if self.exclude is not None:\n            msg = \"exclude must be None when include is 'all'\"\n            raise ValueError(msg)\n        data = self.obj\n    else:\n        data = self.obj.select_dtypes(include=self.include, exclude=self.exclude)\n    return data",
    "docstring": "Select columns to be described.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\methods\\describe.py",
    "ast_data": "FunctionDef name:_select_data arg:self arguments arg If BoolOp Compare Compare Assign Call If Compare Call Assign If Compare If Compare Assign Raise Call Assign Assign Call Return return:yes"
  },
  {
    "library": "scipy",
    "name": "_ppf",
    "source_code": "def _ppf(self, x):\n    return np.interp(x, self._hcdf, self._hbins)",
    "docstring": "Percentile function calculated from the histogram",
    "type": "method",
    "file_path": "scipy\\scipy\\stats\\_continuous_distns.py",
    "ast_data": "FunctionDef name:_ppf arg:self arg:x arguments arg arg Return return:yes Call"
  },
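The same inverse-CDF lookup can be reproduced with plain NumPy: build a cumulative histogram, then `np.interp` maps uniform percentiles back onto the bin edges (all names below are illustrative):

```python
import numpy as np

rng = np.random.default_rng(0)
data = rng.normal(size=1000)
hist, edges = np.histogram(data, bins=20)
cdf = np.concatenate([[0.0], np.cumsum(hist) / hist.sum()])  # len == len(edges)
u = rng.uniform(size=5)
samples = np.interp(u, cdf, edges)  # percentile-point function via interpolation
print(samples)
```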
  {
    "library": "scipy",
    "name": "_support_mask",
    "source_code": "def _support_mask(self, x):\n    residual = np.linalg.norm(x @ self._null_basis, axis=-1)\n    in_support = residual < self._eps\n    return in_support",
    "docstring": "Check whether x lies in the support of the distribution.",
    "type": "method",
    "file_path": "scipy\\scipy\\stats\\_covariance.py",
    "ast_data": "FunctionDef name:_support_mask arg:self arg:x arguments arg arg Assign Call Assign Compare Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_unbatch",
    "source_code": "@abc.abstractmethod\ndef _unbatch(self) -> TypeSpec:\n    raise NotImplementedError(f'{type(self).__name__}._unbatch')",
    "docstring": "Returns a TypeSpec representing a single element this TypeSpec. Returns: A representing a single element of objects with this TypeSpec.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\type_spec.py",
    "ast_data": "FunctionDef name:_unbatch arg:self arguments arg Raise Call Call"
  },
  {
    "library": "pytorch",
    "name": "__init__",
    "source_code": "def __init__(self, tensor_size, input_var, dim_replace, index, output):\n    assert isinstance(input_var, TVar)\n    assert isinstance(output, TVar)\n    assert isinstance(dim_replace, DVar) or dim_replace == Dyn\n    assert isinstance(index, int)\n    self.input_var = input_var\n    self.tensor_size = tensor_size\n    self.dim_replace = dim_replace\n    self.index = index\n    self.output = output",
    "docstring": "Args: input_var: input to index_select tensor_size: tensor size we are considering dim_replace: the dimension of the output at \"index\" index: location of the dimensions to replace in the input output: variable to store the result",
    "type": "method",
    "file_path": "pytorch\\torch\\fx\\experimental\\migrate_gradual_types\\constraint.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:tensor_size arg:input_var arg:dim_replace arg:index arg:output arguments arg arg arg arg arg arg Call Call BoolOp Call Compare Call Assign Assign Assign Assign Assign"
  },
  {
    "library": "tensorflow",
    "name": "TFRTypes",
    "source_code": "class TFRTypes(enum.Enum):\n    TENSOR = 1\n    TENSOR_LIST = 2\n    ATTR = 3\n    NONE = 4\n    SHAPE = 5\n    I1 = 21\n    I8 = 22\n    I16 = 23\n    I32 = 24\n    I64 = 25\n    F32 = 26\n    INDEX = 27\n    AG_UNDEFINED_VAL = 100\n    AG_BUILTIN_FUNC = 101\n    TF_RAW_OP = 102\n    TF_REGION = 103\n    TF_TENSOR_SHAPE_FUNC = 104\n    TF_TENSOR_SHAPE_LIST = 105\n    PY_BUILTIN_FUNC = 200\n    TFR_BUILTIN_FUNC = 201\n\n    def __getattribute__(self, name):\n        if name == 'shape' and object.__getattribute__(self, 'value') == 1:\n            return TFRTypes.SHAPE\n        if name == 'as_list' and object.__getattribute__(self, 'value') == 5:\n            return TFRTypes.TF_TENSOR_SHAPE_FUNC\n        return object.__getattribute__(self, name)\n\n    def __str__(self):\n        if self.value < 4:\n            return '!tfr.' + self.name.lower()\n        elif self.value < 10:\n            return '!shape.' + self.name.lower()\n        else:\n            return self.name.lower()",
    "docstring": "All the supported types. 1-3: tfr types 4-99: mlir built-in types 100-199: TF related translator internal types 200- : Python related translator internal types",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\compiler\\mlir\\tfr\\python\\tfr_gen.py",
    "ast_data": "ClassDef name:TFRTypes Assign Assign Assign Assign Assign Assign Assign Assign Assign Assign Assign Assign Assign Assign Assign Assign Assign Assign Assign Assign FunctionDef name:__getattribute__ arg:self arg:name arguments arg arg If BoolOp Compare Compare Call Return return:yes If BoolOp Compare Compare Call Return return:yes Return return:yes Call FunctionDef name:__str__ arg:self arguments arg If Compare Return return:yes Call If Compare Return return:yes Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_maybe_dump",
    "source_code": "def _maybe_dump(self, step):\n    if not (step in self._dump_steps or self._dump_next_step):\n        return\n    if self._debug:\n        sys.stderr.write('debug: dumping file at step: %d\\n' % step)\n    gfile.MakeDirs(self._profiler_dir)\n    filename = os.path.join(compat.as_bytes(self._profiler_dir), compat.as_bytes('profile_%d' % step))\n    self.profiler._write_profile(filename)",
    "docstring": "Maybe dump the profile file.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\profiler\\profile_context.py",
    "ast_data": "FunctionDef name:_maybe_dump arg:self arg:step arguments arg arg If BoolOp Compare Return return:no If Call Call Assign Call Call Call Call"
  },
  {
    "library": "cherrypy",
    "name": "get_dict_collection",
    "source_code": "def get_dict_collection(self, v, formatting):\n    headers = []\n    vals = v.values()\n    for record in vals:\n        for k3 in record:\n            format = formatting.get(k3, missing)\n            if format is None:\n                continue\n            if k3 not in headers:\n                headers.append(k3)\n    headers.sort()\n    subrows = []\n    for k2, record in sorted(v.items()):\n        subrow = [k2]\n        for k3 in headers:\n            v3 = record.get(k3, '')\n            format = formatting.get(k3, missing)\n            if format is None:\n                continue\n            if hasattr(format, '__call__'):\n                v3 = format(v3)\n            elif format is not missing:\n                v3 = format % v3\n            subrow.append(v3)\n        subrows.append(subrow)\n    return (headers, subrows)",
    "docstring": "Return ([headers], [rows]) for the given collection.",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\lib\\cpstats.py",
    "ast_data": "FunctionDef name:get_dict_collection arg:self arg:v arg:formatting arguments arg arg arg Assign Assign Call For For Assign Call If Compare If Compare Call Call Assign For Call Call Assign For Assign Call Assign Call If Compare If Call Assign Call If Compare Assign Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "debug_op",
    "source_code": "@property\ndef debug_op(self):\n    return self._debug_op",
    "docstring": "Name of the debug op. Returns: () debug op name (e.g., ).",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\debug\\lib\\debug_data.py",
    "ast_data": "FunctionDef name:debug_op arg:self arguments arg Return return:yes"
  },
  {
    "library": "scrapy",
    "name": "get_key",
    "source_code": "def get_key(self, uri: URI) -> ConnectionKeyT:\n    return (uri.scheme, uri.host, uri.port)",
    "docstring": "Arguments: uri - URI obtained directly from request URL",
    "type": "method",
    "file_path": "scrapy\\scrapy\\core\\http2\\agent.py",
    "ast_data": "FunctionDef name:get_key arg:self arg:uri arguments arg arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "read_execution",
    "source_code": "def read_execution(self, execution_digest):\n    debug_event = self._reader.read_execution_event(execution_digest.locator)\n    return _execution_from_debug_event_proto(debug_event, execution_digest.locator)",
    "docstring": "Read a detailed Execution object.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\debug\\lib\\debug_events_reader.py",
    "ast_data": "FunctionDef name:read_execution arg:self arg:execution_digest arguments arg arg Assign Call Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "reversed",
    "source_code": "def reversed(self, name=None):\n    if name is None:\n        name = self.name + '_r'\n    colors_r = list(reversed(self.colors))\n    new_cmap = ListedColormap(colors_r, name=name)\n    new_cmap._rgba_over = self._rgba_under\n    new_cmap._rgba_under = self._rgba_over\n    new_cmap._rgba_bad = self._rgba_bad\n    return new_cmap",
    "docstring": "Return a reversed instance of the Colormap. Parameters ---------- name : str, optional The name for the reversed colormap. If None, the name is set to ``. Returns ------- ListedColormap A reversed instance of the colormap.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\colors.py",
    "ast_data": "FunctionDef name:reversed arg:self arg:name arguments arg arg If Compare Assign Assign Call Call Assign Call Assign Assign Assign Return return:yes"
  },
  {
    "library": "numpy",
    "name": "chebvander",
    "source_code": "def chebvander(x, deg):\n    ideg = pu._as_int(deg, 'deg')\n    if ideg < 0:\n        raise ValueError('deg must be non-negative')\n    x = np.array(x, copy=None, ndmin=1) + 0.0\n    dims = (ideg + 1,) + x.shape\n    dtyp = x.dtype\n    v = np.empty(dims, dtype=dtyp)\n    v[0] = x * 0 + 1\n    if ideg > 0:\n        x2 = 2 * x\n        v[1] = x\n        for i in range(2, ideg + 1):\n            v[i] = v[i - 1] * x2 - v[i - 2]\n    return np.moveaxis(v, 0, -1)",
    "docstring": "Pseudo-Vandermonde matrix of given degree. Returns the pseudo-Vandermonde matrix of degree and sample points . The pseudo-Vandermonde matrix is defined by .. math:: V[..., i] = T_i(x), where `VxcVxx`.",
    "type": "function",
    "file_path": "numpy\\numpy\\polynomial\\chebyshev.py",
    "ast_data": "FunctionDef name:chebvander arg:x arg:deg arguments arg arg Assign Call If Compare Raise Call Assign Call Assign Assign Assign Call Assign If Compare Assign Assign For Call Assign Return return:yes Call"
  },
  {
    "library": "kornia",
    "name": "DISKFeatures",
    "source_code": "@dataclass\nclass DISKFeatures:\n    keypoints: Tensor\n    descriptors: Tensor\n    detection_scores: Tensor\n\n    @property\n    def n(self) -> int:\n        return self.keypoints.shape[0]\n\n    @property\n    def device(self) -> Device:\n        return self.keypoints.device\n\n    @property\n    def x(self) -> Tensor:\n        return self.keypoints[:, 0]\n\n    @property\n    def y(self) -> Tensor:\n        return self.keypoints[:, 1]\n\n    def to(self, *args: Any, **kwargs: Any) -> DISKFeatures:\n        return DISKFeatures(self.keypoints.to(*args, **kwargs), self.descriptors.to(*args, **kwargs), self.detection_scores.to(*args, **kwargs))",
    "docstring": "A data structure holding DISK keypoints, descriptors and detection scores for an image. Since DISK detects a varying number of keypoints per image, is not batched. Args: keypoints: Tensor of shape :math:, where :math: is the number of keypoints. descriptors: Tensor of shape :math:, where :math: is the descriptor dimension. detection_scores: Tensor of shape :math: where the detection score can be interpreted as the log-probability of keeping a keypoint after it has been proposed (see the paper section *Method → Feature distribution* for details).",
    "type": "class",
    "file_path": "kornia\\kornia\\feature\\disk\\structs.py",
    "ast_data": "ClassDef name:DISKFeatures FunctionDef name:n arg:self arguments arg Return return:yes FunctionDef name:device arg:self arguments arg Return return:yes FunctionDef name:x arg:self arguments arg Return return:yes FunctionDef name:y arg:self arguments arg Return return:yes FunctionDef name:to arg:self arguments arg arg arg Return return:yes Call Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "find_nodes",
    "source_code": "@compatibility(is_backward_compatible=False)\ndef find_nodes(self, *, op: str, target: Optional['Target']=None, sort: bool=True):\n    node_list = self._find_nodes_lookup_table.find_nodes(op=op, target=target)\n    if sort:\n        return sorted(node_list)\n    return node_list",
    "docstring": "Allows for fast query of nodes Args: op (str): the name of the operation target (Optional[Target]): the target of the node. For call_function, the target is required. For other ops, the target is optional. sort (bool): whether to return nodes in the order they appear on on the graph. Returns: Iteratable of nodes with the requested op and target.",
    "type": "method",
    "file_path": "pytorch\\torch\\fx\\graph.py",
    "ast_data": "FunctionDef name:find_nodes arg:self arguments arg arg arg arg Assign Call If Return return:yes Call Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "update",
    "source_code": "def update(self, j, est):\n    do_oob = est.subsample < 1\n    i = j - self.begin_at_stage\n    if (i + 1) % self.verbose_mod == 0:\n        oob_impr = est.oob_improvement_[j] if do_oob else 0\n        remaining_time = (est.n_estimators - (j + 1)) * (time() - self.start_time) / float(i + 1)\n        if remaining_time > 60:\n            remaining_time = '{0:.2f}m'.format(remaining_time / 60.0)\n        else:\n            remaining_time = '{0:.2f}s'.format(remaining_time)\n        print(self.verbose_fmt.format(iter=j + 1, train_score=est.train_score_[j], oob_impr=oob_impr, remaining_time=remaining_time))\n        if self.verbose == 1 and (i + 1) // (self.verbose_mod * 10) > 0:\n            self.verbose_mod *= 10",
    "docstring": "Update reporter with new iteration. Parameters ---------- j : int The new iteration. est : Estimator The estimator.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\ensemble\\_gb.py",
    "ast_data": "FunctionDef name:update arg:self arg:j arg:est arguments arg arg arg Assign Compare Assign If Compare Assign Assign Call Call If Compare Assign Call Assign Call Call Call If BoolOp Compare Compare"
  },
  {
    "library": "tensorflow",
    "name": "TPUStrategyV1",
    "source_code": "@tf_export.tf_export(v1=['distribute.experimental.TPUStrategy'])\nclass TPUStrategyV1(distribute_lib.StrategyV1):\n\n    def __init__(self, tpu_cluster_resolver=None, steps_per_run=None, device_assignment=None):\n        super().__init__(TPUExtended(self, tpu_cluster_resolver, steps_per_run, device_assignment))\n        distribute_lib.distribution_strategy_gauge.get_cell('V1').set('TPUStrategy')\n        distribute_lib.distribution_strategy_replica_gauge.get_cell('num_workers').set(self.extended.num_hosts)\n        distribute_lib.distribution_strategy_replica_gauge.get_cell('num_replicas_per_worker').set(self.extended.num_replicas_per_host)\n        self._enable_packed_variable_in_eager_mode = True\n\n    @property\n    def steps_per_run(self):\n        return self._extended.steps_per_run\n\n    def run(self, fn, args=(), kwargs=None, options=None):\n        validate_run_function(fn)\n        fn, args, kwargs = _maybe_partial_apply_variables(fn, args, kwargs)\n        fn = autograph.tf_convert(fn, autograph_ctx.control_status_ctx())\n        options = options or distribute_lib.RunOptions()\n        return self.extended.tpu_run(fn, args, kwargs, options)",
    "docstring": "TPU distribution strategy implementation.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\tpu_strategy.py",
    "ast_data": "ClassDef name:TPUStrategyV1 FunctionDef name:__init__ arg:self arg:tpu_cluster_resolver arg:steps_per_run arg:device_assignment arguments arg arg arg arg Call Call Call Call Call Call Call Call Call Assign FunctionDef name:steps_per_run arg:self arguments arg Return return:yes FunctionDef name:run arg:self arg:fn arg:args arg:kwargs arg:options arguments arg arg arg arg arg Call Assign Call Assign Call Call Assign BoolOp Call Return return:yes Call Call"
  },
  {
    "library": "scipy",
    "name": "_ensure_c_contiguous",
    "source_code": "def _ensure_c_contiguous(self):\n    if not self.t.flags.c_contiguous:\n        self.t = self.t.copy()\n    if not self.c.flags.c_contiguous:\n        self.c = self.c.copy()",
    "docstring": "c and t may be modified by the user. The Cython code expects that they are C contiguous.",
    "type": "method",
    "file_path": "scipy\\scipy\\interpolate\\_bsplines.py",
    "ast_data": "FunctionDef name:_ensure_c_contiguous arg:self arguments arg If Assign Call If Assign Call"
  },
  {
    "library": "tensorflow",
    "name": "_str_id",
    "source_code": "def _str_id(s, str_to_id):\n    num = str_to_id.get(s, None)\n    if num is None:\n        num = len(str_to_id)\n        str_to_id[s] = num\n    return num",
    "docstring": "Maps string to id.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\profiler\\tfprof_logger.py",
    "ast_data": "FunctionDef name:_str_id arg:s arg:str_to_id arguments arg arg Assign Call If Compare Assign Call Assign Return return:yes"
  },
  {
    "library": "sphinx",
    "name": "_should_unwrap",
    "source_code": "def _should_unwrap(subject: _SignatureType) -> bool:\n    __globals__ = getglobals(subject)\n    return __globals__.get('__name__') == 'contextlib' and __globals__.get('__file__') == contextlib.__file__",
    "docstring": "Check the function should be unwrapped on getting signature.",
    "type": "function",
    "file_path": "sphinx\\sphinx\\util\\inspect.py",
    "ast_data": "FunctionDef name:_should_unwrap arg:subject arguments arg Assign Call Return return:yes BoolOp Compare Call Compare Call"
  },
  {
    "library": "scrapy",
    "name": "handle_spider_error",
    "source_code": "def handle_spider_error(self, _failure: Failure, request: Request, response: Response | Failure, spider: Spider | None=None) -> None:\n    if spider is not None:\n        warnings.warn(\"Passing a 'spider' argument to Scraper.handle_spider_error() is deprecated.\", category=ScrapyDeprecationWarning, stacklevel=2)\n    assert self.crawler.spider\n    exc = _failure.value\n    if isinstance(exc, CloseSpider):\n        assert self.crawler.engine is not None\n        self.crawler.engine.close_spider(self.crawler.spider, exc.reason or 'cancelled')\n        return\n    logkws = self.logformatter.spider_error(_failure, request, response, self.crawler.spider)\n    logger.log(*logformatter_adapter(logkws), exc_info=failure_to_exc_info(_failure), extra={'spider': self.crawler.spider})\n    self.signals.send_catch_log(signal=signals.spider_error, failure=_failure, response=response, spider=self.crawler.spider)\n    assert self.crawler.stats\n    self.crawler.stats.inc_value('spider_exceptions/count', spider=self.crawler.spider)\n    self.crawler.stats.inc_value(f'spider_exceptions/{_failure.value.__class__.__name__}', spider=self.crawler.spider)",
    "docstring": "Handle an exception raised by a spider callback or errback.",
    "type": "method",
    "file_path": "scrapy\\scrapy\\core\\scraper.py",
    "ast_data": "FunctionDef name:handle_spider_error arg:self arg:_failure arg:request arg:response arg:spider arguments arg arg arg arg arg If Compare Call Assign If Call Compare Call BoolOp Return return:no Assign Call Call Call Call Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "std",
    "source_code": "@dispatch.add_dispatch_support\n@doc_controls.do_not_generate_docs\ndef std(x, axis=None, keepdims=False):\n    if x.dtype.base_dtype == dtypes_module.bool:\n        x = math_ops.cast(x, floatx())\n    return math_ops.reduce_std(x, axis=axis, keepdims=keepdims)",
    "docstring": "Standard deviation of a tensor, alongside the specified axis. It is an alias to . Args: x: A tensor or variable. It should have numerical dtypes. Boolean type inputs will be converted to float. axis: An integer, the axis to compute the standard deviation. If (the default), reduces all dimensions. Must be in the range . keepdims: A boolean, whether to keep the dimensions or not. If is , the rank of the tensor is reduced by 1. If is , the reduced dimension is retained with length 1. Returns: A tensor with the standard deviation of elements of with same dtype. Boolean type input will be converted to float.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\backend.py",
    "ast_data": "FunctionDef name:std arg:x arg:axis arg:keepdims arguments arg arg arg If Compare Assign Call Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "readlines",
    "source_code": "def readlines(self):\n    self._preread_check()\n    lines = []\n    while True:\n        s = self.readline()\n        if not s:\n            break\n        lines.append(s)\n    return lines",
    "docstring": "Returns all lines from the file in a list.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\lib\\io\\file_io.py",
    "ast_data": "FunctionDef name:readlines arg:self arguments arg Call Assign While Assign Call If Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_stringify_tree",
    "source_code": "def _stringify_tree(self, str_list: list[str], preamble: str='', dir_ptr: str='─── '):\n    space = '    '\n    branch = '│   '\n    tee = '├── '\n    last = '└── '\n    str_list.append(f'{preamble}{dir_ptr}{self.name}\\n')\n    if dir_ptr == tee:\n        preamble = preamble + branch\n    else:\n        preamble = preamble + space\n    file_keys: list[str] = []\n    dir_keys: list[str] = []\n    for key, val in self.children.items():\n        if val.is_dir:\n            dir_keys.append(key)\n        else:\n            file_keys.append(key)\n    for index, key in enumerate(sorted(dir_keys)):\n        if index == len(dir_keys) - 1 and len(file_keys) == 0:\n            self.children[key]._stringify_tree(str_list, preamble, last)\n        else:\n            self.children[key]._stringify_tree(str_list, preamble, tee)\n    for index, file in enumerate(sorted(file_keys)):\n        pointer = last if index == len(file_keys) - 1 else tee\n        str_list.append(f'{preamble}{pointer}{file}\\n')",
    "docstring": "Recursive method to generate print-friendly version of a Directory.",
    "type": "method",
    "file_path": "pytorch\\torch\\package\\file_structure_representation.py",
    "ast_data": "FunctionDef name:_stringify_tree arg:self arg:str_list arg:preamble arg:dir_ptr arguments arg arg arg arg Assign Assign Assign Assign Call If Compare Assign Assign For Call If Call Call For Call Call If BoolOp Compare Call Compare Call Call Call For Call Call Assign Compare Call Call"
  },
  {
    "library": "scikit-learn",
    "name": "predict",
    "source_code": "def predict(self, X, **params):\n    _raise_for_params(params, self, 'predict')\n    predicted_probabilitiy = self.predict_proba(X, **params)\n    return self.classes_.take(np.argmax(predicted_probabilitiy, axis=1), axis=0)",
    "docstring": "Predict class for X. The predicted class of an input sample is computed as the class with the highest mean predicted probability. If base estimators do not implement a `predict_probapredictsklearn.set_config(enable_metadata_routing=True)Metadata Routing User Guide ` for more details. Returns ------- y : ndarray of shape (n_samples,) The predicted classes.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\ensemble\\_bagging.py",
    "ast_data": "FunctionDef name:predict arg:self arg:X arguments arg arg arg Call Assign Call Return return:yes Call Call"
  },
  {
    "library": "cherrypy",
    "name": "send_response",
    "source_code": "def send_response(req, status, headers, body, stream=False):\n    req.status = int(status[:3])\n    req.content_type = 'text/plain'\n    for header, value in headers:\n        if header.lower() == 'content-type':\n            req.content_type = value\n            continue\n        req.headers_out.add(header, value)\n    if stream:\n        req.flush()\n    for seg in always_iterable(body):\n        req.write(seg)",
    "docstring": "Send the HTTP response to the client.",
    "type": "function",
    "file_path": "cherrypy\\cherrypy\\_cpmodpy.py",
    "ast_data": "FunctionDef name:send_response arg:req arg:status arg:headers arg:body arg:stream arguments arg arg arg arg arg Assign Call Assign For If Compare Call Assign Call If Call For Call Call"
  },
  {
    "library": "django",
    "name": "DateTimeRangeContains",
    "source_code": "class DateTimeRangeContains(PostgresOperatorLookup):\n    lookup_name = 'contains'\n    postgres_operator = RangeOperators.CONTAINS\n\n    def process_rhs(self, compiler, connection):\n        if isinstance(self.rhs, datetime.date):\n            value = models.Value(self.rhs)\n            self.rhs = value.resolve_expression(compiler.query)\n        return super().process_rhs(compiler, connection)\n\n    def as_postgresql(self, compiler, connection):\n        sql, params = super().as_postgresql(compiler, connection)\n        cast_sql = ''\n        if isinstance(self.rhs, models.Expression) and self.rhs._output_field_or_none and (not isinstance(self.rhs._output_field_or_none, self.lhs.output_field.__class__)):\n            cast_internal_type = self.lhs.output_field.base_field.get_internal_type()\n            cast_sql = '::{}'.format(connection.data_types.get(cast_internal_type))\n        return ('%s%s' % (sql, cast_sql), params)",
    "docstring": "Lookup for Date/DateTimeRange containment to cast the rhs to the correct type.",
    "type": "class",
    "file_path": "django\\django\\contrib\\postgres\\fields\\ranges.py",
    "ast_data": "ClassDef name:DateTimeRangeContains Assign Assign FunctionDef name:process_rhs arg:self arg:compiler arg:connection arguments arg arg arg If Call Assign Call Assign Call Return return:yes Call Call FunctionDef name:as_postgresql arg:self arg:compiler arg:connection arguments arg arg arg Assign Call Call Assign If BoolOp Call Call Assign Call Assign Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "anonymous_name",
    "source_code": "def anonymous_name():\n    return 'cd2c89b7-88b7-44c8-ad83-06c2a9158347'",
    "docstring": "Returns the anonymous shared name. In eager mode we create anonymous resources to avoid spurious sharing issues. The runtime generates a unique name on our behalf when the reserved anonymous shared name is used as a shared name. Returns: The anonymous shared name.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\context.py",
    "ast_data": "FunctionDef name:anonymous_name arguments Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_scope_vals",
    "source_code": "def _scope_vals(self, vals):\n    if isinstance(vals, (list, tuple)):\n        return vals\n    elif isinstance(vals, dict):\n        return vals.values()\n    else:\n        return [vals]",
    "docstring": "Return a list of values to pass to . Args: vals: A tensor, a list or tuple of tensors, or a dictionary. Returns: The values in vals as a list.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\data_flow_ops.py",
    "ast_data": "FunctionDef name:_scope_vals arg:self arg:vals arguments arg arg If Call Return return:yes If Call Return return:yes Call Return return:yes"
  },
  {
    "library": "numpy",
    "name": "_div",
    "source_code": "def _div(mul_f, c1, c2):\n    [c1, c2] = as_series([c1, c2])\n    if c2[-1] == 0:\n        raise ZeroDivisionError\n    lc1 = len(c1)\n    lc2 = len(c2)\n    if lc1 < lc2:\n        return (c1[:1] * 0, c1)\n    elif lc2 == 1:\n        return (c1 / c2[-1], c1[:1] * 0)\n    else:\n        quo = np.empty(lc1 - lc2 + 1, dtype=c1.dtype)\n        rem = c1\n        for i in range(lc1 - lc2, -1, -1):\n            p = mul_f([0] * i + [1], c2)\n            q = rem[-1] / p[-1]\n            rem = rem[:-1] - q * p[:-1]\n            quo[i] = q\n        return (quo, trimseq(rem))",
    "docstring": "Helper function used to implement the `` functions for more detail",
    "type": "function",
    "file_path": "numpy\\numpy\\polynomial\\polyutils.py",
    "ast_data": "FunctionDef name:_div arg:mul_f arg:c1 arg:c2 arguments arg arg arg Assign Call If Compare Raise Assign Call Assign Call If Compare Return return:yes If Compare Return return:yes Assign Call Assign For Call Assign Call Assign Assign Assign Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "should_invoke_op_callbacks",
    "source_code": "def should_invoke_op_callbacks():\n    ctx = context.context()\n    return ctx.op_callbacks and (not ctx.invoking_op_callbacks)",
    "docstring": "Determine if op callbacks are present and should be invoked. Returns: A thread-local result (boolean) indicating whether any op callback(s) exist and should be invoked.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\op_callbacks.py",
    "ast_data": "FunctionDef name:should_invoke_op_callbacks arguments Assign Call Return return:yes BoolOp"
  },
  {
    "library": "pandas",
    "name": "_from_sequence",
    "source_code": "@classmethod\ndef _from_sequence(cls, scalars, *, dtype: Dtype | None=None, copy: bool=False) -> Self:\n    pa_type = to_pyarrow_type(dtype)\n    pa_array = cls._box_pa_array(scalars, pa_type=pa_type, copy=copy)\n    arr = cls(pa_array)\n    return arr",
    "docstring": "Construct a new ExtensionArray from a sequence of scalars.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\arrays\\arrow\\array.py",
    "ast_data": "FunctionDef name:_from_sequence arg:cls arg:scalars arguments arg arg arg arg Assign Call Assign Call Assign Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "CurveFilledB",
    "source_code": "@_register_style(_style_list, name='-|>')\nclass CurveFilledB(_Curve):\n    arrow = '-|>'",
    "docstring": "An arrow with filled triangle head at the end.",
    "type": "class",
    "file_path": "matplotlib\\lib\\matplotlib\\patches.py",
    "ast_data": "ClassDef name:CurveFilledB Assign Call"
  },
  {
    "library": "matplotlib",
    "name": "GraphicsContextTemplate",
    "source_code": "class GraphicsContextTemplate(GraphicsContextBase):\n    pass",
    "docstring": "The graphics context provides the color, line styles, etc. See the cairo and postscript backends for examples of mapping the graphics context attributes (cap styles, join styles, line widths, colors) to a particular backend. In cairo this is done by wrapping a cairo.Context object and forwarding the appropriate calls to it using a dictionary mapping styles to gdk constants. In Postscript, all the work is done by the renderer, mapping line styles to postscript calls. If it's more appropriate to do the mapping at the renderer level (as in the postscript backend), you don't need to override any of the GC methods. If it's more appropriate to wrap an instance (as in the cairo backend) and do the mapping here, you'll need to override several of the setter methods. The base GraphicsContext stores colors as an RGB tuple on the unit interval, e.g., (0.5, 0.0, 1.0). You may need to map this to colors appropriate for your backend.",
    "type": "class",
    "file_path": "matplotlib\\lib\\matplotlib\\backends\\backend_template.py",
    "ast_data": "ClassDef name:GraphicsContextTemplate"
  },
  {
    "library": "pytorch",
    "name": "graph_break",
    "source_code": "def graph_break(self, msg='ComptimeContext.graph_break'):\n    unimplemented_v2(gb_type='ComptimeContext graph break', context=msg, explanation=f'Manually triggered ComptimeContext graph break with message {msg}.', hints=[])",
    "docstring": "Manually trigger a graph break",
    "type": "method",
    "file_path": "pytorch\\torch\\_dynamo\\comptime.py",
    "ast_data": "FunctionDef name:graph_break arg:self arg:msg arguments arg arg Call"
  },
  {
    "library": "tensorflow",
    "name": "to_complex128",
    "source_code": "@tf_export(v1=['to_complex128'])\n@dispatch.register_unary_elementwise_api\n@dispatch.add_dispatch_support\n@deprecation.deprecated(date=None, instructions='Use `tf.cast` instead.')\ndef to_complex128(x, name='ToComplex128'):\n    return cast(x, dtypes.complex128, name=name)",
    "docstring": "Casts a tensor to type . Args: x: A or or . name: A name for the operation (optional). Returns: A or or with same shape as with type . Raises: TypeError: If cannot be cast to the . @compatibility(TF2) This name was deprecated and removed in TF2, but has an exact replacement . There are no further issues with eager execution or tf.function. Before: >>> tf.compat.v1.to_complex128(tf.constant(1. + 2.j, dtype=tf.complex64)) After: >>> tf.cast(tf.constant(1. + 2.j, dtype=tf.complex64), tf.complex128) @end_compatibility",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\math_ops.py",
    "ast_data": "FunctionDef name:to_complex128 arg:x arg:name arguments arg arg Return return:yes Call Call Call"
  },
  {
    "library": "scikit-learn",
    "name": "fit_transform",
    "source_code": "@_fit_context(prefer_skip_nested_validation=True)\ndef fit_transform(self, X, y=None):\n    U, S, _, X, x_is_centered, xp = self._fit(X)\n    if U is not None:\n        U = U[:, :self.n_components_]\n        if self.whiten:\n            U *= sqrt(X.shape[0] - 1)\n        else:\n            U *= S[:self.n_components_]\n        return U\n    else:\n        return self._transform(X, xp, x_is_centered=x_is_centered)",
    "docstring": "Fit the model with X and apply the dimensionality reduction on X. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) Training data, where is the number of samples and is the number of features. y : Ignored Ignored. Returns ------- X_new : ndarray of shape (n_samples, n_components) Transformed values. Notes ----- This method returns a Fortran-ordered array. To convert it to a C-ordered array, use 'np.ascontiguousarray'.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\decomposition\\_pca.py",
    "ast_data": "FunctionDef name:fit_transform arg:self arg:X arg:y arguments arg arg arg Assign Call If Compare Assign If Call Return return:yes Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_row_partitions_identical",
    "source_code": "def _row_partitions_identical(shape_a, shape_b):\n    return shape_a.num_row_partitions == shape_b.num_row_partitions and all((a is b for a, b in zip(shape_a.row_partitions, shape_b.row_partitions)))",
    "docstring": "Returns True iff all row_partitions in shapes are identical.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\dynamic_ragged_shape.py",
    "ast_data": "FunctionDef name:_row_partitions_identical arg:shape_a arg:shape_b arguments arg arg Return return:yes BoolOp Compare Call Compare Call"
  },
  {
    "library": "scikit-learn",
    "name": "_serialize",
    "source_code": "def _serialize(self):\n    result = list()\n    for route in self._routes:\n        result.append({'caller': route.caller, 'callee': route.callee})\n    return result",
    "docstring": "Serialize the object. Returns ------- obj : list A serialized version of the instance in the form of a list.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\utils\\_metadata_requests.py",
    "ast_data": "FunctionDef name:_serialize arg:self arguments arg Assign Call For Call Return return:yes"
  },
  {
    "library": "numpy",
    "name": "compute_md5",
    "source_code": "def compute_md5(idirs):\n    return _compute_hash(idirs, hashlib.md5)",
    "docstring": "Compute md5 hash of files in idirs. Parameters ---------- idirs : directory path Directory containing files to be hashed.",
    "type": "function",
    "file_path": "numpy\\pavement.py",
    "ast_data": "FunctionDef name:compute_md5 arg:idirs arguments arg Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "highlight_max",
    "source_code": "@Substitution(subset=subset_args, color=coloring_args.format(default='yellow'), props=properties_args)\ndef highlight_max(self, subset: Subset | None=None, color: str='yellow', axis: Axis | None=0, props: str | None=None) -> Styler:\n    if props is None:\n        props = f'background-color: {color};'\n    return self.apply(partial(_highlight_value, op='max'), axis=axis, subset=subset, props=props)",
    "docstring": "Highlight the maximum with a style. Parameters ---------- %(subset)s %(color)s axis : {0 or 'index', 1 or 'columns', None}, default 0 Apply to each column (`Table Visualization `_ for more examples.",
    "type": "method",
    "file_path": "pandas\\pandas\\io\\formats\\style.py",
    "ast_data": "FunctionDef name:highlight_max arg:self arg:subset arg:color arg:axis arg:props arguments arg arg arg arg arg If Compare Assign Return return:yes Call Call Call Call"
  },
  {
    "library": "scikit-learn",
    "name": "get_cluster_to_bisect",
    "source_code": "def get_cluster_to_bisect(self):\n    max_score = None\n    for cluster_leaf in self.iter_leaves():\n        if max_score is None or cluster_leaf.score > max_score:\n            max_score = cluster_leaf.score\n            best_cluster_leaf = cluster_leaf\n    return best_cluster_leaf",
    "docstring": "Return the cluster node to bisect next. It's based on the score of the cluster, which can be either the number of data points assigned to that cluster or the inertia of that cluster (see for details).",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\cluster\\_bisect_k_means.py",
    "ast_data": "FunctionDef name:get_cluster_to_bisect arg:self arguments arg Assign For Call If BoolOp Compare Compare Assign Assign Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_find_root_edge_or_node",
    "source_code": "def _find_root_edge_or_node(edge_or_node: EdgeOrNode, shared_with_map: dict[EdgeOrNode, EdgeOrNode]) -> EdgeOrNode:\n    parent = shared_with_map[edge_or_node]\n    if parent == edge_or_node:\n        return edge_or_node\n    root = _find_root_edge_or_node(parent, shared_with_map)\n    shared_with_map[edge_or_node] = root\n    return root",
    "docstring": "Find the root node for the sharing tree Args: edge_or_node: edge/node that we want to find the root shared_with_map: each edge/node points to the parent, the root node will points to itself Returns: root edge/node",
    "type": "function",
    "file_path": "pytorch\\torch\\ao\\quantization\\pt2e\\prepare.py",
    "ast_data": "FunctionDef name:_find_root_edge_or_node arg:edge_or_node arg:shared_with_map arguments arg arg Assign If Compare Return return:yes Assign Call Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "expand_dims",
    "source_code": "@dispatch.add_dispatch_support\n@doc_controls.do_not_generate_docs\ndef expand_dims(x, axis=-1):\n    return array_ops.expand_dims(x, axis)",
    "docstring": "Adds a 1-sized dimension at index \"axis\". Args: x: A tensor or variable. axis: Position where to add a new axis. Returns: A tensor with expanded dimensions.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\backend.py",
    "ast_data": "FunctionDef name:expand_dims arg:x arg:axis arguments arg arg Return return:yes Call"
  },
  {
    "library": "numpy",
    "name": "Op",
    "source_code": "class Op(Enum):\n    INTEGER = 10\n    REAL = 12\n    COMPLEX = 15\n    STRING = 20\n    ARRAY = 30\n    SYMBOL = 40\n    TERNARY = 100\n    APPLY = 200\n    INDEXING = 210\n    CONCAT = 220\n    RELATIONAL = 300\n    TERMS = 1000\n    FACTORS = 2000\n    REF = 3000\n    DEREF = 3001",
    "docstring": "Used as Expr op attribute.",
    "type": "class",
    "file_path": "numpy\\numpy\\f2py\\symbolic.py",
    "ast_data": "ClassDef name:Op Assign Assign Assign Assign Assign Assign Assign Assign Assign Assign Assign Assign Assign Assign Assign"
  },
  {
    "library": "authlib",
    "name": "resolve_client_key",
    "source_code": "def resolve_client_key(self, client, headers, payload):\n    raise NotImplementedError()",
    "docstring": "Resolve client key to decode assertion data. Developers MUST implement this method in subclass. For instance, there is a \"jwks\" column on client table, e.g.:: def resolve_client_key(self, client, headers, payload): # from authlib.jose import JsonWebKey key_set = JsonWebKey.import_key_set(client.jwks) return key_set.find_by_kid(headers[\"kid\"]) :param client: instance of OAuth client model :param headers: headers part of the JWT :param payload: payload part of the JWT :return: `` instance",
    "type": "method",
    "file_path": "authlib\\authlib\\oauth2\\rfc7523\\jwt_bearer.py",
    "ast_data": "FunctionDef name:resolve_client_key arg:self arg:client arg:headers arg:payload arguments arg arg arg arg Raise Call"
  },
  {
    "library": "pytorch",
    "name": "UnsupportedOperatorError",
    "source_code": "class UnsupportedOperatorError(OnnxExporterError):\n\n    def __init__(self, name: str, version: int, supported_version: int | None):\n        if supported_version is not None:\n            msg = f\"Exporting the operator '{name}' to ONNX opset version {version} is not supported. Support for this operator was added in version {supported_version}, try exporting with this version\"\n        elif name.startswith(('aten::', 'prim::', 'quantized::')):\n            msg = f\"Exporting the operator '{name}' to ONNX opset version {version} is not supported\"\n        else:\n            msg = 'ONNX export failed on an operator with unrecognized namespace {op_name}. If you are trying to export a custom operator, make sure you registered it with the right domain and version.'\n        super().__init__(msg)",
    "docstring": "Raised when an operator is unsupported by the exporter.",
    "type": "class",
    "file_path": "pytorch\\torch\\onnx\\errors.py",
    "ast_data": "ClassDef name:UnsupportedOperatorError FunctionDef name:__init__ arg:self arg:name arg:version arg:supported_version arguments arg arg arg arg If Compare Assign If Call Assign Assign Call Call"
  },
  {
    "library": "pytorch",
    "name": "_get_entrypoint_name",
    "source_code": "def _get_entrypoint_name(entrypoint: Union[Callable, str, None], args: list[Any]) -> str:\n    if isinstance(entrypoint, Callable):\n        return entrypoint.__name__\n    elif isinstance(entrypoint, str):\n        if entrypoint == sys.executable:\n            return next((arg for arg in args if arg[0] != '-'), '')\n        else:\n            return entrypoint\n    else:\n        return ''",
    "docstring": "Retrieve entrypoint name with the rule: 1. If entrypoint is a function, use `` value. 3. Otherwise, return empty string.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\launcher\\api.py",
    "ast_data": "FunctionDef name:_get_entrypoint_name arg:entrypoint arg:args arguments arg arg If Call Return return:yes If Call If Compare Return return:yes Call Compare Return return:yes Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_IgnoreErrorsDataset",
    "source_code": "class _IgnoreErrorsDataset(dataset_ops.UnaryUnchangedStructureDataset):\n\n    def __init__(self, input_dataset, log_warning, name=None):\n        self._input_dataset = input_dataset\n        self._name = name\n        variant_tensor = gen_experimental_dataset_ops.ignore_errors_dataset(self._input_dataset._variant_tensor, log_warning=log_warning, **self._flat_structure)\n        super().__init__(input_dataset, variant_tensor)",
    "docstring": "A that drops erroneous elements from its input.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\data\\ops\\ignore_errors_op.py",
    "ast_data": "ClassDef name:_IgnoreErrorsDataset FunctionDef name:__init__ arg:self arg:input_dataset arg:log_warning arg:name arguments arg arg arg arg Assign Assign Assign Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "DonatedBuffer",
    "source_code": "class DonatedBuffer(InputBuffer):\n    pass",
    "docstring": "Represents a donated buffer which is a saved tensor that is not alias to any fwd inputs, fwd user outputs, and bwd outputs. We generally cannot inplace reuse the input tensor memory during backward since it might be used in another function. However, donated buffer can be inplace reused during backward to save memory.",
    "type": "class",
    "file_path": "pytorch\\torch\\_inductor\\ir.py",
    "ast_data": "ClassDef name:DonatedBuffer"
  },
  {
    "library": "sphinx",
    "name": "__getstate__",
    "source_code": "def __getstate__(self) -> dict[str, Any]:\n    __dict__ = self.__dict__.copy()\n    __dict__.update(app=None, domains=None, events=None)\n    __dict__.update(_pickled_doctree_cache={}, _write_doc_doctree_cache={})\n    return __dict__",
    "docstring": "Obtains serializable data for pickling.",
    "type": "method",
    "file_path": "sphinx\\sphinx\\environment\\__init__.py",
    "ast_data": "FunctionDef name:__getstate__ arg:self arguments arg Assign Call Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "resolve_entity",
    "source_code": "def resolve_entity(node, source, entity):\n    lines, lineno = tf_inspect.getsourcelines(entity)\n    filepath = tf_inspect.getsourcefile(entity)\n    definition_line = lines[0]\n    col_offset = len(definition_line) - len(definition_line.lstrip())\n    resolve(node, source, filepath, lineno, col_offset)",
    "docstring": "Like resolve, but extracts the context information from an entity.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\autograph\\pyct\\origin_info.py",
    "ast_data": "FunctionDef name:resolve_entity arg:node arg:source arg:entity arguments arg arg arg Assign Call Assign Call Assign Assign Call Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "getmembers",
    "source_code": "def getmembers(object, predicate=None):\n    return _inspect.getmembers(object, predicate)",
    "docstring": "TFDecorator-aware replacement for inspect.getmembers.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\utils\\tf_inspect.py",
    "ast_data": "FunctionDef name:getmembers arg:object arg:predicate arguments arg arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "size",
    "source_code": "def size(self, name=None):\n    with ops.name_scope(name, '%s_Size' % self.name, [self.resource_handle]):\n        with ops.colocate_with(self.resource_handle):\n            return gen_lookup_ops.lookup_table_size_v2(self.resource_handle)",
    "docstring": "Compute the number of elements in this table. Args: name: A name for the operation (optional). Returns: A scalar tensor containing the number of elements in this table.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\lookup_ops.py",
    "ast_data": "FunctionDef name:size arg:self arg:name arguments arg arg With Call With Call Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "set_bounds",
    "source_code": "def set_bounds(self, *args):\n    if len(args) == 1:\n        l, b, w, h = args[0]\n    else:\n        l, b, w, h = args\n    self._x = l\n    self._y = b\n    self._width = w\n    self._height = h\n    self.stale = True",
    "docstring": "Set the bounds of the rectangle. Call signatures:: set_bounds(left, bottom, width, height) set_bounds((left, bottom, width, height)) Parameters ---------- left, bottom : float The coordinates of the bottom left corner of the rectangle. width, height : float The width/height of the rectangle.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\patches.py",
    "ast_data": "FunctionDef name:set_bounds arg:self arguments arg arg If Compare Call Assign Assign Assign Assign Assign Assign Assign"
  },
  {
    "library": "pandas",
    "name": "_get_tz",
    "source_code": "def _get_tz(tz: tzinfo) -> str | tzinfo:\n    zone = timezones.get_timezone(tz)\n    return zone",
    "docstring": "for a tz-aware type, return an encoded zone",
    "type": "function",
    "file_path": "pandas\\pandas\\io\\pytables.py",
    "ast_data": "FunctionDef name:_get_tz arg:tz arguments arg Assign Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "get_fwd_recv_ops",
    "source_code": "def get_fwd_recv_ops(self, fwd_chunk_id: int) -> list[dist.P2POp]:\n    recv_infos: tuple[InputInfo, ...] = self.args_recv_info[fwd_chunk_id]\n    return self._get_recv_ops(recv_infos)",
    "docstring": "Returns a list of ops that are needed to receive the input arguments for this stage.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\pipelining\\stage.py",
    "ast_data": "FunctionDef name:get_fwd_recv_ops arg:self arg:fwd_chunk_id arguments arg arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "add_import",
    "source_code": "def add_import(self, symbol, source_module_name, source_name, dest_module_name, dest_name):\n    if source_module_name.endswith('python.modules_with_exports'):\n        source_module_name = symbol.__module__\n    import_str = self.format_import(source_module_name, source_name, dest_name)\n    full_api_name = dest_name\n    if dest_module_name:\n        full_api_name = dest_module_name + '.' + full_api_name\n    symbol_id = -1 if not symbol else id(symbol)\n    self._check_already_imported(symbol_id, full_api_name)\n    if not dest_module_name and dest_name.startswith('_'):\n        self._underscore_names_in_root.add(dest_name)\n    priority = 0\n    if symbol:\n        if hasattr(symbol, '__module__'):\n            priority = int(source_module_name == symbol.__module__)\n        if hasattr(symbol, '__name__'):\n            priority += int(source_name == symbol.__name__)\n    self._module_imports[dest_module_name][full_api_name].add((import_str, priority))",
    "docstring": "Adds this import to module_imports. Args: symbol: TensorFlow Python symbol. source_module_name: (string) Module to import from. source_name: (string) Name of the symbol to import. dest_module_name: (string) Module name to add import to. dest_name: (string) Import the symbol using this name. Raises: SymbolExposedTwiceError: Raised when an import with the same dest_name has already been added to dest_module_name.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\tools\\api\\generator\\create_python_api.py",
    "ast_data": "FunctionDef name:add_import arg:self arg:symbol arg:source_module_name arg:source_name arg:dest_module_name arg:dest_name arguments arg arg arg arg arg arg If Call Assign Assign Call Assign If Assign Assign Call Call If BoolOp Call Call Assign If If Call Assign Call Compare If Call Call Compare Call"
  },
  {
    "library": "kornia",
    "name": "warp_grid",
    "source_code": "def warp_grid(self, depth_src: Tensor) -> Tensor:\n    if self._dst_proj_src is None or self._pinhole_src is None:\n        raise ValueError('Please, call compute_projection_matrix.')\n    if len(depth_src.shape) != 4:\n        raise ValueError(f'Input depth_src has to be in the shape of Bx1xHxW. Got {depth_src.shape}')\n    batch_size, _, _, _ = depth_src.shape\n    device: torch.device = depth_src.device\n    dtype: torch.dtype = depth_src.dtype\n    pixel_coords: Tensor = self.grid.to(device=device, dtype=dtype).expand(batch_size, -1, -1, -1)\n    cam_coords_src: Tensor = pixel2cam(depth_src, self._pinhole_src.intrinsics_inverse().to(device=device, dtype=dtype), pixel_coords)\n    pixel_coords_src: Tensor = cam2pixel(cam_coords_src, self._dst_proj_src.to(device=device, dtype=dtype))\n    pixel_coords_src_norm: Tensor = normalize_pixel_coordinates(pixel_coords_src, self.height, self.width)\n    return pixel_coords_src_norm",
    "docstring": "Compute a grid for warping a given the depth from the reference pinhole camera. The function has to be called beforehand in order to have precomputed the relative projection matrices encoding the relative pose and the intrinsics between the reference and a non reference camera.",
    "type": "method",
    "file_path": "kornia\\kornia\\geometry\\depth.py",
    "ast_data": "FunctionDef name:warp_grid arg:self arg:depth_src arguments arg arg If BoolOp Compare Compare Raise Call If Compare Call Raise Call Assign Call Call Call Call Call Call Call Call Return return:yes"
  },
  {
    "library": "pandas",
    "name": "_cycle_colors",
    "source_code": "def _cycle_colors(colors: list[Color], num_colors: int) -> Iterator[Color]:\n    max_colors = max(num_colors, len(colors))\n    yield from itertools.islice(itertools.cycle(colors), max_colors)",
    "docstring": "Cycle colors until achieving max of or length of . Extra colors will be ignored by matplotlib if there are more colors than needed and nothing needs to be done here.",
    "type": "function",
    "file_path": "pandas\\pandas\\plotting\\_matplotlib\\style.py",
    "ast_data": "FunctionDef name:_cycle_colors arg:colors arg:num_colors arguments arg arg Assign Call Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_legacy_weights",
    "source_code": "def _legacy_weights(layer):\n    weights = layer.trainable_weights + layer.non_trainable_weights\n    if any((not isinstance(w, variables_module.Variable) for w in weights)):\n        raise NotImplementedError(\"Save or restore weights that is not an instance of `tf.Variable` is not supported in h5, use `save_format='tf'` instead. Got a model or layer {} with weights {}\".format(layer.__class__.__name__, weights))\n    return weights",
    "docstring": "DO NOT USE. For legacy reason, the layer.weights was in the order of [self.trainable_weights + self.non_trainable_weights], and this order was used for preserving the weights in h5 format. The new order of layer.weights are the same as layer.get_weights() which is more intuitive for user. To keep supporting the existing saved h5 file, this method should be used to save/load weights. In future version, we will delete this method and introduce a breaking change for h5 and stay with the new order for weights. Args: layer: a or instance. Returns: A list of variables with the order of trainable_weights, followed by non_trainable_weights.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\saving\\hdf5_format.py",
    "ast_data": "FunctionDef name:_legacy_weights arg:layer arguments arg Assign If Call Call Raise Call Call Return return:yes"
  },
  {
    "library": "cherrypy",
    "name": "_get_interpreter_argv",
    "source_code": "@staticmethod\ndef _get_interpreter_argv():\n    return [] if getattr(sys, 'frozen', False) else subprocess._args_from_interpreter_flags()",
    "docstring": "Retrieve current Python interpreter's arguments. Returns empty tuple in case of frozen mode, uses built-in arguments reproduction function otherwise. Frozen mode is possible for the app has been packaged into a binary executable using py2exe. In this case the interpreter's arguments are already built-in into that executable. :seealso: Ref:",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\process\\wspbus.py",
    "ast_data": "FunctionDef name:_get_interpreter_argv arguments Return return:yes Call Call"
  },
  {
    "library": "sphinx",
    "name": "add_cell",
    "source_code": "def add_cell(self, height: int, width: int) -> None:\n    self.cell_id += 1\n    for col in range(width):\n        for row in range(height):\n            assert self.cells[self.row + row, self.col + col] == 0\n            self.cells[self.row + row, self.col + col] = self.cell_id",
    "docstring": "Adds a new cell to a table. It will be located at current position: (``).",
    "type": "method",
    "file_path": "sphinx\\sphinx\\writers\\latex.py",
    "ast_data": "FunctionDef name:add_cell arg:self arg:height arg:width arguments arg arg arg For Call For Call Compare Assign"
  },
  {
    "library": "pytorch",
    "name": "_get_sharded_size",
    "source_code": "@staticmethod\ndef _get_sharded_size(tensor: Tensor, rank: int, world_size: int) -> torch.Size:\n    assert len(tensor.shape) == 1, f'{tensor.shape}'\n    unpadded_sharded_tensor, numel_to_pad = FlatParamHandle._get_unpadded_shard(tensor, rank, world_size)\n    unpadded_sharded_size = unpadded_sharded_tensor.size()\n    assert len(unpadded_sharded_size) == 1, f'{unpadded_sharded_size}'\n    return torch.Size([unpadded_sharded_size[0] + numel_to_pad])",
    "docstring": "Return the shape of `` to have 1D shape and ensures that the returned shape is 1D.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\fsdp\\_flat_param.py",
    "ast_data": "FunctionDef name:_get_sharded_size arg:tensor arg:rank arg:world_size arguments arg arg arg Compare Call Assign Call Assign Call Compare Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "convert_graphdef_with_arrays",
    "source_code": "@convert_phase(Component.CONVERT_TF_TO_TFLITE_MODEL, SubComponent.CONVERT_GRAPHDEF)\ndef convert_graphdef_with_arrays(input_data, input_arrays_with_shape, output_arrays, control_output_arrays, **kwargs):\n    model_flags = build_model_flags(**kwargs)\n    conversion_flags = build_conversion_flags(**kwargs)\n    quantized_input_stats = kwargs.get('quantized_input_stats', None)\n    for idx, (name, shape) in enumerate(input_arrays_with_shape):\n        input_array = model_flags.input_arrays.add()\n        if _is_quantized_input_stats_required(conversion_flags):\n            if quantized_input_stats:\n                input_array.mean_value, input_array.std_value = quantized_input_stats[idx]\n            else:\n                raise ValueError('The `quantized_input_stats` flag must be defined when either `inference_type` flag or `inference_input_type` flag is set to tf.int8 or tf.uint8.')\n        input_array.name = name\n        input_array.shape.dims.extend(list(map(int, shape)))\n    if output_arrays:\n        for name in output_arrays:\n            model_flags.output_arrays.append(name)\n    if control_output_arrays:\n        for name in control_output_arrays:\n            model_flags.control_output_arrays.append(name)\n    data = convert(model_flags, conversion_flags, input_data.SerializeToString(), debug_info_str=None)\n    return data",
    "docstring": "Convert a frozen GraphDef that can't be loaded in TF. Conversion can be customized by providing arguments that are forwarded to and (see documentation). Args: input_data: Input data (i.e. often ), input_arrays_with_shape: Tuple of strings representing input tensor names and list of integers representing input shapes (e.g., [(\"foo\" : [1, 16, 16, 3])]). Use only when graph cannot be loaded into TensorFlow and when is None. output_arrays: List of output tensors to freeze graph with. Use only when graph cannot be loaded into TensorFlow and when is None. control_output_arrays: Control output node names. This is used when converting a Graph with no output tensors. For example, if the graph's last operation is a Print op, just specify that op's name in this field. This can be used together with the parameter. **kwargs: See and . Returns: The converted data. For example if TFLite was the destination, then this will be a tflite flatbuffer in a bytes array. Raises: Defined in .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\lite\\python\\convert.py",
    "ast_data": "FunctionDef name:convert_graphdef_with_arrays arg:input_data arg:input_arrays_with_shape arg:output_arrays arg:control_output_arrays arguments arg arg arg arg arg Assign Call Assign Call Assign Call For Call Assign Call If Call If Assign Raise Call Assign Call Call Call If For Call If For Call Assign Call Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "prefer_static_shape",
    "source_code": "def prefer_static_shape(x):\n    return prefer_static_value(array_ops.shape(x))",
    "docstring": "Return static shape of tensor if available, else . Args: x: (already converted). Returns: Numpy array (if static shape is obtainable), else .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\distributions\\util.py",
    "ast_data": "FunctionDef name:prefer_static_shape arg:x arguments arg Return return:yes Call Call"
  },
  {
    "library": "django",
    "name": "_find_boundary",
    "source_code": "def _find_boundary(self, data):\n    index = data.find(self._boundary)\n    if index < 0:\n        return None\n    else:\n        end = index\n        next = index + len(self._boundary)\n        last = max(0, end - 1)\n        if data[last:last + 1] == b'\\n':\n            end -= 1\n        last = max(0, end - 1)\n        if data[last:last + 1] == b'\\r':\n            end -= 1\n        return (end, next)",
    "docstring": "Find a multipart boundary in data. Should no boundary exist in the data, return None. Otherwise, return a tuple containing the indices of the following: * the end of current encapsulation * the start of the next encapsulation",
    "type": "method",
    "file_path": "django\\django\\http\\multipartparser.py",
    "ast_data": "FunctionDef name:_find_boundary arg:self arg:data arguments arg arg Assign Call If Compare Return return:no Assign Assign Call Assign Call If Compare Assign Call If Compare Return return:yes"
  },
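A minimal standalone sketch of the boundary scan implemented by `_find_boundary` (plain Python; `find_boundary` is a hypothetical name, not Django's API), showing how the end index steps back over a trailing CRLF so the current encapsulation excludes the line break:

```python
def find_boundary(data: bytes, boundary: bytes):
    # Locate the boundary, then walk "end" back over an optional \n and \r
    # so the preceding encapsulation does not include its trailing CRLF.
    index = data.find(boundary)
    if index < 0:
        return None
    end, nxt = index, index + len(boundary)
    if data[max(0, end - 1):end] == b'\n':
        end -= 1
    if data[max(0, end - 1):end] == b'\r':
        end -= 1
    return (end, nxt)

print(find_boundary(b'abc\r\n--token', b'--token'))  # (3, 12)
```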
  {
    "library": "pytorch",
    "name": "apply",
    "source_code": "@classmethod\ndef apply(cls, module, name, amount, dim=-1):\n    return super().apply(module, name, amount=amount, dim=dim)",
    "docstring": "Add pruning on the fly and reparametrization of a tensor. Adds the forward pre-hook that enables pruning on the fly and the reparametrization of a tensor in terms of the original tensor and the pruning mask. Args: module (nn.Module): module containing the tensor to prune name (str): parameter name within ``, it represents the absolute number of parameters to prune. dim (int, optional): index of the dim along which we define channels to prune. Default: -1.",
    "type": "method",
    "file_path": "pytorch\\torch\\nn\\utils\\prune.py",
    "ast_data": "FunctionDef name:apply arg:cls arg:module arg:name arg:amount arg:dim arguments arg arg arg arg arg Return return:yes Call Call"
  },
  {
    "library": "scipy",
    "name": "_autoscale",
    "source_code": "def _autoscale(A, b, c, x0):\n    m, n = A.shape\n    C = 1\n    R = 1\n    if A.size > 0:\n        R = np.max(np.abs(A), axis=1)\n        if sps.issparse(A):\n            R = R.toarray().flatten()\n        R[R == 0] = 1\n        R = 1 / _round_to_power_of_two(R)\n        A = sps.diags_array(R) @ A if sps.issparse(A) else A * R.reshape(m, 1)\n        b = b * R\n        C = np.max(np.abs(A), axis=0)\n        if sps.issparse(A):\n            C = C.toarray().flatten()\n        C[C == 0] = 1\n        C = 1 / _round_to_power_of_two(C)\n        A = A @ sps.diags_array(C) if sps.issparse(A) else A * C\n        c = c * C\n    b_scale = np.max(np.abs(b)) if b.size > 0 else 1\n    if b_scale == 0:\n        b_scale = 1.0\n    b = b / b_scale\n    if x0 is not None:\n        x0 = x0 / b_scale * (1 / C)\n    return (A, b, c, x0, C, b_scale)",
    "docstring": "Scales the problem according to equilibration from [12]. Also normalizes the right hand side vector by its maximum element.",
    "type": "function",
    "file_path": "scipy\\scipy\\optimize\\_linprog_util.py",
    "ast_data": "FunctionDef name:_autoscale arg:A arg:b arg:c arg:x0 arguments arg arg arg arg Assign Assign Assign If Compare Assign Call Call If Call Assign Call Call Assign Compare Assign Call Assign Call Call Call Assign Assign Call Call If Call Assign Call Call Assign Compare Assign Call Assign Call Call Assign Assign Compare Call Call If Compare Assign Assign If Compare Assign Return return:yes"
  },
  {
    "library": "pandas",
    "name": "is_multi_index",
    "source_code": "@property\ndef is_multi_index(self) -> bool:\n    return isinstance(self.levels, list)",
    "docstring": "the levels attribute is 1 or a list in the case of a multi-index",
    "type": "method",
    "file_path": "pandas\\pandas\\io\\pytables.py",
    "ast_data": "FunctionDef name:is_multi_index arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "get_all_kernel_argdefs",
    "source_code": "def get_all_kernel_argdefs(kernels):\n    argdefs_list = [get_kernel_argdefs(kernel) for kernel in kernels]\n    return _get_all_args(argdefs_list)[0]",
    "docstring": "The logic here must match with , except no need to get arg_types here",
    "type": "function",
    "file_path": "pytorch\\torch\\_inductor\\codegen\\multi_kernel.py",
    "ast_data": "FunctionDef name:get_all_kernel_argdefs arg:kernels arguments arg Assign Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "realize",
    "source_code": "def realize(self) -> Optional[str]:\n    raise NotImplementedError(f'realize NYI on {type(self)}')",
    "docstring": "If the IRNode refers to data which has not been materialized (e.g., it is a Pointwise/Reduction that could potentially have more compute fused into it), realize the IRNode into physical memory, ending the possibility of fusing into it, but allowing, e.g., multiple users to access the data without having to recompute. Check StorageBox.realize for a particularly notable implementation. TODO(ezyang): I think, in principle, every IRNode should have an implementation of this, and most of the time no-op is OK, but you really do have to audit each IRNode for this, so for now, raise an error if it's not implemented. Note that some code in graph.py will catch this thrown error and suppress it with a warning.",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\ir.py",
    "ast_data": "FunctionDef name:realize arg:self arguments arg Raise Call Call"
  },
  {
    "library": "pytorch",
    "name": "FakeRootModule",
    "source_code": "class FakeRootModule(torch.nn.Module):\n\n    def __init__(self, nn_modules: dict[str, torch.nn.Module]):\n        super().__init__()\n        for k, v in nn_modules.items():\n            setattr(self, k, v)\n\n    def __repr__(self) -> str:\n        return 'FakeRootModule(...)'\n\n    def add_nn_modules(self, nn_modules: dict[str, torch.nn.Module]):\n        for k, v in nn_modules.items():\n            setattr(self, k, v)",
    "docstring": "Trick the constructor of fx.GraphModule",
    "type": "class",
    "file_path": "pytorch\\torch\\_dynamo\\output_graph.py",
    "ast_data": "ClassDef name:FakeRootModule FunctionDef name:__init__ arg:self arg:nn_modules arguments arg arg Call Call For Call Call FunctionDef name:__repr__ arg:self arguments arg Return return:yes FunctionDef name:add_nn_modules arg:self arg:nn_modules arguments arg arg For Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_FresnelCosGrad",
    "source_code": "@ops.RegisterGradient('FresnelCos')\ndef _FresnelCosGrad(op: ops.Operation, grad):\n    x = op.inputs[0]\n    with ops.control_dependencies([grad]):\n        return grad * math_ops.cos(np.pi / 2.0 * math_ops.square(x))",
    "docstring": "Compute gradient of fresnel_cos(x) with respect to its argument.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\math_grad.py",
    "ast_data": "FunctionDef name:_FresnelCosGrad arg:op arg:grad arguments arg arg Assign With Call Return return:yes Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "AlreadyExistsError",
    "source_code": "@tf_export('errors.AlreadyExistsError')\nclass AlreadyExistsError(OpError):\n\n    def __init__(self, node_def, op, message, *args):\n        super(AlreadyExistsError, self).__init__(node_def, op, message, ALREADY_EXISTS, *args)",
    "docstring": "Raised when an entity that we attempted to create already exists. An API raises this this error to avoid overwriting an existing resource, value, etc. Calling a creation API multiple times with the same arguments could raise this error if the creation API is not idempotent. For example, running an operation that saves a file (e.g. ) could potentially raise this exception if an explicit filename for an existing file was passed.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\errors_impl.py",
    "ast_data": "ClassDef name:AlreadyExistsError FunctionDef name:__init__ arg:self arg:node_def arg:op arg:message arguments arg arg arg arg arg Call Call Call"
  },
  {
    "library": "numpy",
    "name": "CommandLineParser",
    "source_code": "class CommandLineParser:\n\n    @staticmethod\n    def join(argv):\n        raise NotImplementedError\n\n    @staticmethod\n    def split(cmd):\n        raise NotImplementedError",
    "docstring": "An object that knows how to split and join command-line arguments. It must be true that `join(split(cmd))` may result in the addition or removal of unnecessary escaping.",
    "type": "class",
    "file_path": "numpy\\numpy\\distutils\\_shell_utils.py",
    "ast_data": "ClassDef name:CommandLineParser FunctionDef name:join arg:argv arguments arg Raise FunctionDef name:split arg:cmd arguments arg Raise"
  },
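As an illustration of the round-trip contract stated above, here is a sketch built on the standard-library `shlex` module (`shlex.join` needs Python 3.8+); `PosixParser` is a hypothetical stand-in, not numpy's concrete subclass:

```python
import shlex

class PosixParser:
    # Satisfies argv == split(join(argv)); the reverse round trip may
    # normalize escaping, as the docstring above allows.
    @staticmethod
    def join(argv):
        return shlex.join(argv)

    @staticmethod
    def split(cmd):
        return shlex.split(cmd)

argv = ['gcc', '-o', 'my program', 'main.c']
assert PosixParser.split(PosixParser.join(argv)) == argv
print(PosixParser.join(PosixParser.split('gcc   "main.c"')))  # gcc main.c
```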
  {
    "library": "django",
    "name": "check_dbl",
    "source_code": "def check_dbl(result, func, cargs):\n    if result != 1:\n        return None\n    return last_arg_byref(cargs)",
    "docstring": "Check the status code and returns the double value passed in by reference.",
    "type": "function",
    "file_path": "django\\django\\contrib\\gis\\geos\\prototypes\\errcheck.py",
    "ast_data": "FunctionDef name:check_dbl arg:result arg:func arg:cargs arguments arg arg arg If Compare Return return:no Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "encode_data",
    "source_code": "def encode_data(self, data, attributes):\n    current_row = 0\n    for inst in data:\n        if len(inst) != len(attributes):\n            raise BadObject('Instance %d has %d attributes, expected %d' % (current_row, len(inst), len(attributes)))\n        new_data = []\n        for value in inst:\n            if value is None or value == '' or value != value:\n                s = '?'\n            else:\n                s = encode_string(str(value))\n            new_data.append(s)\n        current_row += 1\n        yield ','.join(new_data)",
    "docstring": "(INTERNAL) Encodes a line of data. Data instances follow the csv format, i.e, attribute values are delimited by commas. After converted from csv. :param data: a list of values. :param attributes: a list of attributes. Used to check if data is valid. :return: a string with the encoded data line.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\externals\\_arff.py",
    "ast_data": "FunctionDef name:encode_data arg:self arg:data arg:attributes arguments arg arg arg Assign For If Compare Call Call Raise Call Call Call Assign For If BoolOp Compare Compare Compare Assign Assign Call Call Call Call"
  },
  {
    "library": "django",
    "name": "geom_type",
    "source_code": "@property\ndef geom_type(self):\n    return OGRGeomType(capi.get_fd_geom_type(self._layer._ldefn))",
    "docstring": "Return the OGR Geometry Type for this Feature.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\gdal\\feature.py",
    "ast_data": "FunctionDef name:geom_type arg:self arguments arg Return return:yes Call Call"
  },
  {
    "library": "kornia",
    "name": "Yuv420ToRgb",
    "source_code": "class Yuv420ToRgb(Module):\n    ONNX_EXPORTABLE = False\n\n    def forward(self, inputy: Tensor, inputuv: Tensor) -> Tensor:\n        return yuv420_to_rgb(inputy, inputuv)",
    "docstring": "Convert an image from YUV to RGB. Width and Height must be evenly divisible by 2. The image data is assumed to be in the range of :math: for luma (Y). The ranges of U and V are :math: and :math:, respectively. YUV formula follows M/PAL values (see _, Table 2, items 2.5 and 2.6). Returns: RGB version of the image. Shape: - imagey: :math: - imageuv: :math: - output: :math: Examples: >>> inputy = torch.rand(2, 1, 4, 6) >>> inputuv = torch.rand(2, 2, 2, 3) >>> rgb = Yuv420ToRgb() >>> output = rgb(inputy, inputuv) # 2x3x4x6",
    "type": "class",
    "file_path": "kornia\\kornia\\color\\yuv.py",
    "ast_data": "ClassDef name:Yuv420ToRgb Assign FunctionDef name:forward arg:self arg:inputy arg:inputuv arguments arg arg arg Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "scotts_factor",
    "source_code": "def scotts_factor(self):\n    return power(self.neff, -1.0 / (self.d + 4))",
    "docstring": "Compute Scott's factor. Returns ------- s : float Scott's factor.",
    "type": "method",
    "file_path": "scipy\\scipy\\stats\\_kde.py",
    "ast_data": "FunctionDef name:scotts_factor arg:self arguments arg Return return:yes Call"
  },
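The factor computed above is the classic Scott's rule, `neff ** (-1 / (d + 4))`; a quick standalone check with numpy:

```python
import numpy as np

def scotts_factor(neff, d):
    # Scott's rule bandwidth factor, as in the method above.
    return np.power(neff, -1.0 / (d + 4))

print(scotts_factor(1000, 1))  # ~0.2512 for 1000 effective samples in 1-D
```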
  {
    "library": "pytorch",
    "name": "_json_to_enum",
    "source_code": "@classmethod\ndef _json_to_enum(cls, json_dict, enum_class):\n    if json_dict is None or json_dict.get('name', 'None') == 'None':\n        return None\n    return enum_class[json_dict['name']]",
    "docstring": "Convert JSON dict to enum value. Format: {name: \"EnumName\", value: 1} Args: json_dict: Dictionary representation enum_class: Target enum class Returns: Reconstructed enum value",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\codegen\\cuda\\serialization.py",
    "ast_data": "FunctionDef name:_json_to_enum arg:cls arg:json_dict arg:enum_class arguments arg arg arg If BoolOp Compare Compare Call Return return:no Return return:yes"
  },
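A self-contained sketch of the same deserialization scheme, with a hypothetical enum standing in for the CUDA codegen enums:

```python
from enum import Enum

class LayoutType(Enum):  # hypothetical, for illustration only
    ROW_MAJOR = 0
    COLUMN_MAJOR = 1

def json_to_enum(json_dict, enum_class):
    # Mirrors the method above: {"name": ..., "value": ...} -> enum member,
    # with None (or a "None" name) mapping back to None.
    if json_dict is None or json_dict.get('name', 'None') == 'None':
        return None
    return enum_class[json_dict['name']]

assert json_to_enum({'name': 'ROW_MAJOR', 'value': 0}, LayoutType) is LayoutType.ROW_MAJOR
assert json_to_enum(None, LayoutType) is None
```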
  {
    "library": "tensorflow",
    "name": "_set_read_only_resource_inputs_attr",
    "source_code": "def _set_read_only_resource_inputs_attr(op: ops.Operation, branch_graphs):\n    read_only_indices = set(range(len(op.inputs)))\n    for branch_graph in branch_graphs:\n        if not read_only_indices:\n            break\n        branch_read_only_indices = acd.get_read_only_resource_input_indices_graph(branch_graph)\n        read_only_indices = read_only_indices.intersection(branch_read_only_indices)\n    ops.set_int_list_attr(op, acd.READ_ONLY_RESOURCE_INPUTS_ATTR, sorted(read_only_indices))",
    "docstring": "Sets the list of resource inputs which are read-only. This is used by AutomaticControlDependencies. Args: op: While Operation. branch_graphs: List of branch FuncGraphs.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\while_v2.py",
    "ast_data": "FunctionDef name:_set_read_only_resource_inputs_attr arg:op arg:branch_graphs arguments arg arg Assign Call Call Call For If Assign Call Assign Call Call Call"
  },
  {
    "library": "scikit-learn",
    "name": "fit_transform",
    "source_code": "def fit_transform(self, raw_documents, y=None):\n    self._check_params()\n    self._tfidf = TfidfTransformer(norm=self.norm, use_idf=self.use_idf, smooth_idf=self.smooth_idf, sublinear_tf=self.sublinear_tf)\n    X = super().fit_transform(raw_documents)\n    self._tfidf.fit(X)\n    return self._tfidf.transform(X, copy=False)",
    "docstring": "Learn vocabulary and idf, return document-term matrix. This is equivalent to fit followed by transform, but more efficiently implemented. Parameters ---------- raw_documents : iterable An iterable which generates either str, unicode or file objects. y : None This parameter is ignored. Returns ------- X : sparse matrix of (n_samples, n_features) Tf-idf-weighted document-term matrix.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\feature_extraction\\text.py",
    "ast_data": "FunctionDef name:fit_transform arg:self arg:raw_documents arg:y arguments arg arg arg Call Assign Call Assign Call Call Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "Zeros",
    "source_code": "class Zeros(Initializer):\n\n    def __call__(self, shape, dtype=None, **kwargs):\n        _validate_kwargs(self.__class__.__name__, kwargs)\n        dtype = _get_dtype(dtype)\n        if not dtype.is_numpy_compatible or dtype == dtypes.string:\n            raise ValueError('Expected numeric or boolean dtype, got %s.' % dtype)\n        if _PARTITION_SHAPE in kwargs:\n            shape = kwargs[_PARTITION_SHAPE]\n        return array_ops.zeros(shape, dtype)",
    "docstring": "Initializer that generates tensors initialized to 0. Also available via the shortcut function . Examples: >>> # Standalone usage: >>> initializer = tf.keras.initializers.Zeros() >>> values = initializer(shape=(2, 2)) >>> # Usage in a Keras layer: >>> initializer = tf.keras.initializers.Zeros() >>> layer = tf.keras.layers.Dense(3, kernel_initializer=initializer)",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\initializers\\initializers_v2.py",
    "ast_data": "ClassDef name:Zeros FunctionDef name:__call__ arg:self arg:shape arg:dtype arguments arg arg arg arg Call Assign Call If BoolOp Compare Raise Call If Compare Assign Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "from_config",
    "source_code": "@classmethod\ndef from_config(cls, config):\n    config.pop('dtype', None)\n    return cls(**config)",
    "docstring": "Instantiates an initializer from a configuration dictionary. Example: Args: config: A Python dictionary. It will typically be the output of . Returns: An Initializer instance.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\init_ops_v2.py",
    "ast_data": "FunctionDef name:from_config arg:cls arg:config arguments arg arg Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "getmodule",
    "source_code": "def getmodule(object):\n    return _inspect.getmodule(object)",
    "docstring": "TFDecorator-aware replacement for inspect.getmodule.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\util\\tf_inspect.py",
    "ast_data": "FunctionDef name:getmodule arg:object arguments arg Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "minorticks_off",
    "source_code": "def minorticks_off(self):\n    self.xaxis.minorticks_off()\n    self.yaxis.minorticks_off()",
    "docstring": "Remove minor ticks from the Axes.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\axes\\_base.py",
    "ast_data": "FunctionDef name:minorticks_off arg:self arguments arg Call Call"
  },
  {
    "library": "scipy",
    "name": "_qauto",
    "source_code": "def _qauto(func, covar, low, high, rng, error=0.001, limit=10000, **kwds):\n    n = len(covar)\n    n_samples = 0\n    if n == 1:\n        prob = phi(high / covar ** 0.5) - phi(low / covar ** 0.5)\n        est_error = 1e-15\n    else:\n        mi = min(limit, n * 1000)\n        prob = 0.0\n        est_error = 1.0\n        ei = 0.0\n        while est_error > error and n_samples < limit:\n            mi = round(np.sqrt(2) * mi)\n            pi, ei, ni = func(mi, covar, low, high, rng=rng, **kwds)\n            n_samples += ni\n            wt = 1.0 / (1 + (ei / est_error) ** 2)\n            prob += wt * (pi - prob)\n            est_error = np.sqrt(wt) * ei\n    return (prob, est_error, n_samples)",
    "docstring": "Automatically rerun the integration to get the required error bound. Parameters ---------- func : callable Either :func: or :func:. covar, low, high : array As specified in :func: and :func:. rng : Generator, optional default_rng(), yada, yada error : float > 0 The desired error bound. limit : int > 0: The rough limit of the number of integration points to consider. The integration will stop looping once this limit has been *exceeded*. **kwds : Other keyword arguments to pass to . When using :func:, be sure to include `` as one of these. Returns ------- prob : float The estimated probability mass within the bounds. est_error : float 3 times the standard error of the batch estimates. n_samples : int The number of integration points actually used.",
    "type": "function",
    "file_path": "scipy\\scipy\\stats\\_qmvnt.py",
    "ast_data": "FunctionDef name:_qauto arg:func arg:covar arg:low arg:high arg:rng arg:error arg:limit arguments arg arg arg arg arg arg arg arg Assign Call Assign If Compare Assign Call Call Assign Assign Call Assign Assign Assign While BoolOp Compare Compare Assign Call Call Assign Call Assign Assign Call Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "_split",
    "source_code": "def _split(self):\n    ind = np.arange(len(self.test_fold))\n    for test_index in self._iter_test_masks():\n        train_index = ind[np.logical_not(test_index)]\n        test_index = ind[test_index]\n        yield (train_index, test_index)",
    "docstring": "Generate indices to split data into training and test set. Yields ------ train : ndarray The training set indices for that split. test : ndarray The testing set indices for that split.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\model_selection\\_split.py",
    "ast_data": "FunctionDef name:_split arg:self arguments arg Assign Call Call For Call Assign Call Assign"
  },
  {
    "library": "pandas",
    "name": "_dti_setop_align_tzs",
    "source_code": "@final\ndef _dti_setop_align_tzs(self, other: Index, setop: str_t) -> tuple[Index, Index]:\n    if isinstance(self, ABCDatetimeIndex) and isinstance(other, ABCDatetimeIndex) and (self.tz is not None) and (other.tz is not None):\n        left = self.tz_convert('UTC')\n        right = other.tz_convert('UTC')\n        return (left, right)\n    return (self, other)",
    "docstring": "With mismatched timezones, cast both to UTC.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\indexes\\base.py",
    "ast_data": "FunctionDef name:_dti_setop_align_tzs arg:self arg:other arg:setop arguments arg arg arg If BoolOp Call Call Compare Compare Assign Call Assign Call Return return:yes Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "is_applicable",
    "source_code": "@abc.abstractmethod\ndef is_applicable(self, trackable: base.Trackable) -> bool:\n    pass",
    "docstring": "Returns whether the adapter is applicable to trackable for resharding. Args: trackable: A Trackable object that is being restored. Returns: A Boolean indicating if the checkpoint value for this Trackable should be resharded.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\checkpoint\\checkpoint_adapter.py",
    "ast_data": "FunctionDef name:is_applicable arg:self arg:trackable arguments arg arg"
  },
  {
    "library": "numpy",
    "name": "from_data",
    "source_code": "@classmethod\ndef from_data(cls, data, **options):\n    format_functions = []\n    for field_name in data.dtype.names:\n        format_function = _get_format_function(data[field_name], **options)\n        if data.dtype[field_name].shape != ():\n            format_function = SubArrayFormat(format_function, **options)\n        format_functions.append(format_function)\n    return cls(format_functions)",
    "docstring": "This is a second way to initialize StructuredVoidFormat, using the raw data as input. Added to avoid changing the signature of __init__.",
    "type": "method",
    "file_path": "numpy\\numpy\\_core\\arrayprint.py",
    "ast_data": "FunctionDef name:from_data arg:cls arg:data arguments arg arg arg Assign For Assign Call If Compare Assign Call Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, feature_config: Union[tpu_embedding_v2_utils.FeatureConfig, Iterable], optimizer: Optional[tpu_embedding_v2_utils._Optimizer], experimental_sparsecore_restore_info: Optional[Dict[str, Any]]=None):\n    super(TPUEmbeddingForServing, self).__init__(feature_config, optimizer)\n    self._strategy = distribute_lib.get_strategy()\n    if isinstance(self._strategy, (tpu_strategy.TPUStrategy, tpu_strategy.TPUStrategyV2)):\n        raise RuntimeError('Serving on TPU is not yet supported.')",
    "docstring": "Creates the TPUEmbeddingForServing mid level API object. Args: feature_config: A nested structure of configs. optimizer: An instance of one of , or . When not created under TPUStrategy may be set to None to avoid the creation of the optimizer slot variables, useful for optimizing memory consumption when exporting the model for serving where slot variables aren't needed. experimental_sparsecore_restore_info: Information from the sparse core training, required to restore from checkpoint for serving (like number of TPU devices used .) Raises: RuntimeError: If created under TPUStrategy.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\tpu\\tpu_embedding_for_serving.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:feature_config arg:optimizer arg:experimental_sparsecore_restore_info arguments arg arg arg arg Call Call Assign Call If Call Raise Call"
  },
  {
    "library": "numpy",
    "name": "lstrip",
    "source_code": "def lstrip(self, chars=None):\n    return lstrip(self, chars)",
    "docstring": "For each element in , return a copy with the leading characters removed. See Also -------- char.lstrip",
    "type": "method",
    "file_path": "numpy\\numpy\\_core\\defchararray.py",
    "ast_data": "FunctionDef name:lstrip arg:self arg:chars arguments arg arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "ScopedTFImportGraphDefResults",
    "source_code": "class ScopedTFImportGraphDefResults(object):\n    __slots__ = ['results']\n\n    def __init__(self, results):\n        self.results = results\n\n    def __del__(self):\n        if c_api is not None and c_api.TF_DeleteImportGraphDefResults is not None:\n            c_api.TF_DeleteImportGraphDefResults(self.results)",
    "docstring": "Wrapper around TF_ImportGraphDefOptions that handles deletion.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\c_api_util.py",
    "ast_data": "ClassDef name:ScopedTFImportGraphDefResults Assign FunctionDef name:__init__ arg:self arg:results arguments arg arg Assign FunctionDef name:__del__ arg:self arguments arg If BoolOp Compare Compare Call"
  },
  {
    "library": "pytorch",
    "name": "get_hash_for_files",
    "source_code": "@lru_cache(1)\ndef get_hash_for_files(paths: tuple[str], extra: str='') -> bytes:\n    hasher = hashlib.sha256()\n    hasher.update(extra.encode('utf-8'))\n    for path in paths:\n        with open(path, 'rb') as f:\n            hasher.update(path.encode('utf-8'))\n            hasher.update(f.read())\n    return hasher.digest()",
    "docstring": "Helper to compute a unique string by hashing the contents of a list of files.",
    "type": "function",
    "file_path": "pytorch\\torch\\_inductor\\custom_graph_pass.py",
    "ast_data": "FunctionDef name:get_hash_for_files arg:paths arg:extra arguments arg arg Assign Call Call Call For With Call Call Call Call Call Return return:yes Call Call"
  },
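A standalone sketch of the same hashing scheme (each path and its contents, plus an extra salt, folded into one SHA-256), exercised on a temporary file; `hash_for_files` is a local stand-in, not the torch helper itself:

```python
import hashlib
import os
import tempfile

def hash_for_files(paths, extra=''):
    h = hashlib.sha256()
    h.update(extra.encode('utf-8'))
    for path in paths:
        with open(path, 'rb') as f:
            h.update(path.encode('utf-8'))  # the path participates in the hash
            h.update(f.read())
    return h.digest()

with tempfile.TemporaryDirectory() as d:
    path = os.path.join(d, 'example.py')
    with open(path, 'w') as f:
        f.write("print('hello')\n")
    print(hash_for_files((path,), extra='v1').hex())
```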
  {
    "library": "tensorflow",
    "name": "save",
    "source_code": "def save(self, file_prefix, session=None, options=None):\n    graph_building = not context.executing_eagerly()\n    if graph_building:\n        if ops.inside_function():\n            raise NotImplementedError('Calling tf.train.Checkpoint.save() from a function is not supported, as save() modifies saving metadata in ways not supported by TensorFlow Operations. Consider using tf.train.Checkpoint.write(), a lower-level API which does not update metadata. tf.train.latest_checkpoint and related APIs will not see this checkpoint.')\n        if session is None:\n            session = get_session()\n        if self._save_counter is None:\n            session.run(self.save_counter.initializer)\n    if not graph_building or self._save_assign_op is None:\n        with ops.colocate_with(self.save_counter):\n            assign_op = self.save_counter.assign_add(1, read_value=True)\n        if graph_building:\n            self._save_assign_op = data_structures.NoDependency(assign_op)\n    if graph_building:\n        checkpoint_number = session.run(self._save_assign_op)\n    else:\n        checkpoint_number = assign_op.numpy()\n    file_path = self.write('%s-%d' % (file_prefix, checkpoint_number), session=session, options=options)\n    checkpoint_management.update_checkpoint_state_internal(save_dir=os.path.dirname(file_prefix), model_checkpoint_path=file_path, all_model_checkpoint_paths=[file_path], save_relative_paths=True)\n    return file_path",
    "docstring": "Saves a training checkpoint and provides basic checkpoint management. The saved checkpoint includes variables created by this object and any trackable objects it depends on at the time is called. is a basic convenience wrapper around the method, sequentially numbering checkpoints using and updating the metadata used by . More advanced checkpoint management, for example garbage collection and custom numbering, may be provided by other utilities which also wrap ( for example). Args: file_prefix: A prefix to use for the checkpoint filenames (/path/to/directory/and_a_prefix). Names are generated based on this prefix and . session: The session to evaluate variables in. Ignored when executing eagerly. If not provided when graph building, the default session is used. options: Optional object. Returns: The full path to the checkpoint.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\checkpoint\\checkpoint.py",
    "ast_data": "FunctionDef name:save arg:self arg:file_prefix arg:session arg:options arguments arg arg arg arg Assign Call If If Call Raise Call If Compare Assign Call If Compare Call If BoolOp Compare With Call Assign Call If Assign Call If Assign Call Assign Call Assign Call Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "is_subtype_of",
    "source_code": "def is_subtype_of(self, other: trace.TraceType) -> bool:\n    if not self._has_same_structure(other):\n        return False\n    return all((self.mapping[key].is_subtype_of(other.mapping[key]) for key in self.mapping))",
    "docstring": "See base class.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\core\\function\\trace_type\\default_types.py",
    "ast_data": "FunctionDef name:is_subtype_of arg:self arg:other arguments arg arg If Call Return return:yes Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "reduce",
    "source_code": "def reduce(self, reduce_op, per_replica_value, destinations, options=None):\n    if options is None:\n        options = collective_util.Options()\n    per_replica_value = _make_tensor_into_per_replica(per_replica_value)\n    validate_destinations(destinations)\n    if self._num_between_graph_workers == 1 and len(per_replica_value.values) == 1 and _devices_match(per_replica_value, destinations, self._canonicalize_devices):\n        with ops.device(per_replica_value.values[0].device):\n            v = array_ops.identity(per_replica_value.values[0])\n        return distribute_utils.regroup((v,), wrap_class=value_lib.Mirrored)\n    if options is None:\n        options = collective_util.Options()\n    return self.reduce_implementation(reduce_op, per_replica_value, destinations, options)",
    "docstring": "Reduce to . See . This can only be called in the cross-replica context. Args: reduce_op: a specifying how values should be combined. per_replica_value: a , or a like object. destinations: a , a , a alike object, or a device string. It specifies the devices to reduce to. To perform an all-reduce, pass the same to and . Note that if it's a , the value is reduced to the devices of that variable, and this method doesn't update the variable. options: a . See for details. Returns: A or . Raises: ValueError: if per_replica_value can't be converted to a or if destinations is not a string, or .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\cross_device_ops.py",
    "ast_data": "FunctionDef name:reduce arg:self arg:reduce_op arg:per_replica_value arg:destinations arg:options arguments arg arg arg arg arg If Compare Assign Call Assign Call Call If BoolOp Compare Compare Call Call With Call Assign Call Return return:yes Call If Compare Assign Call Return return:yes Call"
  },
  {
    "library": "django",
    "name": "record_applied",
    "source_code": "def record_applied(self, app, name):\n    self.ensure_schema()\n    self.migration_qs.create(app=app, name=name)",
    "docstring": "Record that a migration was applied.",
    "type": "method",
    "file_path": "django\\django\\db\\migrations\\recorder.py",
    "ast_data": "FunctionDef name:record_applied arg:self arg:app arg:name arguments arg arg arg Call Call"
  },
  {
    "library": "tensorflow",
    "name": "load_library",
    "source_code": "@tf_export('load_library')\ndef load_library(library_location):\n    if os.path.exists(library_location):\n        if os.path.isdir(library_location):\n            directory_contents = os.listdir(library_location)\n            kernel_libraries = [os.path.join(library_location, f) for f in directory_contents if _is_shared_object(f)]\n        else:\n            kernel_libraries = [library_location]\n        for lib in kernel_libraries:\n            py_tf.TF_LoadLibrary(lib)\n    else:\n        raise OSError(errno.ENOENT, 'The file or folder to load kernel libraries from does not exist.', library_location)",
    "docstring": "Loads a TensorFlow plugin. \"library_location\" can be a path to a specific shared object, or a folder. If it is a folder, all shared objects that are named \"libtfkernel*\" will be loaded. When the library is loaded, kernels registered in the library via the macros are made available in the TensorFlow process. Args: library_location: Path to the plugin or the folder of plugins. Relative or absolute filesystem path to a dynamic library file or folder. Returns: None Raises: OSError: When the file to be loaded is not found. RuntimeError: when unable to load the library.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\load_library.py",
    "ast_data": "FunctionDef name:load_library arg:library_location arguments arg If Call If Call Assign Call Assign Call Call Assign For Call Raise Call Call"
  },
  {
    "library": "django",
    "name": "first",
    "source_code": "def first(self):\n    if self.ordered:\n        queryset = self\n    else:\n        self._check_ordering_first_last_queryset_aggregation(method='first')\n        queryset = self.order_by('pk')\n    for obj in queryset[:1]:\n        return obj",
    "docstring": "Return the first object of a query or None if no match is found.",
    "type": "method",
    "file_path": "django\\django\\db\\models\\query.py",
    "ast_data": "FunctionDef name:first arg:self arguments arg If Assign Call Assign Call For Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "output_shapes",
    "source_code": "@property\ndef output_shapes(self):\n    return nest.map_structure(lambda x: getattr(x, 'shape', tensor_shape.TensorShape(None)), composite_tensor.replace_composites_with_components(self._func_graph.structured_outputs), expand_composites=False)",
    "docstring": "The function's output shapes.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\polymorphic_function\\concrete_function.py",
    "ast_data": "FunctionDef name:output_shapes arg:self arguments arg Return return:yes Call arguments arg Call Call Call"
  },
  {
    "library": "scipy",
    "name": "star",
    "source_code": "def star(self):\n    self.st = self.nn\n    self.st.add(self)\n    return self.st",
    "docstring": "Returns the star domain ``",
    "type": "method",
    "file_path": "scipy\\scipy\\optimize\\_shgo_lib\\_vertex.py",
    "ast_data": "FunctionDef name:star arg:self arguments arg Assign Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "write_pth",
    "source_code": "@timed('Writing pytorch-nightly.pth')\ndef write_pth(venv: Venv) -> None:\n    (venv.site_packages() / 'pytorch-nightly.pth').write_text(f\"# This file was autogenerated by PyTorch's tools/nightly.py\\n# Please delete this file if you no longer need the following development\\n# version of PyTorch to be importable\\n{REPO_ROOT}\\n\", encoding='utf-8')",
    "docstring": "Writes Python path file for this dir.",
    "type": "function",
    "file_path": "pytorch\\tools\\nightly.py",
    "ast_data": "FunctionDef name:write_pth arg:venv arguments arg Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_from_proto_fn",
    "source_code": "def _from_proto_fn(v, import_scope=None):\n    if v.is_resource:\n        return resource_variable_ops.ResourceVariable.from_proto(v, import_scope=import_scope)\n    return variable_v1.VariableV1.from_proto(v, import_scope=import_scope)",
    "docstring": "Creates Variable or ResourceVariable from VariableDef as needed.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\ref_variable.py",
    "ast_data": "FunctionDef name:_from_proto_fn arg:v arg:import_scope arguments arg arg If Return return:yes Call Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "_fit",
    "source_code": "def _fit(self, X, y, **params):\n    routed_params = process_routing(self, 'fit', **params)\n    self.estimator_ = clone(self.estimator).fit(X, y, **routed_params.estimator.fit)\n    return self",
    "docstring": "Fit the classifier. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) Training data. y : array-like of shape (n_samples,) Target values. **params : dict Parameters to pass to the method of the underlying classifier. Returns ------- self : object Returns an instance of self.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\model_selection\\_classification_threshold.py",
    "ast_data": "FunctionDef name:_fit arg:self arg:X arg:y arguments arg arg arg arg Assign Call Assign Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_validate_tensors_to_flatten",
    "source_code": "def _validate_tensors_to_flatten(self, tensors: list[Union[Tensor, nn.Parameter]]) -> tuple:\n    dtype: Optional[torch.dtype] = None\n    flat_param_requires_grad: Optional[bool] = None\n    device: Optional[torch.device] = None\n    for tensor in tensors:\n        if isinstance(tensor, FlatParameter):\n            raise ValueError('Cannot flatten a `FlatParameter`')\n        if dtype is None and (not tensor.is_floating_point()):\n            raise ValueError('Cannot flatten integer dtype tensors')\n        if dtype is not None and tensor.dtype != dtype:\n            raise ValueError(f'Must flatten tensors with uniform dtype but got {dtype} and {tensor.dtype}')\n        if not self._use_orig_params and flat_param_requires_grad is not None and (tensor.requires_grad != flat_param_requires_grad):\n            raise ValueError('Must flatten tensors with uniform `requires_grad` when `use_orig_params=False`')\n        if device is not None and tensor.device != device:\n            raise ValueError(f'Must flatten tensors on the same device but got both {device} and {tensor.device}')\n        dtype = tensor.dtype\n        flat_param_requires_grad = flat_param_requires_grad or tensor.requires_grad\n        device = tensor.device\n    assert flat_param_requires_grad is not None, 'Requires non-empty `tensors` list'\n    return (dtype, flat_param_requires_grad, device)",
    "docstring": "Validate the tensors to flatten and returns any necessary metadata.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\fsdp\\_flat_param.py",
    "ast_data": "FunctionDef name:_validate_tensors_to_flatten arg:self arg:tensors arguments arg arg For If Call Raise Call If BoolOp Compare Call Raise Call If BoolOp Compare Compare Raise Call If BoolOp Compare Compare Raise Call If BoolOp Compare Compare Raise Call Assign Assign BoolOp Assign Compare Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "var_creator",
    "source_code": "def var_creator(**kwargs):\n    v = next_creator(**kwargs)\n    wrapped_v = ps_values.CachingVariable(v)\n    wrapped = ps_values.AggregatingVariable(self._container_strategy(), wrapped_v, aggregation)\n    return wrapped",
    "docstring": "Create an AggregatingVariable.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\parameter_server_strategy_v2.py",
    "ast_data": "FunctionDef name:var_creator arguments arg Assign Call Assign Call Assign Call Call Return return:yes"
  },
  {
    "library": "kornia",
    "name": "warp_affine3d",
    "source_code": "def warp_affine3d(src: Tensor, M: Tensor, dsize: tuple[int, int, int], flags: str='bilinear', padding_mode: str='zeros', align_corners: bool=True) -> Tensor:\n    if len(src.shape) != 5:\n        raise AssertionError(src.shape)\n    if not (len(M.shape) == 3 and M.shape[-2:] == (3, 4)):\n        raise AssertionError(M.shape)\n    if len(dsize) != 3:\n        raise AssertionError(dsize)\n    B, C, D, H, W = src.size()\n    size_src: tuple[int, int, int] = (D, H, W)\n    size_out: tuple[int, int, int] = dsize\n    M_4x4 = convert_affinematrix_to_homography3d(M)\n    dst_norm_trans_src_norm: Tensor = normalize_homography3d(M_4x4, size_src, size_out)\n    src_norm_trans_dst_norm = _torch_inverse_cast(dst_norm_trans_src_norm)\n    P_norm: Tensor = src_norm_trans_dst_norm[:, :3]\n    dsize_out: list[int] = [B, C, *list(size_out)]\n    grid = F.affine_grid(P_norm, dsize_out, align_corners=align_corners)\n    return F.grid_sample(src, grid, align_corners=align_corners, mode=flags, padding_mode=padding_mode)",
    "docstring": "Apply a projective transformation a to 3d tensor. .. warning:: This API signature it is experimental and might suffer some changes in the future. Args: src : input tensor of shape :math:. M: projective transformation matrix of shape :math:. dsize: size of the output image (depth, height, width). flags: interpolation mode to calculate output values `(B, C, D, H, W)get_perspective_transform3d`.",
    "type": "function",
    "file_path": "kornia\\kornia\\geometry\\transform\\imgwarp.py",
    "ast_data": "FunctionDef name:warp_affine3d arg:src arg:M arg:dsize arg:flags arg:padding_mode arg:align_corners arguments arg arg arg arg arg arg If Compare Call Raise Call If BoolOp Compare Call Compare Raise Call If Compare Call Raise Call Assign Call Assign Call Call Assign Call Call Assign Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "decompose_generalized_scatter",
    "source_code": "def decompose_generalized_scatter(graph: torch.fx.Graph) -> None:\n    for node in itertools.chain(graph.find_nodes(op='call_function', target=_generalized_scatter), graph.find_nodes(op='call_function', target=_inplace_generalized_scatter)):\n        use_mutation = node.target is _inplace_generalized_scatter or scatter_always_uses_mutation(node)\n        with graph.inserting_before(node):\n            if use_mutation:\n                new_node = _decompose_scatter_mutating(graph, node)\n            else:\n                new_node = _decompose_scatter_functional(graph, node)\n        node.replace_all_uses_with(new_node)\n        graph.erase_node(node)",
    "docstring": "Replace _generalized_scatter with normal aten ops",
    "type": "function",
    "file_path": "pytorch\\torch\\_inductor\\fx_passes\\reinplace.py",
    "ast_data": "FunctionDef name:decompose_generalized_scatter arg:graph arguments arg For Call Call Call Assign BoolOp Compare Call With Call If Assign Call Assign Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "index_of",
    "source_code": "def index_of(self, value_str):\n    if value_str is None:\n        value_str = ''\n    if value_str in self._string_to_index:\n        return self._string_to_index[value_str]\n    index = len(self._string_table)\n    self._string_table.append(value_str)\n    self._string_to_index[value_str] = index\n    return index",
    "docstring": "Get index of value_str in the string table. If value_str is not in the string table, we will add it at the end and then return the new index. Args: value_str: (string) Value to lookup/add in/to the string table. Returns: Index of value_str in the string table.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\profiler\\pprof_profiler.py",
    "ast_data": "FunctionDef name:index_of arg:self arg:value_str arguments arg arg If Compare Assign If Compare Return return:yes Assign Call Call Assign Return return:yes"
  },
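The method implements simple string interning; a minimal standalone sketch (the `StringTable` class is hypothetical, and seeding index 0 with the empty string mirrors the None-to-'' mapping above):

```python
class StringTable:
    def __init__(self):
        self._table = ['']        # index 0 reserved for the empty string
        self._index = {'': 0}     # reverse map for O(1) lookups

    def index_of(self, s):
        s = s or ''
        if s in self._index:
            return self._index[s]
        idx = len(self._table)
        self._table.append(s)
        self._index[s] = idx
        return idx

t = StringTable()
assert t.index_of('main') == t.index_of('main') == 1  # interned once
assert t.index_of(None) == 0                          # None -> ''
```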
  {
    "library": "tensorflow",
    "name": "_eval",
    "source_code": "def _eval(self, tensor):\n    return self._session.run(tensor)",
    "docstring": "Returns the value in the tensor. Must be implemented in sub-classes.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\convert_to_constants.py",
    "ast_data": "FunctionDef name:_eval arg:self arg:tensor arguments arg arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "gcd",
    "source_code": "def gcd(a, b, name=None):\n    with ops.name_scope(name, 'gcd', [a, b]):\n        a = ops.convert_to_tensor(a)\n        b = ops.convert_to_tensor(b)\n        a.shape.assert_has_rank(0)\n        b.shape.assert_has_rank(0)\n        if not a.dtype.is_integer:\n            raise ValueError('a must be an integer type. Got: %s' % a.dtype)\n        if not b.dtype.is_integer:\n            raise ValueError('b must be an integer type. Got: %s' % b.dtype)\n        const_a = tensor_util.constant_value(a)\n        const_b = tensor_util.constant_value(b)\n        if const_a is not None and const_b is not None:\n            if sys.version_info.major < 3:\n                math_gcd = fractions.gcd\n            else:\n                math_gcd = math.gcd\n            return ops.convert_to_tensor(math_gcd(const_a, const_b))\n        cond = lambda _, b: math_ops.greater(b, array_ops.zeros_like(b))\n        body = lambda a, b: [b, math_ops.mod(a, b)]\n        a, b = while_loop.while_loop(cond, body, [a, b], back_prop=False)\n        return a",
    "docstring": "Returns the greatest common divisor via Euclid's algorithm. Args: a: The dividend. A scalar integer . b: The divisor. A scalar integer . name: An optional name for the operation. Returns: A scalar representing the greatest common divisor between and . Raises: ValueError: If or are not scalar integers.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\signal\\util_ops.py",
    "ast_data": "FunctionDef name:gcd arg:a arg:b arg:name arguments arg arg arg With Call Assign Call Assign Call Call Call If Raise Call If Raise Call Assign Call Assign Call If BoolOp Compare Compare If Compare Assign Assign Return return:yes Call Call Assign arguments arg arg Call Call Assign arguments arg arg Call Assign Call Return return:yes"
  },
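The `cond`/`body` lambdas above encode plain Euclid; in eager Python terms the loop is:

```python
def euclid_gcd(a, b):
    # cond: b > 0; body: (a, b) -> (b, a mod b), exactly as in the graph version.
    while b > 0:
        a, b = b, a % b
    return a

assert euclid_gcd(54, 24) == 6
```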
  {
    "library": "scipy",
    "name": "rvs",
    "source_code": "def rvs(self, alpha, size=1, random_state=None):\n    alpha = _dirichlet_check_parameters(alpha)\n    random_state = self._get_random_state(random_state)\n    return random_state.dirichlet(alpha, size=size)",
    "docstring": "Draw random samples from a Dirichlet distribution. Parameters ---------- %(_dirichlet_doc_default_callparams)s size : int, optional Number of samples to draw (default 1). %(_doc_random_state)s Returns ------- rvs : ndarray or scalar Random variates of size (, ), where is the dimension of the random variable.",
    "type": "method",
    "file_path": "scipy\\scipy\\stats\\_multivariate.py",
    "ast_data": "FunctionDef name:rvs arg:self arg:alpha arg:size arg:random_state arguments arg arg arg arg Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "is_valid_positional_slice",
    "source_code": "def is_valid_positional_slice(slc: slice) -> bool:\n    return lib.is_int_or_none(slc.start) and lib.is_int_or_none(slc.stop) and lib.is_int_or_none(slc.step)",
    "docstring": "Check if a slice object can be interpreted as a positional indexer. Parameters ---------- slc : slice Returns ------- bool Notes ----- A valid positional slice may also be interpreted as a label-based slice depending on the index being sliced.",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\indexers\\utils.py",
    "ast_data": "FunctionDef name:is_valid_positional_slice arg:slc arguments arg Return return:yes BoolOp Call Call Call"
  },
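A plain-Python approximation of the check (pandas' `lib.is_int_or_none` also accepts numpy integers, which this sketch ignores):

```python
def is_valid_positional_slice(slc):
    # Positional slices may only carry integers or None in each slot.
    return all(x is None or isinstance(x, int)
               for x in (slc.start, slc.stop, slc.step))

assert is_valid_positional_slice(slice(1, 5, 2))
assert not is_valid_positional_slice(slice('a', 'c'))  # label-based, not positional
```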
  {
    "library": "scipy",
    "name": "generate_file_wrapper",
    "source_code": "def generate_file_wrapper(sigs, accelerate):\n    file_text = [C_COMMENT, C_PREAMBLE, LAPACK_DECLS, CPP_GUARD_BEGIN]\n    for sig in sigs:\n        file_text.append(generate_decl_wrapper(**sig, accelerate=accelerate))\n    file_text.append(CPP_GUARD_END)\n    return ''.join(file_text)",
    "docstring": "Returns text of file containing wrappers for all BLAS/LAPACK functions.",
    "type": "function",
    "file_path": "scipy\\scipy\\_build_utils\\_generate_blas_wrapper.py",
    "ast_data": "FunctionDef name:generate_file_wrapper arg:sigs arg:accelerate arguments arg arg Assign For Call Call Call Return return:yes Call"
  },
  {
    "library": "authlib",
    "name": "AccountSelectionRequiredError",
    "source_code": "class AccountSelectionRequiredError(OAuth2Error):\n    error = 'account_selection_required'",
    "docstring": "The End-User is REQUIRED to select a session at the Authorization Server. The End-User MAY be authenticated at the Authorization Server with different associated accounts, but the End-User did not select a session. This error MAY be returned when the prompt parameter value in the Authentication Request is none, but the Authentication Request cannot be completed without displaying a user interface to prompt for a session to use.",
    "type": "class",
    "file_path": "authlib\\authlib\\oidc\\core\\errors.py",
    "ast_data": "ClassDef name:AccountSelectionRequiredError Assign"
  },
  {
    "library": "tensorflow",
    "name": "lengths_to_splits",
    "source_code": "def lengths_to_splits(lengths):\n    return array_ops.concat([[0], math_ops.cumsum(lengths)], axis=-1)",
    "docstring": "Returns splits corresponding to the given lengths.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\ragged_util.py",
    "ast_data": "FunctionDef name:lengths_to_splits arg:lengths arguments arg Return return:yes Call Call"
  },
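The same lengths-to-splits transform in numpy terms: prepend 0 and take a cumulative sum, so row `i` spans `values[splits[i]:splits[i+1]]`:

```python
import numpy as np

lengths = np.array([2, 0, 3])
splits = np.concatenate([[0], np.cumsum(lengths)])
print(splits)  # [0 2 2 5] -> rows are values[0:2], values[2:2], values[2:5]
```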
  {
    "library": "tensorflow",
    "name": "get_combiner",
    "source_code": "def get_combiner(self):\n    raise NotImplementedError('not implemented')",
    "docstring": "Returns the embedding combiner.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\tpu\\feature_column.py",
    "ast_data": "FunctionDef name:get_combiner arg:self arguments arg Raise Call"
  },
  {
    "library": "tensorflow",
    "name": "format_bytes",
    "source_code": "def format_bytes(b: int) -> str:\n    for i in range(1, len(_BYTE_UNITS)):\n        if b < _BYTE_UNITS[i][0]:\n            n = f'{b / _BYTE_UNITS[i - 1][0]:.2f}'\n            units = _BYTE_UNITS[i - 1][1]\n            break\n    else:\n        n = f'{b / _BYTE_UNITS[-1][0]:.2f}'\n        units = _BYTE_UNITS[-1][1]\n    n = n.rstrip('0').rstrip('.')\n    return f'{n}{units}'",
    "docstring": "Formats bytes into a human-readable string.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\tools\\proto_splitter\\util.py",
    "ast_data": "FunctionDef name:format_bytes arg:b arguments arg For Call Call If Compare Assign Assign Assign Assign Assign Call Call Return return:yes"
  },
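A runnable sketch of the same logic; the module's `_BYTE_UNITS` table is private, so the (threshold, suffix) pairs below are an assumption:

```python
# Assumed unit table: (bytes-per-unit, suffix), ascending.
_BYTE_UNITS = [(1, 'B'), (1 << 10, 'KiB'), (1 << 20, 'MiB'), (1 << 30, 'GiB')]

def format_bytes(b):
    for i in range(1, len(_BYTE_UNITS)):
        if b < _BYTE_UNITS[i][0]:
            n = f'{b / _BYTE_UNITS[i - 1][0]:.2f}'
            units = _BYTE_UNITS[i - 1][1]
            break
    else:  # larger than the last threshold: use the biggest unit
        n = f'{b / _BYTE_UNITS[-1][0]:.2f}'
        units = _BYTE_UNITS[-1][1]
    return n.rstrip('0').rstrip('.') + units  # trim trailing zeros, e.g. 1.50 -> 1.5

assert format_bytes(1536) == '1.5KiB'
assert format_bytes(1023) == '1023B'
```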
  {
    "library": "scikit-learn",
    "name": "_dist_wrapper",
    "source_code": "def _dist_wrapper(dist_func, dist_matrix, slice_, *args, **kwargs):\n    dist_matrix[:, slice_] = dist_func(*args, **kwargs)",
    "docstring": "Write in-place to a slice of a distance matrix.",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\metrics\\pairwise.py",
    "ast_data": "FunctionDef name:_dist_wrapper arg:dist_func arg:dist_matrix arg:slice_ arguments arg arg arg arg arg Assign Call"
  },
  {
    "library": "pytorch",
    "name": "unique",
    "source_code": "def unique(self, sorted=True, return_inverse=False, return_counts=False, dim=None):\n    if has_torch_function_unary(self):\n        return handle_torch_function(Tensor.unique, (self,), self, sorted=sorted, return_inverse=return_inverse, return_counts=return_counts, dim=dim)\n    return torch.unique(self, sorted=sorted, return_inverse=return_inverse, return_counts=return_counts, dim=dim)",
    "docstring": "Returns the unique elements of the input tensor. See :func:",
    "type": "method",
    "file_path": "pytorch\\torch\\_tensor.py",
    "ast_data": "FunctionDef name:unique arg:self arg:sorted arg:return_inverse arg:return_counts arg:dim arguments arg arg arg arg arg If Call Return return:yes Call Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "__init__",
    "source_code": "def __init__(self, pad=0.0, sep=0.0, width=None, height=None, align='baseline', mode='fixed', children=None):\n    super().__init__()\n    self.height = height\n    self.width = width\n    self.sep = sep\n    self.pad = pad\n    self.mode = mode\n    self.align = align\n    self._children = children",
    "docstring": "Parameters ---------- pad : float, default: 0.0 The boundary padding in points. sep : float, default: 0.0 The spacing between items in points. width, height : float, optional Width and height of the container box in pixels, calculated if *None*. align : {'top', 'bottom', 'left', 'right', 'center', 'baseline'}, default: 'baseline' Alignment of boxes. mode : {'fixed', 'expand', 'equal'}, default: 'fixed' The packing mode. - 'fixed' packs the given \\s tight with *sep* spacing. - 'expand' uses the maximal available space to distribute the artists with equal spacing in between. - 'equal': Each artist an equal fraction of the available space and is left-aligned (or top-aligned) therein. children : list of The artists to pack. Notes ----- *pad* and *sep* are in points and will be scaled with the renderer dpi, while *width* and *height* are in pixels.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\offsetbox.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:pad arg:sep arg:width arg:height arg:align arg:mode arg:children arguments arg arg arg arg arg arg arg arg Call Call Assign Assign Assign Assign Assign Assign Assign"
  },
  {
    "library": "seaborn",
    "name": "first_non_overlapping_candidate",
    "source_code": "def first_non_overlapping_candidate(self, candidates, neighbors):\n    if len(neighbors) == 0:\n        return candidates[0]\n    neighbors_x = neighbors[:, 0]\n    neighbors_y = neighbors[:, 1]\n    neighbors_r = neighbors[:, 2]\n    for xyr_i in candidates:\n        x_i, y_i, r_i = xyr_i\n        dx = neighbors_x - x_i\n        dy = neighbors_y - y_i\n        sq_distances = np.square(dx) + np.square(dy)\n        sep_needed = np.square(neighbors_r + r_i)\n        good_candidate = np.all(sq_distances >= sep_needed)\n        if good_candidate:\n            return xyr_i\n    raise RuntimeError('No non-overlapping candidates found. This should not happen.')",
    "docstring": "Find the first candidate that does not overlap with the swarm.",
    "type": "method",
    "file_path": "seaborn\\seaborn\\categorical.py",
    "ast_data": "FunctionDef name:first_non_overlapping_candidate arg:self arg:candidates arg:neighbors arguments arg arg arg If Compare Call Return return:yes Assign Assign Assign For Assign Assign Assign Assign Call Call Assign Call Assign Call Compare If Return return:yes Raise Call"
  },
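The acceptance test above compares squared center distances against the squared sum of radii; a standalone numpy check of one candidate against two neighbors:

```python
import numpy as np

neighbors = np.array([[0.0, 0.0, 1.0],   # (x, y, r) rows
                      [3.0, 0.0, 1.0]])
x, y, r = 1.5, 2.0, 0.5                  # candidate point
d2 = (neighbors[:, 0] - x) ** 2 + (neighbors[:, 1] - y) ** 2
ok = np.all(d2 >= (neighbors[:, 2] + r) ** 2)
print(ok)  # True: 6.25 >= 2.25 for both neighbors, so no overlap
```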
  {
    "library": "tensorflow",
    "name": "_Context",
    "source_code": "class _Context(object):\n    __slots__ = ['_lock', '_group_id']\n\n    def __init__(self, lock, group_id):\n        self._lock = lock\n        self._group_id = group_id\n\n    def __enter__(self):\n        self._lock.acquire(self._group_id)\n\n    def __exit__(self, type_arg, value_arg, traceback_arg):\n        del type_arg, value_arg, traceback_arg\n        self._lock.release(self._group_id)",
    "docstring": "Context manager helper for .",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\util\\lock_util.py",
    "ast_data": "ClassDef name:_Context Assign FunctionDef name:__init__ arg:self arg:lock arg:group_id arguments arg arg arg Assign Assign FunctionDef name:__enter__ arg:self arguments arg Call FunctionDef name:__exit__ arg:self arg:type_arg arg:value_arg arg:traceback_arg arguments arg arg arg arg Call"
  },
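The _Context helper is the usual acquire-in-__enter__, release-in-__exit__ pattern, specialized to a group id. A minimal sketch of the same pattern with a plain threading.Lock (hypothetical LockContext name, not TensorFlow's API):

import threading

class LockContext:
    def __init__(self, lock):
        self._lock = lock
    def __enter__(self):
        self._lock.acquire()          # taken on entry
    def __exit__(self, exc_type, exc, tb):
        self._lock.release()          # released even if the body raised

lock = threading.Lock()
with LockContext(lock):
    pass  # critical section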
  {
    "library": "scipy",
    "name": "Meyer",
    "source_code": "class Meyer(Benchmark):\n\n    def __init__(self, dimensions=3):\n        Benchmark.__init__(self, dimensions)\n        self._bounds = list(zip([0.0, 100.0, 100.0], [1, 1000.0, 500.0]))\n        self.global_optimum = [[0.005609636471, 6181.3463463, 345.22363462]]\n        self.fglob = 87.945855171\n        self.a = asarray([34780.0, 28610.0, 23650.0, 19630.0, 16370.0, 13720.0, 11540.0, 9744.0, 8261.0, 7030.0, 6005.0, 5147.0, 4427.0, 3820.0, 3307.0, 2872.0])\n        self.b = asarray([50.0, 55.0, 60.0, 65.0, 70.0, 75.0, 80.0, 85.0, 90.0, 95.0, 100.0, 105.0, 110.0, 115.0, 120.0, 125.0])\n\n    def fun(self, x, *args):\n        self.nfev += 1\n        vec = x[0] * exp(x[1] / (self.b + x[2]))\n        return sum((self.a - vec) ** 2)",
    "docstring": "Meyer [1]_ objective function. ..[1] TODO NIST regression standard",
    "type": "class",
    "file_path": "scipy\\benchmarks\\benchmarks\\go_benchmark_functions\\go_funcs_M.py",
    "ast_data": "ClassDef name:Meyer FunctionDef name:__init__ arg:self arg:dimensions arguments arg arg Call Assign Call Call Assign Assign Assign Call Assign Call FunctionDef name:fun arg:self arg:x arguments arg arg arg Assign Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "coordination_leader",
    "source_code": "def coordination_leader(cluster_spec):\n    cluster_spec = normalize_cluster_spec(cluster_spec)\n    if not cluster_spec.as_dict():\n        return ''\n    if 'ps' in cluster_spec.jobs:\n        return '/job:ps/replica:0/task:0'\n    if 'chief' in cluster_spec.jobs:\n        return '/job:chief/replica:0/task:0'\n    assert 'worker' in cluster_spec.jobs\n    return '/job:worker/replica:0/task:0'",
    "docstring": "Return the task name of the coordination service leader. Args: cluster_spec: a dict, or object sxpecifying the cluster configurations. Returns: a string indicating the task name of the coordination service leader.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\multi_worker_util.py",
    "ast_data": "FunctionDef name:coordination_leader arg:cluster_spec arguments arg Assign Call If Call Return return:yes If Compare Return return:yes If Compare Return return:yes Compare Return return:yes"
  },
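The leader choice is a fixed precedence: ps first, then chief, then worker. A standalone sketch of that precedence over a plain jobs mapping (hypothetical pick_leader helper, not the TensorFlow function itself):

def pick_leader(jobs):
    for job in ('ps', 'chief', 'worker'):
        if job in jobs:
            return f'/job:{job}/replica:0/task:0'
    return ''  # empty cluster spec

print(pick_leader({'worker': ['w0:2222'], 'chief': ['c0:2222']}))
# -> /job:chief/replica:0/task:0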
  {
    "library": "cherrypy",
    "name": "time",
    "source_code": "def time(self):\n    now = datetime.datetime.now()\n    monthnames = ['jan', 'feb', 'mar', 'apr', 'may', 'jun', 'jul', 'aug', 'sep', 'oct', 'nov', 'dec']\n    month = monthnames[now.month - 1].capitalize()\n    return '[%02d/%s/%04d:%02d:%02d:%02d]' % (now.day, month, now.year, now.hour, now.minute, now.second)",
    "docstring": "Return now() in Apache Common Log Format (no timezone).",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\_cplogging.py",
    "ast_data": "FunctionDef name:time arg:self arguments arg Assign Call Assign Assign Call Return return:yes"
  },
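The same Apache Common Log Format timestamp can also come from strftime, whose %b month name is locale-dependent; the hand-rolled month table above avoids that. A small sketch assuming a C-like locale:

import datetime

now = datetime.datetime(2024, 3, 5, 13, 7, 9)   # fixed time for a stable example
print(now.strftime('[%d/%b/%Y:%H:%M:%S]'))      # [05/Mar/2024:13:07:09]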
  {
    "library": "pytorch",
    "name": "group_count",
    "source_code": "@property\ndef group_count(self) -> int:\n    global _group_count\n    return _group_count",
    "docstring": "Process group count for default naming. TODO don't expose group_count, use something else instead",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\distributed_c10d.py",
    "ast_data": "FunctionDef name:group_count arg:self arguments arg Return return:yes"
  },
  {
    "library": "scipy",
    "name": "_fragment_2_1",
    "source_code": "def _fragment_2_1(X, T, s):\n    n = X.shape[0]\n    diag_T = np.ravel(T.diagonal().copy())\n    scale = 2 ** (-s)\n    exp_diag = np.exp(scale * diag_T)\n    for k in range(n):\n        X[k, k] = exp_diag[k]\n    for i in range(s - 1, -1, -1):\n        X = X.dot(X)\n        scale = 2 ** (-i)\n        exp_diag = np.exp(scale * diag_T)\n        for k in range(n):\n            X[k, k] = exp_diag[k]\n        for k in range(n - 1):\n            lam_1 = scale * diag_T[k]\n            lam_2 = scale * diag_T[k + 1]\n            t_12 = scale * T[k, k + 1]\n            value = _eq_10_42(lam_1, lam_2, t_12)\n            X[k, k + 1] = value\n    return X",
    "docstring": "A helper function for expm_2009. Notes ----- The argument X is modified in-place, but this modification is not the same as the returned value of the function. This function also takes pains to do things in ways that are compatible with sparse arrays, for example by avoiding fancy indexing and by using methods of the matrices whenever possible instead of using functions of the numpy or scipy libraries themselves.",
    "type": "function",
    "file_path": "scipy\\scipy\\sparse\\linalg\\_matfuncs.py",
    "ast_data": "FunctionDef name:_fragment_2_1 arg:X arg:T arg:s arguments arg arg arg Assign Assign Call Call Call Assign Assign Call For Call Assign For Call Assign Call Assign Assign Call For Call Assign For Call Assign Assign Assign Assign Call Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_type",
    "source_code": "def _type(operator):\n    if isinstance(operator, linear_operator_diag.LinearOperatorDiag):\n        return _DIAG\n    if isinstance(operator, linear_operator_lower_triangular.LinearOperatorLowerTriangular):\n        return _TRIL\n    if isinstance(operator, linear_operator_full_matrix.LinearOperatorFullMatrix):\n        return _MATRIX\n    if isinstance(operator, linear_operator_identity.LinearOperatorIdentity):\n        return _IDENTITY\n    if isinstance(operator, linear_operator_identity.LinearOperatorScaledIdentity):\n        return _SCALED_IDENTITY\n    raise TypeError(f'Expected operator to be one of [LinearOperatorDiag, LinearOperatorLowerTriangular, LinearOperatorFullMatrix, LinearOperatorIdentity, LinearOperatorScaledIdentity]. Received: {operator}')",
    "docstring": "Returns the type name constant (e.g. _TRIL) for operator.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\linalg\\linear_operator_addition.py",
    "ast_data": "FunctionDef name:_type arg:operator arguments arg If Call Return return:yes If Call Return return:yes If Call Return return:yes If Call Return return:yes If Call Return return:yes Raise Call"
  },
  {
    "library": "scipy",
    "name": "_initialize_grid_pool",
    "source_code": "def _initialize_grid_pool(self):\n    self.sample_pool = []\n    self.sample_grid = np.empty(np.append(self.grid_size, self.d), dtype=np.float32)\n    self.sample_grid.fill(np.nan)",
    "docstring": "Sampling pool and sample grid.",
    "type": "method",
    "file_path": "scipy\\scipy\\stats\\_qmc.py",
    "ast_data": "FunctionDef name:_initialize_grid_pool arg:self arguments arg Assign Assign Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "_check_norm_dtype",
    "source_code": "def _check_norm_dtype(dtype: Optional[torch.dtype], x_dtype: torch.dtype, fn_name: str):\n    if dtype is not None:\n        torch._check(utils.is_float_dtype(dtype) or utils.is_complex_dtype(dtype), lambda: f'{fn_name}: dtype should be floating point or complex. Got {dtype}')\n        torch._check(utils.is_complex_dtype(dtype) == utils.is_complex_dtype(x_dtype), lambda: '{fn_name}: dtype should be {d} for {d} inputs. Got {dtype}'.format(fn_name=fn_name, d='complex' if utils.is_complex_dtype(x_dtype) else 'real', dtype=dtype))\n        torch._check(utils.get_higher_dtype(dtype, x_dtype) == dtype, lambda: f'{fn_name}: the dtype of the input ({x_dtype}) should be convertible without narrowing to the specified dtype ({{dtype}})')",
    "docstring": "Checks related to the dtype kwarg in functions",
    "type": "function",
    "file_path": "pytorch\\torch\\_refs\\linalg\\__init__.py",
    "ast_data": "FunctionDef name:_check_norm_dtype arg:dtype arg:x_dtype arg:fn_name arguments arg arg arg If Compare Call BoolOp Call Call arguments Call Compare Call Call arguments Call Call Call Compare Call arguments"
  },
  {
    "library": "pandas",
    "name": "get_values_for_csv",
    "source_code": "def get_values_for_csv(self, *, float_format, date_format, decimal, na_rep: str='nan', quoting=None) -> Self:\n    return self.apply('get_values_for_csv', na_rep=na_rep, quoting=quoting, float_format=float_format, date_format=date_format, decimal=decimal)",
    "docstring": "Convert values to native types (strings / python objects) that are used in formatting (repr / csv).",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\internals\\managers.py",
    "ast_data": "FunctionDef name:get_values_for_csv arg:self arguments arg arg arg arg arg arg Return return:yes Call"
  },
  {
    "library": "django",
    "name": "simple",
    "source_code": "@property\ndef simple(self):\n    return capi.geos_issimple(self.ptr)",
    "docstring": "Return false if the Geometry isn't simple.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\geos\\geometry.py",
    "ast_data": "FunctionDef name:simple arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_restore_descendants",
    "source_code": "def _restore_descendants(self, reader=None):\n    visit_queue = collections.deque([(self, self.trackable)])\n    restore_ops = []\n    tensor_saveables = {}\n    python_positions = []\n    registered_savers = collections.defaultdict(dict)\n    while visit_queue:\n        current_position, _ = visit_queue.popleft()\n        new_restore_ops, new_tensor_saveables, new_python_positions, new_registered_savers = current_position._single_restore()\n        restore_ops.extend(new_restore_ops)\n        tensor_saveables.update(new_tensor_saveables)\n        python_positions.extend(new_python_positions)\n        for saver_name, trackable_map in new_registered_savers.items():\n            registered_savers[saver_name].update(trackable_map)\n        _queue_children_for_restoration(current_position, visit_queue)\n        _queue_slot_variables(current_position, visit_queue)\n    restore_ops.extend(current_position.checkpoint.restore_saveables(tensor_saveables, python_positions, registered_savers, reader=reader))\n    return restore_ops",
    "docstring": "Restore the bound Trackable and dependencies (may be deferred).",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\checkpoint\\restore.py",
    "ast_data": "FunctionDef name:_restore_descendants arg:self arg:reader arguments arg arg Assign Call Assign Assign Assign Assign Call While Assign Call Assign Call Call Call Call For Call Call Call Call Call Call Return return:yes"
  },
  {
    "library": "authlib",
    "name": "InvalidClientError",
    "source_code": "class InvalidClientError(OAuth2Error):\n    error = 'invalid_client'\n    status_code = 400\n\n    def get_headers(self):\n        headers = super().get_headers()\n        if self.status_code == 401:\n            error_description = self.get_error_description()\n            error_description = error_description.replace('\"', '|')\n            extras = [f'error=\"{self.error}\"', f'error_description=\"{error_description}\"']\n            headers.append(('WWW-Authenticate', 'Basic ' + ', '.join(extras)))\n        return headers",
    "docstring": "Client authentication failed (e.g., unknown client, no client authentication included, or unsupported authentication method). The authorization server MAY return an HTTP 401 (Unauthorized) status code to indicate which HTTP authentication schemes are supported. If the client attempted to authenticate via the \"Authorization\" request header field, the authorization server MUST respond with an HTTP 401 (Unauthorized) status code and include the \"WWW-Authenticate\" response header field matching the authentication scheme used by the client.",
    "type": "class",
    "file_path": "authlib\\authlib\\oauth2\\rfc6749\\errors.py",
    "ast_data": "ClassDef name:InvalidClientError Assign Assign FunctionDef name:get_headers arg:self arguments arg Assign Call Call If Compare Assign Call Assign Call Assign Call Call Return return:yes"
  },
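In the 401 branch above, the header value is assembled by quoting the error code and a sanitized description (double quotes swapped for pipes so the quoted-string stays valid). A standalone sketch of that assembly with plain strings, not authlib objects:

error = 'invalid_client'
description = 'Client "acme" not found'.replace('"', '|')  # hypothetical description
extras = [f'error="{error}"', f'error_description="{description}"']
print(('WWW-Authenticate', 'Basic ' + ', '.join(extras)))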
  {
    "library": "sphinx",
    "name": "new_document",
    "source_code": "def new_document(self) -> nodes.document:\n    document = super().new_document()\n    document.transformer = SphinxTransformer(document)\n    document.transformer.set_environment(self.settings.env)\n    reporter = document.reporter\n    document.reporter = LoggingReporter.from_reporter(reporter)\n    return document",
    "docstring": "Creates a new document object which has a special reporter object good for logging.",
    "type": "method",
    "file_path": "sphinx\\sphinx\\io.py",
    "ast_data": "FunctionDef name:new_document arg:self arguments arg Assign Call Call Assign Call Call Assign Assign Call Return return:yes"
  },
  {
    "library": "django",
    "name": "get_references",
    "source_code": "def get_references(state, model_tuple, field_tuple=()):\n    for state_model_tuple, model_state in state.models.items():\n        for name, field in model_state.fields.items():\n            reference = field_references(state_model_tuple, field, model_tuple, *field_tuple)\n            if reference:\n                yield (model_state, name, field, reference)",
    "docstring": "Generator of (model_state, name, field, reference) referencing provided context. If field_tuple is provided only references to this particular field of model_tuple will be generated.",
    "type": "function",
    "file_path": "django\\django\\db\\migrations\\utils.py",
    "ast_data": "FunctionDef name:get_references arg:state arg:model_tuple arg:field_tuple arguments arg arg arg For Call For Call Assign Call If"
  },
  {
    "library": "pytorch",
    "name": "parse_users",
    "source_code": "def parse_users(rollout_state: str) -> UserOptins:\n    _, users_text = extract_settings_user_opt_in_from_text(rollout_state)\n    return parse_user_opt_in_from_text(users_text)",
    "docstring": "Parse users from the rollout state.",
    "type": "function",
    "file_path": "pytorch\\.github\\scripts\\runner_determinator.py",
    "ast_data": "FunctionDef name:parse_users arg:rollout_state arguments arg Assign Call Return return:yes Call"
  },
  {
    "library": "numpy",
    "name": "_copy_fields",
    "source_code": "def _copy_fields(ary):\n    dt = ary.dtype\n    copy_dtype = {'names': dt.names, 'formats': [dt.fields[name][0] for name in dt.names]}\n    return array(ary, dtype=copy_dtype, copy=True)",
    "docstring": "Return copy of structured array with padding between fields removed. Parameters ---------- ary : ndarray Structured array from which to remove padding bytes Returns ------- ary_copy : ndarray Copy of ary with padding bytes removed",
    "type": "function",
    "file_path": "numpy\\numpy\\_core\\_internal.py",
    "ast_data": "FunctionDef name:_copy_fields arg:ary arguments arg Assign Assign Return return:yes Call"
  },
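The trick in _copy_fields is that rebuilding a dtype from only names and formats drops padding bytes. A quick demonstration with a deliberately padded dtype (hypothetical field names):

import numpy as np

padded = np.dtype({'names': ['a', 'b'], 'formats': ['u1', 'u1'], 'itemsize': 16})
arr = np.zeros(3, dtype=padded)
packed = np.array(arr, dtype={'names': ['a', 'b'], 'formats': ['u1', 'u1']}, copy=True)
print(arr.itemsize, packed.itemsize)  # 16 2 -- padding removed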
  {
    "library": "matplotlib",
    "name": "_BlendedMixin",
    "source_code": "class _BlendedMixin:\n\n    def __eq__(self, other):\n        if isinstance(other, (BlendedAffine2D, BlendedGenericTransform)):\n            return self._x == other._x and self._y == other._y\n        elif self._x == self._y:\n            return self._x == other\n        else:\n            return NotImplemented\n\n    def contains_branch_seperately(self, transform):\n        return (self._x.contains_branch(transform), self._y.contains_branch(transform))\n    __str__ = _make_str_method('_x', '_y')",
    "docstring": "Common methods for and .",
    "type": "class",
    "file_path": "matplotlib\\lib\\matplotlib\\transforms.py",
    "ast_data": "ClassDef name:_BlendedMixin FunctionDef name:__eq__ arg:self arg:other arguments arg arg If Call Return return:yes BoolOp Compare Compare If Compare Return return:yes Compare Return return:yes FunctionDef name:contains_branch_seperately arg:self arg:transform arguments arg arg Return return:yes Call Call Assign Call"
  },
  {
    "library": "pytorch",
    "name": "leaky_relu",
    "source_code": "def leaky_relu(input: Tensor, negative_slope: float=0.01, inplace: bool=False, scale: Optional[float]=None, zero_point: Optional[int]=None):\n    if scale is not None and zero_point is not None:\n        assert not inplace, 'Cannot rescale with `inplace`'\n        output = torch._empty_affine_quantized(input.shape, scale=scale, zero_point=int(zero_point), dtype=input.dtype)\n        torch._C._nn.leaky_relu(input, negative_slope, out=output)\n        return output\n    if inplace:\n        result = torch._C._nn.leaky_relu_(input, negative_slope)\n    else:\n        result = torch._C._nn.leaky_relu(input, negative_slope)\n    return result",
    "docstring": "Quantized version of the. leaky_relu(input, negative_slope=0.01, inplace=False, scale, zero_point) -> Tensor Applies element-wise, :math: Args: input: Quantized input negative_slope: The slope of the negative input inplace: Inplace modification of the input tensor scale, zero_point: Scale and zero point of the output tensor. See :class: for more details.",
    "type": "function",
    "file_path": "pytorch\\torch\\ao\\nn\\quantized\\functional.py",
    "ast_data": "FunctionDef name:leaky_relu arg:input arg:negative_slope arg:inplace arg:scale arg:zero_point arguments arg arg arg arg arg If BoolOp Compare Compare Assign Call Call Call Return return:yes If Assign Call Assign Call Return return:yes"
  },
  {
    "library": "pandas",
    "name": "ensure_dtype_can_hold_na",
    "source_code": "def ensure_dtype_can_hold_na(dtype: DtypeObj) -> DtypeObj:\n    if isinstance(dtype, ExtensionDtype):\n        if dtype._can_hold_na:\n            return dtype\n        elif isinstance(dtype, IntervalDtype):\n            return IntervalDtype(np.float64, closed=dtype.closed)\n        return _dtype_obj\n    elif dtype.kind == 'b':\n        return _dtype_obj\n    elif dtype.kind in 'iu':\n        return np.dtype(np.float64)\n    return dtype",
    "docstring": "If we have a dtype that cannot hold NA values, find the best match that can.",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\dtypes\\cast.py",
    "ast_data": "FunctionDef name:ensure_dtype_can_hold_na arg:dtype arguments arg If Call If Return return:yes If Call Return return:yes Call Return return:yes If Compare Return return:yes If Compare Return return:yes Call Return return:yes"
  },
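For plain NumPy dtypes the mapping above reduces to: bool becomes object, integers become float64 (so NaN fits), and anything else is kept. A self-contained sketch of just that NumPy branch (hypothetical helper name; the ExtensionDtype branches are pandas-internal):

import numpy as np

def can_hold_na(dtype: np.dtype) -> np.dtype:
    if dtype.kind == 'b':       # bool cannot hold NaN -> object
        return np.dtype(object)
    if dtype.kind in 'iu':      # signed/unsigned ints -> float64
        return np.dtype(np.float64)
    return dtype                # floats, datetimes, ... already can

print(can_hold_na(np.dtype(np.int32)))   # float64
print(can_hold_na(np.dtype(np.bool_)))   # object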
  {
    "library": "pytorch",
    "name": "set_default_validate_args",
    "source_code": "@staticmethod\ndef set_default_validate_args(value: bool) -> None:\n    if value not in [True, False]:\n        raise ValueError\n    Distribution._validate_args = value",
    "docstring": "Sets whether validation is enabled or disabled. The default behavior mimics Python's ``). Validation may be expensive, so you may want to disable it once a model is working. Args: value (bool): Whether to enable validation.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributions\\distribution.py",
    "ast_data": "FunctionDef name:set_default_validate_args arg:value arguments arg If Compare Raise Assign"
  },
  {
    "library": "tensorflow",
    "name": "num_replicas_in_sync",
    "source_code": "@property\ndef num_replicas_in_sync(self):\n    return self._strategy.num_replicas_in_sync",
    "docstring": "Returns number of replicas that are kept in sync.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\distribute_lib.py",
    "ast_data": "FunctionDef name:num_replicas_in_sync arg:self arguments arg Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "get_antialiased",
    "source_code": "def get_antialiased(self):\n    return self._antialiased",
    "docstring": "Return whether antialiased rendering is used.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\lines.py",
    "ast_data": "FunctionDef name:get_antialiased arg:self arguments arg Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "_pack",
    "source_code": "def _pack(coefs_, intercepts_):\n    return np.hstack([l.ravel() for l in coefs_ + intercepts_])",
    "docstring": "Pack the parameters into a single vector.",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\neural_network\\_multilayer_perceptron.py",
    "ast_data": "FunctionDef name:_pack arg:coefs_ arg:intercepts_ arguments arg arg Return return:yes Call Call"
  },
  {
    "library": "scipy",
    "name": "Jacobian",
    "source_code": "class Jacobian:\n\n    def __init__(self, **kw):\n        names = ['solve', 'update', 'matvec', 'rmatvec', 'rsolve', 'matmat', 'todense', 'shape', 'dtype']\n        for name, value in kw.items():\n            if name not in names:\n                raise ValueError(f'Unknown keyword argument {name}')\n            if value is not None:\n                setattr(self, name, kw[name])\n        if hasattr(self, 'todense'):\n\n            def __array__(self, dtype=None, copy=None):\n                if dtype is not None:\n                    raise ValueError(f'`dtype` must be None, was {dtype}')\n                return self.todense()\n\n    def aspreconditioner(self):\n        return InverseJacobian(self)\n\n    def solve(self, v, tol=0):\n        raise NotImplementedError\n\n    def update(self, x, F):\n        pass\n\n    def setup(self, x, F, func):\n        self.func = func\n        self.shape = (F.size, x.size)\n        self.dtype = F.dtype\n        if self.__class__.setup is Jacobian.setup:\n            self.update(x, F)",
    "docstring": "Common interface for Jacobians or Jacobian approximations. The optional methods come useful when implementing trust region etc., algorithms that often require evaluating transposes of the Jacobian. Methods ------- solve Returns J^-1 * v update Updates Jacobian to point (where the function has residual ) matvec : optional Returns J * v rmatvec : optional Returns A^H * v rsolve : optional Returns A^-H * v matmat : optional Returns A * V, where V is a dense matrix with dimensions (N,K). todense : optional Form the dense Jacobian matrix. Necessary for dense trust region algorithms, and useful for testing. Attributes ---------- shape Matrix dimensions (M, N) dtype Data type of the matrix. func : callable, optional Function the Jacobian corresponds to",
    "type": "class",
    "file_path": "scipy\\scipy\\optimize\\_nonlin.py",
    "ast_data": "ClassDef name:Jacobian FunctionDef name:__init__ arg:self arguments arg arg Assign For Call If Compare Raise Call If Compare Call If Call FunctionDef name:__array__ arg:self arg:dtype arg:copy arguments arg arg arg If Compare Raise Call Return return:yes Call FunctionDef name:aspreconditioner arg:self arguments arg Return return:yes Call FunctionDef name:solve arg:self arg:v arg:tol arguments arg arg arg Raise FunctionDef name:update arg:self arg:x arg:F arguments arg arg arg FunctionDef name:setup arg:self arg:x arg:F arg:func arguments arg arg arg arg Assign Assign Assign If Compare Call"
  },
  {
    "library": "pandas",
    "name": "freq",
    "source_code": "@property\ndef freq(self):\n    return self._get_values().inferred_freq",
    "docstring": "Tries to return a string representing a frequency generated by infer_freq. Returns None if it can't autodetect the frequency. See Also -------- Series.dt.to_period : Cast to PeriodArray/PeriodIndex at a particular frequency. Examples -------- >>> ser = pd.Series([\"2024-01-01\", \"2024-01-02\", \"2024-01-03\", \"2024-01-04\"]) >>> ser = pd.to_datetime(ser) >>> ser.dt.freq 'D' >>> ser = pd.Series([\"2022-01-01\", \"2024-01-01\", \"2026-01-01\", \"2028-01-01\"]) >>> ser = pd.to_datetime(ser) >>> ser.dt.freq '2YS-JAN'",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\indexes\\accessors.py",
    "ast_data": "FunctionDef name:freq arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "rename_columns",
    "source_code": "def rename_columns(self, X, columns):\n    pass",
    "docstring": "Rename columns in . Parameters ---------- X : container Container which columns is updated. columns : ndarray of str Columns to update the 's columns with. Returns ------- updated_container : container Container with new names.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\utils\\_set_output.py",
    "ast_data": "FunctionDef name:rename_columns arg:self arg:X arg:columns arguments arg arg arg"
  },
  {
    "library": "pytorch",
    "name": "advance_backend",
    "source_code": "@classmethod\ndef advance_backend(cls, curr_backend: str) -> Optional[str]:\n    current_system_index = list(BACKENDS.keys()).index(curr_backend)\n    if current_system_index < len(BACKENDS) - 1:\n        curr_backend = list(BACKENDS.keys())[current_system_index + 1]\n        cls.update_bisect_status(curr_backend, '')\n        print(f'Moving to the next system: {curr_backend}')\n        return curr_backend\n    else:\n        return None",
    "docstring": "Tries Move to the next backend.",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\compiler_bisector.py",
    "ast_data": "FunctionDef name:advance_backend arg:cls arg:curr_backend arguments arg arg Assign Call Call Call If Compare Call Assign Call Call Call Call Return return:yes Return return:no"
  },
  {
    "library": "tensorflow",
    "name": "_MaybeAddControlDependency",
    "source_code": "def _MaybeAddControlDependency(self, op: ops.Operation):\n\n    def _IsOpFree(op):\n        if op.control_inputs:\n            return False\n        if op.graph._is_function(op.type) or op.type == 'SymbolicGradient':\n            return True\n        for x in op.inputs:\n            if not util.IsLoopConstantEnter(x.op):\n                return False\n        return True\n    if _IsOpFree(op):\n        op._add_control_input(self.GetControlPivot().op)",
    "docstring": "Add a control input to the op if it only depends on loop invariants.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\control_flow_ops.py",
    "ast_data": "FunctionDef name:_MaybeAddControlDependency arg:self arg:op arguments arg arg FunctionDef name:_IsOpFree arg:op arguments arg If Return return:yes If BoolOp Call Compare Return return:yes For If Call Return return:yes Return return:yes If Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "split_scan",
    "source_code": "def split_scan(size_hints, reduction_hint=False, triton_meta=None, filename=None, inductor_meta=None):\n    inductor_meta = {} if inductor_meta is None else inductor_meta\n    inductor_meta['reduction_hint'] = reduction_hint\n    if inductor_meta.get('no_x_dim'):\n        size_hints['x'] = 1\n    assert triton_meta is not None\n    if len(size_hints) != 2:\n        raise NotImplementedError(f'size_hints: {size_hints}')\n    configs = _reduction_configs(size_hints=size_hints, inductor_meta=inductor_meta)\n    min_rblock = inductor_meta.get('min_split_scan_rblock', 256)\n    for cfg in configs:\n        for var in list(cfg.kwargs.keys()):\n            if var.startswith('R') and cfg.kwargs[var] < min_rblock:\n                cfg.kwargs[var] = min_rblock\n    return cached_autotune(size_hints, configs=configs, triton_meta=triton_meta, inductor_meta=inductor_meta, heuristic_type=HeuristicType.SPLIT_SCAN, filename=filename)",
    "docstring": "Heuristic for TritonSplitScanKernel",
    "type": "function",
    "file_path": "pytorch\\torch\\_inductor\\runtime\\triton_heuristics.py",
    "ast_data": "FunctionDef name:split_scan arg:size_hints arg:reduction_hint arg:triton_meta arg:filename arg:inductor_meta arguments arg arg arg arg arg Assign Compare Assign If Call Assign Compare If Compare Call Raise Call Assign Call Assign Call For For Call Call If BoolOp Call Compare Assign Return return:yes Call"
  },
  {
    "library": "django",
    "name": "get_allow_empty",
    "source_code": "def get_allow_empty(self):\n    return self.allow_empty",
    "docstring": "Return `` if a 404 should be raised instead.",
    "type": "method",
    "file_path": "django\\django\\views\\generic\\list.py",
    "ast_data": "FunctionDef name:get_allow_empty arg:self arguments arg Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "_estimate_wishart_diag",
    "source_code": "def _estimate_wishart_diag(self, nk, xk, sk):\n    _, n_features = xk.shape\n    self.degrees_of_freedom_ = self.degrees_of_freedom_prior_ + nk\n    diff = xk - self.mean_prior_\n    self.covariances_ = self.covariance_prior_ + nk[:, np.newaxis] * (sk + (self.mean_precision_prior_ / self.mean_precision_)[:, np.newaxis] * np.square(diff))\n    self.covariances_ /= self.degrees_of_freedom_[:, np.newaxis]",
    "docstring": "Estimate the diag Wishart distribution parameters. Parameters ---------- X : array-like of shape (n_samples, n_features) nk : array-like of shape (n_components,) xk : array-like of shape (n_components, n_features) sk : array-like of shape (n_components, n_features)",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\mixture\\_bayesian_mixture.py",
    "ast_data": "FunctionDef name:_estimate_wishart_diag arg:self arg:nk arg:xk arg:sk arguments arg arg arg arg Assign Assign Assign Assign Call"
  },
  {
    "library": "matplotlib",
    "name": "_get_tree_stats",
    "source_code": "def _get_tree_stats(self):\n    return self._cpp_trifinder.get_tree_stats()",
    "docstring": "Return a python list containing the statistics about the node tree: 0: number of nodes (tree size) 1: number of unique nodes 2: number of trapezoids (tree leaf nodes) 3: number of unique trapezoids 4: maximum parent count (max number of times a node is repeated in tree) 5: maximum depth of tree (one more than the maximum number of comparisons needed to search through the tree) 6: mean of all trapezoid depths (one more than the average number of comparisons needed to search through the tree)",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\tri\\_trifinder.py",
    "ast_data": "FunctionDef name:_get_tree_stats arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "to_timestamp",
    "source_code": "def to_timestamp(self, freq: Frequency | None=None, how: Literal['s', 'e', 'start', 'end']='start', copy: bool | lib.NoDefault=lib.no_default) -> Series:\n    self._check_copy_deprecation(copy)\n    if not isinstance(self.index, PeriodIndex):\n        raise TypeError(f'unsupported Type {type(self.index).__name__}')\n    new_obj = self.copy(deep=False)\n    new_index = self.index.to_timestamp(freq=freq, how=how)\n    setattr(new_obj, 'index', new_index)\n    return new_obj",
    "docstring": "Cast to DatetimeIndex of Timestamps, at *beginning* of period. This can be changed to the *end* of the period, by specifying . Parameters ---------- freq : str, default frequency of PeriodIndex Desired frequency. how : {'s', 'e', 'start', 'end'} Convention for converting period to timestamp; start of period vs. end. copy : bool, default False Whether or not to return a copy. .. note:: The keyword will change behavior in pandas 3.0. __ will be enabled by default, which means that all methods with a keyword will use a lazy copy mechanism to defer the copy and ignore the keyword. The keyword will be removed in a future version of pandas. You can already get the future behavior and improvements through enabling copy on write `YearBeginfreq` which is the offset that the Timestamps will have >>> s2 = pd.Series([1, 2, 3], index=idx) >>> s2 = s2.to_timestamp(freq=\"M\") >>> s2 2023-01-31 1 2024-01-31 2 2025-01-31 3 Freq: YE-JAN, dtype: int64",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\series.py",
    "ast_data": "FunctionDef name:to_timestamp arg:self arg:freq arg:how arg:copy arguments arg arg arg arg Call If Call Raise Call Call Assign Call Assign Call Call Return return:yes"
  },
  {
    "library": "numpy",
    "name": "eye",
    "source_code": "def eye(n, M=None, k=0, dtype=float, order='C'):\n    return asmatrix(np.eye(n, M=M, k=k, dtype=dtype, order=order))",
    "docstring": "Return a matrix with ones on the diagonal and zeros elsewhere. Parameters ---------- n : int Number of rows in the output. M : int, optional Number of columns in the output, defaults to . k : int, optional Index of the diagonal: 0 refers to the main diagonal, a positive value refers to an upper diagonal, and a negative value to a lower diagonal. dtype : dtype, optional Data-type of the returned matrix. order : {'C', 'F'}, optional Whether the output should be stored in row-major (C-style) or column-major (Fortran-style) order in memory. Returns ------- I : matrix A x matrix where all elements are equal to zero, except for the -th diagonal, whose values are equal to one. See Also -------- numpy.eye : Equivalent array function. identity : Square identity matrix. Examples -------- >>> import numpy.matlib >>> np.matlib.eye(3, k=1, dtype=float) matrix([[0., 1., 0.], [0., 0., 1.], [0., 0., 0.]])",
    "type": "function",
    "file_path": "numpy\\numpy\\matlib.py",
    "ast_data": "FunctionDef name:eye arg:n arg:M arg:k arg:dtype arg:order arguments arg arg arg arg arg Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_get_metric_objects",
    "source_code": "def _get_metric_objects(self, metrics, y_t, y_p):\n    metrics = nest.flatten(metrics)\n    return [self._get_metric_object(m, y_t, y_p) for m in metrics]",
    "docstring": "Convert user-supplied metrics to objects.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\compile_utils.py",
    "ast_data": "FunctionDef name:_get_metric_objects arg:self arg:metrics arg:y_t arg:y_p arguments arg arg arg arg Assign Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "RunOptions",
    "source_code": "@tf_export('distribute.RunOptions')\nclass RunOptions(collections.namedtuple('RunOptions', ['experimental_enable_dynamic_batch_size', 'experimental_bucketizing_dynamic_shape', 'experimental_xla_options'])):\n\n    def __new__(cls, experimental_enable_dynamic_batch_size=True, experimental_bucketizing_dynamic_shape=False, experimental_xla_options=None):\n        return super(RunOptions, cls).__new__(cls, experimental_enable_dynamic_batch_size, experimental_bucketizing_dynamic_shape, experimental_xla_options)",
    "docstring": "Run options for . This can be used to hold some strategy specific configs. Attributes: experimental_enable_dynamic_batch_size: Boolean. Only applies to TPUStrategy. Default to True. If True, TPUStrategy will enable dynamic padder to support dynamic batch size for the inputs. Otherwise only static shape inputs are allowed. experimental_bucketizing_dynamic_shape: Boolean. Only applies to TPUStrategy. Default to False. If True, TPUStrategy will automatic bucketize inputs passed into if the input shape is dynamic. This is a performance optimization to reduce XLA recompilation, which should not have impact on correctness. experimental_xla_options: A instance. Only applies to TPUStrategy. Controls the XLA compiling options on TPUs. Default to None.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\distribute_lib.py",
    "ast_data": "ClassDef name:RunOptions Call FunctionDef name:__new__ arg:cls arg:experimental_enable_dynamic_batch_size arg:experimental_bucketizing_dynamic_shape arg:experimental_xla_options arguments arg arg arg arg Return return:yes Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "CompilerWrapper",
    "source_code": "class CompilerWrapper:\n\n    def pre_compile(self, flat_fn, flat_args: list[Tensor], aot_config: AOTConfig, *, fw_metadata: ViewAndMutationMeta) -> tuple[Callable, list[Tensor], ViewAndMutationMeta]:\n        return (flat_fn, flat_args, fw_metadata)\n\n    def post_compile(self, compiled_fn, aot_config, *, runtime_metadata) -> Callable:\n        return compiled_fn",
    "docstring": "A wrapper around the inputs and outputs to the compiler_fn. We separate these into two parts: 1. The prologue, which edits the input to the compiler_fn(flat_fn, flat_args, etc) 2. The epilogue, which edits the outputs of the compiler_fn (compiled_fn, real arguments) Each wrapper below should be implemented as a CompilerWrapper, so that we can facilitate caching on the compiled output, and re-wrapping the output via epilogues. Extra metadata that is needed to compute pre or post compile can be passed in via attributes.",
    "type": "class",
    "file_path": "pytorch\\torch\\_functorch\\_aot_autograd\\runtime_wrappers.py",
    "ast_data": "ClassDef name:CompilerWrapper FunctionDef name:pre_compile arg:self arg:flat_fn arg:flat_args arg:aot_config arguments arg arg arg arg arg Return return:yes FunctionDef name:post_compile arg:self arg:compiled_fn arg:aot_config arguments arg arg arg arg Return return:yes"
  },
  {
    "library": "scipy",
    "name": "is_marray",
    "source_code": "def is_marray(xp):\n    return 'marray' in xp.__name__",
    "docstring": "Returns True if is an MArray namespace; False otherwise.",
    "type": "function",
    "file_path": "scipy\\scipy\\_lib\\_array_api.py",
    "ast_data": "FunctionDef name:is_marray arg:xp arguments arg Return return:yes Compare"
  },
  {
    "library": "pytorch",
    "name": "add_node",
    "source_code": "def add_node(self, n, **kwargs):\n    if n not in self._node:\n        self._node[n] = kwargs\n        self._succ[n] = {}\n        self._pred[n] = {}\n        self._node_order[n] = self._insertion_idx\n        self._insertion_idx += 1\n    else:\n        self._node[n].update(kwargs)",
    "docstring": "Add a node to the graph. Args: n: the node. Can we any object that is a valid dict key. **kwargs: any attributes you want to attach to the node.",
    "type": "method",
    "file_path": "pytorch\\torch\\package\\_digraph.py",
    "ast_data": "FunctionDef name:add_node arg:self arg:n arguments arg arg arg If Compare Assign Assign Assign Assign Call"
  },
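The structure behind add_node is three parallel dicts: node attributes plus empty successor/predecessor maps, with attribute dicts merged on re-insertion. A stripped-down sketch (hypothetical MiniGraph; the insertion-order bookkeeping is omitted):

class MiniGraph:
    def __init__(self):
        self._node, self._succ, self._pred = {}, {}, {}

    def add_node(self, n, **attrs):
        if n not in self._node:
            self._node[n] = attrs
            self._succ[n] = {}
            self._pred[n] = {}
        else:
            self._node[n].update(attrs)  # merge attributes on repeat insert

g = MiniGraph()
g.add_node('a', color='red')
g.add_node('a', weight=1)
print(g._node['a'])  # {'color': 'red', 'weight': 1}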
  {
    "library": "pytorch",
    "name": "wrapped",
    "source_code": "@functools.wraps(orig_fn)\ndef wrapped(*args, **kwargs):\n    proxy = _find_proxy(args, kwargs)\n    if proxy is not None:\n        return proxy.tracer.create_proxy('call_method', name, args, kwargs)\n    return orig_fn(*args, **kwargs)",
    "docstring": "Search the args and kwargs for a Proxy object. If there is one, emit a `` node to preserve the call to this method directly. Otherwise, just return the results of this function call, as this function is not being traced.",
    "type": "function",
    "file_path": "pytorch\\torch\\fx\\_symbolic_trace.py",
    "ast_data": "FunctionDef name:wrapped arguments arg arg Assign Call If Compare Return return:yes Call Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "is_scripting",
    "source_code": "def is_scripting() -> bool:\n    return False",
    "docstring": "Function that returns True when in compilation and False otherwise. This is useful especially with the @unused decorator to leave code in your model that is not yet TorchScript compatible. .. testcode:: import torch @torch.jit.unused def unsupported_linear_op(x): return x def linear(x): if torch.jit.is_scripting(): return torch.linear(x) else: return unsupported_linear_op(x)",
    "type": "function",
    "file_path": "pytorch\\torch\\_jit_internal.py",
    "ast_data": "FunctionDef name:is_scripting arguments Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_log_ndtr_lower",
    "source_code": "def _log_ndtr_lower(x, series_order):\n    x_2 = math_ops.square(x)\n    log_scale = -0.5 * x_2 - math_ops.log(-x) - 0.5 * np.log(2.0 * np.pi)\n    return log_scale + math_ops.log(_log_ndtr_asymptotic_series(x, series_order))",
    "docstring": "Asymptotic expansion version of , appropriate for .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\distributions\\special_math.py",
    "ast_data": "FunctionDef name:_log_ndtr_lower arg:x arg:series_order arguments arg arg Assign Call Assign Call Call Return return:yes Call Call"
  },
  {
    "library": "scipy",
    "name": "isspmatrix_csr",
    "source_code": "def isspmatrix_csr(x):\n    return isinstance(x, csr_matrix)",
    "docstring": "Is of csr_matrix type? Parameters ---------- x object to check for being a csr matrix Returns ------- bool True if is a csr matrix, False otherwise Examples -------- >>> from scipy.sparse import csr_array, csr_matrix, coo_matrix, isspmatrix_csr >>> isspmatrix_csr(csr_matrix([[5]])) True >>> isspmatrix_csr(csr_array([[5]])) False >>> isspmatrix_csr(coo_matrix([[5]])) False",
    "type": "function",
    "file_path": "scipy\\scipy\\sparse\\_csr.py",
    "ast_data": "FunctionDef name:isspmatrix_csr arg:x arguments arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "is_available",
    "source_code": "@_lru_cache\ndef is_available() -> bool:\n    return torch._C._mps_is_available()",
    "docstring": "Return a bool indicating if MPS is currently available.",
    "type": "function",
    "file_path": "pytorch\\torch\\backends\\mps\\__init__.py",
    "ast_data": "FunctionDef name:is_available arguments Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "is_empty_slice",
    "source_code": "def is_empty_slice(obj) -> bool:\n    return isinstance(obj, slice) and obj.start is not None and (obj.stop is not None) and (obj.start == obj.stop)",
    "docstring": "We have an empty slice, e.g. no values are selected.",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\common.py",
    "ast_data": "FunctionDef name:is_empty_slice arg:obj arguments arg Return return:yes BoolOp Call Compare Compare Compare"
  },
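Note the asymmetry: slice(None) (select everything) is not "empty" because its endpoints are None; only bounded slices with equal endpoints qualify. A quick check of the predicate as written:

def is_empty_slice(obj) -> bool:
    return (isinstance(obj, slice) and obj.start is not None
            and obj.stop is not None and obj.start == obj.stop)

print(is_empty_slice(slice(3, 3)))  # True: selects nothing
print(is_empty_slice(slice(None)))  # False: open slice selects everything
print(is_empty_slice(slice(0, 5)))  # False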
  {
    "library": "tensorflow",
    "name": "parents",
    "source_code": "@property\ndef parents(self):\n    return [self.categorical_column]",
    "docstring": "See 'FeatureColumn` base class.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\feature_column\\feature_column_v2.py",
    "ast_data": "FunctionDef name:parents arg:self arguments arg Return return:yes"
  },
  {
    "library": "numpy",
    "name": "isMaskedArray",
    "source_code": "def isMaskedArray(x):\n    return isinstance(x, MaskedArray)",
    "docstring": "Test whether input is an instance of MaskedArray. This function returns True if is an instance of MaskedArray and returns False otherwise. Any object is accepted as input. Parameters ---------- x : object Object to test. Returns ------- result : bool True if is a MaskedArray. See Also -------- isMA : Alias to isMaskedArray. isarray : Alias to isMaskedArray. Examples -------- >>> import numpy as np >>> import numpy.ma as ma >>> a = np.eye(3, 3) >>> a array([[ 1., 0., 0.], [ 0., 1., 0.], [ 0., 0., 1.]]) >>> m = ma.masked_values(a, 0) >>> m masked_array( data=[[1.0, --, --], [--, 1.0, --], [--, --, 1.0]], mask=[[False, True, True], [ True, False, True], [ True, True, False]], fill_value=0.0) >>> ma.isMaskedArray(a) False >>> ma.isMaskedArray(m) True >>> ma.isMaskedArray([0, 1, 2]) False",
    "type": "function",
    "file_path": "numpy\\numpy\\ma\\core.py",
    "ast_data": "FunctionDef name:isMaskedArray arg:x arguments arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "combine_partitions_based_on_size",
    "source_code": "def combine_partitions_based_on_size(partitions: list[Partition], available_mem_bytes: int) -> None:\n    find_combination = True\n    while find_combination:\n        sorted_partitions = sorted(partitions, key=lambda p: p.used_mem_bytes)\n        get_bfs_level_partition(self.partitions)\n        find_combination, partitions = find_partition_to_combine_based_on_size(sorted_partitions, available_mem_bytes, partitions)\n    return",
    "docstring": "Combining small partitions together to keep as less partitions as possible. Here is an example of the algorithm to do this: Assume some partitions, we first sort them based on partition used memory size. [(partition_4, 1), (partition_3, 1), (partition_2, 2), (partition_1, 7), (partition_0, 9)] The available memory is 10. step 1: self.find_partition_to_combine_based_on_size() First, mark bfs level for each partition Second, look the smallest partition, partition_4: 10 - 1 = 9 It means any partition has a used memory equal or less than 9 could combine this partition We go from the largest and selection partition_0. Check the bfs level for two partitions, if the level difference is less than 2, it can be combined. step 2: repeat step 1 until no partitions can be combined",
    "type": "method",
    "file_path": "pytorch\\torch\\fx\\experimental\\accelerator_partitioner.py",
    "ast_data": "FunctionDef name:combine_partitions_based_on_size arg:partitions arg:available_mem_bytes arguments arg arg Assign While Assign Call arguments arg Call Assign Call Return return:no"
  },
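A toy rendering of the size-based merge loop described in the docstring, ignoring the BFS-level constraint (hypothetical combine helper; the real code also rejects merges whose level difference is 2 or more):

def combine(sizes, budget):
    sizes = sorted(sizes)
    merged = True
    while merged and len(sizes) > 1:
        merged = False
        small = sizes[0]
        for i in range(len(sizes) - 1, 0, -1):   # largest partner that fits
            if small + sizes[i] <= budget:
                sizes[i] += small
                sizes.pop(0)
                sizes.sort()
                merged = True
                break
    return sizes

print(combine([1, 1, 2, 7, 9], 10))  # [10, 10]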
  {
    "library": "pytorch",
    "name": "wait",
    "source_code": "def wait(self) -> None:\n    return",
    "docstring": "Waits on the unshard op. This ensures that the current stream can use the unsharded parameters, which are now registered to the module.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\fsdp\\_fully_shard\\_fully_shard.py",
    "ast_data": "FunctionDef name:wait arg:self arguments arg Return return:no"
  },
  {
    "library": "scipy",
    "name": "num_censored",
    "source_code": "def num_censored(self):\n    return len(self._left) + len(self._right) + len(self._interval)",
    "docstring": "Number of censored values.",
    "type": "method",
    "file_path": "scipy\\scipy\\stats\\_censored_data.py",
    "ast_data": "FunctionDef name:num_censored arg:self arguments arg Return return:yes Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_eval_if_composite",
    "source_code": "def _eval_if_composite(self, tensor):\n    from tensorflow.python.keras.utils import tf_utils\n    if tf_utils.is_extension_type(tensor):\n        return self._session.run(tensor)\n    else:\n        return tensor",
    "docstring": "Helper method which evaluates any CompositeTensors passed to it.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\backend.py",
    "ast_data": "FunctionDef name:_eval_if_composite arg:self arg:tensor arguments arg arg If Call Return return:yes Call Return return:yes"
  },
  {
    "library": "scipy",
    "name": "obrientransform",
    "source_code": "def obrientransform(*samples):\n    TINY = np.sqrt(np.finfo(float).eps)\n    arrays = []\n    sLast = None\n    for sample in samples:\n        a = np.asarray(sample)\n        n = len(a)\n        mu = np.mean(a)\n        sq = (a - mu) ** 2\n        sumsq = sq.sum()\n        t = ((n - 1.5) * n * sq - 0.5 * sumsq) / ((n - 1) * (n - 2))\n        var = sumsq / (n - 1)\n        if abs(var - np.mean(t)) > TINY:\n            raise ValueError('Lack of convergence in obrientransform.')\n        arrays.append(t)\n        sLast = a.shape\n    if sLast:\n        for arr in arrays[:-1]:\n            if sLast != arr.shape:\n                return np.array(arrays, dtype=object)\n    return np.array(arrays)",
    "docstring": "Compute the O'Brien transform on input data (any number of arrays). Used to test for homogeneity of variance prior to running one-way stats. Each array in `f_onewayscipy.stats.f_oneway` for significance, we cannot conclude that the variances are different.",
    "type": "function",
    "file_path": "scipy\\scipy\\stats\\_stats_py.py",
    "ast_data": "FunctionDef name:obrientransform arguments arg Assign Call Call Assign Assign For Assign Call Assign Call Assign Call Assign Assign Call Assign Assign If Compare Call Call Raise Call Call Assign If For If Compare Return return:yes Call Return return:yes Call"
  },
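Typical usage pairs the transform with f_oneway: run the ANOVA on the transformed samples, and a significant result indicates unequal variances. A short sketch with synthetic data:

import numpy as np
from scipy import stats

rng = np.random.default_rng(0)
a = rng.normal(0, 1, size=50)   # unit standard deviation
b = rng.normal(0, 3, size=50)   # much larger spread
ta, tb = stats.obrientransform(a, b)
print(stats.f_oneway(ta, tb).pvalue)  # small p-value -> variances differ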
  {
    "library": "scikit-learn",
    "name": "LassoBenchmark",
    "source_code": "class LassoBenchmark(Predictor, Estimator, Benchmark):\n    param_names = ['representation', 'precompute']\n    params = (['dense', 'sparse'], [True, False])\n\n    def setup_cache(self):\n        super().setup_cache()\n\n    def make_data(self, params):\n        representation, precompute = params\n        if representation == 'dense':\n            data = _synth_regression_dataset(n_samples=1000000, n_features=100)\n        else:\n            data = _synth_regression_sparse_dataset(n_samples=50000, n_features=5000, density=0.01)\n        return data\n\n    def make_estimator(self, params):\n        representation, precompute = params\n        estimator = Lasso(precompute=precompute, alpha=0.001, random_state=0)\n        return estimator\n\n    def make_scorers(self):\n        make_gen_reg_scorers(self)\n\n    def skip(self, params):\n        representation, precompute = params\n        if representation == 'sparse' and precompute is False:\n            return True\n        return False",
    "docstring": "Benchmarks for Lasso.",
    "type": "class",
    "file_path": "scikit-learn\\asv_benchmarks\\benchmarks\\linear_model.py",
    "ast_data": "ClassDef name:LassoBenchmark Assign Assign FunctionDef name:setup_cache arg:self arguments arg Call Call FunctionDef name:make_data arg:self arg:params arguments arg arg Assign If Compare Assign Call Assign Call Return return:yes FunctionDef name:make_estimator arg:self arg:params arguments arg arg Assign Assign Call Return return:yes FunctionDef name:make_scorers arg:self arguments arg Call FunctionDef name:skip arg:self arg:params arguments arg arg Assign If BoolOp Compare Compare Return return:yes Return return:yes"
  },
  {
    "library": "virtualenv",
    "name": "from_bundle",
    "source_code": "def from_bundle(distribution, version, for_py_version, search_dirs, app_data, do_periodic_update, env):\n    of_version = Version.of_version(version)\n    wheel = load_embed_wheel(app_data, distribution, for_py_version, of_version)\n    if version != Version.embed:\n        if app_data.can_update:\n            per = do_periodic_update\n            wheel = periodic_update(distribution, of_version, for_py_version, wheel, search_dirs, app_data, per, env)\n        found_wheel = from_dir(distribution, of_version, for_py_version, search_dirs)\n        if found_wheel is not None and (wheel is None or found_wheel.version_tuple > wheel.version_tuple):\n            wheel = found_wheel\n    return wheel",
    "docstring": "Load the bundled wheel to a cache directory.",
    "type": "function",
    "file_path": "virtualenv\\src\\virtualenv\\seed\\wheels\\bundle.py",
    "ast_data": "FunctionDef name:from_bundle arg:distribution arg:version arg:for_py_version arg:search_dirs arg:app_data arg:do_periodic_update arg:env arguments arg arg arg arg arg arg arg Assign Call Assign Call If Compare If Assign Assign Call Assign Call If BoolOp Compare BoolOp Compare Compare Assign Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "log_prob",
    "source_code": "def log_prob(self, input: Tensor) -> Tensor:\n    head_output = self.head(input)\n    return self._get_full_log_prob(input, head_output)",
    "docstring": "Compute log probabilities for all :math:. Args: input (Tensor): a minibatch of examples Returns: log-probabilities of for each class :math: in range :math:, where :math: is a parameter passed to `(N, \\texttt{in\\_features})(N, \\texttt{n\\_classes})`",
    "type": "method",
    "file_path": "pytorch\\torch\\nn\\modules\\adaptive.py",
    "ast_data": "FunctionDef name:log_prob arg:self arg:input arguments arg arg Assign Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "Identity",
    "source_code": "class Identity(Initializer):\n\n    def __init__(self, gain=1.0):\n        self.gain = gain\n\n    def __call__(self, shape, dtype=None, **kwargs):\n        _validate_kwargs(self.__class__.__name__, kwargs, support_partition=False)\n        dtype = _assert_float_dtype(_get_dtype(dtype))\n        if len(shape) != 2:\n            raise ValueError('Identity matrix initializer can only be used for 2D matrices.')\n        initializer = linalg_ops.eye(*shape, dtype=dtype)\n        return self.gain * initializer\n\n    def get_config(self):\n        return {'gain': self.gain}",
    "docstring": "Initializer that generates the identity matrix. Also available via the shortcut function . Only usable for generating 2D matrices. Examples: >>> # Standalone usage: >>> initializer = tf.keras.initializers.Identity() >>> values = initializer(shape=(2, 2)) >>> # Usage in a Keras layer: >>> initializer = tf.keras.initializers.Identity() >>> layer = tf.keras.layers.Dense(3, kernel_initializer=initializer) Args: gain: Multiplicative factor to apply to the identity matrix.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\initializers\\initializers_v2.py",
    "ast_data": "ClassDef name:Identity FunctionDef name:__init__ arg:self arg:gain arguments arg arg Assign FunctionDef name:__call__ arg:self arg:shape arg:dtype arguments arg arg arg arg Call Assign Call Call If Compare Call Raise Call Assign Call Return return:yes FunctionDef name:get_config arg:self arguments arg Return return:yes"
  },
  {
    "library": "seaborn",
    "name": "_finalize_grid",
    "source_code": "def _finalize_grid(self, axlabels):\n    self.set_axis_labels(*axlabels)\n    self.tight_layout()",
    "docstring": "Finalize the annotations and layout.",
    "type": "method",
    "file_path": "seaborn\\seaborn\\axisgrid.py",
    "ast_data": "FunctionDef name:_finalize_grid arg:self arg:axlabels arguments arg arg Call Call"
  },
  {
    "library": "pytorch",
    "name": "EtcdStubError",
    "source_code": "class EtcdStubError(ImportError):\n\n    def __init__(self) -> None:\n        super().__init__(\"The 'etcd' module is required but not installed.\")",
    "docstring": "Custom exception to indicate that the real etcd module is required.",
    "type": "class",
    "file_path": "pytorch\\torch\\distributed\\elastic\\rendezvous\\_etcd_stub.py",
    "ast_data": "ClassDef name:EtcdStubError FunctionDef name:__init__ arg:self arguments arg Call Call"
  },
  {
    "library": "scikit-learn",
    "name": "predict",
    "source_code": "def predict(self, X):\n    check_is_fitted(self)\n    if self.break_ties and self.decision_function_shape == 'ovo':\n        raise ValueError(\"break_ties must be False when decision_function_shape is 'ovo'\")\n    if self.break_ties and self.decision_function_shape == 'ovr' and (len(self.classes_) > 2):\n        y = np.argmax(self.decision_function(X), axis=1)\n    else:\n        y = super().predict(X)\n    return self.classes_.take(np.asarray(y, dtype=np.intp))",
    "docstring": "Perform classification on samples in X. For an one-class model, +1 or -1 is returned. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) or (n_samples_test, n_samples_train) For kernel=\"precomputed\", the expected shape of X is (n_samples_test, n_samples_train). Returns ------- y_pred : ndarray of shape (n_samples,) Class labels for samples in X.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\svm\\_base.py",
    "ast_data": "FunctionDef name:predict arg:self arg:X arguments arg arg Call If BoolOp Compare Raise Call If BoolOp Compare Compare Call Assign Call Call Assign Call Call Return return:yes Call Call"
  },
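A hedged usage sketch of the break_ties path above via scikit-learn's public API (synthetic three-class data so the decision_function argmax branch is exercised):

from sklearn.datasets import make_classification
from sklearn.svm import SVC

X, y = make_classification(n_samples=150, n_classes=3, n_informative=4,
                           random_state=0)
clf = SVC(decision_function_shape='ovr', break_ties=True).fit(X, y)
print(clf.predict(X[:5]))  # ties broken by argmax over decision_function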
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, initial_learning_rate, first_decay_steps, t_mul=2.0, m_mul=1.0, alpha=0.0, name=None):\n    super(CosineDecayRestarts, self).__init__()\n    self.initial_learning_rate = initial_learning_rate\n    self.first_decay_steps = first_decay_steps\n    self._t_mul = t_mul\n    self._m_mul = m_mul\n    self.alpha = alpha\n    self.name = name",
    "docstring": "Applies cosine decay with restarts to the learning rate. Args: initial_learning_rate: A scalar or Tensor or a Python number. The initial learning rate. first_decay_steps: A scalar or or a Python number. Number of steps to decay over. t_mul: A scalar or or a Python number. Used to derive the number of iterations in the i-th period m_mul: A scalar or or a Python number. Used to derive the initial learning rate of the i-th period: alpha: A scalar or Tensor or a Python number. Minimum learning rate value as a fraction of the initial_learning_rate. name: String. Optional name of the operation. Defaults to 'SGDRDecay'.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\optimizer_v2\\learning_rate_schedule.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:initial_learning_rate arg:first_decay_steps arg:t_mul arg:m_mul arg:alpha arg:name arguments arg arg arg arg arg arg arg Call Call Assign Assign Assign Assign Assign Assign"
  },
  {
    "library": "cherrypy",
    "name": "messageArg",
    "source_code": "@cherrypy.expose\ndef messageArg(self):\n    message = \"If you construct an HTTPError with a 'message' argument, it wil be placed on the error page (underneath the status line by default).\"\n    raise cherrypy.HTTPError(500, message=message)",
    "docstring": "Respond with an HTTP 500 and a custom message.",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\tutorial\\tut10_http_errors.py",
    "ast_data": "FunctionDef name:messageArg arg:self arguments arg Assign Raise Call"
  },
  {
    "library": "scikit-learn",
    "name": "__sklearn_tags__",
    "source_code": "def __sklearn_tags__(self):\n    tags = super().__sklearn_tags__()\n    tags.input_tags.pairwise = get_tags(self.estimator).input_tags.pairwise\n    tags.input_tags.sparse = get_tags(self.estimator).input_tags.sparse\n    return tags",
    "docstring": "Indicate if wrapped estimator is using a precomputed Gram matrix",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\multiclass.py",
    "ast_data": "FunctionDef name:__sklearn_tags__ arg:self arguments arg Assign Call Call Assign Call Assign Call Return return:yes"
  },
  {
    "library": "scipy",
    "name": "_remove_optimizer_parameters",
    "source_code": "def _remove_optimizer_parameters(kwds):\n    kwds.pop('loc', None)\n    kwds.pop('scale', None)\n    kwds.pop('optimizer', None)\n    kwds.pop('method', None)\n    if kwds:\n        raise TypeError(f'Unknown arguments: {kwds}.')",
    "docstring": "Remove the optimizer-related keyword arguments 'loc', 'scale' and 'optimizer' from . Then check that is empty, and raise if it is not. This function is used in the fit method of distributions that override the default method and do not use the default optimization code. is modified in-place.",
    "type": "function",
    "file_path": "scipy\\scipy\\stats\\_continuous_distns.py",
    "ast_data": "FunctionDef name:_remove_optimizer_parameters arg:kwds arguments arg Call Call Call Call If Raise Call"
  },
  {
    "library": "cryptography",
    "name": "encode_private",
    "source_code": "def encode_private(self, private_key: ed25519.Ed25519PrivateKey, f_priv: _FragList) -> None:\n    public_key = private_key.public_key()\n    raw_private_key = private_key.private_bytes(Encoding.Raw, PrivateFormat.Raw, NoEncryption())\n    raw_public_key = public_key.public_bytes(Encoding.Raw, PublicFormat.Raw)\n    f_keypair = _FragList([raw_private_key, raw_public_key])\n    self.encode_public(public_key, f_priv)\n    f_priv.put_sshstr(f_keypair)",
    "docstring": "Write Ed25519 private key",
    "type": "method",
    "file_path": "cryptography\\src\\cryptography\\hazmat\\primitives\\serialization\\ssh.py",
    "ast_data": "FunctionDef name:encode_private arg:self arg:private_key arg:f_priv arguments arg arg arg Assign Call Assign Call Call Assign Call Assign Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_get_summary_op",
    "source_code": "def _get_summary_op(self):\n    summary_op = None\n    if self._summary_op is not None:\n        summary_op = self._summary_op\n    elif self._scaffold.summary_op is not None:\n        summary_op = self._scaffold.summary_op\n    if summary_op is None:\n        return None\n    if not isinstance(summary_op, list):\n        return [summary_op]\n    return summary_op",
    "docstring": "Fetches the summary op either from self._summary_op or self._scaffold. Returns: Returns a list of summary .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\training\\basic_session_run_hooks.py",
    "ast_data": "FunctionDef name:_get_summary_op arg:self arguments arg Assign If Compare Assign If Compare Assign If Compare Return return:no If Call Return return:yes Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "LSTMStateTuple",
    "source_code": "@tf_export(v1=['nn.rnn_cell.LSTMStateTuple'])\nclass LSTMStateTuple(_LSTMStateTuple):\n    __slots__ = ()\n\n    @property\n    def dtype(self):\n        c, h = self\n        if c.dtype != h.dtype:\n            raise TypeError('Inconsistent internal state: %s vs %s' % (str(c.dtype), str(h.dtype)))\n        return c.dtype",
    "docstring": "Tuple used by LSTM Cells for , , and output state. Stores two elements: , in that order. Where is the hidden state and is the output. Only used when .",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\layers\\legacy_rnn\\rnn_cell_impl.py",
    "ast_data": "ClassDef name:LSTMStateTuple Assign FunctionDef name:dtype arg:self arguments arg Assign If Compare Raise Call Call Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "steps",
    "source_code": "def steps(self):\n    self._current_step = 0\n    while self._inferred_steps is None or self._current_step < self._inferred_steps:\n        if self._insufficient_data:\n            break\n        can_run_full_execution = self._steps_per_execution_value == 1 or self._inferred_steps is None or self._inferred_steps - self._current_step >= self._steps_per_execution_value\n        if can_run_full_execution:\n            self._step_increment = self._steps_per_execution_value - 1\n            yield self._current_step\n            self._current_step += self._steps_per_execution_value\n        else:\n            steps_remaining = self._inferred_steps - self._current_step\n            self._steps_per_execution.assign(steps_remaining)\n            self._step_increment = steps_remaining - 1\n            yield self._current_step\n            self._current_step += steps_remaining\n            self._steps_per_execution.assign(self._steps_per_execution_value)",
    "docstring": "Yields steps for the current epoch.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\data_adapter.py",
    "ast_data": "FunctionDef name:steps arg:self arguments arg Assign While BoolOp Compare Compare If Assign BoolOp Compare Compare Compare If Assign Assign Call Assign Call"
  },
  {
    "library": "pytorch",
    "name": "verbose",
    "source_code": "class verbose:\n\n    def __init__(self, enable):\n        self.enable = enable\n\n    def __enter__(self):\n        if self.enable == VERBOSE_OFF:\n            return\n        st = torch._C._verbose.mkl_set_verbose(self.enable)\n        assert st, 'Failed to set MKL into verbose mode. Please consider to disable this verbose scope.'\n        return self\n\n    def __exit__(self, exc_type, exc_val, exc_tb):\n        torch._C._verbose.mkl_set_verbose(VERBOSE_OFF)\n        return False",
    "docstring": "On-demand oneMKL verbosing functionality. To make it easier to debug performance issues, oneMKL can dump verbose messages containing execution information like duration while executing the kernel. The verbosing functionality can be invoked via an environment variable named . However, this methodology dumps messages in all steps. Those are a large amount of verbose messages. Moreover, for investigating the performance issues, generally taking verbose messages for one single iteration is enough. This on-demand verbosing functionality makes it possible to control scope for verbose message dumping. In the following example, verbose messages will be dumped out for the second inference only. .. highlight:: python .. code-block:: python import torch model(data) with torch.backends.mkl.verbose(torch.backends.mkl.VERBOSE_ON): model(data) Args: level: Verbose level - ``: Enable verbosing",
    "type": "class",
    "file_path": "pytorch\\torch\\backends\\mkl\\__init__.py",
    "ast_data": "ClassDef name:verbose FunctionDef name:__init__ arg:self arg:enable arguments arg arg Assign FunctionDef name:__enter__ arg:self arguments arg If Compare Return return:no Assign Call Return return:yes FunctionDef name:__exit__ arg:self arg:exc_type arg:exc_val arg:exc_tb arguments arg arg arg arg Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_has_precomputed_row_splits",
    "source_code": "def _has_precomputed_row_splits(self):\n    return self._row_splits is not None",
    "docstring": "Returns true if has already been computed. If true, then will return its value without calling any TensorFlow ops.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\row_partition.py",
    "ast_data": "FunctionDef name:_has_precomputed_row_splits arg:self arguments arg Return return:yes Compare"
  },
  {
    "library": "pytorch",
    "name": "AllOrAnyReductionTypePromotionRule",
    "source_code": "class AllOrAnyReductionTypePromotionRule(ReductionTypePromotionRule):\n\n    def __init__(self, op_name: str):\n        super().__init__('aten', op_name, _prims_common.REDUCTION_OUTPUT_TYPE_KIND.ALWAYS_BOOL)\n\n    def preview_type_promotion(self, args: tuple, kwargs: dict) -> TypePromotionSnapshot:\n        assert len(args) >= 1, f'Reduction op torch.ops.{self.namespace}.{self.op_name} expects at least one argument'\n        arg = args[0]\n        assert isinstance(arg, torch.Tensor), f'type(arg)={type(arg)!r} is not torch.Tensor'\n        computation_dtype = torch.bool\n        result_dtype = torch.uint8 if arg.dtype == torch.uint8 else torch.bool\n        return TypePromotionSnapshot({0: computation_dtype}, {}, result_dtype)",
    "docstring": "Reference type promotion rule from torch.ops.aten.all or torch.ops.aten.any. This is a special case where computation dtype is always torch.bool. The result dtype is always uint8 if kwarg is uint8, otherwise torch.bool.",
    "type": "class",
    "file_path": "pytorch\\torch\\onnx\\_internal\\fx\\passes\\type_promotion.py",
    "ast_data": "ClassDef name:AllOrAnyReductionTypePromotionRule FunctionDef name:__init__ arg:self arg:op_name arguments arg arg Call Call FunctionDef name:preview_type_promotion arg:self arg:args arg:kwargs arguments arg arg arg Compare Call Assign Call Call Assign Assign Compare Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_PrefetchDataset",
    "source_code": "class _PrefetchDataset(dataset_ops.UnaryUnchangedStructureDataset):\n\n    def __init__(self, input_dataset, buffer_size, slack_period=None, name=None):\n        self._input_dataset = input_dataset\n        if buffer_size is None:\n            buffer_size = dataset_ops.AUTOTUNE\n        self._buffer_size = ops.convert_to_tensor(buffer_size, dtype=dtypes.int64, name='buffer_size')\n        self._name = name\n        with ops.colocate_with(input_dataset._variant_tensor):\n            variant_tensor = gen_dataset_ops.prefetch_dataset(input_dataset._variant_tensor, buffer_size=self._buffer_size, slack_period=slack_period, legacy_autotune=buffer_size == dataset_ops.AUTOTUNE, **self._common_args)\n        super().__init__(input_dataset, variant_tensor)",
    "docstring": "A that asynchronously prefetches its input.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\data\\ops\\prefetch_op.py",
    "ast_data": "ClassDef name:_PrefetchDataset FunctionDef name:__init__ arg:self arg:input_dataset arg:buffer_size arg:slack_period arg:name arguments arg arg arg arg arg Assign If Compare Assign Assign Call Assign With Call Assign Call Compare Call Call"
  },
  {
    "library": "tensorflow",
    "name": "contextmanager",
    "source_code": "def contextmanager(target: Callable[..., Iterator[_T]]) -> Callable[..., ContextManager[_T]]:\n    context_manager = _contextlib.contextmanager(target)\n    return tf_decorator.make_decorator(target, context_manager, 'contextmanager')",
    "docstring": "A tf_decorator-aware wrapper for . Usage is identical to . Args: target: A callable to be wrapped in a contextmanager. Returns: A callable that can be used inside of a statement.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\util\\tf_contextlib.py",
    "ast_data": "FunctionDef name:contextmanager arg:target arguments arg Assign Call Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "random_state",
    "source_code": "def random_state(state: RandomState | None=None):\n    if is_integer(state) or isinstance(state, (np.ndarray, np.random.BitGenerator)):\n        return np.random.RandomState(state)\n    elif isinstance(state, np.random.RandomState):\n        return state\n    elif isinstance(state, np.random.Generator):\n        return state\n    elif state is None:\n        return np.random\n    else:\n        raise ValueError('random_state must be an integer, array-like, a BitGenerator, Generator, a numpy RandomState, or None')",
    "docstring": "Helper function for processing random_state arguments. Parameters ---------- state : int, array-like, BitGenerator, Generator, np.random.RandomState, None. If receives an int, array-like, or BitGenerator, passes to np.random.RandomState() as seed. If receives an np.random RandomState or Generator, just returns that unchanged. If receives , returns np.random. If receives anything else, raises an informative ValueError. Default None. Returns ------- np.random.RandomState or np.random.Generator. If state is None, returns np.random",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\common.py",
    "ast_data": "FunctionDef name:random_state arg:state arguments arg If BoolOp Call Call Return return:yes Call If Call Return return:yes If Call Return return:yes If Compare Return return:yes Raise Call"
  },
  {
    "library": "pytorch",
    "name": "forward",
    "source_code": "@abstractmethod\ndef forward(self, input: torch.Tensor) -> torch.Tensor:\n    pass",
    "docstring": "forward function should take the input tensor and updates internal stats and return the original input Tensor",
    "type": "method",
    "file_path": "pytorch\\torch\\ao\\quantization\\observer.py",
    "ast_data": "FunctionDef name:forward arg:self arg:input arguments arg arg"
  },
  {
    "library": "scipy",
    "name": "_angular_rate_to_rotvec_dot_matrix",
    "source_code": "def _angular_rate_to_rotvec_dot_matrix(rotvecs):\n    norm = np.linalg.norm(rotvecs, axis=1)\n    k = np.empty_like(norm)\n    mask = norm > 0.0001\n    nm = norm[mask]\n    k[mask] = (1 - 0.5 * nm / np.tan(0.5 * nm)) / nm ** 2\n    mask = ~mask\n    nm = norm[mask]\n    k[mask] = 1 / 12 + 1 / 720 * nm ** 2\n    skew = _create_skew_matrix(rotvecs)\n    result = np.empty((len(rotvecs), 3, 3))\n    result[:] = np.identity(3)\n    result[:] += 0.5 * skew\n    result[:] += k[:, None, None] * np.matmul(skew, skew)\n    return result",
    "docstring": "Compute matrices to transform angular rates to rot. vector derivatives. The matrices depend on the current attitude represented as a rotation vector. Parameters ---------- rotvecs : ndarray, shape (n, 3) Set of rotation vectors. Returns ------- ndarray, shape (n, 3, 3)",
    "type": "function",
    "file_path": "scipy\\scipy\\spatial\\transform\\_rotation_spline.py",
    "ast_data": "FunctionDef name:_angular_rate_to_rotvec_dot_matrix arg:rotvecs arguments arg Assign Call Assign Call Assign Compare Assign Assign Call Assign Assign Assign Assign Call Assign Call Call Assign Call Call Return return:yes"
  },
  {
    "library": "scipy",
    "name": "impulse",
    "source_code": "def impulse(self, x0=None, t=None, n=None):\n    return dimpulse(self, x0=x0, t=t, n=n)",
    "docstring": "Return the impulse response of the discrete-time system. See for details.",
    "type": "method",
    "file_path": "scipy\\scipy\\signal\\_ltisys.py",
    "ast_data": "FunctionDef name:impulse arg:self arg:x0 arg:t arg:n arguments arg arg arg arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "save_to_file",
    "source_code": "def save_to_file(json_data, filename):\n    if filename[-5:] != '.json':\n        print('filename: %s' % filename)\n        filename += '.json'\n    with open(PATH_TO_DIR + '/' + filename, 'w') as f:\n        json.dump(json_data, f, sort_keys=True, indent=4)\n    print(' Successfully wrote configs to file `%s`.\\n' % filename)",
    "docstring": "Saves all detected configuration(s) into a JSON file. Args: json_data: Dict of all configurations found. filename: String that is the name of the output JSON file.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\tools\\tensorflow_builder\\config_detector\\config_detector.py",
    "ast_data": "FunctionDef name:save_to_file arg:json_data arg:filename arguments arg arg If Compare Call With Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "total_concentration",
    "source_code": "@property\ndef total_concentration(self):\n    return self._total_concentration",
    "docstring": "Sum of concentration parameters.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\distributions\\beta.py",
    "ast_data": "FunctionDef name:total_concentration arg:self arguments arg Return return:yes"
  },
  {
    "library": "django",
    "name": "_get_level",
    "source_code": "def _get_level(self):\n    if not hasattr(self, '_level'):\n        self._level = getattr(settings, 'MESSAGE_LEVEL', constants.INFO)\n    return self._level",
    "docstring": "Return the minimum recorded level. The default level is the `` level is used.",
    "type": "method",
    "file_path": "django\\django\\contrib\\messages\\storage\\base.py",
    "ast_data": "FunctionDef name:_get_level arg:self arguments arg If Call Assign Call Return return:yes"
  },
  {
    "library": "django",
    "name": "extra_forms",
    "source_code": "@property\ndef extra_forms(self):\n    return self.forms[self.initial_form_count():]",
    "docstring": "Return a list of all the extra forms in this formset.",
    "type": "method",
    "file_path": "django\\django\\forms\\formsets.py",
    "ast_data": "FunctionDef name:extra_forms arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "django",
    "name": "get_date_list",
    "source_code": "def get_date_list(self, queryset, date_type=None, ordering='ASC'):\n    date_field = self.get_date_field()\n    allow_empty = self.get_allow_empty()\n    if date_type is None:\n        date_type = self.get_date_list_period()\n    if self.uses_datetime_field:\n        date_list = queryset.datetimes(date_field, date_type, ordering)\n    else:\n        date_list = queryset.dates(date_field, date_type, ordering)\n    if date_list is not None and (not date_list) and (not allow_empty):\n        raise Http404(_('No %(verbose_name_plural)s available') % {'verbose_name_plural': queryset.model._meta.verbose_name_plural})\n    return date_list",
    "docstring": "Get a date list by calling , checking along the way for empty lists that aren't allowed.",
    "type": "method",
    "file_path": "django\\django\\views\\generic\\dates.py",
    "ast_data": "FunctionDef name:get_date_list arg:self arg:queryset arg:date_type arg:ordering arguments arg arg arg arg Assign Call Assign Call If Compare Assign Call If Assign Call Assign Call If BoolOp Compare Raise Call Call Return return:yes"
  },
  {
    "library": "scipy",
    "name": "expm_frechet_block_enlarge",
    "source_code": "def expm_frechet_block_enlarge(A, E):\n    n = A.shape[0]\n    M = np.vstack([np.hstack([A, E]), np.hstack([np.zeros_like(A), A])])\n    expm_M = scipy.linalg.expm(M)\n    return (expm_M[:n, :n], expm_M[:n, n:])",
    "docstring": "This is a helper function, mostly for testing and profiling. Return expm(A), frechet(A, E)",
    "type": "function",
    "file_path": "scipy\\scipy\\linalg\\_expm_frechet.py",
    "ast_data": "FunctionDef name:expm_frechet_block_enlarge arg:A arg:E arguments arg arg Assign Assign Call Call Call Call Assign Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "remove_tool",
    "source_code": "def remove_tool(self, name):\n    tool = self.get_tool(name)\n    if getattr(tool, 'toggled', False):\n        self.trigger_tool(tool, 'toolmanager')\n    self._remove_keys(name)\n    event = ToolEvent('tool_removed_event', self, tool)\n    self._callbacks.process(event.name, event)\n    del self._tools[name]",
    "docstring": "Remove tool named *name*. Parameters ---------- name : str Name of the tool.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\backend_managers.py",
    "ast_data": "FunctionDef name:remove_tool arg:self arg:name arguments arg arg Assign Call If Call Call Call Assign Call Call"
  },
  {
    "library": "tensorflow",
    "name": "assign_add",
    "source_code": "def assign_add(self, delta, use_locking=False, name=None, read_value=True):\n    assign = state_ops.assign_add(self._variable, delta, use_locking=use_locking, name=name)\n    if read_value:\n        return assign\n    return assign.op",
    "docstring": "Adds a value to this variable. This is essentially a shortcut for . Args: delta: A . The value to add to this variable. use_locking: If , use locking during the operation. name: The name of the operation to be created read_value: if True, will return something which evaluates to the new value of the variable; if False will return the assign op. Returns: A that will hold the new value of this variable after the addition has completed.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\ref_variable.py",
    "ast_data": "FunctionDef name:assign_add arg:self arg:delta arg:use_locking arg:name arg:read_value arguments arg arg arg arg arg Assign Call If Return return:yes Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "get_cell",
    "source_code": "def get_cell(self, *labels):\n    return SamplerCell(super(Sampler, self).get_cell(*labels))",
    "docstring": "Retrieves the cell.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\monitoring.py",
    "ast_data": "FunctionDef name:get_cell arg:self arguments arg arg Return return:yes Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "advise",
    "source_code": "def advise(self, options):\n    advise_pb = tfprof_output_pb2.AdviceProto()\n    opts = _build_advisor_options(options)\n    advise_pb.ParseFromString(print_mdl.Profile('advise'.encode('utf-8'), opts.SerializeToString()))\n    return advise_pb",
    "docstring": "Automatically detect problems and generate reports. Args: options: A dict of options. See ALL_ADVICE example above. Returns: An Advise proto that contains the reports from all checkers.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\profiler\\model_analyzer.py",
    "ast_data": "FunctionDef name:advise arg:self arg:options arguments arg arg Assign Call Assign Call Call Call Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_standardize_args",
    "source_code": "def _standardize_args(inputs, initial_state, constants, num_constants):\n    if isinstance(inputs, list):\n        assert initial_state is None and constants is None\n        if num_constants:\n            constants = inputs[-num_constants:]\n            inputs = inputs[:-num_constants]\n        if len(inputs) > 1:\n            initial_state = inputs[1:]\n            inputs = inputs[:1]\n        if len(inputs) > 1:\n            inputs = tuple(inputs)\n        else:\n            inputs = inputs[0]\n\n    def to_list_or_none(x):\n        if x is None or isinstance(x, list):\n            return x\n        if isinstance(x, tuple):\n            return list(x)\n        return [x]\n    initial_state = to_list_or_none(initial_state)\n    constants = to_list_or_none(constants)\n    return (inputs, initial_state, constants)",
    "docstring": "Standardizes to a single list of tensor inputs. When running a model loaded from a file, the input tensors and can be passed to as part of instead of by the dedicated keyword arguments. This method makes sure the arguments are separated and that and are lists of tensors (or None). Args: inputs: Tensor or list/tuple of tensors. which may include constants and initial states. In that case must be specified. initial_state: Tensor or list of tensors or None, initial states. constants: Tensor or list of tensors or None, constant tensors. num_constants: Expected number of constants (if constants are passed as part of the list. Returns: inputs: Single tensor or tuple of tensors. initial_state: List of tensors or None. constants: List of tensors or None.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\layers\\recurrent.py",
    "ast_data": "FunctionDef name:_standardize_args arg:inputs arg:initial_state arg:constants arg:num_constants arguments arg arg arg arg If Call BoolOp Compare Compare If Assign Assign If Compare Call Assign Assign If Compare Call Assign Call Assign FunctionDef name:to_list_or_none arg:x arguments arg If BoolOp Compare Call Return return:yes If Call Return return:yes Call Return return:yes Assign Call Assign Call Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "predict_proba",
    "source_code": "def predict_proba(self, X):\n    check_is_fitted(self)\n    mean_proba = np.zeros((_num_samples(X), len(self.classes_)))\n    for calibrated_classifier in self.calibrated_classifiers_:\n        proba = calibrated_classifier.predict_proba(X)\n        mean_proba += proba\n    mean_proba /= len(self.calibrated_classifiers_)\n    return mean_proba",
    "docstring": "Calibrated probabilities of classification. This function returns calibrated probabilities of classification according to each class on an array of test vectors X. Parameters ---------- X : array-like of shape (n_samples, n_features) The samples, as accepted by . Returns ------- C : ndarray of shape (n_samples, n_classes) The predicted probas.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\calibration.py",
    "ast_data": "FunctionDef name:predict_proba arg:self arg:X arguments arg arg Call Assign Call Call Call For Assign Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "OptimizerSingleTensorPattern",
    "source_code": "class OptimizerSingleTensorPattern(Pattern):\n\n    def __init__(self, prof: profile, should_benchmark: bool=False):\n        super().__init__(prof, should_benchmark)\n        self.name = 'Optimizer Single Tensor Pattern'\n        self.optimizers_with_foreach = ['adam', 'sgd', 'adamw']\n        self.description = \"Deteced optimizer running with single tensor implementation. Please enable multi tensor implementation by passing 'foreach=True' into optimizer.\"\n        self.url = ''\n\n    def match(self, event: _ProfilerEvent):\n        for optimizer in self.optimizers_with_foreach:\n            if event.name.endswith(f'_single_tensor_{optimizer}'):\n                return True\n        return False",
    "docstring": "This pattern identifies if we are using the single-tensor version of an optimizer. example: optimizer = torch.optim.SGD(model.parameters(), lr=0.1) By adding foreach=True to enable multi-tensor optimizer, we can gain speedup when the kernels are relatively small. Pattern: XXXXX: _single_tenser_ Algorithm: String match",
    "type": "class",
    "file_path": "pytorch\\torch\\profiler\\_pattern_matcher.py",
    "ast_data": "ClassDef name:OptimizerSingleTensorPattern FunctionDef name:__init__ arg:self arg:prof arg:should_benchmark arguments arg arg arg Call Call Assign Assign Assign Assign FunctionDef name:match arg:self arg:event arguments arg arg For If Call Return return:yes Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "step_function",
    "source_code": "def step_function(model, iterator):\n\n    def run_step(data):\n        outputs = model.train_step(data)\n        with ops.control_dependencies(_minimum_control_deps(outputs)):\n            model._train_counter.assign_add(1)\n        return outputs\n    data = next(iterator)\n    outputs = model.distribute_strategy.run(run_step, args=(data,))\n    outputs = reduce_per_replica(outputs, self.distribute_strategy, reduction='first')\n    write_scalar_summaries(outputs, step=model._train_counter)\n    return outputs",
    "docstring": "Runs a single training step.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\training.py",
    "ast_data": "FunctionDef name:step_function arg:model arg:iterator arguments arg arg FunctionDef name:run_step arg:data arguments arg Assign Call With Call Call Call Return return:yes Assign Call Assign Call Assign Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_setup_libdevice_routing",
    "source_code": "@classmethod\n@functools.lru_cache(None)\ndef _setup_libdevice_routing(cls):\n    from torch._inductor.codegen.common import OpDecompositions\n    for fn_name in torch._inductor.utils.op_requires_libdevice_fp64:\n        assert hasattr(cls, fn_name)\n        original_impl = getattr(cls, fn_name)\n\n        def decomposition_router(x, _original_impl, _fn_name):\n            if x.dtype != torch.float64:\n                return _original_impl(x)\n            else:\n                return getattr(OpDecompositions, _fn_name)(x).value\n        if fn_name == 'sigmoid':\n            assert hasattr(OpDecompositions, 'sigmoid')\n            fn = functools.partial(decomposition_router, _original_impl=original_impl, _fn_name=fn_name)\n            fn.__name__ = fn_name\n            setattr(cls, fn_name, staticmethod(fn))\n            continue\n\n        def dtype_router(x, _original_impl, _fn_name):\n            if x.dtype == torch.float64:\n                return f'libdevice.{_fn_name}({x})'\n            else:\n                return _original_impl(x)\n        fn = functools.partial(dtype_router, _original_impl=original_impl, _fn_name=fn_name)\n        fn.__name__ = fn_name\n        setattr(cls, fn_name, staticmethod(fn))",
    "docstring": "Set up routing to libdevice implementations for fp64 inputs.",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\codegen\\triton.py",
    "ast_data": "FunctionDef name:_setup_libdevice_routing arg:cls arguments arg For Call Assign Call FunctionDef name:decomposition_router arg:x arg:_original_impl arg:_fn_name arguments arg arg arg If Compare Return return:yes Call Return return:yes Call Call If Compare Call Assign Call Assign Call Call FunctionDef name:dtype_router arg:x arg:_original_impl arg:_fn_name arguments arg arg arg If Compare Return return:yes Return return:yes Call Assign Call Assign Call Call Call"
  },
  {
    "library": "matplotlib",
    "name": "__init__",
    "source_code": "def __init__(self, widthA=1.0, angleA=0, widthB=1.0, angleB=0):\n    super().__init__(widthA=widthA, lengthA=0, angleA=angleA, widthB=widthB, lengthB=0, angleB=angleB)",
    "docstring": "Parameters ---------- widthA, widthB : float, default: 1.0 Width of the bracket. angleA, angleB : float, default: 0 degrees Orientation of the bracket, as a counterclockwise angle. 0 degrees means perpendicular to the line.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\patches.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:widthA arg:angleA arg:widthB arg:angleB arguments arg arg arg arg arg Call Call"
  },
  {
    "library": "django",
    "name": "ImageFileDescriptor",
    "source_code": "class ImageFileDescriptor(FileDescriptor):\n\n    def __set__(self, instance, value):\n        previous_file = instance.__dict__.get(self.field.attname)\n        super().__set__(instance, value)\n        if previous_file is not None:\n            self.field.update_dimension_fields(instance, force=True)",
    "docstring": "Just like the FileDescriptor, but for ImageFields. The only difference is assigning the width/height to the width_field/height_field, if appropriate.",
    "type": "class",
    "file_path": "django\\django\\db\\models\\fields\\files.py",
    "ast_data": "ClassDef name:ImageFileDescriptor FunctionDef name:__set__ arg:self arg:instance arg:value arguments arg arg arg Assign Call Call Call If Compare Call"
  },
  {
    "library": "tensorflow",
    "name": "replica_id_in_sync_group",
    "source_code": "@property\ndef replica_id_in_sync_group(self):\n    return self._replica_id_in_sync_group",
    "docstring": "Returns the replica ID.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\distribute_lib.py",
    "ast_data": "FunctionDef name:replica_id_in_sync_group arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_extract_name_arg",
    "source_code": "def _extract_name_arg(args, kwargs, name_index):\n    if name_index < 0:\n        name_value = None\n    elif name_index < len(args):\n        name_value = args[name_index]\n        args = args[:name_index] + args[name_index + 1:]\n    else:\n        name_value = kwargs.pop('name', None)\n    return (args, kwargs, name_value)",
    "docstring": "Extracts the parameter and returns .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\util\\dispatch.py",
    "ast_data": "FunctionDef name:_extract_name_arg arg:args arg:kwargs arg:name_index arguments arg arg arg If Compare Assign If Compare Call Assign Assign Assign Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "reshow",
    "source_code": "def reshow(self):\n    self._shown = False\n    self.show()",
    "docstring": "A special method to re-show the figure in the notebook.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\backends\\backend_nbagg.py",
    "ast_data": "FunctionDef name:reshow arg:self arguments arg Assign Call"
  },
  {
    "library": "numpy",
    "name": "index",
    "source_code": "@set_module('numpy.strings')\ndef index(a, sub, start=0, end=None):\n    end = end if end is not None else MAX\n    return _index_ufunc(a, sub, start, end)",
    "docstring": "Like , but raises :exc: when the substring is not found. Parameters ---------- a : array-like, with `` dtype start, end : array_like, with any integer dtype, optional Returns ------- out : ndarray Output array of ints. See Also -------- find, str.index Examples -------- >>> import numpy as np >>> a = np.array([\"Computer Science\"]) >>> np.strings.index(a, \"Science\", start=0, end=None) array([9])",
    "type": "function",
    "file_path": "numpy\\numpy\\_core\\strings.py",
    "ast_data": "FunctionDef name:index arg:a arg:sub arg:start arg:end arguments arg arg arg arg Assign Compare Return return:yes Call Call"
  },
  {
    "library": "kornia",
    "name": "to",
    "source_code": "def to(self, device: Device=None, dtype: Dtype=None) -> Image:\n    if device is not None and isinstance(device, torch.dtype):\n        dtype, device = (device, None)\n    self._data = self.data.to(device, dtype)\n    return self",
    "docstring": "Move the image to the given device and dtype. Args: device: the device to move the image to. dtype: the data type to cast the image to. Returns: Image: the image moved to the given device and dtype.",
    "type": "method",
    "file_path": "kornia\\kornia\\image\\image.py",
    "ast_data": "FunctionDef name:to arg:self arg:device arg:dtype arguments arg arg arg If BoolOp Compare Call Assign Assign Call Return return:yes"
  },
  {
    "library": "django",
    "name": "aprefetch_related_objects",
    "source_code": "async def aprefetch_related_objects(model_instances, *related_lookups):\n    return await sync_to_async(prefetch_related_objects)(model_instances, *related_lookups)",
    "docstring": "See prefetch_related_objects().",
    "type": "function",
    "file_path": "django\\django\\db\\models\\query.py",
    "ast_data": "AsyncFunctionDef name:aprefetch_related_objects arg:model_instances arguments arg arg Return return:yes Call Call"
  },
  {
    "library": "kornia",
    "name": "KeyNetAffNetHardNet",
    "source_code": "class KeyNetAffNetHardNet(LocalFeature):\n\n    def __init__(self, num_features: int=8000, upright: bool=False, device: Optional[Device]=None, scale_laf: float=1.0) -> None:\n        if device is None:\n            device = torch.device('cpu')\n        ori_module = PassLAF() if upright else LAFOrienter(angle_detector=OriNet(True))\n        detector = KeyNetDetector(True, num_features=num_features, ori_module=ori_module, aff_module=LAFAffNetShapeEstimator(True).eval()).to(device)\n        descriptor = LAFDescriptor(None, patch_size=32, grayscale_descriptor=True).to(device)\n        super().__init__(detector, descriptor, scale_laf)",
    "docstring": "Convenience module, which implements KeyNet detector + AffNet + HardNet descriptor. .. image:: _static/img/keynet_affnet.jpg",
    "type": "class",
    "file_path": "kornia\\kornia\\feature\\integrated.py",
    "ast_data": "ClassDef name:KeyNetAffNetHardNet FunctionDef name:__init__ arg:self arg:num_features arg:upright arg:device arg:scale_laf arguments arg arg arg arg arg If Compare Assign Call Assign Call Call Call Assign Call Call Call Call Assign Call Call Call Call"
  },
  {
    "library": "matplotlib",
    "name": "fully_containsy",
    "source_code": "def fully_containsy(self, y):\n    y0, y1 = self.intervaly\n    return y0 < y < y1 or y0 > y > y1",
    "docstring": "Return whether *y* is in the open (:attr:, :attr:) interval.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\transforms.py",
    "ast_data": "FunctionDef name:fully_containsy arg:self arg:y arguments arg arg Assign Return return:yes BoolOp Compare Compare"
  },
  {
    "library": "pytorch",
    "name": "rrelu",
    "source_code": "def rrelu(input: Tensor, lower: float=1.0 / 8, upper: float=1.0 / 3, training: bool=False, inplace: bool=False) -> Tensor:\n    if has_torch_function_unary(input):\n        return handle_torch_function(rrelu, (input,), input, lower=lower, upper=upper, training=training, inplace=inplace)\n    if inplace:\n        result = torch.rrelu_(input, lower, upper, training)\n    else:\n        result = torch.rrelu(input, lower, upper, training)\n    return result",
    "docstring": "rrelu(input, lower=1./8, upper=1./3, training=False, inplace=False) -> Tensor Randomized leaky ReLU. See :class: for more details.",
    "type": "function",
    "file_path": "pytorch\\torch\\nn\\functional.py",
    "ast_data": "FunctionDef name:rrelu arg:input arg:lower arg:upper arg:training arg:inplace arguments arg arg arg arg arg If Call Return return:yes Call If Assign Call Assign Call Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "fit",
    "source_code": "@_fit_context(prefer_skip_nested_validation=True)\ndef fit(self, X, y, sample_weight=None, check_input=True):\n    super()._fit(X, y, sample_weight=sample_weight, check_input=check_input)\n    return self",
    "docstring": "Build a decision tree regressor from the training set (X, y). Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) The training input samples. Internally, it will be converted to `` for maximum efficiency. sample_weight : array-like of shape (n_samples,), default=None Sample weights. If None, then samples are equally weighted. Splits that would create child nodes with net zero or negative weight are ignored while searching for a split in each node. check_input : bool, default=True Allow to bypass several input checking. Don't use this parameter unless you know what you're doing. Returns ------- self : DecisionTreeRegressor Fitted estimator.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\tree\\_classes.py",
    "ast_data": "FunctionDef name:fit arg:self arg:X arg:y arg:sample_weight arg:check_input arguments arg arg arg arg arg Call Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "lp_pool1d",
    "source_code": "def lp_pool1d(input: Tensor, norm_type: Union[int, float], kernel_size: int, stride: Optional[BroadcastingList1[int]]=None, ceil_mode: bool=False) -> Tensor:\n    if has_torch_function_unary(input):\n        return handle_torch_function(lp_pool1d, (input,), input, norm_type, kernel_size, stride=stride, ceil_mode=ceil_mode)\n    if stride is not None:\n        out = avg_pool1d(input.pow(norm_type), kernel_size, stride, 0, ceil_mode)\n    else:\n        out = avg_pool1d(input.pow(norm_type), kernel_size, padding=0, ceil_mode=ceil_mode)\n    return (torch.sign(out) * relu(torch.abs(out))).mul(kernel_size).pow(1.0 / norm_type)",
    "docstring": "Apply a 1D power-average pooling over an input signal composed of several input planes. If the sum of all inputs to the power of is zero, the gradient is set to zero as well. See :class: for details.",
    "type": "function",
    "file_path": "pytorch\\torch\\nn\\functional.py",
    "ast_data": "FunctionDef name:lp_pool1d arg:input arg:norm_type arg:kernel_size arg:stride arg:ceil_mode arguments arg arg arg arg arg If Call Return return:yes Call If Compare Assign Call Call Assign Call Call Return return:yes Call Call Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "multiply",
    "source_code": "@tf_export('math.multiply', 'multiply')\n@dispatch.register_binary_elementwise_api\n@dispatch.add_dispatch_support\ndef multiply(x, y, name=None):\n    return gen_math_ops.mul(x, y, name)",
    "docstring": "Returns an element-wise x * y. For example: >>> x = tf.constant(([1, 2, 3, 4])) >>> tf.math.multiply(x, x) Since will convert its arguments to s, you can also pass in non- arguments: >>> tf.math.multiply(7,6) If is not the same as , they will be broadcast to a compatible shape. (More about broadcasting [here]( For example: >>> x = tf.ones([1, 2]); >>> y = tf.ones([2, 1]); >>> x * y # Taking advantage of operator overriding The reduction version of this elementwise operation is Args: x: A Tensor. Must be one of the following types: , , , , , , , , , , , . y: A . Must have the same type as . name: A name for the operation (optional). Returns: A . Has the same type as . Raises: * InvalidArgumentError: When and have incompatible shapes or types.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\math_ops.py",
    "ast_data": "FunctionDef name:multiply arg:x arg:y arg:name arguments arg arg arg Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "read_metadata",
    "source_code": "def read_metadata(self) -> Metadata:\n    return Metadata(state_dict_metadata={})",
    "docstring": "Extends the default StorageReader to support building the metadata file",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\checkpoint\\format_utils.py",
    "ast_data": "FunctionDef name:read_metadata arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "kornia",
    "name": "__init__",
    "source_code": "def __init__(self, msg: str, *args: Any, **kwargs: Any) -> None:\n    doc_help = '\\n Please check documents here: https://kornia.readthedocs.io/en/latest/geometry.camera.stereo.html for further information and examples.'\n    final_msg = msg + doc_help\n    super().__init__(final_msg, *args, **kwargs)",
    "docstring": "Construct custom exception for the :module: module. Adds a general helper module redirecting the user to the proper documentation site. Args: msg: Custom message to add to the general message. *args: Additional argument passthrough **kwargs: Additional argument passthrough",
    "type": "method",
    "file_path": "kornia\\kornia\\geometry\\camera\\stereo.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:msg arguments arg arg arg arg Assign Assign Call Call"
  },
  {
    "library": "tensorflow",
    "name": "maximum",
    "source_code": "@dispatch.add_dispatch_support\n@doc_controls.do_not_generate_docs\ndef maximum(x, y):\n    return math_ops.maximum(x, y)",
    "docstring": "Element-wise maximum of two tensors. Args: x: Tensor or variable. y: Tensor or variable. Returns: A tensor with the element wise maximum value(s) of and . Examples: >>> x = tf.Variable([[1, 2], [3, 4]]) >>> y = tf.Variable([[2, 1], [0, -1]]) >>> m = tf.keras.backend.maximum(x, y) >>> m",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\backend.py",
    "ast_data": "FunctionDef name:maximum arg:x arg:y arguments arg arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "_named_members",
    "source_code": "def _named_members(self, get_members_fn, prefix='', recurse=True, remove_duplicate: bool=True):\n    memo = set()\n    modules = self.named_modules(prefix=prefix, remove_duplicate=remove_duplicate) if recurse else [(prefix, self)]\n    for module_prefix, module in modules:\n        members = get_members_fn(module)\n        for k, v in members:\n            if v is None or v in memo:\n                continue\n            if remove_duplicate:\n                memo.add(v)\n            name = module_prefix + ('.' if module_prefix else '') + k\n            yield (name, v)",
    "docstring": "Help yield various names + members of modules.",
    "type": "method",
    "file_path": "pytorch\\torch\\nn\\modules\\module.py",
    "ast_data": "FunctionDef name:_named_members arg:self arg:get_members_fn arg:prefix arg:recurse arg:remove_duplicate arguments arg arg arg arg arg Assign Call Assign Call For Assign Call For If BoolOp Compare Compare If Call Assign"
  },
  {
    "library": "tensorflow",
    "name": "_XDivyGrad",
    "source_code": "@ops.RegisterGradient('Xdivy')\ndef _XDivyGrad(op: ops.Operation, grad):\n    x = op.inputs[0]\n    y = op.inputs[1]\n    sx = array_ops.shape(x)\n    sy = array_ops.shape(y)\n    rx, ry = gen_array_ops.broadcast_gradient_args(sx, sy)\n    with ops.control_dependencies([grad]):\n        not_zero_x = math_ops.cast(math_ops.not_equal(x, math_ops.cast(0.0, dtype=x.dtype)), dtype=x.dtype)\n        partial_x = gen_math_ops.xdivy(not_zero_x, y)\n        partial_y = gen_math_ops.xdivy(math_ops.negative(x), y ** 2)\n        return (array_ops.reshape(math_ops.reduce_sum(partial_x * grad, rx), sx), array_ops.reshape(math_ops.reduce_sum(partial_y * grad, ry), sy))",
    "docstring": "Returns gradient of xdivy(x, y) with respect to x and y.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\math_grad.py",
    "ast_data": "FunctionDef name:_XDivyGrad arg:op arg:grad arguments arg arg Assign Assign Assign Call Assign Call Assign Call With Call Assign Call Call Call Assign Call Assign Call Call Return return:yes Call Call Call Call Call"
  },
  {
    "library": "cherrypy",
    "name": "__init__",
    "source_code": "def __init__(self, loc='/', port=80, opts=None, apache_path='apache', handler='cherrypy._cpmodpy::handler'):\n    self.loc = loc\n    self.port = port\n    self.opts = opts\n    self.apache_path = apache_path\n    self.handler = handler",
    "docstring": "Initialize a `` server.",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\_cpmodpy.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:loc arg:port arg:opts arg:apache_path arg:handler arguments arg arg arg arg arg arg Assign Assign Assign Assign Assign"
  },
  {
    "library": "seaborn",
    "name": "__init__",
    "source_code": "def __init__(self, plotter, palette=None, order=None, norm=None, saturation=1):\n    super().__init__(plotter)\n    data = plotter.plot_data.get('hue', pd.Series(dtype=float))\n    if isinstance(palette, np.ndarray):\n        msg = 'Numpy array is not a supported type for `palette`. Please convert your palette to a list. This will become an error in v0.14'\n        warnings.warn(msg, stacklevel=4)\n        palette = palette.tolist()\n    if data.isna().all():\n        if palette is not None:\n            msg = 'Ignoring `palette` because no `hue` variable has been assigned.'\n            warnings.warn(msg, stacklevel=4)\n    else:\n        map_type = self.infer_map_type(palette, norm, plotter.input_format, plotter.var_types['hue'])\n        if map_type == 'numeric':\n            data = pd.to_numeric(data)\n            levels, lookup_table, norm, cmap = self.numeric_mapping(data, palette, norm)\n        elif map_type == 'categorical':\n            cmap = norm = None\n            levels, lookup_table = self.categorical_mapping(data, palette, order)\n        else:\n            cmap = norm = None\n            levels, lookup_table = self.categorical_mapping(list(data), palette, order)\n        self.saturation = saturation\n        self.map_type = map_type\n        self.lookup_table = lookup_table\n        self.palette = palette\n        self.levels = levels\n        self.norm = norm\n        self.cmap = cmap",
    "docstring": "Map the levels of the variable to distinct colors. Parameters ---------- # TODO add generic parameters",
    "type": "method",
    "file_path": "seaborn\\seaborn\\_base.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:plotter arg:palette arg:order arg:norm arg:saturation arguments arg arg arg arg arg arg Call Call Assign Call Call If Call Assign Call Assign Call If Call Call If Compare Assign Call Assign Call If Compare Assign Call Assign Call If Compare Assign Assign Call Assign Assign Call Call Assign Assign Assign Assign Assign Assign Assign"
  },
  {
    "library": "tensorflow",
    "name": "object_identifier",
    "source_code": "@abc.abstractproperty\ndef object_identifier(self):\n    raise NotImplementedError",
    "docstring": "String stored in object identifier field in the SavedModel proto. Returns: A string with the object identifier, which is used at load time.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\saving\\saved_model\\base_serialization.py",
    "ast_data": "FunctionDef name:object_identifier arg:self arguments arg Raise"
  },
  {
    "library": "pytorch",
    "name": "export",
    "source_code": "def export(fn):\n    fn._torchscript_modifier = FunctionModifiers.EXPORT\n    return fn",
    "docstring": "This decorator indicates that a method on an `ScriptModuleforward@torch.jit.exportimplicitly_compiled_methodmforwardanother_forwardimplicitly_compiled_methodunused_method@torch.jit.export` m = torch.jit.script(MyModule())",
    "type": "function",
    "file_path": "pytorch\\torch\\_jit_internal.py",
    "ast_data": "FunctionDef name:export arg:fn arguments arg Assign Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "ipc_collect",
    "source_code": "def ipc_collect():\n    _lazy_init()\n    return torch._C._cuda_ipc_collect()",
    "docstring": "Force collects GPU memory after it has been released by CUDA IPC. .. note:: Checks if any sent CUDA tensors could be cleaned from the memory. Force closes shared memory file used for reference counting if there is no active counters. Useful when the producer process stopped actively sending tensors and want to release unused memory.",
    "type": "function",
    "file_path": "pytorch\\torch\\cuda\\__init__.py",
    "ast_data": "FunctionDef name:ipc_collect arguments Call Return return:yes Call"
  },
  {
    "library": "django",
    "name": "add_extra",
    "source_code": "def add_extra(self, select, select_params, where, params, tables, order_by):\n    if select:\n        select_pairs = {}\n        if select_params:\n            param_iter = iter(select_params)\n        else:\n            param_iter = iter([])\n        for name, entry in select.items():\n            self.check_alias(name)\n            entry = str(entry)\n            entry_params = []\n            pos = entry.find('%s')\n            while pos != -1:\n                if pos == 0 or entry[pos - 1] != '%':\n                    entry_params.append(next(param_iter))\n                pos = entry.find('%s', pos + 2)\n            select_pairs[name] = (entry, entry_params)\n        self.extra.update(select_pairs)\n    if where or params:\n        self.where.add(ExtraWhere(where, params), AND)\n    if tables:\n        self.extra_tables += tuple(tables)\n    if order_by:\n        self.extra_order_by = order_by",
    "docstring": "Add data to the various extra_* attributes for user-created additions to the query.",
    "type": "method",
    "file_path": "django\\django\\db\\models\\sql\\query.py",
    "ast_data": "FunctionDef name:add_extra arg:self arg:select arg:select_params arg:where arg:params arg:tables arg:order_by arguments arg arg arg arg arg arg arg If Assign If Assign Call Assign Call For Call Call Assign Call Assign Assign Call While Compare If BoolOp Compare Compare Call Call Assign Call Assign Call If BoolOp Call Call If Call If Assign"
  },
  {
    "library": "sphinx",
    "name": "make_footnote",
    "source_code": "def make_footnote(doc: nodes.document, label: str, uri: str) -> nodes.footnote:\n    footnote = nodes.footnote(uri)\n    para = nodes.paragraph()\n    para.append(nodes.Text(uri))\n    footnote.append(para)\n    footnote.insert(0, nodes.label('', label))\n    doc.note_autofootnote(footnote)\n    return footnote",
    "docstring": "Create a footnote node with children",
    "type": "method",
    "file_path": "sphinx\\sphinx\\builders\\_epub_base.py",
    "ast_data": "FunctionDef name:make_footnote arg:doc arg:label arg:uri arguments arg arg arg Assign Call Assign Call Call Call Call Call Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "bracket_category_matcher",
    "source_code": "@staticmethod\ndef bracket_category_matcher(title: str):\n    pairs = [('[dynamo]', 'dynamo'), ('[torchdynamo]', 'dynamo'), ('[torchinductor]', 'inductor'), ('[inductor]', 'inductor'), ('[codemod', 'skip'), ('[profiler]', 'profiler'), ('[functorch]', 'functorch'), ('[autograd]', 'autograd_frontend'), ('[quantization]', 'quantization'), ('[nn]', 'nn_frontend'), ('[complex]', 'complex_frontend'), ('[mps]', 'mps'), ('[optimizer]', 'optimizer_frontend'), ('[xla]', 'xla')]\n    title_lower = title.lower()\n    for bracket, category in pairs:\n        if bracket in title_lower:\n            return category\n    return None",
    "docstring": "Categorize a commit based on the presence of a bracketed category in the title. Args: title (str): title to seaarch Returns: optional[str]",
    "type": "method",
    "file_path": "pytorch\\scripts\\release_notes\\commitlist.py",
    "ast_data": "FunctionDef name:bracket_category_matcher arg:title arguments arg Assign Assign Call For If Compare Return return:yes Return return:no"
  },
  {
    "library": "tensorflow",
    "name": "attributes",
    "source_code": "@property\ndef attributes(self) -> Any:\n    attrs = self.definition.attr\n    attrs.pop(attributes_lib.EAGER_RUNTIME_CONSTRUCTION_CONTEXT, None)\n    return attrs",
    "docstring": "Returns FunctionDef attributes in the Runtime.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\polymorphic_function\\atomic_function.py",
    "ast_data": "FunctionDef name:attributes arg:self arguments arg Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "Locations",
    "source_code": "class Locations(object):\n\n    def __init__(self, functions):\n        self._functions = functions\n        self._location_key_to_location = {}\n\n    def index_of(self, file_path, line_number, called_function_name, called_file_path, called_function_start_line):\n        location_key = (file_path, called_function_name, line_number)\n        if location_key in self._location_key_to_location:\n            location = self._location_key_to_location[location_key]\n            return location.id\n        else:\n            location_index = len(self._location_key_to_location) + 1\n            location = profile_pb2.Location()\n            location.id = location_index\n            self._location_key_to_location[location_key] = location\n            line = location.line.add()\n            line.function_id = self._functions.index_of(called_file_path, called_function_name, called_function_start_line)\n            line.line = line_number\n            return location_index\n\n    def location_protos(self):\n        return self._location_key_to_location.values()",
    "docstring": "Keeps track of protos for pprof profile. store information about function call locations.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\profiler\\pprof_profiler.py",
    "ast_data": "ClassDef name:Locations FunctionDef name:__init__ arg:self arg:functions arguments arg arg Assign Assign FunctionDef name:index_of arg:self arg:file_path arg:line_number arg:called_function_name arg:called_file_path arg:called_function_start_line arguments arg arg arg arg arg arg Assign If Compare Assign Return return:yes Assign Call Assign Call Assign Assign Assign Call Assign Call Assign Return return:yes FunctionDef name:location_protos arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "django",
    "name": "get_languages",
    "source_code": "@functools.lru_cache\ndef get_languages():\n    return {key.lower(): value for key, value in dict(settings.LANGUAGES).items()}",
    "docstring": "Cache of settings.LANGUAGES in a dictionary for easy lookups by key. Convert keys to lowercase as they should be treated as case-insensitive.",
    "type": "function",
    "file_path": "django\\django\\utils\\translation\\trans_real.py",
    "ast_data": "FunctionDef name:get_languages arguments Return return:yes Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "developer_warning",
    "source_code": "def developer_warning(msg: str) -> None:\n    if config.developer_warnings:\n        log.warning(msg)\n    else:\n        log.info(msg)",
    "docstring": "Warnings that will be actionable for PyTorch developers, but not end users. Allows us to easily disable them in stable releases but keep them on for nightly builds.",
    "type": "function",
    "file_path": "pytorch\\torch\\_inductor\\utils.py",
    "ast_data": "FunctionDef name:developer_warning arg:msg arguments arg If Call Call"
  },
  {
    "library": "tensorflow",
    "name": "get_resource",
    "source_code": "def get_resource(self, feature_column, name):\n    del feature_column, name\n    raise NotImplementedError('StateManager.get_resource')",
    "docstring": "Returns an already created resource. Resources can be things such as tables, variables, trackables, etc. Args: feature_column: A object this variable corresponds to. name: Name of the resource.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\feature_column\\feature_column_v2.py",
    "ast_data": "FunctionDef name:get_resource arg:self arg:feature_column arg:name arguments arg arg arg Raise Call"
  },
  {
    "library": "django",
    "name": "__delitem__",
    "source_code": "def __delitem__(self, key):\n    del self.dicts[-1][key]",
    "docstring": "Delete a variable from the current context",
    "type": "method",
    "file_path": "django\\django\\template\\context.py",
    "ast_data": "FunctionDef name:__delitem__ arg:self arg:key arguments arg arg"
  },
  {
    "library": "pytorch",
    "name": "Event",
    "source_code": "class Event(torch._C._XpuEventBase):\n\n    def __new__(cls, enable_timing=False):\n        return super().__new__(cls, enable_timing=enable_timing)\n\n    def record(self, stream=None) -> None:\n        if stream is None:\n            stream = torch.xpu.current_stream()\n        super().record(stream)\n\n    def wait(self, stream=None) -> None:\n        if stream is None:\n            stream = torch.xpu.current_stream()\n        super().wait(stream)\n\n    def query(self) -> bool:\n        return super().query()\n\n    def elapsed_time(self, end_event):\n        return super().elapsed_time(end_event)\n\n    def synchronize(self) -> None:\n        super().synchronize()\n\n    @property\n    def _as_parameter_(self):\n        return ctypes.c_void_p(self.sycl_event)\n\n    def __repr__(self):\n        if self.sycl_event:\n            return f'torch.xpu.Event(sycl_event={self.sycl_event:#x})'\n        else:\n            return 'torch.xpu.Event(uninitialized)'",
    "docstring": "Wrapper around a XPU event. XPU events are synchronization markers that can be used to monitor the device's progress, and to synchronize XPU streams. The underlying XPU events are lazily initialized when the event is first recorded. After creation, only streams on the same device may record the event. However, streams on any device can wait on the event. Args: enable_timing (bool, optional): indicates if the event should measure time (default: ``)",
    "type": "class",
    "file_path": "pytorch\\torch\\xpu\\streams.py",
    "ast_data": "ClassDef name:Event FunctionDef name:__new__ arg:cls arg:enable_timing arguments arg arg Return return:yes Call Call FunctionDef name:record arg:self arg:stream arguments arg arg If Compare Assign Call Call Call FunctionDef name:wait arg:self arg:stream arguments arg arg If Compare Assign Call Call Call FunctionDef name:query arg:self arguments arg Return return:yes Call Call FunctionDef name:elapsed_time arg:self arg:end_event arguments arg arg Return return:yes Call Call FunctionDef name:synchronize arg:self arguments arg Call Call FunctionDef name:_as_parameter_ arg:self arguments arg Return return:yes Call FunctionDef name:__repr__ arg:self arguments arg If Return return:yes Return return:yes"
  },
  {
    "library": "pandas",
    "name": "describe",
    "source_code": "@abstractmethod\ndef describe(self, percentiles: Sequence[float] | np.ndarray) -> DataFrame | Series:\n    pass",
    "docstring": "Do describe either series or dataframe. Parameters ---------- percentiles : list-like of numbers The percentiles to include in the output.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\methods\\describe.py",
    "ast_data": "FunctionDef name:describe arg:self arg:percentiles arguments arg arg"
  },
  {
    "library": "pandas",
    "name": "__dlpack_device__",
    "source_code": "def __dlpack_device__(self) -> tuple[DlpackDeviceType, int | None]:\n    return (DlpackDeviceType.CPU, None)",
    "docstring": "Device type and device ID for where the data in the buffer resides.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\interchange\\buffer.py",
    "ast_data": "FunctionDef name:__dlpack_device__ arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_is_graph_network",
    "source_code": "def _is_graph_network(layer):\n    if isinstance(layer, RevivedNetwork):\n        return False\n    elif isinstance(layer, functional_lib.Functional):\n        return layer._is_graph_network or isinstance(layer, models_lib.Sequential)\n    return False",
    "docstring": "Determines whether the layer is a graph network.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\saving\\saved_model\\load.py",
    "ast_data": "FunctionDef name:_is_graph_network arg:layer arguments arg If Call Return return:yes If Call Return return:yes BoolOp Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "gen_broadcasting_constraints",
    "source_code": "def gen_broadcasting_constraints(e1: TVar, e2: TVar, e11: TVar, e12: TVar, i: int, counter: int):\n    dims, counter = gen_lists_of_dims(4, i, counter)\n    [d1, d2, d3, d4] = dims\n    nat_dims_i = gen_nat_constraints(list(itertools.chain.from_iterable(dims)))\n    initialize_tensors_constraints = create_equality_constraints_for_broadcasting(e1, e2, e11, e12, d1, d2, d3, d4)\n    [e1_tensor, e11_tensor, e2_tensor, e12_tensor] = initialize_tensors_constraints\n    final_tensor_constraint_no_padding = Conj([*initialize_tensors_constraints, generate_all_broadcasting_possibilities_no_padding(d1, d2, d3, d4)])\n    final_tensor_constraint_padding_arg1, counter = apply_padding(e1, e11_tensor, e2_tensor, e12_tensor, d2, d3, d4, counter)\n    final_tensor_constraint_padding_arg2, counter = apply_padding(e2, e12_tensor, e1_tensor, e11_tensor, d1, d4, d3, counter)\n    return (final_tensor_constraint_no_padding, final_tensor_constraint_padding_arg1, final_tensor_constraint_padding_arg2, nat_dims_i, counter)",
    "docstring": "Simulates broadcasting on e1 and e2 and returns the results respectively in e11 and e12. Because of gradual types, e1 and e2 may not be equal. Similarly, e11 and e12 may not be equal. e11 and e12 should be guaranteed to be consistent as they represent the shapes of the tensors to be added after broadcasting. Args: e1: TVar representing the type of input 1 e2: TVar representing the type of input 2 e11: TVar representing the representing broadcasted input 1 e12: TVar representing the representing broadcasted input 2 i: The rank of the resulting type of addition counter: for variable tracking Returns: Simplified broadcasting constraints",
    "type": "function",
    "file_path": "pytorch\\torch\\fx\\experimental\\migrate_gradual_types\\constraint_transformation.py",
    "ast_data": "FunctionDef name:gen_broadcasting_constraints arg:e1 arg:e2 arg:e11 arg:e12 arg:i arg:counter arguments arg arg arg arg arg arg Assign Call Assign Assign Call Call Call Assign Call Assign Assign Call Call Assign Call Assign Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_is_fsdp_flattened",
    "source_code": "def _is_fsdp_flattened(tensor: torch.Tensor) -> bool:\n    return getattr(tensor, FSDP_FLATTENED, False)",
    "docstring": "Returns if `` has been marked as flattened by FSDP.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\fsdp\\_common_utils.py",
    "ast_data": "FunctionDef name:_is_fsdp_flattened arg:tensor arguments arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_make_dense_default",
    "source_code": "def _make_dense_default(self, key, shape, dtype):\n    default_value = self.dense_defaults.get(key)\n    if shape.ndims is not None and shape.ndims > 0 and (shape.dims[0].value is None):\n        if default_value is None:\n            default_value = ops.convert_to_tensor('' if dtype == dtypes.string else 0, dtype=dtype)\n        else:\n            key_name = 'padding_' + re.sub('[^A-Za-z0-9_.\\\\-/]', '_', key)\n            default_value = ops.convert_to_tensor(default_value, dtype=dtype, name=key_name)\n            default_value = array_ops.reshape(default_value, [])\n    elif default_value is None:\n        default_value = constant_op.constant([], dtype=dtype)\n    elif not isinstance(default_value, tensor.Tensor):\n        key_name = 'key_' + re.sub('[^A-Za-z0-9_.\\\\-/]', '_', key)\n        default_value = ops.convert_to_tensor(default_value, dtype=dtype, name=key_name)\n        default_value = array_ops.reshape(default_value, shape)\n    return default_value",
    "docstring": "Construct the default value tensor for a specified dense feature. Args: key: The key string identifying the dense feature. shape: The dense feature's shape. dtype: The dense feature's dtype. Returns: A Tensor.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\parsing_config.py",
    "ast_data": "FunctionDef name:_make_dense_default arg:self arg:key arg:shape arg:dtype arguments arg arg arg arg Assign Call If BoolOp Compare Compare Compare If Compare Assign Call Compare Assign Call Assign Call Assign Call If Compare Assign Call If Call Assign Call Assign Call Assign Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "get_left_margin_bbox",
    "source_code": "def get_left_margin_bbox(self, rows=0, cols=0):\n    rows = np.atleast_1d(rows)\n    cols = np.atleast_1d(cols)\n    bbox = Bbox.from_extents(self.lefts[cols[0]].value() + self.margins['leftcb'][cols[0]].value(), self.bottoms[rows[-1]].value(), self.lefts[cols[0]].value() + self.margins['leftcb'][cols[0]].value() + self.margins['left'][cols[0]].value(), self.tops[rows[0]].value())\n    return bbox",
    "docstring": "Return the left margin bounding box of the subplot specs given by rows and cols. rows and cols can be spans.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\_layoutgrid.py",
    "ast_data": "FunctionDef name:get_left_margin_bbox arg:self arg:rows arg:cols arguments arg arg arg Assign Call Assign Call Assign Call Call Call Call Call Call Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_dense_var_to_tensor",
    "source_code": "def _dense_var_to_tensor(self, dtype=None, name=None, as_ref=False):\n    if as_ref:\n        raise ValueError('Cannot convert AutoCastVariable to a tensor if as_ref=True is passed to convert_to_tensor')\n    if not self._should_cast():\n        return tensor_conversion.convert_to_tensor_v2_with_dispatch(self._variable, dtype=dtype, name=name)\n    if dtype is not None and (not dtype.is_compatible_with(self._cast_dtype)):\n        raise ValueError('Incompatible type conversion requested to type {!r} for AutoCastVariable which is casted to type {!r}'.format(dtype.name, self._cast_dtype.name))\n    val = tensor_conversion.convert_to_tensor_v2_with_dispatch(self._variable, dtype=self._variable.dtype, name=name)\n    return math_ops.cast(val, self._cast_dtype)",
    "docstring": "Converts this variable to a tensor.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\mixed_precision\\autocast_variable.py",
    "ast_data": "FunctionDef name:_dense_var_to_tensor arg:self arg:dtype arg:name arg:as_ref arguments arg arg arg arg If Raise Call If Call Return return:yes Call If BoolOp Compare Call Raise Call Call Assign Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "join",
    "source_code": "def join(self, threads=None, stop_grace_period_secs=120, ignore_live_threads=False):\n    with self._lock:\n        if threads is None:\n            threads = self._registered_threads\n        else:\n            threads = self._registered_threads.union(set(threads))\n        threads = list(threads)\n    while any((t.is_alive() for t in threads)) and (not self.wait_for_stop(1.0)):\n        pass\n    stop_wait_secs = 0.001\n    while any((t.is_alive() for t in threads)) and stop_grace_period_secs >= 0.0:\n        time.sleep(stop_wait_secs)\n        stop_grace_period_secs -= stop_wait_secs\n        stop_wait_secs = 2 * stop_wait_secs\n        stop_wait_secs = max(min(stop_wait_secs, stop_grace_period_secs), 0.001)\n    stragglers = [t.name for t in threads if t.is_alive()]\n    with self._lock:\n        self._joined = True\n        self._registered_threads = set()\n        if self._exc_info_to_raise:\n            _, ex_instance, _ = self._exc_info_to_raise\n            raise ex_instance\n        elif stragglers:\n            if ignore_live_threads:\n                logging.info('Coordinator stopped with threads still running: %s', ' '.join(stragglers))\n            else:\n                raise RuntimeError('Coordinator stopped with threads still running: %s' % ' '.join(stragglers))",
    "docstring": "Wait for threads to terminate. This call blocks until a set of threads have terminated. The set of thread is the union of the threads passed in the argument and the list of threads that registered with the coordinator by calling . After the threads stop, if an was passed to , that exception is re-raised. Grace period handling: When is called, threads are given 'stop_grace_period_secs' seconds to terminate. If any of them is still alive after that period expires, a is raised. Note that if an was passed to then it is raised instead of that . Args: threads: List of . The started threads to join in addition to the registered threads. stop_grace_period_secs: Number of seconds given to threads to stop after has been called. ignore_live_threads: If , raises an error if any of the threads are still alive after . Raises: RuntimeError: If any thread is still alive after is called and the grace period expires.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\training\\coordinator.py",
    "ast_data": "FunctionDef name:join arg:self arg:threads arg:stop_grace_period_secs arg:ignore_live_threads arguments arg arg arg arg With If Compare Assign Assign Call Call Assign Call While BoolOp Call Call Call Assign While BoolOp Call Call Compare Call Assign Assign Call Call Assign Call With Assign Assign Call If Assign Raise If If Call Call Raise Call Call"
  },
  {
    "library": "pytorch",
    "name": "is_recompilation",
    "source_code": "def is_recompilation(cache_size: CacheSizeRelevantForFrame) -> bool:\n    return cache_size.will_compilation_exceed(1)",
    "docstring": "If the frame (earlier parsed by compute_cache_size) has more than 1 cache entry with same ID_MATCH'd objects, then its a recompilation.",
    "type": "function",
    "file_path": "pytorch\\torch\\_dynamo\\cache_size.py",
    "ast_data": "FunctionDef name:is_recompilation arg:cache_size arguments arg Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "_find_valid_index",
    "source_code": "@final\ndef _find_valid_index(self, *, how: str) -> Hashable:\n    is_valid = self.notna().values\n    idxpos = find_valid_index(how=how, is_valid=is_valid)\n    if idxpos is None:\n        return None\n    return self.index[idxpos]",
    "docstring": "Retrieves the index of the first valid value. Parameters ---------- how : {'first', 'last'} Use this parameter to change between the first or last valid index. Returns ------- idx_first_valid : type of index",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\generic.py",
    "ast_data": "FunctionDef name:_find_valid_index arg:self arguments arg arg Assign Call Assign Call If Compare Return return:no Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_call_cross_entropy_loss",
    "source_code": "def _call_cross_entropy_loss(self, tx: 'InstructionTranslator', args, kwargs):\n    from . import ConstantVariable\n\n    def normalize_args(weight=ConstantVariable.create(None), size_average=ConstantVariable.create(None), ignore_index=ConstantVariable.create(-100), reduce=ConstantVariable.create(None), reduction=ConstantVariable.create('mean'), label_smoothing=ConstantVariable.create(0.0)):\n        return (weight, size_average, ignore_index, reduce, reduction, label_smoothing)\n    weight, size_average, ignore_index, reduce_arg, reduction, label_smoothing = normalize_args(*args, **kwargs)\n\n    def fake_cross_entropy_loss(input, target):\n        from .builder import wrap_fx_proxy\n        return wrap_fx_proxy(tx=tx, proxy=tx.output.create_proxy('call_function', torch.nn.functional.cross_entropy, *proxy_args_kwargs([input, target, weight, size_average, ignore_index, reduce_arg, reduction, label_smoothing], {})))\n    return variables.LambdaVariable(fake_cross_entropy_loss)",
    "docstring": "functional: input, target, weight=None, size_average=None, ignore_index=- 100, reduce=None, reduction='mean', label_smoothing=0.0 non functional ctor: weight=None, size_average=None, ignore_index=- 100, reduce=None, reduction='mean', label_smoothing=0.0 non functional loss call: input, target, optional_output",
    "type": "method",
    "file_path": "pytorch\\torch\\_dynamo\\variables\\user_defined.py",
    "ast_data": "FunctionDef name:_call_cross_entropy_loss arg:self arg:tx arg:args arg:kwargs arguments arg arg arg arg FunctionDef name:normalize_args arg:weight arg:size_average arg:ignore_index arg:reduce arg:reduction arg:label_smoothing arguments arg arg arg arg arg arg Call Call Call Call Call Call Return return:yes Assign Call FunctionDef name:fake_cross_entropy_loss arg:input arg:target arguments arg arg Return return:yes Call Call Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "as_default",
    "source_code": "def as_default(self) -> ContextManager['Graph']:\n    return _default_graph_stack.get_controller(self)",
    "docstring": "Returns a context manager that makes this the default graph. This method should be used if you want to create multiple graphs in the same process. For convenience, a global default graph is provided, and all ops will be added to this graph if you do not create a new graph explicitly. Use this method with the keyword to specify that ops created within the scope of a block should be added to this graph. In this case, once the scope of the is exited, the previous default graph is set again as default. There is a stack, so it's ok to have multiple nested levels of calls. The default graph is a property of the current thread. If you create a new thread, and wish to use the default graph in that thread, you must explicitly add a in that thread's function. The following code examples are equivalent: If eager execution is enabled ops created under this context manager will be added to the graph instead of executed eagerly. Returns: A context manager for using this graph as the default graph.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\ops.py",
    "ast_data": "FunctionDef name:as_default arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "node_traceback",
    "source_code": "def node_traceback(self, element_name):\n    if self._python_graph is None:\n        raise LookupError('Python graph is not available for traceback lookup')\n    node_name = debug_graphs.get_node_name(element_name)\n    if node_name not in self._node_traceback:\n        raise KeyError('Cannot find node \"%s\" in Python graph' % node_name)\n    return self._node_traceback[node_name]",
    "docstring": "Try to retrieve the Python traceback of node's construction. Args: element_name: () Name of a graph element (node or tensor). Returns: (list) The traceback list object as returned by the method of Python's traceback module. Raises: LookupError: If Python graph is not available for traceback lookup. KeyError: If the node cannot be found in the Python graph loaded.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\debug\\lib\\debug_data.py",
    "ast_data": "FunctionDef name:node_traceback arg:self arg:element_name arguments arg arg If Compare Raise Call Assign Call If Compare Raise Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "on_epoch_end",
    "source_code": "@doc_controls.for_subclass_implementers\ndef on_epoch_end(self, epoch, logs=None):\n    pass",
    "docstring": "Called at the end of an epoch. Subclasses should override for any actions to run. This function should only be called during TRAIN mode. Args: epoch: Integer, index of epoch. logs: Dict, metric results for this training epoch, and for the validation epoch if validation is performed. Validation result keys are prefixed with . For training epoch, the values of the 's metrics are returned. Example : .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\callbacks.py",
    "ast_data": "FunctionDef name:on_epoch_end arg:self arg:epoch arg:logs arguments arg arg arg"
  },
  {
    "library": "pytorch",
    "name": "_get_device_index",
    "source_code": "def _get_device_index(device: Any, optional: bool=False, allow_cpu: bool=False) -> int:\n    if isinstance(device, str):\n        device = torch.device(device)\n    device_idx: Optional[int] = None\n    if isinstance(device, torch.device):\n        if not allow_cpu and device.type == 'cpu':\n            raise ValueError(f'Expected a non cpu device, but got: {device}')\n        device_idx = -1 if device.type == 'cpu' else device.index\n    if isinstance(device, int):\n        device_idx = device\n    if device_idx is None:\n        if optional:\n            if torch.jit.is_scripting():\n                device_idx = get_current_device_index()\n            else:\n                device_idx = _get_current_device_index()\n        else:\n            raise ValueError(f'Expected a torch.device with a specified index or an integer, but got:{device}')\n    return device_idx",
    "docstring": "Gets the device index from :attr:, which can be a torch.device object, a Python integer, or `deviceoptionalallow_cpudevicedeviceoptional`. i.e., the current default CUDA device will be returned if CUDA runtime is supported.",
    "type": "function",
    "file_path": "pytorch\\torch\\_utils.py",
    "ast_data": "FunctionDef name:_get_device_index arg:device arg:optional arg:allow_cpu arguments arg arg arg If Call Assign Call If Call If BoolOp Compare Raise Call Assign Compare If Call Assign If Compare If If Call Assign Call Assign Call Raise Call Return return:yes"
  },
  {
    "library": "cherrypy",
    "name": "process_multipart",
    "source_code": "def process_multipart(entity):\n    ib = ''\n    if 'boundary' in entity.content_type.params:\n        ib = entity.content_type.params['boundary'].strip('\"')\n    if not re.match('^[ -~]{0,200}[!-~]$', ib):\n        raise ValueError('Invalid boundary in multipart form: %r' % (ib,))\n    ib = ('--' + ib).encode('ascii')\n    while True:\n        b = entity.readline()\n        if not b:\n            return\n        b = b.strip()\n        if b == ib:\n            break\n    while True:\n        part = entity.part_class.from_fp(entity.fp, ib)\n        entity.parts.append(part)\n        part.process()\n        if part.fp.done:\n            break",
    "docstring": "Read all multipart parts into entity.parts.",
    "type": "function",
    "file_path": "cherrypy\\cherrypy\\_cpreqbody.py",
    "ast_data": "FunctionDef name:process_multipart arg:entity arguments arg Assign If Compare Assign Call If Call Raise Call Assign Call While Assign Call If Return return:no Assign Call If Compare While Assign Call Call Call If"
  },
  {
    "library": "tensorflow",
    "name": "invoke_op_callbacks",
    "source_code": "def invoke_op_callbacks(op_type, inputs, attrs, outputs, op_name=None, graph=None):\n    ctx = context.context()\n    if ctx.op_callbacks:\n        ctx.invoking_op_callbacks = True\n        try:\n            if isinstance(attrs, dict):\n                attrs_list = []\n                for key in attrs:\n                    attrs_list.append(key)\n                    attrs_list.append(attrs[key])\n                attrs_tuple = tuple(attrs_list)\n            else:\n                attrs_tuple = attrs\n            new_outputs = outputs\n            for callback in ctx.op_callbacks:\n                new_outputs = callback(op_type, inputs, attrs_tuple, new_outputs, op_name=op_name, graph=graph)\n                if new_outputs is not None and len(new_outputs) != len(outputs):\n                    raise ValueError(f'The op callback returned {len(new_outputs)} tensors, which does not match the original number of outputs of op {op_name} ({len(outputs)}).')\n            return new_outputs\n        finally:\n            ctx.invoking_op_callbacks = False\n    else:\n        return outputs",
    "docstring": "Invoke the callbacks that exist in the current scope (if any). If no callbacks are present in the current scope, this method returns immediately. Args: op_type: Type of the operation (e.g., \"MatMul\"). inputs: Input tensors to the op. These are s in the case of eager execution of ops or s, and are non-eager s in the case of graph construction. attrs: Attributes of the op, as of alternating keys and values. outputs: Output tensors from the op. These are s in the case of eager execution and are non-eager s in the case of graph construction. op_name: Name of the op. Applicable if and only if this method is invoked due to the graph construction of an op or the eager execution of a . graph: The graph involved (if any). - In the case if the eager execution of an op or FuncGraph, this is . - In the case of the graph construction of an op, this is the object being built. Returns: , or a or of output tenors that will override the original (input) .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\op_callbacks.py",
    "ast_data": "FunctionDef name:invoke_op_callbacks arg:op_type arg:inputs arg:attrs arg:outputs arg:op_name arg:graph arguments arg arg arg arg arg arg Assign Call If Assign Try If Call Assign For Call Call Assign Call Assign Assign For Assign Call If BoolOp Compare Compare Call Call Raise Call Call Call Return return:yes Assign Return return:yes"
  },
  {
    "library": "django",
    "name": "_resolve_dependency",
    "source_code": "@staticmethod\ndef _resolve_dependency(dependency):\n    if dependency.app_label != '__setting__':\n        return (dependency, False)\n    resolved_app_label, resolved_object_name = getattr(settings, dependency.model_name).split('.')\n    return (OperationDependency(resolved_app_label, resolved_object_name.lower(), dependency.field_name, dependency.type), True)",
    "docstring": "Return the resolved dependency and a boolean denoting whether or not it was swappable.",
    "type": "method",
    "file_path": "django\\django\\db\\migrations\\autodetector.py",
    "ast_data": "FunctionDef name:_resolve_dependency arg:dependency arguments arg If Compare Return return:yes Assign Call Call Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "softsign",
    "source_code": "def softsign(input):\n    if has_torch_function_unary(input):\n        return handle_torch_function(softsign, (input,), input)\n    return input / (input.abs() + 1)",
    "docstring": "softsign(input) -> Tensor Applies element-wise, the function :math: See :class: for more details.",
    "type": "function",
    "file_path": "pytorch\\torch\\nn\\functional.py",
    "ast_data": "FunctionDef name:softsign arg:input arguments arg If Call Return return:yes Call Return return:yes Call"
  },
  {
    "library": "kornia",
    "name": "as_cuda",
    "source_code": "def as_cuda(self, device_id: int=0, **kwargs: Any) -> None:\n    self._session.set_providers(['CUDAExecutionProvider'], provider_options=[{'device_id': device_id, **kwargs}])",
    "docstring": "Set the session to run on CUDA. We set the ONNX runtime session to use CUDAExecutionProvider. For other CUDAExecutionProvider configurations, or CUDA/cuDNN/ONNX version issues, you may refer to Note: For using CUDA ONNXRuntime, you need to install . For handling different CUDA version, you may refer to Args: device_id: Select GPU to execute. kwargs: Additional arguments for cuda.",
    "type": "method",
    "file_path": "kornia\\kornia\\core\\mixin\\onnx.py",
    "ast_data": "FunctionDef name:as_cuda arg:self arg:device_id arguments arg arg arg Call"
  },
  {
    "library": "sphinx",
    "name": "NoneStyle",
    "source_code": "class NoneStyle(Style):\n    pass",
    "docstring": "Style without any styling.",
    "type": "class",
    "file_path": "sphinx\\sphinx\\pygments_styles.py",
    "ast_data": "ClassDef name:NoneStyle"
  },
  {
    "library": "pandas",
    "name": "ravel_compat",
    "source_code": "def ravel_compat(meth: F) -> F:\n\n    @wraps(meth)\n    def method(self, *args, **kwargs):\n        if self.ndim == 1:\n            return meth(self, *args, **kwargs)\n        flags = self._ndarray.flags\n        flat = self.ravel('K')\n        result = meth(flat, *args, **kwargs)\n        order = 'F' if flags.f_contiguous else 'C'\n        return result.reshape(self.shape, order=order)\n    return cast(F, method)",
    "docstring": "Decorator to ravel a 2D array before passing it to a cython operation, then reshape the result to our own shape.",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\arrays\\_mixins.py",
    "ast_data": "FunctionDef name:ravel_compat arg:meth arguments arg FunctionDef name:method arg:self arguments arg arg arg If Compare Return return:yes Call Assign Assign Call Assign Call Assign Return return:yes Call Call Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "_read_typedesc",
    "source_code": "def _read_typedesc(f):\n    typedesc = {'typecode': _read_long(f), 'varflags': _read_long(f)}\n    if typedesc['varflags'] & 2 == 2:\n        raise Exception('System variables not implemented')\n    typedesc['array'] = typedesc['varflags'] & 4 == 4\n    typedesc['structure'] = typedesc['varflags'] & 32 == 32\n    if typedesc['structure']:\n        typedesc['array_desc'] = _read_arraydesc(f)\n        typedesc['struct_desc'] = _read_structdesc(f)\n    elif typedesc['array']:\n        typedesc['array_desc'] = _read_arraydesc(f)\n    return typedesc",
    "docstring": "Function to read in a type descriptor",
    "type": "function",
    "file_path": "scipy\\scipy\\io\\_idl.py",
    "ast_data": "FunctionDef name:_read_typedesc arg:f arguments arg Assign Call Call If Compare Raise Call Assign Compare Assign Compare If Assign Call Assign Call If Assign Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "strip_math",
    "source_code": "def strip_math(s):\n    if len(s) >= 2 and s[0] == s[-1] == '$':\n        s = s[1:-1]\n        for tex, plain in [('\\\\times', 'x'), ('\\\\mathdefault', ''), ('\\\\rm', ''), ('\\\\cal', ''), ('\\\\tt', ''), ('\\\\it', ''), ('\\\\', ''), ('{', ''), ('}', '')]:\n            s = s.replace(tex, plain)\n    return s",
    "docstring": "Remove latex formatting from mathtext. Only handles fully math and fully non-math strings.",
    "type": "function",
    "file_path": "matplotlib\\lib\\matplotlib\\cbook.py",
    "ast_data": "FunctionDef name:strip_math arg:s arguments arg If BoolOp Compare Call Compare Assign For Assign Call Return return:yes"
  },
  {
    "library": "numpy",
    "name": "std",
    "source_code": "def std(self, axis=None, dtype=None, out=None, ddof=0):\n    return N.ndarray.std(self, axis, dtype, out, ddof, keepdims=True)._collapse(axis)",
    "docstring": "Return the standard deviation of the array elements along the given axis. Refer to for full documentation. See Also -------- numpy.std Notes ----- This is the same as , except that where an would be returned, a object is returned instead. Examples -------- >>> x = np.matrix(np.arange(12).reshape((3, 4))) >>> x matrix([[ 0, 1, 2, 3], [ 4, 5, 6, 7], [ 8, 9, 10, 11]]) >>> x.std() 3.4520525295346629 # may vary >>> x.std(0) matrix([[ 3.26598632, 3.26598632, 3.26598632, 3.26598632]]) # may vary >>> x.std(1) matrix([[ 1.11803399], [ 1.11803399], [ 1.11803399]])",
    "type": "method",
    "file_path": "numpy\\numpy\\matrixlib\\defmatrix.py",
    "ast_data": "FunctionDef name:std arg:self arg:axis arg:dtype arg:out arg:ddof arguments arg arg arg arg arg Return return:yes Call Call"
  },
  {
    "library": "django",
    "name": "urlencode",
    "source_code": "def urlencode(self, safe=None):\n    output = []\n    if safe:\n        safe = safe.encode(self.encoding)\n\n        def encode(k, v):\n            return '%s=%s' % (quote(k, safe), quote(v, safe))\n    else:\n\n        def encode(k, v):\n            return urlencode({k: v})\n    for k, list_ in self.lists():\n        output.extend((encode(k.encode(self.encoding), str(v).encode(self.encoding)) for v in list_))\n    return '&'.join(output)",
    "docstring": "Return an encoded string of all query string arguments. specifies characters which don't require quoting, for example:: >>> q = QueryDict(mutable=True) >>> q['next'] = '/a&b/' >>> q.urlencode() 'next=%2Fa%26b%2F' >>> q.urlencode(safe='/') 'next=/a%26b/'",
    "type": "method",
    "file_path": "django\\django\\http\\request.py",
    "ast_data": "FunctionDef name:urlencode arg:self arg:safe arguments arg arg Assign If Assign Call FunctionDef name:encode arg:k arg:v arguments arg arg Return return:yes Call Call FunctionDef name:encode arg:k arg:v arguments arg arg Return return:yes Call For Call Call Call Call Call Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, reduce_to_device=None, accumulation_fn=None):\n    self.reduce_to_device = reduce_to_device\n    self.accumulation_fn = accumulation_fn or math_ops.add_n\n    super(ReductionToOneDevice, self).__init__()",
    "docstring": "Initializes with a device to reduce to and a way to accumulate. Args: reduce_to_device: the intermediate device to reduce to. If None, reduce to the first device in of the method. accumulation_fn: a function that does accumulation. If None, is used.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\cross_device_ops.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:reduce_to_device arg:accumulation_fn arguments arg arg arg Assign Assign BoolOp Call Call"
  },
  {
    "library": "pytorch",
    "name": "atleast_3d",
    "source_code": "def atleast_3d(arg: Union[TensorLikeType, Sequence[TensorLikeType]], *args: TensorLikeType) -> Union[TensorLikeType, tuple[TensorLikeType, ...]]:\n    if not args and isinstance(arg, collections.abc.Sequence):\n        args_ = arg\n    else:\n        assert not isinstance(arg, collections.abc.Sequence)\n        args_ = (arg,) + args\n    unsqueeze_atleast_2d = partial(_unsqueeze_atleast, atleast_2d, -1)\n    res = tuple((a if a.ndim >= 3 else unsqueeze_atleast_2d(a) for a in args_))\n    return res if len(res) > 1 else res[0]",
    "docstring": "Reference implementation of :func:.",
    "type": "function",
    "file_path": "pytorch\\torch\\_refs\\__init__.py",
    "ast_data": "FunctionDef name:atleast_3d arg:arg arguments arg arg If BoolOp Call Assign Call Assign Assign Call Assign Call Compare Call Return return:yes Compare Call"
  },
  {
    "library": "pytorch",
    "name": "_get_module",
    "source_code": "def _get_module(node: Node, modules: dict[str, nn.Module]) -> Optional[nn.Module]:\n    if node.op == 'call_module' and str(node.target) in modules:\n        return modules[str(node.target)]\n    else:\n        return None",
    "docstring": "Return the that corresponds to the specified node's target. If no such node exists, return None.",
    "type": "function",
    "file_path": "pytorch\\torch\\ao\\quantization\\fx\\_lower_to_native_backend.py",
    "ast_data": "FunctionDef name:_get_module arg:node arg:modules arguments arg arg If BoolOp Compare Compare Call Return return:yes Call Return return:no"
  },
  {
    "library": "tensorflow",
    "name": "fit_generator",
    "source_code": "def fit_generator(self, generator, steps_per_epoch=None, epochs=1, verbose=1, callbacks=None, validation_data=None, validation_steps=None, validation_freq=1, class_weight=None, max_queue_size=10, workers=1, use_multiprocessing=False, shuffle=True, initial_epoch=0):\n    warnings.warn('`Model.fit_generator` is deprecated and will be removed in a future version. Please use `Model.fit`, which supports generators.')\n    return self.fit(generator, steps_per_epoch=steps_per_epoch, epochs=epochs, verbose=verbose, callbacks=callbacks, validation_data=validation_data, validation_steps=validation_steps, validation_freq=validation_freq, class_weight=class_weight, max_queue_size=max_queue_size, workers=workers, use_multiprocessing=use_multiprocessing, shuffle=shuffle, initial_epoch=initial_epoch)",
    "docstring": "Fits the model on data yielded batch-by-batch by a Python generator. DEPRECATED: now supports generators, so there is no longer any need to use this endpoint.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\training.py",
    "ast_data": "FunctionDef name:fit_generator arg:self arg:generator arg:steps_per_epoch arg:epochs arg:verbose arg:callbacks arg:validation_data arg:validation_steps arg:validation_freq arg:class_weight arg:max_queue_size arg:workers arg:use_multiprocessing arg:shuffle arg:initial_epoch arguments arg arg arg arg arg arg arg arg arg arg arg arg arg arg arg Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "parents",
    "source_code": "@property\ndef parents(self):\n    return [self.key]",
    "docstring": "See 'FeatureColumn` base class.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\feature_column\\feature_column_v2.py",
    "ast_data": "FunctionDef name:parents arg:self arguments arg Return return:yes"
  },
  {
    "library": "django",
    "name": "pretty_name",
    "source_code": "def pretty_name(name):\n    if not name:\n        return ''\n    return name.replace('_', ' ').capitalize()",
    "docstring": "Convert 'first_name' to 'First name'.",
    "type": "function",
    "file_path": "django\\django\\forms\\utils.py",
    "ast_data": "FunctionDef name:pretty_name arg:name arguments arg If Return return:yes Return return:yes Call Call"
  },
  {
    "library": "matplotlib",
    "name": "HourLocator",
    "source_code": "class HourLocator(RRuleLocator):\n\n    def __init__(self, byhour=None, interval=1, tz=None):\n        if byhour is None:\n            byhour = range(24)\n        rule = rrulewrapper(HOURLY, byhour=byhour, interval=interval, byminute=0, bysecond=0)\n        super().__init__(rule, tz=tz)",
    "docstring": "Make ticks on occurrences of each hour.",
    "type": "class",
    "file_path": "matplotlib\\lib\\matplotlib\\dates.py",
    "ast_data": "ClassDef name:HourLocator FunctionDef name:__init__ arg:self arg:byhour arg:interval arg:tz arguments arg arg arg arg If Compare Assign Call Assign Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "_check_mocked_error",
    "source_code": "def _check_mocked_error(module: Optional[str], field: Optional[str]):\n    assert isinstance(module, str)\n    assert isinstance(field, str)\n    if self._can_implicitly_extern(module):\n        return\n    for pattern, pattern_info in self.patterns.items():\n        if pattern.matches(module):\n            if pattern_info.action == _ModuleProviderAction.MOCK:\n                mocked_modules[module].append(field)\n            return",
    "docstring": "checks if an object (field) comes from a mocked module and then adds the pair to mocked_modules which contains mocked modules paired with their list of mocked objects present in the pickle. We also hold the invariant that the first user defined rule that applies to the module is the one we use.",
    "type": "method",
    "file_path": "pytorch\\torch\\package\\package_exporter.py",
    "ast_data": "FunctionDef name:_check_mocked_error arg:module arg:field arguments arg arg Call Call If Call Return return:no For Call If Call If Compare Call Return return:no"
  },
  {
    "library": "tensorflow",
    "name": "get_output_mask_at",
    "source_code": "def get_output_mask_at(self, node_index):\n    output = self.get_output_at(node_index)\n    if isinstance(output, list):\n        return [getattr(x, '_keras_mask', None) for x in output]\n    else:\n        return getattr(output, '_keras_mask', None)",
    "docstring": "Retrieves the output mask tensor(s) of a layer at a given node. Args: node_index: Integer, index of the node from which to retrieve the attribute. E.g. will correspond to the first time the layer was called. Returns: A mask tensor (or list of tensors if the layer has multiple outputs).",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\base_layer_v1.py",
    "ast_data": "FunctionDef name:get_output_mask_at arg:self arg:node_index arguments arg arg Assign Call If Call Return return:yes Call Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "read_char_array",
    "source_code": "def read_char_array(self, hdr):\n    arr = self.read_sub_array(hdr).astype(np.uint8)\n    S = arr.tobytes().decode('latin-1')\n    return np.ndarray(shape=hdr.dims, dtype=np.dtype('U1'), buffer=np.array(S)).copy()",
    "docstring": "latin-1 text matrix (char matrix) reader Parameters ---------- hdr : `hdr`",
    "type": "method",
    "file_path": "scipy\\scipy\\io\\matlab\\_mio4.py",
    "ast_data": "FunctionDef name:read_char_array arg:self arg:hdr arguments arg arg Assign Call Call Assign Call Call Return return:yes Call Call Call Call"
  },
  {
    "library": "matplotlib",
    "name": "ResizeEvent",
    "source_code": "class ResizeEvent(Event):\n\n    def __init__(self, name, canvas):\n        super().__init__(name, canvas)\n        self.width, self.height = canvas.get_width_height()",
    "docstring": "An event triggered by a canvas resize. A ResizeEvent has a number of special attributes in addition to those defined by the parent class. Attributes ---------- width : int Width of the canvas in pixels. height : int Height of the canvas in pixels.",
    "type": "class",
    "file_path": "matplotlib\\lib\\matplotlib\\backend_bases.py",
    "ast_data": "ClassDef name:ResizeEvent FunctionDef name:__init__ arg:self arg:name arg:canvas arguments arg arg arg Call Call Assign Call"
  },
  {
    "library": "pytorch",
    "name": "mark_as_skip_wait",
    "source_code": "def mark_as_skip_wait(x: ir.IRNode) -> None:\n    _bufs_to_skip_wait.add((id(V.graph), x.get_name()))",
    "docstring": "If a non-blocking collective is lowered as a blocking collective, the wait node in the original graph becomes useless and we can skip the lowering it.",
    "type": "function",
    "file_path": "pytorch\\torch\\_inductor\\comm_lowering.py",
    "ast_data": "FunctionDef name:mark_as_skip_wait arg:x arguments arg Call Call Call"
  },
  {
    "library": "scipy",
    "name": "check",
    "source_code": "def check(arguments, wrong=operator.ne, msg=''):\n    if wrong(len(arguments), len(dispatch_args)):\n        raise TypeError(f'Expected {len(dispatch_args)} arguments, got {{len(arguments)}}{{msg}}')",
    "docstring": "Make sure one passes the expected number of arguments",
    "type": "function",
    "file_path": "scipy\\scipy\\_lib\\decorator.py",
    "ast_data": "FunctionDef name:check arg:arguments arg:wrong arg:msg arguments arg arg arg If Call Call Call Raise Call Call"
  },
  {
    "library": "tensorflow",
    "name": "maximum",
    "source_code": "@property\ndef maximum(self):\n    return self._maximum",
    "docstring": "Returns a NumPy array specifying the maximum bounds (inclusive).",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\tensor.py",
    "ast_data": "FunctionDef name:maximum arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "EnableControlFlowV2",
    "source_code": "def EnableControlFlowV2(graph):\n    return ENABLE_CONTROL_FLOW_V2 or (graph.building_function and (not hasattr(graph, '_captured')))",
    "docstring": "Returns whether control flow v2 should be used in .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\control_flow_util.py",
    "ast_data": "FunctionDef name:EnableControlFlowV2 arg:graph arguments arg Return return:yes BoolOp BoolOp Call"
  },
  {
    "library": "scrapy",
    "name": "fingerprint",
    "source_code": "def fingerprint(request: Request, *, include_headers: Iterable[bytes | str] | None=None, keep_fragments: bool=False) -> bytes:\n    processed_include_headers: tuple[bytes, ...] | None = None\n    if include_headers:\n        processed_include_headers = tuple((to_bytes(h.lower()) for h in sorted(include_headers)))\n    cache = _fingerprint_cache.setdefault(request, {})\n    cache_key = (processed_include_headers, keep_fragments)\n    if cache_key not in cache:\n        headers: dict[str, list[str]] = {}\n        if processed_include_headers:\n            for header in processed_include_headers:\n                if header in request.headers:\n                    headers[header.hex()] = [header_value.hex() for header_value in request.headers.getlist(header)]\n        fingerprint_data = {'method': to_unicode(request.method), 'url': canonicalize_url(request.url, keep_fragments=keep_fragments), 'body': (request.body or b'').hex(), 'headers': headers}\n        fingerprint_json = json.dumps(fingerprint_data, sort_keys=True)\n        cache[cache_key] = hashlib.sha1(fingerprint_json.encode()).digest()\n    return cache[cache_key]",
    "docstring": "Return the request fingerprint. The request fingerprint is a hash that uniquely identifies the resource the request points to. For example, take the following two urls: `` Lots of sites use a cookie to store the session id, which adds a random component to the HTTP Request and thus should be ignored when calculating the fingerprint. For this reason, request headers are ignored by default when calculating the fingerprint. If you want to include specific headers use the include_headers argument, which is a list of Request headers to include. Also, servers usually ignore fragments in urls when handling requests, so they are also ignored by default when calculating the fingerprint. If you want to include them, set the keep_fragments argument to True (for instance when handling requests with a headless browser).",
    "type": "function",
    "file_path": "scrapy\\scrapy\\utils\\request.py",
    "ast_data": "FunctionDef name:fingerprint arg:request arguments arg arg arg If Assign Call Call Call Call Assign Call Assign If Compare If For If Compare Assign Call Call Call Assign Call Call Call BoolOp Assign Call Assign Call Call Call Return return:yes"
  },
  {
    "library": "pandas",
    "name": "num_chunks",
    "source_code": "def num_chunks(self) -> int:\n    return 1",
    "docstring": "Return the number of chunks the column consists of.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\interchange\\column.py",
    "ast_data": "FunctionDef name:num_chunks arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_from_proto",
    "source_code": "def _from_proto(self, pb):\n    raise NotImplementedError('{}._from_proto()'.format(type(self).__name__))",
    "docstring": "Convert protocol buffer to options.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\data\\util\\options.py",
    "ast_data": "FunctionDef name:_from_proto arg:self arg:pb arguments arg arg Raise Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "stat_v2",
    "source_code": "@tf_export('io.gfile.stat')\ndef stat_v2(path):\n    return _pywrap_file_io.Stat(compat.path_to_str(path))",
    "docstring": "Returns file statistics for a given path. Args: path: string, path to a file Returns: FileStatistics struct that contains information about the path Raises: errors.OpError: If the operation fails.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\lib\\io\\file_io.py",
    "ast_data": "FunctionDef name:stat_v2 arg:path arguments arg Return return:yes Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_diag_part",
    "source_code": "def _diag_part(self):\n    return array_ops.matrix_diag_part(self.to_dense())",
    "docstring": "Generic and often inefficient implementation. Override often.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\linalg\\linear_operator.py",
    "ast_data": "FunctionDef name:_diag_part arg:self arguments arg Return return:yes Call Call"
  },
  {
    "library": "scipy",
    "name": "_broadcast_shapes_remove_axis",
    "source_code": "def _broadcast_shapes_remove_axis(shapes, axis=None):\n    shapes = _broadcast_shapes(shapes, axis)\n    shape = shapes[0]\n    if axis is not None:\n        shape = np.delete(shape, axis)\n    return tuple(shape)",
    "docstring": "Broadcast shapes, dropping specified axes Same as _broadcast_array_shapes_remove_axis, but given a sequence of array shapes instead of the arrays themselves.",
    "type": "function",
    "file_path": "scipy\\scipy\\stats\\_axis_nan_policy.py",
    "ast_data": "FunctionDef name:_broadcast_shapes_remove_axis arg:shapes arg:axis arguments arg arg Assign Call Assign If Compare Assign Call Return return:yes Call"
  },
  {
    "library": "sphinx",
    "name": "escape",
    "source_code": "def escape(self, s: str) -> str:\n    s = s.replace('@', '@@')\n    s = s.replace('{', '@{')\n    s = s.replace('}', '@}')\n    s = s.replace('``', '`@w{`}')\n    s = s.replace(\"''\", \"'@w{'}\")\n    return s",
    "docstring": "Return a string with Texinfo command characters escaped.",
    "type": "method",
    "file_path": "sphinx\\sphinx\\writers\\texinfo.py",
    "ast_data": "FunctionDef name:escape arg:self arg:s arguments arg arg Assign Call Assign Call Assign Call Assign Call Assign Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "load_state_dict",
    "source_code": "@override\ndef load_state_dict(self, state_dict: dict[str, Any]) -> None:\n    _schedulers = state_dict.pop('_schedulers')\n    self.__dict__.update(state_dict)\n    state_dict['_schedulers'] = _schedulers\n    for idx, s in enumerate(_schedulers):\n        self._schedulers[idx].load_state_dict(s)",
    "docstring": "Load the scheduler's state. Args: state_dict (dict): scheduler state. Should be an object returned from a call to :meth:.",
    "type": "method",
    "file_path": "pytorch\\torch\\optim\\lr_scheduler.py",
    "ast_data": "FunctionDef name:load_state_dict arg:self arg:state_dict arguments arg arg Assign Call Call Assign For Call Call"
  },
  {
    "library": "matplotlib",
    "name": "get_canvas_width_height",
    "source_code": "def get_canvas_width_height(self):\n    return (1, 1)",
    "docstring": "Return the canvas width and height in display coords.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\backend_bases.py",
    "ast_data": "FunctionDef name:get_canvas_width_height arg:self arguments arg Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "__init__",
    "source_code": "def __init__(self, pad=0.3):\n    self.pad = pad",
    "docstring": "Parameters ---------- pad : float, default: 0.3 The amount of padding around the original box.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\patches.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:pad arguments arg arg Assign"
  },
  {
    "library": "tensorflow",
    "name": "size_v2",
    "source_code": "@tf_export('size', v1=[])\n@dispatch.add_dispatch_support\ndef size_v2(input, out_type=None, name=None):\n    if out_type is None:\n        if flags.config().tf_shape_default_int64.value():\n            out_type = dtypes.int64\n        else:\n            out_type = dtypes.int32\n    return size(input, name, out_type)",
    "docstring": "Returns the size of a tensor. See also . Returns a 0-D representing the number of elements in of type . Defaults to tf.int32. For example: >>> t = tf.constant([[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]]) >>> tf.size(t) Args: input: A or . out_type: (Optional) The specified non-quantized numeric output type of the operation. Defaults to . (Note: there is an experimental flag, that changes the default to . This is an unsupported, experimental setting that causes known breakages.) name: A name for the operation (optional). Returns: A of type . Defaults to . @compatibility(numpy) Equivalent to np.size() @end_compatibility",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\array_ops.py",
    "ast_data": "FunctionDef name:size_v2 arg:input arg:out_type arg:name arguments arg arg arg If Compare If Call Call Assign Assign Return return:yes Call Call"
  },
  {
    "library": "scipy",
    "name": "init_population_random",
    "source_code": "def init_population_random(self):\n    rng = self.random_number_generator\n    self.population = rng.uniform(size=self.population_shape)\n    self.population_energies = np.full(self.num_population_members, np.inf)\n    self._nfev = 0",
    "docstring": "Initializes the population at random. This type of initialization can possess clustering, Latin Hypercube sampling is generally better.",
    "type": "method",
    "file_path": "scipy\\scipy\\optimize\\_differentialevolution.py",
    "ast_data": "FunctionDef name:init_population_random arg:self arguments arg Assign Assign Call Assign Call Assign"
  },
  {
    "library": "matplotlib",
    "name": "min",
    "source_code": "@property\ndef min(self):\n    return np.min(self.get_points(), axis=0)",
    "docstring": "The bottom-left corner of the bounding box.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\transforms.py",
    "ast_data": "FunctionDef name:min arg:self arguments arg Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "node_supports_equalization",
    "source_code": "def node_supports_equalization(node: Node, modules) -> bool:\n    if node.op == 'call_module':\n        return nn_module_supports_equalization(modules[str(node.target)]) or fused_module_supports_equalization(modules[str(node.target)]) or custom_module_supports_equalization(modules[str(node.target)])\n    elif node.op == 'call_function':\n        return node.target in [F.linear, F.conv1d, F.conv2d, F.conv3d]\n    return False",
    "docstring": "Checks if the current node supports equalization Currently we only support nn.Linear/F.Linear and nn.Conv/F.conv layers",
    "type": "function",
    "file_path": "pytorch\\torch\\ao\\quantization\\fx\\_equalize.py",
    "ast_data": "FunctionDef name:node_supports_equalization arg:node arg:modules arguments arg arg If Compare Return return:yes BoolOp Call Call Call Call Call Call If Compare Return return:yes Compare Return return:yes"
  },
  {
    "library": "seaborn",
    "name": "Perc",
    "source_code": "@dataclass\nclass Perc(Stat):\n    k: int | list[float] = 5\n    method: str = 'linear'\n    group_by_orient: ClassVar[bool] = True\n\n    def _percentile(self, data: DataFrame, var: str) -> DataFrame:\n        k = list(np.linspace(0, 100, self.k)) if isinstance(self.k, int) else self.k\n        method = cast(_MethodKind, self.method)\n        values = data[var].dropna()\n        if _version_predates(np, '1.22'):\n            res = np.percentile(values, k, interpolation=method)\n        else:\n            res = np.percentile(data[var].dropna(), k, method=method)\n        return DataFrame({var: res, 'percentile': k})\n\n    def __call__(self, data: DataFrame, groupby: GroupBy, orient: str, scales: dict[str, Scale]) -> DataFrame:\n        var = {'x': 'y', 'y': 'x'}[orient]\n        return groupby.apply(data, self._percentile, var)",
    "docstring": "Replace observations with percentile values. Parameters ---------- k : list of numbers or int If a list of numbers, this gives the percentiles (in [0, 100]) to compute. If an integer, compute evenly-spaced percentiles between 0 and 100. For example, computes the 0, 25, 50, 75, and 100th percentiles. method : str Method for interpolating percentiles between observed datapoints. See :func: for valid options and more information. Examples -------- .. include:: ../docstrings/objects.Perc.rst",
    "type": "class",
    "file_path": "seaborn\\seaborn\\_stats\\order.py",
    "ast_data": "ClassDef name:Perc FunctionDef name:_percentile arg:self arg:data arg:var arguments arg arg arg Assign Call Call Call Assign Call Assign Call If Call Assign Call Assign Call Call Return return:yes Call FunctionDef name:__call__ arg:self arg:data arg:groupby arg:orient arg:scales arguments arg arg arg arg arg Assign Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "args",
    "source_code": "def args(self) -> list[str]:\n    args = [f'target={self.target}']\n    if self.scheduler:\n        args.append(f'autoscheduler={self.scheduler}')\n    if self.scheduler_flags:\n        assert self.scheduler\n        for k, v in self.scheduler_flags.items():\n            args.append(f'autoscheduler.{k}={v}')\n    return args",
    "docstring": "Command line args to pass to halide generator",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\runtime\\hints.py",
    "ast_data": "FunctionDef name:args arg:self arguments arg Assign If Call If For Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "auto_to_manual_spmd_partition",
    "source_code": "def auto_to_manual_spmd_partition(tensor, manual_sharding, single_dim=-1, unspecified_dims=None):\n    return tf2xla.spmd_full_to_shard_shape(tensor, manual_sharding=manual_sharding, dim=single_dim, unspecified_dims=unspecified_dims or [])",
    "docstring": "Switches from automatic SPMD partitioning to manual partitioning. Converts a full-shaped tensor (to be automatically partitioned by SPMD partitioner) to a shard-shaped tensor to be consumed by manually partitioned ops. Args: tensor: A tf.Tensor in full shape. manual_sharding: A serialized string of OpSharding to be used in manual partitioning. single_dim: If >= 0, the conversion will happen only on this dim in subgroups. unspecified_dims: An optional list of dimensions unspecified. Returns: A shard-shaped tensor to be consumed by manually partitioned ops.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\compiler\\xla\\experimental\\xla_sharding.py",
    "ast_data": "FunctionDef name:auto_to_manual_spmd_partition arg:tensor arg:manual_sharding arg:single_dim arg:unspecified_dims arguments arg arg arg arg Return return:yes Call BoolOp"
  },
  {
    "library": "matplotlib",
    "name": "geometry",
    "source_code": "@property\ndef geometry(self):\n    if hasattr(self._selection_artist, 'get_verts'):\n        xfm = self.ax.transData.inverted()\n        y, x = xfm.transform(self._selection_artist.get_verts()).T\n        return np.array([x, y])\n    else:\n        return np.array(self._selection_artist.get_data())",
    "docstring": "Return an array of shape (2, 5) containing the x (``) data coordinates of the four corners of the rectangle starting and ending in the top left corner.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\widgets.py",
    "ast_data": "FunctionDef name:geometry arg:self arguments arg If Call Assign Call Assign Call Call Return return:yes Call Return return:yes Call Call"
  },
  {
    "library": "scipy",
    "name": "TransferFunctionContinuous",
    "source_code": "class TransferFunctionContinuous(TransferFunction, lti):\n\n    def to_discrete(self, dt, method='zoh', alpha=None):\n        return TransferFunction(*cont2discrete((self.num, self.den), dt, method=method, alpha=alpha)[:-1], dt=dt)",
    "docstring": "Continuous-time Linear Time Invariant system in transfer function form. Represents the system as the transfer function :math:, where :math: are elements of the numerator , :math: are elements of the denominator , and `TransferFunctionltiTransferFunctionltiStateSpaceTransferFunctionZerosPolesGainTransferFunctionABCDH(s) = \\frac{s^2 + 3s + 3}{s^2 + 2s + 1}`: >>> from scipy import signal >>> num = [1, 3, 3] >>> den = [1, 2, 1] >>> signal.TransferFunction(num, den) TransferFunctionContinuous( array([ 1., 3., 3.]), array([ 1., 2., 1.]), dt: None )",
    "type": "class",
    "file_path": "scipy\\scipy\\signal\\_ltisys.py",
    "ast_data": "ClassDef name:TransferFunctionContinuous FunctionDef name:to_discrete arg:self arg:dt arg:method arg:alpha arguments arg arg arg arg Return return:yes Call Call"
  },
  {
    "library": "kornia",
    "name": "conv3x3",
    "source_code": "def conv3x3(in_planes: int, out_planes: int, stride: int=1, groups: int=1, dilation: int=1) -> nn.Conv2d:\n    return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, padding=dilation, groups=groups, bias=False, dilation=dilation)",
    "docstring": "3x3 convolution with padding.",
    "type": "function",
    "file_path": "kornia\\kornia\\feature\\defmo.py",
    "ast_data": "FunctionDef name:conv3x3 arg:in_planes arg:out_planes arg:stride arg:groups arg:dilation arguments arg arg arg arg arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "sparse_reduce_max_sparse",
    "source_code": "@tf_export(v1=['sparse.reduce_max_sparse', 'sparse_reduce_max_sparse'])\n@deprecation.deprecated_endpoints('sparse_reduce_max_sparse')\n@deprecation.deprecated_args(None, 'keep_dims is deprecated, use keepdims instead', 'keep_dims')\ndef sparse_reduce_max_sparse(sp_input, axis=None, keepdims=None, reduction_axes=None, keep_dims=None):\n    keepdims = deprecation.deprecated_argument_lookup('keepdims', keepdims, 'keep_dims', keep_dims)\n    axis = deprecation.deprecated_argument_lookup('axis', axis, 'reduction_axes', reduction_axes)\n    if keepdims is None:\n        keepdims = False\n    output_ind, output_val, output_shape = gen_sparse_ops.sparse_reduce_max_sparse(sp_input.indices, sp_input.values, sp_input.dense_shape, math_ops._ReductionDims(sp_input, axis), keepdims)\n    return sparse_tensor.SparseTensor(output_ind, output_val, output_shape)",
    "docstring": "Computes the max of elements across dimensions of a SparseTensor. This Op takes a SparseTensor and is the sparse counterpart to . In contrast to SparseReduceSum, this Op returns a SparseTensor. Note: A gradient is not defined for this function, so it can't be used in training models that need gradient descent. Reduces along the dimensions given in . Unless is true, the rank of the tensor is reduced by 1 for each entry in . If is true, the reduced dimensions are retained with length 1. If has no entries, all dimensions are reduced, and a tensor with a single element is returned. Additionally, the axes can be negative, which are interpreted according to the indexing rules in Python. Args: sp_input: The SparseTensor to reduce. Should have numeric type. axis: The dimensions to reduce; list or scalar. If (the default), reduces all dimensions. keepdims: If true, retain reduced dimensions with length 1. reduction_axes: Deprecated name of axis. keep_dims: Deprecated alias for . Returns: The reduced SparseTensor.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\sparse_ops.py",
    "ast_data": "FunctionDef name:sparse_reduce_max_sparse arg:sp_input arg:axis arg:keepdims arg:reduction_axes arg:keep_dims arguments arg arg arg arg arg Assign Call Assign Call If Compare Assign Assign Call Call Return return:yes Call Call Call Call"
  },
  {
    "library": "pandas",
    "name": "_fill_non_empty_info",
    "source_code": "def _fill_non_empty_info(self) -> None:\n    self.add_object_type_line()\n    self.add_index_range_line()\n    self.add_columns_summary_line()\n    self.add_header_line()\n    self.add_separator_line()\n    self.add_body_lines()\n    self.add_dtypes_line()\n    if self.display_memory_usage:\n        self.add_memory_usage_line()",
    "docstring": "Add lines to the info table, pertaining to non-empty dataframe.",
    "type": "method",
    "file_path": "pandas\\pandas\\io\\formats\\info.py",
    "ast_data": "FunctionDef name:_fill_non_empty_info arg:self arguments arg Call Call Call Call Call Call Call If Call"
  },
  {
    "library": "matplotlib",
    "name": "get_legend_handles_labels",
    "source_code": "def get_legend_handles_labels(self, legend_handler_map=None):\n    handles, labels = mlegend._get_legend_handles_labels([self], legend_handler_map)\n    return (handles, labels)",
    "docstring": "Return handles and labels for legend `` is equivalent to :: h, l = ax.get_legend_handles_labels() ax.legend(h, l)",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\axes\\_axes.py",
    "ast_data": "FunctionDef name:get_legend_handles_labels arg:self arg:legend_handler_map arguments arg arg Assign Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "default_stream",
    "source_code": "def default_stream(device: Optional[_device_t]=None) -> Stream:\n    return torch._C._mtia_getDefaultStream(_get_device_index(device, optional=True))",
    "docstring": "Return the default :class: for a given device. Args: device (torch.device or int, optional): selected device. Returns the default :class: for the current device, given by :func:, if :attr: is `` (default).",
    "type": "function",
    "file_path": "pytorch\\torch\\mtia\\__init__.py",
    "ast_data": "FunctionDef name:default_stream arg:device arguments arg Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "parallel_compile_enabled_internally",
    "source_code": "def parallel_compile_enabled_internally() -> bool:\n    ENABLE_PARALLEL_COMPILE_VERSION = 1\n    jk_name = 'pytorch/inductor:enable_parallel_compile_version'\n    version = torch._utils_internal.justknobs_getval_int(jk_name)\n    return ENABLE_PARALLEL_COMPILE_VERSION >= version",
    "docstring": "TODO: Remove when parallel compiled is fully enabled internally. For rollout, use a knob to enable / disable. The justknob should not be performed at import, however. So for fbcode, we assign compile_threads to 'None' below and initialize lazily in async_compile.py.",
    "type": "function",
    "file_path": "pytorch\\torch\\_inductor\\config.py",
    "ast_data": "FunctionDef name:parallel_compile_enabled_internally arguments Assign Assign Assign Call Return return:yes Compare"
  },
  {
    "library": "authlib",
    "name": "validate_nbf",
    "source_code": "def validate_nbf(self, now, leeway):\n    if 'nbf' in self:\n        nbf = self['nbf']\n        if not _validate_numeric_time(nbf):\n            raise InvalidClaimError('nbf')\n        if nbf > now + leeway:\n            raise InvalidTokenError()",
    "docstring": "The \"nbf\" (not before) claim identifies the time before which the JWT MUST NOT be accepted for processing. The processing of the \"nbf\" claim requires that the current date/time MUST be after or equal to the not-before date/time listed in the \"nbf\" claim. Implementers MAY provide for some small leeway, usually no more than a few minutes, to account for clock skew. Its value MUST be a number containing a NumericDate value. Use of this claim is OPTIONAL.",
    "type": "method",
    "file_path": "authlib\\authlib\\jose\\rfc7519\\claims.py",
    "ast_data": "FunctionDef name:validate_nbf arg:self arg:now arg:leeway arguments arg arg arg If Compare Assign If Call Raise Call If Compare Raise Call"
  },
  {
    "library": "tensorflow",
    "name": "_get_input_from_iterator",
    "source_code": "def _get_input_from_iterator(iterator, model):\n    next_element = iterator.get_next()\n    if len(nest.flatten(next_element)) == len(model.inputs):\n        x = next_element\n        y = None\n        sample_weights = None\n    elif len(nest.flatten(next_element)) == len(model.inputs) + len(model.outputs):\n        x, y = next_element\n        sample_weights = None\n    else:\n        x, y, sample_weights = next_element\n    validate_distributed_dataset_inputs(model._distribution_strategy, x, y, sample_weights)\n    return (x, y, sample_weights)",
    "docstring": "Get elements from the iterator and verify the input shape and type.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\distribute\\distributed_training_utils_v1.py",
    "ast_data": "FunctionDef name:_get_input_from_iterator arg:iterator arg:model arguments arg arg Assign Call If Compare Call Call Call Assign Assign Assign If Compare Call Call Call Call Assign Assign Assign Call Return return:yes"
  },
  {
    "library": "kornia",
    "name": "adjoint",
    "source_code": "def adjoint(self) -> Tensor:\n    return self.matrix()",
    "docstring": "Return the adjoint matrix of shape :math:. Example: >>> s = So3.identity() >>> s.adjoint() tensor([[1., 0., 0.], [0., 1., 0.], [0., 0., 1.]], grad_fn=)",
    "type": "method",
    "file_path": "kornia\\kornia\\geometry\\liegroup\\so3.py",
    "ast_data": "FunctionDef name:adjoint arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "x0",
    "source_code": "@property\ndef x0(self):\n    return self.get_points()[0, 0]",
    "docstring": "The first of the pair of *x* coordinates that define the bounding box. This is not guaranteed to be less than :attr: (for that, use :attr:).",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\transforms.py",
    "ast_data": "FunctionDef name:x0 arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "_check_X",
    "source_code": "@abstractmethod\ndef _check_X(self, X):\n    pass",
    "docstring": "To be overridden in subclasses with the actual checks. Only used in predict* methods.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\naive_bayes.py",
    "ast_data": "FunctionDef name:_check_X arg:self arg:X arguments arg arg"
  },
  {
    "library": "seaborn",
    "name": "_add_legend",
    "source_code": "def _add_legend(self, ax_obj, artist, fill, element, multiple, alpha, artist_kws, legend_kws):\n    handles = []\n    labels = []\n    for level in self._hue_map.levels:\n        color = self._hue_map(level)\n        kws = self._artist_kws(artist_kws, fill, element, multiple, color, alpha)\n        if 'facecolor' in kws:\n            kws.pop('color', None)\n        handles.append(artist(**kws))\n        labels.append(level)\n    if isinstance(ax_obj, mpl.axes.Axes):\n        ax_obj.legend(handles, labels, title=self.variables['hue'], **legend_kws)\n    else:\n        legend_data = dict(zip(labels, handles))\n        ax_obj.add_legend(legend_data, title=self.variables['hue'], label_order=self.var_levels['hue'], **legend_kws)",
    "docstring": "Add artists that reflect semantic mappings and put then in a legend.",
    "type": "method",
    "file_path": "seaborn\\seaborn\\distributions.py",
    "ast_data": "FunctionDef name:_add_legend arg:self arg:ax_obj arg:artist arg:fill arg:element arg:multiple arg:alpha arg:artist_kws arg:legend_kws arguments arg arg arg arg arg arg arg arg arg Assign Assign For Assign Call Assign Call If Compare Call Call Call Call If Call Call Assign Call Call Call"
  },
  {
    "library": "pandas",
    "name": "dropna",
    "source_code": "def dropna(self, *, axis: Axis=0, inplace: bool=False, how: AnyAll | None=None, ignore_index: bool=False) -> Series | None:\n    inplace = validate_bool_kwarg(inplace, 'inplace')\n    ignore_index = validate_bool_kwarg(ignore_index, 'ignore_index')\n    self._get_axis_number(axis or 0)\n    if self._can_hold_na:\n        result = remove_na_arraylike(self)\n    elif not inplace:\n        result = self.copy(deep=False)\n    else:\n        result = self\n    if ignore_index:\n        result.index = default_index(len(result))\n    if inplace:\n        return self._update_inplace(result)\n    else:\n        return result",
    "docstring": "Return a new Series with missing values removed. See the :ref: for more on which values are considered missing, and how to work with missing data. Parameters ---------- axis : {0 or 'index'} Unused. Parameter needed for compatibility with DataFrame. inplace : bool, default False If True, do operation inplace and return None. how : str, optional Not in use. Kept for compatibility. ignore_index : bool, default `` is considered an NA value. >>> ser = pd.Series([np.nan, 2, pd.NaT, \"\", None, \"I stay\"]) >>> ser 0 NaN 1 2 2 NaT 3 4 None 5 I stay dtype: object >>> ser.dropna() 1 2 3 5 I stay dtype: object",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\series.py",
    "ast_data": "FunctionDef name:dropna arg:self arguments arg arg arg arg arg Assign Call Assign Call Call BoolOp If Assign Call If Assign Call Assign If Assign Call Call If Return return:yes Call Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "fit",
    "source_code": "@_fit_context(prefer_skip_nested_validation=True)\ndef fit(self, X, y, sample_weight=None):\n    check_params = dict(accept_sparse=False, ensure_2d=False)\n    X = check_array(X, input_name='X', dtype=[np.float64, np.float32], **check_params)\n    y = check_array(y, input_name='y', dtype=X.dtype, **check_params)\n    check_consistent_length(X, y, sample_weight)\n    X, y = self._build_y(X, y, sample_weight)\n    self.X_thresholds_, self.y_thresholds_ = (X, y)\n    self._build_f(X, y)\n    return self",
    "docstring": "Fit the model using X, y as training data. Parameters ---------- X : array-like of shape (n_samples,) or (n_samples, 1) Training data. .. versionchanged:: 0.24 Also accepts 2d array with 1 feature. y : array-like of shape (n_samples,) Training target. sample_weight : array-like of shape (n_samples,), default=None Weights. If set to None, all weights will be set to 1 (equal weights). Returns ------- self : object Returns an instance of self. Notes ----- X is stored for future use, as :meth: needs X to interpolate new input data.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\isotonic.py",
    "ast_data": "FunctionDef name:fit arg:self arg:X arg:y arg:sample_weight arguments arg arg arg arg Assign Call Assign Call Assign Call Call Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, dataset, worker, devices, options=None):\n    self._dataset = dataset\n    self._worker = worker\n    self._devices = devices\n    self._element_spec = dataset.element_spec\n    self._options = options\n    self._make_iterator()",
    "docstring": "Create iterator for the to fetch data to worker's . A or is used to prefetch input to the devices on the given worker. Args: dataset: A instance. worker: Worker on which ops should be created. devices: Distribute data from to these devices. options: options.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\input_lib.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:dataset arg:worker arg:devices arg:options arguments arg arg arg arg arg Assign Assign Assign Assign Assign Call"
  },
  {
    "library": "tensorflow",
    "name": "_AddNGrad",
    "source_code": "@ops.RegisterGradient('AddN')\ndef _AddNGrad(op: ops.Operation, grad):\n    return [grad] * len(op.inputs)",
    "docstring": "Copies the gradient to all inputs.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\math_grad.py",
    "ast_data": "FunctionDef name:_AddNGrad arg:op arg:grad arguments arg arg Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "minimize",
    "source_code": "def minimize(self, loss, var_list, grad_loss=None, tape=None):\n    if not callable(loss) and tape is None:\n        raise ValueError('`tape` is required when a `Tensor` loss is passed.')\n    tape = tape if tape is not None else backprop.GradientTape()\n    if callable(loss):\n        with tape:\n            if not callable(var_list):\n                tape.watch(var_list)\n            loss = loss()\n            if callable(var_list):\n                var_list = var_list()\n    var_list = nest.flatten(var_list)\n    if var_list:\n        grads = tape.gradient(loss, var_list, grad_loss)\n        grads_and_vars = list(zip(grads, var_list))\n        self.apply_gradients(grads_and_vars)",
    "docstring": "Mimics the API.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\optimizer_v1.py",
    "ast_data": "FunctionDef name:minimize arg:self arg:loss arg:var_list arg:grad_loss arg:tape arguments arg arg arg arg arg If BoolOp Call Compare Raise Call Assign Compare Call If Call With If Call Call Assign Call If Call Assign Call Assign Call If Assign Call Assign Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_replace_ragged_with_flat_values",
    "source_code": "def _replace_ragged_with_flat_values(value, partition_lists, flat_values_nrows):\n    if ragged_tensor.is_ragged(value):\n        value = ragged_tensor.convert_to_tensor_or_ragged_tensor(value)\n        partition_lists.append(value._nested_row_partitions)\n        nrows = tensor_shape.dimension_at_index(value.flat_values.shape, 0).value\n        if nrows is not None:\n            flat_values_nrows.append(nrows)\n        return value.flat_values\n\n    def recurse(v):\n        return _replace_ragged_with_flat_values(v, partition_lists, flat_values_nrows)\n    if isinstance(value, list):\n        return [recurse(v) for v in value]\n    elif isinstance(value, tuple):\n        return tuple((recurse(v) for v in value))\n    elif isinstance(value, dict):\n        return dict(((k, recurse(v)) for k, v in value.items()))\n    else:\n        return value",
    "docstring": "Replace RaggedTensors with their flat_values, and record their partitions. Returns a copy of , with any nested s replaced by their tensor. Looks inside lists, tuples, and dicts. Appends each 's s to . Args: value: The value that should be transformed by replacing . partition_lists: An output parameter used to record the row partitions for any that were replaced. flat_values_nrows: An output parameter used to record the outer dimension size for each replacement (when known). Contains a list of int. Returns: A copy of with nested replaced by their .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\ragged_functional_ops.py",
    "ast_data": "FunctionDef name:_replace_ragged_with_flat_values arg:value arg:partition_lists arg:flat_values_nrows arguments arg arg arg If Call Assign Call Call Assign Call If Compare Call Return return:yes FunctionDef name:recurse arg:v arguments arg Return return:yes Call If Call Return return:yes Call If Call Return return:yes Call Call If Call Return return:yes Call Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "scatter_update",
    "source_code": "def scatter_update(self, sparse_delta, use_locking=False, name=None):\n    if not isinstance(sparse_delta, indexed_slices.IndexedSlices):\n        raise TypeError(f'Argument `sparse_delta` must be a `tf.IndexedSlices`. Received arg: {sparse_delta}')\n    return self._lazy_read(gen_resource_variable_ops.resource_scatter_update(self.handle, sparse_delta.indices, ops.convert_to_tensor(sparse_delta.values, self.dtype), name=name))",
    "docstring": "Assigns to this variable. Args: sparse_delta: to be assigned to this variable. use_locking: If , use locking during the operation. name: the name of the operation. Returns: The updated variable. Raises: TypeError: if is not an .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\resource_variable_ops.py",
    "ast_data": "FunctionDef name:scatter_update arg:self arg:sparse_delta arg:use_locking arg:name arguments arg arg arg arg If Call Raise Call Return return:yes Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "_free_low_precision_sharded_param",
    "source_code": "def _free_low_precision_sharded_param(self):\n    self._check_low_precision_shard()\n    _no_dispatch_record_stream(self.flat_param._mp_shard, self._device_handle.current_stream())\n    _free_storage(self.flat_param._mp_shard)",
    "docstring": "Frees the low precision sharded flat parameter.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\fsdp\\_flat_param.py",
    "ast_data": "FunctionDef name:_free_low_precision_sharded_param arg:self arguments arg Call Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "_get_artifact_urls",
    "source_code": "def _get_artifact_urls(prefix: str, workflow_run_id: int) -> dict[Path, str]:\n    response = requests.get(f'{PYTORCH_REPO}/actions/runs/{workflow_run_id}/artifacts?per_page=100', headers=_get_request_headers())\n    artifacts = response.json()['artifacts']\n    while 'next' in response.links.keys():\n        response = requests.get(response.links['next']['url'], headers=_get_request_headers())\n        artifacts.extend(response.json()['artifacts'])\n    artifact_urls = {}\n    for artifact in artifacts:\n        if artifact['name'].startswith(prefix):\n            artifact_urls[Path(artifact['name'])] = artifact['archive_download_url']\n    return artifact_urls",
    "docstring": "Get all workflow artifacts with 'test-report' in the name.",
    "type": "function",
    "file_path": "pytorch\\tools\\stats\\upload_stats_lib.py",
    "ast_data": "FunctionDef name:_get_artifact_urls arg:prefix arg:workflow_run_id arguments arg arg Assign Call Call Assign Call While Compare Call Assign Call Call Call Call Assign For If Call Assign Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "get_normal_points",
    "source_code": "def get_normal_points(cx, cy, cos_t, sin_t, length):\n    if length == 0.0:\n        return (cx, cy, cx, cy)\n    cos_t1, sin_t1 = (sin_t, -cos_t)\n    cos_t2, sin_t2 = (-sin_t, cos_t)\n    x1, y1 = (length * cos_t1 + cx, length * sin_t1 + cy)\n    x2, y2 = (length * cos_t2 + cx, length * sin_t2 + cy)\n    return (x1, y1, x2, y2)",
    "docstring": "For a line passing through (*cx*, *cy*) and having an angle *t*, return locations of the two points located along its perpendicular line at the distance of *length*.",
    "type": "function",
    "file_path": "matplotlib\\lib\\matplotlib\\bezier.py",
    "ast_data": "FunctionDef name:get_normal_points arg:cx arg:cy arg:cos_t arg:sin_t arg:length arguments arg arg arg arg arg If Compare Return return:yes Assign Assign Assign Assign Return return:yes"
  },
  {
    "library": "django",
    "name": "ogrinfo",
    "source_code": "def ogrinfo(data_source, num_features=10):\n    if isinstance(data_source, str):\n        data_source = DataSource(data_source)\n    elif isinstance(data_source, DataSource):\n        pass\n    else:\n        raise Exception('Data source parameter must be a string or a DataSource object.')\n    for i, layer in enumerate(data_source):\n        print('data source : %s' % data_source.name)\n        print('==== layer %s' % i)\n        print('  shape type: %s' % GEO_CLASSES[layer.geom_type.num].__name__)\n        print('  # features: %s' % len(layer))\n        print('         srs: %s' % layer.srs)\n        extent_tup = layer.extent.tuple\n        print('      extent: %s - %s' % (extent_tup[0:2], extent_tup[2:4]))\n        print('Displaying the first %s features ====' % num_features)\n        width = max(*map(len, layer.fields))\n        fmt = ' %%%ss: %%s' % width\n        for j, feature in enumerate(layer[:num_features]):\n            print('=== Feature %s' % j)\n            for fld_name in layer.fields:\n                type_name = feature[fld_name].type_name\n                output = fmt % (fld_name, type_name)\n                val = feature.get(fld_name)\n                if val:\n                    if isinstance(val, str):\n                        val_fmt = ' (\"%s\")'\n                    else:\n                        val_fmt = ' (%s)'\n                    output += val_fmt % val\n                else:\n                    output += ' (None)'\n                print(output)",
    "docstring": "Walk the available layers in the supplied , displaying the fields for the first features.",
    "type": "function",
    "file_path": "django\\django\\contrib\\gis\\utils\\ogrinfo.py",
    "ast_data": "FunctionDef name:ogrinfo arg:data_source arg:num_features arguments arg arg If Call Assign Call If Call Raise Call For Call Call Call Call Call Call Call Assign Call Call Assign Call Call Assign For Call Call For Assign Assign Assign Call If If Call Assign Assign Call"
  },
  {
    "library": "matplotlib",
    "name": "set_facecolor",
    "source_code": "def set_facecolor(self, color):\n    self.patch.set_facecolor(color)",
    "docstring": "Set the face color of the Figure rectangle. Parameters ---------- color : :mpltype:",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\figure.py",
    "ast_data": "FunctionDef name:set_facecolor arg:self arg:color arguments arg arg Call"
  },
  {
    "library": "pytorch",
    "name": "_get_record_key",
    "source_code": "def _get_record_key(record):\n    return (record.handle(), record.node_id())",
    "docstring": "Return a tuple for correlating start and end records in .",
    "type": "function",
    "file_path": "pytorch\\torch\\autograd\\profiler_legacy.py",
    "ast_data": "FunctionDef name:_get_record_key arg:record arguments arg Return return:yes Call Call"
  },
  {
    "library": "matplotlib",
    "name": "get_extent",
    "source_code": "def get_extent(self):\n    if self._extent is not None:\n        return self._extent\n    else:\n        sz = self.get_size()\n        numrows, numcols = sz\n        if self.origin == 'upper':\n            return (-0.5, numcols - 0.5, numrows - 0.5, -0.5)\n        else:\n            return (-0.5, numcols - 0.5, -0.5, numrows - 0.5)",
    "docstring": "Return the image extent as tuple (left, right, bottom, top).",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\image.py",
    "ast_data": "FunctionDef name:get_extent arg:self arguments arg If Compare Return return:yes Assign Call Assign If Compare Return return:yes Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_single_restore",
    "source_code": "def _single_restore(self):\n    trackable = self.trackable\n    trackable._maybe_initialize_trackable()\n    checkpoint = self.checkpoint\n    if checkpoint.restore_uid > trackable._update_uid:\n        restore_ops, tensor_saveables, python_positions, registered_savers = self.gather_ops_or_named_saveables()\n        trackable._update_uid = checkpoint.restore_uid\n    else:\n        restore_ops = ()\n        tensor_saveables = {}\n        python_positions = ()\n        registered_savers = {}\n    return (restore_ops, tensor_saveables, python_positions, registered_savers)",
    "docstring": "Restores the trackable.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\checkpoint\\restore.py",
    "ast_data": "FunctionDef name:_single_restore arg:self arguments arg Assign Call Assign If Compare Assign Call Assign Assign Assign Assign Assign Return return:yes"
  },
  {
    "library": "django",
    "name": "hand_clean_DELETE",
    "source_code": "def hand_clean_DELETE(self):\n    if self.cleaned_data.get(DELETION_FIELD_NAME, False):\n        using = router.db_for_write(self._meta.model)\n        collector = NestedObjects(using=using)\n        if self.instance._state.adding:\n            return\n        collector.collect([self.instance])\n        if collector.protected:\n            objs = []\n            for p in collector.protected:\n                objs.append(_('%(class_name)s %(instance)s') % {'class_name': p._meta.verbose_name, 'instance': p})\n            params = {'class_name': self._meta.model._meta.verbose_name, 'instance': self.instance, 'related_objects': get_text_list(objs, _('and'))}\n            msg = _('Deleting %(class_name)s %(instance)s would require deleting the following protected related objects: %(related_objects)s')\n            raise ValidationError(msg, code='deleting_protected', params=params)",
    "docstring": "We don't validate the 'DELETE' field itself because on templates it's not rendered using the field information, but just using a generic \"deletion_field\" of the InlineModelAdmin.",
    "type": "method",
    "file_path": "django\\django\\contrib\\admin\\options.py",
    "ast_data": "FunctionDef name:hand_clean_DELETE arg:self arguments arg If Call Assign Call Assign Call If Return return:no Call If Assign For Call Call Assign Call Call Assign Call Raise Call"
  },
  {
    "library": "pandas",
    "name": "cov",
    "source_code": "def cov(self, other: Series, min_periods: int | None=None, ddof: int | None=1) -> float:\n    this, other = self.align(other, join='inner')\n    if len(this) == 0:\n        return np.nan\n    this_values = this.to_numpy(dtype=float, na_value=np.nan, copy=False)\n    other_values = other.to_numpy(dtype=float, na_value=np.nan, copy=False)\n    return nanops.nancov(this_values, other_values, min_periods=min_periods, ddof=ddof)",
    "docstring": "Compute covariance with Series, excluding missing values. The two objects are not required to be the same length and will be aligned internally before the covariance is calculated. Parameters ---------- other : Series Series with which to compute the covariance. min_periods : int, optional Minimum number of observations needed to have a valid result. ddof : int, default 1 Delta degrees of freedom. The divisor used in calculations is `` represents the number of elements. Returns ------- float Covariance between Series and other normalized by N-1 (unbiased estimator). See Also -------- DataFrame.cov : Compute pairwise covariance of columns. Examples -------- >>> s1 = pd.Series([0.90010907, 0.13484424, 0.62036035]) >>> s2 = pd.Series([0.12528585, 0.26962463, 0.51111198]) >>> s1.cov(s2) -0.01685762652715874",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\series.py",
    "ast_data": "FunctionDef name:cov arg:self arg:other arg:min_periods arg:ddof arguments arg arg arg arg Assign Call If Compare Call Return return:yes Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "blknos",
    "source_code": "@property\ndef blknos(self) -> npt.NDArray[np.intp]:\n    if self._blknos is None:\n        self._rebuild_blknos_and_blklocs()\n    return self._blknos",
    "docstring": "Suppose we want to find the array corresponding to our i'th column. blknos[i] identifies the block from self.blocks that contains this column. blklocs[i] identifies the column of interest within self.blocks[self.blknos[i]]",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\internals\\managers.py",
    "ast_data": "FunctionDef name:blknos arg:self arguments arg If Compare Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_inplace_wrapper",
    "source_code": "def _inplace_wrapper(fn: Callable[_P, _T]) -> Callable[_P, _T]:\n\n    @wraps(fn)\n    def _fn(*args: _P.args, **kwargs: _P.kwargs) -> _T:\n        a = args[0]\n        if 'inplace' not in kwargs:\n            kwargs['inplace'] = False\n        if kwargs['inplace']:\n            torch._check('out' not in kwargs, lambda: 'Cannot set inplace=True and pass out= at the same time')\n            kwargs['inplace'] = False\n            kwargs['out'] = a\n            return fn(*args, **kwargs)\n        else:\n            return fn(*args, **kwargs)\n    return _fn",
    "docstring": "Given a nn.functional non-linearity, implements its argument",
    "type": "function",
    "file_path": "pytorch\\torch\\_refs\\nn\\functional\\__init__.py",
    "ast_data": "FunctionDef name:_inplace_wrapper arg:fn arguments arg FunctionDef name:_fn arguments arg arg Assign If Compare Assign If Call Compare arguments Assign Assign Return return:yes Call Return return:yes Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_unpack_hook_tp",
    "source_code": "def _unpack_hook_tp(mesh: DeviceMesh, input_reshard_dim: int, x: Any) -> torch.Tensor:\n    if isinstance(x, DTensor) and len(x._spec.placements) == 1 and x._spec.placements[0].is_shard():\n        return x.redistribute(device_mesh=mesh, placements=[Replicate()])\n    elif not isinstance(x, DTensor) and isinstance(x, torch.Tensor) and (x.numel() >= mesh.size()):\n        return DTensor.from_local(x, device_mesh=mesh, placements=[Shard(input_reshard_dim)]).redistribute(device_mesh=mesh, placements=[Replicate()]).to_local()\n    else:\n        return x",
    "docstring": "Hook function called before activation recomputing in BWD to restore input.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\tensor\\parallel\\input_reshard.py",
    "ast_data": "FunctionDef name:_unpack_hook_tp arg:mesh arg:input_reshard_dim arg:x arguments arg arg arg If BoolOp Call Compare Call Call Return return:yes Call Call If BoolOp Call Call Compare Call Call Return return:yes Call Call Call Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_write_object_proto",
    "source_code": "def _write_object_proto(self, proto, options):\n    resource_variable_ops.write_object_proto_for_resource_variable(self, proto, options)\n    values_util.write_object_proto(self, proto, options)",
    "docstring": "Update a SavedObject proto for the caller. If a DistributedVariable object supports this method, it will be called when saving with a pre-built proto representing the object, plus an instance of . This method is then free to modify that proto instance. with or synchronization optionally write out information about their components to the field of a (depending on the variable policy). Args: proto: A pre-built proto for this object. It is assumed this will be a instance. options: A instance.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\values.py",
    "ast_data": "FunctionDef name:_write_object_proto arg:self arg:proto arg:options arguments arg arg arg Call Call"
  },
  {
    "library": "pandas",
    "name": "_maybe_adjust_name",
    "source_code": "def _maybe_adjust_name(name: str, version: Sequence[int]) -> str:\n    if isinstance(version, str) or len(version) < 3:\n        raise ValueError('Version is incorrect, expected sequence of 3 integers.')\n    if version[0] == 0 and version[1] <= 10 and (version[2] == 0):\n        m = re.search('values_block_(\\\\d+)', name)\n        if m:\n            grp = m.groups()[0]\n            name = f'values_{grp}'\n    return name",
    "docstring": "Prior to 0.10.1, we named values blocks like: values_block_0 an the name values_0, adjust the given name if necessary. Parameters ---------- name : str version : Tuple[int, int, int] Returns ------- str",
    "type": "function",
    "file_path": "pandas\\pandas\\io\\pytables.py",
    "ast_data": "FunctionDef name:_maybe_adjust_name arg:name arg:version arguments arg arg If BoolOp Call Compare Call Raise Call If BoolOp Compare Compare Compare Assign Call If Assign Call Assign Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_StorageReaderTransforms",
    "source_code": "class _StorageReaderTransforms:\n\n    def __init__(self, extension_registry: Optional[ExtensionRegistry]=None) -> None:\n        self.extension_registry = ExtensionRegistry() if extension_registry is None else extension_registry\n\n    def transform_load_stream(self, read_item: ReadItem, transform_descriptors: Sequence[str], raw_stream: IO[bytes]) -> IO[bytes]:\n        extensions = self.extension_registry.from_descriptor_list(transform_descriptors)\n        transform_from = raw_stream\n        for ex in extensions:\n            if isinstance(ex, StreamTransformExtension):\n                transform_from = ex.transform_from(transform_from)\n        return transform_from",
    "docstring": "This is experimental, and will likely move elsewhere in the future. It lives here to minimize changes while we are still learning and gathering feedback.",
    "type": "class",
    "file_path": "pytorch\\torch\\distributed\\checkpoint\\filesystem.py",
    "ast_data": "ClassDef name:_StorageReaderTransforms FunctionDef name:__init__ arg:self arg:extension_registry arguments arg arg Assign Compare Call FunctionDef name:transform_load_stream arg:self arg:read_item arg:transform_descriptors arg:raw_stream arguments arg arg arg arg Assign Call Assign For If Call Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_LoopBodyCaptureWrapper",
    "source_code": "def _LoopBodyCaptureWrapper(func):\n\n    @function.Defun(*_GetInputDtypes(func), func_name='%s_Wrapper' % func.name)\n    def Wrapper(*args):\n        result = func(*args)\n        extra_args = tuple(function.get_extra_args())\n        if isinstance(result, ops.Operation):\n            return extra_args\n        elif not isinstance(result, (list, tuple)):\n            return (result,) + extra_args\n        else:\n            return result + type(result)(extra_args)\n    return Wrapper",
    "docstring": "Returns a wrapper for that handles loop-carried captured inputs.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\functional_ops.py",
    "ast_data": "FunctionDef name:_LoopBodyCaptureWrapper arg:func arguments arg FunctionDef name:Wrapper arguments arg Assign Call Assign Call Call If Call Return return:yes If Call Return return:yes Return return:yes Call Call Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "experimental_local_results",
    "source_code": "def experimental_local_results(self, value):\n    return self._extended._local_results(value)",
    "docstring": "Returns the list of all local per-replica values contained in . Note: This only returns values on the worker initiated by this client. When using a like , each worker will be its own client, and this function will only return values computed on that worker. Args: value: A value returned by , scopevaluevalue(value,).`",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\distribute_lib.py",
    "ast_data": "FunctionDef name:experimental_local_results arg:self arg:value arguments arg arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "load_optimizer_weights_from_hdf5_group",
    "source_code": "def load_optimizer_weights_from_hdf5_group(hdf5_group):\n    weights_group = hdf5_group['optimizer_weights']\n    optimizer_weight_names = load_attributes_from_hdf5_group(weights_group, 'weight_names')\n    return [weights_group[weight_name] for weight_name in optimizer_weight_names]",
    "docstring": "Load optimizer weights from a HDF5 group. Args: hdf5_group: A pointer to a HDF5 group. Returns: data: List of optimizer weight names.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\saving\\hdf5_format.py",
    "ast_data": "FunctionDef name:load_optimizer_weights_from_hdf5_group arg:hdf5_group arguments arg Assign Assign Call Return return:yes"
  },
  {
    "library": "django",
    "name": "ellipsoid",
    "source_code": "@property\ndef ellipsoid(self):\n    return (self.semi_major, self.semi_minor, self.inverse_flattening)",
    "docstring": "Return a tuple of the ellipsoid parameters: (semimajor axis, semiminor axis, and inverse flattening)",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\gdal\\srs.py",
    "ast_data": "FunctionDef name:ellipsoid arg:self arguments arg Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "column_or_1d",
    "source_code": "def column_or_1d(y, *, dtype=None, warn=False, device=None):\n    xp, _ = get_namespace(y)\n    y = check_array(y, ensure_2d=False, dtype=dtype, input_name='y', ensure_all_finite=False, ensure_min_samples=0)\n    shape = y.shape\n    if len(shape) == 1:\n        return _asarray_with_order(xp.reshape(y, (-1,)), order='C', xp=xp, device=device)\n    if len(shape) == 2 and shape[1] == 1:\n        if warn:\n            warnings.warn('A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples, ), for example using ravel().', DataConversionWarning, stacklevel=2)\n        return _asarray_with_order(xp.reshape(y, (-1,)), order='C', xp=xp, device=device)\n    raise ValueError('y should be a 1d array, got an array of shape {} instead.'.format(shape))",
    "docstring": "Ravel column or 1d numpy array, else raises an error. Parameters ---------- y : array-like Input data. dtype : data-type, default=None Data type for . .. versionadded:: 1.2 warn : bool, default=False To control display of warnings. device : device, default=None object. See the :ref: for more details. .. versionadded:: 1.6 Returns ------- y : ndarray Output data. Raises ------ ValueError If is not a 1D array or a 2D array with a single row or column. Examples -------- >>> from sklearn.utils.validation import column_or_1d >>> column_or_1d([1, 1]) array([1, 1])",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\utils\\validation.py",
    "ast_data": "FunctionDef name:column_or_1d arg:y arguments arg arg arg arg Assign Call Assign Call Assign If Compare Call Return return:yes Call Call If BoolOp Compare Call Compare If Call Return return:yes Call Call Raise Call Call"
  },
  {
    "library": "matplotlib",
    "name": "get_fontsize",
    "source_code": "def get_fontsize(self):\n    return self._text.get_fontsize()",
    "docstring": "Return the cell fontsize.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\table.py",
    "ast_data": "FunctionDef name:get_fontsize arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_num_replicas_in_sync",
    "source_code": "@property\ndef _num_replicas_in_sync(self):\n    raise NotImplementedError('must be implemented in descendants')",
    "docstring": "Returns number of replicas over which gradients are aggregated.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\distribute_lib.py",
    "ast_data": "FunctionDef name:_num_replicas_in_sync arg:self arguments arg Raise Call"
  },
  {
    "library": "django",
    "name": "clone",
    "source_code": "def clone(self):\n    new_state = ProjectState(models={k: v.clone() for k, v in self.models.items()}, real_apps=self.real_apps)\n    if 'apps' in self.__dict__:\n        new_state.apps = self.apps.clone()\n    new_state.is_delayed = self.is_delayed\n    return new_state",
    "docstring": "Return an exact copy of this ProjectState.",
    "type": "method",
    "file_path": "django\\django\\db\\migrations\\state.py",
    "ast_data": "FunctionDef name:clone arg:self arguments arg Assign Call Call Call If Compare Assign Call Assign Return return:yes"
  },
  {
    "library": "pandas",
    "name": "_insert_quantile_level",
    "source_code": "def _insert_quantile_level(idx: Index, qs: npt.NDArray[np.float64]) -> MultiIndex:\n    nqs = len(qs)\n    lev_codes, lev = Index(qs).factorize()\n    lev_codes = coerce_indexer_dtype(lev_codes, lev)\n    if idx._is_multi:\n        idx = cast(MultiIndex, idx)\n        levels = list(idx.levels) + [lev]\n        codes = [np.repeat(x, nqs) for x in idx.codes] + [np.tile(lev_codes, len(idx))]\n        mi = MultiIndex(levels=levels, codes=codes, names=idx.names + [None])\n    else:\n        nidx = len(idx)\n        idx_codes = coerce_indexer_dtype(np.arange(nidx), idx)\n        levels = [idx, lev]\n        codes = [np.repeat(idx_codes, nqs), np.tile(lev_codes, nidx)]\n        mi = MultiIndex(levels=levels, codes=codes, names=[idx.name, None])\n    return mi",
    "docstring": "Insert the sequence 'qs' of quantiles as the inner-most level of a MultiIndex. The quantile level in the MultiIndex is a repeated copy of 'qs'. Parameters ---------- idx : Index qs : np.ndarray[float64] Returns ------- MultiIndex",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\groupby\\groupby.py",
    "ast_data": "FunctionDef name:_insert_quantile_level arg:idx arg:qs arguments arg arg Assign Call Assign Call Call Assign Call If Assign Call Assign Call Assign Call Call Call Assign Call Assign Call Assign Call Call Assign Assign Call Call Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "set_signature",
    "source_code": "def set_signature(self, signature_map: signature_serialization._SignatureMap, wrapped_functions: Dict[Callable[..., Any], Callable[..., Any]]):\n    self.list_children(self.root)\n    name = signature_serialization.SIGNATURE_ATTRIBUTE_NAME\n    self._children_cache[self.root][name] = signature_map\n    self._wrapped_functions.update(wrapped_functions)",
    "docstring": "Attach signature to the root object. Args: signature_map: An object that contains signature functions. wrapped_functions: A dictionary mapping functions to functions that are guaranteed to not capture cached variables (functions that capture cached variables can't be saved).",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\saved_model\\save.py",
    "ast_data": "FunctionDef name:set_signature arg:self arg:signature_map arg:wrapped_functions arguments arg arg arg Call Assign Assign Call"
  },
  {
    "library": "pytorch",
    "name": "__repr__",
    "source_code": "def __repr__(self) -> str:\n    return 'Replicate()'",
    "docstring": "machine readable representation of the Replicate placement",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\tensor\\placement_types.py",
    "ast_data": "FunctionDef name:__repr__ arg:self arguments arg Return return:yes"
  },
  {
    "library": "scipy",
    "name": "_asarray_square",
    "source_code": "def _asarray_square(A):\n    A = np.asarray(A)\n    if len(A.shape) != 2 or A.shape[0] != A.shape[1]:\n        raise ValueError('expected square array_like input')\n    return A",
    "docstring": "Wraps asarray with the extra requirement that the input be a square matrix. The motivation is that the matfuncs module has real functions that have been lifted to square matrix functions. Parameters ---------- A : array_like A square matrix. Returns ------- out : ndarray An ndarray copy or view or other representation of A.",
    "type": "function",
    "file_path": "scipy\\scipy\\linalg\\_matfuncs.py",
    "ast_data": "FunctionDef name:_asarray_square arg:A arguments arg Assign Call If BoolOp Compare Call Compare Raise Call Return return:yes"
  },
  {
    "library": "django",
    "name": "format_number",
    "source_code": "def format_number(value, max_digits, decimal_places):\n    if value is None:\n        return None\n    context = decimal.getcontext().copy()\n    if max_digits is not None:\n        context.prec = max_digits\n    if decimal_places is not None:\n        value = value.quantize(decimal.Decimal(1).scaleb(-decimal_places), context=context)\n    else:\n        context.traps[decimal.Rounded] = 1\n        value = context.create_decimal(value)\n    return '{:f}'.format(value)",
    "docstring": "Format a number into a string with the requisite number of digits and decimal places.",
    "type": "function",
    "file_path": "django\\django\\db\\backends\\utils.py",
    "ast_data": "FunctionDef name:format_number arg:value arg:max_digits arg:decimal_places arguments arg arg arg If Compare Return return:no Assign Call Call If Compare Assign If Compare Assign Call Call Call Assign Assign Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, layer, trainable):\n    self._trainable = trainable\n    self._layer = layer\n    if self._layer is not None and (not hasattr(self._layer, '_resources')):\n        self._layer._resources = data_structures.Mapping()\n    self._cols_to_vars_map = collections.defaultdict(lambda: {})\n    self._cols_to_resources_map = collections.defaultdict(lambda: {})",
    "docstring": "Creates an _StateManagerImpl object. Args: layer: The input layer this state manager is associated with. trainable: Whether by default, variables created are trainable or not.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\feature_column\\feature_column_v2.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:layer arg:trainable arguments arg arg arg Assign Assign If BoolOp Compare Call Assign Call Assign Call arguments Assign Call arguments"
  },
  {
    "library": "tensorflow",
    "name": "_get_handle_mover",
    "source_code": "def _get_handle_mover(graph, feeder, handle):\n    dtype = _get_handle_feeder(graph, feeder)\n    if dtype is None:\n        return None\n    handle_device = TensorHandle._get_device_name(handle)\n    if feeder.op.device == handle_device:\n        return None\n    graph_key = TensorHandle._get_mover_key(feeder, handle)\n    result = graph._handle_movers.get(graph_key)\n    if result is None:\n        holder, reader = _get_handle_reader(graph, handle, dtype)\n        with graph.as_default(), graph.device(feeder.op.device):\n            mover = gen_data_flow_ops.get_session_handle(reader)\n        result = (holder, mover)\n        graph._handle_movers[graph_key] = result\n    return result",
    "docstring": "Return a move subgraph for this pair of feeder and handle.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\session_ops.py",
    "ast_data": "FunctionDef name:_get_handle_mover arg:graph arg:feeder arg:handle arguments arg arg arg Assign Call If Compare Return return:no Assign Call If Compare Return return:no Assign Call Assign Call If Compare Assign Call With Call Call Assign Call Assign Assign Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "is_current_stream_capturing",
    "source_code": "def is_current_stream_capturing():\n    return _cuda_isCurrentStreamCapturing()",
    "docstring": "Return True if CUDA graph capture is underway on the current CUDA stream, False otherwise. If a CUDA context does not exist on the current device, returns False without initializing the context.",
    "type": "function",
    "file_path": "pytorch\\torch\\cuda\\graphs.py",
    "ast_data": "FunctionDef name:is_current_stream_capturing arguments Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "__init__",
    "source_code": "def __init__(self, maxpool_result, input_var, kernel, padding, stride, dilation, matching_constraint_vars):\n    self.maxpool_result = maxpool_result\n    self.input_var = input_var\n    self.kernel = kernel\n    self.padding = padding\n    self.stride = stride\n    self.dilation = dilation\n    self.matching_constraint = matching_constraint_vars",
    "docstring": ":param maxpool_result: the result of maxpool :param input_var: input to convolution :param kernel: kernel tuple",
    "type": "method",
    "file_path": "pytorch\\torch\\fx\\experimental\\migrate_gradual_types\\constraint.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:maxpool_result arg:input_var arg:kernel arg:padding arg:stride arg:dilation arg:matching_constraint_vars arguments arg arg arg arg arg arg arg arg Assign Assign Assign Assign Assign Assign Assign"
  },
  {
    "library": "scipy",
    "name": "sum",
    "source_code": "def sum(input, labels=None, index=None):\n    return sum_labels(input, labels, index)",
    "docstring": "Calculate the sum of the values of the array. Notes ----- This is an alias for kept for backwards compatibility reasons, for new code please prefer . See the docstring for more details.",
    "type": "function",
    "file_path": "scipy\\scipy\\ndimage\\_measurements.py",
    "ast_data": "FunctionDef name:sum arg:input arg:labels arg:index arguments arg arg arg Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "datestr2num",
    "source_code": "def datestr2num(d, default=None):\n    if isinstance(d, str):\n        dt = dateutil.parser.parse(d, default=default)\n        return date2num(dt)\n    else:\n        if default is not None:\n            d = [date2num(dateutil.parser.parse(s, default=default)) for s in d]\n            return np.asarray(d)\n        d = np.asarray(d)\n        if not d.size:\n            return d\n        return date2num(_dateutil_parser_parse_np_vectorized(d))",
    "docstring": "Convert a date string to a datenum using . Parameters ---------- d : str or sequence of str The dates to convert. default : datetime.datetime, optional The default date to use when fields are missing in *d*.",
    "type": "function",
    "file_path": "matplotlib\\lib\\matplotlib\\dates.py",
    "ast_data": "FunctionDef name:datestr2num arg:d arg:default arguments arg arg If Call Assign Call Return return:yes Call If Compare Assign Call Call Return return:yes Call Assign Call If Return return:yes Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "graph_def",
    "source_code": "@property\ndef graph_def(self):\n    return self._graph_def",
    "docstring": "The graph to be converted.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\convert_to_constants.py",
    "ast_data": "FunctionDef name:graph_def arg:self arguments arg Return return:yes"
  },
  {
    "library": "numpy",
    "name": "_flatmask",
    "source_code": "def _flatmask(mask):\n    mnames = mask.dtype.names\n    if mnames is not None:\n        return [flatten_mask(mask[name]) for name in mnames]\n    else:\n        return mask",
    "docstring": "Flatten the mask and returns a (maybe nested) sequence of booleans.",
    "type": "function",
    "file_path": "numpy\\numpy\\ma\\core.py",
    "ast_data": "FunctionDef name:_flatmask arg:mask arguments arg Assign If Compare Return return:yes Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "set_facecolor",
    "source_code": "def set_facecolor(self, color):\n    self._facecolor = color\n    self.stale = True\n    return self.patch.set_facecolor(color)",
    "docstring": "Set the facecolor of the Axes. Parameters ---------- color : :mpltype:",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\axes\\_base.py",
    "ast_data": "FunctionDef name:set_facecolor arg:self arg:color arguments arg arg Assign Assign Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "get_metadata_routing",
    "source_code": "def get_metadata_routing(self):\n    router = MetadataRouter(owner=self.__class__.__name__).add_self_request(self).add(estimator=self.estimator, method_mapping=MethodMapping().add(caller='fit', callee='fit').add(caller='partial_fit', callee='partial_fit'))\n    return router",
    "docstring": "Get metadata routing of this object. Please check :ref: on how the routing mechanism works. .. versionadded:: 1.4 Returns ------- routing : MetadataRouter A :class: encapsulating routing information.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\multiclass.py",
    "ast_data": "FunctionDef name:get_metadata_routing arg:self arguments arg Assign Call Call Call Call Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "Client",
    "source_code": "@tf_export('distribute.experimental.rpc.Client', v1=[])\nclass Client(object):\n\n    @staticmethod\n    def create(rpc_layer, address, name='', timeout_in_ms=0):\n        if rpc_layer != 'grpc':\n            raise ValueError('Only GRPC backend is supported at the moment.')\n        if context.executing_eagerly():\n            list_registered_methods = True\n        else:\n            list_registered_methods = False\n        return GrpcClient(address=address, name=name, list_registered_methods=list_registered_methods, timeout_in_ms=timeout_in_ms)\n\n    def call(self, method_name: str, args: Optional[Sequence[core_tf_types.Tensor]]=None, output_specs=None, timeout_in_ms=0):\n        raise NotImplementedError('Must be implemented in inherited classes.')",
    "docstring": "Client class for invoking RPCs to the server.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\experimental\\rpc\\rpc_ops.py",
    "ast_data": "ClassDef name:Client FunctionDef name:create arg:rpc_layer arg:address arg:name arg:timeout_in_ms arguments arg arg arg arg If Compare Raise Call If Call Assign Assign Return return:yes Call FunctionDef name:call arg:self arg:method_name arg:args arg:output_specs arg:timeout_in_ms arguments arg arg arg arg arg Raise Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_should_convert",
    "source_code": "def _should_convert(self, name):\n    return (self._variable_names_allowlist is None or name in self._variable_names_allowlist) and (self._variable_names_denylist is None or name not in self._variable_names_denylist)",
    "docstring": "Checks whether to convert the given variable name to a constant.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\convert_to_constants.py",
    "ast_data": "FunctionDef name:_should_convert arg:self arg:name arguments arg arg Return return:yes BoolOp BoolOp Compare Compare BoolOp Compare Compare"
  },
  {
    "library": "scikit-learn",
    "name": "OneClassSVM",
    "source_code": "class OneClassSVM(OutlierMixin, BaseLibSVM):\n    _impl = 'one_class'\n    _parameter_constraints: dict = {**BaseLibSVM._parameter_constraints}\n    for unused_param in ['C', 'class_weight', 'epsilon', 'probability', 'random_state']:\n        _parameter_constraints.pop(unused_param)\n\n    def __init__(self, *, kernel='rbf', degree=3, gamma='scale', coef0=0.0, tol=0.001, nu=0.5, shrinking=True, cache_size=200, verbose=False, max_iter=-1):\n        super().__init__(kernel, degree, gamma, coef0, tol, 0.0, nu, 0.0, shrinking, False, cache_size, None, verbose, max_iter, random_state=None)\n\n    def fit(self, X, y=None, sample_weight=None):\n        super().fit(X, np.ones(_num_samples(X)), sample_weight=sample_weight)\n        self.offset_ = -self._intercept_\n        return self\n\n    def decision_function(self, X):\n        dec = self._decision_function(X).ravel()\n        return dec\n\n    def score_samples(self, X):\n        return self.decision_function(X) + self.offset_\n\n    def predict(self, X):\n        y = super().predict(X)\n        return np.asarray(y, dtype=np.intp)",
    "docstring": "Unsupervised Outlier Detection. Estimate the support of a high-dimensional distribution. The implementation is based on libsvm. Read more in the :ref:. Parameters ---------- kernel : {'linear', 'poly', 'rbf', 'sigmoid', 'precomputed'} or callable, default='rbf' Specifies the kernel type to be used in the algorithm. If none is given, 'rbf' will be used. If a callable is given it is used to precompute the kernel matrix. degree : int, default=3 Degree of the polynomial kernel function ('poly'). Must be non-negative. Ignored by all other kernels. gamma : {'scale', 'auto'} or float, default='scale' Kernel coefficient for 'rbf', 'poly' and 'sigmoid'. - if `User Guide coef_dual_coef_support_vectors_fitn_features_in_fitXoffset_intercept_sphx_glr_auto_examples_applications_plot_species_distribution_modeling.py`",
    "type": "class",
    "file_path": "scikit-learn\\sklearn\\svm\\_classes.py",
    "ast_data": "ClassDef name:OneClassSVM Assign For Call FunctionDef name:__init__ arg:self arguments arg arg arg arg arg arg arg arg arg arg arg Call Call FunctionDef name:fit arg:self arg:X arg:y arg:sample_weight arguments arg arg arg arg Call Call Call Call Assign Return return:yes FunctionDef name:decision_function arg:self arg:X arguments arg arg Assign Call Call Return return:yes FunctionDef name:score_samples arg:self arg:X arguments arg arg Return return:yes Call FunctionDef name:predict arg:self arg:X arguments arg arg Assign Call Call Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "hash_tuples",
    "source_code": "def hash_tuples(vals: MultiIndex | Iterable[tuple[Hashable, ...]], encoding: str='utf8', hash_key: str=_default_hash_key) -> npt.NDArray[np.uint64]:\n    if not is_list_like(vals):\n        raise TypeError('must be convertible to a list-of-tuples')\n    from pandas import Categorical, MultiIndex\n    if not isinstance(vals, ABCMultiIndex):\n        mi = MultiIndex.from_tuples(vals)\n    else:\n        mi = vals\n    cat_vals = [Categorical._simple_new(mi.codes[level], CategoricalDtype(categories=mi.levels[level], ordered=False)) for level in range(mi.nlevels)]\n    hashes = (cat._hash_pandas_object(encoding=encoding, hash_key=hash_key, categorize=False) for cat in cat_vals)\n    h = combine_hash_arrays(hashes, len(cat_vals))\n    return h",
    "docstring": "Hash an MultiIndex / listlike-of-tuples efficiently. Parameters ---------- vals : MultiIndex or listlike-of-tuples encoding : str, default 'utf8' hash_key : str, default _default_hash_key Returns ------- ndarray[np.uint64] of hashed values",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\util\\hashing.py",
    "ast_data": "FunctionDef name:hash_tuples arg:vals arg:encoding arg:hash_key arguments arg arg arg If Call Raise Call If Call Assign Call Assign Assign Call Call Call Assign Call Assign Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "register_dispatchable_type",
    "source_code": "def register_dispatchable_type(cls):\n    _api_dispatcher.register_dispatchable_type(cls)\n    return cls",
    "docstring": "Class decorator that registers a type for use with type-based dispatch. Should *not* be used with subclasses of or (which are automatically registered). Note: this function is intended to support internal legacy use cases (such as RaggedTensorValue), and will probably not be exposed as a public API. Args: cls: The class to register. Returns: .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\util\\dispatch.py",
    "ast_data": "FunctionDef name:register_dispatchable_type arg:cls arguments arg Call Return return:yes"
  },
  {
    "library": "scipy",
    "name": "_float_formatter_10",
    "source_code": "def _float_formatter_10(x):\n    if np.isposinf(x):\n        return '       inf'\n    elif np.isneginf(x):\n        return '      -inf'\n    elif np.isnan(x):\n        return '       nan'\n    return np.format_float_scientific(x, precision=3, pad_left=2, unique=False)",
    "docstring": "Returns a string representation of a float with exactly ten characters",
    "type": "function",
    "file_path": "scipy\\scipy\\_lib\\_util.py",
    "ast_data": "FunctionDef name:_float_formatter_10 arg:x arguments arg If Call Return return:yes If Call Return return:yes If Call Return return:yes Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "to_jshtml",
    "source_code": "def to_jshtml(self, fps=None, embed_frames=True, default_mode=None):\n    if fps is None and hasattr(self, '_interval'):\n        fps = 1000 / self._interval\n    if default_mode is None:\n        default_mode = 'loop' if getattr(self, '_repeat', False) else 'once'\n    if not hasattr(self, '_html_representation'):\n        with TemporaryDirectory() as tmpdir:\n            path = Path(tmpdir, 'temp.html')\n            writer = HTMLWriter(fps=fps, embed_frames=embed_frames, default_mode=default_mode)\n            self.save(str(path), writer=writer)\n            self._html_representation = path.read_text()\n    return self._html_representation",
    "docstring": "Generate HTML representation of the animation. Parameters ---------- fps : int, optional Movie frame rate (per second). If not set, the frame rate from the animation's frame interval. embed_frames : bool, optional default_mode : str, optional What to do when the animation ends. Must be one of `.HTMLWriter`.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\animation.py",
    "ast_data": "FunctionDef name:to_jshtml arg:self arg:fps arg:embed_frames arg:default_mode arguments arg arg arg arg If BoolOp Compare Call Assign If Compare Assign Call If Call With Call Assign Call Assign Call Call Call Assign Call Return return:yes"
  },
  {
    "library": "cherrypy",
    "name": "MethodDispatcher",
    "source_code": "class MethodDispatcher(Dispatcher):\n\n    def __call__(self, path_info):\n        request = cherrypy.serving.request\n        resource, vpath = self.find_handler(path_info)\n        if resource:\n            avail = [m for m in dir(resource) if m.isupper()]\n            if 'GET' in avail and 'HEAD' not in avail:\n                avail.append('HEAD')\n            avail.sort()\n            cherrypy.serving.response.headers['Allow'] = ', '.join(avail)\n            meth = request.method.upper()\n            func = getattr(resource, meth, None)\n            if func is None and meth == 'HEAD':\n                func = getattr(resource, 'GET', None)\n            if func:\n                if hasattr(func, '_cp_config'):\n                    request.config.update(func._cp_config)\n                vpath = [x.replace('%2F', '/') for x in vpath]\n                request.handler = LateParamPageHandler(func, *vpath)\n            else:\n                request.handler = cherrypy.HTTPError(405)\n        else:\n            request.handler = cherrypy.NotFound()",
    "docstring": "Additional dispatch based on cherrypy.request.method.upper(). Methods named GET, POST, etc will be called on an exposed class. The method names must be all caps; the appropriate Allow header will be output showing all capitalized method names as allowable HTTP verbs. Note that the containing class must be exposed, not the methods.",
    "type": "class",
    "file_path": "cherrypy\\cherrypy\\_cpdispatch.py",
    "ast_data": "ClassDef name:MethodDispatcher FunctionDef name:__call__ arg:self arg:path_info arguments arg arg Assign Assign Call If Assign Call Call If BoolOp Compare Compare Call Call Assign Call Assign Call Assign Call If BoolOp Compare Compare Assign Call If If Call Call Assign Call Assign Call Assign Call Assign Call"
  },
  {
    "library": "pytorch",
    "name": "gelu",
    "source_code": "@register_decomposition(aten.gelu)\n@out_wrapper()\n@elementwise_unary_scalar_wrapper\n@elementwise_type_promotion_wrapper(type_promoting_args=('a',), type_promotion_kind=utils.ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT)\ndef gelu(a: TensorLikeType, approximate: str='none') -> TensorLikeType:\n    if not isinstance(a, TensorLike):\n        raise RuntimeError('Expected a tensor input for an elementwise unary operation!')\n    M_SQRT2 = 1.4142135623730951\n    M_SQRT1_2 = 0.7071067811865476\n    M_2_SQRTPI = 1.1283791670955126\n    if approximate == 'tanh':\n        kBeta = M_SQRT2 * M_2_SQRTPI * 0.5\n        kKappa = 0.044715\n        a_cube = a * a * a\n        inner = kBeta * (a + kKappa * a_cube)\n        return 0.5 * a * (1 + torch.tanh(inner))\n    elif approximate == 'none':\n        kAlpha = M_SQRT1_2\n        return a * 0.5 * (1 + torch.erf(a * kAlpha))\n    else:\n        raise RuntimeError('approximate argument must be either none or tanh.')",
    "docstring": "Reference implementation of torch.nn.functional.gelu",
    "type": "function",
    "file_path": "pytorch\\torch\\_refs\\nn\\functional\\__init__.py",
    "ast_data": "FunctionDef name:gelu arg:a arg:approximate arguments arg arg If Call Raise Call Assign Assign Assign If Compare Assign Assign Assign Assign Return return:yes Call If Compare Assign Return return:yes Call Raise Call Call Call Call"
  },
  {
    "library": "pandas",
    "name": "_handle_indexes",
    "source_code": "@final\ndef _handle_indexes(self) -> None:\n    if not self.index:\n        return\n    first_key = next(iter(self.frame_dicts))\n    indexes: list[str] = [x for x in self.frame_dicts[first_key].keys() if x not in self.orig_cols]\n    if self.attr_cols:\n        self.attr_cols = indexes + self.attr_cols\n    if self.elem_cols:\n        self.elem_cols = indexes + self.elem_cols",
    "docstring": "Handle indexes. This method will add indexes into attr_cols or elem_cols.",
    "type": "method",
    "file_path": "pandas\\pandas\\io\\formats\\xml.py",
    "ast_data": "FunctionDef name:_handle_indexes arg:self arguments arg If Return return:no Assign Call Call Call Compare If Assign If Assign"
  },
  {
    "library": "tensorflow",
    "name": "_SkipDataset",
    "source_code": "class _SkipDataset(dataset_ops.UnaryUnchangedStructureDataset):\n\n    def __init__(self, input_dataset, count, name=None):\n        self._input_dataset = input_dataset\n        self._count = ops.convert_to_tensor(count, dtype=dtypes.int64, name='count')\n        self._name = name\n        variant_tensor = gen_dataset_ops.skip_dataset(input_dataset._variant_tensor, count=self._count, **self._common_args)\n        super().__init__(input_dataset, variant_tensor)",
    "docstring": "A skipping the first elements from its input.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\data\\ops\\skip_op.py",
    "ast_data": "ClassDef name:_SkipDataset FunctionDef name:__init__ arg:self arg:input_dataset arg:count arg:name arguments arg arg arg arg Assign Assign Call Assign Assign Call Call Call"
  },
  {
    "library": "django",
    "name": "envelope",
    "source_code": "@property\ndef envelope(self):\n    return self._topology(capi.geos_envelope(self.ptr))",
    "docstring": "Return the envelope for this geometry (a polygon).",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\geos\\geometry.py",
    "ast_data": "FunctionDef name:envelope arg:self arguments arg Return return:yes Call Call"
  },
  {
    "library": "matplotlib",
    "name": "set_width",
    "source_code": "def set_width(self, width):\n    self._width = width\n    self.stale = True",
    "docstring": "Set the width of the ellipse. Parameters ---------- width : float",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\patches.py",
    "ast_data": "FunctionDef name:set_width arg:self arg:width arguments arg arg Assign Assign"
  },
  {
    "library": "seaborn",
    "name": "mpl_palette",
    "source_code": "def mpl_palette(name, n_colors=6, as_cmap=False):\n    if name.endswith('_d'):\n        sub_name = name[:-2]\n        if sub_name.endswith('_r'):\n            reverse = True\n            sub_name = sub_name[:-2]\n        else:\n            reverse = False\n        pal = color_palette(sub_name, 2) + ['#333333']\n        if reverse:\n            pal = pal[::-1]\n        cmap = blend_palette(pal, n_colors, as_cmap=True)\n    else:\n        cmap = get_colormap(name)\n    if name in MPL_QUAL_PALS:\n        bins = np.linspace(0, 1, MPL_QUAL_PALS[name])[:n_colors]\n    else:\n        bins = np.linspace(0, 1, int(n_colors) + 2)[1:-1]\n    palette = list(map(tuple, cmap(bins)[:, :3]))\n    if as_cmap:\n        return cmap\n    else:\n        return _ColorPalette(palette)",
    "docstring": "Return a palette or colormap from the matplotlib registry. For continuous palettes, evenly-spaced discrete samples are chosen while excluding the minimum and maximum value in the colormap to provide better contrast at the extremes. For qualitative palettes (e.g. those from colorbrewer), exact values are indexed (rather than interpolated), but fewer than can be returned if the palette does not define that many. Parameters ---------- name : string Name of the palette. This should be a named matplotlib colormap. n_colors : int Number of discrete colors in the palette. Returns ------- list of RGB tuples or :class: Examples -------- .. include:: ../docstrings/mpl_palette.rst",
    "type": "function",
    "file_path": "seaborn\\seaborn\\palettes.py",
    "ast_data": "FunctionDef name:mpl_palette arg:name arg:n_colors arg:as_cmap arguments arg arg arg If Call Assign If Call Assign Assign Assign Assign Call If Assign Assign Call Assign Call If Compare Assign Call Assign Call Call Assign Call Call Call If Return return:yes Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "save_args_for_compile_fx_inner",
    "source_code": "def save_args_for_compile_fx_inner(*args: Any, **kwargs: Any) -> None:\n    folder = '/tmp/inductor_saved_args'\n    if not os.path.exists(folder):\n        os.mkdir(folder)\n\n    def handle_tensor(x: Any) -> Any:\n        if isinstance(x, torch.Tensor):\n            return TensorMetadataHolder(_extract_tensor_metadata(x), x.device)\n        else:\n            return x\n    args_to_save, kwargs_to_save = tree_map(handle_tensor, (args, kwargs))\n    fn_name = 'compile_fx_inner'\n    path = f'{folder}/{fn_name}_{next(save_args_cnt)}.pkl'\n    with open(path, 'wb') as f:\n        pickle.dump((args_to_save, kwargs_to_save), f)\n    if log.isEnabledFor(logging.DEBUG):\n        message = f'\\nArguments for a compile_fx_inner call is saved to {path}. To replay the call,\\nrun the following:\\n\\nfrom torch._inductor.debug import load_args_and_run_compile_fx_inner\\nload_args_and_run_compile_fx_inner({path!r})\\n        '\n        print(message)",
    "docstring": "This function is used to save arguments for a compile_fx_inner function call to the file system. Later on one can replay the compile_fx_inner call with the saved arguments using load_args_and_run_compile_fx_inner.",
    "type": "function",
    "file_path": "pytorch\\torch\\_inductor\\debug.py",
    "ast_data": "FunctionDef name:save_args_for_compile_fx_inner arguments arg arg Assign If Call Call FunctionDef name:handle_tensor arg:x arguments arg If Call Return return:yes Call Call Return return:yes Assign Call Assign Assign Call With Call Call If Call Assign Call"
  },
  {
    "library": "django",
    "name": "_create_user",
    "source_code": "def _create_user(self, username, email, password, **extra_fields):\n    user = self._create_user_object(username, email, password, **extra_fields)\n    user.save(using=self._db)\n    return user",
    "docstring": "Create and save a user with the given username, email, and password.",
    "type": "method",
    "file_path": "django\\django\\contrib\\auth\\models.py",
    "ast_data": "FunctionDef name:_create_user arg:self arg:username arg:email arg:password arguments arg arg arg arg arg Assign Call Call Return return:yes"
  },
  {
    "library": "django",
    "name": "join",
    "source_code": "def join(self, join, reuse=None):\n    reuse_aliases = [a for a, j in self.alias_map.items() if (reuse is None or a in reuse) and j == join]\n    if reuse_aliases:\n        if join.table_alias in reuse_aliases:\n            reuse_alias = join.table_alias\n        else:\n            reuse_alias = reuse_aliases[-1]\n        self.ref_alias(reuse_alias)\n        return reuse_alias\n    alias, _ = self.table_alias(join.table_name, create=True, filtered_relation=join.filtered_relation)\n    if join.join_type:\n        if self.alias_map[join.parent_alias].join_type == LOUTER or join.nullable:\n            join_type = LOUTER\n        else:\n            join_type = INNER\n        join.join_type = join_type\n    join.table_alias = alias\n    self.alias_map[alias] = join\n    if (filtered_relation := join.filtered_relation):\n        resolve_reuse = reuse\n        if resolve_reuse is not None:\n            resolve_reuse = set(reuse) | {alias}\n        joins_len = len(self.alias_map)\n        join.filtered_relation = filtered_relation.resolve_expression(self, reuse=resolve_reuse)\n        if joins_len < len(self.alias_map):\n            self.alias_map[alias] = self.alias_map.pop(alias)\n    return alias",
    "docstring": "Return an alias for the 'join', either reusing an existing alias for that join or creating a new one. 'join' is either a base_table_class or join_class. The 'reuse' parameter can be either None which means all joins are reusable, or it can be a set containing the aliases that can be reused. A join is always created as LOUTER if the lhs alias is LOUTER to make sure chains like t1 LOUTER t2 INNER t3 aren't generated. All new joins are created as LOUTER if the join is nullable.",
    "type": "method",
    "file_path": "django\\django\\db\\models\\sql\\query.py",
    "ast_data": "FunctionDef name:join arg:self arg:join arg:reuse arguments arg arg arg Assign Call BoolOp BoolOp Compare Compare Compare If If Compare Assign Assign Call Return return:yes Assign Call If If BoolOp Compare Assign Assign Assign Assign Assign If Assign If Compare Assign Call Assign Call Assign Call If Compare Call Assign Call Return return:yes"
  },
  {
    "library": "kornia",
    "name": "extract_patches_simple",
    "source_code": "def extract_patches_simple(img: Tensor, laf: Tensor, PS: int=32, normalize_lafs_before_extraction: bool=True) -> Tensor:\n    KORNIA_CHECK_LAF(laf)\n    if normalize_lafs_before_extraction:\n        nlaf = normalize_laf(laf, img)\n    else:\n        nlaf = laf\n    _, ch, h, w = img.size()\n    B, N, _, _ = laf.size()\n    out = []\n    for i in range(B):\n        grid = generate_patch_grid_from_normalized_LAF(img[i:i + 1], nlaf[i:i + 1], PS).to(img.device)\n        out.append(F.grid_sample(img[i:i + 1].expand(grid.size(0), ch, h, w), grid, padding_mode='border', align_corners=False))\n    return concatenate(out, dim=0).view(B, N, ch, PS, PS)",
    "docstring": "Extract patches defined by LAFs from image tensor. No smoothing applied, huge aliasing (better use extract_patches_from_pyramid). Args: img: images, LAFs are detected in :math:. laf: :math:. PS: patch size. normalize_lafs_before_extraction: if True, lafs are normalized to image size. Returns: patches with shape :math:.",
    "type": "function",
    "file_path": "kornia\\kornia\\feature\\laf.py",
    "ast_data": "FunctionDef name:extract_patches_simple arg:img arg:laf arg:PS arg:normalize_lafs_before_extraction arguments arg arg arg arg Call If Assign Call Assign Assign Call Assign Call Assign For Call Assign Call Call Call Call Call Call Return return:yes Call Call"
  },
  {
    "library": "cherrypy",
    "name": "merge",
    "source_code": "def merge(self, config):\n    _cpconfig.merge(self.config, config)\n    self.namespaces(self.config.get('/', {}))",
    "docstring": "Merge the given config into self.config.",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\_cptree.py",
    "ast_data": "FunctionDef name:merge arg:self arg:config arguments arg arg Call Call Call"
  },
  {
    "library": "kornia",
    "name": "w",
    "source_code": "@property\ndef w(self) -> Tensor:\n    return self.data[..., 0]",
    "docstring": "Return the :math: with shape :math:.",
    "type": "method",
    "file_path": "kornia\\kornia\\geometry\\quaternion.py",
    "ast_data": "FunctionDef name:w arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_add_control_dependencies_to_lock",
    "source_code": "def _add_control_dependencies_to_lock(self, created_ops, lock_op):\n    all_args = set([input_.op for op in created_ops for input_ in op.inputs])\n    all_args.update((input_op for op in created_ops for input_op in op.control_inputs))\n    all_args_dict = dict(((op._id, op) for op in all_args))\n    for op in created_ops:\n        all_args_dict.pop(op._id, None)\n    for op in lock_op.control_inputs:\n        all_args_dict.pop(op._id, None)\n    for input_ in lock_op.inputs:\n        all_args_dict.pop(input_.op._id, None)\n    all_args_dict.pop(lock_op._id, None)\n    all_args = all_args_dict.values()\n    if not all_args:\n        return\n    all_args = control_flow_ops.group(*all_args)\n    lock_op._add_control_input(all_args)",
    "docstring": "To avoid deadlocks, all args must be executed before lock_op.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\critical_section_ops.py",
    "ast_data": "FunctionDef name:_add_control_dependencies_to_lock arg:self arg:created_ops arg:lock_op arguments arg arg arg Assign Call Call Assign Call For Call For Call For Call Call Assign Call If Return return:no Assign Call Call"
  },
  {
    "library": "scikit-learn",
    "name": "_NanConstraint",
    "source_code": "class _NanConstraint(_Constraint):\n\n    def is_satisfied_by(self, val):\n        return not isinstance(val, Integral) and isinstance(val, Real) and math.isnan(val)\n\n    def __str__(self):\n        return 'numpy.nan'",
    "docstring": "Constraint representing the indicator .",
    "type": "class",
    "file_path": "scikit-learn\\sklearn\\utils\\_param_validation.py",
    "ast_data": "ClassDef name:_NanConstraint FunctionDef name:is_satisfied_by arg:self arg:val arguments arg arg Return return:yes BoolOp Call Call Call FunctionDef name:__str__ arg:self arguments arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "masked",
    "source_code": "@contextlib.contextmanager\ndef masked(self, mask):\n    prior = self._load_mask\n    if prior:\n        mask = ops.and_(mask, prior)\n        if isinstance(mask, OpsValue):\n            mask = mask.value\n            assert isinstance(mask, CppCSEVariable)\n            mask.dtype = torch.bool\n    self._load_mask = mask\n    try:\n        yield mask\n    finally:\n        self._load_mask = prior",
    "docstring": "Context manager to add an additional mask to loads and stores.",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\codegen\\cpp.py",
    "ast_data": "FunctionDef name:masked arg:self arg:mask arguments arg arg Assign If Assign Call If Call Assign Call Assign Assign Try Assign"
  },
  {
    "library": "scipy",
    "name": "_report_nonhermitian",
    "source_code": "def _report_nonhermitian(M, name):\n    from scipy.linalg import norm\n    md = M - M.T.conj()\n    nmd = norm(md, 1)\n    tol = 10 * np.finfo(M.dtype).eps\n    tol = max(tol, tol * norm(M, 1))\n    if nmd > tol:\n        warnings.warn(f'Matrix {name} of the type {M.dtype} is not Hermitian: condition: {nmd} < {tol} fails.', UserWarning, stacklevel=4)",
    "docstring": "Report if is not a Hermitian matrix given its type.",
    "type": "function",
    "file_path": "scipy\\scipy\\sparse\\linalg\\_eigen\\lobpcg\\lobpcg.py",
    "ast_data": "FunctionDef name:_report_nonhermitian arg:M arg:name arguments arg arg Assign Call Assign Call Assign Call Assign Call Call If Compare Call"
  },
  {
    "library": "tensorflow",
    "name": "_make_dataset",
    "source_code": "@function.Defun(capture_by_value=True, allowlisted_stateful_ops=allowlisted_stateful_ops)\ndef _make_dataset():\n    if graph_level_seed is not None:\n        assert op_level_seed is not None\n        core_random_seed.set_random_seed((graph_level_seed + 87654321 * op_level_seed) % (2 ** 63 - 1))\n    dataset = self._apply_debug_options()\n    return dataset._variant_tensor",
    "docstring": "Factory function for a dataset.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\data\\ops\\dataset_ops.py",
    "ast_data": "FunctionDef name:_make_dataset arguments If Compare Compare Call Assign Call Return return:yes Call"
  },
  {
    "library": "sphinx",
    "name": "note_reread",
    "source_code": "def note_reread(self) -> None:\n    self.reread_always.add(self.docname)",
    "docstring": "Add the current document to the list of documents that will automatically be re-read at the next build.",
    "type": "method",
    "file_path": "sphinx\\sphinx\\environment\\__init__.py",
    "ast_data": "FunctionDef name:note_reread arg:self arguments arg Call"
  },
  {
    "library": "tensorflow",
    "name": "shape",
    "source_code": "@property\ndef shape(self):\n    return self._variable.get_shape()",
    "docstring": "The of this variable. Returns: A .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\ref_variable.py",
    "ast_data": "FunctionDef name:shape arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "pointless_view_pair",
    "source_code": "@register_graph_pattern(CallFunction(aten.view.default, CallFunction(aten.view.default, KeywordArg('arg'), KeywordArg('size1')), KeywordArg('size2')), pass_dict=patterns)\ndef pointless_view_pair(match: Match, arg, size1, size2):\n    node = match.output_node()\n    arg_size = list(arg.meta['val'].shape)\n    if _guard_sizes_oblivious(arg_size, size2):\n        node.replace_all_uses_with(arg)\n        match.erase_nodes()",
    "docstring": "Remove a pair of views that are pointless.",
    "type": "function",
    "file_path": "pytorch\\torch\\_inductor\\fx_passes\\joint_graph.py",
    "ast_data": "FunctionDef name:pointless_view_pair arg:match arg:arg arg:size1 arg:size2 arguments arg arg arg arg Assign Call Assign Call If Call Call Call Call Call Call Call Call Call"
  },
  {
    "library": "matplotlib",
    "name": "close",
    "source_code": "def close(fig: None | int | str | Figure | Literal['all']=None) -> None:\n    if fig is None:\n        manager = _pylab_helpers.Gcf.get_active()\n        if manager is None:\n            return\n        else:\n            _pylab_helpers.Gcf.destroy(manager)\n    elif fig == 'all':\n        _pylab_helpers.Gcf.destroy_all()\n    elif isinstance(fig, int):\n        _pylab_helpers.Gcf.destroy(fig)\n    elif hasattr(fig, 'int'):\n        _pylab_helpers.Gcf.destroy(fig.int)\n    elif isinstance(fig, str):\n        all_labels = get_figlabels()\n        if fig in all_labels:\n            num = get_fignums()[all_labels.index(fig)]\n            _pylab_helpers.Gcf.destroy(num)\n    elif isinstance(fig, Figure):\n        _pylab_helpers.Gcf.destroy_fig(fig)\n    else:\n        _api.check_isinstance((Figure, int, str, None), fig=fig)",
    "docstring": "Close a figure window, and unregister it from pyplot. Parameters ---------- fig : None or int or str or The figure to close. There are a number of ways to specify this: - *None*: the current figure - : the given instance - `figure()show()savefig()show()close()`.",
    "type": "function",
    "file_path": "matplotlib\\lib\\matplotlib\\pyplot.py",
    "ast_data": "FunctionDef name:close arg:fig arguments arg If Compare Assign Call If Compare Return return:no Call If Compare Call If Call Call If Call Call If Call Assign Call If Compare Assign Call Call Call If Call Call Call"
  },
  {
    "library": "django",
    "name": "get_random_string",
    "source_code": "def get_random_string(length, allowed_chars=RANDOM_STRING_CHARS):\n    return ''.join((secrets.choice(allowed_chars) for i in range(length)))",
    "docstring": "Return a securely generated random string. The bit length of the returned value can be calculated with the formula: log_2(len(allowed_chars)^length) For example, with default (26+26+10), this gives: * length: 12, bit length =~ 71 bits * length: 22, bit length =~ 131 bits",
    "type": "function",
    "file_path": "django\\django\\utils\\crypto.py",
    "ast_data": "FunctionDef name:get_random_string arg:length arg:allowed_chars arguments arg arg Return return:yes Call Call Call"
  },
  {
    "library": "sphinx",
    "name": "note_included",
    "source_code": "def note_included(self, filename: str | os.PathLike[str]) -> None:\n    doc = self.path2doc(filename)\n    if doc:\n        self.included.setdefault(self.docname, set()).add(doc)",
    "docstring": "Add *filename* as a included from other document. This means the document is not orphaned. *filename* should be absolute or relative to the source directory.",
    "type": "method",
    "file_path": "sphinx\\sphinx\\environment\\__init__.py",
    "ast_data": "FunctionDef name:note_included arg:self arg:filename arguments arg arg Assign Call If Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "load_with_process_group",
    "source_code": "@contextmanager\ndef load_with_process_group(process_group):\n    global _CURRENT_PROCESS_GROUP\n    if _CURRENT_PROCESS_GROUP is not None:\n        raise RuntimeError('ProcessGroup already set by previous \"load_with_process_group\" context manager')\n    _CURRENT_PROCESS_GROUP = process_group\n    try:\n        yield process_group\n    finally:\n        _CURRENT_PROCESS_GROUP = None",
    "docstring": "Context manager to set the process group with which to load a ShardedTensor.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\_shard\\api.py",
    "ast_data": "FunctionDef name:load_with_process_group arg:process_group arguments arg If Compare Raise Call Assign Try Assign"
  },
  {
    "library": "matplotlib",
    "name": "__init__",
    "source_code": "def __init__(self, byweekday=1, interval=1, tz=None):\n    rule = rrulewrapper(DAILY, byweekday=byweekday, interval=interval, **self.hms0d)\n    super().__init__(rule, tz=tz)",
    "docstring": "Parameters ---------- byweekday : int or list of int, default: all days Ticks will be placed on every weekday in *byweekday*. Default is every day. Elements of *byweekday* must be one of MO, TU, WE, TH, FR, SA, SU, the constants from :mod:, which have been imported into the :mod: namespace. interval : int, default: 1 The interval between each iteration. For example, if `~datetime.tzinfotimezonedateutil.tz`.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\dates.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:byweekday arg:interval arg:tz arguments arg arg arg arg Assign Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "compiled_with_cxx11_abi",
    "source_code": "def compiled_with_cxx11_abi() -> builtins.bool:\n    return True",
    "docstring": "Returns whether PyTorch was built with _GLIBCXX_USE_CXX11_ABI=1",
    "type": "function",
    "file_path": "pytorch\\torch\\__init__.py",
    "ast_data": "FunctionDef name:compiled_with_cxx11_abi arguments Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "graph",
    "source_code": "@property\ndef graph(self) -> ops.Graph:\n    return self._values.graph",
    "docstring": "The that contains the values, indices, and shape tensors.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\indexed_slices.py",
    "ast_data": "FunctionDef name:graph arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "__call__",
    "source_code": "def __call__(self, shape, dtype=None, **kwargs):\n    _validate_kwargs(self.__class__.__name__, kwargs, support_partition=False)\n    dtype = _assert_float_dtype(_get_dtype(dtype))\n    if len(shape) != 2:\n        raise ValueError('Identity matrix initializer can only be used for 2D matrices.')\n    initializer = linalg_ops.eye(*shape, dtype=dtype)\n    return self.gain * initializer",
    "docstring": "Returns a tensor object initialized to a 2D identity matrix. Args: shape: Shape of the tensor. It should have exactly rank 2. dtype: Optional dtype of the tensor. Only floating point types are supported. If not specified, is used, which default to unless you configured it otherwise (via ) **kwargs: Additional keyword arguments.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\initializers\\initializers_v2.py",
    "ast_data": "FunctionDef name:__call__ arg:self arg:shape arg:dtype arguments arg arg arg arg Call Assign Call Call If Compare Call Raise Call Assign Call Return return:yes"
  },
  {
    "library": "numpy",
    "name": "column_stack",
    "source_code": "@array_function_dispatch(_column_stack_dispatcher)\ndef column_stack(tup):\n    arrays = []\n    for v in tup:\n        arr = asanyarray(v)\n        if arr.ndim < 2:\n            arr = array(arr, copy=None, subok=True, ndmin=2).T\n        arrays.append(arr)\n    return _nx.concatenate(arrays, 1)",
    "docstring": "Stack 1-D arrays as columns into a 2-D array. Take a sequence of 1-D arrays and stack them as columns to make a single 2-D array. 2-D arrays are stacked as-is, just like with . 1-D arrays are turned into 2-D columns first. Parameters ---------- tup : sequence of 1-D or 2-D arrays. Arrays to stack. All of them must have the same first dimension. Returns ------- stacked : 2-D array The array formed by stacking the given arrays. See Also -------- stack, hstack, vstack, concatenate Examples -------- >>> import numpy as np >>> a = np.array((1,2,3)) >>> b = np.array((2,3,4)) >>> np.column_stack((a,b)) array([[1, 2], [2, 3], [3, 4]])",
    "type": "function",
    "file_path": "numpy\\numpy\\lib\\_shape_base_impl.py",
    "ast_data": "FunctionDef name:column_stack arg:tup arguments arg Assign For Assign Call If Compare Assign Call Call Return return:yes Call Call"
  },
  {
    "library": "matplotlib",
    "name": "_do_cell_alignment",
    "source_code": "def _do_cell_alignment(self):\n    widths = {}\n    heights = {}\n    for (row, col), cell in self._cells.items():\n        height = heights.setdefault(row, 0.0)\n        heights[row] = max(height, cell.get_height())\n        width = widths.setdefault(col, 0.0)\n        widths[col] = max(width, cell.get_width())\n    xpos = 0\n    lefts = {}\n    for col in sorted(widths):\n        lefts[col] = xpos\n        xpos += widths[col]\n    ypos = 0\n    bottoms = {}\n    for row in sorted(heights, reverse=True):\n        bottoms[row] = ypos\n        ypos += heights[row]\n    for (row, col), cell in self._cells.items():\n        cell.set_x(lefts[col])\n        cell.set_y(bottoms[row])",
    "docstring": "Calculate row heights and column widths; position cells accordingly.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\table.py",
    "ast_data": "FunctionDef name:_do_cell_alignment arg:self arguments arg Assign Assign For Call Assign Call Assign Call Call Assign Call Assign Call Call Assign Assign For Call Assign Assign Assign For Call Assign For Call Call Call"
  },
  {
    "library": "cherrypy",
    "name": "clean_headers",
    "source_code": "def clean_headers(status):\n    response = cherrypy.serving.response\n    respheaders = response.headers\n    for key in ['Accept-Ranges', 'Age', 'ETag', 'Location', 'Retry-After', 'Vary', 'Content-Encoding', 'Content-Length', 'Expires', 'Content-Location', 'Content-MD5', 'Last-Modified']:\n        if key in respheaders:\n            del respheaders[key]\n    if status != 416:\n        if 'Content-Range' in respheaders:\n            del respheaders['Content-Range']",
    "docstring": "Remove any headers which should not apply to an error response.",
    "type": "function",
    "file_path": "cherrypy\\cherrypy\\_cperror.py",
    "ast_data": "FunctionDef name:clean_headers arg:status arguments arg Assign Assign For If Compare If Compare If Compare"
  },
  {
    "library": "tensorflow",
    "name": "_CheckNumericsGrad",
    "source_code": "@ops.RegisterGradient('CheckNumerics')\ndef _CheckNumericsGrad(op: ops.Operation, grad):\n    return array_ops.check_numerics(grad, 'Not a number (NaN) or infinity (Inf) values detected in gradient. %s' % op.get_attr('message'))",
    "docstring": "Gradient for check_numerics op.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\array_grad.py",
    "ast_data": "FunctionDef name:_CheckNumericsGrad arg:op arg:grad arguments arg arg Return return:yes Call Call Call"
  },
  {
    "library": "pandas",
    "name": "construct_array_type",
    "source_code": "@classmethod\ndef construct_array_type(cls) -> type[IntegerArray]:\n    return IntegerArray",
    "docstring": "Return the array type associated with this dtype. Returns ------- type",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\arrays\\integer.py",
    "ast_data": "FunctionDef name:construct_array_type arg:cls arguments arg Return return:yes"
  },
  {
    "library": "pandas",
    "name": "_check_parser",
    "source_code": "def _check_parser(parser: str) -> None:\n    if parser not in PARSERS:\n        raise KeyError(f\"Invalid parser '{parser}' passed, valid parsers are {PARSERS.keys()}\")",
    "docstring": "Make sure a valid parser is passed. Parameters ---------- parser : str Raises ------ KeyError * If an invalid parser is passed",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\computation\\eval.py",
    "ast_data": "FunctionDef name:_check_parser arg:parser arguments arg If Compare Raise Call Call"
  },
  {
    "library": "pytorch",
    "name": "multilabel_margin_loss",
    "source_code": "def multilabel_margin_loss(input: Tensor, target: Tensor, size_average: Optional[bool]=None, reduce: Optional[bool]=None, reduction: str='mean') -> Tensor:\n    if has_torch_function_variadic(input, target):\n        return handle_torch_function(multilabel_margin_loss, (input, target), input, target, size_average=size_average, reduce=reduce, reduction=reduction)\n    if size_average is not None or reduce is not None:\n        reduction_enum = _Reduction.legacy_get_enum(size_average, reduce)\n    else:\n        reduction_enum = _Reduction.get_enum(reduction)\n    return torch._C._nn.multilabel_margin_loss(input, target, reduction_enum)",
    "docstring": "Compute the multilabel margin loss. See :class: for details. Args: input (Tensor): Predicted values. target (Tensor): Ground truth values. size_average (bool, optional): Deprecated (see :attr:). reduce (bool, optional): Deprecated (see :attr:). reduction (str, optional): Specifies the reduction to apply to the output: 'none' | 'mean' | 'sum'. 'mean': the mean of the output is taken. 'sum': the output will be summed. 'none': no reduction will be applied. Default: 'mean'. Returns: Tensor: Mutilabel margin loss.",
    "type": "function",
    "file_path": "pytorch\\torch\\nn\\functional.py",
    "ast_data": "FunctionDef name:multilabel_margin_loss arg:input arg:target arg:size_average arg:reduce arg:reduction arguments arg arg arg arg arg If Call Return return:yes Call If BoolOp Compare Compare Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "set_ylabel",
    "source_code": "def set_ylabel(self, ylabel, fontdict=None, labelpad=None, *, loc=None, **kwargs):\n    if labelpad is not None:\n        self.yaxis.labelpad = labelpad\n    protected_kw = ['y', 'horizontalalignment', 'ha']\n    if {*kwargs} & {*protected_kw}:\n        if loc is not None:\n            raise TypeError(f\"Specifying 'loc' is disallowed when any of its corresponding low level keyword arguments ({protected_kw}) are also supplied\")\n    else:\n        loc = mpl._val_or_rc(loc, 'yaxis.labellocation')\n        _api.check_in_list(('bottom', 'center', 'top'), loc=loc)\n        y, ha = {'bottom': (0, 'left'), 'center': (0.5, 'center'), 'top': (1, 'right')}[loc]\n        kwargs.update(y=y, horizontalalignment=ha)\n    return self.yaxis.set_label_text(ylabel, fontdict, **kwargs)",
    "docstring": "Set the label for the y-axis. Parameters ---------- ylabel : str The label text. labelpad : float, default: :rc: Spacing in points from the Axes bounding box including ticks and tick labels. If None, the previous value is left as is. loc : {'bottom', 'center', 'top'}, default: :rc: The label position. This is a high-level alternative for passing parameters *y* and *horizontalalignment*. Other Parameters ---------------- **kwargs : properties properties control the appearance of the label. See Also -------- text : Documents the properties supported by .",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\axes\\_base.py",
    "ast_data": "FunctionDef name:set_ylabel arg:self arg:ylabel arg:fontdict arg:labelpad arguments arg arg arg arg arg arg If Compare Assign Assign If If Compare Raise Call Assign Call Call Assign Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "lp_pool3d",
    "source_code": "def lp_pool3d(input: Tensor, norm_type: Union[int, float], kernel_size: BroadcastingList3[int], stride: Optional[BroadcastingList3[int]]=None, ceil_mode: bool=False) -> Tensor:\n    if has_torch_function_unary(input):\n        return handle_torch_function(lp_pool3d, (input,), input, norm_type, kernel_size, stride=stride, ceil_mode=ceil_mode)\n    kd, kw, kh = _triple(kernel_size)\n    if stride is not None:\n        out = avg_pool3d(input.pow(norm_type), kernel_size, stride, 0, ceil_mode)\n    else:\n        out = avg_pool3d(input.pow(norm_type), kernel_size, padding=0, ceil_mode=ceil_mode)\n    return (torch.sign(out) * relu(torch.abs(out))).mul(kd * kw * kh).pow(1.0 / norm_type)",
    "docstring": "Apply a 3D power-average pooling over an input signal composed of several input planes. If the sum of all inputs to the power of is zero, the gradient is set to zero as well. See :class: for details.",
    "type": "function",
    "file_path": "pytorch\\torch\\nn\\functional.py",
    "ast_data": "FunctionDef name:lp_pool3d arg:input arg:norm_type arg:kernel_size arg:stride arg:ceil_mode arguments arg arg arg arg arg If Call Return return:yes Call Assign Call If Compare Assign Call Call Assign Call Call Return return:yes Call Call Call Call Call"
  },
  {
    "library": "django",
    "name": "add_dependency",
    "source_code": "def add_dependency(self, migration, child, parent, skip_validation=False):\n    if child not in self.nodes:\n        error_message = 'Migration %s dependencies reference nonexistent child node %r' % (migration, child)\n        self.add_dummy_node(child, migration, error_message)\n    if parent not in self.nodes:\n        error_message = 'Migration %s dependencies reference nonexistent parent node %r' % (migration, parent)\n        self.add_dummy_node(parent, migration, error_message)\n    self.node_map[child].add_parent(self.node_map[parent])\n    self.node_map[parent].add_child(self.node_map[child])\n    if not skip_validation:\n        self.validate_consistency()",
    "docstring": "This may create dummy nodes if they don't yet exist. If , validate_consistency() should be called afterward.",
    "type": "method",
    "file_path": "django\\django\\db\\migrations\\graph.py",
    "ast_data": "FunctionDef name:add_dependency arg:self arg:migration arg:child arg:parent arg:skip_validation arguments arg arg arg arg arg If Compare Assign Call If Compare Assign Call Call Call If Call"
  },
  {
    "library": "pandas",
    "name": "construct_array_type",
    "source_code": "@classmethod\ndef construct_array_type(cls) -> type_t[BaseMaskedArray]:\n    raise NotImplementedError",
    "docstring": "Return the array type associated with this dtype. Returns ------- type",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\dtypes\\dtypes.py",
    "ast_data": "FunctionDef name:construct_array_type arg:cls arguments arg Raise"
  },
  {
    "library": "scipy",
    "name": "_lnB",
    "source_code": "def _lnB(alpha):\n    return np.sum(gammaln(alpha)) - gammaln(np.sum(alpha))",
    "docstring": "Internal helper function to compute the log of the useful quotient. .. math:: B(\\alpha) = \\frac{\\prod_{i=1}{K}\\Gamma(\\alpha_i)} {\\Gamma\\left(\\sum_{i=1}^{K} \\alpha_i \\right)} Parameters ---------- %(_dirichlet_doc_default_callparams)s Returns ------- B : scalar Helper quotient, internal use only",
    "type": "function",
    "file_path": "scipy\\scipy\\stats\\_multivariate.py",
    "ast_data": "FunctionDef name:_lnB arg:alpha arguments arg Return return:yes Call Call Call Call"
  },
  {
    "library": "scipy",
    "name": "_read_array",
    "source_code": "def _read_array(f, typecode, array_desc):\n    if typecode in [1, 3, 4, 5, 6, 9, 13, 14, 15]:\n        if typecode == 1:\n            nbytes = _read_int32(f)\n            if nbytes != array_desc['nbytes']:\n                warnings.warn('Not able to verify number of bytes from header', stacklevel=3)\n        array = np.frombuffer(f.read(array_desc['nbytes']), dtype=DTYPE_DICT[typecode])\n    elif typecode in [2, 12]:\n        array = np.frombuffer(f.read(array_desc['nbytes'] * 2), dtype=DTYPE_DICT[typecode])[1::2]\n    else:\n        array = []\n        for i in range(array_desc['nelements']):\n            dtype = typecode\n            data = _read_data(f, dtype)\n            array.append(data)\n        array = np.array(array, dtype=np.object_)\n    if array_desc['ndims'] > 1:\n        dims = array_desc['dims'][:int(array_desc['ndims'])]\n        dims.reverse()\n        array = array.reshape(dims)\n    _align_32(f)\n    return array",
    "docstring": "Read an array of type , with the array descriptor given as .",
    "type": "function",
    "file_path": "scipy\\scipy\\io\\_idl.py",
    "ast_data": "FunctionDef name:_read_array arg:f arg:typecode arg:array_desc arguments arg arg arg If Compare If Compare Assign Call If Compare Call Assign Call Call If Compare Assign Call Call Assign For Call Assign Assign Call Call Assign Call If Compare Assign Call Call Assign Call Call Return return:yes"
  },
  {
    "library": "pandas",
    "name": "nlevels",
    "source_code": "@property\ndef nlevels(self) -> int:\n    return len(self._levels)",
    "docstring": "Integer number of levels in this MultiIndex. See Also -------- MultiIndex.levels : Get the levels of the MultiIndex. MultiIndex.codes : Get the codes of the MultiIndex. MultiIndex.from_arrays : Convert arrays to MultiIndex. MultiIndex.from_tuples : Convert list of tuples to MultiIndex. Examples -------- >>> mi = pd.MultiIndex.from_arrays([[\"a\"], [\"b\"], [\"c\"]]) >>> mi MultiIndex([('a', 'b', 'c')], ) >>> mi.nlevels 3",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\indexes\\multi.py",
    "ast_data": "FunctionDef name:nlevels arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "clear_on_fresh_inductor_cache",
    "source_code": "def clear_on_fresh_inductor_cache(obj: Any) -> Any:\n    if not hasattr(obj, 'cache_clear') or not callable(obj.cache_clear):\n        raise AttributeError(f'{obj} does not have a cache_clear method')\n    _registered_caches.append(obj)\n    return obj",
    "docstring": "Use this decorator to register any caches that should be cache_clear'd with fresh_inductor_cache().",
    "type": "function",
    "file_path": "pytorch\\torch\\_inductor\\utils.py",
    "ast_data": "FunctionDef name:clear_on_fresh_inductor_cache arg:obj arguments arg If BoolOp Call Call Raise Call Call Return return:yes"
  },
  {
    "library": "sphinx",
    "name": "get_index_text",
    "source_code": "def get_index_text(self, modname: str, name: tuple[str, str]) -> str:\n    msg = 'must be implemented in subclasses'\n    raise NotImplementedError(msg)",
    "docstring": "Return the text for the index entry of the object.",
    "type": "method",
    "file_path": "sphinx\\sphinx\\domains\\python\\_object.py",
    "ast_data": "FunctionDef name:get_index_text arg:self arg:modname arg:name arguments arg arg arg Assign Raise Call"
  },
  {
    "library": "scipy",
    "name": "FreudensteinRoth",
    "source_code": "class FreudensteinRoth(Benchmark):\n\n    def __init__(self, dimensions=2):\n        Benchmark.__init__(self, dimensions)\n        self._bounds = list(zip([-10.0] * self.N, [10.0] * self.N))\n        self.custom_bounds = [(-3, 3), (-5, 5)]\n        self.global_optimum = [[5.0, 4.0]]\n        self.fglob = 0.0\n\n    def fun(self, x, *args):\n        self.nfev += 1\n        f1 = (-13.0 + x[0] + ((5.0 - x[1]) * x[1] - 2.0) * x[1]) ** 2\n        f2 = (-29.0 + x[0] + ((x[1] + 1.0) * x[1] - 14.0) * x[1]) ** 2\n        return f1 + f2",
    "docstring": "FreudensteinRoth objective function. This class defines the Freudenstein & Roth [1]_ global optimization problem. This is a multimodal minimization problem defined as follows: .. math:: f_{\\text{FreudensteinRoth}}(x) = \\left\\{x_1 - 13 + \\left[(5 - x_2) x_2 - 2 \\right] x_2 \\right\\}^2 + \\left \\{x_1 - 29 + \\left[(x_2 + 1) x_2 - 14 \\right] x_2 \\right\\}^2 with :math: for :math:. *Global optimum*: :math: for :math: .. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions For Global Optimization Problems Int. Journal of Mathematical Modelling and Numerical Optimisation, 2013, 4, 150-194.",
    "type": "class",
    "file_path": "scipy\\benchmarks\\benchmarks\\go_benchmark_functions\\go_funcs_F.py",
    "ast_data": "ClassDef name:FreudensteinRoth FunctionDef name:__init__ arg:self arg:dimensions arguments arg arg Call Assign Call Call Assign Assign Assign FunctionDef name:fun arg:self arg:x arguments arg arg arg Assign Assign Return return:yes"
  },
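A quick sanity check of the entry above, as a standalone sketch (the `Benchmark` harness is omitted): the objective evaluates to 0 at the documented global optimum [5.0, 4.0].

```python
# Standalone sketch of the FreudensteinRoth objective above
# (Benchmark base class omitted); verifies the documented optimum.

def freudenstein_roth(x):
    f1 = (-13.0 + x[0] + ((5.0 - x[1]) * x[1] - 2.0) * x[1]) ** 2
    f2 = (-29.0 + x[0] + ((x[1] + 1.0) * x[1] - 14.0) * x[1]) ** 2
    return f1 + f2

assert freudenstein_roth([5.0, 4.0]) == 0.0   # fglob = 0.0 at [5.0, 4.0]
print(freudenstein_roth([0.0, 0.0]))          # 13**2 + 29**2 = 1010.0
```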
  {
    "library": "scipy",
    "name": "_asfptype",
    "source_code": "def _asfptype(self):\n    fp_types = ['f', 'd', 'F', 'D']\n    if self.dtype.char in fp_types:\n        return self\n    else:\n        for fp_type in fp_types:\n            if self.dtype <= np.dtype(fp_type):\n                return self.astype(fp_type)\n        raise TypeError(f'cannot upcast [{self.dtype.name}] to a floating point format')",
    "docstring": "Upcast array to a floating point format (if necessary)",
    "type": "method",
    "file_path": "scipy\\scipy\\sparse\\_base.py",
    "ast_data": "FunctionDef name:_asfptype arg:self arguments arg Assign If Compare Return return:yes For If Compare Call Return return:yes Call Raise Call"
  },
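A minimal sketch of the same upcast rule applied to a plain ndarray (the sparse `self` is replaced by an array argument; everything else mirrors the method above): keep native float/complex dtypes, otherwise pick the first floating-point dtype that can safely hold the data.

```python
import numpy as np

# Upcast rule from _asfptype above, on a plain ndarray: 'f', 'd', 'F', 'D'
# are tried in order, using NumPy's dtype ordering to test safe casting.
def asfptype(arr):
    fp_types = ['f', 'd', 'F', 'D']
    if arr.dtype.char in fp_types:
        return arr
    for fp_type in fp_types:
        if arr.dtype <= np.dtype(fp_type):
            return arr.astype(fp_type)
    raise TypeError(f'cannot upcast [{arr.dtype.name}] to a floating point format')

# int32 cannot be safely cast to float32 ('f'), so float64 ('d') is chosen.
print(asfptype(np.array([1, 2, 3], dtype=np.int32)).dtype)  # float64
```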
  {
    "library": "pytorch",
    "name": "create_single_node_partition",
    "source_code": "def create_single_node_partition(self, node):\n    partition = self.create_partition()\n    partition.add_node(node)\n    return",
    "docstring": "Create a partition for a single node",
    "type": "method",
    "file_path": "pytorch\\torch\\fx\\experimental\\accelerator_partitioner.py",
    "ast_data": "FunctionDef name:create_single_node_partition arg:self arg:node arguments arg arg Assign Call Call Return return:no"
  },
  {
    "library": "tensorflow",
    "name": "__iter__",
    "source_code": "def __iter__(self):\n    if self._dims is None:\n        raise ValueError('Cannot iterate over a shape with unknown rank.')\n    elif self._v2_behavior:\n        return iter((d for d in self._dims))\n    else:\n        return iter((d for d in self.dims))",
    "docstring": "Returns if the rank is known, otherwise raises ValueError.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\tensor_shape.py",
    "ast_data": "FunctionDef name:__iter__ arg:self arguments arg If Compare Raise Call If Return return:yes Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "has_kwargs",
    "source_code": "def has_kwargs(fn):\n    if isinstance(fn, functools.partial):\n        fn = fn.func\n    elif _is_callable_object(fn):\n        fn = fn.__call__\n    elif not callable(fn):\n        raise TypeError(f'Argument `fn` should be a callable. Received: fn={fn} (of type {type(fn)})')\n    return tf_inspect.getfullargspec(fn).varkw is not None",
    "docstring": "Returns whether the passed callable has **kwargs in its signature. Args: fn: Function, or function-like object (e.g., result of ). Returns: : if has **kwargs in its signature. Raises: : If fn is not a Function, or function-like object.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\util\\function_utils.py",
    "ast_data": "FunctionDef name:has_kwargs arg:fn arguments arg If Call Assign If Call Assign If Call Raise Call Call Return return:yes Compare Call"
  },
  {
    "library": "pytorch",
    "name": "draw_buffers",
    "source_code": "def draw_buffers(nodes: list[BaseSchedulerNode], print_graph: bool=False, fname: Optional[str]=None) -> None:\n    if not has_dot():\n        log.warning('draw_buffers() requires `graphviz` package')\n        return\n    if fname is None:\n        fname = get_graph_being_compiled()\n    graph = create_fx_from_snodes(nodes)\n    for node in graph.nodes:\n        if 'fusion_meta' not in node.meta:\n            continue\n        group = node.meta['fusion_meta'].group\n        if isinstance(group, tuple):\n            if isinstance(group[1], int):\n                group = (group[1],)\n            else:\n                group = group[1]\n        dtype = None\n        if isinstance(node, ir.ComputedBuffer):\n            dtype = node.data.dtype\n        metadata = TensorMetadata(group, dtype, None, None, None, None, None)\n        node.meta['tensor_meta'] = metadata\n    if print_graph:\n        print(graph)\n    gm = GraphModule({}, graph)\n    legalize_graph(gm)\n    gm.graph.lint()\n    draw_graph(gm, fname, clear_meta=False, dot_graph_shape=config.trace.dot_graph_shape)",
    "docstring": "Draw a graph in fname.svg.",
    "type": "function",
    "file_path": "pytorch\\torch\\_inductor\\debug.py",
    "ast_data": "FunctionDef name:draw_buffers arg:nodes arg:print_graph arg:fname arguments arg arg arg If Call Call Return return:no If Compare Assign Call Assign Call For If Compare Assign If Call If Call Assign Assign Assign If Call Assign Assign Call Assign If Call Assign Call Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_distribute_strategy",
    "source_code": "@property\ndef _distribute_strategy(self):\n    return None",
    "docstring": "The that this variable was created under.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\ref_variable.py",
    "ast_data": "FunctionDef name:_distribute_strategy arg:self arguments arg Return return:no"
  },
  {
    "library": "pytorch",
    "name": "_set_config",
    "source_code": "def _set_config(self, field_name: str, value: Any) -> None:\n    setattr(self.config_module, field_name, value)",
    "docstring": "Set a config value in the module.",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\fuzzer.py",
    "ast_data": "FunctionDef name:_set_config arg:self arg:field_name arg:value arguments arg arg arg Call"
  },
  {
    "library": "scikit-learn",
    "name": "_generate_sample_indices",
    "source_code": "def _generate_sample_indices(random_state, n_samples, n_samples_bootstrap):\n    random_instance = check_random_state(random_state)\n    sample_indices = random_instance.randint(0, n_samples, n_samples_bootstrap, dtype=np.int32)\n    return sample_indices",
    "docstring": "Private function used to _parallel_build_trees function.",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\ensemble\\_forest.py",
    "ast_data": "FunctionDef name:_generate_sample_indices arg:random_state arg:n_samples arg:n_samples_bootstrap arguments arg arg arg Assign Call Assign Call Return return:yes"
  },
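An illustrative use of the bootstrap-index idea from the entry above: draw indices with replacement, then derive the out-of-bag samples as the ones never drawn. `check_random_state` is sklearn-internal, so a seeded `RandomState` stands in for it here.

```python
import numpy as np

# Bootstrap sampling as in _generate_sample_indices above, plus the
# out-of-bag complement that random forests use for OOB scoring.
rng = np.random.RandomState(0)
n_samples = 10
sample_indices = rng.randint(0, n_samples, n_samples, dtype=np.int32)
oob_mask = ~np.isin(np.arange(n_samples), sample_indices)
print(sample_indices)
print(np.flatnonzero(oob_mask))  # indices never drawn in this bootstrap
```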
  {
    "library": "tensorflow",
    "name": "get_optimizer_experimental_options",
    "source_code": "def get_optimizer_experimental_options(self):\n    rewrite_options = self.config.graph_options.rewrite_options\n    options = {}\n\n    def rewriter_toggle(option):\n        attr = getattr(rewrite_options, option)\n        if attr != 0:\n            options[option] = attr == rewriter_config_pb2.RewriterConfig.ON\n\n    def rewriter_bool(option):\n        options[option] = getattr(rewrite_options, option)\n    rewriter_toggle('layout_optimizer')\n    rewriter_toggle('constant_folding')\n    rewriter_toggle('shape_optimization')\n    rewriter_toggle('remapping')\n    rewriter_toggle('arithmetic_optimization')\n    rewriter_toggle('dependency_optimization')\n    rewriter_toggle('loop_optimization')\n    rewriter_toggle('function_optimization')\n    rewriter_toggle('debug_stripper')\n    rewriter_bool('disable_model_pruning')\n    rewriter_toggle('scoped_allocator_optimization')\n    rewriter_toggle('pin_to_host_optimization')\n    rewriter_toggle('implementation_selector')\n    rewriter_toggle('auto_mixed_precision')\n    rewriter_toggle('use_plugin_optimizers')\n    rewriter_bool('disable_meta_optimizer')\n    rewriter_toggle('auto_mixed_precision_onednn_bfloat16')\n    rewriter_toggle('auto_mixed_precision_mkl')\n    if rewrite_options.min_graph_nodes != 0:\n        options['min_graph_nodes'] = rewrite_options.min_graph_nodes\n    return options",
    "docstring": "Get experimental options for the optimizer. Returns: Dictionary of current option values",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\context.py",
    "ast_data": "FunctionDef name:get_optimizer_experimental_options arg:self arguments arg Assign Assign FunctionDef name:rewriter_toggle arg:option arguments arg Assign Call If Compare Assign Compare FunctionDef name:rewriter_bool arg:option arguments arg Assign Call Call Call Call Call Call Call Call Call Call Call Call Call Call Call Call Call Call Call If Compare Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_is_private",
    "source_code": "def _is_private(self, path, name, obj=None):\n    del obj\n    return path in self._private_map and name in self._private_map[path] or (name.startswith('_') and (not re.match('__.*__$', name)) or name in ['__base__', '__class__', '__next_in_mro__'])",
    "docstring": "Return whether a name is private.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\tools\\common\\public_api.py",
    "ast_data": "FunctionDef name:_is_private arg:self arg:path arg:name arg:obj arguments arg arg arg arg Return return:yes BoolOp BoolOp Compare Compare BoolOp BoolOp Call Call Compare"
  },
  {
    "library": "tensorflow",
    "name": "monitoring_helper",
    "source_code": "def monitoring_helper(service_addr, duration_ms, monitoring_level, num_queries):\n    if monitoring_level <= 0 or monitoring_level > 2:\n        sys.exit('Please choose a monitoring level between 1 and 2.')\n    for query in range(0, num_queries):\n        res = profiler_client.monitor(service_addr, duration_ms, monitoring_level)\n        print('Cloud TPU Monitoring Results (Sample ', query, '):\\n\\n', res)",
    "docstring": "Helper function to print monitoring results. Helper function to print monitoring results for num_queries times. Args: service_addr: Address of the TPU profiler service. duration_ms: Duration of one monitoring sample in milliseconds. monitoring_level: An integer between 1 and 2. Level 2 is more verbose than level 1 and shows more metrics. num_queries: Number of monitoring samples to collect.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\tpu\\profiler\\capture_tpu_profile.py",
    "ast_data": "FunctionDef name:monitoring_helper arg:service_addr arg:duration_ms arg:monitoring_level arg:num_queries arguments arg arg arg arg If BoolOp Compare Compare Call For Call Assign Call Call"
  },
  {
    "library": "scikit-learn",
    "name": "plot_tree",
    "source_code": "@validate_params({'decision_tree': [DecisionTreeClassifier, DecisionTreeRegressor], 'max_depth': [Interval(Integral, 0, None, closed='left'), None], 'feature_names': ['array-like', None], 'class_names': ['array-like', 'boolean', None], 'label': [StrOptions({'all', 'root', 'none'})], 'filled': ['boolean'], 'impurity': ['boolean'], 'node_ids': ['boolean'], 'proportion': ['boolean'], 'rounded': ['boolean'], 'precision': [Interval(Integral, 0, None, closed='left'), None], 'ax': 'no_validation', 'fontsize': [Interval(Integral, 0, None, closed='left'), None]}, prefer_skip_nested_validation=True)\ndef plot_tree(decision_tree, *, max_depth=None, feature_names=None, class_names=None, label='all', filled=False, impurity=True, node_ids=False, proportion=False, rounded=False, precision=3, ax=None, fontsize=None):\n    check_is_fitted(decision_tree)\n    exporter = _MPLTreeExporter(max_depth=max_depth, feature_names=feature_names, class_names=class_names, label=label, filled=filled, impurity=impurity, node_ids=node_ids, proportion=proportion, rounded=rounded, precision=precision, fontsize=fontsize)\n    return exporter.export(decision_tree, ax=ax)",
    "docstring": "Plot a decision tree. The sample counts that are shown are weighted with any sample_weights that might be present. The visualization is fit automatically to the size of the axis. Use the `User Guide `, draw node boxes with rounded corners and use Helvetica fonts instead of Times-Roman. precision : int, default=3 Number of digits of precision for floating point in the values of impurity, threshold and value attributes of each node. ax : matplotlib axis, default=None Axes to plot to. If None, use current axis. Any previous content is cleared. fontsize : int, default=None Size of text font. If None, determined automatically to fit figure. Returns ------- annotations : list of artists List containing the artists for the annotation boxes making up the tree. Examples -------- >>> from sklearn.datasets import load_iris >>> from sklearn import tree >>> clf = tree.DecisionTreeClassifier(random_state=0) >>> iris = load_iris() >>> clf = clf.fit(iris.data, iris.target) >>> tree.plot_tree(clf) [...]",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\tree\\_export.py",
    "ast_data": "FunctionDef name:plot_tree arg:decision_tree arguments arg arg arg arg arg arg arg arg arg arg arg arg arg Call Assign Call Return return:yes Call Call Call Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "SavedModelSaver",
    "source_code": "class SavedModelSaver(object, metaclass=abc.ABCMeta):\n\n    def __init__(self, obj):\n        self.obj = obj\n\n    @abc.abstractproperty\n    def object_identifier(self):\n        raise NotImplementedError\n\n    @property\n    def tracking_metadata(self):\n        return json_utils.Encoder().encode(self.python_properties)\n\n    def trackable_children(self, serialization_cache):\n        if not utils.should_save_traces():\n            return {}\n        children = self.objects_to_serialize(serialization_cache)\n        children.update(self.functions_to_serialize(serialization_cache))\n        return children\n\n    @abc.abstractproperty\n    def python_properties(self):\n        raise NotImplementedError\n\n    @abc.abstractmethod\n    def objects_to_serialize(self, serialization_cache):\n        raise NotImplementedError\n\n    @abc.abstractmethod\n    def functions_to_serialize(self, serialization_cache):\n        raise NotImplementedError",
    "docstring": "Saver defining the methods and properties used to serialize Keras objects.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\saving\\saved_model\\base_serialization.py",
    "ast_data": "ClassDef name:SavedModelSaver FunctionDef name:__init__ arg:self arg:obj arguments arg arg Assign FunctionDef name:object_identifier arg:self arguments arg Raise FunctionDef name:tracking_metadata arg:self arguments arg Return return:yes Call Call FunctionDef name:trackable_children arg:self arg:serialization_cache arguments arg arg If Call Return return:no Assign Call Call Call Return return:yes FunctionDef name:python_properties arg:self arguments arg Raise FunctionDef name:objects_to_serialize arg:self arg:serialization_cache arguments arg arg Raise FunctionDef name:functions_to_serialize arg:self arg:serialization_cache arguments arg arg Raise"
  },
  {
    "library": "tensorflow",
    "name": "_as_variant_tensor",
    "source_code": "@abc.abstractmethod\ndef _as_variant_tensor(self):\n    raise NotImplementedError(f'{type(self)}.as_variant_tensor()')",
    "docstring": "Creates a scalar of representing this dataset. Returns: A scalar of type, which represents this dataset.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\data\\ops\\dataset_ops.py",
    "ast_data": "FunctionDef name:_as_variant_tensor arg:self arguments arg Raise Call Call"
  },
  {
    "library": "pytorch",
    "name": "async_save",
    "source_code": "def async_save(self, state_dict: STATE_DICT_TYPE) -> Future:\n    return saver.async_save(state_dict, storage_writer=self.storage_writer, process_group=self.process_group, planner=self.save_planner)",
    "docstring": "Calls :py:meth: . Utilizing values passed during initialization. Returns: Future: A future holding the resultant Metadata object from .",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\checkpoint\\_checkpointer.py",
    "ast_data": "FunctionDef name:async_save arg:self arg:state_dict arguments arg arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "_amp_update_scale_cpu_",
    "source_code": "def _amp_update_scale_cpu_(self, found_inf: torch.Tensor) -> None:\n    assert self._scale is not None and self._growth_tracker is not None\n    if found_inf.item() >= 1.0:\n        self._scale *= self._backoff_factor\n        self._growth_tracker.fill_(0)\n    else:\n        successful = self._growth_tracker + 1\n        if successful == self._growth_interval:\n            self._scale *= self._growth_factor\n            self._growth_tracker.fill_(0)\n        else:\n            self._growth_tracker = successful",
    "docstring": "If found_inf is 1.0 (True), then scale is multiplied by backoff_factor and growth_tracker is set to zero. Otherwise, scale is multiplied by the growth factor when the growth interval is reached.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\fsdp\\sharded_grad_scaler.py",
    "ast_data": "FunctionDef name:_amp_update_scale_cpu_ arg:self arg:found_inf arguments arg arg BoolOp Compare Compare If Compare Call Call Assign If Compare Call Assign"
  },
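A plain-Python sketch of the scale-update rule in `_amp_update_scale_cpu_` above, with floats standing in for tensors. The constants mirror the usual GradScaler defaults (an assumption here, not taken from this file).

```python
# Sketch of the sharded grad-scaler update rule: back off on inf/nan,
# grow after growth_interval consecutive successful steps.
def update_scale(scale, growth_tracker, found_inf,
                 growth_factor=2.0, backoff_factor=0.5, growth_interval=2000):
    if found_inf:
        return scale * backoff_factor, 0   # back off, reset tracker
    growth_tracker += 1
    if growth_tracker == growth_interval:
        return scale * growth_factor, 0    # grow, reset tracker
    return scale, growth_tracker           # keep waiting for the interval

scale, tracker = 65536.0, 0
scale, tracker = update_scale(scale, tracker, found_inf=True)
print(scale, tracker)  # 32768.0 0
```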
  {
    "library": "tensorflow",
    "name": "_hbm_oom_event",
    "source_code": "def _hbm_oom_event(self, symptoms):\n    if not symptoms:\n        return False\n    for symptom in reversed(symptoms):\n        if symptom['symptomType'] != 'HBM_OUT_OF_MEMORY':\n            continue\n        oom_datetime_str = symptom['createTime'].split('.')[0]\n        oom_datetime = datetime.datetime.strptime(oom_datetime_str, '%Y-%m-%dT%H:%M:%S')\n        time_diff = _utcnow() - oom_datetime\n        if time_diff < datetime.timedelta(seconds=_OOM_EVENT_COOL_TIME_SEC):\n            logging.warning(self._symptom_msg('a recent HBM OOM has occurred ~{} seconds ago. The model script will terminate automatically. To prevent future HBM OOM events, please consider reducing the model size. To disable this behavior, set flag --hbm_oom_exit=false when starting the script.'.format(time_diff.seconds)))\n            return True\n    return False",
    "docstring": "Check if a HBM OOM event is reported.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\tpu\\client\\client.py",
    "ast_data": "FunctionDef name:_hbm_oom_event arg:self arg:symptoms arguments arg arg If Return return:yes For Call If Compare Assign Call Assign Call Assign Call If Compare Call Call Call Call Return return:yes Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "get_rank",
    "source_code": "def get_rank(self) -> int:\n    return get_rank()",
    "docstring": "Returns the current global rank.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\device_mesh.py",
    "ast_data": "FunctionDef name:get_rank arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "__init__",
    "source_code": "def __init__(self, module: torch.fx.GraphModule):\n    self.module = module\n    self.fake_mode = self._detect_fake_mode()",
    "docstring": "Initialize the transform. Args: module: The module to be transformed.",
    "type": "method",
    "file_path": "pytorch\\torch\\onnx\\_internal\\fx\\_pass.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:module arguments arg arg Assign Assign Call"
  },
  {
    "library": "scrapy",
    "name": "FeedStorageProtocol",
    "source_code": "class FeedStorageProtocol(Protocol):\n\n    def __init__(self, uri: str, *, feed_options: dict[str, Any] | None=None):\n        pass\n\n    def open(self, spider: Spider) -> IO[bytes]:\n        pass\n\n    def store(self, file: IO[bytes]) -> Deferred[None] | None:\n        pass",
    "docstring": "Reimplementation of `` that can be used in type hints.",
    "type": "class",
    "file_path": "scrapy\\scrapy\\extensions\\feedexport.py",
    "ast_data": "ClassDef name:FeedStorageProtocol FunctionDef name:__init__ arg:self arg:uri arguments arg arg arg FunctionDef name:open arg:self arg:spider arguments arg arg FunctionDef name:store arg:self arg:file arguments arg arg"
  },
  {
    "library": "django",
    "name": "_check_field_spec",
    "source_code": "def _check_field_spec(self, obj, fields, label):\n    if isinstance(fields, tuple):\n        return list(chain.from_iterable((self._check_field_spec_item(obj, field_name, '%s[%d]' % (label, index)) for index, field_name in enumerate(fields))))\n    else:\n        return self._check_field_spec_item(obj, fields, label)",
    "docstring": "should be an item of or an item of fieldset[1]['fields'] for any in . It should be a field name or a tuple of field names.",
    "type": "method",
    "file_path": "django\\django\\contrib\\admin\\checks.py",
    "ast_data": "FunctionDef name:_check_field_spec arg:self arg:obj arg:fields arg:label arguments arg arg arg arg If Call Return return:yes Call Call Call Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_merge_with",
    "source_code": "def _merge_with(self, other):\n    nrows = self._nrows.merge_with(other.nrows)\n    nvals = self._nvals.merge_with(other.nvals)\n    ncols = self._uniform_row_length.merge_with(other.uniform_row_length)\n    if not RowPartitionSpec._dimensions_compatible(nrows, nvals, ncols):\n        raise ValueError('Merging incompatible RowPartitionSpecs')\n    if self.dtype != other.dtype:\n        raise ValueError('Merging RowPartitionSpecs with incompatible dtypes')\n    return RowPartitionSpec(nrows=nrows[0], nvals=nvals[0], uniform_row_length=ncols[0], dtype=self.dtype)",
    "docstring": "Merge two RowPartitionSpecs.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\row_partition.py",
    "ast_data": "FunctionDef name:_merge_with arg:self arg:other arguments arg arg Assign Call Assign Call Assign Call If Call Raise Call If Compare Raise Call Return return:yes Call"
  },
  {
    "library": "django",
    "name": "as_text",
    "source_code": "def as_text(self, attrs=None, **kwargs):\n    return self.as_widget(TextInput(), attrs, **kwargs)",
    "docstring": "Return a string of HTML for representing this as an .",
    "type": "method",
    "file_path": "django\\django\\forms\\boundfield.py",
    "ast_data": "FunctionDef name:as_text arg:self arg:attrs arguments arg arg arg Return return:yes Call Call"
  },
  {
    "library": "scipy",
    "name": "_read_string",
    "source_code": "def _read_string(f):\n    length = _read_long(f)\n    if length > 0:\n        chars = _read_bytes(f, length).decode('latin1')\n        _align_32(f)\n    else:\n        chars = ''\n    return chars",
    "docstring": "Read a string",
    "type": "function",
    "file_path": "scipy\\scipy\\io\\_idl.py",
    "ast_data": "FunctionDef name:_read_string arg:f arguments arg Assign Call If Compare Assign Call Call Call Assign Return return:yes"
  },
  {
    "library": "pandas",
    "name": "std",
    "source_code": "def std(self, axis=None, dtype=None, out=None, ddof: int=1, keepdims: bool=False, skipna: bool=True) -> Timedelta:\n    from pandas.core.arrays import TimedeltaArray\n    dtype_str = self._ndarray.dtype.name.replace('datetime64', 'timedelta64')\n    dtype = np.dtype(dtype_str)\n    tda = TimedeltaArray._simple_new(self._ndarray.view(dtype), dtype=dtype)\n    return tda.std(axis=axis, out=out, ddof=ddof, keepdims=keepdims, skipna=skipna)",
    "docstring": "Return sample standard deviation over requested axis. Normalized by by default. This can be changed using `pandas.SeriesN - ddofNpandas.DatetimeIndex`: >>> idx = pd.date_range(\"2001-01-01 00:00\", periods=3) >>> idx DatetimeIndex(['2001-01-01', '2001-01-02', '2001-01-03'], dtype='datetime64[ns]', freq='D') >>> idx.std() Timedelta('1 days 00:00:00')",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\arrays\\datetimes.py",
    "ast_data": "FunctionDef name:std arg:self arg:axis arg:dtype arg:out arg:ddof arg:keepdims arg:skipna arguments arg arg arg arg arg arg arg Assign Call Assign Call Assign Call Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "done",
    "source_code": "def done(self):\n    with self._queue_lock:\n        self._raise_if_error()\n        return self._queue.empty() and self._inflight_closure_count == 0",
    "docstring": "Returns true if the queue is empty and there is no inflight closure. If was called before , the error from the first invocation of will be raised.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\coordinator\\cluster_coordinator.py",
    "ast_data": "FunctionDef name:done arg:self arguments arg With Call Return return:yes BoolOp Call Compare"
  },
  {
    "library": "pandas",
    "name": "nunique",
    "source_code": "@final\ndef nunique(self):\n    return self._downsample('nunique')",
    "docstring": "Return number of unique elements in the group. Returns ------- Series Number of unique values within each group. See Also -------- core.groupby.SeriesGroupBy.nunique : Method nunique for SeriesGroupBy. Examples -------- >>> ser = pd.Series( ... [1, 2, 3, 3], ... index=pd.DatetimeIndex( ... [\"2023-01-01\", \"2023-01-15\", \"2023-02-01\", \"2023-02-15\"] ... ), ... ) >>> ser 2023-01-01 1 2023-01-15 2 2023-02-01 3 2023-02-15 3 dtype: int64 >>> ser.resample(\"MS\").nunique() 2023-01-01 2 2023-02-01 1 Freq: MS, dtype: int64",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\resample.py",
    "ast_data": "FunctionDef name:nunique arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "fit",
    "source_code": "@_fit_context(prefer_skip_nested_validation=False)\ndef fit(self, X, Y, **fit_params):\n    _raise_for_params(fit_params, self, 'fit')\n    super().fit(X, Y, **fit_params)\n    self.classes_ = [estimator.classes_ for estimator in self.estimators_]\n    return self",
    "docstring": "Fit the model to data matrix X and targets Y. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) The input data. Y : array-like of shape (n_samples, n_classes) The target values. **fit_params : dict of string -> object Parameters passed to the method of each step. Only available if . See the :ref:. .. versionadded:: 1.3 Returns ------- self : object Class instance.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\multioutput.py",
    "ast_data": "FunctionDef name:fit arg:self arg:X arg:Y arguments arg arg arg arg Call Call Call Assign Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "std",
    "source_code": "@deprecate_nonkeyword_arguments(version='4.0', allowed_args=['self'], name='std')\ndef std(self, axis: Axis | None=0, skipna: bool=True, ddof: int=1, numeric_only: bool=False, **kwargs) -> Series | Any:\n    result = super().std(axis=axis, skipna=skipna, ddof=ddof, numeric_only=numeric_only, **kwargs)\n    if isinstance(result, Series):\n        result = result.__finalize__(self, method='std')\n    return result",
    "docstring": "Return sample standard deviation over requested axis. Normalized by N-1 by default. This can be changed using the ddof argument. Parameters ---------- axis : {index (0), columns (1)} For this parameter is unused and defaults to 0. .. warning:: The behavior of DataFrame.std with `numpy.stdddof=0ddof=1ddof=0` can be set to normalize by N instead of N-1: >>> df.std(ddof=0) age 16.269219 height 0.205609 dtype: float64",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\frame.py",
    "ast_data": "FunctionDef name:std arg:self arg:axis arg:skipna arg:ddof arg:numeric_only arguments arg arg arg arg arg arg Assign Call Call If Call Assign Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "create_read_items_for_chunk_list",
    "source_code": "def create_read_items_for_chunk_list(fqn: str, checkpoint_md: TensorStorageMetadata, local_chunks: list[ChunkStorageMetadata]) -> list[ReadItem]:\n    read_items = []\n    for idx, shard in enumerate(local_chunks):\n        for storage_idx, storage_md in enumerate(checkpoint_md.chunks):\n            if not _check_shard_metadata_pair_overlap(shard, storage_md):\n                continue\n            storage_offsets = []\n            dest_offsets = []\n            lengths = []\n            for _dim, offset_for_saved_tensor, offset_for_current_tensor, length in _shards_get_overlap_region_wrt_saved_tensor(saved_shard=storage_md, current_shard=shard):\n                storage_offsets.append(offset_for_saved_tensor)\n                dest_offsets.append(offset_for_current_tensor)\n                lengths.append(length)\n            read_items.append(_create_read_item_for_tensor(dest_index=MetadataIndex(fqn, shard.offsets, idx), dest_offsets=dest_offsets, storage_index=MetadataIndex(fqn, storage_md.offsets, storage_idx), storage_offsets=storage_offsets, lengths=lengths))\n    return read_items",
    "docstring": "Create a list of `` that will satisfy all input chunks.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\checkpoint\\planner_helpers.py",
    "ast_data": "FunctionDef name:create_read_items_for_chunk_list arg:fqn arg:checkpoint_md arg:local_chunks arguments arg arg arg Assign For Call For Call If Call Assign Assign Assign For Call Call Call Call Call Call Call Call Return return:yes"
  },
  {
    "library": "cherrypy",
    "name": "get_instances",
    "source_code": "def get_instances(cls):\n    return [x for x in gc.get_objects() if isinstance(x, cls)]",
    "docstring": "Return GC instances.",
    "type": "function",
    "file_path": "cherrypy\\cherrypy\\lib\\gctools.py",
    "ast_data": "FunctionDef name:get_instances arg:cls arguments arg Return return:yes Call Call"
  },
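A usage sketch for the gc-based instance finder above: every live instance of a class reachable by the collector is returned. `Widget` is a hypothetical class introduced only for this demo.

```python
import gc

# Same one-liner as get_instances above: scan all collector-tracked
# objects and keep the ones that are instances of cls.
def get_instances(cls):
    return [x for x in gc.get_objects() if isinstance(x, cls)]

class Widget:  # hypothetical class for illustration
    pass

a, b = Widget(), Widget()
print(len(get_instances(Widget)))  # 2 (a and b are still referenced)
```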
  {
    "library": "numpy",
    "name": "PkgNotFound",
    "source_code": "class PkgNotFound(OSError):\n\n    def __init__(self, msg):\n        self.msg = msg\n\n    def __str__(self):\n        return self.msg",
    "docstring": "Exception raised when a package can not be located.",
    "type": "class",
    "file_path": "numpy\\numpy\\distutils\\npy_pkg_config.py",
    "ast_data": "ClassDef name:PkgNotFound FunctionDef name:__init__ arg:self arg:msg arguments arg arg Assign FunctionDef name:__str__ arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_maybe_colocate_with",
    "source_code": "@contextlib.contextmanager\ndef _maybe_colocate_with(self, value):\n    if not self._colocate_with_first_write_call:\n        yield\n    else:\n        if not self._colocate_with:\n            self._colocate_with.append(value)\n        with ops.colocate_with(self._colocate_with[0]):\n            yield",
    "docstring": "Colocate operations with an internal colocation group or . Args: value: , the tensor to try to colocate with. Yields: Does not yield anything, but the new context is a colocation context. If no internal colocation group is set, colocate with and set the internal colocation group to be value.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\tensor_array_ops.py",
    "ast_data": "FunctionDef name:_maybe_colocate_with arg:self arg:value arguments arg arg If If Call With Call"
  },
  {
    "library": "django",
    "name": "validate_autopk_value",
    "source_code": "def validate_autopk_value(self, value):\n    return value",
    "docstring": "Certain backends do not accept some values for \"serial\" fields (for example zero in MySQL). Raise a ValueError if the value is invalid, otherwise return the validated value.",
    "type": "method",
    "file_path": "django\\django\\db\\backends\\base\\operations.py",
    "ast_data": "FunctionDef name:validate_autopk_value arg:self arg:value arguments arg arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "__call__",
    "source_code": "def __call__(self, shape, dtype=None, **kwargs):\n    _validate_kwargs(self.__class__.__name__, kwargs)\n    dtype = _assert_float_dtype(_get_dtype(dtype))\n    if _PARTITION_SHAPE in kwargs:\n        shape = kwargs[_PARTITION_SHAPE]\n    return self._random_generator.truncated_normal(shape, self.mean, self.stddev, dtype)",
    "docstring": "Returns a tensor object initialized to random normal values (truncated). Args: shape: Shape of the tensor. dtype: Optional dtype of the tensor. Only floating point types are supported. If not specified, is used, which default to unless you configured it otherwise (via ) **kwargs: Additional keyword arguments.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\initializers\\initializers_v2.py",
    "ast_data": "FunctionDef name:__call__ arg:self arg:shape arg:dtype arguments arg arg arg arg Call Assign Call Call If Compare Assign Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "find_first_use_of_broken_modules",
    "source_code": "def find_first_use_of_broken_modules(exc: PackagingError) -> dict[str, list[str]]:\n    assert isinstance(exc, PackagingError), 'exception must be a PackagingError'\n    uses = {}\n    broken_module_names = [m for m, attr in exc.dependency_graph.nodes.items() if attr.get('error', False)]\n    for module_name in broken_module_names:\n        path = exc.dependency_graph.first_path(module_name)\n        uses[module_name] = path\n    return uses",
    "docstring": "Find all broken modules in a PackagingError, and for each one, return the dependency path in which the module was first encountered. E.g. broken module m.n.o was added to a dependency graph while processing a.b.c, then re-encountered while processing d.e.f. This method would return {'m.n.o': ['a', 'b', 'c']} Args: exc: a PackagingError Returns: A dict from broken module names to lists of module names in the path.",
    "type": "function",
    "file_path": "pytorch\\torch\\package\\analyze\\find_first_use_of_broken_modules.py",
    "ast_data": "FunctionDef name:find_first_use_of_broken_modules arg:exc arguments arg Call Assign Assign Call Call For Assign Call Assign Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "Event",
    "source_code": "class Event:\n\n    def __init__(self, name, canvas, guiEvent=None):\n        self.name = name\n        self.canvas = canvas\n        self.guiEvent = guiEvent\n\n    def _process(self):\n        self.canvas.callbacks.process(self.name, self)\n        self.guiEvent = None",
    "docstring": "A Matplotlib event. The following attributes are defined and shown with their default values. Subclasses may define additional attributes. Attributes ---------- name : str The event name. canvas : The backend-specific canvas instance generating the event. guiEvent The GUI event that triggered the Matplotlib event.",
    "type": "class",
    "file_path": "matplotlib\\lib\\matplotlib\\backend_bases.py",
    "ast_data": "ClassDef name:Event FunctionDef name:__init__ arg:self arg:name arg:canvas arg:guiEvent arguments arg arg arg arg Assign Assign Assign FunctionDef name:_process arg:self arguments arg Call Assign"
  },
  {
    "library": "matplotlib",
    "name": "extents",
    "source_code": "@property\ndef extents(self):\n    x0, y0, width, height = self._rect_bbox\n    xmin, xmax = sorted([x0, x0 + width])\n    ymin, ymax = sorted([y0, y0 + height])\n    return (xmin, xmax, ymin, ymax)",
    "docstring": "Return (xmin, xmax, ymin, ymax) in data coordinates as defined by the bounding box before rotation.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\widgets.py",
    "ast_data": "FunctionDef name:extents arg:self arguments arg Assign Assign Call Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "normalize_cluster_spec",
    "source_code": "def normalize_cluster_spec(cluster_spec):\n    if isinstance(cluster_spec, (dict, cluster_pb2.ClusterDef)):\n        return server_lib.ClusterSpec(cluster_spec)\n    elif not isinstance(cluster_spec, server_lib.ClusterSpec):\n        raise ValueError(\"`cluster_spec' should be dict or a `tf.train.ClusterSpec` or a `tf.train.ClusterDef` object\")\n    return cluster_spec",
    "docstring": "Makes into a object. Args: cluster_spec: a dict, ClusterDef or ClusterSpec object specifying the cluster configurations. Returns: a object. Raises: ValueError: if is not a dict or a or a .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\multi_worker_util.py",
    "ast_data": "FunctionDef name:normalize_cluster_spec arg:cluster_spec arguments arg If Call Return return:yes Call If Call Raise Call Return return:yes"
  },
  {
    "library": "kornia",
    "name": "_fill_and_warp",
    "source_code": "def _fill_and_warp(src: Tensor, grid: Tensor, mode: str, align_corners: bool, fill_value: Tensor) -> Tensor:\n    ones_mask = ones_like(src)\n    fill_value = fill_value.to(ones_mask)[None, :, None, None]\n    inv_ones_mask = 1 - F.grid_sample(ones_mask, grid, align_corners=align_corners, mode=mode, padding_mode='zeros')\n    inv_color_mask = inv_ones_mask * fill_value\n    return F.grid_sample(src, grid, align_corners=align_corners, mode=mode, padding_mode='zeros') + inv_color_mask",
    "docstring": "Warp a mask of ones, then multiple with fill_value and add to default warp. Args: src: input tensor of shape :math:. grid: grid tensor from . mode: interpolation mode to calculate output values `(3)(B, 3, H, W)`.",
    "type": "function",
    "file_path": "kornia\\kornia\\geometry\\transform\\imgwarp.py",
    "ast_data": "FunctionDef name:_fill_and_warp arg:src arg:grid arg:mode arg:align_corners arg:fill_value arguments arg arg arg arg arg Assign Call Assign Call Assign Call Assign Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "any",
    "source_code": "def any(self, axis: AxisInt=0, *args, **kwargs) -> bool:\n    nv.validate_any(args, kwargs)\n    values = self.sp_values\n    if len(values) != len(self) and np.any(self.fill_value):\n        return True\n    return values.any().item()",
    "docstring": "Tests whether at least one of elements evaluate True Returns ------- any : bool See Also -------- numpy.any",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\arrays\\sparse\\array.py",
    "ast_data": "FunctionDef name:any arg:self arg:axis arguments arg arg arg arg Call Assign If BoolOp Compare Call Call Call Return return:yes Return return:yes Call Call"
  },
  {
    "library": "scikit-learn",
    "name": "_get_curve_scorer",
    "source_code": "def _get_curve_scorer(self):\n    scoring = check_scoring(self.estimator, scoring=self.scoring)\n    curve_scorer = _CurveScorer.from_scorer(scoring, self._get_response_method(), self.thresholds)\n    return curve_scorer",
    "docstring": "Get the curve scorer based on the objective metric used.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\model_selection\\_classification_threshold.py",
    "ast_data": "FunctionDef name:_get_curve_scorer arg:self arguments arg Assign Call Assign Call Call Return return:yes"
  },
  {
    "library": "numpy",
    "name": "_ureduce",
    "source_code": "def _ureduce(a, func, keepdims=False, **kwargs):\n    a = np.asanyarray(a)\n    axis = kwargs.get('axis')\n    out = kwargs.get('out')\n    if keepdims is np._NoValue:\n        keepdims = False\n    nd = a.ndim\n    if axis is not None:\n        axis = _nx.normalize_axis_tuple(axis, nd)\n        if keepdims and out is not None:\n            index_out = tuple((0 if i in axis else slice(None) for i in range(nd)))\n            kwargs['out'] = out[(Ellipsis,) + index_out]\n        if len(axis) == 1:\n            kwargs['axis'] = axis[0]\n        else:\n            keep = set(range(nd)) - set(axis)\n            nkeep = len(keep)\n            for i, s in enumerate(sorted(keep)):\n                a = a.swapaxes(i, s)\n            a = a.reshape(a.shape[:nkeep] + (-1,))\n            kwargs['axis'] = -1\n    elif keepdims and out is not None:\n        index_out = (0,) * nd\n        kwargs['out'] = out[(Ellipsis,) + index_out]\n    r = func(a, **kwargs)\n    if out is not None:\n        return out\n    if keepdims:\n        if axis is None:\n            index_r = (np.newaxis,) * nd\n        else:\n            index_r = tuple((np.newaxis if i in axis else slice(None) for i in range(nd)))\n        r = r[(Ellipsis,) + index_r]\n    return r",
    "docstring": "Internal Function. Call with as first argument swapping the axes to use extended axis on functions that don't support it natively. Returns result and a.shape with axis dims set to 1. Parameters ---------- a : array_like Input array or object that can be converted to an array. func : callable Reduction function capable of receiving a single axis argument. It is called with as first argument followed by . kwargs : keyword arguments additional keyword arguments to pass to . Returns ------- result : tuple Result of func(a, **kwargs) and a.shape with axis dims set to 1 which can be used to reshape the result to the same shape a ufunc with keepdims=True would produce.",
    "type": "function",
    "file_path": "numpy\\numpy\\lib\\_function_base_impl.py",
    "ast_data": "FunctionDef name:_ureduce arg:a arg:func arg:keepdims arguments arg arg arg arg Assign Call Assign Call Assign Call If Compare Assign Assign If Compare Assign Call If BoolOp Compare Assign Call Compare Call Call Assign If Compare Call Assign Assign Call Call Call Assign Call For Call Call Assign Call Assign Call Assign If BoolOp Compare Assign Assign Assign Call If Compare Return return:yes If If Compare Assign Assign Call Compare Call Call Assign Return return:yes"
  },
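What `_ureduce` enables, shown through public NumPy: `np.median` accepts a tuple of axes precisely because this helper collapses the chosen axes into one before calling a single-axis reduction.

```python
import numpy as np

# Extended-axis reduction via np.median, which is implemented on top of
# the _ureduce axis-collapsing trick above.
a = np.arange(24).reshape(2, 3, 4)
print(np.median(a, axis=(0, 2)))                        # reduce axes 0 and 2
print(np.median(a, axis=(0, 2), keepdims=True).shape)   # (1, 3, 1)
```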
  {
    "library": "pandas",
    "name": "_get_resampler_for_grouping",
    "source_code": "@final\ndef _get_resampler_for_grouping(self, groupby: GroupBy, key):\n    return self._resampler_for_grouping(groupby=groupby, key=key, parent=self)",
    "docstring": "Return the correct class for resampling with groupby.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\resample.py",
    "ast_data": "FunctionDef name:_get_resampler_for_grouping arg:self arg:groupby arg:key arguments arg arg arg Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "_value_in_bounds",
    "source_code": "def _value_in_bounds(self, vals):\n    return (self._min_in_bounds(vals[0]), self._max_in_bounds(vals[1]))",
    "docstring": "Clip min, max values to the bounds.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\widgets.py",
    "ast_data": "FunctionDef name:_value_in_bounds arg:self arg:vals arguments arg arg Return return:yes Call Call"
  },
  {
    "library": "numpy",
    "name": "_float_conv",
    "source_code": "def _float_conv(self, value):\n    return array([value], self.ftype)",
    "docstring": "Converts float to conv. Parameters ---------- value : float value to be converted.",
    "type": "method",
    "file_path": "numpy\\numpy\\_core\\getlimits.py",
    "ast_data": "FunctionDef name:_float_conv arg:self arg:value arguments arg arg Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "_gotitem",
    "source_code": "def _gotitem(self, key, ndim, subset=None):\n    if subset is None:\n        subset = self.obj\n    kwargs = {attr: getattr(self, attr) for attr in self._attributes}\n    selection = self._infer_selection(key, subset)\n    new_win = type(self)(subset, selection=selection, **kwargs)\n    return new_win",
    "docstring": "Sub-classes to define. Return a sliced object. Parameters ---------- key : str / list of selections ndim : {1, 2} requested ndim of result subset : object, default None subset to act on",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\window\\rolling.py",
    "ast_data": "FunctionDef name:_gotitem arg:self arg:key arg:ndim arg:subset arguments arg arg arg arg If Compare Assign Assign Call Assign Call Assign Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "ONNXFunction",
    "source_code": "@dataclasses.dataclass(frozen=True, eq=True)\nclass ONNXFunction:\n    onnx_function: onnxscript.OnnxFunction | onnxscript.TracedOnnxFunction\n    op_full_name: str\n    is_custom: bool = False\n    is_complex: bool = False",
    "docstring": "A wrapper of onnx-script function. op_full_name: The qualified name of the function. In the form of '::.'. onnx_function: The onnx-script function from torchlib. is_custom: Whether the function is a custom function. is_complex: Whether the function is a function that handles complex valued inputs.",
    "type": "class",
    "file_path": "pytorch\\torch\\onnx\\_internal\\fx\\registration.py",
    "ast_data": "ClassDef name:ONNXFunction Call"
  },
  {
    "library": "pytorch",
    "name": "validate_rearrange_expressions",
    "source_code": "def validate_rearrange_expressions(left: ParsedExpression, right: ParsedExpression, axes_lengths: Mapping[str, int]) -> None:\n    for length in axes_lengths.values():\n        if (length_type := type(length)) is not int:\n            raise TypeError(f'rearrange axis lengths must be integers, got: {length_type}')\n    if left.has_non_unitary_anonymous_axes or right.has_non_unitary_anonymous_axes:\n        raise ValueError('rearrange only supports unnamed axes of size 1')\n    difference = set.symmetric_difference(left.identifiers, right.identifiers)\n    if len(difference) > 0:\n        raise ValueError(f'Identifiers only on one side of rearrange expression (should be on both): {difference}')\n    unmatched_axes = axes_lengths.keys() - left.identifiers\n    if len(unmatched_axes) > 0:\n        raise ValueError(f'Identifiers not found in rearrange expression: {unmatched_axes}')",
    "docstring": "Perform expression validations that are specific to the operation. Args: left (ParsedExpression): left-hand side expression right (ParsedExpression): right-hand side expression axes_lengths (Mapping[str, int]): any additional length specifications for dimensions",
    "type": "function",
    "file_path": "pytorch\\functorch\\einops\\_parsing.py",
    "ast_data": "FunctionDef name:validate_rearrange_expressions arg:left arg:right arg:axes_lengths arguments arg arg arg For Call If Compare Call Raise Call If BoolOp Raise Call Assign Call If Compare Call Raise Call Assign Call If Compare Call Raise Call"
  },
  {
    "library": "scikit-learn",
    "name": "_n_features_out",
    "source_code": "@property\ndef _n_features_out(self):\n    return self.components_.shape[0]",
    "docstring": "Number of transformed output features.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\decomposition\\_truncated_svd.py",
    "ast_data": "FunctionDef name:_n_features_out arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "in_place_subclassed_model_state_restoration",
    "source_code": "def in_place_subclassed_model_state_restoration(model):\n    assert not model._is_graph_network\n    if hasattr(model, '_original_attributes_cache') and model._original_attributes_cache is not None:\n        setattr_tracking = model._setattr_tracking\n        model._setattr_tracking = False\n        model._self_tracked_trackables = []\n        for name, value in model._original_attributes_cache.items():\n            setattr(model, name, value)\n            if isinstance(value, Layer):\n                model._self_tracked_trackables.append(value)\n        model._original_attributes_cache = None\n        model._setattr_tracking = setattr_tracking\n    else:\n        _reset_build_compile_trackers(model)",
    "docstring": "Restores the original state of a model after it was \"reset\". This undoes this action of , which is called in if is set to True. Args: model: Instance of a Keras model created via subclassing, on which was previously called.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\models.py",
    "ast_data": "FunctionDef name:in_place_subclassed_model_state_restoration arg:model arguments arg If BoolOp Call Compare Assign Assign Assign For Call Call If Call Call Assign Assign Call"
  },
  {
    "library": "cherrypy",
    "name": "collapse_body",
    "source_code": "def collapse_body(self):\n    new_body = b''.join(self.body)\n    self.body = new_body\n    return new_body",
    "docstring": "Collapse self.body to a single string; replace it and return it.",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\_cprequest.py",
    "ast_data": "FunctionDef name:collapse_body arg:self arguments arg Assign Call Assign Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "get_type_promotion_rule",
    "source_code": "def get_type_promotion_rule(node: torch.fx.Node, type_promotion_table: TypePromotionTable) -> TypePromotionRule | None:\n    op = node.target\n    if not isinstance(op, torch._ops.OpOverload):\n        return None\n    if (rule := type_promotion_table.get_rule(op.overloadpacket)) is None:\n        return None\n    return rule",
    "docstring": "Get type promotion rule for a node. Args: node: Node to get type promotion rule for. type_promotion_table: Type promotion table. Returns: Type promotion rule for the node. None if no rule is found or if the node is not representing a torch operator.",
    "type": "function",
    "file_path": "pytorch\\torch\\onnx\\_internal\\fx\\passes\\type_promotion.py",
    "ast_data": "FunctionDef name:get_type_promotion_rule arg:node arg:type_promotion_table arguments arg arg Assign If Call Return return:no If Compare Call Return return:no Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "get_metric_function",
    "source_code": "def get_metric_function(metric, output_shape=None, loss_fn=None):\n    if metric not in ['accuracy', 'acc', 'crossentropy', 'ce']:\n        return metrics_module.get(metric)\n    is_sparse_categorical_crossentropy = isinstance(loss_fn, losses.SparseCategoricalCrossentropy) or (isinstance(loss_fn, losses.LossFunctionWrapper) and loss_fn.fn == losses.sparse_categorical_crossentropy)\n    is_binary_crossentropy = isinstance(loss_fn, losses.BinaryCrossentropy) or (isinstance(loss_fn, losses.LossFunctionWrapper) and loss_fn.fn == losses.binary_crossentropy)\n    if metric in ['accuracy', 'acc']:\n        if output_shape[-1] == 1 or is_binary_crossentropy:\n            return metrics_module.binary_accuracy\n        elif is_sparse_categorical_crossentropy:\n            return metrics_module.sparse_categorical_accuracy\n        return metrics_module.categorical_accuracy\n    else:\n        if output_shape[-1] == 1 or is_binary_crossentropy:\n            return metrics_module.binary_crossentropy\n        elif is_sparse_categorical_crossentropy:\n            return metrics_module.sparse_categorical_crossentropy\n        return metrics_module.categorical_crossentropy",
    "docstring": "Returns the metric function corresponding to the given metric input. Args: metric: Metric function name or reference. output_shape: The shape of the output that this metric will be calculated for. loss_fn: The loss function used. Returns: The metric function.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\training_utils_v1.py",
    "ast_data": "FunctionDef name:get_metric_function arg:metric arg:output_shape arg:loss_fn arguments arg arg arg If Compare Return return:yes Call Assign BoolOp Call BoolOp Call Compare Assign BoolOp Call BoolOp Call Compare If Compare If BoolOp Compare Return return:yes If Return return:yes Return return:yes If BoolOp Compare Return return:yes If Return return:yes Return return:yes"
  },
  {
    "library": "pandas",
    "name": "nanstd",
    "source_code": "@bottleneck_switch(ddof=1)\ndef nanstd(values, *, axis: AxisInt | None=None, skipna: bool=True, ddof: int=1, mask=None):\n    if values.dtype == 'M8[ns]':\n        values = values.view('m8[ns]')\n    orig_dtype = values.dtype\n    values, mask = _get_values(values, skipna, mask=mask)\n    result = np.sqrt(nanvar(values, axis=axis, skipna=skipna, ddof=ddof, mask=mask))\n    return _wrap_results(result, orig_dtype)",
    "docstring": "Compute the standard deviation along given axis while ignoring NaNs Parameters ---------- values : ndarray axis : int, optional skipna : bool, default True ddof : int, default 1 Delta Degrees of Freedom. The divisor used in calculations is N - ddof, where N represents the number of elements. mask : ndarray[bool], optional nan-mask if known Returns ------- result : float Unless input is a float array, in which case use the same precision as the input array. Examples -------- >>> from pandas.core import nanops >>> s = pd.Series([1, np.nan, 2, 3]) >>> nanops.nanstd(s.values) 1.0",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\nanops.py",
    "ast_data": "FunctionDef name:nanstd arg:values arguments arg arg arg arg arg If Compare Assign Call Assign Assign Call Assign Call Call Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "itemmap",
    "source_code": "def itemmap(func, d, factory=dict):\n    rv = factory()\n    rv.update(map(func, d.items()))\n    return rv",
    "docstring": "Apply function to items of dictionary >>> accountids = {\"Alice\": 10, \"Bob\": 20} >>> itemmap(reversed, accountids) # doctest: +SKIP {10: \"Alice\", 20: \"Bob\"} See Also: keymap valmap",
    "type": "function",
    "file_path": "pytorch\\torch\\fx\\experimental\\unification\\unification_tools.py",
    "ast_data": "FunctionDef name:itemmap arg:func arg:d arg:factory arguments arg arg arg Assign Call Call Call Call Return return:yes"
  },
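The docstring example from `itemmap` above, made runnable: `reversed()` on a `(key, value)` tuple yields `(value, key)`, so the mapping is inverted.

```python
# Runnable version of the itemmap doctest above.
def itemmap(func, d, factory=dict):
    rv = factory()
    rv.update(map(func, d.items()))
    return rv

accountids = {"Alice": 10, "Bob": 20}
print(itemmap(reversed, accountids))  # {10: 'Alice', 20: 'Bob'}
```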
  {
    "library": "pytorch",
    "name": "char",
    "source_code": "def char(self):\n    _warn_typed_storage_removal()\n    return self._to(torch.int8)",
    "docstring": "Casts this storage to char type.",
    "type": "method",
    "file_path": "pytorch\\torch\\storage.py",
    "ast_data": "FunctionDef name:char arg:self arguments arg Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "__iter__",
    "source_code": "def __iter__(self):\n    for item in (self[i] for i in range(len(self))):\n        yield item",
    "docstring": "Create a generator that iterate over the Sequence.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\utils\\data_utils.py",
    "ast_data": "FunctionDef name:__iter__ arg:self arguments arg For Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_show_mean_and_variance",
    "source_code": "def _show_mean_and_variance(tensor, cast_to_f32=True):\n    if cast_to_f32:\n        tensor = math_ops.cast(tensor, dtypes.float32)\n    mean, var = nn_impl.moments(array_ops.reshape(tensor, [-1]), axes=[0])\n    if not mean.get_shape().is_fully_defined():\n        mean = array_ops.reshape(mean, [])\n    if not var.get_shape().is_fully_defined():\n        var = array_ops.reshape(var, [])\n    return (mean, var)",
    "docstring": "Returns the mean and variance of the given tensor.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\tpu\\tensor_tracer.py",
    "ast_data": "FunctionDef name:_show_mean_and_variance arg:tensor arg:cast_to_f32 arguments arg arg If Assign Call Assign Call Call If Call Call Assign Call If Call Call Assign Call Return return:yes"
  },
  {
    "library": "pandas",
    "name": "ntemps",
    "source_code": "@property\ndef ntemps(self) -> int:\n    return len(self.temps)",
    "docstring": "The number of temporary variables in this scope",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\computation\\scope.py",
    "ast_data": "FunctionDef name:ntemps arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "pygame",
    "name": "WeakSprite",
    "source_code": "class WeakSprite(Sprite):\n\n    def __init__(self, *groups):\n        super().__init__(*groups)\n        self.__dict__['_Sprite__g'] = WeakSet(self._Sprite__g)",
    "docstring": "A subclass of Sprite that references its Groups weakly. This means that any group this belongs to that is not referenced anywhere else is garbage collected automatically.",
    "type": "class",
    "file_path": "pygame\\src_py\\sprite.py",
    "ast_data": "ClassDef name:WeakSprite FunctionDef name:__init__ arg:self arguments arg arg Call Call Assign Call"
  },
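A minimal sketch of the `WeakSet` mechanism `WeakSprite` relies on, using plain stand-in objects rather than pygame groups; the class and attribute names below are illustrative only.

```python
import gc
import weakref

class Group:
    """Stand-in for a sprite group, only here to demonstrate WeakSet."""

class WeakSpriteLike:
    def __init__(self, *groups):
        # Hold groups weakly, mirroring WeakSprite's replacement of _Sprite__g.
        self._groups = weakref.WeakSet(groups)

g = Group()
sprite = WeakSpriteLike(g)
print(len(sprite._groups))  # 1
del g                       # drop the only strong reference to the group
gc.collect()
print(len(sprite._groups))  # 0 -- the group was collected automatically
```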
  {
    "library": "django",
    "name": "localtime",
    "source_code": "def localtime(value=None, timezone=None):\n    if value is None:\n        value = now()\n    if timezone is None:\n        timezone = get_current_timezone()\n    if is_naive(value):\n        raise ValueError('localtime() cannot be applied to a naive datetime')\n    return value.astimezone(timezone)",
    "docstring": "Convert an aware datetime.datetime to local time. Only aware datetimes are allowed. When value is omitted, it defaults to now(). Local time is defined by the current time zone, unless another time zone is specified.",
    "type": "function",
    "file_path": "django\\django\\utils\\timezone.py",
    "ast_data": "FunctionDef name:localtime arg:value arg:timezone arguments arg arg If Compare Assign Call If Compare Assign Call If Call Raise Call Return return:yes Call"
  },
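A stdlib sketch of the conversion `localtime` performs, assuming Python 3.9+ `zoneinfo`; the real Django function additionally falls back to the active Django time zone when `timezone` is omitted.

```python
from datetime import datetime, timezone as dt_timezone
from zoneinfo import ZoneInfo  # Python 3.9+

aware = datetime(2024, 1, 1, 12, 0, tzinfo=dt_timezone.utc)
# Equivalent of localtime(aware, ZoneInfo("Europe/Paris")):
print(aware.astimezone(ZoneInfo("Europe/Paris")))  # 2024-01-01 13:00:00+01:00

naive = datetime(2024, 1, 1, 12, 0)
# Mirrors the guard in localtime(): naive datetimes are rejected.
assert naive.tzinfo is None  # localtime() would raise ValueError here
```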
  {
    "library": "sphinx",
    "name": "process_link",
    "source_code": "def process_link(self, env: BuildEnvironment, refnode: Element, has_explicit_title: bool, title: str, target: str) -> tuple[str, str]:\n    return (title, ws_re.sub(' ', target))",
    "docstring": "Called after parsing title and target text, and creating the reference node (given in *refnode*). This method can alter the reference node and must return a new (or the same) `` tuple.",
    "type": "method",
    "file_path": "sphinx\\sphinx\\roles.py",
    "ast_data": "FunctionDef name:process_link arg:self arg:env arg:refnode arg:has_explicit_title arg:title arg:target arguments arg arg arg arg arg arg Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "zpk2tf",
    "source_code": "def zpk2tf(z, p, k):\n    xp = array_namespace(z, p)\n    z, p, k = map(xp.asarray, (z, p, k))\n    z = xpx.atleast_nd(z, ndim=1, xp=xp)\n    k = xpx.atleast_nd(k, ndim=1, xp=xp)\n    if xp.isdtype(k.dtype, 'integral'):\n        k = xp.astype(k, xp_default_dtype(xp))\n    if z.ndim > 1:\n        temp = _pu.poly(z[0], xp=xp)\n        b = xp.empty((z.shape[0], z.shape[1] + 1), dtype=temp.dtype)\n        if k.shape[0] == 1:\n            k = [k[0]] * z.shape[0]\n        for i in range(z.shape[0]):\n            b[i] = k[i] * _pu.poly(z[i], xp=xp)\n    else:\n        b = k * _pu.poly(z, xp=xp)\n    a = _pu.poly(p, xp=xp)\n    a = xpx.atleast_nd(xp.asarray(a), ndim=1, xp=xp)\n    return (b, a)",
    "docstring": "Return polynomial transfer function representation from zeros and poles Parameters ---------- z : array_like Zeros of the transfer function. p : array_like Poles of the transfer function. k : float System gain. Returns ------- b : ndarray Numerator polynomial coefficients. a : ndarray Denominator polynomial coefficients. Examples -------- Find the polynomial representation of a transfer function H(s) using its 'zpk' (Zero-Pole-Gain) representation. .. math:: H(z) = 5 \\frac { (s - 2)(s - 6) } { (s - 1)(s - 8) } >>> from scipy.signal import zpk2tf >>> z = [2, 6] >>> p = [1, 8] >>> k = 5 >>> zpk2tf(z, p, k) ( array([ 5., -40., 60.]), array([ 1., -9., 8.]))",
    "type": "function",
    "file_path": "scipy\\scipy\\signal\\_filter_design.py",
    "ast_data": "FunctionDef name:zpk2tf arg:z arg:p arg:k arguments arg arg arg Assign Call Assign Call Assign Call Assign Call If Call Assign Call Call If Compare Assign Call Assign Call If Compare Assign For Call Assign Call Assign Call Assign Call Assign Call Call Return return:yes"
  },
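The core of `zpk2tf` for 1-D input is root expansion; the docstring example can be reproduced with plain NumPy, where `np.poly` plays the role of the internal `_pu.poly` helper.

```python
import numpy as np

z, p, k = [2, 6], [1, 8], 5
b = k * np.poly(z)  # numerator:   5*(s-2)*(s-6) -> [  5., -40.,  60.]
a = np.poly(p)      # denominator:   (s-1)*(s-8) -> [  1.,  -9.,   8.]
print(b, a)
```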
  {
    "library": "scipy",
    "name": "_sample_odds_ratio_ci",
    "source_code": "def _sample_odds_ratio_ci(self, confidence_level=0.95, alternative='two-sided'):\n    if confidence_level < 0 or confidence_level > 1:\n        raise ValueError('confidence_level must be between 0 and 1')\n    table = self._table\n    if 0 in table.sum(axis=0) or 0 in table.sum(axis=1):\n        ci = (0, np.inf)\n    else:\n        ci = _sample_odds_ratio_ci(table, confidence_level=confidence_level, alternative=alternative)\n    return ConfidenceInterval(low=ci[0], high=ci[1])",
    "docstring": "Confidence interval for the sample odds ratio.",
    "type": "method",
    "file_path": "scipy\\scipy\\stats\\_odds_ratio.py",
    "ast_data": "FunctionDef name:_sample_odds_ratio_ci arg:self arg:confidence_level arg:alternative arguments arg arg arg If BoolOp Compare Compare Raise Call Assign If BoolOp Compare Call Compare Call Assign Assign Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "process_file",
    "source_code": "def process_file(in_filename, out_filename, upgrader):\n    if in_filename.endswith('.py'):\n        files_processed, report_text, errors = upgrader.process_file(in_filename, out_filename)\n    elif in_filename.endswith('.ipynb'):\n        files_processed, report_text, errors = ipynb.process_file(in_filename, out_filename, upgrader)\n    else:\n        raise NotImplementedError('Currently converter only supports python or ipynb')\n    return (files_processed, report_text, errors)",
    "docstring": "Process a file of type or .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\tools\\compatibility\\tf_upgrade_v2_main.py",
    "ast_data": "FunctionDef name:process_file arg:in_filename arg:out_filename arg:upgrader arguments arg arg arg If Call Assign Call If Call Assign Call Raise Call Return return:yes"
  },
  {
    "library": "pandas",
    "name": "get_column",
    "source_code": "@abstractmethod\ndef get_column(self, i: int) -> Column:\n    pass",
    "docstring": "Return the column at the indicated position.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\interchange\\dataframe_protocol.py",
    "ast_data": "FunctionDef name:get_column arg:self arg:i arguments arg arg"
  },
  {
    "library": "tensorflow",
    "name": "get_default_graph",
    "source_code": "@tf_export(v1=['get_default_graph'])\ndef get_default_graph() -> Graph:\n    return _default_graph_stack.get_default()",
    "docstring": "Returns the default graph for the current thread. The returned graph will be the innermost graph on which a context has been entered, or a global default graph if none has been explicitly created. NOTE: The default graph is a property of the current thread. If you create a new thread, and wish to use the default graph in that thread, you must explicitly add a in that thread's function. @compatibility(TF2) does not work with either eager execution or , and you should not invoke it directly. To migrate code that uses Graph-related functions to TF2, rewrite the code without them. See the [migration guide]( for more description about the behavior and semantic changes between Tensorflow 1 and Tensorflow 2. @end_compatibility Returns: The default being used in the current thread.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\ops.py",
    "ast_data": "FunctionDef name:get_default_graph arguments Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "__call__",
    "source_code": "def __call__(self, inputs, state, scope=None):\n    if scope is not None:\n        with vs.variable_scope(scope, custom_getter=self._rnn_get_variable) as scope:\n            return super(RNNCell, self).__call__(inputs, state, scope=scope)\n    else:\n        scope_attrname = 'rnncell_scope'\n        scope = getattr(self, scope_attrname, None)\n        if scope is None:\n            scope = vs.variable_scope(vs.get_variable_scope(), custom_getter=self._rnn_get_variable)\n            setattr(self, scope_attrname, scope)\n        with scope:\n            return super(RNNCell, self).__call__(inputs, state)",
    "docstring": "Run this RNN cell on inputs, starting from the given state. Args: inputs: tensor with shape . state: if is an integer, this should be a with shape . Otherwise, if is a tuple of integers, this should be a tuple with shapes . scope: VariableScope for the created subgraph; defaults to class name. Returns: A pair containing: - Output: A tensor with shape . - New state: Either a single tensor, or a tuple of tensors matching the arity and shapes of .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\layers\\legacy_rnn\\rnn_cell_impl.py",
    "ast_data": "FunctionDef name:__call__ arg:self arg:inputs arg:state arg:scope arguments arg arg arg arg If Compare With Call Return return:yes Call Call Assign Assign Call If Compare Assign Call Call Call With Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "_register_op",
    "source_code": "def _register_op(op, func, op_table):\n    if len(signature(func).parameters) != 4:\n        raise TypeError(f'Custom sharded op function expects signature: (types, args, kwargs, process_group), but received signature: {signature(func)}')\n    op_table[op] = func",
    "docstring": "Performs basic validation and registers the provided op in the given op_table.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\_shard\\op_registry_utils.py",
    "ast_data": "FunctionDef name:_register_op arg:op arg:func arg:op_table arguments arg arg arg If Compare Call Call Raise Call Call Assign"
  },
  {
    "library": "pytorch",
    "name": "itemfilter",
    "source_code": "def itemfilter(predicate, d, factory=dict):\n    rv = factory()\n    for item in d.items():\n        if predicate(item):\n            k, v = item\n            rv[k] = v\n    return rv",
    "docstring": "Filter items in dictionary by item >>> def isvalid(item): ... k, v = item ... return k % 2 == 0 and v >> d = {1: 2, 2: 3, 3: 4, 4: 5} >>> itemfilter(isvalid, d) {2: 3} See Also: keyfilter valfilter itemmap",
    "type": "function",
    "file_path": "pytorch\\torch\\fx\\experimental\\unification\\unification_tools.py",
    "ast_data": "FunctionDef name:itemfilter arg:predicate arg:d arg:factory arguments arg arg arg Assign Call For Call If Call Assign Assign Return return:yes"
  },
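Like `itemmap`, this helper sits in a private module; the sketch below copies it locally and runs the doctest case from the docstring.

```python
def itemfilter(predicate, d, factory=dict):
    # Local copy of the helper above; keeps (k, v) pairs the predicate accepts.
    rv = factory()
    for item in d.items():
        if predicate(item):
            k, v = item
            rv[k] = v
    return rv

def isvalid(item):
    k, v = item
    return k % 2 == 0 and v < 4

print(itemfilter(isvalid, {1: 2, 2: 3, 3: 4, 4: 5}))  # {2: 3}
```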
  {
    "library": "pandas",
    "name": "agg",
    "source_code": "def agg(self) -> DataFrame | Series | None:\n    func = self.func\n    if isinstance(func, str):\n        return self.apply_str()\n    if is_dict_like(func):\n        return self.agg_dict_like()\n    elif is_list_like(func):\n        return self.agg_list_like()\n    return None",
    "docstring": "Provide an implementation for the aggregators. Returns ------- Result of aggregation, or None if agg cannot be performed by this method.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\apply.py",
    "ast_data": "FunctionDef name:agg arg:self arguments arg Assign If Call Return return:yes Call If Call Return return:yes Call If Call Return return:yes Call Return return:no"
  },
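The three dispatch branches above are reachable through the public pandas API; a string, a dict-like, and a list-like `func` each take a different path.

```python
import pandas as pd

df = pd.DataFrame({"a": [1, 2], "b": [3, 4]})
print(df.agg("sum"))           # str       -> apply_str branch
print(df.agg({"a": "min"}))    # dict-like -> agg_dict_like branch
print(df.agg(["min", "max"]))  # list-like -> agg_list_like branch
```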
  {
    "library": "tensorflow",
    "name": "dtype",
    "source_code": "@property\ndef dtype(self) -> dtypes.DType:\n    return self._variable.dtype",
    "docstring": "The of this variable.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\ref_variable.py",
    "ast_data": "FunctionDef name:dtype arg:self arguments arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "set_up_storage_writer",
    "source_code": "@abc.abstractmethod\ndef set_up_storage_writer(self, is_coordinator: bool) -> None:\n    pass",
    "docstring": "Initialize this instance. Args: is_coordinator (bool): Whether this instance is responsible for coordinating the checkpoint.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\checkpoint\\storage.py",
    "ast_data": "FunctionDef name:set_up_storage_writer arg:self arg:is_coordinator arguments arg arg"
  },
  {
    "library": "pytorch",
    "name": "_Interval",
    "source_code": "class _Interval(Constraint):\n\n    def __init__(self, lower_bound, upper_bound):\n        self.lower_bound = lower_bound\n        self.upper_bound = upper_bound\n        super().__init__()\n\n    def check(self, value):\n        return (self.lower_bound <= value) & (value <= self.upper_bound)\n\n    def __repr__(self):\n        fmt_string = self.__class__.__name__[1:]\n        fmt_string += f'(lower_bound={self.lower_bound}, upper_bound={self.upper_bound})'\n        return fmt_string",
    "docstring": "Constrain to a real interval .",
    "type": "class",
    "file_path": "pytorch\\torch\\distributions\\constraints.py",
    "ast_data": "ClassDef name:_Interval FunctionDef name:__init__ arg:self arg:lower_bound arg:upper_bound arguments arg arg arg Assign Assign Call Call FunctionDef name:check arg:self arg:value arguments arg arg Return return:yes Compare Compare FunctionDef name:__repr__ arg:self arguments arg Assign Return return:yes"
  },
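The element-wise expression used by `_Interval.check` works on tensors of any shape thanks to broadcasting; a minimal demonstration:

```python
import torch

lower_bound, upper_bound = 0.0, 1.0
value = torch.tensor([-0.5, 0.0, 0.3, 1.0, 2.0])
# Same expression as _Interval.check(value):
ok = (lower_bound <= value) & (value <= upper_bound)
print(ok)  # tensor([False,  True,  True,  True, False])
```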
  {
    "library": "django",
    "name": "check_cs_get",
    "source_code": "def check_cs_get(result, func, cargs):\n    check_cs_op(result, func, cargs)\n    return last_arg_byref(cargs)",
    "docstring": "Check the coordinate sequence retrieval.",
    "type": "function",
    "file_path": "django\\django\\contrib\\gis\\geos\\prototypes\\coordseq.py",
    "ast_data": "FunctionDef name:check_cs_get arg:result arg:func arg:cargs arguments arg arg arg Call Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "_get_missing_features_info",
    "source_code": "def _get_missing_features_info(self, X):\n    if not self._precomputed:\n        imputer_mask = _get_mask(X, self.missing_values)\n    else:\n        imputer_mask = X\n    if sp.issparse(X):\n        imputer_mask.eliminate_zeros()\n        if self.features == 'missing-only':\n            n_missing = imputer_mask.sum(axis=0)\n        if self.sparse is False:\n            imputer_mask = imputer_mask.toarray()\n        elif imputer_mask.format == 'csr':\n            imputer_mask = imputer_mask.tocsc()\n    else:\n        if not self._precomputed:\n            imputer_mask = _get_mask(X, self.missing_values)\n        else:\n            imputer_mask = X\n        if self.features == 'missing-only':\n            n_missing = imputer_mask.sum(axis=0)\n        if self.sparse is True:\n            imputer_mask = sp.csc_matrix(imputer_mask)\n    if self.features == 'all':\n        features_indices = np.arange(X.shape[1])\n    else:\n        features_indices = np.flatnonzero(n_missing)\n    return (imputer_mask, features_indices)",
    "docstring": "Compute the imputer mask and the indices of the features containing missing values. Parameters ---------- X : {ndarray, sparse matrix} of shape (n_samples, n_features) The input data with missing values. Note that has been checked in :meth: and :meth: before to call this function. Returns ------- imputer_mask : {ndarray, sparse matrix} of shape (n_samples, n_features) The imputer mask of the original data. features_with_missing : ndarray of shape (n_features_with_missing) The features containing missing values.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\impute\\_base.py",
    "ast_data": "FunctionDef name:_get_missing_features_info arg:self arg:X arguments arg arg If Assign Call Assign If Call Call If Compare Assign Call If Compare Assign Call If Compare Assign Call If Assign Call Assign If Compare Assign Call If Compare Assign Call If Compare Assign Call Assign Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "set_model_state_dict",
    "source_code": "def set_model_state_dict(model: nn.Module, model_state_dict: dict[str, ValueType], *, options: Optional[StateDictOptions]=None) -> _IncompatibleKeys:\n    model_state_dict: dict[str, ValueType] = _unflatten_model_state_dict(model, model_state_dict)\n    with _gc_context():\n        info = _verify_options(model, (), optim_only=False, options=options)\n        _verify_state_dict(model_state_dict, {}, info)\n        return _load_model_state_dict(model, model_state_dict, info)",
    "docstring": "Load the model state_dict. The counterpart of `StateDictOptions` fields: * **missing_keys** is a list of str containing the missing keys * **unexpected_keys** is a list of str containing the unexpected keys :type model_state_dict: typing.Dict[str, ValueType]",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\checkpoint\\state_dict.py",
    "ast_data": "FunctionDef name:set_model_state_dict arg:model arg:model_state_dict arguments arg arg arg Call With Call Assign Call Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "parse_from_string",
    "source_code": "def parse_from_string(self, spec):\n    return self.from_string(spec)",
    "docstring": "Parse a name into its components. **2.x behavior change**: In TensorFlow 1.x, this function mutates its own state and returns itself. In 2.x, DeviceSpecs are immutable, and this function will return a DeviceSpec which contains the spec. * Recommended: * Will work in 1.x and 2.x (though deprecated in 2.x): * Will NOT work in 2.x: ``DeviceSpec`. Raises: ValueError: if the spec was not valid.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\device_spec.py",
    "ast_data": "FunctionDef name:parse_from_string arg:self arg:spec arguments arg arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "debug_print_src",
    "source_code": "def debug_print_src(self, node):\n    if __debug__:\n        print(parser.unparse(node))\n    return node",
    "docstring": "Helper method useful for debugging. Prints the AST as code.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\autograph\\pyct\\transformer.py",
    "ast_data": "FunctionDef name:debug_print_src arg:self arg:node arguments arg arg If Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "from_tensors",
    "source_code": "def from_tensors(self, tensors: Iterator[core.Tensor]) -> Any:\n    del tensors\n    return self.placeholder_value(PlaceholderContext())",
    "docstring": "Generates a value of this type from Tensors. Must use the same fixed amount of tensors as . Args: tensors: An iterator from which the tensors can be pulled. Returns: A value of this type.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\types\\trace.py",
    "ast_data": "FunctionDef name:from_tensors arg:self arg:tensors arguments arg arg Return return:yes Call Call"
  },
  {
    "library": "numpy",
    "name": "__itruediv__",
    "source_code": "def __itruediv__(self, other):\n    other_data = getdata(other)\n    dom_mask = _DomainSafeDivide().__call__(self._data, other_data)\n    other_mask = getmask(other)\n    new_mask = mask_or(other_mask, dom_mask)\n    if dom_mask.any():\n        _, fval = ufunc_fills[np.true_divide]\n        other_data = np.where(dom_mask, other_data.dtype.type(fval), other_data)\n    self._mask |= new_mask\n    other_data = np.where(self._mask, other_data.dtype.type(1), other_data)\n    self._data.__itruediv__(other_data)\n    return self",
    "docstring": "True divide self by other in-place.",
    "type": "method",
    "file_path": "numpy\\numpy\\ma\\core.py",
    "ast_data": "FunctionDef name:__itruediv__ arg:self arg:other arguments arg arg Assign Call Assign Call Call Assign Call Assign Call If Call Assign Assign Call Call Assign Call Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "create_unbacked_symfloat",
    "source_code": "@record_shapeenv_event()\ndef create_unbacked_symfloat(self) -> SymFloat:\n    symbol: sympy.Symbol = make_symbol(SymT.UNBACKED_FLOAT, next(self.unbacked_symfloat_counter))\n    self.counter['create_unbacked_symbol'] += 1\n    if not self._ignore_fresh_unbacked_symbols_tls():\n        self.pending_fresh_unbacked_symbols.append(symbol)\n    self.var_to_stack[symbol] = CapturedTraceback.extract(skip=1)\n    vr = self.var_to_range[symbol] = ValueRanges.unknown()\n    assert vr.is_float\n    sloc = self._get_sloc()\n    self.var_to_range_sloc[symbol] = ValueRangesSLoc(sloc, sloc)\n    fx_node = self._create_fx_placeholder_and_z3var(symbol, float)\n    sym_node = SymNode(symbol, self, float, None, fx_node=fx_node)\n    self._log_create_unbacked_symbol('create_unbacked_symfloat', symbol, vr, sym_node=sym_node)\n    return SymFloat(sym_node)",
    "docstring": "Create a symbolic float without a hint value",
    "type": "method",
    "file_path": "pytorch\\torch\\fx\\experimental\\symbolic_shapes.py",
    "ast_data": "FunctionDef name:create_unbacked_symfloat arg:self arguments arg Call Call If Call Call Assign Call Assign Call Assign Call Assign Call Assign Call Assign Call Call Return return:yes Call Call"
  },
  {
    "library": "numpy",
    "name": "_zseries_to_cseries",
    "source_code": "def _zseries_to_cseries(zs):\n    n = (zs.size + 1) // 2\n    c = zs[n - 1:].copy()\n    c[1:n] *= 2\n    return c",
    "docstring": "Convert z-series to a Chebyshev series. Convert a z series to the equivalent Chebyshev series. The result is never an empty array. The dtype of the return is the same as that of the input. No checks are run on the arguments as this routine is for internal use. Parameters ---------- zs : 1-D ndarray Odd length symmetric z-series, ordered from low to high. Returns ------- c : 1-D ndarray Chebyshev coefficients, ordered from low to high.",
    "type": "function",
    "file_path": "numpy\\numpy\\polynomial\\chebyshev.py",
    "ast_data": "FunctionDef name:_zseries_to_cseries arg:zs arguments arg Assign Assign Call Return return:yes"
  },
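A worked example, using a local copy of the private helper: the symmetric z-series for cos(theta) = (z + 1/z)/2 is [0.5, 0, 0.5], and converting it should recover the Chebyshev series of T_1, i.e. coefficients [0, 1].

```python
import numpy as np

def zseries_to_cseries(zs):  # local copy of the private numpy helper
    n = (zs.size + 1) // 2
    c = zs[n - 1:].copy()    # keep the upper (non-negative-power) half
    c[1:n] *= 2              # double everything except the constant term
    return c

print(zseries_to_cseries(np.array([0.5, 0.0, 0.5])))  # [0. 1.]
```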
  {
    "library": "pytorch",
    "name": "ShardMetadata",
    "source_code": "@dataclass\nclass ShardMetadata:\n    __slots__ = ['shard_offsets', 'shard_sizes', 'placement']\n    shard_offsets: list[int]\n    shard_sizes: list[int]\n    placement: Optional[_remote_device]\n\n    def __init__(self, shard_offsets: list[int], shard_sizes: list[int], placement: Optional[Union[str, _remote_device]]=None):\n        self.shard_offsets = shard_offsets\n        self.shard_sizes = shard_sizes\n        if isinstance(placement, str):\n            self.placement = _remote_device(placement)\n        else:\n            self.placement = placement\n        if len(self.shard_offsets) != len(self.shard_sizes):\n            raise ValueError(f'shard_offsets and shard_sizes should have the same number of elements, found {len(self.shard_offsets)} and {self.shard_sizes} respectively')\n        for i in range(len(self.shard_offsets)):\n            if self.shard_offsets[i] < 0:\n                raise ValueError('shard_offsets should be >=0')\n            if self.shard_sizes[i] < 0:\n                raise ValueError('shard_sizes should be >= 0')\n\n    def __hash__(self):\n\n        def _hash_reduce(a, b):\n            return (a << 8) + hash(b)\n        res = reduce(_hash_reduce, self.shard_offsets, 37)\n        res = reduce(_hash_reduce, self.shard_sizes, res)\n        res = _hash_reduce(res, self.placement)\n        return res",
    "docstring": "Represents a shard of the overall Tensor including its offsets, lengths and device placement. Args: shard_offsets(List[int]): Offsets in the original tensor indicating the start offsets for this shard. Should have the same rank as the original tensor. shard_sizes(List[int]): Integers indicating the size of each dimension for this shard. Should have the same rank as the original tensor. placement(:class:): Specifies the placement of this shard.",
    "type": "class",
    "file_path": "pytorch\\torch\\distributed\\_shard\\metadata.py",
    "ast_data": "ClassDef name:ShardMetadata Assign FunctionDef name:__init__ arg:self arg:shard_offsets arg:shard_sizes arg:placement arguments arg arg arg arg Assign Assign If Call Assign Call Assign If Compare Call Call Raise Call Call For Call Call If Compare Raise Call If Compare Raise Call FunctionDef name:__hash__ arg:self arguments arg FunctionDef name:_hash_reduce arg:a arg:b arguments arg arg Return return:yes Call Assign Call Assign Call Assign Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "convert_zunits",
    "source_code": "def convert_zunits(self, z):\n    return self.zaxis.convert_units(z)",
    "docstring": "For artists in an Axes, if the zaxis has units support, convert *z* using zaxis unit type",
    "type": "method",
    "file_path": "matplotlib\\lib\\mpl_toolkits\\mplot3d\\axes3d.py",
    "ast_data": "FunctionDef name:convert_zunits arg:self arg:z arguments arg arg Return return:yes Call"
  },
  {
    "library": "django",
    "name": "each_context",
    "source_code": "def each_context(self, request):\n    script_name = request.META['SCRIPT_NAME']\n    site_url = script_name if self.site_url == '/' and script_name else self.site_url\n    return {'site_title': self.site_title, 'site_header': self.site_header, 'site_url': site_url, 'has_permission': self.has_permission(request), 'available_apps': self.get_app_list(request), 'is_popup': False, 'is_nav_sidebar_enabled': self.enable_nav_sidebar, 'log_entries': self.get_log_entries(request)}",
    "docstring": "Return a dictionary of variables to put in the template context for *every* page in the admin site. For sites running on a subpath, use the SCRIPT_NAME value if site_url hasn't been customized.",
    "type": "method",
    "file_path": "django\\django\\contrib\\admin\\sites.py",
    "ast_data": "FunctionDef name:each_context arg:self arg:request arguments arg arg Assign Assign BoolOp Compare Return return:yes Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "@deprecation.deprecated('2019-01-01', 'The TensorFlow Distributions library has moved to TensorFlow Probability (https://github.com/tensorflow/probability). You should update all references to use `tfp.distributions` instead of `tf.distributions`.', warn_once=True)\ndef __init__(self, dtype, reparameterization_type, validate_args, allow_nan_stats, parameters=None, graph_parents=None, name=None):\n    graph_parents = [] if graph_parents is None else graph_parents\n    for i, t in enumerate(graph_parents):\n        if t is None or not tensor_util.is_tf_type(t):\n            raise ValueError('Graph parent item %d is not a Tensor; %s.' % (i, t))\n    if not name or name[-1] != '/':\n        non_unique_name = name or type(self).__name__\n        with ops.name_scope(non_unique_name) as name:\n            pass\n    self._dtype = dtype\n    self._reparameterization_type = reparameterization_type\n    self._allow_nan_stats = allow_nan_stats\n    self._validate_args = validate_args\n    self._parameters = parameters or {}\n    self._graph_parents = graph_parents\n    self._name = name",
    "docstring": "Constructs the . **This is a private method for subclass use.** Args: dtype: The type of the event samples. implies no type-enforcement. reparameterization_type: Instance of . If , this can be reparameterized in terms of some standard distribution with a function whose Jacobian is constant for the support of the standard distribution. If , then no such reparameterization is available. validate_args: Python , default . When distribution parameters are checked for validity despite possibly degrading runtime performance. When invalid inputs may silently render incorrect outputs. allow_nan_stats: Python , default . When , statistics (e.g., mean, mode, variance) use the value \"\" to indicate the result is undefined. When , an exception is raised if one or more of the statistic's batch members are undefined. parameters: Python of parameters used to instantiate this . graph_parents: Python of graph prerequisites of this . name: Python name prefixed to Ops created by this class. Default: subclass name. Raises: ValueError: if any member of graph_parents is or not a .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\distributions\\distribution.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:dtype arg:reparameterization_type arg:validate_args arg:allow_nan_stats arg:parameters arg:graph_parents arg:name arguments arg arg arg arg arg arg arg arg Assign Compare For Call If BoolOp Compare Call Raise Call If BoolOp Compare Assign BoolOp Call With Call Assign Assign Assign Assign Assign BoolOp Assign Assign Call"
  },
  {
    "library": "django",
    "name": "clear_cache",
    "source_code": "def clear_cache(self):\n    self.get_swappable_settings_name.cache_clear()\n    self.get_models.cache_clear()\n    if self.ready:\n        for app_config in self.app_configs.values():\n            for model in app_config.get_models(include_auto_created=True):\n                model._meta._expire_cache()",
    "docstring": "Clear all internal caches, for methods that alter the app registry. This is mostly used in tests.",
    "type": "method",
    "file_path": "django\\django\\apps\\registry.py",
    "ast_data": "FunctionDef name:clear_cache arg:self arguments arg Call Call If For Call For Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_maybe_get_dtype",
    "source_code": "def _maybe_get_dtype(x):\n    if isinstance(x, numbers.Real):\n        return x\n    if isinstance(x, indexed_slices.IndexedSlices) or tensor_util.is_tf_type(x):\n        return _to_numpy_type(x.dtype)\n    if isinstance(x, dtypes.DType):\n        return x.as_numpy_dtype\n    if isinstance(x, (list, tuple)):\n        raise ValueError(f'Cannot find dtype for type inference from argument `x` of a sequence type {type(x)}. For sequences, please call this function on each element individually.')\n    return x",
    "docstring": "Returns a numpy type if available from x. Skips if x is numpy.ndarray.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\numpy_ops\\np_utils.py",
    "ast_data": "FunctionDef name:_maybe_get_dtype arg:x arguments arg If Call Return return:yes If BoolOp Call Call Return return:yes Call If Call Return return:yes If Call Raise Call Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "points_to_pixels",
    "source_code": "def points_to_pixels(self, points):\n    return points",
    "docstring": "Convert points to display units. You need to override this function (unless your backend doesn't have a dpi, e.g., postscript or svg). Some imaging systems assume some value for pixels per inch:: points to pixels = points * pixels_per_inch/72 * dpi/72 Parameters ---------- points : float or array-like Returns ------- Points converted to pixels",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\backend_bases.py",
    "ast_data": "FunctionDef name:points_to_pixels arg:self arg:points arguments arg arg Return return:yes"
  },
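Evaluating the docstring's conversion formula for a hypothetical raster backend; the 100 ppi / 100 dpi figures below are made-up values for illustration.

```python
points = 12.0            # a 12 pt length
pixels_per_inch = 100.0  # assumed display resolution
dpi = 100.0              # assumed figure dpi
# Formula from the docstring: points * pixels_per_inch/72 * dpi/72
pixels = points * pixels_per_inch / 72 * dpi / 72
print(round(pixels, 2))  # 23.15
```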
  {
    "library": "pytorch",
    "name": "substitute_solution_one_type",
    "source_code": "def substitute_solution_one_type(mapping, t):\n    if isinstance(t, Var):\n        if t in mapping.keys():\n            return mapping[t]\n        else:\n            return t\n    elif isinstance(t, TensorType):\n        new_type = []\n        for typ in t.__args__:\n            if typ in mapping.keys():\n                new_type.append(mapping[typ])\n            else:\n                new_type.append(typ)\n        return TensorType(tuple(new_type))\n    elif isinstance(t, list):\n        new_type = []\n        for typ in t:\n            new_type.append(substitute_solution_one_type(mapping, typ))\n        return new_type\n    elif isinstance(t, tuple):\n        new_type = []\n        for typ in t:\n            new_type.append(substitute_solution_one_type(mapping, typ))\n        return tuple(new_type)\n    else:\n        return t",
    "docstring": "Apply the most general unifier to a type",
    "type": "function",
    "file_path": "pytorch\\torch\\fx\\experimental\\unify_refinements.py",
    "ast_data": "FunctionDef name:substitute_solution_one_type arg:mapping arg:t arguments arg arg If Call If Compare Call Return return:yes Return return:yes If Call Assign For If Compare Call Call Call Return return:yes Call Call If Call Assign For Call Call Return return:yes If Call Assign For Call Call Return return:yes Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "disable_dump_debug_info",
    "source_code": "@tf_export('debugging.experimental.disable_dump_debug_info')\ndef disable_dump_debug_info():\n    if hasattr(_state, 'dumping_callback'):\n        dump_root = _state.dumping_callback.dump_root\n        tfdbg_run_id = _state.dumping_callback.tfdbg_run_id\n        debug_events_writer.DebugEventsWriter(dump_root, tfdbg_run_id).Close()\n        op_callbacks.remove_op_callback(_state.dumping_callback.callback)\n        if _state.dumping_callback.function_callback in function_lib.CONCRETE_FUNCTION_CALLBACKS:\n            function_lib.CONCRETE_FUNCTION_CALLBACKS.remove(_state.dumping_callback.function_callback)\n        delattr(_state, 'dumping_callback')\n        logging.info('Disabled dumping callback in thread %s (dump root: %s)', threading.current_thread().name, dump_root)",
    "docstring": "Disable the currently-enabled debugging dumping. If the method under the same Python namespace has been invoked before, calling this method disables it. If no call to has been made, calling this method is a no-op. Calling this method more than once is idempotent.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\debug\\lib\\dumping_callback.py",
    "ast_data": "FunctionDef name:disable_dump_debug_info arguments If Call Assign Assign Call Call Call If Compare Call Call Call Call Call"
  },
  {
    "library": "scikit-learn",
    "name": "_n_features_out",
    "source_code": "@property\ndef _n_features_out(self):\n    return self.components_.shape[0]",
    "docstring": "Number of transformed output features.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\decomposition\\_dict_learning.py",
    "ast_data": "FunctionDef name:_n_features_out arg:self arguments arg Return return:yes"
  },
  {
    "library": "authlib",
    "name": "validate_token_request",
    "source_code": "def validate_token_request(self):\n    assertion = self.request.form.get('assertion')\n    if not assertion:\n        raise InvalidRequestError(\"Missing 'assertion' in request\")\n    claims = self.process_assertion_claims(assertion)\n    client = self.resolve_issuer_client(claims['iss'])\n    log.debug('Validate token request of %s', client)\n    if not client.check_grant_type(self.GRANT_TYPE):\n        raise UnauthorizedClientError(f\"The client is not authorized to use 'grant_type={self.GRANT_TYPE}'\")\n    self.request.client = client\n    self.validate_requested_scope()\n    subject = claims.get('sub')\n    if subject:\n        user = self.authenticate_user(subject)\n        if not user:\n            raise InvalidGrantError(description=\"Invalid 'sub' value in assertion\")\n        log.debug('Check client(%s) permission to User(%s)', client, user)\n        if not self.has_granted_permission(client, user):\n            raise InvalidClientError(description='Client has no permission to access user data')\n        self.request.user = user",
    "docstring": "The client makes a request to the token endpoint by sending the following parameters using the \"application/x-www-form-urlencoded\" format per _: grant_type REQUIRED. Value MUST be set to \"urn:ietf:params:oauth:grant-type:jwt-bearer\". assertion REQUIRED. Value MUST contain a single JWT. scope OPTIONAL. The following example demonstrates an access token request with a JWT as an authorization grant: .. code-block:: http POST /token.oauth2 HTTP/1.1 Host: as.example.com Content-Type: application/x-www-form-urlencoded grant_type=urn%3Aietf%3Aparams%3Aoauth%3Agrant-type%3Ajwt-bearer &assertion=eyJhbGciOiJFUzI1NiIsImtpZCI6IjE2In0. eyJpc3Mi[...omitted for brevity...]. J9l-ZhwP[...omitted for brevity...] .. _:",
    "type": "method",
    "file_path": "authlib\\authlib\\oauth2\\rfc7523\\jwt_bearer.py",
    "ast_data": "FunctionDef name:validate_token_request arg:self arguments arg Assign Call If Raise Call Assign Call Assign Call Call If Call Raise Call Assign Call Assign Call If Assign Call If Raise Call Call If Call Raise Call Assign"
  },
  {
    "library": "pytorch",
    "name": "get_real_value",
    "source_code": "def get_real_value(self):\n    return get_real_value(self.proxy.node, self.proxy.tracer)",
    "docstring": "Get the actual value represented by this variable if computation is run using the user-provided inputs. NOTE: this runs actual tensor computation and may be slow and memory-intensive.",
    "type": "method",
    "file_path": "pytorch\\torch\\_dynamo\\variables\\tensor.py",
    "ast_data": "FunctionDef name:get_real_value arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "_datetimelike_compat",
    "source_code": "def _datetimelike_compat(func: F) -> F:\n\n    @functools.wraps(func)\n    def new_func(values: np.ndarray, *, axis: AxisInt | None=None, skipna: bool=True, mask: npt.NDArray[np.bool_] | None=None, **kwargs):\n        orig_values = values\n        datetimelike = values.dtype.kind in 'mM'\n        if datetimelike and mask is None:\n            mask = isna(values)\n        result = func(values, axis=axis, skipna=skipna, mask=mask, **kwargs)\n        if datetimelike:\n            result = _wrap_results(result, orig_values.dtype, fill_value=iNaT)\n            if not skipna:\n                assert mask is not None\n                result = _mask_datetimelike_result(result, axis, mask, orig_values)\n        return result\n    return cast(F, new_func)",
    "docstring": "If we have datetime64 or timedelta64 values, ensure we have a correct mask before calling the wrapped function, then cast back afterwards.",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\nanops.py",
    "ast_data": "FunctionDef name:_datetimelike_compat arg:func arguments arg FunctionDef name:new_func arg:values arguments arg arg arg arg arg Assign Assign Compare If BoolOp Compare Assign Call Assign Call If Assign Call If Compare Assign Call Return return:yes Call Return return:yes Call"
  },
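A reduced sketch of the same decorator idea: derive a NaT mask for datetime-like input before the wrapped reducer runs. The names and the reducer below are illustrative, not pandas internals.

```python
import functools
import numpy as np

def datetimelike_compat(func):
    @functools.wraps(func)
    def new_func(values, mask=None, **kwargs):
        if values.dtype.kind in "mM" and mask is None:
            mask = np.isnat(values)  # NaT detection for datetime64/timedelta64
        return func(values, mask=mask, **kwargs)
    return new_func

@datetimelike_compat
def count_valid(values, mask=None):
    # Toy reducer: count of non-missing entries.
    return values.size if mask is None else values.size - int(mask.sum())

arr = np.array(["2024-01-01", "NaT"], dtype="datetime64[ns]")
print(count_valid(arr))  # 1
```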
  {
    "library": "pytorch",
    "name": "Sharder",
    "source_code": "class Sharder(abc.ABC):\n\n    @abc.abstractmethod\n    def shard(self, module: nn.Module) -> nn.Module:\n        pass",
    "docstring": "This is an interface which allows user to create more advanced sharding strategies that are not easily be composed by the . :class: could take an object of the and call to shard the module, then replace the original module with sharded module returned.",
    "type": "class",
    "file_path": "pytorch\\torch\\distributed\\_shard\\sharder.py",
    "ast_data": "ClassDef name:Sharder FunctionDef name:shard arg:self arg:module arguments arg arg"
  },
  {
    "library": "tensorflow",
    "name": "deconv_output_length",
    "source_code": "def deconv_output_length(input_length, filter_size, padding, stride):\n    if input_length is None:\n        return None\n    input_length *= stride\n    if padding == 'valid':\n        input_length += max(filter_size - stride, 0)\n    elif padding == 'full':\n        input_length -= stride + filter_size - 2\n    return input_length",
    "docstring": "Determines output length of a transposed convolution given input length. Args: input_length: integer. filter_size: integer. padding: one of \"same\", \"valid\", \"full\". stride: integer. Returns: The output length (integer).",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\layers\\utils.py",
    "ast_data": "FunctionDef name:deconv_output_length arg:input_length arg:filter_size arg:padding arg:stride arguments arg arg arg arg If Compare Return return:no If Compare Call If Compare Return return:yes"
  },
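The function is pure arithmetic, so it can be copied verbatim and checked by hand; for example, a stride-2 transposed convolution with a 3-wide filter over 4 input steps:

```python
def deconv_output_length(input_length, filter_size, padding, stride):
    if input_length is None:
        return None
    input_length *= stride
    if padding == 'valid':
        input_length += max(filter_size - stride, 0)
    elif padding == 'full':
        input_length -= stride + filter_size - 2
    return input_length

print(deconv_output_length(4, 3, 'valid', 2))  # 4*2 + max(3-2, 0) = 9
print(deconv_output_length(4, 3, 'same', 2))   # 4*2               = 8
print(deconv_output_length(4, 3, 'full', 2))   # 4*2 - (2+3-2)     = 5
```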
  {
    "library": "pandas",
    "name": "rename_axis",
    "source_code": "def rename_axis(self, mapper: IndexLabel | lib.NoDefault=lib.no_default, *, index=lib.no_default, axis: Axis=0, copy: bool | lib.NoDefault=lib.no_default, inplace: bool=False) -> Self | None:\n    return super().rename_axis(mapper=mapper, index=index, axis=axis, inplace=inplace, copy=copy)",
    "docstring": "Set the name of the axis for the index. Parameters ---------- mapper : scalar, list-like, optional Value to set the axis name attribute. Use either `SeriescopyCopy-on-Write copycopycopy`. See Also -------- Series.rename : Alter Series index labels or name. DataFrame.rename : Alter DataFrame index labels or name. Index.rename : Set new names on index. Examples -------- >>> s = pd.Series([\"dog\", \"cat\", \"monkey\"]) >>> s 0 dog 1 cat 2 monkey dtype: object >>> s.rename_axis(\"animal\") animal 0 dog 1 cat 2 monkey dtype: object",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\series.py",
    "ast_data": "FunctionDef name:rename_axis arg:self arg:mapper arguments arg arg arg arg arg arg Return return:yes Call Call"
  },
  {
    "library": "matplotlib",
    "name": "get_xdata",
    "source_code": "def get_xdata(self, orig=True):\n    if orig:\n        return self._xorig\n    if self._invalidx:\n        self.recache()\n    return self._x",
    "docstring": "Return the xdata. If *orig* is *True*, return the original data, else the processed data.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\lines.py",
    "ast_data": "FunctionDef name:get_xdata arg:self arg:orig arguments arg arg If Return return:yes If Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "is_batch_stride_largest",
    "source_code": "def is_batch_stride_largest(mat1, mat2, layout) -> bool:\n    sizes = [mat1.get_size(), mat2.get_size(), layout.size]\n    strides = [mat1.get_stride(), mat2.get_stride(), layout.stride]\n    for size, stride in zip(sizes, strides):\n        assert len(size) == len(stride) == 3, 'Expect 3D tensors'\n        if stride[0] != sympy_product(size[1:]):\n            return False\n    return True",
    "docstring": "Checking if the batch stride is the largest in the stride.",
    "type": "function",
    "file_path": "pytorch\\torch\\_inductor\\kernel\\mm_common.py",
    "ast_data": "FunctionDef name:is_batch_stride_largest arg:mat1 arg:mat2 arg:layout arguments arg arg arg Assign Call Call Assign Call Call For Call Compare Call Call If Compare Call Return return:yes Return return:yes"
  },
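The invariant being tested can be seen on eager tensors: for a contiguous 3-D tensor the batch stride equals the product of the remaining dimensions, while a transposed view breaks that relationship.

```python
import torch

t = torch.randn(2, 3, 4)
print(t.stride())                              # (12, 4, 1)
print(t.stride()[0] == t.size(1) * t.size(2))  # True  -> check passes

u = t.transpose(0, 2)                          # shape (4, 3, 2), stride (1, 4, 12)
print(u.stride()[0] == u.size(1) * u.size(2))  # False -> check fails
```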
  {
    "library": "scipy",
    "name": "time_mrp_conversion",
    "source_code": "def time_mrp_conversion(self, num_rotations):\n    Rotation.from_mrp(self.rotations.as_mrp())",
    "docstring": "Time converting rotation from and to Modified Rodrigues Parameters",
    "type": "method",
    "file_path": "scipy\\benchmarks\\benchmarks\\spatial.py",
    "ast_data": "FunctionDef name:time_mrp_conversion arg:self arg:num_rotations arguments arg arg Call Call"
  },
  {
    "library": "pytorch",
    "name": "ShardingSpec",
    "source_code": "class ShardingSpec(ABC):\n\n    @abstractmethod\n    def build_metadata(self, tensor_sizes: torch.Size, tensor_properties: sharded_tensor_meta.TensorProperties) -> sharded_tensor_meta.ShardedTensorMetadata:\n        pass\n\n    @abstractmethod\n    def shard(self, tensor: torch.Tensor, src_rank: int=0, process_group=None) -> 'ShardedTensor':\n        pass",
    "docstring": "Base class representing sharding specifications.",
    "type": "class",
    "file_path": "pytorch\\torch\\distributed\\_shard\\sharding_spec\\api.py",
    "ast_data": "ClassDef name:ShardingSpec FunctionDef name:build_metadata arg:self arg:tensor_sizes arg:tensor_properties arguments arg arg arg FunctionDef name:shard arg:self arg:tensor arg:src_rank arg:process_group arguments arg arg arg arg"
  },
  {
    "library": "scrapy",
    "name": "start",
    "source_code": "async def start(self) -> AsyncIterator[Any]:\n    with warnings.catch_warnings():\n        warnings.filterwarnings('ignore', category=ScrapyDeprecationWarning, module='^scrapy\\\\.spiders$')\n        for item_or_request in self.start_requests():\n            yield item_or_request",
    "docstring": "Yield the initial :class: objects to send. .. versionadded:: 2.13 For example: .. code-block:: python from scrapy import Request, Spider class MySpider(Spider): name = \"myspider\" async def start(self): yield Request(\" The default implementation reads URLs from :attr: and yields a request for each with :attr: enabled. It is functionally equivalent to: .. code-block:: python async def start(self): for url in self.start_urls: yield Request(url, dont_filter=True) You can also yield :ref:. For example: .. code-block:: python async def start(self): yield {\"foo\": \"bar\"} To write spiders that work on Scrapy versions lower than 2.13, define also a synchronous `start-requests`",
    "type": "method",
    "file_path": "scrapy\\scrapy\\spiders\\__init__.py",
    "ast_data": "AsyncFunctionDef name:start arg:self arguments arg With Call Call For Call"
  },
  {
    "library": "pytorch",
    "name": "_set_target_dtype_info_for_matched_node_pattern",
    "source_code": "def _set_target_dtype_info_for_matched_node_pattern(matched_node_pattern: NodePattern, last_node: Node, qconfig: QConfigAny, qhandler: Optional[QuantizeHandler], backend_config: BackendConfig, named_modules: dict[str, torch.nn.Module], cache_for_no_tensor_check: dict[Node, bool], processed_nodes: set[Node]) -> None:\n    if isinstance(matched_node_pattern, (list, tuple)):\n        for node_pattern in matched_node_pattern:\n            _set_target_dtype_info_for_matched_node_pattern(node_pattern, last_node, qconfig, qhandler, backend_config, named_modules, cache_for_no_tensor_check, processed_nodes)\n    elif isinstance(matched_node_pattern, Node):\n        assert isinstance(matched_node_pattern, Node)\n        node = matched_node_pattern\n        if node in processed_nodes:\n            return\n        processed_nodes.add(node)\n        if qconfig is None:\n            return\n        target_dtype_info: dict[str, Any] = _get_target_activation_dtype_for_node(node, qconfig, qhandler, named_modules, backend_config, cache_for_no_tensor_check)\n        node.meta['target_dtype_info'] = target_dtype_info",
    "docstring": "Sets the target_dtype_info for each node in matched_node_pattern Note: processed_nodes is used to ensure we only process each node once",
    "type": "function",
    "file_path": "pytorch\\torch\\ao\\quantization\\fx\\prepare.py",
    "ast_data": "FunctionDef name:_set_target_dtype_info_for_matched_node_pattern arg:matched_node_pattern arg:last_node arg:qconfig arg:qhandler arg:backend_config arg:named_modules arg:cache_for_no_tensor_check arg:processed_nodes arguments arg arg arg arg arg arg arg arg If Call For Call If Call Call Assign If Compare Return return:no Call If Compare Return return:no Call Assign"
  },
  {
    "library": "kornia",
    "name": "vgg19",
    "source_code": "def vgg19(*, weights: Optional[Any]=None, **kwargs: Any) -> VGG:\n    return _vgg('E', False, weights, **kwargs)",
    "docstring": "VGG-19 from __. Args: weights (:class:, optional): The pretrained weights to use. See :class: below for more details, and possible values. By default, no pre-trained weights are used. progress (bool, optional): If True, displays a progress bar of the download to stderr. Default is True. **kwargs: parameters passed to the `source code `_ for more details about this class. .. autoclass:: torchvision.models.VGG19_Weights :members:",
    "type": "function",
    "file_path": "kornia\\kornia\\feature\\dedode\\vgg.py",
    "ast_data": "FunctionDef name:vgg19 arguments arg arg Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "__iter__",
    "source_code": "def __iter__(self) -> Iterator:\n    if self.ndim > 1:\n        for i in range(len(self)):\n            yield self[i]\n    else:\n        data = self.asi8\n        length = len(self)\n        chunksize = _ITER_CHUNKSIZE\n        chunks = length // chunksize + 1\n        for i in range(chunks):\n            start_i = i * chunksize\n            end_i = min((i + 1) * chunksize, length)\n            converted = ints_to_pydatetime(data[start_i:end_i], tz=self.tz, box='timestamp', reso=self._creso)\n            yield from converted",
    "docstring": "Return an iterator over the boxed values Yields ------ tstamp : Timestamp",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\arrays\\datetimes.py",
    "ast_data": "FunctionDef name:__iter__ arg:self arguments arg If Compare For Call Call Assign Assign Call Assign Assign For Call Assign Assign Call Assign Call"
  },
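The chunking pattern above, reduced to plain Python: convert a long sequence in fixed-size batches rather than one element at a time. `CHUNKSIZE` stands in for the internal `_ITER_CHUNKSIZE` constant.

```python
CHUNKSIZE = 3  # stand-in for pandas' internal _ITER_CHUNKSIZE

def iter_chunked(data, convert):
    length = len(data)
    chunks = length // CHUNKSIZE + 1
    for i in range(chunks):
        start = i * CHUNKSIZE
        end = min((i + 1) * CHUNKSIZE, length)
        # Convert a whole chunk at once, then yield element by element.
        yield from (convert(x) for x in data[start:end])

print(list(iter_chunked(range(7), str)))  # ['0', '1', '2', '3', '4', '5', '6']
```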
  {
    "library": "tensorflow",
    "name": "_resize_images",
    "source_code": "def _resize_images(resize_op, images, size, **kwargs):\n    if images.shape.rank != 4:\n        raise ValueError('tf.image.resize: images.shape.rank must be 4 if images is ragged.')\n    static_batch_size = tensor_shape.dimension_value(images.shape[0])\n    size = ops.convert_to_tensor(size, dtypes.int32, 'size')\n    size_as_shape = tensor_util.constant_value_as_shape(size).with_rank(2)\n    out_shape = size_as_shape + images.shape[-1:]\n    out_spec = tensor_spec.TensorSpec(out_shape, dtypes.float32)\n\n    def resize_one(image):\n        if isinstance(image, ragged_tensor.RaggedTensor):\n            image = image.to_tensor()\n        return resize_op(image, size, **kwargs)\n\n    def resize_with_map():\n        return map_fn.map_fn_v2(resize_one, images, fn_output_signature=out_spec)\n\n    def empty_result():\n        channels = array_ops.shape(images.flat_values)[-1:]\n        return array_ops.zeros(array_ops.concat([[0], size, channels], axis=0))\n    if static_batch_size == 0:\n        return empty_result()\n    elif static_batch_size is not None:\n        return resize_with_map()\n    else:\n        empty_batch = math_ops.equal(images.nrows(), 0)\n        return cond.cond(empty_batch, empty_result, resize_with_map)",
    "docstring": "RaggedTensor dispatcher for tf.image.resize.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\ragged_image_ops.py",
    "ast_data": "FunctionDef name:_resize_images arg:resize_op arg:images arg:size arguments arg arg arg arg If Compare Raise Call Assign Call Assign Call Assign Call Call Assign Assign Call FunctionDef name:resize_one arg:image arguments arg If Call Assign Call Return return:yes Call FunctionDef name:resize_with_map arguments Return return:yes Call FunctionDef name:empty_result arguments Assign Call Return return:yes Call Call If Compare Return return:yes Call If Compare Return return:yes Call Assign Call Call Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "_fit_edge",
    "source_code": "def _fit_edge(x, window_start, window_stop, interp_start, interp_stop, axis, polyorder, deriv, delta, y):\n    x_edge = axis_slice(x, start=window_start, stop=window_stop, axis=axis)\n    if axis == 0 or axis == -x.ndim:\n        xx_edge = x_edge\n        swapped = False\n    else:\n        xx_edge = x_edge.swapaxes(axis, 0)\n        swapped = True\n    xx_edge = xx_edge.reshape(xx_edge.shape[0], -1)\n    poly_coeffs = np.polyfit(np.arange(0, window_stop - window_start), xx_edge, polyorder)\n    if deriv > 0:\n        poly_coeffs = _polyder(poly_coeffs, deriv)\n    i = np.arange(interp_start - window_start, interp_stop - window_start)\n    values = np.polyval(poly_coeffs, i.reshape(-1, 1)) / delta ** deriv\n    shp = list(y.shape)\n    shp[0], shp[axis] = (shp[axis], shp[0])\n    values = values.reshape(interp_stop - interp_start, *shp[1:])\n    if swapped:\n        values = values.swapaxes(0, axis)\n    y_edge = axis_slice(y, start=interp_start, stop=interp_stop, axis=axis)\n    y_edge[...] = values",
    "docstring": "Given an N-d array and the specification of a slice of from to along , create an interpolating polynomial of each 1-D slice, and evaluate that polynomial in the slice from to . Put the result into the corresponding slice of .",
    "type": "function",
    "file_path": "scipy\\scipy\\signal\\_savitzky_golay.py",
    "ast_data": "FunctionDef name:_fit_edge arg:x arg:window_start arg:window_stop arg:interp_start arg:interp_stop arg:axis arg:polyorder arg:deriv arg:delta arg:y arguments arg arg arg arg arg arg arg arg arg arg Assign Call If BoolOp Compare Compare Assign Assign Assign Call Assign Assign Call Assign Call Call If Compare Assign Call Assign Call Assign Call Call Assign Call Assign Assign Call If Assign Call Assign Call Assign"
  },
  {
    "library": "tensorflow",
    "name": "_parse_shard_info_str",
    "source_code": "def _parse_shard_info_str(spec: str) -> tuple[list[int], trackable_base.ShardInfo]:\n    shape = [int(x) for x in spec.split()[:-1]]\n    slices = spec.split()[-1].split(':')\n    offset = [int(x.split(',')[0]) for x in slices]\n    shard_shape = [int(x.split(',')[1]) for x in slices]\n    return (shape, trackable_base.ShardInfo(offset=offset, shape=shard_shape))",
    "docstring": "Parses shape and shard_info string.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\tpu\\tpu_embedding_v3_checkpoint_adapter.py",
    "ast_data": "FunctionDef name:_parse_shard_info_str arg:spec arguments arg Assign Call Call Assign Call Call Assign Call Call Assign Call Call Return return:yes Call"
  },
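A worked example of the spec format the parser expects: space-separated shape dimensions followed by one colon-separated list of "offset,length" pairs. The spec string here is made up for illustration.

```python
spec = "8 4 0,4:0,4"  # hypothetical: full shape (8, 4), one (4, 4) shard at offset (0, 0)

# Same parsing steps as _parse_shard_info_str:
shape = [int(x) for x in spec.split()[:-1]]
slices = spec.split()[-1].split(':')
offset = [int(x.split(',')[0]) for x in slices]
shard_shape = [int(x.split(',')[1]) for x in slices]
print(shape, offset, shard_shape)  # [8, 4] [0, 0] [4, 4]
```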
  {
    "library": "pytorch",
    "name": "clean_memory",
    "source_code": "def clean_memory() -> None:\n    gc.collect()\n    torch.cuda.empty_cache()",
    "docstring": "Clean memory to avoid OOM.",
    "type": "function",
    "file_path": "pytorch\\torch\\_inductor\\fx_passes\\numeric_utils.py",
    "ast_data": "FunctionDef name:clean_memory arguments Call Call"
  },
  {
    "library": "pytorch",
    "name": "find_bracket_group",
    "source_code": "def find_bracket_group(input_string, start):\n    return find_closure_group(input_string, start, group=['{', '}'])",
    "docstring": "Finds the first balanced parantheses.",
    "type": "function",
    "file_path": "pytorch\\torch\\utils\\hipify\\hipify_python.py",
    "ast_data": "FunctionDef name:find_bracket_group arg:input_string arg:start arguments arg arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "_get_module_call_graph",
    "source_code": "def _get_module_call_graph(export_artifact: ExportArtifact, preserve_module_call_signature: tuple[str, ...], strict_mode_export: bool, forward_arg_names: Optional[list[str]]=None) -> tuple[torch.fx.GraphModule, list[ModuleCallEntry]]:\n    gm: torch.fx.GraphModule = export_artifact.aten.gm\n    export_graph_signature: ExportGraphSignature = export_artifact.aten.sig\n    module_call_specs: dict[str, dict[str, TreeSpec]] = export_artifact.module_call_specs\n    in_spec: TreeSpec = export_artifact.in_spec\n    out_spec: TreeSpec = export_artifact.out_spec\n    module_call_signatures: dict[str, ModuleCallSignature] = {}\n    for fqn, specs in module_call_specs.items():\n        mod_fqn = _strip_root(fqn) if not strict_mode_export else fqn\n        module_call_signatures[mod_fqn] = ModuleCallSignature(inputs=[], outputs=[], in_spec=specs['in_spec'], out_spec=specs['out_spec'], forward_arg_names=None)\n    if len(preserve_module_call_signature) > 0:\n        if not strict_mode_export:\n            _rewrite_tracepoint_node(gm)\n        res = CollectTracepointsPass(module_call_signatures, export_graph_signature)(gm)\n        assert res is not None\n        gm = res.graph_module\n    assert _EXPORT_MODULE_HIERARCHY is not None\n    module_call_graph = _make_module_call_graph(in_spec, out_spec, module_call_signatures, forward_arg_names)\n    return (gm, module_call_graph)",
    "docstring": "In-place modify the graph module in export_artifact, remove _export_tracepoint nodes and return module_call_graph.",
    "type": "function",
    "file_path": "pytorch\\torch\\export\\_trace.py",
    "ast_data": "FunctionDef name:_get_module_call_graph arg:export_artifact arg:preserve_module_call_signature arg:strict_mode_export arg:forward_arg_names arguments arg arg arg arg For Call Assign Call Assign Call If Compare Call If Call Assign Call Call Compare Assign Compare Assign Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "get_required_width",
    "source_code": "def get_required_width(self, renderer):\n    l, b, w, h = self.get_text_bounds(renderer)\n    return w * (1.0 + 2.0 * self.PAD)",
    "docstring": "Return the minimal required width for the cell.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\table.py",
    "ast_data": "FunctionDef name:get_required_width arg:self arg:renderer arguments arg arg Assign Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "set_stream",
    "source_code": "def set_stream(stream: Stream):\n    if stream is None:\n        return\n    _lazy_init()\n    _set_stream_by_id(stream_id=stream.stream_id, device_index=stream.device_index, device_type=stream.device_type)",
    "docstring": "Set the current stream.This is a wrapper API to set the stream. Usage of this function is discouraged in favor of the ``.",
    "type": "function",
    "file_path": "pytorch\\torch\\xpu\\__init__.py",
    "ast_data": "FunctionDef name:set_stream arg:stream arguments arg If Compare Return return:no Call Call"
  },
  {
    "library": "tensorflow",
    "name": "get_symbolic_inputs",
    "source_code": "def get_symbolic_inputs(self, return_single_as_list=False):\n    for i, (k, v) in enumerate(zip(self._input_names, self._flattened_inputs)):\n        if isinstance(v, (list, float, int)):\n            v = numpy_compat.np_asarray(v)\n            if v.ndim == 1:\n                v = np.expand_dims(v, 1)\n        if isinstance(v, np.ndarray):\n            shape = (None,) + tuple(v.shape[1:])\n            if shape == (None,):\n                shape = (None, 1)\n            dtype = dtypes.as_dtype(v.dtype)\n            if dtype.is_floating:\n                dtype = backend.floatx()\n            v = backend.placeholder(shape=shape, name=k, dtype=dtype)\n        elif isinstance(v, tensor_spec.TensorSpec):\n            shape = (None,) + tuple(v.shape.as_list()[1:])\n            if shape == (None,):\n                shape = (None, 1)\n            v = backend.placeholder(shape=shape, name=k, dtype=v.dtype)\n        self._flattened_inputs[i] = v\n    if self._is_dict:\n        return dict(zip(self._input_names, self._flattened_inputs))\n    if self._is_single_input and (not return_single_as_list):\n        return self._flattened_inputs[0]\n    return self._flattened_inputs",
    "docstring": "Returns inputs to be set as self.inputs for a model.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\training_utils_v1.py",
    "ast_data": "FunctionDef name:get_symbolic_inputs arg:self arg:return_single_as_list arguments arg arg For Call Call If Call Assign Call If Compare Assign Call If Call Assign Call If Compare Assign Assign Call If Assign Call Assign Call If Call Assign Call Call If Compare Assign Assign Call Assign If Return return:yes Call Call If BoolOp Return return:yes Return return:yes"
  },
  {
    "library": "django",
    "name": "_acreate_user",
    "source_code": "async def _acreate_user(self, username, email, password, **extra_fields):\n    user = self._create_user_object(username, email, password, **extra_fields)\n    await user.asave(using=self._db)\n    return user",
    "docstring": "See _create_user()",
    "type": "method",
    "file_path": "django\\django\\contrib\\auth\\models.py",
    "ast_data": "AsyncFunctionDef name:_acreate_user arg:self arg:username arg:email arg:password arguments arg arg arg arg arg Assign Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_set_rng_state_offset",
    "source_code": "def _set_rng_state_offset(offset: int, device: Union[int, str, torch.device]='xpu') -> None:\n    final_device = _get_device(device)\n\n    def cb():\n        default_generator = _get_generator(final_device)\n        default_generator.set_offset(offset)\n    _lazy_call(cb)",
    "docstring": "Set the random number generator state offset of the specified GPU. Args: offset (int): The desired offset device (torch.device or int, optional): The device to set the RNG state. Default: ``, the current XPU device).",
    "type": "function",
    "file_path": "pytorch\\torch\\xpu\\__init__.py",
    "ast_data": "FunctionDef name:_set_rng_state_offset arg:offset arg:device arguments arg arg Assign Call FunctionDef name:cb arguments Assign Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "sharding_policies",
    "source_code": "@property\ndef sharding_policies(self):\n    return self._sharding_policies",
    "docstring": "Returns the sharding policies of the InfeedQueue tuple elements.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\tpu\\tpu_feed.py",
    "ast_data": "FunctionDef name:sharding_policies arg:self arguments arg Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "connect_bbox",
    "source_code": "@staticmethod\ndef connect_bbox(bbox1, bbox2, loc1, loc2=None):\n    if isinstance(bbox1, Rectangle):\n        bbox1 = TransformedBbox(Bbox.unit(), bbox1.get_transform())\n    if isinstance(bbox2, Rectangle):\n        bbox2 = TransformedBbox(Bbox.unit(), bbox2.get_transform())\n    if loc2 is None:\n        loc2 = loc1\n    x1, y1 = BboxConnector.get_bbox_edge_pos(bbox1, loc1)\n    x2, y2 = BboxConnector.get_bbox_edge_pos(bbox2, loc2)\n    return Path([[x1, y1], [x2, y2]])",
    "docstring": "Construct a connecting corner *loc1* of *bbox1* to corner *loc2* of *bbox2*, where parameters behave as documented as for the constructor.",
    "type": "method",
    "file_path": "matplotlib\\lib\\mpl_toolkits\\axes_grid1\\inset_locator.py",
    "ast_data": "FunctionDef name:connect_bbox arg:bbox1 arg:bbox2 arg:loc1 arg:loc2 arguments arg arg arg arg If Call Assign Call Call Call If Call Assign Call Call Call If Compare Assign Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "DeflectedCorrugatedSpring",
    "source_code": "class DeflectedCorrugatedSpring(Benchmark):\n    change_dimensionality = True\n\n    def __init__(self, dimensions=2):\n        Benchmark.__init__(self, dimensions)\n        alpha = 5.0\n        self._bounds = list(zip([0] * self.N, [2 * alpha] * self.N))\n        self.global_optimum = [[alpha for _ in range(self.N)]]\n        self.fglob = -1.0\n\n    def fun(self, x, *args):\n        self.nfev += 1\n        K, alpha = (5.0, 5.0)\n        return -cos(K * sqrt(sum((x - alpha) ** 2))) + 0.1 * sum((x - alpha) ** 2)",
    "docstring": "DeflectedCorrugatedSpring objective function. This class defines the Deflected Corrugated Spring [1]_ function global optimization problem. This is a multimodal minimization problem defined as follows: .. math:: f_{\\text{DeflectedCorrugatedSpring}}(x) = 0.1\\sum_{i=1}^n \\left[ (x_i - \\alpha)^2 - \\cos \\left( K \\sqrt {\\sum_{i=1}^n (x_i - \\alpha)^2} \\right ) \\right ] Where, in this exercise, :math: and :math:. Here, :math: represents the number of dimensions and :math: for :math:. *Global optimum*: :math: for :math: for :math: .. [1] Gavana, A. Global Optimization Benchmarks and AMPGO retrieved 2015 TODO: website has a different equation to the gavana codebase. The function below is different to the equation above. Also, the global minimum is wrong.",
    "type": "class",
    "file_path": "scipy\\benchmarks\\benchmarks\\go_benchmark_functions\\go_funcs_D.py",
    "ast_data": "ClassDef name:DeflectedCorrugatedSpring Assign FunctionDef name:__init__ arg:self arg:dimensions arguments arg arg Call Assign Assign Call Call Assign Call Assign FunctionDef name:fun arg:self arg:x arguments arg arg arg Assign Return return:yes Call Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "run_loop",
    "source_code": "def run_loop(self):\n    if self._target:\n        self._target(*self._args, **self._kwargs)",
    "docstring": "Called at 'timer_interval_secs' boundaries.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\training\\coordinator.py",
    "ast_data": "FunctionDef name:run_loop arg:self arguments arg If Call"
  },
  {
    "library": "tensorflow",
    "name": "_get_experimental_kwarg_as_attr",
    "source_code": "def _get_experimental_kwarg_as_attr(attr_name, value):\n    if isinstance(value, bool):\n        return attr_value_pb2.AttrValue(b=value)\n    elif isinstance(value, int):\n        return attr_value_pb2.AttrValue(i=value)\n    elif isinstance(value, float):\n        return attr_value_pb2.AttrValue(f=value)\n    elif isinstance(value, str):\n        return attr_value_pb2.AttrValue(s=compat.as_bytes(value))\n    else:\n        raise ValueError(f'Attribute {attr_name} must be bool, int, float, or str. Got {type(value)}.')",
    "docstring": "Creates an AttrValue for a python object.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\function.py",
    "ast_data": "FunctionDef name:_get_experimental_kwarg_as_attr arg:attr_name arg:value arguments arg arg If Call Return return:yes Call If Call Return return:yes Call If Call Return return:yes Call If Call Return return:yes Call Call Raise Call Call"
  },
  {
    "library": "matplotlib",
    "name": "get_color",
    "source_code": "def get_color(self):\n    return self._color",
    "docstring": "Return the color of the text.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\text.py",
    "ast_data": "FunctionDef name:get_color arg:self arguments arg Return return:yes"
  },
  {
    "library": "django",
    "name": "on_commit",
    "source_code": "def on_commit(func, using=None, robust=False):\n    get_connection(using).on_commit(func, robust)",
    "docstring": "Register to be called when the current transaction is committed. If the current transaction is rolled back, will not be called.",
    "type": "function",
    "file_path": "django\\django\\db\\transaction.py",
    "ast_data": "FunctionDef name:on_commit arg:func arg:using arg:robust arguments arg arg arg Call Call"
  },
  {
    "library": "numpy",
    "name": "_hist_bin_fd",
    "source_code": "def _hist_bin_fd(x, range):\n    del range\n    iqr = np.subtract(*np.percentile(x, [75, 25]))\n    return 2.0 * iqr * x.size ** (-1.0 / 3.0)",
    "docstring": "The Freedman-Diaconis histogram bin estimator. The Freedman-Diaconis rule uses interquartile range (IQR) to estimate binwidth. It is considered a variation of the Scott rule with more robustness as the IQR is less affected by outliers than the standard deviation. However, the IQR depends on fewer points than the standard deviation, so it is less accurate, especially for long tailed distributions. If the IQR is 0, this function returns 0 for the bin width. Binwidth is inversely proportional to the cube root of data size (asymptotically optimal). Parameters ---------- x : array_like Input data that is to be histogrammed, trimmed to range. May not be empty. Returns ------- h : An estimate of the optimal bin width for the given data.",
    "type": "function",
    "file_path": "numpy\\numpy\\lib\\_histograms_impl.py",
    "ast_data": "FunctionDef name:_hist_bin_fd arg:x arg:range arguments arg arg Assign Call Call Return return:yes"
  },
  {
    "library": "scipy",
    "name": "run_monitored",
    "source_code": "def run_monitored(code):\n    if hasattr(os, 'wait4'):\n        return run_monitored_wait4(code)\n    else:\n        return run_monitored_proc(code)",
    "docstring": "Run code in a new Python process, and monitor peak memory usage. Returns ------- duration : float Duration in seconds (including Python startup time) peak_memusage : float or int Peak memory usage (rough estimate only) in bytes",
    "type": "function",
    "file_path": "scipy\\benchmarks\\benchmarks\\common.py",
    "ast_data": "FunctionDef name:run_monitored arg:code arguments arg If Call Return return:yes Call Return return:yes Call"
  },
  {
    "library": "numpy",
    "name": "magic",
    "source_code": "@set_module('numpy.lib.format')\ndef magic(major, minor):\n    if major < 0 or major > 255:\n        raise ValueError('major version must be 0 <= major < 256')\n    if minor < 0 or minor > 255:\n        raise ValueError('minor version must be 0 <= minor < 256')\n    return MAGIC_PREFIX + bytes([major, minor])",
    "docstring": "Return the magic string for the given file format version. Parameters ---------- major : int in [0, 255] minor : int in [0, 255] Returns ------- magic : str Raises ------ ValueError if the version cannot be formatted.",
    "type": "function",
    "file_path": "numpy\\numpy\\lib\\_format_impl.py",
    "ast_data": "FunctionDef name:magic arg:major arg:minor arguments arg arg If BoolOp Compare Compare Raise Call If BoolOp Compare Compare Raise Call Return return:yes Call Call"
  },
  {
    "library": "scikit-learn",
    "name": "get_params",
    "source_code": "def get_params(self, deep=True):\n    return super()._get_params('estimators', deep=deep)",
    "docstring": "Get the parameters of an estimator from the ensemble. Returns the parameters given in the constructor as well as the estimators contained within the parameter. Parameters ---------- deep : bool, default=True Setting it to True gets the various estimators and the parameters of the estimators as well. Returns ------- params : dict Parameter and estimator names mapped to their values or parameter names mapped to their values.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\ensemble\\_base.py",
    "ast_data": "FunctionDef name:get_params arg:self arg:deep arguments arg arg Return return:yes Call Call"
  },
  {
    "library": "scipy",
    "name": "MetaData",
    "source_code": "class MetaData:\n\n    def __init__(self, rel, attr):\n        self.name = rel\n        self._attributes = {a.name: a for a in attr}\n\n    def __repr__(self):\n        msg = ''\n        msg += f'Dataset: {self.name}\\n'\n        for i in self._attributes:\n            msg += f\"\\t{i}'s type is {self._attributes[i].type_name}\"\n            if self._attributes[i].range:\n                msg += f', range is {str(self._attributes[i].range)}'\n            msg += '\\n'\n        return msg\n\n    def __iter__(self):\n        return iter(self._attributes)\n\n    def __getitem__(self, key):\n        attr = self._attributes[key]\n        return (attr.type_name, attr.range)\n\n    def names(self):\n        return list(self._attributes)\n\n    def types(self):\n        attr_types = [self._attributes[name].type_name for name in self._attributes]\n        return attr_types",
    "docstring": "Small container to keep useful information on a ARFF dataset. Knows about attributes names and types. Examples -------- :: data, meta = loadarff('iris.arff') # This will print the attributes names of the iris.arff dataset for i in meta: print(i) # This works too meta.names() # Getting attribute type types = meta.types() Methods ------- names types Notes ----- Also maintains the list of attributes in order, i.e., doing for i in meta, where meta is an instance of MetaData, will return the different attribute names in the order they were defined.",
    "type": "class",
    "file_path": "scipy\\scipy\\io\\arff\\_arffread.py",
    "ast_data": "ClassDef name:MetaData FunctionDef name:__init__ arg:self arg:rel arg:attr arguments arg arg arg Assign Assign FunctionDef name:__repr__ arg:self arguments arg Assign For If Call Return return:yes FunctionDef name:__iter__ arg:self arguments arg Return return:yes Call FunctionDef name:__getitem__ arg:self arg:key arguments arg arg Assign Return return:yes FunctionDef name:names arg:self arguments arg Return return:yes Call FunctionDef name:types arg:self arguments arg Assign Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "inverse_transform",
    "source_code": "def inverse_transform(self, X):\n    check_is_fitted(self)\n    if 'onehot' in self.encode:\n        X = self._encoder.inverse_transform(X)\n    Xinv = check_array(X, copy=True, dtype=(np.float64, np.float32))\n    n_features = self.n_bins_.shape[0]\n    if Xinv.shape[1] != n_features:\n        raise ValueError('Incorrect number of features. Expecting {}, received {}.'.format(n_features, Xinv.shape[1]))\n    for jj in range(n_features):\n        bin_edges = self.bin_edges_[jj]\n        bin_centers = (bin_edges[1:] + bin_edges[:-1]) * 0.5\n        Xinv[:, jj] = bin_centers[Xinv[:, jj].astype(np.int64)]\n    return Xinv",
    "docstring": "Transform discretized data back to original feature space. Note that this function does not regenerate the original data due to discretization rounding. Parameters ---------- X : array-like of shape (n_samples, n_features) Transformed data in the binned space. Returns ------- X_original : ndarray, dtype={np.float32, np.float64} Data in the original feature space.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\preprocessing\\_discretization.py",
    "ast_data": "FunctionDef name:inverse_transform arg:self arg:X arguments arg arg Call If Compare Assign Call Assign Call Assign If Compare Raise Call Call For Call Assign Assign Assign Call Return return:yes"
  },
  {
    "library": "django",
    "name": "check_constraints",
    "source_code": "def check_constraints(self, table_names=None):\n    with self.cursor() as cursor:\n        if table_names is None:\n            violations = cursor.execute('PRAGMA foreign_key_check').fetchall()\n        else:\n            violations = chain.from_iterable((cursor.execute('PRAGMA foreign_key_check(%s)' % self.ops.quote_name(table_name)).fetchall() for table_name in table_names))\n        for table_name, rowid, referenced_table_name, foreign_key_index in violations:\n            foreign_key = cursor.execute('PRAGMA foreign_key_list(%s)' % self.ops.quote_name(table_name)).fetchall()[foreign_key_index]\n            column_name, referenced_column_name = foreign_key[3:5]\n            primary_key_column_name = self.introspection.get_primary_key_column(cursor, table_name)\n            primary_key_value, bad_value = cursor.execute('SELECT %s, %s FROM %s WHERE rowid = %%s' % (self.ops.quote_name(primary_key_column_name), self.ops.quote_name(column_name), self.ops.quote_name(table_name)), (rowid,)).fetchone()\n            raise IntegrityError(\"The row in table '%s' with primary key '%s' has an invalid foreign key: %s.%s contains a value '%s' that does not have a corresponding value in %s.%s.\" % (table_name, primary_key_value, table_name, column_name, bad_value, referenced_table_name, referenced_column_name))",
    "docstring": "Check each table name in for rows with invalid foreign key references. This method is intended to be used in conjunction with and , to determine if rows with invalid references were entered while constraint checks were off.",
    "type": "method",
    "file_path": "django\\django\\db\\backends\\sqlite3\\base.py",
    "ast_data": "FunctionDef name:check_constraints arg:self arg:table_names arguments arg arg With Call If Compare Assign Call Call Assign Call Call Call Call For Assign Call Call Call Assign Assign Call Assign Call Call Call Call Call Raise Call"
  },
  {
    "library": "authlib",
    "name": "validate_token_endpoint",
    "source_code": "def validate_token_endpoint(self):\n    grant_types_supported = self.get('grant_types_supported')\n    if grant_types_supported and len(grant_types_supported) == 1 and (grant_types_supported[0] == 'implicit'):\n        return\n    url = self.get('token_endpoint')\n    if not url:\n        raise ValueError('\"token_endpoint\" is required')\n    if not is_secure_transport(url):\n        raise ValueError('\"token_endpoint\" MUST use \"https\" scheme')",
    "docstring": "URL of the authorization server's token endpoint [RFC6749]. This is REQUIRED unless only the implicit grant type is supported.",
    "type": "method",
    "file_path": "authlib\\authlib\\oauth2\\rfc8414\\models.py",
    "ast_data": "FunctionDef name:validate_token_endpoint arg:self arguments arg Assign Call If BoolOp Compare Call Compare Return return:no Assign Call If Raise Call If Call Raise Call"
  },
  {
    "library": "matplotlib",
    "name": "__call__",
    "source_code": "def __call__(self, renderer):\n    if isinstance(self._artist, Artist):\n        bbox = self._artist.get_window_extent(renderer)\n        xf, yf = self._ref_coord\n        x = bbox.x0 + bbox.width * xf\n        y = bbox.y0 + bbox.height * yf\n    elif isinstance(self._artist, BboxBase):\n        bbox = self._artist\n        xf, yf = self._ref_coord\n        x = bbox.x0 + bbox.width * xf\n        y = bbox.y0 + bbox.height * yf\n    elif isinstance(self._artist, Transform):\n        x, y = self._artist.transform(self._ref_coord)\n    else:\n        _api.check_isinstance((Artist, BboxBase, Transform), artist=self._artist)\n    scale = 1 if self._unit == 'pixels' else renderer.points_to_pixels(1)\n    return Affine2D().scale(scale).translate(x, y)",
    "docstring": "Return the offset transform. Parameters ---------- renderer : The renderer to use to compute the offset Returns ------- Maps (x, y) in pixel or point units to screen units relative to the given artist.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\text.py",
    "ast_data": "FunctionDef name:__call__ arg:self arg:renderer arguments arg arg If Call Assign Call Assign Assign Assign If Call Assign Assign Assign Assign If Call Assign Call Call Assign Compare Call Return return:yes Call Call Call"
  },
  {
    "library": "pandas",
    "name": "remove_unused_categories",
    "source_code": "def remove_unused_categories(self) -> Self:\n    idx, inv = np.unique(self._codes, return_inverse=True)\n    if idx.size != 0 and idx[0] == -1:\n        idx, inv = (idx[1:], inv - 1)\n    new_categories = self.dtype.categories.take(idx)\n    new_dtype = CategoricalDtype._from_fastpath(new_categories, ordered=self.ordered)\n    new_codes = coerce_indexer_dtype(inv, new_dtype.categories)\n    cat = self.copy()\n    NDArrayBacked.__init__(cat, new_codes, new_dtype)\n    return cat",
    "docstring": "Remove categories which are not used. This method is useful when working with datasets that undergo dynamic changes where categories may no longer be relevant, allowing to maintain a clean, efficient data structure. Returns ------- Categorical Categorical with unused categories dropped. See Also -------- rename_categories : Rename categories. reorder_categories : Reorder categories. add_categories : Add new categories. remove_categories : Remove the specified categories. set_categories : Set the categories to the specified ones. Examples -------- >>> c = pd.Categorical([\"a\", \"c\", \"b\", \"c\", \"d\"]) >>> c ['a', 'c', 'b', 'c', 'd'] Categories (4, object): ['a', 'b', 'c', 'd'] >>> c[2] = \"a\" >>> c[4] = \"c\" >>> c ['a', 'c', 'a', 'c', 'c'] Categories (4, object): ['a', 'b', 'c', 'd'] >>> c.remove_unused_categories() ['a', 'c', 'a', 'c', 'c'] Categories (2, object): ['a', 'c']",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\arrays\\categorical.py",
    "ast_data": "FunctionDef name:remove_unused_categories arg:self arguments arg Assign Call If BoolOp Compare Compare Assign Assign Call Assign Call Assign Call Assign Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_get_device_handle",
    "source_code": "def _get_device_handle(device_type: str='cuda'):\n    return getattr(torch, device_type, None)",
    "docstring": "Get the module corresponding to the device_type which is cuda or cuda-like device. For example, when the device_type is cuda, the module is returned. Return None when there is no corresponding module for device_type, otherwise return the corresponding module.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\device_mesh.py",
    "ast_data": "FunctionDef name:_get_device_handle arg:device_type arguments arg Return return:yes Call"
  },
  {
    "library": "kornia",
    "name": "set_rng_device_and_dtype",
    "source_code": "def set_rng_device_and_dtype(self, device: Optional[torch.device]=None, dtype: torch.dtype=torch.float32) -> None:\n    if device is None:\n        device = torch.device('cpu')\n    if self.device != device or self.dtype != dtype:\n        self.make_samplers(device, dtype)\n        self.device = device\n        self.dtype = dtype",
    "docstring": "Change the random generation device and dtype. Note: The generated random numbers are not reproducible across different devices and dtypes.",
    "type": "method",
    "file_path": "kornia\\kornia\\augmentation\\random_generator\\base.py",
    "ast_data": "FunctionDef name:set_rng_device_and_dtype arg:self arg:device arg:dtype arguments arg arg arg If Compare Assign Call If BoolOp Compare Compare Call Assign Assign"
  },
  {
    "library": "tensorflow",
    "name": "_create_sample_validator",
    "source_code": "def _create_sample_validator(expected_input_keys: Collection[str]) -> Callable[[rd.RepresentativeSample], rd.RepresentativeSample]:\n\n    def validator(sample: rd.RepresentativeSample) -> rd.RepresentativeSample:\n        if not isinstance(sample, Mapping):\n            raise ValueError(f'Invalid representative sample type. Provide a mapping (usually a dict) of {{input_key: input_value}}. Got type: {type(sample)} instead.')\n        if set(sample.keys()) != expected_input_keys:\n            raise KeyError(f'Invalid input keys for representative sample. The function expects input keys of: {set(expected_input_keys)}. Got: {set(sample.keys())}. Please provide correct input keys for representative samples.')\n        return sample\n    return validator",
    "docstring": "Creates a validator function for a representative sample. Args: expected_input_keys: Input keys (keyword argument names) that the function the sample will be used for is expecting to receive. Returns: A callable that validates a .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\compiler\\mlir\\quantization\\tensorflow\\python\\py_function_lib.py",
    "ast_data": "FunctionDef name:_create_sample_validator arg:expected_input_keys arguments arg FunctionDef name:validator arg:sample arguments arg If Call Raise Call Call If Compare Call Call Raise Call Call Call Call Return return:yes Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_equalization_config_generator",
    "source_code": "def _equalization_config_generator(self, detector_qconfig_info: DetectorQConfigInfo, module: torch.nn.Module) -> EqualizationQConfig:\n    return detector_qconfig_info.generate_equalization_qconfig()",
    "docstring": "We ignore the module argument here, and only focus on thedetector_qconfig_info Returns the equalization configuration generated by the DetectorQConfigInfo object",
    "type": "method",
    "file_path": "pytorch\\torch\\ao\\quantization\\fx\\_model_report\\model_report.py",
    "ast_data": "FunctionDef name:_equalization_config_generator arg:self arg:detector_qconfig_info arg:module arguments arg arg arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "is_param",
    "source_code": "def is_param(program: 'ExportedProgram', node: torch.fx.Node) -> bool:\n    return node.name in program.graph_signature.inputs_to_parameters",
    "docstring": "Checks if the given node is a parameter within the exported program",
    "type": "function",
    "file_path": "pytorch\\torch\\_export\\utils.py",
    "ast_data": "FunctionDef name:is_param arg:program arg:node arguments arg arg Return return:yes Compare"
  },
  {
    "library": "pytorch",
    "name": "filter_ciflow_tags",
    "source_code": "def filter_ciflow_tags(tags: set[str]) -> list[str]:\n    return sorted((tag[:-2] for tag in tags if tag.startswith('ciflow/') and tag.endswith('/*')))",
    "docstring": "Return sorted list of ciflow tags",
    "type": "function",
    "file_path": "pytorch\\.github\\scripts\\collect_ciflow_labels.py",
    "ast_data": "FunctionDef name:filter_ciflow_tags arg:tags arguments arg Return return:yes Call BoolOp Call Call"
  },
  {
    "library": "pytorch",
    "name": "parse_et_yaml",
    "source_code": "def parse_et_yaml(path: str, tags_yaml_path: str, ignore_keys: set[DispatchKey] | None=None, skip_native_fns_gen: bool=False) -> tuple[list[NativeFunction], dict[OperatorName, dict[str, Any]]]:\n    with open(path) as f:\n        es = yaml.load(f, Loader=LineLoader)\n    et_kernel = extract_kernel_fields(es)\n    strip_et_fields(es)\n    native_yaml = parse_native_yaml(path, tags_yaml_path, ignore_keys, skip_native_fns_gen=skip_native_fns_gen, loaded_yaml=es)\n    return (native_yaml.native_functions, et_kernel)",
    "docstring": "Parse native_functions.yaml into NativeFunctions and an Operator Indexed Dict of fields to persist from native_functions.yaml to functions.yaml",
    "type": "function",
    "file_path": "pytorch\\torchgen\\executorch\\parse.py",
    "ast_data": "FunctionDef name:parse_et_yaml arg:path arg:tags_yaml_path arg:ignore_keys arg:skip_native_fns_gen arguments arg arg arg arg With Call Assign Call Assign Call Call Assign Call Return return:yes"
  },
  {
    "library": "numpy",
    "name": "fromfunction",
    "source_code": "@finalize_array_function_like\n@set_module('numpy')\ndef fromfunction(function, shape, *, dtype=float, like=None, **kwargs):\n    if like is not None:\n        return _fromfunction_with_like(like, function, shape, dtype=dtype, **kwargs)\n    args = indices(shape, dtype=dtype)\n    return function(*args, **kwargs)",
    "docstring": "Construct an array by executing a function over each coordinate. The resulting array therefore has a value `shapeshapefunctionfunctiondtypefunctionfromfunctionfunctionfunctionfromfunctionshapedtypelikefunction`. Examples -------- >>> import numpy as np >>> np.fromfunction(lambda i, j: i, (2, 2), dtype=float) array([[0., 0.], [1., 1.]]) >>> np.fromfunction(lambda i, j: j, (2, 2), dtype=float) array([[0., 1.], [0., 1.]]) >>> np.fromfunction(lambda i, j: i == j, (3, 3), dtype=int) array([[ True, False, False], [False, True, False], [False, False, True]]) >>> np.fromfunction(lambda i, j: i + j, (3, 3), dtype=int) array([[0, 1, 2], [1, 2, 3], [2, 3, 4]])",
    "type": "function",
    "file_path": "numpy\\numpy\\_core\\numeric.py",
    "ast_data": "FunctionDef name:fromfunction arg:function arg:shape arguments arg arg arg arg arg If Compare Return return:yes Call Assign Call Return return:yes Call Call"
  },
  {
    "library": "sphinx",
    "name": "add_node",
    "source_code": "def add_node(self, node: type[Element], override: bool=False, **kwargs: _NodeHandlerPair) -> None:\n    logger.debug('[app] adding node: %r', (node, kwargs))\n    if not override and docutils.is_node_registered(node):\n        logger.warning(__('node class %r is already registered, its visitors will be overridden'), node.__name__, type='app', subtype='add_node')\n    docutils.register_node(node)\n    self.registry.add_translation_handlers(node, **kwargs)",
    "docstring": "Register a Docutils node class. This is necessary for Docutils internals. It may also be used in the future to validate nodes in the parsed documents. :param node: A node class :param kwargs: Visitor functions for each builder (see below) :param override: If true, install the node forcedly even if another node is already installed as the same name Node visitor functions for the Sphinx HTML, LaTeX, text and manpage writers can be given as keyword arguments: the keyword should be one or more of `docutils.nodes.SkipNode`. Example: .. code-block:: python class math(docutils.nodes.Element): ... def visit_math_html(self, node): self.body.append(self.starttag(node, 'math')) def depart_math_html(self, node): self.body.append('') app.add_node(math, html=(visit_math_html, depart_math_html)) Obviously, translators for which you don't specify visitor methods will choke on the node when encountered in a document to translate. .. versionchanged:: 0.5 Added the support for keyword arguments giving visit functions.",
    "type": "method",
    "file_path": "sphinx\\sphinx\\application.py",
    "ast_data": "FunctionDef name:add_node arg:self arg:node arg:override arguments arg arg arg arg Call If BoolOp Call Call Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "allow_fp16_bf16_reduction_math_sdp",
    "source_code": "def allow_fp16_bf16_reduction_math_sdp(enabled: bool):\n    torch._C._set_math_sdp_allow_fp16_bf16_reduction(enabled)",
    "docstring": ".. warning:: This flag is beta and subject to change. Enables or disables fp16/bf16 reduction in math scaled dot product attention.",
    "type": "function",
    "file_path": "pytorch\\torch\\backends\\cuda\\__init__.py",
    "ast_data": "FunctionDef name:allow_fp16_bf16_reduction_math_sdp arg:enabled arguments arg Call"
  },
  {
    "library": "pytorch",
    "name": "_Action",
    "source_code": "class _Action(Enum):\n    KEEP_ALIVE = 1\n    ADD_TO_PARTICIPANTS = 2\n    ADD_TO_WAIT_LIST = 3\n    ADD_TO_REDUNDANCY_LIST = 4\n    REMOVE_FROM_PARTICIPANTS = 5\n    REMOVE_FROM_WAIT_LIST = 6\n    REMOVE_FROM_REDUNDANCY_LIST = 7\n    MARK_RENDEZVOUS_COMPLETE = 8\n    MARK_RENDEZVOUS_CLOSED = 9\n    SYNC = 10\n    ERROR_CLOSED = 11\n    ERROR_TIMEOUT = 12\n    FINISH = 13",
    "docstring": "Specifies the possible actions based on the state of the rendezvous.",
    "type": "class",
    "file_path": "pytorch\\torch\\distributed\\elastic\\rendezvous\\dynamic_rendezvous.py",
    "ast_data": "ClassDef name:_Action Assign Assign Assign Assign Assign Assign Assign Assign Assign Assign Assign Assign Assign"
  },
  {
    "library": "tensorflow",
    "name": "_GatherLayerBroadcaster",
    "source_code": "class _GatherLayerBroadcaster(_LayerBroadcaster):\n\n    def __init__(self, gather_index):\n        gather_index = ops.convert_to_tensor(gather_index)\n        if gather_index.dtype != dtypes.int64 and gather_index.dtype != dtypes.int32:\n            raise ValueError('gather_index must be int64 or int32')\n        self._gather_index = gather_index\n\n    @property\n    def gather_index(self):\n        return self._gather_index\n\n    def with_dtype(self, dtype):\n        return _GatherLayerBroadcaster(math_ops.cast(self._gather_index, dtype))\n\n    def with_dependencies(self, checks):\n        new_gather_index = control_flow_ops.with_dependencies(checks, self._gather_index)\n        return _GatherLayerBroadcaster(new_gather_index)",
    "docstring": "Implements _LayerBroadcaster with an explicit gather_index. For example, suppose that the source shape is: [*],[*,*] And the target shape is: [*],[*,*],[*],[*,*] Then, this can be represented with a map: [0,1,2,0,1,2]",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\dynamic_ragged_shape.py",
    "ast_data": "ClassDef name:_GatherLayerBroadcaster FunctionDef name:__init__ arg:self arg:gather_index arguments arg arg Assign Call If BoolOp Compare Compare Raise Call Assign FunctionDef name:gather_index arg:self arguments arg Return return:yes FunctionDef name:with_dtype arg:self arg:dtype arguments arg arg Return return:yes Call Call FunctionDef name:with_dependencies arg:self arg:checks arguments arg arg Assign Call Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "logpdf",
    "source_code": "def logpdf(self, x):\n    points = atleast_2d(x)\n    d, m = points.shape\n    if d != self.d:\n        if d == 1 and m == self.d:\n            points = reshape(points, (self.d, 1))\n            m = 1\n        else:\n            msg = f'points have dimension {d}, dataset has dimension {self.d}'\n            raise ValueError(msg)\n    output_dtype, spec = _get_output_dtype(self.covariance, points)\n    result = gaussian_kernel_estimate_log[spec](self.dataset.T, self.weights[:, None], points.T, self.cho_cov, output_dtype)\n    return result[:, 0]",
    "docstring": "Evaluate the log of the estimated pdf on a provided set of points.",
    "type": "method",
    "file_path": "scipy\\scipy\\stats\\_kde.py",
    "ast_data": "FunctionDef name:logpdf arg:self arg:x arguments arg arg Assign Call Assign If Compare If BoolOp Compare Compare Assign Call Assign Assign Raise Call Assign Call Assign Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "remove_spectral_norm",
    "source_code": "def remove_spectral_norm(module: T_module, name: str='weight') -> T_module:\n    for k, hook in module._forward_pre_hooks.items():\n        if isinstance(hook, SpectralNorm) and hook.name == name:\n            hook.remove(module)\n            del module._forward_pre_hooks[k]\n            break\n    else:\n        raise ValueError(f\"spectral_norm of '{name}' not found in {module}\")\n    for k, hook in module._state_dict_hooks.items():\n        if isinstance(hook, SpectralNormStateDictHook) and hook.fn.name == name:\n            del module._state_dict_hooks[k]\n            break\n    for k, hook in module._load_state_dict_pre_hooks.items():\n        if isinstance(hook, SpectralNormLoadStateDictPreHook) and hook.fn.name == name:\n            del module._load_state_dict_pre_hooks[k]\n            break\n    return module",
    "docstring": "Remove the spectral normalization reparameterization from a module. Args: module (Module): containing module name (str, optional): name of weight parameter Example: >>> m = spectral_norm(nn.Linear(40, 10)) >>> remove_spectral_norm(m)",
    "type": "function",
    "file_path": "pytorch\\torch\\nn\\utils\\spectral_norm.py",
    "ast_data": "FunctionDef name:remove_spectral_norm arg:module arg:name arguments arg arg For Call If BoolOp Call Compare Call Raise Call For Call If BoolOp Call Compare For Call If BoolOp Call Compare Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_calculate_gpu_utilization",
    "source_code": "def _calculate_gpu_utilization(self, data_list: list[UsageData]) -> list[GpuUsage]:\n    calculate_gpu = []\n    gpu_mem_utilization = defaultdict(list)\n    gpu_utilization = defaultdict(list)\n    for data in data_list:\n        for gpu in data.gpu_list:\n            gpu_mem_utilization[gpu.uuid].append(gpu.mem_utilization)\n            gpu_utilization[gpu.uuid].append(gpu.utilization)\n    for gpu_uuid in gpu_utilization.keys():\n        gpu_util_stats = self._generate_stats(gpu_utilization[gpu_uuid])\n        gpu_mem_util_stats = self._generate_stats(gpu_mem_utilization[gpu_uuid])\n        calculate_gpu.append(GpuUsage(uuid=gpu_uuid, util_percent=gpu_util_stats, mem_util_percent=gpu_mem_util_stats))\n    return calculate_gpu",
    "docstring": "Calculates the GPU utilization.",
    "type": "method",
    "file_path": "pytorch\\tools\\stats\\monitor.py",
    "ast_data": "FunctionDef name:_calculate_gpu_utilization arg:self arg:data_list arguments arg arg Assign Assign Call Assign Call For For Call Call For Call Assign Call Assign Call Call Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "kwarg_error",
    "source_code": "def kwarg_error(name, kw):\n    if not isinstance(kw, str):\n        kw = next(iter(kw))\n    return TypeError(f\"{name}() got an unexpected keyword argument '{kw}'\")",
    "docstring": "Generate a TypeError to be raised by function calls with wrong kwarg. Parameters ---------- name : str The name of the calling function. kw : str or Iterable[str] Either the invalid keyword argument name, or an iterable yielding invalid keyword arguments (e.g., a `` dict).",
    "type": "function",
    "file_path": "matplotlib\\lib\\matplotlib\\_api\\__init__.py",
    "ast_data": "FunctionDef name:kwarg_error arg:name arg:kw arguments arg arg If Call Assign Call Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_symmetric_projection",
    "source_code": "def _symmetric_projection(self, n):\n    q = self._orthogonal_matrix(n)\n    mask = math_ops.cast(random_ops.random_normal([n], seed=self.seed) > 0, self.dtype)\n    if self.seed:\n        self.seed += 1\n    c = math_ops.multiply(q, mask)\n    return math_ops.matmul(c, array_ops.matrix_transpose(c))",
    "docstring": "Compute a n x n symmetric projection matrix. Args: n: Dimension. Returns: A n x n symmetric projection matrix, i.e. a matrix P s.t. P=P*P, P=P^T.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\init_ops.py",
    "ast_data": "FunctionDef name:_symmetric_projection arg:self arg:n arguments arg arg Assign Call Assign Call Compare Call If Assign Call Return return:yes Call Call"
  },
  {
    "library": "numpy",
    "name": "english_upper",
    "source_code": "def english_upper(s):\n    uppered = s.translate(UPPER_TABLE)\n    return uppered",
    "docstring": "Apply English case rules to convert ASCII strings to all upper case. This is an internal utility function to replace calls to str.upper() such that we can avoid changing behavior with changing locales. In particular, Turkish has distinct dotted and dotless variants of the Latin letter \"I\" in both lowercase and uppercase. Thus, \"i\".upper() != \"I\" in a \"tr\" locale. Parameters ---------- s : str Returns ------- uppered : str Examples -------- >>> import numpy as np >>> from numpy.lib.utils import english_upper >>> s = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789_' >>> english_upper(s) 'ABCDEFGHIJKLMNOPQRSTUVWXYZABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_' >>> english_upper('') ''",
    "type": "function",
    "file_path": "numpy\\numpy\\_core\\code_generators\\generate_umath.py",
    "ast_data": "FunctionDef name:english_upper arg:s arguments arg Assign Call Return return:yes"
  },
  {
    "library": "kornia",
    "name": "get_dense_pe",
    "source_code": "def get_dense_pe(self) -> Tensor:\n    return self.pe_layer(self.image_embedding_size)[None, ...]",
    "docstring": "Return the positional encoding used to encode point prompts, applied to a dense set of points the shape of the image encoding. Returns: Positional encoding with shape 1x(embed_dim)x(embedding_h)x(embedding_w)",
    "type": "method",
    "file_path": "kornia\\kornia\\contrib\\models\\sam\\architecture\\prompt_encoder.py",
    "ast_data": "FunctionDef name:get_dense_pe arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "record_operation",
    "source_code": "def record_operation(op_type, output_tensors, input_tensors, backward_function, forward_function=None):\n    pywrap_tfe.TFE_Py_TapeSetRecordOperation(op_type, output_tensors, input_tensors, backward_function, forward_function)",
    "docstring": "Records the operation on all tapes in the stack.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\record.py",
    "ast_data": "FunctionDef name:record_operation arg:op_type arg:output_tensors arg:input_tensors arg:backward_function arg:forward_function arguments arg arg arg arg arg Call"
  },
  {
    "library": "pytorch",
    "name": "write_frozen",
    "source_code": "def write_frozen(self, m: FrozenModule, outfp):\n    outfp.write(f'unsigned char {m.c_name}[] = {{')\n    for i in range(0, len(m.bytecode), 16):\n        outfp.write('\\n\\t')\n        for c in bytes(m.bytecode[i:i + 16]):\n            outfp.write(f'{c:d},')\n    outfp.write('\\n};\\n')",
    "docstring": "Write a single frozen module's bytecode out to a C variable.",
    "type": "method",
    "file_path": "pytorch\\torch\\utils\\_freeze.py",
    "ast_data": "FunctionDef name:write_frozen arg:self arg:m arg:outfp arguments arg arg arg Call For Call Call Call For Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "_nvtx_range_pop",
    "source_code": "def _nvtx_range_pop():\n    if torch.cuda.is_available():\n        torch.cuda.nvtx.range_pop()",
    "docstring": "If PyTorch is installed with CUDA support, this terminates NVTX range. Check torch.cuda.nvtx.range_pop's document for more details.",
    "type": "function",
    "file_path": "pytorch\\torch\\onnx\\_internal\\onnxruntime.py",
    "ast_data": "FunctionDef name:_nvtx_range_pop arguments If Call Call"
  },
  {
    "library": "pytorch",
    "name": "generate",
    "source_code": "def generate(self, **kwargs: Any) -> ChoiceCaller:\n    raise NotImplementedError",
    "docstring": "Generates a ChoiceCaller instance from the given arguments.",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\codegen\\common.py",
    "ast_data": "FunctionDef name:generate arg:self arguments arg arg Raise"
  },
  {
    "library": "tensorflow",
    "name": "should_stop",
    "source_code": "def should_stop(self):\n    return self._stop_event.is_set()",
    "docstring": "Check if stop was requested. Returns: True if a stop was requested.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\training\\coordinator.py",
    "ast_data": "FunctionDef name:should_stop arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "add_artist",
    "source_code": "def add_artist(self, a):\n    self._children.append(a)\n    if not a.is_transform_set():\n        a.set_transform(self.get_transform())\n    if self.axes is not None:\n        a.axes = self.axes\n    fig = self.get_figure(root=False)\n    if fig is not None:\n        a.set_figure(fig)",
    "docstring": "Add an to the container box.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\offsetbox.py",
    "ast_data": "FunctionDef name:add_artist arg:self arg:a arguments arg arg Call If Call Call Call If Compare Assign Assign Call If Compare Call"
  },
  {
    "library": "tensorflow",
    "name": "_configure_session_config_for_std_servers",
    "source_code": "def _configure_session_config_for_std_servers(strategy, eval_strategy, session_config, cluster_spec, task_type, task_id):\n    if task_type == _TaskType.EVALUATOR:\n        if eval_strategy:\n            eval_strategy.configure(session_config=session_config)\n    else:\n        strategy = copy.deepcopy(strategy)\n        strategy.configure(session_config=session_config, cluster_spec=cluster_spec, task_type=task_type, task_id=task_id)\n    del session_config.device_filters[:]",
    "docstring": "Call strategy's to mutate the session_config. The session_config is currently needed as default config for a TensorFlow server. In the future, we should be able to remove this method and only pass the session config to a client session.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\distribute_coordinator.py",
    "ast_data": "FunctionDef name:_configure_session_config_for_std_servers arg:strategy arg:eval_strategy arg:session_config arg:cluster_spec arg:task_type arg:task_id arguments arg arg arg arg arg arg If Compare If Call Assign Call Call"
  },
  {
    "library": "pytorch",
    "name": "auto_set",
    "source_code": "def auto_set(self):\n    if not self._is_backward:\n        return False\n    if self._pass_count == 0:\n        self._num_inputs_require_grads += 1\n        return True\n    else:\n        self._auto_set_counter += 1\n        return self._pass_count == self._auto_set_counter",
    "docstring": "This is used to automatically set the require_grad for the backward patch. It is implemented based on two counters. One counter to save the number of times init has been called. The other counter to save the number of times this function itself has been called. In the very first time init is called, this function counts how many inputs require gradient. In each of the following init calls, this function will return only one true value. Here is an example: ... self.v1 = torch.rand(M, N, K, requires_grad=self.auto_set()) self.v2 = torch.rand(M, N, K, requires_grad=self.auto_set()) ...",
    "type": "method",
    "file_path": "pytorch\\benchmarks\\operator_benchmark\\benchmark_pytorch.py",
    "ast_data": "FunctionDef name:auto_set arg:self arguments arg If Return return:yes If Compare Return return:yes Return return:yes Compare"
  },
  {
    "library": "tensorflow",
    "name": "_find_scalar_and_max_depth",
    "source_code": "def _find_scalar_and_max_depth(pylist):\n    if isinstance(pylist, (list, tuple)) or np.ndim(pylist) != 0:\n        scalar_depth = None\n        max_depth = 1\n        for child in pylist:\n            child_scalar_depth, child_max_depth = _find_scalar_and_max_depth(child)\n            if child_scalar_depth is not None:\n                if scalar_depth is not None and scalar_depth != child_scalar_depth + 1:\n                    raise ValueError('all scalar values must have the same nesting depth')\n                scalar_depth = child_scalar_depth + 1\n            max_depth = max(max_depth, child_max_depth + 1)\n        return (scalar_depth, max_depth)\n    return (0, 0)",
    "docstring": "Finds nesting depth of scalar values in pylist. Args: pylist: A nested python or . Returns: A tuple . is the nesting depth of scalar values in , or if contains no scalars. is the maximum depth of (including empty lists). Raises: ValueError: If pylist has inconsistent nesting depths for scalars.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\ragged_factory_ops.py",
    "ast_data": "FunctionDef name:_find_scalar_and_max_depth arg:pylist arguments arg If BoolOp Call Compare Call Assign Assign For Assign Call If Compare If BoolOp Compare Compare Raise Call Assign Assign Call Return return:yes Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "get_shape",
    "source_code": "def get_shape(self, i):\n    indices = self.get_indices(i)\n    return tuple((len(i) for i in indices))",
    "docstring": "Shape of the 'th bicluster. Parameters ---------- i : int The index of the cluster. Returns ------- n_rows : int Number of rows in the bicluster. n_cols : int Number of columns in the bicluster.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\base.py",
    "ast_data": "FunctionDef name:get_shape arg:self arg:i arguments arg arg Assign Call Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "output",
    "source_code": "@property\ndef output(self):\n    if not self._inbound_nodes:\n        raise AttributeError('Layer ' + self.name + ' has no inbound nodes.')\n    return self._get_node_attribute_at_index(0, 'output_tensors', 'output')",
    "docstring": "Retrieves the output tensor(s) of a layer. Only applicable if the layer has exactly one output, i.e. if it is connected to one incoming layer. Returns: Output tensor or list of output tensors. Raises: AttributeError: if the layer is connected to more than one incoming layers. RuntimeError: if called in Eager mode.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\base_layer_v1.py",
    "ast_data": "FunctionDef name:output arg:self arguments arg If Raise Call Return return:yes Call"
  },
  {
    "library": "scrapy",
    "name": "iflatten",
    "source_code": "def iflatten(x: Iterable[Any]) -> Iterable[Any]:\n    warnings.warn('The iflatten function is deprecated and will be removed in a future version of Scrapy.', category=ScrapyDeprecationWarning, stacklevel=2)\n    for el in x:\n        if is_listlike(el):\n            yield from iflatten(el)\n        else:\n            yield el",
    "docstring": "iflatten(sequence) -> iterator Similar to ``, but returns iterator instead",
    "type": "function",
    "file_path": "scrapy\\scrapy\\utils\\python.py",
    "ast_data": "FunctionDef name:iflatten arg:x arguments arg Call For If Call Call"
  },
  {
    "library": "scipy",
    "name": "_remove_redundancy_id",
    "source_code": "def _remove_redundancy_id(A, rhs, rank=None, randomized=True):\n    status = 0\n    message = ''\n    inconsistent = 'There is a linear combination of rows of A_eq that results in zero, suggesting a redundant constraint. However the same linear combination of b_eq is nonzero, suggesting that the constraints conflict and the problem is infeasible.'\n    A, rhs, status, message = _remove_zero_rows(A, rhs)\n    if status != 0:\n        return (A, rhs, status, message)\n    m, n = A.shape\n    k = rank\n    if rank is None:\n        k = np.linalg.matrix_rank(A)\n    idx, proj = interp_decomp(A.T, k, rand=randomized)\n    if not np.allclose(rhs[idx[:k]] @ proj, rhs[idx[k:]]):\n        status = 2\n        message = inconsistent\n    idx = sorted(idx[:k])\n    A2 = A[idx, :]\n    rhs2 = rhs[idx]\n    return (A2, rhs2, status, message)",
    "docstring": "Eliminates redundant equations from a system of equations. Eliminates redundant equations from system of equations defined by Ax = b and identifies infeasibilities. Parameters ---------- A : 2-D array An array representing the left-hand side of a system of equations rhs : 1-D array An array representing the right-hand side of a system of equations rank : int, optional The rank of A randomized: bool, optional True for randomized interpolative decomposition Returns ------- A : 2-D array An array representing the left-hand side of a system of equations rhs : 1-D array An array representing the right-hand side of a system of equations status: int An integer indicating the status of the system 0: No infeasibility identified 2: Trivially infeasible message : str A string descriptor of the exit status of the optimization.",
    "type": "function",
    "file_path": "scipy\\scipy\\optimize\\_remove_redundancy.py",
    "ast_data": "FunctionDef name:_remove_redundancy_id arg:A arg:rhs arg:rank arg:randomized arguments arg arg arg arg Assign Assign Assign Assign Call If Compare Return return:yes Assign Assign If Compare Assign Call Assign Call If Call Assign Assign Assign Call Assign Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "diag_part",
    "source_code": "def diag_part(self, name='diag_part'):\n    with self._name_scope(name):\n        return self._diag_part()",
    "docstring": "Efficiently get the [batch] diagonal part of this operator. If this operator has shape , this returns a , of shape , where . Args: name: A name for this . Returns: diag_part: A of same as self.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\linalg\\linear_operator.py",
    "ast_data": "FunctionDef name:diag_part arg:self arg:name arguments arg arg With Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "justknobs_check",
    "source_code": "def justknobs_check(name: str, default: bool=True) -> bool:\n    return default",
    "docstring": "This function can be used to killswitch functionality in FB prod, where you can toggle this value to False in JK without having to do a code push. In OSS, we always have everything turned on all the time, because downstream users can simply choose to not update PyTorch. (If more fine-grained enable/disable is needed, we could potentially have a map we lookup name in to toggle behavior. But the point is that it's all tied to source code in OSS, since there's no live server to query.) This is the bare minimum functionality I needed to do some killswitches. We have a more detailed plan at In particular, in some circumstances it may be necessary to read in a knob once at process start, and then use it consistently for the rest of the process. Future functionality will codify these patterns into a better high level API. WARNING: Do NOT call this function at module import time, JK is not fork safe and you will break anyone who forks the process and then hits JK again.",
    "type": "function",
    "file_path": "pytorch\\torch\\_utils_internal.py",
    "ast_data": "FunctionDef name:justknobs_check arg:name arg:default arguments arg arg Return return:yes"
  },
  {
    "library": "kornia",
    "name": "_torch_linalg_svdvals",
    "source_code": "def _torch_linalg_svdvals(input: Tensor) -> Tensor:\n    if not isinstance(input, Tensor):\n        raise AssertionError(f'Input must be Tensor. Got: {type(input)}.')\n    dtype: torch.dtype = input.dtype\n    if dtype not in (torch.float32, torch.float64):\n        dtype = torch.float32\n    if TYPE_CHECKING:\n        out: Tensor\n    elif torch_version_ge(1, 10):\n        out = torch.linalg.svdvals(input.to(dtype))\n    else:\n        _, out, _ = torch.linalg.svd(input.to(dtype))\n    return out.to(input.dtype)",
    "docstring": "Make torch.linalg.svdvals work with other than fp32/64. The function torch.svd is only implemented for fp32/64 which makes impossible to be used by fp16 or others. What this function does, is cast input data type to fp32, apply torch.svd, and cast back to the input dtype. NOTE: in torch 1.8.1 this function is recommended to use as torch.linalg.svd",
    "type": "function",
    "file_path": "kornia\\kornia\\utils\\helpers.py",
    "ast_data": "FunctionDef name:_torch_linalg_svdvals arg:input arguments arg If Call Raise Call Call If Compare Assign If If Call Assign Call Call Assign Call Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "reduce",
    "source_code": "def reduce(self, initial_state, reduce_fn):\n    iterator = iter(self)\n    optional_data = iterator.get_next_as_optional()\n\n    def cond(optional_data, state):\n        del state\n        return optional_data.has_value()\n\n    def loop_body(optional_data, state):\n        state = reduce_fn(state, optional_data.get_value())\n        optional_data = iterator.get_next_as_optional()\n        return (optional_data, state)\n    optional_data, final_state = while_loop.while_loop(cond, loop_body, [optional_data, initial_state], parallel_iterations=1, return_same_structure=True)\n    return final_state",
    "docstring": "Execute a over all the elements of the input.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\input_lib.py",
    "ast_data": "FunctionDef name:reduce arg:self arg:initial_state arg:reduce_fn arguments arg arg arg Assign Call Assign Call FunctionDef name:cond arg:optional_data arg:state arguments arg arg Return return:yes Call FunctionDef name:loop_body arg:optional_data arg:state arguments arg arg Assign Call Call Assign Call Return return:yes Assign Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "linear_inference_rule",
    "source_code": "@register_inference_rule(torch.nn.Linear)\ndef linear_inference_rule(n: Node, module_instance):\n    assert isinstance(n.args[0], Node)\n    if n.args[0].type == Dyn and isinstance(n.type, TensorType):\n        n.args[0].type = expand_to_tensor_dim(n.args[0].type, len(n.type.__args__))\n    if isinstance(n.args[0].type, TensorType):\n        output_type = linear_check(n.args[0].type, module_instance)\n        n.type = get_greatest_upper_bound(output_type, n.type)\n    return n.type",
    "docstring": "Applies the shape information to the input then gets the greatest upper bound of the resulting type and the existing type",
    "type": "function",
    "file_path": "pytorch\\torch\\fx\\experimental\\graph_gradual_typechecker.py",
    "ast_data": "FunctionDef name:linear_inference_rule arg:n arg:module_instance arguments arg arg Call If BoolOp Compare Call Assign Call Call If Call Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "register_backend",
    "source_code": "def register_backend(compiler_fn: Optional[CompilerFn]=None, name: Optional[str]=None, tags: Sequence[str]=()):\n    if compiler_fn is None:\n        return functools.partial(register_backend, name=name, tags=tags)\n    assert callable(compiler_fn)\n    name = name or compiler_fn.__name__\n    assert name not in _COMPILER_FNS, f'duplicate name: {name}'\n    if compiler_fn not in _BACKENDS:\n        _BACKENDS[name] = None\n    _COMPILER_FNS[name] = compiler_fn\n    compiler_fn._tags = tuple(tags)\n    return compiler_fn",
    "docstring": "Decorator to add a given compiler to the registry to allow calling with string shorthand. Note: for projects not imported by default, it might be easier to pass a function directly as a backend and not use a string. Args: compiler_fn: Callable taking a FX graph and fake tensor inputs name: Optional name, defaults to tags: Optional set of string tags to categorize backend with",
    "type": "function",
    "file_path": "pytorch\\torch\\_dynamo\\backends\\registry.py",
    "ast_data": "FunctionDef name:register_backend arg:compiler_fn arg:name arg:tags arguments arg arg arg If Compare Return return:yes Call Call Assign BoolOp Compare If Compare Assign Assign Assign Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "get_in_layout",
    "source_code": "def get_in_layout(self):\n    return self._in_layout",
    "docstring": "Return boolean flag, `constrainedlayout_guide.Figure.tight_layout()`.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\artist.py",
    "ast_data": "FunctionDef name:get_in_layout arg:self arguments arg Return return:yes"
  },
  {
    "library": "scipy",
    "name": "ArpackNoConvergence",
    "source_code": "class ArpackNoConvergence(ArpackError):\n\n    def __init__(self, msg, eigenvalues, eigenvectors):\n        ArpackError.__init__(self, -1, {-1: msg})\n        self.eigenvalues = eigenvalues\n        self.eigenvectors = eigenvectors",
    "docstring": "ARPACK iteration did not converge Attributes ---------- eigenvalues : ndarray Partial result. Converged eigenvalues. eigenvectors : ndarray Partial result. Converged eigenvectors.",
    "type": "class",
    "file_path": "scipy\\scipy\\sparse\\linalg\\_eigen\\arpack\\arpack.py",
    "ast_data": "ClassDef name:ArpackNoConvergence FunctionDef name:__init__ arg:self arg:msg arg:eigenvalues arg:eigenvectors arguments arg arg arg arg Call Assign Assign"
  },
  {
    "library": "pytorch",
    "name": "query_key_value_clones",
    "source_code": "def query_key_value_clones(query: torch.Tensor, key: torch.Tensor, value: torch.Tensor, dtype: torch.dtype=None):\n    if dtype is None:\n        dtype = query.dtype\n    query_ref = query.clone().detach().to(dtype).requires_grad_(query.requires_grad)\n    key_ref = key.clone().detach().to(dtype).requires_grad_(key.requires_grad)\n    value_ref = value.clone().detach().to(dtype).requires_grad_(value.requires_grad)\n    return (query_ref, key_ref, value_ref)",
    "docstring": "Clones the query, key, and value tensors and moves them to the specified dtype.",
    "type": "function",
    "file_path": "pytorch\\benchmarks\\transformer\\score_mod.py",
    "ast_data": "FunctionDef name:query_key_value_clones arg:query arg:key arg:value arg:dtype arguments arg arg arg arg If Compare Assign Assign Call Call Call Call Assign Call Call Call Call Assign Call Call Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "health",
    "source_code": "def health(self):\n    return self._get_tpu_property('health')",
    "docstring": "Return health of the TPU.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\tpu\\client\\client.py",
    "ast_data": "FunctionDef name:health arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "kornia",
    "name": "dataclass_to_dict",
    "source_code": "def dataclass_to_dict(obj: Any) -> Any:\n    if is_dataclass(obj) and (not isinstance(obj, type)):\n        return {key: dataclass_to_dict(value) for key, value in asdict(obj).items()}\n    elif isinstance(obj, (list, tuple)):\n        return type(obj)((dataclass_to_dict(item) for item in obj))\n    elif isinstance(obj, dict):\n        return {key: dataclass_to_dict(value) for key, value in obj.items()}\n    else:\n        return obj",
    "docstring": "Recursively convert dataclass instances to dictionaries.",
    "type": "function",
    "file_path": "kornia\\kornia\\utils\\helpers.py",
    "ast_data": "FunctionDef name:dataclass_to_dict arg:obj arguments arg If BoolOp Call Call Return return:yes Call Call Call If Call Return return:yes Call Call Call If Call Return return:yes Call Call Return return:yes"
  },
  {
    "library": "scrapy",
    "name": "normkey",
    "source_code": "def normkey(self, key: AnyStr) -> AnyStr:\n    return key.lower()",
    "docstring": "Method to normalize dictionary key access",
    "type": "method",
    "file_path": "scrapy\\scrapy\\utils\\datatypes.py",
    "ast_data": "FunctionDef name:normkey arg:self arg:key arguments arg arg Return return:yes Call"
  },
  {
    "library": "kornia",
    "name": "mask_border_with_padding",
    "source_code": "def mask_border_with_padding(m: Tensor, bd: int, v: Union[Tensor, float, bool], p_m0: Tensor, p_m1: Tensor) -> None:\n    if bd <= 0:\n        return\n    m[:, :bd] = v\n    m[:, :, :bd] = v\n    m[:, :, :, :bd] = v\n    m[:, :, :, :, :bd] = v\n    h0s, w0s = (p_m0.sum(1).max(-1)[0].int(), p_m0.sum(-1).max(-1)[0].int())\n    h1s, w1s = (p_m1.sum(1).max(-1)[0].int(), p_m1.sum(-1).max(-1)[0].int())\n    for b_idx, (h0, w0, h1, w1) in enumerate(zip(h0s, w0s, h1s, w1s)):\n        m[b_idx, h0 - bd:] = v\n        m[b_idx, :, w0 - bd:] = v\n        m[b_idx, :, :, h1 - bd:] = v\n        m[b_idx, :, :, :, w1 - bd:] = v",
    "docstring": "Apply masking to a padded boarder.",
    "type": "function",
    "file_path": "kornia\\kornia\\feature\\loftr\\utils\\coarse_matching.py",
    "ast_data": "FunctionDef name:mask_border_with_padding arg:m arg:bd arg:v arg:p_m0 arg:p_m1 arguments arg arg arg arg arg If Compare Return return:no Assign Assign Assign Assign Assign Call Call Call Call Call Call Assign Call Call Call Call Call Call For Call Call Assign Assign Assign Assign"
  },
  {
    "library": "matplotlib",
    "name": "set_solid_capstyle",
    "source_code": "@_docstring.interpd\ndef set_solid_capstyle(self, s):\n    cs = CapStyle(s)\n    if self._solidcapstyle != cs:\n        self.stale = True\n    self._solidcapstyle = cs",
    "docstring": "How to draw the end caps if the line is solid (not ) The default capstyle is :rc:. Parameters ---------- s : or %(CapStyle)s",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\lines.py",
    "ast_data": "FunctionDef name:set_solid_capstyle arg:self arg:s arguments arg arg Assign Call If Compare Assign Assign"
  },
  {
    "library": "matplotlib",
    "name": "release_mouse",
    "source_code": "def release_mouse(self, ax):\n    if self.mouse_grabber is ax:\n        self.mouse_grabber = None",
    "docstring": "Release the mouse grab held by the *ax*. Usually called by the widgets. It is ok to call this even if *ax* doesn't have the mouse grab currently.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\backend_bases.py",
    "ast_data": "FunctionDef name:release_mouse arg:self arg:ax arguments arg arg If Compare Assign"
  },
  {
    "library": "matplotlib",
    "name": "set_rorigin",
    "source_code": "def set_rorigin(self, rorigin):\n    self._originViewLim.locked_y0 = rorigin",
    "docstring": "Update the radial origin. Parameters ---------- rorigin : float",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\projections\\polar.py",
    "ast_data": "FunctionDef name:set_rorigin arg:self arg:rorigin arguments arg arg Assign"
  },
  {
    "library": "pytorch",
    "name": "_coerce_to_tensor",
    "source_code": "def _coerce_to_tensor(obj, dtype=None, copy=False, ndmin=0):\n    if isinstance(obj, torch.Tensor):\n        tensor = obj\n    else:\n        default_dtype = torch.get_default_dtype()\n        torch.set_default_dtype(_dtypes_impl.get_default_dtype_for(torch.float32))\n        try:\n            tensor = _try_convert_to_tensor(obj)\n        finally:\n            torch.set_default_dtype(default_dtype)\n    tensor = cast_if_needed(tensor, dtype)\n    ndim_extra = ndmin - tensor.ndim\n    if ndim_extra > 0:\n        tensor = tensor.view((1,) * ndim_extra + tensor.shape)\n    if copy:\n        tensor = tensor.clone()\n    return tensor",
    "docstring": "The core logic of the array(...) function. Parameters ---------- obj : tensor_like The thing to coerce dtype : torch.dtype object or None Coerce to this torch dtype copy : bool Copy or not ndmin : int The results as least this many dimensions is_weak : bool Whether obj is a weakly typed python scalar. Returns ------- tensor : torch.Tensor a tensor object with requested dtype, ndim and copy semantics. Notes ----- This is almost a \"tensor_like\" coersion function. Does not handle wrapper ndarrays (those should be handled in the ndarray-aware layer prior to invoking this function).",
    "type": "function",
    "file_path": "pytorch\\torch\\_numpy\\_util.py",
    "ast_data": "FunctionDef name:_coerce_to_tensor arg:obj arg:dtype arg:copy arg:ndmin arguments arg arg arg arg If Call Assign Assign Call Call Call Try Assign Call Call Assign Call Assign If Compare Assign Call If Assign Call Return return:yes"
  },
  {
    "library": "django",
    "name": "transform",
    "source_code": "def transform(self, ct, clone=False):\n    srid = self.srid\n    if ct == srid:\n        if clone:\n            return self.clone()\n        else:\n            return\n    if isinstance(ct, gdal.CoordTransform):\n        srid = None\n    elif srid is None or srid < 0:\n        raise GEOSException('Calling transform() with no SRID set is not supported')\n    g = gdal.OGRGeometry(self._ogr_ptr(), srid)\n    g.transform(ct)\n    ptr = g._geos_ptr()\n    if clone:\n        return GEOSGeometry(ptr, srid=g.srid)\n    if ptr:\n        capi.destroy_geom(self.ptr)\n        self.ptr = ptr\n        self._post_init()\n        self.srid = g.srid\n    else:\n        raise GEOSException('Transformed WKB was invalid.')",
    "docstring": "Requires GDAL. Transform the geometry according to the given transformation object, which may be an integer SRID, and WKT or PROJ string. By default, transform the geometry in-place and return nothing. However if the keyword is set, don't modify the geometry and return a transformed clone instead.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\geos\\geometry.py",
    "ast_data": "FunctionDef name:transform arg:self arg:ct arg:clone arguments arg arg arg Assign If Compare If Return return:yes Call Return return:no If Call Assign If BoolOp Compare Compare Raise Call Assign Call Call Call Assign Call If Return return:yes Call If Call Assign Call Assign Raise Call"
  },
  {
    "library": "tensorflow",
    "name": "GenerateTableHtml",
    "source_code": "def GenerateTableHtml(items, keys_to_print, display_index=True):\n    html = ''\n    html += '<table><tr>\\n'\n    html += '<tr>\\n'\n    if display_index:\n        html += '<th>index</th>'\n    for h, mapper in keys_to_print:\n        html += '<th>%s</th>' % h\n    html += '</tr>\\n'\n    for idx, tensor in enumerate(items):\n        html += '<tr>\\n'\n        if display_index:\n            html += '<td>%d</td>' % idx\n        for h, mapper in keys_to_print:\n            val = tensor[h] if h in tensor else None\n            val = val if mapper is None else mapper(val)\n            html += '<td>%s</td>\\n' % val\n        html += '</tr>\\n'\n    html += '</table>\\n'\n    return html",
    "docstring": "Given a list of object values and keys to print, make an HTML table. Args: items: Items to print an array of dicts. keys_to_print: (key, display_fn). is a key in the object. i.e. items[0][key] should exist. display_fn is the mapping function on display. i.e. the displayed html cell will have the string returned by . display_index: add a column which is the index of each row in . Returns: An html table.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\lite\\tools\\visualize.py",
    "ast_data": "FunctionDef name:GenerateTableHtml arg:items arg:keys_to_print arg:display_index arguments arg arg arg Assign If For For Call If For Assign Compare Assign Compare Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "override_lowering",
    "source_code": "@contextlib.contextmanager\ndef override_lowering(aten_op: Callable[..., Any], override_fn: Callable[..., Any]) -> Iterator[None]:\n    from torch._inductor import lowering\n    orig_fn = lowering.lowerings[aten_op]\n    try:\n        lowering.lowerings[aten_op] = functools.partial(override_fn, orig_fn)\n        yield\n    finally:\n        lowering.lowerings[aten_op] = orig_fn",
    "docstring": "Override the lowering of aten_op with override_fn. The first argument of override_fn is the original lowering fn.",
    "type": "function",
    "file_path": "pytorch\\torch\\_inductor\\utils.py",
    "ast_data": "FunctionDef name:override_lowering arg:aten_op arg:override_fn arguments arg arg Assign Try Assign Call Assign"
  },
  {
    "library": "pytorch",
    "name": "tree_map_",
    "source_code": "def tree_map_(func: Callable[..., Any], tree: PyTree, *rests: PyTree, is_leaf: Optional[Callable[[PyTree], bool]]=None) -> PyTree:\n    leaves, treespec = tree_flatten(tree, is_leaf=is_leaf)\n    flat_args = [leaves] + [treespec.flatten_up_to(r) for r in rests]\n    deque(map(func, *flat_args), maxlen=0)\n    return tree",
    "docstring": "Like :func:, but do an inplace call on each leaf and return the original tree. See also :func:. Args: func (callable): A function that takes `True`.",
    "type": "function",
    "file_path": "pytorch\\torch\\utils\\_pytree.py",
    "ast_data": "FunctionDef name:tree_map_ arg:func arg:tree arguments arg arg arg arg Assign Call Assign Call Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_tensor_to_argdef",
    "source_code": "def _tensor_to_argdef(t, name=None, used_names=None):\n    arg = op_def_pb2.OpDef.ArgDef()\n    if name is None:\n        arg.name = _make_argname_from_tensor_name(t.name)\n        if used_names is not None:\n            if arg.name in used_names:\n                i = 0\n                while True:\n                    new_name = '%s_U%d' % (arg.name, i)\n                    if new_name not in used_names:\n                        arg.name = new_name\n                        break\n                    i += 1\n            used_names.add(arg.name)\n    else:\n        arg.name = name\n    arg.type = t.dtype.as_datatype_enum\n    return arg",
    "docstring": "Convert tensor t to an argdef, with a specified name or a unique name.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\graph_to_function_def.py",
    "ast_data": "FunctionDef name:_tensor_to_argdef arg:t arg:name arg:used_names arguments arg arg arg Assign Call If Compare Assign Call If Compare If Compare Assign While Assign If Compare Assign Call Assign Assign Return return:yes"
  },
  {
    "library": "scipy",
    "name": "yeojohnson_normplot",
    "source_code": "def yeojohnson_normplot(x, la, lb, plot=None, N=80):\n    return _normplot('yeojohnson', x, la, lb, plot, N)",
    "docstring": "Compute parameters for a Yeo-Johnson normality plot, optionally show it. A Yeo-Johnson normality plot shows graphically what the best transformation parameter is to use in to obtain a distribution that is close to normal. Parameters ---------- x : array_like Input array. la, lb : scalar The lower and upper bounds for the `yeojohnsonplotmatplotlib.pyplotlalbprobplotxplotboxcox_normplotprobplot` and plot it in the same plot: >>> _, maxlog = stats.yeojohnson(x) >>> ax.axvline(maxlog, color='r') >>> plt.show()",
    "type": "function",
    "file_path": "scipy\\scipy\\stats\\_morestats.py",
    "ast_data": "FunctionDef name:yeojohnson_normplot arg:x arg:la arg:lb arg:plot arg:N arguments arg arg arg arg arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "orthogonal_",
    "source_code": "def orthogonal_(tensor, gain=1, generator: _Optional[torch.Generator]=None):\n    if tensor.ndimension() < 2:\n        raise ValueError('Only tensors with 2 or more dimensions are supported')\n    if tensor.numel() == 0:\n        return tensor\n    rows = tensor.size(0)\n    cols = tensor.numel() // rows\n    flattened = tensor.new_empty((rows, cols)).normal_(0, 1, generator=generator)\n    if rows < cols:\n        flattened.t_()\n    q, r = torch.linalg.qr(flattened)\n    d = torch.diag(r, 0)\n    ph = d.sign()\n    q *= ph\n    if rows < cols:\n        q.t_()\n    with torch.no_grad():\n        tensor.view_as(q).copy_(q)\n        tensor.mul_(gain)\n    return tensor",
    "docstring": "Fill the input with a (semi) orthogonal matrix. Described in - Saxe, A. et al. (2013). The input tensor must have at least 2 dimensions, and for tensors with more than 2 dimensions the trailing dimensions are flattened. Args: tensor: an n-dimensional , where :math: gain: optional scaling factor generator: the torch Generator to sample from (default: None) Examples: >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_LAPACK) >>> w = torch.empty(3, 5) >>> nn.init.orthogonal_(w)",
    "type": "function",
    "file_path": "pytorch\\torch\\nn\\init.py",
    "ast_data": "FunctionDef name:orthogonal_ arg:tensor arg:gain arg:generator arguments arg arg arg If Compare Call Raise Call If Compare Call Return return:yes Assign Call Assign Call Assign Call Call If Compare Call Assign Call Assign Call Assign Call If Compare Call With Call Call Call Call Return return:yes"
  },
  {
    "library": "django",
    "name": "get_date_field",
    "source_code": "def get_date_field(self):\n    if self.date_field is None:\n        raise ImproperlyConfigured('%s.date_field is required.' % self.__class__.__name__)\n    return self.date_field",
    "docstring": "Get the name of the date field to be used to filter by.",
    "type": "method",
    "file_path": "django\\django\\views\\generic\\dates.py",
    "ast_data": "FunctionDef name:get_date_field arg:self arguments arg If Compare Raise Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "save_text",
    "source_code": "def save_text(self, package: str, resource: str, text: str):\n    return self.save_binary(package, resource, text.encode('utf-8'))",
    "docstring": "Save text data to the package. Args: package (str): The name of module package this resource should go it (e.g. ``). resource (str): A unique name for the resource, used to identify it to load. text (str): The contents to save.",
    "type": "method",
    "file_path": "pytorch\\torch\\package\\package_exporter.py",
    "ast_data": "FunctionDef name:save_text arg:self arg:package arg:resource arg:text arguments arg arg arg arg Return return:yes Call Call"
  },
  {
    "library": "matplotlib",
    "name": "set_offset",
    "source_code": "def set_offset(self, xy):\n    self._offset = xy\n    self.stale = True",
    "docstring": "Set the offset. Parameters ---------- xy : (float, float) or callable The (x, y) coordinates of the offset in display units. These can either be given explicitly as a tuple (x, y), or by providing a function that converts the extent into the offset. This function must have the signature:: def offset(width, height, xdescent, ydescent, renderer) -> (float, float)",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\offsetbox.py",
    "ast_data": "FunctionDef name:set_offset arg:self arg:xy arguments arg arg Assign Assign"
  },
  {
    "library": "tensorflow",
    "name": "swish",
    "source_code": "@dispatch.add_dispatch_support\ndef swish(x):\n    return nn.swish(x)",
    "docstring": "Swish activation function, . Swish activation function which returns . It is a smooth, non-monotonic function that consistently matches or outperforms ReLU on deep networks, it is unbounded above and bounded below. Example Usage: >>> a = tf.constant([-20, -1.0, 0.0, 1.0, 20], dtype = tf.float32) >>> b = tf.keras.activations.swish(a) >>> b.numpy() array([-4.1223075e-08, -2.6894143e-01, 0.0000000e+00, 7.3105860e-01, 2.0000000e+01], dtype=float32) Args: x: Input tensor. Returns: The swish activation applied to (see reference paper for details). Reference: - [Ramachandran et al., 2017](",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\activations.py",
    "ast_data": "FunctionDef name:swish arg:x arguments arg Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "aps12_f",
    "source_code": "def aps12_f(x, n):\n    return np.power(x, 1.0 / n) - np.power(n, 1.0 / n)",
    "docstring": "nth root of x, with a zero at x=n",
    "type": "function",
    "file_path": "scipy\\scipy\\optimize\\_tstutils.py",
    "ast_data": "FunctionDef name:aps12_f arg:x arg:n arguments arg arg Return return:yes Call Call"
  },
  {
    "library": "scipy",
    "name": "Storage",
    "source_code": "class Storage:\n\n    def __init__(self, minres):\n        self._add(minres)\n\n    def _add(self, minres):\n        self.minres = minres\n        self.minres.x = np.copy(minres.x)\n\n    def update(self, minres):\n        if minres.success and (minres.fun < self.minres.fun or not self.minres.success):\n            self._add(minres)\n            return True\n        else:\n            return False\n\n    def get_lowest(self):\n        return self.minres",
    "docstring": "Class used to store the lowest energy structure",
    "type": "class",
    "file_path": "scipy\\scipy\\optimize\\_basinhopping.py",
    "ast_data": "ClassDef name:Storage FunctionDef name:__init__ arg:self arg:minres arguments arg arg Call FunctionDef name:_add arg:self arg:minres arguments arg arg Assign Assign Call FunctionDef name:update arg:self arg:minres arguments arg arg If BoolOp BoolOp Compare Call Return return:yes Return return:yes FunctionDef name:get_lowest arg:self arguments arg Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "_word_ngrams",
    "source_code": "def _word_ngrams(self, tokens, stop_words=None):\n    if stop_words is not None:\n        tokens = [w for w in tokens if w not in stop_words]\n    min_n, max_n = self.ngram_range\n    if max_n != 1:\n        original_tokens = tokens\n        if min_n == 1:\n            tokens = list(original_tokens)\n            min_n += 1\n        else:\n            tokens = []\n        n_original_tokens = len(original_tokens)\n        tokens_append = tokens.append\n        space_join = ' '.join\n        for n in range(min_n, min(max_n + 1, n_original_tokens + 1)):\n            for i in range(n_original_tokens - n + 1):\n                tokens_append(space_join(original_tokens[i:i + n]))\n    return tokens",
    "docstring": "Turn tokens into a sequence of n-grams after stop words filtering",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\feature_extraction\\text.py",
    "ast_data": "FunctionDef name:_word_ngrams arg:self arg:tokens arg:stop_words arguments arg arg arg If Compare Assign Compare Assign If Compare Assign If Compare Assign Call Assign Assign Call Assign Assign For Call Call For Call Call Call Return return:yes"
  },
  {
    "library": "pandas",
    "name": "is_dict_like",
    "source_code": "def is_dict_like(obj: object) -> bool:\n    dict_like_attrs = ('__getitem__', 'keys', '__contains__')\n    return all((hasattr(obj, attr) for attr in dict_like_attrs)) and (not isinstance(obj, type))",
    "docstring": "Check if the object is dict-like. Parameters ---------- obj : object The object to check. This can be any Python object, and the function will determine whether it behaves like a dictionary. Returns ------- bool Whether has dict-like properties. See Also -------- api.types.is_list_like : Check if the object is list-like. api.types.is_file_like : Check if the object is a file-like. api.types.is_named_tuple : Check if the object is a named tuple. Examples -------- >>> from pandas.api.types import is_dict_like >>> is_dict_like({1: 2}) True >>> is_dict_like([1, 2, 3]) False >>> is_dict_like(dict) False >>> is_dict_like(dict()) True",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\dtypes\\inference.py",
    "ast_data": "FunctionDef name:is_dict_like arg:obj arguments arg Assign Return return:yes BoolOp Call Call Call"
  },
  {
    "library": "matplotlib",
    "name": "xmin",
    "source_code": "@property\ndef xmin(self):\n    return np.min(self.get_points()[:, 0])",
    "docstring": "The left edge of the bounding box.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\transforms.py",
    "ast_data": "FunctionDef name:xmin arg:self arguments arg Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "_summarize_exception_stack",
    "source_code": "def _summarize_exception_stack(e: BaseException) -> str:\n    causes = [e]\n    while e.__cause__ is not None:\n        causes.append(e.__cause__)\n        e = e.__cause__\n    return '\\n\\n## Exception summary\\n\\n' + '⬆️\\n'.join([f'{type(e)}: {e}\\n' for e in reversed(causes)]) + '\\n(Refer to the full stack trace above for more information.)'",
    "docstring": "Format the exception stack by showing the text of each exception.",
    "type": "function",
    "file_path": "pytorch\\torch\\onnx\\_internal\\exporter\\_core.py",
    "ast_data": "FunctionDef name:_summarize_exception_stack arg:e arguments arg Assign While Compare Call Assign Return return:yes Call Call Call"
  },
  {
    "library": "sphinx",
    "name": "default_latex_engine",
    "source_code": "def default_latex_engine(config: Config) -> str:\n    if config.language == 'ja':\n        return 'uplatex'\n    if config.language.startswith('zh'):\n        return 'xelatex'\n    if config.language == 'el':\n        return 'xelatex'\n    return 'pdflatex'",
    "docstring": "Better default latex_engine settings for specific languages.",
    "type": "function",
    "file_path": "sphinx\\sphinx\\builders\\latex\\__init__.py",
    "ast_data": "FunctionDef name:default_latex_engine arg:config arguments arg If Compare Return return:yes If Call Return return:yes If Compare Return return:yes Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "get_op_functions",
    "source_code": "def get_op_functions(self, namespace: str, op_name: str, overload: str | None=None) -> list[registration.ONNXFunction] | None:\n    internal_name_instance = registration.OpName.from_name_parts(namespace=namespace, op_name=op_name, overload=overload)\n    return self._registry.get(internal_name_instance)",
    "docstring": "Returns a list of ONNXFunctions for the given op: torch.ops.... The list is ordered by the time of registration. The custom operators should be in the second half of the list. Args: namespace: The namespace of the operator to get. op_name: The name of the operator to get. overload: The overload of the operator to get. If it's default overload, leave it to None. Returns: A list of ONNXFunctions corresponding to the given name, or None if the name is not in the registry.",
    "type": "method",
    "file_path": "pytorch\\torch\\onnx\\_internal\\_exporter_legacy.py",
    "ast_data": "FunctionDef name:get_op_functions arg:self arg:namespace arg:op_name arg:overload arguments arg arg arg arg Assign Call Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "get_useMathText",
    "source_code": "def get_useMathText(self):\n    return self._useMathText",
    "docstring": "Return whether to use fancy math formatting. See Also -------- ScalarFormatter.set_useMathText",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\ticker.py",
    "ast_data": "FunctionDef name:get_useMathText arg:self arguments arg Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "_inertia_per_cluster",
    "source_code": "def _inertia_per_cluster(self, X, centers, labels, sample_weight):\n    n_clusters = centers.shape[0]\n    _inertia = _inertia_sparse if sp.issparse(X) else _inertia_dense\n    inertia_per_cluster = np.empty(n_clusters)\n    for label in range(n_clusters):\n        inertia_per_cluster[label] = _inertia(X, sample_weight, centers, labels, self._n_threads, single_label=label)\n    return inertia_per_cluster",
    "docstring": "Calculate the sum of squared errors (inertia) per cluster. Parameters ---------- X : {ndarray, csr_matrix} of shape (n_samples, n_features) The input samples. centers : ndarray of shape (n_clusters=2, n_features) The cluster centers. labels : ndarray of shape (n_samples,) Index of the cluster each sample belongs to. sample_weight : ndarray of shape (n_samples,) The weights for each observation in X. Returns ------- inertia_per_cluster : ndarray of shape (n_clusters=2,) Sum of squared errors (inertia) for each cluster.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\cluster\\_bisect_k_means.py",
    "ast_data": "FunctionDef name:_inertia_per_cluster arg:self arg:X arg:centers arg:labels arg:sample_weight arguments arg arg arg arg arg Assign Assign Call Assign Call For Call Assign Call Return return:yes"
  },
  {
    "library": "kornia",
    "name": "LocalFeature",
    "source_code": "class LocalFeature(Module):\n\n    def __init__(self, detector: Module, descriptor: LAFDescriptor, scaling_coef: float=1.0) -> None:\n        super().__init__()\n        self.detector = detector\n        self.descriptor = descriptor\n        if scaling_coef <= 0:\n            raise ValueError(f'Scaling coef should be >= 0, got {scaling_coef}')\n        self.scaling_coef = scaling_coef\n\n    def forward(self, img: Tensor, mask: Optional[Tensor]=None) -> Tuple[Tensor, Tensor, Tensor]:\n        lafs, responses = self.detector(img, mask)\n        lafs = scale_laf(lafs, self.scaling_coef)\n        descs = self.descriptor(img, lafs)\n        return (lafs, responses, descs)",
    "docstring": "Module, which combines local feature detector and descriptor. Args: detector: the detection module. descriptor: the descriptor module. scaling_coef: multiplier for change default detector scale (e.g. it is too small for KeyNet by default)",
    "type": "class",
    "file_path": "kornia\\kornia\\feature\\integrated.py",
    "ast_data": "ClassDef name:LocalFeature FunctionDef name:__init__ arg:self arg:detector arg:descriptor arg:scaling_coef arguments arg arg arg arg Call Call Assign Assign If Compare Raise Call Assign FunctionDef name:forward arg:self arg:img arg:mask arguments arg arg arg Assign Call Assign Call Assign Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "_decade_less",
    "source_code": "def _decade_less(x, base):\n    if x < 0:\n        return -_decade_greater(-x, base)\n    less = _decade_less_equal(x, base)\n    if less == x:\n        less /= base\n    return less",
    "docstring": "Return the largest integer power of *base* that's less than *x*. If *x* is negative, the exponent will be *greater*.",
    "type": "function",
    "file_path": "matplotlib\\lib\\matplotlib\\ticker.py",
    "ast_data": "FunctionDef name:_decade_less arg:x arg:base arguments arg arg If Compare Return return:yes Call Assign Call If Compare Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "_routing_enabled",
    "source_code": "def _routing_enabled():\n    return get_config().get('enable_metadata_routing', False)",
    "docstring": "Return whether metadata routing is enabled. .. versionadded:: 1.3 Returns ------- enabled : bool Whether metadata routing is enabled. If the config is not set, it defaults to False.",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\utils\\_metadata_requests.py",
    "ast_data": "FunctionDef name:_routing_enabled arguments Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_orthogonal_kernel",
    "source_code": "def _orthogonal_kernel(self, ksize, cin, cout):\n    if cin > cout:\n        raise ValueError(f'The number of input channels (cin={cin}) cannot exceed the number of output channels (cout={cout}).')\n    orth = self._orthogonal_matrix(cout)[0:cin, :]\n    if ksize == 1:\n        return array_ops.expand_dims(array_ops.expand_dims(array_ops.expand_dims(orth, 0), 0), 0)\n    p = self._block_orth(self._symmetric_projection(cout), self._symmetric_projection(cout), self._symmetric_projection(cout))\n    for _ in range(ksize - 2):\n        temp = self._block_orth(self._symmetric_projection(cout), self._symmetric_projection(cout), self._symmetric_projection(cout))\n        p = self._matrix_conv(p, temp)\n    for i in range(ksize):\n        for j in range(ksize):\n            for k in range(ksize):\n                p[i, j, k] = math_ops.matmul(orth, p[i, j, k])\n    return self._dict_to_tensor(p, ksize, ksize, ksize)",
    "docstring": "Construct orthogonal kernel for convolution. Args: ksize: Kernel size. cin: Number of input channels. cout: Number of output channels. Returns: An [ksize, ksize, ksize, cin, cout] orthogonal kernel. Raises: ValueError: If cin > cout.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\init_ops.py",
    "ast_data": "FunctionDef name:_orthogonal_kernel arg:self arg:ksize arg:cin arg:cout arguments arg arg arg arg If Compare Raise Call Assign Call If Compare Return return:yes Call Call Call Assign Call Call Call Call For Call Assign Call Call Call Call Assign Call For Call For Call For Call Assign Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_ExpintGrad",
    "source_code": "@ops.RegisterGradient('Expint')\ndef _ExpintGrad(op: ops.Operation, grad):\n    x = op.inputs[0]\n    with ops.control_dependencies([grad]):\n        return grad * math_ops.exp(x) / x",
    "docstring": "Compute gradient of expint(x) with respect to its argument.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\math_grad.py",
    "ast_data": "FunctionDef name:_ExpintGrad arg:op arg:grad arguments arg arg Assign With Call Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_source_file_paths_outside_tensorflow_py_library",
    "source_code": "def _source_file_paths_outside_tensorflow_py_library(code_defs, id_to_string):\n    file_ids = set()\n    for code_def in code_defs:\n        for trace in code_def.traces:\n            file_ids.add(trace.file_id)\n    non_tf_files = (id_to_string[file_id] for file_id in file_ids)\n    non_tf_files = (f for f in non_tf_files if not source_utils.guess_is_tensorflow_py_library(f) and gfile.Exists(f))\n    return non_tf_files",
    "docstring": "Extract source file paths outside TensorFlow Python library. Args: code_defs: An iterable of protos, i.e., an iterable of stack traces. id_to_string: A proto map from integer ids to strings. Returns: An iterable of source file paths outside the TensorFlow Python library.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\debug\\lib\\source_remote.py",
    "ast_data": "FunctionDef name:_source_file_paths_outside_tensorflow_py_library arg:code_defs arg:id_to_string arguments arg arg Assign Call For For Call Assign Assign BoolOp Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_check_create_file_writer_args",
    "source_code": "def _check_create_file_writer_args(inside_function, **kwargs):\n    for arg_name, arg in kwargs.items():\n        if not isinstance(arg, ops.EagerTensor) and tensor_util.is_tf_type(arg):\n            if inside_function:\n                raise ValueError(f\"Invalid graph Tensor argument '{arg_name}={arg}' to create_file_writer() inside an @tf.function. The create call will be lifted into the outer eager execution context, so it cannot consume graph tensors defined inside the function body.\")\n            else:\n                raise ValueError(f\"Invalid graph Tensor argument '{arg_name}={arg}' to eagerly executed create_file_writer().\")",
    "docstring": "Helper to check the validity of arguments to a create_file_writer() call. Args: inside_function: whether the create_file_writer() call is in a tf.function **kwargs: the arguments to check, as kwargs to give them names. Raises: ValueError: if the arguments are graph tensors.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\summary_ops_v2.py",
    "ast_data": "FunctionDef name:_check_create_file_writer_args arg:inside_function arguments arg arg For Call If BoolOp Call Call If Raise Call Raise Call"
  },
  {
    "library": "numpy",
    "name": "unravel_index",
    "source_code": "@array_function_from_c_func_and_dispatcher(_multiarray_umath.unravel_index)\ndef unravel_index(indices, shape=None, order=None):\n    return (indices,)",
    "docstring": "unravel_index(indices, shape, order='C') Converts a flat index or array of flat indices into a tuple of coordinate arrays. Parameters ---------- indices : array_like An integer array whose elements are indices into the flattened version of an array of dimensions `` array. See Also -------- ravel_multi_index Examples -------- >>> import numpy as np >>> np.unravel_index([22, 41, 37], (7,6)) (array([3, 6, 6]), array([4, 5, 1])) >>> np.unravel_index([31, 41, 13], (7,6), order='F') (array([3, 6, 6]), array([4, 5, 1])) >>> np.unravel_index(1621, (6,7,8,9)) (3, 1, 4, 1)",
    "type": "function",
    "file_path": "numpy\\numpy\\_core\\multiarray.py",
    "ast_data": "FunctionDef name:unravel_index arg:indices arg:shape arg:order arguments arg arg arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "tile",
    "source_code": "@dispatch.dispatch_for_api(array_ops.tile)\ndef tile(input: ragged_tensor.Ragged, multiples, name=None):\n    with ops.name_scope(name, 'RaggedTile', [input, multiples]):\n        input = ragged_tensor.convert_to_tensor_or_ragged_tensor(input, name='input')\n        if not ragged_tensor.is_ragged(input):\n            return array_ops.tile(input, multiples, name)\n        multiples = ragged_util.convert_to_int_tensor(multiples, name='multiples', dtype=input.row_splits.dtype)\n        multiples.shape.assert_has_rank(1)\n        const_multiples = tensor_util.constant_value(multiples)\n        return ragged_tensor.RaggedTensor.from_nested_row_splits(_tile_ragged_values(input, multiples, const_multiples), _tile_ragged_splits(input, multiples, const_multiples), validate=False)",
    "docstring": "Constructs a by tiling a given . The values of are replicated times along the th dimension (for each dimension ). For every dimension in , the length of each output element in that dimension is the length of corresponding input element multiplied by . Args: input: A . multiples: A 1-D integer . Length must be the same as the number of dimensions in . name: A name for the operation (optional). Returns: A with the same type, rank, and ragged_rank as . #### Example: >>> rt = tf.ragged.constant([[1, 2], [3]]) >>> tf.tile(rt, [3, 2]).to_list() [[1, 2, 1, 2], [3, 3], [1, 2, 1, 2], [3, 3], [1, 2, 1, 2], [3, 3]]",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\ragged_array_ops.py",
    "ast_data": "FunctionDef name:tile arg:input arg:multiples arg:name arguments arg arg arg With Call Assign Call If Call Return return:yes Call Assign Call Call Assign Call Return return:yes Call Call Call Call"
  },
  {
    "library": "matplotlib",
    "name": "reset",
    "source_code": "def reset(self):\n    for key in self.to_dict():\n        setattr(self, key, mpl.rcParams[f'figure.subplot.{key}'])",
    "docstring": "Restore the subplot positioning parameters to the default rcParams values",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\gridspec.py",
    "ast_data": "FunctionDef name:reset arg:self arguments arg For Call Call"
  },
  {
    "library": "scikit-learn",
    "name": "_parallel_predict_regression",
    "source_code": "def _parallel_predict_regression(estimators, estimators_features, X, params):\n    return sum((estimator.predict(X[:, features], **params) for estimator, features in zip(estimators, estimators_features)))",
    "docstring": "Private function used to compute predictions within a job.",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\ensemble\\_bagging.py",
    "ast_data": "FunctionDef name:_parallel_predict_regression arg:estimators arg:estimators_features arg:X arg:params arguments arg arg arg arg Return return:yes Call Call Call"
  },
  {
    "library": "scipy",
    "name": "add_sys_path",
    "source_code": "def add_sys_path(self):\n    site_dir = str(self.site)\n    sys.path.insert(0, site_dir)\n    os.environ['PYTHONPATH'] = os.pathsep.join((site_dir, os.environ.get('PYTHONPATH', '')))",
    "docstring": "Add site dir to sys.path / PYTHONPATH",
    "type": "method",
    "file_path": "scipy\\dev.py",
    "ast_data": "FunctionDef name:add_sys_path arg:self arguments arg Assign Call Call Assign Call Call"
  },
  {
    "library": "pytorch",
    "name": "is_autocast_available",
    "source_code": "def is_autocast_available(device_type: str) -> bool:\n    return torch._C._is_autocast_available(device_type)",
    "docstring": "Return a bool indicating if autocast is available on :attr:. Args: device_type(str): Device type to use. Possible values are: 'cuda', 'cpu', 'mtia', 'maia', 'xpu', and so on. The type is the same as the attribute of a :class:. Thus, you may obtain the device type of a tensor using .",
    "type": "function",
    "file_path": "pytorch\\torch\\amp\\autocast_mode.py",
    "ast_data": "FunctionDef name:is_autocast_available arg:device_type arguments arg Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "max",
    "source_code": "def max(self, y: Array | complex, /, copy: bool | None=None, xp: ModuleType | None=None) -> Array:\n    xp = array_namespace(self._x) if xp is None else xp\n    mxp = meta_namespace(self._x, xp=xp)\n    y = xp.asarray(y)\n    return self._op(_AtOp.MAX, mxp.maximum, mxp.maximum, y, copy=copy, xp=xp)",
    "docstring": "Apply `` and return the updated array.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\externals\\array_api_extra\\_lib\\_at.py",
    "ast_data": "FunctionDef name:max arg:copy arg:xp arguments arg arg arg arg Assign Compare Call Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "_get_input_node_fqn",
    "source_code": "def _get_input_node_fqn(input_name: str, graph_signature: ExportGraphSignature) -> str:\n    if input_name in graph_signature.inputs_to_parameters:\n        return graph_signature.inputs_to_parameters[input_name]\n    elif input_name in graph_signature.inputs_to_buffers:\n        return graph_signature.inputs_to_buffers[input_name]\n    else:\n        raise ValueError(f'{input_name} not found in inputs_to_parameters or inputs_to_buffers')",
    "docstring": "Return the FQN of an input node.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\tensor\\experimental\\_tp_transform.py",
    "ast_data": "FunctionDef name:_get_input_node_fqn arg:input_name arg:graph_signature arguments arg arg If Compare Return return:yes If Compare Return return:yes Raise Call"
  },
  {
    "library": "tensorflow",
    "name": "get_profiles",
    "source_code": "def get_profiles(self, cmd):\n    if cmd not in self._views:\n        raise ValueError('No autoprofiler for command: {}, was run'.format(cmd))\n    return self._views[cmd]",
    "docstring": "Returns profiling results for each step at which was run. Args: cmd: string, profiling command used in an call. Returns: dict[int: (MultiGraphNodeProto | GraphNodeProto)]. Keys are steps at which the profiling command was run. Values are the outputs of profiling. For \"code\" and \"op\" commands this will be a , for \"scope\" and \"graph\" commands this will be a cmdadd_auto_profilingcmd`.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\profiler\\profile_context.py",
    "ast_data": "FunctionDef name:get_profiles arg:self arg:cmd arguments arg arg If Compare Raise Call Call Return return:yes"
  },
  {
    "library": "authlib",
    "name": "generate_client_registration_info",
    "source_code": "def generate_client_registration_info(self, client, request):\n    raise NotImplementedError()",
    "docstring": "Generate ``` for RFC7592. By default this method returns the values sent in the current request. Developers MUST rewrite this method to return different registration information.:: def generate_client_registration_info(self, client, request):{ access_token = request.headers['Authorization'].split(' ')[1] return { 'registration_client_uri': request.uri, 'registration_access_token': access_token, } :param client: the instance of OAuth client :param request: formatted request instance",
    "type": "method",
    "file_path": "authlib\\authlib\\oauth2\\rfc7592\\endpoint.py",
    "ast_data": "FunctionDef name:generate_client_registration_info arg:self arg:client arg:request arguments arg arg arg Raise Call"
  },
  {
    "library": "tensorflow",
    "name": "ones_like_v2",
    "source_code": "@dispatch.dispatch_for_types(array_ops.ones_like_v2, StructuredTensor)\ndef ones_like_v2(input, dtype=None, name=None, layout=None):\n    if layout is not None and (not layout.is_fully_replicated()):\n        raise ValueError(f'StructuredTensor only allows replicated layout. got {layout}')\n    if dtype is None:\n        dtype = dtypes.float32\n    with ops.name_scope(name, 'ones_like', [input]) as name:\n        if not input.row_partitions:\n            if input.nrows() is not None:\n                return array_ops.ones([input.nrows()], dtype, layout=layout)\n            else:\n                return array_ops.ones([], dtype, layout=layout)\n        last_row_partition = input.row_partitions[-1]\n        result = ragged_tensor.RaggedTensor._from_nested_row_partitions(array_ops.ones(last_row_partition.nvals(), dtype=dtype), input.row_partitions)\n        return result",
    "docstring": "Replace every object with a zero. Example: >>> st = StructuredTensor.from_pyval([{\"x\":[3]}, {\"x\":[4,5]}]) >>> tf.ones_like(st) >>> st = StructuredTensor.from_pyval([[{\"x\":[3]}], [{\"x\":[4,5]}, {\"x\":[]}]]) >>> tf.ones_like(st, dtype=tf.int32) Args: input: a structured tensor. dtype: the dtype of the resulting zeros. (default is tf.float32) name: a name for the op. layout: Optional Layout. Only supports replicated layout. Returns: a tensor of zeros of the same shape.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\structured\\structured_array_ops.py",
    "ast_data": "FunctionDef name:ones_like_v2 arg:input arg:dtype arg:name arg:layout arguments arg arg arg arg If BoolOp Compare Call Raise Call If Compare Assign With Call If If Compare Call Return return:yes Call Call Return return:yes Call Assign Assign Call Call Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "square",
    "source_code": "@dispatch.add_dispatch_support\n@doc_controls.do_not_generate_docs\ndef square(x):\n    return math_ops.square(x)",
    "docstring": "Element-wise square. Args: x: Tensor or variable. Returns: A tensor.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\backend.py",
    "ast_data": "FunctionDef name:square arg:x arguments arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "executable",
    "source_code": "@property\ndef executable(self) -> Path:\n    assert self.is_venv()\n    if self._executable is None:\n        if WINDOWS:\n            executable = self.prefix / 'Scripts' / 'python.exe'\n        else:\n            executable = self.prefix / 'bin' / 'python'\n        assert executable.is_file() or executable.is_symlink()\n        assert os.access(executable, os.X_OK), f'{executable} is not executable'\n        self._executable = executable\n    return self._executable",
    "docstring": "Get the Python executable for the virtual environment.",
    "type": "method",
    "file_path": "pytorch\\tools\\nightly.py",
    "ast_data": "FunctionDef name:executable arg:self arguments arg Call If Compare If Assign Assign BoolOp Call Call Call Assign Return return:yes"
  },
  {
    "library": "django",
    "name": "writeString",
    "source_code": "def writeString(self, encoding):\n    s = StringIO()\n    self.write(s, encoding)\n    return s.getvalue()",
    "docstring": "Return the feed in the given encoding as a string.",
    "type": "method",
    "file_path": "django\\django\\utils\\feedgenerator.py",
    "ast_data": "FunctionDef name:writeString arg:self arg:encoding arguments arg arg Assign Call Call Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "fit",
    "source_code": "@_fit_context(prefer_skip_nested_validation=True)\ndef fit(self, X, y=None):\n    X = validate_data(self, X)\n    if self.assume_centered:\n        self.location_ = np.zeros(X.shape[1])\n    else:\n        self.location_ = X.mean(0)\n    covariance, shrinkage = _ledoit_wolf(X - self.location_, assume_centered=True, block_size=self.block_size)\n    self.shrinkage_ = shrinkage\n    self._set_covariance(covariance)\n    return self",
    "docstring": "Fit the Ledoit-Wolf shrunk covariance model to X. Parameters ---------- X : array-like of shape (n_samples, n_features) Training data, where is the number of samples and is the number of features. y : Ignored Not used, present for API consistency by convention. Returns ------- self : object Returns the instance itself.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\covariance\\_shrunk_covariance.py",
    "ast_data": "FunctionDef name:fit arg:self arg:X arg:y arguments arg arg arg Assign Call If Assign Call Assign Call Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "numpy",
    "name": "capitalize",
    "source_code": "@set_module('numpy.strings')\n@array_function_dispatch(_unary_op_dispatcher)\ndef capitalize(a):\n    a_arr = np.asarray(a)\n    return _vec_string(a_arr, a_arr.dtype, 'capitalize')",
    "docstring": "Return a copy of `str.capitalize` dtype, depending on input types See Also -------- str.capitalize Examples -------- >>> import numpy as np >>> c = np.array(['a1b2','1b2a','b2a1','2a1b'],'S4'); c array(['a1b2', '1b2a', 'b2a1', '2a1b'], dtype='|S4') >>> np.strings.capitalize(c) array(['A1b2', '1b2a', 'B2a1', '2a1b'], dtype='|S4')",
    "type": "function",
    "file_path": "numpy\\numpy\\_core\\strings.py",
    "ast_data": "FunctionDef name:capitalize arg:a arguments arg Assign Call Return return:yes Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "fill",
    "source_code": "@dispatch.dispatch_for_api(array_ops.fill)\ndef fill(dims: dynamic_ragged_shape.DynamicRaggedShape, value: core_types.TensorLike, name: Optional[str]=None, layout=None) -> ragged_tensor.RaggedOrDense:\n    if layout is not None and (not layout.is_fully_replicated()):\n        raise ValueError(f'RaggedTensor only allows replicated layout. got {layout}')\n    flat_values = array_ops.fill(dims.inner_shape, value, name=name, layout=layout)\n    return dims._add_row_partitions(flat_values)",
    "docstring": "Creates a tensor with shape and fills it with .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\ragged_array_ops.py",
    "ast_data": "FunctionDef name:fill arg:dims arg:value arg:name arg:layout arguments arg arg arg arg If BoolOp Compare Call Raise Call Assign Call Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "_sharded_op_common",
    "source_code": "def _sharded_op_common(op, early_stop_func, extra_check):\n\n    def decorator_sharded_func(wrapped_func):\n\n        @functools.wraps(wrapped_func)\n        def wrapper(types, args=(), kwargs=None, pg=None):\n            _basic_validation(op, args, kwargs)\n            st = args[0]\n            if kwargs is None:\n                kwargs = {}\n            if extra_check:\n                extra_check(*args, **kwargs)\n            if early_stop_func:\n                early_stop = early_stop_func(*args, **kwargs)\n                if early_stop:\n                    return st\n            return wrapped_func(types, args, kwargs, pg)\n        return wrapper\n    return decorator_sharded_func",
    "docstring": "Inject sharded tensor op registration with common logics executed before different behaviors are done on either local shards or a local tensor. Example:: >>> # xdoctest: +SKIP(\"Undefined variables\") >>> op = torch.transpose >>> @_sharded_op_impl(op) >>> @_sharded_op_common(op, early_stop_func, extra_check) >>> def sharded_tensor_op(types, args, kwargs, process_group): >>> ... >>> >>> st = sharded_tensor.rand(32, 16) >>> st.transpose(1, 2) >>> # This will call '_sharded_op_common' Args: op: The op to be registered and applied to all shards of the st. early_stop_func (Callable, optional): the func for early stop. Default: if ``, no extra check. Return: func (Callable): Torch function for which we want to provide a sharded implementation (ex: torch.transpose)",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\_shard\\sharded_tensor\\_ops\\_common.py",
    "ast_data": "FunctionDef name:_sharded_op_common arg:op arg:early_stop_func arg:extra_check arguments arg arg arg FunctionDef name:decorator_sharded_func arg:wrapped_func arguments arg FunctionDef name:wrapper arg:types arg:args arg:kwargs arg:pg arguments arg arg arg arg Call Assign If Compare Assign If Call If Assign Call If Return return:yes Return return:yes Call Call Return return:yes Return return:yes"
  },
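The decorator stacks validation, an optional extra check, and an optional early stop in front of the wrapped implementation. A standalone toy sketch of the same pattern (all names here are illustrative, not the PyTorch API):

```python
import functools

def _op_common(op, early_stop_func=None, extra_check=None):
    def decorator(wrapped_func):
        @functools.wraps(wrapped_func)
        def wrapper(types, args=(), kwargs=None, pg=None):
            kwargs = kwargs or {}
            if extra_check:
                extra_check(*args, **kwargs)           # may raise on bad input
            if early_stop_func and early_stop_func(*args, **kwargs):
                return args[0]                         # no-op: return input as-is
            return wrapped_func(types, args, kwargs, pg)
        return wrapper
    return decorator

# Early-stop when transposing a dimension with itself.
@_op_common("transpose", early_stop_func=lambda t, d0, d1: d0 == d1)
def transpose_impl(types, args, kwargs, pg):
    return f"transposed {args[0]} over dims {args[1]}, {args[2]}"

print(transpose_impl(None, ("st", 1, 1)))  # st  (early stop)
print(transpose_impl(None, ("st", 0, 1)))  # transposed st over dims 0, 1
```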
  {
    "library": "matplotlib",
    "name": "_picklable_class_constructor",
    "source_code": "def _picklable_class_constructor(mixin_class, fmt, attr_name, base_class):\n    factory = _make_class_factory(mixin_class, fmt, attr_name)\n    cls = factory(base_class)\n    return cls.__new__(cls)",
    "docstring": "Internal helper for _make_class_factory.",
    "type": "function",
    "file_path": "matplotlib\\lib\\matplotlib\\cbook.py",
    "ast_data": "FunctionDef name:_picklable_class_constructor arg:mixin_class arg:fmt arg:attr_name arg:base_class arguments arg arg arg arg Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "numpy",
    "name": "extract",
    "source_code": "@array_function_dispatch(_extract_dispatcher)\ndef extract(condition, arr):\n    return _nx.take(ravel(arr), nonzero(ravel(condition))[0])",
    "docstring": "Return the elements of an array that satisfy some condition. This is equivalent to `conditionplaceextractarrconditionarrconditioncondition` is boolean: >>> arr[condition] array([0, 3, 6, 9])",
    "type": "function",
    "file_path": "numpy\\numpy\\lib\\_function_base_impl.py",
    "ast_data": "FunctionDef name:extract arg:condition arg:arr arguments arg arg Return return:yes Call Call Call Call Call"
  },
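A quick illustration of the equivalences stated in the docstring:

```python
import numpy as np

arr = np.arange(12).reshape(3, 4)
condition = np.mod(arr, 3) == 0

# extract ravels both arguments, so the result is always 1-D.
print(np.extract(condition, arr))                       # [0 3 6 9]
print(np.compress(np.ravel(condition), np.ravel(arr)))  # [0 3 6 9]
print(arr[condition])                                   # [0 3 6 9]
```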
  {
    "library": "scipy",
    "name": "_process_parameters",
    "source_code": "def _process_parameters(self, dim):\n    if dim is None or not np.isscalar(dim) or dim < 0 or (dim != int(dim)):\n        raise ValueError('Dimension of rotation must be specified,and must be a scalar nonnegative integer.')\n    return dim",
    "docstring": "Dimension N must be specified; it cannot be inferred.",
    "type": "method",
    "file_path": "scipy\\scipy\\stats\\_multivariate.py",
    "ast_data": "FunctionDef name:_process_parameters arg:self arg:dim arguments arg arg If BoolOp Compare Call Compare Compare Call Raise Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "drop_removed_buffers",
    "source_code": "def drop_removed_buffers(self, lines):\n    for i, line in enumerate(lines):\n        if isinstance(line, (AllocateLine, FreeIfNotReusedLine, ReuseLine)):\n            if line.node.get_name() in V.graph.removed_buffers:\n                lines[i] = NullLine(self.wrapper)",
    "docstring": "Replace any memory planning lines in V.graph.removed_buffers with NullLine",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\codegen\\memory_planning.py",
    "ast_data": "FunctionDef name:drop_removed_buffers arg:self arg:lines arguments arg arg For Call If Call If Compare Call Assign Call"
  },
  {
    "library": "cherrypy",
    "name": "_make_content_disposition",
    "source_code": "def _make_content_disposition(disposition, file_name):\n    ascii_name = unicodedata.normalize('NFKC', file_name).encode('ascii', errors='ignore').decode()\n    header = '{}; filename=\"{}\"'.format(disposition, ascii_name)\n    if ascii_name != file_name:\n        quoted_name = urllib.parse.quote(file_name)\n        header += \"; filename*=UTF-8''{}\".format(quoted_name)\n    return header",
    "docstring": "Create HTTP header for downloading a file with a UTF-8 filename. This function implements the recommendations of :rfc:. See this and related answers:",
    "type": "function",
    "file_path": "cherrypy\\cherrypy\\lib\\static.py",
    "ast_data": "FunctionDef name:_make_content_disposition arg:disposition arg:file_name arguments arg arg Assign Call Call Call Assign Call If Compare Assign Call Call Return return:yes"
  },
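A standalone sketch replicating the helper above, to show the ASCII fallback alongside the RFC 6266-style `filename*` parameter:

```python
import unicodedata
import urllib.parse

def make_content_disposition(disposition, file_name):
    # Legacy-client fallback: normalize, then drop non-ASCII characters.
    ascii_name = (unicodedata.normalize('NFKC', file_name)
                  .encode('ascii', errors='ignore').decode())
    header = '{}; filename="{}"'.format(disposition, ascii_name)
    if ascii_name != file_name:
        # Modern clients prefer the percent-encoded UTF-8 parameter.
        header += "; filename*=UTF-8''{}".format(urllib.parse.quote(file_name))
    return header

print(make_content_disposition('attachment', 'résumé.pdf'))
# attachment; filename="rsum.pdf"; filename*=UTF-8''r%C3%A9sum%C3%A9.pdf
```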
  {
    "library": "pytorch",
    "name": "ZipperIterDataPipe",
    "source_code": "@functional_datapipe('zip')\nclass ZipperIterDataPipe(IterDataPipe[tuple[_T_co]]):\n    datapipes: tuple[IterDataPipe]\n\n    def __init__(self, *datapipes: IterDataPipe):\n        if not all((isinstance(dp, IterDataPipe) for dp in datapipes)):\n            raise TypeError('All inputs are required to be `IterDataPipe` for `ZipIterDataPipe`.')\n        super().__init__()\n        self.datapipes = datapipes\n\n    def __iter__(self) -> Iterator[tuple[_T_co]]:\n        iterators = [iter(datapipe) for datapipe in self.datapipes]\n        yield from zip(*iterators)\n\n    def __len__(self) -> int:\n        if all((isinstance(dp, Sized) for dp in self.datapipes)):\n            return min((len(dp) for dp in self.datapipes))\n        else:\n            raise TypeError(f\"{type(self).__name__} instance doesn't have valid length\")",
    "docstring": "Aggregates elements into a tuple from each of the input DataPipes (functional name: ``). The output is stopped as soon as the shortest input DataPipe is exhausted. Args: *datapipes: Iterable DataPipes being aggregated Example: >>> # xdoctest: +REQUIRES(module:torchdata) >>> from torchdata.datapipes.iter import IterableWrapper >>> dp1, dp2, dp3 = IterableWrapper(range(5)), IterableWrapper(range(10, 15)), IterableWrapper(range(20, 25)) >>> list(dp1.zip(dp2, dp3)) [(0, 10, 20), (1, 11, 21), (2, 12, 22), (3, 13, 23), (4, 14, 24)]",
    "type": "class",
    "file_path": "pytorch\\torch\\utils\\data\\datapipes\\iter\\combining.py",
    "ast_data": "ClassDef name:ZipperIterDataPipe FunctionDef name:__init__ arg:self arguments arg arg If Call Call Raise Call Call Call Assign FunctionDef name:__iter__ arg:self arguments arg Assign Call Call FunctionDef name:__len__ arg:self arguments arg If Call Call Return return:yes Call Call Raise Call Call Call"
  },
  {
    "library": "scipy",
    "name": "proc_fpool_nog",
    "source_code": "def proc_fpool_nog(self):\n    for v in self.fpool:\n        self.compute_sfield(v)\n    self.fpool = set()",
    "docstring": "Process all field functions with no constraints supplied.",
    "type": "method",
    "file_path": "scipy\\scipy\\optimize\\_shgo_lib\\_vertex.py",
    "ast_data": "FunctionDef name:proc_fpool_nog arg:self arguments arg For Call Assign Call"
  },
  {
    "library": "django",
    "name": "no_style",
    "source_code": "@functools.cache\ndef no_style():\n    return make_style('nocolor')",
    "docstring": "Return a Style object with no color scheme.",
    "type": "function",
    "file_path": "django\\django\\core\\management\\color.py",
    "ast_data": "FunctionDef name:no_style arguments Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "expect_partial",
    "source_code": "def expect_partial(self):\n    self._checkpoint.expect_partial = True\n    return self",
    "docstring": "Silence warnings about incomplete checkpoint restores.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\checkpoint\\checkpoint.py",
    "ast_data": "FunctionDef name:expect_partial arg:self arguments arg Assign Return return:yes"
  },
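A hedged usage sketch: restore a checkpoint into an object graph that only consumes part of it, then call `expect_partial()` on the returned load status to silence the warnings:

```python
import tensorflow as tf

model = tf.keras.Sequential([tf.keras.layers.Dense(4)])
model.build((None, 2))

# Save a checkpoint containing more than just the model (an extra `step`).
path = tf.train.Checkpoint(model=model, step=tf.Variable(0)).save('/tmp/demo_ckpt')

# Restoring only the model leaves `step` unmatched; expect_partial()
# suppresses the incomplete-restore warnings.
status = tf.train.Checkpoint(model=model).restore(path)
status.expect_partial()
```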
  {
    "library": "matplotlib",
    "name": "LogScale",
    "source_code": "class LogScale(ScaleBase):\n    name = 'log'\n\n    def __init__(self, axis, *, base=10, subs=None, nonpositive='clip'):\n        self._transform = LogTransform(base, nonpositive)\n        self.subs = subs\n    base = property(lambda self: self._transform.base)\n\n    def set_default_locators_and_formatters(self, axis):\n        axis.set_major_locator(LogLocator(self.base))\n        axis.set_major_formatter(LogFormatterSciNotation(self.base))\n        axis.set_minor_locator(LogLocator(self.base, self.subs))\n        axis.set_minor_formatter(LogFormatterSciNotation(self.base, labelOnlyBase=self.subs is not None))\n\n    def get_transform(self):\n        return self._transform\n\n    def limit_range_for_scale(self, vmin, vmax, minpos):\n        if not np.isfinite(minpos):\n            minpos = 1e-300\n        return (minpos if vmin <= 0 else vmin, minpos if vmax <= 0 else vmax)",
    "docstring": "A standard logarithmic scale. Care is taken to only plot positive values.",
    "type": "class",
    "file_path": "matplotlib\\lib\\matplotlib\\scale.py",
    "ast_data": "ClassDef name:LogScale Assign FunctionDef name:__init__ arg:self arg:axis arguments arg arg arg arg arg Assign Call Assign Assign Call arguments arg FunctionDef name:set_default_locators_and_formatters arg:self arg:axis arguments arg arg Call Call Call Call Call Call Call Call Compare FunctionDef name:get_transform arg:self arguments arg Return return:yes FunctionDef name:limit_range_for_scale arg:self arg:vmin arg:vmax arg:minpos arguments arg arg arg arg If Call Assign Return return:yes Compare Compare"
  },
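Usage sketch: the keyword arguments accepted by `LogScale.__init__` are forwarded from `Axes.set_xscale`/`set_yscale`:

```python
import numpy as np
import matplotlib.pyplot as plt

fig, ax = plt.subplots()
x = np.linspace(1, 10, 100)
ax.plot(x, 2.0 ** x)

# nonpositive='clip' maps values <= 0 to a tiny positive number
# instead of masking them out of the plot.
ax.set_yscale('log', base=2, nonpositive='clip')
plt.show()
```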
  {
    "library": "pandas",
    "name": "is_leap_year",
    "source_code": "@property\ndef is_leap_year(self) -> npt.NDArray[np.bool_]:\n    return isleapyear_arr(np.asarray(self.year))",
    "docstring": "Logical indicating if the date belongs to a leap year. See Also -------- PeriodIndex.qyear : Fiscal year the Period lies in according to its starting-quarter. PeriodIndex.year : The year of the period. Examples -------- >>> idx = pd.PeriodIndex([\"2023\", \"2024\", \"2025\"], freq=\"Y\") >>> idx.is_leap_year array([False, True, False])",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\arrays\\period.py",
    "ast_data": "FunctionDef name:is_leap_year arg:self arguments arg Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_handle_multiprocessing",
    "source_code": "def _handle_multiprocessing(self, x, workers, use_multiprocessing, max_queue_size):\n    if workers > 1 or (workers > 0 and use_multiprocessing):\n\n        def generator_fn():\n            enqueuer = data_utils.GeneratorEnqueuer(x, use_multiprocessing=use_multiprocessing)\n            enqueuer.start(workers=workers, max_queue_size=max_queue_size)\n            return enqueuer.get()\n    else:\n        generator_fn = lambda: x\n    return generator_fn",
    "docstring": "Create a callable, possibly including an Enqueuer.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\data_adapter.py",
    "ast_data": "FunctionDef name:_handle_multiprocessing arg:self arg:x arg:workers arg:use_multiprocessing arg:max_queue_size arguments arg arg arg arg arg If BoolOp Compare BoolOp Compare FunctionDef name:generator_fn arguments Assign Call Call Return return:yes Call Assign arguments Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "map_fn",
    "source_code": "@doc_controls.do_not_generate_docs\ndef map_fn(fn, elems, name=None, dtype=None):\n    return map_fn_lib.map_fn(fn, elems, name=name, dtype=dtype)",
    "docstring": "Map the function fn over the elements elems and return the outputs. Args: fn: Callable that will be called upon each element in elems elems: tensor name: A string name for the map node in the graph dtype: Output data type. Returns: Tensor with dtype .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\backend.py",
    "ast_data": "FunctionDef name:map_fn arg:fn arg:elems arg:name arg:dtype arguments arg arg arg arg Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "_validate_inferred_freq",
    "source_code": "def _validate_inferred_freq(freq: BaseOffset | None, inferred_freq: BaseOffset | None) -> BaseOffset | None:\n    if inferred_freq is not None:\n        if freq is not None and freq != inferred_freq:\n            raise ValueError(f'Inferred frequency {inferred_freq} from passed values does not conform to passed frequency {freq.freqstr}')\n        if freq is None:\n            freq = inferred_freq\n    return freq",
    "docstring": "If the user passes a freq and another freq is inferred from passed data, require that they match. Parameters ---------- freq : DateOffset or None inferred_freq : DateOffset or None Returns ------- freq : DateOffset or None",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\arrays\\datetimelike.py",
    "ast_data": "FunctionDef name:_validate_inferred_freq arg:freq arg:inferred_freq arguments arg arg If Compare If BoolOp Compare Compare Raise Call If Compare Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "preprocess",
    "source_code": "def preprocess(self, root_node):\n    return (root_node, [], [])",
    "docstring": "Preprocess a parse tree. Return a preprocessed node, logs and errors.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\tools\\compatibility\\ast_edits.py",
    "ast_data": "FunctionDef name:preprocess arg:self arg:root_node arguments arg arg Return return:yes"
  },
  {
    "library": "numpy",
    "name": "hermline",
    "source_code": "def hermline(off, scl):\n    if scl != 0:\n        return np.array([off, scl / 2])\n    else:\n        return np.array([off])",
    "docstring": "Hermite series whose graph is a straight line. Parameters ---------- off, scl : scalars The specified line is given by ``. See Also -------- numpy.polynomial.polynomial.polyline numpy.polynomial.chebyshev.chebline numpy.polynomial.legendre.legline numpy.polynomial.laguerre.lagline numpy.polynomial.hermite_e.hermeline Examples -------- >>> from numpy.polynomial.hermite import hermline, hermval >>> hermval(0,hermline(3, 2)) 3.0 >>> hermval(1,hermline(3, 2)) 5.0",
    "type": "function",
    "file_path": "numpy\\numpy\\polynomial\\hermite.py",
    "ast_data": "FunctionDef name:hermline arg:off arg:scl arguments arg arg If Compare Return return:yes Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "call",
    "source_code": "def call(self, y_true, y_pred):\n    if tensor_util.is_tf_type(y_pred) and tensor_util.is_tf_type(y_true):\n        y_pred, y_true = losses_utils.squeeze_or_expand_dimensions(y_pred, y_true)\n    ag_fn = autograph.tf_convert(self.fn, ag_ctx.control_status_ctx())\n    return ag_fn(y_true, y_pred, **self._fn_kwargs)",
    "docstring": "Invokes the instance. Args: y_true: Ground truth values. y_pred: The predicted values. Returns: Loss values per sample.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\losses.py",
    "ast_data": "FunctionDef name:call arg:self arg:y_true arg:y_pred arguments arg arg arg If BoolOp Call Call Assign Call Assign Call Call Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "mask_zero_div_zero",
    "source_code": "def mask_zero_div_zero(x, y, result: np.ndarray) -> np.ndarray:\n    if not hasattr(y, 'dtype'):\n        y = np.array(y)\n    if not hasattr(x, 'dtype'):\n        x = np.array(x)\n    zmask = y == 0\n    if zmask.any():\n        zneg_mask = zmask & np.signbit(y)\n        zpos_mask = zmask & ~zneg_mask\n        x_lt0 = x < 0\n        x_gt0 = x > 0\n        nan_mask = zmask & (x == 0)\n        neginf_mask = zpos_mask & x_lt0 | zneg_mask & x_gt0\n        posinf_mask = zpos_mask & x_gt0 | zneg_mask & x_lt0\n        if nan_mask.any() or neginf_mask.any() or posinf_mask.any():\n            result = result.astype('float64', copy=False)\n            result[nan_mask] = np.nan\n            result[posinf_mask] = np.inf\n            result[neginf_mask] = -np.inf\n    return result",
    "docstring": "Set results of 0 // 0 to np.nan, regardless of the dtypes of the numerator or the denominator. Parameters ---------- x : ndarray y : ndarray result : ndarray Returns ------- ndarray The filled result. Examples -------- >>> x = np.array([1, 0, -1], dtype=np.int64) >>> x array([ 1, 0, -1]) >>> y = 0 # int 0; numpy behavior is different with float >>> result = x // y >>> result # raw numpy result does not fill division by zero array([0, 0, 0]) >>> mask_zero_div_zero(x, y, result) array([ inf, nan, -inf])",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\ops\\missing.py",
    "ast_data": "FunctionDef name:mask_zero_div_zero arg:x arg:y arg:result arguments arg arg arg If Call Assign Call If Call Assign Call Assign Compare If Call Assign Call Assign Assign Compare Assign Compare Assign Compare Assign Assign If BoolOp Call Call Call Assign Call Assign Assign Assign Return return:yes"
  },
  {
    "library": "django",
    "name": "Message",
    "source_code": "class Message:\n\n    def __init__(self, level, message, extra_tags=None):\n        self.level = int(level)\n        self.message = message\n        self.extra_tags = extra_tags\n\n    def _prepare(self):\n        self.message = str(self.message)\n        self.extra_tags = str(self.extra_tags) if self.extra_tags is not None else None\n\n    def __eq__(self, other):\n        if not isinstance(other, Message):\n            return NotImplemented\n        return self.level == other.level and self.message == other.message\n\n    def __str__(self):\n        return str(self.message)\n\n    def __repr__(self):\n        extra_tags = f', extra_tags={self.extra_tags!r}' if self.extra_tags else ''\n        return f'Message(level={self.level}, message={self.message!r}{extra_tags})'\n\n    @property\n    def tags(self):\n        return ' '.join((tag for tag in [self.extra_tags, self.level_tag] if tag))\n\n    @property\n    def level_tag(self):\n        return LEVEL_TAGS.get(self.level, '')",
    "docstring": "Represent an actual message that can be stored in any of the supported storage classes (typically session- or cookie-based) and rendered in a view or template.",
    "type": "class",
    "file_path": "django\\django\\contrib\\messages\\storage\\base.py",
    "ast_data": "ClassDef name:Message FunctionDef name:__init__ arg:self arg:level arg:message arg:extra_tags arguments arg arg arg arg Assign Call Assign Assign FunctionDef name:_prepare arg:self arguments arg Assign Call Assign Compare Call FunctionDef name:__eq__ arg:self arg:other arguments arg arg If Call Return return:yes Return return:yes BoolOp Compare Compare FunctionDef name:__str__ arg:self arguments arg Return return:yes Call FunctionDef name:__repr__ arg:self arguments arg Assign Return return:yes FunctionDef name:tags arg:self arguments arg Return return:yes Call FunctionDef name:level_tag arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "__init__",
    "source_code": "def __init__(self, offset=(2, -2), shadow_rgbFace=None, alpha=None, rho=0.3, **kwargs):\n    super().__init__(offset)\n    if shadow_rgbFace is None:\n        self._shadow_rgbFace = shadow_rgbFace\n    else:\n        self._shadow_rgbFace = mcolors.to_rgba(shadow_rgbFace)\n    if alpha is None:\n        alpha = 0.3\n    self._alpha = alpha\n    self._rho = rho\n    self._gc = kwargs",
    "docstring": "Parameters ---------- offset : (float, float), default: (2, -2) The (x, y) offset of the shadow in points. shadow_rgbFace : :mpltype: The shadow color. alpha : float, default: 0.3 The alpha transparency of the created shadow patch. rho : float, default: 0.3 A scale factor to apply to the rgbFace color if *shadow_rgbFace* is not specified. **kwargs Extra keywords are stored and passed through to :meth:.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\patheffects.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:offset arg:shadow_rgbFace arg:alpha arg:rho arguments arg arg arg arg arg arg Call Call If Compare Assign Assign Call If Compare Assign Assign Assign Assign"
  },
  {
    "library": "tensorflow",
    "name": "_apply_inlining",
    "source_code": "def _apply_inlining(func):\n    graph_def = func.graph.as_graph_def()\n    for function in graph_def.library.function:\n        if 'api_implements' in function.attr:\n            del function.attr['api_implements']\n    meta_graph = saver.export_meta_graph(graph_def=graph_def, graph=func.graph)\n    for name in ['variables', 'model_variables', 'trainable_variables', 'local_variables']:\n        raw_list = []\n        for raw in meta_graph.collection_def['variables'].bytes_list.value:\n            variable = variable_pb2.VariableDef()\n            variable.ParseFromString(raw)\n            variable.ClearField('initializer_name')\n            raw_list.append(variable.SerializeToString())\n        meta_graph.collection_def[name].bytes_list.value[:] = raw_list\n    fetch_collection = meta_graph_pb2.CollectionDef()\n    for array in func.inputs + func.outputs:\n        fetch_collection.node_list.value.append(array.name)\n    meta_graph.collection_def['train_op'].CopyFrom(fetch_collection)\n    config = config_pb2.ConfigProto()\n    rewrite_options = config.graph_options.rewrite_options\n    rewrite_options.min_graph_nodes = -1\n    rewrite_options.optimizers.append('function')\n    new_graph_def = tf_optimizer.OptimizeGraph(config, meta_graph)\n    return new_graph_def",
    "docstring": "Apply an inlining optimization to the function's graph definition.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\compiler\\tensorrt\\trt_convert.py",
    "ast_data": "FunctionDef name:_apply_inlining arg:func arguments arg Assign Call For If Compare Assign Call For Assign For Assign Call Call Call Call Call Assign Assign Call For Call Call Assign Call Assign Assign Call Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "with_name_scope",
    "source_code": "@classmethod\ndef with_name_scope(cls, method):\n\n    def method_with_name_scope(self, *args, **kwargs):\n        with self.name_scope:\n            return method(self, *args, **kwargs)\n    return tf_decorator.make_decorator(method, method_with_name_scope)",
    "docstring": "Decorator to automatically enter the module name scope. >>> class MyModule(tf.Module): ... @tf.Module.with_name_scope ... def __call__(self, x): ... if not hasattr(self, 'w'): ... self.w = tf.Variable(tf.random.normal([x.shape[1], 3])) ... return tf.matmul(x, self.w) Using the above module would produce s and s whose names included the module name: >>> mod = MyModule() >>> mod(tf.ones([1, 2])) >>> mod.w Args: method: The method to wrap. Returns: The original method wrapped such that it enters the module's name scope.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\module\\module.py",
    "ast_data": "FunctionDef name:with_name_scope arg:cls arg:method arguments arg arg FunctionDef name:method_with_name_scope arg:self arguments arg arg arg With Return return:yes Call Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "setitem_datetimelike_compat",
    "source_code": "def setitem_datetimelike_compat(values: np.ndarray, num_set: int, other):\n    if values.dtype == object:\n        dtype, _ = infer_dtype_from(other)\n        if lib.is_np_dtype(dtype, 'mM'):\n            if not is_list_like(other):\n                other = [other] * num_set\n            else:\n                other = list(other)\n    return other",
    "docstring": "Parameters ---------- values : np.ndarray num_set : int For putmask, this is mask.sum() other : Any",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\array_algos\\putmask.py",
    "ast_data": "FunctionDef name:setitem_datetimelike_compat arg:values arg:num_set arg:other arguments arg arg arg If Compare Assign Call If Call If Call Assign Assign Call Return return:yes"
  },
  {
    "library": "pandas",
    "name": "_maybe_to_slice",
    "source_code": "def _maybe_to_slice(loc):\n    if not isinstance(loc, np.ndarray) or loc.dtype != np.intp:\n        return loc\n    loc = lib.maybe_indices_to_slice(loc, len(self))\n    if isinstance(loc, slice):\n        return loc\n    mask = np.empty(len(self), dtype='bool')\n    mask.fill(False)\n    mask[loc] = True\n    return mask",
    "docstring": "convert integer indexer to boolean mask or slice if possible",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\indexes\\multi.py",
    "ast_data": "FunctionDef name:_maybe_to_slice arg:loc arguments arg If BoolOp Call Compare Return return:yes Assign Call Call If Call Return return:yes Assign Call Call Call Assign Return return:yes"
  },
  {
    "library": "sphinx",
    "name": "path2doc",
    "source_code": "def path2doc(self, filename: str | os.PathLike[str]) -> str | None:\n    return self.project.path2doc(filename)",
    "docstring": "Return the docname for the filename if the file is document. *filename* should be absolute or relative to the source directory.",
    "type": "method",
    "file_path": "sphinx\\sphinx\\environment\\__init__.py",
    "ast_data": "FunctionDef name:path2doc arg:self arg:filename arguments arg arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "is_subtype_of",
    "source_code": "def is_subtype_of(self, other: 'Parameter') -> bool:\n    if not self.type_constraint or not other.type_constraint:\n        raise TypeError('Can not determine relationship between partially specified types.')\n    if (self.name, self.kind, self.optional) != (other.name, other.kind, other.optional):\n        return False\n    return self.type_constraint.is_subtype_of(other.type_constraint)",
    "docstring": "Returns True if self is a supertype of other Parameter.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\core\\function\\polymorphism\\function_type.py",
    "ast_data": "FunctionDef name:is_subtype_of arg:self arg:other arguments arg arg If BoolOp Raise Call If Compare Return return:yes Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "register_rendezvous_handler",
    "source_code": "def register_rendezvous_handler(scheme, handler):\n    global _rendezvous_handlers\n    if scheme in _rendezvous_handlers:\n        raise RuntimeError(f'Rendezvous handler for {scheme}:// already registered')\n    _rendezvous_handlers[scheme] = handler",
    "docstring": "Register a new rendezvous handler. Before we can run collective algorithms, participating processes need to find each other and exchange information to be able to communicate. We call this process rendezvous. The outcome of the rendezvous process is a triplet containing a shared key/value store, the rank of the process, and the total number of participating processes. If none of the bundled rendezvous methods apply to your execution environment you can opt to register your own rendezvous handler. Pick a unique name and use the URL scheme to identify it when calling the function. Args: scheme (str): URL scheme to identify your rendezvous handler. handler (function): Handler that is invoked when the function is called with a URL that uses the corresponding scheme. It must be a generator function that yields the triplet.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\rendezvous.py",
    "ast_data": "FunctionDef name:register_rendezvous_handler arg:scheme arg:handler arguments arg arg If Compare Raise Call Assign"
  },
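A hedged sketch of registering a custom handler; the `demo` scheme and the store path are illustrative, and the handler must be a generator yielding the `(store, rank, world_size)` triplet:

```python
import torch.distributed as dist

def demo_rendezvous_handler(url, **kwargs):
    # A real handler would parse rank/world_size out of `url`;
    # this single-process sketch hard-codes them.
    store = dist.FileStore('/tmp/demo_rdzv_store', 1)
    yield (store, 0, 1)

dist.register_rendezvous_handler('demo', demo_rendezvous_handler)
# The scheme can now be used via init_method='demo://...'.
```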
  {
    "library": "tensorflow",
    "name": "fn_input_signature",
    "source_code": "@property\ndef fn_input_signature(self):\n    if self._has_kwargs:\n        return None\n    if None in nest.flatten(self._input_signature):\n        return None\n    return self._input_signature",
    "docstring": "Returns input signature for the wrapped layer call function.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\saving\\saved_model\\save_impl.py",
    "ast_data": "FunctionDef name:fn_input_signature arg:self arguments arg If Return return:no If Compare Call Return return:no Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "_UnstableArchMixin",
    "source_code": "class _UnstableArchMixin:\n\n    def __sklearn_tags__(self):\n        tags = super().__sklearn_tags__()\n        tags.non_deterministic = _IS_32BIT or platform.machine().startswith(('ppc', 'powerpc'))\n        return tags",
    "docstring": "Mark estimators that are non-determinstic on 32bit or PowerPC",
    "type": "class",
    "file_path": "scikit-learn\\sklearn\\base.py",
    "ast_data": "ClassDef name:_UnstableArchMixin FunctionDef name:__sklearn_tags__ arg:self arguments arg Assign Call Call Assign BoolOp Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "debug_lines",
    "source_code": "def debug_lines(self, inp: FxGraphHashDetails) -> list[str]:\n\n    def get_str(obj: Any) -> str:\n        if isinstance(obj, torch.Tensor):\n            return str(extract_tensor_metadata_for_cache_key(obj))\n        elif isinstance(obj, bytes):\n            return '<bytes>'\n        elif type(obj) in self.dispatch_table:\n            return str(self.dispatch_table[type(obj)](obj)[1])\n        else:\n            return str(obj)\n    lines = []\n    for attr, obj in vars(inp).items():\n        if isinstance(obj, list):\n            for ii in range(len(obj)):\n                h = self.get_hash(obj[ii])\n                lines.append(f'[{h}] {attr}[{ii}]: {get_str(obj[ii])}')\n        elif isinstance(obj, dict):\n            for k, v in obj.items():\n                h = self.get_hash(v)\n                lines.append(f'[{h}] {attr}[{k}]: {get_str(v)}')\n        else:\n            h = self.get_hash(obj)\n            lines.append(f'[{h}] {attr}: {get_str(obj)}')\n    return lines",
    "docstring": "Get a printable string describing in more detail all the attributes comprising an object. Useful for debugging when one graph hashes to a different value than another.",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\codecache.py",
    "ast_data": "FunctionDef name:debug_lines arg:self arg:inp arguments arg arg FunctionDef name:get_str arg:obj arguments arg If Call Return return:yes Call Call If Call Return return:yes If Compare Call Return return:yes Call Call Call Return return:yes Call Assign For Call Call If Call For Call Call Assign Call Call Call If Call For Call Assign Call Call Call Assign Call Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "matvec",
    "source_code": "def matvec(self, x, adjoint=False, name='matvec'):\n    with self._name_scope(name):\n        x = tensor_conversion.convert_to_tensor_v2_with_dispatch(x, name='x')\n        self._check_input_dtype(x)\n        self_dim = -2 if adjoint else -1\n        tensor_shape.dimension_at_index(self.shape, self_dim).assert_is_compatible_with(x.shape[-1])\n        return self._matvec(x, adjoint=adjoint)",
    "docstring": "Transform [batch] vector with left multiplication: . Args: x: with compatible shape and same as . is treated as a [batch] vector meaning for every set of leading dimensions, the last dimension defines a vector. See class docstring for definition of compatibility. adjoint: Python . If , left multiply by the adjoint: . name: A name for this . Returns: A with shape and same as .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\linalg\\linear_operator.py",
    "ast_data": "FunctionDef name:matvec arg:self arg:x arg:adjoint arg:name arguments arg arg arg arg With Call Assign Call Call Assign Call Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "enable_mlir_bridge",
    "source_code": "@tf_export('config.experimental.enable_mlir_bridge')\ndef enable_mlir_bridge():\n    context.context().enable_mlir_bridge = True",
    "docstring": "Enables experimental MLIR-Based TensorFlow Compiler Bridge. TensorFlow Compiler Bridge (TF Bridge) is responsible for translating parts of TensorFlow graph into a form that can be accepted as an input by a backend compiler such as XLA.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\config.py",
    "ast_data": "FunctionDef name:enable_mlir_bridge arguments Assign Call Call"
  },
  {
    "library": "pytorch",
    "name": "clone",
    "source_code": "def clone(self):\n    _warn_typed_storage_removal()\n    return self._new_wrapped_storage(self._untyped_storage.clone())",
    "docstring": "Return a copy of this storage.",
    "type": "method",
    "file_path": "pytorch\\torch\\storage.py",
    "ast_data": "FunctionDef name:clone arg:self arguments arg Call Return return:yes Call Call"
  },
  {
    "library": "pandas",
    "name": "len",
    "source_code": "def len(self) -> Series:\n    from pandas import Series\n    value_lengths = pc.list_value_length(self._pa_array)\n    return Series(value_lengths, dtype=ArrowDtype(value_lengths.type), index=self._data.index, name=self._data.name)",
    "docstring": "Return the length of each list in the Series. Returns ------- pandas.Series The length of each list. See Also -------- str.len : Python built-in function returning the length of an object. Series.size : Returns the length of the Series. StringMethods.len : Compute the length of each element in the Series/Index. Examples -------- >>> import pyarrow as pa >>> s = pd.Series( ... [ ... [1, 2, 3], ... [3], ... ], ... dtype=pd.ArrowDtype(pa.list_(pa.int64())), ... ) >>> s.list.len() 0 3 1 1 dtype: int32[pyarrow]",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\arrays\\arrow\\accessors.py",
    "ast_data": "FunctionDef name:len arg:self arguments arg Assign Call Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "filter_symbols",
    "source_code": "def filter_symbols(symbols: OrderedSet[sympy.Symbol]) -> OrderedSet[sympy.Symbol]:\n    return OrderedSet((s for s in symbols if symbol_is_type(s, (SymT.SIZE, SymT.FLOAT, SymT.UNBACKED_INT, SymT.UNBACKED_FLOAT))))",
    "docstring": "Filters a set of symbols that are required for codegen. Skip symbols that are always internal to kernels, such as SymT.TMP, SymT.INDEX, and SymT.R0_INDEX.",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\scheduler.py",
    "ast_data": "FunctionDef name:filter_symbols arg:symbols arguments arg Return return:yes Call Call"
  },
  {
    "library": "numpy",
    "name": "lagmulx",
    "source_code": "def lagmulx(c):\n    [c] = pu.as_series([c])\n    if len(c) == 1 and c[0] == 0:\n        return c\n    prd = np.empty(len(c) + 1, dtype=c.dtype)\n    prd[0] = c[0]\n    prd[1] = -c[0]\n    for i in range(1, len(c)):\n        prd[i + 1] = -c[i] * (i + 1)\n        prd[i] += c[i] * (2 * i + 1)\n        prd[i - 1] -= c[i] * i\n    return prd",
    "docstring": "Multiply a Laguerre series by x. Multiply the Laguerre series by x, where x is the independent variable. Parameters ---------- c : array_like 1-D array of Laguerre series coefficients ordered from low to high. Returns ------- out : ndarray Array representing the result of the multiplication. See Also -------- lagadd, lagsub, lagmul, lagdiv, lagpow Notes ----- The multiplication uses the recursion relationship for Laguerre polynomials in the form .. math:: xP_i(x) = (-(i + 1)*P_{i + 1}(x) + (2i + 1)P_{i}(x) - iP_{i - 1}(x)) Examples -------- >>> from numpy.polynomial.laguerre import lagmulx >>> lagmulx([1, 2, 3]) array([-1., -1., 11., -9.])",
    "type": "function",
    "file_path": "numpy\\numpy\\polynomial\\laguerre.py",
    "ast_data": "FunctionDef name:lagmulx arg:c arguments arg Assign Call If BoolOp Compare Call Compare Return return:yes Assign Call Call Assign Assign For Call Call Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "iterate_inbound",
    "source_code": "def iterate_inbound(self):\n    for kt in self.keras_inputs:\n        keras_history = kt._keras_history\n        layer = keras_history.layer\n        node_index = keras_history.node_index\n        tensor_index = keras_history.tensor_index\n        yield (layer, node_index, tensor_index, kt)",
    "docstring": "Yields tuples representing the data inbound from other nodes. Yields: tuples like: (inbound_layer, node_index, tensor_index, tensor).",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\node.py",
    "ast_data": "FunctionDef name:iterate_inbound arg:self arguments arg For Assign Assign Assign Assign"
  },
  {
    "library": "scikit-learn",
    "name": "_reset",
    "source_code": "def _reset(self):\n    if hasattr(self, 'scale_'):\n        del self.scale_\n        del self.min_\n        del self.n_samples_seen_\n        del self.data_min_\n        del self.data_max_\n        del self.data_range_",
    "docstring": "Reset internal data-dependent state of the scaler, if necessary. __init__ parameters are not touched.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\preprocessing\\_data.py",
    "ast_data": "FunctionDef name:_reset arg:self arguments arg If Call"
  },
  {
    "library": "pandas",
    "name": "_parse_subtype",
    "source_code": "@staticmethod\ndef _parse_subtype(dtype: str) -> tuple[str, bool]:\n    xpr = re.compile('Sparse\\\\[(?P<subtype>[^,]*)(, )?(?P<fill_value>.*?)?\\\\]$')\n    m = xpr.match(dtype)\n    has_fill_value = False\n    if m:\n        subtype = m.groupdict()['subtype']\n        has_fill_value = bool(m.groupdict()['fill_value'])\n    elif dtype == 'Sparse':\n        subtype = 'float64'\n    else:\n        raise ValueError(f'Cannot parse {dtype}')\n    return (subtype, has_fill_value)",
    "docstring": "Parse a string to get the subtype Parameters ---------- dtype : str A string like * Sparse[subtype] * Sparse[subtype, fill_value] Returns ------- subtype : str Raises ------ ValueError When the subtype cannot be extracted.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\dtypes\\dtypes.py",
    "ast_data": "FunctionDef name:_parse_subtype arg:dtype arguments arg Assign Call Assign Call Assign If Assign Call Assign Call Call If Compare Assign Raise Call Return return:yes"
  },
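The regex can be exercised on its own to see which groups drive the two return values:

```python
import re

xpr = re.compile(r'Sparse\[(?P<subtype>[^,]*)(, )?(?P<fill_value>.*?)?\]$')

for dtype in ('Sparse[int64]', 'Sparse[float64, 0.0]'):
    m = xpr.match(dtype)
    print(m['subtype'], bool(m['fill_value']))
# int64 False
# float64 True
```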
  {
    "library": "authlib",
    "name": "authenticate_user",
    "source_code": "def authenticate_user(self, authorization_code):\n    raise NotImplementedError()",
    "docstring": "Authenticate the user related to this authorization_code. Developers MUST implement this method in subclass, e.g.:: def authenticate_user(self, authorization_code): return User.get(authorization_code.user_id) :param authorization_code: AuthorizationCode object :return: user",
    "type": "method",
    "file_path": "authlib\\authlib\\oauth2\\rfc6749\\grants\\authorization_code.py",
    "ast_data": "FunctionDef name:authenticate_user arg:self arg:authorization_code arguments arg arg Raise Call"
  },
  {
    "library": "sphinx",
    "name": "get_redirect_target",
    "source_code": "def get_redirect_target(self, resp: requests.Response) -> str | None:\n    if resp.is_redirect:\n        destination = urljoin(resp.url, resp.headers['location'])\n        if any((pat.match(destination) for pat in self._ignored_redirects)):\n            raise _IgnoredRedirection(destination=destination, status_code=resp.status_code)\n    return super().get_redirect_target(resp)",
    "docstring": "Overrides the default requests.Session.get_redirect_target",
    "type": "method",
    "file_path": "sphinx\\sphinx\\util\\requests.py",
    "ast_data": "FunctionDef name:get_redirect_target arg:self arg:resp arguments arg arg If Assign Call If Call Call Raise Call Return return:yes Call Call"
  },
  {
    "library": "scipy",
    "name": "_DerivedBivariateSpline",
    "source_code": "class _DerivedBivariateSpline(_BivariateSplineBase):\n    _invalid_why = 'is unavailable, because _DerivedBivariateSpline instance is not constructed from data that are to be interpolated or smoothed, but derived from the underlying knots and coefficients of another spline object'\n\n    @property\n    def fp(self):\n        raise AttributeError(f'attribute \"fp\" {self._invalid_why}')\n\n    def get_residual(self):\n        raise AttributeError(f'method \"get_residual\" {self._invalid_why}')",
    "docstring": "Bivariate spline constructed from the coefficients and knots of another spline. Notes ----- The class is not meant to be instantiated directly from the data to be interpolated or smoothed. As a result, its `` is raised when they are accessed. The other inherited attributes can be used as usual.",
    "type": "class",
    "file_path": "scipy\\scipy\\interpolate\\_fitpack2.py",
    "ast_data": "ClassDef name:_DerivedBivariateSpline Assign FunctionDef name:fp arg:self arguments arg Raise Call FunctionDef name:get_residual arg:self arguments arg Raise Call"
  },
  {
    "library": "django",
    "name": "get_accessed_time",
    "source_code": "def get_accessed_time(self, name):\n    raise NotImplementedError('subclasses of Storage must provide a get_accessed_time() method')",
    "docstring": "Return the last accessed time (as a datetime) of the file specified by name. The datetime will be timezone-aware if USE_TZ=True.",
    "type": "method",
    "file_path": "django\\django\\core\\files\\storage\\base.py",
    "ast_data": "FunctionDef name:get_accessed_time arg:self arg:name arguments arg arg Raise Call"
  },
  {
    "library": "tensorflow",
    "name": "DropoutWrapper",
    "source_code": "@tf_export(v1=['nn.rnn_cell.DropoutWrapper'])\nclass DropoutWrapper(rnn_cell_wrapper_impl.DropoutWrapperBase, _RNNCellWrapperV1):\n\n    def __init__(self, *args, **kwargs):\n        super(DropoutWrapper, self).__init__(*args, **kwargs)\n    __init__.__doc__ = rnn_cell_wrapper_impl.DropoutWrapperBase.__init__.__doc__",
    "docstring": "Operator adding dropout to inputs and outputs of the given cell.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\layers\\legacy_rnn\\rnn_cell_impl.py",
    "ast_data": "ClassDef name:DropoutWrapper FunctionDef name:__init__ arg:self arguments arg arg arg Call Call Assign Call"
  },
  {
    "library": "tensorflow",
    "name": "_nothing",
    "source_code": "def _nothing():\n    return constant_op.constant(False)",
    "docstring": "Convenient else branch for when summaries do not record.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\summary_ops_v2.py",
    "ast_data": "FunctionDef name:_nothing arguments Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "get_filterrad",
    "source_code": "def get_filterrad(self):\n    return self._filterrad",
    "docstring": "Return the filterrad setting.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\image.py",
    "ast_data": "FunctionDef name:get_filterrad arg:self arguments arg Return return:yes"
  },
  {
    "library": "scipy",
    "name": "__call__",
    "source_code": "def __call__(self, mean=None, rowcov=1, colcov=1, seed=None):\n    return matrix_normal_frozen(mean, rowcov, colcov, seed=seed)",
    "docstring": "Create a frozen matrix normal distribution. See for more information.",
    "type": "method",
    "file_path": "scipy\\scipy\\stats\\_multivariate.py",
    "ast_data": "FunctionDef name:__call__ arg:self arg:mean arg:rowcov arg:colcov arg:seed arguments arg arg arg arg arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "_unflatten_optim_state",
    "source_code": "def _unflatten_optim_state(fsdp_param_info: FSDPParamInfo, flat_param_state: dict[str, Any], to_save: bool, shard_state: bool, cpu_offload: bool) -> list[dict[str, Any]]:\n    assert not shard_state or to_save, 'If ``shard_state`` is True, ``to_save`` has to be True.'\n    consolidated_state = _communicate_optim_state(fsdp_param_info, flat_param_state)\n    if to_save:\n        unflat_param_state = _unflatten_communicated_optim_state(fsdp_param_info, consolidated_state, shard_state)\n        for optim_state in unflat_param_state:\n            if cpu_offload:\n                for key in list(optim_state.keys()):\n                    state = optim_state[key]\n                    if not isinstance(state, torch.Tensor):\n                        continue\n                    optim_state[key] = state.cpu()\n        return unflat_param_state\n    else:\n        return []",
    "docstring": "Unflattens the optimizer state, consisting of the \"state\" part and the \"param_groups\" part. Unflattening the \"state\" part involves consolidating the state on the target rank and remapping from flattened to unflattened parameter IDs, and the \"param_groups\" part only involves remapping from flattened to unflattened parameter IDs. Args: fsdp_param_info (FSDPParamInfo): The FSDP state, the handle, and a mapping from FQN to original parameter index. flat_param_state (Dict[str, Any]): Entry for the flat parameter in the \"state\" part of the optimizer state dict. to_save (bool): Whether to save the state on this rank. Returns: List[Dict[str, Any]]: A :class: holding the entries in the \"state\" part of the optimizer state dict corresponding to the unflattened parameters comprising the flat parameter if on the target rank or an empty :class: otherwise. The final optimizer state dict will need to map these entries using the proper unflattened parameter IDs.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\fsdp\\_optim_utils.py",
    "ast_data": "FunctionDef name:_unflatten_optim_state arg:fsdp_param_info arg:flat_param_state arg:to_save arg:shard_state arg:cpu_offload arguments arg arg arg arg arg BoolOp Assign Call If Assign Call For If For Call Call Assign If Call Assign Call Return return:yes Return return:no"
  },
  {
    "library": "matplotlib",
    "name": "get_subplotspec",
    "source_code": "def get_subplotspec(self):\n    return self._subplotspec",
    "docstring": "Return the associated with the subplot, or None.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\axes\\_base.py",
    "ast_data": "FunctionDef name:get_subplotspec arg:self arguments arg Return return:yes"
  },
  {
    "library": "kornia",
    "name": "BgrToGrayscale",
    "source_code": "class BgrToGrayscale(Module):\n    ONNX_DEFAULT_INPUTSHAPE: ClassVar[list[int]] = [-1, 3, -1, -1]\n    ONNX_DEFAULT_OUTPUTSHAPE: ClassVar[list[int]] = [-1, 1, -1, -1]\n\n    def forward(self, image: Tensor) -> Tensor:\n        return bgr_to_grayscale(image)",
    "docstring": "Module to convert a BGR image to grayscale version of image. The image data is assumed to be in the range of (0, 1). First flips to RGB, then converts. Shape: - image: :math: - output: :math: reference: Example: >>> input = torch.rand(2, 3, 4, 5) >>> gray = BgrToGrayscale() >>> output = gray(input) # 2x1x4x5",
    "type": "class",
    "file_path": "kornia\\kornia\\color\\gray.py",
    "ast_data": "ClassDef name:BgrToGrayscale FunctionDef name:forward arg:self arg:image arguments arg arg Return return:yes Call"
  },
  {
    "library": "django",
    "name": "prefetch_related",
    "source_code": "def prefetch_related(self, *lookups):\n    self._not_support_combined_queries('prefetch_related')\n    clone = self._chain()\n    if lookups == (None,):\n        clone._prefetch_related_lookups = ()\n    else:\n        for lookup in lookups:\n            if isinstance(lookup, Prefetch):\n                lookup = lookup.prefetch_to\n            lookup = lookup.split(LOOKUP_SEP, 1)[0]\n            if lookup in self.query._filtered_relations:\n                raise ValueError('prefetch_related() is not supported with FilteredRelation.')\n        clone._prefetch_related_lookups = clone._prefetch_related_lookups + lookups\n    return clone",
    "docstring": "Return a new QuerySet instance that will prefetch the specified Many-To-One and Many-To-Many related objects when the QuerySet is evaluated. When prefetch_related() is called more than once, append to the list of prefetch lookups. If prefetch_related(None) is called, clear the list.",
    "type": "method",
    "file_path": "django\\django\\db\\models\\query.py",
    "ast_data": "FunctionDef name:prefetch_related arg:self arguments arg arg Call Assign Call If Compare Assign For If Call Assign Assign Call If Compare Raise Call Assign Return return:yes"
  },
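Usage sketch with hypothetical `Author`/`Book` models:

```python
from django.db.models import Prefetch

# One extra query fetches all related books for the selected authors.
authors = Author.objects.prefetch_related('books')

# A Prefetch object customizes the queryset used for the lookup.
authors = Author.objects.prefetch_related(
    Prefetch('books', queryset=Book.objects.filter(year__gte=2000))
)

# Repeated calls append lookups; passing None clears them all.
authors = authors.prefetch_related(None)
```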
  {
    "library": "pytorch",
    "name": "device",
    "source_code": "class device:\n\n    def __init__(self, device: Any):\n        self.idx = _get_device_index(device, optional=True)\n        self.prev_idx = -1\n\n    def __enter__(self):\n        self.prev_idx = torch._C._accelerator_hooks_maybe_exchange_device(self.idx)\n\n    def __exit__(self, type: Any, value: Any, traceback: Any):\n        self.idx = torch._C._accelerator_hooks_maybe_exchange_device(self.prev_idx)\n        return False",
    "docstring": "Context-manager that changes the selected device. Args: device (torch.device or int): device index to select. It's a no-op if this argument is a negative integer or ``.",
    "type": "class",
    "file_path": "pytorch\\torch\\mtia\\__init__.py",
    "ast_data": "ClassDef name:device FunctionDef name:__init__ arg:self arg:device arguments arg arg Assign Call Assign FunctionDef name:__enter__ arg:self arguments arg Assign Call FunctionDef name:__exit__ arg:self arg:type arg:value arg:traceback arguments arg arg arg arg Assign Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "fuse",
    "source_code": "def fuse(self, node1: BaseSchedulerNode, node2: BaseSchedulerNode) -> FusedSchedulerNode:\n    if node1.is_foreach() or node2.is_foreach():\n        return ForeachKernelSchedulerNode.fuse(node1, node2)\n    else:\n        return FusedSchedulerNode.fuse(node1, node2)",
    "docstring": "Fuse two nodes",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\scheduler.py",
    "ast_data": "FunctionDef name:fuse arg:self arg:node1 arg:node2 arguments arg arg arg If BoolOp Call Call Return return:yes Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "register_fsdp",
    "source_code": "@abstractmethod\ndef register_fsdp(self, fsdp: FullyShardedDataParallel) -> None:\n    raise NotImplementedError(f'{self.__class__.__name__} does not support overlapped FSDP.')",
    "docstring": "Registers the overlapped optimizer with FSDP.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\algorithms\\_optimizer_overlap\\optimizer_overlap.py",
    "ast_data": "FunctionDef name:register_fsdp arg:self arg:fsdp arguments arg arg Raise Call"
  },
  {
    "library": "django",
    "name": "get_min_num",
    "source_code": "def get_min_num(self, request, obj=None, **kwargs):\n    return self.min_num",
    "docstring": "Hook for customizing the min number of inline forms.",
    "type": "method",
    "file_path": "django\\django\\contrib\\admin\\options.py",
    "ast_data": "FunctionDef name:get_min_num arg:self arg:request arg:obj arguments arg arg arg arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "server_def",
    "source_code": "@property\ndef server_def(self):\n    return self._server_def",
    "docstring": "Returns the for this server. Returns: A protocol buffer that describes the configuration of this server.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\training\\server_lib.py",
    "ast_data": "FunctionDef name:server_def arg:self arguments arg Return return:yes"
  },
  {
    "library": "pandas",
    "name": "evaluate",
    "source_code": "def evaluate(self, env, engine: str, parser, term_type, eval_in_python):\n    if engine == 'python':\n        res = self(env)\n    else:\n        left = self.lhs.evaluate(env, engine=engine, parser=parser, term_type=term_type, eval_in_python=eval_in_python)\n        right = self.rhs.evaluate(env, engine=engine, parser=parser, term_type=term_type, eval_in_python=eval_in_python)\n        if self.op in eval_in_python:\n            res = self.func(left.value, right.value)\n        else:\n            from pandas.core.computation.eval import eval\n            res = eval(self, local_dict=env, engine=engine, parser=parser)\n    name = env.add_tmp(res)\n    return term_type(name, env=env)",
    "docstring": "Evaluate a binary operation *before* being passed to the engine. Parameters ---------- env : Scope engine : str parser : str term_type : type eval_in_python : list Returns ------- term_type The \"pre-evaluated\" expression as an instance of ``",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\computation\\ops.py",
    "ast_data": "FunctionDef name:evaluate arg:self arg:env arg:engine arg:parser arg:term_type arg:eval_in_python arguments arg arg arg arg arg arg If Compare Assign Call Assign Call Assign Call If Compare Assign Call Assign Call Assign Call Return return:yes Call"
  },
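The `engine` argument of `pandas.eval` selects between the two branches above; a small sketch:

```python
import pandas as pd

df = pd.DataFrame({'a': [1, 2, 3], 'b': [4, 5, 6]})

# engine='python' takes the first branch and evaluates the parsed
# expression tree directly; the default numexpr engine (when installed)
# pre-evaluates subexpressions as in the else branch.
print(pd.eval('df.a + df.b', engine='python'))
```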
  {
    "library": "tensorflow",
    "name": "run",
    "source_code": "@tf_export(v1=['app.run'])\ndef run(main=None, argv=None):\n    main = main or _sys.modules['__main__'].main\n    _run(main=main, argv=argv, flags_parser=_parse_flags_tolerate_undef)",
    "docstring": "Runs the program with an optional 'main' function and 'argv' list.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\platform\\app.py",
    "ast_data": "FunctionDef name:run arg:main arg:argv arguments arg arg Assign BoolOp Call Call"
  },
  {
    "library": "pytorch",
    "name": "layer_norm_functional",
    "source_code": "@register_inference_rule(torch.nn.functional.layer_norm)\ndef layer_norm_functional(n: Node, symbols, constraints, counter):\n    assert isinstance(n.args[0], Node)\n    return gen_layer_norm_constraints(n, n.args[1], symbols, counter)",
    "docstring": "We generate the constraint: input = output",
    "type": "function",
    "file_path": "pytorch\\torch\\fx\\experimental\\migrate_gradual_types\\constraint_generator.py",
    "ast_data": "FunctionDef name:layer_norm_functional arg:n arg:symbols arg:constraints arg:counter arguments arg arg arg arg Call Return return:yes Call Call"
  },
  {
    "library": "numpy",
    "name": "rjust",
    "source_code": "def rjust(self, width, fillchar=' '):\n    return asarray(rjust(self, width, fillchar))",
    "docstring": "Return an array with the elements of right-justified in a string of length . See Also -------- char.rjust",
    "type": "method",
    "file_path": "numpy\\numpy\\_core\\defchararray.py",
    "ast_data": "FunctionDef name:rjust arg:self arg:width arg:fillchar arguments arg arg arg Return return:yes Call Call"
  },
  {
    "library": "scikit-learn",
    "name": "add_self_request",
    "source_code": "def add_self_request(self, obj):\n    if getattr(obj, '_type', None) == 'metadata_request':\n        self._self_request = deepcopy(obj)\n    elif hasattr(obj, '_get_metadata_request'):\n        self._self_request = deepcopy(obj._get_metadata_request())\n    else:\n        raise ValueError('Given `obj` is neither a `MetadataRequest` nor does it implement the required API. Inheriting from `BaseEstimator` implements the required API.')\n    return self",
    "docstring": "Add (as a consumer) to the routing. This method is used if the router is also a consumer, and hence the router itself needs to be included in the routing. The passed object can be an estimator or a :class:. A router should add itself using this method instead of since it should be treated differently than the other objects to which metadata is routed by the router. Parameters ---------- obj : object This is typically the router instance, i.e. in a `self`.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\utils\\_metadata_requests.py",
    "ast_data": "FunctionDef name:add_self_request arg:self arg:obj arguments arg arg If Compare Call Assign Call If Call Assign Call Call Raise Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "@deprecation.deprecated('2019-01-01', 'The TensorFlow Distributions library has moved to TensorFlow Probability (https://github.com/tensorflow/probability). You should update all references to use `tfp.distributions` instead of `tf.distributions`.', warn_once=True)\ndef __init__(self, df, loc, scale, validate_args=False, allow_nan_stats=True, name='StudentT'):\n    parameters = dict(locals())\n    with ops.name_scope(name, values=[df, loc, scale]) as name:\n        with ops.control_dependencies([check_ops.assert_positive(df)] if validate_args else []):\n            self._df = array_ops.identity(df, name='df')\n            self._loc = array_ops.identity(loc, name='loc')\n            self._scale = array_ops.identity(scale, name='scale')\n            check_ops.assert_same_float_dtype((self._df, self._loc, self._scale))\n    super(StudentT, self).__init__(dtype=self._scale.dtype, reparameterization_type=distribution.FULLY_REPARAMETERIZED, validate_args=validate_args, allow_nan_stats=allow_nan_stats, parameters=parameters, graph_parents=[self._df, self._loc, self._scale], name=name)",
    "docstring": "Construct Student's t distributions. The distributions have degree of freedom , mean , and scale . The parameters , , and must be shaped in a way that supports broadcasting (e.g. is a valid operation). Args: df: Floating-point . The degrees of freedom of the distribution(s). must contain only positive values. loc: Floating-point . The mean(s) of the distribution(s). scale: Floating-point . The scaling factor(s) for the distribution(s). Note that is not technically the standard deviation of this distribution but has semantics more similar to standard deviation than variance. validate_args: Python , default . When distribution parameters are checked for validity despite possibly degrading runtime performance. When invalid inputs may silently render incorrect outputs. allow_nan_stats: Python , default . When , statistics (e.g., mean, mode, variance) use the value \"\" to indicate the result is undefined. When , an exception is raised if one or more of the statistic's batch members are undefined. name: Python name prefixed to Ops created by this class. Raises: TypeError: if loc and scale are different dtypes.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\distributions\\student_t.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:df arg:loc arg:scale arg:validate_args arg:allow_nan_stats arg:name arguments arg arg arg arg arg arg arg Assign Call Call With Call With Call Call Assign Call Assign Call Assign Call Call Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_maybe_load_initial_epoch_from_ckpt",
    "source_code": "def _maybe_load_initial_epoch_from_ckpt(self, initial_epoch):\n    if self._training_state is not None:\n        return self._training_state.maybe_load_initial_epoch_from_ckpt(initial_epoch, mode=ModeKeys.TRAIN)\n    return initial_epoch",
    "docstring": "Maybe load initial epoch from ckpt considering possible worker recovery. Refer to tensorflow/python/keras/distribute/worker_training_state.py for more information. Args: initial_epoch: The original initial_epoch user passes in in . Returns: If the training is recovering from previous failure under multi-worker training setting, return the epoch the training is supposed to continue at. Otherwise, return the the user passes in.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\training.py",
    "ast_data": "FunctionDef name:_maybe_load_initial_epoch_from_ckpt arg:self arg:initial_epoch arguments arg arg If Compare Return return:yes Call Return return:yes"
  },
  {
    "library": "numpy",
    "name": "autostrip",
    "source_code": "def autostrip(self, method):\n    return lambda input: [_.strip() for _ in method(input)]",
    "docstring": "Wrapper to strip each member of the output of . Parameters ---------- method : function Function that takes a single argument and returns a sequence of strings. Returns ------- wrapped : function The result of wrapping . takes a single input argument and returns a list of strings that are stripped of white-space.",
    "type": "method",
    "file_path": "numpy\\numpy\\lib\\_iotools.py",
    "ast_data": "FunctionDef name:autostrip arg:self arg:method arguments arg arg Return return:yes arguments arg Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_create_resource",
    "source_code": "def _create_resource(self):\n    return self._coordinator_instance._create_resource()",
    "docstring": "A function that creates a resource handle for a table on coordinator.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\ps_values.py",
    "ast_data": "FunctionDef name:_create_resource arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "numpy",
    "name": "_index",
    "source_code": "def _index(iterable: Iterable[Statement], id: str) -> int:\n    for i, value in enumerate(iterable):\n        if getattr(value, 'id', None) == id:\n            return i\n    raise ValueError(f'Failed to identify a `ImportFrom` instance with the following id: {id!r}')",
    "docstring": "Identify the first `id`.",
    "type": "function",
    "file_path": "numpy\\numpy\\typing\\mypy_plugin.py",
    "ast_data": "FunctionDef name:_index arg:iterable arg:id arguments arg arg For Call If Compare Call Return return:yes Raise Call"
  },
  {
    "library": "tensorflow",
    "name": "_resource_apply_dense",
    "source_code": "def _resource_apply_dense(self, grad, handle, apply_state):\n    raise NotImplementedError('Must be implemented in subclasses.')",
    "docstring": "Add ops to apply dense gradients to the variable . Args: grad: a representing the gradient. handle: a of dtype which points to the variable to be updated. apply_state: A dict which is used across multiple apply calls. Returns: An which updates the value of the variable.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\optimizer_v2\\optimizer_v2.py",
    "ast_data": "FunctionDef name:_resource_apply_dense arg:self arg:grad arg:handle arg:apply_state arguments arg arg arg arg Raise Call"
  },
  {
    "library": "tensorflow",
    "name": "_ParseBooleanFlag",
    "source_code": "class _ParseBooleanFlag(argparse.Action):\n\n    def __init__(self, option_strings, dest, nargs=None, **kwargs):\n        if nargs != '?':\n            raise ValueError(\"This parser only supports nargs='?' (0 or 1 additional arguments)\")\n        super(_ParseBooleanFlag, self).__init__(option_strings, dest, nargs=nargs, **kwargs)\n\n    def __call__(self, parser, namespace, values, option_string=None):\n        if values is None:\n            flag_value = True\n        elif values.lower() == 'true':\n            flag_value = True\n        elif values.lower() == 'false':\n            flag_value = False\n        else:\n            raise ValueError('Invalid argument to --{}. Must use flag alone, or specify true/false.'.format(self.dest))\n        setattr(namespace, self.dest, flag_value)",
    "docstring": "Helper class to parse boolean flag that optionally accepts truth value.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\lite\\python\\tflite_convert.py",
    "ast_data": "ClassDef name:_ParseBooleanFlag FunctionDef name:__init__ arg:self arg:option_strings arg:dest arg:nargs arguments arg arg arg arg arg If Compare Raise Call Call Call FunctionDef name:__call__ arg:self arg:parser arg:namespace arg:values arg:option_string arguments arg arg arg arg arg If Compare Assign If Compare Call Assign If Compare Call Assign Raise Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "add_inner_graph_id",
    "source_code": "def add_inner_graph_id(self, inner_graph_id):\n    assert isinstance(inner_graph_id, str)\n    self._inner_graph_ids.append(inner_graph_id)",
    "docstring": "Add the debugger-generated ID of a graph nested within this graph. Args: inner_graph_id: The debugger-generated ID of the nested inner graph.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\debug\\lib\\debug_events_reader.py",
    "ast_data": "FunctionDef name:add_inner_graph_id arg:self arg:inner_graph_id arguments arg arg Call Call"
  },
  {
    "library": "scipy",
    "name": "Levy13",
    "source_code": "class Levy13(Benchmark):\n\n    def __init__(self, dimensions=2):\n        Benchmark.__init__(self, dimensions)\n        self._bounds = list(zip([-10.0] * self.N, [10.0] * self.N))\n        self.custom_bounds = [(-5, 5), (-5, 5)]\n        self.global_optimum = [[1 for _ in range(self.N)]]\n        self.fglob = 0.0\n\n    def fun(self, x, *args):\n        self.nfev += 1\n        u = sin(3 * pi * x[0]) ** 2\n        v = (x[0] - 1) ** 2 * (1 + sin(3 * pi * x[1]) ** 2)\n        w = (x[1] - 1) ** 2 * (1 + sin(2 * pi * x[1]) ** 2)\n        return u + v + w",
    "docstring": "Levy13 objective function. This class defines the Levy13 [1]_ global optimization problem. This is a multimodal minimization problem defined as follows: .. math:: f_{\\text{Levy13}}(x) = \\left(x_{1} -1\\right)^{2} \\left[\\sin^{2} \\left(3 \\pi x_{2}\\right) + 1\\right] + \\left(x_{2} - 1\\right)^{2} \\left[\\sin^{2}\\left(2 \\pi x_{2}\\right) + 1\\right] + \\sin^{2}\\left(3 \\pi x_{1}\\right) with :math: for :math:. *Global optimum*: :math: for :math: .. [1] Mishra, S. Some new test functions for global optimization and performance of repulsive particle swarm method. Munich Personal RePEc Archive, 2006, 2718",
    "type": "class",
    "file_path": "scipy\\benchmarks\\benchmarks\\go_benchmark_functions\\go_funcs_L.py",
    "ast_data": "ClassDef name:Levy13 FunctionDef name:__init__ arg:self arg:dimensions arguments arg arg Call Assign Call Call Assign Assign Call Assign FunctionDef name:fun arg:self arg:x arguments arg arg arg Assign Call Assign Call Assign Call Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "is_torch_array",
    "source_code": "def is_torch_array(x: object) -> TypeIs[torch.Tensor]:\n    cls = cast(Hashable, type(x))\n    return _issubclass_fast(cls, 'torch', 'Tensor')",
    "docstring": "Return True if is a PyTorch tensor. This function does not import PyTorch if it has not already been imported and is therefore cheap to use. See Also -------- array_namespace is_array_api_obj is_numpy_array is_cupy_array is_dask_array is_jax_array is_pydata_sparse_array",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\externals\\array_api_compat\\common\\_helpers.py",
    "ast_data": "FunctionDef name:is_torch_array arg:x arguments arg Assign Call Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_get_new_global_index",
    "source_code": "def _get_new_global_index(self, index_override):\n    if index_override is None:\n        global_index = self._next_global_index\n    else:\n        if index_override in self._used_global_indices:\n            raise ValueError('Index %d was already used by another call to add')\n        global_index = index_override\n    self._used_global_indices.add(global_index)\n    while self._next_global_index in self._used_global_indices:\n        self._next_global_index += 1\n    return global_index",
    "docstring": "Return the next unused argument index in order or use an override. Args: index_override: An index to use instead of the next available or None to use the next available. Returns: A valid global_index to use for the next hint argument. Raises: ValueError: If the index_override is already used by another hint.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\lite\\python\\op_hint.py",
    "ast_data": "FunctionDef name:_get_new_global_index arg:self arg:index_override arguments arg arg If Compare Assign If Compare Raise Call Assign Call While Compare Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "symbolic_trace",
    "source_code": "@compatibility(is_backward_compatible=True)\ndef symbolic_trace(root: Union[torch.nn.Module, Callable[..., Any]], concrete_args: Optional[dict[str, Any]]=None) -> GraphModule:\n    tracer = Tracer()\n    graph = tracer.trace(root, concrete_args)\n    name = root.__class__.__name__ if isinstance(root, torch.nn.Module) else root.__name__\n    return _make_graph_module(tracer.root, graph, name)",
    "docstring": "Symbolic tracing API Given an `concrete_argsbbconcrete_argsfx.PH`.",
    "type": "function",
    "file_path": "pytorch\\torch\\fx\\_symbolic_trace.py",
    "ast_data": "FunctionDef name:symbolic_trace arg:root arg:concrete_args arguments arg arg Assign Call Assign Call Assign Call Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "_UnaliasedStorage",
    "source_code": "class _UnaliasedStorage(OutputAliasInfo):\n    pass",
    "docstring": "Singleton to mark that the graph output constructs a new alias or is None",
    "type": "class",
    "file_path": "pytorch\\torch\\_inductor\\cudagraph_trees.py",
    "ast_data": "ClassDef name:_UnaliasedStorage"
  },
  {
    "library": "django",
    "name": "__str__",
    "source_code": "def __str__(self):\n    return str(self.tuple)",
    "docstring": "Return a string representation of the tuple.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\gdal\\envelope.py",
    "ast_data": "FunctionDef name:__str__ arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_process",
    "source_code": "def _process(op_queue, seen_ops):\n    reads = []\n    writes = []\n    op = op_queue.pop()\n    if op in seen_ops:\n        return (reads, writes)\n    seen_ops.add(op)\n    reads, writes = acd_utils.get_read_write_resource_inputs(op)\n    op_queue.extend((t.op for t in op.inputs if t.dtype == dtypes.variant))\n    return (reads, writes)",
    "docstring": "Processes the next element of the op queue. Args: op_queue: Queue of Dataset operations to process. seen_ops: Already processed set of Operations. Returns: A 2-tuple containing sets of resource handles. The first tuple entry contains read-only handles and the second entry contains read-write handles.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\data\\ops\\dataset_ops.py",
    "ast_data": "FunctionDef name:_process arg:op_queue arg:seen_ops arguments arg arg Assign Assign Assign Call If Compare Return return:yes Call Assign Call Call Compare Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_init_from_metadata",
    "source_code": "@classmethod\ndef _init_from_metadata(cls, metadata):\n    init_args = dict(name=metadata['name'], dtype=metadata['dtype'], sparse=metadata['sparse'], ragged=metadata['ragged'], batch_input_shape=metadata['batch_input_shape'])\n    revived_obj = cls(**init_args)\n    with utils.no_automatic_dependency_tracking_scope(revived_obj):\n        revived_obj._config = metadata['config']\n    return (revived_obj, setattr)",
    "docstring": "Revives the saved InputLayer from the Metadata.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\saving\\saved_model\\load.py",
    "ast_data": "FunctionDef name:_init_from_metadata arg:cls arg:metadata arguments arg arg Assign Call Assign Call With Call Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "disable_control_flow_v2",
    "source_code": "@tf_export(v1=['disable_control_flow_v2'])\ndef disable_control_flow_v2():\n    logging.vlog(1, 'Disabling control flow v2')\n    ops._control_flow_api_gauge.get_cell().set(False)\n    control_flow_util.ENABLE_CONTROL_FLOW_V2 = False",
    "docstring": "Opts out of control flow v2. Note: v2 control flow is always enabled inside of tf.function. Calling this function has no effect in that case. If your code needs tf.disable_control_flow_v2() to be called to work properly please file a bug.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\control_flow_v2_toggles.py",
    "ast_data": "FunctionDef name:disable_control_flow_v2 arguments Call Call Call Assign Call"
  },
  {
    "library": "scikit-learn",
    "name": "transform",
    "source_code": "def transform(self, X):\n    X = validate_data(self, X=X, ensure_2d=False, allow_nd=True, ensure_min_samples=1, ensure_min_features=1, reset=False)\n    random_state = check_random_state(self.random_state)\n    n_imgs, img_height, img_width = X.shape[:3]\n    if self.patch_size is None:\n        patch_size = (img_height // 10, img_width // 10)\n    else:\n        if len(self.patch_size) != 2:\n            raise ValueError(f'patch_size must be a tuple of two integers. Got {self.patch_size} instead.')\n        patch_size = self.patch_size\n    n_imgs, img_height, img_width = X.shape[:3]\n    X = np.reshape(X, (n_imgs, img_height, img_width, -1))\n    n_channels = X.shape[-1]\n    patch_height, patch_width = patch_size\n    n_patches = _compute_n_patches(img_height, img_width, patch_height, patch_width, self.max_patches)\n    patches_shape = (n_imgs * n_patches,) + patch_size\n    if n_channels > 1:\n        patches_shape += (n_channels,)\n    patches = np.empty(patches_shape)\n    for ii, image in enumerate(X):\n        patches[ii * n_patches:(ii + 1) * n_patches] = extract_patches_2d(image, patch_size, max_patches=self.max_patches, random_state=random_state)\n    return patches",
    "docstring": "Transform the image samples in into a matrix of patch data. Parameters ---------- X : ndarray of shape (n_samples, image_height, image_width) or (n_samples, image_height, image_width, n_channels) Array of images from which to extract patches. For color images, the last dimension specifies the channel: a RGB image would have . Returns ------- patches : array of shape (n_patches, patch_height, patch_width) or (n_patches, patch_height, patch_width, n_channels) The collection of patches extracted from the images, where is either or the total number of patches that can be extracted.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\feature_extraction\\image.py",
    "ast_data": "FunctionDef name:transform arg:self arg:X arguments arg arg Assign Call Assign Call Assign If Compare Assign If Compare Call Raise Call Assign Assign Assign Call Assign Assign Assign Call Assign If Compare Assign Call For Call Assign Call Return return:yes"
  },
  {
    "library": "kornia",
    "name": "Rot180",
    "source_code": "class Rot180(Module):\n\n    def forward(self, input: Tensor) -> Tensor:\n        return rot180(input)\n\n    def __repr__(self) -> str:\n        return self.__class__.__name__",
    "docstring": "Rotate a tensor image or a batch of tensor images 180 degrees. Input must be a tensor of shape (C, H, W) or a batch of tensors :math:. Args: input: input tensor. Examples: >>> rot180 = Rot180() >>> input = torch.tensor([[[ ... [0., 0., 0.], ... [0., 0., 0.], ... [0., 1., 1.] ... ]]]) >>> rot180(input) tensor([[[[1., 1., 0.], [0., 0., 0.], [0., 0., 0.]]]])",
    "type": "class",
    "file_path": "kornia\\kornia\\geometry\\transform\\flips.py",
    "ast_data": "ClassDef name:Rot180 FunctionDef name:forward arg:self arg:input arguments arg arg Return return:yes Call FunctionDef name:__repr__ arg:self arguments arg Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "_get",
    "source_code": "def _get(self, key):\n    return dict.__getitem__(self, key)",
    "docstring": "Directly read data bypassing deprecation, backend and validation logic. Notes ----- As end user or downstream library you almost always should use ``, i.e. it is subject to Matplotlib's API and deprecation policy. :meta public:",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\__init__.py",
    "ast_data": "FunctionDef name:_get arg:self arg:key arguments arg arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "resource_handle_call_time_value",
    "source_code": "def resource_handle_call_time_value(self):\n\n    def closure():\n        dispatch_context = coordinator_context.get_current_dispatch_context()\n        if dispatch_context:\n            local_resource_restore_context = get_current_local_resource_restore_context()\n            if local_resource_restore_context:\n                remote_value = local_resource_restore_context.instance.resource_handle\n            else:\n                remote_value = self._distributed_table._values[dispatch_context.worker_index]\n            ret = dispatch_context.maybe_get_remote_value(remote_value)\n            return ret\n        else:\n            return self._coordinator_instance.resource_handle\n    return (closure, tensor.TensorSpec(shape=(), dtype=dtypes.resource))",
    "docstring": "Returns a closure to run for a resource handle at call time and its spec. This function is called in self.resource_handle to create a placeholder which returns a resource handle on some worker or on the coordinator.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\ps_values.py",
    "ast_data": "FunctionDef name:resource_handle_call_time_value arg:self arguments arg FunctionDef name:closure arguments Assign Call If Assign Call If Assign Assign Assign Call Return return:yes Return return:yes Return return:yes Call"
  },
  {
    "library": "sphinx",
    "name": "get_colspec",
    "source_code": "def get_colspec(self) -> str:\n    if self.colspec:\n        return self.colspec\n    _colsep = self.colsep\n    assert _colsep is not None\n    if self.colwidths and 'colwidths-given' in self.classes:\n        total = sum(self.colwidths)\n        colspecs = ['\\\\X{%d}{%d}' % (width, total) for width in self.colwidths]\n        return f'{{{_colsep}{_colsep.join(colspecs)}{_colsep}}}' + CR\n    elif self.has_problematic:\n        return '{%s*{%d}{\\\\X{1}{%d}%s}}' % (_colsep, self.colcount, self.colcount, _colsep) + CR\n    elif self.get_table_type() == 'tabulary':\n        return '{' + _colsep + ('T' + _colsep) * self.colcount + '}' + CR\n    elif self.has_oldproblematic:\n        return '{%s*{%d}{\\\\X{1}{%d}%s}}' % (_colsep, self.colcount, self.colcount, _colsep) + CR\n    else:\n        return '{' + _colsep + ('l' + _colsep) * self.colcount + '}' + CR",
    "docstring": "Returns a column spec of table. This is what LaTeX calls the 'preamble argument' of the used table environment. .. note:: The ``.",
    "type": "method",
    "file_path": "sphinx\\sphinx\\writers\\latex.py",
    "ast_data": "FunctionDef name:get_colspec arg:self arguments arg If Return return:yes Assign Compare If BoolOp Compare Assign Call Assign Return return:yes Call If Return return:yes If Compare Call Return return:yes If Return return:yes Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "forward",
    "source_code": "def forward(self, x: torch.Tensor) -> torch.Tensor:\n    weight_quant_dequant = self.get_weight()\n    result = F.linear(x, weight_quant_dequant, self.bias)\n    return result",
    "docstring": "we have: w(float) -- quant - dequant x(float) ------------- F.linear --- In the full model, we will see w(float) -- quant - *dequant x -- quant --- *dequant -- *F.linear --- *quant - dequant and the backend should be able to fuse the ops with into a quantized linear",
    "type": "method",
    "file_path": "pytorch\\torch\\ao\\nn\\quantized\\reference\\modules\\linear.py",
    "ast_data": "FunctionDef name:forward arg:self arg:x arguments arg arg Assign Call Assign Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_sparsify_hook",
    "source_code": "def _sparsify_hook(self, name):\n    mask = self.get_mask(name)\n    features = self.data_groups[name]['features']\n    feature_dim = self.data_groups[name]['feature_dim']\n\n    def hook(module, input):\n        input_data = input[0]\n        if features is None:\n            return input_data * mask\n        else:\n            for feature_idx in range(0, len(features)):\n                feature = torch.Tensor([features[feature_idx]]).long().to(input_data.device)\n                sparsified = torch.index_select(input_data, feature_dim, feature) * mask[feature_idx]\n                input_data.index_copy_(feature_dim, feature, sparsified)\n            return input_data\n    return hook",
    "docstring": "Returns hook that applies sparsification mask to input entering the attached layer",
    "type": "method",
    "file_path": "pytorch\\torch\\ao\\pruning\\_experimental\\activation_sparsifier\\activation_sparsifier.py",
    "ast_data": "FunctionDef name:_sparsify_hook arg:self arg:name arguments arg arg Assign Call Assign Assign FunctionDef name:hook arg:module arg:input arguments arg arg Assign If Compare Return return:yes For Call Call Assign Call Call Call Assign Call Call Return return:yes Return return:yes"
  },
  {
    "library": "django",
    "name": "GeomOutput",
    "source_code": "class GeomOutput(GEOSFuncFactory):\n    restype = GEOM_PTR\n    errcheck = staticmethod(check_geom)",
    "docstring": "For GEOS routines that return a geometry.",
    "type": "class",
    "file_path": "django\\django\\contrib\\gis\\geos\\prototypes\\geom.py",
    "ast_data": "ClassDef name:GeomOutput Assign Assign Call"
  },
  {
    "library": "kornia",
    "name": "forward",
    "source_code": "def forward(self, laf: torch.Tensor, img: torch.Tensor) -> torch.Tensor:\n    return laf",
    "docstring": "Run forward. Args: laf: :math: img: :math: Returns: LAF, unchanged :math:",
    "type": "method",
    "file_path": "kornia\\kornia\\feature\\orientation.py",
    "ast_data": "FunctionDef name:forward arg:self arg:laf arg:img arguments arg arg arg Return return:yes"
  },
  {
    "library": "django",
    "name": "_expiry_date",
    "source_code": "def _expiry_date(self, session_data):\n    return session_data.get('_session_expiry') or self._last_modification() + datetime.timedelta(seconds=self.get_session_cookie_age())",
    "docstring": "Return the expiry time of the file storing the session's content.",
    "type": "method",
    "file_path": "django\\django\\contrib\\sessions\\backends\\file.py",
    "ast_data": "FunctionDef name:_expiry_date arg:self arg:session_data arguments arg arg Return return:yes BoolOp Call Call Call Call"
  },
  {
    "library": "scikit-learn",
    "name": "clone_with_theta",
    "source_code": "def clone_with_theta(self, theta):\n    cloned = clone(self)\n    cloned.theta = theta\n    return cloned",
    "docstring": "Returns a clone of self with given hyperparameters theta. Parameters ---------- theta : ndarray of shape (n_dims,) The hyperparameters",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\gaussian_process\\kernels.py",
    "ast_data": "FunctionDef name:clone_with_theta arg:self arg:theta arguments arg arg Assign Call Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "CoordinatorMode",
    "source_code": "class CoordinatorMode(object):\n    STANDALONE_CLIENT = 'standalone_client'\n    INDEPENDENT_WORKER = 'independent_worker'",
    "docstring": "Specify how distribute coordinator runs.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\distribute_coordinator.py",
    "ast_data": "ClassDef name:CoordinatorMode Assign Assign"
  },
  {
    "library": "seaborn",
    "name": "despine",
    "source_code": "def despine(self, **kwargs):\n    utils.despine(self._figure, **kwargs)\n    return self",
    "docstring": "Remove axis spines from the facets.",
    "type": "method",
    "file_path": "seaborn\\seaborn\\axisgrid.py",
    "ast_data": "FunctionDef name:despine arg:self arguments arg arg Call Return return:yes"
  },
  {
    "library": "pandas",
    "name": "_expand_user",
    "source_code": "def _expand_user(filepath_or_buffer: str | BaseBufferT) -> str | BaseBufferT:\n    if isinstance(filepath_or_buffer, str):\n        return os.path.expanduser(filepath_or_buffer)\n    return filepath_or_buffer",
    "docstring": "Return the argument with an initial component of ~ or ~user replaced by that user's home directory. Parameters ---------- filepath_or_buffer : object to be converted if possible Returns ------- expanded_filepath_or_buffer : an expanded filepath or the input if not expandable",
    "type": "function",
    "file_path": "pandas\\pandas\\io\\common.py",
    "ast_data": "FunctionDef name:_expand_user arg:filepath_or_buffer arguments arg If Call Return return:yes Call Return return:yes"
  },
  {
    "library": "django",
    "name": "__and__",
    "source_code": "def __and__(self, other):\n    return self.intersection(other)",
    "docstring": "Return the intersection of this Geometry and the other.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\gdal\\geometries.py",
    "ast_data": "FunctionDef name:__and__ arg:self arg:other arguments arg arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_set_per_output_metric_attributes",
    "source_code": "def _set_per_output_metric_attributes(self, metrics_dict, output_index):\n    updated_metrics_dict = collections.OrderedDict()\n    for metric_name, metric_fn in metrics_dict.items():\n        metric_name = self._add_unique_metric_name(metric_name, metric_fn, output_index)\n        metric_fn._name = metric_name\n        updated_metrics_dict[metric_name] = metric_fn\n        self._compile_metric_functions.append(metric_fn)\n    return updated_metrics_dict",
    "docstring": "Sets the metric attributes on the model for the given output. Args: metrics_dict: A dict with metric names as keys and metric fns as values. output_index: The index of the model output for which the metric attributes are added. Returns: Metrics dict updated with unique metric names as keys.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\training_v1.py",
    "ast_data": "FunctionDef name:_set_per_output_metric_attributes arg:self arg:metrics_dict arg:output_index arguments arg arg arg Assign Call For Call Assign Call Assign Assign Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "get_position",
    "source_code": "def get_position(self, figure):\n    gridspec = self.get_gridspec()\n    nrows, ncols = gridspec.get_geometry()\n    rows, cols = np.unravel_index([self.num1, self.num2], (nrows, ncols))\n    fig_bottoms, fig_tops, fig_lefts, fig_rights = gridspec.get_grid_positions(figure)\n    fig_bottom = fig_bottoms[rows].min()\n    fig_top = fig_tops[rows].max()\n    fig_left = fig_lefts[cols].min()\n    fig_right = fig_rights[cols].max()\n    return Bbox.from_extents(fig_left, fig_bottom, fig_right, fig_top)",
    "docstring": "Update the subplot position from ``.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\gridspec.py",
    "ast_data": "FunctionDef name:get_position arg:self arg:figure arguments arg arg Assign Call Assign Call Assign Call Assign Call Assign Call Assign Call Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "createResolutionCallbackFromFrame",
    "source_code": "def createResolutionCallbackFromFrame(frames_up: int=0):\n    frame = inspect.currentframe()\n    i = 0\n    while i < frames_up + 1:\n        assert frame is not None\n        frame = frame.f_back\n        i += 1\n    assert frame is not None\n    f_locals = frame.f_locals\n    f_globals = frame.f_globals\n\n    class env:\n\n        def __getattr__(self, key):\n            if key in f_locals:\n                return f_locals[key]\n            elif key in f_globals:\n                return f_globals[key]\n            elif key in dir(builtins):\n                return getattr(builtins, key)\n    return createResolutionCallbackFromEnv(env())",
    "docstring": "Creates a function which, given a string variable name, returns the value of the variable in the scope of the caller of the function which called createResolutionCallbackFromFrame (by default). This is used to enable access in-scope Python variables inside TorchScript fragments. frames_up is number of additional frames to go up on the stack. The default value is 0, which correspond to the frame of the caller of createResolutionCallbackFromFrame. Also for example, if frames_up is set to 1, then the frame of the caller's caller of createResolutionCallbackFromFrame will be taken. For example, the following program prints 2:: def bar(): cb = createResolutionCallbackFromFrame(1) print(cb(\"foo\")) def baz(): foo = 2 bar() baz()",
    "type": "function",
    "file_path": "pytorch\\torch\\_jit_internal.py",
    "ast_data": "FunctionDef name:createResolutionCallbackFromFrame arg:frames_up arguments arg Assign Call Assign While Compare Compare Assign Compare Assign Assign ClassDef name:env FunctionDef name:__getattr__ arg:self arg:key arguments arg arg If Compare Return return:yes If Compare Return return:yes If Compare Call Return return:yes Call Return return:yes Call Call"
  },
  {
    "library": "authlib",
    "name": "client_info",
    "source_code": "@property\ndef client_info(self):\n    return dict(client_id=self.client_id, client_secret=self.client_secret, client_id_issued_at=self.client_id_issued_at, client_secret_expires_at=self.client_secret_expires_at)",
    "docstring": "Implementation for Client Info in OAuth 2.0 Dynamic Client Registration Protocol via _. .. _:",
    "type": "method",
    "file_path": "authlib\\authlib\\integrations\\sqla_oauth2\\client_mixin.py",
    "ast_data": "FunctionDef name:client_info arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "synchronize",
    "source_code": "def synchronize(self) -> None:\n    super().synchronize()",
    "docstring": "Wait for the event to complete. Waits until the completion of all work currently captured in this event. This prevents the CPU thread from proceeding until the event completes.",
    "type": "method",
    "file_path": "pytorch\\torch\\xpu\\streams.py",
    "ast_data": "FunctionDef name:synchronize arg:self arguments arg Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_require_strategy_scope_extended",
    "source_code": "def _require_strategy_scope_extended(extended):\n    context = _get_per_thread_mode()\n    if context.strategy.extended is extended:\n        return\n    strategy = extended._container_strategy()\n    _wrong_strategy_scope(strategy, context)",
    "docstring": "Verify in a in this thread.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\distribute_lib.py",
    "ast_data": "FunctionDef name:_require_strategy_scope_extended arg:extended arguments arg Assign Call If Compare Return return:no Assign Call Call"
  },
  {
    "library": "tensorflow",
    "name": "wait_for_session",
    "source_code": "def wait_for_session(self, master: str, config=None, max_wait_secs=float('Inf')) -> Optional[session.Session]:\n    self._target = master\n    if max_wait_secs is None:\n        max_wait_secs = float('Inf')\n    timer = _CountDownTimer(max_wait_secs)\n    while True:\n        sess = session.Session(self._target, graph=self._graph, config=config)\n        not_ready_msg = None\n        not_ready_local_msg = None\n        local_init_success, not_ready_local_msg = self._try_run_local_init_op(sess)\n        if local_init_success:\n            is_ready, not_ready_msg = self._model_ready(sess)\n            if is_ready:\n                return sess\n        self._safe_close(sess)\n        remaining_ms_after_wait = timer.secs_remaining() - self._recovery_wait_secs\n        if remaining_ms_after_wait < 0:\n            raise errors.DeadlineExceededError(None, None, 'Session was not ready after waiting %d secs.' % (max_wait_secs,))\n        logging.info('Waiting for model to be ready.  Ready_for_local_init_op:  %s, ready: %s', not_ready_local_msg, not_ready_msg)\n        time.sleep(self._recovery_wait_secs)",
    "docstring": "Creates a new and waits for model to be ready. Creates a new on 'master'. Waits for the model to be initialized or recovered from a checkpoint. It's expected that another thread or process will make the model ready, and that this is intended to be used by threads/processes that participate in a distributed training configuration where a different thread/process is responsible for initializing or recovering the model being trained. NB: The amount of time this method waits for the session is bounded by max_wait_secs. By default, this function will wait indefinitely. Args: master: representation of the TensorFlow master to use. config: Optional ConfigProto proto used to configure the session. max_wait_secs: Maximum time to wait for the session to become available. Returns: A . May be None if the operation exceeds the timeout specified by config.operation_timeout_in_ms. Raises: tf.DeadlineExceededError: if the session is not available after max_wait_secs.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\training\\session_manager.py",
    "ast_data": "FunctionDef name:wait_for_session arg:self arg:master arg:config arg:max_wait_secs arguments arg arg arg arg Call Assign If Compare Assign Call Assign Call While Assign Call Assign Assign Assign Call If Assign Call If Return return:yes Call Assign Call If Compare Raise Call Call Call"
  },
  {
    "library": "cherrypy",
    "name": "generate_id",
    "source_code": "def generate_id(self):\n    return binascii.hexlify(os.urandom(20)).decode('ascii')",
    "docstring": "Return a new session id.",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\lib\\sessions.py",
    "ast_data": "FunctionDef name:generate_id arg:self arguments arg Return return:yes Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "copy_clean",
    "source_code": "def copy_clean(node, preserve_annos=None):\n    return CleanCopier(preserve_annos).copy(node)",
    "docstring": "Creates a deep copy of an AST. The copy will not include fields that are prefixed by '__', with the exception of user-specified annotations. Args: node: ast.AST preserve_annos: Optional[Set[Hashable]], annotation keys to include in the copy Returns: ast.AST",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\autograph\\pyct\\ast_util.py",
    "ast_data": "FunctionDef name:copy_clean arg:node arg:preserve_annos arguments arg arg Return return:yes Call Call"
  },
  {
    "library": "scipy",
    "name": "prepare_events",
    "source_code": "def prepare_events(events):\n    if callable(events):\n        events = (events,)\n    max_events = np.empty(len(events))\n    direction = np.empty(len(events))\n    for i, event in enumerate(events):\n        terminal = getattr(event, 'terminal', None)\n        direction[i] = getattr(event, 'direction', 0)\n        message = 'The `terminal` attribute of each event must be a boolean or positive integer.'\n        if terminal is None or terminal == 0:\n            max_events[i] = np.inf\n        elif int(terminal) == terminal and terminal > 0:\n            max_events[i] = terminal\n        else:\n            raise ValueError(message)\n    return (events, max_events, direction)",
    "docstring": "Standardize event functions and extract attributes.",
    "type": "function",
    "file_path": "scipy\\scipy\\integrate\\_ivp\\ivp.py",
    "ast_data": "FunctionDef name:prepare_events arg:events arguments arg If Call Assign Assign Call Call Assign Call Call For Call Assign Call Assign Call Assign If BoolOp Compare Compare Assign If BoolOp Compare Call Compare Assign Raise Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "extracted_wheel",
    "source_code": "@contextlib.contextmanager\ndef extracted_wheel(self, wheel: Path | str) -> Generator[Path]:\n    with tempfile.TemporaryDirectory(prefix='wheel-') as tempdir:\n        self.wheel_unpack(wheel, tempdir)\n        subdirs = [p for p in Path(tempdir).absolute().iterdir() if p.is_dir()]\n        if len(subdirs) != 1:\n            raise RuntimeError(f'Expected exactly one directory in {tempdir}, got {[str(d) for d in subdirs]}.')\n        yield subdirs[0]",
    "docstring": "Download and extract a wheel into a temporary directory.",
    "type": "method",
    "file_path": "pytorch\\tools\\nightly.py",
    "ast_data": "FunctionDef name:extracted_wheel arg:self arg:wheel arguments arg arg With Call Call Assign Call Call Call Call If Compare Call Raise Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_ImagGrad",
    "source_code": "@ops.RegisterGradient('Imag')\ndef _ImagGrad(_, grad):\n    zero = constant_op.constant(0, dtype=grad.dtype)\n    return math_ops.complex(zero, grad)",
    "docstring": "Returns 'grad' as the imaginary part and set the real part 0.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\math_grad.py",
    "ast_data": "FunctionDef name:_ImagGrad arg:_ arg:grad arguments arg arg Assign Call Return return:yes Call Call"
  },
  {
    "library": "django",
    "name": "send",
    "source_code": "def send(self, fail_silently=False):\n    if not self.recipients():\n        return 0\n    return self.get_connection(fail_silently).send_messages([self])",
    "docstring": "Send the email message.",
    "type": "method",
    "file_path": "django\\django\\core\\mail\\message.py",
    "ast_data": "FunctionDef name:send arg:self arg:fail_silently arguments arg arg If Call Return return:yes Return return:yes Call Call"
  },
  {
    "library": "scikit-learn",
    "name": "_get_metadata_request",
    "source_code": "def _get_metadata_request(self):\n    if hasattr(self, '_metadata_request'):\n        requests = get_routing_for_object(self._metadata_request)\n    else:\n        requests = self._get_default_requests()\n    return requests",
    "docstring": "Get requested data properties. Please check :ref: on how the routing mechanism works. Returns ------- request : MetadataRequest A :class: instance.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\utils\\_metadata_requests.py",
    "ast_data": "FunctionDef name:_get_metadata_request arg:self arguments arg If Call Assign Call Assign Call Return return:yes"
  },
  {
    "library": "seaborn",
    "name": "_eval_univariate",
    "source_code": "def _eval_univariate(self, x, weights):\n    bin_kws = self.bin_kws\n    if bin_kws is None:\n        bin_kws = self.define_bin_params(x, weights=weights, cache=False)\n    density = self.stat == 'density'\n    hist, bin_edges = np.histogram(x, **bin_kws, weights=weights, density=density)\n    if self.stat == 'probability' or self.stat == 'proportion':\n        hist = hist.astype(float) / hist.sum()\n    elif self.stat == 'percent':\n        hist = hist.astype(float) / hist.sum() * 100\n    elif self.stat == 'frequency':\n        hist = hist.astype(float) / np.diff(bin_edges)\n    if self.cumulative:\n        if self.stat in ['density', 'frequency']:\n            hist = (hist * np.diff(bin_edges)).cumsum()\n        else:\n            hist = hist.cumsum()\n    return (hist, bin_edges)",
    "docstring": "Inner function for histogram of one variable.",
    "type": "method",
    "file_path": "seaborn\\seaborn\\_statistics.py",
    "ast_data": "FunctionDef name:_eval_univariate arg:self arg:x arg:weights arguments arg arg arg Assign If Compare Assign Call Assign Compare Assign Call If BoolOp Compare Compare Assign Call Call If Compare Assign Call Call If Compare Assign Call Call If If Compare Assign Call Call Assign Call Return return:yes"
  },
  {
    "library": "numpy",
    "name": "_arraymethod",
    "source_code": "def _arraymethod(funcname, onmask=True):\n\n    def wrapped_method(self, *args, **params):\n        result = getattr(self._data, funcname)(*args, **params)\n        result = result.view(type(self))\n        result._update_from(self)\n        mask = self._mask\n        if not onmask:\n            result.__setmask__(mask)\n        elif mask is not nomask:\n            result._mask = getattr(mask, funcname)(*args, **params)\n        return result\n    methdoc = getattr(ndarray, funcname, None) or getattr(np, funcname, None)\n    if methdoc is not None:\n        wrapped_method.__doc__ = methdoc.__doc__\n    wrapped_method.__name__ = funcname\n    return wrapped_method",
    "docstring": "Return a class method wrapper around a basic array method. Creates a class method which returns a masked array, where the new `onmask_onmask` attribute. Returns ------- method : instancemethod Class method wrapper of the specified basic array method.",
    "type": "function",
    "file_path": "numpy\\numpy\\ma\\core.py",
    "ast_data": "FunctionDef name:_arraymethod arg:funcname arg:onmask arguments arg arg FunctionDef name:wrapped_method arg:self arguments arg arg arg Assign Call Call Assign Call Call Call Assign If Call If Compare Assign Call Call Return return:yes Assign BoolOp Call Call If Compare Assign Assign Return return:yes"
  },
  {
    "library": "numpy",
    "name": "get_data",
    "source_code": "def get_data(self):\n    warnings.warn('\"get_data\" is deprecated. Use \"data\" instead', DeprecationWarning, stacklevel=2)\n    return self.data",
    "docstring": "Deprecated getter for the property. .. deprecated:: 1.21",
    "type": "method",
    "file_path": "numpy\\numpy\\_core\\_internal.py",
    "ast_data": "FunctionDef name:get_data arg:self arguments arg Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_add_argument_transformer",
    "source_code": "def _add_argument_transformer(parent, node, full_name, name, logs, arg_name, arg_value_ast):\n    node.keywords.append(ast.keyword(arg=arg_name, value=arg_value_ast))\n    logs.append((ast_edits.INFO, node.lineno, node.col_offset, \"Adding argument '%s' to call to %s.\" % (pasta.dump(node.keywords[-1]), full_name or name)))\n    return node",
    "docstring": "Adds an argument (as a final kwarg arg_name=arg_value_ast).",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\tools\\compatibility\\tf_upgrade_v2.py",
    "ast_data": "FunctionDef name:_add_argument_transformer arg:parent arg:node arg:full_name arg:name arg:logs arg:arg_name arg:arg_value_ast arguments arg arg arg arg arg arg arg Call Call Call Call BoolOp Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_LSTMSingleLayer",
    "source_code": "class _LSTMSingleLayer(torch.nn.Module):\n\n    def __init__(self, input_dim: int, hidden_dim: int, bias: bool=True, device=None, dtype=None, *, split_gates=False) -> None:\n        factory_kwargs = {'device': device, 'dtype': dtype}\n        super().__init__()\n        self.cell = LSTMCell(input_dim, hidden_dim, bias=bias, split_gates=split_gates, **factory_kwargs)\n\n    def forward(self, x: Tensor, hidden: Optional[tuple[Tensor, Tensor]]=None):\n        result = []\n        seq_len = x.shape[0]\n        for i in range(seq_len):\n            hidden = self.cell(x[i], hidden)\n            result.append(hidden[0])\n        result_tensor = torch.stack(result, 0)\n        return (result_tensor, hidden)\n\n    @classmethod\n    def from_params(cls, *args, **kwargs):\n        cell = LSTMCell.from_params(*args, **kwargs)\n        layer = cls(cell.input_size, cell.hidden_size, cell.bias, split_gates=cell.split_gates)\n        layer.cell = cell\n        return layer",
    "docstring": "A single one-directional LSTM layer. The difference between a layer and a cell is that the layer can process a sequence, while the cell only expects an instantaneous value.",
    "type": "class",
    "file_path": "pytorch\\torch\\ao\\nn\\quantizable\\modules\\rnn.py",
    "ast_data": "ClassDef name:_LSTMSingleLayer FunctionDef name:__init__ arg:self arg:input_dim arg:hidden_dim arg:bias arg:device arg:dtype arguments arg arg arg arg arg arg arg Assign Call Call Assign Call FunctionDef name:forward arg:self arg:x arg:hidden arguments arg arg arg Assign Assign For Call Assign Call Call Assign Call Return return:yes FunctionDef name:from_params arg:cls arguments arg arg arg Assign Call Assign Call Assign Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "set_autoscale_on",
    "source_code": "def set_autoscale_on(self, b):\n    for axis in self._axis_map.values():\n        axis._set_autoscale_on(b)",
    "docstring": "Set whether autoscaling is applied to each axis on the next draw or call to . Parameters ---------- b : bool",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\axes\\_base.py",
    "ast_data": "FunctionDef name:set_autoscale_on arg:self arg:b arguments arg arg For Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_rename_if_any_arg_found_transformer",
    "source_code": "def _rename_if_any_arg_found_transformer(parent, node, full_name, name, logs, arg_names=None, arg_ok_predicate=None, remove_if_ok=False, message=None):\n    for arg_name in arg_names:\n        rename_node = _rename_if_arg_found_transformer(parent, node, full_name, name, logs, arg_name, arg_ok_predicate, remove_if_ok, message)\n        node = rename_node if rename_node else node\n    return node",
    "docstring": "Replaces the given call with tf.compat.v1 if any of the arg_names is found. Args: parent: Parent of node. node: ast.Call node to modify. full_name: full name of function to modify. name: name of function to modify. logs: list of logs to append to. arg_names: list of names of the argument to look for. arg_ok_predicate: predicate callable with the ast of the argument value, returns whether the argument value is allowed. remove_if_ok: remove the argument if present and ok as determined by arg_ok_predicate. message: message to print if a non-ok arg is found (and hence, the function is renamed to its compat.v1 version). Returns: node, if it was modified, else None.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\tools\\compatibility\\tf_upgrade_v2.py",
    "ast_data": "FunctionDef name:_rename_if_any_arg_found_transformer arg:parent arg:node arg:full_name arg:name arg:logs arg:arg_names arg:arg_ok_predicate arg:remove_if_ok arg:message arguments arg arg arg arg arg arg arg arg arg For Assign Call Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "use_wrapped_call",
    "source_code": "def use_wrapped_call(layer, call_fn, default_training_value=None, return_method=False):\n    expects_training_arg = layer_uses_training_bool(layer)\n    if hasattr(call_fn, 'original_layer_call'):\n        original_call = call_fn.original_layer_call\n        call_fn = call_fn.__call__\n    else:\n        original_call = call_fn\n    fn, arg_spec = maybe_add_training_arg(original_call, call_fn, expects_training_arg, default_training_value)\n\n    def return_outputs_and_add_losses(*args, **kwargs):\n        if return_method:\n            args = args[1:]\n        outputs, losses = fn(*args, **kwargs)\n        layer.add_loss(losses, inputs=True)\n        if context.executing_eagerly():\n            for i in layer._flatten_layers():\n                if i is not layer:\n                    i._eager_losses = [base_layer_utils.REVIVED_LOSS_PLACEHOLDER]\n        return outputs\n    decorated = tf_decorator.make_decorator(target=call_fn, decorator_func=return_outputs_and_add_losses, decorator_argspec=arg_spec)\n    if return_method:\n        return types.MethodType(decorated, layer)\n    else:\n        return decorated",
    "docstring": "Creates fn that adds the losses returned by call_fn & returns the outputs. Args: layer: A Keras layer object call_fn: tf.function that takes layer inputs (and possibly a training arg), and returns a tuple of (outputs, list of losses). default_training_value: Default value of the training kwarg. If , the default is . return_method: Whether to return a method bound to the layer. Returns: function that calls call_fn and returns the outputs. Losses returned by call_fn are added to the layer losses.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\saving\\saved_model\\utils.py",
    "ast_data": "FunctionDef name:use_wrapped_call arg:layer arg:call_fn arg:default_training_value arg:return_method arguments arg arg arg arg Assign Call If Call Assign Assign Assign Assign Call FunctionDef name:return_outputs_and_add_losses arguments arg arg If Assign Assign Call Call If Call For Call If Compare Assign Return return:yes Assign Call If Return return:yes Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "set_local_bwd_input",
    "source_code": "def set_local_bwd_input(self, next_stage_bwd_outputs: tuple[Optional[torch.Tensor], ...], mb_index: int) -> None:\n    assert isinstance(next_stage_bwd_outputs, tuple), f'Expected tuple, got {type(next_stage_bwd_outputs)}'\n    assert self.has_backward, \"can't set bwd input if this stage doesn't have backward\"\n    assert not self.is_last, \"can't set bwd input if this stage is last\"\n    recv_infos = self.grad_recv_info[mb_index]\n    for info, tensor in zip(recv_infos, next_stage_bwd_outputs):\n        assert isinstance(tensor, torch.Tensor), f'expected tensor values as outputs from prev stage, got {type(tensor)}'\n        assert isinstance(info, _RecvInfo), f'Expected a recv info, got {type(info)}'\n        info.buffer = tensor",
    "docstring": "Moves 'grad input' tensors from the next stage to 'grad_output' on this stage, avoiding a copy or send/recv. Does not detach or set '_requires_grad'.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\pipelining\\stage.py",
    "ast_data": "FunctionDef name:set_local_bwd_input arg:self arg:next_stage_bwd_outputs arg:mb_index arguments arg arg arg Call Call Assign For Call Call Call Call Call Assign"
  },
  {
    "library": "scipy",
    "name": "RandomDisplacement",
    "source_code": "class RandomDisplacement:\n\n    def __init__(self, stepsize=0.5, rng=None):\n        self.stepsize = stepsize\n        self.rng = check_random_state(rng)\n\n    def __call__(self, x):\n        x += self.rng.uniform(-self.stepsize, self.stepsize, np.shape(x))\n        return x",
    "docstring": "Add a random displacement of maximum size to each coordinate. Calling this updates in-place. Parameters ---------- stepsize : float, optional Maximum stepsize in any dimension rng : {None, int, }, optional Random number generator",
    "type": "class",
    "file_path": "scipy\\scipy\\optimize\\_basinhopping.py",
    "ast_data": "ClassDef name:RandomDisplacement FunctionDef name:__init__ arg:self arg:stepsize arg:rng arguments arg arg arg Assign Assign Call FunctionDef name:__call__ arg:self arg:x arguments arg arg Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "is_namedtuple_class",
    "source_code": "def is_namedtuple_class(cls: type) -> bool:\n    return isinstance(cls, type) and issubclass(cls, tuple) and isinstance(getattr(cls, '_fields', None), tuple) and all((type(field) is str for field in cls._fields)) and callable(getattr(cls, '_make', None)) and callable(getattr(cls, '_asdict', None))",
    "docstring": "Return whether the class is a subclass of namedtuple.",
    "type": "function",
    "file_path": "pytorch\\torch\\utils\\_pytree.py",
    "ast_data": "FunctionDef name:is_namedtuple_class arg:cls arguments arg Return return:yes BoolOp Call Call Call Call Call Compare Call Call Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "__init__",
    "source_code": "def __init__(self, layout, inputs, constant_args=(), has_bias=True) -> None:\n    self.has_bias = has_bias\n    super().__init__(layout, inputs, constant_args, None, op_overload=torch.ops.onednn.qlinear_pointwise.tensor, cpp_kernel_name='aoti_torch_cpu__qlinear_pointwise_tensor')",
    "docstring": "if bias is not None - inputs = [x, w, b, weight_scale, weight_zp] - const_args is: [x_scale, x_zp, o_scale, o_zp, fp32_output, unary_attr, unary_scalars, unary_algorithm] else - inputs = [x, w, weight_scale, weight_zp] - const_args is: [bias, x_scale, x_zp, o_scale, o_zp, fp32_output, unary_attr, unary_scalars, unary_algorithm]",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\mkldnn_ir.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:layout arg:inputs arg:constant_args arg:has_bias arguments arg arg arg arg arg Assign Call Call"
  },
  {
    "library": "pytorch",
    "name": "RReLU",
    "source_code": "class RReLU(Module):\n    __constants__ = ['lower', 'upper', 'inplace']\n    lower: float\n    upper: float\n    inplace: bool\n\n    def __init__(self, lower: float=1.0 / 8, upper: float=1.0 / 3, inplace: bool=False):\n        super().__init__()\n        self.lower = lower\n        self.upper = upper\n        self.inplace = inplace\n\n    def forward(self, input: Tensor) -> Tensor:\n        return F.rrelu(input, self.lower, self.upper, self.training, self.inplace)\n\n    def extra_repr(self):\n        inplace_str = ', inplace=True' if self.inplace else ''\n        return f'lower={self.lower}, upper={self.upper}{inplace_str}'",
    "docstring": "Applies the randomized leaky rectified linear unit function, element-wise. Method described in the paper: _. The function is defined as: .. math:: \\text{RReLU}(x) = \\begin{cases} x & \\text{if } x \\geq 0 \\\\ ax & \\text{ otherwise } \\end{cases} where :math: is randomly sampled from uniform distribution :math: during training while during evaluation :math: is fixed with :math:. Args: lower: lower bound of the uniform distribution. Default: :math: upper: upper bound of the uniform distribution. Default: :math: inplace: can optionally do the operation in-place. Default: `(*)*(*)`, same shape as the input. .. image:: ../scripts/activation_images/RReLU.png Examples:: >>> m = nn.RReLU(0.1, 0.3) >>> input = torch.randn(2) >>> output = m(input)",
    "type": "class",
    "file_path": "pytorch\\torch\\nn\\modules\\activation.py",
    "ast_data": "ClassDef name:RReLU Assign FunctionDef name:__init__ arg:self arg:lower arg:upper arg:inplace arguments arg arg arg arg Call Call Assign Assign Assign FunctionDef name:forward arg:self arg:input arguments arg arg Return return:yes Call FunctionDef name:extra_repr arg:self arguments arg Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "prefetch_to_device",
    "source_code": "@tf_export('data.experimental.prefetch_to_device')\ndef prefetch_to_device(device, buffer_size=None):\n\n    def _apply_fn(dataset):\n        return dataset.apply(copy_to_device(target_device=device)).prefetch(buffer_size)\n    return _apply_fn",
    "docstring": "A transformation that prefetches dataset values to the given . NOTE: Although the transformation creates a , the transformation must be the final in the input pipeline. For example, >>> dataset = tf.data.Dataset.from_tensor_slices([1, 2, 3]) >>> dataset = dataset.apply(tf.data.experimental.prefetch_to_device(\"/cpu:0\")) >>> for element in dataset: ... print(f'Tensor {element} is on device {element.device}') Tensor 1 is on device /job:localhost/replica:0/task:0/device:CPU:0 Tensor 2 is on device /job:localhost/replica:0/task:0/device:CPU:0 Tensor 3 is on device /job:localhost/replica:0/task:0/device:CPU:0 Args: device: A string. The name of a device to which elements will be prefetched. buffer_size: (Optional.) The number of elements to buffer on . Defaults to an automatically chosen value. Returns: A transformation function, which can be passed to .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\data\\experimental\\ops\\prefetching_ops.py",
    "ast_data": "FunctionDef name:prefetch_to_device arg:device arg:buffer_size arguments arg arg FunctionDef name:_apply_fn arg:dataset arguments arg Return return:yes Call Call Call Return return:yes Call"
  },
  {
    "library": "numpy",
    "name": "_inplace_binary_method",
    "source_code": "def _inplace_binary_method(ufunc, name):\n\n    def func(self, other):\n        return ufunc(self, other, out=(self,))\n    func.__name__ = f'__i{name}__'\n    return func",
    "docstring": "Implement an in-place binary method with a ufunc, e.g., __iadd__.",
    "type": "function",
    "file_path": "numpy\\numpy\\lib\\mixins.py",
    "ast_data": "FunctionDef name:_inplace_binary_method arg:ufunc arg:name arguments arg arg FunctionDef name:func arg:self arg:other arguments arg arg Return return:yes Call Assign Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "get_latency_of_partitioned_graph",
    "source_code": "def get_latency_of_partitioned_graph(partitions: list[Partition], partition_to_latency_mapping: dict[Partition, PartitionLatency], transfer_rate_bytes_per_sec: float):\n\n    def dfs_helper(partition: Partition, latency_so_far_sec: float) -> float:\n        latency_so_far_sec += partition_to_latency_mapping[partition].overall_latency_sec\n        if partition.children:\n            max_latency_sec = 0.0\n            for child in partition.children:\n                comm_latency_sec = get_comm_latency_between(partition, child, transfer_rate_bytes_per_sec)\n                new_latency_sec = dfs_helper(child, latency_so_far_sec + comm_latency_sec)\n                if new_latency_sec > max_latency_sec:\n                    max_latency_sec = new_latency_sec\n            return max_latency_sec\n        return latency_so_far_sec\n\n    def get_top_partitions(partitions: list[Partition]) -> list[Partition]:\n        top_partitions = [partition for partition in partitions if len(partition.parents) == 0]\n        return top_partitions\n    top_partitions = get_top_partitions(partitions)\n    critical_path_latency_sec = 0.0\n    for partition in top_partitions:\n        latency_sec = dfs_helper(partition, 0.0)\n        if latency_sec > critical_path_latency_sec:\n            critical_path_latency_sec = latency_sec\n    return critical_path_latency_sec",
    "docstring": "Given all partitions in a graph, find the critical path among all partitions and return its latency as the latency of the whole graph",
    "type": "function",
    "file_path": "pytorch\\torch\\fx\\experimental\\partitioner_utils.py",
    "ast_data": "FunctionDef name:get_latency_of_partitioned_graph arg:partitions arg:partition_to_latency_mapping arg:transfer_rate_bytes_per_sec arguments arg arg arg FunctionDef name:dfs_helper arg:partition arg:latency_so_far_sec arguments arg arg If Assign For Assign Call Assign Call If Compare Assign Return return:yes Return return:yes FunctionDef name:get_top_partitions arg:partitions arguments arg Assign Compare Call Return return:yes Assign Call Assign For Assign Call If Compare Assign Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "ones_",
    "source_code": "def ones_(tensor: Tensor) -> Tensor:\n    return _no_grad_fill_(tensor, 1.0)",
    "docstring": "Fill the input Tensor with the scalar value . Args: tensor: an n-dimensional Examples: >>> w = torch.empty(3, 5) >>> nn.init.ones_(w)",
    "type": "function",
    "file_path": "pytorch\\torch\\nn\\init.py",
    "ast_data": "FunctionDef name:ones_ arg:tensor arguments arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_compute_colocation_summary_from_op",
    "source_code": "def _compute_colocation_summary_from_op(op, prefix=''):\n    return _compute_colocation_summary_from_dict(op.name, op._colocation_dict, prefix)",
    "docstring": "Fetch colocation file, line, and nesting and return a summary string.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\error_interpolation.py",
    "ast_data": "FunctionDef name:_compute_colocation_summary_from_op arg:op arg:prefix arguments arg arg Return return:yes Call"
  },
  {
    "library": "django",
    "name": "catalog",
    "source_code": "def catalog():\n    global _default\n    t = getattr(_active, 'value', None)\n    if t is not None:\n        return t\n    if _default is None:\n        _default = translation(settings.LANGUAGE_CODE)\n    return _default",
    "docstring": "Return the current active catalog for further processing. This can be used if you need to modify the catalog or want to access the whole message catalog instead of just translating one string.",
    "type": "function",
    "file_path": "django\\django\\utils\\translation\\trans_real.py",
    "ast_data": "FunctionDef name:catalog arguments Assign Call If Compare Return return:yes If Compare Assign Call Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "predict",
    "source_code": "@available_if(_estimator_has('predict', delegates=('final_estimator_', 'final_estimator')))\ndef predict(self, X, **predict_params):\n    if _routing_enabled():\n        routed_params = process_routing(self, 'predict', **predict_params)\n    else:\n        routed_params = Bunch()\n        routed_params.final_estimator_ = Bunch(predict={})\n        routed_params.final_estimator_.predict = predict_params\n    y_pred = super().predict(X, **routed_params.final_estimator_['predict'])\n    return y_pred",
    "docstring": "Predict target for X. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) Training vectors, where is the number of samples and is the number of features. **predict_params : dict of str -> obj Parameters to the called by the . Note that this may be used to return uncertainties from some estimators with or . Be aware that it will only account for uncertainty in the final estimator. - If (default): Parameters directly passed to the method of the . - If : Parameters safely routed to the method of the . See :ref: for more details. .. versionchanged:: 1.6 can be routed via metadata routing API. Returns ------- y_pred : ndarray of shape (n_samples,) or (n_samples, n_output) Predicted targets.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\ensemble\\_stacking.py",
    "ast_data": "FunctionDef name:predict arg:self arg:X arguments arg arg arg If Call Assign Call Assign Call Assign Call Assign Assign Call Call Return return:yes Call Call"
  },
  {
    "library": "sphinx",
    "name": "parse_directive",
    "source_code": "def parse_directive(d: str) -> tuple[str, str]:\n    dir = d.strip()\n    if not dir.startswith('.'):\n        return (dir, '')\n    m = dir_sig_re.match(dir)\n    if not m:\n        return (dir, '')\n    parsed_dir, parsed_args = m.groups()\n    if parsed_args.strip():\n        return (parsed_dir.strip(), ' ' + parsed_args.strip())\n    else:\n        return (parsed_dir.strip(), '')",
    "docstring": "Parse a directive signature. Returns (directive, arguments) string tuple. If no arguments are given, returns (directive, '').",
    "type": "function",
    "file_path": "sphinx\\sphinx\\domains\\rst.py",
    "ast_data": "FunctionDef name:parse_directive arg:d arguments arg Assign Call If Call Return return:yes Assign Call If Return return:yes Assign Call If Call Return return:yes Call Call Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "get_frame_on",
    "source_code": "def get_frame_on(self):\n    return self.legendPatch.get_visible()",
    "docstring": "Get whether the legend box patch is drawn.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\legend.py",
    "ast_data": "FunctionDef name:get_frame_on arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "predict",
    "source_code": "def predict(self, X):\n    check_is_fitted(self)\n    X = validate_data(self, X, accept_sparse='csr', reset=False)\n    return self._predict(X)",
    "docstring": "Predict data using the `` of subclusters. Avoid computation of the row norms of X. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) Input data. Returns ------- labels : ndarray of shape(n_samples,) Labelled data.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\cluster\\_birch.py",
    "ast_data": "FunctionDef name:predict arg:self arg:X arguments arg arg Call Assign Call Return return:yes Call"
  },
  {
    "library": "django",
    "name": "get_readonly_fields",
    "source_code": "def get_readonly_fields(self, request, obj=None):\n    return self.readonly_fields",
    "docstring": "Hook for specifying custom readonly fields.",
    "type": "method",
    "file_path": "django\\django\\contrib\\admin\\options.py",
    "ast_data": "FunctionDef name:get_readonly_fields arg:self arg:request arg:obj arguments arg arg arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_add_marker",
    "source_code": "def _add_marker(self, marker_name: str) -> None:\n    marker_val = len(self.memories_allocated.values())\n    self._markers[marker_name] = marker_val",
    "docstring": "Set the marker's x-axis value.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\_tools\\memory_tracker.py",
    "ast_data": "FunctionDef name:_add_marker arg:self arg:marker_name arguments arg arg Assign Call Call Assign"
  },
  {
    "library": "pytorch",
    "name": "get_flop_counts",
    "source_code": "def get_flop_counts(self) -> dict[str, dict[Any, int]]:\n    return {k: dict(v) for k, v in self.flop_counts.items()}",
    "docstring": "Return the flop counts as a dictionary of dictionaries. The outer dictionary is keyed by module name, and the inner dictionary is keyed by operation name. Returns: Dict[str, Dict[Any, int]]: The flop counts as a dictionary.",
    "type": "method",
    "file_path": "pytorch\\torch\\utils\\flop_counter.py",
    "ast_data": "FunctionDef name:get_flop_counts arg:self arguments arg Return return:yes Call Call"
  },
  {
    "library": "kornia",
    "name": "_compute_rotation_matrix3d",
    "source_code": "def _compute_rotation_matrix3d(yaw: Tensor, pitch: Tensor, roll: Tensor, center: Tensor) -> Tensor:\n    if len(yaw.shape) == len(pitch.shape) == len(roll.shape) == 0:\n        yaw = yaw.unsqueeze(dim=0)\n        pitch = pitch.unsqueeze(dim=0)\n        roll = roll.unsqueeze(dim=0)\n    if len(yaw.shape) == len(pitch.shape) == len(roll.shape) == 1:\n        yaw = yaw.unsqueeze(dim=1)\n        pitch = pitch.unsqueeze(dim=1)\n        roll = roll.unsqueeze(dim=1)\n    if not len(yaw.shape) == len(pitch.shape) == len(roll.shape) == 2:\n        raise AssertionError(f'Expected yaw, pitch, roll to be (B, 1). Got {yaw.shape}, {pitch.shape}, {roll.shape}.')\n    angles: Tensor = torch.cat([yaw, pitch, roll], dim=1)\n    scales: Tensor = ones_like(yaw)\n    matrix: Tensor = get_projective_transform(center, angles, scales)\n    return matrix",
    "docstring": "Compute a pure affine rotation matrix.",
    "type": "function",
    "file_path": "kornia\\kornia\\geometry\\transform\\affwarp.py",
    "ast_data": "FunctionDef name:_compute_rotation_matrix3d arg:yaw arg:pitch arg:roll arg:center arguments arg arg arg arg If Compare Call Call Call Assign Call Assign Call Assign Call If Compare Call Call Call Assign Call Assign Call Assign Call If Compare Call Call Call Raise Call Call Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "CustomObjectScope",
    "source_code": "class CustomObjectScope(object):\n\n    def __init__(self, *args):\n        self.custom_objects = args\n        self.backup = None\n\n    def __enter__(self):\n        self.backup = _GLOBAL_CUSTOM_OBJECTS.copy()\n        for objects in self.custom_objects:\n            _GLOBAL_CUSTOM_OBJECTS.update(objects)\n        return self\n\n    def __exit__(self, *args, **kwargs):\n        _GLOBAL_CUSTOM_OBJECTS.clear()\n        _GLOBAL_CUSTOM_OBJECTS.update(self.backup)",
    "docstring": "Exposes custom classes/functions to Keras deserialization internals. Under a scope , Keras methods such as or will be able to deserialize any custom object referenced by a saved config (e.g. a custom layer or metric). Example: Consider a custom regularizer : Args: *args: Dictionary or dictionaries of pairs.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\utils\\generic_utils.py",
    "ast_data": "ClassDef name:CustomObjectScope FunctionDef name:__init__ arg:self arguments arg arg Assign Assign FunctionDef name:__enter__ arg:self arguments arg Assign Call For Call Return return:yes FunctionDef name:__exit__ arg:self arguments arg arg arg Call Call"
  },
  {
    "library": "tensorflow",
    "name": "save_counter",
    "source_code": "@property\ndef save_counter(self):\n    self._maybe_create_save_counter()\n    return self._save_counter",
    "docstring": "An integer variable which starts at zero and is incremented on save. Used to number checkpoints. Returns: The save counter variable.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\checkpoint\\checkpoint.py",
    "ast_data": "FunctionDef name:save_counter arg:self arguments arg Call Return return:yes"
  },
  {
    "library": "scipy",
    "name": "rank",
    "source_code": "@property\ndef rank(self):\n    return np.array(self._rank, dtype=int)[()]",
    "docstring": "Rank of the covariance matrix",
    "type": "method",
    "file_path": "scipy\\scipy\\stats\\_covariance.py",
    "ast_data": "FunctionDef name:rank arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "numpy",
    "name": "_as_parameter_",
    "source_code": "@property\ndef _as_parameter_(self):\n    return self.data_as(ctypes.c_void_p)",
    "docstring": "Overrides the ctypes semi-magic method Enables",
    "type": "method",
    "file_path": "numpy\\numpy\\_core\\_internal.py",
    "ast_data": "FunctionDef name:_as_parameter_ arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "disable_traceback_filtering",
    "source_code": "@tf_export('debugging.disable_traceback_filtering')\ndef disable_traceback_filtering():\n    global _ENABLE_TRACEBACK_FILTERING\n    _ENABLE_TRACEBACK_FILTERING.value = False",
    "docstring": "Disable filtering out TensorFlow-internal frames in exception stack traces. Raw TensorFlow stack traces involve many internal frames, which can be challenging to read through, while not being actionable for end users. By default, TensorFlow filters internal frames in most exceptions that it raises, to keep stack traces short, readable, and focused on what's actionable for end users (their own code). Calling disables this filtering mechanism, meaning that TensorFlow exceptions stack traces will include all frames, in particular TensorFlow-internal ones. **If you are debugging a TensorFlow-internal issue, you need to call **. To re-enable traceback filtering afterwards, you can call .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\util\\traceback_utils.py",
    "ast_data": "FunctionDef name:disable_traceback_filtering arguments Assign Call"
  },
  {
    "library": "pytorch",
    "name": "bind_function",
    "source_code": "def bind_function(self, fn_name, fn, is_custom_function, is_traceable):\n    return ops.add(fn_name, fn, is_custom_function, is_traceable)",
    "docstring": "Binds ops.fn_name = fn",
    "type": "method",
    "file_path": "pytorch\\torch\\_dynamo\\compiled_autograd.py",
    "ast_data": "FunctionDef name:bind_function arg:self arg:fn_name arg:fn arg:is_custom_function arg:is_traceable arguments arg arg arg arg arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "device_policy",
    "source_code": "@tf_contextlib.contextmanager\ndef device_policy(policy):\n    ctx = context()\n    old_policy = ctx.device_policy\n    try:\n        ctx.device_policy = policy\n        yield\n    finally:\n        ctx.device_policy = old_policy",
    "docstring": "Context manager for setting device placement policy for current thread.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\context.py",
    "ast_data": "FunctionDef name:device_policy arg:policy arguments arg Assign Call Assign Try Assign Assign"
  },
  {
    "library": "pytorch",
    "name": "get_worker_group",
    "source_code": "@abc.abstractmethod\ndef get_worker_group(self, role: str=DEFAULT_ROLE) -> WorkerGroup:\n    raise NotImplementedError",
    "docstring": "Return the ``. Note that the worker group is a mutable object and hence in a multi-threaded/process environment it may change state. Implementors are encouraged (but not required) to return a defensive read-only copy.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\elastic\\agent\\server\\api.py",
    "ast_data": "FunctionDef name:get_worker_group arg:self arg:role arguments arg arg Raise"
  },
  {
    "library": "tensorflow",
    "name": "_get_thread_id",
    "source_code": "def _get_thread_id():\n    thread_id = _thread.get_ident()\n    return thread_id & _THREAD_ID_MASK",
    "docstring": "Get id of current thread, suitable for logging as an unsigned quantity.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\platform\\tf_logging.py",
    "ast_data": "FunctionDef name:_get_thread_id arguments Assign Call Return return:yes"
  },
  {
    "library": "kornia",
    "name": "random",
    "source_code": "@classmethod\ndef random(cls, batch_size: Optional[int]=None, device: Optional[Device]=None, dtype: Dtype=None) -> 'Quaternion':\n    rand_shape = (batch_size,) if batch_size is not None else ()\n    r1, r2, r3 = rand((3, *rand_shape), device=device, dtype=dtype)\n    q1 = (1.0 - r1).sqrt() * (2 * pi * r2).sin()\n    q2 = (1.0 - r1).sqrt() * (2 * pi * r2).cos()\n    q3 = r1.sqrt() * (2 * pi * r3).sin()\n    q4 = r1.sqrt() * (2 * pi * r3).cos()\n    return cls(stack((q1, q2, q3, q4), -1))",
    "docstring": "Create a random unit quaternion of shape :math:. Uniformly distributed across the rotation space as per: Args: batch_size: the batch size of the underlying data. device: device to place the result on. dtype: dtype of the result. Example: >>> q = Quaternion.random() >>> q = Quaternion.random(batch_size=2)",
    "type": "method",
    "file_path": "kornia\\kornia\\geometry\\quaternion.py",
    "ast_data": "FunctionDef name:random arg:cls arg:batch_size arg:device arg:dtype arguments arg arg arg arg Assign Compare Assign Call Assign Call Call Assign Call Call Assign Call Call Assign Call Call Return return:yes Call Call"
  },
  {
    "library": "kornia",
    "name": "fx",
    "source_code": "@property\ndef fx(self) -> Tensor:\n    return self._params[..., 0]",
    "docstring": "Returns the focal length in x direction.",
    "type": "method",
    "file_path": "kornia\\kornia\\sensors\\camera\\camera_model.py",
    "ast_data": "FunctionDef name:fx arg:self arguments arg Return return:yes"
  },
  {
    "library": "pandas",
    "name": "__iter__",
    "source_code": "def __iter__(self) -> Iterator[Any]:\n    na_value = self._dtype.na_value\n    pa_type = self._pa_array.type\n    box_timestamp = pa.types.is_timestamp(pa_type) and pa_type.unit != 'ns'\n    box_timedelta = pa.types.is_duration(pa_type) and pa_type.unit != 'ns'\n    for value in self._pa_array:\n        val = value.as_py()\n        if val is None:\n            yield na_value\n        elif box_timestamp:\n            yield Timestamp(val).as_unit(pa_type.unit)\n        elif box_timedelta:\n            yield Timedelta(val).as_unit(pa_type.unit)\n        else:\n            yield val",
    "docstring": "Iterate over elements of the array.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\arrays\\arrow\\array.py",
    "ast_data": "FunctionDef name:__iter__ arg:self arguments arg Assign Assign Assign BoolOp Call Compare Assign BoolOp Call Compare For Assign Call If Compare If Call Call If Call Call"
  },
  {
    "library": "sphinx",
    "name": "getorigbases",
    "source_code": "def getorigbases(obj: Any) -> tuple[Any, ...] | None:\n    if not isclass(obj):\n        return None\n    __dict__ = safe_getattr(obj, '__dict__', {})\n    __orig_bases__ = __dict__.get('__orig_bases__')\n    if isinstance(__orig_bases__, tuple) and len(__orig_bases__) > 0:\n        return __orig_bases__\n    return None",
    "docstring": "Safely get `` is not well-defined (e.g., a non-tuple object or an empty sequence).",
    "type": "function",
    "file_path": "sphinx\\sphinx\\util\\inspect.py",
    "ast_data": "FunctionDef name:getorigbases arg:obj arguments arg If Call Return return:no Assign Call Assign Call If BoolOp Call Compare Call Return return:yes Return return:no"
  },
  {
    "library": "tensorflow",
    "name": "write_string_to_file",
    "source_code": "def write_string_to_file(filename, file_content):\n    with FileIO(filename, mode='w') as f:\n        f.write(file_content)",
    "docstring": "Writes a string to a given file. Args: filename: string, path to a file file_content: string, contents that need to be written to the file Raises: errors.OpError: If there are errors during the operation.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\lib\\io\\file_io.py",
    "ast_data": "FunctionDef name:write_string_to_file arg:filename arg:file_content arguments arg arg With Call Call"
  },
  {
    "library": "numpy",
    "name": "__init__",
    "source_code": "def __init__(self, mbfunc, fillx=0, filly=0):\n    super().__init__(mbfunc)\n    self.fillx = fillx\n    self.filly = filly\n    ufunc_domain[mbfunc] = None\n    ufunc_fills[mbfunc] = (fillx, filly)",
    "docstring": "abfunc(fillx, filly) must be defined. abfunc(x, filly) = x for all x to enable reduce.",
    "type": "method",
    "file_path": "numpy\\numpy\\ma\\core.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:mbfunc arg:fillx arg:filly arguments arg arg arg arg Call Call Assign Assign Assign Assign"
  },
  {
    "library": "pytorch",
    "name": "wrap_cpp_class",
    "source_code": "def wrap_cpp_class(cpp_class):\n    return torch.jit.RecursiveScriptClass(cpp_class)",
    "docstring": "Wrap this torch._C.Object in a Python RecursiveScriptClass.",
    "type": "function",
    "file_path": "pytorch\\torch\\jit\\_recursive.py",
    "ast_data": "FunctionDef name:wrap_cpp_class arg:cpp_class arguments arg Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "nbytes",
    "source_code": "@property\ndef nbytes(self) -> int:\n    return self._pa_array.nbytes",
    "docstring": "The number of bytes needed to store this object in memory.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\arrays\\arrow\\array.py",
    "ast_data": "FunctionDef name:nbytes arg:self arguments arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_batch_mv",
    "source_code": "def _batch_mv(bmat, bvec):\n    return torch.matmul(bmat, bvec.unsqueeze(-1)).squeeze(-1)",
    "docstring": "Performs a batched matrix-vector product, with compatible but different batch shapes. This function takes as input , containing :math: matrices, and , containing length :math: vectors. Both and may have any number of leading dimensions, which correspond to a batch shape. They are not necessarily assumed to have the same batch shape, just ones which can be broadcasted.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributions\\multivariate_normal.py",
    "ast_data": "FunctionDef name:_batch_mv arg:bmat arg:bvec arguments arg arg Return return:yes Call Call Call"
  },
  {
    "library": "pandas",
    "name": "to_2d_mgr",
    "source_code": "def to_2d_mgr(self, columns: Index) -> BlockManager:\n    blk = self.blocks[0]\n    arr = ensure_block_shape(blk.values, ndim=2)\n    bp = BlockPlacement(0)\n    new_blk = type(blk)(arr, placement=bp, ndim=2, refs=blk.refs)\n    axes = [columns, self.axes[0]]\n    return BlockManager([new_blk], axes=axes, verify_integrity=False)",
    "docstring": "Manager analogue of Series.to_frame",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\internals\\managers.py",
    "ast_data": "FunctionDef name:to_2d_mgr arg:self arg:columns arguments arg arg Assign Assign Call Assign Call Assign Call Call Assign Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "finalize_state",
    "source_code": "@doc_controls.do_not_generate_docs\ndef finalize_state(self):\n    pass",
    "docstring": "Finalizes the layers state after updating layer weights. This function can be subclassed in a layer and will be called after updating a layer weights. It can be overridden to finalize any additional layer state after a weight update.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\base_layer.py",
    "ast_data": "FunctionDef name:finalize_state arg:self arguments arg"
  },
  {
    "library": "scipy",
    "name": "isspmatrix_dia",
    "source_code": "def isspmatrix_dia(x):\n    return isinstance(x, dia_matrix)",
    "docstring": "Is of dia_matrix type? Parameters ---------- x object to check for being a dia matrix Returns ------- bool True if is a dia matrix, False otherwise Examples -------- >>> from scipy.sparse import dia_array, dia_matrix, coo_matrix, isspmatrix_dia >>> isspmatrix_dia(dia_matrix([[5]])) True >>> isspmatrix_dia(dia_array([[5]])) False >>> isspmatrix_dia(coo_matrix([[5]])) False",
    "type": "function",
    "file_path": "scipy\\scipy\\sparse\\_dia.py",
    "ast_data": "FunctionDef name:isspmatrix_dia arg:x arguments arg Return return:yes Call"
  },
  {
    "library": "kornia",
    "name": "forward",
    "source_code": "@torch.inference_mode()\ndef forward(self, images: Union[Tensor, list[Tensor]]) -> Union[Tensor, list[Tensor]]:\n    images, images_sizes = self.pre_processor(images)\n    logits, boxes = self.model(images)\n    detections = self.post_processor(logits, boxes, images_sizes)\n    return detections",
    "docstring": "Detect objects in a given list of images. Args: images: If list of RGB images. Each image is a Tensor with shape :math:. If Tensor, a Tensor with shape :math:. Returns: list of detections found in each image. For item in a batch, shape is :math:, where :math: is the number of detections in the given image, :math: represents class id, score, and bounding box.",
    "type": "method",
    "file_path": "kornia\\kornia\\models\\detection\\base.py",
    "ast_data": "FunctionDef name:forward arg:self arg:images arguments arg arg Assign Call Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "num_obs_dm",
    "source_code": "def num_obs_dm(d):\n    d = np.asarray(d, order='c')\n    is_valid_dm(d, tol=np.inf, throw=True, name='d')\n    return d.shape[0]",
    "docstring": "Return the number of original observations that correspond to a square, redundant distance matrix. Parameters ---------- d : array_like The target distance matrix. Returns ------- num_obs_dm : int The number of observations in the redundant distance matrix. Examples -------- Find the number of original observations corresponding to a square redundant distance matrix d. >>> from scipy.spatial.distance import num_obs_dm >>> d = [[0, 100, 200], [100, 0, 150], [200, 150, 0]] >>> num_obs_dm(d) 3",
    "type": "function",
    "file_path": "scipy\\scipy\\spatial\\distance.py",
    "ast_data": "FunctionDef name:num_obs_dm arg:d arguments arg Assign Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "MemoryStats",
    "source_code": "@dataclasses.dataclass\nclass MemoryStats:\n    persistent: StatsForKernelType\n    looped: StatsForKernelType\n\n    def get(self, persistent: bool) -> StatsForKernelType:\n        return self.persistent if persistent else self.looped\n\n    @classmethod\n    def compute(cls, estimator: MemoryEstimator) -> typing.Self:\n        persistent = StatsForKernelType.compute([estimator.persistent], estimator)\n        if len(estimator.loops) == 1 and (not (estimator.outside_loop and estimator.loops[0])):\n            looped = persistent\n        else:\n            looped = StatsForKernelType.compute([estimator.outside_loop, *estimator.loops], estimator)\n        return cls(persistent=persistent, looped=looped)",
    "docstring": "Memory usage stats collected for each generated kernel",
    "type": "class",
    "file_path": "pytorch\\torch\\_inductor\\codegen\\simd_kernel_features.py",
    "ast_data": "ClassDef name:MemoryStats FunctionDef name:get arg:self arg:persistent arguments arg arg Return return:yes FunctionDef name:compute arg:cls arg:estimator arguments arg arg Assign Call If BoolOp Compare Call BoolOp Assign Assign Call Return return:yes Call"
  },
  {
    "library": "kornia",
    "name": "AutoContrast",
    "source_code": "class AutoContrast(OperationBase):\n\n    def __init__(self, initial_probability: float=0.5, temperature: float=0.1, symmetric_megnitude: bool=False) -> None:\n        super().__init__(K.RandomAutoContrast(same_on_batch=False, p=initial_probability), initial_magnitude=None, temperature=temperature, symmetric_megnitude=symmetric_megnitude)",
    "docstring": "Apply auto_contrast operation. Args: initial_probability: the initial probability. If None, the augmentation will be randomly applied according to he augmentation sampling range. temperature: temperature for RelaxedBernoulli distribution used during training. symmetric_megnitude: if to randomly assign the magnitude as negative or not.",
    "type": "class",
    "file_path": "kornia\\kornia\\augmentation\\auto\\operations\\ops.py",
    "ast_data": "ClassDef name:AutoContrast FunctionDef name:__init__ arg:self arg:initial_probability arg:temperature arg:symmetric_megnitude arguments arg arg arg arg Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_convert_or_cast",
    "source_code": "def _convert_or_cast(x, dtype, name):\n    if isinstance(x, weak_tensor.WeakTensor):\n        x = x.to_tensor()\n    if isinstance(x, core.Tensor) or isinstance(x, composite_tensor.CompositeTensor):\n        return math_ops.cast(x, dtype=dtype, name=name)\n    else:\n        return ops.convert_to_tensor(x, dtype=dtype, name=name)",
    "docstring": "Converts/casts the input x to dtype.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\weak_tensor_ops.py",
    "ast_data": "FunctionDef name:_convert_or_cast arg:x arg:dtype arg:name arguments arg arg arg If Call Assign Call If BoolOp Call Call Return return:yes Call Return return:yes Call"
  },
  {
    "library": "kornia",
    "name": "preprocess_image",
    "source_code": "def preprocess_image(self, x: Tensor, mean: Optional[Tensor]=None, std: Optional[Tensor]=None) -> Tensor:\n    if isinstance(mean, Tensor) and isinstance(std, Tensor):\n        x = normalize(x, mean, std)\n    elif isinstance(self.pixel_mean, Tensor) and isinstance(self.pixel_std, Tensor):\n        x = normalize(x, self.pixel_mean, self.pixel_std)\n    encoder_im_size = self.model.image_encoder.img_size\n    pad_h = encoder_im_size - x.shape[-2]\n    pad_w = encoder_im_size - x.shape[-1]\n    x = pad(x, (0, pad_w, 0, pad_h))\n    return x",
    "docstring": "Normalize and pad a tensor. For normalize the tensor: will prioritize the and passed as argument, if None will use the default Sam Dataset values. For pad the tensor: Will pad the tensor into the right and bottom to match with the size of Args: x: The image to be preprocessed mean: Mean for each channel. std: Standard deviations for each channel. Returns: The image preprocessed (normalized if has mean and str available and padded to encoder size)",
    "type": "method",
    "file_path": "kornia\\kornia\\contrib\\visual_prompter.py",
    "ast_data": "FunctionDef name:preprocess_image arg:self arg:x arg:mean arg:std arguments arg arg arg arg If BoolOp Call Call Assign Call If BoolOp Call Call Assign Call Assign Assign Assign Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_maybe_validate_matrix",
    "source_code": "def _maybe_validate_matrix(a, validate_args):\n    assertions = []\n    if not a.dtype.is_floating:\n        raise TypeError('Input `a` must have `float`-like `dtype` (saw {}).'.format(a.dtype.name))\n    if a.shape is not None and a.shape.rank is not None:\n        if a.shape.rank < 2:\n            raise ValueError('Input `a` must have at least 2 dimensions (saw: {}).'.format(a.shape.rank))\n    elif validate_args:\n        assertions.append(check_ops.assert_rank_at_least(a, rank=2, message='Input `a` must have at least 2 dimensions.'))\n    return assertions",
    "docstring": "Checks that input is a matrix.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\linalg\\linalg_impl.py",
    "ast_data": "FunctionDef name:_maybe_validate_matrix arg:a arg:validate_args arguments arg arg Assign If Raise Call Call If BoolOp Compare Compare If Compare Raise Call Call If Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "fuzz_n_tuple",
    "source_code": "def fuzz_n_tuple(self, n: int, max_combinations: int=1000) -> ResultType:\n    results = ResultType()\n    print(f'Starting {n}-tuple testing with seed {self.seed}')\n    random.seed(self.seed)\n    for combo in itertools.combinations(self.fields, n):\n        st = self._fuzz_helper(results, combo)\n        if st != Status.SKIPPED:\n            max_combinations -= 1\n            if max_combinations <= 0:\n                print('Reached maximum combinations limit')\n                break\n    return results",
    "docstring": "Test every combination of n configs. returns a dict of this shape: {(config-1, config-2... config-n): status}",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\fuzzer.py",
    "ast_data": "FunctionDef name:fuzz_n_tuple arg:self arg:n arg:max_combinations arguments arg arg arg Assign Call Call Call For Call Assign Call If Compare If Compare Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "point_at_t",
    "source_code": "def point_at_t(self, t):\n    return tuple(self(t))",
    "docstring": "Evaluate the curve at a single point, returning a tuple of *d* floats.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\bezier.py",
    "ast_data": "FunctionDef name:point_at_t arg:self arg:t arguments arg arg Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "_get_op_schema",
    "source_code": "def _get_op_schema(node: Node, placement_strategies: dict[Node, PlacementStrategy]) -> OpSchema:\n    args_schema_list = pytree.tree_map_only(Node, lambda arg: placement_strategies[arg].output_specs, node.args)\n    op_schema = OpSchema(op=cast(torch._ops.OpOverload, node.target), args_schema=tuple(args_schema_list), kwargs_schema=cast(dict[str, object], node.kwargs))\n    return op_schema",
    "docstring": "Util function to construct the operator schema of a node.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\tensor\\experimental\\_tp_transform.py",
    "ast_data": "FunctionDef name:_get_op_schema arg:node arg:placement_strategies arguments arg arg Assign Call arguments arg Assign Call Call Call Call Return return:yes"
  },
  {
    "library": "cryptography",
    "name": "private_bytes",
    "source_code": "@abc.abstractmethod\ndef private_bytes(self, encoding: _serialization.Encoding, format: _serialization.PrivateFormat, encryption_algorithm: _serialization.KeySerializationEncryption) -> bytes:\n    pass",
    "docstring": "The serialized bytes of the private key.",
    "type": "method",
    "file_path": "cryptography\\src\\cryptography\\hazmat\\primitives\\asymmetric\\ed448.py",
    "ast_data": "FunctionDef name:private_bytes arg:self arg:encoding arg:format arg:encryption_algorithm arguments arg arg arg arg"
  },
  {
    "library": "tensorflow",
    "name": "get_synchronous_execution",
    "source_code": "@tf_export('config.experimental.get_synchronous_execution')\ndef get_synchronous_execution():\n    return context.context().execution_mode == context.SYNC",
    "docstring": "Gets whether operations are executed synchronously or asynchronously. TensorFlow can execute operations synchronously or asynchronously. If asynchronous execution is enabled, operations may return \"non-ready\" handles. Returns: Current thread execution mode",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\config.py",
    "ast_data": "FunctionDef name:get_synchronous_execution arguments Return return:yes Compare Call Call"
  },
  {
    "library": "matplotlib",
    "name": "_make_classic_style_pseudo_toolbar",
    "source_code": "def _make_classic_style_pseudo_toolbar(self):\n    return SimpleNamespace(canvas=self.canvas)",
    "docstring": "Return a placeholder object with a single attribute. This is useful to reuse the implementations of tools already provided by the classic Toolbars.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\backend_tools.py",
    "ast_data": "FunctionDef name:_make_classic_style_pseudo_toolbar arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "rank",
    "source_code": "@property\ndef rank(self) -> int:\n    return self._rank",
    "docstring": "Rank within a group",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\elastic\\rendezvous\\api.py",
    "ast_data": "FunctionDef name:rank arg:self arguments arg Return return:yes"
  },
  {
    "library": "django",
    "name": "csrf",
    "source_code": "def csrf(request):\n\n    def _get_val():\n        token = get_token(request)\n        if token is None:\n            return 'NOTPROVIDED'\n        else:\n            return token\n    return {'csrf_token': SimpleLazyObject(_get_val)}",
    "docstring": "Context processor that provides a CSRF token, or the string 'NOTPROVIDED' if it has not been provided by either a view decorator or the middleware",
    "type": "function",
    "file_path": "django\\django\\template\\context_processors.py",
    "ast_data": "FunctionDef name:csrf arg:request arguments arg FunctionDef name:_get_val arguments Assign Call If Compare Return return:yes Return return:yes Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_NthElementGrad",
    "source_code": "@ops.RegisterGradient('NthElement')\ndef _NthElementGrad(op: ops.Operation, grad):\n    input = op.inputs[0]\n    output = op.outputs[0]\n    indicators = math_ops.cast(math_ops.equal(array_ops.expand_dims(output, -1), input), grad.dtype)\n    grad = array_ops.expand_dims(grad, -1)\n    num_selected = array_ops.expand_dims(math_ops.reduce_sum(indicators, -1), -1)\n    return [math_ops.divide(indicators, num_selected) * grad, None]",
    "docstring": "Return the gradients for NthElement. Args: op: The NthElementOp for which we need to generate gradients. grad: Tensor. The gradients passed to the NthElementOp Returns: A list of two tensors, the first being the gradient w.r.t. the input, the second being the gradient w.r.t. the N (None).",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\nn_grad.py",
    "ast_data": "FunctionDef name:_NthElementGrad arg:op arg:grad arguments arg arg Assign Assign Assign Call Call Call Assign Call Assign Call Call Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_produce_tensor",
    "source_code": "def _produce_tensor(self, name: str, timestamp: int, tensors_pid: int, allocator: str, num_bytes: int) -> _TensorTracker:\n    object_id = len(self._tensors)\n    tensor = _TensorTracker(name, object_id, timestamp, tensors_pid, allocator, num_bytes)\n    self._tensors[name] = tensor\n    return tensor",
    "docstring": "Creates a new tensor tracker.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\client\\timeline.py",
    "ast_data": "FunctionDef name:_produce_tensor arg:self arg:name arg:timestamp arg:tensors_pid arg:allocator arg:num_bytes arguments arg arg arg arg arg arg Assign Call Assign Call Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "set_regularizer",
    "source_code": "def set_regularizer(self, regularizer):\n    self._regularizer = regularizer",
    "docstring": "Set regularizer for this scope.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\variable_scope.py",
    "ast_data": "FunctionDef name:set_regularizer arg:self arg:regularizer arguments arg arg Assign"
  },
  {
    "library": "django",
    "name": "generate_created_proxies",
    "source_code": "def generate_created_proxies(self):\n    added = self.new_proxy_keys - self.old_proxy_keys\n    for app_label, model_name in sorted(added):\n        model_state = self.to_state.models[app_label, model_name]\n        assert model_state.options.get('proxy')\n        dependencies = [OperationDependency(app_label, model_name, None, OperationDependency.Type.REMOVE)]\n        for base in model_state.bases:\n            if isinstance(base, str) and '.' in base:\n                base_app_label, base_name = base.split('.', 1)\n                dependencies.append(OperationDependency(base_app_label, base_name, None, OperationDependency.Type.CREATE))\n        self.add_operation(app_label, operations.CreateModel(name=model_state.name, fields=[], options=model_state.options, bases=model_state.bases, managers=model_state.managers), dependencies=dependencies)",
    "docstring": "Make CreateModel statements for proxy models. Use the same statements as that way there's less code duplication, but for proxy models it's safe to skip all the pointless field stuff and chuck out an operation.",
    "type": "method",
    "file_path": "django\\django\\db\\migrations\\autodetector.py",
    "ast_data": "FunctionDef name:generate_created_proxies arg:self arguments arg Assign For Call Assign Call Assign Call For If BoolOp Call Compare Assign Call Call Call Call Call"
  },
  {
    "library": "django",
    "name": "references_column",
    "source_code": "def references_column(self, table, column):\n    return False",
    "docstring": "Return whether or not this instance references the specified column.",
    "type": "method",
    "file_path": "django\\django\\db\\backends\\ddl_references.py",
    "ast_data": "FunctionDef name:references_column arg:self arg:table arg:column arguments arg arg arg Return return:yes"
  },
  {
    "library": "cherrypy",
    "name": "on_error",
    "source_code": "def on_error(*args, **kwargs):\n    body = str(sys.exc_info()[1])\n    _set_response(xmlrpc_dumps(XMLRPCFault(1, body)))",
    "docstring": "Construct HTTP response body for an error response.",
    "type": "function",
    "file_path": "cherrypy\\cherrypy\\lib\\xmlrpcutil.py",
    "ast_data": "FunctionDef name:on_error arguments arg arg Assign Call Call Call Call Call"
  },
  {
    "library": "kornia",
    "name": "__init__",
    "source_code": "def __init__(self, q: Quaternion) -> None:\n    super().__init__()\n    KORNIA_CHECK_TYPE(q, Quaternion)\n    self._q = q",
    "docstring": "Construct the base class. Internally represented by a unit quaternion . Args: q: Quaternion with the shape of :math:. Example: >>> data = torch.ones((2, 4)) >>> q = Quaternion(data) >>> So3(q) Parameter containing: tensor([[1., 1., 1., 1.], [1., 1., 1., 1.]], requires_grad=True)",
    "type": "method",
    "file_path": "kornia\\kornia\\geometry\\liegroup\\so3.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:q arguments arg arg Call Call Call Assign"
  },
  {
    "library": "scrapy",
    "name": "format_part_strings",
    "source_code": "def format_part_strings(self, part_strings: list[str]) -> list[str]:\n    if part_strings and part_strings[0].startswith('usage: '):\n        part_strings[0] = 'Usage\\n=====\\n  ' + part_strings[0][len('usage: '):]\n    headings = [i for i in range(len(part_strings)) if part_strings[i].endswith(':\\n')]\n    for index in headings[::-1]:\n        char = '-' if 'Global Options' in part_strings[index] else '='\n        part_strings[index] = part_strings[index][:-2].title()\n        underline = ''.join(['\\n', char * len(part_strings[index]), '\\n'])\n        part_strings.insert(index + 1, underline)\n    return part_strings",
    "docstring": "Underline and title case command line help message headers.",
    "type": "method",
    "file_path": "scrapy\\scrapy\\commands\\__init__.py",
    "ast_data": "FunctionDef name:format_part_strings arg:self arg:part_strings arguments arg arg If BoolOp Call Assign Call Assign Call Call Call For Assign Compare Assign Call Assign Call Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_eager_cond_implementation",
    "source_code": "def _eager_cond_implementation(pred, true_fn, false_fn, strict, name):\n    pred = ops.convert_to_tensor(pred)\n    pred_constant_value = tensor_util.constant_value(pred)\n    if pred_constant_value is None:\n        if not isinstance(true_fn, core.PolymorphicFunction) or not isinstance(false_fn, core.PolymorphicFunction):\n            raise TypeError(\"When running tf.cond on a parallel device, 'true_fn' and 'false_fn' must be decorated with `tf.function`.\")\n        functions_run_eagerly = eager_function_run.functions_run_eagerly()\n        if functions_run_eagerly:\n            logging.warning('It looks like tf.function behavior was disabled, perhaps using tf.config.run_functions_eagerly. Parallelized tf.cond requires tf.function to work. This primitive will override the disable.')\n        eager_function_run.run_functions_eagerly(False)\n        try:\n            return cond_v2.cond_v2(pred, true_fn, false_fn, name)\n        finally:\n            if functions_run_eagerly is not None:\n                eager_function_run.run_functions_eagerly(functions_run_eagerly)\n    else:\n        with ops.name_scope(name, 'cond', [pred]):\n            if pred_constant_value:\n                result = true_fn()\n            else:\n                result = false_fn()\n            if not strict:\n                result = _UnpackIfSingleton(result)\n            return result",
    "docstring": "Special cases for when executing eagerly.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\cond.py",
    "ast_data": "FunctionDef name:_eager_cond_implementation arg:pred arg:true_fn arg:false_fn arg:strict arg:name arguments arg arg arg arg arg Assign Call Assign Call If Compare If BoolOp Call Call Raise Call Assign Call If Call Call Try Return return:yes Call If Compare Call With Call If Assign Call Assign Call If Assign Call Return return:yes"
  },
  {
    "library": "kornia",
    "name": "_add_metadata",
    "source_code": "def _add_metadata(self, op: onnx.ModelProto, additional_metadata: Optional[list[tuple[str, str]]]=None) -> onnx.ModelProto:\n    op = kornia.onnx.utils.add_metadata(op, additional_metadata)\n    return op",
    "docstring": "Add metadata to the combined ONNX model. Args: op: onnx operation. additional_metadata: A list of tuples representing additional metadata to add to the combined ONNX model. Example: [(\"version\", 0.1)], [(\"date\", 20240909)].",
    "type": "method",
    "file_path": "kornia\\kornia\\core\\mixin\\onnx.py",
    "ast_data": "FunctionDef name:_add_metadata arg:self arg:op arg:additional_metadata arguments arg arg arg Assign Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "parse_user_opt_in_from_text",
    "source_code": "def parse_user_opt_in_from_text(user_optin_text: str) -> UserOptins:\n    optins = UserOptins()\n    for user in user_optin_text.split('\\n'):\n        user = user.strip('\\r\\n\\t -')\n        if not user or not user.startswith('@'):\n            continue\n        if user:\n            usr_name = user.split(',')[0].strip('@')\n            optins[usr_name] = [exp.strip(' ') for exp in user.split(',')[1:]]\n    return optins",
    "docstring": "Parse the user opt-in text into a key value pair of username and the list of features they have opted into Users are GitHub usernames with the @ prefix. Each user is also a comma-separated list of features/experiments to enable. - Example line: \"@User1,lf,split_build\" - A \"#\" prefix indicates the user is opted out of all experiments",
    "type": "function",
    "file_path": "pytorch\\.github\\scripts\\runner_determinator.py",
    "ast_data": "FunctionDef name:parse_user_opt_in_from_text arg:user_optin_text arguments arg Assign Call For Call Assign Call If BoolOp Call If Assign Call Call Assign Call Call Return return:yes"
  },
  {
    "library": "scipy",
    "name": "_rng_html_rewrite",
    "source_code": "def _rng_html_rewrite(func):\n    pattern = re.compile('np.random.default_rng\\\\((0x[0-9A-F]+|\\\\d+)\\\\)', re.I)\n\n    def _wrapped(*args, **kwargs):\n        res = func(*args, **kwargs)\n        lines = [re.sub(pattern, 'np.random.default_rng()', line) for line in res]\n        return lines\n    return _wrapped",
    "docstring": "Rewrite the HTML rendering of ``. Examples are only run by Sphinx when there are plot involved. Even so, it does not change the result values getting printed.",
    "type": "function",
    "file_path": "scipy\\scipy\\_lib\\_util.py",
    "ast_data": "FunctionDef name:_rng_html_rewrite arg:func arguments arg Assign Call FunctionDef name:_wrapped arguments arg arg Assign Call Assign Call Return return:yes Return return:yes"
  },
  {
    "library": "cherrypy",
    "name": "get_serving",
    "source_code": "def get_serving(self, local, remote, scheme, sproto):\n    req = self.request_class(local, remote, scheme, sproto)\n    req.app = self\n    for name, toolbox in self.toolboxes.items():\n        req.namespaces[name] = toolbox\n    resp = self.response_class()\n    cherrypy.serving.load(req, resp)\n    cherrypy.engine.publish('acquire_thread')\n    cherrypy.engine.publish('before_request')\n    return (req, resp)",
    "docstring": "Create and return a Request and Response object.",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\_cptree.py",
    "ast_data": "FunctionDef name:get_serving arg:self arg:local arg:remote arg:scheme arg:sproto arguments arg arg arg arg arg Assign Call Assign For Call Assign Assign Call Call Call Call Return return:yes"
  },
  {
    "library": "sphinx",
    "name": "latex_visit_inheritance_diagram",
    "source_code": "def latex_visit_inheritance_diagram(self: LaTeXTranslator, node: inheritance_diagram) -> None:\n    graph = node['graph']\n    graph_hash = get_graph_hash(node)\n    name = 'inheritance%s' % graph_hash\n    dotcode = graph._generate_dot(name, config=self.config, graph_attrs={'size': '\"6.0,6.0\"'})\n    render_dot_latex(self, node, dotcode, {}, 'inheritance')\n    raise nodes.SkipNode",
    "docstring": "Output the graph for LaTeX. This will insert a PDF.",
    "type": "function",
    "file_path": "sphinx\\sphinx\\ext\\inheritance_diagram.py",
    "ast_data": "FunctionDef name:latex_visit_inheritance_diagram arg:self arg:node arguments arg arg Assign Assign Call Assign Assign Call Call Raise"
  },
  {
    "library": "django",
    "name": "check_related_objects",
    "source_code": "def check_related_objects(self, field, value, opts):\n    if field.is_relation:\n        if isinstance(value, Query) and (not value.has_select_fields) and (not check_rel_lookup_compatibility(value.model, opts, field)):\n            raise ValueError('Cannot use QuerySet for \"%s\": Use a QuerySet for \"%s\".' % (value.model._meta.object_name, opts.object_name))\n        elif hasattr(value, '_meta'):\n            self.check_query_object_type(value, opts, field)\n        elif hasattr(value, '__iter__'):\n            for v in value:\n                self.check_query_object_type(v, opts, field)",
    "docstring": "Check the type of object passed to query relations.",
    "type": "method",
    "file_path": "django\\django\\db\\models\\sql\\query.py",
    "ast_data": "FunctionDef name:check_related_objects arg:self arg:field arg:value arg:opts arguments arg arg arg arg If If BoolOp Call Call Raise Call If Call Call If Call For Call"
  },
  {
    "library": "django",
    "name": "_create_index_sql",
    "source_code": "def _create_index_sql(self, model, *, fields=None, name=None, suffix='', using='', db_tablespace=None, col_suffixes=(), sql=None, opclasses=(), condition=None, include=None, expressions=None):\n    fields = fields or []\n    expressions = expressions or []\n    compiler = Query(model, alias_cols=False).get_compiler(connection=self.connection)\n    tablespace_sql = self._get_index_tablespace_sql(model, fields, db_tablespace=db_tablespace)\n    columns = [field.column for field in fields]\n    sql_create_index = sql or self.sql_create_index\n    table = model._meta.db_table\n\n    def create_index_name(*args, **kwargs):\n        nonlocal name\n        if name is None:\n            name = self._create_index_name(*args, **kwargs)\n        return self.quote_name(name)\n    return Statement(sql_create_index, table=Table(table, self.quote_name), name=IndexName(table, columns, suffix, create_index_name), using=using, columns=self._index_columns(table, columns, col_suffixes, opclasses) if columns else Expressions(table, expressions, compiler, self.quote_value), extra=tablespace_sql, condition=self._index_condition_sql(condition), include=self._index_include_sql(model, include))",
    "docstring": "Return the SQL statement to create the index for one or several fields or expressions. can be specified if the syntax differs from the standard (GIS indexes, ...).",
    "type": "method",
    "file_path": "django\\django\\db\\backends\\base\\schema.py",
    "ast_data": "FunctionDef name:_create_index_sql arg:self arg:model arguments arg arg arg arg arg arg arg arg arg arg arg arg arg Assign BoolOp Assign BoolOp Assign Call Call Assign Call Assign Assign BoolOp Assign FunctionDef name:create_index_name arguments arg arg If Compare Assign Call Return return:yes Call Return return:yes Call Call Call Call Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "can_fuse_multi_outputs_template",
    "source_code": "def can_fuse_multi_outputs_template(self, node1: BaseSchedulerNode, node2: BaseSchedulerNode) -> bool:\n    return False",
    "docstring": "A Multi-Output Template (referenced in #144012) is a template node with MultiOutputLayout, and its output buffers are instances of MultiOutput. In this context, we verify whether node1 represents the Multi-Output Template and node2 corresponds to one of its outputs. If so, we further check if backend supports this fusion.",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\scheduler.py",
    "ast_data": "FunctionDef name:can_fuse_multi_outputs_template arg:self arg:node1 arg:node2 arguments arg arg arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "new_func",
    "source_code": "@functools.wraps(func)\ndef new_func(*args, **kwargs):\n    if _PRINT_DEPRECATION_WARNINGS:\n        named_args = tf_inspect.getcallargs(func, *args, **kwargs)\n        for arg_name, arg_value in deprecated_kwargs.items():\n            if arg_name in named_args and _safe_eq(named_args[arg_name], arg_value):\n                if (func, arg_name) not in _PRINTED_WARNING:\n                    if warn_once:\n                        _PRINTED_WARNING[func, arg_name] = True\n                    _log_deprecation('From %s: calling %s (from %s) with %s=%s is deprecated and will be removed %s.\\nInstructions for updating:\\n%s', _call_location(), decorator_utils.get_qualified_name(func), func.__module__, arg_name, arg_value, 'in a future version' if date is None else 'after %s' % date, instructions)\n    return func(*args, **kwargs)",
    "docstring": "Deprecation wrapper.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\util\\deprecation.py",
    "ast_data": "FunctionDef name:new_func arguments arg arg If Assign Call For Call If BoolOp Compare Call If Compare If Assign Call Call Call Compare Return return:yes Call Call"
  },
  {
    "library": "matplotlib",
    "name": "afmFontProperty",
    "source_code": "def afmFontProperty(fontpath, font):\n    name = font.get_familyname()\n    fontname = font.get_fontname().lower()\n    if font.get_angle() != 0 or 'italic' in name.lower():\n        style = 'italic'\n    elif 'oblique' in name.lower():\n        style = 'oblique'\n    else:\n        style = 'normal'\n    if name.lower() in ['capitals', 'small-caps']:\n        variant = 'small-caps'\n    else:\n        variant = 'normal'\n    weight = font.get_weight().lower()\n    if weight not in weight_dict:\n        weight = 'normal'\n    if 'demi cond' in fontname:\n        stretch = 'semi-condensed'\n    elif any((word in fontname for word in ['narrow', 'cond'])):\n        stretch = 'condensed'\n    elif any((word in fontname for word in ['wide', 'expanded', 'extended'])):\n        stretch = 'expanded'\n    else:\n        stretch = 'normal'\n    size = 'scalable'\n    return FontEntry(fontpath, name, style, variant, weight, stretch, size)",
    "docstring": "Extract information from an AFM font file. Parameters ---------- fontpath : str The filename corresponding to *font*. font : AFM The AFM font file from which information will be extracted. Returns ------- The extracted font properties.",
    "type": "function",
    "file_path": "matplotlib\\lib\\matplotlib\\font_manager.py",
    "ast_data": "FunctionDef name:afmFontProperty arg:fontpath arg:font arguments arg arg Assign Call Assign Call Call If BoolOp Compare Call Compare Call Assign If Compare Call Assign Assign If Compare Call Assign Assign Assign Call Call If Compare Assign If Compare Assign If Call Compare Assign If Call Compare Assign Assign Assign Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "on_run_start",
    "source_code": "def on_run_start(self, request):\n    self._is_run_start = True\n    self._update_run_calls_state(request.run_call_count, request.fetches, request.feed_dict, is_callable_runner=request.is_callable_runner)\n    if self._active_tensor_filter:\n        return self._active_tensor_filter_run_start_response\n    self._exit_if_requested_by_user()\n    if self._run_call_count > 1 and (not self._skip_debug):\n        if self._run_through_times > 0:\n            return framework.OnRunStartResponse(framework.OnRunStartAction.NON_DEBUG_RUN, [])\n        elif self._run_through_times == 0:\n            return self._run_start_response or framework.OnRunStartResponse(framework.OnRunStartAction.DEBUG_RUN, self._get_run_debug_urls())\n    if self._run_start_response is None:\n        self._prep_cli_for_run_start()\n        self._run_start_response = self._launch_cli()\n        if self._active_tensor_filter:\n            self._active_tensor_filter_run_start_response = self._run_start_response\n        if self._run_through_times > 1:\n            self._run_through_times -= 1\n    self._exit_if_requested_by_user()\n    return self._run_start_response",
    "docstring": "Overrides on-run-start callback. Args: request: An instance of . Returns: An instance of .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\debug\\wrappers\\local_cli_wrapper.py",
    "ast_data": "FunctionDef name:on_run_start arg:self arg:request arguments arg arg Assign Call If Return return:yes Call If BoolOp Compare If Compare Return return:yes Call If Compare Return return:yes BoolOp Call Call If Compare Call Assign Call If Assign If Compare Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "SharedParamInfo",
    "source_code": "class SharedParamInfo(NamedTuple):\n    param_name: str\n    module: nn.Module\n    module_name: str\n    prim_param_name: str\n    prim_module: nn.Module\n    prim_module_name: str",
    "docstring": "Additional information for a shared parameter. For each shared parameter, we designate one module and its parameter variable to be the primary owner, determined as the first one encountered in the parameter walk. These are prefixed with \"prim\". The primary module and parameter do not have their own :class: instance.",
    "type": "class",
    "file_path": "pytorch\\torch\\distributed\\fsdp\\_flat_param.py",
    "ast_data": "ClassDef name:SharedParamInfo"
  },
  {
    "library": "pytorch",
    "name": "register_ddp_comm_hook",
    "source_code": "def register_ddp_comm_hook(comm_hook_type: DDPCommHookType, model, state=None):\n    comm_hook_type.value(model=model, state=state)",
    "docstring": "Register `` input. State input will be passed to the model. Uses Python comm hook implementations. Example:: >>> # xdoctest: +SKIP >>> register_ddp_comm_hook(DDPCommHookType.FP16_COMPRESS, model, state)",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\algorithms\\ddp_comm_hooks\\__init__.py",
    "ast_data": "FunctionDef name:register_ddp_comm_hook arg:comm_hook_type arg:model arg:state arguments arg arg arg Call"
  },
  {
    "library": "tensorflow",
    "name": "forward",
    "source_code": "def forward(self, inference_args, input_tangents):\n    if self._forward is None:\n        self._forward, self._forward_graph, self._backward, self._forwardprop_output_indices, self._num_forwardprop_outputs = self._forward_and_backward_functions(inference_args, input_tangents)\n    return self._forward",
    "docstring": "Construct or fetch a forward function with side-outputs. When graph building without a tape active, symbolic gradients rely on regenerating the backward function for higher-order gradients (to account for new side outputs of the rewritten forward function call). Thus there is no fixed backward function for this case. However, when a tape is active (eager or graph building), we generate fixed backward and forward functions at forward function call time. This difference between the tape and non-tape cases is to avoid building unneeded backward functions while graph building (where we may or may not eventually need gradients). Args: inference_args: A flat list of Tensors, arguments to the inference function. input_tangents: A flat list of Tensors, jvps associated with . Returns: A forward atomic_function.AtomicFunction.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\polymorphic_function\\concrete_function.py",
    "ast_data": "FunctionDef name:forward arg:self arg:inference_args arg:input_tangents arguments arg arg arg If Compare Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "meshgrid",
    "source_code": "@tf_export.tf_export('experimental.numpy.meshgrid', v1=[])\n@np_utils.np_doc('meshgrid')\ndef meshgrid(*xi, **kwargs):\n    sparse = kwargs.get('sparse', False)\n    if sparse:\n        raise ValueError(f'Function `meshgrid` does not support returning sparse arrays yet. Received: sparse={sparse}')\n    copy = kwargs.get('copy', True)\n    if not copy:\n        raise ValueError(f'Function `meshgrid` only supports copy=True. Received: copy={copy}')\n    indexing = kwargs.get('indexing', 'xy')\n    xi = [np_array_ops.asarray(arg) for arg in xi]\n    kwargs = {'indexing': indexing}\n    outputs = array_ops.meshgrid(*xi, **kwargs)\n    return outputs",
    "docstring": "This currently requires copy=True and sparse=False.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\numpy_ops\\np_math_ops.py",
    "ast_data": "FunctionDef name:meshgrid arguments arg arg Assign Call If Raise Call Assign Call If Raise Call Assign Call Assign Call Assign Assign Call Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "variance",
    "source_code": "@property\ndef variance(self) -> Tensor:\n    raise NotImplementedError",
    "docstring": "Returns the variance of the distribution.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributions\\distribution.py",
    "ast_data": "FunctionDef name:variance arg:self arguments arg Raise"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, string_table):\n    self._string_table = string_table\n    self._node_name_to_sample = {}",
    "docstring": "Constructor. Args: string_table: A object.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\profiler\\pprof_profiler.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:string_table arguments arg arg Assign Assign"
  },
  {
    "library": "tensorflow",
    "name": "assert_nontrivial_match",
    "source_code": "def assert_nontrivial_match(self):\n    return self.assert_consumed()",
    "docstring": "Raises an exception if currently created objects are unmatched.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\checkpoint\\checkpoint.py",
    "ast_data": "FunctionDef name:assert_nontrivial_match arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "set_tzinfo",
    "source_code": "def set_tzinfo(self, tz):\n    self.tz = _get_tzinfo(tz)",
    "docstring": "Set timezone info. Parameters ---------- tz : str or , default: :rc: Ticks timezone. If a string, *tz* is passed to .",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\dates.py",
    "ast_data": "FunctionDef name:set_tzinfo arg:self arg:tz arguments arg arg Assign Call"
  },
  {
    "library": "matplotlib",
    "name": "__init__",
    "source_code": "def __init__(self, linear_width, numticks=11, symthresh=0.2, base=10, subs=None):\n    super().__init__()\n    self.linear_width = linear_width\n    self.numticks = numticks\n    self.symthresh = symthresh\n    self.base = base\n    self.subs = subs",
    "docstring": "Parameters ---------- linear_width : float The scale parameter defining the extent of the quasi-linear region. numticks : int, default: 11 The approximate number of major ticks that will fit along the entire axis symthresh : float, default: 0.2 The fractional threshold beneath which data which covers a range that is approximately symmetric about zero will have ticks that are exactly symmetric. base : int, default: 10 The number base used for rounding tick locations on a logarithmic scale. If this is less than one, then rounding is to the nearest integer multiple of powers of ten. subs : tuple, default: None Multiples of the number base, typically used for the minor ticks, e.g. (2, 5) when base=10.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\ticker.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:linear_width arg:numticks arg:symthresh arg:base arg:subs arguments arg arg arg arg arg arg Call Call Assign Assign Assign Assign Assign"
  },
  {
    "library": "numpy",
    "name": "NDArrayLRShifts",
    "source_code": "class NDArrayLRShifts(Benchmark):\n    params = [['__lshift__', '__rshift__'], ['intp', 'int8', 'int16', 'int32', 'int64', 'uint8', 'uint16', 'uint32', 'uint64']]\n    param_names = ['methods', 'npdtypes']\n    timeout = 10\n\n    def setup(self, methname, npdtypes):\n        self.vals = np.ones(1000, dtype=getattr(np, npdtypes)) * np.random.randint(9)\n\n    def time_ndarray_meth(self, methname, npdtypes):\n        getattr(operator, methname)(*[self.vals, 2])",
    "docstring": "Benchmark for the shift methods",
    "type": "class",
    "file_path": "numpy\\benchmarks\\benchmarks\\bench_ufunc.py",
    "ast_data": "ClassDef name:NDArrayLRShifts Assign Assign Assign FunctionDef name:setup arg:self arg:methname arg:npdtypes arguments arg arg arg Assign Call Call Call FunctionDef name:time_ndarray_meth arg:self arg:methname arg:npdtypes arguments arg arg arg Call Call"
  },
  {
    "library": "pandas",
    "name": "_check_values_indices_shape_match",
    "source_code": "def _check_values_indices_shape_match(values: np.ndarray, index: Index, columns: Index) -> None:\n    if values.shape[1] != len(columns) or values.shape[0] != len(index):\n        if values.shape[0] == 0 < len(index):\n            raise ValueError('Empty data passed with indices specified.')\n        passed = values.shape\n        implied = (len(index), len(columns))\n        raise ValueError(f'Shape of passed values is {passed}, indices imply {implied}')",
    "docstring": "Check that the shape implied by our axes matches the actual shape of the data.",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\internals\\construction.py",
    "ast_data": "FunctionDef name:_check_values_indices_shape_match arg:values arg:index arg:columns arguments arg arg arg If BoolOp Compare Call Compare Call If Compare Call Raise Call Assign Assign Call Call Raise Call"
  },
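The two error paths in `_check_values_indices_shape_match` above are easy to exercise with a standalone sketch (the `check_shape` helper below is hypothetical; plain lists stand in for pandas `Index` objects):

```python
import numpy as np

# Hypothetical standalone version of the same check: `values` must have
# shape (len(index), len(columns)); an empty 0-row array with a non-empty
# index gets its own, clearer error message.
def check_shape(values: np.ndarray, index: list, columns: list) -> None:
    if values.shape[1] != len(columns) or values.shape[0] != len(index):
        if values.shape[0] == 0 < len(index):
            raise ValueError("Empty data passed with indices specified.")
        raise ValueError(
            f"Shape of passed values is {values.shape}, "
            f"indices imply {(len(index), len(columns))}"
        )

check_shape(np.zeros((2, 3)), ["a", "b"], ["x", "y", "z"])  # passes silently
try:
    check_shape(np.zeros((2, 2)), ["a", "b"], ["x", "y", "z"])
except ValueError as exc:
    print(exc)  # Shape of passed values is (2, 2), indices imply (2, 3)
```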
  {
    "library": "tensorflow",
    "name": "is_attrs",
    "source_code": "@tf_export('__internal__.nest.is_attrs', v1=[])\ndef is_attrs(obj):\n    return _is_attrs(obj)",
    "docstring": "Returns a true if its input is an instance of an attr.s decorated class.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\util\\nest.py",
    "ast_data": "FunctionDef name:is_attrs arg:obj arguments arg Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "VariableWatcher",
    "source_code": "class VariableWatcher(object):\n    __slots__ = ['_variable_watcher']\n\n    def __init__(self):\n        self._variable_watcher = None\n\n    def __enter__(self):\n        self._variable_watcher = pywrap_tfe.TFE_Py_VariableWatcherNew()\n        return self\n\n    def __exit__(self, typ, value, traceback):\n        pywrap_tfe.TFE_Py_VariableWatcherRemove(self._variable_watcher)\n\n    def watched_variables(self):\n        return pywrap_tfe.TFE_Py_VariableWatcherWatchedVariables(self._variable_watcher)",
    "docstring": "A scope that tracks all trainable variable accesses within it. This explicitly ignores variables that are not marked as trainable. Sample usage: var = tf.Variable(0.0) with VariableWatcher() as variable_watcher: var.assign_add(1.0) assert variable_watcher.watched_variables == [var]",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\record.py",
    "ast_data": "ClassDef name:VariableWatcher Assign FunctionDef name:__init__ arg:self arguments arg Assign FunctionDef name:__enter__ arg:self arguments arg Assign Call Return return:yes FunctionDef name:__exit__ arg:self arg:typ arg:value arg:traceback arguments arg arg arg arg Call FunctionDef name:watched_variables arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "BadLayout",
    "source_code": "class BadLayout(ArffException):\n    message = 'Invalid layout of the ARFF file, at line %d.'\n\n    def __init__(self, msg=''):\n        super().__init__()\n        if msg:\n            self.message = BadLayout.message + ' ' + msg.replace('%', '%%')",
    "docstring": "Error raised when the layout of the ARFF file has something wrong.",
    "type": "class",
    "file_path": "scikit-learn\\sklearn\\externals\\_arff.py",
    "ast_data": "ClassDef name:BadLayout Assign FunctionDef name:__init__ arg:self arg:msg arguments arg arg Call Call If Assign Call"
  },
  {
    "library": "seaborn",
    "name": "label",
    "source_code": "def label(self, *, title: str | None=None, legend: str | None=None, **variables: str | Callable[[str], str]) -> Plot:\n    new = self._clone()\n    if title is not None:\n        new._labels['title'] = title\n    if legend is not None:\n        new._labels['legend'] = legend\n    new._labels.update(variables)\n    return new",
    "docstring": "Control the labels and titles for axes, legends, and subplots. Additional keywords correspond to variables defined in the plot. Values can be one of the following types: - string (used literally; pass \"\" to clear the default label) - function (called on the default label) For coordinate variables, the value sets the axis label. For semantic variables, the value sets the legend title. For faceting variables, modifies the subplot-specific label, while and/or add a label for the faceting variable. When using a single subplot, sets its title. The parameter sets the title for the \"layer\" legend (i.e., when using in :meth:). Examples -------- .. include:: ../docstrings/objects.Plot.label.rst",
    "type": "method",
    "file_path": "seaborn\\seaborn\\_core\\plot.py",
    "ast_data": "FunctionDef name:label arg:self arguments arg arg arg arg Assign Call If Compare Assign If Compare Assign Call Return return:yes"
  },
  {
    "library": "numpy",
    "name": "mingw32",
    "source_code": "def mingw32():\n    if sys.platform == 'win32':\n        if os.environ.get('OSTYPE', '') == 'msys':\n            return True\n        if os.environ.get('MSYSTEM', '') == 'MINGW32':\n            return True\n    return False",
    "docstring": "Return true when using mingw32 environment.",
    "type": "function",
    "file_path": "numpy\\numpy\\distutils\\misc_util.py",
    "ast_data": "FunctionDef name:mingw32 arguments If Compare If Compare Call Return return:yes If Compare Call Return return:yes Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "insert_type_promotion_nodes",
    "source_code": "def insert_type_promotion_nodes(graph_module: torch.fx.GraphModule) -> None:\n    for module in graph_module.modules():\n        assert isinstance(module, torch.fx.GraphModule)\n        passes.InsertTypePromotion(module).run()",
    "docstring": "Inplace pass to insert explicit type promotion nodes, recursively through nested modules.",
    "type": "function",
    "file_path": "pytorch\\torch\\onnx\\_internal\\exporter\\_fx_passes.py",
    "ast_data": "FunctionDef name:insert_type_promotion_nodes arg:graph_module arguments arg For Call Call Call Call"
  },
  {
    "library": "kornia",
    "name": "as_openvino",
    "source_code": "def as_openvino(self, device_type: str='GPU', **kwargs: Any) -> None:\n    self._session.set_providers(['OpenVINOExecutionProvider'], provider_options=[{'device_type': device_type, **kwargs}])",
    "docstring": "Set the session to run on OpenVINO. We set the ONNX runtime session to use OpenVINOExecutionProvider. For other OpenVINOExecutionProvider configurations, or CUDA/cuDNN/ONNX/TensorRT version issues, you may refer to Args: device_type: CPU, NPU, GPU, GPU.0, GPU.1 based on the available GPUs, NPU, Any valid Hetero combination, Any valid Multi or Auto devices combination. kwargs: Additional arguments for OpenVINO.",
    "type": "method",
    "file_path": "kornia\\kornia\\core\\mixin\\onnx.py",
    "ast_data": "FunctionDef name:as_openvino arg:self arg:device_type arguments arg arg arg Call"
  },
  {
    "library": "django",
    "name": "geographic",
    "source_code": "@property\ndef geographic(self):\n    return self.srs.geographic",
    "docstring": "Is this Spatial Reference geographic?",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\db\\backends\\base\\models.py",
    "ast_data": "FunctionDef name:geographic arg:self arguments arg Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "transform",
    "source_code": "def transform(self, X):\n    check_is_fitted(self)\n    X = validate_data(self, X, accept_sparse=['csr', 'csc'], reset=False, dtype=[np.float64, np.float32])\n    return safe_sparse_dot(X, self.components_.T, dense_output=self.dense_output)",
    "docstring": "Project the data by using matrix product with the random matrix. Parameters ---------- X : {ndarray, sparse matrix} of shape (n_samples, n_features) The input data to project into a smaller dimensional space. Returns ------- X_new : {ndarray, sparse matrix} of shape (n_samples, n_components) Projected array. It is a sparse matrix only when the input is sparse and .",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\random_projection.py",
    "ast_data": "FunctionDef name:transform arg:self arg:X arguments arg arg Call Assign Call Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "arr_to_chars",
    "source_code": "def arr_to_chars(arr):\n    dims = list(arr.shape)\n    if not dims:\n        dims = [1]\n    dims.append(int(arr.dtype.str[2:]))\n    arr = np.ndarray(shape=dims, dtype=arr_dtype_number(arr, 1), buffer=arr)\n    empties = [arr == np.array('', dtype=arr.dtype)]\n    if not np.any(empties):\n        return arr\n    arr = arr.copy()\n    arr[tuple(empties)] = ' '\n    return arr",
    "docstring": "Convert string array to char array",
    "type": "function",
    "file_path": "scipy\\scipy\\io\\matlab\\_miobase.py",
    "ast_data": "FunctionDef name:arr_to_chars arg:arr arguments arg Assign Call If Assign Call Call Assign Call Call Assign Compare Call If Call Return return:yes Assign Call Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_grad_fn",
    "source_code": "def _grad_fn(func_graph, grads):\n    assert len(func_graph.outputs) == len(grads)\n    ys = []\n    grad_ys = []\n    for y, grad_y in zip(func_graph.outputs, grads):\n        if not backprop_util.IsTrainable(y):\n            continue\n        ys.append(y)\n        grad_ys.append(grad_y)\n    result = gradients_util._GradientsHelper(ys, func_graph.inputs, grad_ys=grad_ys, src_graph=func_graph)\n    return result",
    "docstring": "The gradient function for each conditional branch. This function builds the gradient graph of the corresponding forward-pass conditional branch in . This is done by differentiating func_graph's outputs w.r.t. its inputs. Args: func_graph: FuncGraph. The corresponding forward-pass function. grads: The list of input gradient Tensors. Returns: The output gradient Tensors.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\cond_v2.py",
    "ast_data": "FunctionDef name:_grad_fn arg:func_graph arg:grads arguments arg arg Compare Call Call Assign Assign For Call If Call Call Call Assign Call Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "predict",
    "source_code": "def predict(self, X):\n    y = super().predict(X)\n    return np.asarray(y, dtype=np.intp)",
    "docstring": "Perform classification on samples in X. For a one-class model, +1 or -1 is returned. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) or (n_samples_test, n_samples_train) For kernel=\"precomputed\", the expected shape of X is (n_samples_test, n_samples_train). Returns ------- y_pred : ndarray of shape (n_samples,) Class labels for samples in X.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\svm\\_classes.py",
    "ast_data": "FunctionDef name:predict arg:self arg:X arguments arg arg Assign Call Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "get_optimizer_experimental_options",
    "source_code": "@tf_export('config.optimizer.get_experimental_options')\ndef get_optimizer_experimental_options():\n    return context.context().get_optimizer_experimental_options()",
    "docstring": "Get experimental optimizer options. Refer to tf.config.optimizer.set_experimental_options for a list of current options. Note that optimizations are only applied in graph mode, (within tf.function). In addition, as these are experimental options, the list is subject to change. Returns: Dictionary of configured experimental optimizer options",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\config.py",
    "ast_data": "FunctionDef name:get_optimizer_experimental_options arguments Return return:yes Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "Sortable",
    "source_code": "class Sortable(typing.Protocol):\n\n    def __lt__(self, other: typing.Self) -> bool:\n        ...",
    "docstring": "Anything that can be used as a list.sort() key (int/tuple/etc)",
    "type": "class",
    "file_path": "pytorch\\torch\\_inductor\\choices.py",
    "ast_data": "ClassDef name:Sortable FunctionDef name:__lt__ arg:self arg:other arguments arg arg"
  },
  {
    "library": "numpy",
    "name": "_multi_dot_three",
    "source_code": "def _multi_dot_three(A, B, C, out=None):\n    a0, a1b0 = A.shape\n    b1c0, c1 = C.shape\n    cost1 = a0 * b1c0 * (a1b0 + c1)\n    cost2 = a1b0 * c1 * (a0 + b1c0)\n    if cost1 < cost2:\n        return dot(dot(A, B), C, out=out)\n    else:\n        return dot(A, dot(B, C), out=out)",
    "docstring": "Find the best order for three arrays and do the multiplication. For three arguments is approximately 15 times faster than",
    "type": "function",
    "file_path": "numpy\\numpy\\linalg\\_linalg.py",
    "ast_data": "FunctionDef name:_multi_dot_three arg:A arg:B arg:C arg:out arguments arg arg arg arg Assign Assign Assign Assign If Compare Return return:yes Call Call Return return:yes Call Call"
  },
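The cost comparison in `_multi_dot_three` follows directly from the matrix dimensions; here is a small check of both parenthesization costs, with shapes chosen so the difference is stark:

```python
import numpy as np

A = np.ones((100, 2))
B = np.ones((2, 100))
C = np.ones((100, 5))

a0, a1b0 = A.shape
b1c0, c1 = C.shape
cost1 = a0 * b1c0 * (a1b0 + c1)  # (A @ B) @ C
cost2 = a1b0 * c1 * (a0 + b1c0)  # A @ (B @ C)
print(cost1, cost2)  # 70000 2000 -> A @ (B @ C) is far cheaper here

# np.linalg.multi_dot picks the cheaper order automatically.
result = np.linalg.multi_dot([A, B, C])
print(result.shape)  # (100, 5)
```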
  {
    "library": "pytorch",
    "name": "all_paths",
    "source_code": "def all_paths(self, src: str, dst: str):\n    result_graph = DiGraph()\n    forward_reachable_from_src = self.forward_transitive_closure(src)\n    if dst not in forward_reachable_from_src:\n        return result_graph\n    working_set = deque(dst)\n    while len(working_set) > 0:\n        cur = working_set.popleft()\n        for n in self.predecessors(cur):\n            if n in forward_reachable_from_src:\n                result_graph.add_edge(n, cur)\n                working_set.append(n)\n    return result_graph.to_dot()",
    "docstring": "Returns a subgraph rooted at src that shows all the paths to dst.",
    "type": "method",
    "file_path": "pytorch\\torch\\package\\_digraph.py",
    "ast_data": "FunctionDef name:all_paths arg:self arg:src arg:dst arguments arg arg arg Assign Call Assign Call If Compare Return return:yes Assign Call While Compare Call Assign Call For Call If Compare Call Call Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "RotatedEllipse01",
    "source_code": "class RotatedEllipse01(Benchmark):\n\n    def __init__(self, dimensions=2):\n        Benchmark.__init__(self, dimensions)\n        self._bounds = list(zip([-500.0] * self.N, [500.0] * self.N))\n        self.custom_bounds = ([-2.0, 2.0], [-2.0, 2.0])\n        self.global_optimum = [[0.0, 0.0]]\n        self.fglob = 0.0\n\n    def fun(self, x, *args):\n        self.nfev += 1\n        return 7.0 * x[0] ** 2.0 - 6.0 * sqrt(3) * x[0] * x[1] + 13 * x[1] ** 2.0",
    "docstring": "Rotated Ellipse 1 objective function. This class defines the Rotated Ellipse 1 [1]_ global optimization problem. This is a unimodal minimization problem defined as follows: .. math:: f_{\\text{RotatedEllipse01}}(x) = 7x_1^2 - 6 \\sqrt{3} x_1x_2 + 13x_2^2 with :math: for :math:. *Global optimum*: :math: for :math: .. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions For Global Optimization Problems Int. Journal of Mathematical Modelling and Numerical Optimisation, 2013, 4, 150-194.",
    "type": "class",
    "file_path": "scipy\\benchmarks\\benchmarks\\go_benchmark_functions\\go_funcs_R.py",
    "ast_data": "ClassDef name:RotatedEllipse01 FunctionDef name:__init__ arg:self arg:dimensions arguments arg arg Call Assign Call Call Assign Assign Assign FunctionDef name:fun arg:self arg:x arguments arg arg arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "_get_torch_jit_trace_forward_signature",
    "source_code": "def _get_torch_jit_trace_forward_signature(mod: torch.nn.Module) -> inspect.Signature:\n    ast_mod = ast.parse(mod.code)\n    ast_func_def: ast.FunctionDef = ast_mod.body[0]\n    arg_type_map = {'args': Parameter.POSITIONAL_OR_KEYWORD}\n    param_list = []\n    for arg_type, param_type in arg_type_map.items():\n        arg_name_list = [a.arg for a in getattr(ast_func_def.args, arg_type)]\n        for arg_name in arg_name_list:\n            if arg_name == 'self':\n                continue\n            param_list.append(inspect.Parameter(arg_name, param_type))\n    return inspect.Signature(parameters=param_list)",
    "docstring": "Get source code and parse argument names using AST. The function returns a signature of the forward() function. # TODO: Directly provide inspect.signature compatible TS-d module.",
    "type": "function",
    "file_path": "pytorch\\torch\\_export\\utils.py",
    "ast_data": "FunctionDef name:_get_torch_jit_trace_forward_signature arg:mod arguments arg Assign Call Assign Assign For Call Assign Call For If Compare Call Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "rename_v2",
    "source_code": "@tf_export('io.gfile.rename')\ndef rename_v2(src, dst, overwrite=False):\n    _pywrap_file_io.RenameFile(compat.path_to_bytes(src), compat.path_to_bytes(dst), overwrite)",
    "docstring": "Rename or move a file / directory. Args: src: string, pathname for a file dst: string, pathname to which the file needs to be moved overwrite: boolean, if false it's an error for to be occupied by an existing file. Raises: errors.OpError: If the operation fails.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\lib\\io\\file_io.py",
    "ast_data": "FunctionDef name:rename_v2 arg:src arg:dst arg:overwrite arguments arg arg arg Call Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "is_nested_or_composite",
    "source_code": "def is_nested_or_composite(seq):\n    return _is_nested_or_composite(seq)",
    "docstring": "Returns true if its input is a nested structure or a composite. Refer to [tf.nest]( for the definition of a nested structure. Args: seq: the value to test. Returns: True if the input is a nested structure or a composite.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\util\\nest.py",
    "ast_data": "FunctionDef name:is_nested_or_composite arg:seq arguments arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_hash_file",
    "source_code": "def _hash_file(fpath, algorithm='sha256', chunk_size=65535):\n    if isinstance(algorithm, str):\n        hasher = _resolve_hasher(algorithm)\n    else:\n        hasher = algorithm\n    with open(fpath, 'rb') as fpath_file:\n        for chunk in iter(lambda: fpath_file.read(chunk_size), b''):\n            hasher.update(chunk)\n    return hasher.hexdigest()",
    "docstring": "Calculates a file sha256 or md5 hash. Example: Args: fpath: path to the file being validated algorithm: hash algorithm, one of , , or . The default detects the hash algorithm in use. chunk_size: Bytes to read at a time, important for large files. Returns: The file hash",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\utils\\data_utils.py",
    "ast_data": "FunctionDef name:_hash_file arg:fpath arg:algorithm arg:chunk_size arguments arg arg arg If Call Assign Call Assign With Call For Call arguments Call Call Return return:yes Call"
  },
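A minimal sketch of the same chunked-hashing pattern using only the standard library (the `hash_file` name is hypothetical); reading fixed-size chunks keeps memory flat for large files:

```python
import hashlib
import tempfile

def hash_file(path, algorithm="sha256", chunk_size=65535):
    hasher = hashlib.new(algorithm)
    with open(path, "rb") as f:
        # iter(callable, sentinel) stops once read() returns b"" at EOF.
        for chunk in iter(lambda: f.read(chunk_size), b""):
            hasher.update(chunk)
    return hasher.hexdigest()

with tempfile.NamedTemporaryFile(delete=False) as f:
    f.write(b"hello world")
print(hash_file(f.name))  # b94d27b9934d3e08... (sha256 of b"hello world")
```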
  {
    "library": "matplotlib",
    "name": "rotated",
    "source_code": "def rotated(self, *, deg=None, rad=None):\n    if deg is None and rad is None:\n        raise ValueError('One of deg or rad is required')\n    if deg is not None and rad is not None:\n        raise ValueError('Only one of deg and rad can be supplied')\n    new_marker = MarkerStyle(self)\n    if new_marker._user_transform is None:\n        new_marker._user_transform = Affine2D()\n    if deg is not None:\n        new_marker._user_transform.rotate_deg(deg)\n    if rad is not None:\n        new_marker._user_transform.rotate(rad)\n    return new_marker",
    "docstring": "Return a new version of this marker rotated by specified angle. Parameters ---------- deg : float, optional Rotation angle in degrees. rad : float, optional Rotation angle in radians. .. note:: You must specify exactly one of deg or rad.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\markers.py",
    "ast_data": "FunctionDef name:rotated arg:self arguments arg arg arg If BoolOp Compare Compare Raise Call If BoolOp Compare Compare Raise Call Assign Call If Compare Assign Call If Compare Call If Compare Call Return return:yes"
  },
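Since `rotated` returns a new `MarkerStyle` and leaves the original untouched, usage looks like this (a minimal sketch; the output filename is arbitrary):

```python
import matplotlib.pyplot as plt
from matplotlib.markers import MarkerStyle

fig, ax = plt.subplots()
m = MarkerStyle("^")
ax.plot(0, 0, marker=m, markersize=20)                  # original marker
ax.plot(1, 0, marker=m.rotated(deg=45), markersize=20)  # rotated copy
ax.set_xlim(-1, 2)
fig.savefig("rotated_markers.png")
```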
  {
    "library": "tensorflow",
    "name": "GetContainingWhileContext",
    "source_code": "def GetContainingWhileContext(ctxt, stop_ctxt=None):\n    while ctxt:\n        if ctxt.IsWhileContext() or ctxt == stop_ctxt:\n            return ctxt\n        ctxt = ctxt.outer_context\n    return None",
    "docstring": "Returns the first ancestor WhileContext of . Returns if is a WhileContext, or None if is not in a while loop. Args: ctxt: ControlFlowContext stop_ctxt: ControlFlowContext, optional. If provided, the search will end if it sees stop_ctxt. Returns: if is a WhileContext, the most nested WhileContext containing , or None if is not in a while loop. If is not , this returns if it matches in its traversal.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\utils\\control_flow_util.py",
    "ast_data": "FunctionDef name:GetContainingWhileContext arg:ctxt arg:stop_ctxt arguments arg arg While If BoolOp Call Compare Return return:yes Assign Return return:no"
  },
  {
    "library": "cherrypy",
    "name": "setup",
    "source_code": "@classmethod\ndef setup(cls, **kwargs):\n    kwargs['storage_path'] = os.path.abspath(kwargs['storage_path'])\n    for k, v in kwargs.items():\n        setattr(cls, k, v)",
    "docstring": "Set up the storage system for file-based sessions. This should only be called once per process; this will be done automatically when using sessions.init (as the built-in Tool does).",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\lib\\sessions.py",
    "ast_data": "FunctionDef name:setup arg:cls arguments arg arg Assign Call For Call Call"
  },
  {
    "library": "pytorch",
    "name": "_transform_uuid_to_ordinals",
    "source_code": "def _transform_uuid_to_ordinals(candidates: list[str], uuids: list[str]) -> list[int]:\n\n    def uuid_to_ordinal(candidate: str, uuids: list[str]) -> int:\n        best_match = -1\n        for idx, uuid in enumerate(uuids):\n            if not uuid.startswith(candidate):\n                continue\n            if best_match != -1:\n                return -1\n            best_match = idx\n        return best_match\n    rc: list[int] = []\n    for candidate in candidates:\n        if torch.version.hip:\n            candidate = candidate.replace('GPU-', '', 1)\n        idx = uuid_to_ordinal(candidate, uuids)\n        if idx < 0:\n            break\n        if idx in rc:\n            return cast(list[int], [])\n        rc.append(idx)\n    return rc",
    "docstring": "Given the set of partial uuids and list of known uuids builds a set of ordinals excluding ambiguous partials IDs.",
    "type": "function",
    "file_path": "pytorch\\torch\\cuda\\__init__.py",
    "ast_data": "FunctionDef name:_transform_uuid_to_ordinals arg:candidates arg:uuids arguments arg arg FunctionDef name:uuid_to_ordinal arg:candidate arg:uuids arguments arg arg Assign For Call If Call If Compare Return return:yes Assign Return return:yes For If Assign Call Assign Call If Compare If Compare Return return:yes Call Call Return return:yes"
  },
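The prefix-matching rule is easiest to see in isolation; `to_ordinals` below is a hypothetical pared-down version of the function above (it omits the ROCm `GPU-` stripping):

```python
def to_ordinals(candidates, uuids):
    rc = []
    for cand in candidates:
        matches = [i for i, u in enumerate(uuids) if u.startswith(cand)]
        if len(matches) != 1:  # unknown or ambiguous partial: stop here
            break
        if matches[0] in rc:   # duplicate ordinal invalidates the mapping
            return []
        rc.append(matches[0])
    return rc

uuids = ["GPU-deadbeef", "GPU-deadc0de", "GPU-12345678"]
print(to_ordinals(["GPU-1234"], uuids))  # [2]
print(to_ordinals(["GPU-dead"], uuids))  # [] -- prefix matches two UUIDs
```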
  {
    "library": "seaborn",
    "name": "_standardize_color_sequence",
    "source_code": "def _standardize_color_sequence(self, colors: ArrayLike) -> ArrayLike:\n\n    def has_alpha(x):\n        return to_rgba(x) != to_rgba(x, 1)\n    if isinstance(colors, np.ndarray):\n        needs_alpha = colors.shape[1] == 4\n    else:\n        needs_alpha = any((has_alpha(x) for x in colors))\n    if needs_alpha:\n        return to_rgba_array(colors)\n    else:\n        return to_rgba_array(colors)[:, :3]",
    "docstring": "Convert color sequence to RGB(A) array, preserving but not adding alpha.",
    "type": "method",
    "file_path": "seaborn\\seaborn\\_core\\properties.py",
    "ast_data": "FunctionDef name:_standardize_color_sequence arg:self arg:colors arguments arg arg FunctionDef name:has_alpha arg:x arguments arg Return return:yes Compare Call Call If Call Assign Compare Assign Call Call If Return return:yes Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_slot_dict",
    "source_code": "def _slot_dict(self, slot_name):\n    named_slots = self._slots.get(slot_name, None)\n    if named_slots is None:\n        named_slots = {}\n        self._slots[slot_name] = named_slots\n    return named_slots",
    "docstring": "Returns a dict for caching slots created under the given name. Args: slot_name: Name for the slot. Returns: A dict that maps primary objects to the slot created for that variable, under the given slot name.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\training\\optimizer.py",
    "ast_data": "FunctionDef name:_slot_dict arg:self arg:slot_name arguments arg arg Assign Call If Compare Assign Assign Return return:yes"
  },
  {
    "library": "django",
    "name": "subclass_exception",
    "source_code": "def subclass_exception(name, bases, module, attached_to):\n    return type(name, bases, {'__module__': module, '__qualname__': '%s.%s' % (attached_to.__qualname__, name)})",
    "docstring": "Create exception subclass. Used by ModelBase below. The exception is created in a way that allows it to be pickled, assuming that the returned exception class will be added as an attribute to the 'attached_to' class.",
    "type": "function",
    "file_path": "django\\django\\db\\models\\base.py",
    "ast_data": "FunctionDef name:subclass_exception arg:name arg:bases arg:module arg:attached_to arguments arg arg arg arg Return return:yes Call"
  },
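The `__qualname__` assignment is what makes the dynamically created exception picklable: pickle resolves the class by walking the qualified name from the module. A self-contained sketch (the `Article` class is hypothetical):

```python
import pickle

def subclass_exception(name, bases, module, attached_to):
    return type(name, bases, {
        "__module__": module,
        "__qualname__": f"{attached_to.__qualname__}.{name}",
    })

class Article:
    pass

Article.DoesNotExist = subclass_exception(
    "DoesNotExist", (Exception,), __name__, Article
)

# Round-trips because pickle can find the class at Article.DoesNotExist.
err = pickle.loads(pickle.dumps(Article.DoesNotExist("missing")))
print(type(err).__qualname__)  # Article.DoesNotExist
```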
  {
    "library": "tensorflow",
    "name": "_global_batch_size",
    "source_code": "@property\ndef _global_batch_size(self):\n    return True",
    "docstring": "and use global batch size. assumes per-replica batching. Returns: Boolean.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\parameter_server_strategy.py",
    "ast_data": "FunctionDef name:_global_batch_size arg:self arguments arg Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "_should_stop",
    "source_code": "def _should_stop(self, scores):\n    reference_position = self.n_iter_no_change + 1\n    if len(scores) < reference_position:\n        return False\n    reference_score = scores[-reference_position] + self.tol\n    recent_scores = scores[-reference_position + 1:]\n    recent_improvements = [score > reference_score for score in recent_scores]\n    return not any(recent_improvements)",
    "docstring": "Return True (do early stopping) if the last n scores aren't better than the (n-1)th-to-last score, up to some tolerance.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\ensemble\\_hist_gradient_boosting\\gradient_boosting.py",
    "ast_data": "FunctionDef name:_should_stop arg:self arg:scores arguments arg arg Assign If Compare Call Return return:yes Assign Assign Assign Compare Return return:yes Call"
  },
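The same rule, sketched standalone (a hypothetical `should_stop` with the class attributes turned into parameters): higher scores are better, and the recent window is compared against the score from `n_iter_no_change` iterations ago plus `tol`:

```python
def should_stop(scores, n_iter_no_change=5, tol=1e-7):
    reference_position = n_iter_no_change + 1
    if len(scores) < reference_position:
        return False  # not enough history yet
    reference_score = scores[-reference_position] + tol
    recent = scores[-reference_position + 1:]
    return not any(score > reference_score for score in recent)

print(should_stop([1, 2, 3, 4, 5, 6, 7]))     # False: still improving
print(should_stop([1, 2, 3, 3, 3, 3, 3, 3]))  # True: plateaued for 5 rounds
```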
  {
    "library": "pytorch",
    "name": "gen_commit_message",
    "source_code": "def gen_commit_message(self, filter_ghstack: bool=False, ghstack_deps: Optional[list['GitHubPR']]=None) -> str:\n    approved_by_urls = ', '.join((prefix_with_github_url(login) for login in self.get_approved_by()))\n    msg_body = re.sub(RE_PR_CC_LINE, '', self.get_body())\n    if filter_ghstack:\n        msg_body = re.sub(RE_GHSTACK_DESC, '', msg_body)\n    msg = self.get_title() + f' (#{self.pr_num})\\n\\n'\n    msg += msg_body\n    msg += f'\\nPull Request resolved: {self.get_pr_url()}\\n'\n    msg += f'Approved by: {approved_by_urls}\\n'\n    if ghstack_deps:\n        msg += f'ghstack dependencies: {', '.join([f'\n    first_coauthor = True\n    for author_login, author_name in self.get_authors().items():\n        if author_login != self.get_pr_creator_login():\n            if first_coauthor:\n                msg, first_coauthor = (msg + '\\n', False)\n            msg += f'\\nCo-authored-by: {author_name}'\n    return msg",
    "docstring": "Fetches title and body from PR description adds reviewed by, pull request resolved and optionally filters out ghstack info",
    "type": "method",
    "file_path": "pytorch\\.github\\scripts\\trymerge.py",
    "ast_data": "FunctionDef name:gen_commit_message arg:self arg:filter_ghstack arg:ghstack_deps arguments arg arg arg Assign Call Call Call Assign Call Call If Assign Call Assign Call Call If Call Assign For Call Call If Compare Call If Assign Return return:yes"
  },
  {
    "library": "seaborn",
    "name": "Est",
    "source_code": "@dataclass\nclass Est(Stat):\n    func: str | Callable[[Vector], float] = 'mean'\n    errorbar: str | tuple[str, float] = ('ci', 95)\n    n_boot: int = 1000\n    seed: int | None = None\n    group_by_orient: ClassVar[bool] = True\n\n    def _process(self, data: DataFrame, var: str, estimator: EstimateAggregator) -> DataFrame:\n        res = estimator(data, var)\n        return pd.DataFrame([res])\n\n    def __call__(self, data: DataFrame, groupby: GroupBy, orient: str, scales: dict[str, Scale]) -> DataFrame:\n        boot_kws = {'n_boot': self.n_boot, 'seed': self.seed}\n        if 'weight' in data:\n            engine = WeightedAggregator(self.func, self.errorbar, **boot_kws)\n        else:\n            engine = EstimateAggregator(self.func, self.errorbar, **boot_kws)\n        var = {'x': 'y', 'y': 'x'}[orient]\n        res = groupby.apply(data, self._process, var, engine).dropna(subset=[var]).reset_index(drop=True)\n        res = res.fillna({f'{var}min': res[var], f'{var}max': res[var]})\n        return res",
    "docstring": "Calculate a point estimate and error bar interval. For more information about the various choices, see the :doc:. Additional variables: - **weight**: When passed to a layer that uses this stat, a weighted estimate will be computed. Note that use of weights currently limits the choice of function and error bar method to and , respectively. Parameters ---------- func : str or callable Name of a :class: method or a vector -> scalar function. errorbar : str, (str, float) tuple, or callable Name of errorbar method (one of \"ci\", \"pi\", \"se\" or \"sd\"), or a tuple with a method name ane a level parameter, or a function that maps from a vector to a (min, max) interval. n_boot : int Number of bootstrap samples to draw for \"ci\" errorbars. seed : int Seed for the PRNG used to draw bootstrap samples. Examples -------- .. include:: ../docstrings/objects.Est.rst",
    "type": "class",
    "file_path": "seaborn\\seaborn\\_stats\\aggregation.py",
    "ast_data": "ClassDef name:Est FunctionDef name:_process arg:self arg:data arg:var arg:estimator arguments arg arg arg arg Assign Call Return return:yes Call FunctionDef name:__call__ arg:self arg:data arg:groupby arg:orient arg:scales arguments arg arg arg arg arg Assign If Compare Assign Call Assign Call Assign Assign Call Call Call Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "gelu",
    "source_code": "@dispatch.add_dispatch_support\ndef gelu(x, approximate=False):\n    return nn.gelu(x, approximate)",
    "docstring": "Applies the Gaussian error linear unit (GELU) activation function. Gaussian error linear unit (GELU) computes bool0.5 * x * (1 + tanh(sqrt(2 / pi) * (x + 0.044715 * x^3)))approximateTruex * P(X <= x) = 0.5 * x * (1 + erf(x / sqrt(2)))P(X) ~ N(0, 1)approximateFalse`. Reference: - [Gaussian Error Linear Units (GELUs)](",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\activations.py",
    "ast_data": "FunctionDef name:gelu arg:x arg:approximate arguments arg arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "add_variable",
    "source_code": "def add_variable(trackable, name, shape=None, dtype=dtypes.float32, initializer=None, trainable=True):\n    return trackable._add_variable_with_custom_getter(name=name, shape=shape, dtype=dtype, initializer=initializer, getter=_default_getter, trainable=trainable)",
    "docstring": "Add a variable to a Trackable with no scope influence.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\checkpoint\\checkpoint.py",
    "ast_data": "FunctionDef name:add_variable arg:trackable arg:name arg:shape arg:dtype arg:initializer arg:trainable arguments arg arg arg arg arg arg Return return:yes Call"
  },
  {
    "library": "django",
    "name": "FormView",
    "source_code": "class FormView(TemplateResponseMixin, BaseFormView):\n    pass",
    "docstring": "A view for displaying a form and rendering a template response.",
    "type": "class",
    "file_path": "django\\django\\views\\generic\\edit.py",
    "ast_data": "ClassDef name:FormView"
  },
  {
    "library": "matplotlib",
    "name": "scaled",
    "source_code": "def scaled(self, sx, sy=None):\n    if sy is None:\n        sy = sx\n    new_marker = MarkerStyle(self)\n    _transform = new_marker._user_transform or Affine2D()\n    new_marker._user_transform = _transform.scale(sx, sy)\n    return new_marker",
    "docstring": "Return new marker scaled by specified scale factors. If *sy* is not given, the same scale is applied in both the *x*- and *y*-directions. Parameters ---------- sx : float *X*-direction scaling factor. sy : float, optional *Y*-direction scaling factor.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\markers.py",
    "ast_data": "FunctionDef name:scaled arg:self arg:sx arg:sy arguments arg arg arg If Compare Assign Assign Call Assign BoolOp Call Assign Call Return return:yes"
  },
  {
    "library": "cherrypy",
    "name": "read_into_file",
    "source_code": "def read_into_file(self, fp_out=None):\n    if fp_out is None:\n        fp_out = self.make_file()\n    self.read(fp_out=fp_out)\n    return fp_out",
    "docstring": "Read the request body into fp_out (or make_file() if None). Return fp_out.",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\_cpreqbody.py",
    "ast_data": "FunctionDef name:read_into_file arg:self arg:fp_out arguments arg arg If Compare Assign Call Call Return return:yes"
  },
  {
    "library": "scipy",
    "name": "entropy",
    "source_code": "def entropy(self):\n    log_pdet = self.cov_object.log_pdet\n    rank = self.cov_object.rank\n    return 0.5 * (rank * (_LOG_2PI + 1) + log_pdet)",
    "docstring": "Computes the differential entropy of the multivariate normal. Returns ------- h : scalar Entropy of the multivariate normal distribution",
    "type": "method",
    "file_path": "scipy\\scipy\\stats\\_multivariate.py",
    "ast_data": "FunctionDef name:entropy arg:self arguments arg Assign Assign Return return:yes"
  },
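For a full-rank covariance the pseudo-log-determinant reduces to the ordinary log-determinant, so the closed form h = 0.5 * (k * (log(2*pi) + 1) + log det(cov)) used above can be checked directly against scipy:

```python
import numpy as np
from scipy.stats import multivariate_normal

cov = np.array([[2.0, 0.3],
                [0.3, 1.0]])
k = cov.shape[0]

h_formula = 0.5 * (k * (np.log(2 * np.pi) + 1) + np.log(np.linalg.det(cov)))
h_scipy = multivariate_normal(mean=np.zeros(k), cov=cov).entropy()
print(np.isclose(h_formula, h_scipy))  # True
```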
  {
    "library": "django",
    "name": "decorator_from_middleware_with_args",
    "source_code": "def decorator_from_middleware_with_args(middleware_class):\n    return make_middleware_decorator(middleware_class)",
    "docstring": "Like decorator_from_middleware, but return a function that accepts the arguments to be passed to the middleware_class. Use like:: cache_page = decorator_from_middleware_with_args(CacheMiddleware) # ... @cache_page(3600) def my_view(request): # ...",
    "type": "function",
    "file_path": "django\\django\\utils\\decorators.py",
    "ast_data": "FunctionDef name:decorator_from_middleware_with_args arg:middleware_class arguments arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "get_group_key",
    "source_code": "def get_group_key(self, devices):\n    with self._lock:\n        devices_key = ','.join(devices)\n        if devices_key not in self._known_groups:\n            self._known_groups[devices_key] = self._get_new_group_key(devices)\n        return self._known_groups[devices_key]",
    "docstring": "Returns a group key for the list of local devices. The same group key is returned if the list of local devices is the same. Args: devices: a list of local canonical device strings in a collective group. Returns: a group key.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\cross_device_utils.py",
    "ast_data": "FunctionDef name:get_group_key arg:self arg:devices arguments arg arg With Assign Call If Compare Assign Call Return return:yes"
  },
  {
    "library": "scrapy",
    "name": "defers",
    "source_code": "def defers(func: Callable[_P, _T]) -> Callable[_P, Deferred[_T]]:\n\n    @wraps(func)\n    def wrapped(*a: _P.args, **kw: _P.kwargs) -> Deferred[_T]:\n        return maybeDeferred(func, *a, **kw)\n    return wrapped",
    "docstring": "Decorator to make sure a function always returns a deferred",
    "type": "function",
    "file_path": "scrapy\\scrapy\\utils\\decorators.py",
    "ast_data": "FunctionDef name:defers arg:func arguments arg FunctionDef name:wrapped arguments arg arg Return return:yes Call Call Return return:yes"
  },
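In the `defers` decorator, `maybeDeferred` does the work: it wraps a plain return value in an already-fired Deferred and routes exceptions into the errback chain. A small check (synchronous use only, so no reactor is needed):

```python
from functools import wraps
from twisted.internet.defer import Deferred, maybeDeferred

def defers(func):
    @wraps(func)
    def wrapped(*a, **kw):
        return maybeDeferred(func, *a, **kw)
    return wrapped

@defers
def plain(x):
    return x + 1  # plain value; maybeDeferred wraps it in a fired Deferred

d = plain(1)
print(isinstance(d, Deferred))  # True
d.addCallback(print)            # 2 (callback runs immediately here)
```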
  {
    "library": "pandas",
    "name": "set_pos",
    "source_code": "def set_pos(self, pos: int) -> None:\n    self.pos = pos\n    if pos is not None and self.typ is not None:\n        self.typ._v_pos = pos",
    "docstring": "set the position of this column in the Table",
    "type": "method",
    "file_path": "pandas\\pandas\\io\\pytables.py",
    "ast_data": "FunctionDef name:set_pos arg:self arg:pos arguments arg arg Assign If BoolOp Compare Compare Assign"
  },
  {
    "library": "tensorflow",
    "name": "functions_run_eagerly",
    "source_code": "@tf_export('config.functions_run_eagerly')\ndef functions_run_eagerly():\n    return RUN_FUNCTIONS_EAGERLY",
    "docstring": "Returns the value of the setting.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\polymorphic_function\\eager_function_run.py",
    "ast_data": "FunctionDef name:functions_run_eagerly arguments Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "_cached_roots_legendre",
    "source_code": "def _cached_roots_legendre(n):\n    if n in _cached_roots_legendre.cache:\n        return _cached_roots_legendre.cache[n]\n    _cached_roots_legendre.cache[n] = roots_legendre(n)\n    return _cached_roots_legendre.cache[n]",
    "docstring": "Cache roots_legendre results to speed up calls of the fixed_quad function.",
    "type": "function",
    "file_path": "scipy\\scipy\\integrate\\_quadrature.py",
    "ast_data": "FunctionDef name:_cached_roots_legendre arg:n arguments arg If Compare Return return:yes Assign Call Return return:yes"
  },
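The cache in `_cached_roots_legendre` lives as an attribute on the function object itself; a runnable sketch of the same pattern (the `cached_roots` name is hypothetical):

```python
from scipy.special import roots_legendre

def cached_roots(n):
    if n in cached_roots.cache:
        return cached_roots.cache[n]
    cached_roots.cache[n] = roots_legendre(n)
    return cached_roots.cache[n]

cached_roots.cache = {}  # attached to the function, shared across calls

x, w = cached_roots(5)                           # computes and stores
assert cached_roots(5) is cached_roots.cache[5]  # second call hits cache
```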
  {
    "library": "pytorch",
    "name": "_prepare_input_for_onnx",
    "source_code": "def _prepare_input_for_onnx(args, kwargs, remained_onnx_input_idx: Sequence[int] | None, flatten: bool):\n    onnx_inputs = _prepare_input_for_export(args, kwargs)\n    if flatten:\n        onnx_inputs, _ = torch.jit._flatten(onnx_inputs)\n    elif onnx_inputs and onnx_inputs[-1] == {}:\n        onnx_inputs = onnx_inputs[:-1]\n    if remained_onnx_input_idx is not None:\n        return [onnx_inputs[i] for i in remained_onnx_input_idx]\n    else:\n        return onnx_inputs",
    "docstring": "Prepare input for ONNX model execution in ONNX backend. Any future changes/formatting to the input before dispatching to the ONNX backend run should be made in this function. Args: args: positional arguments for PyTorch model forward method. kwargs: keyword arguments for PyTorch model forward method. remained_onnx_input_idx: indices of inputs to be used for ONNX model execution. flatten: whether to flatten the input before dispatching to the ONNX model execution. Returns: onnx_inputs: positional arguments for ONNX model execution in ONNX backend.",
    "type": "function",
    "file_path": "pytorch\\torch\\onnx\\verification.py",
    "ast_data": "FunctionDef name:_prepare_input_for_onnx arg:args arg:kwargs arg:remained_onnx_input_idx arg:flatten arguments arg arg arg arg Assign Call If Assign Call If BoolOp Compare Assign If Compare Return return:yes Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "add_check_numerics_ops",
    "source_code": "@tf_export(v1=['add_check_numerics_ops'])\ndef add_check_numerics_ops():\n    if context.executing_eagerly():\n        raise RuntimeError(\"add_check_numerics_ops() is not compatible with eager execution. To check for Inf's and NaN's under eager execution, call tf.debugging.enable_check_numerics() once before executing the checked operations.\")\n    check_op = []\n    for op in ops.get_default_graph().get_operations():\n        for output in op.outputs:\n            if output.dtype in [dtypes.float16, dtypes.float32, dtypes.float64]:\n                if op._get_control_flow_context() is not None:\n                    raise ValueError('`tf.add_check_numerics_ops() is not compatible with TensorFlow control flow operations such as `tf.cond()` or `tf.while_loop()`.')\n                message = op.name + ':' + str(output.value_index)\n                with ops.control_dependencies(check_op):\n                    check_op = [array_ops.check_numerics(output, message=message)]\n    return control_flow_ops.group(*check_op)",
    "docstring": "Connect a to every floating point tensor. operations themselves are added for each , , or tensor in the current default graph. For all ops in the graph, the op for all of its (, , or ) inputs is guaranteed to run before the op on any of its outputs. Note: This API is not compatible with the use of or , and will raise a if you attempt to call it in such a graph. Returns: A op depending on all ops added. Raises: ValueError: If the graph contains any numeric operations in a control flow structure. RuntimeError: If called with eager execution enabled. @compatibility(eager) Not compatible with eager execution. To check for s and s under eager execution, call once before executing the checked operations. @end_compatibility",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\numerics.py",
    "ast_data": "FunctionDef name:add_check_numerics_ops arguments If Call Raise Call Assign For Call Call For If Compare If Compare Call Raise Call Assign Call With Call Assign Call Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "expect_partial",
    "source_code": "def expect_partial(self):\n    return self",
    "docstring": "Silence warnings about incomplete checkpoint restores.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\checkpoint\\checkpoint.py",
    "ast_data": "FunctionDef name:expect_partial arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_get_raw_feature_as_tensor",
    "source_code": "def _get_raw_feature_as_tensor(self, key):\n    raw_feature = self._features[key]\n    feature_tensor = sparse_tensor_lib.convert_to_tensor_or_sparse_tensor(raw_feature)\n\n    def expand_dims(input_tensor):\n        if isinstance(input_tensor, sparse_tensor_lib.SparseTensor):\n            return sparse_ops.sparse_reshape(input_tensor, [array_ops.shape(input_tensor)[0], 1])\n        else:\n            return array_ops.expand_dims(input_tensor, -1)\n    rank = feature_tensor.get_shape().ndims\n    if rank is not None:\n        if rank == 0:\n            raise ValueError('Feature (key: {}) cannot have rank 0. Given: {}'.format(key, feature_tensor))\n        return feature_tensor if rank != 1 else expand_dims(feature_tensor)\n    with ops.control_dependencies([check_ops.assert_positive(array_ops.rank(feature_tensor), message='Feature (key: {}) cannot have rank 0. Given: {}'.format(key, feature_tensor))]):\n        return cond.cond(math_ops.equal(1, array_ops.rank(feature_tensor)), lambda: expand_dims(feature_tensor), lambda: feature_tensor)",
    "docstring": "Gets the raw_feature (keyed by ) as . The raw feature is converted to (sparse) tensor and maybe expand dim. For both and , the rank will be expanded (to 2) if the rank is 1. This supports dynamic rank also. For rank 0 raw feature, will error out as it is not supported. Args: key: A key to access the raw feature. Returns: A or . Raises: ValueError: if the raw feature has rank 0.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\feature_column\\feature_column.py",
    "ast_data": "FunctionDef name:_get_raw_feature_as_tensor arg:self arg:key arguments arg arg Assign Assign Call FunctionDef name:expand_dims arg:input_tensor arguments arg If Call Return return:yes Call Call Return return:yes Call Assign Call If Compare If Compare Raise Call Call Return return:yes Compare Call With Call Call Call Call Return return:yes Call Call Call arguments Call arguments"
  },
  {
    "library": "scipy",
    "name": "__call__",
    "source_code": "def __call__(self, n, p, seed=None):\n    return multinomial_frozen(n, p, seed)",
    "docstring": "Create a frozen multinomial distribution. See for more information.",
    "type": "method",
    "file_path": "scipy\\scipy\\stats\\_multivariate.py",
    "ast_data": "FunctionDef name:__call__ arg:self arg:n arg:p arg:seed arguments arg arg arg arg Return return:yes Call"
  },
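A brief usage sketch for the frozen-distribution record above; the pmf value shown is approximate:

```python
# Calling scipy.stats.multinomial freezes n and p into a distribution object.
from scipy.stats import multinomial

rv = multinomial(n=10, p=[0.2, 0.3, 0.5])  # dispatches to multinomial_frozen
print(rv.pmf([2, 3, 5]))                   # ~0.0851
```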
  {
    "library": "pandas",
    "name": "validate_all_hashable",
    "source_code": "def validate_all_hashable(*args, error_name: str | None=None) -> None:\n    if not all((is_hashable(arg) for arg in args)):\n        if error_name:\n            raise TypeError(f'{error_name} must be a hashable type')\n        raise TypeError('All elements must be hashable')",
    "docstring": "Return None if all args are hashable, else raise a TypeError. Parameters ---------- *args Arguments to validate. error_name : str, optional The name to use if error Raises ------ TypeError : If an argument is not hashable Returns ------- None",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\dtypes\\common.py",
    "ast_data": "FunctionDef name:validate_all_hashable arguments arg arg If Call Call If Raise Call Raise Call"
  },
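A small sketch of the validator's two error paths; the import path is taken from the record's file_path and is internal, so treat it as an assumption:

```python
from pandas.core.dtypes.common import validate_all_hashable

validate_all_hashable("a", 1, (2, 3))  # all hashable -> returns None
try:
    validate_all_hashable(["x"], error_name="Series.name")
except TypeError as e:
    print(e)  # Series.name must be a hashable type
```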
  {
    "library": "django",
    "name": "geotransform",
    "source_code": "@property\ndef geotransform(self):\n    gtf = (c_double * 6)()\n    capi.get_ds_geotransform(self._ptr, byref(gtf))\n    return list(gtf)",
    "docstring": "Return the geotransform of the data source. Return the default geotransform if it does not exist or has not been set previously. The default is [0.0, 1.0, 0.0, 0.0, 0.0, -1.0].",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\gdal\\raster\\source.py",
    "ast_data": "FunctionDef name:geotransform arg:self arguments arg Assign Call Call Call Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "_results_select",
    "source_code": "def _results_select(full_output, r, method):\n    x, funcalls, iterations, flag = r\n    if full_output:\n        results = RootResults(root=x, iterations=iterations, function_calls=funcalls, flag=flag, method=method)\n        return (x, results)\n    return x",
    "docstring": "Select from a tuple of (root, funccalls, iterations, flag)",
    "type": "function",
    "file_path": "scipy\\scipy\\optimize\\_zeros_py.py",
    "ast_data": "FunctionDef name:_results_select arg:full_output arg:r arg:method arguments arg arg arg Assign If Assign Call Return return:yes Return return:yes"
  },
  {
    "library": "pandas",
    "name": "_normalize_json_ordered",
    "source_code": "def _normalize_json_ordered(data: dict[str, Any], separator: str) -> dict[str, Any]:\n    top_dict_ = {k: v for k, v in data.items() if not isinstance(v, dict)}\n    nested_dict_ = _normalize_json(data={k: v for k, v in data.items() if isinstance(v, dict)}, key_string='', normalized_dict={}, separator=separator)\n    return {**top_dict_, **nested_dict_}",
    "docstring": "Order the top level keys and then recursively go to depth Parameters ---------- data : dict or list of dicts separator : str, default '.' Nested records will generate names separated by sep, e.g., for sep='.', { 'foo' : { 'bar' : 0 } } -> foo.bar Returns ------- dict or list of dicts, matching",
    "type": "function",
    "file_path": "pandas\\pandas\\io\\json\\_normalize.py",
    "ast_data": "FunctionDef name:_normalize_json_ordered arg:data arg:separator arguments arg arg Assign Call Call Assign Call Call Call Return return:yes"
  },
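Since `_normalize_json` itself is not shown, here is a self-contained re-implementation sketch of the separator-joining behavior the docstring describes; `flatten` is a hypothetical helper, not a pandas API:

```python
def flatten(d, sep=".", prefix=""):
    """Recursively join nested dict keys with sep, e.g. {'foo': {'bar': 0}} -> {'foo.bar': 0}."""
    out = {}
    for k, v in d.items():
        key = f"{prefix}{sep}{k}" if prefix else k
        if isinstance(v, dict):
            out.update(flatten(v, sep, key))
        else:
            out[key] = v
    return out

print(flatten({"id": 1, "meta": {"a": 0, "b": {"c": 2}}}))
# {'id': 1, 'meta.a': 0, 'meta.b.c': 2}
```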
  {
    "library": "scikit-learn",
    "name": "fit_predict",
    "source_code": "@available_if(_check_novelty_fit_predict)\ndef fit_predict(self, X, y=None):\n    return self.fit(X)._predict()",
    "docstring": "Fit the model to the training set X and return the labels. **Not available for novelty detection (when novelty is set to True).** Label is 1 for an inlier and -1 for an outlier according to the LOF score and the contamination parameter. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features), default=None The query sample or samples to compute the Local Outlier Factor w.r.t. the training samples. y : Ignored Not used, present for API consistency by convention. Returns ------- is_inlier : ndarray of shape (n_samples,) Returns -1 for anomalies/outliers and 1 for inliers.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\neighbors\\_lof.py",
    "ast_data": "FunctionDef name:fit_predict arg:self arg:X arg:y arguments arg arg arg Return return:yes Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "set_backoff_factor",
    "source_code": "def set_backoff_factor(self, new_factor: float) -> None:\n    self._backoff_factor = new_factor",
    "docstring": "Set a new scale backoff factor. Args: new_scale (float): Value to use as the new scale backoff factor.",
    "type": "method",
    "file_path": "pytorch\\torch\\amp\\grad_scaler.py",
    "ast_data": "FunctionDef name:set_backoff_factor arg:self arg:new_factor arguments arg arg Assign"
  },
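A brief usage sketch; the `GradScaler` construction details are assumptions (`enabled=False` keeps the example CPU-friendly):

```python
import torch

scaler = torch.amp.GradScaler("cuda", enabled=False)
scaler.set_backoff_factor(0.25)      # shrink the scale more aggressively after inf/nan steps
print(scaler.get_backoff_factor())   # 0.25
```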
  {
    "library": "tensorflow",
    "name": "_parse_config_to_function",
    "source_code": "def _parse_config_to_function(config, custom_objects, func_attr_name, func_type_attr_name, module_attr_name):\n    globs = globals()\n    module = config.pop(module_attr_name, None)\n    if module in sys.modules:\n        globs.update(sys.modules[module].__dict__)\n    elif module is not None:\n        warnings.warn('{} is not loaded, but a layer uses it. It may cause errors.'.format(module), UserWarning)\n    if custom_objects:\n        globs.update(custom_objects)\n    function_type = config.pop(func_type_attr_name)\n    if function_type == 'function':\n        function = generic_utils.deserialize_keras_object(config[func_attr_name], custom_objects=custom_objects, printable_module_name='function in wrapper')\n    elif function_type == 'lambda':\n        function = generic_utils.func_load(config[func_attr_name], globs=globs)\n    else:\n        raise TypeError('Unknown function type:', function_type)\n    return function",
    "docstring": "Reconstruct the function from the config.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\layers\\legacy_rnn\\rnn_cell_wrapper_impl.py",
    "ast_data": "FunctionDef name:_parse_config_to_function arg:config arg:custom_objects arg:func_attr_name arg:func_type_attr_name arg:module_attr_name arguments arg arg arg arg arg Assign Call Assign Call If Compare Call If Compare Call Call If Call Assign Call If Compare Assign Call If Compare Assign Call Raise Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "hardtanh",
    "source_code": "@register_decomposition(aten.hardtanh)\n@_inplace_wrapper\n@out_wrapper()\n@elementwise_unary_scalar_wrapper\n@elementwise_type_promotion_wrapper(type_promoting_args='a', type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT)\ndef hardtanh(a: TensorLikeType, min_val: NumberType=-1, max_val: NumberType=1, inplace: bool=False) -> TensorLikeType:\n    if inplace:\n        raise NotImplementedError\n    if utils.is_boolean_dtype(a.dtype):\n        raise RuntimeError('Bool inputs not supported for hardtanh')\n    if utils.is_integer_dtype(a.dtype):\n        min_val = int(min_val)\n        max_val = int(max_val)\n        if not (a.dtype != torch.uint8 or (min_val >= 0 and max_val >= 0)):\n            raise RuntimeError('Cannot do hardtanh on an unsigned type with negative limits')\n    if min_val > max_val:\n        raise ValueError('min_val cannot be greater than max_val')\n    return torch.clamp(a, min_val, max_val)",
    "docstring": "Reference implementation of torch.nn.functional.hardtanh",
    "type": "function",
    "file_path": "pytorch\\torch\\_refs\\nn\\functional\\__init__.py",
    "ast_data": "FunctionDef name:hardtanh arg:a arg:min_val arg:max_val arg:inplace arguments arg arg arg arg If Raise If Call Raise Call If Call Assign Call Assign Call If BoolOp Compare BoolOp Compare Compare Raise Call If Compare Raise Call Return return:yes Call Call Call Call"
  },
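The reference above reduces to a clamp; a tiny check against the public `torch.nn.functional.hardtanh`:

```python
import torch
import torch.nn.functional as F

a = torch.tensor([-2.0, -0.5, 0.5, 2.0])
print(F.hardtanh(a))          # tensor([-1.0000, -0.5000,  0.5000,  1.0000])
print(torch.clamp(a, -1, 1))  # same result, per the reference implementation
```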
  {
    "library": "pytorch",
    "name": "add_op_with_blocks",
    "source_code": "def add_op_with_blocks(graph_context: GraphContext, opname: str, *inputs: _C.Value, outputs: int=1, n_blocks: int=1, **attributes) -> tuple[Any, tuple[GraphContext, ...], _C.Node]:\n    output_values = graph_context.op(opname, *inputs, outputs=outputs, **attributes)\n    if isinstance(output_values, Sequence):\n        node = output_values[0].node()\n    else:\n        node = output_values.node()\n    new_contexts = []\n    for _ in range(n_blocks):\n        new_block = node.addBlock()\n        new_context = dataclasses.replace(graph_context, block=new_block)\n        new_contexts.append(new_context)\n    return (output_values, tuple(new_contexts), node)",
    "docstring": "Creates an ONNX operator \"opname\", taking inputs and attributes. Args: graph_context: The context for the current graph. opname: The ONNX operator name, e.g., or , or an operator qualified with a namespace, e.g., . inputs: The inputs to the operator. outputs: The number of outputs this operator returns. By default an operator is assumed to return a single output. If is greater than one, this functions returns a tuple of output , representing each output of the ONNX operator in order. n_blocks: The number of sub-blocks to create in the node. attributes: The attributes of the ONNX operator. Returns: A tuple of (output_values, new_contexts, node) where: output_values: One or more output value of this operator (see the keyword argument for multi-return nodes). new_contexts: A tuple of new graph contexts for each sub-block. node: The node representing the operator.",
    "type": "function",
    "file_path": "pytorch\\torch\\onnx\\_internal\\jit_utils.py",
    "ast_data": "FunctionDef name:add_op_with_blocks arg:graph_context arg:opname arguments arg arg arg arg arg arg Assign Call If Call Assign Call Assign Call Assign For Call Assign Call Assign Call Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "_get_uncompiled_header",
    "source_code": "@classmethod\ndef _get_uncompiled_header(cls, device: str) -> str | None:\n    return None",
    "docstring": "Header precompiling is currently disabled for halide.",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\codecache.py",
    "ast_data": "FunctionDef name:_get_uncompiled_header arg:cls arg:device arguments arg arg Return return:no"
  },
  {
    "library": "numpy",
    "name": "hermeweight",
    "source_code": "def hermeweight(x):\n    w = np.exp(-0.5 * x ** 2)\n    return w",
    "docstring": "Weight function of the Hermite_e polynomials. The weight function is :math: and the interval of integration is :math:. the HermiteE polynomials are orthogonal, but not normalized, with respect to this weight function. Parameters ---------- x : array_like Values at which the weight function will be computed. Returns ------- w : ndarray The weight function at .",
    "type": "function",
    "file_path": "numpy\\numpy\\polynomial\\hermite_e.py",
    "ast_data": "FunctionDef name:hermeweight arg:x arguments arg Assign Call Return return:yes"
  },
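A quick numeric check of the weight function against its definition:

```python
import numpy as np
from numpy.polynomial.hermite_e import hermeweight

x = np.array([0.0, 1.0, 2.0])
print(hermeweight(x))       # [1.         0.60653066 0.13533528]
print(np.exp(-0.5 * x**2))  # identical, by definition of the weight
```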
  {
    "library": "pytorch",
    "name": "rand",
    "source_code": "def rand(*size, requires_grad: bool=False, dtype: Optional[torch.dtype]=None, layout: torch.layout=torch.strided, device_mesh: Optional[DeviceMesh]=None, placements: Optional[Sequence[Placement]]=None) -> DTensor:\n    torch_size = normalize_to_torch_size(size)\n    return _dtensor_init_helper(torch.rand, torch_size, dtype=dtype, layout=layout, requires_grad=requires_grad, device_mesh=device_mesh, placements=placements)",
    "docstring": "Returns a :class: filled with random numbers from a uniform distribution on the interval `DTensortorch.dtypeDTensortorch.set_default_dtypetorch.layoutDTensorDeviceMeshPlacementDTensor` object on each rank",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\tensor\\_api.py",
    "ast_data": "FunctionDef name:rand arguments arg arg arg arg arg arg Assign Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "normalize",
    "source_code": "def normalize(x, axis=-1, order=2):\n    l2 = np.atleast_1d(np.linalg.norm(x, order, axis))\n    l2[l2 == 0] = 1\n    return x / np.expand_dims(l2, axis)",
    "docstring": "Normalizes a Numpy array. Args: x: Numpy array to normalize. axis: axis along which to normalize. order: Normalization order (e.g. for L2 norm). Returns: A normalized copy of the array.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\utils\\np_utils.py",
    "ast_data": "FunctionDef name:normalize arg:x arg:axis arg:order arguments arg arg arg Assign Call Call Assign Compare Return return:yes Call"
  },
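A self-contained sketch of the same row-normalization logic, copied shape-for-shape from the record and runnable without Keras:

```python
import numpy as np

def normalize(x, axis=-1, order=2):
    l2 = np.atleast_1d(np.linalg.norm(x, order, axis))
    l2[l2 == 0] = 1  # guard against dividing zero-norm slices
    return x / np.expand_dims(l2, axis)

x = np.array([[3.0, 4.0], [0.0, 0.0]])
print(normalize(x))  # [[0.6 0.8] [0.  0. ]] -- zero rows pass through unchanged
```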
  {
    "library": "kornia",
    "name": "rot_x",
    "source_code": "@classmethod\ndef rot_x(cls, x: Tensor) -> So3:\n    zs = zeros_like(x)\n    return cls.exp(stack((x, zs, zs), -1))",
    "docstring": "Construct a x-axis rotation. Args: x: the x-axis rotation angle.",
    "type": "method",
    "file_path": "kornia\\kornia\\geometry\\liegroup\\so3.py",
    "ast_data": "FunctionDef name:rot_x arg:cls arg:x arguments arg arg Assign Call Return return:yes Call Call"
  },
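A usage sketch, assuming `So3` is importable from `kornia.geometry.liegroup` as the record's file_path suggests:

```python
import torch
from kornia.geometry.liegroup import So3

angle = torch.tensor([torch.pi / 2])
R = So3.rot_x(angle)  # exp of (pi/2, 0, 0): a 90-degree x-axis rotation
print(R.matrix())     # the rotation in matrix form
```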
  {
    "library": "tensorflow",
    "name": "_distribution_strategy_stack",
    "source_code": "@property\ndef _distribution_strategy_stack(self) -> list[Any]:\n    if not hasattr(self._thread_local, '_distribution_strategy_stack'):\n        self._thread_local._distribution_strategy_stack = []\n    return self._thread_local._distribution_strategy_stack",
    "docstring": "A stack to maintain distribution strategy context for each thread.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\ops.py",
    "ast_data": "FunctionDef name:_distribution_strategy_stack arg:self arguments arg If Call Assign Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_awaitable_nowait",
    "source_code": "def _awaitable_nowait(o):\n    return torch._C._awaitable_nowait(o)",
    "docstring": "Create completed Await with specified result.",
    "type": "function",
    "file_path": "pytorch\\torch\\jit\\_await.py",
    "ast_data": "FunctionDef name:_awaitable_nowait arg:o arguments arg Return return:yes Call"
  },
  {
    "library": "django",
    "name": "has_db_default",
    "source_code": "def has_db_default(self):\n    return self.db_default is not NOT_PROVIDED",
    "docstring": "Return a boolean of whether this field has a db_default value.",
    "type": "method",
    "file_path": "django\\django\\db\\models\\fields\\__init__.py",
    "ast_data": "FunctionDef name:has_db_default arg:self arguments arg Return return:yes Compare"
  },
  {
    "library": "pytorch",
    "name": "FakeImplCtx",
    "source_code": "class FakeImplCtx:\n\n    def __init__(self, _fake_mode, _op):\n        self._fake_mode = _fake_mode\n        self._shape_env = _fake_mode.shape_env\n        self._op = _op\n\n    @deprecated('`create_unbacked_symint` is deprecated, please use `new_dynamic_size` instead', category=FutureWarning)\n    def create_unbacked_symint(self, *, min=2, max=None) -> torch.SymInt:\n        return self.new_dynamic_size(min=min, max=max)\n\n    def new_dynamic_size(self, *, min=0, max=None) -> torch.SymInt:\n        if self._shape_env is None or not self._shape_env.allow_dynamic_output_shape_ops:\n            raise torch._subclasses.fake_tensor.DynamicOutputShapeException(self._op)\n        if isinstance(min, torch.SymInt) or isinstance(max, torch.SymInt):\n            raise ValueError(f'ctx.new_dynamic_size(min={min}, max={max}): expected min and max to be statically known ints but got SymInt. This is not supported.')\n        if min < 0:\n            raise ValueError(f'ctx.new_dynamic_size(min={min}, ...): expected min to be greater than or equal to 0: this API can only create non-negative sizes.')\n        return allocate_size(self._shape_env, min, max)",
    "docstring": "Context object for writing fake implementations for custom operators.",
    "type": "class",
    "file_path": "pytorch\\torch\\_library\\fake_impl.py",
    "ast_data": "ClassDef name:FakeImplCtx FunctionDef name:__init__ arg:self arg:_fake_mode arg:_op arguments arg arg arg Assign Assign Assign FunctionDef name:create_unbacked_symint arg:self arguments arg arg arg Return return:yes Call Call FunctionDef name:new_dynamic_size arg:self arguments arg arg arg If BoolOp Compare Raise Call If BoolOp Call Call Raise Call If Compare Raise Call Return return:yes Call"
  },
  {
    "library": "django",
    "name": "action",
    "source_code": "def action(function=None, *, permissions=None, description=None):\n\n    def decorator(func):\n        if permissions is not None:\n            func.allowed_permissions = permissions\n        if description is not None:\n            func.short_description = description\n        return func\n    if function is None:\n        return decorator\n    else:\n        return decorator(function)",
    "docstring": "Conveniently add attributes to an action function:: @admin.action( permissions=['publish'], description='Mark selected stories as published', ) def make_published(self, request, queryset): queryset.update(status='p') This is equivalent to setting some attributes (with the original, longer names) on the function directly:: def make_published(self, request, queryset): queryset.update(status='p') make_published.allowed_permissions = ['publish'] make_published.short_description = 'Mark selected stories as published'",
    "type": "function",
    "file_path": "django\\django\\contrib\\admin\\decorators.py",
    "ast_data": "FunctionDef name:action arg:function arguments arg arg arg FunctionDef name:decorator arg:func arguments arg If Compare Assign If Compare Assign Return return:yes If Compare Return return:yes Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "enable_numpy_methods_on_tensor",
    "source_code": "def enable_numpy_methods_on_tensor():\n    _enable_numpy_methods(tensor.Tensor)",
    "docstring": "Adds additional NumPy methods on tf.Tensor class.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\numpy_ops\\np_math_ops.py",
    "ast_data": "FunctionDef name:enable_numpy_methods_on_tensor arguments Call"
  },
  {
    "library": "numpy",
    "name": "strip",
    "source_code": "@set_module('numpy.strings')\ndef strip(a, chars=None):\n    if chars is None:\n        return _strip_whitespace(a)\n    return _strip_chars(a, chars)",
    "docstring": "For each element in , return a copy with the leading and trailing characters removed. Parameters ---------- a : array-like, with `` dtype, depending on input types See Also -------- str.strip Examples -------- >>> import numpy as np >>> c = np.array(['aAaAaA', ' aA ', 'abBABba']) >>> c array(['aAaAaA', ' aA ', 'abBABba'], dtype='>> np.strings.strip(c) array(['aAaAaA', 'aA', 'abBABba'], dtype='>> np.strings.strip(c, 'a') array(['AaAaA', ' aA ', 'bBABb'], dtype='>> np.strings.strip(c, 'A') array(['aAaAa', ' aA ', 'abBABba'], dtype='<U7')",
    "type": "function",
    "file_path": "numpy\\numpy\\_core\\strings.py",
    "ast_data": "FunctionDef name:strip arg:a arg:chars arguments arg arg If Compare Return return:yes Call Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "find_anchor_nodes",
    "source_code": "def find_anchor_nodes(self, ctx: MatchContext, searched: OrderedSet[torch.fx.Node]) -> Generator[Optional[torch.fx.Node], None, None]:\n    if self in ctx.pattern_to_node:\n        yield ctx.pattern_to_node[self]\n        return\n    for pattern in self.flat_args_kwargs[0]:\n        if isinstance(pattern, PatternExpr):\n            for other_node in pattern.find_anchor_nodes(ctx, searched):\n                if not isinstance(other_node, torch.fx.Node):\n                    continue\n                for node in other_node.users:\n                    if node not in searched:\n                        if self._match_fns(node):\n                            yield node\n                            searched.add(node)",
    "docstring": "This is used when we are matching a pattern with multiple outputs. There is a partial match (stored in ctx) and we want to walk this pattern to find a connection to an already-matched node. Yields candidate nodes that might like.",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\pattern_matcher.py",
    "ast_data": "FunctionDef name:find_anchor_nodes arg:self arg:ctx arg:searched arguments arg arg arg If Compare Return return:no For If Call For Call If Call For If Compare If Call Call"
  },
  {
    "library": "scipy",
    "name": "__init__",
    "source_code": "def __init__(self, dimensions):\n    self._dimensions = dimensions\n    self.nfev = 0\n    self.fglob = np.nan\n    self.global_optimum = None\n    self.custom_bounds = None",
    "docstring": "Initialises the problem Parameters ---------- dimensions : int The dimensionality of the problem",
    "type": "method",
    "file_path": "scipy\\benchmarks\\benchmarks\\go_benchmark_functions\\go_benchmark.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:dimensions arguments arg arg Assign Assign Assign Assign Assign"
  },
  {
    "library": "matplotlib",
    "name": "get_antialiased",
    "source_code": "def get_antialiased(self):\n    return self._antialiased",
    "docstring": "Return whether antialiased rendering is used.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\text.py",
    "ast_data": "FunctionDef name:get_antialiased arg:self arguments arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_all_registered_ops",
    "source_code": "def _all_registered_ops(self) -> set[str]:\n    return {op_name_class.qualified_name() for op_name_class in self._registry.keys()}",
    "docstring": "Returns the set of all registered function names.",
    "type": "method",
    "file_path": "pytorch\\torch\\onnx\\_internal\\_exporter_legacy.py",
    "ast_data": "FunctionDef name:_all_registered_ops arg:self arguments arg Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "get_name_scope",
    "source_code": "@tf_export('__internal__.get_name_scope', v1=[])\ndef get_name_scope() -> str:\n    if context.executing_eagerly():\n        return context.context().scope_name.rstrip('/')\n    return get_default_graph().get_name_scope()",
    "docstring": "Returns the current name scope in the default_graph. For example: would print the string . Returns: A string representing the current name scope.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\ops.py",
    "ast_data": "FunctionDef name:get_name_scope arguments If Call Return return:yes Call Call Return return:yes Call Call Call"
  },
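The docstring's example, reconstructed with the public TF2 counterpart `tf.get_current_name_scope` (an assumption about which public symbol fronts this internal helper):

```python
import tensorflow as tf

with tf.name_scope("scope1"):
    with tf.name_scope("scope2"):
        print(tf.get_current_name_scope())  # scope1/scope2
```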
  {
    "library": "scikit-learn",
    "name": "__post_init__",
    "source_code": "def __post_init__(self):\n    if self.low > self.high:\n        raise ValueError(f'One must have low <= high; got low={self.low}, high={self.high}.')",
    "docstring": "Check that low <= high",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\_loss\\link.py",
    "ast_data": "FunctionDef name:__post_init__ arg:self arguments arg If Compare Raise Call"
  },
  {
    "library": "tensorflow",
    "name": "time_and_memory",
    "source_code": "@staticmethod\ndef time_and_memory(min_micros=1, min_bytes=1, min_accelerator_micros=0, min_cpu_micros=0, min_peak_bytes=0, min_residual_bytes=0, min_output_bytes=0):\n    return {'max_depth': 10000, 'min_bytes': min_bytes, 'min_peak_bytes': min_peak_bytes, 'min_residual_bytes': min_residual_bytes, 'min_output_bytes': min_output_bytes, 'min_micros': min_micros, 'min_accelerator_micros': min_accelerator_micros, 'min_cpu_micros': min_cpu_micros, 'min_params': 0, 'min_float_ops': 0, 'min_occurrence': 0, 'order_by': 'micros', 'account_type_regexes': ['.*'], 'start_name_regexes': ['.*'], 'trim_name_regexes': [], 'show_name_regexes': ['.*'], 'hide_name_regexes': [], 'account_displayed_op_only': True, 'select': ['micros', 'bytes'], 'step': -1, 'output': 'stdout'}",
    "docstring": "Show operation time and memory consumptions. Args: min_micros: Only show profiler nodes with execution time no less than this. It sums accelerator and cpu times. min_bytes: Only show profiler nodes requested to allocate no less bytes than this. min_accelerator_micros: Only show profiler nodes spend no less than this time on accelerator (e.g. GPU). min_cpu_micros: Only show profiler nodes spend no less than this time on cpu. min_peak_bytes: Only show profiler nodes using no less than this bytes at peak (high watermark). For profiler nodes consist of multiple graph nodes, it sums the graph nodes' peak_bytes. min_residual_bytes: Only show profiler nodes have no less than this bytes not being de-allocated after Compute() ends. For profiler nodes consist of multiple graph nodes, it sums the graph nodes' residual_bytes. min_output_bytes: Only show profiler nodes have no less than this bytes output. The output are not necessarily allocated by this profiler nodes. Returns: A dict of profiling options.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\profiler\\option_builder.py",
    "ast_data": "FunctionDef name:time_and_memory arg:min_micros arg:min_bytes arg:min_accelerator_micros arg:min_cpu_micros arg:min_peak_bytes arg:min_residual_bytes arg:min_output_bytes arguments arg arg arg arg arg arg arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "layer_statistics_dump",
    "source_code": "def layer_statistics_dump(self, file: IO[str]) -> None:\n    fields = ['op_name', 'tensor_idx'] + list(self._layer_debug_metrics.keys())\n    if self._debug_options.layer_direct_compare_metrics is not None:\n        fields += list(self._debug_options.layer_direct_compare_metrics.keys())\n    fields += ['scale', 'zero_point', 'tensor_name']\n    writer = csv.DictWriter(file, fields)\n    writer.writeheader()\n    if self.layer_statistics:\n        for name, metrics in self.layer_statistics.items():\n            data = metrics.copy()\n            data['tensor_name'], _ = self._get_operand_name_and_index(name)\n            data['tensor_idx'] = self._numeric_verify_op_details[name]['inputs'][0]\n            data['op_name'] = self._quant_interpreter._get_op_details(self._defining_op[data['tensor_idx']])['op_name']\n            details = self._quant_interpreter._get_tensor_details(data['tensor_idx'], subgraph_index=0)\n            data['scale'], data['zero_point'] = (details['quantization_parameters']['scales'][0], details['quantization_parameters']['zero_points'][0])\n            writer.writerow(data)",
    "docstring": "Dumps layer statistics into file, in csv format. Args: file: file, or file-like object to write.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\lite\\tools\\optimize\\debugging\\python\\debugger.py",
    "ast_data": "FunctionDef name:layer_statistics_dump arg:self arg:file arguments arg arg Assign Call Call If Compare Call Call Assign Call Call If For Call Assign Call Assign Call Assign Assign Call Assign Call Assign Call"
  },
  {
    "library": "tensorflow",
    "name": "transform_feature",
    "source_code": "def transform_feature(self, transformation_cache, state_manager):\n    input_tensor = _to_sparse_input_and_drop_ignore_values(transformation_cache.get(self.key, state_manager))\n    return self._transform_input_tensor(input_tensor, state_manager)",
    "docstring": "Creates a lookup table for the vocabulary.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\feature_column\\feature_column_v2.py",
    "ast_data": "FunctionDef name:transform_feature arg:self arg:transformation_cache arg:state_manager arguments arg arg arg Assign Call Call Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "fft_mode",
    "source_code": "@property\ndef fft_mode(self) -> FFT_MODE_TYPE:\n    return self._fft_mode",
    "docstring": "Mode of utilized FFT ('twosided', 'centered', 'onesided' or 'onesided2X'). It can have the following values: 'twosided': Two-sided FFT, where values for the negative frequencies are in upper half of the array. Corresponds to :func:. 'centered': Two-sided FFT with the values being ordered along monotonically increasing frequencies. Corresponds to applying :func: to :func:. 'onesided': Calculates only values for non-negative frequency values. Corresponds to :func:. 'onesided2X': Like , but the non-zero frequencies are doubled if is set to 'magnitude' or multiplied by `scalingfft_modeonesided2Xmfftonesidedonesided2Xff_pts` property. See Also -------- delta_f: Width of the frequency bins of the STFT. f: Frequencies values of the STFT. f_pts: Width of the frequency bins of the STFT. onesided_fft: True if a one-sided FFT is used. scaling: Normalization applied to the window function ShortTimeFFT: Class this property belongs to.",
    "type": "method",
    "file_path": "scipy\\scipy\\signal\\_short_time_fft.py",
    "ast_data": "FunctionDef name:fft_mode arg:self arguments arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "replace_floor_div",
    "source_code": "def replace_floor_div(expr: sympy.Expr) -> sympy.Expr:\n    if isinstance(expr, sympy.core.mul.Mul) and isinstance(expr.args[0], sympy.Rational):\n        frac = expr.args[0]\n        numerator = sympy_product(expr.args[1:]) * frac.numerator\n        denominator = frac.denominator\n        new_expr = numerator / denominator\n        assert V.graph.sizevars.statically_known_equals(new_expr, expr), f\"Unsound replacement: '{new_expr}' != '{expr}'\"\n        return FloorDiv(numerator, denominator)\n    else:\n        return sympy.floor(expr)",
    "docstring": "Converts floor(x / c) to x // c.",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\codegen\\wrapper_fxir.py",
    "ast_data": "FunctionDef name:replace_floor_div arg:expr arguments arg If BoolOp Call Call Assign Assign Call Assign Assign Call Return return:yes Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "dtype",
    "source_code": "@property\ndef dtype(self):\n    return self._dtype",
    "docstring": "Returns the of elements in the tensor.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\tensor.py",
    "ast_data": "FunctionDef name:dtype arg:self arguments arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "assume_constant_result",
    "source_code": "def assume_constant_result(fn):\n    import torch._dynamo\n    return torch._dynamo.assume_constant_result(fn)",
    "docstring": "This function is used to mark a function as having a constant result. This allows the compiler to optimize away your function. Returns The same function Args: fn: The function to be marked as having a constant result. .. warning:: can if invalid cause safety and soundness issues, :func: will not attempt to validate whether the constant assumption is true or not",
    "type": "function",
    "file_path": "pytorch\\torch\\compiler\\__init__.py",
    "ast_data": "FunctionDef name:assume_constant_result arg:fn arguments arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "bias_add",
    "source_code": "@dispatch.add_dispatch_support\n@doc_controls.do_not_generate_docs\ndef bias_add(x, bias, data_format=None):\n    if data_format is None:\n        data_format = image_data_format()\n    if data_format not in {'channels_first', 'channels_last'}:\n        raise ValueError('Unknown data_format: ' + str(data_format))\n    bias_shape = int_shape(bias)\n    if len(bias_shape) != 1 and len(bias_shape) != ndim(x) - 1:\n        raise ValueError('Unexpected bias dimensions %d, expect to be 1 or %d dimensions' % (len(bias_shape), ndim(x) - 1))\n    if len(bias_shape) == 1:\n        if data_format == 'channels_first':\n            return nn.bias_add(x, bias, data_format='NCHW')\n        return nn.bias_add(x, bias, data_format='NHWC')\n    if ndim(x) in (3, 4, 5):\n        if data_format == 'channels_first':\n            bias_reshape_axis = (1, bias_shape[-1]) + bias_shape[:-1]\n            return x + reshape(bias, bias_reshape_axis)\n        return x + reshape(bias, (1,) + bias_shape)\n    return nn.bias_add(x, bias)",
    "docstring": "Adds a bias vector to a tensor. Args: x: Tensor or variable. bias: Bias tensor to add. data_format: string, or . Returns: Output tensor. Raises: ValueError: In one of the two cases below: 1. invalid argument. 2. invalid bias shape. the bias should be either a vector or a tensor with ndim(x) - 1 dimension",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\backend.py",
    "ast_data": "FunctionDef name:bias_add arg:x arg:bias arg:data_format arguments arg arg arg If Compare Assign Call If Compare Raise Call Call Assign Call If BoolOp Compare Call Compare Call Call Raise Call Call Call If Compare Call If Compare Return return:yes Call Return return:yes Call If Compare Call If Compare Assign Return return:yes Call Return return:yes Call Return return:yes Call"
  },
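What the rank-1 channels_last branch computes, sketched with plain NumPy broadcasting:

```python
import numpy as np

x = np.zeros((2, 3, 4))                # (batch, steps, channels)
bias = np.array([1.0, 2.0, 3.0, 4.0])  # one bias per channel
out = x + bias                         # broadcast over the last axis, like nn.bias_add(..., 'NHWC')
print(out[0, 0])                       # [1. 2. 3. 4.]
```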
  {
    "library": "tensorflow",
    "name": "function_type",
    "source_code": "@property\ndef function_type(self) -> function_type_lib.FunctionType:\n    return self._function_type",
    "docstring": "Represents the input/output contract of this function.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\polymorphic_function\\atomic_function.py",
    "ast_data": "FunctionDef name:function_type arg:self arguments arg Return return:yes"
  },
  {
    "library": "django",
    "name": "uri_to_iri",
    "source_code": "def uri_to_iri(uri):\n    if uri is None:\n        return uri\n    uri = force_bytes(uri)\n    bits = uri.split(b'%')\n    if len(bits) == 1:\n        iri = uri\n    else:\n        parts = [bits[0]]\n        append = parts.append\n        hextobyte = _hextobyte\n        for item in bits[1:]:\n            hex = item[:2]\n            if hex in hextobyte:\n                append(hextobyte[item[:2]])\n                append(item[2:])\n            else:\n                append(b'%')\n                append(item)\n        iri = b''.join(parts)\n    return repercent_broken_unicode(iri).decode()",
    "docstring": "Convert a Uniform Resource Identifier(URI) into an Internationalized Resource Identifier(IRI). This is the algorithm from RFC 3987 Section 3.2, excluding step 4. Take an URI in ASCII bytes (e.g. '/I%20%E2%99%A5%20Django/') and return a string containing the encoded result (e.g. '/I%20♥%20Django/').",
    "type": "function",
    "file_path": "django\\django\\utils\\encoding.py",
    "ast_data": "FunctionDef name:uri_to_iri arg:uri arguments arg If Compare Return return:yes Assign Call Assign Call If Compare Call Assign Assign Assign Assign For Assign If Compare Call Call Call Call Assign Call Return return:yes Call Call"
  },
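The docstring's own example as a runnable sketch; note that %20 is deliberately left encoded, per the excluded RFC 3987 step:

```python
from django.utils.encoding import uri_to_iri

print(uri_to_iri('/I%20%E2%99%A5%20Django/'))  # '/I%20♥%20Django/'
```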
  {
    "library": "kornia",
    "name": "Denormalize",
    "source_code": "class Denormalize(Module):\n\n    def __init__(self, mean: Union[Tensor, float], std: Union[Tensor, float]) -> None:\n        super().__init__()\n        self.mean = mean\n        self.std = std\n\n    def forward(self, input: Tensor) -> Tensor:\n        return denormalize(input, self.mean, self.std)\n\n    def __repr__(self) -> str:\n        repr = f'(mean={self.mean}, std={self.std})'\n        return self.__class__.__name__ + repr",
    "docstring": "Denormalize a tensor image with mean and standard deviation. .. math:: \\text{input[channel] = (input[channel] * std[channel]) + mean[channel]} Where is :math: and :math: for channels, Args: mean: Mean for each channel. std: Standard deviations for each channel. Shape: - Input: Image tensor of size :math:. - Output: Denormalised tensor with same size as input :math:. Examples: >>> x = torch.rand(1, 4, 3, 3) >>> out = Denormalize(0.0, 255.)(x) >>> out.shape torch.Size([1, 4, 3, 3]) >>> x = torch.rand(1, 4, 3, 3, 3) >>> mean = torch.zeros(1, 4) >>> std = 255. * torch.ones(1, 4) >>> out = Denormalize(mean, std)(x) >>> out.shape torch.Size([1, 4, 3, 3, 3])",
    "type": "class",
    "file_path": "kornia\\kornia\\enhance\\normalize.py",
    "ast_data": "ClassDef name:Denormalize FunctionDef name:__init__ arg:self arg:mean arg:std arguments arg arg arg Call Call Assign Assign FunctionDef name:forward arg:self arg:input arguments arg arg Return return:yes Call FunctionDef name:__repr__ arg:self arguments arg Assign Return return:yes"
  },
  {
    "library": "kornia",
    "name": "normalize_points_with_intrinsics",
    "source_code": "def normalize_points_with_intrinsics(point_2d: Tensor, camera_matrix: Tensor) -> Tensor:\n    KORNIA_CHECK_SHAPE(point_2d, ['*', '2'])\n    KORNIA_CHECK_SHAPE(camera_matrix, ['*', '3', '3'])\n    cxcy = camera_matrix[..., :2, 2]\n    fxfy = camera_matrix[..., :2, :2].diagonal(dim1=-2, dim2=-1)\n    if len(cxcy.shape) < len(point_2d.shape):\n        cxcy, fxfy = (cxcy.unsqueeze(-2), fxfy.unsqueeze(-2))\n    xy = (point_2d - cxcy) / fxfy\n    return xy",
    "docstring": "Normalize points with intrinsics. Useful for conversion of keypoints to be used with essential matrix. Args: point_2d: tensor containing the 2d points in the image pixel coordinates. The shape of the tensor can be :math:. camera_matrix: tensor containing the intrinsics camera matrix. The tensor shape must be :math:. Returns: tensor of (u, v) cam coordinates with shape :math:. Example: >>> _ = torch.manual_seed(0) >>> X = torch.rand(1, 2) >>> K = torch.eye(3)[None] >>> normalize_points_with_intrinsics(X, K) tensor([[0.4963, 0.7682]])",
    "type": "function",
    "file_path": "kornia\\kornia\\geometry\\conversions.py",
    "ast_data": "FunctionDef name:normalize_points_with_intrinsics arg:point_2d arg:camera_matrix arguments arg arg Call Call Assign Assign Call If Compare Call Call Assign Call Call Assign Return return:yes"
  },
  {
    "library": "django",
    "name": "savepoint_rollback",
    "source_code": "def savepoint_rollback(sid, using=None):\n    get_connection(using).savepoint_rollback(sid)",
    "docstring": "Roll back the most recent savepoint (if one exists). Do nothing if savepoints are not supported.",
    "type": "function",
    "file_path": "django\\django\\db\\transaction.py",
    "ast_data": "FunctionDef name:savepoint_rollback arg:sid arg:using arguments arg arg Call Call"
  },
  {
    "library": "django",
    "name": "reverse",
    "source_code": "def reverse(self):\n    self[:] = self[-1::-1]",
    "docstring": "Standard list reverse method",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\geos\\mutable_list.py",
    "ast_data": "FunctionDef name:reverse arg:self arguments arg Assign"
  },
  {
    "library": "pytorch",
    "name": "pg_backend_config",
    "source_code": "@property\ndef pg_backend_config(self) -> dict[ProcessGroup, str]:\n    global _pg_backend_config\n    return _pg_backend_config",
    "docstring": "Process group's backend config. TODO don't expose the map, expose fine grained ops",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\distributed_c10d.py",
    "ast_data": "FunctionDef name:pg_backend_config arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_get_dataset_attributes",
    "source_code": "def _get_dataset_attributes(dataset):\n    batched_dataset = _get_batched_dataset(dataset)\n    batch_size, drop_remainder = _get_batched_dataset_attributes(batched_dataset)\n    prefetch_buffer = None\n    if isinstance(dataset, dataset_ops.PrefetchDataset):\n        prefetch_buffer = dataset._buffer_size\n    elif isinstance(dataset, dataset_ops.DatasetV1Adapter) and isinstance(dataset._dataset, dataset_ops.PrefetchDataset):\n        prefetch_buffer = dataset._dataset._buffer_size\n    return (batch_size, drop_remainder, prefetch_buffer)",
    "docstring": "Get the underlying attributes from the dataset object.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\input_lib.py",
    "ast_data": "FunctionDef name:_get_dataset_attributes arg:dataset arguments arg Assign Call Assign Call Assign If Call Assign If BoolOp Call Call Assign Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "check_file",
    "source_code": "def check_file(filename: str, test_globs: list[str]=CPP_TEST_GLOBS) -> list[LintMessage]:\n    lint_messages: list[LintMessage] = []\n    symbols: dict[str, int] = {}\n    with open(filename) as f:\n        for idx, line in enumerate(f):\n            symbol = line.strip()\n            if not symbol or symbol[0] == '#':\n                continue\n            symbols[symbol] = idx + 1\n    symbols_regex = re.compile('|'.join(sorted(symbols.keys(), reverse=True)))\n    matched_symbols = find_matched_symbols(symbols_regex, test_globs)\n    for s, lineno in symbols.items():\n        if s not in matched_symbols:\n            lint_messages.append(LintMessage(path=filename, line=lineno, char=None, code=LINTER_CODE, severity=LintSeverity.ERROR, name='[untested-symbol]', original=None, replacement=None, description=f\"{s} has been included as a header-only API but is not tested in any of CPP_TEST_GLOBS, which contains {CPP_TEST_GLOBS}.\\nPlease add a .cpp test using the symbol without linking anything to verify that the symbol is in fact header-only. If you already have a test but it's not found, please add the .cpp file to CPP_TEST_GLOBS in tools/linters/adapters/header_only_linter.py.\"))\n    return lint_messages",
    "docstring": "Goes through the header_only_apis.txt file and verifies that all symbols within the file can be found tested in an appropriately independent .cpp file. Note that we expect CPP_TEST_GLOBS to be passed in as test_globs--the only reason this is an argument at all is for ease of testing.",
    "type": "function",
    "file_path": "pytorch\\tools\\linter\\adapters\\header_only_linter.py",
    "ast_data": "FunctionDef name:check_file arg:filename arg:test_globs arguments arg arg With Call For Call Assign Call If BoolOp Compare Assign Assign Call Call Call Call Assign Call For Call If Compare Call Call Return return:yes"
  },
  {
    "library": "authlib",
    "name": "delete_authorization_code",
    "source_code": "def delete_authorization_code(self, authorization_code):\n    raise NotImplementedError()",
    "docstring": "Delete authorization code from database or cache. Developers MUST implement it in subclass, e.g.:: def delete_authorization_code(self, authorization_code): authorization_code.delete() :param authorization_code: the instance of authorization_code",
    "type": "method",
    "file_path": "authlib\\authlib\\oauth2\\rfc6749\\grants\\authorization_code.py",
    "ast_data": "FunctionDef name:delete_authorization_code arg:self arg:authorization_code arguments arg arg Raise Call"
  },
  {
    "library": "matplotlib",
    "name": "set_hatchcolor",
    "source_code": "def set_hatchcolor(self, c):\n    self._original_hatchcolor = c\n    self._set_hatchcolor(c)",
    "docstring": "Set the hatchcolor(s) of the collection. Parameters ---------- c : :mpltype: or list of :mpltype: or 'edge' The collection hatchcolor(s). If a sequence, the patches cycle through it.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\collections.py",
    "ast_data": "FunctionDef name:set_hatchcolor arg:self arg:c arguments arg arg Assign Call"
  },
  {
    "library": "pandas",
    "name": "disallow_ndim_indexing",
    "source_code": "def disallow_ndim_indexing(result) -> None:\n    if np.ndim(result) > 1:\n        raise ValueError('Multi-dimensional indexing (e.g. `obj[:, None]`) is no longer supported. Convert to a numpy array before indexing instead.')",
    "docstring": "Helper function to disallow multi-dimensional indexing on 1D Series/Index. GH#27125 indexer like idx[:, None] expands dim, but we cannot do that and keep an index, so we used to return ndarray, which was deprecated in GH#30588.",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\indexers\\utils.py",
    "ast_data": "FunctionDef name:disallow_ndim_indexing arg:result arguments arg If Compare Call Raise Call"
  },
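The pattern this guard rejects, shown from the user's side; this assumes a recent pandas where the old deprecation has become an error:

```python
import numpy as np
import pandas as pd

s = pd.Series([1, 2, 3])
try:
    s[:, None]                 # expands to 2-D; no longer supported on a Series
except ValueError as e:
    print(e)
print(np.asarray(s)[:, None])  # the suggested workaround: index the ndarray instead
```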
  {
    "library": "scikit-learn",
    "name": "_build_f",
    "source_code": "def _build_f(self, X, y):\n    bounds_error = self.out_of_bounds == 'raise'\n    if len(y) == 1:\n        self.f_ = lambda x: y.repeat(x.shape)\n    else:\n        self.f_ = interpolate.interp1d(X, y, kind='linear', bounds_error=bounds_error)",
    "docstring": "Build the f_ interp1d function.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\isotonic.py",
    "ast_data": "FunctionDef name:_build_f arg:self arg:X arg:y arguments arg arg arg Assign Compare If Compare Call Assign arguments arg Call Assign Call"
  },
  {
    "library": "tensorflow",
    "name": "decode",
    "source_code": "def decode(self, spec, encoded_value):\n    return spec._from_components(encoded_value)",
    "docstring": "Decodes from a batchable tensor encoding. See for a description of the default encoding. Subclasses may override this default definition, when necessary. Args: spec: The TypeSpec for the result value. If encoded values with spec were batched, then should be ; or if encoded values with spec were unbatched, then should be . encoded_value: A nest of values returned by ; or a nest of values that was formed by stacking, unstacking, or concatenating the corresponding elements of values returned by . Returns: A value compatible with .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\extension_type.py",
    "ast_data": "FunctionDef name:decode arg:self arg:spec arg:encoded_value arguments arg arg arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "Component",
    "source_code": "@compatibility(is_backward_compatible=False)\n@dataclass\nclass Component:\n    graph: torch.fx.Graph\n    order: int\n    name: str\n    input_placeholders: list = field(default_factory=list)\n    orig_inputs: list = field(default_factory=list)\n    orig_outputs: list = field(default_factory=list)\n    getattr_maps: dict[torch.fx.Node, torch.fx.Node] = field(default_factory=dict)\n    constructor_args: list[str] = field(default_factory=list)\n    gm: Optional[torch.fx.GraphModule] = None",
    "docstring": "A component serves as a container for a subgraph we want to create afterwards.",
    "type": "class",
    "file_path": "pytorch\\torch\\fx\\passes\\split_utils.py",
    "ast_data": "ClassDef name:Component Call Call Call Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "MemoryChecker",
    "source_code": "class MemoryChecker(object):\n\n    @trace.trace_wrapper\n    def __enter__(self):\n        self._python_memory_checker = _PythonMemoryChecker()\n        return self\n\n    @trace.trace_wrapper\n    def __exit__(self, exc_type, exc_value, traceback):\n        pass\n\n    def record_snapshot(self):\n        self._python_memory_checker.record_snapshot()\n\n    @trace.trace_wrapper\n    def report(self):\n        self._python_memory_checker.report()\n\n    @trace.trace_wrapper\n    def assert_no_leak_if_all_possibly_except_one(self):\n        self._python_memory_checker.assert_no_leak_if_all_possibly_except_one()\n\n    @trace.trace_wrapper\n    def assert_no_new_python_objects(self, threshold=None):\n        self._python_memory_checker.assert_no_new_objects(threshold=threshold)",
    "docstring": "Memory leak detection class. This is a utility class to detect Python and C++ memory leaks. It's intended for both testing and debugging. Basic usage: >>> # MemoryChecker() context manager tracks memory status inside its scope. >>> with MemoryChecker() as memory_checker: >>> tensors = [] >>> for _ in range(10): >>> # Simulating object leak every iteration. >>> tensors.append(tf.constant(1)) >>> >>> # Take a memory snapshot for later analysis. >>> memory_checker.record_snapshot() >>> >>> # generates a html graph file showing allocations over >>> # snapshots per every stack trace. >>> memory_checker.report() >>> >>> # This assertion will detect object leak. >>> memory_checker.assert_no_leak_if_all_possibly_except_one() must be called once every iteration at the same location. This is because the detection algorithm relies on the assumption that if there is a leak, it's happening similarly on every snapshot.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\memory_checker.py",
    "ast_data": "ClassDef name:MemoryChecker FunctionDef name:__enter__ arg:self arguments arg Assign Call Return return:yes FunctionDef name:__exit__ arg:self arg:exc_type arg:exc_value arg:traceback arguments arg arg arg arg FunctionDef name:record_snapshot arg:self arguments arg Call FunctionDef name:report arg:self arguments arg Call FunctionDef name:assert_no_leak_if_all_possibly_except_one arg:self arguments arg Call FunctionDef name:assert_no_new_python_objects arg:self arg:threshold arguments arg arg Call"
  },
  {
    "library": "tensorflow",
    "name": "add_to_graph",
    "source_code": "def add_to_graph(self, g):\n    self._create_definition_if_needed()\n    if context.executing_eagerly():\n        context.context().add_function_def(self.definition)\n    else:\n        g._add_function(self)\n    for f in self._sub_functions.values():\n        g._add_function_recursive(f)\n    if self._grad_func:\n        self._grad_func.add_to_graph(g)",
    "docstring": "Adds this function into the graph g.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\function.py",
    "ast_data": "FunctionDef name:add_to_graph arg:self arg:g arguments arg arg Call If Call Call Call Call For Call Call If Call"
  },
  {
    "library": "django",
    "name": "get_dated_items",
    "source_code": "def get_dated_items(self):\n    year = self.get_year()\n    month = self.get_month()\n    date_field = self.get_date_field()\n    date = _date_from_string(year, self.get_year_format(), month, self.get_month_format())\n    since = self._make_date_lookup_arg(date)\n    until = self._make_date_lookup_arg(self._get_next_month(date))\n    lookup_kwargs = {'%s__gte' % date_field: since, '%s__lt' % date_field: until}\n    qs = self.get_dated_queryset(**lookup_kwargs)\n    date_list = self.get_date_list(qs)\n    return (date_list, qs, {'month': date, 'next_month': self.get_next_month(date), 'previous_month': self.get_previous_month(date)})",
    "docstring": "Return (date_list, items, extra_context) for this request.",
    "type": "method",
    "file_path": "django\\django\\views\\generic\\dates.py",
    "ast_data": "FunctionDef name:get_dated_items arg:self arguments arg Assign Call Assign Call Assign Call Assign Call Call Call Assign Call Assign Call Call Assign Assign Call Assign Call Return return:yes Call Call"
  },
  {
    "library": "scrapy",
    "name": "NotConfigured",
    "source_code": "class NotConfigured(Exception):\n    pass",
    "docstring": "Indicates a missing configuration situation",
    "type": "class",
    "file_path": "scrapy\\scrapy\\exceptions.py",
    "ast_data": "ClassDef name:NotConfigured"
  },
  {
    "library": "tensorflow",
    "name": "is_compatible_with",
    "source_code": "def is_compatible_with(self, other):\n    other = as_dtype(other)\n    return self._type_enum in (other.as_datatype_enum, other.base_dtype.as_datatype_enum)",
    "docstring": "Returns True if the DType will be converted to this DType (TF1). Programs written for TensorFlow 2.x do not need this function. Instead, they can do equality comparison on objects directly: . This function exists only for compatibility with TensorFlow 1.x, where it additionally allows conversion from a reference type (used by ) to its base type. Args: other: A (or object that may be converted to a ). Returns: True if a Tensor of the will be implicitly converted to this .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\dtypes.py",
    "ast_data": "FunctionDef name:is_compatible_with arg:self arg:other arguments arg arg Assign Call Return return:yes Compare"
  },
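The TF2-style comparison the docstring recommends, next to the TF1 compatibility call:

```python
import tensorflow as tf

print(tf.as_dtype("float32") == tf.float32)       # True -- plain equality in TF2
print(tf.float32.is_compatible_with(tf.float32))  # True -- TF1 compatibility path
```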
  {
    "library": "sphinx",
    "name": "IntersphinxDispatcher",
    "source_code": "class IntersphinxDispatcher(CustomReSTDispatcher):\n\n    def role(self, role_name: str, language_module: ModuleType, lineno: int, reporter: Reporter) -> tuple[RoleFunction, list[system_message]]:\n        if len(role_name) > 9 and role_name.startswith(('external:', 'external+')):\n            return (IntersphinxRole(role_name), [])\n        else:\n            return super().role(role_name, language_module, lineno, reporter)",
    "docstring": "Custom dispatcher for external role. This enables :external:***:/:external+***: roles on parsing reST document.",
    "type": "class",
    "file_path": "sphinx\\sphinx\\ext\\intersphinx\\_resolve.py",
    "ast_data": "ClassDef name:IntersphinxDispatcher FunctionDef name:role arg:self arg:role_name arg:language_module arg:lineno arg:reporter arguments arg arg arg arg arg If BoolOp Compare Call Call Return return:yes Call Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "register_serializable",
    "source_code": "def register_serializable(cls: Type[Serializable]):\n    if cls.experimental_type_proto() in PROTO_CLASS_TO_PY_CLASS:\n        raise ValueError('Existing Python class ' + PROTO_CLASS_TO_PY_CLASS[cls.experimental_type_proto()].__name__ + ' already has ' + cls.experimental_type_proto().__name__ + ' as its associated proto representation. Please ensure ' + cls.__name__ + ' has a unique proto representation.')\n    PROTO_CLASS_TO_PY_CLASS[cls.experimental_type_proto()] = cls",
    "docstring": "Registers a Python class to support serialization. Only register standard TF types. Custom types should NOT be registered. Args: cls: Python class to register.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\core\\function\\trace_type\\serialization.py",
    "ast_data": "FunctionDef name:register_serializable arg:cls arguments arg If Compare Call Raise Call Call Call Assign Call"
  },
  {
    "library": "kornia",
    "name": "ty",
    "source_code": "@property\ndef ty(self) -> Tensor:\n    return self.extrinsics[..., 1, -1]",
    "docstring": "Return the y-coordinate of the translation vector. Returns: tensor of shape :math:.",
    "type": "method",
    "file_path": "kornia\\kornia\\geometry\\camera\\pinhole.py",
    "ast_data": "FunctionDef name:ty arg:self arguments arg Return return:yes"
  },
  {
    "library": "django",
    "name": "crosses",
    "source_code": "def crosses(self, other):\n    return capi.geos_crosses(self.ptr, other.ptr)",
    "docstring": "Return true if the DE-9IM intersection matrix for the two Geometries is T*T****** (for a point and a curve,a point and an area or a line and an area) 0******** (for two curves).",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\geos\\geometry.py",
    "ast_data": "FunctionDef name:crosses arg:self arg:other arguments arg arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_resource_apply_dense",
    "source_code": "def _resource_apply_dense(self, grad, handle):\n    raise NotImplementedError()",
    "docstring": "Add ops to apply dense gradients to the variable . Args: grad: a representing the gradient. handle: a of dtype which points to the variable to be updated. Returns: An which updates the value of the variable.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\training\\optimizer.py",
    "ast_data": "FunctionDef name:_resource_apply_dense arg:self arg:grad arg:handle arguments arg arg arg Raise Call"
  },
  {
    "library": "scikit-learn",
    "name": "add",
    "source_code": "def add(self, y: Array | complex, /, copy: bool | None=None, xp: ModuleType | None=None) -> Array:\n    return self._op(_AtOp.ADD, operator.iadd, operator.add, y, copy=copy, xp=xp)",
    "docstring": "Apply `` and return the updated array.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\externals\\array_api_extra\\_lib\\_at.py",
    "ast_data": "FunctionDef name:add arg:copy arg:xp arguments arg arg arg arg Return return:yes Call"
  },
  {
    "library": "django",
    "name": "DeserializationError",
    "source_code": "class DeserializationError(Exception):\n\n    @classmethod\n    def WithData(cls, original_exc, model, fk, field_value):\n        return cls(\"%s: (%s:pk=%s) field_value was '%s'\" % (original_exc, model, fk, field_value))",
    "docstring": "Something bad happened during deserialization.",
    "type": "class",
    "file_path": "django\\django\\core\\serializers\\base.py",
    "ast_data": "ClassDef name:DeserializationError FunctionDef name:WithData arg:cls arg:original_exc arg:model arg:fk arg:field_value arguments arg arg arg arg arg Return return:yes Call"
  },
  {
    "library": "kornia",
    "name": "forward",
    "source_code": "def forward(self, images: Tensor, n: Optional[int]=10000, apply_imagenet_normalization: bool=True, pad_if_not_divisible: bool=True) -> Tuple[Tensor, Tensor, Tensor]:\n    if apply_imagenet_normalization:\n        images = self.normalizer(images)\n    B, C, H, W = images.shape\n    h, w = images.shape[2:]\n    if pad_if_not_divisible:\n        pd_h = 14 - h % 14 if h % 14 > 0 else 0\n        pd_w = 14 - w % 14 if w % 14 > 0 else 0\n        images = torch.nn.functional.pad(images, (0, pd_w, 0, pd_h), value=0.0)\n    keypoints, scores = self.detect(images, n=n, apply_imagenet_normalization=False, crop_h=h, crop_w=w)\n    descriptions = self.describe(images, keypoints, apply_imagenet_normalization=False)\n    return (dedode_denormalize_pixel_coordinates(keypoints, H, W), scores, descriptions)",
    "docstring": "Detect and describe keypoints in the input images. Args: images: A tensor of shape :math: containing the ImageNet-Normalized input images. n: The number of keypoints to detect. apply_imagenet_normalization: Whether to apply ImageNet normalization to the input images. pad_if_not_divisible: pad image shape if not evenly divisible. Returns: keypoints: A tensor of shape :math: containing the detected keypoints in the image range, unlike function scores: A tensor of shape :math: containing the scores of the detected keypoints. descriptions: A tensor of shape :math: containing the descriptions of the detected keypoints. DIM is 256 for B and 512 for G.",
    "type": "method",
    "file_path": "kornia\\kornia\\feature\\dedode\\dedode.py",
    "ast_data": "FunctionDef name:forward arg:self arg:images arg:n arg:apply_imagenet_normalization arg:pad_if_not_divisible arguments arg arg arg arg arg If Assign Call Assign Assign If Assign Compare Assign Compare Assign Call Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "min",
    "source_code": "def min(self, *, axis: AxisInt | None=None, skipna: bool=True):\n    nv.validate_minmax_axis(axis, self.ndim)\n    return self._min_max('min', skipna=skipna)",
    "docstring": "Min of array values, ignoring NA values if specified. Parameters ---------- axis : int, default 0 Not Used. NumPy compatibility. skipna : bool, default True Whether to ignore NA values. Returns ------- scalar",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\arrays\\sparse\\array.py",
    "ast_data": "FunctionDef name:min arg:self arguments arg arg arg Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "apply_functional",
    "source_code": "def apply_functional(self, fn_name, grads, args, output_metadata):\n    op = ops.get(fn_name)\n    return self.proxy_call(op, (grads, *args), output_metadata)",
    "docstring": "Proxies a call to ops.fn_name(grads, *args) into the graph",
    "type": "method",
    "file_path": "pytorch\\torch\\_dynamo\\compiled_autograd.py",
    "ast_data": "FunctionDef name:apply_functional arg:self arg:fn_name arg:grads arg:args arg:output_metadata arguments arg arg arg arg arg Assign Call Return return:yes Call"
  },
  {
    "library": "numpy",
    "name": "_izip_records",
    "source_code": "def _izip_records(seqarrays, fill_value=None, flatten=True):\n    if flatten:\n        zipfunc = _izip_fields_flat\n    else:\n        zipfunc = _izip_fields\n    for tup in itertools.zip_longest(*seqarrays, fillvalue=fill_value):\n        yield tuple(zipfunc(tup))",
    "docstring": "Returns an iterator of concatenated items from a sequence of arrays. Parameters ---------- seqarrays : sequence of arrays Sequence of arrays. fill_value : {None, integer} Value used to pad shorter iterables. flatten : {True, False}, Whether to",
    "type": "function",
    "file_path": "numpy\\numpy\\lib\\recfunctions.py",
    "ast_data": "FunctionDef name:_izip_records arg:seqarrays arg:fill_value arg:flatten arguments arg arg arg If Assign Assign For Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "__call__",
    "source_code": "def __call__(self, features: Any, weights: Optional[Any]=None) -> Tuple[Any, Dict[str, PartitionedCsrFormatTensor]]:\n    return self.embedding_lookup(features, weights)",
    "docstring": "Call the mid level api to do embedding lookup.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\tpu\\tpu_embedding_v3.py",
    "ast_data": "FunctionDef name:__call__ arg:self arg:features arg:weights arguments arg arg arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "triton_config_tiled_reduction",
    "source_code": "def triton_config_tiled_reduction(size_hints, x, y, r, num_stages=1):\n    rnumels = _get_nd_reduction_numels(r, size_hints)\n    x = min(x, size_hints['x'])\n    y = min(y, size_hints['y'])\n\n    def total_numel() -> int:\n        return conditional_product(x, y, *rnumels.values())\n    target = total_numel()\n    if conditional_product(*size_hints.values()) < target:\n        target //= 8\n    while x < size_hints['x'] and total_numel() < target:\n        x *= 2\n    for prefix in sorted(rnumels):\n        while rnumels[prefix] < size_hints[prefix] and total_numel() < target:\n            rnumels[prefix] *= 2\n    while y < size_hints[1] and total_numel() < target:\n        y *= 2\n    cfg = _get_config({'x': x, 'y': y, **rnumels})\n    num_warps = _num_warps(total_numel() // 256, min_num_warps=1)\n    check_config(cfg, xnumel=size_hints[0], ynumel=size_hints[1])\n    check_max_block(cfg)\n    return Config(cfg, num_warps=num_warps, num_stages=num_stages)",
    "docstring": "Construct a tile reduction triton config with some adjustment heuristics based on size_hints. Size_hints is a tuple of numels in each tile dimension and will be rounded up to the nearest power of 2.",
    "type": "function",
    "file_path": "pytorch\\torch\\_inductor\\runtime\\triton_heuristics.py",
    "ast_data": "FunctionDef name:triton_config_tiled_reduction arg:size_hints arg:x arg:y arg:r arg:num_stages arguments arg arg arg arg arg Assign Call Assign Call Assign Call FunctionDef name:total_numel arguments Return return:yes Call Call Assign Call If Compare Call Call While BoolOp Compare Compare Call For Call While BoolOp Compare Compare Call While BoolOp Compare Compare Call Assign Call Assign Call Call Call Call Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "_convert_strls",
    "source_code": "def _convert_strls(self, data: DataFrame) -> DataFrame:\n    return data",
    "docstring": "No-op, future compatibility",
    "type": "method",
    "file_path": "pandas\\pandas\\io\\stata.py",
    "ast_data": "FunctionDef name:_convert_strls arg:self arg:data arguments arg arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "get_output_shape_at",
    "source_code": "@doc_controls.do_not_doc_inheritable\ndef get_output_shape_at(self, node_index):\n    return self._get_node_attribute_at_index(node_index, 'output_shapes', 'output shape')",
    "docstring": "Retrieves the output shape(s) of a layer at a given node. Args: node_index: Integer, index of the node from which to retrieve the attribute. E.g. will correspond to the first time the layer was called. Returns: A shape tuple (or list of shape tuples if the layer has multiple outputs). Raises: RuntimeError: If called in Eager mode.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\base_layer.py",
    "ast_data": "FunctionDef name:get_output_shape_at arg:self arg:node_index arguments arg arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "post_pr_comment",
    "source_code": "def post_pr_comment(org: str, project: str, pr_num: int, msg: str, dry_run: bool=False) -> list[dict[str, Any]]:\n    internal_debugging = ''\n    run_url = os.getenv('GH_RUN_URL')\n    if run_url is not None:\n        internal_debugging = '\\n'.join((line for line in ('<details><summary>Details for Dev Infra team</summary>', f'Raised by <a href=\"{run_url}\">workflow job</a>\\n', '</details>') if line))\n    comment = '\\n'.join((f'### Cherry picking #{pr_num}', f'{msg}', '', f'{internal_debugging}'))\n    return gh_post_pr_comment(org, project, pr_num, comment, dry_run)",
    "docstring": "Post a comment on the PR itself to point to the cherry picking PR when success or print the error when failure",
    "type": "function",
    "file_path": "pytorch\\.github\\scripts\\cherry_pick.py",
    "ast_data": "FunctionDef name:post_pr_comment arg:org arg:project arg:pr_num arg:msg arg:dry_run arguments arg arg arg arg arg Assign Assign Call If Compare Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "sphinx",
    "name": "ExternalLinksChecker",
    "source_code": "class ExternalLinksChecker(SphinxPostTransform):\n    default_priority = 500\n\n    def run(self, **kwargs: Any) -> None:\n        if not self.config.extlinks_detect_hardcoded_links:\n            return\n        for refnode in self.document.findall(nodes.reference):\n            self.check_uri(refnode)\n\n    def check_uri(self, refnode: nodes.reference) -> None:\n        if 'internal' in refnode or 'refuri' not in refnode:\n            return\n        uri = refnode['refuri']\n        title = refnode.astext()\n        for alias, (base_uri, _caption) in self.app.config.extlinks.items():\n            uri_pattern = re.compile(re.escape(base_uri).replace('%s', '(?P<value>.+)'))\n            match = uri_pattern.match(uri)\n            if match and match.groupdict().get('value') and ('/' not in match.groupdict()['value']):\n                msg = __('hardcoded link %r could be replaced by an extlink (try using %r instead)')\n                value = match.groupdict().get('value')\n                if uri != title:\n                    replacement = f':{alias}:`{rst.escape(title)} <{value}>`'\n                else:\n                    replacement = f':{alias}:`{value}`'\n                logger.warning(msg, uri, replacement, location=refnode)",
    "docstring": "For each external link, check if it can be replaced by an extlink. We treat each `` attribute as an external link.",
    "type": "class",
    "file_path": "sphinx\\sphinx\\ext\\extlinks.py",
    "ast_data": "ClassDef name:ExternalLinksChecker Assign FunctionDef name:run arg:self arguments arg arg If Return return:no For Call Call FunctionDef name:check_uri arg:self arg:refnode arguments arg arg If BoolOp Compare Compare Return return:no Assign Assign Call For Call Assign Call Call Call Assign Call If BoolOp Call Call Compare Call Assign Call Assign Call Call If Compare Assign Call Assign Call"
  },
  {
    "library": "tensorflow",
    "name": "_wrap_function",
    "source_code": "def _wrap_function(self, fn, args=None, kwargs=None, signature=None, name=None):\n    fn_with_filter_and_scope, returned_ops = _filter_returned_ops(self._variable_holder.call_with_variable_creator_scope(fn))\n    func_graph.func_graph_from_py_func(None, fn_with_filter_and_scope, args=args, kwargs=kwargs, signature=signature, add_control_dependencies=False, func_graph=self.graph)\n    fn_inputs = self.graph.inputs[:-len(self.graph.captures)]\n    flat_fn_outputs = nest.flatten(self.graph.structured_outputs)\n    for index, op in returned_ops.items():\n        flat_fn_outputs[index] = op\n    fn_outputs = nest.pack_sequence_as(self.graph.structured_outputs, flat_fn_outputs)\n    name = name or fn.__name__\n    wrapped_function = self._wrapped_function.prune(fn_inputs, fn_outputs, name, self.graph.structured_input_signature)\n    self._functions[name] = wrapped_function\n    return wrapped_function",
    "docstring": "Internal wrap function method with extended func_graph arguments.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\wrap_function.py",
    "ast_data": "FunctionDef name:_wrap_function arg:self arg:fn arg:args arg:kwargs arg:signature arg:name arguments arg arg arg arg arg arg Assign Call Call Call Assign Call Assign Call For Call Assign Assign Call Assign BoolOp Assign Call Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "dequeue",
    "source_code": "def dequeue(self, name=None):\n    if name is None:\n        name = '%s_Dequeue' % self._name\n    if self._queue_ref.dtype == _dtypes.resource:\n        ret = gen_data_flow_ops.queue_dequeue_v2(self._queue_ref, self._dtypes, name=name)\n    else:\n        ret = gen_data_flow_ops.queue_dequeue(self._queue_ref, self._dtypes, name=name)\n    if not context.executing_eagerly():\n        op = ret[0].op\n        for output, shape in zip(op.values(), self._shapes):\n            output.set_shape(shape)\n    return self._dequeue_return_value(ret)",
    "docstring": "Dequeues one element from this queue. If the queue is empty when this operation executes, it will block until there is an element to dequeue. At runtime, this operation may raise an error if the queue is before or during its execution. If the queue is closed, the queue is empty, and there are no pending enqueue operations that can fulfill this request, will be raised. If the session is , will be raised. >>> q = tf.queue.FIFOQueue(capacity=2, dtypes=tf.int32) >>> q.enqueue(1) >>> q.enqueue(2) >>> q.dequeue() >>> q.dequeue() Args: name: A name for the operation (optional). Returns: The tuple of tensors that was dequeued.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\data_flow_ops.py",
    "ast_data": "FunctionDef name:dequeue arg:self arg:name arguments arg arg If Compare Assign If Compare Assign Call Assign Call If Call Assign For Call Call Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "private_map",
    "source_code": "@property\ndef private_map(self):\n    return self._private_map",
    "docstring": "A map from parents to symbols that should not be included at all. This map can be edited, but it should not be edited once traversal has begun. Returns: The map marking symbols to not include.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\tools\\common\\public_api.py",
    "ast_data": "FunctionDef name:private_map arg:self arguments arg Return return:yes"
  },
  {
    "library": "pandas",
    "name": "is_named_tuple",
    "source_code": "def is_named_tuple(obj: object) -> bool:\n    return isinstance(obj, abc.Sequence) and hasattr(obj, '_fields')",
    "docstring": "Check if the object is a named tuple. Parameters ---------- obj : object The object that will be checked to determine whether it is a named tuple. Returns ------- bool Whether is a named tuple. See Also -------- api.types.is_dict_like: Check if the object is dict-like. api.types.is_hashable: Return True if hash(obj) will succeed, False otherwise. api.types.is_categorical_dtype : Check if the dtype is categorical. Examples -------- >>> from collections import namedtuple >>> from pandas.api.types import is_named_tuple >>> Point = namedtuple(\"Point\", [\"x\", \"y\"]) >>> p = Point(1, 2) >>> >>> is_named_tuple(p) True >>> is_named_tuple((1, 2)) False",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\dtypes\\inference.py",
    "ast_data": "FunctionDef name:is_named_tuple arg:obj arguments arg Return return:yes BoolOp Call Call"
  },
  {
    "library": "django",
    "name": "file_complete",
    "source_code": "def file_complete(self, file_size):\n    if not self.activated:\n        return\n    self.file.seek(0)\n    return InMemoryUploadedFile(file=self.file, field_name=self.field_name, name=self.file_name, content_type=self.content_type, size=file_size, charset=self.charset, content_type_extra=self.content_type_extra)",
    "docstring": "Return a file object if this handler is activated.",
    "type": "method",
    "file_path": "django\\django\\core\\files\\uploadhandler.py",
    "ast_data": "FunctionDef name:file_complete arg:self arg:file_size arguments arg arg If Return return:no Call Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "pproc_fpool_nog",
    "source_code": "def pproc_fpool_nog(self):\n    self.wfield.func\n    fpool_l = []\n    for v in self.fpool:\n        fpool_l.append(v.x_a)\n    F = self._mapwrapper(self.wfield.func, fpool_l)\n    for va, f in zip(fpool_l, F):\n        vt = tuple(va)\n        self[vt].f = f\n        self.nfev += 1\n    self.fpool = set()",
    "docstring": "Process all field functions with no constraints supplied in parallel.",
    "type": "method",
    "file_path": "scipy\\scipy\\optimize\\_shgo_lib\\_vertex.py",
    "ast_data": "FunctionDef name:pproc_fpool_nog arg:self arguments arg Assign For Call Assign Call For Call Assign Call Assign Assign Call"
  },
  {
    "library": "tensorflow",
    "name": "metrics",
    "source_code": "@property\ndef metrics(self):\n    if not self._built:\n        return []\n    per_output_metrics = [metric_obj for metric_obj in nest.flatten(self._per_output_metrics) if metric_obj is not None]\n    return [self._loss_metric] + per_output_metrics",
    "docstring": "Per-output loss metrics.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\compile_utils.py",
    "ast_data": "FunctionDef name:metrics arg:self arguments arg If Return return:no Assign Call Compare Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "__call__",
    "source_code": "def __call__(self, shape, dtype=dtypes.float32, **kwargs):\n    self._validate_kwargs(kwargs, support_partition=False)\n    dtype = _assert_float_dtype(dtype)\n    if len(shape) != 2:\n        raise ValueError(f'The tensor to initialize, specified by argument `shape` must be at least two-dimensional. Received shape={shape}')\n    initializer = linalg_ops_impl.eye(*shape, dtype=dtype)\n    return self.gain * initializer",
    "docstring": "Returns a tensor object initialized as specified by the initializer. Args: shape: Shape of the tensor. dtype: Optional dtype of the tensor. Only floating point types are supported. **kwargs: Additional keyword arguments. Raises: ValueError: If the dtype is not floating point ValueError: If the requested shape does not have exactly two axes.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\init_ops_v2.py",
    "ast_data": "FunctionDef name:__call__ arg:self arg:shape arg:dtype arguments arg arg arg arg Call Assign Call If Compare Call Raise Call Assign Call Return return:yes"
  },
  {
    "library": "django",
    "name": "set_as_test_mirror",
    "source_code": "def set_as_test_mirror(self, primary_settings_dict):\n    self.connection.settings_dict['NAME'] = primary_settings_dict['NAME']",
    "docstring": "Set this database up to be used in testing as a mirror of a primary database whose settings are given.",
    "type": "method",
    "file_path": "django\\django\\db\\backends\\base\\creation.py",
    "ast_data": "FunctionDef name:set_as_test_mirror arg:self arg:primary_settings_dict arguments arg arg Assign"
  },
  {
    "library": "tensorflow",
    "name": "values",
    "source_code": "@property\ndef values(self):\n    return self._values",
    "docstring": "Returns the per replica values.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\values.py",
    "ast_data": "FunctionDef name:values arg:self arguments arg Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "theta",
    "source_code": "@theta.setter\ndef theta(self, theta):\n    params = self.get_params()\n    i = 0\n    for hyperparameter in self.hyperparameters:\n        if hyperparameter.fixed:\n            continue\n        if hyperparameter.n_elements > 1:\n            params[hyperparameter.name] = np.exp(theta[i:i + hyperparameter.n_elements])\n            i += hyperparameter.n_elements\n        else:\n            params[hyperparameter.name] = np.exp(theta[i])\n            i += 1\n    if i != len(theta):\n        raise ValueError('theta has not the correct number of entries. Should be %d; given are %d' % (i, len(theta)))\n    self.set_params(**params)",
    "docstring": "Sets the (flattened, log-transformed) non-fixed hyperparameters. Parameters ---------- theta : ndarray of shape (n_dims,) The non-fixed, log-transformed hyperparameters of the kernel",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\gaussian_process\\kernels.py",
    "ast_data": "FunctionDef name:theta arg:self arg:theta arguments arg arg Assign Call Assign For If If Compare Assign Call Assign Call If Compare Call Raise Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "maybe_load_initial_epoch_from_ckpt",
    "source_code": "def maybe_load_initial_epoch_from_ckpt(self, initial_epoch, mode):\n    epoch = backend.eval(self._ckpt_saved_epoch)\n    if mode == mode_keys.ModeKeys.TRAIN and epoch >= 0:\n        return epoch + 1\n    return initial_epoch",
    "docstring": "Maybe load initial epoch from ckpt considering possible worker recovery. When attribute exists and is not , this is under multi-worker training setting and indicates the worker is recovering from previous failure. In this case, infer from to continue previous unfinished training from certain epoch. Args: initial_epoch: The original initial_epoch user passes in in . mode: The mode for running . Returns: If the training is recovering from previous failure under multi-worker training setting, return the epoch the training is supposed to continue at. Otherwise, return the the user passes in.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\distribute\\worker_training_state.py",
    "ast_data": "FunctionDef name:maybe_load_initial_epoch_from_ckpt arg:self arg:initial_epoch arg:mode arguments arg arg arg Assign Call If BoolOp Compare Compare Return return:yes Return return:yes"
  },
  {
    "library": "django",
    "name": "get",
    "source_code": "def get(self, request, *args, **kwargs):\n    self.term, self.model_admin, self.source_field, to_field_name = self.process_request(request)\n    if not self.has_perm(request):\n        raise PermissionDenied\n    self.object_list = self.get_queryset()\n    context = self.get_context_data()\n    return JsonResponse({'results': [self.serialize_result(obj, to_field_name) for obj in context['object_list']], 'pagination': {'more': context['page_obj'].has_next()}})",
    "docstring": "Return a JsonResponse with search results as defined in serialize_result(), by default: { results: [{id: \"123\" text: \"foo\"}], pagination: {more: true} }",
    "type": "method",
    "file_path": "django\\django\\contrib\\admin\\views\\autocomplete.py",
    "ast_data": "FunctionDef name:get arg:self arg:request arguments arg arg arg arg Assign Call If Call Raise Assign Call Assign Call Return return:yes Call Call Call"
  },
  {
    "library": "cherrypy",
    "name": "__bool__",
    "source_code": "def __bool__(self):\n    return bool(self._exceptions)",
    "docstring": "Determine whether any error happened in channel.",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\process\\wspbus.py",
    "ast_data": "FunctionDef name:__bool__ arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "factorial",
    "source_code": "def factorial(n, exact=False, extend='zero'):\n    return _factorialx_wrapper('factorial', n, k=1, exact=exact, extend=extend)",
    "docstring": "The factorial of a number or array of numbers. The factorial of non-negative integer is the product of all positive integers less than or equal to :: n! = n * (n - 1) * (n - 2) * ... * 1 Parameters ---------- n : int or float or complex (or array_like thereof) Input values for ``n >> import numpy as np >>> from scipy.special import factorial >>> arr = np.array([3, 4, 5]) >>> factorial(arr, exact=False) array([ 6., 24., 120.]) >>> factorial(arr, exact=True) array([ 6, 24, 120]) >>> factorial(5, exact=True) 120",
    "type": "function",
    "file_path": "scipy\\scipy\\special\\_basic.py",
    "ast_data": "FunctionDef name:factorial arg:n arg:exact arg:extend arguments arg arg arg Return return:yes Call"
  },
  {
    "library": "django",
    "name": "demote_joins",
    "source_code": "def demote_joins(self, aliases):\n    aliases = list(aliases)\n    while aliases:\n        alias = aliases.pop(0)\n        if self.alias_map[alias].join_type == LOUTER:\n            self.alias_map[alias] = self.alias_map[alias].demote()\n            parent_alias = self.alias_map[alias].parent_alias\n            if self.alias_map[parent_alias].join_type == INNER:\n                aliases.append(parent_alias)",
    "docstring": "Change join type from LOUTER to INNER for all joins in aliases. Similarly to promote_joins(), this method must ensure no join chains containing first an outer, then an inner join are generated. If we are demoting b->c join in chain a LOUTER b LOUTER c then we must demote a->b automatically, or otherwise the demotion of b->c doesn't actually change anything in the query results. .",
    "type": "method",
    "file_path": "django\\django\\db\\models\\sql\\query.py",
    "ast_data": "FunctionDef name:demote_joins arg:self arg:aliases arguments arg arg Assign Call While Assign Call If Compare Assign Call Assign If Compare Call"
  },
  {
    "library": "tensorflow",
    "name": "_tf_assert_stmt",
    "source_code": "def _tf_assert_stmt(expression1, expression2):\n    expression2_tensors = expression2()\n    if not isinstance(expression2_tensors, list):\n        expression2_tensors = [expression2_tensors]\n    return control_flow_assert.Assert(expression1, expression2_tensors)",
    "docstring": "Overload of assert_stmt that stages a TF Assert. This implementation deviates from Python semantics as follows: (1) the assertion is verified regardless of the state of __debug__ (2) on assertion failure, the graph execution will fail with tensorflow.errors.ValueError, rather than AssertionError. Args: expression1: tensorflow.Tensor, must evaluate to a tf.bool scalar expression2: Callable[[], Union[tensorflow.Tensor, List[tensorflow.Tensor]]] Returns: tensorflow.Operation",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\autograph\\operators\\exceptions.py",
    "ast_data": "FunctionDef name:_tf_assert_stmt arg:expression1 arg:expression2 arguments arg arg Assign Call If Call Assign Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "BaseConfig",
    "source_code": "@dataclasses.dataclass\nclass BaseConfig:\n    block_m: int\n    block_n: int\n    block_k: int\n    num_stages: int\n    num_warps: int",
    "docstring": "Base Gemm configuration used for most backends (CPU, CUDA)",
    "type": "class",
    "file_path": "pytorch\\torch\\_inductor\\template_heuristics.py",
    "ast_data": "ClassDef name:BaseConfig"
  },
  {
    "library": "tensorflow",
    "name": "_tf_sess",
    "source_code": "def _tf_sess(self):\n    return self._coordinated_creator.tf_sess",
    "docstring": "Return underlying tf.compat.v1.Session object. Warning: accessing the returned object in user code is likely to cause races or \"flaky tests\". Returns: A tf.compat.v1.Session object.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\training\\monitored_session.py",
    "ast_data": "FunctionDef name:_tf_sess arg:self arguments arg Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "_score_samples",
    "source_code": "def _score_samples(self, X):\n    check_is_fitted(self)\n    return -self._compute_chunked_score_samples(X)",
    "docstring": "Private version of score_samples without input validation. Input validation would remove feature names, so we disable it.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\ensemble\\_iforest.py",
    "ast_data": "FunctionDef name:_score_samples arg:self arg:X arguments arg arg Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "to_dict",
    "source_code": "def to_dict(self) -> dict[str, Any]:\n    return {_GLOBAL_DICT_KEY: self.global_qconfig, _OBJECT_TYPE_DICT_KEY: list(self.object_type_qconfigs.items()), _MODULE_NAME_REGEX_DICT_KEY: list(self.module_name_regex_qconfigs.items()), _MODULE_NAME_DICT_KEY: list(self.module_name_qconfigs.items()), _MODULE_NAME_OBJECT_TYPE_ORDER_DICT_KEY: [(*k, v) for k, v in self.module_name_object_type_order_qconfigs.items()]}",
    "docstring": "Convert this `` to a dictionary with the following keys: \"\" (for global QConfig) \"object_type\" \"module_name_regex\" \"module_name\" \"module_name_object_type_order\" The values of this dictionary are lists of tuples.",
    "type": "method",
    "file_path": "pytorch\\torch\\ao\\quantization\\qconfig_mapping.py",
    "ast_data": "FunctionDef name:to_dict arg:self arguments arg Return return:yes Call Call Call Call Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "to_dlpack",
    "source_code": "@tf_export('experimental.dlpack.to_dlpack', v1=[])\ndef to_dlpack(tf_tensor):\n    return pywrap_tfe.TFE_ToDlpackCapsule(tf_tensor)",
    "docstring": "Returns the dlpack capsule representing the tensor. This operation ensures the underlying data memory is ready when returns. Args: tf_tensor: Tensorflow eager tensor, to be converted to dlpack capsule. Returns: A PyCapsule named as dltensor, which shares the underlying memory to other framework. This PyCapsule can be consumed only once.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\dlpack\\dlpack.py",
    "ast_data": "FunctionDef name:to_dlpack arg:tf_tensor arguments arg Return return:yes Call Call"
  },
  {
    "library": "scipy",
    "name": "correspond",
    "source_code": "@xp_capabilities()\ndef correspond(Z, Y):\n    xp = array_namespace(Z, Y)\n    Z = _asarray(Z, xp=xp)\n    Y = _asarray(Y, xp=xp)\n    _is_valid_linkage(Z, throw=True, xp=xp)\n    distance.is_valid_y(Y, throw=True)\n    return distance.num_obs_y(Y) == num_obs_linkage(Z)",
    "docstring": "Check for correspondence between linkage and condensed distance matrices. They must have the same number of original observations for the check to succeed. This function is useful as a sanity check in algorithms that make extensive use of linkage and distance matrices that must correspond to the same set of original observations. Parameters ---------- Z : array_like The linkage matrix to check for correspondence. Y : array_like The condensed distance matrix to check for correspondence. Returns ------- b : bool A boolean indicating whether the linkage matrix and distance matrix could possibly correspond to one another. See Also -------- linkage : for a description of what a linkage matrix is. Examples -------- >>> from scipy.cluster.hierarchy import ward, correspond >>> from scipy.spatial.distance import pdist This method can be used to check if a given linkage matrix `` (in condensed form): >>> correspond(Z, X_condensed) True",
    "type": "function",
    "file_path": "scipy\\scipy\\cluster\\hierarchy.py",
    "ast_data": "FunctionDef name:correspond arg:Z arg:Y arguments arg arg Assign Call Assign Call Assign Call Call Call Return return:yes Compare Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "is_fetchable",
    "source_code": "def is_fetchable(self, tensor_or_op) -> bool:\n    if isinstance(tensor_or_op, tensor_lib.Tensor):\n        return tensor_or_op.op not in self._unfetchable_ops\n    else:\n        return tensor_or_op not in self._unfetchable_ops",
    "docstring": "Returns if and only if is fetchable.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\ops.py",
    "ast_data": "FunctionDef name:is_fetchable arg:self arg:tensor_or_op arguments arg arg If Call Return return:yes Compare Return return:yes Compare"
  },
  {
    "library": "scipy",
    "name": "diff",
    "source_code": "def diff(x, order=1, period=None, _cache=_cache):\n    if isinstance(_cache, threading.local):\n        if not hasattr(_cache, 'diff_cache'):\n            _cache.diff_cache = {}\n        _cache = _cache.diff_cache\n    tmp = asarray(x)\n    if order == 0:\n        return tmp\n    if iscomplexobj(tmp):\n        return diff(tmp.real, order, period, _cache) + 1j * diff(tmp.imag, order, period, _cache)\n    if period is not None:\n        c = 2 * pi / period\n    else:\n        c = 1.0\n    n = len(x)\n    omega = _cache.get((n, order, c))\n    if omega is None:\n        if len(_cache) > 20:\n            while _cache:\n                _cache.popitem()\n\n        def kernel(k, order=order, c=c):\n            if k:\n                return pow(c * k, order)\n            return 0\n        omega = convolve.init_convolution_kernel(n, kernel, d=order, zero_nyquist=1)\n        _cache[n, order, c] = omega\n    overwrite_x = _datacopied(tmp, x)\n    return convolve.convolve(tmp, omega, swap_real_imag=order % 2, overwrite_x=overwrite_x)",
    "docstring": "Return kth derivative (or integral) of a periodic sequence x. If x_j and y_j are Fourier coefficients of periodic functions x and y, respectively, then:: y_j = pow(sqrt(-1)*j*2*pi/period, order) * x_j y_0 = 0 if order is not 0. Parameters ---------- x : array_like Input array. order : int, optional The order of differentiation. Default order is 1. If order is negative, then integration is carried out under the assumption that ``, the Nyquist mode is taken zero.",
    "type": "function",
    "file_path": "scipy\\scipy\\fftpack\\_pseudo_diffs.py",
    "ast_data": "FunctionDef name:diff arg:x arg:order arg:period arg:_cache arguments arg arg arg arg If Call If Call Assign Assign Assign Call If Compare Return return:yes If Call Return return:yes Call Call If Compare Assign Assign Assign Call Assign Call If Compare If Compare Call While Call FunctionDef name:kernel arg:k arg:order arg:c arguments arg arg arg If Return return:yes Call Return return:yes Assign Call Assign Assign Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "update_dtype",
    "source_code": "def update_dtype(self, attr_name, index, dtype):\n    attr = self._node.attr[attr_name]\n    num_types = 0\n    if attr.HasField('list'):\n        types = attr.list.type\n        num_types = len(types)\n        if num_types > index:\n            types[index] = dtype\n            return\n    elif attr.HasField('type'):\n        num_types = 1\n        if index == 0:\n            attr.type = dtype\n            return\n    raise ValueError(f'`index` {index:d} is out of range for node({self._node.name}).attr({attr_name}), which has {num_types:d} elements.')",
    "docstring": "Changes the type of a given input. Args: attr_name: The NodeDef attribute containing the type to change. index: The index of the input type to change. dtype: The type to change to.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\convert_to_constants.py",
    "ast_data": "FunctionDef name:update_dtype arg:self arg:attr_name arg:index arg:dtype arguments arg arg arg arg Assign Assign If Call Assign Assign Call If Compare Assign Return return:no If Call Assign If Compare Assign Return return:no Raise Call"
  },
  {
    "library": "matplotlib",
    "name": "_gen_cmap_registry",
    "source_code": "def _gen_cmap_registry():\n    cmap_d = {**cmaps_listed}\n    for name, spec in datad.items():\n        cmap_d[name] = colors.LinearSegmentedColormap(name, spec, _LUTSIZE) if 'red' in spec else colors.ListedColormap(spec['listed'], name) if 'listed' in spec else colors.LinearSegmentedColormap.from_list(name, spec, _LUTSIZE)\n    aliases = {'grey': 'gray', 'gist_grey': 'gist_gray', 'gist_yerg': 'gist_yarg', 'Grays': 'Greys'}\n    for alias, original_name in aliases.items():\n        cmap = cmap_d[original_name].copy()\n        cmap.name = alias\n        cmap_d[alias] = cmap\n    for cmap in list(cmap_d.values()):\n        rmap = cmap.reversed()\n        cmap_d[rmap.name] = rmap\n    return cmap_d",
    "docstring": "Generate a dict mapping standard colormap names to standard colormaps, as well as the reversed colormaps.",
    "type": "function",
    "file_path": "matplotlib\\lib\\matplotlib\\cm.py",
    "ast_data": "FunctionDef name:_gen_cmap_registry arguments Assign For Call Assign Compare Call Compare Call Call Assign For Call Assign Call Assign Assign For Call Call Assign Call Assign Return return:yes"
  },
  {
    "library": "pandas",
    "name": "astype",
    "source_code": "def astype(self, dtype: AstypeArg, copy: bool=True) -> ArrayLike:\n    dtype = pandas_dtype(dtype)\n    if dtype == self.dtype:\n        if not copy:\n            return self\n        else:\n            return self.copy()\n    if isinstance(dtype, ExtensionDtype):\n        cls = dtype.construct_array_type()\n        return cls._from_sequence(self, dtype=dtype, copy=copy)\n    elif lib.is_np_dtype(dtype, 'M'):\n        from pandas.core.arrays import DatetimeArray\n        return DatetimeArray._from_sequence(self, dtype=dtype, copy=copy)\n    elif lib.is_np_dtype(dtype, 'm'):\n        from pandas.core.arrays import TimedeltaArray\n        return TimedeltaArray._from_sequence(self, dtype=dtype, copy=copy)\n    if not copy:\n        return np.asarray(self, dtype=dtype)\n    else:\n        return np.array(self, dtype=dtype, copy=copy)",
    "docstring": "Cast to a NumPy array or ExtensionArray with 'dtype'. Parameters ---------- dtype : str or dtype Typecode or data-type to which the array is cast. copy : bool, default True Whether to copy the data, even if not necessary. If False, a copy is made only if the old dtype does not match the new dtype. Returns ------- np.ndarray or pandas.api.extensions.ExtensionArray An ``: >>> arr1 = arr.astype(\"Float64\") >>> arr1 [1.0, 2.0, 3.0] Length: 3, dtype: Float64 >>> arr1.dtype Float64Dtype() Otherwise, we will get a Numpy ndarray: >>> arr2 = arr.astype(\"float64\") >>> arr2 array([1., 2., 3.]) >>> arr2.dtype dtype('float64')",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\arrays\\base.py",
    "ast_data": "FunctionDef name:astype arg:self arg:dtype arg:copy arguments arg arg arg Assign Call If Compare If Return return:yes Return return:yes Call If Call Assign Call Return return:yes Call If Call Return return:yes Call If Call Return return:yes Call If Return return:yes Call Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "get_indexer_for",
    "source_code": "@final\ndef get_indexer_for(self, target) -> npt.NDArray[np.intp]:\n    if self._index_as_unique:\n        return self.get_indexer(target)\n    indexer, _ = self.get_indexer_non_unique(target)\n    return indexer",
    "docstring": "Guaranteed return of an indexer even when non-unique. This dispatches to get_indexer or get_indexer_non_unique as appropriate. Parameters ---------- target : Index An iterable containing the values to be used for computing indexer. Returns ------- np.ndarray[np.intp] List of indices. See Also -------- Index.get_indexer : Computes indexer and mask for new index given the current index. Index.get_non_unique : Returns indexer and masks for new index given the current index. Examples -------- >>> idx = pd.Index([np.nan, \"var1\", np.nan]) >>> idx.get_indexer_for([np.nan]) array([0, 2])",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\indexes\\base.py",
    "ast_data": "FunctionDef name:get_indexer_for arg:self arg:target arguments arg arg If Return return:yes Call Assign Call Return return:yes"
  },
  {
    "library": "django",
    "name": "check_consistent_history",
    "source_code": "def check_consistent_history(self, connection):\n    recorder = MigrationRecorder(connection)\n    applied = recorder.applied_migrations()\n    for migration in applied:\n        if migration not in self.graph.nodes:\n            continue\n        for parent in self.graph.node_map[migration].parents:\n            if parent not in applied:\n                if parent in self.replacements:\n                    if all((m in applied for m in self.replacements[parent].replaces)):\n                        continue\n                raise InconsistentMigrationHistory(\"Migration {}.{} is applied before its dependency {}.{} on database '{}'.\".format(migration[0], migration[1], parent[0], parent[1], connection.alias))",
    "docstring": "Raise InconsistentMigrationHistory if any applied migrations have unapplied dependencies.",
    "type": "method",
    "file_path": "django\\django\\db\\migrations\\loader.py",
    "ast_data": "FunctionDef name:check_consistent_history arg:self arg:connection arguments arg arg Assign Call Assign Call For If Compare For If Compare If Compare If Call Compare Raise Call Call"
  },
  {
    "library": "pytorch",
    "name": "denied_modules",
    "source_code": "def denied_modules(self) -> list[str]:\n    return self._nodes_with_action_type(_ModuleProviderAction.DENY)",
    "docstring": "Return all modules that are currently denied. Returns: A list containing the names of modules which will be denied in this package.",
    "type": "method",
    "file_path": "pytorch\\torch\\package\\package_exporter.py",
    "ast_data": "FunctionDef name:denied_modules arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_wrap_decorator",
    "source_code": "def _wrap_decorator(wrapped_function, decorator_name):\n\n    def wrapper(wrapper_func):\n        return tf_decorator.make_decorator(wrapped_function, wrapper_func, decorator_name)\n    return wrapper",
    "docstring": "Indicate that one function wraps another. This decorator wraps a function using so that doc generation scripts can pick up original function signature. It would be better to use @functools.wrap decorator, but it would not update function signature to match wrapped function in Python 2. Args: wrapped_function: The function that decorated function wraps. decorator_name: The name of the decorator. Returns: Function that accepts wrapper function as an argument and returns instance.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\util\\deprecation.py",
    "ast_data": "FunctionDef name:_wrap_decorator arg:wrapped_function arg:decorator_name arguments arg arg FunctionDef name:wrapper arg:wrapper_func arguments arg Return return:yes Call Return return:yes"
  },
  {
    "library": "scipy",
    "name": "cspline2d",
    "source_code": "def cspline2d(signal, lamb=0.0, precision=-1.0):\n    xp = array_namespace(signal)\n    signal = np.asarray(signal)\n    if precision < 0.0 or precision >= 1.0:\n        if signal.dtype in [np.float32, np.complex64]:\n            precision = 0.001\n        else:\n            precision = 1e-06\n    if lamb <= 1 / 144.0:\n        r = -2 + math.sqrt(3.0)\n        out = symiirorder_nd(symiirorder1, signal, -r * 6.0, r, precision=precision, axis=-1)\n        out = symiirorder_nd(symiirorder1, out, -r * 6.0, r, precision=precision, axis=0)\n        return out\n    r, omega = compute_root_from_lambda(lamb)\n    out = symiirorder_nd(symiirorder2, signal, r, omega, precision=precision, axis=-1)\n    out = symiirorder_nd(symiirorder2, out, r, omega, precision=precision, axis=0)\n    return xp.asarray(out)",
    "docstring": "Coefficients for 2-D cubic (3rd order) B-spline. Return the third-order B-spline coefficients over a regularly spaced input grid for the two-dimensional input image. Parameters ---------- input : ndarray The input signal. lamb : float Specifies the amount of smoothing in the transfer function. precision : float Specifies the precision for computing the infinite sum needed to apply mirror-symmetric boundary conditions. Returns ------- output : ndarray The filtered signal.",
    "type": "function",
    "file_path": "scipy\\scipy\\signal\\_spline_filters.py",
    "ast_data": "FunctionDef name:cspline2d arg:signal arg:lamb arg:precision arguments arg arg arg Assign Call Assign Call If BoolOp Compare Compare If Compare Assign Assign If Compare Assign Call Assign Call Assign Call Return return:yes Assign Call Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "_generate_index_names_row",
    "source_code": "def _generate_index_names_row(self, iter: Sequence, max_cols: int, col_lengths: dict):\n    clabels = iter\n    index_names = [_element('th', f'{self.css['index_name']} {self.css['level']}{c}', self.css['blank_value'] if name is None else name, not self.hide_index_[c], display_value=None if name is None else self._display_funcs_index_names[c](name)) for c, name in enumerate(self.data.index.names)]\n    column_blanks: list = []\n    visible_col_count: int = 0\n    if clabels:\n        last_level = self.columns.nlevels - 1\n        for c, value in enumerate(clabels[last_level]):\n            header_element_visible = _is_visible(c, last_level, col_lengths)\n            if header_element_visible:\n                visible_col_count += 1\n            if self._check_trim(visible_col_count, max_cols, column_blanks, 'th', f'{self.css['blank']} {self.css['col']}{c} {self.css['col_trim']}', self.css['blank_value']):\n                break\n            column_blanks.append(_element('th', f'{self.css['blank']} {self.css['col']}{c}', self.css['blank_value'], c not in self.hidden_columns))\n    return index_names + column_blanks",
    "docstring": "Generate the row containing index names +----------------------------+---------------+---------------------------+ | index_names (level_0 to level_n) ... | column_blanks ... | +----------------------------+---------------+---------------------------+ Parameters ---------- iter : tuple Looping variables from outer scope max_cols : int Permissible number of columns Returns ------- list of elements",
    "type": "method",
    "file_path": "pandas\\pandas\\io\\formats\\style_render.py",
    "ast_data": "FunctionDef name:_generate_index_names_row arg:self arg:iter arg:max_cols arg:col_lengths arguments arg arg arg arg Assign Assign Call Compare Compare Call Call If Assign For Call Assign Call If If Call Call Call Compare Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "get_n_leaf_nodes",
    "source_code": "def get_n_leaf_nodes(self):\n    return int(self.nodes['is_leaf'].sum())",
    "docstring": "Return number of leaves.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\ensemble\\_hist_gradient_boosting\\predictor.py",
    "ast_data": "FunctionDef name:get_n_leaf_nodes arg:self arguments arg Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "from_float",
    "source_code": "@classmethod\ndef from_float(cls, mod, use_precomputed_fake_quant=False):\n    return _ConvNd.from_float(cls, mod, use_precomputed_fake_quant=use_precomputed_fake_quant)",
    "docstring": "Creates a quantized module from a float module or qparams_dict. Args: mod (Module): a float module, either produced by torch.ao.quantization utilities or provided by the user",
    "type": "method",
    "file_path": "pytorch\\torch\\ao\\nn\\quantized\\modules\\conv.py",
    "ast_data": "FunctionDef name:from_float arg:cls arg:mod arg:use_precomputed_fake_quant arguments arg arg arg Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "set_ticks_position",
    "source_code": "def set_ticks_position(self, position):\n    if position == 'top':\n        self.set_tick_params(which='both', top=True, labeltop=True, bottom=False, labelbottom=False)\n        self._tick_position = 'top'\n        self.offsetText.set_verticalalignment('bottom')\n    elif position == 'bottom':\n        self.set_tick_params(which='both', top=False, labeltop=False, bottom=True, labelbottom=True)\n        self._tick_position = 'bottom'\n        self.offsetText.set_verticalalignment('top')\n    elif position == 'both':\n        self.set_tick_params(which='both', top=True, bottom=True)\n    elif position == 'none':\n        self.set_tick_params(which='both', top=False, bottom=False)\n    elif position == 'default':\n        self.set_tick_params(which='both', top=True, labeltop=False, bottom=True, labelbottom=True)\n        self._tick_position = 'bottom'\n        self.offsetText.set_verticalalignment('top')\n    else:\n        _api.check_in_list(['top', 'bottom', 'both', 'default', 'none'], position=position)\n    self.stale = True",
    "docstring": "Set the ticks position. Parameters ---------- position : {'top', 'bottom', 'both', 'default', 'none'} 'both' sets the ticks to appear on both positions, but does not change the tick labels. 'default' resets the tick positions to the default: ticks on both positions, labels at bottom. 'none' can be used if you don't want any ticks. 'none' and 'both' affect only the ticks, not the labels.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\axis.py",
    "ast_data": "FunctionDef name:set_ticks_position arg:self arg:position arguments arg arg If Compare Call Assign Call If Compare Call Assign Call If Compare Call If Compare Call If Compare Call Assign Call Call Assign"
  },
  {
    "library": "pytorch",
    "name": "profile_plot",
    "source_code": "def profile_plot(profile, device=None):\n    snapshot = _profile_to_snapshot(profile)\n    return _format_viz(snapshot, 'Active Memory Timeline', device)",
    "docstring": "Generate a visualization over time of the memory usage recorded by kineto memory profiling as an html file. Args: profile: profile as generated by device (torch.device, optional): Generate the trace for this device, needed if multiple devices have allocations. Returns: str: HTML of visualization",
    "type": "function",
    "file_path": "pytorch\\torch\\cuda\\_memory_viz.py",
    "ast_data": "FunctionDef name:profile_plot arg:profile arg:device arguments arg arg Assign Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_transform_input_tensor",
    "source_code": "def _transform_input_tensor(self, input_tensor, state_manager=None):\n    if self.dtype.is_integer != input_tensor.dtype.is_integer:\n        raise ValueError('Column dtype and SparseTensors dtype must be compatible. key: {}, column dtype: {}, tensor dtype: {}'.format(self.key, self.dtype, input_tensor.dtype))\n    fc_utils.assert_string_or_int(input_tensor.dtype, prefix='column_name: {} input_tensor'.format(self.key))\n    key_dtype = self.dtype\n    if input_tensor.dtype.is_integer:\n        key_dtype = dtypes.int64\n        input_tensor = math_ops.cast(input_tensor, dtypes.int64)\n    name = '{}_lookup'.format(self.key)\n    if state_manager is None or not state_manager.has_resource(self, name):\n        with ops.init_scope():\n            table = lookup_ops.index_table_from_tensor(vocabulary_list=tuple(self.vocabulary_list), default_value=self.default_value, num_oov_buckets=self.num_oov_buckets, dtype=key_dtype, name=name)\n        if state_manager is not None:\n            state_manager.add_resource(self, name, table)\n    else:\n        table = state_manager.get_resource(self, name)\n    return table.lookup(input_tensor)",
    "docstring": "Creates a lookup table for the vocabulary list.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\feature_column\\feature_column_v2.py",
    "ast_data": "FunctionDef name:_transform_input_tensor arg:self arg:input_tensor arg:state_manager arguments arg arg arg If Compare Raise Call Call Call Call Assign If Assign Assign Call Assign Call If BoolOp Compare Call With Call Assign Call Call If Compare Call Assign Call Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "size",
    "source_code": "def size(self) -> int:\n    return self._col.size",
    "docstring": "Size of the column, in elements.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\interchange\\column.py",
    "ast_data": "FunctionDef name:size arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "all_reduce_v3",
    "source_code": "def all_reduce_v3(communicator, t, reduction='Add', group_assignment=None, timeout_seconds=None):\n    if group_assignment is None:\n        group_assignment = []\n    return gen_collective_ops.collective_reduce_v3(communicator=communicator, input=t, group_assignment=group_assignment, reduction=reduction, timeout_seconds=timeout_seconds)",
    "docstring": "Reduces tensors mutually. Args: communicator: the resource returned from . t: the to be reduced. reduction: a string. The name of the operation to reduce the values. Accpeted values are , , , . group_assignment: Optional int32 with shape [num_groups, num_ranks_per_group]. represents the ranks in the subgroup. timeout_seconds: If set to a non zero, set a completion timeout to detect staleness. If the timer goes off, a DeadlineExceededError is raised. The timeout value in seconds. This feature is experimental. Returns: The reduced .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\collective_ops.py",
    "ast_data": "FunctionDef name:all_reduce_v3 arg:communicator arg:t arg:reduction arg:group_assignment arg:timeout_seconds arguments arg arg arg arg arg If Compare Assign Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "_formatter_func",
    "source_code": "@property\ndef _formatter_func(self):\n    return default_pprint",
    "docstring": "Return the formatter function.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\indexes\\base.py",
    "ast_data": "FunctionDef name:_formatter_func arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_validate_tensor_info",
    "source_code": "def _validate_tensor_info(self, tensor_info):\n    if tensor_info is None:\n        raise AssertionError('All TensorInfo protos used in the SignatureDefs must have the name and dtype fields set.')\n    if tensor_info.WhichOneof('encoding') is None:\n        raise AssertionError(f\"Invalid `tensor_info`: {tensor_info}. All TensorInfo protos used in the SignatureDefs must have one of the 'encoding' fields (e.g., name or coo_sparse) set.\")\n    if tensor_info.WhichOneof('encoding') == 'composite_tensor':\n        for component in tensor_info.composite_tensor.components:\n            self._validate_tensor_info(component)\n    elif tensor_info.dtype == types_pb2.DT_INVALID:\n        raise AssertionError(f'Invalid `tensor_info`: {tensor_info}. All TensorInfo protos used in the SignatureDefs must have the dtype field set.')",
    "docstring": "Validates the proto. Checks if the ( or or ) and fields exist and are non-empty. Args: tensor_info: protocol buffer to validate. Raises: AssertionError: If the or fields of the supplied proto are not populated.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\saved_model\\builder_impl.py",
    "ast_data": "FunctionDef name:_validate_tensor_info arg:self arg:tensor_info arguments arg arg If Compare Raise Call If Compare Call Raise Call If Compare Call For Call If Compare Raise Call"
  },
  {
    "library": "scipy",
    "name": "_expm_multiply_simple_core",
    "source_code": "def _expm_multiply_simple_core(A, B, t, mu, m_star, s, tol=None, balance=False):\n    if balance:\n        raise NotImplementedError\n    if tol is None:\n        u_d = 2 ** (-53)\n        tol = u_d\n    F = B\n    eta = np.exp(t * mu / float(s))\n    for i in range(s):\n        c1 = _exact_inf_norm(B)\n        for j in range(m_star):\n            coeff = t / float(s * (j + 1))\n            B = coeff * A.dot(B)\n            c2 = _exact_inf_norm(B)\n            F = F + B\n            if c1 + c2 <= tol * _exact_inf_norm(F):\n                break\n            c1 = c2\n        F = eta * F\n        B = F\n    return F",
    "docstring": "A helper function.",
    "type": "function",
    "file_path": "scipy\\scipy\\sparse\\linalg\\_expm_multiply.py",
    "ast_data": "FunctionDef name:_expm_multiply_simple_core arg:A arg:B arg:t arg:mu arg:m_star arg:s arg:tol arg:balance arguments arg arg arg arg arg arg arg arg If Raise If Compare Assign Assign Assign Assign Call Call For Call Assign Call For Call Assign Call Assign Call Assign Call Assign If Compare Call Assign Assign Assign Return return:yes"
  },
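  A quick usage sketch may help here: the public entry point scipy.sparse.linalg.expm_multiply drives this helper to apply the action of the matrix exponential without ever forming expm(A) densely.

```python
# Sketch of the public API that ultimately calls _expm_multiply_simple_core:
# compute y = expm(A) @ b for a sparse A without materializing expm(A).
import numpy as np
from scipy.sparse import diags
from scipy.sparse.linalg import expm_multiply

A = diags([1.0, -2.0, 1.0], [-1, 0, 1], shape=(100, 100))  # 1-D Laplacian
b = np.ones(100)
y = expm_multiply(A, b)  # approximately expm(A.toarray()) @ b
```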
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, scores=None, classes=None):\n    if scores is not None and (not (isinstance(scores, tensor.Tensor) and scores.dtype.is_floating)):\n        raise ValueError('Classification scores must be a float32 Tensor; got {}'.format(scores))\n    if classes is not None and (not (isinstance(classes, tensor.Tensor) and dtypes.as_dtype(classes.dtype) == dtypes.string)):\n        raise ValueError('Classification classes must be a string Tensor; got {}'.format(classes))\n    if scores is None and classes is None:\n        raise ValueError('At least one of scores and classes must be set.')\n    self._scores = scores\n    self._classes = classes",
    "docstring": "Constructor for . Args: scores: A float giving scores (sometimes but not always interpretable as probabilities) for each class. May be , but only if is set. Interpretation varies-- see class doc. classes: A string giving predicted class labels. May be , but only if is set. Interpretation varies-- see class doc. Raises: ValueError: if neither classes nor scores is set, or one of them is not a with the correct dtype.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\saving\\utils_v1\\export_output.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:scores arg:classes arguments arg arg arg If BoolOp Compare BoolOp Call Raise Call Call If BoolOp Compare BoolOp Call Compare Call Raise Call Call If BoolOp Compare Compare Raise Call Assign Assign"
  },
  {
    "library": "numpy",
    "name": "_shrink_mask",
    "source_code": "def _shrink_mask(m):\n    if m.dtype.names is None and (not m.any()):\n        return nomask\n    else:\n        return m",
    "docstring": "Shrink a mask to nomask if possible",
    "type": "function",
    "file_path": "numpy\\numpy\\ma\\core.py",
    "ast_data": "FunctionDef name:_shrink_mask arg:m arguments arg If BoolOp Compare Call Return return:yes Return return:yes"
  },
  {
    "library": "pandas",
    "name": "arrays_to_mgr",
    "source_code": "def arrays_to_mgr(arrays, columns: Index, index, *, dtype: DtypeObj | None=None, verify_integrity: bool=True, consolidate: bool=True) -> Manager:\n    if verify_integrity:\n        if index is None:\n            index = _extract_index(arrays)\n        else:\n            index = ensure_index(index)\n        arrays, refs = _homogenize(arrays, index, dtype)\n    else:\n        index = ensure_index(index)\n        arrays = [extract_array(x, extract_numpy=True) for x in arrays]\n        refs = [None] * len(arrays)\n        for arr in arrays:\n            if not isinstance(arr, (np.ndarray, ExtensionArray)) or arr.ndim != 1 or len(arr) != len(index):\n                raise ValueError('Arrays must be 1-dimensional np.ndarray or ExtensionArray with length matching len(index)')\n    columns = ensure_index(columns)\n    if len(columns) != len(arrays):\n        raise ValueError('len(arrays) must match len(columns)')\n    axes = [columns, index]\n    return create_block_manager_from_column_arrays(arrays, axes, consolidate=consolidate, refs=refs)",
    "docstring": "Segregate Series based on type and coerce into matrices. Needs to handle a lot of exceptional cases.",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\internals\\construction.py",
    "ast_data": "FunctionDef name:arrays_to_mgr arg:arrays arg:columns arg:index arguments arg arg arg arg arg arg If If Compare Assign Call Assign Call Assign Call Assign Call Assign Call Assign Call For If BoolOp Call Compare Compare Call Call Raise Call Assign Call If Compare Call Call Raise Call Assign Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "force_fallback",
    "source_code": "@contextlib.contextmanager\ndef force_fallback(op: torch._ops.OpOverload):\n    assert isinstance(op, torch._ops.OpOverload), 'Only OpOverload to make the clean up easier'\n    old_handler = lowerings.get(op)\n    try:\n        register_lowering(op)(fallback_handler(op))\n        yield\n    finally:\n        if old_handler:\n            lowerings[op] = old_handler\n        else:\n            lowerings.pop(op)",
    "docstring": "A context manager to force fallback an op. Used in unit test for FallbackKernel.",
    "type": "function",
    "file_path": "pytorch\\torch\\_inductor\\lowering.py",
    "ast_data": "FunctionDef name:force_fallback arg:op arguments arg Call Assign Call Try Call Call Call If Assign Call"
  },
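  A minimal sketch of how this test hook might be used, assuming an Inductor build where torch._inductor.lowering.force_fallback is importable:

```python
# Minimal sketch: while the context is active, aten.sin lowers through the
# generic FallbackKernel instead of its registered Inductor lowering.
import torch
from torch._inductor.lowering import force_fallback

@torch.compile
def f(x):
    return torch.sin(x) + 1

with force_fallback(torch.ops.aten.sin.default):
    out = f(torch.randn(8))  # compiled with the fallback lowering for sin
```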
  {
    "library": "tensorflow",
    "name": "predict_proba",
    "source_code": "def predict_proba(self, x, batch_size=32, verbose=0):\n    warnings.warn('`model.predict_proba()` is deprecated and will be removed after 2021-01-01. Please use `model.predict()` instead.')\n    preds = self.predict(x, batch_size, verbose)\n    if preds.min() < 0.0 or preds.max() > 1.0:\n        logging.warning('Network returning invalid probability values. The last layer might not normalize predictions into probabilities (like softmax or sigmoid would).')\n    return preds",
    "docstring": "Generates class probability predictions for the input samples. The input samples are processed batch by batch. Args: x: input data, as a Numpy array or list of Numpy arrays (if the model has multiple inputs). batch_size: integer. verbose: verbosity mode, 0 or 1. Returns: A Numpy array of probability predictions.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\sequential.py",
    "ast_data": "FunctionDef name:predict_proba arg:self arg:x arg:batch_size arg:verbose arguments arg arg arg arg Call Assign Call If BoolOp Compare Call Compare Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "wait_for_new_checkpoint",
    "source_code": "def wait_for_new_checkpoint(checkpoint_dir, last_checkpoint=None, seconds_to_sleep=1, timeout=None):\n    logging.info('Waiting for new checkpoint at %s', checkpoint_dir)\n    stop_time = time.time() + timeout if timeout is not None else None\n    while True:\n        checkpoint_path = checkpoint_management.latest_checkpoint(checkpoint_dir)\n        if checkpoint_path is None or checkpoint_path == last_checkpoint:\n            if stop_time is not None and time.time() + seconds_to_sleep > stop_time:\n                return None\n            time.sleep(seconds_to_sleep)\n        else:\n            logging.info('Found new checkpoint at %s', checkpoint_path)\n            return checkpoint_path",
    "docstring": "Waits until a new checkpoint file is found. Args: checkpoint_dir: The directory in which checkpoints are saved. last_checkpoint: The last checkpoint path used or if we're expecting a checkpoint for the first time. seconds_to_sleep: The number of seconds to sleep for before looking for a new checkpoint. timeout: The maximum number of seconds to wait. If left as , then the process will wait indefinitely. Returns: a new checkpoint path, or None if the timeout was reached.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\training\\checkpoint_utils.py",
    "ast_data": "FunctionDef name:wait_for_new_checkpoint arg:checkpoint_dir arg:last_checkpoint arg:seconds_to_sleep arg:timeout arguments arg arg arg arg Call Assign Compare Call While Assign Call If BoolOp Compare Compare If BoolOp Compare Compare Call Return return:no Call Call Return return:yes"
  },
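  A hypothetical evaluation loop built on this helper; the import path is TensorFlow-internal, so treat this as a sketch rather than a public API.

```python
# Poll a training directory and evaluate each new checkpoint as it appears.
from tensorflow.python.training.checkpoint_utils import wait_for_new_checkpoint

last = None
while True:
    path = wait_for_new_checkpoint("/tmp/train", last_checkpoint=last, timeout=600)
    if path is None:
        break  # timed out: no new checkpoint appeared within 600 seconds
    last = path
    # ... restore variables from `path` and run one evaluation pass ...
```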
  {
    "library": "pytorch",
    "name": "randn",
    "source_code": "def randn(sharding_spec: ShardingSpec, *size, dtype=None, layout=torch.strided, requires_grad=False, pin_memory=False, memory_format=torch.contiguous_format, process_group=None, init_rrefs=False) -> ShardedTensor:\n    sharded_tensor = ShardedTensor(sharding_spec, *size, dtype=dtype, layout=layout, requires_grad=requires_grad, pin_memory=pin_memory, memory_format=memory_format, process_group=process_group, init_rrefs=init_rrefs)\n    torch.nn.init.normal_(sharded_tensor, 0, 1)\n    return sharded_tensor",
    "docstring": "Creates a :class: filled with random numbers from a uniform distribution with mean and variance (also called standard normal distribution). The shape of the tensor is defined by the variable argument . Needs to be called on all ranks in an SPMD fashion. Args: sharding_spec (:class:): The specification describing how to shard the Tensor. size (int...): a list, tuple, or of integers defining the shape of the output tensor. Keyword args: dtype (:class:, optional): the desired data type of returned tensor. Default: if `torch.set_default_dtypetorch.layouttorch.distributed.rpc.RRefShardedTensor` object on each rank",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\_shard\\sharded_tensor\\__init__.py",
    "ast_data": "FunctionDef name:randn arg:sharding_spec arguments arg arg arg arg arg arg arg arg arg Assign Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_validate_signature",
    "source_code": "def _validate_signature(signature):\n    if signature is None:\n        return\n    if not isinstance(signature, (tuple, list)):\n        raise TypeError(f'input_signature must be either a tuple or a list, got {type(signature)}.')\n    variable_specs = _get_variable_specs(signature)\n    if variable_specs:\n        raise TypeError(f\"input_signature doesn't support VariableSpec, got {variable_specs}\")\n    if any((not isinstance(arg, tensor.TensorSpec) for arg in nest.flatten(signature, expand_composites=True))):\n        bad_args = [arg for arg in nest.flatten(signature, expand_composites=True) if not isinstance(arg, tensor.TensorSpec)]\n        raise TypeError(f'input_signature must be a possibly nested sequence of TensorSpec objects, got invalid args {bad_args} with types {list(six.moves.map(type, bad_args))}.')",
    "docstring": "Checks the input_signature to be valid.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\polymorphic_function\\function_type_utils.py",
    "ast_data": "FunctionDef name:_validate_signature arg:signature arguments arg If Compare Return return:no If Call Raise Call Call Assign Call If Raise Call If Call Call Call Assign Call Call Raise Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_copy_row_shape",
    "source_code": "def _copy_row_shape(rt_inputs, splits):\n    for rt in rt_inputs:\n        if rt.shape[0] is not None:\n            splits.set_shape(tensor_shape.TensorShape(rt.shape[0] + 1))",
    "docstring": "Sets splits.shape to [rt[shape[0]+1] for each rt in rt_inputs.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\ragged_concat_ops.py",
    "ast_data": "FunctionDef name:_copy_row_shape arg:rt_inputs arg:splits arguments arg arg For If Compare Call Call"
  },
  {
    "library": "authlib",
    "name": "serialize_compact",
    "source_code": "def serialize_compact(self, protected, payload, key):\n    jws_header = JWSHeader(protected, None)\n    self._validate_private_headers(protected)\n    algorithm, key = self._prepare_algorithm_key(protected, payload, key)\n    protected_segment = json_b64encode(jws_header.protected)\n    payload_segment = urlsafe_b64encode(to_bytes(payload))\n    signing_input = b'.'.join([protected_segment, payload_segment])\n    signature = urlsafe_b64encode(algorithm.sign(signing_input, key))\n    return b'.'.join([protected_segment, payload_segment, signature])",
    "docstring": "Generate a JWS Compact Serialization. The JWS Compact Serialization represents digitally signed or MACed content as a compact, URL-safe string, per _. .. code-block:: text BASE64URL(UTF8(JWS Protected Header)) || '.' || BASE64URL(JWS Payload) || '.' || BASE64URL(JWS Signature) :param protected: A dict of protected header :param payload: A bytes/string of payload :param key: Private key used to generate signature :return: byte",
    "type": "method",
    "file_path": "authlib\\authlib\\jose\\rfc7515\\jws.py",
    "ast_data": "FunctionDef name:serialize_compact arg:self arg:protected arg:payload arg:key arguments arg arg arg arg Assign Call Call Assign Call Assign Call Assign Call Call Assign Call Assign Call Call Return return:yes Call"
  },
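  A usage sketch through authlib's public JsonWebSignature class, which exposes this method; restricting `algorithms` to HS256 here is an illustrative choice, not a requirement of the API.

```python
# Sign and verify a payload with HMAC-SHA256 compact serialization.
from authlib.jose import JsonWebSignature

jws = JsonWebSignature(algorithms=["HS256"])
protected = {"alg": "HS256"}
secret = b"a-shared-secret"  # HMAC key; use a strong random key in practice
token = jws.serialize_compact(protected, b"hello", secret)
# token looks like b"<b64 header>.<b64 payload>.<b64 signature>"
data = jws.deserialize_compact(token, secret)
assert data["payload"] == b"hello"
```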
  {
    "library": "matplotlib",
    "name": "cla",
    "source_code": "def cla() -> None:\n    return gca().cla()",
    "docstring": "Clear the current Axes.",
    "type": "function",
    "file_path": "matplotlib\\lib\\matplotlib\\pyplot.py",
    "ast_data": "FunctionDef name:cla arguments Return return:yes Call Call"
  },
  {
    "library": "sphinx",
    "name": "setup",
    "source_code": "def setup(app: Sphinx) -> ExtensionMetadata:\n    if not isinstance(app, Sphinx):\n        return {'version': sphinx.__display_version__, 'parallel_read_safe': True}\n    _patch_python_domain()\n    app.setup_extension('sphinx.ext.autodoc')\n    app.connect('autodoc-process-docstring', _process_docstring)\n    app.connect('autodoc-skip-member', _skip_member)\n    for name, default, rebuild, types in Config._config_values:\n        app.add_config_value(name, default, rebuild, types=types)\n    return {'version': sphinx.__display_version__, 'parallel_read_safe': True}",
    "docstring": "Sphinx extension setup function. When the extension is loaded, Sphinx imports this module and executes the `The Sphinx documentation on Extensions The Extension Tutorial The Extension API `_",
    "type": "function",
    "file_path": "sphinx\\sphinx\\ext\\napoleon\\__init__.py",
    "ast_data": "FunctionDef name:setup arg:app arguments arg If Call Return return:yes Call Call Call Call For Call Return return:yes"
  },
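  In practice this setup() runs when a project lists the extension in its conf.py; a minimal sketch, where napoleon_google_docstring is one of the Config._config_values options it registers:

```python
# conf.py sketch: loading the extension triggers setup() at build time.
extensions = ["sphinx.ext.napoleon"]
napoleon_google_docstring = True
napoleon_numpy_docstring = True
```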
  {
    "library": "django",
    "name": "get_messages",
    "source_code": "def get_messages(request):\n    return getattr(request, '_messages', [])",
    "docstring": "Return the message storage on the request if it exists, otherwise return an empty list.",
    "type": "function",
    "file_path": "django\\django\\contrib\\messages\\api.py",
    "ast_data": "FunctionDef name:get_messages arg:request arguments arg Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "shape",
    "source_code": "@property\ndef shape(self) -> Shape:\n    return self._values.shape",
    "docstring": "Return a tuple of the shape of the underlying data. See Also -------- Series.ndim : Number of dimensions of the underlying data. Series.size : Return the number of elements in the underlying data. Series.nbytes : Return the number of bytes in the underlying data. Examples -------- >>> s = pd.Series([1, 2, 3]) >>> s.shape (3,)",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\base.py",
    "ast_data": "FunctionDef name:shape arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, task_type=None, task_id=None, rpc_layer=None, environment=None):\n    self._task_type = task_type\n    self._task_id = task_id\n    self._rpc_layer = rpc_layer\n    self._environment = environment",
    "docstring": "Creates a new TFConfigClusterResolver. Args: task_type: (String, optional) Overrides the task type specified in the TF_CONFIG environment variable. task_id: (Integer, optional) Overrides the task index specified in the TF_CONFIG environment variable. rpc_layer: (String, optional) Overrides the rpc layer TensorFlow uses. environment: (String, optional) Overrides the environment TensorFlow operates in.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\cluster_resolver\\tfconfig_cluster_resolver.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:task_type arg:task_id arg:rpc_layer arg:environment arguments arg arg arg arg arg Assign Assign Assign Assign"
  },
  {
    "library": "tensorflow",
    "name": "_maybe_create_attribute",
    "source_code": "@trackable.no_automatic_dependency_tracking\ndef _maybe_create_attribute(self, name, default_value):\n    if not hasattr(self, name):\n        self.__setattr__(name, default_value)",
    "docstring": "Create the attribute with the default value if it hasn't been created. This is useful for fields that is used for tracking purpose, _trainable_weights, or _layers. Note that user could create a layer subclass and assign an internal field before invoking the Layer.__init__(), the __setattr__() need to create the tracking fields and __init__() need to not override them. Args: name: String, the name of the attribute. default_value: Object, the default value of the attribute.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\base_layer_v1.py",
    "ast_data": "FunctionDef name:_maybe_create_attribute arg:self arg:name arg:default_value arguments arg arg arg If Call Call"
  },
  {
    "library": "pytorch",
    "name": "init_from_stmts",
    "source_code": "@classmethod\ndef init_from_stmts(cls, py_stmt: Optional[str]=None, cpp_stmt: Optional[str]=None, setup: GroupedSetup=GroupedSetup(), signature: Optional[str]=None, torchscript: bool=False, autograd: bool=False, num_threads: Union[int, tuple[int, ...]]=1) -> 'GroupedBenchmark':\n    if py_stmt is not None:\n        py_stmt = textwrap.dedent(py_stmt)\n    if cpp_stmt is not None:\n        cpp_stmt = textwrap.dedent(cpp_stmt)\n    signature_args, signature_output = cls._parse_signature(signature)\n    py_model_setup = cls._model_from_py_stmt(py_stmt=py_stmt, signature_args=signature_args, signature_output=signature_output) if torchscript else None\n    return cls(py_fwd_stmt=py_stmt, cpp_fwd_stmt=cpp_stmt, py_model_setup=py_model_setup, cpp_model_setup=None, inferred_model_setup=True, setup=setup, signature_args=signature_args, signature_output=signature_output, torchscript=torchscript, autograd=autograd, num_threads=(num_threads,) if isinstance(num_threads, int) else num_threads)",
    "docstring": "Create a set of benchmarks from free-form statements. This method of benchmark definition is analogous to Timer use, where we simply execute the provided stmts.",
    "type": "method",
    "file_path": "pytorch\\benchmarks\\instruction_counts\\core\\api.py",
    "ast_data": "FunctionDef name:init_from_stmts arg:cls arg:py_stmt arg:cpp_stmt arg:setup arg:signature arg:torchscript arg:autograd arg:num_threads arguments arg arg arg arg arg arg arg arg Call If Compare Assign Call If Compare Assign Call Assign Call Assign Call Return return:yes Call Call"
  },
  {
    "library": "scipy",
    "name": "mean",
    "source_code": "def mean(self, *args, **kwds):\n    kwds['moments'] = 'm'\n    res = self.stats(*args, **kwds)\n    if isinstance(res, ndarray) and res.ndim == 0:\n        return res[()]\n    return res",
    "docstring": "Mean of the distribution. Parameters ---------- arg1, arg2, arg3,... : array_like The shape parameter(s) for the distribution (see docstring of the instance object for more information) loc : array_like, optional location parameter (default=0) scale : array_like, optional scale parameter (default=1) Returns ------- mean : float the mean of the distribution",
    "type": "method",
    "file_path": "scipy\\scipy\\stats\\_distn_infrastructure.py",
    "ast_data": "FunctionDef name:mean arg:self arguments arg arg arg Assign Assign Call If BoolOp Call Compare Return return:yes Return return:yes"
  },
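  The frozen/public distribution API exposes this method directly, e.g.:

```python
# mean() forwards to stats(..., moments='m') for any scipy.stats distribution.
from scipy.stats import norm, gamma

print(norm.mean(loc=2.0, scale=3.0))  # 2.0 -- mean of N(2, 3**2)
print(gamma.mean(a=4.0, scale=0.5))   # 2.0 -- shape * scale for gamma
```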
  {
    "library": "django",
    "name": "get_current_timezone_tag",
    "source_code": "@register.tag('get_current_timezone')\ndef get_current_timezone_tag(parser, token):\n    args = token.contents.split()\n    if len(args) != 3 or args[1] != 'as':\n        raise TemplateSyntaxError(\"'get_current_timezone' requires 'as variable' (got %r)\" % args)\n    return GetCurrentTimezoneNode(args[2])",
    "docstring": "Store the name of the current time zone in the context. Usage:: {% get_current_timezone as TIME_ZONE %} This will fetch the currently active time zone and put its name into the `` context variable.",
    "type": "function",
    "file_path": "django\\django\\templatetags\\tz.py",
    "ast_data": "FunctionDef name:get_current_timezone_tag arg:parser arg:token arguments arg arg Assign Call If BoolOp Compare Call Compare Raise Call Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "_annotate_with_config",
    "source_code": "def _annotate_with_config(self, model: torch.fx.GraphModule, quantization_config: Optional[QuantizationConfig], filter_fn: FilterFn) -> None:\n    self._annotate_conv2d_fusion_pattern(model, quantization_config, filter_fn)\n    self._annotate_linear_fusion_pattern(model, quantization_config, filter_fn)\n    self._annotate_matmul(model, quantization_config, filter_fn)\n    self._annotate_propagation_quantizable_pattern_entry(model, quantization_config, filter_fn)",
    "docstring": "Annotate the model with the given quantization configuration. High-level description of quantization recipe for X86 Inductor Backend: Step 1: Apply quantization recipe for fusion patterns of conv/linear to enable int8 data type actively. Step 2: Propagate quantization annotation for patterns besides conv/linear. Go through the pattern in model from start to the end. If a pattern supports computation with int8 data type and inputs connected to quantized patterns, annotate its inputs as quantized pattern.",
    "type": "method",
    "file_path": "pytorch\\torch\\ao\\quantization\\quantizer\\x86_inductor_quantizer.py",
    "ast_data": "FunctionDef name:_annotate_with_config arg:self arg:model arg:quantization_config arg:filter_fn arguments arg arg arg arg Call Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "named_children",
    "source_code": "def named_children(self) -> Iterator[tuple[str, 'Module']]:\n    memo = set()\n    for name, module in self._modules.items():\n        if module is not None and module not in memo:\n            memo.add(module)\n            yield (name, module)",
    "docstring": "Return an iterator over immediate children modules, yielding both the name of the module as well as the module itself. Yields: (str, Module): Tuple containing a name and child module Example:: >>> # xdoctest: +SKIP(\"undefined vars\") >>> for name, module in model.named_children(): >>> if name in ['conv4', 'conv5']: >>> print(module)",
    "type": "method",
    "file_path": "pytorch\\torch\\nn\\modules\\module.py",
    "ast_data": "FunctionDef name:named_children arg:self arguments arg Assign Call For Call If BoolOp Compare Compare Call"
  },
  {
    "library": "django",
    "name": "l",
    "source_code": "def l(self):\n    return WEEKDAYS[self.data.weekday()]",
    "docstring": "Day of the week, textual, long; e.g. 'Friday'",
    "type": "method",
    "file_path": "django\\django\\utils\\dateformat.py",
    "ast_data": "FunctionDef name:l arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "swish",
    "source_code": "@tf_export('nn.silu', 'nn.swish')\n@dispatch.register_unary_elementwise_api\n@dispatch.add_dispatch_support\ndef swish(features, beta=1.0):\n    features = ops.convert_to_tensor(features, name='features')\n    beta = ops.convert_to_tensor(beta, name='beta')\n    beta = math_ops.cast(beta, features.dtype)\n\n    @custom_gradient.custom_gradient\n    def swish_impl(features, beta):\n\n        def grad(dy):\n            with ops.control_dependencies([dy]):\n                sigmoid_features = math_ops.sigmoid(beta * features)\n            activation_grad = sigmoid_features * (1.0 + beta * features * (1.0 - sigmoid_features))\n            beta_grad = math_ops.reduce_sum(dy * math_ops.square(features) * sigmoid_features * (1.0 - sigmoid_features))\n            return (dy * activation_grad, beta_grad)\n        return (features * math_ops.sigmoid(beta * features), grad)\n    return swish_impl(features, beta)",
    "docstring": "Computes the SiLU or Swish activation function: . beta : Hyperparameter for Swish activation function. Default value 1.0. The SiLU activation function was introduced in \"Gaussian Error Linear Units (GELUs)\" [Hendrycks et al. 2016]( and \"Sigmoid-Weighted Linear Units for Neural Network Function Approximation in Reinforcement Learning\" [Elfwing et al. 2017]( and was independently discovered (and called swish) in \"Searching for Activation Functions\" [Ramachandran et al. 2017]( Args: features: A representing preactivation values. beta: A 'Tensor' representing value of beta hyperparameter. Returns: The activation value.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\nn_impl.py",
    "ast_data": "FunctionDef name:swish arg:features arg:beta arguments arg arg Assign Call Assign Call Assign Call FunctionDef name:swish_impl arg:features arg:beta arguments arg arg FunctionDef name:grad arg:dy arguments arg With Call Assign Call Assign Assign Call Call Return return:yes Return return:yes Call Return return:yes Call Call"
  },
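  Per the tf_export decorator above, the function is reachable as both tf.nn.silu and tf.nn.swish:

```python
# SiLU/Swish via the public aliases.
import tensorflow as tf

x = tf.constant([-1.0, 0.0, 1.0])
y = tf.nn.silu(x)                  # x * sigmoid(x)
y_beta = tf.nn.swish(x, beta=1.5)  # x * sigmoid(1.5 * x)
```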
  {
    "library": "tensorflow",
    "name": "string_format",
    "source_code": "@dispatch.dispatch_for_api(string_ops.string_format)\ndef string_format(template: str, inputs: typing.Union[ragged_tensor.Ragged, typing.List[ragged_tensor.RaggedOrDense]], placeholder='{}', summarize=3, name=None):\n    if tensor_util.is_tf_type(inputs) or ragged_tensor.is_ragged(inputs):\n        inputs = [inputs]\n    split_template = template.split(placeholder)\n    if len(inputs) != len(split_template) - 1:\n        raise ValueError('num placeholders in template and num inputs must match: {} vs {}'.format(len(split_template) - 1, len(inputs)))\n    with ops.name_scope(name, 'StringFormat', [inputs]):\n        output_pieces = [constant_op.constant(split_template[0])]\n        for i, input in enumerate(inputs):\n            if ragged_tensor.is_ragged(input):\n                output_pieces.append(ragged_tensor_to_string(input, summarize))\n            else:\n                output_pieces.append(string_ops.string_format('{}', [input], summarize=summarize))\n            output_pieces.append(constant_op.constant(split_template[i + 1]))\n        if len(output_pieces) == 1:\n            return output_pieces[0]\n        else:\n            return string_ops.reduce_join(output_pieces)",
    "docstring": "Version of tf.strings.format that handles RaggedTensors.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\ragged_string_ops.py",
    "ast_data": "FunctionDef name:string_format arg:template arg:inputs arg:placeholder arg:summarize arg:name arguments arg arg arg arg arg If BoolOp Call Call Assign Assign Call If Compare Call Call Raise Call Call Call Call With Call Assign Call For Call If Call Call Call Call Call Call Call If Compare Call Return return:yes Return return:yes Call Call"
  },
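  A sketch of the dispatch in action: tf.strings.format routes to this overload whenever an input is a RaggedTensor.

```python
# Format a RaggedTensor into a string template.
import tensorflow as tf

rt = tf.ragged.constant([[1, 2], [3]])
msg = tf.strings.format("ragged: {}", [rt])
print(msg.numpy())  # b'ragged: [[1, 2], [3]]'
```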
  {
    "library": "tensorflow",
    "name": "lookup",
    "source_code": "def lookup(self, keys, dynamic_default_values=None, name=None):\n    with ops.name_scope(name, '%s_lookup_table_find' % self.name, (self.resource_handle, keys, self._default_value)):\n        keys = ops.convert_to_tensor(keys, dtype=self._key_dtype, name='keys')\n        with ops.colocate_with(self.resource_handle):\n            values = gen_lookup_ops.lookup_table_find_v2(self.resource_handle, keys, dynamic_default_values if dynamic_default_values is not None else self._default_value)\n    return values",
    "docstring": "Looks up in a table, outputs the corresponding values. The is used for keys not present in the table. Args: keys: Keys to look up. Can be a tensor of any shape. Must match the table's key_dtype. dynamic_default_values: The values to use if a key is missing in the table. If None (by default), the will be used. Shape of must be same with or the lookup result tensor. In the latter case, each key will have a different default value. For example: name: A name for the operation (optional). Returns: A tensor containing the values in the same shape as using the table's value type. Raises: TypeError: when do not match the table data types.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\lookup_ops.py",
    "ast_data": "FunctionDef name:lookup arg:self arg:keys arg:dynamic_default_values arg:name arguments arg arg arg arg With Call Assign Call With Call Assign Call Compare Return return:yes"
  },
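  A public-API example: tf.lookup.StaticHashTable.lookup wraps this method (the dynamic_default_values variant stays internal).

```python
# Build a static string->int64 table and look up present and missing keys.
import tensorflow as tf

table = tf.lookup.StaticHashTable(
    tf.lookup.KeyValueTensorInitializer(
        keys=tf.constant(["a", "b"]),
        values=tf.constant([1, 2], dtype=tf.int64)),
    default_value=-1)
print(table.lookup(tf.constant(["a", "x"])).numpy())  # [ 1 -1]
```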
  {
    "library": "tensorflow",
    "name": "parallel_iterations",
    "source_code": "@property\ndef parallel_iterations(self):\n    return self._parallel_iterations",
    "docstring": "The number of iterations allowed to run in parallel.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\control_flow_ops.py",
    "ast_data": "FunctionDef name:parallel_iterations arg:self arguments arg Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "staged_predict",
    "source_code": "def staged_predict(self, X):\n    for raw_predictions in self._staged_raw_predict(X):\n        if raw_predictions.shape[1] == 1:\n            encoded_classes = (raw_predictions.ravel() > 0).astype(int)\n        else:\n            encoded_classes = np.argmax(raw_predictions, axis=1)\n        yield self.classes_.take(encoded_classes, axis=0)",
    "docstring": "Predict classes at each iteration. This method allows monitoring (i.e. determine error on testing set) after each stage. .. versionadded:: 0.24 Parameters ---------- X : array-like of shape (n_samples, n_features) The input samples. Yields ------ y : generator of ndarray of shape (n_samples,) The predicted classes of the input samples, for each iteration.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\ensemble\\_hist_gradient_boosting\\gradient_boosting.py",
    "ast_data": "FunctionDef name:staged_predict arg:self arg:X arguments arg arg For Call If Compare Assign Call Compare Call Assign Call Call"
  },
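  A monitoring sketch, which is exactly the use case the docstring names: track a metric after each boosting iteration.

```python
# Track train accuracy across boosting stages.
from sklearn.datasets import make_classification
from sklearn.ensemble import HistGradientBoostingClassifier
from sklearn.metrics import accuracy_score

X, y = make_classification(random_state=0)
clf = HistGradientBoostingClassifier(max_iter=20, random_state=0).fit(X, y)
for i, y_pred in enumerate(clf.staged_predict(X)):
    print(f"iteration {i}: train accuracy {accuracy_score(y, y_pred):.3f}")
```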
  {
    "library": "scikit-learn",
    "name": "_enforce_prediction_order",
    "source_code": "def _enforce_prediction_order(classes, predictions, n_classes, method):\n    if n_classes != len(classes):\n        recommendation = 'To fix this, use a cross-validation technique resulting in properly stratified folds'\n        warnings.warn('Number of classes in training fold ({}) does not match total number of classes ({}). Results may not be appropriate for your use case. {}'.format(len(classes), n_classes, recommendation), RuntimeWarning)\n        if method == 'decision_function':\n            if predictions.ndim == 2 and predictions.shape[1] != len(classes):\n                raise ValueError('Output shape {} of {} does not match number of classes ({}) in fold. Irregular decision_function outputs are not currently supported by cross_val_predict'.format(predictions.shape, method, len(classes)))\n            if len(classes) <= 2:\n                raise ValueError('Only {} class/es in training fold, but {} in overall dataset. This is not supported for decision_function with imbalanced folds. {}'.format(len(classes), n_classes, recommendation))\n        float_min = np.finfo(predictions.dtype).min\n        default_values = {'decision_function': float_min, 'predict_log_proba': float_min, 'predict_proba': 0}\n        predictions_for_all_classes = np.full((_num_samples(predictions), n_classes), default_values[method], dtype=predictions.dtype)\n        predictions_for_all_classes[:, classes] = predictions\n        predictions = predictions_for_all_classes\n    return predictions",
    "docstring": "Ensure that prediction arrays have correct column order When doing cross-validation, if one or more classes are not present in the subset of data used for training, then the output prediction array might not have the same columns as other folds. Use the list of class names (assumed to be ints) to enforce the correct column order. Note that is the list of classes in this fold (a subset of the classes in the full training set) and is the number of classes in the full training set.",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\model_selection\\_validation.py",
    "ast_data": "FunctionDef name:_enforce_prediction_order arg:classes arg:predictions arg:n_classes arg:method arguments arg arg arg arg If Compare Call Assign Call Call Call If Compare If BoolOp Compare Compare Call Raise Call Call Call If Compare Call Raise Call Call Call Assign Call Assign Assign Call Call Assign Assign Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "get_size",
    "source_code": "def get_size(self):\n    return self._size",
    "docstring": "Get the text size.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\textpath.py",
    "ast_data": "FunctionDef name:get_size arg:self arguments arg Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "get_aspect",
    "source_code": "def get_aspect(self):\n    return self._aspect",
    "docstring": "Return aspect.",
    "type": "method",
    "file_path": "matplotlib\\lib\\mpl_toolkits\\axes_grid1\\axes_divider.py",
    "ast_data": "FunctionDef name:get_aspect arg:self arguments arg Return return:yes"
  },
  {
    "library": "pandas",
    "name": "stop",
    "source_code": "@property\ndef stop(self) -> int:\n    return self._range.stop",
    "docstring": "The value of the parameter. This property returns the value of the RangeIndex, which defines the upper (or lower, in case of negative steps) bound of the index range. The value is exclusive, meaning the RangeIndex includes values up to but not including this value. See Also -------- RangeIndex : Immutable index representing a range of integers. RangeIndex.start : The start value of the RangeIndex. RangeIndex.step : The step size between elements in the RangeIndex. Examples -------- >>> idx = pd.RangeIndex(5) >>> idx.stop 5 >>> idx = pd.RangeIndex(2, -10, -3) >>> idx.stop -10",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\indexes\\range.py",
    "ast_data": "FunctionDef name:stop arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, sess, dump_root=None, ui_type='readline', thread_name_filter=None, config_file_path=False):\n    framework.BaseDebugWrapperSession.__init__(self, sess, thread_name_filter=thread_name_filter)\n    if not dump_root:\n        self._dump_root = tempfile.mkdtemp(prefix=_DUMP_ROOT_PREFIX)\n    else:\n        dump_root = os.path.expanduser(dump_root)\n        if os.path.isfile(dump_root):\n            raise ValueError('dump_root path points to a file: %s' % dump_root)\n        elif os.path.isdir(dump_root) and os.listdir(dump_root):\n            raise ValueError('dump_root path points to a non-empty directory: %s' % dump_root)\n        self._dump_root = dump_root\n    self._initialize_argparsers()\n    self._tensor_filters = {}\n    self.add_tensor_filter('has_inf_or_nan', debug_data.has_inf_or_nan)\n    self._active_tensor_filter = None\n    self._active_filter_exclude_node_names = None\n    self._active_tensor_filter_run_start_response = None\n    self._run_through_times = 1\n    self._skip_debug = False\n    self._run_start_response = None\n    self._is_run_start = True\n    self._ui_type = ui_type\n    self._config = None\n    if config_file_path:\n        self._config = cli_config.CLIConfig(config_file_path=config_file_path)",
    "docstring": "Constructor of LocalCLIDebugWrapperSession. Args: sess: The TensorFlow object being wrapped. dump_root: () optional path to the dump root directory. Must be a directory that does not exist or an empty directory. If the directory does not exist, it will be created by the debugger core during debug calls and removed afterwards. If , the debug dumps will be at tfdbg_ under the system temp directory. ui_type: () requested UI type. Currently supported: (readline) thread_name_filter: Regular-expression white list for thread name. See the doc of for details. config_file_path: Optional override to the default configuration file path, which is at . Raises: ValueError: If dump_root is an existing and non-empty directory or if dump_root is a file.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\debug\\wrappers\\local_cli_wrapper.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:sess arg:dump_root arg:ui_type arg:thread_name_filter arg:config_file_path arguments arg arg arg arg arg arg Call If Assign Call Assign Call If Call Raise Call If BoolOp Call Call Raise Call Assign Call Assign Call Assign Assign Assign Assign Assign Assign Assign Assign Assign If Assign Call"
  },
  {
    "library": "pandas",
    "name": "__call__",
    "source_code": "def __call__(self, env):\n    left = self.lhs(env)\n    right = self.rhs(env)\n    return self.func(left, right)",
    "docstring": "Recursively evaluate an expression in Python space. Parameters ---------- env : Scope Returns ------- object The result of an evaluated expression.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\computation\\ops.py",
    "ast_data": "FunctionDef name:__call__ arg:self arg:env arguments arg arg Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "DataLossError",
    "source_code": "@tf_export('errors.DataLossError')\nclass DataLossError(OpError):\n\n    def __init__(self, node_def, op, message, *args):\n        super(DataLossError, self).__init__(node_def, op, message, DATA_LOSS, *args)",
    "docstring": "Raised when unrecoverable data loss or corruption is encountered. This could be due to: * A truncated file. * A corrupted file. * Specifying the wrong data format. For example, this may be raised by running a operation, if the file is truncated while it is being read.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\errors_impl.py",
    "ast_data": "ClassDef name:DataLossError FunctionDef name:__init__ arg:self arg:node_def arg:op arg:message arguments arg arg arg arg arg Call Call Call"
  },
  {
    "library": "scikit-learn",
    "name": "_find_matching_floating_dtype",
    "source_code": "def _find_matching_floating_dtype(*arrays, xp):\n    dtyped_arrays = [xp.asarray(a) for a in arrays if hasattr(a, 'dtype')]\n    floating_dtypes = [a.dtype for a in dtyped_arrays if xp.isdtype(a.dtype, 'real floating')]\n    if floating_dtypes:\n        return xp.result_type(*floating_dtypes)\n    return xp.asarray(0.0).dtype",
    "docstring": "Find a suitable floating point dtype when computing with arrays. If any of the arrays are floating point, return the dtype with the highest precision by following official type promotion rules: If there are no floating point input arrays (all integral inputs for instance), return the default floating point dtype for the namespace.",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\utils\\_array_api.py",
    "ast_data": "FunctionDef name:_find_matching_floating_dtype arguments arg arg Assign Call Call Assign Call If Return return:yes Call Return return:yes Call"
  },
  {
    "library": "scrapy",
    "name": "close",
    "source_code": "def close(self, reason: str) -> Deferred[None] | None:\n    pass",
    "docstring": "Called when the spider is closed by the engine. It receives the reason why the crawl finished as argument and it's useful to execute cleaning code. :param reason: a string which describes the reason why the spider was closed :type reason: :class:",
    "type": "method",
    "file_path": "scrapy\\scrapy\\core\\scheduler.py",
    "ast_data": "FunctionDef name:close arg:self arg:reason arguments arg arg"
  },
  {
    "library": "scipy",
    "name": "_gen_harmonic_leq1",
    "source_code": "def _gen_harmonic_leq1(n, a):\n    if not np.size(n):\n        return n\n    n_max = np.nanmax(n)\n    out = np.zeros_like(a, dtype=float)\n    for i in np.arange(n_max, 0, -1, dtype=float):\n        mask = i <= n\n        out[mask] += 1 / i ** a[mask]\n    out[np.isnan(n)] = np.nan\n    return out",
    "docstring": "Generalized harmonic number, a <= 1",
    "type": "function",
    "file_path": "scipy\\scipy\\stats\\_discrete_distns.py",
    "ast_data": "FunctionDef name:_gen_harmonic_leq1 arg:n arg:a arguments arg arg If Call Return return:yes Assign Call Assign Call For Call Assign Compare Assign Call Return return:yes"
  },
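  A cross-check of the quantity this helper vectorizes: the generalized harmonic number H(n, a) = sum_{i=1..n} i**(-a).

```python
# Scalar reference implementation of the generalized harmonic number.
import numpy as np

def gen_harmonic(n, a):
    i = np.arange(1, n + 1, dtype=float)
    return np.sum(i ** -a)

print(gen_harmonic(4, 1.0))  # 1 + 1/2 + 1/3 + 1/4 = 2.0833...
```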
  {
    "library": "pytorch",
    "name": "_JoinConfig",
    "source_code": "class _JoinConfig(NamedTuple):\n    enable: bool\n    throw_on_early_termination: bool\n    is_first_joinable: bool\n\n    @staticmethod\n    def construct_disabled_join_config():\n        return _JoinConfig(enable=False, throw_on_early_termination=False, is_first_joinable=False)",
    "docstring": "This includes all fields needed from a :class: instance for the join context manager side.",
    "type": "class",
    "file_path": "pytorch\\torch\\distributed\\algorithms\\join.py",
    "ast_data": "ClassDef name:_JoinConfig FunctionDef name:construct_disabled_join_config arguments Return return:yes Call"
  },
  {
    "library": "authlib",
    "name": "init_app",
    "source_code": "def init_app(self, app, query_client=None, save_token=None):\n    if query_client is not None:\n        self._query_client = query_client\n    if save_token is not None:\n        self._save_token = save_token\n    self.register_token_generator('default', self.create_bearer_token_generator(app.config))\n    self.scopes_supported = app.config.get('OAUTH2_SCOPES_SUPPORTED')\n    self._error_uris = app.config.get('OAUTH2_ERROR_URIS')",
    "docstring": "Initialize later with Flask app instance.",
    "type": "method",
    "file_path": "authlib\\authlib\\integrations\\flask_oauth2\\authorization_server.py",
    "ast_data": "FunctionDef name:init_app arg:self arg:app arg:query_client arg:save_token arguments arg arg arg arg If Compare Assign If Compare Assign Call Call Assign Call Assign Call"
  },
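  A Flask wiring sketch; query_client and save_token are app-specific callables, and the names and bodies below are hypothetical placeholders.

```python
# Attach an AuthorizationServer to a Flask app after construction.
from flask import Flask
from authlib.integrations.flask_oauth2 import AuthorizationServer

def query_client(client_id):     # hypothetical: look up an OAuth2 client record
    raise NotImplementedError

def save_token(token, request):  # hypothetical: persist the issued token
    raise NotImplementedError

app = Flask(__name__)
server = AuthorizationServer()
server.init_app(app, query_client=query_client, save_token=save_token)
```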
  {
    "library": "tensorflow",
    "name": "get",
    "source_code": "def get(query):\n    conversion_funcs = _tensor_conversion_func_cache.get(query)\n    if conversion_funcs is None:\n        with _tensor_conversion_func_lock:\n            conversion_funcs = _tensor_conversion_func_cache.get(query)\n            if conversion_funcs is None:\n                conversion_funcs = []\n                for _, funcs_at_priority in sorted(_tensor_conversion_func_registry.items()):\n                    conversion_funcs.extend(((base_type, conversion_func) for base_type, conversion_func in funcs_at_priority if issubclass(query, base_type)))\n                _tensor_conversion_func_cache[query] = conversion_funcs\n    return conversion_funcs",
    "docstring": "Get conversion function for objects of . Args: query: The type to query for. Returns: A list of conversion functions in increasing order of priority.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\tensor_conversion_registry.py",
    "ast_data": "FunctionDef name:get arg:query arguments arg Assign Call If Compare With Assign Call If Compare Assign For Call Call Call Call Assign Return return:yes"
  },
  {
    "library": "sphinx",
    "name": "after_content",
    "source_code": "def after_content(self) -> None:\n    classes = self.env.ref_context.setdefault('py:classes', [])\n    if self.allow_nesting:\n        with contextlib.suppress(IndexError):\n            classes.pop()\n    self.env.ref_context['py:class'] = classes[-1] if len(classes) > 0 else None\n    if 'module' in self.options:\n        modules = self.env.ref_context.setdefault('py:modules', [])\n        if modules:\n            self.env.ref_context['py:module'] = modules.pop()\n        else:\n            self.env.ref_context.pop('py:module')",
    "docstring": "Handle object de-nesting after content If this class is a nestable object, removing the last nested class prefix ends further nesting in the object. If this class is not a nestable object, the list of classes should not be altered as we didn't affect the nesting levels in :py:meth:.",
    "type": "method",
    "file_path": "sphinx\\sphinx\\domains\\python\\_object.py",
    "ast_data": "FunctionDef name:after_content arg:self arguments arg Assign Call If With Call Call Assign Compare Call If Compare Assign Call If Assign Call Call"
  },
  {
    "library": "pytorch",
    "name": "FunctionIdSet",
    "source_code": "class FunctionIdSet:\n    function_ids: Optional[set[int]] = None\n    function_names: Optional[dict[int, str]] = None\n\n    def __init__(self, lazy_initializer: Callable[[], Union[dict[int, str], set[int]]]) -> None:\n        self.lazy_initializer = lazy_initializer\n\n    def __call__(self) -> set[int]:\n        if self.function_ids is None:\n            value = self.lazy_initializer()\n            if isinstance(value, dict):\n                self.function_ids = set(value.keys())\n                self.function_names = value\n            else:\n                assert isinstance(value, set)\n                self.function_ids = value\n        return self.function_ids\n\n    def get_name(self, idx: int, default: str):\n        self()\n        assert self.function_names is not None\n        return self.function_names.get(idx, default)\n\n    def add(self, idx: int):\n        function_ids = self()\n        function_ids.add(idx)\n\n    def remove(self, idx: int):\n        function_ids = self()\n        if idx in function_ids:\n            function_ids.remove(idx)\n\n    def __contains__(self, idx: int) -> bool:\n        return idx in self()",
    "docstring": "Track a set of s of objects which are either allowed or not allowed to go into the generated FX graph. Use to test for torch.*, numpy.*, builtins.*, etc. Support user modification to permit customization of what can be added to the graph and what will cause a graph break.",
    "type": "class",
    "file_path": "pytorch\\torch\\_dynamo\\trace_rules.py",
    "ast_data": "ClassDef name:FunctionIdSet FunctionDef name:__init__ arg:self arg:lazy_initializer arguments arg arg Assign FunctionDef name:__call__ arg:self arguments arg If Compare Assign Call If Call Assign Call Call Assign Call Assign Return return:yes FunctionDef name:get_name arg:self arg:idx arg:default arguments arg arg arg Call Compare Return return:yes Call FunctionDef name:add arg:self arg:idx arguments arg arg Assign Call Call FunctionDef name:remove arg:self arg:idx arguments arg arg Assign Call If Compare Call FunctionDef name:__contains__ arg:self arg:idx arguments arg arg Return return:yes Compare Call"
  },
  {
    "library": "tensorflow",
    "name": "_tf_tensorarray_stack",
    "source_code": "def _tf_tensorarray_stack(list_):\n    return list_.stack()",
    "docstring": "Overload of list_stack that stages a TensorArray stack.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\autograph\\operators\\data_structures.py",
    "ast_data": "FunctionDef name:_tf_tensorarray_stack arg:list_ arguments arg Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "csc_median_axis_0",
    "source_code": "def csc_median_axis_0(X):\n    if not (sp.issparse(X) and X.format == 'csc'):\n        raise TypeError('Expected matrix of CSC format, got %s' % X.format)\n    indptr = X.indptr\n    n_samples, n_features = X.shape\n    median = np.zeros(n_features)\n    for f_ind, (start, end) in enumerate(itertools.pairwise(indptr)):\n        data = np.copy(X.data[start:end])\n        nz = n_samples - data.size\n        median[f_ind] = _get_median(data, nz)\n    return median",
    "docstring": "Find the median across axis 0 of a CSC matrix. It is equivalent to doing np.median(X, axis=0). Parameters ---------- X : sparse matrix of shape (n_samples, n_features) Input data. It should be of CSC format. Returns ------- median : ndarray of shape (n_features,) Median.",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\utils\\sparsefuncs.py",
    "ast_data": "FunctionDef name:csc_median_axis_0 arg:X arguments arg If BoolOp Call Compare Raise Call Assign Assign Assign Call For Call Call Assign Call Assign Assign Call Return return:yes"
  },
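  The result matches np.median on the densified matrix, without the helper itself densifying:

```python
# Column medians of a sparse CSC matrix, zeros included.
import numpy as np
from scipy import sparse
from sklearn.utils.sparsefuncs import csc_median_axis_0

X = sparse.random(6, 4, density=0.5, format="csc", random_state=0)
assert np.allclose(csc_median_axis_0(X), np.median(X.toarray(), axis=0))
```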
  {
    "library": "pytorch",
    "name": "get_untyped_storages",
    "source_code": "def get_untyped_storages(t: torch.Tensor) -> set[torch.UntypedStorage]:\n    unflattened_tensors = [t]\n    flattened_tensor_storages = set()\n    while len(unflattened_tensors) > 0:\n        obj = unflattened_tensors.pop()\n        if is_traceable_wrapper_subclass(obj):\n            attrs, _ = obj.__tensor_flatten__()\n            unflattened_tensors.extend([getattr(obj, attr) for attr in attrs])\n        elif not hasattr(obj, 'untyped_storage'):\n            warnings.warn(f'Expected a tensor or a traceable wrapper-subclass of tensor, but got {type(obj)}', category=UserWarning, stacklevel=2)\n        else:\n            flattened_tensor_storages.add(obj.untyped_storage())\n    return flattened_tensor_storages",
    "docstring": "Recursively extracts untyped storages from a tensor or its subclasses. Args: t (torch.Tensor): The tensor to extract storages from. Returns: Set[torch.UntypedStorage]: A set of untyped storages.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\_tools\\common_utils.py",
    "ast_data": "FunctionDef name:get_untyped_storages arg:t arguments arg Assign Assign Call While Compare Call Assign Call If Call Assign Call Call Call If Call Call Call Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "generate_configs",
    "source_code": "def generate_configs(**configs):\n    assert 'sample_func' in configs, 'Missing sample_func to generate configs'\n    result = []\n    for key, values in configs.items():\n        if key == 'sample_func':\n            continue\n        tmp_result = []\n        for value in values:\n            tmp_result.append({key: value})\n        result.append(tmp_result)\n    results = configs['sample_func'](*result)\n    return results",
    "docstring": "Given configs from users, we want to generate different combinations of those configs For example, given M = ((1, 2), N = (4, 5)) and sample_func being cross_product, we will generate (({'M': 1}, {'N' : 4}), ({'M': 1}, {'N' : 5}), ({'M': 2}, {'N' : 4}), ({'M': 2}, {'N' : 5}))",
    "type": "function",
    "file_path": "pytorch\\benchmarks\\operator_benchmark\\benchmark_utils.py",
    "ast_data": "FunctionDef name:generate_configs arguments arg Compare Assign For Call If Compare Assign For Call Call Assign Call Return return:yes"
  },
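  A self-contained sketch of a cross_product sample_func like the one the docstring assumes; generate_configs (above) builds the per-key dict lists and then delegates the combination to sample_func.

```python
# All pairings of the per-key config dicts, as in the docstring's example.
import itertools

def cross_product(*config_lists):
    return tuple(itertools.product(*config_lists))

print(cross_product([{"M": 1}, {"M": 2}], [{"N": 4}, {"N": 5}]))
# (({'M': 1}, {'N': 4}), ({'M': 1}, {'N': 5}),
#  ({'M': 2}, {'N': 4}), ({'M': 2}, {'N': 5}))
```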
  {
    "library": "tensorflow",
    "name": "GetOutputContext",
    "source_code": "def GetOutputContext(op):\n    ctxt = op._get_control_flow_context()\n    if ctxt is not None and IsLoopExit(op):\n        ctxt = ctxt.outer_context\n    return ctxt",
    "docstring": "Return the control flow context for the output of an op.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\control_flow_util.py",
    "ast_data": "FunctionDef name:GetOutputContext arg:op arguments arg Assign Call If BoolOp Compare Call Assign Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "LazyIrProperties",
    "source_code": "class LazyIrProperties:\n    Properties: tuple[tuple[str, ...], ...] = (('ShapePrecompute', 'ShapeCompute', 'ShapeCache'), ('Lower', 'LowerDeclOnly'), ('CanBeReused', 'CanBeReusedDeclOnly'), ('CreateFn', 'CreateFnDeclOnly'), ('TreatScalarsAsConstants',))\n\n    def __init__(self, *default_properties: str) -> None:\n        properties: dict[tuple[str, ...], str | None] = dict.fromkeys(LazyIrProperties.Properties)\n        self.__dict__['properties'] = properties\n        for p in default_properties:\n            setattr(self, p, True)\n\n    def __getattr__(self, key: str) -> Any:\n        properties = self.__dict__['properties']\n        for values in LazyIrProperties.Properties:\n            if key in values:\n                return properties[values] == key\n        return self.__getattribute__(key)\n\n    def __setattr__(self, key: str, value: Any) -> Any:\n        properties = self.__dict__['properties']\n        for values in LazyIrProperties.Properties:\n            if key in values:\n                properties[values] = key if value else None\n                return value\n        raise KeyError(f'Invalid property: {key}')",
    "docstring": "Collection of properties for an IR node The property groups are listed below. Each group is mutually exclusive, meaning that only one property from each group can be True at any one time. The properties can be accessed as if they were normal attributes. The mutual exclusivity is automatically handled.",
    "type": "class",
    "file_path": "pytorch\\torchgen\\api\\lazy.py",
    "ast_data": "ClassDef name:LazyIrProperties FunctionDef name:__init__ arg:self arguments arg arg Call Assign For Call FunctionDef name:__getattr__ arg:self arg:key arguments arg arg Assign For If Compare Return return:yes Compare Return return:yes Call FunctionDef name:__setattr__ arg:self arg:key arg:value arguments arg arg arg Assign For If Compare Assign Return return:yes Raise Call"
  },
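  A sketch of the mutual-exclusion behavior (torchgen ships alongside PyTorch): setting one property in a group clears whichever property in that group was previously set.

```python
# Properties within a group are mutually exclusive.
from torchgen.api.lazy import LazyIrProperties

props = LazyIrProperties("ShapePrecompute", "Lower")
assert props.ShapePrecompute and not props.ShapeCache
props.ShapeCache = True  # same group as ShapePrecompute: replaces it
assert props.ShapeCache and not props.ShapePrecompute
```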
  {
    "library": "pytorch",
    "name": "run_forward",
    "source_code": "def run_forward(model, **batch):\n    time_list = []\n    X, lS_o, lS_i = (batch['X'], batch['lS_o'], batch['lS_i'])\n    for _ in range(100):\n        start = time.time()\n        with torch.no_grad():\n            model(X, lS_o, lS_i)\n        end = time.time()\n        time_taken = end - start\n        time_list.append(time_taken)\n    avg_time = np.mean(time_list[1:])\n    return avg_time",
    "docstring": "The purpose of this function is to time the forward run of the model. The model forward happens a 100 times and each pass is timed. The average of this 100 runs is returned as avg_time.",
    "type": "function",
    "file_path": "pytorch\\torch\\ao\\pruning\\_experimental\\data_sparsifier\\benchmarks\\evaluate_forward_time.py",
    "ast_data": "FunctionDef name:run_forward arg:model arguments arg arg Assign Assign For Call Assign Call With Call Call Assign Call Assign Call Assign Call Return return:yes"
  },
  {
    "library": "seaborn",
    "name": "_index_to_label",
    "source_code": "def _index_to_label(index):\n    if isinstance(index, pd.MultiIndex):\n        return '-'.join(map(to_utf8, index.names))\n    else:\n        return index.name",
    "docstring": "Convert a pandas index or multiindex to an axis label.",
    "type": "function",
    "file_path": "seaborn\\seaborn\\matrix.py",
    "ast_data": "FunctionDef name:_index_to_label arg:index arguments arg If Call Return return:yes Call Call Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "get_params",
    "source_code": "def get_params(self, deep=True):\n    params = dict(k1=self.k1, k2=self.k2)\n    if deep:\n        deep_items = self.k1.get_params().items()\n        params.update((('k1__' + k, val) for k, val in deep_items))\n        deep_items = self.k2.get_params().items()\n        params.update((('k2__' + k, val) for k, val in deep_items))\n    return params",
    "docstring": "Get parameters of this kernel. Parameters ---------- deep : bool, default=True If True, will return the parameters for this estimator and contained subobjects that are estimators. Returns ------- params : dict Parameter names mapped to their values.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\gaussian_process\\kernels.py",
    "ast_data": "FunctionDef name:get_params arg:self arg:deep arguments arg arg Assign Call If Assign Call Call Call Assign Call Call Call Return return:yes"
  },
  {
    "library": "django",
    "name": "flatten",
    "source_code": "def flatten(self):\n    yield self\n    for expr in self.get_source_expressions():\n        if expr:\n            if hasattr(expr, 'flatten'):\n                yield from expr.flatten()\n            else:\n                yield expr",
    "docstring": "Recursively yield this expression and all subexpressions, in depth-first order.",
    "type": "method",
    "file_path": "django\\django\\db\\models\\expressions.py",
    "ast_data": "FunctionDef name:flatten arg:self arguments arg For Call If If Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_wrapped_model",
    "source_code": "@def_function.function(input_signature=input_signature, autograph=False)\ndef _wrapped_model(*args):\n    inputs = args[0] if len(input_signature) == 1 else list(args)\n    with keras_deps.get_call_context_function()().enter(model, inputs=inputs, build_graph=False, call_context_args={'training': False}, saving=True):\n        outputs = model(inputs, training=False)\n    return outputs",
    "docstring": "A concrete tf.function that wraps the model's call function.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\lite\\python\\tflite_keras_util.py",
    "ast_data": "FunctionDef name:_wrapped_model arguments arg Assign Compare Call Call With Call Call Call Assign Call Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "parse_attribute",
    "source_code": "@classmethod\ndef parse_attribute(cls, name, attr_string):\n    return None",
    "docstring": "Parse the attribute line if it knows how. Returns the parsed attribute, or None.",
    "type": "method",
    "file_path": "scipy\\scipy\\io\\arff\\_arffread.py",
    "ast_data": "FunctionDef name:parse_attribute arg:cls arg:name arg:attr_string arguments arg arg arg Return return:no"
  },
  {
    "library": "matplotlib",
    "name": "execute",
    "source_code": "def execute(self, fig):\n    info = self._params\n    renderer = fig._get_renderer()\n    with getattr(renderer, '_draw_disabled', nullcontext)():\n        kwargs = get_tight_layout_figure(fig, fig.axes, get_subplotspec_list(fig.axes), renderer, pad=info['pad'], h_pad=info['h_pad'], w_pad=info['w_pad'], rect=info['rect'])\n    if kwargs:\n        fig.subplots_adjust(**kwargs)",
    "docstring": "Execute tight_layout. This decides the subplot parameters given the padding that will allow the Axes labels to not be covered by other labels and Axes. Parameters ---------- fig : to perform layout on. See Also -------- .figure.Figure.tight_layout .pyplot.tight_layout",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\layout_engine.py",
    "ast_data": "FunctionDef name:execute arg:self arg:fig arguments arg arg Assign Assign Call With Call Call Assign Call Call If Call"
  },
  {
    "library": "tensorflow",
    "name": "_SwitchGrad",
    "source_code": "def _SwitchGrad(op, *grad):\n    graph = ops.get_default_graph()\n    op_ctxt = op._get_control_flow_context()\n    grad_ctxt = graph._get_control_flow_context()\n    if isinstance(op_ctxt, WhileContext):\n        merge_grad = grad_ctxt.grad_state.switch_map.get(op)\n        if merge_grad is not None:\n            if grad[1] is not None:\n                control_flow_ops._AddNextAndBackEdge(merge_grad, grad[1], enforce_shape_invariant=False)\n            return (None, None)\n        elif grad[0] is not None:\n            merge_grad = merge([grad[0], grad[0]], name='b_switch')[0]\n            grad_ctxt.grad_state.switch_map[op] = merge_grad\n            return (merge_grad, None)\n        else:\n            return (None, None)\n    elif isinstance(op_ctxt, CondContext):\n        zero_grad = grad[1 - op_ctxt.branch]\n        if zero_grad is None:\n            if op.inputs[0].dtype == dtypes.resource:\n                return (merge([grad[op_ctxt.branch]] * 2, name='cond_resource_grad')[0], None)\n            return (None, None)\n        return (merge(grad, name='cond_grad')[0], None)\n    else:\n        false_grad = switch(grad[0], op.inputs[1])[0]\n        true_grad = switch(grad[1], op.inputs[1])[1]\n        return (merge([false_grad, true_grad])[0], None)",
    "docstring": "Gradients for a Switch op is calculated using a Merge op. If the switch is a loop switch, it will be visited twice. We create the merge on the first visit, and update the other input of the merge on the second visit. A next_iteration is also added on second visit.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\control_flow_grad.py",
    "ast_data": "FunctionDef name:_SwitchGrad arg:op arguments arg arg Assign Call Assign Call Assign Call If Call Assign Call If Compare If Compare Call Return return:no If Compare Assign Call Assign Return return:yes Return return:no If Call Assign If Compare If Compare Return return:yes Call Return return:no Return return:yes Call Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "django",
    "name": "size",
    "source_code": "@property\ndef size(self):\n    return capi.cs_getsize(self.ptr, byref(c_uint()))",
    "docstring": "Return the size of this coordinate sequence.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\geos\\coordseq.py",
    "ast_data": "FunctionDef name:size arg:self arguments arg Return return:yes Call Call Call"
  },
  {
    "library": "scipy",
    "name": "identity",
    "source_code": "def identity(n, dtype='d', format=None):\n    return eye(n, n, dtype=dtype, format=format)",
    "docstring": "Identity matrix in sparse format Returns an identity matrix with shape `eye_arrayeye_arrayeye_array` to take advantage of the sparse array functionality. Parameters ---------- n : int Shape of the identity matrix. dtype : dtype, optional Data type of the matrix format : str, optional Sparse format of the result, e.g., format=\"csr\", etc. Returns ------- new_matrix : sparse matrix A square sparse matrix with ones on the main diagonal and zeros elsewhere. See Also -------- eye_array : Sparse array of chosen shape with ones on a specified diagonal. eye : Sparse matrix of chosen shape with ones on a specified diagonal. Examples -------- >>> import scipy as sp >>> sp.sparse.identity(3).toarray() array([[ 1., 0., 0.], [ 0., 1., 0.], [ 0., 0., 1.]]) >>> sp.sparse.identity(3, dtype='int8', format='dia') >>> sp.sparse.eye_array(3, dtype='int8', format='dia')",
    "type": "function",
    "file_path": "scipy\\scipy\\sparse\\_construct.py",
    "ast_data": "FunctionDef name:identity arg:n arg:dtype arg:format arguments arg arg arg Return return:yes Call"
  },
  {
    "library": "django",
    "name": "get_parent_list",
    "source_code": "def get_parent_list(self):\n    return list(self.all_parents)",
    "docstring": "Return all the ancestors of this model as a list ordered by MRO. Backward compatibility method.",
    "type": "method",
    "file_path": "django\\django\\db\\models\\options.py",
    "ast_data": "FunctionDef name:get_parent_list arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "is_channels_last_contiguous",
    "source_code": "def is_channels_last_contiguous(a: Tensor) -> bool:\n    return is_channels_last_contiguous_2d(a) or is_channels_last_contiguous_3d(a)",
    "docstring": "True when a tensor is channels-last contiguous. This requires that: - the tensor is conceptually either 4 (NHWC) or 5 (NDHWC) dimensions - if we name the tensor's dimensions NCHW or NCDHW, then the strides are such that the stride of the 'C' dimension (Cs) is 1 and the strides corresponding to each dimension (Xs) can be ordered Cs <= Ws <= Hs <= (Ds) <= Ns and are \"nested\" -- so Ws = Cs * Cl, where Cl is the length of the 'C' dimension, for example.",
    "type": "function",
    "file_path": "pytorch\\torch\\_prims_common\\__init__.py",
    "ast_data": "FunctionDef name:is_channels_last_contiguous arg:a arguments arg Return return:yes BoolOp Call Call"
  },
  {
    "library": "scipy",
    "name": "_nolan_round_difficult_input",
    "source_code": "def _nolan_round_difficult_input(x0, alpha, beta, zeta, x_tol_near_zeta, alpha_tol_near_one):\n    if np.abs(alpha - 1) < alpha_tol_near_one:\n        alpha = 1.0\n    x0 = _nolan_round_x_near_zeta(x0, alpha, zeta, x_tol_near_zeta)\n    return (x0, alpha, beta)",
    "docstring": "Round difficult input values for Nolan's method in [NO].",
    "type": "function",
    "file_path": "scipy\\scipy\\stats\\_levy_stable\\__init__.py",
    "ast_data": "FunctionDef name:_nolan_round_difficult_input arg:x0 arg:alpha arg:beta arg:zeta arg:x_tol_near_zeta arg:alpha_tol_near_one arguments arg arg arg arg arg arg If Compare Call Assign Assign Call Return return:yes"
  },
  {
    "library": "pandas",
    "name": "_format_attrs",
    "source_code": "def _format_attrs(self):\n    attrs = cast('list[tuple[str, str | int]]', self._get_data_as_items())\n    if self._name is not None:\n        attrs.append(('name', ibase.default_pprint(self._name)))\n    return attrs",
    "docstring": "Return a list of tuples of the (attr, formatted_value)",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\indexes\\range.py",
    "ast_data": "FunctionDef name:_format_attrs arg:self arguments arg Assign Call Call If Compare Call Call Return return:yes"
  },
  {
    "library": "kornia",
    "name": "retrieve_key",
    "source_code": "def retrieve_key(key: str) -> DataKey:\n    if key.upper().startswith('INPUT'):\n        return DataKey.INPUT\n    for dk in DataKey:\n        if key.upper() in {'BBOX_XYXY', 'BBOX_XYWH'}:\n            return DataKey.get(key.upper())\n        if key.upper().startswith(dk.name):\n            return DataKey.get(dk.name)\n    allowed_dk = ' | '.join((f'`{d.name}`' for d in DataKey))\n    raise ValueError(f'Your input data dictionary keys should start with some of datakey values: {allowed_dk}. Got `{key}`')",
    "docstring": "Try to retrieve the datakey value by matching .",
    "type": "method",
    "file_path": "kornia\\kornia\\augmentation\\container\\augment.py",
    "ast_data": "FunctionDef name:retrieve_key arg:key arguments arg If Call Call Return return:yes For If Compare Call Return return:yes Call Call If Call Call Return return:yes Call Assign Call Raise Call"
  },
  {
    "library": "pandas",
    "name": "_downsample",
    "source_code": "def _downsample(self, how, **kwargs):\n    ax = self.ax\n    obj = self._obj_with_exclusions\n    if not len(ax):\n        obj = obj.copy()\n        obj.index = obj.index._with_freq(self.freq)\n        assert obj.index.freq == self.freq, (obj.index.freq, self.freq)\n        return obj\n    if (ax.freq is not None or ax.inferred_freq is not None) and len(self._grouper.binlabels) > len(ax) and (how is None):\n        return self.asfreq()\n    result = obj.groupby(self._grouper).aggregate(how, **kwargs)\n    return self._wrap_result(result)",
    "docstring": "Downsample the cython defined function. Parameters ---------- how : string / cython mapped function **kwargs : kw args passed to how function",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\resample.py",
    "ast_data": "FunctionDef name:_downsample arg:self arg:how arguments arg arg arg Assign Assign If Call Assign Call Assign Call Compare Return return:yes If BoolOp BoolOp Compare Compare Compare Call Call Compare Return return:yes Call Assign Call Call Return return:yes Call"
  },
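Downsampling aggregations such as sum are routed through this _downsample machinery internally; a minimal usage sketch through the public resample API (the index and values here are arbitrary):

```python
import pandas as pd

idx = pd.date_range("2024-01-01", periods=6, freq="D")
s = pd.Series(range(6), index=idx)

# Aggregating 6 daily points into 3 two-day bins.
print(s.resample("2D").sum())
# 2024-01-01    1
# 2024-01-03    5
# 2024-01-05    9
```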
  {
    "library": "pandas",
    "name": "iat",
    "source_code": "@property\ndef iat(self) -> _iAtIndexer:\n    return _iAtIndexer('iat', self)",
    "docstring": "Access a single value for a row/column pair by integer position. Similar to `` if you only need to get or set a single value in a DataFrame or Series. Raises ------ IndexError When integer position is out of bounds. See Also -------- DataFrame.at : Access a single value for a row/column label pair. DataFrame.loc : Access a group of rows and columns by label(s). DataFrame.iloc : Access a group of rows and columns by integer position(s). Examples -------- >>> df = pd.DataFrame( ... [[0, 2, 3], [0, 4, 1], [10, 20, 30]], columns=[\"A\", \"B\", \"C\"] ... ) >>> df A B C 0 0 2 3 1 0 4 1 2 10 20 30 Get value at specified row/column pair >>> df.iat[1, 2] np.int64(1) Set value at specified row/column pair >>> df.iat[1, 2] = 10 >>> df.iat[1, 2] np.int64(10) Get value within a series >>> df.loc[0].iat[1] np.int64(2)",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\indexing.py",
    "ast_data": "FunctionDef name:iat arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "all",
    "source_code": "@dispatch.add_dispatch_support\n@doc_controls.do_not_generate_docs\ndef all(x, axis=None, keepdims=False):\n    x = math_ops.cast(x, dtypes_module.bool)\n    return math_ops.reduce_all(x, axis, keepdims)",
    "docstring": "Bitwise reduction (logical AND). Args: x: Tensor or variable. axis: axis along which to perform the reduction. keepdims: whether the drop or broadcast the reduction axes. Returns: A uint8 tensor (0s and 1s).",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\backend.py",
    "ast_data": "FunctionDef name:all arg:x arg:axis arg:keepdims arguments arg arg arg Assign Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_no_dependency",
    "source_code": "def _no_dependency(self, value):\n    return value",
    "docstring": "If automatic dependency tracking is enabled, ignores .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\trackable\\base.py",
    "ast_data": "FunctionDef name:_no_dependency arg:self arg:value arguments arg arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "replace_placeholder_name_and_target",
    "source_code": "def replace_placeholder_name_and_target(module: torch.fx.GraphModule, reference_module: torch.fx.GraphModule):\n    placeholders = [node for node in module.graph.nodes if node.op == 'placeholder']\n    reference_placeholders = [node for node in reference_module.graph.nodes if node.op == 'placeholder']\n    if len(placeholders) != len(reference_placeholders):\n        raise RuntimeError(f'The two modules have different number of arguments. module: {len(placeholders)}, reference_module: {len(reference_placeholders)}')\n    name_to_node: dict[str, torch.fx.Node] = {}\n    for node in module.graph.nodes:\n        name_to_node[node.name] = node\n    for placeholder, reference_placeholder in zip(placeholders, reference_placeholders):\n        placeholder.target = reference_placeholder.target\n        set_node_name(placeholder, reference_placeholder.name, name_to_node)\n    module.recompile()",
    "docstring": "Replace the argument names in module with those in reference_module. This function assumes the two modules have the same signature structure. The caller is responsible for ensuring this. Otherwise, the behavior of this function is undefined. This function only does minimal sanity check that the two modules have the same number of arguments. Name conflicts between new names and existing node names in the graph are handled. Check the documentation of :func: for more details. Raises: RuntimeError: If the two modules have different number of arguments.",
    "type": "function",
    "file_path": "pytorch\\torch\\onnx\\_internal\\fx\\passes\\_utils.py",
    "ast_data": "FunctionDef name:replace_placeholder_name_and_target arg:module arg:reference_module arguments arg arg Assign Compare Assign Compare If Compare Call Call Raise Call Call Call For Assign For Call Assign Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_update_snapshot",
    "source_code": "def _update_snapshot(self):\n    self._attribute_sentinel.invalidate_all()\n    if self._external_modification or self._non_append_mutation:\n        return\n    self._last_wrapped_list_snapshot = list(self._storage)",
    "docstring": "Acknowledges tracked changes to the wrapped list.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\trackable\\data_structures.py",
    "ast_data": "FunctionDef name:_update_snapshot arg:self arguments arg Call If BoolOp Return return:no Assign Call"
  },
  {
    "library": "cryptography",
    "name": "sign",
    "source_code": "@abc.abstractmethod\ndef sign(self, data: Buffer, algorithm: asym_utils.Prehashed | hashes.HashAlgorithm) -> bytes:\n    pass",
    "docstring": "Signs the data",
    "type": "method",
    "file_path": "cryptography\\src\\cryptography\\hazmat\\primitives\\asymmetric\\dsa.py",
    "ast_data": "FunctionDef name:sign arg:self arg:data arg:algorithm arguments arg arg arg"
  },
  {
    "library": "pandas",
    "name": "_get_result_dtype",
    "source_code": "def _get_result_dtype(self, dtype: np.dtype) -> np.dtype:\n    how = self.how\n    if how in ['sum', 'cumsum', 'sum', 'prod', 'cumprod']:\n        if dtype == np.dtype(bool):\n            return np.dtype(np.int64)\n    elif how in ['mean', 'median', 'var', 'std', 'sem']:\n        if dtype.kind in 'fc':\n            return dtype\n        elif dtype.kind in 'iub':\n            return np.dtype(np.float64)\n    return dtype",
    "docstring": "Get the desired dtype of a result based on the input dtype and how it was computed. Parameters ---------- dtype : np.dtype Returns ------- np.dtype The desired dtype of the result.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\groupby\\ops.py",
    "ast_data": "FunctionDef name:_get_result_dtype arg:self arg:dtype arguments arg arg Assign If Compare If Compare Call Return return:yes Call If Compare If Compare Return return:yes If Compare Return return:yes Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "format_guards",
    "source_code": "def format_guards(self, verbose: bool=False) -> str:\n    return '\\n'.join((f' - {guard.expr}{(' ' + str(guard.sloc) if verbose else '')}' for guard in self.guards))",
    "docstring": "Format this shape env's guard expressions with optional traceback info if verbose",
    "type": "method",
    "file_path": "pytorch\\torch\\fx\\experimental\\symbolic_shapes.py",
    "ast_data": "FunctionDef name:format_guards arg:self arg:verbose arguments arg arg Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "get_as_bool",
    "source_code": "def get_as_bool(self, key: str, default: Optional[bool]=None) -> Optional[bool]:\n    value = self.get(key, default)\n    if value is None or isinstance(value, bool):\n        return value\n    if isinstance(value, int):\n        if value == 1:\n            return True\n        if value == 0:\n            return False\n    elif isinstance(value, str):\n        if value.lower() in ['1', 'true', 't', 'yes', 'y']:\n            return True\n        if value.lower() in ['0', 'false', 'f', 'no', 'n']:\n            return False\n    raise ValueError(f\"The rendezvous configuration option '{key}' does not represent a valid boolean value.\")",
    "docstring": "Return the value for ``.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\elastic\\rendezvous\\api.py",
    "ast_data": "FunctionDef name:get_as_bool arg:self arg:key arg:default arguments arg arg arg Assign Call If BoolOp Compare Call Return return:yes If Call If Compare Return return:yes If Compare Return return:yes If Call If Compare Call Return return:yes If Compare Call Return return:yes Raise Call"
  },
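The coercion rules get_as_bool applies are easy to reproduce standalone — a minimal sketch (as_bool is a hypothetical helper, not part of the rendezvous API):

```python
def as_bool(value):
    # Mirrors get_as_bool above: pass through None/bool, accept 0/1 ints,
    # and a small set of truthy/falsy strings; reject everything else.
    if value is None or isinstance(value, bool):
        return value
    if isinstance(value, int):
        if value in (0, 1):
            return bool(value)
    elif isinstance(value, str):
        if value.lower() in ("1", "true", "t", "yes", "y"):
            return True
        if value.lower() in ("0", "false", "f", "no", "n"):
            return False
    raise ValueError(f"{value!r} does not represent a valid boolean value")

print(as_bool("Yes"), as_bool(0), as_bool(None))  # True False None
```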
  {
    "library": "numpy",
    "name": "array_ufunc_errmsg_formatter",
    "source_code": "def array_ufunc_errmsg_formatter(dummy, ufunc, method, *inputs, **kwargs):\n    args_string = ', '.join([f'{arg!r}' for arg in inputs] + [f'{k}={v!r}' for k, v in kwargs.items()])\n    args = inputs + kwargs.get('out', ())\n    types_string = ', '.join((repr(type(arg).__name__) for arg in args))\n    return f'operand type(s) all returned NotImplemented from __array_ufunc__({ufunc!r}, {method!r}, {args_string}): {types_string}'",
    "docstring": "Format the error message for when __array_ufunc__ gives up.",
    "type": "function",
    "file_path": "numpy\\numpy\\_core\\_internal.py",
    "ast_data": "FunctionDef name:array_ufunc_errmsg_formatter arg:dummy arg:ufunc arg:method arguments arg arg arg arg arg Assign Call Call Assign Call Assign Call Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "from_config",
    "source_code": "@classmethod\ndef from_config(cls, config, custom_objects=None):\n    if 'initial_accumulator_value' not in config:\n        config['initial_accumulator_value'] = 0.1\n    if 'lr' in config:\n        config['learning_rate'] = config.pop('lr')\n    return cls(**config)",
    "docstring": "Creates an optimizer from its config. This method is the reverse of , capable of instantiating the same optimizer from the config dictionary. Args: config: A Python dictionary, typically the output of get_config. custom_objects: A Python dictionary mapping names to additional Python objects used to create this optimizer, such as a function used for a hyperparameter. Returns: An optimizer instance.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\optimizer_v2\\adagrad.py",
    "ast_data": "FunctionDef name:from_config arg:cls arg:config arg:custom_objects arguments arg arg arg If Compare Assign If Compare Assign Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "propagate_dtypes_for_known_nodes",
    "source_code": "def propagate_dtypes_for_known_nodes(graph: Graph, node_name_to_match_result_with_qconfig: dict[str, _MatchResultWithQConfig]) -> None:\n    for node in graph.nodes:\n        non_observable_arg_dict = get_non_observable_arg_indexes_and_types(node)\n        for arg_type in non_observable_arg_dict:\n            non_observable_indices = non_observable_arg_dict[arg_type](node)\n            for index in non_observable_indices:\n                arg = node.args[index]\n                if isinstance(arg, (tuple, list)):\n                    arg_list = list(arg)\n                else:\n                    arg_list = [arg]\n                for cur_arg in arg_list:\n                    if isinstance(cur_arg, torch.fx.node.Node):\n                        _maybe_propagate_dtype_for_node(cur_arg, arg_type, node_name_to_match_result_with_qconfig)",
    "docstring": "Currently we assume that inputs to the graph are either or , which is not always correct. For ops such as , we know that the dtype of is a . Propagate this information throughout the graph. Note: not all dtypes in the graph will be correct after this pass, but a higher percentage of them will be correct. Hopefully in the future we can replace this with a better way to reason about dtypes of tensors.",
    "type": "function",
    "file_path": "pytorch\\torch\\ao\\quantization\\fx\\prepare.py",
    "ast_data": "FunctionDef name:propagate_dtypes_for_known_nodes arg:graph arg:node_name_to_match_result_with_qconfig arguments arg arg For Assign Call For Assign Call For Assign If Call Assign Call Assign For If Call Call"
  },
  {
    "library": "tensorflow",
    "name": "device_path_to_device_name",
    "source_code": "def device_path_to_device_name(device_dir):\n    path_items = os.path.basename(device_dir)[len(METADATA_FILE_PREFIX) + len(DEVICE_TAG):].split(',')\n    return '/'.join([path_item.replace('device_', 'device:').replace('_', ':', 1) for path_item in path_items])",
    "docstring": "Parse device name from device path. Args: device_dir: (str) a directory name for the device. Returns: (str) parsed device name.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\debug\\lib\\debug_data.py",
    "ast_data": "FunctionDef name:device_path_to_device_name arg:device_dir arguments arg Assign Call Call Call Call Return return:yes Call Call Call"
  },
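The two chained replace calls are the whole trick in device_path_to_device_name; a minimal sketch of that transformation (the real function first strips the METADATA_FILE_PREFIX and DEVICE_TAG prefix from the directory name — the path items below are hypothetical):

```python
path_items = ["job_localhost", "replica_0", "task_0", "device_GPU_0"]

# "device_" becomes "device:", then only the first remaining "_"
# is turned into ":", and the items are joined with "/".
name = "/".join(
    item.replace("device_", "device:").replace("_", ":", 1)
    for item in path_items
)
print(name)  # job:localhost/replica:0/task:0/device:GPU:0
```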
  {
    "library": "matplotlib",
    "name": "viewlim_to_dt",
    "source_code": "def viewlim_to_dt(self):\n    vmin, vmax = self.axis.get_view_interval()\n    if vmin > vmax:\n        vmin, vmax = (vmax, vmin)\n    return (num2date(vmin, self.tz), num2date(vmax, self.tz))",
    "docstring": "Convert the view interval to datetime objects.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\dates.py",
    "ast_data": "FunctionDef name:viewlim_to_dt arg:self arguments arg Assign Call If Compare Assign Return return:yes Call Call"
  },
  {
    "library": "scipy",
    "name": "fp",
    "source_code": "def fp(eps, a, b, x, phi):\n    eps_a = np.power(1.0 * eps, -a)\n    return eps * np.cos(phi) - a * x * eps_a * np.cos(a * phi) + 1 - b",
    "docstring": "Derivative of f w.r.t. phi.",
    "type": "function",
    "file_path": "scipy\\scipy\\special\\_precompute\\wright_bessel.py",
    "ast_data": "FunctionDef name:fp arg:eps arg:a arg:b arg:x arg:phi arguments arg arg arg arg arg Assign Call Return return:yes Call Call"
  },
  {
    "library": "numpy",
    "name": "_ravel_and_check_weights",
    "source_code": "def _ravel_and_check_weights(a, weights):\n    a = np.asarray(a)\n    if a.dtype == np.bool:\n        msg = f'Converting input from {a.dtype} to {np.uint8} for compatibility.'\n        warnings.warn(msg, RuntimeWarning, stacklevel=3)\n        a = a.astype(np.uint8)\n    if weights is not None:\n        weights = np.asarray(weights)\n        if weights.shape != a.shape:\n            raise ValueError('weights should have the same shape as a.')\n        weights = weights.ravel()\n    a = a.ravel()\n    return (a, weights)",
    "docstring": "Check a and weights have matching shapes, and ravel both",
    "type": "function",
    "file_path": "numpy\\numpy\\lib\\_histograms_impl.py",
    "ast_data": "FunctionDef name:_ravel_and_check_weights arg:a arg:weights arguments arg arg Assign Call If Compare Assign Call Assign Call If Compare Assign Call If Compare Raise Call Assign Call Assign Call Return return:yes"
  },
  {
    "library": "kornia",
    "name": "GuidedBlur",
    "source_code": "class GuidedBlur(Module):\n\n    def __init__(self, kernel_size: tuple[int, int] | int, eps: float, border_type: str='reflect', subsample: int=1) -> None:\n        super().__init__()\n        self.kernel_size = kernel_size\n        self.eps = eps\n        self.border_type = border_type\n        self.subsample = subsample\n\n    def __repr__(self) -> str:\n        return f'{self.__class__.__name__}(kernel_size={self.kernel_size}, eps={self.eps}, border_type={self.border_type}, subsample={self.subsample})'\n\n    def forward(self, guidance: Tensor, input: Tensor) -> Tensor:\n        return guided_blur(guidance, input, self.kernel_size, self.eps, self.border_type, self.subsample)",
    "docstring": "Blur a tensor using a Guided filter. The operator is an edge-preserving image smoothing filter. See :cite: and :cite: for details. Guidance and input can have different number of channels. Arguments: kernel_size: the size of the kernel. eps: regularization parameter. Smaller values preserve more edges. border_type: the padding mode to be applied before convolving. The expected modes are: `(B, C, H, W)(B, C, H, W)(B, C, H, W)` Examples: >>> guidance = torch.rand(2, 3, 5, 5) >>> input = torch.rand(2, 4, 5, 5) >>> blur = GuidedBlur(3, 0.1) >>> output = blur(guidance, input) >>> output.shape torch.Size([2, 4, 5, 5])",
    "type": "class",
    "file_path": "kornia\\kornia\\filters\\guided.py",
    "ast_data": "ClassDef name:GuidedBlur FunctionDef name:__init__ arg:self arg:kernel_size arg:eps arg:border_type arg:subsample arguments arg arg arg arg arg Call Call Assign Assign Assign Assign FunctionDef name:__repr__ arg:self arguments arg Return return:yes FunctionDef name:forward arg:self arg:guidance arg:input arguments arg arg arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "zip_folder",
    "source_code": "def zip_folder(folder_to_zip: Path, dest_file_base_name: Path) -> Path:\n    if dest_file_base_name.suffix == '.zip':\n        dest_file_base_name = dest_file_base_name.with_suffix('')\n    ensure_dir_exists(dest_file_base_name.parent)\n    print(f'Zipping {folder_to_zip}\\n     to {dest_file_base_name}')\n    return Path(shutil.make_archive(str(dest_file_base_name), 'zip', folder_to_zip))",
    "docstring": "Returns the path to the resulting zip file, with the appropriate extension added if needed",
    "type": "function",
    "file_path": "pytorch\\.github\\scripts\\file_io_utils.py",
    "ast_data": "FunctionDef name:zip_folder arg:folder_to_zip arg:dest_file_base_name arguments arg arg If Compare Assign Call Call Call Return return:yes Call Call Call"
  },
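The suffix handling in zip_folder exists because shutil.make_archive appends ".zip" itself; a minimal sketch with hypothetical paths, created up front so it runs end to end:

```python
import shutil
from pathlib import Path

# Hypothetical folder and destination for the sketch.
src = Path("build/artifacts")
src.mkdir(parents=True, exist_ok=True)
(src / "log.txt").write_text("hello")

dest = Path("dist/artifacts.zip")
dest.parent.mkdir(parents=True, exist_ok=True)

# Strip ".zip" from the base name, since make_archive re-adds it.
archive = Path(shutil.make_archive(str(dest.with_suffix("")), "zip", src))
print(archive)  # dist/artifacts.zip
```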
  {
    "library": "pandas",
    "name": "add_object_type_line",
    "source_code": "def add_object_type_line(self) -> None:\n    self._lines.append(str(type(self.data)))",
    "docstring": "Add line with string representation of dataframe to the table.",
    "type": "method",
    "file_path": "pandas\\pandas\\io\\formats\\info.py",
    "ast_data": "FunctionDef name:add_object_type_line arg:self arguments arg Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "_assert_in_training_states",
    "source_code": "@no_type_check\ndef _assert_in_training_states(state: _FSDPState, training_states: list[TrainingState]) -> None:\n    if state.training_state not in training_states:\n        msg = f'expected to be in states {training_states} but current state is {state.training_state}'\n        if state.rank == 0:\n            if isinstance(state, nn.Module):\n                print(f'Asserting FSDP instance is: {state}')\n            print(f'ERROR: {msg}')\n            traceback.print_stack()\n        raise ValueError(msg)",
    "docstring": "Asserts that FSDP is in the states ``.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\fsdp\\_common_utils.py",
    "ast_data": "FunctionDef name:_assert_in_training_states arg:state arg:training_states arguments arg arg If Compare Assign If Compare If Call Call Call Call Raise Call"
  },
  {
    "library": "tensorflow",
    "name": "as_tensor",
    "source_code": "def as_tensor(self):\n    with ops.control_dependencies(None):\n        return self._concat()",
    "docstring": "Returns the overall concatenated value as a . The returned tensor will not inherit the control dependencies from the scope where the value is used, which is similar to getting the value of . Returns: containing the concatenated value.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\variables.py",
    "ast_data": "FunctionDef name:as_tensor arg:self arguments arg With Call Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "KNeighborsClassifierBenchmark",
    "source_code": "class KNeighborsClassifierBenchmark(Predictor, Estimator, Benchmark):\n    param_names = ['algorithm', 'dimension', 'n_jobs']\n    params = (['brute', 'kd_tree', 'ball_tree'], ['low', 'high'], Benchmark.n_jobs_vals)\n\n    def setup_cache(self):\n        super().setup_cache()\n\n    def make_data(self, params):\n        algorithm, dimension, n_jobs = params\n        if Benchmark.data_size == 'large':\n            n_components = 40 if dimension == 'low' else 200\n        else:\n            n_components = 10 if dimension == 'low' else 50\n        data = _20newsgroups_lowdim_dataset(n_components=n_components)\n        return data\n\n    def make_estimator(self, params):\n        algorithm, dimension, n_jobs = params\n        estimator = KNeighborsClassifier(algorithm=algorithm, n_jobs=n_jobs)\n        return estimator\n\n    def make_scorers(self):\n        make_gen_classif_scorers(self)",
    "docstring": "Benchmarks for KNeighborsClassifier.",
    "type": "class",
    "file_path": "scikit-learn\\asv_benchmarks\\benchmarks\\neighbors.py",
    "ast_data": "ClassDef name:KNeighborsClassifierBenchmark Assign Assign FunctionDef name:setup_cache arg:self arguments arg Call Call FunctionDef name:make_data arg:self arg:params arguments arg arg Assign If Compare Assign Compare Assign Compare Assign Call Return return:yes FunctionDef name:make_estimator arg:self arg:params arguments arg arg Assign Assign Call Return return:yes FunctionDef name:make_scorers arg:self arguments arg Call"
  },
  {
    "library": "tensorflow",
    "name": "call_for_each_replica",
    "source_code": "def call_for_each_replica(self, fn, args=(), kwargs=None):\n    distribute_lib._require_cross_replica_or_default_context_extended(self)\n    if kwargs is None:\n        kwargs = {}\n    map_fn = functools.partial(dtensor_util.convert_inputs_to_dtensor, mesh=self._mesh)\n    d_args = nest.map_structure(map_fn, args)\n    d_kwargs = nest.map_structure(map_fn, kwargs)\n    with self._container_strategy().scope():\n        with dtensor_util.DTensorReplicaContext(self._container_strategy()):\n            dtensor_result = fn(*d_args, **d_kwargs)\n    return nest.map_structure(dtensor_util.DTensorDistributedValue, dtensor_result)",
    "docstring": "Run once per replica. This is a method that expected by the strategy base class in its . Args: fn: function to run (will be run once per replica). args: Tuple or list with positional arguments for . kwargs: Dict with keyword arguments for . Returns: Merged return value of across all replicas.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\experimental\\dtensor_strategy_extended.py",
    "ast_data": "FunctionDef name:call_for_each_replica arg:self arg:fn arg:args arg:kwargs arguments arg arg arg arg Call If Compare Assign Assign Call Assign Call Assign Call With Call Call With Call Call Assign Call Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "_axis_method_wrapper",
    "source_code": "class _axis_method_wrapper:\n\n    def __init__(self, attr_name, method_name, *, doc_sub=None):\n        self.attr_name = attr_name\n        self.method_name = method_name\n        doc = inspect.getdoc(getattr(maxis.Axis, method_name))\n        self._missing_subs = []\n        if doc:\n            doc_sub = {'this Axis': f'the {self.attr_name}', **(doc_sub or {})}\n            for k, v in doc_sub.items():\n                if k not in doc:\n                    self._missing_subs.append(k)\n                doc = doc.replace(k, v)\n        self.__doc__ = doc\n\n    def __set_name__(self, owner, name):\n        get_method = attrgetter(f'{self.attr_name}.{self.method_name}')\n\n        def wrapper(self, *args, **kwargs):\n            return get_method(self)(*args, **kwargs)\n        wrapper.__module__ = owner.__module__\n        wrapper.__name__ = name\n        wrapper.__qualname__ = f'{owner.__qualname__}.{name}'\n        wrapper.__doc__ = self.__doc__\n        wrapper.__signature__ = inspect.signature(getattr(maxis.Axis, self.method_name))\n        if self._missing_subs:\n            raise ValueError('The definition of {} expected that the docstring of Axis.{} contains {!r} as substrings'.format(wrapper.__qualname__, self.method_name, ', '.join(map(repr, self._missing_subs))))\n        setattr(owner, name, wrapper)",
    "docstring": "Helper to generate Axes methods wrapping Axis methods. After :: get_foo = _axis_method_wrapper(\"xaxis\", \"get_bar\") (in the body of a class) `` is built by replacing \"this Axis\" by \"the {attr_name}\" (i.e., \"the xaxis\", \"the yaxis\") in the wrapped method's dedented docstring; additional replacements can be given in *doc_sub*.",
    "type": "class",
    "file_path": "matplotlib\\lib\\matplotlib\\axes\\_base.py",
    "ast_data": "ClassDef name:_axis_method_wrapper FunctionDef name:__init__ arg:self arg:attr_name arg:method_name arguments arg arg arg arg Assign Assign Assign Call Call Assign If Assign BoolOp For Call If Compare Call Assign Call Assign FunctionDef name:__set_name__ arg:self arg:owner arg:name arguments arg arg arg Assign Call FunctionDef name:wrapper arg:self arguments arg arg arg Return return:yes Call Call Assign Assign Assign Assign Assign Call Call If Raise Call Call Call Call Call"
  },
  {
    "library": "numpy",
    "name": "swapcase",
    "source_code": "def swapcase(self):\n    return asarray(swapcase(self))",
    "docstring": "For each element in , return a copy of the string with uppercase characters converted to lowercase and vice versa. See Also -------- char.swapcase",
    "type": "method",
    "file_path": "numpy\\numpy\\_core\\defchararray.py",
    "ast_data": "FunctionDef name:swapcase arg:self arguments arg Return return:yes Call Call"
  },
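The functional counterpart of this chararray method is available as np.char.swapcase — a one-liner usage sketch:

```python
import numpy as np

a = np.array(["Hello World", "NumPy"])
# Uppercase becomes lowercase and vice versa, elementwise.
print(np.char.swapcase(a))  # ['hELLO wORLD' 'nUMpY']
```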
  {
    "library": "django",
    "name": "ContextPopException",
    "source_code": "class ContextPopException(Exception):\n    pass",
    "docstring": "pop() has been called more times than push()",
    "type": "class",
    "file_path": "django\\django\\template\\context.py",
    "ast_data": "ClassDef name:ContextPopException"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, op_type):\n    self.op_type = op_type",
    "docstring": "Creates an object to register a converter for op with type .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\parallel_for\\pfor.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:op_type arguments arg arg Assign"
  },
  {
    "library": "django",
    "name": "field_as_sql",
    "source_code": "def field_as_sql(self, field, get_placeholder, val):\n    if field is None:\n        sql, params = (val, [])\n    elif hasattr(val, 'as_sql'):\n        sql, params = self.compile(val)\n    elif get_placeholder is not None:\n        sql, params = (get_placeholder(val, self, self.connection), [val])\n    else:\n        sql, params = ('%s', [val])\n    params = self.connection.ops.modify_insert_params(sql, params)\n    return (sql, params)",
    "docstring": "Take a field and a value intended to be saved on that field, and return placeholder SQL and accompanying params. Check for raw values, expressions, and fields with get_placeholder() defined in that order. When field is None, consider the value raw and use it as the placeholder, with no corresponding parameters returned.",
    "type": "method",
    "file_path": "django\\django\\db\\models\\sql\\compiler.py",
    "ast_data": "FunctionDef name:field_as_sql arg:self arg:field arg:get_placeholder arg:val arguments arg arg arg arg If Compare Assign If Call Assign Call If Compare Assign Call Assign Assign Call Return return:yes"
  },
  {
    "library": "kornia",
    "name": "adjust_sigmoid",
    "source_code": "def adjust_sigmoid(image: Tensor, cutoff: float=0.5, gain: float=10, inv: bool=False) -> Tensor:\n    KORNIA_CHECK_IS_TENSOR(image, 'Expected shape (*, H, W)')\n    if inv:\n        img_adjust = 1 - 1 / (1 + (gain * (cutoff - image)).exp())\n    else:\n        img_adjust = 1 / (1 + (gain * (cutoff - image)).exp())\n    return img_adjust",
    "docstring": "Adjust sigmoid correction on the input image tensor. The input image is expected to be in the range of [0, 1]. Reference: [1]: Gustav J. Braun, \"Image Lightness Rescaling Using Sigmoidal Contrast Enhancement Functions\", Args: image: Image to be adjusted in the shape of :math:. cutoff: The cutoff of sigmoid function. gain: The multiplier of sigmoid function. inv: If is set to True the function will return the inverse sigmoid correction. Returns: Adjusted tensor in the shape of :math:. Example: >>> x = torch.ones(1, 1, 2, 2) >>> adjust_sigmoid(x, gain=0) tensor([[[[0.5000, 0.5000], [0.5000, 0.5000]]]])",
    "type": "function",
    "file_path": "kornia\\kornia\\enhance\\adjust.py",
    "ast_data": "FunctionDef name:adjust_sigmoid arg:image arg:cutoff arg:gain arg:inv arguments arg arg arg arg Call If Assign Call Assign Call Return return:yes"
  },
  {
    "library": "pandas",
    "name": "_mode",
    "source_code": "def _mode(self, dropna: bool=True) -> Self:\n    pa_type = self._pa_array.type\n    if pa.types.is_temporal(pa_type):\n        nbits = pa_type.bit_width\n        if nbits == 32:\n            data = self._pa_array.cast(pa.int32())\n        elif nbits == 64:\n            data = self._pa_array.cast(pa.int64())\n        else:\n            raise NotImplementedError(pa_type)\n    else:\n        data = self._pa_array\n    if dropna:\n        data = data.drop_null()\n    res = pc.value_counts(data)\n    most_common = res.field('values').filter(pc.equal(res.field('counts'), pc.max(res.field('counts'))))\n    if pa.types.is_temporal(pa_type):\n        most_common = most_common.cast(pa_type)\n    most_common = most_common.take(pc.array_sort_indices(most_common))\n    return type(self)(most_common)",
    "docstring": "Returns the mode(s) of the ExtensionArray. Always returns even if only one value. Parameters ---------- dropna : bool, default True Don't consider counts of NA values. Returns ------- same type as self Sorted, if possible.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\arrays\\arrow\\array.py",
    "ast_data": "FunctionDef name:_mode arg:self arg:dropna arguments arg arg Assign If Call Assign If Compare Assign Call Call If Compare Assign Call Call Raise Call Assign If Assign Call Assign Call Assign Call Call Call Call Call Call If Call Assign Call Assign Call Call Return return:yes Call Call"
  },
  {
    "library": "sphinx",
    "name": "LastMessagesWriter",
    "source_code": "class LastMessagesWriter:\n\n    def __init__(self, app: Sphinx, stream: IO[str]) -> None:\n        self.app = app\n\n    def write(self, data: str) -> None:\n        self.app.messagelog.append(data)",
    "docstring": "Stream writer storing last 10 messages in memory to save trackback",
    "type": "class",
    "file_path": "sphinx\\sphinx\\util\\logging.py",
    "ast_data": "ClassDef name:LastMessagesWriter FunctionDef name:__init__ arg:self arg:app arg:stream arguments arg arg arg Assign FunctionDef name:write arg:self arg:data arguments arg arg Call"
  },
  {
    "library": "scipy",
    "name": "_select_singleton_columns",
    "source_code": "def _select_singleton_columns(A, b):\n    column_indices = np.nonzero(np.sum(np.abs(A) != 0, axis=0) == 1)[0]\n    columns = A[:, column_indices]\n    row_indices = np.zeros(len(column_indices), dtype=int)\n    nonzero_rows, nonzero_columns = np.nonzero(columns)\n    row_indices[nonzero_columns] = nonzero_rows\n    same_sign = A[row_indices, column_indices] * b[row_indices] >= 0\n    column_indices = column_indices[same_sign][::-1]\n    row_indices = row_indices[same_sign][::-1]\n    unique_row_indices, first_columns = np.unique(row_indices, return_index=True)\n    return (column_indices[first_columns], unique_row_indices)",
    "docstring": "Finds singleton columns for which the singleton entry is of the same sign as the right-hand side; these columns are eligible for inclusion in an initial basis. Determines the rows in which the singleton entries are located. For each of these rows, returns the indices of the one singleton column and its corresponding row.",
    "type": "function",
    "file_path": "scipy\\scipy\\optimize\\_linprog_rs.py",
    "ast_data": "FunctionDef name:_select_singleton_columns arg:A arg:b arguments arg arg Assign Call Compare Call Compare Call Assign Assign Call Call Assign Call Assign Assign Compare Assign Assign Assign Call Return return:yes"
  },
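The core of _select_singleton_columns is a pair of vectorized nonzero tests; a simplified numpy demo of just the selection and sign check (it skips the reversal and de-duplication steps the real function performs):

```python
import numpy as np

A = np.array([[1.0,  0.0, 2.0],
              [0.0, -3.0, 2.0]])
b = np.array([4.0, -6.0])

# Columns with exactly one nonzero entry (here: columns 0 and 1).
singletons = np.nonzero(np.sum(np.abs(A) != 0, axis=0) == 1)[0]
rows = np.argmax(A[:, singletons] != 0, axis=0)

# Keep only singletons whose entry agrees in sign with b in its row.
same_sign = A[rows, singletons] * b[rows] >= 0
print(singletons[same_sign])  # [0 1]
```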
  {
    "library": "matplotlib",
    "name": "adjust_compatible",
    "source_code": "@property\ndef adjust_compatible(self):\n    if self._adjust_compatible is None:\n        raise NotImplementedError\n    return self._adjust_compatible",
    "docstring": "Return a boolean if the layout engine is compatible with .",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\layout_engine.py",
    "ast_data": "FunctionDef name:adjust_compatible arg:self arguments arg If Compare Raise Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_set_unshard_async_op",
    "source_code": "def _set_unshard_async_op(self, async_op: bool):\n    self_module = cast(nn.Module, self)\n    for module in self_module.modules():\n        if isinstance(module, FSDPModule):\n            state = module._get_fsdp_state()\n            if (fsdp_param_group := state._fsdp_param_group):\n                fsdp_param_group.unshard_async_op = async_op",
    "docstring": "Sets whether to use `unshard`) in forward to still get overlap, and the pre-all-gather ops like dtype casting and copy-in will not overlap with compute.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\fsdp\\_fully_shard\\_fully_shard.py",
    "ast_data": "FunctionDef name:_set_unshard_async_op arg:self arg:async_op arguments arg arg Assign Call For Call If Call Assign Call If Assign"
  },
  {
    "library": "pytorch",
    "name": "FormattedTimesMixin",
    "source_code": "class FormattedTimesMixin:\n    cpu_time_str = _attr_formatter('cpu_time')\n    device_time_str = _attr_formatter('device_time')\n    cpu_time_total_str = _attr_formatter('cpu_time_total')\n    device_time_total_str = _attr_formatter('device_time_total')\n    self_cpu_time_total_str = _attr_formatter('self_cpu_time_total')\n    self_device_time_total_str = _attr_formatter('self_device_time_total')\n\n    @property\n    def cpu_time(self):\n        return 0.0 if self.count == 0 else 1.0 * self.cpu_time_total / self.count\n\n    @property\n    def device_time(self):\n        return 0.0 if self.count == 0 else 1.0 * self.device_time_total / self.count\n\n    @property\n    @deprecated('`cuda_time` is deprecated, please use `device_time` instead.', category=FutureWarning)\n    def cuda_time(self):\n        return self.device_time",
    "docstring": "Helpers for FunctionEvent and FunctionEventAvg. The subclass should define and attributes.",
    "type": "class",
    "file_path": "pytorch\\torch\\autograd\\profiler_util.py",
    "ast_data": "ClassDef name:FormattedTimesMixin Assign Call Assign Call Assign Call Assign Call Assign Call Assign Call FunctionDef name:cpu_time arg:self arguments arg Return return:yes Compare FunctionDef name:device_time arg:self arguments arg Return return:yes Compare FunctionDef name:cuda_time arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "unused",
    "source_code": "def unused(g):\n    n = g.op('prim::Constant')\n    n.setType(_C.OptionalType.ofTensor())\n    return n",
    "docstring": "Represents \"missing\" optional inputs.",
    "type": "function",
    "file_path": "pytorch\\torch\\onnx\\symbolic_opset9.py",
    "ast_data": "FunctionDef name:unused arg:g arguments arg Assign Call Call Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "remove_callback",
    "source_code": "def remove_callback(self, oid):\n    self._callbacks.disconnect(oid)",
    "docstring": "Remove a callback based on its observer id. See Also -------- add_callback",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\artist.py",
    "ast_data": "FunctionDef name:remove_callback arg:self arg:oid arguments arg arg Call"
  },
  {
    "library": "django",
    "name": "NotRegistered",
    "source_code": "class NotRegistered(Exception):\n    pass",
    "docstring": "The model is not registered.",
    "type": "class",
    "file_path": "django\\django\\contrib\\admin\\exceptions.py",
    "ast_data": "ClassDef name:NotRegistered"
  },
  {
    "library": "sphinx",
    "name": "Code",
    "source_code": "class Code(SphinxDirective):\n    optional_arguments = 1\n    option_spec: ClassVar[OptionSpec] = {'class': directives.class_option, 'force': directives.flag, 'name': directives.unchanged, 'number-lines': optional_int}\n    has_content = True\n\n    def run(self) -> list[Node]:\n        self.assert_has_content()\n        set_classes(self.options)\n        code = '\\n'.join(self.content)\n        node = nodes.literal_block(code, code, classes=self.options.get('classes', []), force='force' in self.options, highlight_args={})\n        self.add_name(node)\n        set_source_info(self, node)\n        if self.arguments:\n            node['language'] = self.arguments[0]\n        else:\n            node['language'] = self.env.current_document.highlight_language or self.config.highlight_language\n        if 'number-lines' in self.options:\n            node['linenos'] = True\n            if self.options['number-lines']:\n                node['highlight_args']['linenostart'] = self.options['number-lines']\n        return [node]",
    "docstring": "Parse and mark up content of a code block. This is compatible with docutils' :rst:dir: directive.",
    "type": "class",
    "file_path": "sphinx\\sphinx\\directives\\patches.py",
    "ast_data": "ClassDef name:Code Assign Assign FunctionDef name:run arg:self arguments arg Call Call Assign Call Assign Call Call Compare Call Call If Assign Assign BoolOp If Compare Assign If Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "__getitem__",
    "source_code": "@abstractmethod\ndef __getitem__(self, index):\n    raise NotImplementedError",
    "docstring": "Gets batch at position . Args: index: position of the batch in the Sequence. Returns: A batch",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\utils\\data_utils.py",
    "ast_data": "FunctionDef name:__getitem__ arg:self arg:index arguments arg arg Raise"
  },
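This abstract __getitem__ is what Sequence subclasses implement to serve one batch per index; a minimal sketch against the public tf.keras.utils.Sequence (ArraySequence is a hypothetical subclass for illustration):

```python
import numpy as np
from tensorflow.keras.utils import Sequence

class ArraySequence(Sequence):
    """Minimal Sequence serving fixed-size batches from an array."""

    def __init__(self, data, batch_size):
        self.data = data
        self.batch_size = batch_size

    def __len__(self):
        return int(np.ceil(len(self.data) / self.batch_size))

    def __getitem__(self, index):
        # The batch at `index` is just a slice of the backing array.
        start = index * self.batch_size
        return self.data[start:start + self.batch_size]

seq = ArraySequence(np.arange(10), batch_size=4)
print(len(seq), seq[2])  # 3 [8 9]
```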
  {
    "library": "kornia",
    "name": "transform_boxes",
    "source_code": "def transform_boxes(self, input: Union[Tensor, Boxes], params: List[ParamItem], extra_args: Optional[Dict[str, Any]]=None) -> Union[Tensor, Boxes]:\n    if isinstance(input, Tensor):\n        batchsize, frame_num = (input.size(0), input.size(1))\n        input = Boxes.from_tensor(input.view(-1, input.size(2), input.size(3), input.size(4)), mode='vertices_plus')\n        input = super().transform_boxes(input, params, extra_args=extra_args)\n        input = input.data.view(batchsize, frame_num, -1, 4, 2)\n    else:\n        input = super().transform_boxes(input, params, extra_args=extra_args)\n    return input",
    "docstring": "Transform bounding boxes. Args: input: tensor with shape :math:. If input is a type, the internal shape is :math:. params: params for the sequence. extra_args: Optional dictionary of extra arguments with specific options for different input types.",
    "type": "method",
    "file_path": "kornia\\kornia\\augmentation\\container\\video.py",
    "ast_data": "FunctionDef name:transform_boxes arg:self arg:input arg:params arg:extra_args arguments arg arg arg arg If Call Assign Call Call Assign Call Call Call Call Call Assign Call Call Assign Call Assign Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_generate_module_fqn_to_detector_info_mapping",
    "source_code": "def _generate_module_fqn_to_detector_info_mapping(self, update_qconfig_info_function: Callable) -> dict[str, DetectorQConfigInfo]:\n    if not self._prepared_flag:\n        raise Exception('Cannot generate report without preparing model for callibration')\n    if self._removed_observers:\n        raise Exception('Cannot generate report on model you already removed observers from')\n    detector_qconfig_info_combined: dict[str, DetectorQConfigInfo] = {}\n    for detector in self._desired_report_detectors:\n        detector_info: dict[str, DetectorQConfigInfo] = detector.get_qconfig_info(self._model)\n        for module_fqn in detector_info:\n            if module_fqn in detector_qconfig_info_combined:\n                current_options = detector_qconfig_info_combined[module_fqn]\n                detector_options = detector_info[module_fqn]\n                update_qconfig_info_function(current_options, detector_options)\n            else:\n                detector_qconfig_info_combined[module_fqn] = detector_info[module_fqn]\n    return detector_qconfig_info_combined",
    "docstring": "Generates a QConfigMapping based on the suggestions of the ModelReport API. The generated mapping encompasses all the different types of feedback from the different detectors all into one place. These configs are based on the suggestions provided by the ModelReport API and can only be generated once the reports have been generated. Args: update_qconfig_info_function (Callable) takes in a function that takes in two DetectorQConfigInfo and updates the one that is being compiled Returns a Dict mapping module_fqns to DetectorQConfigInfo objects Note: Throws exception if we try to generate mapping on model we already removed observers from Throws exception if we try to generate mapping without preparing for callibration",
    "type": "method",
    "file_path": "pytorch\\torch\\ao\\quantization\\fx\\_model_report\\model_report.py",
    "ast_data": "FunctionDef name:_generate_module_fqn_to_detector_info_mapping arg:self arg:update_qconfig_info_function arguments arg arg If Raise Call If Raise Call For Call For If Compare Assign Assign Call Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "register_filesystem_plugin",
    "source_code": "@tf_export('experimental.register_filesystem_plugin')\ndef register_filesystem_plugin(plugin_location):\n    if os.path.exists(plugin_location):\n        py_tf.TF_RegisterFilesystemPlugin(plugin_location)\n    else:\n        raise OSError(errno.ENOENT, 'The file to load file system plugin from does not exist.', plugin_location)",
    "docstring": "Loads a TensorFlow FileSystem plugin. Args: plugin_location: Path to the plugin. Relative or absolute filesystem plugin path to a dynamic library file. Returns: None Raises: OSError: When the file to be loaded is not found. RuntimeError: when unable to load the library.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\load_library.py",
    "ast_data": "FunctionDef name:register_filesystem_plugin arg:plugin_location arguments arg If Call Call Raise Call Call"
  },
  {
    "library": "pytorch",
    "name": "register_module_parameter_registration_hook",
    "source_code": "def register_module_parameter_registration_hook(hook: Callable[..., None]) -> RemovableHandle:\n    handle = RemovableHandle(_global_parameter_registration_hooks)\n    _global_parameter_registration_hooks[handle.id] = hook\n    return handle",
    "docstring": "Register a parameter registration hook common to all modules. .. warning :: This adds global state to the module The hook will be called every time :func: is invoked. It should have the following signature:: hook(module, name, param) -> None or new parameter The hook can modify the input or return a single modified value in the hook. Returns: :class:: a handle that can be used to remove the added hook by calling ``",
    "type": "function",
    "file_path": "pytorch\\torch\\nn\\modules\\module.py",
    "ast_data": "FunctionDef name:register_module_parameter_registration_hook arg:hook arguments arg Assign Call Assign Return return:yes"
  },
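A short usage sketch of this global hook: it fires for every register_parameter call (including the ones nn.Linear makes in its constructor), and the returned handle detaches it again:

```python
import torch.nn as nn
from torch.nn.modules.module import register_module_parameter_registration_hook

def report_param(module, name, param):
    # Returning None keeps the parameter unchanged.
    print(f"registered {name} on {type(module).__name__}: {tuple(param.shape)}")

handle = register_module_parameter_registration_hook(report_param)
layer = nn.Linear(2, 3)  # prints lines for "weight" and "bias"
handle.remove()          # remove the global hook again
```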
  {
    "library": "pytorch",
    "name": "add_data",
    "source_code": "@staticmethod\ndef add_data(event_name: str, log_level: CompileEventLogLevel, overwrite: bool=False, **metadata: object):\n    chromium_log = get_chromium_event_logger()\n    pt2_compile_substack = chromium_log.get_pt2_compile_substack()\n    if log_level == CompileEventLogLevel.CHROMIUM:\n        chromium_log.add_event_data(event_name, **metadata)\n    elif log_level == CompileEventLogLevel.PT2_COMPILE:\n        pt2_compile_substack = chromium_log.get_pt2_compile_substack()\n        if event_name not in pt2_compile_substack:\n            raise RuntimeError('Error: specified log level PT2_COMPILE, but the event %s is not logged to pt2_compile_events. Make sure the event is active and you passed log_pt2_compile_event=True to dynamo_timed', event_name)\n        chromium_log.add_event_data(event_name, **metadata)\n    else:\n        assert log_level == CompileEventLogLevel.COMPILATION_METRIC\n        top_event = chromium_log.get_outermost_event()\n        if event_name != top_event:\n            raise RuntimeError(\"Log level is COMPILATION_METRIC, but event_name isn't the toplevel event. CompilationMetrics must be logged to the toplevel event. Consider using `log_toplevel_event_data` directly.\")\n        metrics_context = get_metrics_context()\n        if not metrics_context.in_progress():\n            raise RuntimeError('No metrics context is in progress. Please only call this function within a metrics context.')\n        metrics_context.update(metadata, overwrite)\n        chromium_log.add_event_data(event_name, **metadata)",
    "docstring": "Centralized API for adding data to various events Log an event to a toplevel \"dynamo\" event or metrics context depending on log level.",
    "type": "method",
    "file_path": "pytorch\\torch\\_dynamo\\utils.py",
    "ast_data": "FunctionDef name:add_data arg:event_name arg:log_level arg:overwrite arguments arg arg arg arg Assign Call Assign Call If Compare Call If Compare Assign Call If Compare Raise Call Call Compare Assign Call If Compare Raise Call Assign Call If Call Raise Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "extract_shape_from_varargs",
    "source_code": "def extract_shape_from_varargs(shape: Union[ShapeType, tuple[ShapeType]], validate=True) -> tuple[int, ...]:\n    if len(shape) == 1 and isinstance(shape[0], Sequence):\n        shape = shape[0]\n    if validate:\n        validate_shape(shape)\n    return shape",
    "docstring": "Returns a shape from varargs. In PyTorch, operations that accept shapes often accept them as varargs, like foo(*shape). However a user can pass the shape as a sequence of integers, like this: foo(1, 2, 3) or as a sequence of integers foo((1, 2, 3)) In the first case shape will be a tuple of integers, and in the second case it's a tuple containing a tuple of integers. This validates those inputs and canonicalizes them to a tuple of integers.",
    "type": "function",
    "file_path": "pytorch\\torch\\_prims_common\\__init__.py",
    "ast_data": "FunctionDef name:extract_shape_from_varargs arg:shape arg:validate arguments arg arg If BoolOp Compare Call Call Assign If Call Return return:yes"
  },
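A standalone sketch of the same varargs-normalization idea, not the PyTorch internal itself; `normalize_shape` is a hypothetical name, and the non-negative-dimension check stands in for the original's `validate_shape`.

```python
from collections.abc import Sequence

def normalize_shape(*shape):
    # Accept both foo(1, 2, 3) and foo((1, 2, 3)): unwrap a single nested sequence.
    if len(shape) == 1 and isinstance(shape[0], Sequence):
        shape = tuple(shape[0])
    if not all(isinstance(d, int) and d >= 0 for d in shape):
        raise ValueError(f"invalid shape: {shape}")
    return tuple(shape)

assert normalize_shape(1, 2, 3) == normalize_shape((1, 2, 3)) == (1, 2, 3)
```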
  {
    "library": "matplotlib",
    "name": "to_rgb",
    "source_code": "def to_rgb(c):\n    return to_rgba(c)[:3]",
    "docstring": "Convert the :mpltype: *c* to an RGB color tuple. If c has an alpha channel value specified, that is silently dropped.",
    "type": "function",
    "file_path": "matplotlib\\lib\\matplotlib\\colors.py",
    "ast_data": "FunctionDef name:to_rgb arg:c arguments arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "get_legacy_output_types",
    "source_code": "@tf_export(v1=['data.get_output_types'])\ndef get_legacy_output_types(dataset_or_iterator):\n    return nest.map_structure(lambda component_spec: component_spec._to_legacy_output_types(), get_structure(dataset_or_iterator))",
    "docstring": "Returns the output shapes for elements of the input dataset / iterator. Args: dataset_or_iterator: A or . Returns: A (nested) structure of objects matching the structure of dataset / iterator elements and specifying the shape of the individual components. @compatibility(TF2) This is a legacy API for inspecting the type signature of dataset elements. In TF 2, you should use the attribute instead. @end_compatibility",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\data\\ops\\dataset_ops.py",
    "ast_data": "FunctionDef name:get_legacy_output_types arg:dataset_or_iterator arguments arg Return return:yes Call arguments arg Call Call Call"
  },
  {
    "library": "scrapy",
    "name": "processProxyResponse",
    "source_code": "def processProxyResponse(self, data: bytes) -> None:\n    assert self._protocol.transport\n    self._connectBuffer += data\n    if b'\\r\\n\\r\\n' not in self._connectBuffer:\n        return\n    self._protocol.dataReceived = self._protocolDataReceived\n    respm = TunnelingTCP4ClientEndpoint._responseMatcher.match(self._connectBuffer)\n    if respm and int(respm.group('status')) == 200:\n        sslOptions = self._contextFactory.creatorForNetloc(self._tunneledHost, self._tunneledPort)\n        self._protocol.transport.startTLS(sslOptions, self._protocolFactory)\n        self._tunnelReadyDeferred.callback(self._protocol)\n    else:\n        extra: Any\n        if respm:\n            extra = {'status': int(respm.group('status')), 'reason': respm.group('reason').strip()}\n        else:\n            extra = data[:self._truncatedLength]\n        self._tunnelReadyDeferred.errback(TunnelError(f'Could not open CONNECT tunnel with proxy {self._host}:{self._port} [{extra!r}]'))",
    "docstring": "Processes the response from the proxy. If the tunnel is successfully created, notifies the client that we are ready to send requests. If not raises a TunnelError.",
    "type": "method",
    "file_path": "scrapy\\scrapy\\core\\downloader\\handlers\\http11.py",
    "ast_data": "FunctionDef name:processProxyResponse arg:self arg:data arguments arg arg If Compare Return return:no Assign Assign Call If BoolOp Compare Call Call Assign Call Call Call If Assign Call Call Call Call Assign Call Call"
  },
  {
    "library": "pandas",
    "name": "check_case_sensitive",
    "source_code": "def check_case_sensitive(self, name: str, schema: str | None) -> None:\n    if not name.isdigit() and (not name.islower()):\n        from sqlalchemy import inspect as sqlalchemy_inspect\n        insp = sqlalchemy_inspect(self.con)\n        table_names = insp.get_table_names(schema=schema or self.meta.schema)\n        if name not in table_names:\n            msg = f\"The provided table name '{name}' is not found exactly as such in the database after writing the table, possibly due to case sensitivity issues. Consider using lower case table names.\"\n            warnings.warn(msg, UserWarning, stacklevel=find_stack_level())",
    "docstring": "Checks table name for issues with case-sensitivity. Method is called after data is inserted.",
    "type": "method",
    "file_path": "pandas\\pandas\\io\\sql.py",
    "ast_data": "FunctionDef name:check_case_sensitive arg:self arg:name arg:schema arguments arg arg arg If BoolOp Call Call Assign Call Assign Call BoolOp If Compare Assign Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_get_master_target",
    "source_code": "def _get_master_target(self):\n    if not self._cluster_spec or self._task_type == _TaskType.EVALUATOR:\n        return ''\n    if not self._task_type:\n        if _TaskType.CHIEF in self._cluster_spec.jobs:\n            task_type = _TaskType.CHIEF\n            task_id = 0\n        else:\n            assert _TaskType.WORKER in self._cluster_spec.jobs\n            task_type = _TaskType.WORKER\n            task_id = 0\n    else:\n        task_type = self._task_type\n        task_id = self._task_id\n    prefix = ''\n    if self._rpc_layer:\n        prefix = self._rpc_layer + '://'\n    return prefix + self._cluster_spec.job_tasks(task_type)[task_id or 0]",
    "docstring": "Return the master target for a task.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\distribute\\distribute_coordinator_utils.py",
    "ast_data": "FunctionDef name:_get_master_target arg:self arguments arg If BoolOp Compare Return return:yes If If Compare Assign Assign Compare Assign Assign Assign Assign Assign If Assign Return return:yes Call BoolOp"
  },
  {
    "library": "django",
    "name": "union",
    "source_code": "def union(self, other):\n    return self._topology(capi.geos_union(self.ptr, other.ptr))",
    "docstring": "Return a Geometry representing all the points in this Geometry and other.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\geos\\geometry.py",
    "ast_data": "FunctionDef name:union arg:self arg:other arguments arg arg Return return:yes Call Call"
  },
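A small usage sketch for `union`, assuming GeoDjango with the GEOS library available; the coordinates are illustrative.

```python
from django.contrib.gis.geos import Polygon

a = Polygon(((0, 0), (0, 2), (2, 2), (2, 0), (0, 0)))
b = Polygon(((1, 1), (1, 3), (3, 3), (3, 1), (1, 1)))

u = a.union(b)   # all points in either geometry
print(u.area)    # 4 + 4 - 1 = 7.0 for these overlapping squares
```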
  {
    "library": "pytorch",
    "name": "is_parent_module_of",
    "source_code": "def is_parent_module_of(self, node: _IRNode) -> bool:\n    return node.stack_meta.is_superset_of(self.stack_meta)",
    "docstring": "Determines if this node represents a parent module of the provided node.",
    "type": "method",
    "file_path": "pytorch\\torch\\onnx\\_internal\\fx\\passes\\modularization.py",
    "ast_data": "FunctionDef name:is_parent_module_of arg:self arg:node arguments arg arg Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "staged_predict",
    "source_code": "def staged_predict(self, X):\n    X = self._check_X(X)\n    n_classes = self.n_classes_\n    classes = self.classes_\n    if n_classes == 2:\n        for pred in self.staged_decision_function(X):\n            yield np.array(classes.take(pred > 0, axis=0))\n    else:\n        for pred in self.staged_decision_function(X):\n            yield np.array(classes.take(np.argmax(pred, axis=1), axis=0))",
    "docstring": "Return staged predictions for X. The predicted class of an input sample is computed as the weighted mean prediction of the classifiers in the ensemble. This generator method yields the ensemble prediction after each iteration of boosting and therefore allows monitoring, such as to determine the prediction on a test set after each boost. Parameters ---------- X : array-like of shape (n_samples, n_features) The input samples. Sparse matrix can be CSC, CSR, COO, DOK, or LIL. COO, DOK, and LIL are converted to CSR. Yields ------ y : generator of ndarray of shape (n_samples,) The predicted classes.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\ensemble\\_weight_boosting.py",
    "ast_data": "FunctionDef name:staged_predict arg:self arg:X arguments arg arg Assign Call Assign Assign If Compare For Call Call Call Compare For Call Call Call Call"
  },
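A short monitoring sketch using the public estimator this method belongs to; the dataset and hyperparameters are illustrative.

```python
from sklearn.datasets import make_classification
from sklearn.ensemble import AdaBoostClassifier

X, y = make_classification(n_samples=200, random_state=0)
clf = AdaBoostClassifier(n_estimators=10, random_state=0).fit(X, y)

# One prediction array per boosting iteration, useful for monitoring.
for i, y_pred in enumerate(clf.staged_predict(X), start=1):
    print(f"after {i:2d} boosts: train accuracy = {(y_pred == y).mean():.3f}")
```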
  {
    "library": "django",
    "name": "open",
    "source_code": "def open(self):\n    pass",
    "docstring": "Open a network connection. This method can be overwritten by backend implementations to open a network connection. It's up to the backend implementation to track the status of a network connection if it's needed by the backend. This method can be called by applications to force a single network connection to be used when sending mails. See the send_messages() method of the SMTP backend for a reference implementation. The default implementation does nothing.",
    "type": "method",
    "file_path": "django\\django\\core\\mail\\backends\\base.py",
    "ast_data": "FunctionDef name:open arg:self arguments arg"
  },
  {
    "library": "tensorflow",
    "name": "metric_variable",
    "source_code": "def metric_variable(shape, dtype, validate_shape=True, name=None):\n    return variable_v1.VariableV1(lambda: array_ops.zeros(shape, dtype), trainable=False, collections=[ops.GraphKeys.LOCAL_VARIABLES, ops.GraphKeys.METRIC_VARIABLES], validate_shape=validate_shape, synchronization=variables.VariableSynchronization.ON_READ, aggregation=variables.VariableAggregation.SUM, name=name)",
    "docstring": "Create variable in collections. If running in a context, the variable will be \"sync on read\". This means: * The returned object will be a container with separate variables per replica of the model. * When writing to the variable, e.g. using in a metric update, the update will be applied to the variable local to the replica. * To get a metric's result value, we need to sum the variable values across the replicas before computing the final answer. Furthermore, the final answer should be computed once instead of in every replica. Both of these are accomplished by running the computation of the final result value inside . Inside the , ops are only added to the graph once and access to a sync on read variable in a computation returns the sum across all replicas. Args: shape: Shape of the created variable. dtype: Type of the created variable. validate_shape: (Optional) Whether shape validation is enabled for the created variable. name: (Optional) String name of the created variable. Returns: A (non-trainable) variable initialized to zero, or if inside a scope a sync on read variable container.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\metrics_impl.py",
    "ast_data": "FunctionDef name:metric_variable arg:shape arg:dtype arg:validate_shape arg:name arguments arg arg arg arg Return return:yes Call arguments Call"
  },
  {
    "library": "pygame",
    "name": "kill",
    "source_code": "def kill(self):\n    for group in self.__g:\n        group.remove_internal(self)\n    self.__g.clear()",
    "docstring": "remove the Sprite from all Groups Sprite.kill(): return None The Sprite is removed from all the Groups that contain it. This won't change anything about the state of the Sprite. It is possible to continue to use the Sprite after this method has been called, including adding it to Groups.",
    "type": "method",
    "file_path": "pygame\\src_py\\sprite.py",
    "ast_data": "FunctionDef name:kill arg:self arguments arg For Call Call"
  },
  {
    "library": "sphinx",
    "name": "find_subsections",
    "source_code": "def find_subsections(section: Element) -> list[nodes.section]:\n    result = []\n    for child in section:\n        if isinstance(child, nodes.section):\n            result.append(child)\n            continue\n        if isinstance(child, nodes.Element):\n            result.extend(find_subsections(child))\n    return result",
    "docstring": "Return a list of subsections for the given ``.",
    "type": "function",
    "file_path": "sphinx\\sphinx\\writers\\texinfo.py",
    "ast_data": "FunctionDef name:find_subsections arg:section arguments arg Assign For If Call Call If Call Call Call Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "_read_params",
    "source_code": "def _read_params(name, value, non_default_params):\n    r = reprlib.Repr()\n    r.maxlist = 2\n    r.maxtuple = 1\n    r.maxstring = 50\n    cleaned_value = html.escape(r.repr(value))\n    param_type = 'user-set' if name in non_default_params else 'default'\n    return {'param_type': param_type, 'param_name': name, 'param_value': cleaned_value}",
    "docstring": "Categorizes parameters as 'default' or 'user-set' and formats their values. Escapes or truncates parameter values for display safety and readability.",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\utils\\_repr_html\\params.py",
    "ast_data": "FunctionDef name:_read_params arg:name arg:value arg:non_default_params arguments arg arg arg Assign Call Assign Assign Assign Assign Call Call Assign Compare Return return:yes"
  },
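The truncation behavior above relies on the standard-library `reprlib` module; a minimal demonstration of the same settings.

```python
import html
import reprlib

r = reprlib.Repr()
r.maxlist = 2       # show at most two list elements
r.maxstring = 50    # truncate long strings

value = list(range(100))
print(html.escape(r.repr(value)))  # '[0, 1, ...]'
```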
  {
    "library": "tensorflow",
    "name": "_uniquify_fetches",
    "source_code": "def _uniquify_fetches(fetch_mappers):\n    unique_fetches = []\n    value_indices = []\n    seen_fetches = {}\n    for m in fetch_mappers:\n        m_value_indices = []\n        for f in m.unique_fetches():\n            j = seen_fetches.get(id(f))\n            if j is None:\n                j = len(seen_fetches)\n                seen_fetches[id(f)] = j\n                unique_fetches.append(f)\n            m_value_indices.append(j)\n        value_indices.append(m_value_indices)\n    return (unique_fetches, value_indices)",
    "docstring": "Uniquifies fetches from a list of fetch_mappers. This is a utility function used by _ListFetchMapper and _DictFetchMapper. It gathers all the unique fetches from a list of mappers and builds a list containing all of them but without duplicates (unique_fetches). It also returns a 2-D list of integers (values_indices) indicating at which index in unique_fetches the fetches of the mappers are located. This list is as follows: values_indices[mapper_index][mapper_fetch_index] = unique_fetches_index Args: fetch_mappers: list of fetch mappers. Returns: A list of fetches. A 2-D list of integers.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\client\\session.py",
    "ast_data": "FunctionDef name:_uniquify_fetches arg:fetch_mappers arguments arg Assign Assign Assign For Assign For Call Assign Call Call If Compare Assign Call Assign Call Call Call Call Return return:yes"
  },
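A self-contained sketch of the same uniquification idea: deduplicate items by identity across groups while remembering, per group, where each item landed in the unique list. Names here are hypothetical.

```python
def uniquify(groups):
    unique, indices, seen = [], [], {}
    for group in groups:
        row = []
        for item in group:
            key = id(item)          # identity-based, like the original
            if key not in seen:
                seen[key] = len(unique)
                unique.append(item)
            row.append(seen[key])
        indices.append(row)
    return unique, indices

a, b = object(), object()
unique, indices = uniquify([[a, b], [b, a]])
assert len(unique) == 2 and indices == [[0, 1], [1, 0]]
```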
  {
    "library": "sphinx",
    "name": "update_node_id",
    "source_code": "def update_node_id(node: Element) -> None:\n    new_ids: list[str] = []\n    for node_id in node['ids']:\n        new_id = self.fix_fragment('', node_id)\n        if new_id not in new_ids:\n            new_ids.append(new_id)\n    node['ids'] = new_ids",
    "docstring": "Update IDs of given *node*.",
    "type": "method",
    "file_path": "sphinx\\sphinx\\builders\\_epub_base.py",
    "ast_data": "FunctionDef name:update_node_id arg:node arguments arg For Assign Call If Compare Call Assign"
  },
  {
    "library": "sphinx",
    "name": "SphinxSmartQuotes",
    "source_code": "class SphinxSmartQuotes(SmartQuotes, SphinxTransform):\n    default_priority = 750\n\n    def apply(self, **kwargs: Any) -> None:\n        if not self.is_available():\n            return\n        self.smartquotes_action = self.config.smartquotes_action\n        super().apply()\n\n    def is_available(self) -> bool:\n        builders = self.config.smartquotes_excludes.get('builders', [])\n        languages = self.config.smartquotes_excludes.get('languages', [])\n        if self.document.settings.smart_quotes is False:\n            return False\n        if self.config.smartquotes is False:\n            return False\n        if self.app.builder.name in builders:\n            return False\n        if self.config.language in languages:\n            return False\n        language = self.env.settings['language_code']\n        return any((tag in smartchars.quotes for tag in normalize_language_tag(language)))\n\n    def get_tokens(self, txtnodes: list[Text]) -> Iterator[tuple[str, str]]:\n        for txtnode in txtnodes:\n            if is_smartquotable(txtnode):\n                text = re.sub('(?<=\\\\x00)([-\\\\\\\\\\\\\\'\".`])', '\\\\\\\\\\\\1', str(txtnode))\n                yield ('plain', text)\n            else:\n                yield ('literal', txtnode.astext())",
    "docstring": "Customized SmartQuotes to avoid transform for some extra node types. refs: sphinx.parsers.RSTParser",
    "type": "class",
    "file_path": "sphinx\\sphinx\\transforms\\__init__.py",
    "ast_data": "ClassDef name:SphinxSmartQuotes Assign FunctionDef name:apply arg:self arguments arg arg If Call Return return:no Assign Call Call FunctionDef name:is_available arg:self arguments arg Assign Call Assign Call If Compare Return return:yes If Compare Return return:yes If Compare Return return:yes If Compare Return return:yes Assign Return return:yes Call Compare Call FunctionDef name:get_tokens arg:self arg:txtnodes arguments arg arg For If Call Assign Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "from_proto",
    "source_code": "@staticmethod\ndef from_proto(context_def, import_scope=None):\n    ret = WhileContext(context_def=context_def, import_scope=import_scope)\n    ret.Enter()\n    for nested_def in context_def.nested_contexts:\n        from_control_flow_context_def(nested_def, import_scope=import_scope)\n    ret.Exit()\n    return ret",
    "docstring": "Returns a object created from . Args: context_def: A protocol buffer. import_scope: Optional . Name scope to add. Returns: A Python object.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\control_flow_ops.py",
    "ast_data": "FunctionDef name:from_proto arg:context_def arg:import_scope arguments arg arg Assign Call Call For Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_maybe_name",
    "source_code": "def _maybe_name(obj) -> str:\n    if obj is None:\n        return 'None'\n    elif hasattr(obj, 'name'):\n        return obj.name\n    else:\n        return '<no name for %s>' % type(obj)",
    "docstring": "Returns object name if it has one, or a message otherwise. This is useful for names that apper in error messages. Args: obj: Object to get the name of. Returns: name, \"None\", or a \"no name\" message.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\training\\session_manager.py",
    "ast_data": "FunctionDef name:_maybe_name arg:obj arguments arg If Compare Return return:yes If Call Return return:yes Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "construct_pattern_matcher_pass",
    "source_code": "def construct_pattern_matcher_pass(pass_name: str):\n    if pass_name in PRE_GRAD_PATTERNS:\n        return PRE_GRAD_PATTERNS[pass_name]\n    else:\n        return POST_GRAD_PATTERNS[pass_name]",
    "docstring": "Return the specific pattern_matcher_pass given the pass name.",
    "type": "function",
    "file_path": "pytorch\\torch\\_inductor\\fx_passes\\split_cat.py",
    "ast_data": "FunctionDef name:construct_pattern_matcher_pass arg:pass_name arguments arg If Compare Return return:yes Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "element_spec",
    "source_code": "@abc.abstractproperty\ndef element_spec(self):\n    raise NotImplementedError('Optional.element_spec')",
    "docstring": "The type specification of an element of this optional. >>> optional = tf.experimental.Optional.from_value(42) >>> print(optional.element_spec) tf.TensorSpec(shape=(), dtype=tf.int32, name=None) Returns: A (nested) structure of objects matching the structure of an element of this optional, specifying the type of individual components.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\data\\ops\\optional_ops.py",
    "ast_data": "FunctionDef name:element_spec arg:self arguments arg Raise Call"
  },
  {
    "library": "tensorflow",
    "name": "InternalTracingContext",
    "source_code": "class InternalTracingContext(trace.TracingContext):\n\n    def __init__(self, is_legacy_signature: bool=False):\n        self._global_to_local_id = {}\n        self._alias_id_to_placeholder = {}\n        self._is_legacy_signature = is_legacy_signature\n\n    def alias_global_id(self, global_id: Hashable) -> Hashable:\n        if global_id not in self._global_to_local_id:\n            self._global_to_local_id[global_id] = len(self._global_to_local_id)\n        return self._global_to_local_id[global_id]\n\n    def add_placeholder(self, alias_id: Hashable, variable) -> None:\n        self._alias_id_to_placeholder[alias_id] = variable\n\n    def get_placeholder_mapping(self) -> Dict[Hashable, Any]:\n        return self._alias_id_to_placeholder\n\n    @property\n    def is_legacy_signature(self) -> bool:\n        return self._is_legacy_signature",
    "docstring": "Container for variables and flags shared across TraceType generation.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\core\\function\\trace_type\\trace_type_builder.py",
    "ast_data": "ClassDef name:InternalTracingContext FunctionDef name:__init__ arg:self arg:is_legacy_signature arguments arg arg Assign Assign Assign FunctionDef name:alias_global_id arg:self arg:global_id arguments arg arg If Compare Assign Call Return return:yes FunctionDef name:add_placeholder arg:self arg:alias_id arg:variable arguments arg arg arg Assign FunctionDef name:get_placeholder_mapping arg:self arguments arg Return return:yes FunctionDef name:is_legacy_signature arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_FetchMapper",
    "source_code": "class _FetchMapper(object):\n\n    def unique_fetches(self):\n        raise NotImplementedError('unique_fetches must be implemented by subclasses')\n\n    def build_results(self, values):\n        raise NotImplementedError('build_results must be implemented by subclasses')\n\n    @staticmethod\n    def for_fetch(fetch):\n        if fetch is None:\n            raise TypeError(f'Argument `fetch` = {fetch} has invalid type \"{type(fetch).__name__}\". Cannot be None')\n        elif isinstance(fetch, (list, tuple)):\n            return _ListFetchMapper(fetch)\n        elif isinstance(fetch, collections_abc.Mapping):\n            return _DictFetchMapper(fetch)\n        elif _is_attrs_instance(fetch):\n            return _AttrsFetchMapper(fetch)\n        else:\n            for tensor_type, fetch_fn, _, _ in _REGISTERED_EXPANSIONS:\n                if isinstance(fetch, tensor_type):\n                    fetches, contraction_fn = fetch_fn(fetch)\n                    return _ElementFetchMapper(fetches, contraction_fn)\n        raise TypeError(f'Argument `fetch` = {fetch} has invalid type \"{type(fetch).__name__}\"')",
    "docstring": "Definition of the interface provided by fetch mappers. Fetch mappers are utility classes used by the _FetchHandler to handle arbitrary structures for the argument to . The argument can be of various shapes: single tensor or op, list of fetches, tuple of fetches, namedtuple of fetches, or dict of fetches. The structures can be arbitrarily nested. The low level run() API only wants a list of tensor or op names. The various subclasses below take care of handling the different shapes: uniquifying the fetches, and constructing results with the original shape.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\client\\session.py",
    "ast_data": "ClassDef name:_FetchMapper FunctionDef name:unique_fetches arg:self arguments arg Raise Call FunctionDef name:build_results arg:self arg:values arguments arg arg Raise Call FunctionDef name:for_fetch arg:fetch arguments arg If Compare Raise Call Call If Call Return return:yes Call If Call Return return:yes Call If Call Return return:yes Call For If Call Assign Call Return return:yes Call Raise Call Call"
  },
  {
    "library": "pytorch",
    "name": "keystr",
    "source_code": "def keystr(kp: KeyPath) -> str:\n    raise NotImplementedError('KeyPaths are not yet supported in cxx_pytree.')",
    "docstring": "Given a key path, return a pretty-printed representation.",
    "type": "function",
    "file_path": "pytorch\\torch\\utils\\_cxx_pytree.py",
    "ast_data": "FunctionDef name:keystr arg:kp arguments arg Raise Call"
  },
  {
    "library": "numpy",
    "name": "_hist_bin_doane",
    "source_code": "def _hist_bin_doane(x, range):\n    del range\n    if x.size > 2:\n        sg1 = np.sqrt(6.0 * (x.size - 2) / ((x.size + 1.0) * (x.size + 3)))\n        sigma = np.std(x)\n        if sigma > 0.0:\n            temp = x - np.mean(x)\n            np.true_divide(temp, sigma, temp)\n            np.power(temp, 3, temp)\n            g1 = np.mean(temp)\n            return _ptp(x) / (1.0 + np.log2(x.size) + np.log2(1.0 + np.absolute(g1) / sg1))\n    return 0.0",
    "docstring": "Doane's histogram bin estimator. Improved version of Sturges' formula which works better for non-normal data. See stats.stackexchange.com/questions/55134/doanes-formula-for-histogram-binning Parameters ---------- x : array_like Input data that is to be histogrammed, trimmed to range. May not be empty. Returns ------- h : An estimate of the optimal bin width for the given data.",
    "type": "function",
    "file_path": "numpy\\numpy\\lib\\_histograms_impl.py",
    "ast_data": "FunctionDef name:_hist_bin_doane arg:x arg:range arguments arg arg If Compare Assign Call Assign Call If Compare Assign Call Call Call Assign Call Return return:yes Call Call Call Call Return return:yes"
  },
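This estimator is reachable through NumPy's public histogram API by passing `bins="doane"`; a brief usage sketch on skewed data.

```python
import numpy as np

rng = np.random.default_rng(0)
x = rng.lognormal(size=1000)  # skewed, non-normal data

edges = np.histogram_bin_edges(x, bins="doane")
counts, _ = np.histogram(x, bins="doane")
print(len(edges) - 1, "bins")  # Doane adapts the bin count to the skewness
```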
  {
    "library": "pytorch",
    "name": "calculate_gain",
    "source_code": "def calculate_gain(nonlinearity, param=None):\n    linear_fns = ['linear', 'conv1d', 'conv2d', 'conv3d', 'conv_transpose1d', 'conv_transpose2d', 'conv_transpose3d']\n    if nonlinearity in linear_fns or nonlinearity == 'sigmoid':\n        return 1\n    elif nonlinearity == 'tanh':\n        return 5.0 / 3\n    elif nonlinearity == 'relu':\n        return math.sqrt(2.0)\n    elif nonlinearity == 'leaky_relu':\n        if param is None:\n            negative_slope = 0.01\n        elif not isinstance(param, bool) and isinstance(param, int) or isinstance(param, float):\n            negative_slope = param\n        else:\n            raise ValueError(f'negative_slope {param} not a valid number')\n        return math.sqrt(2.0 / (1 + negative_slope ** 2))\n    elif nonlinearity == 'selu':\n        return 3.0 / 4\n    else:\n        raise ValueError(f'Unsupported nonlinearity {nonlinearity}')",
    "docstring": "Return the recommended gain value for the given nonlinearity function. The values are as follows: ================= ==================================================== nonlinearity gain ================= ==================================================== Linear / Identity :math: Conv{1,2,3}D :math: Sigmoid :math: Tanh :math: ReLU :math: Leaky Relu :math: SELU :math: ================= ==================================================== .. warning:: In order to implement _ , you should use `nn.functional` name) param: optional parameter for the non-linear function Examples: >>> gain = nn.init.calculate_gain('leaky_relu', 0.2) # leaky_relu with negative_slope=0.2 .. _Self-Normalizing Neural Networks:",
    "type": "function",
    "file_path": "pytorch\\torch\\nn\\init.py",
    "ast_data": "FunctionDef name:calculate_gain arg:nonlinearity arg:param arguments arg arg Assign If BoolOp Compare Compare Return return:yes If Compare Return return:yes If Compare Return return:yes Call If Compare If Compare Assign If BoolOp BoolOp Call Call Call Assign Raise Call Return return:yes Call If Compare Return return:yes Raise Call"
  },
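A typical pairing of `calculate_gain` with an initializer, extending the docstring's example; the layer sizes are illustrative.

```python
import torch.nn as nn

w = nn.Linear(128, 64).weight
gain = nn.init.calculate_gain("leaky_relu", 0.2)  # sqrt(2 / (1 + 0.2**2))
nn.init.xavier_uniform_(w, gain=gain)
```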
  {
    "library": "tensorflow",
    "name": "IsSwitch",
    "source_code": "def IsSwitch(op):\n    return op.type == 'Switch' or op.type == 'RefSwitch'",
    "docstring": "Return true if is a Switch.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\control_flow_util.py",
    "ast_data": "FunctionDef name:IsSwitch arg:op arguments arg Return return:yes BoolOp Compare Compare"
  },
  {
    "library": "matplotlib",
    "name": "show",
    "source_code": "def show(self):\n    if sys.platform == 'linux' and (not os.environ.get('DISPLAY')):\n        return\n    raise NonGuiException(f'{type(self.canvas).__name__} is non-interactive, and thus cannot be shown')",
    "docstring": "For GUI backends, show the figure window and redraw. For non-GUI backends, raise an exception, unless running headless (i.e. on Linux with an unset DISPLAY); this exception is converted to a warning in .",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\backend_bases.py",
    "ast_data": "FunctionDef name:show arg:self arguments arg If BoolOp Compare Call Return return:no Raise Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_first_layer_gather_index",
    "source_code": "def _first_layer_gather_index(nrows_source, nrows_target):\n\n    def gi_broadcast_first():\n        return array_ops.zeros(nrows_target, dtype=nrows_target.dtype)\n\n    def gi_no_broadcast_first():\n        gather_index = math_ops.range(nrows_target, dtype=nrows_target.dtype)\n        return gather_index\n    do_broadcast = math_ops.equal(nrows_source, constant_op.constant(1, nrows_source.dtype))\n    nrows_equal = math_ops.equal(nrows_source, nrows_target)\n    can_broadcast = check_ops.assert_equal(math_ops.logical_or(do_broadcast, nrows_equal), True, message='Cannot broadcast')\n    gather_index = cond.cond(do_broadcast, true_fn=gi_broadcast_first, false_fn=gi_no_broadcast_first)\n    return control_flow_ops.with_dependencies([can_broadcast], gather_index)",
    "docstring": "Return the first layer gather_index. Args: nrows_source: the number of rows in the source. nrows_target: the number of rows in the target. Returns: A tensor, usable as a gather_index for a _LayerBroadcaster.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\dynamic_ragged_shape.py",
    "ast_data": "FunctionDef name:_first_layer_gather_index arg:nrows_source arg:nrows_target arguments arg arg FunctionDef name:gi_broadcast_first arguments Return return:yes Call FunctionDef name:gi_no_broadcast_first arguments Assign Call Return return:yes Assign Call Call Assign Call Assign Call Call Assign Call Return return:yes Call"
  },
  {
    "library": "numpy",
    "name": "_size_to_string",
    "source_code": "@staticmethod\ndef _size_to_string(num_bytes):\n    LOG2_STEP = 10\n    STEP = 1024\n    units = ['bytes', 'KiB', 'MiB', 'GiB', 'TiB', 'PiB', 'EiB']\n    unit_i = max(num_bytes.bit_length() - 1, 1) // LOG2_STEP\n    unit_val = 1 << unit_i * LOG2_STEP\n    n_units = num_bytes / unit_val\n    del unit_val\n    if round(n_units) == STEP:\n        unit_i += 1\n        n_units /= STEP\n    if unit_i >= len(units):\n        new_unit_i = len(units) - 1\n        n_units *= 1 << (unit_i - new_unit_i) * LOG2_STEP\n        unit_i = new_unit_i\n    unit_name = units[unit_i]\n    if unit_i == 0:\n        return f'{n_units:.0f} {unit_name}'\n    elif round(n_units) < 1000:\n        return f'{n_units:#.3g} {unit_name}'\n    else:\n        return f'{n_units:#.0f} {unit_name}'",
    "docstring": "Convert a number of bytes into a binary size string",
    "type": "method",
    "file_path": "numpy\\numpy\\_core\\_exceptions.py",
    "ast_data": "FunctionDef name:_size_to_string arg:num_bytes arguments arg Assign Assign Assign Assign Call Call Assign Assign If Compare Call If Compare Call Assign Call Assign Assign If Compare Return return:yes If Compare Call Return return:yes Return return:yes"
  },
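A simplified standalone sketch of the same formatting idea (using `bit_length` as a cheap integer log2); it omits the original's rounding-edge and overflow handling, and `size_to_string` is a hypothetical name.

```python
def size_to_string(num_bytes: int) -> str:
    units = ["bytes", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB"]
    i = max(num_bytes.bit_length() - 1, 1) // 10   # floor(log2(n)) // 10
    i = min(i, len(units) - 1)
    n = num_bytes / (1 << (i * 10))
    return f"{n:.0f} {units[i]}" if i == 0 else f"{n:#.3g} {units[i]}"

print(size_to_string(1))      # '1 bytes'
print(size_to_string(1536))   # '1.50 KiB'
```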
  {
    "library": "tensorflow",
    "name": "_shared_object_loading_scope",
    "source_code": "def _shared_object_loading_scope():\n    return getattr(SHARED_OBJECT_LOADING, 'scope', NoopLoadingScope())",
    "docstring": "Get the current shared object saving scope in a threadsafe manner.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\utils\\generic_utils.py",
    "ast_data": "FunctionDef name:_shared_object_loading_scope arguments Return return:yes Call Call"
  },
  {
    "library": "pandas",
    "name": "__call__",
    "source_code": "def __call__(self, x, pos: int | None=0) -> str:\n    fmt = '%H:%M:%S.%f'\n    s = int(x)\n    msus = round((x - s) * 10 ** 6)\n    ms = msus // 1000\n    us = msus % 1000\n    m, s = divmod(s, 60)\n    h, m = divmod(m, 60)\n    _, h = divmod(h, 24)\n    if us != 0:\n        return pydt.time(h, m, s, msus).strftime(fmt)\n    elif ms != 0:\n        return pydt.time(h, m, s, msus).strftime(fmt)[:-3]\n    elif s != 0:\n        return pydt.time(h, m, s).strftime('%H:%M:%S')\n    return pydt.time(h, m).strftime('%H:%M')",
    "docstring": "Return the time of day as a formatted string. Parameters ---------- x : float The time of day specified as seconds since 00:00 (midnight), with up to microsecond precision. pos Unused Returns ------- str A string in HH:MM:SS.mmmuuu format. Microseconds, milliseconds and seconds are only displayed if non-zero.",
    "type": "method",
    "file_path": "pandas\\pandas\\plotting\\_matplotlib\\converter.py",
    "ast_data": "FunctionDef name:__call__ arg:self arg:x arg:pos arguments arg arg arg Assign Assign Call Assign Call Assign Assign Assign Call Assign Call Assign Call If Compare Return return:yes Call Call If Compare Return return:yes Call Call If Compare Return return:yes Call Call Return return:yes Call Call"
  },
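A standalone sketch of the same second-splitting logic; unlike the original it always prints full microsecond precision rather than trimming to milliseconds, and `fmt_seconds` is a hypothetical name.

```python
import datetime as pydt

def fmt_seconds(x: float) -> str:
    s = int(x)
    msus = round((x - s) * 10**6)   # sub-second part in microseconds
    m, s = divmod(s, 60)
    h, m = divmod(m, 60)
    _, h = divmod(h, 24)
    if msus:
        return pydt.time(h, m, s, msus).strftime("%H:%M:%S.%f")
    if s:
        return pydt.time(h, m, s).strftime("%H:%M:%S")
    return pydt.time(h, m).strftime("%H:%M")

print(fmt_seconds(3661.25))  # '01:01:01.250000'
print(fmt_seconds(3660))     # '01:01'
```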
  {
    "library": "scikit-learn",
    "name": "AbsoluteError",
    "source_code": "class AbsoluteError(BaseLoss):\n    differentiable = False\n    need_update_leaves_values = True\n\n    def __init__(self, sample_weight=None):\n        super().__init__(closs=CyAbsoluteError(), link=IdentityLink())\n        self.approx_hessian = True\n        self.constant_hessian = sample_weight is None\n\n    def fit_intercept_only(self, y_true, sample_weight=None):\n        if sample_weight is None:\n            return np.median(y_true, axis=0)\n        else:\n            return _weighted_percentile(y_true, sample_weight, 50)",
    "docstring": "Absolute error with identity link, for regression. Domain: y_true and y_pred all real numbers Link: y_pred = raw_prediction For a given sample x_i, the absolute error is defined as:: loss(x_i) = |y_true_i - raw_prediction_i| Note that the exact hessian = 0 almost everywhere (except at one point, therefore differentiable = False). Optimization routines like in HGBT, however, need a hessian > 0. Therefore, we assign 1.",
    "type": "class",
    "file_path": "scikit-learn\\sklearn\\_loss\\loss.py",
    "ast_data": "ClassDef name:AbsoluteError Assign Assign FunctionDef name:__init__ arg:self arg:sample_weight arguments arg arg Call Call Call Call Assign Assign Compare FunctionDef name:fit_intercept_only arg:self arg:y_true arg:sample_weight arguments arg arg arg If Compare Return return:yes Call Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "_check_X",
    "source_code": "def _check_X(self, X):\n    return validate_data(self, X, reset=False)",
    "docstring": "Validate X, used only in predict* methods.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\naive_bayes.py",
    "ast_data": "FunctionDef name:_check_X arg:self arg:X arguments arg arg Return return:yes Call"
  },
  {
    "library": "cherrypy",
    "name": "clean_up",
    "source_code": "def clean_up(self):\n    pass",
    "docstring": "Clean up expired sessions.",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\lib\\sessions.py",
    "ast_data": "FunctionDef name:clean_up arg:self arguments arg"
  },
  {
    "library": "numpy",
    "name": "DummyArray",
    "source_code": "class DummyArray:\n\n    def __init__(self, interface, base=None):\n        self.__array_interface__ = interface\n        self.base = base",
    "docstring": "Dummy object that just exists to hang __array_interface__ dictionaries and possibly keep alive a reference to a base array.",
    "type": "class",
    "file_path": "numpy\\numpy\\lib\\_stride_tricks_impl.py",
    "ast_data": "ClassDef name:DummyArray FunctionDef name:__init__ arg:self arg:interface arg:base arguments arg arg arg Assign Assign"
  },
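A sketch of why such a carrier object is useful: NumPy's `__array_interface__` protocol lets `np.asarray` build a view over existing memory, with the carrier keeping the base buffer alive. `Carrier` here is a hypothetical stand-in for the class above.

```python
import numpy as np

class Carrier:
    def __init__(self, interface, base=None):
        self.__array_interface__ = interface
        self.base = base  # keeps the underlying buffer alive

base = np.arange(12, dtype=np.int64)
iface = dict(base.__array_interface__)
iface["shape"] = (3, 4)                 # reinterpret as a 3x4 view

view = np.asarray(Carrier(iface, base))
view[0, 0] = -1
assert base[0] == -1                    # the view shares memory with `base`
```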
  {
    "library": "tensorflow",
    "name": "create",
    "source_code": "def create(self, nodes, namer, inner_factory_name='inner_factory', outer_factory_name='outer_factory', future_features=()):\n    if self._unbound_factory is not None:\n        raise ValueError('double initialization; create a new object instead')\n    inner_factory_name = namer.new_symbol(inner_factory_name, ())\n    outer_factory_name = namer.new_symbol(outer_factory_name, ())\n    nodes = _wrap_into_factory(nodes, self._name, inner_factory_name, outer_factory_name, self._freevars, self._extra_locals.keys(), future_features)\n    module, _, source_map = loader.load_ast(nodes, include_source_map=True)\n    outer_factory = getattr(module, outer_factory_name)\n    self._unbound_factory = outer_factory()\n    self.module = module\n    self.source_map = source_map",
    "docstring": "Initializes a function.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\autograph\\pyct\\transpiler.py",
    "ast_data": "FunctionDef name:create arg:self arg:nodes arg:namer arg:inner_factory_name arg:outer_factory_name arg:future_features arguments arg arg arg arg arg arg If Compare Raise Call Assign Call Assign Call Assign Call Call Assign Call Assign Call Assign Call Assign Assign"
  },
  {
    "library": "django",
    "name": "templatetag",
    "source_code": "@register.tag\ndef templatetag(parser, token):\n    bits = token.contents.split()\n    if len(bits) != 2:\n        raise TemplateSyntaxError(\"'templatetag' statement takes one argument\")\n    tag = bits[1]\n    if tag not in TemplateTagNode.mapping:\n        raise TemplateSyntaxError(\"Invalid templatetag argument: '%s'. Must be one of: %s\" % (tag, list(TemplateTagNode.mapping)))\n    return TemplateTagNode(tag)",
    "docstring": "Output one of the bits used to compose template tags. Since the template system has no concept of \"escaping\", to display one of the bits used in template tags, you must use the `` ================== =======",
    "type": "function",
    "file_path": "django\\django\\template\\defaulttags.py",
    "ast_data": "FunctionDef name:templatetag arg:parser arg:token arguments arg arg Assign Call If Compare Call Raise Call Assign If Compare Raise Call Call Return return:yes Call"
  },
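A minimal standalone rendering sketch; it assumes the standalone `Engine` API with a bare `settings.configure()` is sufficient for this built-in tag.

```python
from django.conf import settings
from django.template import Context, Engine

if not settings.configured:
    settings.configure()  # minimal standalone setup

tpl = Engine().from_string(
    "{% templatetag openblock %} if user {% templatetag closeblock %}"
)
print(tpl.render(Context()))  # '{% if user %}'
```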
  {
    "library": "tensorflow",
    "name": "_find_header",
    "source_code": "def _find_header(base_paths, header_name, required_version, get_version):\n    return _find_versioned_file(base_paths, _header_paths(), header_name, required_version, get_version)",
    "docstring": "Returns first valid path to a header that matches the requested version.",
    "type": "function",
    "file_path": "tensorflow\\third_party\\xla\\third_party\\gpus\\find_cuda_config.py",
    "ast_data": "FunctionDef name:_find_header arg:base_paths arg:header_name arg:required_version arg:get_version arguments arg arg arg arg Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "name_scope",
    "source_code": "def name_scope(name, default_name=None, values=None, skip_on_eager=True) -> ContextManager[Optional[str]]:\n    if not context.executing_eagerly():\n        return internal_name_scope_v1(name, default_name, values)\n    if skip_on_eager:\n        return NullContextmanager()\n    name = default_name if name is None else name\n    if values:\n        graph_value = next((value for value in values if is_symbolic_tensor(value)), None)\n        if graph_value is not None:\n            return graph_value.graph.name_scope(name)\n    return name_scope_v2(name or '')",
    "docstring": "Internal-only entry point for . Internal ops do not use the public API and instead rely on regardless of the execution mode. This function dispatches to the correct implementation based on the arguments provided and the current mode. Specifically, * if contains a graph tensor is used; * is used in graph mode; * -- in eager mode. Args: name: The name argument that is passed to the op function. default_name: The default name to use if the argument is . values: The list of arguments that are passed to the op function. skip_on_eager: Indicates to return NullContextmanager if executing eagerly. By default this is True since naming tensors and operations in eager mode have little use and cause unnecessary performance overhead. However, it is important to preserve variable names since they are often useful for debugging and saved models. Returns: context manager.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\ops.py",
    "ast_data": "FunctionDef name:name_scope arg:name arg:default_name arg:values arg:skip_on_eager arguments arg arg arg arg If Call Return return:yes Call If Return return:yes Call Assign Compare If Assign Call Call If Compare Return return:yes Call Return return:yes Call BoolOp"
  },
  {
    "library": "pytorch",
    "name": "get_rdeps",
    "source_code": "def get_rdeps(self, module_name: str) -> list[str]:\n    if module_name in self.dependency_graph._pred.keys():\n        return list(self.dependency_graph._pred[module_name].keys())\n    else:\n        return []",
    "docstring": "Return a list of all modules which depend on the module ``.",
    "type": "method",
    "file_path": "pytorch\\torch\\package\\package_exporter.py",
    "ast_data": "FunctionDef name:get_rdeps arg:self arg:module_name arguments arg arg If Compare Call Return return:yes Call Call Return return:no"
  },
  {
    "library": "tensorflow",
    "name": "initializer",
    "source_code": "@property\ndef initializer(self) -> ops.Operation:\n    return self._initializer_op",
    "docstring": "The initializer operation for this variable.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\ref_variable.py",
    "ast_data": "FunctionDef name:initializer arg:self arguments arg Return return:yes"
  },
  {
    "library": "scrapy",
    "name": "NO_CALLBACK",
    "source_code": "def NO_CALLBACK(*args: Any, **kwargs: Any) -> NoReturn:\n    raise RuntimeError('The NO_CALLBACK callback has been called. This is a special callback value intended for requests whose callback is never meant to be called.')",
    "docstring": "When assigned to the `~scrapy.Requestcomponents scrapy.core.engine.ExecutionEngine.download~scrapy.Spider.parse` callback.",
    "type": "function",
    "file_path": "scrapy\\scrapy\\http\\request\\__init__.py",
    "ast_data": "FunctionDef name:NO_CALLBACK arguments arg arg Raise Call"
  },
  {
    "library": "tensorflow",
    "name": "inside_function",
    "source_code": "@tf_export('inside_function', v1=[])\ndef inside_function() -> bool:\n    return get_default_graph().building_function",
    "docstring": "Indicates whether the caller code is executing inside a . Returns: Boolean, True if the caller code is executing inside a rather than eagerly. Example: >>> tf.inside_function() False >>> @tf.function ... def f(): ... print(tf.inside_function()) >>> f() True",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\ops.py",
    "ast_data": "FunctionDef name:inside_function arguments Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "get_size_of_all_nodes",
    "source_code": "@compatibility(is_backward_compatible=False)\ndef get_size_of_all_nodes(fx_module: GraphModule, args: Optional[list[torch.Tensor]]=None) -> None:\n    if args is not None:\n        ShapeProp(fx_module).propagate(*args)\n    for node in fx_module.graph.nodes:\n        if node.op == 'output':\n            break\n        node.size_bytes = get_size_of_node(fx_module, node)\n    return",
    "docstring": "Given a fx graph module, update each node with its total size (weights + bias + output) and its output_size(output). For a non-module node, the total size is the output size. return total size",
    "type": "function",
    "file_path": "pytorch\\torch\\fx\\passes\\graph_manipulation.py",
    "ast_data": "FunctionDef name:get_size_of_all_nodes arg:fx_module arg:args arguments arg arg If Compare Call Call For If Compare Assign Call Return return:no Call"
  },
  {
    "library": "scikit-learn",
    "name": "Parallel",
    "source_code": "class Parallel(joblib.Parallel):\n\n    def __call__(self, iterable):\n        config = get_config()\n        warning_filters = warnings.filters\n        iterable_with_config_and_warning_filters = ((_with_config_and_warning_filters(delayed_func, config, warning_filters), args, kwargs) for delayed_func, args, kwargs in iterable)\n        return super().__call__(iterable_with_config_and_warning_filters)",
    "docstring": "Tweak of :class: that propagates the scikit-learn configuration. This subclass of :class: ensures that the active configuration (thread-local) of scikit-learn is propagated to the parallel workers for the duration of the execution of the parallel tasks. The API does not change and you can refer to :class: documentation for more details. .. versionadded:: 1.3",
    "type": "class",
    "file_path": "scikit-learn\\sklearn\\utils\\parallel.py",
    "ast_data": "ClassDef name:Parallel FunctionDef name:__call__ arg:self arg:iterable arguments arg arg Assign Call Assign Assign Call Return return:yes Call Call"
  },
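A sketch of the config propagation this subclass provides; `read_flag` is a hypothetical task, and the imports use the public `sklearn.utils.parallel` module this entry documents.

```python
from sklearn import config_context, get_config
from sklearn.utils.parallel import Parallel, delayed

def read_flag():
    # Runs in a worker; sees the propagated thread-local config.
    return get_config()["print_changed_only"]

with config_context(print_changed_only=False):
    results = Parallel(n_jobs=2)(delayed(read_flag)() for _ in range(4))

print(results)  # [False, False, False, False]
```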
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, usr_config, req_file):\n    self.usr_config = usr_config\n    self.req_file = req_file\n    self.warning_msg = []\n    self.error_msg = []\n    reqs_all = self.get_all_reqs()\n    self.required = reqs_all['required']\n    self.optional = reqs_all['optional']\n    self.unsupported = reqs_all['unsupported']\n    self.dependency = reqs_all['dependency']\n    self.successes = []\n    self.failures = []",
    "docstring": "Initializes a configuration compatibility checker. Args: usr_config: Dict of all configuration(s) whose version compatibilities are to be checked against the rules defined in the config file. req_file: String that is the full name of the config file. e.g.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\tools\\tensorflow_builder\\compat_checker\\compat_checker.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:usr_config arg:req_file arguments arg arg arg Assign Assign Assign Assign Assign Call Assign Assign Assign Assign Assign Assign"
  },
  {
    "library": "pytorch",
    "name": "_unregister_create_node_hook",
    "source_code": "def _unregister_create_node_hook(self, f):\n    assert callable(f), 'create_node hook must be a callable.'\n    self._create_node_hooks.remove(f)",
    "docstring": "Takes a callable which was previously registered to be called after we create a node. This function will unregister that callable so it is no longer invoked on node creation.",
    "type": "method",
    "file_path": "pytorch\\torch\\fx\\graph_module.py",
    "ast_data": "FunctionDef name:_unregister_create_node_hook arg:self arg:f arguments arg arg Call Call"
  },
  {
    "library": "numpy",
    "name": "keys",
    "source_code": "def keys(self):\n    return Mapping.keys(self)",
    "docstring": "D.keys() returns a set-like object providing a view on the keys",
    "type": "method",
    "file_path": "numpy\\numpy\\lib\\_npyio_impl.py",
    "ast_data": "FunctionDef name:keys arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "kornia",
    "name": "print_image",
    "source_code": "def print_image(image: Union[str, Tensor], max_width: int=96) -> None:\n    if isinstance(image, str):\n        img = kornia.io.load_image(image, ImageLoadType.RGB8)\n    elif isinstance(image, Tensor):\n        img = image\n    else:\n        raise RuntimeError(f'Expect image type to be either Tensor or str. Got {type(image)}.')\n    print(image_to_string(img, max_width))",
    "docstring": "Print an image to the terminal. .. image:: Args: image: path to a valid image file or a tensor. max_width: maximum width to print to terminal. Note: Need to use .",
    "type": "function",
    "file_path": "kornia\\kornia\\utils\\image_print.py",
    "ast_data": "FunctionDef name:print_image arg:image arg:max_width arguments arg arg If Call Assign Call If Call Assign Raise Call Call Call Call"
  },
  {
    "library": "cherrypy",
    "name": "index",
    "source_code": "@cherrypy.expose\ndef index(self):\n    return self.header() + '\\n            <p>\\n            Isn\\'t this exciting? There\\'s\\n            <a href=\"./another/\">another page</a>, too!\\n            </p>\\n        ' + self.footer()",
    "docstring": "Produce HTTP response body of home page app index URI.",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\tutorial\\tut05_derived_objects.py",
    "ast_data": "FunctionDef name:index arg:self arguments arg Return return:yes Call Call"
  },
  {
    "library": "pandas",
    "name": "TermValue",
    "source_code": "class TermValue:\n\n    def __init__(self, value, converted, kind: str) -> None:\n        assert isinstance(kind, str), kind\n        self.value = value\n        self.converted = converted\n        self.kind = kind\n\n    def tostring(self, encoding) -> str:\n        if self.kind == 'string':\n            if encoding is not None:\n                return str(self.converted)\n            return f'\"{self.converted}\"'\n        elif self.kind == 'float':\n            return repr(self.converted)\n        return str(self.converted)",
    "docstring": "hold a term value the we use to construct a condition/filter",
    "type": "class",
    "file_path": "pandas\\pandas\\core\\computation\\pytables.py",
    "ast_data": "ClassDef name:TermValue FunctionDef name:__init__ arg:self arg:value arg:converted arg:kind arguments arg arg arg arg Call Assign Assign Assign FunctionDef name:tostring arg:self arg:encoding arguments arg arg If Compare If Compare Return return:yes Call Return return:yes If Compare Return return:yes Call Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "score",
    "source_code": "def score(self, X, y, sample_weight=None):\n    return super().score(X, y, sample_weight)",
    "docstring": "Return the mean accuracy on the given test data and labels. In multi-label classification, this is the subset accuracy which is a harsh metric since you require for each sample that each label set be correctly predicted. Parameters ---------- X : array-like of shape (n_samples, n_features), or None Test samples. If , predictions for all indexed points are used; in this case, points are not considered their own neighbors. This means that implicitly performs a leave-one-out cross-validation procedure and is equivalent to but typically much faster. y : array-like of shape (n_samples,) or (n_samples, n_outputs) True labels for . sample_weight : array-like of shape (n_samples,), default=None Sample weights. Returns ------- score : float Mean accuracy of `y`.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\neighbors\\_classification.py",
    "ast_data": "FunctionDef name:score arg:self arg:X arg:y arg:sample_weight arguments arg arg arg arg Return return:yes Call Call"
  },
  {
    "library": "django",
    "name": "AlterUniqueTogether",
    "source_code": "class AlterUniqueTogether(AlterTogetherOptionOperation):\n    option_name = 'unique_together'\n\n    def __init__(self, name, unique_together):\n        super().__init__(name, unique_together)",
    "docstring": "Change the value of unique_together to the target one. Input value of unique_together must be a set of tuples.",
    "type": "class",
    "file_path": "django\\django\\db\\migrations\\operations\\models.py",
    "ast_data": "ClassDef name:AlterUniqueTogether Assign FunctionDef name:__init__ arg:self arg:name arg:unique_together arguments arg arg arg Call Call"
  },
  {
    "library": "scikit-learn",
    "name": "get_metadata_routing",
    "source_code": "def get_metadata_routing(self):\n    router = MetadataRouter(owner=self.__class__.__name__)\n    router.add(estimator=self.estimator, method_mapping=MethodMapping().add(callee='fit', caller='fit').add(callee='score', caller='fit').add(callee='predict', caller='predict').add(callee='predict_proba', caller='predict_proba').add(callee='decision_function', caller='decision_function').add(callee='predict_log_proba', caller='predict_log_proba').add(callee='score', caller='score'))\n    return router",
    "docstring": "Get metadata routing of this object. Please check :ref: on how the routing mechanism works. .. versionadded:: 1.6 Returns ------- routing : MetadataRouter A :class: encapsulating routing information.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\semi_supervised\\_self_training.py",
    "ast_data": "FunctionDef name:get_metadata_routing arg:self arguments arg Assign Call Call Call Call Call Call Call Call Call Call Return return:yes"
  },
  {
    "library": "numpy",
    "name": "sctype2char",
    "source_code": "@set_module('numpy')\ndef sctype2char(sctype):\n    sctype = obj2sctype(sctype)\n    if sctype is None:\n        raise ValueError('unrecognized type')\n    if sctype not in sctypeDict.values():\n        raise KeyError(sctype)\n    return dtype(sctype).char",
    "docstring": "Return the string representation of a scalar dtype. Parameters ---------- sctype : scalar dtype or object If a scalar dtype, the corresponding string character is returned. If an object, tries to infer its scalar type and then return the corresponding string character. Returns ------- typechar : str The string character corresponding to the scalar type. Raises ------ ValueError If is an object for which the type can not be inferred. See Also -------- obj2sctype, issctype, issubsctype, mintypecode Examples -------- >>> from numpy._core.numerictypes import sctype2char >>> for sctype in [np.int32, np.double, np.cdouble, np.bytes_, np.ndarray]: ... print(sctype2char(sctype)) l # may vary d D S O >>> x = np.array([1., 2-1.j]) >>> sctype2char(x) 'D' >>> sctype2char(list) 'O'",
    "type": "function",
    "file_path": "numpy\\numpy\\_core\\numerictypes.py",
    "ast_data": "FunctionDef name:sctype2char arg:sctype arguments arg Assign Call If Compare Raise Call If Compare Call Raise Call Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "_reset_parameters",
    "source_code": "def _reset_parameters(self):\n    for p in self.parameters():\n        if p.dim() > 1:\n            xavier_uniform_(p)",
    "docstring": "Initiate parameters in the transformer model.",
    "type": "method",
    "file_path": "pytorch\\torch\\nn\\modules\\transformer.py",
    "ast_data": "FunctionDef name:_reset_parameters arg:self arguments arg For Call If Compare Call Call"
  },
  {
    "library": "pytorch",
    "name": "prepare_obs_or_fq_callback",
    "source_code": "def prepare_obs_or_fq_callback(self, model: torch.fx.GraphModule, edge_or_node_to_obs_or_fq: dict[EdgeOrNode, ObserverOrFakeQuantize]) -> None:\n    return",
    "docstring": "A callback that will be called after the observers or fake quants are created for each sharing group, but before they are inserted into the graph. The callback can be used to make final quantization adjustments, such as enforcing specific scale and zero point on model input or output. Args: * : the graph module being prepared. * : a dictionary mapping each annotated edge and node to the corresponding observer or fake quant object. Note that multiple edges and/or nodes can map to the same observer / fake quant instance if they were annotated with SharedQuantizationSpec. This dictionary can be modified by the callback.",
    "type": "method",
    "file_path": "pytorch\\torch\\ao\\quantization\\quantizer\\quantizer.py",
    "ast_data": "FunctionDef name:prepare_obs_or_fq_callback arg:self arg:model arg:edge_or_node_to_obs_or_fq arguments arg arg arg Return return:no"
  },
  {
    "library": "kornia",
    "name": "random",
    "source_code": "@classmethod\ndef random(cls, batch_size: Optional[int]=None, device: Optional[Device]=None, dtype: Optional[Dtype]=None) -> So3:\n    return cls(Quaternion.random(batch_size, device, dtype))",
    "docstring": "Create a So3 group representing a random rotation. Args: batch_size: the batch size of the underlying data. device: device to place the result on. dtype: dtype of the result. Example: >>> s = So3.random() >>> s = So3.random(batch_size=3)",
    "type": "method",
    "file_path": "kornia\\kornia\\geometry\\liegroup\\so3.py",
    "ast_data": "FunctionDef name:random arg:cls arg:batch_size arg:device arg:dtype arguments arg arg arg arg Return return:yes Call Call"
  },
  {
    "library": "pandas",
    "name": "_box_values",
    "source_code": "def _box_values(self, values) -> np.ndarray:\n    return lib.map_infer(values, self._box_func, convert=False)",
    "docstring": "apply box func to passed values",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\arrays\\datetimelike.py",
    "ast_data": "FunctionDef name:_box_values arg:self arg:values arguments arg arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "FixedLossScale",
    "source_code": "@deprecation.deprecated_endpoints('mixed_precision.experimental.FixedLossScale', 'train.experimental.FixedLossScale')\n@tf_export(v1=['mixed_precision.FixedLossScale', 'mixed_precision.experimental.FixedLossScale', 'train.experimental.FixedLossScale'])\nclass FixedLossScale(LossScale):\n\n    @deprecation.deprecated(None, 'Use tf.keras.mixed_precision.LossScaleOptimizer instead. LossScaleOptimizer now has all the functionality of FixedLossScale')\n    def __init__(self, loss_scale_value):\n        super(FixedLossScale, self).__init__()\n        if not isinstance(loss_scale_value, (int, float)):\n            raise ValueError('loss_scale_value must be a Python int or float.')\n        if loss_scale_value < 1:\n            raise ValueError('loss_scale_value must be at least 1.')\n        self._loss_scale_value = float(loss_scale_value)\n\n    def __call__(self):\n        return ops.convert_to_tensor(self._loss_scale_value)\n\n    def update(self, grads):\n        del grads\n        return (control_flow_ops.no_op(), True)\n\n    def __repr__(self):\n        return 'FixedLossScale(%s)' % self._loss_scale_value\n\n    def get_config(self):\n        return {'loss_scale_value': self._loss_scale_value}",
    "docstring": "Loss scale with a fixed value. The loss scale is not updated for the lifetime of instances of this class. A given instance of this class always returns the same number when called.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\training\\experimental\\loss_scale.py",
    "ast_data": "ClassDef name:FixedLossScale FunctionDef name:__init__ arg:self arg:loss_scale_value arguments arg arg Call Call If Call Raise Call If Compare Raise Call Assign Call Call FunctionDef name:__call__ arg:self arguments arg Return return:yes Call FunctionDef name:update arg:self arg:grads arguments arg arg Return return:yes Call FunctionDef name:__repr__ arg:self arguments arg Return return:yes FunctionDef name:get_config arg:self arguments arg Return return:yes Call Call"
  },
  {
    "library": "scikit-learn",
    "name": "benchmark",
    "source_code": "@ignore_warnings\ndef benchmark(metrics=tuple((v for k, v in sorted(METRICS.items()))), formats=tuple((v for k, v in sorted(FORMATS.items()))), samples=1000, classes=4, density=0.2, n_times=5):\n    metrics = np.atleast_1d(metrics)\n    samples = np.atleast_1d(samples)\n    classes = np.atleast_1d(classes)\n    density = np.atleast_1d(density)\n    formats = np.atleast_1d(formats)\n    out = np.zeros((len(metrics), len(formats), len(samples), len(classes), len(density)), dtype=float)\n    it = itertools.product(samples, classes, density)\n    for i, (s, c, d) in enumerate(it):\n        _, y_true = make_multilabel_classification(n_samples=s, n_features=1, n_classes=c, n_labels=d * c, random_state=42)\n        _, y_pred = make_multilabel_classification(n_samples=s, n_features=1, n_classes=c, n_labels=d * c, random_state=84)\n        for j, f in enumerate(formats):\n            f_true = f(y_true)\n            f_pred = f(y_pred)\n            for k, metric in enumerate(metrics):\n                t = timeit(partial(metric, f_true, f_pred), number=n_times)\n                out[k, j].flat[i] = t\n    return out",
    "docstring": "Times metric calculations for a number of inputs Parameters ---------- metrics : array-like of callables (1d or 0d) The metric functions to time. formats : array-like of callables (1d or 0d) These may transform a dense indicator matrix into multilabel representation. samples : array-like of ints (1d or 0d) The number of samples to generate as input. classes : array-like of ints (1d or 0d) The number of classes in the input. density : array-like of ints (1d or 0d) The density of positive labels in the input. n_times : int Time calling the metric n_times times. Returns ------- array of floats shaped like (metrics, formats, samples, classes, density) Time in seconds.",
    "type": "function",
    "file_path": "scikit-learn\\benchmarks\\bench_multilabel_metrics.py",
    "ast_data": "FunctionDef name:benchmark arg:metrics arg:formats arg:samples arg:classes arg:density arg:n_times arguments arg arg arg arg arg arg Call Call Call Call Call Call Assign Call Assign Call Assign Call Assign Call Assign Call Assign Call Call Call Call Call Call Assign Call For Call Assign Call Assign Call For Call Assign Call Assign Call For Call Assign Call Call Assign Return return:yes"
  },
  {
    "library": "pandas",
    "name": "take_data",
    "source_code": "def take_data(self):\n    return self.data",
    "docstring": "return the data",
    "type": "method",
    "file_path": "pandas\\pandas\\io\\pytables.py",
    "ast_data": "FunctionDef name:take_data arg:self arguments arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "save_model_states",
    "source_code": "def save_model_states(state_dict, sparsified_model_dump_path, save_file_name, sparse_block_shape, norm, zip=True):\n    folder_name = os.path.join(sparsified_model_dump_path, str(norm))\n    folder_str = f'config_{sparse_block_shape}'\n    model_state = state_dict['state_dict']\n    model_state_path = os.path.join(folder_name, folder_str, save_file_name)\n    os.makedirs(os.path.dirname(model_state_path), exist_ok=True)\n    torch.save(model_state, model_state_path)\n    if zip:\n        zip_path = model_state_path.replace('.ckpt', '.zip')\n        with ZipFile(zip_path, 'w', zipfile.ZIP_DEFLATED) as zip:\n            zip.write(model_state_path, save_file_name)\n        os.remove(model_state_path)\n        model_state_path = zip_path\n    model_state_path = os.path.abspath(model_state_path)\n    file_size = os.path.getsize(model_state_path)\n    file_size = file_size >> 20\n    return (model_state_path, file_size)",
    "docstring": "Dumps the state_dict() of the model. Args: state_dict (Dict) The state_dict() as dumped by dlrm_s_pytorch.py. Only the model state will be extracted from this dictionary. This corresponds to the 'state_dict' key in the state_dict dictionary. >>> model_state = state_dict['state_dict'] save_file_name (str) The filename (not path) when saving the model state dictionary sparse_block_shape (Tuple) The block shape corresponding to the data norm sparsifier. **Used for creating save directory** norm (str) type of norm (L1, L2) for the datanorm sparsifier. **Used for creating save directory** zip (bool) if True, the file is zip-compressed.",
    "type": "function",
    "file_path": "pytorch\\torch\\ao\\pruning\\_experimental\\data_sparsifier\\benchmarks\\evaluate_disk_savings.py",
    "ast_data": "FunctionDef name:save_model_states arg:state_dict arg:sparsified_model_dump_path arg:save_file_name arg:sparse_block_shape arg:norm arg:zip arguments arg arg arg arg arg arg Assign Call Call Assign Assign Assign Call Call Call Call If Assign Call With Call Call Call Assign Assign Call Assign Call Assign Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "patch",
    "source_code": "def patch(self, arg1: Optional[Union[str, dict[str, Any]]]=None, arg2: Any=None, **kwargs: dict[str, Any]) -> 'ContextDecorator':\n    changes: dict[str, Any]\n    if arg1 is not None:\n        if arg2 is not None:\n            assert isinstance(arg1, str)\n            changes = {arg1: arg2}\n        else:\n            assert isinstance(arg1, dict)\n            changes = arg1\n        assert not kwargs\n    else:\n        changes = kwargs\n        assert arg2 is None\n    assert isinstance(changes, dict), f'expected `dict` got {type(changes)}'\n    prior: dict[str, Any] = {}\n    config = self\n\n    class ConfigPatch(ContextDecorator):\n\n        def __init__(self) -> None:\n            self.changes = changes\n\n        def __enter__(self) -> None:\n            assert not prior\n            for key in self.changes.keys():\n                prior[key] = config.__getattr__(key)\n            for k, v in self.changes.items():\n                config.__setattr__(k, v)\n\n        def __exit__(self, exc_type, exc_val, exc_tb):\n            for k, v in prior.items():\n                config.__setattr__(k, v)\n            prior.clear()\n    return ConfigPatch()",
    "docstring": "Decorator and/or context manager to make temporary changes to a config. As a decorator: @config.patch(\"name\", val) @config.patch(name1=val1, name2=val2) @config.patch({\"name1\": val1, \"name2\", val2}) def foo(...): ... As a context manager: with config.patch(\"name\", val): ...",
    "type": "method",
    "file_path": "pytorch\\torch\\utils\\_config_module.py",
    "ast_data": "FunctionDef name:patch arg:self arg:arg1 arg:arg2 arguments arg arg arg arg If Compare If Compare Call Assign Call Assign Assign Compare Call Call Assign ClassDef name:ConfigPatch FunctionDef name:__init__ arg:self arguments arg Assign FunctionDef name:__enter__ arg:self arguments arg For Call Assign Call For Call Call FunctionDef name:__exit__ arg:self arg:exc_type arg:exc_val arg:exc_tb arguments arg arg arg arg For Call Call Call Return return:yes Call"
  },
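The save-and-restore pattern that ConfigPatch implements above can be reduced to a few lines; this is an illustrative sketch with a plain dict standing in for the config object, not the actual torch API.

from contextlib import contextmanager

config = {"verbose": False, "level": 1}

@contextmanager
def patch(changes):
    prior = {k: config[k] for k in changes}  # remember prior values
    config.update(changes)                   # apply temporary changes
    try:
        yield
    finally:
        config.update(prior)                 # always restore, even on error

with patch({"verbose": True}):
    assert config["verbose"] is True
assert config["verbose"] is False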
  {
    "library": "pytorch",
    "name": "get_cmake_cache_variables",
    "source_code": "def get_cmake_cache_variables(self) -> dict[str, CMakeValue]:\n    with open(self._cmake_cache_file) as f:\n        return get_cmake_cache_variables_from_file(f)",
    "docstring": "Gets values in CMakeCache.txt into a dictionary. Returns: dict: A `` containing the value of cached CMake variables.",
    "type": "method",
    "file_path": "pytorch\\tools\\setup_helpers\\cmake.py",
    "ast_data": "FunctionDef name:get_cmake_cache_variables arg:self arguments arg With Call Return return:yes Call"
  },
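CMakeCache.txt stores one NAME:TYPE=VALUE entry per line, plus '#'/'//' comments; below is a rough sketch of the parsing involved (the real get_cmake_cache_variables_from_file also handles quoting and type conversion).

import io

def parse_cmake_cache(f):
    variables = {}
    for line in f:
        line = line.strip()
        if not line or line.startswith(('#', '//')):
            continue  # skip blanks and comments
        name_and_type, _, value = line.partition('=')
        name, _, _type = name_and_type.partition(':')
        variables[name] = value
    return variables

sample = io.StringIO('// comment\nCMAKE_BUILD_TYPE:STRING=Release\n')
print(parse_cmake_cache(sample))  # {'CMAKE_BUILD_TYPE': 'Release'}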
  {
    "library": "django",
    "name": "run",
    "source_code": "def run(self, **options):\n    use_reloader = options['use_reloader']\n    if use_reloader:\n        autoreload.run_with_reloader(self.inner_run, **options)\n    else:\n        self.inner_run(None, **options)",
    "docstring": "Run the server, using the autoreloader if needed.",
    "type": "method",
    "file_path": "django\\django\\core\\management\\commands\\runserver.py",
    "ast_data": "FunctionDef name:run arg:self arguments arg arg Assign If Call Call"
  },
  {
    "library": "pytorch",
    "name": "placeholder",
    "source_code": "def placeholder(self, index: int) -> T:\n    raise NotImplementedError",
    "docstring": "This is a fake op used in analysis but not codegen",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\ops_handler.py",
    "ast_data": "FunctionDef name:placeholder arg:self arg:index arguments arg arg Raise"
  },
  {
    "library": "scikit-learn",
    "name": "fit",
    "source_code": "@_fit_context(prefer_skip_nested_validation=True)\ndef fit(self, X, y=None):\n    X = validate_data(self, X)\n    if self.assume_centered:\n        self.location_ = np.zeros(X.shape[1])\n    else:\n        self.location_ = X.mean(0)\n    covariance, shrinkage = _oas(X - self.location_, assume_centered=True)\n    self.shrinkage_ = shrinkage\n    self._set_covariance(covariance)\n    return self",
    "docstring": "Fit the Oracle Approximating Shrinkage covariance model to X. Parameters ---------- X : array-like of shape (n_samples, n_features) Training data, where is the number of samples and is the number of features. y : Ignored Not used, present for API consistency by convention. Returns ------- self : object Returns the instance itself.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\covariance\\_shrunk_covariance.py",
    "ast_data": "FunctionDef name:fit arg:self arg:X arg:y arguments arg arg arg Assign Call If Assign Call Assign Call Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "_compute_lower_bound",
    "source_code": "def _compute_lower_bound(self, log_resp, log_prob_norm):\n    n_features, = self.mean_prior_.shape\n    log_det_precisions_chol = _compute_log_det_cholesky(self.precisions_cholesky_, self.covariance_type, n_features) - 0.5 * n_features * np.log(self.degrees_of_freedom_)\n    if self.covariance_type == 'tied':\n        log_wishart = self.n_components * np.float64(_log_wishart_norm(self.degrees_of_freedom_, log_det_precisions_chol, n_features))\n    else:\n        log_wishart = np.sum(_log_wishart_norm(self.degrees_of_freedom_, log_det_precisions_chol, n_features))\n    if self.weight_concentration_prior_type == 'dirichlet_process':\n        log_norm_weight = -np.sum(betaln(self.weight_concentration_[0], self.weight_concentration_[1]))\n    else:\n        log_norm_weight = _log_dirichlet_norm(self.weight_concentration_)\n    return -np.sum(np.exp(log_resp) * log_resp) - log_wishart - log_norm_weight - 0.5 * n_features * np.sum(np.log(self.mean_precision_))",
    "docstring": "Estimate the lower bound of the model. The lower bound on the likelihood (of the training data with respect to the model) is used to detect the convergence and has to increase at each iteration. Parameters ---------- X : array-like of shape (n_samples, n_features) log_resp : array, shape (n_samples, n_components) Logarithm of the posterior probabilities (or responsibilities) of the point of each sample in X. log_prob_norm : float Logarithm of the probability of each sample in X. Returns ------- lower_bound : float",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\mixture\\_bayesian_mixture.py",
    "ast_data": "FunctionDef name:_compute_lower_bound arg:self arg:log_resp arg:log_prob_norm arguments arg arg arg Assign Assign Call Call If Compare Assign Call Call Assign Call Call If Compare Assign Call Call Assign Call Return return:yes Call Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "checkpointable_objects",
    "source_code": "@property\ndef checkpointable_objects(self):\n    return {key: value for key, value in self._object_dict.items() if value is not None}",
    "docstring": "Returns dictionary of all checkpointable objects.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\saving\\saved_model\\serialized_attributes.py",
    "ast_data": "FunctionDef name:checkpointable_objects arg:self arguments arg Return return:yes Call Compare"
  },
  {
    "library": "matplotlib",
    "name": "on_submit",
    "source_code": "def on_submit(self, func):\n    return self._observers.connect('submit', lambda text: func(text))",
    "docstring": "When the user hits enter or leaves the submission box, call this *func* with event. A connection id is returned which can be used to disconnect.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\widgets.py",
    "ast_data": "FunctionDef name:on_submit arg:self arg:func arguments arg arg Return return:yes Call arguments arg Call"
  },
  {
    "library": "pytorch",
    "name": "synchronize_staging",
    "source_code": "def synchronize_staging(self) -> None:\n    pass",
    "docstring": "In the case is async in some way, this method should be called to ensure staging is complete and it is safe to begin modifying the original",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\checkpoint\\staging.py",
    "ast_data": "FunctionDef name:synchronize_staging arg:self arguments arg"
  },
  {
    "library": "matplotlib",
    "name": "get_ylim",
    "source_code": "def get_ylim(self):\n    return tuple(self.viewLim.intervaly)",
    "docstring": "Return the y-axis view limits. Returns ------- bottom, top : (float, float) The current y-axis limits in data coordinates. See Also -------- .Axes.set_ylim .Axes.set_ybound, .Axes.get_ybound .Axes.invert_yaxis, .Axes.yaxis_inverted Notes ----- The y-axis may be inverted, in which case the *bottom* value will be greater than the *top* value.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\axes\\_base.py",
    "ast_data": "FunctionDef name:get_ylim arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_unpack_tensor",
    "source_code": "def _unpack_tensor(self, parallel_tensor):\n    if not isinstance(parallel_tensor, (tensor_lib.Tensor, composite_tensor.CompositeTensor, variables.Variable)):\n        raise ValueError('Expected a tensor, got {}.'.format(parallel_tensor))\n    with ops.device(self._name):\n        return tpu_ops.tpu_replicated_output(parallel_tensor, num_replicas=len(self.components))",
    "docstring": "Helper to unpack a single tensor.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\parallel_device\\parallel_device.py",
    "ast_data": "FunctionDef name:_unpack_tensor arg:self arg:parallel_tensor arguments arg arg If Call Raise Call Call With Call Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "build_tensor_info_internal",
    "source_code": "def build_tensor_info_internal(tensor):\n    if isinstance(tensor, composite_tensor.CompositeTensor) and (not isinstance(tensor, sparse_tensor.SparseTensor)) and (not isinstance(tensor, resource_variable_ops.ResourceVariable)):\n        return _build_composite_tensor_info_internal(tensor)\n    tensor_info = meta_graph_pb2.TensorInfo(dtype=dtypes.as_dtype(tensor.dtype).as_datatype_enum, tensor_shape=tensor.get_shape().as_proto())\n    if isinstance(tensor, sparse_tensor.SparseTensor):\n        tensor_info.coo_sparse.values_tensor_name = tensor.values.name\n        tensor_info.coo_sparse.indices_tensor_name = tensor.indices.name\n        tensor_info.coo_sparse.dense_shape_tensor_name = tensor.dense_shape.name\n    else:\n        tensor_info.name = tensor.name\n    return tensor_info",
    "docstring": "Utility function to build TensorInfo proto from a Tensor.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\saved_model\\utils_impl.py",
    "ast_data": "FunctionDef name:build_tensor_info_internal arg:tensor arguments arg If BoolOp Call Call Call Return return:yes Call Assign Call Call Call Call If Call Assign Assign Assign Assign Return return:yes"
  },
  {
    "library": "sphinx",
    "name": "highlightlang",
    "source_code": "class highlightlang(nodes.Element):\n    pass",
    "docstring": "Inserted to set the highlight language and line number options for subsequent code blocks.",
    "type": "class",
    "file_path": "sphinx\\sphinx\\addnodes.py",
    "ast_data": "ClassDef name:highlightlang"
  },
  {
    "library": "tensorflow",
    "name": "convert_variable_to_constant",
    "source_code": "def convert_variable_to_constant(self, incoming_edge, tensor_data):\n    raise NotImplementedError",
    "docstring": "Converts a variable in this Convertible and its dependencies. This method should make sure that a converted copy of itself is present in the converted graph, and that all Convertibles depending on this one also go through the same process. Args: incoming_edge: The graph edge into this Convertible that is being converted to a constant. tensor_data: The tensor representing the constant.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\convert_to_constants.py",
    "ast_data": "FunctionDef name:convert_variable_to_constant arg:self arg:incoming_edge arg:tensor_data arguments arg arg arg Raise"
  },
  {
    "library": "cryptography",
    "name": "generate_private_key",
    "source_code": "@abc.abstractmethod\ndef generate_private_key(self) -> DSAPrivateKey:\n    pass",
    "docstring": "Generates and returns a DSAPrivateKey.",
    "type": "method",
    "file_path": "cryptography\\src\\cryptography\\hazmat\\primitives\\asymmetric\\dsa.py",
    "ast_data": "FunctionDef name:generate_private_key arg:self arguments arg"
  },
  {
    "library": "scikit-learn",
    "name": "_run_search",
    "source_code": "def _run_search(self, evaluate_candidates):\n    evaluate_candidates(ParameterGrid(self.param_grid))",
    "docstring": "Search all candidates in param_grid",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\model_selection\\_search.py",
    "ast_data": "FunctionDef name:_run_search arg:self arg:evaluate_candidates arguments arg arg Call Call"
  },
  {
    "library": "django",
    "name": "__str__",
    "source_code": "def __str__(self):\n    if not hasattr(self, 'model'):\n        return super().__str__()\n    model = self.model\n    return '%s.%s' % (model._meta.label, self.name)",
    "docstring": "Return \"app_label.model_label.field_name\" for fields attached to models.",
    "type": "method",
    "file_path": "django\\django\\db\\models\\fields\\__init__.py",
    "ast_data": "FunctionDef name:__str__ arg:self arguments arg If Call Return return:yes Call Call Assign Return return:yes"
  },
  {
    "library": "django",
    "name": "CreateView",
    "source_code": "class CreateView(SingleObjectTemplateResponseMixin, BaseCreateView):\n    template_name_suffix = '_form'",
    "docstring": "View for creating a new object, with a response rendered by a template.",
    "type": "class",
    "file_path": "django\\django\\views\\generic\\edit.py",
    "ast_data": "ClassDef name:CreateView Assign"
  },
  {
    "library": "uvicorn",
    "name": "resume_writing",
    "source_code": "def resume_writing(self) -> None:\n    self.flow.resume_writing()",
    "docstring": "Called by the transport when the write buffer drops below the low water mark.",
    "type": "method",
    "file_path": "uvicorn\\uvicorn\\protocols\\http\\h11_impl.py",
    "ast_data": "FunctionDef name:resume_writing arg:self arguments arg Call"
  },
  {
    "library": "scikit-learn",
    "name": "n_classes_",
    "source_code": "@property\ndef n_classes_(self):\n    return len(self.classes_)",
    "docstring": "Number of classes.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\multiclass.py",
    "ast_data": "FunctionDef name:n_classes_ arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "fit",
    "source_code": "@_fit_context(prefer_skip_nested_validation=True)\ndef fit(self, X, y, sample_weight=None):\n    X, y, sample_weight, Y = self._prepare_data(X, y, sample_weight, self.solver)\n    super().fit(X, Y, sample_weight=sample_weight)\n    return self",
    "docstring": "Fit Ridge classifier model. Parameters ---------- X : {ndarray, sparse matrix} of shape (n_samples, n_features) Training data. y : ndarray of shape (n_samples,) Target values. sample_weight : float or ndarray of shape (n_samples,), default=None Individual weights for each sample. If given a float, every sample will have the same weight. .. versionadded:: 0.17 *sample_weight* support to RidgeClassifier. Returns ------- self : object Instance of the estimator.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\linear_model\\_ridge.py",
    "ast_data": "FunctionDef name:fit arg:self arg:X arg:y arg:sample_weight arguments arg arg arg arg Assign Call Call Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "_get_orig_buffer_dtypes",
    "source_code": "@no_type_check\ndef _get_orig_buffer_dtypes(state: _FSDPState, buffer_names: list[str]) -> list[torch.dtype]:\n    buffer_dtypes: list[torch.dtype] = []\n    for buffer_name in buffer_names:\n        _p_assert(buffer_name in state._buffer_name_to_orig_dtype, f'{buffer_name} is missing from pre-computed dict on rank {state.rank}, which only has keys {state._buffer_name_to_orig_dtype.keys()}')\n        buffer_dtypes.append(state._buffer_name_to_orig_dtype[buffer_name])\n    return buffer_dtypes",
    "docstring": "Returns the original buffer types of the given buffer names.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\fsdp\\_runtime_utils.py",
    "ast_data": "FunctionDef name:_get_orig_buffer_dtypes arg:state arg:buffer_names arguments arg arg For Call Compare Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "maxpool2d_check",
    "source_code": "def maxpool2d_check(typ, module_instance):\n    new_type_list = list(typ.__args__)\n    if len(new_type_list) == 4 or len(new_type_list) == 3:\n        w_in = new_type_list[-1]\n        h_in = new_type_list[-2]\n        h_out = calculate_out_dimension(h_in, module_instance, 0)\n        w_out = calculate_out_dimension(w_in, module_instance, 1)\n        new_type_list[-1] = w_out\n        new_type_list[-2] = h_out\n        return TensorType(tuple(new_type_list))\n    else:\n        raise TypeError(f'Wrong size {typ} for {module_instance}')",
    "docstring": "Applies the maxpool2d shape information to the input this affects the last two dimensions",
    "type": "function",
    "file_path": "pytorch\\torch\\fx\\experimental\\graph_gradual_typechecker.py",
    "ast_data": "FunctionDef name:maxpool2d_check arg:typ arg:module_instance arguments arg arg Assign Call If BoolOp Compare Call Compare Call Assign Assign Assign Call Assign Call Assign Assign Return return:yes Call Call Raise Call"
  },
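The shape rule maxpool2d_check depends on is the standard pooling output-size formula; pool_out_dim below is a hypothetical stand-in for calculate_out_dimension, shown for plain integer dimensions.

import math

def pool_out_dim(in_size, kernel, stride=None, padding=0, dilation=1):
    stride = stride if stride is not None else kernel
    return math.floor((in_size + 2 * padding - dilation * (kernel - 1) - 1) / stride + 1)

# A 32x32 input pooled with a 2x2 window and stride 2 becomes 16x16.
assert pool_out_dim(32, kernel=2) == 16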
  {
    "library": "django",
    "name": "cursor_iter",
    "source_code": "def cursor_iter(cursor, sentinel, col_count, itersize):\n    try:\n        for rows in iter(lambda: cursor.fetchmany(itersize), sentinel):\n            yield (rows if col_count is None else [r[:col_count] for r in rows])\n    finally:\n        cursor.close()",
    "docstring": "Yield blocks of rows from a cursor and ensure the cursor is closed when done.",
    "type": "function",
    "file_path": "django\\django\\db\\models\\sql\\compiler.py",
    "ast_data": "FunctionDef name:cursor_iter arg:cursor arg:sentinel arg:col_count arg:itersize arguments arg arg arg arg Try For Call arguments Call Compare Call"
  },
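A hedged usage sketch of the generator above, with sqlite3 standing in for a Django database cursor; fetchmany returns an empty list once the result set is exhausted, which serves as the sentinel.

import sqlite3

conn = sqlite3.connect(':memory:')
cur = conn.cursor()
cur.execute('CREATE TABLE t (a, b)')
cur.executemany('INSERT INTO t VALUES (?, ?)', [(i, i * i) for i in range(5)])
cur.execute('SELECT a, b FROM t')

def cursor_iter(cursor, sentinel, col_count, itersize):
    try:
        for rows in iter(lambda: cursor.fetchmany(itersize), sentinel):
            yield rows if col_count is None else [r[:col_count] for r in rows]
    finally:
        cursor.close()

for block in cursor_iter(cur, [], col_count=1, itersize=2):
    print(block)  # blocks of at most 2 rows, each trimmed to 1 column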
  {
    "library": "numpy",
    "name": "fill_value",
    "source_code": "@property\ndef fill_value(self):\n    if self._fill_value is None:\n        self._fill_value = _check_fill_value(None, self.dtype)\n    if isinstance(self._fill_value, ndarray):\n        return self._fill_value[()]\n    return self._fill_value",
    "docstring": "The filling value of the masked array is a scalar. When setting, None will set to a default based on the data type. Examples -------- >>> import numpy as np >>> for dt in [np.int32, np.int64, np.float64, np.complex128]: ... np.ma.array([0, 1], dtype=dt).get_fill_value() ... np.int64(999999) np.int64(999999) np.float64(1e+20) np.complex128(1e+20+0j) >>> x = np.ma.array([0, 1.], fill_value=-np.inf) >>> x.fill_value np.float64(-inf) >>> x.fill_value = np.pi >>> x.fill_value np.float64(3.1415926535897931) Reset to default: >>> x.fill_value = None >>> x.fill_value np.float64(1e+20)",
    "type": "method",
    "file_path": "numpy\\numpy\\ma\\core.py",
    "ast_data": "FunctionDef name:fill_value arg:self arguments arg If Compare Assign Call If Call Return return:yes Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "__init__",
    "source_code": "@_docstring.interpd\ndef __init__(self, xy, r, width, angle=0.0, **kwargs):\n    super().__init__(**kwargs)\n    self.set_radii(r)\n    self.center = xy\n    self.width = width\n    self.angle = angle\n    self._path = None",
    "docstring": "Parameters ---------- xy : (float, float) xy coordinates of annulus centre. r : float or (float, float) The radius, or semi-axes: - If float: radius of the outer circle. - If two floats: semi-major and -minor axes of outer ellipse. width : float Width (thickness) of the annular ring. The width is measured inward from the outer ellipse so that for the inner ellipse the semi-axes are given by `Patch` properties: %(Patch:kwdoc)s",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\patches.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:xy arg:r arg:width arg:angle arguments arg arg arg arg arg arg Call Call Call Assign Assign Assign Assign"
  },
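A brief usage sketch (the Annulus patch exists in Matplotlib 3.5 and later): an outer radius of 0.4 with width 0.1 leaves an inner radius of 0.3.

import matplotlib.pyplot as plt
from matplotlib.patches import Annulus

fig, ax = plt.subplots()
ax.add_patch(Annulus((0.5, 0.5), 0.4, 0.1, facecolor='tab:blue'))
ax.set_aspect('equal')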
  {
    "library": "numpy",
    "name": "_quote_arg",
    "source_code": "def _quote_arg(arg):\n    if '\"' not in arg and ' ' in arg:\n        return '\"%s\"' % arg\n    return arg",
    "docstring": "Quote the argument for safe use in a shell command line.",
    "type": "function",
    "file_path": "numpy\\numpy\\distutils\\exec_command.py",
    "ast_data": "FunctionDef name:_quote_arg arg:arg arguments arg If BoolOp Compare Compare Return return:yes Return return:yes"
  },
  {
    "library": "django",
    "name": "slice_expression",
    "source_code": "def slice_expression(self, expression, start, length):\n    raise NotSupportedError('This field does not support slicing.')",
    "docstring": "Return a slice of this field.",
    "type": "method",
    "file_path": "django\\django\\db\\models\\fields\\__init__.py",
    "ast_data": "FunctionDef name:slice_expression arg:self arg:expression arg:start arg:length arguments arg arg arg arg Raise Call"
  },
  {
    "library": "matplotlib",
    "name": "patch_2d_to_3d",
    "source_code": "def patch_2d_to_3d(patch, z=0, zdir='z', axlim_clip=False):\n    verts = _get_patch_verts(patch)\n    patch.__class__ = Patch3D\n    patch.set_3d_properties(verts, z, zdir, axlim_clip)",
    "docstring": "Convert a to a object.",
    "type": "function",
    "file_path": "matplotlib\\lib\\mpl_toolkits\\mplot3d\\art3d.py",
    "ast_data": "FunctionDef name:patch_2d_to_3d arg:patch arg:z arg:zdir arg:axlim_clip arguments arg arg arg arg Assign Call Assign Call"
  },
  {
    "library": "pandas",
    "name": "_get_level_names",
    "source_code": "@final\ndef _get_level_names(self) -> range | Sequence[Hashable]:\n    if self._is_multi:\n        return maybe_sequence_to_range([level if name is None else name for level, name in enumerate(self.names)])\n    else:\n        return range(1) if self.name is None else [self.name]",
    "docstring": "Return a name or list of names with None replaced by the level number.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\indexes\\base.py",
    "ast_data": "FunctionDef name:_get_level_names arg:self arguments arg If Return return:yes Call Compare Call Return return:yes Compare Call"
  },
  {
    "library": "matplotlib",
    "name": "_interval_contains_close",
    "source_code": "def _interval_contains_close(interval, val, rtol=1e-10):\n    a, b = interval\n    if a > b:\n        a, b = (b, a)\n    rtol = (b - a) * rtol\n    return a - rtol <= val <= b + rtol",
    "docstring": "Check, inclusively, whether an interval includes a given value, with the interval expanded by a small tolerance to admit floating point errors. Parameters ---------- interval : (float, float) The endpoints of the interval. val : float Value to check is within interval. rtol : float, default: 1e-10 Relative tolerance slippage allowed outside of the interval. For an interval `` are considered inside the interval. Returns ------- bool Whether *val* is within the *interval* (with tolerance).",
    "type": "function",
    "file_path": "matplotlib\\lib\\matplotlib\\transforms.py",
    "ast_data": "FunctionDef name:_interval_contains_close arg:interval arg:val arg:rtol arguments arg arg arg Assign If Compare Assign Assign Return return:yes Compare"
  },
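A quick numeric check of the tolerance behavior described above, using a local copy of the helper (it is private to Matplotlib): floating-point round-off pushes 0.1 + 0.2 just past 0.3, so strict containment fails while the tolerant check succeeds.

def interval_contains_close(interval, val, rtol=1e-10):
    a, b = interval
    if a > b:
        a, b = b, a
    rtol = (b - a) * rtol
    return a - rtol <= val <= b + rtol

assert not (0.0 <= 0.1 + 0.2 <= 0.3)          # overshoots by ~5.5e-17
assert interval_contains_close((0.0, 0.3), 0.1 + 0.2)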
  {
    "library": "tensorflow",
    "name": "fn_args",
    "source_code": "def fn_args(fn):\n    if isinstance(fn, functools.partial):\n        args = fn_args(fn.func)\n        args = [a for a in args[len(fn.args):] if a not in (fn.keywords or [])]\n    else:\n        if _is_callable_object(fn):\n            fn = fn.__call__\n        args = tf_inspect.getfullargspec(fn).args\n        if _is_bound_method(fn) and args:\n            args.pop(0)\n    return tuple(args)",
    "docstring": "Get argument names for function-like object. Args: fn: Function, or function-like object (e.g., result of ). Returns: of string argument names. Raises: ValueError: if partial function has positionally bound arguments",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\util\\function_utils.py",
    "ast_data": "FunctionDef name:fn_args arg:fn arguments arg If Call Assign Call Assign Call Compare BoolOp If Call Assign Assign Call If BoolOp Call Call Return return:yes Call"
  },
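A simplified analogue of fn_args built on the standard library, assuming only plain functions and functools.partial (the TensorFlow version also handles bound methods and callable objects).

import functools
import inspect

def simple_fn_args(fn):
    if isinstance(fn, functools.partial):
        args = simple_fn_args(fn.func)
        # drop positionally bound args, then keyword-bound ones
        return tuple(a for a in args[len(fn.args):] if a not in (fn.keywords or {}))
    return tuple(inspect.getfullargspec(fn).args)

def f(a, b, c):
    return a + b + c

assert simple_fn_args(f) == ('a', 'b', 'c')
assert simple_fn_args(functools.partial(f, 1, c=3)) == ('b',)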
  {
    "library": "tensorflow",
    "name": "add_or_replace",
    "source_code": "def add_or_replace(self, key: Hashable, external: Any, internal: core.Tensor, tracetype: Any=None, is_by_ref: bool=False) -> None:\n    if is_by_ref:\n        self._by_ref_external[key] = external\n        self._by_ref_internal[key] = internal\n        self._by_ref_tracetype[key] = tracetype\n    else:\n        self._by_val_internal[key] = internal\n        self._by_val_external[key] = external\n        if tracetype is not None:\n            self._by_val_tracetype[key] = tracetype\n        else:\n            self._by_val_tracetype[key] = trace_type.from_value(external)",
    "docstring": "Replace a already exsiting capture, otherwise add it.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\core\\function\\capture\\capture_container.py",
    "ast_data": "FunctionDef name:add_or_replace arg:self arg:key arg:external arg:internal arg:tracetype arg:is_by_ref arguments arg arg arg arg arg arg If Assign Assign Assign Assign Assign If Compare Assign Assign Call"
  },
  {
    "library": "pytorch",
    "name": "keyfilter",
    "source_code": "def keyfilter(predicate, d, factory=dict):\n    rv = factory()\n    for k, v in d.items():\n        if predicate(k):\n            rv[k] = v\n    return rv",
    "docstring": "Filter items in dictionary by key >>> iseven = lambda x: x % 2 == 0 >>> d = {1: 2, 2: 3, 3: 4, 4: 5} >>> keyfilter(iseven, d) {2: 3, 4: 5} See Also: valfilter itemfilter keymap",
    "type": "function",
    "file_path": "pytorch\\torch\\fx\\experimental\\unification\\unification_tools.py",
    "ast_data": "FunctionDef name:keyfilter arg:predicate arg:d arg:factory arguments arg arg arg Assign Call For Call If Call Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "decorated",
    "source_code": "def decorated(fn):\n\n    def wrapped(*args, **kwargs):\n        return _add_should_use_warning(fn(*args, **kwargs), warn_in_eager=warn_in_eager, error_in_function=error_in_function)\n    fn_doc = fn.__doc__ or ''\n    split_doc = fn_doc.split('\\n', 1)\n    if len(split_doc) == 1:\n        updated_doc = fn_doc\n    else:\n        brief, rest = split_doc\n        updated_doc = '\\n'.join([brief, textwrap.dedent(rest)])\n    note = '\\n\\nNote: The output of this function should be used. If it is not, a warning will be logged or an error may be raised. To mark the output as used, call its .mark_used() method.'\n    return tf_decorator.make_decorator(target=fn, decorator_func=wrapped, decorator_name='should_use_result', decorator_doc=updated_doc + note)",
    "docstring": "Decorates the input function.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\util\\tf_should_use.py",
    "ast_data": "FunctionDef name:decorated arg:fn arguments arg FunctionDef name:wrapped arguments arg arg Return return:yes Call Call Assign BoolOp Assign Call If Compare Call Assign Assign Assign Call Call Assign Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, outputs):\n    self._outputs = self._wrap_and_check_outputs(outputs, self._SINGLE_OUTPUT_DEFAULT_NAME, error_label='Prediction')",
    "docstring": "Constructor for PredictOutput. Args: outputs: A or a dict of string to representing the predictions. Raises: ValueError: if the outputs is not dict, or any of its keys are not strings, or any of its values are not s.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\saving\\utils_v1\\export_output.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:outputs arguments arg arg Assign Call"
  },
  {
    "library": "django",
    "name": "save",
    "source_code": "def save(self, commit=True):\n    return self.set_password_and_save(self.user, commit=commit)",
    "docstring": "Save the new password.",
    "type": "method",
    "file_path": "django\\django\\contrib\\auth\\forms.py",
    "ast_data": "FunctionDef name:save arg:self arg:commit arguments arg arg Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "is_number",
    "source_code": "def is_number(self):\n    return False",
    "docstring": "Is this a number token?",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\_type1font.py",
    "ast_data": "FunctionDef name:is_number arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "flush",
    "source_code": "def flush(self):\n    raise NotImplementedError()",
    "docstring": "Flushes any buffered data.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\summary_ops_v2.py",
    "ast_data": "FunctionDef name:flush arg:self arguments arg Raise Call"
  },
  {
    "library": "tensorflow",
    "name": "_replace_sparse_with_values",
    "source_code": "def _replace_sparse_with_values(value, sparse_list):\n    flat_vals = nest.flatten(value, expand_composites=False)\n    new_vals = []\n    for v in flat_vals:\n        if isinstance(v, sparse_tensor.SparseTensor):\n            sparse_list.append(v)\n            new_vals.append(v.values)\n        else:\n            new_vals.append(v)\n    return nest.pack_sequence_as(value, new_vals, expand_composites=False)",
    "docstring": "Replace s with their values in Each in is replaced by its tensor, and collects all s in . Args: value: A structure of s and s sparse_list: A list. Output parameter that collects all s in . Returns: with each SparseTensor replaced by its attribute.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\sparse_ops.py",
    "ast_data": "FunctionDef name:_replace_sparse_with_values arg:value arg:sparse_list arguments arg arg Assign Call Assign For If Call Call Call Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "reset_state",
    "source_code": "def reset_state(self):\n    if not generic_utils.is_default(self.reset_states):\n        warnings.warn('Metric %s implements a `reset_states()` method; rename it to `reset_state()` (without the final \"s\"). The name `reset_states()` has been deprecated to improve API consistency.' % (self.__class__.__name__,))\n        return self.reset_states()\n    else:\n        backend.batch_set_value([(v, 0) for v in self.variables])",
    "docstring": "Resets all of the metric state variables. This function is called between epochs/steps, when a metric is evaluated during training.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\metrics.py",
    "ast_data": "FunctionDef name:reset_state arg:self arguments arg If Call Call Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "generate_visualizer",
    "source_code": "def generate_visualizer(self) -> ModelReportVisualizer:\n    if len(self._generated_reports) == 0:\n        raise Exception('Unable to generate visualizers without first generating reports')\n    module_fqns_to_features: OrderedDict = self._reformat_reports_for_visualizer()\n    visualizer: ModelReportVisualizer = ModelReportVisualizer(module_fqns_to_features)\n    return visualizer",
    "docstring": "Generates a ModelReportVisualizer instance using the reports generated by the generate_model_report() method. Returns the generated ModelReportVisualizer instance initialized Note: Throws exception if attempt to get visualizers without generating report",
    "type": "method",
    "file_path": "pytorch\\torch\\ao\\quantization\\fx\\_model_report\\model_report.py",
    "ast_data": "FunctionDef name:generate_visualizer arg:self arguments arg If Compare Call Raise Call Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "flow",
    "source_code": "@property\ndef flow(self):\n    return self._implementation._flow",
    "docstring": "The flow forcing ops leading to this TensorArray state.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\tensor_array_ops.py",
    "ast_data": "FunctionDef name:flow arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "get_stamp_token",
    "source_code": "def get_stamp_token(self):\n    stamp_token, _, _, _, _ = gen_boosted_trees_ops.boosted_trees_get_ensemble_states(self.resource_handle)\n    return stamp_token",
    "docstring": "Returns the current stamp token of the resource.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\boosted_trees_ops.py",
    "ast_data": "FunctionDef name:get_stamp_token arg:self arguments arg Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_StringCodec",
    "source_code": "class _StringCodec:\n\n    def can_encode(self, pyobj):\n        return isinstance(pyobj, str)\n\n    def do_encode(self, string_value, encode_fn):\n        del encode_fn\n        value = struct_pb2.StructuredValue()\n        value.string_value = string_value\n        return value\n\n    def can_decode(self, value):\n        return value.HasField('string_value')\n\n    def do_decode(self, value, decode_fn):\n        del decode_fn\n        return compat.as_str(value.string_value)",
    "docstring": "Codec for strings. See StructuredValue.string_value in proto/struct.proto for more detailed explanation.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\saved_model\\nested_structure_coder.py",
    "ast_data": "ClassDef name:_StringCodec FunctionDef name:can_encode arg:self arg:pyobj arguments arg arg Return return:yes Call FunctionDef name:do_encode arg:self arg:string_value arg:encode_fn arguments arg arg arg Assign Call Assign Return return:yes FunctionDef name:can_decode arg:self arg:value arguments arg arg Return return:yes Call FunctionDef name:do_decode arg:self arg:value arg:decode_fn arguments arg arg arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "extract_shadow_logger_info",
    "source_code": "def extract_shadow_logger_info(model_a_shadows_b: nn.Module, logger_cls: Callable, model_name_to_use_for_layer_names: str) -> NSResultsType:\n    torch._C._log_api_usage_once('quantization_api._numeric_suite_fx.extract_shadow_logger_info')\n    results: NSResultsType = collections.defaultdict(dict)\n    _extract_logger_info_one_model(model_a_shadows_b, results, logger_cls)\n    maybe_add_missing_fqns(results)\n    results = rekey_logger_info_on_node_name_of_model(results, model_name_to_use_for_layer_names)\n    return dict(results)",
    "docstring": "Traverse all loggers in a shadow model, and extract the logged information. Args: model_a_shadows_b: shadow model logger_cls: class of Logger to use model_name_to_use_for_layer_names: string name of model to use for layer names in the output Return: NSResultsType, containing the logged comparisons",
    "type": "function",
    "file_path": "pytorch\\torch\\ao\\ns\\_numeric_suite_fx.py",
    "ast_data": "FunctionDef name:extract_shadow_logger_info arg:model_a_shadows_b arg:logger_cls arg:model_name_to_use_for_layer_names arguments arg arg arg Call Call Call Call Assign Call Return return:yes Call"
  },
  {
    "library": "authlib",
    "name": "get_client_require_signed_request_object",
    "source_code": "def get_client_require_signed_request_object(self, client: ClientMixin) -> bool:\n    return False",
    "docstring": "Return the 'require_signed_request_object' client metadata. When :data:, the client requires that authorization requests use request objects, and an error will be returned when the authorization request payload is passed in the request body or query string:: class JWTAuthenticationRequest(rfc9101.JWTAuthenticationRequest): def get_client_require_signed_request_object(self, client): return client.require_signed_request_object If not implemented, the value is considered as :data:.",
    "type": "method",
    "file_path": "authlib\\authlib\\oauth2\\rfc9101\\authorization_server.py",
    "ast_data": "FunctionDef name:get_client_require_signed_request_object arg:self arg:client arguments arg arg Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "print_pgf",
    "source_code": "def print_pgf(self, fname_or_fh, **kwargs):\n    with cbook.open_file_cm(fname_or_fh, 'w', encoding='utf-8') as file:\n        if not cbook.file_requires_unicode(file):\n            file = codecs.getwriter('utf-8')(file)\n        self._print_pgf_to_fh(file, **kwargs)",
    "docstring": "Output pgf macros for drawing the figure so it can be included and rendered in latex documents.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\backends\\backend_pgf.py",
    "ast_data": "FunctionDef name:print_pgf arg:self arg:fname_or_fh arguments arg arg arg With Call If Call Assign Call Call Call"
  },
  {
    "library": "scikit-learn",
    "name": "_check_parameters",
    "source_code": "def _check_parameters(self, X):\n    _, n_features = X.shape\n    if self.weights_init is not None:\n        self.weights_init = _check_weights(self.weights_init, self.n_components)\n    if self.means_init is not None:\n        self.means_init = _check_means(self.means_init, self.n_components, n_features)\n    if self.precisions_init is not None:\n        self.precisions_init = _check_precisions(self.precisions_init, self.covariance_type, self.n_components, n_features)",
    "docstring": "Check the Gaussian mixture parameters are well defined.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\mixture\\_gaussian_mixture.py",
    "ast_data": "FunctionDef name:_check_parameters arg:self arg:X arguments arg arg Assign If Compare Assign Call If Compare Assign Call If Compare Assign Call"
  },
  {
    "library": "pytorch",
    "name": "_get_node_to_ancestors",
    "source_code": "def _get_node_to_ancestors(graph: torch.fx.Graph) -> dict[torch.fx.Node, OrderedSet[torch.fx.Node]]:\n    node_to_ancestors = defaultdict(OrderedSet[torch.fx.Node])\n    for node in graph.nodes:\n        node_to_ancestors[node] = OrderedSet(node.all_input_nodes)\n        for dep in node.all_input_nodes:\n            node_to_ancestors[node] |= node_to_ancestors[dep]\n    return node_to_ancestors",
    "docstring": "Compute the ancestors for all nodes in a graph.",
    "type": "function",
    "file_path": "pytorch\\torch\\_inductor\\fx_passes\\micro_pipeline_tp.py",
    "ast_data": "FunctionDef name:_get_node_to_ancestors arg:graph arguments arg Assign Call For Assign Call For Return return:yes"
  },
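The same transitive-closure idea works on a plain adjacency mapping; because each node is visited after its inputs (torch.fx graphs are topologically ordered), one union per edge suffices.

from collections import defaultdict

edges_into = {'a': [], 'b': ['a'], 'c': ['a', 'b'], 'd': ['c']}

node_to_ancestors = defaultdict(set)
for node in ['a', 'b', 'c', 'd']:  # topological order
    node_to_ancestors[node] = set(edges_into[node])
    for dep in edges_into[node]:
        node_to_ancestors[node] |= node_to_ancestors[dep]

assert node_to_ancestors['d'] == {'a', 'b', 'c'}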
  {
    "library": "tensorflow",
    "name": "AveragePooling3D",
    "source_code": "class AveragePooling3D(keras_layers.AveragePooling3D, base.Layer):\n\n    def __init__(self, pool_size, strides, padding='valid', data_format='channels_last', name=None, **kwargs):\n        if strides is None:\n            raise ValueError('Argument `strides` must not be None.')\n        super(AveragePooling3D, self).__init__(pool_size=pool_size, strides=strides, padding=padding, data_format=data_format, name=name, **kwargs)",
    "docstring": "Average pooling layer for 3D inputs (e.g. volumes). Args: pool_size: An integer or tuple/list of 3 integers: (pool_depth, pool_height, pool_width) specifying the size of the pooling window. Can be a single integer to specify the same value for all spatial dimensions. strides: An integer or tuple/list of 3 integers, specifying the strides of the pooling operation. Can be a single integer to specify the same value for all spatial dimensions. padding: A string. The padding method, either 'valid' or 'same'. Case-insensitive. data_format: A string. The ordering of the dimensions in the inputs. (default) and are supported. corresponds to inputs with shape while corresponds to inputs with shape . name: A string, the name of the layer.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\legacy_tf_layers\\pooling.py",
    "ast_data": "ClassDef name:AveragePooling3D FunctionDef name:__init__ arg:self arg:pool_size arg:strides arg:padding arg:data_format arg:name arguments arg arg arg arg arg arg arg If Compare Raise Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "logical_and",
    "source_code": "def logical_and(a, b):\n    a_value = get_static_value(a)\n    if a_value is not None:\n        if np.isscalar(a_value):\n            if a_value:\n                return _maybe_static(b)\n            else:\n                return a_value\n        else:\n            return a_value & _maybe_static(b)\n    else:\n        return a & _maybe_static(b)",
    "docstring": "A version of tf.logical_and that eagerly evaluates if possible.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\numpy_ops\\np_utils.py",
    "ast_data": "FunctionDef name:logical_and arg:a arg:b arguments arg arg Assign Call If Compare If Call If Return return:yes Call Return return:yes Return return:yes Call Return return:yes Call"
  },
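A pure-Python sketch of the same short-circuit idea: when `a` is a statically known scalar, the answer is forced without ever combining it with `b` (names here are illustrative, not the TensorFlow API).

import numpy as np

def eager_logical_and(a, b):
    if np.isscalar(a):          # statically known operand
        return b if a else a    # True & b == b ; False & b == False
    return a & b                # fall back to building the op

assert eager_logical_and(False, True) is False
assert eager_logical_and(True, False) is False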
  {
    "library": "pytorch",
    "name": "_transform_op_where_last_two_arguments_are_scale_and_zero_point",
    "source_code": "def _transform_op_where_last_two_arguments_are_scale_and_zero_point(gm: torch.fx.GraphModule, node: torch.fx.Node):\n    to_standard_op = {'mul': torch.ops.aten.mul, 'mul_relu': torch.ops.aten.mul, 'add': torch.ops.aten.add, 'add_relu': torch.ops.aten.add, 'softmax': torch.ops.aten.softmax, 'cat': torch.ops.aten.cat, 'hardswish': torch.ops.aten.hardswish}\n    assert isinstance(node.target, torch._ops.OpOverload)\n    opname, args = (node.target._opname, node.args)\n    scale_node, zero_point_node = (args[-2], args[-1])\n    op_res_node = gm.graph.call_function(to_standard_op[opname], tuple(args[:-2]))\n    return (op_res_node, scale_node, zero_point_node)",
    "docstring": "This transformation function can be used for function where the last two parameters are scale and zero point. Additionally, the function's parameters do not need any unpacking.",
    "type": "function",
    "file_path": "pytorch\\torch\\_export\\passes\\replace_quantized_ops_with_standard_ops_pass.py",
    "ast_data": "FunctionDef name:_transform_op_where_last_two_arguments_are_scale_and_zero_point arg:gm arg:node arguments arg arg Assign Call Assign Assign Assign Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "GenerateMemoryReport",
    "source_code": "def GenerateMemoryReport(metagraph, detailed_report=True, cluster=None):\n    if cluster is None:\n        cluster = gcluster.Cluster(disable_detailed_stats=True, disable_timeline=True)\n    item = gitem.Item(metagraph)\n    peak_usage = cluster.DeterminePeakMemoryUsage(item)\n    report = ''\n    for device, snapshot in peak_usage.items():\n        peak_usage = snapshot[0]\n        report += 'Peak usage for device ' + device + ': ' + str(peak_usage) + ' bytes\\n'\n        if detailed_report:\n            live_tensors = snapshot[1]\n            for tensor in live_tensors:\n                op_name = tensor[0]\n                output_id = tensor[1]\n                mem_used = tensor[2]\n                report += '  ' + str(op_name) + ':' + str(output_id) + ' uses ' + str(mem_used) + ' bytes\\n'\n    return report",
    "docstring": "Analyze the peak memory usage for the provided metagraph. Args: metagraph: A TensorFlow MetaGraphDef. detailed_report: print the live tensors in addition to the peak memory usage. cluster: Analyze the memory using the specified cluster, or the local machine if no cluster was specified. Returns: A string with the formatted memory usage.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\grappler\\cost_analyzer.py",
    "ast_data": "FunctionDef name:GenerateMemoryReport arg:metagraph arg:detailed_report arg:cluster arguments arg arg arg If Compare Assign Call Assign Call Assign Call Assign For Call Assign Call If Assign For Assign Assign Assign Call Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "compile_times",
    "source_code": "def compile_times(repr='str', aggregate: bool=False):\n\n    def fmt_fn(values, item_fn=lambda x: x):\n        if aggregate:\n            return item_fn(sum(values))\n        return ', '.join(map(item_fn, values))\n    if repr == 'str':\n        rows = [(k, fmt_fn(compilation_time_metrics[k], item_fn=lambda x: f'{x:.4f}')) for k in compilation_time_metrics]\n        out = 'TorchDynamo compilation metrics:\\n'\n        out += tabulate(rows, headers=('Function', 'Runtimes (s)'))\n        return out\n    elif repr == 'csv':\n        values = [fmt_fn(v, item_fn=lambda x: f'{x:.6f}') for v in compilation_time_metrics.values()]\n        headers = list(compilation_time_metrics.keys())\n        return (headers, values)\n    return None",
    "docstring": "Get metrics about torchdynamo frontend/backend compilation times. Accumulates information from functions tagged with . repr='str' returns a printable string for user interaction, and 'csv' returns headers, rows which can be logged for output aggregate causes values from multiple compilations (e.g. split graphs) to be accumulated into one value. If false, expect more than one value per metric.",
    "type": "function",
    "file_path": "pytorch\\torch\\_dynamo\\utils.py",
    "ast_data": "FunctionDef name:compile_times arg:repr arg:aggregate arguments arg arg FunctionDef name:fmt_fn arg:values arg:item_fn arguments arg arg arguments arg If Return return:yes Call Call Return return:yes Call Call If Compare Assign Call arguments arg Assign Call Return return:yes If Compare Assign Call arguments arg Call Assign Call Call Return return:yes Return return:no"
  },
  {
    "library": "tensorflow",
    "name": "submodules",
    "source_code": "@property\ndef submodules(self):\n    return tuple(self._flatten(predicate=_is_module))",
    "docstring": "Sequence of all sub-modules. Submodules are modules which are properties of this module, or found as properties of modules which are properties of this module (and so on). >>> a = tf.Module() >>> b = tf.Module() >>> c = tf.Module() >>> a.b = b >>> b.c = c >>> list(a.submodules) == [b, c] True >>> list(b.submodules) == [c] True >>> list(c.submodules) == [] True Returns: A sequence of all submodules.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\module\\module.py",
    "ast_data": "FunctionDef name:submodules arg:self arguments arg Return return:yes Call Call"
  },
  {
    "library": "cherrypy",
    "name": "pause",
    "source_code": "@cherrypy.expose\ndef pause(self, namespace):\n    logging.statistics.get(namespace, {})['Enabled'] = False\n    raise cherrypy.HTTPRedirect('./')",
    "docstring": "Pause gathering the statistics.",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\lib\\cpstats.py",
    "ast_data": "FunctionDef name:pause arg:self arg:namespace arguments arg arg Assign Call Raise Call"
  },
  {
    "library": "django",
    "name": "IrreversibleError",
    "source_code": "class IrreversibleError(RuntimeError):\n    pass",
    "docstring": "An irreversible migration is about to be reversed.",
    "type": "class",
    "file_path": "django\\django\\db\\migrations\\exceptions.py",
    "ast_data": "ClassDef name:IrreversibleError"
  },
  {
    "library": "django",
    "name": "decode",
    "source_code": "def decode(self, encoded):\n    raise NotImplementedError('subclasses of BasePasswordHasher must provide a decode() method.')",
    "docstring": "Return a decoded database value. The result is a dictionary and should contain , , and . Extra keys can be algorithm specific like or .",
    "type": "method",
    "file_path": "django\\django\\contrib\\auth\\hashers.py",
    "ast_data": "FunctionDef name:decode arg:self arg:encoded arguments arg arg Raise Call"
  },
  {
    "library": "pytorch",
    "name": "get_bwd_send_ops",
    "source_code": "def get_bwd_send_ops(self, bwd_chunk_id: int) -> list[dist.P2POp]:\n    self._check_chunk_id(bwd_chunk_id)\n    if not self.has_backward or self.is_first:\n        return []\n    if self.grad_send_info is None:\n        self.grad_send_info = self._create_grad_send_info(self.args_recv_info[0])\n    ops: list[dist.P2POp] = []\n    grads_input = self.bwd_cache.pop(bwd_chunk_id)\n    for grad, grad_recv_stage in zip(grads_input, self.grad_send_info):\n        if isinstance(grad, torch.Tensor) and grad_recv_stage is not None:\n            logger.debug('%s Sending gradient to Stage %s: %s', self.log_prefix, grad_recv_stage, grad.size())\n            peer_rank = self.stage_index_to_group_rank[grad_recv_stage]\n            peer_global_rank = peer_rank if self.group is None else dist.get_global_rank(self.group, peer_rank)\n            ops.append(dist.P2POp(dist.isend, grad, peer_global_rank, self.group))\n        elif not (grad is None and grad_recv_stage is None):\n            raise RuntimeError(f'[{self.stage_index}] for chunk {bwd_chunk_id} has gradients {grad} and is expecting to send gradients to stage {grad_recv_stage}')\n    return ops",
    "docstring": "Get the gradient send ops for current stage's backward.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\pipelining\\stage.py",
    "ast_data": "FunctionDef name:get_bwd_send_ops arg:self arg:bwd_chunk_id arguments arg arg Call If BoolOp Return return:no If Compare Assign Call Assign Call For Call If BoolOp Call Compare Call Call Assign Assign Compare Call Call Call If BoolOp Compare Compare Raise Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "python_type_for_torch",
    "source_code": "def python_type_for_torch(dtyp):\n    if dtyp.is_floating_point:\n        typ = float\n    elif dtyp.is_complex:\n        typ = complex\n    elif dtyp == torch.bool:\n        typ = bool\n    else:\n        typ = int\n    return typ",
    "docstring": "Get a python scalar type a torch dtype",
    "type": "function",
    "file_path": "pytorch\\torch\\_numpy\\_dtypes_impl.py",
    "ast_data": "FunctionDef name:python_type_for_torch arg:dtyp arguments arg If Assign If Assign If Compare Assign Assign Return return:yes"
  },
  {
    "library": "scipy",
    "name": "Parsopoulos",
    "source_code": "class Parsopoulos(Benchmark):\n\n    def __init__(self, dimensions=2):\n        Benchmark.__init__(self, dimensions)\n        self._bounds = list(zip([-5.0] * self.N, [5.0] * self.N))\n        self.global_optimum = [[pi / 2.0, pi]]\n        self.fglob = 0\n\n    def fun(self, x, *args):\n        self.nfev += 1\n        return cos(x[0]) ** 2.0 + sin(x[1]) ** 2.0",
    "docstring": "Parsopoulos objective function. This class defines the Parsopoulos [1]_ global optimization problem. This is a multimodal minimization problem defined as follows: .. math:: f_{\\text{Parsopoulos}}(x) = \\cos(x_1)^2 + \\sin(x_2)^2 with :math: for :math:. *Global optimum*: This function has infinite number of global minima in R2, at points :math:, where :math: and :math: In the given domain problem, function has 12 global minima all equal to zero. .. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions For Global Optimization Problems Int. Journal of Mathematical Modelling and Numerical Optimisation, 2013, 4, 150-194.",
    "type": "class",
    "file_path": "scipy\\benchmarks\\benchmarks\\go_benchmark_functions\\go_funcs_P.py",
    "ast_data": "ClassDef name:Parsopoulos FunctionDef name:__init__ arg:self arg:dimensions arguments arg arg Call Assign Call Call Assign Assign FunctionDef name:fun arg:self arg:x arguments arg arg arg Return return:yes Call Call"
  },
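A quick standalone check of the objective above: at the listed optimum (pi/2, pi) the value is numerically zero. Nothing beyond the standard library is assumed.

```python
# Evaluate the Parsopoulos objective cos(x1)^2 + sin(x2)^2 at a global
# minimum and at an arbitrary point.
from math import cos, sin, pi

def parsopoulos(x1, x2):
    return cos(x1) ** 2 + sin(x2) ** 2

print(parsopoulos(pi / 2, pi))   # ~1.9e-32, numerically zero
print(parsopoulos(0.0, 0.0))     # 1.0, away from any minimum
```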
  {
    "library": "pytorch",
    "name": "_local_shard_size_and_offset",
    "source_code": "@staticmethod\ndef _local_shard_size_and_offset(curr_local_size: int, num_chunks: int, rank: int) -> tuple[int, int]:\n    if curr_local_size % num_chunks == 0:\n        full_chunk_size = curr_local_size // num_chunks\n        return (full_chunk_size, full_chunk_size * rank)\n    full_chunk_size = (curr_local_size + num_chunks - 1) // num_chunks\n    shard_starting_idx = full_chunk_size * rank\n    if curr_local_size < shard_starting_idx:\n        return (0, curr_local_size)\n    else:\n        local_shard_size = min(curr_local_size, shard_starting_idx + full_chunk_size) - shard_starting_idx\n        return (local_shard_size, shard_starting_idx)",
    "docstring": "Given the size of the current local tensor (which may already be sharded on some dimensions), computes the new local shard size and offset given the desired number of chunks (num_chunks is generally equal to the size of the current sharding dim). Note: new local shard offset is relative to the current sharded tensor, not the global tensor. See for computing global offset. Returns (new local shard size, offset)",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\tensor\\placement_types.py",
    "ast_data": "FunctionDef name:_local_shard_size_and_offset arg:curr_local_size arg:num_chunks arg:rank arguments arg arg arg If Compare Assign Return return:yes Assign Assign If Compare Return return:yes Assign Call Return return:yes"
  },
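To make the chunking arithmetic above concrete, here is a self-contained reimplementation of the same math (the function name is illustrative, not the PyTorch API): a size-10 dim split into 4 chunks of ceil(10/4)=3 yields shards of 3, 3, 3, 1 at offsets 0, 3, 6, 9.

```python
# Standalone sketch of the shard size/offset computation.
def local_shard_size_and_offset(curr_local_size: int, num_chunks: int, rank: int):
    if curr_local_size % num_chunks == 0:
        full = curr_local_size // num_chunks
        return full, full * rank
    full = (curr_local_size + num_chunks - 1) // num_chunks  # ceil division
    start = full * rank
    if curr_local_size < start:
        return 0, curr_local_size          # rank past the end: empty shard
    return min(curr_local_size, start + full) - start, start

print([local_shard_size_and_offset(10, 4, r) for r in range(4)])
# [(3, 0), (3, 3), (3, 6), (1, 9)]
```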
  {
    "library": "scikit-learn",
    "name": "fit",
    "source_code": "@_fit_context(prefer_skip_nested_validation=True)\ndef fit(self, X, y=None):\n    self._fit(X, y=y, force_transform=False)\n    return self",
    "docstring": "Estimate the optimal parameter lambda for each feature. The optimal lambda parameter for minimizing skewness is estimated on each feature independently using maximum likelihood. Parameters ---------- X : array-like of shape (n_samples, n_features) The data used to estimate the optimal transformation parameters. y : None Ignored. Returns ------- self : object Fitted transformer.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\preprocessing\\_data.py",
    "ast_data": "FunctionDef name:fit arg:self arg:X arg:y arguments arg arg arg Call Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "scaling",
    "source_code": "@property\ndef scaling(self) -> Literal['magnitude', 'psd', 'unitary'] | None:\n    return self._scaling",
    "docstring": "Normalization applied to the window function ('magnitude', 'psd', 'unitary', or `magnitudepsdunitaryscale_tounitaryfrom_win_equals_dualShortTimeFFT` instance. See Also -------- fac_magnitude: Scaling factor for to a magnitude spectrum. fac_psd: Scaling factor for to a power spectral density spectrum. fft_mode: Mode of utilized FFT scale_to: Scale window to obtain 'magnitude' or 'psd' scaling. from_win_equals_dual: Class-method for creating a unitary instance. ShortTimeFFT: Class this property belongs to.",
    "type": "method",
    "file_path": "scipy\\scipy\\signal\\_short_time_fft.py",
    "ast_data": "FunctionDef name:scaling arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_simple_gather",
    "source_code": "def _simple_gather(per_replica_value, reduce_to_device, axis):\n    all_values = per_replica_value.values\n    if not all_values:\n        raise ValueError('`per_replica_value` must be non-empty')\n    with ops.device(reduce_to_device):\n        with context.device_policy(context.DEVICE_PLACEMENT_SILENT):\n            gathered = array_ops.concat(all_values, axis)\n    return gathered",
    "docstring": "Concatenate all values in the DistributedValues input and return.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\cross_device_ops.py",
    "ast_data": "FunctionDef name:_simple_gather arg:per_replica_value arg:reduce_to_device arg:axis arguments arg arg arg Assign If Raise Call With Call With Call Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "decorator",
    "source_code": "def decorator(dispatch_target):\n    if not callable(dispatch_target):\n        raise TypeError(f'Expected dispatch_target to be callable; got {dispatch_target!r}')\n    dispatch_target = _add_name_scope_wrapper(dispatch_target, api_signature)\n    _check_signature(api_signature, dispatch_target)\n    for signature_checker in signature_checkers:\n        dispatcher.Register(signature_checker, dispatch_target)\n    _TYPE_BASED_DISPATCH_SIGNATURES[api][dispatch_target].extend(signatures)\n    if not signature_checkers:\n        signature = _signature_from_annotations(dispatch_target)\n        checker = _make_signature_checker(api_signature, signature)\n        dispatcher.Register(checker, dispatch_target)\n        _TYPE_BASED_DISPATCH_SIGNATURES[api][dispatch_target].append(signature)\n    return dispatch_target",
    "docstring": "Decorator that registers the given dispatch target.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\util\\dispatch.py",
    "ast_data": "FunctionDef name:decorator arg:dispatch_target arguments arg If Call Raise Call Assign Call Call For Call Call If Assign Call Assign Call Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_generate_kernel_call_helper",
    "source_code": "def _generate_kernel_call_helper(self, kernel_name: str, call_args: list[str], **kwargs: dict[str, Any]) -> None:\n    new_args = []\n    for idx, arg in enumerate(call_args[:-2]):\n        new_args.append(f'aoti_torch_mps_set_arg({kernel_name}_handle, {idx}, {arg});\\n')\n    threads, group_size = (call_args[-2], call_args[-1])\n    if threads is None:\n        raise NotImplementedError('No threads or group_size provided')\n    elif group_size is None:\n        new_args.append(f'{kernel_name}->dispatch({threads});\\n')\n    else:\n        new_args.append(f'{kernel_name}->dispatch({threads}, {group_size});\\n')\n    debug_printer_manager = V.graph.wrapper_code.debug_printer\n    debug_printer_manager.set_printer_args(call_args[:-2], kernel_name, None, None, 'cpp')\n    with debug_printer_manager:\n        self.writeline(self.wrap_kernel_call(kernel_name, new_args))",
    "docstring": "Generates MPS kernel call code. It should look something like:",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\codegen\\cpp_wrapper_mps.py",
    "ast_data": "FunctionDef name:_generate_kernel_call_helper arg:self arg:kernel_name arg:call_args arguments arg arg arg arg Assign For Call Call Assign If Compare Raise Call If Compare Call Call Assign Call With Call Call"
  },
  {
    "library": "pandas",
    "name": "relative_luminance",
    "source_code": "def relative_luminance(rgba) -> float:\n    r, g, b = (x / 12.92 if x <= 0.04045 else ((x + 0.055) / 1.055) ** 2.4 for x in rgba[:3])\n    return 0.2126 * r + 0.7152 * g + 0.0722 * b",
    "docstring": "Calculate relative luminance of a color. The calculation adheres to the W3C standards ( Parameters ---------- color : rgb or rgba tuple Returns ------- float The relative luminance as a value from 0 to 1",
    "type": "function",
    "file_path": "pandas\\pandas\\io\\formats\\style.py",
    "ast_data": "FunctionDef name:relative_luminance arg:rgba arguments arg Assign Compare Return return:yes"
  },
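A standalone sketch of the W3C relative-luminance formula from the record above; white should map to 1.0 and black to 0.0.

```python
# Piecewise sRGB linearization, then the weighted luminance sum.
def relative_luminance(rgba):
    r, g, b = (
        x / 12.92 if x <= 0.04045 else ((x + 0.055) / 1.055) ** 2.4
        for x in rgba[:3]
    )
    return 0.2126 * r + 0.7152 * g + 0.0722 * b

print(relative_luminance((1.0, 1.0, 1.0)))  # 1.0 (white)
print(relative_luminance((0.0, 0.0, 0.0)))  # 0.0 (black)
```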
  {
    "library": "tensorflow",
    "name": "experimental_as_proto",
    "source_code": "def experimental_as_proto(self) -> tensor_shape_pb2.TensorShapeProto:\n    return self.as_proto()",
    "docstring": "Returns a proto representation of the TensorShape instance.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\tensor_shape.py",
    "ast_data": "FunctionDef name:experimental_as_proto arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "set_logical_cpu_devices",
    "source_code": "def set_logical_cpu_devices(self, num_cpus, prefix=''):\n    server_def = self._server_def or self._collective_ops_server_def\n    local_prefix = ['/device']\n    if server_def is not None:\n        local_prefix.append('/job:%s/replica:0/task:%d' % (server_def.job_name, server_def.task_index))\n    logical_local_devices = [d for d in self.list_logical_devices('CPU') if d.name.startswith(tuple(local_prefix))]\n    self.ensure_initialized()\n    if len(logical_local_devices) > 1:\n        raise RuntimeError('Virtual CPUs already set, cannot modify again.')\n    pywrap_tfe.TFE_SetLogicalCpuDevices(self._context_handle, num_cpus, prefix)\n    self._initialize_logical_devices()",
    "docstring": "Set virtual CPU devices in context. If virtual CPU devices are already configured at context initialization by tf.config.set_logical_device_configuration(), this method should not be called. Args: num_cpus: Number of virtual CPUs. prefix: Device name prefix. Raises: RuntimeError: If virtual CPUs are already configured at context initialization.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\context.py",
    "ast_data": "FunctionDef name:set_logical_cpu_devices arg:self arg:num_cpus arg:prefix arguments arg arg arg Assign BoolOp Assign If Compare Call Assign Call Call Call Call If Compare Call Raise Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "get_default_backend_for_device",
    "source_code": "def get_default_backend_for_device(device: Union[str, torch.device]) -> str:\n    if isinstance(device, torch.device):\n        device_str = device.type\n    else:\n        device_str = torch.device(device).type\n    backend = Backend.default_device_backend_map.get(device_str)\n    if backend is None:\n        raise ValueError(f'Default backend not registered for device : {device}')\n    return backend",
    "docstring": "Return the default backend for the given device. Args: Union[str, torch.device]: The device to get the default backend for. Returns: The default backend for the given device as a lower case string.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\distributed_c10d.py",
    "ast_data": "FunctionDef name:get_default_backend_for_device arg:device arguments arg If Call Assign Assign Call Assign Call If Compare Raise Call Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "feature_importances_",
    "source_code": "@property\ndef feature_importances_(self):\n    check_is_fitted(self)\n    return self.tree_.compute_feature_importances()",
    "docstring": "Return the feature importances. The importance of a feature is computed as the (normalized) total reduction of the criterion brought by that feature. It is also known as the Gini importance. Warning: impurity-based feature importances can be misleading for high cardinality features (many unique values). See :func: as an alternative. Returns ------- feature_importances_ : ndarray of shape (n_features,) Normalized total reduction of criteria by feature (Gini importance).",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\tree\\_classes.py",
    "ast_data": "FunctionDef name:feature_importances_ arg:self arguments arg Call Return return:yes Call"
  },
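Typical usage of the public attribute above on a fitted tree (standard scikit-learn API; the iris dataset is just a convenient example).

```python
# Fit a decision tree and read the Gini importances; they sum to 1.0.
from sklearn.datasets import load_iris
from sklearn.tree import DecisionTreeClassifier

X, y = load_iris(return_X_y=True)
clf = DecisionTreeClassifier(random_state=0).fit(X, y)
print(clf.feature_importances_)  # ndarray of shape (4,)
```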
  {
    "library": "scipy",
    "name": "fun",
    "source_code": "def fun(self, x):\n    raise NotImplementedError",
    "docstring": "Evaluation of the benchmark function. Parameters ---------- x : sequence The candidate vector for evaluating the benchmark problem. Must have ``. Returns ------- val : float the evaluated benchmark function",
    "type": "method",
    "file_path": "scipy\\benchmarks\\benchmarks\\go_benchmark_functions\\go_benchmark.py",
    "ast_data": "FunctionDef name:fun arg:self arg:x arguments arg arg Raise"
  },
  {
    "library": "scikit-learn",
    "name": "predict_proba",
    "source_code": "def predict_proba(self, X):\n    check_is_fitted(self)\n    X_2d = validate_data(self, X, accept_sparse=['csc', 'csr', 'coo', 'dok', 'bsr', 'lil', 'dia'], reset=False)\n    weight_matrices = self._get_kernel(self.X_, X_2d)\n    if self.kernel == 'knn':\n        probabilities = np.array([np.sum(self.label_distributions_[weight_matrix], axis=0) for weight_matrix in weight_matrices])\n    else:\n        weight_matrices = weight_matrices.T\n        probabilities = safe_sparse_dot(weight_matrices, self.label_distributions_)\n    normalizer = np.atleast_2d(np.sum(probabilities, axis=1)).T\n    probabilities /= normalizer\n    return probabilities",
    "docstring": "Predict probability for each possible outcome. Compute the probability estimates for each single sample in X and each possible outcome seen during training (categorical distribution). Parameters ---------- X : array-like of shape (n_samples, n_features) The data matrix. Returns ------- probabilities : ndarray of shape (n_samples, n_classes) Normalized probability distributions across class labels.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\semi_supervised\\_label_propagation.py",
    "ast_data": "FunctionDef name:predict_proba arg:self arg:X arguments arg arg Call Assign Call Assign Call If Compare Assign Call Call Assign Assign Call Assign Call Call Return return:yes"
  },
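A minimal semi-supervised example for the record above: unlabeled samples are marked -1, and `predict_proba` returns a row-normalized class distribution per sample (public scikit-learn API; the toy data is illustrative).

```python
# Two well-separated 1-D clusters, mostly unlabeled.
import numpy as np
from sklearn.semi_supervised import LabelPropagation

X = np.array([[0.0], [0.1], [0.2], [5.0], [5.1], [5.2]])
y = np.array([0, -1, -1, 1, -1, -1])  # -1 marks unlabeled points

model = LabelPropagation(kernel="rbf").fit(X, y)
proba = model.predict_proba(X)
print(proba.shape)        # (6, 2)
print(proba.sum(axis=1))  # each row sums to 1
```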
  {
    "library": "pytorch",
    "name": "getattr",
    "source_code": "@compatibility(is_backward_compatible=False)\ndef getattr(self, attr: str, attr_val: Any, parameter_proxy_cache: dict[str, Any]):\n\n    def maybe_get_proxy_for_attr(attr_val, collection_to_search, parameter_proxy_cache):\n        for n, p in collection_to_search:\n            if attr_val is p:\n                if n not in parameter_proxy_cache:\n                    kwargs = {}\n                    if 'proxy_factory_fn' in inspect.signature(self.create_proxy).parameters:\n                        kwargs['proxy_factory_fn'] = None if not self.param_shapes_constant else lambda node: ParameterProxy(self, node, n, attr_val)\n                    val_proxy = self.create_proxy('get_attr', n, (), {}, **kwargs)\n                    parameter_proxy_cache[n] = val_proxy\n                return parameter_proxy_cache[n]\n        return None\n    if isinstance(attr_val, torch.nn.Parameter):\n        maybe_parameter_proxy = maybe_get_proxy_for_attr(attr_val, self.root.named_parameters(), parameter_proxy_cache)\n        if maybe_parameter_proxy is not None:\n            return maybe_parameter_proxy\n    if self.proxy_buffer_attributes and isinstance(attr_val, torch.Tensor):\n        maybe_buffer_proxy = maybe_get_proxy_for_attr(attr_val, self.root.named_buffers(), parameter_proxy_cache)\n        if maybe_buffer_proxy is not None:\n            return maybe_buffer_proxy\n    return attr_val",
    "docstring": "Method that specifies the behavior of this ``, so that future calls will reuse the proxy rather than creating a new one. This method can be overridden to --for example-- not return proxies when querying parameters. Args: attr (str): The name of the attribute being queried attr_val (Any): The value of the attribute parameter_proxy_cache (Dict[str, Any]): A cache of attr names to proxies Return: The return value from the getattr call.",
    "type": "method",
    "file_path": "pytorch\\torch\\fx\\_symbolic_trace.py",
    "ast_data": "FunctionDef name:getattr arg:self arg:attr arg:attr_val arg:parameter_proxy_cache arguments arg arg arg arg FunctionDef name:maybe_get_proxy_for_attr arg:attr_val arg:collection_to_search arg:parameter_proxy_cache arguments arg arg arg For If Compare If Compare Assign If Compare Call Assign arguments arg Call Assign Call Assign Return return:yes Return return:no If Call Assign Call Call If Compare Return return:yes If BoolOp Call Assign Call Call If Compare Return return:yes Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "get_quantized_operator",
    "source_code": "def get_quantized_operator(float_op: Union[Callable, str]) -> Callable:\n    quantized_op = DEFAULT_FLOAT_TO_QUANTIZED_OPERATOR_MAPPINGS.get(float_op, None)\n    assert quantized_op is not None, f'Operator {str(float_op)} does not have corresponding quantized op'\n    return quantized_op",
    "docstring": "Get the quantized operator corresponding to the float operator",
    "type": "function",
    "file_path": "pytorch\\torch\\ao\\quantization\\quantization_mappings.py",
    "ast_data": "FunctionDef name:get_quantized_operator arg:float_op arguments arg Assign Call Compare Call Return return:yes"
  },
  {
    "library": "pandas",
    "name": "_addsub_int_array_or_scalar",
    "source_code": "def _addsub_int_array_or_scalar(self, other: np.ndarray | int, op: Callable[[Any, Any], Any]) -> Self:\n    assert op in [operator.add, operator.sub]\n    if op is operator.sub:\n        other = -other\n    res_values = add_overflowsafe(self.asi8, np.asarray(other, dtype='i8'))\n    return type(self)(res_values, dtype=self.dtype)",
    "docstring": "Add or subtract array of integers. Parameters ---------- other : np.ndarray[int64] or int op : {operator.add, operator.sub} Returns ------- result : PeriodArray",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\arrays\\period.py",
    "ast_data": "FunctionDef name:_addsub_int_array_or_scalar arg:self arg:other arg:op arguments arg arg arg Compare If Compare Assign Assign Call Call Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "_HuggingFaceSavePlanner",
    "source_code": "class _HuggingFaceSavePlanner(DefaultSavePlanner):\n\n    def _dedup_save_plans(self, all_plans: list[SavePlan]) -> list[SavePlan]:\n        assert len(all_plans) > 0, 'all_plans should not be empty'\n        assert all_plans[0].storage_data is not None, 'storage_data should not be None'\n        assert isinstance(all_plans[0].storage_data, _FqnToFileMapping), 'storage_data should be of type _FqnToFileMapping'\n        fqn_to_index_mapping: dict[str, int] = all_plans[0].storage_data.fqn_to_file_index_mapping\n        return dedup_save_plans_with_fqn_to_index_mapping(all_plans, fqn_to_index_mapping)",
    "docstring": "A save planner that dedups the save plans based on the fqn to file index mapping.",
    "type": "class",
    "file_path": "pytorch\\torch\\distributed\\checkpoint\\_hf_planner.py",
    "ast_data": "ClassDef name:_HuggingFaceSavePlanner FunctionDef name:_dedup_save_plans arg:self arg:all_plans arguments arg arg Compare Call Compare Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "sym_sum",
    "source_code": "def sym_sum(args):\n    if overrides.has_torch_function(args):\n        return overrides.handle_torch_function(sym_sum, args, args)\n    found = None\n    for a in args:\n        if not isinstance(a, (SymInt, builtins.int)):\n            return builtins.sum(args)\n        if isinstance(a, SymInt):\n            found = a.node\n    if found is None:\n        return builtins.sum(args)\n    from torch.fx.experimental.sym_node import to_node, wrap_node\n    return wrap_node(found.sym_sum(tuple((to_node(found, a) for a in args))))",
    "docstring": "N-ary add which is faster to compute for long lists than iterated binary addition. Only does something special for integers.",
    "type": "function",
    "file_path": "pytorch\\torch\\__init__.py",
    "ast_data": "FunctionDef name:sym_sum arg:args arguments arg If Call Return return:yes Call Assign For If Call Return return:yes Call If Call Assign If Compare Return return:yes Call Return return:yes Call Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "fast_cond_v2",
    "source_code": "def fast_cond_v2(pred, true_fn, false_fn, name=None):\n    if isinstance(pred, bool):\n        raise TypeError('pred must not be a Python bool', pred)\n    if not name:\n        name = 'fast_cond'\n    with ops.name_scope(name) as scope:\n        true_name = util.unique_fn_name(scope, 'true')\n        false_name = util.unique_fn_name(scope, 'false')\n        pred = _normalize_pred(pred)\n        true_graph = func_graph_module.func_graph_from_py_func(true_name, true_fn, [], {}, func_graph=util.CondBranchFuncGraph(true_name, collections=ops.get_default_graph()._collections), add_control_dependencies=False, op_return_value=pred)\n        false_graph = func_graph_module.func_graph_from_py_func(false_name, false_fn, [], {}, func_graph=util.CondBranchFuncGraph(false_name, collections=ops.get_default_graph()._collections), add_control_dependencies=False, op_return_value=pred)\n        verify_captures(_COND, [true_graph, false_graph])\n        return _build_cond(pred, true_graph, false_graph, true_graph.external_captures, false_graph.external_captures, building_gradient=False, add_identities=False, prevent_lowering=True, name=scope)",
    "docstring": "Like cond_v2, except emits an If op and applies various optimizations. This function is intended to be used for cases where the cond is used to implement a simple conditional control flow operator. It makes the following assumptions: 1. The conditional is never differentiated. 2. The caller does not rely on V1 control flow semantics, i.e. for cross device execution, pruning subgraphs of the true or false branches, or non-strict evaluation order. 3. The caller manually configures any control dependencies within the graphs. In this case, the cond will be lowered to a single If (or StatelessIf) op and the true and false graphs will be executed as TF functions. Args: pred: boolean Tensor true_fn: function to execute if pred is true false_fn: function to execute if pred is false name: the name for the If op. Returns: A list of Tensors which are the outputs of the If op. Does not include intermediate outputs.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\cond_v2.py",
    "ast_data": "FunctionDef name:fast_cond_v2 arg:pred arg:true_fn arg:false_fn arg:name arguments arg arg arg arg If Call Raise Call If Assign With Call Assign Call Assign Call Assign Call Assign Call Call Call Assign Call Call Call Call Return return:yes Call"
  },
  {
    "library": "seaborn",
    "name": "Agg",
    "source_code": "@dataclass\nclass Agg(Stat):\n    func: str | Callable[[Vector], float] = 'mean'\n    group_by_orient: ClassVar[bool] = True\n\n    def __call__(self, data: DataFrame, groupby: GroupBy, orient: str, scales: dict[str, Scale]) -> DataFrame:\n        var = {'x': 'y', 'y': 'x'}.get(orient)\n        res = groupby.agg(data, {var: self.func}).dropna(subset=[var]).reset_index(drop=True)\n        return res",
    "docstring": "Aggregate data along the value axis using given method. Parameters ---------- func : str or callable Name of a :class: method or a vector -> scalar function. See Also -------- objects.Est : Aggregation with error bars. Examples -------- .. include:: ../docstrings/objects.Agg.rst",
    "type": "class",
    "file_path": "seaborn\\seaborn\\_stats\\aggregation.py",
    "ast_data": "ClassDef name:Agg FunctionDef name:__call__ arg:self arg:data arg:groupby arg:orient arg:scales arguments arg arg arg arg arg Assign Call Assign Call Call Call Return return:yes"
  },
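Typical objects-interface usage of the stat above: aggregate y per x group with the mean before drawing bars. The dataset call needs network access on first use; the output filename is a placeholder.

```python
# Aggregate total_bill per day with the mean, then draw bars.
import seaborn.objects as so
from seaborn import load_dataset

tips = load_dataset("tips")  # downloads on first call
p = so.Plot(tips, x="day", y="total_bill").add(so.Bar(), so.Agg("mean"))
p.save("agg_demo.png")  # or p.show() in an interactive session
```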
  {
    "library": "pytorch",
    "name": "relu_inference_rule",
    "source_code": "@register_inference_rule(torch.nn.Dropout)\n@register_inference_rule(torch.nn.ReLU)\ndef relu_inference_rule(n: Node, module_instance, symbols, constraints, counter):\n    assert isinstance(n.args[0], Node)\n    output, counter = gen_tvar(counter)\n    symbols[n] = output\n    input = symbols[n.args[0]]\n    assert isinstance(input, TVar)\n    return ([BinConstraintT(input, output, op_eq)], counter)",
    "docstring": "Input and output shapes should be equal.",
    "type": "function",
    "file_path": "pytorch\\torch\\fx\\experimental\\migrate_gradual_types\\constraint_generator.py",
    "ast_data": "FunctionDef name:relu_inference_rule arg:n arg:module_instance arg:symbols arg:constraints arg:counter arguments arg arg arg arg arg Call Assign Call Assign Assign Call Return return:yes Call Call Call"
  },
  {
    "library": "django",
    "name": "_get_next_day",
    "source_code": "def _get_next_day(self, date):\n    return date + datetime.timedelta(days=1)",
    "docstring": "Return the start date of the next interval. The interval is defined by start date <= item date < next start date.",
    "type": "method",
    "file_path": "django\\django\\views\\generic\\dates.py",
    "ast_data": "FunctionDef name:_get_next_day arg:self arg:date arguments arg arg Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "_override_key",
    "source_code": "def _override_key(self, func):\n    self._key = func",
    "docstring": "Set method by decorating a function.",
    "type": "method",
    "file_path": "scipy\\scipy\\special\\_multiufuncs.py",
    "ast_data": "FunctionDef name:_override_key arg:self arg:func arguments arg arg Assign"
  },
  {
    "library": "pytorch",
    "name": "vmap",
    "source_code": "@staticmethod\ndef vmap(info, in_dims, *args):\n    raise NotImplementedError('To use autograd.Function with vmap, you must either override the vmap staticmethod or set generate_vmap_rule=True.')",
    "docstring": "Define the behavior for this autograd.Function underneath :func:. For a :func: to support :func:, you must either override this static method, or set `torch.vmap~Function.forwardfunc-autograd-function` for more details.",
    "type": "method",
    "file_path": "pytorch\\torch\\autograd\\function.py",
    "ast_data": "FunctionDef name:vmap arg:info arg:in_dims arguments arg arg arg Raise Call"
  },
  {
    "library": "pytorch",
    "name": "super_signature",
    "source_code": "def super_signature(signatures):\n    n = len(signatures[0])\n    assert all((len(s) == n for s in signatures))\n    return [max((type.mro(sig[i]) for sig in signatures), key=len)[0] for i in range(n)]",
    "docstring": "A signature that would break ambiguities",
    "type": "function",
    "file_path": "pytorch\\torch\\fx\\experimental\\unification\\multipledispatch\\conflict.py",
    "ast_data": "FunctionDef name:super_signature arg:signatures arguments arg Assign Call Call Compare Call Return return:yes Call Call Call"
  },
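A standalone demo of the ambiguity breaker above: per argument position it takes the type with the longest MRO (the most derived class), producing a signature at least as specific as every input signature. The classes here are illustrative.

```python
# MRO-based "super signature" from multipledispatch, reimplemented inline.
def super_signature(signatures):
    n = len(signatures[0])
    assert all(len(s) == n for s in signatures)
    return [max((type.mro(sig[i]) for sig in signatures), key=len)[0]
            for i in range(n)]

class A: pass
class B(A): pass   # mro: [B, A, object]
class C(B): pass   # mro: [C, B, A, object] -- longest, so C wins

print(super_signature([(B, int), (C, int)]))  # [<class 'C'>, <class 'int'>]
```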
  {
    "library": "scikit-learn",
    "name": "_implicit_column_offset",
    "source_code": "def _implicit_column_offset(X, offset):\n    offset = offset[None, :]\n    XT = X.T\n    return LinearOperator(matvec=lambda x: X @ x - offset @ x, matmat=lambda x: X @ x - offset @ x, rmatvec=lambda x: XT @ x - offset * x.sum(), rmatmat=lambda x: XT @ x - offset.T @ x.sum(axis=0)[None, :], dtype=X.dtype, shape=X.shape)",
    "docstring": "Create an implicitly offset linear operator. This is used by PCA on sparse data to avoid densifying the whole data matrix. Params ------ X : sparse matrix of shape (n_samples, n_features) offset : ndarray of shape (n_features,) Returns ------- centered : LinearOperator",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\utils\\sparsefuncs.py",
    "ast_data": "FunctionDef name:_implicit_column_offset arg:X arg:offset arguments arg arg Assign Assign Return return:yes Call arguments arg arguments arg arguments arg Call arguments arg Call"
  },
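A sketch verifying the implicit-centering idea above against explicit densified centering; only SciPy's public `LinearOperator` is used, and the random data is a stand-in.

```python
# Centered operator X - 1*offset^T, built without densifying X.
import numpy as np
from scipy.sparse import random as sparse_random
from scipy.sparse.linalg import LinearOperator

X = sparse_random(20, 5, density=0.3, random_state=0, format="csr")
offset = np.asarray(X.mean(axis=0)).ravel()

centered = LinearOperator(
    matvec=lambda v: X @ v - offset @ v,
    rmatvec=lambda v: X.T @ v - offset * v.sum(),
    shape=X.shape, dtype=X.dtype,
)

v = np.random.default_rng(0).normal(size=5)
dense_centered = X.toarray() - offset          # explicit densification
print(np.allclose(centered @ v, dense_centered @ v))  # True
```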
  {
    "library": "authlib",
    "name": "validate_jti",
    "source_code": "def validate_jti(self):\n    self._validate_claim_value('jti')",
    "docstring": "The \"jti\" (JWT ID) claim provides a unique identifier for the JWT. The identifier value MUST be assigned in a manner that ensures that there is a negligible probability that the same value will be accidentally assigned to a different data object; if the application uses multiple issuers, collisions MUST be prevented among values produced by different issuers as well. The \"jti\" claim can be used to prevent the JWT from being replayed. The \"jti\" value is a case- sensitive string. Use of this claim is OPTIONAL.",
    "type": "method",
    "file_path": "authlib\\authlib\\jose\\rfc7519\\claims.py",
    "ast_data": "FunctionDef name:validate_jti arg:self arguments arg Call"
  },
  {
    "library": "pytorch",
    "name": "_fw_post_hook",
    "source_code": "def _fw_post_hook(self, mod, input, output):\n    super()._fw_post_hook(mod, input, output)",
    "docstring": "This function is called when the forward pass of a module is called. It updates the module tracker and removes the module from parent data",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\tensor\\debug\\_comm_mode.py",
    "ast_data": "FunctionDef name:_fw_post_hook arg:self arg:mod arg:input arg:output arguments arg arg arg arg Call Call"
  },
  {
    "library": "tensorflow",
    "name": "as_signature_def",
    "source_code": "@abc.abstractmethod\ndef as_signature_def(self, receiver_tensors):\n    pass",
    "docstring": "Generate a SignatureDef proto for inclusion in a MetaGraphDef. The SignatureDef will specify outputs as described in this ExportOutput, and will use the provided receiver_tensors as inputs. Args: receiver_tensors: a , or a dict of string to , specifying input nodes that will be fed.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\saving\\utils_v1\\export_output.py",
    "ast_data": "FunctionDef name:as_signature_def arg:self arg:receiver_tensors arguments arg arg"
  },
  {
    "library": "django",
    "name": "_check_token_format",
    "source_code": "def _check_token_format(token):\n    if len(token) not in (CSRF_TOKEN_LENGTH, CSRF_SECRET_LENGTH):\n        raise InvalidTokenFormat(REASON_INCORRECT_LENGTH)\n    if invalid_token_chars_re.search(token):\n        raise InvalidTokenFormat(REASON_INVALID_CHARACTERS)",
    "docstring": "Raise an InvalidTokenFormat error if the token has an invalid length or characters that aren't allowed. The token argument can be a CSRF cookie secret or non-cookie CSRF token, and either masked or unmasked.",
    "type": "function",
    "file_path": "django\\django\\middleware\\csrf.py",
    "ast_data": "FunctionDef name:_check_token_format arg:token arguments arg If Compare Call Raise Call If Call Raise Call"
  },
  {
    "library": "tensorflow",
    "name": "_convert",
    "source_code": "def _convert(self, value, dtype):\n    if isinstance(value, resource_variable_ops.ResourceVariable):\n        raise RuntimeError(f'Attempting to return a variable from an eagerly executed py_func. Only numeric data structures like Tensors or NumPy arrays should be returned; to return the value of a variable, make sure to obtain the Tensor backing it by calling `.read_value()` on the variable in question: {value}')\n    if value is None and self._is_grad_func:\n        return constant_op.constant(0.0, dtype=dtype)\n    return ops.convert_to_tensor(value, dtype=dtype)",
    "docstring": "Converts to a tensor of type , with error checking. Args: value: The tensor to convert. dtype: The desired dtype. Returns: A tensor of type , or a zeros tensor if value is None and this function is in fact a gradient function. Raises: RuntimeError: if is a variable.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\script_ops.py",
    "ast_data": "FunctionDef name:_convert arg:self arg:value arg:dtype arguments arg arg arg If Call Raise Call If BoolOp Compare Return return:yes Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_next_instance_key",
    "source_code": "def _next_instance_key(self):\n    if self._use_unique_instance_key():\n        graph = ops.get_default_graph()\n        while getattr(graph, 'is_control_flow_graph', False):\n            graph = graph.outer_graph\n        if not context.executing_eagerly() and graph.building_function:\n            with graph.as_default():\n                return graph.capture_call_time_value(self._next_instance_key, tensor_spec.TensorSpec([], dtypes.int32))\n        else:\n            instance_key = self._collective_keys.get_instance_key(self._group_key, self._device)\n            with ops.device('CPU:0'):\n                return ops.convert_to_tensor(instance_key, dtype=dtypes.int32)\n    else:\n        return self._collective_keys.get_instance_key(self._group_key, self._device)",
    "docstring": "Returns the next instance key.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\cross_device_utils.py",
    "ast_data": "FunctionDef name:_next_instance_key arg:self arguments arg If Call Assign Call While Call Assign If BoolOp Call With Call Return return:yes Call Call Assign Call With Call Return return:yes Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "InplaceBernoulliFallback",
    "source_code": "class InplaceBernoulliFallback(ExternKernel):\n\n    def codegen(self, wrapper) -> None:\n        x, = (t.codegen_reference() for t in self.inputs)\n        if V.graph.cpp_wrapper:\n            wrapper.writeline(f'{self.get_kernel_name()}({x}, {', '.join(map(repr, self.constant_args))}, NULL){wrapper.ending}')\n        else:\n            wrapper.writeline(f'{self.get_kernel_name()}({x}, {', '.join(map(repr, self.constant_args))}){wrapper.ending}')\n\n    def should_allocate(self) -> bool:\n        return False\n\n    def get_mutation_names(self):\n        return [self.inputs[0].get_name()]\n\n    def get_unbacked_symbol_defs(self) -> OrderedSet[sympy.Symbol]:\n        return OrderedSet()\n\n    def __init__(self, op_overload, x, *constant_args) -> None:\n        super().__init__(None, NoneLayout(device=x.get_device()), self.unwrap_storage([x]), constant_args, op_overload=op_overload)\n        V.graph.mark_buffer_mutated(x.get_name())\n        self.name = V.graph.register_buffer(self)\n        V.graph.register_operation(self)",
    "docstring": "This needs to be a custom class to handle mutation properly",
    "type": "class",
    "file_path": "pytorch\\torch\\_inductor\\ir.py",
    "ast_data": "ClassDef name:InplaceBernoulliFallback FunctionDef name:codegen arg:self arg:wrapper arguments arg arg Assign Call If Call Call Call Call Call Call Call Call FunctionDef name:should_allocate arg:self arguments arg Return return:yes FunctionDef name:get_mutation_names arg:self arguments arg Return return:yes Call FunctionDef name:get_unbacked_symbol_defs arg:self arguments arg Return return:yes Call FunctionDef name:__init__ arg:self arg:op_overload arg:x arguments arg arg arg arg Call Call Call Call Call Call Call Assign Call Call"
  },
  {
    "library": "scikit-learn",
    "name": "yield_namespaces",
    "source_code": "def yield_namespaces(include_numpy_namespaces=True):\n    for array_namespace in ['numpy', 'array_api_strict', 'cupy', 'torch']:\n        if not include_numpy_namespaces and array_namespace in _NUMPY_NAMESPACE_NAMES:\n            continue\n        yield array_namespace",
    "docstring": "Yield supported namespace. This is meant to be used for testing purposes only. Parameters ---------- include_numpy_namespaces : bool, default=True If True, also yield numpy namespaces. Returns ------- array_namespace : str The name of the Array API namespace.",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\utils\\_array_api.py",
    "ast_data": "FunctionDef name:yield_namespaces arg:include_numpy_namespaces arguments arg For If BoolOp Compare"
  },
  {
    "library": "tensorflow",
    "name": "_unbatch_static_inner_shape",
    "source_code": "def _unbatch_static_inner_shape(old_shape: tensor_shape.TensorShape, batch_size: Optional[int]) -> tensor_shape.TensorShape:\n    head_dim = tensor_shape.dimension_at_index(old_shape, 0) // batch_size\n    return head_dim + old_shape[1:]",
    "docstring": "Unbatch a static_inner_shape when num_row_partitions > 0.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\dynamic_ragged_shape.py",
    "ast_data": "FunctionDef name:_unbatch_static_inner_shape arg:old_shape arg:batch_size arguments arg arg Assign Call Return return:yes"
  },
  {
    "library": "pandas",
    "name": "_init_mgr",
    "source_code": "@final\n@classmethod\ndef _init_mgr(cls, mgr: Manager, axes: dict[Literal['index', 'columns'], Axes | None], dtype: DtypeObj | None=None, copy: bool=False) -> Manager:\n    for a, axe in axes.items():\n        if axe is not None:\n            axe = ensure_index(axe)\n            bm_axis = cls._get_block_manager_axis(a)\n            mgr = mgr.reindex_axis(axe, axis=bm_axis)\n    if copy:\n        mgr = mgr.copy()\n    if dtype is not None:\n        if isinstance(mgr, BlockManager) and len(mgr.blocks) == 1 and (mgr.blocks[0].values.dtype == dtype):\n            pass\n        else:\n            mgr = mgr.astype(dtype=dtype)\n    return mgr",
    "docstring": "passed a manager and a axes dict",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\generic.py",
    "ast_data": "FunctionDef name:_init_mgr arg:cls arg:mgr arg:axes arg:dtype arg:copy arguments arg arg arg arg arg For Call If Compare Assign Call Assign Call Assign Call If Assign Call If Compare If BoolOp Call Compare Call Compare Assign Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_set_stream_by_id",
    "source_code": "def _set_stream_by_id(stream_id, device_index, device_type):\n    torch._C._cuda_setStream(stream_id=stream_id, device_index=device_index, device_type=device_type)",
    "docstring": "set stream specified by the stream id, device index and device type Args: stream_id (int): stream id in stream pool device_index (int): device index in topo device_type (int): enum device type",
    "type": "function",
    "file_path": "pytorch\\torch\\cuda\\__init__.py",
    "ast_data": "FunctionDef name:_set_stream_by_id arg:stream_id arg:device_index arg:device_type arguments arg arg arg Call"
  },
  {
    "library": "django",
    "name": "_get_add_plan",
    "source_code": "def _get_add_plan(self, db, source_field_name):\n    can_ignore_conflicts = self.through._meta.auto_created is not False and connections[db].features.supports_ignore_conflicts\n    must_send_signals = (self.reverse or source_field_name == self.source_field_name) and signals.m2m_changed.has_listeners(self.through)\n    return (can_ignore_conflicts, must_send_signals, can_ignore_conflicts and (not must_send_signals))",
    "docstring": "Return a boolean triple of the way the add should be performed. The first element is whether or not bulk_create(ignore_conflicts) can be used, the second whether or not signals must be sent, and the third element is whether or not the immediate bulk insertion with conflicts ignored can be performed.",
    "type": "method",
    "file_path": "django\\django\\db\\models\\fields\\related_descriptors.py",
    "ast_data": "FunctionDef name:_get_add_plan arg:self arg:db arg:source_field_name arguments arg arg arg Assign BoolOp Compare Assign BoolOp BoolOp Compare Call Return return:yes BoolOp"
  },
  {
    "library": "scikit-learn",
    "name": "make_sparse_spd_matrix",
    "source_code": "@validate_params({'n_dim': [Interval(Integral, 1, None, closed='left')], 'alpha': [Interval(Real, 0, 1, closed='both')], 'norm_diag': ['boolean'], 'smallest_coef': [Interval(Real, 0, 1, closed='both')], 'largest_coef': [Interval(Real, 0, 1, closed='both')], 'sparse_format': [StrOptions({'bsr', 'coo', 'csc', 'csr', 'dia', 'dok', 'lil'}), None], 'random_state': ['random_state']}, prefer_skip_nested_validation=True)\ndef make_sparse_spd_matrix(n_dim=1, *, alpha=0.95, norm_diag=False, smallest_coef=0.1, largest_coef=0.9, sparse_format=None, random_state=None):\n    random_state = check_random_state(random_state)\n    chol = -sp.eye(n_dim)\n    aux = sp.random(m=n_dim, n=n_dim, density=1 - alpha, data_rvs=lambda x: random_state.uniform(low=smallest_coef, high=largest_coef, size=x), random_state=random_state)\n    aux = sp.tril(aux, k=-1, format='csc')\n    permutation = random_state.permutation(n_dim)\n    aux = aux[permutation].T[permutation]\n    chol += aux\n    prec = chol.T @ chol\n    if norm_diag:\n        d = sp.diags(1.0 / np.sqrt(prec.diagonal()))\n        prec = d @ prec @ d\n    if sparse_format is None:\n        return prec.toarray()\n    else:\n        return prec.asformat(sparse_format)",
    "docstring": "Generate a sparse symmetric definite positive matrix. Read more in the :ref:. Parameters ---------- n_dim : int, default=1 The size of the random matrix to generate. .. versionchanged:: 1.4 Renamed from `Glossary `, this would be an ndarray. Otherwise, this will be a sparse matrix of the specified format. See Also -------- make_spd_matrix : Generate a random symmetric, positive-definite matrix. Notes ----- The sparsity is actually imposed on the cholesky factor of the matrix. Thus alpha does not translate directly into the filling fraction of the matrix itself. Examples -------- >>> from sklearn.datasets import make_sparse_spd_matrix >>> make_sparse_spd_matrix(n_dim=4, norm_diag=False, random_state=42) array([[1., 0., 0., 0.], [0., 1., 0., 0.], [0., 0., 1., 0.], [0., 0., 0., 1.]])",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\datasets\\_samples_generator.py",
    "ast_data": "FunctionDef name:make_sparse_spd_matrix arg:n_dim arguments arg arg arg arg arg arg arg Assign Call Assign Call Assign Call arguments arg Call Assign Call Assign Call Assign Assign If Assign Call Call Call Assign If Compare Return return:yes Call Return return:yes Call Call Call Call Call Call Call"
  },
  {
    "library": "django",
    "name": "accepts",
    "source_code": "def accepts(self, media_type):\n    return self.accepted_type(media_type) is not None",
    "docstring": "Does the client accept a response in the given media type?",
    "type": "method",
    "file_path": "django\\django\\http\\request.py",
    "ast_data": "FunctionDef name:accepts arg:self arg:media_type arguments arg arg Return return:yes Compare Call"
  },
  {
    "library": "tensorflow",
    "name": "_SparseTensorToCSRSparseMatrixGrad",
    "source_code": "@ops.RegisterGradient('SparseTensorToCSRSparseMatrix')\ndef _SparseTensorToCSRSparseMatrixGrad(op: ops.Operation, grad):\n    grad_values = sparse_csr_matrix_ops.csr_sparse_matrix_to_sparse_tensor(grad, type=op.get_attr('T')).values\n    return (None, grad_values, None)",
    "docstring": "Gradient for sparse_tensor_to_csr_sparse_matrix op.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\linalg\\sparse\\sparse_csr_matrix_grad.py",
    "ast_data": "FunctionDef name:_SparseTensorToCSRSparseMatrixGrad arg:op arg:grad arguments arg arg Assign Call Call Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "__arrow_array__",
    "source_code": "def __arrow_array__(self, type=None):\n    import pyarrow\n    from pandas.core.arrays.arrow.extension_types import ArrowPeriodType\n    if type is not None:\n        if pyarrow.types.is_integer(type):\n            return pyarrow.array(self._ndarray, mask=self.isna(), type=type)\n        elif isinstance(type, ArrowPeriodType):\n            if self.freqstr != type.freq:\n                raise TypeError(f\"Not supported to convert PeriodArray to array with different 'freq' ({self.freqstr} vs {type.freq})\")\n        else:\n            raise TypeError(f\"Not supported to convert PeriodArray to '{type}' type\")\n    period_type = ArrowPeriodType(self.freqstr)\n    storage_array = pyarrow.array(self._ndarray, mask=self.isna(), type='int64')\n    return pyarrow.ExtensionArray.from_storage(period_type, storage_array)",
    "docstring": "Convert myself into a pyarrow Array.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\arrays\\period.py",
    "ast_data": "FunctionDef name:__arrow_array__ arg:self arg:type arguments arg arg If Compare If Call Return return:yes Call Call If Call If Compare Raise Call Raise Call Assign Call Assign Call Call Return return:yes Call"
  },
  {
    "library": "authlib",
    "name": "parse_json",
    "source_code": "@staticmethod\ndef parse_json(obj):\n    return ensure_dict(obj, 'JWE')",
    "docstring": "Parse JWE JSON Serialization. :param obj: JWE JSON Serialization as str or dict :return: Parsed JWE JSON Serialization as dict if is an str, or as is if is already a dict",
    "type": "method",
    "file_path": "authlib\\authlib\\jose\\rfc7516\\jwe.py",
    "ast_data": "FunctionDef name:parse_json arg:obj arguments arg Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "sum",
    "source_code": "def sum(self, axis=None, dtype=None, out=None):\n    if self.ndim == 2 and (not hasattr(self, 'blocksize')) and (axis in self._swap(((1, -1), (0, -2)))[0]):\n        res_dtype = get_sum_dtype(self.dtype)\n        ret = np.zeros(len(self.indptr) - 1, dtype=res_dtype)\n        major_index, value = self._minor_reduce(np.add)\n        ret[major_index] = value\n        ret = self._ascontainer(ret)\n        if axis % 2 == 1:\n            ret = ret.T\n        return ret.sum(axis=(), dtype=dtype, out=out)\n    else:\n        return _spbase.sum(self, axis=axis, dtype=dtype, out=out)",
    "docstring": "Sum the array/matrix over the given axis. If the axis is None, sum over both rows and columns, returning a scalar.",
    "type": "method",
    "file_path": "scipy\\scipy\\sparse\\_compressed.py",
    "ast_data": "FunctionDef name:sum arg:self arg:axis arg:dtype arg:out arguments arg arg arg arg If BoolOp Compare Call Compare Call Assign Call Assign Call Call Assign Call Assign Assign Call If Compare Assign Return return:yes Call Return return:yes Call"
  },
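A usage sketch for the sparse `.sum()` above, showing the axis semantics with the public `scipy.sparse` API.

```python
# axis=None gives a scalar; axis=0/1 reduce over rows/columns.
import numpy as np
from scipy.sparse import csr_array

A = csr_array(np.array([[1, 0, 2],
                        [0, 3, 0]]))
print(A.sum())        # 6, scalar
print(A.sum(axis=0))  # [1 3 2], column sums
print(A.sum(axis=1))  # [3 3], row sums
```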
  {
    "library": "pytorch",
    "name": "tree_iter",
    "source_code": "def tree_iter(tree: PyTree, is_leaf: Optional[Callable[[PyTree], bool]]=None) -> Iterable[Any]:\n    return optree.tree_iter(tree, is_leaf=is_leaf, none_is_leaf=True, namespace='torch')",
    "docstring": "Get an iterator over the leaves of a pytree. See also :func:. >>> tree = {\"b\": (2, [3, 4]), \"a\": 1, \"c\": None, \"d\": 5} >>> list(tree_iter(tree)) [2, 3, 4, 1, None, 5] >>> list(tree_iter(1)) [1] >>> list(tree_iter(None)) [None] Args: tree (pytree): A pytree to flatten. is_leaf (callable, optional): An extra leaf predicate function that will be called at each flattening step. The function should have a single argument with signature `True`, the whole subtree being treated as a leaf. Otherwise, the default pytree registry will be used to determine a node is a leaf or not. If the function is not specified, the default pytree registry will be used. Returns: An iterator over the leaf values.",
    "type": "function",
    "file_path": "pytorch\\torch\\utils\\_cxx_pytree.py",
    "ast_data": "FunctionDef name:tree_iter arg:tree arg:is_leaf arguments arg arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "manual_seed",
    "source_code": "def manual_seed(seed) -> torch._C.Generator:\n    seed = int(seed)\n    import torch.cuda\n    if not torch.cuda._is_in_bad_fork():\n        torch.cuda.manual_seed_all(seed)\n    import torch.mps\n    if not torch.mps._is_in_bad_fork():\n        torch.mps.manual_seed(seed)\n    import torch.xpu\n    if not torch.xpu._is_in_bad_fork():\n        torch.xpu.manual_seed_all(seed)\n    _seed_custom_device(seed)\n    return default_generator.manual_seed(seed)",
    "docstring": "Sets the seed for generating random numbers on all devices. Returns a object. Args: seed (int): The desired seed. Value must be within the inclusive range . Otherwise, a RuntimeError is raised. Negative inputs are remapped to positive values with the formula .",
    "type": "function",
    "file_path": "pytorch\\torch\\random.py",
    "ast_data": "FunctionDef name:manual_seed arg:seed arguments arg Assign Call If Call Call If Call Call If Call Call Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "make_indexer",
    "source_code": "def make_indexer(self) -> Callable[[Sequence[Expr]], Expr]:\n\n    def indexer(index):\n        assert len(index) == len(self.stride)\n        assert len(index) == len(self.size)\n        result = self.offset\n        for idx, stride, sz in zip(index, self.stride, self.size):\n            if sz != 1:\n                result = result + idx * stride\n        return result\n    return indexer",
    "docstring": "A closure containing math to read a given element",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\ir.py",
    "ast_data": "FunctionDef name:make_indexer arg:self arguments arg FunctionDef name:indexer arg:index arguments arg Compare Call Call Compare Call Call Assign For Call If Compare Assign Return return:yes Return return:yes"
  },
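A standalone sketch of the strided-index closure above: flat offset plus sum(index * stride), skipping size-1 dims. For a contiguous 2x3 layout (strides (3, 1)), element (1, 2) lands at flat position 5. The plain-int reimplementation is illustrative; the real method works over sympy expressions.

```python
# Closure computing offset + sum(idx * stride) over non-degenerate dims.
def make_indexer(offset, size, stride):
    def indexer(index):
        assert len(index) == len(stride) == len(size)
        result = offset
        for idx, st, sz in zip(index, stride, size):
            if sz != 1:
                result += idx * st
        return result
    return indexer

idx = make_indexer(offset=0, size=(2, 3), stride=(3, 1))
print(idx((1, 2)))  # 5
```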
  {
    "library": "seaborn",
    "name": "_lookup_single",
    "source_code": "def _lookup_single(self, key, attr=None):\n    if attr is None:\n        value = self.lookup_table[key]\n    else:\n        value = self.lookup_table[key][attr]\n    return value",
    "docstring": "Get attribute(s) for a given data point.",
    "type": "method",
    "file_path": "seaborn\\seaborn\\_base.py",
    "ast_data": "FunctionDef name:_lookup_single arg:self arg:key arg:attr arguments arg arg arg If Compare Assign Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "MeanAbsolutePercentageError",
    "source_code": "class MeanAbsolutePercentageError(MeanMetricWrapper):\n\n    def __init__(self, name='mean_absolute_percentage_error', dtype=None):\n        super(MeanAbsolutePercentageError, self).__init__(mean_absolute_percentage_error, name, dtype=dtype)",
    "docstring": "Computes the mean absolute percentage error between and . Args: name: (Optional) string name of the metric instance. dtype: (Optional) data type of the metric result. Standalone usage: >>> m = tf.keras.metrics.MeanAbsolutePercentageError() >>> m.update_state([[0, 1], [0, 0]], [[1, 1], [0, 0]]) >>> m.result().numpy() 250000000.0 >>> m.reset_state() >>> m.update_state([[0, 1], [0, 0]], [[1, 1], [0, 0]], ... sample_weight=[1, 0]) >>> m.result().numpy() 500000000.0 Usage with API:",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\metrics.py",
    "ast_data": "ClassDef name:MeanAbsolutePercentageError FunctionDef name:__init__ arg:self arg:name arg:dtype arguments arg arg arg Call Call"
  },
  {
    "library": "scikit-learn",
    "name": "fit_transform",
    "source_code": "@available_if(_can_fit_transform)\n@_fit_context(prefer_skip_nested_validation=False)\ndef fit_transform(self, X, y=None, **params):\n    routed_params = self._check_method_params(method='fit_transform', props=params)\n    Xt = self._fit(X, y, routed_params)\n    last_step = self._final_estimator\n    with _print_elapsed_time('Pipeline', self._log_message(len(self.steps) - 1)):\n        if last_step == 'passthrough':\n            return Xt\n        last_step_params = self._get_metadata_for_step(step_idx=len(self) - 1, step_params=routed_params[self.steps[-1][0]], all_params=params)\n        if hasattr(last_step, 'fit_transform'):\n            return last_step.fit_transform(Xt, y, **last_step_params['fit_transform'])\n        else:\n            return last_step.fit(Xt, y, **last_step_params['fit']).transform(Xt, **last_step_params['transform'])",
    "docstring": "Fit the model and transform with the final estimator. Fit all the transformers one after the other and sequentially transform the data. Only valid if the final estimator either implements or and . Parameters ---------- X : iterable Training data. Must fulfill input requirements of first step of the pipeline. y : iterable, default=None Training targets. Must fulfill label requirements for all steps of the pipeline. **params : dict of str -> object - If (default): Parameters passed to the `enable_metadata_routing=Trueenable_metadata_routing=TrueMetadata Routing User Guide ` for more details. Returns ------- Xt : ndarray of shape (n_samples, n_transformed_features) Transformed samples.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\pipeline.py",
    "ast_data": "FunctionDef name:fit_transform arg:self arg:X arg:y arguments arg arg arg arg Assign Call Assign Call Assign With Call Call Call If Compare Return return:yes Assign Call Call If Call Return return:yes Call Return return:yes Call Call Call Call"
  },
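A minimal usage sketch of `Pipeline.fit_transform` via the public API, assuming scikit-learn is installed (the scaler/PCA steps and iris data are illustrative):

    from sklearn.datasets import load_iris
    from sklearn.decomposition import PCA
    from sklearn.pipeline import Pipeline
    from sklearn.preprocessing import StandardScaler

    X, _ = load_iris(return_X_y=True)
    pipe = Pipeline([("scale", StandardScaler()), ("pca", PCA(n_components=2))])
    Xt = pipe.fit_transform(X)  # fits every step, then transforms with the last one
    print(Xt.shape)             # (150, 2)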
  {
    "library": "django",
    "name": "make_password",
    "source_code": "def make_password(password, salt=None, hasher='default'):\n    if password is None:\n        return UNUSABLE_PASSWORD_PREFIX + get_random_string(UNUSABLE_PASSWORD_SUFFIX_LENGTH)\n    if not isinstance(password, (bytes, str)):\n        raise TypeError('Password must be a string or bytes, got %s.' % type(password).__qualname__)\n    hasher = get_hasher(hasher)\n    salt = salt or hasher.salt()\n    return hasher.encode(password, salt)",
    "docstring": "Turn a plain-text password into a hash for database storage Same as encode() but generate a new random salt. If password is None then return a concatenation of UNUSABLE_PASSWORD_PREFIX and a random string, which disallows logins. Additional random string reduces chances of gaining access to staff or superuser accounts. See ticket #20079 for more info.",
    "type": "function",
    "file_path": "django\\django\\contrib\\auth\\hashers.py",
    "ast_data": "FunctionDef name:make_password arg:password arg:salt arg:hasher arguments arg arg arg If Compare Return return:yes Call If Call Raise Call Call Assign Call Assign BoolOp Call Return return:yes Call"
  },
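A minimal sketch of `make_password` in a stand-alone script; the `settings.configure()` call stands in for a real project's settings:

    from django.conf import settings

    settings.configure()  # stand-alone use only; a real project already has settings
    from django.contrib.auth.hashers import check_password, make_password

    hashed = make_password("s3cret-pass")
    print(hashed.split("$")[0])                   # hasher algorithm, e.g. pbkdf2_sha256
    print(check_password("s3cret-pass", hashed))  # True
    print(make_password(None)[0])                 # '!' marks an unusable password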
  {
    "library": "pytorch",
    "name": "WorkerSpec",
    "source_code": "@dataclass\nclass WorkerSpec:\n    role: str\n    local_world_size: int\n    rdzv_handler: rdzv.RendezvousHandler\n    fn: Optional[Callable] = None\n    entrypoint: Union[Callable, str, None] = None\n    args: tuple = ()\n    max_restarts: int = 3\n    monitor_interval: float = 0.1\n    master_port: Optional[int] = None\n    master_addr: Optional[str] = None\n    local_addr: Optional[str] = None\n\n    def __post_init__(self):\n        assert self.local_world_size > 0\n        assert self.monitor_interval > 0\n        if self.fn:\n            warnings.warn('WorkerSpec.fn will be deprecated, please use WorkerSpec.entrypoint instead', category=DeprecationWarning)\n            self.entrypoint = self.fn\n        assert self.entrypoint\n\n    def get_entrypoint_name(self):\n        if isinstance(self.entrypoint, str):\n            return os.path.basename(self.entrypoint)\n        else:\n            assert self.entrypoint is not None\n            return self.entrypoint.__qualname__",
    "docstring": "Blueprint information about a particular type of worker. For a given role, there must only exist a single worker spec. Worker spec is expected to be homogeneous across all nodes (machine), that is each node runs the same number of workers for a particular spec. Args: role: user-defined role for the workers with this spec local_world_size: number local workers to run fn: (deprecated use entrypoint instead) entrypoint: worker function or command args: arguments to pass to `` settings.",
    "type": "class",
    "file_path": "pytorch\\torch\\distributed\\elastic\\agent\\server\\api.py",
    "ast_data": "ClassDef name:WorkerSpec FunctionDef name:__post_init__ arg:self arguments arg Compare Compare If Call Assign FunctionDef name:get_entrypoint_name arg:self arguments arg If Call Return return:yes Call Compare Return return:yes"
  },
  {
    "library": "scipy",
    "name": "get_declaration",
    "source_code": "def get_declaration(ufunc, c_name, c_proto, cy_proto, header, proto_h_filename):\n    defs = []\n    defs_h = []\n    var_name = c_name.replace('[', '_').replace(']', '_').replace(' ', '_')\n    if header.endswith('.pxd'):\n        defs.append(f'from .{header[:-4]} cimport {ufunc.cython_func_name(c_name, prefix='')} as {ufunc.cython_func_name(c_name)}')\n        proto_name = f'_proto_{var_name}_t'\n        defs.append(f'ctypedef {cy_proto.replace('(*)', proto_name)}')\n        defs.append(f'cdef {proto_name} *{proto_name}_var = &{ufunc.cython_func_name(c_name, specialized=True)}')\n    else:\n        new_name = f'{ufunc.cython_func_name(c_name)} \"{c_name}\"'\n        proto_h_filename = os.path.basename(proto_h_filename)\n        defs.append(f'cdef extern from r\"{proto_h_filename}\":')\n        defs.append(f'    cdef {cy_proto.replace('(*)', new_name)}')\n        defs_h.append(f'#include \"{header}\"')\n        defs_h.append(f'{c_proto.replace('(*)', c_name)};')\n    return (defs, defs_h, var_name)",
    "docstring": "Construct a Cython declaration of a function coming either from a pxd or a header file. Do sufficient tricks to enable compile-time type checking against the signature expected by the ufunc.",
    "type": "function",
    "file_path": "scipy\\scipy\\special\\_generate_pyx.py",
    "ast_data": "FunctionDef name:get_declaration arg:ufunc arg:c_name arg:c_proto arg:cy_proto arg:header arg:proto_h_filename arguments arg arg arg arg arg arg Assign Assign Assign Call Call Call If Call Call Call Call Assign Call Call Call Call Assign Call Assign Call Call Call Call Call Call Call Return return:yes"
  },
  {
    "library": "django",
    "name": "get_migration_by_prefix",
    "source_code": "def get_migration_by_prefix(self, app_label, name_prefix):\n    results = []\n    for migration_app_label, migration_name in self.disk_migrations:\n        if migration_app_label == app_label and migration_name.startswith(name_prefix):\n            results.append((migration_app_label, migration_name))\n    if len(results) > 1:\n        raise AmbiguityError(\"There is more than one migration for '%s' with the prefix '%s'\" % (app_label, name_prefix))\n    elif not results:\n        raise KeyError(f\"There is no migration for '{app_label}' with the prefix '{name_prefix}'\")\n    else:\n        return self.disk_migrations[results[0]]",
    "docstring": "Return the migration(s) which match the given app label and name_prefix.",
    "type": "method",
    "file_path": "django\\django\\db\\migrations\\loader.py",
    "ast_data": "FunctionDef name:get_migration_by_prefix arg:self arg:app_label arg:name_prefix arguments arg arg arg Assign For If BoolOp Compare Call Call If Compare Call Raise Call If Raise Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "unserializable_hook",
    "source_code": "def unserializable_hook(f):\n    f.__torch_unserializable__ = True\n    return f",
    "docstring": "Mark a function as an unserializable hook with this decorator. This suppresses warnings that would otherwise arise if you attempt to serialize a tensor that has a hook.",
    "type": "function",
    "file_path": "pytorch\\torch\\utils\\hooks.py",
    "ast_data": "FunctionDef name:unserializable_hook arg:f arguments arg Assign Return return:yes"
  },
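A short sketch of the decorator in use, assuming PyTorch is installed (the hook body is illustrative):

    import torch
    from torch.utils.hooks import unserializable_hook

    @unserializable_hook  # suppresses serialization warnings for tensors carrying this hook
    def print_grad(grad):
        print("grad norm:", grad.norm().item())

    t = torch.ones(3, requires_grad=True)
    t.register_hook(print_grad)
    t.sum().backward()  # prints the gradient norm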
  {
    "library": "django",
    "name": "get_test_func",
    "source_code": "def get_test_func(self):\n    return self.test_func",
    "docstring": "Override this method to use a different test_func method.",
    "type": "method",
    "file_path": "django\\django\\contrib\\auth\\mixins.py",
    "ast_data": "FunctionDef name:get_test_func arg:self arguments arg Return return:yes"
  },
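For context, `UserPassesTestMixin` calls `get_test_func()` to locate the access check, and the default shown above simply returns `self.test_func`; a minimal sketch of the usual pattern (view and template names are illustrative):

    from django.contrib.auth.mixins import UserPassesTestMixin
    from django.views.generic import TemplateView

    class StaffOnlyView(UserPassesTestMixin, TemplateView):
        template_name = "staff.html"

        def test_func(self):
            # get_test_func() could be overridden to delegate to another method
            return self.request.user.is_staff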
  {
    "library": "tensorflow",
    "name": "_num_total_workers",
    "source_code": "def _num_total_workers(has_chief, num_workers):\n    if has_chief:\n        return num_workers + 1\n    return num_workers",
    "docstring": "Returns the number of workers including the chief.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\combinations.py",
    "ast_data": "FunctionDef name:_num_total_workers arg:has_chief arg:num_workers arguments arg arg If Return return:yes Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "verts",
    "source_code": "@property\ndef verts(self):\n    return self._xys[:-1]",
    "docstring": "The polygon vertices, as a list of `` pairs.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\widgets.py",
    "ast_data": "FunctionDef name:verts arg:self arguments arg Return return:yes"
  },
  {
    "library": "numpy",
    "name": "Legendre",
    "source_code": "class Legendre(ABCPolyBase):\n    _add = staticmethod(legadd)\n    _sub = staticmethod(legsub)\n    _mul = staticmethod(legmul)\n    _div = staticmethod(legdiv)\n    _pow = staticmethod(legpow)\n    _val = staticmethod(legval)\n    _int = staticmethod(legint)\n    _der = staticmethod(legder)\n    _fit = staticmethod(legfit)\n    _line = staticmethod(legline)\n    _roots = staticmethod(legroots)\n    _fromroots = staticmethod(legfromroots)\n    domain = np.array(legdomain)\n    window = np.array(legdomain)\n    basis_name = 'P'",
    "docstring": "A Legendre series class. The Legendre class provides the standard Python numerical methods '+', '-', '*', '//', '%', 'divmod', '**', and '()' as well as the attributes and methods listed below. Parameters ---------- coef : array_like Legendre coefficients in order of increasing degree, i.e., `domain` for its use. The default value is [-1., 1.]. symbol : str, optional Symbol used to represent the independent variable in string representations of the polynomial expression, e.g. for printing. The symbol must be a valid Python identifier. Default value is 'x'. .. versionadded:: 1.24",
    "type": "class",
    "file_path": "numpy\\numpy\\polynomial\\legendre.py",
    "ast_data": "ClassDef name:Legendre Assign Call Assign Call Assign Call Assign Call Assign Call Assign Call Assign Call Assign Call Assign Call Assign Call Assign Call Assign Call Assign Call Assign Call Assign"
  },
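A minimal usage sketch of the `Legendre` class (the sample data is illustrative):

    import numpy as np
    from numpy.polynomial import Legendre, Polynomial

    x = np.linspace(-1, 1, 101)
    y = x**3 - 0.5 * x + 0.1
    p = Legendre.fit(x, y, deg=3)          # least-squares fit in the Legendre basis
    print(p(0.5))                          # evaluates like a function: -0.025
    print(p.convert(kind=Polynomial).coef) # ~[0.1, -0.5, 0, 1] in the power basis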
  {
    "library": "pytorch",
    "name": "_orthogonalize",
    "source_code": "def _orthogonalize(matrices, epsilon=0):\n    assert len(matrices.shape) == 3 and matrices.shape[2] <= matrices.shape[1]\n    num_matrices = matrices.shape[0]\n    rank = matrices.shape[2]\n    dtype = matrices.dtype\n    if rank <= 2 or dtype in [torch.float16, torch.bfloat16]:\n        _orthogonalize_gram_schmidt(matrices, epsilon=epsilon)\n    else:\n        torch.linalg.qr(matrices, out=(matrices, torch.empty(num_matrices, rank, rank, device=matrices.device, dtype=dtype)))",
    "docstring": "Decide between Gram-Schmidt or QR factorization to orthogonalize a batch of matrices. QR factorization doesn't work with half-precision, but it is usually faster with a rank > 2.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\algorithms\\ddp_comm_hooks\\powerSGD_hook.py",
    "ast_data": "FunctionDef name:_orthogonalize arg:matrices arg:epsilon arguments arg arg BoolOp Compare Call Compare Assign Assign Assign If BoolOp Compare Compare Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "_CustomReducer",
    "source_code": "class _CustomReducer:\n\n    def __init__(self, init_value, reduce_fn):\n        self.init_value = init_value\n        self.reduce_fn = reduce_fn",
    "docstring": "Custom reducer class that can be used to specify a custom operation that reduces losses of multiple microbatches into one value. Example: >>> # xdoctest: +SKIP >>> sum_reducer = _CustomReducer( >>> torch.tensor(0.0), >>> lambda a, b: a + b >>> )",
    "type": "class",
    "file_path": "pytorch\\torch\\distributed\\pipelining\\microbatch.py",
    "ast_data": "ClassDef name:_CustomReducer FunctionDef name:__init__ arg:self arg:init_value arg:reduce_fn arguments arg arg arg Assign Assign"
  },
  {
    "library": "scikit-learn",
    "name": "_predict",
    "source_code": "def _predict(self, X=None):\n    check_is_fitted(self)\n    if X is not None:\n        shifted_opposite_lof_scores = self.decision_function(X)\n        is_inlier = np.ones(shifted_opposite_lof_scores.shape[0], dtype=int)\n        is_inlier[shifted_opposite_lof_scores < 0] = -1\n    else:\n        is_inlier = np.ones(self.n_samples_fit_, dtype=int)\n        is_inlier[self.negative_outlier_factor_ < self.offset_] = -1\n    return is_inlier",
    "docstring": "Predict the labels (1 inlier, -1 outlier) of X according to LOF. If X is None, returns the same as fit_predict(X_train). Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features), default=None The query sample or samples to compute the Local Outlier Factor w.r.t. the training samples. If None, makes prediction on the training data without considering them as their own neighbors. Returns ------- is_inlier : ndarray of shape (n_samples,) Returns -1 for anomalies/outliers and +1 for inliers.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\neighbors\\_lof.py",
    "ast_data": "FunctionDef name:_predict arg:self arg:X arguments arg arg Call If Compare Assign Call Assign Call Assign Compare Assign Call Assign Compare Return return:yes"
  },
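`_predict` backs the public `predict`/`fit_predict`; a minimal sketch through the public API, assuming scikit-learn (data taken from the standard LOF example):

    import numpy as np
    from sklearn.neighbors import LocalOutlierFactor

    X = np.array([[-1.1], [0.2], [101.1], [0.3]])
    lof = LocalOutlierFactor(n_neighbors=2)
    print(lof.fit_predict(X))  # [ 1  1 -1  1]: -1 flags the obvious outlier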
  {
    "library": "tensorflow",
    "name": "prepare_srcs",
    "source_code": "def prepare_srcs(deps: list[str], deps_destinations: list[str], srcs_dir: str) -> None:\n    path_to_replace = {'external/local_xla/': 'tensorflow/compiler', 'external/local_tsl/': 'tensorflow'}\n    deps_mapping_dict = {}\n    for deps_destination in deps_destinations:\n        with open(deps_destination, 'r') as deps_destination_file:\n            deps_mapping_dict.update(json.load(deps_destination_file))\n    for file in deps:\n        for path, val in path_to_replace.items():\n            if path in file:\n                copy_file(file, os.path.join(srcs_dir, val), path)\n                break\n        else:\n            if 'external' not in file:\n                if file in deps_mapping_dict:\n                    dest = deps_mapping_dict[file]\n                    if dest:\n                        copy_file(file, srcs_dir, None, dest)\n                else:\n                    copy_file(file, srcs_dir, None, None)",
    "docstring": "Rearrange source files in target the target directory. Exclude files and move vendored xla/tsl files accordingly. Args: deps: a list of paths to files. deps_destinations: a list of json files with mapping of deps to their destinations for deps whose original path and path inside the wheel are different. srcs_dir: target directory where files are copied to.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\tools\\pip_package\\build_pip_package.py",
    "ast_data": "FunctionDef name:prepare_srcs arg:deps arg:deps_destinations arg:srcs_dir arguments arg arg arg Assign Assign For With Call Call Call For For Call If Compare Call Call If Compare If Compare Assign If Call Call"
  },
  {
    "library": "kornia",
    "name": "ResizePreProcessor",
    "source_code": "class ResizePreProcessor(Module):\n\n    def __init__(self, height: int, width: int, interpolation_mode: str='bilinear') -> None:\n        super().__init__()\n        self.size = (height, width)\n        self.interpolation_mode = interpolation_mode\n\n    def forward(self, imgs: Union[Tensor, List[Tensor]]) -> Tuple[Tensor, Tensor]:\n        resized_imgs: list[Tensor] = []\n        iters = len(imgs) if isinstance(imgs, list) else imgs.shape[0]\n        original_sizes = imgs[0].new_zeros((iters, 2))\n        for i in range(iters):\n            img = imgs[i]\n            original_sizes[i, 0] = img.shape[-2]\n            original_sizes[i, 1] = img.shape[-1]\n            resized_imgs.append(resize(img[None], size=self.size, interpolation=self.interpolation_mode))\n        return (concatenate(resized_imgs), original_sizes)",
    "docstring": "Resize a list of image tensors to the given size. Additionally, also returns the original image sizes for further post-processing.",
    "type": "class",
    "file_path": "kornia\\kornia\\models\\utils.py",
    "ast_data": "ClassDef name:ResizePreProcessor FunctionDef name:__init__ arg:self arg:height arg:width arg:interpolation_mode arguments arg arg arg arg Call Call Assign Assign FunctionDef name:forward arg:self arg:imgs arguments arg arg Assign Call Call Assign Call For Call Assign Assign Assign Call Call Return return:yes Call"
  },
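A minimal usage sketch, assuming the import path follows the entry's file_path (`kornia.models.utils`); image sizes are illustrative:

    import torch
    from kornia.models.utils import ResizePreProcessor

    pre = ResizePreProcessor(height=224, width=224)
    imgs = [torch.rand(3, 180, 240), torch.rand(3, 300, 400)]  # differently sized images
    batch, original_sizes = pre(imgs)
    print(batch.shape)     # torch.Size([2, 3, 224, 224])
    print(original_sizes)  # [[180., 240.], [300., 400.]]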
  {
    "library": "pandas",
    "name": "generate_numba_table_func",
    "source_code": "@functools.cache\ndef generate_numba_table_func(func: Callable[..., np.ndarray], nopython: bool, nogil: bool, parallel: bool):\n    numba_func = jit_user_function(func)\n    if TYPE_CHECKING:\n        import numba\n    else:\n        numba = import_optional_dependency('numba')\n\n    @numba.jit(nopython=nopython, nogil=nogil, parallel=parallel)\n    def roll_table(values: np.ndarray, begin: np.ndarray, end: np.ndarray, minimum_periods: int, *args: Any):\n        result = np.empty((len(begin), values.shape[1]))\n        min_periods_mask = np.empty(result.shape)\n        for i in numba.prange(len(result)):\n            start = begin[i]\n            stop = end[i]\n            window = values[start:stop]\n            count_nan = np.sum(np.isnan(window), axis=0)\n            nan_mask = len(window) - count_nan >= minimum_periods\n            if nan_mask.any():\n                result[i, :] = numba_func(window, *args)\n            min_periods_mask[i, :] = nan_mask\n        result = np.where(min_periods_mask, result, np.nan)\n        return result\n    return roll_table",
    "docstring": "Generate a numba jitted function to apply window calculations table-wise. Func will be passed a M window size x N number of columns array, and must return a 1 x N number of columns array. 1. jit the user's function 2. Return a rolling apply function with the jitted function inline Parameters ---------- func : function function to be applied to each window and will be JITed nopython : bool nopython to be passed into numba.jit nogil : bool nogil to be passed into numba.jit parallel : bool parallel to be passed into numba.jit Returns ------- Numba function",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\window\\numba_.py",
    "ast_data": "FunctionDef name:generate_numba_table_func arg:func arg:nopython arg:nogil arg:parallel arguments arg arg arg arg Assign Call If Assign Call FunctionDef name:roll_table arg:values arg:begin arg:end arg:minimum_periods arguments arg arg arg arg arg Assign Call Call Assign Call For Call Call Assign Assign Assign Assign Call Call Assign Compare Call If Call Assign Call Assign Assign Call Return return:yes Call Return return:yes"
  },
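This generator sits behind table-wise rolling aggregations; a minimal sketch of the public entry point, assuming pandas with numba installed:

    import numpy as np
    import pandas as pd

    df = pd.DataFrame(np.arange(12.0).reshape(6, 2), columns=["a", "b"])
    # method="table" hands each window to the kernel with all columns at once;
    # it requires the numba engine, which routes through a jitted table function.
    print(df.rolling(3, method="table").mean(engine="numba"))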
  {
    "library": "scipy",
    "name": "cdf",
    "source_code": "def cdf(self, x, mean=None, cov=1, allow_singular=False, maxpts=None, abseps=1e-05, releps=1e-05, *, lower_limit=None, rng=None):\n    params = self._process_parameters(mean, cov, allow_singular)\n    dim, mean, cov_object = params\n    cov = cov_object.covariance\n    x = self._process_quantiles(x, dim)\n    if not maxpts:\n        maxpts = 1000000 * dim\n    rng = self._get_random_state(rng)\n    out = self._cdf(x, mean, cov, maxpts, abseps, releps, lower_limit, rng)\n    return out",
    "docstring": "Multivariate normal cumulative distribution function. Parameters ---------- x : array_like Quantiles, with the last axis of denoting the components. %(_mvn_doc_default_callparams)s maxpts : integer, optional The maximum number of points to use for integration (default `xx` Notes ----- %(_mvn_doc_callparams_note)s .. versionadded:: 1.0.0",
    "type": "method",
    "file_path": "scipy\\scipy\\stats\\_multivariate.py",
    "ast_data": "FunctionDef name:cdf arg:self arg:x arg:mean arg:cov arg:allow_singular arg:maxpts arg:abseps arg:releps arguments arg arg arg arg arg arg arg arg arg arg Assign Call Assign Assign Assign Call If Assign Assign Call Assign Call Return return:yes"
  },
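A minimal sketch of the public `cdf` call, assuming SciPy >= 1.0 (mean/covariance values are illustrative):

    import numpy as np
    from scipy.stats import multivariate_normal

    mean = np.zeros(2)
    cov = np.array([[1.0, 0.3], [0.3, 1.0]])
    p = multivariate_normal.cdf([0.0, 0.0], mean=mean, cov=cov)
    print(p)  # P(X <= 0, Y <= 0) = 1/4 + arcsin(0.3)/(2*pi) ~= 0.2985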
  {
    "library": "tensorflow",
    "name": "google2_log_prefix",
    "source_code": "def google2_log_prefix(level, timestamp=None, file_and_line=None):\n    global _level_names\n    now = timestamp or _time.time()\n    now_tuple = _time.localtime(now)\n    now_microsecond = int(1000000.0 * (now % 1.0))\n    filename, line = file_and_line or _GetFileAndLine()\n    basename = _os.path.basename(filename)\n    severity = 'I'\n    if level in _level_names:\n        severity = _level_names[level][0]\n    s = '%c%02d%02d %02d:%02d:%02d.%06d %5d %s:%d] ' % (severity, now_tuple[1], now_tuple[2], now_tuple[3], now_tuple[4], now_tuple[5], now_microsecond, _get_thread_id(), basename, line)\n    return s",
    "docstring": "Assemble a logline prefix using the google2 format.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\platform\\tf_logging.py",
    "ast_data": "FunctionDef name:google2_log_prefix arg:level arg:timestamp arg:file_and_line arguments arg arg arg Assign BoolOp Call Assign Call Assign Call Assign BoolOp Call Assign Call Assign If Compare Assign Assign Call Return return:yes"
  },
  {
    "library": "pandas",
    "name": "_apply_tablewise",
    "source_code": "def _apply_tablewise(self, homogeneous_func: Callable[..., ArrayLike], name: str | None=None, numeric_only: bool=False) -> DataFrame | Series:\n    if self._selected_obj.ndim == 1:\n        raise ValueError(\"method='table' not applicable for Series objects.\")\n    obj = self._create_data(self._selected_obj, numeric_only)\n    values = self._prep_values(obj.to_numpy())\n    result = homogeneous_func(values)\n    index = self._slice_axis_for_step(obj.index, result)\n    columns = obj.columns if result.shape[1] == len(obj.columns) else obj.columns[::self.step]\n    out = obj._constructor(result, index=index, columns=columns)\n    return self._resolve_output(out, obj)",
    "docstring": "Apply the given function to the DataFrame across the entire object",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\window\\rolling.py",
    "ast_data": "FunctionDef name:_apply_tablewise arg:self arg:homogeneous_func arg:name arg:numeric_only arguments arg arg arg arg If Compare Raise Call Assign Call Assign Call Call Assign Call Assign Call Assign Compare Call Assign Call Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "_reindex_axes",
    "source_code": "@final\ndef _reindex_axes(self, axes, level: Level | None, limit: int | None, tolerance, method, fill_value: Scalar | None) -> Self:\n    obj = self\n    for a in self._AXIS_ORDERS:\n        labels = axes[a]\n        if labels is None:\n            continue\n        ax = self._get_axis(a)\n        new_index, indexer = ax.reindex(labels, level=level, limit=limit, tolerance=tolerance, method=method)\n        axis = self._get_axis_number(a)\n        obj = obj._reindex_with_indexers({axis: [new_index, indexer]}, fill_value=fill_value, allow_dups=False)\n    return obj",
    "docstring": "Perform the reindex for all the axes.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\generic.py",
    "ast_data": "FunctionDef name:_reindex_axes arg:self arg:axes arg:level arg:limit arg:tolerance arg:method arg:fill_value arguments arg arg arg arg arg arg arg Assign For Assign If Compare Assign Call Assign Call Assign Call Assign Call Return return:yes"
  },
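`_reindex_axes` is the per-axis loop behind the public `reindex`; a minimal sketch:

    import pandas as pd

    df = pd.DataFrame({"price": [10.0, 11.0]}, index=["mon", "tue"])
    # Reindexing both axes triggers one pass per axis internally.
    out = df.reindex(index=["mon", "tue", "wed"], columns=["price", "volume"], fill_value=0)
    print(out)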
  {
    "library": "numpy",
    "name": "T",
    "source_code": "@property\ndef T(self):\n    return self.transpose()",
    "docstring": "Returns the transpose of the matrix. Does *not* conjugate! For the complex conjugate transpose, use ``. Parameters ---------- None Returns ------- ret : matrix object The (non-conjugated) transpose of the matrix. See Also -------- transpose, getH Examples -------- >>> m = np.matrix('[1, 2; 3, 4]') >>> m matrix([[1, 2], [3, 4]]) >>> m.getT() matrix([[1, 3], [2, 4]])",
    "type": "method",
    "file_path": "numpy\\numpy\\matrixlib\\defmatrix.py",
    "ast_data": "FunctionDef name:T arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "transform",
    "source_code": "def transform(self, X):\n    check_is_fitted(self)\n    return self.one_hot_encoder_.transform(self.apply(X))",
    "docstring": "Transform dataset. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) Input data to be transformed. Use `` for maximum efficiency. Returns ------- X_transformed : sparse matrix of shape (n_samples, n_out) Transformed dataset.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\ensemble\\_forest.py",
    "ast_data": "FunctionDef name:transform arg:self arg:X arguments arg arg Call Return return:yes Call Call"
  },
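A minimal sketch of the fitted `transform` via `RandomTreesEmbedding`, assuming scikit-learn (data is illustrative):

    import numpy as np
    from sklearn.ensemble import RandomTreesEmbedding

    X = np.random.RandomState(0).rand(20, 4).astype(np.float32)
    embedder = RandomTreesEmbedding(n_estimators=5, random_state=0).fit(X)
    X_sparse = embedder.transform(X)      # one-hot of the leaf each sample reaches per tree
    print(X_sparse.shape, X_sparse.nnz)   # (20, n_out), 5 non-zeros per row (one per tree)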
  {
    "library": "pytorch",
    "name": "make_qconfig",
    "source_code": "def make_qconfig(obs_ctr: _PartialWrapper) -> QConfig:\n    if isinstance(obs_ctr(), FakeQuantizeBase):\n        weight = default_weight_fake_quant\n    else:\n        weight = default_weight_observer\n    return QConfig(activation=obs_ctr, weight=weight)",
    "docstring": "Make a QConfig with fixed qparams observers or fake quantizes.",
    "type": "function",
    "file_path": "pytorch\\torch\\ao\\quantization\\fx\\lstm_utils.py",
    "ast_data": "FunctionDef name:make_qconfig arg:obs_ctr arguments arg If Call Call Assign Assign Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_serialize_keras_tensor",
    "source_code": "def _serialize_keras_tensor(t):\n    if hasattr(t, '_keras_history'):\n        kh = t._keras_history\n        node_index = kh.node_index\n        node_key = make_node_key(kh.layer.name, node_index)\n        new_node_index = node_conversion_map.get(node_key, 0)\n        return [kh.layer.name, new_node_index, kh.tensor_index]\n    if isinstance(t, np.ndarray):\n        return t.tolist()\n    if isinstance(t, tensor_lib.Tensor):\n        return backend.get_value(t).tolist()\n    return t",
    "docstring": "Serializes a single Tensor passed to .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\node.py",
    "ast_data": "FunctionDef name:_serialize_keras_tensor arg:t arguments arg If Call Assign Assign Assign Call Assign Call Return return:yes If Call Return return:yes Call If Call Return return:yes Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "reify",
    "source_code": "def reify(e, s):\n    if isvar(e):\n        return reify(s[e], s) if e in s else e\n    return _reify(e, s)",
    "docstring": "Replace variables of expression with substitution >>> # xdoctest: +SKIP >>> x, y = var(), var() >>> e = (1, x, (3, y)) >>> s = {x: 2, y: 4} >>> reify(e, s) (1, 2, (3, 4)) >>> e = {1: x, 3: (y, 5)} >>> reify(e, s) {1: 2, 3: (4, 5)}",
    "type": "function",
    "file_path": "pytorch\\torch\\fx\\experimental\\unification\\core.py",
    "ast_data": "FunctionDef name:reify arg:e arg:s arguments arg arg If Call Return return:yes Compare Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_create_grad_indexed_slices_init",
    "source_code": "def _create_grad_indexed_slices_init(grad_output_slices, forward_input):\n    assert isinstance(grad_output_slices, indexed_slices.IndexedSlices)\n    assert isinstance(forward_input, tensor.Tensor)\n    values_out = grad_output_slices.values\n    indices_out = grad_output_slices.indices\n    if values_out.shape.is_fully_defined():\n        values_shape = tensor_shape.TensorShape([0] + values_out.shape.as_list()[1:])\n        values = array_ops.zeros(values_shape, dtype=values_out.dtype, name='values_init')\n    else:\n        if forward_input.dtype == dtypes.resource:\n            forward_shape = gen_resource_variable_ops.variable_shape(forward_input)\n        else:\n            forward_shape = array_ops.shape(forward_input)\n        values_shape = array_ops.concat([[0], forward_shape[1:]], 0)\n        values = array_ops.zeros(values_shape, dtype=values_out.dtype, name='values_init')\n    indices = constant_op.constant([], indices_out.dtype, name='indices_init')\n    if forward_input.dtype == dtypes.resource:\n        shape = gen_resource_variable_ops.variable_shape(forward_input, name='shape_init')\n    else:\n        shape = array_ops.shape(forward_input, name='shape_init')\n    return indexed_slices.IndexedSlices(values=values, indices=indices, dense_shape=shape)",
    "docstring": "Creates an IndexedSlices to pass as input to the while grad function. Args: grad_output_slices: IndexedSlices. The corresponding while grad function output. forward_input: Tensor. The corresponding input to the forward while op. Returns: Zeros IndexedSlices, created in current Graph.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\while_v2_indexed_slices_rewriter.py",
    "ast_data": "FunctionDef name:_create_grad_indexed_slices_init arg:grad_output_slices arg:forward_input arguments arg arg Call Call Assign Assign If Call Assign Call Call Assign Call If Compare Assign Call Assign Call Assign Call Assign Call Assign Call If Compare Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "insert_records",
    "source_code": "def insert_records(self, table: SQLTable, con, frame, name: str, index: bool | str | list[str] | None=True, schema=None, chunksize: int | None=None, method=None, **engine_kwargs) -> int | None:\n    raise AbstractMethodError(self)",
    "docstring": "Inserts data into already-prepared table",
    "type": "method",
    "file_path": "pandas\\pandas\\io\\sql.py",
    "ast_data": "FunctionDef name:insert_records arg:self arg:table arg:con arg:frame arg:name arg:index arg:schema arg:chunksize arg:method arguments arg arg arg arg arg arg arg arg arg arg Raise Call"
  },
  {
    "library": "pytorch",
    "name": "num_nodes_waiting",
    "source_code": "@abstractmethod\ndef num_nodes_waiting(self) -> int:\n    pass",
    "docstring": "Return the number of nodes who arrived late at the rendezvous barrier, hence were not included in the current worker group. Callers should periodically call this method to check whether new nodes are waiting to join the job and if so admit them by calling :py:meth: (re-rendezvous).",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\elastic\\rendezvous\\api.py",
    "ast_data": "FunctionDef name:num_nodes_waiting arg:self arguments arg"
  },
  {
    "library": "scikit-learn",
    "name": "_validate_score_name",
    "source_code": "def _validate_score_name(score_name, scoring, negate_score):\n    if score_name is not None:\n        return score_name\n    elif scoring is None:\n        return 'Negative score' if negate_score else 'Score'\n    else:\n        score_name = scoring.__name__ if callable(scoring) else scoring\n        if negate_score:\n            if score_name.startswith('neg_'):\n                score_name = score_name[4:]\n            else:\n                score_name = f'Negative {score_name}'\n        elif score_name.startswith('neg_'):\n            score_name = f'Negative {score_name[4:]}'\n        score_name = score_name.replace('_', ' ')\n        return score_name.capitalize()",
    "docstring": "Validate the parameter. If is provided, we just return it as-is. If is , we use if is and otherwise. If is a string or a callable, we infer the name. We replace by spaces and capitalize the first letter. We remove and replace it by if is or just remove it otherwise.",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\utils\\_plotting.py",
    "ast_data": "FunctionDef name:_validate_score_name arg:score_name arg:scoring arg:negate_score arguments arg arg arg If Compare Return return:yes If Compare Return return:yes Assign Call If If Call Assign Assign If Call Assign Assign Call Return return:yes Call"
  },
  {
    "library": "authlib",
    "name": "encrypt",
    "source_code": "def encrypt(self, msg, aad, iv, key):\n    self.check_iv(iv)\n    chacha = ChaCha20Poly1305(key)\n    ciphertext = chacha.encrypt(iv, msg, aad)\n    return (ciphertext[:-16], ciphertext[-16:])",
    "docstring": "Content Encryption with AEAD_CHACHA20_POLY1305. :param msg: text to be encrypt in bytes :param aad: additional authenticated data in bytes :param iv: initialization vector in bytes :param key: encrypted key in bytes :return: (ciphertext, tag)",
    "type": "method",
    "file_path": "authlib\\authlib\\jose\\drafts\\_jwe_enc_cryptography.py",
    "ast_data": "FunctionDef name:encrypt arg:self arg:msg arg:aad arg:iv arg:key arguments arg arg arg arg arg Call Assign Call Assign Call Return return:yes"
  },
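The method defers to `cryptography`'s ChaCha20Poly1305, whose AEAD output is the ciphertext with a 16-byte tag appended; a minimal sketch of the same split, assuming the `cryptography` package:

    import os
    from cryptography.hazmat.primitives.ciphers.aead import ChaCha20Poly1305

    key = ChaCha20Poly1305.generate_key()  # 32-byte key
    iv = os.urandom(12)                    # 96-bit nonce for ChaCha20-Poly1305
    out = ChaCha20Poly1305(key).encrypt(iv, b"secret message", b"header-aad")
    ciphertext, tag = out[:-16], out[-16:]  # same split the method returns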
  {
    "library": "kornia",
    "name": "laf_to_boundary_points",
    "source_code": "def laf_to_boundary_points(LAF: Tensor, n_pts: int=50) -> Tensor:\n    KORNIA_CHECK_LAF(LAF)\n    B, N, _, _ = LAF.size()\n    pts = concatenate([sin(torch.linspace(0, 2 * math.pi, n_pts - 1)).unsqueeze(-1), cos(torch.linspace(0, 2 * math.pi, n_pts - 1)).unsqueeze(-1), torch.ones(n_pts - 1, 1)], dim=1)\n    pts = concatenate([tensor([0.0, 0.0, 1.0]).view(1, 3), pts], dim=0).unsqueeze(0).expand(B * N, n_pts, 3)\n    pts = pts.to(LAF.device).to(LAF.dtype)\n    aux = tensor([0.0, 0.0, 1.0]).view(1, 1, 3).expand(B * N, 1, 3)\n    HLAF = concatenate([LAF.view(-1, 2, 3), aux.to(LAF.device).to(LAF.dtype)], dim=1)\n    pts_h = torch.bmm(HLAF, pts.permute(0, 2, 1)).permute(0, 2, 1)\n    return convert_points_from_homogeneous(pts_h.view(B, N, n_pts, 3))",
    "docstring": "Convert LAFs to boundary points of the regions + center. Used for local features visualization, see visualize_laf function. Args: LAF: :math: n_pts: number of points to output. Returns: tensor of boundary points LAF: :math:",
    "type": "function",
    "file_path": "kornia\\kornia\\feature\\laf.py",
    "ast_data": "FunctionDef name:laf_to_boundary_points arg:LAF arg:n_pts arguments arg arg Call Assign Call Assign Call Call Call Call Call Call Call Call Assign Call Call Call Call Call Assign Call Call Assign Call Call Call Assign Call Call Call Call Assign Call Call Call Return return:yes Call Call"
  },
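A minimal usage sketch, importing from the module named in the entry's file_path (`kornia.feature.laf`); the LAF values are illustrative:

    import torch
    from kornia.feature.laf import laf_to_boundary_points

    # One LAF: scale 10, centered at (32, 32); shape (B=1, N=1, 2, 3).
    laf = torch.tensor([[[[10.0, 0.0, 32.0], [0.0, 10.0, 32.0]]]])
    pts = laf_to_boundary_points(laf, n_pts=8)
    print(pts.shape)  # torch.Size([1, 1, 8, 2]): center plus boundary points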
  {
    "library": "scipy",
    "name": "clear_backends",
    "source_code": "def clear_backends(domain, registered=True, globals=False):\n    _uarray.clear_backends(domain, registered, globals)",
    "docstring": "This utility method clears registered backends. .. warning:: We caution library authors against using this function in their code. We do *not* support this use-case. This function is meant to be used only by users themselves. .. warning:: Do NOT use this method inside a multimethod call, or the program is likely to crash. Parameters ---------- domain : Optional[str] The domain for which to de-register backends. `register_backendset_global_backend`. See Also -------- register_backend : Register a backend globally. set_global_backend : Set a global backend.",
    "type": "function",
    "file_path": "scipy\\scipy\\_lib\\_uarray\\_backend.py",
    "ast_data": "FunctionDef name:clear_backends arg:domain arg:registered arg:globals arguments arg arg arg Call"
  },
  {
    "library": "matplotlib",
    "name": "_parse_datetime",
    "source_code": "def _parse_datetime(s):\n    return datetime.strptime(s, ISO8601) if s else datetime.fromtimestamp(0)",
    "docstring": "Parse dates in the format returned by the GitHub API.",
    "type": "function",
    "file_path": "matplotlib\\tools\\github_stats.py",
    "ast_data": "FunctionDef name:_parse_datetime arg:s arguments arg Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, filenames, compression_type=None, buffer_size=None, num_parallel_reads=None, name=None):\n    filenames = _create_or_validate_filenames_dataset(filenames, name=name)\n    self._filenames = filenames\n    self._compression_type = compression_type\n    self._buffer_size = buffer_size\n    self._num_parallel_reads = num_parallel_reads\n\n    def creator_fn(filename):\n        return _TFRecordDataset(filename, compression_type, buffer_size, name=name)\n    self._impl = _create_dataset_reader(creator_fn, filenames, num_parallel_reads, name=name)\n    variant_tensor = self._impl._variant_tensor\n    super(TFRecordDatasetV2, self).__init__(variant_tensor)",
    "docstring": "Creates a to read one or more TFRecord files. Each element of the dataset will contain a single TFRecord. Args: filenames: A tensor or containing one or more filenames. compression_type: (Optional.) A scalar evaluating to one of (no compression), , or . buffer_size: (Optional.) A scalar representing the number of bytes in the read buffer. If your input pipeline is I/O bottlenecked, consider setting this parameter to a value 1-100 MBs. If , a sensible default for both local and remote file systems is used. num_parallel_reads: (Optional.) A scalar representing the number of files to read in parallel. If greater than one, the records of files read in parallel are outputted in an interleaved order. If your input pipeline is I/O bottlenecked, consider setting this parameter to a value greater than one to parallelize the I/O. If , files will be read sequentially. name: (Optional.) A name for the tf.data operation. Raises: TypeError: If any argument does not have the expected type. ValueError: If any argument does not have the expected shape.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\data\\ops\\readers.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:filenames arg:compression_type arg:buffer_size arg:num_parallel_reads arg:name arguments arg arg arg arg arg arg Assign Call Assign Assign Assign Assign FunctionDef name:creator_fn arg:filename arguments arg Return return:yes Call Assign Call Assign Call Call"
  },
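A minimal round-trip sketch with the public `tf.data.TFRecordDataset`, assuming TensorFlow 2.x (the path is illustrative):

    import tensorflow as tf

    path = "/tmp/demo.tfrecord"
    with tf.io.TFRecordWriter(path) as w:
        w.write(b"first record")
        w.write(b"second record")

    for record in tf.data.TFRecordDataset([path]):
        print(record.numpy())  # b'first record', then b'second record'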
  {
    "library": "tensorflow",
    "name": "set_number_of_shards",
    "source_code": "def set_number_of_shards(self, number_of_shards):\n    if self._frozen:\n        if self._number_of_shards != number_of_shards:\n            raise ValueError(f\"Can't set sharding policy to use {number_of_shards} shards since it has been frozen to use {self._number_of_shards}\")\n    elif number_of_shards > 0:\n        self._number_of_shards = number_of_shards\n    else:\n        raise ValueError(f\"Can't set sharding policy to use {number_of_shards} shards; value must be > 0\")",
    "docstring": "Sets the number of shards for the current policy. If the policy has been frozen then number_of_shards must match the existing setting. Args: number_of_shards: The number of shards to use in the policy. Raises: ValueError: If the policy has been frozen and number_of_shards differs from the frozen value; or number_of_shards <= 0.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\tpu\\tpu_sharding.py",
    "ast_data": "FunctionDef name:set_number_of_shards arg:self arg:number_of_shards arguments arg arg If If Compare Raise Call If Compare Assign Raise Call"
  },
  {
    "library": "tensorflow",
    "name": "_batch",
    "source_code": "@abc.abstractmethod\ndef _batch(self, batch_size) -> TypeSpec:\n    raise NotImplementedError(f'{type(self).__name__}._batch')",
    "docstring": "Returns a TypeSpec representing a batch of objects with this TypeSpec. Args: batch_size: An representing the number of elements in a batch, or if the batch size may vary. Returns: A representing a batch of objects with this TypeSpec.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\type_spec.py",
    "ast_data": "FunctionDef name:_batch arg:self arg:batch_size arguments arg arg Raise Call Call"
  },
  {
    "library": "kornia",
    "name": "named_apply",
    "source_code": "def named_apply(fn: Callable, module: nn.Module, name='', depth_first=True, include_root=False) -> nn.Module:\n    if not depth_first and include_root:\n        fn(module=module, name=name)\n    for child_name, child_module in module.named_children():\n        child_name = '.'.join((name, child_name)) if name else child_name\n        named_apply(fn=fn, module=child_module, name=child_name, depth_first=depth_first, include_root=True)\n    if depth_first and include_root:\n        fn(module=module, name=name)\n    return module",
    "docstring": "Apply named function to module.",
    "type": "function",
    "file_path": "kornia\\kornia\\feature\\dedode\\transformer\\dinov2.py",
    "ast_data": "FunctionDef name:named_apply arg:fn arg:module arg:name arg:depth_first arg:include_root arguments arg arg arg arg arg If BoolOp Call For Call Assign Call Call If BoolOp Call Return return:yes"
  },
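A short sketch of `named_apply` on a toy module; the import path mirrors the entry's file_path and points at a vendored helper, so treat it as an assumption:

    import torch.nn as nn
    from kornia.feature.dedode.transformer.dinov2 import named_apply  # vendored helper

    model = nn.Sequential(nn.Linear(4, 8), nn.ReLU(), nn.Linear(8, 2))

    def report(module, name):
        print(name or "<root>", "->", type(module).__name__)

    named_apply(report, model, include_root=True)  # depth-first: children before root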
  {
    "library": "kornia",
    "name": "heatmap_and_dense_descriptors",
    "source_code": "def heatmap_and_dense_descriptors(self, images: Tensor) -> tuple[Tensor, Tensor]:\n    unet_output = self.unet(images)\n    if unet_output.shape[1] != self.desc_dim + 1:\n        raise ValueError(f'U-Net output has {unet_output.shape[1]} channels, but expected self.desc_dim={self.desc_dim} + 1.')\n    descriptors = unet_output[:, :self.desc_dim]\n    heatmaps = unet_output[:, self.desc_dim:]\n    return (heatmaps, descriptors)",
    "docstring": "Return the heatmap and the dense descriptors. .. image:: _static/img/DISK.png Args: images: The image to detect features in. Shape :math:. Returns: A tuple of dense detection scores and descriptors. Shapes are :math: and :math:, where :math: is the descriptor dimension.",
    "type": "method",
    "file_path": "kornia\\kornia\\feature\\disk\\disk.py",
    "ast_data": "FunctionDef name:heatmap_and_dense_descriptors arg:self arg:images arguments arg arg Assign Call If Compare Raise Call Assign Assign Return return:yes"
  },
  {
    "library": "seaborn",
    "name": "_not_left_axes",
    "source_code": "@property\ndef _not_left_axes(self):\n    if self._col_wrap is None:\n        return self.axes[:, 1:].flat\n    else:\n        axes = []\n        for i, ax in enumerate(self.axes):\n            if i % self._ncol:\n                axes.append(ax)\n        return np.array(axes, object).flat",
    "docstring": "Return a flat array of axes that aren't on the left column.",
    "type": "method",
    "file_path": "seaborn\\seaborn\\axisgrid.py",
    "ast_data": "FunctionDef name:_not_left_axes arg:self arguments arg If Compare Return return:yes Assign For Call If Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_detect_function_free_vars",
    "source_code": "def _detect_function_free_vars(fn):\n    assert isinstance(fn, types.FunctionType) or isinstance(fn, types.MethodType), f'The input should be of Python function type. Got type: {type(fn)}.'\n    queue = collections.deque([fn])\n    fn_map = dict()\n    while queue:\n        obj = queue.popleft()\n        signature = _make_callable_signature(obj)\n        if signature not in fn_map:\n            free_vars = _search_callable_free_vars(obj)\n            if not free_vars:\n                continue\n            fn_map[signature] = free_vars\n            for var in free_vars:\n                if var.is_function:\n                    obj = var.obj\n                    if _make_callable_signature(obj) not in fn_map:\n                        queue.append(obj)\n    return fn_map",
    "docstring": "Detect free vars in any Python function.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\core\\function\\capture\\free_vars_detect.py",
    "ast_data": "FunctionDef name:_detect_function_free_vars arg:fn arguments arg BoolOp Call Call Call Assign Call Assign Call While Assign Call Assign Call If Compare Assign Call If Assign For If Assign If Compare Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "set_initializer",
    "source_code": "def set_initializer(self, initializer):\n    self._initializer = initializer",
    "docstring": "Set initializer for this scope.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\variable_scope.py",
    "ast_data": "FunctionDef name:set_initializer arg:self arg:initializer arguments arg arg Assign"
  },
  {
    "library": "pytorch",
    "name": "broadcast_all",
    "source_code": "def broadcast_all(*values: Union[Tensor, Number]) -> tuple[Tensor, ...]:\n    if not all((is_tensor_like(v) or isinstance(v, _Number) for v in values)):\n        raise ValueError('Input arguments must all be instances of Number, torch.Tensor or objects implementing __torch_function__.')\n    if not all((is_tensor_like(v) for v in values)):\n        options: dict[str, Any] = dict(dtype=torch.get_default_dtype())\n        for value in values:\n            if isinstance(value, torch.Tensor):\n                options = dict(dtype=value.dtype, device=value.device)\n                break\n        new_values = [v if is_tensor_like(v) else torch.tensor(v, **options) for v in values]\n        return torch.broadcast_tensors(*new_values)\n    return torch.broadcast_tensors(*values)",
    "docstring": "Given a list of values (possibly containing numbers), returns a list where each value is broadcasted based on the following rules: - instances are broadcasted as per :ref:. - Number instances (scalars) are upcast to tensors having the same size and type as the first tensor passed to . If all the values are scalars, then they are upcasted to scalar Tensors. Args: values (list of , or objects implementing __torch_function__) Raises: ValueError: if any of the values is not a instance, a instance, or an instance implementing __torch_function__",
    "type": "function",
    "file_path": "pytorch\\torch\\distributions\\utils.py",
    "ast_data": "FunctionDef name:broadcast_all arguments arg If Call BoolOp Call Call Raise Call If Call Call Call Call For If Call Assign Call Assign Call Call Return return:yes Call Return return:yes Call"
  },
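A minimal sketch of `broadcast_all` mixing a scalar with a tensor, assuming PyTorch:

    import torch
    from torch.distributions.utils import broadcast_all

    a, b = broadcast_all(0.5, torch.randn(2, 3))
    print(a.shape, b.shape)  # both torch.Size([2, 3]); the scalar was upcast first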
  {
    "library": "numpy",
    "name": "open",
    "source_code": "def open(self, path, mode='r', encoding=None, newline=None):\n    if self._isurl(path) and self._iswritemode(mode):\n        raise ValueError('URLs are not writeable')\n    found = self._findfile(path)\n    if found:\n        _fname, ext = self._splitzipext(found)\n        if ext == 'bz2':\n            mode.replace('+', '')\n        return _file_openers[ext](found, mode=mode, encoding=encoding, newline=newline)\n    else:\n        raise FileNotFoundError(f'{path} not found.')",
    "docstring": "Open and return file-like object. If is an URL, it will be downloaded, stored in the directory and opened from there. Parameters ---------- path : str or pathlib.Path Local file path or URL to open. mode : {'r', 'w', 'a'}, optional Mode to open . Mode 'r' for reading, 'w' for writing, 'a' to append. Available modes depend on the type of object specified by . Default is 'r'. encoding : {None, str}, optional Open text file with given encoding. The default encoding will be what uses. newline : {None, str}, optional Newline to use when reading text file. Returns ------- out : file object File object.",
    "type": "method",
    "file_path": "numpy\\numpy\\lib\\_datasource.py",
    "ast_data": "FunctionDef name:open arg:self arg:path arg:mode arg:encoding arg:newline arguments arg arg arg arg arg If BoolOp Call Call Raise Call Assign Call If Assign Call If Compare Call Return return:yes Call Raise Call"
  },
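A minimal sketch of `DataSource.open` on a local file (NumPy 2.x exposes the class as `numpy.lib.npyio.DataSource`; older releases also had `np.DataSource`):

    import os, tempfile
    from numpy.lib.npyio import DataSource

    path = os.path.join(tempfile.gettempdir(), "demo.txt")
    with open(path, "w") as f:
        f.write("hello\n")

    ds = DataSource(None)  # None: any downloads would go to a temporary directory
    with ds.open(path, "r") as f:
        print(f.readline())  # hello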
  {
    "library": "numpy",
    "name": "mintypecode",
    "source_code": "@set_module('numpy')\ndef mintypecode(typechars, typeset='GDFgdf', default='d'):\n    typecodes = (isinstance(t, str) and t or asarray(t).dtype.char for t in typechars)\n    intersection = {t for t in typecodes if t in typeset}\n    if not intersection:\n        return default\n    if 'F' in intersection and 'd' in intersection:\n        return 'D'\n    return min(intersection, key=_typecodes_by_elsize.index)",
    "docstring": "Return the character for the minimum-size type to which given types can be safely cast. The returned type character must represent the smallest size dtype such that an array of the returned type can handle the data from an array of all types in (or if is an array, then its dtype.char). Parameters ---------- typechars : list of str or array_like If a list of strings, each string should represent a dtype. If array_like, the character representation of the array dtype is used. typeset : str or list of str, optional The set of characters that the returned character is chosen from. The default set is 'GDFgdf'. default : str, optional The default character, this is returned if none of the characters in matches a character in . Returns ------- typechar : str The character representing the minimum-size type that was found. See Also -------- dtype Examples -------- >>> import numpy as np >>> np.mintypecode(['d', 'f', 'S']) 'd' >>> x = np.array([1.1, 2-3.j]) >>> np.mintypecode(x) 'D' >>> np.mintypecode('abceh', default='G') 'G'",
    "type": "function",
    "file_path": "numpy\\numpy\\lib\\_type_check_impl.py",
    "ast_data": "FunctionDef name:mintypecode arg:typechars arg:typeset arg:default arguments arg arg arg Assign BoolOp BoolOp Call Call Assign Compare If Return return:yes If BoolOp Compare Compare Return return:yes Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "combined_commuting_positive_definite_hint",
    "source_code": "def combined_commuting_positive_definite_hint(operator_a, operator_b):\n    if operator_a.is_positive_definite is True and operator_a.is_self_adjoint is True and (operator_b.is_positive_definite is True) and (operator_b.is_self_adjoint is True):\n        return True\n    return None",
    "docstring": "Get combined PD hint for compositions.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\linalg\\property_hint_util.py",
    "ast_data": "FunctionDef name:combined_commuting_positive_definite_hint arg:operator_a arg:operator_b arguments arg arg If BoolOp Compare Compare Compare Compare Return return:yes Return return:no"
  },
  {
    "library": "tensorflow",
    "name": "parent_nodes",
    "source_code": "@property\ndef parent_nodes(self):\n    node_deps = []\n    for kt in self.keras_inputs:\n        layer = kt._keras_history.layer\n        node_index = kt._keras_history.node_index\n        if layer is not None:\n            node_deps.append(layer._inbound_nodes[node_index])\n    return node_deps",
    "docstring": "Returns all the s whose output this node immediately depends on.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\node.py",
    "ast_data": "FunctionDef name:parent_nodes arg:self arguments arg Assign For Assign Assign If Compare Call Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "binary_log_loss",
    "source_code": "def binary_log_loss(y_true, y_prob, sample_weight=None):\n    eps = np.finfo(y_prob.dtype).eps\n    y_prob = np.clip(y_prob, eps, 1 - eps)\n    return -np.average(xlogy(y_true, y_prob) + xlogy(1 - y_true, 1 - y_prob), weights=sample_weight, axis=0).sum()",
    "docstring": "Compute binary logistic loss for classification. This is identical to log_loss in binary classification case, but is kept for its use in multilabel case. Parameters ---------- y_true : array-like or label indicator matrix Ground truth (correct) labels. y_prob : array-like of float, shape = (n_samples, 1) Predicted probabilities, as returned by a classifier's predict_proba method. sample_weight : array-like of shape (n_samples,), default=None Sample weights. Returns ------- loss : float The degree to which the samples are correctly predicted.",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\neural_network\\_base.py",
    "ast_data": "FunctionDef name:binary_log_loss arg:y_true arg:y_prob arg:sample_weight arguments arg arg arg Assign Call Assign Call Return return:yes Call Call Call Call"
  },
  {
    "library": "scipy",
    "name": "MinimizerWrapper",
    "source_code": "class MinimizerWrapper:\n\n    def __init__(self, minimizer, func=None, **kwargs):\n        self.minimizer = minimizer\n        self.func = func\n        self.kwargs = kwargs\n\n    def __call__(self, x0):\n        if self.func is None:\n            return self.minimizer(x0, **self.kwargs)\n        else:\n            return self.minimizer(self.func, x0, **self.kwargs)",
    "docstring": "wrap a minimizer function as a minimizer class",
    "type": "class",
    "file_path": "scipy\\scipy\\optimize\\_basinhopping.py",
    "ast_data": "ClassDef name:MinimizerWrapper FunctionDef name:__init__ arg:self arg:minimizer arg:func arguments arg arg arg arg Assign Assign Assign FunctionDef name:__call__ arg:self arg:x0 arguments arg arg If Compare Return return:yes Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "get_stack",
    "source_code": "def get_stack(self) -> list[str]:\n    if hasattr(self.tls, 'stack'):\n        return self.tls.stack\n    else:\n        self.tls.stack = []\n        return self.tls.stack",
    "docstring": "The main event stack, with every chromium event. Logged to tlparse.",
    "type": "method",
    "file_path": "pytorch\\torch\\_dynamo\\utils.py",
    "ast_data": "FunctionDef name:get_stack arg:self arguments arg If Call Return return:yes Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "watch_gradients_by_tensors",
    "source_code": "def watch_gradients_by_tensors(self, graph, tensors):\n    if not isinstance(tensors, list):\n        tensors = [tensors]\n    tensor_name_regex = []\n    for tensor in tensors:\n        tensor_name_regex.append(re.escape(tensor.name) + '$')\n    tensor_name_regex = '(' + '|'.join(tensor_name_regex) + ')'\n    return self.watch_gradients_by_tensor_names(graph, tensor_name_regex)",
    "docstring": "Watch gradient tensors by x-tensor(s). The side effect of this method is that when gradient tensor(s) are created with respect to the any paths that include the s, the gradient tensor(s) with respect to the tensor will be registered with this this instance and can later be retrieved, with the methods and . Unlike the method , this method is used to retrieve gradient tensors after the construction of the forward subgraph has completed (but before the construction of the backward subgraph). This method is the same as except that the tensors are specified by the Python or objects, instead by name patterns. Example: Args: graph: the to watch the gradients on. tensors: a or object, or a list of such objects. Returns: The GradientsDebugger instance itself.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\debug\\lib\\debug_gradients.py",
    "ast_data": "FunctionDef name:watch_gradients_by_tensors arg:self arg:graph arg:tensors arguments arg arg arg If Call Assign Assign For Call Call Assign Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "Softmax2d",
    "source_code": "class Softmax2d(Module):\n\n    def forward(self, input: Tensor) -> Tensor:\n        if input.dim() not in (3, 4):\n            raise ValueError(f'Softmax2d: expected input to be 3D or 4D, got {input.dim()}D instead')\n        return F.softmax(input, -3, _stacklevel=5)",
    "docstring": "Applies SoftMax over features to each spatial location. When given an image of `Softmax(Channels, h_i, w_j)(N, C, H, W)(C, H, W)(N, C, H, W)(C, H, W)` (same shape as input) Returns: a Tensor of the same dimension and shape as the input with values in the range [0, 1] Examples:: >>> m = nn.Softmax2d() >>> # you softmax over the 2nd dimension >>> input = torch.randn(2, 3, 12, 13) >>> output = m(input)",
    "type": "class",
    "file_path": "pytorch\\torch\\nn\\modules\\activation.py",
    "ast_data": "ClassDef name:Softmax2d FunctionDef name:forward arg:self arg:input arguments arg arg If Compare Call Raise Call Call Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "size",
    "source_code": "@final\n@property\ndef size(self) -> int:\n    return int(np.prod(self.shape))",
    "docstring": "Return an int representing the number of elements in this object. Return the number of rows if Series. Otherwise return the number of rows times number of columns if DataFrame. See Also -------- numpy.ndarray.size : Number of elements in the array. Examples -------- >>> s = pd.Series({\"a\": 1, \"b\": 2, \"c\": 3}) >>> s.size 3 >>> df = pd.DataFrame({\"col1\": [1, 2], \"col2\": [3, 4]}) >>> df.size 4",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\generic.py",
    "ast_data": "FunctionDef name:size arg:self arguments arg Return return:yes Call Call"
  },
  {
    "library": "authlib",
    "name": "validate_claim_types_supported",
    "source_code": "def validate_claim_types_supported(self):\n    values = self.get('claim_types_supported')\n    if not values:\n        return\n    if not isinstance(values, list):\n        raise ValueError('\"claim_types_supported\" MUST be JSON array')\n    valid_values = {'normal', 'aggregated', 'distributed'}\n    if not valid_values.issuperset(set(values)):\n        raise ValueError('\"claim_types_supported\" contains invalid values')",
    "docstring": "OPTIONAL. JSON array containing a list of the Claim Types that the OpenID Provider supports. These Claim Types are described in Section 5.6 of OpenID Connect Core 1.0. Values defined by this specification are normal, aggregated, and distributed. If omitted, the implementation supports only normal Claims.",
    "type": "method",
    "file_path": "authlib\\authlib\\oidc\\discovery\\models.py",
    "ast_data": "FunctionDef name:validate_claim_types_supported arg:self arguments arg Assign Call If Return return:no If Call Raise Call Assign If Call Call Raise Call"
  },
  {
    "library": "numpy",
    "name": "isrealobj",
    "source_code": "@array_function_dispatch(_is_type_dispatcher)\ndef isrealobj(x):\n    return not iscomplexobj(x)",
    "docstring": "Return True if x is a not complex type or an array of complex numbers. The type of the input is checked, not the value. So even if the input has an imaginary part equal to zero, evaluates to False if the data type is complex. Parameters ---------- x : any The input can be of any type and shape. Returns ------- y : bool The return value, False if is of a complex type. See Also -------- iscomplexobj, isreal Notes ----- The function is only meant for arrays with numerical values but it accepts all other objects. Since it assumes array input, the return value of other objects may be True. >>> np.isrealobj('A string') True >>> np.isrealobj(False) True >>> np.isrealobj(None) True Examples -------- >>> import numpy as np >>> np.isrealobj(1) True >>> np.isrealobj(1+0j) False >>> np.isrealobj([3, 1+0j, True]) False",
    "type": "function",
    "file_path": "numpy\\numpy\\lib\\_type_check_impl.py",
    "ast_data": "FunctionDef name:isrealobj arg:x arguments arg Return return:yes Call Call"
  },
  {
    "library": "scikit-learn",
    "name": "predict",
    "source_code": "def predict(self, X):\n    check_is_fitted(self)\n    n_samples = _num_samples(X)\n    if self.label_binarizer_.y_type_ == 'multiclass':\n        maxima = np.empty(n_samples, dtype=float)\n        maxima.fill(-np.inf)\n        argmaxima = np.zeros(n_samples, dtype=int)\n        for i, e in enumerate(self.estimators_):\n            pred = _predict_binary(e, X)\n            np.maximum(maxima, pred, out=maxima)\n            argmaxima[maxima == pred] = i\n        return self.classes_[argmaxima]\n    else:\n        thresh = _threshold_for_binary_predict(self.estimators_[0])\n        indices = array.array('i')\n        indptr = array.array('i', [0])\n        for e in self.estimators_:\n            indices.extend(np.where(_predict_binary(e, X) > thresh)[0])\n            indptr.append(len(indices))\n        data = np.ones(len(indices), dtype=int)\n        indicator = sp.csc_matrix((data, indices, indptr), shape=(n_samples, len(self.estimators_)))\n        return self.label_binarizer_.inverse_transform(indicator)",
    "docstring": "Predict multi-class targets using underlying estimators. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) Data. Returns ------- y : {array-like, sparse matrix} of shape (n_samples,) or (n_samples, n_classes) Predicted multi-class targets.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\multiclass.py",
    "ast_data": "FunctionDef name:predict arg:self arg:X arguments arg arg Call Assign Call If Compare Assign Call Call Assign Call For Call Assign Call Call Assign Compare Return return:yes Assign Call Assign Call Assign Call For Call Call Compare Call Call Call Assign Call Call Assign Call Call Return return:yes Call"
  },
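The predict method above is what OneVsRestClassifier runs end-to-end; a small sketch of the multiclass path using public scikit-learn APIs:

from sklearn.datasets import load_iris
from sklearn.linear_model import LogisticRegression
from sklearn.multiclass import OneVsRestClassifier

X, y = load_iris(return_X_y=True)
clf = OneVsRestClassifier(LogisticRegression(max_iter=1000)).fit(X, y)
# multiclass target: predict() takes the argmax over per-estimator scores
print(clf.predict(X[:3]))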
  {
    "library": "numpy",
    "name": "tolist",
    "source_code": "def tolist(self):\n    return self.__array__().tolist()",
    "docstring": "Return the matrix as a (possibly nested) list. See for full documentation. See Also -------- ndarray.tolist Examples -------- >>> x = np.matrix(np.arange(12).reshape((3,4))); x matrix([[ 0, 1, 2, 3], [ 4, 5, 6, 7], [ 8, 9, 10, 11]]) >>> x.tolist() [[0, 1, 2, 3], [4, 5, 6, 7], [8, 9, 10, 11]]",
    "type": "method",
    "file_path": "numpy\\numpy\\matrixlib\\defmatrix.py",
    "ast_data": "FunctionDef name:tolist arg:self arguments arg Return return:yes Call Call"
  },
  {
    "library": "django",
    "name": "is_anonymous",
    "source_code": "@property\ndef is_anonymous(self):\n    return False",
    "docstring": "Always return False. This is a way of comparing User objects to anonymous users.",
    "type": "method",
    "file_path": "django\\django\\contrib\\auth\\base_user.py",
    "ast_data": "FunctionDef name:is_anonymous arg:self arguments arg Return return:yes"
  },
  {
    "library": "django",
    "name": "generate_filename",
    "source_code": "def generate_filename(self, filename):\n    filename = str(filename).replace('\\\\', '/')\n    dirname, filename = os.path.split(filename)\n    if '..' in pathlib.PurePath(dirname).parts:\n        raise SuspiciousFileOperation(\"Detected path traversal attempt in '%s'\" % dirname)\n    return os.path.normpath(os.path.join(dirname, self.get_valid_name(filename)))",
    "docstring": "Validate the filename by calling get_valid_name() and return a filename to be passed to the save() method.",
    "type": "method",
    "file_path": "django\\django\\core\\files\\storage\\base.py",
    "ast_data": "FunctionDef name:generate_filename arg:self arg:filename arguments arg arg Assign Call Call Assign Call If Compare Call Raise Call Return return:yes Call Call Call"
  },
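A sketch of the traversal check in generate_filename above, exercised through FileSystemStorage (which inherits the method); the location path is illustrative:

from django.core.exceptions import SuspiciousFileOperation
from django.core.files.storage import FileSystemStorage

storage = FileSystemStorage(location="/tmp/uploads")  # hypothetical location
print(storage.generate_filename("avatars/me.png"))    # normalized safe path
try:
    storage.generate_filename("avatars/../../etc/passwd")
except SuspiciousFileOperation as exc:
    print("rejected:", exc)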
  {
    "library": "pandas",
    "name": "describe_ndframe",
    "source_code": "def describe_ndframe(*, obj: NDFrameT, include: str | Sequence[str] | None, exclude: str | Sequence[str] | None, percentiles: Sequence[float] | np.ndarray | None) -> NDFrameT:\n    percentiles = _refine_percentiles(percentiles)\n    describer: NDFrameDescriberAbstract\n    if obj.ndim == 1:\n        describer = SeriesDescriber(obj=cast('Series', obj))\n    else:\n        describer = DataFrameDescriber(obj=cast('DataFrame', obj), include=include, exclude=exclude)\n    result = describer.describe(percentiles=percentiles)\n    return cast(NDFrameT, result)",
    "docstring": "Describe series or dataframe. Called from pandas.core.generic.NDFrame.describe() Parameters ---------- obj: DataFrame or Series Either dataframe or series to be described. include : 'all', list-like of dtypes or None (default), optional A white list of data types to include in the result. Ignored for ``, which returns the 25th, 50th, and 75th percentiles. Returns ------- Dataframe or series description.",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\methods\\describe.py",
    "ast_data": "FunctionDef name:describe_ndframe arguments arg arg arg arg Assign Call If Compare Assign Call Call Assign Call Call Assign Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "add",
    "source_code": "def add(self, word):\n    self._hash.update(word.encode())\n    self._digest = self._hash.digest()\n    node = self.root\n    for char in word:\n        node.children.setdefault(char, TrieNode())\n        node = node.children[char]\n    node.children[''] = True",
    "docstring": "Add a word to the Trie.",
    "type": "method",
    "file_path": "pytorch\\torch\\utils\\hipify\\hipify_python.py",
    "ast_data": "FunctionDef name:add arg:self arg:word arguments arg arg Call Call Assign Call Assign For Call Call Assign Assign"
  },
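A self-contained sketch of the trie technique in the add method above; TrieNode, the contains check, and the sample words are scaffolding for illustration, not hipify's public API:

import hashlib

class TrieNode:
    def __init__(self):
        self.children = {}

class Trie:
    def __init__(self):
        self.root = TrieNode()
        self._hash = hashlib.sha256()

    def add(self, word):
        # as in the entry above: a running hash fingerprints the word set,
        # and the '' key marks end-of-word
        self._hash.update(word.encode())
        self._digest = self._hash.digest()
        node = self.root
        for char in word:
            node.children.setdefault(char, TrieNode())
            node = node.children[char]
        node.children[''] = True

    def contains(self, word):
        node = self.root
        for char in word:
            if char not in node.children:
                return False
            node = node.children[char]
        return '' in node.children

t = Trie()
t.add("cudaMalloc")
print(t.contains("cudaMalloc"), t.contains("cuda"))  # True False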
  {
    "library": "virtualenv",
    "name": "add_parser_arguments",
    "source_code": "@classmethod\ndef add_parser_arguments(cls, parser, interpreter, app_data):\n    raise NotImplementedError",
    "docstring": "Add CLI arguments for this seed mechanisms. :param parser: the CLI parser :param app_data: the CLI parser :param interpreter: the interpreter this virtual environment is based of",
    "type": "method",
    "file_path": "virtualenv\\src\\virtualenv\\seed\\seeder.py",
    "ast_data": "FunctionDef name:add_parser_arguments arg:cls arg:parser arg:interpreter arg:app_data arguments arg arg arg arg Raise"
  },
  {
    "library": "cherrypy",
    "name": "header_elements",
    "source_code": "def header_elements(fieldname, fieldvalue):\n    if not fieldvalue:\n        return []\n    result = []\n    for element in RE_HEADER_SPLIT.split(fieldvalue):\n        if fieldname.startswith('Accept') or fieldname == 'TE':\n            hv = AcceptElement.from_str(element)\n        else:\n            hv = HeaderElement.from_str(element)\n        result.append(hv)\n    return list(reversed(sorted(result)))",
    "docstring": "Return a sorted :class: list. Constucted from a comma-separated header string.",
    "type": "function",
    "file_path": "cherrypy\\cherrypy\\lib\\httputil.py",
    "ast_data": "FunctionDef name:header_elements arg:fieldname arg:fieldvalue arguments arg arg If Return return:no Assign For Call If BoolOp Call Compare Assign Call Assign Call Call Return return:yes Call Call Call"
  },
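Assuming CherryPy is installed, header_elements can be exercised directly; elements come back best-first because of the reversed sort:

from cherrypy.lib.httputil import header_elements

for el in header_elements("Accept", "text/html;q=0.9, application/json"):
    print(el.value, el.qvalue)   # application/json (q=1.0) sorts first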
  {
    "library": "authlib",
    "name": "JWEAlgorithm",
    "source_code": "class JWEAlgorithm(JWEAlgorithmBase, metaclass=ABCMeta):\n\n    def wrap(self, enc_alg, headers, key, preset=None):\n        raise NotImplementedError\n\n    def unwrap(self, enc_alg, ek, headers, key):\n        raise NotImplementedError",
    "docstring": "Interface for JWE algorithm conforming to RFC7518. JWA specification (RFC7518) SHOULD implement the algorithms for JWE with this base implementation.",
    "type": "class",
    "file_path": "authlib\\authlib\\jose\\rfc7516\\models.py",
    "ast_data": "ClassDef name:JWEAlgorithm FunctionDef name:wrap arg:self arg:enc_alg arg:headers arg:key arg:preset arguments arg arg arg arg arg Raise FunctionDef name:unwrap arg:self arg:enc_alg arg:ek arg:headers arg:key arguments arg arg arg arg arg Raise"
  },
  {
    "library": "pandas",
    "name": "logical_op",
    "source_code": "def logical_op(left: ArrayLike, right: Any, op) -> ArrayLike:\n\n    def fill_bool(x, left=None):\n        if x.dtype.kind in 'cfO':\n            mask = isna(x)\n            if mask.any():\n                x = x.astype(object)\n                x[mask] = False\n        if left is None or left.dtype.kind == 'b':\n            x = x.astype(bool)\n        return x\n    right = lib.item_from_zerodim(right)\n    if is_list_like(right) and (not hasattr(right, 'dtype')):\n        raise TypeError('Logical ops (and, or, xor) between Pandas objects and dtype-less sequences (e.g. list, tuple) are no longer supported. Wrap the object in a Series, Index, or np.array before operating instead.')\n    lvalues = ensure_wrapped_if_datetimelike(left)\n    rvalues = right\n    if should_extension_dispatch(lvalues, rvalues):\n        res_values = op(lvalues, rvalues)\n    else:\n        if isinstance(rvalues, np.ndarray):\n            is_other_int_dtype = rvalues.dtype.kind in 'iu'\n            if not is_other_int_dtype:\n                rvalues = fill_bool(rvalues, lvalues)\n        else:\n            is_other_int_dtype = lib.is_integer(rvalues)\n        res_values = na_logical_op(lvalues, rvalues, op)\n        if not (left.dtype.kind in 'iu' and is_other_int_dtype):\n            res_values = fill_bool(res_values)\n    return res_values",
    "docstring": "Evaluate a logical operation , , or . Parameters ---------- left : np.ndarray or ExtensionArray right : object Cannot be a DataFrame, Series, or Index. op : {operator.and_, operator.or_, operator.xor} Or one of the reversed variants from roperator. Returns ------- ndarray or ExtensionArray",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\ops\\array_ops.py",
    "ast_data": "FunctionDef name:logical_op arg:left arg:right arg:op arguments arg arg arg FunctionDef name:fill_bool arg:x arg:left arguments arg arg If Compare Assign Call If Call Assign Call Assign If BoolOp Compare Compare Assign Call Return return:yes Assign Call If BoolOp Call Call Raise Call Assign Call Assign If Call Assign Call If Call Assign Compare If Assign Call Assign Call Assign Call If BoolOp Compare Assign Call Return return:yes"
  },
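logical_op sits behind the public Series operators; a short sketch of the behavior it enforces (the TypeError for dtype-less sequences comes straight from the code above):

import pandas as pd

s = pd.Series([True, False, True])
t = pd.Series([True, True, False])
print(s & t)                   # dispatches through logical_op
try:
    s & [True, True, False]    # plain list has no dtype: rejected
except TypeError as exc:
    print("TypeError:", exc)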
  {
    "library": "django",
    "name": "check_srs",
    "source_code": "def check_srs(self, source_srs):\n    if isinstance(source_srs, SpatialReference):\n        sr = source_srs\n    elif isinstance(source_srs, self.spatial_backend.spatial_ref_sys()):\n        sr = source_srs.srs\n    elif isinstance(source_srs, (int, str)):\n        sr = SpatialReference(source_srs)\n    else:\n        sr = self.layer.srs\n    if not sr:\n        raise LayerMapError('No source reference system defined.')\n    else:\n        return sr",
    "docstring": "Check the compatibility of the given spatial reference object.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\utils\\layermapping.py",
    "ast_data": "FunctionDef name:check_srs arg:self arg:source_srs arguments arg arg If Call Assign If Call Call Assign If Call Assign Call Assign If Raise Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "convert_inner_node_data",
    "source_code": "def convert_inner_node_data(nested, wrap=False):\n\n    def _is_serialized_node_data(nested):\n        if isinstance(nested, list) and len(nested) in [3, 4] and isinstance(nested[0], str):\n            return True\n        return False\n\n    def _is_atomic_nested(nested):\n        if isinstance(nested, ListWrapper):\n            return True\n        if _is_serialized_node_data(nested):\n            return True\n        return not nest.is_nested(nested)\n\n    def _convert_object_or_list(nested):\n        if wrap:\n            if isinstance(nested, ListWrapper):\n                return nested\n            if _is_serialized_node_data(nested):\n                return ListWrapper(nested)\n            return nested\n        else:\n            if isinstance(nested, ListWrapper):\n                return nested.as_list()\n            return nested\n    return map_structure_with_atomic(_is_atomic_nested, _convert_object_or_list, nested)",
    "docstring": "Either wraps or unwraps innermost node data lists in objects. Args: nested: A nested data structure. wrap: If , wrap innermost lists in objects. If , unwraps objects into lists. Returns: Structure of same type as nested, with lists wrapped/unwrapped.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\utils\\tf_utils.py",
    "ast_data": "FunctionDef name:convert_inner_node_data arg:nested arg:wrap arguments arg arg FunctionDef name:_is_serialized_node_data arg:nested arguments arg If BoolOp Call Compare Call Call Return return:yes Return return:yes FunctionDef name:_is_atomic_nested arg:nested arguments arg If Call Return return:yes If Call Return return:yes Return return:yes Call FunctionDef name:_convert_object_or_list arg:nested arguments arg If If Call Return return:yes If Call Return return:yes Call Return return:yes If Call Return return:yes Call Return return:yes Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "name",
    "source_code": "@property\ndef name(self):\n    return self.key",
    "docstring": "See base class.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\feature_column\\feature_column_v2.py",
    "ast_data": "FunctionDef name:name arg:self arguments arg Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "_draw_xobject_glyph",
    "source_code": "def _draw_xobject_glyph(self, font, fontsize, glyph_idx, x, y):\n    glyph_name = font.get_glyph_name(glyph_idx)\n    name = self.file._get_xobject_glyph_name(font.fname, glyph_name)\n    self.file.output(Op.gsave, 0.001 * fontsize, 0, 0, 0.001 * fontsize, x, y, Op.concat_matrix, Name(name), Op.use_xobject, Op.grestore)",
    "docstring": "Draw a multibyte character from a Type 3 font as an XObject.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\backends\\backend_pdf.py",
    "ast_data": "FunctionDef name:_draw_xobject_glyph arg:self arg:font arg:fontsize arg:glyph_idx arg:x arg:y arguments arg arg arg arg arg arg Assign Call Assign Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_from_local_devices",
    "source_code": "@classmethod\ndef _from_local_devices(cls, devices, communication=collective_util.CommunicationImplementation.AUTO):\n    obj = cls(communication)\n    obj.extended._initialize_local(tfconfig_cluster_resolver.TFConfigClusterResolver(), devices=devices)\n    return obj",
    "docstring": "A convenience method to create an object with a list of devices.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\collective_all_reduce_strategy.py",
    "ast_data": "FunctionDef name:_from_local_devices arg:cls arg:devices arg:communication arguments arg arg arg Assign Call Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "get_scheduler_node_symbol_uses",
    "source_code": "def get_scheduler_node_symbol_uses(node: BaseSchedulerNode) -> OrderedSet[sympy.Symbol]:\n    if isinstance(node, FusedSchedulerNode):\n        return OrderedSet().union(*(get_scheduler_node_symbol_uses(snode) for snode in node.snodes))\n    assert node.node is not None\n    free_symbol_uses = node.node.get_free_symbol_uses()\n    free_symbol_uses.update(*(get_layout_symints(ir_node) for ir_node in node.node.get_outputs()))\n    return free_symbol_uses",
    "docstring": "Gets symbols used in node.",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\scheduler.py",
    "ast_data": "FunctionDef name:get_scheduler_node_symbol_uses arg:node arguments arg If Call Return return:yes Call Call Call Compare Assign Call Call Call Call Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "iteration_ends",
    "source_code": "def iteration_ends(self, time_step):\n    pass",
    "docstring": "Perform update to learning rate and potentially other states at the end of an iteration",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\neural_network\\_stochastic_optimizers.py",
    "ast_data": "FunctionDef name:iteration_ends arg:self arg:time_step arguments arg arg"
  },
  {
    "library": "pandas",
    "name": "build_table_schema",
    "source_code": "def build_table_schema(data: DataFrame | Series, index: bool=True, primary_key: bool | None=None, version: bool=True) -> dict[str, JSONSerializable]:\n    if index is True:\n        data = set_default_names(data)\n    schema: dict[str, Any] = {}\n    fields = []\n    if index:\n        if data.index.nlevels > 1:\n            data.index = cast('MultiIndex', data.index)\n            for level, name in zip(data.index.levels, data.index.names):\n                new_field = convert_pandas_type_to_json_field(level)\n                new_field['name'] = name\n                fields.append(new_field)\n        else:\n            fields.append(convert_pandas_type_to_json_field(data.index))\n    if data.ndim > 1:\n        for column, s in data.items():\n            fields.append(convert_pandas_type_to_json_field(s))\n    else:\n        fields.append(convert_pandas_type_to_json_field(data))\n    schema['fields'] = fields\n    if index and data.index.is_unique and (primary_key is None):\n        if data.index.nlevels == 1:\n            schema['primaryKey'] = [data.index.name]\n        else:\n            schema['primaryKey'] = data.index.names\n    elif primary_key is not None:\n        schema['primaryKey'] = primary_key\n    if version:\n        schema['pandas_version'] = TABLE_SCHEMA_VERSION\n    return schema",
    "docstring": "Create a Table schema from `None'primaryKey'pandas_versionTable Schema anyenumorderedordered` field. Examples -------- >>> from pandas.io.json._table_schema import build_table_schema >>> df = pd.DataFrame( ... {'A': [1, 2, 3], ... 'B': ['a', 'b', 'c'], ... 'C': pd.date_range('2016-01-01', freq='D', periods=3), ... }, index=pd.Index(range(3), name='idx')) >>> build_table_schema(df) {'fields': [{'name': 'idx', 'type': 'integer'}, {'name': 'A', 'type': 'integer'}, {'name': 'B', 'type': 'string'}, {'name': 'C', 'type': 'datetime'}], 'primaryKey': ['idx'], 'pandas_version': '1.4.0'}",
    "type": "function",
    "file_path": "pandas\\pandas\\io\\json\\_table_schema.py",
    "ast_data": "FunctionDef name:build_table_schema arg:data arg:index arg:primary_key arg:version arguments arg arg arg arg If Compare Assign Call Assign If If Compare Assign Call For Call Assign Call Assign Call Call Call If Compare For Call Call Call Call Call Assign If BoolOp Compare If Compare Assign Assign If Compare Assign If Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "type_to_name",
    "source_code": "def type_to_name(tensor_type):\n    for name, value in schema_fb.TensorType.__dict__.items():\n        if value == tensor_type:\n            return name\n    return None",
    "docstring": "Converts a numerical enum to a readable tensor type.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\lite\\tools\\flatbuffer_utils.py",
    "ast_data": "FunctionDef name:type_to_name arg:tensor_type arguments arg For Call If Compare Return return:yes Return return:no"
  },
  {
    "library": "tensorflow",
    "name": "run_graph_optimizations",
    "source_code": "def run_graph_optimizations(graph_def, input_arrays, output_arrays, config, graph=None):\n    meta_graph = _export_meta_graph(graph_def=graph_def, graph=graph)\n    signature = _meta_graph_pb2.SignatureDef()\n    for array in input_arrays:\n        signature.inputs[array.name].name = array.name\n        signature.inputs[array.name].dtype = array.dtype.as_datatype_enum\n        signature.inputs[array.name].tensor_shape.CopyFrom(array.shape.as_proto())\n    for array in output_arrays:\n        signature.outputs[array.name].name = array.name\n        signature.outputs[array.name].dtype = array.dtype.as_datatype_enum\n        signature.outputs[array.name].tensor_shape.CopyFrom(array.shape.as_proto())\n    meta_graph.signature_def['not_used_key'].CopyFrom(signature)\n    fetch_collection = _meta_graph_pb2.CollectionDef()\n    for array in input_arrays + output_arrays:\n        fetch_collection.node_list.value.append(array.name)\n    meta_graph.collection_def['train_op'].CopyFrom(fetch_collection)\n    return tf_optimizer.OptimizeGraph(config, meta_graph)",
    "docstring": "Apply standard TensorFlow optimizations to the graph_def. Args: graph_def: Frozen GraphDef to be optimized. input_arrays: List of arrays that are considered inputs of the graph. output_arrays: List of arrays that are considered outputs of the graph. config: tf.ConfigProto. graph: TensorFlow Graph. Required when Eager mode is enabled. (default None) Returns: A new, optimized GraphDef.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\lite\\python\\util.py",
    "ast_data": "FunctionDef name:run_graph_optimizations arg:graph_def arg:input_arrays arg:output_arrays arg:config arg:graph arguments arg arg arg arg arg Assign Call Assign Call For Assign Assign Call Call For Assign Assign Call Call Call Assign Call For Call Call Return return:yes Call"
  },
  {
    "library": "django",
    "name": "wkb",
    "source_code": "@property\ndef wkb(self):\n    if sys.byteorder == 'little':\n        byteorder = 1\n    else:\n        byteorder = 0\n    sz = self.wkb_size\n    buf = (c_ubyte * sz)()\n    to_wkb = capi.to_iso_wkb if self.is_measured else capi.to_wkb\n    to_wkb(self.ptr, byteorder, byref(buf))\n    return memoryview(string_at(buf, sz))",
    "docstring": "Return the WKB representation of the Geometry.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\gdal\\geometries.py",
    "ast_data": "FunctionDef name:wkb arg:self arguments arg If Compare Assign Assign Assign Assign Call Assign Call Call Return return:yes Call Call"
  },
  {
    "library": "kornia",
    "name": "histogram2d",
    "source_code": "def histogram2d(x1: Tensor, x2: Tensor, bins: Tensor, bandwidth: Tensor, epsilon: float=1e-10) -> Tensor:\n    _, kernel_values1 = marginal_pdf(x1.unsqueeze(2), bins, bandwidth, epsilon)\n    _, kernel_values2 = marginal_pdf(x2.unsqueeze(2), bins, bandwidth, epsilon)\n    pdf = joint_pdf(kernel_values1, kernel_values2)\n    return pdf",
    "docstring": "Estimate the 2d histogram of the input tensor. The calculation uses kernel density estimation which requires a bandwidth (smoothing) parameter. Args: x1: Input tensor to compute the histogram with shape :math:. x2: Input tensor to compute the histogram with shape :math:. bins: The number of bins to use the histogram :math:. bandwidth: Gaussian smoothing factor with shape shape [1]. epsilon: A scalar, for numerical stability. Default: 1e-10. Returns: Computed histogram of shape :math:. Examples: >>> x1 = torch.rand(2, 32) >>> x2 = torch.rand(2, 32) >>> bins = torch.torch.linspace(0, 255, 128) >>> hist = histogram2d(x1, x2, bins, bandwidth=torch.tensor(0.9)) >>> hist.shape torch.Size([2, 128, 128])",
    "type": "function",
    "file_path": "kornia\\kornia\\enhance\\histogram.py",
    "ast_data": "FunctionDef name:histogram2d arg:x1 arg:x2 arg:bins arg:bandwidth arg:epsilon arguments arg arg arg arg arg Assign Call Call Assign Call Call Assign Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "build",
    "source_code": "def build(self, my_env: dict[str, str]) -> None:\n    from .env import build_type\n    build_args = ['--build', '.', '--target', 'install', '--config', build_type.build_type_string]\n    max_jobs = os.getenv('MAX_JOBS')\n    if max_jobs is not None or not USE_NINJA:\n        max_jobs = max_jobs or str(multiprocessing.cpu_count())\n        build_args += ['-j', max_jobs]\n    self.run(build_args, my_env)",
    "docstring": "Runs cmake to build binaries.",
    "type": "method",
    "file_path": "pytorch\\tools\\setup_helpers\\cmake.py",
    "ast_data": "FunctionDef name:build arg:self arg:my_env arguments arg arg Assign Assign Call If BoolOp Compare Assign BoolOp Call Call Call"
  },
  {
    "library": "kornia",
    "name": "save",
    "source_code": "def save(self, images: Union[Tensor, list[Tensor]], detections: Optional[Tensor]=None, directory: Optional[str]=None) -> None:\n    outputs = self.visualize(images, detections)\n    self._save_outputs(outputs, directory)",
    "docstring": "Save the output image(s) to a directory. Args: images: input tensor. detections: detection tensor. directory: directory to save the images.",
    "type": "method",
    "file_path": "kornia\\kornia\\models\\detection\\base.py",
    "ast_data": "FunctionDef name:save arg:self arg:images arg:detections arg:directory arguments arg arg arg arg Assign Call Call"
  },
  {
    "library": "numpy",
    "name": "min",
    "source_code": "def min(self, axis=None, out=None):\n    return N.ndarray.min(self, axis, out, keepdims=True)._collapse(axis)",
    "docstring": "Return the minimum value along an axis. Parameters ---------- See for complete descriptions. See Also -------- amin, ndarray.min Notes ----- This is the same as , but returns a object where would return an ndarray. Examples -------- >>> x = -np.matrix(np.arange(12).reshape((3,4))); x matrix([[ 0, -1, -2, -3], [ -4, -5, -6, -7], [ -8, -9, -10, -11]]) >>> x.min() -11 >>> x.min(0) matrix([[ -8, -9, -10, -11]]) >>> x.min(1) matrix([[ -3], [ -7], [-11]])",
    "type": "method",
    "file_path": "numpy\\numpy\\matrixlib\\defmatrix.py",
    "ast_data": "FunctionDef name:min arg:self arg:axis arg:out arguments arg arg arg Return return:yes Call Call"
  },
  {
    "library": "scipy",
    "name": "eliminate_zeros",
    "source_code": "def eliminate_zeros(self):\n    M, N = self._swap(self._shape_as_2d)\n    csr_eliminate_zeros(M, N, self.indptr, self.indices, self.data)\n    self.prune()",
    "docstring": "Remove zero entries from the array/matrix This is an *in place* operation.",
    "type": "method",
    "file_path": "scipy\\scipy\\sparse\\_compressed.py",
    "ast_data": "FunctionDef name:eliminate_zeros arg:self arguments arg Assign Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "LayerCall",
    "source_code": "class LayerCall(object):\n\n    def __init__(self, call_collection, call_fn, name, input_signature):\n        self.call_collection = call_collection\n        self.input_signature = input_signature\n        self.wrapped_call = def_function.function(layer_call_wrapper(call_collection, call_fn, name), input_signature=input_signature)\n        self.original_layer_call = call_collection.layer_call_method\n\n    def _maybe_trace(self, args, kwargs):\n        if tracing_enabled():\n            self.call_collection.add_trace(*args, **kwargs)\n\n    def __call__(self, *args, **kwargs):\n        self._maybe_trace(args, kwargs)\n        return self.wrapped_call(*args, **kwargs)\n\n    def get_concrete_function(self, *args, **kwargs):\n        self._maybe_trace(args, kwargs)\n        return self.wrapped_call.get_concrete_function(*args, **kwargs)",
    "docstring": "Function that triggers traces of other functions in the same collection.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\saving\\saved_model\\save_impl.py",
    "ast_data": "ClassDef name:LayerCall FunctionDef name:__init__ arg:self arg:call_collection arg:call_fn arg:name arg:input_signature arguments arg arg arg arg arg Assign Assign Assign Call Call Assign FunctionDef name:_maybe_trace arg:self arg:args arg:kwargs arguments arg arg arg If Call Call FunctionDef name:__call__ arg:self arguments arg arg arg Call Return return:yes Call FunctionDef name:get_concrete_function arg:self arguments arg arg arg Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "broadcast_coalesced",
    "source_code": "def broadcast_coalesced(tensors, devices, buffer_size=10485760):\n    devices = [_get_device_index(d) for d in devices]\n    tensors = [_handle_complex(t) for t in tensors]\n    return torch._C._broadcast_coalesced(tensors, devices, buffer_size)",
    "docstring": "Broadcast a sequence of tensors to the specified GPUs. Small tensors are first coalesced into a buffer to reduce the number of synchronizations. Args: tensors (sequence): tensors to broadcast. Must be on the same device, either CPU or GPU. devices (Iterable[torch.device, str or int]): an iterable of GPU devices, among which to broadcast. buffer_size (int): maximum size of the buffer used for coalescing Returns: A tuple containing copies of :attr:, placed on :attr:.",
    "type": "function",
    "file_path": "pytorch\\torch\\nn\\parallel\\comm.py",
    "ast_data": "FunctionDef name:broadcast_coalesced arg:tensors arg:devices arg:buffer_size arguments arg arg arg Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "sparse_categorical_crossentropy",
    "source_code": "@dispatch.add_dispatch_support\ndef sparse_categorical_crossentropy(y_true, y_pred, from_logits=False, axis=-1):\n    y_pred = tensor_conversion.convert_to_tensor_v2_with_dispatch(y_pred)\n    y_true = math_ops.cast(y_true, y_pred.dtype)\n    return backend.sparse_categorical_crossentropy(y_true, y_pred, from_logits=from_logits, axis=axis)",
    "docstring": "Computes the sparse categorical crossentropy loss. Standalone usage: >>> y_true = [1, 2] >>> y_pred = [[0.05, 0.95, 0], [0.1, 0.8, 0.1]] >>> loss = tf.keras.losses.sparse_categorical_crossentropy(y_true, y_pred) >>> assert loss.shape == (2,) >>> loss.numpy() array([0.0513, 2.303], dtype=float32) Args: y_true: Ground truth values. y_pred: The predicted values. from_logits: Whether is expected to be a logits tensor. By default, we assume that encodes a probability distribution. axis: Defaults to -1. The dimension along which the entropy is computed. Returns: Sparse categorical crossentropy loss value.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\losses.py",
    "ast_data": "FunctionDef name:sparse_categorical_crossentropy arg:y_true arg:y_pred arg:from_logits arg:axis arguments arg arg arg arg Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_get_processor",
    "source_code": "def _get_processor(v):\n    if context.executing_eagerly():\n        if isinstance(v, tensor.Tensor):\n            return _TensorProcessor(v)\n        else:\n            return _DenseResourceVariableProcessor(v)\n    if resource_variable_ops.is_resource_variable(v) and (not v._in_graph_mode):\n        return _DenseResourceVariableProcessor(v)\n    if v.op.type == 'VarHandleOp':\n        return _DenseResourceVariableProcessor(v)\n    if isinstance(v, variables.Variable):\n        return _RefVariableProcessor(v)\n    if isinstance(v, tensor.Tensor):\n        return _TensorProcessor(v)\n    raise NotImplementedError('Trying to optimize unsupported type ', v)",
    "docstring": "The processor of v.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\training\\optimizer.py",
    "ast_data": "FunctionDef name:_get_processor arg:v arguments arg If Call If Call Return return:yes Call Return return:yes Call If BoolOp Call Return return:yes Call If Compare Return return:yes Call If Call Return return:yes Call If Call Return return:yes Call Raise Call"
  },
  {
    "library": "matplotlib",
    "name": "remove",
    "source_code": "def remove(self, a):\n    self._axes.pop(a)",
    "docstring": "Remove the Axes from the stack.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\figure.py",
    "ast_data": "FunctionDef name:remove arg:self arg:a arguments arg arg Call"
  },
  {
    "library": "kornia",
    "name": "PassLAF",
    "source_code": "class PassLAF(nn.Module):\n\n    def forward(self, laf: torch.Tensor, img: torch.Tensor) -> torch.Tensor:\n        return laf",
    "docstring": "Dummy module to use instead of local feature orientation or affine shape estimator.",
    "type": "class",
    "file_path": "kornia\\kornia\\feature\\orientation.py",
    "ast_data": "ClassDef name:PassLAF FunctionDef name:forward arg:self arg:laf arg:img arguments arg arg arg Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "_repr_html_",
    "source_code": "def _repr_html_(self):\n    return ''.join([c._repr_html_() for c in self._colormaps])",
    "docstring": "Generate an HTML representation of the MultivarColormap.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\colors.py",
    "ast_data": "FunctionDef name:_repr_html_ arg:self arguments arg Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_ConstantTensorCodec",
    "source_code": "class _ConstantTensorCodec:\n\n    def can_encode(self, pyobj):\n        return isinstance(pyobj, tensor_lib.Tensor)\n\n    def do_encode(self, tensor_value, encode_fn):\n        del encode_fn\n        encoded_tensor = struct_pb2.StructuredValue()\n        if isinstance(tensor_value, ops.EagerTensor):\n            encoded_tensor.tensor_value.CopyFrom(tensor_util.make_tensor_proto(tensor_value.numpy()))\n        elif tensor_value.op.type == 'Const':\n            encoded_tensor.tensor_value.CopyFrom(tensor_value.op.get_attr('value'))\n        else:\n            raise nested_structure_coder.NotEncodableError(f'No encoder for object {str(tensor_value)} of type {type(tensor_value)}.')\n        return encoded_tensor\n\n    def can_decode(self, value):\n        return value.HasField('tensor_value')\n\n    def do_decode(self, value, decode_fn):\n        del decode_fn\n        tensor_proto = value.tensor_value\n        tensor = constant(tensor_util.MakeNdarray(tensor_proto))\n        return tensor",
    "docstring": "Codec for Tensor.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\constant_op.py",
    "ast_data": "ClassDef name:_ConstantTensorCodec FunctionDef name:can_encode arg:self arg:pyobj arguments arg arg Return return:yes Call FunctionDef name:do_encode arg:self arg:tensor_value arg:encode_fn arguments arg arg arg Assign Call If Call Call Call Call If Compare Call Call Raise Call Call Call Return return:yes FunctionDef name:can_decode arg:self arg:value arguments arg arg Return return:yes Call FunctionDef name:do_decode arg:self arg:value arg:decode_fn arguments arg arg arg Assign Assign Call Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "set_lineoffset",
    "source_code": "def set_lineoffset(self, lineoffset):\n    if lineoffset == self.get_lineoffset():\n        return\n    linelength = self.get_linelength()\n    segments = self.get_segments()\n    pos = 1 if self.is_horizontal() else 0\n    for segment in segments:\n        segment[0, pos] = lineoffset + linelength / 2.0\n        segment[1, pos] = lineoffset - linelength / 2.0\n    self.set_segments(segments)\n    self._lineoffset = lineoffset",
    "docstring": "Set the offset of the lines used to mark each event.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\collections.py",
    "ast_data": "FunctionDef name:set_lineoffset arg:self arg:lineoffset arguments arg arg If Compare Call Return return:no Assign Call Assign Call Assign Call For Assign Assign Call Assign"
  },
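A quick sketch with eventplot, which returns the EventCollection objects that expose set_lineoffset:

import matplotlib
matplotlib.use("Agg")  # headless backend for the sketch
import matplotlib.pyplot as plt

fig, ax = plt.subplots()
collections = ax.eventplot([[1, 2, 3], [1.5, 2.5]])
collections[0].set_lineoffset(2.0)   # re-center the first row's event markers
fig.savefig("events.png")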
  {
    "library": "kornia",
    "name": "fit",
    "source_code": "def fit(self, X: Tensor) -> None:\n    KORNIA_CHECK_SHAPE(X, ['N', 'D'])\n    if self._cluster_centers is None:\n        self._cluster_centers = self._initialise_cluster_centers(X, self.num_clusters)\n    else:\n        KORNIA_CHECK(X.shape[1] == self._cluster_centers.shape[1], f'Dimensions at position 1 of X and cluster_centers do not match.                 {X.shape[1]} != {self._cluster_centers.shape[1]}')\n    current_centers = self._cluster_centers\n    previous_centers: Tensor | None = None\n    iteration: int = 0\n    while True:\n        distance: Tensor = self._pairwise_euclidean_distance(X, current_centers)\n        cluster_assignment = distance.argmin(-1)\n        previous_centers = current_centers.clone()\n        for index in range(self.num_clusters):\n            selected = torch.nonzero(cluster_assignment == index).squeeze()\n            selected = torch.index_select(X, 0, selected)\n            if selected.shape[0] == 0:\n                selected = X[torch.randint(len(X), (1,), device=X.device)]\n            current_centers[index] = selected.mean(dim=0)\n        center_shift = torch.sum(torch.sqrt(torch.sum((current_centers - previous_centers) ** 2, dim=1)))\n        iteration = iteration + 1\n        if self.tolerance is not None and center_shift ** 2 < self.tolerance:\n            break\n        if self.max_iterations != 0 and iteration >= self.max_iterations:\n            break\n    self._final_cluster_assignments = cluster_assignment\n    self._final_cluster_centers = current_centers",
    "docstring": "Fit iterative KMeans clustering till a threshold for shift in cluster centers or a maximum no of iterations have reached. Args: X: 2D input tensor to be clustered",
    "type": "method",
    "file_path": "kornia\\kornia\\contrib\\kmeans.py",
    "ast_data": "FunctionDef name:fit arg:self arg:X arguments arg arg Call If Compare Assign Call Call Compare Assign While Call Assign Call Assign Call For Call Assign Call Call Compare Assign Call If Compare Assign Call Call Assign Call Assign Call Call Call Assign If BoolOp Compare Compare If BoolOp Compare Compare Assign Assign"
  },
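A usage sketch for the fit method above; the KMeans constructor arguments shown are assumptions about kornia.contrib's interface, not verified signatures:

import torch
from kornia.contrib import KMeans  # constructor args below are assumed

km = KMeans(num_clusters=3, cluster_centers=None,
            tolerance=1e-4, max_iterations=100)
X = torch.randn(500, 2)   # N x D, as the docstring requires
km.fit(X)                 # loops until center shift < tolerance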
  {
    "library": "pandas",
    "name": "round",
    "source_code": "def round(self, decimals: int=0) -> Self | Index:\n    if decimals >= 0:\n        return self.copy()\n    elif self.start % 10 ** (-decimals) == 0 and self.step % 10 ** (-decimals) == 0:\n        return self.copy()\n    else:\n        return super().round(decimals=decimals)",
    "docstring": "Round each value in the Index to the given number of decimals. Parameters ---------- decimals : int, optional Number of decimal places to round to. If decimals is negative, it specifies the number of positions to the left of the decimal point e.g. ``. Returns ------- Index or RangeIndex A new Index with the rounded values. Examples -------- >>> import pandas as pd >>> idx = pd.RangeIndex(10, 30, 10) >>> idx.round(decimals=-1) RangeIndex(start=10, stop=30, step=10) >>> idx = pd.RangeIndex(10, 15, 1) >>> idx.round(decimals=-1) Index([10, 10, 10, 10, 10], dtype='int64')",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\indexes\\range.py",
    "ast_data": "FunctionDef name:round arg:self arg:decimals arguments arg arg If Compare Return return:yes Call If BoolOp Compare Compare Return return:yes Call Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "Replicate",
    "source_code": "@dataclass(frozen=True)\nclass Replicate(Placement):\n\n    def __eq__(self, other: object) -> bool:\n        return isinstance(other, Replicate)\n\n    def __hash__(self) -> int:\n        return -1\n\n    def __repr__(self) -> str:\n        return 'Replicate()'\n\n    def __str__(self) -> str:\n        return 'R'\n\n    def _replicate_tensor(self, tensor: torch.Tensor, mesh: DeviceMesh, mesh_dim: int, src_data_rank: Optional[int]=0) -> torch.Tensor:\n        my_coordinate = mesh.get_coordinate()\n        if my_coordinate is None:\n            return tensor.new_empty(0, requires_grad=tensor.requires_grad)\n        tensor = tensor.contiguous()\n        if src_data_rank is not None:\n            mesh_broadcast(tensor, mesh, mesh_dim=mesh_dim, group_src=src_data_rank)\n        return tensor",
    "docstring": "The ``, etc.)",
    "type": "class",
    "file_path": "pytorch\\torch\\distributed\\tensor\\placement_types.py",
    "ast_data": "ClassDef name:Replicate FunctionDef name:__eq__ arg:self arg:other arguments arg arg Return return:yes Call FunctionDef name:__hash__ arg:self arguments arg Return return:yes FunctionDef name:__repr__ arg:self arguments arg Return return:yes FunctionDef name:__str__ arg:self arguments arg Return return:yes FunctionDef name:_replicate_tensor arg:self arg:tensor arg:mesh arg:mesh_dim arg:src_data_rank arguments arg arg arg arg arg Assign Call If Compare Return return:yes Call Assign Call If Compare Call Return return:yes Call"
  },
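A sketch of how Replicate is consumed by DTensor; it assumes a distributed launch (e.g. via torchrun with 2 ranks), and the module paths shown vary across PyTorch versions:

import torch
from torch.distributed.device_mesh import init_device_mesh
from torch.distributed.tensor import Replicate, distribute_tensor

mesh = init_device_mesh("cpu", (2,))   # assumes 2 ranks are running
t = torch.randn(4, 4)
dt = distribute_tensor(t, mesh, placements=[Replicate()])
# each rank along the mesh dimension now holds a full replica of t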
  {
    "library": "pytorch",
    "name": "AutoFunctionalized",
    "source_code": "class AutoFunctionalized(HigherOrderOperator):\n\n    def __init__(self) -> None:\n        super().__init__('auto_functionalized', cacheable=True)\n\n    def __call__(self, /, _mutable_op: OpOverload, **kwargs: Any) -> tuple[Any, tuple[Tensor, ...]]:\n        assert can_auto_functionalize(_mutable_op)\n        assert isinstance(kwargs, dict)\n        return super().__call__(_mutable_op, **kwargs)",
    "docstring": "auto_functionalized(_mutable_op, **kwargs) This HOP runs a \"functional\" version of _mutable_op. Concretely, it looks at all the arguments that are mutable through _mutable_op's operator schema, clones those kwargs, runs with the cloned values, and then returns the operator output concatenated with the cloned values that were mutated. We have some restrictions on . See for the restrictions. We can likely lift many of these if users request it. The reason why _mutable_op is prefixed with an underscore is to prevent collisions with kwarg names in **kwargs.",
    "type": "class",
    "file_path": "pytorch\\torch\\_higher_order_ops\\auto_functionalize.py",
    "ast_data": "ClassDef name:AutoFunctionalized FunctionDef name:__init__ arg:self arguments arg Call Call FunctionDef name:__call__ arg:_mutable_op arguments arg arg arg Call Call Return return:yes Call Call"
  },
  {
    "library": "seaborn",
    "name": "__enter__",
    "source_code": "def __enter__(self):\n    from .rcmod import set_palette\n    self._orig_palette = color_palette()\n    set_palette(self)\n    return self",
    "docstring": "Open the context.",
    "type": "method",
    "file_path": "seaborn\\seaborn\\palettes.py",
    "ast_data": "FunctionDef name:__enter__ arg:self arguments arg Assign Call Call Return return:yes"
  },
  {
    "library": "scrapy",
    "name": "get_item_pipeline",
    "source_code": "def get_item_pipeline(self, cls: type[_T]) -> _T | None:\n    if not self.engine:\n        raise RuntimeError('Crawler.get_item_pipeline() can only be called after the crawl engine has been created.')\n    return self._get_component(cls, self.engine.scraper.itemproc.middlewares)",
    "docstring": "Return the run-time instance of a :ref: of the specified class or a subclass, or `engine_startedspider_opened`.",
    "type": "method",
    "file_path": "scrapy\\scrapy\\crawler.py",
    "ast_data": "FunctionDef name:get_item_pipeline arg:self arg:cls arguments arg arg If Raise Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "shard_parameter",
    "source_code": "def shard_parameter(module: torch.nn.Module, param_name: str, sharding_spec: ShardingSpec, src_rank=0, process_group=None):\n    if not hasattr(module, param_name):\n        raise AttributeError(f'{module._get_name()} has no attribute `{param_name}`')\n    tensor = getattr(module, param_name)\n    if not isinstance(tensor, torch.Tensor):\n        raise ValueError(f'Expected {type(module).__name__}.{param_name} to be a Tensor, but found {type(tensor).__name__}')\n    if not tensor.is_contiguous():\n        raise ValueError(f'param: {param_name} is not a contiguous Tensor')\n    st = _shard_tensor(tensor, sharding_spec, src_rank, process_group)\n    module.register_parameter(param_name, nn.Parameter(st))",
    "docstring": "Given a :class:, a `torch.distributed._sharded_tensor.ShardedTensortorch.nn.Moduletorch.distributed._shard.sharding_spec.ShardingSpectorch.distributed._shard.sharding_spec.ChunkShardingSpec`.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\_shard\\api.py",
    "ast_data": "FunctionDef name:shard_parameter arg:module arg:param_name arg:sharding_spec arg:src_rank arg:process_group arguments arg arg arg arg arg If Call Raise Call Call Assign Call If Call Raise Call Call Call If Call Raise Call Assign Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "register_custom_device",
    "source_code": "def register_custom_device(self, device_capsule, device_name, device_info_capsule):\n    self.ensure_initialized()\n    pywrap_tfe.TFE_Py_RegisterCustomDevice(self._handle, device_capsule, device_name, device_info_capsule)",
    "docstring": "Calls TFE_RegisterCustomDevice. See the non-member function.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\context.py",
    "ast_data": "FunctionDef name:register_custom_device arg:self arg:device_capsule arg:device_name arg:device_info_capsule arguments arg arg arg arg Call Call"
  },
  {
    "library": "scikit-learn",
    "name": "LogLink",
    "source_code": "class LogLink(BaseLink):\n    interval_y_pred = Interval(0, np.inf, False, False)\n\n    def link(self, y_pred, out=None):\n        return np.log(y_pred, out=out)\n\n    def inverse(self, raw_prediction, out=None):\n        return np.exp(raw_prediction, out=out)",
    "docstring": "The log link function g(x)=log(x).",
    "type": "class",
    "file_path": "scikit-learn\\sklearn\\_loss\\link.py",
    "ast_data": "ClassDef name:LogLink Assign Call FunctionDef name:link arg:self arg:y_pred arg:out arguments arg arg arg Return return:yes Call FunctionDef name:inverse arg:self arg:raw_prediction arg:out arguments arg arg arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "gen_bvar",
    "source_code": "def gen_bvar(curr):\n    curr += 1\n    return (BVar(curr), curr)",
    "docstring": "Generate a boolean variable :param curr: the current counter :return: a boolean variable and an updated counter",
    "type": "function",
    "file_path": "pytorch\\torch\\fx\\experimental\\migrate_gradual_types\\util.py",
    "ast_data": "FunctionDef name:gen_bvar arg:curr arguments arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_make_execution_function",
    "source_code": "def _make_execution_function(model, mode, class_weight=None):\n    if mode == ModeKeys.TRAIN:\n        f = functools.partial(model.train_on_batch, class_weight=class_weight)\n    elif mode == ModeKeys.TEST:\n        f = model.test_on_batch\n    else:\n\n        def predict_on_batch(x, y=None, sample_weights=None):\n            return model.predict_on_batch(x)\n        f = predict_on_batch\n    if mode != ModeKeys.PREDICT:\n        f = functools.partial(f, reset_metrics=False)\n    return f",
    "docstring": "Makes function to run one step of model execution.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\training_generator_v1.py",
    "ast_data": "FunctionDef name:_make_execution_function arg:model arg:mode arg:class_weight arguments arg arg arg If Compare Assign Call If Compare Assign FunctionDef name:predict_on_batch arg:x arg:y arg:sample_weights arguments arg arg arg Return return:yes Call Assign If Compare Assign Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "DeviceGuard",
    "source_code": "class DeviceGuard:\n\n    def __init__(self, device_interface: type[DeviceInterface], index: Optional[int]) -> None:\n        self.device_interface = device_interface\n        self.idx = index\n        self.prev_idx = -1\n\n    def __enter__(self):\n        if self.idx is not None:\n            self.prev_idx = self.device_interface.exchange_device(self.idx)\n\n    def __exit__(self, type: Any, value: Any, traceback: Any):\n        if self.idx is not None:\n            self.idx = self.device_interface.maybe_exchange_device(self.prev_idx)\n        return False",
    "docstring": "This class provides a context manager for device switching. This is a stripped down version of torch.{device_name}.device. The context manager changes the current device to the given device index on entering the context and restores the original device on exiting. The device is switched using the provided device interface.",
    "type": "class",
    "file_path": "pytorch\\torch\\_dynamo\\device_interface.py",
    "ast_data": "ClassDef name:DeviceGuard FunctionDef name:__init__ arg:self arg:device_interface arg:index arguments arg arg arg Assign Assign Assign FunctionDef name:__enter__ arg:self arguments arg If Compare Assign Call FunctionDef name:__exit__ arg:self arg:type arg:value arg:traceback arguments arg arg arg arg If Compare Assign Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "DArrow",
    "source_code": "@_register_style(_style_list)\nclass DArrow:\n\n    def __init__(self, pad=0.3):\n        self.pad = pad\n\n    def __call__(self, x0, y0, width, height, mutation_size):\n        pad = mutation_size * self.pad\n        height = height + 2 * pad\n        x0, y0 = (x0 - pad, y0 - pad)\n        x1, y1 = (x0 + width, y0 + height)\n        dx = (y1 - y0) / 2\n        dxx = dx / 2\n        x0 = x0 + pad / 1.4\n        return Path._create_closed([(x0 + dxx, y0), (x1, y0), (x1, y0 - dxx), (x1 + dx + dxx, y0 + dx), (x1, y1 + dxx), (x1, y1), (x0 + dxx, y1), (x0 + dxx, y1 + dxx), (x0 - dx, y0 + dx), (x0 + dxx, y0 - dxx), (x0 + dxx, y0)])",
    "docstring": "A box in the shape of a two-way arrow.",
    "type": "class",
    "file_path": "matplotlib\\lib\\matplotlib\\patches.py",
    "ast_data": "ClassDef name:DArrow FunctionDef name:__init__ arg:self arg:pad arguments arg arg Assign FunctionDef name:__call__ arg:self arg:x0 arg:y0 arg:width arg:height arg:mutation_size arguments arg arg arg arg arg arg Assign Assign Assign Assign Assign Assign Assign Return return:yes Call Call"
  },
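DArrow is registered as a box style, so it is reachable through FancyBboxPatch's boxstyle string:

import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
from matplotlib.patches import FancyBboxPatch

fig, ax = plt.subplots()
ax.add_patch(FancyBboxPatch((0.2, 0.4), 0.6, 0.2,
                            boxstyle="darrow,pad=0.3"))
fig.savefig("darrow.png")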
  {
    "library": "scipy",
    "name": "_select_samples",
    "source_code": "def _select_samples(self, candidate, number_samples):\n    self.random_number_generator.shuffle(self._random_population_index)\n    idxs = self._random_population_index[:number_samples + 1]\n    return idxs[idxs != candidate][:number_samples]",
    "docstring": "obtain random integers from range(self.num_population_members), without replacement. You can't have the original candidate either.",
    "type": "method",
    "file_path": "scipy\\scipy\\optimize\\_differentialevolution.py",
    "ast_data": "FunctionDef name:_select_samples arg:self arg:candidate arg:number_samples arguments arg arg arg Call Assign Return return:yes Compare"
  },
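A standalone sketch of the sampling trick above: shuffle, take one spare index, then drop the candidate; the names here are illustrative, not SciPy's internals:

import numpy as np

rng = np.random.default_rng(0)
population = np.arange(10)   # stand-in for self._random_population_index

def select_samples(candidate, number_samples):
    rng.shuffle(population)
    idxs = population[:number_samples + 1]   # one spare in case candidate shows up
    return idxs[idxs != candidate][:number_samples]

print(select_samples(candidate=3, number_samples=4))  # never contains 3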
  {
    "library": "scipy",
    "name": "Shekel05",
    "source_code": "class Shekel05(Benchmark):\n\n    def __init__(self, dimensions=4):\n        Benchmark.__init__(self, dimensions)\n        self._bounds = list(zip([0.0] * self.N, [10.0] * self.N))\n        self.global_optimum = [[4.00003715092, 4.00013327435, 4.00003714871, 4.0001332742]]\n        self.fglob = -10.1531996791\n        self.A = asarray([[4.0, 4.0, 4.0, 4.0], [1.0, 1.0, 1.0, 1.0], [8.0, 8.0, 8.0, 8.0], [6.0, 6.0, 6.0, 6.0], [3.0, 7.0, 3.0, 7.0]])\n        self.C = asarray([0.1, 0.2, 0.2, 0.4, 0.4])\n\n    def fun(self, x, *args):\n        self.nfev += 1\n        return -sum(1 / (sum((x - self.A) ** 2, axis=1) + self.C))",
    "docstring": "Shekel 5 objective function. This class defines the Shekel 5 [1]_ global optimization problem. This is a multimodal minimization problem defined as follows: .. math:: f_{\\text{Shekel05}}(x) = \\sum_{i=1}^{m} \\frac{1}{c_{i} + \\sum_{j=1}^{n} (x_{j} - a_{ij})^2 }nx_i \\in [0, 10]i = 1, ..., 4f(x) = -10.15319585x_i = 4i = 1, ..., 4` .. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions For Global Optimization Problems Int. Journal of Mathematical Modelling and Numerical Optimisation, 2013, 4, 150-194. TODO: this is a different global minimum compared to Jamil#130. The minimum is found by doing lots of optimisations. The solution is supposed to be at [4] * N, is there any numerical overflow?",
    "type": "class",
    "file_path": "scipy\\benchmarks\\benchmarks\\go_benchmark_functions\\go_funcs_S.py",
    "ast_data": "ClassDef name:Shekel05 FunctionDef name:__init__ arg:self arg:dimensions arguments arg arg Call Assign Call Call Assign Assign Assign Call Assign Call FunctionDef name:fun arg:self arg:x arguments arg arg arg Return return:yes Call Call"
  },
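The Shekel 5 objective can be checked standalone with NumPy; evaluating at the reported optimum reproduces fglob:

import numpy as np

A = np.array([[4.0, 4.0, 4.0, 4.0], [1.0, 1.0, 1.0, 1.0],
              [8.0, 8.0, 8.0, 8.0], [6.0, 6.0, 6.0, 6.0],
              [3.0, 7.0, 3.0, 7.0]])
C = np.array([0.1, 0.2, 0.2, 0.4, 0.4])

def shekel05(x):
    x = np.asarray(x)
    return -np.sum(1.0 / (np.sum((x - A) ** 2, axis=1) + C))

print(shekel05([4.0, 4.0, 4.0, 4.0]))   # ~ -10.1532, matching fglob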
  {
    "library": "matplotlib",
    "name": "padded",
    "source_code": "def padded(self, w_pad, h_pad=None):\n    points = self.get_points()\n    if h_pad is None:\n        h_pad = w_pad\n    return Bbox(points + [[-w_pad, -h_pad], [w_pad, h_pad]])",
    "docstring": "Construct a by padding this one on all four sides. Parameters ---------- w_pad : float Width pad h_pad : float, optional Height pad. Defaults to *w_pad*.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\transforms.py",
    "ast_data": "FunctionDef name:padded arg:self arg:w_pad arg:h_pad arguments arg arg arg Assign Call If Compare Assign Return return:yes Call"
  },
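Bbox.padded in action; the second call uses the separate height pad:

from matplotlib.transforms import Bbox

bb = Bbox.from_extents(0, 0, 1, 1)
print(bb.padded(0.1))        # all four sides padded by 0.1
print(bb.padded(0.1, 0.5))   # width pad 0.1, height pad 0.5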
  {
    "library": "tensorflow",
    "name": "implicit_val_and_grad",
    "source_code": "def implicit_val_and_grad(f):\n\n    def grad_fn(*args, **kwds):\n        this_tape = tape.push_new_tape()\n        try:\n            end_node = f(*args, **kwds)\n            if end_node is None:\n                raise ValueError('Cannot differentiate a function that returns None; did you forget to return a value from {}?'.format(f.__name__))\n        finally:\n            tape.pop_tape(this_tape)\n        variables = this_tape.watched_variables()\n        if not variables:\n            raise ValueError('No trainable variables were accessed while the function was being computed.')\n        sources = [v.handle for v in variables]\n        for s in sources:\n            if getattr(s, 'is_packed', False):\n                raise ValueError('GradientTape.gradient is not supported on packed EagerTensors yet.')\n        grad = imperative_grad.imperative_grad(this_tape, nest.flatten(end_node), sources)\n        return (end_node, list(zip(grad, variables)))\n    return grad_fn",
    "docstring": "Returns a function which differentiates f with respect to variables. The wrapped function returns the value and the gradient of f when called with the same arguments. The gradient is with respect to all trainable TFE variables accessed by . This function is useful when the exact set of variables to differentiate with is not known ahead of time. Example: Args: f: function to be differentiated. If returns a scalar, this scalar will be differentiated. If returns a tensor or list of tensors, by default a scalar will be computed by adding all their values to produce a single scalar. Returns: A function which, when called, returns a tuple pair. Its first element is the value to which the function evaluates. Its second element is list of (gradient, variable) pairs. Raises: ValueError: if returns None.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\backprop.py",
    "ast_data": "FunctionDef name:implicit_val_and_grad arg:f arguments arg FunctionDef name:grad_fn arguments arg arg Assign Call Try Assign Call If Compare Raise Call Call Call Assign Call If Raise Call Assign For If Call Raise Call Assign Call Call Return return:yes Call Call Return return:yes"
  },
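`implicit_val_and_grad` lives in TensorFlow's internal `tensorflow.python.eager.backprop` module, so as a hedged illustration the public `tf.GradientTape` below reproduces the same value-plus-(gradient, variable)-pairs pattern the docstring describes:

```python
# Sketch: compute a value and its gradients w.r.t. implicitly accessed variables.
import tensorflow as tf

v = tf.Variable(3.0)

def loss_fn():
    return v * v  # accesses the trainable variable implicitly

with tf.GradientTape() as tape:
    value = loss_fn()
grads = tape.gradient(value, [v])
print(value.numpy(), grads[0].numpy())  # 9.0 6.0
```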
  {
    "library": "pytorch",
    "name": "_default",
    "source_code": "def _default(self, name: str, args: tuple[Any, ...], kwargs: dict[str, Any]) -> Any:\n    raise NotImplementedError",
    "docstring": "Default implementation for all ops. Override in a subclass to provide generic op behavior. Args: name: name of the op, see OpHandler.{name} args: positional args passed to the op kwargs: keyword args passed to the op Returns: return value of the op",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\ops_handler.py",
    "ast_data": "FunctionDef name:_default arg:self arg:name arg:args arg:kwargs arguments arg arg arg arg Raise"
  },
  {
    "library": "tensorflow",
    "name": "_expand_distributed_variables",
    "source_code": "def _expand_distributed_variables(self):\n    return self == VariablePolicy.EXPAND_DISTRIBUTED_VARIABLES",
    "docstring": "Checks whether distributed variables should be expanded.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\saved_model\\save_options.py",
    "ast_data": "FunctionDef name:_expand_distributed_variables arg:self arguments arg Return return:yes Compare"
  },
  {
    "library": "tensorflow",
    "name": "_alt_inner_shape_from_tensor_shape",
    "source_code": "def _alt_inner_shape_from_tensor_shape(shape, dtype, new_inner_rank):\n    if new_inner_rank == 1:\n        return constant_op.constant([shape.num_elements()], dtype=dtype)\n    new_inner_rank_tail_length = new_inner_rank - 1\n    inner_shape_tail = shape[-new_inner_rank_tail_length:].as_list()\n    first_dim = shape[:-new_inner_rank_tail_length].num_elements()\n    return constant_op.constant([first_dim] + inner_shape_tail, dtype=dtype)",
    "docstring": "Helper for _alt_inner_shape, used directly in _with_num_row_partitions.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\dynamic_ragged_shape.py",
    "ast_data": "FunctionDef name:_alt_inner_shape_from_tensor_shape arg:shape arg:dtype arg:new_inner_rank arguments arg arg arg If Compare Return return:yes Call Call Assign Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "kornia",
    "name": "to_onnx",
    "source_code": "def to_onnx(self, onnx_name: Optional[str]=None, image_size: Optional[int]=352, include_pre_and_post_processor: bool=True, save: bool=True, additional_metadata: Optional[list[tuple[str, str]]]=None, **kwargs: Any) -> onnx.ModelProto:\n    if onnx_name is None:\n        onnx_name = f'kornia_{self.name}_{image_size}.onnx'\n    return super().to_onnx(onnx_name, input_shape=[-1, 3, image_size or -1, image_size or -1], output_shape=[-1, 1, image_size or -1, image_size or -1], pseudo_shape=[1, 3, image_size or 352, image_size or 352], model=self if include_pre_and_post_processor else self.model, save=save, additional_metadata=additional_metadata, **kwargs)",
    "docstring": "Export the current edge detection model to an ONNX model file. Args: onnx_name: The name of the output ONNX file. If not provided, a default name in the format \"Kornia-.onnx\" will be used. image_size: The size to which input images will be resized during preprocessing. If None, image_size will be dynamic. For DexiNed, recommended scale is 352. include_pre_and_post_processor: Whether to include the pre-processor and post-processor in the exported model. save: If to save the model or load it. additional_metadata: Additional metadata to add to the ONNX model. kwargs: Additional arguments to convert to onnx.",
    "type": "method",
    "file_path": "kornia\\kornia\\models\\edge_detection\\base.py",
    "ast_data": "FunctionDef name:to_onnx arg:self arg:onnx_name arg:image_size arg:include_pre_and_post_processor arg:save arg:additional_metadata arguments arg arg arg arg arg arg arg If Compare Assign Return return:yes Call Call BoolOp BoolOp BoolOp BoolOp BoolOp BoolOp"
  },
  {
    "library": "tensorflow",
    "name": "ModuleWrapper",
    "source_code": "class ModuleWrapper(base_layer.Layer):\n\n    def __init__(self, module, method_name=None, **kwargs):\n        super(ModuleWrapper, self).__init__(**kwargs)\n        if method_name is None:\n            if hasattr(module, '__call__'):\n                method_name = '__call__'\n            elif hasattr(module, 'call'):\n                method_name = 'call'\n        if method_name is None or not hasattr(module, method_name):\n            raise ValueError('{} is not defined on object {}'.format(method_name, module))\n        self._module = module\n        self._method_name = method_name\n        method = getattr(module, method_name)\n        method_arg_spec = tf_inspect.getfullargspec(method)\n        self._expects_training_arg = 'training' in method_arg_spec.args or method_arg_spec.varkw is not None\n        self._expects_mask_arg = 'mask' in method_arg_spec.args or method_arg_spec.varkw is not None\n\n    def call(self, *args, **kwargs):\n        if 'training' in kwargs and (not self._expects_training_arg):\n            kwargs.pop('training')\n        if 'mask' in kwargs and (not self._expects_mask_arg):\n            kwargs.pop('mask')\n        return getattr(self._module, self._method_name)(*args, **kwargs)",
    "docstring": "Wrapper for s to support the Functional and Sequential API.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\functional.py",
    "ast_data": "ClassDef name:ModuleWrapper FunctionDef name:__init__ arg:self arg:module arg:method_name arguments arg arg arg arg Call Call If Compare If Call Assign If Call Assign If BoolOp Compare Call Raise Call Call Assign Assign Assign Call Assign Call Assign BoolOp Compare Compare Assign BoolOp Compare Compare FunctionDef name:call arg:self arguments arg arg arg If BoolOp Compare Call If BoolOp Compare Call Return return:yes Call Call"
  },
  {
    "library": "scrapy",
    "name": "crawled",
    "source_code": "def crawled(self, request: Request, response: Response, spider: Spider) -> LogFormatterResult:\n    request_flags = f' {request.flags!s}' if request.flags else ''\n    response_flags = f' {response.flags!s}' if response.flags else ''\n    return {'level': logging.DEBUG, 'msg': CRAWLEDMSG, 'args': {'status': response.status, 'request': request, 'request_flags': request_flags, 'referer': referer_str(request), 'response_flags': response_flags, 'flags': response_flags}}",
    "docstring": "Logs a message when the crawler finds a webpage.",
    "type": "method",
    "file_path": "scrapy\\scrapy\\logformatter.py",
    "ast_data": "FunctionDef name:crawled arg:self arg:request arg:response arg:spider arguments arg arg arg arg Assign Assign Return return:yes Call"
  },
  {
    "library": "django",
    "name": "construct_change_message",
    "source_code": "def construct_change_message(self, request, form, formsets, add=False):\n    return construct_change_message(form, formsets, add)",
    "docstring": "Construct a JSON structure describing changes from a changed object.",
    "type": "method",
    "file_path": "django\\django\\contrib\\admin\\options.py",
    "ast_data": "FunctionDef name:construct_change_message arg:self arg:request arg:form arg:formsets arg:add arguments arg arg arg arg arg Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "nn_accuracy",
    "source_code": "def nn_accuracy(X, X_embedded, k=1):\n    knn = NearestNeighbors(n_neighbors=1, n_jobs=-1)\n    _, neighbors_X = knn.fit(X).kneighbors()\n    _, neighbors_X_embedded = knn.fit(X_embedded).kneighbors()\n    return np.mean(neighbors_X == neighbors_X_embedded)",
    "docstring": "Accuracy of the first nearest neighbor",
    "type": "function",
    "file_path": "scikit-learn\\benchmarks\\bench_tsne_mnist.py",
    "ast_data": "FunctionDef name:nn_accuracy arg:X arg:X_embedded arg:k arguments arg arg arg Assign Call Assign Call Call Assign Call Call Return return:yes Call Compare"
  },
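A self-contained sketch exercising the 1-NN accuracy helper above on toy data; the random data and the 2-column "embedding" stand in for a real t-SNE output:

```python
# Sketch: fraction of points whose nearest neighbor is preserved by an embedding.
import numpy as np
from sklearn.neighbors import NearestNeighbors

rng = np.random.default_rng(0)
X = rng.normal(size=(50, 10))
X_embedded = X[:, :2]  # stand-in for a learned low-dimensional embedding

knn = NearestNeighbors(n_neighbors=1, n_jobs=-1)
_, neighbors_X = knn.fit(X).kneighbors()
_, neighbors_X_embedded = knn.fit(X_embedded).kneighbors()
print(np.mean(neighbors_X == neighbors_X_embedded))
```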
  {
    "library": "tensorflow",
    "name": "_restore_from_tensors",
    "source_code": "def _restore_from_tensors(self, restored_tensors):\n    expected_keys = []\n    for saveable in self.saveables:\n        expected_keys.extend((trackable_utils.extract_local_name(_convert_to_string(spec.name)) for spec in saveable.specs))\n    if set(expected_keys) != restored_tensors.keys():\n        raise ValueError(f'Could not restore object {self._obj} because not all expected tensors were in the checkpoint.\\n\\tExpected: {expected_keys}\\n\\tGot: {list(restored_tensors.keys())}')\n    return saveable_object_to_restore_fn(self.saveables)(restored_tensors)",
    "docstring": "Returns the restore ops defined in the Saveables.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\training\\saving\\saveable_object_util.py",
    "ast_data": "FunctionDef name:_restore_from_tensors arg:self arg:restored_tensors arguments arg arg Assign For Call Call Call If Compare Call Call Raise Call Call Call Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_TensorArrayConcatGrad",
    "source_code": "@ops.RegisterGradient('TensorArrayConcat')\n@ops.RegisterGradient('TensorArrayConcatV2')\n@ops.RegisterGradient('TensorArrayConcatV3')\ndef _TensorArrayConcatGrad(op: ops.Operation, grad, unused_lengths_grad):\n    handle = op.inputs[0]\n    flow = op.inputs[1]\n    lengths = op.outputs[1]\n    dtype = op.get_attr('dtype')\n    grad_source = _GetGradSource(grad)\n    g = tensor_array_ops.TensorArray(dtype=dtype, handle=handle, flow=flow, colocate_with_first_write_call=False).grad(source=grad_source, flow=flow)\n    u_g = g.split(grad, lengths=lengths)\n    return [None, u_g.flow]",
    "docstring": "Gradient for TensorArrayConcat. Args: op: Forward TensorArrayConcat op. grad: Gradient to TensorArrayConcat. Returns: A flow , which can be used in control dependencies to force the write of to the gradient .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\tensor_array_grad.py",
    "ast_data": "FunctionDef name:_TensorArrayConcatGrad arg:op arg:grad arg:unused_lengths_grad arguments arg arg arg Assign Assign Assign Assign Call Assign Call Assign Call Call Assign Call Return return:yes Call Call Call"
  },
  {
    "library": "cherrypy",
    "name": "pause_resume",
    "source_code": "def pause_resume(ns):\n\n    def _pause_resume(enabled):\n        pause_disabled = ''\n        resume_disabled = ''\n        if enabled:\n            resume_disabled = 'disabled=\"disabled\" '\n        else:\n            pause_disabled = 'disabled=\"disabled\" '\n        return '\\n            <form action=\"pause\" method=\"POST\" style=\"display:inline\">\\n            <input type=\"hidden\" name=\"namespace\" value=\"%s\" />\\n            <input type=\"submit\" value=\"Pause\" %s/>\\n            </form>\\n            <form action=\"resume\" method=\"POST\" style=\"display:inline\">\\n            <input type=\"hidden\" name=\"namespace\" value=\"%s\" />\\n            <input type=\"submit\" value=\"Resume\" %s/>\\n            </form>\\n            ' % (ns, pause_disabled, ns, resume_disabled)\n    return _pause_resume",
    "docstring": "Produce pause or resume HTML form maker.",
    "type": "function",
    "file_path": "cherrypy\\cherrypy\\lib\\cpstats.py",
    "ast_data": "FunctionDef name:pause_resume arg:ns arguments arg FunctionDef name:_pause_resume arg:enabled arguments arg Assign Assign If Assign Assign Return return:yes Return return:yes"
  },
  {
    "library": "scipy",
    "name": "num_obs_linkage",
    "source_code": "@xp_capabilities()\ndef num_obs_linkage(Z):\n    xp = array_namespace(Z)\n    Z = _asarray(Z, xp=xp)\n    _is_valid_linkage(Z, throw=True, name='Z', xp=xp)\n    return Z.shape[0] + 1",
    "docstring": "Return the number of original observations of the linkage matrix passed. Parameters ---------- Z : ndarray The linkage matrix on which to perform the operation. Returns ------- n : int The number of original observations in the linkage. Examples -------- >>> from scipy.cluster.hierarchy import ward, num_obs_linkage >>> from scipy.spatial.distance import pdist >>> X = [[0, 0], [0, 1], [1, 0], ... [0, 4], [0, 3], [1, 4], ... [4, 0], [3, 0], [4, 1], ... [4, 4], [3, 4], [4, 3]] >>> Z = ward(pdist(X)) ``, a dataset with 12 data points. >>> num_obs_linkage(Z) 12",
    "type": "function",
    "file_path": "scipy\\scipy\\cluster\\hierarchy.py",
    "ast_data": "FunctionDef name:num_obs_linkage arg:Z arguments arg Assign Call Assign Call Call Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "split_bezier_intersecting_with_closedpath",
    "source_code": "def split_bezier_intersecting_with_closedpath(bezier, inside_closedpath, tolerance=0.01):\n    bz = BezierSegment(bezier)\n    bezier_point_at_t = bz.point_at_t\n    t0, t1 = find_bezier_t_intersecting_with_closedpath(bezier_point_at_t, inside_closedpath, tolerance=tolerance)\n    _left, _right = split_de_casteljau(bezier, (t0 + t1) / 2.0)\n    return (_left, _right)",
    "docstring": "Split a Bézier curve into two at the intersection with a closed path. Parameters ---------- bezier : (N, 2) array-like Control points of the Bézier segment. See . inside_closedpath : callable A function returning True if a given point (x, y) is inside the closed path. See also . tolerance : float The tolerance for the intersection. See also . Returns ------- left, right Lists of control points for the two Bézier segments.",
    "type": "function",
    "file_path": "matplotlib\\lib\\matplotlib\\bezier.py",
    "ast_data": "FunctionDef name:split_bezier_intersecting_with_closedpath arg:bezier arg:inside_closedpath arg:tolerance arguments arg arg arg Assign Call Assign Assign Call Assign Call Return return:yes"
  },
  {
    "library": "scipy",
    "name": "_numeric_arrays",
    "source_code": "def _numeric_arrays(arrays, kinds='buifc', xp=None):\n    if xp is None:\n        xp = array_namespace(*arrays)\n    if not is_numpy(xp):\n        return True\n    if type(arrays) is np.ndarray:\n        return arrays.dtype.kind in kinds\n    for array_ in arrays:\n        if array_.dtype.kind not in kinds:\n            return False\n    return True",
    "docstring": "See if a list of arrays are all numeric. Parameters ---------- arrays : array or list of arrays arrays to check if numeric. kinds : string-like The dtypes of the arrays to be checked. If the dtype.kind of the ndarrays are not in this string the function returns False and otherwise returns True.",
    "type": "function",
    "file_path": "scipy\\scipy\\signal\\_signaltools.py",
    "ast_data": "FunctionDef name:_numeric_arrays arg:arrays arg:kinds arg:xp arguments arg arg arg If Compare Assign Call If Call Return return:yes If Compare Call Return return:yes Compare For If Compare Return return:yes Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_get_full_log_prob",
    "source_code": "def _get_full_log_prob(self, input, head_output):\n    out = input.new_empty((head_output.size(0), self.n_classes))\n    head_logprob = F.log_softmax(head_output, dim=1)\n    out[:, :self.shortlist_size] = head_logprob[:, :self.shortlist_size]\n    for i, (start_idx, stop_idx) in enumerate(zip(self.cutoffs, self.cutoffs[1:])):\n        cluster_output = self.tail[i](input)\n        cluster_logprob = F.log_softmax(cluster_output, dim=1)\n        output_logprob = cluster_logprob + head_logprob[:, self.shortlist_size + i].unsqueeze(1)\n        out[:, start_idx:stop_idx] = output_logprob\n    return out",
    "docstring": "Given input tensor, and output of ``, compute the log of the full distribution.",
    "type": "method",
    "file_path": "pytorch\\torch\\nn\\modules\\adaptive.py",
    "ast_data": "FunctionDef name:_get_full_log_prob arg:self arg:input arg:head_output arguments arg arg arg Assign Call Call Assign Call Assign For Call Call Assign Call Assign Call Assign Call Assign Return return:yes"
  },
  {
    "library": "django",
    "name": "register_combinable_fields",
    "source_code": "def register_combinable_fields(lhs, connector, rhs, result):\n    _connector_combinators[connector].append((lhs, rhs, result))",
    "docstring": "Register combinable types: lhs rhs -> result e.g. register_combinable_fields( IntegerField, Combinable.ADD, FloatField, FloatField )",
    "type": "function",
    "file_path": "django\\django\\db\\models\\expressions.py",
    "ast_data": "FunctionDef name:register_combinable_fields arg:lhs arg:connector arg:rhs arg:result arguments arg arg arg arg Call"
  },
  {
    "library": "numpy",
    "name": "_quantile_unchecked",
    "source_code": "def _quantile_unchecked(a, q, axis=None, out=None, overwrite_input=False, method='linear', keepdims=False, weights=None):\n    return _ureduce(a, func=_quantile_ureduce_func, q=q, weights=weights, keepdims=keepdims, axis=axis, out=out, overwrite_input=overwrite_input, method=method)",
    "docstring": "Assumes that q is in [0, 1], and is an ndarray",
    "type": "function",
    "file_path": "numpy\\numpy\\lib\\_function_base_impl.py",
    "ast_data": "FunctionDef name:_quantile_unchecked arg:a arg:q arg:axis arg:out arg:overwrite_input arg:method arg:keepdims arg:weights arguments arg arg arg arg arg arg arg arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_class_weights_map_fn",
    "source_code": "def _class_weights_map_fn(*data):\n    x, y, sw = unpack_x_y_sample_weight(data)\n    if nest.is_nested(y):\n        raise ValueError('`class_weight` is only supported for Models with a single output.')\n    if y.shape.rank > 2:\n        raise ValueError('`class_weight` not supported for 3+ dimensional targets.')\n    y_classes = smart_cond.smart_cond(y.shape.rank == 2 and backend.shape(y)[1] > 1, lambda: backend.argmax(y, axis=1), lambda: math_ops.cast(backend.reshape(y, (-1,)), dtypes.int64))\n    cw = array_ops.gather_v2(class_weight_tensor, y_classes)\n    if sw is not None:\n        cw = math_ops.cast(cw, sw.dtype)\n        sw, cw = expand_1d((sw, cw))\n        sw = sw * cw\n    else:\n        sw = cw\n    return (x, y, sw)",
    "docstring": "Convert to .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\data_adapter.py",
    "ast_data": "FunctionDef name:_class_weights_map_fn arguments arg Assign Call If Call Raise Call If Compare Raise Call Assign Call BoolOp Compare Compare Call arguments Call arguments Call Call Assign Call If Compare Assign Call Assign Call Assign Assign Return return:yes"
  },
  {
    "library": "pandas",
    "name": "_convert_strls",
    "source_code": "def _convert_strls(self, data: DataFrame) -> DataFrame:\n    convert_cols = [col for i, col in enumerate(data) if self.typlist[i] == 32768 or col in self._convert_strl]\n    if convert_cols:\n        ssw = StataStrLWriter(data, convert_cols, version=self._dta_version, byteorder=self._byteorder)\n        tab, new_data = ssw.generate_table()\n        data = new_data\n        self._strl_blob = ssw.generate_blob(tab)\n    return data",
    "docstring": "Convert columns to StrLs if either very large or in the convert_strl variable",
    "type": "method",
    "file_path": "pandas\\pandas\\io\\stata.py",
    "ast_data": "FunctionDef name:_convert_strls arg:self arg:data arguments arg arg Assign Call BoolOp Compare Compare If Assign Call Assign Call Assign Assign Call Return return:yes"
  },
  {
    "library": "scipy",
    "name": "Stochastic",
    "source_code": "class Stochastic(Benchmark):\n    change_dimensionality = True\n\n    def __init__(self, dimensions=2):\n        Benchmark.__init__(self, dimensions)\n        self._bounds = list(zip([-5.0] * self.N, [5.0] * self.N))\n        self.global_optimum = [[1.0 / _ for _ in range(1, self.N + 1)]]\n        self.fglob = 0.0\n\n    def fun(self, x, *args):\n        self.nfev += 1\n        rnd = uniform(0.0, 1.0, size=(self.N,))\n        i = arange(1, self.N + 1)\n        return sum(rnd * abs(x - 1.0 / i))",
    "docstring": "Stochastic objective function. This class defines the Stochastic [1]_ global optimization problem. This is a multimodal minimization problem defined as follows: .. math:: f_{\\text{Stochastic}}(x) = \\sum_{i=1}^{n} \\epsilon_i \\left | {x_i - \\frac{1}{i}} \\right | The variable :math: is a random variable uniformly distributed in :math:. Here, :math: represents the number of dimensions and :math: for :math:. *Global optimum*: :math: for :math: for :math: .. [1] Gavana, A. Global Optimization Benchmarks and AMPGO retrieved 2015",
    "type": "class",
    "file_path": "scipy\\benchmarks\\benchmarks\\go_benchmark_functions\\go_funcs_S.py",
    "ast_data": "ClassDef name:Stochastic Assign FunctionDef name:__init__ arg:self arg:dimensions arguments arg arg Call Assign Call Call Assign Call Assign FunctionDef name:fun arg:self arg:x arguments arg arg arg Assign Call Assign Call Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "should_convert",
    "source_code": "def should_convert(var_name: str, blacklist: list[str] | None, whitelist_prefix: list[str] | None):\n    if blacklist and var_name in blacklist:\n        return False\n    if not whitelist_prefix:\n        return True\n    for prefix in whitelist_prefix:\n        if var_name.startswith(prefix):\n            return True\n    return False",
    "docstring": "Check the variable name against white/black lists.",
    "type": "function",
    "file_path": "tensorflow\\ci\\official\\utilities\\convert_msys_paths_to_win_paths.py",
    "ast_data": "FunctionDef name:should_convert arg:var_name arg:blacklist arg:whitelist_prefix arguments arg arg arg If BoolOp Compare Return return:yes If Return return:yes For If Call Return return:yes Return return:yes"
  },
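A stand-alone sketch of the white/black-list logic in `should_convert`, restated here so it can be run without the TensorFlow CI tooling around it:

```python
# Sketch: blacklist wins, then an empty whitelist means "convert everything",
# otherwise the name must match one of the whitelisted prefixes.
def should_convert(var_name, blacklist, whitelist_prefix):
    if blacklist and var_name in blacklist:
        return False
    if not whitelist_prefix:
        return True
    return any(var_name.startswith(p) for p in whitelist_prefix)

print(should_convert("PATH", ["PATH"], None))    # False: blacklisted
print(should_convert("TF_HOME", None, ["TF_"]))  # True: matches prefix
print(should_convert("HOME", None, ["TF_"]))     # False: no prefix match
```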
  {
    "library": "pytorch",
    "name": "_IntegerInterval",
    "source_code": "class _IntegerInterval(Constraint):\n    is_discrete = True\n\n    def __init__(self, lower_bound, upper_bound):\n        self.lower_bound = lower_bound\n        self.upper_bound = upper_bound\n        super().__init__()\n\n    def check(self, value):\n        return (value % 1 == 0) & (self.lower_bound <= value) & (value <= self.upper_bound)\n\n    def __repr__(self):\n        fmt_string = self.__class__.__name__[1:]\n        fmt_string += f'(lower_bound={self.lower_bound}, upper_bound={self.upper_bound})'\n        return fmt_string",
    "docstring": "Constrain to an integer interval .",
    "type": "class",
    "file_path": "pytorch\\torch\\distributions\\constraints.py",
    "ast_data": "ClassDef name:_IntegerInterval Assign FunctionDef name:__init__ arg:self arg:lower_bound arg:upper_bound arguments arg arg arg Assign Assign Call Call FunctionDef name:check arg:self arg:value arguments arg arg Return return:yes Compare Compare Compare FunctionDef name:__repr__ arg:self arguments arg Assign Return return:yes"
  },
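The class above backs the public `constraints.integer_interval` alias in `torch.distributions.constraints`; a short usage sketch:

```python
# Sketch: check values against an integer interval constraint.
import torch
from torch.distributions import constraints

c = constraints.integer_interval(0, 5)
print(c.check(torch.tensor([0, 3, 5])))      # tensor([True, True, True])
print(c.check(torch.tensor([-1.0, 2.5, 6.0])))  # tensor([False, False, False])
print(c)  # IntegerInterval(lower_bound=0, upper_bound=5)
```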
  {
    "library": "django",
    "name": "lwgeom_version",
    "source_code": "def lwgeom_version(self):\n    return self._get_spatialite_func('lwgeom_version()')",
    "docstring": "Return the version of LWGEOM library used by SpatiaLite.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\db\\backends\\spatialite\\operations.py",
    "ast_data": "FunctionDef name:lwgeom_version arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "_compute_gram",
    "source_code": "def _compute_gram(self, X, sqrt_sw):\n    center = self.fit_intercept and sparse.issparse(X)\n    if not center:\n        X_mean = np.zeros(X.shape[1], dtype=X.dtype)\n        return (safe_sparse_dot(X, X.T, dense_output=True), X_mean)\n    n_samples = X.shape[0]\n    sample_weight_matrix = sparse.dia_matrix((sqrt_sw, 0), shape=(n_samples, n_samples))\n    X_weighted = sample_weight_matrix.dot(X)\n    X_mean, _ = mean_variance_axis(X_weighted, axis=0)\n    X_mean *= n_samples / sqrt_sw.dot(sqrt_sw)\n    X_mX = sqrt_sw[:, None] * safe_sparse_dot(X_mean, X.T, dense_output=True)\n    X_mX_m = np.outer(sqrt_sw, sqrt_sw) * np.dot(X_mean, X_mean)\n    return (safe_sparse_dot(X, X.T, dense_output=True) + X_mX_m - X_mX - X_mX.T, X_mean)",
    "docstring": "Computes the Gram matrix XX^T with possible centering. Parameters ---------- X : {ndarray, sparse matrix} of shape (n_samples, n_features) The preprocessed design matrix. sqrt_sw : ndarray of shape (n_samples,) square roots of sample weights Returns ------- gram : ndarray of shape (n_samples, n_samples) The Gram matrix. X_mean : ndarray of shape (n_feature,) The weighted mean of `` for each feature. Notes ----- When X is dense the centering has been done in preprocessing so the mean is 0 and we just compute XX^T. When X is sparse it has not been centered in preprocessing, but it has been scaled by sqrt(sample weights). When self.fit_intercept is False no centering is done. The centered X is never actually computed because centering would break the sparsity of X.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\linear_model\\_ridge.py",
    "ast_data": "FunctionDef name:_compute_gram arg:self arg:X arg:sqrt_sw arguments arg arg arg Assign BoolOp Call If Assign Call Return return:yes Call Assign Assign Call Assign Call Assign Call Call Assign Call Assign Call Call Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "_init_attrs",
    "source_code": "def _init_attrs(self, **kwargs):\n    attrs = self.__class__.__slots__\n    public_attrs = [attr[1:] for attr in attrs]\n    invalid_keys = set(kwargs.keys()) - set(public_attrs)\n    if invalid_keys:\n        raise ValueError(f'found {tuple(invalid_keys)} invalid keyword arguments, please only use {public_attrs}')\n    for attr in attrs:\n        setattr(self, attr, kwargs.get(attr[1:], None))",
    "docstring": "Initialize each attributes with the corresponding keyword arg value or a default of None",
    "type": "method",
    "file_path": "scipy\\scipy\\io\\_mmio.py",
    "ast_data": "FunctionDef name:_init_attrs arg:self arguments arg arg Assign Assign Assign Call Call Call If Raise Call Call For Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_NamedTupleCodec",
    "source_code": "class _NamedTupleCodec:\n\n    def can_encode(self, pyobj):\n        return _is_named_tuple(pyobj)\n\n    def do_encode(self, named_tuple_value, encode_fn):\n        encoded_named_tuple = struct_pb2.StructuredValue()\n        encoded_named_tuple.named_tuple_value.CopyFrom(struct_pb2.NamedTupleValue())\n        encoded_named_tuple.named_tuple_value.name = named_tuple_value.__class__.__name__\n        for key in named_tuple_value._fields:\n            pair = encoded_named_tuple.named_tuple_value.values.add()\n            pair.key = key\n            pair.value.CopyFrom(encode_fn(named_tuple_value._asdict()[key]))\n        return encoded_named_tuple\n\n    def can_decode(self, value):\n        return value.HasField('named_tuple_value')\n\n    def do_decode(self, value, decode_fn):\n        key_value_pairs = value.named_tuple_value.values\n        items = [(pair.key, decode_fn(pair.value)) for pair in key_value_pairs]\n        named_tuple_type = collections.namedtuple(value.named_tuple_value.name, [item[0] for item in items])\n        return named_tuple_type(**dict(items))",
    "docstring": "Codec for namedtuples. Encoding and decoding a namedtuple reconstructs a namedtuple with a different actual Python type, but with the same and .",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\saved_model\\nested_structure_coder.py",
    "ast_data": "ClassDef name:_NamedTupleCodec FunctionDef name:can_encode arg:self arg:pyobj arguments arg arg Return return:yes Call FunctionDef name:do_encode arg:self arg:named_tuple_value arg:encode_fn arguments arg arg arg Assign Call Call Call Assign For Assign Call Assign Call Call Call Return return:yes FunctionDef name:can_decode arg:self arg:value arguments arg arg Return return:yes Call FunctionDef name:do_decode arg:self arg:value arg:decode_fn arguments arg arg arg Assign Assign Call Assign Call Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_get_block_vars",
    "source_code": "def _get_block_vars(self, node, modified):\n    defined_in = anno.getanno(node, anno.Static.DEFINED_VARS_IN)\n    live_in = anno.getanno(node, anno.Static.LIVE_VARS_IN)\n    live_out = anno.getanno(node, anno.Static.LIVE_VARS_OUT)\n    fn_scope = self.state[_Function].scope\n    basic_scope_vars = self._get_block_basic_vars(modified, live_in, live_out)\n    composite_scope_vars = self._get_block_composite_vars(modified, live_in)\n    scope_vars = tuple(basic_scope_vars | composite_scope_vars)\n    possibly_undefined = modified - defined_in - fn_scope.globals - fn_scope.nonlocals\n    undefined = tuple((v for v in possibly_undefined if not v.is_composite()))\n    input_only = basic_scope_vars & live_in - live_out\n    scope_vars = sorted(scope_vars, key=lambda v: (v in input_only, v))\n    nouts = len(scope_vars) - len(input_only)\n    return (scope_vars, undefined, nouts)",
    "docstring": "Determines the variables affected inside a control flow statement.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\autograph\\converters\\control_flow.py",
    "ast_data": "FunctionDef name:_get_block_vars arg:self arg:node arg:modified arguments arg arg arg Assign Call Assign Call Assign Call Assign Assign Call Assign Call Assign Call Assign Assign Call Call Assign Assign Call arguments arg Compare Assign Call Call Return return:yes"
  },
  {
    "library": "scipy",
    "name": "Step",
    "source_code": "class Step(Benchmark):\n    change_dimensionality = True\n\n    def __init__(self, dimensions=2):\n        Benchmark.__init__(self, dimensions)\n        self._bounds = list(zip([-100.0] * self.N, [100.0] * self.N))\n        self.custom_bounds = ([-5, 5], [-5, 5])\n        self.global_optimum = [[0.0 for _ in range(self.N)]]\n        self.fglob = 0.0\n\n    def fun(self, x, *args):\n        self.nfev += 1\n        return sum(floor(abs(x)))",
    "docstring": "Step objective function. This class defines the Step [1]_ global optimization problem. This is a multimodal minimization problem defined as follows: .. math:: f_{\\text{Step}}(x) = \\sum_{i=1}^{n} \\left ( \\lfloor x_i + 0.5 \\rfloor \\right )^2 Here, :math: represents the number of dimensions and :math: for :math:. *Global optimum*: :math: for :math: for :math: .. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions For Global Optimization Problems Int. Journal of Mathematical Modelling and Numerical Optimisation, 2013, 4, 150-194.",
    "type": "class",
    "file_path": "scipy\\benchmarks\\benchmarks\\go_benchmark_functions\\go_funcs_S.py",
    "ast_data": "ClassDef name:Step Assign FunctionDef name:__init__ arg:self arg:dimensions arguments arg arg Call Assign Call Call Assign Assign Call Assign FunctionDef name:fun arg:self arg:x arguments arg arg arg Return return:yes Call Call Call"
  },
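A stand-alone sketch of the objective that `fun` above actually evaluates (note it computes `sum(floor(abs(x)))`, not the squared form in the docstring's formula):

```python
# Sketch: evaluate the Step benchmark objective as implemented in `fun`.
import numpy as np

def step_fun(x):
    return np.sum(np.floor(np.abs(x)))

print(step_fun(np.array([0.4, -0.4])))  # 0.0 near the global optimum
print(step_fun(np.array([2.7, -3.2])))  # 5.0
```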
  {
    "library": "django",
    "name": "year_lookup_bounds_for_datetime_field",
    "source_code": "def year_lookup_bounds_for_datetime_field(self, value, iso_year=False):\n    if iso_year:\n        first = datetime.datetime.fromisocalendar(value, 1, 1)\n        second = datetime.datetime.fromisocalendar(value + 1, 1, 1) - datetime.timedelta(microseconds=1)\n    else:\n        first = datetime.datetime(value, 1, 1)\n        second = datetime.datetime(value, 12, 31, 23, 59, 59, 999999)\n    if settings.USE_TZ:\n        tz = timezone.get_current_timezone()\n        first = timezone.make_aware(first, tz)\n        second = timezone.make_aware(second, tz)\n    first = self.adapt_datetimefield_value(first)\n    second = self.adapt_datetimefield_value(second)\n    return [first, second]",
    "docstring": "Return a two-elements list with the lower and upper bound to be used with a BETWEEN operator to query a DateTimeField value using a year lookup. is an int, containing the looked-up year. If is True, return bounds for ISO-8601 week-numbering years.",
    "type": "method",
    "file_path": "django\\django\\db\\backends\\base\\operations.py",
    "ast_data": "FunctionDef name:year_lookup_bounds_for_datetime_field arg:self arg:value arg:iso_year arguments arg arg arg If Assign Call Assign Call Call Assign Call Assign Call If Assign Call Assign Call Assign Call Assign Call Assign Call Return return:yes"
  },
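A stand-alone sketch of the bounds computed above for the non-ISO branch, using only the standard library (the timezone handling and `adapt_datetimefield_value` steps are Django-specific and omitted):

```python
# Sketch: inclusive BETWEEN bounds for all datetimes in a given year.
import datetime

value = 2024
first = datetime.datetime(value, 1, 1)
second = datetime.datetime(value, 12, 31, 23, 59, 59, 999999)
print(first, second)  # 2024-01-01 00:00:00  2024-12-31 23:59:59.999999
```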
  {
    "library": "tensorflow",
    "name": "with_dtype",
    "source_code": "def with_dtype(self, dtype):\n    if dtype == self.dtype:\n        return self\n    else:\n        return DynamicRaggedShape(self.row_partitions, self.inner_shape, dtype=dtype)",
    "docstring": "Change the dtype of the shape.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\dynamic_ragged_shape.py",
    "ast_data": "FunctionDef name:with_dtype arg:self arg:dtype arguments arg arg If Compare Return return:yes Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "can_fuse_vertical",
    "source_code": "def can_fuse_vertical(self, node1: BaseSchedulerNode, node2: BaseSchedulerNode) -> bool:\n    raise NotImplementedError",
    "docstring": "Check whether node1 and node2 can be vertically fused or not.",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\scheduler.py",
    "ast_data": "FunctionDef name:can_fuse_vertical arg:self arg:node1 arg:node2 arguments arg arg arg Raise"
  },
  {
    "library": "django",
    "name": "options",
    "source_code": "def options(self, request, *args, **kwargs):\n    response = HttpResponse()\n    response.headers['Allow'] = ', '.join(self._allowed_methods())\n    response.headers['Content-Length'] = '0'\n    if self.view_is_async:\n\n        async def func():\n            return response\n        return func()\n    else:\n        return response",
    "docstring": "Handle responding to requests for the OPTIONS HTTP verb.",
    "type": "method",
    "file_path": "django\\django\\views\\generic\\base.py",
    "ast_data": "FunctionDef name:options arg:self arg:request arguments arg arg arg arg Assign Call Assign Call Call Assign If AsyncFunctionDef name:func arguments Return return:yes Return return:yes Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "entropy",
    "source_code": "def entropy(self):\n    result = -self._mean_carrier_measure\n    nparams = [p.detach().requires_grad_() for p in self._natural_params]\n    lg_normal = self._log_normalizer(*nparams)\n    gradients = torch.autograd.grad(lg_normal.sum(), nparams, create_graph=True)\n    result += lg_normal\n    for np, g in zip(nparams, gradients):\n        result -= (np * g).reshape(self._batch_shape + (-1,)).sum(-1)\n    return result",
    "docstring": "Method to compute the entropy using Bregman divergence of the log normalizer.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributions\\exp_family.py",
    "ast_data": "FunctionDef name:entropy arg:self arguments arg Assign Assign Call Call Assign Call Assign Call Call For Call Call Call Return return:yes"
  },
  {
    "library": "numpy",
    "name": "as_ctypes",
    "source_code": "@set_module('numpy.ctypeslib')\ndef as_ctypes(obj):\n    ai = obj.__array_interface__\n    if ai['strides']:\n        raise TypeError('strided arrays not supported')\n    if ai['version'] != 3:\n        raise TypeError('only __array_interface__ version 3 supported')\n    addr, readonly = ai['data']\n    if readonly:\n        raise TypeError('readonly arrays unsupported')\n    ctype_scalar = as_ctypes_type(ai['typestr'])\n    result_type = _ctype_ndarray(ctype_scalar, ai['shape'])\n    result = result_type.from_address(addr)\n    result.__keep = obj\n    return result",
    "docstring": "Create and return a ctypes object from a numpy array. Actually anything that exposes the __array_interface__ is accepted. Examples -------- Create ctypes object from inferred int `` : >>> exp_int_array = np.array([1, 2, 3], dtype=np.uint8) >>> c_int_array = np.ctypeslib.as_ctypes(exp_int_array) >>> type(c_int_array) >>> c_int_array[:] [1, 2, 3]",
    "type": "function",
    "file_path": "numpy\\numpy\\ctypeslib\\_ctypeslib.py",
    "ast_data": "FunctionDef name:as_ctypes arg:obj arguments arg Assign If Raise Call If Compare Raise Call Assign If Raise Call Assign Call Assign Call Assign Call Assign Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "_record_output_indices",
    "source_code": "def _record_output_indices(self, Xs):\n    idx = 0\n    self.output_indices_ = {}\n    for transformer_idx, (name, _, _, _) in enumerate(self._iter(fitted=True, column_as_labels=False, skip_drop=True, skip_empty_columns=True)):\n        n_columns = Xs[transformer_idx].shape[1]\n        self.output_indices_[name] = slice(idx, idx + n_columns)\n        idx += n_columns\n    all_names = [t[0] for t in self.transformers] + ['remainder']\n    for name in all_names:\n        if name not in self.output_indices_:\n            self.output_indices_[name] = slice(0, 0)",
    "docstring": "Record which transformer produced which column.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\compose\\_column_transformer.py",
    "ast_data": "FunctionDef name:_record_output_indices arg:self arg:Xs arguments arg arg Assign Assign For Call Call Assign Assign Call Assign For If Compare Assign Call"
  },
  {
    "library": "tensorflow",
    "name": "regex_full_match",
    "source_code": "@tf_export('strings.regex_full_match')\n@dispatch.register_unary_elementwise_api\n@dispatch.add_dispatch_support\ndef regex_full_match(input, pattern, name=None):\n    if isinstance(pattern, util_compat.bytes_or_text_types):\n        return gen_string_ops.static_regex_full_match(input=input, pattern=pattern, name=name)\n    return gen_string_ops.regex_full_match(input=input, pattern=pattern, name=name)",
    "docstring": "Match elements of with regex . Args: input: string , the source strings to process. pattern: string or scalar string , regular expression to use, see more details at name: Name of the op. Returns: bool of the same shape as with match results.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\string_ops.py",
    "ast_data": "FunctionDef name:regex_full_match arg:input arg:pattern arg:name arguments arg arg arg If Call Return return:yes Call Return return:yes Call Call"
  },
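A short usage sketch for the public `tf.strings.regex_full_match` op documented above; note the pattern must match the entire string, not a substring:

```python
# Sketch: full-string regex matching over a string tensor.
import tensorflow as tf

s = tf.constant(["abc123", "abc", "123"])
print(tf.strings.regex_full_match(s, r"[a-z]+[0-9]+"))
# tf.Tensor([ True False False], shape=(3,), dtype=bool)
```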
  {
    "library": "matplotlib",
    "name": "__init__",
    "source_code": "@_docstring.interpd\ndef __init__(self, ax, *args, pivot='tip', length=7, barbcolor=None, flagcolor=None, sizes=None, fill_empty=False, barb_increments=None, rounding=True, flip_barb=False, **kwargs):\n    self.sizes = sizes or dict()\n    self.fill_empty = fill_empty\n    self.barb_increments = barb_increments or dict()\n    self.rounding = rounding\n    self.flip = np.atleast_1d(flip_barb)\n    transform = kwargs.pop('transform', ax.transData)\n    self._pivot = pivot\n    self._length = length\n    if None in (barbcolor, flagcolor):\n        kwargs['edgecolors'] = 'face'\n        if flagcolor:\n            kwargs['facecolors'] = flagcolor\n        elif barbcolor:\n            kwargs['facecolors'] = barbcolor\n        else:\n            kwargs.setdefault('facecolors', 'k')\n    else:\n        kwargs['edgecolors'] = barbcolor\n        kwargs['facecolors'] = flagcolor\n    if 'linewidth' not in kwargs and 'lw' not in kwargs:\n        kwargs['linewidth'] = 1\n    x, y, u, v, c = _parse_args(*args, caller_name='barbs')\n    self.x = x\n    self.y = y\n    xy = np.column_stack((x, y))\n    barb_size = self._length ** 2 / 4\n    super().__init__([], (barb_size,), offsets=xy, offset_transform=transform, **kwargs)\n    self.set_transform(transforms.IdentityTransform())\n    self.set_UVC(u, v, c)",
    "docstring": "The constructor takes one required argument, an Axes instance, followed by the args and kwargs described by the following pyplot interface documentation: %(barbs_doc)s",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\quiver.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:ax arguments arg arg arg arg arg arg arg arg arg arg arg arg arg Assign BoolOp Call Assign Assign BoolOp Call Assign Assign Call Assign Call Assign Assign If Compare Assign If Assign If Assign Call Assign Assign If BoolOp Compare Compare Assign Assign Call Assign Assign Assign Call Assign Call Call Call Call Call"
  },
  {
    "library": "matplotlib",
    "name": "get_data_ratio",
    "source_code": "def get_data_ratio(self):\n    return 1.0",
    "docstring": "Return the aspect ratio of the data itself.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\projections\\geo.py",
    "ast_data": "FunctionDef name:get_data_ratio arg:self arguments arg Return return:yes"
  },
  {
    "library": "pygame",
    "name": "get_clip",
    "source_code": "def get_clip(self):\n    return self._clip",
    "docstring": "get the area where drawing will occur LayeredDirty.get_clip(): return Rect",
    "type": "method",
    "file_path": "pygame\\src_py\\sprite.py",
    "ast_data": "FunctionDef name:get_clip arg:self arguments arg Return return:yes"
  },
  {
    "library": "cherrypy",
    "name": "__init__",
    "source_code": "def __init__(self, cpapp, pipeline=None):\n    self.cpapp = cpapp\n    self.pipeline = self.pipeline[:]\n    if pipeline:\n        self.pipeline.extend(pipeline)\n    self.config = self.config.copy()",
    "docstring": "Initialize a framework WSGI app wrapper.",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\_cpwsgi.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:cpapp arg:pipeline arguments arg arg arg Assign Assign If Call Assign Call"
  },
  {
    "library": "kornia",
    "name": "gaussian_discrete_erf",
    "source_code": "def gaussian_discrete_erf(window_size: int, sigma: Tensor | float, *, device: Optional[Device]=None, dtype: Optional[Dtype]=None) -> Tensor:\n    if isinstance(sigma, float):\n        sigma = tensor([[sigma]], device=device, dtype=dtype)\n    KORNIA_CHECK_SHAPE(sigma, ['B', '1'])\n    batch_size = sigma.shape[0]\n    x = (torch.arange(window_size, device=sigma.device, dtype=sigma.dtype) - window_size // 2).expand(batch_size, -1)\n    t = 0.70710678 / sigma.abs()\n    gauss = 0.5 * ((t * (x + 0.5)).erf() - (t * (x - 0.5)).erf())\n    gauss = gauss.clamp(min=0)\n    return gauss / gauss.sum(-1, keepdim=True)",
    "docstring": "Discrete Gaussian by interpolating the error function. Adapted from: Args: window_size: the size which drives the filter amount. sigma: gaussian standard deviation. If a tensor, should be in a shape :math: device: This value will be used if sigma is a float. Device desired to compute. dtype: This value will be used if sigma is a float. Dtype desired for compute. Returns: A tensor withshape :math:, with discrete Gaussian values computed by approximation of the error function.",
    "type": "function",
    "file_path": "kornia\\kornia\\filters\\kernels.py",
    "ast_data": "FunctionDef name:gaussian_discrete_erf arg:window_size arg:sigma arguments arg arg arg arg If Call Assign Call Call Assign Assign Call Call Assign Call Assign Call Call Assign Call Return return:yes Call"
  },
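A hedged usage sketch; the import path below follows the `file_path` of the entry above (`kornia/filters/kernels.py`), though the public re-export location may differ between kornia versions:

```python
# Sketch: build a 1D discrete Gaussian kernel via the erf approximation.
import torch
from kornia.filters.kernels import gaussian_discrete_erf

k = gaussian_discrete_erf(5, sigma=1.0)
print(k.shape)    # torch.Size([1, 5]) -- batch of 1 kernel
print(k.sum(-1))  # tensor([1.]) -- each kernel row is normalized
```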
  {
    "library": "cherrypy",
    "name": "ntou",
    "source_code": "def ntou(n, encoding='ISO-8859-1'):\n    assert_native(n)\n    return n",
    "docstring": "Convert a native :class: to a :class: instance. This doesn't actually do anything.",
    "type": "function",
    "file_path": "cherrypy\\cherrypy\\_cpcompat.py",
    "ast_data": "FunctionDef name:ntou arg:n arg:encoding arguments arg arg Call Return return:yes"
  },
  {
    "library": "pygame",
    "name": "read",
    "source_code": "def read(self, num_events):\n    _check_init()\n    self._check_open()\n    return self._input.Read(num_events)",
    "docstring": "reads num_events midi events from the buffer. Input.read(num_events): return midi_event_list Reads from the Input buffer and gives back midi events. [[[status,data1,data2,data3],timestamp], [[status,data1,data2,data3],timestamp],...]",
    "type": "method",
    "file_path": "pygame\\src_py\\midi.py",
    "ast_data": "FunctionDef name:read arg:self arg:num_events arguments arg arg Call Call Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "_decorate_fun",
    "source_code": "def _decorate_fun(self, fun):\n    msg = 'Function %s is deprecated' % fun.__name__\n    if self.extra:\n        msg += '; %s' % self.extra\n\n    @functools.wraps(fun)\n    def wrapped(*args, **kwargs):\n        warnings.warn(msg, category=FutureWarning)\n        return fun(*args, **kwargs)\n    wrapped.__wrapped__ = fun\n    return wrapped",
    "docstring": "Decorate function fun",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\utils\\deprecation.py",
    "ast_data": "FunctionDef name:_decorate_fun arg:self arg:fun arguments arg arg Assign If FunctionDef name:wrapped arguments arg arg Call Return return:yes Call Call Assign Return return:yes"
  },
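`_decorate_fun` backs scikit-learn's public `deprecated` decorator; a short sketch of the resulting warning behavior:

```python
# Sketch: deprecating a function emits a FutureWarning with the extra message.
import warnings
from sklearn.utils import deprecated

@deprecated("use new_add instead")
def old_add(a, b):
    return a + b

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    old_add(1, 2)
print(caught[0].message)  # Function old_add is deprecated; use new_add instead
```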
  {
    "library": "tensorflow",
    "name": "get_weights",
    "source_code": "def get_weights(self):\n    params = self.weights\n    return backend.batch_get_value(params)",
    "docstring": "Returns the current weights of the optimizer. The weights of an optimizer are its state (ie, variables). This function returns the weight values associated with this optimizer as a list of Numpy arrays. The first value is always the iterations count of the optimizer, followed by the optimizer's state variables in the order they were created. The returned list can in turn be used to load state into similarly parameterized optimizers. For example, the RMSprop optimizer for this simple model returns a list of three values-- the iteration count, followed by the root-mean-square value of the kernel and bias of the single Dense layer: >>> opt = tf.keras.optimizers.RMSprop() >>> m = tf.keras.models.Sequential([tf.keras.layers.Dense(10)]) >>> m.compile(opt, loss='mse') >>> data = np.arange(100).reshape(5, 20) >>> labels = np.zeros(5) >>> results = m.fit(data, labels) # Training. >>> len(opt.get_weights()) 3 Returns: Weights values as a list of numpy arrays.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\optimizer_v2\\optimizer_v2.py",
    "ast_data": "FunctionDef name:get_weights arg:self arguments arg Assign Return return:yes Call"
  },
  {
    "library": "sphinx",
    "name": "_load_additional_themes",
    "source_code": "def _load_additional_themes(self, theme_paths: list[str]) -> None:\n    for theme_path in theme_paths:\n        abs_theme_path = (self._app.confdir / theme_path).resolve()\n        themes = self._find_themes(abs_theme_path)\n        for name, theme in themes.items():\n            self._themes[name] = _StrPath(theme)",
    "docstring": "Load additional themes placed at specified directories.",
    "type": "method",
    "file_path": "sphinx\\sphinx\\theming.py",
    "ast_data": "FunctionDef name:_load_additional_themes arg:self arg:theme_paths arguments arg arg For Assign Call Assign Call For Call Assign Call"
  },
  {
    "library": "seaborn",
    "name": "get_layout_engine",
    "source_code": "def get_layout_engine(fig: Figure) -> mpl.layout_engine.LayoutEngine | None:\n    if hasattr(fig, 'get_layout_engine'):\n        return fig.get_layout_engine()\n    else:\n        return None",
    "docstring": "Handle changes to auto layout engine interface in 3.6",
    "type": "function",
    "file_path": "seaborn\\seaborn\\_compat.py",
    "ast_data": "FunctionDef name:get_layout_engine arg:fig arguments arg If Call Return return:yes Call Return return:no"
  },
  {
    "library": "matplotlib",
    "name": "set_label_position",
    "source_code": "def set_label_position(self, position):\n    _api.check_in_list(['lower', 'upper', 'both', 'default', 'none'], position=position)\n    self._label_position = position",
    "docstring": "Set the label position. Parameters ---------- position : {'lower', 'upper', 'both', 'default', 'none'} The position of the axis label.",
    "type": "method",
    "file_path": "matplotlib\\lib\\mpl_toolkits\\mplot3d\\axis3d.py",
    "ast_data": "FunctionDef name:set_label_position arg:self arg:position arguments arg arg Call Assign"
  },
  {
    "library": "tensorflow",
    "name": "ExportedConcreteFunction",
    "source_code": "class ExportedConcreteFunction(trackable.Trackable):\n    __slots__ = ('function', 'tensor_map')\n\n    def __init__(self, function, tensor_map):\n        self.function = function\n        self.tensor_map = tensor_map\n\n    def __call__(self, *args, **kwargs):\n        bound_arguments = function_type_utils.canonicalize_function_inputs(args, kwargs, self.function._function_type)\n        filtered_flat_args = self.function._function_type.unpack_inputs(bound_arguments)\n        export_captures = _map_captures_to_created_tensors(self.function.graph.captures, self.tensor_map, self.function)\n        return self.function._call_flat(filtered_flat_args, export_captures)",
    "docstring": "A callable class that uses captures from the exported SavedModel graph.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\polymorphic_function\\saved_model_exported_concrete.py",
    "ast_data": "ClassDef name:ExportedConcreteFunction Assign FunctionDef name:__init__ arg:self arg:function arg:tensor_map arguments arg arg arg Assign Assign FunctionDef name:__call__ arg:self arguments arg arg arg Assign Call Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "loc",
    "source_code": "@property\ndef loc(self) -> _LocIndexer:\n    return _LocIndexer('loc', self)",
    "docstring": "Access a group of rows and columns by label(s) or a boolean array. `Selection by Label for more details and explanations of Boolean indexing. .. note:: If you find yourself using 3 or more conditionals in `advanced indexinguser guide` for more details and explanations of advanced indexing.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\indexing.py",
    "ast_data": "FunctionDef name:loc arg:self arguments arg Return return:yes Call"
  },
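A brief usage sketch for the label-based `loc` indexer described above:

```python
# Sketch: label and boolean-array selection with DataFrame.loc.
import pandas as pd

df = pd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]}, index=["x", "y", "z"])
print(df.loc["y"])                # single row by label
print(df.loc[df["a"] > 1, "b"])   # boolean array for rows, label for column
```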
  {
    "library": "matplotlib",
    "name": "on_changed",
    "source_code": "def on_changed(self, func):\n    return self._observers.connect('changed', lambda val: func(val))",
    "docstring": "Connect *func* as callback function to changes of the slider value. Parameters ---------- func : callable Function to call when slider is changed. The function must accept a single float as its arguments. Returns ------- int Connection id (which can be used to disconnect *func*).",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\widgets.py",
    "ast_data": "FunctionDef name:on_changed arg:self arg:func arguments arg arg Return return:yes Call arguments arg Call"
  },
  {
    "library": "matplotlib",
    "name": "new_gc",
    "source_code": "def new_gc(self):\n    return GraphicsContextBase()",
    "docstring": "Return an instance of a .",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\backend_bases.py",
    "ast_data": "FunctionDef name:new_gc arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "comma_separate",
    "source_code": "def comma_separate(collection: Collection[Union[str, Collection[str]]]) -> str:\n    return ', '.join((item if isinstance(item, str) else f'({comma_separate(item)}{(',' if len(item) == 1 else '')})' for item in collection))",
    "docstring": "Convert a collection of strings representing first class dims into a comma-separated string. Args: collection (Collection[Union[str, Collection[str]]]): the collection of strings to convert Returns: str: the comma-separated string Examples: >>> comma_separate((\"d0\",)) 'd0' >>> comma_separate((\"d0\", \"d1\", \"d2\", \"d3\")) 'd0, d1, d2, d3' >>> comma_separate([(\"d1\", \"d4\")]) '(d1, d4)' >>> comma_separate([(\"d0\",), (), (\"d1\",), (\"d2\",), (\"d3\", \"d4\")]) '(d0,), (), (d1,), (d2,), (d3, d4)'",
    "type": "function",
    "file_path": "pytorch\\functorch\\einops\\_parsing.py",
    "ast_data": "FunctionDef name:comma_separate arg:collection arguments arg Return return:yes Call Call Call Compare Call"
  },
  {
    "library": "pytorch",
    "name": "stop",
    "source_code": "def stop(self) -> None:\n    log.info('Stopping noop health check server.')",
    "docstring": "Function to stop health check server",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\elastic\\agent\\server\\health_check_server.py",
    "ast_data": "FunctionDef name:stop arg:self arguments arg Call"
  },
  {
    "library": "scipy",
    "name": "_scale_parameters",
    "source_code": "def _scale_parameters(self, trial):\n    scaled = self.__scale_arg1 + (trial - 0.5) * self.__scale_arg2\n    if np.count_nonzero(self.integrality):\n        i = np.broadcast_to(self.integrality, scaled.shape)\n        scaled[i] = np.round(scaled[i])\n    return scaled",
    "docstring": "Scale from a number between 0 and 1 to parameters.",
    "type": "method",
    "file_path": "scipy\\scipy\\optimize\\_differentialevolution.py",
    "ast_data": "FunctionDef name:_scale_parameters arg:self arg:trial arguments arg arg Assign If Call Assign Call Assign Call Return return:yes"
  },
  {
    "library": "scipy",
    "name": "unitary_group_gen",
    "source_code": "class unitary_group_gen(multi_rv_generic):\n\n    def __init__(self, seed=None):\n        super().__init__(seed)\n        self.__doc__ = doccer.docformat(self.__doc__)\n\n    def __call__(self, dim=None, seed=None):\n        return unitary_group_frozen(dim, seed=seed)\n\n    def _process_parameters(self, dim):\n        if dim is None or not np.isscalar(dim) or dim < 0 or (dim != int(dim)):\n            raise ValueError('Dimension of rotation must be specified,and must be a scalar nonnegative integer.')\n        return dim\n\n    def rvs(self, dim, size=1, random_state=None):\n        random_state = self._get_random_state(random_state)\n        size = int(size)\n        dim = self._process_parameters(dim)\n        size = (size,) if size > 1 else ()\n        z = 1 / math.sqrt(2) * (random_state.normal(size=size + (dim, dim)) + 1j * random_state.normal(size=size + (dim, dim)))\n        q, r = np.linalg.qr(z)\n        d = r.diagonal(offset=0, axis1=-2, axis2=-1)\n        q *= (d / abs(d))[..., np.newaxis, :]\n        return q",
    "docstring": "A matrix-valued U(N) random variable. Return a random unitary matrix. The keyword specifies the dimension N. Methods ------- rvs(dim=None, size=1, random_state=None) Draw random samples from U(N). Parameters ---------- dim : scalar Dimension of matrices. seed : {None, int, np.random.RandomState, np.random.Generator}, optional Used for drawing random variates. If is , the singleton is used. If is an int, a new `seedNoneortho_groupmath-ph/0609050v2dim` parameter, return a \"frozen\" unitary_group random variable: >>> rv = unitary_group(5) See Also -------- ortho_group",
    "type": "class",
    "file_path": "scipy\\scipy\\stats\\_multivariate.py",
    "ast_data": "ClassDef name:unitary_group_gen FunctionDef name:__init__ arg:self arg:seed arguments arg arg Call Call Assign Call FunctionDef name:__call__ arg:self arg:dim arg:seed arguments arg arg arg Return return:yes Call FunctionDef name:_process_parameters arg:self arg:dim arguments arg arg If BoolOp Compare Call Compare Compare Call Raise Call Return return:yes FunctionDef name:rvs arg:self arg:dim arg:size arg:random_state arguments arg arg arg arg Assign Call Assign Call Assign Call Assign Compare Assign Call Call Call Assign Call Assign Call Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "fully_overlaps",
    "source_code": "def fully_overlaps(self, other):\n    ax1, ay1, ax2, ay2 = self.extents\n    bx1, by1, bx2, by2 = other.extents\n    if ax2 < ax1:\n        ax2, ax1 = (ax1, ax2)\n    if ay2 < ay1:\n        ay2, ay1 = (ay1, ay2)\n    if bx2 < bx1:\n        bx2, bx1 = (bx1, bx2)\n    if by2 < by1:\n        by2, by1 = (by1, by2)\n    return ax1 < bx2 and bx1 < ax2 and (ay1 < by2) and (by1 < ay2)",
    "docstring": "Return whether this bounding box overlaps with the other bounding box, not including the edges. Parameters ---------- other :",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\transforms.py",
    "ast_data": "FunctionDef name:fully_overlaps arg:self arg:other arguments arg arg Assign Assign If Compare Assign If Compare Assign If Compare Assign If Compare Assign Return return:yes BoolOp Compare Compare Compare Compare"
  },
  {
    "library": "scipy",
    "name": "tmin",
    "source_code": "@xp_capabilities()\n@_axis_nan_policy_factory(lambda x: x, n_outputs=1, result_to_tuple=lambda x, _: (x,))\ndef tmin(a, lowerlimit=None, axis=0, inclusive=True, nan_policy='propagate'):\n    xp = array_namespace(a)\n    max_ = xp.iinfo(a.dtype).max if xp.isdtype(a.dtype, 'integral') else xp.inf\n    a, mask = _put_val_to_limits(a, (lowerlimit, None), (inclusive, None), val=max_, xp=xp)\n    res = xp.min(a, axis=axis)\n    invalid = xp.all(mask, axis=axis)\n    if is_lazy_array(invalid) or xp.any(invalid):\n        res = xp_promote(res, force_floating=True, xp=xp)\n        res = xp.where(invalid, xp.nan, res)\n    return res[()] if res.ndim == 0 else res",
    "docstring": "Compute the trimmed minimum. This function finds the minimum value of an array along the specified axis, but only considering values greater than a specified lower limit. Parameters ---------- a : array_like Array of values. lowerlimit : None or float, optional Values in the input array less than the given limit will be ignored. When lowerlimit is None, then all values are used. The default value is None. axis : int or None, optional Axis along which to operate. Default is 0. If None, compute over the whole array . inclusive : {True, False}, optional This flag determines whether values exactly equal to the lower limit are included. The default value is True. Returns ------- tmin : float, int or ndarray Trimmed minimum. Examples -------- >>> import numpy as np >>> from scipy import stats >>> x = np.arange(20) >>> stats.tmin(x) 0 >>> stats.tmin(x, 13) 13 >>> stats.tmin(x, 13, inclusive=False) 14",
    "type": "function",
    "file_path": "scipy\\scipy\\stats\\_stats_py.py",
    "ast_data": "FunctionDef name:tmin arg:a arg:lowerlimit arg:axis arg:inclusive arg:nan_policy arguments arg arg arg arg arg Assign Call Assign Call Call Assign Call Assign Call Assign Call If BoolOp Call Call Assign Call Assign Call Return return:yes Compare Call Call arguments arg arguments arg arg"
  },
  {
    "library": "kornia",
    "name": "camtoworld_graphics_to_vision_Rt",
    "source_code": "def camtoworld_graphics_to_vision_Rt(R: Tensor, t: Tensor) -> tuple[Tensor, Tensor]:\n    KORNIA_CHECK_SHAPE(R, ['B', '3', '3'])\n    KORNIA_CHECK_SHAPE(t, ['B', '3', '1'])\n    mat4x4 = camtoworld_graphics_to_vision_4x4(Rt_to_matrix4x4(R, t))\n    return matrix4x4_to_Rt(mat4x4)",
    "docstring": "Convert graphics coordinate frame (e.g. OpenGL) to vision coordinate frame (e.g. OpenCV.). I.e. flips y and z axis. Graphics convention: [+x, +y, +z] == [right, up, backwards]. Vision convention: [+x, +y, +z] == [right, down, forwards]. Args: R: Rotation matrix, :math: t: Translation matrix :math:. Returns: R: Rotation matrix, :math: t: Translation matrix :math:. Example: >>> R, t = torch.eye(3)[None], torch.ones(3).reshape(1, 3, 1) >>> camtoworld_graphics_to_vision_Rt(R, t) (tensor([[[ 1., 0., 0.], [ 0., -1., 0.], [ 0., 0., -1.]]]), tensor([[[1.], [1.], [1.]]]))",
    "type": "function",
    "file_path": "kornia\\kornia\\geometry\\conversions.py",
    "ast_data": "FunctionDef name:camtoworld_graphics_to_vision_Rt arg:R arg:t arguments arg arg Call Call Assign Call Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "show_path",
    "source_code": "def show_path(from_op, tensors, sources):\n    if isinstance(from_op, tensor_lib.Tensor):\n        from_op = from_op.op\n    if not isinstance(tensors, list):\n        tensors = [tensors]\n    final_ops = [_as_operation(tensor) for tensor in tensors]\n    visited_ops = set((x.op for x in sources))\n    ops_to_visit = list(final_ops)\n    some_op_output = {}\n    while ops_to_visit:\n        op = ops_to_visit.pop()\n        if op in visited_ops:\n            continue\n        visited_ops.add(op)\n        if op == from_op:\n            path_op = op\n            path = [path_op]\n            while path_op not in final_ops:\n                path_op = some_op_output[path_op]\n                path.append(path_op)\n            return ' <- '.join(('%s (%s)' % (x.name, x.type) for x in reversed(path)))\n        else:\n            for inp in graph_inputs(op):\n                if inp not in visited_ops and inp not in sources:\n                    some_op_output[inp] = op\n                    ops_to_visit.append(inp)\n    return '??'",
    "docstring": "Find one path from to any of , ignoring . Args: from_op: A . tensors: A , a , or a list thereof. sources: A list of . Returns: A python string containing the path, or \"??\" if none is found.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\op_selector.py",
    "ast_data": "FunctionDef name:show_path arg:from_op arg:tensors arg:sources arguments arg arg arg If Call Assign If Call Assign Assign Call Assign Call Assign Call Assign While Assign Call If Compare Call If Compare Assign Assign While Compare Assign Call Return return:yes Call Call For Call If BoolOp Compare Compare Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "create_new_tf_function",
    "source_code": "def create_new_tf_function(func_graph):\n    transform.apply_func_graph_transforms(func_graph)\n    func = atomic_function.from_func_graph(func_graph.name, func_graph, {})\n    func_graph.outer_graph._add_function_recursive(func)\n    return func_graph.name",
    "docstring": "Converts func_graph to a TF_Function and adds it to the current graph. Args: func_graph: FuncGraph Returns: The name of the new TF_Function.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\control_flow_util_v2.py",
    "ast_data": "FunctionDef name:create_new_tf_function arg:func_graph arguments arg Call Assign Call Call Return return:yes"
  },
  {
    "library": "pygame",
    "name": "midis2events",
    "source_code": "def midis2events(midis, device_id):\n    evs = []\n    for midi in midis:\n        (status, data1, data2, data3), timestamp = midi\n        event = pygame.event.Event(MIDIIN, status=status, data1=data1, data2=data2, data3=data3, timestamp=timestamp, vice_id=device_id)\n        evs.append(event)\n    return evs",
    "docstring": "converts midi events to pygame events pygame.midi.midis2events(midis, device_id): return [Event, ...] Takes a sequence of midi events and returns list of pygame events.",
    "type": "function",
    "file_path": "pygame\\src_py\\midi.py",
    "ast_data": "FunctionDef name:midis2events arg:midis arg:device_id arguments arg arg Assign For Assign Assign Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_get_location_string",
    "source_code": "def _get_location_string(self, location):\n    callstack = []\n    for single_call in reversed(location.call):\n        if location.type == converter_error_data_pb2.ConverterErrorData.CALLSITELOC:\n            callstack.append(f'  - {single_call.source.filename}:{single_call.source.line}')\n        else:\n            callstack.append(str(single_call))\n    callstack_dump = '\\n'.join(callstack)\n    return callstack_dump",
    "docstring": "Dump location of ConveterError.errors.location.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\lite\\python\\authoring\\authoring.py",
    "ast_data": "FunctionDef name:_get_location_string arg:self arg:location arguments arg arg Assign For Call If Compare Call Call Call Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, input_fn, input_workers, input_contexts, strategy):\n    assert isinstance(input_workers, input_lib.InputWorkers)\n    if input_workers.num_workers != len(input_contexts):\n        raise ValueError('Number of input workers (%d) is not same as number of input_contexts (%d)' % (input_workers.num_workers, len(input_contexts)))\n    iterators = []\n    for i, ctx in enumerate(input_contexts):\n        worker = input_workers.worker_devices[i]\n        with ops.device(worker):\n            result = input_fn(ctx)\n            devices = input_workers.compute_devices_for_worker(i)\n            if isinstance(result, data_types.DatasetV2):\n                iterator = _SingleWorkerDatasetIterator(result, worker, devices)\n            elif callable(result):\n                iterator = _SingleWorkerCallableIterator(result, worker, devices)\n            else:\n                raise ValueError('input_fn must return a tf.data.Dataset or a callable.')\n            iterators.append(iterator)\n    super(InputFunctionIterator, self).__init__(input_workers, iterators, strategy, cardinality=cardinality_lib.UNKNOWN, enable_get_next_as_optional=False)\n    self._enable_get_next_as_optional = False",
    "docstring": "Make an iterator for input provided via an input function. Currently implements PER_WORKER mode, in which the is called once on each worker. TODO(priyag): Add other replication modes. Args: input_fn: Input function that returns a object. input_workers: an object. input_contexts: A list of instances to be passed to call(s) to . Length and order should match worker order in . strategy: a object, used to run all-reduce to handle last partial batch.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\v1\\input_lib.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:input_fn arg:input_workers arg:input_contexts arg:strategy arguments arg arg arg arg arg Call If Compare Call Raise Call Call Assign For Call Assign With Call Assign Call Assign Call If Call Assign Call If Call Assign Call Raise Call Call Call Call Assign"
  },
  {
    "library": "tensorflow",
    "name": "_name_scope",
    "source_code": "@contextlib.contextmanager\ndef _name_scope(self, name=None, values=None):\n    with ops.name_scope(self.name):\n        with ops.name_scope(name, values=([] if values is None else values) + self._graph_parents) as scope:\n            yield scope",
    "docstring": "Helper function to standardize op scope.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\distributions\\distribution.py",
    "ast_data": "FunctionDef name:_name_scope arg:self arg:name arg:values arguments arg arg arg With Call With Call Compare"
  },
  {
    "library": "kornia",
    "name": "get_translation_matrix2d",
    "source_code": "def get_translation_matrix2d(translations: Tensor) -> Tensor:\n    transform: Tensor = eye_like(3, translations)[:, :2, :]\n    transform[..., 2] += translations\n    transform_h = convert_affinematrix_to_homography(transform)\n    return transform_h",
    "docstring": "Compose translation matrix from the components. Args: translations: tensor containing the translation vector with shape :math:. Returns: the affine transformation matrix :math:. .. note:: This function is often used in conjunction with :func:, :func:.",
    "type": "function",
    "file_path": "kornia\\kornia\\geometry\\transform\\imgwarp.py",
    "ast_data": "FunctionDef name:get_translation_matrix2d arg:translations arguments arg Call Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_print_row",
    "source_code": "def _print_row(fields, positions, print_fn):\n    line = ''\n    for i, field in enumerate(fields):\n        field = str(field)\n        end_line_pos = positions[i]\n        if i > 0:\n            line = line + ' '\n        line = '{0:{min_length}}'.format(line + field, min_length=end_line_pos)\n        if len(line) > end_line_pos:\n            line = line[:end_line_pos - 4] + ' ...'\n    print_fn(line)",
    "docstring": "Prints a row.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\compiler\\tensorrt\\trt_convert.py",
    "ast_data": "FunctionDef name:_print_row arg:fields arg:positions arg:print_fn arguments arg arg arg Assign For Call Assign Call Assign If Compare Assign Assign Call If Compare Call Assign Call"
  },
  {
    "library": "scikit-learn",
    "name": "default_device",
    "source_code": "def default_device(self):\n    return torch.device('cpu')",
    "docstring": "The default device used for new PyTorch arrays. See Also -------- __array_namespace_info__.capabilities, __array_namespace_info__.default_dtypes, __array_namespace_info__.dtypes, __array_namespace_info__.devices Returns ------- device : Device The default device used for new PyTorch arrays. Examples -------- >>> info = xp.__array_namespace_info__() >>> info.default_device() device(type='cpu') Notes ----- This method returns the static default device when PyTorch is initialized. However, the *current* device used by creation functions (`` etc.) can be changed at runtime. See Also --------",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\externals\\array_api_compat\\torch\\_info.py",
    "ast_data": "FunctionDef name:default_device arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "numpy",
    "name": "sorted_glob",
    "source_code": "def sorted_glob(fileglob):\n    return sorted(glob.glob(fileglob))",
    "docstring": "sorts output of python glob for to allow extensions to have reproducible build results",
    "type": "function",
    "file_path": "numpy\\numpy\\distutils\\misc_util.py",
    "ast_data": "FunctionDef name:sorted_glob arg:fileglob arguments arg Return return:yes Call Call"
  },
  {
    "library": "matplotlib",
    "name": "__init__",
    "source_code": "def __init__(self, name, N=256, *, bad=None, under=None, over=None):\n    self.name = name\n    self.N = int(N)\n    self._rgba_bad = (0.0, 0.0, 0.0, 0.0) if bad is None else to_rgba(bad)\n    self._rgba_under = None if under is None else to_rgba(under)\n    self._rgba_over = None if over is None else to_rgba(over)\n    self._i_under = self.N\n    self._i_over = self.N + 1\n    self._i_bad = self.N + 2\n    self._isinit = False\n    self.n_variates = 1\n    self.colorbar_extend = False",
    "docstring": "Parameters ---------- name : str The name of the colormap. N : int The number of RGB quantization levels. bad : :mpltype:, default: transparent The color for invalid values (NaN or masked). .. versionadded:: 3.11 under : :mpltype:, default: color of the lowest value The color for low out-of-range values. .. versionadded:: 3.11 over : :mpltype:, default: color of the highest value The color for high out-of-range values. .. versionadded:: 3.11",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\colors.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:name arg:N arguments arg arg arg arg arg arg Assign Assign Call Assign Compare Call Assign Compare Call Assign Compare Call Assign Assign Assign Assign Assign Assign"
  },
  {
    "library": "matplotlib",
    "name": "iter_bezier",
    "source_code": "def iter_bezier(self, **kwargs):\n    first_vert = None\n    prev_vert = None\n    for verts, code in self.iter_segments(**kwargs):\n        if first_vert is None:\n            if code != Path.MOVETO:\n                raise ValueError('Malformed path, must start with MOVETO.')\n        if code == Path.MOVETO:\n            first_vert = verts\n            yield (BezierSegment(np.array([first_vert])), code)\n        elif code == Path.LINETO:\n            yield (BezierSegment(np.array([prev_vert, verts])), code)\n        elif code == Path.CURVE3:\n            yield (BezierSegment(np.array([prev_vert, verts[:2], verts[2:]])), code)\n        elif code == Path.CURVE4:\n            yield (BezierSegment(np.array([prev_vert, verts[:2], verts[2:4], verts[4:]])), code)\n        elif code == Path.CLOSEPOLY:\n            yield (BezierSegment(np.array([prev_vert, first_vert])), code)\n        elif code == Path.STOP:\n            return\n        else:\n            raise ValueError(f'Invalid Path.code_type: {code}')\n        prev_vert = verts[-2:]",
    "docstring": "Iterate over each Bézier curve (lines included) in a . Parameters ---------- **kwargs Forwarded to . Yields ------ B : The Bézier curves that make up the current path. Note in particular that freestanding points are Bézier curves of order 0, and lines are Bézier curves of order 1 (with two control points). code : The code describing what kind of curve is being returned. , , , and correspond to Bézier curves with 1, 2, 3, and 4 control points (respectively). is a with the control points correctly chosen based on the start/end points of the current stroke.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\path.py",
    "ast_data": "FunctionDef name:iter_bezier arg:self arguments arg arg Assign Assign For Call If Compare If Compare Raise Call If Compare Assign Call Call If Compare Call Call If Compare Call Call If Compare Call Call If Compare Call Call If Compare Return return:no Raise Call Assign"
  },
  {
    "library": "pytorch",
    "name": "check_exact_guard_match",
    "source_code": "def check_exact_guard_match(guard_expr, _hints):\n    return guard_expr == self.fx_graph_guard_expr",
    "docstring": "AOTAutogradCache tracks its own guards, so we just need to treat these guard expressions as a second cache key of sorts: we just check for equality, i.e. the FXGraphCache entry with the exact same guards as we originally saved into the cache.",
    "type": "method",
    "file_path": "pytorch\\torch\\_functorch\\_aot_autograd\\autograd_cache.py",
    "ast_data": "FunctionDef name:check_exact_guard_match arg:guard_expr arg:_hints arguments arg arg Return return:yes Compare"
  },
  {
    "library": "tensorflow",
    "name": "_prune_invalid_ids_ragged",
    "source_code": "def _prune_invalid_ids_ragged(ids, weights):\n    is_id_valid = math_ops.greater_equal(ids.values, 0)\n    nrows = ids.nrows()\n    pruned_values = array_ops.boolean_mask_v2(ids.values, is_id_valid)\n    pruned_value_rowids = array_ops.boolean_mask_v2(ids.value_rowids(), is_id_valid)\n    ids = ragged_tensor.RaggedTensor.from_value_rowids(pruned_values, pruned_value_rowids, nrows=nrows, validate=False)\n    if weights is not None:\n        pruned_weights_values = array_ops.boolean_mask_v2(weights.values, is_id_valid)\n        weights = ragged_tensor.RaggedTensor.from_value_rowids(pruned_weights_values, pruned_value_rowids, nrows=nrows, validate=False)\n    return (ids, weights)",
    "docstring": "Prune invalid IDs (< 0) from the input ids and weights.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\ragged_embedding_ops.py",
    "ast_data": "FunctionDef name:_prune_invalid_ids_ragged arg:ids arg:weights arguments arg arg Assign Call Assign Call Assign Call Assign Call Call Assign Call If Compare Assign Call Assign Call Return return:yes"
  },
  {
    "library": "django",
    "name": "tuple",
    "source_code": "@property\ndef tuple(self):\n    return self._cs.tuple",
    "docstring": "Return a tuple version of the geometry from the coordinate sequence.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\geos\\linestring.py",
    "ast_data": "FunctionDef name:tuple arg:self arguments arg Return return:yes"
  },
  {
    "library": "django",
    "name": "clear_expired",
    "source_code": "@classmethod\ndef clear_expired(cls):\n    raise NotImplementedError('This backend does not support clear_expired().')",
    "docstring": "Remove expired sessions from the session store. If this operation isn't possible on a given backend, it should raise NotImplementedError. If it isn't necessary, because the backend has a built-in expiration mechanism, it should be a no-op.",
    "type": "method",
    "file_path": "django\\django\\contrib\\sessions\\backends\\base.py",
    "ast_data": "FunctionDef name:clear_expired arg:cls arguments arg Raise Call"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, input_lists, skip_node_names=None, destination_node_name=None):\n    self._input_lists = input_lists\n    self._skip_node_names = skip_node_names\n    self._inputs = []\n    self._visited_nodes = []\n    self._depth_count = 0\n    self._depth_list = []\n    self._destination_node_name = destination_node_name",
    "docstring": "Constructor of _DFSGraphTracer. Args: input_lists: A list of dicts. Each dict is an adjacency (input) map from the recipient node name as the key and the list of input node names as the value. skip_node_names: Optional: a list of node names to skip tracing. destination_node_name: Optional: destination node name. If not , it should be the name of a destination not as a str and the graph tracing will raise GraphTracingReachedDestination as soon as the node has been reached. Raises: GraphTracingReachedDestination: if stop_at_node_name is not None and the specified node is reached.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\debug\\lib\\debug_graphs.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:input_lists arg:skip_node_names arg:destination_node_name arguments arg arg arg arg Assign Assign Assign Assign Assign Assign Assign"
  },
  {
    "library": "django",
    "name": "get_session_auth_hash",
    "source_code": "def get_session_auth_hash(self):\n    return self._get_session_auth_hash()",
    "docstring": "Return an HMAC of the password field.",
    "type": "method",
    "file_path": "django\\django\\contrib\\auth\\base_user.py",
    "ast_data": "FunctionDef name:get_session_auth_hash arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_image_projective_transform_grad",
    "source_code": "@ops.RegisterGradient('ImageProjectiveTransformV2')\ndef _image_projective_transform_grad(op, grad):\n    images = op.inputs[0]\n    transforms = op.inputs[1]\n    interpolation = op.get_attr('interpolation')\n    fill_mode = op.get_attr('fill_mode')\n    image_or_images = ops.convert_to_tensor(images, name='images')\n    transform_or_transforms = ops.convert_to_tensor(transforms, name='transforms', dtype=dtypes.float32)\n    if image_or_images.dtype.base_dtype not in _IMAGE_DTYPES:\n        raise TypeError('Invalid dtype %s.' % image_or_images.dtype)\n    if len(transform_or_transforms.get_shape()) == 1:\n        transforms = transform_or_transforms[None]\n    elif len(transform_or_transforms.get_shape()) == 2:\n        transforms = transform_or_transforms\n    else:\n        raise TypeError('Transforms should have rank 1 or 2.')\n    transforms = flat_transforms_to_matrices(transforms=transforms)\n    inverse = linalg_ops.matrix_inverse(transforms)\n    transforms = matrices_to_flat_transforms(inverse)\n    output = gen_image_ops.image_projective_transform_v2(images=grad, transforms=transforms, output_shape=array_ops.shape(image_or_images)[1:3], interpolation=interpolation, fill_mode=fill_mode)\n    return [output, None, None]",
    "docstring": "Computes the gradient for ImageProjectiveTransform.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\image_ops.py",
    "ast_data": "FunctionDef name:_image_projective_transform_grad arg:op arg:grad arguments arg arg Assign Assign Assign Call Assign Call Assign Call Assign Call If Compare Raise Call If Compare Call Call Assign If Compare Call Call Assign Raise Call Assign Call Assign Call Assign Call Assign Call Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "safe_embedding_lookup_sparse",
    "source_code": "@dispatch.dispatch_for_api(embedding_ops.safe_embedding_lookup_sparse)\ndef safe_embedding_lookup_sparse(embedding_weights: ShardedVariable, sparse_ids, sparse_weights=None, combiner='mean', default_id=None, name=None, partition_strategy='div', max_norm=None, allow_fast_lookup=False):\n    return embedding_ops.safe_embedding_lookup_sparse(embedding_weights.variables, sparse_ids, sparse_weights=sparse_weights, combiner=combiner, default_id=default_id, name=name, partition_strategy=partition_strategy, max_norm=max_norm, allow_fast_lookup=allow_fast_lookup)",
    "docstring": "Pass the individual shard variables as a list.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\sharded_variable.py",
    "ast_data": "FunctionDef name:safe_embedding_lookup_sparse arg:embedding_weights arg:sparse_ids arg:sparse_weights arg:combiner arg:default_id arg:name arg:partition_strategy arg:max_norm arg:allow_fast_lookup arguments arg arg arg arg arg arg arg arg arg Return return:yes Call Call"
  },
  {
    "library": "kornia",
    "name": "nms2d",
    "source_code": "def nms2d(input: Tensor, kernel_size: tuple[int, int], mask_only: bool=False) -> Tensor:\n    return NonMaximaSuppression2d(kernel_size)(input, mask_only)",
    "docstring": "Apply non maxima suppression to filter. See :class: for details.",
    "type": "function",
    "file_path": "kornia\\kornia\\geometry\\subpix\\nms.py",
    "ast_data": "FunctionDef name:nms2d arg:input arg:kernel_size arg:mask_only arguments arg arg arg Return return:yes Call Call"
  },
  {
    "library": "kornia",
    "name": "width",
    "source_code": "@property\ndef width(self) -> torch.Tensor:\n    return self.xmax - self.xmin",
    "docstring": "The bounding box width.",
    "type": "method",
    "file_path": "kornia\\kornia\\contrib\\face_detection.py",
    "ast_data": "FunctionDef name:width arg:self arguments arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "add_custom_scalars_marginchart",
    "source_code": "def add_custom_scalars_marginchart(self, tags, category='default', title='untitled'):\n    torch._C._log_api_usage_once('tensorboard.logging.add_custom_scalars_marginchart')\n    assert len(tags) == 3\n    layout = {category: {title: ['Margin', tags]}}\n    self._get_file_writer().add_summary(custom_scalars(layout))",
    "docstring": "Shorthand for creating marginchart. Similar to `` Examples:: writer.add_custom_scalars_marginchart(['twse/0050', 'twse/2330', 'twse/2006'])",
    "type": "method",
    "file_path": "pytorch\\torch\\utils\\tensorboard\\writer.py",
    "ast_data": "FunctionDef name:add_custom_scalars_marginchart arg:self arg:tags arg:category arg:title arguments arg arg arg arg Call Compare Call Assign Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_assert_sparse_compatible",
    "source_code": "def _assert_sparse_compatible(sparse_tensors):\n    checks = []\n    first = sparse_tensors[0]\n    for t in sparse_tensors[1:]:\n        checks.append(check_ops.assert_equal(first.dense_shape, t.dense_shape, message='Mismatched shapes!'))\n        checks.append(check_ops.assert_equal(first.indices, t.indices, message='Mismatched indices!'))\n    return checks",
    "docstring": "Check that all of have same and . Args: sparse_tensors: A list of sparse tensors. Returns: An op to be used as a control dependency.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\sparse_ops.py",
    "ast_data": "FunctionDef name:_assert_sparse_compatible arg:sparse_tensors arguments arg Assign Assign For Call Call Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "broadcast_static_shape",
    "source_code": "@tf_export('broadcast_static_shape')\n@dispatch.add_dispatch_support\ndef broadcast_static_shape(shape_x, shape_y):\n    return common_shapes.broadcast_shape(shape_x, shape_y)",
    "docstring": "Computes the shape of a broadcast given known shapes. When and are fully known s this computes a which is the shape of the result of a broadcasting op applied in tensors of shapes and . For example, if shape_x is and shape_y is , the result is a TensorShape whose value is . This is useful when validating the result of a broadcasting operation when the tensors have statically known shapes. Example: >>> shape_x = tf.TensorShape([1, 2, 3]) >>> shape_y = tf.TensorShape([5, 1 ,3]) >>> tf.broadcast_static_shape(shape_x, shape_y) TensorShape([5, 2, 3]) Args: shape_x: A shape_y: A Returns: A representing the broadcasted shape. Raises: ValueError: If the two shapes can not be broadcasted.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\array_ops.py",
    "ast_data": "FunctionDef name:broadcast_static_shape arg:shape_x arg:shape_y arguments arg arg Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "clear",
    "source_code": "def clear(self) -> None:\n    self._modules.clear()",
    "docstring": "Remove all items from the ModuleDict.",
    "type": "method",
    "file_path": "pytorch\\torch\\nn\\modules\\container.py",
    "ast_data": "FunctionDef name:clear arg:self arguments arg Call"
  },
  {
    "library": "pytorch",
    "name": "ROCmKernel",
    "source_code": "class ROCmKernel(Kernel):\n    overrides = OpOverrides",
    "docstring": "Baseclass for ROCm based Kernels",
    "type": "class",
    "file_path": "pytorch\\torch\\_inductor\\codegen\\rocm\\rocm_kernel.py",
    "ast_data": "ClassDef name:ROCmKernel Assign"
  },
  {
    "library": "pandas",
    "name": "get_standard_colors",
    "source_code": "def get_standard_colors(num_colors: int, colormap: Colormap | None=None, color_type: str='default', *, color: dict[str, Color] | Color | Sequence[Color] | None=None) -> dict[str, Color] | list[Color]:\n    if isinstance(color, dict):\n        return color\n    colors = _derive_colors(color=color, colormap=colormap, color_type=color_type, num_colors=num_colors)\n    return list(_cycle_colors(colors, num_colors=num_colors))",
    "docstring": "Get standard colors based on , or inputs. Parameters ---------- num_colors : int Minimum number of colors to be returned. Ignored if is a dictionary. colormap : :py:class:, optional Matplotlib colormap. When provided, the resulting colors will be derived from the colormap. color_type : {\"default\", \"random\"}, optional Type of colors to derive. Used if provided and are None. Ignored if either or are not None. color : dict or str or sequence, optional Color(s) to be used for deriving sequence of colors. Can be either be a dictionary, or a single color (single color string, or sequence of floats representing a single color), or a sequence of colors. Returns ------- dict or list Standard colors. Can either be a mapping if was a dictionary, or a list of colors with a length of or more. Warns ----- UserWarning If both and are provided. Parameter will override.",
    "type": "function",
    "file_path": "pandas\\pandas\\plotting\\_matplotlib\\style.py",
    "ast_data": "FunctionDef name:get_standard_colors arg:num_colors arg:colormap arg:color_type arguments arg arg arg arg If Call Return return:yes Assign Call Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "get_or_create_steps_per_run_variable",
    "source_code": "def get_or_create_steps_per_run_variable():\n    graph = ops.get_default_graph()\n    collection_name = '{}_{}'.format(_HOOKS, _STEPS_PER_RUN_VAR)\n    steps_per_run_vars = graph.get_collection(collection_name)\n    if len(steps_per_run_vars) == 1:\n        return steps_per_run_vars[0]\n    elif len(steps_per_run_vars) > 1:\n        raise RuntimeError('Multiple steps_per_run_var in collection.')\n    with variable_scope.variable_scope(_HOOKS, reuse=variable_scope.AUTO_REUSE):\n        return variable_scope.get_variable(_STEPS_PER_RUN_VAR, initializer=init_ops.ones_initializer(), shape=[], dtype=dtypes.int32, trainable=False, collections=[collection_name, ops.GraphKeys.LOCAL_VARIABLES], use_resource=True)",
    "docstring": "Gets or creates the steps_per_run variable. In Estimator, the user provided computation, the model_fn, is wrapped inside a tf.while_loop for peak performance. The iterations of the loop are specified by this variable, which adjusts its value on the CPU after each device program execution and before the next execution. The purpose of using a variable, rather than a constant, is to allow Estimator adapt the device training iterations according to the final steps specified by users. For example, if the user sets the steps_per_run as 4 and steps as 10 in Estimator.train(), the steps_per_run variable will have the following value before each training run. - 1-st execution: steps_per_run = 4 - 2-nd execution: steps_per_run = 4 - 3-rd execution: steps_per_run = 2 As model_fn increases the global step once per train_op invocation, the global step is 10 after all executions, matching the steps=10 inputs passed in by users. Returns: A TF non-trainable resource variable. Raises: RuntimeError: If multi steps_per_run variables were found.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\training\\basic_session_run_hooks.py",
    "ast_data": "FunctionDef name:get_or_create_steps_per_run_variable arguments Assign Call Assign Call Assign Call If Compare Call Return return:yes If Compare Call Raise Call With Call Return return:yes Call Call"
  },
  {
    "library": "pandas",
    "name": "is_nested_tuple",
    "source_code": "def is_nested_tuple(tup, labels) -> bool:\n    if not isinstance(tup, tuple):\n        return False\n    for k in tup:\n        if is_list_like(k) or isinstance(k, slice):\n            return isinstance(labels, MultiIndex)\n    return False",
    "docstring": "Returns ------- bool",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\indexing.py",
    "ast_data": "FunctionDef name:is_nested_tuple arg:tup arg:labels arguments arg arg If Call Return return:yes For If BoolOp Call Call Return return:yes Call Return return:yes"
  },
  {
    "library": "scipy",
    "name": "f3",
    "source_code": "def f3(x, h, k):\n    return -np.exp(-x)",
    "docstring": "cdf = np.exp(-np.exp(-x)) logcdf = ...",
    "type": "method",
    "file_path": "scipy\\scipy\\stats\\_continuous_distns.py",
    "ast_data": "FunctionDef name:f3 arg:x arg:h arg:k arguments arg arg arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "get_qconfig_info",
    "source_code": "def get_qconfig_info(self, model) -> dict[str, DetectorQConfigInfo]:\n    input_values: dict[str, dict] = self._extract_input_info(model)\n    weight_values: dict[str, dict] = self._extract_weight_info(model)\n    comp_stats: dict[str, torch.Tensor] = self._generate_comparison_values(input_values, weight_values)\n    input_weight_equalization_info: dict[str, dict] = self._generate_dict_info(input_values, weight_values, comp_stats)\n    module_fqn_to_detector_qconfig_info = {}\n    for module_fqn in input_weight_equalization_info:\n        detector_qconfig_info = DetectorQConfigInfo(module_fqn)\n        input_weight_recommended: bool = input_weight_equalization_info[module_fqn][self.RECOMMENDED_KEY]\n        detector_qconfig_info.is_equalization_recommended = input_weight_recommended\n        module_fqn_to_detector_qconfig_info[module_fqn] = detector_qconfig_info\n    return module_fqn_to_detector_qconfig_info",
    "docstring": "Returns the DetectorQConfigInfo for each module_fqn relevant Args model (nn.Module or subclass): model to find observer insertion points Returns a Dict mapping from unique observer fqns (where we want to insert them) to: A DetectorQConfigInfo with the information to generate a QConfig for a specific module",
    "type": "method",
    "file_path": "pytorch\\torch\\ao\\quantization\\fx\\_model_report\\detector.py",
    "ast_data": "FunctionDef name:get_qconfig_info arg:self arg:model arguments arg arg Call Call Call Call Assign For Assign Call Assign Assign Return return:yes"
  },
  {
    "library": "scipy",
    "name": "_newton_quadratic",
    "source_code": "def _newton_quadratic(ab, fab, d, fd, k):\n    a, b = ab\n    fa, fb = fab\n    _, B, A = _compute_divided_differences([a, b, d], [fa, fb, fd], forward=True, full=False)\n\n    def _P(x):\n        return (A * (x - b) + B) * (x - a) + fa\n    if A == 0:\n        r = a - fa / B\n    else:\n        r = a if np.sign(A) * np.sign(fa) > 0 else b\n        for i in range(k):\n            r1 = r - _P(r) / (B + A * (2 * r - a - b))\n            if not ab[0] < r1 < ab[1]:\n                if ab[0] < r < ab[1]:\n                    return r\n                r = sum(ab) / 2.0\n                break\n            r = r1\n    return r",
    "docstring": "Apply Newton-Raphson like steps, using divided differences to approximate f' ab is a real interval [a, b] containing a root, fab holds the real values of f(a), f(b) d is a real number outside [ab, b] k is the number of steps to apply",
    "type": "function",
    "file_path": "scipy\\scipy\\optimize\\_zeros_py.py",
    "ast_data": "FunctionDef name:_newton_quadratic arg:ab arg:fab arg:d arg:fd arg:k arguments arg arg arg arg arg Assign Assign Assign Call FunctionDef name:_P arg:x arguments arg Return return:yes If Compare Assign Assign Compare Call Call For Call Assign Call If Compare If Compare Return return:yes Assign Call Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "shape_tensor",
    "source_code": "def shape_tensor(shape, name=None):\n    if isinstance(shape, (tuple, list)) and (not shape):\n        dtype = dtypes.int32\n    else:\n        dtype = None\n    return tensor_conversion.convert_to_tensor_v2_with_dispatch(shape, dtype=dtype, name=name)",
    "docstring": "Convert Tensor using default type, unless empty list or tuple.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\linalg\\linear_operator_util.py",
    "ast_data": "FunctionDef name:shape_tensor arg:shape arg:name arguments arg arg If BoolOp Call Assign Assign Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "pprint_setters",
    "source_code": "def pprint_setters(self, prop=None, leadingspace=2):\n    if leadingspace:\n        pad = ' ' * leadingspace\n    else:\n        pad = ''\n    if prop is not None:\n        accepts = self.get_valid_values(prop)\n        return f'{pad}{prop}: {accepts}'\n    lines = []\n    for prop in sorted(self.get_setters()):\n        accepts = self.get_valid_values(prop)\n        name = self.aliased_name(prop)\n        lines.append(f'{pad}{name}: {accepts}')\n    return lines",
    "docstring": "If *prop* is *None*, return a list of strings of all settable properties and their valid values. If *prop* is not *None*, it is a valid property name and that property will be returned as a string of property : valid values.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\artist.py",
    "ast_data": "FunctionDef name:pprint_setters arg:self arg:prop arg:leadingspace arguments arg arg arg If Assign Assign If Compare Assign Call Return return:yes Assign For Call Call Assign Call Assign Call Call Return return:yes"
  },
  {
    "library": "numpy",
    "name": "interpolate",
    "source_code": "@classmethod\ndef interpolate(cls, func, deg, domain=None, args=()):\n    if domain is None:\n        domain = cls.domain\n    xfunc = lambda x: func(pu.mapdomain(x, cls.window, domain), *args)\n    coef = chebinterpolate(xfunc, deg)\n    return cls(coef, domain=domain)",
    "docstring": "Interpolate a function at the Chebyshev points of the first kind. Returns the series that interpolates at the Chebyshev points of the first kind scaled and shifted to the . The resulting series tends to a minmax approximation of when the function is continuous in the domain. Parameters ---------- func : function The function to be interpolated. It must be a function of a single variable of the form `argsfuncnumpy.polynomial.chebinterpolate` for more details.",
    "type": "method",
    "file_path": "numpy\\numpy\\polynomial\\chebyshev.py",
    "ast_data": "FunctionDef name:interpolate arg:cls arg:func arg:deg arg:domain arg:args arguments arg arg arg arg arg If Compare Assign Assign arguments arg Call Call Assign Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "unique_fn_name",
    "source_code": "def unique_fn_name(scope, name):\n    return ('%s%s_%s' % (scope, name, ops.uid())).replace('/', '_')",
    "docstring": "Returns a unique name to use for a control flow function. Args: scope: A name scope string. name: An identifier for this function (e.g. \"true\", \"body\"). Returns: A string, the name to use for the function.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\control_flow_util_v2.py",
    "ast_data": "FunctionDef name:unique_fn_name arg:scope arg:name arguments arg arg Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_get_strides_and_dilation_rate",
    "source_code": "def _get_strides_and_dilation_rate(num_spatial_dims, strides, dilation_rate):\n    if dilation_rate is None:\n        dilation_rate = [1] * num_spatial_dims\n    elif len(dilation_rate) != num_spatial_dims:\n        raise ValueError(f'`len(dilation_rate)` should be {num_spatial_dims}. Received: dilation_rate={dilation_rate} of length {len(dilation_rate)}')\n    dilation_rate = np.array(dilation_rate, dtype=np.int32)\n    if np.any(dilation_rate < 1):\n        raise ValueError(f'all values of `dilation_rate` must be positive. Received: dilation_rate={dilation_rate}')\n    if strides is None:\n        strides = [1] * num_spatial_dims\n    elif len(strides) != num_spatial_dims:\n        raise ValueError(f'`len(strides)` should be {num_spatial_dims}. Received: strides={strides} of length {len(strides)}')\n    strides = np.array(strides, dtype=np.int32)\n    if np.any(strides < 1):\n        raise ValueError(f'all values of `strides` must be positive. Received: strides={strides}')\n    if np.any(strides > 1) and np.any(dilation_rate > 1):\n        raise ValueError(f'`strides > 1` not supported in conjunction with `dilation_rate > 1`. Received: strides={strides} and dilation_rate={dilation_rate}')\n    return (strides, dilation_rate)",
    "docstring": "Helper function for verifying strides and dilation_rate arguments. This is used by and . Args: num_spatial_dims: int strides: Optional. List of N ints >= 1. Defaults to . If any value of strides is > 1, then all values of dilation_rate must be 1. dilation_rate: Optional. List of N ints >= 1. Defaults to . If any value of dilation_rate is > 1, then all values of strides must be 1. Returns: Normalized (strides, dilation_rate) as int32 numpy arrays of shape [num_spatial_dims]. Raises: ValueError: if the parameters are invalid.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\nn_ops.py",
    "ast_data": "FunctionDef name:_get_strides_and_dilation_rate arg:num_spatial_dims arg:strides arg:dilation_rate arguments arg arg arg If Compare Assign If Compare Call Raise Call Call Assign Call If Call Compare Raise Call If Compare Assign If Compare Call Raise Call Call Assign Call If Call Compare Raise Call If BoolOp Call Compare Call Compare Raise Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_get_checkpoint_size",
    "source_code": "def _get_checkpoint_size(prefix):\n    size = 0\n    files = glob.glob('{}*'.format(prefix))\n    for file in files:\n        size += metrics.CalculateFileSize(file)\n    return size",
    "docstring": "Calculates filesize of checkpoint based on prefix.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\checkpoint\\checkpoint.py",
    "ast_data": "FunctionDef name:_get_checkpoint_size arg:prefix arguments arg Assign Assign Call Call For Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "track_variable",
    "source_code": "def track_variable(v):\n    if context.executing_eagerly():\n        return\n    graph = v.graph if hasattr(v, 'graph') else get_graph()\n    _GRAPH_VARIABLES[graph].add(v)",
    "docstring": "Tracks the given variable for initialization.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\backend.py",
    "ast_data": "FunctionDef name:track_variable arg:v arguments arg If Call Return return:no Assign Call Call Call"
  },
  {
    "library": "pandas",
    "name": "maybe_convert_css_to_tuples",
    "source_code": "def maybe_convert_css_to_tuples(style: CSSProperties) -> CSSList:\n    if isinstance(style, str):\n        if style and ':' not in style:\n            raise ValueError(f\"Styles supplied as string must follow CSS rule formats, for example 'attr: val;'. '{style}' was given.\")\n        s = style.split(';')\n        return [(x.split(':')[0].strip(), ':'.join(x.split(':')[1:]).strip()) for x in s if x.strip() != '']\n    return style",
    "docstring": "Convert css-string to sequence of tuples format if needed. 'color:red; border:1px solid black;' -> [('color', 'red'), ('border','1px solid red')]",
    "type": "function",
    "file_path": "pandas\\pandas\\io\\formats\\style_render.py",
    "ast_data": "FunctionDef name:maybe_convert_css_to_tuples arg:style arguments arg If Call If BoolOp Compare Raise Call Assign Call Return return:yes Call Call Call Call Call Compare Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "cuda",
    "source_code": "def cuda(self, device=None, non_blocking=False, memory_format=torch.preserve_format, process_group=None) -> ShardedTensor:\n    if memory_format != torch.preserve_format and memory_format != torch.contiguous_format:\n        raise RuntimeError('Only `torch.contiguous_format` or `torch.preserve_format` is supported!')\n    if device is not None:\n        device = torch.device(device) if isinstance(device, str) else device\n        assert isinstance(device, torch.device) and device.index == torch.cuda.current_device(), 'Only device without device id (e.g. \"cpu\" or \"cuda\") is expected for ShardedTensor!'\n    current_device = torch.device(torch.cuda.current_device())\n    list_shards: list[Shard] = []\n    for shard in self._local_shards:\n        cuda_tensor = shard.tensor.cuda(device=current_device, non_blocking=non_blocking, memory_format=memory_format)\n        metadata = copy.deepcopy(shard.metadata)\n        metadata.placement._device = current_device\n        list_shards.append(Shard(cuda_tensor, metadata))\n    st_meta = copy.deepcopy(self.metadata())\n    for meta in st_meta.shards_metadata:\n        if meta.placement.device().type != 'cuda':\n            meta.placement._device = current_device\n    pg = self._process_group if process_group is None else process_group\n    st_cuda = ShardedTensor._init_from_local_shards_and_global_metadata(list_shards, sharded_tensor_metadata=st_meta, process_group=pg, init_rrefs=self._init_rrefs)\n    return st_cuda",
    "docstring": "Returns a copy of this object in CUDA memory, if the original ShardedTensor is on CPU, we will move the local shard to the current GPU device of each process in a SPMD fashion. If this ShardedTensor is already on CUDA memory and local shards on each rank are already on current device, we still returns a new ShardedTensor object with new metadata, but no underlying data movements are performed. .. note:: When moving a ShardedTensor from CPU to GPU, the ShardedTensor might need to be managed by a different type of ProcessGroup(i.e. ProcessGroupNCCL), it is the user's responsiblity to explicitly pass in a new process_group that is compatible with GPU.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\_shard\\sharded_tensor\\api.py",
    "ast_data": "FunctionDef name:cuda arg:self arg:device arg:non_blocking arg:memory_format arg:process_group arguments arg arg arg arg arg If BoolOp Compare Compare Raise Call If Compare Assign Call Call BoolOp Call Compare Call Assign Call Call For Assign Call Assign Call Assign Call Call Assign Call Call For If Compare Call Assign Assign Compare Assign Call Return return:yes"
  },
  {
    "library": "cherrypy",
    "name": "on",
    "source_code": "@on.setter\ndef on(self, value):\n    raise AttributeError(_attr_error)",
    "docstring": "Set a flag for whether the tool is enabled.",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\_cptools.py",
    "ast_data": "FunctionDef name:on arg:self arg:value arguments arg arg Raise Call"
  },
  {
    "library": "pytorch",
    "name": "read_bytes",
    "source_code": "def read_bytes(self, name: str) -> bytes:\n    return self.archive_file.get_record(name)",
    "docstring": "Read a bytes object from the archive. name: The source file inside the archive.",
    "type": "method",
    "file_path": "pytorch\\torch\\export\\pt2_archive\\_package.py",
    "ast_data": "FunctionDef name:read_bytes arg:self arg:name arguments arg arg Return return:yes Call"
  },
  {
    "library": "pygame",
    "name": "abort",
    "source_code": "def abort(self):\n    _check_init()\n    if self._output:\n        self._output.Abort()\n    self._aborted = 1",
    "docstring": "terminates outgoing messages immediately Output.abort(): return None The caller should immediately close the output port; this call may result in transmission of a partial midi message. There is no abort for Midi input because the user can simply ignore messages in the buffer and close an input device at any time.",
    "type": "method",
    "file_path": "pygame\\src_py\\midi.py",
    "ast_data": "FunctionDef name:abort arg:self arguments arg Call If Call Assign"
  },
  {
    "library": "django",
    "name": "get_autocommit",
    "source_code": "def get_autocommit(self):\n    self.ensure_connection()\n    return self.autocommit",
    "docstring": "Get the autocommit state.",
    "type": "method",
    "file_path": "django\\django\\db\\backends\\base\\base.py",
    "ast_data": "FunctionDef name:get_autocommit arg:self arguments arg Call Return return:yes"
  },
  {
    "library": "django",
    "name": "TemplateResponseMixin",
    "source_code": "class TemplateResponseMixin:\n    template_name = None\n    template_engine = None\n    response_class = TemplateResponse\n    content_type = None\n\n    def render_to_response(self, context, **response_kwargs):\n        response_kwargs.setdefault('content_type', self.content_type)\n        return self.response_class(request=self.request, template=self.get_template_names(), context=context, using=self.template_engine, **response_kwargs)\n\n    def get_template_names(self):\n        if self.template_name is None:\n            raise ImproperlyConfigured(\"TemplateResponseMixin requires either a definition of 'template_name' or an implementation of 'get_template_names()'\")\n        else:\n            return [self.template_name]",
    "docstring": "A mixin that can be used to render a template.",
    "type": "class",
    "file_path": "django\\django\\views\\generic\\base.py",
    "ast_data": "ClassDef name:TemplateResponseMixin Assign Assign Assign Assign FunctionDef name:render_to_response arg:self arg:context arguments arg arg arg Call Return return:yes Call Call FunctionDef name:get_template_names arg:self arguments arg If Compare Raise Call Return return:yes"
  },
  {
    "library": "sphinx",
    "name": "DefaultSubstitutions",
    "source_code": "class DefaultSubstitutions(SphinxTransform):\n    default_priority = 210\n\n    def apply(self, **kwargs: Any) -> None:\n        to_handle = _DEFAULT_SUBSTITUTIONS - set(self.document.substitution_defs)\n        for ref in self.document.findall(nodes.substitution_reference):\n            if (name := ref['refname']) in to_handle:\n                ref.replace_self(self._handle_default_substitution(name))\n\n    def _handle_default_substitution(self, name: _DEFAULT_SUBSTITUTION_NAMES) -> nodes.Text:\n        if name == 'translation progress':\n            return nodes.Text(_calculate_translation_progress(self.document))\n        if name == 'today':\n            if (text := self.config.today):\n                return nodes.Text(text)\n            today_fmt = self.config.today_fmt or _('%b %d, %Y')\n            return nodes.Text(format_date(today_fmt, language=self.config.language))\n        return nodes.Text(getattr(self.config, name))",
    "docstring": "Replace some substitutions if they aren't defined in the document.",
    "type": "class",
    "file_path": "sphinx\\sphinx\\transforms\\__init__.py",
    "ast_data": "ClassDef name:DefaultSubstitutions Assign FunctionDef name:apply arg:self arguments arg arg Assign Call For Call If Compare Call Call FunctionDef name:_handle_default_substitution arg:self arg:name arguments arg arg If Compare Return return:yes Call Call If Compare If Return return:yes Call Assign BoolOp Call Return return:yes Call Call Return return:yes Call Call"
  },
  {
    "library": "matplotlib",
    "name": "refine_field",
    "source_code": "def refine_field(self, z, triinterpolator=None, subdiv=3):\n    if triinterpolator is None:\n        interp = matplotlib.tri.CubicTriInterpolator(self._triangulation, z)\n    else:\n        _api.check_isinstance(matplotlib.tri.TriInterpolator, triinterpolator=triinterpolator)\n        interp = triinterpolator\n    refi_tri, found_index = self.refine_triangulation(subdiv=subdiv, return_tri_index=True)\n    refi_z = interp._interpolate_multikeys(refi_tri.x, refi_tri.y, tri_index=found_index)[0]\n    return (refi_tri, refi_z)",
    "docstring": "Refine a field defined on the encapsulated triangulation. Parameters ---------- z : (npoints,) array-like Values of the field to refine, defined at the nodes of the encapsulated triangulation. (`~matplotlib.tri.TriInterpolator~matplotlib.tri.CubicTriInterpolator~matplotlib.tri.Triangulation` The returned refined triangulation. refi_z : 1D array of length: *refi_tri* node count. The returned interpolated field (at *refi_tri* nodes).",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\tri\\_trirefine.py",
    "ast_data": "FunctionDef name:refine_field arg:self arg:z arg:triinterpolator arg:subdiv arguments arg arg arg arg If Compare Assign Call Call Assign Assign Call Assign Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "get_name_by_module",
    "source_code": "def get_name_by_module(model, module):\n    for name, m in model.named_modules():\n        if m is module:\n            return name\n    raise ValueError('module is not in the model')",
    "docstring": "Get the name of a module within a model. Args: model: a model (nn.module) that equalization is to be applied on module: a module within the model Returns: name: the name of the module within the model",
    "type": "function",
    "file_path": "pytorch\\torch\\ao\\quantization\\_equalize.py",
    "ast_data": "FunctionDef name:get_name_by_module arg:model arg:module arguments arg arg For Call If Compare Return return:yes Raise Call"
  },
  {
    "library": "tensorflow",
    "name": "TestOneInput",
    "source_code": "def TestOneInput(data):\n    fh = FuzzingHelper(data)\n    dtype = fh.get_tf_dtype(allowed_set=[tf.float16, tf.float32, tf.float64])\n    input_tensor = fh.get_random_numeric_tensor(dtype=dtype)\n    _ = tf.raw_ops.Acosh(x=input_tensor)",
    "docstring": "Test randomized fuzzing input for tf.raw_ops.Acosh.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\security\\fuzzing\\acosh_fuzz.py",
    "ast_data": "FunctionDef name:TestOneInput arg:data arguments arg Assign Call Assign Call Assign Call Assign Call"
  },
  {
    "library": "sphinx",
    "name": "toctree",
    "source_code": "class toctree(nodes.General, nodes.Element, translatable):\n\n    def preserve_original_messages(self) -> None:\n        rawentries: list[str] = self.setdefault('rawentries', [])\n        for title, _docname in self['entries']:\n            if title:\n                rawentries.append(title)\n        if self.get('caption'):\n            self['rawcaption'] = self['caption']\n\n    def apply_translated_message(self, original_message: str, translated_message: str) -> None:\n        for i, (title, docname) in enumerate(self['entries']):\n            if title == original_message:\n                self['entries'][i] = (translated_message, docname)\n        if self.get('rawcaption') == original_message:\n            self['caption'] = translated_message\n\n    def extract_original_messages(self) -> list[str]:\n        messages: list[str] = []\n        messages.extend(self.get('rawentries', []))\n        if 'rawcaption' in self:\n            messages.append(self['rawcaption'])\n        return messages",
    "docstring": "Node for inserting a \"TOC tree\".",
    "type": "class",
    "file_path": "sphinx\\sphinx\\addnodes.py",
    "ast_data": "ClassDef name:toctree FunctionDef name:preserve_original_messages arg:self arguments arg Call For If Call If Call Assign FunctionDef name:apply_translated_message arg:self arg:original_message arg:translated_message arguments arg arg arg For Call If Compare Assign If Compare Call Assign FunctionDef name:extract_original_messages arg:self arguments arg Call Call If Compare Call Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "get_feature_names_out",
    "source_code": "def get_feature_names_out(self, input_features=None):\n    check_is_fitted(self)\n    input_features = _check_feature_names_in(self, input_features)\n    return input_features[self.get_support()]",
    "docstring": "Mask feature names according to selected features. Parameters ---------- input_features : array-like of str or None, default=None Input features. - If is , then is used as feature names in. If is not defined, then the following input feature names are generated: . - If is an array-like, then must match if is defined. Returns ------- feature_names_out : ndarray of str objects Transformed feature names.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\feature_selection\\_base.py",
    "ast_data": "FunctionDef name:get_feature_names_out arg:self arg:input_features arguments arg arg Call Assign Call Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "_set_axis",
    "source_code": "@final\ndef _set_axis(self, axis: AxisInt, labels: AnyArrayLike | list) -> None:\n    labels = ensure_index(labels)\n    self._mgr.set_axis(axis, labels)",
    "docstring": "This is called from the cython code when we set the attribute directly, e.g. .",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\generic.py",
    "ast_data": "FunctionDef name:_set_axis arg:self arg:axis arg:labels arguments arg arg arg Assign Call Call"
  },
  {
    "library": "scikit-learn",
    "name": "predict_proba",
    "source_code": "def predict_proba(self, X):\n    if self.methods_to_check == 'all' or 'predict_proba' in self.methods_to_check:\n        X, y = self._check_X_y(X)\n    rng = check_random_state(self.random_state)\n    proba = rng.randn(_num_samples(X), len(self.classes_))\n    proba = np.abs(proba, out=proba)\n    proba /= np.sum(proba, axis=1)[:, np.newaxis]\n    return proba",
    "docstring": "Predict probabilities for each class. Here, the dummy classifier will provide a probability of 1 for the first class of and 0 otherwise. Parameters ---------- X : array-like of shape (n_samples, n_features) The input data. Returns ------- proba : ndarray of shape (n_samples, n_classes) The probabilities for each sample and class.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\utils\\_mocking.py",
    "ast_data": "FunctionDef name:predict_proba arg:self arg:X arguments arg arg If BoolOp Compare Compare Assign Call Assign Call Assign Call Call Call Assign Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "codegen_boilerplate",
    "source_code": "def codegen_boilerplate(self, heuristic_name, opt_name, threshold, shared_memory, device_capa, classes):\n    boiler_plate = f\"# flake8: noqa: B950\\n# fmt: off\\n# This file was generated by AutoHeuristic. Do not modify it manually!\\n# To regenerate this file, take a look at the steps in the README.md file inside torchgen/_autoheuristic/{opt_name}/\\nfrom typing import Optional\\n\\nfrom torch._inductor.autoheuristic.autoheuristic_utils import (\\n    AHContext,\\n    AHMetadata,\\n    Choice,\\n)\\nfrom torch._inductor.autoheuristic.learnedheuristic_interface import (\\n    LearnedHeuristicDecision,\\n)\\n\\n\\nclass {heuristic_name}(LearnedHeuristicDecision):\\n\\n    def __init__(self) -> None:\\n        self.choices: list[Choice] = []\\n        self.fill_choices()\\n\\n{self.gen_precondition(opt_name, shared_memory, device_capa)}\\n\\n    def get_confidence_threshold(self) -> float:\\n        return {threshold}\\n\\n    def get_choice(self, idx: int) -> Optional[str]:\\n        if idx < len(self.choices):\\n            return self.choices[idx]\\n        return None\\n\\n    def fill_choices(self) -> None:\\n{self.gen_classes(classes, num_spaces=8)}\\n\\n    def get_name(self) -> str:\\n        return '{opt_name}'\"\n    return boiler_plate",
    "docstring": "Generates the boilerplate code for the generated heuristic. This includes things like imports, class definition, etc.",
    "type": "method",
    "file_path": "pytorch\\torchgen\\_autoheuristic\\train_decision.py",
    "ast_data": "FunctionDef name:codegen_boilerplate arg:self arg:heuristic_name arg:opt_name arg:threshold arg:shared_memory arg:device_capa arg:classes arguments arg arg arg arg arg arg arg Assign Call Call Return return:yes"
  },
  {
    "library": "kornia",
    "name": "from_matrix",
    "source_code": "@classmethod\ndef from_matrix(cls, matrix: Tensor) -> Se2:\n    r = So2.from_matrix(matrix[..., :2, :2])\n    t = matrix[..., :2, -1]\n    return cls(r, t)",
    "docstring": "Create an Se2 group from a matrix. Args: matrix: tensor of shape :math:. Example: >>> s = Se2.from_matrix(torch.eye(3).repeat(2, 1, 1)) >>> s.r Parameter containing: tensor([1.+0.j, 1.+0.j], requires_grad=True) >>> s.t Parameter containing: tensor([[0., 0.], [0., 0.]], requires_grad=True)",
    "type": "method",
    "file_path": "kornia\\kornia\\geometry\\liegroup\\se2.py",
    "ast_data": "FunctionDef name:from_matrix arg:cls arg:matrix arguments arg arg Assign Call Assign Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_CheckAllInputsUsed",
    "source_code": "def _CheckAllInputsUsed(op_type_name, keywords):\n    if keywords:\n        all_keywords = ', '.join(sorted(keywords.keys()))\n        raise TypeError(f'{op_type_name} got unexpected keyword arguments: {all_keywords}.')",
    "docstring": "Ensures all inputs passed into _apply_op_helper were used.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\op_def_library.py",
    "ast_data": "FunctionDef name:_CheckAllInputsUsed arg:op_type_name arg:keywords arguments arg arg If Assign Call Call Call Raise Call"
  },
  {
    "library": "pytorch",
    "name": "checkout_nightly_version",
    "source_code": "@timed('Checking out nightly PyTorch')\ndef checkout_nightly_version(branch: str, site_dir: Path) -> None:\n    nightly_version = _nightly_version(site_dir)\n    cmd = git('checkout', '-b', branch, nightly_version)\n    subprocess.check_call(cmd)",
    "docstring": "Get's the nightly version and then checks it out.",
    "type": "function",
    "file_path": "pytorch\\tools\\nightly.py",
    "ast_data": "FunctionDef name:checkout_nightly_version arg:branch arg:site_dir arguments arg arg Assign Call Assign Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_AverageMinMax",
    "source_code": "@_implements(_CalibrationMethod.CALIBRATION_METHOD_AVERAGE_MIN_MAX)\nclass _AverageMinMax(_CalibrationAlgorithmBase):\n\n    def get_min_max_value(self) -> tuple[float, float]:\n        average_min_max_statistics = self._statistics.average_min_max_statistics\n        num_samples = average_min_max_statistics.num_samples\n        if num_samples == 0:\n            raise ValueError(f'num_samples must not be 0 when calibration method is AverageMinMax: {self._calib_opts}')\n        min_value, max_value = (average_min_max_statistics.min_sum / num_samples, average_min_max_statistics.max_sum / num_samples)\n        return (min_value, max_value)",
    "docstring": "AverageMinMaxCalibrationAlgorithm for calculating min and max values of calibration result. AverageMinMax calibration calculates the average of min and max values. average of min = sum of min values / number of samples average of max = sum of max values / number of samples",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\compiler\\mlir\\quantization\\tensorflow\\calibrator\\calibration_algorithm.py",
    "ast_data": "ClassDef name:_AverageMinMax FunctionDef name:get_min_max_value arg:self arguments arg Assign Assign If Compare Raise Call Assign Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, strategy, cluster_spec, task_type, task_id, session_config=None, rpc_layer='grpc', worker_barrier=None):\n    self._strategy = strategy\n    self._cluster_spec = cluster_spec\n    self._task_type = task_type\n    self._task_id = task_id\n    self._session_config = session_config\n    self._worker_barrier = worker_barrier\n    self._rpc_layer = rpc_layer\n    self._master_target = self._get_master_target()\n    self._num_workers = _get_num_workers(cluster_spec)\n    self._is_chief_node = self._is_chief()",
    "docstring": "Initialize the worker context object. Args: strategy: a object. cluster_spec: a ClusterSpec object. It can be empty or None in the local training case. task_type: a string indicating the role of the corresponding task, such as \"worker\" or \"ps\". It can be None if it is local training or in-graph replicated training. task_id: an integer indicating id of the corresponding task. It can be None if it is local training or in-graph replicated training. session_config: an optional object. rpc_layer: optional string specifying the RPC protocol for communication with worker masters. If None or empty, hosts in the will be used directly. worker_barrier: optional, the barrier object for worker synchronization.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\distribute\\distribute_coordinator_utils.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:strategy arg:cluster_spec arg:task_type arg:task_id arg:session_config arg:rpc_layer arg:worker_barrier arguments arg arg arg arg arg arg arg arg Assign Assign Assign Assign Assign Assign Assign Assign Call Assign Call Assign Call"
  },
  {
    "library": "tensorflow",
    "name": "trace_next_step",
    "source_code": "def trace_next_step(self):\n    if not self._enabled:\n        return\n    self._trace_next_step = True\n    self._slow_path_steps.add(self._step)",
    "docstring": "Enables tracing and adds traces to profiler at next step.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\profiler\\profile_context.py",
    "ast_data": "FunctionDef name:trace_next_step arg:self arguments arg If Return return:no Assign Call"
  },
  {
    "library": "pytorch",
    "name": "DTypeWithConstraints",
    "source_code": "@dataclass\nclass DTypeWithConstraints:\n    dtype: Optional[torch.dtype] = None\n    quant_min_lower_bound: Union[int, float, None] = None\n    quant_max_upper_bound: Union[int, float, None] = None\n    scale_min_lower_bound: Union[int, float, None] = None\n    scale_max_upper_bound: Union[int, float, None] = None\n    scale_exact_match: Optional[float] = None\n    zero_point_exact_match: Optional[int] = None",
    "docstring": "Config for specifying additional constraints for a given dtype, such as quantization value ranges, scale value ranges, and fixed quantization params, to be used in :class:. The constraints currently supported are: * and : Lower and upper bounds for the minimum and maximum quantized values respectively. If the QConfig's and fall outside this range, then the QConfig will be ignored. * and : Lower and upper bounds for the minimum and maximum scale values respectively. If the QConfig's minimum scale value (currently exposed as ) falls below the lower bound, then the QConfig will be ignored. Note that the upper bound is currently not enforced. * and : Exact match requirements for scale and zero point, to be used for operators with fixed quantization parameters such as sigmoid and tanh. If the observer specified in the QConfig is neither nor , or if the quantization parameters don't match, then the QConfig will be ignored.",
    "type": "class",
    "file_path": "pytorch\\torch\\ao\\quantization\\backend_config\\backend_config.py",
    "ast_data": "ClassDef name:DTypeWithConstraints"
  },
  {
    "library": "pandas",
    "name": "column_setitem",
    "source_code": "def column_setitem(self, loc: int, idx: int | slice | np.ndarray, value, inplace_only: bool=False) -> None:\n    if not self._has_no_reference(loc):\n        blkno = self.blknos[loc]\n        blk_loc = self.blklocs[loc]\n        values = self.blocks[blkno].values\n        if values.ndim == 1:\n            values = values.copy()\n        else:\n            values = values[[blk_loc]]\n        self._iset_split_block(blkno, [blk_loc], values)\n    col_mgr = self.iget(loc, track_ref=False)\n    if inplace_only:\n        col_mgr.setitem_inplace(idx, value)\n    else:\n        new_mgr = col_mgr.setitem((idx,), value)\n        self.iset(loc, new_mgr._block.values, inplace=True)",
    "docstring": "Set values (\"setitem\") into a single column (not setting the full column). This is a method on the BlockManager level, to avoid creating an intermediate Series at the DataFrame level ()",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\internals\\managers.py",
    "ast_data": "FunctionDef name:column_setitem arg:self arg:loc arg:idx arg:value arg:inplace_only arguments arg arg arg arg arg If Call Assign Assign Assign If Compare Assign Call Assign Call Assign Call If Call Assign Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_compute_pluggable_device_options",
    "source_code": "def _compute_pluggable_device_options(self):\n    return self._compute_device_options(device_type='PluggableDevice')",
    "docstring": "Build the GPUOptions proto for PluggableDevice.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\context.py",
    "ast_data": "FunctionDef name:_compute_pluggable_device_options arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "HatchPatternBase",
    "source_code": "class HatchPatternBase:\n    pass",
    "docstring": "The base class for a hatch pattern.",
    "type": "class",
    "file_path": "matplotlib\\lib\\matplotlib\\hatch.py",
    "ast_data": "ClassDef name:HatchPatternBase"
  },
  {
    "library": "matplotlib",
    "name": "set_tight_layout",
    "source_code": "@_api.deprecated('3.6', alternative='set_layout_engine', pending=True)\ndef set_tight_layout(self, tight):\n    tight = mpl._val_or_rc(tight, 'figure.autolayout')\n    _tight = 'tight' if bool(tight) else 'none'\n    _tight_parameters = tight if isinstance(tight, dict) else {}\n    self.set_layout_engine(_tight, **_tight_parameters)\n    self.stale = True",
    "docstring": "Set whether and how is called when drawing. Parameters ---------- tight : bool or dict with keys \"pad\", \"w_pad\", \"h_pad\", \"rect\" or None If a bool, sets whether to call upon drawing. If `figure.autolayout.Figure.tight_layout`, overriding the default paddings.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\figure.py",
    "ast_data": "FunctionDef name:set_tight_layout arg:self arg:tight arguments arg arg Assign Call Assign Call Assign Call Call Assign Call"
  },
  {
    "library": "pytorch",
    "name": "compute_required_storage_length",
    "source_code": "def compute_required_storage_length(shape: ShapeType, strides: StrideType, storage_offset: int) -> int:\n    from torch.fx.experimental.symbolic_shapes import guard_size_oblivious\n    if guard_size_oblivious(reduce(operator.mul, shape, 1) == 0):\n        return 0\n    max_offset = sum(((x - 1) * y for x, y in zip(shape, strides)))\n    return 1 + storage_offset + max_offset",
    "docstring": "Computes the minimum storage size to hold the given tensor geometry. Example ======= This is the size of a newly allocated tensor's storage, in units of elements >>> t = torch.empty((10, 20)) >>> compute_required_storage_length(t.shape, t.stride(), t.storage_offset()) 200 >>> # xdoctest: +SKIP(failing) >>> t2 = torch.empty_strided((1, 2, 3), (5, 7, 11)) >>> size = compute_required_storage_length(t2.shape, t2.stride(), t2.storage_offset()) >>> size == t.storage().size() True A valid tensor may have a larger storage size, but never smaller >>> slice = torch.empty(100)[20:40] >>> slice.storage().size() 100 >>> compute_required_storage_length(slice.shape, slice.stride(), slice.storage_offset()) 40",
    "type": "function",
    "file_path": "pytorch\\torch\\_prims_common\\__init__.py",
    "ast_data": "FunctionDef name:compute_required_storage_length arg:shape arg:strides arg:storage_offset arguments arg arg arg If Call Compare Call Return return:yes Assign Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "PredictOutput",
    "source_code": "class PredictOutput(ExportOutput):\n    _SINGLE_OUTPUT_DEFAULT_NAME = 'output'\n\n    def __init__(self, outputs):\n        self._outputs = self._wrap_and_check_outputs(outputs, self._SINGLE_OUTPUT_DEFAULT_NAME, error_label='Prediction')\n\n    @property\n    def outputs(self):\n        return self._outputs\n\n    def as_signature_def(self, receiver_tensors):\n        return signature_def_utils.predict_signature_def(receiver_tensors, self.outputs)",
    "docstring": "Represents the output of a generic prediction head. A generic prediction need not be either a classification or a regression. Named outputs must be provided as a dict from string to ,",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\saved_model\\model_utils\\export_output.py",
    "ast_data": "ClassDef name:PredictOutput Assign FunctionDef name:__init__ arg:self arg:outputs arguments arg arg Assign Call FunctionDef name:outputs arg:self arguments arg Return return:yes FunctionDef name:as_signature_def arg:self arg:receiver_tensors arguments arg arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "_prod",
    "source_code": "def _prod(xs: Iterable[int]) -> int:\n    prod = 1\n    for x in xs:\n        prod *= x\n    return prod",
    "docstring": "Compute product of a list",
    "type": "function",
    "file_path": "pytorch\\torch\\_refs\\fft.py",
    "ast_data": "FunctionDef name:_prod arg:xs arguments arg Assign For Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "fit_transform",
    "source_code": "def fit_transform(self, X, y=None, *, normalize=True):\n    return self.fit(X, y).transform(X, normalize=normalize)",
    "docstring": "Fit to data, then transform it. Fits transformer to and and returns a transformed version of . Parameters ---------- X : array-like of shape (n_samples, n_features) Input samples. y : array-like of shape (n_samples,) or (n_samples, n_outputs), default=None Target values (None for unsupervised transformations). normalize : bool, default=True Whether to normalize the document topic distribution in . Returns ------- X_new : ndarray array of shape (n_samples, n_components) Transformed array.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\decomposition\\_lda.py",
    "ast_data": "FunctionDef name:fit_transform arg:self arg:X arg:y arguments arg arg arg arg Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "reduce_all",
    "source_code": "def reduce_all(input_tensor, axis=None, keepdims=False):\n    v = get_static_value(input_tensor)\n    if v is None:\n        return math_ops.reduce_all(input_tensor, axis=axis, keepdims=keepdims)\n    else:\n        return v.all(axis=axis, keepdims=keepdims)",
    "docstring": "A version of tf.reduce_all that eagerly evaluates if possible.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\numpy_ops\\np_utils.py",
    "ast_data": "FunctionDef name:reduce_all arg:input_tensor arg:axis arg:keepdims arguments arg arg arg Assign Call If Compare Return return:yes Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "read_execution_event",
    "source_code": "def read_execution_event(self, offset):\n    with self._reader_read_locks[self._execution_path]:\n        proto_string = self._get_reader(self._execution_path).read(offset)[0]\n    return debug_event_pb2.DebugEvent.FromString(proto_string)",
    "docstring": "Read a DebugEvent proto at a given offset from the .execution file. Args: offset: Offset to read the DebugEvent proto from. Returns: A DebugEventProto. Raises: if offset is at a wrong location. if offset is out of range of the file.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\debug\\lib\\debug_events_reader.py",
    "ast_data": "FunctionDef name:read_execution_event arg:self arg:offset arguments arg arg With Assign Call Call Return return:yes Call"
  },
  {
    "library": "kornia",
    "name": "set_laf_orientation",
    "source_code": "def set_laf_orientation(LAF: Tensor, angles_degrees: Tensor) -> Tensor:\n    KORNIA_CHECK_LAF(LAF)\n    B, N = LAF.shape[:2]\n    ori = get_laf_orientation(LAF).reshape_as(angles_degrees)\n    return rotate_laf(LAF, angles_degrees - ori)",
    "docstring": "Change the orientation of the LAFs. Args: LAF: :math: angles_degrees: :math: in degrees. Returns: LAF oriented with angles :math:",
    "type": "function",
    "file_path": "kornia\\kornia\\feature\\laf.py",
    "ast_data": "FunctionDef name:set_laf_orientation arg:LAF arg:angles_degrees arguments arg arg Call Assign Assign Call Call Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "bench_scikit_tree_regressor",
    "source_code": "def bench_scikit_tree_regressor(X, Y):\n    from sklearn.tree import DecisionTreeRegressor\n    gc.collect()\n    tstart = datetime.now()\n    clf = DecisionTreeRegressor()\n    clf.fit(X, Y).predict(X)\n    delta = datetime.now() - tstart\n    scikit_regressor_results.append(delta.seconds + delta.microseconds / mu_second)",
    "docstring": "Benchmark with scikit-learn decision tree regressor",
    "type": "function",
    "file_path": "scikit-learn\\benchmarks\\bench_tree.py",
    "ast_data": "FunctionDef name:bench_scikit_tree_regressor arg:X arg:Y arguments arg arg Call Assign Call Assign Call Call Call Assign Call Call"
  },
  {
    "library": "pandas",
    "name": "_format_attrs",
    "source_code": "def _format_attrs(self) -> list[tuple[str_t, str_t | int | bool | None]]:\n    attrs: list[tuple[str_t, str_t | int | bool | None]] = []\n    if not self._is_multi:\n        attrs.append(('dtype', f\"'{self.dtype}'\"))\n    if self.name is not None:\n        attrs.append(('name', default_pprint(self.name)))\n    elif self._is_multi and any((x is not None for x in self.names)):\n        attrs.append(('names', default_pprint(self.names)))\n    max_seq_items = get_option('display.max_seq_items') or len(self)\n    if len(self) > max_seq_items:\n        attrs.append(('length', len(self)))\n    return attrs",
    "docstring": "Return a list of tuples of the (attr,formatted_value).",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\indexes\\base.py",
    "ast_data": "FunctionDef name:_format_attrs arg:self arguments arg If Call If Compare Call Call If BoolOp Call Compare Call Call Assign BoolOp Call Call If Compare Call Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "tile",
    "source_code": "def tile(tensor, tile_assignment, assign_tuple_sharding=False, use_sharding_op=False, unspecified_dims=None):\n    return Sharding.tile(tile_assignment).apply_to_tensor(tensor, assign_tuple_sharding=assign_tuple_sharding, use_sharding_op=use_sharding_op, unspecified_dims=unspecified_dims or [])",
    "docstring": "Returns a tensor that has tiled sharding. Args: tensor: A tf.Tensor to shard. tile_assignment: An np.ndarray describing the topology of the tiling and which device will compute which part of the topology. assign_tuple_sharding: If the sharding type should be a tuple. use_sharding_op: If true, adds a sharding op to set the sharding. unspecified_dims: An optional list of dimensions unspecified.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\compiler\\xla\\experimental\\xla_sharding.py",
    "ast_data": "FunctionDef name:tile arg:tensor arg:tile_assignment arg:assign_tuple_sharding arg:use_sharding_op arg:unspecified_dims arguments arg arg arg arg arg Return return:yes Call Call BoolOp"
  },
  {
    "library": "pytorch",
    "name": "_str_at_line",
    "source_code": "def _str_at_line(self, line: int) -> str:\n    return self._graph_segment_str_at_line(line) + self._connector_segment_str_at_line(line) + self._children_str_at_line(line)",
    "docstring": "Get the string representation of the graph at the given line.",
    "type": "method",
    "file_path": "pytorch\\torch\\onnx\\verification.py",
    "ast_data": "FunctionDef name:_str_at_line arg:self arg:line arguments arg arg Return return:yes Call Call Call"
  },
  {
    "library": "matplotlib",
    "name": "set_offsets",
    "source_code": "def set_offsets(self, xy):\n    self.x = xy[:, 0]\n    self.y = xy[:, 1]\n    x, y, u, v = cbook.delete_masked_points(self.x.ravel(), self.y.ravel(), self.u, self.v)\n    _check_consistent_shapes(x, y, u, v)\n    xy = np.column_stack((x, y))\n    super().set_offsets(xy)\n    self.stale = True",
    "docstring": "Set the offsets for the barb polygons. This saves the offsets passed in and masks them as appropriate for the existing U/V data. Parameters ---------- xy : sequence of pairs of floats",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\quiver.py",
    "ast_data": "FunctionDef name:set_offsets arg:self arg:xy arguments arg arg Assign Assign Assign Call Call Call Call Assign Call Call Call Assign"
  },
  {
    "library": "pytorch",
    "name": "_TargetExprVarArgs",
    "source_code": "class _TargetExprVarArgs(_TargetExpr):\n\n    def _match(self, node: torch.fx.Node, ctx: MatchContext) -> MatchResult:\n        if not self._match_fns(node):\n            return FailedMatch('function_mismatch')\n        if not self._match_users(node, ctx):\n            return FailedMatch('multiple_users')\n        m = Match(ctx, self)\n        m.nodes.append(node)\n        m.targets[self] = node.target\n        m.args.extend(node.args)\n        m.kwargs.update(node.kwargs)\n        return m",
    "docstring": "Matches a call_function node with any arguments which are passed into the pattern",
    "type": "class",
    "file_path": "pytorch\\torch\\_inductor\\pattern_matcher.py",
    "ast_data": "ClassDef name:_TargetExprVarArgs FunctionDef name:_match arg:self arg:node arg:ctx arguments arg arg arg If Call Return return:yes Call If Call Return return:yes Call Assign Call Call Assign Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_call_wrapped_cell",
    "source_code": "def _call_wrapped_cell(self, inputs, state, cell_call_fn, **kwargs):\n    outputs, new_state = cell_call_fn(inputs, state, **kwargs)\n\n    def assert_shape_match(inp, out):\n        inp.get_shape().assert_is_compatible_with(out.get_shape())\n\n    def default_residual_fn(inputs, outputs):\n        nest.assert_same_structure(inputs, outputs)\n        nest.map_structure(assert_shape_match, inputs, outputs)\n        return nest.map_structure(lambda inp, out: inp + out, inputs, outputs)\n    res_outputs = (self._residual_fn or default_residual_fn)(inputs, outputs)\n    return (res_outputs, new_state)",
    "docstring": "Run the cell and then apply the residual_fn on its inputs to its outputs. Args: inputs: cell inputs. state: cell state. cell_call_fn: Wrapped cell's method to use for step computation (cell's or 'call' method). **kwargs: Additional arguments passed to the wrapped cell's . Returns: Tuple of cell outputs and new state. Raises: TypeError: If cell inputs and outputs have different structure (type). ValueError: If cell inputs and outputs have different structure (value).",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\layers\\legacy_rnn\\rnn_cell_wrapper_impl.py",
    "ast_data": "FunctionDef name:_call_wrapped_cell arg:self arg:inputs arg:state arg:cell_call_fn arguments arg arg arg arg arg Assign Call FunctionDef name:assert_shape_match arg:inp arg:out arguments arg arg Call Call Call FunctionDef name:default_residual_fn arg:inputs arg:outputs arguments arg arg Call Call Return return:yes Call arguments arg arg Assign Call BoolOp Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "set_pane_color",
    "source_code": "def set_pane_color(self, color, alpha=None):\n    color = mcolors.to_rgba(color, alpha)\n    self._axinfo['color'] = color\n    self.pane.set_edgecolor(color)\n    self.pane.set_facecolor(color)\n    self.pane.set_alpha(color[-1])\n    self.stale = True",
    "docstring": "Set pane color. Parameters ---------- color : :mpltype: Color for axis pane. alpha : float, optional Alpha value for axis pane. If None, base it on *color*.",
    "type": "method",
    "file_path": "matplotlib\\lib\\mpl_toolkits\\mplot3d\\axis3d.py",
    "ast_data": "FunctionDef name:set_pane_color arg:self arg:color arg:alpha arguments arg arg arg Assign Call Assign Call Call Call Assign"
  },
  {
    "library": "authlib",
    "name": "create_token_response",
    "source_code": "@hooked\ndef create_token_response(self):\n    refresh_token = self.request.refresh_token\n    user = self.authenticate_user(refresh_token)\n    if not user:\n        raise InvalidRequestError(\"There is no 'user' for this token.\")\n    client = self.request.client\n    token = self.issue_token(user, refresh_token)\n    log.debug('Issue token %r to %r', token, client)\n    self.request.user = user\n    self.save_token(token)\n    self.revoke_old_credential(refresh_token)\n    return (200, token, self.TOKEN_RESPONSE_HEADER)",
    "docstring": "If valid and authorized, the authorization server issues an access token as described in Section 5.1. If the request failed verification or is invalid, the authorization server returns an error response as described in Section 5.2.",
    "type": "method",
    "file_path": "authlib\\authlib\\oauth2\\rfc6749\\grants\\refresh_token.py",
    "ast_data": "FunctionDef name:create_token_response arg:self arguments arg Assign Assign Call If Raise Call Assign Assign Call Call Assign Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_extract_graph_module_outputs",
    "source_code": "def _extract_graph_module_outputs(graph_module: torch.fx.GraphModule) -> Any:\n    for node in graph_module.graph.nodes:\n        if node.op == 'output':\n            return node.args[0]\n    raise ValueError('No output node found in this torch.fx.GraphModule.')",
    "docstring": "Collect \"val\" fields from outputs metadata in this torch.fx.GraphModule.",
    "type": "function",
    "file_path": "pytorch\\torch\\onnx\\_internal\\onnxruntime.py",
    "ast_data": "FunctionDef name:_extract_graph_module_outputs arg:graph_module arguments arg For If Compare Return return:yes Raise Call"
  },
  {
    "library": "cherrypy",
    "name": "check_app_config_brackets",
    "source_code": "def check_app_config_brackets(self):\n    for sn, app in cherrypy.tree.apps.items():\n        if not isinstance(app, cherrypy.Application):\n            continue\n        if not app.config:\n            continue\n        for key in app.config.keys():\n            if key.startswith('[') or key.endswith(']'):\n                warnings.warn('The application mounted at %r has config section names with extraneous brackets: %r. Config *files* need brackets; config *dicts* (e.g. passed to tree.mount) do not.' % (sn, key))",
    "docstring": "Check for App config with extraneous brackets in section names.",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\_cpchecker.py",
    "ast_data": "FunctionDef name:check_app_config_brackets arg:self arguments arg For Call If Call If For Call If BoolOp Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "logits",
    "source_code": "@property\ndef logits(self):\n    return self._logits",
    "docstring": "Log-odds of a outcome (vs ).",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\distributions\\bernoulli.py",
    "ast_data": "FunctionDef name:logits arg:self arguments arg Return return:yes"
  },
  {
    "library": "django",
    "name": "get_prep_value",
    "source_code": "def get_prep_value(self, value):\n    if isinstance(value, Promise):\n        value = value._proxy____cast()\n    return value",
    "docstring": "Perform preliminary non-db specific value checks and conversions.",
    "type": "method",
    "file_path": "django\\django\\db\\models\\fields\\__init__.py",
    "ast_data": "FunctionDef name:get_prep_value arg:self arg:value arguments arg arg If Call Assign Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "apply",
    "source_code": "def apply(self, model_outputs: Any, model: torch.nn.Module | Callable | torch_export.ExportedProgram | None=None) -> Sequence[Any]:\n    flattened_outputs, spec = pytree.tree_flatten(model_outputs)\n    if self._spec is None:\n        self._spec = spec\n    else:\n        _assert_identical_pytree_spec(self._spec, spec, error_message='Model outputs incompatible with the format that was exported. ')\n    return flattened_outputs",
    "docstring": "Flatten the model outputs and validate the output. Args: model_outputs: The model outputs to flatten. model: The PyTorch model. Returns: flattened_outputs: The flattened model outputs. Raises: ValueError: If the output produced from the current is not identical to the output produced from the first that was passed to this method.",
    "type": "method",
    "file_path": "pytorch\\torch\\onnx\\_internal\\io_adapter.py",
    "ast_data": "FunctionDef name:apply arg:self arg:model_outputs arg:model arguments arg arg arg Assign Call If Compare Assign Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "get_default_custom_config_dict",
    "source_code": "def get_default_custom_config_dict():\n    return _DEFAULT_CUSTOM_CONFIG_DICT",
    "docstring": "Defines the default custom config dict.",
    "type": "function",
    "file_path": "pytorch\\torch\\ao\\quantization\\quantize.py",
    "ast_data": "FunctionDef name:get_default_custom_config_dict arguments Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "hyperparameter_length_scale",
    "source_code": "@property\ndef hyperparameter_length_scale(self):\n    return Hyperparameter('length_scale', 'numeric', self.length_scale_bounds)",
    "docstring": "Returns the length scale",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\gaussian_process\\kernels.py",
    "ast_data": "FunctionDef name:hyperparameter_length_scale arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "_apply_to_tensors",
    "source_code": "def _apply_to_tensors(fn, container):\n\n    def apply(x):\n        from torch.nn.parallel.scatter_gather import _is_namedtuple\n        if isinstance(x, torch.Tensor):\n            return fn(x)\n        elif hasattr(x, '__dataclass_fields__'):\n            dc = dataclasses.replace(x)\n            changes = {f.name: apply(getattr(dc, f.name)) for f in dataclasses.fields(dc)}\n            return dataclasses.replace(dc, **changes)\n        elif isinstance(x, OrderedDict):\n            od = x.__class__()\n            for key, value in x.items():\n                od[key] = apply(value)\n            return od\n        elif isinstance(x, PackedSequence):\n            apply(x.data)\n            return x\n        elif isinstance(x, dict):\n            return {key: apply(value) for key, value in x.items()}\n        elif _is_namedtuple(x):\n            res = (apply(el) for el in x)\n            return type(x)(*res)\n        elif isinstance(x, (list, tuple, set)):\n            return type(x)((apply(el) for el in x))\n        else:\n            return x\n    return apply(container)",
    "docstring": "Recursively apply to all tensor in different kinds of container types.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\utils.py",
    "ast_data": "FunctionDef name:_apply_to_tensors arg:fn arg:container arguments arg arg FunctionDef name:apply arg:x arguments arg If Call Return return:yes Call If Call Assign Call Assign Call Call Call Return return:yes Call If Call Assign Call For Call Assign Call Return return:yes If Call Call Return return:yes If Call Return return:yes Call Call If Call Assign Call Return return:yes Call Call If Call Return return:yes Call Call Call Return return:yes Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_non_chief_checkpoint_dir",
    "source_code": "def _non_chief_checkpoint_dir(checkpoint_dir, task_id):\n    dirpath = os.path.dirname(checkpoint_dir)\n    base = os.path.basename(checkpoint_dir)\n    base_dirpath = 'workertemp_' + str(task_id)\n    dirpath = os.path.join(dirpath, base_dirpath)\n    file_io.recursive_create_dir_v2(dirpath)\n    return os.path.join(dirpath, base)",
    "docstring": "Returns a directory for non-chief worker to save checkpoint.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\failure_handling\\failure_handling.py",
    "ast_data": "FunctionDef name:_non_chief_checkpoint_dir arg:checkpoint_dir arg:task_id arguments arg arg Assign Call Assign Call Assign Call Assign Call Call Return return:yes Call"
  },
  {
    "library": "cherrypy",
    "name": "__repr__",
    "source_code": "def __repr__(self):\n    cls = self.__class__\n    return '%s.%s(callback=%r, failsafe=%r, priority=%r, %s)' % (cls.__module__, cls.__name__, self.callback, self.failsafe, self.priority, ', '.join(['%s=%r' % (k, v) for k, v in self.kwargs.items()]))",
    "docstring": "Render a string representation of :class: instance.",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\_cprequest.py",
    "ast_data": "FunctionDef name:__repr__ arg:self arguments arg Assign Return return:yes Call Call"
  },
  {
    "library": "scikit-learn",
    "name": "_count",
    "source_code": "def _count(self, X, Y):\n    check_non_negative(X, 'MultinomialNB (input X)')\n    self.feature_count_ += safe_sparse_dot(Y.T, X)\n    self.class_count_ += Y.sum(axis=0)",
    "docstring": "Count and smooth feature occurrences.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\naive_bayes.py",
    "ast_data": "FunctionDef name:_count arg:self arg:X arg:Y arguments arg arg arg Call Call Call"
  },
  {
    "library": "scipy",
    "name": "confidence_interval",
    "source_code": "def confidence_interval(self, confidence_level=0.95):\n    low, high = _t_confidence_interval(self.df, self._statistic_np, confidence_level, self._alternative, self._dtype, self._xp)\n    low = low * self._standard_error + self._estimate\n    high = high * self._standard_error + self._estimate\n    return ConfidenceInterval(low=low, high=high)",
    "docstring": "Parameters ---------- confidence_level : float The confidence level for the calculation of the population mean confidence interval. Default is 0.95. Returns ------- ci : namedtuple The confidence interval is returned in a `lowhigh`.",
    "type": "method",
    "file_path": "scipy\\scipy\\stats\\_stats_py.py",
    "ast_data": "FunctionDef name:confidence_interval arg:self arg:confidence_level arguments arg arg Assign Call Assign Assign Return return:yes Call"
  },
  {
    "library": "django",
    "name": "make_bytes",
    "source_code": "def make_bytes(self, value):\n    if isinstance(value, (bytes, memoryview)):\n        return bytes(value)\n    if isinstance(value, str):\n        return bytes(value.encode(self.charset))\n    return str(value).encode(self.charset)",
    "docstring": "Turn a value into a bytestring encoded in the output charset.",
    "type": "method",
    "file_path": "django\\django\\http\\response.py",
    "ast_data": "FunctionDef name:make_bytes arg:self arg:value arguments arg arg If Call Return return:yes Call If Call Return return:yes Call Call Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "from_reference",
    "source_code": "@classmethod\ndef from_reference(cls, ref_qlinear):\n    qlinear = cls(ref_qlinear.in_features, ref_qlinear.out_features, dtype=ref_qlinear.weight_dtype)\n    qweight = ref_qlinear.get_quantized_weight()\n    bias = ref_qlinear.bias\n    qlinear.set_weight_bias(qweight, bias)\n    return qlinear",
    "docstring": "Create a (fbgemm/qnnpack) dynamic quantized module from a reference quantized module Args: ref_qlinear (Module): a reference quantized module, either produced by torch.ao.quantization functions or provided by the user",
    "type": "method",
    "file_path": "pytorch\\torch\\ao\\nn\\quantized\\dynamic\\modules\\linear.py",
    "ast_data": "FunctionDef name:from_reference arg:cls arg:ref_qlinear arguments arg arg Assign Call Assign Call Assign Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "__exit__",
    "source_code": "def __exit__(self, exc_type, exc_val, exc_tb):\n    while self.patches_made:\n        self.patches_made.pop().revert()\n    self.visited.clear()",
    "docstring": "Undo all the changes made via self.patch() and self.patch_method()",
    "type": "method",
    "file_path": "pytorch\\torch\\fx\\_symbolic_trace.py",
    "ast_data": "FunctionDef name:__exit__ arg:self arg:exc_type arg:exc_val arg:exc_tb arguments arg arg arg arg While Call Call Call"
  },
  {
    "library": "matplotlib",
    "name": "dot",
    "source_code": "def dot(self, V):\n    assert V.shape == (self.m,)\n    return np.bincount(self.rows, weights=self.vals * V[self.cols], minlength=self.m)",
    "docstring": "Dot product of self by a vector *V* in sparse-dense to dense format *V* dense vector of shape (self.m,).",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\tri\\_triinterpolate.py",
    "ast_data": "FunctionDef name:dot arg:self arg:V arguments arg arg Compare Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "tensor_proto",
    "source_code": "def tensor_proto(tag, tensor):\n    if tensor.numel() * tensor.itemsize >= 1 << 31:\n        raise ValueError(\"tensor is bigger than protocol buffer's hard limit of 2GB in size\")\n    if tensor.dtype in _TENSOR_TYPE_MAP:\n        dtype, field_name, conversion_fn = _TENSOR_TYPE_MAP[tensor.dtype]\n        tensor_proto = TensorProto(**{'dtype': dtype, 'tensor_shape': TensorShapeProto(dim=[TensorShapeProto.Dim(size=x) for x in tensor.shape]), field_name: conversion_fn(tensor)})\n    else:\n        raise ValueError(f'{tag} has unsupported tensor dtype {tensor.dtype}')\n    plugin_data = SummaryMetadata.PluginData(plugin_name='tensor')\n    smd = SummaryMetadata(plugin_data=plugin_data)\n    return Summary(value=[Summary.Value(tag=tag, metadata=smd, tensor=tensor_proto)])",
    "docstring": "Outputs a protocol buffer containing the full tensor. The generated Summary has a Tensor.proto containing the input Tensor. Args: tag: A name for the generated node. Will also serve as the series name in TensorBoard. tensor: Tensor to be converted to protobuf Returns: A tensor protobuf in a protobuf. Raises: ValueError: If tensor is too big to be converted to protobuf, or tensor data type is not supported",
    "type": "function",
    "file_path": "pytorch\\torch\\utils\\tensorboard\\summary.py",
    "ast_data": "FunctionDef name:tensor_proto arg:tag arg:tensor arguments arg arg If Compare Call Raise Call If Compare Assign Assign Call Call Call Call Raise Call Assign Call Assign Call Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "group",
    "source_code": "@doc_controls.do_not_doc_inheritable\ndef group(self, value, name=None):\n    return self._extended._group(value, name)",
    "docstring": "Shortcut for .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\distribute_lib.py",
    "ast_data": "FunctionDef name:group arg:self arg:value arg:name arguments arg arg arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "BlockingAsyncStager",
    "source_code": "class BlockingAsyncStager(AsyncStager):\n    _synchronize_after_execute: bool = False\n\n    def __init__(self, cache_staged_state_dict: bool=False, type_check: bool=False):\n        self.cache_staged_state_dict = cache_staged_state_dict\n        self.type_check = type_check\n        self.state_dict_cache: Optional[STATE_DICT_TYPE] = None\n\n    def stage(self, state_dict: STATE_DICT_TYPE) -> STATE_DICT_TYPE:\n        if not self.cache_staged_state_dict:\n            staged_state_dict = _create_cpu_state_dict(state_dict)\n            _copy_state_dict(state_dict, staged_state_dict, type_check=self.type_check)\n            return staged_state_dict\n        if self.state_dict_cache is None:\n            self.state_dict_cache = _create_cpu_state_dict(state_dict, pin_memory=True)\n        return _copy_state_dict(state_dict, self.state_dict_cache)\n\n    def synchronize_staging(self) -> None:\n        pass",
    "docstring": "An implementation of AsyncStager which stages the state_dict on CPU RAM and blocks until the copy is complete. This implementation also provides an option to optimize stage latency using pinned memory. N.B. synchronize_staging is a no-op in this case.",
    "type": "class",
    "file_path": "pytorch\\torch\\distributed\\checkpoint\\staging.py",
    "ast_data": "ClassDef name:BlockingAsyncStager FunctionDef name:__init__ arg:self arg:cache_staged_state_dict arg:type_check arguments arg arg arg Assign Assign FunctionDef name:stage arg:self arg:state_dict arguments arg arg If Assign Call Call Return return:yes If Compare Assign Call Return return:yes Call FunctionDef name:synchronize_staging arg:self arguments arg"
  },
  {
    "library": "pytorch",
    "name": "_create_grad_recv_info",
    "source_code": "def _create_grad_recv_info(self, act_send_info: dict) -> tuple[_RecvInfo, ...]:\n    grad_recv_info: dict[int, _RecvInfo] = {}\n    output_node = self._get_output_node()\n    output_vals = flatten_args(output_node.args)\n    for out_idx, dst_list in act_send_info.items():\n        if not dst_list:\n            continue\n        output = output_vals[out_idx]\n        example_value = output.meta['val']\n        logger.debug(f'{self.log_prefix} Creating grad recv buffer for output {output.name} : {example_value.shape}, {example_value.dtype}')\n        assert len(dst_list) == 1, 'Backward of skip connections not supported yet'\n        grad_src = dst_list[0]\n        grad_recv_info[out_idx] = _RecvInfo(f'{grad_src}', grad_src, _make_tensor_from_meta(example_value, self.device))\n    grad_recv_info_tuple = tuple(grad_recv_info.values())\n    logger.debug('%s Grad recv info: %s', self.log_prefix, grad_recv_info_tuple)\n    return grad_recv_info_tuple",
    "docstring": "Create a tuple of for gradients.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\pipelining\\stage.py",
    "ast_data": "FunctionDef name:_create_grad_recv_info arg:self arg:act_send_info arguments arg arg Assign Call Assign Call For Call If Assign Assign Call Compare Call Assign Assign Call Call Assign Call Call Call Return return:yes"
  },
  {
    "library": "scipy",
    "name": "_pad_h",
    "source_code": "def _pad_h(h, up):\n    h_padlen = len(h) + -len(h) % up\n    h_full = np.zeros(h_padlen, h.dtype)\n    h_full[:len(h)] = h\n    h_full = h_full.reshape(-1, up).T[:, ::-1].ravel()\n    return h_full",
    "docstring": "Store coefficients in a transposed, flipped arrangement. For example, suppose upRate is 3, and the input number of coefficients is 10, represented as h[0], ..., h[9]. Then the internal buffer will look like this:: h[9], h[6], h[3], h[0], // flipped phase 0 coefs 0, h[7], h[4], h[1], // flipped phase 1 coefs (zero-padded) 0, h[8], h[5], h[2], // flipped phase 2 coefs (zero-padded)",
    "type": "function",
    "file_path": "scipy\\scipy\\signal\\_upfirdn.py",
    "ast_data": "FunctionDef name:_pad_h arg:h arg:up arguments arg arg Assign Call Call Assign Call Assign Call Assign Call Call Return return:yes"
  },
  {
    "library": "sphinx",
    "name": "eval_condition",
    "source_code": "def eval_condition(self, condition: str) -> bool:\n    if condition in self._condition_cache:\n        return self._condition_cache[condition]\n    parser = BooleanParser(_ENV, condition, state='variable')\n    expr = parser.parse_expression()\n    if not parser.stream.eos:\n        msg = 'chunk after expression'\n        raise ValueError(msg)\n    evaluated = self._condition_cache[condition] = self._eval_node(expr)\n    return evaluated",
    "docstring": "Evaluate a boolean condition. Only conditional expressions and binary operators (and, or, not) are permitted, and operate on tag names, where truthy values mean the tag is present and vice versa.",
    "type": "method",
    "file_path": "sphinx\\sphinx\\util\\tags.py",
    "ast_data": "FunctionDef name:eval_condition arg:self arg:condition arguments arg arg If Compare Return return:yes Assign Call Assign Call If Assign Raise Call Assign Call Return return:yes"
  },
  {
    "library": "scipy",
    "name": "max_distance_rectangle",
    "source_code": "def max_distance_rectangle(self, other, p=2.0):\n    return minkowski_distance(0, np.maximum(self.maxes - other.mins, other.maxes - self.mins), p)",
    "docstring": "Compute the maximum distance between points in the two hyperrectangles. Parameters ---------- other : hyperrectangle Input. p : float, optional Input.",
    "type": "method",
    "file_path": "scipy\\scipy\\spatial\\_kdtree.py",
    "ast_data": "FunctionDef name:max_distance_rectangle arg:self arg:other arg:p arguments arg arg arg Return return:yes Call Call"
  },
  {
    "library": "matplotlib",
    "name": "get_text",
    "source_code": "def get_text(self):\n    return self._text",
    "docstring": "Return the cell instance.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\table.py",
    "ast_data": "FunctionDef name:get_text arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "aug_body",
    "source_code": "def aug_body():\n    nonlocal has_next\n    opt_iterate = iter_.get_next_as_optional()\n    has_next = opt_iterate.has_value()\n    loop_vars = aug_get_state()\n\n    def main_path():\n        body(opt_iterate.get_value())\n        new_loop_vars = aug_get_state()\n        verify_tf_loop_vars(init_vars, loop_vars, new_loop_vars, symbol_names, opts)\n        return new_loop_vars\n\n    def noop_path():\n        return loop_vars\n    aug_set_state(tf_cond.cond(has_next, main_path, noop_path))",
    "docstring": "Main body passed to _tf_while_stmt.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\autograph\\operators\\control_flow.py",
    "ast_data": "FunctionDef name:aug_body arguments Assign Call Assign Call Assign Call FunctionDef name:main_path arguments Call Call Assign Call Call Return return:yes FunctionDef name:noop_path arguments Return return:yes Call Call"
  },
  {
    "library": "pandas",
    "name": "_remove_whitespace",
    "source_code": "def _remove_whitespace(s: str, regex: Pattern=_RE_WHITESPACE) -> str:\n    return regex.sub(' ', s.strip())",
    "docstring": "Replace extra whitespace inside of a string with a single space. Parameters ---------- s : str or unicode The string from which to remove extra whitespace. regex : re.Pattern The regular expression to use to remove extra whitespace. Returns ------- subd : str or unicode with all extra whitespace replaced with a single space.",
    "type": "function",
    "file_path": "pandas\\pandas\\io\\html.py",
    "ast_data": "FunctionDef name:_remove_whitespace arg:s arg:regex arguments arg arg Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "_unflatten_dense_tensors",
    "source_code": "def _unflatten_dense_tensors(flat, tensors):\n    return torch._C._nn.unflatten_dense_tensors(flat, tensors)",
    "docstring": "View a flat buffer using the sizes of tensors. Assume that tensors are of same dense type, and that flat is given by _flatten_dense_tensors. Args: flat (Tensor): flattened dense tensors to unflatten. tensors (Iterable[Tensor]): dense tensors whose sizes will be used to unflatten flat. Returns: Unflattened dense tensors with sizes same as tensors and values from flat.",
    "type": "function",
    "file_path": "pytorch\\torch\\_utils.py",
    "ast_data": "FunctionDef name:_unflatten_dense_tensors arg:flat arg:tensors arguments arg arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "post_unshard",
    "source_code": "def post_unshard(self):\n    if self._uses_param_mixed_precision and self.uses_sharded_strategy:\n        self._free_low_precision_sharded_param()\n    self._check_on_compute_device(self.flat_param)",
    "docstring": "Run the post-unshard logic. This includes freeing the low precision shard if needed.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\fsdp\\_flat_param.py",
    "ast_data": "FunctionDef name:post_unshard arg:self arguments arg If BoolOp Call Call"
  },
  {
    "library": "matplotlib",
    "name": "set_angles",
    "source_code": "def set_angles(self, angles):\n    self._angles = np.deg2rad(angles).ravel()\n    self.stale = True",
    "docstring": "Set the angles of the first axes, degrees CCW from the x-axis.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\collections.py",
    "ast_data": "FunctionDef name:set_angles arg:self arg:angles arguments arg arg Assign Call Call Assign"
  },
  {
    "library": "pandas",
    "name": "_get_period_range_edges",
    "source_code": "def _get_period_range_edges(first: Period, last: Period, freq: BaseOffset, closed: Literal['right', 'left']='left', origin: TimeGrouperOrigin='start_day', offset: Timedelta | None=None) -> tuple[Period, Period]:\n    if not all((isinstance(obj, Period) for obj in [first, last])):\n        raise TypeError(\"'first' and 'last' must be instances of type Period\")\n    first_ts = first.to_timestamp()\n    last_ts = last.to_timestamp()\n    adjust_first = not freq.is_on_offset(first_ts)\n    adjust_last = freq.is_on_offset(last_ts)\n    first_ts, last_ts = _get_timestamp_range_edges(first_ts, last_ts, freq, unit='ns', closed=closed, origin=origin, offset=offset)\n    first = (first_ts + int(adjust_first) * freq).to_period(freq)\n    last = (last_ts - int(adjust_last) * freq).to_period(freq)\n    return (first, last)",
    "docstring": "Adjust the provided and Periods to the respective Period of the given offset that encompasses them. Parameters ---------- first : pd.Period The beginning Period of the range to be adjusted. last : pd.Period The ending Period of the range to be adjusted. freq : pd.DateOffset The freq to which the Periods will be adjusted. closed : {'right', 'left'}, default \"left\" Which side of bin interval is closed. origin : {'epoch', 'start', 'start_day'}, Timestamp, default 'start_day' The timestamp on which to adjust the grouping. The timezone of origin must match the timezone of the index. If a timestamp is not used, these values are also supported: - 'epoch': is 1970-01-01 - 'start': is the first value of the timeseries - 'start_day': is the first day at midnight of the timeseries offset : pd.Timedelta, default is None An offset timedelta added to the origin. Returns ------- A tuple of length 2, containing the adjusted pd.Period objects.",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\resample.py",
    "ast_data": "FunctionDef name:_get_period_range_edges arg:first arg:last arg:freq arg:closed arg:origin arg:offset arguments arg arg arg arg arg arg If Call Call Raise Call Assign Call Assign Call Assign Call Assign Call Assign Call Assign Call Call Assign Call Call Return return:yes"
  },
  {
    "library": "scipy",
    "name": "time_pdist",
    "source_code": "def time_pdist(self, num_points, metric):\n    distance.pdist(self.points, self.metric, **self.kwargs)",
    "docstring": "Time scipy.spatial.distance.pdist over a range of input data sizes and metrics.",
    "type": "method",
    "file_path": "scipy\\benchmarks\\benchmarks\\spatial.py",
    "ast_data": "FunctionDef name:time_pdist arg:self arg:num_points arg:metric arguments arg arg arg Call"
  },
  {
    "library": "matplotlib",
    "name": "close",
    "source_code": "def close(self, id):\n    while len(self.__tags) > id:\n        self.end()",
    "docstring": "Close open elements, up to (and including) the element identified by the given identifier. Parameters ---------- id Element identifier, as returned by the :meth: method.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\backends\\backend_svg.py",
    "ast_data": "FunctionDef name:close arg:self arg:id arguments arg arg While Compare Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_SanitizedMRO",
    "source_code": "def _SanitizedMRO(obj):\n    return_list = []\n    for cls in tf_inspect.getmro(obj):\n        if cls.__name__ == '_NewClass':\n            continue\n        str_repr = _NormalizeType(str(cls))\n        return_list.append(str_repr)\n        if 'tensorflow' not in str_repr and 'keras' not in str_repr:\n            break\n        if 'StubOutForTesting' in str_repr:\n            break\n    return return_list",
    "docstring": "Get a list of superclasses with minimal amount of non-TF classes. Based on many parameters like python version, OS, protobuf implementation or changes in google core libraries the list of superclasses of a class can change. We only return the first non-TF class to be robust to non API affecting changes. The Method Resolution Order returned by is still maintained in the return value. Args: obj: A python routine for us the create the sanitized arspec of. Returns: list of strings, string representation of the class names.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\tools\\api\\lib\\python_object_to_proto_visitor.py",
    "ast_data": "FunctionDef name:_SanitizedMRO arg:obj arguments arg Assign For Call If Compare Assign Call Call Call If BoolOp Compare Compare If Compare Return return:yes"
  },
  {
    "library": "django",
    "name": "merge",
    "source_code": "def merge(self, other):\n    if not getattr(other, '_catalog', None):\n        return\n    if self._catalog is None:\n        self.plural = other.plural\n        self._info = other._info.copy()\n        self._catalog = TranslationCatalog(other)\n    else:\n        self._catalog.update(other)\n    if other._fallback:\n        self.add_fallback(other._fallback)",
    "docstring": "Merge another translation into this catalog.",
    "type": "method",
    "file_path": "django\\django\\utils\\translation\\trans_real.py",
    "ast_data": "FunctionDef name:merge arg:self arg:other arguments arg arg If Call Return return:no If Compare Assign Assign Call Assign Call Call If Call"
  },
  {
    "library": "scipy",
    "name": "_getmaxprint",
    "source_code": "def _getmaxprint(self):\n    return self.maxprint",
    "docstring": "Maximum number of elements to display when printed.",
    "type": "method",
    "file_path": "scipy\\scipy\\sparse\\_base.py",
    "ast_data": "FunctionDef name:_getmaxprint arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "get_output_slot",
    "source_code": "def get_output_slot(element_name):\n    _, output_slot = parse_node_or_tensor_name(element_name)\n    return output_slot if output_slot is not None else 0",
    "docstring": "Get the output slot number from the name of a graph element. If element_name is a node name without output slot at the end, 0 will be assumed. Args: element_name: () name of the graph element in question. Returns: () output slot number.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\debug\\lib\\debug_graphs.py",
    "ast_data": "FunctionDef name:get_output_slot arg:element_name arguments arg Assign Call Return return:yes Compare"
  },
  {
    "library": "pytorch",
    "name": "_has_fsdp_params",
    "source_code": "@no_type_check\ndef _has_fsdp_params(state: _FSDPState, module: nn.Module) -> bool:\n    return _module_handle(state, module) is not None",
    "docstring": "Returns if `` has parameters managed by FSDP.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\fsdp\\_common_utils.py",
    "ast_data": "FunctionDef name:_has_fsdp_params arg:state arg:module arguments arg arg Return return:yes Compare Call"
  },
  {
    "library": "scikit-learn",
    "name": "predict_proba",
    "source_code": "@available_if(_estimators_has('predict_proba'))\ndef predict_proba(self, X):\n    check_is_fitted(self)\n    Y = np.array([e.predict_proba(X)[:, 1] for e in self.estimators_]).T\n    if len(self.estimators_) == 1:\n        Y = np.concatenate((1 - Y, Y), axis=1)\n    if not self.multilabel_:\n        row_sums = np.sum(Y, axis=1)[:, np.newaxis]\n        np.divide(Y, row_sums, out=Y, where=row_sums != 0)\n    return Y",
    "docstring": "Probability estimates. The returned estimates for all classes are ordered by label of classes. Note that in the multilabel case, each sample can have any number of labels. This returns the marginal probability that the given sample has the label in question. For example, it is entirely consistent that two labels both have a 90% probability of applying to a given sample. In the single label multiclass case, the rows of the returned matrix sum to 1. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) Input data. Returns ------- T : array-like of shape (n_samples, n_classes) Returns the probability of the sample for each class in the model, where classes are ordered as they are in .",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\multiclass.py",
    "ast_data": "FunctionDef name:predict_proba arg:self arg:X arguments arg arg Call Assign Call Call If Compare Call Assign Call If Assign Call Call Compare Return return:yes Call Call"
  },
  {
    "library": "scipy",
    "name": "_sum_of_squares",
    "source_code": "def _sum_of_squares(a, axis=0):\n    a, axis = _chk_asarray(a, axis)\n    return np_vecdot(a, a, axis=axis)",
    "docstring": "Square each element of the input array, and return the sum(s) of that. Parameters ---------- a : array_like Input array. axis : int or None, optional Axis along which to calculate. Default is 0. If None, compute over the whole array . Returns ------- sum_of_squares : ndarray The sum along the given axis for (a**2). See Also -------- _square_of_sums : The square(s) of the sum(s) (the opposite of ).",
    "type": "function",
    "file_path": "scipy\\scipy\\stats\\_stats_py.py",
    "ast_data": "FunctionDef name:_sum_of_squares arg:a arg:axis arguments arg arg Assign Call Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "urlopen",
    "source_code": "def urlopen(*args: Any, **kwargs: Any) -> Any:\n    import urllib.request\n    return urllib.request.urlopen(*args, **kwargs)",
    "docstring": "Lazy-import wrapper for stdlib urlopen, as that imports a big chunk of the stdlib.",
    "type": "function",
    "file_path": "pandas\\pandas\\io\\common.py",
    "ast_data": "FunctionDef name:urlopen arguments arg arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "write_file_on_exit",
    "source_code": "def write_file_on_exit(val: bool) -> None:\n    torch._C._cuda_tunableop_write_file_on_exit(val)",
    "docstring": "During Tuning Context destruction, write file to disk. This is useful as a final flush of your results to disk if your application terminates as result of normal operation or an error. Manual flushing of your results can be achieved by manually calling ``.",
    "type": "function",
    "file_path": "pytorch\\torch\\cuda\\tunable.py",
    "ast_data": "FunctionDef name:write_file_on_exit arg:val arguments arg Call"
  },
  {
    "library": "pytorch",
    "name": "gather",
    "source_code": "def gather(tensors, dim=0, destination=None, *, out=None):\n    tensors = [_handle_complex(t) for t in tensors]\n    if out is None:\n        if destination == -1:\n            warnings.warn('Using -1 to represent CPU tensor is deprecated. Please use a device object or string instead, e.g., \"cpu\".', FutureWarning, stacklevel=2)\n        destination = _get_device_index(destination, allow_cpu=True, optional=True)\n        return torch._C._gather(tensors, dim, destination)\n    else:\n        if destination is not None:\n            raise RuntimeError(f\"'destination' must not be specified when 'out' is specified, but got destination={destination}\")\n        return torch._C._gather_out(tensors, out, dim)",
    "docstring": "Gathers tensors from multiple GPU devices. Args: tensors (Iterable[Tensor]): an iterable of tensors to gather. Tensor sizes in all dimensions other than :attr: have to match. dim (int, optional): a dimension along which the tensors will be concatenated. Default: `tensorsdimdestinationoutdestinationdestinationtensorsdimoutouttensorsdim`.",
    "type": "function",
    "file_path": "pytorch\\torch\\nn\\parallel\\comm.py",
    "ast_data": "FunctionDef name:gather arg:tensors arg:dim arg:destination arguments arg arg arg arg Assign Call If Compare If Compare Call Assign Call Return return:yes Call If Compare Raise Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "resize_image_with_pad_v1",
    "source_code": "@tf_export(v1=['image.resize_image_with_pad'])\n@dispatch.add_dispatch_support\ndef resize_image_with_pad_v1(image, target_height, target_width, method=ResizeMethodV1.BILINEAR, align_corners=False):\n\n    def _resize_fn(im, new_size):\n        return resize_images(im, new_size, method, align_corners=align_corners)\n    return _resize_image_with_pad_common(image, target_height, target_width, _resize_fn)",
    "docstring": "Resizes and pads an image to a target width and height. Resizes an image to a target width and height by keeping the aspect ratio the same without distortion. If the target dimensions don't match the image dimensions, the image is resized and then padded with zeroes to match requested dimensions. Args: image: 4-D Tensor of shape or 3-D Tensor of shape . target_height: Target height. target_width: Target width. method: Method to use for resizing image. See align_corners: bool. If True, the centers of the 4 corner pixels of the input and output tensors are aligned, preserving the values at the corner pixels. Defaults to . Raises: ValueError: if or are zero or negative. Returns: Resized and padded image. If was 4-D, a 4-D float Tensor of shape . If was 3-D, a 3-D float Tensor of shape .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\image_ops_impl.py",
    "ast_data": "FunctionDef name:resize_image_with_pad_v1 arg:image arg:target_height arg:target_width arg:method arg:align_corners arguments arg arg arg arg arg FunctionDef name:_resize_fn arg:im arg:new_size arguments arg arg Return return:yes Call Return return:yes Call Call"
  },
  {
    "library": "pandas",
    "name": "lazy_load_stub_paste",
    "source_code": "def lazy_load_stub_paste():\n    global copy, paste\n    copy, paste = determine_clipboard()\n    return paste()",
    "docstring": "A stub function for paste(), which will load the real paste() function when called so that the real paste() function is used for later calls. This allows users to import pyperclip without having determine_clipboard() automatically run, which will automatically select a clipboard mechanism. This could be a problem if it selects, say, the memory-heavy PyQt4 module but the user was just going to immediately call set_clipboard() to use a different clipboard mechanism. The lazy loading this stub function implements gives the user a chance to call set_clipboard() to pick another clipboard mechanism. Or, if the user simply calls copy() or paste() without calling set_clipboard() first, will fall back on whatever clipboard mechanism that determine_clipboard() automatically chooses.",
    "type": "function",
    "file_path": "pandas\\pandas\\io\\clipboard\\__init__.py",
    "ast_data": "FunctionDef name:lazy_load_stub_paste arguments Assign Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "TypeSpecBatchEncoder",
    "source_code": "class TypeSpecBatchEncoder(object, metaclass=abc.ABCMeta):\n\n    @abc.abstractmethod\n    def batch(self, spec, batch_size):\n        raise NotImplementedError(f'{type(self).__name__}.batch')\n\n    @abc.abstractmethod\n    def unbatch(self, spec):\n        raise NotImplementedError(f'{type(self).__name__}.unbatch')\n\n    @abc.abstractmethod\n    def encode(self, spec, value, minimum_rank=0):\n        raise NotImplementedError(f'{type(self).__name__}.encode')\n\n    @abc.abstractmethod\n    def decode(self, spec, encoded_value):\n        raise NotImplementedError(f'{type(self).__name__}.decode')\n\n    @abc.abstractmethod\n    def encoding_specs(self, spec):\n        raise NotImplementedError(f'{type(self).__name__}.encoding_specs')",
    "docstring": "Class used to encode and decode composite tensor values for batching. In order to be batched and unbatched by APIs such as and , composite tensors must be encoded using flat tensors that can themselves be batched or unbatched. s are responsible for implementing this encoding. If a composite tensor's shape is a prefix of the shape of all of its component tensors, then this encoding can usually be performed by just returning those component tensors as a list. But if the composite tensor has components whose shape has a more complex relationship to the shape of the composite tensor, then a custom may need to be implemented.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\type_spec.py",
    "ast_data": "ClassDef name:TypeSpecBatchEncoder FunctionDef name:batch arg:self arg:spec arg:batch_size arguments arg arg arg Raise Call Call FunctionDef name:unbatch arg:self arg:spec arguments arg arg Raise Call Call FunctionDef name:encode arg:self arg:spec arg:value arg:minimum_rank arguments arg arg arg arg Raise Call Call FunctionDef name:decode arg:self arg:spec arg:encoded_value arguments arg arg arg Raise Call Call FunctionDef name:encoding_specs arg:self arg:spec arguments arg arg Raise Call Call"
  },
  {
    "library": "pytorch",
    "name": "elastic_launch",
    "source_code": "class elastic_launch:\n\n    def __init__(self, config: LaunchConfig, entrypoint: Union[Callable, str, None]):\n        self._config = config\n        self._entrypoint = entrypoint\n\n    def __call__(self, *args):\n        return launch_agent(self._config, self._entrypoint, list(args))",
    "docstring": "Launches an torchelastic agent on the container that invoked the entrypoint. 1. Pass the `` is the python module. outputs = elastic_launch(LaunchConfig, \"script.py\")(args) outputs = elastic_launch(LaunchConfig, \"python\")(\"script.py\")",
    "type": "class",
    "file_path": "pytorch\\torch\\distributed\\launcher\\api.py",
    "ast_data": "ClassDef name:elastic_launch FunctionDef name:__init__ arg:self arg:config arg:entrypoint arguments arg arg arg Assign Assign FunctionDef name:__call__ arg:self arguments arg arg Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "Softmax",
    "source_code": "class Softmax(Module):\n    __constants__ = ['dim']\n    dim: Optional[int]\n\n    def __init__(self, dim: Optional[int]=None) -> None:\n        super().__init__()\n        self.dim = dim\n\n    def __setstate__(self, state):\n        super().__setstate__(state)\n        if not hasattr(self, 'dim'):\n            self.dim = None\n\n    def forward(self, input: Tensor) -> Tensor:\n        return F.softmax(input, self.dim, _stacklevel=5)\n\n    def extra_repr(self) -> str:\n        return f'dim={self.dim}'",
    "docstring": "Applies the Softmax function to an n-dimensional input Tensor. Rescales them so that the elements of the n-dimensional output Tensor lie in the range [0,1] and sum to 1. Softmax is defined as: .. math:: \\text{Softmax}(x_{i}) = \\frac{\\exp(x_i)}{\\sum_j \\exp(x_j)} When the input Tensor is a sparse tensor then the unspecified values are treated as `(*)*(*)LogSoftmax` instead (it's faster and has better numerical properties). Examples:: >>> m = nn.Softmax(dim=1) >>> input = torch.randn(2, 3) >>> output = m(input)",
    "type": "class",
    "file_path": "pytorch\\torch\\nn\\modules\\activation.py",
    "ast_data": "ClassDef name:Softmax Assign FunctionDef name:__init__ arg:self arg:dim arguments arg arg Call Call Assign FunctionDef name:__setstate__ arg:self arg:state arguments arg arg Call Call If Call Assign FunctionDef name:forward arg:self arg:input arguments arg arg Return return:yes Call FunctionDef name:extra_repr arg:self arguments arg Return return:yes"
  },
  {
    "library": "sphinx",
    "name": "isgenericalias",
    "source_code": "def isgenericalias(obj: Any) -> TypeIs[types.GenericAlias]:\n    return isinstance(obj, types.GenericAlias | typing._BaseGenericAlias)",
    "docstring": "Check if the object is a generic alias.",
    "type": "function",
    "file_path": "sphinx\\sphinx\\util\\inspect.py",
    "ast_data": "FunctionDef name:isgenericalias arg:obj arguments arg Return return:yes Call"
  },
  {
    "library": "scrapy",
    "name": "_start_request_processing",
    "source_code": "@deferred_f_from_coro_f\nasync def _start_request_processing(self) -> None:\n    assert self._slot is not None\n    self._slot.nextcall.schedule()\n    self._slot.heartbeat.start(self._SLOT_HEARTBEAT_INTERVAL)\n    while self._start and self.spider:\n        await self._process_start_next()\n        if not self.needs_backout():\n            self._slot.nextcall.schedule()\n            await self._slot.nextcall.wait()",
    "docstring": "Starts consuming Spider.start() output and sending scheduled requests.",
    "type": "method",
    "file_path": "scrapy\\scrapy\\core\\engine.py",
    "ast_data": "AsyncFunctionDef name:_start_request_processing arg:self arguments arg Compare Call Call While BoolOp Call If Call Call Call"
  },
  {
    "library": "sphinx",
    "name": "AddTranslationClasses",
    "source_code": "class AddTranslationClasses(SphinxTransform):\n    default_priority = 950\n\n    def apply(self, **kwargs: Any) -> None:\n        from sphinx.builders.gettext import MessageCatalogBuilder\n        if isinstance(self.app.builder, MessageCatalogBuilder):\n            return\n        if not self.config.translation_progress_classes:\n            return\n        if self.config.translation_progress_classes is True:\n            add_translated = add_untranslated = True\n        elif self.config.translation_progress_classes == 'translated':\n            add_translated = True\n            add_untranslated = False\n        elif self.config.translation_progress_classes == 'untranslated':\n            add_translated = False\n            add_untranslated = True\n        else:\n            msg = 'translation_progress_classes must be True, False, \"translated\" or \"untranslated\"'\n            raise ConfigError(msg)\n        for node in NodeMatcher(nodes.Element, translated=Any).findall(self.document):\n            if node['translated']:\n                if add_translated:\n                    node.setdefault('classes', []).append('translated')\n            elif add_untranslated:\n                node.setdefault('classes', []).append('untranslated')",
    "docstring": "Add `` classes to indicate translation status.",
    "type": "class",
    "file_path": "sphinx\\sphinx\\transforms\\i18n.py",
    "ast_data": "ClassDef name:AddTranslationClasses Assign FunctionDef name:apply arg:self arguments arg arg If Call Return return:no If Return return:no If Compare Assign If Compare Assign Assign If Compare Assign Assign Assign Raise Call For Call Call If If Call Call If Call Call"
  },
  {
    "library": "scipy",
    "name": "_parse_local_version",
    "source_code": "def _parse_local_version(local):\n    if local is not None:\n        return tuple((part.lower() if not part.isdigit() else int(part) for part in _local_version_seperators.split(local)))",
    "docstring": "Takes a string like abc.1.twelve and turns it into (\"abc\", 1, \"twelve\").",
    "type": "function",
    "file_path": "scipy\\scipy\\_lib\\_pep440.py",
    "ast_data": "FunctionDef name:_parse_local_version arg:local arguments arg If Compare Return return:yes Call Call Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "get_consuming_ops",
    "source_code": "def get_consuming_ops(ts):\n    ts = make_list_of_t(ts, allow_graph=False)\n    tops = []\n    for t in ts:\n        for op in t.consumers():\n            if op not in tops:\n                tops.append(op)\n    return tops",
    "docstring": "Return all the consuming ops of the tensors in ts. Args: ts: a list of Returns: A list of all the consuming of the tensors in . Raises: TypeError: if ts cannot be converted to a list of .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\op_selector.py",
    "ast_data": "FunctionDef name:get_consuming_ops arg:ts arguments arg Assign Call Assign For For Call If Compare Call Return return:yes"
  },
  {
    "library": "django",
    "name": "geom_col_name",
    "source_code": "@classmethod\ndef geom_col_name(cls):\n    return 'f_geometry_column'",
    "docstring": "Return the name of the metadata column used to store the feature geometry column.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\db\\backends\\postgis\\models.py",
    "ast_data": "FunctionDef name:geom_col_name arg:cls arguments arg Return return:yes"
  },
  {
    "library": "django",
    "name": "register",
    "source_code": "def register(*models, site=None):\n    from django.contrib.admin import ModelAdmin\n    from django.contrib.admin.sites import AdminSite\n    from django.contrib.admin.sites import site as default_site\n\n    def _model_admin_wrapper(admin_class):\n        if not models:\n            raise ValueError('At least one model must be passed to register.')\n        admin_site = site or default_site\n        if not isinstance(admin_site, AdminSite):\n            raise ValueError('site must subclass AdminSite')\n        if not issubclass(admin_class, ModelAdmin):\n            raise ValueError('Wrapped class must subclass ModelAdmin.')\n        admin_site.register(models, admin_class=admin_class)\n        return admin_class\n    return _model_admin_wrapper",
    "docstring": "Register the given model(s) classes and wrapped ModelAdmin class with admin site: @register(Author) class AuthorAdmin(admin.ModelAdmin): pass The kwarg is an admin site to use instead of the default admin site.",
    "type": "function",
    "file_path": "django\\django\\contrib\\admin\\decorators.py",
    "ast_data": "FunctionDef name:register arguments arg arg FunctionDef name:_model_admin_wrapper arg:admin_class arguments arg If Raise Call Assign BoolOp If Call Raise Call If Call Raise Call Call Return return:yes Return return:yes"
  },
  {
    "library": "pandas",
    "name": "SpecificationError",
    "source_code": "class SpecificationError(Exception):\n    pass",
    "docstring": "Exception raised by `` on a Dataframe with duplicated functions names without assigning column name. See Also -------- DataFrame.agg : Aggregate using one or more operations over the specified axis. Series.agg : Aggregate using one or more operations over the specified axis. Examples -------- >>> df = pd.DataFrame({\"A\": [1, 1, 1, 2, 2], \"B\": range(5), \"C\": range(5)}) >>> df.groupby(\"A\").B.agg({\"foo\": \"count\"}) # doctest: +SKIP ... # SpecificationError: nested renamer is not supported >>> df.groupby(\"A\").agg({\"B\": {\"foo\": [\"sum\", \"max\"]}}) # doctest: +SKIP ... # SpecificationError: nested renamer is not supported >>> df.groupby(\"A\").agg([\"min\", \"min\"]) # doctest: +SKIP ... # SpecificationError: nested renamer is not supported",
    "type": "class",
    "file_path": "pandas\\pandas\\errors\\__init__.py",
    "ast_data": "ClassDef name:SpecificationError"
  },
  {
    "library": "cherrypy",
    "name": "start",
    "source_code": "def start(self):\n    opts = ''.join(['    PythonOption %s %s\\n' % (k, v) for k, v in self.opts])\n    conf_data = self.template % {'port': self.port, 'loc': self.loc, 'opts': opts, 'handler': self.handler}\n    mpconf = os.path.join(os.path.dirname(__file__), 'cpmodpy.conf')\n    with open(mpconf, 'wb') as f:\n        f.write(conf_data)\n    response = read_process(self.apache_path, '-k start -f %s' % mpconf)\n    self.ready = True\n    return response",
    "docstring": "Start an Apache2/httpd server.",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\_cpmodpy.py",
    "ast_data": "FunctionDef name:start arg:self arguments arg Assign Call Assign Assign Call Call With Call Call Assign Call Assign Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "get_transform",
    "source_code": "def get_transform(self):\n    return self.dpi_transform + self.offset_transform",
    "docstring": "Return the applied to the children.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\offsetbox.py",
    "ast_data": "FunctionDef name:get_transform arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "rename",
    "source_code": "@tf_export(v1=['gfile.Rename'])\ndef rename(oldname, newname, overwrite=False):\n    rename_v2(oldname, newname, overwrite)",
    "docstring": "Rename or move a file / directory. Args: oldname: string, pathname for a file newname: string, pathname to which the file needs to be moved overwrite: boolean, if false it's an error for to be occupied by an existing file. Raises: errors.OpError: If the operation fails.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\lib\\io\\file_io.py",
    "ast_data": "FunctionDef name:rename arg:oldname arg:newname arg:overwrite arguments arg arg arg Call Call"
  },
  {
    "library": "pytorch",
    "name": "_export_file",
    "source_code": "def _export_file(model_bytes: bytes, f: io.BytesIO | str, export_map: Mapping[str, bytes]) -> None:\n    assert len(export_map) == 0\n    with torch.serialization._open_file_like(f, 'wb') as opened_file:\n        opened_file.write(model_bytes)",
    "docstring": "export/write model bytes into directory/protobuf/zip",
    "type": "function",
    "file_path": "pytorch\\torch\\onnx\\_internal\\onnx_proto_utils.py",
    "ast_data": "FunctionDef name:_export_file arg:model_bytes arg:f arg:export_map arguments arg arg arg Compare Call With Call Call"
  },
  {
    "library": "authlib",
    "name": "generate_client_registration_info",
    "source_code": "def generate_client_registration_info(self, client, request):\n    return None",
    "docstring": "Generate ``` by default. Developers MAY rewrite this method to return registration information.",
    "type": "method",
    "file_path": "authlib\\authlib\\oauth2\\rfc7591\\endpoint.py",
    "ast_data": "FunctionDef name:generate_client_registration_info arg:self arg:client arg:request arguments arg arg arg Return return:no"
  },
  {
    "library": "pytorch",
    "name": "_decorator_func",
    "source_code": "def _decorator_func(wrapped_func, op, op_table):\n\n    @functools.wraps(wrapped_func)\n    def wrapper(types, args, kwargs, process_group):\n        _basic_validation(op, args, kwargs)\n        return wrapped_func(types, args, kwargs, process_group)\n    _register_op(op, wrapper, op_table)\n    return wrapper",
    "docstring": "Decorator function to register the given ``",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\_shard\\op_registry_utils.py",
    "ast_data": "FunctionDef name:_decorator_func arg:wrapped_func arg:op arg:op_table arguments arg arg arg FunctionDef name:wrapper arg:types arg:args arg:kwargs arg:process_group arguments arg arg arg arg Call Return return:yes Call Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "get",
    "source_code": "def get(self, o: Any) -> Any:\n    return getattr(o, self.inner_name)",
    "docstring": "Get the inner tensor attribute",
    "type": "method",
    "file_path": "pytorch\\torch\\fx\\experimental\\symbolic_shapes.py",
    "ast_data": "FunctionDef name:get arg:self arg:o arguments arg arg Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "_add_a_b",
    "source_code": "def _add_a_b(tests):\n    for d in tests:\n        for k, v in zip(['a', 'b'], d.get('bracket', [])):\n            d[k] = v",
    "docstring": "Add \"a\" and \"b\" keys to each test from the \"bracket\" value",
    "type": "function",
    "file_path": "scipy\\scipy\\optimize\\_tstutils.py",
    "ast_data": "FunctionDef name:_add_a_b arg:tests arguments arg For For Call Call Assign"
  },
  {
    "library": "pandas",
    "name": "__dir__",
    "source_code": "def __dir__(self) -> list[str]:\n    rv = set(super().__dir__())\n    rv = rv - self._dir_deletions() | self._dir_additions()\n    return sorted(rv)",
    "docstring": "Provide method name lookup and completion. Notes ----- Only provide 'public' methods.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\accessor.py",
    "ast_data": "FunctionDef name:__dir__ arg:self arguments arg Assign Call Call Call Assign Call Call Return return:yes Call"
  },
  {
    "library": "django",
    "name": "__imul__",
    "source_code": "def __imul__(self, n):\n    if n <= 0:\n        del self[:]\n    else:\n        cache = list(self)\n        for i in range(n - 1):\n            self.extend(cache)\n    return self",
    "docstring": "multiply",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\geos\\mutable_list.py",
    "ast_data": "FunctionDef name:__imul__ arg:self arg:n arguments arg arg If Compare Assign Call For Call Call Return return:yes"
  },
  {
    "library": "cryptography",
    "name": "not_valid_after",
    "source_code": "def not_valid_after(self, time: datetime.datetime) -> CertificateBuilder:\n    if not isinstance(time, datetime.datetime):\n        raise TypeError('Expecting datetime object.')\n    if self._not_valid_after is not None:\n        raise ValueError('The not valid after may only be set once.')\n    time = _convert_to_naive_utc_time(time)\n    if time < _EARLIEST_UTC_TIME:\n        raise ValueError('The not valid after date must be on or after 1950 January 1.')\n    if self._not_valid_before is not None and time < self._not_valid_before:\n        raise ValueError('The not valid after date must be after the not valid before date.')\n    return CertificateBuilder(self._issuer_name, self._subject_name, self._public_key, self._serial_number, self._not_valid_before, time, self._extensions)",
    "docstring": "Sets the certificate expiration time.",
    "type": "method",
    "file_path": "cryptography\\src\\cryptography\\x509\\base.py",
    "ast_data": "FunctionDef name:not_valid_after arg:self arg:time arguments arg arg If Call Raise Call If Compare Raise Call Assign Call If Compare Raise Call If BoolOp Compare Compare Raise Call Return return:yes Call"
  },
  {
    "library": "sphinx",
    "name": "add_cell",
    "source_code": "def add_cell(self, cell: Cell) -> None:\n    while self[self.current_line, self.current_col]:\n        self.current_col += 1\n    self[self.current_line, self.current_col] = cell\n    self.current_col += cell.colspan",
    "docstring": "Add a cell to the current line, to use with `` BEFORE inserting it into the table.",
    "type": "method",
    "file_path": "sphinx\\sphinx\\writers\\text.py",
    "ast_data": "FunctionDef name:add_cell arg:self arg:cell arguments arg arg While Assign"
  },
  {
    "library": "matplotlib",
    "name": "render",
    "source_code": "def render(self, output: Output, x: float, y: float) -> None:\n    pass",
    "docstring": "Render this node.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\_mathtext.py",
    "ast_data": "FunctionDef name:render arg:self arg:output arg:x arg:y arguments arg arg arg arg"
  },
  {
    "library": "scikit-learn",
    "name": "check_class_weight_balanced_linear_classifier",
    "source_code": "@ignore_warnings(category=FutureWarning)\ndef check_class_weight_balanced_linear_classifier(name, estimator_orig):\n    X = np.array([[-1.0, -1.0], [-1.0, 0], [-0.8, -1.0], [1.0, 1.0], [1.0, 0.0]])\n    y = np.array([1, 1, 1, -1, -1])\n    classifier = clone(estimator_orig)\n    if hasattr(classifier, 'n_iter'):\n        classifier.set_params(n_iter=1000)\n    if hasattr(classifier, 'max_iter'):\n        classifier.set_params(max_iter=1000)\n    if hasattr(classifier, 'cv'):\n        classifier.set_params(cv=3)\n    set_random_state(classifier)\n    classifier.set_params(class_weight='balanced')\n    coef_balanced = classifier.fit(X, y).coef_.copy()\n    n_samples = len(y)\n    n_classes = float(len(np.unique(y)))\n    class_weight = {1: n_samples / (np.sum(y == 1) * n_classes), -1: n_samples / (np.sum(y == -1) * n_classes)}\n    classifier.set_params(class_weight=class_weight)\n    coef_manual = classifier.fit(X, y).coef_.copy()\n    assert_allclose(coef_balanced, coef_manual, err_msg='Classifier %s is not computing class_weight=balanced properly.' % name)",
    "docstring": "Test class weights with non-contiguous class labels.",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\utils\\estimator_checks.py",
    "ast_data": "FunctionDef name:check_class_weight_balanced_linear_classifier arg:name arg:estimator_orig arguments arg arg Assign Call Assign Call Assign Call If Call Call If Call Call If Call Call Call Call Assign Call Call Assign Call Assign Call Call Call Assign Call Compare Call Compare Call Assign Call Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_lower_triangular_mask",
    "source_code": "def _lower_triangular_mask(shape):\n    row_index = math_ops.cumsum(array_ops.ones(shape=shape, dtype=dtypes.int32), axis=-2)\n    col_index = math_ops.cumsum(array_ops.ones(shape=shape, dtype=dtypes.int32), axis=-1)\n    return math_ops.greater_equal(row_index, col_index)",
    "docstring": "Creates a lower-triangular boolean mask over the last 2 dimensions.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\layers\\dense_attention.py",
    "ast_data": "FunctionDef name:_lower_triangular_mask arg:shape arguments arg Assign Call Call Assign Call Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_set_inputs",
    "source_code": "def _set_inputs(self, inputs, outputs=None, training=None):\n    self._set_save_spec(inputs)",
    "docstring": "This method is for compat with Modelv1. Only inputs are needed here.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\training.py",
    "ast_data": "FunctionDef name:_set_inputs arg:self arg:inputs arg:outputs arg:training arguments arg arg arg arg Call"
  },
  {
    "library": "tensorflow",
    "name": "_parameters",
    "source_code": "@_parameters.setter\ndef _parameters(self, value):\n    if 'self' in value:\n        del value['self']\n    self._parameter_dict = value",
    "docstring": "Intercept assignments to self._parameters to avoid reference cycles. Parameters are often created using locals(), so we need to clean out any references to before assigning it to an attribute. Args: value: A dictionary of parameters to assign to the property.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\distributions\\distribution.py",
    "ast_data": "FunctionDef name:_parameters arg:self arg:value arguments arg arg If Compare Assign"
  },
  {
    "library": "matplotlib",
    "name": "_height_depth_of",
    "source_code": "def _height_depth_of(self, char):\n    metrics = self._tfm.get_metrics(char)\n    if metrics is None:\n        _log.debug('No metrics for char %d in font %s', char, self.texname)\n        return [0, 0]\n    hd = [_mul1220(metrics.tex_height, self._scale), _mul1220(metrics.tex_depth, self._scale)]\n    if re.match(b'^cmsy\\\\d+$', self.texname) and char == 0:\n        hd[-1] = 0\n    return hd",
    "docstring": "Height and depth of char in dvi units.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\dviread.py",
    "ast_data": "FunctionDef name:_height_depth_of arg:self arg:char arguments arg arg Assign Call If Compare Call Return return:yes Assign Call Call If BoolOp Call Compare Assign Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "get_positions",
    "source_code": "def get_positions(self):\n    pos = 0 if self.is_horizontal() else 1\n    return [segment[0, pos] for segment in self.get_segments()]",
    "docstring": "Return an array containing the floating-point values of the positions.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\collections.py",
    "ast_data": "FunctionDef name:get_positions arg:self arguments arg Assign Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "find_parentheses_group",
    "source_code": "def find_parentheses_group(input_string, start):\n    return find_closure_group(input_string, start, group=['(', ')'])",
    "docstring": "Finds the first balanced bracket.",
    "type": "function",
    "file_path": "pytorch\\torch\\utils\\hipify\\hipify_python.py",
    "ast_data": "FunctionDef name:find_parentheses_group arg:input_string arg:start arguments arg arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "_infer_ep_from_graph_module",
    "source_code": "def _infer_ep_from_graph_module(graph_module: torch.fx.GraphModule) -> tuple[str, ...]:\n    flattened_output_args, _ = _pytree.tree_flatten(_extract_graph_module_outputs(graph_module))\n    selected_output_args = [output_arg.meta['val'] for output_arg in flattened_output_args if hasattr(output_arg, 'meta') and 'val' in output_arg.meta]\n    return _infer_ep_from_device(*selected_output_args)",
    "docstring": "Return the all valid devices (i.e., GPU or CPU) among outputs of this torch.fx.GraphModule.",
    "type": "function",
    "file_path": "pytorch\\torch\\onnx\\_internal\\onnxruntime.py",
    "ast_data": "FunctionDef name:_infer_ep_from_graph_module arg:graph_module arguments arg Assign Call Call Assign BoolOp Call Compare Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "generate_shared_aggregator",
    "source_code": "def generate_shared_aggregator(func: Callable[..., Scalar], dtype_mapping: dict[np.dtype, np.dtype], is_grouped_kernel: bool, nopython: bool, nogil: bool, parallel: bool):\n\n    def looper_wrapper(values, start=None, end=None, labels=None, ngroups=None, min_periods: int=0, **kwargs):\n        result_dtype = dtype_mapping[values.dtype]\n        column_looper = make_looper(func, result_dtype, is_grouped_kernel, nopython, nogil, parallel)\n        if is_grouped_kernel:\n            result, na_positions = column_looper(values, labels, ngroups, min_periods, *kwargs.values())\n        else:\n            result, na_positions = column_looper(values, start, end, min_periods, *kwargs.values())\n        if result.dtype.kind == 'i':\n            for na_pos in na_positions.values():\n                if len(na_pos) > 0:\n                    result = result.astype('float64')\n                    break\n        for i, na_pos in na_positions.items():\n            if len(na_pos) > 0:\n                result[i, na_pos] = np.nan\n        return result\n    return looper_wrapper",
    "docstring": "Generate a Numba function that loops over the columns 2D object and applies a 1D numba kernel over each column. Parameters ---------- func : function aggregation function to be applied to each column dtype_mapping: dict or None If not None, maps a dtype to a result dtype. Otherwise, will fall back to default mapping. is_grouped_kernel: bool, default False Whether func operates using the group labels (True) or using starts/ends arrays If true, you also need to pass the number of groups to this function nopython : bool nopython to be passed into numba.jit nogil : bool nogil to be passed into numba.jit parallel : bool parallel to be passed into numba.jit Returns ------- Numba function",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\_numba\\executor.py",
    "ast_data": "FunctionDef name:generate_shared_aggregator arg:func arg:dtype_mapping arg:is_grouped_kernel arg:nopython arg:nogil arg:parallel arguments arg arg arg arg arg arg FunctionDef name:looper_wrapper arg:values arg:start arg:end arg:labels arg:ngroups arg:min_periods arguments arg arg arg arg arg arg arg Assign Assign Call If Assign Call Call Assign Call Call If Compare For Call If Compare Call Assign Call For Call If Compare Call Assign Return return:yes Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "alive",
    "source_code": "def alive(self) -> bool:\n    return self.running and self.process.poll() is None",
    "docstring": "True if the subprocess is still running.",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\autotune_process.py",
    "ast_data": "FunctionDef name:alive arg:self arguments arg Return return:yes BoolOp Compare Call"
  },
  {
    "library": "matplotlib",
    "name": "get_transform",
    "source_code": "def get_transform(self):\n    if self._user_transform is None:\n        return self._transform.frozen()\n    else:\n        return (self._transform + self._user_transform).frozen()",
    "docstring": "Return the transform to be applied to the from .",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\markers.py",
    "ast_data": "FunctionDef name:get_transform arg:self arguments arg If Compare Return return:yes Call Return return:yes Call"
  },
  {
    "library": "pygame",
    "name": "Group",
    "source_code": "class Group(AbstractGroup):\n\n    def __init__(self, *sprites):\n        AbstractGroup.__init__(self)\n        self.add(*sprites)",
    "docstring": "container class for many Sprites pygame.sprite.Group(*sprites): return Group A simple container for Sprite objects. This class can be subclassed to create containers with more specific behaviors. The constructor takes any number of Sprite arguments to add to the Group. The group supports the following standard Python operations: in test if a Sprite is contained len the number of Sprites contained bool test if any Sprites are contained iter iterate through all the Sprites The Sprites in the Group are not ordered, so the Sprites are drawn and iterated over in no particular order.",
    "type": "class",
    "file_path": "pygame\\src_py\\sprite.py",
    "ast_data": "ClassDef name:Group FunctionDef name:__init__ arg:self arguments arg arg Call Call"
  },
  {
    "library": "matplotlib",
    "name": "random_ports",
    "source_code": "def random_ports(port, n):\n    for i in range(min(5, n)):\n        yield (port + i)\n    for i in range(n - 5):\n        yield (port + random.randint(-2 * n, 2 * n))",
    "docstring": "Generate a list of n random ports near the given port. The first 5 ports will be sequential, and the remaining n-5 will be randomly selected in the range [port-2*n, port+2*n].",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\backends\\backend_webagg.py",
    "ast_data": "FunctionDef name:random_ports arg:port arg:n arguments arg arg For Call Call For Call Call"
  },
  {
    "library": "scipy",
    "name": "_read_int64",
    "source_code": "def _read_int64(f):\n    return np.int64(struct.unpack('>q', f.read(8))[0])",
    "docstring": "Read a signed 64-bit integer",
    "type": "function",
    "file_path": "scipy\\scipy\\io\\_idl.py",
    "ast_data": "FunctionDef name:_read_int64 arg:f arguments arg Return return:yes Call Call Call"
  },
  {
    "library": "scikit-learn",
    "name": "inverse_transform",
    "source_code": "def inverse_transform(self, X):\n    check_is_fitted(self)\n    unil, inverse = np.unique(self.labels_, return_inverse=True)\n    return X[..., inverse]",
    "docstring": "Inverse the transformation and return a vector of size . Parameters ---------- X : array-like of shape (n_samples, n_clusters) or (n_clusters,) The values to be assigned to each cluster of samples. Returns ------- X_original : ndarray of shape (n_samples, n_features) or (n_features,) A vector of size with the values of assigned to each of the cluster of samples.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\cluster\\_feature_agglomeration.py",
    "ast_data": "FunctionDef name:inverse_transform arg:self arg:X arguments arg arg Call Assign Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_new_shared",
    "source_code": "@classmethod\ndef _new_shared(cls, size):\n    untyped_storage = torch.UntypedStorage._new_shared(size * cls()._element_size())\n    return cls(wrap_storage=untyped_storage)",
    "docstring": "Create a new storage in shared memory with the same data type.",
    "type": "method",
    "file_path": "pytorch\\torch\\storage.py",
    "ast_data": "FunctionDef name:_new_shared arg:cls arg:size arguments arg arg Assign Call Call Call Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "_is_full_circle_deg",
    "source_code": "def _is_full_circle_deg(thetamin, thetamax):\n    return abs(abs(thetamax - thetamin) - 360.0) < 1e-12",
    "docstring": "Determine if a wedge (in degrees) spans the full circle. The condition is derived from :class:.",
    "type": "function",
    "file_path": "matplotlib\\lib\\matplotlib\\projections\\polar.py",
    "ast_data": "FunctionDef name:_is_full_circle_deg arg:thetamin arg:thetamax arguments arg arg Return return:yes Compare Call Call"
  },
  {
    "library": "matplotlib",
    "name": "_reset_epoch_test_example",
    "source_code": "def _reset_epoch_test_example():\n    global _epoch\n    _epoch = None",
    "docstring": "Reset the Matplotlib date epoch so it can be set again. Only for use in tests and examples.",
    "type": "function",
    "file_path": "matplotlib\\lib\\matplotlib\\dates.py",
    "ast_data": "FunctionDef name:_reset_epoch_test_example arguments Assign"
  },
  {
    "library": "tensorflow",
    "name": "__call__",
    "source_code": "def __call__(self, shape, dtype=None, partition_info=None):\n    raise NotImplementedError",
    "docstring": "Returns a tensor object initialized as specified by the initializer. Args: shape: Shape of the tensor. dtype: Optional dtype of the tensor. If not provided use the initializer dtype. partition_info: Optional information about the possible partitioning of a tensor.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\init_ops.py",
    "ast_data": "FunctionDef name:__call__ arg:self arg:shape arg:dtype arg:partition_info arguments arg arg arg arg Raise"
  },
  {
    "library": "scikit-learn",
    "name": "request_is_alias",
    "source_code": "def request_is_alias(item):\n    if item in VALID_REQUEST_VALUES:\n        return False\n    return isinstance(item, str) and item.isidentifier()",
    "docstring": "Check if an item is a valid alias. Values in `` are not considered aliases in this context. Only a string which is a valid identifier is. Parameters ---------- item : object The given item to be checked if it can be an alias. Returns ------- result : bool Whether the given item is a valid alias.",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\utils\\_metadata_requests.py",
    "ast_data": "FunctionDef name:request_is_alias arg:item arguments arg If Compare Return return:yes Return return:yes BoolOp Call Call"
  },
  {
    "library": "tensorflow",
    "name": "close",
    "source_code": "def close(self):\n    super(TFRecordWriter, self).close()",
    "docstring": "Close the file.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\lib\\io\\tf_record.py",
    "ast_data": "FunctionDef name:close arg:self arguments arg Call Call"
  },
  {
    "library": "matplotlib",
    "name": "_on_paint",
    "source_code": "def _on_paint(self, event):\n    _log.debug('%s - _on_paint()', type(self))\n    drawDC = wx.PaintDC(self)\n    if not self._isDrawn:\n        self.draw(drawDC=drawDC)\n    else:\n        self.gui_repaint(drawDC=drawDC)\n    drawDC.Destroy()",
    "docstring": "Called when wxPaintEvt is generated.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\backends\\backend_wx.py",
    "ast_data": "FunctionDef name:_on_paint arg:self arg:event arguments arg arg Call Call Assign Call If Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_FakeOperation",
    "source_code": "class _FakeOperation(object):\n\n    def __init__(self):\n        self.device = ''\n        self.type = ''\n        self.name = ''\n        self.node_def = _FakeNodeDef()\n\n    def _set_device(self, device):\n        self.device = ops._device_string(device)\n\n    def _set_device_from_string(self, device_str):\n        self.device = device_str",
    "docstring": "A fake Operation object to pass to device functions.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\device_util.py",
    "ast_data": "ClassDef name:_FakeOperation FunctionDef name:__init__ arg:self arguments arg Assign Assign Assign Assign Call FunctionDef name:_set_device arg:self arg:device arguments arg arg Assign Call FunctionDef name:_set_device_from_string arg:self arg:device_str arguments arg arg Assign"
  },
  {
    "library": "tensorflow",
    "name": "annotate_source_against_profile",
    "source_code": "def annotate_source_against_profile(profile_data, source_file_path, node_name_filter=None, op_type_filter=None, min_line=None, max_line=None):\n    source_file_path = _norm_abs_path(source_file_path)\n    node_name_regex = re.compile(node_name_filter) if node_name_filter else None\n    op_type_regex = re.compile(op_type_filter) if op_type_filter else None\n    line_to_profile_summary = {}\n    for profile_datum in profile_data:\n        if not profile_datum.file_path:\n            continue\n        if _norm_abs_path(profile_datum.file_path) != source_file_path:\n            continue\n        if min_line is not None and profile_datum.line_number < min_line or (max_line is not None and profile_datum.line_number >= max_line):\n            continue\n        if node_name_regex and (not node_name_regex.match(profile_datum.node_exec_stats.node_name)):\n            continue\n        if op_type_regex and (not op_type_regex.match(profile_datum.op_type)):\n            continue\n        if profile_datum.line_number not in line_to_profile_summary:\n            line_to_profile_summary[profile_datum.line_number] = profiling.AggregateProfile(profile_datum)\n        else:\n            line_to_profile_summary[profile_datum.line_number].add(profile_datum)\n    return line_to_profile_summary",
    "docstring": "Annotate a Python source file with profiling information at each line. (The annotation doesn't change the source file itself.) Args: profile_data: ( of ) A list of . source_file_path: () Path to the source file being annotated. node_name_filter: Regular expression to filter by node name. op_type_filter: Regular expression to filter by op type. min_line: ( or ) The 1-based line to start annotate the source file from (inclusive). max_line: ( or ) The 1-based line number to end the annotation at (exclusive). Returns: A mapping 1-based line number to a the namedtuple .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\debug\\lib\\source_utils.py",
    "ast_data": "FunctionDef name:annotate_source_against_profile arg:profile_data arg:source_file_path arg:node_name_filter arg:op_type_filter arg:min_line arg:max_line arguments arg arg arg arg arg arg Assign Call Assign Call Assign Call Assign For If If Compare Call If BoolOp BoolOp Compare Compare BoolOp Compare Compare If BoolOp Call If BoolOp Call If Compare Assign Call Call Return return:yes"
  },
  {
    "library": "pygame",
    "name": "as_machine_type",
    "source_code": "def as_machine_type(size):\n    if size == 32:\n        return 'x86'\n    if size == 64:\n        return 'x64'\n    raise ValueError('Unknown pointer size {}'.format(size))",
    "docstring": "Return pointer bit size as a Windows machine type",
    "type": "function",
    "file_path": "pygame\\buildconfig\\config_win.py",
    "ast_data": "FunctionDef name:as_machine_type arg:size arguments arg If Compare Return return:yes If Compare Return return:yes Raise Call Call"
  },
  {
    "library": "pytorch",
    "name": "gen_attr_descriptor_import",
    "source_code": "@lru_cache(None)\ndef gen_attr_descriptor_import() -> str:\n    if not has_triton_package():\n        return ''\n    import triton.compiler.compiler\n    if hasattr(triton.compiler.compiler, 'AttrsDescriptor'):\n        return 'from triton.compiler.compiler import AttrsDescriptor'\n    else:\n        return ''",
    "docstring": "import AttrsDescriptor if the triton version is new enough to have this class defined.",
    "type": "function",
    "file_path": "pytorch\\torch\\_inductor\\codegen\\triton.py",
    "ast_data": "FunctionDef name:gen_attr_descriptor_import arguments If Call Return return:yes If Call Return return:yes Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "install_global",
    "source_code": "def install_global(self, prefix, value) -> str:\n    name = unique_id(prefix)\n    self.install_global_unsafe(name, value)\n    return name",
    "docstring": "Installs a global, generating a unique name for it. Returns the name of the newly installed global.",
    "type": "method",
    "file_path": "pytorch\\torch\\_dynamo\\output_graph.py",
    "ast_data": "FunctionDef name:install_global arg:self arg:prefix arg:value arguments arg arg arg Assign Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "__nested_list_to_tuple",
    "source_code": "@staticmethod\ndef __nested_list_to_tuple(value):\n    if isinstance(value, list):\n        return tuple((TypeSpec.__nested_list_to_tuple(v) for v in value))\n    return value",
    "docstring": "Converts a nested list to a corresponding nested tuple.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\type_spec.py",
    "ast_data": "FunctionDef name:__nested_list_to_tuple arg:value arguments arg If Call Return return:yes Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "call_for_each_replica",
    "source_code": "def call_for_each_replica(strategy, fn, args=None, kwargs=None):\n    if args is None:\n        args = ()\n    if kwargs is None:\n        kwargs = {}\n    if isinstance(fn, def_function.Function):\n        if fn._jit_compile and all([_is_gpu_device(d) for d in strategy.extended.worker_devices]):\n            return _call_for_each_replica(strategy, fn, args, kwargs)\n        if strategy not in _cfer_fn_cache:\n            _cfer_fn_cache[strategy] = weakref.WeakKeyDictionary()\n        wrapped = _cfer_fn_cache[strategy].get(fn)\n        if wrapped is None:\n\n            def wrapped_fn(*args, **kwargs):\n                return call_for_each_replica(strategy, fn.python_function, args, kwargs)\n            wrapped = fn._clone(python_function=wrapped_fn)\n            _cfer_fn_cache[strategy][fn] = wrapped\n        return wrapped(*args, **kwargs)\n    if context.executing_eagerly():\n        logging.log_first_n(logging.WARN, 'Using %s eagerly has significant overhead currently. We will be working on improving this in the future, but for now please wrap `call_for_each_replica` or `experimental_run` or `run` inside a tf.function to get the best performance.' % strategy.__class__.__name__, 5)\n    else:\n        fn = autograph.tf_convert(fn, autograph_ctx.control_status_ctx())\n    return _call_for_each_replica(strategy, fn, args, kwargs)",
    "docstring": "Call on each worker devices(replica). It's highly recommended to wrap the call to this function inside a , otherwise the performance is poor. Args: strategy: . fn: function to call on each worker devices. args: positional arguments to . kwargs: keyword arguments to . Returns: Wrapped returned value of from all replicas.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\mirrored_run.py",
    "ast_data": "FunctionDef name:call_for_each_replica arg:strategy arg:fn arg:args arg:kwargs arguments arg arg arg arg If Compare Assign If Compare Assign If Call If BoolOp Call Call Return return:yes Call If Compare Assign Call Assign Call If Compare FunctionDef name:wrapped_fn arguments arg arg Return return:yes Call Assign Call Assign Return return:yes Call If Call Call Assign Call Call Return return:yes Call"
  },
  {
    "library": "numpy",
    "name": "_keep_fields",
    "source_code": "def _keep_fields(base, keep_names, usemask=True, asrecarray=False):\n    newdtype = [(n, base.dtype[n]) for n in keep_names]\n    output = np.empty(base.shape, dtype=newdtype)\n    output = recursive_fill_fields(base, output)\n    return _fix_output(output, usemask=usemask, asrecarray=asrecarray)",
    "docstring": "Return a new array keeping only the fields in , and preserving the order of those fields. Parameters ---------- base : array Input array keep_names : string or sequence String or sequence of strings corresponding to the names of the fields to keep. Order of the names will be preserved. usemask : {False, True}, optional Whether to return a masked array or not. asrecarray : string or sequence, optional Whether to return a recarray or a mrecarray () or a plain ndarray or masked array with flexible dtype. The default is False.",
    "type": "function",
    "file_path": "numpy\\numpy\\lib\\recfunctions.py",
    "ast_data": "FunctionDef name:_keep_fields arg:base arg:keep_names arg:usemask arg:asrecarray arguments arg arg arg arg Assign Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "_remove_extra_dequantize",
    "source_code": "def _remove_extra_dequantize(m: GraphModule):\n    dq_op = torch.ops.quantized_decomposed.dequantize_per_tensor\n    for n in m.graph.nodes:\n        dq_users = [user for user in n.users if user.op == 'call_function' and user.target == dq_op]\n        if len(dq_users) > 1:\n            with m.graph.inserting_after(dq_users[0]):\n                new_node = m.graph.create_node('call_function', dq_op, dq_users[0].args, {})\n            for dq_user in dq_users:\n                dq_user.replace_all_uses_with(new_node)\n                m.graph.erase_node(dq_user)\n    m.recompile()",
    "docstring": "Removes duplicate dequant nodes in the graph, for an operator that has multiple dequant nodes as a user, replace them with a single dequant node that can be shared across all the uses. This should be seen as the \"reverse\" of .",
    "type": "function",
    "file_path": "pytorch\\torch\\ao\\quantization\\pt2e\\qat_utils.py",
    "ast_data": "FunctionDef name:_remove_extra_dequantize arg:m arguments arg Assign For Assign BoolOp Compare Compare If Compare Call With Call Assign Call For Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "task_id",
    "source_code": "@property\ndef task_id(self):\n    return self._task_id",
    "docstring": "Returns the id or index of the corresponding task.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\distribute_coordinator.py",
    "ast_data": "FunctionDef name:task_id arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_distribute",
    "source_code": "def _distribute(processing_mode, service, job_name=None, consumer_index=None, num_consumers=None, max_outstanding_requests=None, task_refresh_interval_hint_ms=None, data_transfer_protocol=None, compression='AUTO', cross_trainer_cache=None, target_workers='AUTO') -> Callable[dataset_ops.Dataset, dataset_ops.Dataset]:\n    processing_mode = _get_validated_sharding_policy(processing_mode)\n    _validate_compression(compression)\n\n    def _apply_fn(dataset) -> dataset_ops.Dataset:\n        dataset_id = _register_dataset(service, dataset, compression=compression)\n        return _from_dataset_id(processing_mode, service, dataset_id, dataset.element_spec, job_name=job_name, consumer_index=consumer_index, num_consumers=num_consumers, max_outstanding_requests=max_outstanding_requests, task_refresh_interval_hint_ms=task_refresh_interval_hint_ms, data_transfer_protocol=data_transfer_protocol, cross_trainer_cache=cross_trainer_cache, target_workers=target_workers)\n    return _apply_fn",
    "docstring": "A transformation that moves dataset processing to the tf.data service. This transformation is similar to , but supports additional parameters which we do not yet want to add to the public Python API. Args: processing_mode: A specifying how to shard the dataset among tf.data workers. See for details. For backwards compatibility, may also be set to the strings or , which are respectively equivalent to and . service: A string or a tuple indicating how to connect to the tf.data service. If it's a string, it should be in the format , where `0num_consumersnum_consumersconsumer_indexnum_consumersdistributeelement_sizemax_outstanding_requestsNoneCrossTrainerCache\"AUTO\"\"ANY\"\"LOCAL\"\"AUTO\"\"LOCAL\"target_workers\"AUTO\"Dataset` of the elements produced by the data service.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\data\\experimental\\ops\\data_service_ops.py",
    "ast_data": "FunctionDef name:_distribute arg:processing_mode arg:service arg:job_name arg:consumer_index arg:num_consumers arg:max_outstanding_requests arg:task_refresh_interval_hint_ms arg:data_transfer_protocol arg:compression arg:cross_trainer_cache arg:target_workers arguments arg arg arg arg arg arg arg arg arg arg arg Assign Call Call FunctionDef name:_apply_fn arg:dataset arguments arg Assign Call Return return:yes Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_unshard",
    "source_code": "@no_type_check\ndef _unshard(state: _FSDPState, handle: FlatParamHandle, unshard_stream: torch.Stream, pre_unshard_stream: torch.Stream) -> None:\n    if not handle:\n        return\n    with state._device_handle.stream(pre_unshard_stream):\n        ran_pre_unshard = handle.pre_unshard()\n    if ran_pre_unshard:\n        unshard_stream.wait_stream(pre_unshard_stream)\n    if state.limit_all_gathers:\n        event = state._free_event_queue.dequeue_if_needed()\n        if event:\n            with torch.profiler.record_function('FullyShardedDataParallel.rate_limiter'):\n                event.synchronize()\n    with state._device_handle.stream(unshard_stream):\n        handle.unshard()\n        handle.post_unshard()",
    "docstring": "Unshards the handles in `summon_full_params` 's data is the padded unsharded flat parameter on the compute device.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\fsdp\\_runtime_utils.py",
    "ast_data": "FunctionDef name:_unshard arg:state arg:handle arg:unshard_stream arg:pre_unshard_stream arguments arg arg arg arg If Return return:no With Call Assign Call If Call If Assign Call If With Call Call With Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "quantize",
    "source_code": "def quantize(model, run_fn, run_args, mapping=None, inplace=False):\n    torch._C._log_api_usage_once('quantization_api.quantize.quantize')\n    if mapping is None:\n        mapping = get_default_static_quant_module_mappings()\n    if not inplace:\n        model = copy.deepcopy(model)\n    model.eval()\n    prepare(model, inplace=True)\n    run_fn(model, *run_args)\n    convert(model, mapping, inplace=True)\n    return model",
    "docstring": "Quantize the input float model with post training static quantization. First it will prepare the model for calibration, then it calls which will run the calibration step, after that we will convert the model to a quantized model. Args: model: input float model run_fn: a calibration function for calibrating the prepared model run_args: positional arguments for inplace: carry out model transformations in-place, the original module is mutated mapping: correspondence between original module types and quantized counterparts Return: Quantized model.",
    "type": "function",
    "file_path": "pytorch\\torch\\ao\\quantization\\quantize.py",
    "ast_data": "FunctionDef name:quantize arg:model arg:run_fn arg:run_args arg:mapping arg:inplace arguments arg arg arg arg arg Call If Compare Assign Call If Assign Call Call Call Call Call Return return:yes"
  },
  {
    "library": "numpy",
    "name": "common_type",
    "source_code": "@array_function_dispatch(_common_type_dispatcher)\ndef common_type(*arrays):\n    is_complex = False\n    precision = 0\n    for a in arrays:\n        t = a.dtype.type\n        if iscomplexobj(a):\n            is_complex = True\n        if issubclass(t, _nx.integer):\n            p = 2\n        else:\n            p = array_precision.get(t)\n            if p is None:\n                raise TypeError(\"can't get common type for non-numeric array\")\n        precision = max(precision, p)\n    if is_complex:\n        return array_type[1][precision]\n    else:\n        return array_type[0][precision]",
    "docstring": "Return a scalar type which is common to the input arrays. The return type will always be an inexact (i.e. floating point) scalar type, even if all the arrays are integer arrays. If one of the inputs is an integer array, the minimum precision type that is returned is a 64-bit floating point dtype. All input arrays except int64 and uint64 can be safely cast to the returned dtype without loss of information. Parameters ---------- array1, array2, ... : ndarrays Input arrays. Returns ------- out : data type code Data type code. See Also -------- dtype, mintypecode Examples -------- >>> np.common_type(np.arange(2, dtype=np.float32)) >>> np.common_type(np.arange(2, dtype=np.float32), np.arange(2)) >>> np.common_type(np.arange(4), np.array([45, 6.j]), np.array([45.0]))",
    "type": "function",
    "file_path": "numpy\\numpy\\lib\\_type_check_impl.py",
    "ast_data": "FunctionDef name:common_type arguments arg Assign Assign For Assign If Call Assign If Call Assign Assign Call If Compare Raise Call Assign Call If Return return:yes Return return:yes Call"
  },
  {
    "library": "cryptography",
    "name": "public_numbers",
    "source_code": "@abc.abstractmethod\ndef public_numbers(self) -> DSAPublicNumbers:\n    pass",
    "docstring": "Returns a DSAPublicNumbers.",
    "type": "method",
    "file_path": "cryptography\\src\\cryptography\\hazmat\\primitives\\asymmetric\\dsa.py",
    "ast_data": "FunctionDef name:public_numbers arg:self arguments arg"
  },
  {
    "library": "pytorch",
    "name": "Variadic",
    "source_code": "class Variadic(metaclass=VariadicSignatureMeta):\n    pass",
    "docstring": "A class whose getitem method can be used to generate a new type representing a specific variadic signature. Examples -------- >>> # xdoctest: +SKIP >>> Variadic[int] # any number of int arguments >>> Variadic[(int, str)] # any number of one of int or str arguments >>> issubclass(int, Variadic[int]) True >>> issubclass(int, Variadic[(int, str)]) True >>> issubclass(str, Variadic[(int, str)]) True >>> issubclass(float, Variadic[(int, str)]) False",
    "type": "class",
    "file_path": "pytorch\\torch\\fx\\experimental\\unification\\multipledispatch\\variadic.py",
    "ast_data": "ClassDef name:Variadic"
  },
  {
    "library": "scikit-learn",
    "name": "process_tempita",
    "source_code": "def process_tempita(fromfile, outfile=None):\n    with open(fromfile, 'r', encoding='utf-8') as f:\n        template_content = f.read()\n    template = tempita.Template(template_content)\n    content = template.substitute()\n    with open(outfile, 'w', encoding='utf-8') as f:\n        f.write(content)",
    "docstring": "Process tempita templated file and write out the result. The template file is expected to end in or : E.g. processing generates .",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\_build_utils\\tempita.py",
    "ast_data": "FunctionDef name:process_tempita arg:fromfile arg:outfile arguments arg arg With Call Assign Call Assign Call Assign Call With Call Call"
  },
  {
    "library": "scikit-learn",
    "name": "_encode_attribute",
    "source_code": "def _encode_attribute(self, name, type_):\n    for char in ' %{},':\n        if char in name:\n            name = '\"%s\"' % name\n            break\n    if isinstance(type_, (tuple, list)):\n        type_tmp = ['%s' % encode_string(type_k) for type_k in type_]\n        type_ = '{%s}' % ', '.join(type_tmp)\n    return '%s %s %s' % (_TK_ATTRIBUTE, name, type_)",
    "docstring": "(INTERNAL) Encodes an attribute line. The attribute follow the template:: @attribute where `` must be a list of values. :param name: a string. :param type_: a string or a list of string. :return: a string with the encoded attribute declaration.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\externals\\_arff.py",
    "ast_data": "FunctionDef name:_encode_attribute arg:self arg:name arg:type_ arguments arg arg arg For If Compare Assign If Call Assign Call Assign Call Return return:yes"
  },
  {
    "library": "numpy",
    "name": "A1",
    "source_code": "@property\ndef A1(self):\n    return self.__array__().ravel()",
    "docstring": "Return as a flattened . Equivalent to `selfndarray` Examples -------- >>> x = np.matrix(np.arange(12).reshape((3,4))); x matrix([[ 0, 1, 2, 3], [ 4, 5, 6, 7], [ 8, 9, 10, 11]]) >>> x.getA1() array([ 0, 1, 2, ..., 9, 10, 11])",
    "type": "method",
    "file_path": "numpy\\numpy\\matrixlib\\defmatrix.py",
    "ast_data": "FunctionDef name:A1 arg:self arguments arg Return return:yes Call Call"
  },
  {
    "library": "scikit-learn",
    "name": "_SigmoidCalibration",
    "source_code": "class _SigmoidCalibration(RegressorMixin, BaseEstimator):\n\n    def fit(self, X, y, sample_weight=None):\n        X = column_or_1d(X)\n        y = column_or_1d(y)\n        X, y = indexable(X, y)\n        self.a_, self.b_ = _sigmoid_calibration(X, y, sample_weight)\n        return self\n\n    def predict(self, T):\n        T = column_or_1d(T)\n        return expit(-(self.a_ * T + self.b_))",
    "docstring": "Sigmoid regression model. Attributes ---------- a_ : float The slope. b_ : float The intercept.",
    "type": "class",
    "file_path": "scikit-learn\\sklearn\\calibration.py",
    "ast_data": "ClassDef name:_SigmoidCalibration FunctionDef name:fit arg:self arg:X arg:y arg:sample_weight arguments arg arg arg arg Assign Call Assign Call Assign Call Assign Call Return return:yes FunctionDef name:predict arg:self arg:T arguments arg arg Assign Call Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "predict",
    "source_code": "def predict(self, X):\n    check_is_fitted(self)\n    X = validate_data(self, X, accept_sparse=('csr', 'csc'), reset=False)\n    K = self._get_kernel(X, self.X_fit_)\n    return np.dot(K, self.dual_coef_)",
    "docstring": "Predict using the kernel ridge model. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) Samples. If kernel == \"precomputed\" this is instead a precomputed kernel matrix, shape = [n_samples, n_samples_fitted], where n_samples_fitted is the number of samples used in the fitting for this estimator. Returns ------- C : ndarray of shape (n_samples,) or (n_samples, n_targets) Returns predicted values.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\kernel_ridge.py",
    "ast_data": "FunctionDef name:predict arg:self arg:X arguments arg arg Call Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "fit",
    "source_code": "def fit(self, X, y=None, **fit_params):\n    self.fit_transform(X, **fit_params)\n    return self",
    "docstring": "Fit the imputer on and return self. Parameters ---------- X : array-like, shape (n_samples, n_features) Input data, where is the number of samples and is the number of features. y : Ignored Not used, present for API consistency by convention. **fit_params : dict Parameters routed to the method of the sub-estimator via the metadata routing API. .. versionadded:: 1.5 Only available if is set. See :ref: for more details. Returns ------- self : object Fitted estimator.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\impute\\_iterative.py",
    "ast_data": "FunctionDef name:fit arg:self arg:X arg:y arguments arg arg arg arg Call Return return:yes"
  },
  {
    "library": "kornia",
    "name": "apply_transform_mask",
    "source_code": "def apply_transform_mask(self, input: Tensor, params: Dict[str, Tensor], flags: Dict[str, Any], transform: Optional[Tensor]=None) -> Tensor:\n    resample_method: Optional[Resample]\n    if 'resample' in flags:\n        resample_method = flags['resample']\n        flags['resample'] = Resample.get('nearest')\n    output = self.apply_transform(input, params, flags, transform)\n    if resample_method is not None:\n        flags['resample'] = resample_method\n    return output",
    "docstring": "Process masks corresponding to the inputs that are transformed. Note: Convert \"resample\" arguments to \"nearest\" by default.",
    "type": "method",
    "file_path": "kornia\\kornia\\augmentation\\_2d\\geometric\\base.py",
    "ast_data": "FunctionDef name:apply_transform_mask arg:self arg:input arg:params arg:flags arg:transform arguments arg arg arg arg arg If Compare Assign Assign Call Assign Call If Compare Assign Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "device_mesh",
    "source_code": "@property\ndef device_mesh(self) -> DeviceMesh:\n    return self._spec.mesh",
    "docstring": "The :class: attribute that associates with this DTensor object. .. note:: `` is a read-only property, it can not be set.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\tensor\\_api.py",
    "ast_data": "FunctionDef name:device_mesh arg:self arguments arg Return return:yes"
  },
  {
    "library": "django",
    "name": "ogr",
    "source_code": "@property\ndef ogr(self):\n    return gdal.OGRGeometry(self._ogr_ptr(), self.srs)",
    "docstring": "Return the OGR Geometry for this Geometry.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\geos\\geometry.py",
    "ast_data": "FunctionDef name:ogr arg:self arguments arg Return return:yes Call Call"
  },
  {
    "library": "scipy",
    "name": "Trigonometric01",
    "source_code": "class Trigonometric01(Benchmark):\n    change_dimensionality = True\n\n    def __init__(self, dimensions=2):\n        Benchmark.__init__(self, dimensions)\n        self._bounds = list(zip([0.0] * self.N, [pi] * self.N))\n        self.global_optimum = [[0.0 for _ in range(self.N)]]\n        self.fglob = 0.0\n\n    def fun(self, x, *args):\n        self.nfev += 1\n        i = atleast_2d(arange(1.0, self.N + 1)).T\n        inner = cos(x) + i * (1 - cos(x) - sin(x))\n        return sum((self.N - sum(inner, axis=1)) ** 2)",
    "docstring": "Trigonometric 1 objective function. This class defines the Trigonometric 1 [1]_ global optimization problem. This is a multimodal minimization problem defined as follows: .. math:: f_{\\text{Trigonometric01}}(x) = \\sum_{i=1}^{n} \\left [n - \\sum_{j=1}^{n} \\cos(x_j) + i \\left(1 - cos(x_i) - sin(x_i) \\right ) \\right]^2 Here, :math: represents the number of dimensions and :math: for :math:. *Global optimum*: :math: for :math: for :math: .. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions For Global Optimization Problems Int. Journal of Mathematical Modelling and Numerical Optimisation, 2013, 4, 150-194. TODO: equaiton uncertain here. Is it just supposed to be the cos term in the inner sum, or the whole of the second line in Jamil #153.",
    "type": "class",
    "file_path": "scipy\\benchmarks\\benchmarks\\go_benchmark_functions\\go_funcs_T.py",
    "ast_data": "ClassDef name:Trigonometric01 Assign FunctionDef name:__init__ arg:self arg:dimensions arguments arg arg Call Assign Call Call Assign Call Assign FunctionDef name:fun arg:self arg:x arguments arg arg arg Assign Call Call Assign Call Call Call Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "enable_debug_mode",
    "source_code": "@tf_export('data.experimental.enable_debug_mode')\ndef enable_debug_mode():\n    if context.executing_eagerly():\n        toggle_debug_mode(True)\n    else:\n        raise ValueError('`enable_debug_mode() is only supported in eager mode.')",
    "docstring": "Enables debug mode for tf.data. Example usage with pdb module: The effect of debug mode is two-fold: 1) Any transformations that would introduce asynchrony, parallelism, or non-determinism to the input pipeline execution will be forced to execute synchronously, sequentially, and deterministically. 2) Any user-defined functions passed into tf.data transformations such as will be wrapped in so that their body is executed \"eagerly\" as a Python function as opposed to a traced TensorFlow graph, which is the default behavior. Note that even when debug mode is enabled, the user-defined function is still traced to infer the shape and type of its outputs; as a consequence, any statements or breakpoints will be triggered once during the tracing before the actual execution of the input pipeline. NOTE: As the debug mode setting affects the construction of the tf.data input pipeline, it should be enabled before any tf.data definitions. Raises: ValueError: When invoked from graph mode.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\data\\ops\\debug_mode.py",
    "ast_data": "FunctionDef name:enable_debug_mode arguments If Call Call Raise Call Call"
  },
  {
    "library": "pytorch",
    "name": "get_default_load_endianness",
    "source_code": "def get_default_load_endianness() -> Optional[LoadEndianness]:\n    from torch.utils.serialization import config\n    return config.load.endianness",
    "docstring": "Get fallback byte order for loading files If byteorder mark is not present in saved checkpoint, this byte order is used as fallback. By default, it's \"native\" byte order. Returns: default_load_endian: Optional[LoadEndianness]",
    "type": "function",
    "file_path": "pytorch\\torch\\serialization.py",
    "ast_data": "FunctionDef name:get_default_load_endianness arguments Return return:yes"
  },
  {
    "library": "kornia",
    "name": "unpad",
    "source_code": "def unpad(self, padding_size: Tensor) -> 'Keypoints':\n    if not (len(padding_size.shape) == 2 and padding_size.size(1) == 4):\n        raise RuntimeError(f'Expected padding_size as (B, 4). Got {padding_size.shape}.')\n    self._data[..., 0] -= padding_size[..., :1]\n    self._data[..., 1] -= padding_size[..., 2:3]\n    return self",
    "docstring": "Pad a bounding keypoints. Args: padding_size: (B, 4)",
    "type": "method",
    "file_path": "kornia\\kornia\\geometry\\keypoints.py",
    "ast_data": "FunctionDef name:unpad arg:self arg:padding_size arguments arg arg If BoolOp Compare Call Compare Call Raise Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_write_keras_model_summary",
    "source_code": "def _write_keras_model_summary(self):\n    with self._train_writer.as_default():\n        with summary_ops_v2.record_if(True):\n            summary_writable = self.model._is_graph_network or self.model.__class__.__name__ == 'Sequential'\n            if summary_writable:\n                keras_model_summary('keras', self.model, step=0)",
    "docstring": "Writes Keras graph network summary to TensorBoard.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\callbacks.py",
    "ast_data": "FunctionDef name:_write_keras_model_summary arg:self arguments arg With Call With Call Assign BoolOp Compare If Call"
  },
  {
    "library": "seaborn",
    "name": "choose_cubehelix_palette",
    "source_code": "def choose_cubehelix_palette(as_cmap=False):\n    pal = []\n    if as_cmap:\n        cmap = _init_mutable_colormap()\n\n    @interact\n    def choose_cubehelix(n_colors=IntSlider(min=2, max=16, value=9), start=FloatSlider(min=0, max=3, value=0), rot=FloatSlider(min=-1, max=1, value=0.4), gamma=FloatSlider(min=0, max=5, value=1), hue=FloatSlider(min=0, max=1, value=0.8), light=FloatSlider(min=0, max=1, value=0.85), dark=FloatSlider(min=0, max=1, value=0.15), reverse=False):\n        if as_cmap:\n            colors = cubehelix_palette(256, start, rot, gamma, hue, light, dark, reverse)\n            _update_lut(cmap, np.c_[colors, np.ones(256)])\n            _show_cmap(cmap)\n        else:\n            pal[:] = cubehelix_palette(n_colors, start, rot, gamma, hue, light, dark, reverse)\n            palplot(pal)\n    if as_cmap:\n        return cmap\n    return pal",
    "docstring": "Launch an interactive widget to create a sequential cubehelix palette. This corresponds with the :func: function. This kind of palette is good for data that range between relatively uninteresting low values and interesting high values. The cubehelix system allows the palette to have more hue variance across the range, which can be helpful for distinguishing a wider range of values. Requires IPython 2+ and must be used in the notebook. Parameters ---------- as_cmap : bool If True, the return value is a matplotlib colormap rather than a list of discrete colors. Returns ------- pal or cmap : list of colors or matplotlib colormap Object that can be passed to plotting functions. See Also -------- cubehelix_palette : Create a sequential palette or colormap using the cubehelix system.",
    "type": "function",
    "file_path": "seaborn\\seaborn\\widgets.py",
    "ast_data": "FunctionDef name:choose_cubehelix_palette arg:as_cmap arguments arg Assign If Assign Call FunctionDef name:choose_cubehelix arg:n_colors arg:start arg:rot arg:gamma arg:hue arg:light arg:dark arg:reverse arguments arg arg arg arg arg arg arg arg Call Call Call Call Call Call Call If Assign Call Call Call Call Assign Call Call If Return return:yes Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_decorate_run_options_for_debug",
    "source_code": "def _decorate_run_options_for_debug(self, run_options, debug_urls, debug_ops='DebugIdentity', node_name_regex_allowlist=None, op_type_regex_allowlist=None, tensor_dtype_regex_allowlist=None, tolerate_debug_op_creation_failures=False):\n    run_options.output_partition_graphs = True\n    debug_utils.watch_graph(run_options, self._sess.graph, debug_urls=debug_urls, debug_ops=debug_ops, node_name_regex_allowlist=node_name_regex_allowlist, op_type_regex_allowlist=op_type_regex_allowlist, tensor_dtype_regex_allowlist=tensor_dtype_regex_allowlist, tolerate_debug_op_creation_failures=tolerate_debug_op_creation_failures, reset_disk_byte_usage=self._run_call_count == 1 or self._is_disk_usage_reset_each_run())",
    "docstring": "Modify a RunOptions object for debug tensor watching. Specifies request for outputting partition graphs. Adds debug_tensor_watch_opts with proper debug URLs. Args: run_options: (RunOptions) the modified RunOptions object. debug_urls: (list of str) debug URLs to be entered in run_options. debug_tensor_watch_opts. debug_ops: (str or list of str) debug op(s) to be used by the debugger. node_name_regex_allowlist: Regular-expression allowlist for node name. op_type_regex_allowlist: Regular-expression allowlist for op type. tensor_dtype_regex_allowlist: Regular-expression allowlist for tensor dtype. tolerate_debug_op_creation_failures: Whether debug op creation failures are to be tolerated.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\debug\\wrappers\\framework.py",
    "ast_data": "FunctionDef name:_decorate_run_options_for_debug arg:self arg:run_options arg:debug_urls arg:debug_ops arg:node_name_regex_allowlist arg:op_type_regex_allowlist arg:tensor_dtype_regex_allowlist arg:tolerate_debug_op_creation_failures arguments arg arg arg arg arg arg arg arg Assign Call BoolOp Compare Call"
  },
  {
    "library": "scipy",
    "name": "_briggs_helper_function",
    "source_code": "def _briggs_helper_function(a, k):\n    if k < 0 or int(k) != k:\n        raise ValueError('expected a nonnegative integer k')\n    if k == 0:\n        return a - 1\n    elif k == 1:\n        return np.sqrt(a) - 1\n    else:\n        k_hat = k\n        if np.angle(a) >= np.pi / 2:\n            a = np.sqrt(a)\n            k_hat = k - 1\n        z0 = a - 1\n        a = np.sqrt(a)\n        r = 1 + a\n        for j in range(1, k_hat):\n            a = np.sqrt(a)\n            r = r * (1 + a)\n        r = z0 / r\n        return r",
    "docstring": "Computes r = a^(1 / (2^k)) - 1. This is algorithm (2) of [1]_. The purpose is to avoid a danger of subtractive cancellation. For more computational efficiency it should probably be cythonized. Parameters ---------- a : complex A complex number. k : integer A nonnegative integer. Returns ------- r : complex The value r = a^(1 / (2^k)) - 1 computed with less cancellation. Notes ----- The algorithm as formulated in the reference does not handle k=0 or k=1 correctly, so these are special-cased in this implementation. This function is intended to not allow to belong to the closed negative real axis, but this constraint is relaxed. References ---------- .. [1] Awad H. Al-Mohy (2012) \"A more accurate Briggs method for the logarithm\", Numerical Algorithms, 59 : 393--402.",
    "type": "function",
    "file_path": "scipy\\scipy\\linalg\\_matfuncs_inv_ssq.py",
    "ast_data": "FunctionDef name:_briggs_helper_function arg:a arg:k arguments arg arg If BoolOp Compare Compare Call Raise Call If Compare Return return:yes If Compare Return return:yes Call Assign If Compare Call Assign Call Assign Assign Assign Call Assign For Call Assign Call Assign Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "batchable_to_tensor_list",
    "source_code": "def batchable_to_tensor_list(spec, value, minimum_rank=0):\n    if isinstance(spec, internal.TensorSpec):\n        return [value]\n    elif hasattr(spec, '__batch_encoder__'):\n        encoded_value = spec.__batch_encoder__.encode(spec, value, minimum_rank)\n        encoded_specs = spec.__batch_encoder__.encoding_specs(spec)\n        encoded_flats = nest.map_structure(functools.partial(batchable_to_tensor_list, minimum_rank=minimum_rank), encoded_specs, encoded_value)\n        return nest.flatten(encoded_flats)\n    else:\n        return spec._to_tensor_list(value)",
    "docstring": "Returns a list of tensors encoding , whose type is .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\type_spec.py",
    "ast_data": "FunctionDef name:batchable_to_tensor_list arg:spec arg:value arg:minimum_rank arguments arg arg arg If Call Return return:yes If Call Assign Call Assign Call Assign Call Call Return return:yes Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "aggregate_and_return_name_for_input",
    "source_code": "def aggregate_and_return_name_for_input(self, out_graphdef):\n    del out_graphdef\n    raise RuntimeError('Unimplemented abstract method.')",
    "docstring": "This adds the node(s) to out_graphdef and returns the input node name. Args: out_graphdef: A graphdef that is ready to have this input added. Returns: The output that the stub should use as an input for this operand. Raises: RuntimeError: if the method is not implemented.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\lite\\python\\op_hint.py",
    "ast_data": "FunctionDef name:aggregate_and_return_name_for_input arg:self arg:out_graphdef arguments arg arg Raise Call"
  },
  {
    "library": "scikit-learn",
    "name": "default_dtypes",
    "source_code": "def default_dtypes(self, /, *, device: _Device | None=None) -> DefaultDTypes:\n    if device not in ['cpu', _DASK_DEVICE, None]:\n        raise ValueError(f'Device not understood. Only \"cpu\" or _DASK_DEVICE is allowed, but received: {device!r}')\n    return {'real floating': dtype(float64), 'complex floating': dtype(complex128), 'integral': dtype(intp), 'indexing': dtype(intp)}",
    "docstring": "The default data types used for new Dask arrays. For Dask, this always returns the following dictionary: - **\"real floating\"**: `` Parameters ---------- device : str, optional The device to get the default data types for. Returns ------- dtypes : dict A dictionary describing the default data types used for new Dask arrays. See Also -------- __array_namespace_info__.capabilities, __array_namespace_info__.default_device, __array_namespace_info__.dtypes, __array_namespace_info__.devices Examples -------- >>> info = xp.__array_namespace_info__() >>> info.default_dtypes() {'real floating': dask.float64, 'complex floating': dask.complex128, 'integral': dask.int64, 'indexing': dask.int64}",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\externals\\array_api_compat\\dask\\array\\_info.py",
    "ast_data": "FunctionDef name:default_dtypes arguments arg arg If Compare Raise Call Return return:yes Call Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "set_checkpoint_early_stop",
    "source_code": "@contextlib.contextmanager\ndef set_checkpoint_early_stop(enable: bool):\n    global _enable_checkpoint_early_stop\n    try:\n        prev = _enable_checkpoint_early_stop\n        _enable_checkpoint_early_stop = enable\n        yield\n    finally:\n        _enable_checkpoint_early_stop = prev",
    "docstring": "Context manager that sets whether checkpoint should stop recomputation early. By default, non-reentrant checkpoint stops recomputation as soon as it has computed all needed Tensors. This context manager can be used to disable that feature if it is problematic for your specific application. This context manager only needs to be active when forward is run. It does not need to be active during backward. Example:: >>> # xdoctest: +SKIP(failing) >>> message = \"saved tensors default hooks are disabled\" >>> with set_checkpoint_early_stop(False): ... # Any checkpoint under this context manager will respect this ... # context manager, even if its backward is performed outside. ... out = checkpoint(fn, inputs) ... >>> out.backward()",
    "type": "function",
    "file_path": "pytorch\\torch\\utils\\checkpoint.py",
    "ast_data": "FunctionDef name:set_checkpoint_early_stop arg:enable arguments arg Try Assign Assign Assign"
  },
  {
    "library": "django",
    "name": "cleaned_data",
    "source_code": "@property\ndef cleaned_data(self):\n    if not self.is_valid():\n        raise AttributeError(\"'%s' object has no attribute 'cleaned_data'\" % self.__class__.__name__)\n    return [form.cleaned_data for form in self.forms]",
    "docstring": "Return a list of form.cleaned_data dicts for every form in self.forms.",
    "type": "method",
    "file_path": "django\\django\\forms\\formsets.py",
    "ast_data": "FunctionDef name:cleaned_data arg:self arguments arg If Call Raise Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_evaluate",
    "source_code": "def _evaluate(tensor):\n    if context.executing_eagerly():\n        return tensor.numpy()\n    return ops.get_default_session().run(tensor)",
    "docstring": "Returns the numpy value of a tensor.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\checkpoint\\checkpoint_management.py",
    "ast_data": "FunctionDef name:_evaluate arg:tensor arguments arg If Call Return return:yes Call Return return:yes Call Call"
  },
  {
    "library": "cherrypy",
    "name": "json_out",
    "source_code": "def json_out(content_type='application/json', debug=False, handler=json_handler):\n    request = cherrypy.serving.request\n    if request.handler is None:\n        return\n    if debug:\n        cherrypy.log('Replacing %s with JSON handler' % request.handler, 'TOOLS.JSON_OUT')\n    request._json_inner_handler = request.handler\n    request.handler = handler\n    if content_type is not None:\n        if debug:\n            cherrypy.log('Setting Content-Type to %s' % content_type, 'TOOLS.JSON_OUT')\n        cherrypy.serving.response.headers['Content-Type'] = content_type",
    "docstring": "Wrap request.handler to serialize its output to JSON. Sets Content-Type. If the given content_type is None, the Content-Type response header is not set. Provide your own handler to use a custom encoder. For example cherrypy.config['tools.json_out.handler'] = , or @json_out(handler=function).",
    "type": "function",
    "file_path": "cherrypy\\cherrypy\\lib\\jsontools.py",
    "ast_data": "FunctionDef name:json_out arg:content_type arg:debug arg:handler arguments arg arg arg Assign If Compare Return return:no If Call Assign Assign If Compare If Call Assign"
  },
  {
    "library": "scikit-learn",
    "name": "_sparse_multidot_diag",
    "source_code": "def _sparse_multidot_diag(self, X, A, X_mean, sqrt_sw):\n    intercept_col = scale = sqrt_sw\n    batch_size = X.shape[1]\n    diag = np.empty(X.shape[0], dtype=X.dtype)\n    for start in range(0, X.shape[0], batch_size):\n        batch = slice(start, min(X.shape[0], start + batch_size), 1)\n        X_batch = np.empty((X[batch].shape[0], X.shape[1] + self.fit_intercept), dtype=X.dtype)\n        if self.fit_intercept:\n            X_batch[:, :-1] = X[batch].toarray() - X_mean * scale[batch][:, None]\n            X_batch[:, -1] = intercept_col[batch]\n        else:\n            X_batch = X[batch].toarray()\n        diag[batch] = (X_batch.dot(A) * X_batch).sum(axis=1)\n    return diag",
    "docstring": "Compute the diagonal of (X - X_mean).dot(A).dot((X - X_mean).T) without explicitly centering X nor computing X.dot(A) when X is sparse. Parameters ---------- X : sparse matrix of shape (n_samples, n_features) A : ndarray of shape (n_features, n_features) X_mean : ndarray of shape (n_features,) sqrt_sw : ndarray of shape (n_features,) square roots of sample weights Returns ------- diag : np.ndarray, shape (n_samples,) The computed diagonal.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\linear_model\\_ridge.py",
    "ast_data": "FunctionDef name:_sparse_multidot_diag arg:self arg:X arg:A arg:X_mean arg:sqrt_sw arguments arg arg arg arg arg Assign Assign Assign Call For Call Assign Call Call Assign Call If Assign Call Assign Assign Call Assign Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "export_chrome_trace",
    "source_code": "def export_chrome_trace(self, path):\n    import os\n    device_name = 'cuda' if not self._use_device else self._use_device\n    with open(path, 'w') as f:\n        next_id = 0\n        f.write('[')\n        for evt in self:\n            if evt.trace_name is None:\n                continue\n            f.write('{{\"name\": \"{}\", \"ph\": \"X\", \"ts\": {}, \"dur\": {}, \"tid\": {}, \"pid\": \"CPU functions\", \"args\": {{}}}}, '.format(evt.trace_name, evt.time_range.start, evt.time_range.elapsed_us(), evt.thread if not evt.is_remote else f'\" node_id:{evt.node_id}, thread_id:{evt.thread} \"'))\n            for _ in evt.kernels:\n                f.write(f'{{\"name\": \"{evt.trace_name}\", \"ph\": \"s\", \"ts\": {evt.time_range.start}, \"tid\": {evt.thread}, \"pid\": \"CPU functions\", \"id\": {next_id}, \"cat\": \"cpu_to_{device_name}\", \"args\": {{}}}}, ')\n                next_id += 1\n        if len(self) > 0:\n            f.seek(f.tell() - 2, os.SEEK_SET)\n            f.truncate()\n        f.write(']')",
    "docstring": "Export an EventList as a Chrome tracing tools file. The checkpoint can be later loaded and inspected under `` URL. Args: path (str): Path where the trace will be written.",
    "type": "method",
    "file_path": "pytorch\\torch\\autograd\\profiler_util.py",
    "ast_data": "FunctionDef name:export_chrome_trace arg:self arg:path arguments arg arg Assign With Call Assign Call For If Compare Call Call Call For Call If Compare Call Call Call Call Call"
  },
  {
    "library": "scipy",
    "name": "rogerstanimoto",
    "source_code": "def rogerstanimoto(u, v, w=None):\n    u = _validate_vector(u)\n    v = _validate_vector(v)\n    if w is not None:\n        w = _validate_weights(w)\n    nff, nft, ntf, ntt = _nbool_correspond_all(u, v, w=w)\n    return float(2.0 * (ntf + nft)) / float(ntt + nff + 2.0 * (ntf + nft))",
    "docstring": "Compute the Rogers-Tanimoto dissimilarity between two boolean 1-D arrays. The Rogers-Tanimoto dissimilarity between two boolean 1-D arrays and , is defined as .. math:: \\frac{R} {c_{TT} + c_{FF} + R} where :math: is the number of occurrences of :math: and :math: for :math:`k >> from scipy.spatial import distance >>> distance.rogerstanimoto([1, 0, 0], [0, 1, 0]) 0.8 >>> distance.rogerstanimoto([1, 0, 0], [1, 1, 0]) 0.5 >>> distance.rogerstanimoto([1, 0, 0], [2, 0, 0]) -1.0",
    "type": "function",
    "file_path": "scipy\\scipy\\spatial\\distance.py",
    "ast_data": "FunctionDef name:rogerstanimoto arg:u arg:v arg:w arguments arg arg arg Assign Call Assign Call If Compare Assign Call Assign Call Return return:yes Call Call"
  },
  {
    "library": "pygame",
    "name": "set_italic",
    "source_code": "def set_italic(self, value):\n    self.oblique = bool(value)",
    "docstring": "set_italic(bool) -> None enable fake rendering of italic text",
    "type": "method",
    "file_path": "pygame\\src_py\\ftfont.py",
    "ast_data": "FunctionDef name:set_italic arg:self arg:value arguments arg arg Assign Call"
  },
  {
    "library": "scikit-learn",
    "name": "predict_proba",
    "source_code": "@available_if(_check_predict_proba)\ndef predict_proba(self, X):\n    check_is_fitted(self)\n    results = [estimator.predict_proba(X) for estimator in self.estimators_]\n    return results",
    "docstring": "Return prediction probabilities for each class of each output. This method will raise a `classes_`) for that particular output.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\multioutput.py",
    "ast_data": "FunctionDef name:predict_proba arg:self arg:X arguments arg arg Call Assign Call Return return:yes Call"
  },
  {
    "library": "sphinx",
    "name": "set_separator",
    "source_code": "def set_separator(self) -> None:\n    self.separator = len(self.lines)",
    "docstring": "Sets the separator below the current line.",
    "type": "method",
    "file_path": "sphinx\\sphinx\\writers\\text.py",
    "ast_data": "FunctionDef name:set_separator arg:self arguments arg Assign Call"
  },
  {
    "library": "kornia",
    "name": "match_initial",
    "source_code": "def match_initial(self, x: Tensor) -> Tuple[Tensor, bool]:\n    input_dict: Dict[str, Tensor] = {'image0': self.target, 'image1': x}\n    for k, v in self.target_initial_representation.items():\n        input_dict[f'{k}0'] = v\n    match_dict: Dict[str, Tensor] = self.initial_matcher(input_dict)\n    keypoints0 = match_dict['keypoints0'][match_dict['batch_indexes'] == 0]\n    keypoints1 = match_dict['keypoints1'][match_dict['batch_indexes'] == 0]\n    self.keypoints0_num = len(keypoints0)\n    self.keypoints1_num = len(keypoints1)\n    if self.keypoints0_num < self.minimum_inliers_num:\n        return self.no_match()\n    H, inliers = self.ransac(keypoints0, keypoints1)\n    self.inliers_num = inliers.sum().item()\n    if self.inliers_num < self.minimum_inliers_num:\n        return self.no_match()\n    self.previous_homography = H.clone()\n    return (H, True)",
    "docstring": "Match the frame with initial_matcher and verified with ransac.",
    "type": "method",
    "file_path": "kornia\\kornia\\tracking\\planar_tracker.py",
    "ast_data": "FunctionDef name:match_initial arg:self arg:x arguments arg arg For Call Assign Call Assign Compare Assign Compare Assign Call Assign Call If Compare Return return:yes Call Assign Call Assign Call Call If Compare Return return:yes Call Assign Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "__init__",
    "source_code": "def __init__(self, namespace: str, op_name: str, promotion_kind: _prims_common.REDUCTION_OUTPUT_TYPE_KIND):\n    super().__init__(namespace, op_name)\n    self.promotion_kind = promotion_kind",
    "docstring": "Constructs a TypePromotionRule for reduction operators. Args: namespace: Namespace of the op. E.g. 'aten' in 'torch.ops.aten.sum'. op_name: Name of the op. E.g. 'sum' in 'torch.ops.aten.sum'. promotion_kind: Type promotion kind. Refer to [_prims_common.reduction_dtypes](( for detail. # noqa: B950",
    "type": "method",
    "file_path": "pytorch\\torch\\onnx\\_internal\\fx\\passes\\type_promotion.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:namespace arg:op_name arg:promotion_kind arguments arg arg arg arg Call Call Assign"
  },
  {
    "library": "tensorflow",
    "name": "non_tensor_outputs",
    "source_code": "@property\ndef non_tensor_outputs(self):\n    return self._non_tensor_outputs",
    "docstring": "A dictionary consisting of any non tensor outputs to be captured.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\input_lib.py",
    "ast_data": "FunctionDef name:non_tensor_outputs arg:self arguments arg Return return:yes"
  },
  {
    "library": "django",
    "name": "ask_not_null_addition",
    "source_code": "def ask_not_null_addition(self, field_name, model_name):\n    return None",
    "docstring": "Adding a NOT NULL field to a model.",
    "type": "method",
    "file_path": "django\\django\\db\\migrations\\questioner.py",
    "ast_data": "FunctionDef name:ask_not_null_addition arg:self arg:field_name arg:model_name arguments arg arg arg Return return:no"
  },
  {
    "library": "scikit-learn",
    "name": "_iter_test_indices",
    "source_code": "def _iter_test_indices(self, X=None, y=None, groups=None):\n    raise NotImplementedError",
    "docstring": "Generates integer indices corresponding to test sets.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\model_selection\\_split.py",
    "ast_data": "FunctionDef name:_iter_test_indices arg:self arg:X arg:y arg:groups arguments arg arg arg arg Raise"
  },
  {
    "library": "scikit-learn",
    "name": "get_commit_message",
    "source_code": "def get_commit_message():\n    build_source_version_message = os.environ['BUILD_SOURCEVERSIONMESSAGE']\n    if os.environ['BUILD_REASON'] == 'PullRequest':\n        commit_id = build_source_version_message.split()[1]\n        git_cmd = ['git', 'log', commit_id, '-1', '--pretty=%B']\n        commit_message = subprocess.run(git_cmd, capture_output=True, text=True).stdout.strip()\n    else:\n        commit_message = build_source_version_message\n    commit_message = commit_message.replace('##vso', '..vso')\n    return commit_message",
    "docstring": "Retrieve the commit message.",
    "type": "function",
    "file_path": "scikit-learn\\build_tools\\azure\\get_commit_message.py",
    "ast_data": "FunctionDef name:get_commit_message arguments Assign If Compare Assign Call Assign Assign Call Call Assign Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, tpu_cluster_resolver=None, device_assignment=None):\n    logging.warning('`tf.distribute.experimental.TPUStrategy` is deprecated, please use the non-experimental symbol `tf.distribute.TPUStrategy` instead.')\n    super().__init__(TPUExtended(self, tpu_cluster_resolver, device_assignment=device_assignment))\n    distribute_lib.distribution_strategy_gauge.get_cell('V2').set('TPUStrategy')\n    distribute_lib.distribution_strategy_replica_gauge.get_cell('num_workers').set(self.extended.num_hosts)\n    distribute_lib.distribution_strategy_replica_gauge.get_cell('num_replicas_per_worker').set(self.extended.num_replicas_per_host)\n    self._enable_packed_variable_in_eager_mode = True",
    "docstring": "Synchronous training in TPU donuts or Pods. Args: tpu_cluster_resolver: A tf.distribute.cluster_resolver.TPUClusterResolver, which provides information about the TPU cluster. device_assignment: Optional to specify the placement of replicas on the TPU cluster.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\tpu_strategy.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:tpu_cluster_resolver arg:device_assignment arguments arg arg arg Call Call Call Call Call Call Call Call Call Call Assign"
  },
  {
    "library": "tensorflow",
    "name": "_handle_failure_and_recovery",
    "source_code": "def _handle_failure_and_recovery(self, e, on_failure_fn, on_transient_failure_fn, on_recovery_fn, worker_device_name):\n    if on_failure_fn:\n        on_failure_fn(e)\n    with self._cluster_update_lock:\n        self._cluster_due_for_update_or_finish.set()\n        self._worker_up_cond.wait(_WORKER_MAXIMUM_RECOVERY_SEC)\n        if self._error_from_recovery:\n            try:\n                raise self._error_from_recovery\n            finally:\n                self._error_from_recovery = None\n        logging.info('Worker %s has been recovered.', worker_device_name)\n    if on_recovery_fn:\n        logging.info('Worker %s calling on_recovery_fn', worker_device_name)\n        with self.wait_on_failure(on_recovery_fn=on_recovery_fn, on_transient_failure_fn=on_transient_failure_fn, worker_device_name=worker_device_name):\n            on_recovery_fn()",
    "docstring": "Call failure fn, wait for cluster to recover, then call recovery fn. Args: e: the Exception thrown during closure execution. on_failure_fn: an optional function to run if preemption happens. on_transient_failure_fn: an optional function to run if transient failure happens. on_recovery_fn: an optional function to run when a worker is recovered from preemption. worker_device_name: the device name of the worker instance that is passing through the failure.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\coordinator\\cluster_coordinator.py",
    "ast_data": "FunctionDef name:_handle_failure_and_recovery arg:self arg:e arg:on_failure_fn arg:on_transient_failure_fn arg:on_recovery_fn arg:worker_device_name arguments arg arg arg arg arg arg If Call With Call Call If Try Raise Assign Call If Call With Call Call"
  },
  {
    "library": "tensorflow",
    "name": "StandardInputStep",
    "source_code": "class StandardInputStep(Step):\n\n    def __init__(self, dataset_fn, distribution):\n        super(StandardInputStep, self).__init__(distribution)\n        self._iterator = distribution.make_input_fn_iterator(lambda _: dataset_fn())\n\n    def initialize(self):\n        return self._iterator.initializer",
    "docstring": "Step with a standard implementation of input handling. Args: dataset_fn: a function that returns a tf.data Dataset that produces the input for the model.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\step_fn.py",
    "ast_data": "ClassDef name:StandardInputStep FunctionDef name:__init__ arg:self arg:dataset_fn arg:distribution arguments arg arg arg Call Call Assign Call arguments arg Call FunctionDef name:initialize arg:self arguments arg Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "score",
    "source_code": "def score(self, X, y, sample_weight=None):\n    if X is None:\n        X = np.zeros(shape=(len(y), 1))\n    return super().score(X, y, sample_weight)",
    "docstring": "Return the coefficient of determination R^2 of the prediction. The coefficient R^2 is defined as , where is the residual sum of squares and is the total sum of squares . The best possible score is 1.0 and it can be negative (because the model can be arbitrarily worse). A constant model that always predicts the expected value of y, disregarding the input features, would get a R^2 score of 0.0. Parameters ---------- X : None or array-like of shape (n_samples, n_features) Test samples. Passing None as test samples gives the same result as passing real test samples, since operates independently of the sampled observations. y : array-like of shape (n_samples,) or (n_samples, n_outputs) True values for X. sample_weight : array-like of shape (n_samples,), default=None Sample weights. Returns ------- score : float R^2 of w.r.t. y.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\dummy.py",
    "ast_data": "FunctionDef name:score arg:self arg:X arg:y arg:sample_weight arguments arg arg arg arg If Compare Assign Call Call Return return:yes Call Call"
  },
  {
    "library": "scipy",
    "name": "line_search_armijo",
    "source_code": "def line_search_armijo(f, xk, pk, gfk, old_fval, args=(), c1=0.0001, alpha0=1):\n    xk = np.atleast_1d(xk)\n    fc = [0]\n\n    def phi(alpha1):\n        fc[0] += 1\n        return f(xk + alpha1 * pk, *args)\n    if old_fval is None:\n        phi0 = phi(0.0)\n    else:\n        phi0 = old_fval\n    derphi0 = np.dot(gfk, pk)\n    alpha, phi1 = scalar_search_armijo(phi, phi0, derphi0, c1=c1, alpha0=alpha0)\n    return (alpha, fc[0], phi1)",
    "docstring": "Minimize over alpha, the function `fxkfxkalpha` at start of the optimization. Returns ------- alpha f_count f_val_at_alpha Notes ----- Uses the interpolation algorithm (Armijo backtracking) as suggested by Wright and Nocedal in 'Numerical Optimization', 1999, pp. 56-57",
    "type": "function",
    "file_path": "scipy\\scipy\\optimize\\_linesearch.py",
    "ast_data": "FunctionDef name:line_search_armijo arg:f arg:xk arg:pk arg:gfk arg:old_fval arg:args arg:c1 arg:alpha0 arguments arg arg arg arg arg arg arg arg Assign Call Assign FunctionDef name:phi arg:alpha1 arguments arg Return return:yes Call If Compare Assign Call Assign Assign Call Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_func_graph_id_from_func_name",
    "source_code": "def _func_graph_id_from_func_name(self, op_type):\n    op_type = compat.as_bytes(op_type)\n    if is_op_type_function(op_type):\n        if op_type in self._op_type_to_context_id:\n            return self._op_type_to_context_id[op_type]\n        with self._context_lock:\n            for function in self._function_to_graph_id:\n                if function.name == op_type:\n                    graph_id = self._function_to_graph_id[function]\n                    self._op_type_to_context_id[op_type] = graph_id\n                    return graph_id\n        return None\n    else:\n        return None",
    "docstring": "Attempt to get the ID of a FuncGraph based on an op type name. Also caches the ID for faster access later. Args: op_type: Op type string, which may be the name of a function. Returns: If the op_type name does not fit the pattern of a function name (e.g., one that starts with \"__inference_\"), is returned immediately. Else, if the FuncGraph is found, ID of the underlying FuncGraph is returned as a string. Else, is returned.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\debug\\lib\\dumping_callback.py",
    "ast_data": "FunctionDef name:_func_graph_id_from_func_name arg:self arg:op_type arguments arg arg Assign Call If Call If Compare Return return:yes With For If Compare Assign Assign Return return:yes Return return:no Return return:no"
  },
  {
    "library": "matplotlib",
    "name": "minorformatter",
    "source_code": "@property\ndef minorformatter(self):\n    return self.long_axis.get_minor_formatter()",
    "docstring": "Minor tick for the colorbar.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\colorbar.py",
    "ast_data": "FunctionDef name:minorformatter arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "aggregate_tensors_or_indexed_slices",
    "source_code": "def aggregate_tensors_or_indexed_slices(values, accumulation_fn=math_ops.add_n):\n    if any((isinstance(v, indexed_slices.IndexedSlices) for v in values)):\n        return backprop_util.AggregateIndexedSlicesGradients(values)\n    else:\n        return accumulation_fn(values)",
    "docstring": "Aggregate tensors using and IndexedSlices via concat.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\cross_device_utils.py",
    "ast_data": "FunctionDef name:aggregate_tensors_or_indexed_slices arg:values arg:accumulation_fn arguments arg arg If Call Call Return return:yes Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "zeros",
    "source_code": "def zeros(sharding_spec: ShardingSpec, *size, dtype=None, layout=torch.strided, requires_grad=False, pin_memory=False, memory_format=torch.contiguous_format, process_group=None, init_rrefs=False) -> ShardedTensor:\n    return full(sharding_spec, size, fill_value=0, dtype=dtype, layout=layout, requires_grad=requires_grad, pin_memory=pin_memory, memory_format=memory_format, process_group=process_group, init_rrefs=init_rrefs)",
    "docstring": "Returns a :class: filled with the scalar value 0. Needs to be called on all ranks in an SPMD fashion. Args: sharding_spec (:class:): The specification describing how to shard the Tensor. size (int...): a sequence of integers defining the shape of the output tensor. Can be a variable number of arguments or a collection like a list or tuple. Keyword args: dtype (:class:, optional): the desired data type of returned tensor. Default: if `torch.set_default_dtypetorch.layouttorch.distributed.rpc.RRefShardedTensor` object on each rank",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\_shard\\sharded_tensor\\__init__.py",
    "ast_data": "FunctionDef name:zeros arg:sharding_spec arguments arg arg arg arg arg arg arg arg arg Return return:yes Call"
  },
  {
    "library": "seaborn",
    "name": "tick",
    "source_code": "def tick(self, locator: Locator | None=None) -> Nominal:\n    new = copy(self)\n    new._tick_params = {'locator': locator}\n    return new",
    "docstring": "Configure the selection of ticks for the scale's axis or legend. .. note:: This API is under construction and will be enhanced over time. At the moment, it is probably not very useful. Parameters ---------- locator : :class: subclass Pre-configured matplotlib locator; other parameters will not be used. Returns ------- Copy of self with new tick configuration.",
    "type": "method",
    "file_path": "seaborn\\seaborn\\_core\\scales.py",
    "ast_data": "FunctionDef name:tick arg:self arg:locator arguments arg arg Assign Call Assign Return return:yes"
  },
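A short sketch of the method above via seaborn's public objects interface (assuming a seaborn version recent enough to expose `Nominal` there):

```python
import matplotlib.ticker as mticker
import seaborn.objects as so

# tick() returns a copy of the scale with the locator stored in _tick_params;
# the original scale object is left untouched.
scale = so.Nominal().tick(locator=mticker.FixedLocator([0, 2, 4]))
```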
  {
    "library": "django",
    "name": "as_bytes",
    "source_code": "def as_bytes(self, unixfrom=False, linesep='\\n'):\n    fp = BytesIO()\n    g = generator.BytesGenerator(fp, mangle_from_=False)\n    g.flatten(self, unixfrom=unixfrom, linesep=linesep)\n    return fp.getvalue()",
    "docstring": "Return the entire formatted message as bytes. Optional `unixfrom' when True, means include the Unix From_ envelope header. This overrides the default as_bytes() implementation to not mangle lines that begin with 'From '. See bug #13433 for details.",
    "type": "method",
    "file_path": "django\\django\\core\\mail\\message.py",
    "ast_data": "FunctionDef name:as_bytes arg:self arg:unixfrom arg:linesep arguments arg arg arg Assign Call Assign Call Call Return return:yes Call"
  },
  {
    "library": "django",
    "name": "shell",
    "source_code": "@property\ndef shell(self):\n    return self[0]",
    "docstring": "Return the shell of this Polygon.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\gdal\\geometries.py",
    "ast_data": "FunctionDef name:shell arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "backing_device",
    "source_code": "@property\ndef backing_device(self):\n    raise NotImplementedError()",
    "docstring": "Returns the name of the device holding this tensor's memory. is usually the same as , which returns the device on which the kernel of the operation that produced this tensor ran. However, some operations can produce tensors on a different device (e.g., an operation that executes on the GPU but produces output tensors in host memory).",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\ops.py",
    "ast_data": "FunctionDef name:backing_device arg:self arguments arg Raise Call"
  },
  {
    "library": "matplotlib",
    "name": "set_animated",
    "source_code": "def set_animated(self, value):\n    for artist in self.artists:\n        artist.set_animated(value)",
    "docstring": "Set the animated state of the handles artist.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\widgets.py",
    "ast_data": "FunctionDef name:set_animated arg:self arg:value arguments arg arg For Call"
  },
  {
    "library": "pytorch",
    "name": "enable_wrap",
    "source_code": "@contextlib.contextmanager\ndef enable_wrap(*, wrapper_cls: Any, **wrapper_kwargs: Any) -> Generator[None, None, None]:\n    kwargs = {'wrapper_cls': wrapper_cls, **wrapper_kwargs}\n    with _ConfigAutoWrap(**kwargs):\n        yield",
    "docstring": "Context manager to wrap modules using a wrapper. Useful for when you'd like to apply the same configuration arguments to all child modules that you wrap. A particularly important use case is wrapping large layers so that they get sharded (in-place) during initialization, to avoid running out of system memory. Large layers can indicate that they should be sharded via the `wrapwrapFullyShardedDataParallel` instances inside the context",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\fsdp\\wrap.py",
    "ast_data": "FunctionDef name:enable_wrap arguments arg arg Assign With Call"
  },
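A minimal usage sketch, assuming an already-initialized distributed process group; `wrap` is the companion annotation from the same module:

```python
import torch.nn as nn
from torch.distributed.fsdp import FullyShardedDataParallel as FSDP
from torch.distributed.fsdp.wrap import enable_wrap, wrap

# Inside an initialized process group, every wrap() call in this context
# wraps its module with FSDP using the shared configuration.
with enable_wrap(wrapper_cls=FSDP):
    layer = wrap(nn.Linear(5, 5))
```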
  {
    "library": "tensorflow",
    "name": "stop",
    "source_code": "def stop(self):\n    logging.info('Stopping cluster, starting with failure handler')\n    self.failure_handler.stop()\n    logging.info('Stopping workers')\n    for worker in self.workers:\n        worker.stop()\n    logging.info('Stopping queue')\n    self.closure_queue.stop()\n    logging.info('Start cancelling remote resource-building functions')\n    self.resource_cancellation_mgr.start_cancel()",
    "docstring": "Stop worker, worker preemption threads, and the closure queue.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\coordinator\\cluster_coordinator.py",
    "ast_data": "FunctionDef name:stop arg:self arguments arg Call Call Call For Call Call Call Call Call"
  },
  {
    "library": "scikit-learn",
    "name": "IdentityLink",
    "source_code": "class IdentityLink(BaseLink):\n\n    def link(self, y_pred, out=None):\n        if out is not None:\n            np.copyto(out, y_pred)\n            return out\n        else:\n            return y_pred\n    inverse = link",
    "docstring": "The identity link function g(x)=x.",
    "type": "class",
    "file_path": "scikit-learn\\sklearn\\_loss\\link.py",
    "ast_data": "ClassDef name:IdentityLink FunctionDef name:link arg:self arg:y_pred arg:out arguments arg arg arg If Compare Call Return return:yes Return return:yes Assign"
  },
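A quick sketch of the in-place `out` behavior of this link (the module is private to scikit-learn, so the import path may change):

```python
import numpy as np
from sklearn._loss.link import IdentityLink

link = IdentityLink()
y = np.array([0.5, 2.0])
out = np.empty_like(y)
assert link.link(y, out=out) is out  # values copied into the provided buffer
assert link.inverse(y) is y          # without out, the input is returned as-is
```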
  {
    "library": "numpy",
    "name": "as_terms",
    "source_code": "def as_terms(obj):\n    if isinstance(obj, Expr):\n        obj = normalize(obj)\n        if obj.op is Op.TERMS:\n            return obj\n        if obj.op is Op.INTEGER:\n            return Expr(Op.TERMS, {as_integer(1, obj.data[1]): obj.data[0]})\n        if obj.op is Op.REAL:\n            return Expr(Op.TERMS, {as_real(1, obj.data[1]): obj.data[0]})\n        return Expr(Op.TERMS, {obj: 1})\n    raise OpError(f'cannot convert {type(obj)} to terms Expr')",
    "docstring": "Return expression as TERMS expression.",
    "type": "function",
    "file_path": "numpy\\numpy\\f2py\\symbolic.py",
    "ast_data": "FunctionDef name:as_terms arg:obj arguments arg If Call Assign Call If Compare Return return:yes If Compare Return return:yes Call Call If Compare Return return:yes Call Call Return return:yes Call Raise Call Call"
  },
  {
    "library": "tensorflow",
    "name": "TerminateOnNaN",
    "source_code": "class TerminateOnNaN(Callback):\n\n    def __init__(self):\n        super(TerminateOnNaN, self).__init__()\n        self._supports_tf_logs = True\n\n    def on_batch_end(self, batch, logs=None):\n        logs = logs or {}\n        loss = logs.get('loss')\n        if loss is not None:\n            loss = tf_utils.sync_to_numpy_or_python_type(loss)\n            if np.isnan(loss) or np.isinf(loss):\n                print('Batch %d: Invalid loss, terminating training' % batch)\n                self.model.stop_training = True",
    "docstring": "Callback that terminates training when a NaN loss is encountered.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\callbacks.py",
    "ast_data": "ClassDef name:TerminateOnNaN FunctionDef name:__init__ arg:self arguments arg Call Call Assign FunctionDef name:on_batch_end arg:self arg:batch arg:logs arguments arg arg arg Assign BoolOp Assign Call If Compare Assign Call If BoolOp Call Call Call Assign"
  },
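A usage sketch with the public Keras alias of this callback; the tiny model and random data are illustrative only:

```python
import numpy as np
import tensorflow as tf

model = tf.keras.Sequential([tf.keras.layers.Dense(1, input_shape=(4,))])
model.compile(optimizer="sgd", loss="mse")

x, y = np.random.rand(32, 4), np.random.rand(32, 1)
# Training stops as soon as a batch produces a NaN or Inf loss.
model.fit(x, y, epochs=5, callbacks=[tf.keras.callbacks.TerminateOnNaN()])
```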
  {
    "library": "scikit-learn",
    "name": "_parallel_compute_tree_depths",
    "source_code": "def _parallel_compute_tree_depths(tree, X, features, tree_decision_path_lengths, tree_avg_path_lengths, depths, lock):\n    if features is None:\n        X_subset = X\n    else:\n        X_subset = X[:, features]\n    leaves_index = tree.apply(X_subset, check_input=False)\n    with lock:\n        depths += tree_decision_path_lengths[leaves_index] + tree_avg_path_lengths[leaves_index] - 1.0",
    "docstring": "Parallel computation of isolation tree depth.",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\ensemble\\_iforest.py",
    "ast_data": "FunctionDef name:_parallel_compute_tree_depths arg:tree arg:X arg:features arg:tree_decision_path_lengths arg:tree_avg_path_lengths arg:depths arg:lock arguments arg arg arg arg arg arg arg If Compare Assign Assign Assign Call With"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, element_type, dimensions, layout=None):\n    self.message = xla_data_pb2.ShapeProto()\n    self.message.element_type = element_type\n    if element_type == xla_data_pb2.TUPLE:\n        if not all((isinstance(subshape, Shape) for subshape in dimensions)):\n            raise ValueError('XLA tuple requires sequence of Shape objects as dimensions')\n        self._tuple_shapes = tuple(dimensions)\n        for component_shape in self._tuple_shapes:\n            component_message = self.message.tuple_shapes.add()\n            component_message.CopyFrom(component_shape.message)\n    else:\n        self.message.dimensions.extend(dimensions)\n        if layout is None:\n            layout = list(reversed(range(len(dimensions))))\n        self.message.layout.minor_to_major.extend(layout)",
    "docstring": "Creates a new XLA Shape. Args: element_type: element type from xla_data_pb2. dimensions: sequence of dimensions sizes (integers), or sequence of Shapes in the case of a tuple, i.e. when element_type is TUPLE. layout: optional minor_to_major sequence for layout. If not given, the default major-to-minor layout is used. Raises: ValueError: if element_type is TUPLE but dimensions are not Shape objects.",
    "type": "method",
    "file_path": "tensorflow\\third_party\\xla\\xla\\python_api\\xla_shape.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:element_type arg:dimensions arg:layout arguments arg arg arg arg Assign Call Assign If Compare If Call Call Raise Call Assign Call For Assign Call Call Call If Compare Assign Call Call Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_convert_model_from_bytearray_to_object",
    "source_code": "def _convert_model_from_bytearray_to_object(model_bytearray):\n    model_object = schema_fb.Model.GetRootAsModel(model_bytearray, 0)\n    model_object = schema_fb.ModelT.InitFromObj(model_object)\n    model_object = copy.deepcopy(model_object)\n    return model_object",
    "docstring": "Converts a tflite model from a bytearray into a parsable object.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\lite\\python\\util.py",
    "ast_data": "FunctionDef name:_convert_model_from_bytearray_to_object arg:model_bytearray arguments arg Assign Call Assign Call Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "eye",
    "source_code": "@tf_export('eye', 'linalg.eye')\n@dispatch.add_dispatch_support\ndef eye(num_rows, num_columns=None, batch_shape=None, dtype=dtypes.float32, name=None):\n    return linalg_ops_impl.eye(num_rows, num_columns=num_columns, batch_shape=batch_shape, dtype=dtype, name=name)",
    "docstring": "Construct an identity matrix, or a batch of matrices. See also , , , . Args: num_rows: Non-negative scalar giving the number of rows in each batch matrix. num_columns: Optional non-negative scalar giving the number of columns in each batch matrix. Defaults to . batch_shape: A list or tuple of Python integers or a 1-D . If provided, the returned will have leading batch dimensions of this shape. dtype: The type of an element in the resulting name: A name for this . Defaults to \"eye\". Returns: A of shape",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\linalg_ops.py",
    "ast_data": "FunctionDef name:eye arg:num_rows arg:num_columns arg:batch_shape arg:dtype arg:name arguments arg arg arg arg arg Return return:yes Call Call"
  },
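A few representative calls, based on the signature above:

```python
import tensorflow as tf

tf.eye(2)                    # 2x2 identity matrix
tf.eye(2, num_columns=3)     # 2x3 matrix with ones on the main diagonal
tf.eye(2, batch_shape=[3])   # batch of three 2x2 identity matrices
```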
  {
    "library": "sphinx",
    "name": "rstdim_to_latexdim",
    "source_code": "def rstdim_to_latexdim(width_str: str, scale: int=100) -> str:\n    match = re.match('^(\\\\d*\\\\.?\\\\d*)\\\\s*(\\\\S*)$', width_str)\n    if not match:\n        raise ValueError\n    res = width_str\n    amount, unit = match.groups()[:2]\n    if scale == 100:\n        float(amount)\n        if unit in {'', 'px'}:\n            res = '%s\\\\sphinxpxdimen' % amount\n        elif unit == 'pt':\n            res = '%sbp' % amount\n        elif unit == '%':\n            res = '%.3f\\\\linewidth' % (float(amount) / 100.0)\n    else:\n        amount_float = float(amount) * scale / 100.0\n        if unit in {'', 'px'}:\n            res = '%.5f\\\\sphinxpxdimen' % amount_float\n        elif unit == 'pt':\n            res = '%.5fbp' % amount_float\n        elif unit == '%':\n            res = '%.5f\\\\linewidth' % (amount_float / 100.0)\n        else:\n            res = f'{amount_float:.5f}{unit}'\n    return res",
    "docstring": "Convert with rst length to LaTeX length.",
    "type": "function",
    "file_path": "sphinx\\sphinx\\writers\\latex.py",
    "ast_data": "FunctionDef name:rstdim_to_latexdim arg:width_str arg:scale arguments arg arg Assign Call If Raise Assign Assign Call If Compare Call If Compare Assign If Compare Assign If Compare Assign Call Assign Call If Compare Assign If Compare Assign If Compare Assign Assign Return return:yes"
  },
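Illustrative conversions traced through the branches above; this helper is internal to the Sphinx LaTeX writer, so the import path may change:

```python
from sphinx.writers.latex import rstdim_to_latexdim

rstdim_to_latexdim("10pt")           # -> 10bp (TeX big points)
rstdim_to_latexdim("50%")            # -> 0.500\linewidth
rstdim_to_latexdim("4cm", scale=50)  # -> 2.00000cm (scaled to 50%)
```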
  {
    "library": "pandas",
    "name": "_get_grouper",
    "source_code": "def _get_grouper(self, obj: NDFrameT, validate: bool=True) -> tuple[ops.BaseGrouper, NDFrameT]:\n    obj, _, _ = self._set_grouper(obj)\n    grouper, _, obj = get_grouper(obj, [self.key], level=self.level, sort=self.sort, validate=validate, dropna=self.dropna)\n    return (grouper, obj)",
    "docstring": "Parameters ---------- obj : Series or DataFrame validate : bool, default True if True, validate the grouper Returns ------- a tuple of grouper, obj (possibly sorted)",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\groupby\\grouper.py",
    "ast_data": "FunctionDef name:_get_grouper arg:self arg:obj arg:validate arguments arg arg arg Assign Call Assign Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "set_markeredgewidth",
    "source_code": "def set_markeredgewidth(self, ew):\n    ew = mpl._val_or_rc(ew, 'lines.markeredgewidth')\n    if self._markeredgewidth != ew:\n        self.stale = True\n    self._markeredgewidth = ew",
    "docstring": "Set the marker edge width in points. Parameters ---------- ew : float Marker edge width, in points.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\lines.py",
    "ast_data": "FunctionDef name:set_markeredgewidth arg:self arg:ew arguments arg arg Assign Call If Compare Assign Assign"
  },
  {
    "library": "pytorch",
    "name": "ReturnValueHandler",
    "source_code": "class ReturnValueHandler:\n\n    def __init__(self, lazy_out_list):\n        self.index: list[list[int]] = []\n        self.total_count = len(lazy_out_list)\n        tensor_id_to_idx: dict[int, int] = {}\n        for dup_idx, lazy_tensor in enumerate(lazy_out_list):\n            uniq_idx = tensor_id_to_idx.get(id(lazy_tensor), None)\n            if uniq_idx is not None:\n                self.index[uniq_idx].append(dup_idx)\n            else:\n                uniq_idx = len(self.index)\n                self.index.append([dup_idx])\n                tensor_id_to_idx[id(lazy_tensor)] = uniq_idx\n\n    def duplicate_eager_tensors(self, eager_tensor_list):\n        duplicated_list = [None] * self.total_count\n        assert len(eager_tensor_list) == len(self.index)\n        for uniq_idx, eager_tensor in enumerate(eager_tensor_list):\n            for dup_idx in self.index[uniq_idx]:\n                duplicated_list[dup_idx] = eager_tensor\n        return duplicated_list",
    "docstring": "When ltc_sync_multi is called on multi tensors, the compiled graph will contain output only for unique tensors - if a tensor appears multiple times in the input to _ltc_sync_multi, only the first occurance matters. However from python level, we still expect multi tensors returned with duplciation even if the TS graph dedup the output. e.g. for method: def forward(self, a): return a, a the TS graph captured by LTC will return a single tensor, but Python method expects 2. This class dedup the lazy tensors first to get the index that will be used to duplicate the eager tensors later.",
    "type": "class",
    "file_path": "pytorch\\torch\\_lazy\\extract_compiled_graph.py",
    "ast_data": "ClassDef name:ReturnValueHandler FunctionDef name:__init__ arg:self arg:lazy_out_list arguments arg arg Assign Call For Call Assign Call Call If Compare Call Assign Call Call Assign Call FunctionDef name:duplicate_eager_tensors arg:self arg:eager_tensor_list arguments arg arg Assign Compare Call Call For Call For Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "colocate_vars_with",
    "source_code": "@doc_controls.do_not_doc_inheritable\n@deprecated(None, 'use extended.colocate_vars_with() instead.')\ndef colocate_vars_with(self, colocate_with_variable):\n    return self._extended.colocate_vars_with(colocate_with_variable)",
    "docstring": "DEPRECATED: use extended.colocate_vars_with() instead.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\distribute_lib.py",
    "ast_data": "FunctionDef name:colocate_vars_with arg:self arg:colocate_with_variable arguments arg arg Return return:yes Call Call"
  },
  {
    "library": "pandas",
    "name": "_coerce_to_type",
    "source_code": "def _coerce_to_type(x: Index) -> tuple[Index, DtypeObj | None]:\n    dtype: DtypeObj | None = None\n    if _is_dt_or_td(x.dtype):\n        dtype = x.dtype\n    elif is_bool_dtype(x.dtype):\n        x = x.astype(np.int64)\n    elif isinstance(x.dtype, ExtensionDtype) and is_numeric_dtype(x.dtype):\n        x_arr = x.to_numpy(dtype=np.float64, na_value=np.nan)\n        x = Index(x_arr)\n    return (Index(x), dtype)",
    "docstring": "if the passed data is of datetime/timedelta, bool or nullable int type, this method converts it to numeric so that cut or qcut method can handle it",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\reshape\\tile.py",
    "ast_data": "FunctionDef name:_coerce_to_type arg:x arguments arg If Call Assign If Call Assign Call If BoolOp Call Call Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "numpy",
    "name": "flat",
    "source_code": "@property\ndef flat(self):\n    for block in self:\n        yield from block.flat",
    "docstring": "A 1-D flat iterator for Arrayterator objects. This iterator returns elements of the array to be iterated over in one by one. It is similar to . See Also -------- lib.Arrayterator flatiter Examples -------- >>> a = np.arange(3 * 4 * 5 * 6).reshape(3, 4, 5, 6) >>> a_itor = np.lib.Arrayterator(a, 2) >>> for subarr in a_itor.flat: ... if not subarr: ... print(subarr, type(subarr)) ... 0",
    "type": "method",
    "file_path": "numpy\\numpy\\lib\\_arrayterator_impl.py",
    "ast_data": "FunctionDef name:flat arg:self arguments arg For"
  },
  {
    "library": "django",
    "name": "clone",
    "source_code": "def clone(self):\n    return self.__class__(app_label=self.app_label, name=self.name, fields=dict(self.fields), options=dict(self.options), bases=self.bases, managers=list(self.managers))",
    "docstring": "Return an exact copy of this ModelState.",
    "type": "method",
    "file_path": "django\\django\\db\\migrations\\state.py",
    "ast_data": "FunctionDef name:clone arg:self arguments arg Return return:yes Call Call Call Call"
  },
  {
    "library": "django",
    "name": "__init__",
    "source_code": "def __init__(self, *args, **kwargs):\n    if not args:\n        super().__init__(self._create_polygon(0, None), **kwargs)\n        return\n    ext_ring, *init_holes = args\n    n_holes = len(init_holes)\n    if n_holes == 1 and isinstance(init_holes[0], (tuple, list)):\n        if not init_holes[0]:\n            init_holes = ()\n            n_holes = 0\n        elif isinstance(init_holes[0][0], LinearRing):\n            init_holes = init_holes[0]\n            n_holes = len(init_holes)\n    polygon = self._create_polygon(n_holes + 1, [ext_ring, *init_holes])\n    super().__init__(polygon, **kwargs)",
    "docstring": "Initialize on an exterior ring and a sequence of holes (both instances may be either LinearRing instances, or a tuple/list that may be constructed into a LinearRing). Examples of initialization, where shell, hole1, and hole2 are valid LinearRing geometries: >>> from django.contrib.gis.geos import LinearRing, Polygon >>> shell = hole1 = hole2 = LinearRing() >>> poly = Polygon(shell, hole1, hole2) >>> poly = Polygon(shell, (hole1, hole2)) >>> # Example where a tuple parameters are used: >>> poly = Polygon(((0, 0), (0, 10), (10, 10), (10, 0), (0, 0)), ... ((4, 4), (4, 6), (6, 6), (6, 4), (4, 4)))",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\geos\\polygon.py",
    "ast_data": "FunctionDef name:__init__ arg:self arguments arg arg arg If Call Call Call Return return:no Assign Assign Call If BoolOp Compare Call If Assign Assign If Call Assign Assign Call Assign Call Call Call"
  },
  {
    "library": "matplotlib",
    "name": "__init__",
    "source_code": "def __init__(self, markerline_stemlines_baseline, **kwargs):\n    markerline, stemlines, baseline = markerline_stemlines_baseline\n    self.markerline = markerline\n    self.stemlines = stemlines\n    self.baseline = baseline\n    super().__init__(markerline_stemlines_baseline, **kwargs)",
    "docstring": "Parameters ---------- markerline_stemlines_baseline : tuple Tuple of `.Line2D.LineCollection.Line2D` of the baseline.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\container.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:markerline_stemlines_baseline arguments arg arg arg Assign Assign Assign Assign Call Call"
  },
  {
    "library": "pytorch",
    "name": "aten_group_norm",
    "source_code": "@onnx_impl(aten.group_norm.default, trace_only=True, opset_introduced=21)\ndef aten_group_norm(input: TFloat, num_groups: int, weight: Optional[TFloat]=None, bias: Optional[TFloat]=None, eps: float=1e-05, cudnn_enabled: bool=True) -> TFloat:\n    c = op21.Shape(input, start=1, end=2)\n    if weight is None:\n        weight = op21.ConstantOfShape(c, value=ir.tensor(1.0, dtype=input.dtype))\n    if bias is None:\n        bias = op21.ConstantOfShape(c, value=ir.tensor(0.0, dtype=input.dtype))\n    return op21.GroupNormalization(input, weight, bias, epsilon=eps, num_groups=num_groups)",
    "docstring": "group_norm(Tensor input, int num_groups, Tensor? weight=None, Tensor? bias=None, float eps=1e-05, bool cudnn_enabled=True) -> Tensor",
    "type": "function",
    "file_path": "pytorch\\torch\\onnx\\_internal\\exporter\\_torchlib\\ops\\nn.py",
    "ast_data": "FunctionDef name:aten_group_norm arg:input arg:num_groups arg:weight arg:bias arg:eps arg:cudnn_enabled arguments arg arg arg arg arg arg Assign Call If Compare Assign Call Call If Compare Assign Call Call Return return:yes Call Call"
  },
  {
    "library": "sphinx",
    "name": "ispartial",
    "source_code": "def ispartial(obj: Any) -> TypeIs[partial[Any] | partialmethod[Any]]:\n    return isinstance(obj, partial | partialmethod)",
    "docstring": "Check if the object is a partial function or method.",
    "type": "function",
    "file_path": "sphinx\\sphinx\\util\\inspect.py",
    "ast_data": "FunctionDef name:ispartial arg:obj arguments arg Return return:yes Call"
  },
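A quick demonstration of the predicate:

```python
from functools import partial
from sphinx.util.inspect import ispartial

ispartial(partial(int, base=2))  # True
ispartial(int)                   # False
```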
  {
    "library": "scikit-learn",
    "name": "is_clusterer",
    "source_code": "def is_clusterer(estimator):\n    if isinstance(estimator, type):\n        warnings.warn(f'passing a class to {print(inspect.stack()[0][3])} is deprecated and will be removed in 1.8. Use an instance of the class instead.', FutureWarning)\n        return getattr(estimator, '_estimator_type', None) == 'clusterer'\n    return get_tags(estimator).estimator_type == 'clusterer'",
    "docstring": "Return True if the given estimator is (probably) a clusterer. .. versionadded:: 1.6 Parameters ---------- estimator : object Estimator object to test. Returns ------- out : bool True if estimator is a clusterer and False otherwise. Examples -------- >>> from sklearn.base import is_clusterer >>> from sklearn.cluster import KMeans >>> from sklearn.svm import SVC, SVR >>> classifier = SVC() >>> regressor = SVR() >>> kmeans = KMeans() >>> is_clusterer(classifier) False >>> is_clusterer(regressor) False >>> is_clusterer(kmeans) True",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\base.py",
    "ast_data": "FunctionDef name:is_clusterer arg:estimator arguments arg If Call Call Call Call Return return:yes Compare Call Return return:yes Compare Call"
  },
  {
    "library": "django",
    "name": "ask_unique_callable_default_addition",
    "source_code": "def ask_unique_callable_default_addition(self, field_name, model_name):\n    return None",
    "docstring": "Adding a unique field with a callable default.",
    "type": "method",
    "file_path": "django\\django\\db\\migrations\\questioner.py",
    "ast_data": "FunctionDef name:ask_unique_callable_default_addition arg:self arg:field_name arg:model_name arguments arg arg arg Return return:no"
  },
  {
    "library": "tensorflow",
    "name": "stop_server",
    "source_code": "def stop_server(self, grace=1.0):\n    self._server_lock.acquire()\n    try:\n        if not self._server_started:\n            raise ValueError('Server has not started running')\n        if self._stop_requested:\n            raise ValueError('Server has already stopped')\n        self._stop_requested = True\n        return self.server.stop(grace=grace)\n    finally:\n        self._server_lock.release()",
    "docstring": "Request server stopping. Once stopped, server cannot be stopped or started again. This method is non-blocking. Call on the returned event to block until the server has completely stopped. Args: grace: Grace period in seconds to be used when calling . Raises: ValueError: If server stop has already been requested, or if the server has not started running yet. Returns: A threading.Event that will be set when the server has completely stopped.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\debug\\lib\\grpc_debug_server.py",
    "ast_data": "FunctionDef name:stop_server arg:self arg:grace arguments arg arg Call Try If Raise Call If Raise Call Assign Return return:yes Call Call"
  },
  {
    "library": "django",
    "name": "TransactionManagementError",
    "source_code": "class TransactionManagementError(ProgrammingError):\n    pass",
    "docstring": "Transaction management is used improperly.",
    "type": "class",
    "file_path": "django\\django\\db\\transaction.py",
    "ast_data": "ClassDef name:TransactionManagementError"
  },
  {
    "library": "tensorflow",
    "name": "convert_variables_to_constants",
    "source_code": "@deprecation.deprecated(date=None, instructions='This API was designed for TensorFlow v1. See https://www.tensorflow.org/guide/migrate for instructions on how to migrate your code to TensorFlow v2.')\n@tf_export(v1=['graph_util.convert_variables_to_constants'])\ndef convert_variables_to_constants(sess, input_graph_def, output_node_names, variable_names_whitelist=None, variable_names_blacklist=None):\n    ret = convert_variables_to_constants_from_session_graph(session=sess, graph_def=input_graph_def, output_node_names=output_node_names, variable_names_allowlist=variable_names_whitelist, variable_names_denylist=variable_names_blacklist)\n    return ret",
    "docstring": "Replaces all the variables in a graph with constants of the same values. If you have a trained graph containing Variable ops, it can be convenient to convert them all to Const ops holding the same values. This makes it possible to describe the network fully with a single GraphDef file, and allows the removal of a lot of ops related to loading and saving the variables. Args: sess: Active TensorFlow session containing the variables. input_graph_def: GraphDef object holding the network. output_node_names: List of name strings for the result nodes of the graph. variable_names_whitelist: The set of variable names to convert (by default, all variables are converted). variable_names_blacklist: The set of variable names to omit converting to constants. Returns: GraphDef containing a simplified version of the original. Raises: RuntimeError: if a DT_RESOURCE op is found whose ancestor Variables are both denylisted AND whitelisted for freezing.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\convert_to_constants.py",
    "ast_data": "FunctionDef name:convert_variables_to_constants arg:sess arg:input_graph_def arg:output_node_names arg:variable_names_whitelist arg:variable_names_blacklist arguments arg arg arg arg arg Assign Call Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "merge_dims",
    "source_code": "def merge_dims(self, outer_axis, inner_axis):\n    outer_axis = array_ops.get_positive_axis(outer_axis, self.shape.rank, axis_name='outer_axis', ndims_name='rank(self)')\n    inner_axis = array_ops.get_positive_axis(inner_axis, self.shape.rank, axis_name='inner_axis', ndims_name='rank(self)')\n    if not outer_axis <= inner_axis:\n        raise ValueError('Expected outer_axis (%d) to be less than or equal to inner_axis (%d)' % (outer_axis, inner_axis))\n    return _merge_dims(self, outer_axis, inner_axis)",
    "docstring": "Merges outer_axis...inner_axis into a single dimension. Returns a copy of this RaggedTensor with the specified range of dimensions flattened into a single dimension, with elements in row-major order. >>> st = tf.experimental.StructuredTensor.from_pyval( ... [[{'foo': 12}, {'foo': 33}], [], [{'foo': 99}]]) >>> st.merge_dims(0, 1) Args: outer_axis: : The first dimension in the range of dimensions to merge. May be negative (to index from the last dimension). inner_axis: : The last dimension in the range of dimensions to merge. May be negative (to index from the last dimension). Returns: A copy of this tensor, with the specified dimensions merged into a single dimension. The shape of the returned tensor will be , where is the total number of slices in the merged dimensions.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\structured\\structured_tensor.py",
    "ast_data": "FunctionDef name:merge_dims arg:self arg:outer_axis arg:inner_axis arguments arg arg arg Assign Call Assign Call If Compare Raise Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "dim_reduction",
    "source_code": "def dim_reduction(ndim: int, dim_or_dims: Optional[DimsType], keepdim: bool) -> DimMap:\n    if dim_or_dims is None:\n        dim_or_dims = tuple(range(ndim))\n    if isinstance(dim_or_dims, int):\n        dim_or_dims = (dim_or_dims,)\n    dim_or_dims = tuple((d if d >= 0 else d + ndim for d in dim_or_dims))\n    return tuple((InputDim(i) if i not in dim_or_dims else Singleton() for i in range(ndim) if i not in dim_or_dims or keepdim))",
    "docstring": "General fallback for reduction ops where Partial() does not apply. This will cause incoming tensor to be replicated on the reducing dimensions.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\tensor\\_ops\\_view_ops.py",
    "ast_data": "FunctionDef name:dim_reduction arg:ndim arg:dim_or_dims arg:keepdim arguments arg arg arg If Compare Assign Call Call If Call Assign Assign Call Compare Return return:yes Call Compare Call Call Call BoolOp Compare"
  },
  {
    "library": "django",
    "name": "_resolve_output_field",
    "source_code": "def _resolve_output_field(self):\n    sources_iter = (source for source in self.get_source_fields() if source is not None)\n    for output_field in sources_iter:\n        for source in sources_iter:\n            if not isinstance(output_field, source.__class__):\n                raise FieldError('Expression contains mixed types: %s, %s. You must set output_field.' % (output_field.__class__.__name__, source.__class__.__name__))\n        return output_field",
    "docstring": "Attempt to infer the output type of the expression. As a guess, if the output fields of all source fields match then simply infer the same type here. If a source's output field resolves to None, exclude it from this check. If all sources are None, then an error is raised higher up the stack in the output_field property.",
    "type": "method",
    "file_path": "django\\django\\db\\models\\expressions.py",
    "ast_data": "FunctionDef name:_resolve_output_field arg:self arguments arg Assign Call Compare For For If Call Raise Call Return return:yes"
  },
  {
    "library": "pandas",
    "name": "parse_data",
    "source_code": "def parse_data(self) -> list[dict[str, str | None]]:\n    from lxml.etree import iterparse\n    if self.iterparse is None:\n        self.xml_doc = self._parse_doc(self.path_or_buffer)\n        if self.stylesheet:\n            self.xsl_doc = self._parse_doc(self.stylesheet)\n            self.xml_doc = self._transform_doc()\n        elems = self._validate_path()\n    self._validate_names()\n    xml_dicts: list[dict[str, str | None]] = self._parse_nodes(elems) if self.iterparse is None else self._iterparse_nodes(iterparse)\n    return xml_dicts",
    "docstring": "Parse xml data. This method will call the other internal methods to validate ``, names, optionally parse and run XSLT, and parse original or transformed XML and return specific nodes.",
    "type": "method",
    "file_path": "pandas\\pandas\\io\\xml.py",
    "ast_data": "FunctionDef name:parse_data arg:self arguments arg If Compare Assign Call If Assign Call Assign Call Assign Call Call Compare Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "GaussianNLLLoss",
    "source_code": "class GaussianNLLLoss(_Loss):\n    __constants__ = ['full', 'eps', 'reduction']\n    full: bool\n    eps: float\n\n    def __init__(self, *, full: bool=False, eps: float=1e-06, reduction: str='mean') -> None:\n        super().__init__(None, None, reduction)\n        self.full = full\n        self.eps = eps\n\n    def forward(self, input: Tensor, target: Tensor, var: Union[Tensor, float]) -> Tensor:\n        return F.gaussian_nll_loss(input, target, var, full=self.full, eps=self.eps, reduction=self.reduction)",
    "docstring": "Gaussian negative log likelihood loss. The targets are treated as samples from Gaussian distributions with expectations and variances predicted by the neural network. For a `epsfull(N, *)(*)*(N, *)(*)(N, *)(*)reductionreduction(N, *)` is ignored with respect to autograd, and so the gradients are unaffected by it. Reference: Nix, D. A. and Weigend, A. S., \"Estimating the mean and variance of the target probability distribution\", Proceedings of 1994 IEEE International Conference on Neural Networks (ICNN'94), Orlando, FL, USA, 1994, pp. 55-60 vol.1, doi: 10.1109/ICNN.1994.374138.",
    "type": "class",
    "file_path": "pytorch\\torch\\nn\\modules\\loss.py",
    "ast_data": "ClassDef name:GaussianNLLLoss Assign FunctionDef name:__init__ arg:self arguments arg arg arg arg Call Call Assign Assign FunctionDef name:forward arg:self arg:input arg:target arg:var arguments arg arg arg arg Return return:yes Call"
  },
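A usage sketch mirroring the signature above; shapes and data are illustrative:

```python
import torch
import torch.nn as nn

loss_fn = nn.GaussianNLLLoss()
input = torch.randn(5, 2, requires_grad=True)  # predicted means
target = torch.randn(5, 2)                     # observed samples
var = torch.ones(5, 2, requires_grad=True)     # predicted variances (positive)

loss = loss_fn(input, target, var)
loss.backward()
```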
  {
    "library": "tensorflow",
    "name": "call_replica_local_fn",
    "source_code": "def call_replica_local_fn(fn, *args, **kwargs):\n    strategy = None\n    if 'strategy' in kwargs:\n        strategy = kwargs.pop('strategy')\n    elif distribute_lib.has_strategy():\n        strategy = distribute_lib.get_strategy()\n    is_tpu = backend.is_tpu_strategy(strategy)\n    if not is_tpu and strategy and distribute_lib.in_cross_replica_context():\n        with strategy.scope():\n            return strategy.extended.call_for_each_replica(fn, args, kwargs)\n    return fn(*args, **kwargs)",
    "docstring": "Call a function that uses replica-local variables. This function correctly handles calling in a cross-replica context. Args: fn: The function to call. *args: Positional arguments to the . **kwargs: Keyword argument to . Returns: The result of calling .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\distribute\\distributed_training_utils.py",
    "ast_data": "FunctionDef name:call_replica_local_fn arg:fn arguments arg arg arg Assign If Compare Assign Call If Call Assign Call Assign Call If BoolOp Call With Call Return return:yes Call Return return:yes Call"
  },
  {
    "library": "kornia",
    "name": "auto_contrast",
    "source_code": "def auto_contrast(min_mag: float, max_mag: float) -> OperationBase:\n    return AutoContrast(1.0)",
    "docstring": "Return AutoConstrast op.",
    "type": "function",
    "file_path": "kornia\\kornia\\augmentation\\auto\\rand_augment\\ops.py",
    "ast_data": "FunctionDef name:auto_contrast arg:min_mag arg:max_mag arguments arg arg Return return:yes Call"
  },
  {
    "library": "numpy",
    "name": "is_free_format",
    "source_code": "def is_free_format(file):\n    result = 0\n    with open(file, encoding='latin1') as f:\n        line = f.readline()\n        n = 10000\n        if _has_f_header(line) or _has_fix_header(line):\n            n = 0\n        elif _has_f90_header(line):\n            n = 0\n            result = 1\n        while n > 0 and line:\n            line = line.rstrip()\n            if line and line[0] != '!':\n                n -= 1\n                if line[0] != '\\t' and _free_f90_start(line[:5]) or line[-1:] == '&':\n                    result = 1\n                    break\n            line = f.readline()\n    return result",
    "docstring": "Check if file is in free format Fortran.",
    "type": "function",
    "file_path": "numpy\\numpy\\distutils\\fcompiler\\__init__.py",
    "ast_data": "FunctionDef name:is_free_format arg:file arguments arg Assign With Call Assign Call Assign If BoolOp Call Call Assign If Call Assign Assign While BoolOp Compare Assign Call If BoolOp Compare If BoolOp BoolOp Compare Call Compare Assign Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "name",
    "source_code": "@property\ndef name(self):\n    return NotImplementedError",
    "docstring": "The name of the table.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\lookup_ops.py",
    "ast_data": "FunctionDef name:name arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "validate_kwargs",
    "source_code": "def validate_kwargs(kwargs, allowed_kwargs, error_message='Keyword argument not understood:'):\n    for kwarg in kwargs:\n        if kwarg not in allowed_kwargs:\n            raise TypeError(error_message, kwarg)",
    "docstring": "Checks that all keyword arguments are in the set of allowed keys.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\utils\\generic_utils.py",
    "ast_data": "FunctionDef name:validate_kwargs arg:kwargs arg:allowed_kwargs arg:error_message arguments arg arg arg For If Compare Raise Call"
  },
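A behavior sketch for this helper; the import path below follows the record's file_path and is internal to Keras, so it may change:

```python
from tensorflow.python.keras.utils.generic_utils import validate_kwargs

validate_kwargs({"units": 32}, allowed_kwargs={"units", "name"})  # passes silently
try:
    validate_kwargs({"foo": 1}, allowed_kwargs={"units"})
except TypeError as exc:
    print(exc)  # ('Keyword argument not understood:', 'foo')
```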
  {
    "library": "pytorch",
    "name": "check_invariants",
    "source_code": "def check_invariants(self, inputs: list[InputType]) -> tuple[CheckInvariantStatus, Callable[..., str]]:\n    _logger = functools.partial(log_data_ptr_mismatch, self.wrapped_function.placeholders, inputs, self.static_input_data_ptrs)\n    if not torch._C._tensors_data_ptrs_at_indices_equal(inputs, self.static_input_data_ptrs, self.cudagraph_managed_idxs):\n        status = CheckInvariantStatus.CudagraphManagedIdxMismatch\n        _logger = functools.partial(_logger, self.cudagraph_managed_idxs, status)\n        return (status, _logger)\n    if not self._check_liveness(self.expected_dead_indices_before_graph, self.path_weakrefs):\n        status = CheckInvariantStatus.ExpectedDeadIndicesBeforeGraphMismatch\n        return (status, lambda: f'{status}')\n    if self.rerecord_if_static_inputs_change and (not torch._C._tensors_data_ptrs_at_indices_equal(inputs, self.static_input_data_ptrs, self.static_input_idxs)):\n        status = CheckInvariantStatus.StaticInputIdxMismatch\n        _logger = functools.partial(_logger, self.static_input_idxs, status)\n        return (status, _logger)\n    for idx in self.cudagraph_managed_idxs:\n        if not self.preserved_aliased_inputs[idx]:\n            inputs[idx] = None\n    torch._check(self._check_liveness(self.expected_dead_indices_after_graph, self.path_weakrefs), lambda: 'TODO: graph recording observed an input tensor deallocate during graph  recording that did not occur during replay. Please file an issue.')\n    return (CheckInvariantStatus.SUCCESS, lambda: f'{CheckInvariantStatus.SUCCESS}')",
    "docstring": "Checks if this node can be run. The same pattern of tensor liveness, static inputs, and tensors managed in the cudagraph private pool must remain stable.",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\cudagraph_trees.py",
    "ast_data": "FunctionDef name:check_invariants arg:self arg:inputs arguments arg arg Assign Call If Call Assign Assign Call Return return:yes If Call Assign Return return:yes arguments If BoolOp Call Assign Assign Call Return return:yes For If Assign Call Call arguments Return return:yes arguments"
  },
  {
    "library": "matplotlib",
    "name": "format_eng",
    "source_code": "def format_eng(self, num):\n    return self.format_data(num)",
    "docstring": "Alias to EngFormatter.format_data",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\ticker.py",
    "ast_data": "FunctionDef name:format_eng arg:self arg:num arguments arg arg Return return:yes Call"
  },
  {
    "library": "django",
    "name": "geom_type",
    "source_code": "@property\ndef geom_type(self):\n    return OGRGeomType(capi.get_geom_type(self.ptr))",
    "docstring": "Return the Type for this Geometry.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\gdal\\geometries.py",
    "ast_data": "FunctionDef name:geom_type arg:self arguments arg Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, sess):\n    self._sess = sess\n    self._wrapped_is_stoppable = isinstance(self._sess, _WrappedSession)",
    "docstring": "Creates a . Args: sess: A or object. The wrapped session.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\training\\monitored_session.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:sess arguments arg arg Assign Assign Call"
  },
  {
    "library": "pygame",
    "name": "_parse_font_entry_darwin",
    "source_code": "def _parse_font_entry_darwin(name, filepath, fonts):\n    name = _simplename(name)\n    mods = ('regular',)\n    for mod in mods:\n        if mod in name:\n            name = name.replace(mod, '')\n    bold = italic = False\n    if 'bold' in name:\n        name = name.replace('bold', '')\n        bold = True\n    if 'italic' in name:\n        name = name.replace('italic', '')\n        italic = True\n    _addfont(name, bold, italic, filepath, fonts)",
    "docstring": "Parses a font entry for macOS :param name: The filepath without extensions or directories :param filepath: The full path to the font :param fonts: The pygame font dictionary to add the parsed font data to.",
    "type": "function",
    "file_path": "pygame\\src_py\\sysfont.py",
    "ast_data": "FunctionDef name:_parse_font_entry_darwin arg:name arg:filepath arg:fonts arguments arg arg arg Assign Call Assign For If Compare Assign Call Assign If Compare Assign Call Assign If Compare Assign Call Assign Call"
  },
  {
    "library": "numpy",
    "name": "feature_can_autovec",
    "source_code": "@_Cache.me\ndef feature_can_autovec(self, name):\n    assert isinstance(name, str)\n    d = self.feature_supported[name]\n    can = d.get('autovec', None)\n    if can is None:\n        valid_flags = [self.cc_test_flags([f]) for f in d.get('flags', [])]\n        can = valid_flags and any(valid_flags)\n    return can",
    "docstring": "check if the feature can be auto-vectorized by the compiler",
    "type": "method",
    "file_path": "numpy\\numpy\\distutils\\ccompiler_opt.py",
    "ast_data": "FunctionDef name:feature_can_autovec arg:self arg:name arguments arg arg Call Assign Assign Call If Compare Assign Call Call Assign BoolOp Call Return return:yes"
  },
  {
    "library": "cherrypy",
    "name": "start",
    "source_code": "def start(self):\n    if self.finalized:\n        self.bus.log('Already deamonized.')\n    if threading.active_count() != 1:\n        self.bus.log('There are %r active threads. Daemonizing now may cause strange failures.' % threading.enumerate(), level=30)\n    self.daemonize(self.stdin, self.stdout, self.stderr, self.bus.log)\n    self.finalized = True",
    "docstring": "Attempt to daemonize the process.",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\process\\plugins.py",
    "ast_data": "FunctionDef name:start arg:self arguments arg If Call If Compare Call Call Call Call Assign"
  },
  {
    "library": "scipy",
    "name": "_compute_lwork",
    "source_code": "def _compute_lwork(routine, *args, **kwargs):\n    dtype = getattr(routine, 'dtype', None)\n    int_dtype = getattr(routine, 'int_dtype', None)\n    ret = routine(*args, **kwargs)\n    if ret[-1] != 0:\n        raise ValueError(f'Internal work array size computation failed: {ret[-1]}')\n    if len(ret) == 2:\n        return _check_work_float(ret[0].real, dtype, int_dtype)\n    else:\n        return tuple((_check_work_float(x.real, dtype, int_dtype) for x in ret[:-1]))",
    "docstring": "Round floating-point lwork returned by lapack to integer. Several LAPACK routines compute optimal values for LWORK, which they return in a floating-point variable. However, for large values of LWORK, single-precision floating point is not sufficient to hold the exact value --- some LAPACK versions (>> from scipy.linalg import lapack >>> n = 5000 >>> s_r, s_lw = lapack.get_lapack_funcs(('sysvx', 'sysvx_lwork')) >>> lwork = lapack._compute_lwork(s_lw, n) >>> lwork 32000",
    "type": "function",
    "file_path": "scipy\\scipy\\linalg\\lapack.py",
    "ast_data": "FunctionDef name:_compute_lwork arg:routine arguments arg arg arg Assign Call Assign Call Assign Call If Compare Raise Call If Compare Call Return return:yes Call Return return:yes Call Call"
  },
  {
    "library": "scipy",
    "name": "_clip_prob",
    "source_code": "def _clip_prob(p):\n    return np.clip(p, 0.0, 1.0)",
    "docstring": "clips a probability to range 0<=p<=1.",
    "type": "function",
    "file_path": "scipy\\scipy\\stats\\_ksstats.py",
    "ast_data": "FunctionDef name:_clip_prob arg:p arguments arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "ones",
    "source_code": "@dispatch.add_dispatch_support\n@doc_controls.do_not_generate_docs\ndef ones(shape, dtype=None, name=None):\n    with ops.init_scope():\n        if dtype is None:\n            dtype = floatx()\n        tf_dtype = dtypes_module.as_dtype(dtype)\n        v = array_ops.ones(shape=shape, dtype=tf_dtype, name=name)\n        if py_all(v.shape.as_list()):\n            return variable(v, dtype=dtype, name=name)\n        return v",
    "docstring": "Instantiates an all-ones variable and returns it. Args: shape: Tuple of integers, shape of returned Keras variable. dtype: String, data type of returned Keras variable. name: String, name of returned Keras variable. Returns: A Keras variable, filled with . Note that if was symbolic, we cannot return a variable, and will return a dynamically-shaped tensor instead. Example: >>> kvar = tf.keras.backend.ones((3,4)) >>> tf.keras.backend.eval(kvar) array([[1., 1., 1., 1.], [1., 1., 1., 1.], [1., 1., 1., 1.]], dtype=float32)",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\backend.py",
    "ast_data": "FunctionDef name:ones arg:shape arg:dtype arg:name arguments arg arg arg With Call If Compare Assign Call Assign Call Assign Call If Call Call Return return:yes Call Return return:yes"
  },
  {
    "library": "scipy",
    "name": "pdf",
    "source_code": "def pdf(self, x, loc=None, shape=1, df=1, allow_singular=False):\n    dim, loc, shape, df = self._process_parameters(loc, shape, df)\n    x = self._process_quantiles(x, dim)\n    shape_info = _PSD(shape, allow_singular=allow_singular)\n    logpdf = self._logpdf(x, loc, shape_info.U, shape_info.log_pdet, df, dim, shape_info.rank)\n    return np.exp(logpdf)",
    "docstring": "Multivariate t-distribution probability density function. Parameters ---------- x : array_like Points at which to evaluate the probability density function. %(_mvt_doc_default_callparams)s Returns ------- pdf : Probability density function evaluated at . Examples -------- >>> from scipy.stats import multivariate_t >>> x = [0.4, 5] >>> loc = [0, 1] >>> shape = [[1, 0.1], [0.1, 1]] >>> df = 7 >>> multivariate_t.pdf(x, loc, shape, df) 0.00075713",
    "type": "method",
    "file_path": "scipy\\scipy\\stats\\_multivariate.py",
    "ast_data": "FunctionDef name:pdf arg:self arg:x arg:loc arg:shape arg:df arg:allow_singular arguments arg arg arg arg arg arg Assign Call Assign Call Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "zipf_gen",
    "source_code": "class zipf_gen(rv_discrete):\n\n    def _shape_info(self):\n        return [_ShapeInfo('a', False, (1, np.inf), (False, False))]\n\n    def _rvs(self, a, size=None, random_state=None):\n        return random_state.zipf(a, size=size)\n\n    def _argcheck(self, a):\n        return a > 1\n\n    def _pmf(self, k, a):\n        k = k.astype(np.float64)\n        Pk = 1.0 / special.zeta(a, 1) * k ** (-a)\n        return Pk\n\n    def _munp(self, n, a):\n        return xpx.apply_where(a > n + 1, (a, n), lambda a, n: special.zeta(a - n, 1) / special.zeta(a, 1), fill_value=np.inf)",
    "docstring": "A Zipf (Zeta) discrete random variable. %(before_notes)s See Also -------- zipfian Notes ----- The probability mass function for is: .. math:: f(k, a) = \\frac{1}{\\zeta(a) k^a} for :math:, :math:. takes :math: as shape parameter. :math: is the Riemann zeta function () The Zipf distribution is also known as the zeta distribution, which is a special case of the Zipfian distribution (). %(after_notes)s References ---------- .. [1] \"Zeta Distribution\", Wikipedia, %(example)s Confirm that is the large limit of . >>> import numpy as np >>> from scipy.stats import zipf, zipfian >>> k = np.arange(11) >>> np.allclose(zipf.pmf(k, a), zipfian.pmf(k, a, n=10000000)) True",
    "type": "class",
    "file_path": "scipy\\scipy\\stats\\_discrete_distns.py",
    "ast_data": "ClassDef name:zipf_gen FunctionDef name:_shape_info arg:self arguments arg Return return:yes Call FunctionDef name:_rvs arg:self arg:a arg:size arg:random_state arguments arg arg arg arg Return return:yes Call FunctionDef name:_argcheck arg:self arg:a arguments arg arg Return return:yes Compare FunctionDef name:_pmf arg:self arg:k arg:a arguments arg arg arg Assign Call Assign Call Return return:yes FunctionDef name:_munp arg:self arg:n arg:a arguments arg arg arg Return return:yes Call Compare arguments arg arg Call Call"
  },
  {
    "library": "kornia",
    "name": "__repeat_param_across_channels__",
    "source_code": "def __repeat_param_across_channels__(self, param: Tensor, frame_num: int) -> Tensor:\n    repeated = param[:, None, ...].repeat(1, frame_num, *[1] * len(param.shape[1:]))\n    return repeated.reshape(-1, *list(param.shape[1:]))",
    "docstring": "Repeat parameters across channels. The input is shaped as (B, ...), while to output (B * same_on_frame, ...), which to guarantee that the same transformation would happen for each frame. (B1, B2, ..., Bn) => (B1, ... B1, B2, ..., B2, ..., Bn, ..., Bn) | ch_size | | ch_size | ..., | ch_size |",
    "type": "method",
    "file_path": "kornia\\kornia\\augmentation\\container\\video.py",
    "ast_data": "FunctionDef name:__repeat_param_across_channels__ arg:self arg:param arg:frame_num arguments arg arg arg Assign Call Call Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "scatter",
    "source_code": "@tf_should_use.should_use_result\ndef scatter(self, indices, value, name=None):\n    return self._implementation.scatter(indices, value, name=name)",
    "docstring": "Scatter the values of a in specific indices of a . Args: indices: A taking values in . If the is not dynamic, . value: (N+1)-D. Tensor of type . The Tensor to unpack. name: A name for the operation (optional). Returns: A new TensorArray object with flow that ensures the scatter occurs. Use this object for all subsequent operations. Raises: ValueError: if the shape inference fails.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\tensor_array_ops.py",
    "ast_data": "FunctionDef name:scatter arg:self arg:indices arg:value arg:name arguments arg arg arg arg Return return:yes Call"
  },
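A small sketch with the public `tf.TensorArray` API in eager mode:

```python
import tensorflow as tf

ta = tf.TensorArray(tf.float32, size=3)
# Writes value[0] at index 0 and value[1] at index 2; returns a new
# TensorArray whose flow must be used for subsequent operations.
ta = ta.scatter([0, 2], tf.constant([[1.0], [2.0]]))
print(ta.read(2))  # tf.Tensor([2.], ...)
```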
  {
    "library": "matplotlib",
    "name": "set_thetamin",
    "source_code": "def set_thetamin(self, thetamin):\n    self.viewLim.x0 = np.deg2rad(thetamin)",
    "docstring": "Set the minimum theta limit in degrees.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\projections\\polar.py",
    "ast_data": "FunctionDef name:set_thetamin arg:self arg:thetamin arguments arg arg Assign Call"
  },
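Paired with `set_thetamax`, this restricts a polar plot to a wedge:

```python
import matplotlib.pyplot as plt

ax = plt.subplot(projection="polar")
ax.set_thetamin(0)   # lower angular limit, in degrees
ax.set_thetamax(90)  # upper angular limit: quarter-circle wedge
plt.show()
```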
  {
    "library": "django",
    "name": "find_template",
    "source_code": "def find_template(self, template_name, context):\n    history = context.render_context.setdefault(self.context_key, [self.origin])\n    template, origin = context.template.engine.find_template(template_name, skip=history)\n    history.append(origin)\n    return template",
    "docstring": "This is a wrapper around engine.find_template(). A history is kept in the render_context attribute between successive extends calls and passed as the skip argument. This enables extends to work recursively without extending the same template twice.",
    "type": "method",
    "file_path": "django\\django\\template\\loader_tags.py",
    "ast_data": "FunctionDef name:find_template arg:self arg:template_name arg:context arguments arg arg arg Assign Call Assign Call Call Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "get_metadata_routing",
    "source_code": "def get_metadata_routing(self):\n    router = MetadataRouter(owner=self.__class__.__name__)\n    for name, transformer in self.transformer_list:\n        router.add(**{name: transformer}, method_mapping=MethodMapping().add(caller='fit', callee='fit').add(caller='fit_transform', callee='fit_transform').add(caller='fit_transform', callee='fit').add(caller='fit_transform', callee='transform').add(caller='transform', callee='transform'))\n    return router",
    "docstring": "Get metadata routing of this object. Please check :ref: on how the routing mechanism works. .. versionadded:: 1.5 Returns ------- routing : MetadataRouter A :class: encapsulating routing information.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\pipeline.py",
    "ast_data": "FunctionDef name:get_metadata_routing arg:self arguments arg Assign Call For Call Call Call Call Call Call Call Return return:yes"
  },
  {
    "library": "django",
    "name": "StopFutureHandlers",
    "source_code": "class StopFutureHandlers(UploadFileException):\n    pass",
    "docstring": "Upload handlers that have handled a file and do not want future handlers to run should raise this exception instead of returning None.",
    "type": "class",
    "file_path": "django\\django\\core\\files\\uploadhandler.py",
    "ast_data": "ClassDef name:StopFutureHandlers"
  },
  {
    "library": "pytorch",
    "name": "byteswap",
    "source_code": "def byteswap(self, dtype):\n    elem_size = torch._utils._element_size(dtype)\n    if dtype.is_complex:\n        elem_size = max(int(elem_size / 2), 1)\n    self._byteswap(elem_size)",
    "docstring": "Swap bytes in underlying data.",
    "type": "method",
    "file_path": "pytorch\\torch\\storage.py",
    "ast_data": "FunctionDef name:byteswap arg:self arg:dtype arguments arg arg Assign Call If Assign Call Call Call"
  },
  {
    "library": "django",
    "name": "Options",
    "source_code": "class Options:\n\n    def __init__(self, table):\n        self.db_table = table\n        self.app_label = 'django_cache'\n        self.model_name = 'cacheentry'\n        self.verbose_name = 'cache entry'\n        self.verbose_name_plural = 'cache entries'\n        self.object_name = 'CacheEntry'\n        self.abstract = False\n        self.managed = True\n        self.proxy = False\n        self.swapped = False",
    "docstring": "A class that will quack like a Django model _meta class. This allows cache operations to be controlled by the router",
    "type": "class",
    "file_path": "django\\django\\core\\cache\\backends\\db.py",
    "ast_data": "ClassDef name:Options FunctionDef name:__init__ arg:self arg:table arguments arg arg Assign Assign Assign Assign Assign Assign Assign Assign Assign Assign"
  },
  {
    "library": "cryptography",
    "name": "private_bytes",
    "source_code": "@abc.abstractmethod\ndef private_bytes(self, encoding: _serialization.Encoding, format: _serialization.PrivateFormat, encryption_algorithm: _serialization.KeySerializationEncryption) -> bytes:\n    pass",
    "docstring": "The serialized bytes of the private key.",
    "type": "method",
    "file_path": "cryptography\\src\\cryptography\\hazmat\\primitives\\asymmetric\\x448.py",
    "ast_data": "FunctionDef name:private_bytes arg:self arg:encoding arg:format arg:encryption_algorithm arguments arg arg arg arg"
  },
  {
    "library": "tensorflow",
    "name": "_gather_trackable_data",
    "source_code": "def _gather_trackable_data(graph_view: graph_view_lib.ObjectGraphView, object_map: Mapping[base.Trackable, base.Trackable]) -> Tuple[List[_TrackableData], Dict[base.Trackable, int]]:\n    trackable_objects, node_paths = graph_view.breadth_first_traversal()\n    object_names = object_identity.ObjectIdentityDictionary()\n    for obj, path in node_paths.items():\n        object_names[obj] = trackable_utils.object_path_to_string(path)\n    node_ids = object_identity.ObjectIdentityDictionary()\n    for node_id, node in enumerate(trackable_objects):\n        node_ids[node] = node_id\n    slot_variables = util.serialize_slot_variables(trackable_objects=trackable_objects, node_ids=node_ids, object_names=object_names)\n    trackable_data = []\n    for trackable in trackable_objects:\n        children_proto = []\n        for child in graph_view.list_children(trackable):\n            children_proto.append(trackable_object_graph_pb2.TrackableObjectGraph.TrackableObject.ObjectReference(node_id=node_ids[child.ref], local_name=child.name))\n        trackable_data.append(_TrackableData(trackable, node_id=node_ids[trackable], object_name=object_names[trackable], children_proto=children_proto, slot_variable_proto=slot_variables.get(trackable, []), object_to_save=util.get_mapped_trackable(trackable, object_map)))\n    return (trackable_data, node_ids)",
    "docstring": "Returns a list of generated TrackableData based on the ObjectGraphView.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\checkpoint\\save_util.py",
    "ast_data": "FunctionDef name:_gather_trackable_data arg:graph_view arg:object_map arguments arg arg Assign Call Assign Call For Call Assign Call Assign Call For Call Assign Assign Call Assign For Assign For Call Call Call Call Call Call Call Return return:yes"
  },
  {
    "library": "kornia",
    "name": "ImageClassifierTrainer",
    "source_code": "class ImageClassifierTrainer(Trainer):\n\n    def compute_metrics(self, *args: Tensor) -> Dict[str, float]:\n        if len(args) != 2:\n            raise AssertionError\n        out, target = args\n        acc1, acc5 = accuracy(out, target, topk=(1, 5))\n        return {'top1': acc1.item(), 'top5': acc5.item()}",
    "docstring": "Module to be used for image classification purposes. The module subclasses :py:class: and overrides the :py:func: function implementing a standard :py:func: topk@[1, 5]. .. seealso:: Learn how to use this class in the following __.",
    "type": "class",
    "file_path": "kornia\\kornia\\x\\trainers.py",
    "ast_data": "ClassDef name:ImageClassifierTrainer FunctionDef name:compute_metrics arg:self arguments arg arg If Compare Call Raise Assign Assign Call Return return:yes Call Call"
  },
  {
    "library": "django",
    "name": "get_migration",
    "source_code": "def get_migration(self, app_label, name_prefix):\n    return self.graph.nodes[app_label, name_prefix]",
    "docstring": "Return the named migration or raise NodeNotFoundError.",
    "type": "method",
    "file_path": "django\\django\\db\\migrations\\loader.py",
    "ast_data": "FunctionDef name:get_migration arg:self arg:app_label arg:name_prefix arguments arg arg arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_MatrixSolveGrad",
    "source_code": "@ops.RegisterGradient('MatrixSolve')\ndef _MatrixSolveGrad(op: ops.Operation, grad):\n    a = op.inputs[0]\n    adjoint_a = op.get_attr('adjoint')\n    c = op.outputs[0]\n    grad_b = linalg_ops.matrix_solve(a, grad, adjoint=not adjoint_a)\n    if adjoint_a:\n        grad_a = -math_ops.matmul(c, grad_b, adjoint_b=True)\n    else:\n        grad_a = -math_ops.matmul(grad_b, c, adjoint_b=True)\n    return (grad_a, grad_b)",
    "docstring": "Gradient for MatrixSolve.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\linalg_grad.py",
    "ast_data": "FunctionDef name:_MatrixSolveGrad arg:op arg:grad arguments arg arg Assign Assign Call Assign Assign Call If Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "_solve_lsqr",
    "source_code": "def _solve_lsqr(X, y, *, alpha, fit_intercept=True, max_iter=None, tol=0.0001, X_offset=None, X_scale=None, sample_weight_sqrt=None):\n    if sample_weight_sqrt is None:\n        sample_weight_sqrt = np.ones(X.shape[0], dtype=X.dtype)\n    if sparse.issparse(X) and fit_intercept:\n        X_offset_scale = X_offset / X_scale\n        X1 = _get_rescaled_operator(X, X_offset_scale, sample_weight_sqrt)\n    else:\n        X1 = X\n    n_samples, n_features = X.shape\n    coefs = np.empty((y.shape[1], n_features), dtype=X.dtype)\n    n_iter = np.empty(y.shape[1], dtype=np.int32)\n    sqrt_alpha = np.sqrt(alpha)\n    for i in range(y.shape[1]):\n        y_column = y[:, i]\n        info = sp_linalg.lsqr(X1, y_column, damp=sqrt_alpha[i], atol=tol, btol=tol, iter_lim=max_iter)\n        coefs[i] = info[0]\n        n_iter[i] = info[2]\n    return (coefs, n_iter)",
    "docstring": "Solve Ridge regression via LSQR. We expect that y is always mean centered. If X is dense, we expect it to be mean centered such that we can solve ||y - Xw||_2^2 + alpha * ||w||_2^2 If X is sparse, we expect X_offset to be given such that we can solve ||y - (X - X_offset)w||_2^2 + alpha * ||w||_2^2 With sample weights S=diag(sample_weight), this becomes ||sqrt(S) (y - (X - X_offset) w)||_2^2 + alpha * ||w||_2^2 and we expect y and X to already be rescaled, i.e. sqrt(S) @ y, sqrt(S) @ X. In this case, X_offset is the sample_weight weighted mean of X before scaling by sqrt(S). The objective then reads ||y - (X - sqrt(S) X_offset) w)||_2^2 + alpha * ||w||_2^2",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\linear_model\\_ridge.py",
    "ast_data": "FunctionDef name:_solve_lsqr arg:X arg:y arguments arg arg arg arg arg arg arg arg arg If Compare Assign Call If BoolOp Call Assign Assign Call Assign Assign Assign Call Assign Call Assign Call For Call Assign Assign Call Assign Assign Return return:yes"
  },
  {
    "library": "django",
    "name": "sql_table_creation_suffix",
    "source_code": "def sql_table_creation_suffix(self):\n    return ''",
    "docstring": "SQL to append to the end of the test table creation statements.",
    "type": "method",
    "file_path": "django\\django\\db\\backends\\base\\creation.py",
    "ast_data": "FunctionDef name:sql_table_creation_suffix arg:self arguments arg Return return:yes"
  },
  {
    "library": "pandas",
    "name": "_safe_cast",
    "source_code": "@classmethod\ndef _safe_cast(cls, values: np.ndarray, dtype: np.dtype, copy: bool) -> np.ndarray:\n    return values.astype(dtype, copy=copy)",
    "docstring": "Safely cast the values to the given dtype. \"safe\" in this context means the casting is lossless.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\arrays\\floating.py",
    "ast_data": "FunctionDef name:_safe_cast arg:cls arg:values arg:dtype arg:copy arguments arg arg arg arg Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "message_event",
    "source_code": "def message_event(self, message, sender=None):\n    if sender is None:\n        sender = self\n    s = 'tool_message_event'\n    event = ToolManagerMessageEvent(s, sender, message)\n    self._callbacks.process(s, event)",
    "docstring": "Emit a .",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\backend_managers.py",
    "ast_data": "FunctionDef name:message_event arg:self arg:message arg:sender arguments arg arg arg If Compare Assign Assign Assign Call Call"
  },
  {
    "library": "pytorch",
    "name": "is_bf16_supported",
    "source_code": "def is_bf16_supported(including_emulation: bool=True):\n    if torch.version.hip:\n        return True\n    if not is_available():\n        return False\n    device = torch.cuda.current_device()\n    cuda_version = torch.version.cuda\n    if cuda_version is not None and torch.cuda.get_device_properties(device).major >= 8:\n        return True\n    if not including_emulation:\n        return False\n    return _check_bf16_tensor_supported(device)",
    "docstring": "Return a bool indicating if the current CUDA/ROCm device supports dtype bfloat16.",
    "type": "function",
    "file_path": "pytorch\\torch\\cuda\\__init__.py",
    "ast_data": "FunctionDef name:is_bf16_supported arg:including_emulation arguments arg If Return return:yes If Call Return return:yes Assign Call Assign If BoolOp Compare Compare Call Return return:yes If Return return:yes Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "weighted_moments_v2",
    "source_code": "@tf_export('nn.weighted_moments', v1=[])\n@dispatch.add_dispatch_support\ndef weighted_moments_v2(x, axes, frequency_weights, keepdims=False, name=None):\n    return weighted_moments(x=x, axes=axes, frequency_weights=frequency_weights, name=name, keep_dims=keepdims)",
    "docstring": "Returns the frequency-weighted mean and variance of . Args: x: A tensor. axes: 1-d tensor of int32 values; these are the axes along which to compute mean and variance. frequency_weights: A tensor of positive weights which can be broadcast with x. keepdims: Produce moments with the same dimensionality as the input. name: Name used to scope the operation. Returns: Two tensors: and .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\nn_impl.py",
    "ast_data": "FunctionDef name:weighted_moments_v2 arg:x arg:axes arg:frequency_weights arg:keepdims arg:name arguments arg arg arg arg arg Return return:yes Call Call"
  },
  {
    "library": "matplotlib",
    "name": "get_minorticklocs",
    "source_code": "def get_minorticklocs(self):\n    minor_locs = np.asarray(self.minor.locator())\n    if self.remove_overlapping_locs:\n        major_locs = self.major.locator()\n        transform = self._scale.get_transform()\n        tr_minor_locs = transform.transform(minor_locs)\n        tr_major_locs = transform.transform(major_locs)\n        lo, hi = sorted(transform.transform(self.get_view_interval()))\n        tol = (hi - lo) * 1e-05\n        mask = np.isclose(tr_minor_locs[:, None], tr_major_locs[None, :], atol=tol, rtol=0).any(axis=1)\n        minor_locs = minor_locs[~mask]\n    return minor_locs",
    "docstring": "Return this Axis' minor tick locations in data coordinates.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\axis.py",
    "ast_data": "FunctionDef name:get_minorticklocs arg:self arguments arg Assign Call Call If Assign Call Assign Call Assign Call Assign Call Assign Call Call Call Assign Assign Call Call Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "set",
    "source_code": "def set(self, value):\n    pywrap_tfe.TFE_MonitoringIntGaugeCellSet(self._cell, value)",
    "docstring": "Atomically set the value. Args: value: integer value.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\monitoring.py",
    "ast_data": "FunctionDef name:set arg:self arg:value arguments arg arg Call"
  },
  {
    "library": "tensorflow",
    "name": "get_inner_shape",
    "source_code": "def get_inner_shape(item):\n    if not isinstance(item, (list, tuple)) and np.ndim(item) == 0:\n        return ()\n    elif len(item) > 0:\n        return (len(item),) + get_inner_shape(item[0])\n    return (0,)",
    "docstring": "Returns the inner shape for a python list .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\ragged_factory_ops.py",
    "ast_data": "FunctionDef name:get_inner_shape arg:item arguments arg If BoolOp Call Compare Call Return return:no If Compare Call Return return:yes Call Call Return return:yes"
  },
  {
    "library": "pandas",
    "name": "eval",
    "source_code": "def eval(self, expr: str, *, inplace: bool=False, **kwargs) -> Any | None:\n    from pandas.core.computation.eval import eval as _eval\n    inplace = validate_bool_kwarg(inplace, 'inplace')\n    kwargs['level'] = kwargs.pop('level', 0) + 1\n    index_resolvers = self._get_index_resolvers()\n    column_resolvers = self._get_cleaned_column_resolvers()\n    resolvers = (column_resolvers, index_resolvers)\n    if 'target' not in kwargs:\n        kwargs['target'] = self\n    kwargs['resolvers'] = tuple(kwargs.get('resolvers', ())) + resolvers\n    return _eval(expr, inplace=inplace, **kwargs)",
    "docstring": "Evaluate a string describing operations on DataFrame columns. .. warning:: This method can run arbitrary code which can make you vulnerable to code injection if you pass user input to this function. Operates on columns only, not specific rows or elements. This allows to run arbitrary code, which can make you vulnerable to code injection if you pass user input to this function. Parameters ---------- expr : str The expression string to evaluate. You can refer to variables in the environment by prefixing them with an '@' character like `` + b`evaleval~pandas.DataFrame.eval~evalenhancing performance with eval C&C` character in front of the name: >>> local_var = 2 >>> df.eval(\"@local_var * A\") 0 2 1 4 2 6 3 8 4 10",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\frame.py",
    "ast_data": "FunctionDef name:eval arg:self arg:expr arguments arg arg arg arg Assign Call Assign Call Assign Call Assign Call Assign If Compare Assign Assign Call Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_from_row_partition",
    "source_code": "@classmethod\ndef _from_row_partition(cls, values, row_partition, validate=True):\n    if not isinstance(row_partition, RowPartition):\n        raise TypeError(f'Argument `row_partition` must be a RowPartition. Received {row_partition}.')\n    if not isinstance(validate, bool):\n        raise TypeError(f'Argument `validate` must have type bool. Received {validate}.')\n    values, row_partition = cls._convert_values_and_partition(values, row_partition, 'partition')\n    if row_partition._has_precomputed_value_rowids():\n        value_rowids_shape = row_partition.value_rowids().shape\n        values.shape[:1].assert_is_compatible_with(value_rowids_shape)\n    if validate:\n        msg = 'Arguments to _from_row_partition do not form a valid RaggedTensor'\n        nvals = _nrows(values, row_partition.dtype)\n        checks = [check_ops.assert_equal(math_ops.cast(row_partition.nvals(), row_partition.dtype), nvals, message=msg)]\n        if not isinstance(values, RaggedTensor):\n            checks.append(check_ops.assert_rank_at_least(values, 1))\n        row_partition = row_partition._with_dependencies(checks)\n    return cls(values=values, internal=True, row_partition=row_partition)",
    "docstring": "Creates a with a row partition. This is used as a way for RaggedTensors to share row partitions. The outer dimension of values must be equal to . Args: values: A potentially ragged tensor. row_partition: a : can be shared between tensors. validate: If true, then use assertions to check that the arguments form a valid . Returns: A . . . Raises: ValueError: If partition.nvals() != _nrows(values)",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\ragged_tensor.py",
    "ast_data": "FunctionDef name:_from_row_partition arg:cls arg:values arg:row_partition arg:validate arguments arg arg arg arg If Call Raise Call If Call Raise Call Assign Call If Call Assign Call Call If Assign Assign Call Assign Call Call Call If Call Call Call Assign Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "shape_safe_assign_variable_handle",
    "source_code": "def shape_safe_assign_variable_handle(handle, shape, value, name=None):\n    with _handle_graph(handle):\n        value_tensor = ops.convert_to_tensor(value)\n    shape.assert_is_compatible_with(value_tensor.shape)\n    return gen_resource_variable_ops.assign_variable_op(handle, value_tensor, name=name)",
    "docstring": "Helper that checks shape compatibility and assigns variable.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\resource_variable_ops.py",
    "ast_data": "FunctionDef name:shape_safe_assign_variable_handle arg:handle arg:shape arg:value arg:name arguments arg arg arg arg With Call Assign Call Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_measure_tensor_list_column_widths",
    "source_code": "def _measure_tensor_list_column_widths(self, data):\n    max_timestamp_width = 0\n    if data:\n        max_rel_time_ms = (data[-1].timestamp - self._debug_dump.t0) / 1000.0\n        max_timestamp_width = len('[%.3f] ' % max_rel_time_ms) + 1\n    max_timestamp_width = max(max_timestamp_width, len(self._TIMESTAMP_COLUMN_HEAD) + 1)\n    max_dump_size_width = 0\n    for dump in data:\n        dump_size_str = cli_shared.bytes_to_readable_str(dump.dump_size_bytes)\n        if len(dump_size_str) + 1 > max_dump_size_width:\n            max_dump_size_width = len(dump_size_str) + 1\n    max_dump_size_width = max(max_dump_size_width, len(self._DUMP_SIZE_COLUMN_HEAD) + 1)\n    max_op_type_width = 0\n    for dump in data:\n        op_type = self._debug_dump.node_op_type(dump.node_name)\n        if len(op_type) + 1 > max_op_type_width:\n            max_op_type_width = len(op_type) + 1\n    max_op_type_width = max(max_op_type_width, len(self._OP_TYPE_COLUMN_HEAD) + 1)\n    return (max_timestamp_width, max_dump_size_width, max_op_type_width)",
    "docstring": "Determine the maximum widths of the timestamp and op-type column. This method assumes that data is sorted in the default order, i.e., by ascending timestamps. Args: data: (list of DebugTensorDaum) the data based on which the maximum column widths will be determined. Returns: (int) maximum width of the timestamp column. 0 if data is empty. (int) maximum width of the dump size column. 0 if data is empty. (int) maximum width of the op type column. 0 if data is empty.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\debug\\cli\\analyzer_cli.py",
    "ast_data": "FunctionDef name:_measure_tensor_list_column_widths arg:self arg:data arguments arg arg Assign If Assign Assign Call Assign Call Call Assign For Assign Call If Compare Call Assign Call Assign Call Call Assign For Assign Call If Compare Call Assign Call Assign Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "export_memory_timeline",
    "source_code": "def export_memory_timeline(self, path: str, device: Optional[str]=None) -> None:\n    if device is None:\n        if self.use_device and self.use_device != 'cuda':\n            device = self.use_device + ':0'\n        else:\n            device = 'cuda:0' if torch.cuda.is_available() else 'cpu'\n    self.mem_tl = MemoryProfileTimeline(self._memory_profile())\n    if path.endswith('.html'):\n        self.mem_tl.export_memory_timeline_html(path, device)\n    elif path.endswith('.gz'):\n        fp = tempfile.NamedTemporaryFile('w+t', suffix='.json', delete=False)\n        fp.close()\n        if path.endswith('raw.json.gz'):\n            self.mem_tl.export_memory_timeline_raw(fp.name, device)\n        else:\n            self.mem_tl.export_memory_timeline(fp.name, device)\n        with open(fp.name) as fin:\n            with gzip.open(path, 'wt') as fout:\n                fout.writelines(fin)\n        os.remove(fp.name)\n    else:\n        self.mem_tl.export_memory_timeline(path, device)",
    "docstring": "Export memory event information from the profiler collected tree for a given device, and export a timeline plot. There are 3 exportable files using ``. Output: Memory timeline written as gzipped JSON, JSON, or HTML.",
    "type": "method",
    "file_path": "pytorch\\torch\\profiler\\profiler.py",
    "ast_data": "FunctionDef name:export_memory_timeline arg:self arg:path arg:device arguments arg arg arg If Compare If BoolOp Compare Assign Assign Call Assign Call Call If Call Call If Call Assign Call Call If Call Call Call With Call With Call Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "__str__",
    "source_code": "def __str__(self) -> str:\n    if len(self.placements) == 1:\n        placement_str = str(self.placements[0])\n    else:\n        placement_str = str(self.placements)\n    if self.tensor_meta is not None:\n        tensor_shape = str(tuple(self.tensor_meta.shape))\n    else:\n        tensor_shape = 'unknown shape'\n    return f'Spec({placement_str} on {tensor_shape})'",
    "docstring": "human readable representation of the DTensorSpec",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\tensor\\_dtensor_spec.py",
    "ast_data": "FunctionDef name:__str__ arg:self arguments arg If Compare Call Assign Call Assign Call If Compare Assign Call Call Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "init_variable",
    "source_code": "def init_variable(v, init, name='init'):\n    with ops.name_scope(None, v.op.name + '/', [v, init]):\n        with ops.name_scope(name) as scope:\n            with ops.colocate_with(v):\n                if callable(init):\n                    assert v.get_shape().is_fully_defined(), 'Variable shape unknown.'\n                    value = init(v.get_shape().as_list(), v.dtype.base_dtype)\n                    value = ops.convert_to_tensor(value, name='value')\n                    return gen_state_ops.assign(v, value, name=scope)\n                else:\n                    init = ops.convert_to_tensor(init, name='init')\n                    return gen_state_ops.assign(v, init, name=scope)",
    "docstring": "Initializes variable with \"init\". This op does the following: if init is a Tensor, v = init if callable(init): v = init(VariableShape(v), v.dtype) Args: v: Variable to initialize init: Tensor to assign to v, Or an object convertible to Tensor e.g. nparray, Or an Initializer that generates a tensor given the shape and type of v. An \"Initializer\" is a callable that returns a tensor that \"v\" should be set to. It will be called as init(shape, dtype). name: Optional name for the op. Returns: The operation that initializes v.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\state_ops.py",
    "ast_data": "FunctionDef name:init_variable arg:v arg:init arg:name arguments arg arg arg With Call With Call With Call If Call Call Call Assign Call Call Call Assign Call Return return:yes Call Assign Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "emit_pid",
    "source_code": "def emit_pid(self, name: str, pid: int) -> None:\n    event = {}\n    event['name'] = 'process_name'\n    event['ph'] = 'M'\n    event['pid'] = pid\n    event['args'] = {'name': name}\n    self._metadata.append(event)",
    "docstring": "Adds a process metadata event to the trace. Args: name: The process name as a string. pid: Identifier of the process as an integer.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\client\\timeline.py",
    "ast_data": "FunctionDef name:emit_pid arg:self arg:name arg:pid arguments arg arg arg Assign Assign Assign Assign Assign Call"
  },
  {
    "library": "numpy",
    "name": "parse_structure",
    "source_code": "def parse_structure(astr):\n    spanlist = []\n    ind = 0\n    while True:\n        m = routine_start_re.search(astr, ind)\n        if m is None:\n            break\n        start = m.start()\n        if function_start_re.match(astr, start, m.end()):\n            while True:\n                i = astr.rfind('\\n', ind, start)\n                if i == -1:\n                    break\n                start = i\n                if astr[i:i + 7] != '\\n     $':\n                    break\n        start += 1\n        m = routine_end_re.search(astr, m.end())\n        ind = end = m and m.end() - 1 or len(astr)\n        spanlist.append((start, end))\n    return spanlist",
    "docstring": "Return a list of tuples for each function or subroutine each tuple is the start and end of a subroutine or function to be expanded.",
    "type": "function",
    "file_path": "numpy\\numpy\\f2py\\_src_pyf.py",
    "ast_data": "FunctionDef name:parse_structure arg:astr arguments arg Assign Assign While Assign Call If Compare Assign Call If Call Call While Assign Call If Compare Assign If Compare Assign Call Call Assign BoolOp BoolOp Call Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "get_element",
    "source_code": "def get_element(root_dict: STATE_DICT_TYPE, path: OBJ_PATH, default_value: Optional[T]=None) -> Optional[T]:\n    cur_value = cast(CONTAINER_TYPE, root_dict)\n    for part in path:\n        if type(part) is int:\n            if not isinstance(cur_value, list) or len(cur_value) < part:\n                return default_value\n        elif not isinstance(cur_value, Mapping) or part not in cur_value:\n            return default_value\n        cur_value = cast(CONTAINER_TYPE, cur_value[part])\n    return cast(Optional[T], cur_value)",
    "docstring": "Retrieve the value at `` if not found.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\checkpoint\\_traverse.py",
    "ast_data": "FunctionDef name:get_element arg:root_dict arg:path arg:default_value arguments arg arg arg Assign Call For If Compare Call If BoolOp Call Compare Call Return return:yes If BoolOp Call Compare Return return:yes Assign Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "get_matching_files",
    "source_code": "@tf_export(v1=['gfile.Glob'])\ndef get_matching_files(filename):\n    return get_matching_files_v2(filename)",
    "docstring": "Returns a list of files that match the given pattern(s). Args: filename: string or iterable of strings. The glob pattern(s). Returns: A list of strings containing filenames that match the given pattern(s). Raises: * errors.OpError: If there are filesystem / directory listing errors. * errors.NotFoundError: If pattern to be matched is an invalid directory.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\lib\\io\\file_io.py",
    "ast_data": "FunctionDef name:get_matching_files arg:filename arguments arg Return return:yes Call Call"
  },
  {
    "library": "numpy",
    "name": "_median_nancheck",
    "source_code": "def _median_nancheck(data, result, axis):\n    if data.size == 0:\n        return result\n    potential_nans = data.take(-1, axis=axis)\n    n = np.isnan(potential_nans)\n    if np.ma.isMaskedArray(n):\n        n = n.filled(False)\n    if not n.any():\n        return result\n    if isinstance(result, np.generic):\n        return potential_nans\n    np.copyto(result, potential_nans, where=n)\n    return result",
    "docstring": "Utility function to check median result from data for NaN values at the end and return NaN in that case. Input result can also be a MaskedArray. Parameters ---------- data : array Sorted input data to median function result : Array or MaskedArray Result of median function. axis : int Axis along which the median was computed. Returns ------- result : scalar or ndarray Median or NaN in axes which contained NaN in the input. If the input was an array, NaN will be inserted in-place. If a scalar, either the input itself or a scalar NaN.",
    "type": "function",
    "file_path": "numpy\\numpy\\lib\\_utils_impl.py",
    "ast_data": "FunctionDef name:_median_nancheck arg:data arg:result arg:axis arguments arg arg arg If Compare Return return:yes Assign Call Assign Call If Call Assign Call If Call Return return:yes If Call Return return:yes Call Return return:yes"
  },
  {
    "library": "django",
    "name": "geodetic",
    "source_code": "def geodetic(self, connection):\n    return get_srid_info(self.srid, connection).geodetic",
    "docstring": "Return true if this field's SRID corresponds with a coordinate system that uses non-projected units (e.g., latitude/longitude).",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\db\\models\\fields.py",
    "ast_data": "FunctionDef name:geodetic arg:self arg:connection arguments arg arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, trainer_id):\n    if not trainer_id:\n        raise ValueError('tf.data service cross-trainer cache requires a non-empty trainer ID.')\n    self.trainer_id = trainer_id",
    "docstring": "Constructs a CrossTrainerCache. Args: trainer_id: Each training job has a unique ID. Once a job has consumed data, the data remains in the cache and is re-used by jobs with different s. Requests with the same do not re-use data. Raises: ValueError if is empty.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\data\\experimental\\ops\\data_service_ops.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:trainer_id arguments arg arg If Raise Call Assign"
  },
  {
    "library": "tensorflow",
    "name": "sparse_minimum",
    "source_code": "@tf_export('sparse.minimum', v1=['sparse.minimum', 'sparse_minimum'])\n@deprecation.deprecated_endpoints('sparse_minimum')\ndef sparse_minimum(sp_a, sp_b, name=None):\n    with ops.name_scope(name, 'SparseSparseMinimum', [sp_a.indices, sp_a.values, sp_b.indices, sp_b.values]) as name:\n        out_indices, out_values = gen_sparse_ops.sparse_sparse_minimum(sp_a.indices, sp_a.values, sp_a.dense_shape, sp_b.indices, sp_b.values, sp_b.dense_shape, name=name)\n    return sparse_tensor.SparseTensor(out_indices, out_values, sp_a.dense_shape)",
    "docstring": "Returns the element-wise min of two SparseTensors. Assumes the two SparseTensors have the same shape, i.e., no broadcasting. Example: >>> sp_zero = tf.sparse.SparseTensor([[0]], [0], [7]) >>> sp_one = tf.sparse.SparseTensor([[1]], [1], [7]) >>> res = tf.sparse.minimum(sp_zero, sp_one) >>> res.indices >>> res.values >>> res.dense_shape Args: sp_a: a operand whose dtype is real, and indices lexicographically ordered. sp_b: the other operand with the same requirements (and the same shape). name: optional name of the operation. Returns: output: the output SparseTensor.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\sparse_ops.py",
    "ast_data": "FunctionDef name:sparse_minimum arg:sp_a arg:sp_b arg:name arguments arg arg arg With Call Assign Call Return return:yes Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "get_summary_op",
    "source_code": "def get_summary_op():\n    summary_op = ops.get_collection(ops.GraphKeys.SUMMARY_OP)\n    if summary_op is not None:\n        if summary_op:\n            summary_op = summary_op[0]\n        else:\n            summary_op = None\n    if summary_op is None:\n        summary_op = merge_all_summaries()\n        if summary_op is not None:\n            ops.add_to_collection(ops.GraphKeys.SUMMARY_OP, summary_op)\n    return summary_op",
    "docstring": "Returns a single Summary op that would run all summaries. Either existing one from collection or merges all existing summaries. Returns: If no summaries were collected, returns None. Otherwise returns a scalar of type containing the serialized protocol buffer resulting from the merging.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\logging_ops.py",
    "ast_data": "FunctionDef name:get_summary_op arguments Assign Call If Compare If Assign Assign If Compare Assign Call If Compare Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "assert_type",
    "source_code": "@tf_export(v1=['debugging.assert_type', 'assert_type'])\n@dispatch.add_dispatch_support\n@deprecation.deprecated_endpoints('assert_type')\ndef assert_type(tensor, tf_type, message=None, name=None):\n    tf_type = dtypes.as_dtype(tf_type)\n    with ops.name_scope(name, 'assert_type', [tensor]):\n        if not isinstance(tensor, sparse_tensor.SparseTensor):\n            tensor = ops.convert_to_tensor(tensor, name='tensor')\n        if tensor.dtype != tf_type:\n            raise TypeError(f'{_message_prefix(message)}{getattr(tensor, 'name', 'tensor')} must be of type {tf_type!r}; got {tensor.dtype!r}')\n        return control_flow_ops.no_op('statically_determined_correct_type')",
    "docstring": "Statically asserts that the given is of the specified type. Args: tensor: A or . tf_type: A tensorflow type (, , , etc). message: A string to prefix to the default message. name: A name to give this . Defaults to \"assert_type\" Raises: TypeError: If the tensors data type doesn't match . Returns: A that does nothing. Type can be determined statically.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\check_ops.py",
    "ast_data": "FunctionDef name:assert_type arg:tensor arg:tf_type arg:message arg:name arguments arg arg arg arg Assign Call With Call If Call Assign Call If Compare Raise Call Call Call Return return:yes Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "cast",
    "source_code": "@doc_controls.do_not_doc_inheritable\ndef cast(self, value, casting_context):\n    if casting_context.allow_specs and isinstance(value, TypeSpec):\n        assert value.is_subtype_of(self), f'Can not cast {value!r} to {self!r}'\n        return self\n    did_cast = False\n\n    def cast_fn(spec, v):\n        casted_v = spec.cast(v, casting_context)\n        if casted_v is not v:\n            nonlocal did_cast\n            did_cast = True\n        return casted_v\n    cast_components = nest.map_structure(cast_fn, self._component_specs, self._to_components(value))\n    if did_cast:\n        return self._from_components(cast_components)\n    else:\n        return value",
    "docstring": "See TraceType base class for details. Do not override.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\type_spec.py",
    "ast_data": "FunctionDef name:cast arg:self arg:value arg:casting_context arguments arg arg arg If BoolOp Call Call Return return:yes Assign FunctionDef name:cast_fn arg:spec arg:v arguments arg arg Assign Call If Compare Assign Return return:yes Assign Call Call If Return return:yes Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "StepLR",
    "source_code": "class StepLR(LRScheduler):\n\n    def __init__(self, optimizer: Optimizer, step_size: int, gamma: float=0.1, last_epoch: int=-1) -> None:\n        self.step_size = step_size\n        self.gamma = gamma\n        super().__init__(optimizer, last_epoch)\n\n    @override\n    def get_lr(self) -> list[float]:\n        _warn_get_lr_called_within_step(self)\n        if self.last_epoch == 0 or self.last_epoch % self.step_size != 0:\n            return [group['lr'] for group in self.optimizer.param_groups]\n        return [group['lr'] * self.gamma for group in self.optimizer.param_groups]\n\n    def _get_closed_form_lr(self) -> list[float]:\n        return [base_lr * self.gamma ** (self.last_epoch // self.step_size) for base_lr in self.base_lrs]",
    "docstring": "Decays the learning rate of each parameter group by gamma every step_size epochs. Notice that such decay can happen simultaneously with other changes to the learning rate from outside this scheduler. When last_epoch=-1, sets initial lr as lr. Args: optimizer (Optimizer): Wrapped optimizer. step_size (int): Period of learning rate decay. gamma (float): Multiplicative factor of learning rate decay. Default: 0.1. last_epoch (int): The index of last epoch. Default: -1. Example: >>> # xdoctest: +SKIP >>> # Assuming optimizer uses lr = 0.05 for all groups >>> # lr = 0.05 if epoch >> # lr = 0.005 if 30 >> # lr = 0.0005 if 60 >> # ... >>> scheduler = StepLR(optimizer, step_size=30, gamma=0.1) >>> for epoch in range(100): >>> train(...) >>> validate(...) >>> scheduler.step() .. image:: ../scripts/lr_scheduler_images/StepLR.png",
    "type": "class",
    "file_path": "pytorch\\torch\\optim\\lr_scheduler.py",
    "ast_data": "ClassDef name:StepLR FunctionDef name:__init__ arg:self arg:optimizer arg:step_size arg:gamma arg:last_epoch arguments arg arg arg arg arg Assign Assign Call Call FunctionDef name:get_lr arg:self arguments arg Call If BoolOp Compare Compare Return return:yes Return return:yes FunctionDef name:_get_closed_form_lr arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "PossibleTapeGradientTypes",
    "source_code": "def PossibleTapeGradientTypes(tensors):\n    return pywrap_tfe.TFE_Py_TapeSetPossibleGradientTypes(tensors)",
    "docstring": "Determines whether and how may require tape gradients.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\gradients_util.py",
    "ast_data": "FunctionDef name:PossibleTapeGradientTypes arg:tensors arguments arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "unique_fetches",
    "source_code": "def unique_fetches(self):\n    raise NotImplementedError('unique_fetches must be implemented by subclasses')",
    "docstring": "Return the list of unique tensors or ops needed by this fetch mapper. Returns: A list of tensors or ops.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\client\\session.py",
    "ast_data": "FunctionDef name:unique_fetches arg:self arguments arg Raise Call"
  },
  {
    "library": "kornia",
    "name": "compose_policy",
    "source_code": "def compose_policy(self, policy: List[SUBPOLICY_CONFIG]) -> List[PolicySequential]:\n    return [self.compose_subpolicy_sequential(subpolicy) for subpolicy in policy]",
    "docstring": "Compose policy by the provided policy config.",
    "type": "method",
    "file_path": "kornia\\kornia\\augmentation\\auto\\base.py",
    "ast_data": "FunctionDef name:compose_policy arg:self arg:policy arguments arg arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "half",
    "source_code": "def half(self):\n    _warn_typed_storage_removal()\n    return self._to(torch.half)",
    "docstring": "Casts this storage to half type.",
    "type": "method",
    "file_path": "pytorch\\torch\\storage.py",
    "ast_data": "FunctionDef name:half arg:self arguments arg Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "tpu_device",
    "source_code": "def tpu_device(self, replica: int=0, logical_core: int=0, job: Optional[str]=None) -> str:\n    coordinates = self.coordinates(replica, logical_core)\n    return self._topology.tpu_device_name_at_coordinates(coordinates, job=job)",
    "docstring": "Returns the name of the TPU device assigned to a logical core.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\tpu\\device_assignment.py",
    "ast_data": "FunctionDef name:tpu_device arg:self arg:replica arg:logical_core arg:job arguments arg arg arg arg Assign Call Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "clpmn",
    "source_code": "@_deprecated(__DEPRECATION_MSG_1_15.format('clpmn', 'assoc_legendre_p_all'))\ndef clpmn(m, n, z, type=3):\n    if abs(m) > n:\n        raise ValueError('m must be <= n.')\n    if not (type == 2 or type == 3):\n        raise ValueError('type must be either 2 or 3.')\n    m, n = (int(m), int(n))\n    if not np.iscomplexobj(z):\n        z = np.asarray(z, dtype=complex)\n    out, out_jac = assoc_legendre_p_all(n, abs(m), z, branch_cut=type, diff_n=1)\n    out = np.swapaxes(out, 0, 1)\n    out_jac = np.swapaxes(out_jac, 0, 1)\n    if m >= 0:\n        out = out[:m + 1]\n        out_jac = out_jac[:m + 1]\n    else:\n        out = np.insert(out[:m - 1:-1], 0, out[0], axis=0)\n        out_jac = np.insert(out_jac[:m - 1:-1], 0, out_jac[0], axis=0)\n    return (out, out_jac)",
    "docstring": "Associated Legendre function of the first kind for complex arguments. Computes the associated Legendre function of the first kind of order m and degree n, `P_n^m(z)scipy.special.assoc_legendre_p_all` is chosen. Approaching the real values on the interval (-1, 1) in the complex plane yields Ferrer's function of the first kind. References ---------- .. [1] Zhang, Shanjie and Jin, Jianming. \"Computation of Special Functions\", John Wiley and Sons, 1996. .. [2] NIST Digital Library of Mathematical Functions",
    "type": "function",
    "file_path": "scipy\\scipy\\special\\_basic.py",
    "ast_data": "FunctionDef name:clpmn arg:m arg:n arg:z arg:type arguments arg arg arg arg If Compare Call Raise Call If BoolOp Compare Compare Raise Call Assign Call Call If Call Assign Call Assign Call Call Assign Call Assign Call If Compare Assign Assign Assign Call Assign Call Return return:yes Call Call"
  },
  {
    "library": "pandas",
    "name": "tail",
    "source_code": "@final\n@Substitution(name='groupby')\n@Substitution(see_also=_common_see_also)\ndef tail(self, n: int=5) -> NDFrameT:\n    if n:\n        mask = self._make_mask_from_positional_indexer(slice(-n, None))\n    else:\n        mask = self._make_mask_from_positional_indexer([])\n    return self._mask_selected_obj(mask)",
    "docstring": "Return last n rows of each group. Similar to `` flag is ignored). Parameters ---------- n : int If positive: number of entries to include from end of each group. If negative: number of entries to exclude from start of each group. Returns ------- Series or DataFrame Subset of original Series or DataFrame as determined by n. %(see_also)s Examples -------- >>> df = pd.DataFrame( ... [[\"a\", 1], [\"a\", 2], [\"b\", 1], [\"b\", 2]], columns=[\"A\", \"B\"] ... ) >>> df.groupby(\"A\").tail(1) A B 1 a 2 3 b 2 >>> df.groupby(\"A\").tail(-1) A B 1 a 2 3 b 2",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\groupby\\groupby.py",
    "ast_data": "FunctionDef name:tail arg:self arg:n arguments arg arg If Assign Call Call Assign Call Return return:yes Call Call Call"
  },
  {
    "library": "pandas",
    "name": "_validate_path",
    "source_code": "def _validate_path(self) -> list[Any]:\n    raise AbstractMethodError(self)",
    "docstring": "Validate ``. This method checks for syntax, evaluation, or empty nodes return. Raises ------ SyntaxError * If xpah is not supported or issues with namespaces. ValueError * If xpah does not return any nodes.",
    "type": "method",
    "file_path": "pandas\\pandas\\io\\xml.py",
    "ast_data": "FunctionDef name:_validate_path arg:self arguments arg Raise Call"
  },
  {
    "library": "tensorflow",
    "name": "NativeObject",
    "source_code": "class NativeObject(object):\n    pass",
    "docstring": "Types natively supported by various TF operations. The most notable example of NativeObject is Tensor.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\types\\internal.py",
    "ast_data": "ClassDef name:NativeObject"
  },
  {
    "library": "matplotlib",
    "name": "set_picker",
    "source_code": "def set_picker(self, picker):\n    self._picker = picker",
    "docstring": "Define the picking behavior of the artist. Parameters ---------- picker : None or bool or float or callable This can be one of the following: - *None*: Picking is disabled for this artist (default). - A boolean: If *True* then picking will be enabled and the artist will fire a pick event if the mouse event is over the artist. - A float: If picker is a number it is interpreted as an epsilon tolerance in points and the artist will fire off an event if its data is within epsilon of the mouse event. For some artists like lines and patch collections, the artist may provide additional data to the pick event that is generated, e.g., the indices of the data within epsilon of the pick event - A function: If picker is callable, it is a user supplied function which determines whether the artist is hit by the mouse event:: hit, props = picker(artist, mouseevent) to determine the hit test. if the mouse event is over the artist, return *hit=True* and props is a dictionary of properties you want added to the PickEvent attributes.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\artist.py",
    "ast_data": "FunctionDef name:set_picker arg:self arg:picker arguments arg arg Assign"
  },
  {
    "library": "pandas",
    "name": "__len__",
    "source_code": "def __len__(self) -> int:\n    return len(self._range)",
    "docstring": "return the length of the RangeIndex",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\indexes\\range.py",
    "ast_data": "FunctionDef name:__len__ arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "get_yaxis_transform",
    "source_code": "def get_yaxis_transform(self, which='grid'):\n    if which == 'grid':\n        return self._yaxis_transform\n    elif which == 'tick1':\n        return self.spines.left.get_spine_transform()\n    elif which == 'tick2':\n        return self.spines.right.get_spine_transform()\n    else:\n        raise ValueError(f'unknown value for which: {which!r}')",
    "docstring": "Get the transformation used for drawing y-axis labels, ticks and gridlines. The x-direction is in axis coordinates and the y-direction is in data coordinates. .. note:: This transformation is primarily used by the class, and is meant to be overridden by new kinds of projections that may need to place axis elements in different locations. Parameters ---------- which : {'grid', 'tick1', 'tick2'}",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\axes\\_base.py",
    "ast_data": "FunctionDef name:get_yaxis_transform arg:self arg:which arguments arg arg If Compare Return return:yes If Compare Return return:yes Call If Compare Return return:yes Call Raise Call"
  },
  {
    "library": "pandas",
    "name": "dtype",
    "source_code": "@property\ndef dtype(self) -> StringDtype:\n    return self._dtype",
    "docstring": "An instance of 'string[pyarrow]'.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\arrays\\string_arrow.py",
    "ast_data": "FunctionDef name:dtype arg:self arguments arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "OrtBackendOptions",
    "source_code": "@dataclasses.dataclass(frozen=True)\n@compatibility(is_backward_compatible=False)\nclass OrtBackendOptions:\n    preferred_execution_providers: Optional[Sequence[OrtExecutionProvider]] = None\n    'An optional sequence of execution providers to be prioritized ahead of any\\n    execution providers that may be inferred (see ``infer_execution_providers``).\\n    '\n    infer_execution_providers: bool = True\n    'Whether to infer an execution provider from ``torch.device`` bound to inputs or found in the graph.'\n    default_execution_providers: Optional[Sequence[OrtExecutionProvider]] = None\n    'The default fallback execution providers. If not specified, one will be\\n    be selected based on the host environment (most likely ``\"CPUExecutionProvider\"``).\\n    '\n    preallocate_output: bool = False\n    \"If ``True``, allocate memory for ONNX Runtime's outputs on the PyTorch side.\"\n    use_aot_autograd: bool = True\n    \"Whether to wrap the ``OrtBackend`` with TorchDynamo's aot_autograd backend\\n    to support training (i.e., backward graphs are also sent to ``OrtBackend``).\\n\\n    Symbolic execution is used to capture the forward pass and backward passes as a single graph.\\n    Then, a selected graph partition algorithm (``min_cut_rematerialization_partition``) is used\\n    to split the entire graph into forward sub-graph and backward sub-graph. Finally, both\\n    sub-graphs are compiled by ``OrtBackend``.\\n    \"\n    ort_session_options: Optional['onnxruntime.SessionOptions'] = None\n    'Options for the ``onnxruntime.InferenceSession`` used by the ``OrtBackend``.'\n    pre_ort_model_transforms: Optional[Sequence[Callable[['onnx.ModelProto'], None]]] = None\n    \"A list of graph transforms to be applied to the ONNX model before it\\n    is fed to ONNXRuntime's InferenceSession.\"",
    "docstring": "Options for constructing an ``. Example:: >>> @torch.compile( ... backend=\"onnxrt\", ... options=torch.onnx._OrtBackendOptions(...), ... ) ... def ort_function(x): ... return x ** x",
    "type": "class",
    "file_path": "pytorch\\torch\\onnx\\_internal\\onnxruntime.py",
    "ast_data": "ClassDef name:OrtBackendOptions Call Call"
  },
  {
    "library": "tensorflow",
    "name": "Multiply",
    "source_code": "class Multiply(_Merge):\n\n    def _merge_function(self, inputs):\n        output = inputs[0]\n        for i in range(1, len(inputs)):\n            output = output * inputs[i]\n        return output",
    "docstring": "Layer that multiplies (element-wise) a list of inputs. It takes as input a list of tensors, all of the same shape, and returns a single tensor (also of the same shape). >>> tf.keras.layers.Multiply()([np.arange(5).reshape(5, 1), ... np.arange(5, 10).reshape(5, 1)]) >>> x1 = tf.keras.layers.Dense(8)(np.arange(10).reshape(5, 2)) >>> x2 = tf.keras.layers.Dense(8)(np.arange(10, 20).reshape(5, 2)) >>> multiplied = tf.keras.layers.Multiply()([x1, x2]) >>> multiplied.shape TensorShape([5, 8])",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\layers\\merge.py",
    "ast_data": "ClassDef name:Multiply FunctionDef name:_merge_function arg:self arg:inputs arguments arg arg Assign For Call Call Assign Return return:yes"
  },
  {
    "library": "pandas",
    "name": "_format_attrs",
    "source_code": "def _format_attrs(self):\n    attrs: list[tuple[str, str | int | bool | None]]\n    attrs = [('categories', f'[{', '.join(self._data._repr_categories())}]'), ('ordered', self.ordered)]\n    extra = super()._format_attrs()\n    return attrs + extra",
    "docstring": "Return a list of tuples of the (attr,formatted_value)",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\indexes\\category.py",
    "ast_data": "FunctionDef name:_format_attrs arg:self arguments arg Assign Call Call Assign Call Call Return return:yes"
  },
  {
    "library": "scipy",
    "name": "phi_and_derivative",
    "source_code": "def phi_and_derivative(alpha, suf, s, Delta):\n    denom = s ** 2 + alpha\n    p_norm = norm(suf / denom)\n    phi = p_norm - Delta\n    phi_prime = -np.sum(suf ** 2 / denom ** 3) / p_norm\n    return (phi, phi_prime)",
    "docstring": "Function of which to find zero. It is defined as \"norm of regularized (by alpha) least-squares solution minus \". Refer to [1]_.",
    "type": "function",
    "file_path": "scipy\\scipy\\optimize\\_lsq\\common.py",
    "ast_data": "FunctionDef name:phi_and_derivative arg:alpha arg:suf arg:s arg:Delta arguments arg arg arg arg Assign Assign Call Assign Assign Call Return return:yes"
  },
  {
    "library": "kornia",
    "name": "coeffs",
    "source_code": "@property\ndef coeffs(self) -> Tuple[Tensor, Tensor, Tensor, Tensor]:\n    return (self.w, self.x, self.y, self.z)",
    "docstring": "Return a tuple with the underlying coefficients in WXYZ order.",
    "type": "method",
    "file_path": "kornia\\kornia\\geometry\\quaternion.py",
    "ast_data": "FunctionDef name:coeffs arg:self arguments arg Return return:yes"
  },
  {
    "library": "scipy",
    "name": "eager_warns",
    "source_code": "def eager_warns(x, warning_type, match=None):\n    import pytest\n    __thread_safe__ = False\n    if is_lazy_array(x):\n        return contextlib.nullcontext()\n    return pytest.warns(warning_type, match=match)",
    "docstring": "pytest.warns context manager, but only if x is not a lazy array.",
    "type": "function",
    "file_path": "scipy\\scipy\\_lib\\_array_api.py",
    "ast_data": "FunctionDef name:eager_warns arg:x arg:warning_type arg:match arguments arg arg arg Assign If Call Return return:yes Call Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "__array_wrap__",
    "source_code": "@final\ndef __array_wrap__(self, result, context=None, return_scalar=False):\n    result = lib.item_from_zerodim(result)\n    if not isinstance(result, Index) and is_bool_dtype(result.dtype) or np.ndim(result) > 1:\n        return result\n    return Index(result, name=self.name)",
    "docstring": "Gets called after a ufunc and other functions e.g. np.split.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\indexes\\base.py",
    "ast_data": "FunctionDef name:__array_wrap__ arg:self arg:result arg:context arg:return_scalar arguments arg arg arg arg Assign Call If BoolOp BoolOp Call Call Compare Call Return return:yes Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "reset_dropout_mask",
    "source_code": "def reset_dropout_mask(self):\n    self._dropout_mask_cache.clear()",
    "docstring": "Reset the cached dropout masks if any. This is important for the RNN layer to invoke this in it method so that the cached mask is cleared before calling the . The mask should be cached across the timestep within the same batch, but shouldn't be cached between batches. Otherwise it will introduce unreasonable bias against certain index of data within the batch.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\layers\\recurrent.py",
    "ast_data": "FunctionDef name:reset_dropout_mask arg:self arguments arg Call"
  },
  {
    "library": "scipy",
    "name": "getargspec",
    "source_code": "def getargspec(f):\n    spec = getfullargspec(f)\n    return ArgSpec(spec.args, spec.varargs, spec.varkw, spec.defaults)",
    "docstring": "A replacement for inspect.getargspec",
    "type": "function",
    "file_path": "scipy\\scipy\\_lib\\decorator.py",
    "ast_data": "FunctionDef name:getargspec arg:f arguments arg Assign Call Return return:yes Call"
  },
  {
    "library": "virtualenv",
    "name": "__init__",
    "source_code": "def __init__(self, options) -> None:\n    self.flag_prompt = os.path.basename(os.getcwd()) if options.prompt == '.' else options.prompt",
    "docstring": "Create a new activator generator. :param options: the parsed options as defined within :meth:",
    "type": "method",
    "file_path": "virtualenv\\src\\virtualenv\\activation\\activator.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:options arguments arg arg Assign Compare Call Call"
  },
  {
    "library": "pytorch",
    "name": "__init__",
    "source_code": "def __init__(self, input_nodes: list[Buffer], layout: Layout, alpha: float, beta: float, input_reorder: Optional[list[int]]=None) -> None:\n    super().__init__(str(Placeholder.KERNEL_NAME), input_nodes, layout, input_reorder)\n    self.alpha = alpha\n    self.beta = beta\n    assert len(input_nodes) == 2 or len(input_nodes) == 3 or len(input_nodes) == 4\n    assert self._are_inputs_layout_compatible([node.get_layout() for node in input_nodes])\n    self.cache_key: str = create_inputs_key(self.input_nodes)",
    "docstring": "Args: input_nodes (List[Buffer]): List of input nodes of the GEMM kernel. layout (Layout): Layout type of the resulting output node. alpha (float): The scaling factor for the product of the inputs in the GEMM operation. beta (float): The scaling factor applied to the output matrix. input_reorder (Optional[List[int]]): Specifies the reordering of the input nodes. If not provided, no reordering is performed. Defaults to None.",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\codegen\\cuda\\gemm_template.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:input_nodes arg:layout arg:alpha arg:beta arg:input_reorder arguments arg arg arg arg arg arg Call Call Call Assign Assign BoolOp Compare Call Compare Call Compare Call Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "broadcast",
    "source_code": "def broadcast(self, tensor, destinations):\n    validate_destinations(destinations)\n    return self.broadcast_implementation(tensor, destinations)",
    "docstring": "Broadcast to . This can only be called in the cross-replica context. Args: tensor: a like object. The value to broadcast. destinations: a , a , a alike object, or a device string. It specifies the devices to broadcast to. Note that if it's a , the value is broadcasted to the devices of that variable, this method doesn't update the variable. Returns: A or .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\cross_device_ops.py",
    "ast_data": "FunctionDef name:broadcast arg:self arg:tensor arg:destinations arguments arg arg arg Call Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "_as_ss",
    "source_code": "def _as_ss(self):\n    if isinstance(self, StateSpace):\n        return self\n    else:\n        return self.to_ss()",
    "docstring": "Convert to system, without copying. Returns ------- sys: StateSpace The system. If the class is already an instance of then this instance is returned.",
    "type": "method",
    "file_path": "scipy\\scipy\\signal\\_ltisys.py",
    "ast_data": "FunctionDef name:_as_ss arg:self arguments arg If Call Return return:yes Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "get_choice",
    "source_code": "def get_choice(self) -> Choice:\n    if not self.satisfies_precondition():\n        return self.fallback()\n    if torch._inductor.config.use_autoheuristic(self.name):\n        if self.augment_context is not None:\n            self.context.apply_operations(self.augment_context)\n        controller = LearnedHeuristicController(self.metadata, self.context)\n        decision = controller.get_decision()\n        if decision not in self.choices:\n            return self.fallback()\n        if decision is not None:\n            return decision\n    return self.fallback()",
    "docstring": "Returns the chosen option based on the value of autoheuristic_use. If self.name is one of the comma separated strings in autoheuristic_use, it queries a learned heuristic to make a decision. Otherwise, it returns the fallback option.",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\autoheuristic\\autoheuristic.py",
    "ast_data": "FunctionDef name:get_choice arg:self arguments arg If Call Return return:yes Call If Call If Compare Call Assign Call Assign Call If Compare Return return:yes Call If Compare Return return:yes Return return:yes Call"
  },
  {
    "library": "authlib",
    "name": "refresh_token",
    "source_code": "def refresh_token(self):\n    generate_assertion = self.ASSERTION_METHODS[self.grant_type]\n    assertion = generate_assertion(issuer=self.issuer, subject=self.subject, audience=self.audience, claims=self.claims, **self._kwargs)\n    data = {'assertion': to_native(assertion), 'grant_type': self.grant_type}\n    if self.scope:\n        data['scope'] = self.scope\n    return self._refresh_token(data)",
    "docstring": "Using Assertions as Authorization Grants to refresh token as described in _. .. _:",
    "type": "method",
    "file_path": "authlib\\authlib\\oauth2\\rfc7521\\client.py",
    "ast_data": "FunctionDef name:refresh_token arg:self arguments arg Assign Assign Call Assign Call If Assign Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "stripped_op_list_for_graph",
    "source_code": "def stripped_op_list_for_graph(graph_def):\n    used_ops = ops_used_by_graph_def(graph_def)\n    op_defs = []\n    for op in sorted(used_ops):\n        op_def = op_def_registry.get(op)\n        if op_def is not None:\n            op_defs.append(op_def)\n    return op_def_pb2.OpList(op=op_defs)",
    "docstring": "Collect the stripped OpDefs for ops used by a graph. This function computes the field of and similar protos. The result can be communicated from the producer to the consumer, which can then use the C++ function to improve forwards compatibility. Args: graph_def: A proto, as from . Returns: An of ops used by the graph.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\meta_graph.py",
    "ast_data": "FunctionDef name:stripped_op_list_for_graph arg:graph_def arguments arg Assign Call Assign For Call Assign Call If Compare Call Return return:yes Call"
  },
  {
    "library": "django",
    "name": "ask_rename",
    "source_code": "def ask_rename(self, model_name, old_name, new_name, field_instance):\n    msg = 'Was %s.%s renamed to %s.%s (a %s)? [y/N]'\n    return self._boolean_input(msg % (model_name, old_name, model_name, new_name, field_instance.__class__.__name__), False)",
    "docstring": "Was this field really renamed?",
    "type": "method",
    "file_path": "django\\django\\db\\migrations\\questioner.py",
    "ast_data": "FunctionDef name:ask_rename arg:self arg:model_name arg:old_name arg:new_name arg:field_instance arguments arg arg arg arg arg Assign Return return:yes Call"
  },
  {
    "library": "scrapy",
    "name": "follow",
    "source_code": "def follow(self, url: str | Link | parsel.Selector, callback: CallbackT | None=None, method: str='GET', headers: Mapping[AnyStr, Any] | Iterable[tuple[AnyStr, Any]] | None=None, body: bytes | str | None=None, cookies: CookiesT | None=None, meta: dict[str, Any] | None=None, encoding: str | None=None, priority: int=0, dont_filter: bool=False, errback: Callable[[Failure], Any] | None=None, cb_kwargs: dict[str, Any] | None=None, flags: list[str] | None=None) -> Request:\n    if isinstance(url, parsel.Selector):\n        url = _url_from_selector(url)\n    elif isinstance(url, parsel.SelectorList):\n        raise ValueError('SelectorList is not supported')\n    encoding = self.encoding if encoding is None else encoding\n    return super().follow(url=url, callback=callback, method=method, headers=headers, body=body, cookies=cookies, meta=meta, encoding=encoding, priority=priority, dont_filter=dont_filter, errback=errback, cb_kwargs=cb_kwargs, flags=flags)",
    "docstring": "Return a :class: instance to follow a link `~scrapy.link.Linktopics-link-extractors~scrapy.Selector element, e.g. `~scrapy.Selectorresponse-follow-example` for usage examples.",
    "type": "method",
    "file_path": "scrapy\\scrapy\\http\\response\\text.py",
    "ast_data": "FunctionDef name:follow arg:self arg:url arg:callback arg:method arg:headers arg:body arg:cookies arg:meta arg:encoding arg:priority arg:dont_filter arg:errback arg:cb_kwargs arg:flags arguments arg arg arg arg arg arg arg arg arg arg arg arg arg arg If Call Assign Call If Call Raise Call Assign Compare Return return:yes Call Call"
  },
  {
    "library": "kornia",
    "name": "Orthographic",
    "source_code": "class Orthographic(CameraModelBase):\n\n    def __init__(self, image_size: ImageSize, params: Tensor) -> None:\n        super().__init__(AffineTransform(), OrthographicProjection(), image_size, params)\n        if params.shape[-1] != 4 or len(params.shape) > 2:\n            raise ValueError('params must be of shape B, 4 for ORTHOGRAPHIC Camera')",
    "docstring": "Orthographic Camera Model.",
    "type": "class",
    "file_path": "kornia\\kornia\\sensors\\camera\\camera_model.py",
    "ast_data": "ClassDef name:Orthographic FunctionDef name:__init__ arg:self arg:image_size arg:params arguments arg arg arg Call Call Call Call If BoolOp Compare Compare Call Raise Call"
  },
  {
    "library": "tensorflow",
    "name": "byte_swap_tensor_content",
    "source_code": "def byte_swap_tensor_content(tensor, from_endiness, to_endiness):\n    if tensor.dtype in byte_swappable:\n        tshape = tensor.tensor_shape.dim\n        tensor_bytes = tensor.tensor_content\n        if tensor_bytes:\n            tensor_size = 1\n            for sz in tshape:\n                if sz.size != 0:\n                    tensor_size *= sz.size\n            chunksize = len(tensor_bytes) // tensor_size\n            to_swap = [tensor_bytes[i:i + chunksize] for i in range(0, len(tensor_bytes), chunksize)]\n            tensor.tensor_content = b''.join([int.from_bytes(byteswap, from_endiness).to_bytes(chunksize, to_endiness) for byteswap in to_swap])",
    "docstring": "Byte swaps. Args: tensor: Target tensor to change endiness. from_endiness: The original endianness format. \"big\" or \"little\" to_endiness: The target endianness format. \"big\" or \"little\"",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\byte_swap_tensor.py",
    "ast_data": "FunctionDef name:byte_swap_tensor_content arg:tensor arg:from_endiness arg:to_endiness arguments arg arg arg If Compare Assign Assign If Assign For If Compare Assign Call Assign Call Call Assign Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, input_bytes):\n    self.fdp = atheris.FuzzedDataProvider(input_bytes)",
    "docstring": "FuzzingHelper initializer. Args: input_bytes: Input randomized bytes used to create a FuzzedDataProvider.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\security\\fuzzing\\python_fuzzing.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:input_bytes arguments arg arg Assign Call"
  },
  {
    "library": "tensorflow",
    "name": "string_split_v2",
    "source_code": "@tf_export('strings.split', v1=[])\n@dispatch.add_dispatch_support\ndef string_split_v2(input, sep=None, maxsplit=-1, name=None):\n    with ops.name_scope(name, 'StringSplit', [input]):\n        input = ragged_tensor.convert_to_tensor_or_ragged_tensor(input, dtype=dtypes.string, name='input')\n        if isinstance(input, ragged_tensor.RaggedTensor):\n            return input.with_flat_values(string_split_v2(input.flat_values, sep, maxsplit))\n        rank = input.shape.ndims\n        if rank == 0:\n            return string_split_v2(array_ops_stack.stack([input]), sep, maxsplit)[0]\n        elif rank == 1 or rank is None:\n            sparse_result = string_ops.string_split_v2(input, sep=sep, maxsplit=maxsplit)\n            return ragged_tensor.RaggedTensor.from_value_rowids(values=sparse_result.values, value_rowids=sparse_result.indices[:, 0], nrows=sparse_result.dense_shape[0], validate=False)\n        else:\n            return string_split_v2(ragged_tensor.RaggedTensor.from_tensor(input), sep, maxsplit)",
    "docstring": "Split elements of based on into a . Let N be the size of (typically N will be the batch size). Split each element of based on and return a containing the split tokens. Empty tokens are ignored. Example: >>> tf.strings.split('hello world').numpy() array([b'hello', b'world'], dtype=object) >>> tf.strings.split(['hello world', 'a b c']) If is given, consecutive delimiters are not grouped together and are deemed to delimit empty strings. For example, of and of returns . If is None or an empty string, consecutive whitespace are regarded as a single separator, and the result will contain no empty strings at the start or end if the string has leading or trailing whitespace. Note that the above mentioned behavior matches python's str.split. Args: input: A string of rank , the strings to split. If is not known statically, then it is assumed to be . sep: string , the delimiter string. maxsplit: An . If , limit of the split of the result. name: A name for the operation (optional). Raises: ValueError: If sep is not a string. Returns: A of rank , the strings split according to the delimiter.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\ragged_string_ops.py",
    "ast_data": "FunctionDef name:string_split_v2 arg:input arg:sep arg:maxsplit arg:name arguments arg arg arg arg With Call Assign Call If Call Return return:yes Call Call Assign If Compare Return return:yes Call Call If BoolOp Compare Compare Assign Call Return return:yes Call Return return:yes Call Call Call"
  },
  {
    "library": "matplotlib",
    "name": "__init__",
    "source_code": "def __init__(self, resolution):\n    super().__init__()\n    self._resolution = resolution",
    "docstring": "Create a new geographical transform. Resolution is the number of steps to interpolate between each input line segment to approximate its path in curved space.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\projections\\geo.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:resolution arguments arg arg Call Call Assign"
  },
  {
    "library": "django",
    "name": "_alter_column_type_sql",
    "source_code": "def _alter_column_type_sql(self, model, old_field, new_field, new_type, old_collation, new_collation):\n    other_actions = []\n    if (collate_sql := self._collate_sql(new_collation, old_collation, model._meta.db_table)):\n        collate_sql = f' {collate_sql}'\n    else:\n        collate_sql = ''\n    comment_sql = ''\n    if self.connection.features.supports_comments and (not new_field.many_to_many):\n        if old_field.db_comment != new_field.db_comment:\n            sql, params = self._alter_column_comment_sql(model, new_field, new_type, new_field.db_comment)\n            if sql:\n                other_actions.append((sql, params))\n        if new_field.db_comment:\n            comment_sql = self._comment_sql(new_field.db_comment)\n    return ((self.sql_alter_column_type % {'column': self.quote_name(new_field.column), 'type': new_type, 'collation': collate_sql, 'comment': comment_sql}, []), other_actions)",
    "docstring": "Hook to specialize column type alteration for different backends, for cases when a creation type is different to an alteration type (e.g. SERIAL in PostgreSQL, PostGIS fields). Return a 2-tuple of: an SQL fragment of (sql, params) to insert into an ALTER TABLE statement and a list of extra (sql, params) tuples to run once the field is altered.",
    "type": "method",
    "file_path": "django\\django\\db\\backends\\base\\schema.py",
    "ast_data": "FunctionDef name:_alter_column_type_sql arg:self arg:model arg:old_field arg:new_field arg:new_type arg:old_collation arg:new_collation arguments arg arg arg arg arg arg arg Assign If Call Assign Assign Assign If BoolOp If Compare Assign Call If Call If Assign Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_compute_gradients",
    "source_code": "def _compute_gradients(self, loss, var_list, grad_loss=None, tape=None):\n    if not callable(loss) and tape is None:\n        raise ValueError('`tape` is required when a `Tensor` loss is passed.')\n    tape = tape if tape is not None else backprop.GradientTape()\n    if callable(loss):\n        with tape:\n            if not callable(var_list):\n                tape.watch(var_list)\n            loss = loss()\n            if callable(var_list):\n                var_list = var_list()\n    with tape:\n        loss = self._transform_loss(loss)\n    var_list = nest.flatten(var_list)\n    with ops.name_scope_v2(self._name + '/gradients'):\n        grads_and_vars = self._get_gradients(tape, loss, var_list, grad_loss)\n    self._assert_valid_dtypes([v for g, v in grads_and_vars if g is not None and v.dtype != dtypes.resource])\n    return grads_and_vars",
    "docstring": "Compute gradients of for the variables in . This is the first part of . It returns a list of (gradient, variable) pairs where \"gradient\" is the gradient for \"variable\". Note that \"gradient\" can be a , an , or if there is no gradient for the given variable. Args: loss: or callable. If a callable, should take no arguments and return the value to minimize. If a , the argument must be passed. var_list: list or tuple of objects to update to minimize , or a callable returning the list or tuple of objects. Use callable when the variable list would otherwise be incomplete before and the variables are created at the first time when is called. grad_loss: Optional. A holding the gradient computed for . tape: (Optional) . If is provided as a , the tape that computed the must be provided. Returns: A list of (gradient, variable) pairs. Variable is always present, but gradient can be . Raises: TypeError: If contains anything else than objects. ValueError: If some arguments are invalid, or var_list is None.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\optimizer_v2\\optimizer_v2.py",
    "ast_data": "FunctionDef name:_compute_gradients arg:self arg:loss arg:var_list arg:grad_loss arg:tape arguments arg arg arg arg arg If BoolOp Call Compare Raise Call Assign Compare Call If Call With If Call Call Assign Call If Call Assign Call With Assign Call Assign Call With Call Assign Call Call BoolOp Compare Compare Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "record",
    "source_code": "def record(self, stream=None):\n    if stream is None:\n        stream = torch.cuda.current_stream()\n    super().record(stream)",
    "docstring": "Record the event in a given stream. Uses `` if no stream is specified. The stream's device must match the event's device.",
    "type": "method",
    "file_path": "pytorch\\torch\\cuda\\streams.py",
    "ast_data": "FunctionDef name:record arg:self arg:stream arguments arg arg If Compare Assign Call Call Call"
  },
  {
    "library": "scipy",
    "name": "_sd2wt",
    "source_code": "def _sd2wt(self, sd):\n    return 1.0 / np.power(sd, 2)",
    "docstring": "Convert standard deviation to weights.",
    "type": "method",
    "file_path": "scipy\\scipy\\odr\\_odrpack.py",
    "ast_data": "FunctionDef name:_sd2wt arg:self arg:sd arguments arg arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "__call__",
    "source_code": "def __call__(self, unused_op):\n    task = self._next_task\n    self._next_task = (self._next_task + 1) % self._num_tasks\n    return task",
    "docstring": "Choose a ps task index for the given . Args: unused_op: An to be placed on ps. Returns: The next ps task index to use for the . Returns the next index, in the range .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\training\\device_setter.py",
    "ast_data": "FunctionDef name:__call__ arg:self arg:unused_op arguments arg arg Assign Assign Return return:yes"
  },
  {
    "library": "django",
    "name": "aincr",
    "source_code": "async def aincr(self, key, delta=1, version=None):\n    value = await self.aget(key, self._missing_key, version=version)\n    if value is self._missing_key:\n        raise ValueError(\"Key '%s' not found\" % key)\n    new_value = value + delta\n    await self.aset(key, new_value, version=version)\n    return new_value",
    "docstring": "See incr().",
    "type": "method",
    "file_path": "django\\django\\core\\cache\\backends\\base.py",
    "ast_data": "AsyncFunctionDef name:aincr arg:self arg:key arg:delta arg:version arguments arg arg arg arg Assign Call If Compare Raise Call Assign Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_tag_nodes",
    "source_code": "def _tag_nodes(self, selected_nodes: NodeSet):\n    for node in self.module.graph.nodes:\n        if node.op not in CALLABLE_NODE_OPS:\n            continue\n        if node in selected_nodes:\n            node.tag = 'minimize'\n        elif any((n.tag in {'minimize', 'main_1'} for n in node.all_input_nodes if n.op in CALLABLE_NODE_OPS)):\n            node.tag = 'main_1'\n        else:\n            node.tag = 'main_0'",
    "docstring": "Tag selected nodes with tag \"minimize\". Nodes with the same tags will be split to the same submodule afterwards. Args: selected_nodes: Nodes that we want to minimize. We will tag those nodes with \"minimize\", all preceding nodes with \"main_0\" and all following nodes with \"main_1\".",
    "type": "method",
    "file_path": "pytorch\\torch\\fx\\passes\\net_min_base.py",
    "ast_data": "FunctionDef name:_tag_nodes arg:self arg:selected_nodes arguments arg arg For If Compare If Compare Assign If Call Compare Compare Assign Assign"
  },
  {
    "library": "scikit-learn",
    "name": "_preprocess",
    "source_code": "def _preprocess(doc, accent_function=None, lower=False):\n    if lower:\n        doc = doc.lower()\n    if accent_function is not None:\n        doc = accent_function(doc)\n    return doc",
    "docstring": "Chain together an optional series of text preprocessing steps to apply to a document. Parameters ---------- doc: str The string to preprocess accent_function: callable, default=None Function for handling accented characters. Common strategies include normalizing and removing. lower: bool, default=False Whether to use str.lower to lowercase all of the text Returns ------- doc: str preprocessed string",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\feature_extraction\\text.py",
    "ast_data": "FunctionDef name:_preprocess arg:doc arg:accent_function arg:lower arguments arg arg arg If Assign Call If Compare Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "PerWorkerDistributedIterator",
    "source_code": "class PerWorkerDistributedIterator(PerWorkerValues):\n\n    def __next__(self):\n        return self.get_next()\n\n    def get_next(self, name=None):\n        raise NotImplementedError('Iterating over an `AsyncDistributedIterator` is not supported right now.')",
    "docstring": "Distributed iterator for .",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\coordinator\\values.py",
    "ast_data": "ClassDef name:PerWorkerDistributedIterator FunctionDef name:__next__ arg:self arguments arg Return return:yes Call FunctionDef name:get_next arg:self arg:name arguments arg arg Raise Call"
  },
  {
    "library": "scikit-learn",
    "name": "_print_elapsed_time",
    "source_code": "@contextmanager\ndef _print_elapsed_time(source, message=None):\n    if message is None:\n        yield\n    else:\n        start = timeit.default_timer()\n        yield\n        print(_message_with_time(source, message, timeit.default_timer() - start))",
    "docstring": "Log elapsed time to stdout when the context is exited. Parameters ---------- source : str String indicating the source or the reference of the message. message : str, default=None Short message. If None, nothing will be printed. Returns ------- context_manager Prints elapsed time upon exit if verbose.",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\utils\\_user_interface.py",
    "ast_data": "FunctionDef name:_print_elapsed_time arg:source arg:message arguments arg arg If Compare Assign Call Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "@deprecation.deprecated(None, 'Queue-based input pipelines have been replaced by `tf.data`. Use `tf.data.Dataset.map(...)`.')\ndef __init__(self, name=None):\n    rr = gen_io_ops.identity_reader_v2(name=name)\n    super(IdentityReader, self).__init__(rr, supports_serialize=True)",
    "docstring": "Create a IdentityReader. Args: name: A name for the operation (optional).",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\io_ops.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:name arguments arg arg Assign Call Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "compress",
    "source_code": "def compress(element):\n    element_spec = structure.type_spec_from_value(element)\n    tensor_list = structure.to_tensor_list(element_spec, element)\n    return ged_ops.compress_element(tensor_list)",
    "docstring": "Compress a dataset element. Args: element: A nested structure of types supported by Tensorflow. Returns: A variant tensor representing the compressed element. This variant can be passed to to get back the original element.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\data\\experimental\\ops\\compression_ops.py",
    "ast_data": "FunctionDef name:compress arg:element arguments arg Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "set_integrator",
    "source_code": "def set_integrator(self, name, **integrator_params):\n    if name == 'zvode':\n        raise ValueError('zvode must be used with ode, not complex_ode')\n    lband = integrator_params.get('lband')\n    uband = integrator_params.get('uband')\n    if lband is not None or uband is not None:\n        integrator_params['lband'] = 2 * (lband or 0) + 1\n        integrator_params['uband'] = 2 * (uband or 0) + 1\n    return ode.set_integrator(self, name, **integrator_params)",
    "docstring": "Set integrator by name. Parameters ---------- name : str Name of the integrator **integrator_params Additional parameters for the integrator.",
    "type": "method",
    "file_path": "scipy\\scipy\\integrate\\_ode.py",
    "ast_data": "FunctionDef name:set_integrator arg:self arg:name arguments arg arg arg If Compare Raise Call Assign Call Assign Call If BoolOp Compare Compare Assign BoolOp Assign BoolOp Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "fit",
    "source_code": "@_fit_context(prefer_skip_nested_validation=True)\ndef fit(self, X=None, y=None):\n    return self",
    "docstring": "Only validates estimator's parameters. This method allows to: (i) validate the estimator's parameters and (ii) be consistent with the scikit-learn transformer API. Parameters ---------- X : Ignored Not used, present here for API consistency by convention. y : Ignored Not used, present here for API consistency by convention. Returns ------- self : object FeatureHasher class instance.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\feature_extraction\\_hash.py",
    "ast_data": "FunctionDef name:fit arg:self arg:X arg:y arguments arg arg arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_kl_gamma_gamma",
    "source_code": "@kullback_leibler.RegisterKL(Gamma, Gamma)\ndef _kl_gamma_gamma(g0, g1, name=None):\n    with ops.name_scope(name, 'kl_gamma_gamma', values=[g0.concentration, g0.rate, g1.concentration, g1.rate]):\n        return (g0.concentration - g1.concentration) * math_ops.digamma(g0.concentration) + math_ops.lgamma(g1.concentration) - math_ops.lgamma(g0.concentration) + g1.concentration * math_ops.log(g0.rate) - g1.concentration * math_ops.log(g1.rate) + g0.concentration * (g1.rate / g0.rate - 1.0)",
    "docstring": "Calculate the batched KL divergence KL(g0 || g1) with g0 and g1 Gamma. Args: g0: instance of a Gamma distribution object. g1: instance of a Gamma distribution object. name: (optional) Name to use for created operations. Default is \"kl_gamma_gamma\". Returns: kl_gamma_gamma: . The batchwise KL(g0 || g1).",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\distributions\\gamma.py",
    "ast_data": "FunctionDef name:_kl_gamma_gamma arg:g0 arg:g1 arg:name arguments arg arg arg With Call Return return:yes Call Call Call Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "convert_to_tensor_v2",
    "source_code": "def convert_to_tensor_v2(value, dtype=None, dtype_hint=None, name=None) -> tensor_lib.Tensor:\n    return tensor_conversion_registry.convert(value, dtype, name, preferred_dtype=dtype_hint)",
    "docstring": "Converts the given to a .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\tensor_conversion.py",
    "ast_data": "FunctionDef name:convert_to_tensor_v2 arg:value arg:dtype arg:dtype_hint arg:name arguments arg arg arg arg Return return:yes Call"
  },
  {
    "library": "django",
    "name": "make_multi",
    "source_code": "def make_multi(self, geom_type, model_field):\n    return geom_type.num in self.MULTI_TYPES and model_field.__class__.__name__ == 'Multi%s' % geom_type.django",
    "docstring": "Given the OGRGeomType for a geometry and its associated GeometryField, determine whether the geometry should be turned into a GeometryCollection.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\utils\\layermapping.py",
    "ast_data": "FunctionDef name:make_multi arg:self arg:geom_type arg:model_field arguments arg arg arg Return return:yes BoolOp Compare Compare"
  },
  {
    "library": "tensorflow",
    "name": "_add",
    "source_code": "def _add(*x):\n    s = None\n    for y in x:\n        if y is None:\n            continue\n        elif s is None:\n            s = y\n        else:\n            s += y\n    if s is None:\n        raise ValueError('Must specify at least one of `below`, `diag`, `above`.')\n    return s",
    "docstring": "Adds list of Tensors, ignoring .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\distributions\\util.py",
    "ast_data": "FunctionDef name:_add arguments arg Assign For If Compare If Compare Assign If Compare Raise Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "display_sac_stats",
    "source_code": "def display_sac_stats(self, sac_stats: SACStats, print_tabular: bool=False) -> None:\n    print(f'Total Memory: {sum(sac_stats.memory)} B Total Runtime: {sum(sac_stats.runtimes)} ms Store Random: {sac_stats.force_store_random}')\n    table_data = []\n    op_parent = dict(sac_stats.inplace_ops)\n    for i, fn_name in enumerate(sac_stats.func_names):\n        row = [str(i), fn_name, f'{sac_stats.runtimes[i]:.4f}', str(sac_stats.memory[i]), str(i in sac_stats.view_like_ops), str(i in sac_stats.rand_ops), str(i in sac_stats.saved_autograd_ops), str(op_parent.get(i, None))]\n        table_data.append(row)\n    headers = ['Op Idx', 'Op Name', 'Runtimes(ms)', 'Memory (B)', 'View-like', 'Random', 'Saved Autograd', 'In-place']\n    if print_tabular:\n        _display_stats_tabular(headers, table_data)\n    else:\n        max_widths = [0 for _ in range(len(headers))]\n        table_data.insert(0, headers)\n        for row in table_data:\n            for i, elem in enumerate(row):\n                max_widths[i] = max(max_widths[i], len(elem))\n        for row in table_data:\n            print('\\t'.join([f'{elem:<{max_widths[i]}}' for i, elem in enumerate(row)]))",
    "docstring": "Displays the SAC statistics. Args: sac_stats (SACStats): The SAC statistics to display. print_tabular (bool, optional): Whether to print the statistics in a tabular format. Defaults to False. Prints: 1. Total Memory: The total memory usage in bytes. 2. Total Runtime: The total runtime in milliseconds. 3. Store Random: A flag indicating whether to force store random operator results. Followed by a table with the following columns: 1. Op Idx: The operator index. 2. Op Name: The operator name. 3. Runtimes (ms): The operator runtime in milliseconds. 4. Memory (B): The operator memory usage in bytes. 5. View-like: A flag indicating whether the operator is view-like. 6. Random: A flag indicating whether the operator is random. 7. Saved Autograd: A flag indicating whether the operator's result is saved by autograd engine. 8. In-place: The index of the operator's first parent, or None if not in-place. If print_tabular is True, the table is printed in a tabular format. Otherwise, the table is printed in a plain text format.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\_tools\\sac_estimator.py",
    "ast_data": "FunctionDef name:display_sac_stats arg:self arg:sac_stats arg:print_tabular arguments arg arg arg Call Call Call Assign Assign Call For Call Assign Call Call Call Compare Call Compare Call Compare Call Call Call Assign If Call Assign Call Call Call For For Call Assign Call Call For Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_SparseSliceGrad",
    "source_code": "@ops.RegisterGradient('SparseSlice')\ndef _SparseSliceGrad(op: ops.Operation, *grads):\n    backprop_val_grad = grads[1]\n    input_indices = op.inputs[0]\n    input_start = op.inputs[3]\n    output_indices = op.outputs[0]\n    val_grad = gen_sparse_ops.sparse_slice_grad(backprop_val_grad, input_indices, input_start, output_indices)\n    val_grad.set_shape(op.inputs[1].get_shape())\n    return (None, val_grad, None, None, None)",
    "docstring": "The backward operator for the SparseSlice op. This op takes in the upstream gradient w.r.t. non-empty values of the sliced , and outputs the gradients w.r.t. the non-empty values of input . Args: op: the SparseSlice op *grads: the incoming gradients, one element per output of Returns: Gradient for each of the 5 input tensors of SparseSlice: (indices, values, shape, start, size) The gradients for the indices, shape, start and the size are None.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\sparse_grad.py",
    "ast_data": "FunctionDef name:_SparseSliceGrad arg:op arguments arg arg Assign Assign Assign Assign Assign Call Call Call Return return:yes Call"
  },
  {
    "library": "django",
    "name": "phone2numeric",
    "source_code": "@keep_lazy_text\ndef phone2numeric(phone):\n    char2number = {'a': '2', 'b': '2', 'c': '2', 'd': '3', 'e': '3', 'f': '3', 'g': '4', 'h': '4', 'i': '4', 'j': '5', 'k': '5', 'l': '5', 'm': '6', 'n': '6', 'o': '6', 'p': '7', 'q': '7', 'r': '7', 's': '7', 't': '8', 'u': '8', 'v': '8', 'w': '9', 'x': '9', 'y': '9', 'z': '9'}\n    return ''.join((char2number.get(c, c) for c in phone.lower()))",
    "docstring": "Convert a phone number with letters into its numeric equivalent.",
    "type": "function",
    "file_path": "django\\django\\utils\\text.py",
    "ast_data": "FunctionDef name:phone2numeric arg:phone arguments arg Assign Return return:yes Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "SendEvents",
    "source_code": "def SendEvents(self, request_iterator, context):\n    context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n    context.set_details('Method not implemented!')\n    raise NotImplementedError('Method not implemented!')",
    "docstring": "Client(s) can use this RPC method to send the EventListener Event protos. The Event protos can hold information such as: 1) intermediate tensors from a debugged graph being executed, which can be sent from DebugIdentity ops configured with grpc URLs. 2) GraphDefs of partition graphs, which can be sent from special debug ops that get executed immediately after the beginning of the graph execution.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\debug\\lib\\debug_service_pb2_grpc.py",
    "ast_data": "FunctionDef name:SendEvents arg:self arg:request_iterator arg:context arguments arg arg arg Call Call Raise Call"
  },
  {
    "library": "pytorch",
    "name": "make_runtime_safe",
    "source_code": "def make_runtime_safe(fw_metadata: ViewAndMutationMeta, maybe_subclass_meta: Optional[SubclassMeta]):\n    fw_metadata.make_runtime_safe()\n    if maybe_subclass_meta is not None:\n        maybe_subclass_meta.fw_metadata.make_runtime_safe()\n        if maybe_subclass_meta.grad_input_metas:\n            for meta in maybe_subclass_meta.grad_input_metas:\n                if isinstance(meta, SubclassCreationMeta):\n                    meta.make_runtime_safe()",
    "docstring": "Calls make_runtime_safe on all ViewAndMutationMetas. Modifies both arguments. Allows ViewAndMutationMetas to be safely cached in AOTAutogradCache.",
    "type": "function",
    "file_path": "pytorch\\torch\\_functorch\\_aot_autograd\\runtime_wrappers.py",
    "ast_data": "FunctionDef name:make_runtime_safe arg:fw_metadata arg:maybe_subclass_meta arguments arg arg Call If Compare Call If For If Call Call"
  },
  {
    "library": "kornia",
    "name": "emb_mags",
    "source_code": "def emb_mags(self, mags: Tensor) -> Tensor:\n    mags = torch.sqrt(mags + self.eps)\n    return mags",
    "docstring": "Embed square roots of magnitudes with eps for numerical reasons.",
    "type": "method",
    "file_path": "kornia\\kornia\\feature\\mkd.py",
    "ast_data": "FunctionDef name:emb_mags arg:self arg:mags arguments arg arg Assign Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "get_image_magnification",
    "source_code": "def get_image_magnification(self):\n    return 1.0",
    "docstring": "Get the factor by which to magnify images passed to . Allows a backend to have images at a different resolution to other artists.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\backend_bases.py",
    "ast_data": "FunctionDef name:get_image_magnification arg:self arguments arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "transform_for_annotation",
    "source_code": "def transform_for_annotation(self, model: torch.fx.GraphModule) -> torch.fx.GraphModule:\n    return model",
    "docstring": "Allows for user defined transforms to run before annotating the graph. This allows quantizer to allow quantizing part of the model that are otherwise not quantizable. For example quantizer can a) decompose a compound operator like scaled dot product attention, into bmm and softmax if quantizer knows how to quantize bmm/softmax but not sdpa or b) transform scalars to tensor to allow quantizing scalares. Note: this is an optional method",
    "type": "method",
    "file_path": "pytorch\\torch\\ao\\quantization\\quantizer\\quantizer.py",
    "ast_data": "FunctionDef name:transform_for_annotation arg:self arg:model arguments arg arg Return return:yes"
  },
  {
    "library": "pandas",
    "name": "is_consolidated",
    "source_code": "def is_consolidated(self) -> bool:\n    if not self._known_consolidated:\n        self._consolidate_check()\n    return self._is_consolidated",
    "docstring": "Return True if more than one block with the same dtype",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\internals\\managers.py",
    "ast_data": "FunctionDef name:is_consolidated arg:self arguments arg If Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_merge_partition_lists",
    "source_code": "def _merge_partition_lists(partition_lists):\n    dst = list(partition_lists[0])\n    for src in partition_lists[1:]:\n        if len(src) != len(dst):\n            raise ValueError('All ragged inputs must have the same ragged_rank.')\n        for i in range(len(dst)):\n            dst[i] = dst[i]._merge_precomputed_encodings(src[i])\n    return dst",
    "docstring": "Merges the given list of lists of RowPartitions. Args: partition_lists: A list of lists of RowPartition. Returns: A list of RowPartitions, where is formed by merging for all , using .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\ragged_functional_ops.py",
    "ast_data": "FunctionDef name:_merge_partition_lists arg:partition_lists arguments arg Assign Call For If Compare Call Call Raise Call For Call Call Assign Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "user_autotune",
    "source_code": "def user_autotune(configs, triton_meta, filename=None, inductor_meta=None, custom_kernel=False):\n    if len(configs) == 0:\n        configs = [triton.Config({})]\n    else:\n        configs = [*map(config_from_dict, configs)]\n    return cached_autotune(None, configs, triton_meta=triton_meta, heuristic_type=HeuristicType.USER_AUTOTUNE, filename=filename, inductor_meta=inductor_meta, custom_kernel=custom_kernel)",
    "docstring": "Compile a user defined triton kernel",
    "type": "function",
    "file_path": "pytorch\\torch\\_inductor\\runtime\\triton_heuristics.py",
    "ast_data": "FunctionDef name:user_autotune arg:configs arg:triton_meta arg:filename arg:inductor_meta arg:custom_kernel arguments arg arg arg arg arg If Compare Call Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_build_composite_tensor_info_internal",
    "source_code": "def _build_composite_tensor_info_internal(tensor):\n    spec = tensor._type_spec\n    tensor_info = meta_graph_pb2.TensorInfo()\n    spec_proto = nested_structure_coder.encode_structure(spec)\n    tensor_info.composite_tensor.type_spec.CopyFrom(spec_proto.type_spec_value)\n    for component in nest.flatten(tensor, expand_composites=True):\n        tensor_info.composite_tensor.components.add().CopyFrom(build_tensor_info_internal(component))\n    return tensor_info",
    "docstring": "Utility function to build TensorInfo proto from a CompositeTensor.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\saved_model\\utils_impl.py",
    "ast_data": "FunctionDef name:_build_composite_tensor_info_internal arg:tensor arguments arg Assign Assign Call Assign Call Call For Call Call Call Call Return return:yes"
  },
  {
    "library": "scipy",
    "name": "_set_many",
    "source_code": "def _set_many(self, i, j, x):\n    i, j, M, N = self._prepare_indices(i, j)\n    x = np.atleast_1d(np.asarray(x, dtype=self.dtype)).ravel()\n    n_samples = x.size\n    offsets = np.empty(n_samples, dtype=self.indices.dtype)\n    ret = csr_sample_offsets(M, N, self.indptr, self.indices, n_samples, i, j, offsets)\n    if ret == 1:\n        self.sum_duplicates()\n        csr_sample_offsets(M, N, self.indptr, self.indices, n_samples, i, j, offsets)\n    if -1 not in offsets:\n        self.data[offsets] = x\n        return\n    else:\n        warn(f'Changing the sparsity structure of a {self.__class__.__name__} is expensive. lil and dok are more efficient.', SparseEfficiencyWarning, stacklevel=3)\n        mask = offsets > -1\n        self.data[offsets[mask]] = x[mask]\n        mask = ~mask\n        i = i[mask]\n        i[i < 0] += M\n        j = j[mask]\n        j[j < 0] += N\n        self._insert_many(i, j, x[mask])",
    "docstring": "Sets value at each (i, j) to x Here (i,j) index major and minor respectively, and must not contain duplicate entries.",
    "type": "method",
    "file_path": "scipy\\scipy\\sparse\\_compressed.py",
    "ast_data": "FunctionDef name:_set_many arg:self arg:i arg:j arg:x arguments arg arg arg arg Assign Call Assign Call Call Call Assign Assign Call Assign Call If Compare Call Call If Compare Assign Return return:no Call Assign Compare Assign Assign Assign Compare Assign Compare Call"
  },
  {
    "library": "tensorflow",
    "name": "recover_session",
    "source_code": "def recover_session(self, master: str, saver: saver_lib.Saver=None, checkpoint_dir: str=None, checkpoint_filename_with_path: str=None, wait_for_checkpoint=False, max_wait_secs=7200, config=None) -> Tuple[session.Session, bool]:\n    sess, is_loaded_from_checkpoint = self._restore_checkpoint(master, saver, checkpoint_dir=checkpoint_dir, checkpoint_filename_with_path=checkpoint_filename_with_path, wait_for_checkpoint=wait_for_checkpoint, max_wait_secs=max_wait_secs, config=config)\n    local_init_success, msg = self._try_run_local_init_op(sess)\n    if not is_loaded_from_checkpoint:\n        return (sess, False)\n    restoring_file = checkpoint_dir or checkpoint_filename_with_path\n    if not local_init_success:\n        logging.info('Restoring model from %s did not make model ready for local init: %s', restoring_file, msg)\n        return (sess, False)\n    is_ready, msg = self._model_ready(sess)\n    if not is_ready:\n        logging.info('Restoring model from %s did not make model ready: %s', restoring_file, msg)\n        return (sess, False)\n    logging.info('Restored model from %s', restoring_file)\n    return (sess, is_loaded_from_checkpoint)",
    "docstring": "Creates a , recovering if possible. Creates a new session on 'master'. If the session is not initialized and can be recovered from a checkpoint, recover it. Args: master: representation of the TensorFlow master to use. saver: A object used to restore a model. checkpoint_dir: Path to the checkpoint files. The latest checkpoint in the dir will be used to restore. checkpoint_filename_with_path: Full file name path to the checkpoint file. wait_for_checkpoint: Whether to wait for checkpoint to become available. max_wait_secs: Maximum time to wait for checkpoints to become available. config: Optional proto used to configure the session. Returns: A pair (sess, initialized) where 'initialized' is if the session could be recovered and initialized, otherwise. Raises: ValueError: If both checkpoint_dir and checkpoint_filename_with_path are set.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\training\\session_manager.py",
    "ast_data": "FunctionDef name:recover_session arg:self arg:master arg:saver arg:checkpoint_dir arg:checkpoint_filename_with_path arg:wait_for_checkpoint arg:max_wait_secs arg:config arguments arg arg arg arg arg arg arg arg Assign Call Assign Call If Return return:yes Assign BoolOp If Call Return return:yes Assign Call If Call Return return:yes Call Return return:yes"
  },
  {
    "library": "django",
    "name": "get_make_object_list",
    "source_code": "def get_make_object_list(self):\n    return self.make_object_list",
    "docstring": "Return if this view should contain the full list of objects in the given year.",
    "type": "method",
    "file_path": "django\\django\\views\\generic\\dates.py",
    "ast_data": "FunctionDef name:get_make_object_list arg:self arguments arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "LoggingLoggerVariable",
    "source_code": "class LoggingLoggerVariable(VariableTracker):\n\n    def __init__(self, value, **kwargs) -> None:\n        super().__init__(**kwargs)\n        self.value = value\n\n    def call_method(self, tx: 'InstructionTranslator', name, args: 'list[VariableTracker]', kwargs: 'dict[str, VariableTracker]') -> 'VariableTracker':\n        if tx.export:\n            return\n        method = getattr(self.value, name, None)\n        function = getattr(method, '__func__', None)\n        if {method, function}.intersection(torch._dynamo.config.ignore_logger_methods):\n            return variables.ConstantVariable.create(None)\n        unimplemented('Logger not supported for non-export cases. To avoid graph breaks caused by logger in compile-mode, it is recommended to disable logging by adding logging methods to config.ignore_logger_methods')",
    "docstring": "Represents a call to any of logging.Logger methods",
    "type": "class",
    "file_path": "pytorch\\torch\\_dynamo\\variables\\misc.py",
    "ast_data": "ClassDef name:LoggingLoggerVariable FunctionDef name:__init__ arg:self arg:value arguments arg arg arg Call Call Assign FunctionDef name:call_method arg:self arg:tx arg:name arg:args arg:kwargs arguments arg arg arg arg arg If Return return:no Assign Call Assign Call If Call Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "get_signature_list",
    "source_code": "def get_signature_list(self):\n    full_signature_defs = self._interpreter.GetSignatureDefs()\n    for _, signature_def in full_signature_defs.items():\n        signature_def['inputs'] = list(signature_def['inputs'].keys())\n        signature_def['outputs'] = list(signature_def['outputs'].keys())\n    return full_signature_defs",
    "docstring": "Gets the list of SignatureDefs in the model. Example, Returns: A list of SignatureDef details in a dictionary structure. It is keyed on the SignatureDef method name, and the value holds a dictionary of inputs and outputs.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\lite\\python\\interpreter.py",
    "ast_data": "FunctionDef name:get_signature_list arg:self arguments arg Assign Call For Call Assign Call Call Assign Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_PyTreeInfo",
    "source_code": "class _PyTreeInfo(NamedTuple):\n    orig_args: list[str]\n    in_spec: pytree.TreeSpec\n    out_spec: Optional[pytree.TreeSpec]",
    "docstring": "Contains extra info stored when we're using Pytrees",
    "type": "class",
    "file_path": "pytorch\\torch\\fx\\graph.py",
    "ast_data": "ClassDef name:_PyTreeInfo"
  },
  {
    "library": "pytorch",
    "name": "get_block_size",
    "source_code": "def get_block_size(input_shape: tuple[int, ...], granularity: Granularity) -> tuple[int, ...]:\n    assert isinstance(granularity, Granularity), 'Please provide an instance of Granularity, not subclass of it'\n    if isinstance(granularity, PerTensor):\n        return input_shape\n    elif isinstance(granularity, PerAxis):\n        block_size = list(input_shape)\n        block_size[granularity.axis] = 1\n        return tuple(block_size)\n    elif isinstance(granularity, PerRow):\n        return (1,) * (len(input_shape) - 1) + (input_shape[-1],)\n    elif isinstance(granularity, PerGroup):\n        assert len(input_shape) == 2, f'Expecting input shape dim to be 2 for per group quantization, gotinput shape: {input_shape}'\n        return (1, granularity.group_size)\n    elif isinstance(granularity, PerToken):\n        block_size = [1] * len(input_shape)\n        block_size[-1] = input_shape[-1]\n        return tuple(block_size)\n    raise ValueError(f'Unsupported Granularity: {granularity}')",
    "docstring": "Get the block size based on the input shape and granularity type. Args: input_shape: The input tensor shape possibly more than 2 dimensions granularity: The granularity type of the quantization",
    "type": "function",
    "file_path": "pytorch\\torch\\ao\\quantization\\observer.py",
    "ast_data": "FunctionDef name:get_block_size arg:input_shape arg:granularity arguments arg arg Call If Call Return return:yes If Call Assign Call Assign Return return:yes Call If Call Return return:yes Call If Call Compare Call Return return:yes If Call Assign Call Assign Return return:yes Call Raise Call"
  },
  {
    "library": "scipy",
    "name": "tempdir",
    "source_code": "@contextmanager\ndef tempdir():\n    d = mkdtemp()\n    yield d\n    rmtree(d)",
    "docstring": "Create and return a temporary directory. This has the same behavior as mkdtemp but can be used as a context manager. Upon exiting the context, the directory and everything contained in it are removed. Examples -------- >>> import os >>> with tempdir() as tmpdir: ... fname = os.path.join(tmpdir, 'example_file.txt') ... with open(fname, 'wt') as fobj: ... _ = fobj.write('a string\\n') >>> os.path.exists(tmpdir) False",
    "type": "function",
    "file_path": "scipy\\scipy\\_lib\\_tmpdirs.py",
    "ast_data": "FunctionDef name:tempdir arguments Assign Call Call"
  },
  {
    "library": "tensorflow",
    "name": "unsupported_alg_error_msg",
    "source_code": "def unsupported_alg_error_msg(alg):\n    if isinstance(alg, int):\n        philox = Algorithm.PHILOX.value\n        threefry = Algorithm.THREEFRY.value\n        auto_select = Algorithm.AUTO_SELECT.value\n    elif isinstance(alg, str):\n        philox = 'philox'\n        threefry = 'threefry'\n        auto_select = 'auto_select'\n    else:\n        philox = Algorithm.PHILOX\n        threefry = Algorithm.THREEFRY\n        auto_select = Algorithm.AUTO_SELECT\n    return f'Argument `alg` got unsupported value {alg}. Supported values are {philox} for the Philox algorithm, {threefry} for the ThreeFry algorithm, and {auto_select} for auto-selection.'",
    "docstring": "Produces the unsupported-algorithm error message.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\random_ops_util.py",
    "ast_data": "FunctionDef name:unsupported_alg_error_msg arg:alg arguments arg If Call Assign Assign Assign If Call Assign Assign Assign Assign Assign Assign Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "ToolCopyToClipboardBase",
    "source_code": "class ToolCopyToClipboardBase(ToolBase):\n    description = 'Copy the canvas figure to clipboard'\n    default_keymap = property(lambda self: mpl.rcParams['keymap.copy'])\n\n    def trigger(self, *args, **kwargs):\n        message = 'Copy tool is not available'\n        self.toolmanager.message_event(message, self)",
    "docstring": "Tool to copy the figure to the clipboard.",
    "type": "class",
    "file_path": "matplotlib\\lib\\matplotlib\\backend_tools.py",
    "ast_data": "ClassDef name:ToolCopyToClipboardBase Assign Assign Call arguments arg FunctionDef name:trigger arg:self arguments arg arg arg Assign Call"
  },
  {
    "library": "scikit-learn",
    "name": "feature_importances_",
    "source_code": "@property\ndef feature_importances_(self):\n    self._check_initialized()\n    relevant_trees = [tree for stage in self.estimators_ for tree in stage if tree.tree_.node_count > 1]\n    if not relevant_trees:\n        return np.zeros(shape=self.n_features_in_, dtype=np.float64)\n    relevant_feature_importances = [tree.tree_.compute_feature_importances(normalize=False) for tree in relevant_trees]\n    avg_feature_importances = np.mean(relevant_feature_importances, axis=0, dtype=np.float64)\n    return avg_feature_importances / np.sum(avg_feature_importances)",
    "docstring": "The impurity-based feature importances. The higher, the more important the feature. The importance of a feature is computed as the (normalized) total reduction of the criterion brought by that feature. It is also known as the Gini importance. Warning: impurity-based feature importances can be misleading for high cardinality features (many unique values). See :func: as an alternative. Returns ------- feature_importances_ : ndarray of shape (n_features,) The values of this array sum to 1, unless all trees are single node trees consisting of only the root node, in which case it will be an array of zeros.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\ensemble\\_gb.py",
    "ast_data": "FunctionDef name:feature_importances_ arg:self arguments arg Call Assign Compare If Return return:yes Call Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "astype",
    "source_code": "def astype(self, dtype: AstypeArg | None=None, copy: bool=True):\n    if dtype == self._dtype:\n        if not copy:\n            return self\n        else:\n            return self.copy()\n    future_dtype = pandas_dtype(dtype)\n    if not isinstance(future_dtype, SparseDtype):\n        values = np.asarray(self)\n        values = ensure_wrapped_if_datetimelike(values)\n        return astype_array(values, dtype=future_dtype, copy=False)\n    dtype = self.dtype.update_dtype(dtype)\n    subtype = pandas_dtype(dtype._subtype_with_str)\n    subtype = cast(np.dtype, subtype)\n    values = ensure_wrapped_if_datetimelike(self.sp_values)\n    sp_values = astype_array(values, subtype, copy=copy)\n    sp_values = np.asarray(sp_values)\n    return self._simple_new(sp_values, self.sp_index, dtype)",
    "docstring": "Change the dtype of a SparseArray. The output will always be a SparseArray. To convert to a dense ndarray with a certain dtype, use :meth:. Parameters ---------- dtype : np.dtype or ExtensionDtype For SparseDtype, this changes the dtype of ``. >>> arr.astype(SparseDtype(np.dtype(\"float64\"))) ... # doctest: +NORMALIZE_WHITESPACE [nan, nan, 1.0, 2.0] Fill: nan IntIndex Indices: array([2, 3], dtype=int32) Using a SparseDtype, you can also change the fill value as well. >>> arr.astype(SparseDtype(\"float64\", fill_value=0.0)) ... # doctest: +NORMALIZE_WHITESPACE [0.0, 0.0, 1.0, 2.0] Fill: 0.0 IntIndex Indices: array([2, 3], dtype=int32)",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\arrays\\sparse\\array.py",
    "ast_data": "FunctionDef name:astype arg:self arg:dtype arg:copy arguments arg arg arg If Compare If Return return:yes Return return:yes Call Assign Call If Call Assign Call Assign Call Return return:yes Call Assign Call Assign Call Assign Call Assign Call Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "min_over_ndim",
    "source_code": "def min_over_ndim(input, axis_list, keepdim=False):\n    axis_list.sort(reverse=True)\n    for axis in axis_list:\n        input, _ = input.min(axis, keepdim)\n    return input",
    "docstring": "Apply 'torch.min' over the given axes.",
    "type": "function",
    "file_path": "pytorch\\torch\\ao\\quantization\\_equalize.py",
    "ast_data": "FunctionDef name:min_over_ndim arg:input arg:axis_list arg:keepdim arguments arg arg arg Call For Assign Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "from_values",
    "source_code": "@staticmethod\ndef from_values(a, b, c, d, e, f):\n    return Affine2D(np.array([a, c, e, b, d, f, 0.0, 0.0, 1.0], float).reshape((3, 3)))",
    "docstring": "Create a new Affine2D instance from the given values:: a c e b d f 0 0 1 .",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\transforms.py",
    "ast_data": "FunctionDef name:from_values arg:a arg:b arg:c arg:d arg:e arg:f arguments arg arg arg arg arg arg Return return:yes Call Call Call"
  },
  {
    "library": "django",
    "name": "preprocess",
    "source_code": "def preprocess(self):\n    if not self.is_templatized:\n        return\n    with open(self.path, encoding='utf-8') as fp:\n        src_data = fp.read()\n    if self.domain == 'django':\n        content = templatize(src_data, origin=self.path[2:])\n    with open(self.work_path, 'w', encoding='utf-8') as fp:\n        fp.write(content)",
    "docstring": "Preprocess (if necessary) a translatable file before passing it to xgettext GNU gettext utility.",
    "type": "method",
    "file_path": "django\\django\\core\\management\\commands\\makemessages.py",
    "ast_data": "FunctionDef name:preprocess arg:self arguments arg If Return return:no With Call Assign Call If Compare Assign Call With Call Call"
  },
  {
    "library": "pandas",
    "name": "add_references",
    "source_code": "def add_references(self, mgr: BaseBlockManager) -> None:\n    if len(self.blocks) != len(mgr.blocks):\n        return\n    for i, blk in enumerate(self.blocks):\n        blk.refs = mgr.blocks[i].refs\n        blk.refs.add_reference(blk)",
    "docstring": "Adds the references from one manager to another. We assume that both managers have the same block structure.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\internals\\managers.py",
    "ast_data": "FunctionDef name:add_references arg:self arg:mgr arguments arg arg If Compare Call Call Return return:no For Call Assign Call"
  },
  {
    "library": "numpy",
    "name": "read_array_header_1_0",
    "source_code": "@set_module('numpy.lib.format')\ndef read_array_header_1_0(fp, max_header_size=_MAX_HEADER_SIZE):\n    return _read_array_header(fp, version=(1, 0), max_header_size=max_header_size)",
    "docstring": "Read an array header from a filelike object using the 1.0 file format version. This will leave the file object located just after the header. Parameters ---------- fp : filelike object A file object or something with a method like a file. Returns ------- shape : tuple of int The shape of the array. fortran_order : bool The array data will be written out directly if it is either C-contiguous or Fortran-contiguous. Otherwise, it will be made contiguous before writing it out. dtype : dtype The dtype of the file's data. max_header_size : int, optional Maximum allowed size of the header. Large headers may not be safe to load securely and thus require explicitly passing a larger value. See :py:func: for details. Raises ------ ValueError If the data is invalid.",
    "type": "function",
    "file_path": "numpy\\numpy\\lib\\_format_impl.py",
    "ast_data": "FunctionDef name:read_array_header_1_0 arg:fp arg:max_header_size arguments arg arg Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "_serialize_metadata",
    "source_code": "@staticmethod\ndef _serialize_metadata(metadata):\n\n    def is_namedtuple(obj) -> bool:\n        return isinstance(obj, tuple) and hasattr(obj, '_asdict') and hasattr(obj, '_fields')\n    if is_namedtuple(metadata):\n        return metadata._asdict()\n    else:\n        return metadata",
    "docstring": "Triton uses a nested class called KernelMetadata to store metadata information. Pickle does not work well with nested namedtuples, as the namedtuple doesn't appear in the toplevel namespace of the module. So these serialization/deser functions are used to convert the namedtuples to a dict and back. As for packed_metadata, depending on the triton backend, KernelMetadata can be a namedtuple, or a regular tuple! So the serialization function branches on whether the metadata to be serialized is a namedtuple or regular, serializable one.",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\runtime\\triton_heuristics.py",
    "ast_data": "FunctionDef name:_serialize_metadata arg:metadata arguments arg FunctionDef name:is_namedtuple arg:obj arguments arg Return return:yes BoolOp Call Call Call If Call Return return:yes Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "get_live_ranges",
    "source_code": "def get_live_ranges(self) -> LiveRanges:\n    raise NotImplementedError",
    "docstring": "Aggregate LiveRanges for all objects below this in tree",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\codegen\\memory_planning.py",
    "ast_data": "FunctionDef name:get_live_ranges arg:self arguments arg Raise"
  },
  {
    "library": "scikit-learn",
    "name": "get_feature_names_out",
    "source_code": "def get_feature_names_out(self, input_features=None):\n    check_is_fitted(self, 'n_features_in_')\n    n_splines = self.bsplines_[0].c.shape[1]\n    input_features = _check_feature_names_in(self, input_features)\n    feature_names = []\n    for i in range(self.n_features_in_):\n        for j in range(n_splines - 1 + self.include_bias):\n            feature_names.append(f'{input_features[i]}_sp_{j}')\n    return np.asarray(feature_names, dtype=object)",
    "docstring": "Get output feature names for transformation. Parameters ---------- input_features : array-like of str or None, default=None Input features. - If is , then is used as feature names in. If is not defined, then the following input feature names are generated: . - If is an array-like, then must match if is defined. Returns ------- feature_names_out : ndarray of str objects Transformed feature names.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\preprocessing\\_polynomial.py",
    "ast_data": "FunctionDef name:get_feature_names_out arg:self arg:input_features arguments arg arg Call Assign Assign Call Assign For Call For Call Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "device",
    "source_code": "def device(self) -> torch.device:\n    return self._device",
    "docstring": "Return the local device on the remote worker.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\remote_device.py",
    "ast_data": "FunctionDef name:device arg:self arguments arg Return return:yes"
  },
  {
    "library": "django",
    "name": "RawPostDataException",
    "source_code": "class RawPostDataException(Exception):\n    pass",
    "docstring": "You cannot access raw_post_data from a request that has multipart/* POST data if it has been accessed via POST, FILES, etc..",
    "type": "class",
    "file_path": "django\\django\\http\\request.py",
    "ast_data": "ClassDef name:RawPostDataException"
  },
  {
    "library": "cryptography",
    "name": "put_sshstr",
    "source_code": "def put_sshstr(self, val: bytes | _FragList) -> None:\n    if isinstance(val, (bytes, memoryview, bytearray)):\n        self.put_u32(len(val))\n        self.flist.append(val)\n    else:\n        self.put_u32(val.size())\n        self.flist.extend(val.flist)",
    "docstring": "Bytes prefixed with u32 length",
    "type": "method",
    "file_path": "cryptography\\src\\cryptography\\hazmat\\primitives\\serialization\\ssh.py",
    "ast_data": "FunctionDef name:put_sshstr arg:self arg:val arguments arg arg If Call Call Call Call Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "_assign_modules_buffers",
    "source_code": "def _assign_modules_buffers(self):\n    named_module_buffers = [(buffer, buffer_name) for buffer_name, buffer in self.module.named_buffers() if buffer_name not in self.parameters_to_ignore]\n    self.modules_buffers = [buffer for buffer, buffer_name in named_module_buffers]\n    self.named_module_buffers = {buffer_name: buffer for buffer, buffer_name in named_module_buffers}",
    "docstring": "Assign self.module.named_buffers to self.modules_buffers. Assigns module buffers to self.modules_buffers which are then used to broadcast across ranks when broadcast_buffers=True. Note that this must be called every time buffers need to be synced because buffers can be reassigned by user module, see",
    "type": "method",
    "file_path": "pytorch\\torch\\nn\\parallel\\distributed.py",
    "ast_data": "FunctionDef name:_assign_modules_buffers arg:self arguments arg Assign Call Compare Assign Assign"
  },
  {
    "library": "tensorflow",
    "name": "save_weights_to_hdf5_group",
    "source_code": "def save_weights_to_hdf5_group(f, layers):\n    from tensorflow.python.keras import __version__ as keras_version\n    save_attributes_to_hdf5_group(f, 'layer_names', [layer.name.encode('utf8') for layer in layers])\n    f.attrs['backend'] = backend.backend().encode('utf8')\n    f.attrs['keras_version'] = str(keras_version).encode('utf8')\n    for layer in sorted(layers, key=lambda x: x.name):\n        g = f.create_group(layer.name)\n        weights = _legacy_weights(layer)\n        weight_values = backend.batch_get_value(weights)\n        weight_names = [w.name.encode('utf8') for w in weights]\n        save_attributes_to_hdf5_group(g, 'weight_names', weight_names)\n        for name, val in zip(weight_names, weight_values):\n            param_dset = g.create_dataset(name, val.shape, dtype=val.dtype)\n            if not val.shape:\n                param_dset[()] = val\n            else:\n                param_dset[:] = val",
    "docstring": "Saves the weights of a list of layers to a HDF5 group. Args: f: HDF5 group. layers: List of layer instances.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\saving\\hdf5_format.py",
    "ast_data": "FunctionDef name:save_weights_to_hdf5_group arg:f arg:layers arguments arg arg Call Call Assign Call Call Assign Call Call For Call arguments arg Assign Call Assign Call Assign Call Assign Call Call For Call Assign Call If Assign Assign"
  },
  {
    "library": "django",
    "name": "as_textarea",
    "source_code": "def as_textarea(self, attrs=None, **kwargs):\n    return self.as_widget(Textarea(), attrs, **kwargs)",
    "docstring": "Return a string of HTML for representing this as a .",
    "type": "method",
    "file_path": "django\\django\\forms\\boundfield.py",
    "ast_data": "FunctionDef name:as_textarea arg:self arg:attrs arguments arg arg arg Return return:yes Call Call"
  },
  {
    "library": "pandas",
    "name": "_gen_line_numbers",
    "source_code": "def _gen_line_numbers(self) -> Iterator[str]:\n    for i, _ in enumerate(self.ids):\n        yield f' {i}'",
    "docstring": "Iterator with string representation of column numbers.",
    "type": "method",
    "file_path": "pandas\\pandas\\io\\formats\\info.py",
    "ast_data": "FunctionDef name:_gen_line_numbers arg:self arguments arg For Call"
  },
  {
    "library": "django",
    "name": "_fix_polygon",
    "source_code": "@classmethod\ndef _fix_polygon(cls, poly, clone=True):\n    if clone:\n        poly = poly.clone()\n    if not poly.exterior_ring.is_counterclockwise:\n        poly.exterior_ring = list(reversed(poly.exterior_ring))\n    for i in range(1, len(poly)):\n        if poly[i].is_counterclockwise:\n            poly[i] = list(reversed(poly[i]))\n    return poly",
    "docstring": "Fix single polygon orientation as described in __init__().",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\db\\backends\\oracle\\adapter.py",
    "ast_data": "FunctionDef name:_fix_polygon arg:cls arg:poly arg:clone arguments arg arg arg If Assign Call If Assign Call Call For Call Call If Assign Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_get_main_op_tensor",
    "source_code": "def _get_main_op_tensor(meta_graph_def_to_load, init_op_key=constants.MAIN_OP_KEY):\n    collection_def = meta_graph_def_to_load.collection_def\n    init_op = None\n    if init_op_key in collection_def:\n        init_op_list = collection_def[init_op_key].node_list.value\n        if len(init_op_list) != 1:\n            raise RuntimeError(f'Expected exactly one SavedModel init op. Found {len(init_op_list)}: {init_op_list}.')\n        init_op = ops.get_collection(init_op_key)[0]\n    return init_op",
    "docstring": "Gets the main op tensor, if one exists. Args: meta_graph_def_to_load: The meta graph def from the SavedModel to be loaded. init_op_key: name of the collection to check; should be one of MAIN_OP_KEY or the deprecated LEGACY_INIT_OP_KEY Returns: The main op tensor, if it exists and otherwise. Raises: RuntimeError: If the collection def corresponding to the main op key has other than exactly one tensor.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\saved_model\\loader_impl.py",
    "ast_data": "FunctionDef name:_get_main_op_tensor arg:meta_graph_def_to_load arg:init_op_key arguments arg arg Assign Assign If Compare Assign If Compare Call Raise Call Call Assign Call Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "partial_fit",
    "source_code": "@available_if(_estimators_has('partial_fit'))\n@_fit_context(prefer_skip_nested_validation=False)\ndef partial_fit(self, X, y, classes=None, **partial_fit_params):\n    _raise_for_params(partial_fit_params, self, 'partial_fit')\n    routed_params = process_routing(self, 'partial_fit', **partial_fit_params)\n    if _check_partial_fit_first_call(self, classes):\n        self.estimators_ = [clone(self.estimator) for _ in range(self.n_classes_)]\n        self.label_binarizer_ = LabelBinarizer(sparse_output=True)\n        self.label_binarizer_.fit(self.classes_)\n    if len(np.setdiff1d(y, self.classes_)):\n        raise ValueError(('Mini-batch contains {0} while classes ' + 'must be subset of {1}').format(np.unique(y), self.classes_))\n    Y = self.label_binarizer_.transform(y)\n    Y = Y.tocsc()\n    columns = (col.toarray().ravel() for col in Y.T)\n    self.estimators_ = Parallel(n_jobs=self.n_jobs)((delayed(_partial_fit_binary)(estimator, X, column, partial_fit_params=routed_params.estimator.partial_fit) for estimator, column in zip(self.estimators_, columns)))\n    if hasattr(self.estimators_[0], 'n_features_in_'):\n        self.n_features_in_ = self.estimators_[0].n_features_in_\n    return self",
    "docstring": "Partially fit underlying estimators. Should be used when memory is inefficient to train all data. Chunks of data can be passed in several iterations. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) Data. y : {array-like, sparse matrix} of shape (n_samples,) or (n_samples, n_classes) Multi-class targets. An indicator matrix turns on multilabel classification. classes : array, shape (n_classes, ) Classes across all calls to partial_fit. Can be obtained via , where y_all is the target vector of the entire dataset. This argument is only required in the first call of partial_fit and can be omitted in the subsequent calls. **partial_fit_params : dict Parameters passed to the `enable_metadata_routing=TrueMetadata Routing User Guide ` for more details. Returns ------- self : object Instance of partially fitted estimator.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\multiclass.py",
    "ast_data": "FunctionDef name:partial_fit arg:self arg:X arg:y arg:classes arguments arg arg arg arg arg Call Assign Call If Call Assign Call Call Assign Call Call If Call Call Raise Call Call Call Assign Call Assign Call Assign Call Call Assign Call Call Call Call Call If Call Assign Return return:yes Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "maybe_set_static_shape",
    "source_code": "def maybe_set_static_shape(tensor, shape):\n    if _ENABLE_MAYBE_SET_STATIC_SHAPE and (not context.executing_eagerly()) and ops.get_default_graph().building_function and (not tensor.shape.is_fully_defined()) and tensor_util.is_tensor(shape):\n        shape = shape_tensor(shape)\n        const_shape = tensor_util.constant_value_as_shape(shape)\n        tensor.set_shape(const_shape)",
    "docstring": "Sets the shape of to the 's constant value, if inferrable. This is a temporary workaround to fix shape inference across functional op boundaries. E.g. If we were to rely solely on C++ shape inference, the shape of inside would be unknown because C++ shape inference is not aware of the outer graph and all it sees is a Placeholder node when backtracing the captured tensor for . computes the static shape value of by traversing the boundaries and sets the correct shape. A longer term solution would be to fix C++ shape inference. Args: tensor: A tensor. shape: A shape tensor.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\shape_util.py",
    "ast_data": "FunctionDef name:maybe_set_static_shape arg:tensor arg:shape arguments arg arg If BoolOp Call Call Call Call Assign Call Assign Call Call"
  },
  {
    "library": "scikit-learn",
    "name": "get_metadata_routing",
    "source_code": "def get_metadata_routing(self):\n    router = MetadataRouter(owner=self.__class__.__name__)\n    for name, estimator in self.estimators:\n        router.add(**{name: estimator}, method_mapping=MethodMapping().add(callee='fit', caller='fit'))\n    return router",
    "docstring": "Get metadata routing of this object. Please check :ref: on how the routing mechanism works. .. versionadded:: 1.5 Returns ------- routing : MetadataRouter A :class: encapsulating routing information.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\ensemble\\_voting.py",
    "ast_data": "FunctionDef name:get_metadata_routing arg:self arguments arg Assign Call For Call Call Call Return return:yes"
  },
  {
    "library": "scipy",
    "name": "_parse_linprog",
    "source_code": "def _parse_linprog(lp, options, meth):\n    if options is None:\n        options = {}\n    solver_options = {k: v for k, v in options.items()}\n    solver_options, A_ub, A_eq = _check_sparse_inputs(solver_options, meth, lp.A_ub, lp.A_eq)\n    lp = _clean_inputs(lp._replace(A_ub=A_ub, A_eq=A_eq))\n    return (lp, solver_options)",
    "docstring": "Parse the provided linear programming problem `scipy.optimize._linprog_util._LPProblemx0show_options('linprog')scipy.optimize._linprog_util._LPProblemx0show_options('linprog')`.",
    "type": "function",
    "file_path": "scipy\\scipy\\optimize\\_linprog_util.py",
    "ast_data": "FunctionDef name:_parse_linprog arg:lp arg:options arg:meth arguments arg arg arg If Compare Assign Assign Call Assign Call Assign Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "boolean_dispatch",
    "source_code": "def boolean_dispatch(arg_name, arg_index, default, if_true, if_false, module_name, func_name):\n\n    def fn(*args, **kwargs):\n        dispatch_flag = default\n        if arg_name in kwargs:\n            dispatch_flag = kwargs[arg_name]\n        elif arg_index < len(args):\n            dispatch_flag = args[arg_index]\n        if dispatch_flag:\n            return if_true(*args, **kwargs)\n        else:\n            return if_false(*args, **kwargs)\n    if if_true.__doc__ is None and if_false.__doc__ is not None:\n        doc = if_false.__doc__\n        if_true.__doc__ = doc\n    elif if_false.__doc__ is None and if_true.__doc__ is not None:\n        doc = if_true.__doc__\n        if_false.__doc__ = doc\n    elif if_false.__doc__ is None and if_true.__doc__ is None:\n        doc = None\n    else:\n        raise RuntimeError('only one function can have a docstring')\n    fn.__doc__ = doc\n    if module_name is not None:\n        fn.__module__ = module_name\n    if func_name is not None:\n        fn.__name__ = func_name\n    boolean_dispatched[fn] = {'if_true': if_true, 'if_false': if_false, 'index': arg_index, 'default': default, 'arg_name': arg_name}\n    return fn",
    "docstring": "Dispatches to either of 2 script functions based on a boolean argument. In TorchScript, the boolean argument must be constant so that the correct function to use can be determined at compile time.",
    "type": "function",
    "file_path": "pytorch\\torch\\_jit_internal.py",
    "ast_data": "FunctionDef name:boolean_dispatch arg:arg_name arg:arg_index arg:default arg:if_true arg:if_false arg:module_name arg:func_name arguments arg arg arg arg arg arg arg FunctionDef name:fn arguments arg arg Assign If Compare Assign If Compare Call Assign If Return return:yes Call Return return:yes Call If BoolOp Compare Compare Assign Assign If BoolOp Compare Compare Assign Assign If BoolOp Compare Compare Assign Raise Call Assign If Compare Assign If Compare Assign Assign Return return:yes"
  },
  {
    "library": "scrapy",
    "name": "receive_window_update",
    "source_code": "def receive_window_update(self) -> None:\n    if self.metadata['remaining_content_length'] and (not self.metadata['stream_closed_server']) and self.metadata['request_sent']:\n        self.send_data()",
    "docstring": "Flow control window size was changed. Send data that earlier could not be sent as we were blocked behind the flow control.",
    "type": "method",
    "file_path": "scrapy\\scrapy\\core\\http2\\stream.py",
    "ast_data": "FunctionDef name:receive_window_update arg:self arguments arg If BoolOp Call"
  },
  {
    "library": "pytorch",
    "name": "countable_fx",
    "source_code": "def countable_fx(node: torch.fx.Node) -> bool:\n    assert isinstance(node, torch.fx.Node)\n    if not hasattr(node, 'target'):\n        return False\n    target = node.target\n    if not hasattr(target, 'overloadpacket'):\n        return target in flop_registry\n    packet = target.overloadpacket\n    return packet in flop_registry",
    "docstring": "Whether or not we can count the flops of an FX node.",
    "type": "function",
    "file_path": "pytorch\\torch\\_inductor\\fx_utils.py",
    "ast_data": "FunctionDef name:countable_fx arg:node arguments arg Call If Call Return return:yes Assign If Call Return return:yes Compare Assign Return return:yes Compare"
  },
  {
    "library": "numpy",
    "name": "_get_legacy_print_mode",
    "source_code": "def _get_legacy_print_mode():\n    return format_options.get()['legacy']",
    "docstring": "Return the legacy print mode as an int.",
    "type": "function",
    "file_path": "numpy\\numpy\\_core\\arrayprint.py",
    "ast_data": "FunctionDef name:_get_legacy_print_mode arguments Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "Dataset",
    "source_code": "class Dataset(Generic[_T_co]):\n\n    def __getitem__(self, index) -> _T_co:\n        raise NotImplementedError('Subclasses of Dataset should implement __getitem__.')\n\n    def __add__(self, other: 'Dataset[_T_co]') -> 'ConcatDataset[_T_co]':\n        return ConcatDataset([self, other])",
    "docstring": "An abstract class representing a :class:. All datasets that represent a map from keys to data samples should subclass it. All subclasses should overwrite :meth:, supporting fetching a data sample for a given key. Subclasses could also optionally overwrite :meth:, which is expected to return the size of the dataset by many :class: implementations and the default options of :class:. Subclasses could also optionally implement :meth:, for speedup batched samples loading. This method accepts list of indices of samples of batch and returns list of samples. .. note:: :class: by default constructs an index sampler that yields integral indices. To make it work with a map-style dataset with non-integral indices/keys, a custom sampler must be provided.",
    "type": "class",
    "file_path": "pytorch\\torch\\utils\\data\\dataset.py",
    "ast_data": "ClassDef name:Dataset FunctionDef name:__getitem__ arg:self arg:index arguments arg arg Raise Call FunctionDef name:__add__ arg:self arg:other arguments arg arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "get_next",
    "source_code": "def get_next(self, device, name=None):\n    del name\n    with ops.device(self._worker):\n        if _should_use_multi_device_iterator(self._options):\n            return self._iterator.get_next(device)\n        else:\n            return self._iterator.get_next()",
    "docstring": "Get next element for the given device.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\input_lib.py",
    "ast_data": "FunctionDef name:get_next arg:self arg:device arg:name arguments arg arg arg With Call If Call Return return:yes Call Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "matfile_version",
    "source_code": "def matfile_version(file_name, *, appendmat=True):\n    from ._mio import _open_file_context\n    with _open_file_context(file_name, appendmat=appendmat) as fileobj:\n        return _get_matfile_version(fileobj)",
    "docstring": "Return major, minor tuple depending on apparent mat file type Where: #. 0,x -> version 4 format mat files #. 1,x -> version 5 format mat files #. 2,x -> version 7.3 format mat files (HDF format) Parameters ---------- file_name : str Name of the mat file (do not need .mat extension if appendmat==True). Can also pass open file-like object. appendmat : bool, optional True to append the .mat extension to the end of the given filename, if not already present. Default is True. Returns ------- major_version : {0, 1, 2} major MATLAB File format version minor_version : int minor MATLAB file format version Raises ------ MatReadError If the file is empty. ValueError The matfile version is unknown. Notes ----- Has the side effect of setting the file read pointer to 0",
    "type": "function",
    "file_path": "scipy\\scipy\\io\\matlab\\_miobase.py",
    "ast_data": "FunctionDef name:matfile_version arg:file_name arguments arg arg With Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "log_prob",
    "source_code": "def log_prob(self, value):\n    if self._validate_args:\n        self._validate_sample(value)\n    event_dim = len(self.event_shape)\n    log_prob = 0.0\n    y = value\n    for transform in reversed(self.transforms):\n        x = transform.inv(y)\n        event_dim += transform.domain.event_dim - transform.codomain.event_dim\n        log_prob = log_prob - _sum_rightmost(transform.log_abs_det_jacobian(x, y), event_dim - transform.domain.event_dim)\n        y = x\n    log_prob = log_prob + _sum_rightmost(self.base_dist.log_prob(y), event_dim - len(self.base_dist.event_shape))\n    return log_prob",
    "docstring": "Scores the sample by inverting the transform(s) and computing the score using the score of the base distribution and the log abs det jacobian.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributions\\transformed_distribution.py",
    "ast_data": "FunctionDef name:log_prob arg:self arg:value arguments arg arg If Call Assign Call Assign Assign For Call Assign Call Assign Call Call Assign Assign Call Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "initialize_or_restore",
    "source_code": "@abc.abstractmethod\ndef initialize_or_restore(self, session=None):\n    pass",
    "docstring": "Runs restore ops from the checkpoint, or initializes variables.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\checkpoint\\checkpoint.py",
    "ast_data": "FunctionDef name:initialize_or_restore arg:self arg:session arguments arg arg"
  },
  {
    "library": "matplotlib",
    "name": "frame_format",
    "source_code": "@property\ndef frame_format(self):\n    return self._frame_format",
    "docstring": "Format (png, jpeg, etc.) to use for saving the frames, which can be decided by the individual subclasses.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\animation.py",
    "ast_data": "FunctionDef name:frame_format arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "is_uniform",
    "source_code": "def is_uniform(self, axis):\n    if not isinstance(axis, int):\n        raise TypeError('axis must be an integer')\n    rank = self.rank\n    if axis < 0:\n        raise IndexError('Negative axis values are not supported')\n    elif rank is not None and axis >= rank:\n        raise IndexError('Expected axis=%s < rank=%s' % (axis, rank))\n    else:\n        return (axis == 0 or axis > len(self._row_partitions)) or self._row_partitions[axis - 1].is_uniform()",
    "docstring": "Returns true if the indicated dimension is uniform.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\dynamic_ragged_shape.py",
    "ast_data": "FunctionDef name:is_uniform arg:self arg:axis arguments arg arg If Call Raise Call Assign If Compare Raise Call If BoolOp Compare Compare Raise Call Return return:yes BoolOp BoolOp Compare Compare Call Call"
  },
  {
    "library": "pytorch",
    "name": "generate_mobile_module_lints",
    "source_code": "def generate_mobile_module_lints(script_module: torch.jit.ScriptModule):\n    if not isinstance(script_module, torch.jit.ScriptModule):\n        raise TypeError(f'Got {type(script_module)}, but ScriptModule is expected.')\n    lint_list = []\n    if not hasattr(script_module, '_generate_bundled_inputs_for_forward'):\n        lint_list.append({'name': LintCode.BUNDLED_INPUT.name, 'message': 'No bundled input for forward, please add bundled inputs before saving the module using torch.utils.bundled_inputs.augment_model_with_bundled_inputs.'})\n    for name, param in script_module.named_parameters():\n        if param.requires_grad:\n            lint_list.append({'name': LintCode.REQUIRES_GRAD.name, 'message': f'Param {name} requires grad, please set torch.no_grad() to reduce memory usage and improve computation speed during inference phase.'})\n    op_names = torch.jit.export_opnames(script_module)\n    for op_name in op_names:\n        if 'dropout' in op_name:\n            lint_list.append({'name': LintCode.DROPOUT.name, 'message': f'Operator {op_name} exists, remember to call eval() before saving the module.and call torch.utils.mobile_optimizer.optimize_for_mobile to drop dropout operator.'})\n        if 'batch_norm' in op_name:\n            lint_list.append({'name': LintCode.BATCHNORM.name, 'message': f'Operator {op_name} exists, remember to call eval() before saving the module and call torch.utils.mobile_optimizer.optimize_for_mobile to drop batch_norm operator.'})\n    return lint_list",
    "docstring": "Generate a list of lints for a given torch script module. Args: script_module: An instance of torch script module with type of ScriptModule. Returns: lint_map: A list of dictionary that contains modules lints",
    "type": "function",
    "file_path": "pytorch\\torch\\utils\\mobile_optimizer.py",
    "ast_data": "FunctionDef name:generate_mobile_module_lints arg:script_module arguments arg If Call Raise Call Call Assign If Call Call For Call If Call Assign Call For If Compare Call If Compare Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_scale_mm_configs",
    "source_code": "def _scale_mm_configs(self, m: int, n: int, k: int, configs: list[BaseConfig], scale: float, has_int8_tensor: bool, exclude: Callable[[int, int, int], bool]) -> list[BaseConfig]:\n    from .runtime.runtime_utils import next_power_of_2\n    min_block_size = 16\n    min_block_size_k = 32 if has_int8_tensor else 16\n    m = max(next_power_of_2(V.graph.sizevars.size_hint(m, fallback=config.unbacked_symint_fallback)), min_block_size)\n    n = max(next_power_of_2(V.graph.sizevars.size_hint(n, fallback=config.unbacked_symint_fallback)), min_block_size)\n    k = max(next_power_of_2(V.graph.sizevars.size_hint(k, fallback=config.unbacked_symint_fallback)), min_block_size_k)\n    scaled_configs = []\n    for c in configs:\n        scaled_config = dataclasses.replace(c, block_m=max(min(int(c.block_m * scale), m), min_block_size), block_n=max(min(int(c.block_n * scale), n), min_block_size), block_k=max(min(int(c.block_k * scale), k), min_block_size_k))\n        if not exclude(scaled_config.block_m, scaled_config.block_n, scaled_config.block_k):\n            scaled_configs.append(scaled_config)\n    return scaled_configs",
    "docstring": "Scales and filters matrix multiplication configs based on input size.",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\template_heuristics.py",
    "ast_data": "FunctionDef name:_scale_mm_configs arg:self arg:m arg:n arg:k arg:configs arg:scale arg:has_int8_tensor arg:exclude arguments arg arg arg arg arg arg arg arg Assign Assign Assign Call Call Call Assign Call Call Call Assign Call Call Call Assign For Assign Call Call Call Call Call Call Call Call Call Call If Call Call Return return:yes"
  },
  {
    "library": "kornia",
    "name": "forward",
    "source_code": "def forward(self, pred: Tensor, target: Tensor) -> Tensor:\n    if pred.dim() != 5:\n        raise ValueError(f'Only 3D images supported. Got {pred.dim()}.')\n    return super().forward(pred, target)",
    "docstring": "Compute 3D Hausdorff loss. Args: pred: predicted tensor with a shape of :math:. Each channel is as binary as: 1 -> fg, 0 -> bg. target: target tensor with a shape of :math:. Returns: Estimated Hausdorff Loss.",
    "type": "method",
    "file_path": "kornia\\kornia\\losses\\hausdorff.py",
    "ast_data": "FunctionDef name:forward arg:self arg:pred arg:target arguments arg arg arg If Compare Call Raise Call Call Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "no_automatic_dependency_tracking_scope",
    "source_code": "@tf_contextlib.contextmanager\ndef no_automatic_dependency_tracking_scope(obj):\n    previous_value = getattr(obj, '_setattr_tracking', True)\n    obj._setattr_tracking = False\n    try:\n        yield\n    finally:\n        obj._setattr_tracking = previous_value",
    "docstring": "A context that disables automatic dependency tracking when assigning attrs. Objects that inherit from Autotrackable automatically creates dependencies to trackable objects through attribute assignments, and wraps data structures (lists or dicts) with trackable classes. This scope may be used to temporarily disable this behavior. This works similar to the decorator . Example usage: Args: obj: A trackable object. Yields: a scope in which the object doesn't track dependencies.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\trackable\\base.py",
    "ast_data": "FunctionDef name:no_automatic_dependency_tracking_scope arg:obj arguments arg Assign Call Assign Try Assign"
  },
  {
    "library": "django",
    "name": "get_table_description",
    "source_code": "def get_table_description(self, cursor, table_name):\n    cursor.execute('PRAGMA table_xinfo(%s)' % self.connection.ops.quote_name(table_name))\n    table_info = cursor.fetchall()\n    if not table_info:\n        raise DatabaseError(f'Table {table_name} does not exist (empty pragma).')\n    collations = self._get_column_collations(cursor, table_name)\n    json_columns = set()\n    if self.connection.features.can_introspect_json_field:\n        for line in table_info:\n            column = line[1]\n            json_constraint_sql = '%%json_valid(\"%s\")%%' % column\n            has_json_constraint = cursor.execute(\"\\n                    SELECT sql\\n                    FROM sqlite_master\\n                    WHERE\\n                        type = 'table' AND\\n                        name = %s AND\\n                        sql LIKE %s\\n                \", [table_name, json_constraint_sql]).fetchone()\n            if has_json_constraint:\n                json_columns.add(column)\n    table_description = [FieldInfo(name, data_type, get_field_size(data_type), None, None, None, not notnull, default, collations.get(name), bool(pk), name in json_columns) for cid, name, data_type, notnull, default, pk, hidden in table_info if hidden in [0, 2, 3]]\n    primary_key = [index for index, field_info in enumerate(table_description) if field_info.pk]\n    if len(primary_key) > 1:\n        for index in primary_key:\n            table_description[index] = table_description[index]._replace(pk=False)\n    return table_description",
    "docstring": "Return a description of the table with the DB-API cursor.description interface.",
    "type": "method",
    "file_path": "django\\django\\db\\backends\\sqlite3\\introspection.py",
    "ast_data": "FunctionDef name:get_table_description arg:self arg:cursor arg:table_name arguments arg arg arg Call Call Assign Call If Raise Call Assign Call Assign Call If For Assign Assign Assign Call Call If Call Assign Call Call Call Call Compare Compare Assign Call If Compare Call For Assign Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "colspan",
    "source_code": "@property\ndef colspan(self):\n    ncols = self.get_gridspec().ncols\n    c1, c2 = sorted([self.num1 % ncols, self.num2 % ncols])\n    return range(c1, c2 + 1)",
    "docstring": "The columns spanned by this subplot, as a object.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\gridspec.py",
    "ast_data": "FunctionDef name:colspan arg:self arguments arg Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_is_atomic_nested",
    "source_code": "def _is_atomic_nested(nested):\n    if isinstance(nested, ListWrapper):\n        return True\n    if _is_serialized_node_data(nested):\n        return True\n    return not nest.is_nested(nested)",
    "docstring": "Returns if is a list representing node data.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\utils\\tf_utils.py",
    "ast_data": "FunctionDef name:_is_atomic_nested arg:nested arguments arg If Call Return return:yes If Call Return return:yes Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "__call__",
    "source_code": "def __call__(self, shape, dtype=None, **kwargs):\n    raise NotImplementedError",
    "docstring": "Returns a tensor object initialized as specified by the initializer. Args: shape: Shape of the tensor. dtype: Optional dtype of the tensor. **kwargs: Additional keyword arguments.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\initializers\\initializers_v2.py",
    "ast_data": "FunctionDef name:__call__ arg:self arg:shape arg:dtype arguments arg arg arg arg Raise"
  },
  {
    "library": "pytorch",
    "name": "resolve_tensor",
    "source_code": "@abc.abstractmethod\ndef resolve_tensor(self, read_item: ReadItem) -> torch.Tensor:\n    pass",
    "docstring": "Return the tensor described by `read_item` method to copy the data back to the one in state_dict.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\checkpoint\\planner.py",
    "ast_data": "FunctionDef name:resolve_tensor arg:self arg:read_item arguments arg arg"
  },
  {
    "library": "pytorch",
    "name": "extract_attrs_for_lowering",
    "source_code": "@compatibility(is_backward_compatible=False)\ndef extract_attrs_for_lowering(mod: nn.Module) -> dict[str, Any]:\n    attrs_for_lowering: dict[str, Any] = {}\n    attrs_for_lowering['name'] = torch.typename(mod)\n    if type(mod) in module_fetch_book:\n        version, param_to_fetch, matching_method = module_fetch_book[type(mod)]\n        if version < mod._version:\n            raise RuntimeError(f'Fetcher version {version} try to fetch {torch.typename(mod)} version {mod._version}, please upgrade the module_fetch_book, open an issue and @842974287 or report a bug to AIACC team directly.')\n        for attr in param_to_fetch:\n            attrs_for_lowering[attr] = getattr(mod, matching_method(attr, mod._version))\n    else:\n        raise RuntimeError(f'{torch.typename(mod)} is not in the module_fetch_book yet, please add it to the module_fetch_book, open an issue and @842974287 or report a bug to AIACC team directly.')\n    return attrs_for_lowering",
    "docstring": "If is in , fetch the mod's attributes that in the after checking module's version is compatible with the .",
    "type": "function",
    "file_path": "pytorch\\torch\\fx\\passes\\param_fetch.py",
    "ast_data": "FunctionDef name:extract_attrs_for_lowering arg:mod arguments arg Assign Call If Compare Call Assign Call If Compare Raise Call Call For Assign Call Call Raise Call Call Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "_parse_float_vec",
    "source_code": "def _parse_float_vec(vec):\n    dtype = np.dtype('>u4,>u4')\n    vec1 = vec.view(dtype=dtype)\n    xport1 = vec1['f0']\n    xport2 = vec1['f1']\n    ieee1 = xport1 & 16777215\n    shift = np.zeros(len(vec), dtype=np.uint8)\n    shift[np.where(xport1 & 2097152)] = 1\n    shift[np.where(xport1 & 4194304)] = 2\n    shift[np.where(xport1 & 8388608)] = 3\n    ieee1 >>= shift\n    ieee2 = xport2 >> shift | (xport1 & 7) << 29 + (3 - shift)\n    ieee1 &= 4293918719\n    ieee1 |= ((xport1 >> 24 & 127) - 65 << 2) + shift + 1023 << 20 | xport1 & 2147483648\n    ieee = np.empty((len(ieee1),), dtype='>u4,>u4')\n    ieee['f0'] = ieee1\n    ieee['f1'] = ieee2\n    ieee = ieee.view(dtype='>f8')\n    ieee = ieee.astype('f8')\n    return ieee",
    "docstring": "Parse a vector of float values representing IBM 8 byte floats into native 8 byte floats.",
    "type": "function",
    "file_path": "pandas\\pandas\\io\\sas\\sas_xport.py",
    "ast_data": "FunctionDef name:_parse_float_vec arg:vec arguments arg Assign Call Assign Call Assign Assign Assign Assign Call Call Assign Call Assign Call Assign Call Assign Assign Call Call Assign Assign Assign Call Assign Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "get_styles",
    "source_code": "@classmethod\ndef get_styles(cls):\n    return cls._style_list",
    "docstring": "Return a dictionary of available styles.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\patches.py",
    "ast_data": "FunctionDef name:get_styles arg:cls arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "legacy_raw_flush",
    "source_code": "def legacy_raw_flush(writer=None, name=None):\n    if writer is None or isinstance(writer, SummaryWriter):\n        return flush(writer, name)\n    else:\n        with ops.device('cpu:0'):\n            return gen_summary_ops.flush_summary_writer(writer, name=name)",
    "docstring": "Legacy version of flush() that accepts a raw resource tensor for . Do not use this function in any new code. Not supported and not part of the public TF APIs. Args: writer: The to flush. If None, the current default writer will be used instead; if there is no current writer, this returns . For this legacy version only, also accepts a raw resource tensor pointing to the underlying C++ writer resource. name: Ignored legacy argument for a name for the operation. Returns: The created .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\summary_ops_v2.py",
    "ast_data": "FunctionDef name:legacy_raw_flush arg:writer arg:name arguments arg arg If BoolOp Compare Call Return return:yes Call With Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "unregister_layer",
    "source_code": "def unregister_layer(self, name):\n    self.data_groups[name]['hook'].remove()\n    self.state.pop(name)\n    self.data_groups.pop(name)",
    "docstring": "Detaches the sparsifier from the layer",
    "type": "method",
    "file_path": "pytorch\\torch\\ao\\pruning\\_experimental\\activation_sparsifier\\activation_sparsifier.py",
    "ast_data": "FunctionDef name:unregister_layer arg:self arg:name arguments arg arg Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "name",
    "source_code": "@property\ndef name(self) -> str:\n    return self._name",
    "docstring": "Name of this tensor.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\client\\timeline.py",
    "ast_data": "FunctionDef name:name arg:self arguments arg Return return:yes"
  },
  {
    "library": "scipy",
    "name": "SmokeTutorials",
    "source_code": "@cli.cls_cmd('smoke-tutorials')\nclass SmokeTutorials(Task):\n    ctx = CONTEXT\n    tests = Option(['--tests', '-t'], default=None, multiple=True, metavar='TESTS', help='Specify *rst files to smoke test')\n    verbose = Option(['--verbose', '-v'], default=False, is_flag=True, help='verbosity')\n    pytest_args = Argument(['pytest_args'], nargs=-1, metavar='PYTEST-ARGS', required=False)\n\n    @classmethod\n    def task_meta(cls, **kwargs):\n        kwargs.update(cls.ctx.get())\n        Args = namedtuple('Args', [k for k in kwargs.keys()])\n        args = Args(**kwargs)\n        dirs = Dirs(args)\n        cmd = ['pytest']\n        if args.tests:\n            cmd += list(args.tests)\n        else:\n            cmd += ['doc/source/tutorial', '--doctest-glob=*rst']\n        if args.verbose:\n            cmd += ['-v']\n        pytest_args = kwargs.pop('pytest_args', None)\n        extra_argv = list(pytest_args[:]) if pytest_args else []\n        if extra_argv and extra_argv[0] == '--':\n            extra_argv = extra_argv[1:]\n        cmd += extra_argv\n        cmd_str = ' '.join(cmd)\n        return {'actions': [f'env PYTHONPATH={dirs.site} {cmd_str}'], 'task_dep': ['build'], 'io': {'capture': False}}",
    "docstring": ":wrench: Run smoke-tests on tutorial files.",
    "type": "class",
    "file_path": "scipy\\dev.py",
    "ast_data": "ClassDef name:SmokeTutorials Assign Assign Call Assign Call Assign Call FunctionDef name:task_meta arg:cls arguments arg arg Call Call Assign Call Call Assign Call Assign Call Assign If Call If Assign Call Assign Call If BoolOp Compare Assign Assign Call Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "is_number",
    "source_code": "def is_number(obj: object) -> TypeGuard[Number | np.number]:\n    return isinstance(obj, (Number, np.number))",
    "docstring": "Check if the object is a number. Returns True when the object is a number, and False if is not. Parameters ---------- obj : any type The object to check if is a number. Returns ------- bool Whether is a number or not. See Also -------- api.types.is_integer: Checks a subgroup of numbers. Examples -------- >>> from pandas.api.types import is_number >>> is_number(1) True >>> is_number(7.15) True Booleans are valid because they are int subclass. >>> is_number(False) True >>> is_number(\"foo\") False >>> is_number(\"5\") False",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\dtypes\\inference.py",
    "ast_data": "FunctionDef name:is_number arg:obj arguments arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "parents",
    "source_code": "@property\ndef parents(self):\n    return [self.categorical_column]",
    "docstring": "See 'FeatureColumn` base class.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\feature_column\\feature_column_v2.py",
    "ast_data": "FunctionDef name:parents arg:self arguments arg Return return:yes"
  },
  {
    "library": "django",
    "name": "buffer_with_style",
    "source_code": "def buffer_with_style(self, width, quadsegs=8, end_cap_style=1, join_style=1, mitre_limit=5.0):\n    return self._topology(capi.geos_bufferwithstyle(self.ptr, width, quadsegs, end_cap_style, join_style, mitre_limit))",
    "docstring": "Same as buffer() but allows customizing the style of the memoryview. End cap style can be round (1), flat (2), or square (3). Join style can be round (1), mitre (2), or bevel (3). Mitre ratio limit only affects mitered join style.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\geos\\geometry.py",
    "ast_data": "FunctionDef name:buffer_with_style arg:self arg:width arg:quadsegs arg:end_cap_style arg:join_style arg:mitre_limit arguments arg arg arg arg arg arg Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "_DimRange",
    "source_code": "@dataclass(frozen=True)\nclass _DimRange:\n    dim: int\n    min: int\n    max: int",
    "docstring": "This represents an dimension of a tensor and the corresponding min and max values it can take. Don't create this class directly; instead, use :func:.",
    "type": "class",
    "file_path": "pytorch\\torch\\_dynamo\\decorators.py",
    "ast_data": "ClassDef name:_DimRange Call"
  },
  {
    "library": "tensorflow",
    "name": "__call__",
    "source_code": "def __call__(self, inputs, state, scope=None, *args, **kwargs):\n    return base_layer.Layer.__call__(self, inputs, state, *args, scope=scope, **kwargs)",
    "docstring": "Run this RNN cell on inputs, starting from the given state. Args: inputs: tensor with shape . state: if is an integer, this should be a with shape . Otherwise, if is a tuple of integers, this should be a tuple with shapes . scope: optional cell scope. *args: Additional positional arguments. **kwargs: Additional keyword arguments. Returns: A pair containing: - Output: A tensor with shape . - New state: Either a single tensor, or a tuple of tensors matching the arity and shapes of .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\layers\\legacy_rnn\\rnn_cell_impl.py",
    "ast_data": "FunctionDef name:__call__ arg:self arg:inputs arg:state arg:scope arguments arg arg arg arg arg arg Return return:yes Call"
  },
  {
    "library": "django",
    "name": "get_list_display",
    "source_code": "def get_list_display(self, request):\n    return self.list_display",
    "docstring": "Return a sequence containing the fields to be displayed on the changelist.",
    "type": "method",
    "file_path": "django\\django\\contrib\\admin\\options.py",
    "ast_data": "FunctionDef name:get_list_display arg:self arg:request arguments arg arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "hardtanh",
    "source_code": "def hardtanh(input: Tensor, min_val: float=-1.0, max_val: float=1.0, inplace: bool=False) -> Tensor:\n    if has_torch_function_unary(input):\n        return handle_torch_function(hardtanh, (input,), input, min_val=min_val, max_val=max_val, inplace=inplace)\n    if min_val > max_val:\n        raise ValueError('min_val cannot be greater than max_val')\n    if inplace:\n        result = torch._C._nn.hardtanh_(input, min_val, max_val)\n    else:\n        result = torch._C._nn.hardtanh(input, min_val, max_val)\n    return result",
    "docstring": "hardtanh(input, min_val=-1., max_val=1., inplace=False) -> Tensor Applies the HardTanh function element-wise. See :class: for more details.",
    "type": "function",
    "file_path": "pytorch\\torch\\nn\\functional.py",
    "ast_data": "FunctionDef name:hardtanh arg:input arg:min_val arg:max_val arg:inplace arguments arg arg arg arg If Call Return return:yes Call If Compare Raise Call If Assign Call Assign Call Return return:yes"
  },
  {
    "library": "scipy",
    "name": "diff_files",
    "source_code": "def diff_files(sha: str) -> list[str]:\n    res = subprocess.run(['git', 'diff', '--name-only', '--diff-filter=ACMR', '-z', sha, '--', '*.[chCH]', '*.[ch]pp', '*.[ch]xx', '*.cc', '*.hh'], stdout=subprocess.PIPE, encoding='utf-8')\n    res.check_returncode()\n    return [f for f in res.stdout.split('\\x00') if f]",
    "docstring": "Find the diff since the given SHA. Adapted from lint.py",
    "type": "function",
    "file_path": "scipy\\tools\\check_python_h_first.py",
    "ast_data": "FunctionDef name:diff_files arg:sha arguments arg Assign Call Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "serialize",
    "source_code": "def serialize(obj):\n    for identifier in _TYPE_IDENTIFIERS:\n        predicate, versions = _REVIVED_TYPE_REGISTRY[identifier]\n        if predicate(obj):\n            return versions[0].to_proto()\n    return None",
    "docstring": "Create a SavedUserObject from a trackable object.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\saved_model\\revived_types.py",
    "ast_data": "FunctionDef name:serialize arg:obj arguments arg For Assign If Call Return return:yes Call Return return:no"
  },
  {
    "library": "seaborn",
    "name": "Lines",
    "source_code": "@document_properties\n@dataclass\nclass Lines(Paths):\n    _sort: ClassVar[bool] = True",
    "docstring": "A faster but less-flexible mark for drawing many lines. See also -------- Line : A mark connecting data points with sorting along the orientation axis. Examples -------- .. include:: ../docstrings/objects.Lines.rst",
    "type": "class",
    "file_path": "seaborn\\seaborn\\_marks\\line.py",
    "ast_data": "ClassDef name:Lines"
  },
  {
    "library": "pytorch",
    "name": "Constraint",
    "source_code": "class Constraint:\n    is_discrete = False\n    event_dim = 0\n\n    def check(self, value):\n        raise NotImplementedError\n\n    def __repr__(self):\n        return self.__class__.__name__[1:] + '()'",
    "docstring": "Abstract base class for constraints. A constraint object represents a region over which a variable is valid, e.g. within which a variable can be optimized. Attributes: is_discrete (bool): Whether constrained space is discrete. Defaults to False. event_dim (int): Number of rightmost dimensions that together define an event. The :meth: method will remove this many dimensions when computing validity.",
    "type": "class",
    "file_path": "pytorch\\torch\\distributions\\constraints.py",
    "ast_data": "ClassDef name:Constraint Assign Assign FunctionDef name:check arg:self arg:value arguments arg arg Raise FunctionDef name:__repr__ arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "get_config",
    "source_code": "@generic_utils.default\ndef get_config(self):\n    all_args = tf_inspect.getfullargspec(self.__init__).args\n    config = {'name': self.name, 'trainable': self.trainable}\n    if hasattr(self, '_batch_input_shape'):\n        config['batch_input_shape'] = self._batch_input_shape\n    config['dtype'] = policy.serialize(self._dtype_policy)\n    if hasattr(self, 'dynamic'):\n        if self.dynamic:\n            config['dynamic'] = self.dynamic\n        elif 'dynamic' in all_args:\n            all_args.remove('dynamic')\n    expected_args = config.keys()\n    extra_args = [arg for arg in all_args if arg not in expected_args]\n    if len(extra_args) > 1 and hasattr(self.get_config, '_is_default'):\n        raise NotImplementedError('Layers with arguments in `__init__` must override `get_config`.')\n    return config",
    "docstring": "Returns the config of the layer. A layer config is a Python dictionary (serializable) containing the configuration of a layer. The same layer can be reinstantiated later (without its trained weights) from this configuration. The config of a layer does not include connectivity information, nor the layer class name. These are handled by (one layer of abstraction above). Returns: Python dictionary.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\base_layer_v1.py",
    "ast_data": "FunctionDef name:get_config arg:self arguments arg Assign Call Assign If Call Assign Assign Call If Call If Assign If Compare Call Assign Call Assign Compare If BoolOp Compare Call Call Raise Call Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "fit",
    "source_code": "@_fit_context(prefer_skip_nested_validation=True)\ndef fit(self, X, y=None):\n    if y is None:\n        X = validate_data(self, X, accept_sparse=['csr', 'csc'])\n    else:\n        X, y = validate_data(self, X, y, accept_sparse=['csr', 'csc'], multi_output=True)\n    self._check_params(X, y)\n    score_func_ret = self.score_func(X, y)\n    if isinstance(score_func_ret, (list, tuple)):\n        self.scores_, self.pvalues_ = score_func_ret\n        self.pvalues_ = np.asarray(self.pvalues_)\n    else:\n        self.scores_ = score_func_ret\n        self.pvalues_ = None\n    self.scores_ = np.asarray(self.scores_)\n    return self",
    "docstring": "Run score function on (X, y) and get the appropriate features. Parameters ---------- X : array-like of shape (n_samples, n_features) The training input samples. y : array-like of shape (n_samples,) or None The target values (class labels in classification, real numbers in regression). If the selector is unsupervised then can be set to . Returns ------- self : object Returns the instance itself.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\feature_selection\\_univariate_selection.py",
    "ast_data": "FunctionDef name:fit arg:self arg:X arg:y arguments arg arg arg If Compare Assign Call Assign Call Call Assign Call If Call Assign Assign Call Assign Assign Assign Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "unpack_dual",
    "source_code": "def unpack_dual(tensor, *, level=None):\n    if level is None:\n        level = _current_level\n    if level < 0:\n        return UnpackedDualTensor(tensor, None)\n    primal, dual = torch._VF._unpack_dual(tensor, level=level)\n    return UnpackedDualTensor(primal, dual)",
    "docstring": "Unpack a \"dual tensor\" to get both its Tensor value and its forward AD gradient. The result is a namedtuple `tensortensorlevelforward-mode AD tutorial `__ for detailed steps on how to use this API.",
    "type": "function",
    "file_path": "pytorch\\torch\\autograd\\forward_ad.py",
    "ast_data": "FunctionDef name:unpack_dual arg:tensor arguments arg arg If Compare Assign If Compare Return return:yes Call Assign Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "generate_output",
    "source_code": "def generate_output(self, output_args: Argument) -> str:\n    return f'return {repr(output_args)}'",
    "docstring": "Given the output arguments, generates the return statement of the FX function. Note: The returned statement should not be indented.",
    "type": "method",
    "file_path": "pytorch\\torch\\fx\\graph.py",
    "ast_data": "FunctionDef name:generate_output arg:self arg:output_args arguments arg arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "record_has_frozen_params",
    "source_code": "def record_has_frozen_params(gm: torch.fx.GraphModule) -> None:\n    gm._has_frozen_params = True",
    "docstring": "Mark the gm as having frozen params.",
    "type": "function",
    "file_path": "pytorch\\torch\\_inductor\\freezing_utils.py",
    "ast_data": "FunctionDef name:record_has_frozen_params arg:gm arguments arg Assign"
  },
  {
    "library": "matplotlib",
    "name": "Locator",
    "source_code": "class Locator(TickHelper):\n    MAXTICKS = 1000\n\n    def tick_values(self, vmin, vmax):\n        raise NotImplementedError('Derived must override')\n\n    def set_params(self, **kwargs):\n        _api.warn_external(\"'set_params()' not defined for locator of type \" + str(type(self)))\n\n    def __call__(self):\n        raise NotImplementedError('Derived must override')\n\n    def raise_if_exceeds(self, locs):\n        if len(locs) >= self.MAXTICKS:\n            _log.warning('Locator attempting to generate %s ticks ([%s, ..., %s]), which exceeds Locator.MAXTICKS (%s).', len(locs), locs[0], locs[-1], self.MAXTICKS)\n        return locs\n\n    def nonsingular(self, v0, v1):\n        return mtransforms.nonsingular(v0, v1, expander=0.05)\n\n    def view_limits(self, vmin, vmax):\n        return mtransforms.nonsingular(vmin, vmax)",
    "docstring": "Determine tick locations. Note that the same locator should not be used across multiple because the locator stores references to the Axis data and view limits.",
    "type": "class",
    "file_path": "matplotlib\\lib\\matplotlib\\ticker.py",
    "ast_data": "ClassDef name:Locator Assign FunctionDef name:tick_values arg:self arg:vmin arg:vmax arguments arg arg arg Raise Call FunctionDef name:set_params arg:self arguments arg arg Call Call Call FunctionDef name:__call__ arg:self arguments arg Raise Call FunctionDef name:raise_if_exceeds arg:self arg:locs arguments arg arg If Compare Call Call Call Return return:yes FunctionDef name:nonsingular arg:self arg:v0 arg:v1 arguments arg arg arg Return return:yes Call FunctionDef name:view_limits arg:self arg:vmin arg:vmax arguments arg arg arg Return return:yes Call"
  },
  {
    "library": "scrapy",
    "name": "freeze",
    "source_code": "def freeze(self) -> None:\n    self.frozen = True",
    "docstring": "Disable further changes to the current settings. After calling this method, the present state of the settings will become immutable. Trying to change values through the :meth: method and its variants won't be possible and will be alerted.",
    "type": "method",
    "file_path": "scrapy\\scrapy\\settings\\__init__.py",
    "ast_data": "FunctionDef name:freeze arg:self arguments arg Assign"
  },
  {
    "library": "tensorflow",
    "name": "_snapshot_task_progresses",
    "source_code": "def _snapshot_task_progresses(self) -> Iterable[_pywrap_server_lib.SnapshotTaskProgressWrapper]:\n    return self._server.snapshot_task_progresses()",
    "docstring": "Returns the progresses of the snapshot tasks currently being executed. Returns: An .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\data\\experimental\\service\\server_lib.py",
    "ast_data": "FunctionDef name:_snapshot_task_progresses arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "cherrypy",
    "name": "tee_output",
    "source_code": "def tee_output():\n    request = cherrypy.serving.request\n    if 'no-store' in request.headers.values('Cache-Control'):\n        return\n\n    def tee(body):\n        if 'no-cache' in response.headers.values('Pragma') or 'no-store' in response.headers.values('Cache-Control'):\n            for chunk in body:\n                yield chunk\n            return\n        output = []\n        for chunk in body:\n            output.append(chunk)\n            yield chunk\n        body = b''.join(output)\n        if not body:\n            cherrypy._cache.delete()\n        else:\n            cherrypy._cache.put((response.status, response.headers or {}, body, response.time), len(body))\n    response = cherrypy.serving.response\n    response.body = tee(response.body)",
    "docstring": "Tee response output to cache storage. Internal.",
    "type": "function",
    "file_path": "cherrypy\\cherrypy\\lib\\caching.py",
    "ast_data": "FunctionDef name:tee_output arguments Assign If Compare Call Return return:no FunctionDef name:tee arg:body arguments arg If BoolOp Compare Call Compare Call For Return return:no Assign For Call Assign Call If Call Call BoolOp Call Assign Assign Call"
  },
  {
    "library": "matplotlib",
    "name": "set_annotation_clip",
    "source_code": "def set_annotation_clip(self, b):\n    self._annotation_clip = b",
    "docstring": "Set the annotation's clipping behavior. Parameters ---------- b : bool or None - True: The annotation will be clipped when ``.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\text.py",
    "ast_data": "FunctionDef name:set_annotation_clip arg:self arg:b arguments arg arg Assign"
  },
  {
    "library": "scipy",
    "name": "mean",
    "source_code": "def mean(self, row, col):\n    r, c, n = self._process_parameters(row, col)\n    return np.outer(r, c) / n",
    "docstring": "Mean of distribution of conditional tables. %(_doc_mean_params)s Returns ------- mean: ndarray Mean of the distribution. Notes ----- %(_doc_row_col_note)s Examples -------- >>> from scipy.stats import random_table >>> row = [1, 5] >>> col = [2, 3, 1] >>> random_table.mean(row, col) array([[0.33333333, 0.5 , 0.16666667], [1.66666667, 2.5 , 0.83333333]]) Alternatively, the object may be called (as a function) to fix the row and column vector sums, returning a \"frozen\" distribution. >>> d = random_table(row, col) >>> d.mean() array([[0.33333333, 0.5 , 0.16666667], [1.66666667, 2.5 , 0.83333333]])",
    "type": "method",
    "file_path": "scipy\\scipy\\stats\\_multivariate.py",
    "ast_data": "FunctionDef name:mean arg:self arg:row arg:col arguments arg arg arg Assign Call Return return:yes Call"
  },
  {
    "library": "kornia",
    "name": "Equalize",
    "source_code": "class Equalize(OperationBase):\n\n    def __init__(self, initial_probability: float=0.5, temperature: float=0.1) -> None:\n        super().__init__(K.RandomEqualize(same_on_batch=False, p=initial_probability), initial_magnitude=None, temperature=temperature, symmetric_megnitude=False, gradient_estimator=STEFunction)",
    "docstring": "Apply equalize operation. Args: initial_probability: the initial probability. If None, the augmentation will be randomly applied according to he augmentation sampling range. temperature: temperature for RelaxedBernoulli distribution used during training. Note: Equalize cannot update probabilities yet. Note: STE gradient estimator applied for back propagation.",
    "type": "class",
    "file_path": "kornia\\kornia\\augmentation\\auto\\operations\\ops.py",
    "ast_data": "ClassDef name:Equalize FunctionDef name:__init__ arg:self arg:initial_probability arg:temperature arguments arg arg arg Call Call Call"
  },
  {
    "library": "matplotlib",
    "name": "__init__",
    "source_code": "def __init__(self, interval=1, tz=None):\n    super().__init__(tz=tz)\n    self._interval = interval\n    self._wrapped_locator = ticker.MultipleLocator(interval)",
    "docstring": "Parameters ---------- interval : int, default: 1 The interval between each iteration. For example, if `~datetime.tzinfotimezonedateutil.tz`.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\dates.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:interval arg:tz arguments arg arg arg Call Call Assign Assign Call"
  },
  {
    "library": "pytorch",
    "name": "get_inline_skeleton",
    "source_code": "def get_inline_skeleton():\n    import importlib.resources\n    skeleton = importlib.resources.read_text(__package__, 'skeleton.html')\n    js_code = importlib.resources.read_text(__package__, 'code.js')\n    for js_module in ['preact', 'htm']:\n        js_lib = importlib.resources.read_binary(__package__, f'{js_module}.mjs')\n        js_url = 'data:application/javascript,' + urllib.parse.quote(js_lib)\n        js_code = js_code.replace(f'https://unpkg.com/{js_module}?module', js_url)\n    skeleton = skeleton.replace(' src=\"./code.js\">', '>\\n' + js_code)\n    return skeleton",
    "docstring": "Get a fully-inlined skeleton of the frontend. The returned HTML page has no external network dependencies for code. It can load model_info.json over HTTP, or be passed to burn_in_info.",
    "type": "function",
    "file_path": "pytorch\\torch\\utils\\model_dump\\__init__.py",
    "ast_data": "FunctionDef name:get_inline_skeleton arguments Assign Call Assign Call For Assign Call Assign Call Assign Call Assign Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "constant_to_device",
    "source_code": "def constant_to_device(self, device: torch.device) -> IRNode:\n    loader = self.make_loader()\n    loader = patch.object(ConstantBuffer, 'override_device', device)(loader)\n    return Pointwise(device=device, dtype=self.get_dtype(), inner_fn=loader, ranges=self.get_size())",
    "docstring": "Move this to a given device. Requires that all reads are to constants.",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\ir.py",
    "ast_data": "FunctionDef name:constant_to_device arg:self arg:device arguments arg arg Assign Call Assign Call Call Return return:yes Call Call Call"
  },
  {
    "library": "pandas",
    "name": "sample",
    "source_code": "@final\ndef sample(self, n: int | None=None, frac: float | None=None, replace: bool=False, weights=None, random_state: RandomState | None=None, axis: Axis | None=None, ignore_index: bool=False) -> Self:\n    if axis is None:\n        axis = 0\n    axis = self._get_axis_number(axis)\n    obj_len = self.shape[axis]\n    rs = common.random_state(random_state)\n    size = sample.process_sampling_size(n, frac, replace)\n    if size is None:\n        assert frac is not None\n        size = round(frac * obj_len)\n    if weights is not None:\n        weights = sample.preprocess_weights(self, weights, axis)\n    sampled_indices = sample.sample(obj_len, size, replace, weights, rs)\n    result = self.take(sampled_indices, axis=axis)\n    if ignore_index:\n        result.index = default_index(len(result))\n    return result",
    "docstring": "Return a random sample of items from an axis of object. You can use for reproducibility. Parameters ---------- n : int, optional Number of items from axis to return. Cannot be used with . Default = 1 if = None. frac : float, optional Fraction of axis items to return. Cannot be used with . replace : bool, default False Allow or disallow sampling of the same row more than once. weights : str or ndarray-like, optional Default `SeriesNonenfracreplacementTruerandom_statereplaceTruefracnum_specimen_seen` column are more likely to be sampled. >>> df.sample(n=2, weights=\"num_specimen_seen\", random_state=1) num_legs num_wings num_specimen_seen falcon 2 2 10 fish 0 0 8",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\generic.py",
    "ast_data": "FunctionDef name:sample arg:self arg:n arg:frac arg:replace arg:weights arg:random_state arg:axis arg:ignore_index arguments arg arg arg arg arg arg arg arg If Compare Assign Assign Call Assign Assign Call Assign Call If Compare Compare Assign Call If Compare Assign Call Assign Call Assign Call If Assign Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "codegen_static_numels",
    "source_code": "def codegen_static_numels(self, code):\n\n    def is_static_integer(expr: sympy.Expr) -> bool:\n        return isinstance(expr, (sympy.Integer, int))\n    for tree in self.range_trees:\n        if not tree.is_reduction or self.inside_reduction:\n            simplified_tree_numel = V.graph.sizevars.simplify(tree.numel)\n            if is_static_integer(simplified_tree_numel):\n                code.writeline(f'{tree.prefix}numel = {int(simplified_tree_numel)}')\n        if tree.is_reduction and self.persistent_reduction:\n            if self.cooperative_reduction:\n                numel = self.kexpr(self.rename_indexing(tree.numel))\n                val = f'triton_helpers.constexpr_next_power_of_2(({numel} + RSPLIT - 1) // RSPLIT)'\n            else:\n                val = self._get_persistent_RBLOCK(tree.numel)\n            code.writeline(f'{tree.prefix.upper()}BLOCK: tl.constexpr = {val}')\n        if tree.prefix == 'x' and self.no_x_dim:\n            code.writeline('XBLOCK: tl.constexpr = 1')",
    "docstring": "We get a small speedup from hard coding numels if they are static. This code stomps on the passed-in values by writing an constant to the top of the kernel. In a kernel like: def KERNEL_NAME(in_ptr0, in_ptr1, out_ptr2, xnumel, r0_numel, XBLOCK : tl.constexpr, R0_BLOCK : tl.constexpr): We would add xnumel = 4096 r0_numel = 768 After the signature, before the kernel code, if we decided to make these static. As its hardcoded, it becomes a better signal to triton on how to unroll and do some static indexing. So, it's not so much that downstream knows that its a static numel, as that you just plop a constant into the kernel.",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\codegen\\triton.py",
    "ast_data": "FunctionDef name:codegen_static_numels arg:self arg:code arguments arg arg FunctionDef name:is_static_integer arg:expr arguments arg Return return:yes Call For If BoolOp Assign Call If Call Call Call If BoolOp If Assign Call Call Assign Assign Call Call Call If BoolOp Compare Call"
  },
  {
    "library": "pandas",
    "name": "as_array",
    "source_code": "def as_array(self, dtype: np.dtype | None=None, copy: bool=False, na_value: object=lib.no_default) -> np.ndarray:\n    passed_nan = lib.is_float(na_value) and isna(na_value)\n    if len(self.blocks) == 0:\n        arr = np.empty(self.shape, dtype=float)\n        return arr.transpose()\n    if self.is_single_block:\n        blk = self.blocks[0]\n        if na_value is not lib.no_default:\n            if lib.is_np_dtype(blk.dtype, 'f') and passed_nan:\n                pass\n            else:\n                copy = True\n        if blk.is_extension:\n            arr = blk.values.to_numpy(dtype=dtype, na_value=na_value, copy=copy).reshape(blk.shape)\n        elif not copy:\n            arr = np.asarray(blk.values, dtype=dtype)\n        else:\n            arr = np.array(blk.values, dtype=dtype, copy=copy)\n        if passed_nan and blk.dtype.kind in 'mM':\n            arr[isna(blk.values)] = na_value\n        if not copy:\n            arr = arr.view()\n            arr.flags.writeable = False\n    else:\n        arr = self._interleave(dtype=dtype, na_value=na_value)\n    if na_value is lib.no_default:\n        pass\n    elif arr.dtype.kind == 'f' and passed_nan:\n        pass\n    else:\n        arr[isna(arr)] = na_value\n    return arr.transpose()",
    "docstring": "Convert the blockmanager data into an numpy array. Parameters ---------- dtype : np.dtype or None, default None Data type of the return array. copy : bool, default False If True then guarantee that a copy is returned. A value of False does not guarantee that the underlying data is not copied. na_value : object, default lib.no_default Value to be used as the missing value sentinel. Returns ------- arr : ndarray",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\internals\\managers.py",
    "ast_data": "FunctionDef name:as_array arg:self arg:dtype arg:copy arg:na_value arguments arg arg arg arg Assign BoolOp Call Call If Compare Call Assign Call Return return:yes Call If Assign If Compare If BoolOp Call Assign If Assign Call Call If Assign Call Assign Call If BoolOp Compare Assign Call If Assign Call Assign Assign Call If Compare If BoolOp Compare Assign Call Return return:yes Call"
  },
  {
    "library": "kornia",
    "name": "from_name",
    "source_code": "@staticmethod\ndef from_name(model_name: str, num_classes: int=80) -> RTDETRConfig:\n    if model_name == 'rtdetr_r18vd':\n        config = RTDETRConfig(RTDETRModelType.resnet18d, num_classes, input_size=640)\n    elif model_name == 'rtdetr_r34vd':\n        config = RTDETRConfig(RTDETRModelType.resnet34d, num_classes, input_size=640)\n    elif model_name == 'rtdetr_r50vd_m':\n        config = RTDETRConfig(RTDETRModelType.resnet50d_m, num_classes, input_size=640)\n    elif model_name == 'rtdetr_r50vd':\n        config = RTDETRConfig(RTDETRModelType.resnet50d, num_classes, input_size=640)\n    elif model_name == 'rtdetr_r101vd':\n        config = RTDETRConfig(RTDETRModelType.resnet101d, num_classes, input_size=640)\n    else:\n        raise ValueError\n    return config",
    "docstring": "Load model without pretrained weights. Args: model_name: 'rtdetr_r18vd', 'rtdetr_r34vd', 'rtdetr_r50vd_m', 'rtdetr_r50vd', 'rtdetr_r101vd'. num_classes: Number of classes to detect.",
    "type": "method",
    "file_path": "kornia\\kornia\\contrib\\models\\rt_detr\\model.py",
    "ast_data": "FunctionDef name:from_name arg:model_name arg:num_classes arguments arg arg If Compare Assign Call If Compare Assign Call If Compare Assign Call If Compare Assign Call If Compare Assign Call Raise Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_verify_compatible_image_shapes",
    "source_code": "def _verify_compatible_image_shapes(img1, img2):\n    shape1 = img1.get_shape().with_rank_at_least(3)\n    shape2 = img2.get_shape().with_rank_at_least(3)\n    shape1[-3:].assert_is_compatible_with(shape2[-3:])\n    if shape1.ndims is not None and shape2.ndims is not None:\n        for dim1, dim2 in zip(reversed(shape1.dims[:-3]), reversed(shape2.dims[:-3])):\n            if not (dim1 == 1 or dim2 == 1 or dim1.is_compatible_with(dim2)):\n                raise ValueError('Two images are not compatible: %s and %s' % (shape1, shape2))\n    shape1, shape2 = array_ops.shape_n([img1, img2])\n    checks = []\n    checks.append(control_flow_assert.Assert(math_ops.greater_equal(array_ops.size(shape1), 3), [shape1, shape2], summarize=10))\n    checks.append(control_flow_assert.Assert(math_ops.reduce_all(math_ops.equal(shape1[-3:], shape2[-3:])), [shape1, shape2], summarize=10))\n    return (shape1, shape2, checks)",
    "docstring": "Checks if two image tensors are compatible for applying SSIM or PSNR. This function checks if two sets of images have ranks at least 3, and if the last three dimensions match. Args: img1: Tensor containing the first image batch. img2: Tensor containing the second image batch. Returns: A tuple containing: the first tensor shape, the second tensor shape, and a list of control_flow_ops.Assert() ops implementing the checks. Raises: ValueError: When static shape check fails.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\image_ops_impl.py",
    "ast_data": "FunctionDef name:_verify_compatible_image_shapes arg:img1 arg:img2 arguments arg arg Assign Call Call Assign Call Call Call If BoolOp Compare Compare For Call Call Call If BoolOp Compare Compare Call Raise Call Assign Call Assign Call Call Call Call Call Call Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "scatter",
    "source_code": "@tf_should_use.should_use_result\ndef scatter(self, indices, value, name=None):\n    with ops.name_scope(name, 'TensorArrayScatter', [self._flow, value, indices]):\n        value = ops.convert_to_tensor(value, preferred_dtype=self._dtype, name='value')\n        _check_dtypes(value, self._dtype)\n        self._check_element_shape(value.shape[1:])\n        flow_out = list_ops.tensor_list_scatter(tensor=value, indices=indices, element_shape=self.element_shape, input_handle=self._flow)\n        return build_ta_with_new_flow(self, flow_out)",
    "docstring": "See TensorArray.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\tensor_array_ops.py",
    "ast_data": "FunctionDef name:scatter arg:self arg:indices arg:value arg:name arguments arg arg arg arg With Call Assign Call Call Call Assign Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_tile_ragged_splits",
    "source_code": "def _tile_ragged_splits(rt_input, multiples, const_multiples=None):\n    ragged_rank = rt_input.ragged_rank\n    nested_splits = rt_input.nested_row_splits\n    projected_splits = [{i: nested_splits[i]} for i in range(ragged_rank)]\n    for src_axis in range(ragged_rank):\n        for dst_axis in range(src_axis + 1, ragged_rank - 1):\n            projected_splits[src_axis][dst_axis] = array_ops.gather(nested_splits[dst_axis], projected_splits[src_axis][dst_axis - 1])\n    result_splits = []\n    for axis in range(ragged_rank):\n        input_lengths = nested_splits[axis][1:] - nested_splits[axis][:-1]\n        output_lengths = input_lengths * multiples[axis + 1]\n        repeats = 1\n        for d in range(axis - 1, -1, -1):\n            if const_multiples is None or const_multiples[d + 1] != 1:\n                splits = projected_splits[d][axis - 1] * repeats\n                output_lengths = ragged_util.repeat_ranges(output_lengths, splits, multiples[d + 1])\n            repeats *= multiples[d + 1]\n        output_lengths = array_ops.tile(output_lengths, multiples[:1])\n        result_splits.append(ragged_util.lengths_to_splits(output_lengths))\n    return result_splits",
    "docstring": "Builds nested_split tensors for a tiled . Returns a list of split tensors that can be used to construct the that tiles as specified by . Args: rt_input: The that is being tiled. multiples: A 1-D integer , indicating how many times each dimension should be repeated. const_multiples: Optional constant value for multiples. Used to skip tiling dimensions where . Returns: A list of 1-D integer s (one for each ragged dimension in ). #### Example: >>> rt = tf.ragged.constant([[1, 2], [3]]) >>> _tile_ragged_splits(rt, [3, 2]) []",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\ragged_array_ops.py",
    "ast_data": "FunctionDef name:_tile_ragged_splits arg:rt_input arg:multiples arg:const_multiples arguments arg arg arg Assign Assign Assign Call For Call For Call Assign Call Assign For Call Assign Assign Assign For Call If BoolOp Compare Compare Assign Assign Call Assign Call Call Call Return return:yes"
  },
  {
    "library": "django",
    "name": "_generate_through_model_map",
    "source_code": "def _generate_through_model_map(self):\n    for app_label, model_name in sorted(self.old_model_keys):\n        old_model_name = self.renamed_models.get((app_label, model_name), model_name)\n        old_model_state = self.from_state.models[app_label, old_model_name]\n        for field_name, field in old_model_state.fields.items():\n            if hasattr(field, 'remote_field') and getattr(field.remote_field, 'through', None):\n                through_key = resolve_relation(field.remote_field.through, app_label, model_name)\n                self.through_users[through_key] = (app_label, old_model_name, field_name)",
    "docstring": "Through model map generation.",
    "type": "method",
    "file_path": "django\\django\\db\\migrations\\autodetector.py",
    "ast_data": "FunctionDef name:_generate_through_model_map arg:self arguments arg For Call Assign Call Assign For Call If BoolOp Call Call Assign Call Assign"
  },
  {
    "library": "pytorch",
    "name": "tree_leaves_with_path",
    "source_code": "def tree_leaves_with_path(tree: PyTree, is_leaf: Optional[Callable[[PyTree], bool]]=None) -> list[tuple[KeyPath, Any]]:\n    raise NotImplementedError('KeyPaths are not yet supported in cxx_pytree.')",
    "docstring": "Gets the leaves of a pytree like `tree_flatten_with_path_fnregister_pytree_nodeTrue`, the whole subtree being treated as a leaf. Otherwise, the default pytree registry will be used to determine a node is a leaf or not. If the function is not specified, the default pytree registry will be used. Returns: A list of (key path, leaf) pairs.",
    "type": "function",
    "file_path": "pytorch\\torch\\utils\\_cxx_pytree.py",
    "ast_data": "FunctionDef name:tree_leaves_with_path arg:tree arg:is_leaf arguments arg arg Raise Call"
  },
  {
    "library": "scipy",
    "name": "vf_to_vv",
    "source_code": "def vf_to_vv(self, vertices, simplices):\n    if self.dim > 1:\n        for s in simplices:\n            edges = itertools.combinations(s, self.dim)\n            for e in edges:\n                self.V[tuple(vertices[e[0]])].connect(self.V[tuple(vertices[e[1]])])\n    else:\n        for e in simplices:\n            self.V[tuple(vertices[e[0]])].connect(self.V[tuple(vertices[e[1]])])\n    return",
    "docstring": "Convert a vertex-face mesh to a vertex-vertex mesh used by this class Parameters ---------- vertices : list Vertices simplices : list Simplices",
    "type": "method",
    "file_path": "scipy\\scipy\\optimize\\_shgo_lib\\_complex.py",
    "ast_data": "FunctionDef name:vf_to_vv arg:self arg:vertices arg:simplices arguments arg arg arg If Compare For Assign Call For Call Call Call For Call Call Call Return return:no"
  },
  {
    "library": "kornia",
    "name": "pad_to_length",
    "source_code": "def pad_to_length(x: Tensor, length: int) -> Tuple[Tensor, Tensor]:\n    if length <= x.shape[-2]:\n        return (x, ones_like(x[..., :1], dtype=torch.bool))\n    pad = ones(*x.shape[:-2], length - x.shape[-2], x.shape[-1], device=x.device, dtype=x.dtype)\n    y = concatenate([x, pad], dim=-2)\n    mask = zeros(*y.shape[:-1], 1, dtype=torch.bool, device=x.device)\n    mask[..., :x.shape[-2], :] = True\n    return (y, mask)",
    "docstring": "Pad tensor to desired length.",
    "type": "function",
    "file_path": "kornia\\kornia\\feature\\lightglue.py",
    "ast_data": "FunctionDef name:pad_to_length arg:x arg:length arguments arg arg If Compare Return return:yes Call Assign Call Assign Call Assign Call Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "dispatch_for_types",
    "source_code": "def dispatch_for_types(op, *types):\n\n    def decorator(func):\n        _TypeBasedDispatcher(get_compatible_func(op, func), types).register(op)\n        return func\n    return decorator",
    "docstring": "Decorator to declare that a Python function overrides an op for a type. The decorated function is used to override if any of the arguments or keyword arguments (including elements of lists or tuples) have one of the specified types. Example: Args: op: Python function: the operation that should be overridden. *types: The argument types for which this function should be used.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\util\\dispatch.py",
    "ast_data": "FunctionDef name:dispatch_for_types arg:op arguments arg arg FunctionDef name:decorator arg:func arguments arg Call Call Call Return return:yes Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "_iter_test_masks",
    "source_code": "def _iter_test_masks(self, X=None, y=None, groups=None):\n    for test_index in self._iter_test_indices(X, y, groups):\n        test_mask = np.zeros(_num_samples(X), dtype=bool)\n        test_mask[test_index] = True\n        yield test_mask",
    "docstring": "Generates boolean masks corresponding to test sets. By default, delegates to _iter_test_indices(X, y, groups)",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\model_selection\\_split.py",
    "ast_data": "FunctionDef name:_iter_test_masks arg:self arg:X arg:y arg:groups arguments arg arg arg arg For Call Assign Call Call Assign"
  },
  {
    "library": "tensorflow",
    "name": "set_server_def",
    "source_code": "def set_server_def(self, server_def, keep_alive_secs=_KEEP_ALIVE_SECS):\n    if not server_def:\n        raise ValueError('server_def is None.')\n    self._server_def = server_def\n    if self._context_handle:\n        server_def_str = server_def.SerializeToString()\n        pywrap_tfe.TFE_ContextSetServerDef(self._context_handle, keep_alive_secs, server_def_str)\n        self._initialize_logical_devices()\n    self._clear_caches()\n    _device_parsing_cache.clear()",
    "docstring": "Allow setting a server_def on the context. When a server def is replaced, it effectively clears a bunch of caches within the context. If you attempt to use a tensor object that was pointing to a tensor on the remote device, it will raise an error. Args: server_def: A tensorflow::ServerDef proto. Enables execution on remote devices. keep_alive_secs: Num. seconds after which the remote end will hang up. As long as the client is still alive, the server state for the context will be kept alive. If the client is killed (or there is some failure), the server will clean up its context keep_alive_secs after the final RPC it receives. Raises: ValueError: if server_def is None.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\context.py",
    "ast_data": "FunctionDef name:set_server_def arg:self arg:server_def arg:keep_alive_secs arguments arg arg arg If Raise Call Assign If Assign Call Call Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "align_to",
    "source_code": "def align_to(self, *names):\n    if has_torch_function_unary(self):\n        return handle_torch_function(Tensor.align_to, (self,), self, *names)\n    ellipsis_idx = single_ellipsis_index(names, 'align_to')\n    if ellipsis_idx is None:\n        return super().align_to(names)\n    return super().align_to([name for name in names if not is_ellipsis(name)], ellipsis_idx)",
    "docstring": "Permutes the dimensions of the :attr: tensor to match the order specified in :attr:, adding size-one dims for any new names. All of the dims of :attr: must be named in order to use this method. The resulting tensor is a view on the original tensor. All dimension names of :attr: must be present in :attr:. :attr: may contain additional names that are not in `namesselfnamesselfself`. Examples:: >>> tensor = torch.randn(2, 2, 2, 2, 2, 2) >>> named_tensor = tensor.refine_names('A', 'B', 'C', 'D', 'E', 'F') # Move the F and E dims to the front while keeping the rest in order >>> named_tensor.align_to('F', 'E', ...) .. warning:: The named tensor API is experimental and subject to change.",
    "type": "method",
    "file_path": "pytorch\\torch\\_tensor.py",
    "ast_data": "FunctionDef name:align_to arg:self arguments arg arg If Call Return return:yes Call Assign Call If Compare Return return:yes Call Call Return return:yes Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "elu",
    "source_code": "@register_decomposition(aten.elu)\n@_inplace_wrapper\n@out_wrapper()\n@elementwise_type_promotion_wrapper(type_promoting_args=('a',), type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT)\ndef elu(a: TensorLikeType, alpha: NumberType=1.0, scale: NumberType=1.0, input_scale: NumberType=1.0, inplace: bool=False) -> TensorLikeType:\n    if inplace:\n        raise NotImplementedError\n    python_type = utils.dtype_to_type(a.dtype)\n    torch._check(utils.is_weakly_lesser_type(type(input_scale), python_type), lambda: f'input_scale argument of type {type(input_scale)} cannot be safely cast to type {python_type}!')\n    torch._check(utils.is_weakly_lesser_type(type(scale), python_type), lambda: f'scale argument of type {type(scale)} cannot be safely cast to type {python_type}!')\n    torch._check(utils.is_weakly_lesser_type(type(alpha), python_type), lambda: f'alpha argument of type {type(alpha)} cannot be safely cast to type {python_type}!')\n    return torch.where(a > 0, scale * a, alpha * scale * torch.expm1(a * input_scale))",
    "docstring": "Reference implementation of torch.nn.functional.elu",
    "type": "function",
    "file_path": "pytorch\\torch\\_refs\\nn\\functional\\__init__.py",
    "ast_data": "FunctionDef name:elu arg:a arg:alpha arg:scale arg:input_scale arg:inplace arguments arg arg arg arg arg If Raise Assign Call Call Call Call arguments Call Call Call Call arguments Call Call Call Call arguments Call Return return:yes Call Compare Call Call Call Call"
  },
  {
    "library": "scipy",
    "name": "_compute_covariance",
    "source_code": "def _compute_covariance(self):\n    self.factor = self.covariance_factor()\n    if not hasattr(self, '_data_cho_cov'):\n        self._data_covariance = atleast_2d(cov(self.dataset, rowvar=1, bias=False, aweights=self.weights))\n        self._data_cho_cov = linalg.cholesky(self._data_covariance, lower=True)\n    self.covariance = self._data_covariance * self.factor ** 2\n    self.cho_cov = (self._data_cho_cov * self.factor).astype(np.float64)\n    self.log_det = 2 * np.log(np.diag(self.cho_cov * np.sqrt(2 * pi))).sum()",
    "docstring": "Computes the covariance matrix for each Gaussian kernel using covariance_factor().",
    "type": "method",
    "file_path": "scipy\\scipy\\stats\\_kde.py",
    "ast_data": "FunctionDef name:_compute_covariance arg:self arguments arg Assign Call If Call Assign Call Call Assign Call Assign Assign Call Assign Call Call Call Call"
  },
  {
    "library": "authlib",
    "name": "normalize_base_string_uri",
    "source_code": "def normalize_base_string_uri(uri, host=None):\n    uri = to_unicode(uri)\n    scheme, netloc, path, params, query, fragment = urlparse.urlparse(uri)\n    if not scheme or not netloc:\n        raise ValueError('uri must include a scheme and netloc')\n    if not path:\n        path = '/'\n    scheme = scheme.lower()\n    netloc = netloc.lower()\n    if host is not None:\n        netloc = host.lower()\n    default_ports = (('http', '80'), ('https', '443'))\n    if ':' in netloc:\n        host, port = netloc.split(':', 1)\n        if (scheme, port) in default_ports:\n            netloc = host\n    return urlparse.urlunparse((scheme, netloc, path, params, '', ''))",
    "docstring": "Normalize Base String URI per _. For example, the HTTP request:: GET /r%20v/X?id=123 HTTP/1.1 Host: EXAMPLE.COM:80 is represented by the base string URI: \" In another example, the HTTPS request:: GET /?q=1 HTTP/1.1 Host: www.example.net:8080 is represented by the base string URI: \" .. _: The host argument overrides the netloc part of the uri argument.",
    "type": "function",
    "file_path": "authlib\\authlib\\oauth1\\rfc5849\\signature.py",
    "ast_data": "FunctionDef name:normalize_base_string_uri arg:uri arg:host arguments arg arg Assign Call Assign Call If BoolOp Raise Call If Assign Assign Call Assign Call If Compare Assign Call Assign If Compare Assign Call If Compare Assign Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "_validate_integer",
    "source_code": "def _validate_integer(self, key: int | np.integer, axis: AxisInt) -> None:\n    len_axis = len(self.obj._get_axis(axis))\n    if key >= len_axis or key < -len_axis:\n        raise IndexError('single positional indexer is out-of-bounds')",
    "docstring": "Check that 'key' is a valid position in the desired axis. Parameters ---------- key : int Requested position. axis : int Desired axis. Raises ------ IndexError If 'key' is not a valid position in axis 'axis'.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\indexing.py",
    "ast_data": "FunctionDef name:_validate_integer arg:self arg:key arg:axis arguments arg arg arg Assign Call Call If BoolOp Compare Compare Raise Call"
  },
  {
    "library": "scipy",
    "name": "logpdf",
    "source_code": "def logpdf(self, x, s2, mu=0, lmbda=1, a=1, b=1):\n    invalid, args = self._process_parameters_pdf(x, s2, mu, lmbda, a, b)\n    s2 = args[1]\n    with np.errstate(all='ignore'):\n        logpdf = np.asarray(self._logpdf(*args))\n    logpdf[s2 <= 0] = -np.inf\n    logpdf[invalid] = np.nan\n    return logpdf[()]",
    "docstring": "Log of the probability density function. Parameters ---------- x, s2 : array_like Arguments. must be greater than zero. mu, lmbda, a, b : array_like, optional Shape parameters. , , and must be greater than zero. Returns ------- logpdf : ndarray or scalar Log of the probability density function.",
    "type": "method",
    "file_path": "scipy\\scipy\\stats\\_multivariate.py",
    "ast_data": "FunctionDef name:logpdf arg:self arg:x arg:s2 arg:mu arg:lmbda arg:a arg:b arguments arg arg arg arg arg arg arg Assign Call Assign With Call Assign Call Call Assign Compare Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_py_list_new",
    "source_code": "def _py_list_new(elements):\n    return list(elements)",
    "docstring": "Overload of new_list that creates a Python list.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\autograph\\operators\\data_structures.py",
    "ast_data": "FunctionDef name:_py_list_new arg:elements arguments arg Return return:yes Call"
  },
  {
    "library": "numpy",
    "name": "compute_sha256",
    "source_code": "def compute_sha256(idirs):\n    return _compute_hash(idirs, hashlib.sha256)",
    "docstring": "Compute sha256 hash of files in idirs. Parameters ---------- idirs : directory path Directory containing files to be hashed.",
    "type": "function",
    "file_path": "numpy\\pavement.py",
    "ast_data": "FunctionDef name:compute_sha256 arg:idirs arguments arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "mesh",
    "source_code": "@property\ndef mesh(self):\n    return self._mesh",
    "docstring": "Returns the mesh used by the strategy.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\experimental\\multi_worker_mirrored_strategy.py",
    "ast_data": "FunctionDef name:mesh arg:self arguments arg Return return:yes"
  },
  {
    "library": "kornia",
    "name": "get_geometric_only_param",
    "source_code": "def get_geometric_only_param(module: 'K.container.ImageSequentialBase', param: List[ParamItem]) -> List[ParamItem]:\n    named_modules = module.get_forward_sequence(param)\n    res: List[ParamItem] = []\n    for (_, mod), p in zip(named_modules, param):\n        if isinstance(mod, (K.GeometricAugmentationBase2D, K.GeometricAugmentationBase3D)):\n            res.append(p)\n    return res",
    "docstring": "Return geometry param.",
    "type": "function",
    "file_path": "kornia\\kornia\\augmentation\\container\\ops.py",
    "ast_data": "FunctionDef name:get_geometric_only_param arg:module arg:param arguments arg arg Assign Call For Call If Call Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "get_linewidth",
    "source_code": "def get_linewidth(self):\n    return self._linewidth",
    "docstring": "Return the line width in points.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\backend_bases.py",
    "ast_data": "FunctionDef name:get_linewidth arg:self arguments arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "get_filename",
    "source_code": "def get_filename() -> str:\n    return torch._C._cuda_tunableop_get_filename()",
    "docstring": "Get the results filename.",
    "type": "function",
    "file_path": "pytorch\\torch\\cuda\\tunable.py",
    "ast_data": "FunctionDef name:get_filename arguments Return return:yes Call"
  },
  {
    "library": "django",
    "name": "get_initial_alias",
    "source_code": "def get_initial_alias(self):\n    if self.alias_map:\n        alias = self.base_table\n        self.ref_alias(alias)\n    elif self.model:\n        alias = self.join(self.base_table_class(self.get_meta().db_table, None))\n    else:\n        alias = None\n    return alias",
    "docstring": "Return the first alias for this query, after increasing its reference count.",
    "type": "method",
    "file_path": "django\\django\\db\\models\\sql\\query.py",
    "ast_data": "FunctionDef name:get_initial_alias arg:self arguments arg If Assign Call If Assign Call Call Call Assign Return return:yes"
  },
  {
    "library": "scipy",
    "name": "rvs",
    "source_code": "def rvs(self, n, p, size=None, random_state=None):\n    n, p, npcond = self._process_parameters(n, p)\n    random_state = self._get_random_state(random_state)\n    return random_state.multinomial(n, p, size)",
    "docstring": "Draw random samples from a Multinomial distribution. Parameters ---------- %(_doc_default_callparams)s size : integer or iterable of integers, optional Number of samples to draw (default 1). %(_doc_random_state)s Returns ------- rvs : ndarray or scalar Random variates of shape (, ) Notes ----- %(_doc_callparams_note)s",
    "type": "method",
    "file_path": "scipy\\scipy\\stats\\_multivariate.py",
    "ast_data": "FunctionDef name:rvs arg:self arg:n arg:p arg:size arg:random_state arguments arg arg arg arg arg Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "insert_many",
    "source_code": "def insert_many(self, component_index, keys, values, name=None):\n    if name is None:\n        name = '%s_BarrierInsertMany' % self._name\n    return gen_data_flow_ops.barrier_insert_many(self._barrier_ref, keys, values, component_index, name=name)",
    "docstring": "For each key, assigns the respective value to the specified component. This operation updates each element at component_index. Args: component_index: The component of the value that is being assigned. keys: A vector of keys, with length n. values: An any-dimensional tensor of values, which are associated with the respective keys. The first dimension must have length n. name: Optional name for the op. Returns: The operation that performs the insertion. Raises: InvalidArgumentsError: If inserting keys and values without elements.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\data_flow_ops.py",
    "ast_data": "FunctionDef name:insert_many arg:self arg:component_index arg:keys arg:values arg:name arguments arg arg arg arg arg If Compare Assign Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "_collect_probas",
    "source_code": "def _collect_probas(self, X):\n    return np.asarray([clf.predict_proba(X) for clf in self.estimators_])",
    "docstring": "Collect results from clf.predict calls.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\ensemble\\_voting.py",
    "ast_data": "FunctionDef name:_collect_probas arg:self arg:X arguments arg arg Return return:yes Call Call"
  },
  {
    "library": "sphinx",
    "name": "only",
    "source_code": "class only(nodes.Element):\n    pass",
    "docstring": "Node for \"only\" directives (conditional inclusion based on tags).",
    "type": "class",
    "file_path": "sphinx\\sphinx\\addnodes.py",
    "ast_data": "ClassDef name:only"
  },
  {
    "library": "numpy",
    "name": "check_gcc_version_at_least",
    "source_code": "def check_gcc_version_at_least(self, major, minor=0, patchlevel=0):\n    return check_gcc_version_at_least(self, major, minor, patchlevel)",
    "docstring": "Return True if the GCC version is greater than or equal to the specified version.",
    "type": "method",
    "file_path": "numpy\\numpy\\distutils\\command\\config.py",
    "ast_data": "FunctionDef name:check_gcc_version_at_least arg:self arg:major arg:minor arg:patchlevel arguments arg arg arg arg Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "ascent",
    "source_code": "def ascent():\n    import pickle\n    fname = fetch_data('ascent.dat')\n    with open(fname, 'rb') as f:\n        ascent = array(pickle.load(f))\n    return ascent",
    "docstring": "Get an 8-bit grayscale bit-depth, 512 x 512 derived image for easy use in demos. The image is derived from Parameters ---------- None Returns ------- ascent : ndarray convenient image to use for testing and demonstration Examples -------- >>> import scipy.datasets >>> ascent = scipy.datasets.ascent() >>> ascent.shape (512, 512) >>> ascent.max() np.uint8(255) >>> import matplotlib.pyplot as plt >>> plt.gray() >>> plt.imshow(ascent) >>> plt.show()",
    "type": "function",
    "file_path": "scipy\\scipy\\datasets\\_fetchers.py",
    "ast_data": "FunctionDef name:ascent arguments Assign Call With Call Assign Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_check_op",
    "source_code": "def _check_op(op) -> None:\n    if op not in [isend, irecv]:\n        raise ValueError('Invalid ``op``. Expected ``op`` to be of type ``torch.distributed.isend`` or ``torch.distributed.irecv``.')",
    "docstring": "Check that the `` is either isend or irecv.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\distributed_c10d.py",
    "ast_data": "FunctionDef name:_check_op arg:op arguments arg If Compare Raise Call"
  },
  {
    "library": "pandas",
    "name": "is_monotonic_increasing",
    "source_code": "@property\ndef is_monotonic_increasing(self) -> bool:\n    return self._engine.is_monotonic_increasing",
    "docstring": "Return a boolean if the values are equal or increasing. Returns ------- bool See Also -------- Index.is_monotonic_decreasing : Check if the values are equal or decreasing. Examples -------- >>> pd.Index([1, 2, 3]).is_monotonic_increasing True >>> pd.Index([1, 2, 2]).is_monotonic_increasing True >>> pd.Index([1, 3, 2]).is_monotonic_increasing False",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\indexes\\base.py",
    "ast_data": "FunctionDef name:is_monotonic_increasing arg:self arguments arg Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "set_children_bounds",
    "source_code": "def set_children_bounds(self, lower, upper):\n    self.children_lower_bound = lower\n    self.children_upper_bound = upper",
    "docstring": "Set children values bounds to respect monotonic constraints.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\ensemble\\_hist_gradient_boosting\\grower.py",
    "ast_data": "FunctionDef name:set_children_bounds arg:self arg:lower arg:upper arguments arg arg arg Assign Assign"
  },
  {
    "library": "tensorflow",
    "name": "_get_gradient_function",
    "source_code": "def _get_gradient_function(self):\n    return self._delayed_rewrite_functions._rewrite_forward_and_call_backward",
    "docstring": "Returns gradient function. It will be lazily created at first call.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\polymorphic_function\\concrete_function.py",
    "ast_data": "FunctionDef name:_get_gradient_function arg:self arguments arg Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "__init__",
    "source_code": "def __init__(self, widthA=1.0, lengthA=0.2, angleA=0):\n    super().__init__(widthA=widthA, lengthA=lengthA, angleA=angleA)",
    "docstring": "Parameters ---------- widthA : float, default: 1.0 Width of the bracket. lengthA : float, default: 0.2 Length of the bracket. angleA : float, default: 0 degrees Orientation of the bracket, as a counterclockwise angle. 0 degrees means perpendicular to the line.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\patches.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:widthA arg:lengthA arg:angleA arguments arg arg arg arg Call Call"
  },
  {
    "library": "pytorch",
    "name": "linetable_311_writer",
    "source_code": "def linetable_311_writer(first_lineno: int):\n    assert sys.version_info >= (3, 11)\n    linetable = []\n    lineno = first_lineno\n\n    def update(positions: 'dis.Positions', inst_size):\n        nonlocal lineno\n        lineno_new = positions.lineno if positions else None\n\n        def _update(delta, size):\n            assert 0 < size <= 8\n            other_varints: tuple[int, ...] = ()\n            if positions and positions.lineno is not None and (positions.end_lineno is not None) and (positions.col_offset is not None) and (positions.end_col_offset is not None):\n                linetable.append(240 + size - 1)\n                other_varints = (positions.end_lineno - positions.lineno, positions.col_offset + 1, positions.end_col_offset + 1)\n            else:\n                linetable.append(232 + size - 1)\n            if delta < 0:\n                delta = -delta << 1 | 1\n            else:\n                delta <<= 1\n            linetable.extend(encode_varint(delta))\n            for n in other_varints:\n                linetable.extend(encode_varint(n))\n        if lineno_new is None:\n            lineno_delta = 0\n        else:\n            lineno_delta = lineno_new - lineno\n            lineno = lineno_new\n        while inst_size > 8:\n            _update(lineno_delta, 8)\n            inst_size -= 8\n        _update(lineno_delta, inst_size)\n    return (linetable, update)",
    "docstring": "Used to create typing.CodeType.co_linetable See This is the internal format of the line number table for Python 3.11",
    "type": "function",
    "file_path": "pytorch\\torch\\_dynamo\\bytecode_transformation.py",
    "ast_data": "FunctionDef name:linetable_311_writer arg:first_lineno arguments arg Compare Assign Assign FunctionDef name:update arg:positions arg:inst_size arguments arg arg Assign FunctionDef name:_update arg:delta arg:size arguments arg arg Compare If BoolOp Compare Compare Compare Compare Call Assign Call If Compare Assign Call Call For Call Call If Compare Assign Assign Assign While Compare Call Call Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "set_params",
    "source_code": "def set_params(self, **params):\n    if not params:\n        return self\n    valid_params = self.get_params(deep=True)\n    for key, value in params.items():\n        split = key.split('__', 1)\n        if len(split) > 1:\n            name, sub_name = split\n            if name not in valid_params:\n                raise ValueError('Invalid parameter %s for kernel %s. Check the list of available parameters with `kernel.get_params().keys()`.' % (name, self))\n            sub_object = valid_params[name]\n            sub_object.set_params(**{sub_name: value})\n        else:\n            if key not in valid_params:\n                raise ValueError('Invalid parameter %s for kernel %s. Check the list of available parameters with `kernel.get_params().keys()`.' % (key, self.__class__.__name__))\n            setattr(self, key, value)\n    return self",
    "docstring": "Set the parameters of this kernel. The method works on simple kernels as well as on nested kernels. The latter have parameters of the form `` so that it's possible to update each component of a nested object. Returns ------- self",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\gaussian_process\\kernels.py",
    "ast_data": "FunctionDef name:set_params arg:self arguments arg arg If Return return:yes Assign Call For Call Assign Call If Compare Call Assign If Compare Raise Call Assign Call If Compare Raise Call Call Return return:yes"
  },
  {
    "library": "pandas",
    "name": "transform_dict_like",
    "source_code": "def transform_dict_like(self, func) -> DataFrame:\n    from pandas.core.reshape.concat import concat\n    obj = self.obj\n    args = self.args\n    kwargs = self.kwargs\n    assert isinstance(obj, ABCNDFrame)\n    if len(func) == 0:\n        raise ValueError('No transform functions were provided')\n    func = self.normalize_dictlike_arg('transform', obj, func)\n    results: dict[Hashable, DataFrame | Series] = {}\n    for name, how in func.items():\n        colg = obj._gotitem(name, ndim=1)\n        results[name] = colg.transform(how, 0, *args, **kwargs)\n    return concat(results, axis=1)",
    "docstring": "Compute transform in the case of a dict-like func",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\apply.py",
    "ast_data": "FunctionDef name:transform_dict_like arg:self arg:func arguments arg arg Assign Assign Assign Call If Compare Call Raise Call Assign Call For Call Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "get_feature_names_out",
    "source_code": "def get_feature_names_out(self, input_features=None):\n    check_is_fitted(self, '_n_features_out')\n    _check_feature_names_in(self, input_features=input_features, generate_names=False)\n    feature_names = [f'randomtreesembedding_{tree}_{leaf}' for tree in range(self.n_estimators) for leaf in self.one_hot_encoder_.categories_[tree]]\n    return np.asarray(feature_names, dtype=object)",
    "docstring": "Get output feature names for transformation. Parameters ---------- input_features : array-like of str or None, default=None Only used to validate feature names with the names seen in :meth:. Returns ------- feature_names_out : ndarray of str objects Transformed feature names, in the format of , where is the tree used to generate the leaf and is the index of a leaf node in that tree. Note that the node indexing scheme is used to index both nodes with children (split nodes) and leaf nodes. Only the latter can be present as output features. As a consequence, there are missing indices in the output feature names.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\ensemble\\_forest.py",
    "ast_data": "FunctionDef name:get_feature_names_out arg:self arg:input_features arguments arg arg Call Call Assign Call Return return:yes Call"
  },
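A sketch of the generated names, assuming a fitted RandomTreesEmbedding: one name per (tree, leaf) pair, following the f-string in the source above. The exact leaf indices depend on the fitted trees.

```python
import numpy as np
from sklearn.ensemble import RandomTreesEmbedding

X = np.random.RandomState(0).rand(20, 2)
emb = RandomTreesEmbedding(n_estimators=2, max_depth=2, random_state=0).fit(X)

# Names follow "randomtreesembedding_{tree}_{leaf}"; leaf indices skip
# split nodes, hence the gaps mentioned in the docstring.
print(emb.get_feature_names_out()[:3])
# e.g. ['randomtreesembedding_0_2' 'randomtreesembedding_0_3' ...]
```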
  {
    "library": "pytorch",
    "name": "tree_map_with_path",
    "source_code": "def tree_map_with_path(func: Callable[..., Any], tree: PyTree, *rests: PyTree, is_leaf: Optional[Callable[[PyTree], bool]]=None) -> PyTree:\n    keypath_leaves, treespec = tree_flatten_with_path(tree, is_leaf)\n    keypath_leaves = list(zip(*keypath_leaves))\n    all_keypath_leaves = keypath_leaves + [treespec.flatten_up_to(r) for r in rests]\n    return treespec.unflatten((func(*xs) for xs in zip(*all_keypath_leaves)))",
    "docstring": "Like :func:, but the provided callable takes an additional key path argument. Args: func: A function that takes `True`.",
    "type": "function",
    "file_path": "pytorch\\torch\\utils\\_pytree.py",
    "ast_data": "FunctionDef name:tree_map_with_path arg:func arg:tree arguments arg arg arg arg Assign Call Assign Call Call Assign Call Return return:yes Call Call Call"
  },
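A usage sketch, assuming the private `torch.utils._pytree` module keeps this API (`keystr` renders a key path as a string):

```python
import torch.utils._pytree as pytree

tree = {"a": 1, "b": {"c": 2}}

# func receives (key_path, leaf) instead of just the leaf.
labeled = pytree.tree_map_with_path(
    lambda path, x: (pytree.keystr(path), x * 10), tree
)
print(labeled)
# {'a': ("['a']", 10), 'b': {'c': ("['b']['c']", 20)}}
```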
  {
    "library": "tensorflow",
    "name": "must_record_gradient",
    "source_code": "def must_record_gradient():\n    return False",
    "docstring": "Import backprop if you want gradients recorded.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\execute.py",
    "ast_data": "FunctionDef name:must_record_gradient arguments Return return:yes"
  },
  {
    "library": "seaborn",
    "name": "_eval_univariate",
    "source_code": "def _eval_univariate(self, x, weights):\n    sorter = x.argsort()\n    x = x[sorter]\n    weights = weights[sorter]\n    y = weights.cumsum()\n    if self.stat in ['percent', 'proportion']:\n        y = y / y.max()\n    if self.stat == 'percent':\n        y = y * 100\n    x = np.r_[-np.inf, x]\n    y = np.r_[0, y]\n    if self.complementary:\n        y = y.max() - y\n    return (y, x)",
    "docstring": "Inner function for ECDF of one variable.",
    "type": "method",
    "file_path": "seaborn\\seaborn\\_statistics.py",
    "ast_data": "FunctionDef name:_eval_univariate arg:self arg:x arg:weights arguments arg arg arg Assign Call Assign Assign Assign Call If Compare Assign Call If Compare Assign Assign Assign If Assign Call Return return:yes"
  },
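The same ECDF construction in plain NumPy, as a standalone sketch of what the seaborn helper computes (sort, accumulate weights, normalize for the "proportion" stat, and prepend the (-inf, 0) anchor point):

```python
import numpy as np

x = np.array([3.0, 1.0, 2.0])
weights = np.ones_like(x)

order = x.argsort()
y = weights[order].cumsum()
y = y / y.max()                   # "proportion" stat
x_ecdf = np.r_[-np.inf, x[order]]
y_ecdf = np.r_[0, y]
print(x_ecdf)  # [-inf   1.   2.   3.]
print(y_ecdf)  # [0.         0.33333333 0.66666667 1.        ]
```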
  {
    "library": "numpy",
    "name": "LapackNotFoundError",
    "source_code": "class LapackNotFoundError(NotFoundError):\n    pass",
    "docstring": "Lapack ( libraries not found. Directories to search for the libraries can be specified in the numpy/distutils/site.cfg file (section [lapack]) or by setting the LAPACK environment variable.",
    "type": "class",
    "file_path": "numpy\\numpy\\distutils\\system_info.py",
    "ast_data": "ClassDef name:LapackNotFoundError"
  },
  {
    "library": "matplotlib",
    "name": "set_linestyle",
    "source_code": "def set_linestyle(self, ls):\n    self._us_linestyles = mlines._get_dash_patterns(ls)\n    self._linewidths, self._linestyles = self._bcast_lwls(self._us_lw, self._us_linestyles)",
    "docstring": "Set the linestyle(s) for the collection. =========================== ================= linestyle description =========================== ================= `.Line2D.set_linestyle` for a complete description.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\collections.py",
    "ast_data": "FunctionDef name:set_linestyle arg:self arg:ls arguments arg arg Assign Call Assign Call"
  },
  {
    "library": "tensorflow",
    "name": "assert_compatible_matrix_dimensions",
    "source_code": "def assert_compatible_matrix_dimensions(operator, x):\n    assert_same_dd = check_ops.assert_equal(array_ops.shape(x)[-2], operator.domain_dimension_tensor(), message='Dimensions are not compatible.  shape[-2] of argument to be the same as this operator')\n    return assert_same_dd",
    "docstring": "Assert that an argument to solve/matmul has proper domain dimension. If , and , then is defined only if . This returns an that \"fires\" if this is not the case. Static checks are already done by the base class . Args: operator: . x: . Returns: .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\linalg\\linear_operator_util.py",
    "ast_data": "FunctionDef name:assert_compatible_matrix_dimensions arg:operator arg:x arguments arg arg Assign Call Call Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "_f_dir_from_t",
    "source_code": "@staticmethod\ndef _f_dir_from_t(t_direction):\n    if t_direction == 'x':\n        return 'y'\n    elif t_direction == 'y':\n        return 'x'\n    else:\n        msg = f\"t_direction must be 'x' or 'y', got {t_direction!r}\"\n        raise ValueError(msg)",
    "docstring": "The direction that is other than .",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\collections.py",
    "ast_data": "FunctionDef name:_f_dir_from_t arg:t_direction arguments arg If Compare Return return:yes If Compare Return return:yes Assign Raise Call"
  },
  {
    "library": "scrapy",
    "name": "StrictOriginWhenCrossOriginPolicy",
    "source_code": "class StrictOriginWhenCrossOriginPolicy(ReferrerPolicy):\n    name: str = POLICY_STRICT_ORIGIN_WHEN_CROSS_ORIGIN\n\n    def referrer(self, response_url: str, request_url: str) -> str | None:\n        origin = self.origin(response_url)\n        if origin == self.origin(request_url):\n            return self.stripped_referrer(response_url)\n        if self.tls_protected(response_url) and self.potentially_trustworthy(request_url) or not self.tls_protected(response_url):\n            return self.origin_referrer(response_url)\n        return None",
    "docstring": "The \"strict-origin-when-cross-origin\" policy specifies that a full URL, stripped for use as a referrer, is sent as referrer information when making same-origin requests from a particular request client, and only the ASCII serialization of the origin of the request client when making cross-origin requests: - from a TLS-protected environment settings object to a potentially trustworthy URL, and - from non-TLS-protected environment settings objects to any origin. Requests from TLS-protected clients to non- potentially trustworthy URLs, on the other hand, will contain no referrer information. A Referer HTTP header will not be sent.",
    "type": "class",
    "file_path": "scrapy\\scrapy\\spidermiddlewares\\referer.py",
    "ast_data": "ClassDef name:StrictOriginWhenCrossOriginPolicy FunctionDef name:referrer arg:self arg:response_url arg:request_url arguments arg arg arg Assign Call If Compare Call Return return:yes Call If BoolOp BoolOp Call Call Call Return return:yes Call Return return:no"
  },
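A behavior sketch, assuming scrapy is installed: same-origin navigations keep the stripped URL (credentials and fragment removed), while a TLS-to-plain-HTTP downgrade sends nothing.

```python
from scrapy.spidermiddlewares.referer import StrictOriginWhenCrossOriginPolicy

policy = StrictOriginWhenCrossOriginPolicy()

# Same origin: the full stripped referrer is kept (fragment dropped).
print(policy.referrer("https://example.com/page?q=1#frag",
                      "https://example.com/other"))
# https://example.com/page?q=1

# TLS-protected response to a non-trustworthy (http) target: no referrer.
print(policy.referrer("https://example.com/page", "http://other.example/"))
# None
```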
  {
    "library": "tensorflow",
    "name": "_save_checkpoint",
    "source_code": "def _save_checkpoint(self, *args, **kwargs):\n    distribute_lib.distribution_strategy_input_api_counter.get_cell(self._platform_device.name, 'PreemptionCheckpointHandler Saving Checkpoint').increase_by(1)\n    logging.info('PreemptionCheckpointHandler: Starting saving a checkpoint.')\n    if self._platform_device != failure_handling_util.PlatformDevice.INTERNAL_TPU:\n        self._checkpointed_runs.assign(self.total_run_calls)\n    start_time = time.monotonic()\n    with checkpoint_context.preemption_save_context():\n        if self._save_fn:\n            self._save_fn(*args, **kwargs)\n        else:\n            self._write_checkpoint_manager.save(*args, **kwargs)\n    end_time = time.monotonic()\n    logging.info('Checkpoint finished at path %s', self._write_checkpoint_manager.directory)\n    self._checkpoint_time = end_time - start_time",
    "docstring": "Saves the checkpoint and exit program.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\failure_handling\\failure_handling.py",
    "ast_data": "FunctionDef name:_save_checkpoint arg:self arguments arg arg arg Call Call Call If Compare Call Assign Call With Call If Call Call Assign Call Call Assign"
  },
  {
    "library": "matplotlib",
    "name": "hatch",
    "source_code": "@staticmethod\n@lru_cache(8)\ndef hatch(hatchpattern, density=6):\n    from matplotlib.hatch import get_path\n    return get_path(hatchpattern, density) if hatchpattern is not None else None",
    "docstring": "Given a hatch specifier, *hatchpattern*, generates a that can be used in a repeated hatching pattern. *density* is the number of lines per unit square.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\path.py",
    "ast_data": "FunctionDef name:hatch arg:hatchpattern arg:density arguments arg arg Return return:yes Compare Call Call"
  },
  {
    "library": "scikit-learn",
    "name": "get_feature_names_out",
    "source_code": "def get_feature_names_out(self, input_features=None):\n    check_is_fitted(self, 'n_features_in_')\n    input_features = _check_feature_names_in(self, input_features, generate_names=self.passthrough)\n    class_name = self.__class__.__name__.lower()\n    non_dropped_estimators = (name for name, est in self.estimators if est != 'drop')\n    meta_names = []\n    for est, n_features_out in zip(non_dropped_estimators, self._n_feature_outs):\n        if n_features_out == 1:\n            meta_names.append(f'{class_name}_{est}')\n        else:\n            meta_names.extend((f'{class_name}_{est}{i}' for i in range(n_features_out)))\n    if self.passthrough:\n        return np.concatenate((meta_names, input_features))\n    return np.asarray(meta_names, dtype=object)",
    "docstring": "Get output feature names for transformation. Parameters ---------- input_features : array-like of str or None, default=None Input features. The input feature names are only used when is . - If is , then is used as feature names in. If is not defined, then names are generated: . - If is an array-like, then must match if is defined. If is , then only the names of are used to generate the output feature names. Returns ------- feature_names_out : ndarray of str objects Transformed feature names.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\ensemble\\_stacking.py",
    "ast_data": "FunctionDef name:get_feature_names_out arg:self arg:input_features arguments arg arg Call Assign Call Assign Call Assign Compare Assign For Call If Compare Call Call Call If Return return:yes Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_construct_forward_backward",
    "source_code": "def _construct_forward_backward(self, num_doutputs):\n    trainable_outputs = [output for output in self._func_graph.outputs[:num_doutputs] if backprop_util.IsTrainable(output)]\n    signature = []\n    for t in trainable_outputs:\n        signature.append(tensor_lib.TensorSpec(*default_gradient.shape_and_dtype(t)))\n\n    def _backprop_function(*grad_ys):\n        with ops.device(None):\n            return gradients_util._GradientsHelper(trainable_outputs, self._func_graph.inputs, grad_ys=grad_ys, src_graph=self._func_graph)\n    with self._func_graph.as_default():\n        backwards_graph = func_graph_module.FuncGraph(_backward_name(self._func_graph.name))\n        func_graph_module.func_graph_from_py_func(name=backwards_graph.name, python_func=_backprop_function, args=[], kwargs={}, signature=signature, func_graph=backwards_graph)\n        backwards_graph_captures = backwards_graph.external_captures\n        captures_from_forward = [c for c in backwards_graph_captures if not isinstance(c, ops.EagerTensor) and c.graph is self._func_graph]\n        existing_outputs = object_identity.ObjectIdentitySet(self._func_graph.outputs)\n        for capture in captures_from_forward:\n            if capture not in existing_outputs:\n                existing_outputs.add(capture)\n                self._func_graph.outputs.append(capture)\n        forward_function, backward_function = _create_forward_backward_with_graph(self._attrs, self._func_graph, backwards_graph)\n        return (forward_function, backward_function)",
    "docstring": "Constructs a pair of forward and backward functions. Args: num_doutputs: The constructed backprop function will take output gradients for the first outputs of the forward function. Defaults to the number of outputs for the inference function, but when higher-order gradients are computed this will increase to include side outputs. Returns: A pair of (forward_function, backward_function): forward_function: A re-generated inference function (an AtomicFunction) to account for new side outputs, if any extra were required when building the backward pass. backward_function: A ConcreteFunction that Takes arguments and returns gradients with respect to inputs of the forward function.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\polymorphic_function\\concrete_function.py",
    "ast_data": "FunctionDef name:_construct_forward_backward arg:self arg:num_doutputs arguments arg arg Assign Call Assign For Call Call Call FunctionDef name:_backprop_function arguments arg With Call Return return:yes Call With Call Assign Call Call Call Assign Assign BoolOp Call Compare Assign Call For If Compare Call Call Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "stack",
    "source_code": "def stack(self, name=None):\n    return self._implementation.stack(name=name)",
    "docstring": "Return the values in the TensorArray as a stacked . All of the values must have been written and their shapes must all match. If input shapes have rank-, then output shape will have rank-. For example: >>> ta = tf.TensorArray(tf.int32, size=3) >>> ta = ta.write(0, tf.constant([1, 2])) >>> ta = ta.write(1, tf.constant([3, 4])) >>> ta = ta.write(2, tf.constant([5, 6])) >>> ta.stack() Args: name: A name for the operation (optional). Returns: All the tensors in the TensorArray stacked into one tensor.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\tensor_array_ops.py",
    "ast_data": "FunctionDef name:stack arg:self arg:name arguments arg arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_process_stmt",
    "source_code": "def _process_stmt(self, node: ast.stmt) -> None:\n    if isinstance(node, (ast.ClassDef, ast.FunctionDef)):\n        self._process_def(node)\n    elif isinstance(node, ast.Assign):\n        self._process_assign(node)\n    elif isinstance(node, ast.Expr):\n        self._process_expr(node)\n    else:\n        self.visit(node)",
    "docstring": "Process top-level statement for exported apis.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\tools\\api\\generator2\\extractor\\extractor.py",
    "ast_data": "FunctionDef name:_process_stmt arg:self arg:node arguments arg arg If Call Call If Call Call If Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_shard_counts",
    "source_code": "def _shard_counts(layout: layout_lib.Layout, batch_dim: Optional[str]=None) -> List[int]:\n    shard_counts = []\n    for spec in layout.sharding_specs:\n        if spec in (batch_dim, layout_lib.UNSHARDED):\n            shard_counts.append(1)\n        else:\n            shard_counts.append(layout.mesh.dim_size(spec))\n    return shard_counts",
    "docstring": "Computes a list of the number of shards in each dimension of the layout. The shard counts are used to slice each dataset element. The batch dimension's count is overridden to 1 since we only consider how many shards to make locally (within each local replica). Sharding across clients is handled by either tf.data.Dataset's shard transformation (in the single-client case) or tf.data service's distribute function (in the multi-client case). Args: layout: the layout to compute the shard counts for. batch_dim: the name of the batch dimension of the layout, if present. Returns: A list of shard counts, one element per dimension of the layout.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\dtensor\\python\\input_util.py",
    "ast_data": "FunctionDef name:_shard_counts arg:layout arg:batch_dim arguments arg arg Assign For If Compare Call Call Call Return return:yes"
  },
  {
    "library": "pandas",
    "name": "truncate",
    "source_code": "def truncate(self) -> None:\n    if self.is_truncated_horizontally:\n        self._truncate_horizontally()\n    if self.is_truncated_vertically:\n        self._truncate_vertically()",
    "docstring": "Check whether the frame should be truncated. If so, slice the frame up.",
    "type": "method",
    "file_path": "pandas\\pandas\\io\\formats\\format.py",
    "ast_data": "FunctionDef name:truncate arg:self arguments arg If Call If Call"
  },
  {
    "library": "authlib",
    "name": "get_authorization_code_challenge",
    "source_code": "def get_authorization_code_challenge(self, authorization_code):\n    return authorization_code.code_challenge",
    "docstring": "Get \"code_challenge\" associated with this authorization code. Developers MAY re-implement it in subclass, the default logic:: def get_authorization_code_challenge(self, authorization_code): return authorization_code.code_challenge :param authorization_code: the instance of authorization_code",
    "type": "method",
    "file_path": "authlib\\authlib\\oauth2\\rfc7636\\challenge.py",
    "ast_data": "FunctionDef name:get_authorization_code_challenge arg:self arg:authorization_code arguments arg arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_reverse_seq",
    "source_code": "def _reverse_seq(input_seq, lengths):\n    if lengths is None:\n        return list(reversed(input_seq))\n    flat_input_seq = tuple((nest.flatten(input_) for input_ in input_seq))\n    flat_results = [[] for _ in range(len(input_seq))]\n    for sequence in zip(*flat_input_seq):\n        input_shape = tensor_shape.unknown_shape(rank=sequence[0].get_shape().rank)\n        for input_ in sequence:\n            input_shape.assert_is_compatible_with(input_.get_shape())\n            input_.set_shape(input_shape)\n        s_joined = array_ops_stack.stack(sequence)\n        s_reversed = array_ops.reverse_sequence(s_joined, lengths, 0, 1)\n        result = array_ops_stack.unstack(s_reversed)\n        for r, flat_result in zip(result, flat_results):\n            r.set_shape(input_shape)\n            flat_result.append(r)\n    results = [nest.pack_sequence_as(structure=input_, flat_sequence=flat_result) for input_, flat_result in zip(input_seq, flat_results)]\n    return results",
    "docstring": "Reverse a list of Tensors up to specified lengths. Args: input_seq: Sequence of seq_len tensors of dimension (batch_size, n_features) or nested tuples of tensors. lengths: A of dimension batch_size, containing lengths for each sequence in the batch. If \"None\" is specified, simply reverses the list. Returns: time-reversed sequence",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\rnn.py",
    "ast_data": "FunctionDef name:_reverse_seq arg:input_seq arg:lengths arguments arg arg If Compare Return return:yes Call Call Assign Call Call Assign Call Call For Call Assign Call Call For Call Call Call Assign Call Assign Call Assign Call For Call Call Call Assign Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "init",
    "source_code": "def init(self):\n    return self._init_op_fn(self._resource)",
    "docstring": "See .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\summary_ops_v2.py",
    "ast_data": "FunctionDef name:init arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_determine_and_instrument_traced_tensors",
    "source_code": "def _determine_and_instrument_traced_tensors(self, graph_order, ops_in_exec_path, tensor_trace_points, report_handler):\n    traced_tensors = []\n    checkpoint_operations = set([tensor.op for tensor, _ in tensor_trace_points])\n    for op_id, op in enumerate(graph_order.operations):\n        if checkpoint_operations and op not in checkpoint_operations:\n            continue\n        if self._skip_op(op_id, op, ops_in_exec_path, report_handler):\n            continue\n        for i in range(len(op.outputs)):\n            out_tensor = op.outputs[i]\n            if not self._skip_tensor(op_id, out_tensor, report_handler):\n                traced_tensors.append(out_tensor)\n    return traced_tensors",
    "docstring": "Determines the tensors to trace and instruments the trace details. Args: graph_order: graph_order tuple containing graph (tf.graph), operations (list of operations), op_to_idx (op id mapping), (tensors) list of tensors, tensor_to_idx (tensor id mapping), contains_cycle (whether there is a cycle in the graph), topological_order_or_cycle (list of ops in topological order or list of ops creating a cycle). ops_in_exec_path: Set of ops in the execution path. tensor_trace_points: Collection of programatic tensor trace points. report_handler: An instance of tensor_tracer_report.TTReportHandle. Returns: List of tensors to be traced.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\tpu\\tensor_tracer.py",
    "ast_data": "FunctionDef name:_determine_and_instrument_traced_tensors arg:self arg:graph_order arg:ops_in_exec_path arg:tensor_trace_points arg:report_handler arguments arg arg arg arg arg Assign Assign Call For Call If BoolOp Compare If Call For Call Call Assign If Call Call Return return:yes"
  },
  {
    "library": "scrapy",
    "name": "xpath",
    "source_code": "def xpath(self, *a: Any, **kw: Any) -> SelectorList:\n    raise NotSupported(\"Response content isn't text\")",
    "docstring": "Shortcut method implemented only by responses whose content is text (subclasses of TextResponse).",
    "type": "method",
    "file_path": "scrapy\\scrapy\\http\\response\\__init__.py",
    "ast_data": "FunctionDef name:xpath arg:self arguments arg arg arg Raise Call"
  },
  {
    "library": "numpy",
    "name": "MAxisConcatenator",
    "source_code": "class MAxisConcatenator(AxisConcatenator):\n    __slots__ = ()\n    concatenate = staticmethod(concatenate)\n\n    @classmethod\n    def makemat(cls, arr):\n        data = super().makemat(arr.data, copy=False)\n        return array(data, mask=arr.mask)\n\n    def __getitem__(self, key):\n        if isinstance(key, str):\n            raise MAError('Unavailable for masked array.')\n        return super().__getitem__(key)",
    "docstring": "Translate slice objects to concatenation along an axis. For documentation on usage, see . See Also -------- mr_class",
    "type": "class",
    "file_path": "numpy\\numpy\\ma\\extras.py",
    "ast_data": "ClassDef name:MAxisConcatenator Assign Assign Call FunctionDef name:makemat arg:cls arg:arr arguments arg arg Assign Call Call Return return:yes Call FunctionDef name:__getitem__ arg:self arg:key arguments arg arg If Call Raise Call Return return:yes Call Call"
  },
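A usage sketch of the concatenator this class powers: `np.ma.mr_` works like `np.r_` but preserves masks.

```python
import numpy as np

a = np.ma.array([1, 2, 3], mask=[False, True, False])
out = np.ma.mr_[a, 99, a]
print(out)       # [1 -- 3 99 1 -- 3]
print(out.mask)  # [False  True False False False  True False]
```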
  {
    "library": "django",
    "name": "transform",
    "source_code": "def transform(self, srs, driver=None, name=None, resampling='NearestNeighbour', max_error=0.0):\n    algorithm = GDAL_RESAMPLE_ALGORITHMS[resampling]\n    if isinstance(srs, SpatialReference):\n        target_srs = srs\n    elif isinstance(srs, (int, str)):\n        target_srs = SpatialReference(srs)\n    else:\n        raise TypeError('Transform only accepts SpatialReference, string, and integer objects.')\n    if target_srs.srid == self.srid and (not driver or driver == self.driver.name):\n        return self.clone(name)\n    target = capi.auto_create_warped_vrt(self._ptr, self.srs.wkt.encode(), target_srs.wkt.encode(), algorithm, max_error, c_void_p())\n    target = GDALRaster(target)\n    data = {'srid': target_srs.srid, 'width': target.width, 'height': target.height, 'origin': [target.origin.x, target.origin.y], 'scale': [target.scale.x, target.scale.y], 'skew': [target.skew.x, target.skew.y]}\n    if driver:\n        data['driver'] = driver\n    if name:\n        data['name'] = name\n    return self.warp(data, resampling=resampling, max_error=max_error)",
    "docstring": "Return a copy of this raster reprojected into the given spatial reference system.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\gdal\\raster\\source.py",
    "ast_data": "FunctionDef name:transform arg:self arg:srs arg:driver arg:name arg:resampling arg:max_error arguments arg arg arg arg arg arg Assign If Call Assign If Call Assign Call Raise Call If BoolOp Compare BoolOp Compare Return return:yes Call Assign Call Call Call Call Assign Call Assign If Assign If Assign Return return:yes Call"
  },
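A usage sketch, assuming a GDAL-enabled Django install and a hypothetical input file `elevation.tif`; the target SRS may be an int SRID, a string, or a SpatialReference.

```python
from django.contrib.gis.gdal import GDALRaster

rst = GDALRaster("elevation.tif")          # hypothetical input raster
reprojected = rst.transform(4326, name="elevation_wgs84")
print(reprojected.srid)  # 4326
```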
  {
    "library": "kornia",
    "name": "hsv_to_rgb",
    "source_code": "def hsv_to_rgb(image: torch.Tensor) -> torch.Tensor:\n    if not isinstance(image, torch.Tensor):\n        raise TypeError(f'Input type is not a torch.Tensor. Got {type(image)}')\n    if len(image.shape) < 3 or image.shape[-3] != 3:\n        raise ValueError(f'Input size must have a shape of (*, 3, H, W). Got {image.shape}')\n    h: torch.Tensor = image[..., 0, :, :] / (2 * math.pi)\n    s: torch.Tensor = image[..., 1, :, :]\n    v: torch.Tensor = image[..., 2, :, :]\n    hi: torch.Tensor = torch.floor(h * 6) % 6\n    f: torch.Tensor = h * 6 % 6 - hi\n    one: torch.Tensor = torch.tensor(1.0, device=image.device, dtype=image.dtype)\n    p: torch.Tensor = v * (one - s)\n    q: torch.Tensor = v * (one - f * s)\n    t: torch.Tensor = v * (one - (one - f) * s)\n    hi = hi.long()\n    indices: torch.Tensor = torch.stack([hi, hi + 6, hi + 12], dim=-3)\n    out = torch.stack((v, q, p, p, t, v, t, v, v, q, p, p, p, p, t, v, v, q), dim=-3)\n    out = torch.gather(out, -3, indices)\n    return out",
    "docstring": "Convert an image from HSV to RGB. The H channel values are assumed to be in the range 0..2pi. S and V are in the range 0..1. Args: image: HSV Image to be converted to HSV with shape of :math:. Returns: RGB version of the image with shape of :math:. Example: >>> input = torch.rand(2, 3, 4, 5) >>> output = hsv_to_rgb(input) # 2x3x4x5",
    "type": "function",
    "file_path": "kornia\\kornia\\color\\hsv.py",
    "ast_data": "FunctionDef name:hsv_to_rgb arg:image arguments arg If Call Raise Call Call If BoolOp Compare Call Compare Raise Call Call Call Assign Call Call Assign Call Assign Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "standalone_compile",
    "source_code": "def standalone_compile(gm: torch.fx.GraphModule, example_inputs: list[InputType], *, dynamic_shapes: Literal['from_example_inputs', 'from_tracing_context', 'from_graph']='from_graph', options: Optional[dict[str, Any]]=None) -> CompiledArtifact:\n    from .standalone_compile import standalone_compile\n    options = options if options else {}\n    return standalone_compile(gm, example_inputs, dynamic_shapes=dynamic_shapes, options=options)",
    "docstring": "Precompilation API for inductor. .. code-block:: python compiled_artifact = torch._inductor.standalone_compile(gm, args) compiled_artifact.save(path=path, format=\"binary\") # Later on a new process loaded = torch._inductor.CompiledArtifact.load(path=path, format=\"binary\") compiled_out = loaded(*args) Args: gm: Graph Module example_inputs: Inputs for the graph module dynamic_shapes: If \"from_graph\" (default), we will use the dynamic shapes in the passed-in graph module. If \"from_tracing_context\", we use the dynamic shape info in the ambient tracing context. If \"from_example_inputs\", we will specialize the graph on the example_inputs. options: Inductor compilation options Returns: CompiledArtifact that can be saved to disk or invoked directly.",
    "type": "function",
    "file_path": "pytorch\\torch\\_inductor\\__init__.py",
    "ast_data": "FunctionDef name:standalone_compile arg:gm arg:example_inputs arguments arg arg arg arg Assign Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "format_percentiles",
    "source_code": "def format_percentiles(percentiles: np.ndarray | Sequence[float]) -> list[str]:\n    if len(percentiles) == 0:\n        return []\n    percentiles = np.asarray(percentiles)\n    if not is_numeric_dtype(percentiles) or not np.all(percentiles >= 0) or (not np.all(percentiles <= 1)):\n        raise ValueError('percentiles should all be in the interval [0,1]')\n    percentiles = 100 * percentiles\n    prec = get_precision(percentiles)\n    percentiles_round_type = percentiles.round(prec).astype(int)\n    int_idx = np.isclose(percentiles_round_type, percentiles)\n    if np.all(int_idx):\n        out = percentiles_round_type.astype(str)\n        return [i + '%' for i in out]\n    unique_pcts = np.unique(percentiles)\n    prec = get_precision(unique_pcts)\n    out = np.empty_like(percentiles, dtype=object)\n    out[int_idx] = percentiles[int_idx].round().astype(int).astype(str)\n    out[~int_idx] = percentiles[~int_idx].round(prec).astype(str)\n    return [i + '%' for i in out]",
    "docstring": "Outputs rounded and formatted percentiles. Parameters ---------- percentiles : list-like, containing floats from interval [0,1] Returns ------- formatted : list of strings Notes ----- Rounding precision is chosen so that: (1) if any two elements of `` differ, they remain different after rounding (2) no entry is *rounded* to 0% or 100%. Any non-integer is always rounded to at least 1 decimal place. Examples -------- Keeps all entries different after rounding: >>> format_percentiles([0.01999, 0.02001, 0.5, 0.666666, 0.9999]) ['1.999%', '2.001%', '50%', '66.667%', '99.99%'] No element is rounded to 0% or 100% (unless already equal to it). Duplicates are allowed: >>> format_percentiles([0, 0.5, 0.02001, 0.5, 0.666666, 0.9999]) ['0%', '50%', '2.0%', '50%', '66.67%', '99.99%']",
    "type": "function",
    "file_path": "pandas\\pandas\\io\\formats\\format.py",
    "ast_data": "FunctionDef name:format_percentiles arg:percentiles arguments arg If Compare Call Return return:no Assign Call If BoolOp Call Call Compare Call Compare Raise Call Assign Assign Call Assign Call Call Assign Call If Call Assign Call Return return:yes Assign Call Assign Call Assign Call Assign Call Call Call Assign Call Call Return return:yes"
  },
  {
    "library": "scipy",
    "name": "Problem08",
    "source_code": "class Problem08(Benchmark):\n\n    def __init__(self, dimensions=1):\n        Benchmark.__init__(self, dimensions)\n        self._bounds = [(-10, 10)]\n        self.global_optimum = -7.083506\n        self.fglob = -14.508\n\n    def fun(self, x, *args):\n        self.nfev += 1\n        x = x[0]\n        y = 0.0\n        for k in range(1, 6):\n            y += k * cos((k + 1) * x + k)\n        return -y",
    "docstring": "Univariate Problem08 objective function. This class defines the Univariate Problem08 global optimization problem. This is a multimodal minimization problem defined as follows: .. math:: f_{\\text{Problem08}}(x) = - \\sum_{k=1}^6 k \\cos[(k+1)x+k] Bound constraints: :math: .. figure:: figures/Problem08.png :alt: Univariate Problem08 function :align: center **Univariate Problem08 function** *Global optimum*: :math: for :math:",
    "type": "class",
    "file_path": "scipy\\benchmarks\\benchmarks\\go_benchmark_functions\\go_funcs_univariate.py",
    "ast_data": "ClassDef name:Problem08 FunctionDef name:__init__ arg:self arg:dimensions arguments arg arg Call Assign Assign Assign FunctionDef name:fun arg:self arg:x arguments arg arg arg Assign Assign For Call Call Return return:yes"
  },
  {
    "library": "pandas",
    "name": "array_values",
    "source_code": "def array_values(self) -> ExtensionArray:\n    return self._block.array_values",
    "docstring": "The array that Series.array returns",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\internals\\managers.py",
    "ast_data": "FunctionDef name:array_values arg:self arguments arg Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "_compute_proba_from_decision",
    "source_code": "@staticmethod\ndef _compute_proba_from_decision(decision, n_classes):\n    if n_classes == 2:\n        decision = np.vstack([-decision, decision]).T / 2\n    else:\n        decision /= n_classes - 1\n    return softmax(decision, copy=False)",
    "docstring": "Compute probabilities from the decision function. This is based eq. (15) of [1] where: p(y=c|X) = exp((1 / K-1) f_c(X)) / sum_k(exp((1 / K-1) f_k(X))) = softmax((1 / K-1) * f(X)) References ---------- .. [1] J. Zhu, H. Zou, S. Rosset, T. Hastie, \"Multi-class AdaBoost\", 2009.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\ensemble\\_weight_boosting.py",
    "ast_data": "FunctionDef name:_compute_proba_from_decision arg:decision arg:n_classes arguments arg arg If Compare Assign Call Return return:yes Call"
  },
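A standalone sketch of the binary (K=2) branch of eq. (15), using scipy's softmax in place of scikit-learn's internal helper: stacking (-f, f)/2 and applying softmax reduces to a logistic transform of f(X).

```python
import numpy as np
from scipy.special import softmax

decision = np.array([-1.0, 0.0, 2.0])          # f(X) for 3 samples, K=2
scores = np.vstack([-decision, decision]).T / 2
proba = softmax(scores, axis=1)
print(proba[:, 1])  # [0.26894142 0.5        0.88079708] == sigmoid(f)
```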
  {
    "library": "pytorch",
    "name": "Softsign",
    "source_code": "class Softsign(Module):\n\n    def forward(self, input: Tensor) -> Tensor:\n        return F.softsign(input)",
    "docstring": "Applies the element-wise Softsign function. .. math:: \\text{SoftSign}(x) = \\frac{x}{ 1 + |x|} Shape: - Input: :math:, where :math: means any number of dimensions. - Output: :math:, same shape as the input. .. image:: ../scripts/activation_images/Softsign.png Examples:: >>> m = nn.Softsign() >>> input = torch.randn(2) >>> output = m(input)",
    "type": "class",
    "file_path": "pytorch\\torch\\nn\\modules\\activation.py",
    "ast_data": "ClassDef name:Softsign FunctionDef name:forward arg:self arg:input arguments arg arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "_convert_mask",
    "source_code": "def _convert_mask(self, states, sparse_coo=True):\n    states = copy.deepcopy(states)\n    for state in states.values():\n        if sparse_coo:\n            state['mask'] = state['mask'].to_sparse_coo()\n        else:\n            state['mask'] = state['mask'].to_dense()\n    return states",
    "docstring": "Converts the mask to sparse coo or dense tensors depending on the argument.",
    "type": "method",
    "file_path": "pytorch\\torch\\ao\\pruning\\_experimental\\data_sparsifier\\base_data_sparsifier.py",
    "ast_data": "FunctionDef name:_convert_mask arg:self arg:states arg:sparse_coo arguments arg arg arg Assign Call For Call If Assign Call Assign Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "DetectorBase",
    "source_code": "class DetectorBase(ABC):\n\n    def __init__(self) -> None:\n        super().__init__()\n        self.detector_config_info = None\n\n    @abstractmethod\n    def determine_observer_insert_points(self, model) -> dict:\n        pass\n\n    @abstractmethod\n    def get_detector_name(self) -> str:\n        pass\n\n    @abstractmethod\n    def get_qconfig_info(self, model) -> dict[str, DetectorQConfigInfo]:\n        pass\n\n    def _get_targeting_node(self, prepared_fx_model: GraphModule, target_fqn: str) -> torch.fx.node.Node:\n        for node in prepared_fx_model.graph.nodes:\n            if node.target == target_fqn:\n                return node\n        parent_fqn_sep_index = target_fqn.rfind('.')\n        if parent_fqn_sep_index == -1:\n            raise ValueError(\"passed in target_fqn not found in graph's targets.\")\n        else:\n            return self._get_targeting_node(prepared_fx_model, target_fqn[:parent_fqn_sep_index])\n\n    @abstractmethod\n    def generate_detector_report(self, model) -> tuple[str, dict[str, Any]]:\n        pass",
    "docstring": "Base Detector Module Any detector class should derive from this class. Concrete detectors should follow the same general API, which includes: - A method to calculate and return observer insertion points - Should return both the fqns and the Observer class to insert - A method to return a report based on the detector - Should return a str-based report and dict info in Tuple[str,Dict] format",
    "type": "class",
    "file_path": "pytorch\\torch\\ao\\quantization\\fx\\_model_report\\detector.py",
    "ast_data": "ClassDef name:DetectorBase FunctionDef name:__init__ arg:self arguments arg Call Call Assign FunctionDef name:determine_observer_insert_points arg:self arg:model arguments arg arg FunctionDef name:get_detector_name arg:self arguments arg FunctionDef name:get_qconfig_info arg:self arg:model arguments arg arg FunctionDef name:_get_targeting_node arg:self arg:prepared_fx_model arg:target_fqn arguments arg arg arg For If Compare Return return:yes Assign Call If Compare Raise Call Return return:yes Call FunctionDef name:generate_detector_report arg:self arg:model arguments arg arg"
  },
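A minimal subclass sketch of the abstract surface, with illustrative no-op stubs (not a real analysis); the import path is the private module listed above and may change.

```python
from torch.ao.quantization.fx._model_report.detector import DetectorBase


class NoOpDetector(DetectorBase):
    """Illustrative stub satisfying the abstract API."""

    def determine_observer_insert_points(self, model) -> dict:
        return {}  # fqn -> observer-insertion info

    def get_detector_name(self) -> str:
        return "no_op_detector"

    def get_qconfig_info(self, model) -> dict:
        return {}  # fqn -> DetectorQConfigInfo

    def generate_detector_report(self, model):
        return ("no issues found", {})
```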
  {
    "library": "kornia",
    "name": "_nullspace",
    "source_code": "def _nullspace(A: Tensor) -> Tuple[Tensor, Tensor]:\n    _, s, v = _torch_svd_cast(A)\n    return (s[..., -1], v[..., -1])",
    "docstring": "Compute the null space of A. Return the smallest singular value and the corresponding vector.",
    "type": "function",
    "file_path": "kornia\\kornia\\geometry\\epipolar\\projection.py",
    "ast_data": "FunctionDef name:_nullspace arg:A arguments arg Assign Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "is_warn_always_enabled",
    "source_code": "def is_warn_always_enabled() -> builtins.bool:\n    return _C._get_warnAlways()",
    "docstring": "Returns True if the global warn_always flag is turned on. Refer to :func: documentation for more details.",
    "type": "function",
    "file_path": "pytorch\\torch\\__init__.py",
    "ast_data": "FunctionDef name:is_warn_always_enabled arguments Return return:yes Call"
  },
  {
    "library": "kornia",
    "name": "_deformable_attention_kernel",
    "source_code": "def _deformable_attention_kernel(value: Tensor, value_spatial_shapes: list[tuple[int, int]], sampling_locations: Tensor, attention_weights: Tensor) -> Tensor:\n    bs, _, n_head, c = value.shape\n    _, Len_q, _, n_levels, n_points, _ = sampling_locations.shape\n    split_shape: list[int] = [h * w for h, w in value_spatial_shapes]\n    value_list = value.split(split_shape, dim=1)\n    sampling_grids = 2 * sampling_locations - 1\n    sampling_value_list: list[Tensor] = []\n    for level, (h, w) in enumerate(value_spatial_shapes):\n        value_l_ = value_list[level].flatten(2).permute(0, 2, 1).reshape(bs * n_head, c, h, w)\n        sampling_grid_l_ = sampling_grids[:, :, :, level].permute(0, 2, 1, 3, 4).flatten(0, 1)\n        sampling_value_l_ = torch.nn.functional.grid_sample(value_l_, sampling_grid_l_, mode='bilinear', padding_mode='zeros', align_corners=False)\n        sampling_value_list.append(sampling_value_l_)\n    attention_weights = attention_weights.permute(0, 2, 1, 3, 4).reshape(bs * n_head, 1, Len_q, n_levels * n_points)\n    output = (torch.stack(sampling_value_list, dim=-2).flatten(-2) * attention_weights).sum(-1).reshape(bs, n_head * c, Len_q)\n    return output.permute(0, 2, 1)",
    "docstring": "Deformable Attention Kernel used in Deformable DETR. Described in Args: value: shape (N, Lv, n_head * C) value_spatial_shapes: [(H0, W0), (H1, W1), ...] sampling_locations: shape (N, Lq, n_head, n_levels, n_points, 2) attention_weights: shape (N, Lq, n_head, n_levels, n_points) Returns: output, shape (N, Lq, n_head * C)",
    "type": "function",
    "file_path": "kornia\\kornia\\contrib\\models\\rt_detr\\architecture\\rtdetr_head.py",
    "ast_data": "FunctionDef name:_deformable_attention_kernel arg:value arg:value_spatial_shapes arg:sampling_locations arg:attention_weights arguments arg arg arg arg Assign Assign Assign Call Assign For Call Assign Call Call Call Assign Call Call Assign Call Call Assign Call Call Assign Call Call Call Call Return return:yes Call"
  },
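A shape-level sketch of the kernel's contract with random inputs; the import path is private and may change, and the weight normalization here is only illustrative.

```python
import torch
from kornia.contrib.models.rt_detr.architecture.rtdetr_head import (
    _deformable_attention_kernel,
)

bs, n_head, c = 2, 8, 16
shapes = [(32, 32), (16, 16)]            # (H, W) per feature level
Lv = sum(h * w for h, w in shapes)       # total value length
Lq, n_levels, n_points = 100, len(shapes), 4

value = torch.rand(bs, Lv, n_head, c)
locs = torch.rand(bs, Lq, n_head, n_levels, n_points, 2)
weights = torch.rand(bs, Lq, n_head, n_levels, n_points).softmax(-1)

out = _deformable_attention_kernel(value, shapes, locs, weights)
print(out.shape)  # torch.Size([2, 100, 128]) == (N, Lq, n_head * C)
```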
  {
    "library": "matplotlib",
    "name": "set_gapcolor",
    "source_code": "def set_gapcolor(self, gapcolor):\n    self._original_gapcolor = gapcolor\n    self._set_gapcolor(gapcolor)",
    "docstring": "Set a color to fill the gaps in the dashed line style. .. note:: Striped lines are created by drawing two interleaved dashed lines. There can be overlaps between those two, which may result in artifacts when using transparency. This functionality is experimental and may change. Parameters ---------- gapcolor : :mpltype: or list of :mpltype: or None The color with which to fill the gaps. If None, the gaps are unfilled.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\collections.py",
    "ast_data": "FunctionDef name:set_gapcolor arg:self arg:gapcolor arguments arg arg Assign Call"
  },
  {
    "library": "tensorflow",
    "name": "_serialize_signature_def_map",
    "source_code": "def _serialize_signature_def_map(signature_def_map: _SignatureDefMap) -> dict[str, bytes]:\n    signature_def_map_serialized = {}\n    for key, signature_def in signature_def_map.items():\n        signature_def_map_serialized[key] = signature_def.SerializeToString()\n    return signature_def_map_serialized",
    "docstring": "Serializes SignatureDef values in . Args: signature_def_map: Signature key -> SignatureDef mapping. Returns: Signature def map where the values () are serialized.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\compiler\\mlir\\quantization\\tensorflow\\python\\quantize_model.py",
    "ast_data": "FunctionDef name:_serialize_signature_def_map arg:signature_def_map arguments arg Assign For Call Assign Call Return return:yes"
  },
  {
    "library": "pandas",
    "name": "__dlpack__",
    "source_code": "@abstractmethod\ndef __dlpack__(self):\n    raise NotImplementedError('__dlpack__')",
    "docstring": "Produce DLPack capsule (see array API standard). Raises: - TypeError : if the buffer contains unsupported dtypes. - NotImplementedError : if DLPack support is not implemented Useful to have to connect to array libraries. Support optional because it's not completely trivial to implement for a Python-only library.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\interchange\\dataframe_protocol.py",
    "ast_data": "FunctionDef name:__dlpack__ arg:self arguments arg Raise Call"
  },
  {
    "library": "scipy",
    "name": "aps14_f",
    "source_code": "def aps14_f(x, n):\n    if x <= 0:\n        return -n / 20.0\n    return n / 20.0 * (x / 1.5 + np.sin(x) - 1)",
    "docstring": "0 for negative x-values, trigonometric+linear for x positive",
    "type": "function",
    "file_path": "scipy\\scipy\\optimize\\_tstutils.py",
    "ast_data": "FunctionDef name:aps14_f arg:x arg:n arguments arg arg If Compare Return return:yes Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "_has_same_id_matched_objs",
    "source_code": "def _has_same_id_matched_objs(frame: DynamoFrameType, cache_entry) -> bool:\n    if not cache_entry:\n        return False\n    for local_name, weakref_from_cache_entry in cache_entry.guard_manager.id_matched_objs.items():\n        if weakref_from_cache_entry() is not None:\n            weakref_from_frame = _get_weakref_from_f_locals(frame, local_name)\n            if weakref_from_frame is not weakref_from_cache_entry:\n                return False\n    return True",
    "docstring": "Checks if the ID_MATCH'd objects saved on cache_entry are same as the ones in frame.f_locals.",
    "type": "function",
    "file_path": "pytorch\\torch\\_dynamo\\cache_size.py",
    "ast_data": "FunctionDef name:_has_same_id_matched_objs arg:frame arg:cache_entry arguments arg arg If Return return:yes For Call If Compare Call Assign Call If Compare Return return:yes Return return:yes"
  },
  {
    "library": "authlib",
    "name": "parse_id_token",
    "source_code": "def parse_id_token(self, token, nonce, claims_options=None, claims_cls=None, leeway=120):\n    if 'id_token' not in token:\n        return None\n    load_key = self.create_load_key()\n    claims_params = dict(nonce=nonce, client_id=self.client_id)\n    if claims_cls is None:\n        if 'access_token' in token:\n            claims_params['access_token'] = token['access_token']\n            claims_cls = CodeIDToken\n        else:\n            claims_cls = ImplicitIDToken\n    metadata = self.load_server_metadata()\n    if claims_options is None and 'issuer' in metadata:\n        claims_options = {'iss': {'values': [metadata['issuer']]}}\n    alg_values = metadata.get('id_token_signing_alg_values_supported')\n    if alg_values:\n        _jwt = JsonWebToken(alg_values)\n    else:\n        _jwt = jwt\n    claims = _jwt.decode(token['id_token'], key=load_key, claims_cls=claims_cls, claims_options=claims_options, claims_params=claims_params)\n    if claims.get('nonce_supported') is False:\n        claims.params['nonce'] = None\n    claims.validate(leeway=leeway)\n    return UserInfo(claims)",
    "docstring": "Return an instance of UserInfo from token's ``.",
    "type": "method",
    "file_path": "authlib\\authlib\\integrations\\base_client\\sync_openid.py",
    "ast_data": "FunctionDef name:parse_id_token arg:self arg:token arg:nonce arg:claims_options arg:claims_cls arg:leeway arguments arg arg arg arg arg arg If Compare Return return:no Assign Call Assign Call If Compare If Compare Assign Assign Assign Assign Call If BoolOp Compare Compare Assign Assign Call If Assign Call Assign Assign Call If Compare Call Assign Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_init_local_init_op",
    "source_code": "def _init_local_init_op(self, local_init_op=USE_DEFAULT):\n    if local_init_op is Supervisor.USE_DEFAULT:\n        local_init_op = self._get_first_op_from_collection(ops.GraphKeys.LOCAL_INIT_OP)\n        if local_init_op is None:\n            op_list = [variables.local_variables_initializer(), lookup_ops.tables_initializer()]\n            if op_list:\n                local_init_op = control_flow_ops.group(*op_list)\n                ops.add_to_collection(ops.GraphKeys.LOCAL_INIT_OP, local_init_op)\n    self._local_init_op = local_init_op",
    "docstring": "Initializes local_init_op. Args: local_init_op: run for every new supervisor instance. If set to USE_DEFAULT, use the first op from the GraphKeys.LOCAL_INIT_OP collection. If the collection is empty, create an op that initializes all local variables and all tables.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\training\\supervisor.py",
    "ast_data": "FunctionDef name:_init_local_init_op arg:self arg:local_init_op arguments arg arg If Compare Assign Call If Compare Assign Call Call If Assign Call Call Assign"
  },
  {
    "library": "pytorch",
    "name": "_GeneralMultiDeviceReplicator",
    "source_code": "class _GeneralMultiDeviceReplicator(_MultiDeviceReplicator):\n\n    def __init__(self, master_tensor: torch.Tensor) -> None:\n        assert _is_supported_device(master_tensor)\n        self.master = master_tensor\n        self._per_device_tensors: dict[torch.device, torch.Tensor] = {}",
    "docstring": "Lazily serves tensor to request device. This class extends _MultiDeviceReplicator to allow support for \"cpu\" as a device.",
    "type": "class",
    "file_path": "pytorch\\torch\\distributed\\fsdp\\sharded_grad_scaler.py",
    "ast_data": "ClassDef name:_GeneralMultiDeviceReplicator FunctionDef name:__init__ arg:self arg:master_tensor arguments arg arg Call Assign"
  },
  {
    "library": "numpy",
    "name": "lag2poly",
    "source_code": "def lag2poly(c):\n    from .polynomial import polyadd, polymulx, polysub\n    [c] = pu.as_series([c])\n    n = len(c)\n    if n == 1:\n        return c\n    else:\n        c0 = c[-2]\n        c1 = c[-1]\n        for i in range(n - 1, 1, -1):\n            tmp = c0\n            c0 = polysub(c[i - 2], c1 * (i - 1) / i)\n            c1 = polyadd(tmp, polysub((2 * i - 1) * c1, polymulx(c1)) / i)\n        return polyadd(c0, polysub(c1, polymulx(c1)))",
    "docstring": "Convert a Laguerre series to a polynomial. Convert an array representing the coefficients of a Laguerre series, ordered from lowest degree to highest, to an array of the coefficients of the equivalent polynomial (relative to the \"standard\" basis) ordered from lowest to highest degree. Parameters ---------- c : array_like 1-D array containing the Laguerre series coefficients, ordered from lowest order term to highest. Returns ------- pol : ndarray 1-D array containing the coefficients of the equivalent polynomial (relative to the \"standard\" basis) ordered from lowest order term to highest. See Also -------- poly2lag Notes ----- The easy way to do conversions between polynomial basis sets is to use the convert method of a class instance. Examples -------- >>> from numpy.polynomial.laguerre import lag2poly >>> lag2poly([ 23., -63., 58., -18.]) array([0., 1., 2., 3.])",
    "type": "function",
    "file_path": "numpy\\numpy\\polynomial\\laguerre.py",
    "ast_data": "FunctionDef name:lag2poly arg:c arguments arg Assign Call Assign Call If Compare Return return:yes Assign Assign For Call Assign Assign Call Assign Call Call Call Return return:yes Call Call Call"
  },
  {
    "library": "matplotlib",
    "name": "onmove",
    "source_code": "def onmove(self, event):\n    if self.ignore(event):\n        return\n    if not self.canvas.widgetlock.available(self):\n        return\n    if not self.ax.contains(event)[0]:\n        self.linev.set_visible(False)\n        self.lineh.set_visible(False)\n        if self.needclear:\n            self.canvas.draw()\n            self.needclear = False\n        return\n    self.needclear = True\n    xdata, ydata = self._get_data_coords(event)\n    self.linev.set_xdata((xdata, xdata))\n    self.linev.set_visible(self.visible and self.vertOn)\n    self.lineh.set_ydata((ydata, ydata))\n    self.lineh.set_visible(self.visible and self.horizOn)\n    if not (self.visible and (self.vertOn or self.horizOn)):\n        return\n    if self.useblit:\n        if self.background is not None:\n            self.canvas.restore_region(self.background)\n        self.ax.draw_artist(self.linev)\n        self.ax.draw_artist(self.lineh)\n        self.canvas.blit(self.ax.bbox)\n    else:\n        self.canvas.draw_idle()",
    "docstring": "Internal event handler to draw the cursor when the mouse moves.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\widgets.py",
    "ast_data": "FunctionDef name:onmove arg:self arg:event arguments arg arg If Call Return return:no If Call Return return:no If Call Call Call If Call Assign Return return:no Assign Assign Call Call Call BoolOp Call Call BoolOp If BoolOp BoolOp Return return:no If If Compare Call Call Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_rot90_3D",
    "source_code": "def _rot90_3D(image, k, name_scope):\n\n    def _rot90():\n        return array_ops.transpose(array_ops.reverse_v2(image, [1]), [1, 0, 2])\n\n    def _rot180():\n        return array_ops.reverse_v2(image, [0, 1])\n\n    def _rot270():\n        return array_ops.reverse_v2(array_ops.transpose(image, [1, 0, 2]), [1])\n    cases = [(math_ops.equal(k, 1), _rot90), (math_ops.equal(k, 2), _rot180), (math_ops.equal(k, 3), _rot270)]\n    result = control_flow_case.case(cases, default=lambda: image, exclusive=True, name=name_scope)\n    result.set_shape([None, None, image.get_shape()[2]])\n    return result",
    "docstring": "Rotate image counter-clockwise by 90 degrees times. Args: image: 3-D Tensor of shape . k: A scalar integer. The number of times the image is rotated by 90 degrees. name_scope: A valid TensorFlow name scope. Returns: A 3-D tensor of the same type and shape as .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\image_ops_impl.py",
    "ast_data": "FunctionDef name:_rot90_3D arg:image arg:k arg:name_scope arguments arg arg arg FunctionDef name:_rot90 arguments Return return:yes Call Call FunctionDef name:_rot180 arguments Return return:yes Call FunctionDef name:_rot270 arguments Return return:yes Call Call Assign Call Call Call Assign Call arguments Call Call Return return:yes"
  },
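The public wrapper around this helper is `tf.image.rot90`; a quick sketch of the shape bookkeeping:

```python
import tensorflow as tf

img = tf.reshape(tf.range(2 * 3 * 1), [2, 3, 1])  # height=2, width=3
print(tf.image.rot90(img, k=1).shape)  # (3, 2, 1): one CCW quarter turn
```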
  {
    "library": "tensorflow",
    "name": "function_callback",
    "source_code": "def function_callback(self, function):\n    graph_id = self._get_context_id(function.graph)\n    with self._context_lock:\n        self._function_to_graph_id[function] = graph_id",
    "docstring": "A callback to be called on creation of ConcreteFunctions.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\debug\\lib\\dumping_callback.py",
    "ast_data": "FunctionDef name:function_callback arg:self arg:function arguments arg arg Assign Call With Assign"
  },
  {
    "library": "pytorch",
    "name": "trace_dependencies",
    "source_code": "def trace_dependencies(callable: Callable[[Any], Any], inputs: Iterable[tuple[Any, ...]]) -> list[str]:\n    modules_used = set()\n\n    def record_used_modules(frame, event, arg):\n        if event != 'call':\n            return\n        name = frame.f_code.co_name\n        module = None\n        if name in frame.f_globals:\n            module = frame.f_globals[name].__module__\n        elif name in frame.f_locals:\n            module = frame.f_locals[name].__module__\n        elif 'self' in frame.f_locals:\n            method = getattr(frame.f_locals['self'], name, None)\n            module = method.__module__ if method else None\n        if module:\n            modules_used.add(module)\n    try:\n        sys.setprofile(record_used_modules)\n        for inp in inputs:\n            callable(*inp)\n    finally:\n        sys.setprofile(None)\n    return list(modules_used)",
    "docstring": "Trace the execution of a callable in order to determine which modules it uses. Args: callable: The callable to execute and trace. inputs: The input to use during tracing. The modules used by 'callable' when invoked by each set of inputs are union-ed to determine all modules used by the callable for the purpooses of packaging. Returns: A list of the names of all modules used during callable execution.",
    "type": "function",
    "file_path": "pytorch\\torch\\package\\analyze\\trace_dependencies.py",
    "ast_data": "FunctionDef name:trace_dependencies arg:callable arg:inputs arguments arg arg Assign Call FunctionDef name:record_used_modules arg:frame arg:event arg:arg arguments arg arg arg If Compare Return return:no Assign Assign If Compare Assign If Compare Assign If Compare Assign Call Assign If Call Try Call For Call Call Return return:yes Call"
  },
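A usage sketch: profile a callable over sample inputs and collect the modules its execution touched (this is what torch.package uses to decide what to bundle).

```python
import json

from torch.package.analyze.trace_dependencies import trace_dependencies


def roundtrip(s: str) -> str:
    return json.loads(json.dumps({"v": s}))["v"]


# One tuple of positional args per invocation to trace.
mods = trace_dependencies(roundtrip, [("hello",)])
print("json" in mods)  # True: json encoder/decoder functions were called
```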
  {
    "library": "pytorch",
    "name": "__init__",
    "source_code": "def __init__(self, config_module: ConfigModule, test_model_fn_factory: FactoryType, seed: int, default: Optional[ConfigType]=None, sm: SamplingMethod=SamplingMethod.TOGGLE, test_timeout: int=3600):\n    if sys.version_info < (3, 10):\n        log.error('Only python 3.10 and later supported')\n        return\n    self.seed = seed\n    self.test_timeout = test_timeout\n    self.detailed_results: dict[ComboType, dict[str, Any]] = {}\n    self.config_module = config_module\n    self.test_model_fn_factory = test_model_fn_factory\n    self.fields: dict[str, _ConfigEntry] = self.config_module._config\n    self.sample = SamplingMethod.dispatch(sm)\n    if default is None:\n        if self.config_module.__name__ in MODULE_DEFAULTS:\n            self.default = MODULE_DEFAULTS[self.config_module.__name__]\n        else:\n            raise ValueError('No default passed to ConfigFuzzer.')\n    else:\n        self.default = default",
    "docstring": "Args: config_module: The module containing the configs to fuzz test_model_fn_factory: Function that returns a test model, which runs and returns True if successful, or the outputs if they should be compared with eager seed: Randomness seed. default: Default values for the config. Inductor has preset based on know failures. sm: How type value samples are generated, default TOGGLE. test_timeout: max time a test can take.",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\fuzzer.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:config_module arg:test_model_fn_factory arg:seed arg:default arg:sm arg:test_timeout arguments arg arg arg arg arg arg arg If Compare Call Return return:no Assign Assign Assign Assign Assign Call If Compare If Compare Assign Raise Call Assign"
  },
  {
    "library": "pytorch",
    "name": "type_to_dtype",
    "source_code": "def type_to_dtype(typ: type) -> torch.dtype:\n    assert isinstance(typ, type)\n    if typ in (bool, torch.SymBool):\n        return torch.bool\n    if typ in (int, torch.SymInt):\n        return torch.long\n    if typ in (float, torch.SymFloat):\n        return torch.get_default_dtype()\n    if typ is complex:\n        return corresponding_complex_dtype(torch.get_default_dtype())\n    raise ValueError(f'Invalid type {typ}!')",
    "docstring": "Computes the corresponding dtype for a Number type.",
    "type": "function",
    "file_path": "pytorch\\torch\\_prims_common\\__init__.py",
    "ast_data": "FunctionDef name:type_to_dtype arg:typ arguments arg Call If Compare Return return:yes If Compare Return return:yes If Compare Return return:yes Call If Compare Return return:yes Call Call Raise Call"
  },
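A quick check of the mapping implemented by `type_to_dtype`; a sketch assuming the private `torch._prims_common` import path shown in the record remains importable:

```python
import torch
from torch._prims_common import type_to_dtype

assert type_to_dtype(bool) is torch.bool
assert type_to_dtype(int) is torch.long
# float follows the current default dtype (torch.float32 unless changed).
assert type_to_dtype(float) is torch.get_default_dtype()
# complex maps to the complex dtype paired with the default
# (torch.complex64 when the default dtype is float32).
print(type_to_dtype(complex))
```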
  {
    "library": "pandas",
    "name": "type",
    "source_code": "@property\ndef type(self) -> type[np.generic]:\n    return self._dtype.type",
    "docstring": "The type object used to instantiate a scalar of this NumPy data-type.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\dtypes\\dtypes.py",
    "ast_data": "FunctionDef name:type arg:self arguments arg Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "log_likelihood",
    "source_code": "@validate_params({'emp_cov': [np.ndarray], 'precision': [np.ndarray]}, prefer_skip_nested_validation=True)\ndef log_likelihood(emp_cov, precision):\n    p = precision.shape[0]\n    log_likelihood_ = -np.sum(emp_cov * precision) + fast_logdet(precision)\n    log_likelihood_ -= p * np.log(2 * np.pi)\n    log_likelihood_ /= 2.0\n    return log_likelihood_",
    "docstring": "Compute the sample mean of the log_likelihood under a covariance model. Computes the empirical expected log-likelihood, allowing for universal comparison (beyond this software package), and accounts for normalization terms and scaling. Parameters ---------- emp_cov : ndarray of shape (n_features, n_features) Maximum Likelihood Estimator of covariance. precision : ndarray of shape (n_features, n_features) The precision matrix of the covariance model to be tested. Returns ------- log_likelihood_ : float Sample mean of the log-likelihood.",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\covariance\\_empirical_covariance.py",
    "ast_data": "FunctionDef name:log_likelihood arg:emp_cov arg:precision arguments arg arg Assign Assign Call Call Call Return return:yes Call"
  },
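An illustrative call to `log_likelihood` via scikit-learn's public covariance helpers, on random data for demonstration only:

```python
import numpy as np
from sklearn.covariance import empirical_covariance, log_likelihood

rng = np.random.default_rng(0)
X = rng.normal(size=(200, 3))
emp_cov = empirical_covariance(X)
precision = np.linalg.inv(emp_cov)  # precision of the model being scored
print(log_likelihood(emp_cov, precision))  # sample mean log-likelihood
```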
  {
    "library": "scikit-learn",
    "name": "fit",
    "source_code": "@_fit_context(prefer_skip_nested_validation=True)\ndef fit(self, X, y=None):\n    if self.fit_inverse_transform and self.kernel == 'precomputed':\n        raise ValueError('Cannot fit_inverse_transform with a precomputed kernel.')\n    X = validate_data(self, X, accept_sparse='csr', copy=self.copy_X)\n    self.gamma_ = 1 / X.shape[1] if self.gamma is None else self.gamma\n    self._centerer = KernelCenterer().set_output(transform='default')\n    K = self._get_kernel(X)\n    self._fit_transform_in_place(K)\n    if self.fit_inverse_transform:\n        X_transformed = self.eigenvectors_ * np.sqrt(self.eigenvalues_)\n        self._fit_inverse_transform(X_transformed, X)\n    self.X_fit_ = X\n    return self",
    "docstring": "Fit the model from data in X. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) Training vector, where is the number of samples and is the number of features. y : Ignored Not used, present for API consistency by convention. Returns ------- self : object Returns the instance itself.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\decomposition\\_kernel_pca.py",
    "ast_data": "FunctionDef name:fit arg:self arg:X arg:y arguments arg arg arg If BoolOp Compare Raise Call Assign Call Assign Compare Assign Call Call Assign Call Call If Assign Call Call Assign Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "lookup",
    "source_code": "def lookup(self, divisor: sympy.Expr, length: sympy.Expr) -> IterationRangesEntry:\n    if V.graph.sizevars.statically_known_equals(divisor * length, self.numel):\n        expr = FloorDiv(self.index_sym(), divisor)\n    else:\n        expr = ModularIndexing(self.index_sym(), divisor, length)\n    if expr not in self.nodes:\n        node = IterationRangesEntry(f'{self.prefix}{next(V.kernel.iter_vars_count)}', divisor, length, expr, self)\n        V.kernel.range_tree_nodes[node.symbol()] = node\n        self.var_list.append(node.symbol())\n        self.var_ranges[node.symbol()] = length\n        self.nodes[expr] = node\n    return self.nodes[expr]",
    "docstring": "Lookup a given RangeTreeEntry, creating it if needed",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\codegen\\simd.py",
    "ast_data": "FunctionDef name:lookup arg:self arg:divisor arg:length arguments arg arg arg If Call Assign Call Call Assign Call Call If Compare Assign Call Call Assign Call Call Call Assign Call Assign Return return:yes"
  },
  {
    "library": "django",
    "name": "django_table_names",
    "source_code": "def django_table_names(self, only_existing=False, include_views=True):\n    tables = set()\n    for model in self.get_migratable_models():\n        if not model._meta.managed:\n            continue\n        tables.add(model._meta.db_table)\n        tables.update((f.m2m_db_table() for f in model._meta.local_many_to_many if f.remote_field.through._meta.managed))\n    tables = list(tables)\n    if only_existing:\n        existing_tables = set(self.table_names(include_views=include_views))\n        tables = [t for t in tables if self.identifier_converter(t) in existing_tables]\n    return tables",
    "docstring": "Return a list of all table names that have associated Django models and are in INSTALLED_APPS. If only_existing is True, include only the tables in the database.",
    "type": "method",
    "file_path": "django\\django\\db\\backends\\base\\introspection.py",
    "ast_data": "FunctionDef name:django_table_names arg:self arg:only_existing arg:include_views arguments arg arg arg Assign Call For Call If Call Call Call Assign Call If Assign Call Call Assign Compare Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "report_error_to_cluster",
    "source_code": "def report_error_to_cluster(self, error_code, error_message):\n    if self._context_handle:\n        pywrap_tfe.TFE_ReportErrorToCluster(self._context_handle, error_code, error_message)\n    else:\n        raise ValueError('Context is not initialized.')",
    "docstring": "Report error to other members in a multi-client cluster. Args: error_code: a error code. error_message: a string. The error message.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\context.py",
    "ast_data": "FunctionDef name:report_error_to_cluster arg:self arg:error_code arg:error_message arguments arg arg arg If Call Raise Call"
  },
  {
    "library": "tensorflow",
    "name": "_get_variable_specs",
    "source_code": "def _get_variable_specs(args):\n    variable_specs = []\n    for arg in nest.flatten(args):\n        if not isinstance(arg, type_spec.TypeSpec):\n            continue\n        if isinstance(arg, resource_variable_ops.VariableSpec):\n            variable_specs.append(arg)\n        elif not isinstance(arg, tensor.TensorSpec):\n            variable_specs.extend(_get_variable_specs(arg._component_specs))\n    return variable_specs",
    "docstring": "Returns from .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\polymorphic_function\\function_type_utils.py",
    "ast_data": "FunctionDef name:_get_variable_specs arg:args arguments arg Assign For Call If Call If Call Call If Call Call Call Return return:yes"
  },
  {
    "library": "authlib",
    "name": "InvalidSoftwareStatementError",
    "source_code": "class InvalidSoftwareStatementError(OAuth2Error):\n    error = 'invalid_software_statement'",
    "docstring": "The software statement presented is invalid.",
    "type": "class",
    "file_path": "authlib\\authlib\\oauth2\\rfc7591\\errors.py",
    "ast_data": "ClassDef name:InvalidSoftwareStatementError Assign"
  },
  {
    "library": "kornia",
    "name": "__repr__",
    "source_code": "def __repr__(self) -> str:\n    repr_buf = f'gain={self.gain}, center={self.center}, sigma={self.sigma}, sign={self.sign}'\n    return repr_buf",
    "docstring": "Return a string representation of the object.",
    "type": "method",
    "file_path": "kornia\\kornia\\augmentation\\random_generator\\_2d\\gaussian_illumination.py",
    "ast_data": "FunctionDef name:__repr__ arg:self arguments arg Assign Return return:yes"
  },
  {
    "library": "django",
    "name": "save_formset",
    "source_code": "def save_formset(self, request, form, formset, change):\n    formset.save()",
    "docstring": "Given an inline formset save it to the database.",
    "type": "method",
    "file_path": "django\\django\\contrib\\admin\\options.py",
    "ast_data": "FunctionDef name:save_formset arg:self arg:request arg:form arg:formset arg:change arguments arg arg arg arg arg Call"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, cell, device, **kwargs):\n    super(DeviceWrapperBase, self).__init__(cell, **kwargs)\n    self._device = device",
    "docstring": "Construct a for with device . Ensures the wrapped is called with . Args: cell: An instance of . device: A device string or function, for passing to . **kwargs: dict of keyword arguments for base layer.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\layers\\legacy_rnn\\rnn_cell_wrapper_impl.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:cell arg:device arguments arg arg arg arg Call Call Assign"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, node_def, op, message, *args):\n    super(AbortedError, self).__init__(node_def, op, message, ABORTED, *args)",
    "docstring": "Creates an .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\errors_impl.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:node_def arg:op arg:message arguments arg arg arg arg arg Call Call"
  },
  {
    "library": "pytorch",
    "name": "gen_fallback_code",
    "source_code": "def gen_fallback_code(schema: LazyIrSchema, sig: DispatcherSignature | NativeSignature, overload_name: str) -> str:\n    dispatcher_sig = DispatcherSignature.from_schema(schema.func)\n    exprs = translate(sig.arguments(), dispatcher_sig.arguments())\n    fallback_args = ',\\n                '.join([a.expr for a in exprs])\n    if len(overload_name):\n        aten_op_str = f'ATEN_OP2({schema.aten_name}, {overload_name})'\n    else:\n        aten_op_str = f'ATEN_OP({schema.aten_name})'\n    return f'\\n        if (force_eager_fallback({aten_symbol(schema)})) {{\\n            return at::native::call_fallback_fn_symint<&ltc_eager_fallback, {aten_op_str}>::call(\\n                {fallback_args}\\n            );\\n        }}\\n'",
    "docstring": "Generate code that falls back to eager conditioned on a predicate",
    "type": "function",
    "file_path": "pytorch\\torchgen\\dest\\lazy_ir.py",
    "ast_data": "FunctionDef name:gen_fallback_code arg:schema arg:sig arg:overload_name arguments arg arg arg Assign Call Assign Call Call Call Assign Call If Call Assign Assign Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "run",
    "source_code": "def run(self, distributed_train_function, *args, **kwargs):\n    if self._platform_device == failure_handling_util.PlatformDevice.INTERNAL_TPU:\n        return self._run_for_tpu(distributed_train_function, *args, **kwargs)\n    elif self._platform_device in (failure_handling_util.PlatformDevice.GCE_TPU, failure_handling_util.PlatformDevice.GCE_CPU):\n        return distributed_train_function(*args, **kwargs)\n    else:\n        return self._run_for_multi_worker_mirrored(distributed_train_function, *args, **kwargs)",
    "docstring": "Runs a training function with error and preemption handling. This function handles the preemption signal from any peer in the cluster by saving the training progress and exiting gracefully. It will also broadcast any program error encountered during the execution of to all workers so that they can raise the same error. The argument should be a distributed train function (i.e., containing a call to ). For users, we recommend passing in a single-step to so that the checkpoint can be saved in time in case a preemption signal or maintenance notice is sent. Besides the preemption and error handling part, has the same effect and output as . can return either some or no result. The following is a shortened example: Args: distributed_train_function: A (single-step) distributed training function. *args: args for . **kwargs: kwargs for . Raises: Program error encountered by any member in the cluster while executing the , or any error from the program error propagation process. Returns: Result of running the .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\failure_handling\\failure_handling.py",
    "ast_data": "FunctionDef name:run arg:self arg:distributed_train_function arguments arg arg arg arg If Compare Return return:yes Call If Compare Return return:yes Call Return return:yes Call"
  },
  {
    "library": "django",
    "name": "splitext",
    "source_code": "def splitext(self, the_path):\n    base, ext = posixpath.splitext(the_path)\n    if base.lower().endswith('.tar'):\n        ext = base[-4:] + ext\n        base = base[:-4]\n    return (base, ext)",
    "docstring": "Like os.path.splitext, but takes off .tar, too",
    "type": "method",
    "file_path": "django\\django\\core\\management\\templates.py",
    "ast_data": "FunctionDef name:splitext arg:self arg:the_path arguments arg arg Assign Call If Call Call Assign Assign Return return:yes"
  },
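The `.tar`-aware splitext behavior above is easy to reproduce standalone; a sketch using a hypothetical function name:

```python
import posixpath

def splitext_tar_aware(path):
    """Like posixpath.splitext, but keeps '.tar' with the extension."""
    base, ext = posixpath.splitext(path)
    if base.lower().endswith('.tar'):
        ext = base[-4:] + ext
        base = base[:-4]
    return base, ext

assert splitext_tar_aware('backup.tar.gz') == ('backup', '.tar.gz')
assert splitext_tar_aware('notes.txt') == ('notes', '.txt')
```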
  {
    "library": "cherrypy",
    "name": "index",
    "source_code": "@cherrypy.expose\ndef index(self):\n    return '\\n            <p>Hi, this is the home page! Check out the other\\n            fun stuff on this site:</p>\\n\\n            <ul>\\n                <li><a href=\"/joke/\">A silly joke</a></li>\\n                <li><a href=\"/links/\">Useful links</a></li>\\n            </ul>'",
    "docstring": "Produce HTTP response body of home page app index URI.",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\tutorial\\tut04_complex_site.py",
    "ast_data": "FunctionDef name:index arg:self arguments arg Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "stop",
    "source_code": "def stop(self):\n    self._timer_stop()",
    "docstring": "Stop the timer.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\backend_bases.py",
    "ast_data": "FunctionDef name:stop arg:self arguments arg Call"
  },
  {
    "library": "tensorflow",
    "name": "adjoint",
    "source_code": "def adjoint(self, name: str='adjoint') -> 'LinearOperator':\n    if self.is_self_adjoint is True:\n        return self\n    with self._name_scope(name):\n        return self._linop_adjoint()",
    "docstring": "Returns the adjoint of the current . Given representing this , return . Note that calling and are equivalent. Args: name: A name for this . Returns: which represents the adjoint of this .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\linalg\\linear_operator.py",
    "ast_data": "FunctionDef name:adjoint arg:self arg:name arguments arg arg If Compare Return return:yes With Call Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "MonteCarloTestResult",
    "source_code": "@dataclass\nclass MonteCarloTestResult:\n    statistic: float | np.ndarray\n    pvalue: float | np.ndarray\n    null_distribution: np.ndarray",
    "docstring": "Result object returned by . Attributes ---------- statistic : float or ndarray The observed test statistic of the sample. pvalue : float or ndarray The p-value for the given alternative. null_distribution : ndarray The values of the test statistic generated under the null hypothesis.",
    "type": "class",
    "file_path": "scipy\\scipy\\stats\\_resampling.py",
    "ast_data": "ClassDef name:MonteCarloTestResult"
  },
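`MonteCarloTestResult` is the return type of `scipy.stats.monte_carlo_test`; a small usage sketch on synthetic data:

```python
import numpy as np
from scipy import stats

rng = np.random.default_rng(0)
sample = rng.normal(loc=0.2, size=100)

# Test the sample mean against a standard-normal null distribution.
res = stats.monte_carlo_test(sample, rng.normal, np.mean)
print(res.statistic, res.pvalue)    # observed statistic and p-value
print(res.null_distribution.shape)  # (9999,) with the default n_resamples
```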
  {
    "library": "tensorflow",
    "name": "_convert_dtype_id_to_str",
    "source_code": "def _convert_dtype_id_to_str(dtype):\n    if isinstance(dtype, int):\n        return dtypes._TYPE_TO_STRING[dtype]\n    else:\n        return [dtypes._TYPE_TO_STRING[d] for d in dtype]",
    "docstring": "Helper function to convert a dtype id to a corresponding string name.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\compiler\\tensorrt\\utils.py",
    "ast_data": "FunctionDef name:_convert_dtype_id_to_str arg:dtype arguments arg If Call Return return:yes Return return:yes"
  },
  {
    "library": "sphinx",
    "name": "CitationReferenceTransform",
    "source_code": "class CitationReferenceTransform(SphinxPostTransform):\n    default_priority = 5\n    formats = ('latex',)\n\n    def run(self, **kwargs: Any) -> None:\n        domain = self.env.domains.citation_domain\n        matcher = NodeMatcher(addnodes.pending_xref, refdomain='citation', reftype='ref')\n        for node in matcher.findall(self.document):\n            docname, labelid, _ = domain.citations.get(node['reftarget'], ('', '', 0))\n            if docname:\n                citation_ref = nodes.citation_reference('', '', *node.children, docname=docname, refname=labelid)\n                node.replace_self(citation_ref)",
    "docstring": "Replace pending_xref nodes for citation by citation_reference. To handle citation reference easily on LaTeX writer, this converts pending_xref nodes to citation_reference.",
    "type": "class",
    "file_path": "sphinx\\sphinx\\builders\\latex\\transforms.py",
    "ast_data": "ClassDef name:CitationReferenceTransform Assign Assign FunctionDef name:run arg:self arguments arg arg Assign Assign Call For Call Assign Call If Assign Call Call"
  },
  {
    "library": "matplotlib",
    "name": "get_legend",
    "source_code": "def get_legend(self):\n    return self.legend_",
    "docstring": "Return the instance, or None if no legend is defined.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\axes\\_base.py",
    "ast_data": "FunctionDef name:get_legend arg:self arguments arg Return return:yes"
  },
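Illustrating `get_legend` through the public Axes API:

```python
import matplotlib.pyplot as plt

fig, ax = plt.subplots()
ax.plot([0, 1], [0, 1], label='line')
assert ax.get_legend() is None        # no legend defined yet
ax.legend()
assert ax.get_legend() is not None    # the Legend instance is now set
```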
  {
    "library": "scikit-learn",
    "name": "_n_features_out",
    "source_code": "@property\ndef _n_features_out(self):\n    return self.components_.shape[0]",
    "docstring": "Number of transformed output features.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\decomposition\\_base.py",
    "ast_data": "FunctionDef name:_n_features_out arg:self arguments arg Return return:yes"
  },
  {
    "library": "numpy",
    "name": "TemplateError",
    "source_code": "class TemplateError(Exception):\n\n    def __init__(self, message, position, name=None):\n        Exception.__init__(self, message)\n        self.position = position\n        self.name = name\n\n    def __str__(self):\n        msg = ' '.join(self.args)\n        if self.position:\n            msg = '%s at line %s column %s' % (msg, self.position[0], self.position[1])\n        if self.name:\n            msg += ' in %s' % self.name\n        return msg",
    "docstring": "Exception raised while parsing a template",
    "type": "class",
    "file_path": "numpy\\numpy\\_build_utils\\tempita\\_tempita.py",
    "ast_data": "ClassDef name:TemplateError FunctionDef name:__init__ arg:self arg:message arg:position arg:name arguments arg arg arg arg Call Assign Assign FunctionDef name:__str__ arg:self arguments arg Assign Call If Assign If Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "imageObject",
    "source_code": "def imageObject(self, image):\n    entry = self._images.get(id(image), None)\n    if entry is not None:\n        return entry[1]\n    name = next(self._image_seq)\n    ob = self.reserveObject(f'image {name}')\n    self._images[id(image)] = (image, name, ob)\n    return name",
    "docstring": "Return name of an image XObject representing the given image.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\backends\\backend_pdf.py",
    "ast_data": "FunctionDef name:imageObject arg:self arg:image arguments arg arg Assign Call Call If Compare Return return:yes Assign Call Assign Call Assign Call Return return:yes"
  },
  {
    "library": "django",
    "name": "int64_output",
    "source_code": "def int64_output(func, argtypes):\n    func.argtypes = argtypes\n    func.restype = c_int64\n    return func",
    "docstring": "Generate a ctypes function that returns a 64-bit integer value.",
    "type": "function",
    "file_path": "django\\django\\contrib\\gis\\gdal\\prototypes\\generation.py",
    "ast_data": "FunctionDef name:int64_output arg:func arg:argtypes arguments arg arg Assign Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "keras_tensor_from_tensor",
    "source_code": "def keras_tensor_from_tensor(tensor):\n    keras_tensor_cls = None\n    for tensor_type, cls in keras_tensor_classes:\n        if isinstance(tensor, tensor_type):\n            keras_tensor_cls = cls\n            break\n    out = keras_tensor_cls.from_tensor(tensor)\n    if hasattr(tensor, '_keras_mask'):\n        out._keras_mask = keras_tensor_from_tensor(tensor._keras_mask)\n    return out",
    "docstring": "Convert a traced (composite)tensor to a representative KerasTensor.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\keras_tensor.py",
    "ast_data": "FunctionDef name:keras_tensor_from_tensor arg:tensor arguments arg Assign For If Call Assign Assign Call If Call Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "offset_to_shard",
    "source_code": "def offset_to_shard(self):\n    unravel_index = self.mesh.unravel_index()\n    locations = [None] * self.mesh.size\n    for offset, mesh_loc in unravel_index.items():\n        loc = []\n        for dim_sharding in self.sharding_specs:\n            if dim_sharding == UNSHARDED:\n                loc.append(0)\n            else:\n                loc.append(mesh_loc[dim_sharding])\n        locations[offset] = tuple(loc)\n    return locations",
    "docstring": "Mapping from offset in a flattened list to shard index.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\dtensor\\python\\layout.py",
    "ast_data": "FunctionDef name:offset_to_shard arg:self arguments arg Assign Call Assign For Call Assign For If Compare Call Call Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "output_types",
    "source_code": "@property\ndef output_types(self):\n    return nest.map_structure(lambda component_spec: component_spec._to_legacy_output_types(), self._element_spec)",
    "docstring": "Returns the type of each component of an element of this iterator. Returns: A nested structure of objects corresponding to each component of an element of this dataset.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\input_lib.py",
    "ast_data": "FunctionDef name:output_types arg:self arguments arg Return return:yes Call arguments arg Call"
  },
  {
    "library": "tensorflow",
    "name": "_static_value",
    "source_code": "def _static_value(x):\n    return tensor_util.constant_value(ops.convert_to_tensor(x))",
    "docstring": "Returns the static value of a or .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\distributions\\transformed_distribution.py",
    "ast_data": "FunctionDef name:_static_value arg:x arguments arg Return return:yes Call Call"
  },
  {
    "library": "django",
    "name": "do_get_current_language",
    "source_code": "@register.tag('get_current_language')\ndef do_get_current_language(parser, token):\n    args = token.contents.split()\n    if len(args) != 3 or args[1] != 'as':\n        raise TemplateSyntaxError(\"'get_current_language' requires 'as variable' (got %r)\" % args)\n    return GetCurrentLanguageNode(args[2])",
    "docstring": "Store the current language in the context. Usage:: {% get_current_language as language %} This fetches the currently active language and puts its value into the `` context variable.",
    "type": "function",
    "file_path": "django\\django\\templatetags\\i18n.py",
    "ast_data": "FunctionDef name:do_get_current_language arg:parser arg:token arguments arg arg Assign Call If BoolOp Compare Call Compare Raise Call Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "is_available",
    "source_code": "def is_available():\n    return torch._C._has_cudnn",
    "docstring": "Return a bool indicating if CUDNN is currently available.",
    "type": "function",
    "file_path": "pytorch\\torch\\backends\\cudnn\\__init__.py",
    "ast_data": "FunctionDef name:is_available arguments Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, key_dtype, value_dtype):\n    self._key_dtype = dtypes.as_dtype(key_dtype)\n    self._value_dtype = dtypes.as_dtype(value_dtype)\n    super(LookupInterface, self).__init__()",
    "docstring": "Construct a lookup table interface. Args: key_dtype: The table key type. value_dtype: The table value type.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\lookup_ops.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:key_dtype arg:value_dtype arguments arg arg arg Assign Call Assign Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "_repopulate_pool",
    "source_code": "def _repopulate_pool(self):\n    for _ in range(self._processes - len(self._pool)):\n        args = (self._inqueue, self._outqueue, self._initializer, self._initargs, self._maxtasksperchild)\n        if hasattr(self, '_wrap_exception'):\n            args += (self._wrap_exception,)\n        w = self.Process(target=clean_worker, args=args)\n        self._pool.append(w)\n        w.name = w.name.replace('Process', 'PoolWorker')\n        w.daemon = True\n        w.start()\n        util.debug('added worker')",
    "docstring": "Increase the number of pool processes to the specified number. Bring the number of pool processes up to the specified number, for use after reaping workers which have exited.",
    "type": "method",
    "file_path": "pytorch\\torch\\multiprocessing\\pool.py",
    "ast_data": "FunctionDef name:_repopulate_pool arg:self arguments arg For Call Call Assign If Call Assign Call Call Assign Call Assign Call Call"
  },
  {
    "library": "pandas",
    "name": "_parse_tfoot_tr",
    "source_code": "def _parse_tfoot_tr(self, table):\n    raise AbstractMethodError(self)",
    "docstring": "Return the list of tfoot row elements from the parsed table element. Parameters ---------- table : a table element that contains row elements. Returns ------- list of node-like These are the row elements of a table.",
    "type": "method",
    "file_path": "pandas\\pandas\\io\\html.py",
    "ast_data": "FunctionDef name:_parse_tfoot_tr arg:self arg:table arguments arg arg Raise Call"
  },
  {
    "library": "matplotlib",
    "name": "contains",
    "source_code": "def contains(self, mouseevent):\n    if self._different_canvas(mouseevent) or not self.get_visible():\n        return (False, {})\n    pickradius = float(self._picker) if isinstance(self._picker, Number) and self._picker is not True else self._pickradius\n    if self.axes:\n        self.axes._unstale_viewLim()\n    transform, offset_trf, offsets, paths = self._prepare_points()\n    ind = _path.point_in_path_collection(mouseevent.x, mouseevent.y, pickradius, transform.frozen(), paths, self.get_transforms(), offsets, offset_trf, pickradius <= 0)\n    return (len(ind) > 0, dict(ind=ind))",
    "docstring": "Test whether the mouse event occurred in the collection. Returns ``, where every item in itemlist contains the event.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\collections.py",
    "ast_data": "FunctionDef name:contains arg:self arg:mouseevent arguments arg arg If BoolOp Call Call Return return:yes Assign BoolOp Call Compare Call If Call Assign Call Assign Call Call Call Compare Return return:yes Compare Call Call"
  },
  {
    "library": "pytorch",
    "name": "aten_op",
    "source_code": "def aten_op(self, operator: str, *args, overload_name: str='', **kwargs):\n    return self.op('aten::ATen', *args, operator_s=operator, overload_name_s=overload_name, **kwargs)",
    "docstring": "Generates an ONNX ATen op node. This function is for backward compatibility with the old symbolic functions.",
    "type": "method",
    "file_path": "pytorch\\torch\\onnx\\_internal\\jit_utils.py",
    "ast_data": "FunctionDef name:aten_op arg:self arg:operator arguments arg arg arg arg arg Return return:yes Call"
  },
  {
    "library": "django",
    "name": "Ref",
    "source_code": "class Ref(Expression):\n\n    def __init__(self, refs, source):\n        super().__init__()\n        self.refs, self.source = (refs, source)\n\n    def __repr__(self):\n        return '{}({}, {})'.format(self.__class__.__name__, self.refs, self.source)\n\n    def get_source_expressions(self):\n        return [self.source]\n\n    def set_source_expressions(self, exprs):\n        self.source, = exprs\n\n    def resolve_expression(self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False):\n        return self\n\n    def get_refs(self):\n        return {self.refs}\n\n    def relabeled_clone(self, relabels):\n        clone = self.copy()\n        clone.source = self.source.relabeled_clone(relabels)\n        return clone\n\n    def as_sql(self, compiler, connection):\n        return (connection.ops.quote_name(self.refs), [])\n\n    def get_group_by_cols(self):\n        return [self]",
    "docstring": "Reference to column alias of the query. For example, Ref('sum_cost') in qs.annotate(sum_cost=Sum('cost')) query.",
    "type": "class",
    "file_path": "django\\django\\db\\models\\expressions.py",
    "ast_data": "ClassDef name:Ref FunctionDef name:__init__ arg:self arg:refs arg:source arguments arg arg arg Call Call Assign FunctionDef name:__repr__ arg:self arguments arg Return return:yes Call FunctionDef name:get_source_expressions arg:self arguments arg Return return:yes FunctionDef name:set_source_expressions arg:self arg:exprs arguments arg arg Assign FunctionDef name:resolve_expression arg:self arg:query arg:allow_joins arg:reuse arg:summarize arg:for_save arguments arg arg arg arg arg arg Return return:yes FunctionDef name:get_refs arg:self arguments arg Return return:yes FunctionDef name:relabeled_clone arg:self arg:relabels arguments arg arg Assign Call Assign Call Return return:yes FunctionDef name:as_sql arg:self arg:compiler arg:connection arguments arg arg arg Return return:yes Call FunctionDef name:get_group_by_cols arg:self arguments arg Return return:yes"
  },
  {
    "library": "pandas",
    "name": "make_block_same_class",
    "source_code": "@final\ndef make_block_same_class(self, values, placement: BlockPlacement | None=None, refs: BlockValuesRefs | None=None) -> Self:\n    if placement is None:\n        placement = self._mgr_locs\n    return type(self)(values, placement=placement, ndim=self.ndim, refs=refs)",
    "docstring": "Wrap given values in a block of same type as self.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\internals\\blocks.py",
    "ast_data": "FunctionDef name:make_block_same_class arg:self arg:values arg:placement arg:refs arguments arg arg arg arg If Compare Assign Return return:yes Call Call"
  },
  {
    "library": "pandas",
    "name": "_dtype_to_stata_type_117",
    "source_code": "def _dtype_to_stata_type_117(dtype: np.dtype, column: Series, force_strl: bool) -> int:\n    if force_strl:\n        return 32768\n    if dtype.type is np.object_:\n        itemsize = max_len_string_array(ensure_object(column._values))\n        itemsize = max(itemsize, 1)\n        if itemsize <= 2045:\n            return itemsize\n        return 32768\n    elif dtype.type is np.float64:\n        return 65526\n    elif dtype.type is np.float32:\n        return 65527\n    elif dtype.type is np.int32:\n        return 65528\n    elif dtype.type is np.int16:\n        return 65529\n    elif dtype.type is np.int8:\n        return 65530\n    else:\n        raise NotImplementedError(f'Data type {dtype} not supported.')",
    "docstring": "Converts dtype types to stata types. Returns the byte of the given ordinal. See TYPE_MAP and comments for an explanation. This is also explained in the dta spec. 1 - 2045 are strings of this length Pandas Stata 32768 - for object strL 65526 - for int8 byte 65527 - for int16 int 65528 - for int32 long 65529 - for float32 float 65530 - for double double If there are dates to convert, then dtype will already have the correct type inserted.",
    "type": "function",
    "file_path": "pandas\\pandas\\io\\stata.py",
    "ast_data": "FunctionDef name:_dtype_to_stata_type_117 arg:dtype arg:column arg:force_strl arguments arg arg arg If Return return:yes If Compare Assign Call Call Assign Call If Compare Return return:yes Return return:yes If Compare Return return:yes If Compare Return return:yes If Compare Return return:yes If Compare Return return:yes If Compare Return return:yes Raise Call"
  },
  {
    "library": "scrapy",
    "name": "RedirectMiddleware",
    "source_code": "class RedirectMiddleware(BaseRedirectMiddleware):\n\n    def process_response(self, request: Request, response: Response, spider: Spider) -> Request | Response:\n        if request.meta.get('dont_redirect', False) or response.status in getattr(spider, 'handle_httpstatus_list', []) or response.status in request.meta.get('handle_httpstatus_list', []) or request.meta.get('handle_httpstatus_all', False):\n            return response\n        allowed_status = (301, 302, 303, 307, 308)\n        if 'Location' not in response.headers or response.status not in allowed_status:\n            return response\n        assert response.headers['Location'] is not None\n        location = safe_url_string(response.headers['Location'])\n        if response.headers['Location'].startswith(b'//'):\n            request_scheme = urlparse_cached(request).scheme\n            location = request_scheme + '://' + location.lstrip('/')\n        redirected_url = urljoin(request.url, location)\n        redirected = _build_redirect_request(request, url=redirected_url)\n        if urlparse_cached(redirected).scheme not in {'http', 'https'}:\n            return response\n        if response.status in (301, 307, 308) or request.method == 'HEAD':\n            return self._redirect(redirected, request, spider, response.status)\n        redirected = self._redirect_request_using_get(request, redirected_url)\n        return self._redirect(redirected, request, spider, response.status)",
    "docstring": "Handle redirection of requests based on response status and meta-refresh html tag.",
    "type": "class",
    "file_path": "scrapy\\scrapy\\downloadermiddlewares\\redirect.py",
    "ast_data": "ClassDef name:RedirectMiddleware FunctionDef name:process_response arg:self arg:request arg:response arg:spider arguments arg arg arg arg If BoolOp Call Compare Call Compare Call Call Return return:yes Assign If BoolOp Compare Compare Return return:yes Compare Assign Call If Call Assign Call Assign Call Assign Call Assign Call If Compare Call Return return:yes If BoolOp Compare Compare Return return:yes Call Assign Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "unify",
    "source_code": "@dispatch(object, object, dict)\ndef unify(u, v, s):\n    u = walk(u, s)\n    v = walk(v, s)\n    if u == v:\n        return s\n    if isvar(u):\n        return assoc(s, u, v)\n    if isvar(v):\n        return assoc(s, v, u)\n    return _unify(u, v, s)",
    "docstring": "Find substitution so that u == v while satisfying s >>> x = var(\"x\") >>> unify((1, x), (1, 2), {}) {~x: 2}",
    "type": "function",
    "file_path": "pytorch\\torch\\fx\\experimental\\unification\\core.py",
    "ast_data": "FunctionDef name:unify arg:u arg:v arg:s arguments arg arg arg Assign Call Assign Call If Compare Return return:yes If Call Return return:yes Call If Call Return return:yes Call Return return:yes Call Call"
  },
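A sketch of the doctest above, assuming `unify` and `var` are importable from `torch.fx.experimental.unification` as in the ported unification package:

```python
from torch.fx.experimental.unification import unify, var

x = var('x')
# Find a substitution map that makes the two structures equal.
print(unify((1, x), (1, 2), {}))   # {~x: 2}
print(unify((1, x), (2, 2), {}))   # False: no substitution exists
```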
  {
    "library": "django",
    "name": "_make_parser",
    "source_code": "def _make_parser(self):\n    return DefusedExpatParser()",
    "docstring": "Create a hardened XML parser (no custom/external entities).",
    "type": "method",
    "file_path": "django\\django\\core\\serializers\\xml_serializer.py",
    "ast_data": "FunctionDef name:_make_parser arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "_validate_X_predict",
    "source_code": "def _validate_X_predict(self, X, check_input):\n    if check_input:\n        if self._support_missing_values(X):\n            ensure_all_finite = 'allow-nan'\n        else:\n            ensure_all_finite = True\n        X = validate_data(self, X, dtype=DTYPE, accept_sparse='csr', reset=False, ensure_all_finite=ensure_all_finite)\n        if issparse(X) and (X.indices.dtype != np.intc or X.indptr.dtype != np.intc):\n            raise ValueError('No support for np.int64 index based sparse matrices')\n    else:\n        _check_n_features(self, X, reset=False)\n    return X",
    "docstring": "Validate the training data on predict (probabilities).",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\tree\\_classes.py",
    "ast_data": "FunctionDef name:_validate_X_predict arg:self arg:X arg:check_input arguments arg arg arg If If Call Assign Assign Assign Call If BoolOp Call BoolOp Compare Compare Raise Call Call Return return:yes"
  },
  {
    "library": "pandas",
    "name": "_box_as_indexlike",
    "source_code": "def _box_as_indexlike(dt_array: ArrayLike, utc: bool=False, name: Hashable | None=None) -> Index:\n    if lib.is_np_dtype(dt_array.dtype, 'M'):\n        tz = 'utc' if utc else None\n        return DatetimeIndex(dt_array, tz=tz, name=name)\n    return Index(dt_array, name=name, dtype=dt_array.dtype)",
    "docstring": "Properly boxes the ndarray of datetimes to DatetimeIndex if it is possible or to generic Index instead Parameters ---------- dt_array: 1-d array Array of datetimes to be wrapped in an Index. utc : bool Whether to convert/localize timestamps to UTC. name : string, default None Name for a resulting index Returns ------- result : datetime of converted dates - DatetimeIndex if convertible to sole datetime64 type - general Index otherwise",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\tools\\datetimes.py",
    "ast_data": "FunctionDef name:_box_as_indexlike arg:dt_array arg:utc arg:name arguments arg arg arg If Call Assign Return return:yes Call Return return:yes Call"
  },
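The boxing rule in `_box_as_indexlike` mirrors what the public `pd.Index` constructor does; a short demonstration:

```python
import numpy as np
import pandas as pd

dt = np.array(['2021-01-01', '2021-01-02'], dtype='datetime64[ns]')
print(pd.Index(dt))  # DatetimeIndex, because the dtype is datetime64[ns]
print(pd.Index(np.array([1.5, 2.5])))  # plain Index for other dtypes
```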
  {
    "library": "tensorflow",
    "name": "most_specific_common_supertype",
    "source_code": "@abc.abstractmethod\ndef most_specific_common_supertype(self, others: Sequence['TraceType']) -> Optional['TraceType']:\n    pass",
    "docstring": "Returns the most specific supertype of and , if exists. The returned is a supertype of and , that is, they are all subtypes (see ) of it. It is also most specific, that is, there it has no subtype that is also a common supertype of and . If and have no common supertype, this returns . Args: others: A sequence of TraceTypes. Example:",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\types\\trace.py",
    "ast_data": "FunctionDef name:most_specific_common_supertype arg:self arg:others arguments arg arg"
  },
  {
    "library": "pytorch",
    "name": "__str__",
    "source_code": "def __str__(self) -> str:\n    return 'P'",
    "docstring": "human readable representation of the Partial placement",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\tensor\\placement_types.py",
    "ast_data": "FunctionDef name:__str__ arg:self arguments arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "MatchInfo",
    "source_code": "class MatchInfo:\n\n    def __init__(self, state: MatchState, culprit: Optional[str]=None) -> None:\n        self._state = state\n        self.culprit = culprit\n\n    def __str__(self) -> str:\n        details = f', {self.culprit}' if getattr(self, 'culprit', None) else ''\n        return f'Error type: {self._state.name}{details}'\n\n    @property\n    def state(self) -> MatchState:\n        return self._state",
    "docstring": "Aside from the match state, we also store some dynamic info for the match such as the culprit rank or collective state that caused the mismatch.",
    "type": "class",
    "file_path": "pytorch\\tools\\flight_recorder\\components\\types.py",
    "ast_data": "ClassDef name:MatchInfo FunctionDef name:__init__ arg:self arg:state arg:culprit arguments arg arg arg Assign Assign FunctionDef name:__str__ arg:self arguments arg Assign Call Return return:yes FunctionDef name:state arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "parse_func_attrs",
    "source_code": "def parse_func_attrs(attributes, allowlist=None):\n    if not allowlist:\n        allowlist = MONOMORPHIC_FUNCTION_ALLOWLIST\n    attrs = {}\n    for key, value in attributes.items():\n        if key not in allowlist:\n            raise ValueError(f'Allowlist does not support `{key}` as an attribute.')\n        attrs[key] = _parse_func_attr_value(key, value)\n    return attrs",
    "docstring": "Convert the keyword arguments into function_def attributes. Currently only support primitive types: bool, int, float and string. Args: attributes: the dictionary of attributes. allowlist: set of attribute names allowed. Returns: A dict of attributes where the key is the name of attribute and the value is the AttrValue proto. Raises: ValueError: If the kwargs contains unallowlisted name or unsupported value types.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\polymorphic_function\\attributes.py",
    "ast_data": "FunctionDef name:parse_func_attrs arg:attributes arg:allowlist arguments arg arg If Assign Assign For Call If Compare Raise Call Assign Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_get_reduction_symbols",
    "source_code": "def _get_reduction_symbols(self, suffix: str, **kwargs) -> list[sympy.Symbol]:\n    rn_prefixes = self.get_reduction_prefixes()\n    return [sympy.Symbol(f'{prefix}{suffix}', **kwargs) for prefix in rn_prefixes]",
    "docstring": "Helper to initialize symbols like rn_numel, rn_base, etc.",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\codegen\\triton.py",
    "ast_data": "FunctionDef name:_get_reduction_symbols arg:self arg:suffix arguments arg arg arg Assign Call Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "set_params",
    "source_code": "def set_params(self, base=None, offset=None):\n    if base is not None:\n        self._edge = _Edge_integer(base, 0)\n    if offset is not None:\n        self._offset = offset",
    "docstring": "Set parameters within this locator. Parameters ---------- base : float > 0, optional Interval between ticks. offset : float, optional Value added to each multiple of *base*. .. versionadded:: 3.8",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\ticker.py",
    "ast_data": "FunctionDef name:set_params arg:self arg:base arg:offset arguments arg arg arg If Compare Assign Call If Compare Assign"
  },
  {
    "library": "matplotlib",
    "name": "viridis",
    "source_code": "def viridis() -> None:\n    set_cmap('viridis')",
    "docstring": "Set the colormap to 'viridis'. This changes the default colormap as well as the colormap of the current image if there is one. See `` for more information.",
    "type": "function",
    "file_path": "matplotlib\\lib\\matplotlib\\pyplot.py",
    "ast_data": "FunctionDef name:viridis arguments Call"
  },
  {
    "library": "scikit-learn",
    "name": "_yeo_johnson_transform",
    "source_code": "def _yeo_johnson_transform(self, x, lmbda):\n    out = np.zeros_like(x)\n    pos = x >= 0\n    if abs(lmbda) < np.spacing(1.0):\n        out[pos] = np.log1p(x[pos])\n    else:\n        out[pos] = (np.power(x[pos] + 1, lmbda) - 1) / lmbda\n    if abs(lmbda - 2) > np.spacing(1.0):\n        out[~pos] = -(np.power(-x[~pos] + 1, 2 - lmbda) - 1) / (2 - lmbda)\n    else:\n        out[~pos] = -np.log1p(-x[~pos])\n    return out",
    "docstring": "Return transformed input x following Yeo-Johnson transform with parameter lambda.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\preprocessing\\_data.py",
    "ast_data": "FunctionDef name:_yeo_johnson_transform arg:self arg:x arg:lmbda arguments arg arg arg Assign Call Assign Compare If Compare Call Call Assign Call Assign Call If Compare Call Call Assign Call Assign Call Return return:yes"
  },
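The piecewise Yeo-Johnson transform above is exercised through the public `PowerTransformer`; a sketch on a tiny array:

```python
import numpy as np
from sklearn.preprocessing import PowerTransformer

X = np.array([[-2.0], [0.5], [10.0]])
pt = PowerTransformer(method='yeo-johnson', standardize=False).fit(X)
print(pt.lambdas_)      # fitted lambda per feature
print(pt.transform(X))  # applies the piecewise formula shown above
```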
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, function_name, level=1, children_inputs_mappings=None, **kwargs):\n    self._function_name = function_name\n    self._level = level\n    if self._level == 1:\n        assert children_inputs_mappings is None\n    else:\n        assert isinstance(children_inputs_mappings, dict)\n    self._children_inputs_mappings = children_inputs_mappings\n    if self._children_inputs_mappings is not None:\n        self._validate_children_inputs_mappings(self._children_inputs_mappings)\n    self._unique_function_id = _uuid.uuid1().hex\n    self._attrs_to_store_later = kwargs\n    self._stored_attrs = False\n    self._inputs = OpHint.OpHintArgumentTracker(self._function_name, self._unique_function_id, 'InputHint', OpHint.FUNCTION_INPUT_INDEX_ATTR, level, self._children_inputs_mappings)\n    self._outputs = OpHint.OpHintArgumentTracker(self._function_name, self._unique_function_id, 'OutputHint', OpHint.FUNCTION_OUTPUT_INDEX_ATTR, level, self._children_inputs_mappings)",
    "docstring": "Create a OpHint. Args: function_name: Name of the function (the custom op name in tflite) level: OpHint level. children_inputs_mappings: Children OpHint inputs/outputs mapping. children_inputs_mappings should like below: \"parent_first_child_input\": [{\"parent_input_index\": num, \"child_input_index\": num}, ...] \"parent_last_child_output\": [{\"parent_output_index\": num, \"child_output_index\": num}, ...] \"internal_children_input_output\": [{\"child_input_index\": num, \"child_output_index\": num}, ...] **kwargs: Keyword arguments of any constant attributes for the function.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\lite\\python\\op_hint.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:function_name arg:level arg:children_inputs_mappings arguments arg arg arg arg arg Assign Assign If Compare Compare Call Assign If Compare Call Assign Call Assign Assign Assign Call Assign Call"
  },
  {
    "library": "tensorflow",
    "name": "set_session",
    "source_code": "def set_session(session):\n    global _SESSION\n    _SESSION.session = session",
    "docstring": "Sets the global TensorFlow session. Args: session: A TF Session.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\backend.py",
    "ast_data": "FunctionDef name:set_session arg:session arguments arg Assign"
  },
  {
    "library": "seaborn",
    "name": "tick",
    "source_code": "def tick(self, locator: Locator | None=None, *, upto: int | None=None) -> Temporal:\n    if locator is not None and (not isinstance(locator, Locator)):\n        err = f'Tick locator must be an instance of {Locator!r}, not {type(locator)!r}.'\n        raise TypeError(err)\n    new = copy(self)\n    new._tick_params = {'locator': locator, 'upto': upto}\n    return new",
    "docstring": "Configure the selection of ticks for the scale's axis or legend. .. note:: This API is under construction and will be enhanced over time. Parameters ---------- locator : :class: subclass Pre-configured matplotlib locator; other parameters will not be used. upto : int Choose \"nice\" locations for ticks, but do not exceed this number. Returns ------- scale Copy of self with new tick configuration.",
    "type": "method",
    "file_path": "seaborn\\seaborn\\_core\\scales.py",
    "ast_data": "FunctionDef name:tick arg:self arg:locator arguments arg arg arg If BoolOp Compare Call Assign Call Raise Call Assign Call Assign Return return:yes"
  },
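A usage sketch for `Temporal.tick` through the seaborn objects interface:

```python
import seaborn.objects as so

# Ask for at most 5 "nice" tick locations on a temporal scale.
scale = so.Temporal().tick(upto=5)
# A pre-configured matplotlib Locator may be passed instead of `upto`,
# e.g. so.Plot(...).scale(x=scale) attaches it to a plot's x axis.
```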
  {
    "library": "tensorflow",
    "name": "eval_in_original_context",
    "source_code": "def eval_in_original_context(f, args, caller_fn_scope):\n    ctx_frame = _find_originating_frame(caller_fn_scope, innermost=True)\n    args = (args[0], ctx_frame.f_globals if len(args) < 2 else args[1], ctx_frame.f_locals if len(args) < 3 else args[2])\n    return f(*args)",
    "docstring": "Executes the eval function in the context of a specified function.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\autograph\\operators\\py_builtins.py",
    "ast_data": "FunctionDef name:eval_in_original_context arg:f arg:args arg:caller_fn_scope arguments arg arg arg Assign Call Assign Compare Call Compare Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "type_based_dispatch_signatures_for",
    "source_code": "def type_based_dispatch_signatures_for(cls):\n\n    def contains_cls(x):\n        if isinstance(x, dict):\n            return any((contains_cls(v) for v in x.values()))\n        elif x is cls:\n            return True\n        elif type_annotations.is_generic_list(x) or type_annotations.is_generic_union(x):\n            type_args = type_annotations.get_generic_type_args(x)\n            return any((contains_cls(arg) for arg in type_args))\n        else:\n            return False\n    result = {}\n    for api, api_signatures in _TYPE_BASED_DISPATCH_SIGNATURES.items():\n        for _, signatures in api_signatures.items():\n            filtered = list(filter(contains_cls, signatures))\n            if filtered:\n                result.setdefault(api, []).extend(filtered)\n    return result",
    "docstring": "Returns dispatch signatures that have been registered for a given class. This function is intended for documentation-generation purposes. Args: cls: The class to search for. Type signatures are searched recursively, so e.g., if , then information will be returned for all dispatch targets that have anywhere in their type annotations (including nested in or .) Returns: A mapping -> , where is a TensorFlow API function; and is a list of dispatch signatures for that include . (Each signature is a dict mapping argument names to type annotations; see for more info.)",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\util\\dispatch.py",
    "ast_data": "FunctionDef name:type_based_dispatch_signatures_for arg:cls arguments arg FunctionDef name:contains_cls arg:x arguments arg If Call Return return:yes Call Call Call If Compare Return return:yes If BoolOp Call Call Assign Call Return return:yes Call Call Return return:yes Assign For Call For Call Assign Call Call If Call Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "setup",
    "source_code": "@abc.abstractmethod\ndef setup(self, fig, outfile, dpi=None):\n    Path(outfile).parent.resolve(strict=True)\n    self.outfile = outfile\n    self.fig = fig\n    if dpi is None:\n        dpi = self.fig.dpi\n    self.dpi = dpi",
    "docstring": "Setup for writing the movie file. Parameters ---------- fig : The figure object that contains the information for frames. outfile : str The filename of the resulting movie file. dpi : float, default: `` The DPI (or resolution) for the file. This controls the size in pixels of the resulting movie file.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\animation.py",
    "ast_data": "FunctionDef name:setup arg:self arg:fig arg:outfile arg:dpi arguments arg arg arg arg Call Call Assign Assign If Compare Assign Assign"
  },
  {
    "library": "tensorflow",
    "name": "environment",
    "source_code": "@property\ndef environment(self):\n    return ''",
    "docstring": "Returns the current environment which TensorFlow is running in. There are two possible return values, \"google\" (when TensorFlow is running in a Google-internal environment) or an empty string (when TensorFlow is running elsewhere). If you are implementing a ClusterResolver that works in both the Google environment and the open-source world (for instance, a TPU ClusterResolver or similar), you will have to return the appropriate string depending on the environment, which you will have to detect. Otherwise, if you are implementing a ClusterResolver that will only work in open-source TensorFlow, you do not need to implement this property.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\cluster_resolver\\cluster_resolver.py",
    "ast_data": "FunctionDef name:environment arg:self arguments arg Return return:yes"
  },
  {
    "library": "cherrypy",
    "name": "run",
    "source_code": "def run(self, point):\n    self.run_hooks(iter(sorted(self[point])))",
    "docstring": "Execute all registered Hooks (callbacks) for the given point.",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\_cprequest.py",
    "ast_data": "FunctionDef name:run arg:self arg:point arguments arg arg Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "workspace",
    "source_code": "def workspace(self, nbytes: sympy.Expr, zero_fill: bool) -> tuple[str, int]:\n    arg = WorkspaceArg(count=nbytes, zero_mode=WorkspaceZeroMode.from_bool(zero_fill), device=V.graph.get_current_device_or_throw(), outer_name=WorkspaceArg.unique_name())\n    for i, existing_arg in enumerate(self.workspace_args):\n        if WorkspaceArg.can_join(existing_arg, arg):\n            offset = existing_arg.count\n            self.workspace_args[i] = WorkspaceArg.join(existing_arg, arg)\n            return (existing_arg.inner_name, offset)\n        assert existing_arg.inner_name != arg.inner_name and existing_arg.outer_name != arg.outer_name, existing_arg\n    self.workspace_args.append(arg)\n    return (arg.inner_name, 0)",
    "docstring": "Allocate or extend a workspace buffer of nbytes bytes. This function manages the allocation of a workspace buffer. It either creates a new WorkspaceArg or extends an existing one. Note: - Calling this function will in-place mutate the args by adding or updating a WorkspaceArg. - The codegen for generating the Python argdefs and call_defs will check this field and allocate the buffer accordingly. - A new argument \"ws_ptr\" will be present in the generated code. Args: nbytes (sympy.Expr): The number of bytes to allocate. zero_fill (bool): Whether to initialize the buffer to zero. Returns: Tuple[str, int]: A tuple containing: - \"ws_ptr\": A string identifier for the workspace pointer. - offset: An integer representing the byte offset in the workspace.",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\codegen\\common.py",
    "ast_data": "FunctionDef name:workspace arg:self arg:nbytes arg:zero_fill arguments arg arg arg Assign Call Call Call Call For Call If Call Assign Assign Call Return return:yes BoolOp Compare Compare Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "setdefault",
    "source_code": "def setdefault(self, key: str, default: Optional[Any]=None) -> Any:\n    if key not in self:\n        self[key] = default\n    return self[key]",
    "docstring": "Set the default for a key in the Parameterdict. If key is in the ParameterDict, return its value. If not, insert with a parameter and return . defaults to . Args: key (str): key to set default for default (Any): the parameter set to the key",
    "type": "method",
    "file_path": "pytorch\\torch\\nn\\modules\\container.py",
    "ast_data": "FunctionDef name:setdefault arg:self arg:key arg:default arguments arg arg arg If Compare Assign Return return:yes"
  },
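A usage sketch for `ParameterDict.setdefault`:

```python
import torch
from torch import nn

pd = nn.ParameterDict({'w': nn.Parameter(torch.zeros(2))})
w = pd.setdefault('w', nn.Parameter(torch.ones(2)))  # existing: returns 'w'
b = pd.setdefault('b', nn.Parameter(torch.ones(3)))  # missing: inserts 'b'
print(sorted(pd.keys()))            # ['b', 'w']
assert torch.equal(w, torch.zeros(2))  # original 'w' was kept, not replaced
```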
  {
    "library": "tensorflow",
    "name": "getsource",
    "source_code": "def getsource(object):\n    return _inspect.getsource(tf_decorator.unwrap(object)[1])",
    "docstring": "TFDecorator-aware replacement for inspect.getsource.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\util\\tf_inspect.py",
    "ast_data": "FunctionDef name:getsource arg:object arguments arg Return return:yes Call Call"
  },
  {
    "library": "scrapy",
    "name": "HttpAuthMiddleware",
    "source_code": "class HttpAuthMiddleware:\n\n    @classmethod\n    def from_crawler(cls, crawler: Crawler) -> Self:\n        o = cls()\n        crawler.signals.connect(o.spider_opened, signal=signals.spider_opened)\n        return o\n\n    def spider_opened(self, spider: Spider) -> None:\n        usr = getattr(spider, 'http_user', '')\n        pwd = getattr(spider, 'http_pass', '')\n        if usr or pwd:\n            self.auth = basic_auth_header(usr, pwd)\n            self.domain = spider.http_auth_domain\n\n    def process_request(self, request: Request, spider: Spider) -> Request | Response | None:\n        auth = getattr(self, 'auth', None)\n        if auth and b'Authorization' not in request.headers and (not self.domain or url_is_from_any_domain(request.url, [self.domain])):\n            request.headers[b'Authorization'] = auth\n        return None",
    "docstring": "Set Basic HTTP Authorization header (http_user and http_pass spider class attributes)",
    "type": "class",
    "file_path": "scrapy\\scrapy\\downloadermiddlewares\\httpauth.py",
    "ast_data": "ClassDef name:HttpAuthMiddleware FunctionDef name:from_crawler arg:cls arg:crawler arguments arg arg Assign Call Call Return return:yes FunctionDef name:spider_opened arg:self arg:spider arguments arg arg Assign Call Assign Call If BoolOp Assign Call Assign FunctionDef name:process_request arg:self arg:request arg:spider arguments arg arg arg Assign Call If BoolOp Compare BoolOp Call Assign Return return:no"
  },
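  As a usage sketch for the middleware above: credentials come from spider class attributes read in spider_opened, and the hypothetical domain below only illustrates the http_auth_domain scoping check performed in process_request.

```python
import scrapy

class ProtectedSpider(scrapy.Spider):
    name = "protected"
    # Read by HttpAuthMiddleware.spider_opened:
    http_user = "user"
    http_pass = "secret"
    http_auth_domain = "example.com"  # Authorization header limited to this domain
    start_urls = ["https://example.com/protected"]

    def parse(self, response):
        yield {"status": response.status}
```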
  {
    "library": "scikit-learn",
    "name": "_transform_indicator",
    "source_code": "def _transform_indicator(self, X):\n    if self.add_indicator:\n        if not hasattr(self, 'indicator_'):\n            raise ValueError('Make sure to call _fit_indicator before _transform_indicator')\n        return self.indicator_.transform(X)",
    "docstring": "Compute the indicator mask.' Note that X must be the original data as passed to the imputer before any imputation, since imputation may be done inplace in some cases.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\impute\\_base.py",
    "ast_data": "FunctionDef name:_transform_indicator arg:self arg:X arguments arg arg If If Call Raise Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "_codegen_partitions",
    "source_code": "def _codegen_partitions(self) -> None:\n    partitions, signatures = self.graph_partition()\n    for partition, signature in zip(partitions, signatures):\n        assert len(partition) >= 1, f'Each partition must have at least one node but found {len(partition)}'\n        if signature.skip_cudagraph:\n            self._codegen(partition)\n        else:\n            self._codegen_partition_wrapper(partition, signature)\n    num_partitions = next(self._graph_partition_counter)\n    V.graph.wrapper_code.set_all_partition_names(num_partitions)\n    if num_partitions > 0:\n        assert V.graph.partition_maps is not None\n        assert num_partitions == len(V.graph.partition_maps), f'Expect {num_partitions} partition maps but got {len(V.graph.partition_maps)}'",
    "docstring": "Split nodes into partitions and codegen each partition into separate functions. This allows further applying different optimizations (e.g., cudagraph) to each function.",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\scheduler.py",
    "ast_data": "FunctionDef name:_codegen_partitions arg:self arguments arg Assign Call For Call Compare Call Call If Call Call Assign Call Call If Compare Compare Compare Call Call"
  },
  {
    "library": "django",
    "name": "is_3d",
    "source_code": "@property\ndef is_3d(self):\n    return capi.is_3d(self.ptr)",
    "docstring": "Return True if the geometry has Z coordinates.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\gdal\\geometries.py",
    "ast_data": "FunctionDef name:is_3d arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_load_checkpoint_save_and_restore_functions",
    "source_code": "def _load_checkpoint_save_and_restore_functions(self):\n    temp_session = [None]\n    for node_id, proto in self._iter_all_nodes():\n        node = self.get(node_id)\n        if proto.saveable_objects.keys() == {trackable_utils.SERIALIZE_TO_TENSORS_NAME}:\n            assert len(proto.saveable_objects) == 1\n            saveable_object_proto = next(iter(proto.saveable_objects.values()))\n            save_fn_id = saveable_object_proto.save_function\n            restore_fn_id = saveable_object_proto.restore_function\n            node._serialize_to_tensors = self.get(save_fn_id)\n            node._restore_from_tensors = self.get(restore_fn_id)\n        else:\n            saveable_fn_by_name = {}\n            for name, saveable_object_proto in proto.saveable_objects.items():\n                save_fn_id = saveable_object_proto.save_function\n                restore_fn_id = saveable_object_proto.restore_function\n                saveable_fn_by_name[name] = (self.get(save_fn_id), self.get(restore_fn_id))\n            node._self_saveable_object_factories = saveable_object_util.recreate_saveable_objects(saveable_fn_by_name, temp_session)",
    "docstring": "Restores the checkpoint-related save/restore functions to all nodes.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\saved_model\\load.py",
    "ast_data": "FunctionDef name:_load_checkpoint_save_and_restore_functions arg:self arguments arg Assign For Call Assign Call If Compare Call Compare Call Assign Call Call Call Assign Assign Assign Call Assign Call Assign For Call Assign Assign Assign Call Call Assign Call"
  },
  {
    "library": "pytorch",
    "name": "treespec_loads",
    "source_code": "@functools.lru_cache\ndef treespec_loads(serialized: str) -> TreeSpec:\n    orig_treespec = python_pytree.treespec_loads(serialized)\n    dummy_tree = python_pytree.tree_unflatten([0] * orig_treespec.num_leaves, orig_treespec)\n    treespec = tree_structure(dummy_tree)\n    return treespec",
    "docstring": "Deserialize a treespec from a JSON string.",
    "type": "function",
    "file_path": "pytorch\\torch\\utils\\_cxx_pytree.py",
    "ast_data": "FunctionDef name:treespec_loads arg:serialized arguments arg Assign Call Assign Call Assign Call Return return:yes"
  },
  {
    "library": "scipy",
    "name": "spdiags",
    "source_code": "def spdiags(data, diags, m=None, n=None, format=None):\n    if m is None and n is None:\n        m = n = len(data[0])\n    elif n is None:\n        m, n = m\n    return dia_matrix((data, diags), shape=(m, n)).asformat(format)",
    "docstring": "Return a sparse matrix from diagonals. .. warning:: This function returns a sparse matrix -- not a sparse array. You are encouraged to use to take advantage of the sparse array functionality. (See Notes below.) Parameters ---------- data : array_like Matrix diagonals stored row-wise diags : sequence of int or an int Diagonals to set: * k = 0 the main diagonal * k > 0 the kth upper diagonal * k >> import numpy as np >>> from scipy.sparse import spdiags >>> data = np.array([[1, 2, 3, 4], [1, 2, 3, 4], [1, 2, 3, 4]]) >>> diags = np.array([0, -1, 2]) >>> spdiags(data, diags, 4, 4).toarray() array([[1, 0, 3, 0], [1, 2, 0, 4], [0, 2, 3, 0], [0, 0, 3, 4]])",
    "type": "function",
    "file_path": "scipy\\scipy\\sparse\\_construct.py",
    "ast_data": "FunctionDef name:spdiags arg:data arg:diags arg:m arg:n arg:format arguments arg arg arg arg arg If BoolOp Compare Compare Assign Call If Compare Assign Return return:yes Call Call"
  },
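  Following the warning in the spdiags docstring above, a small sketch of the equivalent construction with the sparse-array API (dia_array, available in recent SciPy releases):

```python
import numpy as np
from scipy.sparse import dia_array

data = np.array([[1, 2, 3, 4], [1, 2, 3, 4], [1, 2, 3, 4]])
offsets = np.array([0, -1, 2])

# Same diagonals-to-matrix layout as spdiags(data, offsets, 4, 4),
# but returns a sparse array instead of a sparse matrix.
A = dia_array((data, offsets), shape=(4, 4))
print(A.toarray())
```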
  {
    "library": "scipy",
    "name": "_swap",
    "source_code": "@staticmethod\ndef _swap(x):\n    return x",
    "docstring": "swap the members of x if this is a column-oriented matrix",
    "type": "method",
    "file_path": "scipy\\scipy\\sparse\\_csr.py",
    "ast_data": "FunctionDef name:_swap arg:x arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "report_uninitialized_resources",
    "source_code": "def report_uninitialized_resources(resource_list=None, name='report_uninitialized_resources'):\n    if resource_list is None:\n        resource_list = shared_resources() + local_resources()\n    with ops.name_scope(name):\n        local_device = os.environ.get('TF_DEVICE_FOR_UNINITIALIZED_VARIABLE_REPORTING', '/cpu:0')\n        with ops.device(local_device):\n            if not resource_list:\n                return array_ops.constant([], dtype=dtypes.string)\n            variables_mask = math_ops.logical_not(array_ops_stack.stack([r.is_initialized for r in resource_list]))\n            variable_names_tensor = array_ops.constant([s.handle.name for s in resource_list])\n            return array_ops.boolean_mask(variable_names_tensor, variables_mask)",
    "docstring": "Returns the names of all uninitialized resources in resource_list. If the returned tensor is empty then all resources have been initialized. Args: resource_list: resources to check. If None, will use shared_resources() + local_resources(). name: name for the resource-checking op. Returns: Tensor containing names of the handles of all resources which have not yet been initialized.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\resources.py",
    "ast_data": "FunctionDef name:report_uninitialized_resources arg:resource_list arg:name arguments arg arg If Compare Assign Call Call With Call Assign Call With Call If Return return:yes Call Assign Call Call Assign Call Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "tolist",
    "source_code": "def tolist(self) -> list:\n    if self.ndim > 1:\n        return [x.tolist() for x in self]\n    return list(self)",
    "docstring": "Return a list of the values. These are each a scalar type, which is a Python scalar (for str, int, float) or a pandas scalar (for Timestamp/Timedelta/Interval/Period) Returns ------- list Python list of values in array. See Also -------- Index.to_list: Return a list of the values in the Index. Series.to_list: Return a list of the values in the Series. Examples -------- >>> arr = pd.array([1, 2, 3]) >>> arr.tolist() [1, 2, 3]",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\arrays\\base.py",
    "ast_data": "FunctionDef name:tolist arg:self arguments arg If Compare Return return:yes Call Return return:yes Call"
  },
  {
    "library": "django",
    "name": "_set_level",
    "source_code": "def _set_level(self, value=None):\n    if value is None and hasattr(self, '_level'):\n        del self._level\n    else:\n        self._level = int(value)",
    "docstring": "Set a custom minimum recorded level. If set to `` method).",
    "type": "method",
    "file_path": "django\\django\\contrib\\messages\\storage\\base.py",
    "ast_data": "FunctionDef name:_set_level arg:self arg:value arguments arg arg If BoolOp Compare Call Assign Call"
  },
  {
    "library": "sphinx",
    "name": "Program",
    "source_code": "class Program(SphinxDirective):\n    has_content = False\n    required_arguments = 1\n    optional_arguments = 0\n    final_argument_whitespace = True\n    option_spec: ClassVar[OptionSpec] = {}\n\n    def run(self) -> list[Node]:\n        program = ws_re.sub('-', self.arguments[0].strip())\n        if program == 'None':\n            self.env.ref_context.pop('std:program', None)\n        else:\n            self.env.ref_context['std:program'] = program\n        return []",
    "docstring": "Directive to name the program for which options are documented.",
    "type": "class",
    "file_path": "sphinx\\sphinx\\domains\\std\\__init__.py",
    "ast_data": "ClassDef name:Program Assign Assign Assign Assign FunctionDef name:run arg:self arguments arg Assign Call Call If Compare Call Assign Return return:no"
  },
  {
    "library": "pytorch",
    "name": "_ModificationType",
    "source_code": "class _ModificationType(Enum):\n    SCORE_MOD = 1\n    MASK_MOD = 2\n    UNKNOWN = 3",
    "docstring": "Enum for the type of modification function. - SCORE_MOD: score_mod function which accepts a score as the first argument - mask_mod: mask function which does not accept a score and is only used for generating block mask",
    "type": "class",
    "file_path": "pytorch\\torch\\nn\\attention\\flex_attention.py",
    "ast_data": "ClassDef name:_ModificationType Assign Assign Assign"
  },
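  To make the two callable shapes in the enum above concrete, a sketch of a score_mod and a mask_mod as accepted by torch.nn.attention.flex_attention (signatures per the public docs; the bias and causal rules are arbitrary examples):

```python
def relative_bias_score_mod(score, b, h, q_idx, kv_idx):
    # SCORE_MOD: receives the attention score as its first argument.
    return score + (q_idx - kv_idx)

def causal_mask_mod(b, h, q_idx, kv_idx):
    # MASK_MOD: no score argument; returns a boolean used to build a block mask.
    return q_idx >= kv_idx
```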
  {
    "library": "tensorflow",
    "name": "_global_distribute_strategy_scope",
    "source_code": "@property\ndef _global_distribute_strategy_scope(self):\n    if not hasattr(self._thread_local, 'distribute_strategy_scope'):\n        self._thread_local.distribute_strategy_scope = None\n    return self._thread_local.distribute_strategy_scope",
    "docstring": "For implementing .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\ops.py",
    "ast_data": "FunctionDef name:_global_distribute_strategy_scope arg:self arguments arg If Call Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_create_serialize_to_tensor_saveable",
    "source_code": "def _create_serialize_to_tensor_saveable(self, saveable_factories):\n    suffix = saveable_compat.get_saveable_name(self.trackable) or ''\n    saveable_name = _extract_saveable_name(self.object_proto.attributes[0].checkpoint_key) + suffix\n    if not context.executing_eagerly():\n        existing_op = self._checkpoint.restore_ops_by_name.get(saveable_name, None)\n        if existing_op is not None:\n            return ([existing_op], {})\n        saveables_cache = self._checkpoint.saveables_cache.setdefault(self.trackable, {})\n        if saveable_name in saveables_cache:\n            return ([], {saveable_name: saveables_cache[saveable_name]})\n    saveable = saveable_factories[trackable_utils.SERIALIZE_TO_TENSORS_NAME](name=saveable_name)\n    if not context.executing_eagerly():\n        saveables_cache[saveable_name] = saveable\n    return ([], {saveable_name: saveable})",
    "docstring": "Creates a saveable using the _serialize_to_tensor method.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\checkpoint\\restore.py",
    "ast_data": "FunctionDef name:_create_serialize_to_tensor_saveable arg:self arg:saveable_factories arguments arg arg Assign BoolOp Call Assign Call If Call Assign Call If Compare Return return:yes Assign Call If Compare Return return:yes Assign Call If Call Assign Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "is_same_shape",
    "source_code": "def is_same_shape(a: Sequence, b: Sequence) -> bool:\n    return tuple(a) == tuple(b)",
    "docstring": "Compares two shapes a and b, returning True if they are the same (their ranks and corresponding lengths match) and False otherwise.",
    "type": "function",
    "file_path": "pytorch\\torch\\_prims_common\\__init__.py",
    "ast_data": "FunctionDef name:is_same_shape arg:a arg:b arguments arg arg Return return:yes Compare Call Call"
  },
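  is_same_shape compares rank and per-dimension lengths by normalizing both sequences to tuples; a standalone illustration:

```python
# Standalone illustration of the comparison performed by is_same_shape.
def is_same_shape(a, b):
    return tuple(a) == tuple(b)

assert is_same_shape((2, 3), [2, 3])         # same rank and lengths
assert not is_same_shape((2, 3), (2, 3, 1))  # differing rank
```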
  {
    "library": "django",
    "name": "wkt",
    "source_code": "@property\ndef wkt(self):\n    return 'POLYGON((%s %s,%s %s,%s %s,%s %s,%s %s))' % (self.min_x, self.min_y, self.min_x, self.max_y, self.max_x, self.max_y, self.max_x, self.min_y, self.min_x, self.min_y)",
    "docstring": "Return WKT representing a Polygon for this envelope.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\gdal\\envelope.py",
    "ast_data": "FunctionDef name:wkt arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_WrapperFunction",
    "source_code": "class _WrapperFunction(function.ConcreteFunction):\n\n    def __init__(self, concrete_function):\n        self.__dict__.update(vars(concrete_function))\n\n    def _call_flat(self, args, captured_inputs):\n\n        def get_handle(x):\n            return x.handle if distribute_utils.is_distributed_variable(x) else x\n\n        def get_unused_handle(x):\n            return _unused_handle() if distribute_utils.is_distributed_variable(x) else x\n        if distribute_lib.get_replica_context() is not None or values_util.is_saving_non_distributed():\n            captured_inputs = list(map(get_handle, captured_inputs))\n        else:\n            captured_inputs = list(map(get_unused_handle, captured_inputs))\n        return super()._call_flat(args, captured_inputs)",
    "docstring": "A class wraps a concrete function to handle different distributed contexts. The reason for wrapping a concrete function is because the _captured_inputs fields used for in-replica context and cross-replica context are different. When is called from within a tf.distribute.strategy scope, the captured inputs are distributed variables. When using these distributed variables during calling the function, we need different approaches when it is in-replica and when it is not in-replica. When it is in replica, naturally we should use the corresponding component of the distributed variable; when it is not in-replica, calling the function should mean that it is constructing a graph that is not actually going to be used. A typical use case is when constructing a functional model. In this case, return a placeholder with a control dependency to ensure that is never accessed.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\saved_model\\load.py",
    "ast_data": "ClassDef name:_WrapperFunction FunctionDef name:__init__ arg:self arg:concrete_function arguments arg arg Call Call FunctionDef name:_call_flat arg:self arg:args arg:captured_inputs arguments arg arg arg FunctionDef name:get_handle arg:x arguments arg Return return:yes Call FunctionDef name:get_unused_handle arg:x arguments arg Return return:yes Call Call If BoolOp Compare Call Call Assign Call Call Assign Call Call Return return:yes Call Call"
  },
  {
    "library": "numpy",
    "name": "chebval2d",
    "source_code": "def chebval2d(x, y, c):\n    return pu._valnd(chebval, c, x, y)",
    "docstring": "Evaluate a 2-D Chebyshev series at points (x, y). This function returns the values: .. math:: p(x,y) = \\sum_{i,j} c_{i,j} * T_i(x) * T_j(y) The parameters and are converted to arrays only if they are tuples or a lists, otherwise they are treated as a scalars and they must have the same shape after conversion. In either case, either and or their elements must support multiplication and addition both with themselves and with the elements of . If is a 1-D array a one is implicitly appended to its shape to make it 2-D. The shape of the result will be c.shape[2:] + x.shape. Parameters ---------- x, y : array_like, compatible objects The two dimensional series is evaluated at the points `xyxycxy`. See Also -------- chebval, chebgrid2d, chebval3d, chebgrid3d",
    "type": "function",
    "file_path": "numpy\\numpy\\polynomial\\chebyshev.py",
    "ast_data": "FunctionDef name:chebval2d arg:x arg:y arg:c arguments arg arg arg Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "is_array_api_obj",
    "source_code": "def is_array_api_obj(x: object) -> TypeIs[_ArrayApiObj]:\n    return hasattr(x, '__array_namespace__') or _is_array_api_cls(cast(Hashable, type(x)))",
    "docstring": "Return True if is an array API compatible array object. See Also -------- array_namespace is_numpy_array is_cupy_array is_torch_array is_ndonnx_array is_dask_array is_jax_array",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\externals\\array_api_compat\\common\\_helpers.py",
    "ast_data": "FunctionDef name:is_array_api_obj arg:x arguments arg Return return:yes BoolOp Call Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "build_ring_all_reduce",
    "source_code": "def build_ring_all_reduce(input_tensors, num_workers, num_subchunks, gpu_perm, red_op, un_op=None):\n    if len(input_tensors) < 2:\n        raise ValueError('input_tensors must be length 2 or longer')\n    input_tensors, shape = _flatten_tensors(input_tensors)\n    devices = [t.device for t in input_tensors]\n    pred_by_s_d, rank_by_s_d = _ring_permutations(num_workers, num_subchunks, gpu_perm)\n    chunks_by_dev, pad_len = _build_ring_gather(input_tensors, devices, num_subchunks, pred_by_s_d, rank_by_s_d, red_op)\n    if un_op:\n        chunks_by_dev = _apply_unary_to_chunks(un_op, chunks_by_dev)\n    output_tensors = _build_ring_scatter(pred_by_s_d, rank_by_s_d, chunks_by_dev)\n    if pad_len > 0:\n        output_tensors = _strip_padding(output_tensors, pad_len)\n    if len(shape) != 1:\n        output_tensors = _reshape_tensors(output_tensors, shape)\n    return output_tensors",
    "docstring": "Construct a subgraph performing a ring-style all-reduce of input_tensors. Args: input_tensors: a list of objects, which must all have the same shape and type. num_workers: number of worker tasks spanned by input_tensors. num_subchunks: number of subchunks each device should process in one tick. gpu_perm: a list of ints giving a ring-wise rank ordering of GPUs at each worker. All workers must have the same number of GPUs with the same rank ordering. If NVLINK is available, this should be a ring order supported by NVLINK edges. red_op: a binary operator for elementwise reduction. un_op: an optional unary operator to apply to fully reduced values. Raises: ValueError: empty input_tensors or they don't all have same size. Returns: a list of identical sum-reductions of input_tensors.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\v1\\all_reduce.py",
    "ast_data": "FunctionDef name:build_ring_all_reduce arg:input_tensors arg:num_workers arg:num_subchunks arg:gpu_perm arg:red_op arg:un_op arguments arg arg arg arg arg arg If Compare Call Raise Call Assign Call Assign Assign Call Assign Call If Assign Call Assign Call If Compare Assign Call If Compare Call Assign Call Return return:yes"
  },
  {
    "library": "scipy",
    "name": "__init__",
    "source_code": "def __init__(self, dim=None, seed=None):\n    self._dist = unitary_group_gen(seed)\n    self.dim = self._dist._process_parameters(dim)",
    "docstring": "Create a frozen (U(N)) n-dimensional unitary matrix distribution. Parameters ---------- dim : scalar Dimension of matrices seed : {None, int, , }, optional If is None (or ), the singleton is used. If is an int, a new `seedseed` instance then that instance is used. Examples -------- >>> from scipy.stats import unitary_group >>> x = unitary_group(3) >>> x.rvs()",
    "type": "method",
    "file_path": "scipy\\scipy\\stats\\_multivariate.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:dim arg:seed arguments arg arg arg Assign Call Assign Call"
  },
  {
    "library": "django",
    "name": "__init__",
    "source_code": "def __init__(self, dr_input):\n    if isinstance(dr_input, str):\n        self.ensure_registered()\n        if dr_input.lower() in self._alias:\n            name = self._alias[dr_input.lower()]\n        else:\n            name = dr_input\n        driver = c_void_p(capi.get_driver_by_name(force_bytes(name)))\n    elif isinstance(dr_input, int):\n        self.ensure_registered()\n        driver = capi.get_driver(dr_input)\n    elif isinstance(dr_input, c_void_p):\n        driver = dr_input\n    else:\n        raise GDALException('Unrecognized input type for GDAL/OGR Driver: %s' % type(dr_input))\n    if not driver:\n        raise GDALException('Could not initialize GDAL/OGR Driver on input: %s' % dr_input)\n    self.ptr = driver",
    "docstring": "Initialize an GDAL/OGR driver on either a string or integer input.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\gdal\\driver.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:dr_input arguments arg arg If Call Call If Compare Call Assign Call Assign Assign Call Call Call If Call Call Assign Call If Call Assign Raise Call Call If Raise Call Assign"
  },
  {
    "library": "pygame",
    "name": "wait",
    "source_code": "def wait(self):\n    self.queue.join()",
    "docstring": "waits until all tasks are complete.",
    "type": "method",
    "file_path": "pygame\\src_py\\threads\\__init__.py",
    "ast_data": "FunctionDef name:wait arg:self arguments arg Call"
  },
  {
    "library": "matplotlib",
    "name": "remove",
    "source_code": "def remove(self, a):\n    self._mapping.pop(a, {a}).remove(a)\n    self._ordering.pop(a, None)",
    "docstring": "Remove *a* from the grouper, doing nothing if it is not there.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\cbook.py",
    "ast_data": "FunctionDef name:remove arg:self arg:a arguments arg arg Call Call Call"
  },
  {
    "library": "pygame",
    "name": "MidiException",
    "source_code": "class MidiException(Exception):\n\n    def __init__(self, value):\n        super().__init__(value)\n        self.parameter = value\n\n    def __str__(self):\n        return repr(self.parameter)",
    "docstring": "exception that pygame.midi functions and classes can raise MidiException(errno)",
    "type": "class",
    "file_path": "pygame\\src_py\\midi.py",
    "ast_data": "ClassDef name:MidiException FunctionDef name:__init__ arg:self arg:value arguments arg arg Call Call Assign FunctionDef name:__str__ arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "update",
    "source_code": "def update(self):\n    if not self.ax.get_visible() or self.ax.get_figure(root=True)._get_renderer() is None:\n        return\n    if self.useblit:\n        if self.background is not None:\n            self.canvas.restore_region(self.background)\n        else:\n            self.update_background(None)\n        artists = sorted(self.artists + self._get_animated_artists(), key=lambda a: a.get_zorder())\n        for artist in artists:\n            self.ax.draw_artist(artist)\n        self.canvas.blit(self.ax.bbox)\n    else:\n        self.canvas.draw_idle()",
    "docstring": "Draw using blit() or draw_idle(), depending on ``.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\widgets.py",
    "ast_data": "FunctionDef name:update arg:self arguments arg If BoolOp Call Compare Call Call Return return:no If If Compare Call Call Assign Call Call arguments arg Call For Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "meta_binop_inplace_alpha",
    "source_code": "@register_meta([aten.add_.Scalar, aten.sub_.Scalar, aten.add_.Tensor, aten.sub_.Tensor])\ndef meta_binop_inplace_alpha(self, other, alpha=1):\n\n    def is_integeric(arg):\n        if isinstance(arg, TensorLike):\n            return utils.is_integer_dtype(arg.dtype)\n        else:\n            return isinstance(arg, IntLike)\n\n    def is_floatic(arg):\n        if isinstance(arg, TensorLike):\n            return utils.is_float_dtype(arg.dtype)\n        else:\n            return isinstance(arg, FloatLike)\n\n    def is_booleanic(arg):\n        if isinstance(arg, TensorLike):\n            return utils.is_boolean_dtype(arg.dtype)\n        else:\n            return isinstance(arg, BoolLike)\n    if is_integeric(self) and is_floatic(other):\n        raise RuntimeError('Promotion of int.add/sub_(float) in in-place ops are not possible due to element size change.')\n    if is_booleanic(self) and (not is_booleanic(other)):\n        raise RuntimeError('Promotion of book.add/sub_(others) in in-place ops are not possible due to element size change.')\n    if isinstance(other, torch.Tensor):\n        check_inplace_broadcast(self.shape, other.shape)\n    return self",
    "docstring": "Some checks for inplace ops. Checks for promotion rules for some dtypes. int.add/sub_(float) and bool.add/sub_(others) are rejected. Promoting in these in-place operations would require reallocating and copying over elements, hence not allowed. Checks for alpha param.",
    "type": "function",
    "file_path": "pytorch\\torch\\_meta_registrations.py",
    "ast_data": "FunctionDef name:meta_binop_inplace_alpha arg:self arg:other arg:alpha arguments arg arg arg FunctionDef name:is_integeric arg:arg arguments arg If Call Return return:yes Call Return return:yes Call FunctionDef name:is_floatic arg:arg arguments arg If Call Return return:yes Call Return return:yes Call FunctionDef name:is_booleanic arg:arg arguments arg If Call Return return:yes Call Return return:yes Call If BoolOp Call Call Raise Call If BoolOp Call Call Raise Call If Call Call Return return:yes Call"
  },
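  The rejected promotions above can be observed in eager mode as well; a brief sketch (the eager error message differs in wording from the meta check's):

```python
import torch

a = torch.ones(3, dtype=torch.int64)
try:
    a.add_(0.5)  # int.add_(float): would change element size, so it is rejected
except RuntimeError as e:
    print(e)
```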
  {
    "library": "kornia",
    "name": "visualize",
    "source_code": "def visualize(self, image: Tensor, show_trajectories: bool=True) -> Tensor:\n    frame_raw = (tensor_to_image(image) * 255).astype(np.uint8)\n    self.tracker.plot_results(frame_raw, show_trajectories=show_trajectories)\n    return tensor(frame_raw).permute(2, 0, 1)",
    "docstring": "Visualize the results of the tracker. Args: image: The input image. show_trajectories: Whether to show the trajectories. Returns: The image with the results of the tracker.",
    "type": "method",
    "file_path": "kornia\\kornia\\models\\tracking\\boxmot_tracker.py",
    "ast_data": "FunctionDef name:visualize arg:self arg:image arg:show_trajectories arguments arg arg arg Assign Call Call Call Return return:yes Call Call"
  },
  {
    "library": "django",
    "name": "decompress",
    "source_code": "def decompress(self, value):\n    raise NotImplementedError('Subclasses must implement this method.')",
    "docstring": "Return a list of decompressed values for the given compressed value. The given value can be assumed to be valid, but not necessarily non-empty.",
    "type": "method",
    "file_path": "django\\django\\forms\\widgets.py",
    "ast_data": "FunctionDef name:decompress arg:self arg:value arguments arg arg Raise Call"
  },
  {
    "library": "pandas",
    "name": "_get_sys_info",
    "source_code": "def _get_sys_info() -> dict[str, JSONSerializable]:\n    uname_result = platform.uname()\n    language_code, encoding = locale.getlocale()\n    return {'commit': _get_commit_hash(), 'python': platform.python_version(), 'python-bits': struct.calcsize('P') * 8, 'OS': uname_result.system, 'OS-release': uname_result.release, 'Version': uname_result.version, 'machine': uname_result.machine, 'processor': uname_result.processor, 'byteorder': sys.byteorder, 'LC_ALL': os.environ.get('LC_ALL'), 'LANG': os.environ.get('LANG'), 'LOCALE': {'language-code': language_code, 'encoding': encoding}}",
    "docstring": "Returns system information as a JSON serializable dictionary.",
    "type": "function",
    "file_path": "pandas\\pandas\\util\\_print_versions.py",
    "ast_data": "FunctionDef name:_get_sys_info arguments Assign Call Assign Call Return return:yes Call Call Call Call Call"
  },
  {
    "library": "django",
    "name": "_load_serializers",
    "source_code": "def _load_serializers():\n    global _serializers\n    serializers = {}\n    for format in BUILTIN_SERIALIZERS:\n        register_serializer(format, BUILTIN_SERIALIZERS[format], serializers)\n    if hasattr(settings, 'SERIALIZATION_MODULES'):\n        for format in settings.SERIALIZATION_MODULES:\n            register_serializer(format, settings.SERIALIZATION_MODULES[format], serializers)\n    _serializers = serializers",
    "docstring": "Register built-in and settings-defined serializers. This is done lazily so that user code has a chance to (e.g.) set up custom settings without needing to be careful of import order.",
    "type": "function",
    "file_path": "django\\django\\core\\serializers\\__init__.py",
    "ast_data": "FunctionDef name:_load_serializers arguments Assign For Call If Call For Call Assign"
  },
  {
    "library": "pytorch",
    "name": "remove_dims",
    "source_code": "def remove_dims(it):\n    return [item for item, is_removable in zip(it, removable_dims) if not is_removable]",
    "docstring": "Removes any broadcasting or singleton dims from a given sequence",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\codegen\\triton.py",
    "ast_data": "FunctionDef name:remove_dims arg:it arguments arg Return return:yes Call"
  },
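  remove_dims is a closure over a removable_dims mask defined in its enclosing scope; a self-contained sketch with a hypothetical mask:

```python
# Hypothetical mask standing in for the captured `removable_dims`.
removable_dims = [False, True, False]

def remove_dims(it):
    return [item for item, is_removable in zip(it, removable_dims) if not is_removable]

print(remove_dims(["x", "y", "z"]))  # ['x', 'z'] -- the removable dim 'y' is dropped
```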
  {
    "library": "tensorflow",
    "name": "broadcast_dynamic_shape",
    "source_code": "@tf_export('broadcast_dynamic_shape')\n@dispatch.add_dispatch_support\ndef broadcast_dynamic_shape(shape_x, shape_y):\n    return gen_array_ops.broadcast_args(shape_x, shape_y)",
    "docstring": "Computes the shape of a broadcast given symbolic shapes. When and are Tensors representing shapes (i.e. the result of calling tf.shape on another Tensor) this computes a Tensor which is the shape of the result of a broadcasting op applied in tensors of shapes and . This is useful when validating the result of a broadcasting operation when the tensors do not have statically known shapes. Example: >>> shape_x = (1, 2, 3) >>> shape_y = (5, 1, 3) >>> tf.broadcast_dynamic_shape(shape_x, shape_y) Args: shape_x: A rank 1 integer , representing the shape of x. shape_y: A rank 1 integer , representing the shape of y. Returns: A rank 1 integer representing the broadcasted shape. Raises: InvalidArgumentError: If the two shapes are incompatible for broadcasting.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\array_ops.py",
    "ast_data": "FunctionDef name:broadcast_dynamic_shape arg:shape_x arg:shape_y arguments arg arg Return return:yes Call Call"
  },
  {
    "library": "matplotlib",
    "name": "codes",
    "source_code": "@property\ndef codes(self):\n    return self._codes",
    "docstring": "Return the codes",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\textpath.py",
    "ast_data": "FunctionDef name:codes arg:self arguments arg Return return:yes"
  },
  {
    "library": "sphinx",
    "name": "_process_docstring",
    "source_code": "def _process_docstring(app: Sphinx, what: str, name: str, obj: Any, options: Any, lines: list[str]) -> None:\n    result_lines = lines\n    docstring: GoogleDocstring\n    if app.config.napoleon_numpy_docstring:\n        docstring = NumpyDocstring(result_lines, app.config, app, what, name, obj, options)\n        result_lines = docstring.lines()\n    if app.config.napoleon_google_docstring:\n        docstring = GoogleDocstring(result_lines, app.config, app, what, name, obj, options)\n        result_lines = docstring.lines()\n    lines[:] = result_lines.copy()",
    "docstring": "Process the docstring for a given python object. Called when autodoc has read and processed a docstring. is a list of docstring lines that modifies in place to change what Sphinx outputs. The following settings in conf.py control what styles of docstrings will be parsed: * `lines` is modified *in place*",
    "type": "function",
    "file_path": "sphinx\\sphinx\\ext\\napoleon\\__init__.py",
    "ast_data": "FunctionDef name:_process_docstring arg:app arg:what arg:name arg:obj arg:options arg:lines arguments arg arg arg arg arg arg Assign If Assign Call Assign Call If Assign Call Assign Call Assign Call"
  },
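  The two config flags read by _process_docstring are the standard napoleon settings; a typical conf.py enabling both parse passes:

```python
# conf.py (Sphinx) -- settings consumed by sphinx.ext.napoleon's
# _process_docstring hook above.
extensions = ["sphinx.ext.autodoc", "sphinx.ext.napoleon"]
napoleon_numpy_docstring = True   # run the NumpyDocstring pass first
napoleon_google_docstring = True  # then the GoogleDocstring pass
```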
  {
    "library": "tensorflow",
    "name": "to_tensors",
    "source_code": "@doc_controls.do_not_doc_inheritable\ndef to_tensors(self, value):\n    return super().to_tensors(value)",
    "docstring": "See tf.types.experimental.TraceType base class.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\tensor_shape.py",
    "ast_data": "FunctionDef name:to_tensors arg:self arg:value arguments arg arg Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "ParallelDepth",
    "source_code": "@dataclasses.dataclass\nclass ParallelDepth:\n    parallel_depth: int\n    start_depth: int",
    "docstring": "A class representing parallel depth. Includes the starting depth of parallelism and the depth of parallelism.",
    "type": "class",
    "file_path": "pytorch\\torch\\_inductor\\codegen\\cpp.py",
    "ast_data": "ClassDef name:ParallelDepth"
  },
  {
    "library": "cherrypy",
    "name": "__init__",
    "source_code": "def __init__(self, session_id, timeout):\n    self.session_id = session_id\n    if timeout:\n        self.timer = Timer.after(timeout)\n    else:\n        self.timer = NeverExpires()",
    "docstring": "Initialize a lock acquisition tracker.",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\lib\\locking.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:session_id arg:timeout arguments arg arg arg Assign If Assign Call Assign Call"
  },
  {
    "library": "scikit-learn",
    "name": "_staged_raw_predict",
    "source_code": "def _staged_raw_predict(self, X, check_input=True):\n    if check_input:\n        X = validate_data(self, X, dtype=DTYPE, order='C', accept_sparse='csr', reset=False)\n    raw_predictions = self._raw_predict_init(X)\n    for i in range(self.estimators_.shape[0]):\n        predict_stage(self.estimators_, i, X, self.learning_rate, raw_predictions)\n        yield raw_predictions.copy()",
    "docstring": "Compute raw predictions of `classes_`.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\ensemble\\_gb.py",
    "ast_data": "FunctionDef name:_staged_raw_predict arg:self arg:X arg:check_input arguments arg arg arg If Assign Call Assign Call For Call Call Call"
  },
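  _staged_raw_predict is private, but the staged monitoring it enables is exposed through staged_predict; a sketch with synthetic data:

```python
from sklearn.datasets import make_regression
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.metrics import mean_squared_error

X, y = make_regression(n_samples=200, random_state=0)
est = GradientBoostingRegressor(n_estimators=20, random_state=0).fit(X, y)

# One prediction per boosting stage, suitable for tracking error over stages.
for i, y_pred in enumerate(est.staged_predict(X)):
    if i % 5 == 0:
        print(i, mean_squared_error(y, y_pred))
```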
  {
    "library": "tensorflow",
    "name": "_ZerosLikeV1",
    "source_code": "def _ZerosLikeV1(op, index):\n    val = op.outputs[index]\n    op_ctxt = op._get_control_flow_context()\n    if op_ctxt:\n        pred = op_ctxt.pred\n        branch = op_ctxt.branch\n        switch_val = control_flow_ops.switch(op.inputs[0], pred)[1 - branch]\n        pivot = array_ops.identity(switch_val)\n        if val.dtype == dtypes.resource:\n            with ops.control_dependencies([pivot]):\n                return array_ops.zeros(gen_resource_variable_ops.variable_shape(switch_val), dtype=default_gradient.get_zeros_dtype(val))\n        zeros_shape = array_ops.shape_internal(switch_val, optimize=False)\n        with ops.control_dependencies([pivot]):\n            return array_ops.zeros(zeros_shape, dtype=val.dtype)\n    else:\n        return array_ops.zeros_like(val, optimize=False)",
    "docstring": "Branch of ZerosLike for TF1.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\control_flow_state.py",
    "ast_data": "FunctionDef name:_ZerosLikeV1 arg:op arg:index arguments arg arg Assign Assign Call If Assign Assign Assign Call Assign Call If Compare With Call Return return:yes Call Call Call Assign Call With Call Return return:yes Call Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "_make_hyp2f1_test_case",
    "source_code": "def _make_hyp2f1_test_case(a, b, c, z, rtol):\n    expected = mp_hyp2f1(a, b, c, z)\n    return f'    pytest.param(\\n        Hyp2f1TestCase(\\n            a={a},\\n            b={b},\\n            c={c},\\n            z={z},\\n            expected={expected},\\n            rtol={rtol},\\n        ),\\n    ),'",
    "docstring": "Generate string for single test case as used in test_hyp2f1.py.",
    "type": "function",
    "file_path": "scipy\\scipy\\special\\_precompute\\hyp2f1_data.py",
    "ast_data": "FunctionDef name:_make_hyp2f1_test_case arg:a arg:b arg:c arg:z arg:rtol arguments arg arg arg arg arg Assign Call Return return:yes"
  },
  {
    "library": "scipy",
    "name": "time_mul_inv",
    "source_code": "def time_mul_inv(self, num_rotations):\n    self.rotations * self.rotations.inv()",
    "docstring": "Time multiplication and inverse of rotations",
    "type": "method",
    "file_path": "scipy\\benchmarks\\benchmarks\\spatial.py",
    "ast_data": "FunctionDef name:time_mul_inv arg:self arg:num_rotations arguments arg arg Call"
  },
  {
    "library": "pytorch",
    "name": "get_identical_regions",
    "source_code": "def get_identical_regions(self, graph: torch.fx.Graph) -> list[list[Region]]:\n    topological_ranking = {node: i for i, node in enumerate(graph.nodes)}\n    region_groups_with_rank = []\n    node_to_recursive_ancestors = _populate_recursive_ancestor_map(graph)\n    for group in self.hash_to_duplicates.values():\n        if len(group) > 1:\n            region_group = []\n            min_rank = math.inf\n            for node in group:\n                if node in topological_ranking:\n                    min_rank = min(min_rank, topological_ranking[node])\n                    region_group.append([node])\n            if len(region_group) > 1:\n                region_groups_with_rank.append((region_group, min_rank))\n    region_groups_with_rank.sort(key=lambda rg: -rg[1])\n    region_groups = [rg for rg, _ in region_groups_with_rank]\n    seen_nodes: set[Node] = set()\n    for region_group in region_groups:\n        fully_expand_region_group(region_group, seen_nodes, node_to_recursive_ancestors, self._is_identical)\n        for region in region_group:\n            region.sort(key=lambda n: topological_ranking[n])\n    return [region_group for region_group in region_groups if len(region_group[0]) > 1]",
    "docstring": "This function is responsible for extracting the largest regions of identical nodes from the given graph. **Note**: This function assumes the nodes that have been tracked with track_node are in the provided graph argument. The algorithm proceeds as follows: The nodes tracked via track_node above are organized into region groups. The initial region groups look like this: [[IdenticalNode1], [IdenticalNode2], [IdenticalNode3]] and each sublist is called a region. For each region group (starting at the topologically latest region group), the inner regions are gradually expanded one node at time from the flattened args and kwargs of the node in each region provided that for all regions in the group, the nodes being added are also identical (ie have the same key computed by track_node). This is checked by verifying that the two nodes have the same identical node list in node_to_duplicates.",
    "type": "method",
    "file_path": "pytorch\\torch\\_dynamo\\graph_region_tracker.py",
    "ast_data": "FunctionDef name:get_identical_regions arg:self arg:graph arguments arg arg Assign Call Assign Assign Call For Call If Compare Call Assign Assign For If Compare Assign Call Call If Compare Call Call Call arguments arg Assign Call For Call For Call arguments arg Return return:yes Compare Call"
  },
  {
    "library": "numpy",
    "name": "addRoutine",
    "source_code": "def addRoutine(self, rname):\n    self.getRoutine(rname)",
    "docstring": "Add a routine to the library.",
    "type": "method",
    "file_path": "numpy\\numpy\\linalg\\lapack_lite\\make_lite.py",
    "ast_data": "FunctionDef name:addRoutine arg:self arg:rname arguments arg arg Call"
  },
  {
    "library": "tensorflow",
    "name": "node_device",
    "source_code": "def node_device(self, node_name):\n    if not self._debug_graphs:\n        raise LookupError('Node devices are not loaded from partition graphs yet.')\n    if node_name not in self._node_devices:\n        raise ValueError(\"Node '%s' does not exist in partition graphs.\" % node_name)\n    output = list(self._node_devices[node_name])\n    return output[0] if len(output) == 1 else output",
    "docstring": "Get the names of the devices that has nodes of the specified name. Args: node_name: () name of the node. Returns: ( or of ) name of the device(s) on which the node of the given name is found. Returns a if there is only one such device, otherwise return a of . Raises: LookupError: If node inputs and control inputs have not been loaded from partition graphs yet. ValueError: If the node does not exist in partition graphs.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\debug\\lib\\debug_data.py",
    "ast_data": "FunctionDef name:node_device arg:self arg:node_name arguments arg arg If Raise Call If Compare Raise Call Assign Call Return return:yes Compare Call"
  },
  {
    "library": "pytorch",
    "name": "__init__",
    "source_code": "def __init__(self, extensions: Optional[Sequence[StreamTransformExtension]]=None) -> None:\n    self.extensions = () if extensions is None else extensions",
    "docstring": "If the extensions arg is None, this means the implementation should provide whatever defaults it chooses. An empty sequence indicates no extensions should be used. At this time, the default extensions sequence is empty.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\checkpoint\\filesystem.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:extensions arguments arg arg Assign Compare"
  },
  {
    "library": "scipy",
    "name": "find",
    "source_code": "def find(A):\n    A = coo_array(A, copy=True)\n    A.sum_duplicates()\n    nz_mask = A.data != 0\n    return (A.row[nz_mask], A.col[nz_mask], A.data[nz_mask])",
    "docstring": "Return the indices and values of the nonzero elements of a matrix Parameters ---------- A : dense or sparse array or matrix Matrix whose nonzero elements are desired. Returns ------- (I,J,V) : tuple of arrays I,J, and V contain the row indices, column indices, and values of the nonzero entries. Examples -------- >>> from scipy.sparse import csr_array, find >>> A = csr_array([[7.0, 8.0, 0],[0, 0, 9.0]]) >>> find(A) (array([0, 0, 1], dtype=int32), array([0, 1, 2], dtype=int32), array([ 7., 8., 9.]))",
    "type": "function",
    "file_path": "scipy\\scipy\\sparse\\_extract.py",
    "ast_data": "FunctionDef name:find arg:A arguments arg Assign Call Call Assign Compare Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "result_type_unary",
    "source_code": "def result_type_unary(a, dtype):\n    if dtype:\n        return result_type(dtype)\n    if isinstance(a, str):\n        return np.str_\n    elif isinstance(a, bytes):\n        return np.bytes_\n    return result_type(a)",
    "docstring": "Find the result type from a single input and a dtype.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\numpy_ops\\np_utils.py",
    "ast_data": "FunctionDef name:result_type_unary arg:a arg:dtype arguments arg arg If Return return:yes Call If Call Return return:yes If Call Return return:yes Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_HistogramMseSymmetric",
    "source_code": "@_implements(_CalibrationMethod.CALIBRATION_METHOD_HISTOGRAM_MSE_SYMMETRIC)\nclass _HistogramMseSymmetric(_HistogramCalibrationAlgorithmBase):\n\n    def get_min_max_value(self) -> tuple[float, float]:\n        return self._get_min_max_value_by_expanding_range(self._num_bins // 2)",
    "docstring": "HistogramMseSymmetric for calculating min and max values of calibration result.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\compiler\\mlir\\quantization\\tensorflow\\calibrator\\calibration_algorithm.py",
    "ast_data": "ClassDef name:_HistogramMseSymmetric FunctionDef name:get_min_max_value arg:self arguments arg Return return:yes Call Call"
  },
  {
    "library": "authlib",
    "name": "authorize_access_token",
    "source_code": "def authorize_access_token(self, **kwargs):\n    params = request.args.to_dict(flat=True)\n    state = params.get('oauth_token')\n    if not state:\n        raise OAuthError(description='Missing \"oauth_token\" parameter')\n    data = self.framework.get_state_data(session, state)\n    if not data:\n        raise OAuthError(description='Missing \"request_token\" in temporary data')\n    params['request_token'] = data['request_token']\n    params.update(kwargs)\n    self.framework.clear_state_data(session, state)\n    token = self.fetch_access_token(**params)\n    self.token = token\n    return token",
    "docstring": "Fetch access token in one step. :return: A token dict.",
    "type": "method",
    "file_path": "authlib\\authlib\\integrations\\flask_client\\apps.py",
    "ast_data": "FunctionDef name:authorize_access_token arg:self arguments arg arg Assign Call Assign Call If Raise Call Assign Call If Raise Call Assign Call Call Assign Call Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_create_fakeparams",
    "source_code": "def _create_fakeparams(func_graph, template_tensors):\n    with func_graph.as_default():\n        return [gen_functional_ops.fake_param(dtype=t.dtype, shape=_convert_dynamic_dimension_to_zero(t.shape)) for t in template_tensors]",
    "docstring": "Creates FakeParams for the XLA case.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\cond_v2.py",
    "ast_data": "FunctionDef name:_create_fakeparams arg:func_graph arg:template_tensors arguments arg arg With Call Return return:yes Call Call"
  },
  {
    "library": "matplotlib",
    "name": "blended_transform_factory",
    "source_code": "def blended_transform_factory(x_transform, y_transform):\n    if isinstance(x_transform, Affine2DBase) and isinstance(y_transform, Affine2DBase):\n        return BlendedAffine2D(x_transform, y_transform)\n    return BlendedGenericTransform(x_transform, y_transform)",
    "docstring": "Create a new \"blended\" transform using *x_transform* to transform the *x*-axis and *y_transform* to transform the *y*-axis. A faster version of the blended transform is returned for the case where both child transforms are affine.",
    "type": "function",
    "file_path": "matplotlib\\lib\\matplotlib\\transforms.py",
    "ast_data": "FunctionDef name:blended_transform_factory arg:x_transform arg:y_transform arguments arg arg If BoolOp Call Call Return return:yes Call Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "fit",
    "source_code": "def fit(self, x, fix_mean=None, fix_cov=None):\n    x = np.asarray(x)\n    if x.ndim != 2:\n        raise ValueError('`x` must be two-dimensional.')\n    n_vectors, dim = x.shape\n    if fix_mean is not None:\n        fix_mean = np.atleast_1d(fix_mean)\n        if fix_mean.shape != (dim,):\n            msg = '`fix_mean` must be a one-dimensional array the same length as the dimensionality of the vectors `x`.'\n            raise ValueError(msg)\n        mean = fix_mean\n    else:\n        mean = x.mean(axis=0)\n    if fix_cov is not None:\n        fix_cov = np.atleast_2d(fix_cov)\n        if fix_cov.shape != (dim, dim):\n            msg = '`fix_cov` must be a two-dimensional square array of same side length as the dimensionality of the vectors `x`.'\n            raise ValueError(msg)\n        s, u = scipy.linalg.eigh(fix_cov, lower=True, check_finite=True)\n        eps = _eigvalsh_to_eps(s)\n        if np.min(s) < -eps:\n            msg = '`fix_cov` must be symmetric positive semidefinite.'\n            raise ValueError(msg)\n        cov = fix_cov\n    else:\n        centered_data = x - mean\n        cov = centered_data.T @ centered_data / n_vectors\n    return (mean, cov)",
    "docstring": "Fit a multivariate normal distribution to data. Parameters ---------- x : ndarray (m, n) Data the distribution is fitted to. Must have two axes. The first axis of length represents the number of vectors the distribution is fitted to. The second axis of length determines the dimensionality of the fitted distribution. fix_mean : ndarray(n, ) Fixed mean vector. Must have length . fix_cov: ndarray (n, n) Fixed covariance matrix. Must have shape ``. Returns ------- mean : ndarray (n, ) Maximum likelihood estimate of the mean vector cov : ndarray (n, n) Maximum likelihood estimate of the covariance matrix",
    "type": "method",
    "file_path": "scipy\\scipy\\stats\\_multivariate.py",
    "ast_data": "FunctionDef name:fit arg:self arg:x arg:fix_mean arg:fix_cov arguments arg arg arg arg Assign Call If Compare Raise Call Assign If Compare Assign Call If Compare Assign Raise Call Assign Assign Call If Compare Assign Call If Compare Assign Raise Call Assign Call Assign Call If Compare Call Assign Raise Call Assign Assign Assign Return return:yes"
  },
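  A usage sketch for the fit method above via scipy.stats.multivariate_normal (the fit method is assumed available, as in recent SciPy releases):

```python
import numpy as np
from scipy.stats import multivariate_normal

rng = np.random.default_rng(0)
x = rng.multivariate_normal(mean=[1.0, -2.0], cov=[[2.0, 0.3], [0.3, 0.5]], size=1000)

# Maximum likelihood estimates of the mean vector and covariance matrix.
mean, cov = multivariate_normal.fit(x)
print(mean, cov, sep="\n")
```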
  {
    "library": "scikit-learn",
    "name": "fit_transform",
    "source_code": "@_fit_context(prefer_skip_nested_validation=True)\ndef fit_transform(self, X, y=None):\n    return self._fit(X, y, force_transform=True)",
    "docstring": "Fit to , then transform . Parameters ---------- X : array-like of shape (n_samples, n_features) The data used to estimate the optimal transformation parameters and to be transformed using a power transformation. y : Ignored Not used, present for API consistency by convention. Returns ------- X_new : ndarray of shape (n_samples, n_features) Transformed data.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\preprocessing\\_data.py",
    "ast_data": "FunctionDef name:fit_transform arg:self arg:X arg:y arguments arg arg arg Return return:yes Call Call"
  },
  {
    "library": "scipy",
    "name": "write_char",
    "source_code": "def write_char(self, arr, codec='ascii'):\n    if arr.size == 0 or np.all(arr == ''):\n        shape = (0,) * np.max([arr.ndim, 2])\n        self.write_header(shape, mxCHAR_CLASS)\n        self.write_smalldata_element(arr, miUTF8, 0)\n        return\n    arr = arr_to_chars(arr)\n    shape = arr.shape\n    self.write_header(shape, mxCHAR_CLASS)\n    if arr.dtype.kind == 'U' and arr.size:\n        n_chars = math.prod(shape)\n        st_arr = np.ndarray(shape=(), dtype=arr_dtype_number(arr, n_chars), buffer=arr.T.copy())\n        st = st_arr.item().encode(codec)\n        arr = np.ndarray(shape=(len(st),), dtype='S1', buffer=st)\n    self.write_element(arr, mdtype=miUTF8)",
    "docstring": "Write string array with given",
    "type": "method",
    "file_path": "scipy\\scipy\\io\\matlab\\_mio5.py",
    "ast_data": "FunctionDef name:write_char arg:self arg:arr arg:codec arguments arg arg arg If BoolOp Compare Call Compare Assign Call Call Call Return return:no Assign Call Assign Call If BoolOp Compare Assign Call Assign Call Call Call Assign Call Call Assign Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "_sequential_split_and_maybe_inline_subgraphs_helper",
    "source_code": "def _sequential_split_and_maybe_inline_subgraphs_helper(new_gm: torch.fx.GraphModule, graph_signature: Optional[ExportGraphSignature], maybe_inline_or_replace_with_hop: Callable[[torch.fx.Node], None]) -> tuple[torch.fx.GraphModule, Optional[ExportGraphSignature]]:\n    replace_ctx = contextlib.nullcontext()\n    new_signature = None\n    if graph_signature is not None:\n        new_signature = copy.copy(graph_signature)\n        new_gm_out_node = next(reversed(new_gm.graph.find_nodes(op='output')))\n        assert new_gm_out_node.op == 'output' and len(new_gm_out_node.args[0]) == len(new_signature.output_specs)\n        for arg_node, out_spec in zip(new_gm_out_node.args[0], new_signature.output_specs):\n            if arg_node is None:\n                assert out_spec.arg.value is None\n            elif isinstance(arg_node, torch.fx.Node) and out_spec.arg.name != arg_node.name:\n                out_spec.arg.name = arg_node.name\n        replace_ctx = new_gm._set_replace_hook(new_signature.get_replace_hook())\n    with replace_ctx:\n        nodes_map(list(new_gm.graph.nodes), lambda node: maybe_inline_or_replace_with_hop(node) if node.op == 'call_module' else node)\n    new_gm.recompile()\n    new_gm.graph.lint()\n    return (new_gm, new_signature)",
    "docstring": "Helper function for replacing graph nodse with higher order nodes. For each subgraph in , decides whether to construct a HOO subgraph, or inline the calls back into the parent graph module, depending on .",
    "type": "function",
    "file_path": "pytorch\\torch\\_export\\passes\\replace_with_hop_pass_util.py",
    "ast_data": "FunctionDef name:_sequential_split_and_maybe_inline_subgraphs_helper arg:new_gm arg:graph_signature arg:maybe_inline_or_replace_with_hop arguments arg arg arg Assign Call Assign If Compare Assign Call Assign Call Call Call BoolOp Compare Compare Call Call For Call If Compare Compare If BoolOp Call Compare Assign Assign Call Call With Call Call arguments arg Compare Call Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "size",
    "source_code": "def size(self, name=None):\n    if not self._dynamic_size and self._size is not None:\n        return ops.convert_to_tensor(self._size, dtype=dtypes.int32)\n    else:\n        return list_ops.tensor_list_length(input_handle=self._flow, name=name)",
    "docstring": "See TensorArray.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\tensor_array_ops.py",
    "ast_data": "FunctionDef name:size arg:self arg:name arguments arg arg If BoolOp Compare Return return:yes Call Return return:yes Call"
  },
  {
    "library": "numpy",
    "name": "_possible_names",
    "source_code": "def _possible_names(self, filename):\n    names = [filename]\n    if not self._iszip(filename):\n        for zipext in _file_openers.keys():\n            if zipext:\n                names.append(filename + zipext)\n    return names",
    "docstring": "Return a tuple containing compressed filename variations.",
    "type": "method",
    "file_path": "numpy\\numpy\\lib\\_datasource.py",
    "ast_data": "FunctionDef name:_possible_names arg:self arg:filename arguments arg arg Assign If Call For Call If Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "unique_consecutive",
    "source_code": "def unique_consecutive(self, return_inverse=False, return_counts=False, dim=None):\n    if has_torch_function_unary(self):\n        return handle_torch_function(Tensor.unique_consecutive, (self,), self, return_inverse=return_inverse, return_counts=return_counts, dim=dim)\n    return torch.unique_consecutive(self, return_inverse=return_inverse, return_counts=return_counts, dim=dim)",
    "docstring": "Eliminates all but the first element from every consecutive group of equivalent elements. See :func:",
    "type": "method",
    "file_path": "pytorch\\torch\\_tensor.py",
    "ast_data": "FunctionDef name:unique_consecutive arg:self arg:return_inverse arg:return_counts arg:dim arguments arg arg arg arg If Call Return return:yes Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "broadcast",
    "source_code": "def broadcast(tensor, src, group=group.WORLD):\n    return _Broadcast.apply(src, group, tensor)",
    "docstring": "Broadcasts the tensor to the whole group. `` is the rank of current process. src (int): Source rank. group (ProcessGroup, optional): The process group to work on. Returns: Tensor: Received tensor from the broadcast op.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\nn\\functional.py",
    "ast_data": "FunctionDef name:broadcast arg:tensor arg:src arg:group arguments arg arg arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "get_higher_type",
    "source_code": "def get_higher_type(a: type, b: type) -> type:\n    a, b = (_maybe_get_pytype(a), _maybe_get_pytype(b))\n    if a not in _ordered_types or b not in _ordered_types:\n        raise RuntimeError(f'Expected builtin numeric types, found {a}, {b}')\n    if a is b:\n        return a\n    for typ in _ordered_types:\n        if a is typ:\n            return b\n        if b is typ:\n            return a\n    raise ValueError('Unknown Python scalar type!')",
    "docstring": "Returns the higher of the two given Number types. The types are ordered bool -> int -> float -> complex.",
    "type": "function",
    "file_path": "pytorch\\torch\\_prims_common\\__init__.py",
    "ast_data": "FunctionDef name:get_higher_type arg:a arg:b arguments arg arg Assign Call Call If BoolOp Compare Compare Raise Call If Compare Return return:yes For If Compare Return return:yes If Compare Return return:yes Raise Call"
  },
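The promotion walk above relies only on each type's position in the ordered tuple. Below is a minimal standalone sketch of the same technique; the `_ordered_types` tuple and `higher_type` name are illustrative, not PyTorch internals:

```python
# Assumed ordering from the docstring: bool -> int -> float -> complex.
_ordered_types = (bool, int, float, complex)

def higher_type(a: type, b: type) -> type:
    if a not in _ordered_types or b not in _ordered_types:
        raise RuntimeError(f"Expected builtin numeric types, found {a}, {b}")
    if a is b:
        return a
    # Scan from the lowest type upward: the first operand reached is the
    # lower of the two, so the other operand wins.
    for typ in _ordered_types:
        if a is typ:
            return b
        if b is typ:
            return a
    raise ValueError("Unknown Python scalar type!")

assert higher_type(bool, int) is int
assert higher_type(int, float) is float
assert higher_type(complex, float) is complex
```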
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "@deprecation.deprecated('2019-01-01', 'The TensorFlow Distributions library has moved to TensorFlow Probability (https://github.com/tensorflow/probability). You should update all references to use `tfp.distributions` instead of `tf.distributions`.', warn_once=True)\ndef __init__(self, total_count, concentration, validate_args=False, allow_nan_stats=True, name='DirichletMultinomial'):\n    parameters = dict(locals())\n    with ops.name_scope(name, values=[total_count, concentration]) as name:\n        self._total_count = ops.convert_to_tensor(total_count, name='total_count')\n        if validate_args:\n            self._total_count = distribution_util.embed_check_nonnegative_integer_form(self._total_count)\n        self._concentration = self._maybe_assert_valid_concentration(ops.convert_to_tensor(concentration, name='concentration'), validate_args)\n        self._total_concentration = math_ops.reduce_sum(self._concentration, -1)\n    super(DirichletMultinomial, self).__init__(dtype=self._concentration.dtype, validate_args=validate_args, allow_nan_stats=allow_nan_stats, reparameterization_type=distribution.NOT_REPARAMETERIZED, parameters=parameters, graph_parents=[self._total_count, self._concentration], name=name)",
    "docstring": "Initialize a batch of DirichletMultinomial distributions. Args: total_count: Non-negative floating point tensor, whose dtype is the same as . The shape is broadcastable to with . Defines this as a batch of different Dirichlet multinomial distributions. Its components should be equal to integer values. concentration: Positive floating point tensor, whose dtype is the same as with shape broadcastable to . Defines this as a batch of different class Dirichlet multinomial distributions. validate_args: Python , default . When distribution parameters are checked for validity despite possibly degrading runtime performance. When invalid inputs may silently render incorrect outputs. allow_nan_stats: Python , default . When , statistics (e.g., mean, mode, variance) use the value \"\" to indicate the result is undefined. When , an exception is raised if one or more of the statistic's batch members are undefined. name: Python name prefixed to Ops created by this class.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\distributions\\dirichlet_multinomial.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:total_count arg:concentration arg:validate_args arg:allow_nan_stats arg:name arguments arg arg arg arg arg arg Assign Call Call With Call Assign Call If Assign Call Assign Call Call Assign Call Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "set_object_type",
    "source_code": "def set_object_type(self, object_type: Union[Callable, str], qconfig: QConfigAny) -> QConfigMapping:\n    self.object_type_qconfigs[object_type] = qconfig\n    return self",
    "docstring": "Set the QConfig for a given module type, function, or method name. If the QConfig for an existing object type was already set, the new QConfig will override the old one.",
    "type": "method",
    "file_path": "pytorch\\torch\\ao\\quantization\\qconfig_mapping.py",
    "ast_data": "FunctionDef name:set_object_type arg:self arg:object_type arg:qconfig arguments arg arg arg Assign Return return:yes"
  },
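The method above is a fluent setter: it mutates the mapping and returns self so calls can be chained, with later calls overriding earlier ones. A minimal sketch of that pattern; the `Mapping` class and string qconfigs are hypothetical placeholders, not the real QConfigMapping API:

```python
# Hypothetical stand-in for QConfigMapping to illustrate the chaining pattern.
class Mapping:
    def __init__(self):
        self.object_type_qconfigs = {}

    def set_object_type(self, object_type, qconfig):
        self.object_type_qconfigs[object_type] = qconfig  # later calls override
        return self  # returning self enables chaining

m = Mapping().set_object_type("Linear", "qcfg_a").set_object_type("Linear", "qcfg_b")
assert m.object_type_qconfigs["Linear"] == "qcfg_b"  # the newer QConfig won
```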
  {
    "library": "pytorch",
    "name": "expand_bias",
    "source_code": "def expand_bias(B: Optional[_T], X: _T) -> Optional[_T]:\n    if B is not None:\n        if isinstance(B, ir.IRNode):\n            if not isinstance(B, ir.TensorBox):\n                B = ir.TensorBox(B)\n            assert hasattr(X, 'get_size')\n            B = L.expand(B, (X.get_size()[0], B.get_size()[-1]))\n        else:\n            assert isinstance(B, torch.Tensor)\n            assert isinstance(X, torch.Tensor)\n            B = B.expand(X.shape[0], B.shape[-1])\n    return B",
    "docstring": "Expand Bias to the same size of X.",
    "type": "function",
    "file_path": "pytorch\\torch\\_inductor\\codegen\\cpp_gemm_template.py",
    "ast_data": "FunctionDef name:expand_bias arg:B arg:X arguments arg arg If Compare If Call If Call Assign Call Call Assign Call Call Call Call Call Assign Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "step_param",
    "source_code": "def step_param(self, param: Tensor, grad: Optional[Tensor]):\n    weight_decay = self.defaults['weight_decay']\n    momentum = self.defaults['momentum']\n    dampening = self.defaults['dampening']\n    lr = self.defaults['lr']\n    params = [param]\n    momentum_buffer_list: list[Optional[Tensor]] = []\n    grads = []\n    has_sparse_grad = False\n    if grad is not None:\n        grads.append(grad)\n        if grad.is_sparse:\n            has_sparse_grad = True\n        if param not in self.state:\n            self.state[param] = {}\n        state = self.state[param]\n        if 'momentum_buffer' not in state:\n            momentum_buffer_list.append(None)\n        else:\n            momentum_buffer_list.append(state['momentum_buffer'])\n    with torch.no_grad():\n        F.sgd(params, grads, momentum_buffer_list, weight_decay=weight_decay, momentum=momentum, lr=lr, dampening=dampening, nesterov=self.nesterov, maximize=self.maximize, has_sparse_grad=has_sparse_grad, foreach=self.foreach, fused=self.fused, grad_scale=None, found_inf=None)\n    state = self.state[param]\n    momentum_buffer = momentum_buffer_list[0]\n    if momentum_buffer is not None:\n        state['momentum_buffer'] = momentum_buffer",
    "docstring": "Similar to self.step, but operates on a single parameter and its gradient.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\optim\\functional_sgd.py",
    "ast_data": "FunctionDef name:step_param arg:self arg:param arg:grad arguments arg arg arg Assign Assign Assign Assign Assign Assign Assign If Compare Call If Assign If Compare Assign Assign If Compare Call Call With Call Call Assign Assign If Compare Assign"
  },
  {
    "library": "scikit-learn",
    "name": "fit",
    "source_code": "@_fit_context(prefer_skip_nested_validation=True)\ndef fit(self, X, y=None):\n    return self",
    "docstring": "Only validate the parameters of the estimator. This method allows to: (i) validate the parameters of the estimator and (ii) be consistent with the scikit-learn transformer API. Parameters ---------- X : ndarray of shape (n_samples, image_height, image_width) or (n_samples, image_height, image_width, n_channels) Array of images from which to extract patches. For color images, the last dimension specifies the channel: a RGB image would have . y : Ignored Not used, present for API consistency by convention. Returns ------- self : object Returns the instance itself.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\feature_extraction\\image.py",
    "ast_data": "FunctionDef name:fit arg:self arg:X arg:y arguments arg arg arg Return return:yes Call"
  },
  {
    "library": "django",
    "name": "describe",
    "source_code": "def describe(self):\n    return '%s: %s' % (self.__class__.__name__, self._constructor_args)",
    "docstring": "Output a brief summary of what the action does.",
    "type": "method",
    "file_path": "django\\django\\db\\migrations\\operations\\base.py",
    "ast_data": "FunctionDef name:describe arg:self arguments arg Return return:yes"
  },
  {
    "library": "scipy",
    "name": "time_count_neighbors_deep",
    "source_code": "def time_count_neighbors_deep(self, mn1n2, Nr):\n    self.T1d.count_neighbors(self.T2d, self.r)",
    "docstring": "Count neighbors for a very deep kd-tree dim | # points T1 | # points T2 | Nr",
    "type": "method",
    "file_path": "scipy\\benchmarks\\benchmarks\\spatial.py",
    "ast_data": "FunctionDef name:time_count_neighbors_deep arg:self arg:mn1n2 arg:Nr arguments arg arg arg Call"
  },
  {
    "library": "cherrypy",
    "name": "__init__",
    "source_code": "def __init__(self, urls, status=None, encoding=None):\n    self.urls = abs_urls = [urllib.parse.urljoin(cherrypy.url(), tonative(url, encoding or self.encoding)) for url in always_iterable(urls)]\n    status = int(status) if status is not None else self.default_status\n    if not 300 <= status <= 399:\n        raise ValueError('status must be between 300 and 399.')\n    CherryPyException.__init__(self, abs_urls, status)",
    "docstring": "Initialize the HTTP redirect exception.",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\_cperror.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:urls arg:status arg:encoding arguments arg arg arg arg Assign Call Call Call BoolOp Call Assign Compare Call If Compare Raise Call Call"
  },
  {
    "library": "django",
    "name": "get_hstore_oids",
    "source_code": "@functools.lru_cache\ndef get_hstore_oids(connection_alias):\n    return get_type_oids(connection_alias, 'hstore')",
    "docstring": "Return hstore and hstore array OIDs.",
    "type": "function",
    "file_path": "django\\django\\contrib\\postgres\\signals.py",
    "ast_data": "FunctionDef name:get_hstore_oids arg:connection_alias arguments arg Return return:yes Call"
  },
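The decorator memoizes the OID lookup per connection alias, so the underlying query runs only once per alias. Below is a sketch of the same caching pattern; `fetch_oids` is a hypothetical stand-in for the real get_type_oids query:

```python
import functools

# Hypothetical stand-in for the database round trip in get_type_oids.
def fetch_oids(alias: str, type_name: str):
    print(f"querying {alias} for {type_name} OIDs")
    return ((12345,), (12346,))

@functools.lru_cache
def get_oids(connection_alias: str):
    return fetch_oids(connection_alias, "hstore")

get_oids("default")  # performs the "query"
get_oids("default")  # served from the cache; nothing printed
```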
  {
    "library": "pytorch",
    "name": "sparse_tensor_constructor",
    "source_code": "@staticmethod\ndef sparse_tensor_constructor(size, dtype, sparse_dim, nnz, is_coalesced):\n    if isinstance(size, Number):\n        size = [size] * sparse_dim\n    assert all((size[d] > 0 for d in range(sparse_dim))) or nnz == 0, 'invalid arguments'\n    v_size = [nnz] + list(size[sparse_dim:])\n    if dtype.is_floating_point:\n        v = torch.rand(size=v_size, dtype=dtype, device='cpu')\n    else:\n        v = torch.randint(1, 127, size=v_size, dtype=dtype, device='cpu')\n    i = torch.rand(sparse_dim, nnz, device='cpu')\n    i.mul_(torch.tensor(size[:sparse_dim]).unsqueeze(1).to(i))\n    i = i.to(torch.long)\n    if not is_coalesced:\n        v = torch.cat([v, torch.randn_like(v)], 0)\n        i = torch.cat([i, i], 1)\n    x = torch.sparse_coo_tensor(i, v, torch.Size(size))\n    if is_coalesced:\n        x = x.coalesce()\n    return x",
    "docstring": "sparse_tensor_constructor creates a sparse tensor with coo format. Note that when is False, the number of elements is doubled but the number of indices represents the same amount of number of non zeros , i.e, this is virtually the same tensor with the same sparsity pattern. Moreover, most of the sparse operation will use coalesce() method and what we want here is to get a sparse tensor with the same even if this is coalesced or not. In the other hand when is True the number of elements is reduced in the coalescing process by an unclear amount however the probability to generate duplicates indices are low for most of the cases. This decision was taken on purpose to maintain the construction cost as low as possible.",
    "type": "method",
    "file_path": "pytorch\\torch\\utils\\benchmark\\utils\\sparse_fuzzer.py",
    "ast_data": "FunctionDef name:sparse_tensor_constructor arg:size arg:dtype arg:sparse_dim arg:nnz arg:is_coalesced arguments arg arg arg arg arg If Call Assign BoolOp Call Compare Call Compare Assign Call If Assign Call Assign Call Assign Call Call Call Call Call Assign Call If Assign Call Call Assign Call Assign Call Call If Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "summary_writer_function",
    "source_code": "def summary_writer_function(name, tensor, function, family=None):\n    name_scope = ops.get_name_scope()\n    if name_scope:\n        name_scope += '/'\n\n    def record():\n        with ops.name_scope(name_scope), summary_op_util.summary_scope(name, family, values=[tensor]) as (tag, scope):\n            with ops.control_dependencies([function(tag, scope)]):\n                return constant_op.constant(True)\n    if _summary_state.writer is None:\n        return control_flow_ops.no_op()\n    with ops.device('cpu:0'):\n        op = smart_cond.smart_cond(_legacy_contrib_should_record_summaries(), record, _nothing, name='')\n        if not context.executing_eagerly():\n            ops.add_to_collection(ops.GraphKeys._SUMMARY_COLLECTION, op)\n    return op",
    "docstring": "Helper function to write summaries. Args: name: name of the summary tensor: main tensor to form the summary function: function taking a tag and a scope which writes the summary family: optional, the summary's family Returns: The result of writing the summary.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\summary_ops_v2.py",
    "ast_data": "FunctionDef name:summary_writer_function arg:name arg:tensor arg:function arg:family arguments arg arg arg arg Assign Call If FunctionDef name:record arguments With Call Call With Call Call Return return:yes Call If Compare Return return:yes Call With Call Assign Call Call If Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_inverse_log_det_jacobian",
    "source_code": "def _inverse_log_det_jacobian(self, y):\n    raise NotImplementedError('inverse_log_det_jacobian not implemented.')",
    "docstring": "Subclass implementation of public function. In particular, this method differs from the public function, in that it does not take . Thus, this implements the minimal Jacobian determinant calculation (i.e. over ). Args: y: . The input to the \"inverse_log_det_jacobian\" evaluation. Returns: inverse_log_det_jacobian: , if this bijector is injective. If not injective, returns the k-tuple containing jacobians for the unique points such that .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\distributions\\bijector_impl.py",
    "ast_data": "FunctionDef name:_inverse_log_det_jacobian arg:self arg:y arguments arg arg Raise Call"
  },
  {
    "library": "pandas",
    "name": "if_sheet_exists",
    "source_code": "@property\ndef if_sheet_exists(self) -> str:\n    return self._if_sheet_exists",
    "docstring": "How to behave when writing to a sheet that already exists in append mode.",
    "type": "method",
    "file_path": "pandas\\pandas\\io\\excel\\_base.py",
    "ast_data": "FunctionDef name:if_sheet_exists arg:self arguments arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_register_fake",
    "source_code": "def _register_fake(self, op_name, fn, _stacklevel=1, *, allow_override=False):\n    if torch._running_with_deploy():\n        _library.utils.warn_deploy()\n        return\n    source = torch._library.utils.get_source(_stacklevel + 1)\n    frame = sys._getframe(_stacklevel)\n    caller_module = inspect.getmodule(frame)\n    caller_module_name = None if caller_module is None else caller_module.__name__\n    if caller_module_name is not None and caller_module_name.startswith('torchvision.'):\n        caller_module_name = None\n    qualname = f'{self.ns}::{op_name}'\n    entry = torch._library.simple_registry.singleton.find(qualname)\n    if caller_module_name is not None:\n        func_to_register = _check_pystubs_once(fn, qualname, caller_module_name)\n    else:\n        func_to_register = fn\n    handle = entry.fake_impl.register(func_to_register, source, lib=self, allow_override=allow_override)\n    self._registration_handles.append(handle)",
    "docstring": "Registers the fake impl for an operator defined in the library.",
    "type": "method",
    "file_path": "pytorch\\torch\\library.py",
    "ast_data": "FunctionDef name:_register_fake arg:self arg:op_name arg:fn arg:_stacklevel arguments arg arg arg arg arg If Call Call Return return:no Assign Call Assign Call Assign Call Assign Compare If BoolOp Compare Call Assign Assign Assign Call If Compare Assign Call Assign Assign Call Call"
  },
  {
    "library": "numpy",
    "name": "_dist_test_spawn_paths",
    "source_code": "def _dist_test_spawn_paths(self, cmd, display=None):\n    if not hasattr(self._ccompiler, '_paths'):\n        self._dist_test_spawn(cmd)\n        return\n    old_path = os.getenv('path')\n    try:\n        os.environ['path'] = self._ccompiler._paths\n        self._dist_test_spawn(cmd)\n    finally:\n        os.environ['path'] = old_path",
    "docstring": "Fix msvc SDK ENV path same as distutils do without it we get c1: fatal error C1356: unable to find mspdbcore.dll",
    "type": "method",
    "file_path": "numpy\\numpy\\distutils\\ccompiler_opt.py",
    "ast_data": "FunctionDef name:_dist_test_spawn_paths arg:self arg:cmd arg:display arguments arg arg arg If Call Call Return return:no Assign Call Try Assign Call Assign"
  },
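The method above relies on a try/finally block so the original `path` value is restored even if the spawned command fails. A generic, dependency-free sketch of that save/patch/restore idiom; all names here are illustrative:

```python
import os

def with_patched_env(name: str, value: str, fn):
    # Save the old value, patch, run, and always restore in finally.
    old = os.getenv(name)
    try:
        os.environ[name] = value
        return fn()
    finally:
        if old is None:
            os.environ.pop(name, None)
        else:
            os.environ[name] = old

with_patched_env("DEMO_PATH", "/opt/sdk/bin",
                 lambda: print(os.environ["DEMO_PATH"]))
assert os.getenv("DEMO_PATH") is None  # restored afterwards
```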
  {
    "library": "tensorflow",
    "name": "dispatch",
    "source_code": "def dispatch(op, args, kwargs):\n    for dispatcher in getattr(op, FALLBACK_DISPATCH_ATTR):\n        result = dispatcher.handle(args, kwargs)\n        if result is not OpDispatcher.NOT_SUPPORTED:\n            return result\n    for dispatcher in _GLOBAL_DISPATCHERS:\n        result = dispatcher.handle(op, args, kwargs)\n        if result is not OpDispatcher.NOT_SUPPORTED:\n            return result\n    return OpDispatcher.NOT_SUPPORTED",
    "docstring": "Returns the result from the first successful dispatcher for a given op. Calls the method of each that has been registered to handle , and returns the value from the first successful handler. Args: op: Python function: the operation to dispatch for. args: The arguments to the operation. kwargs: They keyword arguments to the operation. Returns: The result of the operation, or if no registered dispatcher can handle the given arguments.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\util\\dispatch.py",
    "ast_data": "FunctionDef name:dispatch arg:op arg:args arg:kwargs arguments arg arg arg For Call Assign Call If Compare Return return:yes For Assign Call If Compare Return return:yes Return return:yes"
  },
  {
    "library": "django",
    "name": "_check_max_num",
    "source_code": "def _check_max_num(self, obj):\n    if obj.max_num is None:\n        return []\n    elif not isinstance(obj.max_num, int):\n        return must_be('an integer', option='max_num', obj=obj, id='admin.E204')\n    else:\n        return []",
    "docstring": "Check that max_num is an integer.",
    "type": "method",
    "file_path": "django\\django\\contrib\\admin\\checks.py",
    "ast_data": "FunctionDef name:_check_max_num arg:self arg:obj arguments arg arg If Compare Return return:no If Call Return return:yes Call Return return:no"
  },
  {
    "library": "scipy",
    "name": "maximiser",
    "source_code": "def maximiser(self):\n    if self.check_max:\n        self._max = all((self.f > v.f for v in self.nn))\n        self.check_max = False\n    return self._max",
    "docstring": "Check whether this vertex is strictly greater than all its neighbours.",
    "type": "method",
    "file_path": "scipy\\scipy\\optimize\\_shgo_lib\\_vertex.py",
    "ast_data": "FunctionDef name:maximiser arg:self arguments arg If Assign Call Compare Assign Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "char",
    "source_code": "def char(self):\n    return self._to(torch.int8)",
    "docstring": "Casts this storage to char type.",
    "type": "method",
    "file_path": "pytorch\\torch\\storage.py",
    "ast_data": "FunctionDef name:char arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "_prepare_onnx_paddings",
    "source_code": "def _prepare_onnx_paddings(dim: int, pad):\n    paddings = list(pad[:]) + [0] * (dim * 2 - len(pad))\n    paddings = paddings[-2::-2] + paddings[-1::-2]\n    return paddings",
    "docstring": "Generate paddings in ONNX order based on pad in pytorch. Args: dim: the dimension of the tensor. pad: the paddings in pytorch. The order is dim_n_begin, dim_n_end, dim_n-1_begin, dim_n-1_end, ...",
    "type": "function",
    "file_path": "pytorch\\torch\\onnx\\symbolic_opset9.py",
    "ast_data": "FunctionDef name:_prepare_onnx_paddings arg:dim arg:pad arguments arg arg Assign Call Call Assign Return return:yes"
  },
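A worked example helps here: PyTorch lists pads last-dimension-first as (begin, end) pairs, while ONNX wants all begins in dimension order followed by all ends. Below is a plain-Python re-sketch of the slicing trick with a concrete check:

```python
def prepare_onnx_paddings(dim, pad):
    # Right-pad with zeros so all `dim` dimensions have a (begin, end) pair.
    paddings = list(pad) + [0] * (dim * 2 - len(pad))
    # Even offsets from the back, reversed, are the begins in dimension
    # order; odd offsets are the ends.
    return paddings[-2::-2] + paddings[-1::-2]

# 4-D tensor, padding the last dim by (1, 2) and the next-to-last by (3, 4):
# ONNX order is [begins for dims 0..3] + [ends for dims 0..3].
assert prepare_onnx_paddings(4, [1, 2, 3, 4]) == [0, 0, 3, 1, 0, 0, 4, 2]
```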
  {
    "library": "kornia",
    "name": "_compute_interpolation_tiles",
    "source_code": "def _compute_interpolation_tiles(padded_imgs: torch.Tensor, tile_size: Tuple[int, int]) -> torch.Tensor:\n    if padded_imgs.dim() != 4:\n        raise AssertionError('Images Tensor must be 4D.')\n    if padded_imgs.shape[-2] % tile_size[0] != 0:\n        raise AssertionError('Images are not correctly padded.')\n    if padded_imgs.shape[-1] % tile_size[1] != 0:\n        raise AssertionError('Images are not correctly padded.')\n    interp_kernel_vert: int = tile_size[0] // 2\n    interp_kernel_horz: int = tile_size[1] // 2\n    c: int = padded_imgs.shape[-3]\n    interp_tiles: torch.Tensor = padded_imgs.unfold(1, c, c).unfold(2, interp_kernel_vert, interp_kernel_vert).unfold(3, interp_kernel_horz, interp_kernel_horz).squeeze(1).contiguous()\n    if interp_tiles.shape[-3] != c:\n        raise AssertionError\n    if interp_tiles.shape[-2] != tile_size[0] / 2:\n        raise AssertionError\n    if interp_tiles.shape[-1] != tile_size[1] / 2:\n        raise AssertionError\n    return interp_tiles",
    "docstring": "Compute interpolation tiles on a properly padded set of images. Note that images must be padded. So, the tile_size (TH, TW) * grid_size (GH, GW) = image_size (H, W) Args: padded_imgs: batch of 2D images with shape (B, C, H, W) already padded to extract tiles of size (TH, TW). tile_size: shape of the current tiles (TH, TW). Returns: tensor with the interpolation tiles (B, 2GH, 2GW, C, TH/2, TW/2).",
    "type": "function",
    "file_path": "kornia\\kornia\\enhance\\equalization.py",
    "ast_data": "FunctionDef name:_compute_interpolation_tiles arg:padded_imgs arg:tile_size arguments arg arg If Compare Call Raise Call If Compare Raise Call If Compare Raise Call Call Call Call Call Call If Compare Raise If Compare Raise If Compare Raise Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "_get_feature_name_out_for_transformer",
    "source_code": "def _get_feature_name_out_for_transformer(self, name, trans, feature_names_in):\n    column_indices = self._transformer_to_input_indices[name]\n    names = feature_names_in[column_indices]\n    if not hasattr(trans, 'get_feature_names_out'):\n        raise AttributeError(f'Transformer {name} (type {type(trans).__name__}) does not provide get_feature_names_out.')\n    return trans.get_feature_names_out(names)",
    "docstring": "Gets feature names of transformer. Used in conjunction with self._iter(fitted=True) in get_feature_names_out.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\compose\\_column_transformer.py",
    "ast_data": "FunctionDef name:_get_feature_name_out_for_transformer arg:self arg:name arg:trans arg:feature_names_in arguments arg arg arg arg Assign Assign If Call Raise Call Call Return return:yes Call"
  },
  {
    "library": "scrapy",
    "name": "as_async_generator",
    "source_code": "async def as_async_generator(it: Iterable[_T] | AsyncIterator[_T]) -> AsyncGenerator[_T]:\n    if isinstance(it, AsyncIterator):\n        async for r in it:\n            yield r\n    else:\n        for r in it:\n            yield r",
    "docstring": "Wraps an iterable (sync or async) into an async generator.",
    "type": "function",
    "file_path": "scrapy\\scrapy\\utils\\asyncgen.py",
    "ast_data": "AsyncFunctionDef name:as_async_generator arg:it arguments arg If Call For"
  },
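A small usage sketch of the wrapper above: both a plain list and an async iterator come out as a single async-iteration interface. Re-sketched without Scrapy's type aliases:

```python
import asyncio
from collections.abc import AsyncIterator

async def as_async_gen(it):
    # Async inputs are iterated with `async for`, sync inputs with `for`.
    if isinstance(it, AsyncIterator):
        async for r in it:
            yield r
    else:
        for r in it:
            yield r

async def main():
    async for x in as_async_gen([1, 2, 3]):  # sync iterable
        print(x)

asyncio.run(main())  # prints 1, 2, 3
```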
  {
    "library": "kornia",
    "name": "apply_transform_box",
    "source_code": "def apply_transform_box(self, input: Boxes, params: Dict[str, Tensor], flags: Dict[str, Any], transform: Optional[Tensor]=None) -> Boxes:\n    if transform is None:\n        if self.transform_matrix is None:\n            raise RuntimeError('No valid transformation matrix found. Please either pass one or forward one first.')\n        transform = self.transform_matrix\n    input = self.apply_non_transform_box(input, params, flags, transform)\n    return input.transform_boxes_(transform)",
    "docstring": "Process boxes corresponding to the inputs that are transformed.",
    "type": "method",
    "file_path": "kornia\\kornia\\augmentation\\_2d\\geometric\\base.py",
    "ast_data": "FunctionDef name:apply_transform_box arg:self arg:input arg:params arg:flags arg:transform arguments arg arg arg arg arg If Compare If Compare Raise Call Assign Assign Call Return return:yes Call"
  },
  {
    "library": "django",
    "name": "learn_cache_key",
    "source_code": "def learn_cache_key(request, response, cache_timeout=None, key_prefix=None, cache=None):\n    if key_prefix is None:\n        key_prefix = settings.CACHE_MIDDLEWARE_KEY_PREFIX\n    if cache_timeout is None:\n        cache_timeout = settings.CACHE_MIDDLEWARE_SECONDS\n    cache_key = _generate_cache_header_key(key_prefix, request)\n    if cache is None:\n        cache = caches[settings.CACHE_MIDDLEWARE_ALIAS]\n    if response.has_header('Vary'):\n        is_accept_language_redundant = settings.USE_I18N\n        headerlist = []\n        for header in cc_delim_re.split(response.headers['Vary']):\n            header = header.upper().replace('-', '_')\n            if header != 'ACCEPT_LANGUAGE' or not is_accept_language_redundant:\n                headerlist.append('HTTP_' + header)\n        headerlist.sort()\n        cache.set(cache_key, headerlist, cache_timeout)\n        return _generate_cache_key(request, request.method, headerlist, key_prefix)\n    else:\n        cache.set(cache_key, [], cache_timeout)\n        return _generate_cache_key(request, request.method, [], key_prefix)",
    "docstring": "Learn what headers to take into account for some request URL from the response object. Store those headers in a global URL registry so that later access to that URL will know what headers to take into account without building the response object itself. The headers are named in the Vary header of the response, but we want to prevent response generation. The list of headers to use for cache key generation is stored in the same cache as the pages themselves. If the cache ages some data out of the cache, this just means that we have to build the response once to get at the Vary header and so at the list of headers to use for the cache key.",
    "type": "function",
    "file_path": "django\\django\\utils\\cache.py",
    "ast_data": "FunctionDef name:learn_cache_key arg:request arg:response arg:cache_timeout arg:key_prefix arg:cache arguments arg arg arg arg arg If Compare Assign If Compare Assign Assign Call If Compare Assign If Call Assign Assign For Call Assign Call Call If BoolOp Compare Call Call Call Return return:yes Call Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "_add_kernel_input",
    "source_code": "def _add_kernel_input(self, name: str):\n    return self.kernel.args.input(name)",
    "docstring": "Add name as input to kernel and return input ref.",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\select_algorithm.py",
    "ast_data": "FunctionDef name:_add_kernel_input arg:self arg:name arguments arg arg Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "_check_X",
    "source_code": "def _check_X(self, X):\n    return validate_data(self, X, accept_sparse='csr', reset=False)",
    "docstring": "Validate X, used only in predict* methods.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\naive_bayes.py",
    "ast_data": "FunctionDef name:_check_X arg:self arg:X arguments arg arg Return return:yes Call"
  },
  {
    "library": "scrapy",
    "name": "allowed_max_concurrent_streams",
    "source_code": "@property\ndef allowed_max_concurrent_streams(self) -> int:\n    return min(self.conn.local_settings.max_concurrent_streams, self.conn.remote_settings.max_concurrent_streams)",
    "docstring": "We keep total two streams for client (sending data) and server side (receiving data) for a single request. To be safe we choose the minimum. Since this value can change in event RemoteSettingsChanged we make variable a property.",
    "type": "method",
    "file_path": "scrapy\\scrapy\\core\\http2\\protocol.py",
    "ast_data": "FunctionDef name:allowed_max_concurrent_streams arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "support",
    "source_code": "def support(self, *args, **kwargs):\n    args, loc, scale = self._parse_args(*args, **kwargs)\n    arrs = np.broadcast_arrays(*args, loc, scale)\n    args, loc, scale = (arrs[:-2], arrs[-2], arrs[-1])\n    cond = self._argcheck(*args) & (scale > 0)\n    _a, _b = self._get_support(*args)\n    if cond.all():\n        return (_a * scale + loc, _b * scale + loc)\n    elif cond.ndim == 0:\n        return (self.badvalue, self.badvalue)\n    _a, _b = (np.asarray(_a).astype('d'), np.asarray(_b).astype('d'))\n    out_a, out_b = (_a * scale + loc, _b * scale + loc)\n    place(out_a, 1 - cond, self.badvalue)\n    place(out_b, 1 - cond, self.badvalue)\n    return (out_a, out_b)",
    "docstring": "Support of the distribution. Parameters ---------- arg1, arg2, ... : array_like The shape parameter(s) for the distribution (see docstring of the instance object for more information). loc : array_like, optional location parameter, Default is 0. scale : array_like, optional scale parameter, Default is 1. Returns ------- a, b : array_like end-points of the distribution's support.",
    "type": "method",
    "file_path": "scipy\\scipy\\stats\\_distn_infrastructure.py",
    "ast_data": "FunctionDef name:support arg:self arguments arg arg arg Assign Call Assign Call Assign Assign Call Compare Assign Call If Call Return return:yes If Compare Return return:yes Assign Call Call Call Call Assign Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "from_value",
    "source_code": "@staticmethod\ndef from_value(value):\n    with ops.name_scope('optional') as scope:\n        with ops.name_scope('value'):\n            element_spec = structure.type_spec_from_value(value)\n            encoded_value = structure.to_tensor_list(element_spec, value)\n    return _OptionalImpl(gen_optional_ops.optional_from_value(encoded_value, name=scope), element_spec)",
    "docstring": "Returns a that wraps the given value. >>> optional = tf.experimental.Optional.from_value(42) >>> print(optional.has_value()) tf.Tensor(True, shape=(), dtype=bool) >>> print(optional.get_value()) tf.Tensor(42, shape=(), dtype=int32) Args: value: A value to wrap. The value must be convertible to or . Returns: A that wraps .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\data\\ops\\optional_ops.py",
    "ast_data": "FunctionDef name:from_value arg:value arguments arg With Call With Call Assign Call Assign Call Return return:yes Call Call"
  },
  {
    "library": "scipy",
    "name": "set_initial_value",
    "source_code": "def set_initial_value(self, y, t=0.0):\n    y = asarray(y)\n    self.tmp = zeros(y.size * 2, 'float')\n    self.tmp[::2] = real(y)\n    self.tmp[1::2] = imag(y)\n    return ode.set_initial_value(self, self.tmp, t)",
    "docstring": "Set initial conditions y(t) = y.",
    "type": "method",
    "file_path": "scipy\\scipy\\integrate\\_ode.py",
    "ast_data": "FunctionDef name:set_initial_value arg:self arg:y arg:t arguments arg arg arg Assign Call Assign Call Assign Call Assign Call Return return:yes Call"
  },
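The packing above stores a complex state vector in a float array of twice the length, real parts at even offsets and imaginary parts at odd offsets, which is how the real-valued integrator sees a complex ODE. A quick demonstration of the layout:

```python
import numpy as np

y = np.array([1 + 2j, 3 - 4j])
tmp = np.zeros(y.size * 2, "float")
tmp[::2] = y.real   # real parts at even slots
tmp[1::2] = y.imag  # imaginary parts at odd slots
print(tmp)  # [ 1.  2.  3. -4.]

# The inverse view reassembles the complex vector.
assert np.allclose(tmp[::2] + 1j * tmp[1::2], y)
```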
  {
    "library": "matplotlib",
    "name": "ignore",
    "source_code": "def ignore(self, event):\n    return not self.active",
    "docstring": "Return whether *event* should be ignored. This method should be called at the beginning of any event callback.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\widgets.py",
    "ast_data": "FunctionDef name:ignore arg:self arg:event arguments arg arg Return return:yes"
  },
  {
    "library": "sphinx",
    "name": "add_object_type",
    "source_code": "def add_object_type(self, name: str, objtype: ObjType) -> None:\n    self.object_types[name] = objtype\n    if objtype.roles:\n        self._type2role[name] = objtype.roles[0]\n    else:\n        self._type2role[name] = ''\n    for role in objtype.roles:\n        self._role2type.setdefault(role, []).append(name)",
    "docstring": "Add an object type.",
    "type": "method",
    "file_path": "sphinx\\sphinx\\domains\\__init__.py",
    "ast_data": "FunctionDef name:add_object_type arg:self arg:name arg:objtype arguments arg arg arg Assign If Assign Assign For Call Call"
  },
  {
    "library": "tensorflow",
    "name": "__deepcopy__",
    "source_code": "def __deepcopy__(self, memo):\n    with distribute_lib.enter_or_assert_strategy(self._distribute_strategy):\n        v = copy.deepcopy(self._v, memo)\n    copied_variable = type(self)(strategy=self._distribute_strategy, v=v, aggregation=self._aggregation)\n    memo[id(self)] = copied_variable\n    return copied_variable",
    "docstring": "Perform a deepcopy of the . Unlike the deepcopy of a regular tf.Variable, this keeps the original strategy and devices of the . To avoid confusion with the behavior of deepcopy on a regular (which does copy into new devices), we only allow a deepcopy of a within its originating strategy scope. Args: memo: The memoization object for . Returns: A deep copy of the current . Raises: RuntimeError: If trying to deepcopy into a different strategy.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\ps_values.py",
    "ast_data": "FunctionDef name:__deepcopy__ arg:self arg:memo arguments arg arg With Call Assign Call Assign Call Call Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_generate_enqueue_op",
    "source_code": "def _generate_enqueue_op(self, inputs, name_prefix, index, device=None, tpu_ordinal=-1):\n    full_name = '%s/%d' % (name_prefix, index)\n    shapes = [t.shape for t in inputs]\n    if device is None:\n        devices = [t.device for t in inputs]\n        for i in range(1, self.number_of_tuple_elements):\n            if devices[0] != devices[i]:\n                raise ValueError(f'input devices for shard {index} are {str(devices)}, but should all be the same')\n        with ops.colocate_with(inputs[0]):\n            return tpu_ops.infeed_enqueue_tuple(inputs=inputs, shapes=shapes, name=full_name, device_ordinal=tpu_ordinal)\n    else:\n        with ops.device(device):\n            return tpu_ops.infeed_enqueue_tuple(inputs=inputs, shapes=shapes, name=full_name, device_ordinal=tpu_ordinal)",
    "docstring": "Generate a host-side Op to enqueue a tuple to the queue. If device is None the inputs are all required to have the same device specification, and the enqueue Op is colocated with inputs[0]. Otherwise the enqueue Op is placed on 'device'. Args: inputs: a list of Tensors with the types and shapes of the tuple elements. name_prefix: the base name for the Op. index: the shard index, used to uniquify the Op name. device: device to place the Op on, or None if it should be colocated with the inputs. tpu_ordinal: ordinal of the TPU device on the host to use for infeed if device is a CPU device. Should be set to -1 if device is a TPU device. Returns: An Op corresponding to a shard of infeed enqueued at the host, suitable for use within a replicated block. Raises: ValueError: if device is None and inputs do not all have the same device specification.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\tpu\\tpu_feed.py",
    "ast_data": "FunctionDef name:_generate_enqueue_op arg:self arg:inputs arg:name_prefix arg:index arg:device arg:tpu_ordinal arguments arg arg arg arg arg arg Assign Assign If Compare Assign For Call If Compare Raise Call Call With Call Return return:yes Call With Call Return return:yes Call"
  },
  {
    "library": "sphinx",
    "name": "_deepcopy",
    "source_code": "def _deepcopy(el: Element) -> Element:\n    newnode = el.copy()\n    newnode.children = [child.deepcopy() for child in el.children]\n    for child in newnode.children:\n        child.parent = newnode\n        if el.document:\n            child.document = el.document\n            if child.source is None:\n                child.source = el.document.current_source\n            if child.line is None:\n                child.line = el.document.current_line\n    return newnode",
    "docstring": "Monkey-patch for speed.",
    "type": "function",
    "file_path": "sphinx\\sphinx\\util\\nodes.py",
    "ast_data": "FunctionDef name:_deepcopy arg:el arguments arg Assign Call Assign Call For Assign If Assign If Compare Assign If Compare Assign Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "load",
    "source_code": "def load(self, path: str) -> None:\n    with open(path, 'rb') as f:\n        stats = pickle.load(f)\n    self.memories_allocated = stats['memories_allocated']\n    self.memories_active = stats['memories_active']\n    self.memories_reserved = stats['memories_reserved']\n    self._markers = stats['markers']\n    self._num_cuda_retries = stats['num_alloc_retries']",
    "docstring": "Load the pickled memory stats to plot the traces or print the summary.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\_tools\\memory_tracker.py",
    "ast_data": "FunctionDef name:load arg:self arg:path arguments arg arg With Call Assign Call Assign Assign Assign Assign Assign"
  },
  {
    "library": "django",
    "name": "action_checkbox",
    "source_code": "def action_checkbox(self, obj):\n    attrs = {'class': 'action-select', 'aria-label': format_html(_('Select this object for an action - {}'), str(obj))}\n    checkbox = forms.CheckboxInput(attrs, lambda value: False)\n    return checkbox.render(helpers.ACTION_CHECKBOX_NAME, str(obj.pk))",
    "docstring": "A list_display column containing a checkbox widget.",
    "type": "method",
    "file_path": "django\\django\\contrib\\admin\\options.py",
    "ast_data": "FunctionDef name:action_checkbox arg:self arg:obj arguments arg arg Assign Call Call Call Assign Call arguments arg Return return:yes Call Call"
  },
  {
    "library": "seaborn",
    "name": "_map_prop_with_hue",
    "source_code": "def _map_prop_with_hue(self, name, value, fallback, plot_kws):\n    if value is default:\n        value = plot_kws.pop(name, fallback)\n    if 'hue' in self.variables:\n        levels = self._hue_map.levels\n        if isinstance(value, list):\n            mapping = {k: v for k, v in zip(levels, value)}\n        else:\n            mapping = {k: value for k in levels}\n    else:\n        mapping = {None: value}\n    return mapping",
    "docstring": "Support pointplot behavior of modifying the marker/linestyle with hue.",
    "type": "method",
    "file_path": "seaborn\\seaborn\\categorical.py",
    "ast_data": "FunctionDef name:_map_prop_with_hue arg:self arg:name arg:value arg:fallback arg:plot_kws arguments arg arg arg arg arg If Compare Assign Call If Compare Assign If Call Assign Call Assign Assign Return return:yes"
  },
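The core of the method is the mapping construction: a scalar value applies to every hue level, while a list is paired with levels positionally. A minimal sketch of just that logic; the `map_prop` name and inputs are illustrative:

```python
def map_prop(levels, value):
    if isinstance(value, list):
        return dict(zip(levels, value))  # one value per hue level
    return {k: value for k in levels}    # same value for all levels

assert map_prop(["a", "b"], "o") == {"a": "o", "b": "o"}
assert map_prop(["a", "b"], ["o", "s"]) == {"a": "o", "b": "s"}
```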
  {
    "library": "django",
    "name": "classproperty",
    "source_code": "class classproperty:\n\n    def __init__(self, method=None):\n        self.fget = method\n\n    def __get__(self, instance, cls=None):\n        return self.fget(cls)\n\n    def getter(self, method):\n        self.fget = method\n        return self",
    "docstring": "Decorator that converts a method with a single cls argument into a property that can be accessed directly from the class.",
    "type": "class",
    "file_path": "django\\django\\utils\\functional.py",
    "ast_data": "ClassDef name:classproperty FunctionDef name:__init__ arg:self arg:method arguments arg arg Assign FunctionDef name:__get__ arg:self arg:instance arg:cls arguments arg arg arg Return return:yes Call FunctionDef name:getter arg:self arg:method arguments arg arg Assign Return return:yes"
  },
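A usage sketch for the descriptor above (the `Config` class is a hypothetical example): because `__get__` ignores the instance and passes the owning class to the wrapped method, the property is readable directly on the class.

```python
class classproperty:
    # Same descriptor as in the entry above.
    def __init__(self, method=None):
        self.fget = method

    def __get__(self, instance, cls=None):
        return self.fget(cls)

class Config:
    _name = "default"

    @classproperty
    def name(cls):
        return cls._name

assert Config.name == "default"    # no instance needed
assert Config().name == "default"  # instances work too
```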
  {
    "library": "tensorflow",
    "name": "decorated",
    "source_code": "def decorated(metric_obj, *args, **kwargs):\n    strategy = distribute_lib.get_strategy()\n    for weight in metric_obj.weights:\n        if backend.is_tpu_strategy(strategy) and (not strategy.extended.variable_created_in_scope(weight)) and (not distribute_lib.in_cross_replica_context()):\n            raise ValueError('Trying to run metric.update_state in replica context when the metric was not created in TPUStrategy scope. Make sure the keras Metric is created in TPUstrategy scope. ')\n    with tf_utils.graph_context_for_symbolic_tensors(*args, **kwargs):\n        update_op = update_state_fn(*args, **kwargs)\n    if update_op is not None:\n        metric_obj.add_update(update_op)\n    return update_op",
    "docstring": "Decorated function with .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\utils\\metrics_utils.py",
    "ast_data": "FunctionDef name:decorated arg:metric_obj arguments arg arg arg Assign Call For If BoolOp Call Call Call Raise Call With Call Assign Call If Compare Call Return return:yes"
  },
  {
    "library": "pandas",
    "name": "dtype_counts",
    "source_code": "@property\n@abstractmethod\ndef dtype_counts(self) -> Mapping[str, int]:\n    pass",
    "docstring": "Mapping dtype - number of counts.",
    "type": "method",
    "file_path": "pandas\\pandas\\io\\formats\\info.py",
    "ast_data": "FunctionDef name:dtype_counts arg:self arguments arg"
  },
  {
    "library": "tensorflow",
    "name": "_remove_indices",
    "source_code": "def _remove_indices(a, b):\n    items = array_ops_stack.unstack(sort_ops.sort(array_ops_stack.stack(b)), num=len(b))\n    i = 0\n    result = []\n    for item in items:\n        result.append(a[i:item])\n        i = item + 1\n    result.append(a[i:])\n    return array_ops.concat(result, 0)",
    "docstring": "Remove indices () from .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\numpy_ops\\np_array_ops.py",
    "ast_data": "FunctionDef name:_remove_indices arg:a arg:b arguments arg arg Assign Call Call Call Call Assign Assign For Call Assign Call Return return:yes Call"
  },
  {
    "library": "cherrypy",
    "name": "expire",
    "source_code": "def expire():\n    name = cherrypy.serving.request.config.get('tools.sessions.name', 'session_id')\n    one_year = 60 * 60 * 24 * 365\n    e = time.time() - one_year\n    cherrypy.serving.response.cookie[name]['expires'] = httputil.HTTPDate(e)\n    cherrypy.serving.response.cookie[name].pop('max-age', None)",
    "docstring": "Expire the current session cookie.",
    "type": "function",
    "file_path": "cherrypy\\cherrypy\\lib\\sessions.py",
    "ast_data": "FunctionDef name:expire arguments Assign Call Assign Assign Call Assign Call Call"
  },
  {
    "library": "pytorch",
    "name": "calculate_fused_tensor_size",
    "source_code": "def calculate_fused_tensor_size(split_node: torch.fx.Node, indices: list[int]) -> int:\n    fused_tensor_size = 0\n    for i in range(len(split_node.args[1])):\n        if i in indices:\n            fused_tensor_size += split_node.args[1][i]\n    return fused_tensor_size",
    "docstring": "Calculate the fused tensor size in the indices",
    "type": "function",
    "file_path": "pytorch\\torch\\_inductor\\fx_passes\\split_cat.py",
    "ast_data": "FunctionDef name:calculate_fused_tensor_size arg:split_node arg:indices arguments arg arg Assign For Call Call If Compare Return return:yes"
  },
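In plain terms, the function sums the split-section sizes at the selected positions; `split_node.args[1]` holds the list of section sizes. A dependency-free sketch with hypothetical sizes:

```python
def fused_tensor_size(split_sizes, indices):
    # Sum only the split sections whose position is in `indices`.
    return sum(size for i, size in enumerate(split_sizes) if i in indices)

assert fused_tensor_size([2, 3, 5, 7], [1, 3]) == 10  # 3 + 7
```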
  {
    "library": "django",
    "name": "is_initial",
    "source_code": "def is_initial(self, value):\n    return bool(value and getattr(value, 'url', False))",
    "docstring": "Return whether value is considered to be initial value.",
    "type": "method",
    "file_path": "django\\django\\forms\\widgets.py",
    "ast_data": "FunctionDef name:is_initial arg:self arg:value arguments arg arg Return return:yes Call BoolOp Call"
  },
  {
    "library": "pytorch",
    "name": "close_streams",
    "source_code": "@classmethod\ndef close_streams(cls, v, depth=0):\n    if depth > 10:\n        return\n    if isinstance(v, StreamWrapper):\n        v.close()\n    elif isinstance(v, dict):\n        for vv in v.values():\n            cls.close_streams(vv, depth=depth + 1)\n    elif isinstance(v, (list, tuple)):\n        for vv in v:\n            cls.close_streams(vv, depth=depth + 1)",
    "docstring": "Traverse structure and attempts to close all found StreamWrappers on best effort basis.",
    "type": "method",
    "file_path": "pytorch\\torch\\utils\\data\\datapipes\\utils\\common.py",
    "ast_data": "FunctionDef name:close_streams arg:cls arg:v arg:depth arguments arg arg arg If Compare Return return:no If Call Call If Call For Call Call If Call For Call"
  },
  {
    "library": "tensorflow",
    "name": "CategoricalHinge",
    "source_code": "class CategoricalHinge(MeanMetricWrapper):\n\n    def __init__(self, name='categorical_hinge', dtype=None):\n        super(CategoricalHinge, self).__init__(categorical_hinge, name, dtype=dtype)",
    "docstring": "Computes the categorical hinge metric between and . Args: name: (Optional) string name of the metric instance. dtype: (Optional) data type of the metric result. Standalone usage: >>> m = tf.keras.metrics.CategoricalHinge() >>> m.update_state([[0, 1], [0, 0]], [[0.6, 0.4], [0.4, 0.6]]) >>> m.result().numpy() 1.4000001 >>> m.reset_state() >>> m.update_state([[0, 1], [0, 0]], [[0.6, 0.4], [0.4, 0.6]], ... sample_weight=[1, 0]) >>> m.result().numpy() 1.2 Usage with API:",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\metrics.py",
    "ast_data": "ClassDef name:CategoricalHinge FunctionDef name:__init__ arg:self arg:name arg:dtype arguments arg arg arg Call Call"
  },
  {
    "library": "pytorch",
    "name": "_wrap_with_op_strategy",
    "source_code": "def _wrap_with_op_strategy(self, op_schema: OpSchema) -> OpSchema:\n\n    def spec_to_strategy(spec: object) -> object:\n        if isinstance(spec, DTensorSpec):\n            return OpStrategy([PlacementStrategy(spec)])\n        elif isinstance(spec, (list, tuple)) and len(spec) > 0 and isinstance(spec[0], DTensorSpec):\n            tuple_strategy = [spec_to_strategy(s) for s in spec]\n            tuple_strategy = cast(Sequence[StrategyType], tuple_strategy)\n            return TupleStrategy(tuple(tuple_strategy) if isinstance(spec, tuple) else tuple_strategy)\n        else:\n            return spec\n    args_op_strategy = [spec_to_strategy(i) for i in op_schema.args_schema]\n    kwargs_op_strategy = {k: spec_to_strategy(v) for k, v in op_schema.kwargs_schema.items()}\n    return OpSchema(op=op_schema.op, args_schema=tuple(args_op_strategy), kwargs_schema=kwargs_op_strategy)",
    "docstring": "wrap a op_schema that contains DTensorSpec to another op_schema that contains OpStrategy/TupleStrategy, the returned op_schema is then used for sharding strategy propagation on pytorch operators.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\tensor\\_sharding_prop.py",
    "ast_data": "FunctionDef name:_wrap_with_op_strategy arg:self arg:op_schema arguments arg arg FunctionDef name:spec_to_strategy arg:spec arguments arg If Call Return return:yes Call Call If BoolOp Call Compare Call Call Assign Call Assign Call Return return:yes Call Call Call Return return:yes Assign Call Assign Call Call Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "scatter_div",
    "source_code": "@tf_export(v1=['scatter_div'])\ndef scatter_div(ref, indices, updates, use_locking=False, name=None):\n    if ref.dtype._is_ref_dtype:\n        return gen_state_ops.scatter_div(ref, indices, updates, use_locking=use_locking, name=name)\n    return ref._lazy_read(gen_resource_variable_ops.resource_scatter_div(ref.handle, indices, ops.convert_to_tensor(updates, ref.dtype), name=name))",
    "docstring": "Divides a variable reference by sparse updates. This operation computes This operation outputs after the update is done. This makes it easier to chain operations that need to use the reset value. Duplicate entries are handled correctly: if multiple reference the same location, their contributions divide. Requires or . Args: ref: A mutable . Must be one of the following types: , , , , , , , , , , , , , , , , . Should be from a node. indices: A . Must be one of the following types: , . A tensor of indices into the first dimension of . updates: A . Must have the same type as . A tensor of values that is divided by. use_locking: An optional . Defaults to . If True, the operation will be protected by a lock; otherwise the behavior is undefined, but may exhibit less contention. name: A name for the operation (optional). Returns: A mutable . Has the same type as .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\state_ops.py",
    "ast_data": "FunctionDef name:scatter_div arg:ref arg:indices arg:updates arg:use_locking arg:name arguments arg arg arg arg arg If Return return:yes Call Return return:yes Call Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "RootDim",
    "source_code": "@dataclasses.dataclass\nclass RootDim:\n    min: int\n    max: Union[int, None]\n    derived: list[str]",
    "docstring": "This represents a Dim object.",
    "type": "class",
    "file_path": "pytorch\\torch\\_export\\serde\\dynamic_shapes.py",
    "ast_data": "ClassDef name:RootDim"
  },
  {
    "library": "tensorflow",
    "name": "get_tensor_file_paths",
    "source_code": "def get_tensor_file_paths(self, node_name, output_slot, debug_op, device_name=None):\n    device_name = self._infer_device_name(device_name, node_name)\n    watch_key = _get_tensor_watch_key(node_name, output_slot, debug_op)\n    if watch_key not in self._watch_key_to_datum[device_name]:\n        raise WatchKeyDoesNotExistInDebugDumpDirError('Watch key \"%s\" does not exist in the debug dump of device %s' % (watch_key, device_name))\n    return [datum.file_path for datum in self._watch_key_to_datum[device_name][watch_key]]",
    "docstring": "Get the file paths from a debug-dumped tensor. Args: node_name: () name of the node that the tensor is produced by. output_slot: () output slot index of tensor. debug_op: () name of the debug op. device_name: () name of the device. If there is only one device or if the specified debug_watch_key exists on only one device, this argument is optional. Returns: List of file path(s) loaded. This is a list because each debugged tensor may be dumped multiple times. Raises: WatchKeyDoesNotExistInDebugDumpDirError: If the tensor does not exist in the debug-dump data.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\debug\\lib\\debug_data.py",
    "ast_data": "FunctionDef name:get_tensor_file_paths arg:self arg:node_name arg:output_slot arg:debug_op arg:device_name arguments arg arg arg arg arg Assign Call Assign Call If Compare Raise Call Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "_safe_divide",
    "source_code": "def _safe_divide(numerator, denominator):\n    if abs(denominator) < 1e-150:\n        return 0.0\n    else:\n        result = float(numerator) / float(denominator)\n        result = float(numerator) / float(denominator)\n        if math.isinf(result):\n            warnings.warn('overflow encountered in _safe_divide', RuntimeWarning)\n        return result",
    "docstring": "Prevents overflow and division by zero.",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\ensemble\\_gb.py",
    "ast_data": "FunctionDef name:_safe_divide arg:numerator arg:denominator arguments arg arg If Compare Call Return return:yes Assign Call Call Assign Call Call If Call Call Return return:yes"
  },
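A usage sketch of the guarded division above, including the tiny-denominator cutoff and the overflow warning (the 1e-150 constant comes from the entry itself; the `safe_divide` name is illustrative):

```python
import math
import warnings

def safe_divide(numerator, denominator):
    # Treat near-zero denominators as 0 to avoid ZeroDivisionError/overflow.
    if abs(denominator) < 1e-150:
        return 0.0
    result = float(numerator) / float(denominator)
    if math.isinf(result):
        warnings.warn("overflow encountered in safe_divide", RuntimeWarning)
    return result

assert safe_divide(1.0, 0.0) == 0.0  # division by zero short-circuited
assert safe_divide(6.0, 3.0) == 2.0
```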
  {
    "library": "pytorch",
    "name": "current_device",
    "source_code": "def current_device() -> int:\n    _lazy_init()\n    return torch._C._xpu_getDevice()",
    "docstring": "Return the index of a currently selected device.",
    "type": "function",
    "file_path": "pytorch\\torch\\xpu\\__init__.py",
    "ast_data": "FunctionDef name:current_device arguments Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "experimental_from_proto",
    "source_code": "@classmethod\ndef experimental_from_proto(cls, proto: struct_pb2.TensorSpecProto) -> 'TensorSpec':\n    return TensorSpec(shape=tensor_shape.TensorShape.experimental_from_proto(proto.shape), dtype=proto.dtype, name=proto.name if proto.name else None)",
    "docstring": "Returns a TensorSpec instance based on the serialized proto.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\tensor.py",
    "ast_data": "FunctionDef name:experimental_from_proto arg:cls arg:proto arguments arg arg Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "starting_wall_time",
    "source_code": "def starting_wall_time(self):\n    return self._starting_wall_time",
    "docstring": "Get the starting timestamp of the instrumented TensorFlow program. When there are multiple hosts (i.e., multiple tfdbg file sets), the earliest timestamp among the file sets is returned. It is assumed to be the job that starts first (e.g., the coordinator). Returns: Starting timestamp in seconds since the epoch, as a float.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\debug\\lib\\debug_events_reader.py",
    "ast_data": "FunctionDef name:starting_wall_time arg:self arguments arg Return return:yes"
  },
  {
    "library": "scipy",
    "name": "dstn",
    "source_code": "@_dispatch\ndef dstn(x, type=2, s=None, axes=None, norm=None, overwrite_x=False, workers=None, orthogonalize=None):\n    return (Dispatchable(x, np.ndarray),)",
    "docstring": "Return multidimensional Discrete Sine Transform along the specified axes. Parameters ---------- x : array_like The input array. type : {1, 2, 3, 4}, optional Type of the DST (see Notes). Default type is 2. s : int or array_like of ints or None, optional The shape of the result. If both and (see below) are None, is `saxess`s[i] >> import numpy as np >>> from scipy.fft import dstn, idstn >>> rng = np.random.default_rng() >>> y = rng.standard_normal((16, 16)) >>> np.allclose(y, idstn(dstn(y))) True",
    "type": "function",
    "file_path": "scipy\\scipy\\fft\\_realtransforms.py",
    "ast_data": "FunctionDef name:dstn arg:x arg:type arg:s arg:axes arg:norm arg:overwrite_x arg:workers arg:orthogonalize arguments arg arg arg arg arg arg arg arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "named_parameters",
    "source_code": "def named_parameters(self, *args, **kwargs) -> Iterator[tuple[str, torch.nn.Parameter]]:\n    should_clean_name = self.training_state == TrainingState.SUMMON_FULL_PARAMS\n    for param_name, param in super().named_parameters(*args, **kwargs):\n        if should_clean_name:\n            param_name = param_name.replace(FSDP_PREFIX, '')\n        yield (param_name, param)",
    "docstring": "Return an iterator over module parameters, yielding both the name of the parameter and the parameter itself. Intercepts parameter names and removes all occurrences of the FSDP-specific flattened parameter prefix when inside the :meth: context manager.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\fsdp\\fully_sharded_data_parallel.py",
    "ast_data": "FunctionDef name:named_parameters arg:self arguments arg arg arg Assign Compare For Call Call If Assign Call"
  },
  {
    "library": "pytorch",
    "name": "get_seq_lens",
    "source_code": "def get_seq_lens(self, input_length):\n    seq_len = input_length\n    for m in self.conv.modules():\n        if type(m) == nn.modules.conv.Conv2d:\n            seq_len = seq_len + 2 * m.padding[1] - m.dilation[1] * (m.kernel_size[1] - 1) - 1\n            seq_len = seq_len.true_divide(m.stride[1]) + 1\n    return seq_len.int()",
    "docstring": "Given a 1D Tensor or Variable containing integer sequence lengths, return a 1D tensor or variable containing the size sequences that will be output by the network. :param input_length: 1D Tensor :return: 1D Tensor scaled by model",
    "type": "method",
    "file_path": "pytorch\\benchmarks\\functional_autograd_benchmark\\torchaudio_models.py",
    "ast_data": "FunctionDef name:get_seq_lens arg:self arg:input_length arguments arg arg Assign For Call If Compare Call Assign Assign Call Return return:yes Call"
  },
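The arithmetic inside `get_seq_lens` is the standard Conv2d output-length formula applied to the time axis. A small standalone check of that formula, with hypothetical layer sizes and no torch dependency:

```python
def conv_out_len(length, kernel, stride, padding, dilation=1):
    # floor((L + 2*pad - dilation*(kernel - 1) - 1) / stride) + 1
    return (length + 2 * padding - dilation * (kernel - 1) - 1) // stride + 1

# e.g. a 100-step input through kernel=11, stride=2, padding=5 -> 50 steps
print(conv_out_len(100, kernel=11, stride=2, padding=5))  # 50
```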
  {
    "library": "pytorch",
    "name": "var",
    "source_code": "@_apply_docstring_templates\ndef var(input: Union[Tensor, MaskedTensor], dim: DimOrDims=None, unbiased: Optional[bool]=None, *, correction: Optional[Union[int, float]]=None, keepdim: Optional[bool]=False, dtype: Optional[DType]=None, mask: Optional[Tensor]=None) -> Tensor:\n    return _std_var(input=input, dim=dim, unbiased=unbiased, correction_opt=correction, keepdim=keepdim, dtype=dtype, mask=mask, take_sqrt=False)",
    "docstring": "{reduction_signature} {reduction_descr} The identity value of sample variance operation is undefined. The elements of output tensor with strided layout, that correspond to fully masked-out elements, have `` values. {reduction_args} {reduction_example}",
    "type": "function",
    "file_path": "pytorch\\torch\\masked\\_ops.py",
    "ast_data": "FunctionDef name:var arg:input arg:dim arg:unbiased arguments arg arg arg arg arg arg arg Return return:yes Call"
  },
  {
    "library": "django",
    "name": "populate",
    "source_code": "def populate(self, installed_apps=None):\n    if self.ready:\n        return\n    with self._lock:\n        if self.ready:\n            return\n        if self.loading:\n            raise RuntimeError(\"populate() isn't reentrant\")\n        self.loading = True\n        for entry in installed_apps:\n            if isinstance(entry, AppConfig):\n                app_config = entry\n            else:\n                app_config = AppConfig.create(entry)\n            if app_config.label in self.app_configs:\n                raise ImproperlyConfigured(\"Application labels aren't unique, duplicates: %s\" % app_config.label)\n            self.app_configs[app_config.label] = app_config\n            app_config.apps = self\n        counts = Counter((app_config.name for app_config in self.app_configs.values()))\n        duplicates = [name for name, count in counts.most_common() if count > 1]\n        if duplicates:\n            raise ImproperlyConfigured(\"Application names aren't unique, duplicates: %s\" % ', '.join(duplicates))\n        self.apps_ready = True\n        for app_config in self.app_configs.values():\n            app_config.import_models()\n        self.clear_cache()\n        self.models_ready = True\n        for app_config in self.get_app_configs():\n            app_config.ready()\n        self.ready = True\n        self.ready_event.set()",
    "docstring": "Load application configurations and models. Import each application module and then each model module. It is thread-safe and idempotent, but not reentrant.",
    "type": "method",
    "file_path": "django\\django\\apps\\registry.py",
    "ast_data": "FunctionDef name:populate arg:self arg:installed_apps arguments arg arg If Return return:no With If Return return:no If Raise Call Assign For If Call Assign Assign Call If Compare Raise Call Assign Assign Assign Call Call Assign Call Compare If Raise Call Call Assign For Call Call Call Assign For Call Call Assign Call"
  },
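The thread-safety and idempotence contract described in this docstring is the classic double-checked locking pattern; a minimal sketch of the same shape, using generic names rather than Django's actual registry:

```python
import threading

class Registry:
    def __init__(self):
        self._lock = threading.RLock()
        self.ready = False
        self.loading = False

    def populate(self, entries):
        if self.ready:           # fast path: already done, no lock needed
            return
        with self._lock:
            if self.ready:       # re-check under the lock (another thread won)
                return
            if self.loading:     # same thread re-entered: not reentrant
                raise RuntimeError("populate() isn't reentrant")
            self.loading = True
            for entry in entries:
                pass             # ... load each entry exactly once ...
            self.ready = True
```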
  {
    "library": "pytorch",
    "name": "UninitializedBuffer",
    "source_code": "class UninitializedBuffer(UninitializedTensorMixin, torch.Tensor):\n    cls_to_become = torch.Tensor\n\n    def __new__(cls, requires_grad=False, device=None, dtype=None, persistent=True) -> None:\n        factory_kwargs = {'device': device, 'dtype': dtype}\n        data = torch.empty(0, **factory_kwargs)\n        ret = torch.Tensor._make_subclass(cls, data, requires_grad)\n        ret.persistent = persistent\n        ret._is_buffer = True\n        return ret",
    "docstring": "A buffer that is not initialized. Uninitialized Buffer is a a special case of :class: where the shape of the data is still unknown. Unlike a :class:, uninitialized parameters hold no data and attempting to access some properties, like their shape, will throw a runtime error. The only operations that can be performed on a uninitialized parameter are changing its datatype, moving it to a different device and converting it to a regular :class:. The default device or dtype to use when the buffer is materialized can be set during construction using e.g. ``.",
    "type": "class",
    "file_path": "pytorch\\torch\\nn\\parameter.py",
    "ast_data": "ClassDef name:UninitializedBuffer Assign FunctionDef name:__new__ arg:cls arg:requires_grad arg:device arg:dtype arg:persistent arguments arg arg arg arg arg Assign Assign Call Assign Call Assign Assign Return return:yes"
  },
  {
    "library": "numpy",
    "name": "_str_smallest_subnormal",
    "source_code": "@property\ndef _str_smallest_subnormal(self):\n    return self._float_to_str(self.smallest_subnormal)",
    "docstring": "Return the string representation of the smallest subnormal.",
    "type": "method",
    "file_path": "numpy\\numpy\\_core\\getlimits.py",
    "ast_data": "FunctionDef name:_str_smallest_subnormal arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_set_spawn_exe_path",
    "source_code": "def _set_spawn_exe_path():\n    if sys.argv[0].endswith('.py'):\n\n        def guess_path(package_root):\n            if 'bazel-out' in sys.argv[0] and package_root in sys.argv[0]:\n                package_root_base = sys.argv[0][:sys.argv[0].rfind(package_root)]\n                binary = os.environ['TEST_TARGET'][2:].replace(':', '/', 1)\n                possible_path = os.path.join(package_root_base, package_root, binary)\n                logging.info('Guessed test binary path: %s', possible_path)\n                if os.access(possible_path, os.X_OK):\n                    return possible_path\n                return None\n        path = guess_path('org_tensorflow')\n        if not path:\n            path = guess_path('org_keras')\n        if path is None:\n            logging.error('Cannot determine binary path. sys.argv[0]=%s os.environ=%s', sys.argv[0], os.environ)\n            raise RuntimeError('Cannot determine binary path')\n        sys.argv[0] = path\n    multiprocessing.get_context().set_executable(sys.argv[0])",
    "docstring": "Set the path to the executable for spawned processes. This utility searches for the binary the parent process is using, and sets the executable of multiprocessing's context accordingly. Raises: RuntimeError: If the binary path cannot be determined.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\multi_process_lib.py",
    "ast_data": "FunctionDef name:_set_spawn_exe_path arguments If Call FunctionDef name:guess_path arg:package_root arguments arg If BoolOp Compare Compare Assign Call Assign Call Assign Call Call If Call Return return:yes Return return:no Assign Call If Assign Call If Compare Call Raise Call Assign Call Call"
  },
  {
    "library": "pandas",
    "name": "validate_argmax_with_skipna",
    "source_code": "def validate_argmax_with_skipna(skipna: bool | ndarray | None, args, kwargs) -> bool:\n    skipna, args = process_skipna(skipna, args)\n    validate_argmax(args, kwargs)\n    return skipna",
    "docstring": "If 'Series.argmax' is called via the 'numpy' library, the third parameter in its signature is 'out', which takes either an ndarray or 'None', so check if the 'skipna' parameter is either an instance of ndarray or is None, since 'skipna' itself should be a boolean",
    "type": "function",
    "file_path": "pandas\\pandas\\compat\\numpy\\function.py",
    "ast_data": "FunctionDef name:validate_argmax_with_skipna arg:skipna arg:args arg:kwargs arguments arg arg arg Assign Call Call Return return:yes"
  },
  {
    "library": "pandas",
    "name": "_is_comparable_dtype",
    "source_code": "def _is_comparable_dtype(self, dtype: DtypeObj) -> bool:\n    if self.dtype.kind == 'b':\n        return dtype.kind == 'b'\n    elif is_numeric_dtype(self.dtype):\n        return is_numeric_dtype(dtype)\n    return True",
    "docstring": "Can we compare values of the given dtype to our own?",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\indexes\\base.py",
    "ast_data": "FunctionDef name:_is_comparable_dtype arg:self arg:dtype arguments arg arg If Compare Return return:yes Compare If Call Return return:yes Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "DistributedDatasetsFromFunctionV1",
    "source_code": "class DistributedDatasetsFromFunctionV1(input_lib.DistributedDatasetsFromFunction):\n\n    def _make_initializable_iterator(self, shared_name=None):\n        del shared_name\n        if context.executing_eagerly():\n            raise ValueError('Cannot create initializable iterator in Eager mode. Please use `iter()` instead.')\n        return self._get_iterator()\n\n    def _make_one_shot_iterator(self):\n        if not context.executing_eagerly():\n            raise ValueError('Cannot create a one shot iterator. Please use `make_initializable_iterator()` instead.')\n        return self._get_iterator()\n\n    def _get_iterator(self):\n        iterators = _create_iterators_per_worker(self._datasets, self._input_workers, self._options)\n        cardinality = input_lib._cardinality(self._datasets[0])\n        iterator = DistributedIteratorV1(self._input_workers, iterators, self._strategy, cardinality, self._enable_get_next_as_optional)\n        iterator._element_spec = self._element_spec\n        if context.executing_eagerly():\n            context.async_wait()\n        return iterator\n\n    def __iter__(self):\n        if ops.executing_eagerly_outside_functions() or ops.get_default_graph().building_function:\n            return self._get_iterator()\n        raise RuntimeError('__iter__() is only supported inside of tf.function or when eager execution is enabled.')",
    "docstring": "Inputs created from dataset function.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\v1\\input_lib.py",
    "ast_data": "ClassDef name:DistributedDatasetsFromFunctionV1 FunctionDef name:_make_initializable_iterator arg:self arg:shared_name arguments arg arg If Call Raise Call Return return:yes Call FunctionDef name:_make_one_shot_iterator arg:self arguments arg If Call Raise Call Return return:yes Call FunctionDef name:_get_iterator arg:self arguments arg Assign Call Assign Call Assign Call Assign If Call Call Return return:yes FunctionDef name:__iter__ arg:self arguments arg If BoolOp Call Call Return return:yes Call Raise Call"
  },
  {
    "library": "matplotlib",
    "name": "NonGuiException",
    "source_code": "class NonGuiException(Exception):\n    pass",
    "docstring": "Raised when trying show a figure in a non-GUI backend.",
    "type": "class",
    "file_path": "matplotlib\\lib\\matplotlib\\backend_bases.py",
    "ast_data": "ClassDef name:NonGuiException"
  },
  {
    "library": "matplotlib",
    "name": "arc_spine",
    "source_code": "@classmethod\ndef arc_spine(cls, axes, spine_type, center, radius, theta1, theta2, **kwargs):\n    path = mpath.Path.arc(theta1, theta2)\n    result = cls(axes, spine_type, path, **kwargs)\n    result.set_patch_arc(center, radius, theta1, theta2)\n    return result",
    "docstring": "Create and return an arc .",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\spines.py",
    "ast_data": "FunctionDef name:arc_spine arg:cls arg:axes arg:spine_type arg:center arg:radius arg:theta1 arg:theta2 arguments arg arg arg arg arg arg arg arg Assign Call Assign Call Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "__init__",
    "source_code": "def __init__(self, axes, prop_name, valid_types=None, invalid_types=None):\n    self._axes = axes\n    self._prop_name = prop_name\n    self._type_check = lambda artist: (not valid_types or isinstance(artist, valid_types)) and (not invalid_types or not isinstance(artist, invalid_types))",
    "docstring": "Parameters ---------- axes : The Axes from which this sublist will pull the children Artists. prop_name : str The property name used to access this sublist from the Axes; used to generate deprecation warnings. valid_types : list of type, optional A list of types that determine which children will be returned by this sublist. If specified, then the Artists in the sublist must be instances of any of these types. If unspecified, then any type of Artist is valid (unless limited by *invalid_types*.) invalid_types : tuple, optional A list of types that determine which children will *not* be returned by this sublist. If specified, then Artists in the sublist will never be an instance of these types. Otherwise, no types will be excluded.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\axes\\_base.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:axes arg:prop_name arg:valid_types arg:invalid_types arguments arg arg arg arg arg Assign Assign Assign arguments arg BoolOp BoolOp Call BoolOp Call"
  },
  {
    "library": "pytorch",
    "name": "enable_static_observation",
    "source_code": "@torch.jit.export\ndef enable_static_observation(self):\n    self.toggle_qparam_learning(enabled=False).toggle_fake_quant(enabled=False).toggle_observer_update(enabled=True)",
    "docstring": "Enable accumulation of data without updating quantization parameters. Enables static observer accumulating data from input but doesn't update the quantization parameters. Forward path returns the original X.",
    "type": "method",
    "file_path": "pytorch\\torch\\ao\\quantization\\_learnable_fake_quantize.py",
    "ast_data": "FunctionDef name:enable_static_observation arg:self arguments arg Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "batch_scatter_update",
    "source_code": "def batch_scatter_update(self, sparse_delta, use_locking=False, name=None):\n    raise NotImplementedError",
    "docstring": "Assigns to this variable batch-wise. Analogous to . This assumes that this variable and the sparse_delta IndexedSlices have a series of leading dimensions that are the same for all of them, and the updates are performed on the last dimension of indices. In other words, the dimensions should be the following: where And the operation performed can be expressed as: When sparse_delta.indices is a 1D tensor, this operation is equivalent to . To avoid this operation one can looping over the first of the variable and using on the subtensors that result of slicing the first dimension. This is a valid option for , but less efficient than this implementation. Args: sparse_delta: to be assigned to this variable. use_locking: If , use locking during the operation. name: the name of the operation. Returns: The updated variable. Raises: TypeError: if is not an .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\variables.py",
    "ast_data": "FunctionDef name:batch_scatter_update arg:self arg:sparse_delta arg:use_locking arg:name arguments arg arg arg arg Raise"
  },
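The batch-wise semantics spelled out in that docstring are easy to see with plain NumPy; a sketch of the scatter for one leading batch dimension (illustrative only, not the TensorFlow implementation, which this entry leaves abstract):

```python
import numpy as np

var = np.zeros((2, 4))                  # batch of 2 rows, 4 slots each
indices = np.array([[0, 3], [1, 2]])    # per-batch positions to update
updates = np.array([[10., 30.], [11., 22.]])

# var[b, indices[b, j]] = updates[b, j] for every batch b and update j
for b in range(var.shape[0]):
    var[b, indices[b]] = updates[b]

print(var)
# [[10.  0.  0. 30.]
#  [ 0. 11. 22.  0.]]
```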
  {
    "library": "tensorflow",
    "name": "extend",
    "source_code": "def extend(self, other):\n    orig_num_lines = self.num_lines()\n    self._lines.extend(other.lines)\n    for line_index in other.font_attr_segs:\n        self._font_attr_segs[orig_num_lines + line_index] = other.font_attr_segs[line_index]\n    for key in other.annotations:\n        if isinstance(key, int):\n            self._annotations[orig_num_lines + key] = other.annotations[key]\n        else:\n            self._annotations[key] = other.annotations[key]",
    "docstring": "Extend this instance of RichTextLines with another instance. The extension takes effect on the text lines, the font attribute segments, as well as the annotations. The line indices in the font attribute segments and the annotations are adjusted to account for the existing lines. If there are duplicate, non-line-index fields in the annotations, the value from the input argument \"other\" will override that in this instance. Args: other: (RichTextLines) The other RichTextLines instance to be appended at the end of this instance.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\debug\\cli\\debugger_cli_common.py",
    "ast_data": "FunctionDef name:extend arg:self arg:other arguments arg arg Assign Call Call For Assign For If Call Assign Assign"
  },
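The index adjustment described above amounts to offsetting every line-keyed entry by the number of pre-existing lines before merging; a minimal sketch with plain dicts and hypothetical field names:

```python
def extend_annotations(base_lines, base_ann, other_lines, other_ann):
    offset = len(base_lines)
    base_lines.extend(other_lines)
    for key, value in other_ann.items():
        # Integer keys index lines and must be shifted; others copy as-is.
        base_ann[offset + key if isinstance(key, int) else key] = value

lines, ann = ["a", "b"], {0: "first"}
extend_annotations(lines, ann, ["c"], {0: "was line 0, now line 2"})
print(ann)  # {0: 'first', 2: 'was line 0, now line 2'}
```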
  {
    "library": "tensorflow",
    "name": "deprecated_internal_set_learning_phase",
    "source_code": "def deprecated_internal_set_learning_phase(value):\n    global _GRAPH_LEARNING_PHASES\n    if value not in {0, 1}:\n        raise ValueError('Expected learning phase to be 0 or 1.')\n    with ops.init_scope():\n        if context.executing_eagerly():\n            _DUMMY_EAGER_GRAPH.learning_phase_is_set = True\n            _GRAPH_LEARNING_PHASES[_DUMMY_EAGER_GRAPH.key] = value\n        _GRAPH_LEARNING_PHASES[get_graph()] = value",
    "docstring": "A deprecated internal implementation of set_learning_phase. This method is an internal-only version of that does not raise a deprecation error. It is required because saved_model needs to keep working with user code that uses the deprecated learning phase methods until those APIs are fully removed from the public API. Specifically SavedModel saving needs to make sure the learning phase is 0 during tracing even if users overwrote it to a different value. But, we don't want to raise deprecation warnings for users when savedmodel sets learning phase just for compatibility with code that relied on explicitly setting the learning phase for other values. Args: value: Learning phase value, either 0 or 1 (integers). 0 = test, 1 = train Raises: ValueError: if is neither nor .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\backend.py",
    "ast_data": "FunctionDef name:deprecated_internal_set_learning_phase arg:value arguments arg If Compare Raise Call With Call If Call Assign Assign Assign Call"
  },
  {
    "library": "scikit-learn",
    "name": "_extract_xi_labels",
    "source_code": "def _extract_xi_labels(ordering, clusters):\n    labels = np.full(len(ordering), -1, dtype=int)\n    label = 0\n    for c in clusters:\n        if not np.any(labels[c[0]:c[1] + 1] != -1):\n            labels[c[0]:c[1] + 1] = label\n            label += 1\n    labels[ordering] = labels.copy()\n    return labels",
    "docstring": "Extracts the labels from the clusters returned by . We rely on the fact that clusters are stored with the smaller clusters coming before the larger ones. Parameters ---------- ordering : array-like of shape (n_samples,) The ordering of points calculated by OPTICS clusters : array-like of shape (n_clusters, 2) List of clusters i.e. (start, end) tuples, as returned by . Returns ------- labels : ndarray of shape (n_samples,)",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\cluster\\_optics.py",
    "ast_data": "FunctionDef name:_extract_xi_labels arg:ordering arg:clusters arguments arg arg Assign Call Call Assign For If Call Compare Assign Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "add_op_callback",
    "source_code": "def add_op_callback(self, callback):\n    if callback not in self._thread_local_data.op_callbacks:\n        self._thread_local_data.op_callbacks.append(callback)",
    "docstring": "Add a post-op callback to the context. A post-op callback is invoked immediately after an eager operation or function has finished execution or after a op has been added to a graph, providing access to the op's type, name input and output tensors. Multiple op callbacks can be added, in which case the callbacks will be invoked in the order in which they are added. Args: callback: a callable of the signature . See doc strings in for details on the function signature and its semantics.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\context.py",
    "ast_data": "FunctionDef name:add_op_callback arg:self arg:callback arguments arg arg If Compare Call"
  },
  {
    "library": "pytorch",
    "name": "run",
    "source_code": "@staticmethod\n@functools.lru_cache(None)\ndef run(obj: PatternExpr, output_name: str='output') -> str:\n    pp = PatternPrettyPrinter()\n    assert hasattr(obj, 'pretty_print')\n    out_str = obj.pretty_print(pp=pp)\n    output = [f'{pp.memoized_objs_names[key]} = {pp.memoized_objs_pp[key]}' for key in pp.memoized_objs_names]\n    output.append(f'{output_name} = {out_str}')\n    return '\\n'.join(output)",
    "docstring": "Serializes obj to python code with obj written out to",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\pattern_matcher.py",
    "ast_data": "FunctionDef name:run arg:obj arg:output_name arguments arg arg Assign Call Call Assign Call Assign Call Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "deferred_internal_captures",
    "source_code": "@property\ndef deferred_internal_captures(self):\n    return list(self._function_captures.by_ref_internal.values())",
    "docstring": "List of nest of placeholders which at call time will be fed.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\func_graph.py",
    "ast_data": "FunctionDef name:deferred_internal_captures arg:self arguments arg Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "LoadOptions",
    "source_code": "@tf_export('saved_model.LoadOptions', v1=[])\nclass LoadOptions(object):\n    __slots__ = ('allow_partial_checkpoint', 'experimental_io_device', 'experimental_skip_checkpoint', 'experimental_variable_policy', 'experimental_load_function_aliases')\n\n    def __init__(self, allow_partial_checkpoint=False, experimental_io_device=None, experimental_skip_checkpoint=False, experimental_variable_policy=None, experimental_load_function_aliases=False):\n        self.experimental_io_device = experimental_io_device\n        self.allow_partial_checkpoint = allow_partial_checkpoint\n        self.experimental_skip_checkpoint = experimental_skip_checkpoint\n        self.experimental_variable_policy = save_options.VariablePolicy.from_obj(experimental_variable_policy)\n        self.experimental_load_function_aliases = experimental_load_function_aliases",
    "docstring": "Options for loading a SavedModel. This function may be used in the argument in functions that load a SavedModel (, ).",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\saved_model\\load_options.py",
    "ast_data": "ClassDef name:LoadOptions Assign FunctionDef name:__init__ arg:self arg:allow_partial_checkpoint arg:experimental_io_device arg:experimental_skip_checkpoint arg:experimental_variable_policy arg:experimental_load_function_aliases arguments arg arg arg arg arg arg Assign Assign Assign Assign Call Assign Call"
  },
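A representative use of `LoadOptions`, pinning checkpoint I/O to a device when loading; this mirrors the documented TensorFlow usage, and the SavedModel path below is a placeholder:

```python
import tensorflow as tf

saved_model_path = "/tmp/my_saved_model"  # placeholder: an existing SavedModel dir

# Route all checkpoint I/O through the local host during loading.
load_options = tf.saved_model.LoadOptions(experimental_io_device="/job:localhost")
restored = tf.saved_model.load(saved_model_path, options=load_options)
```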
  {
    "library": "tensorflow",
    "name": "_options",
    "source_code": "def _options(self):\n    return gen_dataset_ops.get_options(self._variant_tensor)",
    "docstring": "Returns the options tensor for this dataset.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\data\\ops\\dataset_ops.py",
    "ast_data": "FunctionDef name:_options arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "__post_init__",
    "source_code": "def __post_init__(self):\n    if self.norm_type in (float('inf'), 'inf'):\n        object.__setattr__(self, 'reduce_op', 'max')\n    elif self.norm_type in (float('-inf'), '-inf'):\n        object.__setattr__(self, 'reduce_op', 'min')\n    elif isinstance(self.norm_type, (int, float)):\n        object.__setattr__(self, 'reduce_op', 'sum')\n    else:\n        raise NotImplementedError(f'Unsupported norm type: {self.norm_type}')",
    "docstring": "Set the appropriate reduce op based on the norm type.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\tensor\\_ops\\_math_ops.py",
    "ast_data": "FunctionDef name:__post_init__ arg:self arguments arg If Compare Call Call If Compare Call Call If Call Call Raise Call"
  },
  {
    "library": "pytorch",
    "name": "layout_match",
    "source_code": "@staticmethod\n@functools.lru_cache(32)\ndef layout_match(torch_layout: ir.Layout, cutlass_layout: 'cutlass_lib.LayoutType') -> bool:\n    return CUTLASSGemmTemplate.cutlass_layout(torch_layout) == cutlass_layout",
    "docstring": "Helper Method: Determines whether a given torch layout matches a given Cutlass layout",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\codegen\\cuda\\gemm_template.py",
    "ast_data": "FunctionDef name:layout_match arg:torch_layout arg:cutlass_layout arguments arg arg Return return:yes Compare Call Call"
  },
  {
    "library": "tensorflow",
    "name": "is_non_decreasing",
    "source_code": "@tf_export('math.is_non_decreasing', v1=['math.is_non_decreasing', 'debugging.is_non_decreasing', 'is_non_decreasing'])\n@dispatch.add_dispatch_support\n@deprecation.deprecated_endpoints('debugging.is_non_decreasing', 'is_non_decreasing')\ndef is_non_decreasing(x, name=None):\n    with ops.name_scope(name, 'is_non_decreasing', [x]):\n        diff = _get_results_for_monotonic_comparison(x, math_ops.greater_equal)\n        return math_ops.reduce_all(diff)",
    "docstring": "Returns if is non-decreasing. Elements of are compared in row-major order. The tensor is non-decreasing if for every adjacent pair we have TensorTensorTruexx` is not a numeric tensor.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\check_ops.py",
    "ast_data": "FunctionDef name:is_non_decreasing arg:x arg:name arguments arg arg With Call Assign Call Return return:yes Call Call Call"
  },
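The same predicate is a one-liner in NumPy, which makes the `x[i] <= x[i+1]` definition concrete (an equivalent standalone check, not the TensorFlow op):

```python
import numpy as np

def is_non_decreasing(x):
    x = np.ravel(x)  # row-major order, matching the docstring
    return bool(np.all(x[:-1] <= x[1:]))

print(is_non_decreasing([1, 1, 2, 3]))  # True
print(is_non_decreasing([3, 1]))        # False
print(is_non_decreasing([42]))          # True (fewer than two elements)
```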
  {
    "library": "scikit-learn",
    "name": "threshold_",
    "source_code": "@property\ndef threshold_(self):\n    scores = _get_feature_importances(estimator=self.estimator_, getter=self.importance_getter, transform_func='norm', norm_order=self.norm_order)\n    return _calculate_threshold(self.estimator, scores, self.threshold)",
    "docstring": "Threshold value used for feature selection.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\feature_selection\\_from_model.py",
    "ast_data": "FunctionDef name:threshold_ arg:self arguments arg Assign Call Return return:yes Call"
  },
  {
    "library": "numpy",
    "name": "_unpack_field",
    "source_code": "def _unpack_field(dtype, offset, title=None):\n    return (dtype, offset, title)",
    "docstring": "Helper function to normalize the items in dtype.fields. Call as: dtype, offset, title = _unpack_field(*dtype.fields[name])",
    "type": "function",
    "file_path": "numpy\\numpy\\_core\\_dtype.py",
    "ast_data": "FunctionDef name:_unpack_field arg:dtype arg:offset arg:title arguments arg arg arg Return return:yes"
  },
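The docstring's suggested call pattern, shown end to end on a structured dtype; for untitled fields `dtype.fields[name]` is a 2-tuple, so `title` falls back to its default of None:

```python
import numpy as np

def _unpack_field(dtype, offset, title=None):
    return (dtype, offset, title)

dt = np.dtype([("x", np.float64), ("y", np.int32)])
for name in dt.names:
    field_dtype, offset, title = _unpack_field(*dt.fields[name])
    print(name, field_dtype, offset, title)
# x float64 0 None
# y int32 8 None
```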
  {
    "library": "pytorch",
    "name": "_annotate_conv_node_helper",
    "source_code": "def _annotate_conv_node_helper(self, conv_node: torch.fx.Node, annotate_output: bool, quantization_config: Optional[QuantizationConfig]) -> None:\n    if quantization_config is None:\n        _annotate_nodes_not_quantize(conv_node)\n        return\n    input_qspec_map = {}\n    input_node = conv_node.args[0]\n    assert isinstance(input_node, Node)\n    input_qspec_map[input_node] = get_input_act_qspec(quantization_config)\n    weight_node = conv_node.args[1]\n    assert isinstance(weight_node, Node)\n    input_qspec_map[weight_node] = get_weight_qspec(quantization_config)\n    bias_node = None if len(conv_node.args) == 2 else conv_node.args[2]\n    if isinstance(bias_node, Node):\n        input_qspec_map[bias_node] = get_bias_qspec(quantization_config)\n    if annotate_output:\n        conv_node.meta[QUANT_ANNOTATION_KEY] = _X86InductorQuantizationAnnotation(input_qspec_map=input_qspec_map, _annotated=True, _is_output_of_quantized_pattern=True)\n    else:\n        conv_node.meta[QUANT_ANNOTATION_KEY] = _X86InductorQuantizationAnnotation(input_qspec_map=input_qspec_map, _annotated=True)",
    "docstring": "Helper function to annotate the conv node",
    "type": "method",
    "file_path": "pytorch\\torch\\ao\\quantization\\quantizer\\x86_inductor_quantizer.py",
    "ast_data": "FunctionDef name:_annotate_conv_node_helper arg:self arg:conv_node arg:annotate_output arg:quantization_config arguments arg arg arg arg If Compare Call Return return:no Assign Assign Call Assign Call Assign Call Assign Call Assign Compare Call If Call Assign Call If Assign Call Assign Call"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, cluster):\n    if isinstance(cluster, dict):\n        self._cluster_spec = {}\n        for job_name, tasks in cluster.items():\n            if isinstance(tasks, (list, tuple)):\n                job_tasks = {i: task for i, task in enumerate(tasks)}\n            elif isinstance(tasks, dict):\n                job_tasks = {int(i): task for i, task in tasks.items()}\n            else:\n                raise TypeError('The tasks for job %r must be a list or a dictionary from integers to strings.' % job_name)\n            self._cluster_spec[job_name] = job_tasks\n        self._make_cluster_def()\n    elif isinstance(cluster, cluster_pb2.ClusterDef):\n        self._cluster_def = cluster\n        self._cluster_spec = {}\n        for job_def in self._cluster_def.job:\n            self._cluster_spec[job_def.name] = {i: t for i, t in job_def.tasks.items()}\n    elif isinstance(cluster, ClusterSpec):\n        self._cluster_def = cluster_pb2.ClusterDef()\n        self._cluster_def.MergeFrom(cluster.as_cluster_def())\n        self._cluster_spec = {}\n        for job_def in self._cluster_def.job:\n            self._cluster_spec[job_def.name] = {i: t for i, t in job_def.tasks.items()}\n    else:\n        raise TypeError('`cluster` must be a dictionary mapping one or more job names to lists of network addresses, or a `ClusterDef` protocol buffer')",
    "docstring": "Creates a . Args: cluster: A dictionary mapping one or more job names to (i) a list of network addresses, or (ii) a dictionary mapping integer task indices to network addresses; or a protocol buffer. Raises: TypeError: If is not a dictionary mapping strings to lists of strings, and not a protobuf.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\training\\server_lib.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:cluster arguments arg arg If Call Assign For Call If Call Assign Call If Call Assign Call Call Raise Call Assign Call If Call Assign Assign For Assign Call If Call Assign Call Call Call Assign For Assign Call Raise Call"
  },
  {
    "library": "tensorflow",
    "name": "benchmark_map_and_batch",
    "source_code": "def benchmark_map_and_batch(self):\n    shapes = [(), (10,), (10, 10), (10, 10, 10), (224, 224, 3)]\n    batch_size_values = [1, 32, 64, 128, 1024]\n    for shape in shapes:\n        for batch_size in batch_size_values:\n            dataset = dataset_ops.Dataset.range(1000000000)\n            dense_value = random_ops.random_normal(shape=shape)\n            dataset = dataset.apply(batching.map_and_batch(lambda _: dense_value, batch_size))\n            options = options_lib.Options()\n            options.experimental_optimization.apply_default_optimizations = False\n            dataset = dataset.with_options(options)\n            self.run_and_report_benchmark(dataset=dataset, num_elements=batch_size, iters=100, warmup=True, extras={'model_name': 'map_and_batch.benchmark.1', 'parameters': '%d.%s' % (batch_size, str(shape))}, name='num_elements_%d_batch_size_%d' % (np.prod(shape), batch_size))",
    "docstring": "Measures the performance of parallelized batching.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\data\\experimental\\benchmarks\\map_and_batch_benchmark.py",
    "ast_data": "FunctionDef name:benchmark_map_and_batch arg:self arguments arg Assign Assign For For Assign Call Assign Call Assign Call Call arguments arg Assign Call Assign Assign Call Call Call Call"
  },
  {
    "library": "scikit-learn",
    "name": "_sample_hiddens",
    "source_code": "def _sample_hiddens(self, v, rng):\n    p = self._mean_hiddens(v)\n    return rng.uniform(size=p.shape) < p",
    "docstring": "Sample from the distribution P(h|v). Parameters ---------- v : ndarray of shape (n_samples, n_features) Values of the visible layer to sample from. rng : RandomState instance Random number generator to use. Returns ------- h : ndarray of shape (n_samples, n_components) Values of the hidden layer.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\neural_network\\_rbm.py",
    "ast_data": "FunctionDef name:_sample_hiddens arg:self arg:v arg:rng arguments arg arg arg Assign Call Return return:yes Compare Call"
  },
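The `rng.uniform(size=p.shape) < p` idiom used here is the standard way to draw Bernoulli samples from per-unit probabilities; a standalone demonstration:

```python
import numpy as np

rng = np.random.RandomState(0)
p = np.array([[0.1, 0.5, 0.9]])          # P(h_j = 1 | v) for three hidden units
samples = rng.uniform(size=p.shape) < p  # boolean Bernoulli draws
print(samples)

# Empirically the sample mean approaches p:
draws = rng.uniform(size=(100_000, 3)) < p
print(draws.mean(axis=0))  # ~[0.1, 0.5, 0.9]
```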
  {
    "library": "matplotlib",
    "name": "_set_artist_props",
    "source_code": "def _set_artist_props(self, a):\n    a.set_figure(self.get_figure(root=False))\n    if self.isaxes:\n        a.axes = self.axes\n    a.set_transform(self.get_transform())",
    "docstring": "Set the boilerplate props for artists added to Axes.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\legend.py",
    "ast_data": "FunctionDef name:_set_artist_props arg:self arg:a arguments arg arg Call Call If Assign Call Call"
  },
  {
    "library": "tensorflow",
    "name": "ragged_op_list",
    "source_code": "def ragged_op_list(tf_version=2):\n    lines = []\n    api_signatures = dispatch.type_based_dispatch_signatures_for(ragged_tensor.RaggedTensor)\n    for api, signatures in api_signatures.items():\n        arg_names = tf_inspect.getargspec(api).args\n        ragged_args = set()\n        for signature in signatures:\n            for arg in signature:\n                ragged_args.add(arg if isinstance(arg, int) else arg_names.index(arg))\n        if _op_is_in_tf_version(api, tf_version):\n            lines.append(_ragged_op_signature(api, ragged_args))\n    lines.append(_ragged_op_signature(logging_ops.print_v2, [], ragged_varargs=True))\n    return '\\n\\n### Additional ops that support `RaggedTensor`\\n\\nArguments that accept `RaggedTensor`s are marked in **bold**.\\n\\n' + '\\n'.join(sorted(lines)) + 'n'",
    "docstring": "Returns a string listing operations that have dispathers registered.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\ragged_dispatch.py",
    "ast_data": "FunctionDef name:ragged_op_list arg:tf_version arguments arg Assign Assign Call For Call Assign Call Assign Call For For Call Call Call If Call Call Call Call Call Return return:yes Call Call"
  },
  {
    "library": "scipy",
    "name": "_mwu_choose_method",
    "source_code": "def _mwu_choose_method(n1, n2, ties):\n    if n1 > 8 and n2 > 8:\n        return 'asymptotic'\n    if ties:\n        return 'asymptotic'\n    return 'exact'",
    "docstring": "Choose method 'asymptotic' or 'exact' depending on input size, ties",
    "type": "function",
    "file_path": "scipy\\scipy\\stats\\_mannwhitneyu.py",
    "ast_data": "FunctionDef name:_mwu_choose_method arg:n1 arg:n2 arg:ties arguments arg arg arg If BoolOp Compare Compare Return return:yes If Return return:yes Return return:yes"
  },
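A quick demonstration of what this decision rule returns for a few inputs; the function below is a pure re-statement of the branches above:

```python
def choose_method(n1, n2, ties):
    if n1 > 8 and n2 > 8:
        return "asymptotic"   # large samples: normal approximation suffices
    if ties:
        return "asymptotic"   # the exact null distribution assumes no ties
    return "exact"

print(choose_method(10, 12, ties=False))  # asymptotic
print(choose_method(5, 6, ties=True))     # asymptotic
print(choose_method(5, 6, ties=False))    # exact
```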
  {
    "library": "pytorch",
    "name": "dynamo_timed_cudagraph",
    "source_code": "@contextlib.contextmanager\ndef dynamo_timed_cudagraph(name: str, compile_id: Optional[CompileId], mode: Optional[CompilationMode]) -> Generator[Any, None, None]:\n    with dynamo_timed(name, log_pt2_compile_event=True, compile_id=compile_id, is_backward=mode == CompilationMode.BACKWARD, dynamo_compile_column_us='runtime_cudagraphify_time_us'):\n        yield",
    "docstring": "Makes usages of dynamo_timed in this file less verbose. NOTE: This CM sums all durations into a single column in the dynamo_compile table. Use only if you consider the timed region to be part of the runtime overhead associated with the compiler.",
    "type": "function",
    "file_path": "pytorch\\torch\\_inductor\\cudagraph_trees.py",
    "ast_data": "FunctionDef name:dynamo_timed_cudagraph arg:name arg:compile_id arg:mode arguments arg arg arg With Call Compare"
  },
  {
    "library": "authlib",
    "name": "_extract_session_request_params",
    "source_code": "def _extract_session_request_params(self, kwargs):\n    rv = {}\n    for k in self.SESSION_REQUEST_PARAMS:\n        if k in kwargs:\n            rv[k] = kwargs.pop(k)\n    return rv",
    "docstring": "Extract parameters for session object from the passing ``.",
    "type": "method",
    "file_path": "authlib\\authlib\\oauth2\\client.py",
    "ast_data": "FunctionDef name:_extract_session_request_params arg:self arg:kwargs arguments arg arg Assign For If Compare Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_complete_config_for_environment",
    "source_code": "def _complete_config_for_environment(platform_device, termination_config):\n    if not termination_config:\n        termination_config = TerminationConfig()\n    if platform_device is failure_handling_util.PlatformDevice.GCE_GPU:\n        return GcpGpuTerminationConfig(termination_config.termination_watcher_fn, termination_config.exit_fn, termination_config.grace_period, termination_config.save_fn)\n    elif platform_device is failure_handling_util.PlatformDevice.GCE_CPU:\n        return GcpCpuTerminationConfig(termination_config.termination_watcher_fn, termination_config.exit_fn, termination_config.grace_period, termination_config.save_fn)\n    elif platform_device is failure_handling_util.PlatformDevice.INTERNAL_TPU:\n        return BorgTPUTerminationConfig(termination_config.termination_watcher_fn, termination_config.exit_fn, termination_config.grace_period, termination_config.save_fn)\n    else:\n        return BorgTerminationConfig(termination_config.termination_watcher_fn, termination_config.exit_fn, termination_config.grace_period, termination_config.save_fn)",
    "docstring": "Complete un-filled fields of TerminationConfig based on platform.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\failure_handling\\failure_handling.py",
    "ast_data": "FunctionDef name:_complete_config_for_environment arg:platform_device arg:termination_config arguments arg arg If Assign Call If Compare Return return:yes Call If Compare Return return:yes Call If Compare Return return:yes Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "eval",
    "source_code": "def eval(self, feed_dict=None, session=None):\n    return _eval_using_default_session(self, feed_dict, self.graph, session)",
    "docstring": "Evaluates this tensor in a . Note: If you are not using libraries, you should not need this, (or or ). In eager execution (or within ) you do not need to call . Calling this method will execute all preceding operations that produce the inputs needed for the operation that produces this tensor. *N.B.* Before invoking , its graph must have been launched in a session, and either a default session must be available, or must be specified explicitly. Args: feed_dict: A dictionary that maps objects to feed values. See for a description of the valid feed values. session: (Optional.) The to be used to evaluate this tensor. If none, the default session will be used. Returns: A numpy array corresponding to the value of this tensor.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\tensor.py",
    "ast_data": "FunctionDef name:eval arg:self arg:feed_dict arg:session arguments arg arg arg Return return:yes Call"
  },
  {
    "library": "virtualenv",
    "name": "PathRefToDest",
    "source_code": "class PathRefToDest(PathRef):\n\n    def __init__(self, src, dest, must=RefMust.NA, when=RefWhen.ANY) -> None:\n        super().__init__(src, must, when)\n        self.dest = dest\n\n    def run(self, creator, symlinks):\n        dest = self.dest(creator, self.src)\n        method = self.method(symlinks)\n        dest_iterable = dest if isinstance(dest, list) else (dest,)\n        if not dest.parent.exists():\n            dest.parent.mkdir(parents=True, exist_ok=True)\n        for dst in dest_iterable:\n            method(self.src, dst)",
    "docstring": "Link a path on the file system.",
    "type": "class",
    "file_path": "virtualenv\\src\\virtualenv\\create\\via_global_ref\\builtin\\ref.py",
    "ast_data": "ClassDef name:PathRefToDest FunctionDef name:__init__ arg:self arg:src arg:dest arg:must arg:when arguments arg arg arg arg arg Call Call Assign FunctionDef name:run arg:self arg:creator arg:symlinks arguments arg arg arg Assign Call Assign Call Assign Call If Call Call For Call"
  },
  {
    "library": "kornia",
    "name": "Saturate",
    "source_code": "class Saturate(OperationBase):\n\n    def __init__(self, initial_magnitude: Optional[float]=0.5, initial_probability: float=0.5, magnitude_range: Tuple[float, float]=(0.2, 1.8), temperature: float=0.1, symmetric_megnitude: bool=False) -> None:\n        super().__init__(K.RandomSaturation(magnitude_range, same_on_batch=False, p=initial_probability), initial_magnitude=[('saturation_factor', initial_magnitude)], temperature=temperature, symmetric_megnitude=symmetric_megnitude)",
    "docstring": "Apply saturation operation. Args: initial_probability: the initial probability. If None, the augmentation will be randomly applied according to he augmentation sampling range. initial_magnitude: the initial magnitude. magnitude_range: the sampling range for random sampling and clamping the optimized magnitude. temperature: temperature for RelaxedBernoulli distribution used during training. symmetric_megnitude: if to randomly assign the magnitude as negative or not.",
    "type": "class",
    "file_path": "kornia\\kornia\\augmentation\\auto\\operations\\ops.py",
    "ast_data": "ClassDef name:Saturate FunctionDef name:__init__ arg:self arg:initial_magnitude arg:initial_probability arg:magnitude_range arg:temperature arg:symmetric_megnitude arguments arg arg arg arg arg arg Call Call Call"
  },
  {
    "library": "pandas",
    "name": "is_categorical_dtype",
    "source_code": "def is_categorical_dtype(arr_or_dtype) -> bool:\n    warnings.warn('is_categorical_dtype is deprecated and will be removed in a future version. Use isinstance(dtype, pd.CategoricalDtype) instead', DeprecationWarning, stacklevel=2)\n    if isinstance(arr_or_dtype, ExtensionDtype):\n        return arr_or_dtype.name == 'category'\n    if arr_or_dtype is None:\n        return False\n    return CategoricalDtype.is_dtype(arr_or_dtype)",
    "docstring": "Check whether an array-like or dtype is of the Categorical dtype. .. deprecated:: 2.2.0 Use isinstance(dtype, pd.CategoricalDtype) instead. Parameters ---------- arr_or_dtype : array-like or dtype The array-like or dtype to check. Returns ------- boolean Whether or not the array-like or dtype is of the Categorical dtype. See Also -------- api.types.is_list_like: Check if the object is list-like. api.types.is_complex_dtype: Check whether the provided array or dtype is of a complex dtype. Examples -------- >>> from pandas.api.types import is_categorical_dtype >>> from pandas import CategoricalDtype >>> is_categorical_dtype(object) False >>> is_categorical_dtype(CategoricalDtype()) True >>> is_categorical_dtype([1, 2, 3]) False >>> is_categorical_dtype(pd.Categorical([1, 2, 3])) True >>> is_categorical_dtype(pd.CategoricalIndex([1, 2, 3])) True",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\dtypes\\common.py",
    "ast_data": "FunctionDef name:is_categorical_dtype arg:arr_or_dtype arguments arg Call If Call Return return:yes Compare If Compare Return return:yes Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_with_precomputed_value_rowids",
    "source_code": "def _with_precomputed_value_rowids(self):\n    return RowPartition(row_splits=self._row_splits, row_lengths=self._row_lengths, value_rowids=self.value_rowids(), nrows=self._nrows, nvals=self._nvals, uniform_row_length=self._uniform_row_length, internal=_row_partition_factory_key)",
    "docstring": "Returns a copy of with precomputed.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\row_partition.py",
    "ast_data": "FunctionDef name:_with_precomputed_value_rowids arg:self arguments arg Return return:yes Call Call"
  },
  {
    "library": "django",
    "name": "difference",
    "source_code": "def difference(self, other):\n    return self._topology(capi.geos_difference(self.ptr, other.ptr))",
    "docstring": "Return a Geometry representing the points making up this Geometry that do not make up other.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\geos\\geometry.py",
    "ast_data": "FunctionDef name:difference arg:self arg:other arguments arg arg Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "is_bw",
    "source_code": "@property\ndef is_bw(self):\n    return torch._C._current_graph_task_id() != -1",
    "docstring": "A boolean marking if this is currently running during the backward pass or not",
    "type": "method",
    "file_path": "pytorch\\torch\\utils\\module_tracker.py",
    "ast_data": "FunctionDef name:is_bw arg:self arguments arg Return return:yes Compare Call"
  },
  {
    "library": "tensorflow",
    "name": "_get_pad_shape",
    "source_code": "def _get_pad_shape(params, indices, row_splits_dtype):\n    num_batch_dimensions = indices.shape.ndims - 1\n    params_shape = ragged_tensor_shape.RaggedTensorDynamicShape.from_tensor(params, dim_size_dtype=row_splits_dtype)\n    if params.shape.ndims == indices.shape.ndims:\n        if params_shape.num_inner_dimensions == 0:\n            pad_dims = params_shape.partitioned_dim_sizes[:-1] + (array_ops.ones_like(params_shape.partitioned_dim_sizes[-1]),)\n            return ragged_tensor_shape.RaggedTensorDynamicShape(pad_dims, [])\n        else:\n            return ragged_tensor_shape.RaggedTensorDynamicShape(params_shape.partitioned_dim_sizes, array_ops.concat([params_shape.inner_dim_sizes[:-1], [1]], axis=0))\n    else:\n        pad_dims = None\n        if num_batch_dimensions == 0:\n            pad_dims = (constant_op.constant(1, dtype=row_splits_dtype),) + (constant_op.constant([1], dtype=row_splits_dtype),) * (params_shape.num_partitioned_dimensions - num_batch_dimensions - 1)\n        else:\n            batch_dimensions = params_shape.partitioned_dim_sizes[:num_batch_dimensions]\n            gather_dimension = params_shape.partitioned_dim_sizes[num_batch_dimensions]\n            pad_dims = batch_dimensions + (array_ops.ones_like(gather_dimension),) * (params_shape.num_partitioned_dimensions - num_batch_dimensions)\n        return ragged_tensor_shape.RaggedTensorDynamicShape(pad_dims, params_shape.inner_dim_sizes)",
    "docstring": "Gets the RaggedTensorDynamicShape for the pad tensor.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\ragged_batch_gather_with_default_op.py",
    "ast_data": "FunctionDef name:_get_pad_shape arg:params arg:indices arg:row_splits_dtype arguments arg arg arg Assign Assign Call If Compare If Compare Assign Call Return return:yes Call Return return:yes Call Call Assign If Compare Assign Call Call Assign Assign Assign Call Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "get_tick_out",
    "source_code": "def get_tick_out(self):\n    return self._tick_out",
    "docstring": "Return whether ticks are drawn inside or outside the axes.",
    "type": "method",
    "file_path": "matplotlib\\lib\\mpl_toolkits\\axisartist\\axis_artist.py",
    "ast_data": "FunctionDef name:get_tick_out arg:self arguments arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "prepare_for_propagation_comparison",
    "source_code": "def prepare_for_propagation_comparison(model: GraphModule) -> GraphModule:\n    model = copy.deepcopy(model)\n    for n in model.graph.nodes:\n        if CUSTOM_KEY not in n.meta or NUMERIC_DEBUG_HANDLE_KEY not in n.meta[CUSTOM_KEY]:\n            continue\n        numeric_debug_handle = n.meta[CUSTOM_KEY][NUMERIC_DEBUG_HANDLE_KEY]\n        _insert_logger(model, n, numeric_debug_handle)\n    model.recompile()\n    return model",
    "docstring": "Add output loggers to node that has numeric_debug_handle Args: model (GraphModule): original model Returns: a model with output loggers for all nodes that has numeric_debug_handle_id",
    "type": "function",
    "file_path": "pytorch\\torch\\ao\\quantization\\pt2e\\_numeric_debugger.py",
    "ast_data": "FunctionDef name:prepare_for_propagation_comparison arg:model arguments arg Assign Call For If BoolOp Compare Compare Assign Call Call Return return:yes"
  },
  {
    "library": "scipy",
    "name": "Problem20",
    "source_code": "class Problem20(Benchmark):\n\n    def __init__(self, dimensions=1):\n        Benchmark.__init__(self, dimensions)\n        self._bounds = [(-10, 10)]\n        self.global_optimum = 1.195137\n        self.fglob = -0.0634905\n\n    def fun(self, x, *args):\n        self.nfev += 1\n        x = x[0]\n        return -(x - sin(x)) * exp(-x ** 2.0)",
    "docstring": "Univariate Problem20 objective function. This class defines the Univariate Problem20 global optimization problem. This is a multimodal minimization problem defined as follows: .. math:: f_{\\text{Problem20}}(x) = -[x-\\sin(x)]e^{-x^2} Bound constraints: :math: .. figure:: figures/Problem20.png :alt: Univariate Problem20 function :align: center **Univariate Problem20 function** *Global optimum*: :math: for :math:",
    "type": "class",
    "file_path": "scipy\\benchmarks\\benchmarks\\go_benchmark_functions\\go_funcs_univariate.py",
    "ast_data": "ClassDef name:Problem20 FunctionDef name:__init__ arg:self arg:dimensions arguments arg arg Call Assign Assign Assign FunctionDef name:fun arg:self arg:x arguments arg arg arg Assign Return return:yes Call Call"
  },
  {
    "library": "pandas",
    "name": "_ensure_listlike_indexer",
    "source_code": "@final\ndef _ensure_listlike_indexer(self, key, axis=None, value=None) -> None:\n    column_axis = 1\n    if self.ndim != 2:\n        return\n    if isinstance(key, tuple) and len(key) > 1:\n        if axis is None:\n            axis = column_axis\n        key = key[axis]\n    if axis == column_axis and (not isinstance(self.obj.columns, MultiIndex)) and is_list_like_indexer(key) and (not com.is_bool_indexer(key)) and all((is_hashable(k) for k in key)):\n        keys = self.obj.columns.union(key, sort=False)\n        diff = Index(key).difference(self.obj.columns, sort=False)\n        if len(diff):\n            indexer = np.arange(len(keys), dtype=np.intp)\n            indexer[len(self.obj.columns):] = -1\n            new_mgr = self.obj._mgr.reindex_indexer(keys, indexer=indexer, axis=0, only_slice=True, use_na_proxy=True)\n            self.obj._mgr = new_mgr\n            return\n        self.obj._mgr = self.obj._mgr.reindex_axis(keys, axis=0, only_slice=True)",
    "docstring": "Ensure that a list-like of column labels are all present by adding them if they do not already exist. Parameters ---------- key : list-like of column labels Target labels. axis : key axis if known",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\indexing.py",
    "ast_data": "FunctionDef name:_ensure_listlike_indexer arg:self arg:key arg:axis arg:value arguments arg arg arg arg Assign If Compare Return return:no If BoolOp Call Compare Call If Compare Assign Assign If BoolOp Compare Call Call Call Call Call Assign Call Assign Call Call If Call Assign Call Call Assign Call Assign Call Assign Return return:no Assign Call"
  },
  {
    "library": "pytorch",
    "name": "_is_arg_with_complex_dtype",
    "source_code": "def _is_arg_with_complex_dtype(arg: fx_type_utils.Argument) -> bool:\n    if isinstance(arg, torch.fx.Node) and 'val' in arg.meta and isinstance(arg.meta['val'], torch.Tensor) and torch.is_complex(arg.meta['val']):\n        return True\n    elif isinstance(arg, list):\n        for item in arg:\n            return _is_arg_with_complex_dtype(item)\n    return False",
    "docstring": "Check if the node has complex dtype recursively.",
    "type": "function",
    "file_path": "pytorch\\torch\\onnx\\_internal\\fx\\onnxfunction_dispatcher.py",
    "ast_data": "FunctionDef name:_is_arg_with_complex_dtype arg:arg arguments arg If BoolOp Call Compare Call Call Return return:yes If Call For Return return:yes Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "get_entrypoint_name",
    "source_code": "def get_entrypoint_name(self):\n    if isinstance(self.entrypoint, str):\n        return os.path.basename(self.entrypoint)\n    else:\n        assert self.entrypoint is not None\n        return self.entrypoint.__qualname__",
    "docstring": "Get the entry point name. If the entrypoint is a function (e.g. ``), returns the binary name.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\elastic\\agent\\server\\api.py",
    "ast_data": "FunctionDef name:get_entrypoint_name arg:self arguments arg If Call Return return:yes Call Compare Return return:yes"
  },
  {
    "library": "numpy",
    "name": "__init__",
    "source_code": "def __init__(self, display):\n    self._display = display\n    self._enabled = True",
    "docstring": "Create the masked_print_option object.",
    "type": "method",
    "file_path": "numpy\\numpy\\ma\\core.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:display arguments arg arg Assign Assign"
  },
  {
    "library": "scipy",
    "name": "Hosaki",
    "source_code": "class Hosaki(Benchmark):\n\n    def __init__(self, dimensions=2):\n        Benchmark.__init__(self, dimensions)\n        self._bounds = ([0.0, 5.0], [0.0, 6.0])\n        self.custom_bounds = [(0, 5), (0, 5)]\n        self.global_optimum = [[4, 2]]\n        self.fglob = -2.3458115\n\n    def fun(self, x, *args):\n        self.nfev += 1\n        val = 1 - 8 * x[0] + 7 * x[0] ** 2 - 7 / 3.0 * x[0] ** 3 + 0.25 * x[0] ** 4\n        return val * x[1] ** 2 * exp(-x[1])",
    "docstring": "Hosaki objective function. This class defines the Hosaki [1]_ global optimization problem. This is a multimodal minimization problem defined as follows: .. math:: f_{\\text{Hosaki}}(x) = \\left ( 1 - 8 x_1 + 7 x_1^2 - \\frac{7}{3} x_1^3 + \\frac{1}{4} x_1^4 \\right ) x_2^2 e^{-x_1} with :math: for :math:. *Global optimum*: :math: for :math:. .. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions For Global Optimization Problems Int. Journal of Mathematical Modelling and Numerical Optimisation, 2013, 4, 150-194.",
    "type": "class",
    "file_path": "scipy\\benchmarks\\benchmarks\\go_benchmark_functions\\go_funcs_H.py",
    "ast_data": "ClassDef name:Hosaki FunctionDef name:__init__ arg:self arg:dimensions arguments arg arg Call Assign Assign Assign Assign FunctionDef name:fun arg:self arg:x arguments arg arg arg Assign Return return:yes Call"
  },
  {
    "library": "kornia",
    "name": "__init__",
    "source_code": "def __init__(self, kernel_size: tuple[int, int]=(16, 16), stride: tuple[int, int]=(16, 16), padding: tuple[int, int]=(0, 0), in_chans: int=3, embed_dim: int=768) -> None:\n    super().__init__()\n    self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=kernel_size, stride=stride, padding=padding)",
    "docstring": "Construct Patch Embedding. Args: kernel_size: kernel size of the projection layer. stride: stride of the projection layer. padding: padding size of the projection layer. in_chans: Number of input image channels. embed_dim: Patch embedding dimension.",
    "type": "method",
    "file_path": "kornia\\kornia\\contrib\\models\\sam\\architecture\\image_encoder.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:kernel_size arg:stride arg:padding arg:in_chans arg:embed_dim arguments arg arg arg arg arg arg Call Call Assign Call"
  },
  {
    "library": "cryptography",
    "name": "subject_name",
    "source_code": "def subject_name(self, name: Name) -> CertificateSigningRequestBuilder:\n    if not isinstance(name, Name):\n        raise TypeError('Expecting x509.Name object.')\n    if self._subject_name is not None:\n        raise ValueError('The subject name may only be set once.')\n    return CertificateSigningRequestBuilder(name, self._extensions, self._attributes)",
    "docstring": "Sets the certificate requestor's distinguished name.",
    "type": "method",
    "file_path": "cryptography\\src\\cryptography\\x509\\base.py",
    "ast_data": "FunctionDef name:subject_name arg:self arg:name arguments arg arg If Call Raise Call If Compare Raise Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "unregister",
    "source_code": "def unregister(self, name: str, opset: OpsetVersion) -> None:\n    if name not in self._registry:\n        return\n    self._registry[name].remove_custom(opset)",
    "docstring": "Unregisters a symbolic function. Args: name: The qualified name of the function to unregister. opset: The opset version of the function to unregister.",
    "type": "method",
    "file_path": "pytorch\\torch\\onnx\\_internal\\registration.py",
    "ast_data": "FunctionDef name:unregister arg:self arg:name arg:opset arguments arg arg arg If Compare Return return:no Call"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, dataset, input_workers, strategy, num_replicas_in_sync=None, input_context=None):\n    dist_dataset = DistributedDatasetV1(dataset, input_workers, strategy, num_replicas_in_sync=num_replicas_in_sync, input_context=input_context)\n    worker_iterators = _create_iterators_per_worker(dist_dataset._cloned_datasets, input_workers)\n    super(DatasetIterator, self).__init__(input_workers, worker_iterators, strategy, dist_dataset.cardinality, dist_dataset._enable_get_next_as_optional)\n    self._element_spec = dist_dataset.element_spec",
    "docstring": "Make an iterator for the dataset on given devices. If is not None, we split each batch of the dataset into smaller batches, to be distributed among that worker's replicas, so that the batch size for a global step (across all workers and replicas) is as expected. Args: dataset: that will be used as the input source. input_workers: an object. strategy: a object, used to run all-reduce to handle last partial batch. num_replicas_in_sync: Optional integer. If this is not None, the value is used to decide how to rebatch datasets into smaller batches so that the total batch size for each step (across all workers and replicas) adds up to 's batch size. input_context: for sharding. Only pass this in for between graph multi-worker cases where there is only one . In these cases, we will shard based on the and in the .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\v1\\input_lib.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:dataset arg:input_workers arg:strategy arg:num_replicas_in_sync arg:input_context arguments arg arg arg arg arg arg Assign Call Assign Call Call Call Assign"
  },
  {
    "library": "tensorflow",
    "name": "parse_from_string",
    "source_code": "@staticmethod\ndef parse_from_string(string, version_type):\n    if not re.search('[0-9]+\\\\.[0-9]+\\\\.[a-zA-Z0-9]+', string):\n        raise RuntimeError('Invalid version string: %s' % string)\n    major, minor, extension = string.split('.', 2)\n    extension_split = extension.split('-', 1)\n    patch = extension_split[0]\n    if len(extension_split) == 2:\n        identifier_string = '-' + extension_split[1]\n    else:\n        identifier_string = ''\n    return Version(major, minor, patch, identifier_string, version_type)",
    "docstring": "Returns version object from Semver string. Args: string: version string version_type: version parameter Raises: RuntimeError: If the version string is not valid.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\tools\\ci_build\\update_version.py",
    "ast_data": "FunctionDef name:parse_from_string arg:string arg:version_type arguments arg arg If Call Raise Call Assign Call Assign Call Assign If Compare Call Assign Assign Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "_generate_tensor_table",
    "source_code": "def _generate_tensor_table(self, filtered_data: OrderedDict[str, dict[str, Any]], tensor_features: list[str]) -> tuple[list, list]:\n    tensor_table: list[list[Any]] = []\n    tensor_headers: list[str] = []\n    if len(tensor_features) > 0:\n        for index, module_fqn in enumerate(filtered_data):\n            tensor_table_row = [index, module_fqn]\n            for feature in tensor_features:\n                if feature in filtered_data[module_fqn]:\n                    feature_val = filtered_data[module_fqn][feature]\n                else:\n                    feature_val = 'Not Applicable'\n                if isinstance(feature_val, torch.Tensor):\n                    feature_val = feature_val.item()\n                tensor_table_row.append(feature_val)\n            tensor_table.append(tensor_table_row)\n    if len(tensor_table) != 0:\n        tensor_headers = ['idx', 'layer_fqn'] + tensor_features\n    return (tensor_headers, tensor_table)",
    "docstring": "Takes in the filtered data and features list and generates the tensor headers and table Currently meant to generate the headers and table for both the tensor information. Args: filtered_data (OrderedDict[str, Dict[str, Any]]): An OrderedDict (sorted in order of model) mapping: module_fqns -> feature_names -> values tensor_features (List[str]): A list of the tensor level features Returns a tuple with: A list of the headers of the tensor table A list of lists containing the table information row by row The 0th index row will contain the headers of the columns The rest of the rows will contain data",
    "type": "method",
    "file_path": "pytorch\\torch\\ao\\quantization\\fx\\_model_report\\model_report_visualizer.py",
    "ast_data": "FunctionDef name:_generate_tensor_table arg:self arg:filtered_data arg:tensor_features arguments arg arg arg If Compare Call For Call Assign For If Compare Assign Assign If Call Assign Call Call Call If Compare Call Assign Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "generalized_box_iou",
    "source_code": "def generalized_box_iou(boxes1, boxes2):\n    assert (boxes1[:, 2:] >= boxes1[:, :2]).all()\n    assert (boxes2[:, 2:] >= boxes2[:, :2]).all()\n    iou, union = box_iou(boxes1, boxes2)\n    lt = torch.min(boxes1[:, None, :2], boxes2[:, :2])\n    rb = torch.max(boxes1[:, None, 2:], boxes2[:, 2:])\n    wh = (rb - lt).clamp(min=0)\n    area = wh[:, :, 0] * wh[:, :, 1]\n    return iou - (area - union) / area",
    "docstring": "Generalized IoU from The boxes should be in [x0, y0, x1, y1] format Returns a [N, M] pairwise matrix, where N = len(boxes1) and M = len(boxes2)",
    "type": "function",
    "file_path": "pytorch\\benchmarks\\functional_autograd_benchmark\\torchvision_models.py",
    "ast_data": "FunctionDef name:generalized_box_iou arg:boxes1 arg:boxes2 arguments arg arg Call Compare Call Compare Assign Call Assign Call Assign Call Assign Call Assign Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "unpad_sequence",
    "source_code": "def unpad_sequence(padded_sequences: Tensor, lengths: Tensor, batch_first: bool=False) -> list[Tensor]:\n    unpadded_sequences = []\n    if not batch_first:\n        padded_sequences.transpose_(0, 1)\n    max_length = padded_sequences.shape[1]\n    idx = torch.arange(max_length, device=lengths.device)\n    for seq, length in zip(padded_sequences, lengths):\n        mask = idx < length\n        unpacked_seq = seq[mask]\n        unpadded_sequences.append(unpacked_seq)\n    return unpadded_sequences",
    "docstring": "Unpad padded Tensor into a list of variable length Tensors. `Tensor` objects",
    "type": "function",
    "file_path": "pytorch\\torch\\nn\\utils\\rnn.py",
    "ast_data": "FunctionDef name:unpad_sequence arg:padded_sequences arg:lengths arg:batch_first arguments arg arg arg Assign If Call Assign Assign Call For Call Assign Compare Assign Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "codegen_sync",
    "source_code": "def codegen_sync(self) -> None:\n    raise NotImplementedError",
    "docstring": "Generate synchronization code for the kernel. This method depends on the hardware characteristics.",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\scheduler.py",
    "ast_data": "FunctionDef name:codegen_sync arg:self arguments arg Raise"
  },
  {
    "library": "pytorch",
    "name": "post_compile",
    "source_code": "def post_compile(self, result: CompiledFxGraph, fx_config: _CompileFxKwargs) -> CompiledFxGraph:\n    result.post_compile(self.example_inputs, self.constants, fx_config)\n    return result",
    "docstring": "Called after FXGraphCacheLoadable.load, mutates fx_config",
    "type": "method",
    "file_path": "pytorch\\torch\\_functorch\\_aot_autograd\\autograd_cache.py",
    "ast_data": "FunctionDef name:post_compile arg:self arg:result arg:fx_config arguments arg arg arg Call Return return:yes"
  },
  {
    "library": "django",
    "name": "return_insert_columns",
    "source_code": "def return_insert_columns(self, fields):\n    pass",
    "docstring": "For backends that support returning columns as part of an insert query, return the SQL and params to append to the INSERT query. The returned fragment should contain a format string to hold the appropriate column.",
    "type": "method",
    "file_path": "django\\django\\db\\backends\\base\\operations.py",
    "ast_data": "FunctionDef name:return_insert_columns arg:self arg:fields arguments arg arg"
  },
  {
    "library": "matplotlib",
    "name": "CharacterTracker",
    "source_code": "class CharacterTracker:\n\n    def __init__(self):\n        self.used = {}\n\n    def track(self, font, s):\n        char_to_font = font._get_fontmap(s)\n        for _c, _f in char_to_font.items():\n            self.used.setdefault(_f.fname, set()).add(ord(_c))\n\n    def track_glyph(self, font, glyph):\n        self.used.setdefault(font.fname, set()).add(glyph)",
    "docstring": "Helper for font subsetting by the pdf and ps backends. Maintains a mapping of font paths to the set of character codepoints that are being used from that font.",
    "type": "class",
    "file_path": "matplotlib\\lib\\matplotlib\\backends\\_backend_pdf_ps.py",
    "ast_data": "ClassDef name:CharacterTracker FunctionDef name:__init__ arg:self arguments arg Assign FunctionDef name:track arg:self arg:font arg:s arguments arg arg arg Assign Call For Call Call Call Call Call FunctionDef name:track_glyph arg:self arg:font arg:glyph arguments arg arg arg Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "_IndependentConstraint",
    "source_code": "class _IndependentConstraint(Constraint):\n\n    def __init__(self, base_constraint, reinterpreted_batch_ndims):\n        assert isinstance(base_constraint, Constraint)\n        assert isinstance(reinterpreted_batch_ndims, int)\n        assert reinterpreted_batch_ndims >= 0\n        self.base_constraint = base_constraint\n        self.reinterpreted_batch_ndims = reinterpreted_batch_ndims\n        super().__init__()\n\n    @property\n    def is_discrete(self) -> bool:\n        return self.base_constraint.is_discrete\n\n    @property\n    def event_dim(self) -> int:\n        return self.base_constraint.event_dim + self.reinterpreted_batch_ndims\n\n    def check(self, value):\n        result = self.base_constraint.check(value)\n        if result.dim() < self.reinterpreted_batch_ndims:\n            expected = self.base_constraint.event_dim + self.reinterpreted_batch_ndims\n            raise ValueError(f'Expected value.dim() >= {expected} but got {value.dim()}')\n        result = result.reshape(result.shape[:result.dim() - self.reinterpreted_batch_ndims] + (-1,))\n        result = result.all(-1)\n        return result\n\n    def __repr__(self):\n        return f'{self.__class__.__name__[1:]}({repr(self.base_constraint)}, {self.reinterpreted_batch_ndims})'",
    "docstring": "Wraps a constraint by aggregating over `check`, so that an event is valid only if all its independent entries are valid.",
    "type": "class",
    "file_path": "pytorch\\torch\\distributions\\constraints.py",
    "ast_data": "ClassDef name:_IndependentConstraint FunctionDef name:__init__ arg:self arg:base_constraint arg:reinterpreted_batch_ndims arguments arg arg arg Call Call Compare Assign Assign Call Call FunctionDef name:is_discrete arg:self arguments arg Return return:yes FunctionDef name:event_dim arg:self arguments arg Return return:yes FunctionDef name:check arg:self arg:value arguments arg arg Assign Call If Compare Call Assign Raise Call Call Assign Call Call Assign Call Return return:yes FunctionDef name:__repr__ arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "get",
    "source_code": "def get(self, token, default=None):\n    return self._funcs.get(token, default)",
    "docstring": "Gets the registered function corresponding to .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\script_ops.py",
    "ast_data": "FunctionDef name:get arg:self arg:token arg:default arguments arg arg arg Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "_set_oob_score_and_attributes",
    "source_code": "def _set_oob_score_and_attributes(self, X, y, scoring_function=None):\n    self.oob_prediction_ = super()._compute_oob_predictions(X, y).squeeze(axis=1)\n    if self.oob_prediction_.shape[-1] == 1:\n        self.oob_prediction_ = self.oob_prediction_.squeeze(axis=-1)\n    if scoring_function is None:\n        scoring_function = r2_score\n    self.oob_score_ = scoring_function(y, self.oob_prediction_)",
    "docstring": "Compute and set the OOB score and attributes. Parameters ---------- X : array-like of shape (n_samples, n_features) The data matrix. y : ndarray of shape (n_samples, n_outputs) The target matrix. scoring_function : callable, default=None Scoring function for OOB score. Defaults to .",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\ensemble\\_forest.py",
    "ast_data": "FunctionDef name:_set_oob_score_and_attributes arg:self arg:X arg:y arg:scoring_function arguments arg arg arg arg Assign Call Call Call If Compare Assign Call If Compare Assign Assign Call"
  },
  {
    "library": "authlib",
    "name": "get_well_known_url",
    "source_code": "def get_well_known_url(issuer, external=False):\n    if external:\n        return issuer.rstrip('/') + '/.well-known/openid-configuration'\n    parsed = urlparse.urlparse(issuer)\n    path = parsed.path\n    return path.rstrip('/') + '/.well-known/openid-configuration'",
    "docstring": "Get well-known URI with issuer via Section 4.1. :param issuer: URL of the issuer :param external: return full external url or not :return: URL",
    "type": "function",
    "file_path": "authlib\\authlib\\oidc\\discovery\\well_known.py",
    "ast_data": "FunctionDef name:get_well_known_url arg:issuer arg:external arguments arg arg If Return return:yes Call Assign Call Assign Return return:yes Call"
  },
  {
    "library": "numpy",
    "name": "_override_imports",
    "source_code": "def _override_imports(file: MypyFile, module: str, imports: list[tuple[str, str | None]]) -> None:\n    import_obj = ImportFrom(module, 0, names=imports)\n    import_obj.is_top_level = True\n    for lst in [file.defs, cast('list[Statement]', file.imports)]:\n        i = _index(lst, module)\n        lst[i] = import_obj",
    "docstring": "Override the first -based import with new .",
    "type": "function",
    "file_path": "numpy\\numpy\\typing\\mypy_plugin.py",
    "ast_data": "FunctionDef name:_override_imports arg:file arg:module arg:imports arguments arg arg arg Assign Call Assign For Call Assign Call Assign"
  },
  {
    "library": "scikit-learn",
    "name": "__getitem__",
    "source_code": "def __getitem__(self, ind):\n    for sub_grid in self.param_grid:\n        if not sub_grid:\n            if ind == 0:\n                return {}\n            else:\n                ind -= 1\n                continue\n        keys, values_lists = zip(*sorted(sub_grid.items())[::-1])\n        sizes = [len(v_list) for v_list in values_lists]\n        total = np.prod(sizes)\n        if ind >= total:\n            ind -= total\n        else:\n            out = {}\n            for key, v_list, n in zip(keys, values_lists, sizes):\n                ind, offset = divmod(ind, n)\n                out[key] = v_list[offset]\n            return out\n    raise IndexError('ParameterGrid index out of range')",
    "docstring": "Get the parameters that would be ``th in iteration Parameters ---------- ind : int The iteration index Returns ------- params : dict of str to any Equal to list(self)[ind]",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\model_selection\\_search.py",
    "ast_data": "FunctionDef name:__getitem__ arg:self arg:ind arguments arg arg For If If Compare Return return:no Assign Call Call Call Assign Call Assign Call If Compare Assign For Call Assign Call Assign Return return:yes Raise Call"
  },
  {
    "library": "pytorch",
    "name": "gen_classes",
    "source_code": "def gen_classes(self, classes, num_spaces):\n    indent = ' ' * num_spaces\n    return '\\n'.join([f\"{indent}self.choices.append('{c}')\" for c in classes])",
    "docstring": "If classes=['choice1', 'choice2', 'choice3'], then this function returns the following string: self.choices.append('choice1') self.choices.append('choice2') self.choices.append('choice3') Used in the generated heuristic to map the index of a choice to its name.",
    "type": "method",
    "file_path": "pytorch\\torchgen\\_autoheuristic\\train_decision.py",
    "ast_data": "FunctionDef name:gen_classes arg:self arg:classes arg:num_spaces arguments arg arg arg Assign Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "name",
    "source_code": "@property\ndef name(self):\n    return self.key",
    "docstring": "See base class.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\feature_column\\sequence_feature_column.py",
    "ast_data": "FunctionDef name:name arg:self arguments arg Return return:yes"
  },
  {
    "library": "numpy",
    "name": "parse_nm",
    "source_code": "def parse_nm(nm_output):\n    data = DATA_RE.findall(nm_output)\n    func = FUNC_RE.findall(nm_output)\n    flist = []\n    for sym in data:\n        if sym in func and (sym[:2] == 'Py' or sym[:3] == '_Py' or sym[:4] == 'init'):\n            flist.append(sym)\n    dlist = []\n    for sym in data:\n        if sym not in flist and (sym[:2] == 'Py' or sym[:3] == '_Py'):\n            dlist.append(sym)\n    dlist.sort()\n    flist.sort()\n    return (dlist, flist)",
    "docstring": "Returns a tuple of lists: dlist for the list of data symbols and flist for the list of function symbols. dlist, flist = parse_nm(nm_output)",
    "type": "function",
    "file_path": "numpy\\numpy\\distutils\\lib2def.py",
    "ast_data": "FunctionDef name:parse_nm arg:nm_output arguments arg Assign Call Assign Call Assign For If BoolOp Compare BoolOp Compare Compare Compare Call Assign For If BoolOp Compare BoolOp Compare Compare Call Call Call Return return:yes"
  },
  {
    "library": "pandas",
    "name": "arithmetic_op",
    "source_code": "def arithmetic_op(left: ArrayLike, right: Any, op):\n    if should_extension_dispatch(left, right) or isinstance(right, (Timedelta, BaseOffset, Timestamp)) or right is NaT:\n        res_values = op(left, right)\n    else:\n        _bool_arith_check(op, left, right)\n        res_values = _na_arithmetic_op(left, right, op)\n    return res_values",
    "docstring": "Evaluate an arithmetic operation , , , , , , , ... Note: the caller is responsible for ensuring that numpy warnings are suppressed (with np.errstate(all=\"ignore\")) if needed. Parameters ---------- left : np.ndarray or ExtensionArray right : object Cannot be a DataFrame or Index. Series is *not* excluded. op : {operator.add, operator.sub, ...} Or one of the reversed variants from roperator. Returns ------- ndarray or ExtensionArray Or a 2-tuple of these in the case of divmod or rdivmod.",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\ops\\array_ops.py",
    "ast_data": "FunctionDef name:arithmetic_op arg:left arg:right arg:op arguments arg arg arg If BoolOp Call Call Compare Assign Call Call Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "get_debug_info",
    "source_code": "def get_debug_info(nodes_to_debug_info_func, converted_graph):\n    if not nodes_to_debug_info_func:\n        return None\n    original_nodes = set()\n    for node in converted_graph.node:\n        debug_nodes = node.experimental_debug_info.original_node_names\n        debug_funcs = node.experimental_debug_info.original_func_names\n        if not debug_nodes:\n            original_nodes.add(('', node.name))\n        else:\n            for i in range(len(debug_nodes)):\n                debug_func = '' if i >= len(debug_funcs) else debug_funcs[i]\n                original_nodes.add((debug_func, debug_nodes[i]))\n    return nodes_to_debug_info_func(original_nodes)",
    "docstring": "Returns the debug info for the original nodes in the . Args: nodes_to_debug_info_func: The method to collect the op debug info for the nodes. converted_graph: A after optimization and transformation. Returns: for all the original nodes in .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\lite\\python\\util.py",
    "ast_data": "FunctionDef name:get_debug_info arg:nodes_to_debug_info_func arg:converted_graph arguments arg arg If Return return:no Assign Call For Assign Assign If Call For Call Call Assign Compare Call Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "registry_name",
    "source_code": "@staticmethod\n@abc.abstractmethod\ndef registry_name() -> str:\n    pass",
    "docstring": "See ExtensionRegistry.from_descriptor_list",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\checkpoint\\_extension.py",
    "ast_data": "FunctionDef name:registry_name arguments"
  },
  {
    "library": "pandas",
    "name": "_update_strl_names",
    "source_code": "def _update_strl_names(self) -> None:\n    for orig, new in self._converted_names.items():\n        if orig in self._convert_strl:\n            idx = self._convert_strl.index(orig)\n            self._convert_strl[idx] = new",
    "docstring": "Update column names for conversion to strl if they might have been changed to comply with Stata naming rules",
    "type": "method",
    "file_path": "pandas\\pandas\\io\\stata.py",
    "ast_data": "FunctionDef name:_update_strl_names arg:self arguments arg For Call If Compare Assign Call Assign"
  },
  {
    "library": "tensorflow",
    "name": "from_tensor",
    "source_code": "@classmethod\ndef from_tensor(cls, rt_input, dim_size_dtype=None):\n    with ops.name_scope(None, 'RaggedTensorDynamicShapeFromTensor', [rt_input]):\n        rt_input = ragged_tensor.convert_to_tensor_or_ragged_tensor(rt_input)\n        if not ragged_tensor.is_ragged(rt_input):\n            return cls([], array_ops.shape(rt_input), dim_size_dtype=dim_size_dtype)\n        else:\n            partitioned_dim_sizes = (rt_input.nrows(),) + rt_input.nested_row_lengths()\n            return RaggedTensorDynamicShape(partitioned_dim_sizes, array_ops.shape(rt_input.flat_values)[1:], dim_size_dtype=dim_size_dtype)",
    "docstring": "Constructs a ragged shape for a potentially ragged tensor.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\ragged_tensor_shape.py",
    "ast_data": "FunctionDef name:from_tensor arg:cls arg:rt_input arg:dim_size_dtype arguments arg arg arg With Call Assign Call If Call Return return:yes Call Call Assign Call Call Return return:yes Call Call"
  },
  {
    "library": "django",
    "name": "get_backend_timeout",
    "source_code": "def get_backend_timeout(self, timeout=DEFAULT_TIMEOUT):\n    if timeout == DEFAULT_TIMEOUT:\n        timeout = self.default_timeout\n    elif timeout == 0:\n        timeout = -1\n    return None if timeout is None else time.time() + timeout",
    "docstring": "Return the timeout value usable by this backend based upon the provided timeout.",
    "type": "method",
    "file_path": "django\\django\\core\\cache\\backends\\base.py",
    "ast_data": "FunctionDef name:get_backend_timeout arg:self arg:timeout arguments arg arg If Compare Assign If Compare Assign Return return:yes Compare Call"
  },
  {
    "library": "django",
    "name": "__copy__",
    "source_code": "def __copy__(self):\n    return self.clone()",
    "docstring": "Return a clone because the copy of a GEOSGeometry may contain an invalid pointer location if the original is garbage collected.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\geos\\geometry.py",
    "ast_data": "FunctionDef name:__copy__ arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_infer_device_name",
    "source_code": "def _infer_device_name(self, device_name, node_name):\n    if device_name is None:\n        if node_name in self._node_devices:\n            if len(self._node_devices[node_name]) == 1:\n                return list(self._node_devices[node_name])[0]\n            else:\n                raise ValueError(\"There are multiple (%d) devices with nodes named '%s' but device_name is not specified.\" % (len(self._node_devices[node_name]), node_name))\n        else:\n            raise ValueError(\"None of the %d device(s) has a node named '%s'.\" % (len(self._device_names), node_name))\n    else:\n        return device_name",
    "docstring": "Infer the device name given node name. If device_name is provided (i.e., not None), it'll be simply returned right away. Args: device_name: (str or None) name of the device. If None, will try to infer the device name by looking at the available nodes. node_name: (str) name of the node. Returns: (str) Inferred name of the device, if available. Raises: ValueError: If the node name does not exist on any of the available devices or if there are multiple devices that contain the node with the given name.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\debug\\lib\\debug_data.py",
    "ast_data": "FunctionDef name:_infer_device_name arg:self arg:device_name arg:node_name arguments arg arg arg If Compare If Compare If Compare Call Return return:yes Call Raise Call Call Raise Call Call Return return:yes"
  },
  {
    "library": "django",
    "name": "cast_db_type",
    "source_code": "def cast_db_type(self, connection):\n    db_type = connection.ops.cast_data_types.get(self.get_internal_type())\n    if db_type:\n        return db_type % self.db_type_parameters(connection)\n    return self.db_type(connection)",
    "docstring": "Return the data type to use in the Cast() function.",
    "type": "method",
    "file_path": "django\\django\\db\\models\\fields\\__init__.py",
    "ast_data": "FunctionDef name:cast_db_type arg:self arg:connection arguments arg arg Assign Call Call If Return return:yes Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "is_integer",
    "source_code": "def is_integer(self):\n    raise TypeError('type stub not overridden')",
    "docstring": "Return True if the float is an integer.",
    "type": "method",
    "file_path": "pytorch\\torch\\__init__.py",
    "ast_data": "FunctionDef name:is_integer arg:self arguments arg Raise Call"
  },
  {
    "library": "scipy",
    "name": "initialize_read",
    "source_code": "def initialize_read(self):\n    self.dtypes = convert_dtypes(mdtypes_template, self.byte_order)\n    self._matrix_reader = VarReader4(self)",
    "docstring": "Run when beginning read of variables Sets up readers from parameters in",
    "type": "method",
    "file_path": "scipy\\scipy\\io\\matlab\\_mio4.py",
    "ast_data": "FunctionDef name:initialize_read arg:self arguments arg Assign Call Assign Call"
  },
  {
    "library": "kornia",
    "name": "__repr__",
    "source_code": "def __repr__(self) -> str:\n    repr = f'gain={self.gain}, sign={self.sign}'\n    return repr",
    "docstring": "Return a string representation of the object.",
    "type": "method",
    "file_path": "kornia\\kornia\\augmentation\\random_generator\\_2d\\linear_illumination.py",
    "ast_data": "FunctionDef name:__repr__ arg:self arguments arg Assign Return return:yes"
  },
  {
    "library": "django",
    "name": "_set_field_new_type",
    "source_code": "def _set_field_new_type(self, field, new_type):\n    if field.has_db_default():\n        default_sql, params = self.db_default_sql(field)\n        default_sql %= tuple((self.quote_value(p) for p in params))\n        new_type += f' DEFAULT {default_sql}'\n    if field.null:\n        new_type += ' NULL'\n    else:\n        new_type += ' NOT NULL'\n    return new_type",
    "docstring": "Keep the NULL and DEFAULT properties of the old field. If it has changed, it will be handled separately.",
    "type": "method",
    "file_path": "django\\django\\db\\backends\\mysql\\schema.py",
    "ast_data": "FunctionDef name:_set_field_new_type arg:self arg:field arg:new_type arguments arg arg arg If Call Assign Call Call Call If Return return:yes"
  },
  {
    "library": "kornia",
    "name": "forward",
    "source_code": "def forward(self, laf: torch.Tensor, img: torch.Tensor) -> torch.Tensor:\n    KORNIA_CHECK_LAF(laf)\n    KORNIA_CHECK_SHAPE(img, ['B', '1', 'H', 'W'])\n    B, N = laf.shape[:2]\n    PS: int = self.patch_size\n    patches: torch.Tensor = extract_patches_from_pyramid(img, make_upright(laf), PS, True).view(-1, 1, PS, PS)\n    xy = self.features(self._normalize_input(patches)).view(-1, 3)\n    a1 = torch.cat([1.0 + xy[:, 0].reshape(-1, 1, 1), 0 * xy[:, 0].reshape(-1, 1, 1)], dim=2)\n    a2 = torch.cat([xy[:, 1].reshape(-1, 1, 1), 1.0 + xy[:, 2].reshape(-1, 1, 1)], dim=2)\n    new_laf_no_center = torch.cat([a1, a2], dim=1).reshape(B, N, 2, 2)\n    new_laf = torch.cat([new_laf_no_center, laf[:, :, :, 2:3]], dim=3)\n    scale_orig = get_laf_scale(laf)\n    if self.preserve_orientation:\n        ori_orig = get_laf_orientation(laf)\n    ellipse_scale = get_laf_scale(new_laf)\n    laf_out = scale_laf(make_upright(new_laf), scale_orig / ellipse_scale)\n    if self.preserve_orientation:\n        laf_out = set_laf_orientation(laf_out, ori_orig)\n    return laf_out",
    "docstring": "Run forward. Args: laf: :math: img: :math: Returns: LAF_out: :math:",
    "type": "method",
    "file_path": "kornia\\kornia\\feature\\affine_shape.py",
    "ast_data": "FunctionDef name:forward arg:self arg:laf arg:img arguments arg arg arg Call Call Assign Call Call Call Assign Call Call Call Assign Call Call Call Assign Call Call Call Assign Call Call Assign Call Assign Call If Assign Call Assign Call Assign Call Call If Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_is_chief",
    "source_code": "def _is_chief(self):\n    if not self._cluster_spec or self._task_type in [_TaskType.CHIEF, _TaskType.EVALUATOR, None]:\n        return True\n    if _TaskType.CHIEF not in self._cluster_spec.jobs and self._task_type == _TaskType.WORKER and (self._task_id == 0):\n        return True\n    return False",
    "docstring": "Return whether the task is the chief worker.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\distribute_coordinator.py",
    "ast_data": "FunctionDef name:_is_chief arg:self arguments arg If BoolOp Compare Return return:yes If BoolOp Compare Compare Compare Return return:yes Return return:yes"
  },
  {
    "library": "sphinx",
    "name": "directive",
    "source_code": "def directive(self, name: str) -> type[Directive] | None:\n    if name in self._directive_cache:\n        return self._directive_cache[name]\n    if name not in self.directives:\n        return None\n    fullname = f'{self.name}:{name}'\n    BaseDirective = self.directives[name]\n\n    class DirectiveAdapter(BaseDirective):\n\n        def run(self) -> list[Node]:\n            self.name = fullname\n            return super().run()\n    self._directive_cache[name] = DirectiveAdapter\n    return DirectiveAdapter",
    "docstring": "Return a directive adapter class that always gives the registered directive its full name ('domain:name') as ``.",
    "type": "method",
    "file_path": "sphinx\\sphinx\\domains\\__init__.py",
    "ast_data": "FunctionDef name:directive arg:self arg:name arguments arg arg If Compare Return return:yes If Compare Return return:no Assign Assign ClassDef name:DirectiveAdapter FunctionDef name:run arg:self arguments arg Assign Return return:yes Call Call Assign Return return:yes"
  },
  {
    "library": "django",
    "name": "do_for",
    "source_code": "@register.tag('for')\ndef do_for(parser, token):\n    bits = token.split_contents()\n    if len(bits) < 4:\n        raise TemplateSyntaxError(\"'for' statements should have at least four words: %s\" % token.contents)\n    is_reversed = bits[-1] == 'reversed'\n    in_index = -3 if is_reversed else -2\n    if bits[in_index] != 'in':\n        raise TemplateSyntaxError(\"'for' statements should use the format 'for x in y': %s\" % token.contents)\n    invalid_chars = frozenset((' ', '\"', \"'\", FILTER_SEPARATOR))\n    loopvars = re.split(' *, *', ' '.join(bits[1:in_index]))\n    for var in loopvars:\n        if not var or not invalid_chars.isdisjoint(var):\n            raise TemplateSyntaxError(\"'for' tag received an invalid argument: %s\" % token.contents)\n    sequence = parser.compile_filter(bits[in_index + 1])\n    nodelist_loop = parser.parse(('empty', 'endfor'))\n    token = parser.next_token()\n    if token.contents == 'empty':\n        nodelist_empty = parser.parse(('endfor',))\n        parser.delete_first_token()\n    else:\n        nodelist_empty = None\n    return ForNode(loopvars, sequence, is_reversed, nodelist_loop, nodelist_empty)",
    "docstring": "Loop over each item in an array. For example, to display a list of athletes given `` For nested loops, this is the loop \"above\" the current one ========================== ================================================",
    "type": "function",
    "file_path": "django\\django\\template\\defaulttags.py",
    "ast_data": "FunctionDef name:do_for arg:parser arg:token arguments arg arg Assign Call If Compare Call Raise Call Assign Compare Assign If Compare Raise Call Assign Call Assign Call Call For If BoolOp Call Raise Call Assign Call Assign Call Assign Call If Compare Assign Call Call Assign Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "prevent_fetching",
    "source_code": "def prevent_fetching(self, op) -> None:\n    self._unfetchable_ops.add(op)",
    "docstring": "Marks the given as unfetchable in this graph.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\ops.py",
    "ast_data": "FunctionDef name:prevent_fetching arg:self arg:op arguments arg arg Call"
  },
  {
    "library": "pytorch",
    "name": "self_cpu_time_total",
    "source_code": "@property\ndef self_cpu_time_total(self):\n    self._check_finish()\n    assert self.function_events is not None\n    return self.function_events.self_cpu_time_total",
    "docstring": "Return CPU time as the sum of self times across all events.",
    "type": "method",
    "file_path": "pytorch\\torch\\autograd\\profiler_legacy.py",
    "ast_data": "FunctionDef name:self_cpu_time_total arg:self arguments arg Call Compare Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "fuse_conv_bn_eval",
    "source_code": "def fuse_conv_bn_eval(conv: ConvT, bn: torch.nn.modules.batchnorm._BatchNorm, transpose: bool=False) -> ConvT:\n    assert not (conv.training or bn.training), 'Fusion only for eval!'\n    fused_conv = copy.deepcopy(conv)\n    assert bn.running_mean is not None and bn.running_var is not None\n    fused_conv.weight, fused_conv.bias = fuse_conv_bn_weights(fused_conv.weight, fused_conv.bias, bn.running_mean, bn.running_var, bn.eps, bn.weight, bn.bias, transpose)\n    return fused_conv",
    "docstring": "Fuse a convolutional module and a BatchNorm module into a single, new convolutional module. Args: conv (torch.nn.modules.conv._ConvNd): A convolutional module. bn (torch.nn.modules.batchnorm._BatchNorm): A BatchNorm module. transpose (bool, optional): If True, transpose the convolutional weight. Defaults to False. Returns: torch.nn.modules.conv._ConvNd: The fused convolutional module. .. note:: Both `` must have its running buffers computed.",
    "type": "function",
    "file_path": "pytorch\\torch\\nn\\utils\\fusion.py",
    "ast_data": "FunctionDef name:fuse_conv_bn_eval arg:conv arg:bn arg:transpose arguments arg arg arg BoolOp Assign Call BoolOp Compare Compare Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "equal",
    "source_code": "@tf_export('math.equal', 'equal')\n@dispatch.register_binary_elementwise_api\n@dispatch.add_dispatch_support\ndef equal(x, y, name=None):\n    return gen_math_ops.equal(x, y, name=name)",
    "docstring": "Returns the truth value of (x == y) element-wise. Performs a [broadcast]( with the arguments and then an element-wise equality comparison, returning a Tensor of boolean values. For example: >>> x = tf.constant([2, 4]) >>> y = tf.constant(2) >>> tf.math.equal(x, y) >>> x = tf.constant([2, 4]) >>> y = tf.constant([2, 4]) >>> tf.math.equal(x, y) Args: x: A . y: A . name: A name for the operation (optional). Returns: A of type bool with the same size as that of x or y. Raises: : If shapes of arguments are incompatible",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\math_ops.py",
    "ast_data": "FunctionDef name:equal arg:x arg:y arg:name arguments arg arg arg Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "float8_e4m3fn",
    "source_code": "def float8_e4m3fn(self):\n    return self._to(torch.float8_e4m3fn)",
    "docstring": "Casts this storage to float8_e4m3fn type",
    "type": "method",
    "file_path": "pytorch\\torch\\storage.py",
    "ast_data": "FunctionDef name:float8_e4m3fn arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "cherrypy",
    "name": "_server_namespace_handler",
    "source_code": "def _server_namespace_handler(k, v):\n    atoms = k.split('.', 1)\n    if len(atoms) > 1:\n        if not hasattr(cherrypy, 'servers'):\n            cherrypy.servers = {}\n        servername, k = atoms\n        if servername not in cherrypy.servers:\n            from cherrypy import _cpserver\n            cherrypy.servers[servername] = _cpserver.Server()\n            cherrypy.servers[servername].subscribe()\n        if k == 'on':\n            if v:\n                cherrypy.servers[servername].subscribe()\n            else:\n                cherrypy.servers[servername].unsubscribe()\n        else:\n            setattr(cherrypy.servers[servername], k, v)\n    else:\n        setattr(cherrypy.server, k, v)",
    "docstring": "Config handler for the \"server\" namespace.",
    "type": "function",
    "file_path": "cherrypy\\cherrypy\\_cpconfig.py",
    "ast_data": "FunctionDef name:_server_namespace_handler arg:k arg:v arguments arg arg Assign Call If Compare Call If Call Assign Assign If Compare Assign Call Call If Compare If Call Call Call Call"
  },
  {
    "library": "numpy",
    "name": "linear_solve",
    "source_code": "def linear_solve(self, symbol):\n    b = self.substitute({symbol: as_number(0)})\n    ax = self - b\n    a = ax.substitute({symbol: as_number(1)})\n    zero, _ = as_numer_denom(a * symbol - ax)\n    if zero != as_number(0):\n        raise RuntimeError(f'not a {symbol}-linear equation: {a} * {symbol} + {b} == {self}')\n    return (a, b)",
    "docstring": "Return a, b such that a * symbol + b == self. If self is not linear with respect to symbol, raise RuntimeError.",
    "type": "method",
    "file_path": "numpy\\numpy\\f2py\\symbolic.py",
    "ast_data": "FunctionDef name:linear_solve arg:self arg:symbol arguments arg arg Assign Call Call Assign Assign Call Call Assign Call If Compare Call Raise Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "compute_mean_iou",
    "source_code": "def compute_mean_iou(_, total_cm):\n    sum_over_row = math_ops.cast(math_ops.reduce_sum(total_cm, 0), dtypes.float32)\n    sum_over_col = math_ops.cast(math_ops.reduce_sum(total_cm, 1), dtypes.float32)\n    cm_diag = math_ops.cast(array_ops.diag_part(total_cm), dtypes.float32)\n    denominator = sum_over_row + sum_over_col - cm_diag\n    num_valid_entries = math_ops.reduce_sum(math_ops.cast(math_ops.not_equal(denominator, 0), dtype=dtypes.float32))\n    denominator = array_ops.where(math_ops.greater(denominator, 0), denominator, array_ops.ones_like(denominator))\n    iou = math_ops.divide(cm_diag, denominator)\n    result = array_ops.where(math_ops.greater(num_valid_entries, 0), math_ops.reduce_sum(iou, name='mean_iou') / num_valid_entries, 0)\n    return result",
    "docstring": "Compute the mean intersection-over-union via the confusion matrix.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\metrics_impl.py",
    "ast_data": "FunctionDef name:compute_mean_iou arg:_ arg:total_cm arguments arg arg Assign Call Call Assign Call Call Assign Call Call Assign Assign Call Call Call Assign Call Call Call Assign Call Assign Call Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_lazy_property_and_property",
    "source_code": "class _lazy_property_and_property(lazy_property[T, R], property):\n\n    def __init__(self, wrapped: Callable[[T], R]) -> None:\n        property.__init__(self, wrapped)",
    "docstring": "We want lazy properties to look like multiple things. * property when Sphinx autodoc looks * lazy_property when Distribution validate_args looks",
    "type": "class",
    "file_path": "pytorch\\torch\\distributions\\utils.py",
    "ast_data": "ClassDef name:_lazy_property_and_property FunctionDef name:__init__ arg:self arg:wrapped arguments arg arg Call"
  },
  {
    "library": "pandas",
    "name": "array_values",
    "source_code": "@property\ndef array_values(self) -> ExtensionArray:\n    raise AbstractMethodError(self)",
    "docstring": "The array that Series.array returns. Always an ExtensionArray.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\internals\\blocks.py",
    "ast_data": "FunctionDef name:array_values arg:self arguments arg Raise Call"
  },
  {
    "library": "pandas",
    "name": "_repr_html_",
    "source_code": "def _repr_html_(self) -> str | None:\n    if self._info_repr():\n        buf = StringIO()\n        self.info(buf=buf)\n        val = buf.getvalue().replace('<', '&lt;', 1)\n        val = val.replace('>', '&gt;', 1)\n        return f'<pre>{val}</pre>'\n    if get_option('display.notebook_repr_html'):\n        max_rows = get_option('display.max_rows')\n        min_rows = get_option('display.min_rows')\n        max_cols = get_option('display.max_columns')\n        show_dimensions = get_option('display.show_dimensions')\n        show_floats = get_option('display.float_format')\n        formatter = fmt.DataFrameFormatter(self, columns=None, col_space=None, na_rep='NaN', formatters=None, float_format=show_floats, sparsify=None, justify=None, index_names=True, header=True, index=True, bold_rows=True, escape=True, max_rows=max_rows, min_rows=min_rows, max_cols=max_cols, show_dimensions=show_dimensions, decimal='.')\n        return fmt.DataFrameRenderer(formatter).to_html(notebook=True)\n    else:\n        return None",
    "docstring": "Return a html representation for a particular DataFrame. Mainly for IPython notebook.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\frame.py",
    "ast_data": "FunctionDef name:_repr_html_ arg:self arguments arg If Call Assign Call Call Assign Call Call Assign Call Return return:yes If Call Assign Call Assign Call Assign Call Assign Call Assign Call Assign Call Return return:yes Call Call Return return:no"
  },
  {
    "library": "tensorflow",
    "name": "_wrap_optimizer",
    "source_code": "def _wrap_optimizer(opt, loss_scale):\n    for _, wrapper_optimizer in _REGISTERED_WRAPPER_OPTIMIZER_CLS.values():\n        if isinstance(opt, wrapper_optimizer):\n            raise ValueError('\"opt\" must not already be an instance of a {cls}. `enable_mixed_precision_graph_rewrite` will automatically wrap the optimizer with a {cls}.'.format(cls=wrapper_optimizer.__name__))\n    for optimizer_cls, (wrapper_fn, _) in _REGISTERED_WRAPPER_OPTIMIZER_CLS.items():\n        if isinstance(opt, optimizer_cls):\n            return wrapper_fn(opt, loss_scale)\n    raise ValueError('\"opt\" must be an instance of a tf.train.Optimizer or a tf.keras.optimizers.Optimizer, but got: %s' % opt)",
    "docstring": "Wraps an optimizer with a LossScaleOptimizer.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\training\\experimental\\mixed_precision.py",
    "ast_data": "FunctionDef name:_wrap_optimizer arg:opt arg:loss_scale arguments arg arg For Call If Call Raise Call Call For Call If Call Return return:yes Call Raise Call"
  },
  {
    "library": "tensorflow",
    "name": "node_exists",
    "source_code": "def node_exists(self, node_name, device_name=None):\n    if not self._debug_graphs:\n        raise LookupError('Nodes have not been loaded from partition graphs yet.')\n    if device_name is not None and device_name not in self._debug_graphs:\n        raise ValueError(\"The specified device_name '%s' cannot be found.\" % device_name)\n    for _, debug_graph in self._debug_graphs.items():\n        if node_name in debug_graph.node_inputs:\n            return True\n    return False",
    "docstring": "Test if a node exists in the partition graphs. Args: node_name: () name of the node to be checked. device_name: optional device name. If None, will search for the node on all available devices. Otherwise, search for the node only on the given device. Returns: A boolean indicating whether the node exists. Raises: LookupError: If no partition graphs have been loaded yet. ValueError: If device_name is specified but cannot be found.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\debug\\lib\\debug_data.py",
    "ast_data": "FunctionDef name:node_exists arg:self arg:node_name arg:device_name arguments arg arg arg If Raise Call If BoolOp Compare Compare Raise Call For Call If Compare Return return:yes Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "Ignored",
    "source_code": "class Ignored(PatternExpr):\n\n    def _match(self, node: NodeOrConstant, ctx: MatchContext) -> MatchResult:\n        return Match(ctx, self)\n\n    def __repr__(self) -> str:\n        return '*'\n\n    def pretty_print(self, pp: PatternPrettyPrinter) -> str:\n        return 'Ignored()'",
    "docstring": "Match an arg, but don't pass it to handler",
    "type": "class",
    "file_path": "pytorch\\torch\\_inductor\\pattern_matcher.py",
    "ast_data": "ClassDef name:Ignored FunctionDef name:_match arg:self arg:node arg:ctx arguments arg arg arg Return return:yes Call FunctionDef name:__repr__ arg:self arguments arg Return return:yes FunctionDef name:pretty_print arg:self arg:pp arguments arg arg Return return:yes"
  },
  {
    "library": "scipy",
    "name": "cwt_matrix",
    "source_code": "def cwt_matrix(n_rows, n_columns, rng=None):\n    rng = check_random_state(rng)\n    rows = rng_integers(rng, 0, n_rows, n_columns)\n    cols = np.arange(n_columns + 1)\n    signs = rng.choice([1, -1], n_columns)\n    S = csc_matrix((signs, rows, cols), shape=(n_rows, n_columns))\n    return S",
    "docstring": "Generate a matrix S which represents a Clarkson-Woodruff transform. Given the desired size of matrix, the method returns a matrix S of size (n_rows, n_columns) where each column has all the entries set to 0 except for one position which has been randomly set to +1 or -1 with equal probability. Parameters ---------- n_rows : int Number of rows of S n_columns : int Number of columns of S rng : , optional Pseudorandom number generator state. When is None, a new is created using entropy from the operating system. Types other than are passed to to instantiate a `` nonzero entries. Notes ----- Given a matrix A, with probability at least 9/10, .. math:: \\|SA\\| = (1 \\pm \\epsilon)\\|A\\| Where the error epsilon is related to the size of S.",
    "type": "function",
    "file_path": "scipy\\scipy\\linalg\\_sketches.py",
    "ast_data": "FunctionDef name:cwt_matrix arg:n_rows arg:n_columns arg:rng arguments arg arg arg Assign Call Assign Call Assign Call Assign Call Assign Call Return return:yes"
  },
  {
    "library": "pandas",
    "name": "_is_floats_color",
    "source_code": "def _is_floats_color(color: Color | Collection[Color]) -> bool:\n    return bool(is_list_like(color) and (len(color) == 3 or len(color) == 4) and all((isinstance(x, (int, float)) for x in color)))",
    "docstring": "Check if color comprises a sequence of floats representing color.",
    "type": "function",
    "file_path": "pandas\\pandas\\plotting\\_matplotlib\\style.py",
    "ast_data": "FunctionDef name:_is_floats_color arg:color arguments arg Return return:yes Call BoolOp Call BoolOp Compare Call Compare Call Call Call"
  },
  {
    "library": "numpy",
    "name": "abspath",
    "source_code": "def abspath(self, path):\n    return DataSource.abspath(self, self._fullpath(path))",
    "docstring": "Return absolute path of file in the Repository directory. If is an URL, then will return either the location the file exists locally or the location it would exist when opened using the method. Parameters ---------- path : str or pathlib.Path Can be a local file or a remote URL. This may, but does not have to, include the with which the was initialized. Returns ------- out : str Complete path, including the destination directory.",
    "type": "method",
    "file_path": "numpy\\numpy\\lib\\_datasource.py",
    "ast_data": "FunctionDef name:abspath arg:self arg:path arguments arg arg Return return:yes Call Call"
  },
  {
    "library": "pandas",
    "name": "get_join_indexers_non_unique",
    "source_code": "def get_join_indexers_non_unique(left: ArrayLike, right: ArrayLike, sort: bool=False, how: JoinHow='inner') -> tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]]:\n    lkey, rkey, count = _factorize_keys(left, right, sort=sort, how=how)\n    if count == -1:\n        return (lkey, rkey)\n    if how == 'left':\n        lidx, ridx = libjoin.left_outer_join(lkey, rkey, count, sort=sort)\n    elif how == 'right':\n        ridx, lidx = libjoin.left_outer_join(rkey, lkey, count, sort=sort)\n    elif how == 'inner':\n        lidx, ridx = libjoin.inner_join(lkey, rkey, count, sort=sort)\n    elif how == 'outer':\n        lidx, ridx = libjoin.full_outer_join(lkey, rkey, count)\n    return (lidx, ridx)",
    "docstring": "Get join indexers for left and right. Parameters ---------- left : ArrayLike right : ArrayLike sort : bool, default False how : {'inner', 'outer', 'left', 'right'}, default 'inner' Returns ------- np.ndarray[np.intp] Indexer into left. np.ndarray[np.intp] Indexer into right.",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\reshape\\merge.py",
    "ast_data": "FunctionDef name:get_join_indexers_non_unique arg:left arg:right arg:sort arg:how arguments arg arg arg arg Assign Call If Compare Return return:yes If Compare Assign Call If Compare Assign Call If Compare Assign Call If Compare Assign Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "compute_elementwise_output_strides",
    "source_code": "def compute_elementwise_output_strides(*tensors) -> tuple[int, ...]:\n    if len(tensors) == 0:\n        msg = \"Can't compute elementwise output strides for zero tensors!\"\n        raise ValueError(msg)\n    check_same_shape(*tensors, allow_cpu_scalar_tensors=True)\n    tensors = tuple((a for a in tensors if isinstance(a, TensorLike) and (not is_cpu_scalar_tensor(a))))\n    if len(tensors) == 0:\n        return ()\n    ndim = tensors[0].ndim\n    shape = tensors[0].shape\n    if ndim == 0:\n        return ()\n    if ndim == 1:\n        return (1,)\n    logical_to_physical_perm = compute_elementwise_output_logical_to_physical_perm(*tensors, _skip_checks=True)\n    permuted_shape = apply_perm(shape, logical_to_physical_perm)\n    new_strides = make_contiguous_strides_for(permuted_shape)\n    permuted_strides = apply_perm(new_strides, invert_perm(logical_to_physical_perm))\n    return tuple(permuted_strides)",
    "docstring": "Computes the output strides for elementwise operations.",
    "type": "function",
    "file_path": "pytorch\\torch\\_prims_common\\__init__.py",
    "ast_data": "FunctionDef name:compute_elementwise_output_strides arguments arg If Compare Call Assign Raise Call Call Assign Call BoolOp Call Call If Compare Call Return return:no Assign Assign If Compare Return return:no If Compare Return return:yes Assign Call Assign Call Assign Call Assign Call Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "reverse",
    "source_code": "@dispatch.add_dispatch_support\n@doc_controls.do_not_generate_docs\ndef reverse(x, axes):\n    if isinstance(axes, int):\n        axes = [axes]\n    return array_ops.reverse(x, axes)",
    "docstring": "Reverse a tensor along the specified axes. Args: x: Tensor to reverse. axes: Integer or iterable of integers. Axes to reverse. Returns: A tensor.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\backend.py",
    "ast_data": "FunctionDef name:reverse arg:x arg:axes arguments arg arg If Call Assign Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "__bool__",
    "source_code": "def __bool__(self):\n    return self._dims is not None",
    "docstring": "Returns True if this shape contains non-zero information.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\tensor_shape.py",
    "ast_data": "FunctionDef name:__bool__ arg:self arguments arg Return return:yes Compare"
  },
  {
    "library": "matplotlib",
    "name": "get_dir_vector",
    "source_code": "def get_dir_vector(zdir):\n    if zdir == 'x':\n        return np.array((1, 0, 0))\n    elif zdir == 'y':\n        return np.array((0, 1, 0))\n    elif zdir == 'z':\n        return np.array((0, 0, 1))\n    elif zdir is None:\n        return np.array((0, 0, 0))\n    elif np.iterable(zdir) and len(zdir) == 3:\n        return np.array(zdir)\n    else:\n        raise ValueError(\"'x', 'y', 'z', None or vector of length 3 expected\")",
    "docstring": "Return a direction vector. Parameters ---------- zdir : {'x', 'y', 'z', None, 3-tuple} The direction. Possible values are: - 'x': equivalent to (1, 0, 0) - 'y': equivalent to (0, 1, 0) - 'z': equivalent to (0, 0, 1) - *None*: equivalent to (0, 0, 0) - an iterable (x, y, z) is converted to an array Returns ------- x, y, z : array The direction vector.",
    "type": "function",
    "file_path": "matplotlib\\lib\\mpl_toolkits\\mplot3d\\art3d.py",
    "ast_data": "FunctionDef name:get_dir_vector arg:zdir arguments arg If Compare Return return:yes Call If Compare Return return:yes Call If Compare Return return:yes Call If Compare Return return:yes Call If BoolOp Call Compare Call Return return:yes Call Raise Call"
  },
  {
    "library": "tensorflow",
    "name": "close",
    "source_code": "def close(self):\n    if not self._closed:\n        self.flush()\n        self._try_put(self._close_sentinel)\n        self._internal_close()",
    "docstring": "Flushes the event file to disk and close the file. Call this method when you do not need the summary writer anymore.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\summary\\writer\\event_file_writer.py",
    "ast_data": "FunctionDef name:close arg:self arguments arg If Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "get_default_values",
    "source_code": "@classmethod\ndef get_default_values(cls, obj: Callable[..., Any], *, follow_wrapped: bool=True) -> Dict[str, Any]:\n    signature = super().from_callable(obj, follow_wrapped=follow_wrapped)\n    default_values = {}\n    for p in signature.parameters.values():\n        if p.default is not p.empty:\n            default_values[p.name] = p.default\n    return default_values",
    "docstring": "Inspects and returns a dictionary of default values.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\core\\function\\polymorphism\\function_type.py",
    "ast_data": "FunctionDef name:get_default_values arg:cls arg:obj arguments arg arg arg Assign Call Call Assign For Call If Compare Assign Return return:yes"
  },
  {
    "library": "django",
    "name": "async_only_middleware",
    "source_code": "def async_only_middleware(func):\n    func.sync_capable = False\n    func.async_capable = True\n    return func",
    "docstring": "Mark a middleware factory as returning an async middleware.",
    "type": "function",
    "file_path": "django\\django\\utils\\decorators.py",
    "ast_data": "FunctionDef name:async_only_middleware arg:func arguments arg Assign Assign Return return:yes"
  },
  {
    "library": "pandas",
    "name": "write_th",
    "source_code": "def write_th(self, s: Any, header: bool=False, indent: int=0, tags: str | None=None) -> None:\n    col_space = self.col_space.get(s, None)\n    if header and col_space is not None:\n        tags = tags or ''\n        tags += f'style=\"min-width: {col_space};\"'\n    self._write_cell(s, kind='th', indent=indent, tags=tags)",
    "docstring": "Method for writing a formatted cell. If col_space is set on the formatter then that is used for the value of min-width. Parameters ---------- s : object The data to be written inside the cell. header : bool, default False Set to True if the is for use inside . This will cause min-width to be set if there is one. indent : int, default 0 The indentation level of the cell. tags : str, default None Tags to include in the cell. Returns ------- A written cell.",
    "type": "method",
    "file_path": "pandas\\pandas\\io\\formats\\html.py",
    "ast_data": "FunctionDef name:write_th arg:self arg:s arg:header arg:indent arg:tags arguments arg arg arg arg arg Assign Call If BoolOp Compare Assign BoolOp Call"
  },
  {
    "library": "virtualenv",
    "name": "CPython3",
    "source_code": "class CPython3(CPython, Python3Supports, abc.ABC):\n    pass",
    "docstring": "CPython 3 or later.",
    "type": "class",
    "file_path": "virtualenv\\src\\virtualenv\\create\\via_global_ref\\builtin\\cpython\\cpython3.py",
    "ast_data": "ClassDef name:CPython3"
  },
  {
    "library": "scikit-learn",
    "name": "predict",
    "source_code": "def predict(self, X):\n    check_is_fitted(self)\n    if self.voting == 'soft':\n        maj = np.argmax(self.predict_proba(X), axis=1)\n    else:\n        predictions = self._predict(X)\n        maj = np.apply_along_axis(lambda x: np.argmax(np.bincount(x, weights=self._weights_not_none)), axis=1, arr=predictions)\n    maj = self.le_.inverse_transform(maj)\n    return maj",
    "docstring": "Predict class labels for X. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) The input samples. Returns ------- maj : array-like of shape (n_samples,) Predicted class labels.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\ensemble\\_voting.py",
    "ast_data": "FunctionDef name:predict arg:self arg:X arguments arg arg Call If Compare Assign Call Call Assign Call Assign Call arguments arg Call Call Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "ProgramContext",
    "source_code": "class ProgramContext(object):\n\n    def __init__(self, options, autograph_module=None):\n        self.options = options\n        self.autograph_module = autograph_module",
    "docstring": "ProgramContext keeps track of converting function hierarchies. Attributes: options: ConversionOptions autograph_module: Deprecated. Do not use.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\autograph\\core\\converter.py",
    "ast_data": "ClassDef name:ProgramContext FunctionDef name:__init__ arg:self arg:options arg:autograph_module arguments arg arg arg Assign Assign"
  },
  {
    "library": "kornia",
    "name": "apply_non_transform_box",
    "source_code": "def apply_non_transform_box(self, input: Boxes, params: Dict[str, Tensor], flags: Dict[str, Any], transform: Optional[Tensor]=None) -> Boxes:\n    return input",
    "docstring": "Process boxes corresponding to the inputs that are no transformation applied.",
    "type": "method",
    "file_path": "kornia\\kornia\\augmentation\\base.py",
    "ast_data": "FunctionDef name:apply_non_transform_box arg:self arg:input arg:params arg:flags arg:transform arguments arg arg arg arg arg Return return:yes"
  },
  {
    "library": "scipy",
    "name": "_get_output_dtype",
    "source_code": "def _get_output_dtype(covariance, points):\n    output_dtype = np.common_type(covariance, points)\n    itemsize = np.dtype(output_dtype).itemsize\n    if itemsize == 4:\n        spec = 'float'\n    elif itemsize == 8:\n        spec = 'double'\n    elif itemsize in (12, 16):\n        spec = 'long double'\n    else:\n        raise ValueError(f'{output_dtype} has unexpected item size: {itemsize}')\n    return (output_dtype, spec)",
    "docstring": "Calculates the output dtype and the \"spec\" (=C type name). This was necessary in order to deal with the fused types in the Cython routine . See gh-10824 for details.",
    "type": "function",
    "file_path": "scipy\\scipy\\stats\\_kde.py",
    "ast_data": "FunctionDef name:_get_output_dtype arg:covariance arg:points arguments arg arg Assign Call Assign Call If Compare Assign If Compare Assign If Compare Assign Raise Call Return return:yes"
  },
  {
    "library": "pandas",
    "name": "_astype_float_to_int_nansafe",
    "source_code": "def _astype_float_to_int_nansafe(values: np.ndarray, dtype: np.dtype, copy: bool) -> np.ndarray:\n    if not np.isfinite(values).all():\n        raise IntCastingNaNError('Cannot convert non-finite values (NA or inf) to integer')\n    if dtype.kind == 'u':\n        if not (values >= 0).all():\n            raise ValueError(f'Cannot losslessly cast from {values.dtype} to {dtype}')\n    with warnings.catch_warnings():\n        warnings.filterwarnings('ignore', category=RuntimeWarning)\n        return values.astype(dtype, copy=copy)",
    "docstring": "astype with a check preventing converting NaN to an meaningless integer value.",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\dtypes\\astype.py",
    "ast_data": "FunctionDef name:_astype_float_to_int_nansafe arg:values arg:dtype arg:copy arguments arg arg arg If Call Call Raise Call If Compare If Call Compare Raise Call With Call Call Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "set_visible",
    "source_code": "def set_visible(self, b):\n    if b != self._visible:\n        self._visible = b\n        self.pchanged()\n        self.stale = True",
    "docstring": "Set the artist's visibility. Parameters ---------- b : bool",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\artist.py",
    "ast_data": "FunctionDef name:set_visible arg:self arg:b arguments arg arg If Compare Assign Call Assign"
  },
  {
    "library": "tensorflow",
    "name": "MaxPooling1D",
    "source_code": "class MaxPooling1D(keras_layers.MaxPooling1D, base.Layer):\n\n    def __init__(self, pool_size, strides, padding='valid', data_format='channels_last', name=None, **kwargs):\n        if strides is None:\n            raise ValueError('Argument `strides` must not be None.')\n        super(MaxPooling1D, self).__init__(pool_size=pool_size, strides=strides, padding=padding, data_format=data_format, name=name, **kwargs)",
    "docstring": "Max Pooling layer for 1D inputs. Args: pool_size: An integer or tuple/list of a single integer, representing the size of the pooling window. strides: An integer or tuple/list of a single integer, specifying the strides of the pooling operation. padding: A string. The padding method, either 'valid' or 'same'. Case-insensitive. data_format: A string, one of (default) or . The ordering of the dimensions in the inputs. corresponds to inputs with shape while corresponds to inputs with shape . name: A string, the name of the layer.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\legacy_tf_layers\\pooling.py",
    "ast_data": "ClassDef name:MaxPooling1D FunctionDef name:__init__ arg:self arg:pool_size arg:strides arg:padding arg:data_format arg:name arguments arg arg arg arg arg arg arg If Compare Raise Call Call Call"
  },
  {
    "library": "scikit-learn",
    "name": "_transform",
    "source_code": "def _transform(self, T):\n    if hasattr(self, 'X_thresholds_'):\n        dtype = self.X_thresholds_.dtype\n    else:\n        dtype = np.float64\n    T = check_array(T, dtype=dtype, ensure_2d=False)\n    self._check_input_data_shape(T)\n    T = T.reshape(-1)\n    if self.out_of_bounds == 'clip':\n        T = np.clip(T, self.X_min_, self.X_max_)\n    res = self.f_(T)\n    res = res.astype(T.dtype)\n    return res",
    "docstring": "is called by both and methods. Since is wrapped to output arrays of specific types (e.g. NumPy arrays, pandas DataFrame), we cannot make call directly. The above behaviour could be changed in the future, if we decide to output other type of arrays when calling .",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\isotonic.py",
    "ast_data": "FunctionDef name:_transform arg:self arg:T arguments arg arg If Call Assign Assign Assign Call Call Assign Call If Compare Assign Call Assign Call Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "map_missing_dict_keys",
    "source_code": "def map_missing_dict_keys(y_pred, struct):\n    if not isinstance(y_pred, dict) or not isinstance(struct, dict):\n        return struct\n    for k in y_pred.keys():\n        if k not in struct:\n            struct[k] = None\n    return struct",
    "docstring": "Replaces missing dict keys in with placeholders.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\compile_utils.py",
    "ast_data": "FunctionDef name:map_missing_dict_keys arg:y_pred arg:struct arguments arg arg If BoolOp Call Call Return return:yes For Call If Compare Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "UserRegisteredTypeKerasTensor",
    "source_code": "class UserRegisteredTypeKerasTensor(KerasTensor):\n\n    def __init__(self, user_registered_symbolic_object):\n        x = user_registered_symbolic_object\n        self._user_registered_symbolic_object = x\n        type_spec = UserRegisteredSpec(x.shape, x.dtype)\n        name = getattr(x, 'name', None)\n        super(UserRegisteredTypeKerasTensor, self).__init__(type_spec, name)\n\n    @classmethod\n    def from_tensor(cls, tensor):\n        return cls(tensor)\n\n    @classmethod\n    def from_type_spec(cls, type_spec, name=None):\n        raise NotImplementedError('You cannot instantiate a KerasTensor directly from TypeSpec: %s' % type_spec)\n\n    def _to_placeholder(self):\n        return self._user_registered_symbolic_object",
    "docstring": "KerasTensor that represents legacy register_symbolic_tensor_type.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\keras_tensor.py",
    "ast_data": "ClassDef name:UserRegisteredTypeKerasTensor FunctionDef name:__init__ arg:self arg:user_registered_symbolic_object arguments arg arg Assign Assign Assign Call Assign Call Call Call FunctionDef name:from_tensor arg:cls arg:tensor arguments arg arg Return return:yes Call FunctionDef name:from_type_spec arg:cls arg:type_spec arg:name arguments arg arg arg Raise Call FunctionDef name:_to_placeholder arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "PerWorkerResource",
    "source_code": "class PerWorkerResource:\n\n    def __init__(self, strategy, host_to_resources):\n        distribute_lib.distribution_strategy_input_api_counter.get_cell('PerWorkerResource', 'TPUDistributedLookupTable').increase_by(1)\n        self._strategy = strategy\n        self._host_to_resources = host_to_resources\n\n    def __getattribute__(self, name):\n        if name not in ('__init__', '__getattribute__', '_host_to_resources', '_strategy', 'local_resource'):\n            return getattr(self.local_resource(), name)\n        return super(PerWorkerResource, self).__getattribute__(name)\n\n    def __setattr__(self, name, value):\n        if name not in ('_strategy', '_host_to_resources'):\n            return setattr(self.local_resource(), name, value)\n        return super(PerWorkerResource, self).__setattr__(name, value)\n\n    def local_resource(self):\n        current_device = device_util.canonicalize(device_util.current())\n        host_device = device_util.canonicalize(device_util.get_host_for_device(current_device))\n        return self._host_to_resources.get(host_device, self._host_to_resources[next(iter(self._host_to_resources))])",
    "docstring": "A per-worker CapturableResource class for non-ParameterServer strategy. Resources that populate should be instances of classes subclassing CapturableResource, although currently it's only used and tested for StaticHashTable with TPUStrategy.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\values.py",
    "ast_data": "ClassDef name:PerWorkerResource FunctionDef name:__init__ arg:self arg:strategy arg:host_to_resources arguments arg arg arg Call Call Assign Assign FunctionDef name:__getattribute__ arg:self arg:name arguments arg arg If Compare Return return:yes Call Call Return return:yes Call Call FunctionDef name:__setattr__ arg:self arg:name arg:value arguments arg arg arg If Compare Return return:yes Call Call Return return:yes Call Call FunctionDef name:local_resource arg:self arguments arg Assign Call Call Assign Call Call Return return:yes Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "store_timeout",
    "source_code": "@contextmanager\ndef store_timeout(store, timeout: float):\n    old_timeout = store.timeout\n    store.set_timeout(timedelta(seconds=timeout))\n    yield\n    store.set_timeout(old_timeout)",
    "docstring": "This sets the timeout and then restores the old timeout when the context manager exits. Args: store: the store to set the timeout on timeout: the timeout to set",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\elastic\\utils\\store.py",
    "ast_data": "FunctionDef name:store_timeout arg:store arg:timeout arguments arg arg Assign Call Call Call"
  },
  {
    "library": "scipy",
    "name": "dblquad",
    "source_code": "def dblquad(func, a, b, gfun, hfun, args=(), epsabs=1.49e-08, epsrel=1.49e-08):\n\n    def temp_ranges(*args):\n        return [gfun(args[0]) if callable(gfun) else gfun, hfun(args[0]) if callable(hfun) else hfun]\n    return nquad(func, [temp_ranges, [a, b]], args=args, opts={'epsabs': epsabs, 'epsrel': epsrel})",
    "docstring": "Compute a double integral. Return the double (definite) integral of `a\\int^{x=\\pi/4}_{x=0} \\int^{y=\\cos(x)}_{y=\\sin(x)} 1 \\,dy \\,dx\\int^{x=1}_{x=0} \\int^{y=2-x}_{y=x} a x y \\,dy \\,dxa=1, 3f(x,y) = e^{-(x^{2} + y^{2})}(-\\infty,+\\infty)\\iint^{+\\infty}_{-\\infty} e^{-(x^{2} + y^{2})} \\,dy\\,dx`. >>> f = lambda x, y: np.exp(-(x ** 2 + y ** 2)) >>> integrate.dblquad(f, -np.inf, np.inf, -np.inf, np.inf) (3.141592653589777, 2.5173086737433208e-08)",
    "type": "function",
    "file_path": "scipy\\scipy\\integrate\\_quadpack_py.py",
    "ast_data": "FunctionDef name:dblquad arg:func arg:a arg:b arg:gfun arg:hfun arg:args arg:epsabs arg:epsrel arguments arg arg arg arg arg arg arg arg FunctionDef name:temp_ranges arguments arg Return return:yes Call Call Call Call Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "set_default_weight",
    "source_code": "def set_default_weight(self, weight):\n    self.__default_weight = weight",
    "docstring": "Set the default font weight. The initial value is 'normal'.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\font_manager.py",
    "ast_data": "FunctionDef name:set_default_weight arg:self arg:weight arguments arg arg Assign"
  },
  {
    "library": "matplotlib",
    "name": "_uniform_y",
    "source_code": "def _uniform_y(self, N):\n    automin = automax = 1.0 / (N - 1.0)\n    extendlength = self._get_extension_lengths(self.extendfrac, automin, automax, default=0.05)\n    y = np.linspace(0, 1, N)\n    return (y, extendlength)",
    "docstring": "Return colorbar data coordinates for *N* uniformly spaced boundaries, plus extension lengths if required.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\colorbar.py",
    "ast_data": "FunctionDef name:_uniform_y arg:self arg:N arguments arg arg Assign Assign Call Assign Call Return return:yes"
  },
  {
    "library": "pandas",
    "name": "validate_args_and_kwargs",
    "source_code": "def validate_args_and_kwargs(fname, args, kwargs, max_fname_arg_count, compat_args) -> None:\n    _check_arg_length(fname, args + tuple(kwargs.values()), max_fname_arg_count, compat_args)\n    args_dict = dict(zip(compat_args, args))\n    for key in args_dict:\n        if key in kwargs:\n            raise TypeError(f\"{fname}() got multiple values for keyword argument '{key}'\")\n    kwargs.update(args_dict)\n    validate_kwargs(fname, kwargs, compat_args)",
    "docstring": "Checks whether parameters passed to the *args and **kwargs argument in a function are valid parameters as specified in and whether or not they are set to their default values. Parameters ---------- fname: str The name of the function being passed the parameter args: tuple The parameter passed into a function kwargs: dict The parameter passed into max_fname_arg_count: int The minimum number of arguments that the function requires, excluding those in . Used for displaying appropriate error messages. Must be non-negative. compat_args: dict A dictionary of keys that is allowed to have and their associated default values. Raises ------ TypeError if contains more values than there are OR contains keys not in ValueError if contains values not at the default value () contains keys in that do not map to the default value as specified in See Also -------- validate_args : Purely args validation. validate_kwargs : Purely kwargs validation.",
    "type": "function",
    "file_path": "pandas\\pandas\\util\\_validators.py",
    "ast_data": "FunctionDef name:validate_args_and_kwargs arg:fname arg:args arg:kwargs arg:max_fname_arg_count arg:compat_args arguments arg arg arg arg arg Call Call Call Assign Call Call For If Compare Raise Call Call Call"
  },
  {
    "library": "matplotlib",
    "name": "get_edgecolor",
    "source_code": "def get_edgecolor(self):\n    return self._edgecolor",
    "docstring": "Return the edge color.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\patches.py",
    "ast_data": "FunctionDef name:get_edgecolor arg:self arguments arg Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "scaled",
    "source_code": "def scaled(self):\n    return self.vmin is not None and self.vmax is not None",
    "docstring": "Return whether *vmin* and *vmax* are both set.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\colors.py",
    "ast_data": "FunctionDef name:scaled arg:self arguments arg Return return:yes BoolOp Compare Compare"
  },
  {
    "library": "matplotlib",
    "name": "auto_set_font_size",
    "source_code": "def auto_set_font_size(self, value=True):\n    self._autoFontsize = value\n    self.stale = True",
    "docstring": "Automatically set font size.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\table.py",
    "ast_data": "FunctionDef name:auto_set_font_size arg:self arg:value arguments arg arg Assign Assign"
  },
  {
    "library": "tensorflow",
    "name": "_filter_exception",
    "source_code": "def _filter_exception(self, ex):\n    if isinstance(ex, tuple):\n        ex2 = ex[1]\n    else:\n        ex2 = ex\n    if isinstance(ex2, self._clean_stop_exception_types):\n        ex = None\n    return ex",
    "docstring": "Check if the exception indicated in 'ex' should be ignored. This method examines to check if it is an exception that should be reported to the users. If yes, it returns as is, otherwise it returns None. The code returns None for exception types listed in . Args: ex: None, an , or a Python tuple as returned by . Returns: ex or None.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\training\\coordinator.py",
    "ast_data": "FunctionDef name:_filter_exception arg:self arg:ex arguments arg arg If Call Assign Assign If Call Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_shape_offsets",
    "source_code": "def _shape_offsets(shape):\n    offsets = []\n    for dim in reversed(shape):\n        if offsets:\n            offsets.append(dim * offsets[-1])\n        else:\n            offsets.append(dim)\n    offsets.reverse()\n    return offsets",
    "docstring": "Returns moving offset for each dimension given shape.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\feature_column\\feature_column.py",
    "ast_data": "FunctionDef name:_shape_offsets arg:shape arguments arg Assign For Call If Call Call Call Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "fit",
    "source_code": "@_fit_context(prefer_skip_nested_validation=False)\ndef fit(self, X, y, **fit_params):\n    _raise_for_params(fit_params, self, 'fit')\n    routed_params = process_routing(self, 'fit', **fit_params)\n    X, y = validate_data(self, X, y, accept_sparse=['csr', 'csc'], ensure_all_finite=False)\n    check_classification_targets(y)\n    self.classes_ = np.unique(y)\n    if len(self.classes_) == 1:\n        raise ValueError('OneVsOneClassifier can not be fit when only one class is present.')\n    n_classes = self.classes_.shape[0]\n    estimators_indices = list(zip(*Parallel(n_jobs=self.n_jobs)((delayed(_fit_ovo_binary)(self.estimator, X, y, self.classes_[i], self.classes_[j], fit_params=routed_params.estimator.fit) for i in range(n_classes) for j in range(i + 1, n_classes)))))\n    self.estimators_ = estimators_indices[0]\n    pairwise = self.__sklearn_tags__().input_tags.pairwise\n    self.pairwise_indices_ = estimators_indices[1] if pairwise else None\n    return self",
    "docstring": "Fit underlying estimators. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) Data. y : array-like of shape (n_samples,) Multi-class targets. **fit_params : dict Parameters passed to the `enable_metadata_routing=TrueMetadata Routing User Guide ` for more details. Returns ------- self : object The fitted underlying estimator.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\multiclass.py",
    "ast_data": "FunctionDef name:fit arg:self arg:X arg:y arguments arg arg arg arg Call Assign Call Assign Call Call Assign Call If Compare Call Raise Call Assign Assign Call Call Call Call Call Call Call Call Assign Assign Call Assign Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "grad_index",
    "source_code": "@property\ndef grad_index(self):\n    return self._grad_index",
    "docstring": "The loop index of backprop loop.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\control_flow_state.py",
    "ast_data": "FunctionDef name:grad_index arg:self arguments arg Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "_convert_mesh_to_paths",
    "source_code": "@staticmethod\ndef _convert_mesh_to_paths(coordinates):\n    if isinstance(coordinates, np.ma.MaskedArray):\n        c = coordinates.data\n    else:\n        c = coordinates\n    points = np.concatenate([c[:-1, :-1], c[:-1, 1:], c[1:, 1:], c[1:, :-1], c[:-1, :-1]], axis=2).reshape((-1, 5, 2))\n    return [mpath.Path(x) for x in points]",
    "docstring": "Convert a given mesh into a sequence of objects. This function is primarily of use to implementers of backends that do not directly support quadmeshes.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\collections.py",
    "ast_data": "FunctionDef name:_convert_mesh_to_paths arg:coordinates arguments arg If Call Assign Assign Assign Call Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "ctc_batch_cost",
    "source_code": "@dispatch.add_dispatch_support\n@doc_controls.do_not_generate_docs\ndef ctc_batch_cost(y_true, y_pred, input_length, label_length):\n    label_length = math_ops.cast(array_ops.squeeze(label_length, axis=-1), dtypes_module.int32)\n    input_length = math_ops.cast(array_ops.squeeze(input_length, axis=-1), dtypes_module.int32)\n    sparse_labels = math_ops.cast(ctc_label_dense_to_sparse(y_true, label_length), dtypes_module.int32)\n    y_pred = math_ops.log(array_ops.transpose(y_pred, perm=[1, 0, 2]) + epsilon())\n    return array_ops.expand_dims(ctc.ctc_loss(inputs=y_pred, labels=sparse_labels, sequence_length=input_length), 1)",
    "docstring": "Runs CTC loss algorithm on each batch element. Args: y_true: tensor containing the truth labels. y_pred: tensor containing the prediction, or output of the softmax. input_length: tensor containing the sequence length for each batch item in . label_length: tensor containing the sequence length for each batch item in . Returns: Tensor with shape (samples,1) containing the CTC loss of each element.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\backend.py",
    "ast_data": "FunctionDef name:ctc_batch_cost arg:y_true arg:y_pred arg:input_length arg:label_length arguments arg arg arg arg Assign Call Call Assign Call Call Assign Call Call Assign Call Call Call Return return:yes Call Call"
  },
  {
    "library": "matplotlib",
    "name": "__init__",
    "source_code": "def __init__(self, xy1, xy2, slope, **kwargs):\n    super().__init__([0, 1], [0, 1], **kwargs)\n    if xy2 is None and slope is None or (xy2 is not None and slope is not None):\n        raise TypeError(\"Exactly one of 'xy2' and 'slope' must be given\")\n    self._slope = slope\n    self._xy1 = xy1\n    self._xy2 = xy2",
    "docstring": "Parameters ---------- xy1 : (float, float) The first set of (x, y) coordinates for the line to pass through. xy2 : (float, float) or None The second set of (x, y) coordinates for the line to pass through. Both *xy2* and *slope* must be passed, but one of them must be None. slope : float or None The slope of the line. Both *xy2* and *slope* must be passed, but one of them must be None.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\lines.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:xy1 arg:xy2 arg:slope arguments arg arg arg arg arg Call Call If BoolOp BoolOp Compare Compare BoolOp Compare Compare Raise Call Assign Assign Assign"
  },
  {
    "library": "tensorflow",
    "name": "num_global_devices",
    "source_code": "@tf_export('experimental.dtensor.num_global_devices', v1=[])\ndef num_global_devices(device_type: str) -> int:\n    return num_local_devices(device_type) * num_clients()",
    "docstring": "Returns the number of devices of device_type in this DTensor cluster.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\dtensor\\python\\config.py",
    "ast_data": "FunctionDef name:num_global_devices arg:device_type arguments arg Return return:yes Call Call Call"
  },
  {
    "library": "kornia",
    "name": "apply_transform_class",
    "source_code": "def apply_transform_class(self, input: Tensor, params: Dict[str, Tensor], flags: Dict[str, Any], transform: Optional[Tensor]=None) -> Tensor:\n    return input",
    "docstring": "Process class tags corresponding to the inputs that are transformed.",
    "type": "method",
    "file_path": "kornia\\kornia\\augmentation\\_2d\\geometric\\elastic_transform.py",
    "ast_data": "FunctionDef name:apply_transform_class arg:self arg:input arg:params arg:flags arg:transform arguments arg arg arg arg arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_State",
    "source_code": "class _State(object):\n\n    def __init__(self):\n        self._value = {}\n\n    def __getitem__(self, key):\n        if key not in self._value:\n            self._value[key] = _StateStack(key)\n        return self._value[key]",
    "docstring": "Syntactic sugar for accessing an instance of a StateStack context manager. This structure offers syntactic sugar over a dict of stacks of objects of known type. These structures are useful to keep state during AST walks. Multiple different scopes can be tracked in parallel. For example: s = _State() s[foo].enter() s[bar].enter() # this will not affect s[foo] Element access has special semantics: * keys are a data type * element values are _StateStack(type=key) objects * missing elements are automatically added, similarly to defaultdict For example, the following block : _State s s[Foo] Is equivalent to: s = {} if Foo not in s: s[Foo] = Foo() s[Foo] See Base for how it's used.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\autograph\\pyct\\transformer.py",
    "ast_data": "ClassDef name:_State FunctionDef name:__init__ arg:self arguments arg Assign FunctionDef name:__getitem__ arg:self arg:key arguments arg arg If Compare Assign Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "forward",
    "source_code": "def forward(self):\n    self.views[self.figure].forward()\n    self.positions[self.figure].forward()",
    "docstring": "Forward one step in the stack of views and positions.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\backend_tools.py",
    "ast_data": "FunctionDef name:forward arg:self arguments arg Call Call"
  },
  {
    "library": "matplotlib",
    "name": "draw_quad_mesh",
    "source_code": "def draw_quad_mesh(self, gc, master_transform, meshWidth, meshHeight, coordinates, offsets, offsetTrans, facecolors, antialiased, edgecolors):\n    from matplotlib.collections import QuadMesh\n    paths = QuadMesh._convert_mesh_to_paths(coordinates)\n    if edgecolors is None:\n        edgecolors = facecolors\n    linewidths = np.array([gc.get_linewidth()], float)\n    return self.draw_path_collection(gc, master_transform, paths, [], offsets, offsetTrans, facecolors, edgecolors, linewidths, [], [antialiased], [None], 'screen')",
    "docstring": "Draw a quadmesh. The base (fallback) implementation converts the quadmesh to paths and then calls .",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\backend_bases.py",
    "ast_data": "FunctionDef name:draw_quad_mesh arg:self arg:gc arg:master_transform arg:meshWidth arg:meshHeight arg:coordinates arg:offsets arg:offsetTrans arg:facecolors arg:antialiased arg:edgecolors arguments arg arg arg arg arg arg arg arg arg arg arg Assign Call If Compare Assign Assign Call Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "configure",
    "source_code": "@doc_controls.do_not_doc_inheritable\n@deprecated(None, 'use `update_config_proto` instead.')\ndef configure(self, session_config=None, cluster_spec=None, task_type=None, task_id=None):\n    return self._extended._configure(session_config, cluster_spec, task_type, task_id)",
    "docstring": "DEPRECATED: use instead. Configures the strategy class. DEPRECATED: This method's functionality has been split into the strategy constructor and . In the future, we will allow passing cluster and config_proto to the constructor to configure the strategy. And can be used to update the config_proto based on the specific strategy.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\distribute_lib.py",
    "ast_data": "FunctionDef name:configure arg:self arg:session_config arg:cluster_spec arg:task_type arg:task_id arguments arg arg arg arg arg Return return:yes Call Call"
  },
  {
    "library": "scipy",
    "name": "_uncensor",
    "source_code": "def _uncensor(self):\n    data = np.concatenate((self._uncensored, self._left, self._right, self._interval.mean(axis=1)))\n    return data",
    "docstring": "This function is used when a non-censored version of the data is needed to create a rough estimate of the parameters of a distribution via the method of moments or some similar method. The data is \"uncensored\" by taking the given endpoints as the data for the left- or right-censored data, and the mean for the interval-censored data.",
    "type": "method",
    "file_path": "scipy\\scipy\\stats\\_censored_data.py",
    "ast_data": "FunctionDef name:_uncensor arg:self arguments arg Assign Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "get_accelerator_devices",
    "source_code": "def get_accelerator_devices(master, config_proto):\n    if context.executing_eagerly():\n        logical_devices = config.list_logical_devices()\n        devices = []\n        for d in logical_devices:\n            if d.device_type == 'CPU' or d.device_type == 'XLA_CPU':\n                continue\n            devices.append(session._DeviceAttributes(d.name, d.device_type, 0, 0))\n        return devices\n    else:\n        with ops.Graph().as_default():\n            with session.Session(master, config=config_proto) as s:\n                devices = s.list_devices()\n        return devices",
    "docstring": "Returns accelerator devices given a master and a configuration.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\cluster_resolver\\cluster_resolver.py",
    "ast_data": "FunctionDef name:get_accelerator_devices arg:master arg:config_proto arguments arg arg If Call Assign Call Assign For If BoolOp Compare Compare Call Call Return return:yes With Call Call With Call Assign Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_alloc_storage",
    "source_code": "def _alloc_storage(tensor: torch.Tensor, size: torch.Size) -> None:\n    with torch.no_grad():\n        if not torch.distributed._functional_collectives.is_torchdynamo_compiling():\n            already_allocated = tensor._typed_storage()._size() == size.numel()\n            if not already_allocated:\n                tensor_storage_size = tensor._typed_storage()._size()\n                _p_assert(tensor_storage_size == 0, 'Tensor storage should have been resized to be 0 but got PLACEHOLDEr')\n                tensor._typed_storage()._resize_(size.numel())",
    "docstring": "Allocate storage for `` if the storage was already allocated.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\utils.py",
    "ast_data": "FunctionDef name:_alloc_storage arg:tensor arg:size arguments arg arg With Call If Call Assign Compare Call Call Call If Assign Call Call Call Compare Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "get_generating_ops",
    "source_code": "def get_generating_ops(ts):\n    ts = make_list_of_t(ts, allow_graph=False)\n    return [t.op for t in ts]",
    "docstring": "Return all the generating ops of the tensors in . Args: ts: a list of Returns: A list of all the generating of the tensors in . Raises: TypeError: if cannot be converted to a list of .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\op_selector.py",
    "ast_data": "FunctionDef name:get_generating_ops arg:ts arguments arg Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "update_reorders_v2",
    "source_code": "def update_reorders_v2(output_file_path):\n    spec = tf_upgrade_v2.TFAPIChangeSpec()\n    reordered_function_names = spec.reordered_function_names\n    need_kwargs_function_names = spec.function_transformers.keys()\n    function_renames = spec.symbol_renames\n    all_reorders = collect_function_arg_names(reordered_function_names, need_kwargs_function_names, function_renames)\n    rename_lines = [get_reorder_line(name, arg_names) for name, arg_names in all_reorders.items()]\n    renames_file_text = '%sreorders = {\\n%s\\n}\\n' % (_FILE_HEADER, ',\\n'.join(sorted(rename_lines)))\n    file_io.write_string_to_file(output_file_path, renames_file_text)",
    "docstring": "Writes a Python dictionary mapping function name to argument order. Args: output_file_path: File path to write output to. Any existing contents would be replaced.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\tools\\compatibility\\update\\generate_v2_reorders_map.py",
    "ast_data": "FunctionDef name:update_reorders_v2 arg:output_file_path arguments arg Assign Call Assign Assign Call Assign Assign Call Assign Call Call Assign Call Call Call"
  },
  {
    "library": "pandas",
    "name": "_set_noconvert_columns",
    "source_code": "def _set_noconvert_columns(self) -> None:\n    assert self.orig_names is not None\n    names_dict = {x: i for i, x in enumerate(self.orig_names)}\n    col_indices = [names_dict[x] for x in self.names]\n    noconvert_columns = self._set_noconvert_dtype_columns(col_indices, self.names)\n    for col in noconvert_columns:\n        self._reader.set_noconvert(col)",
    "docstring": "Set the columns that should not undergo dtype conversions. Currently, any column that is involved with date parsing will not undergo such conversions.",
    "type": "method",
    "file_path": "pandas\\pandas\\io\\parsers\\c_parser_wrapper.py",
    "ast_data": "FunctionDef name:_set_noconvert_columns arg:self arguments arg Compare Assign Call Assign Assign Call For Call"
  },
  {
    "library": "pandas",
    "name": "get_values",
    "source_code": "def get_values(self, dtype: DtypeObj | None=None) -> np.ndarray:\n    raise AbstractMethodError(self)",
    "docstring": "return an internal format, currently just the ndarray this is often overridden to handle to_dense like operations",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\internals\\blocks.py",
    "ast_data": "FunctionDef name:get_values arg:self arg:dtype arguments arg arg Raise Call"
  },
  {
    "library": "pytorch",
    "name": "GradIncrementNestingCtxManagerVariable",
    "source_code": "class GradIncrementNestingCtxManagerVariable(ContextWrappingVariable):\n    _guards_singleton = Guard(GlobalStateSource(), GuardBuilder.FUNCTORCH_STACK_MATCH)\n\n    @staticmethod\n    def create(tx: 'InstructionTranslator', **kwargs):\n        var = GradIncrementNestingCtxManagerVariable(target_values=None, initial_values=None, **kwargs)\n        return var\n\n    def enter(self, tx):\n        install_guard(self._guards_singleton)\n        grad_level = torch._C._functorch._grad_increment_nesting()\n        self.set_cleanup_hook(tx, lambda: torch._C._functorch._grad_decrement_nesting())\n        self.proxy = tx.output.create_node('call_function', torch._C._functorch._grad_increment_nesting, (), {})\n        return variables.ConstantVariable.create(grad_level)\n\n    def exit(self, tx: 'InstructionTranslator', *args):\n        self.cleanup()\n        tx.output.create_node('call_function', torch._C._functorch._grad_decrement_nesting, (), {})\n        return variables.ConstantVariable.create(None)",
    "docstring": "represents torch.func.grad increment/decrement nesting",
    "type": "class",
    "file_path": "pytorch\\torch\\_dynamo\\variables\\ctx_manager.py",
    "ast_data": "ClassDef name:GradIncrementNestingCtxManagerVariable Assign Call Call FunctionDef name:create arg:tx arguments arg arg Assign Call Return return:yes FunctionDef name:enter arg:self arg:tx arguments arg arg Call Assign Call Call arguments Call Assign Call Return return:yes Call FunctionDef name:exit arg:self arg:tx arguments arg arg arg Call Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "counter_value",
    "source_code": "def counter_value(name: str):\n    return torch._C._lazy._counter_value(name)",
    "docstring": "Return the value of the counter with the speficied name",
    "type": "function",
    "file_path": "pytorch\\torch\\_lazy\\metrics.py",
    "ast_data": "FunctionDef name:counter_value arg:name arguments arg Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "get_majorticklines",
    "source_code": "def get_majorticklines(self):\n    lines = []\n    ticks = self.get_major_ticks()\n    for tick in ticks:\n        lines.append(tick.tick1line)\n        lines.append(tick.tick2line)\n    return cbook.silent_list('Line2D ticklines', lines)",
    "docstring": "Return this Axis' major tick lines as a list of \\s.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\axis.py",
    "ast_data": "FunctionDef name:get_majorticklines arg:self arguments arg Assign Assign Call For Call Call Return return:yes Call"
  },
  {
    "library": "numpy",
    "name": "Language",
    "source_code": "class Language(Enum):\n    Python = 0\n    Fortran = 1\n    C = 2",
    "docstring": "Used as Expr.tostring language argument.",
    "type": "class",
    "file_path": "numpy\\numpy\\f2py\\symbolic.py",
    "ast_data": "ClassDef name:Language Assign Assign Assign"
  },
  {
    "library": "pytorch",
    "name": "get_peak_memory_runtime_baseline",
    "source_code": "def get_peak_memory_runtime_baseline(graph: Graph) -> tuple[int, float]:\n    P_1 = graph.nodes[0]['param_per_module']\n    num_nodes = len(graph.nodes)\n    peak_mem = 0\n    for i in range(num_nodes):\n        TG_i = graph.nodes[i]['grad_total']\n        AG_i = graph.nodes[i]['act_grad_per_module']\n        TA_i = graph.nodes[i]['act_total']\n        peak_mem = max(peak_mem, P_1 + TG_i + AG_i + TA_i)\n    compute_time = graph.nodes[0]['fw_runtime_per_module'] + graph.nodes[0]['bw_runtime_per_module']\n    return (peak_mem, compute_time)",
    "docstring": "Get the baseline peak memory and runtime. Baseline here means there is no FSDP or AC. Memory includes the parameters, gradients, activations, and activation gradients. Memory does not include e.g., optimizer states, embedding tables, etc. Returns: int: peak memory in bytes float: compute time in ms",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\_tools\\ilp_utils.py",
    "ast_data": "FunctionDef name:get_peak_memory_runtime_baseline arg:graph arguments arg Assign Assign Call Assign For Call Assign Assign Assign Assign Call Assign Return return:yes"
  },
  {
    "library": "kornia",
    "name": "cam2pixel",
    "source_code": "def cam2pixel(cam_coords_src: Tensor, dst_proj_src: Tensor, eps: float=1e-12) -> Tensor:\n    if not len(cam_coords_src.shape) == 4 and cam_coords_src.shape[3] == 3:\n        raise ValueError(f'Input cam_coords_src has to be in the shape of BxHxWx3. Got {cam_coords_src.shape}')\n    if not len(dst_proj_src.shape) == 3 and dst_proj_src.shape[-2:] == (4, 4):\n        raise ValueError(f'Input dst_proj_src has to be in the shape of Bx4x4. Got {dst_proj_src.shape}')\n    point_coords: Tensor = transform_points(dst_proj_src[:, None], cam_coords_src)\n    x_coord: Tensor = point_coords[..., 0]\n    y_coord: Tensor = point_coords[..., 1]\n    z_coord: Tensor = point_coords[..., 2]\n    u_coord: Tensor = x_coord / (z_coord + eps)\n    v_coord: Tensor = y_coord / (z_coord + eps)\n    pixel_coords_dst: Tensor = stack([u_coord, v_coord], dim=-1)\n    return pixel_coords_dst",
    "docstring": "Transform coordinates in the camera frame to the pixel frame. Args: cam_coords_src: (x, y, z) coordinates defined in the first camera coordinates system. Shape must be BxHxWx3. dst_proj_src: the projection matrix between the reference and the non reference camera frame. Shape must be Bx4x4. eps: small value to avoid division by zero error. Returns: tensor of shape BxHxWx2 with (u, v) pixel coordinates.",
    "type": "function",
    "file_path": "kornia\\kornia\\geometry\\camera\\pinhole.py",
    "ast_data": "FunctionDef name:cam2pixel arg:cam_coords_src arg:dst_proj_src arg:eps arguments arg arg arg If BoolOp Compare Call Compare Raise Call If BoolOp Compare Call Compare Raise Call Call Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "pink",
    "source_code": "def pink() -> None:\n    set_cmap('pink')",
    "docstring": "Set the colormap to 'pink'. This changes the default colormap as well as the colormap of the current image if there is one. See `` for more information.",
    "type": "function",
    "file_path": "matplotlib\\lib\\matplotlib\\pyplot.py",
    "ast_data": "FunctionDef name:pink arguments Call"
  },
  {
    "library": "scipy",
    "name": "update",
    "source_code": "def update(self, delta_x, delta_grad):\n    raise NotImplementedError('The method ``update(delta_x, delta_grad)`` is not implemented.')",
    "docstring": "Update internal matrix. Update Hessian matrix or its inverse (depending on how 'approx_type' is defined) using information about the last evaluated points. Parameters ---------- delta_x : ndarray The difference between two points the gradient function have been evaluated at: ``.",
    "type": "method",
    "file_path": "scipy\\scipy\\optimize\\_hessian_update_strategy.py",
    "ast_data": "FunctionDef name:update arg:self arg:delta_x arg:delta_grad arguments arg arg arg Raise Call"
  },
  {
    "library": "authlib",
    "name": "validate_id_token_signing_alg_values_supported",
    "source_code": "def validate_id_token_signing_alg_values_supported(self):\n    values = self.get('id_token_signing_alg_values_supported')\n    if values is None:\n        raise ValueError('\"id_token_signing_alg_values_supported\" is required')\n    if not isinstance(values, list):\n        raise ValueError('\"id_token_signing_alg_values_supported\" MUST be JSON array')\n    if 'RS256' not in values:\n        raise ValueError('\"RS256\" MUST be included in \"id_token_signing_alg_values_supported\"')",
    "docstring": "REQUIRED. JSON array containing a list of the JWS signing algorithms (alg values) supported by the OP for the ID Token to encode the Claims in a JWT [JWT]. The algorithm RS256 MUST be included. The value none MAY be supported, but MUST NOT be used unless the Response Type used returns no ID Token from the Authorization Endpoint (such as when using the Authorization Code Flow).",
    "type": "method",
    "file_path": "authlib\\authlib\\oidc\\discovery\\models.py",
    "ast_data": "FunctionDef name:validate_id_token_signing_alg_values_supported arg:self arguments arg Assign Call If Compare Raise Call If Call Raise Call If Compare Raise Call"
  },
  {
    "library": "scrapy",
    "name": "equal_attributes",
    "source_code": "def equal_attributes(obj1: Any, obj2: Any, attributes: list[str | Callable[[Any], Any]] | None) -> bool:\n    warnings.warn('The equal_attributes function is deprecated and will be removed in a future version of Scrapy.', category=ScrapyDeprecationWarning, stacklevel=2)\n    if not attributes:\n        return False\n    temp1, temp2 = (object(), object())\n    for attr in attributes:\n        if callable(attr):\n            if attr(obj1) != attr(obj2):\n                return False\n        elif getattr(obj1, attr, temp1) != getattr(obj2, attr, temp2):\n            return False\n    return True",
    "docstring": "Compare two objects attributes",
    "type": "function",
    "file_path": "scrapy\\scrapy\\utils\\python.py",
    "ast_data": "FunctionDef name:equal_attributes arg:obj1 arg:obj2 arg:attributes arguments arg arg arg Call If Return return:yes Assign Call Call For If Call If Compare Call Call Return return:yes If Compare Call Call Return return:yes Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "original_args",
    "source_code": "@property\ndef original_args(self):\n    return self._original_args",
    "docstring": "A object holding the original arguments of . If user called , then this field is equal to SessionRunArgs(a, b). Returns: A object",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\training\\session_run_hook.py",
    "ast_data": "FunctionDef name:original_args arg:self arguments arg Return return:yes"
  },
  {
    "library": "kornia",
    "name": "adjust_hue_raw",
    "source_code": "def adjust_hue_raw(image: Tensor, factor: Union[float, Tensor]) -> Tensor:\n    KORNIA_CHECK_IS_TENSOR(image, 'Expected shape (*, H, W)')\n    KORNIA_CHECK(isinstance(factor, (float, Tensor)), f'The factor should be a float number or Tensor in the range between [-PI, PI]. Got {type(factor)}')\n    if isinstance(factor, float):\n        factor = torch.as_tensor(factor)\n    factor = factor.to(image.device, image.dtype)\n    while len(factor.shape) != len(image.shape):\n        factor = factor[..., None]\n    h, s, v = torch.chunk(image, chunks=3, dim=-3)\n    divisor: float = 2 * pi\n    h_out: Tensor = torch.fmod(h + factor, divisor)\n    out: Tensor = torch.cat([h_out, s, v], dim=-3)\n    return out",
    "docstring": "Adjust hue of an image. Expecting image to be in hsv format already.",
    "type": "function",
    "file_path": "kornia\\kornia\\enhance\\adjust.py",
    "ast_data": "FunctionDef name:adjust_hue_raw arg:image arg:factor arguments arg arg Call Call Call Call If Call Assign Call Assign Call While Compare Call Call Assign Assign Call Call Call Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "predict",
    "source_code": "def predict(self, T):\n    return self._transform(T)",
    "docstring": "Predict new data by linear interpolation. Parameters ---------- T : array-like of shape (n_samples,) or (n_samples, 1) Data to transform. Returns ------- y_pred : ndarray of shape (n_samples,) Transformed data.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\isotonic.py",
    "ast_data": "FunctionDef name:predict arg:self arg:T arguments arg arg Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "where",
    "source_code": "@final\ndef where(self, cond, other=None) -> Index:\n    if isinstance(self, ABCMultiIndex):\n        raise NotImplementedError('.where is not supported for MultiIndex operations')\n    cond = np.asarray(cond, dtype=bool)\n    return self.putmask(~cond, other)",
    "docstring": "Replace values where the condition is False. The replacement is taken from other. Parameters ---------- cond : bool array-like with the same length as self Condition to select the values on. other : scalar, or array-like, default None Replacement if the condition is False. Returns ------- pandas.Index A copy of self with values replaced from other where the condition is False. See Also -------- Series.where : Same method for Series. DataFrame.where : Same method for DataFrame. Examples -------- >>> idx = pd.Index([\"car\", \"bike\", \"train\", \"tractor\"]) >>> idx Index(['car', 'bike', 'train', 'tractor'], dtype='object') >>> idx.where(idx.isin([\"car\", \"train\"]), \"other\") Index(['car', 'other', 'train', 'other'], dtype='object')",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\indexes\\base.py",
    "ast_data": "FunctionDef name:where arg:self arg:cond arg:other arguments arg arg arg If Call Raise Call Assign Call Return return:yes Call"
  },
  {
    "library": "kornia",
    "name": "get_channel_conf",
    "source_code": "def get_channel_conf(self, num_upsample: int) -> List[int]:\n    if num_upsample == 2:\n        return [256, 64, 16]\n    return [256, 64, 16, 4]",
    "docstring": "Get num of channels based on number of upsampling.",
    "type": "method",
    "file_path": "kornia\\kornia\\feature\\sold2\\backbones.py",
    "ast_data": "FunctionDef name:get_channel_conf arg:self arg:num_upsample arguments arg arg If Compare Return return:yes Return return:yes"
  },
  {
    "library": "scipy",
    "name": "primasum",
    "source_code": "def primasum(x, axis=None):\n    if not USE_NAIVE_MATH:\n        return np.sum(x, axis=axis)\n    if axis is None:\n        if x.ndim == 2:\n            return sum(primasum(x, axis=0))\n        else:\n            return sum(x)\n    elif axis == 0:\n        result = np.zeros(x.shape[1])\n        for i in range(x.shape[1]):\n            result[i] = sum(x[:, i])\n        return result\n    elif axis == 1:\n        result = np.zeros(x.shape[0])\n        for i in range(x.shape[0]):\n            result[i] = sum(x[i, :])\n        return result",
    "docstring": "According to its documentation, np.sum will sometimes do partial pairwise summation. For our purposes, when comparing, we want don't want to do anything fancy, and we just want to add things up one at a time.",
    "type": "function",
    "file_path": "scipy\\scipy\\_lib\\pyprima\\pyprima\\src\\pyprima\\common\\linalg.py",
    "ast_data": "FunctionDef name:primasum arg:x arg:axis arguments arg arg If Return return:yes Call If Compare If Compare Return return:yes Call Call Return return:yes Call If Compare Assign Call For Call Assign Call Return return:yes If Compare Assign Call For Call Assign Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "unshard",
    "source_code": "def unshard(self):\n    if not self.needs_unshard():\n        unsharded_flat_param = self._get_padded_unsharded_flat_param() if self.uses_sharded_strategy else self.flat_param\n        self._use_unsharded_flat_param(unsharded_flat_param)\n        return\n    unsharded_flat_param = self._alloc_padded_unsharded_flat_param()\n    padded_unsharded_flat_param = self._all_gather_flat_param(unsharded_flat_param)\n    self._use_unsharded_flat_param(padded_unsharded_flat_param)",
    "docstring": "Run the unshard logic. This includes all-gathering the flat parameter and switching to using the unsharded flat parameter. If the handle does not need unsharding, then this only switches to using the unsharded flat parameter. For `summon_full_params` and the handle uses parameter mixed precision, then the parameter is forced to full precision.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\fsdp\\_flat_param.py",
    "ast_data": "FunctionDef name:unshard arg:self arguments arg If Call Assign Call Call Return return:no Assign Call Assign Call Call"
  },
  {
    "library": "pytorch",
    "name": "_register_backend_select_dispatcher",
    "source_code": "def _register_backend_select_dispatcher(self, device_arg_index: int):\n\n    def backend_select(keyset, *args, **kwargs):\n        device = args[device_arg_index].type\n        if device not in self._backend_fns:\n            raise RuntimeError(f'{self._name} does not have a kernel registered for {device}. Please use register_kernel to do so.')\n        dispatch_key = _C._dispatch_key_for_device(device)\n        dispatch_key = getattr(_C.DispatchKey, dispatch_key)\n        return self._opoverload.redispatch(_C.DispatchKeySet(dispatch_key), *args, **kwargs)\n    self._lib.impl(self._name, backend_select, 'BackendSelect', with_keyset=True)",
    "docstring": "Switch on the device argument to select the correct backend to dispatch to.",
    "type": "method",
    "file_path": "pytorch\\torch\\_library\\custom_ops.py",
    "ast_data": "FunctionDef name:_register_backend_select_dispatcher arg:self arg:device_arg_index arguments arg arg FunctionDef name:backend_select arg:keyset arguments arg arg arg Assign If Compare Raise Call Assign Call Assign Call Return return:yes Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "total_run_calls",
    "source_code": "@property\n@deprecated(None, 'Track steps using a tf.Variable saved in checkpoint instead.')\n@doc_controls.do_not_generate_docs\ndef total_run_calls(self):\n    if self._platform_device == failure_handling_util.PlatformDevice.INTERNAL_TPU:\n        raise NotImplementedError('Please create variables saved in checkpoint to keep track of steps and epochs.')\n    return self._run_counter",
    "docstring": "Returns the number of times is called. DEPRECATED: user should track total steps themselves, as this API provides little expressivity gain but could easily be misused and incurs extra synchronization cost for TPUStrategy users. This value tracks the number of all calls to including those before the program is restarted and the training is restored, by saving and reading the value in the checkpoint. A user can compute their total number of iterations by , while should be one for users. They can also use this value to infer the starting epoch and step after training restores, as shown in the example above.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\failure_handling\\failure_handling.py",
    "ast_data": "FunctionDef name:total_run_calls arg:self arguments arg If Compare Raise Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_get_shape",
    "source_code": "def _get_shape(tensor):\n    shape = tensor.shape.as_list()\n    none_indices = [i for i, d in enumerate(shape) if d is None]\n    if none_indices:\n        shape_tensor = array_ops.shape(tensor)\n        for i in none_indices:\n            shape[i] = shape_tensor[i]\n    return shape",
    "docstring": "Like get_shape().as_list(), but explicitly queries the shape of a tensor if necessary to ensure that the returned value contains no unknown value.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\special_math_ops.py",
    "ast_data": "FunctionDef name:_get_shape arg:tensor arguments arg Assign Call Assign Call Compare If Assign Call For Assign Return return:yes"
  },
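To see the static/dynamic mixing in action, here is a small self-contained sketch (illustrative usage, not from the TensorFlow source) that resolves an unknown batch dimension the same way `_get_shape` does:

```python
import tensorflow as tf

@tf.function(input_signature=[tf.TensorSpec([None, 3], tf.float32)])
def row_count(x):
    shape = x.shape.as_list()      # [None, 3]: batch dim unknown statically
    if shape[0] is None:
        shape[0] = tf.shape(x)[0]  # fall back to the dynamic shape tensor
    return shape[0]                # a scalar Tensor

print(row_count(tf.zeros([4, 3])).numpy())  # 4
```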
  {
    "library": "pytorch",
    "name": "ConcaterMapDataPipe",
    "source_code": "@functional_datapipe('concat')\nclass ConcaterMapDataPipe(MapDataPipe):\n    datapipes: tuple[MapDataPipe]\n\n    def __init__(self, *datapipes: MapDataPipe):\n        if len(datapipes) == 0:\n            raise ValueError('Expected at least one DataPipe, but got nothing')\n        if not all((isinstance(dp, MapDataPipe) for dp in datapipes)):\n            raise TypeError('Expected all inputs to be `MapDataPipe`')\n        if not all((isinstance(dp, Sized) for dp in datapipes)):\n            raise TypeError('Expected all inputs to be `Sized`')\n        self.datapipes = datapipes\n\n    def __getitem__(self, index) -> _T_co:\n        offset = 0\n        for dp in self.datapipes:\n            if index - offset < len(dp):\n                return dp[index - offset]\n            else:\n                offset += len(dp)\n        raise IndexError(f'Index {index} is out of range.')\n\n    def __len__(self) -> int:\n        return sum((len(dp) for dp in self.datapipes))",
    "docstring": "Concatenate multiple Map DataPipes (functional name: `ConcatMapDataPipe` would refer to elements of the first DataPipe, and 5 to 9 would refer to elements of the second DataPipe. Args: datapipes: Map DataPipes being concatenated Example: >>> # xdoctest: +SKIP >>> from torchdata.datapipes.map import SequenceWrapper >>> dp1 = SequenceWrapper(range(3)) >>> dp2 = SequenceWrapper(range(3)) >>> concat_dp = dp1.concat(dp2) >>> list(concat_dp) [0, 1, 2, 0, 1, 2]",
    "type": "class",
    "file_path": "pytorch\\torch\\utils\\data\\datapipes\\map\\combining.py",
    "ast_data": "ClassDef name:ConcaterMapDataPipe FunctionDef name:__init__ arg:self arguments arg arg If Compare Call Raise Call If Call Call Raise Call If Call Call Raise Call Assign FunctionDef name:__getitem__ arg:self arg:index arguments arg arg Assign For If Compare Call Return return:yes Call Raise Call FunctionDef name:__len__ arg:self arguments arg Return return:yes Call Call Call"
  },
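The `__getitem__` offset walk is easy to see in isolation; a minimal sketch with plain sequences standing in for the DataPipes:

```python
def concat_getitem(datapipes, index):
    # Walk the pipes, shifting the index by the lengths already passed.
    offset = 0
    for dp in datapipes:
        if index - offset < len(dp):
            return dp[index - offset]
        offset += len(dp)
    raise IndexError(f"Index {index} is out of range.")

print(concat_getitem([range(3), range(3)], 4))  # 1: element 1 of the second pipe
```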
  {
    "library": "scipy",
    "name": "random",
    "source_code": "def random(self, n: IntNumber=1) -> np.ndarray:\n    sample = np.empty((n, len(self.pvals)))\n    for i in range(n):\n        base_draws = self.engine.random(self.n_trials).ravel()\n        p_cumulative = np.empty_like(self.pvals, dtype=float)\n        _fill_p_cumulative(np.array(self.pvals, dtype=float), p_cumulative)\n        sample_ = np.zeros_like(self.pvals, dtype=np.intp)\n        _categorize(base_draws, p_cumulative, sample_)\n        sample[i] = sample_\n    return sample",
    "docstring": "Draw QMC samples from the multinomial distribution. Parameters ---------- n : int, optional Number of samples to generate in the parameter space. Default is 1. Returns ------- samples : array_like (n, pvals) Sample.",
    "type": "method",
    "file_path": "scipy\\scipy\\stats\\_qmc.py",
    "ast_data": "FunctionDef name:random arg:self arg:n arguments arg arg Assign Call Call For Call Assign Call Call Assign Call Call Call Assign Call Call Assign Return return:yes"
  },
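The `_fill_p_cumulative`/`_categorize` pair amounts to inverse-CDF sampling; a rough NumPy equivalent (with `np.random` standing in for the QMC engine) looks like:

```python
import numpy as np

pvals = np.array([0.2, 0.5, 0.3])
draws = np.random.default_rng(0).random(1000)   # stand-in for self.engine.random
# Map each uniform draw to the category whose cumulative-probability bin it falls in.
cats = np.searchsorted(np.cumsum(pvals), draws, side='right')
sample = np.bincount(cats, minlength=len(pvals))
print(sample, sample.sum())  # per-category counts summing to 1000
```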
  {
    "library": "matplotlib",
    "name": "check_isinstance",
    "source_code": "def check_isinstance(types, /, **kwargs):\n    none_type = type(None)\n    types = (types,) if isinstance(types, type) else (none_type,) if types is None else tuple((none_type if tp is None else tp for tp in types))\n\n    def type_name(tp):\n        return 'None' if tp is none_type else tp.__qualname__ if tp.__module__ == 'builtins' else f'{tp.__module__}.{tp.__qualname__}'\n    for k, v in kwargs.items():\n        if not isinstance(v, types):\n            names = [*map(type_name, types)]\n            if 'None' in names:\n                names.remove('None')\n                names.append('None')\n            raise TypeError('{!r} must be an instance of {}, not a {}'.format(k, ', '.join(names[:-1]) + ' or ' + names[-1] if len(names) > 1 else names[0], type_name(type(v))))",
    "docstring": "For each *key, value* pair in *kwargs*, check that *value* is an instance of one of *types*; if not, raise an appropriate TypeError. As a special case, a `` entry in *types* is treated as NoneType. Examples -------- >>> _api.check_isinstance((SomeClass, None), arg=arg)",
    "type": "function",
    "file_path": "matplotlib\\lib\\matplotlib\\_api\\__init__.py",
    "ast_data": "FunctionDef name:check_isinstance arguments arg arg Assign Call Assign Call Compare Call Compare FunctionDef name:type_name arg:tp arguments arg Return return:yes Compare Compare For Call If Call Assign Call If Compare Call Call Raise Call Call Compare Call Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "DistributionCombination",
    "source_code": "class DistributionCombination(combinations_lib.TestCombination):\n\n    def should_execute_combination(self, kwargs):\n        distributions = [v for v in kwargs.values() if isinstance(v, NamedDistribution)]\n        if test_util.is_xla_enabled() and any((d.no_xla for d in distributions)):\n            return (False, 'n/a: skipping strategy combination with no_xla=True in XLA tests')\n        return (True, None)\n\n    def parameter_modifiers(self):\n        return [DistributionParameter(), combinations_lib.OptionalParameter('use_var_policy')]",
    "docstring": "Sets up distribution strategy for tests.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\combinations.py",
    "ast_data": "ClassDef name:DistributionCombination FunctionDef name:should_execute_combination arg:self arg:kwargs arguments arg arg Assign Call Call If BoolOp Call Call Return return:yes Return return:yes FunctionDef name:parameter_modifiers arg:self arguments arg Return return:yes Call Call"
  },
  {
    "library": "matplotlib",
    "name": "get_transformed_clip_path_and_affine",
    "source_code": "def get_transformed_clip_path_and_affine(self):\n    if self._clippath is not None:\n        return self._clippath.get_transformed_path_and_affine()\n    return (None, None)",
    "docstring": "Return the clip path with the non-affine part of its transformation applied, and the remaining affine part of its transformation.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\artist.py",
    "ast_data": "FunctionDef name:get_transformed_clip_path_and_affine arg:self arguments arg If Compare Return return:yes Call Return return:no"
  },
  {
    "library": "tensorflow",
    "name": "_add_attributes_to_object_graph",
    "source_code": "def _add_attributes_to_object_graph(trackable_objects, object_graph_proto, node_ids, object_names, object_map, call_with_mapped_captures, saveables_cache):\n    for checkpoint_id, (trackable, unused_object_proto) in enumerate(zip(trackable_objects, object_graph_proto.nodes)):\n        assert node_ids[trackable] == checkpoint_id\n    checkpoint_factory_map, unmapped_registered_savers = get_checkpoint_factories_and_keys(object_names, object_map)\n    registered_savers = _add_attributes_to_object_graph_for_registered_savers(unmapped_registered_savers, object_graph_proto, node_ids, object_map)\n    named_saveable_objects, feed_additions = generate_saveable_objects(checkpoint_factory_map, object_graph_proto, node_ids, object_map, call_with_mapped_captures, saveables_cache)\n    return (named_saveable_objects, feed_additions, registered_savers)",
    "docstring": "Create saveables/savers and corresponding protos in the object graph.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\checkpoint\\save_util_v1.py",
    "ast_data": "FunctionDef name:_add_attributes_to_object_graph arg:trackable_objects arg:object_graph_proto arg:node_ids arg:object_names arg:object_map arg:call_with_mapped_captures arg:saveables_cache arguments arg arg arg arg arg arg arg For Call Call Compare Assign Call Assign Call Assign Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "as_list",
    "source_code": "def as_list(self):\n    return [*self._axes]",
    "docstring": "List the Axes that have been added to the figure.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\figure.py",
    "ast_data": "FunctionDef name:as_list arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_LiteSingleOperand",
    "source_code": "class _LiteSingleOperand(_LiteOperand):\n\n    def __init__(self, node):\n        _LiteOperand.__init__(self)\n        self.node = node\n        self.name = _tensor_name_base(node.name)\n\n    def flatten(self):\n        return [self.name]\n\n    def aggregate_and_return_name_for_input(self, out_graphdef):\n        return self.name\n\n    def aggregate_and_return_name_for_output(self, fused_op_name, index, out_graphdef):\n        output_node = _copy.deepcopy(self.node)\n        del output_node.input[:]\n        output_node.input.append(_tensorflow_output_name(fused_op_name, index))\n        out_graphdef.node.extend([output_node])\n        return self.node.attr['type'].i\n\n    def __str__(self):\n        return str(self.name)",
    "docstring": "A simple operand that is non-aggregated (i.e. most hints).",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\lite\\python\\op_hint.py",
    "ast_data": "ClassDef name:_LiteSingleOperand FunctionDef name:__init__ arg:self arg:node arguments arg arg Call Assign Assign Call FunctionDef name:flatten arg:self arguments arg Return return:yes FunctionDef name:aggregate_and_return_name_for_input arg:self arg:out_graphdef arguments arg arg Return return:yes FunctionDef name:aggregate_and_return_name_for_output arg:self arg:fused_op_name arg:index arg:out_graphdef arguments arg arg arg arg Assign Call Call Call Call Return return:yes FunctionDef name:__str__ arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "index_class",
    "source_code": "@cache_readonly\ndef index_class(self) -> type_t[Index]:\n    from pandas import Index\n    return Index",
    "docstring": "The Index subclass to return from Index.__new__ when this dtype is encountered.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\dtypes\\base.py",
    "ast_data": "FunctionDef name:index_class arg:self arguments arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "set_default_device",
    "source_code": "def set_default_device(device: 'Device') -> None:\n    global _GLOBAL_DEVICE_CONTEXT\n    if hasattr(_GLOBAL_DEVICE_CONTEXT, 'device_context'):\n        device_context = _GLOBAL_DEVICE_CONTEXT.device_context\n        if device_context is not None:\n            device_context.__exit__(None, None, None)\n    if device is None:\n        device_context = None\n    else:\n        from torch.utils._device import DeviceContext\n        device_context = DeviceContext(device)\n        device_context.__enter__()\n    _GLOBAL_DEVICE_CONTEXT.device_context = device_context",
    "docstring": "Sets the default `torch.cuda.set_devicetorch.from_numpytorch.frombuffer` Args: device (device or string): the device to set as default Example:: >>> # xdoctest: +SKIP(\"requires cuda, changes global state\") >>> torch.get_default_device() device(type='cpu') >>> torch.set_default_device('cuda') # current device is 0 >>> torch.get_default_device() device(type='cuda', index=0) >>> torch.set_default_device('cuda') >>> torch.cuda.set_device('cuda:1') # current device is 1 >>> torch.get_default_device() device(type='cuda', index=1) >>> torch.set_default_device('cuda:1') >>> torch.get_default_device() device(type='cuda', index=1)",
    "type": "function",
    "file_path": "pytorch\\torch\\__init__.py",
    "ast_data": "FunctionDef name:set_default_device arg:device arguments arg If Call Assign If Compare Call If Compare Assign Assign Call Call Assign"
  },
  {
    "library": "django",
    "name": "__init__",
    "source_code": "def __init__(self, ptr, z=False):\n    if not isinstance(ptr, CS_PTR):\n        raise TypeError('Coordinate sequence should initialize with a CS_PTR.')\n    self._ptr = ptr\n    self._z = z",
    "docstring": "Initialize from a GEOS pointer.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\geos\\coordseq.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:ptr arg:z arguments arg arg arg If Call Raise Call Assign Assign"
  },
  {
    "library": "django",
    "name": "SetPasswordForm",
    "source_code": "class SetPasswordForm(SetPasswordMixin, forms.Form):\n    new_password1, new_password2 = SetPasswordMixin.create_password_fields(label1=_('New password'), label2=_('New password confirmation'))\n\n    def __init__(self, user, *args, **kwargs):\n        self.user = user\n        super().__init__(*args, **kwargs)\n\n    def clean(self):\n        self.validate_passwords('new_password1', 'new_password2')\n        self.validate_password_for_user(self.user, 'new_password2')\n        return super().clean()\n\n    def save(self, commit=True):\n        return self.set_password_and_save(self.user, 'new_password1', commit=commit)",
    "docstring": "A form that lets a user set their password without entering the old password",
    "type": "class",
    "file_path": "django\\django\\contrib\\auth\\forms.py",
    "ast_data": "ClassDef name:SetPasswordForm Assign Call Call Call FunctionDef name:__init__ arg:self arg:user arguments arg arg arg arg Assign Call Call FunctionDef name:clean arg:self arguments arg Call Call Return return:yes Call Call FunctionDef name:save arg:self arg:commit arguments arg arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "getdoc",
    "source_code": "def getdoc(object):\n    return _inspect.getdoc(object)",
    "docstring": "TFDecorator-aware replacement for inspect.getdoc. Args: object: An object, possibly decorated. Returns: The docstring associated with the object. The outermost-decorated object is intended to have the most complete documentation, so the decorated parameter is not unwrapped.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\utils\\tf_inspect.py",
    "ast_data": "FunctionDef name:getdoc arg:object arguments arg Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "_lexsort_depth",
    "source_code": "@cache_readonly\ndef _lexsort_depth(self) -> int:\n    if self.sortorder is not None:\n        return self.sortorder\n    return _lexsort_depth(self.codes, self.nlevels)",
    "docstring": "Compute and return the lexsort_depth, the number of levels of the MultiIndex that are sorted lexically Returns ------- int",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\indexes\\multi.py",
    "ast_data": "FunctionDef name:_lexsort_depth arg:self arguments arg If Compare Return return:yes Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "fit",
    "source_code": "@_fit_context(prefer_skip_nested_validation=True)\ndef fit(self, X, y=None):\n    X = validate_data(self, X)\n    if self.assume_centered:\n        self.location_ = np.zeros(X.shape[1])\n    else:\n        self.location_ = X.mean(0)\n    covariance = empirical_covariance(X, assume_centered=self.assume_centered)\n    self._set_covariance(covariance)\n    return self",
    "docstring": "Fit the maximum likelihood covariance estimator to X. Parameters ---------- X : array-like of shape (n_samples, n_features) Training data, where is the number of samples and is the number of features. y : Ignored Not used, present for API consistency by convention. Returns ------- self : object Returns the instance itself.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\covariance\\_empirical_covariance.py",
    "ast_data": "FunctionDef name:fit arg:self arg:X arg:y arguments arg arg arg Assign Call If Assign Call Assign Call Assign Call Call Return return:yes Call"
  },
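Typical usage of the estimator this `fit` belongs to; a minimal sketch with synthetic data:

```python
import numpy as np
from sklearn.covariance import EmpiricalCovariance

X = np.random.default_rng(0).normal(size=(200, 3))
cov = EmpiricalCovariance().fit(X)   # estimates location_ and covariance_
print(cov.location_.shape, cov.covariance_.shape)  # (3,) (3, 3)
```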
  {
    "library": "seaborn",
    "name": "share_axis",
    "source_code": "def share_axis(ax0, ax1, which):\n    if _version_predates(mpl, '3.5'):\n        group = getattr(ax0, f'get_shared_{which}_axes')()\n        group.join(ax1, ax0)\n    else:\n        getattr(ax1, f'share{which}')(ax0)",
    "docstring": "Handle changes to post-hoc axis sharing.",
    "type": "function",
    "file_path": "seaborn\\seaborn\\_compat.py",
    "ast_data": "FunctionDef name:share_axis arg:ax0 arg:ax1 arg:which arguments arg arg arg If Call Assign Call Call Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "rename_",
    "source_code": "def rename_(self, *names, **rename_map):\n    if has_torch_function_unary(self):\n        return handle_torch_function(Tensor.rename_, (self,), self, *names, **rename_map)\n    return update_names(self, names, rename_map, inplace=True)",
    "docstring": "In-place version of :meth:.",
    "type": "method",
    "file_path": "pytorch\\torch\\_tensor.py",
    "ast_data": "FunctionDef name:rename_ arg:self arguments arg arg arg If Call Return return:yes Call Return return:yes Call"
  },
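A quick named-tensor example of the in-place rename (named tensors are a prototype feature):

```python
import torch

t = torch.zeros(2, 3, names=('N', 'C'))
t.rename_(C='channels')   # in-place rename via the keyword rename map
print(t.names)            # ('N', 'channels')
```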
  {
    "library": "pytorch",
    "name": "unoptimized_b2b_gemm",
    "source_code": "def unoptimized_b2b_gemm(is_left_assoc: bool, subgraph: Subgraph, A: torch.Tensor, B: torch.Tensor, C: torch.Tensor, *, out: torch.Tensor) -> torch.Tensor:\n    if is_left_assoc:\n        torch.mm(subgraph.graph_module(torch.mm(A, B)), C, out=out)\n    else:\n        torch.mm(A, subgraph.graph_module(torch.mm(B, C)), out=out)\n    return out",
    "docstring": "The unoptimized version is used as a fallback when the b2b_gemm kernel is not beneficial.",
    "type": "function",
    "file_path": "pytorch\\torch\\_inductor\\fx_passes\\b2b_gemm.py",
    "ast_data": "FunctionDef name:unoptimized_b2b_gemm arg:is_left_assoc arg:subgraph arg:A arg:B arg:C arguments arg arg arg arg arg arg If Call Call Call Call Call Call Return return:yes"
  },
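The two association orders, with an elementwise function standing in for the fused subgraph; a sketch of what the fallback computes:

```python
import torch

A, B, C = (torch.randn(8, 8) for _ in range(3))
f = torch.relu  # stand-in for subgraph.graph_module

out_left = torch.mm(f(torch.mm(A, B)), C)   # is_left_assoc=True:  f(A @ B) @ C
out_right = torch.mm(A, f(torch.mm(B, C)))  # is_left_assoc=False: A @ f(B @ C)
```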
  {
    "library": "tensorflow",
    "name": "is_mapping",
    "source_code": "@tf_export('__internal__.nest.is_mapping', v1=[])\ndef is_mapping(obj):\n    return _is_mapping(obj)",
    "docstring": "Returns a true if its input is a collections.Mapping.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\util\\nest.py",
    "ast_data": "FunctionDef name:is_mapping arg:obj arguments arg Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_variant_type_id",
    "source_code": "def _variant_type_id(t):\n    if t.dtype != dtypes.variant:\n        return None\n    shapes_and_types = _variant_handle_data(t)\n    if shapes_and_types is None or not shapes_and_types:\n        return None\n    return shapes_and_types[0].type.type_id",
    "docstring": "Returns the full_type_pb2 type of , or None if it is not available.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\parallel_for\\pfor.py",
    "ast_data": "FunctionDef name:_variant_type_id arg:t arguments arg If Compare Return return:no Assign Call If BoolOp Compare Return return:no Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "save_figure",
    "source_code": "def save_figure(self, *args):\n    self.canvas.send_event('save')\n    return self.UNKNOWN_SAVED_STATUS",
    "docstring": "Save the current figure.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\backends\\backend_webagg_core.py",
    "ast_data": "FunctionDef name:save_figure arg:self arguments arg arg Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "Joinable",
    "source_code": "class Joinable(ABC):\n\n    @abstractmethod\n    def __init__(self) -> None:\n        super().__init__()\n        self._join_config = _JoinConfig.construct_disabled_join_config()\n\n    @abstractmethod\n    def join_hook(self, **kwargs) -> JoinHook:\n        ...\n\n    @property\n    @abstractmethod\n    def join_device(self) -> torch.device:\n        ...\n\n    @property\n    @abstractmethod\n    def join_process_group(self) -> Any:\n        ...",
    "docstring": "This defines an abstract base class for joinable classes. A joinable class (inheriting from :class:) should implement :meth:, which returns a :class: instance, in addition to :meth: and :meth: that return device and process group information, respectively.",
    "type": "class",
    "file_path": "pytorch\\torch\\distributed\\algorithms\\join.py",
    "ast_data": "ClassDef name:Joinable FunctionDef name:__init__ arg:self arguments arg Call Call Assign Call FunctionDef name:join_hook arg:self arguments arg arg FunctionDef name:join_device arg:self arguments arg FunctionDef name:join_process_group arg:self arguments arg"
  },
  {
    "library": "scikit-learn",
    "name": "fit",
    "source_code": "@_fit_context(prefer_skip_nested_validation=True)\ndef fit(self, X, y, sample_weight=None):\n    X, y = validate_data(self, X, y, accept_sparse='csr', dtype=np.float64, order='C', accept_large_sparse=False)\n    penalty = 'l2'\n    _dual = _validate_dual_parameter(self.dual, self.loss, penalty, 'ovr', X)\n    self.coef_, self.intercept_, n_iter_ = _fit_liblinear(X, y, self.C, self.fit_intercept, self.intercept_scaling, None, penalty, _dual, self.verbose, self.max_iter, self.tol, self.random_state, loss=self.loss, epsilon=self.epsilon, sample_weight=sample_weight)\n    self.coef_ = self.coef_.ravel()\n    self.n_iter_ = n_iter_.max().item()\n    return self",
    "docstring": "Fit the model according to the given training data. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) Training vector, where is the number of samples and is the number of features. y : array-like of shape (n_samples,) Target vector relative to X. sample_weight : array-like of shape (n_samples,), default=None Array of weights that are assigned to individual samples. If not provided, then each sample is given unit weight. .. versionadded:: 0.18 Returns ------- self : object An instance of the estimator.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\svm\\_classes.py",
    "ast_data": "FunctionDef name:fit arg:self arg:X arg:y arg:sample_weight arguments arg arg arg arg Assign Call Assign Assign Call Assign Call Assign Call Assign Call Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, config=None, start=True):\n    config = config or DispatcherConfig()\n    if config.fault_tolerant_mode and (not config.work_dir):\n        raise ValueError('Cannot enable fault tolerant mode without configuring a work dir. Make sure to set `work_dir` in the `config` object passed to `DispatcherServer`.')\n    self._config = config\n    if isinstance(config, service_config_pb2.DispatcherConfig):\n        config_proto = config\n    else:\n        config_proto = service_config_pb2.DispatcherConfig(port=config.port, protocol=config.protocol, work_dir=config.work_dir, fault_tolerant_mode=config.fault_tolerant_mode, worker_addresses=config.worker_addresses, job_gc_check_interval_ms=config.job_gc_check_interval_ms, job_gc_timeout_ms=config.job_gc_timeout_ms, worker_timeout_ms=config.worker_timeout_ms, worker_max_concurrent_snapshots=config.worker_max_concurrent_snapshots)\n    self._server = _pywrap_server_lib.TF_DATA_NewDispatchServer(config_proto.SerializeToString())\n    if start:\n        self._server.start()",
    "docstring": "Creates a new dispatch server. Args: config: (Optional.) A configuration. If , the dispatcher will use default configuration values. start: (Optional.) Boolean, indicating whether to start the server after creating it. Defaults to True.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\data\\experimental\\service\\server_lib.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:config arg:start arguments arg arg arg Assign BoolOp Call If BoolOp Raise Call Assign If Call Assign Assign Call Assign Call Call If Call"
  },
  {
    "library": "tensorflow",
    "name": "get_sparse_tensors",
    "source_code": "def get_sparse_tensors(self, transformation_cache, state_manager):\n    tensors = transformation_cache.get(self, state_manager)\n    return CategoricalColumn.IdWeightPair(tensors[0], tensors[1])",
    "docstring": "See base class.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\feature_column\\feature_column_v2.py",
    "ast_data": "FunctionDef name:get_sparse_tensors arg:self arg:transformation_cache arg:state_manager arguments arg arg arg Assign Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "with_dtype",
    "source_code": "@abc.abstractmethod\ndef with_dtype(self, dtype):\n    pass",
    "docstring": "Returns an identical _LayerBroadcaster with a different dtype.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\dynamic_ragged_shape.py",
    "ast_data": "FunctionDef name:with_dtype arg:self arg:dtype arguments arg arg"
  },
  {
    "library": "scipy",
    "name": "make_splrep",
    "source_code": "def make_splrep(x, y, *, w=None, xb=None, xe=None, k=3, s=0, t=None, nest=None):\n    if s == 0:\n        if t is not None or w is not None or nest is not None:\n            raise ValueError('s==0 is for interpolation only')\n        return make_interp_spline(x, y, k=k)\n    x, y, w, k, s, xb, xe = _validate_inputs(x, y, w, k, s, xb, xe, parametric=False)\n    spl = _make_splrep_impl(x, y, w=w, xb=xb, xe=xe, k=k, s=s, t=t, nest=nest)\n    spl.c = spl.c[:, 0]\n    return spl",
    "docstring": "Create a smoothing B-spline function with bounded error, minimizing derivative jumps. Given the set of data points ` is the input parameter. In other words, we balance maximizing the smoothness (measured as the jumps of the derivative, the first criterion), and the deviation of :math: from the data :math: (the second criterion). Note that the summation in the second criterion is over all data points, and in the first criterion it is over the internal spline knots (i.e. those with `generate_knotsmake_lsq_splineg(x)`. .. versionadded:: 1.15.0",
    "type": "function",
    "file_path": "scipy\\scipy\\interpolate\\_fitpack_repro.py",
    "ast_data": "FunctionDef name:make_splrep arg:x arg:y arguments arg arg arg arg arg arg arg arg arg If Compare If BoolOp Compare Compare Compare Raise Call Return return:yes Call Assign Call Assign Call Assign Return return:yes"
  },
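Basic usage, assuming SciPy >= 1.15, where `make_splrep` is exposed from `scipy.interpolate`:

```python
import numpy as np
from scipy.interpolate import make_splrep

x = np.linspace(0, 2 * np.pi, 50)
y = np.sin(x) + 0.1 * np.random.default_rng(0).standard_normal(50)

spl = make_splrep(x, y, s=0.5)   # smoothing spline; returns a BSpline
exact = make_splrep(x, y, s=0)   # s=0 delegates to make_interp_spline
print(spl(np.pi), exact(np.pi))
```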
  {
    "library": "authlib",
    "name": "AccessDeniedError",
    "source_code": "class AccessDeniedError(OAuth2Error):\n    error = 'access_denied'\n    description = 'The resource owner or authorization server denied the request'",
    "docstring": "The resource owner or authorization server denied the request. Used in authorization endpoint for \"code\" and \"implicit\". Defined in _. .. _:",
    "type": "class",
    "file_path": "authlib\\authlib\\oauth2\\rfc6749\\errors.py",
    "ast_data": "ClassDef name:AccessDeniedError Assign Assign"
  },
  {
    "library": "tensorflow",
    "name": "TFLiteMetricsInterface",
    "source_code": "class TFLiteMetricsInterface(metaclass=abc.ABCMeta):\n\n    @abc.abstractmethod\n    def increase_counter_debugger_creation(self):\n        raise NotImplementedError\n\n    @abc.abstractmethod\n    def increase_counter_interpreter_creation(self):\n        raise NotImplementedError\n\n    @abc.abstractmethod\n    def increase_counter_converter_attempt(self):\n        raise NotImplementedError\n\n    @abc.abstractmethod\n    def increase_counter_converter_success(self):\n        raise NotImplementedError\n\n    @abc.abstractmethod\n    def set_converter_param(self, name, value):\n        raise NotImplementedError\n\n    @abc.abstractmethod\n    def set_converter_error(self, error_data):\n        raise NotImplementedError\n\n    @abc.abstractmethod\n    def set_converter_latency(self, value):\n        raise NotImplementedError",
    "docstring": "Abstract class for TFLiteMetrics.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\lite\\python\\metrics\\metrics_interface.py",
    "ast_data": "ClassDef name:TFLiteMetricsInterface FunctionDef name:increase_counter_debugger_creation arg:self arguments arg Raise FunctionDef name:increase_counter_interpreter_creation arg:self arguments arg Raise FunctionDef name:increase_counter_converter_attempt arg:self arguments arg Raise FunctionDef name:increase_counter_converter_success arg:self arguments arg Raise FunctionDef name:set_converter_param arg:self arg:name arg:value arguments arg arg arg Raise FunctionDef name:set_converter_error arg:self arg:error_data arguments arg arg Raise FunctionDef name:set_converter_latency arg:self arg:value arguments arg arg Raise"
  },
  {
    "library": "tensorflow",
    "name": "on_graph_execution_trace",
    "source_code": "def on_graph_execution_trace(self, graph_execution_trace_index, graph_execution_trace):\n    pass",
    "docstring": "Monitor method for intra-graph execution events. Return values (if any) are ignored by the associated DebugDataReader. Args: graph_execution_trace_index: The index of the intra-graph execution event, as an int. graph_execution_trace: A GraphExecutionTrace data object, for an intra-graph tensor event.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\debug\\lib\\debug_events_monitors.py",
    "ast_data": "FunctionDef name:on_graph_execution_trace arg:self arg:graph_execution_trace_index arg:graph_execution_trace arguments arg arg arg"
  },
  {
    "library": "numpy",
    "name": "isdecimal",
    "source_code": "def isdecimal(self):\n    return isdecimal(self)",
    "docstring": "For each element in , return True if there are only decimal characters in the element. See Also -------- char.isdecimal",
    "type": "method",
    "file_path": "numpy\\numpy\\_core\\defchararray.py",
    "ast_data": "FunctionDef name:isdecimal arg:self arguments arg Return return:yes Call"
  },
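Element-wise behavior of the method and its free-function counterpart:

```python
import numpy as np

a = np.char.asarray(['123', '12.3', ''])
print(a.isdecimal())                  # [ True False False]
print(np.char.isdecimal(['7', 'x']))  # equivalent free function: [ True False]
```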
  {
    "library": "pytorch",
    "name": "_unlift_inputs_as_getattr",
    "source_code": "def _unlift_inputs_as_getattr(gm: torch.fx.GraphModule, lifted_inputs: Sequence[Optional[str]]) -> tuple[dict[str, torch.fx.Node], dict[str, torch.fx.Node]]:\n    unlifted_name_to_node = {}\n    input_name_to_node = {}\n    placeholder_nodes = [node for node in gm.graph.nodes if node.op == 'placeholder']\n    assert len(lifted_inputs) == len(placeholder_nodes)\n    for input_node, lifted_node in zip(placeholder_nodes, lifted_inputs):\n        if lifted_node is None:\n            input_name_to_node[input_node.name] = input_node\n        else:\n            with gm.graph.inserting_after(input_node):\n                with warnings.catch_warnings():\n                    warnings.simplefilter('ignore')\n                    getattr_node = gm.graph.get_attr(lifted_node)\n                input_node.replace_all_uses_with(getattr_node)\n                metadata = input_node.meta\n                gm.graph.erase_node(input_node)\n                getattr_node.meta = metadata\n                unlifted_name_to_node[lifted_node] = getattr_node\n    return (unlifted_name_to_node, input_name_to_node)",
    "docstring": "Unlift inputs referring to params/buffers/constants as getattr nodes in the graph",
    "type": "function",
    "file_path": "pytorch\\torch\\export\\_unlift.py",
    "ast_data": "FunctionDef name:_unlift_inputs_as_getattr arg:gm arg:lifted_inputs arguments arg arg Assign Assign Assign Compare Compare Call Call For Call If Compare Assign With Call With Call Call Assign Call Call Assign Call Assign Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "infer_steps_for_dataset",
    "source_code": "def infer_steps_for_dataset(model, dataset, steps, epochs=1, steps_name='steps'):\n    assert isinstance(dataset, data_types.DatasetV2)\n    if model._in_multi_worker_mode() and dataset.options().experimental_distribute.auto_shard_policy != options_lib.AutoShardPolicy.OFF:\n        return None\n    size = backend.get_value(cardinality.cardinality(dataset))\n    if size == cardinality.INFINITE and steps is None:\n        raise ValueError('When passing an infinitely repeating dataset, you must specify the `%s` argument.' % (steps_name,))\n    if size >= 0:\n        if steps is not None and steps * epochs > size:\n            if epochs > 1:\n                raise ValueError('The dataset you passed contains %s batches, but you passed `epochs=%s` and `%s=%s`, which is a total of %s steps. We cannot draw that many steps from this dataset. We suggest to set `%s=%s`.' % (size, epochs, steps_name, steps, steps * epochs, steps_name, size // epochs))\n            else:\n                raise ValueError('The dataset you passed contains %s batches, but you passed `%s=%s`. We cannot draw that many steps from this dataset. We suggest to set `%s=%s`.' % (size, steps_name, steps, steps_name, size))\n    if steps is None:\n        if size >= 0:\n            return size\n        return None\n    return steps",
    "docstring": "Infers steps_per_epoch needed to loop through a dataset. Args: model: Keras model instance. dataset: Input data of type tf.data.Dataset. steps: Number of steps to draw from the dataset (may be None if unknown). epochs: Number of times to iterate over the dataset. steps_name: The string name of the steps argument, either , , or . Only used for error message formatting. Returns: Integer or . Inferred number of steps to loop through the dataset. is returned if 1) the size of the dataset is unknown and was not specified, or 2) this is multi-worker training and auto sharding is enabled. Raises: ValueError: In case of invalid argument values.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\training_utils_v1.py",
    "ast_data": "FunctionDef name:infer_steps_for_dataset arg:model arg:dataset arg:steps arg:epochs arg:steps_name arguments arg arg arg arg arg Call If BoolOp Call Compare Call Return return:no Assign Call Call If BoolOp Compare Compare Raise Call If Compare If BoolOp Compare Compare If Compare Raise Call Raise Call If Compare If Compare Return return:yes Return return:no Return return:yes"
  },
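The cardinality probe at the heart of this helper can be reproduced with the public `tf.data` API; a small sketch (per the TF docs, `tf.data.INFINITE_CARDINALITY == -1` and `tf.data.UNKNOWN_CARDINALITY == -2`):

```python
import tensorflow as tf

ds = tf.data.Dataset.range(10).batch(2)  # 5 batches
size = int(ds.cardinality())             # -1 infinite, -2 unknown
steps = size if size >= 0 else None      # mirror the final fallback above
print(steps)  # 5

infinite = tf.data.Dataset.range(10).repeat()
print(int(infinite.cardinality()))       # -1: caller must pass `steps` explicitly
```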
  {
    "library": "django",
    "name": "get_db_prep_value",
    "source_code": "def get_db_prep_value(self, value, connection, prepared=False):\n    if not prepared:\n        value = self.get_prep_value(value)\n    return value",
    "docstring": "Return field's value prepared for interacting with the database backend. Used by the default implementations of get_db_prep_save().",
    "type": "method",
    "file_path": "django\\django\\db\\models\\fields\\__init__.py",
    "ast_data": "FunctionDef name:get_db_prep_value arg:self arg:value arg:connection arg:prepared arguments arg arg arg arg If Assign Call Return return:yes"
  },
  {
    "library": "sphinx",
    "name": "run",
    "source_code": "def run(self) -> list[Node]:\n    if ':' in self.name:\n        self.domain, self.objtype = self.name.split(':', 1)\n    else:\n        self.domain, self.objtype = ('', self.name)\n    node = addnodes.desc()\n    node.document = self.state.document\n    node['domain'] = self.domain\n    node['objtype'] = node['desctype'] = self.objtype\n    node['no-index'] = True\n    self.names: list[str] = []\n    alias_options = {'maxdepth': self.options.get('maxdepth', 1), 'noroot': 'noroot' in self.options}\n    if alias_options['noroot'] and alias_options['maxdepth'] == 1:\n        logger.warning(\"Error in C alias declaration. Requested 'noroot' but 'maxdepth' 1. When skipping the root declaration, need 'maxdepth' 0 for infinite or at least 2.\", location=self.get_location())\n    for sig in self.get_signatures():\n        node.append(AliasNode(sig, alias_options, self.state.document, env=self.env))\n    return [node]",
    "docstring": "On purpose this doesn't call the ObjectDescription version, but is based on it. Each alias signature may expand into multiple real signatures if 'noroot'. The code is therefore based on the ObjectDescription version.",
    "type": "method",
    "file_path": "sphinx\\sphinx\\domains\\c\\__init__.py",
    "ast_data": "FunctionDef name:run arg:self arguments arg If Compare Assign Call Assign Assign Call Assign Assign Assign Assign Assign Call Compare If BoolOp Compare Call Call For Call Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "__ge__",
    "source_code": "def __ge__(self, other):\n    other = as_dimension(other)\n    if self._value is None or other.value is None:\n        return None\n    else:\n        return self._value >= other.value",
    "docstring": "Returns True if is known to be greater than or equal to . Dimensions are compared as follows: Args: other: Another Dimension. Returns: The value of if both are known, otherwise None.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\tensor_shape.py",
    "ast_data": "FunctionDef name:__ge__ arg:self arg:other arguments arg arg Assign Call If BoolOp Compare Compare Return return:no Return return:yes Compare"
  },
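The three-valued comparison in practice, via the TF1-style shape API:

```python
import tensorflow as tf

Dim = tf.compat.v1.Dimension
print(Dim(12) >= Dim(10))    # True: both values known
print(Dim(12) >= Dim(None))  # None: the comparison is undecidable
```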
  {
    "library": "matplotlib",
    "name": "get_ylabel",
    "source_code": "def get_ylabel(self):\n    label = self.yaxis.label\n    return label.get_text()",
    "docstring": "Get the ylabel text string.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\axes\\_base.py",
    "ast_data": "FunctionDef name:get_ylabel arg:self arguments arg Assign Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "append_step",
    "source_code": "def append_step(self, step: InputAdaptStep) -> None:\n    self._steps.append(step)",
    "docstring": "Appends a step to the input adapt steps. Args: step: The step to append.",
    "type": "method",
    "file_path": "pytorch\\torch\\onnx\\_internal\\io_adapter.py",
    "ast_data": "FunctionDef name:append_step arg:self arg:step arguments arg arg Call"
  },
  {
    "library": "kornia",
    "name": "forward_with_coords",
    "source_code": "def forward_with_coords(self, coords_input: Tensor, image_size: tuple[int, int]) -> Tensor:\n    coords = coords_input.clone()\n    coords[:, :, 0] = coords[:, :, 0] / image_size[1]\n    coords[:, :, 1] = coords[:, :, 1] / image_size[0]\n    return self._pe_encoding(coords.to(torch.float32))",
    "docstring": "Positionally encode points that are not normalized to [0,1].",
    "type": "method",
    "file_path": "kornia\\kornia\\contrib\\models\\sam\\architecture\\prompt_encoder.py",
    "ast_data": "FunctionDef name:forward_with_coords arg:self arg:coords_input arg:image_size arguments arg arg arg Assign Call Assign Assign Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "_check_reducer_finalized",
    "source_code": "def _check_reducer_finalized(self):\n    self.reducer._check_reducer_finalized()",
    "docstring": "Check if the reducer has processed all buckets and finalized the backward appropriately. It is useful to call this method after calling .backward() in your training loop in order to avoid subsequent hard to debug errors down the road due to the reducer not finalizing backward.",
    "type": "method",
    "file_path": "pytorch\\torch\\nn\\parallel\\distributed.py",
    "ast_data": "FunctionDef name:_check_reducer_finalized arg:self arguments arg Call"
  },
  {
    "library": "tensorflow",
    "name": "graph_debug_info",
    "source_code": "@property\ndef graph_debug_info(self) -> graph_debug_info_pb2.GraphDebugInfo:\n    return self._bound_context.get_graph_debug_info(self.name)",
    "docstring": "A GraphDebugInfo proto mapping nodes to corresponding stack traces.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\polymorphic_function\\atomic_function.py",
    "ast_data": "FunctionDef name:graph_debug_info arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "register",
    "source_code": "def register() -> None:\n    plot_backend = _get_plot_backend('matplotlib')\n    plot_backend.register()",
    "docstring": "Register pandas formatters and converters with matplotlib. This function modifies the global `` dictionary. pandas adds custom converters for * pd.Timestamp * pd.Period * np.datetime64 * datetime.datetime * datetime.date * datetime.time See Also -------- deregister_matplotlib_converters : Remove pandas formatters and converters. Examples -------- .. plot:: :context: close-figs The following line is done automatically by pandas so the plot can be rendered: >>> pd.plotting.register_matplotlib_converters() >>> df = pd.DataFrame( ... {\"ts\": pd.period_range(\"2020\", periods=2, freq=\"M\"), \"y\": [1, 2]} ... ) >>> plot = df.plot.line(x=\"ts\", y=\"y\") Unsetting the register manually an error will be raised: >>> pd.set_option( ... \"plotting.matplotlib.register_converters\", False ... ) # doctest: +SKIP >>> df.plot.line(x=\"ts\", y=\"y\") # doctest: +SKIP Traceback (most recent call last): TypeError: float() argument must be a string or a real number, not 'Period'",
    "type": "function",
    "file_path": "pandas\\pandas\\plotting\\_misc.py",
    "ast_data": "FunctionDef name:register arguments Assign Call Call"
  },
  {
    "library": "django",
    "name": "last_insert_id",
    "source_code": "def last_insert_id(self, cursor, table_name, pk_name):\n    return cursor.lastrowid",
    "docstring": "Given a cursor object that has just performed an INSERT statement into a table that has an auto-incrementing ID, return the newly created ID. is the name of the primary-key column.",
    "type": "method",
    "file_path": "django\\django\\db\\backends\\base\\operations.py",
    "ast_data": "FunctionDef name:last_insert_id arg:self arg:cursor arg:table_name arg:pk_name arguments arg arg arg arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, axis=-1, **kwargs):\n    super(Concatenate, self).__init__(**kwargs)\n    self.axis = axis\n    self.supports_masking = True\n    self._reshape_required = False",
    "docstring": "Instantiates a Concatenate layer. >>> x = np.arange(20).reshape(2, 2, 5) >>> print(x) [[[ 0 1 2 3 4] [ 5 6 7 8 9]] [[10 11 12 13 14] [15 16 17 18 19]]] >>> y = np.arange(20, 30).reshape(2, 1, 5) >>> print(y) [[[20 21 22 23 24]] [[25 26 27 28 29]]] >>> tf.keras.layers.Concatenate(axis=1)([x, y]) Args: axis: Axis along which to concatenate. **kwargs: standard layer keyword arguments.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\layers\\merge.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:axis arguments arg arg arg Call Call Assign Assign Assign"
  },
  {
    "library": "scrapy",
    "name": "_get_handler",
    "source_code": "def _get_handler(self, scheme: str) -> DownloadHandlerProtocol | None:\n    if scheme in self._handlers:\n        return self._handlers[scheme]\n    if scheme in self._notconfigured:\n        return None\n    if scheme not in self._schemes:\n        self._notconfigured[scheme] = 'no handler available for that scheme'\n        return None\n    return self._load_handler(scheme)",
    "docstring": "Lazy-load the downloadhandler for a scheme only on the first request for that scheme.",
    "type": "method",
    "file_path": "scrapy\\scrapy\\core\\downloader\\handlers\\__init__.py",
    "ast_data": "FunctionDef name:_get_handler arg:self arg:scheme arguments arg arg If Compare Return return:yes If Compare Return return:no If Compare Assign Return return:no Return return:yes Call"
  },
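The caching discipline generalizes; a minimal standalone sketch of the same lazy-load-with-negative-cache pattern (names here are illustrative, not Scrapy's):

```python
class LazyHandlerRegistry:
    def __init__(self, factories):
        self._factories = factories   # scheme -> zero-arg callable
        self._handlers = {}           # positive cache
        self._notconfigured = {}      # negative cache: scheme -> reason

    def get(self, scheme):
        if scheme in self._handlers:
            return self._handlers[scheme]
        if scheme in self._notconfigured:
            return None
        factory = self._factories.get(scheme)
        if factory is None:
            self._notconfigured[scheme] = 'no handler available for that scheme'
            return None
        handler = self._handlers[scheme] = factory()  # built on first request
        return handler

reg = LazyHandlerRegistry({'http': lambda: object()})
assert reg.get('http') is reg.get('http')  # same cached instance
assert reg.get('ftp') is None              # remembered as not configured
```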
  {
    "library": "tensorflow",
    "name": "stateless_random_flip_up_down",
    "source_code": "@tf_export('image.stateless_random_flip_up_down', v1=[])\n@dispatch.add_dispatch_support\ndef stateless_random_flip_up_down(image, seed):\n    random_func = functools.partial(stateless_random_ops.stateless_random_uniform, seed=seed)\n    return _random_flip(image, 0, random_func, 'stateless_random_flip_up_down')",
    "docstring": "Randomly flip an image vertically (upside down) deterministically. Guarantees the same results given the same independent of how many times the function is called, and independent of global seed settings (e.g. ). Example usage: >>> image = np.array([[[1], [2]], [[3], [4]]]) >>> seed = (2, 3) >>> tf.image.stateless_random_flip_up_down(image, seed).numpy().tolist() [[[3], [4]], [[1], [2]]] Args: image: 4-D Tensor of shape or 3-D Tensor of shape . seed: A shape [2] Tensor, the seed to the random number generator. Must have dtype or . (When using XLA, only is allowed.) Returns: A tensor of the same type and shape as .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\image_ops_impl.py",
    "ast_data": "FunctionDef name:stateless_random_flip_up_down arg:image arg:seed arguments arg arg Assign Call Return return:yes Call Call"
  },
  {
    "library": "kornia",
    "name": "as_tensorrt",
    "source_code": "def as_tensorrt(self, device_id: int=0, **kwargs: Any) -> None:\n    self._session.set_providers(['TensorrtExecutionProvider'], provider_options=[{'device_id': device_id, **kwargs}])",
    "docstring": "Set the session to run on TensorRT. We set the ONNX runtime session to use TensorrtExecutionProvider. For other TensorrtExecutionProvider configurations, or CUDA/cuDNN/ONNX/TensorRT version issues, you may refer to Args: device_id: select GPU to execute. kwargs: additional arguments from TensorRT.",
    "type": "method",
    "file_path": "kornia\\kornia\\core\\mixin\\onnx.py",
    "ast_data": "FunctionDef name:as_tensorrt arg:self arg:device_id arguments arg arg arg Call"
  },
  {
    "library": "virtualenv",
    "name": "interpreter",
    "source_code": "@property\ndef interpreter(self):\n    if self._has_run is False:\n        self._interpreter = self.run()\n        self._has_run = True\n    return self._interpreter",
    "docstring": ":return: the interpreter as returned by :meth:, cached",
    "type": "method",
    "file_path": "virtualenv\\src\\virtualenv\\discovery\\discover.py",
    "ast_data": "FunctionDef name:interpreter arg:self arguments arg If Compare Assign Call Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "run",
    "source_code": "def run(self, fn, args=(), kwargs=None, options=None):\n    return super(OneDeviceStrategy, self).run(fn, args, kwargs, options)",
    "docstring": "Run on each replica, with the given arguments. In , is simply called within a device scope for the given device, with the provided arguments. Args: fn: The function to run. The output must be a of s. args: (Optional) Positional arguments to . kwargs: (Optional) Keyword arguments to . options: (Optional) An instance of specifying the options to run . Returns: Return value from running .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\one_device_strategy.py",
    "ast_data": "FunctionDef name:run arg:self arg:fn arg:args arg:kwargs arg:options arguments arg arg arg arg arg Return return:yes Call Call"
  },
  {
    "library": "matplotlib",
    "name": "set_data",
    "source_code": "def set_data(self, A):\n    super().set_data(A)\n    self.stale = True",
    "docstring": "Set the image array.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\image.py",
    "ast_data": "FunctionDef name:set_data arg:self arg:A arguments arg arg Call Call Assign"
  },
  {
    "library": "sphinx",
    "name": "escape_menu",
    "source_code": "def escape_menu(self, s: str) -> str:\n    s = self.escape_arg(s)\n    s = s.replace(':', ';')\n    s = ' '.join(s.split()).strip()\n    return s",
    "docstring": "Return an escaped string suitable for menu entries.",
    "type": "method",
    "file_path": "sphinx\\sphinx\\writers\\texinfo.py",
    "ast_data": "FunctionDef name:escape_menu arg:self arg:s arguments arg arg Assign Call Assign Call Assign Call Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "get_allocator_backend",
    "source_code": "def get_allocator_backend() -> str:\n    return torch._C._cuda_getAllocatorBackend()",
    "docstring": "Return a string describing the active allocator backend as set by `cudaMallocAsync`cuda-memory-management` for details on choosing the allocator backend.",
    "type": "function",
    "file_path": "pytorch\\torch\\cuda\\memory.py",
    "ast_data": "FunctionDef name:get_allocator_backend arguments Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "StatelessSymbolicContext",
    "source_code": "@dataclass(frozen=True)\nclass StatelessSymbolicContext(SymbolicContext):\n    dynamic_sizes: DimList[DimDynamic]\n    dynamic_strides: DimList[DimDynamic] = None\n    constraint_sizes: DimList[DimConstraint] = None\n    constraint_strides: DimList[DimConstraint] = None\n    view_base_context: Optional[SymbolicContext] = None\n\n    def __post_init__(self) -> None:\n        if self.dynamic_strides is None:\n            object.__setattr__(self, 'dynamic_strides', [DimDynamic.INFER_STRIDE] * len(self.dynamic_sizes))\n        if self.constraint_sizes is None:\n            object.__setattr__(self, 'constraint_sizes', [None] * len(self.dynamic_sizes))\n        if self.constraint_strides is None:\n            object.__setattr__(self, 'constraint_strides', [None] * len(self.dynamic_sizes))\n        assert all((stride in (DimDynamic.INFER_STRIDE, DimDynamic.DYNAMIC, DimDynamic.DUCK) for stride in self.dynamic_strides))",
    "docstring": "Create symbols in ``. This will cause fresh symbols to be allocated",
    "type": "class",
    "file_path": "pytorch\\torch\\fx\\experimental\\symbolic_shapes.py",
    "ast_data": "ClassDef name:StatelessSymbolicContext FunctionDef name:__post_init__ arg:self arguments arg If Compare Call Call If Compare Call Call If Compare Call Call Call Compare Call"
  },
  {
    "library": "scikit-learn",
    "name": "VotingRegressor",
    "source_code": "class VotingRegressor(RegressorMixin, _BaseVoting):\n\n    def __init__(self, estimators, *, weights=None, n_jobs=None, verbose=False):\n        super().__init__(estimators=estimators)\n        self.weights = weights\n        self.n_jobs = n_jobs\n        self.verbose = verbose\n\n    @_fit_context(prefer_skip_nested_validation=False)\n    def fit(self, X, y, **fit_params):\n        _raise_for_params(fit_params, self, 'fit', allow=['sample_weight'])\n        y = column_or_1d(y, warn=True)\n        return super().fit(X, y, **fit_params)\n\n    def predict(self, X):\n        check_is_fitted(self)\n        return np.average(self._predict(X), axis=1, weights=self._weights_not_none)\n\n    def transform(self, X):\n        check_is_fitted(self)\n        return self._predict(X)\n\n    def get_feature_names_out(self, input_features=None):\n        check_is_fitted(self, 'n_features_in_')\n        _check_feature_names_in(self, input_features, generate_names=False)\n        class_name = self.__class__.__name__.lower()\n        return np.asarray([f'{class_name}_{name}' for name, est in self.estimators if est != 'drop'], dtype=object)",
    "docstring": "Prediction voting regressor for unfitted estimators. A voting regressor is an ensemble meta-estimator that fits several base regressors, each on the whole dataset. Then it averages the individual predictions to form a final prediction. For a detailed example, refer to :ref:. Read more in the :ref:. .. versionadded:: 0.21 Parameters ---------- estimators : list of (str, estimator) tuples Invoking the `set_paramsfloatintNonejoblib.parallel_backendGlossary ~sklearn.utils.Bunchfitn_features_in_fit'lr'~VotingRegressor.set_params` and fit the remaining two estimators: >>> er = er.set_params(lr='drop') >>> er = er.fit(X, y) >>> len(er.estimators_) 2",
    "type": "class",
    "file_path": "scikit-learn\\sklearn\\ensemble\\_voting.py",
    "ast_data": "ClassDef name:VotingRegressor FunctionDef name:__init__ arg:self arg:estimators arguments arg arg arg arg arg Call Call Assign Assign Assign FunctionDef name:fit arg:self arg:X arg:y arguments arg arg arg arg Call Assign Call Return return:yes Call Call Call FunctionDef name:predict arg:self arg:X arguments arg arg Call Return return:yes Call Call FunctionDef name:transform arg:self arg:X arguments arg arg Call Return return:yes Call FunctionDef name:get_feature_names_out arg:self arg:input_features arguments arg arg Call Call Assign Call Return return:yes Call Compare"
  },
  {
    "library": "pandas",
    "name": "frame_apply",
    "source_code": "def frame_apply(obj: DataFrame, func: AggFuncType, axis: Axis=0, raw: bool=False, result_type: str | None=None, by_row: Literal[False, 'compat']='compat', engine: str='python', engine_kwargs: dict[str, bool] | None=None, args=None, kwargs=None) -> FrameApply:\n    _, func, columns, _ = reconstruct_func(func, **kwargs)\n    axis = obj._get_axis_number(axis)\n    klass: type[FrameApply]\n    if axis == 0:\n        klass = FrameRowApply\n    elif axis == 1:\n        if columns:\n            raise NotImplementedError(f'Named aggregation is not supported when axis={axis!r}.')\n        klass = FrameColumnApply\n    return klass(obj, func, raw=raw, result_type=result_type, by_row=by_row, engine=engine, engine_kwargs=engine_kwargs, args=args, kwargs=kwargs)",
    "docstring": "construct and return a row or column based frame apply object",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\apply.py",
    "ast_data": "FunctionDef name:frame_apply arg:obj arg:func arg:axis arg:raw arg:result_type arg:by_row arg:engine arg:engine_kwargs arg:args arg:kwargs arguments arg arg arg arg arg arg arg arg arg arg Assign Call Assign Call If Compare Assign If Compare If Raise Call Assign Return return:yes Call"
  },
  {
    "library": "django",
    "name": "result_list",
    "source_code": "def result_list(cl):\n    headers = list(result_headers(cl))\n    num_sorted_fields = 0\n    for h in headers:\n        if h['sortable'] and h['sorted']:\n            num_sorted_fields += 1\n    return {'cl': cl, 'result_hidden_fields': list(result_hidden_fields(cl)), 'result_headers': headers, 'num_sorted_fields': num_sorted_fields, 'results': list(results(cl))}",
    "docstring": "Display the headers and data list together.",
    "type": "function",
    "file_path": "django\\django\\contrib\\admin\\templatetags\\admin_list.py",
    "ast_data": "FunctionDef name:result_list arg:cl arguments arg Assign Call Call Assign For If BoolOp Return return:yes Call Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "_get_nd_reduction_numels",
    "source_code": "def _get_nd_reduction_numels(r: int, size_hints: dict[str, int]) -> dict[str, int]:\n    r = min(r, get_total_reduction_numel(size_hints))\n    num_reduction_dims = len([prefix for prefix in size_hints if prefix_is_reduction(prefix)])\n    remaining = r\n    rnumels = {}\n    for idx in range(num_reduction_dims - 1, -1, -1):\n        prefix = f'r{idx}_'\n        max_size = min(size_hints[prefix], TRITON_MAX_BLOCK[prefix.upper()])\n        dim = min(max_size, remaining)\n        assert remaining % dim == 0, f\"Expected dimension '{dim}' to divide remaining size '{remaining}'\"\n        rnumels[prefix] = dim\n        remaining //= dim\n    final_numel = conditional_product(*rnumels.values())\n    assert r == final_numel, f'Expected ND reduction size ({rnumels}) to have {r} elements.'\n    assert all((rnumels[prefix] <= size_hints[prefix] for prefix in rnumels)), f'rnumels exceed size_hints. {rnumels} > {size_hints}'\n    return rnumels",
    "docstring": "Converts a linear reduction numel to ND, in row major order. This order is often desirable as it presents opportunities to coalesce memory accesses. For example, if r = 64 and size_hints = [32,32], this function returns [32, 2]. This unraveling works because both r and size_hints are powers of 2.",
    "type": "function",
    "file_path": "pytorch\\torch\\_inductor\\runtime\\triton_heuristics.py",
    "ast_data": "FunctionDef name:_get_nd_reduction_numels arg:r arg:size_hints arguments arg arg Assign Call Call Assign Call Call Assign Assign For Call Assign Assign Call Call Assign Call Compare Assign Assign Call Call Compare Call Compare Return return:yes"
  },
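The `_get_nd_reduction_numels` docstring describes a row-major unraveling: fill the innermost reduction dimension first, then carry the remainder outward. A standalone sketch of that logic follows; `max_block` is an illustrative stand-in for the internal TRITON_MAX_BLOCK caps, and the prefix naming mirrors the `r{idx}_` keys in the original.

```python
# Split a linear reduction numel r across reduction dims, innermost first.
def unravel_reduction(r: int, size_hints: dict[str, int], max_block: int = 2048) -> dict[str, int]:
    remaining = r
    rnumels = {}
    prefixes = list(size_hints)  # e.g. ["r0_", "r1_"], outermost first
    for prefix in reversed(prefixes):  # fill innermost dims first
        dim = min(size_hints[prefix], max_block, remaining)
        assert remaining % dim == 0, "works because sizes are powers of 2"
        rnumels[prefix] = dim
        remaining //= dim
    return rnumels

# Reproduces the docstring's example: r=64 over hints [32, 32].
print(unravel_reduction(64, {"r0_": 32, "r1_": 32}))  # {'r1_': 32, 'r0_': 2}
```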
  {
    "library": "tensorflow",
    "name": "_squared_difference_flops",
    "source_code": "@ops.RegisterStatistics('SquaredDifference', 'flops')\ndef _squared_difference_flops(graph, node):\n    return _binary_per_element_op_flops(graph, node, ops_per_element=2)",
    "docstring": "Compute flops for SquaredDifference operation.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\profiler\\internal\\flops_registry.py",
    "ast_data": "FunctionDef name:_squared_difference_flops arg:graph arg:node arguments arg arg Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "should_overwrite",
    "source_code": "def should_overwrite(filepath, overwrite):\n    if not overwrite and os.path.isfile(filepath):\n        return ask_to_proceed_with_overwrite(filepath)\n    return True",
    "docstring": "Returns whether the filepath should be overwritten.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\saving\\saving_utils.py",
    "ast_data": "FunctionDef name:should_overwrite arg:filepath arg:overwrite arguments arg arg If BoolOp Call Return return:yes Call Return return:yes"
  },
  {
    "library": "scipy",
    "name": "moderatex",
    "source_code": "def moderatex(x):\n    x[np.isnan(x)] = 0\n    x = np.clip(x, -REALMAX, REALMAX)\n    return x",
    "docstring": "This function moderates a decision variable. It replaces NaN by 0 and Inf/-Inf by REALMAX/-REALMAX.",
    "type": "function",
    "file_path": "scipy\\scipy\\_lib\\pyprima\\pyprima\\src\\pyprima\\common\\evaluate.py",
    "ast_data": "FunctionDef name:moderatex arg:x arguments arg Assign Call Assign Call Return return:yes"
  },
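A behavior sketch for `moderatex`: NaN becomes 0, and infinities are clipped to a large finite bound. Here REALMAX is assumed to be the largest finite double, which matches the clipping described in the docstring.

```python
import numpy as np

REALMAX = np.finfo(float).max  # assumed stand-in for pyprima's REALMAX

def moderatex(x):
    x[np.isnan(x)] = 0
    return np.clip(x, -REALMAX, REALMAX)

print(moderatex(np.array([np.nan, np.inf, -np.inf, 1.5])))
# [ 0.0e+00  1.797...e+308 -1.797...e+308  1.5e+00]
```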
  {
    "library": "pytorch",
    "name": "_is_amx_tile_supported",
    "source_code": "def _is_amx_tile_supported() -> bool:\n    return torch._C._cpu._is_amx_tile_supported()",
    "docstring": "Returns a bool indicating if CPU supports AMX_TILE.",
    "type": "function",
    "file_path": "pytorch\\torch\\cpu\\__init__.py",
    "ast_data": "FunctionDef name:_is_amx_tile_supported arguments Return return:yes Call"
  },
  {
    "library": "django",
    "name": "check_password",
    "source_code": "def check_password(self, raw_password):\n\n    def setter(raw_password):\n        self.set_password(raw_password)\n        self._password = None\n        self.save(update_fields=['password'])\n    return check_password(raw_password, self.password, setter)",
    "docstring": "Return a boolean of whether the raw_password was correct. Handles hashing formats behind the scenes.",
    "type": "method",
    "file_path": "django\\django\\contrib\\auth\\base_user.py",
    "ast_data": "FunctionDef name:check_password arg:self arg:raw_password arguments arg arg FunctionDef name:setter arg:raw_password arguments arg Call Assign Call Return return:yes Call"
  },
  {
    "library": "django",
    "name": "from_dict",
    "source_code": "@classmethod\ndef from_dict(cls, file_dict):\n    return cls(file_dict['filename'], file_dict['content'], file_dict.get('content-type', 'text/plain'))",
    "docstring": "Create a SimpleUploadedFile object from a dictionary with keys: - filename - content-type - content",
    "type": "method",
    "file_path": "django\\django\\core\\files\\uploadedfile.py",
    "ast_data": "FunctionDef name:from_dict arg:cls arg:file_dict arguments arg arg Return return:yes Call Call"
  },
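A round-trip sketch for `SimpleUploadedFile.from_dict`, using the dictionary keys named in the docstring; the file name, payload, and printed attributes are illustrative.

```python
from django.core.files.uploadedfile import SimpleUploadedFile

f = SimpleUploadedFile.from_dict({
    'filename': 'hello.txt',
    'content': b'hello world',
    'content-type': 'text/plain',  # optional; defaults to 'text/plain'
})
print(f.name, f.content_type, f.read())
```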
  {
    "library": "django",
    "name": "overlaps",
    "source_code": "def overlaps(self, other):\n    return self._topology(capi.ogr_overlaps, other)",
    "docstring": "Return True if this geometry overlaps the other.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\gdal\\geometries.py",
    "ast_data": "FunctionDef name:overlaps arg:self arg:other arguments arg arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "build_chunks",
    "source_code": "def build_chunks(self) -> None:\n    pass",
    "docstring": "Builds the Splitter object by generating chunks from the proto. Subclasses of should only need to override this method. This method should be called once per Splitter to create the chunks. Users should call the methods or instead.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\tools\\proto_splitter\\split.py",
    "ast_data": "FunctionDef name:build_chunks arg:self arguments arg"
  },
  {
    "library": "pytorch",
    "name": "aot_load",
    "source_code": "def aot_load(so_path: str, device: str) -> Callable:\n    aot_compile_warning()\n    if device == 'cpu':\n        runner = torch._C._aoti.AOTIModelContainerRunnerCpu(so_path, 1)\n    elif device == 'cuda' or device.startswith('cuda:'):\n        runner = torch._C._aoti.AOTIModelContainerRunnerCuda(so_path, 1, device)\n    elif device == 'xpu' or device.startswith('xpu:'):\n        runner = torch._C._aoti.AOTIModelContainerRunnerXpu(so_path, 1, device)\n    elif device == 'mps' or device.startswith('mps:'):\n        runner = torch._C._aoti.AOTIModelContainerRunnerMps(so_path, 1)\n    else:\n        raise RuntimeError('Unsupported device ' + device)\n\n    def optimized(*args, **kwargs):\n        call_spec = runner.get_call_spec()\n        in_spec = pytree.treespec_loads(call_spec[0])\n        out_spec = pytree.treespec_loads(call_spec[1])\n        flat_inputs = pytree.tree_flatten((args, reorder_kwargs(kwargs, in_spec)))[0]\n        flat_inputs = [x for x in flat_inputs if isinstance(x, torch.Tensor)]\n        flat_outputs = runner.run(flat_inputs)\n        return pytree.tree_unflatten(flat_outputs, out_spec)\n    return optimized",
    "docstring": "Loads a shared library generated by aot_compile and returns a callable Args: so_path: Path to the shared library Returns: A callable",
    "type": "function",
    "file_path": "pytorch\\torch\\_export\\__init__.py",
    "ast_data": "FunctionDef name:aot_load arg:so_path arg:device arguments arg arg Call If Compare Assign Call If BoolOp Compare Call Assign Call If BoolOp Compare Call Assign Call If BoolOp Compare Call Assign Call Raise Call FunctionDef name:optimized arguments arg arg Assign Call Assign Call Assign Call Assign Call Call Assign Call Assign Call Return return:yes Call Return return:yes"
  },
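A hedged end-to-end sketch for `aot_load`. It assumes `torch._export.aot_compile` as the producer of the shared library, since it pairs with `aot_load` in the same module; both are private, experimental APIs, so treat the exact signatures as version-dependent.

```python
import torch

class M(torch.nn.Module):
    def forward(self, x):
        return x.sin() + 1

x = torch.randn(4)
# aot_compile lowers the module to a shared library and returns its path.
so_path = torch._export.aot_compile(M().eval(), (x,))
# aot_load wraps the .so in a callable that flattens/unflattens inputs.
model = torch._export.aot_load(so_path, device='cpu')
print(model(x))
```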
  {
    "library": "pytorch",
    "name": "onnxscript_op",
    "source_code": "def onnxscript_op(self, onnx_fn, *raw_args: torch.Tensor | _C.Value, outputs: int=1, **kwargs):\n    symbolic_name = f'{onnx_fn.opset.domain}::{onnx_fn.name}'\n    opset_version = onnx_fn.opset.version\n    registration.custom_onnx_symbolic(symbolic_name, opset_version)(onnx_fn)\n    return _add_op(self, symbolic_name, *raw_args, outputs=outputs, **kwargs)",
    "docstring": "Creates an ONNX operator from onnx-script function, taking \"raw_args\" as inputs and \"kwargs\" as attributes. onnx-script repository: Args: onnx_fn: ONNXFunction from onnx-script; An example can be found at raw_args: The inputs to the operator; usually provided as arguments to the definition. outputs: The number of outputs this operator returns. By default an operator is assumed to return a single output. If is greater than one, this functions returns a tuple of output , representing each output of the ONNX operator in order. kwargs: The attributes of the ONNX operator, whose keys are named according to the following convention: indicates the attribute with type . The valid type specifiers are (float), (int), (string) or (Tensor). An attribute specified with type float accepts either a single float, or a list of floats (e.g., you would say for a attribute that takes a list of integers). Returns: The value representing the single output of this operator (see the keyword argument for multi-return nodes).",
    "type": "method",
    "file_path": "pytorch\\torch\\onnx\\_internal\\jit_utils.py",
    "ast_data": "FunctionDef name:onnxscript_op arg:self arg:onnx_fn arguments arg arg arg arg arg Assign Assign Call Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_pack_iterator_resource_dtensor",
    "source_code": "def _pack_iterator_resource_dtensor(datasets: List[Tuple[int, data_types.DatasetV2]], layouts: Any, mesh: layout_lib.Mesh, num_local_devices_per_replica: int):\n    host_mesh_devices = mesh.host_mesh().local_devices()\n    device_idx = 0\n    iterators = []\n    for _, dataset in datasets:\n        for idx in range(num_local_devices_per_replica):\n            with ops.device_v2(host_mesh_devices[device_idx]):\n                device_dataset = dataset.shard(num_shards=num_local_devices_per_replica, index=idx)\n                iterators.append(iter(device_dataset))\n            device_idx += 1\n    if device_idx != len(host_mesh_devices):\n        raise ValueError(f'The `datasets` argument does not have the correct number of underlying datasets, found {device_idx} but expected {len(host_mesh_devices)}.')\n    host_layouts = nest.map_structure(lambda l: layout_lib.Layout(l.sharding_specs, mesh.host_mesh()), layouts)\n    iterator_resources = [it._iterator_resource for it in iterators]\n    d_iterator_resource = api.pack(iterator_resources, layout_lib.Layout.replicated(mesh=mesh.host_mesh(), rank=0))\n    api._dtensor_device().set_iterator_element_layouts(d_iterator_resource, nest.flatten(host_layouts))\n    return d_iterator_resource",
    "docstring": "Creates a DTensor iterator resource for the per-replica datasets. Given a list of replica ID to tf.data.Dataset mappings, this function creates iterators for each device and then packs the underlying iterator resource tensors into a single DTensor. This resource tensor is used by the IteratorGetNext op to retrieve the next element in the dataset. Args: datasets: a list of tuples of each unique local replica ID to the dataset object whose elements will be placed on the devices corresponding to that replica. layouts: a structure of DTensor layouts to be applied to the elements returned by the underlying iterators. This can be a single layout or (possibly nested) tuples or dictionaries of layouts, and the structure must match the structure of the iterator elements. mesh: the DTensor mesh to place the iterator batches on. num_local_devices_per_replica: the number of devices in each data-parallel replica. Returns: A DTensor of the underlying iterator resource tensors.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\dtensor\\python\\input_util.py",
    "ast_data": "FunctionDef name:_pack_iterator_resource_dtensor arg:datasets arg:layouts arg:mesh arg:num_local_devices_per_replica arguments arg arg arg arg Assign Call Call Assign Assign For For Call With Call Assign Call Call Call If Compare Call Raise Call Call Assign Call arguments arg Call Call Assign Assign Call Call Call Call Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_AsTensorList",
    "source_code": "def _AsTensorList(x, p):\n    if not isinstance(x, (list, _basetuple)):\n        x = [x]\n    l = []\n    for v in x:\n        if isinstance(v, ops.Operation):\n            v = with_dependencies([v], p)\n        v = ops.convert_to_tensor_or_composite(v)\n        if isinstance(v, tensor_lib.Tensor):\n            l.append(array_ops.identity(v))\n        else:\n            l.append(indexed_slices.IndexedSlices(array_ops.identity(v.values), array_ops.identity(v.indices)))\n    return l",
    "docstring": "Return x as a list of Tensors or IndexedSlices. For entries of that are Operations, this returns an Identity of with a dependency on the operation. Args: x: A Tensor/IndexedSlices/Operation or a list or tuple of them. p: A Tensor to return for entries in that are Operations. Returns: A list of Tensors or IndexedSlices.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\control_flow_ops.py",
    "ast_data": "FunctionDef name:_AsTensorList arg:x arg:p arguments arg arg If Call Assign Assign For If Call Assign Call Assign Call If Call Call Call Call Call Call Call Return return:yes"
  },
  {
    "library": "numpy",
    "name": "sqrt",
    "source_code": "@set_module('numpy.lib.scimath')\n@array_function_dispatch(_unary_dispatcher)\ndef sqrt(x):\n    x = _fix_real_lt_zero(x)\n    return nx.sqrt(x)",
    "docstring": "Compute the square root of x. For negative input elements, a complex value is returned (unlike which returns NaN). Parameters ---------- x : array_like The input value(s). Returns ------- out : ndarray or scalar The square root of . If was a scalar, so is , otherwise an array is returned. See Also -------- numpy.sqrt Examples -------- For real, non-negative inputs this works just like : >>> import numpy as np >>> np.emath.sqrt(1) 1.0 >>> np.emath.sqrt([1, 4]) array([1., 2.]) But it automatically handles negative inputs: >>> np.emath.sqrt(-1) 1j >>> np.emath.sqrt([-1,4]) array([0.+1.j, 2.+0.j]) Different results are expected because: floating point 0.0 and -0.0 are distinct. For more control, explicitly use complex() as follows: >>> np.emath.sqrt(complex(-4.0, 0.0)) 2j >>> np.emath.sqrt(complex(-4.0, -0.0)) -2j",
    "type": "function",
    "file_path": "numpy\\numpy\\lib\\_scimath_impl.py",
    "ast_data": "FunctionDef name:sqrt arg:x arguments arg Assign Call Return return:yes Call Call Call"
  },
  {
    "library": "numpy",
    "name": "sanitize_cxx_flags",
    "source_code": "def sanitize_cxx_flags(cxxflags):\n    return [flag for flag in cxxflags if flag not in _cxx_ignore_flags]",
    "docstring": "Some flags are valid for C but not C++. Prune them.",
    "type": "function",
    "file_path": "numpy\\numpy\\distutils\\misc_util.py",
    "ast_data": "FunctionDef name:sanitize_cxx_flags arg:cxxflags arguments arg Return return:yes Compare"
  },
  {
    "library": "matplotlib",
    "name": "clear",
    "source_code": "def clear(self, keep_observers=False):\n    self.suppressComposite = None\n    for subfig in self.subfigs:\n        subfig.clear(keep_observers=keep_observers)\n    self.subfigs = []\n    for ax in tuple(self.axes):\n        ax.clear()\n        self.delaxes(ax)\n    self.artists = []\n    self.lines = []\n    self.patches = []\n    self.texts = []\n    self.images = []\n    self.legends = []\n    self.subplotpars.reset()\n    if not keep_observers:\n        self._axobservers = cbook.CallbackRegistry()\n    self._suptitle = None\n    self._supxlabel = None\n    self._supylabel = None\n    self.stale = True",
    "docstring": "Clear the figure. Parameters ---------- keep_observers : bool, default: False Set *keep_observers* to True if, for example, a gui widget is tracking the Axes in the figure.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\figure.py",
    "ast_data": "FunctionDef name:clear arg:self arg:keep_observers arguments arg arg Assign For Call Assign For Call Call Call Assign Assign Assign Assign Assign Assign Call If Assign Call Assign Assign Assign Assign"
  },
  {
    "library": "scikit-learn",
    "name": "_pprint_key_val_tuple",
    "source_code": "def _pprint_key_val_tuple(self, object, stream, indent, allowance, context, level):\n    k, v = object\n    rep = self._repr(k, context, level)\n    if isinstance(object, KeyValTupleParam):\n        rep = rep.strip(\"'\")\n        middle = '='\n    else:\n        middle = ': '\n    stream.write(rep)\n    stream.write(middle)\n    self._format(v, stream, indent + len(rep) + len(middle), allowance, context, level)",
    "docstring": "Pretty printing for key-value tuples from dict or parameters.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\utils\\_pprint.py",
    "ast_data": "FunctionDef name:_pprint_key_val_tuple arg:self arg:object arg:stream arg:indent arg:allowance arg:context arg:level arguments arg arg arg arg arg arg arg Assign Assign Call If Call Assign Call Assign Assign Call Call Call Call Call"
  },
  {
    "library": "cherrypy",
    "name": "__new__",
    "source_code": "def __new__(cls, raw):\n    sanitized = cls._sanitize(raw)\n    if sanitized == raw:\n        return raw\n    instance = super().__new__(cls, sanitized)\n    instance.raw = raw\n    return instance",
    "docstring": "Construct a new :class: instance.",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\lib\\httputil.py",
    "ast_data": "FunctionDef name:__new__ arg:cls arg:raw arguments arg arg Assign Call If Compare Return return:yes Assign Call Call Assign Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "reduce_scatter_tensor_autograd",
    "source_code": "def reduce_scatter_tensor_autograd(self: torch.Tensor, reduceOp: str, scatter_dim: int, group: RANK_TYPES, tag: str=''):\n    group_name = _resolve_group_name(group, tag)\n    group_size = c10d._get_group_size_by_name(group_name)\n    assert self.size(scatter_dim) % group_size == 0, f'input dimension 0 ({self.size(0)} must be a multiple of group_size {group_size}'\n    if scatter_dim != 0:\n        tensor_list = torch.chunk(self, group_size, dim=scatter_dim)\n        self = torch.cat(tensor_list)\n    tensor = torch.ops._c10d_functional_autograd.reduce_scatter_tensor(self, reduceOp.lower(), group_size, group_name)\n    res = _FromTorchTensor.apply(tensor)\n    return res",
    "docstring": "Reduces the tensor data across all machines in such a way that all get the final result, then scatter the results to corresponding ranks. This function is the same as reduce_scatter_tensor but will propagate the backwards gradient across workers. Currently only the \"sum\" reduceOp is supported. See reduce_scatter_tensor for more details on usage.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\_functional_collectives.py",
    "ast_data": "FunctionDef name:reduce_scatter_tensor_autograd arg:self arg:reduceOp arg:scatter_dim arg:group arg:tag arguments arg arg arg arg arg Assign Call Assign Call Compare Call Call If Compare Assign Call Assign Call Assign Call Call Assign Call Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "set_score_request",
    "source_code": "def set_score_request(self, **kwargs):\n    if not _routing_enabled():\n        raise RuntimeError('This method is only available when metadata routing is enabled. You can enable it using sklearn.set_config(enable_metadata_routing=True).')\n    for param, alias in kwargs.items():\n        self._metadata_request.score.add_request(param=param, alias=alias)\n    return self",
    "docstring": "Set requested parameters by the scorer. Please see :ref: on how the routing mechanism works. .. versionadded:: 1.5 Parameters ---------- kwargs : dict Arguments should be of the form `alias`.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\metrics\\_scorer.py",
    "ast_data": "FunctionDef name:set_score_request arg:self arguments arg arg If Call Raise Call For Call Call Return return:yes"
  },
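A usage sketch for `set_score_request`, assuming a scorer produced by `make_scorer` exposes this method (the entry above lives in sklearn.metrics._scorer); routing must be enabled first, per the RuntimeError in the method body.

```python
import sklearn
from sklearn.metrics import accuracy_score, make_scorer

# Enable metadata routing, as required by the method above.
sklearn.set_config(enable_metadata_routing=True)

# Request that sample_weight be routed to this scorer when scoring.
scorer = make_scorer(accuracy_score).set_score_request(sample_weight=True)
```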
  {
    "library": "django",
    "name": "InputStreamExhausted",
    "source_code": "class InputStreamExhausted(Exception):\n    pass",
    "docstring": "No more reads are allowed from this device.",
    "type": "class",
    "file_path": "django\\django\\http\\multipartparser.py",
    "ast_data": "ClassDef name:InputStreamExhausted"
  },
  {
    "library": "pandas",
    "name": "_empty",
    "source_code": "@classmethod\ndef _empty(cls, shape: Shape, dtype: ExtensionDtype):\n    obj = cls._from_sequence([], dtype=dtype)\n    taker = np.broadcast_to(np.intp(-1), shape)\n    result = obj.take(taker, allow_fill=True)\n    if not isinstance(result, cls) or dtype != result.dtype:\n        raise NotImplementedError(f\"Default 'empty' implementation is invalid for dtype='{dtype}'\")\n    return result",
    "docstring": "Create an ExtensionArray with the given shape and dtype. See also -------- ExtensionDtype.empty ExtensionDtype.empty is the 'official' public version of this API.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\arrays\\base.py",
    "ast_data": "FunctionDef name:_empty arg:cls arg:shape arg:dtype arguments arg arg arg Assign Call Assign Call Call Assign Call If BoolOp Call Compare Raise Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "WeakTensorGradient",
    "source_code": "class WeakTensorGradient(composite_tensor_gradient.CompositeTensorGradient):\n\n    def get_gradient_components(self, weak_tensor):\n        return weak_tensor.tensor\n\n    def replace_gradient_components(self, weak_tensor, component_grads):\n        return weak_tensor._type_spec._from_components([component_grads])",
    "docstring": "CompositeTensorGradient for WeakTensor.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\weak_tensor.py",
    "ast_data": "ClassDef name:WeakTensorGradient FunctionDef name:get_gradient_components arg:self arg:weak_tensor arguments arg arg Return return:yes FunctionDef name:replace_gradient_components arg:self arg:weak_tensor arg:component_grads arguments arg arg arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "_new_shared",
    "source_code": "def _new_shared(self, size, *, device=None):\n    if device is None:\n        device = 'cpu'\n    device = torch.device(device)\n    untyped_storage = torch.UntypedStorage._new_shared(size * self._element_size(), device=device)\n    return TypedStorage(wrap_storage=untyped_storage, dtype=self.dtype, _internal=True)",
    "docstring": "Create a new storage in shared memory with the same data type.",
    "type": "method",
    "file_path": "pytorch\\torch\\storage.py",
    "ast_data": "FunctionDef name:_new_shared arg:self arg:size arguments arg arg arg If Compare Assign Assign Call Assign Call Call Return return:yes Call"
  },
  {
    "library": "virtualenv",
    "name": "PathPythonInfo",
    "source_code": "class PathPythonInfo(PythonInfo):\n    pass",
    "docstring": "python info from path.",
    "type": "class",
    "file_path": "virtualenv\\src\\virtualenv\\discovery\\builtin.py",
    "ast_data": "ClassDef name:PathPythonInfo"
  },
  {
    "library": "django",
    "name": "handle",
    "source_code": "def handle(self, *args, **options):\n    raise NotImplementedError('subclasses of BaseCommand must provide a handle() method')",
    "docstring": "The actual logic of the command. Subclasses must implement this method.",
    "type": "method",
    "file_path": "django\\django\\core\\management\\base.py",
    "ast_data": "FunctionDef name:handle arg:self arguments arg arg arg Raise Call"
  },
  {
    "library": "scipy",
    "name": "_invert_index",
    "source_code": "def _invert_index(idx):\n    inv = np.zeros_like(idx)\n    inv[idx] = np.arange(len(idx))\n    return inv",
    "docstring": "Helper function to invert an index array.",
    "type": "function",
    "file_path": "scipy\\scipy\\sparse\\_dia.py",
    "ast_data": "FunctionDef name:_invert_index arg:idx arguments arg Assign Call Assign Call Call Return return:yes"
  },
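The `_invert_index` helper computes an inverse permutation: `inv[idx[i]] == i`, so indexing by `idx` and then by `inv` restores the original order. A self-contained NumPy demonstration:

```python
import numpy as np

idx = np.array([2, 0, 3, 1])
inv = np.zeros_like(idx)
inv[idx] = np.arange(len(idx))  # same construction as _invert_index

data = np.array([10, 20, 30, 40])
assert (data[idx][inv] == data).all()  # permute, then un-permute
print(inv)  # [1 3 0 2]
```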
  {
    "library": "tensorflow",
    "name": "from_config",
    "source_code": "@classmethod\ndef from_config(cls, config, custom_objects=None, columns_by_name=None):\n    from tensorflow.python.feature_column.serialization import deserialize_feature_column\n    _check_config_keys(config, cls._fields)\n    kwargs = _standardize_and_copy_config(config)\n    kwargs['source_column'] = deserialize_feature_column(config['source_column'], custom_objects, columns_by_name)\n    return cls(**kwargs)",
    "docstring": "See 'FeatureColumn` base class.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\feature_column\\feature_column_v2.py",
    "ast_data": "FunctionDef name:from_config arg:cls arg:config arg:custom_objects arg:columns_by_name arguments arg arg arg arg Call Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "make_xp_pytest_param",
    "source_code": "def make_xp_pytest_param(func, *args, capabilities_table=None):\n    import pytest\n    marks = _make_xp_pytest_marks(func, capabilities_table=capabilities_table)\n    return pytest.param(func, *args, marks=marks, id=func.__name__)",
    "docstring": "Variant of ``. *args : Any, optional Extra pytest parameters for the use case, e.g.:: @pytest.mark.parametrize(\"func,verb\", [ make_xp_pytest_param(f1, \"hello\"), make_xp_pytest_param(f2, \"world\")]) def test(func, verb, xp): # iterates on (func=f1, verb=\"hello\") # and (func=f2, verb=\"world\") See Also -------- xp_capabilities make_xp_test_case array_api_extra.testing.lazy_xp_function",
    "type": "function",
    "file_path": "scipy\\scipy\\_lib\\_array_api.py",
    "ast_data": "FunctionDef name:make_xp_pytest_param arg:func arguments arg arg arg Assign Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, node_def, op, message, *args):\n    super(FailedPreconditionError, self).__init__(node_def, op, message, FAILED_PRECONDITION, *args)",
    "docstring": "Creates a .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\errors_impl.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:node_def arg:op arg:message arguments arg arg arg arg arg Call Call"
  },
  {
    "library": "django",
    "name": "AlreadyRegistered",
    "source_code": "class AlreadyRegistered(Exception):\n    pass",
    "docstring": "The model is already registered.",
    "type": "class",
    "file_path": "django\\django\\contrib\\admin\\exceptions.py",
    "ast_data": "ClassDef name:AlreadyRegistered"
  },
  {
    "library": "cherrypy",
    "name": "get_ha1_file_htdigest",
    "source_code": "def get_ha1_file_htdigest(filename):\n\n    def get_ha1(realm, username):\n        result = None\n        with open(filename, 'r') as f:\n            for line in f:\n                u, r, ha1 = line.rstrip().split(':')\n                if u == username and r == realm:\n                    result = ha1\n                    break\n        return result\n    return get_ha1",
    "docstring": "Return a get_ha1 function. The returned function obtains a HA1 password hash from a flat file with lines of the same format as that produced by the Apache htdigest utility. For example, for realm 'wonderland', username 'alice', and password '4x5istwelve', the htdigest line would be:: alice:wonderland:3238cdfe91a8b2ed8e39646921a02d4c If you want to use an Apache htdigest file as the credentials store, then use get_ha1_file_htdigest(my_htdigest_file) as the value for the get_ha1 argument to digest_auth(). It is recommended that the filename argument be an absolute path, to avoid problems.",
    "type": "function",
    "file_path": "cherrypy\\cherrypy\\lib\\auth_digest.py",
    "ast_data": "FunctionDef name:get_ha1_file_htdigest arg:filename arguments arg FunctionDef name:get_ha1 arg:realm arg:username arguments arg arg Assign With Call For Assign Call Call If BoolOp Compare Compare Assign Return return:yes Return return:yes"
  },
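A sketch of producing an htdigest-style line compatible with `get_ha1_file_htdigest`, assuming the standard Apache construction HA1 = MD5("username:realm:password") implied by the docstring; the helper name is hypothetical.

```python
from hashlib import md5

def htdigest_line(username: str, realm: str, password: str) -> str:
    # HA1 is the MD5 of "username:realm:password", as in Apache htdigest.
    ha1 = md5(f'{username}:{realm}:{password}'.encode()).hexdigest()
    return f'{username}:{realm}:{ha1}'

# Should reproduce the docstring's example line for alice/wonderland.
print(htdigest_line('alice', 'wonderland', '4x5istwelve'))
```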
  {
    "library": "matplotlib",
    "name": "SaveFigureBase",
    "source_code": "class SaveFigureBase(ToolBase):\n    description = 'Save the figure'\n    image = 'mpl-data/images/filesave'\n    default_keymap = property(lambda self: mpl.rcParams['keymap.save'])",
    "docstring": "Base tool for figure saving.",
    "type": "class",
    "file_path": "matplotlib\\lib\\matplotlib\\backend_tools.py",
    "ast_data": "ClassDef name:SaveFigureBase Assign Assign Assign Call arguments arg"
  },
  {
    "library": "tensorflow",
    "name": "_get_outputs_tensor_info_from_meta_graph_def",
    "source_code": "def _get_outputs_tensor_info_from_meta_graph_def(meta_graph_def, signature_def_key):\n    return meta_graph_def.signature_def[signature_def_key].outputs",
    "docstring": "Gets TensorInfos for all outputs of the SignatureDef. Returns a dictionary that maps each output key to its TensorInfo for the given signature_def_key in the meta_graph_def. Args: meta_graph_def: MetaGraphDef protocol buffer with the SignatureDefmap to look up signature_def_key. signature_def_key: A SignatureDef key string. Returns: A dictionary that maps output tensor keys to TensorInfos.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\tools\\saved_model_cli.py",
    "ast_data": "FunctionDef name:_get_outputs_tensor_info_from_meta_graph_def arg:meta_graph_def arg:signature_def_key arguments arg arg Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "_consumes",
    "source_code": "def _consumes(self, params):\n    params = set(params)\n    res = set()\n    for prop, alias in self._requests.items():\n        if alias is True and prop in params:\n            res.add(prop)\n        elif isinstance(alias, str) and alias in params:\n            res.add(alias)\n    return res",
    "docstring": "Check whether the given parameters are consumed by this method. Parameters ---------- params : iterable of str An iterable of parameters to check. Returns ------- consumed : set of str A set of parameters which are consumed by this method.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\utils\\_metadata_requests.py",
    "ast_data": "FunctionDef name:_consumes arg:self arg:params arguments arg arg Assign Call Assign Call For Call If BoolOp Compare Compare Call If BoolOp Call Compare Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "global_variables",
    "source_code": "@property\ndef global_variables(self):\n    if not self._variables_created:\n        return []\n    return self.variables",
    "docstring": "Returns the list of global variables created by the Template.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\template.py",
    "ast_data": "FunctionDef name:global_variables arg:self arguments arg If Return return:no Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "is_match",
    "source_code": "def is_match(m: MatchResult) -> TypeIs[Match]:\n    return bool(m)",
    "docstring": "TypeIs cannot act on . Thus this function exists to let mypy recognize FailedMatch.__bool__ as a TypeIs.",
    "type": "function",
    "file_path": "pytorch\\torch\\_inductor\\pattern_matcher.py",
    "ast_data": "FunctionDef name:is_match arg:m arguments arg Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "pdf",
    "source_code": "def pdf(self, x, mu=None, kappa=1):\n    dim, mu, kappa = self._process_parameters(mu, kappa)\n    return np.exp(self._logpdf(x, dim, mu, kappa))",
    "docstring": "Von Mises-Fisher probability density function. Parameters ---------- x : array_like Points at which to evaluate the probability density function. The last axis of must correspond to unit vectors of the same dimensionality as the distribution. mu : array_like Mean direction of the distribution. Must be a one-dimensional unit vector of norm 1. kappa : float Concentration parameter. Must be positive. Returns ------- pdf : ndarray or scalar Probability density function evaluated at .",
    "type": "method",
    "file_path": "scipy\\scipy\\stats\\_multivariate.py",
    "ast_data": "FunctionDef name:pdf arg:self arg:x arg:mu arg:kappa arguments arg arg arg arg Assign Call Return return:yes Call Call"
  },
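A usage sketch via the public `scipy.stats.vonmises_fisher` frozen distribution (available in SciPy >= 1.11); the direction and concentration values are illustrative.

```python
import numpy as np
from scipy.stats import vonmises_fisher

mu = np.array([0.0, 0.0, 1.0])       # mean direction (unit vector)
vmf = vonmises_fisher(mu=mu, kappa=5.0)

print(vmf.pdf(mu))                            # highest density at the mean direction
print(vmf.pdf(np.array([1.0, 0.0, 0.0])))     # lower, 90 degrees away
```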
  {
    "library": "pytorch",
    "name": "linear_inference_rule",
    "source_code": "@register_inference_rule(torch.nn.Linear)\ndef linear_inference_rule(n: Node, module_instance, symbols, constraints, counter):\n    assert isinstance(n.args[0], Node)\n    return linear_constraints(n, module_instance.in_features, module_instance.out_features, symbols, counter)",
    "docstring": "Input and output sizes should be the same except for the last dimension If the input is Dyn, then so should the output",
    "type": "function",
    "file_path": "pytorch\\torch\\fx\\experimental\\migrate_gradual_types\\constraint_generator.py",
    "ast_data": "FunctionDef name:linear_inference_rule arg:n arg:module_instance arg:symbols arg:constraints arg:counter arguments arg arg arg arg arg Call Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "simple_broadcast",
    "source_code": "def simple_broadcast(value, destinations, always_mirrored=False, canonicalize_devices=True):\n    devices = get_devices_from(destinations, canonicalize_devices)\n    if len(devices) == 1 and (not always_mirrored):\n        return cross_device_utils.copy_tensor_or_indexed_slices_to_device(value, devices[0])\n    else:\n        value_updates = []\n        for d in devices:\n            value_updates.append(cross_device_utils.copy_tensor_or_indexed_slices_to_device(value, d))\n        return distribute_utils.regroup(value_updates, wrap_class=value_lib.Mirrored)",
    "docstring": "Broadcast to using simple copies.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\cross_device_ops.py",
    "ast_data": "FunctionDef name:simple_broadcast arg:value arg:destinations arg:always_mirrored arg:canonicalize_devices arguments arg arg arg arg Assign Call If BoolOp Compare Call Return return:yes Call Assign For Call Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "embedding_dense_backward_strategy",
    "source_code": "@register_op_strategy(aten.embedding_dense_backward.default)\ndef embedding_dense_backward_strategy(op_schema: OpSchema) -> StrategyType:\n    grad_out_strategy = cast(OpStrategy, op_schema.args_schema[0])\n    indices_strategy = cast(OpStrategy, op_schema.args_schema[1])\n    mesh = op_schema.get_mesh_from_args()\n    grad_out_shape = grad_out_strategy.shape\n    indices_shape = indices_strategy.shape\n    grad_out_ndim = len(grad_out_shape)\n    single_mesh_dim_strategies = []\n    all_replicate: PlacementList = [Replicate()] * 3\n    single_mesh_dim_strategies.append(all_replicate)\n    colwise_sharding: PlacementList = [Shard(1), Shard(grad_out_ndim - 1), Replicate()]\n    single_mesh_dim_strategies.append(colwise_sharding)\n    for input_dim in range(len(indices_shape)):\n        batch_sharding: PlacementList = [Partial(), Shard(input_dim), Shard(input_dim)]\n        single_mesh_dim_strategies.append(batch_sharding)\n    partial_sharding: PlacementList = [Partial(), Partial(), Replicate()]\n    single_mesh_dim_strategies.append(partial_sharding)\n    return expand_to_full_mesh_op_strategy(mesh, op_schema, single_mesh_dim_strategies)",
    "docstring": "This strategy handles embedding op. We have two possible embedding shardings: rowwise and colwise",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\tensor\\_ops\\_embedding_ops.py",
    "ast_data": "FunctionDef name:embedding_dense_backward_strategy arg:op_schema arguments arg Assign Call Assign Call Assign Call Assign Assign Assign Call Assign Call Call Call Call Call Call For Call Call Call Call Call Call Call Call Call Call Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "VariadicSignatureMeta",
    "source_code": "class VariadicSignatureMeta(type):\n\n    def __getitem__(cls, variadic_type):\n        if not (isinstance(variadic_type, (type, tuple)) or type(variadic_type)):\n            raise ValueError('Variadic types must be type or tuple of types (Variadic[int] or Variadic[(int, float)]')\n        if not isinstance(variadic_type, tuple):\n            variadic_type = (variadic_type,)\n        return VariadicSignatureType(f'Variadic[{typename(variadic_type)}]', (), dict(variadic_type=variadic_type, __slots__=()))",
    "docstring": "A metaclass that overrides `` on the class. This is used to generate a new type for Variadic signatures. See the Variadic class for examples of how this behaves.",
    "type": "class",
    "file_path": "pytorch\\torch\\fx\\experimental\\unification\\multipledispatch\\variadic.py",
    "ast_data": "ClassDef name:VariadicSignatureMeta FunctionDef name:__getitem__ arg:cls arg:variadic_type arguments arg arg If BoolOp Call Call Raise Call If Call Assign Return return:yes Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "clear_session",
    "source_code": "def clear_session():\n    global _SESSION\n    global _GRAPH_LEARNING_PHASES\n    global _GRAPH_VARIABLES\n    global _GRAPH_TF_OPTIMIZERS\n    global _GRAPH\n    _GRAPH.graph = None\n    ops.reset_default_graph()\n    reset_uids()\n    _SESSION.session = None\n    graph = get_graph()\n    with graph.as_default():\n        _DUMMY_EAGER_GRAPH.learning_phase_is_set = False\n        _GRAPH_LEARNING_PHASES.clear()\n        _GRAPH_LEARNING_PHASES.setdefault(graph)\n        _GRAPH_VARIABLES.pop(graph, None)\n        _GRAPH_TF_OPTIMIZERS.pop(graph, None)\n    if context.executing_eagerly():\n        context.context().clear_kernel_cache()",
    "docstring": "Resets all state generated by Keras. Keras manages a global state, which it uses to implement the Functional model-building API and to uniquify autogenerated layer names. If you are creating many models in a loop, this global state will consume an increasing amount of memory over time, and you may want to clear it. Calling releases the global state: this helps avoid clutter from old models and layers, especially when memory is limited. Example 1: calling when creating models in a loop Example 2: resetting the layer name generation counter >>> import tensorflow as tf >>> layers = [tf.keras.layers.Dense(10) for _ in range(10)] >>> new_layer = tf.keras.layers.Dense(10) >>> print(new_layer.name) dense_10 >>> tf.keras.backend.set_learning_phase(1) >>> print(tf.keras.backend.learning_phase()) 1 >>> tf.keras.backend.clear_session() >>> new_layer = tf.keras.layers.Dense(10) >>> print(new_layer.name) dense",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\backend.py",
    "ast_data": "FunctionDef name:clear_session arguments Assign Call Call Assign Assign Call With Call Assign Call Call Call Call If Call Call Call"
  },
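The loop pattern from the `clear_session` docstring, made concrete: without the call, layer-name counters and graph state accumulate across iterations; with it, each model starts fresh.

```python
import tensorflow as tf

for _ in range(5):
    tf.keras.backend.clear_session()
    model = tf.keras.Sequential([tf.keras.layers.Dense(10) for _ in range(3)])
    print(model.layers[0].name)  # 'dense' every time, not 'dense_3', 'dense_6', ...
```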
  {
    "library": "scikit-learn",
    "name": "HalfSquaredError",
    "source_code": "class HalfSquaredError(BaseLoss):\n\n    def __init__(self, sample_weight=None):\n        super().__init__(closs=CyHalfSquaredError(), link=IdentityLink())\n        self.constant_hessian = sample_weight is None",
    "docstring": "Half squared error with identity link, for regression. Domain: y_true and y_pred all real numbers Link: y_pred = raw_prediction For a given sample x_i, half squared error is defined as:: loss(x_i) = 0.5 * (y_true_i - raw_prediction_i)**2 The factor of 0.5 simplifies the computation of gradients and results in a unit hessian (and is consistent with what is done in LightGBM). It is also half the Normal distribution deviance.",
    "type": "class",
    "file_path": "scikit-learn\\sklearn\\_loss\\loss.py",
    "ast_data": "ClassDef name:HalfSquaredError FunctionDef name:__init__ arg:self arg:sample_weight arguments arg arg Call Call Call Call Assign Compare"
  },
  {
    "library": "tensorflow",
    "name": "sanity_check_type",
    "source_code": "def sanity_check_type(self, other):\n    if type(self) is not type(other):\n        raise ValueError('No TypeSpec is compatible with both %s and %s' % (self, other))\n    if self._input_workers.serialize() != other._input_workers.serialize():\n        raise ValueError('_input_workers is not compatible with both %s and %s' % (self, other))\n    if self._strategy is not other._strategy:\n        raise ValueError('tf.distribute strategy is not compatible with both %s and %s' % (self, other))",
    "docstring": "Returns the most specific TypeSpec compatible with and . Args: other: A . Raises: ValueError: If there is no TypeSpec that is compatible with both and .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\input_lib.py",
    "ast_data": "FunctionDef name:sanity_check_type arg:self arg:other arguments arg arg If Compare Call Call Raise Call If Compare Call Call Raise Call If Compare Raise Call"
  },
  {
    "library": "tensorflow",
    "name": "from_nested_value_rowids",
    "source_code": "@classmethod\n@dispatch.add_dispatch_support\ndef from_nested_value_rowids(cls, flat_values, nested_value_rowids, nested_nrows=None, name=None, validate=True):\n    if not isinstance(validate, bool):\n        raise TypeError(f'Argument `validate` must have type bool. Received {validate}.')\n    if isinstance(nested_value_rowids, tensor_lib.Tensor):\n        raise TypeError(f'Argument `nested_value_rowids` must be a list of Tensors. Received {nested_value_rowids}.')\n    if nested_nrows is None:\n        nested_nrows = [None] * len(nested_value_rowids)\n    else:\n        if isinstance(nested_nrows, tensor_lib.Tensor):\n            raise TypeError(f'Argument `nested_nrows` must be a list of Tensors. Received {nested_nrows}.')\n        if len(nested_nrows) != len(nested_value_rowids):\n            raise ValueError(f'Argument `nested_nrows` must have the same length as argument `nested_value_rowids`. len(nested_nrows) = {len(nested_nrows)} vs. len(nested_values_rowids) = {len(nested_value_rowids)}.')\n    with ops.name_scope(name, 'RaggedFromNestedValueRowIds', [flat_values] + list(nested_value_rowids) + list(nested_nrows)):\n        result = flat_values\n        for value_rowids, nrows in reversed(list(zip(nested_value_rowids, nested_nrows))):\n            result = cls.from_value_rowids(result, value_rowids, nrows, validate=validate)\n        return result",
    "docstring": "Creates a from a nested list of tensors. Equivalent to: Args: flat_values: A potentially ragged tensor. nested_value_rowids: A list of 1-D integer tensors. The th tensor is used as the for the th ragged dimension. nested_nrows: A list of integer scalars. The th scalar is used as the for the th ragged dimension. name: A name prefix for the RaggedTensor (optional). validate: If true, then use assertions to check that the arguments form a valid . Note: these assertions incur a runtime cost, since they must be checked for each tensor value. Returns: A (or if is empty). Raises: ValueError: If .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\ragged_tensor.py",
    "ast_data": "FunctionDef name:from_nested_value_rowids arg:cls arg:flat_values arg:nested_value_rowids arg:nested_nrows arg:name arg:validate arguments arg arg arg arg arg arg If Call Raise Call If Call Raise Call If Compare Assign Call If Call Raise Call If Compare Call Call Raise Call Call Call With Call Call Call Assign For Call Call Call Assign Call Return return:yes"
  },
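A small two-level construction with the public `tf.RaggedTensor.from_nested_value_rowids` API; the values are invented, but the shapes follow the rule in the implementation's loop (the innermost `value_rowids` comes last, with one entry per flat value, and each outer `value_rowids` maps the rows produced by the level below it).

```python
import tensorflow as tf

rt = tf.RaggedTensor.from_nested_value_rowids(
    flat_values=[1, 2, 3, 4, 5],
    nested_value_rowids=[[0, 0, 2], [0, 0, 1, 2, 2]],
    nested_nrows=[3, 3])
print(rt)  # <tf.RaggedTensor [[[1, 2], [3]], [], [[4, 5]]]>
```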
  {
    "library": "tensorflow",
    "name": "_DynamicStitchGrads",
    "source_code": "@ops.RegisterGradient('DynamicStitch')\n@ops.RegisterGradient('ParallelDynamicStitch')\ndef _DynamicStitchGrads(op, grad):\n    num_values = len(op.inputs) // 2\n    indices_grad = [None] * num_values\n\n    def AsInt32(x):\n        return x if op.inputs[0].dtype == dtypes.int32 else math_ops.cast(x, dtypes.int32)\n    inputs = [AsInt32(op.inputs[i]) for i in range(num_values)]\n    if isinstance(grad, indexed_slices.IndexedSlices):\n        output_shape = array_ops.shape(op.outputs[0])\n        output_rows = output_shape[0]\n        grad = math_ops.unsorted_segment_sum(grad.values, grad.indices, output_rows)\n    values_grad = [array_ops.gather(grad, inp) for inp in inputs]\n    return indices_grad + values_grad",
    "docstring": "Gradients for DynamicStitch and ParallelDynamicStitch.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\data_flow_grad.py",
    "ast_data": "FunctionDef name:_DynamicStitchGrads arg:op arg:grad arguments arg arg Assign Call Assign FunctionDef name:AsInt32 arg:x arguments arg Return return:yes Compare Call Assign Call Call If Call Assign Call Assign Assign Call Assign Call Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_split_trackables",
    "source_code": "def _split_trackables(trackable_data: List[_TrackableData]) -> Tuple[List[_TrackableData], List[_TrackableData], Dict[str, List[_TrackableData]]]:\n    tensor_trackables = []\n    pystate_trackables = []\n    registered_trackables = collections.defaultdict(list)\n    for td in trackable_data:\n        saver_name = registration.get_registered_saver_name(td.object_to_save)\n        if isinstance(td.object_to_save, python_state.PythonState):\n            pystate_trackables.append(td)\n        elif saver_name:\n            registered_trackables[saver_name].append(td)\n        else:\n            tensor_trackables.append(td)\n    return (tensor_trackables, pystate_trackables, registered_trackables)",
    "docstring": "Splits Trackables into 3 categories (tensor/pystate/registered).",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\checkpoint\\save_util.py",
    "ast_data": "FunctionDef name:_split_trackables arg:trackable_data arguments arg Assign Assign Assign Call For Assign Call If Call Call If Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "restore",
    "source_code": "def restore(self, restored_tensors, restored_shapes):\n    tensor, = restored_tensors\n    return values_util.get_on_write_restore_ops(self._mirrored_variable, tensor)",
    "docstring": "Restore the same value into all variables.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\values.py",
    "ast_data": "FunctionDef name:restore arg:self arg:restored_tensors arg:restored_shapes arguments arg arg arg Assign Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_shape_tuple",
    "source_code": "def _shape_tuple(self) -> NoReturn:\n    raise NotImplementedError()",
    "docstring": "The shape of this Tensor, as a tuple. This is more performant than tuple(shape().as_list()) as it avoids two list and one object creation. Marked private for now as from an API perspective, it would be better to have a single performant way of getting a shape rather than exposing shape() and shape_tuple() (and heaven forbid, shape_list() etc. as well!). Punting on that for now, but ideally one would work things out and remove the need for this method. Returns: tuple with the shape.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\ops.py",
    "ast_data": "FunctionDef name:_shape_tuple arg:self arguments arg Raise Call"
  },
  {
    "library": "matplotlib",
    "name": "clear",
    "source_code": "def clear(self):\n    self.label._reset_visual_defaults()\n    self.label.set_color(mpl.rcParams['axes.labelcolor'])\n    self.label.set_fontsize(mpl.rcParams['axes.labelsize'])\n    self.label.set_fontweight(mpl.rcParams['axes.labelweight'])\n    self.offsetText._reset_visual_defaults()\n    self.labelpad = mpl.rcParams['axes.labelpad']\n    self._init()\n    self._set_scale('linear')\n    self.callbacks = cbook.CallbackRegistry(signals=['units'])\n    self._major_tick_kw['gridOn'] = mpl.rcParams['axes.grid'] and mpl.rcParams['axes.grid.which'] in ('both', 'major')\n    self._minor_tick_kw['gridOn'] = mpl.rcParams['axes.grid'] and mpl.rcParams['axes.grid.which'] in ('both', 'minor')\n    self.reset_ticks()\n    self._converter = None\n    self._converter_is_explicit = False\n    self.units = None\n    self.stale = True",
    "docstring": "Clear the axis. This resets axis properties to their default values: - the label - the scale - locators, formatters and ticks - major and minor grid - units - registered callbacks",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\axis.py",
    "ast_data": "FunctionDef name:clear arg:self arguments arg Call Call Call Call Call Assign Call Call Assign Call Assign BoolOp Compare Assign BoolOp Compare Call Assign Assign Assign Assign"
  },
  {
    "library": "pytorch",
    "name": "BytecodeDistpatchTableMeta",
    "source_code": "class BytecodeDistpatchTableMeta(type):\n\n    def __init__(cls, name, bases, dct) -> None:\n        super().__init__(name, bases, dct)\n\n        def _missing(opname, *args):\n            unimplemented_v2(gb_type='Missing bytecode handler', context=f'{opname} with args {args}', explanation=f'Dynamo does not know how to handle the bytecode instruction `{opname}`.', hints=[f'Do not trace code that produces the `{opname}` bytecode instruction (see https://docs.python.org/3/library/dis.html for bytecode semantics).', *graph_break_hints.SUPPORTABLE])\n        dispatch_table = {op: getattr(cls, opname, functools.partial(_missing, opname)) for opname, op in dis.opmap.items()}\n        cls.dispatch_table = [dispatch_table.get(i) for i in range(2 ** 8)]",
    "docstring": "Installs a on every subclass to speed up calls to self.OPCODE()",
    "type": "class",
    "file_path": "pytorch\\torch\\_dynamo\\symbolic_convert.py",
    "ast_data": "ClassDef name:BytecodeDistpatchTableMeta FunctionDef name:__init__ arg:cls arg:name arg:bases arg:dct arguments arg arg arg arg Call Call FunctionDef name:_missing arg:opname arguments arg arg Call Assign Call Call Call Assign Call Call"
  },
  {
    "library": "seaborn",
    "name": "_repr_png_",
    "source_code": "def _repr_png_(self):\n    import io\n    from PIL import Image\n    import numpy as np\n    IMAGE_SIZE = (400, 50)\n    X = np.tile(np.linspace(0, 1, IMAGE_SIZE[0]), (IMAGE_SIZE[1], 1))\n    pixels = self(X, bytes=True)\n    png_bytes = io.BytesIO()\n    Image.fromarray(pixels).save(png_bytes, format='png')\n    return png_bytes.getvalue()",
    "docstring": "Generate a PNG representation of the Colormap.",
    "type": "function",
    "file_path": "seaborn\\seaborn\\palettes.py",
    "ast_data": "FunctionDef name:_repr_png_ arg:self arguments arg Assign Assign Call Call Assign Call Assign Call Call Call Return return:yes Call"
  },
  {
    "library": "django",
    "name": "OrderWrt",
    "source_code": "class OrderWrt(fields.IntegerField):\n\n    def __init__(self, *args, **kwargs):\n        kwargs['name'] = '_order'\n        kwargs['editable'] = False\n        super().__init__(*args, **kwargs)",
    "docstring": "A proxy for the _order database field that is used when Meta.order_with_respect_to is specified.",
    "type": "class",
    "file_path": "django\\django\\db\\models\\fields\\proxy.py",
    "ast_data": "ClassDef name:OrderWrt FunctionDef name:__init__ arg:self arguments arg arg arg Assign Assign Call Call"
  },
  {
    "library": "pandas",
    "name": "kind",
    "source_code": "@property\ndef kind(self) -> str:\n    return 'O'",
    "docstring": "A character code (one of 'biufcmMOSUV'), default 'O' This should match the NumPy dtype used when the array is converted to an ndarray, which is probably 'O' for object if the extension type cannot be represented as a built-in NumPy type. See Also -------- numpy.dtype.kind",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\dtypes\\base.py",
    "ast_data": "FunctionDef name:kind arg:self arguments arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "saved_tensors_hooks",
    "source_code": "class saved_tensors_hooks:\n\n    def __init__(self, pack_hook: Callable[[torch.Tensor], Any], unpack_hook: Callable[[Any], torch.Tensor]) -> None:\n        self.pack_hook = pack_hook\n        self.unpack_hook = unpack_hook\n\n    def __enter__(self) -> None:\n        torch._C._autograd._push_saved_tensors_default_hooks(self.pack_hook, self.unpack_hook)\n\n    def __exit__(self, *args: object) -> None:\n        torch._C._autograd._pop_saved_tensors_default_hooks()",
    "docstring": "Context-manager that sets a pair of pack / unpack hooks for saved tensors. Use this context-manager to define how intermediary results of an operation should be packed before saving, and unpacked on retrieval. In that context, the `~torch.autograd.function._ContextMethodMixin.save_for_backwardtorch.Tensor.backward()torch.autograd.grad()lambda x: x.detach()lambda x: x` as the pack hook.",
    "type": "class",
    "file_path": "pytorch\\torch\\autograd\\graph.py",
    "ast_data": "ClassDef name:saved_tensors_hooks FunctionDef name:__init__ arg:self arg:pack_hook arg:unpack_hook arguments arg arg arg Assign Assign FunctionDef name:__enter__ arg:self arguments arg Call FunctionDef name:__exit__ arg:self arguments arg arg Call"
  },
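A minimal demonstration of `saved_tensors_hooks` through its public entry point `torch.autograd.graph.saved_tensors_hooks`; the print statements just make the pack/unpack timing visible.

```python
import torch

def pack(x):
    print('packing tensor of shape', tuple(x.shape))
    return x.detach()

def unpack(x):
    print('unpacking tensor of shape', tuple(x.shape))
    return x

a = torch.randn(3, requires_grad=True)
with torch.autograd.graph.saved_tensors_hooks(pack, unpack):
    y = (a * a).sum()  # autograd saves tensors here -> pack runs
y.backward()           # saved tensors are read back -> unpack runs
```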
  {
    "library": "pytorch",
    "name": "_store_outputs",
    "source_code": "def _store_outputs(self, a_result: TensorOrTensors, b_result: TensorOrTensors, submodule: torch.fx.GraphModule):\n    output_node = next((node for node in submodule.graph.nodes if node.op == 'output'))\n    if isinstance(output_node.args[0], torch.fx.Node):\n        self.a_outputs[output_node.args[0].name] = a_result\n        self.b_outputs[output_node.args[0].name] = b_result\n    else:\n        for i, arg in enumerate(output_node.args[0]):\n            self.a_outputs[arg.name] = a_result[i]\n            self.b_outputs[arg.name] = b_result[i]",
    "docstring": "Store the outputs of self.run_a() and self.run_b() into self.a_outputs and self.b_outputs, so that we can use them when execute preceding nodes that use those outputs as inputs. Args: a_result: Output of self.run_a(). Could be a tensor or tensors. b_result: Output of self.run_b(). Could be a tensor or tensors. submodule: The module that generates a_result and b_result.",
    "type": "method",
    "file_path": "pytorch\\torch\\fx\\passes\\net_min_base.py",
    "ast_data": "FunctionDef name:_store_outputs arg:self arg:a_result arg:b_result arg:submodule arguments arg arg arg arg Assign Call Compare If Call Assign Assign For Call Assign Assign"
  },
  {
    "library": "matplotlib",
    "name": "get_capstyle",
    "source_code": "def get_capstyle(self):\n    return self._capstyle.name",
    "docstring": "Return the capstyle.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\patches.py",
    "ast_data": "FunctionDef name:get_capstyle arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_create_non_trackable_mask_cache",
    "source_code": "@trackable.no_automatic_dependency_tracking\ndef _create_non_trackable_mask_cache(self):\n    self._dropout_mask_cache = backend.ContextValueCache(self._create_dropout_mask)\n    self._recurrent_dropout_mask_cache = backend.ContextValueCache(self._create_recurrent_dropout_mask)",
    "docstring": "Create the cache for dropout and recurrent dropout mask. Note that the following two masks will be used in \"graph function\" mode, e.g. these masks are symbolic tensors. In eager mode, the tensors will be generated differently than in the \"graph function\" case, and they will be cached. Also note that in graph mode, we still cache those masks only because the RNN could be created with . In that case, the function will be invoked multiple times, and we want to ensure same mask is used every time. Also the caches are created without tracking. Since they are not picklable by python when deepcopy, we don't want to track it by default.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\layers\\recurrent.py",
    "ast_data": "FunctionDef name:_create_non_trackable_mask_cache arg:self arguments arg Assign Call Assign Call"
  },
  {
    "library": "tensorflow",
    "name": "Buckets",
    "source_code": "class Buckets(object):\n    __slots__ = ['buckets']\n\n    def __init__(self, buckets):\n        self.buckets = buckets\n\n    def __del__(self):\n        pywrap_tfe.TFE_MonitoringDeleteBuckets(self.buckets)",
    "docstring": "Bucketing strategies for the samplers.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\monitoring.py",
    "ast_data": "ClassDef name:Buckets Assign FunctionDef name:__init__ arg:self arg:buckets arguments arg arg Assign FunctionDef name:__del__ arg:self arguments arg Call"
  },
  {
    "library": "sphinx",
    "name": "make_id",
    "source_code": "def make_id(env: BuildEnvironment, document: nodes.document, prefix: str='', term: str | None=None) -> str:\n    node_id = None\n    if prefix:\n        idformat = prefix + '-%s'\n    else:\n        idformat = (document.settings.id_prefix or 'id') + '%s'\n    if prefix and term:\n        node_id = _make_id(idformat % term)\n        if node_id == prefix:\n            node_id = None\n    elif term:\n        node_id = _make_id(term)\n        if not node_id:\n            node_id = None\n    while node_id is None or node_id in document.ids:\n        node_id = idformat % env.new_serialno(prefix)\n    return node_id",
    "docstring": "Generate an appropriate node_id for given *prefix* and *term*.",
    "type": "function",
    "file_path": "sphinx\\sphinx\\util\\nodes.py",
    "ast_data": "FunctionDef name:make_id arg:env arg:document arg:prefix arg:term arguments arg arg arg arg Assign If Assign Assign BoolOp If BoolOp Assign Call If Compare Assign If Assign Call If Assign While BoolOp Compare Compare Assign Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_raw_device_uuid_nvml",
    "source_code": "def _raw_device_uuid_nvml() -> Optional[list[str]]:\n    from ctypes import byref, c_int, c_void_p, CDLL, create_string_buffer\n    nvml_h = CDLL('libnvidia-ml.so.1')\n    rc = nvml_h.nvmlInit()\n    if rc != 0:\n        warnings.warn(\"Can't initialize NVML\")\n        return None\n    dev_count = c_int(-1)\n    rc = nvml_h.nvmlDeviceGetCount_v2(byref(dev_count))\n    if rc != 0:\n        warnings.warn(\"Can't get nvml device count\")\n        return None\n    uuids: list[str] = []\n    for idx in range(dev_count.value):\n        dev_id = c_void_p()\n        rc = nvml_h.nvmlDeviceGetHandleByIndex_v2(idx, byref(dev_id))\n        if rc != 0:\n            warnings.warn(\"Can't get device handle\")\n            return None\n        buf_len = 96\n        buf = create_string_buffer(buf_len)\n        rc = nvml_h.nvmlDeviceGetUUID(dev_id, buf, buf_len)\n        if rc != 0:\n            warnings.warn(\"Can't get device UUID\")\n            return None\n        uuids.append(buf.raw.decode('ascii').strip('\\x00'))\n    del nvml_h\n    return uuids",
    "docstring": "Return list of device UUID as reported by NVML or None if NVM discovery/initialization failed.",
    "type": "function",
    "file_path": "pytorch\\torch\\cuda\\__init__.py",
    "ast_data": "FunctionDef name:_raw_device_uuid_nvml arguments Assign Call Assign Call If Compare Call Return return:no Assign Call Assign Call Call If Compare Call Return return:no For Call Assign Call Assign Call Call If Compare Call Return return:no Assign Assign Call Assign Call If Compare Call Return return:no Call Call Call Return return:yes"
  },
  {
    "library": "scipy",
    "name": "support",
    "source_code": "def support(self):\n    return self._domain_adj",
    "docstring": "Support of the distribution. Returns ------- a, b : float end-points of the distribution's support. Notes ----- Note that the support of the distribution depends on , and . Examples -------- >>> from scipy import stats >>> from scipy.stats.sampling import FastGeneratorInversion Define a truncated normal distribution: >>> d_norm = FastGeneratorInversion(stats.norm(), domain=(0, 1)) >>> d_norm.support() (0, 1) Shift the distribution: >>> d_norm.loc = 2.5 >>> d_norm.support() (2.5, 3.5)",
    "type": "method",
    "file_path": "scipy\\scipy\\stats\\_sampling.py",
    "ast_data": "FunctionDef name:support arg:self arguments arg Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "set_longitude_grid_ends",
    "source_code": "def set_longitude_grid_ends(self, degrees):\n    self._longitude_cap = np.deg2rad(degrees)\n    self._xaxis_pretransform.clear().scale(1.0, self._longitude_cap * 2.0).translate(0.0, -self._longitude_cap)",
    "docstring": "Set the latitude(s) at which to stop drawing the longitude grids.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\projections\\geo.py",
    "ast_data": "FunctionDef name:set_longitude_grid_ends arg:self arg:degrees arguments arg arg Assign Call Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_from_spec",
    "source_code": "@classmethod\ndef _from_spec(cls, spec: Union['DynamicRaggedShape.Spec', ragged_tensor.RaggedTensorSpec, tensor_lib.TensorSpec], dtype: dtypes.DType=dtypes.int64) -> 'DynamicRaggedShape.Spec':\n    if isinstance(spec, DynamicRaggedShape.Spec):\n        return spec\n    elif isinstance(spec, ragged_tensor.RaggedTensorSpec):\n        return cls._from_tensor_shape(spec.shape, spec.ragged_rank, spec.row_splits_dtype)\n    elif isinstance(spec, tensor_lib.TensorSpec):\n        return cls._from_tensor_shape(shape=spec.shape, num_row_partitions=0, dtype=dtype)",
    "docstring": "Create a TypeSpec for the shape of an object with a given TypeSpec. I.e., if , then returns a TypeSpec compatible with . >>> rt = tf.ragged.constant([[1, 2], [3], [4, 5, 6]]) >>> rt_spec = tf.type_spec_from_value(rt) >>> rt_shape = DynamicRaggedShape.from_tensor(rt) >>> shape_spec_1 = tf.type_spec_from_value(rt_shape) >>> shape_spec_2 = DynamicRaggedShape.Spec._from_spec(rt_spec) >>> assert shape_spec_1.is_compatible_with(shape_spec_2) Args: spec: a Spec of a Tensor or RaggedTensor. dtype: the default dtype (if necessary). Returns: A Spec of the shape of a Tensor or RaggedTensor.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\dynamic_ragged_shape.py",
    "ast_data": "FunctionDef name:_from_spec arg:cls arg:spec arg:dtype arguments arg arg arg If Call Return return:yes If Call Return return:yes Call If Call Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "enable",
    "source_code": "def enable(self, event=None):\n    self.figure.canvas.widgetlock(self)\n    self._idPress = self.figure.canvas.mpl_connect('button_press_event', self._press)\n    self._idRelease = self.figure.canvas.mpl_connect('button_release_event', self._release)\n    self._idScroll = self.figure.canvas.mpl_connect('scroll_event', self.scroll_zoom)",
    "docstring": "Connect press/release events and lock the canvas.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\backend_tools.py",
    "ast_data": "FunctionDef name:enable arg:self arg:event arguments arg arg Call Assign Call Assign Call Assign Call"
  },
  {
    "library": "tensorflow",
    "name": "build_results",
    "source_code": "def build_results(self, values):\n    raise NotImplementedError('build_results must be implemented by subclasses')",
    "docstring": "Build results that match the original shape of the fetch. Args: values: List of values returned by run(). The values correspond exactly to the list tensors or ops returned by unique_fetches(). Returns: A struct of the same shape as the original fetch object handled by this fetch mapper. In the returned struct, the original fetches are replaced by their fetched values.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\client\\session.py",
    "ast_data": "FunctionDef name:build_results arg:self arg:values arguments arg arg Raise Call"
  },
  {
    "library": "matplotlib",
    "name": "alphaState",
    "source_code": "def alphaState(self, alpha):\n    state = self.alphaStates.get(alpha, None)\n    if state is not None:\n        return state[0]\n    name = next(self._alpha_state_seq)\n    self.alphaStates[alpha] = (name, {'Type': Name('ExtGState'), 'CA': alpha[0], 'ca': alpha[1]})\n    return name",
    "docstring": "Return name of an ExtGState that sets alpha to the given value.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\backends\\backend_pdf.py",
    "ast_data": "FunctionDef name:alphaState arg:self arg:alpha arguments arg arg Assign Call If Compare Return return:yes Assign Call Assign Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "set_markerfacecoloralt",
    "source_code": "def set_markerfacecoloralt(self, fc):\n    self._set_markercolor('markerfacecoloralt', False, fc)",
    "docstring": "Set the alternate marker face color. Parameters ---------- fc : :mpltype:",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\lines.py",
    "ast_data": "FunctionDef name:set_markerfacecoloralt arg:self arg:fc arguments arg arg Call"
  },
  {
    "library": "pytorch",
    "name": "no_observer_set",
    "source_code": "def no_observer_set() -> set[Any]:\n    no_observers = {nn.quantizable.LSTM, nn.quantizable.MultiheadAttention}\n    return no_observers",
    "docstring": "These modules cannot have observers inserted by default.",
    "type": "function",
    "file_path": "pytorch\\torch\\ao\\quantization\\quantization_mappings.py",
    "ast_data": "FunctionDef name:no_observer_set arguments Assign Return return:yes"
  },
  {
    "library": "scipy",
    "name": "spbandwidth",
    "source_code": "def spbandwidth(A):\n    if not (issparse(A) and A.format in ('csc', 'csr', 'coo', 'dia', 'dok')):\n        warn('spbandwidth needs sparse format not LIL and BSR. Converting to CSR.', SparseEfficiencyWarning, stacklevel=2)\n        A = csr_array(A)\n    if A.format == 'dia':\n        return (max(0, -A.offsets.min().item()), max(0, A.offsets.max().item()))\n    if A.format in ('csc', 'csr'):\n        indptr, indices = (A.indptr, A.indices)\n        N = len(indptr) - 1\n        gap = np.repeat(np.arange(N), np.diff(indptr)) - indices\n        if A.format == 'csr':\n            gap = -gap\n    elif A.format == 'coo':\n        gap = A.coords[1] - A.coords[0]\n    elif A.format == 'dok':\n        gap = [c - r for r, c in A.keys()] + [0]\n        return (-min(gap), max(gap))\n    return (max(-np.min(gap).item(), 0), max(np.max(gap).item(), 0))",
    "docstring": "Return the lower and upper bandwidth of a 2D numeric array. Computes the lower and upper limits on the bandwidth of the sparse 2D array ``) is one less than the number of rows(cols). Only the sparse structure is used here. Values are not checked for zeros. Parameters ---------- A : SciPy sparse array or matrix A sparse matrix preferrably in CSR or CSC format. Returns ------- below, above : 2-tuple of int The distance to the farthest non-zero diagonal below/above the main diagonal. .. versionadded:: 1.15.0 Examples -------- >>> import numpy as np >>> from scipy.sparse.linalg import spbandwidth >>> from scipy.sparse import csc_array, eye_array >>> A = csc_array([[3, 0, 0], [1, -1, 0], [2, 0, 1]], dtype=float) >>> spbandwidth(A) (2, 0) >>> D = eye_array(3, format='csr') >>> spbandwidth(D) (0, 0)",
    "type": "function",
    "file_path": "scipy\\scipy\\sparse\\linalg\\_dsolve\\linsolve.py",
    "ast_data": "FunctionDef name:spbandwidth arg:A arguments arg If BoolOp Call Compare Call Assign Call If Compare Return return:yes Call Call Call Call Call Call If Compare Assign Assign Call Assign Call Call Call If Compare Assign If Compare Assign If Compare Assign Call Return return:yes Call Call Return return:yes Call Call Call Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_is_in_placeholders",
    "source_code": "def _is_in_placeholders(op, func_arg_placeholders):\n    return op.values() and any((x.name in func_arg_placeholders for x in op.values()))",
    "docstring": "Checks whether any output of this op is in func_arg_placeholders.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\graph_to_function_def.py",
    "ast_data": "FunctionDef name:_is_in_placeholders arg:op arg:func_arg_placeholders arguments arg arg Return return:yes BoolOp Call Call Compare Call"
  },
  {
    "library": "tensorflow",
    "name": "run",
    "source_code": "def run(self, fn, args=(), kwargs=None, options=None):\n    return super(CentralStorageStrategy, self).run(fn, args, kwargs, options)",
    "docstring": "Run on each replica, with the given arguments. In , is called on each of the compute replicas, with the provided \"per replica\" arguments specific to that device. Args: fn: The function to run. The output must be a of s. args: (Optional) Positional arguments to . kwargs: (Optional) Keyword arguments to . options: (Optional) An instance of specifying the options to run . Returns: Return value from running .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\central_storage_strategy.py",
    "ast_data": "FunctionDef name:run arg:self arg:fn arg:args arg:kwargs arg:options arguments arg arg arg arg arg Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "__get__",
    "source_code": "def __get__(self, instance, cls):\n    self._obj_func = self._func.__get__(instance, cls)\n    return self",
    "docstring": "A Python descriptor interface.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\lite\\python\\authoring\\authoring.py",
    "ast_data": "FunctionDef name:__get__ arg:self arg:instance arg:cls arguments arg arg arg Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "get_yield_op",
    "source_code": "def get_yield_op(self):\n    compression_type = python_io.TFRecordOptions.get_compression_type_string(python_io.TFRecordOptions(self._compression_type))\n    records = gen_data_flow_ops.record_input(file_pattern=self._file_pattern, file_buffer_size=self._buffer_size, file_parallelism=self._parallelism, file_shuffle_shift_ratio=self._shift_ratio, batch_size=self._batch_size, file_random_seed=self._seed, compression_type=compression_type, name=self._name)\n    if self._batches is None:\n        return records\n    else:\n        with ops.name_scope(self._name):\n            batch_list = [[] for _ in range(self._batches)]\n            records = array_ops.split(records, self._batch_size, 0)\n            for index, protobuf in enumerate(records):\n                batch_index = index % self._batches\n                batch_list[batch_index].append(array_ops.reshape(protobuf, []))\n            return batch_list",
    "docstring": "Adds a node that yields a group of records every time it is executed. If RecordInput parameter is not None, it yields a list of record batches with the specified .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\data_flow_ops.py",
    "ast_data": "FunctionDef name:get_yield_op arg:self arguments arg Assign Call Call Assign Call If Compare Return return:yes With Call Assign Call Assign Call For Call Assign Call Call Return return:yes"
  },
  {
    "library": "cherrypy",
    "name": "__setattr__",
    "source_code": "def __setattr__(self, name, value):\n    if isinstance(value, Tool):\n        if value._name is None:\n            value._name = name\n        value.namespace = self.namespace\n    object.__setattr__(self, name, value)",
    "docstring": "Set an attribute on this :class: instance.",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\_cptools.py",
    "ast_data": "FunctionDef name:__setattr__ arg:self arg:name arg:value arguments arg arg arg If Call If Compare Assign Assign Call"
  },
  {
    "library": "matplotlib",
    "name": "BracketA",
    "source_code": "@_register_style(_style_list, name=']-')\nclass BracketA(_Curve):\n    arrow = ']-'\n\n    def __init__(self, widthA=1.0, lengthA=0.2, angleA=0):\n        super().__init__(widthA=widthA, lengthA=lengthA, angleA=angleA)",
    "docstring": "An arrow with an outward square bracket at its start.",
    "type": "class",
    "file_path": "matplotlib\\lib\\matplotlib\\patches.py",
    "ast_data": "ClassDef name:BracketA Assign FunctionDef name:__init__ arg:self arg:widthA arg:lengthA arg:angleA arguments arg arg arg arg Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "kaiming_uniform_",
    "source_code": "@_sharded_op_impl(torch.nn.init.kaiming_uniform_)\ndef kaiming_uniform_(types, args=(), kwargs=None, pg=None):\n    validate_param(kwargs, 'kwargs')\n    sharded_tensor = kwargs['tensor']\n    validate_param(sharded_tensor, 'tensor')\n    a = kwargs['a']\n    validate_param(a, 'a')\n    mode = kwargs['mode']\n    validate_param(mode, 'mode')\n    nonlinearity = kwargs['nonlinearity']\n    validate_param(nonlinearity, 'nonlinearity')\n    for shard in sharded_tensor.local_shards():\n        torch.nn.init.kaiming_uniform_(shard.tensor, a=a, mode=mode, nonlinearity=nonlinearity)\n    return sharded_tensor",
    "docstring": "Fills the Tensors in tensor.local_shards with values according to the method described in - He, K. et al. (2015), using a uniform distribution. The resulting tensor will have values sampled from :math: where .. math:: \\text{bound} = \\text{gain} \\times \\sqrt{\\frac{3}{\\text{fan\\_mode}}} Also known as He initialization. Args: tensor: tensor sharded across devices a: the negative slope of the rectifier used after this layer (only used with `nn.functional` (default).",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\_shard\\sharded_tensor\\_ops\\init.py",
    "ast_data": "FunctionDef name:kaiming_uniform_ arg:types arg:args arg:kwargs arg:pg arguments arg arg arg arg Call Assign Call Assign Call Assign Call Assign Call For Call Call Return return:yes Call"
  },
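  {
    "library": "pytorch",
    "name": "kaiming_uniform_dense_usage_sketch",
    "source_code": "import torch\n\n# Illustrative: dense Kaiming-uniform init with the same keyword arguments\n# that the sharded variant above forwards to each local shard.\nw = torch.empty(3, 5)\ntorch.nn.init.kaiming_uniform_(w, a=0, mode='fan_in', nonlinearity='leaky_relu')\nprint(w.min().item(), w.max().item())",
    "docstring": "Added usage sketch, not part of the original corpus: the dense counterpart of the sharded op above; the sharded implementation applies torch.nn.init.kaiming_uniform_ with these arguments to each local shard.",
    "type": "usage_example"
  },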
  {
    "library": "tensorflow",
    "name": "scale",
    "source_code": "@property\ndef scale(self):\n    return self._scale",
    "docstring": "Distribution parameter for scale.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\distributions\\laplace.py",
    "ast_data": "FunctionDef name:scale arg:self arguments arg Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "set_message",
    "source_code": "def set_message(self, s):\n    pass",
    "docstring": "Display a message on toolbar or in status bar.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\backend_bases.py",
    "ast_data": "FunctionDef name:set_message arg:self arg:s arguments arg arg"
  },
  {
    "library": "pandas",
    "name": "dtypes",
    "source_code": "@cache_readonly\ndef dtypes(self) -> Series:\n    from pandas import Series\n    names = com.fill_missing_names(self.names)\n    return Series([level.dtype for level in self.levels], index=Index(names))",
    "docstring": "Return the dtypes as a Series for the underlying MultiIndex. See Also -------- Index.dtype : Return the dtype object of the underlying data. Series.dtypes : Return the data type of the underlying Series. Examples -------- >>> idx = pd.MultiIndex.from_product( ... [(0, 1, 2), (\"green\", \"purple\")], names=[\"number\", \"color\"] ... ) >>> idx MultiIndex([(0, 'green'), (0, 'purple'), (1, 'green'), (1, 'purple'), (2, 'green'), (2, 'purple')], names=['number', 'color']) >>> idx.dtypes number int64 color object dtype: object",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\indexes\\multi.py",
    "ast_data": "FunctionDef name:dtypes arg:self arguments arg Assign Call Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "sym_min",
    "source_code": "def sym_min(a, b):\n    if overrides.has_torch_function((a, b)):\n        return overrides.handle_torch_function(sym_min, (a, b), a, b)\n    if isinstance(a, (SymInt, SymFloat)):\n        return a.__sym_min__(b)\n    elif isinstance(b, (SymInt, SymFloat)):\n        return b.__sym_min__(a)\n    all_types, float_types = __all_and_float_types()\n    assert isinstance(a, all_types), type(a)\n    assert isinstance(b, all_types), type(b)\n    if isinstance(a, float_types) or isinstance(b, float_types):\n        return builtins.float(builtins.min(a, b))\n    else:\n        return builtins.min(a, b)",
    "docstring": "SymInt-aware utility for min().",
    "type": "function",
    "file_path": "pytorch\\torch\\__init__.py",
    "ast_data": "FunctionDef name:sym_min arg:a arg:b arguments arg arg If Call Return return:yes Call If Call Return return:yes Call If Call Return return:yes Call Assign Call Call Call Call Call If BoolOp Call Call Return return:yes Call Call Return return:yes Call"
  },
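  {
    "library": "pytorch",
    "name": "sym_min_usage_sketch",
    "source_code": "import torch\n\n# Illustrative: with concrete numbers, sym_min falls back to builtins.min,\n# promoting the result to float when either argument is a float.\nprint(torch.sym_min(3, 5))    # 3\nprint(torch.sym_min(2.5, 4))  # 2.5",
    "docstring": "Added usage sketch, not part of the original corpus: shows the plain-number fallback branch of torch.sym_min from the entry above; SymInt/SymFloat inputs would take the __sym_min__ path instead.",
    "type": "usage_example"
  },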
  {
    "library": "tensorflow",
    "name": "_convert_tflite_enum_type_to_tf_type",
    "source_code": "def _convert_tflite_enum_type_to_tf_type(tflite_enum_type):\n    tf_type = _MAP_TFLITE_ENUM_TO_TF_TYPES.get(tflite_enum_type)\n    if tf_type is None:\n        raise ValueError('Unsupported enum {}. The valid map of enum to tf types is : {}'.format(tflite_enum_type, _MAP_TFLITE_ENUM_TO_TF_TYPES))\n    return tf_type",
    "docstring": "Converts tflite enum type (eg: 0) to tf type (eg: tf.float32). Args: tflite_enum_type: tflite enum type (eg: 0, that corresponds to float32) Raises: ValueError: If an invalid tflite enum type is provided. Returns: tf type (eg: tf.float32)",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\lite\\python\\util.py",
    "ast_data": "FunctionDef name:_convert_tflite_enum_type_to_tf_type arg:tflite_enum_type arguments arg Assign Call If Compare Raise Call Call Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "fit",
    "source_code": "@_fit_context(prefer_skip_nested_validation=False)\ndef fit(self, X, y=None):\n    self._fit(X)\n    self._n_features_out = self.n_samples_fit_\n    return self",
    "docstring": "Fit the radius neighbors transformer from the training dataset. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) or (n_samples, n_samples) if metric='precomputed' Training data. y : Ignored Not used, present for API consistency by convention. Returns ------- self : RadiusNeighborsTransformer The fitted radius neighbors transformer.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\neighbors\\_graph.py",
    "ast_data": "FunctionDef name:fit arg:self arg:X arg:y arguments arg arg arg Call Assign Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_fix_unknown_dimension",
    "source_code": "def _fix_unknown_dimension(self, input_shape, output_shape):\n    output_shape = list(output_shape)\n    msg = 'total size of new array must be unchanged, input_shape = {}, output_shape = {}'.format(input_shape, output_shape)\n    known, unknown = (1, None)\n    for index, dim in enumerate(output_shape):\n        if dim < 0:\n            if unknown is None:\n                unknown = index\n            else:\n                raise ValueError('Can only specify one unknown dimension.')\n        else:\n            known *= dim\n    original = np.prod(input_shape, dtype=int)\n    if unknown is not None:\n        if known == 0 or original % known != 0:\n            raise ValueError(msg)\n        output_shape[unknown] = original // known\n    elif original != known:\n        raise ValueError(msg)\n    return output_shape",
    "docstring": "Find and replace a missing dimension in an output shape. This is a near direct port of the internal Numpy function in Args: input_shape: Shape of array being reshaped output_shape: Desired shape of the array with at most a single -1 which indicates a dimension that should be derived from the input shape. Returns: The new output shape with a -1 replaced with its computed value. Raises: ValueError: If the total array size of the output_shape is different than the input_shape, or more than one unknown dimension is specified.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\layers\\core.py",
    "ast_data": "FunctionDef name:_fix_unknown_dimension arg:self arg:input_shape arg:output_shape arguments arg arg arg Assign Call Assign Call Assign For Call If Compare If Compare Assign Raise Call Assign Call If Compare If BoolOp Compare Compare Raise Call Assign If Compare Raise Call Return return:yes"
  },
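  {
    "library": "tensorflow",
    "name": "fix_unknown_dimension_numpy_analogy_sketch",
    "source_code": "import numpy as np\n\n# Illustrative: 4 * 6 = 24 elements; a single -1 entry is inferred from the\n# remaining known dimensions, exactly the rule _fix_unknown_dimension ports.\nx = np.zeros((4, 6))\nprint(np.reshape(x, (2, -1)).shape)  # (2, 12)\nprint(np.reshape(x, (-1, 3)).shape)  # (8, 3)",
    "docstring": "Added usage sketch, not part of the original corpus: demonstrates the unknown-dimension inference rule via np.reshape, which implements the same logic the Keras helper above ports.",
    "type": "usage_example"
  },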
  {
    "library": "pytorch",
    "name": "_sparse_csr_where",
    "source_code": "def _sparse_csr_where(mask: Tensor, input: Tensor, fill_value: Tensor) -> Tensor:\n    return _sparse_coo_where(mask.to_sparse_coo(), input.to_sparse_coo(), fill_value).to_sparse_csr()",
    "docstring": "Sparse variant of torch.where. Supports sparse CSR tensors.",
    "type": "function",
    "file_path": "pytorch\\torch\\masked\\_ops.py",
    "ast_data": "FunctionDef name:_sparse_csr_where arg:mask arg:input arg:fill_value arguments arg arg arg Return return:yes Call Call Call Call"
  },
  {
    "library": "matplotlib",
    "name": "tight_layout",
    "source_code": "def tight_layout(self, figure, renderer=None, pad=1.08, h_pad=None, w_pad=None, rect=None):\n    if renderer is None:\n        renderer = figure._get_renderer()\n    kwargs = _tight_layout.get_tight_layout_figure(figure, figure.axes, _tight_layout.get_subplotspec_list(figure.axes, grid_spec=self), renderer, pad=pad, h_pad=h_pad, w_pad=w_pad, rect=rect)\n    if kwargs:\n        self.update(**kwargs)",
    "docstring": "Adjust subplot parameters to give specified padding. Parameters ---------- figure : The figure. renderer : subclass, optional The renderer to be used. pad : float Padding between the figure edge and the edges of subplots, as a fraction of the font-size. h_pad, w_pad : float, optional Padding (height/width) between edges of adjacent subplots. Defaults to *pad*. rect : tuple (left, bottom, right, top), default: None (left, bottom, right, top) rectangle in normalized figure coordinates that the whole subplots area (including labels) will fit into. Default (None) is the whole figure.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\gridspec.py",
    "ast_data": "FunctionDef name:tight_layout arg:self arg:figure arg:renderer arg:pad arg:h_pad arg:w_pad arg:rect arguments arg arg arg arg arg arg arg If Compare Assign Call Assign Call Call If Call"
  },
  {
    "library": "pytorch",
    "name": "scoped_copy",
    "source_code": "def scoped_copy(self) -> Self:\n    new_cse = self.clone()\n    new_cse._cache = ScopedDict(self._cache)\n    new_cse.reduction_cache = ScopedDict(self.reduction_cache)\n    new_cse.store_cache = ScopedDict(self.store_cache)\n    return new_cse",
    "docstring": "Return a copy of using ScopedDict so changes to *_cache aren't visible in self",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\codegen\\common.py",
    "ast_data": "FunctionDef name:scoped_copy arg:self arguments arg Assign Call Assign Call Assign Call Assign Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "FileLock",
    "source_code": "class FileLock(base_FileLock):\n\n    def __enter__(self) -> Self:\n        self.region_counter = _WaitCounter('pytorch.filelock.region').guard()\n        with _WaitCounter('pytorch.filelock.enter').guard():\n            result = super().__enter__()\n        self.region_counter.__enter__()\n        return result\n\n    def __exit__(self, exc_type: Optional[type[BaseException]], exc_value: Optional[BaseException], traceback: Optional[TracebackType]) -> None:\n        self.region_counter.__exit__()\n        with _WaitCounter('pytorch.filelock.exit').guard():\n            super().__exit__(exc_type, exc_value, traceback)\n        return None",
    "docstring": "This behaves like a normal file lock. However, it adds waitcounters for acquiring and releasing the filelock as well as for the critical region within it. pytorch.filelock.enter - While we're acquiring the filelock. pytorch.filelock.region - While we're holding the filelock and doing work. pytorch.filelock.exit - While we're releasing the filelock.",
    "type": "class",
    "file_path": "pytorch\\torch\\utils\\_filelock.py",
    "ast_data": "ClassDef name:FileLock FunctionDef name:__enter__ arg:self arguments arg Assign Call Call With Call Call Assign Call Call Call Return return:yes FunctionDef name:__exit__ arg:self arg:exc_type arg:exc_value arg:traceback arguments arg arg arg arg Call With Call Call Call Call Return return:no"
  },
  {
    "library": "pandas",
    "name": "_get_dataframe_dtype_counts",
    "source_code": "def _get_dataframe_dtype_counts(df: DataFrame) -> Mapping[str, int]:\n    return df.dtypes.value_counts().groupby(lambda x: x.name).sum()",
    "docstring": "Create mapping between datatypes and their number of occurrences.",
    "type": "function",
    "file_path": "pandas\\pandas\\io\\formats\\info.py",
    "ast_data": "FunctionDef name:_get_dataframe_dtype_counts arg:df arguments arg Return return:yes Call Call Call arguments arg"
  },
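  {
    "library": "pandas",
    "name": "dataframe_dtype_counts_usage_sketch",
    "source_code": "import pandas as pd\n\n# Illustrative: two int64 columns, one float64, one object.\ndf = pd.DataFrame({'a': [1, 2], 'b': [1.0, 2.0], 'c': ['x', 'y'], 'd': [3, 4]})\nprint(df.dtypes.value_counts().groupby(lambda x: x.name).sum())",
    "docstring": "Added usage sketch, not part of the original corpus: reproduces the dtype-counting expression from _get_dataframe_dtype_counts on a small DataFrame.",
    "type": "usage_example"
  },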
  {
    "library": "numpy",
    "name": "split",
    "source_code": "@staticmethod\ndef split(cmd):\n    raise NotImplementedError",
    "docstring": "Split a command line string into a list of arguments",
    "type": "method",
    "file_path": "numpy\\numpy\\distutils\\_shell_utils.py",
    "ast_data": "FunctionDef name:split arg:cmd arguments arg Raise"
  },
  {
    "library": "tensorflow",
    "name": "_make_enqueued_generator",
    "source_code": "def _make_enqueued_generator(generator, workers=1, use_multiprocessing=False, max_queue_size=10, shuffle=False):\n    is_sequence = isinstance(generator, data_utils.Sequence)\n    enqueuer = None\n    if workers > 0:\n        if is_sequence:\n            enqueuer = data_utils.OrderedEnqueuer(generator, use_multiprocessing=use_multiprocessing, shuffle=shuffle)\n        else:\n            enqueuer = data_utils.GeneratorEnqueuer(generator, use_multiprocessing=use_multiprocessing)\n        enqueuer.start(workers=workers, max_queue_size=max_queue_size)\n        output_generator = enqueuer.get()\n    elif is_sequence:\n        output_generator = data_utils.iter_sequence_infinite(generator)\n    else:\n        output_generator = generator\n    return (output_generator, enqueuer)",
    "docstring": "Create a buffered queue of next elements of the generator.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\training_generator_v1.py",
    "ast_data": "FunctionDef name:_make_enqueued_generator arg:generator arg:workers arg:use_multiprocessing arg:max_queue_size arg:shuffle arguments arg arg arg arg arg Assign Call Assign If Compare If Assign Call Assign Call Call Assign Call If Assign Call Assign Return return:yes"
  },
  {
    "library": "cherrypy",
    "name": "from_str",
    "source_code": "@classmethod\ndef from_str(cls, elementstr):\n    qvalue = None\n    atoms = q_separator.split(elementstr, 1)\n    media_range = atoms.pop(0).strip()\n    if atoms:\n        qvalue = HeaderElement.from_str(atoms[0].strip())\n    media_type, params = cls.parse(media_range)\n    if qvalue is not None:\n        params['q'] = qvalue\n    return cls(media_type, params)",
    "docstring": "Make an :class: instance from a string.",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\lib\\httputil.py",
    "ast_data": "FunctionDef name:from_str arg:cls arg:elementstr arguments arg arg Assign Assign Call Assign Call Call If Assign Call Call Assign Call If Compare Assign Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "get_named_colors_mapping",
    "source_code": "def get_named_colors_mapping():\n    return _colors_full_map",
    "docstring": "Return the global mapping of names to named colors.",
    "type": "function",
    "file_path": "matplotlib\\lib\\matplotlib\\colors.py",
    "ast_data": "FunctionDef name:get_named_colors_mapping arguments Return return:yes"
  },
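  {
    "library": "matplotlib",
    "name": "get_named_colors_mapping_usage_sketch",
    "source_code": "import matplotlib.colors as mcolors\n\n# Illustrative: the global mapping resolves base, CSS4, xkcd and Tableau\n# color names to hex strings.\nmapping = mcolors.get_named_colors_mapping()\nprint(mapping['tab:blue'])       # '#1f77b4'\nprint(mapping['rebeccapurple'])  # '#663399'",
    "docstring": "Added usage sketch, not part of the original corpus: looks up two named colors in the mapping returned by get_named_colors_mapping above.",
    "type": "usage_example"
  },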
  {
    "library": "pytorch",
    "name": "_formula_transposed",
    "source_code": "def _formula_transposed(ln: int, p: int, d: int, k: int, s: int, op: int) -> int:\n    return (ln - 1) * s - 2 * p + d * (k - 1) + op + 1",
    "docstring": "Formula to apply to calculate the length of some dimension of the output if transposed convolution is used. See: Args: ln: length of the dimension p: padding in that dim d: dilation in that dim k: kernel size in that dim s: stride in that dim op: output padding in that dim Returns: The output length",
    "type": "function",
    "file_path": "pytorch\\torch\\_meta_registrations.py",
    "ast_data": "FunctionDef name:_formula_transposed arg:ln arg:p arg:d arg:k arg:s arg:op arguments arg arg arg arg arg arg Return return:yes"
  },
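  {
    "library": "pytorch",
    "name": "formula_transposed_worked_example",
    "source_code": "import torch\n\n# Illustrative parameters: length 5, padding 1, dilation 1, kernel 3,\n# stride 2, output padding 1.\nln, p, d, k, s, op = 5, 1, 1, 3, 2, 1\nout_len = (ln - 1) * s - 2 * p + d * (k - 1) + op + 1\nprint(out_len)  # 10\n\n# Cross-check the formula against ConvTranspose1d (shape only).\nm = torch.nn.ConvTranspose1d(1, 1, kernel_size=k, stride=s, padding=p, output_padding=op, dilation=d)\nprint(m(torch.zeros(1, 1, ln)).shape)  # torch.Size([1, 1, 10])",
    "docstring": "Added worked example, not part of the original corpus: evaluates the transposed-convolution length formula from _formula_transposed above and cross-checks it against torch.nn.ConvTranspose1d.",
    "type": "usage_example"
  },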
  {
    "library": "tensorflow",
    "name": "run_graph",
    "source_code": "def run_graph(self, device, n, m, k, transpose_a, transpose_b, num_iters, dtype):\n    graph = ops.Graph()\n    with graph.as_default():\n        output = build_graph(device, n, m, k, transpose_a, transpose_b, dtype)\n        with session_lib.Session(graph=graph) as session:\n            variables.global_variables_initializer().run()\n            for _ in range(500):\n                session.run(output)\n            start_time = time.time()\n            for _ in range(num_iters):\n                session.run(output)\n            duration = time.time() - start_time\n            num_items = n * m * k * 2\n            throughput = num_items * num_iters / duration / 1000000000.0\n            print('%s %s input_info:%s %d %.4fsec, %.4fGitems/s.' % (device, str(dtype), str(n) + 'x' + str(m) + 'x' + str(k) + ',ta:' + str(transpose_a) + '.tb:' + str(transpose_b), num_iters, duration, throughput))\n    name_template = 'matmul_{device}_{dtype}_input_info_{inputinfo}'\n    self.report_benchmark(name=name_template.format(device=device, dtype=str(dtype).replace(' ', ''), inputinfo=str(n) + 'x' + str(m) + 'x' + str(k) + ',ta:' + str(transpose_a) + ',tb:' + str(transpose_b)).replace(' ', ''), iters=num_iters, wall_time=duration)\n    return duration",
    "docstring": "Run the graph and print its execution time. Args: device: String, the device to run on. n: tensor A's first dimension size. m: tensor A's second dimension size. k: tensor B's second dimension size. transpose_a: boolean value to show if tensor A is transposed. transpose_b: boolean value to show if tensor B is transposed. num_iters: number of iterations to run the benchmark. dtype: numpy data type of the input tensor. Returns: The duration of the run in seconds.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\matmul_benchmark.py",
    "ast_data": "FunctionDef name:run_graph arg:self arg:device arg:n arg:m arg:k arg:transpose_a arg:transpose_b arg:num_iters arg:dtype arguments arg arg arg arg arg arg arg arg arg Assign Call With Call Assign Call With Call Call Call For Call Call Assign Call For Call Call Assign Call Assign Assign Call Call Call Call Call Call Call Assign Call Call Call Call Call Call Call Call Call Call Return return:yes"
  },
  {
    "library": "django",
    "name": "get_changelist_formset",
    "source_code": "def get_changelist_formset(self, request, **kwargs):\n    defaults = {'formfield_callback': partial(self.formfield_for_dbfield, request=request), **kwargs}\n    return modelformset_factory(self.model, self.get_changelist_form(request), extra=0, fields=self.list_editable, **defaults)",
    "docstring": "Return a FormSet class for use on the changelist page if list_editable is used.",
    "type": "method",
    "file_path": "django\\django\\contrib\\admin\\options.py",
    "ast_data": "FunctionDef name:get_changelist_formset arg:self arg:request arguments arg arg arg Assign Call Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "reduce_prod",
    "source_code": "@dispatch.dispatch_for_api(math_ops.reduce_prod)\ndef reduce_prod(input_tensor: ragged_tensor.Ragged, axis=None, keepdims=None, name=None):\n    return ragged_reduce_aggregate(reduce_op=math_ops.reduce_prod, unsorted_segment_op=math_ops.unsorted_segment_prod, rt_input=input_tensor, axis=axis, keepdims=keepdims, name=name or 'RaggedReduceProd')",
    "docstring": "For docs, see: _RAGGED_REDUCE_DOCSTRING.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\ragged_math_ops.py",
    "ast_data": "FunctionDef name:reduce_prod arg:input_tensor arg:axis arg:keepdims arg:name arguments arg arg arg arg Return return:yes Call BoolOp Call"
  },
  {
    "library": "matplotlib",
    "name": "rotate_deg_around",
    "source_code": "def rotate_deg_around(self, x, y, degrees):\n    x, y = (float(x), float(y))\n    return self.translate(-x, -y).rotate_deg(degrees).translate(x, y)",
    "docstring": "Add a rotation (in degrees) around the point (x, y) in place. Returns *self*, so this method can easily be chained with more calls to :meth:, :meth:, :meth: and :meth:.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\transforms.py",
    "ast_data": "FunctionDef name:rotate_deg_around arg:self arg:x arg:y arg:degrees arguments arg arg arg arg Assign Call Call Return return:yes Call Call Call"
  },
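  {
    "library": "matplotlib",
    "name": "rotate_deg_around_usage_sketch",
    "source_code": "import matplotlib.transforms as mtransforms\n\n# Illustrative: rotate the point (2, 1) by 90 degrees around (1, 1);\n# translate(-1, -1) -> rotate 90 -> translate(1, 1) maps it to (1, 2).\nt = mtransforms.Affine2D().rotate_deg_around(1, 1, 90)\nprint(t.transform((2, 1)))  # approximately [1. 2.]",
    "docstring": "Added usage sketch, not part of the original corpus: applies the chained translate/rotate/translate construction from rotate_deg_around above to a single point.",
    "type": "usage_example"
  },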
  {
    "library": "pygame",
    "name": "SysFont",
    "source_code": "def SysFont(name, size, bold=False, italic=False, constructor=None):\n    if constructor is None:\n\n        def constructor(fontpath, size, bold, italic):\n            font = Font(fontpath, size)\n            font.strong = bold\n            font.oblique = italic\n            return font\n    return _SysFont(name, size, bold, italic, constructor)",
    "docstring": "pygame.ftfont.SysFont(name, size, bold=False, italic=False, constructor=None) -> Font Create a pygame Font from system font resources. This will search the system fonts for the given font name. You can also enable bold or italic styles, and the appropriate system font will be selected if available. This will always return a valid Font object, and will fallback on the builtin pygame font if the given font is not found. Name can also be an iterable of font names, a string of comma-separated font names, or a bytes of comma-separated font names, in which case the set of names will be searched in order. Pygame uses a small set of common font aliases. If the specific font you ask for is not available, a reasonable alternative may be used. If optional constructor is provided, it must be a function with signature constructor(fontpath, size, bold, italic) which returns a Font instance. If None, a pygame.freetype.Font object is created.",
    "type": "function",
    "file_path": "pygame\\src_py\\freetype.py",
    "ast_data": "FunctionDef name:SysFont arg:name arg:size arg:bold arg:italic arg:constructor arguments arg arg arg arg arg If Compare FunctionDef name:constructor arg:fontpath arg:size arg:bold arg:italic arguments arg arg arg arg Assign Call Assign Assign Return return:yes Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "get_kernel_category",
    "source_code": "def get_kernel_category(kernel_mod: ModuleType) -> str:\n    choices = [ch for ch in _kernel_category_choices if ch in kernel_mod.__dict__]\n    if len(choices) == 1:\n        return choices[0]\n    else:\n        return 'unknown'",
    "docstring": "Given the module defining a triton kernel, return the category of the kernel. Category can be one of: - pointwise - reduction - persistent_reduction Currently we simply decide the category depending on what decorator is imported by the kernel.",
    "type": "function",
    "file_path": "pytorch\\torch\\_inductor\\wrapper_benchmark.py",
    "ast_data": "FunctionDef name:get_kernel_category arg:kernel_mod arguments arg Assign Compare If Compare Call Return return:yes Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "add_data",
    "source_code": "def add_data(self, name: str, data, reuse_mask=True, **config):\n    assert type(data) in SUPPORTED_TYPES, 'specified data type not supported at the moment'\n    local_args = copy.deepcopy(self.defaults)\n    local_args.update(config)\n    weight = self._extract_weight(data)\n    mask = local_args.get('mask', torch.ones_like(weight))\n    param_class = local_args.get('parametrization', utils.FakeSparsity)\n    if name in self.state:\n        warnings.warn('Replacing existing data of the same name. - Did you mean a different name?')\n        old_args = self.data_groups[name]\n        local_args = copy.deepcopy(old_args)\n        local_args.update(config)\n        if reuse_mask:\n            current_data = self.get_data(name=name)\n            assert weight.shape == current_data.shape, 'to retain the old mask, the shape of the new data must be the same as the previous one'\n            mask = self.get_mask(name=name)\n        self._delete_data(name=name)\n    self._container.register_buffer(name=name, tensor=weight)\n    parametrize.register_parametrization(self._container, name, param_class(mask))\n    self.state[name]['mask'] = mask\n    self.data_groups[name] = local_args\n    return getattr(self._container, name)",
    "docstring": "Configures and parametrizes the internal container model with name and data. **Note**: 1. If the data with name already exists, it replaces the data. 2. While replacing, the old mask is reused when 3. If , then the replacing data needs to have the same shape as that of old data. 4. By default, the config of the replaced data is used as config for the replacing data, unless something is specified in the config dictionary.",
    "type": "method",
    "file_path": "pytorch\\torch\\ao\\pruning\\_experimental\\data_sparsifier\\base_data_sparsifier.py",
    "ast_data": "FunctionDef name:add_data arg:self arg:name arg:data arg:reuse_mask arguments arg arg arg arg arg Compare Call Assign Call Call Assign Call Assign Call Call Assign Call If Compare Call Assign Assign Call Call If Assign Call Compare Assign Call Call Call Call Call Assign Assign Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_check_index",
    "source_code": "def _check_index(idx):\n    if isinstance(idx, (numbers.Integral, tensor_shape.Dimension)):\n        return\n    dtype = getattr(idx, 'dtype', None)\n    if dtype is None or dtypes.as_dtype(dtype) not in _SUPPORTED_SLICE_DTYPES or (idx.shape and len(idx.shape) == 1):\n        raise TypeError(_SLICE_TYPE_ERROR + ', got {!r}'.format(idx))",
    "docstring": "Check if a given value is a valid index into a tensor.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\tensor_getitem_override.py",
    "ast_data": "FunctionDef name:_check_index arg:idx arguments arg If Call Return return:no Assign Call If BoolOp Compare Compare Call BoolOp Compare Call Raise Call Call"
  },
  {
    "library": "matplotlib",
    "name": "__init__",
    "source_code": "def __init__(self, pad=0.3, rounding_size=None):\n    self.pad = pad\n    self.rounding_size = rounding_size",
    "docstring": "Parameters ---------- pad : float, default: 0.3 The amount of padding around the original box. rounding_size : float, default: *pad*/2 Rounding of edges.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\patches.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:pad arg:rounding_size arguments arg arg arg Assign Assign"
  },
  {
    "library": "matplotlib",
    "name": "set_xbound",
    "source_code": "def set_xbound(self, lower=None, upper=None):\n    if upper is None and np.iterable(lower):\n        lower, upper = lower\n    old_lower, old_upper = self.get_xbound()\n    if lower is None:\n        lower = old_lower\n    if upper is None:\n        upper = old_upper\n    self.set_xlim(sorted((lower, upper), reverse=bool(self.xaxis_inverted())), auto=None)",
    "docstring": "Set the lower and upper numerical bounds of the x-axis. This method will honor axis inversion regardless of parameter order. It will not change the autoscaling setting (). Parameters ---------- lower, upper : float or None The lower and upper bounds. If *None*, the respective axis bound is not modified. .. ACCEPTS: (lower: float, upper: float) See Also -------- get_xbound get_xlim, set_xlim invert_xaxis, xaxis_inverted",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\axes\\_base.py",
    "ast_data": "FunctionDef name:set_xbound arg:self arg:lower arg:upper arguments arg arg arg If BoolOp Compare Call Assign Assign Call If Compare Assign If Compare Assign Call Call Call Call"
  },
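  {
    "library": "matplotlib",
    "name": "set_xbound_usage_sketch",
    "source_code": "import matplotlib.pyplot as plt\n\n# Illustrative: invert the axis, then set bounds; the view runs 10 -> 0 on\n# screen, but get_xbound always reports bounds in increasing order.\nfig, ax = plt.subplots()\nax.invert_xaxis()\nax.set_xbound(0, 10)\nprint(ax.get_xbound())  # (0.0, 10.0)\nprint(ax.get_xlim())    # (10.0, 0.0)",
    "docstring": "Added usage sketch, not part of the original corpus: shows that set_xbound honors axis inversion, matching the sorted(..., reverse=...) logic in the entry above.",
    "type": "usage_example"
  },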
  {
    "library": "pytorch",
    "name": "_get_pattern_to_quantize_handlers",
    "source_code": "def _get_pattern_to_quantize_handlers(backend_config: BackendConfig) -> dict[Pattern, QuantizerCls]:\n    pattern_to_quantize_handlers = {}\n    for pattern, config in backend_config._pattern_complex_format_to_config.items():\n        observation_type = config.observation_type\n        dtype_configs = config.dtype_configs\n        num_tensor_args_to_observation_type = config._num_tensor_args_to_observation_type\n        pattern_to_quantize_handlers[pattern] = _get_quantize_handler_cls(observation_type, dtype_configs, num_tensor_args_to_observation_type)\n    return pattern_to_quantize_handlers",
    "docstring": "Note: Quantize handler is just a holder for some check methods like (should_insert_observer_for_output), maybe this can be a enum as well, we can refactor this after we convert the path for fbgemm/qnnpack fully to the new path, this is not exposed to backend developers",
    "type": "function",
    "file_path": "pytorch\\torch\\ao\\quantization\\fx\\quantize_handler.py",
    "ast_data": "FunctionDef name:_get_pattern_to_quantize_handlers arg:backend_config arguments arg Assign For Call Assign Assign Assign Assign Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "remove_proxy_from_state_dict",
    "source_code": "def remove_proxy_from_state_dict(state_dict: dict, in_place: bool) -> dict:\n    if in_place:\n        for k, v in state_dict.items():\n            if hasattr(v, 'proxy'):\n                delattr(state_dict[k], 'proxy')\n        return state_dict\n    else:\n        new_state_dict = {}\n        for k, v in state_dict.items():\n            if hasattr(v, 'proxy'):\n                new_state_dict[k] = v.detach().clone()\n            else:\n                new_state_dict[k] = v\n        return new_state_dict",
    "docstring": "If is false, return a new copy of with \"proxy\" removed from . is the values in the dictionary. If is true, modify in place.",
    "type": "function",
    "file_path": "pytorch\\torch\\_export\\utils.py",
    "ast_data": "FunctionDef name:remove_proxy_from_state_dict arg:state_dict arg:in_place arguments arg arg If For Call If Call Call Return return:yes Assign For Call If Call Assign Call Call Assign Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "Error",
    "source_code": "def Error(msg: str) -> ParserElement:\n\n    def raise_error(s: str, loc: int, toks: ParseResults) -> T.Any:\n        raise ParseFatalException(s, loc, msg)\n    return Empty().set_parse_action(raise_error)",
    "docstring": "Helper class to raise parser errors.",
    "type": "function",
    "file_path": "matplotlib\\lib\\matplotlib\\_mathtext.py",
    "ast_data": "FunctionDef name:Error arg:msg arguments arg FunctionDef name:raise_error arg:s arg:loc arg:toks arguments arg arg arg Raise Call Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "axis_none_flatten",
    "source_code": "def axis_none_flatten(*tensors, axis=None):\n    if axis is None:\n        tensors = tuple((ar.flatten() for ar in tensors))\n        return (tensors, 0)\n    else:\n        return (tensors, axis)",
    "docstring": "Flatten the arrays if axis is None.",
    "type": "function",
    "file_path": "pytorch\\torch\\_numpy\\_util.py",
    "ast_data": "FunctionDef name:axis_none_flatten arguments arg arg If Compare Assign Call Call Return return:yes Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "index_to_str",
    "source_code": "def index_to_str(self, index: sympy.Expr) -> str:\n    if isinstance(index, list):\n        return f'[{', '.join(map(self.index_to_str, index))}]'\n    return self.kexpr(self.rename_indexing(index))",
    "docstring": "Convert an index expr to a string that can be used in output code. e.g. a sympy expression \"s2\" may actually appear as \"ks1\" in the generated kernel. Index expressions often need to be passed in as arguments to the triton kernel. Rename_indexing and codegen_indexing keep track of the needed indices and add new parameters to the function signature.",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\codegen\\simd.py",
    "ast_data": "FunctionDef name:index_to_str arg:self arg:index arguments arg arg If Call Return return:yes Call Call Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "LayerAttributes",
    "source_code": "class LayerAttributes(SerializedAttributes.with_attributes('LayerAttributes', checkpointable_objects=['non_trainable_variables', 'layers', 'metrics', 'layer_regularization_losses', 'layer_metrics'], functions=['call_and_return_conditional_losses', 'activity_regularizer_fn'], copy_from=[CommonEndpoints])):\n    pass",
    "docstring": "Layer checkpointable objects + functions that are saved to the SavedModel. List of all attributes: All attributes from CommonEndpoints non_trainable_variables: List of non-trainable variables in the layer and its sublayers. layers: List of all sublayers. metrics: List of all metrics in the layer and its sublayers. call_and_return_conditional_losses: Function that takes inputs and returns a tuple of (outputs of the call function, list of input-dependent losses). The list of losses excludes the activity regularizer function, which is separate to allow the deserialized Layer object to define a different activity regularizer. activity_regularizer_fn: Callable that returns the activity regularizer loss layer_regularization_losses: List of losses owned only by this layer. layer_metrics: List of metrics owned by this layer.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\saving\\saved_model\\serialized_attributes.py",
    "ast_data": "ClassDef name:LayerAttributes Call"
  },
  {
    "library": "tensorflow",
    "name": "visit_Import",
    "source_code": "def visit_Import(self, node):\n    for import_alias in node.names:\n        if import_alias.name == 'tensorflow.compat.v1' and import_alias.asname == 'tf':\n            import_alias.name = 'tensorflow'\n    self.generic_visit(node)",
    "docstring": "Handle visiting an import node in the AST. Args: node: Current Node",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\tools\\compatibility\\tf_upgrade_v2.py",
    "ast_data": "FunctionDef name:visit_Import arg:self arg:node arguments arg arg For If BoolOp Compare Compare Assign Call"
  },
  {
    "library": "tensorflow",
    "name": "sparse_segment_sum",
    "source_code": "@tf_export(v1=['sparse.segment_sum', 'sparse_segment_sum'])\n@deprecation.deprecated_endpoints('sparse_segment_sum')\ndef sparse_segment_sum(data, indices, segment_ids, name=None, num_segments=None, sparse_gradient=False):\n    if num_segments is not None:\n        return gen_math_ops.sparse_segment_sum_with_num_segments(data=data, indices=indices, segment_ids=segment_ids, num_segments=num_segments, sparse_gradient=sparse_gradient, name=name)\n    else:\n        return gen_math_ops.sparse_segment_sum(data=data, indices=indices, segment_ids=segment_ids, sparse_gradient=sparse_gradient, name=name)",
    "docstring": "Computes the sum along sparse segments of a tensor. Read [the section on segmentation]( for an explanation of segments. Like , but can have rank less than 's first dimension, selecting a subset of dimension 0, specified by . is allowed to have missing ids, in which case the output will be zeros at those indices. In those cases is used to determine the size of the output. For example: Args: data: A with data that will be assembled in the output. indices: A 1-D with indices into . Has same rank as . segment_ids: A 1-D with indices into the output . Values should be sorted and can be repeated. name: A name for the operation (optional). num_segments: An optional int32 scalar. Indicates the size of the output . sparse_gradient: An optional . Defaults to . If , the gradient of this function will be sparse () instead of dense (). The sparse gradient will contain one non-zero row for each unique index in . Returns: A of the shape as data, except for dimension 0 which has size , the number of segments specified via or inferred for the last element in .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\math_ops.py",
    "ast_data": "FunctionDef name:sparse_segment_sum arg:data arg:indices arg:segment_ids arg:name arg:num_segments arg:sparse_gradient arguments arg arg arg arg arg arg If Compare Return return:yes Call Return return:yes Call Call Call"
  },
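  {
    "library": "tensorflow",
    "name": "sparse_segment_sum_usage_sketch",
    "source_code": "import tensorflow as tf\n\n# Illustrative: select all three rows; rows 0 and 1 share segment 0.\ndata = tf.constant([[1, 2], [3, 4], [5, 6]])\nout = tf.sparse.segment_sum(data, indices=[0, 1, 2], segment_ids=[0, 0, 1])\nprint(out.numpy())  # [[4 6]\n                    #  [5 6]]",
    "docstring": "Added usage sketch, not part of the original corpus: sums rows 0 and 1 into segment 0 and row 2 into segment 1 via the tf.sparse.segment_sum endpoint documented above.",
    "type": "usage_example"
  },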
  {
    "library": "pandas",
    "name": "_assign_where",
    "source_code": "def _assign_where(out, result, where) -> None:\n    if where is None:\n        out[:] = result\n    else:\n        np.putmask(out, where, result)",
    "docstring": "Set a ufunc result into 'out', masking with a 'where' argument if necessary.",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\arraylike.py",
    "ast_data": "FunctionDef name:_assign_where arg:out arg:result arg:where arguments arg arg arg If Compare Assign Call"
  },
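  {
    "library": "pandas",
    "name": "assign_where_putmask_usage_sketch",
    "source_code": "import numpy as np\n\n# Illustrative: positions 0 and 2 are masked in; the rest keep their value.\nout = np.zeros(4)\nnp.putmask(out, np.array([True, False, True, False]), np.array([1.0, 2.0, 3.0, 4.0]))\nprint(out)  # [1. 0. 3. 0.]",
    "docstring": "Added usage sketch, not part of the original corpus: demonstrates the np.putmask branch that _assign_where above uses when a 'where' mask is supplied.",
    "type": "usage_example"
  },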
  {
    "library": "tensorflow",
    "name": "_num_elements",
    "source_code": "def _num_elements(losses):\n    with backend.name_scope('num_elements') as scope:\n        return math_ops.cast(array_ops.size(losses, name=scope), dtype=losses.dtype)",
    "docstring": "Computes the number of elements in tensor.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\utils\\losses_utils.py",
    "ast_data": "FunctionDef name:_num_elements arg:losses arguments arg With Call Return return:yes Call Call"
  },
  {
    "library": "scipy",
    "name": "find_active_events",
    "source_code": "def find_active_events(g, g_new, direction):\n    g, g_new = (np.asarray(g), np.asarray(g_new))\n    up = (g <= 0) & (g_new >= 0)\n    down = (g >= 0) & (g_new <= 0)\n    either = up | down\n    mask = up & (direction > 0) | down & (direction < 0) | either & (direction == 0)\n    return np.nonzero(mask)[0]",
    "docstring": "Find which event occurred during an integration step. Parameters ---------- g, g_new : array_like, shape (n_events,) Values of event functions at a current and next points. direction : ndarray, shape (n_events,) Event \"direction\" according to the definition in . Returns ------- active_events : ndarray Indices of events which occurred during the step.",
    "type": "function",
    "file_path": "scipy\\scipy\\integrate\\_ivp\\ivp.py",
    "ast_data": "FunctionDef name:find_active_events arg:g arg:g_new arg:direction arguments arg arg arg Assign Call Call Assign Compare Compare Assign Compare Compare Assign Assign Compare Compare Compare Return return:yes Call"
  },
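A self-contained sketch of the crossing-detection logic, re-declared with NumPy so it can run outside the solver:

```python
import numpy as np

def find_active_events(g, g_new, direction):
    g, g_new = np.asarray(g), np.asarray(g_new)
    up = (g <= 0) & (g_new >= 0)        # sign change from negative to positive
    down = (g >= 0) & (g_new <= 0)      # sign change from positive to negative
    either = up | down
    mask = (up & (direction > 0)
            | down & (direction < 0)
            | either & (direction == 0))
    return np.nonzero(mask)[0]

# Event 0 crosses zero upward during the step, event 1 crosses downward.
g, g_new = [-1.0, 2.0], [1.0, -2.0]
print(find_active_events(g, g_new, np.array([1, 1])))  # [0] -- only upward crossings requested
print(find_active_events(g, g_new, np.array([0, 0])))  # [0 1] -- any direction accepted
```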
  {
    "library": "authlib",
    "name": "validate_introspection_endpoint",
    "source_code": "def validate_introspection_endpoint(self):\n    url = self.get('introspection_endpoint')\n    if url and (not is_secure_transport(url)):\n        raise ValueError('\"introspection_endpoint\" MUST use \"https\" scheme')",
    "docstring": "OPTIONAL. URL of the authorization server's OAuth 2.0 introspection endpoint [RFC7662].",
    "type": "method",
    "file_path": "authlib\\authlib\\oauth2\\rfc8414\\models.py",
    "ast_data": "FunctionDef name:validate_introspection_endpoint arg:self arguments arg Assign Call If BoolOp Call Raise Call"
  },
  {
    "library": "sphinx",
    "name": "PyramidStyle",
    "source_code": "class PyramidStyle(Style):\n    background_color = '#f8f8f8'\n    default_style = ''\n    styles = {Whitespace: '#bbbbbb', Comment: 'italic #60a0b0', Comment.Preproc: 'noitalic #007020', Comment.Special: 'noitalic bg:#fff0f0', Keyword: 'bold #007020', Keyword.Pseudo: 'nobold', Keyword.Type: 'nobold #902000', Operator: '#666666', Operator.Word: 'bold #007020', Name.Builtin: '#007020', Name.Function: '#06287e', Name.Class: 'bold #0e84b5', Name.Namespace: 'bold #0e84b5', Name.Exception: '#007020', Name.Variable: '#bb60d5', Name.Constant: '#60add5', Name.Label: 'bold #002070', Name.Entity: 'bold #d55537', Name.Attribute: '#0e84b5', Name.Tag: 'bold #062873', Name.Decorator: 'bold #555555', String: '#4070a0', String.Doc: 'italic', String.Interpol: 'italic #70a0d0', String.Escape: 'bold #4070a0', String.Regex: '#235388', String.Symbol: '#517918', String.Other: '#c65d09', Number: '#40a070', Generic.Heading: 'bold #000080', Generic.Subheading: 'bold #800080', Generic.Deleted: '#A00000', Generic.Inserted: '#00A000', Generic.Error: '#FF0000', Generic.Emph: 'italic', Generic.Strong: 'bold', Generic.Prompt: 'bold #c65d09', Generic.Output: '#888', Generic.Traceback: '#04D', Error: '#a40000 bg:#fbe3e4'}",
    "docstring": "Pylons/pyramid pygments style based on friendly style, by Blaise Laflamme.",
    "type": "class",
    "file_path": "sphinx\\sphinx\\pygments_styles.py",
    "ast_data": "ClassDef name:PyramidStyle Assign Assign Assign"
  },
  {
    "library": "django",
    "name": "savepoint",
    "source_code": "def savepoint(using=None):\n    return get_connection(using).savepoint()",
    "docstring": "Create a savepoint (if supported and required by the backend) inside the current transaction. Return an identifier for the savepoint that will be used for the subsequent rollback or commit.",
    "type": "function",
    "file_path": "django\\django\\db\\transaction.py",
    "ast_data": "FunctionDef name:savepoint arg:using arguments arg Return return:yes Call Call"
  },
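A hedged usage sketch of manual savepoint control inside an atomic block; it assumes a configured Django project, and Invoice is a hypothetical model used only for illustration:

```python
from django.db import transaction

def create_invoice_with_fallback(data):
    with transaction.atomic():
        sid = transaction.savepoint()
        try:
            Invoice.objects.create(**data)  # hypothetical model
        except Exception:
            # Roll back only to the savepoint, keeping the outer transaction alive.
            transaction.savepoint_rollback(sid)
        else:
            transaction.savepoint_commit(sid)
```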
  {
    "library": "tensorflow",
    "name": "internal_operation_seed",
    "source_code": "def internal_operation_seed():\n    return context()._internal_operation_seed()",
    "docstring": "Returns the operation seed generated based on global seed.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\context.py",
    "ast_data": "FunctionDef name:internal_operation_seed arguments Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_ones_diag",
    "source_code": "def _ones_diag(self):\n    if self.shape.is_fully_defined():\n        d_shape = self.batch_shape.concatenate([self._min_matrix_dim()])\n    else:\n        d_shape = array_ops.concat([self.batch_shape_tensor(), [self._min_matrix_dim_tensor()]], axis=0)\n    return array_ops.ones(shape=d_shape, dtype=self.dtype)",
    "docstring": "Returns the diagonal of this operator as all ones.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\linalg\\linear_operator_identity.py",
    "ast_data": "FunctionDef name:_ones_diag arg:self arguments arg If Call Assign Call Call Assign Call Call Call Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "as_cardan_angles",
    "source_code": "def as_cardan_angles(self):\n    qw = self.scalar\n    qx, qy, qz = self.vector[..., :]\n    azim = np.arctan2(2 * (-qw * qz + qx * qy), qw * qw + qx * qx - qy * qy - qz * qz)\n    elev = np.arcsin(np.clip(2 * (qw * qy + qz * qx) / (qw * qw + qx * qx + qy * qy + qz * qz), -1, 1))\n    roll = np.arctan2(2 * (qw * qx - qy * qz), qw * qw - qx * qx - qy * qy + qz * qz)\n    return (elev, azim, roll)",
    "docstring": "The inverse of . Note that the angles returned are in radians, not degrees. The angles are not sensitive to the quaternion's norm().",
    "type": "method",
    "file_path": "matplotlib\\lib\\mpl_toolkits\\mplot3d\\axes3d.py",
    "ast_data": "FunctionDef name:as_cardan_angles arg:self arguments arg Assign Assign Assign Call Assign Call Call Assign Call Return return:yes"
  },
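As a sanity check, the three formulas can be replicated in plain NumPy; for the identity quaternion (no rotation) all three angles come out zero:

```python
import numpy as np

def as_cardan_angles(qw, qx, qy, qz):
    # Same arithmetic as the method above, written out for scalar components.
    azim = np.arctan2(2 * (-qw * qz + qx * qy), qw*qw + qx*qx - qy*qy - qz*qz)
    elev = np.arcsin(np.clip(2 * (qw * qy + qz * qx)
                             / (qw*qw + qx*qx + qy*qy + qz*qz), -1, 1))
    roll = np.arctan2(2 * (qw * qx - qy * qz), qw*qw - qx*qx - qy*qy + qz*qz)
    return elev, azim, roll

print(as_cardan_angles(1.0, 0.0, 0.0, 0.0))  # all three angles are 0.0
```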
  {
    "library": "pandas",
    "name": "patch_pickle",
    "source_code": "@contextlib.contextmanager\ndef patch_pickle() -> Generator[None]:\n    orig_loads = pickle.loads\n    try:\n        setattr(pickle, 'loads', loads)\n        yield\n    finally:\n        setattr(pickle, 'loads', orig_loads)",
    "docstring": "Temporarily patch pickle to use our unpickler.",
    "type": "function",
    "file_path": "pandas\\pandas\\compat\\pickle_compat.py",
    "ast_data": "FunctionDef name:patch_pickle arguments Assign Try Call Call"
  },
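The try/finally pattern used by patch_pickle generalizes to any attribute; a minimal sketch (patched_attr is an illustrative name, not a pandas API):

```python
import contextlib

@contextlib.contextmanager
def patched_attr(obj, name, replacement):
    """Temporarily replace obj.name, restoring it even if the body raises."""
    original = getattr(obj, name)
    try:
        setattr(obj, name, replacement)
        yield
    finally:
        setattr(obj, name, original)

import json
with patched_attr(json, "dumps", lambda *a, **k: "<patched>"):
    print(json.dumps({"x": 1}))  # <patched>
print(json.dumps({"x": 1}))     # {"x": 1} -- original function restored
```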
  {
    "library": "tensorflow",
    "name": "_send_sequence",
    "source_code": "def _send_sequence(self):\n    _SHARED_SEQUENCES[self.uid] = self.sequence",
    "docstring": "Sends current Iterable to all workers.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\utils\\data_utils.py",
    "ast_data": "FunctionDef name:_send_sequence arg:self arguments arg Assign"
  },
  {
    "library": "matplotlib",
    "name": "BracketB",
    "source_code": "@_register_style(_style_list, name='-[')\nclass BracketB(_Curve):\n    arrow = '-['\n\n    def __init__(self, widthB=1.0, lengthB=0.2, angleB=0):\n        super().__init__(widthB=widthB, lengthB=lengthB, angleB=angleB)",
    "docstring": "An arrow with an outward square bracket at its end.",
    "type": "class",
    "file_path": "matplotlib\\lib\\matplotlib\\patches.py",
    "ast_data": "ClassDef name:BracketB Assign FunctionDef name:__init__ arg:self arg:widthB arg:lengthB arg:angleB arguments arg arg arg arg Call Call Call"
  },
  {
    "library": "django",
    "name": "SerializerDoesNotExist",
    "source_code": "class SerializerDoesNotExist(KeyError):\n    pass",
    "docstring": "The requested serializer was not found.",
    "type": "class",
    "file_path": "django\\django\\core\\serializers\\base.py",
    "ast_data": "ClassDef name:SerializerDoesNotExist"
  },
  {
    "library": "scikit-learn",
    "name": "_estimator_with_converted_arrays",
    "source_code": "def _estimator_with_converted_arrays(estimator, converter):\n    from sklearn.base import clone\n    new_estimator = clone(estimator)\n    for key, attribute in vars(estimator).items():\n        if hasattr(attribute, '__dlpack__') or isinstance(attribute, numpy.ndarray):\n            attribute = converter(attribute)\n        setattr(new_estimator, key, attribute)\n    return new_estimator",
    "docstring": "Create new estimator which converting all attributes that are arrays. The converter is called on all NumPy arrays and arrays that support the __. Parameters ---------- estimator : Estimator Estimator to convert converter : callable Callable that takes an array attribute and returns the converted array. Returns ------- new_estimator : Estimator Convert estimator",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\utils\\_array_api.py",
    "ast_data": "FunctionDef name:_estimator_with_converted_arrays arg:estimator arg:converter arguments arg arg Assign Call For Call Call If BoolOp Call Call Assign Call Call Return return:yes"
  },
  {
    "library": "authlib",
    "name": "query_token",
    "source_code": "def query_token(self, token, token_type_hint):\n    token_model = self.server.token_model\n    if token_type_hint == 'access_token':\n        rv = _query_access_token(token_model, token)\n    elif token_type_hint == 'refresh_token':\n        rv = _query_refresh_token(token_model, token)\n    else:\n        rv = _query_access_token(token_model, token)\n        if not rv:\n            rv = _query_refresh_token(token_model, token)\n    return rv",
    "docstring": "Query requested token from database.",
    "type": "method",
    "file_path": "authlib\\authlib\\integrations\\django_oauth2\\endpoints.py",
    "ast_data": "FunctionDef name:query_token arg:self arg:token arg:token_type_hint arguments arg arg arg Assign If Compare Assign Call If Compare Assign Call Assign Call If Assign Call Return return:yes"
  },
  {
    "library": "scipy",
    "name": "logpdf",
    "source_code": "def logpdf(self, x, mean=None, cov=1, allow_singular=False):\n    params = self._process_parameters(mean, cov, allow_singular)\n    dim, mean, cov_object = params\n    x = self._process_quantiles(x, dim)\n    out = self._logpdf(x, mean, cov_object)\n    if np.any(cov_object.rank < dim):\n        out_of_bounds = ~cov_object._support_mask(x - mean)\n        out[out_of_bounds] = -np.inf\n    return _squeeze_output(out)",
    "docstring": "Log of the multivariate normal probability density function. Parameters ---------- x : array_like Quantiles, with the last axis of denoting the components. %(_mvn_doc_default_callparams)s Returns ------- pdf : ndarray or scalar Log of the probability density function evaluated at Notes ----- %(_mvn_doc_callparams_note)s",
    "type": "method",
    "file_path": "scipy\\scipy\\stats\\_multivariate.py",
    "ast_data": "FunctionDef name:logpdf arg:self arg:x arg:mean arg:cov arg:allow_singular arguments arg arg arg arg arg Assign Call Assign Assign Call Assign Call If Call Compare Assign Call Assign Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "all_gather",
    "source_code": "def all_gather(t, group_size, group_key, instance_key, communication_hint='auto', timeout=0):\n    if group_size < 1:\n        raise ValueError(f'Parameter `group_size` to all_gather must be at least 1. Received: {group_size}.')\n    return gen_collective_ops.collective_gather(t, shape=[0], group_size=group_size, group_key=group_key, instance_key=instance_key, communication_hint=communication_hint.lower(), timeout_seconds=timeout)",
    "docstring": "Accumulates tensors collectively, across devices, along first dimension. Args: t: the tensor to participate in the accumulation. group_size: the total number of tensors to be collectively accumulated. Each must reside on a different device. Should be a positive integer. group_key: an integer identifying the group of devices. instance_key: an integer identifying the participating group of Ops. communication_hint: preferred collective communication. The implementation may fall back to another mechanism. Options include , , and . timeout: a float. If set to a non zero, set a completion timeout to detect staleness. If the timer goes off, a DeadlineExceededError is raised. The timeout value in seconds. This feature is experimental. Returns: An Op implementing the distributed operation. Raises: ValueError: if any of the input parameter constraints are not met.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\collective_ops.py",
    "ast_data": "FunctionDef name:all_gather arg:t arg:group_size arg:group_key arg:instance_key arg:communication_hint arg:timeout arguments arg arg arg arg arg arg If Compare Raise Call Return return:yes Call Call"
  },
  {
    "library": "django",
    "name": "attach_alternative",
    "source_code": "def attach_alternative(self, content, mimetype):\n    if content is None or mimetype is None:\n        raise ValueError('Both content and mimetype must be provided.')\n    self.alternatives.append(EmailAlternative(content, mimetype))",
    "docstring": "Attach an alternative content representation.",
    "type": "method",
    "file_path": "django\\django\\core\\mail\\message.py",
    "ast_data": "FunctionDef name:attach_alternative arg:self arg:content arg:mimetype arguments arg arg arg If BoolOp Compare Compare Raise Call Call Call"
  },
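A hedged usage sketch of attach_alternative via EmailMultiAlternatives; it assumes a configured Django email backend, and the addresses are placeholders:

```python
from django.core.mail import EmailMultiAlternatives

msg = EmailMultiAlternatives(
    subject="Hello",
    body="Plain-text fallback.",
    from_email="noreply@example.com",
    to=["user@example.com"],
)
# Both content and mimetype are required; passing None raises ValueError.
msg.attach_alternative("<p>HTML version.</p>", "text/html")
msg.send()  # requires EMAIL_BACKEND to be configured
```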
  {
    "library": "scikit-learn",
    "name": "_estimate_weights",
    "source_code": "def _estimate_weights(self, nk):\n    if self.weight_concentration_prior_type == 'dirichlet_process':\n        self.weight_concentration_ = (1.0 + nk, self.weight_concentration_prior_ + np.hstack((np.cumsum(nk[::-1])[-2::-1], 0)))\n    else:\n        self.weight_concentration_ = self.weight_concentration_prior_ + nk",
    "docstring": "Estimate the parameters of the Dirichlet distribution. Parameters ---------- nk : array-like of shape (n_components,)",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\mixture\\_bayesian_mixture.py",
    "ast_data": "FunctionDef name:_estimate_weights arg:self arg:nk arguments arg arg If Compare Assign Call Call Assign"
  },
  {
    "library": "pandas",
    "name": "_period_break",
    "source_code": "def _period_break(dates: PeriodIndex, period: str) -> npt.NDArray[np.intp]:\n    mask = _period_break_mask(dates, period)\n    return np.nonzero(mask)[0]",
    "docstring": "Returns the indices where the given period changes. Parameters ---------- dates : PeriodIndex Array of intervals to monitor. period : str Name of the period to monitor.",
    "type": "function",
    "file_path": "pandas\\pandas\\plotting\\_matplotlib\\converter.py",
    "ast_data": "FunctionDef name:_period_break arg:dates arg:period arguments arg arg Assign Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "add_onnx_graph",
    "source_code": "def add_onnx_graph(self, graph, walltime=None):\n    event = event_pb2.Event(graph_def=graph.SerializeToString())\n    self.add_event(event, None, walltime)",
    "docstring": "Add a protocol buffer to the event file. Args: graph: A protocol buffer. walltime: float. Optional walltime to override the default (current) _get_file_writerfrom time.time())",
    "type": "method",
    "file_path": "pytorch\\torch\\utils\\tensorboard\\writer.py",
    "ast_data": "FunctionDef name:add_onnx_graph arg:self arg:graph arg:walltime arguments arg arg arg Assign Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "_get_json_config",
    "source_code": "def _get_json_config(config_dict):\n    json_config = '{}'\n    if config_dict is not None:\n        json_config = json.dumps(config_dict, sort_keys=True)\n    return json_config",
    "docstring": "Parse and returns JSON string from python dictionary.",
    "type": "function",
    "file_path": "pytorch\\torch\\utils\\tensorboard\\summary.py",
    "ast_data": "FunctionDef name:_get_json_config arg:config_dict arguments arg Assign If Compare Assign Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "_set_scale",
    "source_code": "def _set_scale(self):\n    if self._orientation == 'x':\n        pscale = self._parent.xaxis.get_scale()\n        set_scale = self.set_xscale\n    else:\n        pscale = self._parent.yaxis.get_scale()\n        set_scale = self.set_yscale\n    if pscale == self._parentscale:\n        return\n    if self._ticks_set:\n        ticks = self._axis.get_ticklocs()\n    set_scale('functionlog' if pscale == 'log' else 'function', functions=self._functions[::-1])\n    if self._ticks_set:\n        self._axis.set_major_locator(mticker.FixedLocator(ticks))\n    self._parentscale = pscale",
    "docstring": "Check if parent has set its scale",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\axes\\_secondary_axes.py",
    "ast_data": "FunctionDef name:_set_scale arg:self arguments arg If Compare Assign Call Assign Assign Call Assign If Compare Return return:no If Assign Call Call Compare If Call Call Assign"
  },
  {
    "library": "pytorch",
    "name": "clear_timers",
    "source_code": "@abc.abstractmethod\ndef clear_timers(self, worker_ids: set[Any]) -> None:\n    pass",
    "docstring": "Clears all timers for the given ``.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\elastic\\timer\\api.py",
    "ast_data": "FunctionDef name:clear_timers arg:self arg:worker_ids arguments arg arg"
  },
  {
    "library": "matplotlib",
    "name": "get_test_data",
    "source_code": "def get_test_data(delta=0.05):\n    x = y = np.arange(-3.0, 3.0, delta)\n    X, Y = np.meshgrid(x, y)\n    Z1 = np.exp(-(X ** 2 + Y ** 2) / 2) / (2 * np.pi)\n    Z2 = np.exp(-(((X - 1) / 1.5) ** 2 + ((Y - 1) / 0.5) ** 2) / 2) / (2 * np.pi * 0.5 * 1.5)\n    Z = Z2 - Z1\n    X = X * 10\n    Y = Y * 10\n    Z = Z * 500\n    return (X, Y, Z)",
    "docstring": "Return a tuple X, Y, Z with a test data set.",
    "type": "function",
    "file_path": "matplotlib\\lib\\mpl_toolkits\\mplot3d\\axes3d.py",
    "ast_data": "FunctionDef name:get_test_data arg:delta arguments arg Assign Call Assign Call Assign Call Assign Call Assign Assign Assign Assign Return return:yes"
  },
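Typical use pairs get_test_data with a 3D wireframe plot; this sketch assumes matplotlib with the mplot3d toolkit available and a working display (or an Agg backend plus savefig):

```python
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import axes3d

# The helper returns a scaled two-Gaussian surface over a regular grid.
X, Y, Z = axes3d.get_test_data(0.05)

fig = plt.figure()
ax = fig.add_subplot(projection="3d")
ax.plot_wireframe(X, Y, Z, rstride=10, cstride=10)
plt.show()
```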
  {
    "library": "pytorch",
    "name": "set_growth_interval",
    "source_code": "def set_growth_interval(self, new_interval: int) -> None:\n    self._growth_interval = new_interval",
    "docstring": "Set a new growth interval. Args: new_interval (int): Value to use as the new growth interval.",
    "type": "method",
    "file_path": "pytorch\\torch\\amp\\grad_scaler.py",
    "ast_data": "FunctionDef name:set_growth_interval arg:self arg:new_interval arguments arg arg Assign"
  },
  {
    "library": "scikit-learn",
    "name": "_transform",
    "source_code": "def _transform(self, X):\n    check_is_fitted(self)\n    predictions = [getattr(est, meth)(X) for est, meth in zip(self.estimators_, self.stack_method_) if est != 'drop']\n    return self._concatenate_predictions(X, predictions)",
    "docstring": "Concatenate and return the predictions of the estimators.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\ensemble\\_stacking.py",
    "ast_data": "FunctionDef name:_transform arg:self arg:X arguments arg arg Call Assign Call Call Call Compare Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "ShardingPlanner",
    "source_code": "class ShardingPlanner(abc.ABC):\n\n    @abc.abstractmethod\n    def build_plan(self, module: nn.Module) -> ShardingPlan:\n        pass",
    "docstring": "Default ShardingPlanner interface, can be extended and implement advanced sharding strategies.",
    "type": "class",
    "file_path": "pytorch\\torch\\distributed\\_shard\\sharding_plan\\api.py",
    "ast_data": "ClassDef name:ShardingPlanner FunctionDef name:build_plan arg:self arg:module arguments arg arg"
  },
  {
    "library": "numpy",
    "name": "_cseries_to_zseries",
    "source_code": "def _cseries_to_zseries(c):\n    n = c.size\n    zs = np.zeros(2 * n - 1, dtype=c.dtype)\n    zs[n - 1:] = c / 2\n    return zs + zs[::-1]",
    "docstring": "Convert Chebyshev series to z-series. Convert a Chebyshev series to the equivalent z-series. The result is never an empty array. The dtype of the return is the same as that of the input. No checks are run on the arguments as this routine is for internal use. Parameters ---------- c : 1-D ndarray Chebyshev coefficients, ordered from low to high Returns ------- zs : 1-D ndarray Odd length symmetric z-series, ordered from low to high.",
    "type": "function",
    "file_path": "numpy\\numpy\\polynomial\\chebyshev.py",
    "ast_data": "FunctionDef name:_cseries_to_zseries arg:c arguments arg Assign Assign Call Assign Return return:yes"
  },
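A standalone sketch of the conversion: for a lone T_2 coefficient, the symmetric z-series carries weight 1/2 at z^{-2} and z^{2}:

```python
import numpy as np

def cseries_to_zseries(c):
    # Mirror half the Chebyshev coefficients into a symmetric z-series.
    n = c.size
    zs = np.zeros(2 * n - 1, dtype=c.dtype)
    zs[n - 1:] = c / 2
    return zs + zs[::-1]

# c = [0, 0, 1] encodes T_2; the z-series is symmetric of odd length 2n-1 = 5.
print(cseries_to_zseries(np.array([0.0, 0.0, 1.0])))  # [0.5 0.  0.  0.  0.5]
```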
  {
    "library": "kornia",
    "name": "laf_to_three_points",
    "source_code": "def laf_to_three_points(laf: Tensor) -> Tensor:\n    KORNIA_CHECK_LAF(laf)\n    three_pts = stack([laf[..., 2] + laf[..., 0], laf[..., 2] + laf[..., 1], laf[..., 2]], dim=-1)\n    return three_pts",
    "docstring": "Convert local affine frame(LAF) to alternative representation: coordinates of LAF center, LAF-x unit vector, LAF-y unit vector. Args: laf: :math:. Returns: threepts :math:.",
    "type": "function",
    "file_path": "kornia\\kornia\\feature\\laf.py",
    "ast_data": "FunctionDef name:laf_to_three_points arg:laf arguments arg Call Assign Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "__call__",
    "source_code": "def __call__(self, x):\n    if self._cache_size == 0:\n        return self._call(x)\n    x_old, y_old = self._cached_x_y\n    if x is x_old:\n        return y_old\n    y = self._call(x)\n    self._cached_x_y = (x, y)\n    return y",
    "docstring": "Computes the transform .",
    "type": "method",
    "file_path": "pytorch\\torch\\distributions\\transforms.py",
    "ast_data": "FunctionDef name:__call__ arg:self arg:x arguments arg arg If Compare Return return:yes Call Assign If Compare Return return:yes Assign Call Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "from_keras_model",
    "source_code": "@classmethod\ndef from_keras_model(cls, model):\n    TFLiteConverterBase._set_original_model_type(conversion_metadata_fb.ModelType.KERAS_MODEL)\n    return TFLiteKerasModelConverterV2(model)",
    "docstring": "Creates a TFLiteConverter object from a Keras model. Args: model: tf.Keras.Model Returns: TFLiteConverter object.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\lite\\python\\lite.py",
    "ast_data": "FunctionDef name:from_keras_model arg:cls arg:model arguments arg arg Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "_remove_unneccessary_copy_op_pass",
    "source_code": "def _remove_unneccessary_copy_op_pass(gm: torch.fx.GraphModule, new_graph_signature: ExportGraphSignature) -> tuple[torch.fx.GraphModule, ExportGraphSignature]:\n    with gm._set_replace_hook(new_graph_signature.get_replace_hook()):\n        for node in gm.graph.nodes:\n            if node.op == 'output':\n                args, _ = pytree.tree_flatten(node.args)\n                for out in args:\n                    if isinstance(out, torch.fx.Node) and out.name in new_graph_signature.buffers_to_mutate:\n                        if out.op == 'call_function' and out.target == torch.ops.aten.copy.default:\n                            out.replace_all_uses_with(out.args[1])\n                            gm.graph.erase_node(out)\n        gm.recompile()\n    return (gm, new_graph_signature)",
    "docstring": "Removes redundant copy_ node that was introduced due to mutated buffer.",
    "type": "function",
    "file_path": "pytorch\\torch\\export\\exported_program.py",
    "ast_data": "FunctionDef name:_remove_unneccessary_copy_op_pass arg:gm arg:new_graph_signature arguments arg arg With Call Call For If Compare Assign Call For If BoolOp Call Compare If BoolOp Compare Compare Call Call Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "_AxesDecorationsSize",
    "source_code": "class _AxesDecorationsSize(_Base):\n    _get_size_map = {'left': lambda tight_bb, axes_bb: axes_bb.xmin - tight_bb.xmin, 'right': lambda tight_bb, axes_bb: tight_bb.xmax - axes_bb.xmax, 'bottom': lambda tight_bb, axes_bb: axes_bb.ymin - tight_bb.ymin, 'top': lambda tight_bb, axes_bb: tight_bb.ymax - axes_bb.ymax}\n\n    def __init__(self, ax, direction):\n        _api.check_in_list(self._get_size_map, direction=direction)\n        self._direction = direction\n        self._ax_list = [ax] if isinstance(ax, Axes) else ax\n\n    def get_size(self, renderer):\n        sz = max([self._get_size_map[self._direction](ax.get_tightbbox(renderer, call_axes_locator=False), ax.bbox) for ax in self._ax_list])\n        dpi = renderer.points_to_pixels(72)\n        abs_size = sz / dpi\n        rel_size = 0\n        return (rel_size, abs_size)",
    "docstring": "Fixed size, corresponding to the size of decorations on a given Axes side.",
    "type": "class",
    "file_path": "matplotlib\\lib\\mpl_toolkits\\axes_grid1\\axes_size.py",
    "ast_data": "ClassDef name:_AxesDecorationsSize Assign arguments arg arg arguments arg arg arguments arg arg arguments arg arg FunctionDef name:__init__ arg:self arg:ax arg:direction arguments arg arg arg Call Assign Assign Call FunctionDef name:get_size arg:self arg:renderer arguments arg arg Assign Call Call Call Assign Call Assign Assign Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "dissoc",
    "source_code": "def dissoc(d, *keys, **kwargs):\n    factory = _get_factory(dissoc, kwargs)\n    d2 = factory()\n    if len(keys) < len(d) * 0.6:\n        d2.update(d)\n        for key in keys:\n            if key in d2:\n                del d2[key]\n    else:\n        remaining = set(d)\n        remaining.difference_update(keys)\n        for k in remaining:\n            d2[k] = d[k]\n    return d2",
    "docstring": "Return a new dict with the given key(s) removed. New dict has d[key] deleted for each supplied key. Does not modify the initial dictionary. >>> dissoc({\"x\": 1, \"y\": 2}, \"y\") {'x': 1} >>> dissoc({\"x\": 1, \"y\": 2}, \"y\", \"x\") {} >>> dissoc({\"x\": 1}, \"y\") # Ignores missing keys {'x': 1}",
    "type": "function",
    "file_path": "pytorch\\torch\\fx\\experimental\\unification\\unification_tools.py",
    "ast_data": "FunctionDef name:dissoc arg:d arguments arg arg arg Assign Call Assign Call If Compare Call Call Call For If Compare Assign Call Call For Assign Return return:yes"
  },
  {
    "library": "sphinx",
    "name": "_init_i18n",
    "source_code": "def _init_i18n(self) -> None:\n    logger.info(bold(__('loading translations [%s]... ')), self.config.language, nonl=True)\n    repo = CatalogRepository(self.srcdir, self.config.locale_dirs, self.config.language, self.config.source_encoding)\n    for catalog in repo.catalogs:\n        if catalog.domain == 'sphinx' and catalog.is_outdated():\n            catalog.write_mo(self.config.language, self.config.gettext_allow_fuzzy_translations)\n    locale_dirs: list[_StrPath | None] = list(repo.locale_dirs)\n    locale_dirs += [None]\n    locale_dirs += [package_dir / 'locale']\n    self.translator, has_translation = locale.init(locale_dirs, self.config.language)\n    if has_translation or self.config.language == 'en':\n        logger.info(__('done'))\n    else:\n        logger.info(__('not available for built-in messages'))",
    "docstring": "Load translated strings from the configured localedirs if enabled in the configuration.",
    "type": "method",
    "file_path": "sphinx\\sphinx\\application.py",
    "ast_data": "FunctionDef name:_init_i18n arg:self arguments arg Call Call Call Assign Call For If BoolOp Compare Call Call Call Assign Call If BoolOp Compare Call Call Call Call"
  },
  {
    "library": "seaborn",
    "name": "LineWidth",
    "source_code": "class LineWidth(IntervalProperty):\n\n    @property\n    def default_range(self) -> tuple[float, float]:\n        base = mpl.rcParams['lines.linewidth']\n        return (base * 0.5, base * 2)",
    "docstring": "Thickness of a line mark, in points.",
    "type": "class",
    "file_path": "seaborn\\seaborn\\_core\\properties.py",
    "ast_data": "ClassDef name:LineWidth FunctionDef name:default_range arg:self arguments arg Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "get_cell",
    "source_code": "def get_cell(self, *labels):\n    return StringGaugeCell(super(StringGauge, self).get_cell(*labels))",
    "docstring": "Retrieves the cell.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\monitoring.py",
    "ast_data": "FunctionDef name:get_cell arg:self arguments arg arg Return return:yes Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_add_exported_symbol",
    "source_code": "def _add_exported_symbol(self, node: ast.Call, symbol_name: str) -> None:\n    if symbol_name.find('.') != -1:\n        raise BadExportError(f'{self._current_file}:{node.lineno} export called with symbol {symbol_name} not defined in current file: {ast.dump(node)}')\n    v2_apis = tuple((f'{self._api_name}.{self._literal_value(arg)}' for arg in node.args))\n    v1_apis = v2_apis\n    for kw in node.keywords:\n        if kw.arg == 'v1':\n            v1_apis = tuple((f'{self._api_name}.{v}' for v in self._literal_value(kw.value)))\n        elif kw.arg == 'allow_multiple_exports':\n            pass\n        else:\n            raise BadExportError(f'{self._current_file}:{node.lineno} export called with unknown argument {kw.arg}: {ast.dump(node)}')\n    self._exports.add_symbol(exported_api.ExportedSymbol.create(file_name=self._current_file, line_no=node.lineno, symbol_name=symbol_name, v2_apis=v2_apis, v1_apis=v1_apis))",
    "docstring": "Adds an exported symbol represented by the given call.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\tools\\api\\generator2\\extractor\\extractor.py",
    "ast_data": "FunctionDef name:_add_exported_symbol arg:self arg:node arg:symbol_name arguments arg arg arg If Compare Call Raise Call Call Assign Call Call Assign For If Compare Assign Call Call If Compare Raise Call Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "ragged_abs",
    "source_code": "def ragged_abs(self, name=None):\n    return math_ops.abs(self, name=name)",
    "docstring": "Computes the absolute value of a ragged tensor. Given a ragged tensor of integer or floating-point values, this operation returns a ragged tensor of the same type, where each element contains the absolute value of the corresponding element in the input. Given a ragged tensor of complex numbers, this operation returns a tensor of type or that is the absolute value of each element in . For a complex number \\\\(a + bj\\\\), its absolute value is computed as \\\\(\\sqrt{a^2 + b^2}\\\\). For example: >>> # real number >>> x = tf.ragged.constant([[-2.2, 3.2], [-4.2]]) >>> tf.abs(x) >>> # complex number >>> x = tf.ragged.constant([[-2.2 + 4.7j], [-3.2 + 5.7j], [-4.2 + 6.7j]]) >>> tf.abs(x) Args: name: A name for the operation (optional). Returns: A of the same size and type as , with absolute values. Note, for or input, the returned will be of type or , respectively.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\ragged_operators.py",
    "ast_data": "FunctionDef name:ragged_abs arg:self arg:name arguments arg arg Return return:yes Call"
  },
  {
    "library": "numpy",
    "name": "__pow__",
    "source_code": "def __pow__(self, other):\n    if self._delegate_binop(other):\n        return NotImplemented\n    return power(self, other)",
    "docstring": "Raise self to the power other, masking the potential NaNs/Infs",
    "type": "method",
    "file_path": "numpy\\numpy\\ma\\core.py",
    "ast_data": "FunctionDef name:__pow__ arg:self arg:other arguments arg arg If Call Return return:yes Return return:yes Call"
  },
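A quick numpy.ma demonstration of masked exponentiation: masked inputs stay masked, and invalid results are masked rather than leaking NaN into the data:

```python
import numpy.ma as ma

x = ma.array([2.0, -1.0, 4.0], mask=[False, True, False])
print(x ** 2)  # [4.0 -- 16.0] -- the masked element stays masked

# A negative base with a fractional exponent would be NaN; ma masks it instead.
print(ma.array([4.0, -4.0]) ** 0.5)  # [2.0 --]
```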
  {
    "library": "pytorch",
    "name": "replace_set_grad_with_hop_pass",
    "source_code": "def replace_set_grad_with_hop_pass(gm: torch.fx.GraphModule, graph_signature: Optional[ExportGraphSignature]) -> tuple[torch.fx.GraphModule, Optional[ExportGraphSignature]]:\n    return _replace_with_hop_pass_helper(gm, graph_signature, _sequential_split_and_maybe_inline_subgraphs)",
    "docstring": "Split gm into sub-graph-modules using , and then recursively call itself on each of the submodules.",
    "type": "function",
    "file_path": "pytorch\\torch\\_export\\passes\\replace_set_grad_with_hop_pass.py",
    "ast_data": "FunctionDef name:replace_set_grad_with_hop_pass arg:gm arg:graph_signature arguments arg arg Return return:yes Call"
  },
  {
    "library": "kornia",
    "name": "cy",
    "source_code": "@property\ndef cy(self) -> Tensor:\n    return self.intrinsics[..., 1, 2]",
    "docstring": "Return the y-coordinate of the principal point. Returns: tensor of shape :math:.",
    "type": "method",
    "file_path": "kornia\\kornia\\geometry\\camera\\pinhole.py",
    "ast_data": "FunctionDef name:cy arg:self arguments arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "step",
    "source_code": "def step(self, closure: Optional[Callable[[], float]]=None, **kwargs: Any) -> Optional[float]:\n    if self._overlap_with_ddp:\n        logger.warning('`step()` should not be included in the training loop when `overlap_with_ddp=True`')\n        return None\n    loss = self._local_step(closure=closure, **kwargs)\n    self._sync_params()\n    return loss",
    "docstring": "Perform a single optimizer step and syncs parameters across all ranks. Arguments: closure (Callable): a closure that re-evaluates the model and returns the loss; optional for most optimizers. Returns: Optional loss depending on the underlying local optimizer. .. note:: Any extra parameters are passed to the base optimizer as-is.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\optim\\zero_redundancy_optimizer.py",
    "ast_data": "FunctionDef name:step arg:self arg:closure arguments arg arg arg If Call Return return:no Assign Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "filter_op",
    "source_code": "def filter_op(self, op: 'CKTileGemmOperation'):\n    if not self.check_dtypes(op):\n        return None\n    if not self.check_layouts(op):\n        return None\n    if not self.check_block_tiles(op):\n        return None\n    if not self.check_alignments(op):\n        return None\n    return op",
    "docstring": "Determines whether a given op definition is suitable for the current input / output of the operation that this template implements. Filter is based on inputs' dtype, layout and statically inferred size. Returns None if the op is not suitable, otherwise returns the op to be used.",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\codegen\\rocm\\ck_tile_universal_gemm_template.py",
    "ast_data": "FunctionDef name:filter_op arg:self arg:op arguments arg arg If Call Return return:no If Call Return return:no If Call Return return:no If Call Return return:no Return return:yes"
  },
  {
    "library": "scipy",
    "name": "c2r",
    "source_code": "def c2r(forward, x, n=None, axis=-1, norm=None, overwrite_x=False, workers=None, *, plan=None):\n    if plan is not None:\n        raise NotImplementedError('Passing a precomputed plan is not yet supported by scipy.fft functions')\n    tmp = _asfarray(x)\n    norm = _normalization(norm, forward)\n    workers = _workers(workers)\n    if np.isrealobj(tmp):\n        tmp = tmp + 0j\n    if n is None:\n        n = (tmp.shape[axis] - 1) * 2\n        if n < 1:\n            raise ValueError(f'Invalid number of data points ({n}) specified')\n    else:\n        tmp, _ = _fix_shape_1d(tmp, n // 2 + 1, axis)\n    return pfft.c2r(tmp, (axis,), n, forward, norm, None, workers)",
    "docstring": "Return inverse discrete Fourier transform of real sequence x.",
    "type": "function",
    "file_path": "scipy\\scipy\\fft\\_pocketfft\\basic.py",
    "ast_data": "FunctionDef name:c2r arg:forward arg:x arg:n arg:axis arg:norm arg:overwrite_x arg:workers arguments arg arg arg arg arg arg arg arg If Compare Raise Call Assign Call Assign Call Assign Call If Call Assign If Compare Assign If Compare Raise Call Assign Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_is_multiple_state",
    "source_code": "def _is_multiple_state(state_size):\n    return hasattr(state_size, '__len__') and (not isinstance(state_size, tensor_shape.TensorShape))",
    "docstring": "Check whether the state_size contains multiple states.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\layers\\recurrent.py",
    "ast_data": "FunctionDef name:_is_multiple_state arg:state_size arguments arg Return return:yes BoolOp Call Call"
  },
  {
    "library": "kornia",
    "name": "_patchify_8x8",
    "source_code": "def _patchify_8x8(input: Tensor) -> Tensor:\n    B, H, W = input.shape\n    output: Tensor = input.view(B, H // 8, 8, W // 8, 8).permute(0, 1, 3, 2, 4).reshape(B, -1, 8, 8)\n    return output",
    "docstring": "Extract non-overlapping 8 x 8 patches from the given input image. Args: input (Tensor): Input image of the shape :math:. Returns: output (Tensor): Image patchify of the shape :math:.",
    "type": "function",
    "file_path": "kornia\\kornia\\enhance\\jpeg.py",
    "ast_data": "FunctionDef name:_patchify_8x8 arg:input arguments arg Assign Call Call Call Return return:yes"
  },
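A standalone check of the reshape pipeline; it assumes PyTorch and re-declares the helper for illustration:

```python
import torch

def patchify_8x8(x):
    # view -> permute -> reshape turns (B, H, W) into (B, N, 8, 8) tiles.
    B, H, W = x.shape
    return (x.view(B, H // 8, 8, W // 8, 8)
             .permute(0, 1, 3, 2, 4)
             .reshape(B, -1, 8, 8))

img = torch.arange(16 * 16, dtype=torch.float32).reshape(1, 16, 16)
patches = patchify_8x8(img)
print(patches.shape)         # torch.Size([1, 4, 8, 8]) -- four non-overlapping tiles
print(patches[0, 0, 0, :3])  # tensor([0., 1., 2.]) -- first tile starts at pixel (0, 0)
```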
  {
    "library": "django",
    "name": "label_from_instance",
    "source_code": "def label_from_instance(self, obj):\n    return str(obj)",
    "docstring": "Convert objects into strings and generate the labels for the choices presented by this object. Subclasses can override this method to customize the display of the choices.",
    "type": "method",
    "file_path": "django\\django\\forms\\models.py",
    "ast_data": "FunctionDef name:label_from_instance arg:self arg:obj arguments arg arg Return return:yes Call"
  },
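A common override pattern for label_from_instance; Author is a hypothetical model used only for illustration:

```python
from django.forms import ModelChoiceField

class AuthorChoiceField(ModelChoiceField):
    def label_from_instance(self, obj):
        # Show "Last, First" instead of the model's default str().
        return f"{obj.last_name}, {obj.first_name}"

# Usage (assuming an Author model exists):
# field = AuthorChoiceField(queryset=Author.objects.all())
```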
  {
    "library": "scikit-learn",
    "name": "_get_scorer",
    "source_code": "def _get_scorer(self):\n    scoring = self.scoring or 'accuracy'\n    return get_scorer(scoring)",
    "docstring": "Get the scorer based on the scoring method specified. The default scoring method is .",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\linear_model\\_logistic.py",
    "ast_data": "FunctionDef name:_get_scorer arg:self arguments arg Assign BoolOp Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "as_dense_shapes",
    "source_code": "def as_dense_shapes(shapes, classes):\n    ret = nest.pack_sequence_as(shapes, [tensor_shape.unknown_shape() if c is sparse_tensor.SparseTensor else shape for shape, c in zip(nest.flatten(shapes), nest.flatten(classes))])\n    return ret",
    "docstring": "Converts sparse tensor shapes to their physical shapes. Args: shapes: a structure of shapes to convert. classes: a structure of objects that identify the dataset item classes Returns: a structure matching the nested structure of , containing at positions where contains and matching contents of otherwise",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\data\\util\\sparse.py",
    "ast_data": "FunctionDef name:as_dense_shapes arg:shapes arg:classes arguments arg arg Assign Call Compare Call Call Call Call Return return:yes"
  },
  {
    "library": "django",
    "name": "clear_cache",
    "source_code": "def clear_cache(self):\n    global SITE_CACHE\n    SITE_CACHE = {}",
    "docstring": "Clear the `` object cache.",
    "type": "method",
    "file_path": "django\\django\\contrib\\sites\\models.py",
    "ast_data": "FunctionDef name:clear_cache arg:self arguments arg Assign"
  },
  {
    "library": "tensorflow",
    "name": "verify_dataset_shuffled",
    "source_code": "def verify_dataset_shuffled(x):\n    assert isinstance(x, data_types.DatasetV2)\n    graph_def = get_dataset_graph_def(x)\n    for node in graph_def.node:\n        if node.op.startswith('ShuffleDataset'):\n            return True\n    for function in graph_def.library.function:\n        for node in function.node_def:\n            if node.op.startswith('ShuffleDataset'):\n                return True\n    logging.warning('Expected a shuffled dataset but input dataset `x` is not shuffled. Please invoke `shuffle()` on input dataset.')\n    return False",
    "docstring": "Verifies that the dataset is shuffled. Args: x: Dataset passed as an input to the model. Returns: boolean, whether the input dataset is shuffled or not.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\training_utils_v1.py",
    "ast_data": "FunctionDef name:verify_dataset_shuffled arg:x arguments arg Call Assign Call For If Call Return return:yes For For If Call Return return:yes Call Return return:yes"
  },
  {
    "library": "scipy",
    "name": "Shubert01",
    "source_code": "class Shubert01(Benchmark):\n    change_dimensionality = True\n\n    def __init__(self, dimensions=2):\n        Benchmark.__init__(self, dimensions)\n        self._bounds = list(zip([-10.0] * self.N, [10.0] * self.N))\n        self.global_optimum = [[-7.0835, 4.858]]\n        self.fglob = -186.7309\n\n    def fun(self, x, *args):\n        self.nfev += 1\n        j = atleast_2d(arange(1, 6)).T\n        y = j * cos((j + 1) * x + j)\n        return prod(sum(y, axis=0))",
    "docstring": "Shubert 1 objective function. This class defines the Shubert 1 [1]_ global optimization problem. This is a multimodal minimization problem defined as follows: .. math:: f_{\\text{Shubert01}}(x) = \\prod_{i=1}^{n}\\left(\\sum_{j=1}^{5} cos(j+1)x_i+j \\right ) Here, :math: represents the number of dimensions and :math: for :math:. *Global optimum*: :math: for :math: (and many others). .. [1] Gavana, A. Global Optimization Benchmarks and AMPGO retrieved 2015 TODO: Jamil#133 is missing a prefactor of j before the cos function.",
    "type": "class",
    "file_path": "scipy\\benchmarks\\benchmarks\\go_benchmark_functions\\go_funcs_S.py",
    "ast_data": "ClassDef name:Shubert01 Assign FunctionDef name:__init__ arg:self arg:dimensions arguments arg arg Call Assign Call Call Assign Assign FunctionDef name:fun arg:self arg:x arguments arg arg arg Assign Call Call Assign Call Return return:yes Call Call"
  },
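The objective can be evaluated standalone in NumPy; per the recorded global optimum above, the value at [-7.0835, 4.858] should be close to fglob = -186.7309:

```python
import numpy as np

def shubert01(x):
    # Same arithmetic as Shubert01.fun above: product over dimensions of
    # sum_{j=1..5} j * cos((j + 1) * x_i + j).
    x = np.atleast_1d(x)
    j = np.arange(1, 6)[:, None]  # j = 1..5 as a column for broadcasting
    return np.prod(np.sum(j * np.cos((j + 1) * x + j), axis=0))

print(shubert01([-7.0835, 4.858]))  # approximately -186.7309
```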
  {
    "library": "tensorflow",
    "name": "_num_present",
    "source_code": "def _num_present(losses, weights, per_batch=False):\n    if isinstance(weights, float) and weights != 0.0 or (context.executing_eagerly() and weights._rank() == 0 and (not math_ops.equal(weights, 0.0))):\n        return _num_elements(losses)\n    with ops.name_scope(None, 'num_present', (losses, weights)) as scope:\n        weights = math_ops.cast(weights, dtype=dtypes.float32)\n        present = array_ops.where(math_ops.equal(weights, 0.0), array_ops.zeros_like(weights), array_ops.ones_like(weights))\n        present = weights_broadcast_ops.broadcast_weights(present, losses)\n        if per_batch:\n            return math_ops.reduce_sum(present, axis=math_ops.range(1, array_ops.rank(present)), keepdims=True, name=scope)\n        return math_ops.reduce_sum(present, name=scope)",
    "docstring": "Computes the number of elements in the loss function induced by . A given weights tensor induces different numbers of usable elements in the tensor. The tensor is broadcast across for all possible dimensions. For example, if is a tensor of dimension and is a tensor of shape , then is, in effect, tiled to match the shape of . Following this effective tile, the total number of present elements is the number of non-zero weights. Args: losses: of shape . weights: of shape , or , where K < N. per_batch: Whether to return the number of elements per batch or as a sum total. Returns: The number of present (non-zero) elements in the losses tensor. If is , the value is returned as a tensor of size . Otherwise, a single scalar tensor is returned.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\losses\\losses_impl.py",
    "ast_data": "FunctionDef name:_num_present arg:losses arg:weights arg:per_batch arguments arg arg arg If BoolOp BoolOp Call Compare BoolOp Call Compare Call Call Return return:yes Call With Call Assign Call Assign Call Call Call Call Assign Call If Return return:yes Call Call Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "add_dependency",
    "source_code": "def add_dependency(self, module_name: str, dependencies=True):\n    if module_name in self.dependency_graph and self.dependency_graph.nodes[module_name].get('provided') is True:\n        return\n    if module_name == 'torch_package_importer':\n        self.dependency_graph.add_node(module_name, action=_ModuleProviderAction.SKIP, provided=True)\n        return\n    if module_name == '_mock':\n        self.dependency_graph.add_node(module_name, action=_ModuleProviderAction.REPACKAGED_MOCK_MODULE, provided=True)\n        return\n    if self._can_implicitly_extern(module_name):\n        self.dependency_graph.add_node(module_name, action=_ModuleProviderAction.EXTERN, provided=True)\n        return\n    for pattern, pattern_info in self.patterns.items():\n        if pattern.matches(module_name):\n            pattern_info.was_matched = True\n            self.dependency_graph.add_node(module_name, action=pattern_info.action, provided=True)\n            if pattern_info.action == _ModuleProviderAction.DENY:\n                self.dependency_graph.add_node(module_name, error=PackagingErrorReason.DENIED)\n            if pattern_info.action == _ModuleProviderAction.INTERN:\n                self._intern_module(module_name, dependencies)\n            return\n    self.dependency_graph.add_node(module_name, error=PackagingErrorReason.NO_ACTION)",
    "docstring": "Given a module, add it to the dependency graph according to patterns specified by the user.",
    "type": "method",
    "file_path": "pytorch\\torch\\package\\package_exporter.py",
    "ast_data": "FunctionDef name:add_dependency arg:self arg:module_name arg:dependencies arguments arg arg arg If BoolOp Compare Compare Call Return return:no If Compare Call Return return:no If Compare Call Return return:no If Call Call Return return:no For Call If Call Assign Call If Compare Call If Compare Call Return return:no Call"
  },
  {
    "library": "matplotlib",
    "name": "get_function_values",
    "source_code": "def get_function_values(self, alpha, ecc, dofs):\n    subtri = np.argmin(alpha, axis=1)[:, 0]\n    ksi = _roll_vectorized(alpha, -subtri, axis=0)\n    E = _roll_vectorized(ecc, -subtri, axis=0)\n    x = ksi[:, 0, 0]\n    y = ksi[:, 1, 0]\n    z = ksi[:, 2, 0]\n    x_sq = x * x\n    y_sq = y * y\n    z_sq = z * z\n    V = _to_matrix_vectorized([[x_sq * x], [y_sq * y], [z_sq * z], [x_sq * z], [x_sq * y], [y_sq * x], [y_sq * z], [z_sq * y], [z_sq * x], [x * y * z]])\n    prod = self.M @ V\n    prod += _scalar_vectorized(E[:, 0, 0], self.M0 @ V)\n    prod += _scalar_vectorized(E[:, 1, 0], self.M1 @ V)\n    prod += _scalar_vectorized(E[:, 2, 0], self.M2 @ V)\n    s = _roll_vectorized(prod, 3 * subtri, axis=0)\n    return (dofs @ s)[:, 0, 0]",
    "docstring": "Parameters ---------- alpha : is a (N x 3 x 1) array (array of column-matrices) of barycentric coordinates, ecc : is a (N x 3 x 1) array (array of column-matrices) of triangle eccentricities, dofs : is a (N x 1 x 9) arrays (arrays of row-matrices) of computed degrees of freedom. Returns ------- Returns the N-array of interpolated function values.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\tri\\_triinterpolate.py",
    "ast_data": "FunctionDef name:get_function_values arg:self arg:alpha arg:ecc arg:dofs arguments arg arg arg arg Assign Call Assign Call Assign Call Assign Assign Assign Assign Assign Assign Assign Call Assign Call Call Call Assign Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "get_clip_rectangle",
    "source_code": "def get_clip_rectangle(self):\n    return self._cliprect",
    "docstring": "Return the clip rectangle as a instance.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\backend_bases.py",
    "ast_data": "FunctionDef name:get_clip_rectangle arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "request_stop",
    "source_code": "def request_stop(self):\n    raise StopIteration('step_fn has requested the iterations to stop.')",
    "docstring": "Exit the training loop by causing to return . Causes to exit by raising an exception. Raises: StopIteration",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\training\\monitored_session.py",
    "ast_data": "FunctionDef name:request_stop arg:self arguments arg Raise Call"
  },
  {
    "library": "tensorflow",
    "name": "init",
    "source_code": "def init(self):\n    raise NotImplementedError()",
    "docstring": "Initializes the summary writer.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\summary_ops_v2.py",
    "ast_data": "FunctionDef name:init arg:self arguments arg Raise Call"
  },
  {
    "library": "pytorch",
    "name": "create_symlinks",
    "source_code": "def create_symlinks() -> None:\n    if not TORCH_LIB_DIR.exists():\n        raise RuntimeError(f\"Can't create symlinks as {TORCH_LIB_DIR} does not exist\")\n    if not BUILD_LIB_DIR.exists():\n        raise RuntimeError(f\"Can't create symlinks as {BUILD_LIB_DIR} does not exist\")\n    for torch_lib in TORCH_LIB_DIR.glob(f'*.{get_lib_extension()}'):\n        if torch_lib.is_symlink():\n            continue\n        build_lib = BUILD_LIB_DIR / torch_lib.name\n        if not build_lib.exists():\n            raise RuntimeError(f\"Can't find {build_lib} corresponding to {torch_lib}\")\n        torch_lib.unlink()\n        torch_lib.symlink_to(build_lib)",
    "docstring": "Creates symlinks from build/lib to torch/lib",
    "type": "function",
    "file_path": "pytorch\\tools\\build_with_debinfo.py",
    "ast_data": "FunctionDef name:create_symlinks arguments If Call Raise Call If Call Raise Call For Call Call If Call Assign If Call Raise Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "_check_box_overlap",
    "source_code": "def _check_box_overlap(box0: ChunkStorageMetadata, box1: ChunkStorageMetadata) -> bool:\n    ndims = len(box0.offsets)\n    for i in range(ndims):\n        if box0.offsets[i] >= box1.offsets[i] + box1.sizes[i]:\n            return False\n        if box1.offsets[i] >= box0.offsets[i] + box0.sizes[i]:\n            return False\n    return True",
    "docstring": "Check if two boxes overlap. Tuples are (offset, lengths).",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\checkpoint\\default_planner.py",
    "ast_data": "FunctionDef name:_check_box_overlap arg:box0 arg:box1 arguments arg arg Assign Call For Call If Compare Return return:yes If Compare Return return:yes Return return:yes"
  },
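A self-contained sketch of the per-dimension separating-interval test; a simple namedtuple stands in for ChunkStorageMetadata:

```python
from collections import namedtuple

# Hypothetical stand-in for ChunkStorageMetadata: per-dimension offsets and sizes.
Box = namedtuple("Box", ["offsets", "sizes"])

def boxes_overlap(a, b):
    # Two boxes are disjoint iff some dimension separates them entirely.
    for i in range(len(a.offsets)):
        if a.offsets[i] >= b.offsets[i] + b.sizes[i]:
            return False
        if b.offsets[i] >= a.offsets[i] + a.sizes[i]:
            return False
    return True

print(boxes_overlap(Box((0, 0), (4, 4)), Box((2, 2), (4, 4))))  # True
print(boxes_overlap(Box((0, 0), (4, 4)), Box((4, 0), (4, 4))))  # False -- edges only touch
```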
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, col, row, is_non_singular=None, is_self_adjoint=None, is_positive_definite=None, is_square=None, name='LinearOperatorToeplitz'):\n    parameters = dict(col=col, row=row, is_non_singular=is_non_singular, is_self_adjoint=is_self_adjoint, is_positive_definite=is_positive_definite, is_square=is_square, name=name)\n    with ops.name_scope(name, values=[row, col]):\n        self._row = linear_operator_util.convert_nonref_to_tensor(row, name='row')\n        self._col = linear_operator_util.convert_nonref_to_tensor(col, name='col')\n        self._check_row_col(self._row, self._col)\n        if is_square is False:\n            raise ValueError('Only square Toeplitz operators currently supported.')\n        is_square = True\n        super(LinearOperatorToeplitz, self).__init__(dtype=self._row.dtype, is_non_singular=is_non_singular, is_self_adjoint=is_self_adjoint, is_positive_definite=is_positive_definite, is_square=is_square, parameters=parameters, name=name)",
    "docstring": "Initialize a . Args: col: Shape with . The first column of the operator. Allowed dtypes: , , , , . Note that the first entry of is assumed to be the same as the first entry of . row: Shape with . The first row of the operator. Allowed dtypes: , , , , . Note that the first entry of is assumed to be the same as the first entry of . is_non_singular: Expect that this operator is non-singular. is_self_adjoint: Expect that this operator is equal to its hermitian transpose. If is real, this is auto-set to . is_positive_definite: Expect that this operator is positive definite, meaning the quadratic form has positive real part for all nonzero . Note that we do not require the operator to be self-adjoint to be positive-definite. See: is_square: Expect that this operator acts like square [batch] matrices. name: A name for this .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\linalg\\linear_operator_toeplitz.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:col arg:row arg:is_non_singular arg:is_self_adjoint arg:is_positive_definite arg:is_square arg:name arguments arg arg arg arg arg arg arg arg Assign Call With Call Assign Call Assign Call Call If Compare Raise Call Assign Call Call"
  },
  {
    "library": "pytorch",
    "name": "is_enabled",
    "source_code": "def is_enabled() -> bool:\n    return torch._C._cuda_tunableop_is_enabled()",
    "docstring": "Returns whether the TunableOp feature is enabled.",
    "type": "function",
    "file_path": "pytorch\\torch\\cuda\\tunable.py",
    "ast_data": "FunctionDef name:is_enabled arguments Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "display_benchmark_results",
    "source_code": "def display_benchmark_results(timer_list, metric_name):\n    mean_time = statistics.mean(timer_list)\n    stdev_time = statistics.stdev(timer_list)\n    stdev_time_percentage = stdev_time / mean_time * 100\n    print('%s: %.2f ms ± %.2f%%' % (metric_name, mean_time, stdev_time_percentage))",
    "docstring": "Display mean and stdev for a given metric.",
    "type": "function",
    "file_path": "tensorflow\\third_party\\xla\\xla\\backends\\cpu\\benchmarks\\e2e\\gemma2\\flax_2b\\benchmark.py",
    "ast_data": "FunctionDef name:display_benchmark_results arg:timer_list arg:metric_name arguments arg arg Assign Call Assign Call Assign Call"
  },
  {
    "library": "pytorch",
    "name": "CPUOffload",
    "source_code": "@dataclass\nclass CPUOffload:\n    offload_params: bool = False",
    "docstring": "This configures CPU offloading. Attributes: offload_params (bool): This specifies whether to offload parameters to CPU when not involved in computation. If ``, then this offloads gradients to CPU as well, meaning that the optimizer step runs on CPU.",
    "type": "class",
    "file_path": "pytorch\\torch\\distributed\\fsdp\\api.py",
    "ast_data": "ClassDef name:CPUOffload"
  },
  {
    "library": "pandas",
    "name": "css_bar",
    "source_code": "def css_bar(start: float, end: float, color: str) -> str:\n    cell_css = base_css\n    if end > start:\n        cell_css += 'background: linear-gradient(90deg,'\n        if start > 0:\n            cell_css += f' transparent {start * 100:.1f}%, {color} {start * 100:.1f}%,'\n        cell_css += f' {color} {end * 100:.1f}%, transparent {end * 100:.1f}%)'\n    return cell_css",
    "docstring": "Generate CSS code to draw a bar from start to end in a table cell. Uses linear-gradient. Parameters ---------- start : float Relative positional start of bar coloring in [0,1] end : float Relative positional end of the bar coloring in [0,1] color : str CSS valid color to apply. Returns ------- str : The CSS applicable to the cell. Notes ----- Uses `` from outer scope.",
    "type": "function",
    "file_path": "pandas\\pandas\\io\\formats\\style.py",
    "ast_data": "FunctionDef name:css_bar arg:start arg:end arg:color arguments arg arg arg Assign If Compare If Compare Return return:yes"
  },
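A standalone sketch of css_bar; base_css comes from the enclosing pandas module, so an empty string stands in for it here:

```python
base_css = ""  # stand-in for the module-level base_css used by pandas

def css_bar(start, end, color):
    # Build a linear-gradient that is transparent outside [start, end].
    cell_css = base_css
    if end > start:
        cell_css += "background: linear-gradient(90deg,"
        if start > 0:
            cell_css += f" transparent {start * 100:.1f}%, {color} {start * 100:.1f}%,"
        cell_css += f" {color} {end * 100:.1f}%, transparent {end * 100:.1f}%)"
    return cell_css

print(css_bar(0.25, 0.75, "#d65f5f"))
# background: linear-gradient(90deg, transparent 25.0%, #d65f5f 25.0%,
#   #d65f5f 75.0%, transparent 75.0%)
```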
  {
    "library": "pandas",
    "name": "_is_label_or_level_reference",
    "source_code": "@final\ndef _is_label_or_level_reference(self, key: Level, axis: AxisInt=0) -> bool:\n    return self._is_level_reference(key, axis=axis) or self._is_label_reference(key, axis=axis)",
    "docstring": "Test whether a key is a label or level reference for a given axis. To be considered either a label or a level reference, must be a string that: - (axis=0): Matches a column label or an index level - (axis=1): Matches an index label or a column level Parameters ---------- key : Hashable Potential label or level name axis : int, default 0 Axis that levels are associated with (0 for index, 1 for columns) Returns ------- bool",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\generic.py",
    "ast_data": "FunctionDef name:_is_label_or_level_reference arg:self arg:key arg:axis arguments arg arg arg Return return:yes BoolOp Call Call"
  },
  {
    "library": "django",
    "name": "adapt_datetimefield_value",
    "source_code": "def adapt_datetimefield_value(self, value):\n    if value is None:\n        return None\n    return str(value)",
    "docstring": "Transform a datetime value to an object compatible with what is expected by the backend driver for datetime columns.",
    "type": "method",
    "file_path": "django\\django\\db\\backends\\base\\operations.py",
    "ast_data": "FunctionDef name:adapt_datetimefield_value arg:self arg:value arguments arg arg If Compare Return return:no Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "most_specific_common_supertype",
    "source_code": "def most_specific_common_supertype(self, others: Sequence[trace.TraceType]) -> Optional['Attrs']:\n    if not all((isinstance(other, Attrs) for other in others)):\n        return None\n    supertyped_attributes = self.named_attributes.most_specific_common_supertype([other.named_attributes for other in others])\n    if supertyped_attributes is None:\n        return None\n    return Attrs(self.named_attributes.type_name, self.named_attributes.attribute_names, supertyped_attributes.attributes.components, self._placeholder_type)",
    "docstring": "See base class.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\core\\function\\trace_type\\default_types.py",
    "ast_data": "FunctionDef name:most_specific_common_supertype arg:self arg:others arguments arg arg If Call Call Return return:no Assign Call If Compare Return return:no Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, cells, state_is_tuple=True):\n    logging.warning('`tf.nn.rnn_cell.MultiRNNCell` is deprecated. This class is equivalent as `tf.keras.layers.StackedRNNCells`, and will be replaced by that in Tensorflow 2.0.')\n    super(MultiRNNCell, self).__init__()\n    if not cells:\n        raise ValueError('Must specify at least one cell for MultiRNNCell.')\n    if not nest.is_nested(cells):\n        raise TypeError('cells must be a list or tuple, but saw: %s.' % cells)\n    if len(set((id(cell) for cell in cells))) < len(cells):\n        logging.log_first_n(logging.WARN, 'At least two cells provided to MultiRNNCell are the same object and will share weights.', 1)\n    self._cells = cells\n    for cell_number, cell in enumerate(self._cells):\n        if isinstance(cell, trackable.Trackable):\n            self._track_trackable(cell, name='cell-%d' % (cell_number,))\n    self._state_is_tuple = state_is_tuple\n    if not state_is_tuple:\n        if any((nest.is_nested(c.state_size) for c in self._cells)):\n            raise ValueError('Some cells return tuples of states, but the flag state_is_tuple is not set.  State sizes are: %s' % str([c.state_size for c in self._cells]))",
    "docstring": "Create a RNN cell composed sequentially of a number of RNNCells. Args: cells: list of RNNCells that will be composed in this order. state_is_tuple: If True, accepted and returned states are n-tuples, where . If False, the states are all concatenated along the column axis. This latter behavior will soon be deprecated. Raises: ValueError: if cells is empty (not allowed), or at least one of the cells returns a state tuple but the flag is .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\layers\\legacy_rnn\\rnn_cell_impl.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:cells arg:state_is_tuple arguments arg arg arg Call Call Call If Raise Call If Call Raise Call If Compare Call Call Call Call Call Assign For Call If Call Call Assign If If Call Call Raise Call Call"
  },
  {
    "library": "scipy",
    "name": "mfft",
    "source_code": "@mfft.setter\ndef mfft(self, n_: int):\n    if not n_ >= self.m_num:\n        raise ValueError(f'Attribute mfft={n_} needs to be at least the ' + f'window length m_num={self.m_num}!')\n    self._mfft = n_",
    "docstring": "Setter for the length of FFT utilized. See the property for further details.",
    "type": "method",
    "file_path": "scipy\\scipy\\signal\\_short_time_fft.py",
    "ast_data": "FunctionDef name:mfft arg:self arg:n_ arguments arg arg If Compare Raise Call Assign"
  },
  {
    "library": "tensorflow",
    "name": "convert_per_replica_to_dtensor",
    "source_code": "def convert_per_replica_to_dtensor(per_replica_value, mesh):\n    values = per_replica_value.values\n    if isinstance(values[0], (float, int)):\n        rank = 0\n    else:\n        rank = len(values[0].shape)\n    if rank == 0:\n        result = []\n        for v in values:\n            result.append(array_ops.expand_dims_v2(v, axis=0))\n        rank += 1\n    else:\n        result = list(values)\n    batch_layout = layout.Layout.batch_sharded(mesh, batch_dim=DEFAULT_BATCH_MESH_DIM_NAME, rank=rank)\n    return d_api.pack(result, batch_layout)",
    "docstring": "Convert a PerReplica result to a DTensor instance. Args: per_replica_value: A PerReplica instance whose value will be converted to DTensor. mesh: The mesh used for layout creation. Returns: A DTensor instance that packed from per_replica_value with batch sharded layout.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\experimental\\dtensor_util.py",
    "ast_data": "FunctionDef name:convert_per_replica_to_dtensor arg:per_replica_value arg:mesh arguments arg arg Assign If Call Assign Assign Call If Compare Assign For Call Call Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "data",
    "source_code": "@property\ndef data(self) -> Series:\n    return self.info.data",
    "docstring": "Series.",
    "type": "method",
    "file_path": "pandas\\pandas\\io\\formats\\info.py",
    "ast_data": "FunctionDef name:data arg:self arguments arg Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "containsx",
    "source_code": "def containsx(self, x):\n    x0, x1 = self.intervalx\n    return x0 <= x <= x1 or x0 >= x >= x1",
    "docstring": "Return whether *x* is in the closed (:attr:, :attr:) interval.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\transforms.py",
    "ast_data": "FunctionDef name:containsx arg:self arg:x arguments arg arg Assign Return return:yes BoolOp Compare Compare"
  },
  {
    "library": "tensorflow",
    "name": "set_device_policy",
    "source_code": "@tf_export('config.experimental.set_device_policy')\ndef set_device_policy(device_policy):\n    if device_policy == 'silent':\n        context.context().device_policy = context.DEVICE_PLACEMENT_SILENT\n    elif device_policy == 'silent_for_int32':\n        context.context().device_policy = context.DEVICE_PLACEMENT_SILENT_FOR_INT32\n    elif device_policy == 'warn':\n        context.context().device_policy = context.DEVICE_PLACEMENT_WARN\n    elif device_policy == 'explicit':\n        context.context().device_policy = context.DEVICE_PLACEMENT_EXPLICIT\n    elif device_policy is None:\n        context.context().device_policy = None\n    else:\n        raise ValueError(f'Invalid argument `device_policy`: {device_policy!r}. Please refer to https://www.tensorflow.org/api_docs/python/tf/config/experimental/set_device_policy for valid `device_policy` arguments.')",
    "docstring": "Sets the current thread device policy. The device policy controls how operations requiring inputs on a specific device (e.g., on GPU:0) handle inputs on a different device (e.g. GPU:1). When using the default, an appropriate policy will be picked automatically. The default policy may change over time. This function only sets the device policy for the current thread. Any subsequently started thread will again use the default policy. Args: device_policy: A device policy. Valid values: - None: Switch to a system default. - 'warn': Copies the tensors which are not on the right device and logs a warning. - 'explicit': Raises an error if the placement is not as required. - 'silent': Silently copies the tensors. Note that this may hide performance problems as there is no notification provided when operations are blocked on the tensor being copied between devices. - 'silent_for_int32': silently copies tensors, raising errors on the other ones. Raises: ValueError: If an invalid is passed.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\config.py",
    "ast_data": "FunctionDef name:set_device_policy arg:device_policy arguments arg If Compare Assign Call If Compare Assign Call If Compare Assign Call If Compare Assign Call If Compare Assign Call Raise Call Call"
  },
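A usage sketch for the entry above: set the policy, read it back with the companion getter, and restore the default.

```python
# Sketch: relax eager device placement for the current thread only.
import tensorflow as tf

tf.config.experimental.set_device_policy('silent')
print(tf.config.experimental.get_device_policy())  # 'silent'
tf.config.experimental.set_device_policy(None)     # back to the system default
```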
  {
    "library": "pytorch",
    "name": "has_multiple_dim_order",
    "source_code": "def has_multiple_dim_order(tensor):\n    sizes = tensor.size()\n    strides = tensor.stride()\n    has_duplicate_strides = any((earlier == later for earlier, later in zip(strides, strides[1:])))\n    has_singleton_dims = any((size == 1 for size in sizes))\n    return has_duplicate_strides or has_singleton_dims",
    "docstring": "Returns True if there're multiple legal dim orders for given tensor, False otherwise. The tensor is considered to have multiple legal dim orders if either of the following conditions is met: * Singleton Dimensions: There's at least one singleteon dimension in the tensor. Since their size is 1, they don't affect the memory offset (stride * index is zero because index is always zero). Therefore, they can be placed anywhere in the dimension order without changing how data is accessed. * Same strides: Strides reflect how the tensor is stored in memory. If any two dimensions have the same stride, swapping these dimensions won't change how data is accessed, leading to multiple correct dimension orders.",
    "type": "method",
    "file_path": "pytorch\\torch\\_tensor.py",
    "ast_data": "FunctionDef name:has_multiple_dim_order arg:tensor arguments arg Assign Call Assign Call Assign Call Compare Call Assign Call Compare Return return:yes BoolOp"
  },
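The check above is self-contained, so a standalone sketch of the same two conditions can be applied to concrete tensors:

```python
# Sketch: the duplicate-stride / singleton-dimension test from the entry above.
import torch

def has_multiple_dim_order(tensor: torch.Tensor) -> bool:
    sizes, strides = tensor.size(), tensor.stride()
    has_duplicate_strides = any(a == b for a, b in zip(strides, strides[1:]))
    has_singleton_dims = any(size == 1 for size in sizes)
    return has_duplicate_strides or has_singleton_dims

print(has_multiple_dim_order(torch.empty(2, 3, 4)))  # False: unique strides, no singletons
print(has_multiple_dim_order(torch.empty(2, 1, 4)))  # True: singleton dimension
```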
  {
    "library": "django",
    "name": "file_path",
    "source_code": "def file_path(self, url):\n    relative_url = url.removeprefix(self.base_url.path)\n    return url2pathname(relative_url)",
    "docstring": "Return the relative path to the media file on disk for the given URL.",
    "type": "method",
    "file_path": "django\\django\\contrib\\staticfiles\\handlers.py",
    "ast_data": "FunctionDef name:file_path arg:self arg:url arguments arg arg Assign Call Return return:yes Call"
  },
  {
    "library": "numpy",
    "name": "_nanmedian_small",
    "source_code": "def _nanmedian_small(a, axis=None, out=None, overwrite_input=False):\n    a = np.ma.masked_array(a, np.isnan(a))\n    m = np.ma.median(a, axis=axis, overwrite_input=overwrite_input)\n    for i in range(np.count_nonzero(m.mask.ravel())):\n        warnings.warn('All-NaN slice encountered', RuntimeWarning, stacklevel=5)\n    fill_value = np.timedelta64('NaT') if m.dtype.kind == 'm' else np.nan\n    if out is not None:\n        out[...] = m.filled(fill_value)\n        return out\n    return m.filled(fill_value)",
    "docstring": "sort + indexing median, faster for small medians along multiple dimensions due to the high overhead of apply_along_axis see nanmedian for parameter usage",
    "type": "function",
    "file_path": "numpy\\numpy\\lib\\_nanfunctions_impl.py",
    "ast_data": "FunctionDef name:_nanmedian_small arg:a arg:axis arg:out arg:overwrite_input arguments arg arg arg arg Assign Call Call Assign Call For Call Call Call Call Assign Compare Call If Compare Assign Call Return return:yes Return return:yes Call"
  },
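The public entry point that dispatches to this helper is `np.nanmedian`; a quick demonstration of the all-NaN-slice behavior described above:

```python
# An all-NaN row yields NaN in the result plus an "All-NaN slice" RuntimeWarning.
import numpy as np

a = np.array([[1.0, 2.0, 3.0],
              [np.nan, np.nan, np.nan]])
print(np.nanmedian(a, axis=1))  # [ 2. nan]
```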
  {
    "library": "pytorch",
    "name": "draw",
    "source_code": "def draw(self, n: int=1, out: Optional[torch.Tensor]=None, dtype: Optional[torch.dtype]=None) -> torch.Tensor:\n    if dtype is None:\n        dtype = torch.get_default_dtype()\n    if self.num_generated == 0:\n        if n == 1:\n            result = self._first_point.to(dtype)\n        else:\n            result, self.quasi = torch._sobol_engine_draw(self.quasi, n - 1, self.sobolstate, self.dimension, self.num_generated, dtype=dtype)\n            result = torch.cat((self._first_point.to(dtype), result), dim=-2)\n    else:\n        result, self.quasi = torch._sobol_engine_draw(self.quasi, n, self.sobolstate, self.dimension, self.num_generated - 1, dtype=dtype)\n    self.num_generated += n\n    if out is not None:\n        out.resize_as_(result).copy_(result)\n        return out\n    return result",
    "docstring": "Function to draw a sequence of :attr: points from a Sobol sequence. Note that the samples are dependent on the previous samples. The size of the result is :math:. Args: n (Int, optional): The length of sequence of points to draw. Default: 1 out (Tensor, optional): The output tensor dtype (:class:, optional): the desired data type of the returned tensor. Default: ``",
    "type": "method",
    "file_path": "pytorch\\torch\\quasirandom.py",
    "ast_data": "FunctionDef name:draw arg:self arg:n arg:out arg:dtype arguments arg arg arg arg If Compare Assign Call If Compare If Compare Assign Call Assign Call Assign Call Call Assign Call If Compare Call Call Return return:yes Return return:yes"
  },
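A usage sketch for `draw`: successive calls continue the same Sobol sequence, which is the sample-dependence the docstring notes.

```python
# Sketch: successive draws pick up where the previous draw stopped.
import torch

engine = torch.quasirandom.SobolEngine(dimension=3)
first = engine.draw(4)  # shape (4, 3); the unscrambled sequence starts at the origin
more = engine.draw(2)   # shape (2, 3); continues the sequence
print(first.shape, more.shape)  # torch.Size([4, 3]) torch.Size([2, 3])
```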
  {
    "library": "tensorflow",
    "name": "__get__",
    "source_code": "def __get__(self, instance, owner):\n    del owner\n    if isinstance(instance, composite_tensor.CompositeTensor) and instance._type_spec is not None:\n        return types_lib.MethodType(self, instance)\n    if instance not in self._descriptor_cache:\n        if instance is None:\n            return self\n        self._descriptor_cache[instance] = class_method_to_instance_method(self, instance)\n    return self._descriptor_cache[instance]",
    "docstring": "Makes it possible to decorate instance methods.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\polymorphic_function\\polymorphic_function.py",
    "ast_data": "FunctionDef name:__get__ arg:self arg:instance arg:owner arguments arg arg arg If BoolOp Call Compare Return return:yes Call If Compare If Compare Return return:yes Assign Call Return return:yes"
  },
  {
    "library": "django",
    "name": "from_bbox",
    "source_code": "@classmethod\ndef from_bbox(cls, bbox):\n    x0, y0, x1, y1 = bbox\n    for z in bbox:\n        if not isinstance(z, (float, int)):\n            return GEOSGeometry('POLYGON((%s %s, %s %s, %s %s, %s %s, %s %s))' % (x0, y0, x0, y1, x1, y1, x1, y0, x0, y0))\n    return Polygon(((x0, y0), (x0, y1), (x1, y1), (x1, y0), (x0, y0)))",
    "docstring": "Construct a Polygon from a bounding box (4-tuple).",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\geos\\polygon.py",
    "ast_data": "FunctionDef name:from_bbox arg:cls arg:bbox arguments arg arg Assign For If Call Return return:yes Call Return return:yes Call"
  },
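A usage sketch for `from_bbox` (assumes a Django environment with the GEOS bindings available):

```python
# Sketch: build a rectangular Polygon from an (xmin, ymin, xmax, ymax) tuple.
from django.contrib.gis.geos import Polygon

poly = Polygon.from_bbox((0.0, 0.0, 1.0, 1.0))
print(poly.wkt)  # e.g. POLYGON ((0 0, 0 1, 1 1, 1 0, 0 0))
```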
  {
    "library": "pytorch",
    "name": "_create_node",
    "source_code": "def _create_node(graph_or_block: _C.Graph | _C.Block, domain_op: str, inputs: Sequence, attributes: dict, params_dict: dict, opset_version: int, n_outputs: int, shape_inference: bool=True) -> _C.Node:\n    if isinstance(graph_or_block, _C.Graph):\n        graph = graph_or_block\n        node = graph.create(domain_op, inputs, n_outputs)\n        node = graph.insertNode(node)\n    elif isinstance(graph_or_block, _C.Block):\n        block = graph_or_block\n        node = block.addNode(domain_op, inputs)\n        if n_outputs > 1:\n            for _ in range(1, n_outputs):\n                node.addOutput()\n    node_outputs = tuple(node.outputs())\n    assert len(node_outputs) == n_outputs\n    aten = domain_op.startswith('aten::')\n    for key, value in sorted(attributes.items()):\n        if key in _SKIP_NODE_ATTRIBUTES:\n            continue\n        _add_attribute(node, key, value, aten=aten)\n    if shape_inference:\n        _C._jit_pass_onnx_node_shape_type_inference(node, params_dict, opset_version)\n    return node",
    "docstring": "Creates an node 'domain_op', taking inputs and attributes.",
    "type": "function",
    "file_path": "pytorch\\torch\\onnx\\_internal\\jit_utils.py",
    "ast_data": "FunctionDef name:_create_node arg:graph_or_block arg:domain_op arg:inputs arg:attributes arg:params_dict arg:opset_version arg:n_outputs arg:shape_inference arguments arg arg arg arg arg arg arg arg If Call Assign Assign Call Assign Call If Call Assign Assign Call If Compare For Call Call Assign Call Call Compare Call Assign Call For Call Call If Compare Call If Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "get_window_title",
    "source_code": "def get_window_title(self):\n    return self._window_title",
    "docstring": "Return the title text of the window containing the figure.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\backend_bases.py",
    "ast_data": "FunctionDef name:get_window_title arg:self arguments arg Return return:yes"
  },
  {
    "library": "scipy",
    "name": "Price03",
    "source_code": "class Price03(Benchmark):\n\n    def __init__(self, dimensions=2):\n        Benchmark.__init__(self, dimensions)\n        self._bounds = list(zip([-5.0] * self.N, [5.0] * self.N))\n        self.custom_bounds = ([0, 2], [0, 2])\n        self.global_optimum = [[1.0, 1.0]]\n        self.fglob = 0.0\n\n    def fun(self, x, *args):\n        self.nfev += 1\n        return 100 * (x[1] - x[0] ** 2) ** 2 + (6.4 * (x[1] - 0.5) ** 2 - x[0] - 0.6) ** 2",
    "docstring": "Price 3 objective function. This class defines the Price 3 [1]_ global optimization problem. This is a multimodal minimization problem defined as follows: .. math:: f_{\\text{Price03}}(x) = 100(x_2 - x_1^2)^2 + \\left[6.4(x_2 - 0.5)^2 - x_1 - 0.6 \\right]^2 with :math: for :math:. *Global optimum*: :math: for :math:, :math:, :math:, :math:. .. [1] Price, W. A controlled random search procedure for global optimisation Computer Journal, 1977, 20, 367-370 TODO Jamil #96 has an erroneous factor of 6 in front of the square brackets",
    "type": "class",
    "file_path": "scipy\\benchmarks\\benchmarks\\go_benchmark_functions\\go_funcs_P.py",
    "ast_data": "ClassDef name:Price03 FunctionDef name:__init__ arg:self arg:dimensions arguments arg arg Call Assign Call Call Assign Assign Assign FunctionDef name:fun arg:self arg:x arguments arg arg arg Return return:yes"
  },
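A quick standalone check (plain Python, independent of the benchmark harness) that the recorded optimum `f(1, 1) = 0` matches the objective implemented in `fun` above: both squared terms vanish at `x = (1, 1)`.

```python
# 100*(1 - 1)^2 = 0 and 6.4*(0.5)^2 - 1 - 0.6 = 1.6 - 1.6 = 0.
def price03(x1, x2):
    return 100 * (x2 - x1 ** 2) ** 2 + (6.4 * (x2 - 0.5) ** 2 - x1 - 0.6) ** 2

print(price03(1.0, 1.0))  # 0.0
```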
  {
    "library": "django",
    "name": "get_base_chain",
    "source_code": "def get_base_chain(self, model):\n    if not self.parents:\n        return []\n    if model in self.parents:\n        return [model]\n    for parent in self.parents:\n        res = parent._meta.get_base_chain(model)\n        if res:\n            res.insert(0, parent)\n            return res\n    return []",
    "docstring": "Return a list of parent classes leading to (ordered from closest to most distant ancestor). This has to handle the case where is a grandparent or even more distant relation.",
    "type": "method",
    "file_path": "django\\django\\db\\models\\options.py",
    "ast_data": "FunctionDef name:get_base_chain arg:self arg:model arguments arg arg If Return return:no If Compare Return return:yes For Assign Call If Call Return return:yes Return return:no"
  },
  {
    "library": "pytorch",
    "name": "is_gloo_available",
    "source_code": "def is_gloo_available() -> bool:\n    return _GLOO_AVAILABLE",
    "docstring": "Check if the Gloo backend is available.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\distributed_c10d.py",
    "ast_data": "FunctionDef name:is_gloo_available arguments Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "get_fontproperties",
    "source_code": "def get_fontproperties(self):\n    return self._fontproperties",
    "docstring": "Return the .",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\text.py",
    "ast_data": "FunctionDef name:get_fontproperties arg:self arguments arg Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "from_any",
    "source_code": "def from_any(size, fraction_ref=None):\n    if isinstance(size, Real):\n        return Fixed(size)\n    elif isinstance(size, str):\n        if size[-1] == '%':\n            return Fraction(float(size[:-1]) / 100, fraction_ref)\n    raise ValueError('Unknown format')",
    "docstring": "Create a Fixed unit when the first argument is a float, or a Fraction unit if that is a string that ends with %. The second argument is only meaningful when Fraction unit is created. >>> from mpl_toolkits.axes_grid1.axes_size import from_any >>> a = from_any(1.2) # => Fixed(1.2) >>> from_any(\"50%\", a) # => Fraction(0.5, a)",
    "type": "function",
    "file_path": "matplotlib\\lib\\mpl_toolkits\\axes_grid1\\axes_size.py",
    "ast_data": "FunctionDef name:from_any arg:size arg:fraction_ref arguments arg arg If Call Return return:yes Call If Call If Compare Return return:yes Call Call Raise Call"
  },
  {
    "library": "pytorch",
    "name": "benchmark_fused_nodes",
    "source_code": "def benchmark_fused_nodes(self, nodes: Sequence[BaseSchedulerNode]) -> tuple[float, str]:\n    assert len(nodes) > 0\n    device = nodes[0].get_device()\n    self.current_device = device\n    backend = self.get_backend(device)\n    with dynamo_timed('benchmark_fused_nodes', log_pt2_compile_event=True, dynamo_compile_column_us='compile_time_autotune_time_us'):\n        return backend.benchmark_fused_nodes(nodes)",
    "docstring": "Benchmark fused list of nodes and return the execution time in milliseconds on randomly generated inputs.",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\scheduler.py",
    "ast_data": "FunctionDef name:benchmark_fused_nodes arg:self arg:nodes arguments arg arg Compare Call Assign Call Assign Assign Call With Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "broadcast_implementation",
    "source_code": "@doc_controls.for_subclass_implementers\ndef broadcast_implementation(self, tensor, destinations):\n    return simple_broadcast(tensor, destinations, always_mirrored=True, canonicalize_devices=self._canonicalize_devices)",
    "docstring": "Implementation of . Args: tensor: a like object. The value to broadcast. destinations: a , a , a alike object, or a device string. It specifies the devices to broadcast to. . Note that if it's a , the value is broadcasted to the devices of that variable, this method doesn't update the variable. Returns: A or .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\cross_device_ops.py",
    "ast_data": "FunctionDef name:broadcast_implementation arg:self arg:tensor arg:destinations arguments arg arg arg Return return:yes Call"
  },
  {
    "library": "django",
    "name": "__getitem__",
    "source_code": "def __getitem__(self, index):\n    if 0 <= index < self.point_count:\n        x, y, z, m = (c_double(), c_double(), c_double(), c_double())\n        capi.get_point(self.ptr, index, byref(x), byref(y), byref(z), byref(m))\n        if self.is_3d and self.is_measured:\n            return (x.value, y.value, z.value, m.value)\n        if self.is_3d:\n            return (x.value, y.value, z.value)\n        if self.is_measured:\n            return (x.value, y.value, m.value)\n        dim = self.coord_dim\n        if dim == 1:\n            return (x.value,)\n        elif dim == 2:\n            return (x.value, y.value)\n    else:\n        raise IndexError('Index out of range when accessing points of a line string: %s.' % index)",
    "docstring": "Return the Point at the given index.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\gdal\\geometries.py",
    "ast_data": "FunctionDef name:__getitem__ arg:self arg:index arguments arg arg If Compare Assign Call Call Call Call Call Call Call Call Call If BoolOp Return return:yes If Return return:yes If Return return:yes Assign If Compare Return return:yes If Compare Return return:yes Raise Call"
  },
  {
    "library": "tensorflow",
    "name": "to_proto",
    "source_code": "def to_proto(self, export_scope=None):\n    if export_scope is None or self.full_name.startswith(export_scope):\n        save_slice_info_def = variable_pb2.SaveSliceInfoDef()\n        save_slice_info_def.full_name = ops.strip_name_scope(self.full_name, export_scope)\n        for i in self.full_shape:\n            save_slice_info_def.full_shape.append(i)\n        for i in self.var_offset:\n            save_slice_info_def.var_offset.append(i)\n        for i in self.var_shape:\n            save_slice_info_def.var_shape.append(i)\n        return save_slice_info_def\n    else:\n        return None",
    "docstring": "Returns a SaveSliceInfoDef() proto. Args: export_scope: Optional . Name scope to remove. Returns: A protocol buffer, or None if the is not in the specified name scope.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\variables.py",
    "ast_data": "FunctionDef name:to_proto arg:self arg:export_scope arguments arg arg If BoolOp Compare Call Assign Call Assign Call For Call For Call For Call Return return:yes Return return:no"
  },
  {
    "library": "matplotlib",
    "name": "intersects_path",
    "source_code": "def intersects_path(self, other, filled=True):\n    return _path.path_intersects_path(self, other, filled)",
    "docstring": "Return whether if this path intersects another given path. If *filled* is True, then this also returns True if one path completely encloses the other (i.e., the paths are treated as filled).",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\path.py",
    "ast_data": "FunctionDef name:intersects_path arg:self arg:other arg:filled arguments arg arg arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_create_metrics",
    "source_code": "def _create_metrics(self):\n    if len(self._output_names) == 1:\n        self._per_output_metrics = [None]\n    else:\n        self._per_output_metrics = []\n        for loss_obj, output_name in zip(self._losses, self._output_names):\n            if loss_obj is None:\n                self._per_output_metrics.append(None)\n            else:\n                self._per_output_metrics.append(metrics_mod.Mean(output_name + '_loss'))",
    "docstring": "Creates per-output loss metrics, but only for multi-output Models.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\compile_utils.py",
    "ast_data": "FunctionDef name:_create_metrics arg:self arguments arg If Compare Call Assign Assign For Call If Compare Call Call Call"
  },
  {
    "library": "matplotlib",
    "name": "delaxes",
    "source_code": "def delaxes(self, ax):\n    self._remove_axes(ax, owners=[self._axstack, self._localaxes])",
    "docstring": "Remove the *ax* from the figure; update the current Axes.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\figure.py",
    "ast_data": "FunctionDef name:delaxes arg:self arg:ax arguments arg arg Call"
  },
  {
    "library": "tensorflow",
    "name": "SparseKerasTensor",
    "source_code": "class SparseKerasTensor(KerasTensor):\n\n    def _to_placeholder(self):\n        spec = self.type_spec\n        return array_ops.sparse_placeholder(dtype=spec.dtype, shape=spec.shape)",
    "docstring": "A specialized KerasTensor representation for s. Specifically, it specializes the conversion to a placeholder in order to maintain dense shape information.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\keras_tensor.py",
    "ast_data": "ClassDef name:SparseKerasTensor FunctionDef name:_to_placeholder arg:self arguments arg Assign Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "tpu_shard_context",
    "source_code": "@contextlib.contextmanager\ndef tpu_shard_context(number_of_shards):\n    if _current_tpu_context.number_of_shards is not None:\n        raise NotImplementedError('tpu_shard_context cannot be nested')\n    try:\n        _current_tpu_context.set_number_of_shards(number_of_shards)\n        yield\n    finally:\n        _current_tpu_context.set_number_of_shards(None)",
    "docstring": "A context manager setting current number of shards.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\tpu\\tpu_function.py",
    "ast_data": "FunctionDef name:tpu_shard_context arg:number_of_shards arguments arg If Compare Raise Call Try Call Call"
  },
  {
    "library": "scipy",
    "name": "Mishra04",
    "source_code": "class Mishra04(Benchmark):\n\n    def __init__(self, dimensions=2):\n        Benchmark.__init__(self, dimensions)\n        self._bounds = list(zip([-10.0] * self.N, [10.0] * self.N))\n        self.global_optimum = [[-8.88055269734, -8.89097599857]]\n        self.fglob = -0.177715264826\n\n    def fun(self, x, *args):\n        self.nfev += 1\n        return 0.01 * (x[0] + x[1]) + sqrt(abs(sin(sqrt(abs(x[0] ** 2 + x[1] ** 2)))))",
    "docstring": "Mishra 4 objective function. This class defines the Mishra 4 [1]_ global optimization problem. This is a multimodal minimization problem defined as follows: .. math:: f_{\\text{Mishra04}}({x}) = \\sqrt{\\lvert \\sin{\\sqrt{\\lvert x_1^2 + x_2^2 \\rvert}} \\rvert} + 0.01(x_1 + x_2) with :math: for :math:. *Global optimum*: :math: for :math: .. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions For Global Optimization Problems Int. Journal of Mathematical Modelling and Numerical Optimisation, 2013, 4, 150-194. TODO: I think that Jamil#77 has the wrong minimum, not possible",
    "type": "class",
    "file_path": "scipy\\benchmarks\\benchmarks\\go_benchmark_functions\\go_funcs_M.py",
    "ast_data": "ClassDef name:Mishra04 FunctionDef name:__init__ arg:self arg:dimensions arguments arg arg Call Assign Call Call Assign Assign FunctionDef name:fun arg:self arg:x arguments arg arg arg Return return:yes Call Call Call Call Call"
  },
  {
    "library": "django",
    "name": "_add_script_prefix",
    "source_code": "@staticmethod\ndef _add_script_prefix(value):\n    if value.startswith(('http://', 'https://', '/')):\n        return value\n    from django.urls import get_script_prefix\n    return '%s%s' % (get_script_prefix(), value)",
    "docstring": "Add SCRIPT_NAME prefix to relative paths. Useful when the app is being served at a subpath and manually prefixing subpath to STATIC_URL and MEDIA_URL in settings is inconvenient.",
    "type": "method",
    "file_path": "django\\django\\conf\\__init__.py",
    "ast_data": "FunctionDef name:_add_script_prefix arg:value arguments arg If Call Return return:yes Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "get_feature_names_out",
    "source_code": "def get_feature_names_out(self, input_features=None):\n    check_is_fitted(self, '_n_features_out')\n    return _generate_get_feature_names_out(self, self._n_features_out, input_features=input_features)",
    "docstring": "Get output feature names for transformation. The feature names out will prefixed by the lowercased class name. For example, if the transformer outputs 3 features, then the feature names out are: . Parameters ---------- input_features : array-like of str or None, default=None Only used to validate feature names with the names seen in . Returns ------- feature_names_out : ndarray of str objects Transformed feature names.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\base.py",
    "ast_data": "FunctionDef name:get_feature_names_out arg:self arg:input_features arguments arg arg Call Return return:yes Call"
  },
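A usage sketch via a transformer that inherits this mixin: PCA sets `_n_features_out`, so the generated names are the lowercased class name plus an index.

```python
# Sketch: class-name-prefixed output feature names.
import numpy as np
from sklearn.decomposition import PCA

X = np.random.default_rng(0).normal(size=(10, 5))
print(PCA(n_components=3).fit(X).get_feature_names_out())
# ['pca0' 'pca1' 'pca2']
```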
  {
    "library": "sphinx",
    "name": "copy_support_files",
    "source_code": "@progress_message(__('copying TeX support files'))\ndef copy_support_files(self) -> None:\n    xindy_lang_option = XINDY_LANG_OPTIONS.get(self.config.language[:2], '-L general -C utf8 ')\n    xindy_cyrillic = self.config.language[:2] in XINDY_CYRILLIC_SCRIPTS\n    context = {'latex_engine': self.config.latex_engine, 'xindy_use': self.config.latex_use_xindy, 'xindy_lang_option': xindy_lang_option, 'xindy_cyrillic': xindy_cyrillic}\n    static_dir_name = package_dir / 'texinputs'\n    for filename in Path(static_dir_name).iterdir():\n        if not filename.name.startswith('.'):\n            copy_asset_file(static_dir_name / filename, self.outdir, context=context, force=True)\n    if os.name == 'nt':\n        static_dir_name = package_dir / 'texinputs_win'\n        copy_asset_file(static_dir_name / 'Makefile.jinja', self.outdir, context=context, force=True)",
    "docstring": "Copy TeX support files from texinputs.",
    "type": "method",
    "file_path": "sphinx\\sphinx\\builders\\latex\\__init__.py",
    "ast_data": "FunctionDef name:copy_support_files arg:self arguments arg Assign Call Assign Compare Assign Assign For Call Call If Call Call If Compare Assign Call Call Call"
  },
  {
    "library": "kornia",
    "name": "pyrup",
    "source_code": "def pyrup(input: Tensor, border_type: str='reflect', align_corners: bool=False) -> Tensor:\n    KORNIA_CHECK_SHAPE(input, ['B', 'C', 'H', 'W'])\n    kernel: Tensor = _get_pyramid_gaussian_kernel()\n    _, _, height, width = input.shape\n    x_up: Tensor = F.interpolate(input, size=(height * 2, width * 2), mode='bilinear', align_corners=align_corners)\n    x_blur: Tensor = filter2d(x_up, kernel, border_type)\n    return x_blur",
    "docstring": "Upsample a tensor and then blurs it. .. image:: _static/img/pyrup.png Args: input: the tensor to be downsampled. border_type: the padding mode to be applied before convolving. The expected modes are: ``. align_corners: interpolation flag. Return: the downsampled tensor. Examples: >>> input = torch.arange(4, dtype=torch.float32).reshape(1, 1, 2, 2) >>> pyrup(input, align_corners=True) tensor([[[[0.7500, 0.8750, 1.1250, 1.2500], [1.0000, 1.1250, 1.3750, 1.5000], [1.5000, 1.6250, 1.8750, 2.0000], [1.7500, 1.8750, 2.1250, 2.2500]]]])",
    "type": "function",
    "file_path": "kornia\\kornia\\geometry\\transform\\pyramid.py",
    "ast_data": "FunctionDef name:pyrup arg:input arg:border_type arg:align_corners arguments arg arg arg Call Call Assign Call Call Return return:yes"
  },
  {
    "library": "kornia",
    "name": "matrix4x4_to_Rt",
    "source_code": "def matrix4x4_to_Rt(extrinsics: Tensor) -> tuple[Tensor, Tensor]:\n    KORNIA_CHECK_SHAPE(extrinsics, ['B', '4', '4'])\n    R, t = (extrinsics[:, :3, :3], extrinsics[:, :3, 3:])\n    return (R, t)",
    "docstring": "Convert 4x4 extrinsics into 3x3 rotation matrix R and 1x3 translation vector ts. Args: extrinsics: pose matrix :math:. Returns: R: Rotation matrix, :math: t: Translation matrix :math:. Example: >>> ext = torch.eye(4)[None] >>> matrix4x4_to_Rt(ext) (tensor([[[1., 0., 0.], [0., 1., 0.], [0., 0., 1.]]]), tensor([[[0.], [0.], [0.]]]))",
    "type": "function",
    "file_path": "kornia\\kornia\\geometry\\conversions.py",
    "ast_data": "FunctionDef name:matrix4x4_to_Rt arg:extrinsics arguments arg Call Assign Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_get_group_size",
    "source_code": "def _get_group_size(group) -> int:\n    if group is GroupMember.WORLD or group is None:\n        default_pg = _get_default_group()\n        return default_pg.size()\n    return group.size()",
    "docstring": "Get a given group's world size.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\distributed_c10d.py",
    "ast_data": "FunctionDef name:_get_group_size arg:group arguments arg If BoolOp Compare Compare Assign Call Return return:yes Call Return return:yes Call"
  },
  {
    "library": "kornia",
    "name": "adjust_hue",
    "source_code": "def adjust_hue(image: Tensor, factor: Union[float, Tensor]) -> Tensor:\n    x_hsv: Tensor = rgb_to_hsv(image)\n    x_adjusted: Tensor = adjust_hue_raw(x_hsv, factor)\n    out: Tensor = hsv_to_rgb(x_adjusted)\n    return out",
    "docstring": "Adjust hue of an image. .. image:: _static/img/adjust_hue.png The image is expected to be an RGB image in the range of [0, 1]. Args: image: Image to be adjusted in the shape of :math:. factor: How much to shift the hue channel. Should be in [-PI, PI]. PI and -PI give complete reversal of hue channel in HSV space in positive and negative direction respectively. 0 means no shift. Therefore, both -PI and PI will give an image with complementary colors while 0 gives the original image. Return: Adjusted image in the shape of :math:. .. note:: See a working example __. Example: >>> x = torch.ones(1, 3, 2, 2) >>> adjust_hue(x, 3.141516).shape torch.Size([1, 3, 2, 2]) >>> x = torch.ones(2, 3, 3, 3) >>> y = torch.ones(2) * 3.141516 >>> adjust_hue(x, y).shape torch.Size([2, 3, 3, 3])",
    "type": "function",
    "file_path": "kornia\\kornia\\enhance\\adjust.py",
    "ast_data": "FunctionDef name:adjust_hue arg:image arg:factor arguments arg arg Call Call Call Return return:yes"
  },
  {
    "library": "cherrypy",
    "name": "__copy__",
    "source_code": "def __copy__(self):\n    newmap = self.__class__()\n    for k, v in self.items():\n        newmap[k] = v[:]\n    return newmap",
    "docstring": "Duplicate object per the copy protocol.",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\_cprequest.py",
    "ast_data": "FunctionDef name:__copy__ arg:self arguments arg Assign Call For Call Assign Return return:yes"
  },
  {
    "library": "pandas",
    "name": "take_nd",
    "source_code": "@final\ndef take_nd(self, indexer: npt.NDArray[np.intp], axis: AxisInt, new_mgr_locs: BlockPlacement | None=None, fill_value=lib.no_default) -> Block:\n    values = self.values\n    if fill_value is lib.no_default:\n        fill_value = self.fill_value\n        allow_fill = False\n    else:\n        allow_fill = True\n    new_values = algos.take_nd(values, indexer, axis=axis, allow_fill=allow_fill, fill_value=fill_value)\n    if isinstance(self, ExtensionBlock):\n        assert not (self.ndim == 1 and new_mgr_locs is None)\n    assert not (axis == 0 and new_mgr_locs is None)\n    if new_mgr_locs is None:\n        new_mgr_locs = self._mgr_locs\n    if new_values.dtype != self.dtype:\n        return self.make_block(new_values, new_mgr_locs)\n    else:\n        return self.make_block_same_class(new_values, new_mgr_locs)",
    "docstring": "Take values according to indexer and return them as a block.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\internals\\blocks.py",
    "ast_data": "FunctionDef name:take_nd arg:self arg:indexer arg:axis arg:new_mgr_locs arg:fill_value arguments arg arg arg arg arg Assign If Compare Assign Assign Assign Assign Call If Call BoolOp Compare Compare BoolOp Compare Compare If Compare Assign If Compare Return return:yes Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "score_fusion_memory",
    "source_code": "def score_fusion_memory(self, node1: BaseSchedulerNode, node2: BaseSchedulerNode) -> int:\n    node1_dep_len = len(node1.read_writes.reads) + len(node1.read_writes.writes)\n    node2_dep_len = len(node1.read_writes.reads) + len(node2.read_writes.writes)\n    if min(node1_dep_len, node2_dep_len) * 4 < max(node1_dep_len, node2_dep_len):\n        if node1_dep_len > node2_dep_len:\n            tmp = node1\n            node1 = node2\n            node2 = tmp\n        deps = [dep for dep in node1.read_writes.reads | node1.read_writes.writes if dep in node2.read_writes.reads or dep in node2.read_writes.writes]\n        return sum((self.dep_size_hint(dep) for dep in deps))\n    common_memory_deps = (node1.read_writes.reads | node1.read_writes.writes) & (node2.read_writes.reads | node2.read_writes.writes)\n    return sum((self.dep_size_hint(dep) for dep in common_memory_deps))",
    "docstring": "The first term in our fusion score that estimates number of saved memory operations.",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\scheduler.py",
    "ast_data": "FunctionDef name:score_fusion_memory arg:self arg:node1 arg:node2 arguments arg arg arg Assign Call Call Assign Call Call If Compare Call Call If Compare Assign Assign Assign Assign BoolOp Compare Compare Return return:yes Call Call Assign Return return:yes Call Call"
  },
  {
    "library": "pandas",
    "name": "save",
    "source_code": "def save(self) -> None:\n    with get_handle(self.filepath_or_buffer, self.mode, encoding=self.encoding, errors=self.errors, compression=self.compression, storage_options=self.storage_options) as handles:\n        self.writer = csvlib.writer(handles.handle, lineterminator=self.lineterminator, delimiter=self.sep, quoting=self.quoting, doublequote=self.doublequote, escapechar=self.escapechar, quotechar=self.quotechar)\n        self._save()",
    "docstring": "Create the writer & save.",
    "type": "method",
    "file_path": "pandas\\pandas\\io\\formats\\csvs.py",
    "ast_data": "FunctionDef name:save arg:self arguments arg With Call Assign Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_lookup_tensor_name",
    "source_code": "def _lookup_tensor_name(self, tensor):\n    return self._tensor_aliases.get(tensor.name, tensor.name)",
    "docstring": "Look up the name of a graph tensor. This method maps the name of a debugger-generated Identity or DebugIdentityV2 tensor to the name of the original instrumented tensor, if is such a debugger-created tensor. Otherwise, it returns the name of as is. Args: tensor: The graph tensor to look up the name for. Returns: Name of the original instrumented tensor as known to the debugger.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\debug\\lib\\dumping_callback.py",
    "ast_data": "FunctionDef name:_lookup_tensor_name arg:self arg:tensor arguments arg arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "pad_to_cardinality",
    "source_code": "@tf_export('data.experimental.pad_to_cardinality')\ndef pad_to_cardinality(cardinality, mask_key='valid'):\n\n    def make_filler_dataset(ds):\n        padding = cardinality - ds.cardinality()\n        filler_element = nest.map_structure(lambda spec: array_ops.zeros(spec.shape, spec.dtype), ds.element_spec)\n        filler_element[mask_key] = False\n        filler_dataset = dataset_ops.Dataset.from_tensors(filler_element)\n        filler_dataset = filler_dataset.repeat(padding)\n        return filler_dataset\n\n    def apply_valid_mask(x):\n        x[mask_key] = True\n        return x\n\n    def _apply_fn(dataset):\n        if context.executing_eagerly():\n            if dataset.cardinality() < 0:\n                raise ValueError(f'The dataset passed into `pad_to_cardinality` must have a known cardinalty, but has cardinality {dataset.cardinality()}')\n            if dataset.cardinality() > cardinality:\n                raise ValueError(f'The dataset passed into `pad_to_cardinality` must have a cardinalty less than the target cardinality ({cardinality}), but has cardinality {dataset.cardinality()}')\n        if not isinstance(dataset.element_spec, Mapping):\n            raise ValueError('`pad_to_cardinality` requires its input dataset to be a dictionary.')\n        filler = make_filler_dataset(dataset)\n        dataset = dataset.map(apply_valid_mask)\n        dataset = dataset.concatenate(filler)\n        return dataset\n    return _apply_fn",
    "docstring": "Pads a dataset with fake elements to reach the desired cardinality. The dataset to pad must have a known and finite cardinality and contain dictionary elements. The will be added to differentiate between real and padding elements -- real elements will have a entry while padding elements will have a entry. Example usage: ds = tf.data.Dataset.from_tensor_slices({'a': [1, 2]}) ds = ds.apply(tf.data.experimental.pad_to_cardinality(3)) list(ds.as_numpy_iterator()) [{'a': 1, 'valid': True}, {'a': 2, 'valid': True}, {'a': 0, 'valid': False}] This can be useful, e.g. during eval, when partial batches are undesirable but it is also important not to drop any data. Args: cardinality: The cardinality to pad the dataset to. mask_key: The key to use for identifying real vs padding elements. Returns: A dataset transformation that can be applied via .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\data\\experimental\\ops\\pad_to_cardinality.py",
    "ast_data": "FunctionDef name:pad_to_cardinality arg:cardinality arg:mask_key arguments arg arg FunctionDef name:make_filler_dataset arg:ds arguments arg Assign Call Assign Call arguments arg Call Assign Assign Call Assign Call Return return:yes FunctionDef name:apply_valid_mask arg:x arguments arg Assign Return return:yes FunctionDef name:_apply_fn arg:dataset arguments arg If Call If Compare Call Raise Call Call If Compare Call Raise Call Call If Call Raise Call Assign Call Assign Call Assign Call Return return:yes Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "new_subplotspec",
    "source_code": "def new_subplotspec(self, loc, rowspan=1, colspan=1):\n    loc1, loc2 = loc\n    subplotspec = self[loc1:loc1 + rowspan, loc2:loc2 + colspan]\n    return subplotspec",
    "docstring": "Create and return a instance. Parameters ---------- loc : (int, int) The position of the subplot in the grid as ``. rowspan, colspan : int, default: 1 The number of rows and columns the subplot should span in the grid.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\gridspec.py",
    "ast_data": "FunctionDef name:new_subplotspec arg:self arg:loc arg:rowspan arg:colspan arguments arg arg arg arg Assign Assign Return return:yes"
  },
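A usage sketch for `new_subplotspec`: carve a 2x2 region out of a 3x3 grid starting at row 1, column 1, then attach an Axes to it.

```python
# Sketch: a SubplotSpec spanning multiple grid cells.
import matplotlib.pyplot as plt
from matplotlib.gridspec import GridSpec

fig = plt.figure()
gs = GridSpec(3, 3, figure=fig)
spec = gs.new_subplotspec((1, 1), rowspan=2, colspan=2)
ax = fig.add_subplot(spec)
```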
  {
    "library": "cherrypy",
    "name": "basic_auth",
    "source_code": "def basic_auth(realm, checkpassword, debug=False, accept_charset='utf-8'):\n    fallback_charset = 'ISO-8859-1'\n    if '\"' in realm:\n        raise ValueError('Realm cannot contain the \" (quote) character.')\n    request = cherrypy.serving.request\n    auth_header = request.headers.get('authorization')\n    if auth_header is not None:\n        msg = 'Bad Request'\n        with cherrypy.HTTPError.handle((ValueError, binascii.Error), 400, msg):\n            scheme, params = auth_header.split(' ', 1)\n            if scheme.lower() == 'basic':\n                charsets = (accept_charset, fallback_charset)\n                decoded_params = base64.b64decode(params.encode('ascii'))\n                decoded_params = _try_decode(decoded_params, charsets)\n                decoded_params = ntou(decoded_params)\n                decoded_params = unicodedata.normalize('NFC', decoded_params)\n                decoded_params = tonative(decoded_params)\n                username, password = decoded_params.split(':', 1)\n                if checkpassword(realm, username, password):\n                    if debug:\n                        cherrypy.log('Auth succeeded', 'TOOLS.AUTH_BASIC')\n                    request.login = username\n                    return\n    charset = accept_charset.upper()\n    charset_declaration = ', charset=\"%s\"' % charset if charset != fallback_charset else ''\n    cherrypy.serving.response.headers['www-authenticate'] = 'Basic realm=\"%s\"%s' % (realm, charset_declaration)\n    raise cherrypy.HTTPError(401, 'You are not authorized to access that resource')",
    "docstring": "Perform basic auth. A CherryPy tool which hooks at before_handler to perform HTTP Basic Access Authentication, as specified in :rfc: and :rfc:. If the request has an 'authorization' header with a 'Basic' scheme, this tool attempts to authenticate the credentials supplied in that header. If the request has no 'authorization' header, or if it does but the scheme is not 'Basic', or if authentication fails, the tool sends a 401 response with a 'WWW-Authenticate' Basic header. realm A string containing the authentication realm. checkpassword A callable which checks the authentication credentials. Its signature is checkpassword(realm, username, password). where username and password are the values obtained from the request's 'authorization' header. If authentication succeeds, checkpassword returns True, else it returns False.",
    "type": "function",
    "file_path": "cherrypy\\cherrypy\\lib\\auth_basic.py",
    "ast_data": "FunctionDef name:basic_auth arg:realm arg:checkpassword arg:debug arg:accept_charset arguments arg arg arg arg Assign If Compare Raise Call Assign Assign Call If Compare Assign With Call Assign Call If Compare Call Assign Assign Call Call Assign Call Assign Call Assign Call Assign Call Assign Call If Call If Call Assign Return return:no Assign Call Assign Compare Assign Raise Call"
  },
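A configuration sketch for wiring this tool into an app. `checkpassword_dict`, from the same `cherrypy.lib.auth_basic` module, turns a `{username: password}` dict into a `checkpassword` callable.

```python
# Sketch: enable HTTP Basic auth on all paths of an app via config.
import cherrypy
from cherrypy.lib import auth_basic

USERS = {'alice': 'secret'}
config = {'/': {
    'tools.auth_basic.on': True,
    'tools.auth_basic.realm': 'localhost',
    'tools.auth_basic.checkpassword': auth_basic.checkpassword_dict(USERS),
}}
```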
  {
    "library": "numpy",
    "name": "_recursive_set_fill_value",
    "source_code": "def _recursive_set_fill_value(fillvalue, dt):\n    fillvalue = np.resize(fillvalue, len(dt.names))\n    output_value = []\n    for fval, name in zip(fillvalue, dt.names):\n        cdtype = dt[name]\n        if cdtype.subdtype:\n            cdtype = cdtype.subdtype[0]\n        if cdtype.names is not None:\n            output_value.append(tuple(_recursive_set_fill_value(fval, cdtype)))\n        else:\n            output_value.append(np.array(fval, dtype=cdtype).item())\n    return tuple(output_value)",
    "docstring": "Create a fill value for a structured dtype. Parameters ---------- fillvalue : scalar or array_like Scalar or array representing the fill value. If it is of shorter length than the number of fields in dt, it will be resized. dt : dtype The structured dtype for which to create the fill value. Returns ------- val : tuple A tuple of values corresponding to the structured fill value.",
    "type": "function",
    "file_path": "numpy\\numpy\\ma\\core.py",
    "ast_data": "FunctionDef name:_recursive_set_fill_value arg:fillvalue arg:dt arguments arg arg Assign Call Call Assign For Call Assign If Assign If Compare Call Call Call Call Call Call Return return:yes Call"
  },
  {
    "library": "numpy",
    "name": "split",
    "source_code": "def split(self, sep=None, maxsplit=None):\n    return split(self, sep, maxsplit)",
    "docstring": "For each element in , return a list of the words in the string, using as the delimiter string. See Also -------- char.split",
    "type": "method",
    "file_path": "numpy\\numpy\\_core\\defchararray.py",
    "ast_data": "FunctionDef name:split arg:self arg:sep arg:maxsplit arguments arg arg arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "descendants",
    "source_code": "def descendants(self):\n    return list(self._descendants_with_paths().keys())",
    "docstring": "Returns a list of trackables by node_id attached to obj.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\checkpoint\\checkpoint_view.py",
    "ast_data": "FunctionDef name:descendants arg:self arguments arg Return return:yes Call Call Call"
  },
  {
    "library": "numpy",
    "name": "_mask_propagate",
    "source_code": "def _mask_propagate(a, axis):\n    a = array(a, subok=False)\n    m = getmask(a)\n    if m is nomask or not m.any() or axis is None:\n        return a\n    a._mask = a._mask.copy()\n    axes = normalize_axis_tuple(axis, a.ndim)\n    for ax in axes:\n        a._mask |= m.any(axis=ax, keepdims=True)\n    return a",
    "docstring": "Mask whole 1-d vectors of an array that contain masked values.",
    "type": "function",
    "file_path": "numpy\\numpy\\ma\\core.py",
    "ast_data": "FunctionDef name:_mask_propagate arg:a arg:axis arguments arg arg Assign Call Assign Call If BoolOp Compare Call Compare Return return:yes Assign Call Assign Call For Call Return return:yes"
  },
  {
    "library": "pandas",
    "name": "MultiIndexUInt8Engine",
    "source_code": "class MultiIndexUInt8Engine(libindex.BaseMultiIndexCodesEngine, libindex.UInt8Engine):\n    _base = libindex.UInt8Engine\n    _codes_dtype = 'uint8'",
    "docstring": "Manages a MultiIndex by mapping label combinations to positive integers. The number of possible label combinations must not overflow the 8 bits integers.",
    "type": "class",
    "file_path": "pandas\\pandas\\core\\indexes\\multi.py",
    "ast_data": "ClassDef name:MultiIndexUInt8Engine Assign Assign"
  },
  {
    "library": "matplotlib",
    "name": "set_offset",
    "source_code": "def set_offset(self, xy):\n    self._offset = xy\n    self.offset_transform.clear()\n    self.offset_transform.translate(xy[0], xy[1])\n    self.stale = True",
    "docstring": "Set the offset of the container. Parameters ---------- xy : (float, float) The (x, y) coordinates of the offset in display units.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\offsetbox.py",
    "ast_data": "FunctionDef name:set_offset arg:self arg:xy arguments arg arg Assign Call Call Assign"
  },
  {
    "library": "matplotlib",
    "name": "_proj_transform_vectors",
    "source_code": "def _proj_transform_vectors(vecs, M):\n    vecs_shape = vecs.shape\n    vecs = vecs.reshape(-1, 3).T\n    vecs_pad = np.empty((vecs.shape[0] + 1,) + vecs.shape[1:])\n    vecs_pad[:-1] = vecs\n    vecs_pad[-1] = 1\n    product = np.dot(M, vecs_pad)\n    tvecs = product[:3] / product[3]\n    return tvecs.T.reshape(vecs_shape)",
    "docstring": "Vectorized version of ``. Parameters ---------- vecs : ... x 3 np.ndarray Input vectors M : 4 x 4 np.ndarray Projection matrix",
    "type": "function",
    "file_path": "matplotlib\\lib\\mpl_toolkits\\mplot3d\\proj3d.py",
    "ast_data": "FunctionDef name:_proj_transform_vectors arg:vecs arg:M arguments arg arg Assign Assign Call Assign Call Assign Assign Assign Call Assign Return return:yes Call"
  },
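A standalone numpy sketch of the same homogeneous-coordinate pipeline used above: pad each 3-vector with w=1, apply the 4x4 matrix, then divide by the resulting w.

```python
# Sketch: homogeneous projection of ... x 3 vectors through a 4x4 matrix.
import numpy as np

def proj_transform_vectors(vecs, M):
    flat = vecs.reshape(-1, 3).T                              # 3 x N
    padded = np.vstack([flat, np.ones((1, flat.shape[1]))])   # 4 x N homogeneous
    product = M @ padded                                      # 4 x N
    return (product[:3] / product[3]).T.reshape(vecs.shape)   # perspective divide

pts = np.arange(6, dtype=float).reshape(2, 3)
print(np.allclose(proj_transform_vectors(pts, np.eye(4)), pts))  # True: identity
```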
  {
    "library": "django",
    "name": "send_mass_mail",
    "source_code": "def send_mass_mail(datatuple, fail_silently=False, auth_user=None, auth_password=None, connection=None):\n    connection = connection or get_connection(username=auth_user, password=auth_password, fail_silently=fail_silently)\n    messages = [EmailMessage(subject, message, sender, recipient, connection=connection) for subject, message, sender, recipient in datatuple]\n    return connection.send_messages(messages)",
    "docstring": "Given a datatuple of (subject, message, from_email, recipient_list), send each message to each recipient list. Return the number of emails sent. If from_email is None, use the DEFAULT_FROM_EMAIL setting. If auth_user and auth_password are set, use them to log in. If auth_user is None, use the EMAIL_HOST_USER setting. If auth_password is None, use the EMAIL_HOST_PASSWORD setting. Note: The API for this method is frozen. New code wanting to extend the functionality should use the EmailMessage class directly.",
    "type": "function",
    "file_path": "django\\django\\core\\mail\\__init__.py",
    "ast_data": "FunctionDef name:send_mass_mail arg:datatuple arg:fail_silently arg:auth_user arg:auth_password arg:connection arguments arg arg arg arg arg Assign BoolOp Call Assign Call Return return:yes Call"
  },
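A usage sketch for `send_mass_mail`: two messages sent over a single connection. Assumes Django settings are configured (e.g. `EMAIL_BACKEND` pointed at the console backend for testing).

```python
# Sketch: one connection, two messages; returns the number of emails sent.
from django.core.mail import send_mass_mail

message1 = ('Subject A', 'Body A', 'from@example.com', ['to1@example.com'])
message2 = ('Subject B', 'Body B', 'from@example.com', ['to2@example.com'])
sent = send_mass_mail((message1, message2), fail_silently=False)  # 2
```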
  {
    "library": "pytorch",
    "name": "transform_to",
    "source_code": "@abc.abstractmethod\ndef transform_to(self, output: IO[bytes]) -> IO[bytes]:\n    pass",
    "docstring": "Takes a writeable output stream, and generates a new stream which implements the output transform. Input data written to the returned stream will be transformed and written to the argument stream.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\checkpoint\\_extension.py",
    "ast_data": "FunctionDef name:transform_to arg:self arg:output arguments arg arg"
  },
  {
    "library": "matplotlib",
    "name": "__init__",
    "source_code": "@_docstring.interpd\ndef __init__(self, xy, width, height, *, angle=0.0, rotation_point='xy', **kwargs):\n    super().__init__(**kwargs)\n    self._x0 = xy[0]\n    self._y0 = xy[1]\n    self._width = width\n    self._height = height\n    self.angle = float(angle)\n    self.rotation_point = rotation_point\n    self._aspect_ratio_correction = 1.0\n    self._convert_units()",
    "docstring": "Parameters ---------- xy : (float, float) The anchor point. width : float Rectangle width. height : float Rectangle height. angle : float, default: 0 Rotation in degrees anti-clockwise about the rotation point. rotation_point : {'xy', 'center', (number, number)}, default: 'xy' If `~matplotlib.patches.Patch` properties %(Patch:kwdoc)s",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\patches.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:xy arg:width arg:height arguments arg arg arg arg arg arg arg Call Call Assign Assign Assign Assign Assign Call Assign Assign Call"
  },
  {
    "library": "pytorch",
    "name": "activate_script",
    "source_code": "@property\ndef activate_script(self) -> Path:\n    if WINDOWS:\n        return self.prefix / 'Scripts' / 'Activate.ps1'\n    return self.prefix / 'bin' / 'activate'",
    "docstring": "Get the activation script for the virtual environment.",
    "type": "method",
    "file_path": "pytorch\\tools\\nightly.py",
    "ast_data": "FunctionDef name:activate_script arg:self arguments arg If Return return:yes Return return:yes"
  },
  {
    "library": "seaborn",
    "name": "adjust_legend_subtitles",
    "source_code": "def adjust_legend_subtitles(legend):\n    font_size = plt.rcParams.get('legend.title_fontsize', None)\n    hpackers = legend.findobj(mpl.offsetbox.VPacker)[0].get_children()\n    for hpack in hpackers:\n        draw_area, text_area = hpack.get_children()\n        handles = draw_area.get_children()\n        if not all((artist.get_visible() for artist in handles)):\n            draw_area.set_width(0)\n            for text in text_area.get_children():\n                if font_size is not None:\n                    text.set_size(font_size)",
    "docstring": "Make invisible-handle \"subtitles\" entries look more like titles. Note: This function is not part of the public API and may be changed or removed.",
    "type": "function",
    "file_path": "seaborn\\seaborn\\utils.py",
    "ast_data": "FunctionDef name:adjust_legend_subtitles arg:legend arguments arg Assign Call Assign Call Call For Assign Call Assign Call If Call Call Call For Call If Compare Call"
  },
  {
    "library": "pytorch",
    "name": "_register_post_backward_hook",
    "source_code": "def _register_post_backward_hook(state: _FSDPState, handle: Optional[FlatParamHandle]) -> None:\n    if not torch.is_grad_enabled():\n        return\n    if not handle:\n        return\n    flat_param = handle.flat_param\n    if torch.distributed._functional_collectives.is_torchdynamo_compiling():\n        already_registered = hasattr(flat_param, '_post_backward_hook_handle')\n        if already_registered or not flat_param.requires_grad:\n            return\n        hook = functools.partial(_post_backward_hook, state, handle)\n        hook_handle = flat_param.register_post_accumulate_grad_hook(hook)\n        flat_param._post_backward_hook_handle = hook_handle\n    else:\n        already_registered = hasattr(flat_param, '_post_backward_hook_state')\n        if already_registered or not flat_param.requires_grad:\n            return\n        temp_flat_param = flat_param.expand_as(flat_param)\n        _p_assert(temp_flat_param.grad_fn is not None, 'The `grad_fn` is needed to access the `AccumulateGrad` and register the post-backward hook')\n        acc_grad = temp_flat_param.grad_fn.next_functions[0][0]\n        assert acc_grad is not None\n        hook_handle = acc_grad.register_hook(functools.partial(_post_backward_hook, state, handle))\n        flat_param._post_backward_hook_state = (acc_grad, hook_handle)",
    "docstring": "Registers post-backward hooks on the `` objects are the same.) If we instead prefer the *last* forward, then the hook runs early.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\fsdp\\_runtime_utils.py",
    "ast_data": "FunctionDef name:_register_post_backward_hook arg:state arg:handle arguments arg arg If Call Return return:no If Return return:no Assign If Call Assign Call If BoolOp Return return:no Assign Call Assign Call Assign Assign Call If BoolOp Return return:no Assign Call Call Compare Assign Compare Assign Call Call Assign"
  },
  {
    "library": "tensorflow",
    "name": "_check_batch_shape_possibly_add_asserts",
    "source_code": "def _check_batch_shape_possibly_add_asserts(self):\n    if self._batch_shape_arg is None:\n        return\n    if self._assert_proper_shapes:\n        self._batch_shape_arg = control_flow_ops.with_dependencies([check_ops.assert_rank(self._batch_shape_arg, 1, message='Argument batch_shape must be a 1-D Tensor.'), check_ops.assert_non_negative(self._batch_shape_arg, message='Argument batch_shape must be non-negative.')], self._batch_shape_arg)\n    if not self._batch_shape_arg.dtype.is_integer:\n        raise TypeError('Argument batch_shape must be integer type.  Found: %s' % self._batch_shape_arg)\n    if self._batch_shape_static is None:\n        return\n    if self._batch_shape_static.ndim != 1:\n        raise ValueError('Argument batch_shape must be a 1-D Tensor.  Found: %s' % self._batch_shape_static)\n    if np.any(self._batch_shape_static < 0):\n        raise ValueError('Argument batch_shape must be non-negative.  Found:%s' % self._batch_shape_static)",
    "docstring": "Static check of init arg , possibly add asserts.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\linalg\\linear_operator_identity.py",
    "ast_data": "FunctionDef name:_check_batch_shape_possibly_add_asserts arg:self arguments arg If Compare Return return:no If Assign Call Call Call If Raise Call If Compare Return return:no If Compare Raise Call If Call Compare Raise Call"
  },
  {
    "library": "pandas",
    "name": "_parse_thead_tr",
    "source_code": "def _parse_thead_tr(self, table):\n    raise AbstractMethodError(self)",
    "docstring": "Return the list of thead row elements from the parsed table element. Parameters ---------- table : a table element that contains zero or more thead elements. Returns ------- list of node-like These are the row elements of a table.",
    "type": "method",
    "file_path": "pandas\\pandas\\io\\html.py",
    "ast_data": "FunctionDef name:_parse_thead_tr arg:self arg:table arguments arg arg Raise Call"
  },
  {
    "library": "tensorflow",
    "name": "get_device_name",
    "source_code": "@tf_export('__internal__.eager_context.get_device_name', v1=[])\ndef get_device_name():\n    return context().device_name",
    "docstring": "Get the device name for the current thread. Returns: The device name for the current thread.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\context.py",
    "ast_data": "FunctionDef name:get_device_name arguments Return return:yes Call Call"
  },
  {
    "library": "scipy",
    "name": "looper",
    "source_code": "class looper:\n\n    def __init__(self, seq):\n        self.seq = seq\n\n    def __iter__(self):\n        return looper_iter(self.seq)\n\n    def __repr__(self):\n        return '<%s for %r>' % (self.__class__.__name__, self.seq)",
    "docstring": "Helper for looping (particularly in templates) Use this like:: for loop, item in looper(seq): if loop.first: ...",
    "type": "class",
    "file_path": "scipy\\scipy\\_build_utils\\tempita\\_looper.py",
    "ast_data": "ClassDef name:looper FunctionDef name:__init__ arg:self arg:seq arguments arg arg Assign FunctionDef name:__iter__ arg:self arguments arg Return return:yes Call FunctionDef name:__repr__ arg:self arguments arg Return return:yes"
  },
  {
    "library": "django",
    "name": "_get_session_key",
    "source_code": "def _get_session_key(self):\n    return signing.dumps(self._session, compress=True, salt='django.contrib.sessions.backends.signed_cookies', serializer=self.serializer)",
    "docstring": "Instead of generating a random string, generate a secure url-safe base64-encoded string of data as our session key.",
    "type": "method",
    "file_path": "django\\django\\contrib\\sessions\\backends\\signed_cookies.py",
    "ast_data": "FunctionDef name:_get_session_key arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "_get_window_indexer",
    "source_code": "def _get_window_indexer(self) -> BaseIndexer:\n    if isinstance(self.window, BaseIndexer):\n        return self.window\n    if self._win_freq_i8 is not None:\n        return VariableWindowIndexer(index_array=self._index_array, window_size=self._win_freq_i8, center=self.center)\n    return FixedWindowIndexer(window_size=self.window)",
    "docstring": "Return an indexer class that will compute the window start and end bounds",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\window\\rolling.py",
    "ast_data": "FunctionDef name:_get_window_indexer arg:self arguments arg If Call Return return:yes If Compare Return return:yes Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_make_initializable_iterator",
    "source_code": "def _make_initializable_iterator(self, shared_name=None):\n    if context.executing_eagerly():\n        raise ValueError('Cannot create initializable iterator in Eager mode. Please use `iter()` instead.')\n    return self._get_iterator()",
    "docstring": "Get an initializable iterator for DistributedDatasetV1.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\v1\\input_lib.py",
    "ast_data": "FunctionDef name:_make_initializable_iterator arg:self arg:shared_name arguments arg arg If Call Raise Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, text='', font_attr=None):\n    self.text = text\n    if font_attr:\n        self.font_attr_segs = [(0, len(text), font_attr)]\n    else:\n        self.font_attr_segs = []",
    "docstring": "Construct a RichLine with no rich attributes or a single attribute. Args: text: Raw text string font_attr: If specified, a single font attribute to be applied to the entire text. Extending this object via concatenation allows creation of text with varying attributes.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\debug\\cli\\debugger_cli_common.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:text arg:font_attr arguments arg arg arg Assign If Assign Call Assign"
  },
  {
    "library": "tensorflow",
    "name": "trace_model_call",
    "source_code": "def trace_model_call(model, input_signature=None):\n    if input_signature is None:\n        if isinstance(model.call, def_function.Function):\n            input_signature = model.call.input_signature\n    if input_signature is None:\n        input_signature = model_input_signature(model)\n    if input_signature is None:\n        raise_model_input_error(model)\n\n    @def_function.function(input_signature=input_signature)\n    def _wrapped_model(*args):\n        inputs = args[0] if len(input_signature) == 1 else list(args)\n        with base_layer_utils.call_context().enter(model, inputs=inputs, build_graph=False, training=False, saving=True):\n            outputs = model(inputs, training=False)\n        output_names = model.output_names\n        if output_names is None:\n            from tensorflow.python.keras.engine import compile_utils\n            output_names = compile_utils.create_pseudo_output_names(outputs)\n        outputs = nest.flatten(outputs)\n        return {name: output for name, output in zip(output_names, outputs)}\n    return _wrapped_model",
    "docstring": "Trace the model call to create a tf.function for exporting a Keras model. Args: model: A Keras model. input_signature: optional, a list of tf.TensorSpec objects specifying the inputs to the model. Returns: A tf.function wrapping the model's call function with input signatures set. Raises: ValueError: if input signature cannot be inferred from the model.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\saving\\saving_utils.py",
    "ast_data": "FunctionDef name:trace_model_call arg:model arg:input_signature arguments arg arg If Compare If Call Assign If Compare Assign Call If Compare Call FunctionDef name:_wrapped_model arguments arg Assign Compare Call Call With Call Call Assign Call Assign If Compare Assign Call Assign Call Return return:yes Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_BroadcastMul",
    "source_code": "def _BroadcastMul(vec, mat):\n    vec = array_ops.expand_dims(vec, -1)\n    return vec * mat",
    "docstring": "Multiply after broadcasting vec to match dimensions of mat. Args: vec: A 1-D tensor of dimension [D0] mat: A 2-D tensor of dimension [D0, D1] Returns: A tensor of dimension [D0, D1], the result of vec * mat",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\nn_grad.py",
    "ast_data": "FunctionDef name:_BroadcastMul arg:vec arg:mat arguments arg arg Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "subtract",
    "source_code": "def subtract(inputs, **kwargs):\n    return Subtract(**kwargs)(inputs)",
    "docstring": "Functional interface to the layer. Args: inputs: A list of input tensors (exactly 2). **kwargs: Standard layer keyword arguments. Returns: A tensor, the difference of the inputs. Examples:",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\layers\\merge.py",
    "ast_data": "FunctionDef name:subtract arg:inputs arguments arg arg Return return:yes Call Call"
  },
  {
    "library": "pygame",
    "name": "has_internal",
    "source_code": "def has_internal(self, sprite):\n    return sprite in self.spritedict",
    "docstring": "For checking if a sprite is in this group internally. :param sprite: The sprite we are checking.",
    "type": "method",
    "file_path": "pygame\\src_py\\sprite.py",
    "ast_data": "FunctionDef name:has_internal arg:self arg:sprite arguments arg arg Return return:yes Compare"
  },
  {
    "library": "tensorflow",
    "name": "from_dlpack",
    "source_code": "@tf_export('experimental.dlpack.from_dlpack', v1=[])\ndef from_dlpack(dlcapsule):\n    context.context().ensure_initialized()\n    return pywrap_tfe.TFE_FromDlpackCapsule(dlcapsule, context.context()._handle)",
    "docstring": "Returns the Tensorflow eager tensor. The returned tensor uses the memory shared by dlpack capsules from other framework. Args: dlcapsule: A PyCapsule named as dltensor Returns: A Tensorflow eager tensor",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\dlpack\\dlpack.py",
    "ast_data": "FunctionDef name:from_dlpack arg:dlcapsule arguments arg Call Call Return return:yes Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "DynamicScalar",
    "source_code": "class DynamicScalar(ExternKernel):\n\n    def get_reads(self) -> OrderedSet[Dep]:\n        return OrderedSet()\n\n    def should_allocate(self) -> bool:\n        return False\n\n    def __init__(self, sym, keypath, data) -> None:\n        data.realize()\n        super().__init__(None, NoneLayout(device=torch.device('cpu')), self.unwrap_storage([data]))\n        self.sym = sym\n        self.keypath = keypath\n\n    def get_unbacked_symbol_defs(self) -> OrderedSet[sympy.Symbol]:\n        return OrderedSet([self.sym])\n\n    def codegen(self, wrapper) -> None:\n        wrapper.codegen_dynamic_scalar(self)",
    "docstring": "The result of a call to aten._local_scalar_dense.",
    "type": "class",
    "file_path": "pytorch\\torch\\_inductor\\ir.py",
    "ast_data": "ClassDef name:DynamicScalar FunctionDef name:get_reads arg:self arguments arg Return return:yes Call FunctionDef name:should_allocate arg:self arguments arg Return return:yes FunctionDef name:__init__ arg:self arg:sym arg:keypath arg:data arguments arg arg arg arg Call Call Call Call Call Call Assign Assign FunctionDef name:get_unbacked_symbol_defs arg:self arguments arg Return return:yes Call FunctionDef name:codegen arg:self arg:wrapper arguments arg arg Call"
  },
  {
    "library": "tensorflow",
    "name": "serialize_many_sparse",
    "source_code": "@tf_export(v1=['io.serialize_many_sparse', 'serialize_many_sparse'])\n@dispatch.add_dispatch_support\n@deprecation.deprecated_endpoints('serialize_many_sparse')\ndef serialize_many_sparse(sp_input, name=None, out_type=dtypes.string):\n    return serialize_many_sparse_v2(sp_input, out_type, name)",
    "docstring": "Serialize -minibatch into an . The must have rank greater than 1, and the first dimension is treated as the minibatch dimension. Elements of the must be sorted in increasing order of this first dimension. The serialized objects going into each row of the output will have rank . The minibatch size is extracted from . Args: sp_input: The input rank . name: A name prefix for the returned tensors (optional). out_type: The to use for serialization. Returns: A matrix (2-D ) with rows and columns. Each column represents serialized 's indices, values, and shape (respectively). Raises: TypeError: If is not a .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\sparse_ops.py",
    "ast_data": "FunctionDef name:serialize_many_sparse arg:sp_input arg:name arg:out_type arguments arg arg arg Return return:yes Call Call Call"
  },
  {
    "library": "sphinx",
    "name": "validate_html_static_path",
    "source_code": "def validate_html_static_path(app: Sphinx, config: Config) -> None:\n    html_static_path = []\n    for entry in config.html_static_path:\n        static_path = (app.confdir / entry).resolve()\n        if static_path.exists():\n            if app.outdir.drive == static_path.drive and static_path.is_relative_to(app.outdir):\n                logger.warning(__('html_static_path entry %r is placed inside outdir'), entry)\n            else:\n                html_static_path.append(entry)\n        else:\n            logger.warning(__('html_static_path entry %r does not exist'), entry)\n    config.html_static_path = html_static_path",
    "docstring": "Check html_static_paths setting.",
    "type": "function",
    "file_path": "sphinx\\sphinx\\builders\\html\\__init__.py",
    "ast_data": "FunctionDef name:validate_html_static_path arg:app arg:config arguments arg arg Assign For Assign Call If Call If BoolOp Compare Call Call Call Call Call Call Assign"
  },
  {
    "library": "numpy",
    "name": "allRoutines",
    "source_code": "def allRoutines(self):\n    return list(self.names_to_routines.values())",
    "docstring": "Return all the routines.",
    "type": "method",
    "file_path": "numpy\\numpy\\linalg\\lapack_lite\\make_lite.py",
    "ast_data": "FunctionDef name:allRoutines arg:self arguments arg Return return:yes Call Call"
  },
  {
    "library": "matplotlib",
    "name": "__call__",
    "source_code": "def __call__(self, x, pos=None):\n    return self.func(x, pos)",
    "docstring": "Return the value of the user defined function. *x* and *pos* are passed through as-is.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\ticker.py",
    "ast_data": "FunctionDef name:__call__ arg:self arg:x arg:pos arguments arg arg arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "mark_step",
    "source_code": "def mark_step(device: str='', wait=False):\n    torch._C._lazy._mark_step(device, [], wait=wait)\n    run_step_closures()",
    "docstring": "Triggers a mark step, which amounts to - collecting a group of 'live' lazy tensors to index into the compilation cache (lowering/compiling their IR graphs if not cached) - kicking off execution of the compiled function - (optionally, wait=True) waiting for cpu-side execution to complete (does not sync the accelerator)",
    "type": "function",
    "file_path": "pytorch\\torch\\_lazy\\__init__.py",
    "ast_data": "FunctionDef name:mark_step arg:device arg:wait arguments arg arg Call Call"
  },
  {
    "library": "pytorch",
    "name": "get_port",
    "source_code": "def get_port(self) -> int:\n    return self._port",
    "docstring": "Return the port the server is running on.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\elastic\\rendezvous\\etcd_server.py",
    "ast_data": "FunctionDef name:get_port arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_SyncOnReadSaveable",
    "source_code": "class _SyncOnReadSaveable(saveable_object.SaveableObject):\n\n    def __init__(self, sync_on_read_variable, name):\n        self._sync_on_read_variable = sync_on_read_variable\n        tensor, spec = values_util.get_on_read_saveable(sync_on_read_variable, sync_on_read_variable._primary, name)\n        super(_SyncOnReadSaveable, self).__init__(tensor, spec, name)\n\n    def restore(self, restored_tensors, restored_shapes):\n        tensor, = restored_tensors\n        return values_util.get_on_read_restore_ops(self._sync_on_read_variable, tensor, self._sync_on_read_variable.aggregation)",
    "docstring": "Class for defining how to restore a SyncOnReadVariable.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\values.py",
    "ast_data": "ClassDef name:_SyncOnReadSaveable FunctionDef name:__init__ arg:self arg:sync_on_read_variable arg:name arguments arg arg arg Assign Assign Call Call Call FunctionDef name:restore arg:self arg:restored_tensors arg:restored_shapes arguments arg arg arg Assign Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "load_state",
    "source_code": "def load_state(model: nn.Module, weights: Sequence[Tensor], weight_names: Sequence[str], buffers: Sequence[Tensor]=(), buffer_names: Sequence[str]=()) -> nn.Module:\n    assert len(weight_names) == len(weights)\n    load_weights(model, weight_names, weights)\n    if len(buffers) > 0:\n        assert len(buffer_names) == len(buffers)\n        load_buffers(model, buffer_names, buffers)\n    return model",
    "docstring": "load_state(model, weights, weight_names, buffers=(), buffer_names=()) -> model load_state takes and and assigns them to the model. This is the inverse operation of .",
    "type": "function",
    "file_path": "pytorch\\torch\\_functorch\\make_functional.py",
    "ast_data": "FunctionDef name:load_state arg:model arg:weights arg:weight_names arg:buffers arg:buffer_names arguments arg arg arg arg arg Compare Call Call Call If Compare Call Compare Call Call Call Return return:yes"
  },
  {
    "library": "scipy",
    "name": "_minimize_trust_ncg",
    "source_code": "def _minimize_trust_ncg(fun, x0, args=(), jac=None, hess=None, hessp=None, **trust_region_options):\n    if jac is None:\n        raise ValueError('Jacobian is required for Newton-CG trust-region minimization')\n    if hess is None and hessp is None:\n        raise ValueError('Either the Hessian or the Hessian-vector product is required for Newton-CG trust-region minimization')\n    return _minimize_trust_region(fun, x0, args=args, jac=jac, hess=hess, hessp=hessp, subproblem=CGSteihaugSubproblem, **trust_region_options)",
    "docstring": "Minimization of scalar function of one or more variables using the Newton conjugate gradient trust-region algorithm. Options ------- initial_trust_radius : float Initial trust-region radius. max_trust_radius : float Maximum value of the trust-region radius. No steps that are longer than this value will be proposed. eta : float Trust region related acceptance stringency for proposed steps. gtol : float Gradient norm must be less than before successful termination.",
    "type": "function",
    "file_path": "scipy\\scipy\\optimize\\_trustregion_ncg.py",
    "ast_data": "FunctionDef name:_minimize_trust_ncg arg:fun arg:x0 arg:args arg:jac arg:hess arg:hessp arguments arg arg arg arg arg arg arg If Compare Raise Call If BoolOp Compare Compare Raise Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_TakeDataset",
    "source_code": "class _TakeDataset(dataset_ops.UnaryUnchangedStructureDataset):\n\n    def __init__(self, input_dataset, count, name=None):\n        self._input_dataset = input_dataset\n        self._count = ops.convert_to_tensor(count, dtype=dtypes.int64, name='count')\n        self._name = name\n        variant_tensor = gen_dataset_ops.take_dataset(input_dataset._variant_tensor, count=self._count, **self._common_args)\n        super().__init__(input_dataset, variant_tensor)",
    "docstring": "A containing the first elements from its input.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\data\\ops\\take_op.py",
    "ast_data": "ClassDef name:_TakeDataset FunctionDef name:__init__ arg:self arg:input_dataset arg:count arg:name arguments arg arg arg arg Assign Assign Call Assign Assign Call Call Call"
  },
  {
    "library": "numpy",
    "name": "diag_indices_from",
    "source_code": "@array_function_dispatch(_diag_indices_from)\ndef diag_indices_from(arr):\n    if not arr.ndim >= 2:\n        raise ValueError('input array must be at least 2-d')\n    if not np.all(diff(arr.shape) == 0):\n        raise ValueError('All dimensions of input must be of equal length')\n    return diag_indices(arr.shape[0], arr.ndim)",
    "docstring": "Return the indices to access the main diagonal of an n-dimensional array. See for full details. Parameters ---------- arr : array, at least 2-D See Also -------- diag_indices Examples -------- >>> import numpy as np Create a 4 by 4 array. >>> a = np.arange(16).reshape(4, 4) >>> a array([[ 0, 1, 2, 3], [ 4, 5, 6, 7], [ 8, 9, 10, 11], [12, 13, 14, 15]]) Get the indices of the diagonal elements. >>> di = np.diag_indices_from(a) >>> di (array([0, 1, 2, 3]), array([0, 1, 2, 3])) >>> a[di] array([ 0, 5, 10, 15]) This is simply syntactic sugar for diag_indices. >>> np.diag_indices(a.shape[0]) (array([0, 1, 2, 3]), array([0, 1, 2, 3]))",
    "type": "function",
    "file_path": "numpy\\numpy\\lib\\_index_tricks_impl.py",
    "ast_data": "FunctionDef name:diag_indices_from arg:arr arguments arg If Compare Raise Call If Call Compare Call Raise Call Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, grpc_debug_server_addresses, thread_name_filter=None, send_traceback_and_source_code=True):\n\n    def _gated_grpc_watch_fn(fetches, feeds):\n        del fetches, feeds\n        return framework.WatchOptions(debug_ops=['DebugIdentity(gated_grpc=true)'])\n    super(TensorBoardDebugHook, self).__init__(grpc_debug_server_addresses, watch_fn=_gated_grpc_watch_fn, thread_name_filter=thread_name_filter)\n    self._grpc_debug_server_addresses = grpc_debug_server_addresses\n    self._send_traceback_and_source_code = send_traceback_and_source_code\n    self._sent_graph_version = -1\n    grpc_wrapper.register_signal_handler()",
    "docstring": "Constructor of TensorBoardDebugHook. Args: grpc_debug_server_addresses: gRPC address(es) of debug server(s), as a or a of s. E.g., \"localhost:2333\", \"grpc://localhost:2333\", [\"192.168.0.7:2333\", \"192.168.0.8:2333\"]. thread_name_filter: Optional filter for thread names. send_traceback_and_source_code: Whether traceback of graph elements and the source code are to be sent to the debug server(s).",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\debug\\wrappers\\hooks.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:grpc_debug_server_addresses arg:thread_name_filter arg:send_traceback_and_source_code arguments arg arg arg arg FunctionDef name:_gated_grpc_watch_fn arg:fetches arg:feeds arguments arg arg Return return:yes Call Call Call Assign Assign Assign Call"
  },
  {
    "library": "pandas",
    "name": "replace_regex",
    "source_code": "def replace_regex(values: ArrayLike, rx: re.Pattern, value, mask: npt.NDArray[np.bool_] | None) -> None:\n    if isna(value) or not isinstance(value, str):\n\n        def re_replacer(s):\n            if is_re(rx) and isinstance(s, str):\n                return value if rx.search(s) is not None else s\n            else:\n                return s\n    else:\n\n        def re_replacer(s):\n            if is_re(rx) and isinstance(s, str):\n                return rx.sub(value, s)\n            else:\n                return s\n    f = np.vectorize(re_replacer, otypes=[np.object_])\n    if mask is None:\n        values[:] = f(values)\n    else:\n        if values.ndim != mask.ndim:\n            mask = np.broadcast_to(mask, values.shape)\n        values[mask] = f(values[mask])",
    "docstring": "Parameters ---------- values : ArrayLike Object dtype. rx : re.Pattern value : Any mask : np.ndarray[bool], optional Notes ----- Alters values in-place.",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\array_algos\\replace.py",
    "ast_data": "FunctionDef name:replace_regex arg:values arg:rx arg:value arg:mask arguments arg arg arg arg If BoolOp Call Call FunctionDef name:re_replacer arg:s arguments arg If BoolOp Call Call Return return:yes Compare Call Return return:yes FunctionDef name:re_replacer arg:s arguments arg If BoolOp Call Call Return return:yes Call Return return:yes Assign Call If Compare Assign Call If Compare Assign Call Assign Call"
  },
  {
    "library": "matplotlib",
    "name": "get_minorticklines",
    "source_code": "def get_minorticklines(self):\n    lines = []\n    ticks = self.get_minor_ticks()\n    for tick in ticks:\n        lines.append(tick.tick1line)\n        lines.append(tick.tick2line)\n    return cbook.silent_list('Line2D ticklines', lines)",
    "docstring": "Return this Axis' minor tick lines as a list of \\s.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\axis.py",
    "ast_data": "FunctionDef name:get_minorticklines arg:self arguments arg Assign Assign Call For Call Call Return return:yes Call"
  },
  {
    "library": "django",
    "name": "_loaded_messages",
    "source_code": "@property\ndef _loaded_messages(self):\n    if not hasattr(self, '_loaded_data'):\n        messages, all_retrieved = self._get()\n        self._loaded_data = messages or []\n    return self._loaded_data",
    "docstring": "Return a list of loaded messages, retrieving them first if they have not been loaded yet.",
    "type": "method",
    "file_path": "django\\django\\contrib\\messages\\storage\\base.py",
    "ast_data": "FunctionDef name:_loaded_messages arg:self arguments arg If Call Assign Call Assign BoolOp Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_reduce_join_reduction_dims",
    "source_code": "def _reduce_join_reduction_dims(x, axis):\n    if axis is not None:\n        return axis\n    else:\n        if x.get_shape().ndims is not None:\n            return constant_op.constant(np.arange(x.get_shape().ndims - 1, -1, -1), dtype=dtypes.int32)\n        return math_ops.range(array_ops.rank(x) - 1, -1, -1)",
    "docstring": "Returns range(rank(x) - 1, 0, -1) if axis is None; or axis otherwise.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\string_ops.py",
    "ast_data": "FunctionDef name:_reduce_join_reduction_dims arg:x arg:axis arguments arg arg If Compare Return return:yes If Compare Call Return return:yes Call Call Call Return return:yes Call Call"
  },
  {
    "library": "scikit-learn",
    "name": "_validate_estimators",
    "source_code": "def _validate_estimators(self):\n    if len(self.estimators) == 0:\n        raise ValueError(\"Invalid 'estimators' attribute, 'estimators' should be a non-empty list of (string, estimator) tuples.\")\n    names, estimators = zip(*self.estimators)\n    self._validate_names(names)\n    has_estimator = any((est != 'drop' for est in estimators))\n    if not has_estimator:\n        raise ValueError('All estimators are dropped. At least one is required to be an estimator.')\n    return (names, estimators)",
    "docstring": "Overload the method of to be more lenient towards the type of . Regressors can be accepted for some cases such as ordinal regression.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\ensemble\\_stacking.py",
    "ast_data": "FunctionDef name:_validate_estimators arg:self arguments arg If Compare Call Raise Call Assign Call Call Assign Call Compare If Raise Call Return return:yes"
  },
  {
    "library": "scipy",
    "name": "kstatvar",
    "source_code": "@_axis_nan_policy_factory(lambda x: x, result_to_tuple=lambda x, _: (x,), n_outputs=1, default_axis=None)\ndef kstatvar(data, n=2, *, axis=None):\n    xp = array_namespace(data)\n    data = xp.asarray(data)\n    if axis is None:\n        data = xp.reshape(data, (-1,))\n        axis = 0\n    N = _length_nonmasked(data, axis, xp=xp)\n    if n == 1:\n        return kstat(data, n=2, axis=axis, _no_deco=True) * 1.0 / N\n    elif n == 2:\n        k2 = kstat(data, n=2, axis=axis, _no_deco=True)\n        k4 = kstat(data, n=4, axis=axis, _no_deco=True)\n        return (2 * N * k2 ** 2 + (N - 1) * k4) / (N * (N + 1))\n    else:\n        raise ValueError('Only n=1 or n=2 supported.')",
    "docstring": "Return an unbiased estimator of the variance of the k-statistic. See and [1]_ for more details about the k-statistic. Parameters ---------- data : array_like Input array. n : int, {1, 2}, optional Default is equal to 2. axis : int or None, default: None If an int, the axis of the input along which to compute the statistic. The statistic of each axis-slice (e.g. row) of the input will appear in a corresponding element of the output. If `n` th k-statistic variance. See Also -------- kstat : Returns the n-th k-statistic. moment : Returns the n-th central moment about the mean for a sample. Notes ----- Unbiased estimators of the variances of the first two k-statistics are given by .. math:: \\mathrm{var}(k_1) &= \\frac{k_2}{n}, \\\\ \\mathrm{var}(k_2) &= \\frac{2k_2^2n + (n-1)k_4}{n(n - 1)}. References ---------- .. [1]",
    "type": "function",
    "file_path": "scipy\\scipy\\stats\\_morestats.py",
    "ast_data": "FunctionDef name:kstatvar arg:data arg:n arguments arg arg arg Assign Call Assign Call If Compare Assign Call Assign Assign Call If Compare Return return:yes Call If Compare Assign Call Assign Call Return return:yes Raise Call Call arguments arg arguments arg arg"
  },
  {
    "library": "matplotlib",
    "name": "__init__",
    "source_code": "def __init__(self, units_mapping):\n    self._units = units_mapping",
    "docstring": "Parameters ---------- units_mapping : dict Mapping of category names (str) to indices (int).",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\category.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:units_mapping arguments arg arg Assign"
  },
  {
    "library": "pandas",
    "name": "make_invalid_op",
    "source_code": "def make_invalid_op(name: str) -> Callable[..., NoReturn]:\n\n    def invalid_op(self: object, other: object=None) -> NoReturn:\n        typ = type(self).__name__\n        raise TypeError(f'cannot perform {name} with this index type: {typ}')\n    invalid_op.__name__ = name\n    return invalid_op",
    "docstring": "Return a binary method that always raises a TypeError. Parameters ---------- name : str Returns ------- invalid_op : function",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\ops\\invalid.py",
    "ast_data": "FunctionDef name:make_invalid_op arg:name arguments arg FunctionDef name:invalid_op arg:self arg:other arguments arg arg Assign Call Raise Call Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "auto_cast_partition_dtype",
    "source_code": "def auto_cast_partition_dtype():\n    return False",
    "docstring": "Whether incompatible row-partitioning dtypes should be auto-converted. If true, then operations that combine RaggedTensors but have different row-partitioning tensor dtypes will be automatically cast to a compatible dtype (). If false, then such operations will result in an error. Returns:",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\ragged_config.py",
    "ast_data": "FunctionDef name:auto_cast_partition_dtype arguments Return return:yes"
  },
  {
    "library": "numpy",
    "name": "ids",
    "source_code": "def ids(self):\n    if self._mask is nomask:\n        return (self.ctypes.data, id(nomask))\n    return (self.ctypes.data, self._mask.ctypes.data)",
    "docstring": "Return the addresses of the data and mask areas. Parameters ---------- None Examples -------- >>> import numpy as np >>> x = np.ma.array([1, 2, 3], mask=[0, 1, 1]) >>> x.ids() (166670640, 166659832) # may vary If the array has no mask, the address of is returned. This address is typically not close to the data in memory: >>> x = np.ma.array([1, 2, 3]) >>> x.ids() (166691080, 3083169284) # may vary",
    "type": "method",
    "file_path": "numpy\\numpy\\ma\\core.py",
    "ast_data": "FunctionDef name:ids arg:self arguments arg If Compare Return return:yes Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "get_stage_index_of_submod",
    "source_code": "def get_stage_index_of_submod(self, submod_name: str):\n    if submod_name not in self.submod_to_stage_index:\n        raise AssertionError(f'Stage id of {submod_name} not found')\n    return self.submod_to_stage_index[submod_name]",
    "docstring": "Given a submodule name, return the stage index of the submodule.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\pipelining\\stage.py",
    "ast_data": "FunctionDef name:get_stage_index_of_submod arg:self arg:submod_name arguments arg arg If Compare Raise Call Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "_compute_interactions",
    "source_code": "def _compute_interactions(self, node):\n    allowed_features = set()\n    interaction_cst_indices = []\n    for i in node.interaction_cst_indices:\n        if node.split_info.feature_idx in self.interaction_cst[i]:\n            interaction_cst_indices.append(i)\n            allowed_features.update(self.interaction_cst[i])\n    return (np.fromiter(allowed_features, dtype=np.uint32, count=len(allowed_features)), interaction_cst_indices)",
    "docstring": "Compute features allowed by interactions to be inherited by child nodes. Example: Assume constraints [{0, 1}, {1, 2}]. 1 <- Both constraint groups could be applied from now on / \\ 1 2 <- Left split still fulfills both constraint groups. / \\ / \\ Right split at feature 2 has only group {1, 2} from now on. LightGBM uses the same logic for overlapping groups. See for details. Parameters: ---------- node : TreeNode A node that might have children. Based on its feature_idx, the interaction constraints for possible child nodes are computed. Returns ------- allowed_features : ndarray, dtype=uint32 Indices of features allowed to split for children. interaction_cst_indices : list of ints Indices of the interaction sets that have to be applied on splits of child nodes. The fewer sets the stronger the constraint as fewer sets contain fewer features.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\ensemble\\_hist_gradient_boosting\\grower.py",
    "ast_data": "FunctionDef name:_compute_interactions arg:self arg:node arguments arg arg Assign Call Assign For If Compare Call Call Return return:yes Call Call"
  },
  {
    "library": "scipy",
    "name": "delta_t",
    "source_code": "@property\ndef delta_t(self) -> float:\n    return self.T * self.hop",
    "docstring": "Time increment of STFT. The time increment = * represents the sample increment converted to time based on the sampling interval . See Also -------- delta_f: Width of the frequency bins of the STFT. hop: Hop size in signal samples for sliding window. t: Times of STFT for an input signal with samples. T: Sampling interval of input signal and window . ShortTimeFFT: Class this property belongs to",
    "type": "method",
    "file_path": "scipy\\scipy\\signal\\_short_time_fft.py",
    "ast_data": "FunctionDef name:delta_t arg:self arguments arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "rekey_logger_info_on_node_name_of_model",
    "source_code": "def rekey_logger_info_on_node_name_of_model(results: NSResultsType, model_name: str) -> NSResultsType:\n    new_results = {}\n    for old_layer_name, result_type_to_results in results.items():\n        new_layer_name = None\n        for model_name_to_results in result_type_to_results.values():\n            for cur_model_name, list_of_results in model_name_to_results.items():\n                if cur_model_name == model_name:\n                    assert len(list_of_results)\n                    new_layer_name = list_of_results[0]['ref_node_name']\n                else:\n                    continue\n        if new_layer_name is not None:\n            new_results[new_layer_name] = result_type_to_results\n        else:\n            new_results[old_layer_name] = result_type_to_results\n    return new_results",
    "docstring": "Rekeys the layer name of a results dictionary to use node names from . For example, transforms {'base_op_1_0': {'node_output': {'model_a': [{'ref_node_name': 'linear1', ...}]}}} into {'linear1': {'node_output': {'model_a': [{'ref_node_name': 'linear1', ...}]}}} Note: we cannot use these node names directly because they are not guaranteed to be consistent across models. This is why we extract the results first and rekey afterwards.",
    "type": "function",
    "file_path": "pytorch\\torch\\ao\\ns\\fx\\utils.py",
    "ast_data": "FunctionDef name:rekey_logger_info_on_node_name_of_model arg:results arg:model_name arguments arg arg Assign For Call Assign For Call For Call If Compare Call Assign If Compare Assign Assign Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "get_n_splits",
    "source_code": "def get_n_splits(self, X=None, y=None, groups=None):\n    if groups is None:\n        raise ValueError(\"The 'groups' parameter should not be None.\")\n    groups = check_array(groups, input_name='groups', ensure_2d=False, dtype=None)\n    return int(comb(len(np.unique(groups)), self.n_groups, exact=True))",
    "docstring": "Returns the number of splitting iterations in the cross-validator. Parameters ---------- X : object Always ignored, exists for compatibility. y : object Always ignored, exists for compatibility. groups : array-like of shape (n_samples,) Group labels for the samples used while splitting the dataset into train/test set. This 'groups' parameter must always be specified to calculate the number of splits, though the other parameters can be omitted. Returns ------- n_splits : int Returns the number of splitting iterations in the cross-validator.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\model_selection\\_split.py",
    "ast_data": "FunctionDef name:get_n_splits arg:self arg:X arg:y arg:groups arguments arg arg arg arg If Compare Raise Call Assign Call Return return:yes Call Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "graph_module_from_producer_nodes",
    "source_code": "def graph_module_from_producer_nodes(root: GraphModule, producer_nodes: list[Node]) -> GraphModule:\n    assert len(producer_nodes) > 0, 'list of producer nodes can not be empty'\n    producer_nodes.reverse()\n    graph = Graph()\n    env: dict[Any, Any] = {}\n\n    def load_arg(a):\n        return map_arg(a, lambda node: env[node])\n    for producer_node in producer_nodes:\n        env[producer_node] = graph.node_copy(producer_node, load_arg)\n    graph.output(load_arg(producer_nodes[-1]))\n    graph_module = GraphModule(root, graph)\n    return graph_module",
    "docstring": "Construct a graph module from extracted producer nodes from function Args: root: the root module for the original graph producer_nodes: a list of nodes we use to construct the graph Return: A graph module constructed from the producer nodes",
    "type": "function",
    "file_path": "pytorch\\torch\\ao\\quantization\\fx\\utils.py",
    "ast_data": "FunctionDef name:graph_module_from_producer_nodes arg:root arg:producer_nodes arguments arg arg Compare Call Call Assign Call FunctionDef name:load_arg arg:a arguments arg Return return:yes Call arguments arg For Assign Call Call Call Assign Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "get_xlim",
    "source_code": "def get_xlim(self):\n    return tuple(self.viewLim.intervalx)",
    "docstring": "Return the x-axis view limits. Returns ------- left, right : (float, float) The current x-axis limits in data coordinates. See Also -------- .Axes.set_xlim .Axes.set_xbound, .Axes.get_xbound .Axes.invert_xaxis, .Axes.xaxis_inverted Notes ----- The x-axis may be inverted, in which case the *left* value will be greater than the *right* value.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\axes\\_base.py",
    "ast_data": "FunctionDef name:get_xlim arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "DispatchContext",
    "source_code": "class DispatchContext(object):\n\n    def __init__(self, worker_obj):\n        self._worker = worker_obj\n        self._worker_index = worker_obj.worker_index\n\n    @property\n    def worker(self):\n        return self._worker\n\n    @property\n    def worker_index(self):\n        return self._worker_index\n\n    def maybe_get_remote_value(self, ret):\n        return maybe_get_remote_value(ret)",
    "docstring": "Context entered when executing a closure on a given worker.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\coordinator\\coordinator_context.py",
    "ast_data": "ClassDef name:DispatchContext FunctionDef name:__init__ arg:self arg:worker_obj arguments arg arg Assign Assign FunctionDef name:worker arg:self arguments arg Return return:yes FunctionDef name:worker_index arg:self arguments arg Return return:yes FunctionDef name:maybe_get_remote_value arg:self arg:ret arguments arg arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "maybe_saved_model_directory",
    "source_code": "@tf_export(v1=['saved_model.contains_saved_model', 'saved_model.maybe_saved_model_directory', 'saved_model.loader.maybe_saved_model_directory'])\n@deprecation.deprecated_endpoints('saved_model.loader.maybe_saved_model_directory')\ndef maybe_saved_model_directory(export_dir):\n    txt_path = file_io.join(export_dir, constants.SAVED_MODEL_FILENAME_PBTXT)\n    pb_path = file_io.join(export_dir, constants.SAVED_MODEL_FILENAME_PB)\n    cpb_path = file_io.join(export_dir, constants.SAVED_MODEL_FILENAME_CPB)\n    return file_io.file_exists(txt_path) or file_io.file_exists(pb_path) or file_io.file_exists(cpb_path)",
    "docstring": "Checks whether the provided export directory could contain a SavedModel. Note that the method does not load any data by itself. If the method returns , the export directory definitely does not contain a SavedModel. If the method returns , the export directory may contain a SavedModel but provides no guarantee that it can be loaded. Args: export_dir: Absolute string path to possible export location. For example, '/my/foo/model'. Returns: True if the export directory contains SavedModel files, False otherwise.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\saved_model\\loader_impl.py",
    "ast_data": "FunctionDef name:maybe_saved_model_directory arg:export_dir arguments arg Assign Call Assign Call Assign Call Return return:yes BoolOp Call Call Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "Softmin",
    "source_code": "class Softmin(Module):\n    __constants__ = ['dim']\n    dim: Optional[int]\n\n    def __init__(self, dim: Optional[int]=None) -> None:\n        super().__init__()\n        self.dim = dim\n\n    def __setstate__(self, state):\n        super().__setstate__(state)\n        if not hasattr(self, 'dim'):\n            self.dim = None\n\n    def forward(self, input: Tensor) -> Tensor:\n        return F.softmin(input, self.dim, _stacklevel=5)\n\n    def extra_repr(self):\n        return f'dim={self.dim}'",
    "docstring": "Applies the Softmin function to an n-dimensional input Tensor. Rescales them so that the elements of the n-dimensional output Tensor lie in the range and sum to 1. Softmin is defined as: .. math:: \\text{Softmin}(x_{i}) = \\frac{\\exp(-x_i)}{\\sum_j \\exp(-x_j)} Shape: - Input: :math: where means, any number of additional dimensions - Output: :math:, same shape as the input Args: dim (int): A dimension along which Softmin will be computed (so every slice along dim will sum to 1). Returns: a Tensor of the same dimension and shape as the input, with values in the range [0, 1] Examples:: >>> m = nn.Softmin(dim=1) >>> input = torch.randn(2, 3) >>> output = m(input)",
    "type": "class",
    "file_path": "pytorch\\torch\\nn\\modules\\activation.py",
    "ast_data": "ClassDef name:Softmin Assign FunctionDef name:__init__ arg:self arg:dim arguments arg arg Call Call Assign FunctionDef name:__setstate__ arg:self arg:state arguments arg arg Call Call If Call Assign FunctionDef name:forward arg:self arg:input arguments arg arg Return return:yes Call FunctionDef name:extra_repr arg:self arguments arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "mm_flop",
    "source_code": "@register_flop_formula(aten.mm)\ndef mm_flop(a_shape, b_shape, *args, out_shape=None, **kwargs) -> int:\n    m, k = a_shape\n    k2, n = b_shape\n    assert k == k2\n    return m * n * 2 * k",
    "docstring": "Count flops for matmul.",
    "type": "function",
    "file_path": "pytorch\\torch\\utils\\flop_counter.py",
    "ast_data": "FunctionDef name:mm_flop arg:a_shape arg:b_shape arguments arg arg arg arg arg Assign Assign Compare Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "colorize",
    "source_code": "def colorize(self, rowwise=False):\n    self._colorize = Colorize.ROWWISE if rowwise else Colorize.COLUMNWISE",
    "docstring": "Colorize formatted table. Colorize columnwise by default.",
    "type": "method",
    "file_path": "pytorch\\torch\\utils\\benchmark\\utils\\compare.py",
    "ast_data": "FunctionDef name:colorize arg:self arg:rowwise arguments arg arg Assign"
  },
  {
    "library": "pytorch",
    "name": "visualize",
    "source_code": "def visualize(graph, name_prefix='', pb_graph=None, executors_it=None):\n    value_map = {}\n    pb_graph = pb_graph or graph_pb2.GraphDef()\n    if isinstance(graph, torch._C.GraphExecutorState):\n        visualize_graph_executor(graph, name_prefix, pb_graph, partial(visualize, pb_graph=pb_graph))\n        return pb_graph\n    pb_graph.node.add(op='input', name=name_prefix + 'input')\n    for i, value in enumerate(graph.param_node().outputs()):\n        value_map[value.unique()] = name_prefix + 'input:' + str(i)\n    visualize_rec(graph, value_map, name_prefix, pb_graph, executors_it)\n    return_node = pb_graph.node.add(op='output', name=name_prefix + 'output')\n    for value in graph.return_node().inputs():\n        return_node.input.append(value_map[value.unique()])\n    return pb_graph",
    "docstring": "Visualizes an independent graph, or a graph executor.",
    "type": "function",
    "file_path": "pytorch\\torch\\contrib\\_tensorboard_vis.py",
    "ast_data": "FunctionDef name:visualize arg:graph arg:name_prefix arg:pb_graph arg:executors_it arguments arg arg arg arg Assign Assign BoolOp Call If Call Call Call Return return:yes Call For Call Call Call Assign Call Call Call Assign Call For Call Call Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "is_packed",
    "source_code": "def is_packed(value):\n    if not isinstance(value, ExtensionType):\n        raise ValueError(f'Expected `value` to be an object of type ExtensionType,got an instance of {type(value)}.')\n    return '_tf_extension_type_packed_variant' in value.__dict__",
    "docstring": "Returns true if 's fields are packed in a single Variant.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\extension_type.py",
    "ast_data": "FunctionDef name:is_packed arg:value arguments arg If Call Raise Call Call Return return:yes Compare"
  },
  {
    "library": "tensorflow",
    "name": "OnRunEndResponse",
    "source_code": "class OnRunEndResponse:\n\n    def __init__(self):\n        pass",
    "docstring": "Response from an on-run-end callback.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\debug\\wrappers\\framework.py",
    "ast_data": "ClassDef name:OnRunEndResponse FunctionDef name:__init__ arg:self arguments arg"
  },
  {
    "library": "django",
    "name": "configure",
    "source_code": "def configure(self, default_settings=global_settings, **options):\n    if self._wrapped is not empty:\n        raise RuntimeError('Settings already configured.')\n    holder = UserSettingsHolder(default_settings)\n    for name, value in options.items():\n        if not name.isupper():\n            raise TypeError('Setting %r must be uppercase.' % name)\n        setattr(holder, name, value)\n    self._wrapped = holder",
    "docstring": "Called to manually configure the settings. The 'default_settings' parameter sets where to retrieve any unspecified values from (its argument must support attribute access (__getattr__)).",
    "type": "method",
    "file_path": "django\\django\\conf\\__init__.py",
    "ast_data": "FunctionDef name:configure arg:self arg:default_settings arguments arg arg arg If Compare Raise Call Assign Call For Call If Call Raise Call Call Assign"
  },
  {
    "library": "seaborn",
    "name": "layout",
    "source_code": "def layout(self, *, size: tuple[float, float] | Default=default, engine: str | None | Default=default, extent: tuple[float, float, float, float] | Default=default) -> Plot:\n    new = self._clone()\n    if size is not default:\n        new._figure_spec['figsize'] = size\n    if engine is not default:\n        new._layout_spec['engine'] = engine\n    if extent is not default:\n        new._layout_spec['extent'] = extent\n    return new",
    "docstring": "Control the figure size and layout. .. note:: Default figure sizes and the API for specifying the figure size are subject to change in future \"experimental\" releases of the objects API. The default layout engine may also change. Parameters ---------- size : (width, height) Size of the resulting figure, in inches. Size is inclusive of legend when using pyplot, but not otherwise. engine : {{\"tight\", \"constrained\", \"none\"}} Name of method for automatically adjusting the layout to remove overlap. The default depends on whether :meth: is used. extent : (left, bottom, right, top) Boundaries of the plot layout, in fractions of the figure size. Takes effect through the layout engine; exact results will vary across engines. Note: the extent includes axis decorations when using a layout engine, but it is exclusive of them when . Examples -------- .. include:: ../docstrings/objects.Plot.layout.rst",
    "type": "method",
    "file_path": "seaborn\\seaborn\\_core\\plot.py",
    "ast_data": "FunctionDef name:layout arg:self arguments arg arg arg arg Assign Call If Compare Assign If Compare Assign If Compare Assign Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "split",
    "source_code": "def split(self, labels, centers, scores):\n    self.left = _BisectingTree(indices=self.indices[labels == 0], center=centers[0], score=scores[0])\n    self.right = _BisectingTree(indices=self.indices[labels == 1], center=centers[1], score=scores[1])\n    self.indices = None",
    "docstring": "Split the cluster node into two subclusters.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\cluster\\_bisect_k_means.py",
    "ast_data": "FunctionDef name:split arg:self arg:labels arg:centers arg:scores arguments arg arg arg arg Assign Call Compare Assign Call Compare Assign"
  },
  {
    "library": "pytorch",
    "name": "get_kernel_category_by_source_code",
    "source_code": "def get_kernel_category_by_source_code(src_code: str) -> str:\n    choices = [ch for ch in _kernel_category_choices if f'@triton_heuristics.{ch}' in src_code]\n    if len(choices) == 1:\n        return choices[0]\n    else:\n        return 'unknown'",
    "docstring": "Similar to get_kernel_category but use the source code. Call this API if we have not compile the src_code to module yet.",
    "type": "function",
    "file_path": "pytorch\\torch\\_inductor\\wrapper_benchmark.py",
    "ast_data": "FunctionDef name:get_kernel_category_by_source_code arg:src_code arguments arg Assign Compare If Compare Call Return return:yes Return return:yes"
  },
  {
    "library": "numpy",
    "name": "_hist_bin_sqrt",
    "source_code": "def _hist_bin_sqrt(x, range):\n    del range\n    return _ptp(x) / np.sqrt(x.size)",
    "docstring": "Square root histogram bin estimator. Bin width is inversely proportional to the data size. Used by many programs for its simplicity. Parameters ---------- x : array_like Input data that is to be histogrammed, trimmed to range. May not be empty. Returns ------- h : An estimate of the optimal bin width for the given data.",
    "type": "function",
    "file_path": "numpy\\numpy\\lib\\_histograms_impl.py",
    "ast_data": "FunctionDef name:_hist_bin_sqrt arg:x arg:range arguments arg arg Return return:yes Call Call"
  },
  {
    "library": "kornia",
    "name": "_create_rays",
    "source_code": "def _create_rays(self, camera: PinholeCamera) -> Ray:\n    height, width = self._image_size\n    origin = camera.extrinsics[..., :3, -1]\n    origin = origin.repeat(height * width, 1)\n    destination = camera.unproject(self._pixels_grid, self._ones)\n    return Ray.through(origin, destination)",
    "docstring": "Create rays for a given camera. Args: camera: camera for image rendering: PinholeCamera.",
    "type": "method",
    "file_path": "kornia\\kornia\\nerf\\nerf_model.py",
    "ast_data": "FunctionDef name:_create_rays arg:self arg:camera arguments arg arg Assign Assign Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "django",
    "name": "to_esri",
    "source_code": "def to_esri(self):\n    capi.morph_to_esri(self.ptr)",
    "docstring": "Morph this SpatialReference to ESRI's format.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\gdal\\srs.py",
    "ast_data": "FunctionDef name:to_esri arg:self arguments arg Call"
  },
  {
    "library": "sphinx",
    "name": "find_pending_xref_condition",
    "source_code": "def find_pending_xref_condition(node: addnodes.pending_xref, condition: str) -> Element | None:\n    for subnode in node:\n        if isinstance(subnode, addnodes.pending_xref_condition) and subnode.get('condition') == condition:\n            return subnode\n    return None",
    "docstring": "Pick matched pending_xref_condition node up from the pending_xref.",
    "type": "function",
    "file_path": "sphinx\\sphinx\\util\\nodes.py",
    "ast_data": "FunctionDef name:find_pending_xref_condition arg:node arg:condition arguments arg arg For If BoolOp Call Compare Call Return return:yes Return return:no"
  },
  {
    "library": "django",
    "name": "equals",
    "source_code": "def equals(self, other):\n    return capi.geos_equals(self.ptr, other.ptr)",
    "docstring": "Return true if the DE-9IM intersection matrix for the two Geometries is T*F**FFF*.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\geos\\geometry.py",
    "ast_data": "FunctionDef name:equals arg:self arg:other arguments arg arg Return return:yes Call"
  },
  {
    "library": "django",
    "name": "conditional_escape",
    "source_code": "def conditional_escape(text):\n    if isinstance(text, Promise):\n        text = str(text)\n    if hasattr(text, '__html__'):\n        return text.__html__()\n    else:\n        return escape(text)",
    "docstring": "Similar to escape(), except that it doesn't operate on pre-escaped strings. This function relies on the __html__ convention used both by Django's SafeData class and by third-party libraries like markupsafe.",
    "type": "function",
    "file_path": "django\\django\\utils\\html.py",
    "ast_data": "FunctionDef name:conditional_escape arg:text arguments arg If Call Assign Call If Call Return return:yes Call Return return:yes Call"
  },
  {
    "library": "scrapy",
    "name": "to_bytes",
    "source_code": "def to_bytes(text: str | bytes, encoding: str | None=None, errors: str='strict') -> bytes:\n    if isinstance(text, bytes):\n        return text\n    if not isinstance(text, str):\n        raise TypeError(f'to_bytes must receive a str or bytes object, got {type(text).__name__}')\n    if encoding is None:\n        encoding = 'utf-8'\n    return text.encode(encoding, errors)",
    "docstring": "Return the binary representation of `` is already a bytes object, return it as-is.",
    "type": "function",
    "file_path": "scrapy\\scrapy\\utils\\python.py",
    "ast_data": "FunctionDef name:to_bytes arg:text arg:encoding arg:errors arguments arg arg arg If Call Return return:yes If Call Raise Call Call If Compare Assign Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_components_to_string",
    "source_code": "@staticmethod\ndef _components_to_string(job, replica, task, device_type, device_index):\n    key = (job, replica, task, device_type, device_index)\n    cached_result = _COMPONENTS_TO_STRING_CACHE.get(key)\n    if cached_result is not None:\n        return cached_result\n    output = []\n    if job is not None:\n        output.append('/job:' + job)\n    if replica is not None:\n        output.append('/replica:' + str(replica))\n    if task is not None:\n        output.append('/task:' + str(task))\n    if device_type is not None:\n        device_index_string = '*'\n        if device_index is not None:\n            device_index_string = str(device_index)\n        output.append('/device:%s:%s' % (device_type, device_index_string))\n    output = ''.join(output)\n    _COMPONENTS_TO_STRING_CACHE[key] = output\n    return output",
    "docstring": "Stateless portion of (separated to allow caching).",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\device_spec.py",
    "ast_data": "FunctionDef name:_components_to_string arg:job arg:replica arg:task arg:device_type arg:device_index arguments arg arg arg arg arg Assign Assign Call If Compare Return return:yes Assign If Compare Call If Compare Call Call If Compare Call Call If Compare Assign If Compare Assign Call Call Assign Call Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_validate_snapshot",
    "source_code": "def _validate_snapshot(path: str, metadata: snapshot_pb2.DistributedSnapshotMetadata, element_spec: Any, compression: str) -> None:\n    error_file = _pywrap_snapshot_utils.TF_DATA_SnapshotErrorFilePath(path)\n    if gfile.Exists(error_file):\n        with gfile.GFile(error_file, 'r') as f:\n            raise ValueError(f'Failed to load tf.data snapshot at {path}. The save job failed to write it. Status: {f.read()}')\n    snapshot_element_spec = _parse_element_spec(metadata.element_spec)\n    if element_spec and element_spec != snapshot_element_spec:\n        raise ValueError(f'Failed to load tf.data snapshot at {path}. User specified element_spec {element_spec}, but the actual element_spec is {snapshot_element_spec}.')\n    if compression and compression != metadata.compression:\n        raise ValueError(f'Failed to load tf.data snapshot at {path}. User specified compression {compression}, but the actual compression is {metadata.compression}.')",
    "docstring": "Validates a tf.data distributed snapshot. Args: path: Root path of the distributed snapshot. metadata: The DistributedSnapshotMetadata of the snapshot. element_spec: Dataset element_spec. compression: Compression method used for saving. Raises: ValueError if the snapshot is invalid.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\data\\ops\\load_op.py",
    "ast_data": "FunctionDef name:_validate_snapshot arg:path arg:metadata arg:element_spec arg:compression arguments arg arg arg arg Assign Call If Call With Call Raise Call Call Assign Call If BoolOp Compare Raise Call If BoolOp Compare Raise Call"
  },
  {
    "library": "tensorflow",
    "name": "_var_key",
    "source_code": "def _var_key(var):\n    if hasattr(var, '_distributed_container'):\n        var = var._distributed_container()\n    if var._in_graph_mode:\n        return var._shared_name\n    return var._unique_id",
    "docstring": "Key for representing a primary variable, for looking up slots. In graph mode the name is derived from the var shared name. In eager mode the name is derived from the var unique id. If distribution strategy exists, get the primary variable first. Args: var: the variable. Returns: the unique name of the variable.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\optimizer_v2\\optimizer_v2.py",
    "ast_data": "FunctionDef name:_var_key arg:var arguments arg If Call Assign Call If Return return:yes Return return:yes"
  },
  {
    "library": "pandas",
    "name": "_reverse_indexer",
    "source_code": "def _reverse_indexer(self) -> dict[Hashable, npt.NDArray[np.intp]]:\n    categories = self.categories\n    r, counts = libalgos.groupsort_indexer(ensure_platform_int(self.codes), categories.size)\n    counts = ensure_int64(counts).cumsum()\n    _result = (r[start:end] for start, end in zip(counts, counts[1:]))\n    return dict(zip(categories, _result))",
    "docstring": "Compute the inverse of a categorical, returning a dict of categories -> indexers. *This is an internal function* Returns ------- Dict[Hashable, np.ndarray[np.intp]] dict of categories -> indexers Examples -------- >>> c = pd.Categorical(list(\"aabca\")) >>> c ['a', 'a', 'b', 'c', 'a'] Categories (3, object): ['a', 'b', 'c'] >>> c.categories Index(['a', 'b', 'c'], dtype='object') >>> c.codes array([0, 0, 1, 2, 0], dtype=int8) >>> c._reverse_indexer() {'a': array([0, 1, 4]), 'b': array([2]), 'c': array([3])}",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\arrays\\categorical.py",
    "ast_data": "FunctionDef name:_reverse_indexer arg:self arguments arg Assign Assign Call Call Assign Call Call Assign Call Return return:yes Call Call"
  },
  {
    "library": "numpy",
    "name": "__eq__",
    "source_code": "def __eq__(self, other):\n    return equal(self, other)",
    "docstring": "Return (self == other) element-wise. See Also -------- equal",
    "type": "method",
    "file_path": "numpy\\numpy\\_core\\defchararray.py",
    "ast_data": "FunctionDef name:__eq__ arg:self arg:other arguments arg arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "min",
    "source_code": "@property\ndef min(self):\n    if self.is_quantized or self.base_dtype in (bool, string, complex64, complex128):\n        raise TypeError(f'Cannot find minimum value of {self} with {('quantized type' if self.is_quantized else 'type')} {self.base_dtype}.')\n    try:\n        return ml_dtypes.finfo(self.as_numpy_dtype).min\n    except:\n        try:\n            return ml_dtypes.iinfo(self.as_numpy_dtype).min\n        except:\n            raise TypeError(f'Cannot find minimum value of {self}.')",
    "docstring": "Returns the minimum representable value in this data type. Raises: TypeError: if this is a non-numeric, unordered, or quantized type.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\dtypes.py",
    "ast_data": "FunctionDef name:min arg:self arguments arg If BoolOp Compare Raise Call Try Return return:yes Call ExceptHandler Try Return return:yes Call ExceptHandler Raise Call"
  },
  {
    "library": "kornia",
    "name": "GrayscaleToRgb",
    "source_code": "class GrayscaleToRgb(Module):\n    ONNX_DEFAULT_INPUTSHAPE: ClassVar[list[int]] = [-1, 1, -1, -1]\n    ONNX_DEFAULT_OUTPUTSHAPE: ClassVar[list[int]] = [-1, 3, -1, -1]\n\n    def forward(self, image: Tensor) -> Tensor:\n        return grayscale_to_rgb(image)",
    "docstring": "Module to convert a grayscale image to RGB version of image. The image data is assumed to be in the range of (0, 1). Shape: - image: :math: - output: :math: reference: Example: >>> input = torch.rand(2, 1, 4, 5) >>> rgb = GrayscaleToRgb() >>> output = rgb(input) # 2x3x4x5",
    "type": "class",
    "file_path": "kornia\\kornia\\color\\gray.py",
    "ast_data": "ClassDef name:GrayscaleToRgb FunctionDef name:forward arg:self arg:image arguments arg arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, capacity, dtypes, shapes=None, names=None, shared_name=None, name='fifo_queue'):\n    dtypes = _as_type_list(dtypes)\n    shapes = _as_shape_list(shapes, dtypes)\n    names = _as_name_list(names, dtypes)\n    with ops.init_scope():\n        queue_ref = gen_data_flow_ops.fifo_queue_v2(component_types=dtypes, shapes=shapes, capacity=capacity, shared_name=_shared_name(shared_name), name=name)\n    super(GPUCompatibleFIFOQueue, self).__init__(dtypes, shapes, names, queue_ref)",
    "docstring": "Creates a queue that dequeues elements in a first-in first-out order. A has bounded capacity; supports multiple concurrent producers and consumers; and provides exactly-once delivery. A holds a list of up to elements. Each element is a fixed-length tuple of tensors whose dtypes are described by , and whose shapes are optionally described by the argument. If the argument is specified, each component of a queue element must have the respective fixed shape. If it is unspecified, different queue elements may have different shapes, but the use of is disallowed. Args: capacity: An integer. The upper bound on the number of elements that may be stored in this queue. dtypes: A list of objects. The length of must equal the number of tensors in each queue element. shapes: (Optional.) A list of fully-defined objects with the same length as , or . names: (Optional.) A list of strings naming the components in the queue with the same length as , or . If specified the dequeue methods return a dictionary with the names as keys. shared_name: (Optional.) If non-empty, this queue will be shared under the given name across multiple sessions. name: Optional name for the queue operation.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\data_flow_ops.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:capacity arg:dtypes arg:shapes arg:names arg:shared_name arg:name arguments arg arg arg arg arg arg arg Assign Call Assign Call Assign Call With Call Assign Call Call Call Call"
  },
  {
    "library": "matplotlib",
    "name": "circle",
    "source_code": "@classmethod\ndef circle(cls, center=(0.0, 0.0), radius=1.0, readonly=False):\n    MAGIC = 0.2652031\n    SQRTHALF = np.sqrt(0.5)\n    MAGIC45 = SQRTHALF * MAGIC\n    vertices = np.array([[0.0, -1.0], [MAGIC, -1.0], [SQRTHALF - MAGIC45, -SQRTHALF - MAGIC45], [SQRTHALF, -SQRTHALF], [SQRTHALF + MAGIC45, -SQRTHALF + MAGIC45], [1.0, -MAGIC], [1.0, 0.0], [1.0, MAGIC], [SQRTHALF + MAGIC45, SQRTHALF - MAGIC45], [SQRTHALF, SQRTHALF], [SQRTHALF - MAGIC45, SQRTHALF + MAGIC45], [MAGIC, 1.0], [0.0, 1.0], [-MAGIC, 1.0], [-SQRTHALF + MAGIC45, SQRTHALF + MAGIC45], [-SQRTHALF, SQRTHALF], [-SQRTHALF - MAGIC45, SQRTHALF - MAGIC45], [-1.0, MAGIC], [-1.0, 0.0], [-1.0, -MAGIC], [-SQRTHALF - MAGIC45, -SQRTHALF + MAGIC45], [-SQRTHALF, -SQRTHALF], [-SQRTHALF + MAGIC45, -SQRTHALF - MAGIC45], [-MAGIC, -1.0], [0.0, -1.0], [0.0, -1.0]], dtype=float)\n    codes = [cls.CURVE4] * 26\n    codes[0] = cls.MOVETO\n    codes[-1] = cls.CLOSEPOLY\n    return Path(vertices * radius + center, codes, readonly=readonly)",
    "docstring": "Return a representing a circle of a given radius and center. Parameters ---------- center : (float, float), default: (0, 0) The center of the circle. radius : float, default: 1 The radius of the circle. readonly : bool Whether the created path should have the \"readonly\" argument set when creating the Path instance. Notes ----- The circle is approximated using 8 cubic Bézier curves, as described in Lancaster, Don. _.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\path.py",
    "ast_data": "FunctionDef name:circle arg:cls arg:center arg:radius arg:readonly arguments arg arg arg arg Assign Assign Call Assign Assign Call Assign Assign Assign Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "make_keyword_only",
    "source_code": "def make_keyword_only(since, name, func=None):\n    decorator = functools.partial(make_keyword_only, since, name)\n    if func is None:\n        return decorator\n    signature = inspect.signature(func)\n    POK = inspect.Parameter.POSITIONAL_OR_KEYWORD\n    KWO = inspect.Parameter.KEYWORD_ONLY\n    assert name in signature.parameters and signature.parameters[name].kind == POK, f'Matplotlib internal error: {name!r} must be a positional-or-keyword parameter for {func.__name__}(). If this error happens on a function with a pyplot wrapper, make sure make_keyword_only() is the outermost decorator.'\n    names = [*signature.parameters]\n    name_idx = names.index(name)\n    kwonly = [name for name in names[name_idx:] if signature.parameters[name].kind == POK]\n\n    @functools.wraps(func)\n    def wrapper(*args, **kwargs):\n        if len(args) > name_idx:\n            warn_deprecated(since, message='Passing the %(name)s %(obj_type)s positionally is deprecated since Matplotlib %(since)s; the parameter will become keyword-only in %(removal)s.', name=name, obj_type=f'parameter of {func.__name__}()')\n        return func(*args, **kwargs)\n    wrapper.__signature__ = signature.replace(parameters=[param.replace(kind=KWO) if param.name in kwonly else param for param in signature.parameters.values()])\n    DECORATORS[wrapper] = decorator\n    return wrapper",
    "docstring": "Decorator indicating that passing parameter *name* (or any of the following ones) positionally to *func* is being deprecated. When used on a method that has a pyplot wrapper, this should be the outermost decorator, so that :file: can access the original signature.",
    "type": "function",
    "file_path": "matplotlib\\lib\\matplotlib\\_api\\deprecation.py",
    "ast_data": "FunctionDef name:make_keyword_only arg:since arg:name arg:func arguments arg arg arg Assign Call If Compare Return return:yes Assign Call Assign Assign BoolOp Compare Compare Assign Assign Call Assign Compare FunctionDef name:wrapper arguments arg arg If Compare Call Call Return return:yes Call Call Assign Call Compare Call Call Assign Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "pchanged",
    "source_code": "def pchanged(self):\n    self._callbacks.process('pchanged')",
    "docstring": "Call all of the registered callbacks. This function is triggered internally when a property is changed. See Also -------- add_callback remove_callback",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\artist.py",
    "ast_data": "FunctionDef name:pchanged arg:self arguments arg Call"
  },
  {
    "library": "pygame",
    "name": "_quit_hook",
    "source_code": "def _quit_hook():\n    global _ft_init\n    _ft_init = False",
    "docstring": "Hook that gets run to quit module",
    "type": "function",
    "file_path": "pygame\\src_py\\fastevent.py",
    "ast_data": "FunctionDef name:_quit_hook arguments Assign"
  },
  {
    "library": "tensorflow",
    "name": "ResultAccuracy",
    "source_code": "class ResultAccuracy:\n    __slots__ = ('mode', 'atol', 'rtol', 'ulps')\n\n    def __init__(self):\n        self.mode = ops.ResultAccuracy_Mode.DEFAULT\n        self.atol = 0.0\n        self.rtol = 0.0\n        self.ulps = 0",
    "docstring": "Python representation of a xla.ResultAccuracy protobuf.",
    "type": "class",
    "file_path": "tensorflow\\third_party\\xla\\xla\\python\\xla_client.py",
    "ast_data": "ClassDef name:ResultAccuracy Assign FunctionDef name:__init__ arg:self arguments arg Assign Assign Assign Assign"
  },
  {
    "library": "scipy",
    "name": "Zettl",
    "source_code": "class Zettl(Benchmark):\n\n    def __init__(self, dimensions=2):\n        Benchmark.__init__(self, dimensions)\n        self._bounds = list(zip([-5.0] * self.N, [10.0] * self.N))\n        self.global_optimum = [[-0.02989597760285287, 0.0]]\n        self.fglob = -0.003791237220468656\n\n    def fun(self, x, *args):\n        self.nfev += 1\n        return (x[0] ** 2 + x[1] ** 2 - 2 * x[0]) ** 2 + 0.25 * x[0]",
    "docstring": "Zettl objective function. This class defines the Zettl [1]_ global optimization problem. This is a multimodal minimization problem defined as follows: .. math:: f_{\\text{Zettl}}(x) = \\frac{1}{4} x_{1} + \\left(x_{1}^{2} - 2 x_{1} + x_{2}^{2}\\right)^{2} with :math: for :math:. *Global optimum*: :math: for :math: .. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions For Global Optimization Problems Int. Journal of Mathematical Modelling and Numerical Optimisation, 2013, 4, 150-194.",
    "type": "class",
    "file_path": "scipy\\benchmarks\\benchmarks\\go_benchmark_functions\\go_funcs_Z.py",
    "ast_data": "ClassDef name:Zettl FunctionDef name:__init__ arg:self arg:dimensions arguments arg arg Call Assign Call Call Assign Assign FunctionDef name:fun arg:self arg:x arguments arg arg arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_register_device_module",
    "source_code": "def _register_device_module(device_type, module):\n    device_type = torch.device(device_type).type\n    m = sys.modules[__name__]\n    if hasattr(m, device_type):\n        raise RuntimeError(f\"The runtime module of '{device_type}' has already been registered with '{getattr(m, device_type)}'\")\n    setattr(m, device_type, module)\n    torch_module_name = '.'.join([__name__, device_type])\n    sys.modules[torch_module_name] = module",
    "docstring": "Register an external runtime module of the specific :attr: supported by torch. After the :attr: is registered correctly, the user can refer the external runtime module as part of torch with attribute torch.xxx.",
    "type": "function",
    "file_path": "pytorch\\torch\\__init__.py",
    "ast_data": "FunctionDef name:_register_device_module arg:device_type arg:module arguments arg arg Assign Call Assign If Call Raise Call Call Call Assign Call Assign"
  },
  {
    "library": "sphinx",
    "name": "add_search_language",
    "source_code": "def add_search_language(self, cls: type[SearchLanguage]) -> None:\n    logger.debug('[app] adding search language: %r', cls)\n    from sphinx.search import languages\n    languages[cls.lang] = cls",
    "docstring": "Register a new language for the HTML search index. Add *cls*, which must be a subclass of :class:, as a support language for building the HTML full-text search index. The class must have a *lang* attribute that indicates the language it should be used for. See :confval:. .. versionadded:: 1.1",
    "type": "method",
    "file_path": "sphinx\\sphinx\\application.py",
    "ast_data": "FunctionDef name:add_search_language arg:self arg:cls arguments arg arg Call Assign"
  },
  {
    "library": "pytorch",
    "name": "adapt",
    "source_code": "@abc.abstractmethod\ndef adapt(self, target_spec: pytree.TreeSpec, input_spec: pytree.TreeSpec, input_args: list[Any], metadata: Optional[dict[str, Any]]=None, obj: Optional[Any]=None) -> list[Any]:\n    ...",
    "docstring": "NOTE: This adapter may mutate given ``.",
    "type": "method",
    "file_path": "pytorch\\torch\\export\\unflatten.py",
    "ast_data": "FunctionDef name:adapt arg:self arg:target_spec arg:input_spec arg:input_args arg:metadata arg:obj arguments arg arg arg arg arg arg"
  },
  {
    "library": "sphinx",
    "name": "run",
    "source_code": "def run(self, **kwargs: Any) -> None:\n    raise NotImplementedError",
    "docstring": "Main method of post transforms. Subclasses should override this method instead of ``.",
    "type": "method",
    "file_path": "sphinx\\sphinx\\transforms\\post_transforms\\__init__.py",
    "ast_data": "FunctionDef name:run arg:self arguments arg arg Raise"
  },
  {
    "library": "pytorch",
    "name": "groupby",
    "source_code": "def groupby(func, seq):\n    d = OrderedDict()\n    for item in seq:\n        key = func(item)\n        if key not in d:\n            d[key] = []\n        d[key].append(item)\n    return d",
    "docstring": "Group a collection by a key function >>> names = [\"Alice\", \"Bob\", \"Charlie\", \"Dan\", \"Edith\", \"Frank\"] >>> groupby(len, names) # doctest: +SKIP {3: ['Bob', 'Dan'], 5: ['Alice', 'Edith', 'Frank'], 7: ['Charlie']} >>> iseven = lambda x: x % 2 == 0 >>> groupby(iseven, [1, 2, 3, 4, 5, 6, 7, 8]) # doctest: +SKIP {False: [1, 3, 5, 7], True: [2, 4, 6, 8]} See Also: ``",
    "type": "function",
    "file_path": "pytorch\\torch\\fx\\experimental\\unification\\multipledispatch\\utils.py",
    "ast_data": "FunctionDef name:groupby arg:func arg:seq arguments arg arg Assign Call For Assign Call If Compare Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_get_inputs_tensor_info_from_meta_graph_def",
    "source_code": "def _get_inputs_tensor_info_from_meta_graph_def(meta_graph_def, signature_def_key):\n    if signature_def_key not in meta_graph_def.signature_def:\n        raise ValueError(f'Could not find signature \"{signature_def_key}\". Please choose from: {', '.join(meta_graph_def.signature_def.keys())}')\n    return meta_graph_def.signature_def[signature_def_key].inputs",
    "docstring": "Gets TensorInfo for all inputs of the SignatureDef. Returns a dictionary that maps each input key to its TensorInfo for the given signature_def_key in the meta_graph_def Args: meta_graph_def: MetaGraphDef protocol buffer with the SignatureDef map to look up SignatureDef key. signature_def_key: A SignatureDef key string. Returns: A dictionary that maps input tensor keys to TensorInfos. Raises: ValueError if is not found in the MetaGraphDef.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\tools\\saved_model_cli.py",
    "ast_data": "FunctionDef name:_get_inputs_tensor_info_from_meta_graph_def arg:meta_graph_def arg:signature_def_key arguments arg arg If Compare Raise Call Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_run_weight_observers",
    "source_code": "def _run_weight_observers(observed: GraphModule, backend_config: BackendConfig) -> None:\n    for node in observed.graph.nodes:\n        if node.op != 'call_function':\n            continue\n        for node_arg in node.args:\n            if node_arg and node_arg_is_weight(node, node_arg):\n                weight_observer_nodes = collect_producer_nodes(node_arg)\n                if weight_observer_nodes is None:\n                    continue\n                weight_observer_module = graph_module_from_producer_nodes(observed, weight_observer_nodes)\n                weight_observer_module()",
    "docstring": "Extract the subgraph that produces the weight for dynamic quant or weight only quant node and run the subgraph to observe the weight. Note that the observers of dynamic quant or weight only quant ops are run during the convert step.",
    "type": "function",
    "file_path": "pytorch\\torch\\ao\\quantization\\fx\\convert.py",
    "ast_data": "FunctionDef name:_run_weight_observers arg:observed arg:backend_config arguments arg arg For If Compare For If BoolOp Call Assign Call If Compare Assign Call Call"
  },
  {
    "library": "tensorflow",
    "name": "get_gradients",
    "source_code": "def get_gradients(self, loss, params):\n    grads = backend.gradients(loss, params)\n    if any((g is None for g in grads)):\n        raise ValueError('An operation has `None` for gradient. Please make sure that all of your ops have a gradient defined (i.e. are differentiable). Common ops without gradient: backend.argmax, backend.round, backend.eval.')\n    if hasattr(self, 'clipnorm'):\n        grads = [clip_ops.clip_by_norm(g, self.clipnorm) for g in grads]\n    if hasattr(self, 'clipvalue'):\n        grads = [clip_ops.clip_by_value(g, -self.clipvalue, self.clipvalue) for g in grads]\n    return grads",
    "docstring": "Returns gradients of with respect to . Args: loss: Loss tensor. params: List of variables. Returns: List of gradient tensors. Raises: ValueError: In case any gradient cannot be computed (e.g. if gradient function not implemented).",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\optimizer_v1.py",
    "ast_data": "FunctionDef name:get_gradients arg:self arg:loss arg:params arguments arg arg arg Assign Call If Call Compare Raise Call If Call Assign Call If Call Assign Call Return return:yes"
  },
  {
    "library": "scrapy",
    "name": "_InvalidSelector",
    "source_code": "class _InvalidSelector(ValueError):\n    pass",
    "docstring": "Raised when a URL cannot be obtained from a Selector",
    "type": "class",
    "file_path": "scrapy\\scrapy\\http\\response\\text.py",
    "ast_data": "ClassDef name:_InvalidSelector"
  },
  {
    "library": "pandas",
    "name": "clear",
    "source_code": "def clear(self) -> None:\n    clean_copy = Styler(self.data, uuid=self.uuid)\n    clean_attrs = [a for a in clean_copy.__dict__ if not callable(a)]\n    self_attrs = [a for a in self.__dict__ if not callable(a)]\n    for attr in clean_attrs:\n        setattr(self, attr, getattr(clean_copy, attr))\n    for attr in set(self_attrs).difference(clean_attrs):\n        delattr(self, attr)",
    "docstring": "Reset the `Table Visualization `_ for more examples.",
    "type": "method",
    "file_path": "pandas\\pandas\\io\\formats\\style.py",
    "ast_data": "FunctionDef name:clear arg:self arguments arg Assign Call Assign Call Assign Call For Call Call For Call Call Call"
  },
  {
    "library": "sphinx",
    "name": "_split",
    "source_code": "def _split(self, text: str) -> list[str]:\n\n    def split(t: str) -> list[str]:\n        return super(TextWrapper, self)._split(t)\n    chunks: list[str] = []\n    for chunk in split(text):\n        for w, g in groupby(chunk, column_width):\n            if w == 1:\n                chunks.extend(split(''.join(g)))\n            else:\n                chunks.extend(list(g))\n    return chunks",
    "docstring": "Override original method that only split by 'wordsep_re'. This '_split' splits wide-characters into chunks by one character.",
    "type": "method",
    "file_path": "sphinx\\sphinx\\writers\\text.py",
    "ast_data": "FunctionDef name:_split arg:self arg:text arguments arg arg FunctionDef name:split arg:t arguments arg Return return:yes Call Call For Call For Call If Compare Call Call Call Call Call Return return:yes"
  },
  {
    "library": "scipy",
    "name": "ppf",
    "source_code": "def ppf(self, q, *args, **kwds):\n    args, loc, _ = self._parse_args(*args, **kwds)\n    q, loc = map(asarray, (q, loc))\n    args = tuple(map(asarray, args))\n    _a, _b = self._get_support(*args)\n    cond0 = self._argcheck(*args) & (loc == loc)\n    cond1 = (q > 0) & (q < 1)\n    cond2 = (q == 1) & cond0\n    cond = cond0 & cond1\n    output = np.full(shape(cond), fill_value=self.badvalue, dtype='d')\n    place(output, (q == 0) * (cond == cond), _a - 1 + loc)\n    place(output, cond2, _b + loc)\n    if np.any(cond):\n        goodargs = argsreduce(cond, *(q,) + args + (loc,))\n        loc, goodargs = (goodargs[-1], goodargs[:-1])\n        place(output, cond, self._ppf(*goodargs) + loc)\n    if output.ndim == 0:\n        return output[()]\n    return output",
    "docstring": "Percent point function (inverse of ) at q of the given RV. Parameters ---------- q : array_like Lower tail probability. arg1, arg2, arg3,... : array_like The shape parameter(s) for the distribution (see docstring of the instance object for more information). loc : array_like, optional Location parameter (default=0). Returns ------- k : array_like Quantile corresponding to the lower tail probability, q.",
    "type": "method",
    "file_path": "scipy\\scipy\\stats\\_distn_infrastructure.py",
    "ast_data": "FunctionDef name:ppf arg:self arg:q arguments arg arg arg arg Assign Call Assign Call Assign Call Call Assign Call Assign Call Compare Assign Compare Compare Assign Compare Assign Assign Call Call Call Compare Compare Call If Call Assign Call Assign Call Call If Compare Return return:yes Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "gather",
    "source_code": "def gather(self, indices, name=None):\n    if self._element_shape:\n        element_shape = self._element_shape[0]\n    else:\n        element_shape = tensor_shape.unknown_shape(None)\n    value = gen_data_flow_ops.tensor_array_gather_v3(handle=self._handle, indices=indices, flow_in=self._flow, dtype=self._dtype, name=name, element_shape=element_shape)\n    if self.element_shape:\n        value.set_shape([None] + self.element_shape.dims)\n    return value",
    "docstring": "See TensorArray.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\tensor_array_ops.py",
    "ast_data": "FunctionDef name:gather arg:self arg:indices arg:name arguments arg arg arg If Assign Assign Call Assign Call If Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "flatten_metrics_in_order",
    "source_code": "def flatten_metrics_in_order(logs, metrics_names):\n    results = []\n    for name in metrics_names:\n        if name in logs:\n            results.append(logs[name])\n    for key in sorted(logs.keys()):\n        if key not in metrics_names:\n            results.append(logs[key])\n    if len(results) == 1:\n        return results[0]\n    return results",
    "docstring": "Turns the dict into a list as per key order of .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\training.py",
    "ast_data": "FunctionDef name:flatten_metrics_in_order arg:logs arg:metrics_names arguments arg arg Assign For If Compare Call For Call Call If Compare Call If Compare Call Return return:yes Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "all_functions",
    "source_code": "def all_functions(self) -> set[str]:\n    return set(self._registry)",
    "docstring": "Returns the set of all registered function names.",
    "type": "method",
    "file_path": "pytorch\\torch\\onnx\\_internal\\registration.py",
    "ast_data": "FunctionDef name:all_functions arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "cherrypy",
    "name": "transform_key",
    "source_code": "def transform_key(self, key):\n    self.accessed_headers.add(key)\n    return super(MonitoredHeaderMap, self).transform_key(key)",
    "docstring": "Normalize and track an HTTP header name.",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\lib\\cptools.py",
    "ast_data": "FunctionDef name:transform_key arg:self arg:key arguments arg arg Call Return return:yes Call Call"
  },
  {
    "library": "matplotlib",
    "name": "_setdefaults",
    "source_code": "def _setdefaults(self, defaults, kw):\n    for k in defaults:\n        if kw.get(k, None) is None:\n            kw[k] = defaults[k]",
    "docstring": "Add to the dict *kw* the entries in the dict *default* that are absent or set to None in *kw*.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\axes\\_base.py",
    "ast_data": "FunctionDef name:_setdefaults arg:self arg:defaults arg:kw arguments arg arg arg For If Compare Call Assign"
  },
  {
    "library": "pandas",
    "name": "shape",
    "source_code": "@property\ndef shape(self) -> tuple[int, ...]:\n    return tuple((len(self._get_axis(a)) for a in self._AXIS_ORDERS))",
    "docstring": "Return a tuple of axis dimensions",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\generic.py",
    "ast_data": "FunctionDef name:shape arg:self arguments arg Return return:yes Call Call Call"
  },
  {
    "library": "django",
    "name": "geom_count",
    "source_code": "@property\ndef geom_count(self):\n    return capi.get_geom_count(self.ptr)",
    "docstring": "Return the number of elements in this Geometry.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\gdal\\geometries.py",
    "ast_data": "FunctionDef name:geom_count arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "Endianness",
    "source_code": "class Endianness:\n    LITTLE = '<'\n    BIG = '>'\n    NATIVE = '='\n    NA = '|'",
    "docstring": "Enum indicating the byte-order of a data-type.",
    "type": "class",
    "file_path": "pandas\\pandas\\core\\interchange\\utils.py",
    "ast_data": "ClassDef name:Endianness Assign Assign Assign Assign"
  },
  {
    "library": "tensorflow",
    "name": "run_step_fn",
    "source_code": "def run_step_fn(self, step_fn):\n    step_fn_arguments = function_utils.fn_args(step_fn)\n    if step_fn_arguments != ('step_context',) and step_fn_arguments != ('self', 'step_context'):\n        raise ValueError(\"`step_fn` may either have one `step_context` argument, or `self` and `step_context` arguments if it's an instance method. Got {} instead.\".format(step_fn_arguments))\n    return self._sess.run_step_fn(step_fn, self._tf_sess(), run_with_hooks=None)",
    "docstring": "Run ops using a step function. Args: step_fn: A function or a method with a single argument of type . The function may use methods of the argument to perform computations with access to a raw session. The returned value of the will be returned from , unless a stop is requested. In that case, the next call will return True. Example usage: Hooks interact with the call inside the as they do with a call. Returns: Returns the returned value of . Raises: StopIteration: if has called . It may be caught by to close the session. ValueError: if doesn't have a single argument called . It may also optionally have for cases when it belongs to an object.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\training\\monitored_session.py",
    "ast_data": "FunctionDef name:run_step_fn arg:self arg:step_fn arguments arg arg Assign Call If BoolOp Compare Compare Raise Call Call Return return:yes Call Call"
  },
  {
    "library": "matplotlib",
    "name": "changed",
    "source_code": "def changed(self):\n    self.callbacks.process('changed', self)\n    self.stale = True",
    "docstring": "Call this whenever the mappable is changed to notify all the callbackSM listeners to the 'changed' signal.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\colorizer.py",
    "ast_data": "FunctionDef name:changed arg:self arguments arg Call Assign"
  },
  {
    "library": "tensorflow",
    "name": "__enter__",
    "source_code": "def __enter__(self):\n    if self._device_scope is not None:\n        raise AssertionError('Re-entered a ParallelDevice scope without first exiting it.')\n    self._assert_eager()\n    self._device_scope = ops.device(self._name)\n    self._device_scope.__enter__()\n    return self",
    "docstring": "Runs ops in parallel, makes variables which save independent buffers.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\parallel_device\\parallel_device.py",
    "ast_data": "FunctionDef name:__enter__ arg:self arguments arg If Compare Raise Call Call Assign Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_n_shadows_compare_weights",
    "source_code": "def _n_shadows_compare_weights(model: torch.nn.Module, example_inputs: Any, qconfig_mapping: QConfigMapping, backend_config: BackendConfig) -> NSResultsType:\n    qconfig_multi_mapping = QConfigMultiMapping.from_list_qconfig_mapping([qconfig_mapping])\n    mp = prepare_n_shadows_model(model, example_inputs, qconfig_multi_mapping, backend_config)\n    mp(*example_inputs)\n    mq = convert_n_shadows_model(mp)\n    weight_comparison = extract_weight_comparison(mq)\n    return weight_comparison",
    "docstring": "Note: this API is not recommended for wide usage, it is only provided for customers who need to migrate from the API.",
    "type": "function",
    "file_path": "pytorch\\torch\\ao\\ns\\_numeric_suite_fx.py",
    "ast_data": "FunctionDef name:_n_shadows_compare_weights arg:model arg:example_inputs arg:qconfig_mapping arg:backend_config arguments arg arg arg arg Assign Call Assign Call Call Assign Call Assign Call Return return:yes"
  },
  {
    "library": "sphinx",
    "name": "get",
    "source_code": "def get(url: str, **kwargs: Any) -> requests.Response:\n    with _Session() as session:\n        return session.get(url, **kwargs)",
    "docstring": "Sends a GET request like ``. This sets up User-Agent header and TLS verification automatically.",
    "type": "function",
    "file_path": "sphinx\\sphinx\\util\\requests.py",
    "ast_data": "FunctionDef name:get arg:url arguments arg arg With Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "replica_id_in_sync_group",
    "source_code": "@property\ndef replica_id_in_sync_group(self):\n    if tensor_util.is_tf_type(self._replica_id_in_sync_group):\n        return self._replica_id_in_sync_group\n    return constant_op.constant(self._replica_id_in_sync_group, dtypes.int32, name='replica_id_in_sync_group')",
    "docstring": "Returns the id of the replica. This identifies the replica among all replicas that are kept in sync. The value of the replica id can range from 0 to - 1. NOTE: This is not guaranteed to be the same ID as the XLA replica ID use for low-level operations such as collective_permute. Returns: a .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\distribute_lib.py",
    "ast_data": "FunctionDef name:replica_id_in_sync_group arg:self arguments arg If Call Return return:yes Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "substitute_all_types",
    "source_code": "def substitute_all_types(graph, mapping):\n    flag = True\n    while flag:\n        flag = False\n        for k in mapping:\n            old_mapping_val = mapping[k]\n            if mapping[k] in mapping.keys():\n                new_key = mapping[k]\n                mapping[k] = mapping[new_key]\n            if old_mapping_val != mapping[k]:\n                flag = True\n    for n in graph.nodes:\n        n.type = substitute_solution_one_type(mapping, n.type)",
    "docstring": "Apply the most general unifier to all types in a graph till reaching a fixed point. If the input and output graph are the same, we converge.",
    "type": "function",
    "file_path": "pytorch\\torch\\fx\\experimental\\unify_refinements.py",
    "ast_data": "FunctionDef name:substitute_all_types arg:graph arg:mapping arguments arg arg Assign While Assign For Assign If Compare Call Assign Assign If Compare Assign For Assign Call"
  },
  {
    "library": "tensorflow",
    "name": "add_intermediate_tensors",
    "source_code": "def add_intermediate_tensors(model_content):\n    return _calibration_wrapper.AddIntermediateTensors(model_content)",
    "docstring": "Adds intermediate tensors to fused op if needed.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\lite\\python\\optimize\\calibrator.py",
    "ast_data": "FunctionDef name:add_intermediate_tensors arg:model_content arguments arg Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "AitoffTransform",
    "source_code": "class AitoffTransform(_GeoTransform):\n\n    def transform_non_affine(self, values):\n        longitude, latitude = values.T\n        half_long = longitude / 2.0\n        cos_latitude = np.cos(latitude)\n        alpha = np.arccos(cos_latitude * np.cos(half_long))\n        sinc_alpha = np.sinc(alpha / np.pi)\n        x = cos_latitude * np.sin(half_long) / sinc_alpha\n        y = np.sin(latitude) / sinc_alpha\n        return np.column_stack([x, y])\n\n    def inverted(self):\n        return AitoffAxes.InvertedAitoffTransform(self._resolution)",
    "docstring": "The base Aitoff transform.",
    "type": "class",
    "file_path": "matplotlib\\lib\\matplotlib\\projections\\geo.py",
    "ast_data": "ClassDef name:AitoffTransform FunctionDef name:transform_non_affine arg:self arg:values arguments arg arg Assign Assign Assign Call Assign Call Call Assign Call Assign Call Assign Call Return return:yes Call FunctionDef name:inverted arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_replace_scale_node",
    "source_code": "def _replace_scale_node(parent, old_value):\n    half = ast.Num(n=0.5)\n    half.lineno = 0\n    half.col_offset = 0\n    new_value = ast.BinOp(left=half, op=ast.Mult(), right=old_value)\n    pasta.ast_utils.replace_child(parent, old_value, new_value)\n    pasta.base.formatting.set(old_value, 'prefix', '(')\n    pasta.base.formatting.set(old_value, 'suffix', ')')",
    "docstring": "Replaces old_value with 0.5*(old_value).",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\tools\\compatibility\\tf_upgrade_v2.py",
    "ast_data": "FunctionDef name:_replace_scale_node arg:parent arg:old_value arguments arg arg Assign Call Assign Assign Assign Call Call Call Call Call"
  },
  {
    "library": "sphinx",
    "name": "write_temporary_file",
    "source_code": "def write_temporary_file(content: str) -> str:\n    import tempfile\n    with tempfile.NamedTemporaryFile('w', encoding='utf-8', suffix='.log', prefix='sphinx-err-', delete=False) as f:\n        f.write(content)\n    return f.name",
    "docstring": "Write content to a temporary file and return the filename.",
    "type": "function",
    "file_path": "sphinx\\sphinx\\_cli\\util\\errors.py",
    "ast_data": "FunctionDef name:write_temporary_file arg:content arguments arg With Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_rfft",
    "source_code": "def _rfft(input_tensor, fft_length=None, name=None):\n    with _ops.name_scope(name, default_name, [input_tensor, fft_length]) as name:\n        input_tensor = _ops.convert_to_tensor(input_tensor, preferred_dtype=_dtypes.float32)\n        if input_tensor.dtype not in (_dtypes.float32, _dtypes.float64):\n            raise ValueError('RFFT requires tf.float32 or tf.float64 inputs, got: %s' % input_tensor)\n        real_dtype = input_tensor.dtype\n        if real_dtype == _dtypes.float32:\n            complex_dtype = _dtypes.complex64\n        else:\n            assert real_dtype == _dtypes.float64\n            complex_dtype = _dtypes.complex128\n        input_tensor.shape.with_rank_at_least(fft_rank)\n        if fft_length is None:\n            fft_length = _infer_fft_length_for_rfft(input_tensor, fft_rank)\n        else:\n            fft_length = _ops.convert_to_tensor(fft_length, _dtypes.int32)\n        input_tensor = _maybe_pad_for_rfft(input_tensor, fft_rank, fft_length)\n        fft_length_static = _tensor_util.constant_value(fft_length)\n        if fft_length_static is not None:\n            fft_length = fft_length_static\n        return fft_fn(input_tensor, fft_length, Tcomplex=complex_dtype, name=name)",
    "docstring": "Wrapper around gen_spectral_ops.rfft* that infers fft_length argument.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\signal\\fft_ops.py",
    "ast_data": "FunctionDef name:_rfft arg:input_tensor arg:fft_length arg:name arguments arg arg arg With Call Assign Call If Compare Raise Call Assign If Compare Assign Compare Assign Call If Compare Assign Call Assign Call Assign Call Assign Call If Compare Assign Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "custom_call_v2",
    "source_code": "def custom_call_v2(call_target_name, operands, result_specs, backend_config=None, has_side_effect=None, name=None):\n    return gen_xla_ops.xla_custom_call_v2(operands=operands, call_target_name=call_target_name, backend_config='' if backend_config is None else backend_config, has_side_effect=False if has_side_effect is None else has_side_effect, result_dtypes=tuple((spec.dtype for spec in result_specs)), result_shapes=tuple((spec.shape for spec in result_specs)), name=name)",
    "docstring": "Emits an HLO operation with multiple outputs. See specification at and specification at Args: call_target_name: Name of the user function. The function signature must conform to version 3 of the API, see . All operands and results assumed to be in the default layout. operands: A sequence of tensors with possibly different types. result_specs: A sequence of tensor specs for all results. backend_config: A string that encodes a metadata for the backend. Empty string by default. has_side_effect: Indicates whether the custom call has side effects. by default. name: Optional name of the operation. Returns: A tuple of output tensors.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\compiler\\tf2xla\\python\\xla.py",
    "ast_data": "FunctionDef name:custom_call_v2 arg:call_target_name arg:operands arg:result_specs arg:backend_config arg:has_side_effect arg:name arguments arg arg arg arg arg arg Return return:yes Call Compare Compare Call Call"
  },
  {
    "library": "kornia",
    "name": "RandomGeneratorBase",
    "source_code": "class RandomGeneratorBase(Module, metaclass=_PostInitInjectionMetaClass):\n    device: Optional[Device] = None\n    dtype: torch.dtype\n\n    def __init__(self) -> None:\n        super().__init__()\n\n    def __post_init__(self) -> None:\n        self.set_rng_device_and_dtype()\n\n    def set_rng_device_and_dtype(self, device: Optional[torch.device]=None, dtype: torch.dtype=torch.float32) -> None:\n        if device is None:\n            device = torch.device('cpu')\n        if self.device != device or self.dtype != dtype:\n            self.make_samplers(device, dtype)\n            self.device = device\n            self.dtype = dtype\n\n    def to(self, *args: Any, **kwargs: Any) -> 'RandomGeneratorBase':\n        device, dtype, _, _ = torch._C._nn._parse_to(*args, **kwargs)\n        self.set_rng_device_and_dtype(device=device, dtype=dtype)\n        return self\n\n    def make_samplers(self, device: torch.device, dtype: torch.dtype) -> None:\n        raise NotImplementedError\n\n    def forward(self, batch_shape: Tuple[int, ...], same_on_batch: bool=False) -> Dict[str, Tensor]:\n        raise NotImplementedError",
    "docstring": "Base class for generating random augmentation parameters.",
    "type": "class",
    "file_path": "kornia\\kornia\\augmentation\\random_generator\\base.py",
    "ast_data": "ClassDef name:RandomGeneratorBase FunctionDef name:__init__ arg:self arguments arg Call Call FunctionDef name:__post_init__ arg:self arguments arg Call FunctionDef name:set_rng_device_and_dtype arg:self arg:device arg:dtype arguments arg arg arg If Compare Assign Call If BoolOp Compare Compare Call Assign Assign FunctionDef name:to arg:self arguments arg arg arg Assign Call Call Return return:yes FunctionDef name:make_samplers arg:self arg:device arg:dtype arguments arg arg arg Raise FunctionDef name:forward arg:self arg:batch_shape arg:same_on_batch arguments arg arg arg Raise"
  },
  {
    "library": "pytorch",
    "name": "register_op",
    "source_code": "def register_op(self, target: TorchOp, function: Callable, is_complex: bool=False) -> None:\n    if isinstance(target, torch._ops.OpOverloadPacket):\n        raise TypeError(f\"Target '{target}' should be provided as an OpOverload instead of an OpOverloadPacket. You can get the default overload with <op>.default\")\n    self._register(target, OnnxDecompMeta(onnx_function=function, fx_target=target, signature=None, is_custom=True, is_complex=is_complex))",
    "docstring": "Registers a custom operator: torch.ops.... Args: target: The PyTorch node callable target. function: The onnx-script function to register. is_complex: Whether the function is a function that handles complex valued inputs.",
    "type": "method",
    "file_path": "pytorch\\torch\\onnx\\_internal\\exporter\\_registration.py",
    "ast_data": "FunctionDef name:register_op arg:self arg:target arg:function arg:is_complex arguments arg arg arg arg If Call Raise Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "parents",
    "source_code": "@property\ndef parents(self):\n    return [self.categorical_column]",
    "docstring": "See 'FeatureColumn` base class.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\feature_column\\feature_column_v2.py",
    "ast_data": "FunctionDef name:parents arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_read_legacy_metadata",
    "source_code": "def _read_legacy_metadata(object_graph_def, metadata):\n    node_paths = _generate_object_paths(object_graph_def)\n    for node_id, proto in enumerate(object_graph_def.nodes):\n        if proto.WhichOneof('kind') == 'user_object' and proto.user_object.identifier in constants.KERAS_OBJECT_IDENTIFIERS:\n            if not proto.user_object.metadata:\n                raise ValueError('Unable to create a Keras model from this SavedModel. This SavedModel was created with `tf.saved_model.save`, and lacks the Keras metadata.Please save your Keras model by calling `model.save`or `tf.keras.models.save_model`.')\n            metadata.nodes.add(node_id=node_id, node_path=node_paths[node_id], version=versions_pb2.VersionDef(producer=1, min_consumer=1, bad_consumers=[]), identifier=proto.user_object.identifier, metadata=proto.user_object.metadata)",
    "docstring": "Builds a KerasMetadata proto from the SavedModel ObjectGraphDef.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\saving\\saved_model\\load.py",
    "ast_data": "FunctionDef name:_read_legacy_metadata arg:object_graph_def arg:metadata arguments arg arg Assign Call For Call If BoolOp Compare Call Compare If Raise Call Call Call"
  },
  {
    "library": "scipy",
    "name": "_broadcast_arrays",
    "source_code": "def _broadcast_arrays(arrays, axis=None, xp=None):\n    arrays = tuple(arrays)\n    if not arrays:\n        return arrays\n    xp = array_namespace(*arrays) if xp is None else xp\n    arrays = [xp.asarray(arr) for arr in arrays]\n    shapes = [arr.shape for arr in arrays]\n    new_shapes = _broadcast_shapes(shapes, axis)\n    if axis is None:\n        new_shapes = [new_shapes] * len(arrays)\n    return [xp.broadcast_to(array, new_shape) for array, new_shape in zip(arrays, new_shapes)]",
    "docstring": "Broadcast shapes of arrays, ignoring incompatibility of specified axes",
    "type": "function",
    "file_path": "scipy\\scipy\\stats\\_axis_nan_policy.py",
    "ast_data": "FunctionDef name:_broadcast_arrays arg:arrays arg:axis arg:xp arguments arg arg arg Assign Call If Return return:yes Assign Compare Call Assign Call Assign Assign Call If Compare Assign Call Return return:yes Call Call"
  },
  {
    "library": "pandas",
    "name": "left",
    "source_code": "@cache_readonly\ndef left(self) -> Index:\n    return Index(self._data.left, copy=False)",
    "docstring": "Return left bounds of the intervals in the IntervalIndex. The left bounds of each interval in the IntervalIndex are returned as an Index. The datatype of the left bounds is the same as the datatype of the endpoints of the intervals. Returns ------- Index An Index containing the left bounds of the intervals. See Also -------- IntervalIndex.right : Return the right bounds of the intervals in the IntervalIndex. IntervalIndex.mid : Return the mid-point of the intervals in the IntervalIndex. IntervalIndex.length : Return the length of the intervals in the IntervalIndex. Examples -------- >>> iv_idx = pd.IntervalIndex.from_arrays([1, 2, 3], [4, 5, 6], closed=\"right\") >>> iv_idx.left Index([1, 2, 3], dtype='int64') >>> iv_idx = pd.IntervalIndex.from_tuples( ... [(1, 4), (2, 5), (3, 6)], closed=\"left\" ... ) >>> iv_idx.left Index([1, 2, 3], dtype='int64')",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\indexes\\interval.py",
    "ast_data": "FunctionDef name:left arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "virtualenv",
    "name": "get_short_path_name",
    "source_code": "def get_short_path_name(long_name):\n    import ctypes\n    from ctypes import wintypes\n    GetShortPathNameW = ctypes.windll.kernel32.GetShortPathNameW\n    GetShortPathNameW.argtypes = [wintypes.LPCWSTR, wintypes.LPWSTR, wintypes.DWORD]\n    GetShortPathNameW.restype = wintypes.DWORD\n    output_buf_size = 0\n    while True:\n        output_buf = ctypes.create_unicode_buffer(output_buf_size)\n        needed = GetShortPathNameW(long_name, output_buf, output_buf_size)\n        if output_buf_size >= needed:\n            return output_buf.value\n        output_buf_size = needed",
    "docstring": "Gets the short path name of a given long path -",
    "type": "function",
    "file_path": "virtualenv\\src\\virtualenv\\util\\path\\_win.py",
    "ast_data": "FunctionDef name:get_short_path_name arg:long_name arguments arg Assign Assign Assign Assign While Assign Call Assign Call If Compare Return return:yes Assign"
  },
  {
    "library": "scrapy",
    "name": "process_results",
    "source_code": "def process_results(self, response: Response, results: Iterable[Any]) -> Iterable[Any]:\n    return results",
    "docstring": "This method has the same purpose as the one in XMLFeedSpider",
    "type": "method",
    "file_path": "scrapy\\scrapy\\spiders\\feed.py",
    "ast_data": "FunctionDef name:process_results arg:self arg:response arg:results arguments arg arg arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "GuardsCheckpointState",
    "source_code": "class GuardsCheckpointState:\n    dynamo_guards: set[Guard] = set()\n\n    def __init__(self, dynamo_guards):\n        self.dynamo_guards = dynamo_guards\n\n    def diff(self, other):\n        r = self.dynamo_guards.difference(other.dynamo_guards)\n        if len(r) == 0:\n            return None\n        return r\n\n    def __eq__(self, other):\n        return self.diff(other) is None",
    "docstring": "The GuardCheckpointState - it is the T of Checkpointable[T] for GuardsContext",
    "type": "class",
    "file_path": "pytorch\\torch\\_guards.py",
    "ast_data": "ClassDef name:GuardsCheckpointState Call FunctionDef name:__init__ arg:self arg:dynamo_guards arguments arg arg Assign FunctionDef name:diff arg:self arg:other arguments arg arg Assign Call If Compare Call Return return:no Return return:yes FunctionDef name:__eq__ arg:self arg:other arguments arg arg Return return:yes Compare Call"
  },
  {
    "library": "tensorflow",
    "name": "_replace_variables_by_constants",
    "source_code": "def _replace_variables_by_constants(converter_data):\n    input_graph = _GraphDef(converter_data.graph_def)\n    for tensor_name, tensor_data in converter_data.tensor_data.items():\n        input_graph.nodes[tensor_name].convert_variable_to_constant(None, tensor_data)\n    converted_graph = input_graph.converted_self().graph_def\n    converted_input_indices = {t.index for t in converter_data.tensor_data.values() if t.index is not None}\n    return (converted_graph, converted_input_indices)",
    "docstring": "Replaces variables by constants on a given graph. Given a _ConverterData instance with converted variables in its tensor_data field, create a new graph where the respective variables are replaced with the converted constants. Args: converter_data: A pre-populated _ConverterData instance. Returns: The converted graph.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\convert_to_constants.py",
    "ast_data": "FunctionDef name:_replace_variables_by_constants arg:converter_data arguments arg Assign Call For Call Call Assign Call Assign Call Compare Return return:yes"
  },
  {
    "library": "kornia",
    "name": "normalize_quaternion",
    "source_code": "def normalize_quaternion(quaternion: Tensor, eps: float=1e-12) -> Tensor:\n    if not isinstance(quaternion, Tensor):\n        raise TypeError(f'Input type is not a Tensor. Got {type(quaternion)}')\n    if not quaternion.shape[-1] == 4:\n        raise ValueError(f'Input must be a tensor of shape (*, 4). Got {quaternion.shape}')\n    return F.normalize(quaternion, p=2.0, dim=-1, eps=eps)",
    "docstring": "Normalize a quaternion. The quaternion should be in (x, y, z, w) or (w, x, y, z) format. Args: quaternion: a tensor containing a quaternion to be normalized. The tensor can be of shape :math:. eps: small value to avoid division by zero. Return: the normalized quaternion of shape :math:. Example: >>> quaternion = tensor((1., 0., 1., 0.)) >>> normalize_quaternion(quaternion) tensor([0.7071, 0.0000, 0.7071, 0.0000])",
    "type": "function",
    "file_path": "kornia\\kornia\\geometry\\conversions.py",
    "ast_data": "FunctionDef name:normalize_quaternion arg:quaternion arg:eps arguments arg arg If Call Raise Call Call If Compare Raise Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "create_session",
    "source_code": "def create_session(self):\n    self.tf_sess = self._session_creator.create_session()\n    self.coord = coordinator.Coordinator(clean_stop_exception_types=[])\n    if ops.get_collection(ops.GraphKeys.QUEUE_RUNNERS):\n        queue_runner.start_queue_runners(sess=self.tf_sess, coord=self.coord)\n    for hook in self._hooks:\n        hook.after_create_session(self.tf_sess, self.coord)\n    return _CoordinatedSession(_HookedSession(self.tf_sess, self._hooks), self.coord, self._stop_grace_period_secs)",
    "docstring": "Creates a coordinated session.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\training\\monitored_session.py",
    "ast_data": "FunctionDef name:create_session arg:self arguments arg Assign Call Assign Call If Call Call For Call Return return:yes Call Call"
  },
  {
    "library": "django",
    "name": "DatabaseErrorWrapper",
    "source_code": "class DatabaseErrorWrapper:\n\n    def __init__(self, wrapper):\n        self.wrapper = wrapper\n\n    def __del__(self):\n        del self.wrapper\n\n    def __enter__(self):\n        pass\n\n    def __exit__(self, exc_type, exc_value, traceback):\n        if exc_type is None:\n            return\n        for dj_exc_type in (DataError, OperationalError, IntegrityError, InternalError, ProgrammingError, NotSupportedError, DatabaseError, InterfaceError, Error):\n            db_exc_type = getattr(self.wrapper.Database, dj_exc_type.__name__)\n            if issubclass(exc_type, db_exc_type):\n                dj_exc_value = dj_exc_type(*exc_value.args)\n                if dj_exc_type not in (DataError, IntegrityError):\n                    self.wrapper.errors_occurred = True\n                raise dj_exc_value.with_traceback(traceback) from exc_value\n\n    def __call__(self, func):\n\n        def inner(*args, **kwargs):\n            with self:\n                return func(*args, **kwargs)\n        return inner",
    "docstring": "Context manager and decorator that reraises backend-specific database exceptions using Django's common wrappers.",
    "type": "class",
    "file_path": "django\\django\\db\\utils.py",
    "ast_data": "ClassDef name:DatabaseErrorWrapper FunctionDef name:__init__ arg:self arg:wrapper arguments arg arg Assign FunctionDef name:__del__ arg:self arguments arg FunctionDef name:__enter__ arg:self arguments arg FunctionDef name:__exit__ arg:self arg:exc_type arg:exc_value arg:traceback arguments arg arg arg arg If Compare Return return:no For Assign Call If Call Assign Call If Compare Assign Raise Call FunctionDef name:__call__ arg:self arg:func arguments arg arg FunctionDef name:inner arguments arg arg With Return return:yes Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "random_crop",
    "source_code": "@tf_export('image.random_crop', v1=['image.random_crop', 'random_crop'])\n@dispatch.add_dispatch_support\n@deprecation.deprecated_endpoints('random_crop')\ndef random_crop(value, size, seed=None, name=None):\n    with ops.name_scope(name, 'random_crop', [value, size]) as name:\n        value = ops.convert_to_tensor(value, name='value')\n        size = ops.convert_to_tensor(size, dtype=dtypes.int32, name='size')\n        shape = array_ops.shape(value)\n        check = control_flow_assert.Assert(math_ops.reduce_all(shape >= size), ['Need value.shape >= size, got ', shape, size], summarize=1000)\n        shape = control_flow_ops.with_dependencies([check], shape)\n        limit = shape - size + 1\n        offset = random_ops.random_uniform(array_ops.shape(shape), dtype=size.dtype, maxval=size.dtype.max, seed=seed) % limit\n        return array_ops.slice(value, offset, size, name=name)",
    "docstring": "Randomly crops a tensor to a given size. Slices a shape portion out of at a uniformly chosen offset. Requires . If a dimension should not be cropped, pass the full size of that dimension. For example, RGB images can be cropped with . Example usage: >>> image = [[1, 2, 3], [4, 5, 6]] >>> result = tf.image.random_crop(value=image, size=(1, 3)) >>> result.shape.as_list() [1, 3] For producing deterministic results given a value, use . Unlike using the param with ops, ops guarantee the same results given the same seed independent of how many times the function is called, and independent of global seed settings (e.g. tf.random.set_seed). Args: value: Input tensor to crop. size: 1-D tensor with size the rank of . seed: Python integer. Used to create a random seed. See for behavior. name: A name for this operation (optional). Returns: A cropped tensor of the same rank as and shape .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\random_crop_ops.py",
    "ast_data": "FunctionDef name:random_crop arg:value arg:size arg:seed arg:name arguments arg arg arg arg With Call Assign Call Assign Call Assign Call Assign Call Call Compare Assign Call Assign Assign Call Call Return return:yes Call Call Call"
  },
  {
    "library": "matplotlib",
    "name": "start",
    "source_code": "def start(self, tag, attrib={}, **extra):\n    self.__flush()\n    tag = _escape_cdata(tag)\n    self.__data = []\n    self.__tags.append(tag)\n    self.__write(self.__indentation[:len(self.__tags) - 1])\n    self.__write(f'<{tag}')\n    for k, v in {**attrib, **extra}.items():\n        if v:\n            k = _escape_cdata(k)\n            v = _quote_escape_attrib(v)\n            self.__write(f' {k}={v}')\n    self.__open = 1\n    return len(self.__tags) - 1",
    "docstring": "Open a new element. Attributes can be given as keyword arguments, or as a string/string dictionary. The method returns an opaque identifier that can be passed to the :meth: method, to close all open elements up to and including this one. Parameters ---------- tag Element tag. attrib Attribute dictionary. Alternatively, attributes can be given as keyword arguments. Returns ------- An element identifier.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\backends\\backend_svg.py",
    "ast_data": "FunctionDef name:start arg:self arg:tag arg:attrib arguments arg arg arg arg Call Assign Call Assign Call Call Call Call For Call If Assign Call Assign Call Call Assign Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "__getitem__",
    "source_code": "def __getitem__(self, index):\n    rank = self.rank\n    if isinstance(index, slice):\n        if index.step is not None and index.step != 1:\n            raise IndexError('Cannot stride through a shape')\n        start = index.start\n        stop = index.stop\n        if start is None:\n            start = 0\n        start = _fix_start_index(start, rank, self.num_row_partitions)\n        stop = _fix_stop_index(stop, rank)\n        return self._slice_shape(start, stop)\n    elif isinstance(index, int):\n        if index < 0:\n            if rank is None:\n                raise ValueError('Rank must be known to use __getitem__ with a negative index.')\n            return self._dimension(rank + index)\n        return self._dimension(index)\n    else:\n        raise TypeError('Argument is not an int or a slice')",
    "docstring": "Returns a dimension or a slice of the shape. Ragged shapes can have ragged dimensions that depend upon other dimensions. Therefore, if you ask for a dimension that is ragged, this function returns a ValueError. For similar reasons, if a slice is selected that includes a ragged dimension without including the zero dimension, then this fails. Any slice that does not start at zero will return a shape with num_row_partitions == 0. Args: index: the index: can be an int or a slice. Raises: IndexError: if the index is not in range. ValueError: if the rank is unknown, or a ragged rank is requested incorrectly.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\dynamic_ragged_shape.py",
    "ast_data": "FunctionDef name:__getitem__ arg:self arg:index arguments arg arg Assign If Call If BoolOp Compare Compare Raise Call Assign Assign If Compare Assign Assign Call Assign Call Return return:yes Call If Call If Compare If Compare Raise Call Return return:yes Call Return return:yes Call Raise Call"
  },
  {
    "library": "django",
    "name": "covers",
    "source_code": "def covers(self, other):\n    return capi.geos_covers(self.ptr, other.ptr)",
    "docstring": "Return True if the DE-9IM Intersection Matrix for the two geometries is T*****FF*, *T****FF*, ***T**FF*, or ****T*FF*. If either geometry is empty, return False.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\geos\\geometry.py",
    "ast_data": "FunctionDef name:covers arg:self arg:other arguments arg arg Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "_truncate_vertically",
    "source_code": "def _truncate_vertically(self) -> None:\n    assert self.max_rows_fitted is not None\n    row_num = self.max_rows_fitted // 2\n    if row_num >= 1:\n        _len = len(self.tr_frame)\n        _slice = np.hstack([np.arange(row_num), np.arange(_len - row_num, _len)])\n        self.tr_frame = self.tr_frame.iloc[_slice]\n    else:\n        row_num = cast(int, self.max_rows)\n        self.tr_frame = self.tr_frame.iloc[:row_num, :]\n    self.tr_row_num = row_num",
    "docstring": "Remove rows, which are not to be displayed. Attributes affected: - tr_frame - tr_row_num",
    "type": "method",
    "file_path": "pandas\\pandas\\io\\formats\\format.py",
    "ast_data": "FunctionDef name:_truncate_vertically arg:self arguments arg Compare Assign If Compare Assign Call Assign Call Call Call Assign Assign Call Assign Assign"
  },
  {
    "library": "tensorflow",
    "name": "_compute_gpu_options",
    "source_code": "def _compute_gpu_options(self):\n    return self._compute_device_options(device_type='GPU')",
    "docstring": "Build the GPUOptions proto for GPU.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\context.py",
    "ast_data": "FunctionDef name:_compute_gpu_options arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "_unpack",
    "source_code": "def _unpack(self, packed_parameters):\n    for i in range(self.n_layers_ - 1):\n        start, end, shape = self._coef_indptr[i]\n        self.coefs_[i] = np.reshape(packed_parameters[start:end], shape)\n        start, end = self._intercept_indptr[i]\n        self.intercepts_[i] = packed_parameters[start:end]",
    "docstring": "Extract the coefficients and intercepts from packed_parameters.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\neural_network\\_multilayer_perceptron.py",
    "ast_data": "FunctionDef name:_unpack arg:self arg:packed_parameters arguments arg arg For Call Assign Assign Call Assign Assign"
  },
  {
    "library": "pytorch",
    "name": "contains",
    "source_code": "def contains(self, other: LiveRange):\n    return self.begin <= other.begin and other.end <= self.end",
    "docstring": "Is other entirely within self",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\codegen\\memory_planning.py",
    "ast_data": "FunctionDef name:contains arg:self arg:other arguments arg arg Return return:yes BoolOp Compare Compare"
  },
  {
    "library": "pytorch",
    "name": "_init_intra_node_process_group",
    "source_code": "@no_type_check\ndef _init_intra_node_process_group(num_devices_per_node: int) -> dist.ProcessGroup:\n    intra_node_subgroup, _ = dist.new_subgroups(num_devices_per_node)\n    return intra_node_subgroup",
    "docstring": "Return a process group across the current node. For example, given each row is a distinct node: 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 This API would return an intra-node subgroup across [0, 1, ..., 7] or [8, 9, ..., 15] depending on the process's rank. For example, rank 3 would get [0, 1, ..., 7].",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\fsdp\\_init_utils.py",
    "ast_data": "FunctionDef name:_init_intra_node_process_group arg:num_devices_per_node arguments arg Assign Call Return return:yes"
  },
  {
    "library": "scipy",
    "name": "_dhtm",
    "source_code": "def _dhtm(mag, xp):\n    sig = xp.zeros(mag.shape[0])\n    midpt = mag.shape[0] // 2\n    sig[1:midpt] = 1\n    sig[midpt + 1:] = -1\n    recon = xp.real(ifft(mag * xp.exp(fft(sig * ifft(xp.log(mag))))))\n    return recon",
    "docstring": "Compute the modified 1-D discrete Hilbert transform Parameters ---------- mag : ndarray The magnitude spectrum. Should be 1-D with an even length, and preferably a fast length for FFT/IFFT.",
    "type": "function",
    "file_path": "scipy\\scipy\\signal\\_fir_filter_design.py",
    "ast_data": "FunctionDef name:_dhtm arg:mag arg:xp arguments arg arg Assign Call Assign Assign Assign Assign Call Call Call Call Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "set_visible_devices",
    "source_code": "@tf_export('config.set_visible_devices', 'config.experimental.set_visible_devices')\n@deprecation.deprecated_endpoints('config.experimental.set_visible_devices')\ndef set_visible_devices(devices, device_type=None):\n    context.context().set_visible_devices(devices, device_type)",
    "docstring": "Set the list of visible devices. Specifies which objects are visible to the runtime. TensorFlow will only allocate memory and place operations on visible physical devices, as otherwise no will be created on them. By default all discovered devices are marked as visible. The following example demonstrates disabling the first GPU on the machine. >>> physical_devices = tf.config.list_physical_devices('GPU') >>> try: ... # Disable first GPU ... tf.config.set_visible_devices(physical_devices[1:], 'GPU') ... logical_devices = tf.config.list_logical_devices('GPU') ... # Logical device was not created for first GPU ... assert len(logical_devices) == len(physical_devices) - 1 ... except: ... # Invalid device or cannot modify virtual devices once initialized. ... pass Args: devices: List of s to make visible device_type: (optional) Only configure devices matching this device type. For example \"CPU\" or \"GPU\". Other devices will be left unaltered. Raises: ValueError: If argument validation fails. RuntimeError: Runtime is already initialized.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\config.py",
    "ast_data": "FunctionDef name:set_visible_devices arg:devices arg:device_type arguments arg arg Call Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "call_with_unspecified_conversion_status",
    "source_code": "def call_with_unspecified_conversion_status(func):\n\n    def wrapper(*args, **kwargs):\n        with ag_ctx.ControlStatusCtx(status=ag_ctx.Status.UNSPECIFIED):\n            return func(*args, **kwargs)\n    if inspect.isfunction(func) or inspect.ismethod(func):\n        wrapper = functools.update_wrapper(wrapper, func)\n    return autograph_artifact(wrapper)",
    "docstring": "Decorator that resets the conversion context to the unspecified status.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\autograph\\impl\\api.py",
    "ast_data": "FunctionDef name:call_with_unspecified_conversion_status arg:func arguments arg FunctionDef name:wrapper arguments arg arg With Call Return return:yes Call If BoolOp Call Call Assign Call Return return:yes Call"
  },
  {
    "library": "authlib",
    "name": "validate_claims_supported",
    "source_code": "def validate_claims_supported(self):\n    validate_array_value(self, 'claims_supported')",
    "docstring": "RECOMMENDED. JSON array containing a list of the Claim Names of the Claims that the OpenID Provider MAY be able to supply values for. Note that for privacy or other reasons, this might not be an exhaustive list.",
    "type": "method",
    "file_path": "authlib\\authlib\\oidc\\discovery\\models.py",
    "ast_data": "FunctionDef name:validate_claims_supported arg:self arguments arg Call"
  },
  {
    "library": "pytorch",
    "name": "offsets_to_lengths",
    "source_code": "def offsets_to_lengths(offsets: torch.Tensor, device: Union[str, torch.device]) -> torch.tensor:\n    lengths = offsets[1:] - offsets[:-1]\n    return lengths",
    "docstring": "Converts a list of offsets to a list of lengths. Reverse op of attn_gym.masks.document_mask.length_to_offsets Args: offsets: A 1D tensor of offsets device: The device to place the output tensor on",
    "type": "function",
    "file_path": "pytorch\\benchmarks\\transformer\\score_mod.py",
    "ast_data": "FunctionDef name:offsets_to_lengths arg:offsets arg:device arguments arg arg Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "register_tf_checkpoint_saver",
    "source_code": "def register_tf_checkpoint_saver(name=None, predicate=None, save_fn=None, restore_fn=None, strict_predicate_restore=True):\n    return register_checkpoint_saver(package='tf', name=name, predicate=predicate, save_fn=save_fn, restore_fn=restore_fn, strict_predicate_restore=strict_predicate_restore)",
    "docstring": "See the docstring for .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\saved_model\\registration\\__init__.py",
    "ast_data": "FunctionDef name:register_tf_checkpoint_saver arg:name arg:predicate arg:save_fn arg:restore_fn arg:strict_predicate_restore arguments arg arg arg arg arg Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "_rmatmat",
    "source_code": "def _rmatmat(self, X):\n    if type(self)._adjoint == LinearOperator._adjoint:\n        return np.hstack([self.rmatvec(col.reshape(-1, 1)) for col in X.T])\n    else:\n        return self.H.matmat(X)",
    "docstring": "Default implementation of _rmatmat defers to rmatvec or adjoint.",
    "type": "method",
    "file_path": "scipy\\scipy\\sparse\\linalg\\_interface.py",
    "ast_data": "FunctionDef name:_rmatmat arg:self arg:X arguments arg arg If Compare Call Return return:yes Call Call Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "DistributedIteratorInterface",
    "source_code": "@tf_export('distribute.DistributedIterator', v1=[])\nclass DistributedIteratorInterface(Iterator):\n\n    def get_next(self):\n        raise NotImplementedError('DistributedIterator.get_next() must be implemented in descendants.')\n\n    @property\n    def element_spec(self):\n        raise NotImplementedError('DistributedIterator.element_spec() must be implemented in descendants')\n\n    def get_next_as_optional(self):\n        raise NotImplementedError('get_next_as_optional() not implemented in descendants')",
    "docstring": "An iterator over . is the primary mechanism for enumerating elements of a . It supports the Python Iterator protocol, which means it can be iterated over using a for-loop or by fetching individual elements explicitly via . You can create a by calling on a or creating a python loop over a . Visit the [tutorial]( on distributed input for more examples and caveats.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\types\\distribute.py",
    "ast_data": "ClassDef name:DistributedIteratorInterface FunctionDef name:get_next arg:self arguments arg Raise Call FunctionDef name:element_spec arg:self arguments arg Raise Call FunctionDef name:get_next_as_optional arg:self arguments arg Raise Call Call"
  },
  {
    "library": "tensorflow",
    "name": "true_fn",
    "source_code": "def true_fn(control_inputs, body_pfor, body_output, stacked):\n    converted_control_inp = []\n    for x in control_inputs:\n        for t in x.outputs:\n            converted_control_inp.append(body_pfor._convert_helper(t).t)\n    if stacked:\n        output = body_pfor.convert(body_output)\n    else:\n        output, convert_stacked, _ = body_pfor._convert_helper(body_output)\n        assert convert_stacked == stacked, body_output\n    with ops.control_dependencies(converted_control_inp):\n        return array_ops.identity(output)",
    "docstring": "Converts the body function for all but last iteration. This essentially converts body_output. Additionally, it needs to handle any control dependencies on the NextIteration node. So it creates another Identity node with the converted dependencies.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\parallel_for\\pfor.py",
    "ast_data": "FunctionDef name:true_fn arg:control_inputs arg:body_pfor arg:body_output arg:stacked arguments arg arg arg arg Assign For For Call Call If Assign Call Assign Call Compare With Call Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "mean",
    "source_code": "def mean(x: Array, /, *, axis: int | tuple[int, ...] | None=None, keepdims: bool=False, xp: ModuleType | None=None) -> Array:\n    if xp is None:\n        xp = array_namespace(x)\n    if xp.isdtype(x.dtype, 'complex floating'):\n        x_real = xp.real(x)\n        x_imag = xp.imag(x)\n        mean_real = xp.mean(x_real, axis=axis, keepdims=keepdims)\n        mean_imag = xp.mean(x_imag, axis=axis, keepdims=keepdims)\n        return mean_real + mean_imag * xp.asarray(1j)\n    return xp.mean(x, axis=axis, keepdims=keepdims)",
    "docstring": "Complex mean,",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\externals\\array_api_extra\\_lib\\_utils\\_helpers.py",
    "ast_data": "FunctionDef name:mean arguments arg arg arg arg If Compare Assign Call If Call Assign Call Assign Call Assign Call Assign Call Return return:yes Call Return return:yes Call"
  },
  {
    "library": "sphinx",
    "name": "path_stabilize",
    "source_code": "def path_stabilize(filepath: str | os.PathLike[str], /) -> str:\n    new_path = canon_path(filepath)\n    return unicodedata.normalize('NFC', new_path)",
    "docstring": "Normalize path separator and unicode string",
    "type": "function",
    "file_path": "sphinx\\sphinx\\util\\osutil.py",
    "ast_data": "FunctionDef name:path_stabilize arguments arg Assign Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "python_graph",
    "source_code": "@property\ndef python_graph(self):\n    return self._python_graph",
    "docstring": "Get the Python graph. Returns: If the Python graph has been set, returns a object. Otherwise, returns None.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\debug\\lib\\debug_data.py",
    "ast_data": "FunctionDef name:python_graph arg:self arguments arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "__init__",
    "source_code": "def __init__(self, path: Union[str, os.PathLike], single_file_per_rank: bool=True, sync_files: bool=True, thread_count: int=1, per_thread_copy_ahead: int=10000000, cache_staged_state_dict: bool=False, overwrite: bool=True, _extensions: Optional[Sequence[StreamTransformExtension]]=None, serialization_format: SerializationFormat=SerializationFormat.TORCH_SAVE) -> None:\n    _FileSystemWriter.__init__(self, path=path, single_file_per_rank=single_file_per_rank, sync_files=sync_files, thread_count=thread_count, per_thread_copy_ahead=per_thread_copy_ahead, overwrite=overwrite, _extensions=_extensions, serialization_format=serialization_format)\n    BlockingAsyncStager.__init__(self, cache_staged_state_dict=cache_staged_state_dict)",
    "docstring": "Initialize the writer pointing to . Args: path: directory where the checkpoint will be written to. single_file_per_rank: Produce one file per rank instead of one file per tensor/blob. Default to True. sync_files : force files to be synced to permanent storage. Default to True. thread_count: Number of IO threads to use to write. Default to 1. per_thread_copy_ahead: How many bytes to copy from the GPU ahead of saving then. Default 10Mb. cache_staged_state_dict: Whether to cache the staged state_dict. This option decreases staging latency at the cost of increases memory usage. Additionally, if this parameter is set to True, it's the expectation that the stager is maintained and re-used for multiple dcp.async_save calls. Default to False. overwrite: Whether to allow overwriting existing checkpoints. Defaults to True. _extensions: Extensions to apply to output streams (EXPERIMENTAL) N. B. If sync_files is disabled, there's no guarantee that the checkpoint will be consistent in the case of a failure.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\checkpoint\\filesystem.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:path arg:single_file_per_rank arg:sync_files arg:thread_count arg:per_thread_copy_ahead arg:cache_staged_state_dict arg:overwrite arg:_extensions arg:serialization_format arguments arg arg arg arg arg arg arg arg arg arg Call Call"
  },
  {
    "library": "scipy",
    "name": "_expm_multiply_interval_core_0",
    "source_code": "def _expm_multiply_interval_core_0(A, X, h, mu, q, norm_info, tol, ell, n0):\n    if norm_info.onenorm() == 0:\n        m_star, s = (0, 1)\n    else:\n        norm_info.set_scale(1.0 / q)\n        m_star, s = _fragment_3_1(norm_info, n0, tol, ell=ell)\n        norm_info.set_scale(1)\n    for k in range(q):\n        X[k + 1] = _expm_multiply_simple_core(A, X[k], h, mu, m_star, s)\n    return (X, 0)",
    "docstring": "A helper function, for the case q <= s.",
    "type": "function",
    "file_path": "scipy\\scipy\\sparse\\linalg\\_expm_multiply.py",
    "ast_data": "FunctionDef name:_expm_multiply_interval_core_0 arg:A arg:X arg:h arg:mu arg:q arg:norm_info arg:tol arg:ell arg:n0 arguments arg arg arg arg arg arg arg arg arg If Compare Call Assign Call Assign Call Call For Call Assign Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "codegen_reduction_numels",
    "source_code": "def codegen_reduction_numels(self, buffer: IndentedBuffer) -> None:\n    reduction_trees = [tree for tree in self.range_trees if tree.is_reduction]\n    rnumel = ' * '.join(sorted((f'{tree.prefix}numel' for tree in reduction_trees)))\n    buffer.splice(f'rnumel = {self.kexpr(rnumel)}')\n    rn_blocks = [TritonSymbols.block_sizes[tree.symt] for tree in self.range_trees if tree.is_reduction]\n    rblock = sympy_product(rn_blocks)\n    buffer.splice(f'RBLOCK: tl.constexpr = {self.kexpr(rblock)}')",
    "docstring": "Generates code that flattens ND reduction numels, block sizes, etc. into 1D.",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\codegen\\triton.py",
    "ast_data": "FunctionDef name:codegen_reduction_numels arg:self arg:buffer arguments arg arg Assign Assign Call Call Call Call Assign Assign Call Call Call"
  },
  {
    "library": "scikit-learn",
    "name": "theta",
    "source_code": "@property\ndef theta(self):\n    return self.kernel.theta",
    "docstring": "Returns the (flattened, log-transformed) non-fixed hyperparameters. Note that theta are typically the log-transformed values of the kernel's hyperparameters as this representation of the search space is more amenable for hyperparameter search, as hyperparameters like length-scales naturally live on a log-scale. Returns ------- theta : ndarray of shape (n_dims,) The non-fixed, log-transformed hyperparameters of the kernel",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\gaussian_process\\kernels.py",
    "ast_data": "FunctionDef name:theta arg:self arguments arg Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "__init__",
    "source_code": "def __init__(self, tail_width=0.3, shrink_factor=0.5):\n    self.tail_width = tail_width\n    self.shrink_factor = shrink_factor\n    super().__init__()",
    "docstring": "Parameters ---------- tail_width : float, default: 0.3 Width of the tail. shrink_factor : float, default: 0.5 Fraction of the arrow width at the middle point.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\patches.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:tail_width arg:shrink_factor arguments arg arg arg Assign Assign Call Call"
  },
  {
    "library": "django",
    "name": "find",
    "source_code": "def find(self, path, find_all=False, **kwargs):\n    if kwargs:\n        find_all = self._check_deprecated_find_param(find_all=find_all, **kwargs)\n    matches = []\n    for app in self.apps:\n        app_location = self.storages[app].location\n        if app_location not in searched_locations:\n            searched_locations.append(app_location)\n        match = self.find_in_app(app, path)\n        if match:\n            if not find_all:\n                return match\n            matches.append(match)\n    return matches",
    "docstring": "Look for files in the app directories.",
    "type": "method",
    "file_path": "django\\django\\contrib\\staticfiles\\finders.py",
    "ast_data": "FunctionDef name:find arg:self arg:path arg:find_all arguments arg arg arg arg If Assign Call Assign For Assign If Compare Call Assign Call If If Return return:yes Call Return return:yes"
  },
  {
    "library": "pandas",
    "name": "_build_tree",
    "source_code": "def _build_tree(self) -> bytes:\n    from lxml.etree import Element, SubElement, tostring\n    self.root = Element(f'{self.prefix_uri}{self.root_name}', nsmap=self.namespaces)\n    for d in self.frame_dicts.values():\n        elem_row = SubElement(self.root, f'{self.prefix_uri}{self.row_name}')\n        if not self.attr_cols and (not self.elem_cols):\n            self.elem_cols = list(d.keys())\n            self._build_elems(d, elem_row)\n        else:\n            elem_row = self._build_attribs(d, elem_row)\n            self._build_elems(d, elem_row)\n    self.out_xml = tostring(self.root, pretty_print=self.pretty_print, method='xml', encoding=self.encoding, xml_declaration=self.xml_declaration)\n    if self.stylesheet is not None:\n        self.out_xml = self._transform_doc()\n    return self.out_xml",
    "docstring": "Build tree from data. This method initializes the root and builds attributes and elements with optional namespaces.",
    "type": "method",
    "file_path": "pandas\\pandas\\io\\formats\\xml.py",
    "ast_data": "FunctionDef name:_build_tree arg:self arguments arg Assign Call For Call Assign Call If BoolOp Assign Call Call Call Assign Call Call Assign Call If Compare Assign Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_maybe_insert_input_equalization_observers_for_node",
    "source_code": "def _maybe_insert_input_equalization_observers_for_node(node: Node, equalization_qconfig: Any, model: torch.nn.Module, named_modules: dict[str, torch.nn.Module], graph: Graph, is_branch: bool) -> None:\n    if equalization_qconfig is None or not node_supports_equalization(node, named_modules):\n        return\n    if is_branch:\n        warnings.warn(f'Cannot equalize {node} because it is part of a branch.')\n        return\n    new_args = []\n    for arg in node.args:\n        if not isinstance(arg, Node) or node_arg_is_bias(node, arg):\n            new_args.append(arg)\n            continue\n        is_weight = node_arg_is_weight(node, arg)\n        act_eq_process_ctr = equalization_qconfig.weight if is_weight else equalization_qconfig.input_activation\n        new_eq_obs_mod = act_eq_process_ctr()\n        new_eq_obs_node = _insert_obs_or_fq(arg, new_eq_obs_mod, model, named_modules, graph)\n        new_args.append(new_eq_obs_node)\n    node.args = tuple(new_args)",
    "docstring": "If needs to be equalized, find the input/weight observers it needs in , creates them, and inserts it into . If does not need an equalization observer, returns None.",
    "type": "function",
    "file_path": "pytorch\\torch\\ao\\quantization\\fx\\prepare.py",
    "ast_data": "FunctionDef name:_maybe_insert_input_equalization_observers_for_node arg:node arg:equalization_qconfig arg:model arg:named_modules arg:graph arg:is_branch arguments arg arg arg arg arg arg If BoolOp Compare Call Return return:no If Call Return return:no Assign For If BoolOp Call Call Call Assign Call Assign Assign Call Assign Call Call Assign Call"
  },
  {
    "library": "tensorflow",
    "name": "get_float_list",
    "source_code": "def get_float_list(self, min_length=_MIN_LENGTH, max_length=_MAX_LENGTH):\n    length = self.get_int(min_length, max_length)\n    return self.fdp.ConsumeFloatListInRange(length, _MIN_FLOAT, _MAX_FLOAT)",
    "docstring": "Consume a float list with given constraints. Args: min_length: The minimum length of the list. max_length: The maximum length of the list. Returns: Consumed integer list based on input bytes and constraints.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\security\\fuzzing\\python_fuzzing.py",
    "ast_data": "FunctionDef name:get_float_list arg:self arg:min_length arg:max_length arguments arg arg arg Assign Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "flush",
    "source_code": "def flush(self):\n    with ops.device('cpu:0'):\n        return gen_summary_ops.flush_summary_writer(self._resource)",
    "docstring": "See .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\summary_ops_v2.py",
    "ast_data": "FunctionDef name:flush arg:self arguments arg With Call Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "requires_vector_input",
    "source_code": "@property\ndef requires_vector_input(self):\n    return True",
    "docstring": "Returns whether the kernel is defined on fixed-length feature vectors or generic objects. Defaults to True for backward compatibility.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\gaussian_process\\kernels.py",
    "ast_data": "FunctionDef name:requires_vector_input arg:self arguments arg Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "_get_predictions",
    "source_code": "def _get_predictions(self, X, *, output_method):\n    check_is_fitted(self)\n    X = validate_data(self, X, accept_sparse=True, reset=False)\n    Y_output_chain = np.zeros((X.shape[0], len(self.estimators_)))\n    Y_feature_chain = np.zeros((X.shape[0], len(self.estimators_)))\n    chain_method = getattr(self, 'chain_method_', 'predict')\n    hstack = sp.hstack if sp.issparse(X) else np.hstack\n    for chain_idx, estimator in enumerate(self.estimators_):\n        previous_predictions = Y_feature_chain[:, :chain_idx]\n        if sp.issparse(X) and (not sp.isspmatrix(X)) and (X.format == 'dok'):\n            X = sp.coo_array(X)\n        X_aug = hstack((X, previous_predictions))\n        feature_predictions, _ = _get_response_values(estimator, X_aug, response_method=chain_method)\n        Y_feature_chain[:, chain_idx] = feature_predictions\n        output_predictions, _ = _get_response_values(estimator, X_aug, response_method=output_method)\n        Y_output_chain[:, chain_idx] = output_predictions\n    inv_order = np.empty_like(self.order_)\n    inv_order[self.order_] = np.arange(len(self.order_))\n    Y_output = Y_output_chain[:, inv_order]\n    return Y_output",
    "docstring": "Get predictions for each model in the chain.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\multioutput.py",
    "ast_data": "FunctionDef name:_get_predictions arg:self arg:X arguments arg arg arg Call Assign Call Assign Call Call Assign Call Call Assign Call Assign Call For Call Assign If BoolOp Call Call Compare Assign Call Assign Call Assign Call Assign Assign Call Assign Assign Call Assign Call Call Assign Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "get_rule",
    "source_code": "def get_rule(self, py_op: torch._ops.OpOverloadPacket) -> TypePromotionRule | None:\n    return self._rule_table.get(str(py_op), None)",
    "docstring": "Get type promotion rule for a python op under 'torch.ops.'.",
    "type": "method",
    "file_path": "pytorch\\torch\\onnx\\_internal\\fx\\passes\\type_promotion.py",
    "ast_data": "FunctionDef name:get_rule arg:self arg:py_op arguments arg arg Return return:yes Call Call"
  },
  {
    "library": "django",
    "name": "MessageEncoder",
    "source_code": "class MessageEncoder(json.JSONEncoder):\n    message_key = '__json_message'\n\n    def default(self, obj):\n        if isinstance(obj, Message):\n            is_safedata = 1 if isinstance(obj.message, SafeData) else 0\n            message = [self.message_key, is_safedata, obj.level, obj.message]\n            if obj.extra_tags is not None:\n                message.append(obj.extra_tags)\n            return message\n        return super().default(obj)",
    "docstring": "Compactly serialize instances of the `` class as JSON.",
    "type": "class",
    "file_path": "django\\django\\contrib\\messages\\storage\\cookie.py",
    "ast_data": "ClassDef name:MessageEncoder Assign FunctionDef name:default arg:self arg:obj arguments arg arg If Call Assign Call Assign If Compare Call Return return:yes Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "filter_kernels",
    "source_code": "def filter_kernels(kernels: list[dict[str, tuple[str, str]]], condition: str) -> list[dict[str, tuple[str, str]]]:\n    if condition.startswith('id:'):\n        i = condition.removeprefix('id:')\n        return [v for v in kernels if v[KERNEL_ID_FIELD][0] == i]\n    if condition.startswith('name:'):\n        r = condition.removeprefix('name:')\n        return [v for v in kernels if re.search(r, v[KERNEL_NAME_FIELD][0])]\n    if condition.startswith('after:'):\n        r = condition.removeprefix('after:')\n        sub = filter_kernels(kernels, r)\n        if not sub:\n            logging.warning(\"no kernels matched '%s', 'after:' has no effect\", r)\n            return kernels\n        after_id = sub[-1][KERNEL_ID_FIELD][0]\n        return list(itertools.dropwhile(lambda v: v[KERNEL_ID_FIELD][0] != after_id, kernels))[1:]\n    raise app.UsageError(f'unsupported filter: {condition}')",
    "docstring": "Filters kernels by a condition. Args: kernels: list of kernel tuples, extracted from ncu-rep CSV condition: filter condition. Supported filter expressions: 'id:' - kernel with an ID 'name:' - kernel with a name matching the regex 'after:' - kernels after the last kernel matching the filter. Returns: matching kernels",
    "type": "function",
    "file_path": "tensorflow\\third_party\\xla\\xla\\backends\\gpu\\codegen\\tools\\ncu_rep_lib.py",
    "ast_data": "FunctionDef name:filter_kernels arg:kernels arg:condition arguments arg arg If Call Assign Call Return return:yes Compare If Call Assign Call Return return:yes Call If Call Assign Call Assign Call If Call Return return:yes Assign Return return:yes Call Call arguments arg Compare Raise Call"
  },
  {
    "library": "kornia",
    "name": "reproject_disparity_to_3D",
    "source_code": "def reproject_disparity_to_3D(self, disparity_tensor: Tensor) -> Tensor:\n    return reproject_disparity_to_3D(disparity_tensor, self.Q)",
    "docstring": "Reproject the disparity tensor to a 3D point cloud. Args: disparity_tensor: Disparity tensor of shape :math:. Returns: The 3D point cloud of shape :math:",
    "type": "method",
    "file_path": "kornia\\kornia\\geometry\\camera\\stereo.py",
    "ast_data": "FunctionDef name:reproject_disparity_to_3D arg:self arg:disparity_tensor arguments arg arg Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "set_capstyle",
    "source_code": "@_docstring.interpd\ndef set_capstyle(self, cs):\n    self._capstyle = CapStyle(cs)",
    "docstring": "Set how to draw endpoints of lines. Parameters ---------- cs : or %(CapStyle)s",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\backend_bases.py",
    "ast_data": "FunctionDef name:set_capstyle arg:self arg:cs arguments arg arg Assign Call"
  },
  {
    "library": "numpy",
    "name": "_search_sorted_inclusive",
    "source_code": "def _search_sorted_inclusive(a, v):\n    return np.concatenate((a.searchsorted(v[:-1], 'left'), a.searchsorted(v[-1:], 'right')))",
    "docstring": "Like , but where the last item in is placed on the right. In the context of a histogram, this makes the last bin edge inclusive",
    "type": "function",
    "file_path": "numpy\\numpy\\lib\\_histograms_impl.py",
    "ast_data": "FunctionDef name:_search_sorted_inclusive arg:a arg:v arguments arg arg Return return:yes Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_BetaincGrad",
    "source_code": "@ops.RegisterGradient('Betainc')\ndef _BetaincGrad(op: ops.Operation, grad):\n    a, b, x = op.inputs\n    sa = array_ops.shape(a)\n    sx = array_ops.shape(x)\n    _, rx = gen_array_ops.broadcast_gradient_args(sa, sx)\n    log_beta = gen_math_ops.lgamma(a) + gen_math_ops.lgamma(b) - gen_math_ops.lgamma(a + b)\n    partial_x = math_ops.exp(math_ops.xlog1py(b - 1, -x) + math_ops.xlogy(a - 1, x) - log_beta)\n    return (None, None, array_ops.reshape(math_ops.reduce_sum(partial_x * grad, rx), sx))",
    "docstring": "Returns gradient of betainc(a, b, x) with respect to x.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\math_grad.py",
    "ast_data": "FunctionDef name:_BetaincGrad arg:op arg:grad arguments arg arg Assign Assign Call Assign Call Assign Call Assign Call Call Call Assign Call Call Call Return return:yes Call Call Call"
  },
  {
    "library": "django",
    "name": "set_time_zone_sql",
    "source_code": "def set_time_zone_sql(self):\n    return ''",
    "docstring": "Return the SQL that will set the connection's time zone. Return '' if the backend doesn't support time zones.",
    "type": "method",
    "file_path": "django\\django\\db\\backends\\base\\operations.py",
    "ast_data": "FunctionDef name:set_time_zone_sql arg:self arguments arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_get_buffer_names",
    "source_code": "def _get_buffer_names(root_module: nn.Module) -> set[str]:\n    return {clean_tensor_name(buffer_name) for buffer_name, _ in root_module.named_buffers()}",
    "docstring": "Return the fully prefixed names of all buffers in the module hierarchy rooted at `set`.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\fsdp\\_init_utils.py",
    "ast_data": "FunctionDef name:_get_buffer_names arg:root_module arguments arg Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "parents",
    "source_code": "@property\ndef parents(self):\n    return [self.key]",
    "docstring": "See 'FeatureColumn` base class.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\feature_column\\feature_column_v2.py",
    "ast_data": "FunctionDef name:parents arg:self arguments arg Return return:yes"
  },
  {
    "library": "pandas",
    "name": "_is_line_empty",
    "source_code": "def _is_line_empty(self, line: Sequence[Scalar]) -> bool:\n    return not line or all((not x for x in line))",
    "docstring": "Check if a line is empty or not. Parameters ---------- line : str, array-like The line of data to check. Returns ------- boolean : Whether or not the line is empty.",
    "type": "method",
    "file_path": "pandas\\pandas\\io\\parsers\\python_parser.py",
    "ast_data": "FunctionDef name:_is_line_empty arg:self arg:line arguments arg arg Return return:yes BoolOp Call"
  },
  {
    "library": "scrapy",
    "name": "from_mimetype",
    "source_code": "def from_mimetype(self, mimetype: str) -> type[Response]:\n    if mimetype is None:\n        return Response\n    if mimetype in self.classes:\n        return self.classes[mimetype]\n    basetype = f'{mimetype.split('/')[0]}/*'\n    return self.classes.get(basetype, Response)",
    "docstring": "Return the most appropriate Response class for the given mimetype",
    "type": "method",
    "file_path": "scrapy\\scrapy\\responsetypes.py",
    "ast_data": "FunctionDef name:from_mimetype arg:self arg:mimetype arguments arg arg If Compare Return return:yes If Compare Return return:yes Assign Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "build_nccl_then_ring",
    "source_code": "def build_nccl_then_ring(input_tensors, subdiv, red_op, un_op=None):\n\n    def upper_builder(y):\n        return build_ring_all_reduce(y, len(y), subdiv, [0], red_op, un_op)\n\n    def upper_level_f(x):\n        return _reduce_non_singleton(x, upper_builder, un_op)\n    return _build_nccl_hybrid(input_tensors, red_op, upper_level_f)",
    "docstring": "Construct hybrid of NCCL within workers, Ring across workers.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\v1\\all_reduce.py",
    "ast_data": "FunctionDef name:build_nccl_then_ring arg:input_tensors arg:subdiv arg:red_op arg:un_op arguments arg arg arg arg FunctionDef name:upper_builder arg:y arguments arg Return return:yes Call Call FunctionDef name:upper_level_f arg:x arguments arg Return return:yes Call Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "_attach_methods",
    "source_code": "def _attach_methods(self):\n    self._attach_argparser_methods()\n    self._ppfvec = vectorize(self._ppf_single, otypes='d')\n    self._ppfvec.nin = self.numargs + 1\n    self.vecentropy = vectorize(self._entropy, otypes='d')\n    self._cdfvec = vectorize(self._cdf_single, otypes='d')\n    self._cdfvec.nin = self.numargs + 1\n    if self.moment_type == 0:\n        self.generic_moment = vectorize(self._mom0_sc, otypes='d')\n    else:\n        self.generic_moment = vectorize(self._mom1_sc, otypes='d')\n    self.generic_moment.nin = self.numargs + 1",
    "docstring": "Attaches dynamically created methods to the rv_continuous instance.",
    "type": "method",
    "file_path": "scipy\\scipy\\stats\\_distn_infrastructure.py",
    "ast_data": "FunctionDef name:_attach_methods arg:self arguments arg Call Assign Call Assign Assign Call Assign Call Assign If Compare Assign Call Assign Call Assign"
  },
  {
    "library": "matplotlib",
    "name": "get_tick_iterators",
    "source_code": "def get_tick_iterators(self, axes):\n    angle_normal, angle_tangent = {0: (90, 0), 1: (0, 90)}[self.nth_coord]\n    major = self.axis.major\n    major_locs = major.locator()\n    major_labels = major.formatter.format_ticks(major_locs)\n    minor = self.axis.minor\n    minor_locs = minor.locator()\n    minor_labels = minor.formatter.format_ticks(minor_locs)\n    tick_to_axes = self.get_tick_transform(axes) - axes.transAxes\n\n    def _f(locs, labels):\n        for loc, label in zip(locs, labels):\n            c = self._to_xy(loc, const=self._pos)\n            c2 = tick_to_axes.transform(c)\n            if mpl.transforms._interval_contains_close((0, 1), c2[self.nth_coord]):\n                yield (c, angle_normal, angle_tangent, label)\n    return (_f(major_locs, major_labels), _f(minor_locs, minor_labels))",
    "docstring": "tick_loc, tick_angle, tick_label",
    "type": "method",
    "file_path": "matplotlib\\lib\\mpl_toolkits\\axisartist\\axislines.py",
    "ast_data": "FunctionDef name:get_tick_iterators arg:self arg:axes arguments arg arg Assign Assign Assign Call Assign Call Assign Assign Call Assign Call Assign Call FunctionDef name:_f arg:locs arg:labels arguments arg arg For Call Assign Call Assign Call If Call Return return:yes Call Call"
  },
  {
    "library": "matplotlib",
    "name": "_in_patch",
    "source_code": "def _in_patch(self, patch):\n    return lambda xy: patch.contains(SimpleNamespace(x=xy[0], y=xy[1]))[0]",
    "docstring": "Return a predicate function testing whether a point *xy* is contained in *patch*.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\patches.py",
    "ast_data": "FunctionDef name:_in_patch arg:self arg:patch arguments arg arg Return return:yes arguments arg Call Call"
  },
  {
    "library": "scipy",
    "name": "asfptype",
    "source_code": "def asfptype(self):\n    return self._asfptype()",
    "docstring": "Upcast matrix to a floating point format (if necessary)",
    "type": "method",
    "file_path": "scipy\\scipy\\sparse\\_matrix.py",
    "ast_data": "FunctionDef name:asfptype arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "store",
    "source_code": "@property\ndef store(self) -> Store:\n    return self._store",
    "docstring": "Store used by torchelastic control plane",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\elastic\\rendezvous\\api.py",
    "ast_data": "FunctionDef name:store arg:self arguments arg Return return:yes"
  },
  {
    "library": "pandas",
    "name": "_parse_kwds",
    "source_code": "def _parse_kwds(self) -> None:\n    encoding: str | None = self.kwds.get('encoding')\n    self.encoding = 'utf-8' if encoding is None else encoding\n    na_values = self.kwds['na_values']\n    if isinstance(na_values, dict):\n        raise ValueError(\"The pyarrow engine doesn't support passing a dict for na_values\")\n    self.na_values = list(self.kwds['na_values'])",
    "docstring": "Validates keywords before passing to pyarrow.",
    "type": "method",
    "file_path": "pandas\\pandas\\io\\parsers\\arrow_parser_wrapper.py",
    "ast_data": "FunctionDef name:_parse_kwds arg:self arguments arg Call Assign Compare Assign If Call Raise Call Assign Call"
  },
  {
    "library": "matplotlib",
    "name": "_compat_get_offset",
    "source_code": "def _compat_get_offset(meth):\n    sigs = [lambda self, width, height, xdescent, ydescent, renderer: locals(), lambda self, bbox, renderer: locals()]\n\n    @functools.wraps(meth)\n    def get_offset(self, *args, **kwargs):\n        params = _api.select_matching_signature(sigs, self, *args, **kwargs)\n        bbox = params['bbox'] if 'bbox' in params else Bbox.from_bounds(-params['xdescent'], -params['ydescent'], params['width'], params['height'])\n        return meth(params['self'], bbox, params['renderer'])\n    return get_offset",
    "docstring": "Decorator for the get_offset method of OffsetBox and subclasses, that allows supporting both the new signature (self, bbox, renderer) and the old signature (self, width, height, xdescent, ydescent, renderer).",
    "type": "function",
    "file_path": "matplotlib\\lib\\matplotlib\\offsetbox.py",
    "ast_data": "FunctionDef name:_compat_get_offset arg:meth arguments arg Assign arguments arg arg arg arg arg arg Call arguments arg arg arg Call FunctionDef name:get_offset arg:self arguments arg arg arg Assign Call Assign Compare Call Return return:yes Call Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "_process_levels",
    "source_code": "def _process_levels(self):\n    self._levels = list(self.levels)\n    if self.logscale:\n        lower, upper = (1e-250, 1e+250)\n    else:\n        lower, upper = (-1e+250, 1e+250)\n    if self.extend in ('both', 'min'):\n        self._levels.insert(0, lower)\n    if self.extend in ('both', 'max'):\n        self._levels.append(upper)\n    self._levels = np.asarray(self._levels)\n    if not self.filled:\n        self.layers = self.levels\n        return\n    if self.logscale:\n        self.layers = np.sqrt(self._levels[:-1]) * np.sqrt(self._levels[1:])\n    else:\n        self.layers = 0.5 * (self._levels[:-1] + self._levels[1:])",
    "docstring": "Assign values to :attr: based on :attr:, adding extended layers as needed if contours are filled. For line contours, layers simply coincide with levels; a line is a thin layer. No extended levels are needed with line contours.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\contour.py",
    "ast_data": "FunctionDef name:_process_levels arg:self arguments arg Assign Call If Assign Assign If Compare Call If Compare Call Assign Call If Assign Return return:no If Assign Call Call Assign"
  },
  {
    "library": "pytorch",
    "name": "__init__",
    "source_code": "def __init__(self, version: str) -> None:\n    match = self._regex.search(version)\n    if not match:\n        raise InvalidVersion(f\"Invalid version: '{version}'\")\n    self._version = _Version(epoch=int(match.group('epoch')) if match.group('epoch') else 0, release=tuple((int(i) for i in match.group('release').split('.'))), pre=_parse_letter_version(match.group('pre_l'), match.group('pre_n')), post=_parse_letter_version(match.group('post_l'), match.group('post_n1') or match.group('post_n2')), dev=_parse_letter_version(match.group('dev_l'), match.group('dev_n')), local=_parse_local_version(match.group('local')))\n    self._key = _cmpkey(self._version.epoch, self._version.release, self._version.pre, self._version.post, self._version.dev, self._version.local)",
    "docstring": "Initialize a Version object. :param version: The string representation of a version which will be parsed and normalized before use. :raises InvalidVersion: If the `` does not conform to PEP 440 in any way then this exception will be raised.",
    "type": "method",
    "file_path": "pytorch\\torch\\_vendor\\packaging\\version.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:version arguments arg arg Assign Call If Raise Call Assign Call Call Call Call Call Call Call Call Call Call Call Call Call BoolOp Call Call Call Call Call Call Call Assign Call"
  },
  {
    "library": "tensorflow",
    "name": "on_test_end",
    "source_code": "@doc_controls.for_subclass_implementers\ndef on_test_end(self, logs=None):\n    pass",
    "docstring": "Called at the end of evaluation or validation. Subclasses should override for any actions to run. Args: logs: Dict. Currently the output of the last call to is passed to this argument for this method but that may change in the future.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\callbacks.py",
    "ast_data": "FunctionDef name:on_test_end arg:self arg:logs arguments arg arg"
  },
  {
    "library": "pytorch",
    "name": "dump_error_file",
    "source_code": "def dump_error_file(self, rootcause_error_file: str, error_code: int=0):\n    with open(rootcause_error_file) as fp:\n        rootcause_error = json.load(fp)\n        if error_code:\n            self.override_error_code_in_rootcause_data(rootcause_error_file, rootcause_error, error_code)\n        logger.debug('child error file (%s) contents:\\n%s', rootcause_error_file, json.dumps(rootcause_error, indent=2))\n    my_error_file = self._get_error_file_path()\n    if my_error_file:\n        self._rm(my_error_file)\n        self._write_error_file(my_error_file, json.dumps(rootcause_error))\n        logger.info(\"dumped error file to parent's %s\", my_error_file)\n    else:\n        logger.error('no error file defined for parent, to copy child error file (%s)', rootcause_error_file)",
    "docstring": "Dump parent error file from child process's root cause error and error code.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\elastic\\multiprocessing\\errors\\error_handler.py",
    "ast_data": "FunctionDef name:dump_error_file arg:self arg:rootcause_error_file arg:error_code arguments arg arg arg With Call Assign Call If Call Call Call Assign Call If Call Call Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "_is_amx_fp16_supported",
    "source_code": "def _is_amx_fp16_supported() -> bool:\n    return torch._C._cpu._is_amx_fp16_supported()",
    "docstring": "Returns a bool indicating if CPU supports AMX FP16.",
    "type": "function",
    "file_path": "pytorch\\torch\\cpu\\__init__.py",
    "ast_data": "FunctionDef name:_is_amx_fp16_supported arguments Return return:yes Call"
  },
  {
    "library": "numpy",
    "name": "_UFuncInputCastingError",
    "source_code": "@_display_as_base\nclass _UFuncInputCastingError(_UFuncCastingError):\n\n    def __init__(self, ufunc, casting, from_, to, i):\n        super().__init__(ufunc, casting, from_, to)\n        self.in_i = i\n\n    def __str__(self):\n        i_str = f'{self.in_i} ' if self.ufunc.nin != 1 else ''\n        return f'Cannot cast ufunc {self.ufunc.__name__!r} input {i_str}from {self.from_!r} to {self.to!r} with casting rule {self.casting!r}'",
    "docstring": "Thrown when a ufunc input cannot be casted",
    "type": "class",
    "file_path": "numpy\\numpy\\_core\\_exceptions.py",
    "ast_data": "ClassDef name:_UFuncInputCastingError FunctionDef name:__init__ arg:self arg:ufunc arg:casting arg:from_ arg:to arg:i arguments arg arg arg arg arg arg Call Call Assign FunctionDef name:__str__ arg:self arguments arg Assign Compare Return return:yes"
  },
  {
    "library": "seaborn",
    "name": "set_theme",
    "source_code": "def set_theme(context='notebook', style='darkgrid', palette='deep', font='sans-serif', font_scale=1, color_codes=True, rc=None):\n    set_context(context, font_scale)\n    set_style(style, rc={'font.family': font})\n    set_palette(palette, color_codes=color_codes)\n    if rc is not None:\n        mpl.rcParams.update(rc)",
    "docstring": "Set aspects of the visual theme for all matplotlib and seaborn plots. This function changes the global defaults for all plots using the matplotlib rcParams system. The themeing is decomposed into several distinct sets of parameter values. The options are illustrated in the :doc: and :doc: tutorials. Parameters ---------- context : string or dict Scaling parameters, see :func:. style : string or dict Axes style parameters, see :func:. palette : string or sequence Color palette, see :func:. font : string Font family, see matplotlib font manager. font_scale : float, optional Separate scaling factor to independently scale the size of the font elements. color_codes : bool If `` is a seaborn palette, remap the shorthand color codes (e.g. \"b\", \"g\", \"r\", etc.) to the colors from this palette. rc : dict or None Dictionary of rc parameter mappings to override the above. Examples -------- .. include:: ../docstrings/set_theme.rst",
    "type": "function",
    "file_path": "seaborn\\seaborn\\rcmod.py",
    "ast_data": "FunctionDef name:set_theme arg:context arg:style arg:palette arg:font arg:font_scale arg:color_codes arg:rc arguments arg arg arg arg arg arg arg Call Call Call If Compare Call"
  },
  {
    "library": "tensorflow",
    "name": "_show_inputs_outputs",
    "source_code": "def _show_inputs_outputs(saved_model_dir, tag_set, signature_def_key, indent=0):\n    meta_graph_def = saved_model_utils.get_meta_graph_def(saved_model_dir, tag_set)\n    _show_inputs_outputs_mgd(meta_graph_def, signature_def_key, indent)",
    "docstring": "Prints input and output TensorInfos. Prints the details of input and output TensorInfos for the SignatureDef mapped by the given signature_def_key. Args: saved_model_dir: Directory containing the SavedModel to inspect. tag_set: Group of tag(s) of the MetaGraphDef, in string format, separated by ','. For tag-set contains multiple tags, all tags must be passed in. signature_def_key: A SignatureDef key string. indent: How far (in increments of 2 spaces) to indent each line of output.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\tools\\saved_model_cli.py",
    "ast_data": "FunctionDef name:_show_inputs_outputs arg:saved_model_dir arg:tag_set arg:signature_def_key arg:indent arguments arg arg arg arg Assign Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_IteratorSaveable",
    "source_code": "class _IteratorSaveable(BaseSaverBuilder.SaveableObject):\n\n    def __init__(self, iterator_resource, name, external_state_policy=options_lib.ExternalStatePolicy.FAIL):\n        serialized_iterator = gen_dataset_ops.serialize_iterator(iterator_resource, external_state_policy=external_state_policy.value)\n        specs = [BaseSaverBuilder.SaveSpec(serialized_iterator, '', name + '_STATE', device=iterator_resource.device)]\n        super(_IteratorSaveable, self).__init__(iterator_resource, specs, name)\n\n    def restore(self, restored_tensors, restored_shapes):\n        with ops.colocate_with(self.op):\n            return gen_dataset_ops.deserialize_iterator(self.op, restored_tensors[0])",
    "docstring": "SaveableObject for saving/restoring iterator state.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\data\\ops\\iterator_ops.py",
    "ast_data": "ClassDef name:_IteratorSaveable FunctionDef name:__init__ arg:self arg:iterator_resource arg:name arg:external_state_policy arguments arg arg arg arg Assign Call Assign Call Call Call FunctionDef name:restore arg:self arg:restored_tensors arg:restored_shapes arguments arg arg arg With Call Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "_reduce",
    "source_code": "def _reduce(self, name: str, *, skipna: bool=True, keepdims: bool=False, **kwargs):\n    result = self._reduce_calc(name, skipna=skipna, keepdims=keepdims, **kwargs)\n    if isinstance(result, pa.Array):\n        return type(self)(result)\n    else:\n        return result",
    "docstring": "Return a scalar result of performing the reduction operation. Parameters ---------- name : str Name of the function, supported values are: { any, all, min, max, sum, mean, median, prod, std, var, sem, kurt, skew }. skipna : bool, default True If True, skip NaN values. **kwargs Additional keyword arguments passed to the reduction function. Currently, is the only supported kwarg. Returns ------- scalar Raises ------ TypeError : subclass does not define reductions",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\arrays\\arrow\\array.py",
    "ast_data": "FunctionDef name:_reduce arg:self arg:name arguments arg arg arg arg arg Assign Call If Call Return return:yes Call Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "get_capstyle",
    "source_code": "def get_capstyle(self):\n    return self._capstyle.name",
    "docstring": "Return the .",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\backend_bases.py",
    "ast_data": "FunctionDef name:get_capstyle arg:self arguments arg Return return:yes"
  },
  {
    "library": "pandas",
    "name": "start",
    "source_code": "@property\ndef start(self) -> int:\n    return self._range.start",
    "docstring": "The value of the parameter (`RangeIndexstartRangeIndexRangeIndexRangeIndex`. Examples -------- >>> idx = pd.RangeIndex(5) >>> idx.start 0 >>> idx = pd.RangeIndex(2, -10, -3) >>> idx.start 2",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\indexes\\range.py",
    "ast_data": "FunctionDef name:start arg:self arguments arg Return return:yes"
  },
  {
    "library": "scipy",
    "name": "evaluate",
    "source_code": "def evaluate(self, x):\n    return self._f(x)",
    "docstring": "Evaluate the empirical CDF/SF function at the input. Parameters ---------- x : ndarray Argument to the CDF/SF Returns ------- y : ndarray The CDF/SF evaluated at the input",
    "type": "method",
    "file_path": "scipy\\scipy\\stats\\_survival.py",
    "ast_data": "FunctionDef name:evaluate arg:self arg:x arguments arg arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "global_clipnorm",
    "source_code": "@property\ndef global_clipnorm(self):\n    return self._global_clipnorm",
    "docstring": "or . If set, clips gradients to a maximum norm.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\optimizer_v2\\optimizer_v2.py",
    "ast_data": "FunctionDef name:global_clipnorm arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_get_dense_tensor_internal",
    "source_code": "def _get_dense_tensor_internal(self, inputs, weight_collections=None, trainable=None):\n    sparse_tensors = self.categorical_column._get_sparse_tensors(inputs, weight_collections=weight_collections, trainable=trainable)\n    sparse_ids = sparse_tensors.id_tensor\n    sparse_weights = sparse_tensors.weight_tensor\n    embedding_weights = self.layer_creator(weight_collections=weight_collections, scope=variable_scope.get_variable_scope())\n    if self.ckpt_to_load_from is not None:\n        to_restore = embedding_weights\n        if isinstance(to_restore, variables.PartitionedVariable):\n            to_restore = to_restore._get_variable_list()\n        checkpoint_utils.init_from_checkpoint(self.ckpt_to_load_from, {self.tensor_name_in_ckpt: to_restore})\n    sparse_id_rank = tensor_shape.dimension_value(sparse_ids.dense_shape.get_shape()[0])\n    embedding_lookup_sparse = embedding_ops.safe_embedding_lookup_sparse\n    if not self.use_safe_embedding_lookup and sparse_id_rank is not None and (sparse_id_rank <= 2):\n        embedding_lookup_sparse = embedding_ops.embedding_lookup_sparse_v2\n    return embedding_lookup_sparse(embedding_weights, sparse_ids, sparse_weights, combiner=self.combiner, name='%s_weights' % self.name, max_norm=self.max_norm)",
    "docstring": "Private method that follows the signature of _get_dense_tensor.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\feature_column\\feature_column.py",
    "ast_data": "FunctionDef name:_get_dense_tensor_internal arg:self arg:inputs arg:weight_collections arg:trainable arguments arg arg arg arg Assign Call Assign Assign Assign Call Call If Compare Assign If Call Assign Call Call Assign Call Call Assign If BoolOp Compare Compare Assign Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_OptimizableVariable",
    "source_code": "class _OptimizableVariable(metaclass=abc.ABCMeta):\n\n    @abc.abstractmethod\n    def target(self):\n        raise NotImplementedError('Calling an abstract method.')\n\n    @abc.abstractmethod\n    def update_op(self, optimizer, g):\n        raise NotImplementedError('Calling an abstract method.')",
    "docstring": "Interface for abstracting over variables in the optimizers.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\training\\optimizer.py",
    "ast_data": "ClassDef name:_OptimizableVariable FunctionDef name:target arg:self arguments arg Raise Call FunctionDef name:update_op arg:self arg:optimizer arg:g arguments arg arg arg Raise Call"
  },
  {
    "library": "django",
    "name": "set_rollback",
    "source_code": "def set_rollback(rollback, using=None):\n    return get_connection(using).set_rollback(rollback)",
    "docstring": "Set or unset the \"needs rollback\" flag -- for *advanced use* only. When is , trigger a rollback when exiting the innermost enclosing atomic block that has (that's the default). Use this to force a rollback without raising an exception. When is , prevent such a rollback. Use this only after rolling back to a known-good state! Otherwise, you break the atomic block and data corruption may occur.",
    "type": "function",
    "file_path": "django\\django\\db\\transaction.py",
    "ast_data": "FunctionDef name:set_rollback arg:rollback arg:using arguments arg arg Return return:yes Call Call"
  },
  {
    "library": "scikit-learn",
    "name": "score",
    "source_code": "def score(self, X, y, sample_weight=None):\n    if X is None:\n        X = np.zeros(shape=(len(y), 1))\n    return super().score(X, y, sample_weight)",
    "docstring": "Return the mean accuracy on the given test data and labels. In multi-label classification, this is the subset accuracy which is a harsh metric since you require for each sample that each label set be correctly predicted. Parameters ---------- X : None or array-like of shape (n_samples, n_features) Test samples. Passing None as test samples gives the same result as passing real test samples, since DummyClassifier operates independently of the sampled observations. y : array-like of shape (n_samples,) or (n_samples, n_outputs) True labels for X. sample_weight : array-like of shape (n_samples,), default=None Sample weights. Returns ------- score : float Mean accuracy of self.predict(X) w.r.t. y.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\dummy.py",
    "ast_data": "FunctionDef name:score arg:self arg:X arg:y arg:sample_weight arguments arg arg arg arg If Compare Assign Call Call Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, indices, values, dense_shape):\n    with ops.name_scope(None, 'SparseTensor', [indices, values, dense_shape]):\n        indices = ops.convert_to_tensor(indices, name='indices', dtype=dtypes.int64)\n        values = ops.convert_to_tensor(values, name='values')\n        dense_shape = ops.convert_to_tensor(dense_shape, name='dense_shape', dtype=dtypes.int64)\n        dense_shape_default = tensor_util.constant_value_as_shape(dense_shape)\n    self._indices = indices\n    self._values = values\n    self._dense_shape = dense_shape\n    self._dense_shape_default = dense_shape_default\n    indices_shape = indices.shape.with_rank(2)\n    values_shape = values.shape.with_rank(1)\n    dense_shape_shape = dense_shape.shape.with_rank(1)\n    indices_shape.dims[0].assert_is_compatible_with(values_shape.dims[0])\n    indices_shape.dims[1].assert_is_compatible_with(dense_shape_shape.dims[0])",
    "docstring": "Creates a . Args: indices: A 2-D int64 tensor of shape . values: A 1-D tensor of any type and shape . dense_shape: A 1-D int64 tensor of shape . Raises: ValueError: When building an eager SparseTensor if is unknown or contains unknown elements (None or -1).",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\sparse_tensor.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:indices arg:values arg:dense_shape arguments arg arg arg arg With Call Assign Call Assign Call Assign Call Assign Call Assign Assign Assign Assign Assign Call Assign Call Assign Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "_update_shared_with",
    "source_code": "def _update_shared_with(child: EdgeOrNode, qspec: QuantizationSpecBase, shared_with_map: dict[EdgeOrNode, EdgeOrNode]):\n    if isinstance(qspec, SharedQuantizationSpec):\n        parent = qspec.edge_or_node\n        _union(parent, child, shared_with_map)",
    "docstring": "Update the based on the qspec, this applies the configuration and established the relationship between with the edge/node that it is pointing to, we'll use this information in the end to get the group id",
    "type": "function",
    "file_path": "pytorch\\torch\\ao\\quantization\\pt2e\\prepare.py",
    "ast_data": "FunctionDef name:_update_shared_with arg:child arg:qspec arg:shared_with_map arguments arg arg arg If Call Assign Call"
  },
  {
    "library": "django",
    "name": "post",
    "source_code": "def post(self, request, *args, **kwargs):\n    auth_logout(request)\n    redirect_to = self.get_success_url()\n    if redirect_to != request.get_full_path():\n        return HttpResponseRedirect(redirect_to)\n    return super().get(request, *args, **kwargs)",
    "docstring": "Logout may be done via POST.",
    "type": "method",
    "file_path": "django\\django\\contrib\\auth\\views.py",
    "ast_data": "FunctionDef name:post arg:self arg:request arguments arg arg arg arg Call Assign Call If Compare Call Return return:yes Call Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "scatter_mul",
    "source_code": "@tf_export(v1=['scatter_mul'])\ndef scatter_mul(ref, indices, updates, use_locking=False, name=None):\n    if ref.dtype._is_ref_dtype:\n        return gen_state_ops.scatter_mul(ref, indices, updates, use_locking=use_locking, name=name)\n    return ref._lazy_read(gen_resource_variable_ops.resource_scatter_mul(ref.handle, indices, ops.convert_to_tensor(updates, ref.dtype), name=name))",
    "docstring": "Multiplies sparse updates into a variable reference. This operation computes This operation outputs after the update is done. This makes it easier to chain operations that need to use the reset value. Duplicate entries are handled correctly: if multiple reference the same location, their contributions multiply. Requires or . Args: ref: A mutable . Must be one of the following types: , , , , , , , , , , , , , , , , . Should be from a node. indices: A . Must be one of the following types: , . A tensor of indices into the first dimension of . updates: A . Must have the same type as . A tensor of updated values to multiply to . use_locking: An optional . Defaults to . If True, the operation will be protected by a lock; otherwise the behavior is undefined, but may exhibit less contention. name: A name for the operation (optional). Returns: A mutable . Has the same type as .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\state_ops.py",
    "ast_data": "FunctionDef name:scatter_mul arg:ref arg:indices arg:updates arg:use_locking arg:name arguments arg arg arg arg arg If Return return:yes Call Return return:yes Call Call Call Call"
  },
  {
    "library": "pandas",
    "name": "buffer_put_lines",
    "source_code": "def buffer_put_lines(buf: WriteBuffer[str], lines: list[str]) -> None:\n    if any((isinstance(x, str) for x in lines)):\n        lines = [str(x) for x in lines]\n    buf.write('\\n'.join(lines))",
    "docstring": "Appends lines to a buffer. Parameters ---------- buf The buffer to write to lines The lines to append.",
    "type": "function",
    "file_path": "pandas\\pandas\\io\\formats\\format.py",
    "ast_data": "FunctionDef name:buffer_put_lines arg:buf arg:lines arguments arg arg If Call Call Assign Call Call Call"
  },
  {
    "library": "django",
    "name": "RangeBoundary",
    "source_code": "class RangeBoundary(models.Expression):\n\n    def __init__(self, inclusive_lower=True, inclusive_upper=False):\n        self.lower = '[' if inclusive_lower else '('\n        self.upper = ']' if inclusive_upper else ')'\n\n    def as_sql(self, compiler, connection):\n        return (\"'%s%s'\" % (self.lower, self.upper), [])",
    "docstring": "A class that represents range boundaries.",
    "type": "class",
    "file_path": "django\\django\\contrib\\postgres\\fields\\ranges.py",
    "ast_data": "ClassDef name:RangeBoundary FunctionDef name:__init__ arg:self arg:inclusive_lower arg:inclusive_upper arguments arg arg arg Assign Assign FunctionDef name:as_sql arg:self arg:compiler arg:connection arguments arg arg arg Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "CurveB",
    "source_code": "@_register_style(_style_list, name='->')\nclass CurveB(_Curve):\n    arrow = '->'",
    "docstring": "An arrow with a head at its end point.",
    "type": "class",
    "file_path": "matplotlib\\lib\\matplotlib\\patches.py",
    "ast_data": "ClassDef name:CurveB Assign Call"
  },
  {
    "library": "django",
    "name": "has_module_permission",
    "source_code": "def has_module_permission(self, request):\n    return request.user.has_module_perms(self.opts.app_label)",
    "docstring": "Return True if the given request has any permission in the given app label. Can be overridden by the user in subclasses. In such case it should return True if the given request has permission to view the module on the admin index page and access the module's index page. Overriding it does not restrict access to the add, change or delete views. Use for that.",
    "type": "method",
    "file_path": "django\\django\\contrib\\admin\\options.py",
    "ast_data": "FunctionDef name:has_module_permission arg:self arg:request arguments arg arg Return return:yes Call"
  },
  {
    "library": "django",
    "name": "_get",
    "source_code": "def _get(self, *args, **kwargs):\n    raise NotImplementedError('subclasses of BaseStorage must provide a _get() method')",
    "docstring": "Retrieve a list of stored messages. Return a tuple of the messages and a flag indicating whether or not all the messages originally intended to be stored in this storage were, in fact, stored and retrieved; e.g., ``.",
    "type": "method",
    "file_path": "django\\django\\contrib\\messages\\storage\\base.py",
    "ast_data": "FunctionDef name:_get arg:self arguments arg arg arg Raise Call"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, sv, sess):\n    super(SVSummaryThread, self).__init__(sv.coord, sv.save_summaries_secs)\n    self._sv = sv\n    self._sess = sess",
    "docstring": "Create a SVSummaryThread. Args: sv: A . sess: A .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\training\\supervisor.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:sv arg:sess arguments arg arg arg Call Call Assign Assign"
  },
  {
    "library": "tensorflow",
    "name": "resize_images_v2",
    "source_code": "@dispatch.dispatch_for_api(image_ops.resize_images_v2)\ndef resize_images_v2(images: ragged_tensor.RaggedTensor, size, method=image_ops.ResizeMethod.BILINEAR, preserve_aspect_ratio=False, antialias=False, name=None):\n    with ops.name_scope(name, 'RaggedResizeImages', [images, size]):\n        return _resize_images(image_ops.resize_images_v2, images, size, method=method, preserve_aspect_ratio=preserve_aspect_ratio, antialias=antialias)",
    "docstring": "RaggedTensor dispatcher for tf.image.resize (tf-v2).",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\ragged_image_ops.py",
    "ast_data": "FunctionDef name:resize_images_v2 arg:images arg:size arg:method arg:preserve_aspect_ratio arg:antialias arg:name arguments arg arg arg arg arg arg With Call Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_maybe_stacked",
    "source_code": "def _maybe_stacked(self, cache, inp):\n    if inp in cache:\n        return cache[inp]\n    if not self.op_is_inside_loop(inp.op):\n        return False\n    op = inp.op\n    output = False\n    if op.type in ['OnesLike', 'Shape', 'Rank', 'ShapeN', 'ZerosLike', 'TensorArrayV3', 'TensorArraySizeV3']:\n        output = False\n    elif _is_stateful_pfor_op(op):\n        output = True\n    elif op.type == 'Exit':\n        output = True\n    else:\n        for t in op.inputs:\n            if self._maybe_stacked(cache, t):\n                output = True\n                break\n    cache[inp] = output\n    return output",
    "docstring": "Heuristic to figure out if the converting inp leads to a stacked value. Args: cache: map from Tensor to boolean indicating stacked/unstacked. inp: input Tensor. Returns: True if could get stacked. If the function returns False, the converted value should be guaranteed to be unstacked. If returning True, it may or may not be stacked.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\parallel_for\\pfor.py",
    "ast_data": "FunctionDef name:_maybe_stacked arg:self arg:cache arg:inp arguments arg arg arg If Compare Return return:yes If Call Return return:yes Assign Assign If Compare Assign If Call Assign If Compare Assign For If Call Assign Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_get_num_samples_or_steps",
    "source_code": "def _get_num_samples_or_steps(data, steps_per_epoch):\n    flat_inputs = nest.flatten(data)\n    if hasattr(flat_inputs[0], 'shape'):\n        return (int(flat_inputs[0].shape[0]), False)\n    return (steps_per_epoch, True)",
    "docstring": "Returns number of samples or steps, and whether to use steps count mode.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\training_generator_v1.py",
    "ast_data": "FunctionDef name:_get_num_samples_or_steps arg:data arg:steps_per_epoch arguments arg arg Assign Call If Call Return return:yes Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_sparse_values_to_keep",
    "source_code": "def _sparse_values_to_keep(t, keep_input):\n    row_values = t.indices[:, 0]\n    return array_ops.gather(keep_input, row_values)",
    "docstring": "Convert a per-row vector to a per-value one.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\training\\input.py",
    "ast_data": "FunctionDef name:_sparse_values_to_keep arg:t arg:keep_input arguments arg arg Assign Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "freeze_runtime_asserts",
    "source_code": "@record_shapeenv_event()\ndef freeze_runtime_asserts(self) -> None:\n    self.runtime_asserts_frozen = True",
    "docstring": "Freeze this ShapeEnv to stop adding deferred runtime asserts. We will error if you try to install a new runtime assert when it is frozen. This would indicate a lowering violation, or perhaps something we know statically is already True but we are checking it again in a way that is not clearly dischargeable.",
    "type": "method",
    "file_path": "pytorch\\torch\\fx\\experimental\\symbolic_shapes.py",
    "ast_data": "FunctionDef name:freeze_runtime_asserts arg:self arguments arg Assign Call"
  },
  {
    "library": "pytorch",
    "name": "name",
    "source_code": "@property\ndef name(self) -> Optional[str]:\n    return self._name",
    "docstring": "Get the name of the timer.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\elastic\\rendezvous\\utils.py",
    "ast_data": "FunctionDef name:name arg:self arguments arg Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "inverse_transform",
    "source_code": "@available_if(_search_estimator_has('inverse_transform'))\ndef inverse_transform(self, X):\n    check_is_fitted(self)\n    return self.best_estimator_.inverse_transform(X)",
    "docstring": "Call inverse_transform on the estimator with the best found params. Only available if the underlying estimator implements `inverse_transformX` based on the estimator with the best found parameters.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\model_selection\\_search.py",
    "ast_data": "FunctionDef name:inverse_transform arg:self arg:X arguments arg arg Call Return return:yes Call Call Call"
  },
  {
    "library": "scrapy",
    "name": "_lose_connection_with_error",
    "source_code": "def _lose_connection_with_error(self, errors: list[BaseException]) -> None:\n    self._conn_lost_errors += errors\n    assert self.transport is not None\n    self.transport.loseConnection()",
    "docstring": "Helper function to lose the connection with the error sent as a reason",
    "type": "method",
    "file_path": "scrapy\\scrapy\\core\\http2\\protocol.py",
    "ast_data": "FunctionDef name:_lose_connection_with_error arg:self arg:errors arguments arg arg Compare Call"
  },
  {
    "library": "authlib",
    "name": "validate_id_token_encryption_alg_values_supported",
    "source_code": "def validate_id_token_encryption_alg_values_supported(self):\n    validate_array_value(self, 'id_token_encryption_alg_values_supported')",
    "docstring": "OPTIONAL. JSON array containing a list of the JWE encryption algorithms (alg values) supported by the OP for the ID Token to encode the Claims in a JWT.",
    "type": "method",
    "file_path": "authlib\\authlib\\oidc\\discovery\\models.py",
    "ast_data": "FunctionDef name:validate_id_token_encryption_alg_values_supported arg:self arguments arg Call"
  },
  {
    "library": "numpy",
    "name": "lagder",
    "source_code": "def lagder(c, m=1, scl=1, axis=0):\n    c = np.array(c, ndmin=1, copy=True)\n    if c.dtype.char in '?bBhHiIlLqQpP':\n        c = c.astype(np.double)\n    cnt = pu._as_int(m, 'the order of derivation')\n    iaxis = pu._as_int(axis, 'the axis')\n    if cnt < 0:\n        raise ValueError('The order of derivation must be non-negative')\n    iaxis = normalize_axis_index(iaxis, c.ndim)\n    if cnt == 0:\n        return c\n    c = np.moveaxis(c, iaxis, 0)\n    n = len(c)\n    if cnt >= n:\n        c = c[:1] * 0\n    else:\n        for i in range(cnt):\n            n = n - 1\n            c *= scl\n            der = np.empty((n,) + c.shape[1:], dtype=c.dtype)\n            for j in range(n, 1, -1):\n                der[j - 1] = -c[j]\n                c[j - 1] += c[j]\n            der[0] = -c[1]\n            c = der\n    c = np.moveaxis(c, 0, iaxis)\n    return c",
    "docstring": "Differentiate a Laguerre series. Returns the Laguerre series coefficients differentiated times along . At each iteration the result is multiplied by (the scaling factor is for use in a linear change of variable). The argument is an array of coefficients from low to high degree along each axis, e.g., [1,2,3] represents the series `cscl`. This is for use in a linear change of variable. (Default: 1) axis : int, optional Axis over which the derivative is taken. (Default: 0). Returns ------- der : ndarray Laguerre series of the derivative. See Also -------- lagint Notes ----- In general, the result of differentiating a Laguerre series does not resemble the same operation on a power series. Thus the result of this function may be \"unintuitive,\" albeit correct; see Examples section below. Examples -------- >>> from numpy.polynomial.laguerre import lagder >>> lagder([ 1., 1., 1., -3.]) array([1., 2., 3.]) >>> lagder([ 1., 0., 0., -4., 3.], m=2) array([1., 2., 3.])",
    "type": "function",
    "file_path": "numpy\\numpy\\polynomial\\laguerre.py",
    "ast_data": "FunctionDef name:lagder arg:c arg:m arg:scl arg:axis arguments arg arg arg arg Assign Call If Compare Assign Call Assign Call Assign Call If Compare Raise Call Assign Call If Compare Return return:yes Assign Call Assign Call If Compare Assign For Call Assign Assign Call For Call Assign Assign Assign Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_log_signature_report",
    "source_code": "def _log_signature_report(signature_def_map, excluded_signatures):\n    sig_names_by_method_name = collections.defaultdict(list)\n    for method_name in _FRIENDLY_METHOD_NAMES:\n        sig_names_by_method_name[method_name] = []\n    for signature_name, sig in signature_def_map.items():\n        sig_names_by_method_name[sig.method_name].append(signature_name)\n    for method_name, sig_names in sig_names_by_method_name.items():\n        if method_name in _FRIENDLY_METHOD_NAMES:\n            method_name = _FRIENDLY_METHOD_NAMES[method_name]\n        logging.info('Signatures INCLUDED in export for {}: {}'.format(method_name, sig_names if sig_names else 'None'))\n    if excluded_signatures:\n        logging.info('Signatures EXCLUDED from export because they cannot be be served via TensorFlow Serving APIs:')\n        for signature_name, message in excluded_signatures.items():\n            logging.info(\"'{}' : {}\".format(signature_name, message))\n    if not signature_def_map:\n        logging.warn('Export includes no signatures!')\n    elif signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY not in signature_def_map:\n        logging.warn('Export includes no default signature!')",
    "docstring": "Log a report of which signatures were produced.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\saved_model\\model_utils\\export_utils.py",
    "ast_data": "FunctionDef name:_log_signature_report arg:signature_def_map arg:excluded_signatures arguments arg arg Assign Call For Assign For Call Call For Call If Compare Assign Call Call If Call For Call Call Call If Call If Compare Call"
  },
  {
    "library": "django",
    "name": "render",
    "source_code": "def render(request, template_name, context=None, content_type=None, status=None, using=None):\n    content = loader.render_to_string(template_name, context, request, using=using)\n    return HttpResponse(content, content_type, status)",
    "docstring": "Return an HttpResponse whose content is filled with the result of calling django.template.loader.render_to_string() with the passed arguments.",
    "type": "function",
    "file_path": "django\\django\\shortcuts.py",
    "ast_data": "FunctionDef name:render arg:request arg:template_name arg:context arg:content_type arg:status arg:using arguments arg arg arg arg arg arg Assign Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "RunResult",
    "source_code": "@dataclass\nclass RunResult:\n    state: WorkerState\n    return_values: dict[int, Any] = field(default_factory=dict)\n    failures: dict[int, ProcessFailure] = field(default_factory=dict)\n\n    def is_failed(self) -> bool:\n        return self.state == WorkerState.FAILED",
    "docstring": "Return results of the worker executions. Run results follow an \"all-or-nothing\" policy where the run is successful if and only if ALL local workers managed by this agent complete successfully. If the result is successful (e.g. ``.",
    "type": "class",
    "file_path": "pytorch\\torch\\distributed\\elastic\\agent\\server\\api.py",
    "ast_data": "ClassDef name:RunResult Call Call FunctionDef name:is_failed arg:self arguments arg Return return:yes Compare"
  },
  {
    "library": "kornia",
    "name": "__init__",
    "source_code": "def __init__(self, image_encoder: ImageEncoderViT | TinyViT, prompt_encoder: PromptEncoder, mask_decoder: MaskDecoder) -> None:\n    super().__init__()\n    self.image_encoder = image_encoder\n    self.prompt_encoder = prompt_encoder\n    self.mask_decoder = mask_decoder",
    "docstring": "SAM predicts object masks from an image and input prompts. Args: image_encoder: The backbone used to encode the image into image embeddings that allow for efficient mask prediction. prompt_encoder: Encodes various types of input prompts. mask_decoder: Predicts masks from the image embeddings and encoded prompts.",
    "type": "method",
    "file_path": "kornia\\kornia\\contrib\\models\\sam\\model.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:image_encoder arg:prompt_encoder arg:mask_decoder arguments arg arg arg arg Call Call Assign Assign Assign"
  },
  {
    "library": "pytorch",
    "name": "_generate_kernel_call_helper",
    "source_code": "def _generate_kernel_call_helper(self, kernel_name: str, call_args, *, device=None, triton=True, arg_types=None, raw_keys=None, raw_args=None, triton_meta=None, graph_name='', original_fxnode_name=None):\n    assert not triton, 'CppWrapperCpuArrayRef.generate_kernel_call does not support GPU'\n    assert arg_types is not None and len(call_args) == len(arg_types), 'Mismatch call_args and arg_types in generate_kernel_call'\n    new_args = []\n    for idx, arg in enumerate(call_args):\n        if '*' in arg_types[idx]:\n            var_name = f'var_{next(self.arg_var_id)}'\n            self.writeline(f'auto* {var_name} = get_data_ptr_wrapper({arg});')\n            new_args.append(f'({arg_types[idx]})({var_name})')\n        else:\n            new_args.append(arg)\n    debug_printer_manager = V.graph.wrapper_code.debug_printer\n    debug_printer_manager.set_printer_args(call_args, kernel_name, None, None, 'cpp')\n    with debug_printer_manager:\n        self.writeline(self.wrap_kernel_call(kernel_name, new_args))",
    "docstring": "Generates kernel call code. triton: Defines whether the GPU backend uses Triton for codegen. Otherwise it uses the CUDA language for codegen. Only valid when cuda == True.",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\codegen\\cpp_wrapper_cpu_array_ref.py",
    "ast_data": "FunctionDef name:_generate_kernel_call_helper arg:self arg:kernel_name arg:call_args arguments arg arg arg arg arg arg arg arg arg arg arg BoolOp Compare Compare Call Call Assign For Call If Compare Assign Call Call Call Call Assign Call With Call Call"
  },
  {
    "library": "django",
    "name": "write_hex",
    "source_code": "def write_hex(self, geom):\n    geom = self._handle_empty_point(geom)\n    wkb = wkb_writer_write_hex(self.ptr, geom.ptr, byref(c_size_t()))\n    return wkb",
    "docstring": "Return the HEXEWKB representation of the given geometry.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\geos\\prototypes\\io.py",
    "ast_data": "FunctionDef name:write_hex arg:self arg:geom arguments arg arg Assign Call Assign Call Call Call Return return:yes"
  },
  {
    "library": "authlib",
    "name": "resolve_client_public_key",
    "source_code": "def resolve_client_public_key(self, client, headers):\n    raise NotImplementedError()",
    "docstring": "Resolve the client public key for verifying the JWT signature. A client may have many public keys, in this case, we can retrieve it via `` value in headers. Developers MUST implement this method:: def resolve_client_public_key(self, client, headers): return client.public_key",
    "type": "method",
    "file_path": "authlib\\authlib\\oauth2\\rfc7523\\client.py",
    "ast_data": "FunctionDef name:resolve_client_public_key arg:self arg:client arg:headers arguments arg arg arg Raise Call"
  },
  {
    "library": "pytorch",
    "name": "reset",
    "source_code": "def reset(self):\n    super().reset()",
    "docstring": "Delete the graph currently held by this instance.",
    "type": "method",
    "file_path": "pytorch\\torch\\cuda\\graphs.py",
    "ast_data": "FunctionDef name:reset arg:self arguments arg Call Call"
  },
  {
    "library": "tensorflow",
    "name": "list_objects",
    "source_code": "def list_objects(root_trackable):\n    return util.list_objects(graph_view_lib.ObjectGraphView(root_trackable))",
    "docstring": "Traverse the object graph and list all accessible objects. Looks for objects which are dependencies of . Includes slot variables only if the variable they are slotting for and the optimizer are dependencies of (i.e. if they would be saved with a checkpoint). Args: root_trackable: A object whose dependencies should be flattened. Returns: A flat list of objects.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\checkpoint\\checkpoint.py",
    "ast_data": "FunctionDef name:list_objects arg:root_trackable arguments arg Return return:yes Call Call"
  },
  {
    "library": "matplotlib",
    "name": "clear",
    "source_code": "def clear(self):\n    self._clear_without_update()\n    self.update()",
    "docstring": "Clear the selection and set the selector ready to make a new one.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\widgets.py",
    "ast_data": "FunctionDef name:clear arg:self arguments arg Call Call"
  },
  {
    "library": "tensorflow",
    "name": "optional",
    "source_code": "@property\ndef optional(self) -> bool:\n    return self.default is not self.empty",
    "docstring": "If this parameter might not be supplied for a call.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\core\\function\\polymorphism\\function_type.py",
    "ast_data": "FunctionDef name:optional arg:self arguments arg Return return:yes Compare"
  },
  {
    "library": "django",
    "name": "field_is_referenced",
    "source_code": "def field_is_referenced(state, model_tuple, field_tuple):\n    return next(get_references(state, model_tuple, field_tuple), None) is not None",
    "docstring": "Return whether is referenced by any state models.",
    "type": "function",
    "file_path": "django\\django\\db\\migrations\\utils.py",
    "ast_data": "FunctionDef name:field_is_referenced arg:state arg:model_tuple arg:field_tuple arguments arg arg arg Return return:yes Compare Call Call"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, components):\n    global _next_device_number, _next_device_number_lock\n    self.components = tuple((device_util.canonicalize(d) for d in components))\n    if not self.components:\n        raise ValueError('ParallelDevice requires at least one component.')\n    ctx = context.context()\n    with _next_device_number_lock:\n        self._name = '{}/device:CUSTOM:{}'.format(ctx.host_address_space(), _next_device_number)\n        _next_device_number += 1\n    device, device_info = _pywrap_parallel_device.GetParallelDeviceCapsules(self._name, self.components)\n    context.register_custom_device(device, self._name, device_info)\n    self._device_ids = None\n    self._device_scope = None\n    _all_parallel_devices[self._name] = self",
    "docstring": "Creates a device which executes operations in parallel on . Args: components: A list of device names. Each operation executed on the returned device executes on these component devices. Returns: A string with the name of the newly created device.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\parallel_device\\parallel_device.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:components arguments arg arg Assign Call Call If Raise Call Assign Call With Assign Call Call Assign Call Call Assign Assign Assign"
  },
  {
    "library": "tensorflow",
    "name": "_convert_dynamic_dimension_to_zero",
    "source_code": "def _convert_dynamic_dimension_to_zero(shape):\n    if shape.rank is None:\n        return shape\n    return tensor_shape.TensorShape([0 if d is None else d for d in shape.as_list()])",
    "docstring": "Converts dynamic dimensions in to zero. The fake params created to match the intermediates captured in other branches could have dynamic dimensions. But the XLA shape is not able to handle dynamic dimensions in TF TensorShape. Setting the dynamic dimensions to size zero will help avoid failing safety checks in bridge. When XLA DynamicConditional op reconciles branch differences, XLA will replace the dimension size 0 with a bounded dimension determined from the shape of real argument in the other branch. Note: Rank unknown shapes are returned as they are. Args: shape: The TensorShape of fake param. Returns: The new TensorShape with dynamic dimensions set to zero.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\cond_v2.py",
    "ast_data": "FunctionDef name:_convert_dynamic_dimension_to_zero arg:shape arguments arg If Compare Return return:yes Return return:yes Call Compare Call"
  },
  {
    "library": "numpy",
    "name": "dtype_to_descr",
    "source_code": "@set_module('numpy.lib.format')\ndef dtype_to_descr(dtype):\n    new_dtype = drop_metadata(dtype)\n    if new_dtype is not dtype:\n        warnings.warn('metadata on a dtype is not saved to an npy/npz. Use another format (such as pickle) to store it.', UserWarning, stacklevel=2)\n    dtype = new_dtype\n    if dtype.names is not None:\n        return dtype.descr\n    elif not type(dtype)._legacy:\n        warnings.warn('Custom dtypes are saved as python objects using the pickle protocol. Loading this file requires allow_pickle=True to be set.', UserWarning, stacklevel=2)\n        return '|O'\n    else:\n        return dtype.str",
    "docstring": "Get a serializable descriptor from the dtype. The .descr attribute of a dtype object cannot be round-tripped through the dtype() constructor. Simple types, like dtype('float32'), have a descr which looks like a record array with one field with '' as a name. The dtype() constructor interprets this as a request to give a default name. Instead, we construct descriptor that can be passed to dtype(). Parameters ---------- dtype : dtype The dtype of the array that will be written to disk. Returns ------- descr : object An object that can be passed to in order to replicate the input dtype.",
    "type": "function",
    "file_path": "numpy\\numpy\\lib\\_format_impl.py",
    "ast_data": "FunctionDef name:dtype_to_descr arg:dtype arguments arg Assign Call If Compare Call Assign If Compare Return return:yes If Call Call Return return:yes Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "batch_norm",
    "source_code": "def batch_norm(input: Tensor, running_mean: Optional[Tensor], running_var: Optional[Tensor], weight: Optional[Tensor]=None, bias: Optional[Tensor]=None, training: bool=False, momentum: float=0.1, eps: float=1e-05) -> Tensor:\n    if has_torch_function_variadic(input, running_mean, running_var, weight, bias):\n        return handle_torch_function(batch_norm, (input, running_mean, running_var, weight, bias), input, running_mean, running_var, weight=weight, bias=bias, training=training, momentum=momentum, eps=eps)\n    if training:\n        _verify_batch_size(input.size())\n    return torch.batch_norm(input, weight, bias, running_mean, running_var, training, momentum, eps, torch.backends.cudnn.enabled)",
    "docstring": "Apply Batch Normalization for each channel across a batch of data. See :class:, :class:, :class: for details.",
    "type": "function",
    "file_path": "pytorch\\torch\\nn\\functional.py",
    "ast_data": "FunctionDef name:batch_norm arg:input arg:running_mean arg:running_var arg:weight arg:bias arg:training arg:momentum arg:eps arguments arg arg arg arg arg arg arg arg If Call Return return:yes Call If Call Call Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "get_metadata_routing",
    "source_code": "def get_metadata_routing(self):\n    router = MetadataRouter(owner=self.__class__.__name__)\n    router.add(estimator=self.estimator, method_mapping=MethodMapping().add(caller='fit', callee='fit'))\n    router.add(splitter=check_cv(self.cv), method_mapping=MethodMapping().add(caller='fit', callee='split'))\n    router.add(scorer=self._get_scorer(), method_mapping=MethodMapping().add(caller='fit', callee='score').add(caller='score', callee='score'))\n    return router",
    "docstring": "Get metadata routing of this object. Please check :ref: on how the routing mechanism works. .. versionadded:: 1.6 Returns ------- routing : MetadataRouter A :class: encapsulating routing information.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\feature_selection\\_rfe.py",
    "ast_data": "FunctionDef name:get_metadata_routing arg:self arguments arg Assign Call Call Call Call Call Call Call Call Call Call Call Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "unary_elementwise_apis",
    "source_code": "def unary_elementwise_apis():\n    return tuple(_UNARY_ELEMENTWISE_APIS)",
    "docstring": "Returns a list of APIs that have been registered as unary elementwise.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\util\\dispatch.py",
    "ast_data": "FunctionDef name:unary_elementwise_apis arguments Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "full_tensor",
    "source_code": "def full_tensor(self, *, grad_placements: Optional[Sequence[Placement]]=None) -> torch.Tensor:\n    redist_res = self.redistribute(placements=[Replicate()] * self.device_mesh.ndim, async_op=False)\n    return _ToTorchTensor.apply(redist_res, grad_placements)",
    "docstring": "Return the full tensor of this DTensor. It will perform necessary collectives to gather the local tensors from other ranks in its DeviceMesh and concatenate them together. It's a syntatic sugar of the following code: `Placementfull_tensortorch.Tensor` is differentiable.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\tensor\\_api.py",
    "ast_data": "FunctionDef name:full_tensor arg:self arguments arg arg Assign Call Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "_pre_trace_quant_model",
    "source_code": "def _pre_trace_quant_model(model, args):\n    if any((hasattr(m, '_packed_params') for m in getattr(model, 'modules', list)())) or any((getattr(arg, 'is_quantized', False) for arg in args)):\n        return torch.jit.trace(model, args)\n    return model",
    "docstring": "Returns if model is quantized. Otherwise do nothing and return original model. This is due to",
    "type": "function",
    "file_path": "pytorch\\torch\\onnx\\utils.py",
    "ast_data": "FunctionDef name:_pre_trace_quant_model arg:model arg:args arguments arg arg If BoolOp Call Call Call Call Call Call Return return:yes Call Return return:yes"
  },
  {
    "library": "numpy",
    "name": "maximum_sctype",
    "source_code": "@set_module('numpy')\ndef maximum_sctype(t):\n    warnings.warn('`maximum_sctype` is deprecated. Use an explicit dtype like int64 or float64 instead. (deprecated in NumPy 2.0)', DeprecationWarning, stacklevel=2)\n    g = obj2sctype(t)\n    if g is None:\n        return t\n    t = g\n    base = _kind_name(dtype(t))\n    if base in sctypes:\n        return sctypes[base][-1]\n    else:\n        return t",
    "docstring": "Return the scalar type of highest precision of the same kind as the input. .. deprecated:: 2.0 Use an explicit dtype like int64 or float64 instead. Parameters ---------- t : dtype or dtype specifier The input data type. This can be a object or an object that is convertible to a . Returns ------- out : dtype The highest precision data type of the same kind () as . See Also -------- obj2sctype, mintypecode, sctype2char dtype Examples -------- >>> from numpy._core.numerictypes import maximum_sctype >>> maximum_sctype(int) >>> maximum_sctype(np.uint8) >>> maximum_sctype(complex) # may vary >>> maximum_sctype(str) >>> maximum_sctype('i2') >>> maximum_sctype('f4') # may vary",
    "type": "function",
    "file_path": "numpy\\numpy\\_core\\numerictypes.py",
    "ast_data": "FunctionDef name:maximum_sctype arg:t arguments arg Call Assign Call If Compare Return return:yes Assign Assign Call Call If Compare Return return:yes Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "under_tpu_inference_context",
    "source_code": "def under_tpu_inference_context() -> bool:\n    graph = ops.get_default_graph()\n    while graph:\n        context = graph._get_control_flow_context()\n        while context:\n            if isinstance(context, _TPUInferenceContext):\n                return True\n            context = context.outer_context\n        if isinstance(graph, function._FuncGraph):\n            graph = graph._outer_graph\n        elif isinstance(graph, func_graph.FuncGraph):\n            graph = graph.outer_graph\n        else:\n            return False\n    return False",
    "docstring": "Check if it is currently under .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\tpu\\tpu.py",
    "ast_data": "FunctionDef name:under_tpu_inference_context arguments Assign Call While Assign Call While If Call Return return:yes Assign If Call Assign If Call Assign Return return:yes Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "wrap_function",
    "source_code": "@tf_export(v1=['wrap_function'])\ndef wrap_function(fn, signature, name=None):\n    holder = VariableHolder(fn)\n    func_graph_name = 'wrapped_function'\n    if name is not None:\n        func_graph_name = 'wrapped_function_' + name\n    return WrappedFunction(func_graph.func_graph_from_py_func(func_graph_name, holder, args=None, kwargs=None, signature=signature, add_control_dependencies=False, collections={}), variable_holder=holder, signature=signature)",
    "docstring": "Wraps the TF 1.x function fn into a graph function. The python function will be called once with symbolic arguments specified in the , traced, and turned into a graph function. Any variables created by will be owned by the object returned by . The resulting graph function can be called with tensors which match the signature. Both and create a callable TensorFlow graph. But while runs all stateful operations (e.g. ) and sequences operations to provide the same semantics as eager execution, is closer to the behavior of in TensorFlow 1.x. It will not run any operations unless they are required to compute the function's outputs, either through a data dependency or a control dependency. Nor will it sequence operations. Unlike , will only trace the Python function once. As with placeholders in TF 1.x, shapes and dtypes must be provided to 's argument. Since it is only traced once, variables and state may be created inside the function and owned by the function wrapper object. Args: fn: python function to be wrapped signature: the placeholder and python arguments to be passed to the wrapped function name: Optional. The name of the function. Returns: the wrapped graph function.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\wrap_function.py",
    "ast_data": "FunctionDef name:wrap_function arg:fn arg:signature arg:name arguments arg arg arg Assign Call Assign If Compare Assign Return return:yes Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "config_flag",
    "source_code": "def config_flag(name: str) -> Callable[[Match], Any]:\n\n    def flag_check(match: Match) -> Any:\n        return getattr(config, name)\n    return flag_check",
    "docstring": "Function for extra_check to put pass behind a flag",
    "type": "function",
    "file_path": "pytorch\\torch\\_inductor\\pattern_matcher.py",
    "ast_data": "FunctionDef name:config_flag arg:name arguments arg FunctionDef name:flag_check arg:match arguments arg Return return:yes Call Return return:yes"
  },
  {
    "library": "kornia",
    "name": "keypoints_to_grid",
    "source_code": "def keypoints_to_grid(keypoints: Tensor, img_size: Tuple[int, int]) -> Tensor:\n    KORNIA_CHECK_SHAPE(keypoints, ['N', '2'])\n    n_points = len(keypoints)\n    grid_points = normalize_pixel_coordinates(keypoints[:, [1, 0]], img_size[0], img_size[1])\n    grid_points = grid_points.view(-1, n_points, 1, 2)\n    return grid_points",
    "docstring": "Convert a list of keypoints into a grid in [-1, 1]² that can be used in torch.nn.functional.interpolate. Args: keypoints: a tensor [N, 2] of N keypoints (ij coordinates convention). img_size: the original image size (H, W)",
    "type": "function",
    "file_path": "kornia\\kornia\\feature\\sold2\\sold2.py",
    "ast_data": "FunctionDef name:keypoints_to_grid arg:keypoints arg:img_size arguments arg arg Call Assign Call Assign Call Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "collect_constant_renames",
    "source_code": "def collect_constant_renames():\n    renames = set()\n    for module in sys.modules.copy().values():\n        try:\n            constants_v1_list = tf_export.get_v1_constants(module)\n            constants_v2_list = tf_export.get_v2_constants(module)\n        except:\n            pass\n        constants_v1 = {constant_name: api_names for api_names, constant_name in constants_v1_list}\n        constants_v2 = {constant_name: api_names for api_names, constant_name in constants_v2_list}\n        for constant_name, api_names_v1 in constants_v1.items():\n            api_names_v2 = constants_v2[constant_name]\n            for name in api_names_v1:\n                if name not in api_names_v2:\n                    renames.add((name, get_canonical_name(api_names_v2, name)))\n    return renames",
    "docstring": "Looks for constants that need to be renamed in TF 2.0. Returns: Set of tuples of the form (current name, new name).",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\tools\\compatibility\\update\\generate_v2_renames_map.py",
    "ast_data": "FunctionDef name:collect_constant_renames arguments Assign Call For Call Call Try Assign Call Assign Call ExceptHandler Assign Assign For Call Assign For If Compare Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "all_gather_indexed_slices",
    "source_code": "def all_gather_indexed_slices(all_gather_fn: Callable[[core.TensorLike, Optional[collective_util.Options]], core.Tensor]) -> indexed_slices.IndexedSlices:\n    all_values = all_gather_fn(input_slices.values, options)\n    if options.implementation == collective_util.CommunicationImplementation.NCCL:\n        control = [all_values]\n    else:\n        control = []\n    with ops.control_dependencies(control):\n        all_indices = all_gather_fn(input_slices.indices, options)\n    return indexed_slices.IndexedSlices(values=all_values, indices=all_indices, dense_shape=input_slices.dense_shape)",
    "docstring": "Use all_gather_fn to aggregate .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\cross_device_utils.py",
    "ast_data": "FunctionDef name:all_gather_indexed_slices arg:all_gather_fn arguments arg Assign Call If Compare Assign Assign With Call Assign Call Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "__init__",
    "source_code": "def __init__(self, base=1, month=1, day=1, tz=None):\n    rule = rrulewrapper(YEARLY, interval=base, bymonth=month, bymonthday=day, **self.hms0d)\n    super().__init__(rule, tz=tz)\n    self.base = ticker._Edge_integer(base, 0)",
    "docstring": "Parameters ---------- base : int, default: 1 Mark ticks every *base* years. month : int, default: 1 The month on which to place the ticks, starting from 1. Default is January. day : int, default: 1 The day on which to place the ticks. tz : str or , default: :rc: Ticks timezone. If a string, *tz* is passed to .",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\dates.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:base arg:month arg:day arg:tz arguments arg arg arg arg arg Assign Call Call Call Assign Call"
  },
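The initializer above matches `matplotlib.dates.YearLocator`; a short usage sketch under that assumption:

```python
import datetime

import matplotlib.dates as mdates
import matplotlib.pyplot as plt

fig, ax = plt.subplots()
dates = [datetime.date(year, 6, 1) for year in range(2000, 2021)]
ax.plot(dates, range(len(dates)))
# Place a tick every 5 years, on January 1.
ax.xaxis.set_major_locator(mdates.YearLocator(base=5, month=1, day=1))
ax.xaxis.set_major_formatter(mdates.DateFormatter("%Y"))
```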
  {
    "library": "sphinx",
    "name": "collect_node_names",
    "source_code": "def collect_node_names(self) -> None:\n\n    def add_node_name(name: str) -> str:\n        node_id = self.escape_id(name)\n        nth, suffix = (1, '')\n        while node_id + suffix in self.written_ids or node_id + suffix in self.node_names:\n            nth += 1\n            suffix = '<%s>' % nth\n        node_id += suffix\n        self.written_ids.add(node_id)\n        self.node_names[node_id] = name\n        return node_id\n    self.document['node_name'] = 'Top'\n    add_node_name('Top')\n    add_node_name('top')\n    self.indices = [(add_node_name(name), content) for name, content in self.indices]\n    for section in self.document.findall(nodes.section):\n        title = cast('nodes.TextElement', section.next_node(nodes.Titular))\n        name = title.astext() if title else '<untitled>'\n        section['node_name'] = add_node_name(name)",
    "docstring": "Generates a unique id for each section. Assigns the attribute `` to each section.",
    "type": "method",
    "file_path": "sphinx\\sphinx\\writers\\texinfo.py",
    "ast_data": "FunctionDef name:collect_node_names arg:self arguments arg FunctionDef name:add_node_name arg:name arguments arg Assign Call Assign While BoolOp Compare Compare Assign Call Assign Return return:yes Assign Call Call Assign Call For Call Assign Call Call Assign Call Assign Call"
  },
  {
    "library": "kornia",
    "name": "SolarizeAdd",
    "source_code": "class SolarizeAdd(OperationBase):\n\n    def __init__(self, initial_magnitude: Optional[float]=0.0, initial_probability: float=0.5, magnitude_range: Tuple[float, float]=(-0.3, 0.3), temperature: float=0.1, symmetric_megnitude: bool=False) -> None:\n        super().__init__(K.RandomSolarize(thresholds=0.5, additions=magnitude_range, same_on_batch=False, p=initial_probability), initial_magnitude=[('additions', initial_magnitude)], temperature=temperature, symmetric_megnitude=symmetric_megnitude, gradient_estimator=STEFunction)",
    "docstring": "Apply solarize-addition operation with a fixed thresholds of 0.5. Args: initial_magnitude: the initial magnitude. initial_probability: the initial probability. If None, the augmentation will be randomly applied according to he augmentation sampling range. magnitude_range: the sampling range for random sampling and clamping the optimized magnitude. temperature: temperature for RelaxedBernoulli distribution used during training. symmetric_megnitude: if to randomly assign the magnitude as negative or not. Note: STE gradient estimator applied for back propagation.",
    "type": "class",
    "file_path": "kornia\\kornia\\augmentation\\auto\\operations\\ops.py",
    "ast_data": "ClassDef name:SolarizeAdd FunctionDef name:__init__ arg:self arg:initial_magnitude arg:initial_probability arg:magnitude_range arg:temperature arg:symmetric_megnitude arguments arg arg arg arg arg arg Call Call Call"
  },
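SolarizeAdd wraps `kornia.augmentation.RandomSolarize` with a fixed 0.5 threshold; a sketch of the wrapped op on its own:

```python
import torch
import kornia.augmentation as K

# Solarize with a fixed 0.5 threshold and an additive term sampled
# from (-0.3, 0.3), applied with probability 0.5 per image.
aug = K.RandomSolarize(thresholds=0.5, additions=(-0.3, 0.3), p=0.5)
images = torch.rand(2, 3, 8, 8)
out = aug(images)
assert out.shape == images.shape
```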
  {
    "library": "django",
    "name": "items",
    "source_code": "def items(self):\n    for key in self:\n        yield (key, self[key])",
    "docstring": "Yield (key, value) pairs, where value is the last item in the list associated with the key.",
    "type": "method",
    "file_path": "django\\django\\utils\\datastructures.py",
    "ast_data": "FunctionDef name:items arg:self arguments arg For"
  },
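The record above is `MultiValueDict.items`; a small usage check showing that the yielded value is the last item of each key's list (the same value `d[key]` returns):

```python
from django.utils.datastructures import MultiValueDict

d = MultiValueDict({"name": ["Adrian", "Simon"], "position": ["Developer"]})
assert d["name"] == "Simon"  # indexing returns the last item
assert dict(d.items()) == {"name": "Simon", "position": "Developer"}
assert d.getlist("name") == ["Adrian", "Simon"]  # full list is still available
```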
  {
    "library": "tensorflow",
    "name": "tf_output",
    "source_code": "def tf_output(c_op, index):\n    ret = c_api.TF_Output()\n    ret.oper = c_op\n    ret.index = index\n    return ret",
    "docstring": "Returns a wrapped TF_Output with specified operation and index. Args: c_op: wrapped TF_Operation index: integer Returns: Wrapped TF_Output",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\c_api_util.py",
    "ast_data": "FunctionDef name:tf_output arg:c_op arg:index arguments arg arg Assign Call Assign Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_get_scope",
    "source_code": "def _get_scope(node_name):\n    if not node_name:\n        raise ValueError(f'Node name cannot be empty or None. Received: {node_name}.')\n    if node_name.startswith('^'):\n        node_name = node_name[1:]\n    if '/' in node_name:\n        scope, _ = node_name.rsplit('/', 1)\n        return scope\n    return ''",
    "docstring": "Extract the scope name from a node name. The scope name is everything before the final slash, not including any ^ prefix denoting a control dependency. Args: node_name: the full name of an Op or a Tensor in the graph. Returns: The deepest named scope containing the node. Raises: ValueError: if tensor_name is None or empty",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\meta_graph.py",
    "ast_data": "FunctionDef name:_get_scope arg:node_name arguments arg If Raise Call If Call Assign If Compare Assign Call Return return:yes Return return:yes"
  },
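A standalone behavior sketch of `_get_scope` (re-stated here rather than imported, since it is private to `meta_graph.py`):

```python
def get_scope(node_name):
    # Mirrors _get_scope above: strip a leading '^' control-dependency
    # marker, then everything before the final '/' is the scope.
    if not node_name:
        raise ValueError(f"Node name cannot be empty or None. Received: {node_name}.")
    if node_name.startswith("^"):
        node_name = node_name[1:]
    if "/" in node_name:
        scope, _ = node_name.rsplit("/", 1)
        return scope
    return ""

assert get_scope("foo/bar/matmul_1") == "foo/bar"
assert get_scope("^foo/const") == "foo"
assert get_scope("const") == ""
```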
  {
    "library": "tensorflow",
    "name": "_prune_removed_feed_nodes",
    "source_code": "def _prune_removed_feed_nodes(signature_def, graph_def):\n    node_names = set([n.name for n in graph_def.node])\n    new_signature_def = meta_graph_pb2.SignatureDef()\n    new_signature_def.CopyFrom(signature_def)\n    for k, v in signature_def.inputs.items():\n        tensor_name, _ = _parse_tensor_name(v.name)\n        if tensor_name not in node_names:\n            logging.warn(\"Signature input key '{}', tensor name '{}', has been pruned while freezing the graph.  Removing it from the compiled signatures.\".format(k, tensor_name))\n            del new_signature_def.inputs[k]\n    return new_signature_def",
    "docstring": "Identify the inputs in the signature no longer in graph_def, prune them. Args: signature_def: A instance. graph_def: A instance. Returns: A new pruned .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\tools\\saved_model_aot_compile.py",
    "ast_data": "FunctionDef name:_prune_removed_feed_nodes arg:signature_def arg:graph_def arguments arg arg Assign Call Assign Call Call For Call Assign Call If Compare Call Call Return return:yes"
  },
  {
    "library": "kornia",
    "name": "_validate_input_dtype",
    "source_code": "def _validate_input_dtype(input: Tensor, accepted_dtypes: List[torch.dtype]) -> None:\n    if input.dtype not in accepted_dtypes:\n        raise TypeError(f'Expected input of {accepted_dtypes}. Got {input.dtype}')",
    "docstring": "Check if the dtype of the input tensor is in the range of accepted_dtypes. Args: input: Tensor accepted_dtypes: List. e.g. [torch.float32, torch.float64]",
    "type": "function",
    "file_path": "kornia\\kornia\\augmentation\\utils\\helpers.py",
    "ast_data": "FunctionDef name:_validate_input_dtype arg:input arg:accepted_dtypes arguments arg arg If Compare Raise Call"
  },
  {
    "library": "pytorch",
    "name": "_fqns_in_shard",
    "source_code": "@property\ndef _fqns_in_shard(self) -> list[str]:\n    fqns_in_shard: list[str] = []\n    for fqn, shard_param_info in zip(self.flat_param._fqns, self.flat_param._shard_param_infos):\n        if shard_param_info.in_shard:\n            fqns_in_shard.append(fqn)\n    return fqns_in_shard",
    "docstring": "Return the FQNs of the parameters present in this rank's shard.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\fsdp\\_flat_param.py",
    "ast_data": "FunctionDef name:_fqns_in_shard arg:self arguments arg For Call If Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "set_default_alignment",
    "source_code": "def set_default_alignment(self, d):\n    va, ha = _api.check_getitem(self._default_alignments, d=d)\n    self.set_va(va)\n    self.set_ha(ha)",
    "docstring": "Set the default alignment. See for details. Parameters ---------- d : {\"left\", \"bottom\", \"right\", \"top\"}",
    "type": "method",
    "file_path": "matplotlib\\lib\\mpl_toolkits\\axisartist\\axis_artist.py",
    "ast_data": "FunctionDef name:set_default_alignment arg:self arg:d arguments arg arg Assign Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "get_seed",
    "source_code": "@tf_export(v1=['random.get_seed', 'get_seed'])\n@deprecation.deprecated_endpoints('get_seed')\ndef get_seed(op_seed):\n    eager = context.executing_eagerly()\n    if eager:\n        global_seed = context.global_seed()\n    else:\n        global_seed = ops.get_default_graph().seed\n    if global_seed is not None:\n        if op_seed is None:\n            if hasattr(ops.get_default_graph(), '_seed_used'):\n                ops.get_default_graph()._seed_used = True\n            if eager:\n                op_seed = context.internal_operation_seed()\n            else:\n                op_seed = _graph_to_seed_dict.setdefault(ops.get_default_graph(), 0)\n                _graph_to_seed_dict[ops.get_default_graph()] += 1\n        seeds = (_truncate_seed(global_seed), _truncate_seed(op_seed))\n    elif op_seed is not None:\n        seeds = (DEFAULT_GRAPH_SEED, _truncate_seed(op_seed))\n    else:\n        seeds = (None, None)\n    if seeds == (None, None) and config.is_op_determinism_enabled():\n        raise RuntimeError('Random ops require a seed to be set when determinism is enabled. Please set a seed before running the op, e.g. by calling tf.random.set_seed(1).')\n    if seeds == (0, 0):\n        return (0, _MAXINT32)\n    return seeds",
    "docstring": "Returns the local seeds an operation should use given an op-specific seed. Given operation-specific seed, , this helper function returns two seeds derived from graph-level and op-level seeds. Many random operations internally use the two seeds to allow user to change the seed globally for a graph, or for only specific operations. For details on how the graph-level seed interacts with op seeds, see . Args: op_seed: integer. Returns: A tuple of two integers that should be used for the local seed of this operation.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\random_seed.py",
    "ast_data": "FunctionDef name:get_seed arg:op_seed arguments arg Assign Call If Assign Call Assign Call If Compare If Compare If Call Call Assign Call If Assign Call Assign Call Call Call Assign Call Call If Compare Assign Call Assign If BoolOp Compare Call Raise Call If Compare Return return:yes Return return:yes Call Call"
  },
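A usage sketch: once a graph-level seed is set, `get_seed` returns a deterministic pair for a given op-level seed:

```python
import tensorflow as tf

tf.compat.v1.random.set_random_seed(1234)   # graph-level seed
pair_a = tf.compat.v1.random.get_seed(5)     # (graph seed, op seed) pair
pair_b = tf.compat.v1.random.get_seed(5)
assert pair_a == pair_b                      # deterministic for a fixed op seed
assert all(s is not None for s in pair_a)
```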
  {
    "library": "kornia",
    "name": "to_tensor",
    "source_code": "def to_tensor(self, x: Any) -> Tensor:\n    if isinstance(x, (str,)):\n        from kornia.io import ImageLoadType, load_image\n        return load_image(x, ImageLoadType.UNCHANGED) / 255\n    if isinstance(x, (Tensor,)):\n        return x\n    if isinstance(x, (np.ndarray,)):\n        from kornia.utils.image import image_to_tensor\n        return image_to_tensor(x) / 255\n    if isinstance(x, (Image.Image,)):\n        return from_numpy(np.array(x)).permute(2, 0, 1).float() / 255\n    raise TypeError('Input type not supported')",
    "docstring": "Convert input to tensor. Supports image path, numpy array, PIL image, and raw tensor. Args: x: The input to convert. Returns: Tensor: The converted tensor.",
    "type": "method",
    "file_path": "kornia\\kornia\\core\\mixin\\image_module.py",
    "ast_data": "FunctionDef name:to_tensor arg:self arg:x arguments arg arg If Call Return return:yes Call If Call Return return:yes If Call Return return:yes Call If Call Return return:yes Call Call Call Call Raise Call"
  },
  {
    "library": "sphinx",
    "name": "_evaluate_forwardref",
    "source_code": "def _evaluate_forwardref(ref: ForwardRef, globalns: dict[str, Any] | None, localns: dict[str, Any] | None) -> Any:\n    if sys.version_info[:2] >= (3, 14):\n        return typing.evaluate_forward_ref(ref, globals=globalns, locals=localns)\n    if sys.version_info >= (3, 12, 4):\n        return ref._evaluate(globalns, localns, type_params=(), recursive_guard=frozenset())\n    return ref._evaluate(globalns, localns, recursive_guard=frozenset())",
    "docstring": "Evaluate a forward reference.",
    "type": "function",
    "file_path": "sphinx\\sphinx\\util\\inspect.py",
    "ast_data": "FunctionDef name:_evaluate_forwardref arg:ref arg:globalns arg:localns arguments arg arg arg If Compare Return return:yes Call If Compare Return return:yes Call Call Return return:yes Call Call"
  },
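A self-contained sketch of the same version-gated dispatch on a concrete `ForwardRef`; the private `_evaluate` signatures are exactly the ones the record branches on:

```python
import sys
import typing
from typing import ForwardRef

ref = ForwardRef("list[int]")
if sys.version_info[:2] >= (3, 14):
    resolved = typing.evaluate_forward_ref(ref, globals=globals(), locals=None)
elif sys.version_info >= (3, 12, 4):
    # 3.12.4 added the type_params parameter to ForwardRef._evaluate.
    resolved = ref._evaluate(globals(), None, type_params=(), recursive_guard=frozenset())
else:
    resolved = ref._evaluate(globals(), None, recursive_guard=frozenset())
assert resolved == list[int]
```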
  {
    "library": "matplotlib",
    "name": "check_no_collapsed_axes",
    "source_code": "def check_no_collapsed_axes(layoutgrids, fig):\n    for sfig in fig.subfigs:\n        ok = check_no_collapsed_axes(layoutgrids, sfig)\n        if not ok:\n            return False\n    for ax in fig.axes:\n        gs = ax.get_gridspec()\n        if gs in layoutgrids:\n            lg = layoutgrids[gs]\n            for i in range(gs.nrows):\n                for j in range(gs.ncols):\n                    bb = lg.get_inner_bbox(i, j)\n                    if bb.width <= 0 or bb.height <= 0:\n                        return False\n    return True",
    "docstring": "Check that no Axes have collapsed to zero size.",
    "type": "function",
    "file_path": "matplotlib\\lib\\matplotlib\\_constrained_layout.py",
    "ast_data": "FunctionDef name:check_no_collapsed_axes arg:layoutgrids arg:fig arguments arg arg For Assign Call If Return return:yes For Assign Call If Compare Assign For Call For Call Assign Call If BoolOp Compare Compare Return return:yes Return return:yes"
  },
  {
    "library": "django",
    "name": "__len__",
    "source_code": "def __len__(self):\n    return self.num_geom",
    "docstring": "Return the number of geometries in this Collection.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\geos\\collections.py",
    "ast_data": "FunctionDef name:__len__ arg:self arguments arg Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "safe_sqr",
    "source_code": "def safe_sqr(X, *, copy=True):\n    X = check_array(X, accept_sparse=['csr', 'csc', 'coo'], ensure_2d=False)\n    if sparse.issparse(X):\n        if copy:\n            X = X.copy()\n        X.data **= 2\n    elif copy:\n        X = X ** 2\n    else:\n        X **= 2\n    return X",
    "docstring": "Element wise squaring of array-likes and sparse matrices. Parameters ---------- X : {array-like, ndarray, sparse matrix} copy : bool, default=True Whether to create a copy of X and operate on it or to perform inplace computation (default behaviour). Returns ------- X ** 2 : element wise square Return the element-wise square of the input. Examples -------- >>> from sklearn.utils import safe_sqr >>> safe_sqr([1, 2, 3]) array([1, 4, 9])",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\utils\\extmath.py",
    "ast_data": "FunctionDef name:safe_sqr arg:X arguments arg arg Assign Call If Call If Assign Call If Assign Return return:yes"
  },
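A sparse-input usage check for `safe_sqr`, showing that the `copy=True` default leaves the input untouched:

```python
import numpy as np
from scipy import sparse
from sklearn.utils import safe_sqr

X = sparse.csr_matrix(np.array([[1.0, -2.0], [0.0, 3.0]]))
X2 = safe_sqr(X)  # copy=True by default, so X is not modified in place
assert np.array_equal(X2.toarray(), [[1.0, 4.0], [0.0, 9.0]])
assert np.array_equal(X.toarray(), [[1.0, -2.0], [0.0, 3.0]])
```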
  {
    "library": "sphinx",
    "name": "TitleCollector",
    "source_code": "class TitleCollector(EnvironmentCollector):\n\n    def clear_doc(self, app: Sphinx, env: BuildEnvironment, docname: str) -> None:\n        env.titles.pop(docname, None)\n        env.longtitles.pop(docname, None)\n\n    def merge_other(self, app: Sphinx, env: BuildEnvironment, docnames: Set[str], other: BuildEnvironment) -> None:\n        for docname in docnames:\n            env.titles[docname] = other.titles[docname]\n            env.longtitles[docname] = other.longtitles[docname]\n\n    def process_doc(self, app: Sphinx, doctree: nodes.document) -> None:\n        titlenode = nodes.title()\n        longtitlenode = titlenode\n        if 'title' in doctree:\n            longtitlenode = nodes.title()\n            longtitlenode += nodes.Text(doctree['title'])\n        for node in doctree.findall(nodes.section):\n            visitor = SphinxContentsFilter(doctree)\n            node[0].walkabout(visitor)\n            titlenode += visitor.get_entry_text()\n            break\n        else:\n            titlenode += nodes.Text(doctree.get('title', '<no title>'))\n        app.env.titles[app.env.docname] = titlenode\n        app.env.longtitles[app.env.docname] = longtitlenode",
    "docstring": "title collector for sphinx.environment.",
    "type": "class",
    "file_path": "sphinx\\sphinx\\environment\\collectors\\title.py",
    "ast_data": "ClassDef name:TitleCollector FunctionDef name:clear_doc arg:self arg:app arg:env arg:docname arguments arg arg arg arg Call Call FunctionDef name:merge_other arg:self arg:app arg:env arg:docnames arg:other arguments arg arg arg arg arg For Assign Assign FunctionDef name:process_doc arg:self arg:app arg:doctree arguments arg arg arg Assign Call Assign If Compare Assign Call Call For Call Assign Call Call Call Call Call Assign Assign"
  },
  {
    "library": "tensorflow",
    "name": "invalid_creator_scope",
    "source_code": "def invalid_creator_scope(*unused_args, **unused_kwds):\n    raise ValueError('tf.function only supports singleton tf.Variables created on the first call. Make sure the tf.Variable is only created once or created outside tf.function. See https://www.tensorflow.org/guide/function#creating_tfvariables for more information.')",
    "docstring": "Disables variable creation.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\polymorphic_function\\polymorphic_function.py",
    "ast_data": "FunctionDef name:invalid_creator_scope arguments arg arg Raise Call"
  },
  {
    "library": "pygame",
    "name": "update",
    "source_code": "def update(self, *args, **kwargs):\n    for sprite in self.sprites():\n        sprite.update(*args, **kwargs)",
    "docstring": "call the update method of every member sprite Group.update(*args, **kwargs): return None Calls the update method of every member sprite. All arguments that were passed to this method are passed to the Sprite update function.",
    "type": "method",
    "file_path": "pygame\\src_py\\sprite.py",
    "ast_data": "FunctionDef name:update arg:self arguments arg arg arg For Call Call"
  },
  {
    "library": "numpy",
    "name": "nansum",
    "source_code": "@array_function_dispatch(_nansum_dispatcher)\ndef nansum(a, axis=None, dtype=None, out=None, keepdims=np._NoValue, initial=np._NoValue, where=np._NoValue):\n    a, mask = _replace_nan(a, 0)\n    return np.sum(a, axis=axis, dtype=dtype, out=out, keepdims=keepdims, initial=initial, where=where)",
    "docstring": "Return the sum of array elements over a given axis treating Not a Numbers (NaNs) as zero. In NumPy versions >> import numpy as np >>> np.nansum(1) 1 >>> np.nansum([1]) 1 >>> np.nansum([1, np.nan]) 1.0 >>> a = np.array([[1, 1], [1, np.nan]]) >>> np.nansum(a) 3.0 >>> np.nansum(a, axis=0) array([2., 1.]) >>> np.nansum([1, np.nan, np.inf]) inf >>> np.nansum([1, np.nan, -np.inf]) -inf >>> from numpy.testing import suppress_warnings >>> with np.errstate(invalid=\"ignore\"): ... np.nansum([1, np.nan, np.inf, -np.inf]) # both +/- infinity present np.float64(nan)",
    "type": "function",
    "file_path": "numpy\\numpy\\lib\\_nanfunctions_impl.py",
    "ast_data": "FunctionDef name:nansum arg:a arg:axis arg:dtype arg:out arg:keepdims arg:initial arg:where arguments arg arg arg arg arg arg arg Assign Call Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "expand_range_expression",
    "source_code": "def expand_range_expression(range_exp):\n    for part in range_exp.split(','):\n        sub_range = part.split('-')\n        if len(sub_range) == 1:\n            sub_range = sub_range * 2\n        else:\n            assert len(sub_range) == 2\n        num_digits = len(sub_range[0])\n        for i in range(int(sub_range[0]), int(sub_range[1]) + 1):\n            yield str(i).zfill(num_digits)",
    "docstring": "Expand a range expression like '3-5' to values 3,4,5.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\cluster_resolver\\slurm_cluster_resolver.py",
    "ast_data": "FunctionDef name:expand_range_expression arg:range_exp arguments arg For Call Assign Call If Compare Call Assign Compare Call Assign Call For Call Call Call Call Call"
  },
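A standalone copy of the generator (it is module-private to the SLURM resolver) with checks showing range expansion and zero-padding:

```python
def expand_range_expression(range_exp):
    # e.g. "03-05,9" -> "03", "04", "05", "9" (padding follows the first bound)
    for part in range_exp.split(","):
        sub_range = part.split("-")
        if len(sub_range) == 1:
            sub_range = sub_range * 2
        num_digits = len(sub_range[0])
        for i in range(int(sub_range[0]), int(sub_range[1]) + 1):
            yield str(i).zfill(num_digits)

assert list(expand_range_expression("3-5")) == ["3", "4", "5"]
assert list(expand_range_expression("03-05,9")) == ["03", "04", "05", "9"]
```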
  {
    "library": "tensorflow",
    "name": "kwarg_only",
    "source_code": "def kwarg_only(f: Any) -> Any:\n    f_argspec = tf_inspect.getfullargspec(f)\n\n    def wrapper(*args, **kwargs):\n        if args:\n            raise TypeError('{f} only takes keyword args (possible keys: {kwargs}). Please pass these args as kwargs instead.'.format(f=f.__name__, kwargs=f_argspec.args))\n        return f(**kwargs)\n    return tf_decorator.make_decorator(f, wrapper, decorator_argspec=f_argspec)",
    "docstring": "A wrapper that throws away all non-kwarg arguments.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\util\\tf_export.py",
    "ast_data": "FunctionDef name:kwarg_only arg:f arguments arg Assign Call FunctionDef name:wrapper arguments arg arg If Raise Call Call Return return:yes Call Return return:yes Call"
  },
  {
    "library": "django",
    "name": "OverlapsAboveLookup",
    "source_code": "@BaseSpatialField.register_lookup\nclass OverlapsAboveLookup(GISLookup):\n    lookup_name = 'overlaps_above'",
    "docstring": "The 'overlaps_above' operator returns true if A's bounding box overlaps or is above B's bounding box.",
    "type": "class",
    "file_path": "django\\django\\contrib\\gis\\db\\models\\lookups.py",
    "ast_data": "ClassDef name:OverlapsAboveLookup Assign"
  },
  {
    "library": "django",
    "name": "check_pointer",
    "source_code": "def check_pointer(result, func, cargs):\n    if isinstance(result, int):\n        result = c_void_p(result)\n    if result:\n        return result\n    else:\n        raise GDALException('Invalid pointer returned from \"%s\"' % func.__name__)",
    "docstring": "Make sure the result pointer is valid.",
    "type": "function",
    "file_path": "django\\django\\contrib\\gis\\gdal\\prototypes\\errcheck.py",
    "ast_data": "FunctionDef name:check_pointer arg:result arg:func arg:cargs arguments arg arg arg If Call Assign Call If Return return:yes Raise Call"
  },
  {
    "library": "django",
    "name": "get_cleansed_multivaluedict",
    "source_code": "def get_cleansed_multivaluedict(self, request, multivaluedict):\n    sensitive_post_parameters = getattr(request, 'sensitive_post_parameters', [])\n    if self.is_active(request) and sensitive_post_parameters:\n        multivaluedict = multivaluedict.copy()\n        for param in sensitive_post_parameters:\n            if param in multivaluedict:\n                multivaluedict[param] = self.cleansed_substitute\n    return multivaluedict",
    "docstring": "Replace the keys in a MultiValueDict marked as sensitive with stars. This mitigates leaking sensitive POST parameters if something like request.POST['nonexistent_key'] throws an exception (#21098).",
    "type": "method",
    "file_path": "django\\django\\views\\debug.py",
    "ast_data": "FunctionDef name:get_cleansed_multivaluedict arg:self arg:request arg:multivaluedict arguments arg arg arg Assign Call If BoolOp Call Assign Call For If Compare Assign Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "seed_all",
    "source_code": "def seed_all() -> None:\n\n    def cb():\n        random_seed = 0\n        seeded = False\n        for i in range(device_count()):\n            default_generator = torch.xpu.default_generators[i]\n            if not seeded:\n                default_generator.seed()\n                random_seed = default_generator.initial_seed()\n                seeded = True\n            else:\n                default_generator.manual_seed(random_seed)\n    _lazy_call(cb)",
    "docstring": "Set the seed for generating random numbers to a random number on all GPUs. It's safe to call this function if XPU is not available; in that case, it is silently ignored.",
    "type": "function",
    "file_path": "pytorch\\torch\\xpu\\random.py",
    "ast_data": "FunctionDef name:seed_all arguments FunctionDef name:cb arguments Assign Assign For Call Call Assign If Call Assign Call Assign Call Call"
  },
  {
    "library": "scipy",
    "name": "pmf",
    "source_code": "def pmf(self, x, m, n):\n    out = np.exp(self.logpmf(x, m, n))\n    return out",
    "docstring": "Multivariate hypergeometric probability mass function. Parameters ---------- x : array_like Quantiles, with the last axis of denoting the components. %(_doc_default_callparams)s Returns ------- pmf : ndarray or scalar Probability density function evaluated at Notes ----- %(_doc_callparams_note)s",
    "type": "method",
    "file_path": "scipy\\scipy\\stats\\_multivariate.py",
    "ast_data": "FunctionDef name:pmf arg:self arg:x arg:m arg:n arguments arg arg arg arg Assign Call Call Return return:yes"
  },
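This `pmf` backs `scipy.stats.multivariate_hypergeom`; a worked check against the closed form C(3,1)·C(4,1)/C(7,2):

```python
from scipy.stats import multivariate_hypergeom

# Urn with m = [3, 4] balls of two colors, drawing n = 2 without replacement:
# P(one of each color) = C(3,1) * C(4,1) / C(7,2) = 12 / 21.
p = multivariate_hypergeom.pmf(x=[1, 1], m=[3, 4], n=2)
assert abs(p - 12 / 21) < 1e-12
```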
  {
    "library": "numpy",
    "name": "_get_stats",
    "source_code": "def _get_stats(padded, axis, width_pair, length_pair, stat_func):\n    left_index = width_pair[0]\n    right_index = padded.shape[axis] - width_pair[1]\n    max_length = right_index - left_index\n    left_length, right_length = length_pair\n    if left_length is None or max_length < left_length:\n        left_length = max_length\n    if right_length is None or max_length < right_length:\n        right_length = max_length\n    if (left_length == 0 or right_length == 0) and stat_func in {np.amax, np.amin}:\n        raise ValueError('stat_length of 0 yields no value for padding')\n    left_slice = _slice_at_axis(slice(left_index, left_index + left_length), axis)\n    left_chunk = padded[left_slice]\n    left_stat = stat_func(left_chunk, axis=axis, keepdims=True)\n    _round_if_needed(left_stat, padded.dtype)\n    if left_length == right_length == max_length:\n        return (left_stat, left_stat)\n    right_slice = _slice_at_axis(slice(right_index - right_length, right_index), axis)\n    right_chunk = padded[right_slice]\n    right_stat = stat_func(right_chunk, axis=axis, keepdims=True)\n    _round_if_needed(right_stat, padded.dtype)\n    return (left_stat, right_stat)",
    "docstring": "Calculate statistic for the empty-padded array in given dimension. Parameters ---------- padded : ndarray Empty-padded array. axis : int Dimension in which the statistic is calculated. width_pair : (int, int) Pair of widths that mark the pad area on both sides in the given dimension. length_pair : 2-element sequence of None or int Gives the number of values in valid area from each side that is taken into account when calculating the statistic. If None the entire valid area in is considered. stat_func : function Function to compute statistic. The expected signature is `padded`.",
    "type": "function",
    "file_path": "numpy\\numpy\\lib\\_arraypad_impl.py",
    "ast_data": "FunctionDef name:_get_stats arg:padded arg:axis arg:width_pair arg:length_pair arg:stat_func arguments arg arg arg arg arg Assign Assign Assign Assign If BoolOp Compare Compare Assign If BoolOp Compare Compare Assign If BoolOp BoolOp Compare Compare Compare Raise Call Assign Call Call Assign Assign Call Call If Compare Return return:yes Assign Call Call Assign Assign Call Call Return return:yes"
  },
  {
    "library": "kornia",
    "name": "_estimate_homography",
    "source_code": "def _estimate_homography(self, keypoints1: Tensor, keypoints2: Tensor) -> Tensor:\n    if self.estimator == 'vanilla':\n        homo = find_homography_dlt_iterated(keypoints2[None], keypoints1[None], torch.ones_like(keypoints1[None, :, 0]))\n    elif self.estimator == 'ransac':\n        homo, _ = self.ransac(keypoints2, keypoints1)\n        homo = homo[None]\n    else:\n        raise NotImplementedError(f'Unsupported estimator {self.estimator}. Use `ransac` or `vanilla` instead.')\n    return homo",
    "docstring": "Estimate homography by the matched keypoints. Args: keypoints1: matched keypoint set from an image, shaped as :math:. keypoints2: matched keypoint set from the other image, shaped as :math:.",
    "type": "method",
    "file_path": "kornia\\kornia\\contrib\\image_stitching.py",
    "ast_data": "FunctionDef name:_estimate_homography arg:self arg:keypoints1 arg:keypoints2 arguments arg arg arg If Compare Assign Call Call If Compare Assign Call Assign Raise Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_get_accumulator",
    "source_code": "def _get_accumulator(tensor):\n    assert isinstance(tensor.graph, func_graph_module.FuncGraph)\n\n    def get_func_graph_output(t):\n        for output in tensor.graph.outputs:\n            if output is t:\n                return t\n        identity_op = t.consumers()[0]\n        if identity_op.type == 'Identity' and any((identity_op.outputs[0] is t for t in tensor.graph.outputs)):\n            return identity_op.outputs[0]\n        return None\n    for consumer in tensor.consumers():\n        if consumer.type != 'TensorListPushBack':\n            continue\n        accum_input_idx = -1\n        for accum_input_idx, inp in enumerate(tensor.graph.inputs):\n            if inp is consumer.inputs[0]:\n                break\n        else:\n            continue\n        output = get_func_graph_output(consumer.outputs[0])\n        if output is None:\n            continue\n        for accum_output_idx, out in enumerate(tensor.graph.outputs):\n            if out is output:\n                if accum_input_idx == accum_output_idx:\n                    return output\n                break\n    return None",
    "docstring": "Returns TensorList if any containing accumulated values of tensor. We try to find a pattern of the form: input_tl tensor \\ / (TensorListPushBack) | output_tl which satisfies the following conditions: 1. input_tl must be in tensor.graph.inputs. 2. output_tl or Identity(output_tl) must be in tensor.graph.outputs. 3. tensor.graph.input_index(input_tl) == tensor.graph.output_index(output_t). output_tl or Identity(output_tl) (whichever is in tensor.graph.outputs) is returned if such a pattern is found else None is returned. Args: tensor: The Tensor to be accumulated. Returns: A variant tensor in the same graph as or None if no accumulator is found.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\while_v2.py",
    "ast_data": "FunctionDef name:_get_accumulator arg:tensor arguments arg Call FunctionDef name:get_func_graph_output arg:t arguments arg For If Compare Return return:yes Assign Call If BoolOp Compare Call Compare Return return:yes Return return:no For Call If Compare Assign For Call If Compare Assign Call If Compare For Call If Compare If Compare Return return:yes Return return:no"
  },
  {
    "library": "kornia",
    "name": "inverse",
    "source_code": "@classmethod\n@abstractmethod\ndef inverse(cls, input: T, module: Module, param: ParamItem, extra_args: Optional[Dict[str, Any]]=None) -> T:\n    raise NotImplementedError",
    "docstring": "Inverse a transformation with respect to the parameters. Args: input: the input tensor. module: any torch Module but only kornia augmentation modules will count to apply transformations. param: the corresponding parameters to the module. extra_args: Optional dictionary of extra arguments with specific options for different input types.",
    "type": "method",
    "file_path": "kornia\\kornia\\augmentation\\container\\ops.py",
    "ast_data": "FunctionDef name:inverse arg:cls arg:input arg:module arg:param arg:extra_args arguments arg arg arg arg arg Raise"
  },
  {
    "library": "tensorflow",
    "name": "_broadcast_dynamic_shape_from_rps",
    "source_code": "def _broadcast_dynamic_shape_from_rps(a_zero: _LayerBroadcaster, b_zero: _LayerBroadcaster, a_rps: Sequence[RowPartition], b_rps: Sequence[RowPartition]) -> Tuple[Sequence[RowPartition], Sequence[_LayerBroadcaster], Sequence[_LayerBroadcaster]]:\n    assert len(a_rps) == len(b_rps)\n    if a_rps:\n        c_1, ac_1, bc_1 = _broadcast_dynamic_shape_next_layer(a_zero, b_zero, a_rps[0], b_rps[0])\n        c_suffix, a_layers, b_layers = _broadcast_dynamic_shape_from_rps(ac_1, bc_1, a_rps[1:], b_rps[1:])\n        return ([c_1] + c_suffix, [ac_1] + a_layers, [bc_1] + b_layers)\n    else:\n        return ([], [], [])",
    "docstring": "Create BroadcastLayers from two shapes to a target shape. *--a_zero->***<-bc[2]--* Note: ac[0]=a_zero, and bc[0]=b_zero. Args: a_zero: broadcaster from rows of a_rps[0] to target shape. b_zero: broadcaster from rows of b_rps[0] to target shape. a_rps: RowPartitions of first shape. b_rps: RowPartitions of second shape, equal in length to a_rps. Returns: (c_rps, ac, bc) where: c_rps: RowPartitions of target shape. ac: layers broadcasting from the first shape. bc: layers broadcasting from the second shape.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\dynamic_ragged_shape.py",
    "ast_data": "FunctionDef name:_broadcast_dynamic_shape_from_rps arg:a_zero arg:b_zero arg:a_rps arg:b_rps arguments arg arg arg arg Compare Call Call If Assign Call Assign Call Return return:yes Return return:no"
  },
  {
    "library": "matplotlib",
    "name": "set_size_inches",
    "source_code": "def set_size_inches(self, w, h=None, forward=True):\n    if h is None:\n        w, h = w\n    size = np.array([w, h])\n    if not np.isfinite(size).all() or (size < 0).any():\n        raise ValueError(f'figure size must be positive finite not {size}')\n    self.bbox_inches.p1 = size\n    if forward:\n        manager = self.canvas.manager\n        if manager is not None:\n            manager.resize(*(size * self.dpi).astype(int))\n    self.stale = True",
    "docstring": "Set the figure size in inches. Call signatures:: fig.set_size_inches(w, h) # OR fig.set_size_inches((w, h)) Parameters ---------- w : (float, float) or float Width and height in inches (if height not specified as a separate argument) or width. h : float Height in inches. forward : bool, default: True If `Figure.dpi`.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\figure.py",
    "ast_data": "FunctionDef name:set_size_inches arg:self arg:w arg:h arg:forward arguments arg arg arg arg If Compare Assign Assign Call If BoolOp Call Call Call Compare Raise Call Assign If Assign If Compare Call Call Assign"
  },
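Both documented call signatures in action:

```python
import matplotlib.pyplot as plt

fig = plt.figure()
fig.set_size_inches(8, 6)    # width, height as separate arguments
fig.set_size_inches((4, 3))  # or a single (w, h) pair
assert tuple(fig.get_size_inches()) == (4.0, 3.0)
```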
  {
    "library": "sphinx",
    "name": "setup_extension",
    "source_code": "def setup_extension(self, extname: str) -> None:\n    logger.debug('[app] setting up extension: %r', extname)\n    self.registry.load_extension(self, extname)",
    "docstring": "Import and setup a Sphinx extension module. Load the extension given by the module *name*. Use this if your extension needs the features provided by another extension. No-op if called twice.",
    "type": "method",
    "file_path": "sphinx\\sphinx\\application.py",
    "ast_data": "FunctionDef name:setup_extension arg:self arg:extname arguments arg arg Call Call"
  },
  {
    "library": "tensorflow",
    "name": "description",
    "source_code": "@property\n@abc.abstractmethod\ndef description(self) -> str:\n    pass",
    "docstring": "Returns a text description of the sharding policy.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\checkpoint\\sharding\\sharding_util.py",
    "ast_data": "FunctionDef name:description arg:self arguments arg"
  },
  {
    "library": "scipy",
    "name": "PowerResult",
    "source_code": "@dataclass\nclass PowerResult:\n    power: float | np.ndarray\n    pvalues: float | np.ndarray",
    "docstring": "Result object returned by . Attributes ---------- power : float or ndarray The estimated power. pvalues : float or ndarray The simulated p-values.",
    "type": "class",
    "file_path": "scipy\\scipy\\stats\\_resampling.py",
    "ast_data": "ClassDef name:PowerResult"
  },
  {
    "library": "matplotlib",
    "name": "set_rmax",
    "source_code": "def set_rmax(self, rmax):\n    self.viewLim.y1 = rmax",
    "docstring": "Set the outer radial limit. Parameters ---------- rmax : float",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\projections\\polar.py",
    "ast_data": "FunctionDef name:set_rmax arg:self arg:rmax arguments arg arg Assign"
  },
  {
    "library": "pytorch",
    "name": "_register_torch_dispatch_rule",
    "source_code": "def _register_torch_dispatch_rule(self, op_name, torch_dispatch_class, fn):\n    if torch._running_with_deploy():\n        _library.utils.warn_deploy()\n        return\n    qualname = f'{self.ns}::{op_name}'\n    entry = torch._library.simple_registry.singleton.find(qualname)\n    handle = entry.torch_dispatch_rules.register(torch_dispatch_class, fn)\n    self._registration_handles.append(handle)",
    "docstring": "Registers a torch_dispatch rule for the given operator and torch_dispatch_class. This allows for open registration to specify the behavior between the operator and the torch_dispatch_class without needing to modify the torch_dispatch_class or the operator directly. The torch_dispatch_class is either a Tensor subclass with or a TorchDispatchMode. If it is a Tensor subclass, we expect fn to have the following signature: (cls, func: OpOverload, types: Tuple[type, ...], args, kwargs) -> Any If it is a TorchDispatchMode, we expect fn to have the following signature: (mode, func: OpOverload, types: Tuple[type, ...], args, kwargs) -> Any",
    "type": "method",
    "file_path": "pytorch\\torch\\library.py",
    "ast_data": "FunctionDef name:_register_torch_dispatch_rule arg:self arg:op_name arg:torch_dispatch_class arg:fn arguments arg arg arg arg If Call Call Return return:no Assign Assign Call Assign Call Call"
  },
  {
    "library": "pytorch",
    "name": "get_default_device",
    "source_code": "def get_default_device() -> 'torch.device':\n    global _GLOBAL_DEVICE_CONTEXT\n    if hasattr(_GLOBAL_DEVICE_CONTEXT, 'device_context'):\n        device = _GLOBAL_DEVICE_CONTEXT.device_context.device\n        if device.index is not None:\n            return device\n        else:\n            return torch.tensor([]).device\n    else:\n        return torch.device('cpu')",
    "docstring": "Gets the default ``",
    "type": "function",
    "file_path": "pytorch\\torch\\__init__.py",
    "ast_data": "FunctionDef name:get_default_device arguments If Call Assign If Compare Return return:yes Return return:yes Call Return return:yes Call"
  },
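A usage sketch (requires a PyTorch build that exposes `torch.get_default_device`, the counterpart of `torch.set_default_device`):

```python
import torch

torch.set_default_device("cpu")
assert torch.get_default_device() == torch.device("cpu")
# Newly created tensors land on the default device:
t = torch.empty(1)
assert t.device == torch.get_default_device()
```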
  {
    "library": "matplotlib",
    "name": "set_data",
    "source_code": "def set_data(self, *args):\n    if len(args) == 1:\n        (x, y), = args\n    else:\n        x, y = args\n    self.set_xdata(x)\n    self.set_ydata(y)",
    "docstring": "Set the x and y data. Parameters ---------- *args : (2, N) array or two 1D arrays See Also -------- set_xdata set_ydata",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\lines.py",
    "ast_data": "FunctionDef name:set_data arg:self arguments arg arg If Compare Call Assign Assign Call Call"
  },
  {
    "library": "django",
    "name": "_get_raw_host",
    "source_code": "def _get_raw_host(self):\n    if settings.USE_X_FORWARDED_HOST and 'HTTP_X_FORWARDED_HOST' in self.META:\n        host = self.META['HTTP_X_FORWARDED_HOST']\n    elif 'HTTP_HOST' in self.META:\n        host = self.META['HTTP_HOST']\n    else:\n        host = self.META['SERVER_NAME']\n        server_port = self.get_port()\n        if server_port != ('443' if self.is_secure() else '80'):\n            host = '%s:%s' % (host, server_port)\n    return host",
    "docstring": "Return the HTTP host using the environment or request headers. Skip allowed hosts protection, so may return an insecure host.",
    "type": "method",
    "file_path": "django\\django\\http\\request.py",
    "ast_data": "FunctionDef name:_get_raw_host arg:self arguments arg If BoolOp Compare Assign If Compare Assign Assign Assign Call If Compare Call Assign Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_use_low_precision_shard",
    "source_code": "def _use_low_precision_shard(self):\n    self._check_low_precision_shard()\n    flat_param = self.flat_param\n    _alloc_storage(flat_param._mp_shard, flat_param._local_shard.size())\n    flat_param._mp_shard.copy_(flat_param._local_shard.to(self.device, non_blocking=True))\n    flat_param.data = flat_param._mp_shard",
    "docstring": "Allocate on the compute device and switch to using the low precision sharded flat parameter.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\fsdp\\_flat_param.py",
    "ast_data": "FunctionDef name:_use_low_precision_shard arg:self arguments arg Call Assign Call Call Call Call Assign"
  },
  {
    "library": "kornia",
    "name": "KORNIA_CHECK_SHAPE",
    "source_code": "def KORNIA_CHECK_SHAPE(x: Tensor, shape: list[str], raises: bool=True) -> bool:\n    if '*' == shape[0]:\n        shape_to_check = shape[1:]\n        x_shape_to_check = x.shape[-len(shape) + 1:]\n    elif '*' == shape[-1]:\n        shape_to_check = shape[:-1]\n        x_shape_to_check = x.shape[:len(shape) - 1]\n    else:\n        shape_to_check = shape\n        x_shape_to_check = x.shape\n    if len(x_shape_to_check) != len(shape_to_check):\n        if raises:\n            raise TypeError(f'{x} shape must be [{shape}]. Got {x.shape}')\n        else:\n            return False\n    for i in range(len(x_shape_to_check)):\n        dim_: str = shape_to_check[i]\n        if not dim_.isnumeric():\n            continue\n        dim = int(dim_)\n        if x_shape_to_check[i] != dim:\n            if raises:\n                raise TypeError(f'{x} shape must be [{shape}]. Got {x.shape}')\n            else:\n                return False\n    return True",
    "docstring": "Check whether a tensor has a specified shape. The shape can be specified with a implicit or explicit list of strings. The guard also check whether the variable is a type . Args: x: the tensor to evaluate. shape: a list with strings with the expected shape. raises: bool indicating whether an exception should be raised upon failure. Raises: Exception: if the input tensor is has not the expected shape and raises is True. Example: >>> x = torch.rand(2, 3, 4, 4) >>> KORNIA_CHECK_SHAPE(x, [\"B\", \"C\", \"H\", \"W\"]) # implicit True >>> x = torch.rand(2, 3, 4, 4) >>> KORNIA_CHECK_SHAPE(x, [\"2\", \"3\", \"H\", \"W\"]) # explicit True",
    "type": "function",
    "file_path": "kornia\\kornia\\core\\check.py",
    "ast_data": "FunctionDef name:KORNIA_CHECK_SHAPE arg:x arg:shape arg:raises arguments arg arg arg If Compare Assign Assign Call If Compare Assign Assign Call Assign Assign If Compare Call Call If Raise Call Return return:yes For Call Call If Call Assign Call If Compare If Raise Call Return return:yes Return return:yes"
  },
  {
    "library": "pandas",
    "name": "_register_accessor",
    "source_code": "@doc(klass='', examples='', others='')\ndef _register_accessor(name: str, cls: type[NDFrame | Index]) -> Callable[[TypeT], TypeT]:\n\n    def decorator(accessor: TypeT) -> TypeT:\n        if hasattr(cls, name):\n            warnings.warn(f'registration of accessor {accessor!r} under name {name!r} for type {cls!r} is overriding a preexisting attribute with the same name.', UserWarning, stacklevel=find_stack_level())\n        setattr(cls, name, Accessor(name, accessor))\n        cls._accessors.add(name)\n        return accessor\n    return decorator",
    "docstring": "Register a custom accessor on {klass} objects. Parameters ---------- name : str Name under which the accessor should be registered. A warning is issued if this name conflicts with a preexisting attribute. Returns ------- callable A class decorator. See Also -------- register_dataframe_accessor : Register a custom accessor on DataFrame objects. register_series_accessor : Register a custom accessor on Series objects. register_index_accessor : Register a custom accessor on Index objects. Notes ----- This function allows you to register a custom-defined accessor class for {klass}. The requirements for the accessor class are as follows: * Must contain an init method that: * accepts a single {klass} object * raises an AttributeError if the {klass} object does not have correctly matching inputs for the accessor * Must contain a method for each access pattern. * The methods should be able to take any argument signature. * Accessible using the @property decorator if no additional arguments are needed. Examples -------- {examples}",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\accessor.py",
    "ast_data": "FunctionDef name:_register_accessor arg:name arg:cls arguments arg arg FunctionDef name:decorator arg:accessor arguments arg If Call Call Call Call Call Call Return return:yes Return return:yes Call"
  },
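The public entry points built on this helper are `pd.api.extensions.register_dataframe_accessor` and its Series/Index siblings; a minimal accessor satisfying the stated requirements (a validating `__init__`, a `@property` for a no-argument access pattern):

```python
import pandas as pd

@pd.api.extensions.register_dataframe_accessor("center")
class CenterAccessor:
    def __init__(self, pandas_obj):
        # Raise AttributeError for frames the accessor cannot handle.
        if not {"x", "y"} <= set(pandas_obj.columns):
            raise AttributeError("accessor requires 'x' and 'y' columns")
        self._obj = pandas_obj

    @property
    def point(self):
        return (self._obj["x"].mean(), self._obj["y"].mean())

df = pd.DataFrame({"x": [0.0, 2.0], "y": [0.0, 4.0]})
assert df.center.point == (1.0, 2.0)
```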
  {
    "library": "pytorch",
    "name": "_pack_kwargs",
    "source_code": "def _pack_kwargs(*args: Any, **kwargs: Any) -> tuple[tuple[Any, ...], tuple[str, ...]]:\n    kwarg_keys: list[str] = []\n    flat_args: list[Any] = list(args)\n    for k, v in kwargs.items():\n        kwarg_keys.append(k)\n        flat_args.append(v)\n    return (tuple(flat_args), tuple(kwarg_keys))",
    "docstring": "Turn argument list into separate key list and value list (unpack_kwargs does the opposite). Inspiration: Usage:: kwarg_keys, flat_args = pack_kwargs(1, 2, a=3, b=4) assert kwarg_keys == (\"a\", \"b\") assert flat_args == (1, 2, 3, 4) args, kwargs = unpack_kwargs(kwarg_keys, flat_args) assert args == (1, 2) assert kwargs == {\"a\": 3, \"b\": 4} Returns: Tuple[Tuple[Any, ...], Tuple[str, ...]]: The first tuple element gives gives both positional args and kwarg values, where the positional args proceed kwarg values and kwarg values are ordered consistently with the kwarg keys. The second tuple element gives the kwarg keys. The second tuple element's length is at most the first tuple element's length.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\utils.py",
    "ast_data": "FunctionDef name:_pack_kwargs arguments arg arg Call For Call Call Call Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "merge",
    "source_code": "def merge(*dicts, **kwargs):\n    if len(dicts) == 1 and (not isinstance(dicts[0], Mapping)):\n        dicts = dicts[0]\n    factory = _get_factory(merge, kwargs)\n    rv = factory()\n    for d in dicts:\n        rv.update(d)\n    return rv",
    "docstring": "Merge a collection of dictionaries >>> merge({1: \"one\"}, {2: \"two\"}) {1: 'one', 2: 'two'} Later dictionaries have precedence >>> merge({1: 2, 3: 4}, {3: 3, 4: 4}) {1: 2, 3: 3, 4: 4} See Also: merge_with",
    "type": "function",
    "file_path": "pytorch\\torch\\fx\\experimental\\unification\\unification_tools.py",
    "ast_data": "FunctionDef name:merge arguments arg arg If BoolOp Compare Call Call Assign Assign Call Assign Call For Call Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "strip_newsgroup_quoting",
    "source_code": "def strip_newsgroup_quoting(text):\n    good_lines = [line for line in text.split('\\n') if not _QUOTE_RE.search(line)]\n    return '\\n'.join(good_lines)",
    "docstring": "Given text in \"news\" format, strip lines beginning with the quote characters > or |, plus lines that often introduce a quoted section (for example, because they contain the string 'writes:'.) Parameters ---------- text : str The text from which to remove the signature block.",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\datasets\\_twenty_newsgroups.py",
    "ast_data": "FunctionDef name:strip_newsgroup_quoting arg:text arguments arg Assign Call Call Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "take",
    "source_code": "def take(self, indexer: npt.NDArray[np.intp], axis: AxisInt=1, verify: bool=True) -> Self:\n    n = self.shape[axis]\n    indexer = maybe_convert_indices(indexer, n, verify=verify)\n    new_labels = self.axes[axis].take(indexer)\n    return self.reindex_indexer(new_axis=new_labels, indexer=indexer, axis=axis, allow_dups=True)",
    "docstring": "Take items along any axis. indexer : np.ndarray[np.intp] axis : int, default 1 verify : bool, default True Check that all entries are between 0 and len(self) - 1, inclusive. Pass verify=False if this check has been done by the caller. Returns ------- BlockManager",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\internals\\managers.py",
    "ast_data": "FunctionDef name:take arg:self arg:indexer arg:axis arg:verify arguments arg arg arg arg Assign Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "JoinStyle",
    "source_code": "class JoinStyle(str, Enum):\n    miter = 'miter'\n    round = 'round'\n    bevel = 'bevel'\n\n    @staticmethod\n    def demo():\n        import numpy as np\n        import matplotlib.pyplot as plt\n\n        def plot_angle(ax, x, y, angle, style):\n            phi = np.radians(angle)\n            xx = [x + 0.5, x, x + 0.5 * np.cos(phi)]\n            yy = [y, y, y + 0.5 * np.sin(phi)]\n            ax.plot(xx, yy, lw=12, color='tab:blue', solid_joinstyle=style)\n            ax.plot(xx, yy, lw=1, color='black')\n            ax.plot(xx[1], yy[1], 'o', color='tab:red', markersize=3)\n        fig, ax = plt.subplots(figsize=(5, 4), constrained_layout=True)\n        ax.set_title('Join style')\n        for x, style in enumerate(['miter', 'round', 'bevel']):\n            ax.text(x, 5, style)\n            for y, angle in enumerate([20, 45, 60, 90, 120]):\n                plot_angle(ax, x, y, angle, style)\n                if x == 0:\n                    ax.text(-1.3, y, f'{angle} degrees')\n        ax.set_xlim(-1.5, 2.75)\n        ax.set_ylim(-0.5, 5.5)\n        ax.set_axis_off()\n        fig.show()",
    "docstring": "Define how the connection between two line segments is drawn. For a visual impression of each *JoinStyle*, , or run . Lines in Matplotlib are typically defined by a 1D and a finite `~.path.Path~.backend_bases.GraphicsContextBaseMozilla Developer Docs `_ .. plot:: :alt: Demo of possible JoinStyle's from matplotlib._enums import JoinStyle JoinStyle.demo()",
    "type": "class",
    "file_path": "matplotlib\\lib\\matplotlib\\_enums.py",
    "ast_data": "ClassDef name:JoinStyle Assign Assign Assign FunctionDef name:demo arguments FunctionDef name:plot_angle arg:ax arg:x arg:y arg:angle arg:style arguments arg arg arg arg arg Assign Call Assign Call Assign Call Call Call Call Assign Call Call For Call Call For Call Call If Compare Call Call Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "CachedMetricsDeltas",
    "source_code": "@dataclass\nclass CachedMetricsDeltas:\n    generated_kernel_count: int\n    generated_cpp_vec_kernel_count: int\n    ir_nodes_pre_fusion: int\n    cpp_to_dtype_count: int\n    num_bytes_accessed: int\n    num_matches_for_scatter_upon_const_tensor: int",
    "docstring": "The subset of metrics we want update across cache hits, e.g., the FxGraphCache.",
    "type": "class",
    "file_path": "pytorch\\torch\\_inductor\\metrics.py",
    "ast_data": "ClassDef name:CachedMetricsDeltas"
  },
  {
    "library": "pandas",
    "name": "_replace_locals",
    "source_code": "def _replace_locals(tok: tuple[int, str]) -> tuple[int, str]:\n    toknum, tokval = tok\n    if toknum == tokenize.OP and tokval == '@':\n        return (tokenize.OP, LOCAL_TAG)\n    return (toknum, tokval)",
    "docstring": "Replace local variables with a syntactically valid name. Parameters ---------- tok : tuple of int, str ints correspond to the all caps constants in the tokenize module Returns ------- tuple of int, str Either the input or token or the replacement values Notes ----- This is somewhat of a hack in that we rewrite a string such as `` symbol with it.",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\computation\\expr.py",
    "ast_data": "FunctionDef name:_replace_locals arg:tok arguments arg Assign If BoolOp Compare Compare Return return:yes Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "compute_weighted_loss",
    "source_code": "def compute_weighted_loss(losses, sample_weight=None, reduction=ReductionV2.SUM_OVER_BATCH_SIZE, name=None):\n    ReductionV2.validate(reduction)\n    if reduction == ReductionV2.AUTO:\n        reduction = ReductionV2.SUM_OVER_BATCH_SIZE\n    if sample_weight is None:\n        sample_weight = 1.0\n    with backend.name_scope(name or 'weighted_loss'):\n        ops.get_default_graph()._last_loss_reduction = reduction\n        if not isinstance(losses, (keras_tensor.KerasTensor, ragged_tensor.RaggedTensor)):\n            losses = tensor_conversion.convert_to_tensor_v2_with_dispatch(losses)\n        input_dtype = losses.dtype\n        if not isinstance(sample_weight, keras_tensor.KerasTensor):\n            sample_weight = tensor_conversion.convert_to_tensor_v2_with_dispatch(sample_weight)\n        losses = math_ops.cast(losses, 'float32')\n        sample_weight = math_ops.cast(sample_weight, 'float32')\n        losses, _, sample_weight = squeeze_or_expand_dimensions(losses, None, sample_weight)\n        weighted_losses = math_ops.multiply(losses, sample_weight)\n        loss = reduce_weighted_loss(weighted_losses, reduction)\n        loss = math_ops.cast(loss, input_dtype)\n        return loss",
    "docstring": "Computes the weighted loss. Args: losses: of shape . sample_weight: Optional whose rank is either 0, or the same rank as , or be broadcastable to . reduction: (Optional) Type of to apply to loss. Default value is . name: Optional name for the op. Raises: ValueError: If the shape of is not compatible with . Returns: Weighted loss of the same type as . If is , this has the same shape as ; otherwise, it is scalar.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\utils\\losses_utils.py",
    "ast_data": "FunctionDef name:compute_weighted_loss arg:losses arg:sample_weight arg:reduction arg:name arguments arg arg arg arg Call If Compare Assign If Compare Assign With Call BoolOp Assign Call If Call Assign Call Assign If Call Assign Call Assign Call Assign Call Assign Call Assign Call Assign Call Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_should_trace",
    "source_code": "def _should_trace(self, step, graph, fetches):\n    if self._traced_steps > MAX_TRACED_STEPS:\n        return False\n    if step in self._trace_steps or self._trace_next_step:\n        self._traced_steps += 1\n        return True\n    if self._auto_tracing and step > WARMUP_STEPS:\n        with graph.as_default():\n            fetch_names = [f.name for f in session._FetchMapper.for_fetch(fetches).unique_fetches()]\n        fetch_name = '-'.join(sorted(fetch_names))\n        if self._debug:\n            sys.stderr.write('debug: trace fetches: %s\\n' % fetch_name)\n        if fetch_name not in self._fetched:\n            self._fetched.add(fetch_name)\n            self._traced_steps += 1\n            return True\n        if self.profiler._coverage < 0.5 and step < MAX_TRACED_STEPS and (self._rng.randint(0, 10) < 2):\n            self._traced_steps += 1\n            return True\n    return False",
    "docstring": "Whether should do tracing at current step.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\profiler\\profile_context.py",
    "ast_data": "FunctionDef name:_should_trace arg:self arg:step arg:graph arg:fetches arguments arg arg arg arg If Compare Return return:yes If BoolOp Compare Return return:yes If BoolOp Compare With Call Assign Call Call Assign Call Call If Call If Compare Call Return return:yes If BoolOp Compare Compare Compare Call Return return:yes Return return:yes"
  },
  {
    "library": "scrapy",
    "name": "getlist",
    "source_code": "def getlist(self, name: _SettingsKeyT, default: list[Any] | None=None) -> list[Any]:\n    value = self.get(name, default or [])\n    if not value:\n        return []\n    if isinstance(value, str):\n        value = value.split(',')\n    return list(value)",
    "docstring": "Get a setting value as a list. If the setting original type is a list, a copy of it will be returned. If it's a string it will be split by \",\". If it is an empty string, an empty list will be returned. For example, settings populated through environment variables set to `` will return a list ['one', 'two'] when using this method. :param name: the setting name :type name: str :param default: the value to return if no setting is found :type default: object",
    "type": "method",
    "file_path": "scrapy\\scrapy\\settings\\__init__.py",
    "ast_data": "FunctionDef name:getlist arg:self arg:name arg:default arguments arg arg arg Assign Call BoolOp If Return return:no If Call Assign Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "loop_enters",
    "source_code": "@property\ndef loop_enters(self):\n    return self._loop_enters",
    "docstring": "The list of enter tensors for loop variables.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\control_flow_ops.py",
    "ast_data": "FunctionDef name:loop_enters arg:self arguments arg Return return:yes"
  },
  {
    "library": "kornia",
    "name": "integral_image",
    "source_code": "def integral_image(image: Tensor) -> Tensor:\n    KORNIA_CHECK_SHAPE(image, ['*', 'H', 'W'])\n    return integral_tensor(image, (-2, -1))",
    "docstring": "Calculate integral of the input image tensor. This particular version sums over the last two dimensions. Args: image: the input image tensor with shape :math:. Returns: Integral tensor for the input image tensor with shape :math:. Examples: >>> input = torch.ones(1, 5, 5) >>> output = integral_image(input) >>> output tensor([[[ 1., 2., 3., 4., 5.], [ 2., 4., 6., 8., 10.], [ 3., 6., 9., 12., 15.], [ 4., 8., 12., 16., 20.], [ 5., 10., 15., 20., 25.]]])",
    "type": "function",
    "file_path": "kornia\\kornia\\enhance\\integral.py",
    "ast_data": "FunctionDef name:integral_image arg:image arguments arg Call Return return:yes Call"
  },
  {
    "library": "kornia",
    "name": "distort_points_affine",
    "source_code": "def distort_points_affine(projected_points_in_camera_z1_plane: Tensor, params: Tensor) -> Tensor:\n    KORNIA_CHECK_SHAPE(projected_points_in_camera_z1_plane, ['*', '2'])\n    KORNIA_CHECK_SHAPE(params, ['*', '4'])\n    x = projected_points_in_camera_z1_plane[..., 0]\n    y = projected_points_in_camera_z1_plane[..., 1]\n    fx, fy = (params[..., 0], params[..., 1])\n    cx, cy = (params[..., 2], params[..., 3])\n    u = fx * x + cx\n    v = fy * y + cy\n    return ops.stack([u, v], dim=-1)",
    "docstring": "Distort one or more points from the canonical z=1 plane into the camera frame. .. math:: \\begin{bmatrix} u \\\\ v \\end{bmatrix} = \\begin{bmatrix} f_x & 0 \\\\ 0 & f_y \\end{bmatrix} \\begin{bmatrix} x \\\\ y \\end{bmatrix} + \\begin{bmatrix} c_x \\\\ c_y \\end{bmatrix} Args: projected_points_in_camera_z1_plane: Tensor representing the points to distort with shape (..., 2). params: Tensor representing the parameters of the affine distortion model with shape (..., 4). Returns: Tensor representing the distorted points with shape (..., 2). Example: >>> points = torch.tensor([319.5, 239.5]) # center of a 640x480 image >>> params = torch.tensor([600., 600., 319.5, 239.5]) >>> distort_points_affine(points, params) tensor([192019.5000, 143939.5000])",
    "type": "function",
    "file_path": "kornia\\kornia\\geometry\\camera\\distortion_affine.py",
    "ast_data": "FunctionDef name:distort_points_affine arg:projected_points_in_camera_z1_plane arg:params arguments arg arg Call Call Assign Assign Assign Assign Assign Assign Return return:yes Call"
  },
  {
    "library": "django",
    "name": "flatten",
    "source_code": "def flatten(self):\n    yield self\n    for child in self.children:\n        if isinstance(child, tuple):\n            child = child[1]\n        if hasattr(child, 'flatten'):\n            yield from child.flatten()\n        else:\n            yield child",
    "docstring": "Recursively yield this Q object and all subexpressions, in depth-first order.",
    "type": "method",
    "file_path": "django\\django\\db\\models\\query_utils.py",
    "ast_data": "FunctionDef name:flatten arg:self arguments arg For If Call Assign If Call Call"
  },
  {
    "library": "pandas",
    "name": "__arrow_array__",
    "source_code": "def __arrow_array__(self, type=None):\n    import pyarrow as pa\n    return pa.array(self._data, mask=self._mask, type=type)",
    "docstring": "Convert myself into a pyarrow Array.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\arrays\\masked.py",
    "ast_data": "FunctionDef name:__arrow_array__ arg:self arg:type arguments arg arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "is_sparse",
    "source_code": "def is_sparse(tensor):\n    return isinstance(tensor, (sparse_tensor.SparseTensor, sparse_tensor.SparseTensorValue))",
    "docstring": "Returns true if is a sparse tensor or sparse tensor value.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\utils\\tf_utils.py",
    "ast_data": "FunctionDef name:is_sparse arg:tensor arguments arg Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "_maybe_real",
    "source_code": "def _maybe_real(A, B, tol=None):\n    if np.isrealobj(A) and np.iscomplexobj(B):\n        if tol is None:\n            tol = {0: feps * 1000.0, 1: eps * 1000000.0}[_array_precision[B.dtype.char]]\n        if np.allclose(B.imag, 0.0, atol=tol):\n            B = B.real\n    return B",
    "docstring": "Return either B or the real part of B, depending on properties of A and B. The motivation is that B has been computed as a complicated function of A, and B may be perturbed by negligible imaginary components. If A is real and B is complex with small imaginary components, then return a real copy of B. The assumption in that case would be that the imaginary components of B are numerical artifacts. Parameters ---------- A : ndarray Input array whose type is to be checked as real vs. complex. B : ndarray Array to be returned, possibly without its imaginary part. tol : float Absolute tolerance. Returns ------- out : real or complex array Either the input array B or only the real part of the input array B.",
    "type": "function",
    "file_path": "scipy\\scipy\\linalg\\_matfuncs.py",
    "ast_data": "FunctionDef name:_maybe_real arg:A arg:B arg:tol arguments arg arg arg If BoolOp Call Call If Compare Assign If Call Assign Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "set_axis_direction",
    "source_code": "def set_axis_direction(self, d):\n    self.set_default_alignment(d)\n    self.set_default_angle(d)",
    "docstring": "Adjust the text angle and text alignment of axis label according to the matplotlib convention. ===================== ========== ========= ========== ========== Property left bottom right top ===================== ========== ========= ========== ========== axislabel angle 180 0 0 180 axislabel va center top center bottom axislabel ha right center right center ===================== ========== ========= ========== ========== Note that the text angles are actually relative to (90 + angle of the direction to the ticklabel), which gives 0 for bottom axis. Parameters ---------- d : {\"left\", \"bottom\", \"right\", \"top\"}",
    "type": "method",
    "file_path": "matplotlib\\lib\\mpl_toolkits\\axisartist\\axis_artist.py",
    "ast_data": "FunctionDef name:set_axis_direction arg:self arg:d arguments arg arg Call Call"
  },
  {
    "library": "tensorflow",
    "name": "get_restore_ops",
    "source_code": "def get_restore_ops(self, var, tensor):\n    return values_util.get_on_read_restore_ops(var, tensor, self._aggregation)",
    "docstring": "Restore the same value into all variables.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\values.py",
    "ast_data": "FunctionDef name:get_restore_ops arg:self arg:var arg:tensor arguments arg arg arg Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "set_offset_transform",
    "source_code": "def set_offset_transform(self, offset_transform):\n    self._offset_transform = offset_transform",
    "docstring": "Set the artist offset transform. Parameters ---------- offset_transform :",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\collections.py",
    "ast_data": "FunctionDef name:set_offset_transform arg:self arg:offset_transform arguments arg arg Assign"
  },
  {
    "library": "sphinx",
    "name": "_wrap_chunks",
    "source_code": "def _wrap_chunks(self, chunks: list[str]) -> list[str]:\n    lines: list[str] = []\n    if self.width <= 0:\n        raise ValueError('invalid width %r (must be > 0)' % self.width)\n    chunks.reverse()\n    while chunks:\n        cur_line = []\n        cur_len = 0\n        if lines:\n            indent = self.subsequent_indent\n        else:\n            indent = self.initial_indent\n        width = self.width - column_width(indent)\n        if self.drop_whitespace and (not chunks[-1].strip()) and lines:\n            del chunks[-1]\n        while chunks:\n            l = column_width(chunks[-1])\n            if cur_len + l <= width:\n                cur_line.append(chunks.pop())\n                cur_len += l\n            else:\n                break\n        if chunks and column_width(chunks[-1]) > width:\n            self._handle_long_word(chunks, cur_line, cur_len, width)\n        if self.drop_whitespace and cur_line and (not cur_line[-1].strip()):\n            del cur_line[-1]\n        if cur_line:\n            lines.append(indent + ''.join(cur_line))\n    return lines",
    "docstring": "The original _wrap_chunks uses len() to calculate width. This method respects wide/fullwidth characters for width adjustment.",
    "type": "method",
    "file_path": "sphinx\\sphinx\\writers\\text.py",
    "ast_data": "FunctionDef name:_wrap_chunks arg:self arg:chunks arguments arg arg If Compare Raise Call Call While Assign Assign If Assign Assign Assign Call If BoolOp Call While Assign Call If Compare Call Call If BoolOp Compare Call Call If BoolOp Call If Call Call Return return:yes"
  },
  {
    "library": "django",
    "name": "get_extra",
    "source_code": "def get_extra(self, request, obj=None, **kwargs):\n    return self.extra",
    "docstring": "Hook for customizing the number of extra inline forms.",
    "type": "method",
    "file_path": "django\\django\\contrib\\admin\\options.py",
    "ast_data": "FunctionDef name:get_extra arg:self arg:request arg:obj arguments arg arg arg arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "resolve_ellipsis",
    "source_code": "def resolve_ellipsis(names, tensor_names, fn_name):\n    ellipsis_idx = single_ellipsis_index(names, fn_name)\n    if ellipsis_idx is None:\n        return names\n    return replace_ellipsis_by_position(ellipsis_idx, names, tensor_names)",
    "docstring": "Expands ... inside to be equal to a list of names from .",
    "type": "function",
    "file_path": "pytorch\\torch\\_namedtensor_internals.py",
    "ast_data": "FunctionDef name:resolve_ellipsis arg:names arg:tensor_names arg:fn_name arguments arg arg arg Assign Call If Compare Return return:yes Return return:yes Call"
  },
  {
    "library": "cherrypy",
    "name": "index",
    "source_code": "@cherrypy.expose\ndef index(self):\n    return \"<html>\\n        <head><title>CherryPy profile data</title></head>\\n        <frameset cols='200, 1*'>\\n            <frame src='menu' />\\n            <frame name='main' src='' />\\n        </frameset>\\n        </html>\\n        \"",
    "docstring": "Render the profiling viewer index page.",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\lib\\profiler.py",
    "ast_data": "FunctionDef name:index arg:self arguments arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "swap_tensor",
    "source_code": "def swap_tensor(self, name: str, value: torch.Tensor, allow_missing: bool=False) -> torch.Tensor:\n    prefix, _, attr = name.rpartition('.')\n    return swap_tensor(self.get_submodule(prefix), attr, value, allow_missing=allow_missing)",
    "docstring": "Swap the attribute specified by the given path to value. For example, to swap the attribute mod.layer1.conv1.weight, use accessor.swap_tensor(\"layer1.conv1.weight\", value)",
    "type": "method",
    "file_path": "pytorch\\torch\\nn\\utils\\_named_member_accessor.py",
    "ast_data": "FunctionDef name:swap_tensor arg:self arg:name arg:value arg:allow_missing arguments arg arg arg arg Assign Call Return return:yes Call Call"
  },
  {
    "library": "django",
    "name": "get_primary_key_columns",
    "source_code": "def get_primary_key_columns(self, cursor, table_name):\n    for constraint in self.get_constraints(cursor, table_name).values():\n        if constraint['primary_key']:\n            return constraint['columns']\n    return None",
    "docstring": "Return a list of primary key columns for the given table.",
    "type": "method",
    "file_path": "django\\django\\db\\backends\\base\\introspection.py",
    "ast_data": "FunctionDef name:get_primary_key_columns arg:self arg:cursor arg:table_name arguments arg arg arg For Call Call If Return return:yes Return return:no"
  },
  {
    "library": "django",
    "name": "change_form_object_tools_tag",
    "source_code": "@register.tag(name='change_form_object_tools')\ndef change_form_object_tools_tag(parser, token):\n    return InclusionAdminNode(parser, token, func=lambda context: context, template_name='change_form_object_tools.html')",
    "docstring": "Display the row of change form object tools.",
    "type": "function",
    "file_path": "django\\django\\contrib\\admin\\templatetags\\admin_modify.py",
    "ast_data": "FunctionDef name:change_form_object_tools_tag arg:parser arg:token arguments arg arg Return return:yes Call arguments arg Call"
  },
  {
    "library": "tensorflow",
    "name": "assert_mirrored",
    "source_code": "def assert_mirrored(structured):\n\n    def _assert_mirrored(x):\n        if isinstance(x, values_lib.DistributedValues) and (not is_mirrored(x)):\n            raise TypeError('Expected value to be mirrored across replicas: %s in %s.' % (x, structured))\n    nest.map_structure(_assert_mirrored, structured)",
    "docstring": "Raises if the structured is not composed of mirrored or regular values.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\distribute_utils.py",
    "ast_data": "FunctionDef name:assert_mirrored arg:structured arguments arg FunctionDef name:_assert_mirrored arg:x arguments arg If BoolOp Call Call Raise Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_RemoveDefaultAttrs",
    "source_code": "def _RemoveDefaultAttrs(producer_op_list, graph_def):\n    producer_op_dict = {op.name: op for op in producer_op_list.op}\n    for node in graph_def.node:\n        if node.op in producer_op_dict:\n            op_def = op_def_registry.get(node.op)\n            if op_def is None:\n                continue\n            producer_op_def = producer_op_dict[node.op]\n            for key in list(node.attr):\n                if _FindAttrInOpDef(key, op_def) is None:\n                    attr_def = _FindAttrInOpDef(key, producer_op_def)\n                    if attr_def and attr_def.HasField('default_value') and (node.attr[key] == attr_def.default_value):\n                        del node.attr[key]",
    "docstring": "Removes unknown default attrs according to . Removes any unknown attrs in (i.e. attrs that do not appear in registered OpDefs) that have a default value in . Args: producer_op_list: OpList proto. graph_def: GraphDef proto",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\importer.py",
    "ast_data": "FunctionDef name:_RemoveDefaultAttrs arg:producer_op_list arg:graph_def arguments arg arg Assign For If Compare Assign Call If Compare Assign For Call If Compare Call Assign Call If BoolOp Call Compare"
  },
  {
    "library": "tensorflow",
    "name": "_parse_example_spec",
    "source_code": "@abc.abstractproperty\ndef _parse_example_spec(self):\n    pass",
    "docstring": "Returns a parsing spec as dict. It is used for get_parsing_spec for . Returned spec is a dict from keys ('string') to , , and other supported objects. Please check documentation of for all supported spec objects. Let's say a Feature column depends on raw feature ('raw') and another (input_fc). One possible implementation of _parse_example_spec is as follows:",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\feature_column\\feature_column.py",
    "ast_data": "FunctionDef name:_parse_example_spec arg:self arguments arg"
  },
  {
    "library": "tensorflow",
    "name": "forward_compatibility_horizon",
    "source_code": "@tf_export('compat.forward_compatibility_horizon')\n@tf_contextlib.contextmanager\ndef forward_compatibility_horizon(year, month, day):\n    try:\n        _update_forward_compatibility_date_number(datetime.date(year, month, day))\n        yield\n    finally:\n        _update_forward_compatibility_date_number()",
    "docstring": "Context manager for testing forward compatibility of generated graphs. See [Version compatibility]( To ensure forward compatibility of generated graphs (see ) with older binaries, new features can be gated with: However, when adding new features, one may want to unittest it before the forward compatibility window expires. This context manager enables such tests. For example: Args: year: A year (e.g., 2018). Must be an . month: A month (1 <= month <= 12) in year. Must be an . day: A day (1 <= day <= 31, or 30, or 29, or 28) in month. Must be an . Yields: Nothing.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\compat\\compat.py",
    "ast_data": "FunctionDef name:forward_compatibility_horizon arg:year arg:month arg:day arguments arg arg arg Try Call Call Call Call"
  },
  {
    "library": "django",
    "name": "wordwrap",
    "source_code": "@register.filter(is_safe=True)\n@stringfilter\ndef wordwrap(value, arg):\n    return wrap(value, int(arg))",
    "docstring": "Wrap words at line length.",
    "type": "function",
    "file_path": "django\\django\\template\\defaultfilters.py",
    "ast_data": "FunctionDef name:wordwrap arg:value arg:arg arguments arg arg Return return:yes Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "get_run_id",
    "source_code": "@abstractmethod\ndef get_run_id(self) -> str:\n    pass",
    "docstring": "Return the run id of the rendezvous. The run id is a user-defined id that uniquely identifies an instance of a distributed application. It typically maps to a job id and is used to allow nodes to join the correct distributed application.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\elastic\\rendezvous\\api.py",
    "ast_data": "FunctionDef name:get_run_id arg:self arguments arg"
  },
  {
    "library": "tensorflow",
    "name": "__call__",
    "source_code": "def __call__(self, shape, dtype=dtypes.float32, **kwargs):\n    self._validate_kwargs(kwargs)\n    dtype = dtypes.as_dtype(dtype)\n    if not dtype.is_numpy_compatible or dtype == dtypes.string:\n        raise ValueError(f'Argument `dtype` expected to be numeric or boolean. Received {dtype}.')\n    if _PARTITION_SHAPE in kwargs:\n        shape = kwargs[_PARTITION_SHAPE]\n    return array_ops.ones(shape, dtype)",
    "docstring": "Returns a tensor object initialized as specified by the initializer. Args: shape: Shape of the tensor. dtype: Optional dtype of the tensor. Only numeric or boolean dtypes are supported. **kwargs: Additional keyword arguments. Raises: ValuesError: If the dtype is not numeric or boolean.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\init_ops_v2.py",
    "ast_data": "FunctionDef name:__call__ arg:self arg:shape arg:dtype arguments arg arg arg arg Call Assign Call If BoolOp Compare Raise Call If Compare Assign Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_is_guaranteed_const",
    "source_code": "def _is_guaranteed_const(tensor):\n    if isinstance(tensor, ops.EagerTensor):\n        return False\n\n    class Work(object):\n\n        def __init__(self, op: ops.Operation, leaving):\n            self.op = op\n            self.leaving = leaving\n    is_guaranteed_const = lambda op: op.node_def.op == 'GuaranteeConst'\n    constants = set([])\n\n    def all_inputs_const(op: ops.Operation):\n        return op.inputs and all((inp.op in constants for inp in op.inputs))\n    visited = set([])\n    stack = [Work(tensor.op, leaving=False)]\n    while stack:\n        work = stack.pop()\n        if work.leaving:\n            if all_inputs_const(work.op):\n                constants.add(work.op)\n            continue\n        visited.add(work.op)\n        if is_guaranteed_const(work.op):\n            constants.add(work.op)\n            continue\n        stack.append(Work(work.op, leaving=True))\n        for inp in work.op.inputs:\n            if inp.op not in visited:\n                stack.append(Work(inp.op, leaving=False))\n    return tensor.op in constants",
    "docstring": "Determines whether is guaranteed to be a constant. A tensor is guaranteed to be a constant if either it was produced by a op or if all of its children are guaranteed to be constants. Args: tensor: The tensor for which to determine const-ness. Returns: True if is guaranteed to be a constant, False otherwise.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\function.py",
    "ast_data": "FunctionDef name:_is_guaranteed_const arg:tensor arguments arg If Call Return return:yes ClassDef name:Work FunctionDef name:__init__ arg:self arg:op arg:leaving arguments arg arg arg Assign Assign Assign arguments arg Compare Assign Call FunctionDef name:all_inputs_const arg:op arguments arg Return return:yes BoolOp Call Compare Assign Call Assign Call While Assign Call If If Call Call Call If Call Call Call Call For If Compare Call Call Return return:yes Compare"
  },
  {
    "library": "pytorch",
    "name": "create_minified_hlo_graph",
    "source_code": "def create_minified_hlo_graph(minified_fx_graph, inputs):\n    hlo_dir = f'{os.getcwd()}/hlo_files'\n    os.makedirs(hlo_dir, exists_ok=True)\n    from torch_xla.stablehlo import save_torch_model_as_stablehlo\n    save_torch_model_as_stablehlo(minified_fx_graph, inputs, hlo_dir)",
    "docstring": "Takes minified FX graph as primary input, and ports it to HLO via StableHLO Provides minified HLO graph as output, and archive them to local directory",
    "type": "function",
    "file_path": "pytorch\\torch\\_functorch\\fx_minifier.py",
    "ast_data": "FunctionDef name:create_minified_hlo_graph arg:minified_fx_graph arg:inputs arguments arg arg Assign Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "_conv_add_extra_inputs_getter_right",
    "source_code": "def _conv_add_extra_inputs_getter_right(pattern):\n    _, extra_input, _conv = pattern\n    return [extra_input]",
    "docstring": "get inputs pattern for extra inputs, inputs for root node are assumed to be copied over from root node to the fused node",
    "type": "function",
    "file_path": "pytorch\\torch\\ao\\quantization\\backend_config\\onednn.py",
    "ast_data": "FunctionDef name:_conv_add_extra_inputs_getter_right arg:pattern arguments arg Assign Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "_read",
    "source_code": "def _read(self, file):\n    rawdata = file.read()\n    if not rawdata.startswith(b'\\x80'):\n        return rawdata\n    data = b''\n    while rawdata:\n        if not rawdata.startswith(b'\\x80'):\n            raise RuntimeError('Broken pfb file (expected byte 128, got %d)' % rawdata[0])\n        type = rawdata[1]\n        if type in (1, 2):\n            length, = struct.unpack('<i', rawdata[2:6])\n            segment = rawdata[6:6 + length]\n            rawdata = rawdata[6 + length:]\n        if type == 1:\n            data += segment\n        elif type == 2:\n            data += binascii.hexlify(segment)\n        elif type == 3:\n            break\n        else:\n            raise RuntimeError('Unknown segment type %d in pfb file' % type)\n    return data",
    "docstring": "Read the font from a file, decoding into usable parts.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\_type1font.py",
    "ast_data": "FunctionDef name:_read arg:self arg:file arguments arg arg Assign Call If Call Return return:yes Assign While If Call Raise Call Assign If Compare Assign Call Assign Assign If Compare If Compare Call If Compare Raise Call Return return:yes"
  },
  {
    "library": "django",
    "name": "Reference",
    "source_code": "class Reference:\n\n    def references_table(self, table):\n        return False\n\n    def references_column(self, table, column):\n        return False\n\n    def references_index(self, table, index):\n        return False\n\n    def rename_table_references(self, old_table, new_table):\n        pass\n\n    def rename_column_references(self, table, old_column, new_column):\n        pass\n\n    def __repr__(self):\n        return '<%s %r>' % (self.__class__.__name__, str(self))\n\n    def __str__(self):\n        raise NotImplementedError('Subclasses must define how they should be converted to string.')",
    "docstring": "Base class that defines the reference interface.",
    "type": "class",
    "file_path": "django\\django\\db\\backends\\ddl_references.py",
    "ast_data": "ClassDef name:Reference FunctionDef name:references_table arg:self arg:table arguments arg arg Return return:yes FunctionDef name:references_column arg:self arg:table arg:column arguments arg arg arg Return return:yes FunctionDef name:references_index arg:self arg:table arg:index arguments arg arg arg Return return:yes FunctionDef name:rename_table_references arg:self arg:old_table arg:new_table arguments arg arg arg FunctionDef name:rename_column_references arg:self arg:table arg:old_column arg:new_column arguments arg arg arg arg FunctionDef name:__repr__ arg:self arguments arg Return return:yes Call FunctionDef name:__str__ arg:self arguments arg Raise Call"
  },
  {
    "library": "tensorflow",
    "name": "_MatMulGradAgainstFirstOnly",
    "source_code": "def _MatMulGradAgainstFirstOnly(op: ops.Operation, grad):\n    t_a = op.get_attr('transpose_a')\n    t_b = op.get_attr('transpose_b')\n    b = math_ops.conj(op.inputs[1])\n    if not t_a and (not t_b):\n        grad_a = gen_math_ops.mat_mul(grad, b, transpose_b=True, grad_a=True)\n    elif not t_a and t_b:\n        grad_a = gen_math_ops.mat_mul(grad, b, grad_a=True)\n    elif t_a and (not t_b):\n        grad_a = gen_math_ops.mat_mul(b, grad, transpose_b=True, grad_a=True)\n    elif t_a and t_b:\n        grad_a = gen_math_ops.mat_mul(b, grad, transpose_a=True, transpose_b=True, grad_a=True)\n    return (grad_a, None)",
    "docstring": "Gradient for MatMul, only for the first input.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\math_grad.py",
    "ast_data": "FunctionDef name:_MatMulGradAgainstFirstOnly arg:op arg:grad arguments arg arg Assign Call Assign Call Assign Call If BoolOp Assign Call If BoolOp Assign Call If BoolOp Assign Call If BoolOp Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "set_server_def_retries",
    "source_code": "def set_server_def_retries(retries):\n    context().set_server_def_retries(retries)",
    "docstring": "Set the number of retries to use when calling SetServerDef. In cases where many servers run in high-preemption environments, jobs could be preempted during startup and initial connection via SetServerDef. Retries allow for more robust connection in these environments. Args: retries: int specifying the number of connection retries before failing. Retries follow an exponential backoff waiting period with min value 1ms, max value 10s, and exponent 1.3.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\context.py",
    "ast_data": "FunctionDef name:set_server_def_retries arg:retries arguments arg Call Call"
  },
  {
    "library": "scipy",
    "name": "trimtail",
    "source_code": "def trimtail(data, proportiontocut=0.2, tail='left', inclusive=(True, True), axis=None):\n    tail = str(tail).lower()[0]\n    if tail == 'l':\n        limits = (proportiontocut, None)\n    elif tail == 'r':\n        limits = (None, proportiontocut)\n    else:\n        raise TypeError(\"The tail argument should be in ('left','right')\")\n    return trimr(data, limits=limits, axis=axis, inclusive=inclusive)",
    "docstring": "Trims the data by masking values from one tail. Parameters ---------- data : array_like Data to trim. proportiontocut : float, optional Percentage of trimming. If n is the number of unmasked values before trimming, the number of values after trimming is `proportiontocutproportiontocutdata` with masked tail values.",
    "type": "function",
    "file_path": "scipy\\scipy\\stats\\_mstats_basic.py",
    "ast_data": "FunctionDef name:trimtail arg:data arg:proportiontocut arg:tail arg:inclusive arg:axis arguments arg arg arg arg arg Assign Call Call If Compare Assign If Compare Assign Raise Call Return return:yes Call"
  },
  {
    "library": "kornia",
    "name": "camtoworld_to_worldtocam_Rt",
    "source_code": "def camtoworld_to_worldtocam_Rt(R: Tensor, t: Tensor) -> tuple[Tensor, Tensor]:\n    KORNIA_CHECK_SHAPE(R, ['B', '3', '3'])\n    KORNIA_CHECK_SHAPE(t, ['B', '3', '1'])\n    R_inv = R.transpose(1, 2)\n    new_t: Tensor = -R_inv @ t\n    return (R_inv, new_t)",
    "docstring": "Convert camtoworld to worldtocam frame used in Colmap. See long-url: Args: R: Rotation matrix, :math: t: Translation matrix :math:. Returns: Rinv: Rotation matrix, :math: tinv: Translation matrix :math:. Example: >>> R, t = torch.eye(3)[None], torch.ones(3).reshape(1, 3, 1) >>> camtoworld_to_worldtocam_Rt(R, t) (tensor([[[1., 0., 0.], [0., 1., 0.], [0., 0., 1.]]]), tensor([[[-1.], [-1.], [-1.]]]))",
    "type": "function",
    "file_path": "kornia\\kornia\\geometry\\conversions.py",
    "ast_data": "FunctionDef name:camtoworld_to_worldtocam_Rt arg:R arg:t arguments arg arg Call Call Assign Call Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "predict_proba",
    "source_code": "@available_if(_check_proba)\ndef predict_proba(self, X):\n    check_is_fitted(self)\n    if self.loss == 'log_loss':\n        return self._predict_proba_lr(X)\n    elif self.loss == 'modified_huber':\n        binary = len(self.classes_) == 2\n        scores = self.decision_function(X)\n        if binary:\n            prob2 = np.ones((scores.shape[0], 2))\n            prob = prob2[:, 1]\n        else:\n            prob = scores\n        np.clip(scores, -1, 1, prob)\n        prob += 1.0\n        prob /= 2.0\n        if binary:\n            prob2[:, 0] -= prob\n            prob = prob2\n        else:\n            prob_sum = prob.sum(axis=1)\n            all_zero = prob_sum == 0\n            if np.any(all_zero):\n                prob[all_zero, :] = 1\n                prob_sum[all_zero] = len(self.classes_)\n            prob /= prob_sum.reshape((prob.shape[0], -1))\n        return prob\n    else:\n        raise NotImplementedError(\"predict_(log_)proba only supported when loss='log_loss' or loss='modified_huber' (%r given)\" % self.loss)",
    "docstring": "Probability estimates. This method is only available for log loss and modified Huber loss. Multiclass probability estimates are derived from binary (one-vs.-rest) estimates by simple normalization, as recommended by Zadrozny and Elkan. Binary probability estimates for loss=\"modified_huber\" are given by (clip(decision_function(X), -1, 1) + 1) / 2. For other loss functions it is necessary to perform proper probability calibration by wrapping the classifier with :class: instead. Parameters ---------- X : {array-like, sparse matrix}, shape (n_samples, n_features) Input data for prediction. Returns ------- ndarray of shape (n_samples, n_classes) Returns the probability of the sample for each class in the model, where classes are ordered as they are in . References ---------- Zadrozny and Elkan, \"Transforming classifier scores into multiclass probability estimates\", SIGKDD'02, The justification for the formula in the loss=\"modified_huber\" case is in the appendix B in:",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\linear_model\\_stochastic_gradient.py",
    "ast_data": "FunctionDef name:predict_proba arg:self arg:X arguments arg arg Call If Compare Return return:yes Call If Compare Assign Compare Call Assign Call If Assign Call Assign Assign Call If Assign Assign Call Assign Compare If Call Assign Assign Call Call Return return:yes Raise Call Call"
  },
  {
    "library": "kornia",
    "name": "inverse_keypoints",
    "source_code": "def inverse_keypoints(self, input: Union[Tensor, Keypoints], params: List[ParamItem], extra_args: Optional[Dict[str, Any]]=None) -> Union[Tensor, Keypoints]:\n    if isinstance(input, Tensor):\n        frame_num, batchsize = (input.size(0), input.size(1))\n        input = Keypoints(input.view(-1, input.size(2), input.size(3)))\n        input = super().inverse_keypoints(input, params, extra_args=extra_args)\n        input = input.data.view(batchsize, frame_num, -1, 2)\n    else:\n        input = super().inverse_keypoints(input, params, extra_args=extra_args)\n    return input",
    "docstring": "Transform bounding boxes. Args: input: tensor with shape :math:. If input is a type, the internal shape is :math:. params: params for the sequence. extra_args: Optional dictionary of extra arguments with specific options for different input types.",
    "type": "method",
    "file_path": "kornia\\kornia\\augmentation\\container\\video.py",
    "ast_data": "FunctionDef name:inverse_keypoints arg:self arg:input arg:params arg:extra_args arguments arg arg arg arg If Call Assign Call Call Assign Call Call Call Call Assign Call Call Assign Call Assign Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "map_resources",
    "source_code": "def map_resources(self):\n    assert not context.executing_eagerly()\n    object_map = object_identity.ObjectIdentityDictionary()\n    tensor_map = object_identity.ObjectIdentityDictionary()\n    asset_info = _AssetInfo(asset_defs=[], asset_initializers_by_resource=object_identity.ObjectIdentityDictionary(), asset_filename_map={}, asset_index={})\n    for node_id in _dependency_sorted_node_ids(self):\n        obj = self.nodes[node_id]\n        tensors = obj._export_to_saved_model_graph(object_map=object_map, tensor_map=tensor_map, options=self.options)\n        if isinstance(obj, asset.Asset):\n            _add_asset_info(obj, asset_info, tensor_map[obj.asset_path])\n        if tensors:\n            for tensor in tensors:\n                self.captured_tensor_node_ids[tensor] = node_id\n    return (object_map, tensor_map, asset_info)",
    "docstring": "Makes new resource handle ops corresponding to existing resource tensors. Creates resource handle ops in the current default graph, whereas will be from an eager context. Resource mapping adds resource handle ops to the main GraphDef of a SavedModel, which allows the C++ loader API to interact with resources. Returns: A tuple of (object_map, tensor_map, asset_info): object_map: A dictionary mapping from object in to replacement objects created to hold the new resource tensors. tensor_map: A dictionary mapping from resource tensors extracted from to newly created resource tensors. asset_info: An _AssetInfo tuple describing external assets referenced from accessible_objects.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\saved_model\\save.py",
    "ast_data": "FunctionDef name:map_resources arg:self arguments arg Call Assign Call Assign Call Assign Call Call For Call Assign Assign Call If Call Call If For Assign Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_clear_cache",
    "source_code": "def _clear_cache(self) -> None:\n    self._partition_parameters_cache.clear()\n    self._param_to_rank_cache.clear()\n    self._index_to_param_cache.clear()\n    self._param_to_index_cache.clear()\n    self._device_to_params_per_rank_cache.clear()\n    self._bucket_assignments_per_rank_cache.clear()",
    "docstring": "Clear the cached data structures giving partition information.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\optim\\zero_redundancy_optimizer.py",
    "ast_data": "FunctionDef name:_clear_cache arg:self arguments arg Call Call Call Call Call Call"
  },
  {
    "library": "django",
    "name": "handle_uncaught_exception",
    "source_code": "def handle_uncaught_exception(request, resolver, exc_info):\n    if settings.DEBUG_PROPAGATE_EXCEPTIONS:\n        raise\n    if settings.DEBUG:\n        return debug.technical_500_response(request, *exc_info)\n    callback = resolver.resolve_error_handler(500)\n    return callback(request)",
    "docstring": "Processing for any otherwise uncaught exceptions (those that will generate HTTP 500 responses).",
    "type": "function",
    "file_path": "django\\django\\core\\handlers\\exception.py",
    "ast_data": "FunctionDef name:handle_uncaught_exception arg:request arg:resolver arg:exc_info arguments arg arg arg If Raise If Return return:yes Call Assign Call Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "calinski_harabasz_score",
    "source_code": "@validate_params({'X': ['array-like'], 'labels': ['array-like']}, prefer_skip_nested_validation=True)\ndef calinski_harabasz_score(X, labels):\n    X, labels = check_X_y(X, labels)\n    le = LabelEncoder()\n    labels = le.fit_transform(labels)\n    n_samples, _ = X.shape\n    n_labels = len(le.classes_)\n    check_number_of_labels(n_labels, n_samples)\n    extra_disp, intra_disp = (0.0, 0.0)\n    mean = np.mean(X, axis=0)\n    for k in range(n_labels):\n        cluster_k = X[labels == k]\n        mean_k = np.mean(cluster_k, axis=0)\n        extra_disp += len(cluster_k) * np.sum((mean_k - mean) ** 2)\n        intra_disp += np.sum((cluster_k - mean_k) ** 2)\n    return float(1.0 if intra_disp == 0.0 else extra_disp * (n_samples - n_labels) / (intra_disp * (n_labels - 1.0)))",
    "docstring": "Compute the Calinski and Harabasz score. It is also known as the Variance Ratio Criterion. The score is defined as ratio of the sum of between-cluster dispersion and of within-cluster dispersion. Read more in the :ref:. Parameters ---------- X : array-like of shape (n_samples, n_features) A list of `T. Calinski and J. Harabasz, 1974. \"A dendrite method for cluster analysis\". Communications in Statistics `_ Examples -------- >>> from sklearn.datasets import make_blobs >>> from sklearn.cluster import KMeans >>> from sklearn.metrics import calinski_harabasz_score >>> X, _ = make_blobs(random_state=0) >>> kmeans = KMeans(n_clusters=3, random_state=0,).fit(X) >>> calinski_harabasz_score(X, kmeans.labels_) 114.8...",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\metrics\\cluster\\_unsupervised.py",
    "ast_data": "FunctionDef name:calinski_harabasz_score arg:X arg:labels arguments arg arg Assign Call Assign Call Assign Call Assign Assign Call Call Assign Assign Call For Call Assign Compare Assign Call Call Call Call Return return:yes Call Compare Call"
  },
  {
    "library": "pytorch",
    "name": "initialize_lazy_module",
    "source_code": "def initialize_lazy_module(tx: 'InstructionTranslator', mod, args, kwargs):\n    if hasattr(mod, '_initialize_hook'):\n\n        def convert_to_fake(x):\n            if is_namedtuple(x):\n                return type(x)(*(convert_to_fake(elem) for elem in x))\n            elif isinstance(x, dict):\n                return {k: convert_to_fake(v) for k, v in x.items()}\n            elif isinstance(x, (list, tuple, set)):\n                return type(x)((convert_to_fake(elem) for elem in x))\n            elif isinstance(x, torch.fx.Proxy):\n                return get_fake_value(x.node, tx)\n            else:\n                return x\n        proxy_args, proxy_kwargs = proxy_args_kwargs(args, kwargs)\n        fake_args = [convert_to_fake(arg) for arg in proxy_args]\n        fake_kwargs = {k: convert_to_fake(v) for k, v in proxy_kwargs.items()}\n        mod._infer_parameters(mod, fake_args, fake_kwargs)",
    "docstring": "Fairly coupled helper used by NNModuleVariable and UnspecializedNNModuleVariable. Used to cause lazy module to be initialized (and delete its init hook) before tracing. Especially useful now that 'allowed' modules graph-break on hooks, calling this first ensures there is no hook by the time we trace __call__ and thus no graph-break for lazy allowed modules.",
    "type": "function",
    "file_path": "pytorch\\torch\\_dynamo\\variables\\nn_module.py",
    "ast_data": "FunctionDef name:initialize_lazy_module arg:tx arg:mod arg:args arg:kwargs arguments arg arg arg arg If Call FunctionDef name:convert_to_fake arg:x arguments arg If Call Return return:yes Call Call Call If Call Return return:yes Call Call If Call Return return:yes Call Call Call If Call Return return:yes Call Return return:yes Assign Call Assign Call Assign Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "transformer_auto_wrap_policy",
    "source_code": "def transformer_auto_wrap_policy(module: nn.Module, recurse: bool, nonwrapped_numel: int, transformer_layer_cls: set[type[nn.Module]]) -> bool:\n    return _module_wrap_policy(module, recurse, nonwrapped_numel, transformer_layer_cls)",
    "docstring": "See :func:, where ``. Note that shared parameters must be wrapped in the same FSDP instance, so this auto wrap policy can help wrap shared embeddings into the same FSDP instance for transformer models.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\fsdp\\wrap.py",
    "ast_data": "FunctionDef name:transformer_auto_wrap_policy arg:module arg:recurse arg:nonwrapped_numel arg:transformer_layer_cls arguments arg arg arg arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "emit_region",
    "source_code": "def emit_region(self, timestamp: int, duration: int, pid: int, tid: int, category: str, name: str, args: Dict[str, Any]) -> None:\n    event = self._create_event('X', category, name, pid, tid, timestamp)\n    event['dur'] = duration\n    event['args'] = args\n    self._events.append(event)",
    "docstring": "Adds a region event to the trace. Args: timestamp: The start timestamp of this region as a long integer. duration: The duration of this region as a long integer. pid: Identifier of the process generating this event as an integer. tid: Identifier of the thread generating this event as an integer. category: The event category as a string. name: The event name as a string. args: A JSON-compatible dictionary of event arguments.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\client\\timeline.py",
    "ast_data": "FunctionDef name:emit_region arg:self arg:timestamp arg:duration arg:pid arg:tid arg:category arg:name arg:args arguments arg arg arg arg arg arg arg arg Assign Call Assign Assign Call"
  },
  {
    "library": "sphinx",
    "name": "validate_html_extra_path",
    "source_code": "def validate_html_extra_path(app: Sphinx, config: Config) -> None:\n    html_extra_path = []\n    for entry in config.html_extra_path:\n        extra_path = (app.confdir / entry).resolve()\n        if extra_path.exists():\n            if app.outdir.drive == extra_path.drive and extra_path.is_relative_to(app.outdir):\n                logger.warning(__('html_extra_path entry %r is placed inside outdir'), entry)\n            else:\n                html_extra_path.append(entry)\n        else:\n            logger.warning(__('html_extra_path entry %r does not exist'), entry)\n    config.html_extra_path = html_extra_path",
    "docstring": "Check html_extra_paths setting.",
    "type": "function",
    "file_path": "sphinx\\sphinx\\builders\\html\\__init__.py",
    "ast_data": "FunctionDef name:validate_html_extra_path arg:app arg:config arguments arg arg Assign For Assign Call If Call If BoolOp Compare Call Call Call Call Call Call Assign"
  },
  {
    "library": "matplotlib",
    "name": "view_limits",
    "source_code": "def view_limits(self, vmin, vmax):\n    b = self._base\n    if vmax < vmin:\n        vmin, vmax = (vmax, vmin)\n    if mpl.rcParams['axes.autolimit_mode'] == 'round_numbers':\n        vmin = _decade_less_equal(vmin, b)\n        vmax = _decade_greater_equal(vmax, b)\n        if vmin == vmax:\n            vmin = _decade_less(vmin, b)\n            vmax = _decade_greater(vmax, b)\n    return mtransforms.nonsingular(vmin, vmax)",
    "docstring": "Try to choose the view limits intelligently.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\ticker.py",
    "ast_data": "FunctionDef name:view_limits arg:self arg:vmin arg:vmax arguments arg arg arg Assign If Compare Assign If Compare Assign Call Assign Call If Compare Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "django",
    "name": "__len__",
    "source_code": "def __len__(self):\n    return self.num_feat",
    "docstring": "The length is the number of features.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\gdal\\layer.py",
    "ast_data": "FunctionDef name:__len__ arg:self arguments arg Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "__init__",
    "source_code": "def __init__(self, nx, ny):\n    self.nx = nx\n    self.ny = ny",
    "docstring": "Parameters ---------- nx, ny : int The number of samples in each direction.",
    "type": "method",
    "file_path": "matplotlib\\lib\\mpl_toolkits\\axisartist\\grid_finder.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:nx arg:ny arguments arg arg arg Assign Assign"
  },
  {
    "library": "tensorflow",
    "name": "_create_dense_column_weighted_sum",
    "source_code": "def _create_dense_column_weighted_sum(column, transformation_cache, state_manager, weight_var):\n    tensor = column.get_dense_tensor(transformation_cache, state_manager)\n    num_elements = column.variable_shape.num_elements()\n    batch_size = array_ops.shape(tensor)[0]\n    tensor = array_ops.reshape(tensor, shape=(batch_size, num_elements))\n    return math_ops.matmul(tensor, weight_var, name='weighted_sum')",
    "docstring": "Create a weighted sum of a dense column for linear_model.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\feature_column\\feature_column_v2.py",
    "ast_data": "FunctionDef name:_create_dense_column_weighted_sum arg:column arg:transformation_cache arg:state_manager arg:weight_var arguments arg arg arg arg Assign Call Assign Call Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "get_center",
    "source_code": "def get_center(self):\n    return self._center",
    "docstring": "Return the center of the ellipse.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\patches.py",
    "ast_data": "FunctionDef name:get_center arg:self arguments arg Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "set_children",
    "source_code": "def set_children(self, *children):\n    id_self = id(self)\n    for child in children:\n        ref = weakref.ref(self, lambda _, pop=child._parents.pop, k=id_self: pop(k))\n        child._parents[id_self] = ref",
    "docstring": "Set the children of the transform, to let the invalidation system know which transforms can invalidate this transform. Should be called from the constructor of any transforms that depend on other transforms.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\transforms.py",
    "ast_data": "FunctionDef name:set_children arg:self arguments arg arg Assign Call For Assign Call arguments arg arg arg Call Assign"
  },
  {
    "library": "pytorch",
    "name": "_create_c10d_store",
    "source_code": "def _create_c10d_store(hostname, port, rank, world_size, timeout, use_libuv=True) -> Store:\n    if not 0 <= port < 2 ** 16:\n        raise ValueError(f'port must have value from 0 to 65535 but was {port}.')\n    if _torchelastic_use_agent_store():\n        return TCPStore(host_name=hostname, port=port, world_size=world_size, is_master=False, timeout=timeout)\n    else:\n        start_daemon = rank == 0\n        return TCPStore(host_name=hostname, port=port, world_size=world_size, is_master=start_daemon, timeout=timeout, multi_tenant=True, use_libuv=use_libuv)",
    "docstring": "Smartly creates a c10d Store object on ``. All non-zero ranks will create and return a TCPStore client.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\rendezvous.py",
    "ast_data": "FunctionDef name:_create_c10d_store arg:hostname arg:port arg:rank arg:world_size arg:timeout arg:use_libuv arguments arg arg arg arg arg arg If Compare Raise Call If Call Return return:yes Call Assign Compare Return return:yes Call"
  },
  {
    "library": "numpy",
    "name": "MethodsV1",
    "source_code": "class MethodsV1(Benchmark):\n    params = [['__add__', '__eq__', '__ge__', '__gt__', '__le__', '__lt__', '__matmul__', '__mul__', '__ne__', '__pow__', '__sub__', '__truediv__'], TYPES1]\n    param_names = ['methods', 'npdtypes']\n    timeout = 10\n\n    def setup(self, methname, npdtypes):\n        values = get_squares_().get(npdtypes)\n        self.xargs = [values[0], values[1]]\n        if np.issubdtype(npdtypes, np.inexact):\n            self.xargs[1] *= 0.01\n\n    def time_ndarray_meth(self, methname, npdtypes):\n        getattr(operator, methname)(*self.xargs)",
    "docstring": "Benchmark for the methods which take an argument",
    "type": "class",
    "file_path": "numpy\\benchmarks\\benchmarks\\bench_ufunc.py",
    "ast_data": "ClassDef name:MethodsV1 Assign Assign Assign FunctionDef name:setup arg:self arg:methname arg:npdtypes arguments arg arg arg Assign Call Call Assign If Call FunctionDef name:time_ndarray_meth arg:self arg:methname arg:npdtypes arguments arg arg arg Call Call"
  },
  {
    "library": "pytorch",
    "name": "_log_normalizer",
    "source_code": "def _log_normalizer(self, *natural_params):\n    raise NotImplementedError",
    "docstring": "Abstract method for log normalizer function. Returns a log normalizer based on the distribution and input",
    "type": "method",
    "file_path": "pytorch\\torch\\distributions\\exp_family.py",
    "ast_data": "FunctionDef name:_log_normalizer arg:self arguments arg arg Raise"
  },
  {
    "library": "pytorch",
    "name": "cleanup",
    "source_code": "def cleanup(self):\n    self.unregister_callback()",
    "docstring": "Calls unregister_callback() to make sure to finalize outputs.",
    "type": "method",
    "file_path": "pytorch\\torch\\profiler\\profiler.py",
    "ast_data": "FunctionDef name:cleanup arg:self arguments arg Call"
  },
  {
    "library": "tensorflow",
    "name": "update_if_not_finite_grads",
    "source_code": "def update_if_not_finite_grads():\n    new_loss_scale = math_ops.maximum(self.current_loss_scale / self.multiplier, 1)\n    return control_flow_ops.group(self.counter.assign(0), self.current_loss_scale.assign(new_loss_scale))",
    "docstring": "Update assuming the gradients are nonfinite.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\mixed_precision\\loss_scale_optimizer.py",
    "ast_data": "FunctionDef name:update_if_not_finite_grads arguments Assign Call Return return:yes Call Call Call"
  },
  {
    "library": "django",
    "name": "x",
    "source_code": "@property\ndef x(self):\n    return self._listarr(self._cs.getX)",
    "docstring": "Return a list or numpy array of the X variable.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\geos\\linestring.py",
    "ast_data": "FunctionDef name:x arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "enable_traceback_filtering",
    "source_code": "@tf_export('debugging.enable_traceback_filtering')\ndef enable_traceback_filtering():\n    if sys.version_info.major != 3 or sys.version_info.minor < 7:\n        raise RuntimeError(f'Traceback filtering is only available with Python 3.7 or higher. This Python version: {sys.version}')\n    global _ENABLE_TRACEBACK_FILTERING\n    _ENABLE_TRACEBACK_FILTERING.value = True",
    "docstring": "Enable filtering out TensorFlow-internal frames in exception stack traces. Raw TensorFlow stack traces involve many internal frames, which can be challenging to read through, while not being actionable for end users. By default, TensorFlow filters internal frames in most exceptions that it raises, to keep stack traces short, readable, and focused on what's actionable for end users (their own code). If you have previously disabled traceback filtering via , you can re-enable it via . Raises: RuntimeError: If Python version is not at least 3.7.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\util\\traceback_utils.py",
    "ast_data": "FunctionDef name:enable_traceback_filtering arguments If BoolOp Compare Compare Raise Call Assign Call"
  },
  {
    "library": "scipy",
    "name": "x",
    "source_code": "@property\ndef x(self):\n    return self._scale_parameters(self.population[0])",
    "docstring": "The best solution from the solver",
    "type": "method",
    "file_path": "scipy\\scipy\\optimize\\_differentialevolution.py",
    "ast_data": "FunctionDef name:x arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "remove_op_callback",
    "source_code": "def remove_op_callback(op_callback):\n    ctx = context.context()\n    ctx.remove_op_callback(op_callback)\n    if ctx.executing_eagerly() and (not ctx.op_callbacks):\n        execute.execute = execute.quick_execute",
    "docstring": "Remove an already-added op callback. Args: op_callback: The op callback to be removed. Raises: KeyError: If has not been registered using before.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\op_callbacks.py",
    "ast_data": "FunctionDef name:remove_op_callback arg:op_callback arguments arg Assign Call Call If BoolOp Call Assign"
  },
  {
    "library": "pytorch",
    "name": "_streaming_save",
    "source_code": "def _streaming_save(obj: object, f: BufferedIOBase, pickle_module: Any=pickle, pickle_protocol: int=DEFAULT_PROTOCOL) -> None:\n    zip_file = _PseudoZipFile()\n    _save(obj, zip_file=zip_file, pickle_module=pickle_module, pickle_protocol=pickle_protocol, _disable_byteorder_record=False)\n    zip_file.write_to(f)",
    "docstring": "Save the object to a file-like object in a streaming fashion compatible with network sockets. This behaves similarly to :func: with a few notable differences: * A non-seekable file like object can be used when loading. * No forwards/backwards compatiblity is provided for the serialization format. This is only intended to be used with a single version of PyTorch with transient storage (i.e. sockets or temp files). * mmap is not supported See :func: for more details on specific arguments.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\_serialization.py",
    "ast_data": "FunctionDef name:_streaming_save arg:obj arg:f arg:pickle_module arg:pickle_protocol arguments arg arg arg arg Assign Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "random_uniform_variable",
    "source_code": "@doc_controls.do_not_generate_docs\ndef random_uniform_variable(shape, low, high, dtype=None, name=None, seed=None):\n    if dtype is None:\n        dtype = floatx()\n    tf_dtype = dtypes_module.as_dtype(dtype)\n    if seed is None:\n        seed = np.random.randint(1000000000.0)\n    value = init_ops.random_uniform_initializer(low, high, dtype=tf_dtype, seed=seed)(shape)\n    return variable(value, dtype=dtype, name=name)",
    "docstring": "Instantiates a variable with values drawn from a uniform distribution. Args: shape: Tuple of integers, shape of returned Keras variable. low: Float, lower boundary of the output interval. high: Float, upper boundary of the output interval. dtype: String, dtype of returned Keras variable. name: String, name of returned Keras variable. seed: Integer, random seed. Returns: A Keras variable, filled with drawn samples. Example: >>> kvar = tf.keras.backend.random_uniform_variable(shape=(2,3), ... low=0.0, high=1.0) >>> kvar",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\backend.py",
    "ast_data": "FunctionDef name:random_uniform_variable arg:shape arg:low arg:high arg:dtype arg:name arg:seed arguments arg arg arg arg arg arg If Compare Assign Call Assign Call If Compare Assign Call Assign Call Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_convert_as_saved_model",
    "source_code": "def _convert_as_saved_model(self):\n    temp_dir = tempfile.mkdtemp()\n    try:\n        self._freeze_keras_model(temp_dir)\n        if self.saved_model_dir:\n            return super(TFLiteKerasModelConverter, self).convert()\n    finally:\n        shutil.rmtree(temp_dir, True)",
    "docstring": "Converts a Keras model as a saved model. Returns: The converted data in serialized format.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\lite\\python\\lite.py",
    "ast_data": "FunctionDef name:_convert_as_saved_model arg:self arguments arg Assign Call Try Call If Return return:yes Call Call Call"
  },
  {
    "library": "cherrypy",
    "name": "values",
    "source_code": "def values(self):\n    if not self.loaded:\n        self.load()\n    return self._data.values()",
    "docstring": "Return an iterable of session objects. D.values() -> list of D's values.",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\lib\\sessions.py",
    "ast_data": "FunctionDef name:values arg:self arguments arg If Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "get_default_static_sparse_quant_module_mappings",
    "source_code": "def get_default_static_sparse_quant_module_mappings() -> dict[Callable, Any]:\n    return copy.deepcopy(DEFAULT_STATIC_SPARSE_QUANT_MODULE_MAPPINGS)",
    "docstring": "Get module mapping for post training static sparse quantization",
    "type": "function",
    "file_path": "pytorch\\torch\\ao\\quantization\\quantization_mappings.py",
    "ast_data": "FunctionDef name:get_default_static_sparse_quant_module_mappings arguments Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "ElAttarVidyasagarDutta",
    "source_code": "class ElAttarVidyasagarDutta(Benchmark):\n\n    def __init__(self, dimensions=2):\n        Benchmark.__init__(self, dimensions)\n        self._bounds = list(zip([-100.0] * self.N, [100.0] * self.N))\n        self.custom_bounds = [(-4, 4), (-4, 4)]\n        self.global_optimum = [[3.40918683, -2.17143304]]\n        self.fglob = 1.712780354\n\n    def fun(self, x, *args):\n        self.nfev += 1\n        return (x[0] ** 2 + x[1] - 10) ** 2 + (x[0] + x[1] ** 2 - 7) ** 2 + (x[0] ** 2 + x[1] ** 3 - 1) ** 2",
    "docstring": "El-Attar-Vidyasagar-Dutta [1]_ objective function. This class defines the El-Attar-Vidyasagar-Dutta function global optimization problem. This is a multimodal minimization problem defined as follows: .. math:: f_{\\text{ElAttarVidyasagarDutta}}(x) = (x_1^2 + x_2 - 10)^2 + (x_1 + x_2^2 - 7)^2 + (x_1^2 + x_2^3 - 1)^2 with :math: for :math:. *Global optimum*: :math: for :math: .. [1] Gavana, A. Global Optimization Benchmarks and AMPGO retrieved 2015",
    "type": "class",
    "file_path": "scipy\\benchmarks\\benchmarks\\go_benchmark_functions\\go_funcs_E.py",
    "ast_data": "ClassDef name:ElAttarVidyasagarDutta FunctionDef name:__init__ arg:self arg:dimensions arguments arg arg Call Assign Call Call Assign Assign Assign FunctionDef name:fun arg:self arg:x arguments arg arg arg Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "joined",
    "source_code": "def joined(self, a, b):\n    return self._mapping.get(a, object()) is self._mapping.get(b)",
    "docstring": "Return whether *a* and *b* are members of the same set.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\cbook.py",
    "ast_data": "FunctionDef name:joined arg:self arg:a arg:b arguments arg arg arg Return return:yes Compare Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "dequantize_per_channel_group",
    "source_code": "@impl(quantized_decomposed_lib, 'dequantize_per_channel_group', 'CompositeExplicitAutograd')\ndef dequantize_per_channel_group(w_int8: torch.Tensor, scales: torch.Tensor, zero_points: Optional[torch.Tensor], quant_min: int, quant_max: int, dtype: torch.dtype, group_size: int=128, output_dtype: torch.dtype=torch.float32):\n    assert group_size > 1\n    if group_size > w_int8.shape[-1] and scales.shape[-1] == 1:\n        group_size = w_int8.shape[-1]\n    assert w_int8.shape[-1] % group_size == 0\n    assert w_int8.dim() == 2\n    w_int8_grouped = w_int8.reshape(-1, group_size)\n    scales = scales.reshape(-1, 1)\n    if zero_points is not None:\n        zp = zero_points.reshape(-1, 1)\n    else:\n        zp = torch.zeros([], dtype=torch.int32, device=scales.device)\n    w_dq = w_int8_grouped.sub(zp).mul(scales).reshape_as(w_int8).to(output_dtype)\n    return w_dq",
    "docstring": "Groupwise dequantization within each channel for an 2-d Tensor using the quantization parameters to map from floating point to quantized values. This means for each row of a 2-d Tensor (M, N), we calculate scales/zero_points for each elements and quantize every elements with the same quantization parameter. The dimension for scales/zero_points will be (M * ceil(N, group_size),) Args: input (torch.Tensor): quantized Tensor (uint8/int8 etc.) scales (float32 torch.Tensor): quantization parameter for per channel group affine quantization zero_points (int32 torch.Tensor): quantization parameter for per channel group affine quantization quant_min (int): minimum quantized value for input Tensor quant_max (int): maximum quantized value for input Tensor dtype (torch.dtype): dtype (e.g. torch.uint8) for input Tensor output_dtype (torch.dtype): dtype (e.g. torch.float32) for output Tensor Returns: dequantized Tensor with dtype",
    "type": "function",
    "file_path": "pytorch\\torch\\ao\\quantization\\fx\\_decomposed.py",
    "ast_data": "FunctionDef name:dequantize_per_channel_group arg:w_int8 arg:scales arg:zero_points arg:quant_min arg:quant_max arg:dtype arg:group_size arg:output_dtype arguments arg arg arg arg arg arg arg arg Compare If BoolOp Compare Compare Assign Compare Compare Call Assign Call Assign Call If Compare Assign Call Assign Call Assign Call Call Call Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_ComplexGrad",
    "source_code": "@ops.RegisterGradient('Complex')\ndef _ComplexGrad(op: ops.Operation, grad):\n    x = op.inputs[0]\n    y = op.inputs[1]\n    gx = math_ops.real(grad)\n    gy = math_ops.imag(grad)\n    return _ReduceGradientArgs(x, y, gx, gy)",
    "docstring": "Returns the real and imaginary components of 'grad', respectively.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\math_grad.py",
    "ast_data": "FunctionDef name:_ComplexGrad arg:op arg:grad arguments arg arg Assign Assign Assign Call Assign Call Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_cond",
    "source_code": "def _cond(i, _):\n    return math_ops.less(i, num_to_sample)",
    "docstring": "Stopping condition for the while loop.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\clustering_ops.py",
    "ast_data": "FunctionDef name:_cond arg:i arg:_ arguments arg arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_CoreLocation",
    "source_code": "class _CoreLocation:\n\n    def __init__(self, x: int=0, y: int=0, z: int=0, core: int=0):\n        self.x = x\n        self.y = y\n        self.z = z\n        self.core = core\n\n    def __eq__(self, other):\n        if not isinstance(other, _CoreLocation):\n            return False\n        return self.x == other.x and self.y == other.y and (self.z == other.z) and (self.core == other.core)\n\n    def __ne__(self, other):\n        if not isinstance(other, _CoreLocation):\n            return True\n        return not self == other\n\n    def __hash__(self):\n        return hash((self.x, self.y, self.z, self.core))\n\n    def __repr__(self):\n        return f'{type(self).__name__}(x={self.x}, y={self.y}, z={self.z}, core={self.core})'\n\n    def to_list(self):\n        return [self.x, self.y, self.z, self.core]",
    "docstring": "Represents a TPU core's location in the mesh.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\dtensor\\python\\tpu_util.py",
    "ast_data": "ClassDef name:_CoreLocation FunctionDef name:__init__ arg:self arg:x arg:y arg:z arg:core arguments arg arg arg arg arg Assign Assign Assign Assign FunctionDef name:__eq__ arg:self arg:other arguments arg arg If Call Return return:yes Return return:yes BoolOp Compare Compare Compare Compare FunctionDef name:__ne__ arg:self arg:other arguments arg arg If Call Return return:yes Return return:yes Compare FunctionDef name:__hash__ arg:self arguments arg Return return:yes Call FunctionDef name:__repr__ arg:self arguments arg Return return:yes Call FunctionDef name:to_list arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_CropAndResizeGrad",
    "source_code": "@ops.RegisterGradient('CropAndResize')\ndef _CropAndResizeGrad(op: ops.Operation, grad):\n    image = op.inputs[0]\n    if image.get_shape().is_fully_defined():\n        image_shape = image.get_shape().as_list()\n    else:\n        image_shape = array_ops.shape(image)\n    allowed_types = [dtypes.float16, dtypes.float32, dtypes.float64]\n    if op.inputs[0].dtype in allowed_types:\n        grad0 = gen_image_ops.crop_and_resize_grad_image(grad, op.inputs[1], op.inputs[2], image_shape, T=op.get_attr('T'), method=op.get_attr('method'))\n    else:\n        grad0 = None\n    grad1 = gen_image_ops.crop_and_resize_grad_boxes(grad, op.inputs[0], op.inputs[1], op.inputs[2])\n    return [grad0, grad1, None, None]",
    "docstring": "The derivatives for crop_and_resize. We back-propagate to the image only when the input image tensor has floating point dtype but we always back-propagate to the input boxes tensor. Args: op: The CropAndResize op. grad: The tensor representing the gradient w.r.t. the output. Returns: The gradients w.r.t. the input image, boxes, as well as the always-None gradients w.r.t. box_ind and crop_size.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\image_grad.py",
    "ast_data": "FunctionDef name:_CropAndResizeGrad arg:op arg:grad arguments arg arg Assign If Call Call Assign Call Call Assign Call Assign If Compare Assign Call Call Call Assign Assign Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "convert_shapes",
    "source_code": "def convert_shapes(input_shape, to_tuples=True):\n\n    def _is_shape_component(value):\n        return value is None or isinstance(value, (int, tensor_shape.Dimension))\n\n    def _is_atomic_shape(input_shape):\n        if _is_shape_component(input_shape):\n            return True\n        if isinstance(input_shape, tensor_shape.TensorShape):\n            return True\n        if isinstance(input_shape, (tuple, list)) and all((_is_shape_component(ele) for ele in input_shape)):\n            return True\n        return False\n\n    def _convert_shape(input_shape):\n        input_shape = tensor_shape.TensorShape(input_shape)\n        if to_tuples:\n            input_shape = tuple(input_shape.as_list())\n        return input_shape\n    return map_structure_with_atomic(_is_atomic_shape, _convert_shape, input_shape)",
    "docstring": "Converts nested shape representations to desired format. Performs: TensorShapes -> tuples if . tuples of int or None -> TensorShapes if . Valid objects to be converted are: - TensorShapes - tuples with elements of type int or None. - ints - None Args: input_shape: A nested structure of objects to be converted to TensorShapes. to_tuples: If , converts all TensorShape to tuples. Otherwise converts all tuples representing shapes to TensorShapes. Returns: Nested structure of shapes in desired format. Raises: ValueError: when the input tensor shape can't be converted to tuples, eg unknown tensor shape.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\utils\\tf_utils.py",
    "ast_data": "FunctionDef name:convert_shapes arg:input_shape arg:to_tuples arguments arg arg FunctionDef name:_is_shape_component arg:value arguments arg Return return:yes BoolOp Compare Call FunctionDef name:_is_atomic_shape arg:input_shape arguments arg If Call Return return:yes If Call Return return:yes If BoolOp Call Call Call Return return:yes Return return:yes FunctionDef name:_convert_shape arg:input_shape arguments arg Assign Call If Assign Call Call Return return:yes Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "__get_tensor_shard__",
    "source_code": "def __get_tensor_shard__(self, index: MetadataIndex) -> torch.Tensor:\n    if index.index is not None:\n        if len(self._local_shards) > index.index and self._storage_meta.chunks[index.index].offsets == index.offset:\n            return self._local_shards[index.index]\n    if index.offset is not None:\n        for shard, chunk in zip(self._local_shards, self._storage_meta.chunks):\n            if chunk.offsets == index.offset:\n                return shard\n    if len(self._local_shards) == 0 and self._storage_meta.chunks[0].sizes == torch.Size([0, 0]):\n        return torch.empty(0)\n    raise ValueError(f\"Could not find shard at '{index.offset}' for FQN: '{index.fqn}'\")",
    "docstring": "For compatibility with DCP, we support finding shard based on index Return a 'torch.Tensor' shard based on 'MetadataIndex'.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\tensor\\_shards_wrapper.py",
    "ast_data": "FunctionDef name:__get_tensor_shard__ arg:self arg:index arguments arg arg If Compare If BoolOp Compare Call Compare Return return:yes If Compare For Call If Compare Return return:yes If BoolOp Compare Call Compare Call Return return:yes Call Raise Call"
  },
  {
    "library": "pytorch",
    "name": "get_device_module",
    "source_code": "@functools.cache\ndef get_device_module(device: _Optional[_Union[torch.device, str]]=None):\n    if isinstance(device, torch.device):\n        device_module_name = device.type\n    elif isinstance(device, str):\n        device_module_name = torch.device(device).type\n    elif device is None:\n        device_module_name = torch._C._get_accelerator().type\n    else:\n        raise RuntimeError(f\"Invalid value of device '{device}', expect torch.device, str, or None\")\n    device_module = getattr(torch, device_module_name, None)\n    if device_module is None:\n        raise RuntimeError(f\"Device '{device_module_name}' does not have a corresponding module registered as 'torch.{device_module_name}'.\")\n    return device_module",
    "docstring": "Returns the module associated with a given device(e.g., torch.device('cuda'), \"mtia:0\", \"xpu\", ...). If no device is given, return the module for the current accelerator or CPU if none is present.",
    "type": "function",
    "file_path": "pytorch\\torch\\__init__.py",
    "ast_data": "FunctionDef name:get_device_module arg:device arguments arg If Call Assign If Call Assign Call If Compare Assign Call Raise Call Assign Call If Compare Raise Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "__init__",
    "source_code": "def __init__(self, axes, *, pickradius=15, clear=True):\n    super().__init__()\n    self._remove_overlapping_locs = True\n    self.set_figure(axes.get_figure(root=False))\n    self.isDefault_label = True\n    self.axes = axes\n    self.major = Ticker()\n    self.minor = Ticker()\n    self.callbacks = cbook.CallbackRegistry(signals=['units'])\n    self._autolabelpos = True\n    self.label = mtext.Text(np.nan, np.nan, fontsize=mpl.rcParams['axes.labelsize'], fontweight=mpl.rcParams['axes.labelweight'], color=mpl.rcParams['axes.labelcolor'])\n    self._set_artist_props(self.label)\n    self.offsetText = mtext.Text(np.nan, np.nan)\n    self._set_artist_props(self.offsetText)\n    self.labelpad = mpl.rcParams['axes.labelpad']\n    self.pickradius = pickradius\n    self._major_tick_kw = dict()\n    self._minor_tick_kw = dict()\n    if clear:\n        self.clear()\n    else:\n        self._converter = None\n        self._converter_is_explicit = False\n        self.units = None\n    self._autoscale_on = True",
    "docstring": "Parameters ---------- axes : The to which the created Axis belongs. pickradius : float The acceptance radius for containment tests. See also . clear : bool, default: True Whether to clear the Axis on creation. This is not required, e.g., when creating an Axis as part of an Axes, as ``. .. versionadded:: 3.8",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\axis.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:axes arguments arg arg arg arg Call Call Assign Call Call Assign Assign Assign Call Assign Call Assign Call Assign Assign Call Call Assign Call Call Assign Assign Assign Call Assign Call If Call Assign Assign Assign Assign"
  },
  {
    "library": "pandas",
    "name": "col_count",
    "source_code": "@property\ndef col_count(self) -> int:\n    return len(self.ids)",
    "docstring": "Number of columns to be summarized.",
    "type": "method",
    "file_path": "pandas\\pandas\\io\\formats\\info.py",
    "ast_data": "FunctionDef name:col_count arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_merge_dims_generic",
    "source_code": "def _merge_dims_generic(source, outer, inner):\n    if isinstance(source, StructuredTensor):\n        return source.merge_dims(outer, inner)\n    else:\n        return ragged_tensor.merge_dims(source, outer, inner)",
    "docstring": "Merges outer_axis...inner_axis into a single dimension. If outer == inner, this is a NOOP. If inner = source.shape.rank, then the behavior is undefined. Args: source: a tensor, ragged tensor, or structured tensor. outer: a python int, indicating the first dimension to compress (must be nonnegative). inner: a python int, indicating the first dimension to keep (of the tail) (must be nonnegative). Returns: source with outer_axis...inner_axis merged into a single dimension.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\structured\\structured_tensor.py",
    "ast_data": "FunctionDef name:_merge_dims_generic arg:source arg:outer arg:inner arguments arg arg arg If Call Return return:yes Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_list_profile_sort_key",
    "source_code": "def _list_profile_sort_key(profile_datum, sort_by):\n    if sort_by == SORT_OPS_BY_OP_NAME:\n        return profile_datum.node_exec_stats.node_name\n    elif sort_by == SORT_OPS_BY_OP_TYPE:\n        return profile_datum.op_type\n    elif sort_by == SORT_OPS_BY_LINE:\n        return profile_datum.file_line_func\n    elif sort_by == SORT_OPS_BY_OP_TIME:\n        return profile_datum.op_time\n    elif sort_by == SORT_OPS_BY_EXEC_TIME:\n        return profile_datum.node_exec_stats.all_end_rel_micros\n    else:\n        return profile_datum.node_exec_stats.all_start_micros",
    "docstring": "Get a profile_datum property to sort by in list_profile command. Args: profile_datum: A object. sort_by: (string) indicates a value to sort by. Must be one of SORT_BY* constants. Returns: profile_datum property to sort by.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\debug\\cli\\profile_analyzer_cli.py",
    "ast_data": "FunctionDef name:_list_profile_sort_key arg:profile_datum arg:sort_by arguments arg arg If Compare Return return:yes If Compare Return return:yes If Compare Return return:yes If Compare Return return:yes If Compare Return return:yes Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "bucketize",
    "source_code": "def bucketize(self, values: T, boundaries: tuple[str, sympy.Expr, sympy.Expr, sympy.Expr], boundary_indices: T, indexing_dtype: torch.dtype, right: bool, sorter: Optional[tuple[str, sympy.Expr]]=None, sorter_indices: Optional[T]=None) -> None:\n    self._reads.add(StarDep(boundaries[0]))\n    if sorter is not None:\n        self._reads.add(StarDep(sorter[0]))",
    "docstring": "Records the names of the buffers that bucketize will read from.",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\dependencies.py",
    "ast_data": "FunctionDef name:bucketize arg:self arg:values arg:boundaries arg:boundary_indices arg:indexing_dtype arg:right arg:sorter arg:sorter_indices arguments arg arg arg arg arg arg arg arg Call Call If Compare Call Call"
  },
  {
    "library": "pandas",
    "name": "is_string_or_object_np_dtype",
    "source_code": "def is_string_or_object_np_dtype(dtype: np.dtype) -> bool:\n    return dtype == object or dtype.kind in 'SU'",
    "docstring": "Faster alternative to is_string_dtype, assumes we have a np.dtype object.",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\dtypes\\common.py",
    "ast_data": "FunctionDef name:is_string_or_object_np_dtype arg:dtype arguments arg Return return:yes BoolOp Compare Compare"
  },
  {
    "library": "pygame",
    "name": "groups",
    "source_code": "def groups(self):\n    return list(self.__g)",
    "docstring": "list of Groups that contain this Sprite Sprite.groups(): return group_list Returns a list of all the Groups that contain this Sprite.",
    "type": "method",
    "file_path": "pygame\\src_py\\sprite.py",
    "ast_data": "FunctionDef name:groups arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "cherrypy",
    "name": "menu",
    "source_code": "@cherrypy.expose\ndef menu(self):\n    yield '<h2>Profiling runs</h2>'\n    yield '<p>Click on one of the runs below to see profiling data.</p>'\n    runs = self.statfiles()\n    runs.sort()\n    for i in runs:\n        yield (\"<a href='report?filename=%s' target='main'>%s</a><br />\" % (i, i))",
    "docstring": "Render the profiler menu page html layout.",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\lib\\profiler.py",
    "ast_data": "FunctionDef name:menu arg:self arguments arg Assign Call Call For"
  },
  {
    "library": "tensorflow",
    "name": "get_workers_list",
    "source_code": "def get_workers_list(cluster_resolver):\n    worker_job_name = 'worker'\n    cluster_spec = cluster_resolver.cluster_spec()\n    if not cluster_spec:\n        raise errors.UnavailableError('None', 'None', 'Cluster spec not found, your client must run in GCE environment.')\n    task_indices = cluster_spec.task_indices(worker_job_name)\n    workers_list = [cluster_spec.task_address(worker_job_name, i).replace(':8470', ':8466') for i in task_indices]\n    return ','.join(workers_list)",
    "docstring": "Returns a comma separated list of TPU worker host:port pairs. Gets cluster_spec from cluster_resolver. Use the worker's task indices to obtain and return a list of host:port pairs. Args: cluster_resolver: TensorFlow TPUClusterResolver instance. Returns: A string of comma separated list of host:port pairs. For example: '10.2.0.1:8466,10.2.0.2:8466,10.2.0.3:8466,10.2.0.4:8466' Raises: UnavailableError: cluster_resolver doesn't contain a valid cluster_spec.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\tpu\\profiler\\capture_tpu_profile.py",
    "ast_data": "FunctionDef name:get_workers_list arg:cluster_resolver arguments arg Assign Assign Call If Raise Call Assign Call Assign Call Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_VerifyGeneratedGradients",
    "source_code": "def _VerifyGeneratedGradients(grads, op: ops.Operation):\n    if op.type == 'While' or op.type == 'StatelessWhile':\n        return\n    if len(grads) != len(op.inputs):\n        raise ValueError(f'Num gradients {len(grads)} generated for op {op.node_def} do not match num inputs {len(op.inputs)}')",
    "docstring": "Verify that gradients are valid in number and type. Args: grads: List of generated gradients. op: Operation for which the gradients where generated. Raises: ValueError: if sizes of gradients and inputs don't match. TypeError: if type of any gradient is not valid for its input.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\gradients_util.py",
    "ast_data": "FunctionDef name:_VerifyGeneratedGradients arg:grads arg:op arguments arg arg If BoolOp Compare Compare Return return:no If Compare Call Call Raise Call Call Call"
  },
  {
    "library": "scikit-learn",
    "name": "strip_tags",
    "source_code": "def strip_tags(s):\n    return re.compile('<([^>]+)>', flags=re.UNICODE).sub(' ', s)",
    "docstring": "Basic regexp based HTML / XML tag stripper function. For serious HTML/XML preprocessing you should rather use an external library such as lxml or BeautifulSoup. Parameters ---------- s : str The string to strip. Returns ------- s : str The stripped string.",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\feature_extraction\\text.py",
    "ast_data": "FunctionDef name:strip_tags arg:s arguments arg Return return:yes Call Call"
  },
  {
    "library": "scipy",
    "name": "qspline2d",
    "source_code": "def qspline2d(signal, lamb=0.0, precision=-1.0):\n    if precision < 0.0 or precision >= 1.0:\n        if signal.dtype in [float32, complex64]:\n            precision = 0.001\n        else:\n            precision = 1e-06\n    if lamb > 0:\n        raise ValueError('lambda must be negative or zero')\n    r = -3 + 2 * math.sqrt(2.0)\n    c0 = -r * 8.0\n    z1 = r\n    out = symiirorder_nd(symiirorder1, signal, c0, z1, precision, axis=-1)\n    out = symiirorder_nd(symiirorder1, out, c0, z1, precision, axis=0)\n    return out",
    "docstring": "Coefficients for 2-D quadratic (2nd order) B-spline. Return the second-order B-spline coefficients over a regularly spaced input grid for the two-dimensional input image. Parameters ---------- input : ndarray The input signal. lamb : float Specifies the amount of smoothing in the transfer function. precision : float Specifies the precision for computing the infinite sum needed to apply mirror-symmetric boundary conditions. Returns ------- output : ndarray The filtered signal.",
    "type": "function",
    "file_path": "scipy\\scipy\\signal\\_spline_filters.py",
    "ast_data": "FunctionDef name:qspline2d arg:signal arg:lamb arg:precision arguments arg arg arg If BoolOp Compare Compare If Compare Assign Assign If Compare Raise Call Assign Call Assign Assign Assign Call Assign Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "get_event_pairs_min_timing",
    "source_code": "def get_event_pairs_min_timing(self: Self, event_pairs: list[tuple[torch.cuda.Event, torch.cuda.Event]]) -> float:\n    return min([start_event.elapsed_time(end_event) for start_event, end_event in event_pairs])",
    "docstring": "Get the minimum timing, in milliseconds, for a group of CUDA event pairs.",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\runtime\\benchmarking.py",
    "ast_data": "FunctionDef name:get_event_pairs_min_timing arg:self arg:event_pairs arguments arg arg Return return:yes Call Call"
  },
  {
    "library": "pandas",
    "name": "__contains__",
    "source_code": "def __contains__(self, key: str) -> bool:\n    node = self.get_node(key)\n    if node is not None:\n        name = node._v_pathname\n        if key in (name, name[1:]):\n            return True\n    return False",
    "docstring": "check for existence of this key can match the exact pathname or the pathnm w/o the leading '/'",
    "type": "method",
    "file_path": "pandas\\pandas\\io\\pytables.py",
    "ast_data": "FunctionDef name:__contains__ arg:self arg:key arguments arg arg Assign Call If Compare Assign If Compare Return return:yes Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_has_uninterpretable_sympy_function",
    "source_code": "def _has_uninterpretable_sympy_function(expr: sympy.Basic) -> bool:\n    return expr.has(torch.utils._sympy.functions.ToFloat, torch.utils._sympy.functions.TruncToInt, torch.utils._sympy.functions.CeilToInt)",
    "docstring": "Add functions that our sympy interpreter can't reify into FX nodes",
    "type": "function",
    "file_path": "pytorch\\torch\\fx\\experimental\\symbolic_shapes.py",
    "ast_data": "FunctionDef name:_has_uninterpretable_sympy_function arg:expr arguments arg Return return:yes Call"
  },
  {
    "library": "django",
    "name": "PermissionDenied",
    "source_code": "class PermissionDenied(Exception):\n    pass",
    "docstring": "The user did not have permission to do that",
    "type": "class",
    "file_path": "django\\django\\core\\exceptions.py",
    "ast_data": "ClassDef name:PermissionDenied"
  },
  {
    "library": "scikit-learn",
    "name": "check_classifiers_multilabel_output_format_predict",
    "source_code": "@ignore_warnings(category=FutureWarning)\ndef check_classifiers_multilabel_output_format_predict(name, classifier_orig):\n    classifier = clone(classifier_orig)\n    set_random_state(classifier)\n    n_samples, test_size, n_outputs = (100, 25, 5)\n    X, y = make_multilabel_classification(n_samples=n_samples, n_features=2, n_classes=n_outputs, n_labels=3, length=50, allow_unlabeled=True, random_state=0)\n    X = scale(X)\n    X_train, X_test = (X[:-test_size], X[-test_size:])\n    y_train, y_test = (y[:-test_size], y[-test_size:])\n    X_train, X_test = _enforce_estimator_tags_X(classifier_orig, X_train, X_test=X_test)\n    classifier.fit(X_train, y_train)\n    response_method_name = 'predict'\n    predict_method = getattr(classifier, response_method_name, None)\n    if predict_method is None:\n        raise SkipTest(f'{name} does not have a {response_method_name} method.')\n    y_pred = predict_method(X_test)\n    assert isinstance(y_pred, np.ndarray), f'{name}.predict is expected to output a NumPy array. Got {type(y_pred)} instead.'\n    assert y_pred.shape == y_test.shape, f'{name}.predict outputs a NumPy array of shape {y_pred.shape} instead of {y_test.shape}.'\n    assert y_pred.dtype == y_test.dtype, f'{name}.predict does not output the same dtype than the targets. Got {y_pred.dtype} instead of {y_test.dtype}.'",
    "docstring": "Check the output of the method for classifiers supporting multilabel-indicator targets.",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\utils\\estimator_checks.py",
    "ast_data": "FunctionDef name:check_classifiers_multilabel_output_format_predict arg:name arg:classifier_orig arguments arg arg Assign Call Call Assign Assign Call Assign Call Assign Assign Assign Call Call Assign Assign Call If Compare Raise Call Assign Call Call Call Compare Compare Call"
  },
  {
    "library": "pytorch",
    "name": "TritonCSE",
    "source_code": "class TritonCSE(CSE[TritonCSEVariable, Union[str, tuple[str, str]]]):\n\n    def augment_key(self, cache_key: str) -> Union[str, tuple[str, str]]:\n        if (mask := V.kernel._load_mask):\n            return (cache_key, mask.name)\n        else:\n            return cache_key",
    "docstring": "Subclasses CSE to apply the current load mask to the cache key to avoid CSEing variables across separate masked blocks.",
    "type": "class",
    "file_path": "pytorch\\torch\\_inductor\\codegen\\triton.py",
    "ast_data": "ClassDef name:TritonCSE FunctionDef name:augment_key arg:self arg:cache_key arguments arg arg If Return return:yes Return return:yes"
  },
  {
    "library": "authlib",
    "name": "create_authorization_url",
    "source_code": "async def create_authorization_url(self, redirect_uri=None, **kwargs):\n    if not self.authorize_url:\n        raise RuntimeError('Missing \"authorize_url\" value')\n    if self.authorize_params:\n        kwargs.update(self.authorize_params)\n    async with self._get_oauth_client() as client:\n        client.redirect_uri = redirect_uri\n        params = {}\n        if self.request_token_params:\n            params.update(self.request_token_params)\n        request_token = await client.fetch_request_token(self.request_token_url, **params)\n        log.debug(f'Fetch request token: {request_token!r}')\n        url = client.create_authorization_url(self.authorize_url, **kwargs)\n        state = request_token['oauth_token']\n    return {'url': url, 'request_token': request_token, 'state': state}",
    "docstring": "Generate the authorization url and state for HTTP redirect. :param redirect_uri: Callback or redirect URI for authorization. :param kwargs: Extra parameters to include. :return: dict",
    "type": "method",
    "file_path": "authlib\\authlib\\integrations\\base_client\\async_app.py",
    "ast_data": "AsyncFunctionDef name:create_authorization_url arg:self arg:redirect_uri arguments arg arg arg If Raise Call If Call Call Assign Assign If Call Assign Call Call Assign Call Assign Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "set_figure",
    "source_code": "def set_figure(self, fig):\n    no_switch = 'The parent and root figures of a (Sub)Figure are set at instantiation and cannot be changed.'\n    if fig is self._root_figure:\n        _api.warn_deprecated('3.10', message=f'{no_switch} From Matplotlib 3.12 this operation will raise an exception.')\n        return\n    raise ValueError(no_switch)",
    "docstring": ".. deprecated:: 3.10 Currently this method will raise an exception if *fig* is anything other than the root this (Sub)Figure is on. In future it will always raise an exception.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\figure.py",
    "ast_data": "FunctionDef name:set_figure arg:self arg:fig arguments arg arg Assign If Compare Call Return return:no Raise Call"
  },
  {
    "library": "tensorflow",
    "name": "get_restore_function",
    "source_code": "def get_restore_function(registered_name):\n    return _saver_registry.name_lookup(registered_name)[1]",
    "docstring": "Returns restore function registered to name.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\saved_model\\registration\\registration.py",
    "ast_data": "FunctionDef name:get_restore_function arg:registered_name arguments arg Return return:yes Call"
  },
  {
    "library": "kornia",
    "name": "save",
    "source_code": "def save(self, image: Tensor, show_trajectories: bool=True, directory: Optional[str]=None) -> None:\n    if directory is None:\n        name = f'{self.name}_{datetime.datetime.now(tz=datetime.timezone.utc).strftime('%Y%m%d%H%M%S')!s}'\n        directory = os.path.join('kornia_outputs', name)\n    output = self.visualize(image, show_trajectories=show_trajectories)\n    os.makedirs(directory, exist_ok=True)\n    write_image(os.path.join(directory, f'{str(0).zfill(6)}.jpg'), output.byte())\n    logger.info(f'Outputs are saved in {directory}')",
    "docstring": "Save the model to ONNX format. Args: image: The input image. show_trajectories: Whether to visualize trajectories. directory: Where to save the file(s).",
    "type": "method",
    "file_path": "kornia\\kornia\\models\\tracking\\boxmot_tracker.py",
    "ast_data": "FunctionDef name:save arg:self arg:image arg:show_trajectories arg:directory arguments arg arg arg arg If Compare Assign Call Call Assign Call Assign Call Call Call Call Call Call Call Call"
  },
  {
    "library": "pandas",
    "name": "_convert_to_indexer",
    "source_code": "def _convert_to_indexer(self, key: T, axis: AxisInt) -> T:\n    return key",
    "docstring": "Much simpler as we only have to deal with our valid types.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\indexing.py",
    "ast_data": "FunctionDef name:_convert_to_indexer arg:self arg:key arg:axis arguments arg arg arg Return return:yes"
  },
  {
    "library": "kornia",
    "name": "is_mps_tensor_safe",
    "source_code": "def is_mps_tensor_safe(x: Tensor) -> bool:\n    return 'mps' in str(x.device)",
    "docstring": "Return whether tensor is on MPS device.",
    "type": "function",
    "file_path": "kornia\\kornia\\utils\\helpers.py",
    "ast_data": "FunctionDef name:is_mps_tensor_safe arg:x arguments arg Return return:yes Compare Call"
  },
  {
    "library": "tensorflow",
    "name": "set_keras_style",
    "source_code": "def set_keras_style():\n    global _KERAS_STYLE_SCOPE\n    _KERAS_STYLE_SCOPE = True",
    "docstring": "Use Keras-style variable management. All tf.layers and tf RNN cells created after keras style ha been enabled use Keras-style variable management. Creating such layers with a scope= argument is disallowed, and reuse=True is disallowed. The purpose of this function is to allow users of existing layers to slowly transition to Keras layers API without breaking existing functionality. For more details, see the documentation for . Note, once keras style has been set, it is set globally for the entire program and cannot be unset. Example:",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\legacy_tf_layers\\base.py",
    "ast_data": "FunctionDef name:set_keras_style arguments Assign"
  },
  {
    "library": "kornia",
    "name": "apply_non_transform_box",
    "source_code": "def apply_non_transform_box(self, input: Boxes, params: Dict[str, Tensor], flags: Dict[str, Any], transform: Optional[Tensor]=None) -> Boxes:\n    return input",
    "docstring": "Process boxes corresponding to the inputs that are no transformation applied.",
    "type": "method",
    "file_path": "kornia\\kornia\\augmentation\\_2d\\geometric\\base.py",
    "ast_data": "FunctionDef name:apply_non_transform_box arg:self arg:input arg:params arg:flags arg:transform arguments arg arg arg arg arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_serialize_signature_def_map",
    "source_code": "def _serialize_signature_def_map(signature_def_map: _SignatureDefMap) -> dict[str, bytes]:\n    signature_def_map_serialized = {}\n    for key, signature_def in signature_def_map.items():\n        signature_def_map_serialized[key] = signature_def.SerializeToString()\n    return signature_def_map_serialized",
    "docstring": "Serializes SignatureDef values in . Args: signature_def_map: Signature key -> SignatureDef mapping. Returns: Signature def map where the values () are serialized.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\compiler\\mlir\\quantization\\stablehlo\\python\\quantization.py",
    "ast_data": "FunctionDef name:_serialize_signature_def_map arg:signature_def_map arguments arg Assign For Call Assign Call Return return:yes"
  },
  {
    "library": "scipy",
    "name": "r2c",
    "source_code": "def r2c(forward, x, n=None, axis=-1, norm=None, overwrite_x=False, workers=None, *, plan=None):\n    if plan is not None:\n        raise NotImplementedError('Passing a precomputed plan is not yet supported by scipy.fft functions')\n    tmp = _asfarray(x)\n    norm = _normalization(norm, forward)\n    workers = _workers(workers)\n    if not np.isrealobj(tmp):\n        raise TypeError('x must be a real sequence')\n    if n is not None:\n        tmp, _ = _fix_shape_1d(tmp, n, axis)\n    elif tmp.shape[axis] < 1:\n        raise ValueError(f'invalid number of data points ({tmp.shape[axis]}) specified')\n    return pfft.r2c(tmp, (axis,), forward, norm, None, workers)",
    "docstring": "Discrete Fourier transform of a real sequence.",
    "type": "function",
    "file_path": "scipy\\scipy\\fft\\_pocketfft\\basic.py",
    "ast_data": "FunctionDef name:r2c arg:forward arg:x arg:n arg:axis arg:norm arg:overwrite_x arg:workers arguments arg arg arg arg arg arg arg arg If Compare Raise Call Assign Call Assign Call Assign Call If Call Raise Call If Compare Assign Call If Compare Raise Call Return return:yes Call"
  },
  {
    "library": "django",
    "name": "get_str_from_wsgi",
    "source_code": "def get_str_from_wsgi(environ, key, default):\n    value = get_bytes_from_wsgi(environ, key, default)\n    return value.decode(errors='replace')",
    "docstring": "Get a value from the WSGI environ dictionary as str. key and default should be str objects.",
    "type": "function",
    "file_path": "django\\django\\core\\handlers\\wsgi.py",
    "ast_data": "FunctionDef name:get_str_from_wsgi arg:environ arg:key arg:default arguments arg arg arg Assign Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "get_input_arrays",
    "source_code": "def get_input_arrays(self):\n    if self._has_valid_tensors():\n        return [_get_tensor_name(tensor) for tensor in self._input_tensors]\n    else:\n        return [name for name, _ in self._input_arrays_with_shape]",
    "docstring": "Returns a list of the names of the input tensors. Returns: List of strings.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\lite\\python\\lite.py",
    "ast_data": "FunctionDef name:get_input_arrays arg:self arguments arg If Call Return return:yes Call Return return:yes"
  },
  {
    "library": "django",
    "name": "_datetime_from_timestamp",
    "source_code": "def _datetime_from_timestamp(self, ts):\n    tz = UTC if settings.USE_TZ else None\n    return datetime.fromtimestamp(ts, tz=tz)",
    "docstring": "If timezone support is enabled, make an aware datetime object in UTC; otherwise make a naive one in the local timezone.",
    "type": "method",
    "file_path": "django\\django\\core\\files\\storage\\filesystem.py",
    "ast_data": "FunctionDef name:_datetime_from_timestamp arg:self arg:ts arguments arg arg Assign Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_add_many_sparse_to_tensors_map",
    "source_code": "def _add_many_sparse_to_tensors_map(sp_input, container=None, shared_name=None, name=None):\n    sp_input = _convert_to_sparse_tensor(sp_input)\n    return gen_sparse_ops.add_many_sparse_to_tensors_map(sp_input.indices, sp_input.values, sp_input.dense_shape, container=container, shared_name=shared_name, name=name)",
    "docstring": "Add a minibatch to a , return handles. The must have rank greater than 1, and the first dimension is treated as the minibatch dimension. Elements of the must be sorted in increasing order of this first dimension. The serialized objects going into each row of the output will have rank . The minibatch size is extracted from . Args: sp_input: The input rank . container: The container for the underlying (optional). shared_name: The shared name for the underlying (optional, defaults to the name of the newly created op). name: A name prefix for the returned tensors (optional). Returns: A string matrix (2-D ) with rows and column. Each row represents a unique handle to a stored by the underlying this op. Raises: TypeError: If is not a .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\sparse_ops.py",
    "ast_data": "FunctionDef name:_add_many_sparse_to_tensors_map arg:sp_input arg:container arg:shared_name arg:name arguments arg arg arg arg Assign Call Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "get_math_fontfamily",
    "source_code": "def get_math_fontfamily(self):\n    return self._math_fontfamily",
    "docstring": "Return the name of the font family used for math text. The default font is :rc:.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\font_manager.py",
    "ast_data": "FunctionDef name:get_math_fontfamily arg:self arguments arg Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "clf",
    "source_code": "def clf(self, keep_observers=False):\n    return self.clear(keep_observers=keep_observers)",
    "docstring": "[*Discouraged*] Alias for the method. .. admonition:: Discouraged The use of `` instead. Parameters ---------- keep_observers : bool, default: False Set *keep_observers* to True if, for example, a gui widget is tracking the Axes in the figure.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\figure.py",
    "ast_data": "FunctionDef name:clf arg:self arg:keep_observers arguments arg arg Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "latent_mean_and_variance",
    "source_code": "def latent_mean_and_variance(self, X):\n    if self.n_classes_ > 2:\n        raise ValueError(f'Returning the mean and variance of the latent function f is only supported for binary classification, received {self.n_classes_} classes.')\n    check_is_fitted(self)\n    if self.kernel is None or self.kernel.requires_vector_input:\n        X = validate_data(self, X, ensure_2d=True, dtype='numeric', reset=False)\n    else:\n        X = validate_data(self, X, ensure_2d=False, dtype=None, reset=False)\n    return self.base_estimator_.latent_mean_and_variance(X)",
    "docstring": "Compute the mean and variance of the latent function. Based on algorithm 3.2 of [RW2006]_, this function returns the latent mean (Line 4) and variance (Line 6) of the Gaussian process classification model. Note that this function is only supported for binary classification. .. versionadded:: 1.7 Parameters ---------- X : array-like of shape (n_samples, n_features) or list of object Query points where the GP is evaluated for classification. Returns ------- latent_mean : array-like of shape (n_samples,) Mean of the latent function values at the query points. latent_var : array-like of shape (n_samples,) Variance of the latent function values at the query points.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\gaussian_process\\_gpc.py",
    "ast_data": "FunctionDef name:latent_mean_and_variance arg:self arg:X arguments arg arg If Compare Raise Call Call If BoolOp Compare Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "_n_features_out",
    "source_code": "@property\ndef _n_features_out(self):\n    return self.n_features_in_",
    "docstring": "Number of transformed output features.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\preprocessing\\_data.py",
    "ast_data": "FunctionDef name:_n_features_out arg:self arguments arg Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "construct_grids",
    "source_code": "def construct_grids(batch):\n    xmin = batch.x_left_lower_corner + batch.grid_size\n    xmax = xmin + batch.Nx * batch.grid_size\n    ymin = batch.y_left_lower_corner + batch.grid_size\n    ymax = ymin + batch.Ny * batch.grid_size\n    xgrid = np.arange(xmin, xmax, batch.grid_size)\n    ygrid = np.arange(ymin, ymax, batch.grid_size)\n    return (xgrid, ygrid)",
    "docstring": "Construct the map grid from the batch object Parameters ---------- batch : Batch object The object returned by :func: Returns ------- (xgrid, ygrid) : 1-D arrays The grid corresponding to the values in batch.coverages",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\datasets\\_species_distributions.py",
    "ast_data": "FunctionDef name:construct_grids arg:batch arguments arg Assign Assign Assign Assign Assign Call Assign Call Return return:yes"
  },
  {
    "library": "pandas",
    "name": "add_dtypes_line",
    "source_code": "def add_dtypes_line(self) -> None:\n    collected_dtypes = [f'{key}({val:d})' for key, val in sorted(self.dtype_counts.items())]\n    self._lines.append(f'dtypes: {', '.join(collected_dtypes)}')",
    "docstring": "Add summary line with dtypes present in dataframe.",
    "type": "method",
    "file_path": "pandas\\pandas\\io\\formats\\info.py",
    "ast_data": "FunctionDef name:add_dtypes_line arg:self arguments arg Assign Call Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "register_step_post_hook",
    "source_code": "def register_step_post_hook(self, hook: OptimizerPostHook) -> RemovableHandle:\n    handle = hooks.RemovableHandle(self._optimizer_step_post_hooks)\n    self._optimizer_step_post_hooks[handle.id] = hook\n    return handle",
    "docstring": "Register an optimizer step post hook which will be called after optimizer step. It should have the following signature:: hook(optimizer, args, kwargs) -> None The `torch.utils.hooks.RemovableHandle`",
    "type": "method",
    "file_path": "pytorch\\torch\\optim\\optimizer.py",
    "ast_data": "FunctionDef name:register_step_post_hook arg:self arg:hook arguments arg arg Assign Call Assign Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "get_fwd_send_ops",
    "source_code": "def get_fwd_send_ops(self, fwd_chunk_id: int) -> list[dist.P2POp]:\n    output_tuple, _ = self.fwd_cache[fwd_chunk_id]\n    ops: list[dist.P2POp] = []\n    for idx, out in enumerate(output_tuple):\n        dst_stages = self.act_send_info[idx]\n        for dst in dst_stages:\n            if dst is None:\n                continue\n            logger.debug('%s Sending tensor to Stage %s: %s', self.log_prefix, dst, out.size())\n            peer_rank = self.stage_index_to_group_rank[dst]\n            peer_global_rank = peer_rank if self.group is None else dist.get_global_rank(self.group, peer_rank)\n            ops.append(dist.P2POp(dist.isend, out, peer_global_rank, self.group))\n    return ops",
    "docstring": "Get the activation send ops for current stage's forward.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\pipelining\\stage.py",
    "ast_data": "FunctionDef name:get_fwd_send_ops arg:self arg:fwd_chunk_id arguments arg arg Assign For Call Assign For If Compare Call Call Assign Assign Compare Call Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "assign_sub",
    "source_code": "def assign_sub(self, delta, use_locking=False, name=None, read_value=True):\n    assign = state_ops.assign_sub(self._variable, delta, use_locking=use_locking, name=name)\n    if read_value:\n        return assign\n    return assign.op",
    "docstring": "Subtracts a value from this variable. This is essentially a shortcut for . Args: delta: A . The value to subtract from this variable. use_locking: If , use locking during the operation. name: The name of the operation to be created read_value: if True, will return something which evaluates to the new value of the variable; if False will return the assign op. Returns: A that will hold the new value of this variable after the subtraction has completed.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\ref_variable.py",
    "ast_data": "FunctionDef name:assign_sub arg:self arg:delta arg:use_locking arg:name arg:read_value arguments arg arg arg arg arg Assign Call If Return return:yes Return return:yes"
  },
  {
    "library": "scrapy",
    "name": "crawl",
    "source_code": "def crawl(self, crawler_or_spidercls: type[Spider] | str | Crawler, *args: Any, **kwargs: Any) -> Deferred[None]:\n    if isinstance(crawler_or_spidercls, Spider):\n        raise ValueError('The crawler_or_spidercls argument cannot be a spider object, it must be a spider class (or a Crawler object)')\n    crawler = self.create_crawler(crawler_or_spidercls)\n    return self._crawl(crawler, *args, **kwargs)",
    "docstring": "Run a crawler with the provided arguments. It will call the given Crawler's :meth: method, while keeping track of it so it can be stopped later. If `~scrapy.crawler.Crawler~scrapy.crawler.Crawler~scrapy.spiders.Spider` subclass or string :param args: arguments to initialize the spider :param kwargs: keyword arguments to initialize the spider",
    "type": "method",
    "file_path": "scrapy\\scrapy\\crawler.py",
    "ast_data": "FunctionDef name:crawl arg:self arg:crawler_or_spidercls arguments arg arg arg arg If Call Raise Call Assign Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "get_coordination_service_leader",
    "source_code": "def get_coordination_service_leader(self):\n    return '/job:' + self.get_job_name() + '/task:0'",
    "docstring": "Returns the location for coordination service. The coordination service should be located on TPU worker0. Returns: A string indicate the location path.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\cluster_resolver\\tpu\\tpu_cluster_resolver.py",
    "ast_data": "FunctionDef name:get_coordination_service_leader arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "django",
    "name": "add_field",
    "source_code": "def add_field(self, model, field):\n    from django.db.models.expressions import Value\n    if field.many_to_many and field.remote_field.through._meta.auto_created:\n        self.create_model(field.remote_field.through)\n    elif isinstance(field, CompositePrimaryKey):\n        return\n    elif field.primary_key or field.unique or (not field.null) or (self.effective_default(field) is not None) or (field.has_db_default() and (not isinstance(field.db_default, Value))):\n        self._remake_table(model, create_field=field)\n    else:\n        super().add_field(model, field)",
    "docstring": "Create a field on a model.",
    "type": "method",
    "file_path": "django\\django\\db\\backends\\sqlite3\\schema.py",
    "ast_data": "FunctionDef name:add_field arg:self arg:model arg:field arguments arg arg arg If BoolOp Call If Call Return return:no If BoolOp Compare Call BoolOp Call Call Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "assemble_exception_table",
    "source_code": "def assemble_exception_table(tab: list[ExceptionTableEntry]) -> bytes:\n    b = []\n    for entry in tab:\n        first_entry = encode_exception_table_varint(entry.start // 2)\n        first_entry[0] |= 1 << 7\n        b.extend(first_entry)\n        length = entry.end - entry.start + 2\n        b.extend(encode_exception_table_varint(length // 2))\n        b.extend(encode_exception_table_varint(entry.target // 2))\n        dl = (entry.depth << 1) + entry.lasti\n        b.extend(encode_exception_table_varint(dl))\n    return bytes(b)",
    "docstring": "Inverse of parse_exception_table - encodes list of exception table entries into bytes.",
    "type": "function",
    "file_path": "pytorch\\torch\\_dynamo\\bytecode_transformation.py",
    "ast_data": "FunctionDef name:assemble_exception_table arg:tab arguments arg Assign For Assign Call Call Assign Call Call Call Call Assign Call Call Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "set_position",
    "source_code": "def set_position(self, pos, which='both'):\n    self._set_position(pos, which=which)\n    self.set_in_layout(False)",
    "docstring": "Set the Axes position. Axes have two position attributes. The 'original' position is the position allocated for the Axes. The 'active' position is the position the Axes is actually drawn at. These positions are usually the same unless a fixed aspect is set to the Axes. See for details. Parameters ---------- pos : [left, bottom, width, height] or The new position of the Axes in coordinates. which : {'both', 'active', 'original'}, default: 'both' Determines which position variables to change. See Also -------- matplotlib.transforms.Bbox.from_bounds matplotlib.transforms.Bbox.from_extents",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\axes\\_base.py",
    "ast_data": "FunctionDef name:set_position arg:self arg:pos arg:which arguments arg arg arg Call Call"
  },
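A minimal sketch for the `set_position` entry above; the rectangle values are arbitrary.

```python
# Sketch: pin an Axes to a fixed [left, bottom, width, height] rectangle
# in figure coordinates, which also opts it out of layout management.
import matplotlib.pyplot as plt

fig = plt.figure()
ax = fig.add_subplot()
ax.set_position([0.1, 0.1, 0.5, 0.4])
fig.savefig("positioned_axes.png")  # illustrative output path
```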
  {
    "library": "pytorch",
    "name": "_local_step",
    "source_code": "def _local_step(self, gradients: Optional[list[Optional[torch.Tensor]]]=None, closure: Optional[Callable[[], float]]=None, **kwargs: Any) -> Optional[float]:\n    Join.notify_join_context(self)\n    is_trainable_mask = self._get_is_trainable_mask()\n    if is_trainable_mask != self._is_trainable_mask:\n        if self._overlap_with_ddp:\n            raise RuntimeError('ZeroRedundancyOptimizer with `overlap_with_ddp=True` does not support changing parameter trainability at run time')\n        logger.warning('ZeroRedundancyOptimizer detected that the trainable parameters changed; rebuilding the parameter buckets if enabled')\n        self._build_param_buckets()\n        self._is_trainable_mask = is_trainable_mask\n    self._sync_param_groups(self.param_groups, self.optim.param_groups)\n    if gradients is None:\n        loss = self.optim.step(**kwargs) if closure is None else self.optim.step(closure=closure, **kwargs)\n    else:\n        assert self._overlap_with_ddp, 'Specifying `gradients` should not be used when `overlap_with_ddp=False`'\n        assert closure is None, '`closure` is not supported when using a local functional optimizer'\n        loss = self.optim.step(gradients=gradients)\n    self._sync_param_groups(self.optim.param_groups, self.param_groups)\n    return loss",
    "docstring": "Perform a single optimizer step without syncing parameters across ranks. Arguments: gradients (list[Optional[torch.Tensor]], optional): a :class: of length equal to the number of parameters assigned to this rank containing gradient tensors or `listZeroRedundancyOptimizer` wraps a functional optimizer.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\optim\\zero_redundancy_optimizer.py",
    "ast_data": "FunctionDef name:_local_step arg:self arg:gradients arg:closure arguments arg arg arg arg Call Assign Call If Compare If Raise Call Call Call Assign Call If Compare Assign Compare Call Call Compare Assign Call Call Return return:yes"
  },
  {
    "library": "scipy",
    "name": "newer",
    "source_code": "def newer(dst, src):\n    if not os.path.exists(dst):\n        raise ValueError(f\"file '{os.path.abspath(dst)}' does not exist\")\n    if not os.path.exists(src):\n        return 1\n    mtime1 = os.stat(dst)[ST_MTIME]\n    mtime2 = os.stat(src)[ST_MTIME]\n    return mtime1 > mtime2",
    "docstring": "Return true if 'dst' exists and is more recently modified than 'src', or if 'dst' exists and 'src' doesn't. Return false if both exist and 'dst' is the same age or younger than 'src'.",
    "type": "function",
    "file_path": "scipy\\scipy\\_build_utils\\_wrappers_common.py",
    "ast_data": "FunctionDef name:newer arg:dst arg:src arguments arg arg If Call Raise Call Call If Call Return return:yes Assign Call Assign Call Return return:yes Compare"
  },
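A usage sketch for `newer` above, assuming the function is in scope; the file names are hypothetical.

```python
# Sketch: newer(dst, src) is True iff dst's mtime is strictly greater
# than src's (dst must exist; a missing src counts as "dst is newer").
import pathlib
import time

dst = pathlib.Path("generated.c")   # hypothetical output file
src = pathlib.Path("template.in")   # hypothetical input file
dst.write_text("// generated")
time.sleep(0.01)
src.write_text("// edited after generation")
print(newer(dst, src))  # False: src was modified after dst
```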
  {
    "library": "matplotlib",
    "name": "FuncScale",
    "source_code": "class FuncScale(ScaleBase):\n    name = 'function'\n\n    def __init__(self, axis, functions):\n        forward, inverse = functions\n        transform = FuncTransform(forward, inverse)\n        self._transform = transform\n\n    def get_transform(self):\n        return self._transform\n\n    def set_default_locators_and_formatters(self, axis):\n        axis.set_major_locator(AutoLocator())\n        axis.set_major_formatter(ScalarFormatter())\n        axis.set_minor_formatter(NullFormatter())\n        if axis.axis_name == 'x' and mpl.rcParams['xtick.minor.visible'] or (axis.axis_name == 'y' and mpl.rcParams['ytick.minor.visible']):\n            axis.set_minor_locator(AutoMinorLocator())\n        else:\n            axis.set_minor_locator(NullLocator())",
    "docstring": "Provide an arbitrary scale with user-supplied function for the axis.",
    "type": "class",
    "file_path": "matplotlib\\lib\\matplotlib\\scale.py",
    "ast_data": "ClassDef name:FuncScale Assign FunctionDef name:__init__ arg:self arg:axis arg:functions arguments arg arg arg Assign Assign Call Assign FunctionDef name:get_transform arg:self arguments arg Return return:yes FunctionDef name:set_default_locators_and_formatters arg:self arg:axis arguments arg arg Call Call Call Call Call Call If BoolOp BoolOp Compare BoolOp Compare Call Call Call Call"
  },
  {
    "library": "django",
    "name": "do_get_available_languages",
    "source_code": "@register.tag('get_available_languages')\ndef do_get_available_languages(parser, token):\n    args = token.contents.split()\n    if len(args) != 3 or args[1] != 'as':\n        raise TemplateSyntaxError(\"'get_available_languages' requires 'as variable' (got %r)\" % args)\n    return GetAvailableLanguagesNode(args[2])",
    "docstring": "Store a list of available languages in the context. Usage:: {% get_available_languages as languages %} {% for language in languages %} ... {% endfor %} This puts settings.LANGUAGES into the named variable.",
    "type": "function",
    "file_path": "django\\django\\templatetags\\i18n.py",
    "ast_data": "FunctionDef name:do_get_available_languages arg:parser arg:token arguments arg arg Assign Call If BoolOp Compare Call Compare Raise Call Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "sympy_index_symbol_with_prefix",
    "source_code": "def sympy_index_symbol_with_prefix(prefix: SymT, idx: int) -> sympy.Symbol:\n    assert prefix != SymT.SIZE\n    return make_symbol(prefix, idx, integer=True, nonnegative=True)",
    "docstring": "Used to generate an integer-nonnegative symbol.",
    "type": "function",
    "file_path": "pytorch\\torch\\_inductor\\utils.py",
    "ast_data": "FunctionDef name:sympy_index_symbol_with_prefix arg:prefix arg:idx arguments arg arg Compare Return return:yes Call"
  },
  {
    "library": "cherrypy",
    "name": "index",
    "source_code": "@cherrypy.expose\ndef index(self):\n    return '\\n            <form action=\"greetUser\" method=\"GET\">\\n            What is your name?\\n            <input type=\"text\" name=\"name\" />\\n            <input type=\"submit\" />\\n            </form>'",
    "docstring": "Produce HTTP response body of welcome app index URI.",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\tutorial\\tut03_get_and_post.py",
    "ast_data": "FunctionDef name:index arg:self arguments arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "generate_save_uncompiled_kernels",
    "source_code": "def generate_save_uncompiled_kernels(self):\n    self.wrapper_call.splice(f'\\n            for kernel in globals().values():\\n                if isinstance(kernel, {triton_heuristics.__name__}.CachingAutotuner):\\n                    if not kernel.cuda_kernel_saved:\\n                        if len(kernel.launchers) == 0:\\n                            kernel.precompile()\\n                        kernel.save_gpu_kernel(\\n                            grid=(0, 0, 0),   # use dummy grid\\n                            stream=\"stream\",  # use dummy stream\\n                            launcher=kernel.launchers[0],\\n                        )\\n            ')",
    "docstring": "Precompile and save the CUBINs of the Triton kernels that haven't been precompiled and saved as a side effect of running the generated JIT model (Python wrapper). This can happen when the model contains control flow: only one pass through the control flow operators covers the kernels that are saved, the remaining kernels are not launched, hence not saved. The main purpose of this codegen is to compile and save the Triton kernels outside the active control flow path for subsequent AOTInductor code generation and compilation.",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\codegen\\wrapper.py",
    "ast_data": "FunctionDef name:generate_save_uncompiled_kernels arg:self arguments arg Call"
  },
  {
    "library": "pytorch",
    "name": "_maybe_set_opset_version",
    "source_code": "def _maybe_set_opset_version(opset_imports: dict[str, int], domain: str, version: int | None) -> None:\n    if domain in opset_imports and opset_imports[domain] != 1:\n        return\n    if domain == _ONNX_DOMAIN:\n        opset_imports[domain] = _constants.TORCHLIB_OPSET\n        return\n    if version is None:\n        opset_imports[domain] = 1\n        return\n    opset_imports[domain] = version",
    "docstring": "Set the opset version for the domain.",
    "type": "function",
    "file_path": "pytorch\\torch\\onnx\\_internal\\exporter\\_ir_passes.py",
    "ast_data": "FunctionDef name:_maybe_set_opset_version arg:opset_imports arg:domain arg:version arguments arg arg arg If BoolOp Compare Compare Return return:no If Compare Assign Return return:no If Compare Assign Return return:no Assign"
  },
  {
    "library": "sphinx",
    "name": "new_serialno",
    "source_code": "def new_serialno(self, category: str='') -> int:\n    return self.current_document.new_serial_number(category)",
    "docstring": "Return a serial number, e.g. for index entry targets. The number is guaranteed to be unique in the current document.",
    "type": "method",
    "file_path": "sphinx\\sphinx\\environment\\__init__.py",
    "ast_data": "FunctionDef name:new_serialno arg:self arg:category arguments arg arg Return return:yes Call"
  },
  {
    "library": "numpy",
    "name": "max",
    "source_code": "def max(self, axis=None, out=None):\n    return N.ndarray.max(self, axis, out, keepdims=True)._collapse(axis)",
    "docstring": "Return the maximum value along an axis. Parameters ---------- See for complete descriptions See Also -------- amax, ndarray.max Notes ----- This is the same as , but returns a object where would return an ndarray. Examples -------- >>> x = np.matrix(np.arange(12).reshape((3,4))); x matrix([[ 0, 1, 2, 3], [ 4, 5, 6, 7], [ 8, 9, 10, 11]]) >>> x.max() 11 >>> x.max(0) matrix([[ 8, 9, 10, 11]]) >>> x.max(1) matrix([[ 3], [ 7], [11]])",
    "type": "method",
    "file_path": "numpy\\numpy\\matrixlib\\defmatrix.py",
    "ast_data": "FunctionDef name:max arg:self arg:axis arg:out arguments arg arg arg Return return:yes Call Call"
  },
  {
    "library": "authlib",
    "name": "get_jwks",
    "source_code": "def get_jwks(self):\n    raise NotImplementedError()",
    "docstring": "Return the JWKs that will be used to check the JWT access token signature. Developers MUST re-implement this method:: def get_jwks(self): return load_jwks(\"jwks.json\")",
    "type": "method",
    "file_path": "authlib\\authlib\\oauth2\\rfc9068\\revocation.py",
    "ast_data": "FunctionDef name:get_jwks arg:self arguments arg Raise Call"
  },
  {
    "library": "pytorch",
    "name": "set_rng_state",
    "source_code": "def set_rng_state(new_state: Tensor, device: Union[int, str, torch.device]='mps') -> None:\n    new_state_copy = new_state.clone(memory_format=torch.contiguous_format)\n    _get_default_mps_generator().set_state(new_state_copy)",
    "docstring": "Sets the random number generator state. Args: new_state (torch.ByteTensor): The desired state device (torch.device or int, optional): The device to set the RNG state. Default: ``, the current MPS device).",
    "type": "function",
    "file_path": "pytorch\\torch\\mps\\__init__.py",
    "ast_data": "FunctionDef name:set_rng_state arg:new_state arg:device arguments arg arg Assign Call Call Call"
  },
  {
    "library": "matplotlib",
    "name": "trigger",
    "source_code": "def trigger(self, sender, event, data=None):\n    if not self.figure.canvas.widgetlock.available(sender):\n        return\n    if data is not None:\n        self.draw_rubberband(*data)\n    else:\n        self.remove_rubberband()",
    "docstring": "Call or based on data.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\backend_tools.py",
    "ast_data": "FunctionDef name:trigger arg:self arg:sender arg:event arg:data arguments arg arg arg arg If Call Return return:no If Compare Call Call"
  },
  {
    "library": "pytorch",
    "name": "__init__",
    "source_code": "def __init__(self, stage_module: torch.nn.Module, stage_index: int, pipe_info: PipeInfo, device: torch.device, group: Optional[dist.ProcessGroup]=None):\n    _PipelineStageBase.__init__(self, stage_module, stage_index, pipe_info.num_stages, device, group)\n    self.pipe_info = pipe_info\n    submod_nodes = [node for node in pipe_info.graph.nodes if node.op == 'call_module']\n    if len(submod_nodes) != self.num_stages:\n        raise AssertionError(f'Number of submodules in pipe graph {len(submod_nodes)} does not match number of stages {self.num_stages}')\n    self.node = submod_nodes[self.stage_index]\n    self.name = self.node.name\n    logger.info('[%s] Creating PipelineStage %s for %s', self.group_rank, stage_index, self.name)\n    self.submod_to_stage_index: dict[str, int] = {}\n    for i, node in enumerate(submod_nodes):\n        self.submod_to_stage_index.setdefault(node.name, i)\n    self._move_submod_to_device()",
    "docstring": "Create a pipeline stage given a stage_module to be wrapped by this stage and a describing the stage relationship of the pipeline. Args: stage_module (torch.nn.Module): the module to be wrapped by this stage stage_index (int): the index of this stage in the pipeline pipe_info (PipeInfo): information about the pipeline, can be retrieved by device (torch.device): the device to be used by this stage group (Optional[dist.ProcessGroup]): the process group to be used by this stage",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\pipelining\\stage.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:stage_module arg:stage_index arg:pipe_info arg:device arg:group arguments arg arg arg arg arg arg Call Assign Assign Compare If Compare Call Raise Call Call Assign Assign Call For Call Call Call"
  },
  {
    "library": "matplotlib",
    "name": "create_artists",
    "source_code": "def create_artists(self, legend, orig_handle, xdescent, ydescent, width, height, fontsize, trans):\n    raise NotImplementedError('Derived must override')",
    "docstring": "Return the legend artists generated. Parameters ---------- legend : The legend for which these legend artists are being created. orig_handle : or similar The object for which these legend artists are being created. xdescent, ydescent, width, height : int The rectangle (*xdescent*, *ydescent*, *width*, *height*) that the legend artists being created should fit within. fontsize : int The fontsize in pixels. The legend artists being created should be scaled according to the given fontsize. trans : The transform that is applied to the legend artists being created. Typically from unit coordinates in the handler box to screen coordinates.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\legend_handler.py",
    "ast_data": "FunctionDef name:create_artists arg:self arg:legend arg:orig_handle arg:xdescent arg:ydescent arg:width arg:height arg:fontsize arg:trans arguments arg arg arg arg arg arg arg arg arg Raise Call"
  },
  {
    "library": "matplotlib",
    "name": "_format_approx",
    "source_code": "def _format_approx(number, precision):\n    return f'{number:.{precision}f}'.rstrip('0').rstrip('.') or '0'",
    "docstring": "Format the number with at most the number of decimals given as precision. Remove trailing zeros and possibly the decimal point.",
    "type": "function",
    "file_path": "matplotlib\\lib\\matplotlib\\cbook.py",
    "ast_data": "FunctionDef name:_format_approx arg:number arg:precision arguments arg arg Return return:yes BoolOp Call Call"
  },
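A standalone re-implementation sketch of `_format_approx` to show the trailing-zero trimming; `format_approx` is a hypothetical name.

```python
def format_approx(number, precision):
    # Format with at most `precision` decimals, then trim trailing
    # zeros and a dangling decimal point; fall back to '0'.
    return f"{number:.{precision}f}".rstrip("0").rstrip(".") or "0"

print(format_approx(3.14159, 2))  # '3.14'
print(format_approx(1.0, 3))      # '1'
print(format_approx(0.0004, 2))   # '0'
```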
  {
    "library": "matplotlib",
    "name": "print_png",
    "source_code": "def print_png(self, fname_or_fh, **kwargs):\n    converter = make_pdf_to_png_converter()\n    with TemporaryDirectory() as tmpdir:\n        tmppath = pathlib.Path(tmpdir)\n        pdf_path = tmppath / 'figure.pdf'\n        png_path = tmppath / 'figure.png'\n        self.print_pdf(pdf_path, **kwargs)\n        converter(pdf_path, png_path, dpi=self.figure.dpi)\n        with png_path.open('rb') as orig, cbook.open_file_cm(fname_or_fh, 'wb') as dest:\n            shutil.copyfileobj(orig, dest)",
    "docstring": "Use LaTeX to compile a pgf figure to pdf and convert it to png.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\backends\\backend_pgf.py",
    "ast_data": "FunctionDef name:print_png arg:self arg:fname_or_fh arguments arg arg arg Assign Call With Call Assign Call Assign Assign Call Call With Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "set_parents_and_children",
    "source_code": "def set_parents_and_children(partitions: list[Partition]) -> None:\n    for partition in partitions:\n        partition.children = set()\n        partition.parents = set()\n    for partition in partitions:\n        for node in partition.nodes:\n            users = node.users\n            for n in users:\n                for p in partitions:\n                    if p != partition and n in p.nodes and (node not in p.nodes):\n                        partition.children.add(p)\n                        p.parents.add(partition)\n    return",
    "docstring": "Given a list of partitions, mark parents and children for each partition",
    "type": "function",
    "file_path": "pytorch\\torch\\fx\\experimental\\accelerator_partitioner.py",
    "ast_data": "FunctionDef name:set_parents_and_children arg:partitions arguments arg For Assign Call Assign Call For For Assign For For If BoolOp Compare Compare Compare Call Call Return return:no"
  },
  {
    "library": "django",
    "name": "_watch_glob",
    "source_code": "def _watch_glob(self, directory, patterns):\n    prefix = 'glob'\n    if not directory.exists():\n        if not directory.parent.exists():\n            logger.warning('Unable to watch directory %s as neither it or its parent exist.', directory)\n            return\n        prefix = 'glob-parent-%s' % directory.name\n        patterns = ['%s/%s' % (directory.name, pattern) for pattern in patterns]\n        directory = directory.parent\n    expression = ['anyof']\n    for pattern in patterns:\n        expression.append(['match', pattern, 'wholename'])\n    self._subscribe(directory, '%s:%s' % (prefix, directory), expression)",
    "docstring": "Watch a directory with a specific glob. If the directory doesn't yet exist, attempt to watch the parent directory and amend the patterns to include this. It's important this method isn't called more than one per directory when updating all subscriptions. Subsequent calls will overwrite the named subscription, so it must include all possible glob expressions.",
    "type": "method",
    "file_path": "django\\django\\utils\\autoreload.py",
    "ast_data": "FunctionDef name:_watch_glob arg:self arg:directory arg:patterns arguments arg arg arg Assign If Call If Call Call Return return:no Assign Assign Assign Assign For Call Call"
  },
  {
    "library": "matplotlib",
    "name": "set_history_buttons",
    "source_code": "def set_history_buttons(self):\n    pass",
    "docstring": "Enable or disable the back/forward button.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\backend_bases.py",
    "ast_data": "FunctionDef name:set_history_buttons arg:self arguments arg"
  },
  {
    "library": "scipy",
    "name": "_construct_from_derivatives",
    "source_code": "@staticmethod\ndef _construct_from_derivatives(xa, xb, ya, yb):\n    ya, yb = (np.asarray(ya), np.asarray(yb))\n    if ya.shape[1:] != yb.shape[1:]:\n        raise ValueError(f'Shapes of ya {ya.shape} and yb {yb.shape} are incompatible')\n    dta, dtb = (ya.dtype, yb.dtype)\n    if np.issubdtype(dta, np.complexfloating) or np.issubdtype(dtb, np.complexfloating):\n        dt = np.complex128\n    else:\n        dt = np.float64\n    na, nb = (len(ya), len(yb))\n    n = na + nb\n    c = np.empty((na + nb,) + ya.shape[1:], dtype=dt)\n    for q in range(0, na):\n        c[q] = ya[q] / spec.poch(n - q, q) * (xb - xa) ** q\n        for j in range(0, q):\n            c[q] -= (-1) ** (j + q) * comb(q, j) * c[j]\n    for q in range(0, nb):\n        c[-q - 1] = yb[q] / spec.poch(n - q, q) * (-1) ** q * (xb - xa) ** q\n        for j in range(0, q):\n            c[-q - 1] -= (-1) ** (j + 1) * comb(q, j + 1) * c[-q + j]\n    return c",
    "docstring": "Compute the coefficients of a polynomial in the Bernstein basis given the values and derivatives at the edges. Return the coefficients of a polynomial in the Bernstein basis defined on `xaxbyaybyaybnanbxaxba=0B^{q}(x = xa)c_qq = 0, ..., na`.",
    "type": "method",
    "file_path": "scipy\\scipy\\interpolate\\_interpolate.py",
    "ast_data": "FunctionDef name:_construct_from_derivatives arg:xa arg:xb arg:ya arg:yb arguments arg arg arg arg Assign Call Call If Compare Raise Call Assign If BoolOp Call Call Assign Assign Assign Call Call Assign Assign Call For Call Assign Call For Call Call For Call Assign Call For Call Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "CompositeAffine2D",
    "source_code": "class CompositeAffine2D(Affine2DBase):\n\n    def __init__(self, a, b, **kwargs):\n        if not a.is_affine or not b.is_affine:\n            raise ValueError(\"'a' and 'b' must be affine transforms\")\n        if a.output_dims != b.input_dims:\n            raise ValueError(\"The output dimension of 'a' must be equal to the input dimensions of 'b'\")\n        self.input_dims = a.input_dims\n        self.output_dims = b.output_dims\n        super().__init__(**kwargs)\n        self._a = a\n        self._b = b\n        self.set_children(a, b)\n        self._mtx = None\n\n    @property\n    def depth(self):\n        return self._a.depth + self._b.depth\n\n    def _iter_break_from_left_to_right(self):\n        for left, right in self._a._iter_break_from_left_to_right():\n            yield (left, right + self._b)\n        for left, right in self._b._iter_break_from_left_to_right():\n            yield (self._a + left, right)\n    __str__ = _make_str_method('_a', '_b')\n\n    def get_matrix(self):\n        if self._invalid:\n            self._mtx = np.dot(self._b.get_matrix(), self._a.get_matrix())\n            self._inverted = None\n            self._invalid = 0\n        return self._mtx",
    "docstring": "A composite transform formed by applying transform *a* then transform *b*. This version is an optimization that handles the case where both *a* and *b* are 2D affines.",
    "type": "class",
    "file_path": "matplotlib\\lib\\matplotlib\\transforms.py",
    "ast_data": "ClassDef name:CompositeAffine2D FunctionDef name:__init__ arg:self arg:a arg:b arguments arg arg arg arg If BoolOp Raise Call If Compare Raise Call Assign Assign Call Call Assign Assign Call Assign FunctionDef name:depth arg:self arguments arg Return return:yes FunctionDef name:_iter_break_from_left_to_right arg:self arguments arg For Call For Call Assign Call FunctionDef name:get_matrix arg:self arguments arg If Assign Call Call Call Assign Assign Return return:yes"
  },
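A sketch of how composites arise in practice: adding two 2D affines in matplotlib yields this optimized class, with the matrix product taken right-to-left.

```python
# Sketch: `a + b` applies a first, then b; get_matrix() is
# dot(b.get_matrix(), a.get_matrix()).
from matplotlib.transforms import Affine2D

a = Affine2D().scale(2)          # (1, 1) -> (2, 2)
b = Affine2D().translate(1, 0)   # (2, 2) -> (3, 2)
composite = a + b
print(composite.transform([[1, 1]]))  # [[3. 2.]]
```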
  {
    "library": "matplotlib",
    "name": "push_current",
    "source_code": "def push_current(self):\n    self._nav_stack.push(WeakKeyDictionary({ax: (ax._get_view(), (ax.get_position(True).frozen(), ax.get_position().frozen())) for ax in self.canvas.figure.axes}))\n    self.set_history_buttons()",
    "docstring": "Push the current view limits and position onto the stack.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\backend_bases.py",
    "ast_data": "FunctionDef name:push_current arg:self arguments arg Call Call Call Call Call Call Call Call"
  },
  {
    "library": "cherrypy",
    "name": "protocol_from_http",
    "source_code": "def protocol_from_http(protocol_str):\n    return (int(protocol_str[5]), int(protocol_str[7]))",
    "docstring": "Return a protocol tuple from the given 'HTTP/x.y' string.",
    "type": "function",
    "file_path": "cherrypy\\cherrypy\\lib\\httputil.py",
    "ast_data": "FunctionDef name:protocol_from_http arg:protocol_str arguments arg Return return:yes Call Call"
  },
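A re-implementation sketch of `protocol_from_http`, which reads the major/minor digits at fixed positions of an 'HTTP/x.y' string.

```python
def protocol_from_http(protocol_str):
    # 'HTTP/x.y': index 5 is the major version, index 7 the minor.
    return (int(protocol_str[5]), int(protocol_str[7]))

print(protocol_from_http("HTTP/1.1"))  # (1, 1)
print(protocol_from_http("HTTP/1.0"))  # (1, 0)
```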
  {
    "library": "matplotlib",
    "name": "get_solid_joinstyle",
    "source_code": "def get_solid_joinstyle(self):\n    return self._solidjoinstyle.name",
    "docstring": "Return the for solid lines. See also .",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\lines.py",
    "ast_data": "FunctionDef name:get_solid_joinstyle arg:self arguments arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "download",
    "source_code": "def download(name: str, output_dir: str, url: str, reference_bin_hash: str) -> bool:\n    binary_path = Path(output_dir, name)\n    if check(binary_path, reference_bin_hash):\n        logging.info('Correct binary already exists at %s. Exiting.', binary_path)\n        return True\n    binary_path.parent.mkdir(parents=True, exist_ok=True)\n    logging.info('Downloading %s to %s', url, binary_path)\n    if DRY_RUN:\n        logging.info('Exiting as there is nothing left to do in dry run mode')\n        return True\n    urllib.request.urlretrieve(url, binary_path, reporthook=report_download_progress if sys.stdout.isatty() else None)\n    logging.info('Downloaded %s successfully.', name)\n    if not check(binary_path, reference_bin_hash):\n        logging.critical('Downloaded binary %s failed its hash check', name)\n        return False\n    mode = os.stat(binary_path).st_mode\n    mode |= stat.S_IXUSR\n    os.chmod(binary_path, mode)\n    logging.info('Using %s located at %s', name, binary_path)\n    return True",
    "docstring": "Download a platform-appropriate binary if one doesn't already exist at the expected location and verifies that it is the right binary by checking its SHA256 hash against the expected hash.",
    "type": "function",
    "file_path": "pytorch\\tools\\linter\\adapters\\s3_init.py",
    "ast_data": "FunctionDef name:download arg:name arg:output_dir arg:url arg:reference_bin_hash arguments arg arg arg arg Assign Call If Call Call Return return:yes Call Call If Call Return return:yes Call Call Call If Call Call Return return:yes Assign Call Call Call Return return:yes"
  },
  {
    "library": "numpy",
    "name": "may_share_memory",
    "source_code": "@array_function_from_c_func_and_dispatcher(_multiarray_umath.may_share_memory)\ndef may_share_memory(a, b, max_work=None):\n    return (a, b)",
    "docstring": "may_share_memory(a, b, /, max_work=None) Determine if two arrays might share memory A return of True does not necessarily mean that the two arrays share any element. It just means that they *might*. Only the memory bounds of a and b are checked by default. Parameters ---------- a, b : ndarray Input arrays max_work : int, optional Effort to spend on solving the overlap problem. See for details. Default for `` is to do a bounds check. Returns ------- out : bool See Also -------- shares_memory Examples -------- >>> import numpy as np >>> np.may_share_memory(np.array([1,2]), np.array([5,8,9])) False >>> x = np.zeros([3, 4]) >>> np.may_share_memory(x[:,0], x[:,1]) True",
    "type": "function",
    "file_path": "numpy\\numpy\\_core\\multiarray.py",
    "ast_data": "FunctionDef name:may_share_memory arg:a arg:b arg:max_work arguments arg arg arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "increasing_map",
    "source_code": "@staticmethod\ndef increasing_map(x: Union[ExprIn, ExprVR], fn: ExprFn) -> ExprVR:\n    x = ValueRanges.wrap(x)\n    return ValueRanges(fn(x.lower), fn(x.upper))",
    "docstring": "Increasing: x f(x) <= f(y).",
    "type": "method",
    "file_path": "pytorch\\torch\\utils\\_sympy\\value_ranges.py",
    "ast_data": "FunctionDef name:increasing_map arg:x arg:fn arguments arg arg Assign Call Return return:yes Call Call Call"
  },
  {
    "library": "seaborn",
    "name": "scotts_factor",
    "source_code": "def scotts_factor(self):\n    return power(self.neff, -1.0 / (self.d + 4))",
    "docstring": "Compute Scott's factor. Returns ------- s : float Scott's factor.",
    "type": "method",
    "file_path": "seaborn\\seaborn\\external\\kde.py",
    "ast_data": "FunctionDef name:scotts_factor arg:self arguments arg Return return:yes Call"
  },
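The factor computed above is `neff ** (-1 / (d + 4))`; a quick arithmetic sketch with assumed values for the effective sample size and dimensionality.

```python
# Sketch: Scott's bandwidth factor for neff = 100 samples in d = 1 dimension.
neff, d = 100, 1
print(neff ** (-1.0 / (d + 4)))  # ~0.398
```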
  {
    "library": "kornia",
    "name": "list_operators",
    "source_code": "@classmethod\ndef list_operators(cls) -> None:\n    repo_contents = cls._fetch_repo_contents('operators')\n    operators = [file['path'] for file in repo_contents]\n    pprint.pp(operators)",
    "docstring": "List all available ONNX operators in the 'operators' folder of the Hugging Face repository.",
    "type": "method",
    "file_path": "kornia\\kornia\\onnx\\utils.py",
    "ast_data": "FunctionDef name:list_operators arg:cls arguments arg Assign Call Assign Call"
  },
  {
    "library": "tensorflow",
    "name": "evaluate",
    "source_code": "def evaluate(self, model, x=None, y=None, batch_size=None, verbose=1, sample_weight=None, steps=None, callbacks=None, **kwargs):\n    raise NotImplementedError()",
    "docstring": "Returns the loss value & metrics values for the model in test mode.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\training_utils_v1.py",
    "ast_data": "FunctionDef name:evaluate arg:self arg:model arg:x arg:y arg:batch_size arg:verbose arg:sample_weight arg:steps arg:callbacks arguments arg arg arg arg arg arg arg arg arg arg Raise Call"
  },
  {
    "library": "matplotlib",
    "name": "set_linewidth",
    "source_code": "def set_linewidth(self, w):\n    self._shared_setter('linewidth', w)",
    "docstring": "Set the linewidth in points of the rectangle and the connectors. Parameters ---------- w : float or None",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\inset.py",
    "ast_data": "FunctionDef name:set_linewidth arg:self arg:w arguments arg arg Call"
  },
  {
    "library": "tensorflow",
    "name": "PerReplica",
    "source_code": "class PerReplica(DistributedValues, composite_tensor.CompositeTensor, ds_types.PerReplica):\n\n    @property\n    def _type_spec(self):\n        return PerReplicaSpec(*(type_spec.type_spec_from_value(v) for v in self._values))\n\n    @property\n    def values(self):\n        return self._values",
    "docstring": "Holds a map from replica to unsynchronized values.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\values.py",
    "ast_data": "ClassDef name:PerReplica FunctionDef name:_type_spec arg:self arguments arg Return return:yes Call Call FunctionDef name:values arg:self arguments arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "is_available",
    "source_code": "def is_available():\n    return torch._C.has_mkl",
    "docstring": "Return whether PyTorch is built with MKL support.",
    "type": "function",
    "file_path": "pytorch\\torch\\backends\\mkl\\__init__.py",
    "ast_data": "FunctionDef name:is_available arguments Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "inplace_identity_derivative",
    "source_code": "def inplace_identity_derivative(Z, delta):\n    pass",
    "docstring": "Apply the derivative of the identity function: do nothing. Parameters ---------- Z : {array-like, sparse matrix}, shape (n_samples, n_features) The data which was output from the identity activation function during the forward pass. delta : {array-like}, shape (n_samples, n_features) The backpropagated error signal to be modified inplace.",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\neural_network\\_base.py",
    "ast_data": "FunctionDef name:inplace_identity_derivative arg:Z arg:delta arguments arg arg"
  },
  {
    "library": "scikit-learn",
    "name": "fit_predict",
    "source_code": "def fit_predict(self, X, y=None, sample_weight=None):\n    return self.fit(X, sample_weight=sample_weight).labels_",
    "docstring": "Compute cluster centers and predict cluster index for each sample. Convenience method; equivalent to calling fit(X) followed by predict(X). Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) New data to transform. y : Ignored Not used, present here for API consistency by convention. sample_weight : array-like of shape (n_samples,), default=None The weights for each observation in X. If None, all observations are assigned equal weight. Returns ------- labels : ndarray of shape (n_samples,) Index of the cluster each sample belongs to.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\cluster\\_kmeans.py",
    "ast_data": "FunctionDef name:fit_predict arg:self arg:X arg:y arg:sample_weight arguments arg arg arg arg Return return:yes Call"
  },
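A usage sketch with scikit-learn's `KMeans`, which provides this `fit_predict`; `n_init="auto"` assumes a recent scikit-learn release.

```python
import numpy as np
from sklearn.cluster import KMeans

# Two well-separated blobs; fit_predict fits and returns labels in one call.
X = np.array([[0, 0], [0, 1], [10, 10], [10, 11]])
labels = KMeans(n_clusters=2, n_init="auto", random_state=0).fit_predict(X)
print(labels)  # e.g. [1 1 0 0] (cluster numbering is arbitrary)
```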
  {
    "library": "pytorch",
    "name": "typename",
    "source_code": "def typename(obj: _Any, /) -> str:\n    if isinstance(obj, torch.Tensor):\n        return obj.type()\n    module = getattr(obj, '__module__', '') or ''\n    qualname = ''\n    if hasattr(obj, '__qualname__'):\n        qualname = obj.__qualname__\n    elif hasattr(obj, '__name__'):\n        qualname = obj.__name__\n    else:\n        module = obj.__class__.__module__ or ''\n        qualname = obj.__class__.__qualname__\n    if module in {'', 'builtins'}:\n        return qualname\n    return f'{module}.{qualname}'",
    "docstring": "String representation of the type of an object. This function returns a fully qualified string representation of an object's type. Args: obj (object): The object whose type to represent Returns: str: the type of the object Example: >>> x = torch.tensor([1, 2, 3]) >>> torch.typename(x) 'torch.LongTensor' >>> torch.typename(torch.nn.Parameter) 'torch.nn.parameter.Parameter'",
    "type": "function",
    "file_path": "pytorch\\torch\\__init__.py",
    "ast_data": "FunctionDef name:typename arguments arg If Call Return return:yes Call Assign BoolOp Call Assign If Call Assign If Call Assign Assign BoolOp Assign If Compare Return return:yes Return return:yes"
  },
  {
    "library": "numpy",
    "name": "base_repr",
    "source_code": "@set_module('numpy')\ndef base_repr(number, base=2, padding=0):\n    digits = '0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ'\n    if base > len(digits):\n        raise ValueError('Bases greater than 36 not handled in base_repr.')\n    elif base < 2:\n        raise ValueError('Bases less than 2 not handled in base_repr.')\n    num = abs(int(number))\n    res = []\n    while num:\n        res.append(digits[num % base])\n        num //= base\n    if padding:\n        res.append('0' * padding)\n    if number < 0:\n        res.append('-')\n    return ''.join(reversed(res or '0'))",
    "docstring": "Return a string representation of a number in the given base system. Parameters ---------- number : int The value to convert. Positive and negative values are handled. base : int, optional Convert to the number system. The valid range is 2-36, the default value is 2. padding : int, optional Number of zeros padded on the left. Default is 0 (no padding). Returns ------- out : str String representation of in system. See Also -------- binary_repr : Faster version of for base 2. Examples -------- >>> import numpy as np >>> np.base_repr(5) '101' >>> np.base_repr(6, 5) '11' >>> np.base_repr(7, base=5, padding=3) '00012' >>> np.base_repr(10, base=16) 'A' >>> np.base_repr(32, base=16) '20'",
    "type": "function",
    "file_path": "numpy\\numpy\\_core\\numeric.py",
    "ast_data": "FunctionDef name:base_repr arg:number arg:base arg:padding arguments arg arg arg Assign If Compare Call Raise Call If Compare Raise Call Assign Call Call Assign While Call If Call If Compare Call Return return:yes Call Call BoolOp Call"
  },
  {
    "library": "django",
    "name": "none",
    "source_code": "def none(self):\n    clone = self._chain()\n    clone.query.set_empty()\n    return clone",
    "docstring": "Return an empty QuerySet.",
    "type": "method",
    "file_path": "django\\django\\db\\models\\query.py",
    "ast_data": "FunctionDef name:none arg:self arguments arg Assign Call Call Return return:yes"
  },
  {
    "library": "pandas",
    "name": "_box_func",
    "source_code": "def _box_func(self, x):\n    raise AbstractMethodError(self)",
    "docstring": "box function to get object from internal representation",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\arrays\\datetimelike.py",
    "ast_data": "FunctionDef name:_box_func arg:self arg:x arguments arg arg Raise Call"
  },
  {
    "library": "numpy",
    "name": "read_array_header_2_0",
    "source_code": "@set_module('numpy.lib.format')\ndef read_array_header_2_0(fp, max_header_size=_MAX_HEADER_SIZE):\n    return _read_array_header(fp, version=(2, 0), max_header_size=max_header_size)",
    "docstring": "Read an array header from a filelike object using the 2.0 file format version. This will leave the file object located just after the header. Parameters ---------- fp : filelike object A file object or something with a method like a file. max_header_size : int, optional Maximum allowed size of the header. Large headers may not be safe to load securely and thus require explicitly passing a larger value. See :py:func: for details. Returns ------- shape : tuple of int The shape of the array. fortran_order : bool The array data will be written out directly if it is either C-contiguous or Fortran-contiguous. Otherwise, it will be made contiguous before writing it out. dtype : dtype The dtype of the file's data. Raises ------ ValueError If the data is invalid.",
    "type": "function",
    "file_path": "numpy\\numpy\\lib\\_format_impl.py",
    "ast_data": "FunctionDef name:read_array_header_2_0 arg:fp arg:max_header_size arguments arg arg Return return:yes Call Call"
  },
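A round-trip sketch: force numpy to write a format-2.0 header, then read it back with the function above; the in-memory buffer flow is illustrative.

```python
import io
import numpy as np
from numpy.lib.format import write_array, read_magic, read_array_header_2_0

buf = io.BytesIO()
write_array(buf, np.zeros((3, 4)), version=(2, 0))  # force the 2.0 format
buf.seek(0)
print(read_magic(buf))             # (2, 0)
print(read_array_header_2_0(buf))  # ((3, 4), False, dtype('float64'))
```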
  {
    "library": "sphinx",
    "name": "evaluate_copyright_placeholders",
    "source_code": "def evaluate_copyright_placeholders(_app: Sphinx, config: Config) -> None:\n    replace_yr = str(time.localtime().tm_year)\n    for k in ('copyright', 'epub_copyright'):\n        if k in config:\n            value: str | Sequence[str] = config[k]\n            if isinstance(value, str):\n                if '%Y' in value:\n                    config[k] = value.replace('%Y', replace_yr)\n            elif any(('%Y' in line for line in value)):\n                items = (line.replace('%Y', replace_yr) for line in value)\n                config[k] = type(value)(items)",
    "docstring": "Replace copyright year placeholders (%Y) with the current year.",
    "type": "function",
    "file_path": "sphinx\\sphinx\\config.py",
    "ast_data": "FunctionDef name:evaluate_copyright_placeholders arg:_app arg:config arguments arg arg Assign Call Call For If Compare If Call If Compare Assign Call If Call Compare Assign Call Assign Call Call"
  },
  {
    "library": "numpy",
    "name": "as_array",
    "source_code": "def as_array(obj):\n    if isinstance(obj, Expr):\n        obj = (obj,)\n    return Expr(Op.ARRAY, obj)",
    "docstring": "Return object as ARRAY expression (array constant).",
    "type": "function",
    "file_path": "numpy\\numpy\\f2py\\symbolic.py",
    "ast_data": "FunctionDef name:as_array arg:obj arguments arg If Call Assign Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "hash_tensor",
    "source_code": "def hash_tensor(t: torch.Tensor) -> torch.Tensor:\n    return t.detach().float().mean()",
    "docstring": "Some inexpensive hash. Used as a quick and dirty indicator for tensor mutation",
    "type": "function",
    "file_path": "pytorch\\torch\\_library\\utils.py",
    "ast_data": "FunctionDef name:hash_tensor arg:t arguments arg Return return:yes Call Call Call"
  },
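A sketch of the intended use: compare the cheap mean-based hash before and after an operation to detect mutation; note that distinct tensors can collide on the mean.

```python
import torch

t = torch.arange(4.0)
before = t.detach().float().mean()  # what hash_tensor computes
t[0] = 100.0
after = t.detach().float().mean()
print(bool(before != after))  # True: the in-place write changed the "hash"
```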
  {
    "library": "django",
    "name": "simplify",
    "source_code": "def simplify(self, tolerance=0.0, preserve_topology=False):\n    if preserve_topology:\n        return self._topology(capi.geos_preservesimplify(self.ptr, tolerance))\n    else:\n        return self._topology(capi.geos_simplify(self.ptr, tolerance))",
    "docstring": "Return the Geometry, simplified using the Douglas-Peucker algorithm to the specified tolerance (higher tolerance => less points). If no tolerance provided, defaults to 0. By default, don't preserve topology - e.g. polygons can be split, collapse to lines or disappear holes can be created or disappear, and lines can cross. By specifying preserve_topology=True, the result will have the same dimension and number of components as the input. This is significantly slower.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\geos\\geometry.py",
    "ast_data": "FunctionDef name:simplify arg:self arg:tolerance arg:preserve_topology arguments arg arg arg If Return return:yes Call Call Return return:yes Call Call"
  },
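A hedged usage sketch, assuming a GeoDjango environment with the GEOS library available; the coordinates and tolerance are arbitrary.

```python
# Sketch: Douglas-Peucker simplification drops near-collinear vertices.
from django.contrib.gis.geos import LineString

ls = LineString((0, 0), (0.1, 0.05), (0.2, 0), (1, 0))
print(ls.simplify(tolerance=0.2).wkt)                # fewer vertices
print(ls.simplify(0.2, preserve_topology=True).wkt)  # topology kept, slower
```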
  {
    "library": "matplotlib",
    "name": "get_arrowstyle",
    "source_code": "def get_arrowstyle(self):\n    return self._arrow_transmuter",
    "docstring": "Return the arrowstyle object.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\patches.py",
    "ast_data": "FunctionDef name:get_arrowstyle arg:self arguments arg Return return:yes"
  },
  {
    "library": "django",
    "name": "WithData",
    "source_code": "@classmethod\ndef WithData(cls, original_exc, model, fk, field_value):\n    return cls(\"%s: (%s:pk=%s) field_value was '%s'\" % (original_exc, model, fk, field_value))",
    "docstring": "Factory method for creating a deserialization error which has a more explanatory message.",
    "type": "method",
    "file_path": "django\\django\\core\\serializers\\base.py",
    "ast_data": "FunctionDef name:WithData arg:cls arg:original_exc arg:model arg:fk arg:field_value arguments arg arg arg arg arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "move_code_under_inner_loop",
    "source_code": "def move_code_under_inner_loop(code: IndentedBuffer, iter_var: sympy.Expr, new_iter_var: str, loop_start: sympy.Expr, loop_end: sympy.Expr) -> BracesBuffer:\n    transformed_code = BracesBuffer()\n    with contextlib.ExitStack() as stack:\n        transformed_code.writeline(f'for ({INDEX_TYPE} {new_iter_var} = {cexpr_index(loop_start)};' + f'{new_iter_var} < {cexpr_index(loop_end)}; {new_iter_var}++)')\n        stack.enter_context(transformed_code.indent())\n        for _, line in enumerate(code._lines):\n            assert isinstance(line, (str, DeferredLine))\n            deferred_name = None\n            if isinstance(line, DeferredLine):\n                deferred_name = line.name\n                line = line.line\n            new_line = re.sub('\\\\b' + f'{iter_var}' + '\\\\b', f'{new_iter_var}', line)\n            if deferred_name:\n                new_line = DeferredLine(deferred_name, new_line)\n            transformed_code.writeline(new_line)\n    return transformed_code",
    "docstring": "f(iter_var) is transformed to f(new_iter_var) under the inner loop \\/ for (new_iter_var = loop_start; new_iter_var for (new_x0 = start; new_x0 < end; new_x0++){ auto tmp0 = in_ptr[new_x0]; } The tmp0 is invalid outside the loop.",
    "type": "function",
    "file_path": "pytorch\\torch\\_inductor\\codegen\\cpp.py",
    "ast_data": "FunctionDef name:move_code_under_inner_loop arg:code arg:iter_var arg:new_iter_var arg:loop_start arg:loop_end arguments arg arg arg arg arg Assign Call With Call Call Call Call Call Call For Call Call Assign If Call Assign Assign Assign Call If Assign Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "fulltypes_for_flat_tensors",
    "source_code": "def fulltypes_for_flat_tensors(element_spec):\n    specs = _specs_for_flat_tensors(element_spec)\n    full_types_lists = [_translate_to_fulltype_for_flat_tensors(s) for s in specs]\n    rval = nest.flatten(full_types_lists)\n    return rval",
    "docstring": "Convert the element_spec for a dataset to a list of FullType Def. Note that \"flat\" in this function and in is a nickname for the \"batchable tensor list\" encoding used by datasets and map_fn. The FullTypeDef created corresponds to this encoding (e.g. that uses variants and not the FullTypeDef corresponding to the default \"component\" encoding). This is intended for temporary internal use and expected to be removed when type inference support is sufficient. See limitations of . Args: element_spec: A nest of TypeSpec describing the elements of a dataset (or map_fn). Returns: A list of FullTypeDef corresponding to ELEMENT_SPEC. The items in this list correspond to the items in .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\type_utils.py",
    "ast_data": "FunctionDef name:fulltypes_for_flat_tensors arg:element_spec arguments arg Assign Call Assign Call Assign Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "acorr",
    "source_code": "@_preprocess_data(replace_names=['x'], label_namer='x')\ndef acorr(self, x, **kwargs):\n    return self.xcorr(x, x, **kwargs)",
    "docstring": "Plot the autocorrelation of *x*. Parameters ---------- x : array-like Not run through Matplotlib's unit conversion, so this should be a unit-less array. detrend : callable, default: (no detrending) A detrending function applied to *x*. It must have the signature :: detrend(x: np.ndarray) -> np.ndarray normed : bool, default: True If `.Axes.vlines.Axes.axhline.Axes.plot.LineCollection.Line2D.Artist.LineCollection.Line2D~matplotlib.lines.Line2D~matplotlib.lines.Line2D.Axes.vlines.Axes.axhline.Axes.plotnumpy.correlate`.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\axes\\_axes.py",
    "ast_data": "FunctionDef name:acorr arg:self arg:x arguments arg arg arg Return return:yes Call Call"
  },
  {
    "library": "matplotlib",
    "name": "OffsetFrom",
    "source_code": "class OffsetFrom:\n\n    def __init__(self, artist, ref_coord, unit='points'):\n        self._artist = artist\n        x, y = ref_coord\n        self._ref_coord = (x, y)\n        self.set_unit(unit)\n\n    def set_unit(self, unit):\n        _api.check_in_list(['points', 'pixels'], unit=unit)\n        self._unit = unit\n\n    def get_unit(self):\n        return self._unit\n\n    def __call__(self, renderer):\n        if isinstance(self._artist, Artist):\n            bbox = self._artist.get_window_extent(renderer)\n            xf, yf = self._ref_coord\n            x = bbox.x0 + bbox.width * xf\n            y = bbox.y0 + bbox.height * yf\n        elif isinstance(self._artist, BboxBase):\n            bbox = self._artist\n            xf, yf = self._ref_coord\n            x = bbox.x0 + bbox.width * xf\n            y = bbox.y0 + bbox.height * yf\n        elif isinstance(self._artist, Transform):\n            x, y = self._artist.transform(self._ref_coord)\n        else:\n            _api.check_isinstance((Artist, BboxBase, Transform), artist=self._artist)\n        scale = 1 if self._unit == 'pixels' else renderer.points_to_pixels(1)\n        return Affine2D().scale(scale).translate(x, y)",
    "docstring": "Callable helper class for working with .",
    "type": "class",
    "file_path": "matplotlib\\lib\\matplotlib\\text.py",
    "ast_data": "ClassDef name:OffsetFrom FunctionDef name:__init__ arg:self arg:artist arg:ref_coord arg:unit arguments arg arg arg arg Assign Assign Assign Call FunctionDef name:set_unit arg:self arg:unit arguments arg arg Call Assign FunctionDef name:get_unit arg:self arguments arg Return return:yes FunctionDef name:__call__ arg:self arg:renderer arguments arg arg If Call Assign Call Assign Assign Assign If Call Assign Assign Assign Assign If Call Assign Call Call Assign Compare Call Return return:yes Call Call Call"
  },
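A usage sketch following matplotlib's annotation machinery: `OffsetFrom` makes one annotation's text coordinates relative to another artist; positions and the output path are illustrative.

```python
import matplotlib.pyplot as plt
from matplotlib.text import OffsetFrom

fig, ax = plt.subplots()
an1 = ax.annotate("anchor", xy=(0.5, 0.5), xycoords="data")
# Interpret xytext as a 20-point downward offset from the bottom-center
# (0.5, 0) of an1's window extent.
ax.annotate("below", xy=(0.5, 0.5), xytext=(0, -20),
            textcoords=OffsetFrom(an1, (0.5, 0)))
fig.savefig("offset_from.png")  # illustrative output path
```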
  {
    "library": "sphinx",
    "name": "DocumentTargetTransform",
    "source_code": "class DocumentTargetTransform(SphinxPostTransform):\n    default_priority = 400\n    formats = ('latex',)\n\n    def run(self, **kwargs: Any) -> None:\n        for node in self.document.findall(addnodes.start_of_file):\n            section = node.next_node(nodes.section)\n            if section:\n                section['ids'].append(':doc')",
    "docstring": "Add :doc label to the first section of each document.",
    "type": "class",
    "file_path": "sphinx\\sphinx\\builders\\latex\\transforms.py",
    "ast_data": "ClassDef name:DocumentTargetTransform Assign Assign FunctionDef name:run arg:self arguments arg arg For Call Assign Call If Call"
  },
  {
    "library": "tensorflow",
    "name": "trainable_variables",
    "source_code": "@property\ndef trainable_variables(self):\n    return tuple((v for v in self.variables if v.trainable))",
    "docstring": "A sequence of trainable variables accessed by this FuncGraph. Note that functions keep only weak references to variables. Calling the function after a variable it accesses has been deleted is an error. Returns: Sequence of trainable variables for this func graph.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\func_graph.py",
    "ast_data": "FunctionDef name:trainable_variables arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "_encrypt",
    "source_code": "@staticmethod\ndef _encrypt(plaintext, key, ndiscard=4):\n    key = _api.check_getitem({'eexec': 55665, 'charstring': 4330}, key=key)\n    ciphertext = []\n    for byte in b'\\x00' * ndiscard + plaintext:\n        c = byte ^ key >> 8\n        ciphertext.append(c)\n        key = (key + c) * 52845 + 22719 & 65535\n    return bytes(ciphertext)",
    "docstring": "Encrypt plaintext using the Type-1 font algorithm. The algorithm is described in Adobe's \"Adobe Type 1 Font Format\". The key argument can be an integer, or one of the strings 'eexec' and 'charstring', which map to the key specified for the corresponding part of Type-1 fonts. The ndiscard argument should be an integer, usually 4. That number of bytes is prepended to the plaintext before encryption. This function prepends NUL bytes for reproducibility, even though the original algorithm uses random bytes, presumably to avoid cryptanalysis.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\_type1font.py",
    "ast_data": "FunctionDef name:_encrypt arg:plaintext arg:key arg:ndiscard arguments arg arg arg Assign Call Assign For Assign Call Assign Return return:yes Call"
  },
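A standalone re-implementation sketch of the Type-1 encryption loop above ('eexec' key 55665, 'charstring' key 4330), with NUL discard bytes prepended as described; `type1_encrypt` is a hypothetical name.

```python
def type1_encrypt(plaintext: bytes, key: int = 55665, ndiscard: int = 4) -> bytes:
    # Adobe Type-1 eexec/charstring cipher: XOR with the key's high byte,
    # then update the 16-bit key from the ciphertext byte.
    ciphertext = []
    for byte in b"\x00" * ndiscard + plaintext:
        c = byte ^ (key >> 8)
        ciphertext.append(c)
        key = ((key + c) * 52845 + 22719) & 0xFFFF
    return bytes(ciphertext)

print(type1_encrypt(b"hello").hex())
```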
  {
    "library": "pytorch",
    "name": "_get_cpp_wrapper_header",
    "source_code": "def _get_cpp_wrapper_header(device: str, aot_mode: bool=False) -> str:\n    base_device = device.split(':')[0]\n    is_array_ref = config.aot_inductor.allow_stack_allocation and base_device == 'cpu'\n    return f'torch/csrc/inductor/{('aoti_include' if aot_mode else 'cpp_wrapper')}/{('array_ref' if is_array_ref else base_device)}.h'",
    "docstring": "Given a device type (and optionally whether we're in AOT Inductor mode), returns the path to the cpp_wrapper header file to be precompiled.",
    "type": "function",
    "file_path": "pytorch\\torch\\_inductor\\codecache.py",
    "ast_data": "FunctionDef name:_get_cpp_wrapper_header arg:device arg:aot_mode arguments arg arg Assign Call Assign BoolOp Compare Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "__init__",
    "source_code": "def __init__(self, majloc=None, minloc=None, majfmt=None, minfmt=None, label=None, default_limits=None):\n    self.majloc = majloc\n    self.minloc = minloc\n    self.majfmt = majfmt\n    self.minfmt = minfmt\n    self.label = label\n    self.default_limits = default_limits",
    "docstring": "Parameters ---------- majloc, minloc : Locator, optional Tick locators for the major and minor ticks. majfmt, minfmt : Formatter, optional Tick formatters for the major and minor ticks. label : str, optional The default axis label. default_limits : optional The default min and max limits of the axis if no data has been plotted. Notes ----- If any of the above are ``, the axis will simply use the default value.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\units.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:majloc arg:minloc arg:majfmt arg:minfmt arg:label arg:default_limits arguments arg arg arg arg arg arg arg Assign Assign Assign Assign Assign Assign"
  },
  {
    "library": "pytorch",
    "name": "zip_schema",
    "source_code": "def zip_schema(schema: _C.FunctionSchema, args: tuple[Any, ...], kwargs: dict[str, Any]) -> Iterable[tuple[_C.Argument, Any]]:\n    assert len(schema.arguments) >= len(args) + len(kwargs)\n    for i in range(len(schema.arguments)):\n        info = schema.arguments[i]\n        if info.kwarg_only:\n            if info.name in kwargs:\n                yield (info, kwargs[info.name])\n            continue\n        if i >= len(args):\n            if not info.kwarg_only and info.name in kwargs:\n                yield (info, kwargs[info.name])\n            continue\n        yield (info, args[i])\n    return",
    "docstring": "zips schema.arguments and (args, kwargs) together. Assumes that (args, kwargs) were the inputs to some torch._ops.OpOverload: that is, (args, kwargs) must be bindable to the schema (args, kwargs).",
    "type": "function",
    "file_path": "pytorch\\torch\\_library\\utils.py",
    "ast_data": "FunctionDef name:zip_schema arg:schema arg:args arg:kwargs arguments arg arg arg Compare Call Call Call For Call Call Assign If If Compare If Compare Call If BoolOp Compare Return return:no"
  },
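A usage sketch pairing a real op schema with concrete inputs; `zip_schema` is an internal helper (`torch._library.utils`, per the file path above), so the import path may change between releases.

```python
import torch
from torch._library.utils import zip_schema

# aten::add.Tensor: (Tensor self, Tensor other, *, Scalar alpha=1)
schema = torch.ops.aten.add.Tensor._schema
args, kwargs = (torch.ones(2), torch.ones(2)), {"alpha": 2}
for info, value in zip_schema(schema, args, kwargs):
    print(info.name, info.kwarg_only, type(value).__name__)
```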
  {
    "library": "pytorch",
    "name": "count_tangents",
    "source_code": "def count_tangents(fx_g: torch.fx.GraphModule) -> int:\n\n    def is_saved_tensor(x: Node) -> bool:\n        return 'tangents' not in x.name and 'bwd_seed' not in x.name and ('bwd_base_offset' not in x.name) and ('bwd_rng_state' not in x.name)\n    arg_count = 0\n    static_arg_idxs = []\n    for n in fx_g.graph.nodes:\n        if n.op == 'placeholder':\n            if is_saved_tensor(n):\n                static_arg_idxs.append(arg_count)\n            arg_count += 1\n    assert static_arg_idxs == list(range(len(static_arg_idxs)))\n    return len(static_arg_idxs)",
    "docstring": "Infers which inputs are static for a backwards graph",
    "type": "function",
    "file_path": "pytorch\\torch\\_inductor\\utils.py",
    "ast_data": "FunctionDef name:count_tangents arg:fx_g arguments arg FunctionDef name:is_saved_tensor arg:x arguments arg Return return:yes BoolOp Compare Compare Compare Compare Assign Assign For If Compare If Call Call Compare Call Call Call Return return:yes Call"
  },
  {
    "library": "pygame",
    "name": "Video_AutoInit",
    "source_code": "def Video_AutoInit():\n    if 'Darwin' in platform.platform():\n        if os.getcwd() == '/' and len(sys.argv) > 1:\n            os.chdir(os.path.dirname(sys.argv[0]))\n    return True",
    "docstring": "Called from the base.c just before display module is initialized.",
    "type": "function",
    "file_path": "pygame\\src_py\\macosx.py",
    "ast_data": "FunctionDef name:Video_AutoInit arguments If Compare Call If BoolOp Compare Call Compare Call Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "placeholder",
    "source_code": "@tf_export(v1=['placeholder'])\ndef placeholder(dtype, shape=None, name=None):\n    if context.executing_eagerly():\n        raise RuntimeError('tf.placeholder() is not compatible with eager execution.')\n    return gen_array_ops.placeholder(dtype=dtype, shape=shape, name=name)",
    "docstring": "Inserts a placeholder for a tensor that will be always fed. **Important**: This tensor will produce an error if evaluated. Its value must be fed using the optional argument to , , or . For example: Args: dtype: The type of elements in the tensor to be fed. shape: The shape of the tensor to be fed (optional). If the shape is not specified, you can feed a tensor of any shape. name: A name for the operation (optional). Returns: A that may be used as a handle for feeding a value, but not evaluated directly. Raises: RuntimeError: if eager execution is enabled @compatibility(TF2) This API is not compatible with eager execution and . To migrate to TF2, rewrite the code to be compatible with eager execution. Check the [migration guide]( on replacing calls. In TF2, you can just pass tensors directly into ops and layers. If you want to explicitly set up your inputs, also see [Keras functional API]( on how to use to replace . arguments also do the job of . For more details please read [Better performance with tf.function]( @end_compatibility",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\array_ops.py",
    "ast_data": "FunctionDef name:placeholder arg:dtype arg:shape arg:name arguments arg arg arg If Call Raise Call Return return:yes Call Call"
  },
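For reference, the classic graph-mode feeding pattern the docstring describes, written against the TF1 compatibility layer:

```python
import tensorflow.compat.v1 as tf

tf.disable_eager_execution()  # tf.placeholder is graph-mode only

x = tf.placeholder(tf.float32, shape=(None, 3))  # any batch size, 3 features
y = x * 2.0

with tf.Session() as sess:
    # The placeholder's value must be supplied through feed_dict.
    print(sess.run(y, feed_dict={x: [[1.0, 2.0, 3.0]]}))
```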
  {
    "library": "pytorch",
    "name": "has_static_value",
    "source_code": "def has_static_value(a: Union[SymBool, SymFloat, SymInt, bool, float, int]) -> bool:\n    assert isinstance(a, BoolLike + FloatLike + IntLike)\n    if isinstance(a, BoolLike) and is_concrete_bool(a) or (isinstance(a, FloatLike) and is_concrete_float(a)) or (isinstance(a, IntLike) and is_concrete_int(a)):\n        return True\n    assert isinstance(a, py_sym_types)\n    return a.node.shape_env.bound_sympy(a.node.expr).is_singleton()",
    "docstring": "User-code friendly utility to check if a value is static or dynamic. Returns true if given a constant, or a symbolic expression with a fixed value. Args: a (Union[SymBool, SymFloat, SymInt, bool, float, int]): Object to test",
    "type": "function",
    "file_path": "pytorch\\torch\\fx\\experimental\\symbolic_shapes.py",
    "ast_data": "FunctionDef name:has_static_value arg:a arguments arg Call If BoolOp BoolOp Call Call BoolOp Call Call BoolOp Call Call Return return:yes Call Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "caching_allocator_alloc",
    "source_code": "def caching_allocator_alloc(size, device: 'Device'=None, stream=None):\n    if device is None:\n        device = torch.cuda.current_device()\n    device = _get_device_index(device)\n    if stream is None:\n        stream = torch.cuda.current_stream(device)\n    if isinstance(stream, torch.cuda.streams.Stream):\n        stream = stream.cuda_stream\n    if not isinstance(stream, int):\n        raise TypeError('Invalid type for stream argument, must be `torch.cuda.Stream` or `int` representing a pointer to a existing stream')\n    with torch.cuda.device(device):\n        return torch._C._cuda_cudaCachingAllocator_raw_alloc(size, stream)",
    "docstring": "Perform a memory allocation using the CUDA memory allocator. Memory is allocated for a given device and a stream, this function is intended to be used for interoperability with other frameworks. Allocated memory is released through :func:. Args: size (int): number of bytes to be allocated. device (torch.device or int, optional): selected device. If it is `cuda-memory-management` for more details about GPU memory management.",
    "type": "function",
    "file_path": "pytorch\\torch\\cuda\\memory.py",
    "ast_data": "FunctionDef name:caching_allocator_alloc arg:size arg:device arg:stream arguments arg arg arg If Compare Assign Call Assign Call If Compare Assign Call If Call Assign If Call Raise Call With Call Return return:yes Call"
  },
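A hedged usage sketch pairing the raw allocation with `torch.cuda.caching_allocator_delete`, the release path the docstring names; it only does anything on a machine with CUDA available:

```python
import torch

if torch.cuda.is_available():
    # Allocate 1 MiB on the current device/stream via the caching allocator.
    ptr = torch.cuda.caching_allocator_alloc(1024 * 1024)
    try:
        pass  # hand the raw pointer `ptr` to another framework here
    finally:
        torch.cuda.caching_allocator_delete(ptr)  # release the raw block
```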
  {
    "library": "pandas",
    "name": "density",
    "source_code": "@property\ndef density(self) -> float:\n    return self.sp_index.npoints / self.sp_index.length",
    "docstring": "The percent of non- `` points, as decimal. See Also -------- DataFrame.sparse.from_spmatrix : Create a new DataFrame from a scipy sparse matrix. Examples -------- >>> from pandas.arrays import SparseArray >>> s = SparseArray([0, 0, 1, 1, 1], fill_value=0) >>> s.density 0.6",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\arrays\\sparse\\array.py",
    "ast_data": "FunctionDef name:density arg:self arguments arg Return return:yes"
  },
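The docstring's own example, expanded slightly to show where the 0.6 comes from (three stored, non-fill points out of five):

```python
from pandas.arrays import SparseArray

s = SparseArray([0, 0, 1, 1, 1], fill_value=0)
# npoints / length: 3 non-fill points over 5 total points
assert s.density == 0.6
```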
  {
    "library": "scikit-learn",
    "name": "PCABenchmark",
    "source_code": "class PCABenchmark(Transformer, Estimator, Benchmark):\n    param_names = ['svd_solver']\n    params = (['full', 'arpack', 'randomized'],)\n\n    def setup_cache(self):\n        super().setup_cache()\n\n    def make_data(self, params):\n        return _mnist_dataset()\n\n    def make_estimator(self, params):\n        svd_solver, = params\n        estimator = PCA(n_components=32, svd_solver=svd_solver, random_state=0)\n        return estimator\n\n    def make_scorers(self):\n        make_pca_scorers(self)",
    "docstring": "Benchmarks for PCA.",
    "type": "class",
    "file_path": "scikit-learn\\asv_benchmarks\\benchmarks\\decomposition.py",
    "ast_data": "ClassDef name:PCABenchmark Assign Assign FunctionDef name:setup_cache arg:self arguments arg Call Call FunctionDef name:make_data arg:self arg:params arguments arg arg Return return:yes Call FunctionDef name:make_estimator arg:self arg:params arguments arg arg Assign Assign Call Return return:yes FunctionDef name:make_scorers arg:self arguments arg Call"
  },
  {
    "library": "django",
    "name": "set_autocommit",
    "source_code": "def set_autocommit(autocommit, using=None):\n    return get_connection(using).set_autocommit(autocommit)",
    "docstring": "Set the autocommit status of the connection.",
    "type": "function",
    "file_path": "django\\django\\db\\transaction.py",
    "ast_data": "FunctionDef name:set_autocommit arg:autocommit arg:using arguments arg arg Return return:yes Call Call"
  },
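A common pattern built on this helper: turning autocommit off to batch several ORM writes into one transaction. A sketch assuming a configured default database:

```python
from django.db import transaction

transaction.set_autocommit(False)
try:
    # ... several ORM writes here, committed as one unit ...
    transaction.commit()
except Exception:
    transaction.rollback()
    raise
finally:
    transaction.set_autocommit(True)
```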
  {
    "library": "tensorflow",
    "name": "checkpointer",
    "source_code": "def checkpointer(self):\n    if self._checkpoint is None:\n        self._checkpoint = self._checkpointer_impl(**self._checkpoint_items)\n    return self._checkpoint",
    "docstring": "Gets or creates the underlying Checkpoint instance.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\checkpoint\\async_checkpoint_helper.py",
    "ast_data": "FunctionDef name:checkpointer arg:self arguments arg If Compare Assign Call Return return:yes"
  },
  {
    "library": "scipy",
    "name": "__call__",
    "source_code": "def __call__(self, row, col, *, seed=None):\n    return random_table_frozen(row, col, seed=seed)",
    "docstring": "Create a frozen distribution of tables with given marginals. See for more information.",
    "type": "method",
    "file_path": "scipy\\scipy\\stats\\_multivariate.py",
    "ast_data": "FunctionDef name:__call__ arg:self arg:row arg:col arguments arg arg arg arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "compression_stats",
    "source_code": "def compression_stats(self):\n    compress_rate = self.total_numel_before_compression / self.total_numel_after_compression if self.total_numel_after_compression > 0 else 0\n    return (compress_rate, self.total_numel_before_compression, self.total_numel_after_compression)",
    "docstring": "Return latest compression statistics as tuple. Returns tuple of form (compress_rate, numel_before_compression, numel_after_compression) where: compress_rate is the effective compression rate i.e. (number of elements before compression) / (number of elements after compression); numel_before_compression is the total number of elements before compression was applied; and, numel_after_compression is the total number of elements after compression was applied.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\algorithms\\ddp_comm_hooks\\powerSGD_hook.py",
    "ast_data": "FunctionDef name:compression_stats arg:self arguments arg Assign Compare Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "maybe_remap_node_to_shadow",
    "source_code": "def maybe_remap_node_to_shadow(node):\n    if not isinstance(node, Node):\n        return node\n    if node.op in ('placeholder', 'get_attr'):\n        return node\n    prev_subgraph = _get_subgraph_containing_node(node, subgraphs_dedup)\n    if prev_subgraph is None:\n        prev_subgraph = [node]\n    prev_first_node = prev_subgraph[0]\n    prev_shadow_output = orig_first_node_to_shadow_out_node[prev_first_node]\n    return prev_shadow_output",
    "docstring": "If unshadowed has a shadow version, return that. If not, return .",
    "type": "function",
    "file_path": "pytorch\\torch\\ao\\ns\\fx\\n_shadows_utils.py",
    "ast_data": "FunctionDef name:maybe_remap_node_to_shadow arg:node arguments arg If Call Return return:yes If Compare Return return:yes Assign Call If Compare Assign Assign Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, learning_rate, initial_accumulator_value=0.1, use_locking=False, name='Adagrad'):\n    if initial_accumulator_value <= 0.0:\n        raise ValueError('initial_accumulator_value must be positive: %s' % initial_accumulator_value)\n    super(AdagradOptimizer, self).__init__(use_locking, name)\n    self._learning_rate = learning_rate\n    self._initial_accumulator_value = initial_accumulator_value\n    self._learning_rate_tensor = None",
    "docstring": "Construct a new Adagrad optimizer. Args: learning_rate: A or a floating point value. The learning rate. initial_accumulator_value: A floating point value. Starting value for the accumulators, must be positive. use_locking: If use locks for update operations. name: Optional name prefix for the operations created when applying gradients. Defaults to \"Adagrad\". Raises: ValueError: If the is invalid.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\training\\adagrad.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:learning_rate arg:initial_accumulator_value arg:use_locking arg:name arguments arg arg arg arg arg If Compare Raise Call Call Call Assign Assign Assign"
  },
  {
    "library": "tensorflow",
    "name": "_sequence_like",
    "source_code": "@tf_export('__internal__.nest.sequence_like', v1=[])\ndef _sequence_like(instance, args):\n    return nest_util.sequence_like(instance, args)",
    "docstring": "Converts the sequence to the same type as . Args: instance: an instance of , , , , , or or . args: items to be converted to the type. Returns: with the type of .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\util\\nest.py",
    "ast_data": "FunctionDef name:_sequence_like arg:instance arg:args arguments arg arg Return return:yes Call Call"
  },
  {
    "library": "django",
    "name": "get_form_kwargs",
    "source_code": "def get_form_kwargs(self, index):\n    return self.form_kwargs.copy()",
    "docstring": "Return additional keyword arguments for each individual formset form. index will be None if the form being constructed is a new empty form.",
    "type": "method",
    "file_path": "django\\django\\forms\\formsets.py",
    "ast_data": "FunctionDef name:get_form_kwargs arg:self arg:index arguments arg arg Return return:yes Call"
  },
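The usual way `form_kwargs` reaches individual forms through `get_form_kwargs`; a sketch with a hypothetical `user` kwarg that the form consumes in `__init__`:

```python
from django import forms

class NoteForm(forms.Form):
    text = forms.CharField()

    def __init__(self, *args, user=None, **kwargs):
        self.user = user  # hypothetical per-form extra
        super().__init__(*args, **kwargs)

NoteFormSet = forms.formset_factory(NoteForm)
# get_form_kwargs() hands a copy of form_kwargs to every form it builds.
formset = NoteFormSet(form_kwargs={"user": "alice"})
```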
  {
    "library": "tensorflow",
    "name": "InterpreterWithCustomOps",
    "source_code": "class InterpreterWithCustomOps(Interpreter):\n\n    def __init__(self, custom_op_registerers=None, **kwargs):\n        self._custom_op_registerers = custom_op_registerers or []\n        super(InterpreterWithCustomOps, self).__init__(**kwargs)",
    "docstring": "Interpreter interface for TensorFlow Lite Models that accepts custom ops. The interface provided by this class is experimental and therefore not exposed as part of the public API. Wraps the tf.lite.Interpreter class and adds the ability to load custom ops by providing the names of functions that take a pointer to a BuiltinOpResolver and add a custom op.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\lite\\python\\interpreter.py",
    "ast_data": "ClassDef name:InterpreterWithCustomOps FunctionDef name:__init__ arg:self arg:custom_op_registerers arguments arg arg arg Assign BoolOp Call Call"
  },
  {
    "library": "tensorflow",
    "name": "batch_slice",
    "source_code": "def batch_slice(linop, params_overrides, slices):\n    if not isinstance(slices, collections.abc.Sequence):\n        slices = (slices,)\n    if len(slices) == 1 and slices[0] is Ellipsis:\n        override_dict = {}\n    else:\n        batch_shape = linop.batch_shape_tensor()\n        override_dict = {}\n        for param_name, param_ndims_to_matrix_ndims in linop._experimental_parameter_ndims_to_matrix_ndims.items():\n            param = getattr(linop, param_name)\n            if param is not None:\n                override_dict[param_name] = nest.map_structure_up_to(param, functools.partial(_slice_single_param, slices=slices, batch_shape=batch_shape), param, param_ndims_to_matrix_ndims)\n    override_dict.update(params_overrides)\n    parameters = dict(linop.parameters, **override_dict)\n    return type(linop)(**parameters)",
    "docstring": "Slices along its batch dimensions. Args: linop: A instance. params_overrides: A of parameter overrides. slices: A or or or or thereof. (e.g. the argument of a method). Returns: new_linop: A batch-sliced .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\linalg\\slicing.py",
    "ast_data": "FunctionDef name:batch_slice arg:linop arg:params_overrides arg:slices arguments arg arg arg If Call Assign If BoolOp Compare Call Compare Assign Assign Call Assign For Call Assign Call If Compare Assign Call Call Call Assign Call Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "tensor_shape_from_node_def_name",
    "source_code": "@deprecation.deprecated(date=None, instructions=_DEPRECATION_MSG)\n@tf_export(v1=['graph_util.tensor_shape_from_node_def_name'])\ndef tensor_shape_from_node_def_name(graph, input_name):\n    if ':' not in input_name:\n        canonical_name = input_name + ':0'\n    else:\n        canonical_name = input_name\n    tensor = graph.get_tensor_by_name(canonical_name)\n    shape = tensor.get_shape()\n    return shape",
    "docstring": "Convenience function to get a shape from a NodeDef's input string.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\graph_util_impl.py",
    "ast_data": "FunctionDef name:tensor_shape_from_node_def_name arg:graph arg:input_name arguments arg arg If Compare Assign Assign Assign Call Assign Call Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "run",
    "source_code": "def run(self):\n    self.update()\n    if not torch.jit.is_scripting() and self.tracker is not None:\n        self.call_tracker()\n    while not self.stop_iteration():\n        self.update()\n        if not torch.jit.is_scripting() and self.tracker is not None:\n            self.call_tracker()",
    "docstring": "Run LOBPCG iterations. Use this method as a template for implementing LOBPCG iteration scheme with custom tracker that is compatible with TorchScript.",
    "type": "method",
    "file_path": "pytorch\\torch\\_lobpcg.py",
    "ast_data": "FunctionDef name:run arg:self arguments arg Call If BoolOp Call Compare Call While Call Call If BoolOp Call Compare Call"
  },
  {
    "library": "tensorflow",
    "name": "from_session",
    "source_code": "@classmethod\n@_deprecation.deprecated(None, 'Use `lite.TFLiteConverter.from_session` instead.')\ndef from_session(cls, sess, input_tensors, output_tensors):\n    return TFLiteConverter.from_session(sess, input_tensors, output_tensors)",
    "docstring": "Creates a TocoConverter class from a TensorFlow Session.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\lite\\python\\lite.py",
    "ast_data": "FunctionDef name:from_session arg:cls arg:sess arg:input_tensors arg:output_tensors arguments arg arg arg arg Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "_get_file_writer",
    "source_code": "def _get_file_writer(self):\n    if self.all_writers is None or self.file_writer is None:\n        self.file_writer = FileWriter(self.log_dir, self.max_queue, self.flush_secs, self.filename_suffix)\n        self.all_writers = {self.file_writer.get_logdir(): self.file_writer}\n        if self.purge_step is not None:\n            most_recent_step = self.purge_step\n            self.file_writer.add_event(Event(step=most_recent_step, file_version='brain.Event:2'))\n            self.file_writer.add_event(Event(step=most_recent_step, session_log=SessionLog(status=SessionLog.START)))\n            self.purge_step = None\n    return self.file_writer",
    "docstring": "Return the default FileWriter instance. Recreates it if closed.",
    "type": "method",
    "file_path": "pytorch\\torch\\utils\\tensorboard\\writer.py",
    "ast_data": "FunctionDef name:_get_file_writer arg:self arguments arg If BoolOp Compare Compare Assign Call Assign Call If Compare Assign Call Call Call Call Call Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "gradient_tensor",
    "source_code": "def gradient_tensor(self, x_tensor):\n    x_tensor_name = self._get_tensor_name(x_tensor)\n    if x_tensor_name not in self._gradient_tensors:\n        raise LookupError('This GradientsDebugger has not received any gradient tensor for x-tensor %s' % x_tensor_name)\n    return self._gradient_tensors[x_tensor_name]",
    "docstring": "Get the gradient tensor of an x-tensor. Args: x_tensor: (, or ) The x-tensor object or its name. x-tensor refers to the independent , i.e., the tensor on the denominator of the differentiation. Returns: If found, the gradient tensor. Raises: TypeError: If is not a , or . LookupError: If the has not been registered with a gradient tensor.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\debug\\lib\\debug_gradients.py",
    "ast_data": "FunctionDef name:gradient_tensor arg:self arg:x_tensor arguments arg arg Assign Call If Compare Raise Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "set_linewidth",
    "source_code": "def set_linewidth(self, linewidth):\n    self.patch.set_linewidth(linewidth)",
    "docstring": "Set the line width of the Figure rectangle. Parameters ---------- linewidth : number",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\figure.py",
    "ast_data": "FunctionDef name:set_linewidth arg:self arg:linewidth arguments arg arg Call"
  },
  {
    "library": "django",
    "name": "N",
    "source_code": "def N(self):\n    return MONTHS_AP[self.data.month]",
    "docstring": "Month abbreviation in Associated Press style. Proprietary extension.",
    "type": "method",
    "file_path": "django\\django\\utils\\dateformat.py",
    "ast_data": "FunctionDef name:N arg:self arguments arg Return return:yes"
  },
  {
    "library": "kornia",
    "name": "hat",
    "source_code": "@staticmethod\ndef hat(v: Tensor) -> Tensor:\n    upsilon, omega = (v[..., :3], v[..., 3:])\n    rt = concatenate((So3.hat(omega), upsilon[..., None]), -1)\n    return pad(rt, (0, 0, 0, 1))",
    "docstring": "Convert elements from vector space to lie algebra. Args: v: vector of shape :math:. Returns: matrix of shape :math:. Example: >>> v = torch.ones((1, 6)) >>> m = Se3.hat(v) >>> m tensor([[[ 0., -1., 1., 1.], [ 1., 0., -1., 1.], [-1., 1., 0., 1.], [ 0., 0., 0., 0.]]])",
    "type": "method",
    "file_path": "kornia\\kornia\\geometry\\liegroup\\se3.py",
    "ast_data": "FunctionDef name:hat arg:v arguments arg Assign Assign Call Call Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "set_linewidth",
    "source_code": "def set_linewidth(self, w):\n    w = mpl._val_or_rc(w, 'patch.linewidth')\n    self._linewidth = float(w)\n    self._dash_pattern = mlines._scale_dashes(*self._unscaled_dash_pattern, w)\n    self.stale = True",
    "docstring": "Set the patch linewidth in points. Parameters ---------- w : float or None",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\patches.py",
    "ast_data": "FunctionDef name:set_linewidth arg:self arg:w arguments arg arg Assign Call Assign Call Assign Call Assign"
  },
  {
    "library": "pytorch",
    "name": "add_to_set",
    "source_code": "def add_to_set(self, metric: str, value: Any) -> None:\n    if self._level == 0:\n        raise RuntimeError(f'Cannot add {metric} outside of a MetricsContext')\n    if metric not in self._metrics:\n        self._metrics[metric] = set()\n    self._metrics[metric].add(value)",
    "docstring": "Records a metric as a set() of values.",
    "type": "method",
    "file_path": "pytorch\\torch\\_dynamo\\metrics_context.py",
    "ast_data": "FunctionDef name:add_to_set arg:self arg:metric arg:value arguments arg arg arg If Compare Raise Call If Compare Assign Call Call"
  },
  {
    "library": "matplotlib",
    "name": "update_positions",
    "source_code": "def update_positions(self, renderer):\n    ox0, oy0 = self._get_xy(renderer, self.xybox, self.boxcoords)\n    bbox = self.offsetbox.get_bbox(renderer)\n    fw, fh = self._box_alignment\n    self.offsetbox.set_offset((ox0 - fw * bbox.width - bbox.x0, oy0 - fh * bbox.height - bbox.y0))\n    bbox = self.offsetbox.get_window_extent(renderer)\n    self.patch.set_bounds(bbox.bounds)\n    mutation_scale = renderer.points_to_pixels(self.get_fontsize())\n    self.patch.set_mutation_scale(mutation_scale)\n    if self.arrowprops:\n        arrow_begin = bbox.p0 + bbox.size * self._arrow_relpos\n        arrow_end = self._get_position_xy(renderer)\n        self.arrow_patch.set_positions(arrow_begin, arrow_end)\n        if 'mutation_scale' in self.arrowprops:\n            mutation_scale = renderer.points_to_pixels(self.arrowprops['mutation_scale'])\n        self.arrow_patch.set_mutation_scale(mutation_scale)\n        patchA = self.arrowprops.get('patchA', self.patch)\n        self.arrow_patch.set_patchA(patchA)",
    "docstring": "Update pixel positions for the annotated point, the text, and the arrow.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\offsetbox.py",
    "ast_data": "FunctionDef name:update_positions arg:self arg:renderer arguments arg arg Assign Call Assign Call Assign Call Assign Call Call Assign Call Call Call If Assign Assign Call Call If Compare Assign Call Call Assign Call Call"
  },
  {
    "library": "kornia",
    "name": "_save_outputs",
    "source_code": "def _save_outputs(self, outputs: Union[Tensor, List[Tensor]], directory: Optional[str]=None, suffix: str='') -> None:\n    if directory is None:\n        name = f'{self.name}_{datetime.datetime.now(tz=datetime.timezone.utc).strftime('%Y%m%d%H%M%S')!s}'\n        directory = os.path.join('kornia_outputs', name)\n    os.makedirs(directory, exist_ok=True)\n    for i, out_image in enumerate(outputs):\n        write_image(os.path.join(directory, f'{str(i).zfill(6)}{suffix}.jpg'), out_image.mul(255.0).byte())\n    logger.info(f'Outputs are saved in {directory}')",
    "docstring": "Save the output image(s) to a directory. Args: outputs: output tensor. directory: directory to save the images. suffix: filename suffix.",
    "type": "method",
    "file_path": "kornia\\kornia\\models\\base.py",
    "ast_data": "FunctionDef name:_save_outputs arg:self arg:outputs arg:directory arg:suffix arguments arg arg arg arg If Compare Assign Call Call Assign Call Call For Call Call Call Call Call Call Call Call"
  },
  {
    "library": "django",
    "name": "strip_spaces_between_tags",
    "source_code": "@keep_lazy_text\ndef strip_spaces_between_tags(value):\n    return re.sub('>\\\\s+<', '><', str(value))",
    "docstring": "Return the given HTML with spaces between tags removed.",
    "type": "function",
    "file_path": "django\\django\\utils\\html.py",
    "ast_data": "FunctionDef name:strip_spaces_between_tags arg:value arguments arg Return return:yes Call Call"
  },
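Its effect, shown with the plain `re` equivalent (the Django helper just wraps this substitution in `keep_lazy_text`):

```python
import re

html = "<ul>\n  <li>a</li>\n  <li>b</li>\n</ul>"
print(re.sub(r">\s+<", "><", html))
# <ul><li>a</li><li>b</li></ul>
```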
  {
    "library": "tensorflow",
    "name": "_breadth_first_traversal",
    "source_code": "def _breadth_first_traversal(self):\n    return super(ObjectGraphView, self)._descendants_with_paths()",
    "docstring": "Find shortest paths to all dependencies of self.root.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\checkpoint\\graph_view.py",
    "ast_data": "FunctionDef name:_breadth_first_traversal arg:self arguments arg Return return:yes Call Call"
  },
  {
    "library": "pandas",
    "name": "__repr__",
    "source_code": "def __repr__(self) -> str:\n    self.infer_axes()\n    jdc = ','.join(self.data_columns) if len(self.data_columns) else ''\n    dc = f',dc->[{jdc}]'\n    ver = ''\n    if self.is_old_version:\n        jver = '.'.join([str(x) for x in self.version])\n        ver = f'[{jver}]'\n    jindex_axes = ','.join([a.name for a in self.index_axes])\n    return f'{self.pandas_type:12.12}{ver} (typ->{self.table_type_short},nrows->{self.nrows},ncols->{self.ncols},indexers->[{jindex_axes}]{dc})'",
    "docstring": "return a pretty representation of myself",
    "type": "method",
    "file_path": "pandas\\pandas\\io\\pytables.py",
    "ast_data": "FunctionDef name:__repr__ arg:self arguments arg Call Assign Call Call Assign Assign If Assign Call Call Assign Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "dequeue",
    "source_code": "def dequeue(self, name: Optional[Text]=None):\n    if not self._using_tpu:\n        raise RuntimeError('dequeue is not valid when TPUEmbedding object is not created under a TPUStrategy.')\n    if not self._built:\n        raise RuntimeError('dequeue called on unbuilt TPUEmbedding object. Please either call enqueue first or manually call the build method.')\n    activations = tpu_ops.recv_tpu_embedding_activations(num_outputs=len(self._config_proto.feature_descriptor), config=self._config_proto.SerializeToString())\n    if name is not None:\n        _add_key_attr(activations[0].op, name)\n    return nest.pack_sequence_as(self._feature_config, activations)",
    "docstring": "Get the embedding results. Returns a nested structure of objects, matching the structure of the argument to the class. The output shape of the tensors is , is the dimension of the corresponding . For output_shape, there are three places where it can be set. 1. FeatureConfig provided in the __init__ function. 2. Per_replica_output_shapes by directly calling the build method after initializing the tpu embedding class. 3. Auto detected from the shapes of the input feature. The priority of these places is the exact same order. Args: name: A name for the underlying op. Returns: A nested structure of tensors, with the same structure as passed to this instance of the object. Raises: RuntimeError: If called when object wasn't created under a or if not built (either by manually calling build or calling enqueue).",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\tpu\\tpu_embedding_v2.py",
    "ast_data": "FunctionDef name:dequeue arg:self arg:name arguments arg arg If Raise Call If Raise Call Assign Call Call Call If Compare Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "clear",
    "source_code": "def clear(self, name=None):\n    if name is None:\n        name = '%s_clear' % self._name\n    return gen_data_flow_ops.stage_clear(name=name, shared_name=self._name, dtypes=self._dtypes, capacity=self._capacity, memory_limit=self._memory_limit)",
    "docstring": "Clears the staging area. Args: name: A name for the operation (optional) Returns: The created op",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\data_flow_ops.py",
    "ast_data": "FunctionDef name:clear arg:self arg:name arguments arg arg If Compare Assign Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "_zoom_data_limits",
    "source_code": "def _zoom_data_limits(self, scale_u, scale_v, scale_w):\n    scale = np.array([scale_u, scale_v, scale_w])\n    if not np.allclose(scale, scale_u):\n        R = np.array([self._view_u, self._view_v, self._view_w])\n        S = scale * np.eye(3)\n        scale = np.linalg.norm(R.T @ S, axis=1)\n        if self._aspect in ('equal', 'equalxy', 'equalxz', 'equalyz'):\n            ax_idxs = self._equal_aspect_axis_indices(self._aspect)\n            min_ax_idxs = np.argmin(np.abs(scale[ax_idxs] - 1))\n            scale[ax_idxs] = scale[ax_idxs][min_ax_idxs]\n    self._scale_axis_limits(scale[0], scale[1], scale[2])",
    "docstring": "Zoom in or out of a 3D plot. Will scale the data limits by the scale factors. These will be transformed to the x, y, z data axes based on the current view angles. A scale factor > 1 zooms out and a scale factor < 1 zooms in. For an Axes that has had its aspect ratio set to 'equal', 'equalxy', 'equalyz', or 'equalxz', the relevant axes are constrained to zoom equally. Parameters ---------- scale_u : float Scale factor for the u view axis (view screen horizontal). scale_v : float Scale factor for the v view axis (view screen vertical). scale_w : float Scale factor for the w view axis (view screen depth).",
    "type": "method",
    "file_path": "matplotlib\\lib\\mpl_toolkits\\mplot3d\\axes3d.py",
    "ast_data": "FunctionDef name:_zoom_data_limits arg:self arg:scale_u arg:scale_v arg:scale_w arguments arg arg arg arg Assign Call If Call Assign Call Assign Call Assign Call If Compare Assign Call Assign Call Call Assign Call"
  },
  {
    "library": "pandas",
    "name": "get_data_from_filepath",
    "source_code": "def get_data_from_filepath(filepath_or_buffer: FilePath | bytes | ReadBuffer[bytes] | ReadBuffer[str], encoding: str | None, compression: CompressionOptions, storage_options: StorageOptions):\n    filepath_or_buffer = stringify_path(filepath_or_buffer)\n    with get_handle(filepath_or_buffer, 'r', encoding=encoding, compression=compression, storage_options=storage_options) as handle_obj:\n        return preprocess_data(handle_obj.handle.read()) if hasattr(handle_obj.handle, 'read') else handle_obj.handle",
    "docstring": "Extract raw XML data. The method accepts two input types: 1. filepath (string-like) 2. file-like object (e.g. open file object, StringIO)",
    "type": "function",
    "file_path": "pandas\\pandas\\io\\xml.py",
    "ast_data": "FunctionDef name:get_data_from_filepath arg:filepath_or_buffer arg:encoding arg:compression arg:storage_options arguments arg arg arg arg Assign Call With Call Return return:yes Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "cached",
    "source_code": "@contextmanager\ndef cached():\n    global _cache\n    global _cache_enabled\n    _cache_enabled += 1\n    try:\n        yield\n    finally:\n        _cache_enabled -= 1\n        if not _cache_enabled:\n            _cache = {}",
    "docstring": "Context manager that enables the caching system within parametrizations registered with :func:. The value of the parametrized objects is computed and cached the first time they are required when this context manager is active. The cached values are discarded when leaving the context manager. This is useful when using a parametrized parameter more than once in the forward pass. An example of this is when parametrizing the recurrent kernel of an RNN or when sharing weights. The simplest way to activate the cache is by wrapping the forward pass of the neural network .. code-block:: python import torch.nn.utils.parametrize as P ... with P.cached(): output = model(inputs) in training and evaluation. One may also wrap the parts of the modules that use several times the parametrized tensors. For example, the loop of an RNN with a parametrized recurrent kernel: .. code-block:: python with P.cached(): for x in xs: out_rnn = self.rnn_cell(x, out_rnn)",
    "type": "function",
    "file_path": "pytorch\\torch\\nn\\utils\\parametrize.py",
    "ast_data": "FunctionDef name:cached arguments Try If Assign"
  },
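A runnable version of the docstring's sketch, assuming a simple symmetric parametrization; the parametrized weight is materialized once inside the `with` block and reused across the two calls:

```python
import torch
import torch.nn as nn
import torch.nn.utils.parametrize as P

class Symmetric(nn.Module):
    def forward(self, X):
        # Build a symmetric matrix from the upper triangle.
        return X.triu() + X.triu(1).transpose(-1, -2)

layer = nn.Linear(4, 4)
P.register_parametrization(layer, "weight", Symmetric())

with P.cached():  # weight computed once, then served from the cache
    y1 = layer(torch.randn(2, 4))
    y2 = layer(torch.randn(2, 4))
```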
  {
    "library": "pytorch",
    "name": "_reduce_symint",
    "source_code": "def _reduce_symint(self, s: SymInt) -> tuple[Callable[[T], T], tuple[str]]:\n    return (_ident, (str(s),))",
    "docstring": "Custom reducer to pickle SymInts.",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\codecache.py",
    "ast_data": "FunctionDef name:_reduce_symint arg:self arg:s arguments arg arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, dataset_fn, coordinator):\n\n    def disallow_variable_creation(next_creator, **kwargs):\n        raise ValueError('Creating variables in `dataset_fn` is not allowed.')\n    if isinstance(dataset_fn, def_function.Function):\n        with variable_scope.variable_creator_scope(disallow_variable_creation):\n            dataset_fn = dataset_fn.get_concrete_function()\n    elif not isinstance(dataset_fn, tf_function.ConcreteFunction):\n        with variable_scope.variable_creator_scope(disallow_variable_creation):\n            dataset_fn = def_function.function(dataset_fn).get_concrete_function()\n    self._dataset_fn = dataset_fn\n    self._coordinator = coordinator\n    self._element_spec = None",
    "docstring": "Makes an iterable from datasets created by the given function. Args: dataset_fn: A function that returns a . coordinator: a object, used to create dataset resources.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\coordinator\\values.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:dataset_fn arg:coordinator arguments arg arg arg FunctionDef name:disallow_variable_creation arg:next_creator arguments arg arg Raise Call If Call With Call Assign Call If Call With Call Assign Call Call Assign Assign Assign"
  },
  {
    "library": "authlib",
    "name": "revoke_old_credential",
    "source_code": "def revoke_old_credential(self, refresh_token):\n    raise NotImplementedError()",
    "docstring": "The authorization server MAY revoke the old refresh token after issuing a new refresh token to the client. Developers MUST implement this method in subclass:: def revoke_old_credential(self, refresh_token): credential.revoked = True credential.save() :param refresh_token: Token object",
    "type": "method",
    "file_path": "authlib\\authlib\\oauth2\\rfc6749\\grants\\refresh_token.py",
    "ast_data": "FunctionDef name:revoke_old_credential arg:self arg:refresh_token arguments arg arg Raise Call"
  },
  {
    "library": "tensorflow",
    "name": "_UnsortedSegmentMinGrad",
    "source_code": "@ops.RegisterGradient('UnsortedSegmentMin')\ndef _UnsortedSegmentMinGrad(op: ops.Operation, grad):\n    return _UnsortedSegmentMinOrMaxGrad(op, grad)",
    "docstring": "Gradient for UnsortedSegmentMin.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\math_grad.py",
    "ast_data": "FunctionDef name:_UnsortedSegmentMinGrad arg:op arg:grad arguments arg arg Return return:yes Call Call"
  },
  {
    "library": "kornia",
    "name": "exp",
    "source_code": "@staticmethod\ndef exp(v: Tensor) -> So3:\n    theta = batched_dot_product(v, v).sqrt()[..., None]\n    theta_nonzeros = theta != 0.0\n    theta_half = 0.5 * theta\n    w = where(theta_nonzeros, theta_half.cos(), tensor(1.0, device=v.device, dtype=v.dtype))\n    b = where(theta_nonzeros, theta_half.sin() / theta, tensor(0.0, device=v.device, dtype=v.dtype))\n    xyz = b * v\n    return So3(Quaternion(concatenate((w, xyz), -1)))",
    "docstring": "Convert elements of lie algebra to elements of lie group. See more: Args: v: vector of shape :math:. Example: >>> v = torch.zeros((2, 3)) >>> s = So3.exp(v) >>> s Parameter containing: tensor([[1., 0., 0., 0.], [1., 0., 0., 0.]], requires_grad=True)",
    "type": "method",
    "file_path": "kornia\\kornia\\geometry\\liegroup\\so3.py",
    "ast_data": "FunctionDef name:exp arg:v arguments arg Assign Call Call Assign Compare Assign Assign Call Call Call Assign Call Call Call Assign Return return:yes Call Call Call"
  },
  {
    "library": "django",
    "name": "end_transaction_sql",
    "source_code": "def end_transaction_sql(self, success=True):\n    if not success:\n        return 'ROLLBACK;'\n    return 'COMMIT;'",
    "docstring": "Return the SQL statement required to end a transaction.",
    "type": "method",
    "file_path": "django\\django\\db\\backends\\base\\operations.py",
    "ast_data": "FunctionDef name:end_transaction_sql arg:self arg:success arguments arg arg If Return return:yes Return return:yes"
  },
  {
    "library": "scipy",
    "name": "_penalized_nlpsf",
    "source_code": "def _penalized_nlpsf(self, theta, x):\n    loc, scale, args = self._unpack_loc_scale(theta)\n    if not self._argcheck(*args) or scale <= 0:\n        return inf\n    x = (np.sort(x) - loc) / scale\n\n    def log_psf(x, *args):\n        x, lj = np.unique(x, return_counts=True)\n        cdf_data = self._cdf(x, *args) if x.size else []\n        if not (x.size and 1 - cdf_data[-1] <= 0):\n            cdf = np.concatenate(([0], cdf_data, [1]))\n            lj = np.concatenate((lj, [1]))\n        else:\n            cdf = np.concatenate(([0], cdf_data))\n        return lj * np.log(np.diff(cdf) / lj)\n    return self._nlff_and_penalty(x, args, log_psf)",
    "docstring": "Penalized negative log product spacing function. i.e., - sum (log (diff (cdf (x, theta))), axis=0) + penalty where theta are the parameters (including loc and scale) Follows reference [1] of scipy.stats.fit",
    "type": "method",
    "file_path": "scipy\\scipy\\stats\\_distn_infrastructure.py",
    "ast_data": "FunctionDef name:_penalized_nlpsf arg:self arg:theta arg:x arguments arg arg arg Assign Call If BoolOp Call Compare Return return:yes Assign Call FunctionDef name:log_psf arg:x arguments arg arg Assign Call Assign Call If BoolOp Compare Assign Call Assign Call Assign Call Return return:yes Call Call Return return:yes Call"
  },
  {
    "library": "django",
    "name": "reset_format_cache",
    "source_code": "def reset_format_cache():\n    global _format_cache, _format_modules_cache\n    _format_cache = {}\n    _format_modules_cache = {}",
    "docstring": "Clear any cached formats. This method is provided primarily for testing purposes, so that the effects of cached formats can be removed.",
    "type": "function",
    "file_path": "django\\django\\utils\\formats.py",
    "ast_data": "FunctionDef name:reset_format_cache arguments Assign Assign"
  },
  {
    "library": "kornia",
    "name": "intersect",
    "source_code": "def intersect(self, plane: Hyperplane, eps: float=1e-06) -> Tuple[Tensor, Tensor]:\n    dot_prod = batched_dot_product(plane.normal.data, self.direction.data)\n    dot_prod_mask = dot_prod.abs() >= eps\n    res_lambda = where(dot_prod_mask, -(plane.offset + batched_dot_product(plane.normal.data, self.origin.data)) / dot_prod, torch.empty_like(dot_prod))\n    res_point = self.point_at(res_lambda)\n    return (res_lambda, res_point)",
    "docstring": "Return the intersection point between the line and a given plane. Args: plane: the plane to compute the intersection point. eps: epsilon for numerical stability. Return: - the lambda value used to compute the look at point. - the intersected point.",
    "type": "method",
    "file_path": "kornia\\kornia\\geometry\\line.py",
    "ast_data": "FunctionDef name:intersect arg:self arg:plane arg:eps arguments arg arg arg Assign Call Assign Compare Call Assign Call Call Call Assign Call Return return:yes"
  },
  {
    "library": "scipy",
    "name": "eta",
    "source_code": "def eta(lam):\n    if lam > 0:\n        return mp.sqrt(2 * (lam - mp.log(lam + 1)))\n    elif lam < 0:\n        return -mp.sqrt(2 * (lam - mp.log(lam + 1)))\n    else:\n        return 0",
    "docstring": "Function from DLMF 8.12.1 shifted to be centered at 0.",
    "type": "function",
    "file_path": "scipy\\scipy\\special\\_precompute\\gammainc_asy.py",
    "ast_data": "FunctionDef name:eta arg:lam arguments arg If Compare Return return:yes Call Call If Compare Return return:yes Call Call Return return:yes"
  },
  {
    "library": "sphinx",
    "name": "invisible_visit",
    "source_code": "def invisible_visit(self, node: Node) -> None:\n    pass",
    "docstring": "Invisible nodes should be ignored.",
    "type": "method",
    "file_path": "sphinx\\sphinx\\transforms\\compact_bullet_list.py",
    "ast_data": "FunctionDef name:invisible_visit arg:self arg:node arguments arg arg"
  },
  {
    "library": "scikit-learn",
    "name": "decision_function",
    "source_code": "def decision_function(self, X):\n    dec = self._decision_function(X).ravel()\n    return dec",
    "docstring": "Signed distance to the separating hyperplane. Signed distance is positive for an inlier and negative for an outlier. Parameters ---------- X : array-like of shape (n_samples, n_features) The data matrix. Returns ------- dec : ndarray of shape (n_samples,) Returns the decision function of the samples.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\svm\\_classes.py",
    "ast_data": "FunctionDef name:decision_function arg:self arg:X arguments arg arg Assign Call Call Return return:yes"
  },
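The sign convention in practice (positive for inliers, negative for outliers), sketched with `OneClassSVM`, one of the estimators exposing this method:

```python
import numpy as np
from sklearn.svm import OneClassSVM

rng = np.random.RandomState(0)
X = rng.randn(50, 2)
clf = OneClassSVM(gamma="auto").fit(X)

# Two training points plus one far-away point.
scores = clf.decision_function(np.vstack([X[:2], [[8.0, 8.0]]]))
print(scores)  # inliers tend to score >= 0, the distant point < 0
```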
  {
    "library": "scikit-learn",
    "name": "get_feature_names_out",
    "source_code": "@available_if(lambda self: self.feature_names_out is not None)\ndef get_feature_names_out(self, input_features=None):\n    if hasattr(self, 'n_features_in_') or input_features is not None:\n        input_features = _check_feature_names_in(self, input_features)\n    if self.feature_names_out == 'one-to-one':\n        names_out = input_features\n    elif callable(self.feature_names_out):\n        names_out = self.feature_names_out(self, input_features)\n    else:\n        raise ValueError(f'feature_names_out={self.feature_names_out!r} is invalid. It must either be \"one-to-one\" or a callable with two arguments: the function transformer and an array-like of input feature names. The callable must return an array-like of output feature names.')\n    return np.asarray(names_out, dtype=object)",
    "docstring": "Get output feature names for transformation. This method is only defined if is not None. Parameters ---------- input_features : array-like of str or None, default=None Input feature names. - If is None, then is used as the input feature names. If is not defined, then names are generated: . - If is array-like, then must match if is defined. Returns ------- feature_names_out : ndarray of str objects Transformed feature names. - If is 'one-to-one', the input feature names are returned (see above). This requires and/or to be defined, which is done automatically if . Alternatively, you can set them in . - If is a callable, then it is called with two arguments, and , and its return value is returned by this method.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\preprocessing\\_function_transformer.py",
    "ast_data": "FunctionDef name:get_feature_names_out arg:self arg:input_features arguments arg arg If BoolOp Call Compare Assign Call If Compare Assign If Call Assign Call Raise Call Return return:yes Call Call arguments arg Compare"
  },
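Both branches of `feature_names_out` in one sketch, using `FunctionTransformer`:

```python
import numpy as np
from sklearn.preprocessing import FunctionTransformer

X = np.array([[1.0, 2.0], [3.0, 4.0]])

# 'one-to-one': input names pass through unchanged.
ft = FunctionTransformer(np.log1p, feature_names_out="one-to-one").fit(X)
print(ft.get_feature_names_out(["a", "b"]))  # ['a' 'b']

# callable: receives (transformer, input_features).
ft2 = FunctionTransformer(
    np.log1p,
    feature_names_out=lambda self, names: [f"log1p_{n}" for n in names],
).fit(X)
print(ft2.get_feature_names_out(["a", "b"]))  # ['log1p_a' 'log1p_b']
```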
  {
    "library": "cherrypy",
    "name": "NotFound",
    "source_code": "class NotFound(HTTPError):\n\n    def __init__(self, path=None):\n        if path is None:\n            request = cherrypy.serving.request\n            path = request.script_name + request.path_info\n        self.args = (path,)\n        HTTPError.__init__(self, 404, \"The path '%s' was not found.\" % path)",
    "docstring": "Exception raised when a URL could not be mapped to any handler (404). This is equivalent to raising :class:.",
    "type": "class",
    "file_path": "cherrypy\\cherrypy\\_cperror.py",
    "ast_data": "ClassDef name:NotFound FunctionDef name:__init__ arg:self arg:path arguments arg arg If Compare Assign Assign Assign Call"
  },
  {
    "library": "pytorch",
    "name": "get_fuser_method",
    "source_code": "def get_fuser_method(op_list, additional_fuser_method_mapping=None):\n    if additional_fuser_method_mapping is None:\n        additional_fuser_method_mapping = {}\n    all_mappings = get_combined_dict(_DEFAULT_OP_LIST_TO_FUSER_METHOD, additional_fuser_method_mapping)\n    fuser_method = all_mappings.get(op_list, None)\n    assert fuser_method is not None, f'did not find fuser method for: {op_list} '\n    return fuser_method",
    "docstring": "Get fuser method for the given list of module types. Get fuser method for the given list of module types, return None if fuser method does not exist",
    "type": "function",
    "file_path": "pytorch\\torch\\ao\\quantization\\fuser_method_mappings.py",
    "ast_data": "FunctionDef name:get_fuser_method arg:op_list arg:additional_fuser_method_mapping arguments arg arg If Compare Assign Assign Call Assign Call Compare Return return:yes"
  },
  {
    "library": "scipy",
    "name": "_matvec",
    "source_code": "def _matvec(self, x):\n    x = x.reshape(self.n, -1)\n    result_dtype = np.promote_types(x.dtype, self.dtype)\n    sx = np.zeros_like(x, dtype=result_dtype)\n    sx[0, :] = 5 * x[0, :] - 4 * x[1, :] + x[2, :]\n    sx[-1, :] = 5 * x[-1, :] - 4 * x[-2, :] + x[-3, :]\n    sx[1:-1, :] = 6 * x[1:-1, :] - 4 * (x[:-2, :] + x[2:, :]) + np.pad(x[:-3, :], ((1, 0), (0, 0))) + np.pad(x[3:, :], ((0, 1), (0, 0)))\n    return sx",
    "docstring": "Construct matrix-free callable banded-matrix-vector multiplication by the Sakurai matrix without constructing or storing the matrix itself using the knowledge of its entries and the 5-diagonal format.",
    "type": "method",
    "file_path": "scipy\\scipy\\sparse\\linalg\\_special_sparse_arrays.py",
    "ast_data": "FunctionDef name:_matvec arg:self arg:x arguments arg arg Assign Call Assign Call Assign Call Assign Assign Assign Call Call Return return:yes"
  },
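A pure-NumPy check of the pentadiagonal stencil this matvec implements (corner diagonal entries 5, interior diagonal 6, first off-diagonals -4, second off-diagonals 1); the class itself lives in a private SciPy module, so the dense reference matrix is built by hand here:

```python
import numpy as np

n = 6
S = (np.diag([5.0] + [6.0] * (n - 2) + [5.0])
     + np.diag([-4.0] * (n - 1), 1) + np.diag([-4.0] * (n - 1), -1)
     + np.diag([1.0] * (n - 2), 2) + np.diag([1.0] * (n - 2), -2))

def sakurai_matvec(x):
    # Matrix-free 5-diagonal multiplication, mirroring the method above.
    x = x.reshape(n, -1)
    sx = np.zeros_like(x)
    sx[0, :] = 5 * x[0, :] - 4 * x[1, :] + x[2, :]
    sx[-1, :] = 5 * x[-1, :] - 4 * x[-2, :] + x[-3, :]
    sx[1:-1, :] = (6 * x[1:-1, :] - 4 * (x[:-2, :] + x[2:, :])
                   + np.pad(x[:-3, :], ((1, 0), (0, 0)))
                   + np.pad(x[3:, :], ((0, 1), (0, 0))))
    return sx

x = np.arange(n, dtype=float).reshape(n, 1)
assert np.allclose(S @ x, sakurai_matvec(x))
```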
  {
    "library": "django",
    "name": "_update",
    "source_code": "def _update(self, values):\n    if self.query.is_sliced:\n        raise TypeError('Cannot update a query once a slice has been taken.')\n    query = self.query.chain(sql.UpdateQuery)\n    query.add_update_fields(values)\n    query.annotations = {}\n    self._result_cache = None\n    return query.get_compiler(self.db).execute_sql(ROW_COUNT)",
    "docstring": "A version of update() that accepts field objects instead of field names. Used primarily for model saving and not intended for use by general code (it requires too much poking around at model internals to be useful at that level).",
    "type": "method",
    "file_path": "django\\django\\db\\models\\query.py",
    "ast_data": "FunctionDef name:_update arg:self arg:values arguments arg arg If Raise Call Assign Call Call Assign Assign Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "get_gradient_function",
    "source_code": "def get_gradient_function(self):\n    return self._rewrite_forward_and_call_backward",
    "docstring": "Returns gradient function. The gradient rewrites an inference call op to a forward call op, but does not modify a pre-existing forward call op. It then computes the gradient from the output's gradients and the side outputs of the forward op.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\polymorphic_function\\concrete_function.py",
    "ast_data": "FunctionDef name:get_gradient_function arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "reader_ref",
    "source_code": "@property\ndef reader_ref(self):\n    return self._reader_ref",
    "docstring": "Op that implements the reader.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\io_ops.py",
    "ast_data": "FunctionDef name:reader_ref arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "worker_count",
    "source_code": "def worker_count(cluster_spec, task_type):\n    _validate_cluster_spec(cluster_spec, task_type, task_id=0)\n    cluster_spec = normalize_cluster_spec(cluster_spec).as_dict()\n    if task_type not in ['chief', 'worker', 'evaluator']:\n        raise ValueError('Unexpected `task_type` %r' % task_type)\n    if task_type == 'evaluator':\n        return len(cluster_spec['evaluator'])\n    else:\n        return len(cluster_spec.get('chief', [])) + len(cluster_spec.get('worker', []))",
    "docstring": "Returns the number of workers in the cluster.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\multi_worker_util.py",
    "ast_data": "FunctionDef name:worker_count arg:cluster_spec arg:task_type arguments arg arg Call Assign Call Call If Compare Raise Call If Compare Return return:yes Call Return return:yes Call Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "get_module",
    "source_code": "def get_module(dir_path, relative_to_dir):\n    dir_path = dir_path[len(relative_to_dir):]\n    dir_path = dir_path.replace(os.sep, '/')\n    return dir_path.replace('/', '.').strip('.')",
    "docstring": "Get module that corresponds to path relative to relative_to_dir. Args: dir_path: Path to directory. relative_to_dir: Get module relative to this directory. Returns: Name of module that corresponds to the given directory.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\tools\\api\\generator\\create_python_api.py",
    "ast_data": "FunctionDef name:get_module arg:dir_path arg:relative_to_dir arguments arg arg Assign Call Assign Call Return return:yes Call Call"
  },
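The same transformation as a standalone sketch, with a hypothetical pair of paths (shown for a POSIX `os.sep`):

```python
import os

def get_module(dir_path, relative_to_dir):
    # Strip the prefix, then turn the remaining path into a dotted module.
    dir_path = dir_path[len(relative_to_dir):]
    dir_path = dir_path.replace(os.sep, '/')
    return dir_path.replace('/', '.').strip('.')

print(get_module('/api/tf/compat/v1', '/api/'))  # tf.compat.v1
```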
  {
    "library": "scipy",
    "name": "SpLuInv",
    "source_code": "class SpLuInv(LinearOperator):\n\n    def __init__(self, M):\n        self.M_lu = splu(M)\n        self.shape = M.shape\n        self.dtype = M.dtype\n        self.isreal = not np.issubdtype(self.dtype, np.complexfloating)\n\n    def _matvec(self, x):\n        x = np.asarray(x)\n        if self.isreal and np.issubdtype(x.dtype, np.complexfloating):\n            return self.M_lu.solve(np.real(x).astype(self.dtype)) + 1j * self.M_lu.solve(np.imag(x).astype(self.dtype))\n        else:\n            return self.M_lu.solve(x.astype(self.dtype))",
    "docstring": "SpLuInv: helper class to repeatedly solve M*x=b using a sparse LU-decomposition of M",
    "type": "class",
    "file_path": "scipy\\scipy\\sparse\\linalg\\_eigen\\arpack\\arpack.py",
    "ast_data": "ClassDef name:SpLuInv FunctionDef name:__init__ arg:self arg:M arguments arg arg Assign Call Assign Assign Assign Call FunctionDef name:_matvec arg:self arg:x arguments arg arg Assign Call If BoolOp Call Return return:yes Call Call Call Call Call Call Return return:yes Call Call"
  },
  {
    "library": "scikit-learn",
    "name": "get_support",
    "source_code": "def get_support(self, indices=False):\n    mask = self._get_support_mask()\n    return mask if not indices else np.nonzero(mask)[0]",
    "docstring": "Get a mask, or integer index, of the features selected. Parameters ---------- indices : bool, default=False If True, the return value will be an array of integers, rather than a boolean mask. Returns ------- support : array An index that selects the retained features from a feature vector. If is False, this is a boolean array of shape [# input features], in which an element is True iff its corresponding feature is selected for retention. If is True, this is an integer array of shape [# output features] whose values are indices into the input feature vector.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\feature_selection\\_base.py",
    "ast_data": "FunctionDef name:get_support arg:self arg:indices arguments arg arg Assign Call Return return:yes Call"
  },
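Mask vs. indices, using `SelectKBest` as a concrete selector:

```python
from sklearn.datasets import load_iris
from sklearn.feature_selection import SelectKBest, f_classif

X, y = load_iris(return_X_y=True)
sel = SelectKBest(f_classif, k=2).fit(X, y)

print(sel.get_support())              # boolean mask over the 4 input features
print(sel.get_support(indices=True))  # the same selection as integer indices
```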
  {
    "library": "pandas",
    "name": "_apply_axis_properties",
    "source_code": "@final\n@staticmethod\ndef _apply_axis_properties(axis: Axis, rot=None, fontsize: int | None=None) -> None:\n    if rot is not None or fontsize is not None:\n        labels = axis.get_majorticklabels() + axis.get_minorticklabels()\n        for label in labels:\n            if rot is not None:\n                label.set_rotation(rot)\n            if fontsize is not None:\n                label.set_fontsize(fontsize)",
    "docstring": "Tick creation within matplotlib is reasonably expensive and is internally deferred until accessed as Ticks are created/destroyed multiple times per draw. It's therefore beneficial for us to avoid accessing unless we will act on the Tick.",
    "type": "method",
    "file_path": "pandas\\pandas\\plotting\\_matplotlib\\core.py",
    "ast_data": "FunctionDef name:_apply_axis_properties arg:axis arg:rot arg:fontsize arguments arg arg arg If BoolOp Compare Compare Assign Call Call For If Compare Call If Compare Call"
  },
  {
    "library": "pandas",
    "name": "keys",
    "source_code": "def keys(self) -> Index:\n    return self._info_axis",
    "docstring": "Get the 'info axis' (see Indexing for more). This is index for Series, columns for DataFrame. Returns ------- Index Info axis. See Also -------- DataFrame.index : The index (row labels) of the DataFrame. DataFrame.columns: The column labels of the DataFrame. Examples -------- >>> d = pd.DataFrame( ... data={\"A\": [1, 2, 3], \"B\": [0, 4, 8]}, index=[\"a\", \"b\", \"c\"] ... ) >>> d A B a 1 0 b 2 4 c 3 8 >>> d.keys() Index(['A', 'B'], dtype='object')",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\generic.py",
    "ast_data": "FunctionDef name:keys arg:self arguments arg Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "_estimate_wishart_tied",
    "source_code": "def _estimate_wishart_tied(self, nk, xk, sk):\n    _, n_features = xk.shape\n    self.degrees_of_freedom_ = self.degrees_of_freedom_prior_ + nk.sum() / self.n_components\n    diff = xk - self.mean_prior_\n    self.covariances_ = self.covariance_prior_ + sk * nk.sum() / self.n_components + self.mean_precision_prior_ / self.n_components * np.dot(nk / self.mean_precision_ * diff.T, diff)\n    self.covariances_ /= self.degrees_of_freedom_",
    "docstring": "Estimate the tied Wishart distribution parameters. Parameters ---------- X : array-like of shape (n_samples, n_features) nk : array-like of shape (n_components,) xk : array-like of shape (n_components, n_features) sk : array-like of shape (n_features, n_features)",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\mixture\\_bayesian_mixture.py",
    "ast_data": "FunctionDef name:_estimate_wishart_tied arg:self arg:nk arg:xk arg:sk arguments arg arg arg arg Assign Assign Call Assign Assign Call Call"
  },
  {
    "library": "matplotlib",
    "name": "twinx",
    "source_code": "def twinx(self, axes_class=None, **kwargs):\n    if axes_class:\n        kwargs['axes_class'] = axes_class\n    ax2 = self._make_twin_axes(sharex=self, **kwargs)\n    ax2.yaxis.tick_right()\n    ax2.yaxis.set_label_position('right')\n    ax2.yaxis.set_offset_position('right')\n    ax2.set_autoscalex_on(self.get_autoscalex_on())\n    self.yaxis.tick_left()\n    ax2.xaxis.set_visible(False)\n    ax2.patch.set_visible(False)\n    ax2.xaxis.units = self.xaxis.units\n    return ax2",
    "docstring": "Create a twin Axes sharing the xaxis. Create a new Axes with an invisible x-axis and an independent y-axis positioned opposite to the original one (i.e. at right). The x-axis autoscale setting will be inherited from the original Axes. To ensure that the tick marks of both y-axes align, see . Parameters ---------- axes_class : subclass type of , optional The subclass that is instantiated. This parameter is incompatible with *projection* and *polar*. See :ref: for examples. By default, is used. .. versionadded:: 3.11 kwargs : dict The keyword arguments passed to or . .. versionadded:: 3.11 Returns ------- Axes The newly created Axes instance Notes ----- For those who are 'picking' artists while using twinx, pick events are only called for the artists in the top-most Axes.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\axes\\_base.py",
    "ast_data": "FunctionDef name:twinx arg:self arg:axes_class arguments arg arg arg If Assign Assign Call Call Call Call Call Call Call Call Call Assign Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "replace",
    "source_code": "@_lru_cache\ndef replace(self, expr: _SympyT) -> _SympyT:\n    replacements = {}\n    for s in expr.free_symbols:\n        r = self._find(s)\n        if not r.is_Symbol or r != s:\n            replacements[s] = r\n    if replacements:\n        return safe_expand(expr.xreplace(replacements))\n    else:\n        return expr",
    "docstring": "Apply symbol replacements to any symbols in the given expression",
    "type": "method",
    "file_path": "pytorch\\torch\\fx\\experimental\\symbolic_shapes.py",
    "ast_data": "FunctionDef name:replace arg:self arg:expr arguments arg arg Assign For Assign Call If BoolOp Compare Assign If Return return:yes Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "add_c_function",
    "source_code": "def add_c_function(c_func):\n    context().add_c_function(c_func)",
    "docstring": "Add a C API TF_Function to the context.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\context.py",
    "ast_data": "FunctionDef name:add_c_function arg:c_func arguments arg Call Call"
  },
  {
    "library": "pytorch",
    "name": "canonicalize_aten_ir_passes",
    "source_code": "def canonicalize_aten_ir_passes(gm: torch.fx.GraphModule):\n    canonicalize_quant_mapping(gm)",
    "docstring": "Canonicalization passes that will run immediately after aot autograd tracing. Thsis must be run before all other graph passes.",
    "type": "function",
    "file_path": "pytorch\\torch\\_inductor\\fx_passes\\joint_graph.py",
    "ast_data": "FunctionDef name:canonicalize_aten_ir_passes arg:gm arguments arg Call"
  },
  {
    "library": "tensorflow",
    "name": "OptimizeGraph",
    "source_code": "def OptimizeGraph(config_proto, metagraph, verbose=True, graph_id=b'graph_to_optimize', cluster=None, strip_default_attributes=False):\n    if not isinstance(config_proto, config_pb2.ConfigProto):\n        raise TypeError(f'Argument `config_proto` should be a tf.ConfigProto, received type: {type(config_proto).__name__}')\n    if is_oss:\n        optimize_method = tf_opt.TF_OptimizeGraphSerialized\n        metagraph = metagraph.SerializeToString()\n    else:\n        optimize_method = tf_opt.TF_OptimizeGraph\n    if cluster is not None:\n        out_graph = optimize_method(cluster.tf_cluster, config_proto.SerializeToString(), metagraph, verbose, graph_id, strip_default_attributes)\n    else:\n        with _OPTIMIZE_GRAPH_CLUSTER_LOCK:\n            cluster = gcluster.Cluster()\n            try:\n                out_graph = optimize_method(cluster.tf_cluster, config_proto.SerializeToString(), metagraph, verbose, graph_id, strip_default_attributes)\n            finally:\n                cluster.Shutdown()\n    if is_oss:\n        out_graph = graph_pb2.GraphDef.FromString(out_graph)\n    return out_graph",
    "docstring": "Optimize the provided metagraph. For best results, the signature_def field in should be populated with information about input (feed) and output (fetch) tensors. Args: config_proto: a ConfigProto protobuf. metagraph: a MetagraphDef protobuf. verbose: whether to log optimization results. graph_id: a string identifying this graph. cluster: a grappler cluster object representing hardware resources available to run this graph. strip_default_attributes: whether graph node attributes having default values should be removed after all the optimization passes. This option is useful if the resulting graph will be executed by an older process that might not know some of the recently added attributes.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\grappler\\tf_optimizer.py",
    "ast_data": "FunctionDef name:OptimizeGraph arg:config_proto arg:metagraph arg:verbose arg:graph_id arg:cluster arg:strip_default_attributes arguments arg arg arg arg arg arg If Call Raise Call Call If Assign Assign Call Assign If Compare Assign Call Call With Assign Call Try Assign Call Call Call If Assign Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "parent_child_names",
    "source_code": "def parent_child_names(name):\n    split_name = name.rsplit('.', 1)\n    if len(split_name) == 1:\n        return ('', split_name[0])\n    else:\n        return (split_name[0], split_name[1])",
    "docstring": "Split full name of submodule into parent submodule's full name and submodule's name.",
    "type": "function",
    "file_path": "pytorch\\torch\\ao\\quantization\\_correct_bias.py",
    "ast_data": "FunctionDef name:parent_child_names arg:name arguments arg Assign Call If Compare Call Return return:yes Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "__init__",
    "source_code": "def __init__(self) -> None:\n    self._opset_version = _constants.TORCHLIB_OPSET\n    self.functions: dict[TorchOp | str, list[OnnxDecompMeta]] = {}",
    "docstring": "Initializes the registry",
    "type": "method",
    "file_path": "pytorch\\torch\\onnx\\_internal\\exporter\\_registration.py",
    "ast_data": "FunctionDef name:__init__ arg:self arguments arg Assign"
  },
  {
    "library": "scipy",
    "name": "get_right",
    "source_code": "def get_right(self):\n    return self.right",
    "docstring": "Return a reference to the right child tree object. Returns ------- right : ClusterNode The left child of the target node. If the node is a leaf, None is returned.",
    "type": "method",
    "file_path": "scipy\\scipy\\cluster\\hierarchy.py",
    "ast_data": "FunctionDef name:get_right arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_compute_gradient",
    "source_code": "def _compute_gradient(f, y_shape, y_dtype, xs, param, delta):\n    x = xs[param]\n    t = x.dtype\n    allowed_types = [dtypes.float16, dtypes.bfloat16, dtypes.float32, dtypes.float64, dtypes.complex64, dtypes.complex128]\n    assert t.base_dtype in allowed_types, 'Cannot compute gradient for unsupported type %s of argument %s' % (t.name, param)\n    t2 = y_dtype\n    assert t2.base_dtype in allowed_types, 'Cannot compute gradient for unsupported type %s of y' % t2.name\n    y_size = _product(y_shape)\n    jacob_t = _compute_theoretical_jacobian(f, y_shape, y_dtype, xs, param)\n    jacob_n = _compute_numeric_jacobian(f, y_size, y_dtype, xs, param, delta)\n    return (jacob_t, jacob_n)",
    "docstring": "Computes the theoretical and numerical jacobian.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\gradient_checker_v2.py",
    "ast_data": "FunctionDef name:_compute_gradient arg:f arg:y_shape arg:y_dtype arg:xs arg:param arg:delta arguments arg arg arg arg arg arg Assign Assign Assign Compare Assign Compare Assign Call Assign Call Assign Call Return return:yes"
  },
  {
    "library": "cryptography",
    "name": "key_size",
    "source_code": "@property\n@abc.abstractmethod\ndef key_size(self) -> int:\n    pass",
    "docstring": "The size of the key being used as an integer in bits (e.g. 128, 256).",
    "type": "method",
    "file_path": "cryptography\\src\\cryptography\\hazmat\\primitives\\_cipheralgorithm.py",
    "ast_data": "FunctionDef name:key_size arg:self arguments arg"
  },
  {
    "library": "pytorch",
    "name": "__init__",
    "source_code": "def __init__(self, layout, inputs, constant_args=()) -> None:\n    assert len(inputs) == 4\n    assert len(constant_args) == 0\n    super().__init__(layout, inputs, constant_args, None, op_overload=torch.ops.quantized.int4mm_packed_weight_cpu.default, cpp_kernel_name='aoti_torch_cpu__weight_int4pack_mm_cpu_tensor')",
    "docstring": "inputs = [x, w, qGroupSize, qScalesAndZeros] constant_args = ()",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\mkldnn_ir.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:layout arg:inputs arg:constant_args arguments arg arg arg arg Compare Call Compare Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "init_feed_dict",
    "source_code": "@property\ndef init_feed_dict(self):\n    return self._init_feed_dict",
    "docstring": "Return the feed dictionary used when evaluating the . Returns: A feed dictionary or .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\training\\supervisor.py",
    "ast_data": "FunctionDef name:init_feed_dict arg:self arguments arg Return return:yes"
  },
  {
    "library": "cherrypy",
    "name": "Page",
    "source_code": "class Page:\n    title = 'Untitled Page'\n\n    def header(self):\n        return '\\n            <html>\\n            <head>\\n                <title>%s</title>\\n            <head>\\n            <body>\\n            <h2>%s</h2>\\n        ' % (self.title, self.title)\n\n    def footer(self):\n        return '\\n            </body>\\n            </html>\\n        '",
    "docstring": "Web page base class.",
    "type": "class",
    "file_path": "cherrypy\\cherrypy\\tutorial\\tut05_derived_objects.py",
    "ast_data": "ClassDef name:Page Assign FunctionDef name:header arg:self arguments arg Return return:yes FunctionDef name:footer arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "loc",
    "source_code": "@property\ndef loc(self):\n    return self._loc",
    "docstring": "Distribution parameter for the mean.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\distributions\\normal.py",
    "ast_data": "FunctionDef name:loc arg:self arguments arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "name",
    "source_code": "@abc.abstractmethod\ndef name(self) -> str:\n    raise NotImplementedError",
    "docstring": "Return the name. Example:: >>> import torch >>> a = torch.tensor([0., 0., 0.], requires_grad=True) >>> b = a.clone() >>> assert isinstance(b.grad_fn, torch.autograd.graph.Node) >>> print(b.grad_fn.name()) CloneBackward0",
    "type": "method",
    "file_path": "pytorch\\torch\\autograd\\graph.py",
    "ast_data": "FunctionDef name:name arg:self arguments arg Raise"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "@deprecation.deprecated_args(None, 'Do not pass `graph_parents`.  They will  no longer be used.', 'graph_parents')\ndef __init__(self, dtype, graph_parents=None, is_non_singular=None, is_self_adjoint=None, is_positive_definite=None, is_square=None, name=None, parameters=None):\n    if is_positive_definite:\n        if is_non_singular is False:\n            raise ValueError('A positive definite matrix is always non-singular.')\n        is_non_singular = True\n    if is_non_singular:\n        if is_square is False:\n            raise ValueError('A non-singular matrix is always square.')\n        is_square = True\n    if is_self_adjoint:\n        if is_square is False:\n            raise ValueError('A self-adjoint matrix is always square.')\n        is_square = True\n    self._is_square_set_or_implied_by_hints = is_square\n    if graph_parents is not None:\n        self._set_graph_parents(graph_parents)\n    else:\n        self._graph_parents = []\n    self._dtype = dtypes.as_dtype(dtype).base_dtype if dtype else dtype\n    self._is_non_singular = is_non_singular\n    self._is_self_adjoint = is_self_adjoint\n    self._is_positive_definite = is_positive_definite\n    self._parameters = self._no_dependency(parameters)\n    self._parameters_sanitized = False\n    self._name = name or type(self).__name__",
    "docstring": "Initialize the . **This is a private method for subclass use.** **Subclasses should copy-paste this documentation.** Args: dtype: The type of the this . Arguments to and will have to be this type. graph_parents: (Deprecated) Python list of graph prerequisites of this Typically tensors that are passed during initialization is_non_singular: Expect that this operator is non-singular. is_self_adjoint: Expect that this operator is equal to its hermitian transpose. If is real, this is equivalent to being symmetric. is_positive_definite: Expect that this operator is positive definite, meaning the quadratic form has positive real part for all nonzero . Note that we do not require the operator to be self-adjoint to be positive-definite. See: is_square: Expect that this operator acts like square [batch] matrices. name: A name for this . parameters: Python of parameters used to instantiate this . Raises: ValueError: If any member of graph_parents is or not a . ValueError: If hints are set incorrectly.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\linalg\\linear_operator.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:dtype arg:graph_parents arg:is_non_singular arg:is_self_adjoint arg:is_positive_definite arg:is_square arg:name arg:parameters arguments arg arg arg arg arg arg arg arg arg If If Compare Raise Call Assign If If Compare Raise Call Assign If If Compare Raise Call Assign Assign If Compare Call Assign Assign Call Assign Assign Assign Assign Call Assign Assign BoolOp Call Call"
  },
  {
    "library": "kornia",
    "name": "get_cuda_device_if_available",
    "source_code": "def get_cuda_device_if_available(index: int=0) -> torch.device:\n    if torch.cuda.is_available():\n        return torch.device(f'cuda:{index}')\n    return torch.device('cpu')",
    "docstring": "Try to get cuda device, if fail, return cpu. Args: index: cuda device index Returns: torch.device",
    "type": "function",
    "file_path": "kornia\\kornia\\utils\\helpers.py",
    "ast_data": "FunctionDef name:get_cuda_device_if_available arg:index arguments arg If Call Return return:yes Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "_Cat",
    "source_code": "class _Cat(Constraint):\n\n    def __init__(self, cseq, dim=0, lengths=None):\n        assert all((isinstance(c, Constraint) for c in cseq))\n        self.cseq = list(cseq)\n        if lengths is None:\n            lengths = [1] * len(self.cseq)\n        self.lengths = list(lengths)\n        assert len(self.lengths) == len(self.cseq)\n        self.dim = dim\n        super().__init__()\n\n    @property\n    def is_discrete(self) -> bool:\n        return any((c.is_discrete for c in self.cseq))\n\n    @property\n    def event_dim(self) -> int:\n        return max((c.event_dim for c in self.cseq))\n\n    def check(self, value):\n        assert -value.dim() <= self.dim < value.dim()\n        checks = []\n        start = 0\n        for constr, length in zip(self.cseq, self.lengths):\n            v = value.narrow(self.dim, start, length)\n            checks.append(constr.check(v))\n            start = start + length\n        return torch.cat(checks, self.dim)",
    "docstring": "Constraint functor that applies a sequence of constraints at the submatrices at dimension , each of size , in a way compatible with :func:.",
    "type": "class",
    "file_path": "pytorch\\torch\\distributions\\constraints.py",
    "ast_data": "ClassDef name:_Cat FunctionDef name:__init__ arg:self arg:cseq arg:dim arg:lengths arguments arg arg arg arg Call Call Assign Call If Compare Assign Call Assign Call Compare Call Call Assign Call Call FunctionDef name:is_discrete arg:self arguments arg Return return:yes Call FunctionDef name:event_dim arg:self arguments arg Return return:yes Call FunctionDef name:check arg:self arg:value arguments arg arg Compare Call Call Assign Assign For Call Assign Call Call Call Assign Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "predict_proba",
    "source_code": "def predict_proba(self, X):\n    predictions, _ = _get_response_values(self.estimator, X, response_method=['decision_function', 'predict_proba'])\n    if predictions.ndim == 1:\n        predictions = predictions.reshape(-1, 1)\n    n_classes = len(self.classes)\n    label_encoder = LabelEncoder().fit(self.classes)\n    pos_class_indices = label_encoder.transform(self.estimator.classes_)\n    proba = np.zeros((_num_samples(X), n_classes))\n    for class_idx, this_pred, calibrator in zip(pos_class_indices, predictions.T, self.calibrators):\n        if n_classes == 2:\n            class_idx += 1\n        proba[:, class_idx] = calibrator.predict(this_pred)\n    if n_classes == 2:\n        proba[:, 0] = 1.0 - proba[:, 1]\n    else:\n        denominator = np.sum(proba, axis=1)[:, np.newaxis]\n        uniform_proba = np.full_like(proba, 1 / n_classes)\n        proba = np.divide(proba, denominator, out=uniform_proba, where=denominator != 0)\n    proba[(1.0 < proba) & (proba <= 1.0 + 1e-05)] = 1.0\n    return proba",
    "docstring": "Calculate calibrated probabilities. Calculates classification calibrated probabilities for each class, in a one-vs-all manner, for . Parameters ---------- X : ndarray of shape (n_samples, n_features) The sample data. Returns ------- proba : array, shape (n_samples, n_classes) The predicted probabilities. Can be exact zeros.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\calibration.py",
    "ast_data": "FunctionDef name:predict_proba arg:self arg:X arguments arg arg Assign Call If Compare Assign Call Assign Call Assign Call Call Assign Call Assign Call Call For Call If Compare Assign Call If Compare Assign Assign Call Assign Call Assign Call Compare Assign Compare Compare Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "__init__",
    "source_code": "def __init__(self, nrows, ncols, subplot_spec, wspace=None, hspace=None, height_ratios=None, width_ratios=None):\n    self._wspace = wspace\n    self._hspace = hspace\n    if isinstance(subplot_spec, SubplotSpec):\n        self._subplot_spec = subplot_spec\n    else:\n        raise TypeError('subplot_spec must be type SubplotSpec, usually from GridSpec, or axes.get_subplotspec.')\n    self.figure = self._subplot_spec.get_gridspec().figure\n    super().__init__(nrows, ncols, width_ratios=width_ratios, height_ratios=height_ratios)",
    "docstring": "Parameters ---------- nrows, ncols : int Number of rows and number of columns of the grid. subplot_spec : SubplotSpec Spec from which the layout parameters are inherited. wspace, hspace : float, optional See for more details. If not specified default values (from the figure or rcParams) are used. height_ratios : array-like of length *nrows*, optional See for details. width_ratios : array-like of length *ncols*, optional See for details.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\gridspec.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:nrows arg:ncols arg:subplot_spec arg:wspace arg:hspace arg:height_ratios arg:width_ratios arguments arg arg arg arg arg arg arg arg Assign Assign If Call Assign Raise Call Assign Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_softmax_flops",
    "source_code": "@ops.RegisterStatistics('Softmax', 'flops')\ndef _softmax_flops(graph, node):\n    return _unary_op_flops(graph, node, ops_per_element=5)",
    "docstring": "Compute flops for Softmax operation.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\profiler\\internal\\flops_registry.py",
    "ast_data": "FunctionDef name:_softmax_flops arg:graph arg:node arguments arg arg Return return:yes Call Call"
  },
  {
    "library": "django",
    "name": "end_index",
    "source_code": "def end_index(self):\n    if self.number == self.paginator.num_pages:\n        return self.paginator.count\n    return self.number * self.paginator.per_page",
    "docstring": "Return the 1-based index of the last object on this page, relative to total objects found (hits).",
    "type": "method",
    "file_path": "django\\django\\core\\paginator.py",
    "ast_data": "FunctionDef name:end_index arg:self arguments arg If Compare Return return:yes Return return:yes"
  },
  {
    "library": "kornia",
    "name": "height",
    "source_code": "@property\ndef height(self) -> int | Tensor:\n    return self._height",
    "docstring": "Returns the height of the image.",
    "type": "method",
    "file_path": "kornia\\kornia\\sensors\\camera\\camera_model.py",
    "ast_data": "FunctionDef name:height arg:self arguments arg Return return:yes"
  },
  {
    "library": "kornia",
    "name": "rgb_to_bgr",
    "source_code": "def rgb_to_bgr(image: Tensor) -> Tensor:\n    if not isinstance(image, Tensor):\n        raise TypeError(f'Input type is not a Tensor. Got {type(image)}')\n    if len(image.shape) < 3 or image.shape[-3] != 3:\n        raise ValueError(f'Input size must have a shape of (*, 3, H, W).Got {image.shape}')\n    return bgr_to_rgb(image)",
    "docstring": "Convert a RGB image to BGR. .. image:: _static/img/rgb_to_bgr.png Args: image: RGB Image to be converted to BGRof of shape :math:. Returns: BGR version of the image with shape of shape :math:. Example: >>> input = torch.rand(2, 3, 4, 5) >>> output = rgb_to_bgr(input) # 2x3x4x5",
    "type": "function",
    "file_path": "kornia\\kornia\\color\\rgb.py",
    "ast_data": "FunctionDef name:rgb_to_bgr arg:image arguments arg If Call Raise Call Call If BoolOp Compare Call Compare Raise Call Return return:yes Call"
  },
  {
    "library": "numpy",
    "name": "_leading_trailing",
    "source_code": "def _leading_trailing(a, edgeitems, index=()):\n    axis = len(index)\n    if axis == a.ndim:\n        return a[index]\n    if a.shape[axis] > 2 * edgeitems:\n        return concatenate((_leading_trailing(a, edgeitems, index + np.index_exp[:edgeitems]), _leading_trailing(a, edgeitems, index + np.index_exp[-edgeitems:])), axis=axis)\n    else:\n        return _leading_trailing(a, edgeitems, index + np.index_exp[:])",
    "docstring": "Keep only the N-D corners (leading and trailing edges) of an array. Should be passed a base-class ndarray, since it makes no guarantees about preserving subclasses.",
    "type": "function",
    "file_path": "numpy\\numpy\\_core\\arrayprint.py",
    "ast_data": "FunctionDef name:_leading_trailing arg:a arg:edgeitems arg:index arguments arg arg arg Assign Call If Compare Return return:yes If Compare Return return:yes Call Call Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "get_next",
    "source_code": "def get_next(self, device=None):\n    if device is not None:\n        index = self._devices.index(device)\n        return self._device_iterators[index].get_next()\n    result = []\n    for i, device in enumerate(self._devices):\n        with ops.device(device):\n            result.append(self._device_iterators[i].get_next())\n    return result",
    "docstring": "Returns the next element given a , else returns all in a list.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\data\\ops\\multi_device_iterator_ops.py",
    "ast_data": "FunctionDef name:get_next arg:self arg:device arguments arg arg If Compare Assign Call Return return:yes Call Assign For Call With Call Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "call_options",
    "source_code": "@property\ndef call_options(self) -> CallOptions:\n    return self._call_options",
    "docstring": "Call options declared for this AtomicFunction.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\polymorphic_function\\atomic_function.py",
    "ast_data": "FunctionDef name:call_options arg:self arguments arg Return return:yes"
  },
  {
    "library": "django",
    "name": "ensure_echo_on",
    "source_code": "def ensure_echo_on():\n    if not termios or not sys.stdin.isatty():\n        return\n    attr_list = termios.tcgetattr(sys.stdin)\n    if not attr_list[3] & termios.ECHO:\n        attr_list[3] |= termios.ECHO\n        if hasattr(signal, 'SIGTTOU'):\n            old_handler = signal.signal(signal.SIGTTOU, signal.SIG_IGN)\n        else:\n            old_handler = None\n        termios.tcsetattr(sys.stdin, termios.TCSANOW, attr_list)\n        if old_handler is not None:\n            signal.signal(signal.SIGTTOU, old_handler)",
    "docstring": "Ensure that echo mode is enabled. Some tools such as PDB disable it which causes usability issues after reload.",
    "type": "function",
    "file_path": "django\\django\\utils\\autoreload.py",
    "ast_data": "FunctionDef name:ensure_echo_on arguments If BoolOp Call Return return:no Assign Call If If Call Assign Call Assign Call If Compare Call"
  },
  {
    "library": "kornia",
    "name": "YuvToRgb",
    "source_code": "class YuvToRgb(Module):\n    ONNX_DEFAULT_INPUTSHAPE: ClassVar[list[int]] = [-1, 3, -1, -1]\n    ONNX_DEFAULT_OUTPUTSHAPE: ClassVar[list[int]] = [-1, 3, -1, -1]\n\n    def forward(self, input: Tensor) -> Tensor:\n        return yuv_to_rgb(input)",
    "docstring": "Convert an image from YUV to RGB. The image data is assumed to be in the range of :math: for luma (Y). The ranges of U and V are :math: and :math:, respectively. YUV formula follows M/PAL values (see _, Table 2, items 2.5 and 2.6). Returns: RGB version of the image. Shape: - image: :math: - output: :math: Examples: >>> input = torch.rand(2, 3, 4, 5) >>> rgb = YuvToRgb() >>> output = rgb(input) # 2x3x4x5",
    "type": "class",
    "file_path": "kornia\\kornia\\color\\yuv.py",
    "ast_data": "ClassDef name:YuvToRgb FunctionDef name:forward arg:self arg:input arguments arg arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "cluster_spec",
    "source_code": "@property\ndef cluster_spec(self):\n    return copy.deepcopy(self._cluster_spec)",
    "docstring": "Returns a copy of the cluster_spec object.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\distribute\\distribute_coordinator_utils.py",
    "ast_data": "FunctionDef name:cluster_spec arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "non_trainable_weights",
    "source_code": "@property\ndef non_trainable_weights(self):\n    if self.trainable:\n        children_weights = self._gather_children_attribute('non_trainable_variables')\n        non_trainable_weights = self._non_trainable_weights + children_weights\n    else:\n        children_weights = self._gather_children_attribute('variables')\n        non_trainable_weights = self._trainable_weights + self._non_trainable_weights + children_weights\n    return self._dedup_weights(non_trainable_weights)",
    "docstring": "List of all non-trainable weights tracked by this layer. Non-trainable weights are *not* updated during training. They are expected to be updated manually in . Returns: A list of non-trainable variables.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\base_layer.py",
    "ast_data": "FunctionDef name:non_trainable_weights arg:self arguments arg If Assign Call Assign Assign Call Assign Return return:yes Call"
  },
  {
    "library": "seaborn",
    "name": "_update_lut",
    "source_code": "def _update_lut(cmap, colors):\n    cmap._lut[:256] = colors\n    cmap._set_extremes()",
    "docstring": "Change the LUT values in a matplotlib colormap in-place.",
    "type": "function",
    "file_path": "seaborn\\seaborn\\widgets.py",
    "ast_data": "FunctionDef name:_update_lut arg:cmap arg:colors arguments arg arg Assign Call"
  },
  {
    "library": "pytorch",
    "name": "__init__",
    "source_code": "def __init__(self, layout, inputs, constant_args=()) -> None:\n    self.has_bias = len(inputs) == 5\n    super().__init__(layout, inputs, constant_args, None, op_overload=torch.ops.onednn.qconv_pointwise.default, cpp_kernel_name='aoti_torch_cpu__qconv_pointwise_tensor')",
    "docstring": "if bias is not None - inputs = [x, w, b, weight_scale, weight_zp] - const_args is: [stride, padding, dilation, groups, x_scale, x_zp, o_scale, o_zp, fp32_output, unary_attr, unary_scalars, unary_algorithm] else - inputs = [x, w, weight_scale, weight_zp] - const_args is: [bias, stride, padding, dilation, groups, x_scale, x_zp, o_scale, o_zp, fp32_output, unary_attr, unary_scalars, unary_algorithm]",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\mkldnn_ir.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:layout arg:inputs arg:constant_args arguments arg arg arg arg Assign Compare Call Call Call"
  },
  {
    "library": "django",
    "name": "configure_user",
    "source_code": "def configure_user(self, request, user, created=True):\n    return user",
    "docstring": "Configure a user and return the updated user. By default, return the user unmodified.",
    "type": "method",
    "file_path": "django\\django\\contrib\\auth\\backends.py",
    "ast_data": "FunctionDef name:configure_user arg:self arg:request arg:user arg:created arguments arg arg arg arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "variable_shape",
    "source_code": "@property\ndef variable_shape(self):\n    return tensor_shape.TensorShape(tuple(self.source_column.shape) + (len(self.boundaries) + 1,))",
    "docstring": "See base class.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\feature_column\\feature_column_v2.py",
    "ast_data": "FunctionDef name:variable_shape arg:self arguments arg Return return:yes Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "_is_node_annotated",
    "source_code": "def _is_node_annotated(_node):\n    return QUANT_ANNOTATION_KEY in _node.meta and _node.meta[QUANT_ANNOTATION_KEY]._annotated",
    "docstring": "return True if the node is annotated, otherwise return False",
    "type": "function",
    "file_path": "pytorch\\torch\\ao\\quantization\\quantizer\\x86_inductor_quantizer.py",
    "ast_data": "FunctionDef name:_is_node_annotated arg:_node arguments arg Return return:yes BoolOp Compare"
  },
  {
    "library": "tensorflow",
    "name": "_add_state_variable",
    "source_code": "def _add_state_variable(self, name, shape, dtype, initializer=None, partitioner=None, use_resource=None, **kwargs):\n    weight = self.add_weight(name=name, shape=shape, dtype=dtype, initializer=initializer, regularizer=None, trainable=False, constraint=None, partitioner=partitioner, use_resource=use_resource, **kwargs)\n    self.state_variables[name] = weight\n    return weight",
    "docstring": "Add a variable that can hold state which is updated during adapt(). Args: name: Variable name. shape: Variable shape. Defaults to scalar if unspecified. dtype: The type of the variable. Defaults to or . initializer: initializer instance (callable). partitioner: Partitioner to be passed to the API. use_resource: Whether to use **kwargs: Additional keyword arguments. Accepted values are and . Returns: The created variable.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\base_preprocessing_layer.py",
    "ast_data": "FunctionDef name:_add_state_variable arg:self arg:name arg:shape arg:dtype arg:initializer arg:partitioner arg:use_resource arguments arg arg arg arg arg arg arg arg Assign Call Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "build_shuffle_then_shuffle",
    "source_code": "def build_shuffle_then_shuffle(input_tensors, first_gather_devices, second_gather_devices, red_op, un_op=None):\n\n    def upper_builder(tensors):\n        return build_shuffle_all_reduce(tensors, second_gather_devices, red_op, un_op)\n\n    def upper_level_f(tensors):\n        return _reduce_non_singleton(tensors, upper_builder, un_op)\n    return _build_shuffle_hybrid(input_tensors, first_gather_devices, red_op, upper_level_f)",
    "docstring": "Construct hybrid of Shuffle within workers, Shuffle across workers.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\v1\\all_reduce.py",
    "ast_data": "FunctionDef name:build_shuffle_then_shuffle arg:input_tensors arg:first_gather_devices arg:second_gather_devices arg:red_op arg:un_op arguments arg arg arg arg arg FunctionDef name:upper_builder arg:tensors arguments arg Return return:yes Call FunctionDef name:upper_level_f arg:tensors arguments arg Return return:yes Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "combined_commuting_self_adjoint_hint",
    "source_code": "def combined_commuting_self_adjoint_hint(operator_a, operator_b):\n    if operator_a.is_self_adjoint and operator_b.is_self_adjoint:\n        return True\n    if operator_a.is_self_adjoint is True and operator_b.is_self_adjoint is False or (operator_a.is_self_adjoint is False and operator_b.is_self_adjoint is True):\n        return False\n    return None",
    "docstring": "Get combined hint for self-adjoint-ness.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\linalg\\property_hint_util.py",
    "ast_data": "FunctionDef name:combined_commuting_self_adjoint_hint arg:operator_a arg:operator_b arguments arg arg If BoolOp Return return:yes If BoolOp BoolOp Compare Compare BoolOp Compare Compare Return return:yes Return return:no"
  },
  {
    "library": "matplotlib",
    "name": "get_tick_iterators",
    "source_code": "def get_tick_iterators(self, axes):\n    angle_normal, angle_tangent = {0: (90, 0), 1: (0, 90)}[self.nth_coord]\n    major = self.axis.major\n    major_locs = major.locator()\n    major_labels = major.formatter.format_ticks(major_locs)\n    minor = self.axis.minor\n    minor_locs = minor.locator()\n    minor_labels = minor.formatter.format_ticks(minor_locs)\n    data_to_axes = axes.transData - axes.transAxes\n\n    def _f(locs, labels):\n        for loc, label in zip(locs, labels):\n            c = self._to_xy(loc, const=self._value)\n            c1, c2 = data_to_axes.transform(c)\n            if 0 <= c1 <= 1 and 0 <= c2 <= 1:\n                yield (c, angle_normal, angle_tangent, label)\n    return (_f(major_locs, major_labels), _f(minor_locs, minor_labels))",
    "docstring": "tick_loc, tick_angle, tick_label",
    "type": "method",
    "file_path": "matplotlib\\lib\\mpl_toolkits\\axisartist\\axislines.py",
    "ast_data": "FunctionDef name:get_tick_iterators arg:self arg:axes arguments arg arg Assign Assign Assign Call Assign Call Assign Assign Call Assign Call Assign FunctionDef name:_f arg:locs arg:labels arguments arg arg For Call Assign Call Assign Call If BoolOp Compare Compare Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "compute_stats",
    "source_code": "def compute_stats(array):\n    q1 = np.percentile(array, 25)\n    q3 = np.percentile(array, 75)\n    low = q1 - 1.5 * (q3 - q1)\n    high = q3 + 1.5 * (q3 - q1)\n    filtered_array = list(filter(lambda x: low <= x and x <= high, array))\n    mean = np.mean(filtered_array)\n    min_val = np.min(filtered_array)\n    max_val = np.max(filtered_array)\n    max_diff = max(max_val - mean, mean - min_val)\n    diff = max_diff / mean * 100.0\n    return (mean, diff)",
    "docstring": "Reports mean and ± range for the given array. The range computation follows benchstat's. Args: array: The array to compute stats for. Returns: mean and ± %diff range.",
    "type": "function",
    "file_path": "tensorflow\\third_party\\xla\\xla\\backends\\cpu\\benchmarks\\e2e\\gemma2\\keras\\benchmark.py",
    "ast_data": "FunctionDef name:compute_stats arg:array arguments arg Assign Call Assign Call Assign Assign Assign Call Call arguments arg BoolOp Compare Compare Assign Call Assign Call Assign Call Assign Call Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "seekable",
    "source_code": "def seekable(self):\n    return True",
    "docstring": "Returns True as FileIO supports random access ops of seek()/tell()",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\lib\\io\\file_io.py",
    "ast_data": "FunctionDef name:seekable arg:self arguments arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "force_static",
    "source_code": "def force_static(self):\n    if isinstance(self.__variable, SymNodeVariable):\n        self.__variable.evaluate_expr()\n    elif isinstance(self.__variable, ConstantVariable):\n        pass\n    else:\n        raise AssertionError(f'cannot force {self.__variable} ({type(self.__variable)}) static')",
    "docstring": "Forces that a value is static, inducing a guard on its specific value",
    "type": "method",
    "file_path": "pytorch\\torch\\_dynamo\\comptime.py",
    "ast_data": "FunctionDef name:force_static arg:self arguments arg If Call Call If Call Raise Call Call"
  },
  {
    "library": "django",
    "name": "__init__",
    "source_code": "def __init__(self, x=None, y=None, z=None, srid=None):\n    if x is None:\n        coords = []\n    elif isinstance(x, (tuple, list)):\n        coords = x\n    elif isinstance(x, (float, int)) and isinstance(y, (float, int)):\n        if isinstance(z, (float, int)):\n            coords = [x, y, z]\n        else:\n            coords = [x, y]\n    else:\n        raise TypeError('Invalid parameters given for Point initialization.')\n    point = self._create_point(len(coords), coords)\n    super().__init__(point, srid=srid)",
    "docstring": "The Point object may be initialized with either a tuple, or individual parameters. For example: >>> p = Point((5, 23)) # 2D point, passed in as a tuple >>> p = Point(5, 23, 8) # 3D point, passed in with individual parameters",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\geos\\point.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:x arg:y arg:z arg:srid arguments arg arg arg arg arg If Compare Assign If Call Assign If BoolOp Call Call If Call Assign Assign Raise Call Assign Call Call Call Call"
  },
  {
    "library": "kornia",
    "name": "RgbToHls",
    "source_code": "class RgbToHls(Module):\n    ONNX_DEFAULT_INPUTSHAPE: ClassVar[list[int]] = [-1, 3, -1, -1]\n    ONNX_DEFAULT_OUTPUTSHAPE: ClassVar[list[int]] = [-1, 3, -1, -1]\n\n    def forward(self, image: Tensor) -> Tensor:\n        return rgb_to_hls(image)",
    "docstring": "Convert an image from RGB to HLS. The image data is assumed to be in the range of (0, 1). Returns: HLS version of the image. Shape: - image: :math: - output: :math: Examples: >>> input = torch.rand(2, 3, 4, 5) >>> hls = RgbToHls() >>> output = hls(input) # 2x3x4x5",
    "type": "class",
    "file_path": "kornia\\kornia\\color\\hls.py",
    "ast_data": "ClassDef name:RgbToHls FunctionDef name:forward arg:self arg:image arguments arg arg Return return:yes Call"
  },
  {
    "library": "kornia",
    "name": "preprocess",
    "source_code": "def preprocess(self, image_1: Tensor, image_2: Tensor) -> Dict[str, Tensor]:\n    if isinstance(self.matcher, (LoFTR, LocalFeatureMatcher)):\n        input_dict = {'image0': rgb_to_grayscale(image_1), 'image1': rgb_to_grayscale(image_2)}\n    else:\n        raise NotImplementedError(f'The preprocessor for {self.matcher} has not been implemented.')\n    return input_dict",
    "docstring": "Preprocess input to the required format.",
    "type": "method",
    "file_path": "kornia\\kornia\\contrib\\image_stitching.py",
    "ast_data": "FunctionDef name:preprocess arg:self arg:image_1 arg:image_2 arguments arg arg arg If Call Assign Call Call Raise Call Return return:yes"
  },
  {
    "library": "pandas",
    "name": "_build_xpath_expr",
    "source_code": "def _build_xpath_expr(attrs) -> str:\n    if 'class_' in attrs:\n        attrs['class'] = attrs.pop('class_')\n    s = ' and '.join([f'@{k}={v!r}' for k, v in attrs.items()])\n    return f'[{s}]'",
    "docstring": "Build an xpath expression to simulate bs4's ability to pass in kwargs to search for attributes when using the lxml parser. Parameters ---------- attrs : dict A dict of HTML attributes. These are NOT checked for validity. Returns ------- expr : unicode An XPath expression that checks for the given HTML attributes.",
    "type": "function",
    "file_path": "pandas\\pandas\\io\\html.py",
    "ast_data": "FunctionDef name:_build_xpath_expr arg:attrs arguments arg If Compare Assign Call Assign Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_load_variables_impl",
    "source_code": "@def_function.function\ndef _load_variables_impl(config: Text, hosts: List[Tuple[int, Text]], variables: Dict[Text, Dict[Text, tf_variables.Variable]], table_config: tpu_embedding_v2_utils.TableConfig):\n\n    def select_fn(host_id):\n\n        def select_or_zeros(x):\n            if host_id >= len(x.variables):\n                return array_ops.zeros_like(x.variables[0])\n            return x.variables[host_id]\n        return select_or_zeros\n    for host_id, host in enumerate(hosts):\n        with ops.device(host):\n            host_variables = nest.map_structure(select_fn(host_id), variables)\n            for table in table_config:\n                table.optimizer._load()(table_name=table.name, num_shards=len(hosts), shard_id=host_id, config=config, **host_variables[table.name])\n                config = None",
    "docstring": "Load embedding tables to onto TPU for each table and host. Args: config: A serialized TPUEmbeddingConfiguration proto. hosts: A list of CPU devices, on per host. variables: A dictionary of dictionaries of TPUEmbeddingVariables. First key is the table name, second key is 'parameters' or the optimizer slot name. table_config: A list of tf.tpu.experimental.embedding.TableConfig objects.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\tpu\\tpu_embedding_v2.py",
    "ast_data": "FunctionDef name:_load_variables_impl arg:config arg:hosts arg:variables arg:table_config arguments arg arg arg arg FunctionDef name:select_fn arg:host_id arguments arg FunctionDef name:select_or_zeros arg:x arguments arg If Compare Call Return return:yes Call Return return:yes Return return:yes For Call With Call Assign Call Call For Call Call Call Assign"
  },
  {
    "library": "pandas",
    "name": "_execute_insert_multi",
    "source_code": "def _execute_insert_multi(self, conn, keys: list[str], data_iter) -> int:\n    from sqlalchemy import insert\n    data = [dict(zip(keys, row)) for row in data_iter]\n    stmt = insert(self.table).values(data)\n    result = self.pd_sql.execute(stmt)\n    return result.rowcount",
    "docstring": "Alternative to _execute_insert for DBs support multi-value INSERT. Note: multi-value insert is usually faster for analytics DBs and tables containing a few columns but performance degrades quickly with increase of columns.",
    "type": "method",
    "file_path": "pandas\\pandas\\io\\sql.py",
    "ast_data": "FunctionDef name:_execute_insert_multi arg:self arg:conn arg:keys arg:data_iter arguments arg arg arg arg Assign Call Call Assign Call Call Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "map_structure",
    "source_code": "def map_structure(func, *structure, **check_types_dict):\n    return nest_util.map_structure(nest_util.Modality.DATA, func, *structure, **check_types_dict)",
    "docstring": "Applies to each entry in and returns a new structure. Applies where x[i] is an entry in . All structures in must have the same arity, and the return value will contain the results in the same structure. Args: func: A callable that accepts as many arguments are there are structures. *structure: scalar, or tuple or list of constructed scalars and/or other tuples/lists, or scalars. Note: numpy arrays are considered scalars. **check_types_dict: only valid keyword argument is . If set to (default) the types of iterables within the structures have to be same (e.g. raises a exception). To allow this set this argument to . Returns: A new structure with the same arity as , whose values correspond to where is a value in the corresponding location in . If there are different sequence types and is the sequence types of the first structure will be used. Raises: TypeError: If is not callable or if the structures do not match each other by depth tree. ValueError: If no structure is provided or if the structures do not match each other by type. ValueError: If wrong keyword arguments are provided.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\data\\util\\nest.py",
    "ast_data": "FunctionDef name:map_structure arg:func arguments arg arg arg Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "startswith",
    "source_code": "@forbid_nonstring_types(['bytes'])\ndef startswith(self, pat: str | tuple[str, ...], na: Scalar | lib.NoDefault=lib.no_default) -> Series | Index:\n    if not isinstance(pat, (str, tuple)):\n        msg = f'expected a string or tuple, not {type(pat).__name__}'\n        raise TypeError(msg)\n    result = self._data.array._str_startswith(pat, na=na)\n    return self._wrap_result(result, returns_string=False)",
    "docstring": "Test if the start of each string element matches a pattern. Equivalent to :meth:. Parameters ---------- pat : str or tuple[str, ...] Character sequence or tuple of strings. Regular expressions are not accepted. na : scalar, optional Object shown if element tested is not a string. The default depends on dtype of the array. For object-dtype, `naFalseNaN`. >>> s.str.startswith(\"b\", na=False) 0 True 1 False 2 False 3 False dtype: bool",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\strings\\accessor.py",
    "ast_data": "FunctionDef name:startswith arg:self arg:pat arg:na arguments arg arg arg If Call Assign Call Raise Call Assign Call Return return:yes Call Call"
  },
  {
    "library": "pandas",
    "name": "_gen_non_null_counts",
    "source_code": "def _gen_non_null_counts(self) -> Iterator[str]:\n    for count in self.non_null_counts:\n        yield f'{count} non-null'",
    "docstring": "Iterator with string representation of non-null counts.",
    "type": "method",
    "file_path": "pandas\\pandas\\io\\formats\\info.py",
    "ast_data": "FunctionDef name:_gen_non_null_counts arg:self arguments arg For"
  },
  {
    "library": "seaborn",
    "name": "_clone",
    "source_code": "def _clone(self) -> Plot:\n    new = Plot()\n    new._data = self._data\n    new._layers.extend(self._layers)\n    new._scales.update(self._scales)\n    new._shares.update(self._shares)\n    new._limits.update(self._limits)\n    new._labels.update(self._labels)\n    new._theme.update(self._theme)\n    new._facet_spec.update(self._facet_spec)\n    new._pair_spec.update(self._pair_spec)\n    new._figure_spec.update(self._figure_spec)\n    new._subplot_spec.update(self._subplot_spec)\n    new._layout_spec.update(self._layout_spec)\n    new._target = self._target\n    return new",
    "docstring": "Generate a new object with the same information as the current spec.",
    "type": "method",
    "file_path": "seaborn\\seaborn\\_core\\plot.py",
    "ast_data": "FunctionDef name:_clone arg:self arguments arg Assign Call Assign Call Call Call Call Call Call Call Call Call Call Call Assign Return return:yes"
  },
  {
    "library": "kornia",
    "name": "Hflip",
    "source_code": "class Hflip(Module):\n\n    def forward(self, input: Tensor) -> Tensor:\n        return hflip(input)\n\n    def __repr__(self) -> str:\n        return self.__class__.__name__",
    "docstring": "Horizontally flip a tensor image or a batch of tensor images. Input must be a tensor of shape (C, H, W) or a batch of tensors :math:. Args: input: input tensor. Returns: The horizontally flipped image tensor. Examples: >>> hflip = Hflip() >>> input = torch.tensor([[[ ... [0., 0., 0.], ... [0., 0., 0.], ... [0., 1., 1.] ... ]]]) >>> hflip(input) tensor([[[[0., 0., 0.], [0., 0., 0.], [1., 1., 0.]]]])",
    "type": "class",
    "file_path": "kornia\\kornia\\geometry\\transform\\flips.py",
    "ast_data": "ClassDef name:Hflip FunctionDef name:forward arg:self arg:input arguments arg arg Return return:yes Call FunctionDef name:__repr__ arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_ragged_tile_axis",
    "source_code": "def _ragged_tile_axis(rt_input, axis, repeats, row_splits_dtype):\n    assert axis > 0\n    if not ragged_tensor.is_ragged(rt_input):\n        rt_input = ragged_tensor.RaggedTensor.from_tensor(rt_input, ragged_rank=1, row_splits_dtype=row_splits_dtype)\n    if axis > 1:\n        return rt_input.with_values(_ragged_tile_axis(rt_input.values, axis - 1, repeats, row_splits_dtype))\n    else:\n        src_row_splits = rt_input.nested_row_splits\n        src_row_lengths = rt_input.nested_row_lengths()\n        splits = src_row_splits[0]\n        dst_row_lengths = [repeats]\n        for i in range(1, len(src_row_lengths)):\n            dst_row_lengths.append(ragged_util.repeat_ranges(src_row_lengths[i], splits, repeats))\n            splits = array_ops.gather(src_row_splits[i], splits)\n        dst_values = ragged_util.repeat_ranges(rt_input.flat_values, splits, repeats)\n        return ragged_tensor.RaggedTensor.from_nested_row_lengths(dst_values, dst_row_lengths, validate=False)",
    "docstring": "Tile a dimension of a RaggedTensor to match a ragged shape.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\ragged_tensor_shape.py",
    "ast_data": "FunctionDef name:_ragged_tile_axis arg:rt_input arg:axis arg:repeats arg:row_splits_dtype arguments arg arg arg arg Compare If Call Assign Call If Compare Return return:yes Call Call Assign Assign Call Assign Assign For Call Call Call Call Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "_van_der_corput_permutations",
    "source_code": "def _van_der_corput_permutations(base: IntNumber, *, rng: SeedType=None) -> np.ndarray:\n    rng = check_random_state(rng)\n    count = math.ceil(54 / math.log2(base)) - 1\n    permutations = np.repeat(np.arange(base)[None], count, axis=0)\n    for perm in permutations:\n        rng.shuffle(perm)\n    return permutations",
    "docstring": "Permutations for scrambling a Van der Corput sequence. Parameters ---------- base : int Base of the sequence. rng : , optional Pseudorandom number generator state. When is None, a new is created using entropy from the operating system. Types other than are passed to to instantiate a `SPEC-007 numpy.random.RandomStatenumpy.random.Generatorseedrngcheck_random_statenp.arange(base)k`, which makes it more apparent how many permutations we need to create.",
    "type": "function",
    "file_path": "scipy\\scipy\\stats\\_qmc.py",
    "ast_data": "FunctionDef name:_van_der_corput_permutations arg:base arguments arg arg Assign Call Assign Call Call Assign Call Call For Call Return return:yes"
  },
  {
    "library": "pandas",
    "name": "table",
    "source_code": "@property\ndef table(self):\n    return self.storable",
    "docstring": "return the table group (this is my storable)",
    "type": "method",
    "file_path": "pandas\\pandas\\io\\pytables.py",
    "ast_data": "FunctionDef name:table arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "on_read_assign_cross_replica",
    "source_code": "def on_read_assign_cross_replica(var, value, read_value=True):\n    with distribute_lib.enter_or_assert_strategy(var.distribute_strategy):\n        if distribute_lib.in_cross_replica_context():\n            tensor = value\n            if var.aggregation == vs.VariableAggregation.SUM:\n                strategy = var._distribute_strategy\n                tensor = math_ops.cast(tensor / strategy.num_replicas_in_sync, var.dtype)\n            return assign_on_each_device(var, assign_on_device, tensor, read_value)",
    "docstring": "Return the value of the variable in cross replica context.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\values_util.py",
    "ast_data": "FunctionDef name:on_read_assign_cross_replica arg:var arg:value arg:read_value arguments arg arg arg With Call If Call Assign If Compare Assign Assign Call Return return:yes Call"
  },
  {
    "library": "virtualenv",
    "name": "VirtualenvBuiltin",
    "source_code": "class VirtualenvBuiltin(Creator, Describe, ABC):\n\n    def __init__(self, options, interpreter) -> None:\n        Creator.__init__(self, options, interpreter)\n        Describe.__init__(self, self.dest, interpreter)",
    "docstring": "A creator that does operations itself without delegation, if we can create it we can also describe it.",
    "type": "class",
    "file_path": "virtualenv\\src\\virtualenv\\create\\via_global_ref\\builtin\\builtin_way.py",
    "ast_data": "ClassDef name:VirtualenvBuiltin FunctionDef name:__init__ arg:self arg:options arg:interpreter arguments arg arg arg Call Call"
  },
  {
    "library": "pytorch",
    "name": "codegen_broadcast_and_reshape",
    "source_code": "def codegen_broadcast_and_reshape(self, value: str, initial_shape: Sequence[sympy.Expr], final_shape: Sequence[sympy.Expr], allow_implicit: bool) -> str:\n    pre_broadcast_shape = [sympy.S.One if is_broadcasting else dim for dim, is_broadcasting in zip(self.broadcast_shape, self.broadcasting_dims)]\n    value = triton_reshape(value, initial_shape, pre_broadcast_shape)\n    sizevars = V.graph.sizevars\n    supports_implicit_broadcast = allow_implicit and (len(pre_broadcast_shape) == len(final_shape) and all((sizevars.statically_known_equals(pre_dim, 1) or sizevars.statically_known_equals(pre_dim, post_dim) for pre_dim, post_dim in zip(pre_broadcast_shape, final_shape))))\n    if any(self.broadcasting_dims) and (not supports_implicit_broadcast):\n        value = f'tl.broadcast_to({value}, {V.kernel.index_to_str(self.broadcast_shape)})'\n    value = triton_reshape(value, self.broadcast_shape, final_shape)\n    return value",
    "docstring": "Generate a broadcast and a reshape for the block pointer. This restores stride-0 dimensions which were removed from the block pointer.",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\codegen\\triton.py",
    "ast_data": "FunctionDef name:codegen_broadcast_and_reshape arg:self arg:value arg:initial_shape arg:final_shape arg:allow_implicit arguments arg arg arg arg arg Assign Call Assign Call Assign Assign BoolOp BoolOp Compare Call Call Call BoolOp Call Call Call If BoolOp Call Assign Call Assign Call Return return:yes"
  },
  {
    "library": "pandas",
    "name": "exceeds_info_cols",
    "source_code": "@property\ndef exceeds_info_cols(self) -> bool:\n    return bool(self.col_count > self.max_cols)",
    "docstring": "Check if number of columns to be summarized does not exceed maximum.",
    "type": "method",
    "file_path": "pandas\\pandas\\io\\formats\\info.py",
    "ast_data": "FunctionDef name:exceeds_info_cols arg:self arguments arg Return return:yes Call Compare"
  },
  {
    "library": "tensorflow",
    "name": "is_quantization_aware_trained_model",
    "source_code": "def is_quantization_aware_trained_model(self):\n    training_quant_ops = frozenset({'FakeQuantWithMinMaxVars', 'FakeQuantWithMinMaxVarsPerChannel', 'FakeQuantWithMinMaxArgs', 'QuantizeAndDequantizeV2', 'QuantizeAndDequantizeV3'})\n    if self._graph_def:\n        for node_def in self._graph_def.node:\n            if node_def.op in training_quant_ops:\n                return True\n        for function in self._graph_def.library.function:\n            for node_def in function.node_def:\n                if node_def.op in training_quant_ops:\n                    return True\n    return False",
    "docstring": "Checks if the graph contains any training-time quantization ops.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\lite\\python\\lite.py",
    "ast_data": "FunctionDef name:is_quantization_aware_trained_model arg:self arguments arg Assign Call If For If Compare Return return:yes For For If Compare Return return:yes Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "set_module_name_regex",
    "source_code": "def set_module_name_regex(self, module_name_regex: str, qconfig: QConfigAny) -> QConfigMapping:\n    self.module_name_regex_qconfigs[module_name_regex] = qconfig\n    return self",
    "docstring": "Set the QConfig for modules matching the given regex string. Regexes will be matched in the order in which they are registered through this method. Thus, the caller should register more specific patterns first, e.g.:: qconfig_mapping = QConfigMapping() .set_module_name_regex(\"foo.*bar.*conv[0-9]+\", qconfig1) .set_module_name_regex(\"foo.*bar.*\", qconfig2) .set_module_name_regex(\"foo.*\", qconfig3) In this example, \"foo.bar.conv0\" would match qconfig1, \"foo.bar.linear\" would match qconfig2, and \"foo.baz.relu\" would match qconfig3. If the QConfig for an existing module name regex was already set, the new QConfig will override the old one while preserving the order in which the regexes were originally registered.",
    "type": "method",
    "file_path": "pytorch\\torch\\ao\\quantization\\qconfig_mapping.py",
    "ast_data": "FunctionDef name:set_module_name_regex arg:self arg:module_name_regex arg:qconfig arguments arg arg arg Assign Return return:yes"
  },
  {
    "library": "django",
    "name": "get_handler",
    "source_code": "def get_handler(self, *args, **options):\n    return get_internal_wsgi_application()",
    "docstring": "Return the default WSGI handler for the runner.",
    "type": "method",
    "file_path": "django\\django\\core\\management\\commands\\runserver.py",
    "ast_data": "FunctionDef name:get_handler arg:self arguments arg arg arg Return return:yes Call"
  },
  {
    "library": "numpy",
    "name": "InvalidVersion",
    "source_code": "class InvalidVersion(ValueError):\n    pass",
    "docstring": "An invalid version was found, users should refer to PEP 440.",
    "type": "class",
    "file_path": "numpy\\numpy\\_utils\\_pep440.py",
    "ast_data": "ClassDef name:InvalidVersion"
  },
  {
    "library": "pandas",
    "name": "reconstruct_data_with_by",
    "source_code": "def reconstruct_data_with_by(data: DataFrame, by: IndexLabel, cols: IndexLabel) -> DataFrame:\n    by_modified = unpack_single_str_list(by)\n    grouped = data.groupby(by_modified)\n    data_list = []\n    for key, group in grouped:\n        columns = MultiIndex.from_product([[key], cols])\n        sub_group = group[cols]\n        sub_group.columns = columns\n        data_list.append(sub_group)\n    data = concat(data_list, axis=1)\n    return data",
    "docstring": "Internal function to group data, and reassign multiindex column names onto the result in order to let grouped data be used in _compute_plot_data method. Parameters ---------- data : Original DataFrame to plot by : grouped parameter selected by users cols : columns of data set (excluding columns used in ) Returns ------- Output is the reconstructed DataFrame with MultiIndex columns. The first level of MI is unique values of groups, and second level of MI is the columns selected by users. Examples -------- >>> d = {\"h\": [\"h1\", \"h1\", \"h2\"], \"a\": [1, 3, 5], \"b\": [3, 4, 6]} >>> df = pd.DataFrame(d) >>> reconstruct_data_with_by(df, by=\"h\", cols=[\"a\", \"b\"]) h1 h2 a b a b 0 1.0 3.0 NaN NaN 1 3.0 4.0 NaN NaN 2 NaN NaN 5.0 6.0",
    "type": "function",
    "file_path": "pandas\\pandas\\plotting\\_matplotlib\\groupby.py",
    "ast_data": "FunctionDef name:reconstruct_data_with_by arg:data arg:by arg:cols arguments arg arg arg Assign Call Assign Call Assign For Assign Call Assign Assign Call Assign Call Return return:yes"
  },
  {
    "library": "pandas",
    "name": "ExponentialMovingWindowIndexer",
    "source_code": "class ExponentialMovingWindowIndexer(BaseIndexer):\n\n    @Appender(get_window_bounds_doc)\n    def get_window_bounds(self, num_values: int=0, min_periods: int | None=None, center: bool | None=None, closed: str | None=None, step: int | None=None) -> tuple[np.ndarray, np.ndarray]:\n        return (np.array([0], dtype=np.int64), np.array([num_values], dtype=np.int64))",
    "docstring": "Calculate ewm window bounds (the entire window)",
    "type": "class",
    "file_path": "pandas\\pandas\\core\\indexers\\objects.py",
    "ast_data": "ClassDef name:ExponentialMovingWindowIndexer FunctionDef name:get_window_bounds arg:self arg:num_values arg:min_periods arg:center arg:closed arg:step arguments arg arg arg arg arg arg Return return:yes Call Call Call"
  },
  {
    "library": "scikit-learn",
    "name": "fit",
    "source_code": "@_fit_context(prefer_skip_nested_validation=False)\ndef fit(self, X, y, **fit_params):\n    _raise_for_params(fit_params, self, 'fit', allow=['sample_weight'])\n    y = column_or_1d(y, warn=True)\n    return super().fit(X, y, **fit_params)",
    "docstring": "Fit the estimators. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) Training vectors, where is the number of samples and is the number of features. y : array-like of shape (n_samples,) Target values. **fit_params : dict Parameters to pass to the underlying estimators. .. versionadded:: 1.5 Only available if , which can be set by using `Metadata Routing User Guide ` for more details. Returns ------- self : object Fitted estimator.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\ensemble\\_voting.py",
    "ast_data": "FunctionDef name:fit arg:self arg:X arg:y arguments arg arg arg arg Call Assign Call Return return:yes Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_tf_core_packed_nest_with_indices",
    "source_code": "def _tf_core_packed_nest_with_indices(structure, flat, index, is_nested_fn, sequence_fn=None):\n    packed = []\n    sequence_fn = sequence_fn or sequence_like\n    for s in _tf_core_yield_value(structure):\n        if is_nested_fn(s):\n            new_index, child = _tf_core_packed_nest_with_indices(s, flat, index, is_nested_fn, sequence_fn)\n            packed.append(sequence_fn(s, child))\n            index = new_index\n        else:\n            packed.append(flat[index])\n            index += 1\n    return (index, packed)",
    "docstring": "Helper function for pack_sequence_as. Args: structure: structure to mimic. flat: Flattened values to output substructure for. index: Index at which to start reading from flat. is_nested_fn: Function used to test if a value should be treated as a nested structure. sequence_fn: Function used to generate a new structure instance. Returns: The tuple (new_index, child), where: * new_index - the updated index into having processed . * packed - the subset of corresponding to , having started at , and packed into the same nested format. Raises: ValueError: if contains more atoms than (assuming indexing starts from ).",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\util\\nest_util.py",
    "ast_data": "FunctionDef name:_tf_core_packed_nest_with_indices arg:structure arg:flat arg:index arg:is_nested_fn arg:sequence_fn arguments arg arg arg arg arg Assign Assign BoolOp For Call If Call Assign Call Call Call Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "get_visible_devices",
    "source_code": "def get_visible_devices(self, device_type=None):\n    self._initialize_physical_devices()\n    if device_type is None:\n        return list(self._visible_device_list)\n    return [d for d in self._visible_device_list if d.device_type == device_type]",
    "docstring": "Get the list of visible devices.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\context.py",
    "ast_data": "FunctionDef name:get_visible_devices arg:self arg:device_type arguments arg arg Call If Compare Return return:yes Call Return return:yes Compare"
  },
  {
    "library": "tensorflow",
    "name": "get_size",
    "source_code": "@abc.abstractmethod\ndef get_size(self):\n    raise NotImplementedError",
    "docstring": "Return the size (number of batches) for the dataset created. For certain type of the data input, the number of batches is known, eg for Numpy data, the size is same as (number_of_element / batch_size). Whereas for dataset or python generator, the size is unknown since it may or may not have a end state. Returns: int, the number of batches for the dataset, or None if it is unknown. The caller could use this to control the loop of training, show progress bar, or handle unexpected StopIteration error.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\data_adapter.py",
    "ast_data": "FunctionDef name:get_size arg:self arguments arg Raise"
  },
  {
    "library": "pandas",
    "name": "make_block",
    "source_code": "def make_block(values, placement, klass=None, ndim=None, dtype: Dtype | None=None) -> Block:\n    warnings.warn('make_block is deprecated and will be removed in a future version. Use pd.api.internals.create_dataframe_from_blocks or (recommended) higher-level public APIs instead.', DeprecationWarning, stacklevel=2)\n    if dtype is not None:\n        dtype = pandas_dtype(dtype)\n    values, dtype = extract_pandas_array(values, dtype, ndim)\n    from pandas.core.internals.blocks import ExtensionBlock\n    if klass is ExtensionBlock and isinstance(values.dtype, PeriodDtype):\n        klass = None\n    if klass is None:\n        dtype = dtype or values.dtype\n        klass = get_block_type(dtype)\n    if not isinstance(placement, BlockPlacement):\n        placement = BlockPlacement(placement)\n    ndim = maybe_infer_ndim(values, placement, ndim)\n    if isinstance(values.dtype, (PeriodDtype, DatetimeTZDtype)):\n        values = extract_array(values, extract_numpy=True)\n        values = ensure_block_shape(values, ndim)\n    check_ndim(values, placement, ndim)\n    values = maybe_coerce_values(values)\n    return klass(values, ndim=ndim, placement=placement)",
    "docstring": "This is a pseudo-public analogue to blocks.new_block. We ask that downstream libraries use this rather than any fully-internal APIs, including but not limited to: - core.internals.blocks.make_block - Block.make_block - Block.make_block_same_class - Block.__init__",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\internals\\api.py",
    "ast_data": "FunctionDef name:make_block arg:values arg:placement arg:klass arg:ndim arg:dtype arguments arg arg arg arg arg Call If Compare Assign Call Assign Call If BoolOp Compare Call Assign If Compare Assign BoolOp Assign Call If Call Assign Call Assign Call If Call Assign Call Assign Call Call Assign Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "create_new_variable",
    "source_code": "def create_new_variable(next_creator, **kwargs):\n    canonical_name = _canonicalize_variable_name(kwargs.get('name'))\n    v = next_creator(**kwargs)\n    if canonical_name not in shared_variable_store:\n        shared_variable_store[canonical_name] = []\n    shared_variable_store[canonical_name].append(v)\n    return v",
    "docstring": "Create the variable using and store it.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\shared_variable_creator.py",
    "ast_data": "FunctionDef name:create_new_variable arg:next_creator arguments arg arg Assign Call Call Assign Call If Compare Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "distributions_and_v2_optimizers",
    "source_code": "def distributions_and_v2_optimizers():\n    return combinations.combine(distribution=[strategy_combinations_base.one_device_strategy, strategy_combinations_base.mirrored_strategy_with_gpu_and_cpu, strategy_combinations_base.mirrored_strategy_with_two_gpus, strategy_combinations_base.mirrored_strategy_with_two_gpus_no_merge_call], optimizer_fn=optimizers_v2)",
    "docstring": "A common set of combination with DistributionStrategies and Optimizers.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\distribute\\optimizer_combinations.py",
    "ast_data": "FunctionDef name:distributions_and_v2_optimizers arguments Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "quantize_qat",
    "source_code": "def quantize_qat(model, run_fn, run_args, inplace=False):\n    torch._C._log_api_usage_once('quantization_api.quantize.quantize_qat')\n    if not inplace:\n        model = copy.deepcopy(model)\n    model.train()\n    prepare_qat(model, inplace=True)\n    run_fn(model, *run_args)\n    convert(model, inplace=True)\n    return model",
    "docstring": "Do quantization aware training and output a quantized model Args: model: input model run_fn: a function for evaluating the prepared model, can be a function that simply runs the prepared model or a training loop run_args: positional arguments for Return: Quantized model.",
    "type": "function",
    "file_path": "pytorch\\torch\\ao\\quantization\\quantize.py",
    "ast_data": "FunctionDef name:quantize_qat arg:model arg:run_fn arg:run_args arg:inplace arguments arg arg arg arg Call If Assign Call Call Call Call Call Return return:yes"
  },
  {
    "library": "kornia",
    "name": "symmetric_transfer_error",
    "source_code": "def symmetric_transfer_error(pts1: Tensor, pts2: Tensor, H: Tensor, squared: bool=True, eps: float=1e-08) -> Tensor:\n    KORNIA_CHECK_SHAPE(H, ['B', '3', '3'])\n    if pts1.size(-1) == 3:\n        pts1 = convert_points_from_homogeneous(pts1)\n    if pts2.size(-1) == 3:\n        pts2 = convert_points_from_homogeneous(pts2)\n    max_num = torch.finfo(pts1.dtype).max\n    H_inv, good_H = safe_inverse_with_mask(H)\n    there: Tensor = oneway_transfer_error(pts1, pts2, H, True, eps)\n    back: Tensor = oneway_transfer_error(pts2, pts1, H_inv, True, eps)\n    good_H_reshape: Tensor = good_H.view(-1, 1).expand_as(there)\n    out = (there + back) * good_H_reshape.to(there.dtype) + max_num * (~good_H_reshape).to(there.dtype)\n    if squared:\n        return out\n    return (out + eps).sqrt()",
    "docstring": "Return Symmetric transfer error for correspondences given the homography matrix. Args: pts1: correspondences from the left images with shape (B, N, 2 or 3). If they are homogeneous, converted automatically. pts2: correspondences from the right images with shape (B, N, 2 or 3). If they are homogeneous, converted automatically. H: Homographies with shape :math:. squared: if True (default), the squared distance is returned. eps: Small constant for safe sqrt. Returns: the computed distance with shape :math:.",
    "type": "function",
    "file_path": "kornia\\kornia\\geometry\\homography.py",
    "ast_data": "FunctionDef name:symmetric_transfer_error arg:pts1 arg:pts2 arg:H arg:squared arg:eps arguments arg arg arg arg arg Call If Compare Call Assign Call If Compare Call Assign Call Assign Call Assign Call Call Call Call Call Assign Call Call If Return return:yes Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "BufferGroup",
    "source_code": "class BufferGroup:\n\n    def __init__(self, node: BufferLike):\n        self.node = node\n        self.names = [node.get_name()]\n        self.is_output = False\n        self.allocation: Optional[Allocation] = None\n        self.live_range = LiveRange(float('inf'), -float('inf'))\n\n    def update_usage(self, timestep: int):\n        self.live_range = LiveRange(min(timestep, self.live_range.begin), max(timestep, self.live_range.end))\n\n    def sym_nbytes(self):\n        return self.node.get_layout().storage_size() * self.node.get_dtype().itemsize\n\n    def make_allocation(self):\n        assert not self.allocation, 'multiple allocations'\n        assert isinstance(self.live_range.begin, int), 'live ranges not computed'\n        nbytes = self.sym_nbytes()\n        size_hint = V.graph.sizevars.size_hint(nbytes, fallback=64)\n        self.allocation = Allocation(self.node, self.live_range, size_hint=size_hint, symbolic_size=nbytes)\n\n    def __repr__(self):\n        return f'{self.__class__.__name__}({self.names!r}, is_output={self.is_output}, live_range={self.live_range}'",
    "docstring": "Due to inplace reuse an allocated buffer can have many names. This tracks these collections of buffers sharing underlying memory.",
    "type": "class",
    "file_path": "pytorch\\torch\\_inductor\\codegen\\memory_planning.py",
    "ast_data": "ClassDef name:BufferGroup FunctionDef name:__init__ arg:self arg:node arguments arg arg Assign Assign Call Assign Assign Call Call Call FunctionDef name:update_usage arg:self arg:timestep arguments arg arg Assign Call Call Call FunctionDef name:sym_nbytes arg:self arguments arg Return return:yes Call Call Call FunctionDef name:make_allocation arg:self arguments arg Call Assign Call Assign Call Assign Call FunctionDef name:__repr__ arg:self arguments arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "reset_accumulated_memory_stats",
    "source_code": "def reset_accumulated_memory_stats(device: 'Device'=None) -> None:\n    device = _get_device_index(device, optional=True)\n    return torch._C._cuda_resetAccumulatedMemoryStats(device)",
    "docstring": "Reset the \"accumulated\" (historical) stats tracked by the CUDA memory allocator. See :func: for details. Accumulated stats correspond to the and keys in each individual stat dict, as well as and . Args: device (torch.device or int, optional): selected device. Returns statistic for the current device, given by :func:, if :attr: is `cuda-memory-management` for more details about GPU memory management.",
    "type": "function",
    "file_path": "pytorch\\torch\\cuda\\memory.py",
    "ast_data": "FunctionDef name:reset_accumulated_memory_stats arg:device arguments arg Assign Call Return return:yes Call"
  },
  {
    "library": "django",
    "name": "validate_host",
    "source_code": "def validate_host(host, allowed_hosts):\n    return any((pattern == '*' or is_same_domain(host, pattern) for pattern in allowed_hosts))",
    "docstring": "Validate the given host for this site. Check that the host looks valid and matches a host or host pattern in the given list of `` otherwise.",
    "type": "function",
    "file_path": "django\\django\\http\\request.py",
    "ast_data": "FunctionDef name:validate_host arg:host arg:allowed_hosts arguments arg arg Return return:yes Call BoolOp Compare Call"
  },
  {
    "library": "pandas",
    "name": "validate_version",
    "source_code": "def validate_version(self, where=None) -> None:\n    pass",
    "docstring": "are we trying to operate on an old version?",
    "type": "method",
    "file_path": "pandas\\pandas\\io\\pytables.py",
    "ast_data": "FunctionDef name:validate_version arg:self arg:where arguments arg arg"
  },
  {
    "library": "django",
    "name": "dict",
    "source_code": "def dict(self):\n    return {key: self[key] for key in self}",
    "docstring": "Return current object as a dict with singular values.",
    "type": "method",
    "file_path": "django\\django\\utils\\datastructures.py",
    "ast_data": "FunctionDef name:dict arg:self arguments arg Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "_get_tickdir",
    "source_code": "def _get_tickdir(self, position):\n    _api.check_in_list(('upper', 'lower', 'default'), position=position)\n    tickdirs_base = [v['tickdir'] for v in self._AXINFO.values()]\n    elev_mod = np.mod(self.axes.elev + 180, 360) - 180\n    azim_mod = np.mod(self.axes.azim, 360)\n    if position == 'upper':\n        if elev_mod >= 0:\n            tickdirs_base = [2, 2, 0]\n        else:\n            tickdirs_base = [1, 0, 0]\n        if 0 <= azim_mod < 180:\n            tickdirs_base[2] = 1\n    elif position == 'lower':\n        if elev_mod >= 0:\n            tickdirs_base = [1, 0, 1]\n        else:\n            tickdirs_base = [2, 2, 1]\n        if 0 <= azim_mod < 180:\n            tickdirs_base[2] = 0\n    info_i = [v['i'] for v in self._AXINFO.values()]\n    i = self._axinfo['i']\n    vert_ax = self.axes._vertical_axis\n    j = vert_ax - 2\n    tickdir = np.roll(info_i, -j)[np.roll(tickdirs_base, j)][i]\n    return tickdir",
    "docstring": "Get the direction of the tick. Parameters ---------- position : str, optional : {'upper', 'lower', 'default'} The position of the axis. Returns ------- tickdir : int Index which indicates which coordinate the tick line will align with.",
    "type": "method",
    "file_path": "matplotlib\\lib\\mpl_toolkits\\mplot3d\\axis3d.py",
    "ast_data": "FunctionDef name:_get_tickdir arg:self arg:position arguments arg arg Call Assign Call Assign Call Assign Call If Compare If Compare Assign Assign If Compare Assign If Compare If Compare Assign Assign If Compare Assign Assign Call Assign Assign Assign Assign Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "__enter__",
    "source_code": "def __enter__(self) -> str:\n    if self._name is None and self._values is not None:\n        raise ValueError('At least one of name (%s) and default_name (%s) must be provided.' % (self._name, self._default_name))\n    g = get_default_graph()\n    if self._values and (not g.building_function):\n        g_from_inputs = _get_graph_from_inputs(self._values)\n        if g_from_inputs is not g:\n            g = g_from_inputs\n            self._g_manager = g.as_default()\n            self._g_manager.__enter__()\n        else:\n            self._g_manager = None\n    else:\n        self._g_manager = None\n    try:\n        self._name_scope = g.name_scope(self._name)\n        return self._name_scope.__enter__()\n    except:\n        if self._g_manager is not None:\n            self._g_manager.__exit__(*sys.exc_info())\n        raise",
    "docstring": "Start the scope block. Returns: The scope name. Raises: ValueError: if neither nor is provided but are.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\ops.py",
    "ast_data": "FunctionDef name:__enter__ arg:self arguments arg If BoolOp Compare Compare Raise Call Assign Call If BoolOp Assign Call If Compare Assign Assign Call Call Assign Assign Try Assign Call Return return:yes Call ExceptHandler If Compare Call Call Raise"
  },
  {
    "library": "scikit-learn",
    "name": "_fit_calibrator",
    "source_code": "def _fit_calibrator(clf, predictions, y, classes, method, sample_weight=None):\n    Y = label_binarize(y, classes=classes)\n    label_encoder = LabelEncoder().fit(classes)\n    pos_class_indices = label_encoder.transform(clf.classes_)\n    calibrators = []\n    for class_idx, this_pred in zip(pos_class_indices, predictions.T):\n        if method == 'isotonic':\n            calibrator = IsotonicRegression(out_of_bounds='clip')\n        else:\n            calibrator = _SigmoidCalibration()\n        calibrator.fit(this_pred, Y[:, class_idx], sample_weight)\n        calibrators.append(calibrator)\n    pipeline = _CalibratedClassifier(clf, calibrators, method=method, classes=classes)\n    return pipeline",
    "docstring": "Fit calibrator(s) and return a instance. (i.e. ) calibrators are fitted. However, if equals 2, one calibrator is fitted. Parameters ---------- clf : estimator instance Fitted classifier. predictions : array-like, shape (n_samples, n_classes) or (n_samples, 1) when binary. Raw predictions returned by the un-calibrated base classifier. y : array-like, shape (n_samples,) The targets. classes : ndarray, shape (n_classes,) All the prediction classes. method : {'sigmoid', 'isotonic'} The method to use for calibration. sample_weight : ndarray, shape (n_samples,), default=None Sample weights. If None, then samples are equally weighted. Returns ------- pipeline : _CalibratedClassifier instance",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\calibration.py",
    "ast_data": "FunctionDef name:_fit_calibrator arg:clf arg:predictions arg:y arg:classes arg:method arg:sample_weight arguments arg arg arg arg arg arg Assign Call Assign Call Call Assign Call Assign For Call If Compare Assign Call Assign Call Call Call Assign Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "print_svg",
    "source_code": "def print_svg(self, filename, *, bbox_inches_restore=None, metadata=None):\n    with cbook.open_file_cm(filename, 'w', encoding='utf-8') as fh:\n        if not cbook.file_requires_unicode(fh):\n            fh = codecs.getwriter('utf-8')(fh)\n        dpi = self.figure.dpi\n        self.figure.dpi = 72\n        width, height = self.figure.get_size_inches()\n        w, h = (width * 72, height * 72)\n        renderer = MixedModeRenderer(self.figure, width, height, dpi, RendererSVG(w, h, fh, image_dpi=dpi, metadata=metadata), bbox_inches_restore=bbox_inches_restore)\n        self.figure.draw(renderer)\n        renderer.finalize()",
    "docstring": "Parameters ---------- filename : str or path-like or file-like Output target; if a string, a file will be opened for writing. metadata : dict[str, Any], optional Metadata in the SVG file defined as key-value pairs of strings, datetimes, or lists of strings, e.g., `NoneDublin Core Metadata`__. .. _DC: __ DC_",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\backends\\backend_svg.py",
    "ast_data": "FunctionDef name:print_svg arg:self arg:filename arguments arg arg arg arg With Call If Call Assign Call Call Assign Assign Assign Call Assign Assign Call Call Call Call"
  },
  {
    "library": "matplotlib",
    "name": "register",
    "source_code": "def register(self, *projections):\n    for projection in projections:\n        name = projection.name\n        self._all_projection_types[name] = projection",
    "docstring": "Register a new set of projections.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\projections\\__init__.py",
    "ast_data": "FunctionDef name:register arg:self arguments arg arg For Assign Assign"
  },
  {
    "library": "pytorch",
    "name": "_register_lowering",
    "source_code": "def _register_lowering(aten_fn, decomp_fn, broadcast, type_promotion_kind: Optional[ELEMENTWISE_TYPE_PROMOTION_KIND], convert_input_to_bool):\n\n    @functools.wraps(decomp_fn)\n    def wrapped(*args, **kwargs):\n        args: list[Any] = list(args)\n        kwargs: dict[str, Any] = dict(kwargs)\n        unpacked = False\n        if len(args) == 1 and isinstance(args[0], (list, tuple)):\n            unpacked = True\n            args = list(args[0])\n        if not all((fn in fallbacks or in_namespace(fn, '_c10d_functional') for fn in aten_fn)):\n            assert not any((x == 'out' for x in kwargs.keys())), \"out= ops aren't yet supported\"\n        args, kwargs = transform_args(args, kwargs, broadcast, type_promotion_kind, convert_input_to_bool)\n        if unpacked:\n            args = [args]\n        out = decomp_fn(*args, **kwargs)\n        validate_ir(out)\n        return out\n    aten_fn = get_overloads(aten_fn)\n    lowerings.update(dict.fromkeys(aten_fn, wrapped))\n    return wrapped",
    "docstring": "Add a lowering to lowerings dict Arguments: aten_fn: torch.ops.aten.* fn we are lowering decomp_fn: alternate implementation on our IR broadcast: True to apply broadcasting to tensor inputs type_promotion_kind: kind of type promotion applied to tensor inputs, means no type promotion convert_input_to_bool: some logical ops require inputs are converted to bool",
    "type": "function",
    "file_path": "pytorch\\torch\\_inductor\\lowering.py",
    "ast_data": "FunctionDef name:_register_lowering arg:aten_fn arg:decomp_fn arg:broadcast arg:type_promotion_kind arg:convert_input_to_bool arguments arg arg arg arg arg FunctionDef name:wrapped arguments arg arg Call Call Assign If BoolOp Compare Call Call Assign Assign Call If Call BoolOp Compare Call Call Compare Call Assign Call If Assign Assign Call Call Return return:yes Call Assign Call Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "input_shape",
    "source_code": "@property\ndef input_shape(self):\n    return nest.map_structure(backend.int_shape, self.input)",
    "docstring": "Retrieves the input shape(s) of a layer. Only applicable if the layer has exactly one input, i.e. if it is connected to one incoming layer, or if all inputs have the same shape. Returns: Input shape, as an integer shape tuple (or list of shape tuples, one tuple per input tensor). Raises: AttributeError: if the layer has no defined input_shape. RuntimeError: if called in Eager mode.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\functional.py",
    "ast_data": "FunctionDef name:input_shape arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "key_averages",
    "source_code": "def key_averages(self, group_by_input_shape: bool=False, group_by_stack_n: int=0, group_by_overload_name: bool=False):\n    assert self.profiler\n    return self.profiler.key_averages(group_by_input_shape, group_by_stack_n, group_by_overload_name)",
    "docstring": "Averages events, grouping them by operator name and (optionally) input shapes, stack and overload name. .. note:: To use shape/stack functionality make sure to set record_shapes/with_stack when creating profiler context manager.",
    "type": "method",
    "file_path": "pytorch\\torch\\profiler\\profiler.py",
    "ast_data": "FunctionDef name:key_averages arg:self arg:group_by_input_shape arg:group_by_stack_n arg:group_by_overload_name arguments arg arg arg arg Return return:yes Call"
  },
  {
    "library": "numpy",
    "name": "polyline",
    "source_code": "def polyline(off, scl):\n    if scl != 0:\n        return np.array([off, scl])\n    else:\n        return np.array([off])",
    "docstring": "Returns an array representing a linear polynomial. Parameters ---------- off, scl : scalars The \"y-intercept\" and \"slope\" of the line, respectively. Returns ------- y : ndarray This module's representation of the linear polynomial ``. See Also -------- numpy.polynomial.chebyshev.chebline numpy.polynomial.legendre.legline numpy.polynomial.laguerre.lagline numpy.polynomial.hermite.hermline numpy.polynomial.hermite_e.hermeline Examples -------- >>> from numpy.polynomial import polynomial as P >>> P.polyline(1, -1) array([ 1, -1]) >>> P.polyval(1, P.polyline(1, -1)) # should be 0 0.0",
    "type": "function",
    "file_path": "numpy\\numpy\\polynomial\\polynomial.py",
    "ast_data": "FunctionDef name:polyline arg:off arg:scl arguments arg arg If Compare Return return:yes Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "share_memory_",
    "source_code": "def share_memory_(self):\n    if has_torch_function_unary(self):\n        return handle_torch_function(Tensor.share_memory_, (self,), self)\n    self._typed_storage()._share_memory_()\n    return self",
    "docstring": "Moves the underlying storage to shared memory. This is a no-op if the underlying storage is already in shared memory and for CUDA tensors. Tensors in shared memory cannot be resized. See :meth: for more details.",
    "type": "method",
    "file_path": "pytorch\\torch\\_tensor.py",
    "ast_data": "FunctionDef name:share_memory_ arg:self arguments arg If Call Return return:yes Call Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "node_arg_is_bias",
    "source_code": "def node_arg_is_bias(node: Node, arg: Any) -> bool:\n    bias_index = None\n    if 'target_dtype_info' in node.meta:\n        bias_index = node.meta['target_dtype_info'].get('bias_index', None)\n    if bias_index is not None and bias_index < len(node.args) and (node.args[bias_index] is arg):\n        return True\n    return node.kwargs.get('bias') is arg",
    "docstring": "Returns if node arg is bias",
    "type": "function",
    "file_path": "pytorch\\torch\\ao\\quantization\\fx\\utils.py",
    "ast_data": "FunctionDef name:node_arg_is_bias arg:node arg:arg arguments arg arg Assign If Compare Assign Call If BoolOp Compare Compare Call Compare Return return:yes Return return:yes Compare Call"
  },
  {
    "library": "pytorch",
    "name": "get_rng_state_all",
    "source_code": "def get_rng_state_all() -> list[Tensor]:\n    results = [get_rng_state(i) for i in range(device_count())]\n    return results",
    "docstring": "Return a list of ByteTensor representing the random number states of all devices.",
    "type": "function",
    "file_path": "pytorch\\torch\\cuda\\random.py",
    "ast_data": "FunctionDef name:get_rng_state_all arguments Assign Call Call Call Return return:yes"
  },
  {
    "library": "pandas",
    "name": "quantile",
    "source_code": "@final\ndef quantile(self, qs: Index, interpolation: QuantileInterpolation='linear') -> Block:\n    assert self.ndim == 2\n    assert is_list_like(qs)\n    result = quantile_compat(self.values, np.asarray(qs._values), interpolation)\n    result = ensure_block_shape(result, ndim=2)\n    return new_block_2d(result, placement=self._mgr_locs)",
    "docstring": "compute the quantiles of the Parameters ---------- qs : Index The quantiles to be computed in float64. interpolation : str, default 'linear' Type of interpolation. Returns ------- Block",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\internals\\blocks.py",
    "ast_data": "FunctionDef name:quantile arg:self arg:qs arg:interpolation arguments arg arg arg Compare Call Assign Call Call Assign Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "@abc.abstractmethod\ndef __init__(self, x, y=None, **kwargs):\n    if not self.can_handle(x, y):\n        raise ValueError('{} Cannot handle input {}, {}'.format(self.__class__, x, y))",
    "docstring": "Create a DataAdapter based on data inputs. The caller must make sure to call first before invoking this method. Provide unsupported data type will result into unexpected behavior. Args: x: input features. y: target labels. Note that y could be None in the case of prediction. **kwargs: Other keyword arguments for DataAdapter during the construction of the tf.dataset.Dataset. For example: - Numpy data might have which will be used for weighting the loss function during training. - Numpy data might need to have parameter when constructing the dataset and iterator. - Certain input might need to be distribution strategy aware. When is passed, the created dataset need to respect the strategy. DataAdapter might choose to ignore any keyword argument if it doesn't use it, or raise exception if any required argument is not provide.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\data_adapter.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:x arg:y arguments arg arg arg arg If Call Raise Call Call"
  },
  {
    "library": "scrapy",
    "name": "install_reactor",
    "source_code": "def install_reactor(reactor_path: str, event_loop_path: str | None=None) -> None:\n    reactor_class = load_object(reactor_path)\n    if reactor_class is asyncioreactor.AsyncioSelectorReactor:\n        set_asyncio_event_loop_policy()\n        with suppress(error.ReactorAlreadyInstalledError):\n            event_loop = set_asyncio_event_loop(event_loop_path)\n            asyncioreactor.install(eventloop=event_loop)\n    else:\n        *module, _ = reactor_path.split('.')\n        installer_path = [*module, 'install']\n        installer = load_object('.'.join(installer_path))\n        with suppress(error.ReactorAlreadyInstalledError):\n            installer()",
    "docstring": "Installs the :mod: with the specified import path. Also installs the asyncio event loop with the specified import path if the asyncio reactor is enabled",
    "type": "function",
    "file_path": "scrapy\\scrapy\\utils\\reactor.py",
    "ast_data": "FunctionDef name:install_reactor arg:reactor_path arg:event_loop_path arguments arg arg Assign Call If Compare Call With Call Assign Call Call Assign Call Assign Assign Call Call With Call Call"
  },
  {
    "library": "pytorch",
    "name": "_shard_tensor",
    "source_code": "def _shard_tensor(tensor: torch.Tensor, sharding_spec: ShardingSpec, src_rank=0, process_group=None) -> ShardedTensor:\n    if not tensor.is_contiguous():\n        raise ValueError('input tensor is not a contiguous Tensor')\n    pg = process_group if process_group is not None else distributed_c10d._get_default_group()\n    world_size = dist.get_world_size(pg)\n    current_rank = dist.get_rank(pg)\n    gathered_list = [None] * world_size\n    dist.all_gather_object(gathered_list, (src_rank, sharding_spec), group=pg)\n    for idx, entry in enumerate(gathered_list):\n        if src_rank != entry[0]:\n            raise ValueError(f'src_rank={src_rank} on rank: {current_rank} does not match with src_rank={entry[0]} on rank: {idx}')\n        if sharding_spec != entry[1]:\n            raise ValueError(f'sharding_spec={sharding_spec} on rank: {current_rank} does not match with sharding_spec={entry[1]} on rank: {idx}')\n    st = sharding_spec.shard(tensor, src_rank=src_rank, process_group=pg)\n    return st",
    "docstring": "Given a :class:, it shards that tensor according to the provided `torch.Tensortorch.distributed._shard.sharding_spec.ShardingSpecShardedTensortorch.distributed._shard.sharding_spec.ChunkShardingSpec`.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\_shard\\api.py",
    "ast_data": "FunctionDef name:_shard_tensor arg:tensor arg:sharding_spec arg:src_rank arg:process_group arguments arg arg arg arg If Call Raise Call Assign Compare Call Assign Call Assign Call Assign Call For Call If Compare Raise Call If Compare Raise Call Assign Call Return return:yes"
  },
  {
    "library": "scipy",
    "name": "__init__",
    "source_code": "def __init__(self, A, structure=None, use_exact_onenorm=False):\n    self.A = A\n    self._A2 = None\n    self._A4 = None\n    self._A6 = None\n    self._A8 = None\n    self._A10 = None\n    self._d4_exact = None\n    self._d6_exact = None\n    self._d8_exact = None\n    self._d10_exact = None\n    self._d4_approx = None\n    self._d6_approx = None\n    self._d8_approx = None\n    self._d10_approx = None\n    self.ident = _ident_like(A)\n    self.structure = structure\n    self.use_exact_onenorm = use_exact_onenorm",
    "docstring": "Initialize the object. Parameters ---------- A : a dense or sparse square numpy matrix or ndarray The matrix to be exponentiated. structure : str, optional A string describing the structure of matrix . Only is currently supported. use_exact_onenorm : bool, optional If True then only the exact one-norm of matrix powers and products will be used. Otherwise, the one-norm of powers and products may initially be estimated.",
    "type": "method",
    "file_path": "scipy\\scipy\\sparse\\linalg\\_matfuncs.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:A arg:structure arg:use_exact_onenorm arguments arg arg arg arg Assign Assign Assign Assign Assign Assign Assign Assign Assign Assign Assign Assign Assign Assign Assign Call Assign Assign"
  },
  {
    "library": "django",
    "name": "subwidgets",
    "source_code": "@cached_property\ndef subwidgets(self):\n    id_ = self.field.widget.attrs.get('id') or self.auto_id\n    attrs = {'id': id_} if id_ else {}\n    attrs = self.build_widget_attrs(attrs)\n    return [BoundWidget(self.field.widget, widget, self.form.renderer) for widget in self.field.widget.subwidgets(self.html_name, self.value(), attrs=attrs)]",
    "docstring": "Most widgets yield a single subwidget, but others like RadioSelect and CheckboxSelectMultiple produce one subwidget for each choice. This property is cached so that only one database query occurs when rendering ModelChoiceFields.",
    "type": "method",
    "file_path": "django\\django\\forms\\boundfield.py",
    "ast_data": "FunctionDef name:subwidgets arg:self arguments arg Assign BoolOp Call Assign Assign Call Return return:yes Call Call Call"
  },
  {
    "library": "matplotlib",
    "name": "kwdoc",
    "source_code": "def kwdoc(artist):\n    ai = ArtistInspector(artist)\n    return '\\n'.join(ai.pprint_setters_rest(leadingspace=4)) if mpl.rcParams['docstring.hardcopy'] else 'Properties:\\n' + '\\n'.join(ai.pprint_setters(leadingspace=4))",
    "docstring": "Inspect an class (using ) and return information about its settable properties and their current values. Parameters ---------- artist : or an iterable of \\s Returns ------- str The settable properties of *artist*, as plain text if :rc: is False and as a rst table (intended for use in Sphinx) if it is True.",
    "type": "function",
    "file_path": "matplotlib\\lib\\matplotlib\\artist.py",
    "ast_data": "FunctionDef name:kwdoc arg:artist arguments arg Assign Call Return return:yes Call Call Call Call"
  },
  {
    "library": "sphinx",
    "name": "get_terms",
    "source_code": "def get_terms(self, fn2index: dict[str, int]) -> tuple[dict[str, list[int] | int], dict[str, list[int] | int]]:\n    rvs: tuple[dict[str, list[int] | int], dict[str, list[int] | int]] = ({}, {})\n    for rv, mapping in zip(rvs, (self._mapping, self._title_mapping), strict=True):\n        for k, v in mapping.items():\n            if len(v) == 1:\n                fn, = v\n                if fn in fn2index:\n                    rv[k] = fn2index[fn]\n            else:\n                rv[k] = sorted((fn2index[fn] for fn in v if fn in fn2index))\n    return rvs",
    "docstring": "Return a mapping of document and title terms to sorted document IDs. When a term is only found within a single document, then the value for that term will be an integer value. When a term is found within multiple documents, the value will be a list of integers.",
    "type": "method",
    "file_path": "sphinx\\sphinx\\search\\__init__.py",
    "ast_data": "FunctionDef name:get_terms arg:self arg:fn2index arguments arg arg For Call For Call If Compare Call Assign If Compare Assign Assign Call Compare Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "get_theta_direction",
    "source_code": "def get_theta_direction(self):\n    return self._direction.get_matrix()[0, 0]",
    "docstring": "Get the direction in which theta increases. -1: Theta increases in the clockwise direction 1: Theta increases in the counterclockwise direction",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\projections\\polar.py",
    "ast_data": "FunctionDef name:get_theta_direction arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "unit",
    "source_code": "@cache_readonly\ndef unit(self) -> str:\n    return dtype_to_unit(self.dtype)",
    "docstring": "The precision unit of the datetime data. Returns the precision unit for the dtype. It means the smallest time frame that can be stored within this dtype. Returns ------- str Unit string representation (e.g. \"ns\"). See Also -------- TimelikeOps.as_unit : Converts to a specific unit. Examples -------- >>> idx = pd.DatetimeIndex([\"2020-01-02 01:02:03.004005006\"]) >>> idx.unit 'ns' >>> idx.as_unit(\"s\").unit 's'",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\arrays\\datetimelike.py",
    "ast_data": "FunctionDef name:unit arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "train",
    "source_code": "def train(self, mode=True):\n    self.training = mode\n    if not self.freeze_bn:\n        for module in self.children():\n            module.train(mode)\n    return self",
    "docstring": "Batchnorm's training behavior is using the self.training flag. Prevent changing it if BN is frozen. This makes sure that calling on a model with a frozen BN will behave properly.",
    "type": "method",
    "file_path": "pytorch\\torch\\ao\\nn\\intrinsic\\qat\\modules\\conv_fused.py",
    "ast_data": "FunctionDef name:train arg:self arg:mode arguments arg arg Assign If For Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "ast",
    "source_code": "def ast(self):\n    if self.has_subscript():\n        return gast.Subscript(value=self.parent.ast(), slice=self.qn[-1].ast(), ctx=CallerMustSetThis)\n    if self.has_attr():\n        return gast.Attribute(value=self.parent.ast(), attr=self.qn[-1], ctx=CallerMustSetThis)\n    base = self.qn[0]\n    if isinstance(base, str):\n        return gast.Name(base, ctx=CallerMustSetThis, annotation=None, type_comment=None)\n    elif isinstance(base, Literal):\n        return gast.Constant(base.value, kind=None)\n    else:\n        assert False, 'the constructor should prevent types other than str and Literal'",
    "docstring": "AST representation.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\autograph\\pyct\\qual_names.py",
    "ast_data": "FunctionDef name:ast arg:self arguments arg If Call Return return:yes Call Call Call If Call Return return:yes Call Call Assign If Call Return return:yes Call If Call Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "remove_state",
    "source_code": "def remove_state(self, state):\n    self._validate_state(state)\n    self._state.remove(state)",
    "docstring": "Remove a state to define the widget's behavior. See the parameters for details. Parameters ---------- state : str Must be a supported state of the selector. See the parameters for details. Raises ------ ValueError When the state is not supported by the selector.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\widgets.py",
    "ast_data": "FunctionDef name:remove_state arg:self arg:state arguments arg arg Call Call"
  },
  {
    "library": "pytorch",
    "name": "make_simplify_loops_cache",
    "source_code": "def make_simplify_loops_cache(self):\n    cache: dict[tuple[Any, ...], Any] = {}\n    replacement_count = len(self.replacements)\n\n    def simplify_loops(index_vars, sizes, index_formulas):\n        nonlocal replacement_count\n        if replacement_count != len(self.replacements):\n            cache.clear()\n            replacement_count = len(self.replacements)\n        key = (*index_vars, *sizes, *index_formulas)\n        result = cache.get(key, None)\n        if result is None:\n            result = self._simplify_loops_impl(index_vars, sizes, index_formulas)\n            cache[key] = result\n        return result\n    return simplify_loops",
    "docstring": "self._simplify_with_ranges() can be expensive, cache its results",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\sizevars.py",
    "ast_data": "FunctionDef name:make_simplify_loops_cache arg:self arguments arg Assign Call FunctionDef name:simplify_loops arg:index_vars arg:sizes arg:index_formulas arguments arg arg arg If Compare Call Call Assign Call Assign Assign Call If Compare Assign Call Assign Return return:yes Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "target",
    "source_code": "@property\ndef target(self):\n    return c_api.TF_ServerTarget(self._server)",
    "docstring": "Returns the target for a to connect to this server. To create a that connects to this server, use the following snippet: Returns: A string containing a session target for this server.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\training\\server_lib.py",
    "ast_data": "FunctionDef name:target arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "CategoricalDtypeType",
    "source_code": "class CategoricalDtypeType(type):\n    pass",
    "docstring": "the type of CategoricalDtype, this metaclass determines subclass ability",
    "type": "class",
    "file_path": "pandas\\pandas\\core\\dtypes\\dtypes.py",
    "ast_data": "ClassDef name:CategoricalDtypeType"
  },
  {
    "library": "django",
    "name": "_mask_cipher_secret",
    "source_code": "def _mask_cipher_secret(secret):\n    mask = _get_new_csrf_string()\n    chars = CSRF_ALLOWED_CHARS\n    pairs = zip((chars.index(x) for x in secret), (chars.index(x) for x in mask))\n    cipher = ''.join((chars[(x + y) % len(chars)] for x, y in pairs))\n    return mask + cipher",
    "docstring": "Given a secret (assumed to be a string of CSRF_ALLOWED_CHARS), generate a token by adding a mask and applying it to the secret.",
    "type": "function",
    "file_path": "django\\django\\middleware\\csrf.py",
    "ast_data": "FunctionDef name:_mask_cipher_secret arg:secret arguments arg Assign Call Assign Assign Call Call Call Assign Call Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "TightLayoutEngine",
    "source_code": "class TightLayoutEngine(LayoutEngine):\n    _adjust_compatible = True\n    _colorbar_gridspec = True\n\n    def __init__(self, *, pad=1.08, h_pad=None, w_pad=None, rect=(0, 0, 1, 1), **kwargs):\n        super().__init__(**kwargs)\n        for td in ['pad', 'h_pad', 'w_pad', 'rect']:\n            self._params[td] = None\n        self.set(pad=pad, h_pad=h_pad, w_pad=w_pad, rect=rect)\n\n    def execute(self, fig):\n        info = self._params\n        renderer = fig._get_renderer()\n        with getattr(renderer, '_draw_disabled', nullcontext)():\n            kwargs = get_tight_layout_figure(fig, fig.axes, get_subplotspec_list(fig.axes), renderer, pad=info['pad'], h_pad=info['h_pad'], w_pad=info['w_pad'], rect=info['rect'])\n        if kwargs:\n            fig.subplots_adjust(**kwargs)\n\n    def set(self, *, pad=None, w_pad=None, h_pad=None, rect=None):\n        for td in self.set.__kwdefaults__:\n            if locals()[td] is not None:\n                self._params[td] = locals()[td]",
    "docstring": "Implements the `tight_layout_guide` for details.",
    "type": "class",
    "file_path": "matplotlib\\lib\\matplotlib\\layout_engine.py",
    "ast_data": "ClassDef name:TightLayoutEngine Assign Assign FunctionDef name:__init__ arg:self arguments arg arg arg arg arg arg Call Call For Assign Call FunctionDef name:execute arg:self arg:fig arguments arg arg Assign Assign Call With Call Call Assign Call Call If Call FunctionDef name:set arg:self arguments arg arg arg arg arg For If Compare Call Assign Call"
  },
  {
    "library": "django",
    "name": "driver_count",
    "source_code": "@classmethod\ndef driver_count(cls):\n    return capi.get_driver_count()",
    "docstring": "Return the number of GDAL/OGR data source drivers registered.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\gdal\\driver.py",
    "ast_data": "FunctionDef name:driver_count arg:cls arguments arg Return return:yes Call"
  },
  {
    "library": "cherrypy",
    "name": "compress",
    "source_code": "def compress(body, compress_level):\n    import zlib\n    yield b'\\x1f\\x8b'\n    yield b'\\x08'\n    yield b'\\x00'\n    yield struct.pack('<L', int(time.time()) & int('FFFFFFFF', 16))\n    if compress_level == _COMPRESSION_LEVEL_BEST:\n        yield b'\\x02'\n    elif compress_level == _COMPRESSION_LEVEL_FAST:\n        yield b'\\x04'\n    else:\n        yield b'\\x00'\n    yield b'\\xff'\n    crc = zlib.crc32(b'')\n    size = 0\n    zobj = zlib.compressobj(compress_level, zlib.DEFLATED, -zlib.MAX_WBITS, zlib.DEF_MEM_LEVEL, 0)\n    for line in body:\n        size += len(line)\n        crc = zlib.crc32(line, crc)\n        yield zobj.compress(line)\n    yield zobj.flush()\n    yield struct.pack('<L', crc & int('FFFFFFFF', 16))\n    yield struct.pack('<L', size & int('FFFFFFFF', 16))",
    "docstring": "Compress 'body' at the given compress_level.",
    "type": "function",
    "file_path": "cherrypy\\cherrypy\\lib\\encoding.py",
    "ast_data": "FunctionDef name:compress arg:body arg:compress_level arguments arg arg Call Call Call Call If Compare If Compare Assign Call Assign Assign Call For Call Assign Call Call Call Call Call Call Call"
  },
  {
    "library": "scipy",
    "name": "MatReadWarning",
    "source_code": "class MatReadWarning(UserWarning):\n    pass",
    "docstring": "Warning class for read issues.",
    "type": "class",
    "file_path": "scipy\\scipy\\io\\matlab\\_miobase.py",
    "ast_data": "ClassDef name:MatReadWarning"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, name, description, *labels):\n    super(StringGauge, self).__init__('StringGauge', _string_gauge_methods, len(labels), name, description, *labels)",
    "docstring": "Creates a new StringGauge. Args: name: name of the new metric. description: description of the new metric. *labels: The label list of the new metric.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\monitoring.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:name arg:description arguments arg arg arg arg Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "validate_sparse_weights",
    "source_code": "def validate_sparse_weights(values, weights, dtype=None):\n    if weights is None:\n        if dtype:\n            return array_ops.constant([], dtype=dtype)\n        return array_ops.constant([], dtype=values.values.dtype)\n    if not isinstance(weights, sparse_tensor.SparseTensor):\n        raise ValueError(f'Argument `weights` must be a SparseTensor if `values` is a SparseTensor. Received weights={weights} of type: {type(weights).__name__}')\n    checks = []\n    if weights.dense_shape is not values.dense_shape:\n        checks.append(check_ops.assert_equal(weights.dense_shape, values.dense_shape, message=\"'weights' and 'values' must have the same dense shape.\"))\n    if weights.indices is not values.indices:\n        checks.append(check_ops.assert_equal(weights.indices, values.indices, message=\"'weights' and 'values' must have the same indices.\"))\n    if checks:\n        with ops.control_dependencies(checks):\n            weights = array_ops.identity(weights.values)\n    else:\n        weights = weights.values\n    return weights",
    "docstring": "Validates the passed weight tensor or creates an empty one.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\sparse_ops.py",
    "ast_data": "FunctionDef name:validate_sparse_weights arg:values arg:weights arg:dtype arguments arg arg arg If Compare If Return return:yes Call Return return:yes Call If Call Raise Call Call Assign If Compare Call Call If Compare Call Call If With Call Assign Call Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "initialize_or_restore",
    "source_code": "def initialize_or_restore(self, session=None):\n    if context.executing_eagerly():\n        return\n    if session is None:\n        session = get_session()\n    trackable_objects = util.list_objects(self._object_graph_view)\n    initializers = [c.initializer for c in trackable_objects if hasattr(c, 'initializer') and c.initializer is not None and (getattr(c, '_update_uid', self._restore_uid - 1) < self._restore_uid)]\n    session.run(initializers)",
    "docstring": "Runs initialization ops for variables. Objects which would be saved by will be initialized, unless those variables are being restored by a later call to . This method does nothing when executing eagerly (initializers get run eagerly). Args: session: The session to run initialization ops in. If , uses the default session.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\checkpoint\\checkpoint.py",
    "ast_data": "FunctionDef name:initialize_or_restore arg:self arg:session arguments arg arg If Call Return return:no If Compare Assign Call Assign Call Assign BoolOp Call Compare Compare Call Call"
  },
  {
    "library": "pytorch",
    "name": "try_add_event_data",
    "source_code": "def try_add_event_data(self, event_name: str, **kwargs) -> None:\n    if event_name not in self.get_stack():\n        return\n    self.add_event_data(event_name, **kwargs)",
    "docstring": "Same as add_event_data, but will silently not log if the event isn't in the stack.",
    "type": "method",
    "file_path": "pytorch\\torch\\_dynamo\\utils.py",
    "ast_data": "FunctionDef name:try_add_event_data arg:self arg:event_name arguments arg arg arg If Compare Call Return return:no Call"
  },
  {
    "library": "pandas",
    "name": "nansum",
    "source_code": "@disallow('M8')\n@_datetimelike_compat\n@maybe_operate_rowwise\ndef nansum(values: np.ndarray, *, axis: AxisInt | None=None, skipna: bool=True, min_count: int=0, mask: npt.NDArray[np.bool_] | None=None) -> float:\n    dtype = values.dtype\n    values, mask = _get_values(values, skipna, fill_value=0, mask=mask)\n    dtype_sum = _get_dtype_max(dtype)\n    if dtype.kind == 'f':\n        dtype_sum = dtype\n    elif dtype.kind == 'm':\n        dtype_sum = np.dtype(np.float64)\n    the_sum = values.sum(axis, dtype=dtype_sum)\n    the_sum = _maybe_null_out(the_sum, axis, mask, values.shape, min_count=min_count)\n    return the_sum",
    "docstring": "Sum the elements along an axis ignoring NaNs Parameters ---------- values : ndarray[dtype] axis : int, optional skipna : bool, default True min_count: int, default 0 mask : ndarray[bool], optional nan-mask if known Returns ------- result : dtype Examples -------- >>> from pandas.core import nanops >>> s = pd.Series([1, 2, np.nan]) >>> nanops.nansum(s.values) np.float64(3.0)",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\nanops.py",
    "ast_data": "FunctionDef name:nansum arg:values arguments arg arg arg arg arg Assign Assign Call Assign Call If Compare Assign If Compare Assign Call Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "get_descriptor",
    "source_code": "@abc.abstractmethod\ndef get_descriptor(self) -> str:\n    pass",
    "docstring": "Return descriptor name to be included in metadata. The form should be \"extension_name[@local-domain][/version]\".",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\checkpoint\\_extension.py",
    "ast_data": "FunctionDef name:get_descriptor arg:self arguments arg"
  },
  {
    "library": "pytorch",
    "name": "run",
    "source_code": "def run(self, *args, **kwargs) -> torch.fx.GraphModule:\n    return self._run(*args, **kwargs)",
    "docstring": "Run the transform on . Note that this method may or may not mutate , and the returned could be either or a new . Args: *args: Positional arguments for to run. **kwargs: Keyword arguments for to run.",
    "type": "method",
    "file_path": "pytorch\\torch\\onnx\\_internal\\fx\\_pass.py",
    "ast_data": "FunctionDef name:run arg:self arguments arg arg arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_SparseSegmentReduceGradV2",
    "source_code": "def _SparseSegmentReduceGradV2(op, grad, norm=None):\n    assert norm is None or norm == 'mean' or norm == 'sqrtn'\n    indices = op.inputs[1]\n    segment_ids = op.inputs[2]\n    data_shape = array_ops.shape(op.inputs[0])\n    dense_output_dim0 = data_shape[0]\n    if norm == 'mean':\n        grad_fn = math_ops.sparse_segment_mean_grad_v2\n    elif norm == 'sqrtn':\n        grad_fn = math_ops.sparse_segment_sqrt_n_grad_v2\n    else:\n        grad_fn = math_ops.sparse_segment_sum_grad_v2\n    grad_values, sorted_unique_indices = grad_fn(grad, indices, segment_ids, dense_output_dim0)\n    return indexed_slices_lib.IndexedSlices(grad_values, sorted_unique_indices, data_shape)",
    "docstring": "Sparse gradient for SparseSegment(Sum|Mean|SqrtN)[WithNumSegments].",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\math_grad.py",
    "ast_data": "FunctionDef name:_SparseSegmentReduceGradV2 arg:op arg:grad arg:norm arguments arg arg arg BoolOp Compare Compare Compare Assign Assign Assign Call Assign If Compare Assign If Compare Assign Assign Assign Call Return return:yes Call"
  },
  {
    "library": "django",
    "name": "has_default",
    "source_code": "def has_default(self):\n    return self.default is not NOT_PROVIDED",
    "docstring": "Return a boolean of whether this field has a default value.",
    "type": "method",
    "file_path": "django\\django\\db\\models\\fields\\__init__.py",
    "ast_data": "FunctionDef name:has_default arg:self arguments arg Return return:yes Compare"
  },
  {
    "library": "tensorflow",
    "name": "unique_with_counts",
    "source_code": "@tf_export('unique_with_counts')\n@dispatch.add_dispatch_support\ndef unique_with_counts(x, out_idx=dtypes.int32, name=None):\n    return gen_array_ops.unique_with_counts(x, out_idx, name)",
    "docstring": "Finds unique elements in a 1-D tensor. See also . This operation returns a tensor containing all the unique elements of sorted in the same order that they occur in . This operation also returns a tensor the same size as that contains the index of each value of in the unique output . Finally, it returns a third tensor that contains the count of each element of in . In other words: y[idx[i]] = x[i] for i in [0, 1,...,rank(x) - 1] Example usage: >>> x = tf.constant([1, 1, 2, 4, 4, 4, 7, 8, 8]) >>> y, idx, count = unique_with_counts(x) >>> y >>> idx >>> count Args: x: A Tensor. 1-D. out_idx: An optional tf.DType from: tf.int32, tf.int64. Defaults to tf.int32. name: A name for the operation (optional). Returns: A tuple of Tensor objects (y, idx, count). y: A Tensor. Has the same type as x. idx: A Tensor of type out_idx. count: A Tensor of type out_idx.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\array_ops.py",
    "ast_data": "FunctionDef name:unique_with_counts arg:x arg:out_idx arg:name arguments arg arg arg Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_experimental_capture_side_input_by_ref",
    "source_code": "def _experimental_capture_side_input_by_ref(self, identifier: Hashable, func: Callable[[], Any]) -> ...:\n    if context.executing_eagerly():\n        return func()\n\n    def maybe_convert_to_tensor():\n        value = func()\n        if not (isinstance(value, core.Value) or isinstance(value, core.Symbol)):\n            value = constant_op.constant(value)\n        return value\n    placeholder = self._function_captures._capture_by_ref(self, maybe_convert_to_tensor, identifier)\n    return placeholder",
    "docstring": "Implement capturing side input by reference for tf.function. Note that this API will only register the capture in the func_graph where it is called. In the case of nested graph, like nested tf.function or tf.while, the outer graph is not aware of this capture in the inner graph. Thus, the outer tf.function will not retrace when the by-ref capture changes. It's the user's responsibility to call this API in the outer func_graph as well if proper retracing is needed. For example: Args: identifier: A hashable object as the key for the capture. func: A Python function that takes no arguments and returns the value of side input. The function is evaluated at function call time. Returns: A nested structure with the same structure as the side input. Tensors are replaced with placehoders, and non-tensors remain the same.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\func_graph.py",
    "ast_data": "FunctionDef name:_experimental_capture_side_input_by_ref arg:self arg:identifier arg:func arguments arg arg arg If Call Return return:yes Call FunctionDef name:maybe_convert_to_tensor arguments Assign Call If BoolOp Call Call Assign Call Return return:yes Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_BesselK1Grad",
    "source_code": "@ops.RegisterGradient('BesselK1')\ndef _BesselK1Grad(op: ops.Operation, grad):\n    x = op.inputs[0]\n    y = op.outputs[0]\n    with ops.control_dependencies([grad]):\n        partial_x = -special_math_ops.bessel_k0(x) - math_ops.div(y, x)\n        return grad * partial_x",
    "docstring": "Compute gradient of bessel_k1(x) with respect to its argument.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\math_grad.py",
    "ast_data": "FunctionDef name:_BesselK1Grad arg:op arg:grad arguments arg arg Assign Assign With Call Assign Call Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_init_func",
    "source_code": "@def_function.function()\ndef _init_func():\n    ds_variant = gen_dataset_ops.unwrap_dataset_variant(wrap_ds_variant)\n    resource = gen_dataset_ops.anonymous_iterator(**self._input_dataset._flat_structure)\n    with ops.control_dependencies([gen_dataset_ops.make_iterator(ds_variant, resource)]):\n        return gen_dataset_ops.iterator_to_string_handle(resource)",
    "docstring": "Creates an iterator for the input dataset. Returns: A tensor that encapsulates the iterator created.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\data\\experimental\\ops\\prefetching_ops.py",
    "ast_data": "FunctionDef name:_init_func arguments Assign Call Assign Call With Call Call Return return:yes Call Call"
  },
  {
    "library": "scikit-learn",
    "name": "get_metadata_routing",
    "source_code": "def get_metadata_routing(self):\n    router = MetadataRouter(owner=self.__class__.__name__)\n    method_mapping = MethodMapping()\n    method_mapping.add(caller='fit', callee='fit').add(caller='decision_function', callee='decision_function')\n    if hasattr(self._get_estimator(), 'predict_proba'):\n        method_mapping.add(caller='predict', callee='predict_proba').add(caller='predict_proba', callee='predict_proba')\n    else:\n        method_mapping.add(caller='predict', callee='predict').add(caller='predict_proba', callee='predict')\n    if hasattr(self._get_estimator(), 'predict_log_proba'):\n        method_mapping.add(caller='predict_log_proba', callee='predict_log_proba')\n    elif hasattr(self._get_estimator(), 'predict_proba'):\n        method_mapping.add(caller='predict_log_proba', callee='predict_proba')\n    else:\n        method_mapping.add(caller='predict_log_proba', callee='predict')\n    router.add(estimator=self._get_estimator(), method_mapping=method_mapping)\n    return router",
    "docstring": "Get metadata routing of this object. Please check :ref: on how the routing mechanism works. .. versionadded:: 1.5 Returns ------- routing : MetadataRouter A :class: encapsulating routing information.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\ensemble\\_bagging.py",
    "ast_data": "FunctionDef name:get_metadata_routing arg:self arguments arg Assign Call Assign Call Call Call If Call Call Call Call Call Call If Call Call Call If Call Call Call Call Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "xavier_normal_",
    "source_code": "def xavier_normal_(tensor: Tensor, gain: float=1.0, generator: _Optional[torch.Generator]=None) -> Tensor:\n    fan_in, fan_out = _calculate_fan_in_and_fan_out(tensor)\n    std = gain * math.sqrt(2.0 / float(fan_in + fan_out))\n    return _no_grad_normal_(tensor, 0.0, std, generator)",
    "docstring": "Fill the input with values using a Xavier normal distribution. The method is described in - Glorot, X. & Bengio, Y. (2010). The resulting tensor will have values sampled from :math: where .. math:: \\text{std} = \\text{gain} \\times \\sqrt{\\frac{2}{\\text{fan\\_in} + \\text{fan\\_out}}} Also known as Glorot initialization. Args: tensor: an n-dimensional gain: an optional scaling factor generator: the torch Generator to sample from (default: None) Examples: >>> w = torch.empty(3, 5) >>> nn.init.xavier_normal_(w) Note: Be aware that ``.",
    "type": "function",
    "file_path": "pytorch\\torch\\nn\\init.py",
    "ast_data": "FunctionDef name:xavier_normal_ arg:tensor arg:gain arg:generator arguments arg arg arg Assign Call Assign Call Call Return return:yes Call"
  },
  {
    "library": "django",
    "name": "OracleSpatialRefSys",
    "source_code": "class OracleSpatialRefSys(models.Model, SpatialRefSysMixin):\n    cs_name = models.CharField(max_length=68)\n    srid = models.IntegerField(primary_key=True)\n    auth_srid = models.IntegerField()\n    auth_name = models.CharField(max_length=256)\n    wktext = models.CharField(max_length=2046)\n    cs_bounds = models.PolygonField(null=True)\n\n    class Meta:\n        app_label = 'gis'\n        db_table = 'CS_SRS'\n        managed = False\n\n    @property\n    def wkt(self):\n        return self.wktext",
    "docstring": "Maps to the Oracle MDSYS.CS_SRS table.",
    "type": "class",
    "file_path": "django\\django\\contrib\\gis\\db\\backends\\oracle\\models.py",
    "ast_data": "ClassDef name:OracleSpatialRefSys Assign Call Assign Call Assign Call Assign Call Assign Call Assign Call ClassDef name:Meta Assign Assign Assign FunctionDef name:wkt arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "update_regroup",
    "source_code": "def update_regroup(extended, updates, group):\n    if not group:\n        regrouped = regroup(updates, values_lib.Mirrored)\n        return nest.map_structure(extended._local_results, regrouped)\n\n    def _make_grouped_mirrored(values):\n        if len(values) == 1:\n            return values_lib.Mirrored(values)\n        g = control_flow_ops.group(values)\n        if not all((tensor_util.is_tf_type(v) for v in values)):\n            return g\n        with_dep = []\n        for v in values:\n            with ops.device(v.device), ops.control_dependencies([g]):\n                with_dep.append(array_ops.identity(v))\n        return values_lib.Mirrored(with_dep)\n    return regroup(updates, _make_grouped_mirrored)",
    "docstring": "Regroup for an update, with dependencies to ensure all updates execute.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\distribute_utils.py",
    "ast_data": "FunctionDef name:update_regroup arg:extended arg:updates arg:group arguments arg arg arg If Assign Call Return return:yes Call FunctionDef name:_make_grouped_mirrored arg:values arguments arg If Compare Call Return return:yes Call Assign Call If Call Call Return return:yes Assign For With Call Call Call Call Return return:yes Call Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "OddSquare",
    "source_code": "class OddSquare(Benchmark):\n\n    def __init__(self, dimensions=2):\n        Benchmark.__init__(self, dimensions)\n        self._bounds = list(zip([-5.0 * pi] * self.N, [5.0 * pi] * self.N))\n        self.custom_bounds = ([-2.0, 4.0], [-2.0, 4.0])\n        self.a = asarray([1, 1.3, 0.8, -0.4, -1.3, 1.6, -0.2, -0.6, 0.5, 1.4] * 2)\n        self.global_optimum = [[1.0873320463871847, 1.387332045681808]]\n        self.fglob = -1.00846728102\n\n    def fun(self, x, *args):\n        self.nfev += 1\n        b = self.a[0:self.N]\n        d = self.N * max((x - b) ** 2.0)\n        h = sum((x - b) ** 2.0)\n        return -exp(-d / (2.0 * pi)) * cos(pi * d) * (1.0 + 0.02 * h / (d + 0.01))",
    "docstring": "Odd Square objective function. This class defines the Odd Square [1]_ global optimization problem. This is a multimodal minimization problem defined as follows: .. math:: f_{\\text{OddSquare}}(x) = -e^{-\\frac{d}{2\\pi}} \\cos(\\pi d) \\left( 1 + \\frac{0.02h}{d + 0.01} \\right ) Where, in this exercise: .. math:: \\begin{cases} d = n \\cdot \\smash{\\displaystyle\\max_{1 \\leq i \\leq n}} \\left[ (x_i - b_i)^2 \\right ] \\\\ h = \\sum_{i=1}^{n} (x_i - b_i)^2 \\end{cases} And :math: Here, :math: represents the number of dimensions and :math: for :math: and :math:. *Global optimum*: :math: for :math: .. [1] Gavana, A. Global Optimization Benchmarks and AMPGO retrieved 2015 TODO The best solution changes on dimensionality",
    "type": "class",
    "file_path": "scipy\\benchmarks\\benchmarks\\go_benchmark_functions\\go_funcs_O.py",
    "ast_data": "ClassDef name:OddSquare FunctionDef name:__init__ arg:self arg:dimensions arguments arg arg Call Assign Call Call Assign Assign Call Assign Assign FunctionDef name:fun arg:self arg:x arguments arg arg arg Assign Assign Call Assign Call Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "conjugate",
    "source_code": "def conjugate(self) -> 'SymFloat':\n    return self",
    "docstring": "Returns the complex conjugate of the float.",
    "type": "method",
    "file_path": "pytorch\\torch\\__init__.py",
    "ast_data": "FunctionDef name:conjugate arg:self arguments arg Return return:yes"
  },
  {
    "library": "sphinx",
    "name": "note_module",
    "source_code": "def note_module(self, name: str, node_id: str, synopsis: str, platform: str, deprecated: bool) -> None:\n    self.modules[name] = ModuleEntry(docname=self.env.docname, node_id=node_id, synopsis=synopsis, platform=platform, deprecated=deprecated)",
    "docstring": "Note a python module for cross reference. .. versionadded:: 2.1",
    "type": "method",
    "file_path": "sphinx\\sphinx\\domains\\python\\__init__.py",
    "ast_data": "FunctionDef name:note_module arg:self arg:name arg:node_id arg:synopsis arg:platform arg:deprecated arguments arg arg arg arg arg arg Assign Call"
  },
  {
    "library": "pytorch",
    "name": "LazyInstanceNorm3d",
    "source_code": "class LazyInstanceNorm3d(_LazyNormBase, _InstanceNorm):\n    cls_to_become = InstanceNorm3d\n\n    def _get_no_batch_dim(self):\n        return 4\n\n    def _check_input_dim(self, input):\n        if input.dim() not in (4, 5):\n            raise ValueError(f'expected 4D or 5D input (got {input.dim()}D input)')",
    "docstring": "A :class: module with lazy initialization of the `InstanceNorm3dweightbiasrunning_meanrunning_vartorch.nn.modules.lazy.LazyModuleMixinC(N, C, D, H, W)(C, D, H, W)(N, C, D, H, W)(C, D, H, W)(N, C, D, H, W)(C, D, H, W)` (same shape as input)",
    "type": "class",
    "file_path": "pytorch\\torch\\nn\\modules\\instancenorm.py",
    "ast_data": "ClassDef name:LazyInstanceNorm3d Assign FunctionDef name:_get_no_batch_dim arg:self arguments arg Return return:yes FunctionDef name:_check_input_dim arg:self arg:input arguments arg arg If Compare Call Raise Call Call"
  },
  {
    "library": "tensorflow",
    "name": "model_input_signature",
    "source_code": "def model_input_signature(model, keep_original_batch_size=False):\n    input_specs = model._get_save_spec(dynamic_batch=not keep_original_batch_size)\n    if input_specs is None:\n        return None\n    input_specs = _enforce_names_consistency(input_specs)\n    if isinstance(input_specs, collections.abc.Sequence) and len(input_specs) == 1:\n        return input_specs\n    else:\n        return [input_specs]",
    "docstring": "Inspect model to get its input signature. The model's input signature is a list with a single (possibly-nested) object. This is due to the Keras-enforced restriction that tensor inputs must be passed in as the first argument. For example, a model with input {'feature1': , 'feature2': } will have input signature: [{'feature1': TensorSpec, 'feature2': TensorSpec}] Args: model: Keras Model object. keep_original_batch_size: A boolean indicating whether we want to keep using the original batch size or set it to None. Default is , which means that the batch dim of the returned input signature will always be set to . Returns: A list containing either a single TensorSpec or an object with nested TensorSpecs. This list does not contain the argument.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\saving\\saving_utils.py",
    "ast_data": "FunctionDef name:model_input_signature arg:model arg:keep_original_batch_size arguments arg arg Assign Call If Compare Return return:no Assign Call If BoolOp Call Compare Call Return return:yes Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "PackagingErrorReason",
    "source_code": "class PackagingErrorReason(Enum):\n\n    def __repr__(self):\n        return f'<{self.__class__.__name__}.{self.name}>'\n    IS_EXTENSION_MODULE = 'Module is a C extension module. torch.package supports Python modules only.'\n    NO_DUNDER_FILE = 'Module had no __file__ defined.'\n    SOURCE_FILE_NOT_FOUND = 'Module had a __file__, but we could not find it in your filesystem.'\n    DEPENDENCY_RESOLUTION_FAILED = 'Dependency resolution failed.'\n    NO_ACTION = 'Module did not match against any action pattern. Extern, mock, or intern it.'\n    DENIED = 'Module was denied by a pattern.'\n    MOCKED_BUT_STILL_USED = 'Module was mocked out, but is still being used in the package. Please intern or extern the mocked modules if objects are supposed to be in the package.'",
    "docstring": "Listing of different reasons a dependency may fail to package. This enum is used to provide good error messages when :class: is raised.",
    "type": "class",
    "file_path": "pytorch\\torch\\package\\package_exporter.py",
    "ast_data": "ClassDef name:PackagingErrorReason FunctionDef name:__repr__ arg:self arguments arg Return return:yes Assign Assign Assign Assign Assign Assign Assign"
  },
  {
    "library": "numpy",
    "name": "feature_implies",
    "source_code": "def feature_implies(self, names, keep_origins=False):\n\n    def get_implies(name, _caller=set()):\n        implies = set()\n        d = self.feature_supported[name]\n        for i in d.get('implies', []):\n            implies.add(i)\n            if i in _caller:\n                continue\n            _caller.add(name)\n            implies = implies.union(get_implies(i, _caller))\n        return implies\n    if isinstance(names, str):\n        implies = get_implies(names)\n        names = [names]\n    else:\n        assert hasattr(names, '__iter__')\n        implies = set()\n        for n in names:\n            implies = implies.union(get_implies(n))\n    if not keep_origins:\n        implies.difference_update(names)\n    return implies",
    "docstring": "Return a set of CPU features that implied by 'names' Parameters ---------- names : str or sequence of str CPU feature name(s) in uppercase. keep_origins : bool if False(default) then the returned set will not contain any features from 'names'. This case happens only when two features imply each other. Examples -------- >>> self.feature_implies(\"SSE3\") {'SSE', 'SSE2'} >>> self.feature_implies(\"SSE2\") {'SSE'} >>> self.feature_implies(\"SSE2\", keep_origins=True) # 'SSE2' found here since 'SSE' and 'SSE2' imply each other {'SSE', 'SSE2'}",
    "type": "method",
    "file_path": "numpy\\numpy\\distutils\\ccompiler_opt.py",
    "ast_data": "FunctionDef name:feature_implies arg:self arg:names arg:keep_origins arguments arg arg arg FunctionDef name:get_implies arg:name arg:_caller arguments arg arg Call Assign Call Assign For Call Call If Compare Call Assign Call Call Return return:yes If Call Assign Call Assign Call Assign Call For Assign Call Call If Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "from_config",
    "source_code": "@classmethod\ndef from_config(cls, config, custom_objects=None, columns_by_name=None):\n    if 'use_safe_embedding_lookup' not in config:\n        config['use_safe_embedding_lookup'] = True\n    _check_config_keys(config, cls._fields)\n    kwargs = _standardize_and_copy_config(config)\n    kwargs['categorical_column'] = serialization.deserialize_feature_column(config['categorical_column'], custom_objects, columns_by_name)\n    all_initializers = dict(tf_inspect.getmembers(init_ops, tf_inspect.isclass))\n    kwargs['initializer'] = serialization._deserialize_keras_object(config['initializer'], module_objects=all_initializers, custom_objects=custom_objects)\n    return cls(**kwargs)",
    "docstring": "See 'FeatureColumn` base class.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\feature_column\\feature_column_v2.py",
    "ast_data": "FunctionDef name:from_config arg:cls arg:config arg:custom_objects arg:columns_by_name arguments arg arg arg arg If Compare Assign Call Assign Call Assign Call Assign Call Call Assign Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_set_optimization_parameters",
    "source_code": "def _set_optimization_parameters(self, parameters: optimization_parameters_pb2.OptimizationParameters):\n    if self.use_gradient_accumulation:\n        parameters.gradient_accumulation_status = optimization_parameters_pb2.GradientAccumulationStatus.ENABLED\n    else:\n        parameters.gradient_accumulation_status = optimization_parameters_pb2.GradientAccumulationStatus.DISABLED\n    if self.clip_weight_min is not None:\n        parameters.clipping_limits.lower.value = self.clip_weight_min\n    if self.clip_weight_max is not None:\n        parameters.clipping_limits.upper.value = self.clip_weight_max\n    if self.clip_gradient_min is not None:\n        parameters.gradient_clipping_limits.lower.value = self.clip_gradient_min\n    if self.clip_gradient_max is not None:\n        parameters.gradient_clipping_limits.upper.value = self.clip_gradient_max\n    if self.weight_decay_factor:\n        parameters.weight_decay_factor = self.weight_decay_factor\n        if self.multiply_weight_decay_factor_by_learning_rate:\n            parameters.multiply_weight_decay_factor_by_learning_rate = True\n    parameters.low_dimensional_packing_status = self.low_dimensional_packing_status",
    "docstring": "Sets the optimizer fields in the OptimizationParameters.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\tpu\\tpu_embedding_v2_utils.py",
    "ast_data": "FunctionDef name:_set_optimization_parameters arg:self arg:parameters arguments arg arg If Assign Assign If Compare Assign If Compare Assign If Compare Assign If Compare Assign If Assign If Assign Assign"
  },
  {
    "library": "sphinx",
    "name": "PyStaticMethod",
    "source_code": "class PyStaticMethod(PyMethod):\n    option_spec: ClassVar[OptionSpec] = PyObject.option_spec.copy()\n\n    def run(self) -> list[Node]:\n        self.name = 'py:method'\n        self.options['staticmethod'] = True\n        return super().run()",
    "docstring": "Description of a staticmethod.",
    "type": "class",
    "file_path": "sphinx\\sphinx\\domains\\python\\__init__.py",
    "ast_data": "ClassDef name:PyStaticMethod Call FunctionDef name:run arg:self arguments arg Assign Assign Return return:yes Call Call"
  },
  {
    "library": "matplotlib",
    "name": "check_in_list",
    "source_code": "def check_in_list(values, /, *, _print_supported_values=True, **kwargs):\n    if not kwargs:\n        raise TypeError('No argument to check!')\n    for key, val in kwargs.items():\n        if val not in values:\n            msg = f'{val!r} is not a valid value for {key}'\n            if _print_supported_values:\n                msg += f'; supported values are {', '.join(map(repr, values))}'\n            raise ValueError(msg)",
    "docstring": "For each *key, value* pair in *kwargs*, check that *value* is in *values*; if not, raise an appropriate ValueError. Parameters ---------- values : iterable Sequence of values to check on. _print_supported_values : bool, default: True Whether to print *values* when raising ValueError. **kwargs : dict *key, value* pairs as keyword arguments to find in *values*. Raises ------ ValueError If any *value* in *kwargs* is not found in *values*. Examples -------- >>> _api.check_in_list([\"foo\", \"bar\"], arg=arg, other_arg=other_arg)",
    "type": "function",
    "file_path": "matplotlib\\lib\\matplotlib\\_api\\__init__.py",
    "ast_data": "FunctionDef name:check_in_list arguments arg arg arg If Raise Call For Call If Compare Assign If Call Call Raise Call"
  },
  {
    "library": "tensorflow",
    "name": "_merge_row_partitions",
    "source_code": "def _merge_row_partitions(row_partitions, value, rank, dtype, validate):\n    if isinstance(value, tensor.Tensor):\n        value_row_partitions = _row_partitions_for_tensor(value, rank, dtype)\n    elif isinstance(value, ragged_tensor.RaggedTensor):\n        value_row_partitions = _row_partitions_for_ragged_tensor(value, rank, dtype)\n    else:\n        assert isinstance(value, StructuredTensor), type(value)\n        value_row_partitions = value.row_partitions[:rank - 1]\n    assert len(value_row_partitions) == rank - 1\n    if row_partitions is None:\n        return tuple(value_row_partitions)\n    else:\n        return tuple([p1._merge_precomputed_encodings(p2, validate) for p1, p2 in zip(row_partitions, value_row_partitions)])",
    "docstring": "Merges with .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\structured\\structured_tensor.py",
    "ast_data": "FunctionDef name:_merge_row_partitions arg:row_partitions arg:value arg:rank arg:dtype arg:validate arguments arg arg arg arg arg If Call Assign Call If Call Assign Call Call Call Assign Compare Call If Compare Return return:yes Call Return return:yes Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "bessel_k1",
    "source_code": "@tf_export('math.special.bessel_k1')\n@dispatch.register_unary_elementwise_api\n@dispatch.add_dispatch_support\ndef bessel_k1(x, name=None):\n    with ops.name_scope(name, 'bessel_k1', [x]):\n        return gen_special_math_ops.bessel_k1(x)",
    "docstring": "Computes the Bessel k1 function of element-wise. Modified Bessel function of order 1. It is preferable to use the numerically stabler function instead. >>> tf.math.special.bessel_k1([0.5, 1., 2., 4.]).numpy() array([1.65644112, 0.60190723, 0.13986588, 0.0124835 ], dtype=float32) Args: x: A or . Must be one of the following types: , , . name: A name for the operation (optional). Returns: A or , respectively. Has the same type as . @compatibility(scipy) Equivalent to scipy.special.k1 @end_compatibility",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\special_math_ops.py",
    "ast_data": "FunctionDef name:bessel_k1 arg:x arg:name arguments arg arg With Call Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_set_mutable",
    "source_code": "def _set_mutable(self, mutable):\n    object.__setattr__(self, '_mutable', mutable)",
    "docstring": "Change the mutability property to .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\data\\util\\options.py",
    "ast_data": "FunctionDef name:_set_mutable arg:self arg:mutable arguments arg arg Call"
  },
  {
    "library": "tensorflow",
    "name": "get_signature_runner",
    "source_code": "def get_signature_runner(self, signature_key=None):\n    if signature_key is None:\n        if len(self._signature_defs) != 1:\n            raise ValueError('SignatureDef signature_key is None and model has {0} Signatures. None is only allowed when the model has 1 SignatureDef'.format(len(self._signature_defs)))\n        else:\n            signature_key = next(iter(self._signature_defs))\n    return SignatureRunner(interpreter=self, signature_key=signature_key)",
    "docstring": "Gets callable for inference of specific SignatureDef. Example usage, None can be passed for signature_key if the model has a single Signature only. All names used are these specific SignatureDef names. Args: signature_key: Signature key for the SignatureDef, it can be None if and only if the model has a single SignatureDef. The Default value is None. Returns: This returns a callable that can run inference for SignatureDef defined by argument 'signature_key'. The callable will take key arguments corresponding to the arguments of the SignatureDef, that should have numpy values. The callable will return dictionary that maps from output names to numpy values of the computed results. Raises: ValueError: If passed signature_key is invalid.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\lite\\python\\interpreter.py",
    "ast_data": "FunctionDef name:get_signature_runner arg:self arg:signature_key arguments arg arg If Compare If Compare Call Raise Call Call Call Assign Call Call Return return:yes Call"
  },
  {
    "library": "numpy",
    "name": "lagsub",
    "source_code": "def lagsub(c1, c2):\n    return pu._sub(c1, c2)",
    "docstring": "Subtract one Laguerre series from another. Returns the difference of two Laguerre series - . The sequences of coefficients are from lowest order term to highest, i.e., [1,2,3] represents the series ``. Parameters ---------- c1, c2 : array_like 1-D arrays of Laguerre series coefficients ordered from low to high. Returns ------- out : ndarray Of Laguerre series coefficients representing their difference. See Also -------- lagadd, lagmulx, lagmul, lagdiv, lagpow Notes ----- Unlike multiplication, division, etc., the difference of two Laguerre series is a Laguerre series (without having to \"reproject\" the result onto the basis set) so subtraction, just like that of \"standard\" polynomials, is simply \"component-wise.\" Examples -------- >>> from numpy.polynomial.laguerre import lagsub >>> lagsub([1, 2, 3, 4], [1, 2, 3]) array([0., 0., 0., 4.])",
    "type": "function",
    "file_path": "numpy\\numpy\\polynomial\\laguerre.py",
    "ast_data": "FunctionDef name:lagsub arg:c1 arg:c2 arguments arg arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "ExportError",
    "source_code": "class ExportError(Exception):\n\n    def __init__(self, error_code: ExportErrorType, message: str) -> None:\n        prefix = f'[{error_code}]: '\n        super().__init__(prefix + message)",
    "docstring": "This type of exception is raised for errors that are directly caused by the user code. In general, user errors happen during model authoring, tracing, using our public facing APIs, and writing graph passes.",
    "type": "class",
    "file_path": "pytorch\\torch\\_export\\error.py",
    "ast_data": "ClassDef name:ExportError FunctionDef name:__init__ arg:self arg:error_code arg:message arguments arg arg arg Assign Call Call"
  },
  {
    "library": "matplotlib",
    "name": "get_default_size",
    "source_code": "@staticmethod\ndef get_default_size():\n    return mpl.rcParams['font.size']",
    "docstring": "Return the default font size.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\font_manager.py",
    "ast_data": "FunctionDef name:get_default_size arguments Return return:yes"
  },
  {
    "library": "scipy",
    "name": "solve",
    "source_code": "def solve(self, f, a, b, args=(), xtol=_xtol, rtol=_rtol, k=2, maxiter=_iter, disp=True):\n    self.configure(xtol=xtol, rtol=rtol, maxiter=maxiter, disp=disp, k=k)\n    status, xn = self.start(f, a, b, args)\n    if status == _ECONVERGED:\n        return self.get_result(xn)\n    c = _secant(self.ab, self.fab)\n    if not self.ab[0] < c < self.ab[1]:\n        c = sum(self.ab) / 2.0\n    fc = self._callf(c)\n    if fc == 0:\n        return self.get_result(c)\n    self.d, self.fd = self._update_bracket(c, fc)\n    self.e, self.fe = (None, None)\n    self.iterations += 1\n    while True:\n        status, xn = self.iterate()\n        if status == _ECONVERGED:\n            return self.get_result(xn)\n        if status == _ECONVERR:\n            fmt = 'Failed to converge after %d iterations, bracket is %s'\n            if disp:\n                msg = fmt % (self.iterations + 1, self.ab)\n                raise RuntimeError(msg)\n            return self.get_result(xn, _ECONVERR)",
    "docstring": "Solve f(x) = 0 given an interval containing a root.",
    "type": "method",
    "file_path": "scipy\\scipy\\optimize\\_zeros_py.py",
    "ast_data": "FunctionDef name:solve arg:self arg:f arg:a arg:b arg:args arg:xtol arg:rtol arg:k arg:maxiter arg:disp arguments arg arg arg arg arg arg arg arg arg arg Call Assign Call If Compare Return return:yes Call Assign Call If Compare Assign Call Assign Call If Compare Return return:yes Call Assign Call Assign While Assign Call If Compare Return return:yes Call If Compare Assign If Assign Raise Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "_swap_modules",
    "source_code": "def _swap_modules(ep: ExportedProgram, modules_to_swap: dict[str, torch.nn.Module]) -> torch.fx.GraphModule:\n    module_call_graph = {entry.fqn: entry.signature for entry in ep.module_call_graph if entry.signature}\n    gm = ep.module()\n    gm.validate_inputs = False\n    gm.graph.eliminate_dead_code()\n    assert isinstance(gm, torch.fx.GraphModule)\n    _fix_input_output_signature(gm, ep.module_call_graph[0].signature)\n    gm.module_call_graph = ep.module_call_graph\n    gm.train = types.MethodType(type(gm).train, gm)\n    gm.eval = types.MethodType(type(gm).eval, gm)\n    assert isinstance(gm, torch.fx.GraphModule)\n    gm = _swap_module_helper(gm, modules_to_swap, module_call_graph)\n    return gm",
    "docstring": "Unlifts the given ExportedProgram into a fx.GraphModule, and then swaps previously traced modules with new eager modules specified. Returns a fx.GraphModule with a custom forward function. Args: ep (ExportedProgram): Exported program to modify modules_to_swap (Dict[str, torch.nn.Module]): Mapping from module fqn to eager module to swap with. The specified module fqn should have also been specified in the argument to torch.export so that we know how to restore the calling convention to this argument. run_with_interpreter: Whether or not to run the graph using fx.Interpreter. Setting to true will help result in better error messages and easier debugging, but it has found to result in a QPS drop.",
    "type": "function",
    "file_path": "pytorch\\torch\\export\\_swap.py",
    "ast_data": "FunctionDef name:_swap_modules arg:ep arg:modules_to_swap arguments arg arg Assign Assign Call Assign Call Call Call Assign Assign Call Call Assign Call Call Call Assign Call Return return:yes"
  },
  {
    "library": "authlib",
    "name": "validate_exp",
    "source_code": "def validate_exp(self, now, leeway):\n    if 'exp' in self:\n        exp = self['exp']\n        if not _validate_numeric_time(exp):\n            raise InvalidClaimError('exp')\n        if exp < now - leeway:\n            raise ExpiredTokenError()",
    "docstring": "The \"exp\" (expiration time) claim identifies the expiration time on or after which the JWT MUST NOT be accepted for processing. The processing of the \"exp\" claim requires that the current date/time MUST be before the expiration date/time listed in the \"exp\" claim. Implementers MAY provide for some small leeway, usually no more than a few minutes, to account for clock skew. Its value MUST be a number containing a NumericDate value. Use of this claim is OPTIONAL.",
    "type": "method",
    "file_path": "authlib\\authlib\\jose\\rfc7519\\claims.py",
    "ast_data": "FunctionDef name:validate_exp arg:self arg:now arg:leeway arguments arg arg arg If Compare Assign If Call Raise Call If Compare Raise Call"
  },
  {
    "library": "tensorflow",
    "name": "_trackable_children",
    "source_code": "def _trackable_children(self, save_type=trackable.SaveType.CHECKPOINT, **kwargs):\n    current_graph_non_slot_variables = {}\n    current_graph_key = ops.get_default_graph()._graph_key\n    for (name, _), variable_object in sorted(self._non_slot_dict.items(), key=lambda item: item[0][0]):\n        if context.executing_eagerly() or variable_object._graph_key == current_graph_key:\n            current_graph_non_slot_variables[name] = variable_object\n    current_graph_non_slot_variables.update(super()._trackable_children(save_type, **kwargs))\n    return current_graph_non_slot_variables",
    "docstring": "From Trackable. Gather graph-specific non-slot variables to save.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\training\\optimizer.py",
    "ast_data": "FunctionDef name:_trackable_children arg:self arg:save_type arguments arg arg arg Assign Assign Call For Call Call arguments arg If BoolOp Call Compare Assign Call Call Call Return return:yes"
  },
  {
    "library": "scipy",
    "name": "check_termination",
    "source_code": "def check_termination(work):\n    stop = xp.zeros(work.Sn.shape, dtype=bool)\n    if work.nit == 0:\n        i = xp_ravel(work.a == work.b)\n        zero = xp.asarray(-xp.inf if log else 0.0)\n        zero = xp.full(work.Sn.shape, zero, dtype=Sn.dtype)\n        zero[xp.isnan(Sn)] = xp.nan\n        work.Sn[i] = zero[i]\n        work.aerr[i] = zero[i]\n        work.status[i] = eim._ECONVERGED\n        stop[i] = True\n    else:\n        rerr, aerr = _estimate_error(work, xp)\n        i = (rerr < rtol) | (aerr < atol)\n        work.aerr = xp.reshape(xp.astype(aerr, work.dtype), work.Sn.shape)\n        work.status[i] = eim._ECONVERGED\n        stop[i] = True\n    if log:\n        Sn_real = xp.real(work.Sn)\n        Sn_pos_inf = xp.isinf(Sn_real) & (Sn_real > 0)\n        i = (Sn_pos_inf | xp.isnan(work.Sn)) & ~stop\n    else:\n        i = ~xp.isfinite(work.Sn) & ~stop\n    work.status[i] = eim._EVALUEERR\n    stop[i] = True\n    return stop",
    "docstring": "Terminate due to convergence or encountering non-finite values",
    "type": "function",
    "file_path": "scipy\\scipy\\integrate\\_tanhsinh.py",
    "ast_data": "FunctionDef name:check_termination arg:work arguments arg Assign Call If Compare Assign Call Compare Assign Call Assign Call Assign Call Assign Assign Assign Assign Assign Call Assign Compare Compare Assign Call Call Assign Assign If Assign Call Assign Call Compare Assign Call Assign Call Assign Assign Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "inverse_transform",
    "source_code": "def inverse_transform(self, X, dict_type=dict):\n    check_is_fitted(self, 'feature_names_')\n    X = check_array(X, accept_sparse=['csr', 'csc'])\n    n_samples = X.shape[0]\n    names = self.feature_names_\n    dicts = [dict_type() for _ in range(n_samples)]\n    if sp.issparse(X):\n        for i, j in zip(*X.nonzero()):\n            dicts[i][names[j]] = X[i, j]\n    else:\n        for i, d in enumerate(dicts):\n            for j, v in enumerate(X[i, :]):\n                if v != 0:\n                    d[names[j]] = X[i, j]\n    return dicts",
    "docstring": "Transform array or sparse matrix X back to feature mappings. X must have been produced by this DictVectorizer's transform or fit_transform method; it may only have passed through transformers that preserve the number of features and their order. In the case of one-hot/one-of-K coding, the constructed feature names and values are returned rather than the original ones. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) Sample matrix. dict_type : type, default=dict Constructor for feature mappings. Must conform to the collections.Mapping API. Returns ------- X_original : list of dict_type objects of shape (n_samples,) Feature mappings for the samples in X.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\feature_extraction\\_dict_vectorizer.py",
    "ast_data": "FunctionDef name:inverse_transform arg:self arg:X arg:dict_type arguments arg arg arg Call Assign Call Assign Assign Assign Call Call If Call For Call Call Assign For Call For Call If Compare Assign Return return:yes"
  },
  {
    "library": "pandas",
    "name": "exceeds_info_rows",
    "source_code": "@property\ndef exceeds_info_rows(self) -> bool:\n    return bool(len(self.data) > self.max_rows)",
    "docstring": "Check if number of rows to be summarized does not exceed maximum.",
    "type": "method",
    "file_path": "pandas\\pandas\\io\\formats\\info.py",
    "ast_data": "FunctionDef name:exceeds_info_rows arg:self arguments arg Return return:yes Call Compare Call"
  },
  {
    "library": "pytorch",
    "name": "StorageReader",
    "source_code": "class StorageReader(abc.ABC):\n\n    @abc.abstractmethod\n    def reset(self, checkpoint_id: Union[str, os.PathLike, None]=None) -> None:\n        ...\n\n    @abc.abstractmethod\n    def read_metadata(self) -> Metadata:\n        pass\n\n    @abc.abstractmethod\n    def set_up_storage_reader(self, metadata: Metadata, is_coordinator: bool) -> None:\n        pass\n\n    @abc.abstractmethod\n    def prepare_local_plan(self, plan: LoadPlan) -> LoadPlan:\n        pass\n\n    @abc.abstractmethod\n    def prepare_global_plan(self, plans: list[LoadPlan]) -> list[LoadPlan]:\n        pass\n\n    @abc.abstractmethod\n    def read_data(self, plan: LoadPlan, planner: LoadPlanner) -> Future[None]:\n        pass\n\n    @classmethod\n    @abc.abstractmethod\n    def validate_checkpoint_id(cls, checkpoint_id: Union[str, os.PathLike]) -> bool:\n        ...",
    "docstring": "Interface used by ``: 0) (all ranks) set checkpoint_id if users pass a valid checkpoint_id. 1) (all ranks) read_metadata() 2) (all ranks) set_up_storage_reader() 3) (all ranks) prepare_local_plan() 4) (coordinator) prepare_global_plan() 5) (all ranks) read_data()",
    "type": "class",
    "file_path": "pytorch\\torch\\distributed\\checkpoint\\storage.py",
    "ast_data": "ClassDef name:StorageReader FunctionDef name:reset arg:self arg:checkpoint_id arguments arg arg FunctionDef name:read_metadata arg:self arguments arg FunctionDef name:set_up_storage_reader arg:self arg:metadata arg:is_coordinator arguments arg arg arg FunctionDef name:prepare_local_plan arg:self arg:plan arguments arg arg FunctionDef name:prepare_global_plan arg:self arg:plans arguments arg arg FunctionDef name:read_data arg:self arg:plan arg:planner arguments arg arg arg FunctionDef name:validate_checkpoint_id arg:cls arg:checkpoint_id arguments arg arg"
  },
  {
    "library": "tensorflow",
    "name": "_greater_flops",
    "source_code": "@ops.RegisterStatistics('Greater', 'flops')\ndef _greater_flops(graph, node):\n    return _binary_per_element_op_flops(graph, node)",
    "docstring": "Compute flops for Greater operation.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\profiler\\internal\\flops_registry.py",
    "ast_data": "FunctionDef name:_greater_flops arg:graph arg:node arguments arg arg Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "allow_cpu_device",
    "source_code": "def allow_cpu_device(self, node: fx.Node) -> bool:\n    return node.target in (torch.ops.aten.index.Tensor, torch.ops.aten.index_put.default, torch.ops.aten.index_put_.default, torch.ops.aten.copy.default, torch.ops.aten.copy_.default, torch.ops.aten.slice_scatter.default)",
    "docstring": "Returns whether a node that returns a tensor on the target device may have cpu tensors as input.",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\fx_passes\\post_grad.py",
    "ast_data": "FunctionDef name:allow_cpu_device arg:self arg:node arguments arg arg Return return:yes Compare"
  },
  {
    "library": "matplotlib",
    "name": "ToolXScale",
    "source_code": "class ToolXScale(AxisScaleBase):\n    description = 'Toggle scale X axis'\n    default_keymap = property(lambda self: mpl.rcParams['keymap.xscale'])\n\n    def set_scale(self, ax, scale):\n        ax.set_xscale(scale)",
    "docstring": "Tool to toggle between linear and logarithmic scales on the X axis.",
    "type": "class",
    "file_path": "matplotlib\\lib\\matplotlib\\backend_tools.py",
    "ast_data": "ClassDef name:ToolXScale Assign Assign Call arguments arg FunctionDef name:set_scale arg:self arg:ax arg:scale arguments arg arg arg Call"
  },
  {
    "library": "tensorflow",
    "name": "_name_element",
    "source_code": "def _name_element(self, key):\n    return key",
    "docstring": "Tells TrackableDataStructure to use keys as names as-is.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\trackable\\data_structures.py",
    "ast_data": "FunctionDef name:_name_element arg:self arg:key arguments arg arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_convert_enter",
    "source_code": "def _convert_enter(self, parent_pfor: 'PFor', enter):\n    inp, stacked, _ = parent_pfor._convert_helper(enter.op.inputs[0])\n    control_inputs = []\n    for x in enter.op.control_inputs:\n        converted = parent_pfor._convert_helper(x)\n        if not isinstance(converted, ops.Operation):\n            converted = converted.t\n        control_inputs.append(converted)\n    if control_inputs:\n        with ops.control_dependencies(control_inputs):\n            inp = array_ops.identity(inp)\n    return (inp, stacked)",
    "docstring": "Converts an Enter node.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\parallel_for\\pfor.py",
    "ast_data": "FunctionDef name:_convert_enter arg:self arg:parent_pfor arg:enter arguments arg arg arg Assign Call Assign For Assign Call If Call Assign Call If With Call Assign Call Return return:yes"
  },
  {
    "library": "sphinx",
    "name": "before_content",
    "source_code": "def before_content(self) -> None:\n    prefix = None\n    if self.names:\n        obj_name, obj_name_prefix = self.names.pop()\n        prefix = obj_name_prefix.strip('.') if obj_name_prefix else None\n        if self.allow_nesting:\n            prefix = obj_name\n    if prefix:\n        self.env.ref_context['js:object'] = prefix\n        if self.allow_nesting:\n            objects = self.env.ref_context.setdefault('js:objects', [])\n            objects.append(prefix)",
    "docstring": "Handle object nesting before content :py:class: represents JavaScript language constructs. For constructs that are nestable, this method will build up a stack of the nesting hierarchy so that it can be later de-nested correctly, in :py:meth:. For constructs that aren't nestable, the stack is bypassed, and instead only the most recent object is tracked. This object prefix name will be removed with :py:meth:. The following keys are used in `after_content` is triggered and the prefix is removed from the end of the list. js:object Current object prefix. This should generally reflect the last element in the prefix history",
    "type": "method",
    "file_path": "sphinx\\sphinx\\domains\\javascript.py",
    "ast_data": "FunctionDef name:before_content arg:self arguments arg Assign If Assign Call Assign Call If Assign If Assign If Assign Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_tf_ag_dataset_len",
    "source_code": "def _tf_ag_dataset_len(s):\n    l = s.cardinality()\n    msg = gen_string_ops.string_join(['len requires dataset with definitive cardinality, got ', gen_string_ops.as_string(l)])\n    with ops.control_dependencies([control_flow_assert.Assert(math_ops.logical_and(math_ops.not_equal(l, dataset_ops.INFINITE), math_ops.not_equal(l, dataset_ops.UNKNOWN)), [msg])]):\n        l = array_ops.identity(l)\n    return l",
    "docstring": "Autograph override of the builtin len for dataset_ops.DataSetV2.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\data\\ops\\dataset_autograph.py",
    "ast_data": "FunctionDef name:_tf_ag_dataset_len arg:s arguments arg Assign Call Assign Call Call With Call Call Call Call Call Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_initialize",
    "source_code": "def _initialize(self):\n    pass",
    "docstring": "A function that initializes the resource. Optional.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\trackable\\resource.py",
    "ast_data": "FunctionDef name:_initialize arg:self arguments arg"
  },
  {
    "library": "tensorflow",
    "name": "InputFunctionIterator",
    "source_code": "class InputFunctionIterator(DistributedIteratorV1):\n\n    def __init__(self, input_fn, input_workers, input_contexts, strategy):\n        assert isinstance(input_workers, input_lib.InputWorkers)\n        if input_workers.num_workers != len(input_contexts):\n            raise ValueError('Number of input workers (%d) is not same as number of input_contexts (%d)' % (input_workers.num_workers, len(input_contexts)))\n        iterators = []\n        for i, ctx in enumerate(input_contexts):\n            worker = input_workers.worker_devices[i]\n            with ops.device(worker):\n                result = input_fn(ctx)\n                devices = input_workers.compute_devices_for_worker(i)\n                if isinstance(result, data_types.DatasetV2):\n                    iterator = _SingleWorkerDatasetIterator(result, worker, devices)\n                elif callable(result):\n                    iterator = _SingleWorkerCallableIterator(result, worker, devices)\n                else:\n                    raise ValueError('input_fn must return a tf.data.Dataset or a callable.')\n                iterators.append(iterator)\n        super(InputFunctionIterator, self).__init__(input_workers, iterators, strategy, cardinality=cardinality_lib.UNKNOWN, enable_get_next_as_optional=False)\n        self._enable_get_next_as_optional = False",
    "docstring": "Iterator created from input function.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\v1\\input_lib.py",
    "ast_data": "ClassDef name:InputFunctionIterator FunctionDef name:__init__ arg:self arg:input_fn arg:input_workers arg:input_contexts arg:strategy arguments arg arg arg arg arg Call If Compare Call Raise Call Call Assign For Call Assign With Call Assign Call Assign Call If Call Assign Call If Call Assign Call Raise Call Call Call Call Assign"
  },
  {
    "library": "matplotlib",
    "name": "get_filternorm",
    "source_code": "def get_filternorm(self):\n    return self._filternorm",
    "docstring": "Return whether the resize filter normalizes the weights.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\image.py",
    "ast_data": "FunctionDef name:get_filternorm arg:self arguments arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "initial_seed",
    "source_code": "def initial_seed() -> int:\n    _lazy_init()\n    idx = current_device()\n    default_generator = torch.cuda.default_generators[idx]\n    return default_generator.initial_seed()",
    "docstring": "Return the current random seed of the current GPU. .. warning:: This function eagerly initializes CUDA.",
    "type": "function",
    "file_path": "pytorch\\torch\\cuda\\random.py",
    "ast_data": "FunctionDef name:initial_seed arguments Call Assign Call Assign Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "open_group",
    "source_code": "def open_group(self, s, gid=None):\n    pass",
    "docstring": "Open a grouping element with label *s* and *gid* (if set) as id. Only used by the SVG renderer.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\backend_bases.py",
    "ast_data": "FunctionDef name:open_group arg:self arg:s arg:gid arguments arg arg arg"
  },
  {
    "library": "tensorflow",
    "name": "constant_value",
    "source_code": "def constant_value(pred):\n    if isinstance(pred, int):\n        if pred == 1:\n            pred = True\n        elif pred == 0:\n            pred = False\n    if isinstance(pred, variables.Variable):\n        return None\n    return smart_module.smart_constant_value(pred)",
    "docstring": "Return the bool value for , or None if had a dynamic value. Args: pred: A scalar, either a Python bool or a TensorFlow boolean variable or tensor, or the Python integer 1 or 0. Returns: True or False if has a constant boolean value, None otherwise. Raises: TypeError: If is not a Variable, Tensor or bool, or Python integer 1 or 0.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\layers\\utils.py",
    "ast_data": "FunctionDef name:constant_value arg:pred arguments arg If Call If Compare Assign If Compare Assign If Call Return return:no Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_NextIterationGrad",
    "source_code": "@ops.RegisterGradient('NextIteration')\ndef _NextIterationGrad(_, grad):\n    return grad",
    "docstring": "A forward next_iteration is translated into a backprop identity. Note that the backprop next_iteration is added in switch grad.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\control_flow_grad.py",
    "ast_data": "FunctionDef name:_NextIterationGrad arg:_ arg:grad arguments arg arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_merge_run_options",
    "source_code": "def _merge_run_options(self, options, incoming_options):\n    options.trace_level = max(options.trace_level, incoming_options.trace_level)\n    options.timeout_in_ms = max(options.timeout_in_ms, incoming_options.timeout_in_ms)\n    options.inter_op_thread_pool = max(options.inter_op_thread_pool, incoming_options.inter_op_thread_pool)\n    options.output_partition_graphs = max(options.output_partition_graphs, incoming_options.output_partition_graphs)\n    options.debug_options.debug_tensor_watch_opts.extend(incoming_options.debug_options.debug_tensor_watch_opts)\n    options.debug_options.reset_disk_byte_usage = options.debug_options.reset_disk_byte_usage or incoming_options.debug_options.reset_disk_byte_usage\n    options.report_tensor_allocations_upon_oom = options.report_tensor_allocations_upon_oom or incoming_options.report_tensor_allocations_upon_oom",
    "docstring": "Merge two instances of RunOptions into the first one. During the merger, the numerical fields including trace_level, timeout_in_ms, inter_op_thread_pool are set to the larger one of the two. The boolean value is set to the logical OR of the two. debug_tensor_watch_opts of the original options is extended with that from the incoming one. Args: options: The options to merge into. incoming_options: The options to be merged into the first argument.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\training\\monitored_session.py",
    "ast_data": "FunctionDef name:_merge_run_options arg:self arg:options arg:incoming_options arguments arg arg arg Assign Call Assign Call Assign Call Assign Call Call Assign BoolOp Assign BoolOp"
  },
  {
    "library": "scipy",
    "name": "A",
    "source_code": "@property\ndef A(self):\n    return self._A",
    "docstring": "State matrix of the system.",
    "type": "method",
    "file_path": "scipy\\scipy\\signal\\_ltisys.py",
    "ast_data": "FunctionDef name:A arg:self arguments arg Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "fit",
    "source_code": "@_fit_context(prefer_skip_nested_validation=False)\ndef fit(self, X, Y, **fit_params):\n    super().fit(X, Y, **fit_params)\n    return self",
    "docstring": "Fit the model to data matrix X and targets Y. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) The input data. Y : array-like of shape (n_samples, n_classes) The target values. **fit_params : dict of string -> object Parameters passed to the method at each step of the regressor chain. .. versionadded:: 0.23 Returns ------- self : object Returns a fitted instance.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\multioutput.py",
    "ast_data": "FunctionDef name:fit arg:self arg:X arg:Y arguments arg arg arg arg Call Call Return return:yes Call"
  },
  {
    "library": "django",
    "name": "geom_type",
    "source_code": "@property\ndef geom_type(self):\n    return OGRGeomType(capi.get_fd_geom_type(self._ldefn))",
    "docstring": "Return the geometry type (OGRGeomType) of the Layer.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\gdal\\layer.py",
    "ast_data": "FunctionDef name:geom_type arg:self arguments arg Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "ExponentialDecay",
    "source_code": "class ExponentialDecay(LearningRateSchedule):\n\n    def __init__(self, initial_learning_rate, decay_steps, decay_rate, staircase=False, name=None):\n        super(ExponentialDecay, self).__init__()\n        self.initial_learning_rate = initial_learning_rate\n        self.decay_steps = decay_steps\n        self.decay_rate = decay_rate\n        self.staircase = staircase\n        self.name = name\n\n    def __call__(self, step):\n        with ops.name_scope_v2(self.name or 'ExponentialDecay') as name:\n            initial_learning_rate = tensor_conversion.convert_to_tensor_v2_with_dispatch(self.initial_learning_rate, name='initial_learning_rate')\n            dtype = initial_learning_rate.dtype\n            decay_steps = math_ops.cast(self.decay_steps, dtype)\n            decay_rate = math_ops.cast(self.decay_rate, dtype)\n            global_step_recomp = math_ops.cast(step, dtype)\n            p = global_step_recomp / decay_steps\n            if self.staircase:\n                p = math_ops.floor(p)\n            return math_ops.multiply(initial_learning_rate, math_ops.pow(decay_rate, p), name=name)\n\n    def get_config(self):\n        return {'initial_learning_rate': self.initial_learning_rate, 'decay_steps': self.decay_steps, 'decay_rate': self.decay_rate, 'staircase': self.staircase, 'name': self.name}",
    "docstring": "A LearningRateSchedule that uses an exponential decay schedule. When training a model, it is often useful to lower the learning rate as the training progresses. This schedule applies an exponential decay function to an optimizer step, given a provided initial learning rate. The schedule a 1-arg callable that produces a decayed learning rate when passed the current optimizer step. This can be useful for changing the learning rate value across different invocations of optimizer functions. It is computed as: If the argument is , then is an integer division and the decayed learning rate follows a staircase function. You can pass this schedule directly into a as the learning rate. Example: When fitting a Keras model, decay every 100000 steps with a base of 0.96: The learning rate schedule is also serializable and deserializable using and . Returns: A 1-arg callable learning rate schedule that takes the current optimizer step and outputs the decayed learning rate, a scalar of the same type as .",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\optimizer_v2\\learning_rate_schedule.py",
    "ast_data": "ClassDef name:ExponentialDecay FunctionDef name:__init__ arg:self arg:initial_learning_rate arg:decay_steps arg:decay_rate arg:staircase arg:name arguments arg arg arg arg arg arg Call Call Assign Assign Assign Assign Assign FunctionDef name:__call__ arg:self arg:step arguments arg arg With Call BoolOp Assign Call Assign Assign Call Assign Call Assign Call Assign If Assign Call Return return:yes Call Call FunctionDef name:get_config arg:self arguments arg Return return:yes"
  },
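The decay rule implemented by `__call__` above, reduced to plain Python; the defaults mirror the docstring's "decay every 100000 steps with a base of 0.96" example:

```python
import math

def exponential_decay(step, initial_lr=0.1, decay_steps=100_000,
                      decay_rate=0.96, staircase=False):
    # decayed_lr = initial_lr * decay_rate ** (step / decay_steps);
    # with staircase=True the exponent is floored, giving a step function.
    p = step / decay_steps
    if staircase:
        p = math.floor(p)
    return initial_lr * decay_rate ** p

print(exponential_decay(0))                       # 0.1
print(exponential_decay(100_000))                 # 0.096
print(exponential_decay(50_000, staircase=True))  # still 0.1
```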
  {
    "library": "pytorch",
    "name": "Threshold",
    "source_code": "class Threshold(Module):\n    __constants__ = ['threshold', 'value', 'inplace']\n    threshold: float\n    value: float\n    inplace: bool\n\n    def __init__(self, threshold: float, value: float, inplace: bool=False) -> None:\n        super().__init__()\n        self.threshold = threshold\n        self.value = value\n        self.inplace = inplace\n\n    def forward(self, input: Tensor) -> Tensor:\n        return F.threshold(input, self.threshold, self.value, self.inplace)\n\n    def extra_repr(self):\n        inplace_str = ', inplace=True' if self.inplace else ''\n        return f'threshold={self.threshold}, value={self.value}{inplace_str}'",
    "docstring": "Thresholds each element of the input Tensor. Threshold is defined as: .. math:: y = \\begin{cases} x, &\\text{ if } x > \\text{threshold} \\\\ \\text{value}, &\\text{ otherwise } \\end{cases} Args: threshold: The value to threshold at value: The value to replace with inplace: can optionally do the operation in-place. Default: `(*)*(*)`, same shape as the input. .. image:: ../scripts/activation_images/Threshold.png Examples:: >>> m = nn.Threshold(0, 0.5) >>> input = torch.arange(-3, 3) >>> output = m(input)",
    "type": "class",
    "file_path": "pytorch\\torch\\nn\\modules\\activation.py",
    "ast_data": "ClassDef name:Threshold Assign FunctionDef name:__init__ arg:self arg:threshold arg:value arg:inplace arguments arg arg arg arg Call Call Assign Assign Assign FunctionDef name:forward arg:self arg:input arguments arg arg Return return:yes Call FunctionDef name:extra_repr arg:self arguments arg Assign Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_monitor_workers",
    "source_code": "@abc.abstractmethod\ndef _monitor_workers(self, worker_group: WorkerGroup) -> RunResult:\n    raise NotImplementedError",
    "docstring": "Check on the workers for the ``. This function also returns the new state of the worker group.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\elastic\\agent\\server\\api.py",
    "ast_data": "FunctionDef name:_monitor_workers arg:self arg:worker_group arguments arg arg Raise"
  },
  {
    "library": "matplotlib",
    "name": "clim",
    "source_code": "def clim(vmin: float | None=None, vmax: float | None=None) -> None:\n    im = gci()\n    if im is None:\n        raise RuntimeError('You must first define an image, e.g., with imshow')\n    im.set_clim(vmin, vmax)",
    "docstring": "Set the color limits of the current image. If either *vmin* or *vmax* is None, the image min/max respectively will be used for color scaling. If you want to set the clim of multiple images, use on every image, for example:: for im in gca().get_images(): im.set_clim(0, 0.5)",
    "type": "function",
    "file_path": "matplotlib\\lib\\matplotlib\\pyplot.py",
    "ast_data": "FunctionDef name:clim arg:vmin arg:vmax arguments arg arg Assign Call If Compare Raise Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_set_rank_if_unknown",
    "source_code": "def _set_rank_if_unknown(self, new_rank: int) -> 'DynamicRaggedShape.Spec':\n    if new_rank is None:\n        raise TypeError('new_rank is None, but expected int')\n    if new_rank < 0:\n        raise ValueError('Rank must be non-negative')\n    current_rank = self.rank\n    if current_rank is not None and current_rank < new_rank:\n        raise ValueError('Rank is {current_rank}, expected at least {new_rank}.'.format(current_rank=current_rank, new_rank=new_rank))\n    if current_rank is not None:\n        return self\n    if self._row_partitions:\n        new_inner_rank = max(new_rank - self.num_row_partitions, 1)\n        first_dim = self._row_partitions[-1].nvals\n        static_inner_shape = tensor_shape.TensorShape([first_dim] + [None] * (new_inner_rank - 1))\n    else:\n        static_inner_shape = tensor_shape.TensorShape([None] * new_rank)\n    return DynamicRaggedShape.Spec(row_partitions=self._row_partitions, static_inner_shape=static_inner_shape, dtype=self.dtype)",
    "docstring": "Ensures this has a known rank at least new_rank.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\dynamic_ragged_shape.py",
    "ast_data": "FunctionDef name:_set_rank_if_unknown arg:self arg:new_rank arguments arg arg If Compare Raise Call If Compare Raise Call Assign If BoolOp Compare Compare Raise Call Call If Compare Return return:yes If Assign Call Assign Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "RepresentativeDatasetLoader",
    "source_code": "class RepresentativeDatasetLoader:\n\n    def load(self) -> RepresentativeDatasetMapping:\n        raise NotImplementedError('Method \"load\" is not implemented.')",
    "docstring": "Representative dataset loader. Exposes the method that loads the representative dataset from files.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\compiler\\mlir\\quantization\\tensorflow\\python\\representative_dataset.py",
    "ast_data": "ClassDef name:RepresentativeDatasetLoader FunctionDef name:load arg:self arguments arg Raise Call"
  },
  {
    "library": "kornia",
    "name": "elu_feature_map",
    "source_code": "def elu_feature_map(x: Tensor) -> Tensor:\n    return torch.nn.functional.elu(x) + 1",
    "docstring": "Apply elu activation.",
    "type": "function",
    "file_path": "kornia\\kornia\\feature\\loftr\\loftr_module\\linear_attention.py",
    "ast_data": "FunctionDef name:elu_feature_map arg:x arguments arg Return return:yes Call"
  },
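Because `elu(x)` is bounded below by -1, `elu(x) + 1` stays strictly positive, which is the property linear-attention feature maps need. A runnable check:

```python
import torch
import torch.nn.functional as F

def elu_feature_map(x: torch.Tensor) -> torch.Tensor:
    # elu saturates at -1 as x -> -inf, so elu(x) + 1 > 0 everywhere
    return F.elu(x) + 1

x = torch.tensor([-5.0, 0.0, 2.0])
print(elu_feature_map(x))  # ~tensor([0.0067, 1.0000, 3.0000]) -- all positive
```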
  {
    "library": "pytorch",
    "name": "save_binary",
    "source_code": "def save_binary(self, package, resource, binary: bytes):\n    filename = self._filename(package, resource)\n    self._write(filename, binary)",
    "docstring": "Save raw bytes to the package. Args: package (str): The name of module package this resource should go it (e.g. ``). resource (str): A unique name for the resource, used to identify it to load. binary (str): The data to save.",
    "type": "method",
    "file_path": "pytorch\\torch\\package\\package_exporter.py",
    "ast_data": "FunctionDef name:save_binary arg:self arg:package arg:resource arg:binary arguments arg arg arg arg Assign Call Call"
  },
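A hedged usage sketch with torch.package's exporter/importer pair; the archive name `pkg.pt` and the package/resource names are made up for illustration:

```python
import torch.package as package

# Write raw bytes into a package archive, then read them back.
with package.PackageExporter("pkg.pt") as exporter:
    exporter.save_binary("my_pkg", "blob.bin", b"\x00\x01\x02")

importer = package.PackageImporter("pkg.pt")
data = importer.load_binary("my_pkg", "blob.bin")
assert data == b"\x00\x01\x02"
```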
  {
    "library": "scipy",
    "name": "minimum",
    "source_code": "def minimum(self, other):\n    return self._maximum_minimum(other, np.minimum)",
    "docstring": "Element-wise minimum between this and another array/matrix.",
    "type": "method",
    "file_path": "scipy\\scipy\\sparse\\_base.py",
    "ast_data": "FunctionDef name:minimum arg:self arg:other arguments arg arg Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "VersioneerConfig",
    "source_code": "class VersioneerConfig:\n    pass",
    "docstring": "Container for Versioneer configuration parameters.",
    "type": "class",
    "file_path": "pandas\\pandas\\_version.py",
    "ast_data": "ClassDef name:VersioneerConfig"
  },
  {
    "library": "kornia",
    "name": "batch_2x2_invQ",
    "source_code": "def batch_2x2_invQ(m: Tensor) -> Tensor:\n    return m @ m.transpose(-1, -2)",
    "docstring": "Returns inverse Q of batch of 2x2 matrices.",
    "type": "function",
    "file_path": "kornia\\kornia\\feature\\adalam\\utils.py",
    "ast_data": "FunctionDef name:batch_2x2_invQ arg:m arguments arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "from_config",
    "source_code": "@classmethod\ndef from_config(cls, config):\n    return cls(**config)",
    "docstring": "Creates a regularizer from its config. This method is the reverse of , capable of instantiating the same regularizer from the config dictionary. This method is used by saving and loading models to HDF5 formats, Keras model cloning, some visualization utilities, and exporting models to and from JSON. Args: config: A Python dictionary, typically the output of get_config. Returns: A regularizer instance.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\regularizers.py",
    "ast_data": "FunctionDef name:from_config arg:cls arg:config arguments arg arg Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "__init__",
    "source_code": "def __init__(self, extremes):\n    x0, x1, y0, y1 = extremes\n    self._tbbox = Bbox.from_extents(x0, y0, x1, y1)",
    "docstring": "This subclass always returns the same bounding box. Parameters ---------- extremes : (float, float, float, float) The bounding box that this helper always returns.",
    "type": "method",
    "file_path": "matplotlib\\lib\\mpl_toolkits\\axisartist\\floating_axes.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:extremes arguments arg arg Assign Assign Call"
  },
  {
    "library": "matplotlib",
    "name": "minposy",
    "source_code": "@property\ndef minposy(self):\n    return self._minpos[1]",
    "docstring": "The minimum positive value in the *y*-direction within the Bbox. This is useful when dealing with logarithmic scales and other scales where negative bounds result in floating point errors, and will be used as the minimum *y*-extent instead of *y0*.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\transforms.py",
    "ast_data": "FunctionDef name:minposy arg:self arguments arg Return return:yes"
  },
  {
    "library": "scrapy",
    "name": "adapt_response",
    "source_code": "def adapt_response(self, response: Response) -> Response:\n    return response",
    "docstring": "You can override this function in order to make any changes you want to into the feed before parsing it. This function must return a response.",
    "type": "method",
    "file_path": "scrapy\\scrapy\\spiders\\feed.py",
    "ast_data": "FunctionDef name:adapt_response arg:self arg:response arguments arg arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "memory_stats",
    "source_code": "def memory_stats(self, groups_dict: Optional[dict[str, sympy.Expr]]=None) -> MemoryStats:\n    if groups_dict is None:\n        groups = (self.numel, self.reduction_numel)\n    elif groups_dict.keys() == OrderedSet(['x', 'r0_']):\n        groups = (groups_dict['x'], groups_dict['r0_'])\n    else:\n        raise NotImplementedError(f'groups_dict={groups_dict!r}')\n    result = self._stats_cache.get(groups)\n    if result is None:\n        self._stats_cache[groups] = result = MemoryStats.compute(MemoryEstimator(self, groups))\n    return result",
    "docstring": "Analysis to generate features that can be used in heuristics",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\codegen\\simd_kernel_features.py",
    "ast_data": "FunctionDef name:memory_stats arg:self arg:groups_dict arguments arg arg If Compare Assign If Compare Call Call Assign Raise Call Assign Call If Compare Assign Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "inverse_stft_window_fn_inner",
    "source_code": "def inverse_stft_window_fn_inner(frame_length, dtype):\n    with ops.name_scope(name, 'inverse_stft_window_fn', [forward_window_fn]):\n        frame_step_ = ops.convert_to_tensor(frame_step, name='frame_step')\n        frame_step_.shape.assert_has_rank(0)\n        frame_length = ops.convert_to_tensor(frame_length, name='frame_length')\n        frame_length.shape.assert_has_rank(0)\n        forward_window = forward_window_fn(frame_length, dtype=dtype)\n        denom = math_ops.square(forward_window)\n        overlaps = -(-frame_length // frame_step_)\n        denom = array_ops.pad(denom, [(0, overlaps * frame_step_ - frame_length)])\n        denom = array_ops.reshape(denom, [overlaps, frame_step_])\n        denom = math_ops.reduce_sum(denom, 0, keepdims=True)\n        denom = array_ops.tile(denom, [overlaps, 1])\n        denom = array_ops.reshape(denom, [overlaps * frame_step_])\n        return forward_window / denom[:frame_length]",
    "docstring": "Computes a window that can be used in . Args: frame_length: An integer scalar . The window length in samples. dtype: Data type of waveform passed to . Returns: A window suitable for reconstructing original waveform in . Raises: ValueError: If is not scalar, is not a callable that takes a window length and a keyword argument and returns a of samples in the provided datatype is not scalar, or is not scalar.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\signal\\spectral_ops.py",
    "ast_data": "FunctionDef name:inverse_stft_window_fn_inner arg:frame_length arg:dtype arguments arg arg With Call Assign Call Call Assign Call Call Assign Call Assign Call Assign Assign Call Assign Call Assign Call Assign Call Assign Call Return return:yes"
  },
  {
    "library": "pandas",
    "name": "min",
    "source_code": "def min(self, axis: AxisInt | None=None, skipna: bool=True, *args, **kwargs):\n    nv.validate_min(args, kwargs)\n    nv.validate_minmax_axis(axis)\n    if not len(self):\n        return self._na_value\n    if len(self) and self.is_monotonic_increasing:\n        first = self[0]\n        if not isna(first):\n            return first\n    if not self._is_multi and self.hasnans:\n        mask = self._isnan\n        if not skipna or mask.all():\n            return self._na_value\n    if not self._is_multi and (not isinstance(self._values, np.ndarray)):\n        return self._values._reduce(name='min', skipna=skipna)\n    return nanops.nanmin(self._values, skipna=skipna)",
    "docstring": "Return the minimum value of the Index. Parameters ---------- axis : {None} Dummy argument for consistency with Series. skipna : bool, default True Exclude NA/null values when showing the result. *args, **kwargs Additional arguments and keywords for compatibility with NumPy. Returns ------- scalar Minimum value. See Also -------- Index.max : Return the maximum value of the object. Series.min : Return the minimum value in a Series. DataFrame.min : Return the minimum values in a DataFrame. Examples -------- >>> idx = pd.Index([3, 2, 1]) >>> idx.min() 1 >>> idx = pd.Index([\"c\", \"b\", \"a\"]) >>> idx.min() 'a' For a MultiIndex, the minimum is determined lexicographically. >>> idx = pd.MultiIndex.from_product([(\"a\", \"b\"), (2, 1)]) >>> idx.min() ('a', 1)",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\indexes\\base.py",
    "ast_data": "FunctionDef name:min arg:self arg:axis arg:skipna arguments arg arg arg arg arg Call Call If Call Return return:yes If BoolOp Call Assign If Call Return return:yes If BoolOp Assign If BoolOp Call Return return:yes If BoolOp Call Return return:yes Call Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "get_size",
    "source_code": "def get_size(self):\n    return self._size",
    "docstring": "Return the font size.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\font_manager.py",
    "ast_data": "FunctionDef name:get_size arg:self arguments arg Return return:yes"
  },
  {
    "library": "django",
    "name": "dates",
    "source_code": "def dates(self, field_name, kind, order='ASC'):\n    if kind not in ('year', 'month', 'week', 'day'):\n        raise ValueError(\"'kind' must be one of 'year', 'month', 'week', or 'day'.\")\n    if order not in ('ASC', 'DESC'):\n        raise ValueError(\"'order' must be either 'ASC' or 'DESC'.\")\n    return self.annotate(datefield=Trunc(field_name, kind, output_field=DateField()), plain_field=F(field_name)).values_list('datefield', flat=True).distinct().filter(plain_field__isnull=False).order_by(('-' if order == 'DESC' else '') + 'datefield')",
    "docstring": "Return a list of date objects representing all available dates for the given field_name, scoped to 'kind'.",
    "type": "method",
    "file_path": "django\\django\\db\\models\\query.py",
    "ast_data": "FunctionDef name:dates arg:self arg:field_name arg:kind arg:order arguments arg arg arg arg If Compare Raise Call If Compare Raise Call Return return:yes Call Call Call Call Call Call Call Call Compare"
  },
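A hypothetical usage sketch; it assumes a configured Django project with an `Article` model exposing a `pub_date` date field, neither of which comes from the snippet above:

```python
# Inside a configured Django project:
from myapp.models import Article  # hypothetical model

# Distinct months that have at least one article, newest first,
# returned as datetime.date objects truncated to the month.
for d in Article.objects.dates("pub_date", "month", order="DESC"):
    print(d.isoformat())
```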
  {
    "library": "pytorch",
    "name": "get_ctx",
    "source_code": "def get_ctx() -> 'torch._library.fake_impl.FakeImplCtx':\n    return torch._library.fake_impl.global_ctx_getter()",
    "docstring": "get_ctx() returns the current AbstractImplCtx object. Calling `torch.library.register_fake` for more usage details.",
    "type": "function",
    "file_path": "pytorch\\torch\\library.py",
    "ast_data": "FunctionDef name:get_ctx arguments Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "map_fn",
    "source_code": "def map_fn(*columns):\n    features = collections.OrderedDict(zip(column_names, columns))\n    if label_name is not None:\n        label = features.pop(label_name)\n        return (features, label)\n    return features",
    "docstring": "Organizes columns into a features dictionary. Args: *columns: list of s corresponding to one csv record. Returns: An OrderedDict of feature names to values for that particular record. If label_name is provided, extracts the label feature to be returned as the second element of the tuple.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\data\\experimental\\ops\\readers.py",
    "ast_data": "FunctionDef name:map_fn arguments arg Assign Call Call If Compare Assign Call Return return:yes Return return:yes"
  },
  {
    "library": "scipy",
    "name": "todia",
    "source_code": "def todia(self, copy=False):\n    return self.tocoo(copy=copy).todia(copy=False)",
    "docstring": "Convert this array/matrix to sparse DIAgonal format. With copy=False, the data/indices may be shared between this array/matrix and the resultant dia_array/matrix.",
    "type": "method",
    "file_path": "scipy\\scipy\\sparse\\_base.py",
    "ast_data": "FunctionDef name:todia arg:self arg:copy arguments arg arg Return return:yes Call Call"
  },
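A small usage sketch showing the conversion and the DIA-format attributes (`offsets`, `data`) it produces:

```python
import numpy as np
from scipy.sparse import csr_array

a = csr_array(np.array([[1, 0, 0],
                        [4, 2, 0],
                        [0, 5, 3]]))
d = a.todia()     # routed through tocoo(), as above
print(d.offsets)  # diagonal offsets, e.g. [-1  0]
print(d.data)     # one row of values per stored diagonal
```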
  {
    "library": "numpy",
    "name": "mr_class",
    "source_code": "class mr_class(MAxisConcatenator):\n    __slots__ = ()\n\n    def __init__(self):\n        MAxisConcatenator.__init__(self, 0)",
    "docstring": "Translate slice objects to concatenation along the first axis. This is the masked array version of . See Also -------- r_ Examples -------- >>> import numpy as np >>> np.ma.mr_[np.ma.array([1,2,3]), 0, 0, np.ma.array([4,5,6])] masked_array(data=[1, 2, 3, ..., 4, 5, 6], mask=False, fill_value=999999)",
    "type": "class",
    "file_path": "numpy\\numpy\\ma\\extras.py",
    "ast_data": "ClassDef name:mr_class Assign FunctionDef name:__init__ arg:self arguments arg Call"
  },
  {
    "library": "pytorch",
    "name": "remote_shards",
    "source_code": "def remote_shards(self) -> dict[int, list[rpc.RRef[Shard]]]:\n    if not self._init_rrefs:\n        raise RuntimeError('ShardedTensor created with init_rrefs=False, no RRefs to remote shards available')\n    return self._remote_shards",
    "docstring": "Returns a Dict[int, RRef] with keys being the RPC rank and values being RRefs to shards on that rank. Need to initialize the RPC framework for this functionality. Raises an exception if ShardedTensor was created with ``",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\_shard\\sharded_tensor\\api.py",
    "ast_data": "FunctionDef name:remote_shards arg:self arguments arg If Raise Call Return return:yes"
  },
  {
    "library": "numpy",
    "name": "_sanitize_relative_path",
    "source_code": "def _sanitize_relative_path(self, path):\n    last = None\n    path = os.path.normpath(path)\n    while path != last:\n        last = path\n        path = path.lstrip(os.sep).lstrip('/')\n        path = path.lstrip(os.pardir).removeprefix('..')\n        drive, path = os.path.splitdrive(path)\n    return path",
    "docstring": "Return a sanitised relative path for which os.path.abspath(os.path.join(base, path)).startswith(base)",
    "type": "method",
    "file_path": "numpy\\numpy\\lib\\_datasource.py",
    "ast_data": "FunctionDef name:_sanitize_relative_path arg:self arg:path arguments arg arg Assign Assign Call While Compare Assign Assign Call Call Assign Call Call Assign Call Return return:yes"
  },
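The loop runs standalone; a sketch with the same body as a free function, showing how leading separators and parent references are peeled off (outputs shown for POSIX):

```python
import os

def sanitize_relative_path(path):
    # Same loop as above: strip leading separators and parent
    # references until the path stops changing.
    last = None
    path = os.path.normpath(path)
    while path != last:
        last = path
        path = path.lstrip(os.sep).lstrip('/')
        path = path.lstrip(os.pardir).removeprefix('..')
        drive, path = os.path.splitdrive(path)
    return path

print(sanitize_relative_path('../../etc/passwd'))   # 'etc/passwd'
print(sanitize_relative_path('/abs/dir/file.txt'))  # 'abs/dir/file.txt'
```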
  {
    "library": "tensorflow",
    "name": "async_wait",
    "source_code": "def async_wait():\n    disable_async_executor_env_var = 'TF_PS_DISABLE_ASYNC_EXECUTOR_GLOBALLY'\n    if os.environ.get(disable_async_executor_env_var) == str(True):\n        return\n    if context()._context_handle is not None:\n        context().sync_executors()",
    "docstring": "Sync all async operations and raise any errors during execution. In async execution mode, an op/function call can return before finishing the actual execution. Calling this method creates a synchronization barrier for all async op and function execution. It only returns when all pending nodes are finished, potentially raising exceptions if async execution results in an error state. It is a no-op if the context is not initialized.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\context.py",
    "ast_data": "FunctionDef name:async_wait arguments Assign If Compare Call Call Return return:no If Compare Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "get_attr_inference_rule",
    "source_code": "@register_inference_rule(getattr)\ndef get_attr_inference_rule(n: Node, traced):\n    attr_name = n.args[1]\n    if attr_name == 'shape':\n        n.type = Dyn\n    else:\n        raise TypeError('Not yet implemented')\n    return n.type",
    "docstring": "The current getattr rule only handles the shape attribute Can be extended to other attributes The most representitive type we have is \"Dyn\" but the system can be extended with more types, such as a type to represent shapes",
    "type": "function",
    "file_path": "pytorch\\torch\\fx\\experimental\\graph_gradual_typechecker.py",
    "ast_data": "FunctionDef name:get_attr_inference_rule arg:n arg:traced arguments arg arg Assign If Compare Assign Raise Call Return return:yes Call"
  },
  {
    "library": "django",
    "name": "check_constraints",
    "source_code": "def check_constraints(self, table_names=None):\n    with self.cursor() as cursor:\n        if table_names is None:\n            table_names = self.introspection.table_names(cursor)\n        for table_name in table_names:\n            primary_key_column_name = self.introspection.get_primary_key_column(cursor, table_name)\n            if not primary_key_column_name:\n                continue\n            relations = self.introspection.get_relations(cursor, table_name)\n            for column_name, (referenced_column_name, referenced_table_name) in relations.items():\n                cursor.execute('\\n                        SELECT REFERRING.`%s`, REFERRING.`%s` FROM `%s` as REFERRING\\n                        LEFT JOIN `%s` as REFERRED\\n                        ON (REFERRING.`%s` = REFERRED.`%s`)\\n                        WHERE REFERRING.`%s` IS NOT NULL AND REFERRED.`%s` IS NULL\\n                        ' % (primary_key_column_name, column_name, table_name, referenced_table_name, column_name, referenced_column_name, column_name, referenced_column_name))\n                for bad_row in cursor.fetchall():\n                    raise IntegrityError(\"The row in table '%s' with primary key '%s' has an invalid foreign key: %s.%s contains a value '%s' that does not have a corresponding value in %s.%s.\" % (table_name, bad_row[0], table_name, column_name, bad_row[1], referenced_table_name, referenced_column_name))",
    "docstring": "Check each table name in for rows with invalid foreign key references. This method is intended to be used in conjunction with and , to determine if rows with invalid references were entered while constraint checks were off.",
    "type": "method",
    "file_path": "django\\django\\db\\backends\\mysql\\base.py",
    "ast_data": "FunctionDef name:check_constraints arg:self arg:table_names arguments arg arg With Call If Compare Assign Call For Assign Call If Assign Call For Call Call For Call Raise Call"
  },
  {
    "library": "tensorflow",
    "name": "UnknownError",
    "source_code": "@tf_export('errors.UnknownError')\nclass UnknownError(OpError):\n\n    def __init__(self, node_def, op, message, *args):\n        super(UnknownError, self).__init__(node_def, op, message, UNKNOWN, *args)",
    "docstring": "Unknown error. An example of where this error may be returned is if a Status value received from another address space belongs to an error-space that is not known to this address space. Also, errors raised by APIs that do not return enough error information may be converted to this error.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\errors_impl.py",
    "ast_data": "ClassDef name:UnknownError FunctionDef name:__init__ arg:self arg:node_def arg:op arg:message arguments arg arg arg arg arg Call Call Call"
  },
  {
    "library": "scikit-learn",
    "name": "transform",
    "source_code": "def transform(self, X):\n    check_is_fitted(self)\n    if self.voting == 'soft':\n        probas = self._collect_probas(X)\n        if not self.flatten_transform:\n            return probas\n        return np.hstack(probas)\n    else:\n        return self._predict(X)",
    "docstring": "Return class labels or probabilities for X for each estimator. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) Training vectors, where is the number of samples and is the number of features. Returns ------- probabilities_or_labels If and : returns ndarray of shape (n_samples, n_classifiers * n_classes), being class probabilities calculated by each classifier. If flatten_transform=Falsevoting='hard'`: ndarray of shape (n_samples, n_classifiers), being class labels predicted by each classifier.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\ensemble\\_voting.py",
    "ast_data": "FunctionDef name:transform arg:self arg:X arguments arg arg Call If Compare Assign Call If Return return:yes Return return:yes Call Return return:yes Call"
  },
  {
    "library": "numpy",
    "name": "_set_pad_area",
    "source_code": "def _set_pad_area(padded, axis, width_pair, value_pair):\n    left_slice = _slice_at_axis(slice(None, width_pair[0]), axis)\n    padded[left_slice] = value_pair[0]\n    right_slice = _slice_at_axis(slice(padded.shape[axis] - width_pair[1], None), axis)\n    padded[right_slice] = value_pair[1]",
    "docstring": "Set empty-padded area in given dimension. Parameters ---------- padded : ndarray Array with the pad area which is modified inplace. axis : int Dimension with the pad area to set. width_pair : (int, int) Pair of widths that mark the pad area on both sides in the given dimension. value_pair : tuple of scalars or ndarrays Values inserted into the pad area on each side. It must match or be broadcastable to the shape of .",
    "type": "function",
    "file_path": "numpy\\numpy\\lib\\_arraypad_impl.py",
    "ast_data": "FunctionDef name:_set_pad_area arg:padded arg:axis arg:width_pair arg:value_pair arguments arg arg arg arg Assign Call Call Assign Assign Call Call Assign"
  },
  {
    "library": "pytorch",
    "name": "aot_based_partition",
    "source_code": "def aot_based_partition(self, node_to_partition_mapping, partition_to_logical_device_mapping):\n    partition_id_to_partition_mapping: dict[int, Partition] = {}\n    self.node_to_partition = node_to_partition_mapping\n    for node in self.node_to_partition:\n        partition_id = self.node_to_partition[node]\n        if partition_id not in partition_id_to_partition_mapping:\n            partition = Partition(partition_id)\n            self.partitions.append(partition)\n            partition_id_to_partition_mapping[partition_id] = partition\n            partition.logical_device_ids = partition_to_logical_device_mapping[partition_id]\n        else:\n            partition = partition_id_to_partition_mapping[self.node_to_partition[node]]\n        partition.add_node(node)",
    "docstring": "This function helps to rebuild the partitions given the nodes and its corresponding partition id",
    "type": "method",
    "file_path": "pytorch\\torch\\fx\\experimental\\accelerator_partitioner.py",
    "ast_data": "FunctionDef name:aot_based_partition arg:self arg:node_to_partition_mapping arg:partition_to_logical_device_mapping arguments arg arg arg Assign For Assign If Compare Assign Call Call Assign Assign Assign Call"
  },
  {
    "library": "pytorch",
    "name": "_sequential_wrapper2",
    "source_code": "def _sequential_wrapper2(sequential):\n\n    def fuser_method(is_qat, m1, m2):\n        return sequential(m1, m2)\n    return fuser_method",
    "docstring": "Return a sequential wrapped that for is_qat and two modules. Given a sequential class for two modules, return a function that takes is_qat, and then two modules as argument, that ignores the is_qat flag and always returns the sequential that combines the two input modules",
    "type": "function",
    "file_path": "pytorch\\torch\\ao\\quantization\\fuser_method_mappings.py",
    "ast_data": "FunctionDef name:_sequential_wrapper2 arg:sequential arguments arg FunctionDef name:fuser_method arg:is_qat arg:m1 arg:m2 arguments arg arg arg Return return:yes Call Return return:yes"
  },
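A usage sketch that re-declares the wrapper locally (the original is private to torch.ao.quantization) and applies it to `nn.Sequential`:

```python
import torch.nn as nn

# Local re-declaration of the wrapper above, for illustration.
def sequential_wrapper2(sequential):
    def fuser_method(is_qat, m1, m2):
        return sequential(m1, m2)
    return fuser_method

fuse = sequential_wrapper2(nn.Sequential)
fused = fuse(False, nn.Conv2d(3, 8, 3), nn.ReLU())
print(fused)  # Sequential of the two modules; is_qat was ignored
```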
  {
    "library": "django",
    "name": "get_filter_kwargs_for_object",
    "source_code": "def get_filter_kwargs_for_object(self, obj):\n    return {self.fk_field: getattr(obj, self.fk_field), self.ct_field: getattr(obj, self.ct_field)}",
    "docstring": "See corresponding method on Field",
    "type": "method",
    "file_path": "django\\django\\contrib\\contenttypes\\fields.py",
    "ast_data": "FunctionDef name:get_filter_kwargs_for_object arg:self arg:obj arguments arg arg Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "__init__",
    "source_code": "def __init__(self, ddp, divide_by_initial_world_size):\n    assert isinstance(ddp, DistributedDataParallel), 'DDP join hook requires passing in a DistributedDataParallel instance as the state'\n    assert ddp.logger is not None\n    ddp.logger._set_uneven_input_join()\n    self.ddp = ddp\n    self.ddp._divide_by_initial_world_size = divide_by_initial_world_size\n    super().__init__()",
    "docstring": "Set config variables for internal usage.",
    "type": "method",
    "file_path": "pytorch\\torch\\nn\\parallel\\distributed.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:ddp arg:divide_by_initial_world_size arguments arg arg arg Call Compare Call Assign Assign Call Call"
  },
  {
    "library": "tensorflow",
    "name": "generate_link",
    "source_code": "def generate_link(flag, np_fun_name):\n    if flag == 'dev':\n        template = 'https://numpy.org/devdocs/reference/generated/numpy.%s.html'\n    elif flag == 'stable':\n        template = 'https://numpy.org/doc/stable/reference/generated/numpy.%s.html'\n    elif re.match('\\\\d+(\\\\.\\\\d+(\\\\.\\\\d+)?)?$', flag):\n        template = f'https://numpy.org/doc/{flag}/reference/generated/numpy.%s.html'\n    else:\n        return None\n    return template % np_fun_name",
    "docstring": "Generates link from numpy function name. Args: flag: the flag to control link form. See . np_fun_name: the numpy function name. Returns: A string.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\numpy_ops\\np_utils.py",
    "ast_data": "FunctionDef name:generate_link arg:flag arg:np_fun_name arguments arg arg If Compare Assign If Compare Assign If Call Assign Return return:no Return return:yes"
  },
  {
    "library": "django",
    "name": "start_serialization",
    "source_code": "def start_serialization(self):\n    self.xml = SimplerXMLGenerator(self.stream, self.options.get('encoding', settings.DEFAULT_CHARSET))\n    self.xml.startDocument()\n    self.xml.startElement('django-objects', {'version': '1.0'})",
    "docstring": "Start serialization -- open the XML document and the root element.",
    "type": "method",
    "file_path": "django\\django\\core\\serializers\\xml_serializer.py",
    "ast_data": "FunctionDef name:start_serialization arg:self arguments arg Assign Call Call Call Call"
  },
  {
    "library": "django",
    "name": "DummyNode",
    "source_code": "class DummyNode(Node):\n\n    def __init__(self, key, origin, error_message):\n        super().__init__(key)\n        self.origin = origin\n        self.error_message = error_message\n\n    def raise_error(self):\n        raise NodeNotFoundError(self.error_message, self.key, origin=self.origin)",
    "docstring": "A node that doesn't correspond to a migration file on disk. (A squashed migration that was removed, for example.) After the migration graph is processed, all dummy nodes should be removed. If there are any left, a nonexistent dependency error is raised.",
    "type": "class",
    "file_path": "django\\django\\db\\migrations\\graph.py",
    "ast_data": "ClassDef name:DummyNode FunctionDef name:__init__ arg:self arg:key arg:origin arg:error_message arguments arg arg arg arg Call Call Assign Assign FunctionDef name:raise_error arg:self arguments arg Raise Call"
  },
  {
    "library": "scikit-learn",
    "name": "_raw_predict",
    "source_code": "def _raw_predict(self, X):\n    check_is_fitted(self)\n    raw_predictions = self._raw_predict_init(X)\n    predict_stages(self.estimators_, X, self.learning_rate, raw_predictions)\n    return raw_predictions",
    "docstring": "Return the sum of the trees raw predictions (+ init estimator).",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\ensemble\\_gb.py",
    "ast_data": "FunctionDef name:_raw_predict arg:self arg:X arguments arg arg Call Assign Call Call Return return:yes"
  },
  {
    "library": "virtualenv",
    "name": "current_system",
    "source_code": "@classmethod\ndef current_system(cls, app_data=None) -> PythonInfo:\n    if cls._current_system is None:\n        cls._current_system = cls.from_exe(sys.executable, app_data, raise_on_error=True, resolve_to_host=True)\n    return cls._current_system",
    "docstring": "This locates the current host interpreter information. This might be different than what we run into in case the host python has been upgraded from underneath us.",
    "type": "method",
    "file_path": "virtualenv\\src\\virtualenv\\discovery\\py_info.py",
    "ast_data": "FunctionDef name:current_system arg:cls arg:app_data arguments arg arg If Compare Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "wait_at_barrier",
    "source_code": "def wait_at_barrier(self, barrier_id, timeout_in_ms):\n    ensure_initialized()\n    pywrap_tfe.TFE_WaitAtBarrier(self._context_handle, barrier_id, timeout_in_ms)",
    "docstring": "Blocks until all coordinated tasks are at the barrier. The barrier may fail if it times out or if one of the tasks is unhealthy. Args: barrier_id: Unique string identifying the barrier. timeout_in_ms: Duration before the barrier times out and fails.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\context.py",
    "ast_data": "FunctionDef name:wait_at_barrier arg:self arg:barrier_id arg:timeout_in_ms arguments arg arg arg Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_get_run_debug_urls",
    "source_code": "def _get_run_debug_urls(self):\n    return ['file://' + self._dump_root]",
    "docstring": "Get the debug_urls value for the current run() call. Returns: debug_urls: (list of str) Debug URLs for the current run() call. Currently, the list consists of only one URL that is a file:// URL.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\debug\\wrappers\\local_cli_wrapper.py",
    "ast_data": "FunctionDef name:_get_run_debug_urls arg:self arguments arg Return return:yes"
  },
  {
    "library": "numpy",
    "name": "count",
    "source_code": "@set_module('numpy.strings')\ndef count(a, sub, start=0, end=None):\n    end = end if end is not None else MAX\n    return _count_ufunc(a, sub, start, end)",
    "docstring": "Returns an array with the number of non-overlapping occurrences of substring `` dtype The substring to search for. start, end : array_like, with any integer dtype The range to look in, interpreted as in slice notation. Returns ------- y : ndarray Output array of ints See Also -------- str.count Examples -------- >>> import numpy as np >>> c = np.array(['aAaAaA', ' aA ', 'abBABba']) >>> c array(['aAaAaA', ' aA ', 'abBABba'], dtype='>> np.strings.count(c, 'A') array([3, 1, 1]) >>> np.strings.count(c, 'aA') array([3, 1, 0]) >>> np.strings.count(c, 'A', start=1, end=4) array([2, 1, 1]) >>> np.strings.count(c, 'A', start=1, end=3) array([1, 0, 0])",
    "type": "function",
    "file_path": "numpy\\numpy\\_core\\strings.py",
    "ast_data": "FunctionDef name:count arg:a arg:sub arg:start arg:end arguments arg arg arg arg Assign Compare Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "scatter",
    "source_code": "def scatter(self, indices, value, name=None):\n    del name\n    if isinstance(indices, ops.EagerTensor):\n        indices = indices.numpy()\n    for index, val in zip(indices, array_ops_stack.unstack(value)):\n        self._write(index, val)\n    return self.parent()",
    "docstring": "See TensorArray.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\tensor_array_ops.py",
    "ast_data": "FunctionDef name:scatter arg:self arg:indices arg:value arg:name arguments arg arg arg arg If Call Assign Call For Call Call Call Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "should_use_regex",
    "source_code": "def should_use_regex(regex: bool, to_replace: Any) -> bool:\n    if is_re(to_replace):\n        regex = True\n    regex = regex and is_re_compilable(to_replace)\n    regex = regex and re.compile(to_replace).pattern != ''\n    return regex",
    "docstring": "Decide whether to treat as a regular expression.",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\array_algos\\replace.py",
    "ast_data": "FunctionDef name:should_use_regex arg:regex arg:to_replace arguments arg arg If Call Assign Assign BoolOp Call Assign BoolOp Compare Call Return return:yes"
  },
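A self-contained sketch of the decision logic; the local `is_re`/`is_re_compilable` stand-ins are assumptions about what the pandas helpers check, not the real imports:

```python
import re

def is_re(obj):
    # assumption: pandas' helper tests for a pre-compiled pattern
    return isinstance(obj, re.Pattern)

def is_re_compilable(obj):
    # assumption: pandas' helper tests whether re.compile succeeds
    try:
        re.compile(obj)
    except (TypeError, re.error):
        return False
    return True

def should_use_regex(regex, to_replace):
    # Same decision logic as above: a pre-compiled pattern forces
    # regex mode; otherwise to_replace must compile to a non-empty pattern.
    if is_re(to_replace):
        regex = True
    regex = regex and is_re_compilable(to_replace)
    regex = regex and re.compile(to_replace).pattern != ""
    return regex

assert should_use_regex(False, re.compile(r"\d+")) is True
assert should_use_regex(True, "[0-9]+") is True
assert should_use_regex(True, "") is False  # empty pattern is rejected
```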
  {
    "library": "tensorflow",
    "name": "from_concrete_function",
    "source_code": "def from_concrete_function(concrete_fn, specialized_flat_specs: Optional[List[tensor_spec.TensorSpec]]=None):\n    context.ensure_initialized()\n    fn_name = concrete_fn.name\n    filtered_flat_specs = specialized_flat_specs or list(nest.flatten(concrete_fn.structured_input_signature))\n    if not all((s.shape.is_fully_defined() for s in filtered_flat_specs)):\n        raise ValueError(f'Only support static input shape but got inputs = {concrete_fn.inputs}')\n\n    def compiler_ir_generator(stage='hlo', device_name=None, platform_name=None):\n        if device_name is not None:\n            if platform_name is not None:\n                raise ValueError('device_name and platform_name cannot be provided at the same time.')\n            warnings.warn('device_name is being deprecated. Use platform_name.')\n        device_name = maybe_get_device_name(device_name)\n        res_bytes = context.context().get_compiler_ir(device_name=device_name, platform_name=platform_name, function_name=fn_name, flat_args=filtered_flat_specs, captured_inputs=concrete_fn.captured_inputs, stage=stage)\n        if stage in ('stablehlo_serialized', 'hlo_serialized', 'optimized_hlo_serialized', 'optimized_hlo_proto_serialized'):\n            return res_bytes\n        else:\n            return res_bytes.decode('utf-8')\n    return compiler_ir_generator",
    "docstring": "Generate the Compiler Ir from tf concrete function with TensorSpec. Args: concrete_fn: returned by using get_concrete_function. specialized_flat_specs: specialized flat tf.TensorSpecs for function args. Returns: Function callable that generate the HLO text. Raises: ValueError: if concrete_fn is not \"compilable\" without concrete inputs.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\polymorphic_function\\compiler_ir.py",
    "ast_data": "FunctionDef name:from_concrete_function arg:concrete_fn arg:specialized_flat_specs arguments arg arg Call Assign Assign BoolOp Call Call If Call Call Raise Call FunctionDef name:compiler_ir_generator arg:stage arg:device_name arg:platform_name arguments arg arg arg If Compare If Compare Raise Call Call Assign Call Assign Call Call If Compare Return return:yes Return return:yes Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_log_signature_report",
    "source_code": "def _log_signature_report(signature_def_map, excluded_signatures):\n    sig_names_by_method_name = collections.defaultdict(list)\n    for method_name in _FRIENDLY_METHOD_NAMES:\n        sig_names_by_method_name[method_name] = []\n    for signature_name, sig in signature_def_map.items():\n        sig_names_by_method_name[sig.method_name].append(signature_name)\n    for method_name, sig_names in sig_names_by_method_name.items():\n        if method_name in _FRIENDLY_METHOD_NAMES:\n            method_name = _FRIENDLY_METHOD_NAMES[method_name]\n        logging.info('Signatures INCLUDED in export for {}: {}'.format(method_name, sig_names if sig_names else 'None'))\n    if excluded_signatures:\n        logging.info('Signatures EXCLUDED from export because they cannot be be served via TensorFlow Serving APIs:')\n        for signature_name, message in excluded_signatures.items():\n            logging.info(\"'{}' : {}\".format(signature_name, message))\n    if not signature_def_map:\n        logging.warning('Export includes no signatures!')\n    elif signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY not in signature_def_map:\n        logging.warning('Export includes no default signature!')",
    "docstring": "Log a report of which signatures were produced.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\saving\\utils_v1\\export_utils.py",
    "ast_data": "FunctionDef name:_log_signature_report arg:signature_def_map arg:excluded_signatures arguments arg arg Assign Call For Assign For Call Call For Call If Compare Assign Call Call If Call For Call Call Call If Call If Compare Call"
  },
  {
    "library": "pandas",
    "name": "classes",
    "source_code": "def classes(*klasses) -> Callable:\n    return lambda tipo: issubclass(tipo, klasses)",
    "docstring": "Evaluate if the tipo is a subclass of the klasses.",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\dtypes\\common.py",
    "ast_data": "FunctionDef name:classes arguments arg Return return:yes arguments arg Call"
  },
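The returned closure is just an `issubclass` test against the captured classes; a tiny sketch:

```python
from typing import Callable

def classes(*klasses) -> Callable:
    # Returns a predicate: is `tipo` a subclass of any of `klasses`?
    return lambda tipo: issubclass(tipo, klasses)

is_integral = classes(int)
assert is_integral(bool) is True    # bool subclasses int
assert is_integral(float) is False
```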
  {
    "library": "tensorflow",
    "name": "gather",
    "source_code": "def gather(self, indices, name=None):\n    value = list_ops.tensor_list_gather(input_handle=self._flow, indices=indices, element_dtype=self._dtype, element_shape=self.element_shape, name=name)\n    return value",
    "docstring": "See TensorArray.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\tensor_array_ops.py",
    "ast_data": "FunctionDef name:gather arg:self arg:indices arg:name arguments arg arg arg Assign Call Return return:yes"
  },
  {
    "library": "django",
    "name": "__str__",
    "source_code": "def __str__(self):\n    return str(self.value).strip()",
    "docstring": "Return the string representation of the Field.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\gdal\\field.py",
    "ast_data": "FunctionDef name:__str__ arg:self arguments arg Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_get_log_write_dir",
    "source_code": "def _get_log_write_dir(self):\n    return distributed_file_utils.write_dirpath(self.log_dir, self.model.distribute_strategy)",
    "docstring": "For multi-worker, only chief should write, others write to '/tmp'.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\callbacks.py",
    "ast_data": "FunctionDef name:_get_log_write_dir arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "clear_path_state",
    "source_code": "def clear_path_state(self) -> None:\n    pass",
    "docstring": "Clear the path state in this current executing node",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\cudagraph_trees.py",
    "ast_data": "FunctionDef name:clear_path_state arg:self arguments arg"
  },
  {
    "library": "tensorflow",
    "name": "_update_signature_def_tensors",
    "source_code": "def _update_signature_def_tensors(tensor_maps, map_old_to_new_tensors):\n    for i in range(len(tensor_maps)):\n        if tensor_maps[i].tensorIndex in map_old_to_new_tensors:\n            tensor_maps[i].tensorIndex = map_old_to_new_tensors[tensor_maps[i].tensorIndex]",
    "docstring": "Update the tensors in the SignatureDef's TensorMaps.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\lite\\python\\util.py",
    "ast_data": "FunctionDef name:_update_signature_def_tensors arg:tensor_maps arg:map_old_to_new_tensors arguments arg arg For Call Call If Compare Assign"
  },
  {
    "library": "pytorch",
    "name": "__call__",
    "source_code": "def __call__(self, code_options, cleanup):\n    load_args = []\n    if self.target_values:\n        load_args = [create_load_const(val) for val in self.target_values]\n    create_ctx: list[Instruction] = []\n    _initial_push_null(create_ctx)\n    create_ctx.extend([*load_args, *create_call_function(len(load_args), False)])\n\n    def _template(ctx, dummy):\n        with ctx:\n            dummy\n    setup_with, epilogue = _bytecode_from_template_with_split(_template, self.stack_index)\n    cleanup[:] = epilogue + cleanup\n    load_fast_ctx_inst = next((inst for inst in setup_with if inst.opname == 'LOAD_FAST' and inst.argval == 'ctx'), None)\n    assert load_fast_ctx_inst is not None\n    overwrite_instruction(load_fast_ctx_inst, [create_instruction('NOP')])\n    push_exc_info_gen = (inst for inst in epilogue if inst.opname == 'PUSH_EXC_INFO')\n    push_exc_info_inst = next(push_exc_info_gen, None)\n    assert next(push_exc_info_gen, None) is None\n    return (create_ctx + setup_with, push_exc_info_inst)",
    "docstring": "Codegen based off of: with ctx(args): (rest)",
    "type": "method",
    "file_path": "pytorch\\torch\\_dynamo\\resume_execution.py",
    "ast_data": "FunctionDef name:__call__ arg:self arg:code_options arg:cleanup arguments arg arg arg Assign If Assign Call Call Call Call Call FunctionDef name:_template arg:ctx arg:dummy arguments arg arg With Assign Call Assign Assign Call BoolOp Compare Compare Compare Call Call Assign Compare Assign Call Compare Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, visitor):\n    self._visitor = visitor\n    self._root_name = 'tf'\n    self._private_map = {'tf': ['compiler', 'core', 'security', 'dtensor', 'python', 'tsl'], 'tf.flags': ['cpp_flags']}\n    self._do_not_descend_map = {'tf': ['examples', 'flags', 'platform', 'pywrap_tensorflow', 'user_ops', 'tools', 'tensorboard'], 'tf.app': ['flags'], 'tf.test': ['mock']}",
    "docstring": "Constructor. should be a callable suitable as a visitor for . It will be called only for members of the public TensorFlow API. Args: visitor: A visitor to call for the public API.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\tools\\common\\public_api.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:visitor arguments arg arg Assign Assign Assign Assign"
  },
  {
    "library": "matplotlib",
    "name": "start_main_loop",
    "source_code": "@classmethod\ndef start_main_loop(cls):\n    pass",
    "docstring": "Start the main event loop. This method is called by , which is the implementation of . To customize the behavior of , interactive backends should usually override ; if more customized logic is necessary, can also be overridden.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\backend_bases.py",
    "ast_data": "FunctionDef name:start_main_loop arg:cls arguments arg"
  },
  {
    "library": "tensorflow",
    "name": "_RestructuredDataset",
    "source_code": "class _RestructuredDataset(UnaryDataset):\n\n    def __init__(self, dataset, element_spec):\n        self._input_dataset = dataset\n        self._element_spec = element_spec\n        variant_tensor = self._input_dataset._variant_tensor\n        super(_RestructuredDataset, self).__init__(dataset, variant_tensor)\n\n    @property\n    def element_spec(self):\n        return self._element_spec",
    "docstring": "An internal helper for changing the element spec of a dataset.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\data\\ops\\dataset_ops.py",
    "ast_data": "ClassDef name:_RestructuredDataset FunctionDef name:__init__ arg:self arg:dataset arg:element_spec arguments arg arg arg Assign Assign Assign Call Call FunctionDef name:element_spec arg:self arguments arg Return return:yes"
  },
  {
    "library": "numpy",
    "name": "open",
    "source_code": "def open(self, path, mode='r', encoding=None, newline=None):\n    return DataSource.open(self, self._fullpath(path), mode, encoding=encoding, newline=newline)",
    "docstring": "Open and return file-like object prepending Repository base URL. If is an URL, it will be downloaded, stored in the DataSource directory and opened from there. Parameters ---------- path : str or pathlib.Path Local file path or URL to open. This may, but does not have to, include the with which the was initialized. mode : {'r', 'w', 'a'}, optional Mode to open . Mode 'r' for reading, 'w' for writing, 'a' to append. Available modes depend on the type of object specified by . Default is 'r'. encoding : {None, str}, optional Open text file with given encoding. The default encoding will be what uses. newline : {None, str}, optional Newline to use when reading text file. Returns ------- out : file object File object.",
    "type": "method",
    "file_path": "numpy\\numpy\\lib\\_datasource.py",
    "ast_data": "FunctionDef name:open arg:self arg:path arg:mode arg:encoding arg:newline arguments arg arg arg arg arg Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "_collect_all_valid_cia_ops",
    "source_code": "def _collect_all_valid_cia_ops() -> set['OperatorBase']:\n    cia_ops = set()\n    for op_namespace_name in torch.ops._dir:\n        if op_namespace_name != 'aten':\n            assert hasattr(torch.ops, op_namespace_name)\n            op_namespace = getattr(torch.ops, op_namespace_name)\n            if isinstance(op_namespace, torch._ops._OpNamespace):\n                cia_ops |= _collect_all_valid_cia_ops_for_namespace(op_namespace)\n        else:\n            cia_ops |= _collect_all_valid_cia_ops_for_aten_namespace()\n    return cia_ops",
    "docstring": "This is an util function that gets the all CIA functional ops. The algorithm is in 2 steps: 1. We first query C++ dispatcher to get the list of CIA ops and then we call getattr on torch.ops.aten to lazily populate them. 2. Sometimes, handful of ops have CIA registered in python dispatcher but not on the C++ side, these can't be caught at the first step. So we walk again to get the final list. Note that the output of this function should never be modified",
    "type": "function",
    "file_path": "pytorch\\torch\\_export\\utils.py",
    "ast_data": "FunctionDef name:_collect_all_valid_cia_ops arguments Assign Call For If Compare Call Assign Call If Call Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "from_tensor",
    "source_code": "@classmethod\ndef from_tensor(cls, tensor, name=None):\n    if isinstance(tensor, core_tf_types.Value):\n        return TensorSpec(tensor.shape, tensor.dtype, name)\n    elif isinstance(tensor, core_tf_types.Symbol):\n        return TensorSpec(tensor.shape, tensor.dtype, name or tensor.op.name)\n    else:\n        raise ValueError(f'`tensor` should be a tf.Tensor, but got type {type(tensor)}.')",
    "docstring": "Returns a that describes . >>> tf.TensorSpec.from_tensor(tf.constant([1, 2, 3])) TensorSpec(shape=(3,), dtype=tf.int32, name=None) Args: tensor: The that should be described. name: A name for the . Defaults to . Returns: A that describes .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\tensor.py",
    "ast_data": "FunctionDef name:from_tensor arg:cls arg:tensor arg:name arguments arg arg arg If Call Return return:yes Call If Call Return return:yes Call BoolOp Raise Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_ragged_tensor_to_string",
    "source_code": "def _ragged_tensor_to_string(string_tensor, summarize):\n    if string_tensor.shape.rank == 1:\n        pieces = string_tensor\n    else:\n        pieces = map_fn_lib.map_fn(lambda s: _ragged_tensor_to_string(s, summarize), string_tensor, fn_output_signature=tensor_lib.TensorSpec(None, dtypes.string))\n    if summarize not in (-1, None):\n        pieces = cond.cond(_nrows(string_tensor) <= 2 * summarize, lambda: pieces, lambda: array_ops.concat([pieces[:summarize], ['...'], pieces[-summarize:]], axis=0))\n    return '[' + string_ops.reduce_join(pieces, separator=', ') + ']'",
    "docstring": "Returns a scalar string tensor with the contents of . Args: string_tensor: A potentially ragged tensor with dtype=string. summarize: Include only the first and last elements of each dimension. If or , then include all elements. Returns: A scalar string Tensor.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\ragged_string_ops.py",
    "ast_data": "FunctionDef name:_ragged_tensor_to_string arg:string_tensor arg:summarize arguments arg arg If Compare Assign Assign Call arguments arg Call Call If Compare Assign Call Compare Call arguments arguments Call Return return:yes Call"
  },
  {
    "library": "numpy",
    "name": "feature_untied",
    "source_code": "def feature_untied(self, names):\n    assert not isinstance(names, str) and hasattr(names, '__iter__')\n    final = []\n    for n in names:\n        implies = self.feature_implies(n)\n        tied = [nn for nn in final if nn in implies and n in self.feature_implies(nn)]\n        if tied:\n            tied = self.feature_sorted(tied + [n])\n            if n not in tied[1:]:\n                continue\n            final.remove(tied[:1][0])\n        final.append(n)\n    return final",
    "docstring": "same as 'feature_ahead()' but if both features implied each other and keep the highest interest. Parameters ---------- 'names': sequence sequence of CPU feature names in uppercase. Returns ------- list of CPU features sorted as-is 'names' Examples -------- >>> self.feature_untied([\"SSE2\", \"SSE3\", \"SSE41\"]) [\"SSE2\", \"SSE3\", \"SSE41\"] # assume AVX2 and FMA3 implies each other >>> self.feature_untied([\"SSE2\", \"SSE3\", \"SSE41\", \"FMA3\", \"AVX2\"]) [\"SSE2\", \"SSE3\", \"SSE41\", \"AVX2\"]",
    "type": "method",
    "file_path": "numpy\\numpy\\distutils\\ccompiler_opt.py",
    "ast_data": "FunctionDef name:feature_untied arg:self arg:names arguments arg arg BoolOp Call Call Assign For Assign Call Assign BoolOp Compare Compare Call If Assign Call If Compare Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_finalize_config_layers",
    "source_code": "def _finalize_config_layers(layers):\n    for layer in layers:\n        if _is_graph_network(layer):\n            _restore_layer_unconditional_losses(layer)\n        _restore_layer_activation_loss(layer)\n        _restore_layer_metrics(layer)\n        if isinstance(layer, recurrent.RNN) and layer.stateful and hasattr(_get_keras_attr(layer), 'states'):\n            layer.states = getattr(_get_keras_attr(layer), 'states', None)\n            for variable in nest.flatten(layer.states):\n                backend.track_variable(variable)\n        layer.finalize_state()",
    "docstring": "Runs the final steps of loading Keras Layers from config.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\saving\\saved_model\\load.py",
    "ast_data": "FunctionDef name:_finalize_config_layers arg:layers arguments arg For If Call Call Call Call If BoolOp Call Call Call Assign Call Call For Call Call Call"
  },
  {
    "library": "scikit-learn",
    "name": "_eigen_decompose_gram",
    "source_code": "def _eigen_decompose_gram(self, X, y, sqrt_sw):\n    K, X_mean = self._compute_gram(X, sqrt_sw)\n    if self.fit_intercept:\n        K += np.outer(sqrt_sw, sqrt_sw)\n    eigvals, Q = linalg.eigh(K)\n    QT_y = np.dot(Q.T, y)\n    return (X_mean, eigvals, Q, QT_y)",
    "docstring": "Eigendecomposition of X.X^T, used when n_samples <= n_features.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\linear_model\\_ridge.py",
    "ast_data": "FunctionDef name:_eigen_decompose_gram arg:self arg:X arg:y arg:sqrt_sw arguments arg arg arg arg Assign Call If Call Assign Call Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "convert_inference_tf_type_to_tflite_type",
    "source_code": "def convert_inference_tf_type_to_tflite_type(tf_type: dtypes.DType, usage: str='') -> _types_pb2.IODataType:\n    mapping = {dtypes.float32: _types_pb2.FLOAT, dtypes.uint8: _types_pb2.QUANTIZED_UINT8, dtypes.int8: _types_pb2.QUANTIZED_INT8, dtypes.int16: _types_pb2.QUANTIZED_INT16}\n    tflite_type = mapping.get(tf_type)\n    if tflite_type is None:\n        raise ValueError('Unsupported TensorFlow type `{0}` provided for the {1}'.format(tf_type, usage))\n    return tflite_type",
    "docstring": "Convert inference type from tf type to tflite type. Args: tf_type: TensorFlow type. usage: Text describing the reason for invoking this function. Raises: ValueError: If is unsupported. Returns: tflite_type: TFLite type. Refer to compiler/mlir/lite/types.proto.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\lite\\python\\convert.py",
    "ast_data": "FunctionDef name:convert_inference_tf_type_to_tflite_type arg:tf_type arg:usage arguments arg arg Assign Assign Call If Compare Raise Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_add_kl_info",
    "source_code": "def _add_kl_info():\n    rows = ['KL divergence is currently implemented for the following distribution pairs:']\n    for p, q in sorted(_KL_REGISTRY, key=lambda p_q: (p_q[0].__name__, p_q[1].__name__)):\n        rows.append(f'* :class:`~torch.distributions.{p.__name__}` and :class:`~torch.distributions.{q.__name__}`')\n    kl_info = '\\n\\t'.join(rows)\n    if kl_divergence.__doc__:\n        kl_divergence.__doc__ += kl_info",
    "docstring": "Appends a list of implemented KL functions to the doc for kl_divergence.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributions\\kl.py",
    "ast_data": "FunctionDef name:_add_kl_info arguments Assign For Call arguments arg Call Assign Call If"
  },
  {
    "library": "django",
    "name": "ProhibitNullCharactersValidator",
    "source_code": "@deconstructible\nclass ProhibitNullCharactersValidator:\n    message = _('Null characters are not allowed.')\n    code = 'null_characters_not_allowed'\n\n    def __init__(self, message=None, code=None):\n        if message is not None:\n            self.message = message\n        if code is not None:\n            self.code = code\n\n    def __call__(self, value):\n        if '\\x00' in str(value):\n            raise ValidationError(self.message, code=self.code, params={'value': value})\n\n    def __eq__(self, other):\n        return isinstance(other, self.__class__) and self.message == other.message and (self.code == other.code)",
    "docstring": "Validate that the string doesn't contain the null character.",
    "type": "class",
    "file_path": "django\\django\\core\\validators.py",
    "ast_data": "ClassDef name:ProhibitNullCharactersValidator Assign Call Assign FunctionDef name:__init__ arg:self arg:message arg:code arguments arg arg arg If Compare Assign If Compare Assign FunctionDef name:__call__ arg:self arg:value arguments arg arg If Compare Call Raise Call FunctionDef name:__eq__ arg:self arg:other arguments arg arg Return return:yes BoolOp Call Compare Compare"
  },
  {
    "library": "pytorch",
    "name": "_reset_is_grad_none",
    "source_code": "def _reset_is_grad_none(self) -> None:\n    if not self._use_orig_params:\n        return\n    _p_assert(self._training_state == HandleTrainingState.BACKWARD_POST, 'Expects to only be called in the post-backward after gradient computation')\n    flat_param = self.flat_param\n    assert flat_param._params is not None\n    for i, param in enumerate(flat_param._params):\n        if param.requires_grad:\n            assert flat_param._is_grad_none_mask is not None\n            flat_param._is_grad_none_mask[i] = False",
    "docstring": "Reset ``.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\fsdp\\_flat_param.py",
    "ast_data": "FunctionDef name:_reset_is_grad_none arg:self arguments arg If Return return:no Call Compare Assign Compare For Call If Compare Assign"
  },
  {
    "library": "tensorflow",
    "name": "_ResourceGatherNd",
    "source_code": "class _ResourceGatherNd(_Node):\n\n    def convert_variable_to_constant(self, incoming_edge, tensor_data):\n        output_node = self.converted_self().node\n        output_node.Clear()\n        output_node.name = self._node.name\n        output_node.op = 'GatherNd'\n        output_node.input.extend([self._node.input[0], self._node.input[1]])\n        output_node.attr['Tparams'].CopyFrom(self._node.attr['dtype'])\n        output_node.attr['Tindices'].CopyFrom(self._node.attr['Tindices'])\n        if '_class' in self._node.attr:\n            output_node.attr['_class'].CopyFrom(self._node.attr['_class'])",
    "docstring": "Specialization of _Node to ResourceGatherNd.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\convert_to_constants.py",
    "ast_data": "ClassDef name:_ResourceGatherNd FunctionDef name:convert_variable_to_constant arg:self arg:incoming_edge arg:tensor_data arguments arg arg arg Assign Call Call Assign Assign Call Call Call If Compare Call"
  },
  {
    "library": "matplotlib",
    "name": "__init__",
    "source_code": "def __init__(self, axis):\n    pass",
    "docstring": "Construct a new scale. Notes ----- The following note is for scale implementers. For back-compatibility reasons, scales take an object as first argument. However, this argument should not be used: a single scale object should be usable by multiple \\es at the same time.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\scale.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:axis arguments arg arg"
  },
  {
    "library": "django",
    "name": "S",
    "source_code": "def S(self):\n    if self.data.day in (11, 12, 13):\n        return 'th'\n    last = self.data.day % 10\n    if last == 1:\n        return 'st'\n    if last == 2:\n        return 'nd'\n    if last == 3:\n        return 'rd'\n    return 'th'",
    "docstring": "English ordinal suffix for the day of the month, 2 characters; i.e. 'st', 'nd', 'rd' or 'th'.",
    "type": "method",
    "file_path": "django\\django\\utils\\dateformat.py",
    "ast_data": "FunctionDef name:S arg:self arguments arg If Compare Return return:yes Assign If Compare Return return:yes If Compare Return return:yes If Compare Return return:yes Return return:yes"
  },
  {
    "library": "sphinx",
    "name": "process_only_nodes",
    "source_code": "def process_only_nodes(document: Node, tags: Tags) -> None:\n    for node in document.findall(addnodes.only):\n        if _only_node_keep_children(node, tags):\n            node.replace_self(node.children or nodes.comment())\n        else:\n            node.replace_self(nodes.comment())",
    "docstring": "Filter `` nodes which do not match *tags*.",
    "type": "function",
    "file_path": "sphinx\\sphinx\\util\\nodes.py",
    "ast_data": "FunctionDef name:process_only_nodes arg:document arg:tags arguments arg arg For Call If Call Call BoolOp Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "parse_time_interval",
    "source_code": "def parse_time_interval(interval_str):\n    str_interval = _parse_interval(interval_str)\n    interval_start = 0\n    interval_end = float('inf')\n    if str_interval.start:\n        interval_start = parse_readable_time_str(str_interval.start)\n    if str_interval.end:\n        interval_end = parse_readable_time_str(str_interval.end)\n    if interval_start > interval_end:\n        raise ValueError('Invalid interval %s. Start must be before end of interval.' % interval_str)\n    return Interval(interval_start, str_interval.start_included, interval_end, str_interval.end_included)",
    "docstring": "Convert a human-readable time interval to a tuple of start and end value. Args: interval_str: () A human-readable str representing an interval (e.g., \"[10us, 20us]\", \"100ms\"). Supported time suffixes are us, ms, s. Returns: object where start and end are in microseconds. Raises: ValueError: if the input is not valid.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\debug\\cli\\command_parser.py",
    "ast_data": "FunctionDef name:parse_time_interval arg:interval_str arguments arg Assign Call Assign Assign Call If Assign Call If Assign Call If Compare Raise Call Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "print_pdf",
    "source_code": "def print_pdf(self, fname_or_fh, *, metadata=None, **kwargs):\n    w, h = self.figure.get_size_inches()\n    info_dict = _create_pdf_info_dict('pgf', metadata or {})\n    pdfinfo = ','.join((_metadata_to_str(k, v) for k, v in info_dict.items()))\n    with TemporaryDirectory() as tmpdir:\n        tmppath = pathlib.Path(tmpdir)\n        self.print_pgf(tmppath / 'figure.pgf', **kwargs)\n        (tmppath / 'figure.tex').write_text('\\n'.join([_DOCUMENTCLASS, '\\\\usepackage[pdfinfo={%s}]{hyperref}' % pdfinfo, '\\\\usepackage[papersize={%fin,%fin}, margin=0in]{geometry}' % (w, h), '\\\\usepackage{pgf}', _get_preamble(), '\\\\begin{document}', '\\\\centering', '\\\\input{figure.pgf}', '\\\\end{document}']), encoding='utf-8')\n        texcommand = mpl.rcParams['pgf.texsystem']\n        cbook._check_and_log_subprocess([texcommand, '-interaction=nonstopmode', '-halt-on-error', 'figure.tex'], _log, cwd=tmpdir)\n        with (tmppath / 'figure.pdf').open('rb') as orig, cbook.open_file_cm(fname_or_fh, 'wb') as dest:\n            shutil.copyfileobj(orig, dest)",
    "docstring": "Use LaTeX to compile a pgf generated figure to pdf.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\backends\\backend_pgf.py",
    "ast_data": "FunctionDef name:print_pdf arg:self arg:fname_or_fh arguments arg arg arg arg Assign Call Assign Call BoolOp Assign Call Call Call With Call Assign Call Call Call Call Call Assign Call With Call Call Call"
  },
  {
    "library": "sphinx",
    "name": "isabstractmethod",
    "source_code": "def isabstractmethod(obj: Any) -> bool:\n    return safe_getattr(obj, '__isabstractmethod__', False) is True",
    "docstring": "Check if the object is an :func:.",
    "type": "function",
    "file_path": "sphinx\\sphinx\\util\\inspect.py",
    "ast_data": "FunctionDef name:isabstractmethod arg:obj arguments arg Return return:yes Compare Call"
  },
  {
    "library": "tensorflow",
    "name": "get_variable",
    "source_code": "def get_variable(self, feature_column, name):\n    if name in self._cols_to_vars_map[feature_column]:\n        return self._cols_to_vars_map[feature_column][name]\n    raise ValueError('Variable does not exist.')",
    "docstring": "Returns an existing variable. Args: feature_column: A object this variable corresponds to. name: variable name.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\feature_column\\feature_column_v2.py",
    "ast_data": "FunctionDef name:get_variable arg:self arg:feature_column arg:name arguments arg arg arg If Compare Return return:yes Raise Call"
  },
  {
    "library": "cryptography",
    "name": "key_size",
    "source_code": "@property\n@abc.abstractmethod\ndef key_size(self) -> int:\n    pass",
    "docstring": "Bit size of a secret scalar for the curve.",
    "type": "method",
    "file_path": "cryptography\\src\\cryptography\\hazmat\\primitives\\asymmetric\\ec.py",
    "ast_data": "FunctionDef name:key_size arg:self arguments arg"
  },
  {
    "library": "scipy",
    "name": "make_splprep",
    "source_code": "def make_splprep(x, *, w=None, u=None, ub=None, ue=None, k=3, s=0, t=None, nest=None):\n    x = np.stack(x, axis=1)\n    if u is None:\n        dp = (x[1:, :] - x[:-1, :]) ** 2\n        u = np.sqrt(dp.sum(axis=1)).cumsum()\n        u = np.r_[0, u / u[-1]]\n    if s == 0:\n        if t is not None or w is not None or nest is not None:\n            raise ValueError('s==0 is for interpolation only')\n        return (make_interp_spline(u, x.T, k=k, axis=1), u)\n    u, x, w, k, s, ub, ue = _validate_inputs(u, x, w, k, s, ub, ue, parametric=True)\n    spl = _make_splrep_impl(u, x, w=w, xb=ub, xe=ue, k=k, s=s, t=t, nest=nest)\n    cc = spl.c.T\n    spl1 = BSpline(spl.t, cc, spl.k, axis=1)\n    return (spl1, u)",
    "docstring": "Create a smoothing parametric B-spline curve with bounded error, minimizing derivative jumps. Given a list of N 1D arrays, , which represent a curve in N-dimensional space parametrized by , find a smooth approximating spline curve `xwk is the input parameter. In other words, we balance maximizing the smoothness (measured as the jumps of the derivative, the first criterion), and the deviation of :math: from the data :math: (the second criterion). Note that the summation in the second criterion is over all data points, and in the first criterion it is over the internal spline knots (i.e. those with `generate_knots` for details. .. versionadded:: 1.15.0 References ---------- .. [1] P. Dierckx, \"Algorithms for smoothing data with periodic and parametric splines, Computer Graphics and Image Processing\", 20 (1982) 171-184. .. [2] P. Dierckx, \"Curve and surface fitting with splines\", Monographs on Numerical Analysis, Oxford University Press, 1993.",
    "type": "function",
    "file_path": "scipy\\scipy\\interpolate\\_fitpack_repro.py",
    "ast_data": "FunctionDef name:make_splprep arg:x arguments arg arg arg arg arg arg arg arg arg Assign Call If Compare Assign Assign Call Call Call Assign If Compare If BoolOp Compare Compare Compare Raise Call Return return:yes Call Assign Call Assign Call Assign Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "GlobalMaxPooling2D",
    "source_code": "class GlobalMaxPooling2D(GlobalPooling2D):\n\n    def call(self, inputs):\n        if self.data_format == 'channels_last':\n            return backend.max(inputs, axis=[1, 2], keepdims=self.keepdims)\n        else:\n            return backend.max(inputs, axis=[2, 3], keepdims=self.keepdims)",
    "docstring": "Global max pooling operation for spatial data. Examples: >>> input_shape = (2, 4, 5, 3) >>> x = tf.random.normal(input_shape) >>> y = tf.keras.layers.GlobalMaxPool2D()(x) >>> print(y.shape) (2, 3) Args: data_format: A string, one of (default) or . The ordering of the dimensions in the inputs. corresponds to inputs with shape while corresponds to inputs with shape . It defaults to the value found in your Keras config file at . If you never set it, then it will be \"channels_last\". keepdims: A boolean, whether to keep the spatial dimensions or not. If is (default), the rank of the tensor is reduced for spatial dimensions. If is , the spatial dimensions are retained with length 1. The behavior is the same as for or . Input shape: - If : 4D tensor with shape . - If : 4D tensor with shape . Output shape: - If =False: 2D tensor with shape . - If =True: - If : 4D tensor with shape - If : 4D tensor with shape",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\layers\\pooling.py",
    "ast_data": "ClassDef name:GlobalMaxPooling2D FunctionDef name:call arg:self arg:inputs arguments arg arg If Compare Return return:yes Call Return return:yes Call"
  },
  {
    "library": "django",
    "name": "add_item",
    "source_code": "def add_item(self, title, link, description, author_email=None, author_name=None, author_link=None, pubdate=None, comments=None, unique_id=None, unique_id_is_permalink=None, categories=(), item_copyright=None, ttl=None, updateddate=None, enclosures=None, **kwargs):\n\n    def to_str(s):\n        return str(s) if s is not None else s\n    categories = categories and [to_str(c) for c in categories]\n    self.items.append({'title': to_str(title), 'link': iri_to_uri(link), 'description': to_str(description), 'author_email': to_str(author_email), 'author_name': to_str(author_name), 'author_link': iri_to_uri(author_link), 'pubdate': pubdate, 'updateddate': updateddate, 'comments': to_str(comments), 'unique_id': to_str(unique_id), 'unique_id_is_permalink': unique_id_is_permalink, 'enclosures': enclosures or (), 'categories': categories or (), 'item_copyright': to_str(item_copyright), 'ttl': to_str(ttl), **kwargs})",
    "docstring": "Add an item to the feed. All args are expected to be strings except pubdate and updateddate, which are datetime.datetime objects, and enclosures, which is an iterable of instances of the Enclosure class.",
    "type": "method",
    "file_path": "django\\django\\utils\\feedgenerator.py",
    "ast_data": "FunctionDef name:add_item arg:self arg:title arg:link arg:description arg:author_email arg:author_name arg:author_link arg:pubdate arg:comments arg:unique_id arg:unique_id_is_permalink arg:categories arg:item_copyright arg:ttl arg:updateddate arg:enclosures arguments arg arg arg arg arg arg arg arg arg arg arg arg arg arg arg arg arg FunctionDef name:to_str arg:s arguments arg Return return:yes Compare Call Assign BoolOp Call Call Call Call Call Call Call Call Call Call BoolOp BoolOp Call Call"
  },
  {
    "library": "tensorflow",
    "name": "convert",
    "source_code": "def convert(self):\n    assert not self._converted\n    if self._input_graph_def:\n        self._convert_graph_def()\n    else:\n        self._convert_saved_model()\n    return self._converted_graph_def",
    "docstring": "Run the TF-TRT conversion. Returns: The converted GraphDef for TF 1.x.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\compiler\\tensorrt\\trt_convert.py",
    "ast_data": "FunctionDef name:convert arg:self arguments arg If Call Call Return return:yes"
  },
  {
    "library": "authlib",
    "name": "ClientAuth",
    "source_code": "class ClientAuth:\n    DEFAULT_AUTH_METHODS = {'client_secret_basic': encode_client_secret_basic, 'client_secret_post': encode_client_secret_post, 'none': encode_none}\n\n    def __init__(self, client_id, client_secret, auth_method=None):\n        if auth_method is None:\n            auth_method = 'client_secret_basic'\n        self.client_id = client_id\n        self.client_secret = client_secret\n        if auth_method in self.DEFAULT_AUTH_METHODS:\n            auth_method = self.DEFAULT_AUTH_METHODS[auth_method]\n        self.auth_method = auth_method\n\n    def prepare(self, method, uri, headers, body):\n        return self.auth_method(self, method, uri, headers, body)",
    "docstring": "Attaches OAuth Client Information to HTTP requests. :param client_id: Client ID, which you get from client registration. :param client_secret: Client Secret, which you get from registration. :param auth_method: Client auth method for token endpoint. The supported methods for now: * client_secret_basic (default) * client_secret_post * none",
    "type": "class",
    "file_path": "authlib\\authlib\\oauth2\\auth.py",
    "ast_data": "ClassDef name:ClientAuth Assign FunctionDef name:__init__ arg:self arg:client_id arg:client_secret arg:auth_method arguments arg arg arg arg If Compare Assign Assign Assign If Compare Assign Assign FunctionDef name:prepare arg:self arg:method arg:uri arg:headers arg:body arguments arg arg arg arg arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "run_and_return_first_line",
    "source_code": "def run_and_return_first_line(run_lambda, command):\n    rc, out, _ = run_lambda(command)\n    if rc != 0:\n        return None\n    return out.split('\\n')[0]",
    "docstring": "Run command using run_lambda and returns first line if output is not empty.",
    "type": "function",
    "file_path": "pytorch\\torch\\utils\\collect_env.py",
    "ast_data": "FunctionDef name:run_and_return_first_line arg:run_lambda arg:command arguments arg arg Assign Call If Compare Return return:no Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "shape",
    "source_code": "@property\ndef shape(self):\n    return self._shape",
    "docstring": "The statically known shape of the RaggedTensor. Examples: >>> rt = tf.ragged.constant([[0], [1, 2]]) >>> tf.type_spec_from_value(rt).shape TensorShape([2, None]) >>> rt = tf.ragged.constant([[[0, 1]], [[1, 2], [3, 4]]], ragged_rank=1) >>> tf.type_spec_from_value(rt).shape TensorShape([2, None, 2]) Returns: A containing the statically known shape of the RaggedTensor. Ragged dimensions have a size of .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\ragged_tensor.py",
    "ast_data": "FunctionDef name:shape arg:self arguments arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_output_from_cache_entry",
    "source_code": "def _output_from_cache_entry(self, state: _CacheKeyState, entry: _DispatchCacheValidEntry, key: _DispatchCacheKey, func: OpOverload, args: Sequence[object]) -> Union[Optional[FakeTensor], tuple[Optional[FakeTensor], ...]]:\n    if entry.is_output_tuple:\n        outputs = [self._get_output_tensor_from_cache_entry(state, output_info, key, func, args) for output_info in entry.output_infos]\n        return tuple(outputs)\n    else:\n        return self._get_output_tensor_from_cache_entry(state, entry.output_infos[0], key, func, args)",
    "docstring": "Create a new FakeTensor from the cache entry.",
    "type": "method",
    "file_path": "pytorch\\torch\\_subclasses\\fake_tensor.py",
    "ast_data": "FunctionDef name:_output_from_cache_entry arg:self arg:state arg:entry arg:key arg:func arg:args arguments arg arg arg arg arg arg If Assign Call Return return:yes Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, strategy, replica_id_in_sync_group):\n    self._strategy = strategy\n    self._thread_context = _InReplicaThreadMode(self)\n    if not (replica_id_in_sync_group is None or tensor_util.is_tf_type(replica_id_in_sync_group) or isinstance(replica_id_in_sync_group, int)):\n        raise ValueError('replica_id_in_sync_group can only be an integer, a Tensor or None.')\n    self._replica_id_in_sync_group = replica_id_in_sync_group\n    if strategy:\n        self._local_replica_id = strategy.extended._get_local_replica_id(replica_id_in_sync_group)\n    self._summary_recording_distribution_strategy = None",
    "docstring": "Creates a ReplicaContext. Args: strategy: A . replica_id_in_sync_group: An integer, a or None. Prefer an integer whenever possible to avoid issues with nested . It accepts a only to be compatible with .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\distribute_lib.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:strategy arg:replica_id_in_sync_group arguments arg arg arg Assign Assign Call If BoolOp Compare Call Call Raise Call Assign If Assign Call Assign"
  },
  {
    "library": "scipy",
    "name": "_inputs_swap_needed",
    "source_code": "def _inputs_swap_needed(mode, shape1, shape2, axes=None):\n    if mode != 'valid':\n        return False\n    if not shape1:\n        return False\n    if axes is None:\n        axes = range(len(shape1))\n    ok1 = all((shape1[i] >= shape2[i] for i in axes))\n    ok2 = all((shape2[i] >= shape1[i] for i in axes))\n    if not (ok1 or ok2):\n        raise ValueError(\"For 'valid' mode, one must be at least as large as the other in every dimension\")\n    return not ok1",
    "docstring": "Determine if inputs arrays need to be swapped in mode. If in mode, returns whether or not the input arrays need to be swapped depending on whether is at least as large as in every calculated dimension. This is important for some of the correlation and convolution implementations in this module, where the larger array input needs to come before the smaller array input when operating in this mode. Note that if the mode provided is not 'valid', False is immediately returned.",
    "type": "function",
    "file_path": "scipy\\scipy\\signal\\_signaltools.py",
    "ast_data": "FunctionDef name:_inputs_swap_needed arg:mode arg:shape1 arg:shape2 arg:axes arguments arg arg arg arg If Compare Return return:yes If Return return:yes If Compare Assign Call Call Assign Call Compare Assign Call Compare If BoolOp Raise Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_print_tensor",
    "source_code": "def _print_tensor(tensor_name, num_elements, tensor, output_tensor):\n    if self._parameters.is_brief_mode():\n        if tensor_name not in tensor_trace_order.tensorname_to_cache_idx:\n            raise ValueError('Tensor %s with name %s is not in the tensorname_to_cache_idx' % (tensor, tensor_name))\n        msg = '%d' % tensor_trace_order.tensorname_to_cache_idx[tensor_name]\n    else:\n        msg = '\"%s\"' % tensor_name\n    if self._parameters.trace_dir:\n        output_path = os.path.join(self._parameters.trace_dir, _TRACE_FILE_NAME + self._get_outfile_suffix())\n        output_stream = _OUTPUT_STREAM_ESCAPE + output_path\n    else:\n        output_stream = sys.stderr\n    return logging_ops.print_v2(msg, array_ops.shape(output_tensor), '@', self._replica_id, '\\n', output_tensor, '\\n', summarize=num_elements, output_stream=output_stream)",
    "docstring": "Prints a tensor value to a file. Args: tensor_name: name of the tensor being traced. num_elements: number of elements to print (-1 means print all). tensor: the tensor needs to be returned. output_tensor: the tensor needs to be printed. Returns: The same tensor passed via the \"tensor\" argument. Raises: ValueError: If tensor_name is not already in tensor_trace_order.tensorname_to_cache_idx.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\tpu\\tensor_tracer.py",
    "ast_data": "FunctionDef name:_print_tensor arg:tensor_name arg:num_elements arg:tensor arg:output_tensor arguments arg arg arg arg If Call If Compare Raise Call Assign Assign If Assign Call Call Assign Assign Return return:yes Call Call"
  },
  {
    "library": "scikit-learn",
    "name": "GroupShuffleSplit",
    "source_code": "class GroupShuffleSplit(GroupsConsumerMixin, BaseShuffleSplit):\n\n    def __init__(self, n_splits=5, *, test_size=None, train_size=None, random_state=None):\n        super().__init__(n_splits=n_splits, test_size=test_size, train_size=train_size, random_state=random_state)\n        self._default_test_size = 0.2\n\n    def _iter_indices(self, X, y, groups):\n        if groups is None:\n            raise ValueError(\"The 'groups' parameter should not be None.\")\n        groups = check_array(groups, input_name='groups', ensure_2d=False, dtype=None)\n        classes, group_indices = np.unique(groups, return_inverse=True)\n        for group_train, group_test in super()._iter_indices(X=classes):\n            train = np.flatnonzero(np.isin(group_indices, group_train))\n            test = np.flatnonzero(np.isin(group_indices, group_test))\n            yield (train, test)\n\n    def split(self, X, y=None, groups=None):\n        return super().split(X, y, groups)",
    "docstring": "Shuffle-Group(s)-Out cross-validation iterator. Provides randomized train/test indices to split data according to a third-party provided group. This group information can be used to encode arbitrary domain specific stratifications of the samples as integers. For instance the groups could be the year of collection of the samples and thus allow for cross-validation against time-based splits. The difference between :class: and `ShuffleSplitUser Guide sphx_glr_auto_examples_model_selection_plot_cv_indices.pyGlossary p` groups.",
    "type": "class",
    "file_path": "scikit-learn\\sklearn\\model_selection\\_split.py",
    "ast_data": "ClassDef name:GroupShuffleSplit FunctionDef name:__init__ arg:self arg:n_splits arguments arg arg arg arg arg Call Call Assign FunctionDef name:_iter_indices arg:self arg:X arg:y arg:groups arguments arg arg arg arg If Compare Raise Call Assign Call Assign Call For Call Call Assign Call Call Assign Call Call FunctionDef name:split arg:self arg:X arg:y arg:groups arguments arg arg arg arg Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "get_all_groups",
    "source_code": "def get_all_groups(self) -> list[ProcessGroup]:\n    return [self.get_group(i) for i in range(self.mesh.ndim)]",
    "docstring": "Returns a list of ProcessGroups for all mesh dimensions. Returns: A list of :class: object.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\device_mesh.py",
    "ast_data": "FunctionDef name:get_all_groups arg:self arguments arg Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "is_sharded",
    "source_code": "def is_sharded(self, tensor: Tensor) -> bool:\n    if not hasattr(self.flat_param, '_sharded_size') or not self.uses_sharded_strategy:\n        return False\n    sharded_size = self.flat_param._sharded_size\n    return tensor.size() == sharded_size",
    "docstring": "Return whether `` for clarity.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\fsdp\\_flat_param.py",
    "ast_data": "FunctionDef name:is_sharded arg:self arg:tensor arguments arg arg If BoolOp Call Return return:yes Assign Return return:yes Compare Call"
  },
  {
    "library": "pytorch",
    "name": "clip_grad_norm",
    "source_code": "@deprecated('`torch.nn.utils.clip_grad_norm` is now deprecated in favor of `torch.nn.utils.clip_grad_norm_`.', category=FutureWarning)\ndef clip_grad_norm(parameters: _tensor_or_tensors, max_norm: float, norm_type: float=2.0, error_if_nonfinite: bool=False, foreach: Optional[bool]=None) -> torch.Tensor:\n    return clip_grad_norm_(parameters, max_norm, norm_type, error_if_nonfinite, foreach)",
    "docstring": "Clip the gradient norm of an iterable of parameters. .. warning:: This method is now deprecated in favor of :func:.",
    "type": "function",
    "file_path": "pytorch\\torch\\nn\\utils\\clip_grad.py",
    "ast_data": "FunctionDef name:clip_grad_norm arg:parameters arg:max_norm arg:norm_type arg:error_if_nonfinite arg:foreach arguments arg arg arg arg arg Return return:yes Call Call"
  },
  {
    "library": "pandas",
    "name": "delete",
    "source_code": "def delete(self, loc: int | np.integer | list[int] | npt.NDArray[np.integer]) -> Self:\n    values = self._values\n    res_values: ArrayLike\n    if isinstance(values, np.ndarray):\n        res_values = np.delete(values, loc)\n    else:\n        res_values = values.delete(loc)\n    return self._constructor._simple_new(res_values, name=self.name)",
    "docstring": "Make new Index with passed location(-s) deleted. Parameters ---------- loc : int or list of int Location of item(-s) which will be deleted. Use a list of locations to delete more than one value at the same time. Returns ------- Index Will be same type as self, except for RangeIndex. See Also -------- numpy.delete : Delete any rows and column from NumPy array (ndarray). Examples -------- >>> idx = pd.Index([\"a\", \"b\", \"c\"]) >>> idx.delete(1) Index(['a', 'c'], dtype='object') >>> idx = pd.Index([\"a\", \"b\", \"c\"]) >>> idx.delete([0, 2]) Index(['b'], dtype='object')",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\indexes\\base.py",
    "ast_data": "FunctionDef name:delete arg:self arg:loc arguments arg arg Assign If Call Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "pdf",
    "source_code": "def pdf(self, x, s2, mu=0, lmbda=1, a=1, b=1):\n    invalid, args = self._process_parameters_pdf(x, s2, mu, lmbda, a, b)\n    s2 = args[1]\n    with np.errstate(all='ignore'):\n        pdf = np.asarray(self._pdf(*args))\n    pdf[s2 <= 0] = 0\n    pdf[invalid] = np.nan\n    return pdf[()]",
    "docstring": "The probability density function. Parameters ---------- x, s2 : array_like Arguments. must be greater than zero. mu, lmbda, a, b : array_like, optional Shape parameters. , , and must be greater than zero. Returns ------- logpdf : ndarray or scalar The probability density function.",
    "type": "method",
    "file_path": "scipy\\scipy\\stats\\_multivariate.py",
    "ast_data": "FunctionDef name:pdf arg:self arg:x arg:s2 arg:mu arg:lmbda arg:a arg:b arguments arg arg arg arg arg arg arg Assign Call Assign With Call Assign Call Call Assign Compare Assign Return return:yes"
  },
  {
    "library": "pandas",
    "name": "_buffered_line",
    "source_code": "def _buffered_line(self) -> list[Scalar]:\n    if len(self.buf) > 0:\n        return self.buf[0]\n    else:\n        return self._next_line()",
    "docstring": "Return a line from buffer, filling buffer if required.",
    "type": "method",
    "file_path": "pandas\\pandas\\io\\parsers\\python_parser.py",
    "ast_data": "FunctionDef name:_buffered_line arg:self arguments arg If Compare Call Return return:yes Return return:yes Call"
  },
  {
    "library": "kornia",
    "name": "_cdist",
    "source_code": "def _cdist(d1: Tensor, d2: Tensor) -> Tensor:\n    if not is_mps_tensor_safe(d1) and (not is_mps_tensor_safe(d2)):\n        return torch.cdist(d1, d2)\n    d1_sq = (d1 ** 2).sum(dim=1, keepdim=True)\n    d2_sq = (d2 ** 2).sum(dim=1, keepdim=True)\n    dm = d1_sq.repeat(1, d2.size(0)) + d2_sq.repeat(1, d1.size(0)).t() - 2.0 * d1 @ d2.t()\n    dm = dm.clamp(min=0.0).sqrt()\n    return dm",
    "docstring": "Manual for M1.",
    "type": "function",
    "file_path": "kornia\\kornia\\feature\\matching.py",
    "ast_data": "FunctionDef name:_cdist arg:d1 arg:d2 arguments arg arg If BoolOp Call Call Return return:yes Call Assign Call Assign Call Assign Call Call Call Call Call Call Assign Call Call Return return:yes"
  },
  {
    "library": "sphinx",
    "name": "prefixed_warnings",
    "source_code": "@contextmanager\ndef prefixed_warnings(prefix: str) -> Iterator[None]:\n    logger = logging.getLogger(NAMESPACE)\n    warning_handler = None\n    for handler in logger.handlers:\n        if isinstance(handler, WarningStreamHandler):\n            warning_handler = handler\n            break\n    else:\n        yield\n        return\n    prefix_filter = None\n    for _filter in warning_handler.filters:\n        if isinstance(_filter, MessagePrefixFilter):\n            prefix_filter = _filter\n            break\n    if prefix_filter:\n        try:\n            previous = prefix_filter.prefix\n            prefix_filter.prefix = prefix\n            yield\n        finally:\n            prefix_filter.prefix = previous\n    else:\n        prefix_filter = MessagePrefixFilter(prefix)\n        try:\n            warning_handler.addFilter(prefix_filter)\n            yield\n        finally:\n            warning_handler.removeFilter(prefix_filter)",
    "docstring": "Context manager to prepend prefix to all warning log records temporarily. For example:: >>> with prefixed_warnings(\"prefix:\"): >>> logger.warning('Warning message!') # => prefix: Warning message! .. versionadded:: 2.0",
    "type": "function",
    "file_path": "sphinx\\sphinx\\util\\logging.py",
    "ast_data": "FunctionDef name:prefixed_warnings arg:prefix arguments arg Assign Call Assign For If Call Assign Return return:no Assign For If Call Assign If Try Assign Assign Assign Assign Call Try Call Call"
  },
  {
    "library": "scikit-learn",
    "name": "_finalize_splittable_nodes",
    "source_code": "def _finalize_splittable_nodes(self):\n    while len(self.splittable_nodes) > 0:\n        node = self.splittable_nodes.pop()\n        self._finalize_leaf(node)",
    "docstring": "Transform all splittable nodes into leaves. Used when some constraint is met e.g. maximum number of leaves or maximum depth.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\ensemble\\_hist_gradient_boosting\\grower.py",
    "ast_data": "FunctionDef name:_finalize_splittable_nodes arg:self arguments arg While Compare Call Assign Call Call"
  },
  {
    "library": "pytorch",
    "name": "forward",
    "source_code": "def forward(self, x, y):\n    if x.is_quantized:\n        x = x.dequantize()\n    self.count += 1\n    if self.stats['quantized'] is None:\n        self.stats['quantized'] = x\n        self.quant_sum = x\n    else:\n        self.quant_sum += x\n        self.stats['quantized'] = self.quant_sum / self.count\n    if self.stats['float'] is None:\n        self.stats['float'] = y\n        self.float_sum = y\n    else:\n        self.float_sum += y\n        self.stats['float'] = self.float_sum / self.count",
    "docstring": "Compute the average of quantized and floating-point data from modules. The inputs x,y are output data from the quantized and floating-point modules. x is for the quantized module, y is for the floating point module",
    "type": "method",
    "file_path": "pytorch\\torch\\ao\\quantization\\_correct_bias.py",
    "ast_data": "FunctionDef name:forward arg:self arg:x arg:y arguments arg arg arg If Assign Call If Compare Assign Assign Assign If Compare Assign Assign Assign"
  },
  {
    "library": "scipy",
    "name": "diagonal",
    "source_code": "def diagonal(self, k=0):\n    return self.tocsr().diagonal(k=k)",
    "docstring": "Returns the kth diagonal of the array/matrix. Parameters ---------- k : int, optional Which diagonal to get, corresponding to elements a[i, i+k]. Default: 0 (the main diagonal). .. versionadded:: 1.0 See also -------- numpy.diagonal : Equivalent numpy function. Examples -------- >>> from scipy.sparse import csr_array >>> A = csr_array([[1, 2, 0], [0, 0, 3], [4, 0, 5]]) >>> A.diagonal() array([1, 0, 5]) >>> A.diagonal(k=1) array([2, 3])",
    "type": "method",
    "file_path": "scipy\\scipy\\sparse\\_base.py",
    "ast_data": "FunctionDef name:diagonal arg:self arg:k arguments arg arg Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "is_jpeg",
    "source_code": "@tf_export('io.is_jpeg', 'image.is_jpeg', v1=['io.is_jpeg', 'image.is_jpeg'])\ndef is_jpeg(contents, name=None):\n    with ops.name_scope(name, 'is_jpeg'):\n        substr = string_ops.substr(contents, 0, 3)\n        return math_ops.equal(substr, b'\\xff\\xd8\\xff', name=name)",
    "docstring": "Convenience function to check if the 'contents' encodes a JPEG image. Args: contents: 0-D . The encoded image bytes. name: A name for the operation (optional) Returns: A scalar boolean tensor indicating if 'contents' may be a JPEG image. is_jpeg is susceptible to false positives.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\image_ops_impl.py",
    "ast_data": "FunctionDef name:is_jpeg arg:contents arg:name arguments arg arg With Call Assign Call Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "generate_proxy_api_files",
    "source_code": "def generate_proxy_api_files(output_files: list[str], proxy_module_root: str, output_dir: str):\n    for file in output_files:\n        file_dir = os.path.dirname(file)\n        if not os.path.isdir(file_dir):\n            os.makedirs(file_dir)\n        module = get_module(file_dir, output_dir)\n        content = f'from {proxy_module_root}.{module} import *'\n        with open(file, 'w') as f:\n            f.write(content)",
    "docstring": "Creates __init__.py files in proxy format for the Python API. Args: output_files: List of __init__.py file paths to create. proxy_module_root: Module root for proxy-import format. If specified, proxy files with content like will be created to enable import resolution under TensorFlow. output_dir: output API root directory.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\tools\\api\\generator2\\generator\\generator.py",
    "ast_data": "FunctionDef name:generate_proxy_api_files arg:output_files arg:proxy_module_root arg:output_dir arguments arg arg arg For Assign Call If Call Call Assign Call Assign With Call Call"
  },
  {
    "library": "pytorch",
    "name": "_RefType",
    "source_code": "class _RefType(str, Enum):\n    pass",
    "docstring": "Base Class for defining memory reference types, categorizing tensors based on their usage within a model.",
    "type": "class",
    "file_path": "pytorch\\torch\\distributed\\_tools\\mem_tracker.py",
    "ast_data": "ClassDef name:_RefType"
  },
  {
    "library": "tensorflow",
    "name": "_handle_metrics",
    "source_code": "def _handle_metrics(self, outputs, targets=None, skip_target_masks=None, sample_weights=None, masks=None, return_weighted_metrics=False, return_weighted_and_unweighted_metrics=False):\n    skip_target_masks = skip_target_masks or [False] * len(outputs)\n    metric_results = []\n    with backend.name_scope('metrics'):\n        for i in range(len(outputs)):\n            if skip_target_masks[i]:\n                continue\n            output = outputs[i] if outputs else None\n            target = targets[i] if targets else None\n            output_mask = masks[i] if masks else None\n            if return_weighted_and_unweighted_metrics or not return_weighted_metrics:\n                metric_results.extend(self._handle_per_output_metrics(self._per_output_metrics[i], target, output, output_mask))\n            if return_weighted_and_unweighted_metrics or return_weighted_metrics:\n                metric_results.extend(self._handle_per_output_metrics(self._per_output_weighted_metrics[i], target, output, output_mask, weights=sample_weights[i] if sample_weights else None))\n    return metric_results",
    "docstring": "Handles calling metric functions. Args: outputs: List of outputs (predictions). targets: List of targets. skip_target_masks: Optional. List of boolean for whether the corresponding target should be ignored or not. sample_weights: Optional list of sample weight arrays. masks: List of computed output mask values. return_weighted_metrics: Flag that indicates whether weighted metrics should be computed instead of unweighted metrics. This flag is ignored when is enabled. return_weighted_and_unweighted_metrics: Flag that is used to indicate whether both weighted and unweighted metrics should be computed. When this is not enabled, we use param to indicate whether weighted or unweighted metrics should be returned. Returns: A list of metric result tensors.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\training_v1.py",
    "ast_data": "FunctionDef name:_handle_metrics arg:self arg:outputs arg:targets arg:skip_target_masks arg:sample_weights arg:masks arg:return_weighted_metrics arg:return_weighted_and_unweighted_metrics arguments arg arg arg arg arg arg arg arg Assign BoolOp Call Assign With Call For Call Call If Assign Assign Assign If BoolOp Call Call If BoolOp Call Call Return return:yes"
  },
  {
    "library": "django",
    "name": "get_exception_info",
    "source_code": "def get_exception_info(exception):\n    context_lines = 10\n    lineno = exception.lineno\n    source = exception.source\n    if source is None:\n        exception_file = Path(exception.filename)\n        if exception_file.exists():\n            source = exception_file.read_text()\n    if source is not None:\n        lines = list(enumerate(source.strip().split('\\n'), start=1))\n        during = lines[lineno - 1][1]\n        total = len(lines)\n        top = max(0, lineno - context_lines - 1)\n        bottom = min(total, lineno + context_lines)\n    else:\n        during = ''\n        lines = []\n        total = top = bottom = 0\n    return {'name': exception.filename, 'message': exception.message, 'source_lines': lines[top:bottom], 'line': lineno, 'before': '', 'during': during, 'after': '', 'total': total, 'top': top, 'bottom': bottom}",
    "docstring": "Format exception information for display on the debug page using the structure described in the template API documentation.",
    "type": "function",
    "file_path": "django\\django\\template\\backends\\jinja2.py",
    "ast_data": "FunctionDef name:get_exception_info arg:exception arguments arg Assign Assign Assign If Compare Assign Call If Call Assign Call If Compare Assign Call Call Call Call Assign Assign Call Assign Call Assign Call Assign Assign Assign Return return:yes"
  },
  {
    "library": "django",
    "name": "_check_search_fields",
    "source_code": "def _check_search_fields(self, obj):\n    if not isinstance(obj.search_fields, (list, tuple)):\n        return must_be('a list or tuple', option='search_fields', obj=obj, id='admin.E126')\n    else:\n        return []",
    "docstring": "Check search_fields is a sequence.",
    "type": "method",
    "file_path": "django\\django\\contrib\\admin\\checks.py",
    "ast_data": "FunctionDef name:_check_search_fields arg:self arg:obj arguments arg arg If Call Return return:yes Call Return return:no"
  },
  {
    "library": "tensorflow",
    "name": "_apply_fn",
    "source_code": "def _apply_fn(dataset):\n    out_dataset = _ParseExampleDataset(dataset, features, num_parallel_calls, deterministic)\n    if any((isinstance(feature, parsing_ops.SparseFeature) or isinstance(feature, parsing_ops.RaggedFeature) for feature in features.values())):\n        out_dataset = out_dataset.map(lambda x: parsing_ops._construct_tensors_for_composite_features(features, x), num_parallel_calls=num_parallel_calls)\n    return out_dataset",
    "docstring": "Function from to that applies the transformation.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\data\\experimental\\ops\\parsing_ops.py",
    "ast_data": "FunctionDef name:_apply_fn arg:dataset arguments arg Assign Call If Call BoolOp Call Call Call Assign Call arguments arg Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "update_nested_dict",
    "source_code": "def update_nested_dict(main_dict, new_dict):\n    for name, rc_dict in new_dict.items():\n        main_dict.setdefault(name, {}).update(rc_dict)\n    return main_dict",
    "docstring": "Update nested dict (only level of nesting) with new values. Unlike , this assumes that the values of the parent dict are dicts (or dict-like), so you shouldn't replace the nested dict if it already exists. Instead you should update the sub-dict.",
    "type": "function",
    "file_path": "matplotlib\\lib\\matplotlib\\style\\core.py",
    "ast_data": "FunctionDef name:update_nested_dict arg:main_dict arg:new_dict arguments arg arg For Call Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_check_matrix",
    "source_code": "def _check_matrix(self, matrix):\n    allowed_dtypes = [dtypes.float16, dtypes.float32, dtypes.float64, dtypes.complex64, dtypes.complex128]\n    matrix = tensor_conversion.convert_to_tensor_v2_with_dispatch(matrix, name='matrix')\n    dtype = matrix.dtype\n    if dtype not in allowed_dtypes:\n        raise TypeError(f'Argument `matrix` must have dtype in {allowed_dtypes}. Received: {dtype}.')\n    if matrix.shape.ndims is not None and matrix.shape.ndims < 2:\n        raise ValueError(f'Argument `matrix` must have at least 2 dimensions. Received: {matrix}.')",
    "docstring": "Static check of the argument.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\linalg\\linear_operator_full_matrix.py",
    "ast_data": "FunctionDef name:_check_matrix arg:self arg:matrix arguments arg arg Assign Assign Call Assign If Compare Raise Call If BoolOp Compare Compare Raise Call"
  },
  {
    "library": "scipy",
    "name": "f2",
    "source_code": "def f2(x):\n    return x ** 2 - 1",
    "docstring": "f2 is a symmetric parabola, x**2 - 1",
    "type": "function",
    "file_path": "scipy\\scipy\\optimize\\_tstutils.py",
    "ast_data": "FunctionDef name:f2 arg:x arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "learning_phase_scope",
    "source_code": "@tf_contextlib.contextmanager\n@doc_controls.do_not_generate_docs\ndef learning_phase_scope(value):\n    warnings.warn('`tf.keras.backend.learning_phase_scope` is deprecated and will be removed after 2020-10-11. To update it, simply pass a True/False value to the `training` argument of the `__call__` method of your layer or model.')\n    with deprecated_internal_learning_phase_scope(value):\n        try:\n            yield\n        finally:\n            pass",
    "docstring": "Provides a scope within which the learning phase is equal to . The learning phase gets restored to its original value upon exiting the scope. Args: value: Learning phase value, either 0 or 1 (integers). 0 = test, 1 = train Yields: None. Raises: ValueError: if is neither nor .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\backend.py",
    "ast_data": "FunctionDef name:learning_phase_scope arg:value arguments arg Call With Call Try"
  },
  {
    "library": "pandas",
    "name": "__call__",
    "source_code": "def __call__(self):\n    vi = tuple(self.axis.get_view_interval())\n    vmin, vmax = vi\n    if vmax < vmin:\n        vmin, vmax = (vmax, vmin)\n    if self.isdynamic:\n        locs = self._get_default_locs(vmin, vmax)\n    else:\n        base = self.base\n        d, m = divmod(vmin, base)\n        vmin = (d + 1) * base\n        locs = list(range(vmin, vmax + 1, base))\n    return locs",
    "docstring": "Return the locations of the ticks.",
    "type": "method",
    "file_path": "pandas\\pandas\\plotting\\_matplotlib\\converter.py",
    "ast_data": "FunctionDef name:__call__ arg:self arguments arg Assign Call Call Assign If Compare Assign If Assign Call Assign Assign Call Assign Assign Call Call Return return:yes"
  },
  {
    "library": "numpy",
    "name": "DTypePromotionError",
    "source_code": "class DTypePromotionError(TypeError):\n    pass",
    "docstring": "Multiple DTypes could not be converted to a common one. This exception derives from `arr1 == arr2object('field1', 'field2')('field1',)` mismatch.",
    "type": "class",
    "file_path": "numpy\\numpy\\exceptions.py",
    "ast_data": "ClassDef name:DTypePromotionError"
  },
  {
    "library": "kornia",
    "name": "GFTTAffNetHardNet",
    "source_code": "class GFTTAffNetHardNet(LocalFeature):\n\n    def __init__(self, num_features: int=8000, upright: bool=False, device: Optional[Device]=None, config: Optional[Detector_config]=None) -> None:\n        if device is None:\n            device = torch.device('cpu')\n        if config is None:\n            config = get_default_detector_config()\n        detector = MultiResolutionDetector(CornerGFTT(), num_features, config, ori_module=PassLAF() if upright else LAFOrienter(19), aff_module=LAFAffNetShapeEstimator(True).eval()).to(device)\n        descriptor = LAFDescriptor(None, patch_size=32, grayscale_descriptor=True).to(device)\n        super().__init__(detector, descriptor)",
    "docstring": "Convenience module, which implements GFTT detector + AffNet-HardNet descriptor.",
    "type": "class",
    "file_path": "kornia\\kornia\\feature\\integrated.py",
    "ast_data": "ClassDef name:GFTTAffNetHardNet FunctionDef name:__init__ arg:self arg:num_features arg:upright arg:device arg:config arguments arg arg arg arg arg If Compare Assign Call If Compare Assign Call Assign Call Call Call Call Call Call Call Assign Call Call Call Call"
  },
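A hedged usage sketch for the entry above; the grayscale `(B, 1, H, W)` float input and the returned triple follow kornia's `LocalFeature` convention, and the printed shapes are indicative only.

```python
import torch
from kornia.feature import GFTTAffNetHardNet

img = torch.rand(1, 1, 128, 128)  # grayscale image batch, float in [0, 1]

feat = GFTTAffNetHardNet(num_features=200)
with torch.no_grad():
    lafs, responses, descriptors = feat(img)  # local affine frames, responses, descriptors
print(lafs.shape, descriptors.shape)  # roughly (1, 200, 2, 3) and (1, 200, 128)
```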
  {
    "library": "kornia",
    "name": "__init__",
    "source_code": "def __init__(self, embedding_dim: int, num_heads: int, mlp_dim: int=2048, activation: type[Module]=nn.ReLU, attention_downsample_rate: int=2, skip_first_layer_pe: bool=False) -> None:\n    super().__init__()\n    self.self_attn = Attention(embedding_dim, num_heads)\n    self.norm1 = nn.LayerNorm(embedding_dim)\n    self.cross_attn_token_to_image = Attention(embedding_dim, num_heads, downsample_rate=attention_downsample_rate)\n    self.norm2 = nn.LayerNorm(embedding_dim)\n    self.mlp = MLPBlock(embedding_dim, mlp_dim, activation)\n    self.norm3 = nn.LayerNorm(embedding_dim)\n    self.norm4 = nn.LayerNorm(embedding_dim)\n    self.cross_attn_image_to_token = Attention(embedding_dim, num_heads, downsample_rate=attention_downsample_rate)\n    self.skip_first_layer_pe = skip_first_layer_pe",
    "docstring": "Construct a transformer block with four layers. (1) self-attention of sparse inputs, (2) cross attention of sparse inputs to dense inputs, (3) mlp block on sparse inputs, and (4) cross attention of dense inputs to sparse inputs. Args: embedding_dim: the channel dimension of the embeddings num_heads: the number of heads in the attention layers mlp_dim: the hidden dimension of the mlp block activation: the activation of the mlp block skip_first_layer_pe: skip the PE on the first layer attention_downsample_rate: downsampling rate from embedding dimension",
    "type": "method",
    "file_path": "kornia\\kornia\\contrib\\models\\sam\\architecture\\transformer.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:embedding_dim arg:num_heads arg:mlp_dim arg:activation arg:attention_downsample_rate arg:skip_first_layer_pe arguments arg arg arg arg arg arg arg Call Call Assign Call Assign Call Assign Call Assign Call Assign Call Assign Call Assign Call Assign Call Assign"
  },
  {
    "library": "scrapy",
    "name": "strip_url",
    "source_code": "def strip_url(url: str, strip_credentials: bool=True, strip_default_port: bool=True, origin_only: bool=False, strip_fragment: bool=True) -> str:\n    parsed_url = urlparse(url)\n    netloc = parsed_url.netloc\n    if (strip_credentials or origin_only) and (parsed_url.username or parsed_url.password):\n        netloc = netloc.split('@')[-1]\n    if strip_default_port and parsed_url.port and ((parsed_url.scheme, parsed_url.port) in (('http', 80), ('https', 443), ('ftp', 21))):\n        netloc = netloc.replace(f':{parsed_url.port}', '')\n    return urlunparse((parsed_url.scheme, netloc, '/' if origin_only else parsed_url.path, '' if origin_only else parsed_url.params, '' if origin_only else parsed_url.query, '' if strip_fragment else parsed_url.fragment))",
    "docstring": "Strip URL string from some of its components: - `` drops any #fragment component",
    "type": "function",
    "file_path": "scrapy\\scrapy\\utils\\url.py",
    "ast_data": "FunctionDef name:strip_url arg:url arg:strip_credentials arg:strip_default_port arg:origin_only arg:strip_fragment arguments arg arg arg arg arg Assign Call Assign If BoolOp BoolOp BoolOp Assign Call If BoolOp Compare Assign Call Return return:yes Call"
  },
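A quick demonstration of the defaults: credentials, the scheme's default port, and the fragment are removed, while `origin_only=True` additionally collapses the path, params and query.

```python
from scrapy.utils.url import strip_url

url = "http://user:pass@www.example.com:80/index.html?arg=1#frag"
print(strip_url(url))
# http://www.example.com/index.html?arg=1
print(strip_url(url, origin_only=True))
# http://www.example.com/
```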
  {
    "library": "tensorflow",
    "name": "output",
    "source_code": "@property\ndef output(self):\n    if not self._inbound_nodes:\n        raise AttributeError('Layer ' + self.name + ' has no inbound nodes.')\n    return self._get_node_attribute_at_index(0, 'output_tensors', 'output')",
    "docstring": "Retrieves the output tensor(s) of a layer. Only applicable if the layer has exactly one output, i.e. if it is connected to one incoming layer. Returns: Output tensor or list of output tensors. Raises: AttributeError: if the layer is connected to more than one incoming layers. RuntimeError: if called in Eager mode.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\base_layer.py",
    "ast_data": "FunctionDef name:output arg:self arguments arg If Raise Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "PlaceholderObserver",
    "source_code": "class PlaceholderObserver(ObserverBase):\n\n    def __init__(self, dtype=torch.float32, custom_op_name='', compute_dtype=None, quant_min=None, quant_max=None, qscheme=None, eps=None, is_dynamic=False) -> None:\n        super().__init__(dtype=dtype, is_dynamic=is_dynamic)\n        if qscheme is None:\n            qscheme = torch.per_tensor_affine\n        if eps is None:\n            eps = torch.finfo(torch.float32).eps\n        self.dtype = dtype\n        self.qscheme = qscheme\n        self.quant_min = quant_min\n        self.quant_max = quant_max\n        self.eps = eps\n        self.custom_op = custom_op_name\n        if compute_dtype:\n            is_dynamic = True\n            warnings.warn('Please use `is_dynamic` instead of `compute_dtype`.                     `compute_dtype` will be deprecated in a future release                     of PyTorch.')\n\n    def forward(self, x):\n        return x\n\n    @torch.jit.export\n    def extra_repr(self):\n        return f'dtype={self.dtype}, is_dynamic={self.is_dynamic}'\n\n    @torch.jit.export\n    def calculate_qparams(self):\n        raise Exception('calculate_qparams should not be called for PlaceholderObserver')",
    "docstring": "Observer that doesn't do anything and just passes its configuration to the quantized module's `quantizeis_dynamic=Truequantize` function in the reference model representation taking stats from this observer instance will use dynamic quantization.",
    "type": "class",
    "file_path": "pytorch\\torch\\ao\\quantization\\observer.py",
    "ast_data": "ClassDef name:PlaceholderObserver FunctionDef name:__init__ arg:self arg:dtype arg:custom_op_name arg:compute_dtype arg:quant_min arg:quant_max arg:qscheme arg:eps arg:is_dynamic arguments arg arg arg arg arg arg arg arg arg Call Call If Compare Assign If Compare Assign Call Assign Assign Assign Assign Assign Assign If Assign Call FunctionDef name:forward arg:self arg:x arguments arg arg Return return:yes FunctionDef name:extra_repr arg:self arguments arg Return return:yes FunctionDef name:calculate_qparams arg:self arguments arg Raise Call"
  },
  {
    "library": "numpy",
    "name": "run_ruff",
    "source_code": "def run_ruff(self, fix: bool) -> tuple[int, str]:\n    command = ['ruff', 'check']\n    if fix:\n        command.append('--fix')\n    res = subprocess.run(command, stdout=subprocess.PIPE, cwd=self.repository_root, encoding='utf-8')\n    return (res.returncode, res.stdout)",
    "docstring": "Original Author: Josh Wilson (@person142) Source: Unlike pycodestyle, ruff by itself is not capable of limiting its output to the given diff.",
    "type": "method",
    "file_path": "numpy\\tools\\linter.py",
    "ast_data": "FunctionDef name:run_ruff arg:self arg:fix arguments arg arg Assign If Call Assign Call Return return:yes"
  },
  {
    "library": "pandas",
    "name": "_info_repr",
    "source_code": "def _info_repr(self) -> bool:\n    info_repr_option = get_option('display.large_repr') == 'info'\n    return info_repr_option and (not (self._repr_fits_horizontal_() and self._repr_fits_vertical_()))",
    "docstring": "True if the repr should show the info view.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\frame.py",
    "ast_data": "FunctionDef name:_info_repr arg:self arguments arg Assign Compare Call Return return:yes BoolOp BoolOp Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_variable_shape",
    "source_code": "@property\ndef _variable_shape(self):\n    return tensor_shape.TensorShape([1, self.categorical_column._num_buckets])",
    "docstring": "Returns a representing the shape of the dense .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\feature_column\\feature_column.py",
    "ast_data": "FunctionDef name:_variable_shape arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "trace_tensor",
    "source_code": "def trace_tensor(tensor, tracepoint_name=None):\n    if tracepoint_name is None:\n        tracepoint_name = tensor.name\n    tensor.graph.get_collection(_TENSOR_TRACER_COLLECTION)\n    tensor.graph.add_to_collection(_TENSOR_TRACER_COLLECTION, (tensor, tracepoint_name))\n    return tensor",
    "docstring": "Programmatic interface to trace a tensor with Tensor Tracer. Tensor Tracer, by default, traces all tensors in the execution. This function can be used to limit traced tensors. If this function is called for a subset of the tensors, only those will be traced. For example, Tensor Traacer will only trace c below. c = tf.MatMul(a, b) tensor_tracer.trace_tensor(c) d = tf.add(c, 1) Args: tensor: the tensor object for which the tracing is requested. tracepoint_name: an optional tensor tracepoint name string. A tracepoint name is an Tensor Tracer internal name for the tensor. It is useful when comparing equivalent traces from different models that have different tensor namings. Equivalent tensors (with different names) can be mapped to each other by assigning a common tracepoint_name. Returns: The provided tensor.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\tpu\\tensor_tracer.py",
    "ast_data": "FunctionDef name:trace_tensor arg:tensor arg:tracepoint_name arguments arg arg If Compare Assign Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_AttrsFetchMapper",
    "source_code": "class _AttrsFetchMapper(_FetchMapper):\n\n    def __init__(self, fetches):\n        values = _get_attrs_values(fetches)\n        self._fetch_type = type(fetches)\n        self._mappers = [_FetchMapper.for_fetch(fetch) for fetch in values]\n        self._unique_fetches, self._value_indices = _uniquify_fetches(self._mappers)\n\n    def unique_fetches(self):\n        return self._unique_fetches\n\n    def build_results(self, values):\n        results = []\n        for m, vi in zip(self._mappers, self._value_indices):\n            results.append(m.build_results([values[j] for j in vi]))\n        return self._fetch_type(*results)",
    "docstring": "Fetch mapper for attrs decorated classes.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\client\\session.py",
    "ast_data": "ClassDef name:_AttrsFetchMapper FunctionDef name:__init__ arg:self arg:fetches arguments arg arg Assign Call Assign Call Assign Call Assign Call FunctionDef name:unique_fetches arg:self arguments arg Return return:yes FunctionDef name:build_results arg:self arg:values arguments arg arg Assign For Call Call Call Return return:yes Call"
  },
  {
    "library": "cherrypy",
    "name": "__init__",
    "source_code": "def __init__(self, local_host, remote_host, scheme='http', server_protocol='HTTP/1.1'):\n    self.local = local_host\n    self.remote = remote_host\n    self.scheme = scheme\n    self.server_protocol = server_protocol\n    self.closed = False\n    self.error_page = self.error_page.copy()\n    self.namespaces = self.namespaces.copy()\n    self.stage = None\n    self.unique_id = LazyUUID4()",
    "docstring": "Populate a new Request object. local_host should be an httputil.Host object with the server info. remote_host should be an httputil.Host object with the client info. scheme should be a string, either \"http\" or \"https\".",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\_cprequest.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:local_host arg:remote_host arg:scheme arg:server_protocol arguments arg arg arg arg arg Assign Assign Assign Assign Assign Assign Call Assign Call Assign Assign Call"
  },
  {
    "library": "matplotlib",
    "name": "writeXref",
    "source_code": "def writeXref(self):\n    self.startxref = self.fh.tell() - self.tell_base\n    self.write(b'xref\\n0 %d\\n' % len(self.xrefTable))\n    for i, (offset, generation, name) in enumerate(self.xrefTable):\n        if offset is None:\n            raise AssertionError('No offset for object %d (%s)' % (i, name))\n        else:\n            key = b'f' if name == 'the zero object' else b'n'\n            text = b'%010d %05d %b \\n' % (offset, generation, key)\n            self.write(text)",
    "docstring": "Write out the xref table.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\backends\\backend_pdf.py",
    "ast_data": "FunctionDef name:writeXref arg:self arguments arg Assign Call Call Call For Call If Compare Raise Call Assign Compare Assign Call"
  },
  {
    "library": "kornia",
    "name": "SamModelType",
    "source_code": "class SamModelType(Enum):\n    vit_h = 0\n    vit_l = 1\n    vit_b = 2\n    mobile_sam = 3",
    "docstring": "Map the SAM model types.",
    "type": "class",
    "file_path": "kornia\\kornia\\contrib\\models\\sam\\model.py",
    "ast_data": "ClassDef name:SamModelType Assign Assign Assign Assign"
  },
  {
    "library": "tensorflow",
    "name": "scatter_sub",
    "source_code": "def scatter_sub(self, sparse_delta, use_locking=False, name=None):\n    raise NotImplementedError",
    "docstring": "Subtracts from this variable. Args: sparse_delta: to be subtracted from this variable. use_locking: If , use locking during the operation. name: the name of the operation. Returns: The updated variable. Raises: TypeError: if is not an .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\variables.py",
    "ast_data": "FunctionDef name:scatter_sub arg:self arg:sparse_delta arg:use_locking arg:name arguments arg arg arg arg Raise"
  },
  {
    "library": "kornia",
    "name": "inverse",
    "source_code": "@classmethod\ndef inverse(cls, input: Keypoints, module: Module, param: ParamItem, extra_args: Optional[Dict[str, Any]]=None) -> Keypoints:\n    if extra_args is None:\n        extra_args = {}\n    _input = input.clone()\n    if isinstance(module, (K.GeometricAugmentationBase2D,)):\n        if module.transform_matrix is None:\n            raise ValueError(f'No valid transformation matrix found in {module.__class__}.')\n        transform = module.compute_inverse_transformation(module.transform_matrix)\n        _input = module.inverse_keypoints(_input, cls.get_instance_module_param(param), module.flags, transform=transform, **extra_args)\n    elif isinstance(module, (K.GeometricAugmentationBase3D,)):\n        raise NotImplementedError('The support for 3d keypoint operations are not yet supported. You are welcome to file a PR in our repo.')\n    elif isinstance(module, K.ImageSequential) and (not module.is_intensity_only()):\n        _input = module.inverse_keypoints(_input, params=cls.get_sequential_module_param(param), extra_args=extra_args)\n    elif isinstance(module, K.container.ImageSequentialBase):\n        _input = module.inverse_keypoints(_input, params=cls.get_sequential_module_param(param), extra_args=extra_args)\n    elif isinstance(module, (K.auto.operations.OperationBase,)):\n        return KeypointSequentialOps.inverse(input, module=module.op, param=param, extra_args=extra_args)\n    return _input",
    "docstring": "Inverse a transformation with respect to the parameters. Args: input: the input tensor. module: any torch Module but only kornia augmentation modules will count to apply transformations. param: the corresponding parameters to the module. extra_args: Optional dictionary of extra arguments with specific options for different input types.",
    "type": "method",
    "file_path": "kornia\\kornia\\augmentation\\container\\ops.py",
    "ast_data": "FunctionDef name:inverse arg:cls arg:input arg:module arg:param arg:extra_args arguments arg arg arg arg arg If Compare Assign Assign Call If Call If Compare Raise Call Assign Call Assign Call Call If Call Raise Call If BoolOp Call Call Assign Call Call If Call Assign Call Call If Call Return return:yes Call Return return:yes"
  },
  {
    "library": "kornia",
    "name": "CauchyLoss",
    "source_code": "class CauchyLoss(Module):\n\n    def __init__(self, reduction: str='none') -> None:\n        super().__init__()\n        self.reduction = reduction\n\n    def forward(self, img1: Tensor, img2: Tensor) -> Tensor:\n        return cauchy_loss(img1=img1, img2=img2, reduction=self.reduction)",
    "docstring": "Criterion that computes the Cauchy [2] (aka. Lorentzian) loss. According to [1], we compute the Cauchy loss as follows: .. math:: \\text{WL}(x, y) = log(\\frac{1}{2} (x - y)^{2} + 1) Where: - :math: is the prediction. - :math: is the target to be regressed to. Reference: [1] [2] Args: reduction: Specifies the reduction to apply to the output: `(*)`. - img2: the target tensor with the same shape as img1. Example: >>> criterion = CauchyLoss(reduction=\"mean\") >>> img1 = torch.randn(2, 3, 32, 2107, requires_grad=True) >>> img2 = torch.randn(2, 3, 32, 2107) >>> output = criterion(img1, img2) >>> output.backward()",
    "type": "class",
    "file_path": "kornia\\kornia\\losses\\cauchy.py",
    "ast_data": "ClassDef name:CauchyLoss FunctionDef name:__init__ arg:self arg:reduction arguments arg arg Call Call Assign FunctionDef name:forward arg:self arg:img1 arg:img2 arguments arg arg arg Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "set_angle",
    "source_code": "def set_angle(self, angle):\n    self._angle = angle\n    self._path = None\n    self.stale = True",
    "docstring": "Set the tilt angle of the annulus. Parameters ---------- angle : float",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\patches.py",
    "ast_data": "FunctionDef name:set_angle arg:self arg:angle arguments arg arg Assign Assign Assign"
  },
  {
    "library": "tensorflow",
    "name": "_Enter",
    "source_code": "def _Enter(tensor, frame_name, is_constant=False, parallel_iterations=10, use_ref=True, use_input_shape=True, name=None):\n    tensor = ops.internal_convert_to_tensor_or_composite(tensor, as_ref=True)\n    if isinstance(tensor, tensor_lib.Tensor):\n        if tensor.dtype._is_ref_dtype and use_ref:\n            result = gen_control_flow_ops.ref_enter(tensor, frame_name, is_constant, parallel_iterations, name=name)\n        else:\n            result = gen_control_flow_ops.enter(tensor, frame_name, is_constant, parallel_iterations, name=name)\n        if use_input_shape:\n            result.set_shape(tensor.get_shape())\n        return result\n    elif isinstance(tensor, composite_tensor.CompositeTensor):\n\n        def enter_component(t):\n            return _Enter(t, frame_name, is_constant, parallel_iterations, use_ref, use_input_shape)\n        return nest.map_structure(enter_component, tensor, expand_composites=True)\n    else:\n        raise TypeError(f\"'tensor' must be a Tensor or CompositeTensor. Received: {type(tensor)}.\")",
    "docstring": "Creates or finds a child frame, and makes available to it. The unique is used by the to identify frames. If is true, is a constant in the child frame; otherwise it may be changed in the child frame. At most iterations are run in parallel in the child frame. Args: tensor: The tensor to be made available to the child frame. frame_name: The name of the child frame. is_constant: If true, the output is constant within the child frame. parallel_iterations: The number of iterations allowed to run in parallel. use_ref: If true, use ref_enter if tensor is of ref type. use_input_shape: If true, set the result's shape based on tensor's shape. name: A name for this operation (optional). Returns: The same tensor as . Raises: ValueError: If any tensor in has a less specific shape than its corresponding shape in .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\control_flow_ops.py",
    "ast_data": "FunctionDef name:_Enter arg:tensor arg:frame_name arg:is_constant arg:parallel_iterations arg:use_ref arg:use_input_shape arg:name arguments arg arg arg arg arg arg arg Assign Call If Call If BoolOp Assign Call Assign Call If Call Call Return return:yes If Call FunctionDef name:enter_component arg:t arguments arg Return return:yes Call Return return:yes Call Raise Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_compute_carry_and_output_fused",
    "source_code": "def _compute_carry_and_output_fused(self, z, c_tm1):\n    z0, z1, z2, z3 = z\n    i = self.recurrent_activation(z0)\n    f = self.recurrent_activation(z1)\n    c = f * c_tm1 + i * self.activation(z2)\n    o = self.recurrent_activation(z3)\n    return (c, o)",
    "docstring": "Computes carry and output using fused kernels.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\layers\\recurrent.py",
    "ast_data": "FunctionDef name:_compute_carry_and_output_fused arg:self arg:z arg:c_tm1 arguments arg arg arg Assign Assign Call Assign Call Assign Call Assign Call Return return:yes"
  },
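A plain-NumPy sketch of the arithmetic above, with hypothetical random pre-activations standing in for the fused kernel's output; `sigmoid` plays the role of the recurrent activation and `tanh` the cell activation, which are the Keras defaults.

```python
import numpy as np

def sigmoid(v):
    return 1.0 / (1.0 + np.exp(-v))

units = 4
z = [np.random.randn(2, units) for _ in range(4)]  # z0..z3: one fused matmul, split in four
c_tm1 = np.zeros((2, units))                       # previous carry state

i = sigmoid(z[0])                  # input gate
f = sigmoid(z[1])                  # forget gate
c = f * c_tm1 + i * np.tanh(z[2])  # new carry state
o = sigmoid(z[3])                  # output gate
h = o * np.tanh(c)                 # hidden state, computed by the caller in Keras
```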
  {
    "library": "pandas",
    "name": "is_monotonic_increasing",
    "source_code": "@property\ndef is_monotonic_increasing(self) -> Series:\n    return self.apply(lambda ser: ser.is_monotonic_increasing)",
    "docstring": "Return whether each group's values are monotonically increasing. Returns ------- Series See Also -------- SeriesGroupBy.is_monotonic_decreasing : Return whether each group's values are monotonically decreasing. Examples -------- >>> s = pd.Series([2, 1, 3, 4], index=[\"Falcon\", \"Falcon\", \"Parrot\", \"Parrot\"]) >>> s.groupby(level=0).is_monotonic_increasing Falcon False Parrot True dtype: bool",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\groupby\\generic.py",
    "ast_data": "FunctionDef name:is_monotonic_increasing arg:self arguments arg Return return:yes Call arguments arg"
  },
  {
    "library": "kornia",
    "name": "rot_x",
    "source_code": "@classmethod\ndef rot_x(cls, x: Tensor) -> Se3:\n    zs = zeros_like(x)\n    return cls(So3.rot_x(x), stack((zs, zs, zs), -1))",
    "docstring": "Construct a x-axis rotation. Args: x: the x-axis rotation angle.",
    "type": "method",
    "file_path": "kornia\\kornia\\geometry\\liegroup\\se3.py",
    "ast_data": "FunctionDef name:rot_x arg:cls arg:x arguments arg arg Assign Call Return return:yes Call Call Call"
  },
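A short usage sketch, assuming the public `kornia.geometry.liegroup` import path; the resulting element is a pure rotation about the x-axis with zero translation.

```python
import torch
from kornia.geometry.liegroup import Se3

angle = torch.tensor([0.5])
g = Se3.rot_x(angle)  # rotation about x, translation (0, 0, 0)
print(g)
```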
  {
    "library": "django",
    "name": "__init__",
    "source_code": "def __init__(self, geom):\n    if isinstance(geom, Polygon):\n        if self._polygon_must_be_fixed(geom):\n            geom = self._fix_polygon(geom)\n    elif isinstance(geom, GeometryCollection):\n        if any((isinstance(g, Polygon) and self._polygon_must_be_fixed(g) for g in geom)):\n            geom = self._fix_geometry_collection(geom)\n    self.wkt = geom.wkt\n    self.srid = geom.srid",
    "docstring": "Oracle requires that polygon rings are in proper orientation. This affects spatial operations and an invalid orientation may cause failures. Correct orientations are: * Outer ring - counter clockwise * Inner ring(s) - clockwise",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\db\\backends\\oracle\\adapter.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:geom arguments arg arg If Call If Call Assign Call If Call If Call BoolOp Call Call Assign Call Assign Assign"
  },
  {
    "library": "tensorflow",
    "name": "SessionCreator",
    "source_code": "@tf_export(v1=['train.SessionCreator'])\nclass SessionCreator(metaclass=abc.ABCMeta):\n\n    @abc.abstractmethod\n    def create_session(self):\n        raise NotImplementedError('create_session is not implemented for {}.'.format(self))",
    "docstring": "A factory for tf.Session.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\training\\monitored_session.py",
    "ast_data": "ClassDef name:SessionCreator FunctionDef name:create_session arg:self arguments arg Raise Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "DeQuantStub",
    "source_code": "class DeQuantStub(nn.Module):\n\n    def __init__(self, qconfig=None):\n        super().__init__()\n        if qconfig:\n            self.qconfig = qconfig\n\n    def forward(self, x):\n        return x",
    "docstring": "Dequantize stub module, before calibration, this is same as identity, this will be swapped as in . Args: qconfig: quantization configuration for the tensor, if qconfig is not provided, we will get qconfig from parent modules",
    "type": "class",
    "file_path": "pytorch\\torch\\ao\\quantization\\stubs.py",
    "ast_data": "ClassDef name:DeQuantStub FunctionDef name:__init__ arg:self arg:qconfig arguments arg arg Call Call If Assign FunctionDef name:forward arg:self arg:x arguments arg arg Return return:yes"
  },
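A minimal eager-mode post-training quantization sketch showing where `DeQuantStub` sits; the `fbgemm` qconfig and the single calibration pass are illustrative choices.

```python
import torch
import torch.nn as nn
from torch.ao.quantization import QuantStub, DeQuantStub

class M(nn.Module):
    def __init__(self):
        super().__init__()
        self.quant = QuantStub()      # float -> quantized boundary after convert
        self.fc = nn.Linear(4, 4)
        self.dequant = DeQuantStub()  # quantized -> float boundary after convert

    def forward(self, x):
        return self.dequant(self.fc(self.quant(x)))

m = M().eval()
m.qconfig = torch.ao.quantization.get_default_qconfig("fbgemm")
torch.ao.quantization.prepare(m, inplace=True)
m(torch.randn(2, 4))  # calibration pass; the stubs are identities here
torch.ao.quantization.convert(m, inplace=True)
```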
  {
    "library": "scikit-learn",
    "name": "BadAttributeName",
    "source_code": "class BadAttributeName(ArffException):\n\n    def __init__(self, value, value2):\n        super().__init__()\n        self.message = 'Bad @ATTRIBUTE name %s at line' % value + ' %d, this name is already in use in line' + ' %d.' % value2",
    "docstring": "Error raised when an attribute name is provided twice the attribute declaration.",
    "type": "class",
    "file_path": "scikit-learn\\sklearn\\externals\\_arff.py",
    "ast_data": "ClassDef name:BadAttributeName FunctionDef name:__init__ arg:self arg:value arg:value2 arguments arg arg arg Call Call Assign"
  },
  {
    "library": "pytorch",
    "name": "_get_tensor_info_op_configs",
    "source_code": "def _get_tensor_info_op_configs(dtype_configs):\n\n    def _get_config(op):\n        return BackendPatternConfig(op).set_observation_type(ObservationType.INPUT_OUTPUT_NOT_OBSERVED).set_dtype_configs(dtype_configs)\n    return [_get_config(op) for op in ('shape', 'size')]",
    "docstring": "These ops work on tensors of different dtypes but return non-tensors containing information about the input tensor.",
    "type": "function",
    "file_path": "pytorch\\torch\\ao\\quantization\\backend_config\\_common_operator_config_utils.py",
    "ast_data": "FunctionDef name:_get_tensor_info_op_configs arg:dtype_configs arguments arg FunctionDef name:_get_config arg:op arguments arg Return return:yes Call Call Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "sparse_embedding_aggregate_slice",
    "source_code": "def sparse_embedding_aggregate_slice(params, values_and_values_mask, combiner='mean', name='sparse_embedding_aggregate_slice'):\n    values, values_mask = values_and_values_mask\n    with ops.name_scope(name):\n        _, embedding_dimension = params.get_shape().as_list()\n        n_batch, n_indices_padded = values.get_shape().as_list()\n        if not n_batch:\n            n_batch = -1\n        emb_lookup = array_ops.reshape(embedding_ops.embedding_lookup(params, array_ops.reshape(values, [n_batch, n_indices_padded])), [n_batch, n_indices_padded, embedding_dimension])\n        values_mask_broadcast = array_ops.reshape(values_mask, [n_batch, n_indices_padded, 1])\n        aggregate_emb = math_ops.reduce_sum(emb_lookup * values_mask_broadcast, axis=1)\n        if combiner == 'sum':\n            return aggregate_emb\n        elif combiner == 'mean':\n            return aggregate_emb / math_ops.maximum(math_ops.reduce_sum(values_mask_broadcast, axis=1), 1.0)\n        else:\n            raise ValueError('Dense TPU Embedding does not support combiner other than sum and mean.')",
    "docstring": "Uses XLA's dynamic slice operations to perform embedding lookups. From third_party/cloud_tpu/models/movielens/tpu_embedding.py Args: params: Tensor of embedding table. Rank 2 (table_size x embedding dim) values_and_values_mask: is a two-tuple that contains: values - Tensor of embedding indices. Rank 2 (batch x n_indices) values_mask - Tensor of mask / weights. Rank 2 (batch x n_indices) combiner: The combiner to use for the embedding lookup. Currently supports 'sum' and 'mean'. name: Optional name scope for created ops Returns: Rank 2 tensor of aggregated (per batch element) embedding vectors. Raises: ValueError: Combiner is not supported.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\tpu\\feature_column_v2.py",
    "ast_data": "FunctionDef name:sparse_embedding_aggregate_slice arg:params arg:values_and_values_mask arg:combiner arg:name arguments arg arg arg arg Assign With Call Assign Call Call Assign Call Call If Assign Assign Call Call Call Assign Call Assign Call If Compare Return return:yes If Compare Return return:yes Call Call Raise Call"
  },
  {
    "library": "matplotlib",
    "name": "_convert_psfrags",
    "source_code": "def _convert_psfrags(tmppath, psfrags, paper_width, paper_height, orientation):\n    with mpl.rc_context({'text.latex.preamble': mpl.rcParams['text.latex.preamble'] + mpl.texmanager._usepackage_if_not_loaded('color') + mpl.texmanager._usepackage_if_not_loaded('graphicx') + mpl.texmanager._usepackage_if_not_loaded('psfrag') + '\\\\geometry{papersize={%(width)sin,%(height)sin},margin=0in}' % {'width': paper_width, 'height': paper_height}}):\n        dvifile = TexManager().make_dvi('\\n\\\\begin{figure}\\n  \\\\centering\\\\leavevmode\\n  %(psfrags)s\\n  \\\\includegraphics*[angle=%(angle)s]{%(epsfile)s}\\n\\\\end{figure}' % {'psfrags': '\\n'.join(psfrags), 'angle': 90 if orientation == 'landscape' else 0, 'epsfile': tmppath.resolve().as_posix()}, fontsize=10)\n    with TemporaryDirectory() as tmpdir:\n        psfile = os.path.join(tmpdir, 'tmp.ps')\n        cbook._check_and_log_subprocess(['dvips', '-q', '-R0', '-o', psfile, dvifile], _log)\n        shutil.move(psfile, tmppath)\n    with open(tmppath) as fh:\n        psfrag_rotated = 'Landscape' in fh.read(1000)\n    return psfrag_rotated",
    "docstring": "When we want to use the LaTeX backend with postscript, we write PSFrag tags to a temporary postscript file, each one marking a position for LaTeX to render some text. convert_psfrags generates a LaTeX document containing the commands to convert those tags to text. LaTeX/dvips produces the postscript file that includes the actual text.",
    "type": "function",
    "file_path": "matplotlib\\lib\\matplotlib\\backends\\backend_ps.py",
    "ast_data": "FunctionDef name:_convert_psfrags arg:tmppath arg:psfrags arg:paper_width arg:paper_height arg:orientation arguments arg arg arg arg arg With Call Call Call Call Assign Call Call Call Compare Call Call With Call Assign Call Call Call With Call Assign Compare Call Return return:yes"
  },
  {
    "library": "pandas",
    "name": "cat_core",
    "source_code": "def cat_core(list_of_columns: list, sep: str):\n    if sep == '':\n        arr_of_cols = np.asarray(list_of_columns, dtype=object)\n        return np.sum(arr_of_cols, axis=0)\n    list_with_sep = [sep] * (2 * len(list_of_columns) - 1)\n    list_with_sep[::2] = list_of_columns\n    arr_with_sep = np.asarray(list_with_sep, dtype=object)\n    return np.sum(arr_with_sep, axis=0)",
    "docstring": "Auxiliary function for :meth: Parameters ---------- list_of_columns : list of numpy arrays List of arrays to be concatenated with sep; these arrays may not contain NaNs! sep : string The separator string for concatenating the columns. Returns ------- nd.array The concatenation of list_of_columns with sep.",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\strings\\accessor.py",
    "ast_data": "FunctionDef name:cat_core arg:list_of_columns arg:sep arguments arg arg If Compare Assign Call Return return:yes Call Assign Call Assign Assign Call Return return:yes Call"
  },
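The interleaving trick can be reproduced in plain NumPy; this sketch mirrors the internal helper rather than calling it, since `cat_core` is private to pandas.

```python
import numpy as np

cols = [np.array(["a", "b"], dtype=object), np.array(["1", "2"], dtype=object)]
sep = "-"

# Interleave the separator between the columns: [col0, sep, col1]
list_with_sep = [sep] * (2 * len(cols) - 1)
list_with_sep[::2] = cols

print(np.sum(np.asarray(list_with_sep, dtype=object), axis=0))
# ['a-1' 'b-2']
```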
  {
    "library": "pytorch",
    "name": "dtype_to_type",
    "source_code": "def dtype_to_type(dtype: torch.dtype) -> type:\n    assert isinstance(dtype, torch.dtype)\n    if dtype is torch.bool:\n        return bool\n    if dtype in _integer_dtypes:\n        return int\n    if dtype.is_floating_point:\n        return float\n    if dtype in _complex_dtypes:\n        return complex\n    raise ValueError('Invalid dtype!')",
    "docstring": "Computes the corresponding Python type (AKA \"type kind\") for the given dtype.",
    "type": "function",
    "file_path": "pytorch\\torch\\_prims_common\\__init__.py",
    "ast_data": "FunctionDef name:dtype_to_type arg:dtype arguments arg Call If Compare Return return:yes If Compare Return return:yes If Return return:yes If Compare Return return:yes Raise Call"
  },
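A quick check of the mapping; note that `torch._prims_common` is an internal module, so this import path may change between releases.

```python
import torch
from torch._prims_common import dtype_to_type  # internal helper

assert dtype_to_type(torch.bool) is bool
assert dtype_to_type(torch.int64) is int
assert dtype_to_type(torch.float32) is float
assert dtype_to_type(torch.complex64) is complex
```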
  {
    "library": "tensorflow",
    "name": "add_run_metadata",
    "source_code": "def add_run_metadata(self, run_metadata, tag, global_step=None):\n    if tag in self._session_run_tags:\n        raise ValueError('The provided tag was already used for this event type')\n    self._session_run_tags[tag] = True\n    tagged_metadata = event_pb2.TaggedRunMetadata()\n    tagged_metadata.tag = tag\n    tagged_metadata.run_metadata = run_metadata.SerializeToString()\n    event = event_pb2.Event(tagged_run_metadata=tagged_metadata)\n    self._add_event(event, global_step)",
    "docstring": "Adds a metadata information for a single session.run() call. Args: run_metadata: A protobuf object. tag: The tag name for this metadata. global_step: Number. Optional global step counter to record with the StepStats. Raises: ValueError: If the provided tag was already used for this type of event.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\summary\\writer\\writer.py",
    "ast_data": "FunctionDef name:add_run_metadata arg:self arg:run_metadata arg:tag arg:global_step arguments arg arg arg arg If Compare Raise Call Assign Assign Call Assign Assign Call Assign Call Call"
  },
  {
    "library": "tensorflow",
    "name": "coordinates",
    "source_code": "def coordinates(self, replica: int, logical_core: int) -> Tuple:\n    return tuple(self.core_assignment[replica, logical_core, :])",
    "docstring": "Returns the physical topology coordinates of a logical core.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\tpu\\device_assignment.py",
    "ast_data": "FunctionDef name:coordinates arg:self arg:replica arg:logical_core arguments arg arg arg Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "dot",
    "source_code": "def dot(self, p):\n    if self.approx_type == 'hess':\n        return self._symv(1, self.B, p)\n    else:\n        return self._symv(1, self.H, p)",
    "docstring": "Compute the product of the internal matrix with the given vector. Parameters ---------- p : array_like 1-D array representing a vector. Returns ------- Hp : array 1-D represents the result of multiplying the approximation matrix by vector p.",
    "type": "method",
    "file_path": "scipy\\scipy\\optimize\\_hessian_update_strategy.py",
    "ast_data": "FunctionDef name:dot arg:self arg:p arguments arg arg If Compare Return return:yes Call Return return:yes Call"
  },
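A small sketch via the public `scipy.optimize.BFGS` strategy, which inherits this `dot`; right after `initialize` the approximation is the identity, so the product returns `p` unchanged.

```python
import numpy as np
from scipy.optimize import BFGS

hess = BFGS()
hess.initialize(n=3, approx_type="hess")  # identity approximation to start
p = np.array([1.0, 2.0, 3.0])
print(hess.dot(p))  # [1. 2. 3.]
```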
  {
    "library": "pytorch",
    "name": "pipe_split",
    "source_code": "def pipe_split():\n    return torch.ops.pippy._pipe_split()",
    "docstring": "pipe_split is a special operator that is used to mark the boundary between stages in a module. It is used to split the module into stages. It is a no-op if your annotated module is run eagerly. Example: >>> # xdoctest: +SKIP >>> def forward(self, x): >>> x = torch.mm(x, self.mm_param) >>> x = torch.relu(x) >>> pipe_split() >>> x = self.lin(x) >>> return x The above example will be split into two stages.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\pipelining\\_IR.py",
    "ast_data": "FunctionDef name:pipe_split arguments Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "set_default_angle",
    "source_code": "def set_default_angle(self, d):\n    self.set_rotation(_api.check_getitem(self._default_angles, d=d))",
    "docstring": "Set the default angle. See for details. Parameters ---------- d : {\"left\", \"bottom\", \"right\", \"top\"}",
    "type": "method",
    "file_path": "matplotlib\\lib\\mpl_toolkits\\axisartist\\axis_artist.py",
    "ast_data": "FunctionDef name:set_default_angle arg:self arg:d arguments arg arg Call Call"
  },
  {
    "library": "tensorflow",
    "name": "random_uniform",
    "source_code": "def random_uniform(self, shape, minval, maxval, dtype):\n    if self.seed:\n        op = stateless_random_ops.stateless_random_uniform\n    else:\n        op = random_ops.random_uniform\n    return op(shape=shape, minval=minval, maxval=maxval, dtype=dtype, seed=self.seed)",
    "docstring": "A deterministic random uniform if seed is passed.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\initializers\\initializers_v2.py",
    "ast_data": "FunctionDef name:random_uniform arg:self arg:shape arg:minval arg:maxval arg:dtype arguments arg arg arg arg arg If Assign Assign Return return:yes Call"
  },
  {
    "library": "django",
    "name": "get_geom_placeholder",
    "source_code": "def get_geom_placeholder(self, f, value, compiler):\n    transform_func = self.spatial_function_name('Transform')\n    if hasattr(value, 'as_sql'):\n        if value.field.srid == f.srid:\n            placeholder = '%s'\n        else:\n            placeholder = '%s(%%s, %s)' % (transform_func, f.srid)\n        return placeholder\n    if value is None:\n        value_srid = None\n    else:\n        value_srid = value.srid\n    if value_srid is None or value_srid == f.srid:\n        placeholder = '%s'\n    else:\n        placeholder = '%s(%%s, %s)' % (transform_func, f.srid)\n    return placeholder",
    "docstring": "Provide a proper substitution value for Geometries or rasters that are not in the SRID of the field. Specifically, this routine will substitute in the ST_Transform() function call.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\db\\backends\\postgis\\operations.py",
    "ast_data": "FunctionDef name:get_geom_placeholder arg:self arg:f arg:value arg:compiler arguments arg arg arg arg Assign Call If Call If Compare Assign Assign Return return:yes If Compare Assign Assign If BoolOp Compare Compare Assign Assign Return return:yes"
  },
  {
    "library": "numpy",
    "name": "cumprod",
    "source_code": "@array_function_dispatch(_cumprod_dispatcher)\ndef cumprod(a, axis=None, dtype=None, out=None):\n    return _wrapfunc(a, 'cumprod', axis=axis, dtype=dtype, out=out)",
    "docstring": "Return the cumulative product of elements along a given axis. Parameters ---------- a : array_like Input array. axis : int, optional Axis along which the cumulative product is computed. By default the input is flattened. dtype : dtype, optional Type of the returned array, as well as of the accumulator in which the elements are multiplied. If *dtype* is not specified, it defaults to the dtype of , unless has an integer dtype with a precision less than that of the default platform integer. In that case, the default platform integer is used instead. out : ndarray, optional Alternative output array in which to place the result. It must have the same shape and buffer length as the expected output but the type of the resulting values will be cast if necessary. Returns ------- cumprod : ndarray A new array holding the result is returned unless is specified, in which case a reference to out is returned. See Also -------- cumulative_prod : Array API compatible alternative for `ufuncs-output-typeaa`: >>> np.cumprod(a,axis=1) array([[ 1, 2, 6], [ 4, 20, 120]])",
    "type": "function",
    "file_path": "numpy\\numpy\\_core\\fromnumeric.py",
    "ast_data": "FunctionDef name:cumprod arg:a arg:axis arg:dtype arg:out arguments arg arg arg arg Return return:yes Call Call"
  },
  {
    "library": "cherrypy",
    "name": "save",
    "source_code": "def save():\n    if not hasattr(cherrypy.serving, 'session'):\n        return\n    request = cherrypy.serving.request\n    response = cherrypy.serving.response\n    if hasattr(request, '_sessionsaved'):\n        return\n    request._sessionsaved = True\n    if response.stream:\n        request.hooks.attach('on_end_request', cherrypy.session.save)\n    else:\n        if is_iterator(response.body):\n            response.collapse_body()\n        cherrypy.session.save()",
    "docstring": "Save any changed session data.",
    "type": "function",
    "file_path": "cherrypy\\cherrypy\\lib\\sessions.py",
    "ast_data": "FunctionDef name:save arguments If Call Return return:no Assign Assign If Call Return return:no Assign If Call If Call Call Call"
  },
  {
    "library": "sphinx",
    "name": "before_content",
    "source_code": "def before_content(self) -> None:\n    pass",
    "docstring": "Called before parsing content. Used to set information about the current directive context on the build environment.",
    "type": "method",
    "file_path": "sphinx\\sphinx\\directives\\__init__.py",
    "ast_data": "FunctionDef name:before_content arg:self arguments arg"
  },
  {
    "library": "django",
    "name": "__init__",
    "source_code": "def __init__(self, obj, geography=False):\n    self.is_geometry = isinstance(obj, (GEOSGeometry, PostGISAdapter))\n    if self.is_geometry:\n        self.ewkb = bytes(obj.ewkb)\n    else:\n        self.ewkb = to_pgraster(obj)\n    self.srid = obj.srid\n    self.geography = geography",
    "docstring": "Initialize on the spatial object.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\db\\backends\\postgis\\adapter.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:obj arg:geography arguments arg arg arg Assign Call If Assign Call Assign Call Assign Assign"
  },
  {
    "library": "tensorflow",
    "name": "preprocess_input_examples_arg_string",
    "source_code": "def preprocess_input_examples_arg_string(input_examples_str):\n    input_dict = preprocess_input_exprs_arg_string(input_examples_str)\n    for input_key, example_list in input_dict.items():\n        if not isinstance(example_list, list):\n            raise ValueError('tf.Example input must be a list of dictionaries, but \"%s\" is %s' % (example_list, type(example_list)))\n        input_dict[input_key] = [_create_example_string(example) for example in example_list]\n    return input_dict",
    "docstring": "Parses input into dict that maps input keys to lists of tf.Example. Parses input string in the format of 'input_key1=[{feature_name: feature_list}];input_key2=[{feature_name:feature_list}];' into a dictionary that maps each input_key to its list of serialized tf.Example. Args: input_examples_str: A string that specifies a list of dictionaries of feature_names and their feature_lists for each input. Each input is separated by semicolon. For each input key: 'input=[{feature_name1: feature_list1, feature_name2:feature_list2}]' items in feature_list can be the type of float, int, long or str. Returns: A dictionary that maps input keys to lists of serialized tf.Example. Raises: ValueError: An error when the given tf.Example is not a list.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\tools\\saved_model_cli.py",
    "ast_data": "FunctionDef name:preprocess_input_examples_arg_string arg:input_examples_str arguments arg Assign Call For Call If Call Raise Call Call Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "encode",
    "source_code": "def encode(self, spec, value, minimum_rank=0):\n    return spec._to_components(value)",
    "docstring": "Encodes as a nest of batchable Tensors or CompositeTensors. The default definition returns a flat tuple of all the s, s, and s from a depth-first traversal of 's fields. Subclasses may override this default definition, when necessary. Args: spec: The TypeSpec of the value to encode. value: A value compatible with . minimum_rank: The minimum rank for the returned Tensors, CompositeTensors, and ExtensionType values. This can be used to ensure that the encoded values can be unbatched this number of times. If , then must be compatible for all values returned by . Returns: A nest (as defined by ) of s, batchable s, or s. Stacking, unstacking, or concatenating these encoded values and then decoding the result must be equivalent to stacking, unstacking, or concatenating the original values.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\extension_type.py",
    "ast_data": "FunctionDef name:encode arg:self arg:spec arg:value arg:minimum_rank arguments arg arg arg arg Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "_r2r",
    "source_code": "def _r2r(forward, transform, x, type=2, n=None, axis=-1, norm=None, overwrite_x=False, workers=None, orthogonalize=None):\n    tmp = _asfarray(x)\n    overwrite_x = overwrite_x or _datacopied(tmp, x)\n    norm = _normalization(norm, forward)\n    workers = _workers(workers)\n    if not forward:\n        if type == 2:\n            type = 3\n        elif type == 3:\n            type = 2\n    if n is not None:\n        tmp, copied = _fix_shape_1d(tmp, n, axis)\n        overwrite_x = overwrite_x or copied\n    elif tmp.shape[axis] < 1:\n        raise ValueError(f'invalid number of data points ({tmp.shape[axis]}) specified')\n    out = tmp if overwrite_x else None\n    if np.iscomplexobj(x):\n        out = np.empty_like(tmp) if out is None else out\n        transform(tmp.real, type, (axis,), norm, out.real, workers)\n        transform(tmp.imag, type, (axis,), norm, out.imag, workers)\n        return out\n    return transform(tmp, type, (axis,), norm, out, workers, orthogonalize)",
    "docstring": "Forward or backward 1-D DCT/DST Parameters ---------- forward : bool Transform direction (determines type and normalisation) transform : {pypocketfft.dct, pypocketfft.dst} The transform to perform",
    "type": "function",
    "file_path": "scipy\\scipy\\fft\\_pocketfft\\realtransforms.py",
    "ast_data": "FunctionDef name:_r2r arg:forward arg:transform arg:x arg:type arg:n arg:axis arg:norm arg:overwrite_x arg:workers arg:orthogonalize arguments arg arg arg arg arg arg arg arg arg arg Assign Call Assign BoolOp Call Assign Call Assign Call If If Compare Assign If Compare Assign If Compare Assign Call Assign BoolOp If Compare Raise Call Assign If Call Assign Compare Call Call Call Return return:yes Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "_get_ax_layer",
    "source_code": "@final\n@classmethod\ndef _get_ax_layer(cls, ax, primary: bool=True):\n    if primary:\n        return getattr(ax, 'left_ax', ax)\n    else:\n        return getattr(ax, 'right_ax', ax)",
    "docstring": "get left (primary) or right (secondary) axes",
    "type": "method",
    "file_path": "pandas\\pandas\\plotting\\_matplotlib\\core.py",
    "ast_data": "FunctionDef name:_get_ax_layer arg:cls arg:ax arg:primary arguments arg arg arg If Return return:yes Call Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "fmin_l_bfgs_b",
    "source_code": "def fmin_l_bfgs_b(func, x0, fprime=None, args=(), approx_grad=0, bounds=None, m=10, factr=10000000.0, pgtol=1e-05, epsilon=1e-08, iprint=-1, maxfun=15000, maxiter=15000, disp=None, callback=None, maxls=20):\n    if approx_grad:\n        fun = func\n        jac = None\n    elif fprime is None:\n        fun = MemoizeJac(func)\n        jac = fun.derivative\n    else:\n        fun = func\n        jac = fprime\n    callback = _wrap_callback(callback)\n    opts = {'maxcor': m, 'ftol': factr * np.finfo(float).eps, 'gtol': pgtol, 'eps': epsilon, 'maxfun': maxfun, 'maxiter': maxiter, 'callback': callback, 'maxls': maxls}\n    res = _minimize_lbfgsb(fun, x0, args=args, jac=jac, bounds=bounds, **opts)\n    d = {'grad': res['jac'], 'task': res['message'], 'funcalls': res['nfev'], 'nit': res['nit'], 'warnflag': res['status']}\n    f = res['fun']\n    x = res['x']\n    return (x, f, d)",
    "docstring": "Minimize a function func using the L-BFGS-B algorithm. Parameters ---------- func : callable f(x,*args) Function to minimize. x0 : ndarray Initial guess. fprime : callable fprime(x,*args), optional The gradient of . If None, then returns the function value and the gradient (`approx_gradfuncfuncfprimefuncbounds` parameter. >>> bounds = [(0, 5), (5, 10)] >>> x_opt, f_op, info = fmin_l_bfgs_b(func, x0=initial_values, args=(X, Y), ... approx_grad=True, bounds=bounds) >>> x_opt, f_opt array([1.65990508, 5.31649385]), 15.721334516453945 # may vary",
    "type": "function",
    "file_path": "scipy\\scipy\\optimize\\_lbfgsb_py.py",
    "ast_data": "FunctionDef name:fmin_l_bfgs_b arg:func arg:x0 arg:fprime arg:args arg:approx_grad arg:bounds arg:m arg:factr arg:pgtol arg:epsilon arg:iprint arg:maxfun arg:maxiter arg:disp arg:callback arg:maxls arguments arg arg arg arg arg arg arg arg arg arg arg arg arg arg arg arg If Assign Assign If Compare Assign Call Assign Assign Assign Assign Call Assign Call Assign Call Assign Assign Assign Return return:yes"
  },
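A self-contained run with a numerical gradient (`approx_grad=True`) and box constraints; the quadratic objective is a hypothetical stand-in.

```python
import numpy as np
from scipy.optimize import fmin_l_bfgs_b

def objective(x):
    return (x[0] - 1.0) ** 2 + (x[1] - 2.5) ** 2

x_opt, f_opt, info = fmin_l_bfgs_b(objective, x0=np.array([4.0, 0.0]),
                                   approx_grad=True, bounds=[(0, 5), (0, 5)])
print(x_opt, f_opt)  # approximately [1.  2.5] and 0.0
```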
  {
    "library": "scikit-learn",
    "name": "_labels_inertia",
    "source_code": "def _labels_inertia(X, sample_weight, centers, n_threads=1, return_inertia=True):\n    n_samples = X.shape[0]\n    n_clusters = centers.shape[0]\n    labels = np.full(n_samples, -1, dtype=np.int32)\n    center_shift = np.zeros(n_clusters, dtype=centers.dtype)\n    if sp.issparse(X):\n        _labels = lloyd_iter_chunked_sparse\n        _inertia = _inertia_sparse\n    else:\n        _labels = lloyd_iter_chunked_dense\n        _inertia = _inertia_dense\n    _labels(X, sample_weight, centers, centers_new=None, weight_in_clusters=None, labels=labels, center_shift=center_shift, n_threads=n_threads, update_centers=False)\n    if return_inertia:\n        inertia = _inertia(X, sample_weight, centers, labels, n_threads)\n        return (labels, inertia)\n    return labels",
    "docstring": "E step of the K-means EM algorithm. Compute the labels and the inertia of the given samples and centers. Parameters ---------- X : {ndarray, sparse matrix} of shape (n_samples, n_features) The input samples to assign to the labels. If sparse matrix, must be in CSR format. sample_weight : ndarray of shape (n_samples,) The weights for each observation in X. x_squared_norms : ndarray of shape (n_samples,) Precomputed squared euclidean norm of each data point, to speed up computations. centers : ndarray of shape (n_clusters, n_features) The cluster centers. n_threads : int, default=1 The number of OpenMP threads to use for the computation. Parallelism is sample-wise on the main cython loop which assigns each sample to its closest center. return_inertia : bool, default=True Whether to compute and return the inertia. Returns ------- labels : ndarray of shape (n_samples,) The resulting assignment. inertia : float Sum of squared distances of samples to their closest cluster center. Inertia is only returned if return_inertia is True.",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\cluster\\_kmeans.py",
    "ast_data": "FunctionDef name:_labels_inertia arg:X arg:sample_weight arg:centers arg:n_threads arg:return_inertia arguments arg arg arg arg arg Assign Assign Assign Call Assign Call If Call Assign Assign Assign Assign Call If Assign Call Return return:yes Return return:yes"
  },
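The public counterpart of this private E step is `KMeans.predict` plus the fitted `inertia_`; a tiny sketch with made-up points:

```python
import numpy as np
from sklearn.cluster import KMeans

X = np.array([[0.0, 0.0], [0.1, 0.0], [5.0, 5.0], [5.1, 5.0]])
km = KMeans(n_clusters=2, n_init=10, random_state=0).fit(X)

print(km.predict(X))  # label assignment, e.g. [0 0 1 1]
print(km.inertia_)    # sum of squared distances to the closest centers
```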
  {
    "library": "django",
    "name": "safe_summary",
    "source_code": "def safe_summary(self, encoded):\n    raise NotImplementedError('subclasses of BasePasswordHasher must provide a safe_summary() method')",
    "docstring": "Return a summary of safe values. The result is a dictionary and will be used where the password field must be displayed to construct a safe representation of the password.",
    "type": "method",
    "file_path": "django\\django\\contrib\\auth\\hashers.py",
    "ast_data": "FunctionDef name:safe_summary arg:self arg:encoded arguments arg arg Raise Call"
  },
  {
    "library": "tensorflow",
    "name": "_push_critical_section_stack",
    "source_code": "@contextlib.contextmanager\ndef _push_critical_section_stack(signature):\n    stack = _get_critical_section_stack()\n    if signature in stack:\n        raise ValueError(f'Attempting to lock a CriticalSection (signature={signature}) in which we are already running. This is illegal and may cause deadlocks.')\n    stack.append(signature)\n    try:\n        yield\n    finally:\n        received_signature = stack.pop()\n        if received_signature != signature:\n            raise RuntimeError(f'CriticalSection stack inconsistency: expected signature {signature} but received {received_signature}')",
    "docstring": "Push a CriticalSection._signature to the thread-local stack. If the signature is already on the stack, raise an error because it means we're trying to execute inside the same locked CriticalSection, which will create a deadlock. Args: signature: Tuple of the type . Uniquely identifies a CriticalSection by its , , and device. Yields: An empty value. The context is guaranteed to run without deadlock. Raises: ValueError: If the signature is already on the stack. RuntimeError: If another thread or function modifies the current stack entry during the yield.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\critical_section_ops.py",
    "ast_data": "FunctionDef name:_push_critical_section_stack arg:signature arguments arg Assign Call If Compare Raise Call Call Try Assign Call If Compare Raise Call"
  },
  {
    "library": "tensorflow",
    "name": "_LogOpGradients",
    "source_code": "def _LogOpGradients(op: ops.Operation, out_grads, in_grads):\n    logging.vlog(1, \"Gradient for '\" + op.name + \"'\")\n\n    def _FilterGrad(x):\n        if x is None:\n            return False\n        if isinstance(x, (list, tuple)):\n            return bool(x)\n        else:\n            return True\n    logging.vlog(1, '  in  --> %s', ', '.join((x.name for x in out_grads if _FilterGrad(x))))\n    logging.vlog(1, '  out --> %s', ', '.join((x.name for x in in_grads if _FilterGrad(x))))",
    "docstring": "Log the in and out grads of an op.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\gradients_util.py",
    "ast_data": "FunctionDef name:_LogOpGradients arg:op arg:out_grads arg:in_grads arguments arg arg arg Call FunctionDef name:_FilterGrad arg:x arguments arg If Compare Return return:yes If Call Return return:yes Call Return return:yes Call Call Call Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_build",
    "source_code": "def _build(self, shape):\n    if self.multi_label:\n        if shape.ndims != 2:\n            raise ValueError('`y_true` must have rank=2 when `multi_label` is True. Found rank %s.' % shape.ndims)\n        self._num_labels = shape[1]\n        variable_shape = tensor_shape.TensorShape([tensor_shape.Dimension(self.num_thresholds), self._num_labels])\n    else:\n        variable_shape = tensor_shape.TensorShape([tensor_shape.Dimension(self.num_thresholds)])\n    self._build_input_shape = shape\n    self.true_positives = self.add_weight('true_positives', shape=variable_shape, initializer=init_ops.zeros_initializer)\n    self.true_negatives = self.add_weight('true_negatives', shape=variable_shape, initializer=init_ops.zeros_initializer)\n    self.false_positives = self.add_weight('false_positives', shape=variable_shape, initializer=init_ops.zeros_initializer)\n    self.false_negatives = self.add_weight('false_negatives', shape=variable_shape, initializer=init_ops.zeros_initializer)\n    if self.multi_label:\n        with ops.init_scope():\n            if not context.executing_eagerly():\n                backend._initialize_variables(backend._get_session())\n    self._built = True",
    "docstring": "Initialize TP, FP, TN, and FN tensors, given the shape of the data.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\metrics.py",
    "ast_data": "FunctionDef name:_build arg:self arg:shape arguments arg arg If If Compare Raise Call Assign Assign Call Call Assign Call Call Assign Assign Call Assign Call Assign Call Assign Call If With Call If Call Call Call Assign"
  },
  {
    "library": "matplotlib",
    "name": "get_zaxis",
    "source_code": "def get_zaxis(self):\n    return self.zaxis",
    "docstring": "Return the `~.axis3d.Axis`) instance.",
    "type": "method",
    "file_path": "matplotlib\\lib\\mpl_toolkits\\mplot3d\\axes3d.py",
    "ast_data": "FunctionDef name:get_zaxis arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "add_summary",
    "source_code": "def add_summary(self, summary, global_step=None):\n    if isinstance(summary, bytes):\n        summ = summary_pb2.Summary()\n        summ.ParseFromString(summary)\n        summary = summ\n    for value in summary.value:\n        if not value.metadata:\n            continue\n        if value.tag in self._seen_summary_tags:\n            value.ClearField('metadata')\n            continue\n        self._seen_summary_tags.add(value.tag)\n    event = event_pb2.Event(summary=summary)\n    self._add_event(event, global_step)",
    "docstring": "Adds a protocol buffer to the event file. This method wraps the provided summary in an protocol buffer and adds it to the event file. You can pass the result of evaluating any summary op, using or , to this function. Alternatively, you can pass a protocol buffer that you populate with your own data. The latter is commonly done to report evaluation results in event files. Args: summary: A protocol buffer, optionally serialized as a string. global_step: Number. Optional global step value to record with the summary.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\summary\\writer\\writer.py",
    "ast_data": "FunctionDef name:add_summary arg:self arg:summary arg:global_step arguments arg arg arg If Call Assign Call Call Assign For If If Compare Call Call Assign Call Call"
  },
  {
    "library": "pytorch",
    "name": "reduce_acc_nodes_non_tensor_output",
    "source_code": "def reduce_acc_nodes_non_tensor_output(self):\n    while True:\n        new_cpu_nodes: NodeList = []\n        for acc_node in self.acc_nodes:\n            if is_node_output_tensor(acc_node):\n                continue\n            for user in acc_node.users:\n                if user not in self.acc_nodes:\n                    new_cpu_nodes.append(acc_node)\n                    break\n        if not new_cpu_nodes:\n            break\n        for new_cpu_node in new_cpu_nodes:\n            self.acc_nodes.remove(new_cpu_node)\n        self.reduce_acc_nodes_non_tensor_input_helper(new_cpu_nodes)",
    "docstring": "Excludes nodes from ACC supported set that produce non-tensor outputs and have downstream CPU nodes.",
    "type": "method",
    "file_path": "pytorch\\torch\\fx\\passes\\splitter_base.py",
    "ast_data": "FunctionDef name:reduce_acc_nodes_non_tensor_output arg:self arguments arg While For If Call For If Compare Call If For Call Call"
  },
  {
    "library": "matplotlib",
    "name": "__init__",
    "source_code": "def __init__(self, bounds, transform):\n    self._bounds = bounds\n    self._transform = transform",
    "docstring": "*bounds* (a `` rectangle) and *transform* together specify the position of the inset Axes.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\axes\\_base.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:bounds arg:transform arguments arg arg arg Assign Assign"
  },
  {
    "library": "matplotlib",
    "name": "get_closed",
    "source_code": "def get_closed(self):\n    return self._closed",
    "docstring": "Return whether the polygon is closed.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\patches.py",
    "ast_data": "FunctionDef name:get_closed arg:self arguments arg Return return:yes"
  },
  {
    "library": "pandas",
    "name": "_is_binary_mode",
    "source_code": "def _is_binary_mode(handle: FilePath | BaseBuffer, mode: str) -> bool:\n    if 't' in mode or 'b' in mode:\n        return 'b' in mode\n    text_classes = (codecs.StreamWriter, codecs.StreamReader, codecs.StreamReaderWriter)\n    if issubclass(type(handle), text_classes):\n        return False\n    return isinstance(handle, _get_binary_io_classes()) or 'b' in getattr(handle, 'mode', mode)",
    "docstring": "Whether the handle is opened in binary mode",
    "type": "function",
    "file_path": "pandas\\pandas\\io\\common.py",
    "ast_data": "FunctionDef name:_is_binary_mode arg:handle arg:mode arguments arg arg If BoolOp Compare Compare Return return:yes Compare Assign If Call Call Return return:yes Return return:yes BoolOp Call Call Compare Call"
  },
  {
    "library": "scikit-learn",
    "name": "partial_fit",
    "source_code": "@_fit_context(prefer_skip_nested_validation=True)\ndef partial_fit(self, X, y=None, W=None, H=None):\n    has_components = hasattr(self, 'components_')\n    X = validate_data(self, X, accept_sparse=('csr', 'csc'), dtype=[np.float64, np.float32], reset=not has_components)\n    if not has_components:\n        self._check_params(X)\n        _, H = self._check_w_h(X, W=W, H=H, update_H=True)\n        self._components_numerator = H.copy()\n        self._components_denominator = np.ones(H.shape, dtype=H.dtype)\n        self.n_steps_ = 0\n    else:\n        H = self.components_\n    self._minibatch_step(X, None, H, update_H=True)\n    self.n_components_ = H.shape[0]\n    self.components_ = H\n    self.n_steps_ += 1\n    return self",
    "docstring": "Update the model using the data in as a mini-batch. This method is expected to be called several times consecutively on different chunks of a dataset so as to implement out-of-core or online learning. This is especially useful when the whole dataset is too big to fit in memory at once (see :ref:). Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) Data matrix to be decomposed. y : Ignored Not used, present here for API consistency by convention. W : array-like of shape (n_samples, n_components), default=None If , it is used as initial guess for the solution. Only used for the first call to . H : array-like of shape (n_components, n_features), default=None If , it is used as initial guess for the solution. Only used for the first call to . Returns ------- self Returns the instance itself.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\decomposition\\_nmf.py",
    "ast_data": "FunctionDef name:partial_fit arg:self arg:X arg:y arg:W arg:H arguments arg arg arg arg arg Assign Call Assign Call If Call Assign Call Assign Call Assign Call Assign Assign Call Assign Assign Return return:yes Call"
  },
  {
    "library": "django",
    "name": "get_random_secret_key",
    "source_code": "def get_random_secret_key():\n    chars = 'abcdefghijklmnopqrstuvwxyz0123456789!@#$%^&*(-_=+)'\n    return get_random_string(50, chars)",
    "docstring": "Return a 50 character random string usable as a SECRET_KEY setting value.",
    "type": "function",
    "file_path": "django\\django\\core\\management\\utils.py",
    "ast_data": "FunctionDef name:get_random_secret_key arguments Assign Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "__init__",
    "source_code": "def __init__(self, error_type: UserErrorType, msg: str, case_name: Optional[str]=None) -> None:\n    if case_name is not None:\n        assert isinstance(case_name, str)\n        if msg.endswith('.'):\n            msg += ' '\n        else:\n            msg += '\\n'\n        msg += exportdb_error_message(case_name)\n    super().__init__(msg)\n    self.error_type = error_type\n    self.message = msg",
    "docstring": "Type of errors that would be valid in Eager, but not supported in TorchDynamo. The error message should tell user about next actions. error_type: Type of user error msg: Actionable error message case_name: (Optional) Unique name (snake case) for the usage example in exportdb.",
    "type": "method",
    "file_path": "pytorch\\torch\\_dynamo\\exc.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:error_type arg:msg arg:case_name arguments arg arg arg arg If Compare Call If Call Call Call Call Assign Assign"
  },
  {
    "library": "scipy",
    "name": "Schwefel26",
    "source_code": "class Schwefel26(Benchmark):\n    change_dimensionality = True\n\n    def __init__(self, dimensions=2):\n        Benchmark.__init__(self, dimensions)\n        self._bounds = list(zip([-500.0] * self.N, [500.0] * self.N))\n        self.global_optimum = [[420.968746 for _ in range(self.N)]]\n        self.fglob = 0.0\n\n    def fun(self, x, *args):\n        self.nfev += 1\n        return 418.982887 * self.N - sum(x * sin(sqrt(abs(x))))",
    "docstring": "Schwefel 26 objective function. This class defines the Schwefel 26 [1]_ global optimization problem. This is a multimodal minimization problem defined as follows: .. math:: f_{\\text{Schwefel26}}(x) = 418.9829n - \\sum_{i=1}^n x_i \\sin(\\sqrt{|x_i|}) Here, :math: represents the number of dimensions and :math: for :math:. *Global optimum*: :math: for :math: for :math: .. [1] Gavana, A. Global Optimization Benchmarks and AMPGO retrieved 2015",
    "type": "class",
    "file_path": "scipy\\benchmarks\\benchmarks\\go_benchmark_functions\\go_funcs_S.py",
    "ast_data": "ClassDef name:Schwefel26 Assign FunctionDef name:__init__ arg:self arg:dimensions arguments arg arg Call Assign Call Call Assign Call Assign FunctionDef name:fun arg:self arg:x arguments arg arg arg Return return:yes Call Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "check_existence",
    "source_code": "def check_existence(filename):\n    if not os.path.exists(filename):\n        raise RuntimeError('%s not found.' % filename)",
    "docstring": "Check the existence of file or dir.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\tools\\ci_build\\copy_binary.py",
    "ast_data": "FunctionDef name:check_existence arg:filename arguments arg If Call Raise Call"
  },
  {
    "library": "scikit-learn",
    "name": "inplace_tanh_derivative",
    "source_code": "def inplace_tanh_derivative(Z, delta):\n    delta *= 1 - Z ** 2",
    "docstring": "Apply the derivative of the hyperbolic tanh function. It exploits the fact that the derivative is a simple function of the output value from hyperbolic tangent. Parameters ---------- Z : {array-like, sparse matrix}, shape (n_samples, n_features) The data which was output from the hyperbolic tangent activation function during the forward pass. delta : {array-like}, shape (n_samples, n_features) The backpropagated error signal to be modified inplace.",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\neural_network\\_base.py",
    "ast_data": "FunctionDef name:inplace_tanh_derivative arg:Z arg:delta arguments arg arg"
  },
  {
    "library": "scipy",
    "name": "time_euler_conversion",
    "source_code": "def time_euler_conversion(self, num_rotations):\n    Rotation.from_euler('XYZ', self.rotations.as_euler('XYZ'))",
    "docstring": "Time converting rotation from and to euler angles",
    "type": "method",
    "file_path": "scipy\\benchmarks\\benchmarks\\spatial.py",
    "ast_data": "FunctionDef name:time_euler_conversion arg:self arg:num_rotations arguments arg arg Call Call"
  },
  {
    "library": "pytorch",
    "name": "apply",
    "source_code": "@classmethod\ndef apply(cls, module, name, amount):\n    return super().apply(module, name, amount=amount)",
    "docstring": "Add pruning on the fly and reparametrization of a tensor. Adds the forward pre-hook that enables pruning on the fly and the reparametrization of a tensor in terms of the original tensor and the pruning mask. Args: module (nn.Module): module containing the tensor to prune name (str): parameter name within ``, it represents the absolute number of parameters to prune.",
    "type": "method",
    "file_path": "pytorch\\torch\\nn\\utils\\prune.py",
    "ast_data": "FunctionDef name:apply arg:cls arg:module arg:name arg:amount arguments arg arg arg arg Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "_connector_segment_str_at_line",
    "source_code": "def _connector_segment_str_at_line(self, line: int) -> str:\n    if self.upper_printer is None and self.lower_printer is None:\n        return ''\n    upper_total_rows = self.upper_printer._total_rows() if self.upper_printer else 1\n    lower_total_rows = self.lower_printer._total_rows() if self.lower_printer else 1\n    if line == 0:\n        return '  __'\n    elif line < upper_total_rows + 1:\n        return ' |  '\n    elif line == upper_total_rows + 1:\n        return ' |__'\n    elif line < upper_total_rows + lower_total_rows + 1:\n        return '    '\n    return ''",
    "docstring": "Get the connector segment string at the given line.",
    "type": "method",
    "file_path": "pytorch\\torch\\onnx\\verification.py",
    "ast_data": "FunctionDef name:_connector_segment_str_at_line arg:self arg:line arguments arg arg If BoolOp Compare Compare Return return:yes Assign Call Assign Call If Compare Return return:yes If Compare Return return:yes If Compare Return return:yes If Compare Return return:yes Return return:yes"
  },
  {
    "library": "scipy",
    "name": "_mean",
    "source_code": "def _mean(self, dim, df, scale):\n    if df > dim + 1:\n        out = scale / (df - dim - 1)\n    else:\n        out = None\n    return out",
    "docstring": "Mean of the inverse Wishart distribution. Parameters ---------- dim : int Dimension of the scale matrix %(_doc_default_callparams)s Notes ----- As this function does no argument checking, it should not be called directly; use 'mean' instead.",
    "type": "method",
    "file_path": "scipy\\scipy\\stats\\_multivariate.py",
    "ast_data": "FunctionDef name:_mean arg:self arg:dim arg:df arg:scale arguments arg arg arg arg If Compare Assign Assign Return return:yes"
  },
  {
    "library": "pandas",
    "name": "deprecate",
    "source_code": "def deprecate(name: str, alternative: Callable[..., Any], version: str, alt_name: str | None=None, klass: type[Warning] | None=None, stacklevel: int=2, msg: str | None=None) -> Callable[[F], F]:\n    alt_name = alt_name or alternative.__name__\n    klass = klass or FutureWarning\n    warning_msg = msg or f'{name} is deprecated, use {alt_name} instead.'\n\n    @wraps(alternative)\n    def wrapper(*args, **kwargs) -> Callable[..., Any]:\n        warnings.warn(warning_msg, klass, stacklevel=stacklevel)\n        return alternative(*args, **kwargs)\n    msg = msg or f'Use `{alt_name}` instead.'\n    doc_error_msg = f'deprecate needs a correctly formatted docstring in the target function (should have a one liner short summary, and opening quotes should be in their own line). Found:\\n{alternative.__doc__}'\n    if alternative.__doc__:\n        if alternative.__doc__.count('\\n') < 3:\n            raise AssertionError(doc_error_msg)\n        empty1, summary, empty2, doc_string = alternative.__doc__.split('\\n', 3)\n        if empty1 or (empty2 and (not summary)):\n            raise AssertionError(doc_error_msg)\n        wrapper.__doc__ = dedent(f'\\n        {summary.strip()}\\n\\n        .. deprecated:: {version}\\n            {msg}\\n\\n        {dedent(doc_string)}')\n    return wrapper",
    "docstring": "Return a new function that emits a deprecation warning on use. To use this method for a deprecated function, another function with the same signature must exist. The deprecated function will emit a deprecation warning, and in the docstring it will contain the deprecation directive with the provided version so it can be detected for future removal. Parameters ---------- name : str Name of function to deprecate. alternative : func Function to use instead. version : str Version of pandas in which the method has been deprecated. alt_name : str, optional Name to use in preference of alternative.__name__. klass : Warning, default FutureWarning stacklevel : int, default 2 msg : str The message to display in the warning. Default is '{name} is deprecated. Use {alt_name} instead.'",
    "type": "function",
    "file_path": "pandas\\pandas\\util\\_decorators.py",
    "ast_data": "FunctionDef name:deprecate arg:name arg:alternative arg:version arg:alt_name arg:klass arg:stacklevel arg:msg arguments arg arg arg arg arg arg arg Assign BoolOp Assign BoolOp Assign BoolOp FunctionDef name:wrapper arguments arg arg Call Return return:yes Call Call Assign BoolOp Assign If If Compare Call Raise Call Assign Call If BoolOp BoolOp Raise Call Assign Call Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "qform",
    "source_code": "def qform(A: Optional[Tensor], S: Tensor):\n    return bform(S, A, S)",
    "docstring": "Return quadratic form :math:.",
    "type": "function",
    "file_path": "pytorch\\torch\\_linalg_utils.py",
    "ast_data": "FunctionDef name:qform arg:A arg:S arguments arg arg Return return:yes Call"
  },
  {
    "library": "numpy",
    "name": "decode",
    "source_code": "def decode(self, encoding=None, errors=None):\n    return decode(self, encoding, errors)",
    "docstring": "Calls `` element-wise. See Also -------- char.decode",
    "type": "method",
    "file_path": "numpy\\numpy\\_core\\defchararray.py",
    "ast_data": "FunctionDef name:decode arg:self arg:encoding arg:errors arguments arg arg arg Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "get_lineoffset",
    "source_code": "def get_lineoffset(self):\n    return self._lineoffset",
    "docstring": "Return the offset of the lines used to mark each event.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\collections.py",
    "ast_data": "FunctionDef name:get_lineoffset arg:self arguments arg Return return:yes"
  },
  {
    "library": "authlib",
    "name": "authorize_access_token",
    "source_code": "def authorize_access_token(self, request, **kwargs):\n    params = request.GET.dict()\n    state = params.get('oauth_token')\n    if not state:\n        raise OAuthError(description='Missing \"oauth_token\" parameter')\n    data = self.framework.get_state_data(request.session, state)\n    if not data:\n        raise OAuthError(description='Missing \"request_token\" in temporary data')\n    params['request_token'] = data['request_token']\n    params.update(kwargs)\n    self.framework.clear_state_data(request.session, state)\n    return self.fetch_access_token(**params)",
    "docstring": "Fetch access token in one step. :param request: HTTP request instance from Django view. :return: A token dict.",
    "type": "method",
    "file_path": "authlib\\authlib\\integrations\\django_client\\apps.py",
    "ast_data": "FunctionDef name:authorize_access_token arg:self arg:request arguments arg arg arg Assign Call Assign Call If Raise Call Assign Call If Raise Call Assign Call Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "_calculate_min_max_stats",
    "source_code": "def _calculate_min_max_stats(self, x_copy):\n    min_val = self.min_val\n    max_val = self.max_val\n    x_dim = x_copy.size()\n    new_axis_list = [i for i in range(len(x_dim))]\n    new_axis_list[self.ch_axis] = 0\n    new_axis_list[0] = self.ch_axis\n    y = x_copy.permute(new_axis_list)\n    y = y.to(self.min_val.dtype)\n    y = torch.flatten(y, start_dim=1)\n    if min_val.numel() == 0 or max_val.numel() == 0:\n        min_val, max_val = torch.aminmax(y, dim=1)\n    else:\n        min_val_cur, max_val_cur = torch.aminmax(y, dim=1)\n        min_val = torch.min(min_val_cur, min_val)\n        max_val = torch.max(max_val_cur, max_val)\n    self.min_val.resize_(min_val.shape)\n    self.max_val.resize_(max_val.shape)\n    self.min_val.copy_(min_val)\n    self.max_val.copy_(max_val)\n    return x_copy",
    "docstring": "Calculates and stores the per_channel min, max stats with forward values. Does calculation based on channel axis: self.ch_axis Args x_copy: A copy of the forward data Returns the passed in x_copy",
    "type": "method",
    "file_path": "pytorch\\torch\\ao\\quantization\\fx\\_model_report\\model_report_observer.py",
    "ast_data": "FunctionDef name:_calculate_min_max_stats arg:self arg:x_copy arguments arg arg Assign Assign Assign Call Assign Call Call Assign Assign Assign Call Assign Call Assign Call If BoolOp Compare Call Compare Call Assign Call Assign Call Assign Call Assign Call Call Call Call Call Return return:yes"
  },
  {
    "library": "pandas",
    "name": "_check_for_invalid_keys",
    "source_code": "def _check_for_invalid_keys(fname, kwargs, compat_args) -> None:\n    diff = set(kwargs) - set(compat_args)\n    if diff:\n        bad_arg = next(iter(diff))\n        raise TypeError(f\"{fname}() got an unexpected keyword argument '{bad_arg}'\")",
    "docstring": "Checks whether 'kwargs' contains any keys that are not in 'compat_args' and raises a TypeError if there is one.",
    "type": "function",
    "file_path": "pandas\\pandas\\util\\_validators.py",
    "ast_data": "FunctionDef name:_check_for_invalid_keys arg:fname arg:kwargs arg:compat_args arguments arg arg arg Assign Call Call If Assign Call Call Raise Call"
  },
  {
    "library": "matplotlib",
    "name": "set_minor_formatter",
    "source_code": "def set_minor_formatter(self, formatter):\n    self._set_formatter(formatter, self.minor)",
    "docstring": "Set the formatter of the minor ticker. In addition to a instance, this also accepts a `.Axis.set_major_formatter~matplotlib.ticker.Formatter`, or function",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\axis.py",
    "ast_data": "FunctionDef name:set_minor_formatter arg:self arg:formatter arguments arg arg Call"
  },
  {
    "library": "authlib",
    "name": "get_issuer",
    "source_code": "def get_issuer(self) -> Optional[str]:\n    return None",
    "docstring": "Return the issuer URL. Developers MAY implement this method if they want to support :rfc::: def get_issuer(self) -> str: return \"",
    "type": "method",
    "file_path": "authlib\\authlib\\oauth2\\rfc9207\\parameter.py",
    "ast_data": "FunctionDef name:get_issuer arg:self arguments arg Return return:no"
  },
  {
    "library": "pytorch",
    "name": "_wrap_iter_in_set",
    "source_code": "@classmethod\ndef _wrap_iter_in_set(cls, other: Any) -> Any:\n    if not isinstance(other, AbstractSet) and isinstance(other, Iterable):\n        return cls(other)\n    else:\n        return other",
    "docstring": "Wrap non-Set Iterables in OrderedSets Some of the magic methods are more strict on input types than the public apis, so we need to wrap inputs in sets.",
    "type": "method",
    "file_path": "pytorch\\torch\\utils\\_ordered_set.py",
    "ast_data": "FunctionDef name:_wrap_iter_in_set arg:cls arg:other arguments arg arg If BoolOp Call Call Return return:yes Call Return return:yes"
  },
  {
    "library": "kornia",
    "name": "_get_default_qt_y",
    "source_code": "def _get_default_qt_y(device: Device, dtype: Dtype) -> Tensor:\n    return torch.tensor([[16, 11, 10, 16, 24, 40, 51, 61], [12, 12, 14, 19, 26, 58, 60, 55], [14, 13, 16, 24, 40, 57, 69, 56], [14, 17, 22, 29, 51, 87, 80, 62], [18, 22, 37, 56, 68, 109, 103, 77], [24, 35, 55, 64, 81, 104, 113, 92], [49, 64, 78, 87, 103, 121, 120, 101], [72, 92, 95, 98, 112, 100, 103, 99]], device=device, dtype=dtype)",
    "docstring": "Generate default Quantization table of Y channel.",
    "type": "function",
    "file_path": "kornia\\kornia\\enhance\\jpeg.py",
    "ast_data": "FunctionDef name:_get_default_qt_y arg:device arg:dtype arguments arg arg Return return:yes Call"
  },
  {
    "library": "seaborn",
    "name": "apply",
    "source_code": "def apply(self, data: DataFrame, func: Callable[..., DataFrame], *args, **kwargs) -> DataFrame:\n    grouper, groups = self._get_groups(data)\n    if not grouper:\n        return self._reorder_columns(func(data, *args, **kwargs), data)\n    parts = {}\n    for key, part_df in data.groupby(grouper, sort=False, observed=False):\n        parts[key] = func(part_df, *args, **kwargs)\n    stack = []\n    for key in groups:\n        if key in parts:\n            if isinstance(grouper, list):\n                group_ids = dict(zip(grouper, cast(Iterable, key)))\n            else:\n                group_ids = {grouper: key}\n            stack.append(parts[key].assign(**group_ids))\n    res = pd.concat(stack, ignore_index=True)\n    return self._reorder_columns(res, data)",
    "docstring": "Apply a DataFrame -> DataFrame mapping to each group.",
    "type": "method",
    "file_path": "seaborn\\seaborn\\_core\\groupby.py",
    "ast_data": "FunctionDef name:apply arg:self arg:data arg:func arguments arg arg arg arg arg Assign Call If Return return:yes Call Call Assign For Call Assign Call Assign For If Compare If Call Assign Call Call Call Assign Call Call Assign Call Return return:yes Call"
  },
  {
    "library": "seaborn",
    "name": "_eval_bivariate",
    "source_code": "def _eval_bivariate(self, x1, x2, weights):\n    bin_kws = self.bin_kws\n    if bin_kws is None:\n        bin_kws = self.define_bin_params(x1, x2, cache=False)\n    density = self.stat == 'density'\n    hist, *bin_edges = np.histogram2d(x1, x2, **bin_kws, weights=weights, density=density)\n    area = np.outer(np.diff(bin_edges[0]), np.diff(bin_edges[1]))\n    if self.stat == 'probability' or self.stat == 'proportion':\n        hist = hist.astype(float) / hist.sum()\n    elif self.stat == 'percent':\n        hist = hist.astype(float) / hist.sum() * 100\n    elif self.stat == 'frequency':\n        hist = hist.astype(float) / area\n    if self.cumulative:\n        if self.stat in ['density', 'frequency']:\n            hist = (hist * area).cumsum(axis=0).cumsum(axis=1)\n        else:\n            hist = hist.cumsum(axis=0).cumsum(axis=1)\n    return (hist, bin_edges)",
    "docstring": "Inner function for histogram of two variables.",
    "type": "method",
    "file_path": "seaborn\\seaborn\\_statistics.py",
    "ast_data": "FunctionDef name:_eval_bivariate arg:self arg:x1 arg:x2 arg:weights arguments arg arg arg arg Assign If Compare Assign Call Assign Compare Assign Call Assign Call Call Call If BoolOp Compare Compare Assign Call Call If Compare Assign Call Call If Compare Assign Call If If Compare Assign Call Call Assign Call Call Return return:yes"
  },
  {
    "library": "scipy",
    "name": "main",
    "source_code": "def main():\n    if not os.path.exists('release'):\n        os.makedirs('release')\n    write_release_task(os.path.join('release', 'README'))\n    write_log_task(os.path.join('release', 'Changelog'))",
    "docstring": "Checks weather release directory is present or not and calls the method to generate logs and notes",
    "type": "function",
    "file_path": "scipy\\tools\\write_release_and_log.py",
    "ast_data": "FunctionDef name:main arguments If Call Call Call Call Call Call"
  },
  {
    "library": "sphinx",
    "name": "head",
    "source_code": "def head(url: str, **kwargs: Any) -> requests.Response:\n    with _Session() as session:\n        return session.head(url, **kwargs)",
    "docstring": "Sends a HEAD request like ``. This sets up User-Agent header and TLS verification automatically.",
    "type": "function",
    "file_path": "sphinx\\sphinx\\util\\requests.py",
    "ast_data": "FunctionDef name:head arg:url arguments arg arg With Call Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "_reset",
    "source_code": "def _reset(self):\n    if hasattr(self, 'scale_'):\n        del self.scale_\n        del self.n_samples_seen_\n        del self.max_abs_",
    "docstring": "Reset internal data-dependent state of the scaler, if necessary. __init__ parameters are not touched.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\preprocessing\\_data.py",
    "ast_data": "FunctionDef name:_reset arg:self arguments arg If Call"
  },
  {
    "library": "scipy",
    "name": "query_pairs",
    "source_code": "def query_pairs(self, r, p=2.0, eps=0, output_type='set'):\n    return super().query_pairs(r, p, eps, output_type)",
    "docstring": "Find all pairs of points in whose distance is at most r. Parameters ---------- r : positive float The maximum distance. p : float, optional Which Minkowski norm to use. has to meet the condition ``1 >> import matplotlib.pyplot as plt >>> import numpy as np >>> from scipy.spatial import KDTree >>> rng = np.random.default_rng() >>> points = rng.random((20, 2)) >>> plt.figure(figsize=(6, 6)) >>> plt.plot(points[:, 0], points[:, 1], \"xk\", markersize=14) >>> kd_tree = KDTree(points) >>> pairs = kd_tree.query_pairs(r=0.2) >>> for (i, j) in pairs: ... plt.plot([points[i, 0], points[j, 0]], ... [points[i, 1], points[j, 1]], \"-r\") >>> plt.show()",
    "type": "method",
    "file_path": "scipy\\scipy\\spatial\\_kdtree.py",
    "ast_data": "FunctionDef name:query_pairs arg:self arg:r arg:p arg:eps arg:output_type arguments arg arg arg arg arg Return return:yes Call Call"
  },
  {
    "library": "matplotlib",
    "name": "get_gridlines",
    "source_code": "def get_gridlines(self):\n    ticks = self.get_major_ticks()\n    return cbook.silent_list('Line2D gridline', [tick.gridline for tick in ticks])",
    "docstring": "Return this Axis' grid lines as a list of \\s.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\axis.py",
    "ast_data": "FunctionDef name:get_gridlines arg:self arguments arg Assign Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_LazyEvalTensor",
    "source_code": "class _LazyEvalTensor(core.Tensor):\n\n    def __init__(self, thunk):\n        self._thunk = thunk\n        self._master_tensor = thunk()\n\n    def _as_tensor(self, dtype=None, name=None, as_ref=False):\n        del name\n        assert not as_ref\n        assert dtype in [None, self.dtype]\n        return self._thunk()",
    "docstring": "A Tensor-like object that only evaluates its thunk when used.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\variable_scope.py",
    "ast_data": "ClassDef name:_LazyEvalTensor FunctionDef name:__init__ arg:self arg:thunk arguments arg arg Assign Assign Call FunctionDef name:_as_tensor arg:self arg:dtype arg:name arg:as_ref arguments arg arg arg arg Compare Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "_blknos",
    "source_code": "@property\ndef _blknos(self) -> None:\n    return None",
    "docstring": "compat with BlockManager",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\internals\\managers.py",
    "ast_data": "FunctionDef name:_blknos arg:self arguments arg Return return:no"
  },
  {
    "library": "scikit-learn",
    "name": "__init__",
    "source_code": "def __init__(self, center, indices, score):\n    self.center = center\n    self.indices = indices\n    self.score = score\n    self.left = None\n    self.right = None",
    "docstring": "Create a new cluster node in the tree. The node holds the center of this cluster and the indices of the data points that belong to it.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\cluster\\_bisect_k_means.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:center arg:indices arg:score arguments arg arg arg arg Assign Assign Assign Assign Assign"
  },
  {
    "library": "sphinx",
    "name": "get_import_prefixes_from_env",
    "source_code": "def get_import_prefixes_from_env(env: BuildEnvironment) -> list[str | None]:\n    prefixes: list[str | None] = [None]\n    currmodule = env.ref_context.get('py:module')\n    if currmodule:\n        prefixes.insert(0, currmodule)\n    currclass = env.ref_context.get('py:class')\n    if currclass:\n        if currmodule:\n            prefixes.insert(0, f'{currmodule}.{currclass}')\n        else:\n            prefixes.insert(0, currclass)\n    return prefixes",
    "docstring": "Obtain current Python import prefixes (for ) from ``",
    "type": "function",
    "file_path": "sphinx\\sphinx\\ext\\autosummary\\__init__.py",
    "ast_data": "FunctionDef name:get_import_prefixes_from_env arg:env arguments arg Assign Call If Call Assign Call If If Call Call Return return:yes"
  },
  {
    "library": "scipy",
    "name": "irfft2",
    "source_code": "@_dispatch\ndef irfft2(x, s=None, axes=(-2, -1), norm=None, overwrite_x=False, workers=None, *, plan=None):\n    return (Dispatchable(x, np.ndarray),)",
    "docstring": "Computes the inverse of Parameters ---------- x : array_like The input array s : sequence of ints, optional Shape of the real output to the inverse FFT. axes : sequence of ints, optional The axes over which to compute the inverse fft. Default is the last two axes. norm : {\"backward\", \"ortho\", \"forward\"}, optional Normalization mode (see ). Default is \"backward\". overwrite_x : bool, optional If True, the contents of can be destroyed; the default is False. See :func: for more details. workers : int, optional Maximum number of workers to use for parallel computation. If negative, the value wraps around from `~scipy.fft.fftirfftnirfftn`.",
    "type": "function",
    "file_path": "scipy\\scipy\\fft\\_basic.py",
    "ast_data": "FunctionDef name:irfft2 arg:x arg:s arg:axes arg:norm arg:overwrite_x arg:workers arguments arg arg arg arg arg arg arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "_make_inplace",
    "source_code": "def _make_inplace(fn):\n\n    @wraps(fn)\n    def _fn(a, *args, **kwargs):\n        return fn(a, *args, out=a, **kwargs)\n    inplace_name = f'{fn.__name__}_'\n    _fn.__name__ = inplace_name\n    _fn = register_decomposition(getattr(aten, inplace_name))(_fn)\n    from inspect import getmodule\n    _all = getmodule(fn).__all__\n    if inplace_name not in _all:\n        _all.append(inplace_name)\n    return _fn",
    "docstring": "Given a function with out variant (i.e. using `out_wrapper()), it returns its in-place variant See",
    "type": "function",
    "file_path": "pytorch\\torch\\_refs\\__init__.py",
    "ast_data": "FunctionDef name:_make_inplace arg:fn arguments arg FunctionDef name:_fn arg:a arguments arg arg arg Return return:yes Call Call Assign Assign Assign Call Call Call Assign Call If Compare Call Return return:yes"
  },
  {
    "library": "scipy",
    "name": "select_initial_step",
    "source_code": "def select_initial_step(fun, t0, y0, t_bound, max_step, f0, direction, order, rtol, atol):\n    if y0.size == 0:\n        return np.inf\n    interval_length = abs(t_bound - t0)\n    if interval_length == 0.0:\n        return 0.0\n    scale = atol + np.abs(y0) * rtol\n    d0 = norm(y0 / scale)\n    d1 = norm(f0 / scale)\n    if d0 < 1e-05 or d1 < 1e-05:\n        h0 = 1e-06\n    else:\n        h0 = 0.01 * d0 / d1\n    h0 = min(h0, interval_length)\n    y1 = y0 + h0 * direction * f0\n    f1 = fun(t0 + h0 * direction, y1)\n    d2 = norm((f1 - f0) / scale) / h0\n    if d1 <= 1e-15 and d2 <= 1e-15:\n        h1 = max(1e-06, h0 * 0.001)\n    else:\n        h1 = (0.01 / max(d1, d2)) ** (1 / (order + 1))\n    return min(100 * h0, h1, interval_length, max_step)",
    "docstring": "Empirically select a good initial step. The algorithm is described in [1]_. Parameters ---------- fun : callable Right-hand side of the system. t0 : float Initial value of the independent variable. y0 : ndarray, shape (n,) Initial value of the dependent variable. t_bound : float End-point of integration interval; used to ensure that t0+step<=tbound and that fun is only evaluated in the interval [t0,tbound] max_step : float Maximum allowable step size. f0 : ndarray, shape (n,) Initial value of the derivative, i.e., `. rtol : float Desired relative tolerance. atol : float Desired absolute tolerance. Returns ------- h_abs : float Absolute value of the suggested initial step. References ---------- .. [1] E. Hairer, S. P. Norsett G. Wanner, \"Solving Ordinary Differential Equations I: Nonstiff Problems\", Sec. II.4.",
    "type": "function",
    "file_path": "scipy\\scipy\\integrate\\_ivp\\common.py",
    "ast_data": "FunctionDef name:select_initial_step arg:fun arg:t0 arg:y0 arg:t_bound arg:max_step arg:f0 arg:direction arg:order arg:rtol arg:atol arguments arg arg arg arg arg arg arg arg arg arg If Compare Return return:yes Assign Call If Compare Return return:yes Assign Call Assign Call Assign Call If BoolOp Compare Compare Assign Assign Assign Call Assign Assign Call Assign Call If BoolOp Compare Compare Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "enable_resource_variables",
    "source_code": "@tf_export(v1=['enable_resource_variables'])\ndef enable_resource_variables() -> None:\n    global _DEFAULT_USE_RESOURCE\n    _DEFAULT_USE_RESOURCE = True\n    logging.vlog(1, 'Enabling resource variables')\n    _api_usage_gauge.get_cell().set(True)",
    "docstring": "Creates resource variables by default. Resource variables are improved versions of TensorFlow variables with a well-defined memory model. Accessing a resource variable reads its value, and all ops which access a specific read value of the variable are guaranteed to see the same value for that tensor. Writes which happen after a read (by having a control or data dependency on the read) are guaranteed not to affect the value of the read tensor, and similarly writes which happen before a read are guaranteed to affect the value. No guarantees are made about unordered read/write pairs. Calling tf.enable_resource_variables() lets you opt-in to this TensorFlow 2.0 feature.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\resource_variables_toggle.py",
    "ast_data": "FunctionDef name:enable_resource_variables arguments Assign Call Call Call Call"
  },
  {
    "library": "numpy",
    "name": "polyadd",
    "source_code": "def polyadd(c1, c2):\n    return pu._add(c1, c2)",
    "docstring": "Add one polynomial to another. Returns the sum of two polynomials + . The arguments are sequences of coefficients from lowest order term to highest, i.e., [1,2,3] represents the polynomial ``. Parameters ---------- c1, c2 : array_like 1-D arrays of polynomial coefficients ordered from low to high. Returns ------- out : ndarray The coefficient array representing their sum. See Also -------- polysub, polymulx, polymul, polydiv, polypow Examples -------- >>> from numpy.polynomial import polynomial as P >>> c1 = (1, 2, 3) >>> c2 = (3, 2, 1) >>> sum = P.polyadd(c1,c2); sum array([4., 4., 4.]) >>> P.polyval(2, sum) # 4 + 4(2) + 4(2**2) 28.0",
    "type": "function",
    "file_path": "numpy\\numpy\\polynomial\\polynomial.py",
    "ast_data": "FunctionDef name:polyadd arg:c1 arg:c2 arguments arg arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_replace_keep_prob_node",
    "source_code": "def _replace_keep_prob_node(parent, old_value):\n    one = ast.Num(n=1)\n    one.lineno = 0\n    one.col_offset = 0\n    new_value = ast.BinOp(left=one, op=ast.Sub(), right=old_value)\n    pasta.ast_utils.replace_child(parent, old_value, new_value)\n    ast.copy_location(new_value, old_value)\n    pasta.base.formatting.set(old_value, 'prefix', '(')\n    pasta.base.formatting.set(old_value, 'suffix', ')')",
    "docstring": "Replaces old_value with 1-(old_value).",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\tools\\compatibility\\tf_upgrade_v2.py",
    "ast_data": "FunctionDef name:_replace_keep_prob_node arg:parent arg:old_value arguments arg arg Assign Call Assign Assign Assign Call Call Call Call Call Call"
  },
  {
    "library": "matplotlib",
    "name": "set_sizes",
    "source_code": "def set_sizes(self, sizes, dpi=72.0):\n    if sizes is None:\n        self._sizes = np.array([])\n        self._transforms = np.empty((0, 3, 3))\n    else:\n        self._sizes = np.asarray(sizes)\n        self._transforms = np.zeros((len(self._sizes), 3, 3))\n        scale = np.sqrt(self._sizes) * dpi / 72.0 * self._factor\n        self._transforms[:, 0, 0] = scale\n        self._transforms[:, 1, 1] = scale\n        self._transforms[:, 2, 2] = 1.0\n    self.stale = True",
    "docstring": "Set the sizes of each member of the collection. Parameters ---------- sizes : or None The size to set for each element of the collection. The value is the 'area' of the element. dpi : float, default: 72 The dpi of the canvas.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\collections.py",
    "ast_data": "FunctionDef name:set_sizes arg:self arg:sizes arg:dpi arguments arg arg arg If Compare Assign Call Assign Call Assign Call Assign Call Call Assign Call Assign Assign Assign Assign"
  },
  {
    "library": "tensorflow",
    "name": "_copy_trackable_to_cpu",
    "source_code": "def _copy_trackable_to_cpu(self, object_map):\n    self._v._copy_trackable_to_cpu(object_map)\n    if self not in object_map:\n        object_map[self] = AggregatingVariable(self._distribute_strategy, object_map[self._v], self._aggregation)",
    "docstring": "For implementing .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\ps_values.py",
    "ast_data": "FunctionDef name:_copy_trackable_to_cpu arg:self arg:object_map arguments arg arg Call If Compare Assign Call"
  },
  {
    "library": "django",
    "name": "_if_modified_since_passes",
    "source_code": "def _if_modified_since_passes(last_modified, if_modified_since):\n    return not last_modified or last_modified > if_modified_since",
    "docstring": "Test the If-Modified-Since comparison as defined in RFC 9110 Section 13.1.3.",
    "type": "function",
    "file_path": "django\\django\\utils\\cache.py",
    "ast_data": "FunctionDef name:_if_modified_since_passes arg:last_modified arg:if_modified_since arguments arg arg Return return:yes BoolOp Compare"
  },
  {
    "library": "numpy",
    "name": "lagval",
    "source_code": "def lagval(x, c, tensor=True):\n    c = np.array(c, ndmin=1, copy=None)\n    if c.dtype.char in '?bBhHiIlLqQpP':\n        c = c.astype(np.double)\n    if isinstance(x, (tuple, list)):\n        x = np.asarray(x)\n    if isinstance(x, np.ndarray) and tensor:\n        c = c.reshape(c.shape + (1,) * x.ndim)\n    if len(c) == 1:\n        c0 = c[0]\n        c1 = 0\n    elif len(c) == 2:\n        c0 = c[0]\n        c1 = c[1]\n    else:\n        nd = len(c)\n        c0 = c[-2]\n        c1 = c[-1]\n        for i in range(3, len(c) + 1):\n            tmp = c0\n            nd = nd - 1\n            c0 = c[-i] - c1 * (nd - 1) / nd\n            c1 = tmp + c1 * (2 * nd - 1 - x) / nd\n    return c0 + c1 * (1 - x)",
    "docstring": "Evaluate a Laguerre series at points x. If is of length `xxccxctensortensortensorxxcccxcxxcc` is multidimensional. The default value is True. Returns ------- values : ndarray, algebra_like The shape of the return value is described above. See Also -------- lagval2d, laggrid2d, lagval3d, laggrid3d Notes ----- The evaluation uses Clenshaw recursion, aka synthetic division. Examples -------- >>> from numpy.polynomial.laguerre import lagval >>> coef = [1, 2, 3] >>> lagval(1, coef) -0.5 >>> lagval([[1, 2],[3, 4]], coef) array([[-0.5, -4. ], [-4.5, -2. ]])",
    "type": "function",
    "file_path": "numpy\\numpy\\polynomial\\laguerre.py",
    "ast_data": "FunctionDef name:lagval arg:x arg:c arg:tensor arguments arg arg arg Assign Call If Compare Assign Call If Call Assign Call If BoolOp Call Assign Call If Compare Call Assign Assign If Compare Call Assign Assign Assign Call Assign Assign For Call Call Assign Assign Assign Assign Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "disable",
    "source_code": "@staticmethod\ndef disable():\n    torch._C._set_check_sparse_tensor_invariants(False)",
    "docstring": "Disable sparse tensor invariants checking in sparse tensor constructors. See :func: for more information.",
    "type": "method",
    "file_path": "pytorch\\torch\\sparse\\__init__.py",
    "ast_data": "FunctionDef name:disable arguments Call"
  },
  {
    "library": "pandas",
    "name": "convert_dtypes",
    "source_code": "@final\ndef convert_dtypes(self, infer_objects: bool=True, convert_string: bool=True, convert_integer: bool=True, convert_boolean: bool=True, convert_floating: bool=True, dtype_backend: DtypeBackend='numpy_nullable') -> Self:\n    check_dtype_backend(dtype_backend)\n    new_mgr = self._mgr.convert_dtypes(infer_objects=infer_objects, convert_string=convert_string, convert_integer=convert_integer, convert_boolean=convert_boolean, convert_floating=convert_floating, dtype_backend=dtype_backend)\n    res = self._constructor_from_mgr(new_mgr, axes=new_mgr.axes)\n    return res.__finalize__(self, method='convert_dtypes')",
    "docstring": "Convert columns from numpy dtypes to the best dtypes that support `convert_integerDataFrameSeriesDataFrameSeriresArrowDtypeDataFrameSeries`. >>> s.convert_dtypes() 0 a 1 b 2 dtype: string",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\generic.py",
    "ast_data": "FunctionDef name:convert_dtypes arg:self arg:infer_objects arg:convert_string arg:convert_integer arg:convert_boolean arg:convert_floating arg:dtype_backend arguments arg arg arg arg arg arg arg Call Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "@deprecation.deprecated('2019-01-01', 'The TensorFlow Distributions library has moved to TensorFlow Probability (https://github.com/tensorflow/probability). You should update all references to use `tfp.distributions` instead of `tf.distributions`.', warn_once=True)\ndef __init__(self, logits=None, probs=None, dtype=dtypes.int32, validate_args=False, allow_nan_stats=True, name='Bernoulli'):\n    parameters = dict(locals())\n    with ops.name_scope(name) as name:\n        self._logits, self._probs = distribution_util.get_logits_and_probs(logits=logits, probs=probs, validate_args=validate_args, name=name)\n    super(Bernoulli, self).__init__(dtype=dtype, reparameterization_type=distribution.NOT_REPARAMETERIZED, validate_args=validate_args, allow_nan_stats=allow_nan_stats, parameters=parameters, graph_parents=[self._logits, self._probs], name=name)",
    "docstring": "Construct Bernoulli distributions. Args: logits: An N-D representing the log-odds of a event. Each entry in the parametrizes an independent Bernoulli distribution where the probability of an event is sigmoid(logits). Only one of or should be passed in. probs: An N-D representing the probability of a event. Each entry in the parameterizes an independent Bernoulli distribution. Only one of or should be passed in. dtype: The type of the event samples. Default: . validate_args: Python , default . When distribution parameters are checked for validity despite possibly degrading runtime performance. When invalid inputs may silently render incorrect outputs. allow_nan_stats: Python , default . When , statistics (e.g., mean, mode, variance) use the value \"\" to indicate the result is undefined. When , an exception is raised if one or more of the statistic's batch members are undefined. name: Python name prefixed to Ops created by this class. Raises: ValueError: If p and logits are passed, or if neither are passed.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\distributions\\bernoulli.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:logits arg:probs arg:dtype arg:validate_args arg:allow_nan_stats arg:name arguments arg arg arg arg arg arg arg Assign Call Call With Call Assign Call Call Call Call"
  },
  {
    "library": "django",
    "name": "x",
    "source_code": "@property\ndef x(self):\n    return self._cs.getOrdinate(0, 0)",
    "docstring": "Return the X component of the Point.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\geos\\point.py",
    "ast_data": "FunctionDef name:x arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "xmax",
    "source_code": "@property\ndef xmax(self):\n    return asarray([b[1] for b in self.bounds])",
    "docstring": "The upper bounds for the problem Returns ------- xmax : sequence The upper bounds for the problem",
    "type": "method",
    "file_path": "scipy\\benchmarks\\benchmarks\\go_benchmark_functions\\go_benchmark.py",
    "ast_data": "FunctionDef name:xmax arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_ragged_stack_concat_axis_0",
    "source_code": "def _ragged_stack_concat_axis_0(rt_inputs, stack_values):\n    flat_values = [rt.flat_values for rt in rt_inputs]\n    concatenated_flat_values = array_ops.concat(flat_values, axis=0)\n    nested_splits = [rt.nested_row_splits for rt in rt_inputs]\n    ragged_rank = rt_inputs[0].ragged_rank\n    concatenated_nested_splits = [_concat_ragged_splits([ns[dim] for ns in nested_splits]) for dim in range(ragged_rank)]\n    if stack_values:\n        stack_lengths = array_ops_stack.stack([rt.nrows() for rt in rt_inputs])\n        stack_splits = ragged_util.lengths_to_splits(stack_lengths)\n        concatenated_nested_splits.insert(0, stack_splits)\n    return ragged_tensor.RaggedTensor.from_nested_row_splits(concatenated_flat_values, concatenated_nested_splits, validate=False)",
    "docstring": "Helper function to concatenate or stack ragged tensors along axis 0. Args: rt_inputs: A list of RaggedTensors, all with the same rank and ragged_rank. stack_values: Boolean. If true, then stack values; otherwise, concatenate them. Returns: A RaggedTensor.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\ragged_concat_ops.py",
    "ast_data": "FunctionDef name:_ragged_stack_concat_axis_0 arg:rt_inputs arg:stack_values arguments arg arg Assign Assign Call Assign Assign Assign Call Call If Assign Call Call Assign Call Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "from_seed",
    "source_code": "@classmethod\ndef from_seed(cls, seed, alg=None):\n    if alg is None:\n        alg = DEFAULT_ALGORITHM\n    alg = random_ops_util.convert_alg_to_int(alg)\n    state = create_rng_state(seed, alg)\n    return cls(state=state, alg=alg)",
    "docstring": "Creates a generator from a seed. A seed is a 1024-bit unsigned integer represented either as a Python integer or a vector of integers. Seeds shorter than 1024-bit will be padded. The padding, the internal structure of a seed and the way a seed is converted to a state are all opaque (unspecified). The only semantics specification of seeds is that two different seeds are likely to produce two independent generators (but no guarantee). Args: seed: the seed for the RNG. alg: (optional) the RNG algorithm. If None, it will be auto-selected. See for its possible values. Returns: The new generator.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\stateful_random_ops.py",
    "ast_data": "FunctionDef name:from_seed arg:cls arg:seed arg:alg arguments arg arg arg If Compare Assign Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "_idxmax_idxmin",
    "source_code": "def _idxmax_idxmin(self, how: Literal['idxmax', 'idxmin'], ignore_unobserved: bool=False, skipna: bool=True, numeric_only: bool=False) -> NDFrameT:\n    if not self.observed and any((ping._passed_categorical for ping in self._grouper.groupings)):\n        expected_len = len(self._grouper.result_index)\n        group_sizes = self._grouper.size()\n        result_len = group_sizes[group_sizes > 0].shape[0]\n        assert result_len <= expected_len\n        has_unobserved = result_len < expected_len\n        raise_err: bool | np.bool_ = not ignore_unobserved and has_unobserved\n        data = self._obj_with_exclusions\n        if raise_err and isinstance(data, DataFrame):\n            if numeric_only:\n                data = data._get_numeric_data()\n            raise_err = len(data.columns) > 0\n        if raise_err:\n            raise ValueError(f\"Can't get {how} of an empty group due to unobserved categories. Specify observed=True in groupby instead.\")\n    elif not skipna and self._obj_with_exclusions.isna().any(axis=None):\n        raise ValueError(f'{type(self).__name__}.{how} with skipna=False encountered an NA value.')\n    result = self._agg_general(numeric_only=numeric_only, min_count=1, alias=how, skipna=skipna)\n    return result",
    "docstring": "Compute idxmax/idxmin. Parameters ---------- how : {'idxmin', 'idxmax'} Whether to compute idxmin or idxmax. numeric_only : bool, default False Include only float, int, boolean columns. skipna : bool, default True Exclude NA/null values. If an entire group is NA, the result will be NA. ignore_unobserved : bool, default False When True and an unobserved group is encountered, do not raise. This used for transform where unobserved groups do not play an impact on the result. Returns ------- Series or DataFrame idxmax or idxmin for the groupby operation.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\groupby\\groupby.py",
    "ast_data": "FunctionDef name:_idxmax_idxmin arg:self arg:how arg:ignore_unobserved arg:skipna arg:numeric_only arguments arg arg arg arg arg If BoolOp Call Assign Call Assign Call Assign Compare Compare Assign Compare BoolOp Assign If BoolOp Call If Assign Call Assign Compare Call If Raise Call If BoolOp Call Call Raise Call Call Assign Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "CleanupHook",
    "source_code": "@dataclasses.dataclass\nclass CleanupHook:\n    scope: dict[str, Any]\n    name: str\n\n    def __call__(self, *args):\n        if CleanupManager is not None:\n            CleanupManager.count -= 1\n        del self.scope[self.name]\n\n    @staticmethod\n    def create(scope, name, val):\n        assert name not in scope\n        CleanupManager.count += 1\n        scope[name] = val\n        return CleanupHook(scope, name)",
    "docstring": "Remove a global variable when hook is called",
    "type": "class",
    "file_path": "pytorch\\torch\\_dynamo\\utils.py",
    "ast_data": "ClassDef name:CleanupHook FunctionDef name:__call__ arg:self arguments arg arg If Compare FunctionDef name:create arg:scope arg:name arg:val arguments arg arg arg Compare Assign Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_make_default_values",
    "source_code": "def _make_default_values(fullargspec: inspect.FullArgSpec) -> Dict[str, Any]:\n    if fullargspec.defaults is not None:\n        defaults = {name: value for name, value in zip(fullargspec.args[-len(fullargspec.defaults):], fullargspec.defaults)}\n    else:\n        defaults = {}\n    if fullargspec.kwonlydefaults is not None:\n        defaults.update(fullargspec.kwonlydefaults)\n    return defaults",
    "docstring": "Returns default values from the function's fullargspec.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\util\\tf_decorator.py",
    "ast_data": "FunctionDef name:_make_default_values arg:fullargspec arguments arg If Compare Assign Call Call Assign If Compare Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "rot90",
    "source_code": "@register_decomposition(aten.rot90)\n@out_wrapper()\ndef rot90(a: TensorLikeType, k: int=1, dims: DimsSequenceType=(0, 1)) -> TensorLikeType:\n    if len(dims) != 2:\n        raise RuntimeError(f'expected total rotation dims == 2, but got dims = {len(dims)}')\n    if a.ndim < 2:\n        raise RuntimeError(f'expected total dims >= 2, but got total dims = {a.ndim}')\n    dims = utils.canonicalize_dims(a.ndim, dims)\n    if dims[0] == dims[1]:\n        raise RuntimeError(f'expected rotation dims to be different, but got dim0 = {dims[0]} and dim1 = {dims[1]}')\n    k = k % 4\n    if k == 1:\n        return torch.transpose(torch.flip(a, (dims[1],)), dims[0], dims[1])\n    elif k == 2:\n        return torch.flip(a, dims)\n    elif k == 3:\n        return torch.transpose(torch.flip(a, (dims[0],)), dims[0], dims[1])\n    else:\n        return a.clone(memory_format=torch.contiguous_format)",
    "docstring": "Reference implementation of :func:.",
    "type": "function",
    "file_path": "pytorch\\torch\\_refs\\__init__.py",
    "ast_data": "FunctionDef name:rot90 arg:a arg:k arg:dims arguments arg arg arg If Compare Call Raise Call Call If Compare Raise Call Assign Call If Compare Raise Call Assign If Compare Return return:yes Call Call If Compare Return return:yes Call If Compare Return return:yes Call Call Return return:yes Call Call Call"
  },
  {
    "library": "pygame",
    "name": "get_arraytypes",
    "source_code": "def get_arraytypes():\n    warnings.warn(DeprecationWarning('only numpy arrays are now supported, this function will be removed in a future version of the module'))\n    return ('numpy',)",
    "docstring": "pygame.sndarray.get_arraytypes(): return tuple DEPRECATED - only numpy arrays are now supported.",
    "type": "function",
    "file_path": "pygame\\src_py\\sndarray.py",
    "ast_data": "FunctionDef name:get_arraytypes arguments Call Call Return return:yes"
  },
  {
    "library": "kornia",
    "name": "inverse_transform",
    "source_code": "def inverse_transform(self, input: Tensor, flags: Dict[str, Any], transform: Optional[Tensor]=None, size: Optional[Tuple[int, int]]=None) -> Tensor:\n    raise NotImplementedError",
    "docstring": "By default, the exact transformation as `` will be used.",
    "type": "method",
    "file_path": "kornia\\kornia\\augmentation\\_2d\\geometric\\base.py",
    "ast_data": "FunctionDef name:inverse_transform arg:self arg:input arg:flags arg:transform arg:size arguments arg arg arg arg arg Raise"
  },
  {
    "library": "scipy",
    "name": "cheb1ap",
    "source_code": "def cheb1ap(N, rp):\n    if abs(int(N)) != N:\n        raise ValueError('Filter order must be a nonnegative integer')\n    elif N == 0:\n        return (np.array([]), np.array([]), 10 ** (-rp / 20))\n    z = np.array([])\n    eps = np.sqrt(10 ** (0.1 * rp) - 1.0)\n    mu = 1.0 / N * arcsinh(1 / eps)\n    m = np.arange(-N + 1, N, 2)\n    theta = pi * m / (2 * N)\n    p = -sinh(mu + 1j * theta)\n    k = np.prod(-p, axis=0).real\n    if N % 2 == 0:\n        k = k / sqrt(1 + eps * eps)\n    return (z, p, k)",
    "docstring": "Return (z,p,k) for Nth-order Chebyshev type I analog lowpass filter. The returned filter prototype has decibels of ripple in the passband. The filter's angular (e.g. rad/s) cutoff frequency is normalized to 1, defined as the point at which the gain first drops below ``. See Also -------- cheby1 : Filter design function using this prototype",
    "type": "function",
    "file_path": "scipy\\scipy\\signal\\_filter_design.py",
    "ast_data": "FunctionDef name:cheb1ap arg:N arg:rp arguments arg arg If Compare Call Call Raise Call If Compare Return return:yes Call Call Assign Call Assign Call Assign Call Assign Call Assign Assign Call Assign Call If Compare Assign Call Return return:yes"
  },
  {
    "library": "numpy",
    "name": "outer",
    "source_code": "def outer(self, a, b):\n    ma = getmask(a)\n    mb = getmask(b)\n    if ma is nomask and mb is nomask:\n        m = nomask\n    else:\n        ma = getmaskarray(a)\n        mb = getmaskarray(b)\n        m = logical_or.outer(ma, mb)\n    result = self.f.outer(filled(a), filled(b))\n    if not isinstance(result, MaskedArray):\n        result = result.view(MaskedArray)\n    result._mask = m\n    return result",
    "docstring": "Return the function applied to the outer product of a and b.",
    "type": "method",
    "file_path": "numpy\\numpy\\ma\\core.py",
    "ast_data": "FunctionDef name:outer arg:self arg:a arg:b arguments arg arg arg Assign Call Assign Call If BoolOp Compare Compare Assign Assign Call Assign Call Assign Call Assign Call Call Call If Call Assign Call Assign Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "set_hatch",
    "source_code": "def set_hatch(self, hatch):\n    mhatch._validate_hatch_pattern(hatch)\n    self._hatch = hatch\n    self.stale = True",
    "docstring": "Set the hatching pattern *hatch* can be one of:: / - diagonal hatching \\ - back diagonal | - vertical - - horizontal + - crossed x - crossed diagonal o - small circle O - large circle . - dots * - stars Letters can be combined, in which case all the specified hatchings are done. If same letter repeats, it increases the density of hatching of that pattern. Unlike other properties such as linewidth and colors, hatching can only be specified for the collection as a whole, not separately for each member. Parameters ---------- hatch : {'/', '\\\\', '|', '-', '+', 'x', 'o', 'O', '.', '*'}",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\collections.py",
    "ast_data": "FunctionDef name:set_hatch arg:self arg:hatch arguments arg arg Call Assign Assign"
  },
  {
    "library": "tensorflow",
    "name": "_convert_to_sparse_tensor",
    "source_code": "def _convert_to_sparse_tensor(sp_input):\n    if isinstance(sp_input, sparse_tensor.SparseTensorValue):\n        return sparse_tensor.SparseTensor.from_value(sp_input)\n    if not isinstance(sp_input, sparse_tensor.SparseTensor):\n        raise TypeError('Input must be a SparseTensor.')\n    return sp_input",
    "docstring": "Convert to and return it. Args: sp_input: or . Returns: converted to . Raises: ValueError: if is neither nor .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\sparse_ops.py",
    "ast_data": "FunctionDef name:_convert_to_sparse_tensor arg:sp_input arguments arg If Call Return return:yes Call If Call Raise Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "GcpCpuTerminationConfig",
    "source_code": "class GcpCpuTerminationConfig(TerminationConfig):\n\n    def __init__(self, termination_watcher_fn=None, exit_fn=None, grace_period=None, save_fn=None):\n        self.termination_watcher_fn = termination_watcher_fn or failure_handling_util.termination_watcher_function_gce\n        self.exit_fn = exit_fn or failure_handling_util.gce_exit_fn\n        self.grace_period = grace_period or 0\n        self.save_fn = save_fn",
    "docstring": "Configurations for GCP CPU VM.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\failure_handling\\failure_handling.py",
    "ast_data": "ClassDef name:GcpCpuTerminationConfig FunctionDef name:__init__ arg:self arg:termination_watcher_fn arg:exit_fn arg:grace_period arg:save_fn arguments arg arg arg arg arg Assign BoolOp Assign BoolOp Assign BoolOp Assign"
  },
  {
    "library": "pytorch",
    "name": "parse_fuller_format",
    "source_code": "def parse_fuller_format(lines: Union[str, list[str]]) -> GitCommit:\n    if isinstance(lines, str):\n        lines = lines.split('\\n')\n    if len(lines) > 1 and lines[1].startswith('Merge:'):\n        del lines[1]\n    assert len(lines) > 7\n    assert lines[0].startswith('commit')\n    assert lines[1].startswith('Author: ')\n    assert lines[2].startswith('AuthorDate: ')\n    assert lines[3].startswith('Commit: ')\n    assert lines[4].startswith('CommitDate: ')\n    assert len(lines[5]) == 0\n    return GitCommit(commit_hash=lines[0].split()[1].strip(), author=lines[1].split(':', 1)[1].strip(), author_date=datetime.fromtimestamp(int(lines[2].split(':', 1)[1].strip())), commit_date=datetime.fromtimestamp(int(lines[4].split(':', 1)[1].strip())), title=lines[6].strip(), body='\\n'.join(lines[7:]))",
    "docstring": "Expect commit message generated using format, i.e.: commit Author: AuthorDate: Commit: CommitDate:",
    "type": "function",
    "file_path": "pytorch\\.github\\scripts\\gitutils.py",
    "ast_data": "FunctionDef name:parse_fuller_format arg:lines arguments arg If Call Assign Call If BoolOp Compare Call Call Compare Call Call Call Call Call Call Compare Call Return return:yes Call Call Call Call Call Call Call Call Call Call Call Call Call Call Call"
  },
  {
    "library": "numpy",
    "name": "__setstate__",
    "source_code": "def __setstate__(self, state):\n    _, shp, typ, isf, raw, msk, flv = state\n    super().__setstate__((shp, typ, isf, raw))\n    self._mask.__setstate__((shp, make_mask_descr(typ), isf, msk))\n    self.fill_value = flv",
    "docstring": "Restore the internal state of the masked array, for pickling purposes. `` output, and is a 5-tuple: - class name - a tuple giving the shape of the data - a typecode for the data - a binary string for the data - a binary string for the mask.",
    "type": "method",
    "file_path": "numpy\\numpy\\ma\\core.py",
    "ast_data": "FunctionDef name:__setstate__ arg:self arg:state arguments arg arg Assign Call Call Call Call Assign"
  },
  {
    "library": "tensorflow",
    "name": "grad_sync",
    "source_code": "@property\ndef grad_sync(self):\n    if self._grad_sync is None:\n        with ops.control_dependencies(None):\n            self._grad_sync = control_flow_ops.control_trigger(name='b_sync')\n        self._grad_sync._set_control_flow_context(self._grad_context)\n        self._grad_index.op._add_control_input(self._grad_sync)\n        if self._grad_context.outer_context:\n            self._grad_context.outer_context.AddInnerOp(self._grad_sync)\n    return self._grad_sync",
    "docstring": "A control trigger node for synchronization in the grad loop. One main use is to keep the pop ops of a stack executed in the iteration order.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\control_flow_state.py",
    "ast_data": "FunctionDef name:grad_sync arg:self arguments arg If Compare With Call Assign Call Call Call If Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "Tanhshrink",
    "source_code": "class Tanhshrink(Module):\n\n    def forward(self, input: Tensor) -> Tensor:\n        return F.tanhshrink(input)",
    "docstring": "Applies the element-wise Tanhshrink function. .. math:: \\text{Tanhshrink}(x) = x - \\tanh(x) Shape: - Input: :math:, where :math: means any number of dimensions. - Output: :math:, same shape as the input. .. image:: ../scripts/activation_images/Tanhshrink.png Examples:: >>> m = nn.Tanhshrink() >>> input = torch.randn(2) >>> output = m(input)",
    "type": "class",
    "file_path": "pytorch\\torch\\nn\\modules\\activation.py",
    "ast_data": "ClassDef name:Tanhshrink FunctionDef name:forward arg:self arg:input arguments arg arg Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "set_edgecolor",
    "source_code": "def set_edgecolor(self, color):\n    self.patch.set_edgecolor(color)",
    "docstring": "Set the edge color of the Figure rectangle. Parameters ---------- color : :mpltype:",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\figure.py",
    "ast_data": "FunctionDef name:set_edgecolor arg:self arg:color arguments arg arg Call"
  },
  {
    "library": "tensorflow",
    "name": "false_positives_at_thresholds",
    "source_code": "@tf_export(v1=['metrics.false_positives_at_thresholds'])\ndef false_positives_at_thresholds(labels, predictions, thresholds, weights=None, metrics_collections=None, updates_collections=None, name=None):\n    if context.executing_eagerly():\n        raise RuntimeError('tf.metrics.false_positives_at_thresholds is not supported when eager execution is enabled.')\n    with variable_scope.variable_scope(name, 'false_positives', (predictions, labels, weights)):\n        values, update_ops = _confusion_matrix_at_thresholds(labels, predictions, thresholds, weights=weights, includes=('fp',))\n        fp_value = _aggregate_variable(values['fp'], metrics_collections)\n        if updates_collections:\n            ops.add_to_collections(updates_collections, update_ops['fp'])\n        return (fp_value, update_ops['fp'])",
    "docstring": "Computes false positives at provided threshold values. If is , weights default to 1. Use weights of 0 to mask values. Args: labels: A whose shape matches . Will be cast to . predictions: A floating point of arbitrary shape and whose values are in the range . thresholds: A python list or tuple of float thresholds in . weights: Optional whose rank is either 0, or the same rank as , and must be broadcastable to (i.e., all dimensions must be either , or the same as the corresponding dimension). metrics_collections: An optional list of collections that should be added to. updates_collections: An optional list of collections that should be added to. name: An optional variable_scope name. Returns: false_positives: A float of shape . update_op: An operation that updates the variable and returns its current value. Raises: ValueError: If and have mismatched shapes, or if is not and its shape doesn't match , or if either or are not a list or tuple. RuntimeError: If eager execution is enabled.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\metrics_impl.py",
    "ast_data": "FunctionDef name:false_positives_at_thresholds arg:labels arg:predictions arg:thresholds arg:weights arg:metrics_collections arg:updates_collections arg:name arguments arg arg arg arg arg arg arg If Call Raise Call With Call Assign Call Assign Call If Call Return return:yes Call"
  },
  {
    "library": "scrapy",
    "name": "from_crawler",
    "source_code": "@classmethod\ndef from_crawler(cls, crawler: Crawler) -> Self:\n    return cls()",
    "docstring": "Factory method which receives the current :class: object as argument.",
    "type": "method",
    "file_path": "scrapy\\scrapy\\core\\scheduler.py",
    "ast_data": "FunctionDef name:from_crawler arg:cls arg:crawler arguments arg arg Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "DataIndexableCol",
    "source_code": "class DataIndexableCol(DataCol):\n    is_data_indexable = True\n\n    def validate_names(self) -> None:\n        if not is_string_dtype(Index(self.values).dtype):\n            raise ValueError('cannot have non-object label DataIndexableCol')\n\n    @classmethod\n    def get_atom_string(cls, shape, itemsize):\n        return _tables().StringCol(itemsize=itemsize)\n\n    @classmethod\n    def get_atom_data(cls, shape, kind: str) -> Col:\n        return cls.get_atom_coltype(kind=kind)()\n\n    @classmethod\n    def get_atom_datetime64(cls, shape):\n        return _tables().Int64Col()\n\n    @classmethod\n    def get_atom_timedelta64(cls, shape):\n        return _tables().Int64Col()",
    "docstring": "represent a data column that can be indexed",
    "type": "class",
    "file_path": "pandas\\pandas\\io\\pytables.py",
    "ast_data": "ClassDef name:DataIndexableCol Assign FunctionDef name:validate_names arg:self arguments arg If Call Call Raise Call FunctionDef name:get_atom_string arg:cls arg:shape arg:itemsize arguments arg arg arg Return return:yes Call Call FunctionDef name:get_atom_data arg:cls arg:shape arg:kind arguments arg arg arg Return return:yes Call Call FunctionDef name:get_atom_datetime64 arg:cls arg:shape arguments arg arg Return return:yes Call Call FunctionDef name:get_atom_timedelta64 arg:cls arg:shape arguments arg arg Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "step",
    "source_code": "@_use_grad_for_differentiable\ndef step(self, closure=None):\n    self._cuda_graph_capture_health_check()\n    loss = None\n    if closure is not None:\n        with torch.enable_grad():\n            loss = closure()\n    for group in self.param_groups:\n        params_with_grad: list[Tensor] = []\n        grads: list[Tensor] = []\n        exp_avgs: list[Tensor] = []\n        exp_avg_sqs: list[Tensor] = []\n        state_steps: list[Tensor] = []\n        beta1, beta2 = cast(tuple[float, float], group['betas'])\n        has_complex = self._init_group(group, params_with_grad, grads, exp_avgs, exp_avg_sqs, state_steps)\n        radam(params_with_grad, grads, exp_avgs, exp_avg_sqs, state_steps, beta1=beta1, beta2=beta2, lr=group['lr'], weight_decay=group['weight_decay'], eps=group['eps'], maximize=group['maximize'], foreach=group['foreach'], capturable=group['capturable'], differentiable=group['differentiable'], decoupled_weight_decay=group['decoupled_weight_decay'], has_complex=has_complex)\n    return loss",
    "docstring": "Perform a single optimization step. Args: closure (Callable, optional): A closure that reevaluates the model and returns the loss.",
    "type": "method",
    "file_path": "pytorch\\torch\\optim\\radam.py",
    "ast_data": "FunctionDef name:step arg:self arg:closure arguments arg arg Call Assign If Compare With Call Assign Call For Assign Call Assign Call Call Return return:yes"
  },
  {
    "library": "sphinx",
    "name": "note_toctree",
    "source_code": "def note_toctree(env: BuildEnvironment, docname: str, toctreenode: addnodes.toctree) -> None:\n    if toctreenode['glob']:\n        env.glob_toctrees.add(docname)\n    if toctreenode.get('numbered'):\n        env.numbered_toctrees.add(docname)\n    include_files = toctreenode['includefiles']\n    for include_file in include_files:\n        env.files_to_rebuild.setdefault(include_file, set()).add(docname)\n    env.toctree_includes.setdefault(docname, []).extend(include_files)",
    "docstring": "Note a TOC tree directive in a document and gather information about file relations from it.",
    "type": "function",
    "file_path": "sphinx\\sphinx\\environment\\adapters\\toctree.py",
    "ast_data": "FunctionDef name:note_toctree arg:env arg:docname arg:toctreenode arguments arg arg arg If Call If Call Call Assign For Call Call Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_per_replica_aggregate_batch",
    "source_code": "def _per_replica_aggregate_batch(strategy, batch_outs, model, mode):\n    if strategy is not None and mode == ModeKeys.PREDICT:\n        total_batch_outs = []\n        for i in range(len(model.outputs)):\n            num_replicas = strategy.num_replicas_in_sync\n            nested_outs = batch_outs[i * num_replicas:i * num_replicas + num_replicas]\n            total_batch_outs.append(concat_along_batch_dimension(nest.flatten(nested_outs)))\n        return total_batch_outs\n    return batch_outs",
    "docstring": "Aggregates the per-replica batch-level outputs from a distributed step.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\distribute\\distributed_training_utils_v1.py",
    "ast_data": "FunctionDef name:_per_replica_aggregate_batch arg:strategy arg:batch_outs arg:model arg:mode arguments arg arg arg arg If BoolOp Compare Compare Assign For Call Call Assign Assign Call Call Call Return return:yes Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "locally_modified_subplot_params",
    "source_code": "def locally_modified_subplot_params(self):\n    return [k for k in self._AllowedKeys if getattr(self, k)]",
    "docstring": "Return a list of the names of the subplot parameters explicitly set in the GridSpec. This is a subset of the attributes of .",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\gridspec.py",
    "ast_data": "FunctionDef name:locally_modified_subplot_params arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "indices",
    "source_code": "@cache_readonly\ndef indices(self) -> dict[Hashable, npt.NDArray[np.intp]]:\n    if len(self.groupings) == 1 and isinstance(self.result_index, CategoricalIndex):\n        return self.groupings[0].indices\n    codes_list = [ping.codes for ping in self.groupings]\n    return get_indexer_dict(codes_list, self.levels)",
    "docstring": "dict {group name -> group indices}",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\groupby\\ops.py",
    "ast_data": "FunctionDef name:indices arg:self arguments arg If BoolOp Compare Call Call Return return:yes Assign Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "make_sharded_variable_creator",
    "source_code": "def make_sharded_variable_creator(hosts: List[Text]) -> Callable[..., TPUEmbeddingVariable]:\n\n    def sharded_variable_creator(next_creator: Callable[..., tf_variables.Variable], *args, **kwargs):\n        kwargs['skip_mirrored_creator'] = True\n        num_hosts = len(hosts)\n        name, shape, dtype, unwrapped_initial_value = extract_variable_info(kwargs)\n        initial_value = kwargs['initial_value']\n        rows = shape[0]\n        cols = shape[1]\n        partial_partition = rows % num_hosts\n        full_rows_per_host = rows // num_hosts\n        partitions = [full_rows_per_host + 1] * partial_partition + [full_rows_per_host] * (num_hosts - partial_partition)\n        variables = []\n        sharding_aware = 'shard_info' in tf_inspect.getargspec(initial_value).args\n        offset = 0\n        kwargs['dtype'] = dtype\n        for i, p in enumerate(partitions):\n            if p == 0:\n                continue\n            with ops.device(hosts[i]):\n                kwargs['name'] = '{}_{}'.format(name, i)\n                kwargs['shape'] = (p, cols)\n                if sharding_aware:\n                    shard_info = base.ShardInfo(kwargs['shape'], (offset, 0))\n                    kwargs['initial_value'] = functools.partial(initial_value, shard_info=shard_info)\n                    offset += p\n                else:\n                    kwargs['initial_value'] = functools.partial(unwrapped_initial_value, kwargs['shape'], dtype=dtype)\n                variables.append(next_creator(*args, **kwargs))\n        return TPUEmbeddingVariable(variables, name=name)\n    return sharded_variable_creator",
    "docstring": "Makes a sharded variable creator given a list of hosts. Args: hosts: a list of tensorflow devices on which to shard the tensors. Returns: A variable creator function.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\tpu\\tpu_embedding_v2.py",
    "ast_data": "FunctionDef name:make_sharded_variable_creator arg:hosts arguments arg FunctionDef name:sharded_variable_creator arg:next_creator arguments arg arg arg Assign Assign Call Assign Call Assign Assign Assign Assign Assign Assign Assign Assign Compare Call Assign Assign For Call If Compare With Call Assign Call Assign If Assign Call Assign Call Assign Call Call Call Return return:yes Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "is_evenly_distributed_thresholds",
    "source_code": "def is_evenly_distributed_thresholds(thresholds):\n    num_thresholds = len(thresholds)\n    if num_thresholds < 3:\n        return False\n    even_thresholds = np.arange(num_thresholds, dtype=np.float32) / (num_thresholds - 1)\n    return np.allclose(thresholds, even_thresholds, atol=backend.epsilon())",
    "docstring": "Check if the thresholds list is evenly distributed. We could leverage evenly distributed thresholds to use less memory when calculate metrcis like AUC where each individual threshold need to be evaluated. Args: thresholds: A python list or tuple, or 1D numpy array whose value is ranged in [0, 1]. Returns: boolean, whether the values in the inputs are evenly distributed.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\utils\\metrics_utils.py",
    "ast_data": "FunctionDef name:is_evenly_distributed_thresholds arg:thresholds arguments arg Assign Call If Compare Return return:yes Assign Call Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "add_layer_norm_constraints",
    "source_code": "def add_layer_norm_constraints(input_dim, normalized_dim):\n    if len(normalized_dim) > len(input_dim):\n        return [F()]\n    else:\n        constraints = []\n        for i, n in zip(reversed(input_dim), reversed(normalized_dim)):\n            constraints.append(BinConstraintD(i, n, op_consistency))\n        return constraints",
    "docstring": "The constraints say that the type has te form: [*, 1024, 1024] while the normalized_dim have the form [1024, 1024] Args: input_dim: Input shape of layer norm normalized_dim: normalized_dim parameter of the module instance",
    "type": "function",
    "file_path": "pytorch\\torch\\fx\\experimental\\migrate_gradual_types\\constraint_generator.py",
    "ast_data": "FunctionDef name:add_layer_norm_constraints arg:input_dim arg:normalized_dim arguments arg arg If Compare Call Call Return return:yes Call Assign For Call Call Call Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "shape_type_conversion",
    "source_code": "def shape_type_conversion(fn):\n\n    def wrapper(instance, input_shape):\n        if input_shape is not None:\n            input_shape = convert_shapes(input_shape, to_tuples=True)\n        output_shape = fn(instance, input_shape)\n        if output_shape is not None:\n            output_shape = convert_shapes(output_shape, to_tuples=False)\n        return output_shape\n    return wrapper",
    "docstring": "Decorator that handles tuple/TensorShape conversion. Used in and . Args: fn: function to wrap. Returns: Wrapped function.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\utils\\tf_utils.py",
    "ast_data": "FunctionDef name:shape_type_conversion arg:fn arguments arg FunctionDef name:wrapper arg:instance arg:input_shape arguments arg arg If Compare Assign Call Assign Call If Compare Assign Call Return return:yes Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_reorg_layouts",
    "source_code": "def _reorg_layouts(layouts: Sequence[sparse_core_layout_pb2.SparseCoreTableLayout]) -> Mapping[str, Sequence[sparse_core_layout_pb2.SparseCoreTableLayout]]:\n    stacked_name_to_table_names = collections.defaultdict(list)\n    for layout in layouts:\n        stacked_name_to_table_names[layout.stacked_table_name].append(layout)\n    for stacked_name in stacked_name_to_table_names.keys():\n        sorted_layouts = sorted(stacked_name_to_table_names[stacked_name], key=lambda layout: layout.sparse_core_shard_row_offset)\n        stacked_name_to_table_names[stacked_name] = sorted_layouts\n    return stacked_name_to_table_names",
    "docstring": "Reorg the layouts to be in the order of the logical table.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\tpu\\tpu_embedding_v3_checkpoint_adapter.py",
    "ast_data": "FunctionDef name:_reorg_layouts arg:layouts arguments arg Assign Call For Call For Call Assign Call arguments arg Assign Return return:yes"
  },
  {
    "library": "scipy",
    "name": "get_results",
    "source_code": "def get_results(params, Z, n_jobs=1, compute_mp=True):\n    input_ = ((a, b, c, z, group) for (a, b, c, group), z in product(params, Z))\n    with Pool(n_jobs) as pool:\n        rows = pool.starmap(get_result if compute_mp else get_result_no_mp, input_)\n    return rows",
    "docstring": "Batch compute results for multiple parameter and argument values. Parameters ---------- params : iterable iterable of tuples of floats (a, b, c) specifying parameter values a, b, c for hyp2f1 Z : iterable of complex Arguments at which to evaluate hyp2f1 n_jobs : Optional[int] Number of jobs for parallel execution. Returns ------- list List of tuples of results values. See return value in source code of .",
    "type": "function",
    "file_path": "scipy\\scipy\\special\\_precompute\\hyp2f1_data.py",
    "ast_data": "FunctionDef name:get_results arg:params arg:Z arg:n_jobs arg:compute_mp arguments arg arg arg arg Assign Call With Call Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_save_function_alias",
    "source_code": "def _save_function_alias(saved_model_dir: str, tags: Collection[str], function_aliases: Mapping[str, str]) -> None:\n    loader = saved_model_loader.SavedModelLoader(saved_model_dir)\n    meta_graph_def = loader.get_meta_graph_def_from_tags(tags)\n    for function_name, function_alias in function_aliases.items():\n        meta_graph_def.meta_info_def.function_aliases[function_name] = function_alias\n    saved_model_proto_serialized = loader.saved_model.SerializeToString()\n    path = file_io.join(saved_model_dir, saved_model_constants.SAVED_MODEL_FILENAME_PB)\n    file_io.atomic_write_string_to_file(path, saved_model_proto_serialized)",
    "docstring": "Saves the function alias to the SavedModel. SavedModelBuilder (TF1 saved model saver) does not support saving function aliases, so this function loads the SavedModel proto and adds the field. Args: saved_model_dir: Path to the saved model directory. tags: A collection of tags to specify the meta graph. function_aliases: Function name -> function alias mapping.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\compiler\\mlir\\quantization\\tensorflow\\python\\save_model.py",
    "ast_data": "FunctionDef name:_save_function_alias arg:saved_model_dir arg:tags arg:function_aliases arguments arg arg arg Assign Call Assign Call For Call Assign Assign Call Assign Call Call"
  },
  {
    "library": "tensorflow",
    "name": "MetricsAggregator",
    "source_code": "class MetricsAggregator(Aggregator):\n\n    def __init__(self, use_steps, num_samples=None, steps=None):\n        super(MetricsAggregator, self).__init__(use_steps=use_steps, num_samples=num_samples, steps=steps, batch_size=None)\n\n    def create(self, batch_outs):\n        self.results = [0.0] * len(batch_outs)\n\n    def aggregate(self, batch_outs, batch_start=None, batch_end=None):\n        if self.use_steps:\n            self.results[0] += batch_outs[0]\n        else:\n            self.results[0] += batch_outs[0] * (batch_end - batch_start)\n        self.results[1:] = batch_outs[1:]\n\n    def finalize(self):\n        if not self.results:\n            raise ValueError('Empty training data.')\n        self.results[0] /= self.num_samples or self.steps",
    "docstring": "Aggregator that calculates loss and metrics info. Attributes: use_steps: Whether the loop is using or . num_samples: Total number of samples: . steps: Total number of steps, ie number of times to iterate over a dataset to cover all samples.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\training_utils_v1.py",
    "ast_data": "ClassDef name:MetricsAggregator FunctionDef name:__init__ arg:self arg:use_steps arg:num_samples arg:steps arguments arg arg arg arg Call Call FunctionDef name:create arg:self arg:batch_outs arguments arg arg Assign Call FunctionDef name:aggregate arg:self arg:batch_outs arg:batch_start arg:batch_end arguments arg arg arg arg If Assign FunctionDef name:finalize arg:self arguments arg If Raise Call BoolOp"
  },
  {
    "library": "tensorflow",
    "name": "set_device_filters",
    "source_code": "def set_device_filters(self, job_name, task_index, device_filters):\n    assert all((isinstance(df, str) for df in device_filters))\n    self._device_filters.setdefault(job_name, {})\n    self._device_filters[job_name][task_index] = [df for df in device_filters]\n    self._cluster_device_filters = None",
    "docstring": "Set the device filters for given job name and task id.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\training\\server_lib.py",
    "ast_data": "FunctionDef name:set_device_filters arg:self arg:job_name arg:task_index arg:device_filters arguments arg arg arg arg Call Call Call Assign Assign"
  },
  {
    "library": "pytorch",
    "name": "get_run_state",
    "source_code": "@classmethod\ndef get_run_state(cls, backend_name: str, subsystem_name: str) -> Optional[str]:\n    file_path = os.path.join(cls.get_dir(), backend_name, f'{subsystem_name}_run_state.txt')\n    lines = cls.read_lines_from_file(file_path)\n    if lines:\n        out = lines[0].strip()\n        assert out in ('test_disable', 'find_max_bounds', 'bisect')\n        return out\n    return None",
    "docstring": "Returns the current stage of bisecting, if Any",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\compiler_bisector.py",
    "ast_data": "FunctionDef name:get_run_state arg:cls arg:backend_name arg:subsystem_name arguments arg arg arg Assign Call Call Assign Call If Assign Call Compare Return return:yes Return return:no"
  },
  {
    "library": "sphinx",
    "name": "read_source",
    "source_code": "def read_source(self, env: BuildEnvironment) -> str:\n    content = self.source.read()\n    arg = [content]\n    env.events.emit('source-read', env.docname, arg)\n    return arg[0]",
    "docstring": "Read content from source and do post-process.",
    "type": "method",
    "file_path": "sphinx\\sphinx\\io.py",
    "ast_data": "FunctionDef name:read_source arg:self arg:env arguments arg arg Assign Call Assign Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "to_local",
    "source_code": "def to_local(self, *, grad_placements: Optional[Sequence[Placement]]=None) -> torch.Tensor:\n    if not torch.is_grad_enabled():\n        return self._local_tensor\n    if grad_placements is not None and (not isinstance(grad_placements, tuple)):\n        grad_placements = tuple(grad_placements)\n    return _ToTorchTensor.apply(self, grad_placements)",
    "docstring": "Get the local tensor of this DTensor on its current rank. For sharding it returns a local shard of the logical tensor view, for replication it returns the replica on its current rank. Keyword args: grad_placements (List[:class:], optional): the placements describes the future layout of any gradient layout of the Tensor returned from this function. converts DTensor to local tensor and the returned local tensor might not be used as the original DTensor layout later in the code. This argument is the hint that user can give to autograd in case the gradient layout of the returned tensor does not match the original DTensor layout. If not specified, we will assume the gradient layout remains the same as the original DTensor and use that for gradient computation. Returns: A :class: or `DTensor` requires_grad or not.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\tensor\\_api.py",
    "ast_data": "FunctionDef name:to_local arg:self arguments arg arg If Call Return return:yes If BoolOp Compare Call Assign Call Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "_check_indexing_method",
    "source_code": "@final\ndef _check_indexing_method(self, method: str_t | None, limit: int | None=None, tolerance=None) -> None:\n    if method not in [None, 'bfill', 'backfill', 'pad', 'ffill', 'nearest']:\n        raise ValueError('Invalid fill method')\n    if self._is_multi:\n        if method == 'nearest':\n            raise NotImplementedError(\"method='nearest' not implemented yet for MultiIndex; see GitHub issue 9365\")\n        if method in ('pad', 'backfill'):\n            if tolerance is not None:\n                raise NotImplementedError('tolerance not implemented yet for MultiIndex')\n    if isinstance(self.dtype, (IntervalDtype, CategoricalDtype)):\n        if method is not None:\n            raise NotImplementedError(f'method {method} not yet implemented for {type(self).__name__}')\n    if method is None:\n        if tolerance is not None:\n            raise ValueError('tolerance argument only valid if doing pad, backfill or nearest reindexing')\n        if limit is not None:\n            raise ValueError('limit argument only valid if doing pad, backfill or nearest reindexing')",
    "docstring": "Raise if we have a get_indexer that is not supported or valid.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\indexes\\base.py",
    "ast_data": "FunctionDef name:_check_indexing_method arg:self arg:method arg:limit arg:tolerance arguments arg arg arg arg If Compare Raise Call If If Compare Raise Call If Compare If Compare Raise Call If Call If Compare Raise Call Call If Compare If Compare Raise Call If Compare Raise Call"
  },
  {
    "library": "pytorch",
    "name": "_partial_update",
    "source_code": "def _partial_update(original: torch.Tensor, new: torch.Tensor, dim: int, n_chunks: int, idx: int, add: bool) -> torch.Tensor:\n    chunks = list(original.chunk(n_chunks, dim=dim))\n    assert chunks[idx].shape == new.shape, (original.shape, new.shape, idx)\n    if add:\n        chunks[idx] += new\n    else:\n        chunks[idx] = new\n    return torch.cat(chunks, dim=dim)",
    "docstring": "This API partially update a chunk of ``.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\tensor\\experimental\\_attention.py",
    "ast_data": "FunctionDef name:_partial_update arg:original arg:new arg:dim arg:n_chunks arg:idx arg:add arguments arg arg arg arg arg arg Assign Call Call Compare If Assign Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "device",
    "source_code": "class device:\n\n    def __init__(self, device: Any):\n        self.idx = _get_device_index(device, optional=True)\n        self.prev_idx = -1\n\n    def __enter__(self):\n        self.prev_idx = torch.xpu._exchange_device(self.idx)\n\n    def __exit__(self, type: Any, value: Any, traceback: Any):\n        self.idx = torch.xpu._maybe_exchange_device(self.prev_idx)\n        return False",
    "docstring": "Context-manager that changes the selected device. Args: device (torch.device or int or str): device index to select. It's a no-op if this argument is a negative integer or ``.",
    "type": "class",
    "file_path": "pytorch\\torch\\xpu\\__init__.py",
    "ast_data": "ClassDef name:device FunctionDef name:__init__ arg:self arg:device arguments arg arg Assign Call Assign FunctionDef name:__enter__ arg:self arguments arg Assign Call FunctionDef name:__exit__ arg:self arg:type arg:value arg:traceback arguments arg arg arg arg Assign Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "find_last_k",
    "source_code": "def find_last_k(self):\n    dtype = self.args.dtypes[0]\n    df = pd.read_csv(self.lookup_file, names=('day', 'mode', 'prec', 'path'))\n    df = df[df['mode'] == 'performance']\n    df = df[df['prec'] == dtype]\n    log_infos = []\n    for day, path in zip(df['day'], df['path']):\n        log_infos.append(LogInfo(day, path))\n    assert len(log_infos) >= self.k\n    log_infos = log_infos[len(log_infos) - self.k:]\n    return log_infos",
    "docstring": "Find the last k pairs of (day number, log_path)",
    "type": "method",
    "file_path": "pytorch\\benchmarks\\dynamo\\runner.py",
    "ast_data": "FunctionDef name:find_last_k arg:self arguments arg Assign Assign Call Assign Compare Assign Compare Assign For Call Call Call Compare Call Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, action, debug_urls, debug_ops='DebugIdentity', node_name_regex_allowlist=None, op_type_regex_allowlist=None, tensor_dtype_regex_allowlist=None, tolerate_debug_op_creation_failures=False):\n    _check_type(action, str)\n    self.action = action\n    _check_type(debug_urls, list)\n    self.debug_urls = debug_urls\n    self.debug_ops = debug_ops\n    self.node_name_regex_allowlist = node_name_regex_allowlist\n    self.op_type_regex_allowlist = op_type_regex_allowlist\n    self.tensor_dtype_regex_allowlist = tensor_dtype_regex_allowlist\n    self.tolerate_debug_op_creation_failures = tolerate_debug_op_creation_failures",
    "docstring": "Constructor of . Args: action: () the action actually taken by the wrapped session for the run() call. debug_urls: ( of ) debug_urls used in watching the tensors during the run() call. debug_ops: ( or of ) Debug op(s) to be used by the debugger. node_name_regex_allowlist: Regular-expression allowlist for node name. op_type_regex_allowlist: Regular-expression allowlist for op type. tensor_dtype_regex_allowlist: Regular-expression allowlist for tensor dtype. tolerate_debug_op_creation_failures: Whether debug op creation failures are to be tolerated.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\debug\\wrappers\\framework.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:action arg:debug_urls arg:debug_ops arg:node_name_regex_allowlist arg:op_type_regex_allowlist arg:tensor_dtype_regex_allowlist arg:tolerate_debug_op_creation_failures arguments arg arg arg arg arg arg arg arg Call Assign Call Assign Assign Assign Assign Assign Assign"
  },
  {
    "library": "pandas",
    "name": "_expand_ellipsis",
    "source_code": "@final\ndef _expand_ellipsis(self, tup: tuple) -> tuple:\n    if any((x is Ellipsis for x in tup)):\n        if tup.count(Ellipsis) > 1:\n            raise IndexingError(_one_ellipsis_message)\n        if len(tup) == self.ndim:\n            i = tup.index(Ellipsis)\n            new_key = tup[:i] + (_NS,) + tup[i + 1:]\n            return new_key\n    return tup",
    "docstring": "If a tuple key includes an Ellipsis, replace it with an appropriate number of null slices.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\indexing.py",
    "ast_data": "FunctionDef name:_expand_ellipsis arg:self arg:tup arguments arg arg If Call Compare If Compare Call Raise Call If Compare Call Assign Call Assign Return return:yes Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "__ne__",
    "source_code": "def __ne__(self, other):\n    if tensor_lib.Tensor._USE_EQUALITY and ops.executing_eagerly_outside_functions():\n        return gen_math_ops.not_equal(self, other, incompatible_shape_error=False)\n    else:\n        return self is not other",
    "docstring": "Compares two variables element-wise for equality.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\variables.py",
    "ast_data": "FunctionDef name:__ne__ arg:self arg:other arguments arg arg If BoolOp Call Return return:yes Call Return return:yes Compare"
  },
  {
    "library": "django",
    "name": "timezone_tag",
    "source_code": "@register.tag('timezone')\ndef timezone_tag(parser, token):\n    bits = token.split_contents()\n    if len(bits) != 2:\n        raise TemplateSyntaxError(\"'%s' takes one argument (timezone)\" % bits[0])\n    tz = parser.compile_filter(bits[1])\n    nodelist = parser.parse(('endtimezone',))\n    parser.delete_first_token()\n    return TimezoneNode(nodelist, tz)",
    "docstring": "Enable a given time zone just for this block. The ``, the default time zone is used within the block. Sample usage:: {% timezone \"Europe/Paris\" %} It is {{ now }} in Paris. {% endtimezone %}",
    "type": "function",
    "file_path": "django\\django\\templatetags\\tz.py",
    "ast_data": "FunctionDef name:timezone_tag arg:parser arg:token arguments arg arg Assign Call If Compare Call Raise Call Assign Call Assign Call Call Return return:yes Call Call"
  },
  {
    "library": "django",
    "name": "tuple",
    "source_code": "@property\ndef tuple(self):\n    return tuple((self[i].tuple for i in range(self.geom_count)))",
    "docstring": "Return a tuple of LinearRing coordinate tuples.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\gdal\\geometries.py",
    "ast_data": "FunctionDef name:tuple arg:self arguments arg Return return:yes Call Call"
  },
  {
    "library": "cherrypy",
    "name": "get_context",
    "source_code": "def get_context(obj):\n    if isinstance(obj, _cprequest.Request):\n        return 'path=%s;stage=%s' % (obj.path_info, obj.stage)\n    elif isinstance(obj, _cprequest.Response):\n        return 'status=%s' % obj.status\n    elif isinstance(obj, _cpwsgi.AppResponse):\n        return 'PATH_INFO=%s' % obj.environ.get('PATH_INFO', '')\n    elif hasattr(obj, 'tb_lineno'):\n        return 'tb_lineno=%s' % obj.tb_lineno\n    return ''",
    "docstring": "Compute object's runtime context information.",
    "type": "function",
    "file_path": "cherrypy\\cherrypy\\lib\\gctools.py",
    "ast_data": "FunctionDef name:get_context arg:obj arguments arg If Call Return return:yes If Call Return return:yes If Call Return return:yes Call If Call Return return:yes Return return:yes"
  },
  {
    "library": "django",
    "name": "__init__",
    "source_code": "def __init__(self, path=None, cache=0, country=None, city=None):\n    if cache not in self.cache_options:\n        raise GeoIP2Exception('Invalid GeoIP caching option: %s' % cache)\n    path = path or getattr(settings, 'GEOIP_PATH', None)\n    city = city or getattr(settings, 'GEOIP_CITY', 'GeoLite2-City.mmdb')\n    country = country or getattr(settings, 'GEOIP_COUNTRY', 'GeoLite2-Country.mmdb')\n    if not path:\n        raise GeoIP2Exception('GeoIP path must be provided via parameter or the GEOIP_PATH setting.')\n    path = to_path(path)\n    for path in (path, path / city, path / country):\n        if path.is_file():\n            self._path = path\n            self._reader = geoip2.database.Reader(path, mode=cache)\n            break\n    else:\n        raise GeoIP2Exception('Path must be a valid database or directory containing databases.')\n    database_type = self._metadata.database_type\n    if database_type not in SUPPORTED_DATABASE_TYPES:\n        raise GeoIP2Exception(f'Unable to handle database edition: {database_type}')",
    "docstring": "Initialize the GeoIP object. No parameters are required to use default settings. Keyword arguments may be passed in to customize the locations of the GeoIP datasets. * path: Base directory to where GeoIP data is located or the full path to where the city or country data files (*.mmdb) are located. Assumes that both the city and country data sets are located in this directory; overrides the GEOIP_PATH setting. * cache: The cache settings when opening up the GeoIP datasets. May be an integer in (0, 1, 2, 4, 8) corresponding to the MODE_AUTO, MODE_MMAP_EXT, MODE_MMAP, MODE_FILE, and MODE_MEMORY, C API settings, respectively. Defaults to 0, meaning MODE_AUTO. * country: The name of the GeoIP country data file. Defaults to 'GeoLite2-Country.mmdb'; overrides the GEOIP_COUNTRY setting. * city: The name of the GeoIP city data file. Defaults to 'GeoLite2-City.mmdb'; overrides the GEOIP_CITY setting.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\geoip2.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:path arg:cache arg:country arg:city arguments arg arg arg arg arg If Compare Raise Call Assign BoolOp Call Assign BoolOp Call Assign BoolOp Call If Raise Call Assign Call For If Call Assign Assign Call Raise Call Assign If Compare Raise Call"
  },
  {
    "library": "tensorflow",
    "name": "get_from_env_or_user_or_default",
    "source_code": "def get_from_env_or_user_or_default(environ_cp, var_name, ask_for_var, var_default):\n    var = environ_cp.get(var_name)\n    if var is None:\n        var = get_input(ask_for_var)\n        print('\\n')\n    if not var:\n        var = var_default\n    return var",
    "docstring": "Get var_name either from env, or user or default. If var_name has been set as environment variable, use the preset value, else ask for user input. If no input is provided, the default is used. Args: environ_cp: copy of the os.environ. var_name: string for name of environment variable, e.g. \"TF_NEED_CUDA\". ask_for_var: string for how to ask for user input. var_default: default value string. Returns: string value for var_name",
    "type": "function",
    "file_path": "tensorflow\\configure.py",
    "ast_data": "FunctionDef name:get_from_env_or_user_or_default arg:environ_cp arg:var_name arg:ask_for_var arg:var_default arguments arg arg arg arg Assign Call If Compare Assign Call Call If Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_loss_for_variable",
    "source_code": "def _loss_for_variable(v):\n    with backend.name_scope(name + '/Regularizer'):\n        regularization = regularizer(v)\n    return regularization",
    "docstring": "Creates a regularization loss for variable .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\base_layer_v1.py",
    "ast_data": "FunctionDef name:_loss_for_variable arg:v arguments arg With Call Assign Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "loss_masks",
    "source_code": "def loss_masks(self, outputs, targets, indices, num_boxes):\n    assert 'pred_masks' in outputs\n    src_idx = self._get_src_permutation_idx(indices)\n    tgt_idx = self._get_tgt_permutation_idx(indices)\n    src_masks = outputs['pred_masks']\n    target_masks, valid = nested_tensor_from_tensor_list([t['masks'] for t in targets]).decompose()\n    target_masks = target_masks.to(src_masks)\n    src_masks = src_masks[src_idx]\n    src_masks = interpolate(src_masks[:, None], size=target_masks.shape[-2:], mode='bilinear', align_corners=False)\n    src_masks = src_masks[:, 0].flatten(1)\n    target_masks = target_masks[tgt_idx].flatten(1)\n    losses = {'loss_mask': sigmoid_focal_loss(src_masks, target_masks, num_boxes), 'loss_dice': dice_loss(src_masks, target_masks, num_boxes)}\n    return losses",
    "docstring": "Compute the losses related to the masks: the focal loss and the dice loss. targets dicts must contain the key \"masks\" containing a tensor of dim [nb_target_boxes, h, w]",
    "type": "method",
    "file_path": "pytorch\\benchmarks\\functional_autograd_benchmark\\torchvision_models.py",
    "ast_data": "FunctionDef name:loss_masks arg:self arg:outputs arg:targets arg:indices arg:num_boxes arguments arg arg arg arg arg Compare Assign Call Assign Call Assign Assign Call Call Assign Call Assign Assign Call Assign Call Assign Call Assign Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, metrics=None, weighted_metrics=None, output_names=None, from_serialized=False):\n    super(MetricsContainer, self).__init__(output_names=output_names)\n    self._user_metrics = metrics\n    self._user_weighted_metrics = weighted_metrics\n    self._metrics = metrics\n    self._weighted_metrics = weighted_metrics\n    self._built = False\n    self._from_serialized = from_serialized",
    "docstring": "Initializes a container for metrics. Arguments: metrics: see the argument from . weighted_metrics: see the argument from . output_names: A list of strings of names of outputs for the model. from_serialized: Whether the model being compiled is from a serialized model. Used to avoid redundantly applying pre-processing renaming steps.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\compile_utils.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:metrics arg:weighted_metrics arg:output_names arg:from_serialized arguments arg arg arg arg arg Call Call Assign Assign Assign Assign Assign Assign"
  },
  {
    "library": "pytorch",
    "name": "_get_logger_for_subgraph",
    "source_code": "def _get_logger_for_subgraph(model: GraphModule, first_node: Node, last_node: Node, subgraph_idx: int, subgraph_candidate_idx: int, qconfig_str: str, logger_cls: Callable, fqn: Optional[str]) -> torch.nn.Module:\n    if fqn is None:\n        fqn = ''\n    logger_mod_orig = logger_cls(first_node.name, last_node.name, f'subgraph_{subgraph_idx}_{subgraph_candidate_idx}', 'model', get_target_type_str(last_node, model), get_target_type_str(first_node, model), NSSingleResultValuesType.NODE_OUTPUT.value, 0, 0, fqn, qconfig_str)\n    logger_mod_orig.enabled = False\n    return logger_mod_orig",
    "docstring": "Given a model and a linear subgraph starting from and ending with , creates a logger for the end of this subgraph.",
    "type": "function",
    "file_path": "pytorch\\torch\\ao\\ns\\fx\\n_shadows_utils.py",
    "ast_data": "FunctionDef name:_get_logger_for_subgraph arg:model arg:first_node arg:last_node arg:subgraph_idx arg:subgraph_candidate_idx arg:qconfig_str arg:logger_cls arg:fqn arguments arg arg arg arg arg arg arg arg If Compare Assign Assign Call Call Call Assign Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "__init__",
    "source_code": "def __init__(self, tensor_size, input_var, index1, index2, output):\n    assert isinstance(input_var, TVar)\n    assert isinstance(output, TVar)\n    assert isinstance(index1, int)\n    assert isinstance(index2, int)\n    self.input_var = input_var\n    self.tensor_size = tensor_size\n    self.index1 = index1\n    self.index2 = index2\n    self.output = output",
    "docstring": "Args: tensor_size: current tensor size input_var: variable to hold input index1: dimension 1 index2: dimension 2 output: output that stores result",
    "type": "method",
    "file_path": "pytorch\\torch\\fx\\experimental\\migrate_gradual_types\\constraint.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:tensor_size arg:input_var arg:index1 arg:index2 arg:output arguments arg arg arg arg arg arg Call Call Call Call Assign Assign Assign Assign Assign"
  },
  {
    "library": "pytorch",
    "name": "enter_freezing",
    "source_code": "@contextlib.contextmanager\ndef enter_freezing() -> Generator[Any, None, None]:\n    prev = _freezing_active()\n    _TLS.freezing_active = True\n    try:\n        yield\n    finally:\n        _TLS.freezing_active = prev",
    "docstring": "Context manager to designate when freezing is active.",
    "type": "function",
    "file_path": "pytorch\\torch\\_inductor\\freezing_utils.py",
    "ast_data": "FunctionDef name:enter_freezing arguments Assign Call Assign Try Assign"
  },
  {
    "library": "seaborn",
    "name": "_boxen_scale_backcompat",
    "source_code": "def _boxen_scale_backcompat(self, scale, width_method):\n    if scale is not deprecated:\n        width_method = scale\n        msg = f'\\n\\nThe `scale` parameter has been renamed to `width_method` and will be removed in v0.15. Pass `width_method={scale!r}'\n        if scale == 'area':\n            msg += \", but note that the result for 'area' will appear different.\"\n        else:\n            msg += ' for the same effect.'\n        warnings.warn(msg, FutureWarning, stacklevel=3)\n    return width_method",
    "docstring": "Provide two cycles of backcompat for scale kwargs",
    "type": "method",
    "file_path": "seaborn\\seaborn\\categorical.py",
    "ast_data": "FunctionDef name:_boxen_scale_backcompat arg:self arg:scale arg:width_method arguments arg arg arg If Compare Assign Assign If Compare Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "symbolic_tensor_id",
    "source_code": "def symbolic_tensor_id(self, graph_id, op_name, output_slot):\n    return self._graph_by_id[graph_id].get_tensor_id(op_name, output_slot)",
    "docstring": "Get the ID of a symbolic tensor. Args: graph_id: The ID of the immediately-enclosing graph. op_name: Name of the op. output_slot: Output slot as an int. Returns: The ID of the symbolic tensor as an int.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\debug\\lib\\debug_events_reader.py",
    "ast_data": "FunctionDef name:symbolic_tensor_id arg:self arg:graph_id arg:op_name arg:output_slot arguments arg arg arg arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "mean",
    "source_code": "@property\ndef mean(self) -> Tensor:\n    return self.loc",
    "docstring": "The provided mean is the circular one.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributions\\von_mises.py",
    "ast_data": "FunctionDef name:mean arg:self arguments arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "range_push",
    "source_code": "def range_push(msg):\n    return _itt.rangePush(msg)",
    "docstring": "Pushes a range onto a stack of nested range span. Returns zero-based depth of the range that is started. Arguments: msg (str): ASCII message to associate with range",
    "type": "function",
    "file_path": "pytorch\\torch\\profiler\\itt.py",
    "ast_data": "FunctionDef name:range_push arg:msg arguments arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "synchronize",
    "source_code": "def synchronize(device: _device_t=None) -> None:\n    pass",
    "docstring": "Waits for all kernels in all streams on the CPU device to complete. Args: device (torch.device or int, optional): ignored, there's only one CPU device. N.B. This function only exists to facilitate device-agnostic code.",
    "type": "function",
    "file_path": "pytorch\\torch\\cpu\\__init__.py",
    "ast_data": "FunctionDef name:synchronize arg:device arguments arg"
  },
  {
    "library": "tensorflow",
    "name": "ctc_unique_labels",
    "source_code": "@tf_export('nn.ctc_unique_labels')\n@dispatch.add_dispatch_support\ndef ctc_unique_labels(labels, name=None):\n    with ops.name_scope(name, 'ctc_unique_labels', [labels]):\n        labels = ops.convert_to_tensor(labels, name='labels')\n\n        def _unique(x):\n            u = array_ops.unique(x)\n            y = array_ops.pad(u.y, [[0, _get_dim(u.idx, 0) - _get_dim(u.y, 0)]])\n            y = math_ops.cast(y, dtypes.int64)\n            return [y, u.idx]\n        return map_fn.map_fn(_unique, labels, dtype=[dtypes.int64, dtypes.int32])",
    "docstring": "Get unique labels and indices for batched labels for . For use with optional argument : This op can be used to preprocess labels in input pipeline to for better speed/memory use computing the ctc loss on TPU. Example: ctc_unique_labels([[3, 4, 4, 3]]) -> unique labels padded with 0: [[3, 4, 0, 0]] indices of original labels in unique: [0, 1, 1, 0] Args: labels: tensor of shape [batch_size, max_label_length] padded with 0. name: A name for this . Defaults to \"ctc_unique_labels\". Returns: tuple of - unique labels, tensor of shape - indices into unique labels, shape",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\ctc_ops.py",
    "ast_data": "FunctionDef name:ctc_unique_labels arg:labels arg:name arguments arg arg With Call Assign Call FunctionDef name:_unique arg:x arguments arg Assign Call Assign Call Call Call Assign Call Return return:yes Return return:yes Call Call"
  },
  {
    "library": "scikit-learn",
    "name": "predict_log_proba",
    "source_code": "@available_if(_estimator_has('predict_log_proba'))\ndef predict_log_proba(self, X):\n    _check_is_fitted(self)\n    estimator = getattr(self, 'estimator_', self.estimator)\n    return estimator.predict_log_proba(X)",
    "docstring": "Predict logarithm class probabilities for using the fitted estimator. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) Training vectors, where is the number of samples and is the number of features. Returns ------- log_probabilities : ndarray of shape (n_samples, n_classes) The logarithm class probabilities of the input samples.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\model_selection\\_classification_threshold.py",
    "ast_data": "FunctionDef name:predict_log_proba arg:self arg:X arguments arg arg Call Assign Call Return return:yes Call Call Call"
  },
  {
    "library": "scikit-learn",
    "name": "predict_proba",
    "source_code": "def predict_proba(self, X):\n    check_is_fitted(self)\n    ovr = self.multi_class in ['ovr', 'warn'] or (self.multi_class in ['auto', 'deprecated'] and (self.classes_.size <= 2 or self.solver == 'liblinear'))\n    if ovr:\n        return super()._predict_proba_lr(X)\n    else:\n        decision = self.decision_function(X)\n        if decision.ndim == 1:\n            decision_2d = np.c_[-decision, decision]\n        else:\n            decision_2d = decision\n        return softmax(decision_2d, copy=False)",
    "docstring": "Probability estimates. The returned estimates for all classes are ordered by the label of classes. For a multi_class problem, if multi_class is set to be \"multinomial\" the softmax function is used to find the predicted probability of each class. Else use a one-vs-rest approach, i.e. calculate the probability of each class assuming it to be positive using the logistic function and normalize these values across all the classes. Parameters ---------- X : array-like of shape (n_samples, n_features) Vector to be scored, where is the number of samples and is the number of features. Returns ------- T : array-like of shape (n_samples, n_classes) Returns the probability of the sample for each class in the model, where classes are ordered as they are in ``.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\linear_model\\_logistic.py",
    "ast_data": "FunctionDef name:predict_proba arg:self arg:X arguments arg arg Call Assign BoolOp Compare BoolOp Compare BoolOp Compare Compare If Return return:yes Call Call Assign Call If Compare Assign Assign Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "pre_compile",
    "source_code": "def pre_compile(self, flat_fn, flat_args: list[Tensor], aot_config: AOTConfig, *, fw_metadata: ViewAndMutationMeta) -> tuple[Callable, list[Tensor], ViewAndMutationMeta]:\n    return (flat_fn, flat_args, fw_metadata)",
    "docstring": "Process the inputs to the compiler_fn. You can pass in extra metadata via kwargs. Args: flat_fn: The function to compile flat_args: Metadata from example inputs of the function to compile aot_config: AOTConfig passed in at compile time fw_metadata: ViewAndMutationMeta generated from flat_fn and flat_args",
    "type": "method",
    "file_path": "pytorch\\torch\\_functorch\\_aot_autograd\\runtime_wrappers.py",
    "ast_data": "FunctionDef name:pre_compile arg:self arg:flat_fn arg:flat_args arg:aot_config arguments arg arg arg arg arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "mark_non_differentiable",
    "source_code": "def mark_non_differentiable(self, *args: Any, **kwargs: Any) -> None:\n    self.non_differentiable = tuple(_iter_tensors((args, kwargs)))",
    "docstring": "See :meth:.",
    "type": "method",
    "file_path": "pytorch\\torch\\autograd\\function.py",
    "ast_data": "FunctionDef name:mark_non_differentiable arg:self arguments arg arg arg Assign Call Call"
  },
  {
    "library": "kornia",
    "name": "update",
    "source_code": "def update(self, key: str, val: float, batch_size: int) -> None:\n    if key not in self._stats:\n        self._stats[key] = AverageMeter()\n    self._stats[key].update(val, batch_size)",
    "docstring": "Update the stats by the key value pair.",
    "type": "method",
    "file_path": "kornia\\kornia\\x\\utils.py",
    "ast_data": "FunctionDef name:update arg:self arg:key arg:val arg:batch_size arguments arg arg arg arg If Compare Assign Call Call"
  },
  {
    "library": "sphinx",
    "name": "epoch_to_rfc1123",
    "source_code": "def epoch_to_rfc1123(epoch: float) -> str:\n    yr, mn, dd, hh, mm, ss, wd, _yd, _tz = time.gmtime(epoch)\n    weekday_name = _WEEKDAY_NAME[wd]\n    month = _MONTH_NAME[mn]\n    return f'{weekday_name}, {dd:02} {month} {yr:04} {hh:02}:{mm:02}:{ss:02} GMT'",
    "docstring": "Return HTTP-date string from epoch offset.",
    "type": "function",
    "file_path": "sphinx\\sphinx\\util\\http_date.py",
    "ast_data": "FunctionDef name:epoch_to_rfc1123 arg:epoch arguments arg Assign Call Assign Assign Return return:yes"
  },
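A self-contained sketch of the sphinx helper above; the `_WEEKDAY_NAME`/`_MONTH_NAME` tables are module-level in sphinx, so plausible values are filled in here for illustration:

```python
import time

# Assumed lookup tables; sphinx defines these at module level.
_WEEKDAY_NAME = ('Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun')
_MONTH_NAME = ('', 'Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun',
               'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec')

def epoch_to_rfc1123(epoch: float) -> str:
    # time.gmtime unpacks into exactly the nine fields used here.
    yr, mn, dd, hh, mm, ss, wd, _yd, _tz = time.gmtime(epoch)
    return (f'{_WEEKDAY_NAME[wd]}, {dd:02} {_MONTH_NAME[mn]} {yr:04} '
            f'{hh:02}:{mm:02}:{ss:02} GMT')

print(epoch_to_rfc1123(0))  # 'Thu, 01 Jan 1970 00:00:00 GMT'
```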
  {
    "library": "django",
    "name": "render",
    "source_code": "def render(self, context):\n    resolved_args, resolved_kwargs = self.get_resolved_arguments(context)\n    _dict = self.func(*resolved_args, **resolved_kwargs)\n    t = context.render_context.get(self)\n    if t is None:\n        if isinstance(self.filename, Template):\n            t = self.filename\n        elif isinstance(getattr(self.filename, 'template', None), Template):\n            t = self.filename.template\n        elif not isinstance(self.filename, str) and isinstance(self.filename, Iterable):\n            t = context.template.engine.select_template(self.filename)\n        else:\n            t = context.template.engine.get_template(self.filename)\n        context.render_context[self] = t\n    new_context = context.new(_dict)\n    csrf_token = context.get('csrf_token')\n    if csrf_token is not None:\n        new_context['csrf_token'] = csrf_token\n    return t.render(new_context)",
    "docstring": "Render the specified template and context. Cache the template object in render_context to avoid reparsing and loading when used in a for loop.",
    "type": "method",
    "file_path": "django\\django\\template\\library.py",
    "ast_data": "FunctionDef name:render arg:self arg:context arguments arg arg Assign Call Assign Call Assign Call If Compare If Call Assign If Call Call Assign If BoolOp Call Call Assign Call Assign Call Assign Assign Call Assign Call If Compare Assign Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_check_config_keys",
    "source_code": "def _check_config_keys(config, expected_keys):\n    if set(config.keys()) != set(expected_keys):\n        raise ValueError('Invalid config: {}, expected keys: {}'.format(config, expected_keys))",
    "docstring": "Checks that a config has all expected_keys.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\feature_column\\feature_column_v2.py",
    "ast_data": "FunctionDef name:_check_config_keys arg:config arg:expected_keys arguments arg arg If Compare Call Call Call Raise Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_destroy_resource",
    "source_code": "def _destroy_resource(self):\n    pass",
    "docstring": "A function that destroys the resource. Optional.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\trackable\\resource.py",
    "ast_data": "FunctionDef name:_destroy_resource arg:self arguments arg"
  },
  {
    "library": "scipy",
    "name": "_fragment_3_1",
    "source_code": "def _fragment_3_1(norm_info, n0, tol, m_max=55, ell=2):\n    if ell < 1:\n        raise ValueError('expected ell to be a positive integer')\n    best_m = None\n    best_s = None\n    if _condition_3_13(norm_info.onenorm(), n0, m_max, ell):\n        for m, theta in _theta.items():\n            s = int(np.ceil(norm_info.onenorm() / theta))\n            if best_m is None or m * s < best_m * best_s:\n                best_m = m\n                best_s = s\n    else:\n        for p in range(2, _compute_p_max(m_max) + 1):\n            for m in range(p * (p - 1) - 1, m_max + 1):\n                if m in _theta:\n                    s = _compute_cost_div_m(m, p, norm_info)\n                    if best_m is None or m * s < best_m * best_s:\n                        best_m = m\n                        best_s = s\n        best_s = max(best_s, 1)\n    return (best_m, best_s)",
    "docstring": "A helper function for the _expm_multiply_* functions. Parameters ---------- norm_info : LazyOperatorNormInfo Information about norms of certain linear operators of interest. n0 : int Number of columns in the _expm_multiply_* B matrix. tol : float Expected to be :math: for single precision or :math: for double precision. m_max : int A value related to a bound. ell : int The number of columns used in the 1-norm approximation. This is usually taken to be small, maybe between 1 and 5. Returns ------- best_m : int Related to bounds for error control. best_s : int Amount of scaling. Notes ----- This is code fragment (3.1) in Al-Mohy and Higham (2011). The discussion of default values for m_max and ell is given between the definitions of equation (3.11) and the definition of equation (3.12).",
    "type": "function",
    "file_path": "scipy\\scipy\\sparse\\linalg\\_expm_multiply.py",
    "ast_data": "FunctionDef name:_fragment_3_1 arg:norm_info arg:n0 arg:tol arg:m_max arg:ell arguments arg arg arg arg arg If Compare Raise Call Assign Assign If Call Call For Call Assign Call Call Call If BoolOp Compare Compare Assign Assign For Call Call For Call If Compare Assign Call If BoolOp Compare Compare Assign Assign Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_init_global_step",
    "source_code": "def _init_global_step(self, global_step=USE_DEFAULT):\n    if global_step is Supervisor.USE_DEFAULT:\n        global_step = self._get_first_op_from_collection(ops.GraphKeys.GLOBAL_STEP)\n        if global_step is None:\n            global_step = self._default_global_step_tensor()\n            if global_step is not None:\n                ops.add_to_collection(ops.GraphKeys.GLOBAL_STEP, global_step)\n    self._global_step = global_step",
    "docstring": "Initializes global_step. Args: global_step: An integer Tensor of size 1 that counts steps. If set to USE_DEFAULT, creates global_step tensor.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\training\\supervisor.py",
    "ast_data": "FunctionDef name:_init_global_step arg:self arg:global_step arguments arg arg If Compare Assign Call If Compare Assign Call If Compare Call Assign"
  },
  {
    "library": "tensorflow",
    "name": "MeasureCosts",
    "source_code": "def MeasureCosts(self, item):\n    op_perf_bytes_list, run_time, step_stats_bytes = tf_cluster.TF_MeasureCosts(item.tf_item, self._tf_cluster, self._generate_timeline)\n    op_perfs = [op_performance_data_pb2.OpPerformance.FromString(op_perf_bytes) for op_perf_bytes in op_perf_bytes_list]\n    return (op_perfs, run_time, step_stats_pb2.StepStats.FromString(step_stats_bytes))",
    "docstring": "Returns the cost of running the specified item. Args: item: The item for which to measure the costs. Returns: The triplet op_perfs, runtime, step_stats.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\grappler\\cluster.py",
    "ast_data": "FunctionDef name:MeasureCosts arg:self arg:item arguments arg arg Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "scrapy",
    "name": "Portal",
    "source_code": "class Portal:\n\n    @defers\n    def login(self_, credentials, mind, *interfaces):\n        if not (credentials.username == self.username.encode('utf8') and credentials.checkPassword(self.password.encode('utf8'))):\n            raise ValueError('Invalid credentials')\n        protocol = telnet.TelnetBootstrapProtocol(insults.ServerProtocol, manhole.Manhole, self._get_telnet_vars())\n        return (interfaces[0], protocol, lambda: None)",
    "docstring": "An implementation of IPortal",
    "type": "class",
    "file_path": "scrapy\\scrapy\\extensions\\telnet.py",
    "ast_data": "ClassDef name:Portal FunctionDef name:login arg:self_ arg:credentials arg:mind arguments arg arg arg arg If BoolOp Compare Call Call Call Raise Call Assign Call Call Return return:yes arguments"
  },
  {
    "library": "kornia",
    "name": "random_prob_generator",
    "source_code": "def random_prob_generator(batch_size: int, p: float=0.5, same_on_batch: bool=False, device: Optional[torch.device]=None, dtype: torch.dtype=torch.float32) -> Tensor:\n    if device is None:\n        device = torch.device('cpu')\n    _common_param_check(batch_size, same_on_batch)\n    if not isinstance(p, (int, float)) or p > 1 or p < 0:\n        raise TypeError(f'The probability should be a float number within [0, 1]. Got {type(p)}.')\n    _bernoulli = Bernoulli(tensor(float(p), device=device, dtype=dtype))\n    probs_mask: Tensor = _adapted_sampling((batch_size,), _bernoulli, same_on_batch).bool()\n    return probs_mask",
    "docstring": "Generate random probabilities for a batch of inputs. Args: batch_size (int): the number of images. p (float): probability to generate an 1-d binary mask. Default value is 0.5. same_on_batch (bool): apply the same transformation across the batch. Default: False. device (torch.device): the device on which the random numbers will be generated. Default: cpu. dtype (torch.dtype): the data type of the generated random numbers. Default: float32. Returns: Tensor: parameters to be passed for transformation. - probs (Tensor): element-wise probabilities with a shape of (B,). Note: The generated random numbers are not reproducible across different devices and dtypes.",
    "type": "function",
    "file_path": "kornia\\kornia\\augmentation\\random_generator\\_2d\\probability.py",
    "ast_data": "FunctionDef name:random_prob_generator arg:batch_size arg:p arg:same_on_batch arg:device arg:dtype arguments arg arg arg arg arg If Compare Assign Call Call If BoolOp Call Compare Compare Raise Call Call Assign Call Call Call Call Call Return return:yes"
  },
  {
    "library": "kornia",
    "name": "override_parameters",
    "source_code": "def override_parameters(params: Dict[str, Any], params_override: Optional[Dict[str, Any]]=None, if_none_exist: str='ignore', in_place: bool=False) -> Dict[str, Any]:\n    if params_override is None:\n        return params\n    out = params if in_place else deepcopy_dict(params)\n    for k, v in params_override.items():\n        if k in params_override:\n            out[k] = v\n        elif if_none_exist == 'ignore':\n            pass\n        elif if_none_exist == 'raise':\n            raise RuntimeError(f'Param `{k}` not existed in `{params_override}`.')\n        else:\n            raise ValueError(f'`{if_none_exist}` is not a valid option.')\n    return out",
    "docstring": "Override params dict w.r.t params_override. Args: params: source parameters. params_override: key-values to override the source parameters. if_none_exist: behaviour if the key in does not exist in . 'raise' | 'ignore'. in_place: if to override in-place or not.",
    "type": "function",
    "file_path": "kornia\\kornia\\augmentation\\utils\\helpers.py",
    "ast_data": "FunctionDef name:override_parameters arg:params arg:params_override arg:if_none_exist arg:in_place arguments arg arg arg arg If Compare Return return:yes Assign Call For Call If Compare Assign If Compare If Compare Raise Call Raise Call Return return:yes"
  },
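To make the behaviour concrete, here is a standalone copy of the helper (with `copy.deepcopy` standing in for kornia's `deepcopy_dict`, and the membership test on `params` as the docstring describes) and the `if_none_exist` outcomes:

```python
import copy
from typing import Any, Dict, Optional

def override_parameters(params: Dict[str, Any],
                        params_override: Optional[Dict[str, Any]] = None,
                        if_none_exist: str = 'ignore',
                        in_place: bool = False) -> Dict[str, Any]:
    # Standalone sketch for illustration, not the library implementation.
    if params_override is None:
        return params
    out = params if in_place else copy.deepcopy(params)
    for k, v in params_override.items():
        if k in params:
            out[k] = v
        elif if_none_exist == 'ignore':
            pass
        elif if_none_exist == 'raise':
            raise RuntimeError(f'Param `{k}` does not exist in `{params}`.')
        else:
            raise ValueError(f'`{if_none_exist}` is not a valid option.')
    return out

base = {'p': 0.5, 'degrees': 15.0}
print(override_parameters(base, {'p': 1.0}))                # {'p': 1.0, 'degrees': 15.0}
print(override_parameters(base, {'unknown': 1}, 'ignore'))  # unknown key silently skipped
print(base)                                                 # original untouched (in_place=False)
# override_parameters(base, {'unknown': 1}, 'raise')        # -> RuntimeError
```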
  {
    "library": "django",
    "name": "_make_date_lookup_arg",
    "source_code": "def _make_date_lookup_arg(self, value):\n    if self.uses_datetime_field:\n        value = datetime.datetime.combine(value, datetime.time.min)\n        if settings.USE_TZ:\n            value = timezone.make_aware(value)\n    return value",
    "docstring": "Convert a date into a datetime when the date field is a DateTimeField. When time zone support is enabled, is assumed to be in the current time zone, so that displayed items are consistent with the URL.",
    "type": "method",
    "file_path": "django\\django\\views\\generic\\dates.py",
    "ast_data": "FunctionDef name:_make_date_lookup_arg arg:self arg:value arguments arg arg If Assign Call If Assign Call Return return:yes"
  },
  {
    "library": "cryptography",
    "name": "exchange",
    "source_code": "@abc.abstractmethod\ndef exchange(self, algorithm: ECDH, peer_public_key: EllipticCurvePublicKey) -> bytes:\n    pass",
    "docstring": "Performs a key exchange operation using the provided algorithm with the provided peer's public key.",
    "type": "method",
    "file_path": "cryptography\\src\\cryptography\\hazmat\\primitives\\asymmetric\\ec.py",
    "ast_data": "FunctionDef name:exchange arg:self arg:algorithm arg:peer_public_key arguments arg arg arg"
  },
  {
    "library": "scikit-learn",
    "name": "_get_n_samples_bootstrap",
    "source_code": "def _get_n_samples_bootstrap(n_samples, max_samples):\n    if max_samples is None:\n        return n_samples\n    if isinstance(max_samples, Integral):\n        if max_samples > n_samples:\n            msg = '`max_samples` must be <= n_samples={} but got value {}'\n            raise ValueError(msg.format(n_samples, max_samples))\n        return max_samples\n    if isinstance(max_samples, Real):\n        return max(round(n_samples * max_samples), 1)",
    "docstring": "Get the number of samples in a bootstrap sample. Parameters ---------- n_samples : int Number of samples in the dataset. max_samples : int or float The maximum number of samples to draw from the total available: - if float, this indicates a fraction of the total and should be the interval ; - if int, this indicates the exact number of samples; - if None, this indicates the total number of samples. Returns ------- n_samples_bootstrap : int The total number of samples to draw for the bootstrap sample.",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\ensemble\\_forest.py",
    "ast_data": "FunctionDef name:_get_n_samples_bootstrap arg:n_samples arg:max_samples arguments arg arg If Compare Return return:yes If Call If Compare Assign Raise Call Call Return return:yes If Call Return return:yes Call Call"
  },
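The int/float/None branches above are easiest to see with concrete values; a standalone copy of the helper for illustration:

```python
from numbers import Integral, Real

def _get_n_samples_bootstrap(n_samples, max_samples):
    # Standalone copy of the helper above, for illustration only.
    if max_samples is None:
        return n_samples
    if isinstance(max_samples, Integral):
        if max_samples > n_samples:
            raise ValueError(
                f'`max_samples` must be <= n_samples={n_samples} '
                f'but got value {max_samples}'
            )
        return max_samples
    if isinstance(max_samples, Real):
        return max(round(n_samples * max_samples), 1)

print(_get_n_samples_bootstrap(100, None))   # 100: use every sample
print(_get_n_samples_bootstrap(100, 30))     # 30: exact count
print(_get_n_samples_bootstrap(100, 0.25))   # 25: fraction of the total
print(_get_n_samples_bootstrap(100, 0.001))  # 1: clamped to at least one sample
```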
  {
    "library": "numpy",
    "name": "feature_get_til",
    "source_code": "def feature_get_til(self, names, keyisfalse):\n\n    def til(tnames):\n        tnames = self.feature_implies_c(tnames)\n        tnames = self.feature_sorted(tnames, reverse=True)\n        for i, n in enumerate(tnames):\n            if not self.feature_supported[n].get(keyisfalse, True):\n                tnames = tnames[:i + 1]\n                break\n        return tnames\n    if isinstance(names, str) or len(names) <= 1:\n        names = til(names)\n        names.reverse()\n        return names\n    names = self.feature_ahead(names)\n    names = {t for n in names for t in til(n)}\n    return self.feature_sorted(names)",
    "docstring": "same as but stop collecting implied features when feature's option that provided through parameter 'keyisfalse' is False, also sorting the returned features.",
    "type": "method",
    "file_path": "numpy\\numpy\\distutils\\ccompiler_opt.py",
    "ast_data": "FunctionDef name:feature_get_til arg:self arg:names arg:keyisfalse arguments arg arg arg FunctionDef name:til arg:tnames arguments arg Assign Call Assign Call For Call If Call Assign Return return:yes If BoolOp Call Compare Call Assign Call Call Return return:yes Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "upload_file_to_s3",
    "source_code": "def upload_file_to_s3(file_name: str, bucket: str, key: str) -> None:\n    print(f'Upload {file_name} to s3://{bucket}/{key}')\n    boto3.client('s3').upload_file(file_name, bucket, key)",
    "docstring": "Upload a local file to S3",
    "type": "function",
    "file_path": "pytorch\\tools\\stats\\upload_stats_lib.py",
    "ast_data": "FunctionDef name:upload_file_to_s3 arg:file_name arg:bucket arg:key arguments arg arg arg Call Call Call"
  },
  {
    "library": "matplotlib",
    "name": "changed",
    "source_code": "def changed(self):\n    self.callbacks.process('changed')\n    self.stale = True",
    "docstring": "Call this whenever the mappable is changed to notify all the callbackSM listeners to the 'changed' signal.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\colorizer.py",
    "ast_data": "FunctionDef name:changed arg:self arguments arg Call Assign"
  },
  {
    "library": "pytorch",
    "name": "get_default_qconfig",
    "source_code": "def get_default_qconfig(backend='x86', version=0):\n    supported_backends = ['fbgemm', 'x86', 'qnnpack', 'onednn']\n    if backend not in supported_backends:\n        raise AssertionError('backend: ' + str(backend) + f' not supported. backend must be one of {supported_backends}')\n    if version == 0:\n        if backend == 'fbgemm':\n            qconfig = QConfig(activation=HistogramObserver.with_args(reduce_range=True), weight=default_per_channel_weight_observer)\n        elif backend == 'qnnpack':\n            qconfig = QConfig(activation=HistogramObserver.with_args(reduce_range=False), weight=default_weight_observer)\n        elif backend == 'onednn':\n            if not torch.cpu._is_vnni_supported():\n                warnings.warn('Default qconfig of oneDNN backend with reduce_range of false may have accuracy issues on CPU without Vector Neural Network Instruction support.')\n            qconfig = QConfig(activation=HistogramObserver.with_args(reduce_range=False), weight=default_per_channel_weight_observer)\n        elif backend == 'x86':\n            qconfig = QConfig(activation=HistogramObserver.with_args(reduce_range=True), weight=default_per_channel_weight_observer)\n        else:\n            qconfig = default_qconfig\n    else:\n        raise AssertionError('Version number: ' + str(version) + ' in get_default_qconfig is not supported. Version number must be 0')\n    return qconfig",
    "docstring": "Returns the default PTQ qconfig for the specified backend. Args: * (str): a string representing the target backend. Currently supports (default), , and . Return: qconfig",
    "type": "function",
    "file_path": "pytorch\\torch\\ao\\quantization\\qconfig.py",
    "ast_data": "FunctionDef name:get_default_qconfig arg:backend arg:version arguments arg arg Assign If Compare Raise Call Call If Compare If Compare Assign Call Call If Compare Assign Call Call If Compare If Call Call Assign Call Call If Compare Assign Call Call Assign Raise Call Call Return return:yes"
  },
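A short usage sketch; `get_default_qconfig` is re-exported from `torch.ao.quantization`, and the printed observer types follow the branch logic above (treat the exact output as an assumption about your installed version):

```python
# Requires torch with quantization support installed.
from torch.ao.quantization import get_default_qconfig

qconfig = get_default_qconfig('x86')  # default backend
# 'x86' pairs a reduce_range=True HistogramObserver for activations
# with a per-channel weight observer, per the branches above.
print(type(qconfig.activation()).__name__)  # e.g. HistogramObserver
print(type(qconfig.weight()).__name__)      # e.g. PerChannelMinMaxObserver
```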
  {
    "library": "scipy",
    "name": "get_arglist",
    "source_code": "def get_arglist(I_type, T_type):\n    args = []\n    next_is_writeable = False\n    j = 0\n    for t in arg_spec:\n        const = '' if next_is_writeable else 'const '\n        next_is_writeable = False\n        if t == '*':\n            next_is_writeable = True\n            continue\n        elif t == 'i':\n            args.append(f'*({const + I_type}*)a[{j}]')\n        elif t == 'I':\n            args.append(f'({const + I_type}*)a[{j}]')\n        elif t == 'T':\n            args.append(f'({const + T_type}*)a[{j}]')\n        elif t == 'B':\n            args.append(f'(npy_bool_wrapper*)a[{j}]')\n        elif t == 'V':\n            if const:\n                raise ValueError(\"'V' argument must be an output arg\")\n            args.append(f'(std::vector<{I_type}>*)a[{j}]')\n        elif t == 'W':\n            if const:\n                raise ValueError(\"'W' argument must be an output arg\")\n            args.append(f'(std::vector<{T_type}>*)a[{j}]')\n        elif t == 'l':\n            args.append(f'*({const}npy_int64*)a[{j}]')\n        else:\n            raise ValueError(f'Invalid spec character {t!r}')\n        j += 1\n    return ', '.join(args)",
    "docstring": "Generate argument list for calling the C++ function",
    "type": "function",
    "file_path": "scipy\\scipy\\sparse\\_generate_sparsetools.py",
    "ast_data": "FunctionDef name:get_arglist arg:I_type arg:T_type arguments arg arg Assign Assign Assign For Assign Assign If Compare Assign If Compare Call If Compare Call If Compare Call If Compare Call If Compare If Raise Call Call If Compare If Raise Call Call If Compare Call Raise Call Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "write_files",
    "source_code": "def write_files(file_dict):\n    for file_path, content in file_dict.items():\n        with open(file_path, 'w') as f:\n            f.write(content)",
    "docstring": "Takes a mapping of full filepath to file contents to write at that path.",
    "type": "function",
    "file_path": "scipy\\scipy\\_build_utils\\_wrappers_common.py",
    "ast_data": "FunctionDef name:write_files arg:file_dict arguments arg For Call With Call Call"
  },
  {
    "library": "pandas",
    "name": "repeat",
    "source_code": "def repeat(self, repeats: int | Sequence[int], axis: None=None) -> Series:\n    nv.validate_repeat((), {'axis': axis})\n    new_index = self.index.repeat(repeats)\n    new_values = self._values.repeat(repeats)\n    return self._constructor(new_values, index=new_index, copy=False).__finalize__(self, method='repeat')",
    "docstring": "Repeat elements of a Series. Returns a new Series where each element of the current Series is repeated consecutively a given number of times. Parameters ---------- repeats : int or array of ints The number of repetitions for each element. This should be a non-negative integer. Repeating 0 times will return an empty Series. axis : None Unused. Parameter needed for compatibility with DataFrame. Returns ------- Series Newly created Series with repeated elements. See Also -------- Index.repeat : Equivalent function for Index. numpy.repeat : Similar method for :class:. Examples -------- >>> s = pd.Series([\"a\", \"b\", \"c\"]) >>> s 0 a 1 b 2 c dtype: object >>> s.repeat(2) 0 a 0 a 1 b 1 b 2 c 2 c dtype: object >>> s.repeat([1, 2, 3]) 0 a 1 b 1 b 2 c 2 c 2 c dtype: object",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\series.py",
    "ast_data": "FunctionDef name:repeat arg:self arg:repeats arg:axis arguments arg arg arg Call Assign Call Assign Call Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_single_shard_restore",
    "source_code": "def _single_shard_restore(file_prefix: tensor_lib.Tensor, shardable_tensors: Sequence[sharding_util.ShardableTensor], options: 'checkpoint_options.CheckpointOptions | None'=None) -> sharding_util.Shard:\n    options = options or checkpoint_options.CheckpointOptions()\n    tensor_names = []\n    tensor_dtypes = []\n    slice_specs = []\n    for shardable_tensor in shardable_tensors:\n        if shardable_tensor._tensor_save_spec:\n            name = shardable_tensor._tensor_save_spec.name\n            spec = shardable_tensor._tensor_save_spec.slice_spec\n        else:\n            name, spec = (shardable_tensor.checkpoint_key, shardable_tensor.slice_spec)\n        tensor_names.append(name)\n        slice_specs.append(spec)\n        tensor_dtypes.append(shardable_tensor.dtype)\n    restore_device = options.experimental_io_device or 'cpu:0'\n    with ops.device(restore_device):\n        restored_tensors = io_ops.restore_v2(file_prefix, tensor_names, slice_specs, tensor_dtypes)\n    restored_tensor_dict = {}\n    for shardable_tensor in shardable_tensors:\n        restored_tensor = restored_tensors.pop(0)\n        restored_tensor_dict.setdefault(shardable_tensor.checkpoint_key, {})[shardable_tensor.slice_spec] = restored_tensor\n    return restored_tensor_dict",
    "docstring": "Restore the saveable objects from a checkpoint with . Args: file_prefix: A string or scalar string Tensor containing the prefix for files to read from. shardable_tensors: A list of ShardableTensors to restore. options: Optional object. Returns: A restored tensor dict (maps checkpoint_key -> slice_spec -> tensor).",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\checkpoint\\functional_saver.py",
    "ast_data": "FunctionDef name:_single_shard_restore arg:file_prefix arg:shardable_tensors arg:options arguments arg arg arg Assign BoolOp Call Assign Assign Assign For If Assign Assign Assign Call Call Call Assign BoolOp With Call Assign Call Assign For Assign Call Assign Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "display_bytes",
    "source_code": "def display_bytes(b: int, unit: str='MiB') -> str:\n    if unit == 'KiB':\n        return f'{b / 2 ** 10:.2f} KiB'\n    if unit == 'MiB':\n        return f'{b / 2 ** 20:.2f} MiB'\n    if unit == 'GiB':\n        return f'{b / 2 ** 30:.2f} GiB'\n    return f'{b:.2f} bytes'",
    "docstring": "return a string that represent the number of bytes in a desired unit",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\_tools\\ilp_utils.py",
    "ast_data": "FunctionDef name:display_bytes arg:b arg:unit arguments arg arg If Compare Return return:yes If Compare Return return:yes If Compare Return return:yes Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "fit",
    "source_code": "@_fit_context(prefer_skip_nested_validation=False)\ndef fit(self, X, y):\n    return self._fit(X, y)",
    "docstring": "Fit the radius neighbors regressor from the training dataset. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) or (n_samples, n_samples) if metric='precomputed' Training data. y : {array-like, sparse matrix} of shape (n_samples,) or (n_samples, n_outputs) Target values. Returns ------- self : RadiusNeighborsRegressor The fitted radius neighbors regressor.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\neighbors\\_regression.py",
    "ast_data": "FunctionDef name:fit arg:self arg:X arg:y arguments arg arg arg Return return:yes Call Call"
  },
  {
    "library": "kornia",
    "name": "vee",
    "source_code": "@staticmethod\ndef vee(omega: Tensor) -> Tensor:\n    head = omega[..., :3, -1]\n    tail = So3.vee(omega[..., :3, :3])\n    return concatenate((head, tail), -1)",
    "docstring": "Convert elements from lie algebra to vector space. Args: omega: 4x4-matrix representing lie algebra of shape :math:. Returns: vector of shape :math:. Example: >>> v = torch.ones((1, 6)) >>> omega_hat = Se3.hat(v) >>> Se3.vee(omega_hat) tensor([[1., 1., 1., 1., 1., 1.]])",
    "type": "method",
    "file_path": "kornia\\kornia\\geometry\\liegroup\\se3.py",
    "ast_data": "FunctionDef name:vee arg:omega arguments arg Assign Assign Call Return return:yes Call"
  },
  {
    "library": "numpy",
    "name": "splitlines",
    "source_code": "def splitlines(self, keepends=None):\n    return splitlines(self, keepends)",
    "docstring": "For each element in , return a list of the lines in the element, breaking at line boundaries. See Also -------- char.splitlines",
    "type": "method",
    "file_path": "numpy\\numpy\\_core\\defchararray.py",
    "ast_data": "FunctionDef name:splitlines arg:self arg:keepends arguments arg arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_preprocess_conv2d_input",
    "source_code": "def _preprocess_conv2d_input(x, data_format, force_transpose=False):\n    tf_data_format = 'NHWC'\n    if data_format == 'channels_first':\n        if not _has_nchw_support() or force_transpose:\n            x = array_ops.transpose(x, (0, 2, 3, 1))\n        else:\n            tf_data_format = 'NCHW'\n    return (x, tf_data_format)",
    "docstring": "Transpose and cast the input before the conv2d. Args: x: input tensor. data_format: string, or . force_transpose: Boolean. If True, the input will always be transposed from NCHW to NHWC if is . If False, the transposition only occurs on CPU (GPU ops are assumed to support NCHW). Returns: A tensor.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\backend.py",
    "ast_data": "FunctionDef name:_preprocess_conv2d_input arg:x arg:data_format arg:force_transpose arguments arg arg arg Assign If Compare If BoolOp Call Assign Call Assign Return return:yes"
  },
  {
    "library": "scipy",
    "name": "_perm_test",
    "source_code": "def _perm_test(x, y, stat, reps=1000, workers=-1, random_state=None):\n    random_state = check_random_state(random_state)\n    random_states = [np.random.RandomState(rng_integers(random_state, 1 << 32, size=4, dtype=np.uint32)) for _ in range(reps)]\n    parallelp = _ParallelP(x=x, y=y, random_states=random_states)\n    with MapWrapper(workers) as mapwrapper:\n        null_dist = np.array(list(mapwrapper(parallelp, range(reps))))\n    pvalue = (1 + (null_dist >= stat).sum()) / (1 + reps)\n    return (pvalue, null_dist)",
    "docstring": "Helper function that calculates the p-value. See below for uses. Parameters ---------- x, y : ndarray and have shapes `workersworkersmultiprocessing.Pool -1multiprocessing.Pool.mapworkers(func, iterable)funcnumpy.random.Generatornumpy.random.RandomStateseednp.randomnumpy.random.RandomStateseedseedseed` instance then that instance is used. Returns ------- pvalue : float The sample test p-value. null_dist : list The approximated null distribution.",
    "type": "function",
    "file_path": "scipy\\scipy\\stats\\_mgc.py",
    "ast_data": "FunctionDef name:_perm_test arg:x arg:y arg:stat arg:reps arg:workers arg:random_state arguments arg arg arg arg arg arg Assign Call Assign Call Call Call Assign Call With Call Assign Call Call Call Call Assign Call Compare Return return:yes"
  },
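The p-value line above uses the standard add-one (bias-corrected) permutation estimate, counting the observed statistic as one draw from the null. A minimal sketch of the same formula with a simple correlation statistic (not MGC) standing in for `stat`:

```python
import numpy as np

rng = np.random.default_rng(0)
x = rng.normal(size=50)
y = x + rng.normal(scale=2.0, size=50)

def stat_fn(x, y):
    # Illustrative test statistic: absolute Pearson correlation.
    return abs(np.corrcoef(x, y)[0, 1])

stat = stat_fn(x, y)
reps = 1000
# Build the null distribution by breaking the pairing between x and y.
null_dist = np.array([stat_fn(x, rng.permutation(y)) for _ in range(reps)])
# Add-one correction: the observed statistic counts as one null draw.
pvalue = (1 + (null_dist >= stat).sum()) / (1 + reps)
print(pvalue)
```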
  {
    "library": "scipy",
    "name": "sample_AB",
    "source_code": "def sample_AB(A: np.ndarray, B: np.ndarray) -> np.ndarray:\n    d, n = A.shape\n    AB = np.tile(A, (d, 1, 1))\n    i = np.arange(d)\n    AB[i, i] = B[i]\n    return AB",
    "docstring": "AB matrix. AB: rows of B into A. Shape (d, d, n). - Copy A into d \"pages\" - In the first page, replace 1st rows of A with 1st row of B. ... - In the dth page, replace dth row of A with dth row of B. - return the stack of pages",
    "type": "function",
    "file_path": "scipy\\scipy\\stats\\_sensitivity_analysis.py",
    "ast_data": "FunctionDef name:sample_AB arg:A arg:B arguments arg arg Assign Assign Call Assign Call Assign Return return:yes"
  },
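The fancy-indexing step `AB[i, i] = B[i]` is the whole trick: with `i = arange(d)`, it writes B's row i into row i of page i in one vectorized assignment. A small numpy demonstration:

```python
import numpy as np

def sample_AB(A, B):
    d, n = A.shape
    AB = np.tile(A, (d, 1, 1))  # d copies ("pages") of A, shape (d, d, n)
    i = np.arange(d)
    AB[i, i] = B[i]             # in page i, overwrite row i of A with row i of B
    return AB

A = np.zeros((3, 2))
B = np.ones((3, 2))
AB = sample_AB(A, B)
print(AB[0])  # [[1. 1.] [0. 0.] [0. 0.]]  <- B's row 0 replaced A's row 0
print(AB[2])  # [[0. 0.] [0. 0.] [1. 1.]]  <- B's row 2 replaced A's row 2
```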
  {
    "library": "cherrypy",
    "name": "XMLRPCDispatcher",
    "source_code": "def XMLRPCDispatcher(next_dispatcher=Dispatcher()):\n    from cherrypy.lib import xmlrpcutil\n\n    def xmlrpc_dispatch(path_info):\n        path_info = xmlrpcutil.patched_path(path_info)\n        return next_dispatcher(path_info)\n    return xmlrpc_dispatch",
    "docstring": "Chain an HTTP dispatcher variant implementing XML-RPC.",
    "type": "function",
    "file_path": "cherrypy\\cherrypy\\_cpdispatch.py",
    "ast_data": "FunctionDef name:XMLRPCDispatcher arg:next_dispatcher arguments arg Call FunctionDef name:xmlrpc_dispatch arg:path_info arguments arg Assign Call Return return:yes Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_default_schedule_fn",
    "source_code": "def _default_schedule_fn(_: int) -> ProfilerAction:\n    return ProfilerAction.RECORD",
    "docstring": "Default profiler behavior - immediately starts recording the events, keeps doing it on every profiler step.",
    "type": "function",
    "file_path": "pytorch\\torch\\profiler\\profiler.py",
    "ast_data": "FunctionDef name:_default_schedule_fn arg:_ arguments arg Return return:yes"
  },
  {
    "library": "django",
    "name": "Oracle_datetime",
    "source_code": "class Oracle_datetime(datetime.datetime):\n    input_size = Database.DB_TYPE_TIMESTAMP\n\n    @classmethod\n    def from_datetime(cls, dt):\n        return Oracle_datetime(dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second, dt.microsecond)",
    "docstring": "A datetime object, with an additional class attribute to tell oracledb to save the microseconds too.",
    "type": "class",
    "file_path": "django\\django\\db\\backends\\oracle\\utils.py",
    "ast_data": "ClassDef name:Oracle_datetime Assign FunctionDef name:from_datetime arg:cls arg:dt arguments arg arg Return return:yes Call"
  },
  {
    "library": "authlib",
    "name": "create_token_credential",
    "source_code": "def create_token_credential(self, request):\n    raise NotImplementedError()",
    "docstring": "Create and save token credential into database. This method would be re-implemented like this:: def create_token_credential(self, request): oauth_token = generate_token(36) oauth_token_secret = generate_token(48) temporary_credential = request.credential token_credential = TokenCredential( oauth_token=oauth_token, oauth_token_secret=oauth_token_secret, client_id=temporary_credential.get_client_id(), user_id=temporary_credential.get_user_id(), ) # if the credential has a save method token_credential.save() return token_credential :param request: OAuth1Request instance :return: TokenCredential instance",
    "type": "method",
    "file_path": "authlib\\authlib\\oauth1\\rfc5849\\authorization_server.py",
    "ast_data": "FunctionDef name:create_token_credential arg:self arg:request arguments arg arg Raise Call"
  },
  {
    "library": "scikit-learn",
    "name": "_build_request_for_signature",
    "source_code": "@classmethod\ndef _build_request_for_signature(cls, router, method):\n    mmr = MethodMetadataRequest(owner=cls.__name__, method=method)\n    if not hasattr(cls, method) or not inspect.isfunction(getattr(cls, method)):\n        return mmr\n    params = list(inspect.signature(getattr(cls, method)).parameters.items())[1:]\n    for pname, param in params:\n        if pname in {'X', 'y', 'Y', 'Xt', 'yt'}:\n            continue\n        if param.kind in {param.VAR_POSITIONAL, param.VAR_KEYWORD}:\n            continue\n        mmr.add_request(param=pname, alias=None)\n    return mmr",
    "docstring": "Build the for a method using its signature. This method takes all arguments from the method signature and uses `MethodMetadataRequest`. method : str The name of the method. Returns ------- method_request : MethodMetadataRequest The prepared request using the method's signature.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\utils\\_metadata_requests.py",
    "ast_data": "FunctionDef name:_build_request_for_signature arg:cls arg:router arg:method arguments arg arg arg Assign Call If BoolOp Call Call Call Return return:yes Assign Call Call Call Call For If Compare If Compare Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_SoftmaxGrad",
    "source_code": "@ops.RegisterGradient('Softmax')\ndef _SoftmaxGrad(op: ops.Operation, grad_softmax):\n    softmax = op.outputs[0]\n    sum_channels = math_ops.reduce_sum(grad_softmax * softmax, -1, keepdims=True)\n    return (grad_softmax - sum_channels) * softmax",
    "docstring": "The derivative of the softmax nonlinearity. We assume that probs is of shape [batch_size * dim] The formula for dsoftmax / dx = (diag(softmax) - softmax * softmax'). This matrix is diagonal minus a rank one matrix, so it is easy to implement as follows: grad_x = grad_softmax * softmax - sum(grad_softmax * softmax) * softmax Args: op: the Softmax op. grad_softmax: the tensor representing the gradient w.r.t. the softmax output. Returns: gradient w.r.t the input to the softmax",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\nn_grad.py",
    "ast_data": "FunctionDef name:_SoftmaxGrad arg:op arg:grad_softmax arguments arg arg Assign Assign Call Return return:yes Call"
  },
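The diagonal-minus-rank-one identity in the docstring can be verified numerically by comparing the closed form against central finite differences of the vector-Jacobian product; a numpy sketch:

```python
import numpy as np

def softmax(x):
    e = np.exp(x - x.max(axis=-1, keepdims=True))
    return e / e.sum(axis=-1, keepdims=True)

rng = np.random.default_rng(0)
x = rng.normal(size=5)
g = rng.normal(size=5)  # upstream gradient w.r.t. the softmax output

# Closed form from the entry: grad_x = (g - sum(g * s)) * s
s = softmax(x)
analytic = (g - (g * s).sum(-1, keepdims=True)) * s

# Central finite differences of the VJP: d(g . softmax(x)) / dx_i
eps = 1e-6
numeric = np.array([
    g @ (softmax(x + eps * np.eye(5)[i]) - softmax(x - eps * np.eye(5)[i])) / (2 * eps)
    for i in range(5)
])
print(np.allclose(analytic, numeric, atol=1e-6))  # True
```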
  {
    "library": "pandas",
    "name": "tolist",
    "source_code": "def tolist(self) -> list:\n    return self._values.tolist()",
    "docstring": "Return a list of the values. These are each a scalar type, which is a Python scalar (for str, int, float) or a pandas scalar (for Timestamp/Timedelta/Interval/Period) Returns ------- list List containing the values as Python or pandas scalers. See Also -------- numpy.ndarray.tolist : Return the array as an a.ndim-levels deep nested list of Python scalars. Examples -------- For Series >>> s = pd.Series([1, 2, 3]) >>> s.to_list() [1, 2, 3] For Index: >>> idx = pd.Index([1, 2, 3]) >>> idx Index([1, 2, 3], dtype='int64') >>> idx.to_list() [1, 2, 3]",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\base.py",
    "ast_data": "FunctionDef name:tolist arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "get_dot_graph",
    "source_code": "def get_dot_graph(self, submod_name=None) -> pydot.Dot:\n    if submod_name is None:\n        return self.get_main_dot_graph()\n    else:\n        return self.get_submod_dot_graph(submod_name)",
    "docstring": "Visualize a torch.fx.Graph with graphviz Example: >>> # xdoctest: +REQUIRES(module:pydot) >>> # xdoctest: +REQUIRES(module:ubelt) >>> # define module >>> class MyModule(torch.nn.Module): >>> def __init__(self) -> None: >>> super().__init__() >>> self.linear = torch.nn.Linear(4, 5) >>> def forward(self, x): >>> return self.linear(x).clamp(min=0.0, max=1.0) >>> module = MyModule() >>> # trace the module >>> symbolic_traced = torch.fx.symbolic_trace(module) >>> # setup output file >>> import ubelt as ub >>> dpath = ub.Path.appdir(\"torch/tests/FxGraphDrawer\").ensuredir() >>> fpath = dpath / \"linear.svg\" >>> # draw the graph >>> g = FxGraphDrawer(symbolic_traced, \"linear\") >>> g.get_dot_graph().write_svg(fpath)",
    "type": "method",
    "file_path": "pytorch\\torch\\fx\\passes\\graph_drawer.py",
    "ast_data": "FunctionDef name:get_dot_graph arg:self arg:submod_name arguments arg arg If Compare Return return:yes Call Return return:yes Call"
  },
  {
    "library": "kornia",
    "name": "Sepia",
    "source_code": "class Sepia(Module):\n\n    def __init__(self, rescale: bool=True, eps: float=1e-06) -> None:\n        self.rescale = rescale\n        self.eps = eps\n        super().__init__()\n\n    def __repr__(self) -> str:\n        return self.__class__.__name__ + f'(rescale={self.rescale}, eps={self.eps})'\n\n    def forward(self, input: Tensor) -> Tensor:\n        return sepia_from_rgb(input, rescale=self.rescale, eps=self.eps)",
    "docstring": "Module that apply the sepia filter to tensors. Args: input: the input tensor with shape of :math:. rescale: If True, the output tensor will be rescaled (max values be 1. or 255). eps: scalar to enforce numerical stability. Returns: Tensor: The sepia tensor of same size and numbers of channels as the input with shape :math:. Example: >>> >>> input = torch.ones(3, 1, 1) >>> Sepia(rescale=False)(input) tensor([[[1.3510]], [[1.2030]], [[0.9370]]])",
    "type": "class",
    "file_path": "kornia\\kornia\\color\\sepia.py",
    "ast_data": "ClassDef name:Sepia FunctionDef name:__init__ arg:self arg:rescale arg:eps arguments arg arg arg Assign Assign Call Call FunctionDef name:__repr__ arg:self arguments arg Return return:yes FunctionDef name:forward arg:self arg:input arguments arg arg Return return:yes Call"
  },
  {
    "library": "django",
    "name": "prepare_default",
    "source_code": "def prepare_default(self, value):\n    raise NotImplementedError('subclasses of BaseDatabaseSchemaEditor for backends which have requires_literal_defaults must provide a prepare_default() method')",
    "docstring": "Only used for backends which have requires_literal_defaults feature",
    "type": "method",
    "file_path": "django\\django\\db\\backends\\base\\schema.py",
    "ast_data": "FunctionDef name:prepare_default arg:self arg:value arguments arg arg Raise Call"
  },
  {
    "library": "scikit-learn",
    "name": "_minibatch_step",
    "source_code": "def _minibatch_step(self, X, W, H, update_H):\n    batch_size = X.shape[0]\n    l1_reg_W, l1_reg_H, l2_reg_W, l2_reg_H = self._compute_regularization(X)\n    if self.fresh_restarts or W is None:\n        W = self._solve_W(X, H, self.fresh_restarts_max_iter)\n    else:\n        W, *_ = _multiplicative_update_w(X, W, H, self._beta_loss, l1_reg_W, l2_reg_W, self._gamma)\n    if self._beta_loss < 1:\n        W[W < np.finfo(np.float64).eps] = 0.0\n    batch_cost = (_beta_divergence(X, W, H, self._beta_loss) + l1_reg_W * W.sum() + l1_reg_H * H.sum() + l2_reg_W * (W ** 2).sum() + l2_reg_H * (H ** 2).sum()) / batch_size\n    if update_H:\n        H[:] = _multiplicative_update_h(X, W, H, beta_loss=self._beta_loss, l1_reg_H=l1_reg_H, l2_reg_H=l2_reg_H, gamma=self._gamma, A=self._components_numerator, B=self._components_denominator, rho=self._rho)\n        if self._beta_loss <= 1:\n            H[H < np.finfo(np.float64).eps] = 0.0\n    return batch_cost",
    "docstring": "Perform the update of W and H for one minibatch.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\decomposition\\_nmf.py",
    "ast_data": "FunctionDef name:_minibatch_step arg:self arg:X arg:W arg:H arg:update_H arguments arg arg arg arg arg Assign Assign Call If BoolOp Compare Assign Call Assign Call If Compare Assign Compare Call Assign Call Call Call Call Call If Assign Call If Compare Assign Compare Call Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "n_classes_",
    "source_code": "@property\ndef n_classes_(self):\n    return len(self.classes_)",
    "docstring": "Number of classes.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\multiclass.py",
    "ast_data": "FunctionDef name:n_classes_ arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "django",
    "name": "_get_builtin_permissions",
    "source_code": "def _get_builtin_permissions(opts):\n    perms = []\n    for action in opts.default_permissions:\n        perms.append((get_permission_codename(action, opts), 'Can %s %s' % (action, opts.verbose_name_raw)))\n    return perms",
    "docstring": "Return (codename, name) for all autogenerated permissions. By default, this is ('add', 'change', 'delete', 'view')",
    "type": "function",
    "file_path": "django\\django\\contrib\\auth\\management\\__init__.py",
    "ast_data": "FunctionDef name:_get_builtin_permissions arg:opts arguments arg Assign For Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "dtype",
    "source_code": "def dtype(self) -> torch.dtype:\n    return _SCALAR_TYPE_TO_DTYPE[self]",
    "docstring": "Convert a JitScalarType to a torch dtype.",
    "type": "method",
    "file_path": "pytorch\\torch\\onnx\\_type_utils.py",
    "ast_data": "FunctionDef name:dtype arg:self arguments arg Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "inverse_transform",
    "source_code": "def inverse_transform(self, X):\n    xp, _ = get_namespace(X)\n    if self.whiten:\n        scaled_components = xp.sqrt(self.explained_variance_[:, np.newaxis]) * self.components_\n        return X @ scaled_components + self.mean_\n    else:\n        return X @ self.components_ + self.mean_",
    "docstring": "Transform data back to its original space. In other words, return an input whose transform would be X. Parameters ---------- X : array-like of shape (n_samples, n_components) New data, where is the number of samples and is the number of components. Returns ------- X_original : array-like of shape (n_samples, n_features) Original data, where is the number of samples and is the number of features. Notes ----- If whitening is enabled, inverse_transform will compute the exact inverse operation, which includes reversing whitening.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\decomposition\\_base.py",
    "ast_data": "FunctionDef name:inverse_transform arg:self arg:X arguments arg arg Assign Call If Assign Call Return return:yes Return return:yes"
  },
  {
    "library": "django",
    "name": "integer_field_range",
    "source_code": "def integer_field_range(self, internal_type):\n    return self.integer_field_ranges[internal_type]",
    "docstring": "Given an integer field internal type (e.g. 'PositiveIntegerField'), return a tuple of the (min_value, max_value) form representing the range of the column type bound to the field.",
    "type": "method",
    "file_path": "django\\django\\db\\backends\\base\\operations.py",
    "ast_data": "FunctionDef name:integer_field_range arg:self arg:internal_type arguments arg arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "calculate_mem_bytes_needed",
    "source_code": "def calculate_mem_bytes_needed(p1, p2):\n    nodes = p1.nodes.union(p2.nodes)\n    mem_bytes_needed = 0\n    for node in nodes:\n        mem_bytes_needed += get_extra_size_of(node, nodes)\n    return mem_bytes_needed",
    "docstring": "Given two partitions, calculate how many mem bytes are needed if two partitions are combined",
    "type": "method",
    "file_path": "pytorch\\torch\\fx\\experimental\\accelerator_partitioner.py",
    "ast_data": "FunctionDef name:calculate_mem_bytes_needed arg:p1 arg:p2 arguments arg arg Assign Call Assign For Call Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "_params_html_repr",
    "source_code": "def _params_html_repr(params):\n    HTML_TEMPLATE = '\\n        <div class=\"estimator-table\">\\n            <details>\\n                <summary>Parameters</summary>\\n                <table class=\"parameters-table\">\\n                  <tbody>\\n                    {rows}\\n                  </tbody>\\n                </table>\\n            </details>\\n        </div>\\n    '\n    ROW_TEMPLATE = '\\n        <tr class=\"{param_type}\">\\n            <td><i class=\"copy-paste-icon\"\\n                 onclick=\"copyToClipboard(\\'{param_name}\\',\\n                          this.parentElement.nextElementSibling)\"\\n            ></i></td>\\n            <td class=\"param\">{param_name}&nbsp;</td>\\n            <td class=\"value\">{param_value}</td>\\n        </tr>\\n    '\n    rows = [ROW_TEMPLATE.format(**_read_params(name, value, params.non_default)) for name, value in params.items()]\n    return HTML_TEMPLATE.format(rows='\\n'.join(rows))",
    "docstring": "Generate HTML representation of estimator parameters. Creates an HTML table with parameter names and values, wrapped in a collapsible details element. Parameters are styled differently based on whether they are default or user-set values.",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\utils\\_repr_html\\params.py",
    "ast_data": "FunctionDef name:_params_html_repr arg:params arguments arg Assign Assign Assign Call Call Call Return return:yes Call Call"
  },
  {
    "library": "numpy",
    "name": "strides",
    "source_code": "@property\ndef strides(self):\n    return self.strides_as(_getintp_ctype())",
    "docstring": "(c_intp*self.ndim): A ctypes array of length self.ndim where the basetype is the same as for the shape attribute. This ctypes array contains the strides information from the underlying array. This strides information is important for showing how many bytes must be jumped to get to the next element in the array.",
    "type": "method",
    "file_path": "numpy\\numpy\\_core\\_internal.py",
    "ast_data": "FunctionDef name:strides arg:self arguments arg Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "CudagraphCachedInfo",
    "source_code": "@dataclasses.dataclass(frozen=True)\nclass CudagraphCachedInfo:\n    placeholders: Sequence[PlaceholderInfo]\n    stack_traces: list[Optional[str]]\n    cudagraph_fail_reasons: list[str]",
    "docstring": "Info needed to realign inputs",
    "type": "class",
    "file_path": "pytorch\\torch\\_inductor\\cudagraph_utils.py",
    "ast_data": "ClassDef name:CudagraphCachedInfo Call"
  },
  {
    "library": "tensorflow",
    "name": "_transform_feature",
    "source_code": "@deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE, _FEATURE_COLUMN_DEPRECATION)\ndef _transform_feature(self, inputs):\n    weight_tensor = inputs.get(self.weight_feature_key)\n    weight_tensor = self._transform_weight_tensor(weight_tensor)\n    return (inputs.get(self.categorical_column), weight_tensor)",
    "docstring": "Applies weights to tensor generated from '.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\feature_column\\feature_column_v2.py",
    "ast_data": "FunctionDef name:_transform_feature arg:self arg:inputs arguments arg arg Assign Call Assign Call Return return:yes Call Call"
  },
  {
    "library": "matplotlib",
    "name": "set_fontsize",
    "source_code": "def set_fontsize(self, size):\n    for cell in self._cells.values():\n        cell.set_fontsize(size)\n    self.stale = True",
    "docstring": "Set the font size, in points, of the cell text. Parameters ---------- size : float Notes ----- As long as auto font size has not been disabled, the value will be clipped such that the text fits horizontally into the cell. You can disable this behavior using . >>> the_table.auto_set_font_size(False) >>> the_table.set_fontsize(20) However, there is no automatic scaling of the row height so that the text may exceed the cell boundary.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\table.py",
    "ast_data": "FunctionDef name:set_fontsize arg:self arg:size arguments arg arg For Call Call Assign"
  },
  {
    "library": "tensorflow",
    "name": "convert",
    "source_code": "@_export_metrics\ndef convert(self):\n    return super(TFLiteSavedModelConverter, self).convert()",
    "docstring": "Converts a TensorFlow GraphDef based on instance variables. Note that in the converted TensorFlow Lite model, the input tensor's order might be changed each time is called. To access input tensor information, please consider using the API (). Returns: The converted data in serialized format, either a TFLite Flatbuffer or a Graphviz graph depending on value in . Raises: ValueError: Input shape is not specified. None value for dimension in input_tensor.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\lite\\python\\lite.py",
    "ast_data": "FunctionDef name:convert arg:self arguments arg Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "CompileEventLogLevel",
    "source_code": "class CompileEventLogLevel(enum.Enum):\n    CHROMIUM = 1\n    PT2_COMPILE = 2\n    COMPILATION_METRIC = 3",
    "docstring": "Enum that loosely corresponds with a \"log level\" of a given event. CHROMIUM_EVENT: Logs only to tlparse. COMPILE_EVENT: Logs to tlparse + PT2 Compile Events COMPILATION_METRIC: Logs to tlparse, PT2 Compile Events, and dynamo_compile",
    "type": "class",
    "file_path": "pytorch\\torch\\_dynamo\\utils.py",
    "ast_data": "ClassDef name:CompileEventLogLevel Assign Assign Assign"
  },
  {
    "library": "tensorflow",
    "name": "read_value_no_copy",
    "source_code": "def read_value_no_copy(self):\n    with ops.name_scope('Read'):\n        value = self._read_variable_op(no_copy=True)\n    return array_ops.identity(value)",
    "docstring": "Constructs an op which reads the value of this variable without copy. The variable is read without making a copy even when it has been sparsely accessed. Variables in copy-on-read mode will be converted to copy-on-write mode. Returns: The value of the variable.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\resource_variable_ops.py",
    "ast_data": "FunctionDef name:read_value_no_copy arg:self arguments arg With Call Assign Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "read_up_to",
    "source_code": "def read_up_to(self, queue, num_records, name=None):\n    if isinstance(queue, tensor_lib.Tensor):\n        queue_ref = queue\n    else:\n        queue_ref = queue.queue_ref\n    if self._reader_ref.dtype == dtypes.resource:\n        return gen_io_ops.reader_read_up_to_v2(self._reader_ref, queue_ref, num_records, name=name)\n    else:\n        old_queue_op = gen_data_flow_ops.fake_queue(queue_ref)\n        return gen_io_ops.reader_read_up_to(self._reader_ref, old_queue_op, num_records, name=name)",
    "docstring": "Returns up to num_records (key, value) pairs produced by a reader. Will dequeue a work unit from queue if necessary (e.g., when the Reader needs to start reading from a new file since it has finished with the previous file). It may return less than num_records even before the last batch. Args: queue: A Queue or a mutable string Tensor representing a handle to a Queue, with string work items. num_records: Number of records to read. name: A name for the operation (optional). Returns: A tuple of Tensors (keys, values). keys: A 1-D string Tensor. values: A 1-D string Tensor.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\io_ops.py",
    "ast_data": "FunctionDef name:read_up_to arg:self arg:queue arg:num_records arg:name arguments arg arg arg arg If Call Assign Assign If Compare Return return:yes Call Assign Call Return return:yes Call"
  },
  {
    "library": "kornia",
    "name": "FaceKeypoint",
    "source_code": "class FaceKeypoint(Enum):\n    EYE_LEFT = 0\n    EYE_RIGHT = 1\n    NOSE = 2\n    MOUTH_LEFT = 3\n    MOUTH_RIGHT = 4",
    "docstring": "Define the keypoints detected in a face. The left/right convention is based on the screen viewer.",
    "type": "class",
    "file_path": "kornia\\kornia\\contrib\\face_detection.py",
    "ast_data": "ClassDef name:FaceKeypoint Assign Assign Assign Assign Assign"
  },
  {
    "library": "scikit-learn",
    "name": "bounds",
    "source_code": "@property\ndef bounds(self):\n    return np.vstack([kernel.bounds for kernel in self.kernels])",
    "docstring": "Returns the log-transformed bounds on the theta. Returns ------- bounds : array of shape (n_dims, 2) The log-transformed bounds on the kernel's hyperparameters theta",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\gaussian_process\\kernels.py",
    "ast_data": "FunctionDef name:bounds arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "get_read_only_resource_input_indices_graph",
    "source_code": "def get_read_only_resource_input_indices_graph(func_graph):\n    result = []\n    op_read_only_resource_inputs = {}\n    for input_index, t in enumerate(func_graph.inputs):\n        if t.dtype != dtypes.resource:\n            continue\n        read_only = True\n        for op in t.consumers():\n            if op in op_read_only_resource_inputs:\n                if t not in op_read_only_resource_inputs[op]:\n                    read_only = False\n                    break\n            else:\n                indices = _get_read_only_resource_input_indices_op(op)\n                op_read_only_resource_inputs[op] = object_identity.ObjectIdentitySet([op.inputs[i] for i in indices])\n                if t not in op_read_only_resource_inputs[op]:\n                    read_only = False\n                    break\n        if read_only:\n            result.append(input_index)\n    return result",
    "docstring": "Returns sorted list of read-only resource indices in func_graph.inputs.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\auto_control_deps_utils.py",
    "ast_data": "FunctionDef name:get_read_only_resource_input_indices_graph arg:func_graph arguments arg Assign Assign For Call If Compare Assign For Call If Compare If Compare Assign Assign Call Assign Call If Compare Assign If Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "complex_float",
    "source_code": "def complex_float(self):\n    return self._to(torch.cfloat)",
    "docstring": "Casts this storage to complex float type.",
    "type": "method",
    "file_path": "pytorch\\torch\\storage.py",
    "ast_data": "FunctionDef name:complex_float arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "__init__",
    "source_code": "def __init__(self, fallback: Callable[[], Choice], choices: list[Choice], feedback: Optional[LocalFeedback], context: AHContext, name: str, augment_context: Optional[list[AHOperation]]=None, precondition: Optional[Callable[[AHMetadata, AHContext], bool]]=None) -> None:\n    self.fallback = fallback\n    self.choices = choices\n    self.feedback = feedback\n    self.context = context\n    self.name = name\n    self.collected_feedback = {}\n    self.augment_context = augment_context\n    self.metadata = AHMetadata(get_gpu_shared_memory(), torch.cuda.get_device_capability(), self.choices, self.name)\n    self.precondition = precondition\n    if not self.satisfies_precondition():\n        return\n    if torch._inductor.config.autoheuristic_log_path == 'DEFAULT':\n        self.log_path = self.get_default_log_path()\n    else:\n        self.log_path = torch._inductor.config.autoheuristic_log_path\n    if torch._inductor.config.collect_autoheuristic(self.name):\n        if self.feedback is not None:\n            for choice in self.choices:\n                feedback_val = self.feedback(choice)\n                self.save_data(choice, feedback_val)",
    "docstring": "Initializes an instance of the AutoHeuristic class. Args: fallback: A callable that returns a Choice when the heuristic is unsure which choice to make, or AutoHeuristic is in data collection mode. choices: A list of possible choices the heuristic can make. feedback: An instance of LocalFeedback that provides feedback for a given choice. context: Context to store with each choice and feedback. name: A string that identifies the heuristic. augment_context: An optional list of AHOperation instances that augment the context. precondition: A callable that returns a boolean indicating whether AutoHeuristic should run.",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\autoheuristic\\autoheuristic.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:fallback arg:choices arg:feedback arg:context arg:name arg:augment_context arg:precondition arguments arg arg arg arg arg arg arg arg Assign Assign Assign Assign Assign Assign Assign Assign Call Call Call Assign If Call Return return:no If Compare Assign Call Assign If Call If Compare For Assign Call Call"
  },
  {
    "library": "sphinx",
    "name": "build_epub",
    "source_code": "def build_epub(self) -> None:\n    outname = self.config.epub_basename + '.epub'\n    logger.info(__('writing %s file...'), outname)\n    epub_filename = self.outdir / outname\n    with ZipFile(epub_filename, 'w', ZIP_DEFLATED) as epub:\n        epub.write(self.outdir / 'mimetype', 'mimetype', ZIP_STORED)\n        for filename in ('META-INF/container.xml', 'content.opf', 'toc.ncx'):\n            epub.write(self.outdir / filename, filename, ZIP_DEFLATED)\n        for filename in self.files:\n            epub.write(self.outdir / filename, filename, ZIP_DEFLATED)",
    "docstring": "Write the epub file. It is a zip file with the mimetype file stored uncompressed as the first entry.",
    "type": "method",
    "file_path": "sphinx\\sphinx\\builders\\_epub_base.py",
    "ast_data": "FunctionDef name:build_epub arg:self arguments arg Assign Call Call Assign With Call Call For Call For Call"
  },
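`build_epub` depends on the EPUB convention that `mimetype` is the first archive entry and is stored uncompressed; a standalone sketch of that convention with the standard-library `zipfile` (file name and contents are illustrative):

```python
from zipfile import ZipFile, ZIP_DEFLATED, ZIP_STORED

# EPUB readers sniff the mimetype from a fixed byte offset, so it must be
# the first entry and stored with ZIP_STORED (no compression).
with ZipFile("book.epub", "w", ZIP_DEFLATED) as epub:
    epub.writestr("mimetype", "application/epub+zip", ZIP_STORED)
    epub.writestr("META-INF/container.xml", "<container/>")  # placeholder content
```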
  {
    "library": "tensorflow",
    "name": "_check_enqueue_dtypes",
    "source_code": "def _check_enqueue_dtypes(self, vals):\n    if isinstance(vals, dict):\n        if not self._names:\n            raise ValueError('Queue must have names to enqueue a dictionary')\n        if sorted(self._names, key=str) != sorted(vals.keys(), key=str):\n            raise ValueError(f'Keys in dictionary to enqueue do not match names of Queue.  Dictionary: {sorted(vals.keys())},Queue: {sorted(self._names)}')\n        vals = [vals[k] for k in self._names]\n    else:\n        if self._names:\n            raise ValueError('You must enqueue a dictionary in a Queue with names')\n        if not isinstance(vals, (list, tuple)):\n            vals = [vals]\n    tensors = []\n    for i, (val, dtype) in enumerate(zip(vals, self._dtypes)):\n        tensors.append(ops.convert_to_tensor(val, dtype=dtype, name='component_%d' % i))\n    return tensors",
    "docstring": "Validate and convert to a list of s. The argument can be a Tensor, a list or tuple of tensors, or a dictionary with tensor values. If it is a dictionary, the queue must have been constructed with a attribute and the dictionary keys must match the queue names. If the queue was constructed with a attribute, must be a dictionary. Args: vals: A tensor, a list or tuple of tensors, or a dictionary.. Returns: A list of objects. Raises: ValueError: If is invalid.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\data_flow_ops.py",
    "ast_data": "FunctionDef name:_check_enqueue_dtypes arg:self arg:vals arguments arg arg If Call If Raise Call If Compare Call Call Call Raise Call Call Call Call Assign If Raise Call If Call Assign Assign For Call Call Call Call Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "transform",
    "source_code": "def transform(self, X, copy=None):\n    copy = copy if copy is not None else self.copy\n    X = validate_data(self, X, accept_sparse=['csr', 'csc'], force_writeable=True, copy=copy, reset=False)\n    return binarize(X, threshold=self.threshold, copy=False)",
    "docstring": "Binarize each element of X. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) The data to binarize, element by element. scipy.sparse matrices should be in CSR format to avoid an un-necessary copy. copy : bool Copy the input X or not. Returns ------- X_tr : {ndarray, sparse matrix} of shape (n_samples, n_features) Transformed array.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\preprocessing\\_data.py",
    "ast_data": "FunctionDef name:transform arg:self arg:X arg:copy arguments arg arg arg Assign Compare Assign Call Return return:yes Call"
  },
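A minimal usage sketch for the estimator this `transform` belongs to, using the public `sklearn.preprocessing.Binarizer` (data values are illustrative):

```python
import numpy as np
from sklearn.preprocessing import Binarizer

X = np.array([[1.0, -1.0, 2.0],
              [2.0, 0.0, 0.5]])
# Values strictly greater than the threshold map to 1, the rest to 0.
binarizer = Binarizer(threshold=0.9).fit(X)  # fit is a no-op for this estimator
print(binarizer.transform(X))
# [[1. 0. 1.]
#  [1. 0. 0.]]
```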
  {
    "library": "pytorch",
    "name": "Worker",
    "source_code": "class Worker:\n\n    @staticmethod\n    def set_device(device: int):\n        raise NotImplementedError\n\n    @staticmethod\n    def current_device() -> int:\n        raise NotImplementedError\n\n    @staticmethod\n    def get_device_properties(device: torch.types.Device=None):\n        raise NotImplementedError",
    "docstring": "Worker API to query device properties that will work in multi processing workers that cannot use the GPU APIs (due to processing fork() and initialization time issues). Properties are recorded in the main process before we fork the workers.",
    "type": "class",
    "file_path": "pytorch\\torch\\_dynamo\\device_interface.py",
    "ast_data": "ClassDef name:Worker FunctionDef name:set_device arg:device arguments arg Raise FunctionDef name:current_device arguments Raise FunctionDef name:get_device_properties arg:device arguments arg Raise"
  },
  {
    "library": "tensorflow",
    "name": "get_distrib_version",
    "source_code": "def get_distrib_version():\n    key = 'distrib_ver'\n    out, err = run_shell_cmd(cmds_all[PLATFORM][key])\n    if err and FLAGS.debug:\n        print('Error in detecting distribution version:\\n %s' % str(err))\n    return out.strip(b'\\n')",
    "docstring": "Retrieves distribution version of the operating system. Returns: String that is the distribution version. e.g. '14.04'",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\tools\\tensorflow_builder\\config_detector\\config_detector.py",
    "ast_data": "FunctionDef name:get_distrib_version arguments Assign Assign Call If BoolOp Call Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "KerasModeCombination",
    "source_code": "class KerasModeCombination(test_combinations.TestCombination):\n\n    def context_managers(self, kwargs):\n        run_eagerly = kwargs.pop('run_eagerly', None)\n        if run_eagerly is not None:\n            return [testing_utils.run_eagerly_scope(run_eagerly)]\n        else:\n            return []\n\n    def parameter_modifiers(self):\n        return [test_combinations.OptionalParameter('run_eagerly')]",
    "docstring": "Combination for Keras test mode. It by default includes v1_session, v2_eager and v2_tf_function.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\combinations.py",
    "ast_data": "ClassDef name:KerasModeCombination FunctionDef name:context_managers arg:self arg:kwargs arguments arg arg Assign Call If Compare Return return:yes Call Return return:no FunctionDef name:parameter_modifiers arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "LineLocation",
    "source_code": "class LineLocation(collections.namedtuple('LineLocation', ('filename', 'lineno'))):\n    pass",
    "docstring": "Similar to Location, but without column information. Attributes: filename: Text lineno: int, 1-based",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\autograph\\pyct\\origin_info.py",
    "ast_data": "ClassDef name:LineLocation Call"
  },
  {
    "library": "tensorflow",
    "name": "make_raw_scatter_xxx_fn",
    "source_code": "def make_raw_scatter_xxx_fn(raw_scatter_xxx_fn):\n\n    def scatter_xxx_fn(var, sparse_delta, use_locking=False, name=None):\n        del use_locking\n        handle = var.handle\n        with _maybe_enter_graph(handle), _maybe_on_device(var):\n            op = raw_scatter_xxx_fn(handle, sparse_delta.indices, ops.convert_to_tensor(sparse_delta.values, var.dtype), name=name)\n            with ops.control_dependencies([op]):\n                return var._read_variable_op()\n    return scatter_xxx_fn",
    "docstring": "Wrap so that it can be called w/ and w/o packed handle.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\tpu_util.py",
    "ast_data": "FunctionDef name:make_raw_scatter_xxx_fn arg:raw_scatter_xxx_fn arguments arg FunctionDef name:scatter_xxx_fn arg:var arg:sparse_delta arg:use_locking arg:name arguments arg arg arg arg Assign With Call Call Assign Call Call With Call Return return:yes Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "objects_ids_and_slot_variables_and_paths",
    "source_code": "def objects_ids_and_slot_variables_and_paths(graph_view, skip_slot_variables=False):\n    trackable_objects, node_paths = graph_view.breadth_first_traversal()\n    object_names = object_identity.ObjectIdentityDictionary()\n    for obj, path in node_paths.items():\n        object_names[obj] = trackable_utils.object_path_to_string(path)\n    node_ids = object_identity.ObjectIdentityDictionary()\n    for node_id, node in enumerate(trackable_objects):\n        node_ids[node] = node_id\n    if skip_slot_variables:\n        slot_variables = object_identity.ObjectIdentityDictionary()\n    else:\n        slot_variables = serialize_slot_variables(trackable_objects=trackable_objects, node_ids=node_ids, object_names=object_names)\n    return (trackable_objects, node_paths, node_ids, slot_variables, object_names)",
    "docstring": "Traverse the object graph and list all accessible objects. Looks for objects which are dependencies of . Includes slot variables only if the variable they are slotting for and the optimizer are dependencies of (i.e. if they would be saved with a checkpoint). Args: graph_view: A GraphView object. skip_slot_variables: If True does not return trackables for slot variable. Default False. Returns: A tuple of (trackable objects, paths from root for each object, object -> node id, slot variables, object_names)",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\checkpoint\\util.py",
    "ast_data": "FunctionDef name:objects_ids_and_slot_variables_and_paths arg:graph_view arg:skip_slot_variables arguments arg arg Assign Call Assign Call For Call Assign Call Assign Call For Call Assign If Assign Call Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "max_pooling1d",
    "source_code": "def max_pooling1d(inputs, pool_size, strides, padding='valid', data_format='channels_last', name=None):\n    warnings.warn('`tf.layers.max_pooling1d` is deprecated and will be removed in a future version. Please use `tf.keras.layers.MaxPooling1D` instead.')\n    layer = MaxPooling1D(pool_size=pool_size, strides=strides, padding=padding, data_format=data_format, name=name)\n    return layer.apply(inputs)",
    "docstring": "Max Pooling layer for 1D inputs. Args: inputs: The tensor over which to pool. Must have rank 3. pool_size: An integer or tuple/list of a single integer, representing the size of the pooling window. strides: An integer or tuple/list of a single integer, specifying the strides of the pooling operation. padding: A string. The padding method, either 'valid' or 'same'. Case-insensitive. data_format: A string, one of (default) or . The ordering of the dimensions in the inputs. corresponds to inputs with shape while corresponds to inputs with shape . name: A string, the name of the layer. Returns: The output tensor, of rank 3. Raises: ValueError: if eager execution is enabled.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\legacy_tf_layers\\pooling.py",
    "ast_data": "FunctionDef name:max_pooling1d arg:inputs arg:pool_size arg:strides arg:padding arg:data_format arg:name arguments arg arg arg arg arg arg Call Assign Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "ReuseInputObserver",
    "source_code": "class ReuseInputObserver(ObserverBase):\n\n    def __init__(self) -> None:\n        super().__init__(torch.quint8, is_dynamic=False)\n\n    def forward(self, x):\n        return x\n\n    @torch.jit.export\n    def calculate_qparams(self):\n        raise Exception('calculate_qparams should not be called for ReuseInputObserver')",
    "docstring": "This observer is used when we want to reuse the observer from the operator that produces the input Tensor, typically used for operators like reshape, e.g. if we configure x0 to be observed by some observer, let's say MinMaxObserver, and reshape is configured with ReuseInputObserver, we'll reuse the observer instance for x0 for x1 (output of reshape). If x0 is not observed, we also won't observe x1. Note: this is only enabled in FX Graph Mode Quantization",
    "type": "class",
    "file_path": "pytorch\\torch\\ao\\quantization\\observer.py",
    "ast_data": "ClassDef name:ReuseInputObserver FunctionDef name:__init__ arg:self arguments arg Call Call FunctionDef name:forward arg:self arg:x arguments arg arg Return return:yes FunctionDef name:calculate_qparams arg:self arguments arg Raise Call"
  },
  {
    "library": "tensorflow",
    "name": "GetSessionToolData",
    "source_code": "def GetSessionToolData(self, request, context):\n    context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n    context.set_details('Method not implemented!')\n    raise NotImplementedError('Method not implemented!')",
    "docstring": "Retrieve specific tool's data for specific session.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\tpu\\profiler\\profiler_analysis_pb2_grpc.py",
    "ast_data": "FunctionDef name:GetSessionToolData arg:self arg:request arg:context arguments arg arg arg Call Call Raise Call"
  },
  {
    "library": "scipy",
    "name": "ObjectPointer",
    "source_code": "class ObjectPointer(Pointer):\n    pass",
    "docstring": "Class used to define object pointers",
    "type": "class",
    "file_path": "scipy\\scipy\\io\\_idl.py",
    "ast_data": "ClassDef name:ObjectPointer"
  },
  {
    "library": "pytorch",
    "name": "is_user_opted_out",
    "source_code": "def is_user_opted_out(user: str, user_optins: UserOptins, experiment_name: str) -> bool:\n    experiment_optout = '-' + experiment_name\n    if experiment_optout not in user_optins.get(user, []):\n        return False\n    if is_user_opted_in(user, user_optins, experiment_name):\n        log.warning(f'User {user} is opted into experiment {experiment_name}, but also opted out of it. Defaulting to opting out')\n    return True",
    "docstring": "Check if a user explicitly opted out of an experiment",
    "type": "function",
    "file_path": "pytorch\\.github\\scripts\\runner_determinator.py",
    "ast_data": "FunctionDef name:is_user_opted_out arg:user arg:user_optins arg:experiment_name arguments arg arg arg Assign If Compare Call Return return:yes If Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_RealGrad",
    "source_code": "@ops.RegisterGradient('Real')\ndef _RealGrad(_, grad):\n    zero = constant_op.constant(0, dtype=grad.dtype)\n    return math_ops.complex(grad, zero)",
    "docstring": "Returns 'grad' as the real part and set the imaginary part 0.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\math_grad.py",
    "ast_data": "FunctionDef name:_RealGrad arg:_ arg:grad arguments arg arg Assign Call Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "build",
    "source_code": "def build(self):\n    super().build()\n    self._maybe_delete_sc_layouts_from_checkpoint()",
    "docstring": "Create variables and slots variables for TPU embeddings.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\tpu\\tpu_embedding_for_serving.py",
    "ast_data": "FunctionDef name:build arg:self arguments arg Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_equal_flops",
    "source_code": "@ops.RegisterStatistics('Equal', 'flops')\ndef _equal_flops(graph, node):\n    return _binary_per_element_op_flops(graph, node)",
    "docstring": "Compute flops for Equal operation.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\profiler\\internal\\flops_registry.py",
    "ast_data": "FunctionDef name:_equal_flops arg:graph arg:node arguments arg arg Return return:yes Call Call"
  },
  {
    "library": "django",
    "name": "ListOptionAction",
    "source_code": "class ListOptionAction(argparse.Action):\n\n    def __call__(self, parser, namespace, value, option_string=None):\n        if value.lower() == 'true':\n            setattr(namespace, self.dest, True)\n        else:\n            setattr(namespace, self.dest, value.split(','))",
    "docstring": "Custom argparse action for keywords that require a string list. If the string is 'True'/'true' then the option value will be a boolean instead.",
    "type": "class",
    "file_path": "django\\django\\contrib\\gis\\management\\commands\\ogrinspect.py",
    "ast_data": "ClassDef name:ListOptionAction FunctionDef name:__call__ arg:self arg:parser arg:namespace arg:value arg:option_string arguments arg arg arg arg arg If Compare Call Call Call Call"
  },
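The action is plain `argparse` machinery and can be exercised outside Django; a minimal sketch (the `--blank` option name is illustrative):

```python
import argparse

class ListOptionAction(argparse.Action):
    """Store 'True'/'true' as a boolean, anything else as a comma-split list."""
    def __call__(self, parser, namespace, value, option_string=None):
        if value.lower() == "true":
            setattr(namespace, self.dest, True)
        else:
            setattr(namespace, self.dest, value.split(","))

parser = argparse.ArgumentParser()
parser.add_argument("--blank", action=ListOptionAction)
print(parser.parse_args(["--blank", "name,slug"]).blank)  # ['name', 'slug']
print(parser.parse_args(["--blank", "True"]).blank)       # True
```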
  {
    "library": "tensorflow",
    "name": "multinomial_categorical_impl",
    "source_code": "def multinomial_categorical_impl(logits, num_samples, dtype, seed):\n    logits = ops.convert_to_tensor(logits, name='logits')\n    dtype = dtypes.as_dtype(dtype) if dtype else dtypes.int64\n    accepted_dtypes = (dtypes.int32, dtypes.int64)\n    if dtype not in accepted_dtypes:\n        raise ValueError(f'Argument `dtype` got invalid value {dtype}. Accepted dtypes are {accepted_dtypes}.')\n    seed1, seed2 = random_seed.get_seed(seed)\n    return gen_random_ops.multinomial(logits, num_samples, seed=seed1, seed2=seed2, output_dtype=dtype)",
    "docstring": "Implementation for random.categorical (v1) and random.categorical (v2).",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\random_ops.py",
    "ast_data": "FunctionDef name:multinomial_categorical_impl arg:logits arg:num_samples arg:dtype arg:seed arguments arg arg arg arg Assign Call Assign Call Assign If Compare Raise Call Assign Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_FixedLengthRecordDataset",
    "source_code": "class _FixedLengthRecordDataset(dataset_ops.DatasetSource):\n\n    def __init__(self, filenames, record_bytes, header_bytes=None, footer_bytes=None, buffer_size=None, compression_type=None, name=None):\n        self._filenames = filenames\n        self._record_bytes = ops.convert_to_tensor(record_bytes, dtype=dtypes.int64, name='record_bytes')\n        self._header_bytes = convert.optional_param_to_tensor('header_bytes', header_bytes)\n        self._footer_bytes = convert.optional_param_to_tensor('footer_bytes', footer_bytes)\n        self._buffer_size = convert.optional_param_to_tensor('buffer_size', buffer_size, _DEFAULT_READER_BUFFER_SIZE_BYTES)\n        self._compression_type = convert.optional_param_to_tensor('compression_type', compression_type, argument_default='', argument_dtype=dtypes.string)\n        self._name = name\n        variant_tensor = gen_dataset_ops.fixed_length_record_dataset_v2(self._filenames, self._header_bytes, self._record_bytes, self._footer_bytes, self._buffer_size, self._compression_type, metadata=self._metadata.SerializeToString())\n        super(_FixedLengthRecordDataset, self).__init__(variant_tensor)\n\n    @property\n    def element_spec(self):\n        return tensor_spec.TensorSpec([], dtypes.string)",
    "docstring": "A of fixed-length records from one or more binary files.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\data\\ops\\readers.py",
    "ast_data": "ClassDef name:_FixedLengthRecordDataset FunctionDef name:__init__ arg:self arg:filenames arg:record_bytes arg:header_bytes arg:footer_bytes arg:buffer_size arg:compression_type arg:name arguments arg arg arg arg arg arg arg arg Assign Assign Call Assign Call Assign Call Assign Call Assign Call Assign Assign Call Call Call Call FunctionDef name:element_spec arg:self arguments arg Return return:yes Call"
  },
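The public counterpart of this internal class is `tf.data.FixedLengthRecordDataset`; a minimal sketch that writes three 4-byte records and reads them back (file name is illustrative):

```python
import tensorflow as tf

# Write 3 records of 4 bytes each, then read them back as fixed-length records.
with open("records.bin", "wb") as f:
    f.write(b"aaaabbbbcccc")

ds = tf.data.FixedLengthRecordDataset("records.bin", record_bytes=4)
for rec in ds:
    print(rec.numpy())  # b'aaaa', b'bbbb', b'cccc'
```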
  {
    "library": "django",
    "name": "modify_insert_params",
    "source_code": "def modify_insert_params(self, placeholder, params):\n    return params",
    "docstring": "Allow modification of insert parameters. Needed for Oracle Spatial backend due to #10888.",
    "type": "method",
    "file_path": "django\\django\\db\\backends\\base\\operations.py",
    "ast_data": "FunctionDef name:modify_insert_params arg:self arg:placeholder arg:params arguments arg arg arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "get_gemm_function_call",
    "source_code": "def get_gemm_function_call(self, kernel: CppTemplateKernel, function_name: str, placeholder: str, b_index: str) -> str:\n\n    def hook():\n        arg_defs, call_args, _, _ = kernel.args.python_argdefs()\n        for i, buf in enumerate(call_args):\n            if buf == self.b_index:\n                arg_defs[i] = ArgName(b_index)\n        call = f'{function_name}({', '.join((x.full_name() for x in arg_defs))});'\n        return call\n    assert placeholder not in kernel.render_hooks\n    kernel.render_hooks[placeholder] = hook\n    return placeholder",
    "docstring": "Similar to 'def_kernel' in cpp_template_kernel, but instead of generating a function definition, generate a function call for the GEMM kernel. Args: placeholder: The string to replace the function call with b_index: The index for slicing the 3D batch tensors",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\codegen\\cpp_bmm_template.py",
    "ast_data": "FunctionDef name:get_gemm_function_call arg:self arg:kernel arg:function_name arg:placeholder arg:b_index arguments arg arg arg arg arg FunctionDef name:hook arguments Assign Call For Call If Compare Assign Call Assign Call Call Return return:yes Compare Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "remove_function",
    "source_code": "def remove_function(self, name):\n    self.ensure_initialized()\n    pywrap_tfe.TFE_ContextRemoveFunction(self._handle, name)",
    "docstring": "Remove a function from the context. Once removed, the function cannot be executed anymore. Args: name: function signature name.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\context.py",
    "ast_data": "FunctionDef name:remove_function arg:self arg:name arguments arg arg Call Call"
  },
  {
    "library": "kornia",
    "name": "compute_inverse_transformation",
    "source_code": "def compute_inverse_transformation(self, transform: Tensor) -> Tensor:\n    return _torch_inverse_cast(transform)",
    "docstring": "Compute the inverse transform of given transformation matrices.",
    "type": "method",
    "file_path": "kornia\\kornia\\augmentation\\_2d\\geometric\\base.py",
    "ast_data": "FunctionDef name:compute_inverse_transformation arg:self arg:transform arguments arg arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "_pid_namespace_link",
    "source_code": "def _pid_namespace_link(pid: Optional[int]=None) -> str:\n    PID_NAMESPACE_PATH = '/proc/{}/ns/pid'\n    pid = pid or os.getpid()\n    return os.readlink(PID_NAMESPACE_PATH.format(pid))",
    "docstring": "Returns the link to the process's namespace, example: pid:[4026531836]",
    "type": "function",
    "file_path": "pytorch\\torch\\_strobelight\\cli_function_profiler.py",
    "ast_data": "FunctionDef name:_pid_namespace_link arg:pid arguments arg Assign Assign BoolOp Call Return return:yes Call Call"
  },
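The helper boils down to resolving one `/proc` symlink; a minimal Linux-only sketch (the `pid_namespace` wrapper is a hypothetical stand-in for the private helper):

```python
import os

# On Linux, /proc/<pid>/ns/pid is a symlink such as 'pid:[4026531836]';
# two processes share a PID namespace iff the resolved links are equal.
def pid_namespace(pid=None):
    return os.readlink(f"/proc/{pid or os.getpid()}/ns/pid")

if os.path.exists("/proc"):  # skip on non-Linux systems
    print(pid_namespace())
```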
  {
    "library": "scikit-learn",
    "name": "diag",
    "source_code": "def diag(self, X):\n    return self.kernel.diag(X) ** self.exponent",
    "docstring": "Returns the diagonal of the kernel k(X, X). The result of this method is identical to np.diag(self(X)); however, it can be evaluated more efficiently since only the diagonal is evaluated. Parameters ---------- X : array-like of shape (n_samples_X, n_features) or list of object Argument to the kernel. Returns ------- K_diag : ndarray of shape (n_samples_X,) Diagonal of kernel k(X, X)",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\gaussian_process\\kernels.py",
    "ast_data": "FunctionDef name:diag arg:self arg:X arguments arg arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "StepStatsAnalysis",
    "source_code": "class StepStatsAnalysis(collections.namedtuple('StepStatsAnalysis', ('chrome_trace', 'allocator_maximums'))):\n    pass",
    "docstring": "Stores the step stats analysis output. Parameters: chrome_trace: A dict containing the chrome trace analysis. allocator_maximums: A dict mapping allocator names to AllocationMaximum.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\client\\timeline.py",
    "ast_data": "ClassDef name:StepStatsAnalysis Call"
  },
  {
    "library": "pandas",
    "name": "_get_cell_string_value",
    "source_code": "def _get_cell_string_value(self, cell) -> str:\n    from odf.element import Element\n    from odf.namespaces import TEXTNS\n    from odf.office import Annotation\n    from odf.text import S\n    office_annotation = Annotation().qname\n    text_s = S().qname\n    value = []\n    for fragment in cell.childNodes:\n        if isinstance(fragment, Element):\n            if fragment.qname == text_s:\n                spaces = int(fragment.attributes.get((TEXTNS, 'c'), 1))\n                value.append(' ' * spaces)\n            elif fragment.qname == office_annotation:\n                continue\n            else:\n                value.append(self._get_cell_string_value(fragment))\n        else:\n            value.append(str(fragment).strip('\\n'))\n    return ''.join(value)",
    "docstring": "Find and decode OpenDocument text:s tags that represent a run length encoded sequence of space characters.",
    "type": "method",
    "file_path": "pandas\\pandas\\io\\excel\\_odfreader.py",
    "ast_data": "FunctionDef name:_get_cell_string_value arg:self arg:cell arguments arg arg Assign Call Assign Call Assign For If Call If Compare Assign Call Call Call If Compare Call Call Call Call Call Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "to_frame",
    "source_code": "def to_frame(self, index: bool=True, name=lib.no_default, allow_duplicates: bool=False) -> DataFrame:\n    from pandas import DataFrame\n    if name is not lib.no_default:\n        if not is_list_like(name):\n            raise TypeError(\"'name' must be a list / sequence of column names.\")\n        if len(name) != len(self.levels):\n            raise ValueError(\"'name' should have same length as number of levels on index.\")\n        idx_names = name\n    else:\n        idx_names = self._get_level_names()\n    if not allow_duplicates and len(set(idx_names)) != len(idx_names):\n        raise ValueError('Cannot create duplicate column labels if allow_duplicates is False')\n    result = DataFrame({level: self._get_level_values(level) for level in range(len(self.levels))}, copy=False)\n    result.columns = idx_names\n    if index:\n        result.index = self\n    return result",
    "docstring": "Create a DataFrame with the levels of the MultiIndex as columns. Column ordering is determined by the DataFrame constructor with data as a dict. Parameters ---------- index : bool, default True Set the index of the returned DataFrame as the original MultiIndex. name : list / sequence of str, optional The passed names should substitute index level names. allow_duplicates : bool, optional default False Allow duplicate column labels to be created. .. versionadded:: 1.5.0 Returns ------- DataFrame DataFrame representation of the MultiIndex, with levels as columns. See Also -------- DataFrame : Two-dimensional, size-mutable, potentially heterogeneous tabular data. Examples -------- >>> mi = pd.MultiIndex.from_arrays([[\"a\", \"b\"], [\"c\", \"d\"]]) >>> mi MultiIndex([('a', 'c'), ('b', 'd')], ) >>> df = mi.to_frame() >>> df 0 1 a c a c b d b d >>> df = mi.to_frame(index=False) >>> df 0 1 0 a c 1 b d >>> df = mi.to_frame(name=[\"x\", \"y\"]) >>> df x y a c a c b d b d",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\indexes\\multi.py",
    "ast_data": "FunctionDef name:to_frame arg:self arg:index arg:name arg:allow_duplicates arguments arg arg arg arg If Compare If Call Raise Call If Compare Call Call Raise Call Assign Assign Call If BoolOp Compare Call Call Call Raise Call Assign Call Call Call Call Assign If Assign Return return:yes"
  },
  {
    "library": "pandas",
    "name": "__eq__",
    "source_code": "def __eq__(self, other: object) -> bool:\n    if isinstance(other, str):\n        return other == self.name\n    elif other is self:\n        return True\n    elif not (hasattr(other, 'ordered') and hasattr(other, 'categories')):\n        return False\n    elif self.categories is None or other.categories is None:\n        return self.categories is other.categories\n    elif self.ordered or other.ordered:\n        return self.ordered == other.ordered and self.categories.equals(other.categories)\n    else:\n        left = self.categories\n        right = other.categories\n        if not left.dtype == right.dtype:\n            return False\n        if len(left) != len(right):\n            return False\n        if self.categories.equals(other.categories):\n            return True\n        if left.dtype != object:\n            indexer = left.get_indexer(right)\n            return bool((indexer != -1).all())\n        return set(left) == set(right)",
    "docstring": "Rules for CDT equality: 1) Any CDT is equal to the string 'category' 2) Any CDT is equal to itself 3) Any CDT is equal to a CDT with categories=None regardless of ordered 4) A CDT with ordered=True is only equal to another CDT with ordered=True and identical categories in the same order 5) A CDT with ordered={False, None} is only equal to another CDT with ordered={False, None} and identical categories, but same order is not required. There is no distinction between False/None. 6) Any other comparison returns False",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\dtypes\\dtypes.py",
    "ast_data": "FunctionDef name:__eq__ arg:self arg:other arguments arg arg If Call Return return:yes Compare If Compare Return return:yes If BoolOp Call Call Return return:yes If BoolOp Compare Compare Return return:yes Compare If BoolOp Return return:yes BoolOp Compare Call Assign Assign If Compare Return return:yes If Compare Call Call Return return:yes If Call Return return:yes If Compare Assign Call Return return:yes Call Call Compare Return return:yes Compare Call Call"
  },
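The equality rules above can be checked directly with the public `pandas.CategoricalDtype`; a short sketch (category values are illustrative):

```python
from pandas import CategoricalDtype

a = CategoricalDtype(["x", "y"], ordered=False)
b = CategoricalDtype(["y", "x"], ordered=False)
c = CategoricalDtype(["x", "y"], ordered=True)

print(a == "category")  # True: rule 1, any CDT equals the string 'category'
print(a == b)           # True: rule 5, unordered with same categories in any order
print(a == c)           # False: rule 4, the ordered flag differs
# Two dtypes with categories=None compare equal regardless of ordered:
print(CategoricalDtype(None) == CategoricalDtype(None, ordered=True))  # True
```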
  {
    "library": "tensorflow",
    "name": "_find_executable_or_die",
    "source_code": "def _find_executable_or_die(executable_name: str, executable_path: Optional[str]=None) -> str:\n    if executable_path:\n        return str(pathlib.Path(executable_path).resolve(strict=True))\n    resolved_path_to_exe = _find_executable(executable_name)\n    if resolved_path_to_exe is None:\n        raise RuntimeError(f'Could not find executable `{executable_name}`! Please change your $PATH or pass the path directly like`--{executable_name}_path=path/to/executable.')\n    logging.info('Found path to %s at %s', executable_name, resolved_path_to_exe)\n    return resolved_path_to_exe",
    "docstring": "Finds executable and resolves symlinks or raises RuntimeError. Resolving symlinks is sometimes necessary for finding system headers. Args: executable_name: The name of the executable that we want to find. executable_path: If not None, the path to the executable. Returns: The path to the executable we are looking for, after symlinks are resolved. Raises: RuntimeError: if path to the executable cannot be found.",
    "type": "function",
    "file_path": "tensorflow\\third_party\\xla\\build_tools\\configure\\configure.py",
    "ast_data": "FunctionDef name:_find_executable_or_die arg:executable_name arg:executable_path arguments arg arg If Return return:yes Call Call Call Assign Call If Compare Raise Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "normalize",
    "source_code": "def normalize(self) -> 'MemoryDep':\n    return MemoryDep(self.name, *_RecordLoadStoreInner._normalize(self.index, self.ranges), self.mode)",
    "docstring": "Normalize by merging loops. The different to normalize_with_stride_order is, this method does not reorder loops while normalize_with_stride_order reorder loops based on stride order.",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\dependencies.py",
    "ast_data": "FunctionDef name:normalize arg:self arguments arg Return return:yes Call Call"
  },
  {
    "library": "kornia",
    "name": "cauchy_loss",
    "source_code": "def cauchy_loss(img1: Tensor, img2: Tensor, reduction: str='none') -> Tensor:\n    KORNIA_CHECK_IS_TENSOR(img1)\n    KORNIA_CHECK_IS_TENSOR(img2)\n    KORNIA_CHECK_SAME_SHAPE(img1, img2)\n    KORNIA_CHECK_SAME_DEVICE(img1, img2)\n    KORNIA_CHECK(reduction in ('mean', 'sum', 'none', None), f'Given type of reduction is not supported. Got: {reduction}')\n    loss = (0.5 * (img1 - img2) ** 2 + 1.0).log()\n    if reduction == 'mean':\n        loss = loss.mean()\n    elif reduction == 'sum':\n        loss = loss.sum()\n    elif reduction == 'none' or reduction is None:\n        pass\n    else:\n        raise NotImplementedError('Invalid reduction option.')\n    return loss",
    "docstring": "Criterion that computes the Cauchy [2] (aka. Lorentzian) loss. According to [1], we compute the Cauchy loss as follows: .. math:: \\text{WL}(x, y) = log(\\frac{1}{2} (x - y)^{2} + 1) Where: - :math: is the prediction. - :math: is the target to be regressed to. Reference: [1] [2] Args: img1: the predicted tensor with shape :math:. img2: the target tensor with the same shape as img1. reduction: Specifies the reduction to apply to the output: ``: the output will be summed. Return: a scalar with the computed loss. Example: >>> img1 = torch.randn(2, 3, 32, 32, requires_grad=True) >>> img2 = torch.randn(2, 3, 32, 32) >>> output = cauchy_loss(img1, img2, reduction=\"mean\") >>> output.backward()",
    "type": "function",
    "file_path": "kornia\\kornia\\losses\\cauchy.py",
    "ast_data": "FunctionDef name:cauchy_loss arg:img1 arg:img2 arg:reduction arguments arg arg arg Call Call Call Call Call Compare Assign Call If Compare Assign Call If Compare Assign Call If BoolOp Compare Compare Raise Call Return return:yes"
  },
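The core of the loss is a single line of tensor math; a dependency-free sketch that mirrors the formula but omits kornia's shape/device checks (`cauchy_loss_sketch` is a hypothetical stand-in):

```python
import torch

def cauchy_loss_sketch(img1, img2, reduction="none"):
    # log(0.5 * (x - y)^2 + 1), elementwise
    loss = (0.5 * (img1 - img2) ** 2 + 1.0).log()
    if reduction == "mean":
        return loss.mean()
    if reduction == "sum":
        return loss.sum()
    return loss

out = cauchy_loss_sketch(torch.randn(2, 3), torch.randn(2, 3), reduction="mean")
print(out.shape)  # torch.Size([])
```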
  {
    "library": "scikit-learn",
    "name": "predict",
    "source_code": "def predict(self, X):\n    probas = self.predict_proba(X)\n    return self.classes_[np.argmax(probas, axis=1)].ravel()",
    "docstring": "Perform inductive inference across the model. Parameters ---------- X : array-like of shape (n_samples, n_features) The data matrix. Returns ------- y : ndarray of shape (n_samples,) Predictions for input data.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\semi_supervised\\_label_propagation.py",
    "ast_data": "FunctionDef name:predict arg:self arg:X arguments arg arg Assign Call Return return:yes Call Call"
  },
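The decision rule is an argmax over `predict_proba`; a sketch with `sklearn.semi_supervised.LabelPropagation` on toy data (data values are illustrative):

```python
import numpy as np
from sklearn.semi_supervised import LabelPropagation

X = np.array([[0.0], [0.1], [5.0], [5.1]])
y = np.array([0, -1, 1, -1])  # -1 marks unlabeled samples

model = LabelPropagation().fit(X, y)
probas = model.predict_proba(X)
# predict() is equivalent to taking the most probable class per row:
assert (model.predict(X) == model.classes_[np.argmax(probas, axis=1)]).all()
```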
  {
    "library": "pandas",
    "name": "references_same_values",
    "source_code": "def references_same_values(self, mgr: BaseBlockManager, blkno: int) -> bool:\n    blk = self.blocks[blkno]\n    return any((blk is ref() for ref in mgr.blocks[blkno].refs.referenced_blocks))",
    "docstring": "Checks if two blocks from two different block managers reference the same underlying values.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\internals\\managers.py",
    "ast_data": "FunctionDef name:references_same_values arg:self arg:mgr arg:blkno arguments arg arg arg Assign Return return:yes Call Compare Call"
  },
  {
    "library": "tensorflow",
    "name": "transform_feature",
    "source_code": "def transform_feature(self, transformation_cache, state_manager):\n    input_tensor = _to_sparse_input_and_drop_ignore_values(transformation_cache.get(self.key, state_manager))\n    return self._transform_input_tensor(input_tensor)",
    "docstring": "Hashes the values in the feature_column.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\feature_column\\feature_column_v2.py",
    "ast_data": "FunctionDef name:transform_feature arg:self arg:transformation_cache arg:state_manager arguments arg arg arg Assign Call Call Return return:yes Call"
  },
  {
    "library": "django",
    "name": "db_returning",
    "source_code": "@property\ndef db_returning(self):\n    return self.has_db_default() and connection.features.can_return_columns_from_insert",
    "docstring": "Private API intended only to be used by Django itself.",
    "type": "method",
    "file_path": "django\\django\\db\\models\\fields\\__init__.py",
    "ast_data": "FunctionDef name:db_returning arg:self arguments arg Return return:yes BoolOp Call"
  },
  {
    "library": "pytorch",
    "name": "batch_stride",
    "source_code": "def batch_stride(self, node: IRNode, default_value: int=0) -> str:\n    if node is None:\n        return str(default_value)\n    if len(node.get_size()) < 3:\n        return str(default_value)\n    batch_stride = node.get_stride()[0]\n    if V.graph.sizevars.statically_known_leq(batch_stride, 1):\n        return str(batch_stride)\n    return '{}*{}'.format(self.find_symbol(node, 'size', dim=1) or node.get_size()[1], self.find_symbol(node, 'size', dim=2) or node.get_size()[2])",
    "docstring": "Hook called from template code to get the batch stride of an arg. Returns 0 if batch dim is not present. This method assumes that batch stride is the largest stride.",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\codegen\\cuda\\cuda_kernel.py",
    "ast_data": "FunctionDef name:batch_stride arg:self arg:node arg:default_value arguments arg arg arg If Compare Return return:yes Call If Compare Call Call Return return:yes Call Assign Call If Call Return return:yes Call Return return:yes Call BoolOp Call Call BoolOp Call Call"
  },
  {
    "library": "tensorflow",
    "name": "variables",
    "source_code": "def variables(self):\n    return self._optimizer.variables() + list(self._loss_scale._weights.values())",
    "docstring": "Returns the variables of the Optimizer.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\training\\experimental\\loss_scale_optimizer.py",
    "ast_data": "FunctionDef name:variables arg:self arguments arg Return return:yes Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_copy_trackable_to_cpu",
    "source_code": "def _copy_trackable_to_cpu(self, object_map):\n    if self not in object_map:\n        object_map[self] = MutableHashTable(self._key_dtype, self._value_dtype, self._default_value, self._name, self._checkpoint, self._is_anonymous)\n    serialized = self._serialize_to_tensors()\n    object_map[self]._restore_from_tensors(serialized)",
    "docstring": "Implements checkpointing protocols for .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\lookup_ops.py",
    "ast_data": "FunctionDef name:_copy_trackable_to_cpu arg:self arg:object_map arguments arg arg If Compare Assign Call Assign Call Call"
  },
  {
    "library": "django",
    "name": "FullResultSet",
    "source_code": "class FullResultSet(Exception):\n    pass",
    "docstring": "A database query predicate is matches everything.",
    "type": "class",
    "file_path": "django\\django\\core\\exceptions.py",
    "ast_data": "ClassDef name:FullResultSet"
  },
  {
    "library": "pandas",
    "name": "mean",
    "source_code": "def mean(self, axis: Axis=0, *args, **kwargs):\n    nv.validate_mean(args, kwargs)\n    valid_vals = self._valid_sp_values\n    sp_sum = valid_vals.sum()\n    ct = len(valid_vals)\n    if self._null_fill_value:\n        return sp_sum / ct\n    else:\n        nsparse = self.sp_index.ngaps\n        return (sp_sum + self.fill_value * nsparse) / (ct + nsparse)",
    "docstring": "Mean of non-NA/null values Returns ------- mean : float",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\arrays\\sparse\\array.py",
    "ast_data": "FunctionDef name:mean arg:self arg:axis arguments arg arg arg arg Call Assign Assign Call Assign Call If Return return:yes Assign Return return:yes"
  },
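The fill-value correction in `mean` is visible on a small array; a sketch with the public `pandas.arrays.SparseArray`:

```python
import pandas as pd

# fill_value=0 is not NA, so the stored gaps still count toward the mean:
# (sp_sum + fill_value * ngaps) / (n_valid + ngaps).
arr = pd.arrays.SparseArray([1, 0, 0, 3], fill_value=0)
print(arr.mean())  # (1 + 3) / 4 = 1.0
```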
  {
    "library": "matplotlib",
    "name": "get_wxcolour",
    "source_code": "def get_wxcolour(self, color):\n    _log.debug('%s - get_wx_color()', type(self))\n    return wx.Colour(*[int(255 * x) for x in color])",
    "docstring": "Convert an RGB(A) color to a wx.Colour.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\backends\\backend_wx.py",
    "ast_data": "FunctionDef name:get_wxcolour arg:self arg:color arguments arg arg Call Call Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "to_double",
    "source_code": "@tf_export(v1=['to_double'])\n@dispatch.register_unary_elementwise_api\n@dispatch.add_dispatch_support\n@deprecation.deprecated(date=None, instructions='Use `tf.cast` instead.')\ndef to_double(x, name='ToDouble'):\n    return cast(x, dtypes.float64, name=name)",
    "docstring": "Casts a tensor to type . Args: x: A or or . name: A name for the operation (optional). Returns: A or or with same shape as with type . Raises: TypeError: If cannot be cast to the . @compatibility(TF2) This name was deprecated and removed in TF2, but has an exact replacement . There are no further issues with eager execution or tf.function. Before: >>> tf.compat.v1.to_double(tf.constant(3.14, dtype=tf.float32)) After: >>> tf.cast(tf.constant(3.14, dtype=tf.float32), tf.double) @end_compatibility",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\math_ops.py",
    "ast_data": "FunctionDef name:to_double arg:x arg:name arguments arg arg Return return:yes Call Call Call"
  },
  {
    "library": "django",
    "name": "send_messages",
    "source_code": "def send_messages(self, messages):\n    msg_count = 0\n    for message in messages:\n        message.message()\n        mail.outbox.append(copy.deepcopy(message))\n        msg_count += 1\n    return msg_count",
    "docstring": "Redirect messages to the dummy outbox",
    "type": "method",
    "file_path": "django\\django\\core\\mail\\backends\\locmem.py",
    "ast_data": "FunctionDef name:send_messages arg:self arg:messages arguments arg arg Assign For Call Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "lu",
    "source_code": "def lu(self, pivot=True, get_infos=False):\n    if has_torch_function_unary(self):\n        return handle_torch_function(Tensor.lu, (self,), self, pivot=pivot, get_infos=get_infos)\n    LU, pivots, infos = torch._lu_with_info(self, pivot=pivot, check_errors=not get_infos)\n    if get_infos:\n        return (LU, pivots, infos)\n    else:\n        return (LU, pivots)",
    "docstring": "See :func:",
    "type": "method",
    "file_path": "pytorch\\torch\\_tensor.py",
    "ast_data": "FunctionDef name:lu arg:self arg:pivot arg:get_infos arguments arg arg arg If Call Return return:yes Call Assign Call If Return return:yes Return return:yes"
  },
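A minimal usage sketch, verifying the factorization with `torch.lu_unpack` (note that newer releases recommend `torch.linalg.lu_factor` over `Tensor.lu`):

```python
import torch

A = torch.randn(3, 3)
LU, pivots = A.lu()
P, L, U = torch.lu_unpack(LU, pivots)
# P @ L @ U reconstructs A up to floating-point error.
print(torch.allclose(P @ L @ U, A, atol=1e-6))
```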
  {
    "library": "scrapy",
    "name": "start_itemproc",
    "source_code": "def start_itemproc(self, item: Any, *, response: Response | None) -> Deferred[None]:\n    return deferred_from_coro(self.start_itemproc_async(item, response=response))",
    "docstring": "Send *item* to the item pipelines for processing. *response* is the source of the item data. If the item does not come from response data, e.g. it was hard-coded, set it to ``.",
    "type": "method",
    "file_path": "scrapy\\scrapy\\core\\scraper.py",
    "ast_data": "FunctionDef name:start_itemproc arg:self arg:item arguments arg arg arg Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "DatasetSource",
    "source_code": "class DatasetSource(DatasetV2):\n\n    def _inputs(self):\n        return []",
    "docstring": "Abstract class representing a dataset with no inputs.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\data\\ops\\dataset_ops.py",
    "ast_data": "ClassDef name:DatasetSource FunctionDef name:_inputs arg:self arguments arg Return return:no"
  },
  {
    "library": "tensorflow",
    "name": "create_slot_with_initializer",
    "source_code": "def create_slot_with_initializer(primary, initializer, shape, dtype, name, colocate_with_primary=True, *, copy_xla_sharding=False):\n    validate_shape = shape.is_fully_defined()\n    if isinstance(primary, variables.Variable):\n        prefix = primary._shared_name\n    else:\n        prefix = primary.op.name\n    with variable_scope.variable_scope(None, prefix + '/' + name):\n        if colocate_with_primary:\n            distribution_strategy = distribute_lib.get_strategy()\n            with distribution_strategy.extended.colocate_vars_with(primary):\n                return _create_slot_var(primary, initializer, '', validate_shape, shape, dtype, copy_xla_sharding=copy_xla_sharding)\n        else:\n            return _create_slot_var(primary, initializer, '', validate_shape, shape, dtype, copy_xla_sharding=copy_xla_sharding)",
    "docstring": "Creates a slot initialized using an . The type of the slot is determined by the given value. Args: primary: The primary or . initializer: An . The initial value of the slot. shape: Shape of the initial value of the slot. dtype: Type of the value of the slot. name: Name to use for the slot variable. colocate_with_primary: Boolean. If True the slot is located on the same device as . copy_xla_sharding: Boolean. If True also copies XLA sharding from primary. Returns: A object.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\training\\slot_creator.py",
    "ast_data": "FunctionDef name:create_slot_with_initializer arg:primary arg:initializer arg:shape arg:dtype arg:name arg:colocate_with_primary arguments arg arg arg arg arg arg arg Assign Call If Call Assign Assign With Call If Assign Call With Call Return return:yes Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "_type",
    "source_code": "def _type(self, dtype=None, non_blocking=False, **kwargs):\n    non_blocking = _get_async_or_non_blocking('type', non_blocking, kwargs)\n    if dtype is None:\n        return self.__module__ + '.' + self.__class__.__name__\n    if isinstance(dtype, str):\n        dtype = _import_dotted_name(dtype)\n    if dtype == type(self):\n        return self\n    if self.is_sparse:\n        if not dtype.is_sparse:\n            raise RuntimeError('Cannot cast sparse tensor to dense tensor')\n        new_module_name = dtype.__module__.replace('.sparse', '')\n        new_values_type_name = new_module_name + '.' + dtype.__name__\n        new_values = torch.Tensor._values(self).type(new_values_type_name, non_blocking)\n        new_indices_type_name = new_module_name + '.LongTensor'\n        new_indices = torch.Tensor._indices(self).type(new_indices_type_name, non_blocking)\n        return dtype(new_indices, new_values, self.size())\n    if dtype.is_sparse:\n        raise RuntimeError('Cannot cast dense tensor to sparse tensor')\n    return dtype(self.size()).copy_(self, non_blocking)",
    "docstring": "Returns the type if is not provided, else casts this object to the specified type. If this is already of the correct type, no copy is performed and the original object is returned. Args: dtype (type or string): The desired type non_blocking (bool): If `` arg is deprecated.",
    "type": "function",
    "file_path": "pytorch\\torch\\_utils.py",
    "ast_data": "FunctionDef name:_type arg:self arg:dtype arg:non_blocking arguments arg arg arg arg Assign Call If Compare Return return:yes If Call Assign Call If Compare Call Return return:yes If If Raise Call Assign Call Assign Assign Call Call Assign Assign Call Call Return return:yes Call Call If Raise Call Return return:yes Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "graph_execution_traces",
    "source_code": "def graph_execution_traces(self, digest=False, begin=None, end=None):\n    digests = self._graph_execution_trace_digests\n    if begin is not None or end is not None:\n        begin = begin or 0\n        end = end or len(digests)\n        digests = digests[begin:end]\n    if digest:\n        return digests\n    else:\n        return [self.read_graph_execution_trace(digest) for digest in digests]",
    "docstring": "Get all the intra-graph execution tensor traces read so far. Args: digest: Whether the results will be returned in the more light-weight digest form. begin: Optional beginning index for the requested traces or their digests. Python-style negative indices are supported. end: Optional ending index for the requested traces or their digests. Python-style negative indices are supported. Returns: If : a of objects. Else: a of objects.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\debug\\lib\\debug_events_reader.py",
    "ast_data": "FunctionDef name:graph_execution_traces arg:self arg:digest arg:begin arg:end arguments arg arg arg arg Assign If BoolOp Compare Compare Assign BoolOp Assign BoolOp Call Assign If Return return:yes Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "_todata",
    "source_code": "def _todata(s) -> np.ndarray:\n    if isinstance(s, sp._data._data_matrix):\n        return s._deduped_data()\n    if isinstance(s, sp.dok_array):\n        return np.fromiter(s.values(), dtype=s.dtype, count=s.nnz)\n    if isinstance(s, sp.lil_array):\n        data = np.empty(s.nnz, dtype=s.dtype)\n        sp._csparsetools.lil_flatten_to_array(s.data, data)\n        return data\n    return s.tocoo()._deduped_data()",
    "docstring": "Access nonzero values, possibly after summing duplicates. Parameters ---------- s : sparse array Input sparse array. Returns ------- data: ndarray Nonzero values of the array, with shape (s.nnz,)",
    "type": "function",
    "file_path": "scipy\\scipy\\sparse\\_sputils.py",
    "ast_data": "FunctionDef name:_todata arg:s arguments arg If Call Return return:yes Call If Call Return return:yes Call Call If Call Assign Call Call Return return:yes Return return:yes Call Call"
  },
  {
    "library": "scipy",
    "name": "logpdf",
    "source_code": "def logpdf(self, x, loc=None, shape=1, df=1):\n    dim, loc, shape, df = self._process_parameters(loc, shape, df)\n    x = self._process_quantiles(x, dim)\n    shape_info = _PSD(shape)\n    return self._logpdf(x, loc, shape_info.U, shape_info.log_pdet, df, dim, shape_info.rank)",
    "docstring": "Log of the multivariate t-distribution probability density function. Parameters ---------- x : array_like Points at which to evaluate the log of the probability density function. %(_mvt_doc_default_callparams)s Returns ------- logpdf : Log of the probability density function evaluated at . Examples -------- >>> from scipy.stats import multivariate_t >>> x = [0.4, 5] >>> loc = [0, 1] >>> shape = [[1, 0.1], [0.1, 1]] >>> df = 7 >>> multivariate_t.logpdf(x, loc, shape, df) -7.1859802 See Also -------- pdf : Probability density function.",
    "type": "method",
    "file_path": "scipy\\scipy\\stats\\_multivariate.py",
    "ast_data": "FunctionDef name:logpdf arg:self arg:x arg:loc arg:shape arg:df arguments arg arg arg arg arg Assign Call Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "recheck_autotune_cache",
    "source_code": "def recheck_autotune_cache(self, reload_kernel_from_src: Callable[[], CachingAutotuner]) -> None:\n    assert self.is_statically_launchable()\n    configs = [result.config for result in self.compile_results]\n    cached_configs, _, autotune_cache_info = check_autotune_cache(configs, self.filename, self.inductor_meta)\n    self.autotune_cache_info = autotune_cache_info\n    if len(cached_configs) == 1 and len(configs) > 1:\n        best_config = cached_configs[0]\n        best_config_hash = triton_config_to_hashable(best_config)\n        for compile_result in self.compile_results:\n            if triton_config_to_hashable(compile_result.config) == best_config_hash:\n                self.compile_results = [compile_result]\n                return\n        if best_config.found_by_coordesc:\n            with dynamo_timed('CachingAutotuner.slow_precompile_config'):\n                if self.fn.fn is None:\n                    self.fn = reload_kernel_from_src().fn\n                self.compile_results = [self._precompile_config(best_config)]",
    "docstring": "On cache load on static autotuner, we need to recheck the autotune cache, since a best config could have been found from a previous run",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\runtime\\triton_heuristics.py",
    "ast_data": "FunctionDef name:recheck_autotune_cache arg:self arg:reload_kernel_from_src arguments arg arg Call Assign Assign Call Assign If BoolOp Compare Call Compare Call Assign Assign Call For If Compare Call Assign Return return:no If With Call If Compare Assign Call Assign Call"
  },
  {
    "library": "matplotlib",
    "name": "get_texts_widths_heights_descents",
    "source_code": "def get_texts_widths_heights_descents(self, renderer):\n    whd_list = []\n    for _loc, _angle, label in self._locs_angles_labels:\n        if not label.strip():\n            continue\n        clean_line, ismath = self._preprocess_math(label)\n        whd = mtext._get_text_metrics_with_cache(renderer, clean_line, self._fontproperties, ismath=ismath, dpi=self.get_figure(root=True).dpi)\n        whd_list.append(whd)\n    return whd_list",
    "docstring": "Return a list of `` tuples for ticklabels. Empty labels are left out.",
    "type": "method",
    "file_path": "matplotlib\\lib\\mpl_toolkits\\axisartist\\axis_artist.py",
    "ast_data": "FunctionDef name:get_texts_widths_heights_descents arg:self arg:renderer arguments arg arg Assign For If Call Assign Call Assign Call Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "save",
    "source_code": "@_dcp_method_logger(log_exceptions=True)\n@_api_bc_check\ndef save(state_dict: STATE_DICT_TYPE, *, checkpoint_id: Union[str, os.PathLike, None]=None, storage_writer: Optional[StorageWriter]=None, planner: Optional[SavePlanner]=None, process_group: Optional[dist.ProcessGroup]=None, no_dist: bool=False) -> Metadata:\n    torch._C._log_api_usage_once('torch.distributed.checkpoint.save')\n    no_dist = no_dist or not dist.is_available() or (not dist.is_initialized())\n    if no_dist:\n        warnings.warn('torch.distributed is disabled, unavailable or uninitialized, assuming the intent is to save in a single process.')\n    with _profile():\n        storage_writer = cast(StorageWriter, _storage_setup(storage_writer, checkpoint_id, reader=False))\n        return _save_state_dict(state_dict=_stateful_to_state_dict(state_dict), storage_writer=storage_writer, process_group=process_group, no_dist=no_dist, planner=planner)",
    "docstring": "Save a distributed model in SPMD style. This function is different from `process_groupsave_state_dictShardingStrategy.HYBRID_SHARDsave_state_dict`.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\checkpoint\\state_dict_saver.py",
    "ast_data": "FunctionDef name:save arg:state_dict arguments arg arg arg arg arg arg Call Assign BoolOp Call Call If Call With Call Assign Call Call Return return:yes Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "build_graph",
    "source_code": "def build_graph(device, input_shape, output_sizes, axis):\n    with ops.device('/%s:0' % device):\n        inp = array_ops.zeros(input_shape)\n        outputs = []\n        for _ in range(100):\n            outputs.extend(array_ops.split(inp, output_sizes, axis))\n        return control_flow_ops.group(*outputs)",
    "docstring": "Build a graph containing a sequence of split operations. Args: device: string, the device to run on. input_shape: shape of the input tensor. output_sizes: size of each output along axis. axis: axis to be split along. Returns: An array of tensors to run()",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\split_benchmark.py",
    "ast_data": "FunctionDef name:build_graph arg:device arg:input_shape arg:output_sizes arg:axis arguments arg arg arg arg With Call Assign Call Assign For Call Call Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "gen_fn_def",
    "source_code": "def gen_fn_def(self, free_vars: list[str], maybe_return_annotation: str) -> str:\n    if len(free_vars) == 0 or free_vars[0] != 'self':\n        free_vars.insert(0, 'self')\n    return f'def {self._func_name}({', '.join(free_vars)}){maybe_return_annotation}:'",
    "docstring": "Given the free variables and a return annotation, generates the beginning of the FX function. By default,",
    "type": "method",
    "file_path": "pytorch\\torch\\fx\\graph.py",
    "ast_data": "FunctionDef name:gen_fn_def arg:self arg:free_vars arg:maybe_return_annotation arguments arg arg arg If BoolOp Compare Call Compare Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "@trackable.no_automatic_dependency_tracking\ndef __init__(self, layers=None, name=None):\n    super(functional.Functional, self).__init__(name=name, autocast=False)\n    self.supports_masking = True\n    self._compute_output_and_mask_jointly = True\n    self._auto_track_sub_layers = False\n    self._inferred_input_shape = None\n    self._has_explicit_input_shape = False\n    self._input_dtype = None\n    self._layer_call_argspecs = {}\n    self._created_nodes = set()\n    self._graph_initialized = False\n    self._use_legacy_deferred_behavior = False\n    if layers:\n        if not isinstance(layers, (list, tuple)):\n            layers = [layers]\n        for layer in layers:\n            self.add(layer)",
    "docstring": "Creates a model instance. Args: layers: Optional list of layers to add to the model. name: Optional name for the model.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\sequential.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:layers arg:name arguments arg arg arg Call Call Assign Assign Assign Assign Assign Assign Assign Assign Call Assign Assign If If Call Assign For Call"
  },
  {
    "library": "pytorch",
    "name": "rename",
    "source_code": "def rename(self, *names, **rename_map):\n    if has_torch_function_unary(self):\n        return handle_torch_function(Tensor.rename, (self,), self, *names, **rename_map)\n    return update_names(self, names, rename_map, inplace=False)",
    "docstring": "Renames dimension names of :attr:. There are two main usages: `rename_mapnamesnamesrename_map`. Examples:: >>> imgs = torch.rand(2, 3, 5, 7, names=('N', 'C', 'H', 'W')) >>> renamed_imgs = imgs.rename(N='batch', C='channels') >>> renamed_imgs.names ('batch', 'channels', 'H', 'W') >>> renamed_imgs = imgs.rename(None) >>> renamed_imgs.names (None, None, None, None) >>> renamed_imgs = imgs.rename('batch', 'channel', 'height', 'width') >>> renamed_imgs.names ('batch', 'channel', 'height', 'width') .. warning:: The named tensor API is experimental and subject to change.",
    "type": "method",
    "file_path": "pytorch\\torch\\_tensor.py",
    "ast_data": "FunctionDef name:rename arg:self arguments arg arg arg If Call Return return:yes Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_ResourceMetaclass",
    "source_code": "class _ResourceMetaclass(type):\n\n    def __call__(cls, *args, **kwargs):\n\n        def default_resource_creator(next_creator, *a, **kw):\n            assert next_creator is None\n            obj = cls.__new__(cls, *a, **kw)\n            obj.__init__(*a, **kw)\n            return obj\n        previous_getter = lambda *a, **kw: default_resource_creator(None, *a, **kw)\n        resource_creator_stack = ops.get_default_graph()._resource_creator_stack\n        for getter in resource_creator_stack[cls._resource_type()]:\n            previous_getter = _make_getter(getter, previous_getter)\n        return previous_getter(*args, **kwargs)",
    "docstring": "Metaclass for CapturableResource.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\trackable\\resource.py",
    "ast_data": "ClassDef name:_ResourceMetaclass FunctionDef name:__call__ arg:cls arguments arg arg arg FunctionDef name:default_resource_creator arg:next_creator arguments arg arg arg Compare Assign Call Call Return return:yes Assign arguments arg arg Call Assign Call For Call Assign Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "Format",
    "source_code": "def Format(pb):\n    if isinstance(pb, message.Message):\n        return dict(((desc.number, value) for desc, value in pb.ListFields()))\n    elif _IsMap(pb):\n        return dict(pb.items())\n    elif _IsRepeatedContainer(pb):\n        return dict(enumerate(list(pb)))\n    else:\n        return pb",
    "docstring": "Returns a dictionary or unchanged pb bases on its type. Specifically, this function returns a dictionary that maps tag number (for messages) or element index (for repeated fields) to value, or just pb unchanged if it's neither. Args: pb: A proto2 message or a primitive. Returns: A dict or unchanged pb.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\util\\protobuf\\compare.py",
    "ast_data": "FunctionDef name:Format arg:pb arguments arg If Call Return return:yes Call Call If Call Return return:yes Call Call If Call Return return:yes Call Call Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "__init__",
    "source_code": "def __init__(self, *args, zs=0, zdir='z', depthshade=None, depthshade_minalpha=None, axlim_clip=False, **kwargs):\n    if depthshade is None:\n        depthshade = rcParams['axes3d.depthshade']\n    if depthshade_minalpha is None:\n        depthshade_minalpha = rcParams['axes3d.depthshade_minalpha']\n    self._depthshade = depthshade\n    self._depthshade_minalpha = depthshade_minalpha\n    self._in_draw = False\n    super().__init__(*args, **kwargs)\n    self.set_3d_properties(zs, zdir, axlim_clip)\n    self._offset_zordered = None",
    "docstring": "Create a collection of flat 3D paths with its normal vector pointed in *zdir* direction, and located at *zs* on the *zdir* axis. 'zs' can be a scalar or an array-like of the same length as the number of paths in the collection. Constructor arguments are the same as for :class:. In addition, keywords *zs=0* and *zdir='z'* are available. Also, the keyword argument *depthshade* is available to indicate whether or not to shade the patches in order to give the appearance of depth (default is *True*). This is typically desired in scatter plots. *depthshade_minalpha* sets the minimum alpha value applied by depth-shading.",
    "type": "method",
    "file_path": "matplotlib\\lib\\mpl_toolkits\\mplot3d\\art3d.py",
    "ast_data": "FunctionDef name:__init__ arg:self arguments arg arg arg arg arg arg arg arg If Compare Assign If Compare Assign Assign Assign Assign Call Call Call Assign"
  },
  {
    "library": "django",
    "name": "localtime_tag",
    "source_code": "@register.tag('localtime')\ndef localtime_tag(parser, token):\n    bits = token.split_contents()\n    if len(bits) == 1:\n        use_tz = True\n    elif len(bits) > 2 or bits[1] not in ('on', 'off'):\n        raise TemplateSyntaxError(\"%r argument should be 'on' or 'off'\" % bits[0])\n    else:\n        use_tz = bits[1] == 'on'\n    nodelist = parser.parse(('endlocaltime',))\n    parser.delete_first_token()\n    return LocalTimeNode(nodelist, use_tz)",
    "docstring": "Force or prevent conversion of datetime objects to local time, regardless of the value of ``. Sample usage:: {% localtime off %}{{ value_in_utc }}{% endlocaltime %}",
    "type": "function",
    "file_path": "django\\django\\templatetags\\tz.py",
    "ast_data": "FunctionDef name:localtime_tag arg:parser arg:token arguments arg arg Assign Call If Compare Call Assign If BoolOp Compare Call Compare Raise Call Assign Compare Assign Call Call Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "indirect_indexing",
    "source_code": "def indirect_indexing(self, index_proxy, size, check=True, wrap_neg=True):\n    var = self.body.add_indirect(size)\n    set_indirect = self.body.bind_set_indirect_shim(var, size, check, wrap_neg)\n    self.tracer.create_proxy('call_module', self.body.add_submodule(set_indirect, f'set_{var}'), (index_proxy,), {})\n    return var",
    "docstring": "Flow data from tensors into indexing formulas. Introduce a call_module to update the indexing.",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\loop_body.py",
    "ast_data": "FunctionDef name:indirect_indexing arg:self arg:index_proxy arg:size arg:check arg:wrap_neg arguments arg arg arg arg arg Assign Call Assign Call Call Call Return return:yes"
  },
  {
    "library": "seaborn",
    "name": "pdf",
    "source_code": "def pdf(self, x):\n    return self.evaluate(x)",
    "docstring": "Evaluate the estimated pdf on a provided set of points. Notes ----- This is an alias for . See the `` docstring for more details.",
    "type": "method",
    "file_path": "seaborn\\seaborn\\external\\kde.py",
    "ast_data": "FunctionDef name:pdf arg:self arg:x arguments arg arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "begin",
    "source_code": "def begin(self):\n    pass",
    "docstring": "Called once before using the session. When called, the default graph is the one that will be launched in the session. The hook can modify the graph by adding new operations to it. After the call the graph will be finalized and the other callbacks can not modify the graph anymore. Second call of on the same graph, should not change the graph.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\training\\session_run_hook.py",
    "ast_data": "FunctionDef name:begin arg:self arguments arg"
  },
  {
    "library": "scipy",
    "name": "logpdf",
    "source_code": "def logpdf(self, x, df, scale):\n    dim, df, scale = self._process_parameters(df, scale)\n    x = self._process_quantiles(x, dim)\n    C, log_det_scale = self._cholesky_logdet(scale)\n    out = self._logpdf(x, dim, df, log_det_scale, C)\n    return _squeeze_output(out)",
    "docstring": "Log of the inverse Wishart probability density function. Parameters ---------- x : array_like Quantiles, with the last axis of denoting the components. Each quantile must be a symmetric positive definite matrix. %(_doc_default_callparams)s Returns ------- pdf : ndarray Log of the probability density function evaluated at Notes ----- %(_doc_callparams_note)s",
    "type": "method",
    "file_path": "scipy\\scipy\\stats\\_multivariate.py",
    "ast_data": "FunctionDef name:logpdf arg:self arg:x arg:df arg:scale arguments arg arg arg arg Assign Call Assign Call Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "get_cell",
    "source_code": "def get_cell(self, *labels):\n    return IntGaugeCell(super(IntGauge, self).get_cell(*labels))",
    "docstring": "Retrieves the cell.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\monitoring.py",
    "ast_data": "FunctionDef name:get_cell arg:self arguments arg arg Return return:yes Call Call Call"
  },
  {
    "library": "scikit-learn",
    "name": "predict_proba",
    "source_code": "@available_if(_check_voting)\ndef predict_proba(self, X):\n    check_is_fitted(self)\n    avg = np.average(self._collect_probas(X), axis=0, weights=self._weights_not_none)\n    return avg",
    "docstring": "Compute probabilities of possible outcomes for samples in X. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) The input samples. Returns ------- avg : array-like of shape (n_samples, n_classes) Weighted average probability for each class per sample.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\ensemble\\_voting.py",
    "ast_data": "FunctionDef name:predict_proba arg:self arg:X arguments arg arg Call Assign Call Call Return return:yes Call"
  },
  {
    "library": "kornia",
    "name": "_valid_masks",
    "source_code": "def _valid_masks(self, masks: Tensor) -> Tensor:\n    KORNIA_CHECK_SHAPE(masks, ['K', '1', '256', '256'])\n    return masks",
    "docstring": "Validate the input masks shape.",
    "type": "method",
    "file_path": "kornia\\kornia\\contrib\\visual_prompter.py",
    "ast_data": "FunctionDef name:_valid_masks arg:self arg:masks arguments arg arg Call Return return:yes"
  },
  {
    "library": "pandas",
    "name": "_get_binner",
    "source_code": "@final\ndef _get_binner(self):\n    binner, bins, binlabels = self._get_binner_for_time()\n    assert len(bins) == len(binlabels)\n    bin_grouper = BinGrouper(bins, binlabels, indexer=self._indexer)\n    return (binner, bin_grouper)",
    "docstring": "Create the BinGrouper, assume that self.set_grouper(obj) has already been called.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\resample.py",
    "ast_data": "FunctionDef name:_get_binner arg:self arguments arg Assign Call Compare Call Call Assign Call Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "apply",
    "source_code": "def apply(self, X):\n    leaves = super().apply(X)\n    leaves = leaves.reshape(X.shape[0], self.estimators_.shape[0])\n    return leaves",
    "docstring": "Apply trees in the ensemble to X, return leaf indices. .. versionadded:: 0.17 Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) The input samples. Internally, its dtype will be converted to ``. Returns ------- X_leaves : array-like of shape (n_samples, n_estimators) For each datapoint x in X and for each tree in the ensemble, return the index of the leaf x ends up in each estimator.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\ensemble\\_gb.py",
    "ast_data": "FunctionDef name:apply arg:self arg:X arguments arg arg Assign Call Call Assign Call Return return:yes"
  },
  {
    "library": "django",
    "name": "clone",
    "source_code": "def clone(self):\n    clone = StateApps([], {})\n    clone.all_models = copy.deepcopy(self.all_models)\n    for app_label in self.app_configs:\n        app_config = AppConfigStub(app_label)\n        app_config.apps = clone\n        app_config.import_models()\n        clone.app_configs[app_label] = app_config\n    clone.real_models = self.real_models\n    return clone",
    "docstring": "Return a clone of this registry.",
    "type": "method",
    "file_path": "django\\django\\db\\migrations\\state.py",
    "ast_data": "FunctionDef name:clone arg:self arguments arg Assign Call Assign Call For Assign Call Assign Call Assign Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "shape_and_dtype",
    "source_code": "def shape_and_dtype(t):\n    if t.dtype == dtypes.resource:\n        handle_data = resource_variable_ops.get_eager_safe_handle_data(t)\n        if handle_data is None or not handle_data.is_set or len(handle_data.shape_and_type) != 1:\n            raise ValueError('Internal error: Tried to take gradients (or similar) of a variable without handle data:\\n%s' % str(t))\n        shape_and_type = handle_data.shape_and_type[0]\n        return (tensor_shape.TensorShape(shape_and_type.shape), dtypes.as_dtype(shape_and_type.dtype))\n    return (t.shape, t.dtype)",
    "docstring": "Return the shape and dtype for the default gradient for a Tensor.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\default_gradient.py",
    "ast_data": "FunctionDef name:shape_and_dtype arg:t arguments arg If Compare Assign Call If BoolOp Compare Compare Call Raise Call Call Assign Return return:yes Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "keras_tensor_from_type_spec",
    "source_code": "def keras_tensor_from_type_spec(type_spec, name=None):\n    keras_tensor_cls = None\n    value_type = type_spec.value_type\n    for tensor_type, cls in keras_tensor_classes:\n        if issubclass(value_type, tensor_type):\n            keras_tensor_cls = cls\n            break\n    return keras_tensor_cls.from_type_spec(type_spec, name=name)",
    "docstring": "Convert a TypeSpec to a representative KerasTensor.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\keras_tensor.py",
    "ast_data": "FunctionDef name:keras_tensor_from_type_spec arg:type_spec arg:name arguments arg arg Assign Assign For If Call Assign Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "roots",
    "source_code": "def roots(self):\n    if self._roots is None:\n        m = self.weights.size\n        B = np.eye(m + 1, dtype=self.weights.dtype)\n        B[0, 0] = 0\n        E = np.zeros_like(B, dtype=np.result_type(self.weights, self._support_values, self._support_points))\n        E[0, 1:] = self.weights * self._support_values\n        E[1:, 0] = 1\n        np.fill_diagonal(E[1:, 1:], self._support_points)\n        zer = scipy.linalg.eigvals(E, B)\n        self._roots = zer[np.isfinite(zer)]\n    return self._roots",
    "docstring": "Compute the zeros of the rational approximation. Returns ------- zeros : array Zeros of the AAA approximation, repeated according to their multiplicity but not in any specific order.",
    "type": "method",
    "file_path": "scipy\\scipy\\interpolate\\_bary_rational.py",
    "ast_data": "FunctionDef name:roots arg:self arguments arg If Compare Assign Assign Call Assign Assign Call Call Assign Assign Call Assign Call Assign Call Return return:yes"
  },
  {
    "library": "django",
    "name": "ImageFile",
    "source_code": "class ImageFile(File):\n\n    @property\n    def width(self):\n        return self._get_image_dimensions()[0]\n\n    @property\n    def height(self):\n        return self._get_image_dimensions()[1]\n\n    def _get_image_dimensions(self):\n        if not hasattr(self, '_dimensions_cache'):\n            close = self.closed\n            self.open()\n            self._dimensions_cache = get_image_dimensions(self, close=close)\n        return self._dimensions_cache",
    "docstring": "A mixin for use alongside django.core.files.base.File, which provides additional features for dealing with images.",
    "type": "class",
    "file_path": "django\\django\\core\\files\\images.py",
    "ast_data": "ClassDef name:ImageFile FunctionDef name:width arg:self arguments arg Return return:yes Call FunctionDef name:height arg:self arguments arg Return return:yes Call FunctionDef name:_get_image_dimensions arg:self arguments arg If Call Assign Call Assign Call Return return:yes"
  },
  {
    "library": "pandas",
    "name": "has_level_label",
    "source_code": "def has_level_label(label_flags: npt.NDArray[np.intp], vmin: float) -> bool:\n    if label_flags.size == 0 or (label_flags.size == 1 and label_flags[0] == 0 and (vmin % 1 > 0.0)):\n        return False\n    else:\n        return True",
    "docstring": "Returns true if the `` indicate there is at least one label for this level. if the minimum view limit is not an exact integer, then the first tick label won't be shown, so we must adjust for that.",
    "type": "function",
    "file_path": "pandas\\pandas\\plotting\\_matplotlib\\converter.py",
    "ast_data": "FunctionDef name:has_level_label arg:label_flags arg:vmin arguments arg arg If BoolOp Compare BoolOp Compare Compare Compare Return return:yes Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "get_equalization_qconfig_dict",
    "source_code": "def get_equalization_qconfig_dict(layer_sqnr_dict: dict[str, float], num_layers_to_equalize: int) -> Any:\n    layer_sqnr_sorted = sorted(layer_sqnr_dict.items(), key=operator.itemgetter(1))\n    layers_to_equalize = layer_sqnr_sorted[:num_layers_to_equalize]\n    module_to_qconfig_list = [(item[0], default_equalization_qconfig) for item in layers_to_equalize]\n    equalization_qconfig_dict = {'module_name': module_to_qconfig_list}\n    return equalization_qconfig_dict",
    "docstring": "Given the layer to SQNR dictionary, find the layers with the highest quantization errors, and return an equalization_qconfig_dict specifying to only equalize those top layers. Args: layer_sqnr_dict: Dictionary mapping layer names to SQNR values (found when comparing an equalized model against a float model) num_layers_to_equalize: Number of layers with the highest quantization errors to equalize",
    "type": "function",
    "file_path": "pytorch\\torch\\ao\\quantization\\fx\\_equalize.py",
    "ast_data": "FunctionDef name:get_equalization_qconfig_dict arg:layer_sqnr_dict arg:num_layers_to_equalize arguments arg arg Assign Call Call Call Assign Assign Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_DynamicPartitionGrads",
    "source_code": "@ops.RegisterGradient('DynamicPartition')\ndef _DynamicPartitionGrads(op, *grads):\n    data = op.inputs[0]\n    indices = op.inputs[1]\n    num_partitions = op.get_attr('num_partitions')\n    prefix_shape = array_ops.shape(indices)\n    original_indices = array_ops.reshape(math_ops.range(math_ops.reduce_prod(prefix_shape)), prefix_shape)\n    partitioned_indices = data_flow_ops.dynamic_partition(original_indices, indices, num_partitions)\n    reconstructed = data_flow_ops.parallel_dynamic_stitch(partitioned_indices, grads)\n    reconstructed = array_ops.reshape(reconstructed, array_ops.shape(data))\n    return [reconstructed, None]",
    "docstring": "Gradients for DynamicPartition.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\data_flow_grad.py",
    "ast_data": "FunctionDef name:_DynamicPartitionGrads arg:op arguments arg arg Assign Assign Assign Call Assign Call Assign Call Call Call Assign Call Assign Call Assign Call Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_fill_object_graph_proto",
    "source_code": "def _fill_object_graph_proto(trackable_data: List[_TrackableData]) -> trackable_object_graph_pb2.TrackableObjectGraph:\n    object_graph_proto = trackable_object_graph_pb2.TrackableObjectGraph()\n    for checkpoint_id, td in enumerate(trackable_data):\n        assert td.node_id == checkpoint_id\n        object_graph_proto.nodes.add(slot_variables=td.slot_variable_proto, children=td.children_proto)\n    return object_graph_proto",
    "docstring": "Name non-slot s and add them to .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\checkpoint\\save_util.py",
    "ast_data": "FunctionDef name:_fill_object_graph_proto arg:trackable_data arguments arg Assign Call For Call Compare Call Return return:yes"
  },
  {
    "library": "scipy",
    "name": "_calculate_winsorized_variance",
    "source_code": "def _calculate_winsorized_variance(a, g, axis):\n    if g == 0:\n        return _var(a, ddof=1, axis=axis)\n    a_win = np.moveaxis(a, axis, -1)\n    nans_indices = np.any(np.isnan(a_win), axis=-1)\n    a_win[..., :g] = a_win[..., [g]]\n    a_win[..., -g:] = a_win[..., [-g - 1]]\n    var_win = np.asarray(_var(a_win, ddof=2 * g + 1, axis=-1))\n    var_win[nans_indices] = np.nan\n    return var_win",
    "docstring": "Calculates g-times winsorized variance along specified axis",
    "type": "function",
    "file_path": "scipy\\scipy\\stats\\_stats_py.py",
    "ast_data": "FunctionDef name:_calculate_winsorized_variance arg:a arg:g arg:axis arguments arg arg arg If Compare Return return:yes Call Assign Call Assign Call Call Assign Assign Assign Call Call Assign Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "add_table",
    "source_code": "def add_table(self, tab):\n    _api.check_isinstance(mtable.Table, tab=tab)\n    self._set_artist_props(tab)\n    self._children.append(tab)\n    if tab.get_clip_path() is None:\n        tab.set_clip_path(self.patch)\n    tab._remove_method = self._children.remove\n    return tab",
    "docstring": "Add a to the Axes; return the table.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\axes\\_base.py",
    "ast_data": "FunctionDef name:add_table arg:self arg:tab arguments arg arg Call Call Call If Compare Call Call Assign Return return:yes"
  },
  {
    "library": "scipy",
    "name": "transformed_limits",
    "source_code": "@property\ndef transformed_limits(self):\n    raise NotImplementedError",
    "docstring": "New limits of integration after applying the transformation.",
    "type": "method",
    "file_path": "scipy\\scipy\\integrate\\_cubature.py",
    "ast_data": "FunctionDef name:transformed_limits arg:self arguments arg Raise"
  },
  {
    "library": "kornia",
    "name": "init_kron",
    "source_code": "def init_kron(self) -> Tuple[Tensor, Tensor]:\n    kron = get_kron_order(self.in_dims, self.d_emb)\n    _emb = torch.jit.annotate(Tensor, self.emb)\n    emb2 = torch.index_select(_emb, 1, kron[:, 1])\n    return (emb2, kron[:, 0])",
    "docstring": "Initialize helper variables to calculate kronecker.",
    "type": "method",
    "file_path": "kornia\\kornia\\feature\\mkd.py",
    "ast_data": "FunctionDef name:init_kron arg:self arguments arg Assign Call Assign Call Assign Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "update",
    "source_code": "def update(self, modules: Mapping[str, Module]) -> None:\n    if not isinstance(modules, container_abcs.Iterable):\n        raise TypeError('ModuleDict.update should be called with an iterable of key/value pairs, but got ' + type(modules).__name__)\n    if isinstance(modules, (OrderedDict, ModuleDict, container_abcs.Mapping)):\n        for key, module in modules.items():\n            self[key] = module\n    else:\n        for j, m in enumerate(modules):\n            if not isinstance(m, container_abcs.Iterable):\n                raise TypeError('ModuleDict update sequence element #' + str(j) + ' should be Iterable; is' + type(m).__name__)\n            if not len(m) == 2:\n                raise ValueError('ModuleDict update sequence element #' + str(j) + ' has length ' + str(len(m)) + '; 2 is required')\n            self[m[0]] = m[1]",
    "docstring": "Update the :class: with key-value pairs from a mapping, overwriting existing keys. .. note:: If :attr: is an `~torch.nn.ModuleDict~torch.nn.Module~torch.nn.Module`)",
    "type": "method",
    "file_path": "pytorch\\torch\\nn\\modules\\container.py",
    "ast_data": "FunctionDef name:update arg:self arg:modules arguments arg arg If Call Raise Call Call If Call For Call Assign For Call If Call Raise Call Call Call If Compare Call Raise Call Call Call Call Assign"
  },
  {
    "library": "numpy",
    "name": "rsplit",
    "source_code": "def rsplit(self, sep=None, maxsplit=None):\n    return rsplit(self, sep, maxsplit)",
    "docstring": "For each element in , return a list of the words in the string, using as the delimiter string. See Also -------- char.rsplit",
    "type": "method",
    "file_path": "numpy\\numpy\\_core\\defchararray.py",
    "ast_data": "FunctionDef name:rsplit arg:self arg:sep arg:maxsplit arguments arg arg arg Return return:yes Call"
  },
  {
    "library": "numpy",
    "name": "lagvander2d",
    "source_code": "def lagvander2d(x, y, deg):\n    return pu._vander_nd_flat((lagvander, lagvander), (x, y), deg)",
    "docstring": "Pseudo-Vandermonde matrix of given degrees. Returns the pseudo-Vandermonde matrix of degrees and sample points ``0 >> import numpy as np >>> from numpy.polynomial.laguerre import lagvander2d >>> x = np.array([0]) >>> y = np.array([2]) >>> lagvander2d(x, y, [2, 1]) array([[ 1., -1., 1., -1., 1., -1.]])",
    "type": "function",
    "file_path": "numpy\\numpy\\polynomial\\laguerre.py",
    "ast_data": "FunctionDef name:lagvander2d arg:x arg:y arg:deg arguments arg arg arg Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "default_dtypes",
    "source_code": "def default_dtypes(self, *, device=None):\n    default_floating = torch.get_default_dtype()\n    default_complex = torch.complex64 if default_floating == torch.float32 else torch.complex128\n    default_integral = torch.int64\n    return {'real floating': default_floating, 'complex floating': default_complex, 'integral': default_integral, 'indexing': default_integral}",
    "docstring": "The default data types used for new PyTorch arrays. Parameters ---------- device : Device, optional The device to get the default data types for. Unused for PyTorch, as all devices use the same default dtypes. Returns ------- dtypes : dict A dictionary describing the default data types used for new PyTorch arrays. See Also -------- __array_namespace_info__.capabilities, __array_namespace_info__.default_device, __array_namespace_info__.dtypes, __array_namespace_info__.devices Examples -------- >>> info = xp.__array_namespace_info__() >>> info.default_dtypes() {'real floating': torch.float32, 'complex floating': torch.complex64, 'integral': torch.int64, 'indexing': torch.int64}",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\externals\\array_api_compat\\torch\\_info.py",
    "ast_data": "FunctionDef name:default_dtypes arg:self arguments arg arg Assign Call Assign Compare Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "get_dropout_mask_for_cell",
    "source_code": "def get_dropout_mask_for_cell(self, inputs, training, count=1):\n    if self.dropout == 0:\n        return None\n    init_kwargs = dict(inputs=inputs, training=training, count=count)\n    return self._dropout_mask_cache.setdefault(kwargs=init_kwargs)",
    "docstring": "Get the dropout mask for RNN cell's input. It will create mask based on context if there isn't any existing cached mask. If a new mask is generated, it will update the cache in the cell. Args: inputs: The input tensor whose shape will be used to generate dropout mask. training: Boolean tensor, whether its in training mode, dropout will be ignored in non-training mode. count: Int, how many dropout mask will be generated. It is useful for cell that has internal weights fused together. Returns: List of mask tensor, generated or cached mask based on context.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\layers\\recurrent.py",
    "ast_data": "FunctionDef name:get_dropout_mask_for_cell arg:self arg:inputs arg:training arg:count arguments arg arg arg arg If Compare Return return:no Assign Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "partialize_and_update_signature",
    "source_code": "def partialize_and_update_signature(func, **kwargs):\n    original_sig = inspect.signature(func)\n    parameters = original_sig.parameters\n    new_parameters = {key: value for key, value in parameters.items() if key not in kwargs}\n    new_sig = inspect.Signature(parameters=list(new_parameters.values()))\n    partial_func = functools.partial(func, **kwargs)\n\n    def wrapper(*args, **kwargs):\n        return partial_func(*args, **kwargs)\n    wrapper.__signature__ = new_sig\n    wrapper.__name__ = func.__name__\n    return wrapper",
    "docstring": "Equivalent to functools.partial but also updates the signature on returned function",
    "type": "function",
    "file_path": "pytorch\\torch\\_inductor\\fx_passes\\fuse_attention.py",
    "ast_data": "FunctionDef name:partialize_and_update_signature arg:func arguments arg arg Assign Call Assign Assign Call Compare Assign Call Call Call Assign Call FunctionDef name:wrapper arguments arg arg Return return:yes Call Assign Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "acos",
    "source_code": "@tf_export('math.acos', 'acos')\n@dispatch.register_unary_elementwise_api\n@dispatch.add_dispatch_support\ndef acos(x, name=None):\n    return gen_math_ops.acos(x, name)",
    "docstring": "Computes acos of x element-wise. Provided an input tensor, the operation returns the inverse cosine of each element of the tensor. If then, . Input range is and the output has a range of . For example: >>> x = tf.constant([1.0, -0.5, 3.4, 0.2, 0.0, -2], dtype = tf.float32) >>> tf.math.acos(x) Args: x: A . Must be one of the following types: , , , , , . name: A name for the operation (optional). Returns: A . Has the same type as x.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\math_ops.py",
    "ast_data": "FunctionDef name:acos arg:x arg:name arguments arg arg Return return:yes Call Call"
  },
  {
    "library": "scipy",
    "name": "evaluate_quadratic",
    "source_code": "def evaluate_quadratic(J, g, s, diag=None):\n    if s.ndim == 1:\n        Js = J.dot(s)\n        q = np.dot(Js, Js)\n        if diag is not None:\n            q += np.dot(s * diag, s)\n    else:\n        Js = J.dot(s.T)\n        q = np.sum(Js ** 2, axis=0)\n        if diag is not None:\n            q += np.sum(diag * s ** 2, axis=1)\n    l = np.dot(s, g)\n    return 0.5 * q + l",
    "docstring": "Compute values of a quadratic function arising in least squares. The function is 0.5 * s.T * (J.T * J + diag) * s + g.T * s. Parameters ---------- J : ndarray, sparse array or LinearOperator, shape (m, n) Jacobian matrix, affects the quadratic term. g : ndarray, shape (n,) Gradient, defines the linear term. s : ndarray, shape (k, n) or (n,) Array containing steps as rows. diag : ndarray, shape (n,), optional Addition diagonal part, affects the quadratic term. If None, assumed to be 0. Returns ------- values : ndarray with shape (k,) or float Values of the function. If was 2-D, then ndarray is returned, otherwise, float is returned.",
    "type": "function",
    "file_path": "scipy\\scipy\\optimize\\_lsq\\common.py",
    "ast_data": "FunctionDef name:evaluate_quadratic arg:J arg:g arg:s arg:diag arguments arg arg arg arg If Compare Assign Call Assign Call If Compare Call Assign Call Assign Call If Compare Call Assign Call Return return:yes"
  },
  {
    "library": "sphinx",
    "name": "HList",
    "source_code": "class HList(SphinxDirective):\n    has_content = True\n    required_arguments = 0\n    optional_arguments = 0\n    final_argument_whitespace = False\n    option_spec: ClassVar[OptionSpec] = {'columns': int}\n\n    def run(self) -> list[Node]:\n        ncolumns = self.options.get('columns', 2)\n        children = self.parse_content_to_nodes()\n        if len(children) != 1 or not isinstance(children[0], nodes.bullet_list):\n            logger.warning(__('.. hlist content is not a list'), location=(self.env.docname, self.lineno))\n            return []\n        fulllist = children[0]\n        npercol, nmore = divmod(len(fulllist), ncolumns)\n        index = 0\n        newnode = addnodes.hlist()\n        newnode['ncolumns'] = str(ncolumns)\n        for column in range(ncolumns):\n            endindex = index + (npercol + 1 if column < nmore else npercol)\n            bullet_list = nodes.bullet_list()\n            bullet_list += fulllist.children[index:endindex]\n            newnode += addnodes.hlistcol('', bullet_list)\n            index = endindex\n        return [newnode]",
    "docstring": "Directive for a list that gets compacted horizontally.",
    "type": "class",
    "file_path": "sphinx\\sphinx\\directives\\other.py",
    "ast_data": "ClassDef name:HList Assign Assign Assign Assign FunctionDef name:run arg:self arguments arg Assign Call Assign Call If BoolOp Compare Call Call Call Call Return return:no Assign Assign Call Call Assign Assign Call Assign Call For Call Assign Compare Assign Call Call Assign Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "reciprocal",
    "source_code": "@staticmethod\ndef reciprocal(x):\n    x = ValueRanges.wrap(x)\n    if 0 in x:\n        return ValueRanges.unknown()\n    else:\n        return ValueRanges.decreasing_map(x, lambda y: FloatTrueDiv(1.0, y))",
    "docstring": "Needed as it's used in pow, but it won't appear on a SymPy expression",
    "type": "method",
    "file_path": "pytorch\\torch\\utils\\_sympy\\value_ranges.py",
    "ast_data": "FunctionDef name:reciprocal arg:x arguments arg Assign Call If Compare Return return:yes Call Return return:yes Call arguments arg Call"
  },
  {
    "library": "pandas",
    "name": "maybe_mi_droplevels",
    "source_code": "def maybe_mi_droplevels(indexer, levels):\n    new_index = self[indexer]\n    for i in sorted(levels, reverse=True):\n        new_index = new_index._drop_level_numbers([i])\n    return new_index",
    "docstring": "If level does not exist or all levels were dropped, the exception has to be handled outside.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\indexes\\multi.py",
    "ast_data": "FunctionDef name:maybe_mi_droplevels arg:indexer arg:levels arguments arg arg Assign For Call Assign Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "_get_wrapped_text",
    "source_code": "def _get_wrapped_text(self):\n    if not self.get_wrap():\n        return self.get_text()\n    if self.get_usetex():\n        return self.get_text()\n    line_width = self._get_wrap_line_width()\n    wrapped_lines = []\n    unwrapped_lines = self.get_text().split('\\n')\n    for unwrapped_line in unwrapped_lines:\n        sub_words = unwrapped_line.split(' ')\n        while len(sub_words) > 0:\n            if len(sub_words) == 1:\n                wrapped_lines.append(sub_words.pop(0))\n                continue\n            for i in range(2, len(sub_words) + 1):\n                line = ' '.join(sub_words[:i])\n                current_width = self._get_rendered_text_width(line)\n                if current_width > line_width:\n                    wrapped_lines.append(' '.join(sub_words[:i - 1]))\n                    sub_words = sub_words[i - 1:]\n                    break\n                elif i == len(sub_words):\n                    wrapped_lines.append(' '.join(sub_words[:i]))\n                    sub_words = []\n                    break\n    return '\\n'.join(wrapped_lines)",
    "docstring": "Return a copy of the text string with new lines added so that the text is wrapped relative to the parent figure (if is True).",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\text.py",
    "ast_data": "FunctionDef name:_get_wrapped_text arg:self arguments arg If Call Return return:yes Call If Call Return return:yes Call Assign Call Assign Assign Call Call For Assign Call While Compare Call If Compare Call Call Call For Call Call Assign Call Assign Call If Compare Call Call Assign If Compare Call Call Call Assign Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "getnnz",
    "source_code": "def getnnz(self, axis=None):\n    return self._getnnz(axis=axis)",
    "docstring": "Number of stored values, including explicit zeros. Parameters ---------- axis : None, 0, or 1 Select between the number of values across the whole array, in each column, or in each row.",
    "type": "method",
    "file_path": "scipy\\scipy\\sparse\\_matrix.py",
    "ast_data": "FunctionDef name:getnnz arg:self arg:axis arguments arg arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "sparse_read",
    "source_code": "def sparse_read(self, indices, name=None):\n    raise AttributeError",
    "docstring": "Gather slices from params axis axis according to indices. This function supports a subset of tf.gather, see tf.gather for details on usage. Args: indices: The index . Must be one of the following types: , . Must be in range . name: A name for the operation (optional). Returns: A . Has the same type as .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\variables.py",
    "ast_data": "FunctionDef name:sparse_read arg:self arg:indices arg:name arguments arg arg arg Raise"
  },
  {
    "library": "tensorflow",
    "name": "reduce_any",
    "source_code": "@tf_export('math.reduce_any', 'reduce_any', v1=[])\n@dispatch.add_dispatch_support\ndef reduce_any(input_tensor, axis=None, keepdims=False, name=None):\n    keepdims = False if keepdims is None else bool(keepdims)\n    return _may_reduce_to_scalar(keepdims, axis, gen_math_ops._any(input_tensor, _ReductionDims(input_tensor, axis), keepdims, name=name))",
    "docstring": "Computes of elements across dimensions of a tensor. This is the reduction operation for the elementwise op. Reduces along the dimensions given in . Unless is true, the rank of the tensor is reduced by 1 for each of the entries in , which must be unique. If is true, the reduced dimensions are retained with length 1. If is None, all dimensions are reduced, and a tensor with a single element is returned. For example: >>> x = tf.constant([[True, True], [False, False]]) >>> tf.reduce_any(x) >>> tf.reduce_any(x, 0) >>> tf.reduce_any(x, 1) Args: input_tensor: The boolean tensor to reduce. axis: The dimensions to reduce. If (the default), reduces all dimensions. Must be in the range . keepdims: If true, retains reduced dimensions with length 1. name: A name for the operation (optional). Returns: The reduced tensor. @compatibility(numpy) Equivalent to np.any @end_compatibility",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\math_ops.py",
    "ast_data": "FunctionDef name:reduce_any arg:input_tensor arg:axis arg:keepdims arg:name arguments arg arg arg arg Assign Compare Call Return return:yes Call Call Call Call"
  },
  {
    "library": "scikit-learn",
    "name": "_decode_comment",
    "source_code": "def _decode_comment(self, s):\n    res = re.sub('^\\\\%( )?', '', s)\n    return res",
    "docstring": "(INTERNAL) Decodes a comment line. Comments are single line strings starting, obligatorily, with the `` character, and can have any symbol, including whitespaces or special characters. This method must receive a normalized string, i.e., a string without padding, including the \" \" characters. :param s: a normalized string. :return: a string with the decoded comment.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\externals\\_arff.py",
    "ast_data": "FunctionDef name:_decode_comment arg:self arg:s arguments arg arg Assign Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "custom_bwd",
    "source_code": "def custom_bwd(bwd=None, *, device_type: str):\n    if not isinstance(device_type, str):\n        raise ValueError(f'Expected `device_type` of type `str`, got: `{type(device_type)}`')\n    if bwd is None:\n        return functools.partial(custom_bwd, device_type=device_type)\n\n    @functools.wraps(bwd)\n    def decorate_bwd(*args, **kwargs):\n        with autocast(device_type=device_type, enabled=args[0]._fwd_used_autocast, dtype=args[0]._dtype):\n            return bwd(*args, **kwargs)\n    return decorate_bwd",
    "docstring": "Create a helper decorator for backward methods of custom autograd functions. Autograd functions are subclasses of :class:. Ensures that `example pagetypetorch.deviceTensor.device.type`.",
    "type": "function",
    "file_path": "pytorch\\torch\\amp\\autocast_mode.py",
    "ast_data": "FunctionDef name:custom_bwd arg:bwd arguments arg arg If Call Raise Call Call If Compare Return return:yes Call FunctionDef name:decorate_bwd arguments arg arg With Call Return return:yes Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "record_event",
    "source_code": "def record_event(self, event=None):\n    if event is None:\n        event = Event()\n    event.record(self)\n    return event",
    "docstring": "Record an event. Args: event (torch.cuda.Event, optional): event to record. If not given, a new one will be allocated. Returns: Recorded event.",
    "type": "method",
    "file_path": "pytorch\\torch\\cuda\\streams.py",
    "ast_data": "FunctionDef name:record_event arg:self arg:event arguments arg arg If Compare Assign Call Call Return return:yes"
  },
  {
    "library": "django",
    "name": "flush",
    "source_code": "def flush(self):\n    self.clear()\n    self.delete()\n    self._session_key = None",
    "docstring": "Remove the current session data from the database and regenerate the key.",
    "type": "method",
    "file_path": "django\\django\\contrib\\sessions\\backends\\base.py",
    "ast_data": "FunctionDef name:flush arg:self arguments arg Call Call Assign"
  },
  {
    "library": "django",
    "name": "process_request",
    "source_code": "def process_request(self, request):\n    user_agent = request.META.get('HTTP_USER_AGENT')\n    if user_agent is not None:\n        for user_agent_regex in settings.DISALLOWED_USER_AGENTS:\n            if user_agent_regex.search(user_agent):\n                raise PermissionDenied('Forbidden user agent')\n    host = request.get_host()\n    if settings.PREPEND_WWW and host and (not host.startswith('www.')):\n        if self.should_redirect_with_slash(request):\n            path = self.get_full_path_with_slash(request)\n        else:\n            path = request.get_full_path()\n        return self.response_redirect_class(f'{request.scheme}://www.{host}{path}')",
    "docstring": "Check for denied User-Agents and rewrite the URL based on settings.APPEND_SLASH and settings.PREPEND_WWW",
    "type": "method",
    "file_path": "django\\django\\middleware\\common.py",
    "ast_data": "FunctionDef name:process_request arg:self arg:request arguments arg arg Assign Call If Compare For If Call Raise Call Assign Call If BoolOp Call If Call Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_TensorArraySplitGrad",
    "source_code": "@ops.RegisterGradient('TensorArraySplit')\n@ops.RegisterGradient('TensorArraySplitV2')\n@ops.RegisterGradient('TensorArraySplitV3')\ndef _TensorArraySplitGrad(op: ops.Operation, flow):\n    handle = op.inputs[0]\n    dtype = op.get_attr('T')\n    grad_source = _GetGradSource(flow)\n    flow_out = array_ops.identity(op.outputs[0], 'flow_out')\n    with ops.control_dependencies([flow_out]):\n        flow = array_ops.identity(flow, 'write_barrier')\n    g = tensor_array_ops.TensorArray(dtype=dtype, handle=handle, flow=flow, colocate_with_first_write_call=False).grad(source=grad_source, flow=flow)\n    grad = g.concat()\n    return [None, grad, None, flow]",
    "docstring": "Gradient for TensorArraySplit. Args: op: Forward TensorArraySplit op. flow: Gradient flow to TensorArraySplit. Returns: A grad , the gradient created in upstream ReadGrads or PackGrad.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\tensor_array_grad.py",
    "ast_data": "FunctionDef name:_TensorArraySplitGrad arg:op arg:flow arguments arg arg Assign Assign Call Assign Call Assign Call With Call Assign Call Assign Call Call Assign Call Return return:yes Call Call Call"
  },
  {
    "library": "pandas",
    "name": "render_pep440",
    "source_code": "def render_pep440(pieces):\n    if pieces['closest-tag']:\n        rendered = pieces['closest-tag']\n        if pieces['distance'] or pieces['dirty']:\n            rendered += plus_or_dot(pieces)\n            rendered += f'{pieces['distance']}.g{pieces['short']}'\n            if pieces['dirty']:\n                rendered += '.dirty'\n    else:\n        rendered = f'0+untagged.{pieces['distance']}.g{pieces['short']}'\n        if pieces['dirty']:\n            rendered += '.dirty'\n    return rendered",
    "docstring": "Build up version string, with post-release \"local version identifier\". Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty Exceptions: 1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty]",
    "type": "function",
    "file_path": "pandas\\pandas\\_version.py",
    "ast_data": "FunctionDef name:render_pep440 arg:pieces arguments arg If Assign If BoolOp Call If Assign If Return return:yes"
  },
  {
    "library": "pandas",
    "name": "_python_apply_general",
    "source_code": "@final\ndef _python_apply_general(self, f: Callable, data: DataFrame | Series, not_indexed_same: bool | None=None, is_transform: bool=False, is_agg: bool=False) -> NDFrameT:\n    values, mutated = self._grouper.apply_groupwise(f, data)\n    if not_indexed_same is None:\n        not_indexed_same = mutated\n    return self._wrap_applied_output(data, values, not_indexed_same, is_transform)",
    "docstring": "Apply function f in python space Parameters ---------- f : callable Function to apply data : Series or DataFrame Data to apply f to not_indexed_same: bool, optional When specified, overrides the value of not_indexed_same. Apply behaves differently when the result index is equal to the input index, but this can be coincidental leading to value-dependent behavior. is_transform : bool, default False Indicator for whether the function is actually a transform and should not have group keys prepended. is_agg : bool, default False Indicator for whether the function is an aggregation. When the result is empty, we don't want to warn for this case. See _GroupBy._python_agg_general. Returns ------- Series or DataFrame data after applying f",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\groupby\\groupby.py",
    "ast_data": "FunctionDef name:_python_apply_general arg:self arg:f arg:data arg:not_indexed_same arg:is_transform arg:is_agg arguments arg arg arg arg arg arg Assign Call If Compare Assign Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "_random_cd",
    "source_code": "def _random_cd(best_sample: np.ndarray, n_iters: int, n_nochange: int, rng: GeneratorType, **kwargs: dict) -> np.ndarray:\n    del kwargs\n    n, d = best_sample.shape\n    if d == 0 or n == 0:\n        return np.empty((n, d))\n    if d == 1 or n == 1:\n        return best_sample\n    best_disc = discrepancy(best_sample)\n    bounds = ([0, d - 1], [0, n - 1], [0, n - 1])\n    n_nochange_ = 0\n    n_iters_ = 0\n    while n_nochange_ < n_nochange and n_iters_ < n_iters:\n        n_iters_ += 1\n        col = rng_integers(rng, *bounds[0], endpoint=True)\n        row_1 = rng_integers(rng, *bounds[1], endpoint=True)\n        row_2 = rng_integers(rng, *bounds[2], endpoint=True)\n        disc = _perturb_discrepancy(best_sample, row_1, row_2, col, best_disc)\n        if disc < best_disc:\n            best_sample[row_1, col], best_sample[row_2, col] = (best_sample[row_2, col], best_sample[row_1, col])\n            best_disc = disc\n            n_nochange_ = 0\n        else:\n            n_nochange_ += 1\n    return best_sample",
    "docstring": "Optimal LHS on CD. Create a base LHS and do random permutations of coordinates to lower the centered discrepancy. Because it starts with a normal LHS, it also works with the keyword argument. Two stopping criterion are used to stop the algorithm: at most, iterations are performed; or if there is no improvement for consecutive iterations.",
    "type": "function",
    "file_path": "scipy\\scipy\\stats\\_qmc.py",
    "ast_data": "FunctionDef name:_random_cd arg:best_sample arg:n_iters arg:n_nochange arg:rng arguments arg arg arg arg arg Assign If BoolOp Compare Compare Return return:yes Call If BoolOp Compare Compare Return return:yes Assign Call Assign Assign Assign While BoolOp Compare Compare Assign Call Assign Call Assign Call Assign Call If Compare Assign Assign Assign Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "set_color",
    "source_code": "def set_color(self, color):\n    mcolors._check_color_like(color=color)\n    self._color = color\n    self.stale = True",
    "docstring": "Set the color of the line. Parameters ---------- color : :mpltype:",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\lines.py",
    "ast_data": "FunctionDef name:set_color arg:self arg:color arguments arg arg Call Assign Assign"
  },
  {
    "library": "django",
    "name": "mean",
    "source_code": "@property\ndef mean(self):\n    return self.statistics()[2]",
    "docstring": "Return the mean of all pixel values of this band.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\gdal\\raster\\band.py",
    "ast_data": "FunctionDef name:mean arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "sphinx",
    "name": "add_source_parser",
    "source_code": "def add_source_parser(self, parser: type[Parser], override: bool=False) -> None:\n    self.registry.add_source_parser(parser, override=override)",
    "docstring": "Register a parser class. :param override: If false, do not install it if another parser is already installed for the same suffix. If true, unconditionally install the parser. .. versionadded:: 1.4 .. versionchanged:: 1.8 *suffix* argument is deprecated. It only accepts *parser* argument. Use :meth: API to register suffix instead. .. versionchanged:: 1.8 Add *override* keyword.",
    "type": "method",
    "file_path": "sphinx\\sphinx\\application.py",
    "ast_data": "FunctionDef name:add_source_parser arg:self arg:parser arg:override arguments arg arg arg Call"
  },
  {
    "library": "django",
    "name": "slugify",
    "source_code": "@keep_lazy_text\ndef slugify(value, allow_unicode=False):\n    value = str(value)\n    if allow_unicode:\n        value = unicodedata.normalize('NFKC', value)\n    else:\n        value = unicodedata.normalize('NFKD', value).encode('ascii', 'ignore').decode('ascii')\n    value = re.sub('[^\\\\w\\\\s-]', '', value.lower())\n    return re.sub('[-\\\\s]+', '-', value).strip('-_')",
    "docstring": "Convert to ASCII if 'allow_unicode' is False. Convert spaces or repeated dashes to single dashes. Remove characters that aren't alphanumerics, underscores, or hyphens. Convert to lowercase. Also strip leading and trailing whitespace, dashes, and underscores.",
    "type": "function",
    "file_path": "django\\django\\utils\\text.py",
    "ast_data": "FunctionDef name:slugify arg:value arg:allow_unicode arguments arg arg Assign Call If Assign Call Assign Call Call Call Assign Call Call Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "xxd_output_to_bytes",
    "source_code": "def xxd_output_to_bytes(input_cc_file):\n    pattern = re.compile('\\\\W*(0x[0-9a-fA-F,x ]+).*')\n    model_bytearray = bytearray()\n    with open(input_cc_file) as file_handle:\n        for line in file_handle:\n            values_match = pattern.match(line)\n            if values_match is None:\n                continue\n            list_text = values_match.group(1)\n            values_text = filter(None, list_text.split(','))\n            values = [int(x, base=16) for x in values_text]\n            model_bytearray.extend(values)\n    return bytes(model_bytearray)",
    "docstring": "Converts xxd output C++ source file to bytes (immutable). Args: input_cc_file: Full path name to th C++ source file dumped by xxd Raises: RuntimeError: If input_cc_file path is invalid. IOError: If input_cc_file cannot be opened. Returns: A bytearray corresponding to the input cc file array.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\lite\\tools\\flatbuffer_utils.py",
    "ast_data": "FunctionDef name:xxd_output_to_bytes arg:input_cc_file arguments arg Assign Call Assign Call With Call For Assign Call If Compare Assign Call Assign Call Call Assign Call Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "record_gradient",
    "source_code": "@tf_export('__internal__.record_gradient', v1=[])\ndef record_gradient(op_name, inputs, attrs, outputs):\n    pywrap_tfe.TFE_Py_RecordGradient(op_name, inputs, attrs, outputs, ops.get_name_scope())",
    "docstring": "Explicitly record the gradient for a given op. Args: op_name: The op name as listed in the for the op. inputs: A list of tensor inputs to the op. attrs: The op attributes as a flattened list of alternating attribute names and attribute values. outputs: A list of tensor outputs from the op.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\backprop.py",
    "ast_data": "FunctionDef name:record_gradient arg:op_name arg:inputs arg:attrs arg:outputs arguments arg arg arg arg Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "record",
    "source_code": "def record(self, flat_outputs):\n    if self._tape_watching and (not isinstance(flat_outputs, ops.Operation)) and (flat_outputs is not None):\n        self._functions.record(flat_outputs, self._inference_args, self._input_tangents)",
    "docstring": "Given outputs from the execution of , records the operation.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\polymorphic_function\\concrete_function.py",
    "ast_data": "FunctionDef name:record arg:self arg:flat_outputs arguments arg arg If BoolOp Call Compare Call"
  },
  {
    "library": "tensorflow",
    "name": "process_batch_and_step_size",
    "source_code": "def process_batch_and_step_size(strategy, inputs, batch_size, steps_per_epoch, mode, validation_split=0.0):\n    first_x_value = nest.flatten(inputs)[0]\n    if isinstance(first_x_value, np.ndarray):\n        num_samples = first_x_value.shape[0]\n        if validation_split and 0.0 < validation_split < 1.0:\n            num_samples = int(num_samples * (1 - validation_split))\n        steps_per_epoch, batch_size = get_input_params(strategy, num_samples, steps_per_epoch, batch_size, mode=mode)\n    return (batch_size, steps_per_epoch)",
    "docstring": "Process the batch size and step size based on input and dist strategy.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\distribute\\distributed_training_utils_v1.py",
    "ast_data": "FunctionDef name:process_batch_and_step_size arg:strategy arg:inputs arg:batch_size arg:steps_per_epoch arg:mode arg:validation_split arguments arg arg arg arg arg arg Assign Call If Call Assign If BoolOp Compare Assign Call Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_create_pseudo_names",
    "source_code": "def _create_pseudo_names(tensors, prefix):\n\n    def one_index(ele):\n        if isinstance(ele, int):\n            return ele + 1\n        return ele\n    flat_paths = list(nest.yield_flat_paths(tensors))\n    flat_paths = nest.map_structure(one_index, flat_paths)\n    names = []\n    for path in flat_paths:\n        if not path:\n            name = prefix + '1'\n        else:\n            name = '_'.join((str(p) for p in path))\n            if isinstance(path[0], int):\n                name = prefix + name\n        names.append(name)\n    return names",
    "docstring": "Creates pseudo {input | output} names for subclassed Models. Warning: this function should only be used to define default names for and . No other use cases should rely on a 's input or output names. Example with dict: becomes: Example with list: becomes: Args: tensors: 's outputs or inputs. prefix: 'output_' for outputs, 'input_' for inputs. Returns: Flattened list of pseudo names.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\compile_utils.py",
    "ast_data": "FunctionDef name:_create_pseudo_names arg:tensors arg:prefix arguments arg arg FunctionDef name:one_index arg:ele arguments arg If Call Return return:yes Return return:yes Assign Call Call Assign Call Assign For If Assign Assign Call Call If Call Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "error_log",
    "source_code": "def error_log(error_msg, level=ERROR):\n    del error_msg, level",
    "docstring": "Empty helper method.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\platform\\tf_logging.py",
    "ast_data": "FunctionDef name:error_log arg:error_msg arg:level arguments arg arg"
  },
  {
    "library": "matplotlib",
    "name": "set_positions",
    "source_code": "def set_positions(self, posA, posB):\n    if posA is not None:\n        self._posA_posB[0] = posA\n    if posB is not None:\n        self._posA_posB[1] = posB\n    self.stale = True",
    "docstring": "Set the start and end positions of the connecting path. Parameters ---------- posA, posB : None, tuple (x, y) coordinates of arrow tail and arrow head respectively. If use current value.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\patches.py",
    "ast_data": "FunctionDef name:set_positions arg:self arg:posA arg:posB arguments arg arg arg If Compare Assign If Compare Assign Assign"
  },
  {
    "library": "tensorflow",
    "name": "AUCSummationMethod",
    "source_code": "class AUCSummationMethod(Enum):\n    INTERPOLATION = 'interpolation'\n    MAJORING = 'majoring'\n    MINORING = 'minoring'\n\n    @staticmethod\n    def from_str(key):\n        if key in ('interpolation', 'Interpolation'):\n            return AUCSummationMethod.INTERPOLATION\n        elif key in ('majoring', 'Majoring'):\n            return AUCSummationMethod.MAJORING\n        elif key in ('minoring', 'Minoring'):\n            return AUCSummationMethod.MINORING\n        else:\n            raise ValueError('Invalid AUC summation method value \"%s\".' % key)",
    "docstring": "Type of AUC summation method. Contains the following values: * 'interpolation': Applies mid-point summation scheme for curve. For curve, interpolates (true/false) positives but not the ratio that is precision (see Davis & Goadrich 2006 for details). * 'minoring': Applies left summation for increasing intervals and right summation for decreasing intervals. * 'majoring': Applies right summation for increasing intervals and left summation for decreasing intervals.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\utils\\metrics_utils.py",
    "ast_data": "ClassDef name:AUCSummationMethod Assign Assign Assign FunctionDef name:from_str arg:key arguments arg If Compare Return return:yes If Compare Return return:yes If Compare Return return:yes Raise Call"
  },
  {
    "library": "tensorflow",
    "name": "has_function",
    "source_code": "def has_function(self, name):\n    self.ensure_initialized()\n    return bool(pywrap_tfe.TFE_ContextHasFunction(self._handle, name))",
    "docstring": "Check if a function is registered.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\context.py",
    "ast_data": "FunctionDef name:has_function arg:self arg:name arguments arg arg Call Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "get_handle_to_forward_prefetch",
    "source_code": "def get_handle_to_forward_prefetch(self, current_handle: FlatParamHandle) -> Optional[FlatParamHandle]:\n    current_index = current_handle._pre_forward_order_index\n    if current_index is None:\n        return None\n    target_index = current_index + 1\n    target_handle: Optional[FlatParamHandle] = None\n    for _ in range(self._forward_prefetch_limit):\n        if target_index >= len(self.handles_pre_forward_order):\n            break\n        target_handle = self.handles_pre_forward_order[target_index]\n        target_index += 1\n    return target_handle",
    "docstring": "Returns a :class: of the handles keys of the handles to forward prefetch given the current handles key. If there are no valid handles keys to prefetch, then this returns an empty :class:.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\fsdp\\_exec_order_utils.py",
    "ast_data": "FunctionDef name:get_handle_to_forward_prefetch arg:self arg:current_handle arguments arg arg Assign If Compare Return return:no Assign For Call If Compare Call Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, meshes: List[layout_lib.Mesh], is_async=True, in_flight_nodes_limit=8):\n    if any((not isinstance(mesh, layout_lib.Mesh) for mesh in meshes)):\n        raise TypeError('Expected a flat list of Mesh objects, got {}'.format(meshes))\n    global _next_device_number\n    ctx = context.context()\n    with _next_device_number_lock:\n        self.name = '{}/device:CUSTOM:{}'.format(ctx.host_address_space(), _next_device_number)\n        _next_device_number += 1\n    device, device_info = _pywrap_dtensor_device.Allocate(self.name, is_async, in_flight_nodes_limit)\n    context.register_custom_device(device, self.name, device_info)\n    self._device_info = device_info\n    self._current_output_layout = None\n    self._current_default_mesh = None\n    self._meshes = set()\n    self._mesh_lock = threading.Lock()\n    for mesh in meshes:\n        self._register_mesh(mesh)",
    "docstring": "Create a new DTensorDevice which executes ops on . Args: meshes: A list of objects indicating groups of devices to execute on. These may also be registered lazily. is_async: Indicates whether DTensor operations on this client will return immediately (with \"non-ready\" handles) or block until executed. This is on by default and is exposed as an option for ease of debugging. in_flight_nodes_limit: Indicates the limit of in-flight nodes before enqueueing of async operations to DTensorDevice is blocked. This limit is per mesh. 0 for no limits from DTensor. Default is 8.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\dtensor\\python\\dtensor_device.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:meshes arg:is_async arg:in_flight_nodes_limit arguments arg arg arg arg If Call Call Raise Call Call Assign Call With Assign Call Call Assign Call Call Assign Assign Assign Assign Call Assign Call For Call"
  },
  {
    "library": "pytorch",
    "name": "move_param_to_callee",
    "source_code": "def move_param_to_callee(root, callee_name, param_fqn):\n    atoms = param_fqn.split('.')\n    mod_itr, param_val = _recursive_getattr_with_parent(split, param_fqn)\n    is_buffer = atoms[-1] in mod_itr._buffers\n    assert isinstance(param_val, torch.Tensor), f\"Expected '{param_fqn}' to be {torch.Tensor} but got {type(param_val)}.\" + (f\" It might happen if module '{param_fqn}' was passed to some 'leaf function'(see https://pytorch.org/docs/stable/fx.html#fx.wrap). Please inspect usages of '{param_fqn}' in the traced graph.\" if isinstance(param_val, torch.nn.Module) else '')\n    callee = root.get_submodule(callee_name)\n    assert not hasattr(callee, param_fqn), f'Module {callee_name} already has a parameter named {param_fqn}'\n    if is_buffer:\n        _assign_attr(param_val, callee, param_fqn, attr_kind=_AttrKind.BUFFER, persistent=True)\n    else:\n        _assign_attr(param_val, callee, param_fqn, attr_kind=_AttrKind.PARAMETER)\n    logger.debug(f'Moved parameter {param_fqn} to {callee_name}')\n    to_delete.append((mod_itr, atoms[-1]))",
    "docstring": "Move a parameter from the root module to a submodule. Args: root: The root module. callee_name: The name of the submodule to move the parameter to. param_fqn: The fully qualified name of the parameter to move.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\pipelining\\_IR.py",
    "ast_data": "FunctionDef name:move_param_to_callee arg:root arg:callee_name arg:param_fqn arguments arg arg arg Assign Call Assign Call Assign Compare Call Call Call Assign Call Call If Call Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_as_graph_element",
    "source_code": "def _as_graph_element(self):\n    values = self.values\n    while isinstance(values, RaggedTensor):\n        values = values.values\n    return values",
    "docstring": "Convert to a graph element.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\ragged_tensor.py",
    "ast_data": "FunctionDef name:_as_graph_element arg:self arguments arg Assign While Call Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "shared_resources",
    "source_code": "def shared_resources():\n    return ops.get_collection(ops.GraphKeys.RESOURCES)",
    "docstring": "Returns resources visible to all tasks in the cluster.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\resources.py",
    "ast_data": "FunctionDef name:shared_resources arguments Return return:yes Call"
  },
  {
    "library": "kornia",
    "name": "SpatialGradient3d",
    "source_code": "class SpatialGradient3d(Module):\n    ONNX_DEFAULT_INPUTSHAPE: ClassVar[list[int]] = [-1, -1, -1, -1, -1]\n    ONNX_DEFAULT_OUTPUTSHAPE: ClassVar[list[int]] = [-1, -1, -1, -1, -1, -1]\n\n    def __init__(self, mode: str='diff', order: int=1) -> None:\n        super().__init__()\n        self.order: int = order\n        self.mode: str = mode\n        self.kernel = get_spatial_gradient_kernel3d(mode, order)\n\n    def __repr__(self) -> str:\n        return f'{self.__class__.__name__}(order={self.order}, mode={self.mode})'\n\n    def forward(self, input: Tensor) -> Tensor:\n        return spatial_gradient3d(input, self.mode, self.order)",
    "docstring": "Compute the first and second order volume derivative in x, y and d using a diff operator. Args: mode: derivatives modality, can be: or . order: the order of the derivatives. Return: the spatial gradients of the input feature map. Shape: - Input: :math:. D, H, W are spatial dimensions, gradient is calculated w.r.t to them. - Output: :math: or :math: Examples: >>> input = torch.rand(1, 4, 2, 4, 4) >>> output = SpatialGradient3d()(input) >>> output.shape torch.Size([1, 4, 3, 2, 4, 4])",
    "type": "class",
    "file_path": "kornia\\kornia\\filters\\sobel.py",
    "ast_data": "ClassDef name:SpatialGradient3d FunctionDef name:__init__ arg:self arg:mode arg:order arguments arg arg arg Call Call Assign Call FunctionDef name:__repr__ arg:self arguments arg Return return:yes FunctionDef name:forward arg:self arg:input arguments arg arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "_Stack",
    "source_code": "class _Stack(Constraint):\n\n    def __init__(self, cseq, dim=0):\n        assert all((isinstance(c, Constraint) for c in cseq))\n        self.cseq = list(cseq)\n        self.dim = dim\n        super().__init__()\n\n    @property\n    def is_discrete(self) -> bool:\n        return any((c.is_discrete for c in self.cseq))\n\n    @property\n    def event_dim(self) -> int:\n        dim = max((c.event_dim for c in self.cseq))\n        if self.dim + dim < 0:\n            dim += 1\n        return dim\n\n    def check(self, value):\n        assert -value.dim() <= self.dim < value.dim()\n        vs = [value.select(self.dim, i) for i in range(value.size(self.dim))]\n        return torch.stack([constr.check(v) for v, constr in zip(vs, self.cseq)], self.dim)",
    "docstring": "Constraint functor that applies a sequence of constraints at the submatrices at dimension , in a way compatible with :func:.",
    "type": "class",
    "file_path": "pytorch\\torch\\distributions\\constraints.py",
    "ast_data": "ClassDef name:_Stack FunctionDef name:__init__ arg:self arg:cseq arg:dim arguments arg arg arg Call Call Assign Call Assign Call Call FunctionDef name:is_discrete arg:self arguments arg Return return:yes Call FunctionDef name:event_dim arg:self arguments arg Assign Call If Compare Return return:yes FunctionDef name:check arg:self arg:value arguments arg arg Compare Call Call Assign Call Call Call Return return:yes Call Call Call"
  },
  {
    "library": "django",
    "name": "url_has_allowed_host_and_scheme",
    "source_code": "def url_has_allowed_host_and_scheme(url, allowed_hosts, require_https=False):\n    if url is not None:\n        url = url.strip()\n    if not url:\n        return False\n    if allowed_hosts is None:\n        allowed_hosts = set()\n    elif isinstance(allowed_hosts, str):\n        allowed_hosts = {allowed_hosts}\n    return _url_has_allowed_host_and_scheme(url, allowed_hosts, require_https=require_https) and _url_has_allowed_host_and_scheme(url.replace('\\\\', '/'), allowed_hosts, require_https=require_https)",
    "docstring": "Return ``. Note: \"True\" doesn't entail that a URL is \"safe\". It may still be e.g. quoted incorrectly. Ensure to also use django.utils.encoding.iri_to_uri() on the path component of untrusted URLs.",
    "type": "function",
    "file_path": "django\\django\\utils\\http.py",
    "ast_data": "FunctionDef name:url_has_allowed_host_and_scheme arg:url arg:allowed_hosts arg:require_https arguments arg arg arg If Compare Assign Call If Return return:yes If Compare Assign Call If Call Assign Return return:yes BoolOp Call Call Call"
  },
  {
    "library": "cherrypy",
    "name": "__iter__",
    "source_code": "def __iter__(self):\n    return self",
    "docstring": "Return iterator.",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\lib\\__init__.py",
    "ast_data": "FunctionDef name:__iter__ arg:self arguments arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "CacheArtifact",
    "source_code": "@dataclasses.dataclass(frozen=True)\nclass CacheArtifact(ABC):\n    key: str\n    content: bytes = dataclasses.field(repr=False)\n\n    @staticmethod\n    def serialize(writer: BytesWriter, cls: 'CacheArtifact') -> None:\n        writer.write_str(cls.key)\n        writer.write_bytes(cls.content)\n\n    @staticmethod\n    def deserialize(artifact_type: str, reader: BytesReader) -> 'CacheArtifact':\n        key = reader.read_str()\n        content = reader.read_bytes()\n        return CacheArtifactFactory.create(artifact_type, key, content)\n\n    @staticmethod\n    def encode(content: Any) -> bytes:\n        assert isinstance(content, bytes), f'Expected bytes, got {type(content)}'\n        return content\n\n    @abstractmethod\n    def populate_cache(self) -> None:\n        pass\n\n    @staticmethod\n    def type() -> str:\n        raise RuntimeError('CacheArtifact is an abstract class, please use a subclass')",
    "docstring": "Data for each cache artifact that will be serialized and deserialized",
    "type": "class",
    "file_path": "pytorch\\torch\\compiler\\_cache.py",
    "ast_data": "ClassDef name:CacheArtifact Call FunctionDef name:serialize arg:writer arg:cls arguments arg arg Call Call FunctionDef name:deserialize arg:artifact_type arg:reader arguments arg arg Assign Call Assign Call Return return:yes Call FunctionDef name:encode arg:content arguments arg Call Call Return return:yes FunctionDef name:populate_cache arg:self arguments arg FunctionDef name:type arguments Raise Call Call"
  },
  {
    "library": "scipy",
    "name": "confidence_interval",
    "source_code": "def confidence_interval(self, confidence_level=0.95, method=None):\n    if isinstance(method, BootstrapMethod):\n        xp = array_namespace(self._x)\n        message = '`method` must be `None` if `pearsonr` arguments were not NumPy arrays.'\n        if not is_numpy(xp):\n            raise ValueError(message)\n        ci = _pearsonr_bootstrap_ci(confidence_level, method, self._x, self._y, self._alternative, self._axis)\n    elif method is None:\n        ci = _pearsonr_fisher_ci(self.statistic, self._n, confidence_level, self._alternative)\n    else:\n        message = '`method` must be an instance of `BootstrapMethod` or None.'\n        raise ValueError(message)\n    return ci",
    "docstring": "The confidence interval for the correlation coefficient. Compute the confidence interval for the correlation coefficient `methodmethodBootstrapMethodscipy.stats.bootstraplowhigh`. References ---------- .. [1] \"Pearson correlation coefficient\", Wikipedia,",
    "type": "method",
    "file_path": "scipy\\scipy\\stats\\_stats_py.py",
    "ast_data": "FunctionDef name:confidence_interval arg:self arg:confidence_level arg:method arguments arg arg arg If Call Assign Call Assign If Call Raise Call Assign Call If Compare Assign Call Assign Raise Call Return return:yes"
  },
  {
    "library": "kornia",
    "name": "_apply_transform_unimplemented",
    "source_code": "def _apply_transform_unimplemented(self: Module, *input: Any) -> Tensor:\n    raise NotImplementedError(f'Module [{type(self).__name__}] is missing the required \"apply_tranform\" function')",
    "docstring": "Define the computation performed at every call. Should be overridden by all subclasses.",
    "type": "function",
    "file_path": "kornia\\kornia\\augmentation\\base.py",
    "ast_data": "FunctionDef name:_apply_transform_unimplemented arg:self arguments arg arg Raise Call Call"
  },
  {
    "library": "tensorflow",
    "name": "DSaveSpec",
    "source_code": "class DSaveSpec(saveable_object.SaveSpec):\n\n    def __init__(self, tensor, slice_spec, name, global_shape, layout, dtype=None, device=None):\n        super().__init__(tensor=tensor, slice_spec=slice_spec, name=name, dtype=dtype, device=device)\n        self.global_shape = global_shape\n        self.layout = layout",
    "docstring": "DTensor SaveSpec that additionaly captures global_shape and layout.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\dtensor\\python\\d_variable.py",
    "ast_data": "ClassDef name:DSaveSpec FunctionDef name:__init__ arg:self arg:tensor arg:slice_spec arg:name arg:global_shape arg:layout arg:dtype arg:device arguments arg arg arg arg arg arg arg arg Call Call Assign Assign"
  },
  {
    "library": "tensorflow",
    "name": "take_while",
    "source_code": "@deprecation.deprecated(None, 'Use `tf.data.Dataset.take_while(...)')\n@tf_export('data.experimental.take_while')\ndef take_while(predicate):\n\n    def _apply_fn(dataset):\n        return dataset.take_while(predicate=predicate)\n    return _apply_fn",
    "docstring": "A transformation that stops dataset iteration based on a . Args: predicate: A function that maps a nested structure of tensors (having shapes and types defined by and ) to a scalar tensor. Returns: A transformation function, which can be passed to .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\data\\experimental\\ops\\take_while_ops.py",
    "ast_data": "FunctionDef name:take_while arg:predicate arguments arg FunctionDef name:_apply_fn arg:dataset arguments arg Return return:yes Call Return return:yes Call Call"
  },
  {
    "library": "django",
    "name": "tuple",
    "source_code": "@property\ndef tuple(self):\n    return self._cs.tuple",
    "docstring": "Return a tuple of the point.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\geos\\point.py",
    "ast_data": "FunctionDef name:tuple arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_objects_with_attributes",
    "source_code": "def _objects_with_attributes(full_list):\n    return [o for o in full_list if saveable_object_util.saveable_objects_from_trackable(o)]",
    "docstring": "Filters out objects with no direct variable dependencies for assertions.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\checkpoint\\checkpoint.py",
    "ast_data": "FunctionDef name:_objects_with_attributes arg:full_list arguments arg Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "mjci",
    "source_code": "def mjci(data, prob=(0.25, 0.5, 0.75), axis=None):\n\n    def _mjci_1D(data, p):\n        data = np.sort(data.compressed())\n        n = data.size\n        prob = (np.array(p) * n + 0.5).astype(int)\n        betacdf = beta.cdf\n        mj = np.empty(len(prob), float64)\n        x = np.arange(1, n + 1, dtype=float64) / n\n        y = x - 1.0 / n\n        for i, m in enumerate(prob):\n            W = betacdf(x, m - 1, n - m) - betacdf(y, m - 1, n - m)\n            C1 = np.dot(W, data)\n            C2 = np.dot(W, data ** 2)\n            mj[i] = np.sqrt(C2 - C1 ** 2)\n        return mj\n    data = ma.array(data, copy=False)\n    if data.ndim > 2:\n        raise ValueError(f\"Array 'data' must be at most two dimensional, but got data.ndim = {data.ndim}\")\n    p = np.atleast_1d(np.asarray(prob))\n    if axis is None:\n        return _mjci_1D(data, p)\n    else:\n        return ma.apply_along_axis(_mjci_1D, axis, data, p)",
    "docstring": "Returns the Maritz-Jarrett estimators of the standard error of selected experimental quantiles of the data. Parameters ---------- data : ndarray Data array. prob : sequence, optional Sequence of quantiles to compute. axis : int or None, optional Axis along which to compute the quantiles. If None, use a flattened array.",
    "type": "function",
    "file_path": "scipy\\scipy\\stats\\_mstats_extras.py",
    "ast_data": "FunctionDef name:mjci arg:data arg:prob arg:axis arguments arg arg arg FunctionDef name:_mjci_1D arg:data arg:p arguments arg arg Assign Call Call Assign Assign Call Call Assign Assign Call Call Assign Call Assign For Call Assign Call Call Assign Call Assign Call Assign Call Return return:yes Assign Call If Compare Raise Call Assign Call Call If Compare Return return:yes Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "to_complex64",
    "source_code": "@tf_export(v1=['to_complex64'])\n@dispatch.register_unary_elementwise_api\n@dispatch.add_dispatch_support\n@deprecation.deprecated(date=None, instructions='Use `tf.cast` instead.')\ndef to_complex64(x, name='ToComplex64'):\n    return cast(x, dtypes.complex64, name=name)",
    "docstring": "Casts a tensor to type . Args: x: A or or . name: A name for the operation (optional). Returns: A or or with same shape as with type . Raises: TypeError: If cannot be cast to the . @compatibility(TF2) This name was deprecated and removed in TF2, but has an exact replacement . There are no further issues with eager execution or tf.function. Before: >>> tf.compat.v1.to_complex64(tf.constant(1. + 2.j, dtype=tf.complex128)) After: >>> tf.cast(tf.constant(1. + 2.j, dtype=tf.complex128), tf.complex64) @end_compatibility",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\math_ops.py",
    "ast_data": "FunctionDef name:to_complex64 arg:x arg:name arguments arg arg Return return:yes Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "get_next",
    "source_code": "def get_next(self, device, name=None):\n    del device, name\n    with ops.device(self._worker):\n        return self._fn()",
    "docstring": "Get next element for the given device from the callable.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\v1\\input_lib.py",
    "ast_data": "FunctionDef name:get_next arg:self arg:device arg:name arguments arg arg arg With Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "upsample",
    "source_code": "def upsample(input, size=None, scale_factor=None, mode='nearest', align_corners=None):\n    warnings.warn('nn.quantized.functional.upsample is deprecated. Use nn.quantized.functional.interpolate instead.')\n    return interpolate(input, size, scale_factor, mode, align_corners)",
    "docstring": "Upsamples the input to either the given :attr: or the given :attr: .. warning:: This function is deprecated in favor of :func:. This is equivalent with `torch.nn.functional.interpolatemini-batch x channels x [optional depth] x [optional height] x widthbilinearnearestscale_factormodebilinear~torch.nn.Upsample` for concrete examples on how this affects the outputs.",
    "type": "function",
    "file_path": "pytorch\\torch\\ao\\nn\\quantized\\functional.py",
    "ast_data": "FunctionDef name:upsample arg:input arg:size arg:scale_factor arg:mode arg:align_corners arguments arg arg arg arg arg Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "reset_metrics",
    "source_code": "def reset_metrics(self):\n    metrics = self._get_training_eval_metrics()\n    for m in metrics:\n        m.reset_state()\n    if self._distribution_strategy:\n        distributed_training_utils_v1._reset_metrics(self)",
    "docstring": "Resets the state of metrics.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\training_v1.py",
    "ast_data": "FunctionDef name:reset_metrics arg:self arguments arg Assign Call For Call If Call"
  },
  {
    "library": "pandas",
    "name": "get_period_alias",
    "source_code": "def get_period_alias(offset_str: str) -> str | None:\n    return OFFSET_TO_PERIOD_FREQSTR.get(offset_str, None)",
    "docstring": "Alias to closest period strings BQ->Q etc.",
    "type": "function",
    "file_path": "pandas\\pandas\\tseries\\frequencies.py",
    "ast_data": "FunctionDef name:get_period_alias arg:offset_str arguments arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "FunctionType",
    "source_code": "@tf_export('types.experimental.FunctionType')\nclass FunctionType(inspect.Signature, metaclass=abc.ABCMeta):\n\n    @classmethod\n    def from_callable(cls, obj, *, follow_wrapped=True):\n        return super().from_callable(obj, follow_wrapped=follow_wrapped)",
    "docstring": "Represents the type of a TensorFlow callable. FunctionType inherits from inspect.Signature which canonically represents the structure (and optionally type) information of input parameters and output of a Python function. Additionally, it integrates with the tf.function type system () to provide a holistic representation of the the I/O contract of the callable. It is used for: - Canonicalization and type-checking of Python input arguments - Type-based dispatch to concrete functions - Packing/unpacking structured python values to Tensors - Generation of structured placeholder values for tracing",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\types\\core.py",
    "ast_data": "ClassDef name:FunctionType FunctionDef name:from_callable arg:cls arg:obj arguments arg arg arg Return return:yes Call Call Call"
  },
  {
    "library": "scikit-learn",
    "name": "_check_feature_names_in",
    "source_code": "def _check_feature_names_in(estimator, input_features=None, *, generate_names=True):\n    feature_names_in_ = getattr(estimator, 'feature_names_in_', None)\n    n_features_in_ = getattr(estimator, 'n_features_in_', None)\n    if input_features is not None:\n        input_features = np.asarray(input_features, dtype=object)\n        if feature_names_in_ is not None and (not np.array_equal(feature_names_in_, input_features)):\n            raise ValueError('input_features is not equal to feature_names_in_')\n        if n_features_in_ is not None and len(input_features) != n_features_in_:\n            raise ValueError(f'input_features should have length equal to number of features ({n_features_in_}), got {len(input_features)}')\n        return input_features\n    if feature_names_in_ is not None:\n        return feature_names_in_\n    if not generate_names:\n        return\n    if n_features_in_ is None:\n        raise ValueError('Unable to generate feature names without n_features_in_')\n    return np.asarray([f'x{i}' for i in range(n_features_in_)], dtype=object)",
    "docstring": "Check and generate names if needed. Commonly used in :term:. Parameters ---------- input_features : array-like of str or None, default=None Input features. - If is , then is used as feature names in. If is not defined, then the following input feature names are generated: . - If is an array-like, then must match if is defined. generate_names : bool, default=True Whether to generate names when is and is not defined. This is useful for transformers that validates but do not require them in :term: e.g. . Returns ------- feature_names_in : ndarray of str or Feature names in.",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\utils\\validation.py",
    "ast_data": "FunctionDef name:_check_feature_names_in arg:estimator arg:input_features arguments arg arg arg Assign Call Assign Call If Compare Assign Call If BoolOp Compare Call Raise Call If BoolOp Compare Compare Call Raise Call Call Return return:yes If Compare Return return:yes If Return return:no If Compare Raise Call Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "conv_output_shape",
    "source_code": "def conv_output_shape(input_shape, kernel_shape, strides, padding):\n    dims = range(len(kernel_shape))\n    output_shape = [conv_output_length(input_shape[d], kernel_shape[d], padding, strides[d]) for d in dims]\n    output_shape = tuple([0 if input_shape[d] == 0 else output_shape[d] for d in dims])\n    return output_shape",
    "docstring": "Return the output shape of an N-D convolution. Forces dimensions where input is empty (size 0) to remain empty. Args: input_shape: tuple of size N: , spatial shape of the input. kernel_shape: tuple of size N, spatial shape of the convolutional kernel / receptive field. strides: tuple of size N, strides along each spatial dimension. padding: type of padding, string or . means no padding. results in padding evenly to the left/right or up/down of the input such that output has the same height/width dimension as the input. Returns: tuple of size N: , spatial shape of the output.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\utils\\conv_utils.py",
    "ast_data": "FunctionDef name:conv_output_shape arg:input_shape arg:kernel_shape arg:strides arg:padding arguments arg arg arg arg Assign Call Call Assign Call Assign Call Compare Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_maybe_convert_scalar_types_to_dtypes",
    "source_code": "def _maybe_convert_scalar_types_to_dtypes(scalar_types: list[Any]) -> list[Optional[torch.dtype]]:\n    _SCALAR_TYPE_TO_DTYPE = {0: torch.uint8, 1: torch.int8, 2: torch.short, 3: torch.int, 4: torch.int64, 5: torch.half, 6: torch.float, 7: torch.double, 8: torch.complex32, 9: torch.complex64, 10: torch.complex128, 11: torch.bool, 12: torch.qint8, 13: torch.quint8, 14: torch.qint32, 15: torch.bfloat16, 16: torch.float8_e5m2, 17: torch.float8_e4m3fn, 18: torch.float8_e5m2fnuz, 19: torch.float8_e4m3fnuz}\n    if any((not isinstance(x, (type(None), int)) for x in scalar_types)):\n        return scalar_types\n    dtypes: list[Optional[torch.dtype]] = []\n    for scalar_type in scalar_types:\n        if scalar_type is None:\n            dtypes.append(scalar_type)\n        elif scalar_type not in _SCALAR_TYPE_TO_DTYPE:\n            raise ValueError('Unrecognized scalar type {scalar_type}')\n        else:\n            dtypes.append(_SCALAR_TYPE_TO_DTYPE[scalar_type])\n    return dtypes",
    "docstring": "When a list of s is passed through the dispatcher as , it is converted to a list of scalar type enum values. This function converts it back to a list of s.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\_symmetric_memory\\__init__.py",
    "ast_data": "FunctionDef name:_maybe_convert_scalar_types_to_dtypes arg:scalar_types arguments arg Assign If Call Call Call Return return:yes For If Compare Call If Compare Raise Call Call Return return:yes"
  },
  {
    "library": "pandas",
    "name": "render_pep440_post",
    "source_code": "def render_pep440_post(pieces):\n    if pieces['closest-tag']:\n        rendered = pieces['closest-tag']\n        if pieces['distance'] or pieces['dirty']:\n            rendered += f'.post{pieces['distance']}'\n            if pieces['dirty']:\n                rendered += '.dev0'\n            rendered += plus_or_dot(pieces)\n            rendered += f'g{pieces['short']}'\n    else:\n        rendered = f'0.post{pieces['distance']}'\n        if pieces['dirty']:\n            rendered += '.dev0'\n        rendered += f'+g{pieces['short']}'\n    return rendered",
    "docstring": "TAG[.postDISTANCE[.dev0]+gHEX] . The \".dev0\" means dirty. Note that .dev0 sorts backwards (a dirty tree will appear \"older\" than the corresponding clean one), but you shouldn't be releasing software with -dirty anyways. Exceptions: 1: no tags. 0.postDISTANCE[.dev0]",
    "type": "function",
    "file_path": "pandas\\pandas\\_version.py",
    "ast_data": "FunctionDef name:render_pep440_post arg:pieces arguments arg If Assign If BoolOp If Call Assign If Return return:yes"
  },
  {
    "library": "django",
    "name": "from_bbox",
    "source_code": "@classmethod\ndef from_bbox(cls, bbox):\n    x0, y0, x1, y1 = bbox\n    return OGRGeometry('POLYGON((%s %s, %s %s, %s %s, %s %s, %s %s))' % (x0, y0, x0, y1, x1, y1, x1, y0, x0, y0))",
    "docstring": "Construct a Polygon from a bounding box (4-tuple).",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\gdal\\geometries.py",
    "ast_data": "FunctionDef name:from_bbox arg:cls arg:bbox arguments arg arg Assign Return return:yes Call"
  },
  {
    "library": "cherrypy",
    "name": "SvcDoRun",
    "source_code": "def SvcDoRun(self):\n    from cherrypy import process\n    process.bus.start()\n    process.bus.block()",
    "docstring": "Start the service.",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\process\\win32.py",
    "ast_data": "FunctionDef name:SvcDoRun arg:self arguments arg Call Call"
  },
  {
    "library": "tensorflow",
    "name": "auto_shard_dataset",
    "source_code": "def auto_shard_dataset(dataset, num_shards, index, num_replicas_in_sync=None):\n    if isinstance(dataset, distribute_types.DistributedDatasetInterface):\n        return dataset.auto_shard(num_shards, index)\n    if dataset.options().experimental_distribute.auto_shard_policy != AutoShardPolicy.OFF:\n        if num_replicas_in_sync is None:\n            num_replicas_in_sync = 1\n        if isinstance(dataset, data_types.DatasetV1):\n            return distribute._AutoShardDatasetV1(dataset, num_shards, index, num_replicas_in_sync)\n        else:\n            return distribute._AutoShardDataset(dataset, num_shards, index, num_replicas_in_sync)\n    else:\n        return dataset",
    "docstring": "Shard the input pipeline by sharding the underlying list of files. Args: dataset: A instance, typically the result of a bunch of dataset transformations. num_shards: A scalar , representing the number of shards operating in parallel. Same usage as in . index: A scalar , representing the worker index. Same usage as in . num_replicas_in_sync: An integer representing the total number of replicas across all workers. This is used in the rewrite when sharding by data. Returns: A modified obtained by updating the pipeline sharded by the files. The input dataset will be returned if we cannot automatically determine a good way to shard the input dataset.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\input_ops.py",
    "ast_data": "FunctionDef name:auto_shard_dataset arg:dataset arg:num_shards arg:index arg:num_replicas_in_sync arguments arg arg arg arg If Call Return return:yes Call If Compare Call If Compare Assign If Call Return return:yes Call Return return:yes Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "_pre_draw",
    "source_code": "def _pre_draw(self, framedata, blit):\n    if blit:\n        self._blit_clear(self._drawn_artists)\n    else:\n        for artist in self._drawn_artists:\n            artist.set_visible(False)",
    "docstring": "Clears artists from the last frame.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\animation.py",
    "ast_data": "FunctionDef name:_pre_draw arg:self arg:framedata arg:blit arguments arg arg arg If Call For Call"
  },
  {
    "library": "django",
    "name": "_assign_simple_slice",
    "source_code": "def _assign_simple_slice(self, start, stop, valueList):\n    origLen = len(self)\n    stop = max(start, stop)\n    newLen = origLen - stop + start + len(valueList)\n\n    def newItems():\n        for i in range(origLen + 1):\n            if i == start:\n                yield from valueList\n            if i < origLen:\n                if i < start or i >= stop:\n                    yield self._get_single_internal(i)\n    self._rebuild(newLen, newItems())",
    "docstring": "Assign a simple slice; Can assign slice of any length",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\geos\\mutable_list.py",
    "ast_data": "FunctionDef name:_assign_simple_slice arg:self arg:start arg:stop arg:valueList arguments arg arg arg arg Assign Call Assign Call Assign Call FunctionDef name:newItems arguments For Call If Compare If Compare If BoolOp Compare Compare Call Call Call"
  },
  {
    "library": "sphinx",
    "name": "add_html_math_renderer",
    "source_code": "def add_html_math_renderer(self, name: str, inline_renderers: _MathsInlineRenderers | None=None, block_renderers: _MathsBlockRenderers | None=None) -> None:\n    self.registry.add_html_math_renderer(name, inline_renderers, block_renderers)",
    "docstring": "Register a math renderer for HTML. The *name* is a name of math renderer. Both *inline_renderers* and *block_renderers* are used as visitor functions for the HTML writer: the former for inline math node (`add_node` for details. .. versionadded:: 1.8",
    "type": "method",
    "file_path": "sphinx\\sphinx\\application.py",
    "ast_data": "FunctionDef name:add_html_math_renderer arg:self arg:name arg:inline_renderers arg:block_renderers arguments arg arg arg arg Call"
  },
  {
    "library": "pytorch",
    "name": "_conv_add_relu_extra_inputs_getter_right",
    "source_code": "def _conv_add_relu_extra_inputs_getter_right(pattern):\n    _relu, add_pattern = pattern\n    _, extra_input, _conv = add_pattern\n    return [extra_input]",
    "docstring": "get inputs pattern for extra inputs, inputs for root node are assumed to be copied over from root node to the fused node",
    "type": "function",
    "file_path": "pytorch\\torch\\ao\\quantization\\backend_config\\onednn.py",
    "ast_data": "FunctionDef name:_conv_add_relu_extra_inputs_getter_right arg:pattern arguments arg Assign Assign Return return:yes"
  },
  {
    "library": "pandas",
    "name": "GroupByNthSelector",
    "source_code": "class GroupByNthSelector:\n\n    def __init__(self, groupby_object: groupby.GroupBy) -> None:\n        self.groupby_object = groupby_object\n\n    def __call__(self, n: PositionalIndexer | tuple, dropna: Literal['any', 'all', None]=None) -> DataFrame | Series:\n        return self.groupby_object._nth(n, dropna)\n\n    def __getitem__(self, n: PositionalIndexer | tuple) -> DataFrame | Series:\n        return self.groupby_object._nth(n)",
    "docstring": "Dynamically substituted for GroupBy.nth to enable both call and index",
    "type": "class",
    "file_path": "pandas\\pandas\\core\\groupby\\indexing.py",
    "ast_data": "ClassDef name:GroupByNthSelector FunctionDef name:__init__ arg:self arg:groupby_object arguments arg arg Assign FunctionDef name:__call__ arg:self arg:n arg:dropna arguments arg arg arg Return return:yes Call FunctionDef name:__getitem__ arg:self arg:n arguments arg arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "compile_package",
    "source_code": "@indent_msg\ndef compile_package(self, path: Path, top_package_path: Path):\n    assert path.is_dir()\n    if path.name in DENY_LIST:\n        self.msg(path, 'X')\n        return\n    is_package_dir = any((child.name == '__init__.py' for child in path.iterdir()))\n    if not is_package_dir:\n        self.msg(path, 'S')\n        return\n    self.msg(path, 'P')\n    for child in path.iterdir():\n        self.compile_path(child, top_package_path)",
    "docstring": "Compile all the files within a Python package dir.",
    "type": "method",
    "file_path": "pytorch\\torch\\utils\\_freeze.py",
    "ast_data": "FunctionDef name:compile_package arg:self arg:path arg:top_package_path arguments arg arg arg Call If Compare Call Return return:no Assign Call Compare Call If Call Return return:no Call For Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_SparseSegmentMeanGrad",
    "source_code": "@ops.RegisterGradient('SparseSegmentMean')\ndef _SparseSegmentMeanGrad(op: ops.Operation, grad):\n    if _GetOpAttrOrNone(op, 'sparse_gradient'):\n        return (_SparseSegmentReduceGradV2(op, grad, 'mean'), None, None)\n    dim0 = array_ops.shape(op.inputs[0])[0]\n    return (math_ops.sparse_segment_mean_grad(grad, op.inputs[1], op.inputs[2], dim0), None, None)",
    "docstring": "Gradient for SparseSegmentMean.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\math_grad.py",
    "ast_data": "FunctionDef name:_SparseSegmentMeanGrad arg:op arg:grad arguments arg arg If Call Return return:yes Call Assign Call Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "_to_backend_index",
    "source_code": "def _to_backend_index(self) -> BackendIndex:\n    index: dict[OperatorName, BackendMetadata] = {}\n    for op in self.index:\n        kernel_dict = self.index[op]\n        assert len(kernel_dict.values()) == 1, f\"Can't convert ETKernelIndex to BackendIndex because {op} has more than one kernels. Got {kernel_dict}\"\n        index[op] = kernel_dict.get(ETKernelKey(default=True), BackendMetadata(kernel='', structured=False, cpp_namespace=''))\n    return BackendIndex(dispatch_key=DispatchKey.CPU, use_out_as_primary=False, device_guard=False, external=False, index=index)",
    "docstring": "WARNING: this will be deprecated once all the codegen places know how to handle ETKernelIndex.",
    "type": "method",
    "file_path": "pytorch\\torchgen\\executorch\\model.py",
    "ast_data": "FunctionDef name:_to_backend_index arg:self arguments arg For Assign Compare Call Call Assign Call Call Call Return return:yes Call"
  },
  {
    "library": "numpy",
    "name": "_filter_header",
    "source_code": "def _filter_header(s):\n    import tokenize\n    from io import StringIO\n    tokens = []\n    last_token_was_number = False\n    for token in tokenize.generate_tokens(StringIO(s).readline):\n        token_type = token[0]\n        token_string = token[1]\n        if last_token_was_number and token_type == tokenize.NAME and (token_string == 'L'):\n            continue\n        else:\n            tokens.append(token)\n        last_token_was_number = token_type == tokenize.NUMBER\n    return tokenize.untokenize(tokens)",
    "docstring": "Clean up 'L' in npz header ints. Cleans up the 'L' in strings representing integers. Needed to allow npz headers produced in Python2 to be read in Python3. Parameters ---------- s : string Npy file header. Returns ------- header : str Cleaned up header.",
    "type": "function",
    "file_path": "numpy\\numpy\\lib\\_format_impl.py",
    "ast_data": "FunctionDef name:_filter_header arg:s arguments arg Assign Assign For Call Call Assign Assign If BoolOp Compare Compare Call Assign Compare Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "inverse_transform",
    "source_code": "def inverse_transform(self, X):\n    if self.validate:\n        X = check_array(X, accept_sparse=self.accept_sparse)\n    return self._transform(X, func=self.inverse_func, kw_args=self.inv_kw_args)",
    "docstring": "Transform X using the inverse function. Parameters ---------- X : {array-like, sparse-matrix} of shape (n_samples, n_features) if else any object that can handle Input array. Returns ------- X_original : array-like, shape (n_samples, n_features) Transformed input.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\preprocessing\\_function_transformer.py",
    "ast_data": "FunctionDef name:inverse_transform arg:self arg:X arguments arg arg If Assign Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "decorated",
    "source_code": "def decorated(*args, **kwds):\n    dy = kwds.pop('dy', None)\n    if kwds:\n        raise ValueError('Functions to be differentiated cannot receive keyword arguments.')\n    val, vjp = make_vjp(f, params)(*args, **kwds)\n    return (val, vjp(dy=dy))",
    "docstring": "Computes the value and gradient of the decorated function.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\backprop.py",
    "ast_data": "FunctionDef name:decorated arguments arg arg Assign Call If Raise Call Assign Call Call Return return:yes Call"
  },
  {
    "library": "numpy",
    "name": "full",
    "source_code": "@finalize_array_function_like\n@set_module('numpy')\ndef full(shape, fill_value, dtype=None, order='C', *, device=None, like=None):\n    if like is not None:\n        return _full_with_like(like, shape, fill_value, dtype=dtype, order=order, device=device)\n    if dtype is None:\n        fill_value = asarray(fill_value)\n        dtype = fill_value.dtype\n    a = empty(shape, dtype, order, device=device)\n    multiarray.copyto(a, fill_value, casting='unsafe')\n    return a",
    "docstring": "Return a new array of given shape and type, filled with . Parameters ---------- shape : int or sequence of ints Shape of the new array, e.g., `fill_value` with the given shape, dtype, and order. See Also -------- full_like : Return a new array with shape of input filled with value. empty : Return a new uninitialized array. ones : Return a new array setting values to one. zeros : Return a new array setting values to zero. Examples -------- >>> import numpy as np >>> np.full((2, 2), np.inf) array([[inf, inf], [inf, inf]]) >>> np.full((2, 2), 10) array([[10, 10], [10, 10]]) >>> np.full((2, 2), [1, 2]) array([[1, 2], [1, 2]])",
    "type": "function",
    "file_path": "numpy\\numpy\\_core\\numeric.py",
    "ast_data": "FunctionDef name:full arg:shape arg:fill_value arg:dtype arg:order arguments arg arg arg arg arg arg If Compare Return return:yes Call If Compare Assign Call Assign Assign Call Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "RegionWrapper",
    "source_code": "class RegionWrapper:\n\n    def __init__(self, region: Region, node_to_recursive_ancestors: dict[Node, set[Node]]) -> None:\n        assert len(region) == 1, 'all regions should start with one node'\n        node = region[0]\n        self.node_to_recursive_ancestors = node_to_recursive_ancestors\n        self.iter = BackwardBfsArgIter.create(node)\n        self.nodes_unique = OrderedSet([node])\n        self.ancestors = set(node_to_recursive_ancestors[node])\n        self.region = region\n\n    def next_candidate(self) -> Optional[Node]:\n        return self.iter.next()\n\n    def will_inclusion_create_cycle(self, node: Node) -> bool:\n        external_users = [user for user in node.users if user not in self.nodes_unique]\n        for user in external_users:\n            if user in self.ancestors:\n                return True\n        return False\n\n    def add(self, node: Node) -> None:\n        self.nodes_unique.add(node)\n        self.region.append(node)\n        self.iter.add_children(node)\n        self.ancestors.update(self.node_to_recursive_ancestors[node])",
    "docstring": "Holds state for regions e.g. ancestors and new candidate nodes for consideration",
    "type": "class",
    "file_path": "pytorch\\torch\\_dynamo\\graph_region_tracker.py",
    "ast_data": "ClassDef name:RegionWrapper FunctionDef name:__init__ arg:self arg:region arg:node_to_recursive_ancestors arguments arg arg arg Compare Call Assign Assign Assign Call Assign Call Assign Call Assign FunctionDef name:next_candidate arg:self arguments arg Return return:yes Call FunctionDef name:will_inclusion_create_cycle arg:self arg:node arguments arg arg Assign Compare For If Compare Return return:yes Return return:yes FunctionDef name:add arg:self arg:node arguments arg arg Call Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "find_args_with_derivatives",
    "source_code": "def find_args_with_derivatives(differentiable_inputs: list[DifferentiableInput]) -> list[DifferentiableInput]:\n    if info is None or not info.has_derivatives:\n        return differentiable_inputs\n    names = {name for d in info.derivatives for name in d.var_names}\n    differentiable = [arg for arg in differentiable_inputs if arg.name in names]\n    if len(differentiable) != len(names):\n        missing = names - {arg.name for arg in differentiable}\n        raise RuntimeError(f'Missing arguments for derivatives: {missing} in {info.name}')\n    return differentiable",
    "docstring": "Find arguments that have derivative definitions",
    "type": "function",
    "file_path": "pytorch\\tools\\autograd\\gen_variable_type.py",
    "ast_data": "FunctionDef name:find_args_with_derivatives arg:differentiable_inputs arguments arg If BoolOp Compare Return return:yes Assign Assign Compare If Compare Call Call Assign Raise Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_run_policy",
    "source_code": "@abstractmethod\ndef _run_policy(self, root_module: nn.Module, ignored_modules: set[nn.Module], root_kwargs: dict[str, Any]) -> dict[nn.Module, dict[str, Any]]:\n    ...",
    "docstring": "This should return a dict `` that maps from each target module to wrap to its kwargs.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\fsdp\\wrap.py",
    "ast_data": "FunctionDef name:_run_policy arg:self arg:root_module arg:ignored_modules arg:root_kwargs arguments arg arg arg arg"
  },
  {
    "library": "tensorflow",
    "name": "get_device_policy",
    "source_code": "@tf_export('config.experimental.get_device_policy')\ndef get_device_policy():\n    device_policy = context.context().device_policy\n    if device_policy == context.DEVICE_PLACEMENT_SILENT:\n        return 'silent'\n    elif device_policy == context.DEVICE_PLACEMENT_SILENT_FOR_INT32:\n        return 'silent_for_int32'\n    elif device_policy == context.DEVICE_PLACEMENT_WARN:\n        return 'warn'\n    elif device_policy == context.DEVICE_PLACEMENT_EXPLICIT:\n        return 'explicit'\n    else:\n        raise errors.InternalError(f'Got an invalid device policy: {device_policy!r}.')",
    "docstring": "Gets the current device policy. The device policy controls how operations requiring inputs on a specific device (e.g., on GPU:0) handle inputs on a different device (e.g. GPU:1). This function only gets the device policy for the current thread. Any subsequently started thread will again use the default policy. Returns: Current thread device policy",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\config.py",
    "ast_data": "FunctionDef name:get_device_policy arguments Assign Call If Compare Return return:yes If Compare Return return:yes If Compare Return return:yes If Compare Return return:yes Raise Call Call"
  },
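A short usage sketch of this getter together with `tf.config.experimental.set_device_policy`, its documented companion setter:

```python
import tensorflow as tf

# Read the thread-local device placement policy; one of
# 'silent', 'silent_for_int32', 'warn', or 'explicit'.
policy = tf.config.experimental.get_device_policy()
print(policy)

# 'warn' logs a message whenever an input is copied across devices.
tf.config.experimental.set_device_policy('warn')
assert tf.config.experimental.get_device_policy() == 'warn'
```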
  {
    "library": "django",
    "name": "static",
    "source_code": "def static(prefix, view=serve, **kwargs):\n    if not prefix:\n        raise ImproperlyConfigured('Empty static prefix not permitted')\n    elif not settings.DEBUG or urlsplit(prefix).netloc:\n        return []\n    return [re_path('^%s(?P<path>.*)$' % re.escape(prefix.lstrip('/')), view, kwargs=kwargs)]",
    "docstring": "Return a URL pattern for serving files in debug mode. from django.conf import settings from django.conf.urls.static import static urlpatterns = [ # ... the rest of your URLconf goes here ... ] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)",
    "type": "function",
    "file_path": "django\\django\\conf\\urls\\static.py",
    "ast_data": "FunctionDef name:static arg:prefix arg:view arguments arg arg arg If Raise Call If BoolOp Call Return return:no Return return:yes Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "add_contrib_direct_import_support",
    "source_code": "def add_contrib_direct_import_support(symbol_dict):\n    for symbol_name in list(symbol_dict.keys()):\n        symbol_alias = symbol_name.replace('tf.contrib.', 'contrib_')\n        symbol_dict[symbol_alias] = symbol_dict[symbol_name]",
    "docstring": "Add support for alias Updates dict in place.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\tools\\compatibility\\all_renames_v2.py",
    "ast_data": "FunctionDef name:add_contrib_direct_import_support arg:symbol_dict arguments arg For Call Call Assign Call Assign"
  },
  {
    "library": "tensorflow",
    "name": "all_gather_with_padding",
    "source_code": "def all_gather_with_padding(input_tensor: core.TensorLike, options: Optional[collective_util.Options]) -> core.Tensor:\n    max_length = math_ops.reduce_max(all_lengths)\n    padded_tensor = _pad_util(input_tensor, max_length)\n    all_padded_tensors = self._all_gather(padded_tensor, options)\n    split_tensors = []\n    for i in range(self._group_size):\n        start_pos = i * max_length\n        split_tensors.append(all_padded_tensors[start_pos:start_pos + all_lengths[i]])\n    return array_ops.concat(split_tensors, 0)",
    "docstring": "all_gather tensors of different sizes using padding.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\cross_device_utils.py",
    "ast_data": "FunctionDef name:all_gather_with_padding arg:input_tensor arg:options arguments arg arg Assign Call Assign Call Assign Call Assign For Call Assign Call Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "_check_precisions",
    "source_code": "def _check_precisions(precisions, covariance_type, n_components, n_features):\n    precisions = check_array(precisions, dtype=[np.float64, np.float32], ensure_2d=False, allow_nd=covariance_type == 'full')\n    precisions_shape = {'full': (n_components, n_features, n_features), 'tied': (n_features, n_features), 'diag': (n_components, n_features), 'spherical': (n_components,)}\n    _check_shape(precisions, precisions_shape[covariance_type], '%s precision' % covariance_type)\n    _check_precisions = {'full': _check_precisions_full, 'tied': _check_precision_matrix, 'diag': _check_precision_positivity, 'spherical': _check_precision_positivity}\n    _check_precisions[covariance_type](precisions, covariance_type)\n    return precisions",
    "docstring": "Validate user provided precisions. Parameters ---------- precisions : array-like 'full' : shape of (n_components, n_features, n_features) 'tied' : shape of (n_features, n_features) 'diag' : shape of (n_components, n_features) 'spherical' : shape of (n_components,) covariance_type : str n_components : int Number of components. n_features : int Number of features. Returns ------- precisions : array",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\mixture\\_gaussian_mixture.py",
    "ast_data": "FunctionDef name:_check_precisions arg:precisions arg:covariance_type arg:n_components arg:n_features arguments arg arg arg arg Assign Call Compare Assign Call Assign Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "get_outer_bbox",
    "source_code": "def get_outer_bbox(self, rows=0, cols=0):\n    rows = np.atleast_1d(rows)\n    cols = np.atleast_1d(cols)\n    bbox = Bbox.from_extents(self.lefts[cols[0]].value(), self.bottoms[rows[-1]].value(), self.rights[cols[-1]].value(), self.tops[rows[0]].value())\n    return bbox",
    "docstring": "Return the outer bounding box of the subplot specs given by rows and cols. rows and cols can be spans.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\_layoutgrid.py",
    "ast_data": "FunctionDef name:get_outer_bbox arg:self arg:rows arg:cols arguments arg arg arg Assign Call Assign Call Assign Call Call Call Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_descendants_with_paths",
    "source_code": "def _descendants_with_paths(self):\n    all_nodes_with_paths = {}\n    to_visit = collections.deque([0])\n    all_nodes_with_paths[0] = 'root'\n    path = all_nodes_with_paths.get(0)\n    while to_visit:\n        node_id = to_visit.popleft()\n        obj = self._object_graph_proto.nodes[node_id]\n        for child in obj.children:\n            if child.node_id == 0 or child.node_id in all_nodes_with_paths.keys():\n                continue\n            path = all_nodes_with_paths.get(node_id)\n            if child.node_id not in all_nodes_with_paths.keys():\n                to_visit.append(child.node_id)\n            all_nodes_with_paths[child.node_id] = path + '.' + child.local_name\n    return all_nodes_with_paths",
    "docstring": "Returns a dict of descendants by node_id and paths to node. The names returned by this private method are subject to change.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\checkpoint\\checkpoint_view.py",
    "ast_data": "FunctionDef name:_descendants_with_paths arg:self arguments arg Assign Assign Call Assign Assign Call While Assign Call Assign For If BoolOp Compare Compare Call Assign Call If Compare Call Call Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "scatter_nd_add",
    "source_code": "def scatter_nd_add(self, indices, updates, name=None):\n    return self._lazy_read(gen_state_ops.resource_scatter_nd_add(self.handle, indices, ops.convert_to_tensor(updates, self.dtype), name=name))",
    "docstring": "Applies sparse addition to individual values or slices in a Variable. is a with rank and is a of rank . must be integer tensor, containing indices into . It must be shape where . The innermost dimension of (with length ) corresponds to indices into elements (if ) or slices (if ) along the th dimension of . is of rank with shape: For example, say we want to add 4 scattered elements to a rank-1 tensor to 8 elements. In Python, that update would look like this: The resulting update to ref would look like this: [1, 13, 3, 14, 14, 6, 7, 20] See for more details about how to make updates to slices. Args: indices: The indices to be used in the operation. updates: The values to be used in the operation. name: the name of the operation. Returns: The updated variable.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\resource_variable_ops.py",
    "ast_data": "FunctionDef name:scatter_nd_add arg:self arg:indices arg:updates arg:name arguments arg arg arg arg Return return:yes Call Call Call"
  },
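The docstring example uses a v1 Session; under eager execution the same update can be checked directly:

```python
import tensorflow as tf

# Add 4 scattered values into a rank-1 variable of 8 elements.
ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8])
indices = tf.constant([[4], [3], [1], [7]])
updates = tf.constant([9, 10, 11, 12])

ref.scatter_nd_add(indices, updates)
print(ref.numpy())  # [ 1 13  3 14 14  6  7 20]
```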
  {
    "library": "pytorch",
    "name": "is_cudagraph_unsafe_op",
    "source_code": "def is_cudagraph_unsafe_op(node: Operation) -> bool:\n    from . import ir\n    if not isinstance(node, ir.FallbackKernel):\n        return False\n    if isinstance(node.op_overload, torch._ops.OpOverload) and torch._C.Tag.cudagraph_unsafe in node.op_overload.tags:\n        return True\n    return False",
    "docstring": "Returns True if the node is an op that is not cudagraphable. Usually only custom ops have this tag.",
    "type": "function",
    "file_path": "pytorch\\torch\\_inductor\\utils.py",
    "ast_data": "FunctionDef name:is_cudagraph_unsafe_op arg:node arguments arg If Call Return return:yes If BoolOp Call Compare Return return:yes Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "finalize",
    "source_code": "def finalize(self):\n    cmds = []\n    while self.parent is not None:\n        cmds.extend(self.pop())\n    return cmds",
    "docstring": "Make sure every pushed graphics state is popped.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\backends\\backend_pdf.py",
    "ast_data": "FunctionDef name:finalize arg:self arguments arg Assign While Compare Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "reconstructed_non_debug_partition_graphs",
    "source_code": "def reconstructed_non_debug_partition_graphs(self):\n    non_debug_graphs = {}\n    for key in self._debug_graphs:\n        non_debug_graphs[key] = self._debug_graphs[key].non_debug_graph_def\n    return non_debug_graphs",
    "docstring": "Reconstruct partition graphs with the debugger-inserted ops stripped. The reconstructed partition graphs are identical to the original (i.e., non-debugger-decorated) partition graphs except in the following respects: 1) The exact names of the runtime-inserted internal nodes may differ. These include _Send, _Recv, _HostSend, _HostRecv, _Retval ops. 2) As a consequence of 1, the nodes that receive input directly from such send- and recv-type ops will have different input names. 3) The parallel_iteration attribute of while-loop Enter ops are set to 1. Returns: A dict mapping device names (s) to reconstructed s.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\debug\\lib\\debug_data.py",
    "ast_data": "FunctionDef name:reconstructed_non_debug_partition_graphs arg:self arguments arg Assign For Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_train_with_multi_worker",
    "source_code": "def _train_with_multi_worker(method):\n\n    def wrapper(model, **kwargs):\n\n        def _worker_fn(_):\n            callbacks = kwargs.pop('callbacks', None)\n            filtered_callbacks = dist_utils.filter_distributed_callbacks(callbacks, model)\n            kwargs['callbacks'] = filtered_callbacks\n            return method(model, **kwargs)\n        return dc.run_distribute_coordinator(_worker_fn, model._distribution_strategy)\n    return wrapper",
    "docstring": "Decorator that handles multi worker training with distribution strategy.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\training_distributed_v1.py",
    "ast_data": "FunctionDef name:_train_with_multi_worker arg:method arguments arg FunctionDef name:wrapper arg:model arguments arg arg FunctionDef name:_worker_fn arg:_ arguments arg Assign Call Assign Call Assign Return return:yes Call Return return:yes Call Return return:yes"
  },
  {
    "library": "django",
    "name": "target_field",
    "source_code": "@property\ndef target_field(self):\n    target_fields = self.path_infos[-1].target_fields\n    if len(target_fields) > 1:\n        raise exceptions.FieldError(\"Can't use target_field for multicolumn relations.\")\n    return target_fields[0]",
    "docstring": "When filtering against this relation, return the field on the remote model against which the filtering should happen.",
    "type": "method",
    "file_path": "django\\django\\db\\models\\fields\\reverse_related.py",
    "ast_data": "FunctionDef name:target_field arg:self arguments arg Assign If Compare Call Raise Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_replace_mode",
    "source_code": "def _replace_mode(parent, old_value):\n    new_value = pasta.parse('mode.lower()')\n    mode = new_value.body[0].value.func\n    pasta.ast_utils.replace_child(mode, mode.value, old_value)\n    pasta.ast_utils.replace_child(parent, old_value, new_value)\n    pasta.base.formatting.set(old_value, 'prefix', '(')\n    pasta.base.formatting.set(old_value, 'suffix', ')')",
    "docstring": "Replaces old_value with (old_value).lower().",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\tools\\compatibility\\tf_upgrade_v2.py",
    "ast_data": "FunctionDef name:_replace_mode arg:parent arg:old_value arguments arg arg Assign Call Assign Call Call Call Call"
  },
  {
    "library": "kornia",
    "name": "KORNIA_CHECK_IS_COLOR",
    "source_code": "def KORNIA_CHECK_IS_COLOR(x: Tensor, msg: Optional[str]=None, raises: bool=True) -> bool:\n    if len(x.shape) < 3 or x.shape[-3] != 3:\n        if raises:\n            raise TypeError(f'Not a color tensor. Got: {type(x)}.\\n{msg}')\n        return False\n    return True",
    "docstring": "Check whether an image tensor is a color images. Args: x: image tensor to evaluate. msg: message to show in the exception. raises: bool indicating whether an exception should be raised upon failure. Raises: TypeException: if all the input tensor has not a shape :math: and raises is True. Example: >>> img = torch.rand(2, 3, 4, 4) >>> KORNIA_CHECK_IS_COLOR(img, \"Image is not color\") True",
    "type": "function",
    "file_path": "kornia\\kornia\\core\\check.py",
    "ast_data": "FunctionDef name:KORNIA_CHECK_IS_COLOR arg:x arg:msg arg:raises arguments arg arg arg If BoolOp Compare Call Compare If Raise Call Call Return return:yes Return return:yes"
  },
  {
    "library": "django",
    "name": "_alter_many_to_many",
    "source_code": "def _alter_many_to_many(self, model, old_field, new_field, strict):\n    if old_field.remote_field.through._meta.db_table == new_field.remote_field.through._meta.db_table:\n        self._remake_table(old_field.remote_field.through, alter_fields=[(old_field.remote_field.through._meta.get_field(old_field.m2m_reverse_field_name()), new_field.remote_field.through._meta.get_field(new_field.m2m_reverse_field_name())), (old_field.remote_field.through._meta.get_field(old_field.m2m_field_name()), new_field.remote_field.through._meta.get_field(new_field.m2m_field_name()))])\n        return\n    self.create_model(new_field.remote_field.through)\n    self.execute('INSERT INTO %s (%s) SELECT %s FROM %s' % (self.quote_name(new_field.remote_field.through._meta.db_table), ', '.join(['id', new_field.m2m_column_name(), new_field.m2m_reverse_name()]), ', '.join(['id', old_field.m2m_column_name(), old_field.m2m_reverse_name()]), self.quote_name(old_field.remote_field.through._meta.db_table)))\n    self.delete_model(old_field.remote_field.through)",
    "docstring": "Alter M2Ms to repoint their to= endpoints.",
    "type": "method",
    "file_path": "django\\django\\db\\backends\\sqlite3\\schema.py",
    "ast_data": "FunctionDef name:_alter_many_to_many arg:self arg:model arg:old_field arg:new_field arg:strict arguments arg arg arg arg arg If Compare Call Call Call Call Call Call Call Call Call Return return:no Call Call Call Call Call Call Call Call Call Call Call"
  },
  {
    "library": "scipy",
    "name": "_datacopied",
    "source_code": "def _datacopied(arr, original):\n    if arr is original:\n        return False\n    if not isinstance(original, np.ndarray) and hasattr(original, '__array__'):\n        return False\n    return arr.base is None",
    "docstring": "Strict check for not sharing any data with , under the assumption that arr = asarray(original)",
    "type": "function",
    "file_path": "scipy\\scipy\\linalg\\_misc.py",
    "ast_data": "FunctionDef name:_datacopied arg:arr arg:original arguments arg arg If Compare Return return:yes If BoolOp Call Call Return return:yes Return return:yes Compare"
  },
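Since `_datacopied` is a private SciPy helper, the sketch below inlines the same three-step check instead of importing it; the name `datacopied` here is illustrative only.

```python
import numpy as np

def datacopied(arr, original):
    """Same logic as SciPy's private helper: True only if `arr`
    owns fresh memory, assuming arr = np.asarray(original)."""
    if arr is original:
        return False
    if not isinstance(original, np.ndarray) and hasattr(original, '__array__'):
        return False
    return arr.base is None

x = np.arange(4)
assert datacopied(np.asarray(x), x) is False               # same object, no copy
assert datacopied(np.asarray(x, dtype=float), x) is True   # dtype change copies
assert datacopied(np.asarray([1, 2, 3]), [1, 2, 3]) is True  # list -> new array
```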
  {
    "library": "pandas",
    "name": "size",
    "source_code": "@final\ndef size(self) -> Series:\n    ids = self.ids\n    ngroups = self.ngroups\n    out: np.ndarray | list\n    if ngroups:\n        out = np.bincount(ids[ids != -1], minlength=ngroups)\n    else:\n        out = []\n    return Series(out, index=self.result_index, dtype='int64', copy=False)",
    "docstring": "Compute group sizes.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\groupby\\ops.py",
    "ast_data": "FunctionDef name:size arg:self arguments arg Assign Assign If Assign Call Compare Assign Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "stop_filter",
    "source_code": "def stop_filter(self, filter_func):\n    pass",
    "docstring": "Switch back to the original renderer. The contents of the temporary renderer is processed with the *filter_func* and is drawn on the original renderer as an image. Currently only supported by the agg renderer.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\backend_bases.py",
    "ast_data": "FunctionDef name:stop_filter arg:self arg:filter_func arguments arg arg"
  },
  {
    "library": "tensorflow",
    "name": "_get_dependent_variables",
    "source_code": "def _get_dependent_variables(input_ops, output_ops):\n    output_ops = nest.map_structure(gen_array_ops.identity, output_ops)\n    inbetween_ops = op_selector.get_backward_walk_ops(seed_ops=output_ops, stop_at_ts=input_ops, inclusive=False, only_differentiable=True)\n    var_ops = (op for op in inbetween_ops if op.type in VAR_OP_TYPES)\n    var_names = (op.name for op in var_ops)\n    tf_vars = (get_variable_by_name(var_name) for var_name in var_names)\n    tf_vars = [v for v in tf_vars if v is not None]\n    return tf_vars",
    "docstring": "Finds variables involved in the subgraph between input_ops and output_ops. Args: input_ops: Flattened list of input ops output_ops: Flattened list of output ops Returns: A list of variables",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\custom_gradient.py",
    "ast_data": "FunctionDef name:_get_dependent_variables arg:input_ops arg:output_ops arguments arg arg Assign Call Assign Call Assign Compare Assign Assign Call Assign Compare Return return:yes"
  },
  {
    "library": "sphinx",
    "name": "_visit_arg_with_default",
    "source_code": "def _visit_arg_with_default(self, arg: ast.arg, default: ast.AST | None) -> str:\n    name = self.visit(arg)\n    if default:\n        if arg.annotation:\n            name += ' = %s' % self.visit(default)\n        else:\n            name += '=%s' % self.visit(default)\n    return name",
    "docstring": "Unparse a single argument to a string.",
    "type": "method",
    "file_path": "sphinx\\sphinx\\pycode\\ast.py",
    "ast_data": "FunctionDef name:_visit_arg_with_default arg:self arg:arg arg:default arguments arg arg arg Assign Call If If Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "vec_to_tril_matrix",
    "source_code": "def vec_to_tril_matrix(vec: Tensor, diag: int=0) -> Tensor:\n    n = (-(1 + 2 * diag) + ((1 + 2 * diag) ** 2 + 8 * vec.shape[-1] + 4 * abs(diag) * (diag + 1)) ** 0.5) / 2\n    eps = torch.finfo(vec.dtype).eps\n    if not torch._C._get_tracing_state() and round(n) - n > eps:\n        raise ValueError(f'The size of last dimension is {vec.shape[-1]} which cannot be expressed as ' + 'the lower triangular part of a square D x D matrix.')\n    n = round(n.item()) if isinstance(n, torch.Tensor) else round(n)\n    mat = vec.new_zeros(vec.shape[:-1] + torch.Size((n, n)))\n    arange = torch.arange(n, device=vec.device)\n    tril_mask = arange < arange.view(-1, 1) + (diag + 1)\n    mat[..., tril_mask] = vec\n    return mat",
    "docstring": "Convert a vector or a batch of vectors into a batched lower triangular matrix containing elements from the vector in row order.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributions\\utils.py",
    "ast_data": "FunctionDef name:vec_to_tril_matrix arg:vec arg:diag arguments arg arg Assign Call Assign Call If BoolOp Call Compare Call Raise Call Assign Call Call Call Call Assign Call Call Assign Call Assign Compare Call Assign Return return:yes"
  },
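A small check of the row-order fill and the `diag` offset:

```python
import torch
from torch.distributions.utils import vec_to_tril_matrix

# Six elements fill a 3x3 lower triangle in row order.
vec = torch.arange(6.)
print(vec_to_tril_matrix(vec))
# tensor([[0., 0., 0.],
#         [1., 2., 0.],
#         [3., 4., 5.]])

# diag=-1 places the elements strictly below the main diagonal,
# so 3 elements also need a 3x3 matrix.
print(vec_to_tril_matrix(torch.arange(3.), diag=-1))
# tensor([[0., 0., 0.],
#         [0., 0., 0.],
#         [1., 2., 0.]])
```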
  {
    "library": "tensorflow",
    "name": "__enter__",
    "source_code": "def __enter__(self) -> str:\n    ctx = context.context()\n    if ctx.executing_eagerly():\n        old_name = ctx.scope_name\n        name = self._name\n        if not name:\n            scope_name = ''\n        elif name[-1] == '/':\n            scope_name = name\n        elif old_name:\n            scope_name = old_name + name + '/'\n        else:\n            scope_name = name + '/'\n        ctx.scope_name = scope_name\n\n        def _restore_name_scope(*_):\n            ctx.scope_name = old_name\n        self._exit_fns.append(_restore_name_scope)\n    else:\n        scope = get_default_graph().name_scope(self._name)\n        scope_name = scope.__enter__()\n        self._exit_fns.append(scope.__exit__)\n    return scope_name",
    "docstring": "Start the scope block. Returns: The scope name.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\ops.py",
    "ast_data": "FunctionDef name:__enter__ arg:self arguments arg Assign Call If Call Assign Assign If Assign If Compare Assign If Assign Assign Assign FunctionDef name:_restore_name_scope arguments arg Assign Call Assign Call Call Assign Call Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "fill",
    "source_code": "def fill(self, *args):\n    if len(args):\n        _fillcolor = args[0]\n    else:\n        _fillcolor = self._fillcolor\n    return self._hatch or (_fillcolor is not None and (len(_fillcolor) <= 3 or _fillcolor[3] != 0.0))",
    "docstring": "Predicate: does the path need to be filled? An optional argument can be used to specify an alternative _fillcolor, as needed by RendererPdf.draw_markers.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\backends\\backend_pdf.py",
    "ast_data": "FunctionDef name:fill arg:self arguments arg arg If Call Assign Assign Return return:yes BoolOp BoolOp Compare BoolOp Compare Call Compare"
  },
  {
    "library": "pytorch",
    "name": "add_event",
    "source_code": "def add_event(self, event, step=None, walltime=None):\n    event.wall_time = time.time() if walltime is None else walltime\n    if step is not None:\n        event.step = int(step)\n    self.event_writer.add_event(event)",
    "docstring": "Add an event to the event file. Args: event: An protocol buffer. step: Number. Optional global step value for training process to record with the event. walltime: float. Optional walltime to override the default (current) walltime (from time.time()) seconds after epoch",
    "type": "method",
    "file_path": "pytorch\\torch\\utils\\tensorboard\\writer.py",
    "ast_data": "FunctionDef name:add_event arg:self arg:event arg:step arg:walltime arguments arg arg arg arg Assign Compare Call If Compare Assign Call Call"
  },
  {
    "library": "tensorflow",
    "name": "write",
    "source_code": "@abc.abstractmethod\ndef write(self, file_prefix: str) -> str:\n    pass",
    "docstring": "Serializes proto to disk. Args: file_prefix: string prefix of the filepath. Returns: The actual path the proto is written to.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\tools\\proto_splitter\\split.py",
    "ast_data": "FunctionDef name:write arg:self arg:file_prefix arguments arg arg"
  },
  {
    "library": "tensorflow",
    "name": "validate_run_function",
    "source_code": "def validate_run_function(fn):\n    if context.executing_eagerly() and (not isinstance(fn, def_function.Function)) and (not isinstance(fn, function.ConcreteFunction)) and (not (callable(fn) and isinstance(fn.__call__, def_function.Function))):\n        raise NotImplementedError('TPUStrategy.run(fn, ...) does not support pure eager execution. please make sure the function passed into `strategy.run` is a `tf.function` or `strategy.run` is called inside a `tf.function` if eager behavior is enabled.')",
    "docstring": "Validate the function passed into strategy.run.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\tpu_strategy.py",
    "ast_data": "FunctionDef name:validate_run_function arg:fn arguments arg If BoolOp Call Call Call BoolOp Call Call Raise Call"
  },
  {
    "library": "matplotlib",
    "name": "get_y",
    "source_code": "def get_y(self):\n    return self._y0",
    "docstring": "Return the bottom coordinate of the rectangle.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\patches.py",
    "ast_data": "FunctionDef name:get_y arg:self arguments arg Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "fit",
    "source_code": "def fit(self, X, y=None, sample_weight=None):\n    self.fit_transform(X, y, sample_weight=sample_weight)\n    return self",
    "docstring": "Fit estimator. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) The input samples. Use `` for maximum efficiency. y : Ignored Not used, present for API consistency by convention. sample_weight : array-like of shape (n_samples,), default=None Sample weights. If None, then samples are equally weighted. Splits that would create child nodes with net zero or negative weight are ignored while searching for a split in each node. In the case of classification, splits are also ignored if they would result in any single class carrying a negative weight in either child node. Returns ------- self : object Returns the instance itself.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\ensemble\\_forest.py",
    "ast_data": "FunctionDef name:fit arg:self arg:X arg:y arg:sample_weight arguments arg arg arg arg Call Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "_m_step",
    "source_code": "def _m_step(self, X, log_resp):\n    n_samples, _ = X.shape\n    nk, xk, sk = _estimate_gaussian_parameters(X, np.exp(log_resp), self.reg_covar, self.covariance_type)\n    self._estimate_weights(nk)\n    self._estimate_means(nk, xk)\n    self._estimate_precisions(nk, xk, sk)",
    "docstring": "M step. Parameters ---------- X : array-like of shape (n_samples, n_features) log_resp : array-like of shape (n_samples, n_components) Logarithm of the posterior probabilities (or responsibilities) of the point of each sample in X.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\mixture\\_bayesian_mixture.py",
    "ast_data": "FunctionDef name:_m_step arg:self arg:X arg:log_resp arguments arg arg arg Assign Assign Call Call Call Call Call"
  },
  {
    "library": "django",
    "name": "is_internal_request",
    "source_code": "def is_internal_request(self, domain, referer):\n    return bool(re.match('^https?://%s/' % re.escape(domain), referer))",
    "docstring": "Return True if the referring URL is the same domain as the current request.",
    "type": "method",
    "file_path": "django\\django\\middleware\\common.py",
    "ast_data": "FunctionDef name:is_internal_request arg:self arg:domain arg:referer arguments arg arg arg Return return:yes Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "keystr",
    "source_code": "def keystr(kp: KeyPath) -> str:\n    return ''.join([str(k) for k in kp])",
    "docstring": "Given a key path, return a pretty-printed representation.",
    "type": "function",
    "file_path": "pytorch\\torch\\utils\\_pytree.py",
    "ast_data": "FunctionDef name:keystr arg:kp arguments arg Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "_align",
    "source_code": "def _align(nbytes: int) -> int:\n    return nbytes + ALIGN_BYTES - 1 & -ALIGN_BYTES",
    "docstring": "Round up to the nearest multiple of ALIGN_BYTES",
    "type": "function",
    "file_path": "pytorch\\torch\\_inductor\\utils.py",
    "ast_data": "FunctionDef name:_align arg:nbytes arguments arg Return return:yes"
  },
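The mask trick requires ALIGN_BYTES to be a power of two; the constant below (64) is an assumed illustration value, not necessarily inductor's actual setting.

```python
# Standalone sketch of the same bit trick used by _align.
ALIGN_BYTES = 64  # assumption for illustration; must be a power of two

def align(nbytes: int) -> int:
    # Adding ALIGN_BYTES - 1 and masking with -ALIGN_BYTES (all ones
    # except the low log2(ALIGN_BYTES) bits) rounds up to a multiple.
    return (nbytes + ALIGN_BYTES - 1) & -ALIGN_BYTES

assert align(0) == 0
assert align(1) == 64
assert align(64) == 64
assert align(65) == 128
```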
  {
    "library": "pytorch",
    "name": "TypePromotionRule",
    "source_code": "class TypePromotionRule(abc.ABC):\n\n    def __init__(self, namespace: str, op_name: str):\n        self.namespace = namespace\n        self.op_name = op_name\n\n    @abc.abstractmethod\n    def __hash__(self) -> int:\n        ...\n\n    @abc.abstractmethod\n    def __repr__(self):\n        ...\n\n    @abc.abstractmethod\n    def __eq__(self, other: object) -> bool:\n        ...\n\n    def is_valid(self) -> bool:\n        module = getattr(torch.ops, self.namespace)\n        py_op = getattr(module, self.op_name, None)\n        if py_op is None:\n            logger.warning('Cannot find op: %s in module: %s', self.op_name, self.namespace)\n            return False\n        if not isinstance(py_op, torch._ops.OpOverloadPacket):\n            logger.warning('Op: torch.ops.%s.%s is not an OpOverloadPacket, got: %s', self.namespace, self.op_name, type(py_op))\n            return False\n        return True\n\n    @abc.abstractmethod\n    def preview_type_promotion(self, args: tuple, kwargs: dict) -> TypePromotionSnapshot:\n        ...",
    "docstring": "Base class for type promotion rule per 'torch.ops.{namespace}.{op_name}'.",
    "type": "class",
    "file_path": "pytorch\\torch\\onnx\\_internal\\fx\\passes\\type_promotion.py",
    "ast_data": "ClassDef name:TypePromotionRule FunctionDef name:__init__ arg:self arg:namespace arg:op_name arguments arg arg arg Assign Assign FunctionDef name:__hash__ arg:self arguments arg FunctionDef name:__repr__ arg:self arguments arg FunctionDef name:__eq__ arg:self arg:other arguments arg arg FunctionDef name:is_valid arg:self arguments arg Assign Call Assign Call If Compare Call Return return:yes If Call Call Call Return return:yes Return return:yes FunctionDef name:preview_type_promotion arg:self arg:args arg:kwargs arguments arg arg arg"
  },
  {
    "library": "pandas",
    "name": "array",
    "source_code": "@final\n@property\ndef array(self) -> ArrayLike:\n    return self.blocks[0].values",
    "docstring": "Quick access to the backing array of the Block.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\internals\\managers.py",
    "ast_data": "FunctionDef name:array arg:self arguments arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "create_unspecified_symint_and_symbol",
    "source_code": "@record_shapeenv_event()\ndef create_unspecified_symint_and_symbol(self, value: int, source: Source, dynamic_dim: DimDynamic) -> IntLikeType:\n    return self.create_symintnode(self.create_unspecified_symbol(value, source=source, dynamic_dim=dynamic_dim), hint=value, source=source)",
    "docstring": "Create a SymInt wrapping a new unspecified symbol",
    "type": "method",
    "file_path": "pytorch\\torch\\fx\\experimental\\symbolic_shapes.py",
    "ast_data": "FunctionDef name:create_unspecified_symint_and_symbol arg:self arg:value arg:source arg:dynamic_dim arguments arg arg arg arg Return return:yes Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_dequeue_return_value",
    "source_code": "def _dequeue_return_value(self, tensors):\n    if self._names:\n        return {n: tensors[i] for i, n in enumerate(self._names)}\n    elif len(tensors) == 1:\n        return tensors[0]\n    else:\n        return tensors",
    "docstring": "Return the value to return from a dequeue op. If the queue has names, return a dictionary with the names as keys. Otherwise return either a single tensor or a list of tensors depending on the length of . Args: tensors: List of tensors from the dequeue op. Returns: A single tensor, a list of tensors, or a dictionary of tensors.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\data_flow_ops.py",
    "ast_data": "FunctionDef name:_dequeue_return_value arg:self arg:tensors arguments arg arg If Return return:yes Call If Compare Call Return return:yes Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "__reduce_package__",
    "source_code": "def __reduce_package__(self, exporter: PackageExporter):\n    python_code = self._real_recompile()\n    dict_without_graph = self.__dict__.copy()\n    dict_without_graph['_graphmodule_cls_name'] = self.__class__.__name__\n    del dict_without_graph['_graph']\n    generated_module_name = f'fx-generated._{exporter.get_unique_id()}'\n    import_block = _format_import_block(python_code.globals, exporter.importer)\n    module_code = import_block + self.code\n    exporter.save_source_string(generated_module_name, module_code)\n    return (reduce_package_graph_module, (dict_without_graph, generated_module_name))",
    "docstring": "Follow GraphModule.__reduce__ but call 'self._real_recompile' rather than 'self.recompile' since for a _LazyGraphModule, self.recompile just mark the need of recompilation and does not return the PythonCode object.",
    "type": "method",
    "file_path": "pytorch\\torch\\fx\\_lazy_graph_module.py",
    "ast_data": "FunctionDef name:__reduce_package__ arg:self arg:exporter arguments arg arg Assign Call Assign Call Assign Assign Call Assign Call Assign Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "site_packages",
    "source_code": "def site_packages(self, python: Path | str | None=None) -> Path:\n    output = self.python('-c', 'import site; [print(p) for p in site.getsitepackages()]', python=python, capture_output=True).stdout\n    candidates = list(map(Path, filter(None, map(str.strip, output.splitlines()))))\n    candidates = [p for p in candidates if p.is_dir() and p.name == 'site-packages']\n    if not candidates:\n        raise RuntimeError(f'No site-packages directory found for executable {python}')\n    return candidates[0]",
    "docstring": "Get the site-packages directory for the virtual environment.",
    "type": "method",
    "file_path": "pytorch\\tools\\nightly.py",
    "ast_data": "FunctionDef name:site_packages arg:self arg:python arguments arg arg Assign Call Assign Call Call Call Call Call Assign BoolOp Call Compare If Raise Call Return return:yes"
  },
  {
    "library": "scipy",
    "name": "_logexpxmexpy",
    "source_code": "def _logexpxmexpy(x, y):\n    i = np.isneginf(np.real(y))\n    if np.any(i):\n        y = np.asarray(y.copy())\n        y[i] = np.finfo(y.dtype).min\n    x, y = np.broadcast_arrays(x, y)\n    res = np.asarray(special.logsumexp([x, y + np.pi * 1j], axis=0))\n    i = x == y\n    res[i] = -np.inf\n    return res",
    "docstring": "Compute the log of the difference of the exponentials of two arguments. Avoids over/underflow, but does not prevent loss of precision otherwise.",
    "type": "function",
    "file_path": "scipy\\scipy\\stats\\_distribution_infrastructure.py",
    "ast_data": "FunctionDef name:_logexpxmexpy arg:x arg:y arguments arg arg Assign Call Call If Call Assign Call Call Assign Call Assign Call Assign Call Call Assign Compare Assign Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "group",
    "source_code": "class group(metaclass=_WorldMeta):\n    pass",
    "docstring": "Group class. Placeholder.",
    "type": "class",
    "file_path": "pytorch\\torch\\distributed\\distributed_c10d.py",
    "ast_data": "ClassDef name:group"
  },
  {
    "library": "scikit-learn",
    "name": "check_clusterings",
    "source_code": "def check_clusterings(labels_true, labels_pred):\n    labels_true = check_array(labels_true, ensure_2d=False, ensure_min_samples=0, dtype=None)\n    labels_pred = check_array(labels_pred, ensure_2d=False, ensure_min_samples=0, dtype=None)\n    type_label = type_of_target(labels_true)\n    type_pred = type_of_target(labels_pred)\n    if 'continuous' in (type_pred, type_label):\n        msg = f'Clustering metrics expects discrete values but received {type_label} values for label, and {type_pred} values for target'\n        warnings.warn(msg, UserWarning)\n    if labels_true.ndim != 1:\n        raise ValueError('labels_true must be 1D: shape is %r' % (labels_true.shape,))\n    if labels_pred.ndim != 1:\n        raise ValueError('labels_pred must be 1D: shape is %r' % (labels_pred.shape,))\n    check_consistent_length(labels_true, labels_pred)\n    return (labels_true, labels_pred)",
    "docstring": "Check that the labels arrays are 1D and of same dimension. Parameters ---------- labels_true : array-like of shape (n_samples,) The true labels. labels_pred : array-like of shape (n_samples,) The predicted labels.",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\metrics\\cluster\\_supervised.py",
    "ast_data": "FunctionDef name:check_clusterings arg:labels_true arg:labels_pred arguments arg arg Assign Call Assign Call Assign Call Assign Call If Compare Assign Call If Compare Raise Call If Compare Raise Call Call Return return:yes"
  },
  {
    "library": "django",
    "name": "formfield",
    "source_code": "def formfield(self, form_class=None, choices_form_class=None, **kwargs):\n    defaults = {'required': not self.blank, 'label': capfirst(self.verbose_name), 'help_text': self.help_text}\n    if self.has_default():\n        if callable(self.default):\n            defaults['initial'] = self.default\n            defaults['show_hidden_initial'] = True\n        else:\n            defaults['initial'] = self.get_default()\n    if self.choices is not None:\n        include_blank = self.blank or not (self.has_default() or 'initial' in kwargs)\n        defaults['choices'] = self.get_choices(include_blank=include_blank)\n        defaults['coerce'] = self.to_python\n        if self.null:\n            defaults['empty_value'] = None\n        if choices_form_class is not None:\n            form_class = choices_form_class\n        else:\n            form_class = forms.TypedChoiceField\n        for k in list(kwargs):\n            if k not in ('coerce', 'empty_value', 'choices', 'required', 'widget', 'label', 'initial', 'help_text', 'error_messages', 'show_hidden_initial', 'disabled'):\n                del kwargs[k]\n    defaults.update(kwargs)\n    if form_class is None:\n        form_class = forms.CharField\n    return form_class(**defaults)",
    "docstring": "Return a django.forms.Field instance for this field.",
    "type": "method",
    "file_path": "django\\django\\db\\models\\fields\\__init__.py",
    "ast_data": "FunctionDef name:formfield arg:self arg:form_class arg:choices_form_class arguments arg arg arg arg Assign Call If Call If Call Assign Assign Assign Call If Compare Assign BoolOp BoolOp Call Compare Assign Call Assign If Assign If Compare Assign Assign For Call If Compare Call If Compare Assign Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "_set_stream_by_id",
    "source_code": "def _set_stream_by_id(stream_id, device_index, device_type):\n    torch._C._xpu_setStream(stream_id=stream_id, device_index=device_index, device_type=device_type)",
    "docstring": "set stream specified by the stream id, device index and device type Args: stream_id (int): not visible to the user, used to assigned to the specific stream. device_index (int): selected device index. device_type (int): selected device type.",
    "type": "function",
    "file_path": "pytorch\\torch\\xpu\\__init__.py",
    "ast_data": "FunctionDef name:_set_stream_by_id arg:stream_id arg:device_index arg:device_type arguments arg arg arg Call"
  },
  {
    "library": "kornia",
    "name": "ModelCheckpoint",
    "source_code": "class ModelCheckpoint:\n\n    def __init__(self, filepath: str, monitor: str, filename_fcn: Optional[Callable[..., str]]=None, max_mode: bool=False) -> None:\n        self.filepath = filepath\n        self.monitor = monitor\n        self._filename_fcn = filename_fcn or default_filename_fcn\n        self.best_metric: float = -inf if max_mode else inf\n        self.max_mode = max_mode\n        Path(self.filepath).mkdir(parents=True, exist_ok=True)\n\n    def __call__(self, model: Module, epoch: int, valid_metric: Dict[str, AverageMeter]) -> None:\n        valid_metric_value: float = valid_metric[self.monitor].avg\n        is_best: bool = valid_metric_value > self.best_metric if self.max_mode else valid_metric_value < self.best_metric\n        if is_best:\n            self.best_metric = valid_metric_value\n            filename = Path(self.filepath) / self._filename_fcn(epoch, valid_metric_value)\n            torch.save(model, filename)",
    "docstring": "Callback that save the model at the end of every epoch. Args: filepath: the where to save the mode. monitor: the name of the value to track. max_mode: if true metric will be multiply by -1 turn this flag when increasing metric value is expected for example Accuracy **Usage example:** .. code:: python model_checkpoint = ModelCheckpoint( filepath=\"./outputs\", monitor=\"loss\", ) trainer = ImageClassifierTrainer(..., callbacks={\"on_checkpoint\", model_checkpoint} )",
    "type": "class",
    "file_path": "kornia\\kornia\\x\\callbacks.py",
    "ast_data": "ClassDef name:ModelCheckpoint FunctionDef name:__init__ arg:self arg:filepath arg:monitor arg:filename_fcn arg:max_mode arguments arg arg arg arg arg Assign Assign Assign BoolOp Assign Call Call FunctionDef name:__call__ arg:self arg:model arg:epoch arg:valid_metric arguments arg arg arg arg Compare Compare If Assign Assign Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "RespectCompiledTrainableState",
    "source_code": "class RespectCompiledTrainableState(object):\n\n    def __init__(self, model):\n        self._model = model\n        self._current_trainable_state = None\n        self._compiled_trainable_state = None\n        self._should_set_trainable = False\n\n    def __enter__(self):\n        self._current_trainable_state = self._model._get_trainable_state()\n        self._compiled_trainable_state = self._model._compiled_trainable_state\n        for layer, trainable in self._compiled_trainable_state.items():\n            if layer in self._current_trainable_state and trainable != self._current_trainable_state[layer]:\n                self._should_set_trainable = True\n                break\n        if self._should_set_trainable:\n            self._model._set_trainable_state(self._compiled_trainable_state)\n\n    def __exit__(self, type_arg, value_arg, traceback_arg):\n        if self._should_set_trainable:\n            self._model._set_trainable_state(self._current_trainable_state)\n        return False",
    "docstring": "Set and restore trainable state if it has changed since compile. The keras API guarantees that the value of each Layer's property at time will be used when training that model. In order to respect this requirement, it may be necessary to set the trainable value of layers to their compile time values before beginning a training endpoint and restore the values before returning from said endpoint. This scope checks if any layer's trainable state has changed since Model compile, and performs this set and un-set bookkeeping. However, the trainable state of a layer changes quite infrequently, if ever, for many kinds of workflows. Moreover, updating every layer in a model is an expensive operation. As a result, we will only explicitly set and unset the trainable state of a model if a trainable value has changed since compile.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\training_utils.py",
    "ast_data": "ClassDef name:RespectCompiledTrainableState FunctionDef name:__init__ arg:self arg:model arguments arg arg Assign Assign Assign Assign FunctionDef name:__enter__ arg:self arguments arg Assign Call Assign For Call If BoolOp Compare Compare Assign If Call FunctionDef name:__exit__ arg:self arg:type_arg arg:value_arg arg:traceback_arg arguments arg arg arg arg If Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "_LazyTickList",
    "source_code": "class _LazyTickList:\n\n    def __init__(self, major):\n        self._major = major\n\n    def __get__(self, instance, owner):\n        if instance is None:\n            return self\n        elif self._major:\n            instance.majorTicks = []\n            tick = instance._get_tick(major=True)\n            instance.majorTicks = [tick]\n            return instance.majorTicks\n        else:\n            instance.minorTicks = []\n            tick = instance._get_tick(major=False)\n            instance.minorTicks = [tick]\n            return instance.minorTicks",
    "docstring": "A descriptor for lazy instantiation of tick lists. See comment above definition of the `` attributes.",
    "type": "class",
    "file_path": "matplotlib\\lib\\matplotlib\\axis.py",
    "ast_data": "ClassDef name:_LazyTickList FunctionDef name:__init__ arg:self arg:major arguments arg arg Assign FunctionDef name:__get__ arg:self arg:instance arg:owner arguments arg arg arg If Compare Return return:yes If Assign Assign Call Assign Return return:yes Assign Assign Call Assign Return return:yes"
  },
  {
    "library": "pandas",
    "name": "sparse_series_to_coo",
    "source_code": "def sparse_series_to_coo(ss: Series, row_levels: Iterable[int]=(0,), column_levels: Iterable[int]=(1,), sort_labels: bool=False) -> tuple[scipy.sparse.coo_matrix, list[IndexLabel], list[IndexLabel]]:\n    import scipy.sparse\n    if ss.index.nlevels < 2:\n        raise ValueError('to_coo requires MultiIndex with nlevels >= 2.')\n    if not ss.index.is_unique:\n        raise ValueError('Duplicate index entries are not allowed in to_coo transformation.')\n    row_levels = [ss.index._get_level_number(x) for x in row_levels]\n    column_levels = [ss.index._get_level_number(x) for x in column_levels]\n    v, i, j, rows, columns = _to_ijv(ss, row_levels=row_levels, column_levels=column_levels, sort_labels=sort_labels)\n    sparse_matrix = scipy.sparse.coo_matrix((v, (i, j)), shape=(len(rows), len(columns)))\n    return (sparse_matrix, rows, columns)",
    "docstring": "Convert a sparse Series to a scipy.sparse.coo_matrix using index levels row_levels, column_levels as the row and column labels respectively. Returns the sparse_matrix, row and column labels.",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\arrays\\sparse\\scipy_sparse.py",
    "ast_data": "FunctionDef name:sparse_series_to_coo arg:ss arg:row_levels arg:column_levels arg:sort_labels arguments arg arg arg arg If Compare Raise Call If Raise Call Assign Call Assign Call Assign Call Assign Call Call Call Return return:yes"
  },
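The helper itself is internal; the public route is the `Series.sparse.to_coo` accessor, which forwards these arguments. A sketch based on the documented pandas example:

```python
import numpy as np
import pandas as pd

s = pd.Series([3.0, np.nan, 1.0, 3.0, np.nan, np.nan])
s.index = pd.MultiIndex.from_tuples(
    [(1, 2, "a", 0), (1, 2, "a", 1), (1, 1, "b", 0),
     (1, 1, "b", 1), (2, 1, "b", 0), (2, 1, "b", 1)],
    names=["A", "B", "C", "D"],
)
ss = s.astype("Sparse")

# Levels A/B become rows, C/D become columns of the COO matrix.
A, rows, columns = ss.sparse.to_coo(
    row_levels=["A", "B"], column_levels=["C", "D"], sort_labels=True
)
print(A.todense())
print(rows, columns)
```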
  {
    "library": "pytorch",
    "name": "max_pool1d",
    "source_code": "def max_pool1d(input, kernel_size, stride=None, padding=0, dilation=1, ceil_mode=False, return_indices=False):\n    if return_indices:\n        raise NotImplementedError('return_indices is not yet implemented!')\n    if stride is None:\n        stride = torch.jit.annotate(list[int], [])\n    return torch.nn.functional.max_pool1d(input, kernel_size, stride, padding, dilation, ceil_mode=ceil_mode, return_indices=return_indices)",
    "docstring": "Applies a 1D max pooling over a quantized input signal composed of several quantized input planes. .. note:: The input quantization parameters are propagated to the output. See :class: for details.",
    "type": "function",
    "file_path": "pytorch\\torch\\ao\\nn\\quantized\\functional.py",
    "ast_data": "FunctionDef name:max_pool1d arg:input arg:kernel_size arg:stride arg:padding arg:dilation arg:ceil_mode arg:return_indices arguments arg arg arg arg arg arg arg If Raise Call If Compare Assign Call Return return:yes Call"
  },
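A minimal sketch of calling the quantized functional op; the scale and zero_point are arbitrary illustration values.

```python
import torch
import torch.ao.nn.quantized.functional as qF

# Quantize a float signal, then pool it.
x = torch.randn(2, 3, 8)
qx = torch.quantize_per_tensor(x, scale=0.1, zero_point=0,
                               dtype=torch.quint8)
out = qF.max_pool1d(qx, kernel_size=2, stride=2)
print(out.shape)      # torch.Size([2, 3, 4])
print(out.q_scale())  # 0.1 -- quantization params propagate to the output
```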
  {
    "library": "pandas",
    "name": "__matmul__",
    "source_code": "def __matmul__(self, other: AnyArrayLike | DataFrame) -> DataFrame | Series:\n    return self.dot(other)",
    "docstring": "Matrix multiplication using binary operator.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\frame.py",
    "ast_data": "FunctionDef name:__matmul__ arg:self arg:other arguments arg arg Return return:yes Call"
  },
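A short sketch of the operator and its equivalence to `DataFrame.dot`:

```python
import pandas as pd

df = pd.DataFrame([[1, 2], [3, 4]], columns=["a", "b"])
other = pd.DataFrame([[1, 0], [0, 1]], index=["a", "b"])

# df @ other aligns df's columns with other's index,
# then dispatches to DataFrame.dot.
print(df @ other)
print((df @ other).equals(df.dot(other)))  # True
```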
  {
    "library": "cherrypy",
    "name": "HandlerTool",
    "source_code": "class HandlerTool(Tool):\n\n    def __init__(self, callable, name=None):\n        Tool.__init__(self, 'before_handler', callable, name)\n\n    def handler(self, *args, **kwargs):\n\n        @expose\n        def handle_func(*a, **kw):\n            handled = self.callable(*args, **self._merged_args(kwargs))\n            if not handled:\n                raise cherrypy.NotFound()\n            return cherrypy.serving.response.body\n        return handle_func\n\n    def _wrapper(self, **kwargs):\n        if self.callable(**kwargs):\n            cherrypy.serving.request.handler = None\n\n    def _setup(self):\n        conf = self._merged_args()\n        p = conf.pop('priority', None)\n        if p is None:\n            p = getattr(self.callable, 'priority', self._priority)\n        cherrypy.serving.request.hooks.attach(self._point, self._wrapper, priority=p, **conf)",
    "docstring": "Tool which is called 'before main', that may skip normal handlers. If the tool successfully handles the request (by setting response.body), if should return True. This will cause CherryPy to skip any 'normal' page handler. If the tool did not handle the request, it should return False to tell CherryPy to continue on and call the normal page handler. If the tool is declared AS a page handler (see the 'handler' method), returning False will raise NotFound.",
    "type": "class",
    "file_path": "cherrypy\\cherrypy\\_cptools.py",
    "ast_data": "ClassDef name:HandlerTool FunctionDef name:__init__ arg:self arg:callable arg:name arguments arg arg arg Call FunctionDef name:handler arg:self arguments arg arg arg FunctionDef name:handle_func arguments arg arg Assign Call Call If Raise Call Return return:yes Return return:yes FunctionDef name:_wrapper arg:self arguments arg arg If Call Assign FunctionDef name:_setup arg:self arguments arg Assign Call Assign Call If Compare Assign Call Call"
  },
  {
    "library": "pandas",
    "name": "normalize",
    "source_code": "@forbid_nonstring_types(['bytes'])\ndef normalize(self, form):\n    result = self._data.array._str_normalize(form)\n    return self._wrap_result(result)",
    "docstring": "Return the Unicode normal form for the strings in the Series/Index. For more information on the forms, see the :func:. Parameters ---------- form : {'NFC', 'NFKC', 'NFD', 'NFKD'} Unicode form. Returns ------- Series/Index of objects A Series or Index of strings in the same Unicode form specified by . The returned object retains the same type as the input (Series or Index), and contains the normalized strings. See Also -------- Series.str.upper : Convert all characters in each string to uppercase. Series.str.lower : Convert all characters in each string to lowercase. Series.str.title : Convert each string to title case (capitalizing the first letter of each word). Series.str.strip : Remove leading and trailing whitespace from each string. Series.str.replace : Replace occurrences of a substring with another substring in each string. Examples -------- >>> ser = pd.Series([\"ñ\"]) >>> ser.str.normalize(\"NFC\") == ser.str.normalize(\"NFD\") 0 False dtype: bool",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\strings\\accessor.py",
    "ast_data": "FunctionDef name:normalize arg:self arg:form arguments arg arg Assign Call Return return:yes Call Call"
  },
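The docstring's own example, runnable as-is:

```python
import pandas as pd

ser = pd.Series(["ñ"])

# NFC composes characters while NFD decomposes them, so the two
# normal forms of the same string compare unequal.
print(ser.str.normalize("NFC") == ser.str.normalize("NFD"))
# 0    False
# dtype: bool
```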
  {
    "library": "tensorflow",
    "name": "values",
    "source_code": "@property\ndef values(self):\n    return self._values",
    "docstring": "The concatenated rows for this ragged tensor. is a potentially ragged tensor formed by flattening the two outermost dimensions of into a single dimension. (where is the number of items in the outer two dimensions of ). Returns: A potentially ragged tensor. #### Example: >>> rt = tf.ragged.constant([[3, 1, 4, 1], [], [5, 9, 2], [6], []]) >>> print(rt.values) tf.Tensor([3 1 4 1 5 9 2 6], shape=(8,), dtype=int32)",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\ragged_tensor.py",
    "ast_data": "FunctionDef name:values arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, shape=None, dtype=dtypes.float32, ragged_rank=None, row_splits_dtype=dtypes.int64, flat_values_spec=None):\n    self._shape = tensor_shape.as_shape(shape)\n    self._row_splits_dtype = dtypes.as_dtype(row_splits_dtype)\n    if flat_values_spec is not None:\n        if dtype is None:\n            dtype = flat_values_spec.dtype\n        elif dtype != flat_values_spec.dtype:\n            raise ValueError('dtype must be the same as flat_values_spec.dtype')\n    elif dtype is None:\n        raise ValueError('At least one of dtype or flat_values_spec must be provided')\n    self._dtype = dtypes.as_dtype(dtype)\n    self._flat_values_spec = flat_values_spec\n    rank = self._shape.ndims\n    if ragged_rank is None:\n        if rank is None:\n            raise ValueError('Must specify ragged_rank or a shape with a known rank.')\n        ragged_rank = rank - 1\n    self._ragged_rank = ragged_rank\n    if not isinstance(self._ragged_rank, int):\n        raise TypeError(f'Argument `ragged_rank` must be an int. Received {ragged_rank}.')\n    if rank is not None:\n        if ragged_rank >= rank:\n            raise ValueError(f'Argument `ragged_rank` ({ragged_rank}) must be less than rank ({rank}).')",
    "docstring": "Constructs a type specification for a . Args: shape: The shape of the RaggedTensor, or to allow any shape. If a shape is specified, then all ragged dimensions must have size . dtype: of values in the RaggedTensor. ragged_rank: Python integer, the number of times the RaggedTensor's flat_values is partitioned. Defaults to . row_splits_dtype: for the RaggedTensor's tensor. One of or . flat_values_spec: TypeSpec for flat_value of the RaggedTensor. It shall be provided when the flat_values is a CompositeTensor rather then Tensor. If both and and are provided, must be the same as . (experimental)",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\ragged_tensor.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:shape arg:dtype arg:ragged_rank arg:row_splits_dtype arg:flat_values_spec arguments arg arg arg arg arg arg Assign Call Assign Call If Compare If Compare Assign If Compare Raise Call If Compare Raise Call Assign Call Assign Assign If Compare If Compare Raise Call Assign Assign If Call Raise Call If Compare If Compare Raise Call"
  },
  {
    "library": "scikit-learn",
    "name": "load_data",
    "source_code": "@memory.cache\ndef load_data(dtype=np.float32, order='C', random_state=13):\n    print('Loading dataset...')\n    data = fetch_covtype(download_if_missing=True, shuffle=True, random_state=random_state)\n    X = check_array(data['data'], dtype=dtype, order=order)\n    y = (data['target'] != 1).astype(int)\n    print('Creating train-test split...')\n    n_train = 522911\n    X_train = X[:n_train]\n    y_train = y[:n_train]\n    X_test = X[n_train:]\n    y_test = y[n_train:]\n    mean = X_train.mean(axis=0)\n    std = X_train.std(axis=0)\n    mean[10:] = 0.0\n    std[10:] = 1.0\n    X_train = (X_train - mean) / std\n    X_test = (X_test - mean) / std\n    return (X_train, X_test, y_train, y_test)",
    "docstring": "Load the data, then cache and memmap the train/test split",
    "type": "function",
    "file_path": "scikit-learn\\benchmarks\\bench_covertype.py",
    "ast_data": "FunctionDef name:load_data arg:dtype arg:order arg:random_state arguments arg arg arg Call Assign Call Assign Call Assign Call Compare Call Assign Assign Assign Assign Assign Assign Call Assign Call Assign Assign Assign Assign Return return:yes"
  },
  {
    "library": "kornia",
    "name": "HesAffNetHardNet",
    "source_code": "class HesAffNetHardNet(LocalFeature):\n\n    def __init__(self, num_features: int=2048, upright: bool=False, device: Optional[Device]=None, config: Optional[Detector_config]=None) -> None:\n        if device is None:\n            device = torch.device('cpu')\n        if config is None:\n            config = get_default_detector_config()\n        detector = MultiResolutionDetector(BlobHessian(), num_features, config, ori_module=PassLAF() if upright else LAFOrienter(19), aff_module=LAFAffNetShapeEstimator(True).eval()).to(device)\n        descriptor = LAFDescriptor(None, patch_size=32, grayscale_descriptor=True).to(device)\n        super().__init__(detector, descriptor)",
    "docstring": "Convenience module, which implements GFTT detector + AffNet-HardNet descriptor.",
    "type": "class",
    "file_path": "kornia\\kornia\\feature\\integrated.py",
    "ast_data": "ClassDef name:HesAffNetHardNet FunctionDef name:__init__ arg:self arg:num_features arg:upright arg:device arg:config arguments arg arg arg arg arg If Compare Assign Call If Compare Assign Call Assign Call Call Call Call Call Call Call Assign Call Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "from_config",
    "source_code": "@classmethod\ndef from_config(cls, config, custom_objects=None, columns_by_name=None):\n    fc._check_config_keys(config, cls._fields)\n    kwargs = fc._standardize_and_copy_config(config)\n    kwargs['dtype'] = dtypes.as_dtype(config['dtype'])\n    return cls(**kwargs)",
    "docstring": "See 'FeatureColumn` base class.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\feature_column\\sequence_feature_column.py",
    "ast_data": "FunctionDef name:from_config arg:cls arg:config arg:custom_objects arg:columns_by_name arguments arg arg arg arg Call Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, num_shards):\n    self._num_shards = num_shards",
    "docstring": "Creates a new . Args: num_shards: , number of shards to partition.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\sharded_variable.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:num_shards arguments arg arg Assign"
  },
  {
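    "usage_example": "Hedged sketch of the equivalent public behavior (nargminmax itself is a private helper): >>> import pandas as pd >>> arr = pd.array([1.0, None, 3.0])  # nullable ExtensionArray with a missing value >>> arr.argmax()  # the missing value is skipped, so position 2 wins 2",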
    "library": "pandas",
    "name": "nargminmax",
    "source_code": "def nargminmax(values: ExtensionArray, method: str, axis: AxisInt=0):\n    assert method in {'argmax', 'argmin'}\n    func = np.argmax if method == 'argmax' else np.argmin\n    mask = np.asarray(isna(values))\n    arr_values = values._values_for_argsort()\n    if arr_values.ndim > 1:\n        if mask.any():\n            if axis == 1:\n                zipped = zip(arr_values, mask)\n            else:\n                zipped = zip(arr_values.T, mask.T)\n            return np.array([_nanargminmax(v, m, func) for v, m in zipped])\n        return func(arr_values, axis=axis)\n    return _nanargminmax(arr_values, mask, func)",
    "docstring": "Implementation of np.argmin/argmax but for ExtensionArray and which handles missing values. Parameters ---------- values : ExtensionArray method : {\"argmax\", \"argmin\"} axis : int, default 0 Returns ------- int",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\sorting.py",
    "ast_data": "FunctionDef name:nargminmax arg:values arg:method arg:axis arguments arg arg arg Compare Assign Compare Assign Call Call Assign Call If Compare If Call If Compare Assign Call Assign Call Return return:yes Call Call Return return:yes Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_convert_values_to_tf_tensors",
    "source_code": "def _convert_values_to_tf_tensors(sample: rd.RepresentativeSample) -> Mapping[str, core.Tensor]:\n    tensor_mapping = {}\n    for name, tensorlike_value in sample.items():\n        if isinstance(tensorlike_value, core.Tensor):\n            tensor_value = tensorlike_value\n        else:\n            tensor_value = tensor_conversion.convert_to_tensor_v2_with_dispatch(tensorlike_value)\n        tensor_mapping[name] = tensor_value\n    return tensor_mapping",
    "docstring": "Converts TensorLike values of to Tensors. Creates a copy of , where each value is converted to Tensors unless it is already a Tensor. The values are not converted in-place (i.e. is not mutated). Args: sample: A representative sample, which is a map of {name -> tensorlike value}. Returns: Converted map of {name -> tensor}.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\compiler\\mlir\\quantization\\tensorflow\\python\\py_function_lib.py",
    "ast_data": "FunctionDef name:_convert_values_to_tf_tensors arg:sample arguments arg Assign For Call If Call Assign Assign Call Assign Return return:yes"
  },
  {
    "library": "pandas",
    "name": "_to_datetimearray",
    "source_code": "def _to_datetimearray(self) -> DatetimeArray:\n    from pandas.core.arrays.datetimes import DatetimeArray, tz_to_dtype\n    pa_type = self._pa_array.type\n    assert pa.types.is_timestamp(pa_type)\n    np_dtype = np.dtype(f'M8[{pa_type.unit}]')\n    dtype = tz_to_dtype(pa_type.tz, pa_type.unit)\n    np_array = self._pa_array.to_numpy()\n    np_array = np_array.astype(np_dtype, copy=False)\n    return DatetimeArray._simple_new(np_array, dtype=dtype)",
    "docstring": "Convert a pyarrow timestamp typed array to a DatetimeArray.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\arrays\\arrow\\array.py",
    "ast_data": "FunctionDef name:_to_datetimearray arg:self arguments arg Assign Call Assign Call Assign Call Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "gated_grpc_debug_watches",
    "source_code": "def gated_grpc_debug_watches(self):\n    return list(self._gated_grpc_debug_watches)",
    "docstring": "Get the list of debug watches with attribute gated_grpc=True. Since the server receives from the debugged runtime, it can only return such debug watches that it has received so far. Returns: A of representing the debug watches with gated_grpc=True. Each element has the attributes: as a , as an , as a .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\debug\\lib\\grpc_debug_server.py",
    "ast_data": "FunctionDef name:gated_grpc_debug_watches arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_list_tail_with_ellipsis",
    "source_code": "def _list_tail_with_ellipsis(arr):\n    if not arr:\n        return ']'\n    else:\n        return ', ' + _element_to_string(arr[0]) + _list_tail_with_ellipsis(arr[1:])",
    "docstring": "Print the tail of a list where the list might have an ellipsis.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\dynamic_ragged_shape.py",
    "ast_data": "FunctionDef name:_list_tail_with_ellipsis arg:arr arguments arg If Return return:yes Return return:yes Call Call"
  },
  {
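    "usage_example": "Worked example (assumes torch._functorch.config.functionalize_rng_ops is False, so no rng seed/offset inputs are appended): >>> num_fw_fixed_arguments(dynamo_gm_num_inputs=7, aot_fw_gm_num_inputs=10) 3",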
    "library": "pytorch",
    "name": "num_fw_fixed_arguments",
    "source_code": "def num_fw_fixed_arguments(dynamo_gm_num_inputs: int, aot_fw_gm_num_inputs: int) -> int:\n    num_rng_seed_offset_inputs = 2 if torch._functorch.config.functionalize_rng_ops else 0\n    return aot_fw_gm_num_inputs - dynamo_gm_num_inputs - num_rng_seed_offset_inputs",
    "docstring": "Computes the number of inputs to the aot fw graph which have fixed addresses (params and buffers)",
    "type": "function",
    "file_path": "pytorch\\torch\\_inductor\\utils.py",
    "ast_data": "FunctionDef name:num_fw_fixed_arguments arg:dynamo_gm_num_inputs arg:aot_fw_gm_num_inputs arguments arg arg Assign Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "get_event_pairs",
    "source_code": "def get_event_pairs(self: Self, iters: int) -> list[tuple[torch.cuda.Event, torch.cuda.Event]]:\n    return [(torch.cuda.Event(enable_timing=True), torch.cuda.Event(enable_timing=True)) for _ in range(iters)]",
    "docstring": "Get pairs of CUDA events.",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\runtime\\benchmarking.py",
    "ast_data": "FunctionDef name:get_event_pairs arg:self arg:iters arguments arg arg Return return:yes Call Call Call"
  },
  {
    "library": "scikit-learn",
    "name": "_single_array_device",
    "source_code": "def _single_array_device(array):\n    if isinstance(array, (numpy.ndarray, numpy.generic)) or not hasattr(array, 'device') or (not get_config()['array_api_dispatch']):\n        return None\n    else:\n        return array.device",
    "docstring": "Hardware device where the array data resides on.",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\utils\\_array_api.py",
    "ast_data": "FunctionDef name:_single_array_device arg:array arguments arg If BoolOp Call Call Call Return return:no Return return:yes"
  },
  {
    "library": "scipy",
    "name": "pprint",
    "source_code": "def pprint(self):\n    print('Beta:', self.beta)\n    print('Beta Std Error:', self.sd_beta)\n    print('Beta Covariance:', self.cov_beta)\n    if hasattr(self, 'info'):\n        print('Residual Variance:', self.res_var)\n        print('Inverse Condition #:', self.inv_condnum)\n        print('Reason(s) for Halting:')\n        for r in self.stopreason:\n            print(f'  {r}')",
    "docstring": "Pretty-print important results.",
    "type": "method",
    "file_path": "scipy\\scipy\\odr\\_odrpack.py",
    "ast_data": "FunctionDef name:pprint arg:self arguments arg Call Call Call If Call Call Call Call For Call"
  },
  {
    "library": "tensorflow",
    "name": "_flag_value_as_list",
    "source_code": "def _flag_value_as_list(self, wanted_flag_name):\n    string_value_list = []\n    found, flag_value = self.get_flag_value(wanted_flag_name)\n    if found:\n        assert flag_value is not None\n        string_value_list = flag_value.split(',')\n    return string_value_list",
    "docstring": "Returns the string list of a TensorTracer flag. Args: wanted_flag_name: the name of the flag we are looking for. Returns: The list value of the flag.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\tpu\\tensor_tracer_flags.py",
    "ast_data": "FunctionDef name:_flag_value_as_list arg:self arg:wanted_flag_name arguments arg arg Assign Assign Call If Compare Assign Call Return return:yes"
  },
  {
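    "usage_example": "Illustrative sketch of this private helper, based on the source above: >>> import numpy as np >>> _get_fill_value(np.dtype('float64'))  # NaN-capable dtype, no explicit fill value nan >>> _get_fill_value(np.dtype('float64'), fill_value_typ='+inf') inf",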
    "library": "pandas",
    "name": "_get_fill_value",
    "source_code": "def _get_fill_value(dtype: DtypeObj, fill_value: Scalar | None=None, fill_value_typ=None):\n    if fill_value is not None:\n        return fill_value\n    if _na_ok_dtype(dtype):\n        if fill_value_typ is None:\n            return np.nan\n        elif fill_value_typ == '+inf':\n            return np.inf\n        else:\n            return -np.inf\n    elif fill_value_typ == '+inf':\n        return lib.i8max\n    else:\n        return iNaT",
    "docstring": "return the correct fill value for the dtype of the values",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\nanops.py",
    "ast_data": "FunctionDef name:_get_fill_value arg:dtype arg:fill_value arg:fill_value_typ arguments arg arg arg If Compare Return return:yes If Call If Compare Return return:yes If Compare Return return:yes Return return:yes If Compare Return return:yes Return return:yes"
  },
  {
    "library": "django",
    "name": "can_fast_delete",
    "source_code": "def can_fast_delete(self, objs, from_field=None):\n    if from_field and from_field.remote_field.on_delete is not CASCADE:\n        return False\n    if hasattr(objs, '_meta'):\n        model = objs._meta.model\n    elif hasattr(objs, 'model') and hasattr(objs, '_raw_delete'):\n        model = objs.model\n    else:\n        return False\n    if self._has_signal_listeners(model):\n        return False\n    opts = model._meta\n    return all((link == from_field for link in opts.concrete_model._meta.parents.values())) and all((related.field.remote_field.on_delete is DO_NOTHING for related in get_candidate_relations_to_delete(opts))) and (not any((hasattr(field, 'bulk_related_objects') for field in opts.private_fields)))",
    "docstring": "Determine if the objects in the given queryset-like or single object can be fast-deleted. This can be done if there are no cascades, no parents and no signal listeners for the object class. The 'from_field' tells where we are coming from - we need this to determine if the objects are in fact to be deleted. Allow also skipping parent -> child -> parent chain preventing fast delete of the child.",
    "type": "method",
    "file_path": "django\\django\\db\\models\\deletion.py",
    "ast_data": "FunctionDef name:can_fast_delete arg:self arg:objs arg:from_field arguments arg arg arg If BoolOp Compare Return return:yes If Call Assign If BoolOp Call Call Assign Return return:yes If Call Return return:yes Assign Return return:yes BoolOp Call Compare Call Call Compare Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "_convert",
    "source_code": "def _convert(module, mapping=None, inplace=False, is_reference=False, convert_custom_config_dict=None, use_precomputed_fake_quant=False):\n    if mapping is None:\n        mapping = get_default_static_quant_reference_module_mappings() if is_reference else get_default_static_quant_module_mappings()\n    if convert_custom_config_dict is None:\n        convert_custom_config_dict = get_default_custom_config_dict()\n    custom_module_class_mapping = convert_custom_config_dict.get('observed_to_quantized_custom_module_class', {})\n    if not inplace:\n        module = copy.deepcopy(module)\n    reassign = {}\n    for name, mod in module.named_children():\n        if not isinstance(mod, _FusedModule) and type_before_parametrizations(mod) not in custom_module_class_mapping:\n            _convert(mod, mapping, True, is_reference, convert_custom_config_dict, use_precomputed_fake_quant=use_precomputed_fake_quant)\n        reassign[name] = swap_module(mod, mapping, custom_module_class_mapping, use_precomputed_fake_quant)\n    for key, value in reassign.items():\n        module._modules[key] = value\n    return module",
    "docstring": "Converts submodules in input module to a different module according to by calling method on the target module class Args: module: input module mapping: a dictionary that maps from source module type to target module type, can be overwritten to allow swapping user defined Modules inplace: carry out model transformations in-place, the original module is mutated is_reference: a flag to enable quantized reference module use_precomputed_fake_quant: a flag to enable use of precomputed fake quant",
    "type": "function",
    "file_path": "pytorch\\torch\\ao\\quantization\\quantize.py",
    "ast_data": "FunctionDef name:_convert arg:module arg:mapping arg:inplace arg:is_reference arg:convert_custom_config_dict arg:use_precomputed_fake_quant arguments arg arg arg arg arg arg If Compare Assign Call Call If Compare Assign Call Assign Call If Assign Call Assign For Call If BoolOp Call Compare Call Call Assign Call For Call Assign Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "sharded_optim_state_dict",
    "source_code": "@staticmethod\ndef sharded_optim_state_dict(model: torch.nn.Module, optim: torch.optim.Optimizer, group: Optional[dist.ProcessGroup]=None) -> dict[str, Any]:\n    FullyShardedDataParallel._warn_legacy_optim_state_dict('sharded_optim_state_dict', 'optim_state_dict', stacklevel=2)\n    return FullyShardedDataParallel._optim_state_dict_impl(model=model, optim=optim, optim_state_dict=optim.state_dict(), optim_input=None, rank0_only=False, full_state_dict=False, group=group, _stacklevel=2)",
    "docstring": "Return the optimizer state-dict in its sharded form. The API is similar to :meth: but this API chunks all non-zero-dimension states to :class: to save memory. This API should only be used when the model `full_optim_state_dict`.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\fsdp\\fully_sharded_data_parallel.py",
    "ast_data": "FunctionDef name:sharded_optim_state_dict arg:model arg:optim arg:group arguments arg arg arg Call Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "random_binomial",
    "source_code": "@dispatch.add_dispatch_support\n@doc_controls.do_not_generate_docs\ndef random_binomial(shape, p=0.0, dtype=None, seed=None):\n    warnings.warn('`tf.keras.backend.random_binomial` is deprecated, and will be removed in a future version.Please use `tf.keras.backend.random_bernoulli` instead.')\n    return random_bernoulli(shape, p, dtype, seed)",
    "docstring": "Returns a tensor with random binomial distribution of values. DEPRECATED, use instead. The binomial distribution with parameters and is the probability distribution of the number of successful Bernoulli process. Only supports = 1 for now. Args: shape: A tuple of integers, the shape of tensor to create. p: A float, `0. >> random_binomial_tensor = tf.keras.backend.random_binomial(shape=(2,3), ... p=0.5) >>> random_binomial_tensor",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\backend.py",
    "ast_data": "FunctionDef name:random_binomial arg:shape arg:p arg:dtype arg:seed arguments arg arg arg arg Call Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "set_min",
    "source_code": "def set_min(self, min):\n    self.set_val((min, self.val[1]))",
    "docstring": "Set the lower value of the slider to *min*. Parameters ---------- min : float",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\widgets.py",
    "ast_data": "FunctionDef name:set_min arg:self arg:min arguments arg arg Call"
  },
  {
    "library": "tensorflow",
    "name": "_check_dims_and_partition_or_replicate_on_host",
    "source_code": "def _check_dims_and_partition_or_replicate_on_host(self, tensor, dims):\n    self._check_input_partition_dims(tensor, dims)\n    return partition_or_replicate_on_host(tensor, dims)",
    "docstring": "Checks dims and partitions or replicates the input tensor. The ops inside this function are placed on the host side. Args: tensor: The input tensor which will be partitioned or replicated. dims: A list of integer describes how to partition the input tensor. Returns: An iterator of s or a list of partitioned tensors.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\tpu\\tpu_feed.py",
    "ast_data": "FunctionDef name:_check_dims_and_partition_or_replicate_on_host arg:self arg:tensor arg:dims arguments arg arg arg Call Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "fit",
    "source_code": "def fit(self, X, y=None, **params):\n    self.fit_transform(X, **params)\n    return self",
    "docstring": "Learn a NMF model for the data X. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) Training vector, where is the number of samples and is the number of features. y : Ignored Not used, present for API consistency by convention. **params : kwargs Parameters (keyword arguments) and values passed to the fit_transform instance. Returns ------- self : object Returns the instance itself.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\decomposition\\_nmf.py",
    "ast_data": "FunctionDef name:fit arg:self arg:X arg:y arguments arg arg arg arg Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "verts",
    "source_code": "@verts.setter\ndef verts(self, xys):\n    self._xys = [*xys, xys[0]]\n    self._selection_completed = True\n    self.set_visible(True)\n    if self._draw_box and self._box is None:\n        self._add_box()\n    self._draw_polygon()",
    "docstring": "Set the polygon vertices. This will remove any preexisting vertices, creating a complete polygon with the new vertices.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\widgets.py",
    "ast_data": "FunctionDef name:verts arg:self arg:xys arguments arg arg Assign Assign Call If BoolOp Compare Call Call"
  },
  {
    "library": "tensorflow",
    "name": "flush",
    "source_code": "def flush(self):\n    super(TFRecordWriter, self).flush()",
    "docstring": "Flush the file.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\lib\\io\\tf_record.py",
    "ast_data": "FunctionDef name:flush arg:self arguments arg Call Call"
  },
  {
    "library": "pytorch",
    "name": "remove_start_callback",
    "source_code": "def remove_start_callback(self, callback: Callable[[], None]) -> None:\n    self.start_callbacks.remove(callback)",
    "docstring": "Remove a registered start callback function. Args: - callback (Callable): The callback function to remove.",
    "type": "method",
    "file_path": "pytorch\\torch\\_dynamo\\callback.py",
    "ast_data": "FunctionDef name:remove_start_callback arg:self arg:callback arguments arg arg Call"
  },
  {
    "library": "seaborn",
    "name": "fit_fast",
    "source_code": "def fit_fast(self, grid):\n\n    def reg_func(_x, _y):\n        return np.linalg.pinv(_x).dot(_y)\n    X, y = (np.c_[np.ones(len(self.x)), self.x], self.y)\n    grid = np.c_[np.ones(len(grid)), grid]\n    yhat = grid.dot(reg_func(X, y))\n    if self.ci is None:\n        return (yhat, None)\n    beta_boots = algo.bootstrap(X, y, func=reg_func, n_boot=self.n_boot, units=self.units, seed=self.seed).T\n    yhat_boots = grid.dot(beta_boots).T\n    return (yhat, yhat_boots)",
    "docstring": "Low-level regression and prediction using linear algebra.",
    "type": "method",
    "file_path": "seaborn\\seaborn\\regression.py",
    "ast_data": "FunctionDef name:fit_fast arg:self arg:grid arguments arg arg FunctionDef name:reg_func arg:_x arg:_y arguments arg arg Return return:yes Call Call Assign Call Call Assign Call Call Assign Call Call If Compare Return return:yes Assign Call Assign Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_process_indexing",
    "source_code": "def _process_indexing(self, index):\n    return self.kernel.kexpr(self.kernel.rename_indexing(index))",
    "docstring": "Process and rename indexing, adding symbols as kernel inputs.",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\select_algorithm.py",
    "ast_data": "FunctionDef name:_process_indexing arg:self arg:index arguments arg arg Return return:yes Call Call"
  },
  {
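    "usage_example": "Worked example based on the source above; the sentinel ('' by default) marks positions that continue the previous index: >>> get_level_lengths([['a', '', 'b']]) [{0: 2, 2: 1}]",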
    "library": "pandas",
    "name": "get_level_lengths",
    "source_code": "def get_level_lengths(levels: Any, sentinel: bool | object | str='') -> list[dict[int, int]]:\n    if len(levels) == 0:\n        return []\n    control = [True] * len(levels[0])\n    result = []\n    for level in levels:\n        last_index = 0\n        lengths = {}\n        for i, key in enumerate(level):\n            if control[i] and key == sentinel:\n                pass\n            else:\n                control[i] = False\n                lengths[last_index] = i - last_index\n                last_index = i\n        lengths[last_index] = len(level) - last_index\n        result.append(lengths)\n    return result",
    "docstring": "For each index in each level the function returns lengths of indexes. Parameters ---------- levels : list of lists List of values on for level. sentinel : string, optional Value which states that no new index starts on there. Returns ------- Returns list of maps. For each level returns map of indexes (key is index in row and value is length of index).",
    "type": "function",
    "file_path": "pandas\\pandas\\io\\formats\\format.py",
    "ast_data": "FunctionDef name:get_level_lengths arg:levels arg:sentinel arguments arg arg If Compare Call Return return:no Assign Call Assign For Assign Assign For Call If BoolOp Compare Assign Assign Assign Assign Call Call Return return:yes"
  },
  {
    "library": "numpy",
    "name": "UFuncTypeError",
    "source_code": "class UFuncTypeError(TypeError):\n\n    def __init__(self, ufunc):\n        self.ufunc = ufunc",
    "docstring": "Base class for all ufunc exceptions",
    "type": "class",
    "file_path": "numpy\\numpy\\_core\\_exceptions.py",
    "ast_data": "ClassDef name:UFuncTypeError FunctionDef name:__init__ arg:self arg:ufunc arguments arg arg Assign"
  },
  {
    "library": "tensorflow",
    "name": "IsLoopSwitch",
    "source_code": "def IsLoopSwitch(op):\n    if IsSwitch(op):\n        ctxt = op._get_control_flow_context()\n        return ctxt is not None and ctxt.IsWhileContext() and (not IsCondSwitch(op))\n    return False",
    "docstring": "Return true if is the Switch for a while loop.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\control_flow_util.py",
    "ast_data": "FunctionDef name:IsLoopSwitch arg:op arguments arg If Call Assign Call Return return:yes BoolOp Compare Call Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "set_figure",
    "source_code": "def set_figure(self, figure, update_tools=True):\n    if self._key_press_handler_id:\n        self.canvas.mpl_disconnect(self._key_press_handler_id)\n    self._figure = figure\n    if figure:\n        self._key_press_handler_id = self.canvas.mpl_connect('key_press_event', self._key_press)\n    if update_tools:\n        for tool in self._tools.values():\n            tool.figure = figure",
    "docstring": "Bind the given figure to the tools. Parameters ---------- figure : update_tools : bool, default: True Force tools to update figure.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\backend_managers.py",
    "ast_data": "FunctionDef name:set_figure arg:self arg:figure arg:update_tools arguments arg arg arg If Call Assign If Assign Call If For Call Assign"
  },
  {
    "library": "pytorch",
    "name": "check_consistent",
    "source_code": "def check_consistent(new: _T, old: _T) -> None:\n    scalar_types = (torch.SymInt, torch.SymFloat, int, float)\n    if isinstance(new, torch.Tensor):\n        assert isinstance(old, torch.Tensor)\n        torch._check(old.dim() == new.dim(), lambda: f'{old.shape} != {new.shape} (old != new)')\n        for i, j in zip(old.shape, new.shape):\n            torch._check(i == j, lambda: f'{old.shape} != {new.shape} (old != new)')\n    elif isinstance(new, scalar_types) and (not isinstance(new, bool)):\n        assert isinstance(old, scalar_types) and (not isinstance(old, bool)), f'{old} != {new}'\n        torch._check(old == new, lambda: f'{old} != {new} (old != new)')",
    "docstring": "Test that two \"meta\" values (typically either Tensor or SymInt) have the same values, e.g., after retracing. If we don't understand the quantities in question, we'll just skip the consistency check.",
    "type": "function",
    "file_path": "pytorch\\torch\\fx\\experimental\\symbolic_shapes.py",
    "ast_data": "FunctionDef name:check_consistent arg:new arg:old arguments arg arg Assign If Call Call Call Compare Call Call arguments For Call Call Compare arguments If BoolOp Call Call BoolOp Call Call Call Compare arguments"
  },
  {
    "library": "pytorch",
    "name": "named_buffers",
    "source_code": "def named_buffers(self, *args, **kwargs) -> Iterator[tuple[str, torch.Tensor]]:\n    should_clean_name = self.training_state == TrainingState.SUMMON_FULL_PARAMS\n    for buffer_name, buffer in super().named_buffers(*args, **kwargs):\n        if should_clean_name:\n            buffer_name = buffer_name.replace(FSDP_PREFIX, '')\n        yield (buffer_name, buffer)",
    "docstring": "Return an iterator over module buffers, yielding both the name of the buffer and the buffer itself. Intercepts buffer names and removes all occurrences of the FSDP-specific flattened buffer prefix when inside the :meth: context manager.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\fsdp\\fully_sharded_data_parallel.py",
    "ast_data": "FunctionDef name:named_buffers arg:self arguments arg arg arg Assign Compare For Call Call If Assign Call"
  },
  {
    "library": "django",
    "name": "ValuesListIterable",
    "source_code": "class ValuesListIterable(BaseIterable):\n\n    def __iter__(self):\n        queryset = self.queryset\n        query = queryset.query\n        compiler = query.get_compiler(queryset.db)\n        return compiler.results_iter(tuple_expected=True, chunked_fetch=self.chunked_fetch, chunk_size=self.chunk_size)",
    "docstring": "Iterable returned by QuerySet.values_list(flat=False) that yields a tuple for each row.",
    "type": "class",
    "file_path": "django\\django\\db\\models\\query.py",
    "ast_data": "ClassDef name:ValuesListIterable FunctionDef name:__iter__ arg:self arguments arg Assign Assign Assign Call Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "get_residual",
    "source_code": "def get_residual(self):\n    return self.fp",
    "docstring": "Return weighted sum of squared residuals of the spline approximation: sum ((w[i]*(z[i]-s(x[i],y[i])))**2,axis=0)",
    "type": "method",
    "file_path": "scipy\\scipy\\interpolate\\_fitpack2.py",
    "ast_data": "FunctionDef name:get_residual arg:self arguments arg Return return:yes"
  },
  {
    "library": "kornia",
    "name": "MKDGradients",
    "source_code": "class MKDGradients(nn.Module):\n\n    def __init__(self) -> None:\n        super().__init__()\n        self.eps = 1e-08\n        self.grad = SpatialGradient(mode='diff', order=1, normalized=False)\n\n    def forward(self, x: Tensor) -> Tensor:\n        if not isinstance(x, Tensor):\n            raise TypeError(f'Input type is not a Tensor. Got {type(x)}')\n        if not len(x.shape) == 4:\n            raise ValueError(f'Invalid input shape, we expect Bx1xHxW. Got: {x.shape}')\n        grads_xy = -self.grad(x)\n        gx = grads_xy[:, :, 0, :, :]\n        gy = grads_xy[:, :, 1, :, :]\n        y = torch.cat(cart2pol(gx, gy, self.eps), dim=1)\n        return y\n\n    def __repr__(self) -> str:\n        return self.__class__.__name__",
    "docstring": "Module, which computes gradients of given patches, stacked as [magnitudes, orientations]. Given gradients $g_x$, $g_y$ with respect to $x$, $y$ respectively, - $\\mathbox{mags} = $\\sqrt{g_x^2 + g_y^2 + eps}$ - $\\mathbox{oris} = $\\mbox{tan}^{-1}(\\nicefrac{g_y}{g_x})$. Args: patch_size: Input patch size in pixels. Returns: gradients of given patches. Shape: - Input: (B, 1, patch_size, patch_size) - Output: (B, 2, patch_size, patch_size) Example: >>> patches = torch.rand(23, 1, 32, 32) >>> gradient = MKDGradients() >>> g = gradient(patches) # 23x2x32x32",
    "type": "class",
    "file_path": "kornia\\kornia\\feature\\mkd.py",
    "ast_data": "ClassDef name:MKDGradients FunctionDef name:__init__ arg:self arguments arg Call Call Assign Assign Call FunctionDef name:forward arg:self arg:x arguments arg arg If Call Raise Call Call If Compare Call Raise Call Assign Call Assign Assign Assign Call Call Return return:yes FunctionDef name:__repr__ arg:self arguments arg Return return:yes"
  },
  {
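    "usage_example": "Hedged sketch: >>> import numpy as np >>> np.ma.array([0, 3], mask=[False, True]).any()  # the nonzero 3 is masked, so it counts as False False",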
    "library": "numpy",
    "name": "any",
    "source_code": "def any(self, axis=None, out=None, keepdims=np._NoValue):\n    kwargs = {} if keepdims is np._NoValue else {'keepdims': keepdims}\n    mask = _check_mask_axis(self._mask, axis, **kwargs)\n    if out is None:\n        d = self.filled(False).any(axis=axis, **kwargs).view(type(self))\n        if d.ndim:\n            d.__setmask__(mask)\n        elif mask:\n            d = masked\n        return d\n    self.filled(False).any(axis=axis, out=out, **kwargs)\n    if isinstance(out, MaskedArray):\n        if out.ndim or mask:\n            out.__setmask__(mask)\n    return out",
    "docstring": "Returns True if any of the elements of evaluate to True. Masked values are considered as False during computation. Refer to for full documentation. See Also -------- numpy.ndarray.any : corresponding function for ndarrays numpy.any : equivalent function",
    "type": "method",
    "file_path": "numpy\\numpy\\ma\\core.py",
    "ast_data": "FunctionDef name:any arg:self arg:axis arg:out arg:keepdims arguments arg arg arg arg Assign Compare Assign Call If Compare Assign Call Call Call Call If Call If Assign Return return:yes Call Call If Call If BoolOp Call Return return:yes"
  },
  {
    "library": "pandas",
    "name": "_get_counts_nanvar",
    "source_code": "def _get_counts_nanvar(values_shape: Shape, mask: npt.NDArray[np.bool_] | None, axis: AxisInt | None, ddof: int, dtype: np.dtype=np.dtype(np.float64)) -> tuple[float | np.ndarray, float | np.ndarray]:\n    count = _get_counts(values_shape, mask, axis, dtype=dtype)\n    d = count - dtype.type(ddof)\n    if is_float(count):\n        if count <= ddof:\n            count = np.nan\n            d = np.nan\n    else:\n        count = cast(np.ndarray, count)\n        mask = count <= ddof\n        if mask.any():\n            np.putmask(d, mask, np.nan)\n            np.putmask(count, mask, np.nan)\n    return (count, d)",
    "docstring": "Get the count of non-null values along an axis, accounting for degrees of freedom. Parameters ---------- values_shape : Tuple[int, ...] shape tuple from values ndarray, used if mask is None mask : Optional[ndarray[bool]] locations in values that should be considered missing axis : Optional[int] axis to count along ddof : int degrees of freedom dtype : type, optional type to use for count Returns ------- count : int, np.nan or np.ndarray d : int, np.nan or np.ndarray",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\nanops.py",
    "ast_data": "FunctionDef name:_get_counts_nanvar arg:values_shape arg:mask arg:axis arg:ddof arg:dtype arguments arg arg arg arg arg Call Assign Call Assign Call If Call If Compare Assign Assign Assign Call Assign Compare If Call Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_get_submode",
    "source_code": "def _get_submode(self):\n    found, submode = self.get_flag_value(FLAG_NAME_SUBMODE)\n    if not found or not submode:\n        submode = _SUBMODE_DETAILED\n    if not submode:\n        return\n    valid_submodes = [_SUBMODE_DETAILED, _SUBMODE_BRIEF]\n    if submode not in valid_submodes:\n        raise ValueError('Invalid submode \"%s\" given to the Tensor_Tracer.Valid submodes are: %s' % (submode, valid_submodes))\n    return submode",
    "docstring": "Checks if the given submode is valid.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\tpu\\tensor_tracer_flags.py",
    "ast_data": "FunctionDef name:_get_submode arg:self arguments arg Assign Call If BoolOp Assign If Return return:no Assign If Compare Raise Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "hot",
    "source_code": "def hot() -> None:\n    set_cmap('hot')",
    "docstring": "Set the colormap to 'hot'. This changes the default colormap as well as the colormap of the current image if there is one. See `` for more information.",
    "type": "function",
    "file_path": "matplotlib\\lib\\matplotlib\\pyplot.py",
    "ast_data": "FunctionDef name:hot arguments Call"
  },
  {
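    "usage_example": "Worked example based on the source above: >>> _norm_angle(270)  # 270 wraps around to -90, inside (-180, 180] -90 >>> _norm_angle(-180) 180",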
    "library": "matplotlib",
    "name": "_norm_angle",
    "source_code": "def _norm_angle(a):\n    a = (a + 360) % 360\n    if a > 180:\n        a = a - 360\n    return a",
    "docstring": "Return the given angle normalized to -180 < *a* <= 180 degrees.",
    "type": "function",
    "file_path": "matplotlib\\lib\\mpl_toolkits\\mplot3d\\art3d.py",
    "ast_data": "FunctionDef name:_norm_angle arg:a arguments arg Assign If Compare Assign Return return:yes"
  },
  {
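    "usage_example": "Worked example based on the source above: >>> import numpy as np >>> _isconst(np.array([2.0, np.nan, 2.0]))  # NaNs are ignored array([ True]) >>> _isconst(np.array([1.0, 2.0])) array([False])",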
    "library": "scipy",
    "name": "_isconst",
    "source_code": "def _isconst(x):\n    y = x[~np.isnan(x)]\n    if y.size == 0:\n        return np.array([True])\n    else:\n        return (y[0] == y).all(keepdims=True)",
    "docstring": "Check if all values in x are the same. nans are ignored. x must be a 1d array. The return value is a 1d array with length 1, so it can be used in np.apply_along_axis.",
    "type": "function",
    "file_path": "scipy\\scipy\\stats\\_stats_py.py",
    "ast_data": "FunctionDef name:_isconst arg:x arguments arg Assign Call If Compare Return return:yes Call Return return:yes Call Compare"
  },
  {
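    "usage_example": "Hedged sketch using the public Keras API: >>> import tensorflow as tf >>> reg = tf.keras.regularizers.L2(0.01) >>> round(float(reg(tf.ones((2, 2)))), 2)  # 0.01 * sum of squares = 0.01 * 4 0.04",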
    "library": "tensorflow",
    "name": "L2",
    "source_code": "class L2(Regularizer):\n\n    def __init__(self, l2=0.01, **kwargs):\n        l2 = kwargs.pop('l', l2)\n        if kwargs:\n            raise TypeError('Argument(s) not recognized: %s' % (kwargs,))\n        l2 = 0.01 if l2 is None else l2\n        _check_penalty_number(l2)\n        self.l2 = backend.cast_to_floatx(l2)\n\n    def __call__(self, x):\n        return self.l2 * math_ops.reduce_sum(math_ops.square(x))\n\n    def get_config(self):\n        return {'l2': float(self.l2)}",
    "docstring": "A regularizer that applies a L2 regularization penalty. The L2 regularization penalty is computed as: L2 may be passed to a layer as a string identifier: >>> dense = tf.keras.layers.Dense(3, kernel_regularizer='l2') In this case, the default value used is . Attributes: l2: Float; L2 regularization factor.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\regularizers.py",
    "ast_data": "ClassDef name:L2 FunctionDef name:__init__ arg:self arg:l2 arguments arg arg arg Assign Call If Raise Call Assign Compare Call Assign Call FunctionDef name:__call__ arg:self arg:x arguments arg arg Return return:yes Call Call FunctionDef name:get_config arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "kornia",
    "name": "VerticalFlip",
    "source_code": "class VerticalFlip(OperationBase):\n\n    def __init__(self, initial_probability: float=0.5, temperature: float=0.1) -> None:\n        super().__init__(K.RandomVerticalFlip(same_on_batch=False, p=initial_probability), initial_magnitude=None, temperature=temperature, symmetric_megnitude=False)",
    "docstring": "Apply vertical flip operation. Args: initial_magnitude: the initial magnitude. temperature: temperature for RelaxedBernoulli distribution used during training.",
    "type": "class",
    "file_path": "kornia\\kornia\\augmentation\\auto\\operations\\ops.py",
    "ast_data": "ClassDef name:VerticalFlip FunctionDef name:__init__ arg:self arg:initial_probability arg:temperature arguments arg arg arg Call Call Call"
  },
  {
    "library": "scrapy",
    "name": "gotStatus",
    "source_code": "def gotStatus(self, version, status, message):\n    self.version, self.status, self.message = (version, status, message)",
    "docstring": "Set the status of the request on us. @param version: The HTTP version. @type version: L{bytes} @param status: The HTTP status code, an integer represented as a bytestring. @type status: L{bytes} @param message: The HTTP status message. @type message: L{bytes}",
    "type": "method",
    "file_path": "scrapy\\scrapy\\core\\downloader\\webclient.py",
    "ast_data": "FunctionDef name:gotStatus arg:self arg:version arg:status arg:message arguments arg arg arg arg Assign"
  },
  {
    "library": "tensorflow",
    "name": "SumOverBatchSizeMetricWrapper",
    "source_code": "class SumOverBatchSizeMetricWrapper(SumOverBatchSize):\n\n    def __init__(self, fn, name=None, dtype=None, **kwargs):\n        super(SumOverBatchSizeMetricWrapper, self).__init__(name=name, dtype=dtype)\n        self._fn = fn\n        self._fn_kwargs = kwargs\n\n    def update_state(self, y_true, y_pred, sample_weight=None):\n        y_true = math_ops.cast(y_true, self._dtype)\n        y_pred = math_ops.cast(y_pred, self._dtype)\n        y_pred, y_true = losses_utils.squeeze_or_expand_dimensions(y_pred, y_true)\n        ag_fn = autograph.tf_convert(self._fn, ag_ctx.control_status_ctx())\n        matches = ag_fn(y_true, y_pred, **self._fn_kwargs)\n        return super(SumOverBatchSizeMetricWrapper, self).update_state(matches, sample_weight=sample_weight)\n\n    def get_config(self):\n        config = {}\n        for k, v in self._fn_kwargs.items():\n            config[k] = backend.eval(v) if is_tensor_or_variable(v) else v\n        base_config = super(SumOverBatchSizeMetricWrapper, self).get_config()\n        return dict(list(base_config.items()) + list(config.items()))",
    "docstring": "Wraps a function with the metric.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\metrics.py",
    "ast_data": "ClassDef name:SumOverBatchSizeMetricWrapper FunctionDef name:__init__ arg:self arg:fn arg:name arg:dtype arguments arg arg arg arg arg Call Call Assign Assign FunctionDef name:update_state arg:self arg:y_true arg:y_pred arg:sample_weight arguments arg arg arg arg Assign Call Assign Call Assign Call Assign Call Call Assign Call Return return:yes Call Call FunctionDef name:get_config arg:self arguments arg Assign For Call Assign Call Call Assign Call Call Return return:yes Call Call Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "with_dtype",
    "source_code": "def with_dtype(self, dtype):\n    return _Broadcaster(self._source_shape, self._target_shape, self._layer_broadcasters, dtype)",
    "docstring": "Return a copy of this Broadcaster with a different dtype.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\dynamic_ragged_shape.py",
    "ast_data": "FunctionDef name:with_dtype arg:self arg:dtype arguments arg arg Return return:yes Call"
  },
  {
    "library": "kornia",
    "name": "affine",
    "source_code": "def affine(tensor: Tensor, matrix: Tensor, mode: str='bilinear', padding_mode: str='zeros', align_corners: bool=True) -> Tensor:\n    is_unbatched: bool = tensor.ndimension() == 3\n    if is_unbatched:\n        tensor = torch.unsqueeze(tensor, dim=0)\n    matrix = matrix.expand(tensor.shape[0], -1, -1)\n    height: int = tensor.shape[-2]\n    width: int = tensor.shape[-1]\n    warped: Tensor = warp_affine(tensor, matrix, (height, width), mode, padding_mode, align_corners)\n    if is_unbatched:\n        warped = torch.squeeze(warped, dim=0)\n    return warped",
    "docstring": "Apply an affine transformation to the image. .. image:: _static/img/warp_affine.png Args: tensor: The image tensor to be warped in shapes of :math:, :math: and :math:. matrix: The 2x3 affine transformation matrix. mode: interpolation mode to calculate output values ``. align_corners: interpolation flag. Returns: The warped image with the same shape as the input. Example: >>> img = torch.rand(1, 2, 3, 5) >>> aff = torch.eye(2, 3)[None] >>> out = affine(img, aff) >>> print(out.shape) torch.Size([1, 2, 3, 5])",
    "type": "function",
    "file_path": "kornia\\kornia\\geometry\\transform\\affwarp.py",
    "ast_data": "FunctionDef name:affine arg:tensor arg:matrix arg:mode arg:padding_mode arg:align_corners arguments arg arg arg arg arg Compare Call If Assign Call Assign Call Call If Assign Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "draw",
    "source_code": "def draw(self, renderer):\n    bbox, offsets = self._get_bbox_and_child_offsets(renderer)\n    px, py = self.get_offset(bbox, renderer)\n    for c, (ox, oy) in zip(self.get_visible_children(), offsets):\n        c.set_offset((px + ox, py + oy))\n        c.draw(renderer)\n    _bbox_artist(self, renderer, fill=False, props=dict(pad=0.0))\n    self.stale = False",
    "docstring": "Update the location of children if necessary and draw them to the given *renderer*.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\offsetbox.py",
    "ast_data": "FunctionDef name:draw arg:self arg:renderer arguments arg arg Assign Call Assign Call For Call Call Call Call Call Call Assign"
  },
  {
    "library": "matplotlib",
    "name": "get_default_filename",
    "source_code": "def get_default_filename(self):\n    default_basename = self.manager.get_window_title() if self.manager is not None else ''\n    default_basename = default_basename or 'image'\n    removed_chars = '<>:\"/\\\\|?*\\x00 '\n    default_basename = default_basename.translate({ord(c): '_' for c in removed_chars})\n    default_filetype = self.get_default_filetype()\n    return f'{default_basename}.{default_filetype}'",
    "docstring": "Return a suitable default filename, including the extension.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\backend_bases.py",
    "ast_data": "FunctionDef name:get_default_filename arg:self arguments arg Assign Compare Call Assign BoolOp Assign Assign Call Call Assign Call Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "is_stationary",
    "source_code": "@abstractmethod\ndef is_stationary(self):\n    pass",
    "docstring": "Returns whether the kernel is stationary.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\gaussian_process\\kernels.py",
    "ast_data": "FunctionDef name:is_stationary arg:self arguments arg"
  },
  {
    "library": "tensorflow",
    "name": "unique_object_name",
    "source_code": "def unique_object_name(name, name_uid_map=None, avoid_names=None, namespace='', zero_based=False, avoid_observed_names=False):\n    if name_uid_map is None:\n        name_uid_map = get_default_graph_uid_map()\n    if avoid_names is None:\n        if avoid_observed_names:\n            avoid_names = OBSERVED_NAMES\n        else:\n            avoid_names = set()\n    proposed_name = None\n    while proposed_name is None or proposed_name in avoid_names:\n        name_key = (namespace, name)\n        if zero_based:\n            number = name_uid_map[name_key]\n            if number:\n                proposed_name = name + '_' + str(number)\n            else:\n                proposed_name = name\n            name_uid_map[name_key] += 1\n        else:\n            name_uid_map[name_key] += 1\n            proposed_name = name + '_' + str(name_uid_map[name_key])\n    return proposed_name",
    "docstring": "Makes a object name (or arbitrary string) unique within a TensorFlow graph. Args: name: String name to make unique. name_uid_map: An optional defaultdict(int) to use when creating unique names. If None (default), uses a per-Graph dictionary. avoid_names: An optional set or dict with names which should not be used. If None (default), don't avoid any names unless is True. namespace: Gets a name which is unique within the (graph, namespace). Layers which are not Networks use a blank namespace and so get graph-global names. zero_based: If True, name sequences start with no suffix (e.g. \"dense\", \"dense_1\"). If False, naming is one-based (\"dense_1\", \"dense_2\"). avoid_observed_names: If True, avoid any names that have been observed by . Returns: Unique string name. Example: unique_object_name('dense') # dense_1 unique_object_name('dense') # dense_2",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\backend.py",
    "ast_data": "FunctionDef name:unique_object_name arg:name arg:name_uid_map arg:avoid_names arg:namespace arg:zero_based arg:avoid_observed_names arguments arg arg arg arg arg arg If Compare Assign Call If Compare If Assign Assign Call Assign While BoolOp Compare Compare Assign If Assign If Assign Call Assign Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "read_graph_execution_trace",
    "source_code": "def read_graph_execution_trace(self, graph_execution_trace_digest):\n    debug_event = self._reader.read_graph_execution_traces_event(graph_execution_trace_digest.locator)\n    return self._graph_execution_trace_from_debug_event_proto(debug_event, graph_execution_trace_digest.locator)",
    "docstring": "Read the detailed graph execution trace. Args: graph_execution_trace_digest: A object. Returns: The corresponding object.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\debug\\lib\\debug_events_reader.py",
    "ast_data": "FunctionDef name:read_graph_execution_trace arg:self arg:graph_execution_trace_digest arguments arg arg Assign Call Return return:yes Call"
  },
  {
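    "usage_example": "Hedged sketch of this private helper, based on the source above: >>> import pandas as pd >>> _get_combined_index([pd.Index([1, 2]), pd.Index([2, 3])], intersect=True) Index([2], dtype='int64')",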
    "library": "pandas",
    "name": "_get_combined_index",
    "source_code": "def _get_combined_index(indexes: list[Index], intersect: bool=False, sort: bool=False) -> Index:\n    indexes = _get_distinct_objs(indexes)\n    if len(indexes) == 0:\n        index: Index = default_index(0)\n    elif len(indexes) == 1:\n        index = indexes[0]\n    elif intersect:\n        index = indexes[0]\n        for other in indexes[1:]:\n            index = index.intersection(other)\n    else:\n        index = union_indexes(indexes, sort=False)\n        index = ensure_index(index)\n    if sort:\n        index = safe_sort_index(index)\n    return index",
    "docstring": "Return the union or intersection of indexes. Parameters ---------- indexes : list of Index or list objects When intersect=True, do not accept list of lists. intersect : bool, default False If True, calculate the intersection between indexes. Otherwise, calculate the union. sort : bool, default False Whether the result index should come out sorted or not. Returns ------- Index",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\indexes\\api.py",
    "ast_data": "FunctionDef name:_get_combined_index arg:indexes arg:intersect arg:sort arguments arg arg arg Assign Call If Compare Call Call If Compare Call Assign If Assign For Assign Call Assign Call Assign Call If Assign Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "semaphores",
    "source_code": "def semaphores(self, min_size: sympy.Expr) -> str:\n    current_device = V.graph.get_current_device_or_throw()\n    arg = WorkspaceArg(count=min_size, zero_mode=WorkspaceZeroMode.ZERO_PER_GRAPH, dtype=torch.uint32, inner_name='sem_ptr', outer_name=f'semaphores_{current_device.type}_{current_device.index}', device=current_device)\n    for existing_arg in self.workspace_args:\n        if existing_arg.inner_name == arg.inner_name:\n            assert arg == existing_arg, (arg, existing_arg)\n    self.workspace_args.append(arg)\n    return arg.inner_name",
    "docstring": "Lazily allocate a graph-wide semaphores buffer with at least min_size. This is a single buffer shared by all kernels and zero initialized once at graph start. Each kernel must leave the buffer zeroed on exit. Warning: multiple calls to this function will return the same buffer. Args: min_size: the number of int32 semaphores required Returns: name of the semaphores buffer",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\codegen\\common.py",
    "ast_data": "FunctionDef name:semaphores arg:self arg:min_size arguments arg arg Assign Call Assign Call For If Compare Compare Call Return return:yes"
  },
  {
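    "usage_example": "Worked example: with A = B = the 2x2 path-graph adjacency matrix, kron(I_2, A) + kron(A, I_2) yields the adjacency of the 2x2 grid graph: >>> import numpy as np >>> from scipy.sparse import csr_array, kronsum >>> A = csr_array(np.array([[0, 1], [1, 0]])) >>> kronsum(A, A).toarray() array([[0, 1, 1, 0], [1, 0, 0, 1], [1, 0, 0, 1], [0, 1, 1, 0]])",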
    "library": "scipy",
    "name": "kronsum",
    "source_code": "def kronsum(A, B, format=None):\n    if isinstance(A, sparray) or isinstance(B, sparray):\n        coo_sparse = coo_array\n        identity_sparse = eye_array\n    else:\n        coo_sparse = coo_matrix\n        identity_sparse = identity\n    A = coo_sparse(A)\n    B = coo_sparse(B)\n    if A.ndim != 2:\n        raise ValueError(f'kronsum requires 2D inputs. `A` is {A.ndim}D.')\n    if B.ndim != 2:\n        raise ValueError(f'kronsum requires 2D inputs. `B` is {B.ndim}D.')\n    if A.shape[0] != A.shape[1]:\n        raise ValueError('A is not square')\n    if B.shape[0] != B.shape[1]:\n        raise ValueError('B is not square')\n    dtype = upcast(A.dtype, B.dtype)\n    I_n = identity_sparse(A.shape[0], dtype=dtype)\n    I_m = identity_sparse(B.shape[0], dtype=dtype)\n    L = kron(I_m, A, format='coo')\n    R = kron(B, I_n, format='coo')\n    return (L + R).asformat(format)",
    "docstring": "kronecker sum of square sparse matrices A and B Kronecker sum of two sparse matrices is a sum of two Kronecker products kron(I_n,A) + kron(B,I_m) where A has shape (m,m) and B has shape (n,n) and I_m and I_n are identity matrices of shape (m,m) and (n,n), respectively. Parameters ---------- A square matrix B square matrix format : str format of the result (e.g. \"csr\") Returns ------- kronecker sum in a sparse matrix format",
    "type": "function",
    "file_path": "scipy\\scipy\\sparse\\_construct.py",
    "ast_data": "FunctionDef name:kronsum arg:A arg:B arg:format arguments arg arg arg If BoolOp Call Call Assign Assign Assign Assign Assign Call Assign Call If Compare Raise Call If Compare Raise Call If Compare Raise Call If Compare Raise Call Assign Call Assign Call Assign Call Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "sphinx",
    "name": "SphinxStandaloneReader",
    "source_code": "class SphinxStandaloneReader(SphinxBaseReader):\n\n    def setup(self, app: Sphinx) -> None:\n        self.transforms = self.transforms + app.registry.get_transforms()\n        super().setup(app)\n\n    def read(self, source: Input, parser: Parser, settings: Values) -> nodes.document:\n        self.source = source\n        if not self.parser:\n            self.parser = parser\n        self.settings = settings\n        self.input = self.read_source(settings.env)\n        self.parse()\n        return self.document\n\n    def read_source(self, env: BuildEnvironment) -> str:\n        content = self.source.read()\n        arg = [content]\n        env.events.emit('source-read', env.docname, arg)\n        return arg[0]",
    "docstring": "A basic document reader for Sphinx.",
    "type": "class",
    "file_path": "sphinx\\sphinx\\io.py",
    "ast_data": "ClassDef name:SphinxStandaloneReader FunctionDef name:setup arg:self arg:app arguments arg arg Assign Call Call Call FunctionDef name:read arg:self arg:source arg:parser arg:settings arguments arg arg arg arg Assign If Assign Assign Assign Call Call Return return:yes FunctionDef name:read_source arg:self arg:env arguments arg arg Assign Call Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "CallContextManager",
    "source_code": "class CallContextManager(object):\n\n    def __init__(self, call_ctx, state):\n        self._call_ctx = call_ctx\n        self._state = state\n        self._build_graph = state['build_graph']\n\n    def __enter__(self):\n        call_ctx = self._call_ctx\n        self._prev_in_call = call_ctx.in_call\n        self._prev_state = call_ctx._state\n        call_ctx.in_call = True\n        call_ctx._state = self._state\n        if self._build_graph:\n            self._prev_in_keras_graph = call_ctx._in_keras_graph\n            call_ctx._in_keras_graph = call_ctx._in_keras_graph or getattr(backend.get_graph(), 'name', None) == 'keras_graph'\n\n    def __exit__(self, *exc_info):\n        call_ctx = self._call_ctx\n        call_ctx.in_call = self._prev_in_call\n        call_ctx._state = self._prev_state\n        if self._build_graph:\n            call_ctx._in_keras_graph = self._prev_in_keras_graph",
    "docstring": "Context manager for .",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\base_layer_utils.py",
    "ast_data": "ClassDef name:CallContextManager FunctionDef name:__init__ arg:self arg:call_ctx arg:state arguments arg arg arg Assign Assign Assign FunctionDef name:__enter__ arg:self arguments arg Assign Assign Assign Assign Assign If Assign Assign BoolOp Compare Call Call FunctionDef name:__exit__ arg:self arguments arg arg Assign Assign Assign If Assign"
  },
  {
    "library": "scikit-learn",
    "name": "Benchmark",
    "source_code": "class Benchmark(ABC):\n    timer = timeit.default_timer\n    processes = 1\n    timeout = 500\n    profile, n_jobs_vals, save_estimators, save_dir, base_commit, bench_predict, bench_transform = get_from_config()\n    if profile == 'fast':\n        warmup_time = 0\n        repeat = 1\n        number = 1\n        min_run_count = 1\n        data_size = 'small'\n    elif profile == 'regular':\n        warmup_time = 1\n        repeat = (3, 100, 30)\n        data_size = 'small'\n    elif profile == 'large_scale':\n        warmup_time = 1\n        repeat = 3\n        number = 1\n        data_size = 'large'\n\n    @property\n    @abstractmethod\n    def params(self):\n        pass",
    "docstring": "Abstract base class for all the benchmarks",
    "type": "class",
    "file_path": "scikit-learn\\asv_benchmarks\\benchmarks\\common.py",
    "ast_data": "ClassDef name:Benchmark Assign Assign Assign Assign Call If Compare Assign Assign Assign Assign Assign If Compare Assign Assign Assign If Compare Assign Assign Assign Assign FunctionDef name:params arg:self arguments arg"
  },
  {
    "library": "tensorflow",
    "name": "building_function",
    "source_code": "@property\ndef building_function(self) -> bool:\n    return self._building_function",
    "docstring": "Returns True iff this graph represents a function.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\ops.py",
    "ast_data": "FunctionDef name:building_function arg:self arguments arg Return return:yes"
  },
  {
    "library": "django",
    "name": "ManyToManyDescriptor",
    "source_code": "class ManyToManyDescriptor(ReverseManyToOneDescriptor):\n\n    def __init__(self, rel, reverse=False):\n        super().__init__(rel)\n        self.reverse = reverse\n\n    @property\n    def through(self):\n        return self.rel.through\n\n    @cached_property\n    def related_manager_cls(self):\n        related_model = self.rel.related_model if self.reverse else self.rel.model\n        return create_forward_many_to_many_manager(related_model._default_manager.__class__, self.rel, reverse=self.reverse)\n\n    def _get_set_deprecation_msg_params(self):\n        return ('%s side of a many-to-many set' % ('reverse' if self.reverse else 'forward'), self.rel.accessor_name if self.reverse else self.field.name)",
    "docstring": "Accessor to the related objects manager on the forward and reverse sides of a many-to-many relation. In the example:: class Pizza(Model): toppings = ManyToManyField(Topping, related_name='pizzas') `` defined below.",
    "type": "class",
    "file_path": "django\\django\\db\\models\\fields\\related_descriptors.py",
    "ast_data": "ClassDef name:ManyToManyDescriptor FunctionDef name:__init__ arg:self arg:rel arg:reverse arguments arg arg arg Call Call Assign FunctionDef name:through arg:self arguments arg Return return:yes FunctionDef name:related_manager_cls arg:self arguments arg Assign Return return:yes Call FunctionDef name:_get_set_deprecation_msg_params arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "function_def",
    "source_code": "@property\ndef function_def(self):\n    return self._delayed_rewrite_functions.forward().cached_definition",
    "docstring": "Returns a object representing this function.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\polymorphic_function\\concrete_function.py",
    "ast_data": "FunctionDef name:function_def arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "options",
    "source_code": "def options(self):\n    if context.executing_eagerly():\n        options = self._options_tensor_to_options(self._options())\n        options._set_mutable(False)\n        return options\n    warnings.warn('To make it possible to preserve tf.data options across serialization boundaries, their implementation has moved to be part of the TensorFlow graph. As a consequence, the options value is in general no longer known at graph construction time. Invoking this method in graph mode retains the legacy behavior of the original implementation, but note that the returned value might not reflect the actual value of the options.')\n    return self._options_attr",
    "docstring": "Returns the options for this dataset and its inputs. Returns: A object representing the dataset options.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\data\\ops\\dataset_ops.py",
    "ast_data": "FunctionDef name:options arg:self arguments arg If Call Assign Call Call Call Return return:yes Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "get_cpu_arch",
    "source_code": "def get_cpu_arch():\n    key = 'cpu_arch'\n    out, err = run_shell_cmd(cmds_all[PLATFORM][key])\n    if err and FLAGS.debug:\n        print('Error in detecting CPU arch:\\n %s' % str(err))\n    return out.strip(b'\\n')",
    "docstring": "Retrieves processor architecture type (32-bit or 64-bit). Returns: String that is CPU architecture. e.g. 'x86_64'",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\tools\\tensorflow_builder\\config_detector\\config_detector.py",
    "ast_data": "FunctionDef name:get_cpu_arch arguments Assign Assign Call If BoolOp Call Call Return return:yes Call"
  },
  {
    "library": "django",
    "name": "_key_to_file",
    "source_code": "def _key_to_file(self, session_key=None):\n    if session_key is None:\n        session_key = self._get_or_create_session_key()\n    if not set(session_key).issubset(VALID_KEY_CHARS):\n        raise InvalidSessionKey('Invalid characters in session key')\n    return os.path.join(self.storage_path, self.file_prefix + session_key)",
    "docstring": "Get the file associated with this session key.",
    "type": "method",
    "file_path": "django\\django\\contrib\\sessions\\backends\\file.py",
    "ast_data": "FunctionDef name:_key_to_file arg:self arg:session_key arguments arg arg If Compare Assign Call If Call Call Raise Call Return return:yes Call"
  },
  {
    "library": "cherrypy",
    "name": "_sanitize",
    "source_code": "@classmethod\ndef _sanitize(cls, raw):\n    return cls.dangerous.sub('', raw)",
    "docstring": "Clean up the CR LF chars from input.",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\lib\\httputil.py",
    "ast_data": "FunctionDef name:_sanitize arg:cls arg:raw arguments arg arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "capture_end",
    "source_code": "def capture_end(self):\n    super().capture_end()",
    "docstring": "End CUDA graph capture on the current stream. After `~torch.cuda.graph~torch.cuda.make_graphed_callables` internally.",
    "type": "method",
    "file_path": "pytorch\\torch\\cuda\\graphs.py",
    "ast_data": "FunctionDef name:capture_end arg:self arguments arg Call Call"
  },
  {
    "library": "scipy",
    "name": "silverman_factor",
    "source_code": "def silverman_factor(self):\n    return power(self.neff * (self.d + 2.0) / 4.0, -1.0 / (self.d + 4))",
    "docstring": "Compute the Silverman factor. Returns ------- s : float The silverman factor.",
    "type": "method",
    "file_path": "scipy\\scipy\\stats\\_kde.py",
    "ast_data": "FunctionDef name:silverman_factor arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "set_up_planner",
    "source_code": "def set_up_planner(self, state_dict: STATE_DICT_TYPE, metadata: Optional[Metadata]=None, is_coordinator: bool=False) -> None:\n    super().set_up_planner(state_dict, metadata, is_coordinator)\n    state_dict_metadata: dict[str, STORAGE_TYPES] = {}\n    for key, tensor in self.state_dict.items():\n        if not torch.is_tensor(tensor):\n            raise RuntimeError(f'Non-tensor value identified at {key}. At this time {type(self).__name__} only supports loading Tensors.')\n        state_dict_metadata[key] = TensorStorageMetadata(TensorProperties(dtype=tensor.dtype), tensor.size(), _create_chunk_list(tensor))\n    self.metadata = Metadata(state_dict_metadata=state_dict_metadata)",
    "docstring": "Setups of the planner, extnding default behavior by creating the Metadata object from the state dict",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\checkpoint\\format_utils.py",
    "ast_data": "FunctionDef name:set_up_planner arg:self arg:state_dict arg:metadata arg:is_coordinator arguments arg arg arg arg Call Call For Call If Call Raise Call Call Assign Call Call Call Call Assign Call"
  },
  {
    "library": "tensorflow",
    "name": "_get_test_name_best_effort",
    "source_code": "def _get_test_name_best_effort():\n    for stack in tf_inspect.stack():\n        function_name = stack[3]\n        if function_name.startswith('test'):\n            try:\n                class_name = stack[0].f_locals['self'].__class__.__name__\n                return class_name + '.' + function_name\n            except:\n                pass\n    return None",
    "docstring": "If available, return the current test name. Otherwise, .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\memory_checker.py",
    "ast_data": "FunctionDef name:_get_test_name_best_effort arguments For Call Assign If Call Try Assign Return return:yes ExceptHandler Return return:no"
  },
  {
    "library": "cryptography",
    "name": "decrypt",
    "source_code": "@abc.abstractmethod\ndef decrypt(self, ciphertext: bytes, padding: AsymmetricPadding) -> bytes:\n    pass",
    "docstring": "Decrypts the provided ciphertext.",
    "type": "method",
    "file_path": "cryptography\\src\\cryptography\\hazmat\\primitives\\asymmetric\\rsa.py",
    "ast_data": "FunctionDef name:decrypt arg:self arg:ciphertext arg:padding arguments arg arg arg"
  },
  {
    "library": "pytorch",
    "name": "bind_symbols",
    "source_code": "def bind_symbols(self, placeholders: Sequence[FakeTensor], args: Sequence[Tensor]) -> dict[sympy.Symbol, int]:\n    bindings: dict[sympy.Symbol, int] = {}\n\n    def bind_symint(arg: object, val: object) -> None:\n        if isinstance(val, SymInt):\n            assert isinstance(arg, int)\n            s = val.node.expr\n            if isinstance(s, sympy.Symbol):\n                if s in bindings:\n                    assert bindings[s] == arg, f'{bindings[s]} != {arg}'\n                else:\n                    bindings[s] = arg\n            elif isinstance(-s, sympy.Symbol):\n                if -s in bindings:\n                    assert bindings[-s] == -arg, f'{bindings[-s]} != {-arg}'\n                else:\n                    bindings[-s] = -arg\n    for t, arg in zip(placeholders, args):\n        if t is None:\n            continue\n        if isinstance(t, SymInt):\n            bind_symint(arg, t)\n            continue\n        assert isinstance(t, torch.Tensor)\n        for i, s in enumerate(t.size()):\n            bind_symint(arg.size(i), s)\n        for i, s in enumerate(t.stride()):\n            bind_symint(arg.stride(i), s)\n        bind_symint(arg.storage_offset(), t.storage_offset())\n    return bindings",
    "docstring": "Given a paired list of placeholders (fake tensors with symbolic sizes) and concrete arguments (regular tensors with real sizes), returns a dictionary mapping each symbol to its real value. So for example, if you have a placeholder with size (s0, s1), binding (2, 4) to it will give you {s0: 2, s1: 4}. This is not guaranteed to bind ALL symbols in the ShapeEnv; we can't bind a symbol if it doesn't occur in any placeholder, and symbols that already have replacements won't get bindings. This is a little duplicative with evaluate_guards but it's different enough that it seemed cleanest to make another copy. This assumes the guards are already checked, though if it's cheap we'll check for shenanigans",
    "type": "method",
    "file_path": "pytorch\\torch\\fx\\experimental\\symbolic_shapes.py",
    "ast_data": "FunctionDef name:bind_symbols arg:self arg:placeholders arg:args arguments arg arg arg FunctionDef name:bind_symint arg:arg arg:val arguments arg arg If Call Call Assign If Call If Compare Compare Assign If Call If Compare Compare Assign For Call If Compare If Call Call Call For Call Call Call Call For Call Call Call Call Call Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "while_body",
    "source_code": "def while_body(i, *ta_list):\n    fn_conv = autograph.tf_convert(loop_fn, autograph_ctx.control_status_ctx())\n    fn_output = nest.flatten(fn_conv(i))\n    if len(fn_output) != len(flat_loop_fn_dtypes):\n        raise ValueError(f'Number of expected outputs {len(flat_loop_fn_dtypes)}, does not match the number of actual outputs {len(fn_output)} from loop_fn: {loop_fn} with output {fn_output}.')\n    outputs = []\n    del is_none_list[:]\n    is_none_list.extend((x is None for x in fn_output))\n    for out, ta in zip(fn_output, ta_list):\n        if out is not None:\n            ta = ta.write(i, out)\n        outputs.append(ta)\n    return tuple([i + 1] + outputs)",
    "docstring": "Body of while loop.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\parallel_for\\control_flow_ops.py",
    "ast_data": "FunctionDef name:while_body arg:i arguments arg arg Assign Call Call Assign Call Call If Compare Call Call Raise Call Call Call Assign Call Compare For Call If Compare Assign Call Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "get_subclass_typing_container",
    "source_code": "def get_subclass_typing_container(tensor_subclass: torch.Tensor) -> dict[type[torch.Tensor], list[type[torch.Tensor]]]:\n\n    def _get_types_for_subclass(tensor_subclass: torch.Tensor) -> None:\n        if not is_traceable_wrapper_subclass(tensor_subclass):\n            return\n        tracker[type(tensor_subclass)].append(tensor_subclass)\n        inner_keys, _ = tensor_subclass.__tensor_flatten__()\n        for key in inner_keys:\n            inner_tensor = getattr(tensor_subclass, key)\n            _get_types_for_subclass(inner_tensor)\n    tracker: dict[Any, list[Any]] = collections.defaultdict(list)\n    _get_types_for_subclass(tensor_subclass)\n    return tracker",
    "docstring": "Given a subclass, returns a recursive dictionary mapping each inner tensors to its' subclass types.",
    "type": "function",
    "file_path": "pytorch\\torch\\_functorch\\_aot_autograd\\subclass_utils.py",
    "ast_data": "FunctionDef name:get_subclass_typing_container arg:tensor_subclass arguments arg FunctionDef name:_get_types_for_subclass arg:tensor_subclass arguments arg If Call Return return:no Call Call Assign Call For Assign Call Call Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "print_disas",
    "source_code": "def print_disas(self, *, file=None, stacklevel=0):\n    tx = self.__get_tx(stacklevel)\n    print(dis.Bytecode(tx.f_code, current_offset=tx.instructions[tx.instruction_pointer].offset).dis(), file=file)",
    "docstring": "Print the current series of opcodes being executed (not including parent frames), including where you are in the particular opcode stream.",
    "type": "method",
    "file_path": "pytorch\\torch\\_dynamo\\comptime.py",
    "ast_data": "FunctionDef name:print_disas arg:self arguments arg arg arg Assign Call Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_log_flops",
    "source_code": "@ops.RegisterStatistics('Log', 'flops')\ndef _log_flops(graph, node):\n    return _unary_op_flops(graph, node)",
    "docstring": "Compute flops for Log operation.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\profiler\\internal\\flops_registry.py",
    "ast_data": "FunctionDef name:_log_flops arg:graph arg:node arguments arg arg Return return:yes Call Call"
  },
  {
    "library": "matplotlib",
    "name": "p0",
    "source_code": "@property\ndef p0(self):\n    return self.get_points()[0]",
    "docstring": "The first pair of (*x*, *y*) coordinates that define the bounding box. This is not guaranteed to be the bottom-left corner (for that, use :attr:).",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\transforms.py",
    "ast_data": "FunctionDef name:p0 arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "safe_embedding_lookup_sparse_v2",
    "source_code": "@tf_export('nn.safe_embedding_lookup_sparse', v1=[])\n@dispatch.add_dispatch_support\ndef safe_embedding_lookup_sparse_v2(embedding_weights, sparse_ids, sparse_weights=None, combiner='mean', default_id=None, max_norm=None, name=None, allow_fast_lookup=False):\n    return safe_embedding_lookup_sparse(embedding_weights, sparse_ids, sparse_weights=sparse_weights, combiner=combiner, default_id=default_id, name=name, partition_strategy='div', max_norm=max_norm, allow_fast_lookup=allow_fast_lookup)",
    "docstring": "Lookup embedding results, accounting for invalid IDs and empty features. The partitioned embedding in must all be the same shape except for the first dimension. The first dimension is allowed to vary as the vocabulary size is not necessarily a multiple of num of shards. This is similar to , except invalid IDs ( 1ididsembedding_weights[[0, 1, 2], [3, 4, 5], [6, 7, 8], [9, 10], [11, 12]](max_id + 1) % len(embedding_weights)SparseTensor[d_0, d_1, ..., d_n]d_0RaggedTensorSparseTensorRaggedTensorsparse_idssparse_idsNoneNoneparamsmax_normNoneTruesparse_idsshape(combined embedding_weights) = [p0, p1, ..., pm]shape(sparse_ids) = shape(sparse_weights) = [d0, d1, ..., dn]shape(output) = [d0, d1, ... dn-1, p1, ..., pm]default_idcombinerembedding_weights` is empty.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\embedding_ops.py",
    "ast_data": "FunctionDef name:safe_embedding_lookup_sparse_v2 arg:embedding_weights arg:sparse_ids arg:sparse_weights arg:combiner arg:default_id arg:max_norm arg:name arg:allow_fast_lookup arguments arg arg arg arg arg arg arg arg Return return:yes Call Call"
  },
  {
    "library": "matplotlib",
    "name": "imshow_rgb",
    "source_code": "def imshow_rgb(self, r, g, b, **kwargs):\n    if not r.shape == g.shape == b.shape:\n        raise ValueError(f'Input shapes ({r.shape}, {g.shape}, {b.shape}) do not match')\n    RGB = np.dstack([r, g, b])\n    R = np.zeros_like(RGB)\n    R[:, :, 0] = r\n    G = np.zeros_like(RGB)\n    G[:, :, 1] = g\n    B = np.zeros_like(RGB)\n    B[:, :, 2] = b\n    im_rgb = self.RGB.imshow(RGB, **kwargs)\n    im_r = self.R.imshow(R, **kwargs)\n    im_g = self.G.imshow(G, **kwargs)\n    im_b = self.B.imshow(B, **kwargs)\n    return (im_rgb, im_r, im_g, im_b)",
    "docstring": "Create the four images {rgb, r, g, b}. Parameters ---------- r, g, b : array-like The red, green, and blue arrays. **kwargs Forwarded to calls for the four images. Returns ------- rgb : r : g : b :",
    "type": "method",
    "file_path": "matplotlib\\lib\\mpl_toolkits\\axes_grid1\\axes_rgb.py",
    "ast_data": "FunctionDef name:imshow_rgb arg:self arg:r arg:g arg:b arguments arg arg arg arg arg If Compare Raise Call Assign Call Assign Call Assign Assign Call Assign Assign Call Assign Assign Call Assign Call Assign Call Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "scatter_add",
    "source_code": "def scatter_add(self, sparse_delta, use_locking=False, name=None):\n    per_var_sparse_delta = self._decompose_indexed_slices(sparse_delta)\n    for i, v in enumerate(self._variables):\n        new_name = None\n        if name is not None:\n            new_name = '{}/part_{}'.format(name, i)\n        v.scatter_add(per_var_sparse_delta[i], name=new_name)\n    return self",
    "docstring": "Implements tf.Variable.scatter_add.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\sharded_variable.py",
    "ast_data": "FunctionDef name:scatter_add arg:self arg:sparse_delta arg:use_locking arg:name arguments arg arg arg arg Assign Call For Call Assign If Compare Assign Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "dump_chrome_trace",
    "source_code": "def dump_chrome_trace(f, input, trace_filename, optimize_ctx, activities, num_runs=1, devices=None, kwargs_for_f=None, kwargs_for_profiler=None):\n    if devices is None:\n        devices = ['cuda']\n    global synchronize\n    if devices != ['cpu'] and torch.cuda.is_available():\n        synchronize = torch.cuda.synchronize\n    if kwargs_for_f is None:\n        kwargs_for_f = {}\n    if kwargs_for_profiler is None:\n        kwargs_for_profiler = {}\n    with optimize_ctx:\n        torch.manual_seed(1337)\n        for _ in range(5):\n            f(input, **kwargs_for_f)\n            synchronize()\n        torch.manual_seed(1337)\n        t0 = time.perf_counter()\n        for _ in range(num_runs):\n            f(input, **kwargs_for_f)\n            synchronize()\n        t1 = time.perf_counter()\n    timing = t1 - t0\n    with profile(activities=activities, **kwargs_for_profiler) as prof:\n        with optimize_ctx:\n            synchronize()\n            torch.manual_seed(1337)\n            for _ in range(num_runs):\n                f(input, **kwargs_for_f)\n                synchronize()\n    prof.export_chrome_trace(trace_filename)\n    return timing",
    "docstring": "Output the chrome trace of running f(input, **kwargs_for_f) with [optimize_ctx] [num_runs] times to [trace_filename]. [activities] are the activities that the profiler will record, e.g. ProfilerActivity.CUDA. Return total runtime without the profiler Outputs to trace_filename",
    "type": "function",
    "file_path": "pytorch\\torch\\_functorch\\benchmark_utils.py",
    "ast_data": "FunctionDef name:dump_chrome_trace arg:f arg:input arg:trace_filename arg:optimize_ctx arg:activities arg:num_runs arg:devices arg:kwargs_for_f arg:kwargs_for_profiler arguments arg arg arg arg arg arg arg arg arg If Compare Assign If BoolOp Compare Call Assign If Compare Assign If Compare Assign With Call For Call Call Call Call Assign Call For Call Call Call Assign Call Assign With Call With Call Call For Call Call Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "internal_convert_to_tensor_or_composite",
    "source_code": "def internal_convert_to_tensor_or_composite(value, dtype=None, name=None, as_ref=False) -> Union[EagerTensor, SymbolicTensor, composite_tensor.CompositeTensor]:\n    if isinstance(value, composite_tensor.CompositeTensor):\n        value_dtype = getattr(value, 'dtype', None)\n        if dtype and (not dtypes.as_dtype(dtype).is_compatible_with(value_dtype)):\n            raise ValueError(f'Tensor conversion dtype mismatch. Requested dtype is {dtypes.as_dtype(dtype).name}, Tensor has dtype {value.dtype.name}: {value!r}')\n        return value\n    else:\n        return convert_to_tensor(value, dtype=dtype, name=name, as_ref=as_ref, accepted_result_types=(tensor_lib.Tensor, composite_tensor.CompositeTensor))",
    "docstring": "Converts the given object to a or . If is a it is returned unmodified. Otherwise, it is converted to a using . Args: value: A , or an object that can be consumed by . dtype: (Optional.) The required of the returned or . name: (Optional.) A name to use if a new is created. as_ref: True if the caller wants the results as ref tensors. Returns: A or , based on . Raises: ValueError: If does not match the element type of .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\ops.py",
    "ast_data": "FunctionDef name:internal_convert_to_tensor_or_composite arg:value arg:dtype arg:name arg:as_ref arguments arg arg arg arg If Call Assign Call If BoolOp Call Call Raise Call Call Return return:yes Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "get_width_ratios",
    "source_code": "def get_width_ratios(self):\n    return self._col_width_ratios",
    "docstring": "Return the width ratios. This is *None* if no width ratios have been set explicitly.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\gridspec.py",
    "ast_data": "FunctionDef name:get_width_ratios arg:self arguments arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "CUDASanitizerErrors",
    "source_code": "class CUDASanitizerErrors(Exception):\n\n    def __init__(self, errors: list[SynchronizationError]):\n        self.errors = errors\n\n    def __str__(self):\n        return f'detected {len(self.errors)} errors'",
    "docstring": "Wrapper class for errors reported by CUDA Sanitizer.",
    "type": "class",
    "file_path": "pytorch\\torch\\cuda\\_sanitizer.py",
    "ast_data": "ClassDef name:CUDASanitizerErrors FunctionDef name:__init__ arg:self arg:errors arguments arg arg Assign FunctionDef name:__str__ arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "_broadcast_concatenate",
    "source_code": "def _broadcast_concatenate(arrays, axis, paired=False):\n    arrays = _broadcast_arrays(arrays, axis if not paired else None)\n    res = np.concatenate(arrays, axis=axis)\n    return res",
    "docstring": "Concatenate arrays along an axis with broadcasting.",
    "type": "function",
    "file_path": "scipy\\scipy\\stats\\_axis_nan_policy.py",
    "ast_data": "FunctionDef name:_broadcast_concatenate arg:arrays arg:axis arg:paired arguments arg arg arg Assign Call Assign Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "replace_extern_shared",
    "source_code": "def replace_extern_shared(input_string):\n    output_string = input_string\n    output_string = RE_EXTERN_SHARED.sub(lambda inp: f'HIP_DYNAMIC_SHARED({inp.group(1) or ''} {inp.group(2)}, {inp.group(3)})', output_string)\n    return output_string",
    "docstring": "Match extern __shared__ type foo[]; syntax and use HIP_DYNAMIC_SHARED() MACRO instead. Example: \"extern __shared__ char smemChar[];\" => \"HIP_DYNAMIC_SHARED( char, smemChar)\" \"extern __shared__ unsigned char smem[];\" => \"HIP_DYNAMIC_SHARED( unsigned char, my_smem)\"",
    "type": "function",
    "file_path": "pytorch\\torch\\utils\\hipify\\hipify_python.py",
    "ast_data": "FunctionDef name:replace_extern_shared arg:input_string arguments arg Assign Assign Call arguments arg BoolOp Call Call Call Return return:yes"
  },
  {
    "library": "numpy",
    "name": "mean",
    "source_code": "def mean(self, axis=None, dtype=None, out=None):\n    return N.ndarray.mean(self, axis, dtype, out, keepdims=True)._collapse(axis)",
    "docstring": "Returns the average of the matrix elements along the given axis. Refer to for full documentation. See Also -------- numpy.mean Notes ----- Same as except that, where that returns an , this returns a object. Examples -------- >>> x = np.matrix(np.arange(12).reshape((3, 4))) >>> x matrix([[ 0, 1, 2, 3], [ 4, 5, 6, 7], [ 8, 9, 10, 11]]) >>> x.mean() 5.5 >>> x.mean(0) matrix([[4., 5., 6., 7.]]) >>> x.mean(1) matrix([[ 1.5], [ 5.5], [ 9.5]])",
    "type": "method",
    "file_path": "numpy\\numpy\\matrixlib\\defmatrix.py",
    "ast_data": "FunctionDef name:mean arg:self arg:axis arg:dtype arg:out arguments arg arg arg arg Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "distribute_datasets_from_function",
    "source_code": "def distribute_datasets_from_function(self, dataset_fn, options=None):\n    return super(OneDeviceStrategy, self).distribute_datasets_from_function(dataset_fn, options)",
    "docstring": "Distributes instances created by calls to . will be called once for each worker in the strategy. In this case, we only have one worker and one device so is called once. The should take an instance where information about batching and input replication can be accessed: IMPORTANT: The returned by should have a per-replica batch size, unlike , which uses the global batch size. This may be computed using . Args: dataset_fn: A function taking a instance and returning a . options: used to control options on how this dataset is distributed. Returns: A \"distributed \", which the caller can iterate over like regular datasets.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\one_device_strategy.py",
    "ast_data": "FunctionDef name:distribute_datasets_from_function arg:self arg:dataset_fn arg:options arguments arg arg arg Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "byte_swap_buffer_content",
    "source_code": "def byte_swap_buffer_content(buffer, chunksize, from_endiness, to_endiness):\n    to_swap = [buffer.data[i:i + chunksize] for i in range(0, len(buffer.data), chunksize)]\n    buffer.data = b''.join([int.from_bytes(byteswap, from_endiness).to_bytes(chunksize, to_endiness) for byteswap in to_swap])",
    "docstring": "Helper function for byte-swapping the buffers field.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\lite\\tools\\flatbuffer_utils.py",
    "ast_data": "FunctionDef name:byte_swap_buffer_content arg:buffer arg:chunksize arg:from_endiness arg:to_endiness arguments arg arg arg arg Assign Call Call Assign Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "Sampler",
    "source_code": "class Sampler(Metric):\n    __slots__ = []\n\n    def __init__(self, name, buckets, description, *labels):\n        super(Sampler, self).__init__('Sampler', _sampler_methods, len(labels), name, buckets.buckets, description, *labels)\n\n    def get_cell(self, *labels):\n        return SamplerCell(super(Sampler, self).get_cell(*labels))",
    "docstring": "A stateful class for updating a cumulative histogram metric. This class encapsulates a set of histograms (or a single histogram for a label-less metric) configured with a list of increasing bucket boundaries. Each histogram is identified by a tuple of labels. The class allows the user to add a sample to each histogram value.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\monitoring.py",
    "ast_data": "ClassDef name:Sampler Assign FunctionDef name:__init__ arg:self arg:name arg:buckets arg:description arguments arg arg arg arg arg Call Call Call FunctionDef name:get_cell arg:self arguments arg arg Return return:yes Call Call Call"
  },
  {
    "library": "django",
    "name": "builtin_template_path",
    "source_code": "def builtin_template_path(name):\n    return Path(__file__).parent / 'templates' / name",
    "docstring": "Return a path to a builtin template. Avoid calling this function at the module level or in a class-definition because __file__ may not exist, e.g. in frozen environments.",
    "type": "function",
    "file_path": "django\\django\\views\\i18n.py",
    "ast_data": "FunctionDef name:builtin_template_path arg:name arguments arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_python_properties_internal",
    "source_code": "def _python_properties_internal(self):\n    metadata = dict(name=self.obj.name, trainable=self.obj.trainable, expects_training_arg=self.obj._expects_training_arg, dtype=policy.serialize(self.obj._dtype_policy), batch_input_shape=getattr(self.obj, '_batch_input_shape', None), stateful=self.obj.stateful, must_restore_from_config=self.obj._must_restore_from_config)\n    metadata.update(get_serialized(self.obj))\n    if self.obj.input_spec is not None:\n        metadata['input_spec'] = nest.map_structure(lambda x: generic_utils.serialize_keras_object(x) if x else None, self.obj.input_spec)\n    if self.obj.activity_regularizer is not None and hasattr(self.obj.activity_regularizer, 'get_config'):\n        metadata['activity_regularizer'] = generic_utils.serialize_keras_object(self.obj.activity_regularizer)\n    if self.obj._build_input_shape is not None:\n        metadata['build_input_shape'] = self.obj._build_input_shape\n    return metadata",
    "docstring": "Returns dictionary of all python properties.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\saving\\saved_model\\layer_serialization.py",
    "ast_data": "FunctionDef name:_python_properties_internal arg:self arguments arg Assign Call Call Call Call Call If Compare Assign Call arguments arg Call If BoolOp Compare Call Assign Call If Compare Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "input_mask",
    "source_code": "@property\ndef input_mask(self):\n    inputs = self.input\n    if isinstance(inputs, list):\n        return [getattr(x, '_keras_mask', None) for x in inputs]\n    else:\n        return getattr(inputs, '_keras_mask', None)",
    "docstring": "Retrieves the input mask tensor(s) of a layer. Only applicable if the layer has exactly one inbound node, i.e. if it is connected to one incoming layer. Returns: Input mask tensor (potentially None) or list of input mask tensors. Raises: AttributeError: if the layer is connected to more than one incoming layers.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\base_layer_v1.py",
    "ast_data": "FunctionDef name:input_mask arg:self arguments arg Assign If Call Return return:yes Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_TensorScatterMinOrMaxGrad",
    "source_code": "def _TensorScatterMinOrMaxGrad(op: ops.Operation, grad):\n    indices = op.inputs[1]\n    x = op.inputs[0]\n    y = op.inputs[2]\n    output = op.outputs[0]\n    x_indicators = math_ops.cast(math_ops.equal(x, output), grad.dtype)\n    y_output = array_ops.gather_nd(output, indices)\n    y_indicators = math_ops.cast(math_ops.equal(y, y_output), grad.dtype)\n    ys_indicators = array_ops.scatter_nd(indices, y_indicators, array_ops.shape(x, out_type=indices.dtype))\n    indicators = x_indicators + ys_indicators\n    x_grad = grad * x_indicators / indicators\n    y_grad = array_ops.gather_nd(grad / indicators, indices) * y_indicators\n    return [x_grad, None, y_grad]",
    "docstring": "Gradient for TensorScatterMin and TensorScatterMax.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\array_grad.py",
    "ast_data": "FunctionDef name:_TensorScatterMinOrMaxGrad arg:op arg:grad arguments arg arg Assign Assign Assign Assign Assign Call Call Assign Call Assign Call Call Assign Call Call Assign Assign Assign Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "create_names_map",
    "source_code": "def create_names_map(named_params: Union[dict[str, Tensor], Iterable[tuple[str, Tensor]]], tied_named_params: Union[dict[str, Tensor], Iterable[tuple[str, Tensor]]]) -> dict[str, list[str]]:\n    named_params = dict(named_params)\n    tied_named_params = dict(tied_named_params)\n    tensors_dict_keys = set(named_params.keys())\n    tied_tensors_dict_keys = set(tied_named_params.keys())\n    assert tensors_dict_keys.issubset(tied_tensors_dict_keys)\n    tensor_to_mapping: dict[Tensor, tuple[str, list[str]]] = {}\n    for key, tensor in named_params.items():\n        tensor_to_mapping[tensor] = (key, [])\n    for key, tensor in tied_named_params.items():\n        assert tensor in tensor_to_mapping\n        tensor_to_mapping[tensor][1].append(key)\n    return dict(tensor_to_mapping.values())",
    "docstring": "named_params is a dictionary of tensors: {'A': A, 'B': B} tied_named_params is another dictionary of tensors {'A': A, 'B': B, 'B_tied': B} with potentially tied (or 'duplicated') tensors This function creates a mapping from the names in named_params to the names in tied_named_params: {'A': ['A'], 'B': ['B', 'B_tied']}.",
    "type": "function",
    "file_path": "pytorch\\torch\\_functorch\\make_functional.py",
    "ast_data": "FunctionDef name:create_names_map arg:named_params arg:tied_named_params arguments arg arg Assign Call Assign Call Assign Call Call Assign Call Call Call For Call Assign For Call Compare Call Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_colocation_stack",
    "source_code": "@property\ndef _colocation_stack(self) -> traceable_stack.TraceableStack:\n    if self._stack_state_is_thread_local:\n        if not hasattr(self._thread_local, '_colocation_stack'):\n            stack_copy_for_this_thread = self._graph_colocation_stack.copy()\n            self._thread_local._colocation_stack = stack_copy_for_this_thread\n        return self._thread_local._colocation_stack\n    else:\n        return self._graph_colocation_stack",
    "docstring": "Return thread-local copy of colocation stack.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\ops.py",
    "ast_data": "FunctionDef name:_colocation_stack arg:self arguments arg If If Call Assign Call Assign Return return:yes Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "get_glibc_version",
    "source_code": "def get_glibc_version():\n    key = 'glibc_ver'\n    out, err = run_shell_cmd(cmds_all[PLATFORM.lower()][key])\n    if err and FLAGS.debug:\n        print('Error in detecting GCC version:\\n %s' % str(err))\n    return out.strip(b'\\n')",
    "docstring": "Retrieves version of GLIBC detected. Returns: String that is the version of GLIBC. e.g. '2.24'",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\tools\\tensorflow_builder\\config_detector\\config_detector.py",
    "ast_data": "FunctionDef name:get_glibc_version arguments Assign Assign Call Call If BoolOp Call Call Return return:yes Call"
  },
  {
    "library": "cherrypy",
    "name": "synthesize_nonce",
    "source_code": "def synthesize_nonce(s, key, timestamp=None):\n    if timestamp is None:\n        timestamp = int(time.time())\n    h = md5_hex('%s:%s:%s' % (timestamp, s, key))\n    nonce = '%s:%s' % (timestamp, h)\n    return nonce",
    "docstring": "Synthesize a nonce value. A nonce value resists spoofing and can be checked for staleness. Returns a string suitable as the value for 'nonce' in the www-authenticate header. s A string related to the resource, such as the hostname of the server. key A secret string known only to the server. timestamp An integer seconds-since-the-epoch timestamp",
    "type": "function",
    "file_path": "cherrypy\\cherrypy\\lib\\auth_digest.py",
    "ast_data": "FunctionDef name:synthesize_nonce arg:s arg:key arg:timestamp arguments arg arg arg If Compare Assign Call Call Assign Call Assign Return return:yes"
  },
  {
    "library": "scipy",
    "name": "_minimize_cobyqa",
    "source_code": "def _minimize_cobyqa(fun, x0, args=(), bounds=None, constraints=(), callback=None, disp=False, maxfev=None, maxiter=None, f_target=-np.inf, feasibility_tol=1e-08, initial_tr_radius=1.0, final_tr_radius=1e-06, scale=False, **unknown_options):\n    from .._lib.cobyqa import minimize\n    _check_unknown_options(unknown_options)\n    options = {'disp': bool(disp), 'maxfev': int(maxfev) if maxfev is not None else 500 * len(x0), 'maxiter': int(maxiter) if maxiter is not None else 1000 * len(x0), 'target': float(f_target), 'feasibility_tol': float(feasibility_tol), 'radius_init': float(initial_tr_radius), 'radius_final': float(final_tr_radius), 'scale': bool(scale)}\n    with COBYQA_LOCK:\n        return minimize(fun, x0, args, bounds, constraints, callback, options)",
    "docstring": "Minimize a scalar function of one or more variables using the Constrained Optimization BY Quadratic Approximations (COBYQA) algorithm [1]_. .. versionadded:: 1.14.0 Options ------- disp : bool Set to True to print information about the optimization procedure. Default is `feasibility_toltolminimize[-1, 1]`. References ---------- .. [1] COBYQA",
    "type": "function",
    "file_path": "scipy\\scipy\\optimize\\_cobyqa_py.py",
    "ast_data": "FunctionDef name:_minimize_cobyqa arg:fun arg:x0 arg:args arg:bounds arg:constraints arg:callback arg:disp arg:maxfev arg:maxiter arg:f_target arg:feasibility_tol arg:initial_tr_radius arg:final_tr_radius arg:scale arguments arg arg arg arg arg arg arg arg arg arg arg arg arg arg arg Call Assign Call Compare Call Call Compare Call Call Call Call Call Call Call With Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "get_format_datetime64",
    "source_code": "def get_format_datetime64(is_dates_only: bool, nat_rep: str='NaT', date_format: str | None=None) -> Callable:\n    if is_dates_only:\n        return lambda x: _format_datetime64_dateonly(x, nat_rep=nat_rep, date_format=date_format)\n    else:\n        return lambda x: _format_datetime64(x, nat_rep=nat_rep)",
    "docstring": "Return a formatter callable taking a datetime64 as input and providing a string as output",
    "type": "function",
    "file_path": "pandas\\pandas\\io\\formats\\format.py",
    "ast_data": "FunctionDef name:get_format_datetime64 arg:is_dates_only arg:nat_rep arg:date_format arguments arg arg arg If Return return:yes arguments arg Call Return return:yes arguments arg Call"
  },
  {
    "library": "tensorflow",
    "name": "freeze_model",
    "source_code": "def freeze_model():\n    checkpoint_path = _SMCLI_CHECKPOINT_PATH.value or os.path.join(_SMCLI_DIR.value, 'variables/variables')\n    if not _SMCLI_VARIABLES_TO_FEED.value:\n        variables_to_feed = []\n    elif _SMCLI_VARIABLES_TO_FEED.value.lower() == 'all':\n        variables_to_feed = None\n    else:\n        variables_to_feed = _SMCLI_VARIABLES_TO_FEED.value.split(',')\n    saved_model_aot_compile.freeze_model(checkpoint_path=checkpoint_path, meta_graph_def=saved_model_utils.get_meta_graph_def(_SMCLI_DIR.value, _SMCLI_TAG_SET.value), signature_def_key=_SMCLI_SIGNATURE_DEF_KEY.value, variables_to_feed=variables_to_feed, output_prefix=_SMCLI_OUTPUT_PREFIX.value)",
    "docstring": "Function triggered by freeze_model command.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\tools\\saved_model_cli.py",
    "ast_data": "FunctionDef name:freeze_model arguments Assign BoolOp Call If Assign If Compare Call Assign Assign Call Call Call"
  },
  {
    "library": "scipy",
    "name": "write_header",
    "source_code": "def write_header(self, shape, mclass, is_complex=False, is_logical=False, nzmax=0):\n    name = self._var_name\n    is_global = self._var_is_global\n    self._mat_tag_pos = self.file_stream.tell()\n    self.write_bytes(self.mat_tag)\n    af = np.zeros((), NDT_ARRAY_FLAGS)\n    af['data_type'] = miUINT32\n    af['byte_count'] = 8\n    flags = is_complex << 3 | is_global << 2 | is_logical << 1\n    af['flags_class'] = mclass | flags << 8\n    af['nzmax'] = nzmax\n    self.write_bytes(af)\n    self.write_element(np.array(shape, dtype='i4'))\n    name = np.asarray(name)\n    if name == '':\n        self.write_smalldata_element(name, miINT8, 0)\n    else:\n        self.write_element(name, miINT8)\n    self._var_name = ''\n    self._var_is_global = False",
    "docstring": "Write header for given data options shape : sequence array shape mclass - mat5 matrix class is_complex - True if matrix is complex is_logical - True if matrix is logical nzmax - max non zero elements for sparse arrays We get the name and the global flag from the object, and reset them to defaults after we've used them",
    "type": "method",
    "file_path": "scipy\\scipy\\io\\matlab\\_mio5.py",
    "ast_data": "FunctionDef name:write_header arg:self arg:shape arg:mclass arg:is_complex arg:is_logical arg:nzmax arguments arg arg arg arg arg arg Assign Assign Assign Call Call Assign Call Assign Assign Assign Assign Assign Call Call Call Assign Call If Compare Call Call Assign Assign"
  },
  {
    "library": "tensorflow",
    "name": "_add_deprecated_function_notice_to_docstring",
    "source_code": "def _add_deprecated_function_notice_to_docstring(doc, date, instructions):\n    main_text = ['THIS FUNCTION IS DEPRECATED. It will be removed %s.' % ('in a future version' if date is None else 'after %s' % date)]\n    if instructions:\n        main_text.append('Instructions for updating:')\n    return decorator_utils.add_notice_to_docstring(doc, instructions, 'DEPRECATED FUNCTION', '(deprecated)', main_text, notice_type='Deprecated')",
    "docstring": "Adds a deprecation notice to a docstring for deprecated functions.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\util\\deprecation.py",
    "ast_data": "FunctionDef name:_add_deprecated_function_notice_to_docstring arg:doc arg:date arg:instructions arguments arg arg arg Assign Compare If Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "estimate_op_runtime",
    "source_code": "def estimate_op_runtime(snode: BaseSchedulerNode) -> float:\n    if config.estimate_op_runtime == 'default':\n        runtime = snode.get_estimated_runtime()\n    else:\n        assert callable(config.estimate_op_runtime)\n        runtime = config.estimate_op_runtime(snode)\n    return runtime",
    "docstring": "Returns estimated op runtime in nanoseconds (ns)",
    "type": "function",
    "file_path": "pytorch\\torch\\_inductor\\comms.py",
    "ast_data": "FunctionDef name:estimate_op_runtime arg:snode arguments arg If Compare Assign Call Call Assign Call Return return:yes"
  },
  {
    "library": "numpy",
    "name": "fmask",
    "source_code": "def fmask(x):\n    if x is masked:\n        return True\n    return filled(x)",
    "docstring": "Returns the filled array, or True if masked.",
    "type": "function",
    "file_path": "numpy\\numpy\\ma\\core.py",
    "ast_data": "FunctionDef name:fmask arg:x arguments arg If Compare Return return:yes Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "copy_to_graph_uninitialized",
    "source_code": "def copy_to_graph_uninitialized(var):\n    new_variable = UninitializedVariable(trainable=var.trainable, constraint=var._constraint, shape=var.shape, dtype=var.dtype, name=var._shared_name, synchronization=var.synchronization, aggregation=var.aggregation, extra_handle_data=var.handle)\n    new_variable._maybe_initialize_trackable()\n    return new_variable",
    "docstring": "Copies an existing variable to a new graph, with no initializer.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\resource_variable_ops.py",
    "ast_data": "FunctionDef name:copy_to_graph_uninitialized arg:var arguments arg Assign Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "transfer_parametrizations_and_params",
    "source_code": "def transfer_parametrizations_and_params(from_module: Module, to_module: Module, tensor_name: Optional[str]=None) -> Module:\n    if is_parametrized(from_module):\n        assert isinstance(from_module.parametrizations, ModuleDict)\n        parameters_to_transfer: Union[list, ModuleDict] = from_module.parametrizations if tensor_name is None else [tensor_name]\n        assert hasattr(parameters_to_transfer, '__iter__')\n        for parameter_name in parameters_to_transfer:\n            if not hasattr(to_module, parameter_name):\n                setattr(to_module, parameter_name, Parameter(getattr(from_module, parameter_name)))\n            for param_func in from_module.parametrizations[parameter_name]:\n                register_parametrization(to_module, parameter_name, param_func)\n            assert isinstance(to_module.parametrizations, ModuleDict)\n            if hasattr(from_module.parametrizations[parameter_name], 'original'):\n                to_module.parametrizations[parameter_name].original = from_module.parametrizations[parameter_name].original\n            else:\n                num = 0\n                orig_num = 'original' + str(num)\n                while hasattr(from_module.parametrizations[parameter_name], orig_num):\n                    setattr(to_module.parametrizations[parameter_name], orig_num, getattr(from_module.parametrizations[parameter_name], orig_num))\n                    num = num + 1\n                    orig_num = 'original' + str(num)\n    return to_module",
    "docstring": "Transfer parametrizations and the parameters they parametrize from :attr: to :attr:. If :attr: is specified, only transfers the specified parameter, otherwise transfers all parametrized parameters. If those parameters do not exist in to_module, it will create them. Does nothing if from_module is not parametrized. Args: from_module (nn.Module): module to transfer from to_module (nn.Module): module to transfer to tensor_name (str, optional): parameter to transfer Returns: Module: to_module",
    "type": "function",
    "file_path": "pytorch\\torch\\nn\\utils\\parametrize.py",
    "ast_data": "FunctionDef name:transfer_parametrizations_and_params arg:from_module arg:to_module arg:tensor_name arguments arg arg arg If Call Call Compare Call For If Call Call Call Call For Call Call If Call Assign Assign Assign Call While Call Call Call Assign Assign Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_eliminate_duplicate_packed_nodes",
    "source_code": "def _eliminate_duplicate_packed_nodes(gm):\n    if not (torch.backends.mkldnn.enabled and torch.backends.mkldnn.is_available()):\n        return gm\n    packed_weight_ops = [torch._C._nn.mkldnn_reorder_conv2d_weight, torch._C._nn.mkldnn_reorder_conv3d_weight, mkldnn._reorder_convolution_transpose_weight, mkldnn._reorder_linear_weight, mkldnn._reorder_mkldnn_rnn_layer_weight]\n    if torch._C.has_mkl:\n        packed_weight_ops.append(torch.ops.mkl._mkl_reorder_linear_weight)\n    for node in gm.graph.nodes:\n        if node.target in packed_weight_ops and len(node.args[0].users) > 1:\n            for user_node in list(node.args[0].users.keys()):\n                if user_node.target == node.target and user_node != node and (user_node.args == node.args):\n                    user_node.replace_all_uses_with(node)\n                    gm.graph.erase_node(user_node)",
    "docstring": "Combine packed weight nodes with the same inputs to reduce memory usage. for example: class Model(nn.Module): def __init__(self) -> None: super().__init__() self.linear = nn.Linear(32, 32, bias=True) def forward(self, x): return self.linear(self.linear(x)) the above's packed weight nodes are duplicate if two linear calls have same input size.",
    "type": "function",
    "file_path": "pytorch\\torch\\_inductor\\fx_passes\\mkldnn_fusion.py",
    "ast_data": "FunctionDef name:_eliminate_duplicate_packed_nodes arg:gm arguments arg If BoolOp Call Return return:yes Assign If Call For If BoolOp Compare Compare Call For Call Call If BoolOp Compare Compare Compare Call Call"
  },
  {
    "library": "matplotlib",
    "name": "get_geometry",
    "source_code": "def get_geometry(self):\n    return (self._nrows, self._ncols)",
    "docstring": "Return the number of rows and columns of the grid as (nrows, ncols).",
    "type": "method",
    "file_path": "matplotlib\\lib\\mpl_toolkits\\axes_grid1\\axes_grid.py",
    "ast_data": "FunctionDef name:get_geometry arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "on_test_batch_end",
    "source_code": "def on_test_batch_end(self, batch, logs=None):\n    if self._should_call_test_batch_hooks:\n        self._call_batch_hook(ModeKeys.TEST, 'end', batch, logs=logs)",
    "docstring": "Calls the methods of its callbacks. Args: batch: Integer, index of batch within the current epoch. logs: Dict. Aggregated metric results up until this batch.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\callbacks.py",
    "ast_data": "FunctionDef name:on_test_batch_end arg:self arg:batch arg:logs arguments arg arg arg If Call"
  },
  {
    "library": "tensorflow",
    "name": "dense_to_sparse_batch",
    "source_code": "@tf_export('data.experimental.dense_to_sparse_batch')\n@deprecation.deprecated(None, 'Use `tf.data.Dataset.sparse_batch` instead.')\ndef dense_to_sparse_batch(batch_size, row_shape):\n\n    def _apply_fn(dataset):\n        return dataset.sparse_batch(batch_size, row_shape)\n    return _apply_fn",
    "docstring": "A transformation that batches ragged elements into s. Like , this transformation combines multiple consecutive elements of the dataset, which might have different shapes, into a single element. The resulting element has three components (, , and ), which comprise a that represents the same data. The represents the dense shape of each row in the resulting , to which the effective batch size is prepended. For example: Args: batch_size: A scalar , representing the number of consecutive elements of this dataset to combine in a single batch. row_shape: A or vector tensor-like object representing the equivalent dense shape of a row in the resulting . Each element of this dataset must have the same rank as , and must have size less than or equal to in each dimension. Returns: A transformation function, which can be passed to .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\data\\experimental\\ops\\batching.py",
    "ast_data": "FunctionDef name:dense_to_sparse_batch arg:batch_size arg:row_shape arguments arg arg FunctionDef name:_apply_fn arg:dataset arguments arg Return return:yes Call Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "compute_boundary_check",
    "source_code": "def compute_boundary_check(self, get_max_block: Callable[[str], int]) -> None:\n    sizevars = V.graph.sizevars\n    block_to_max: dict[sympy.Expr, Any] = {block_size: get_max_block(prefix_str[symt]) for symt, block_size in TritonSymbols.block_sizes.items()}\n    self._boundary_check = [idx for idx in range(len(self.shape)) if not sizevars.statically_known_equals(self.strides[idx], sympy.S.Zero) and (not sizevars.statically_known_multiple_of(self.shape[idx], self.block_shape[idx])) and (not sizevars.statically_known_multiple_of(self.shape[idx], sympy_subs(self.block_shape[idx], block_to_max))) and (not (V.kernel.no_x_dim and self.block_shape[idx] == TritonSymbols.block_sizes[SymT.XBLOCK]))]",
    "docstring": "List of indices to pass to tl.load(boundary_check=...)",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\codegen\\triton.py",
    "ast_data": "FunctionDef name:compute_boundary_check arg:self arg:get_max_block arguments arg arg Assign Call Call Assign Call Call BoolOp Call Call Call Call BoolOp Compare"
  },
  {
    "library": "pytorch",
    "name": "range",
    "source_code": "@contextmanager\ndef range(msg, *args, **kwargs):\n    range_push(msg.format(*args, **kwargs))\n    try:\n        yield\n    finally:\n        range_pop()",
    "docstring": "Context manager / decorator that pushes an NVTX range at the beginning of its scope, and pops it at the end. If extra arguments are given, they are passed as arguments to msg.format(). Args: msg (str): message to associate with the range",
    "type": "function",
    "file_path": "pytorch\\torch\\cuda\\nvtx.py",
    "ast_data": "FunctionDef name:range arg:msg arguments arg arg arg Call Call Try Call"
  },
  {
    "library": "pytorch",
    "name": "GradNotSetToNonePattern",
    "source_code": "class GradNotSetToNonePattern(Pattern):\n\n    def __init__(self, prof: profile, should_benchmark: bool=False):\n        super().__init__(prof, should_benchmark)\n        self.name = 'Gradient Set To Zero Instead of None Pattern'\n        self.description = \"Detected gradient set to zero instead of None. Please add 'set_to_none=True' when calling zero_grad().\"\n        self.url = 'https://pytorch.org/tutorials/recipes/recipes/tuning_guide.html#disable-gradient-calculation-for-validation-or-inference'\n\n    def match(self, event: _ProfilerEvent):\n        if not event.name.endswith(': zero_grad'):\n            return False\n        if not event.children:\n            return False\n        for sub_event in traverse_dfs(event.children):\n            if sub_event.name == 'aten::zero_' and sub_event.parent.name != 'aten::zeros':\n                return True\n        return False",
    "docstring": "This pattern identifies if we are not setting grad to None in zero_grad. example: optimizer.zero_grad() By setting set_to_none=True, we can gain speedup Pattern: XXXXX: _zero_grad NOT aten::zeros aten::zero_ aten::zero_ is called on each parameter in the model. We also want to make sure it is not called by aten::zeros. Algorithm: String match",
    "type": "class",
    "file_path": "pytorch\\torch\\profiler\\_pattern_matcher.py",
    "ast_data": "ClassDef name:GradNotSetToNonePattern FunctionDef name:__init__ arg:self arg:prof arg:should_benchmark arguments arg arg arg Call Call Assign Assign Assign FunctionDef name:match arg:self arg:event arguments arg arg If Call Return return:yes If Return return:yes For Call If BoolOp Compare Compare Return return:yes Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_StatefulGraphModuleFactory",
    "source_code": "class _StatefulGraphModuleFactory(type):\n\n    def __call__(cls, *args, **kwargs):\n        raise TypeError(f'{cls.__module__}.{cls.__qualname__} has no public constructor. ')\n\n    def _create(cls, root, graph, range_constraints=None):\n        return super().__call__(root, graph, range_constraints=range_constraints)",
    "docstring": "Metaclass that ensures a private constructor for _StatefulGraphModule",
    "type": "class",
    "file_path": "pytorch\\torch\\export\\_unlift.py",
    "ast_data": "ClassDef name:_StatefulGraphModuleFactory FunctionDef name:__call__ arg:cls arguments arg arg arg Raise Call FunctionDef name:_create arg:cls arg:root arg:graph arg:range_constraints arguments arg arg arg arg Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "depthwise_conv2d",
    "source_code": "@dispatch.add_dispatch_support\n@doc_controls.do_not_generate_docs\ndef depthwise_conv2d(x, depthwise_kernel, strides=(1, 1), padding='valid', data_format=None, dilation_rate=(1, 1)):\n    if data_format is None:\n        data_format = image_data_format()\n    if data_format not in {'channels_first', 'channels_last'}:\n        raise ValueError('Unknown data_format: ' + str(data_format))\n    x, tf_data_format = _preprocess_conv2d_input(x, data_format)\n    padding = _preprocess_padding(padding)\n    if tf_data_format == 'NHWC':\n        strides = (1,) + strides + (1,)\n    else:\n        strides = (1, 1) + strides\n    x = nn.depthwise_conv2d(x, depthwise_kernel, strides=strides, padding=padding, rate=dilation_rate, data_format=tf_data_format)\n    if data_format == 'channels_first' and tf_data_format == 'NHWC':\n        x = array_ops.transpose(x, (0, 3, 1, 2))\n    return x",
    "docstring": "2D convolution with separable filters. Args: x: input tensor depthwise_kernel: convolution kernel for the depthwise convolution. strides: strides tuple (length 2). padding: string, or . data_format: string, or . dilation_rate: tuple of integers, dilation rates for the separable convolution. Returns: Output tensor. Raises: ValueError: if is neither or .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\backend.py",
    "ast_data": "FunctionDef name:depthwise_conv2d arg:x arg:depthwise_kernel arg:strides arg:padding arg:data_format arg:dilation_rate arguments arg arg arg arg arg arg If Compare Assign Call If Compare Raise Call Call Assign Call Assign Call If Compare Assign Assign Assign Call If BoolOp Compare Compare Assign Call Return return:yes"
  },
  {
    "library": "django",
    "name": "hex",
    "source_code": "@property\ndef hex(self):\n    return wkb_w(dim=3 if self.hasz else 2).write_hex(self)",
    "docstring": "Return the WKB of this Geometry in hexadecimal form. Please note that the SRID is not included in this representation because it is not a part of the OGC specification (use the property instead).",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\geos\\geometry.py",
    "ast_data": "FunctionDef name:hex arg:self arguments arg Return return:yes Call Call"
  },
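A brief usage sketch (assumes GeoDjango with the GEOS library installed):

```python
from django.contrib.gis.geos import Point

pt = Point(1.0, 2.0)
# WKB of the geometry as a hex bytestring; the SRID is not encoded.
print(pt.hex)
```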
  {
    "library": "pytorch",
    "name": "get_node_device",
    "source_code": "def get_node_device(self, node: fx.Node) -> Optional[torch.device]:\n    ten = node.meta.get('val')\n    return None if not isinstance(ten, torch.Tensor) else ten.device",
    "docstring": "Get the device of a node.",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\fx_passes\\post_grad.py",
    "ast_data": "FunctionDef name:get_node_device arg:self arg:node arguments arg arg Assign Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "_initial_step",
    "source_code": "def _initial_step(self) -> None:\n    self._step_count = 0\n    with _initial_mode(self):\n        self.step()",
    "docstring": "Initialize step counts and perform a step.",
    "type": "method",
    "file_path": "pytorch\\torch\\optim\\lr_scheduler.py",
    "ast_data": "FunctionDef name:_initial_step arg:self arguments arg Assign With Call Call"
  },
  {
    "library": "scipy",
    "name": "entropy",
    "source_code": "def entropy(self, *args, **kwds):\n    args, loc, scale = self._parse_args(*args, **kwds)\n    loc, scale = map(asarray, (loc, scale))\n    args = tuple(map(asarray, args))\n    cond0 = self._argcheck(*args) & (scale > 0) & (loc == loc)\n    output = zeros(shape(cond0), 'd')\n    place(output, 1 - cond0, self.badvalue)\n    goodargs = argsreduce(cond0, scale, *args)\n    goodscale = goodargs[0]\n    goodargs = goodargs[1:]\n    place(output, cond0, self.vecentropy(*goodargs) + log(goodscale))\n    return output[()]",
    "docstring": "Differential entropy of the RV. Parameters ---------- arg1, arg2, arg3,... : array_like The shape parameter(s) for the distribution (see docstring of the instance object for more information). loc : array_like, optional Location parameter (default=0). scale : array_like, optional (continuous distributions only). Scale parameter (default=1). Notes ----- Entropy is defined base : >>> import numpy as np >>> from scipy.stats._distn_infrastructure import rv_discrete >>> drv = rv_discrete(values=((0, 1), (0.5, 0.5))) >>> np.allclose(drv.entropy(), np.log(2.0)) True",
    "type": "method",
    "file_path": "scipy\\scipy\\stats\\_distn_infrastructure.py",
    "ast_data": "FunctionDef name:entropy arg:self arguments arg arg arg Assign Call Assign Call Assign Call Call Assign Call Compare Compare Assign Call Call Call Assign Call Assign Assign Call Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "get_embedding_table_size",
    "source_code": "def get_embedding_table_size(self):\n    raise NotImplementedError('not implemented')",
    "docstring": "Returns the embedding table size, tuple of vocab size and dimension.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\tpu\\feature_column.py",
    "ast_data": "FunctionDef name:get_embedding_table_size arg:self arguments arg Raise Call"
  },
  {
    "library": "pytorch",
    "name": "__create_write_items__",
    "source_code": "def __create_write_items__(self, fqn: str, object: object) -> list[object]:\n    raise NotImplementedError('_Checkpointable._create_write_items is not implemented')",
    "docstring": "Return a list of WriteItems based on object's contents.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\_checkpointable.py",
    "ast_data": "FunctionDef name:__create_write_items__ arg:self arg:fqn arg:object arguments arg arg arg Raise Call"
  },
  {
    "library": "tensorflow",
    "name": "_check_input_dtype",
    "source_code": "def _check_input_dtype(self, arg):\n    if arg.dtype.base_dtype != self.dtype:\n        raise TypeError('Expected argument to have dtype %s.  Found: %s in tensor %s' % (self.dtype, arg.dtype, arg))",
    "docstring": "Check that arg.dtype == self.dtype.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\linalg\\linear_operator.py",
    "ast_data": "FunctionDef name:_check_input_dtype arg:self arg:arg arguments arg arg If Compare Raise Call"
  },
  {
    "library": "tensorflow",
    "name": "flatten_with_tuple_paths",
    "source_code": "def flatten_with_tuple_paths(structure, expand_composites=False):\n    return list(zip(yield_flat_paths(structure, expand_composites=expand_composites), flatten(structure, expand_composites=expand_composites)))",
    "docstring": "Returns a list of tuples. The order of pairs produced matches that of . This allows you to flatten a nested structure while keeping information about where in the structure each atom was located. See for more information about tuple paths. Args: structure: the nested structure to flatten. expand_composites: If true, then composite tensors such as and are expanded into their component tensors. Returns: A list of tuples. Each is a tuple of indices and/or dictionary keys that uniquely specify the path to within .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\util\\nest.py",
    "ast_data": "FunctionDef name:flatten_with_tuple_paths arg:structure arg:expand_composites arguments arg arg Return return:yes Call Call Call Call"
  },
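A small illustration of the tuple paths produced; since the record's file_path points at the internal `tensorflow.python.util.nest` module, the sketch imports from there rather than assuming a public alias:

```python
from tensorflow.python.util import nest  # internal module, per the file_path above

structure = {"a": [1, 2], "b": {"c": 3}}
# Each pair is (tuple_path, leaf); dict keys are traversed in sorted order.
print(nest.flatten_with_tuple_paths(structure))
# [(('a', 0), 1), (('a', 1), 2), (('b', 'c'), 3)]
```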
  {
    "library": "tensorflow",
    "name": "zeros_like",
    "source_code": "@tf_export(v1=['zeros_like'])\n@dispatch.register_unary_elementwise_api\n@dispatch.add_dispatch_support\ndef zeros_like(tensor, dtype=None, name=None, optimize=True):\n    return zeros_like_impl(tensor, dtype, name, optimize)",
    "docstring": "Creates a tensor with all elements set to zero. See also . Given a single tensor (), this operation returns a tensor of the same type and shape as with all elements set to zero. Optionally, you can use to specify a new type for the returned tensor. Examples: >>> tensor = tf.constant([[1, 2, 3], [4, 5, 6]]) >>> tf.zeros_like(tensor) >>> tf.zeros_like(tensor, dtype=tf.float32) Args: tensor: A . dtype: A type for the returned . Must be , , , , , , , , , , , or . (optional) name: A name for the operation (optional). optimize: if , attempt to statically determine the shape of and encode it as a constant. (optional, defaults to ) Returns: A with all elements set to zero.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\array_ops.py",
    "ast_data": "FunctionDef name:zeros_like arg:tensor arg:dtype arg:name arg:optimize arguments arg arg arg arg Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "version_def",
    "source_code": "@property\n@abc.abstractmethod\ndef version_def(self) -> versions_pb2.VersionDef:\n    pass",
    "docstring": "Version info about the splitter and merge implementation required.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\tools\\proto_splitter\\split.py",
    "ast_data": "FunctionDef name:version_def arg:self arguments arg"
  },
  {
    "library": "pytorch",
    "name": "_maybe_promote_tensor_fft",
    "source_code": "def _maybe_promote_tensor_fft(t: TensorLikeType, require_complex: bool=False) -> TensorLikeType:\n    cur_type = t.dtype\n    new_type = _promote_type_fft(cur_type, require_complex, t.device)\n    return _maybe_convert_to_dtype(t, new_type)",
    "docstring": "Helper to promote a tensor to a dtype supported by the FFT primitives",
    "type": "function",
    "file_path": "pytorch\\torch\\_refs\\fft.py",
    "ast_data": "FunctionDef name:_maybe_promote_tensor_fft arg:t arg:require_complex arguments arg arg Assign Assign Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "unused",
    "source_code": "def unused(fn):\n    if isinstance(fn, property):\n        prop = fn\n        setattr(prop.fget, '_torchscript_modifier', FunctionModifiers.UNUSED)\n        if prop.fset:\n            setattr(prop.fset, '_torchscript_modifier', FunctionModifiers.UNUSED)\n        return prop\n    fn._torchscript_modifier = FunctionModifiers.UNUSED\n    return fn",
    "docstring": "This decorator indicates to the compiler that a function or method should be ignored and replaced with the raising of an exception. This allows you to leave code in your model that is not yet TorchScript compatible and still export your model. Example (using `` on a method):: import torch import torch.nn as nn class MyModule(nn.Module): def __init__(self, use_memory_efficient): super().__init__() self.use_memory_efficient = use_memory_efficient @torch.jit.unused def memory_efficient(self, x): import pdb pdb.set_trace() return x + 10 def forward(self, x): # Use not-yet-scriptable memory efficient mode if self.use_memory_efficient: return self.memory_efficient(x) else: return x + 10 m = torch.jit.script(MyModule(use_memory_efficient=False)) m.save(\"m.pt\") m = torch.jit.script(MyModule(use_memory_efficient=True)) # exception raised m(torch.rand(100))",
    "type": "function",
    "file_path": "pytorch\\torch\\_jit_internal.py",
    "ast_data": "FunctionDef name:unused arg:fn arguments arg If Call Assign Call If Call Return return:yes Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "get_metric_name",
    "source_code": "def get_metric_name(metric, weighted=False):\n    if tf2.enabled():\n        if isinstance(metric, str):\n            return metric\n        metric = metrics_module.get(metric)\n        return metric.name if hasattr(metric, 'name') else metric.__name__\n    else:\n        metric_name_prefix = 'weighted_' if weighted else ''\n        if metric in ('accuracy', 'acc', 'crossentropy', 'ce'):\n            if metric in ('accuracy', 'acc'):\n                suffix = 'acc'\n            elif metric in ('crossentropy', 'ce'):\n                suffix = 'ce'\n        else:\n            metric_fn = metrics_module.get(metric)\n            if hasattr(metric_fn, 'name'):\n                suffix = metric_fn.name\n            else:\n                suffix = metric_fn.__name__\n        metric_name = metric_name_prefix + suffix\n        return metric_name",
    "docstring": "Returns the name corresponding to the given metric input. Args: metric: Metric function name or reference. weighted: Boolean indicating if the given metric is weighted. Returns: The metric name.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\training_utils_v1.py",
    "ast_data": "FunctionDef name:get_metric_name arg:metric arg:weighted arguments arg arg If Call If Call Return return:yes Assign Call Return return:yes Call Assign If Compare If Compare Assign If Compare Assign Assign Call If Call Assign Assign Assign Return return:yes"
  },
  {
    "library": "pandas",
    "name": "_make_numeric_only",
    "source_code": "def _make_numeric_only(self, obj: NDFrameT) -> NDFrameT:\n    result = obj.select_dtypes(include=['number'], exclude=['timedelta'])\n    return result",
    "docstring": "Subset DataFrame to numeric columns. Parameters ---------- obj : DataFrame Returns ------- obj subset to numeric-only columns.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\window\\rolling.py",
    "ast_data": "FunctionDef name:_make_numeric_only arg:self arg:obj arguments arg arg Assign Call Return return:yes"
  },
  {
    "library": "numpy",
    "name": "_izip_fields_flat",
    "source_code": "def _izip_fields_flat(iterable):\n    for element in iterable:\n        if isinstance(element, np.void):\n            yield from _izip_fields_flat(tuple(element))\n        else:\n            yield element",
    "docstring": "Returns an iterator of concatenated fields from a sequence of arrays, collapsing any nested structure.",
    "type": "function",
    "file_path": "numpy\\numpy\\lib\\recfunctions.py",
    "ast_data": "FunctionDef name:_izip_fields_flat arg:iterable arguments arg For If Call Call Call"
  },
  {
    "library": "django",
    "name": "build_absolute_uri",
    "source_code": "def build_absolute_uri(self, location=None):\n    if location is None:\n        location = '//%s' % self.get_full_path()\n    else:\n        location = str(location)\n    bits = urlsplit(location)\n    if not (bits.scheme and bits.netloc):\n        if bits.path.startswith('/') and (not bits.scheme) and (not bits.netloc) and ('/./' not in bits.path) and ('/../' not in bits.path):\n            location = self._current_scheme_host + location.removeprefix('//')\n        else:\n            location = urljoin(self._current_scheme_host + self.path, location)\n    return iri_to_uri(location)",
    "docstring": "Build an absolute URI from the location and the variables available in this request. If no ``), urljoin() it to a base URL constructed from the request variables.",
    "type": "method",
    "file_path": "django\\django\\http\\request.py",
    "ast_data": "FunctionDef name:build_absolute_uri arg:self arg:location arguments arg arg If Compare Assign Call Assign Call Assign Call If BoolOp If BoolOp Call Compare Compare Assign Call Assign Call Return return:yes Call"
  },
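A minimal sketch of the relative and absolute-path cases, using Django's `RequestFactory` with ad-hoc settings (the host `testserver` is what `RequestFactory` supplies by default):

```python
from django.conf import settings
settings.configure(ALLOWED_HOSTS=["testserver"])

from django.test import RequestFactory

request = RequestFactory().get("/docs/page/")
print(request.build_absolute_uri())             # http://testserver/docs/page/
print(request.build_absolute_uri("/about/"))    # http://testserver/about/
print(request.build_absolute_uri("section/"))   # http://testserver/docs/page/section/
```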
  {
    "library": "pandas",
    "name": "DataError",
    "source_code": "class DataError(Exception):\n    pass",
    "docstring": "Exception raised when performing an operation on non-numerical data. For example, calling `` on a non-numerical column or a function on a rolling window. See Also -------- Series.rolling : Provide rolling window calculations on Series object. DataFrame.rolling : Provide rolling window calculations on DataFrame object. Examples -------- >>> ser = pd.Series([\"a\", \"b\", \"c\"]) >>> ser.rolling(2).sum() Traceback (most recent call last): DataError: No numeric types to aggregate",
    "type": "class",
    "file_path": "pandas\\pandas\\errors\\__init__.py",
    "ast_data": "ClassDef name:DataError"
  },
  {
    "library": "kornia",
    "name": "_init_Q_matrix",
    "source_code": "def _init_Q_matrix(self) -> Tensor:\n    Q = zeros((self.batch_size, 4, 4), device=self.device, dtype=self.dtype)\n    baseline: Tensor = -self.tx\n    Q[:, 0, 0] = self.fy * baseline\n    Q[:, 0, 3] = -self.fy * self.cx_left * baseline\n    Q[:, 1, 1] = self.fx * baseline\n    Q[:, 1, 3] = -self.fx * self.cy * baseline\n    Q[:, 2, 3] = self.fx * self.fy * baseline\n    Q[:, 3, 2] = -self.fy\n    Q[:, 3, 3] = self.fy * (self.cx_left - self.cx_right)\n    return Q",
    "docstring": "Initialize the Q matrix of the horizontal stereo setup. See the Q property. Returns: The Q matrix of shape :math:.",
    "type": "method",
    "file_path": "kornia\\kornia\\geometry\\camera\\stereo.py",
    "ast_data": "FunctionDef name:_init_Q_matrix arg:self arguments arg Assign Call Assign Assign Assign Assign Assign Assign Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "variables_to_restore",
    "source_code": "@doc_controls.do_not_generate_docs\ndef variables_to_restore(self, moving_avg_variables=None):\n    name_map = {}\n    if moving_avg_variables is None:\n        moving_avg_variables = variables.trainable_variables()\n        moving_avg_variables += variables.moving_average_variables()\n    moving_avg_variables = set((v.ref() for v in moving_avg_variables))\n    for v in moving_avg_variables:\n        name_map[self.average_name(v.deref())] = v.deref()\n    moving_avg_variable_names = set((v.deref().name for v in moving_avg_variables))\n    for v in list(set(variables.global_variables())):\n        if v.name not in moving_avg_variable_names and v.op.name not in name_map:\n            name_map[v.op.name] = v\n    return name_map",
    "docstring": "[Designed for TF 1.x] Returns a map of names to to restore. (Designed to work with legacy , sensitive to specific variable names and not recommended for TF2) If a variable has a moving average, use the moving average variable name as the restore name; otherwise, use the variable name. For example, Below is an example of such mapping: Args: moving_avg_variables: a list of variables that require to use of the moving average variable name to be restored. If None, it will default to variables.moving_average_variables() + variables.trainable_variables() Returns: A map from restore_names to variables. The restore_name is either the original or the moving average version of the variable name, depending on whether the variable name is in the .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\training\\moving_averages.py",
    "ast_data": "FunctionDef name:variables_to_restore arg:self arg:moving_avg_variables arguments arg arg Assign If Compare Assign Call Call Assign Call Call For Assign Call Call Call Assign Call Call For Call Call Call If BoolOp Compare Compare Assign Return return:yes"
  },
  {
    "library": "scipy",
    "name": "mannwhitneyu",
    "source_code": "def mannwhitneyu(x, y, use_continuity=True):\n    x = ma.asarray(x).compressed().view(ndarray)\n    y = ma.asarray(y).compressed().view(ndarray)\n    ranks = rankdata(np.concatenate([x, y]))\n    nx, ny = (len(x), len(y))\n    nt = nx + ny\n    U = ranks[:nx].sum() - nx * (nx + 1) / 2.0\n    U = max(U, nx * ny - U)\n    u = nx * ny - U\n    mu = nx * ny / 2.0\n    sigsq = (nt ** 3 - nt) / 12.0\n    ties = count_tied_groups(ranks)\n    sigsq -= sum((v * (k ** 3 - k) for k, v in ties.items())) / 12.0\n    sigsq *= nx * ny / float(nt * (nt - 1))\n    if use_continuity:\n        z = (U - 1 / 2.0 - mu) / ma.sqrt(sigsq)\n    else:\n        z = (U - mu) / ma.sqrt(sigsq)\n    prob = special.erfc(abs(z) / np.sqrt(2))\n    return MannwhitneyuResult(u, prob)",
    "docstring": "Computes the Mann-Whitney statistic Missing values in and/or are discarded. Parameters ---------- x : sequence Input y : sequence Input use_continuity : {True, False}, optional Whether a continuity correction (1/2.) should be taken into account. Returns ------- statistic : float The minimum of the Mann-Whitney statistics pvalue : float Approximate two-sided p-value assuming a normal distribution.",
    "type": "function",
    "file_path": "scipy\\scipy\\stats\\_mstats_basic.py",
    "ast_data": "FunctionDef name:mannwhitneyu arg:x arg:y arg:use_continuity arguments arg arg arg Assign Call Call Call Assign Call Call Call Assign Call Call Assign Call Call Assign Assign Call Assign Call Assign Assign Assign Assign Call Call Call Call If Assign Call Assign Call Assign Call Call Call Return return:yes Call"
  },
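A usage sketch of the masked-array variant; the data is made up for illustration:

```python
import numpy as np
from scipy.stats import mstats

# Masked entries (the 99 below) are discarded before ranking.
x = np.ma.array([1, 2, 3, 4, 99], mask=[0, 0, 0, 0, 1])
y = [5, 6, 7, 8]
res = mstats.mannwhitneyu(x, y)
print(res.statistic, res.pvalue)
```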
  {
    "library": "matplotlib",
    "name": "get_fill",
    "source_code": "def get_fill(self):\n    return self._fill",
    "docstring": "Return whether the patch is filled.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\patches.py",
    "ast_data": "FunctionDef name:get_fill arg:self arguments arg Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "minorticks_on",
    "source_code": "def minorticks_on(self):\n    self.xaxis.minorticks_on()\n    self.yaxis.minorticks_on()",
    "docstring": "Display minor ticks on the Axes. Displaying minor ticks may reduce performance; you may turn them off using if drawing speed is a problem.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\axes\\_base.py",
    "ast_data": "FunctionDef name:minorticks_on arg:self arguments arg Call Call"
  },
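Typical usage:

```python
import matplotlib.pyplot as plt

fig, ax = plt.subplots()
ax.plot([0, 1, 2], [0, 1, 4])
ax.minorticks_on()   # enable minor ticks on both the x- and y-axis
```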
  {
    "library": "pytorch",
    "name": "cleanup_graph",
    "source_code": "def cleanup_graph(self):\n    assert self.should_exit\n    nodes = list(self.graph.nodes)\n    for node in nodes:\n        node.meta.pop('creation_timestamp', None)\n    grad_enabled = torch.is_grad_enabled()\n    for node1, node2 in zip(nodes, nodes[1:]):\n        if node1.target is torch._C._set_grad_enabled and tuple(node1.args) == (not grad_enabled,) and (not node1._erased):\n            grad_enabled = node1.args[0]\n            if node2.target is torch._C._set_grad_enabled and tuple(node2.args) == (not grad_enabled,) and (not node2._erased):\n                grad_enabled = node2.args[0]\n                self.graph.erase_node(node1)\n                self.graph.erase_node(node2)",
    "docstring": "Remove \"creation_timestamp\" from node meta Remove this pattern from the graph: torch._C._set_grad_enabled(False) torch._C._set_grad_enabled(True)",
    "type": "method",
    "file_path": "pytorch\\torch\\_dynamo\\output_graph.py",
    "ast_data": "FunctionDef name:cleanup_graph arg:self arguments arg Assign Call For Call Assign Call For Call If BoolOp Compare Compare Call Assign If BoolOp Compare Compare Call Assign Call Call"
  },
  {
    "library": "tensorflow",
    "name": "could_possibly_record",
    "source_code": "def could_possibly_record():\n    return not pywrap_tfe.TFE_Py_TapeSetIsEmpty()",
    "docstring": "Returns True if any tape is active.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\record.py",
    "ast_data": "FunctionDef name:could_possibly_record arguments Return return:yes Call"
  },
  {
    "library": "numpy",
    "name": "isin",
    "source_code": "def isin(element, test_elements, assume_unique=False, invert=False):\n    element = ma.asarray(element)\n    return in1d(element, test_elements, assume_unique=assume_unique, invert=invert).reshape(element.shape)",
    "docstring": "Calculates , broadcasting over only. The output is always a masked array of the same shape as . See for more details. See Also -------- in1d : Flattened version of this function. numpy.isin : Equivalent function for ndarrays. Examples -------- >>> import numpy as np >>> element = np.ma.array([1, 2, 3, 4, 5, 6]) >>> test_elements = [0, 2] >>> np.ma.isin(element, test_elements) masked_array(data=[False, True, False, False, False, False], mask=False, fill_value=True)",
    "type": "function",
    "file_path": "numpy\\numpy\\ma\\extras.py",
    "ast_data": "FunctionDef name:isin arg:element arg:test_elements arg:assume_unique arg:invert arguments arg arg arg arg Assign Call Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_set_attr_with_buf",
    "source_code": "def _set_attr_with_buf(self, attr_name, attr_buf) -> None:\n    with self.graph._c_graph.get() as c_graph:\n        pywrap_tf_session.SetAttr(c_graph, self._c_op, attr_name, attr_buf)",
    "docstring": "Set an attr in the node_def with a pre-allocated buffer.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\ops.py",
    "ast_data": "FunctionDef name:_set_attr_with_buf arg:self arg:attr_name arg:attr_buf arguments arg arg arg With Call Call"
  },
  {
    "library": "pytorch",
    "name": "_init_with_tracing",
    "source_code": "def _init_with_tracing(self, fn, args):\n    self.indexing_exprs = {}\n    self.indexing_exprs_name = {}\n    self.submodules = {'get_index': self.get_index}\n    self.subblocks = {}\n    self.indirect_vars = []\n    self.indirect_var_ranges: dict[sympy.Symbol, sympy.Expr] = {}\n    self.memory_usage = {t: [] for t in MemoryUsageType}\n    self.op_counts = collections.Counter()\n    self.root_block = LoopBodyBlock(self, fn, args)\n    del self.indexing_exprs_name",
    "docstring": "Do an FX trace of an arbitrary callable to construct self",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\loop_body.py",
    "ast_data": "FunctionDef name:_init_with_tracing arg:self arg:fn arg:args arguments arg arg arg Assign Assign Assign Assign Assign Assign Assign Call Assign Call"
  },
  {
    "library": "pytorch",
    "name": "_permute_strides",
    "source_code": "def _permute_strides(out: torch.Tensor, query_strides: tuple[int, ...]) -> torch.Tensor:\n    from torch._inductor.ir import get_fill_order\n    fill_order = get_fill_order(query_strides)\n    assert out.storage_offset() == 0, 'Only support storage_offset == 0'\n    out_strides = _construct_strides(out.shape, fill_order)\n    new_out = out.new_empty(out.shape).as_strided(out.shape, out_strides)\n    new_out.copy_(out)\n    return new_out",
    "docstring": "Create a new tensor with the same data and shape as the input, but with strides permuted based on the input tensor's stride order. Args: out (torch.Tensor): The output tensor of attention. query_strides (List[int]): The stride order of the input query tensor Returns: torch.Tensor: A new tensor with same shape and data as the input, but with strides permuted based on the query tensor's stride order.",
    "type": "function",
    "file_path": "pytorch\\torch\\_higher_order_ops\\flex_attention.py",
    "ast_data": "FunctionDef name:_permute_strides arg:out arg:query_strides arguments arg arg Assign Call Compare Call Assign Call Assign Call Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_recompute_cached_properties",
    "source_code": "def _recompute_cached_properties(self):\n    self._by_val_internal.mutated = False\n    self._by_val_external.mutated = False\n    assert len(self._by_val_internal) == len(self._by_val_external)\n    self._cached_by_val_capture_tuples = []\n    for key in self._by_val_internal:\n        assert key in self._by_val_external\n        internal = self._by_val_internal[key]\n        external = self._by_val_external[key]\n        self._cached_by_val_capture_tuples.append((external, internal))\n    self._cached_capture_types = py_collections.OrderedDict(list(self._by_val_tracetype.items()) + list(self._by_ref_tracetype.items()))",
    "docstring": "Regenerates cached properties if there have been mutations.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\core\\function\\capture\\capture_container.py",
    "ast_data": "FunctionDef name:_recompute_cached_properties arg:self arguments arg Assign Assign Compare Call Call Assign For Compare Assign Assign Call Assign Call Call Call Call Call"
  },
  {
    "library": "scrapy",
    "name": "close",
    "source_code": "def close(self) -> None:\n    self.head_plugin.close()",
    "docstring": "Close the target file along with all the plugins.",
    "type": "method",
    "file_path": "scrapy\\scrapy\\extensions\\postprocessing.py",
    "ast_data": "FunctionDef name:close arg:self arguments arg Call"
  },
  {
    "library": "matplotlib",
    "name": "_set_alpha_for_array",
    "source_code": "def _set_alpha_for_array(self, alpha):\n    if isinstance(alpha, str):\n        raise TypeError('alpha must be numeric or None, not a string')\n    if not np.iterable(alpha):\n        Artist.set_alpha(self, alpha)\n        return\n    alpha = np.asarray(alpha)\n    if not (0 <= alpha.min() and alpha.max() <= 1):\n        raise ValueError(f'alpha must be between 0 and 1, inclusive, but min is {alpha.min()}, max is {alpha.max()}')\n    self._alpha = alpha\n    self.pchanged()\n    self.stale = True",
    "docstring": "Set the alpha value used for blending - not supported on all backends. Parameters ---------- alpha : array-like or float or None All values must be within the 0-1 range, inclusive. Masked values and nans are not supported.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\artist.py",
    "ast_data": "FunctionDef name:_set_alpha_for_array arg:self arg:alpha arguments arg arg If Call Raise Call If Call Call Return return:no Assign Call If BoolOp Compare Call Compare Call Raise Call Call Call Assign Call Assign"
  },
  {
    "library": "scikit-learn",
    "name": "fit",
    "source_code": "@_fit_context(prefer_skip_nested_validation=True)\ndef fit(self, X, y=None):\n    super().fit(X)\n    self.offset_ = np.percentile(-self.dist_, 100.0 * self.contamination)\n    return self",
    "docstring": "Fit the EllipticEnvelope model. Parameters ---------- X : array-like of shape (n_samples, n_features) Training data. y : Ignored Not used, present for API consistency by convention. Returns ------- self : object Returns the instance itself.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\covariance\\_elliptic_envelope.py",
    "ast_data": "FunctionDef name:fit arg:self arg:X arg:y arguments arg arg arg Call Call Assign Call Return return:yes Call"
  },
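An end-to-end sketch with synthetic data (the data and parameter choices are illustrative only):

```python
import numpy as np
from sklearn.covariance import EllipticEnvelope

rng = np.random.default_rng(0)
X = rng.normal(size=(200, 2))
est = EllipticEnvelope(contamination=0.1, random_state=0).fit(X)
# After fit, offset_ is set so roughly `contamination` of samples score as outliers.
print(est.predict(X[:5]))   # 1 = inlier, -1 = outlier
```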
  {
    "library": "kornia",
    "name": "denormalize_points_with_intrinsics",
    "source_code": "def denormalize_points_with_intrinsics(point_2d_norm: Tensor, camera_matrix: Tensor) -> Tensor:\n    KORNIA_CHECK_SHAPE(point_2d_norm, ['*', '2'])\n    KORNIA_CHECK_SHAPE(camera_matrix, ['*', '3', '3'])\n    x_coord: Tensor = point_2d_norm[..., 0]\n    y_coord: Tensor = point_2d_norm[..., 1]\n    fx: Tensor = camera_matrix[..., 0, 0]\n    fy: Tensor = camera_matrix[..., 1, 1]\n    cx: Tensor = camera_matrix[..., 0, 2]\n    cy: Tensor = camera_matrix[..., 1, 2]\n    if len(cx.shape) < len(x_coord.shape):\n        cx, cy, fx, fy = (cx.unsqueeze(-1), cy.unsqueeze(-1), fx.unsqueeze(-1), fy.unsqueeze(-1))\n    u_coord: Tensor = x_coord * fx + cx\n    v_coord: Tensor = y_coord * fy + cy\n    return stack([u_coord, v_coord], dim=-1)",
    "docstring": "Normalize points with intrinsics. Useful for conversion of keypoints to be used with essential matrix. Args: point_2d_norm: tensor containing the 2d points in the image pixel coordinates. The shape of the tensor can be :math:. camera_matrix: tensor containing the intrinsics camera matrix. The tensor shape must be :math:. Returns: tensor of (u, v) cam coordinates with shape :math:. Example: >>> _ = torch.manual_seed(0) >>> X = torch.rand(1, 2) >>> K = torch.eye(3)[None] >>> denormalize_points_with_intrinsics(X, K) tensor([[0.4963, 0.7682]])",
    "type": "function",
    "file_path": "kornia\\kornia\\geometry\\conversions.py",
    "ast_data": "FunctionDef name:denormalize_points_with_intrinsics arg:point_2d_norm arg:camera_matrix arguments arg arg Call Call If Compare Call Call Assign Call Call Call Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "from_saveables",
    "source_code": "@classmethod\ndef from_saveables(cls, saveables: Sequence[base.Trackable], registered_savers: 'RegisteredSaversDict | None'=None, call_with_mapped_captures: 'MappedCapturesCallable | None'=None) -> 'MultiDeviceSaver':\n    serialized_tensors = object_identity.ObjectIdentityDictionary()\n    for saveable in saveables:\n        trackable = saveable_object_util.SaveableCompatibilityConverter(saveable, saveables=[saveable])\n        serialized_tensors[trackable] = trackable._serialize_to_tensors()\n    return cls(serialized_tensors, registered_savers, call_with_mapped_captures)",
    "docstring": "Constructs a MultiDeviceSaver from a list of s.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\checkpoint\\functional_saver.py",
    "ast_data": "FunctionDef name:from_saveables arg:cls arg:saveables arg:registered_savers arg:call_with_mapped_captures arguments arg arg arg arg Assign Call For Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "is_tensor_partial",
    "source_code": "def is_tensor_partial(spec: DTensorSpec) -> bool:\n    return any((p.is_partial() for p in spec.placements))",
    "docstring": "Return True if tensor is partial on the mesh.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\tensor\\_ops\\utils.py",
    "ast_data": "FunctionDef name:is_tensor_partial arg:spec arguments arg Return return:yes Call Call"
  },
  {
    "library": "sphinx",
    "name": "PyDecoratorFunction",
    "source_code": "class PyDecoratorFunction(PyFunction):\n\n    def run(self) -> list[Node]:\n        self.name = 'py:function'\n        return super().run()\n\n    def handle_signature(self, sig: str, signode: desc_signature) -> tuple[str, str]:\n        ret = super().handle_signature(sig, signode)\n        signode.insert(0, addnodes.desc_addname('@', '@'))\n        return ret\n\n    def needs_arglist(self) -> bool:\n        return False",
    "docstring": "Description of a decorator.",
    "type": "class",
    "file_path": "sphinx\\sphinx\\domains\\python\\__init__.py",
    "ast_data": "ClassDef name:PyDecoratorFunction FunctionDef name:run arg:self arguments arg Assign Return return:yes Call Call FunctionDef name:handle_signature arg:self arg:sig arg:signode arguments arg arg arg Assign Call Call Call Call Return return:yes FunctionDef name:needs_arglist arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, node_def, op, message, *args):\n    super(InvalidArgumentError, self).__init__(node_def, op, message, INVALID_ARGUMENT, *args)",
    "docstring": "Creates an .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\errors_impl.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:node_def arg:op arg:message arguments arg arg arg arg arg Call Call"
  },
  {
    "library": "scipy",
    "name": "_conditional_oddsratio_ci",
    "source_code": "def _conditional_oddsratio_ci(table, confidence_level=0.95, alternative='two-sided'):\n    if alternative == 'two-sided':\n        alpha = 0.5 * (1 - confidence_level)\n        lower = _ci_lower(table, alpha)\n        upper = _ci_upper(table, alpha)\n    elif alternative == 'less':\n        lower = 0.0\n        upper = _ci_upper(table, 1 - confidence_level)\n    else:\n        lower = _ci_lower(table, 1 - confidence_level)\n        upper = np.inf\n    return (lower, upper)",
    "docstring": "Conditional exact confidence interval for the odds ratio.",
    "type": "function",
    "file_path": "scipy\\scipy\\stats\\_odds_ratio.py",
    "ast_data": "FunctionDef name:_conditional_oddsratio_ci arg:table arg:confidence_level arg:alternative arguments arg arg arg If Compare Assign Assign Call Assign Call If Compare Assign Assign Call Assign Call Assign Return return:yes"
  },
  {
    "library": "kornia",
    "name": "StableDiffusionDissolving",
    "source_code": "class StableDiffusionDissolving(ImageModule):\n\n    def __init__(self, version: str='2.1', **kwargs: Any):\n        super().__init__()\n        StableDiffusionPipeline = diffusers.StableDiffusionPipeline\n        DDIMScheduler = diffusers.DDIMScheduler\n        scheduler = DDIMScheduler(beta_start=0.00085, beta_end=0.012, beta_schedule='scaled_linear', clip_sample=False, set_alpha_to_one=False, steps_offset=1)\n        if version == '1.4':\n            self._sdm_model = StableDiffusionPipeline.from_pretrained('CompVis/stable-diffusion-v1-4', scheduler=scheduler, **kwargs)\n        elif version == '1.5':\n            self._sdm_model = StableDiffusionPipeline.from_pretrained('runwayml/stable-diffusion-v1-5', scheduler=scheduler, **kwargs)\n        elif version == '2.1':\n            self._sdm_model = StableDiffusionPipeline.from_pretrained('stabilityai/stable-diffusion-2-1', scheduler=scheduler, **kwargs)\n        else:\n            raise NotImplementedError\n        self.model = _DissolvingWraper_HF(self._sdm_model, num_ddim_steps=1000)\n\n    def forward(self, input: Tensor, step_number: int) -> Tensor:\n        return self.model.dissolve(input, step_number)",
    "docstring": "Perform dissolving transformation using StableDiffusion models. Based on :cite:, the dissolving transformation is essentially applying one-step reverse diffusion. Our implementation currently supports HuggingFace implementations of SD 1.4, 1.5 and 2.1. SD 1.X tends to remove more details than SD2.1. .. list-table:: Title :widths: 32 32 32 :header-rows: 1 * - SD 1.4 - SD 1.5 - SD 2.1 * - figure:: - figure:: - figure:: Args: version: the version of the stable diffusion model. **kwargs: additional arguments for .",
    "type": "class",
    "file_path": "kornia\\kornia\\filters\\dissolving.py",
    "ast_data": "ClassDef name:StableDiffusionDissolving FunctionDef name:__init__ arg:self arg:version arguments arg arg arg Call Call Assign Assign Assign Call If Compare Assign Call If Compare Assign Call If Compare Assign Call Raise Assign Call FunctionDef name:forward arg:self arg:input arg:step_number arguments arg arg arg Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "add_axobserver",
    "source_code": "def add_axobserver(self, func):\n    self._axobservers.connect('_axes_change_event', lambda arg: func(arg))",
    "docstring": "Whenever the Axes state change, `` will be called.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\figure.py",
    "ast_data": "FunctionDef name:add_axobserver arg:self arg:func arguments arg arg Call arguments arg Call"
  },
  {
    "library": "cherrypy",
    "name": "SanitizedHost",
    "source_code": "class SanitizedHost(str):\n    dangerous = re.compile('[\\\\n\\\\r]')\n\n    def __new__(cls, raw):\n        sanitized = cls._sanitize(raw)\n        if sanitized == raw:\n            return raw\n        instance = super().__new__(cls, sanitized)\n        instance.raw = raw\n        return instance\n\n    @classmethod\n    def _sanitize(cls, raw):\n        return cls.dangerous.sub('', raw)",
    "docstring": "A normalized host header value. Wraps a raw host header received from the network in a sanitized version that elides dangerous characters. >>> SanitizedHost('foo\\nbar') 'foobar' >>> SanitizedHost('foo\\nbar').raw 'foo\\nbar' A SanitizedInstance is only returned if sanitization was performed. >>> isinstance(SanitizedHost('foobar'), SanitizedHost) False",
    "type": "class",
    "file_path": "cherrypy\\cherrypy\\lib\\httputil.py",
    "ast_data": "ClassDef name:SanitizedHost Assign Call FunctionDef name:__new__ arg:cls arg:raw arguments arg arg Assign Call If Compare Return return:yes Assign Call Call Assign Return return:yes FunctionDef name:_sanitize arg:cls arg:raw arguments arg arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "grab_batch",
    "source_code": "def grab_batch(indices):\n\n    def py_method(ind):\n\n        def slice_array(data):\n            return training_utils.slice_arrays(data, ind.numpy(), contiguous=contiguous)\n        return [slice_array(inp) for inp in flat_inputs]\n    flat_out = script_ops.eager_py_func(py_method, [indices], flat_dtypes)\n    for v, original_inp in zip(flat_out, flat_inputs):\n        v.set_shape(dynamic_shape_like(original_inp))\n    return nest.pack_sequence_as(inputs, flat_out)",
    "docstring": "Grab a batch of data from the inputs.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\data_adapter.py",
    "ast_data": "FunctionDef name:grab_batch arg:indices arguments arg FunctionDef name:py_method arg:ind arguments arg FunctionDef name:slice_array arg:data arguments arg Return return:yes Call Call Return return:yes Call Assign Call For Call Call Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "_eval_no_call",
    "source_code": "def _eval_no_call(stmt, glob, loc):\n    bytecode = compile(stmt, '', mode='eval')\n    for insn in dis.get_instructions(bytecode):\n        if 'CALL' in insn.opname:\n            raise RuntimeError(f\"Type annotation should not contain calls, but '{stmt}' does\")\n    return eval(bytecode, glob, loc)",
    "docstring": "Evaluate statement as long as it does not contain any method/function calls.",
    "type": "function",
    "file_path": "pytorch\\torch\\jit\\annotations.py",
    "ast_data": "FunctionDef name:_eval_no_call arg:stmt arg:glob arg:loc arguments arg arg arg Assign Call For Call If Compare Raise Call Return return:yes Call"
  },
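To make the bytecode check concrete, here is a standalone sketch of the same idea (`eval_no_call` is a hypothetical re-implementation for illustration, not the library function):

```python
import dis

def eval_no_call(stmt, glob=None, loc=None):
    # Reject any expression whose bytecode contains a CALL-family opcode.
    bytecode = compile(stmt, "<string>", mode="eval")
    if any("CALL" in insn.opname for insn in dis.get_instructions(bytecode)):
        raise RuntimeError(f"Type annotation should not contain calls, but {stmt!r} does")
    return eval(bytecode, glob or {}, loc or {})

print(eval_no_call("int"))   # <class 'int'>
# eval_no_call("list()")     # raises RuntimeError
```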
  {
    "library": "matplotlib",
    "name": "get_rasterization_zorder",
    "source_code": "def get_rasterization_zorder(self):\n    return self._rasterization_zorder",
    "docstring": "Return the zorder value below which artists will be rasterized.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\axes\\_base.py",
    "ast_data": "FunctionDef name:get_rasterization_zorder arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "zero_state",
    "source_code": "def zero_state(self, batch_size, dtype):\n    state_size = self.state_size\n    is_eager = context.executing_eagerly()\n    if is_eager and _hasattr(self, '_last_zero_state'):\n        last_state_size, last_batch_size, last_dtype, last_output = getattr(self, '_last_zero_state')\n        if last_batch_size == batch_size and last_dtype == dtype and (last_state_size == state_size):\n            return last_output\n    with backend.name_scope(type(self).__name__ + 'ZeroState'):\n        output = _zero_state_tensors(state_size, batch_size, dtype)\n    if is_eager:\n        self._last_zero_state = (state_size, batch_size, dtype, output)\n    return output",
    "docstring": "Return zero-filled state tensor(s). Args: batch_size: int, float, or unit Tensor representing the batch size. dtype: the data type to use for the state. Returns: If is an int or TensorShape, then the return value is a tensor of shape filled with zeros. If is a nested list or tuple, then the return value is a nested list or tuple (of the same structure) of tensors with the shapes for each s in .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\layers\\legacy_rnn\\rnn_cell_impl.py",
    "ast_data": "FunctionDef name:zero_state arg:self arg:batch_size arg:dtype arguments arg arg arg Assign Assign Call If BoolOp Call Assign Call If BoolOp Compare Compare Compare Return return:yes With Call Call Assign Call If Assign Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "atleast_1d",
    "source_code": "def atleast_1d(arg: Union[TensorLikeType, Sequence[TensorLikeType]], *args: TensorLikeType) -> Union[TensorLikeType, tuple[TensorLikeType, ...]]:\n    if not args and isinstance(arg, collections.abc.Sequence):\n        args_ = arg\n    else:\n        assert not isinstance(arg, collections.abc.Sequence)\n        args_ = (arg,) + args\n    res = tuple((a if a.ndim >= 1 else unsqueeze(a, 0) for a in args_))\n    return res if len(res) > 1 else res[0]",
    "docstring": "Reference implementation of :func:.",
    "type": "function",
    "file_path": "pytorch\\torch\\_refs\\__init__.py",
    "ast_data": "FunctionDef name:atleast_1d arg:arg arguments arg arg If BoolOp Call Assign Call Assign Assign Call Compare Call Return return:yes Compare Call"
  },
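Behavior sketch via the public `torch.atleast_1d`:

```python
import torch

# A 0-d tensor gains a leading dimension; higher-rank tensors pass through.
print(torch.atleast_1d(torch.tensor(1.0)).shape)   # torch.Size([1])
a, b = torch.atleast_1d(torch.tensor(1.0), torch.arange(3))
print(a.shape, b.shape)                            # torch.Size([1]) torch.Size([3])
```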
  {
    "library": "tensorflow",
    "name": "ExportOutput",
    "source_code": "class ExportOutput(object):\n    __metaclass__ = abc.ABCMeta\n    _SEPARATOR_CHAR = '/'\n\n    @abc.abstractmethod\n    def as_signature_def(self, receiver_tensors):\n        pass\n\n    def _check_output_key(self, key, error_label):\n        if isinstance(key, tuple):\n            key = self._SEPARATOR_CHAR.join(key)\n        if not isinstance(key, str):\n            raise ValueError('{} output key must be a string; got {}.'.format(error_label, key))\n        return key\n\n    def _wrap_and_check_outputs(self, outputs, single_output_default_name, error_label=None):\n        if not isinstance(outputs, dict):\n            outputs = {single_output_default_name: outputs}\n        output_dict = {}\n        for key, value in outputs.items():\n            error_name = error_label or single_output_default_name\n            key = self._check_output_key(key, error_name)\n            if not isinstance(value, tensor.Tensor):\n                raise ValueError('{} output value must be a Tensor; got {}.'.format(error_name, value))\n            output_dict[key] = value\n        return output_dict",
    "docstring": "Represents an output of a model that can be served. These typically correspond to model heads.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\saving\\utils_v1\\export_output.py",
    "ast_data": "ClassDef name:ExportOutput Assign Assign FunctionDef name:as_signature_def arg:self arg:receiver_tensors arguments arg arg FunctionDef name:_check_output_key arg:self arg:key arg:error_label arguments arg arg arg If Call Assign Call If Call Raise Call Call Return return:yes FunctionDef name:_wrap_and_check_outputs arg:self arg:outputs arg:single_output_default_name arg:error_label arguments arg arg arg arg If Call Assign Assign For Call Assign BoolOp Assign Call If Call Raise Call Call Assign Return return:yes"
  },
  {
    "library": "cryptography",
    "name": "revocation_date_utc",
    "source_code": "@property\n@abc.abstractmethod\ndef revocation_date_utc(self) -> datetime.datetime:\n    pass",
    "docstring": "Returns the date of when this certificate was revoked as a non-naive UTC datetime.",
    "type": "method",
    "file_path": "cryptography\\src\\cryptography\\x509\\base.py",
    "ast_data": "FunctionDef name:revocation_date_utc arg:self arguments arg"
  },
  {
    "library": "matplotlib",
    "name": "autumn",
    "source_code": "def autumn() -> None:\n    set_cmap('autumn')",
    "docstring": "Set the colormap to 'autumn'. This changes the default colormap as well as the colormap of the current image if there is one. See `` for more information.",
    "type": "function",
    "file_path": "matplotlib\\lib\\matplotlib\\pyplot.py",
    "ast_data": "FunctionDef name:autumn arguments Call"
  },
  {
    "library": "kornia",
    "name": "compute_correspond_epilines",
    "source_code": "def compute_correspond_epilines(points: Tensor, F_mat: Tensor) -> Tensor:\n    KORNIA_CHECK_SHAPE(points, ['*', 'N', 'DIM'])\n    if points.shape[-1] == 2:\n        points_h: Tensor = convert_points_to_homogeneous(points)\n    elif points.shape[-1] == 3:\n        points_h = points\n    else:\n        raise AssertionError(points.shape)\n    KORNIA_CHECK_SHAPE(F_mat, ['*', '3', '3'])\n    points_h = torch.transpose(points_h, dim0=-2, dim1=-1)\n    a, b, c = torch.chunk(F_mat @ points_h, dim=-2, chunks=3)\n    nu: Tensor = a * a + b * b\n    nu = where(nu > 0.0, 1.0 / torch.sqrt(nu), torch.ones_like(nu))\n    line = torch.cat([a * nu, b * nu, c * nu], dim=-2)\n    return torch.transpose(line, dim0=-2, dim1=-1)",
    "docstring": "Compute the corresponding epipolar line for a given set of points. Args: points: tensor containing the set of points to project in the shape of :math: or :math:. F_mat: the fundamental to use for projection the points in the shape of :math:. Returns: a tensor with shape :math: containing a vector of the epipolar lines corresponding to the points to the other image. Each line is described as :math: and encoding the vectors as :math:.",
    "type": "function",
    "file_path": "kornia\\kornia\\geometry\\epipolar\\fundamental.py",
    "ast_data": "FunctionDef name:compute_correspond_epilines arg:points arg:F_mat arguments arg arg Call If Compare Call If Compare Assign Raise Call Call Assign Call Assign Call Assign Call Compare Call Call Assign Call Return return:yes Call"
  },
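A shape-level usage sketch (random points and an identity "fundamental" matrix, purely illustrative):

```python
import torch
from kornia.geometry.epipolar import compute_correspond_epilines

points = torch.rand(1, 8, 2)   # (B, N, 2) pixel coordinates
F_mat = torch.eye(3)[None]     # (B, 3, 3) fundamental matrix
lines = compute_correspond_epilines(points, F_mat)
print(lines.shape)             # torch.Size([1, 8, 3]); each row encodes (a, b, c)
```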
  {
    "library": "pytorch",
    "name": "complete_partial_tiling",
    "source_code": "@classmethod\ndef complete_partial_tiling(cls, tiling: dict[str, sympy.Expr], numel: sympy.Expr, reduction_numel: sympy.Expr) -> dict[str, sympy.Expr]:\n    splits = list(tiling.values())\n    is_pointwise = 'x' in tiling\n    total_numel = numel * reduction_numel\n    missing_tiling = [total_numel / sympy_product(splits)]\n    tiling_args = (splits, missing_tiling) if is_pointwise else (missing_tiling, splits)\n    return cls.create_tiling(*tiling_args)",
    "docstring": "Given a tiling for only pointwise or reduction dimensions, adds the missing one.",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\codegen\\simd.py",
    "ast_data": "FunctionDef name:complete_partial_tiling arg:cls arg:tiling arg:numel arg:reduction_numel arguments arg arg arg arg Assign Call Call Assign Compare Assign Assign Call Assign Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "trace_model_call",
    "source_code": "def trace_model_call(model, input_signature=None):\n    if input_signature is None:\n        if isinstance(model.call, def_function.Function):\n            input_signature = model.call.input_signature\n    if input_signature is None:\n        input_signature = model_input_signature(model)\n    if input_signature is None:\n        raise_model_input_error(model)\n\n    @def_function.function(input_signature=input_signature, autograph=False)\n    def _wrapped_model(*args):\n        inputs = args[0] if len(input_signature) == 1 else list(args)\n        with keras_deps.get_call_context_function()().enter(model, inputs=inputs, build_graph=False, call_context_args={'training': False}, saving=True):\n            outputs = model(inputs, training=False)\n        return outputs\n    return _wrapped_model",
    "docstring": "Trace the model call to create a tf.function for exporting a Keras model. Args: model: A Keras model. input_signature: optional, a list of tf.TensorSpec objects specifying the inputs to the model. Returns: A tf.function wrapping the model's call function with input signatures set. Raises: ValueError: if input signature cannot be inferred from the model.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\lite\\python\\tflite_keras_util.py",
    "ast_data": "FunctionDef name:trace_model_call arg:model arg:input_signature arguments arg arg If Compare If Call Assign If Compare Assign Call If Compare Call FunctionDef name:_wrapped_model arguments arg Assign Compare Call Call With Call Call Call Assign Call Return return:yes Call Return return:yes"
  },
  {
    "library": "numpy",
    "name": "putmask",
    "source_code": "def putmask(a, mask, values):\n    if not isinstance(a, MaskedArray):\n        a = a.view(MaskedArray)\n    valdata, valmask = (getdata(values), getmask(values))\n    if getmask(a) is nomask:\n        if valmask is not nomask:\n            a._sharedmask = True\n            a._mask = make_mask_none(a.shape, a.dtype)\n            np.copyto(a._mask, valmask, where=mask)\n    elif a._hardmask:\n        if valmask is not nomask:\n            m = a._mask.copy()\n            np.copyto(m, valmask, where=mask)\n            a.mask |= m\n    else:\n        if valmask is nomask:\n            valmask = getmaskarray(values)\n        np.copyto(a._mask, valmask, where=mask)\n    np.copyto(a._data, valdata, where=mask)",
    "docstring": "Changes elements of an array based on conditional and input values. This is the masked array version of , for details see . See Also -------- numpy.putmask Notes ----- Using a masked array as will **not** transform a into a . Examples -------- >>> import numpy as np >>> arr = [[1, 2], [3, 4]] >>> mask = [[1, 0], [0, 0]] >>> x = np.ma.array(arr, mask=mask) >>> np.ma.putmask(x, x >> x masked_array( data=[[--, 20], [30, 4]], mask=[[ True, False], [False, False]], fill_value=999999) >>> x.data array([[10, 20], [30, 4]])",
    "type": "function",
    "file_path": "numpy\\numpy\\ma\\core.py",
    "ast_data": "FunctionDef name:putmask arg:a arg:mask arg:values arguments arg arg arg If Call Assign Call Assign Call Call If Compare Call If Compare Assign Assign Call Call If If Compare Assign Call Call If Compare Assign Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "export_onnx_opset_version",
    "source_code": "@property\ndef export_onnx_opset_version(self) -> int:\n    return self._export_onnx_opset_version",
    "docstring": "Opset version used during export.",
    "type": "method",
    "file_path": "pytorch\\torch\\onnx\\_globals.py",
    "ast_data": "FunctionDef name:export_onnx_opset_version arg:self arguments arg Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "set_figwidth",
    "source_code": "def set_figwidth(self, val, forward=True):\n    self.set_size_inches(val, self.get_figheight(), forward=forward)",
    "docstring": "Set the width of the figure in inches. Parameters ---------- val : float forward : bool See . See Also -------- matplotlib.figure.Figure.set_figheight matplotlib.figure.Figure.set_size_inches",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\figure.py",
    "ast_data": "FunctionDef name:set_figwidth arg:self arg:val arg:forward arguments arg arg arg Call Call"
  },
  {
    "library": "numpy",
    "name": "safe_eval",
    "source_code": "def safe_eval(source):\n    warnings.warn('`safe_eval` is deprecated. Use `ast.literal_eval` instead. Be aware of security implications, such as memory exhaustion based attacks (deprecated in NumPy 2.0)', DeprecationWarning, stacklevel=2)\n    import ast\n    return ast.literal_eval(source)",
    "docstring": "Protected string evaluation. .. deprecated:: 2.0 Use instead. Evaluate a string containing a Python literal expression without allowing the execution of arbitrary non-literal code. .. warning:: This function is identical to :py:meth: and has the same security implications. It may not always be safe to evaluate large input strings. Parameters ---------- source : str The string to evaluate. Returns ------- obj : object The result of evaluating . Raises ------ SyntaxError If the code has invalid Python syntax, or if it contains non-literal code. Examples -------- >>> np.safe_eval('1') 1 >>> np.safe_eval('[1, 2, 3]') [1, 2, 3] >>> np.safe_eval('{\"foo\": (\"bar\", 10.0)}') {'foo': ('bar', 10.0)} >>> np.safe_eval('import os') Traceback (most recent call last): ... SyntaxError: invalid syntax >>> np.safe_eval('open(\"/home/user/.ssh/id_dsa\").read()') Traceback (most recent call last): ... ValueError: malformed node or string:",
    "type": "function",
    "file_path": "numpy\\numpy\\lib\\_utils_impl.py",
    "ast_data": "FunctionDef name:safe_eval arg:source arguments arg Call Return return:yes Call"
  },
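The documented replacement is standard-library `ast.literal_eval`, which evaluates Python literals and nothing else; a quick sketch of the behavior the docstring describes:

```python
import ast

# `ast.literal_eval` only evaluates Python literals, never arbitrary code.
print(ast.literal_eval('[1, 2, 3]'))               # [1, 2, 3]
print(ast.literal_eval('{"foo": ("bar", 10.0)}'))  # {'foo': ('bar', 10.0)}

try:
    ast.literal_eval('import os')  # non-literal code is rejected
except SyntaxError as exc:
    print('rejected:', exc)
```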
  {
    "library": "pytorch",
    "name": "__init__",
    "source_code": "def __init__(self, conjuncts):\n    self.conjucts = conjuncts",
    "docstring": ":param conjuncts: Conjunction of constraints",
    "type": "method",
    "file_path": "pytorch\\torch\\fx\\experimental\\migrate_gradual_types\\constraint.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:conjuncts arguments arg arg Assign"
  },
  {
    "library": "scikit-learn",
    "name": "_boost",
    "source_code": "def _boost(self, iboost, X, y, sample_weight, random_state):\n    estimator = self._make_estimator(random_state=random_state)\n    estimator.fit(X, y, sample_weight=sample_weight)\n    y_predict = estimator.predict(X)\n    if iboost == 0:\n        self.classes_ = getattr(estimator, 'classes_', None)\n        self.n_classes_ = len(self.classes_)\n    incorrect = y_predict != y\n    estimator_error = np.mean(np.average(incorrect, weights=sample_weight, axis=0))\n    if estimator_error <= 0:\n        return (sample_weight, 1.0, 0.0)\n    n_classes = self.n_classes_\n    if estimator_error >= 1.0 - 1.0 / n_classes:\n        self.estimators_.pop(-1)\n        if len(self.estimators_) == 0:\n            raise ValueError('BaseClassifier in AdaBoostClassifier ensemble is worse than random, ensemble can not be fit.')\n        return (None, None, None)\n    estimator_weight = self.learning_rate * (np.log((1.0 - estimator_error) / estimator_error) + np.log(n_classes - 1.0))\n    if not iboost == self.n_estimators - 1:\n        sample_weight = np.exp(np.log(sample_weight) + estimator_weight * incorrect * (sample_weight > 0))\n    return (sample_weight, estimator_weight, estimator_error)",
    "docstring": "Implement a single boost. Perform a single boost according to the discrete SAMME algorithm and return the updated sample weights. Parameters ---------- iboost : int The index of the current boost iteration. X : {array-like, sparse matrix} of shape (n_samples, n_features) The training input samples. y : array-like of shape (n_samples,) The target values (class labels). sample_weight : array-like of shape (n_samples,) The current sample weights. random_state : RandomState instance The RandomState instance used if the base estimator accepts a attribute. Returns ------- sample_weight : array-like of shape (n_samples,) or None The reweighted sample weights. If None then boosting has terminated early. estimator_weight : float The weight for the current boost. If None then boosting has terminated early. estimator_error : float The classification error for the current boost. If None then boosting has terminated early.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\ensemble\\_weight_boosting.py",
    "ast_data": "FunctionDef name:_boost arg:self arg:iboost arg:X arg:y arg:sample_weight arg:random_state arguments arg arg arg arg arg arg Assign Call Call Assign Call If Compare Assign Call Assign Call Assign Compare Assign Call Call If Compare Return return:yes Assign If Compare Call If Compare Call Raise Call Return return:no Assign Call Call If Compare Assign Call Call Compare Return return:yes"
  },
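The core of `_boost` is the discrete SAMME estimator weight. A stand-alone sketch of that formula (the helper name `samme_estimator_weight` is hypothetical, not part of scikit-learn):

```python
import numpy as np

def samme_estimator_weight(estimator_error, n_classes, learning_rate=1.0):
    # Discrete SAMME weight, as computed inside `_boost`:
    # lr * (log((1 - err) / err) + log(K - 1))
    return learning_rate * (
        np.log((1.0 - estimator_error) / estimator_error)
        + np.log(n_classes - 1.0)
    )

# A weak learner at 30% error on a 3-class problem gets a positive weight;
# error >= 1 - 1/K (random guessing) makes the method give up instead.
print(samme_estimator_weight(0.3, n_classes=3))  # ~1.54
```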
  {
    "library": "kornia",
    "name": "RgbaToRgb",
    "source_code": "class RgbaToRgb(Module):\n    ONNX_DEFAULT_INPUTSHAPE: ClassVar[list[int]] = [-1, 4, -1, -1]\n    ONNX_DEFAULT_OUTPUTSHAPE: ClassVar[list[int]] = [-1, 3, -1, -1]\n\n    def forward(self, image: Tensor) -> Tensor:\n        return rgba_to_rgb(image)",
    "docstring": "Convert an image from RGBA to RGB. Remove an alpha channel from RGB image. Returns: RGB version of the image. Shape: - image: :math: - output: :math: Example: >>> input = torch.rand(2, 4, 4, 5) >>> rgba = RgbaToRgb() >>> output = rgba(input) # 2x3x4x5",
    "type": "class",
    "file_path": "kornia\\kornia\\color\\rgb.py",
    "ast_data": "ClassDef name:RgbaToRgb FunctionDef name:forward arg:self arg:image arguments arg arg Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "BootstrapResult",
    "source_code": "@dataclass\nclass BootstrapResult:\n    confidence_interval: ConfidenceInterval\n    bootstrap_distribution: np.ndarray\n    standard_error: float | np.ndarray",
    "docstring": "Result object returned by . Attributes ---------- confidence_interval : ConfidenceInterval The bootstrap confidence interval as an instance of with attributes and . bootstrap_distribution : ndarray The bootstrap distribution, that is, the value of for each resample. The last dimension corresponds with the resamples (e.g. ``). standard_error : float or ndarray The bootstrap standard error, that is, the sample standard deviation of the bootstrap distribution.",
    "type": "class",
    "file_path": "scipy\\scipy\\stats\\_resampling.py",
    "ast_data": "ClassDef name:BootstrapResult"
  },
  {
    "library": "kornia",
    "name": "convert_points_from_homogeneous",
    "source_code": "def convert_points_from_homogeneous(points: Tensor, eps: float=1e-08) -> Tensor:\n    if not isinstance(points, Tensor):\n        raise TypeError(f'Input type is not a Tensor. Got {type(points)}')\n    if len(points.shape) < 2:\n        raise ValueError(f'Input must be at least a 2D tensor. Got {points.shape}')\n    z_vec: Tensor = points[..., -1:]\n    mask: Tensor = torch.abs(z_vec) > eps\n    scale = where(mask, 1.0 / (z_vec + eps), torch.ones_like(z_vec))\n    return scale * points[..., :-1]",
    "docstring": "Convert points from homogeneous to Euclidean space. Args: points: the points to be transformed of shape :math:. eps: to avoid division by zero. Returns: the points in Euclidean space :math:. Examples: >>> input = tensor([[0., 0., 1.]]) >>> convert_points_from_homogeneous(input) tensor([[0., 0.]])",
    "type": "function",
    "file_path": "kornia\\kornia\\geometry\\conversions.py",
    "ast_data": "FunctionDef name:convert_points_from_homogeneous arg:points arg:eps arguments arg arg If Call Raise Call Call If Compare Call Raise Call Compare Call Assign Call Call Return return:yes"
  },
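A minimal sketch of the same dehomogenization rule in plain PyTorch, assuming only the logic visible in `convert_points_from_homogeneous` above:

```python
import torch

# Divide by the last coordinate, guarding near-zero z with eps exactly as
# the function above does.
eps = 1e-8
points = torch.tensor([[0.0, 0.0, 1.0], [2.0, 4.0, 2.0]])
z = points[..., -1:]
scale = torch.where(z.abs() > eps, 1.0 / (z + eps), torch.ones_like(z))
print(scale * points[..., :-1])  # ~tensor([[0., 0.], [1., 2.]])
```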
  {
    "library": "tensorflow",
    "name": "string_handle",
    "source_code": "def string_handle(self, name=None):\n    if name is None:\n        return self._string_handle\n    else:\n        return gen_dataset_ops.iterator_to_string_handle(self._iterator_resource, name=name)",
    "docstring": "Returns a string-valued that represents this iterator. Args: name: (Optional.) A name for the created operation. Returns: A scalar of type .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\data\\ops\\iterator_ops.py",
    "ast_data": "FunctionDef name:string_handle arg:self arg:name arguments arg arg If Compare Return return:yes Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "validate_exclusive_idx",
    "source_code": "def validate_exclusive_idx(rank: int, ex_idx: int):\n    assert isinstance(ex_idx, Dim)\n    assert isinstance(rank, Dim)\n    assert ex_idx > 0 and ex_idx <= rank",
    "docstring": "Validates that ex_idx is a valid exclusive index for the given shape.",
    "type": "function",
    "file_path": "pytorch\\torch\\_prims_common\\__init__.py",
    "ast_data": "FunctionDef name:validate_exclusive_idx arg:rank arg:ex_idx arguments arg arg Call Call BoolOp Compare Compare"
  },
  {
    "library": "tensorflow",
    "name": "scan",
    "source_code": "def scan(self, initial_state, scan_func, name=None) -> 'DatasetV2':\n    from tensorflow.python.data.ops import scan_op\n    return scan_op._scan(self, initial_state, scan_func, name=name)",
    "docstring": "A transformation that scans a function across an input dataset. This transformation is a stateful relative of . In addition to mapping across the elements of the input dataset, accumulates one or more state tensors, whose initial values are . >>> dataset = tf.data.Dataset.range(10) >>> initial_state = tf.constant(0, dtype=tf.int64) >>> scan_func = lambda state, i: (state + i, state + i) >>> dataset = dataset.scan(initial_state=initial_state, scan_func=scan_func) >>> [a.item() for a in dataset.as_numpy_iterator()] [0, 1, 3, 6, 10, 15, 21, 28, 36, 45] Args: initial_state: A nested structure of tensors, representing the initial state of the accumulator. scan_func: A function that maps to . It must take two arguments and return a pair of nested structures of tensors. The must match the structure of . name: (Optional.) A name for the tf.data operation. Returns: A new with the transformation applied as described above.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\data\\ops\\dataset_ops.py",
    "ast_data": "FunctionDef name:scan arg:self arg:initial_state arg:scan_func arg:name arguments arg arg arg arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "lookup",
    "source_code": "def lookup(self, obj):\n    for registered in self._registry:\n        if isinstance(obj, registered):\n            return self._registry[registered]\n    raise LookupError(f'{type(obj)} has not been registered.')",
    "docstring": "Looks up 'obj'. Args: obj: The object to lookup within the registry. Returns: Value for 'obj' in the registry if found. Raises: LookupError: if 'obj' has not been registered.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\autograph\\utils\\type_registry.py",
    "ast_data": "FunctionDef name:lookup arg:self arg:obj arguments arg arg For If Call Return return:yes Raise Call Call"
  },
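The lookup walks registered base classes in insertion order and returns the first `isinstance` match. A self-contained stand-in (the `TypeRegistry` class name here is hypothetical, mirroring the method above) makes the semantics concrete:

```python
# Minimal sketch of isinstance-based registry lookup: the first registered
# base class that matches wins.
class TypeRegistry:
    def __init__(self):
        self._registry = {}

    def register(self, cls, value):
        self._registry[cls] = value

    def lookup(self, obj):
        for registered in self._registry:
            if isinstance(obj, registered):
                return self._registry[registered]
        raise LookupError(f'{type(obj)} has not been registered.')

reg = TypeRegistry()
reg.register(int, 'integral')
print(reg.lookup(True))  # 'integral' -- bool is a subclass of int
```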
  {
    "library": "django",
    "name": "cache_name",
    "source_code": "@cached_property\ndef cache_name(self):\n    return self.accessor_name",
    "docstring": "Return the name of the cache key to use for storing an instance of the forward model on the reverse model.",
    "type": "method",
    "file_path": "django\\django\\db\\models\\fields\\reverse_related.py",
    "ast_data": "FunctionDef name:cache_name arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "stat",
    "source_code": "@tf_export(v1=['gfile.Stat'])\ndef stat(filename):\n    return stat_v2(filename)",
    "docstring": "Returns file statistics for a given path. Args: filename: string, path to a file Returns: FileStatistics struct that contains information about the path Raises: errors.OpError: If the operation fails.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\lib\\io\\file_io.py",
    "ast_data": "FunctionDef name:stat arg:filename arguments arg Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_make_replica_execution_function",
    "source_code": "def _make_replica_execution_function(model, mode):\n    if mode == ModeKeys.TRAIN:\n        func = model.train_on_batch\n    elif mode == ModeKeys.TEST:\n        func = model.test_on_batch\n    else:\n\n        def predict_on_batch(x, y=None, sample_weights=None):\n            del y, sample_weights\n            return model.predict_on_batch(x)\n        func = predict_on_batch\n    if mode != ModeKeys.PREDICT:\n        func = functools.partial(func, reset_metrics=False)\n    return func",
    "docstring": "A single step of the distributed execution on a replica.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\distribute\\distributed_training_utils_v1.py",
    "ast_data": "FunctionDef name:_make_replica_execution_function arg:model arg:mode arguments arg arg If Compare Assign If Compare Assign FunctionDef name:predict_on_batch arg:x arg:y arg:sample_weights arguments arg arg arg Return return:yes Call Assign If Compare Assign Call Return return:yes"
  },
  {
    "library": "scipy",
    "name": "_batch_generator",
    "source_code": "def _batch_generator(iterable, batch):\n    iterator = iter(iterable)\n    if batch <= 0:\n        raise ValueError('`batch` must be positive.')\n    z = [item for i, item in zip(range(batch), iterator)]\n    while z:\n        yield z\n        z = [item for i, item in zip(range(batch), iterator)]",
    "docstring": "A generator that yields batches of elements from an iterable",
    "type": "function",
    "file_path": "scipy\\scipy\\stats\\_resampling.py",
    "ast_data": "FunctionDef name:_batch_generator arg:iterable arg:batch arguments arg arg Assign Call If Compare Raise Call Assign Call Call While Assign Call Call"
  },
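A runnable copy of the generator above shows the batching behavior, including the short final batch:

```python
# Self-contained copy of `_batch_generator`: yields full batches until the
# iterable is exhausted, then whatever remains.
def batch_generator(iterable, batch):
    iterator = iter(iterable)
    if batch <= 0:
        raise ValueError('`batch` must be positive.')
    z = [item for _, item in zip(range(batch), iterator)]
    while z:
        yield z
        z = [item for _, item in zip(range(batch), iterator)]

print(list(batch_generator(range(7), 3)))  # [[0, 1, 2], [3, 4, 5], [6]]
```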
  {
    "library": "pytorch",
    "name": "call_kernel",
    "source_code": "def call_kernel(self, kernel_name):\n    assert kernel_name == self.kernel_name\n    V.graph.wrapper_code.write_triton_header_once()\n    _, call_args, _, arg_types = self.kernels[0].args.python_argdefs()\n    for kernel in self.kernels[1:]:\n        _, other_call_args, _, other_arg_types = kernel.args.python_argdefs()\n        assert call_args == other_call_args, (call_args, other_call_args)\n        assert arg_types == other_arg_types\n    if V.graph.cpp_wrapper and (not config.triton.autotune_at_compile_time):\n        kernel_name = MultiKernelCall.lookup_choice(self.kernel_name)\n    self.kernels[0].add_numel_to_call_args(kernel_name, call_args, arg_types)\n    for ws in self.kernels[0].args.workspace_args:\n        V.graph.wrapper_code.generate_workspace_allocation(ws)\n    V.graph.wrapper_code.generate_kernel_call(kernel_name, call_args, arg_types=arg_types)\n    for ws in reversed(self.kernels[0].args.workspace_args):\n        V.graph.wrapper_code.generate_workspace_deallocation(ws)",
    "docstring": "Collect the union of arguments from all subkernels as the arguments for the multi-kernel.",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\codegen\\multi_kernel.py",
    "ast_data": "FunctionDef name:call_kernel arg:self arg:kernel_name arguments arg arg Compare Call Assign Call For Assign Call Compare Compare If BoolOp Assign Call Call For Call Call For Call Call"
  },
  {
    "library": "pytorch",
    "name": "set_rotating_buffer_size",
    "source_code": "def set_rotating_buffer_size(buffer_size: int) -> None:\n    return torch._C._cuda_tunableop_set_rotating_buffer_size(buffer_size)",
    "docstring": "Set rotating buffer size to this value in MB, if the buffer size is greater than zero. If less than zero, query L2 cache size. If equal to zero, means deactivate rotating buffer.",
    "type": "function",
    "file_path": "pytorch\\torch\\cuda\\tunable.py",
    "ast_data": "FunctionDef name:set_rotating_buffer_size arg:buffer_size arguments arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_flatten_first_two_dims",
    "source_code": "def _flatten_first_two_dims(x):\n    old_shape = array_ops.shape(x)\n    new_shape = array_ops.concat([[old_shape[0] * old_shape[1]], old_shape[2:]], axis=0)\n    return array_ops.reshape(x, new_shape)",
    "docstring": "Flattens the first two dimensions of x into a single dimension.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\parallel_for\\control_flow_ops.py",
    "ast_data": "FunctionDef name:_flatten_first_two_dims arg:x arguments arg Assign Call Assign Call Return return:yes Call"
  },
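The same reshape in plain NumPy, as a sketch of what the TensorFlow helper computes: merge the two leading axes while leaving the rest untouched.

```python
import numpy as np

# Merge the first two axes of a (4, 5, 3, 2) array into one axis of 20.
x = np.zeros((4, 5, 3, 2))
new_shape = (x.shape[0] * x.shape[1],) + x.shape[2:]
print(np.reshape(x, new_shape).shape)  # (20, 3, 2)
```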
  {
    "library": "tensorflow",
    "name": "element_spec",
    "source_code": "@property\ndef element_spec(self):\n    if not isinstance(self._dataset_fn, tf_function.ConcreteFunction):\n        raise NotImplementedError('`element_spec` is not supported when the `dataset_fn` is not a `ConcreteFunction`.')\n    return self._dataset_fn.structured_outputs.element_spec",
    "docstring": "The type specification of an element of this dataset. This property is subject to change without notice.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\coordinator\\values.py",
    "ast_data": "FunctionDef name:element_spec arg:self arguments arg If Call Raise Call Return return:yes"
  },
  {
    "library": "django",
    "name": "geotransform",
    "source_code": "@geotransform.setter\ndef geotransform(self, values):\n    if len(values) != 6 or not all((isinstance(x, (int, float)) for x in values)):\n        raise ValueError('Geotransform must consist of 6 numeric values.')\n    values = (c_double * 6)(*values)\n    capi.set_ds_geotransform(self._ptr, byref(values))\n    self._flush()",
    "docstring": "Set the geotransform for the data source.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\gdal\\raster\\source.py",
    "ast_data": "FunctionDef name:geotransform arg:self arg:values arguments arg arg If BoolOp Compare Call Call Call Raise Call Assign Call Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "fuse_convtranspose_bn",
    "source_code": "def fuse_convtranspose_bn(is_qat, convt, bn):\n    assert convt.training == bn.training, 'ConvTranspose and BN both must be in the same mode (train or eval).'\n    if is_qat:\n        raise Exception('Fusing ConvTranspose+BatchNorm not yet supported in QAT.')\n    else:\n        return nn.utils.fusion.fuse_conv_bn_eval(convt, bn, transpose=True)",
    "docstring": "Return the fused ConvTranspose and bn modules. Given ConvTranspose and bn modules, fuses them and returns the fused module Args: convt: Module instance of type ConvTransposeNd bn: BatchNormNd instance that needs to be fused with the linear layer. batch norm N should match the ConvTranspose N Examples:: >>> m1 = nn.ConvTranspose2d(10, 20, 3) >>> b1 = nn.BatchNorm2d(20) >>> # xdoctest: +SKIP >>> m2 = fuse_convtranspose_bn(m1, b1)",
    "type": "function",
    "file_path": "pytorch\\torch\\ao\\quantization\\fuser_method_mappings.py",
    "ast_data": "FunctionDef name:fuse_convtranspose_bn arg:is_qat arg:convt arg:bn arguments arg arg arg Compare If Raise Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_construct_concrete_function",
    "source_code": "def _construct_concrete_function(func, output_graph_def, converted_input_indices):\n    input_tensors = func.graph.internal_captures\n    converted_inputs = object_identity.ObjectIdentitySet([input_tensors[index] for index in converted_input_indices])\n    not_converted_inputs = [tensor for tensor in func.inputs if tensor not in converted_inputs]\n    not_converted_inputs_map = {tensor.name: tensor for tensor in not_converted_inputs}\n    new_input_names = [tensor.name for tensor in not_converted_inputs]\n    new_output_names = [tensor.name for tensor in func.outputs]\n    for f in output_graph_def.library.function:\n        if context.context().has_function(f.signature.name):\n            context.context().remove_function(f.signature.name)\n    new_func = wrap_function.function_from_graph_def(output_graph_def, new_input_names, new_output_names)\n    for input_tensor in new_func.inputs:\n        input_tensor.set_shape(not_converted_inputs_map[input_tensor.name].shape)\n    return new_func",
    "docstring": "Constructs a concrete function from the . Args: func: ConcreteFunction output_graph_def: GraphDef proto. converted_input_indices: Set of integers of input indices that were converted to constants. Returns: ConcreteFunction.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\convert_to_constants.py",
    "ast_data": "FunctionDef name:_construct_concrete_function arg:func arg:output_graph_def arg:converted_input_indices arguments arg arg arg Assign Assign Call Assign Compare Assign Assign Assign For If Call Call Call Call Assign Call For Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "clear_per_iter_info",
    "source_code": "def clear_per_iter_info(self) -> None:\n    self.bucket_indices_seen.clear()\n    self.bucket_index_to_future.clear()\n    self.bucket_index_to_bucket.clear()",
    "docstring": "Clear the data structures that are modified per-iteration. This function should be called at the end of an iteration.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\optim\\zero_redundancy_optimizer.py",
    "ast_data": "FunctionDef name:clear_per_iter_info arg:self arguments arg Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_disallow_remote_value_as_input",
    "source_code": "def _disallow_remote_value_as_input(structured):\n\n    def _raise_if_remote_value(x):\n        if isinstance(x, RemoteValue):\n            raise ValueError('`tf.distribute.experimental.coordinator.RemoteValue` used as an input to scheduled function is not yet supported.')\n    nest.map_structure(_raise_if_remote_value, structured)",
    "docstring": "Raises if any element of is a RemoteValue.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\coordinator\\cluster_coordinator.py",
    "ast_data": "FunctionDef name:_disallow_remote_value_as_input arg:structured arguments arg FunctionDef name:_raise_if_remote_value arg:x arguments arg If Call Raise Call Call"
  },
  {
    "library": "pytorch",
    "name": "is_graphable_type",
    "source_code": "def is_graphable_type(typ) -> bool:\n    return issubclass(typ, torch.fx.node.base_types)",
    "docstring": "Return whether the given type is graphable",
    "type": "function",
    "file_path": "pytorch\\torch\\_higher_order_ops\\flat_apply.py",
    "ast_data": "FunctionDef name:is_graphable_type arg:typ arguments arg Return return:yes Call"
  },
  {
    "library": "numpy",
    "name": "_broadcast_shape",
    "source_code": "def _broadcast_shape(*args):\n    b = np.broadcast(*args[:32])\n    for pos in range(32, len(args), 31):\n        b = broadcast_to(0, b.shape)\n        b = np.broadcast(b, *args[pos:pos + 31])\n    return b.shape",
    "docstring": "Returns the shape of the arrays that would result from broadcasting the supplied arrays against each other.",
    "type": "function",
    "file_path": "numpy\\numpy\\lib\\_stride_tricks_impl.py",
    "ast_data": "FunctionDef name:_broadcast_shape arguments arg Assign Call For Call Call Assign Call Assign Call Return return:yes"
  },
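The chunking above (groups of 32, then 31) works around the limit on how many operands `np.broadcast` accepts per call; the resulting shape itself is easy to check directly on a small case:

```python
import numpy as np

# Broadcasting (8, 1, 5), (1, 3, 5) and (3, 1) yields (8, 3, 5):
a = np.zeros((8, 1, 5))
b = np.zeros((1, 3, 5))
c = np.zeros((3, 1))
print(np.broadcast(a, b, c).shape)  # (8, 3, 5)
```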
  {
    "library": "scikit-learn",
    "name": "_set_covariance",
    "source_code": "def _set_covariance(self, covariance):\n    covariance = check_array(covariance)\n    self.covariance_ = covariance\n    if self.store_precision:\n        self.precision_ = linalg.pinvh(covariance, check_finite=False)\n    else:\n        self.precision_ = None",
    "docstring": "Saves the covariance and precision estimates Storage is done accordingly to . Precision stored only if invertible. Parameters ---------- covariance : array-like of shape (n_features, n_features) Estimated covariance matrix to be stored, and from which precision is computed.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\covariance\\_empirical_covariance.py",
    "ast_data": "FunctionDef name:_set_covariance arg:self arg:covariance arguments arg arg Assign Call Assign If Assign Call Assign"
  },
  {
    "library": "django",
    "name": "y",
    "source_code": "@property\ndef y(self):\n    return capi.gety(self.ptr, 0)",
    "docstring": "Return the Y coordinate for this Point.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\gdal\\geometries.py",
    "ast_data": "FunctionDef name:y arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "numpy",
    "name": "check_libs",
    "source_code": "def check_libs(self, lib_dirs, libs, opt_libs=[]):\n    exts = self.library_extensions()\n    info = None\n    for ext in exts:\n        info = self._check_libs(lib_dirs, libs, opt_libs, [ext])\n        if info is not None:\n            break\n    if not info:\n        log.info('  libraries %s not found in %s', ','.join(libs), lib_dirs)\n    return info",
    "docstring": "If static or shared libraries are available then return their info dictionary. Checks for all libraries as shared libraries first, then static (or vice versa if self.search_static_first is True).",
    "type": "method",
    "file_path": "numpy\\numpy\\distutils\\system_info.py",
    "ast_data": "FunctionDef name:check_libs arg:self arg:lib_dirs arg:libs arg:opt_libs arguments arg arg arg arg Assign Call Assign For Assign Call If Compare If Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "determine_observer_insert_points",
    "source_code": "def determine_observer_insert_points(self, prepared_fx_model: GraphModule) -> dict[str, dict[str, Any]]:\n    obs_ctr = ModelReportObserver\n    obs_fqn_to_info: dict[str, dict[str, Any]] = {}\n    for fqn, module in prepared_fx_model.named_modules():\n        if self._supports_insertion(module):\n            targeted_node = self._get_targeting_node(prepared_fx_model, fqn)\n            pre_obs_fqn = fqn + '.' + self.DEFAULT_PRE_OBSERVER_NAME\n            obs_fqn_to_info[pre_obs_fqn] = {DETECTOR_TARGET_NODE_KEY: targeted_node, DETECTOR_OBS_TO_INSERT_KEY: obs_ctr(ch_axis=self.ch_axis, comp_percentile=self.reference_percentile), DETECTOR_IS_POST_OBS_KEY: False, DETECTOR_OBS_ARGS_KEY: targeted_node.args}\n    return obs_fqn_to_info",
    "docstring": "Determines where observers need to be inserted for the Outlier Detector. For this detector, we want to place observers in front of supported layers. Currently inserts observers for: all layers that do not have children (leaf level layers) Args: prepared_fx_model (GraphModule): The prepared Fx GraphModule Returns a Dict mapping from unique observer fqns (where we want to insert them) to a Dict with: key \"target_node\" -> the node we are trying to observe with this observer (torch.fx.node.Node) key \"observer_to_insert\" -> the observer we wish to insert (ObserverBase) key \"is_post_observer\" -> True if this is meant to be a post-observer for target_node, False if pre-observer key \"observer_args\" -> The arguments that are meant to be passed into the observer",
    "type": "method",
    "file_path": "pytorch\\torch\\ao\\quantization\\fx\\_model_report\\detector.py",
    "ast_data": "FunctionDef name:determine_observer_insert_points arg:self arg:prepared_fx_model arguments arg arg Assign For Call If Call Assign Call Assign Assign Call Return return:yes"
  },
  {
    "library": "seaborn",
    "name": "_palette_without_hue_backcompat",
    "source_code": "def _palette_without_hue_backcompat(self, palette, hue_order):\n    if 'hue' not in self.variables and palette is not None:\n        msg = f'\\n\\nPassing `palette` without assigning `hue` is deprecated and will be removed in v0.14.0. Assign the `{self.orient}` variable to `hue` and set `legend=False` for the same effect.\\n'\n        warnings.warn(msg, FutureWarning, stacklevel=3)\n        self.legend = False\n        self.plot_data['hue'] = self.plot_data[self.orient]\n        self.variables['hue'] = self.variables.get(self.orient)\n        self.var_types['hue'] = self.var_types.get(self.orient)\n        hue_order = self.var_levels.get(self.orient)\n        self._var_levels.pop('hue', None)\n    return hue_order",
    "docstring": "Provide one cycle where palette= implies hue= when not provided",
    "type": "method",
    "file_path": "seaborn\\seaborn\\categorical.py",
    "ast_data": "FunctionDef name:_palette_without_hue_backcompat arg:self arg:palette arg:hue_order arguments arg arg arg If BoolOp Compare Compare Assign Call Assign Assign Assign Call Assign Call Assign Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "output_shapes",
    "source_code": "@property\ndef output_shapes(self):\n    return nest.map_structure(lambda component_spec: component_spec._to_legacy_output_shapes(), self._element_spec)",
    "docstring": "Returns the shape of each component of an element of this iterator. Returns: A nested structure of objects corresponding to each component of an element of this dataset.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\input_lib.py",
    "ast_data": "FunctionDef name:output_shapes arg:self arguments arg Return return:yes Call arguments arg Call"
  },
  {
    "library": "pytorch",
    "name": "init_state",
    "source_code": "def init_state(self) -> None:\n    for param in self.named_parameters.values():\n        if param.requires_grad:\n            t = torch.zeros_like(param)\n            param.grad = torch.autograd.Variable(t)\n    self.step(closure=None)",
    "docstring": "Run a dummy optimizer step, which allows to initialize optimizer state because we do lazy init for most optimizers. This allows doing in-place loading of optimizer state from a checkpoint.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\optim\\named_optimizer.py",
    "ast_data": "FunctionDef name:init_state arg:self arguments arg For Call If Assign Call Assign Call Call"
  },
  {
    "library": "tensorflow",
    "name": "prefetch",
    "source_code": "def prefetch(self, buffer_size, name=None) -> 'DatasetV2':\n    return prefetch_op._prefetch(self, buffer_size, name=name)",
    "docstring": "Creates a that prefetches elements from this dataset. Most dataset input pipelines should end with a call to . This allows later elements to be prepared while the current element is being processed. This often improves latency and throughput, at the cost of using additional memory to store prefetched elements. Note: Like other methods, prefetch operates on the elements of the input dataset. It has no concept of examples vs. batches. will prefetch two elements (2 examples), while will prefetch 2 elements (2 batches, of 20 examples each). >>> dataset = tf.data.Dataset.range(3) >>> dataset = dataset.prefetch(2) >>> [a.item() for a in dataset.as_numpy_iterator()] [0, 1, 2] Args: buffer_size: A scalar , representing the maximum number of elements that will be buffered when prefetching. If the value is used, then the buffer size is dynamically tuned. name: Optional. A name for the tf.data transformation. Returns: A new with the transformation applied as described above.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\data\\ops\\dataset_ops.py",
    "ast_data": "FunctionDef name:prefetch arg:self arg:buffer_size arg:name arguments arg arg arg Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "IntegrationWarning",
    "source_code": "class IntegrationWarning(UserWarning):\n    pass",
    "docstring": "Warning on issues during integration.",
    "type": "class",
    "file_path": "scipy\\scipy\\integrate\\_quadpack_py.py",
    "ast_data": "ClassDef name:IntegrationWarning"
  },
  {
    "library": "tensorflow",
    "name": "_get_broadcast_num_row_partitions",
    "source_code": "def _get_broadcast_num_row_partitions(a: DynamicRaggedShape, b: DynamicRaggedShape):\n    if a.num_row_partitions == 0 and b.num_row_partitions == 0:\n        return 0\n    expanded_num_row_partitions_a = a.num_row_partitions + max(0, b.rank - a.rank)\n    expanded_num_row_partitions_b = b.num_row_partitions + max(0, a.rank - b.rank)\n    if a.num_row_partitions == 0:\n        return expanded_num_row_partitions_b\n    if b.num_row_partitions == 0:\n        return expanded_num_row_partitions_a\n    return max(expanded_num_row_partitions_a, expanded_num_row_partitions_b)",
    "docstring": "Returns broadcast_dynamic_shape(a, b).num_row_partitions.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\dynamic_ragged_shape.py",
    "ast_data": "FunctionDef name:_get_broadcast_num_row_partitions arg:a arg:b arguments arg arg If BoolOp Compare Compare Return return:yes Assign Call Assign Call If Compare Return return:yes If Compare Return return:yes Return return:yes Call"
  },
  {
    "library": "kornia",
    "name": "get_hysteresis_kernel",
    "source_code": "def get_hysteresis_kernel(device: Optional[Device]=None, dtype: Optional[Dtype]=None) -> Tensor:\n    return tensor([[[[0.0, 0.0, 0.0], [0.0, 0.0, 1.0], [0.0, 0.0, 0.0]]], [[[0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 1.0]]], [[[0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 1.0, 0.0]]], [[[0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [1.0, 0.0, 0.0]]], [[[0.0, 0.0, 0.0], [1.0, 0.0, 0.0], [0.0, 0.0, 0.0]]], [[[1.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0]]], [[[0.0, 1.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0]]], [[[0.0, 0.0, 1.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0]]]], device=device, dtype=dtype)",
    "docstring": "Return the 3x3 kernels for the Canny hysteresis.",
    "type": "function",
    "file_path": "kornia\\kornia\\filters\\kernels.py",
    "ast_data": "FunctionDef name:get_hysteresis_kernel arg:device arg:dtype arguments arg arg Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "lut",
    "source_code": "@property\ndef lut(self):\n    if not self._isinit:\n        self._init()\n    lut = np.copy(self._lut)\n    if self.shape == 'circle' or self.shape == 'circleignore':\n        n = np.linspace(-1, 1, self.N)\n        m = np.linspace(-1, 1, self.M)\n        radii_sqr = (n ** 2)[:, np.newaxis] + (m ** 2)[np.newaxis, :]\n        mask_outside = radii_sqr > 1\n        lut[mask_outside, 3] = 0\n    return lut",
    "docstring": "For external access to the lut, i.e. for displaying the cmap. For circular colormaps this returns a lut with a circular mask. Internal functions (such as to_rgb()) should use _lut which stores the lut without a circular mask A lut without the circular mask is needed in to_rgb() because the conversion from floats to ints results in some some pixel-requests just outside of the circular mask",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\colors.py",
    "ast_data": "FunctionDef name:lut arg:self arguments arg If Call Assign Call If BoolOp Compare Compare Assign Call Assign Call Assign Assign Compare Assign Return return:yes"
  },
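A NumPy sketch of the circular masking step above, assuming a hypothetical N x M RGBA lookup table: squared radii are computed on the grid and alpha is zeroed wherever the point falls outside the unit circle.

```python
import numpy as np

# Squared radii over an N x M grid in [-1, 1] x [-1, 1].
N, M = 5, 5
n = np.linspace(-1, 1, N)
m = np.linspace(-1, 1, M)
radii_sqr = (n**2)[:, np.newaxis] + (m**2)[np.newaxis, :]

lut = np.ones((N, M, 4))   # hypothetical RGBA lookup table
lut[radii_sqr > 1, 3] = 0  # transparent outside the circle
print(lut[..., 3].astype(int))  # corners are 0, the rest 1
```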
  {
    "library": "sphinx",
    "name": "error_on_html_sidebars_string_values",
    "source_code": "def error_on_html_sidebars_string_values(app: Sphinx, config: Config) -> None:\n    errors = {}\n    for pattern, pat_sidebars in config.html_sidebars.items():\n        if isinstance(pat_sidebars, str):\n            errors[pattern] = [pat_sidebars]\n    if not errors:\n        return\n    msg = __(\"Values in 'html_sidebars' must be a list of strings. At least one pattern has a string value: %s. Change to `html_sidebars = %r`.\")\n    bad_patterns = ', '.join(map(repr, errors))\n    fixed = config.html_sidebars | errors\n    raise ConfigError(msg % (bad_patterns, fixed))",
    "docstring": "Support removed in Sphinx 2.",
    "type": "function",
    "file_path": "sphinx\\sphinx\\builders\\html\\__init__.py",
    "ast_data": "FunctionDef name:error_on_html_sidebars_string_values arg:app arg:config arguments arg arg Assign For Call If Call Assign If Return return:no Assign Call Assign Call Call Assign Raise Call"
  },
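A conf.py fragment illustrating the rule this check enforces: every value in `html_sidebars` must be a list of strings, never a bare string.

```python
# Hypothetical conf.py fragment. Sidebar template names are the standard
# Sphinx ones; any string value (instead of a list) raises ConfigError.
html_sidebars = {
    '**': ['localtoc.html', 'searchbox.html'],  # OK: list of strings
}
# html_sidebars = {'**': 'localtoc.html'}       # string value -> ConfigError
```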
  {
    "library": "matplotlib",
    "name": "DateFormatter",
    "source_code": "class DateFormatter(ticker.Formatter):\n\n    def __init__(self, fmt, tz=None, *, usetex=None):\n        self.tz = _get_tzinfo(tz)\n        self.fmt = fmt\n        self._usetex = mpl._val_or_rc(usetex, 'text.usetex')\n\n    def __call__(self, x, pos=0):\n        result = num2date(x, self.tz).strftime(self.fmt)\n        return _wrap_in_tex(result) if self._usetex else result\n\n    def set_tzinfo(self, tz):\n        self.tz = _get_tzinfo(tz)",
    "docstring": "Format a tick (in days since the epoch) with a format string.",
    "type": "class",
    "file_path": "matplotlib\\lib\\matplotlib\\dates.py",
    "ast_data": "ClassDef name:DateFormatter FunctionDef name:__init__ arg:self arg:fmt arg:tz arguments arg arg arg arg Assign Call Assign Assign Call FunctionDef name:__call__ arg:self arg:x arg:pos arguments arg arg arg Assign Call Call Return return:yes Call FunctionDef name:set_tzinfo arg:self arg:tz arguments arg arg Assign Call"
  },
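Typical usage is to attach the formatter to an axis so tick values (days since the epoch) render through the given strftime format; a minimal sketch:

```python
import datetime

import matplotlib.dates as mdates
import matplotlib.pyplot as plt

# Plot against dates, then format the x-axis ticks as e.g. "Jan 08".
fig, ax = plt.subplots()
days = [datetime.date(2024, 1, d) for d in (1, 8, 15)]
ax.plot(days, [1, 3, 2])
ax.xaxis.set_major_formatter(mdates.DateFormatter('%b %d'))
fig.autofmt_xdate()  # rotate tick labels to avoid overlap
```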
  {
    "library": "django",
    "name": "F",
    "source_code": "def F(self):\n    return MONTHS[self.data.month]",
    "docstring": "Month, textual, long; e.g. 'January'",
    "type": "method",
    "file_path": "django\\django\\utils\\dateformat.py",
    "ast_data": "FunctionDef name:F arg:self arguments arg Return return:yes"
  },
  {
    "library": "scipy",
    "name": "eigvalsh_tridiagonal",
    "source_code": "@_apply_over_batch(('d', 1), ('e', 1))\ndef eigvalsh_tridiagonal(d, e, select='a', select_range=None, check_finite=True, tol=0.0, lapack_driver='auto'):\n    return eigh_tridiagonal(d, e, eigvals_only=True, select=select, select_range=select_range, check_finite=check_finite, tol=tol, lapack_driver=lapack_driver)",
    "docstring": "Solve eigenvalue problem for a real symmetric tridiagonal matrix. Find eigenvalues of `de`. Parameters ---------- d : ndarray, shape (ndim,) The diagonal elements of the array. e : ndarray, shape (ndim-1,) The off-diagonal elements of the array. select : {'a', 'v', 'i'}, optional Which eigenvalues to calculate ====== ======================================== select calculated ====== ======================================== 'a' All eigenvalues 'v' Eigenvalues in the interval (min, max] 'i' Eigenvalues with indices min >> import numpy as np >>> from scipy.linalg import eigvalsh_tridiagonal, eigvalsh >>> d = 3*np.ones(4) >>> e = -1*np.ones(3) >>> w = eigvalsh_tridiagonal(d, e) >>> A = np.diag(d) + np.diag(e, k=1) + np.diag(e, k=-1) >>> w2 = eigvalsh(A) # Verify with other eigenvalue routines >>> np.allclose(w - w2, np.zeros(4)) True",
    "type": "function",
    "file_path": "scipy\\scipy\\linalg\\_decomp.py",
    "ast_data": "FunctionDef name:eigvalsh_tridiagonal arg:d arg:e arg:select arg:select_range arg:check_finite arg:tol arg:lapack_driver arguments arg arg arg arg arg arg arg Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "reset_state",
    "source_code": "def reset_state(self):\n    raise NotImplementedError",
    "docstring": "Resets the statistics of the preprocessing layer.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\base_preprocessing_layer.py",
    "ast_data": "FunctionDef name:reset_state arg:self arguments arg Raise"
  },
  {
    "library": "tensorflow",
    "name": "_filter_returned_ops",
    "source_code": "def _filter_returned_ops(fn):\n    returned_ops = {}\n\n    def wrap_and_filter_returned_ops(*args, **kwargs):\n        outputs = fn(*args, **kwargs)\n        flat_outputs = nest.flatten(outputs)\n        for n in range(len(flat_outputs)):\n            output = flat_outputs[n]\n            if isinstance(output, ops.Operation):\n                returned_ops[n] = output\n                flat_outputs[n] = None\n        return nest.pack_sequence_as(outputs, flat_outputs)\n    return (wrap_and_filter_returned_ops, returned_ops)",
    "docstring": "Filtering out any ops returned by function. Args: fn: a function Returns: A tuple of ( Wrapped function that returns in place of any ops, dict that maps the index in the flat output structure to the returned op )",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\wrap_function.py",
    "ast_data": "FunctionDef name:_filter_returned_ops arg:fn arguments arg Assign FunctionDef name:wrap_and_filter_returned_ops arguments arg arg Assign Call Assign Call For Call Call Assign If Call Assign Assign Return return:yes Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "OnRunEndRequest",
    "source_code": "class OnRunEndRequest:\n\n    def __init__(self, performed_action, run_metadata=None, client_graph_def=None, tf_error=None):\n        _check_type(performed_action, str)\n        self.performed_action = performed_action\n        if run_metadata is not None:\n            _check_type(run_metadata, config_pb2.RunMetadata)\n        self.run_metadata = run_metadata\n        self.client_graph_def = client_graph_def\n        self.tf_error = tf_error",
    "docstring": "Request to an on-run-end callback. The callback is invoked immediately before the wrapped run() call ends.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\debug\\wrappers\\framework.py",
    "ast_data": "ClassDef name:OnRunEndRequest FunctionDef name:__init__ arg:self arg:performed_action arg:run_metadata arg:client_graph_def arg:tf_error arguments arg arg arg arg arg Call Assign If Compare Call Assign Assign Assign"
  },
  {
    "library": "pytorch",
    "name": "InplaceCopyFallback",
    "source_code": "class InplaceCopyFallback(ExternKernel):\n\n    def codegen(self, wrapper) -> None:\n        dst, src, non_blocking = self.codegen_args()\n        wrapper.codegen_device_copy(src, dst, non_blocking)\n\n    def should_allocate(self) -> bool:\n        return False\n\n    def get_mutation_names(self):\n        return [self.inputs[0].get_name()]\n\n    def get_unbacked_symbol_defs(self) -> OrderedSet[sympy.Symbol]:\n        return OrderedSet()\n\n    def __init__(self, layout, inputs, constant_args) -> None:\n        super().__init__(None, layout, inputs, constant_args, python_kernel_name='aten.copy_', cpp_kernel_name='aoti_torch_copy_')\n        V.graph.mark_buffer_mutated(inputs[0].get_name())\n        self.name = V.graph.register_buffer(self)\n        V.graph.register_operation(self)\n\n    @classmethod\n    def create(cls, dst, src, non_blocking: bool=False):\n        inputs = [cls.realize_input(t) for t in [dst, src]]\n        constant_args = (non_blocking,)\n        result = InplaceCopyFallback(NoneLayout(device=dst.get_device()), inputs, constant_args)\n        return result",
    "docstring": "This needs to be a custom class to handle mutation properly",
    "type": "class",
    "file_path": "pytorch\\torch\\_inductor\\ir.py",
    "ast_data": "ClassDef name:InplaceCopyFallback FunctionDef name:codegen arg:self arg:wrapper arguments arg arg Assign Call Call FunctionDef name:should_allocate arg:self arguments arg Return return:yes FunctionDef name:get_mutation_names arg:self arguments arg Return return:yes Call FunctionDef name:get_unbacked_symbol_defs arg:self arguments arg Return return:yes Call FunctionDef name:__init__ arg:self arg:layout arg:inputs arg:constant_args arguments arg arg arg arg Call Call Call Call Assign Call Call FunctionDef name:create arg:cls arg:dst arg:src arg:non_blocking arguments arg arg arg arg Assign Call Assign Assign Call Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "MemPoolContext",
    "source_code": "class MemPoolContext(_MemPoolContext):\n\n    def __init__(self, pool: _MemPool):\n        super().__init__(pool)\n\n    @staticmethod\n    def active_pool() -> Optional[_MemPool]:\n        return _MemPoolContext.active_pool()",
    "docstring": "MemPoolContext holds the currently active pool and stashes the previous pool. On deletion it makes the previous pool active. Args: pool(torch.cuda.MemPool): a MemPool object to be made active so that allocations route to this pool.",
    "type": "class",
    "file_path": "pytorch\\torch\\cuda\\memory.py",
    "ast_data": "ClassDef name:MemPoolContext FunctionDef name:__init__ arg:self arg:pool arguments arg arg Call Call FunctionDef name:active_pool arguments Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "ModeKeyMap",
    "source_code": "class ModeKeyMap(collections_abc.Mapping):\n\n    def __init__(self, **kwargs):\n        self._internal_dict = {}\n        self._keys = []\n        for key in kwargs:\n            self._keys.append(key)\n            dict_key = self._get_internal_key(key)\n            if dict_key in self._internal_dict:\n                raise ValueError('Error creating ModeKeyMap. Multiple keys/values found for {} mode.'.format(dict_key))\n            self._internal_dict[dict_key] = kwargs[key]\n\n    def _get_internal_key(self, key):\n        if is_train(key):\n            return KerasModeKeys.TRAIN\n        if is_eval(key):\n            return KerasModeKeys.TEST\n        if is_predict(key):\n            return KerasModeKeys.PREDICT\n        raise ValueError('Invalid mode key: {}.'.format(key))\n\n    def __getitem__(self, key):\n        return self._internal_dict[self._get_internal_key(key)]\n\n    def __iter__(self):\n        return iter(self._keys)\n\n    def __len__(self):\n        return len(self._keys)",
    "docstring": "Map using ModeKeys as keys. This class creates an immutable mapping from modes to values. For example, SavedModel export of Keras and Estimator models use this to map modes to their corresponding MetaGraph tags/SignatureDef keys. Since this class uses modes, rather than strings, as keys, both \"predict\" (Keras's PREDICT ModeKey) and \"infer\" (Estimator's PREDICT ModeKey) map to the same value.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\saved_model\\model_utils\\mode_keys.py",
    "ast_data": "ClassDef name:ModeKeyMap FunctionDef name:__init__ arg:self arguments arg arg Assign Assign For Call Assign Call If Compare Raise Call Call Assign FunctionDef name:_get_internal_key arg:self arg:key arguments arg arg If Call Return return:yes If Call Return return:yes If Call Return return:yes Raise Call Call FunctionDef name:__getitem__ arg:self arg:key arguments arg arg Return return:yes Call FunctionDef name:__iter__ arg:self arguments arg Return return:yes Call FunctionDef name:__len__ arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "sphere_intersections",
    "source_code": "def sphere_intersections(z, d, trust_radius, entire_line=False):\n    if norm(d) == 0:\n        return (0, 0, False)\n    if np.isinf(trust_radius):\n        if entire_line:\n            ta = -np.inf\n            tb = np.inf\n        else:\n            ta = 0\n            tb = 1\n        intersect = True\n        return (ta, tb, intersect)\n    a = np.dot(d, d)\n    b = 2 * np.dot(z, d)\n    c = np.dot(z, z) - trust_radius ** 2\n    discriminant = b * b - 4 * a * c\n    if discriminant < 0:\n        intersect = False\n        return (0, 0, intersect)\n    sqrt_discriminant = np.sqrt(discriminant)\n    aux = b + copysign(sqrt_discriminant, b)\n    ta = -aux / (2 * a)\n    tb = -2 * c / aux\n    ta, tb = sorted([ta, tb])\n    if entire_line:\n        intersect = True\n    elif tb < 0 or ta > 1:\n        intersect = False\n        ta = 0\n        tb = 0\n    else:\n        intersect = True\n        ta = max(0, ta)\n        tb = min(1, tb)\n    return (ta, tb, intersect)",
    "docstring": "Find the intersection between segment (or line) and spherical constraints. Find the intersection between the segment (or line) defined by the parametric equation ``, there is no intersection.",
    "type": "function",
    "file_path": "scipy\\scipy\\optimize\\_trustregion_constr\\qp_subproblem.py",
    "ast_data": "FunctionDef name:sphere_intersections arg:z arg:d arg:trust_radius arg:entire_line arguments arg arg arg arg If Compare Call Return return:yes If Call If Assign Assign Assign Assign Assign Return return:yes Assign Call Assign Call Assign Call Assign If Compare Assign Return return:yes Assign Call Assign Call Assign Assign Assign Call If Assign If BoolOp Compare Compare Assign Assign Assign Assign Assign Call Assign Call Return return:yes"
  },
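The routine solves the quadratic `||z + t*d||**2 = trust_radius**2`, i.e. `(d.d) t**2 + 2 (z.d) t + (z.z - R**2) = 0`, then clips `[ta, tb]` to the segment `[0, 1]` unless `entire_line` is set. A quick numerical check of the roots with NumPy:

```python
import numpy as np

# Line through the origin with direction (2, 0), unit ball of radius 1.
z = np.array([0.0, 0.0])
d = np.array([2.0, 0.0])
R = 1.0

a, b, c = d @ d, 2 * (z @ d), z @ z - R**2
ta, tb = sorted(np.roots([a, b, c]).real)
print(ta, tb)  # -0.5 0.5 -> clipped to the segment this becomes [0, 0.5]
```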
  {
    "library": "scipy",
    "name": "_var",
    "source_code": "def _var(self, dim, df, scale):\n    var = scale ** 2\n    diag = scale.diagonal()\n    var += np.outer(diag, diag)\n    var *= df\n    return var",
    "docstring": "Variance of the Wishart distribution. Parameters ---------- dim : int Dimension of the scale matrix %(_doc_default_callparams)s Notes ----- As this function does no argument checking, it should not be called directly; use 'var' instead.",
    "type": "method",
    "file_path": "scipy\\scipy\\stats\\_multivariate.py",
    "ast_data": "FunctionDef name:_var arg:self arg:dim arg:df arg:scale arguments arg arg arg arg Assign Assign Call Call Return return:yes"
  },
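The variance formula implemented above is `Var[W_ij] = df * (scale_ij**2 + scale_ii * scale_jj)`, evaluated element-wise; a direct sketch:

```python
import numpy as np

# Element-wise Wishart variance from df and the scale matrix.
df = 5
scale = np.array([[2.0, 0.5], [0.5, 1.0]])
diag = scale.diagonal()
var = df * (scale**2 + np.outer(diag, diag))
print(var)  # [[40.    11.25]
            #  [11.25 10.  ]]
```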
  {
    "library": "tensorflow",
    "name": "experimental_type_proto",
    "source_code": "@classmethod\n@abc.abstractmethod\ndef experimental_type_proto(cls) -> Type[message.Message]:\n    raise NotImplementedError",
    "docstring": "Returns the unique type of proto associated with this class.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\core\\function\\trace_type\\serialization.py",
    "ast_data": "FunctionDef name:experimental_type_proto arg:cls arguments arg Raise"
  },
  {
    "library": "kornia",
    "name": "load",
    "source_code": "def load(model_name: Union[onnx.ModelProto, str]) -> ONNXModule:\n    return ONNXModule(model_name)",
    "docstring": "Load an ONNX model from either a file path or HuggingFace. The loaded model is an ONNXModule object, of which you may run the model with the method, with less boilerplate. Args: model_name: The name of the model to load. For Hugging Face-hosted models, use the format 'hf://model_name'. Valid can be found on Or a URL to the ONNX model.",
    "type": "function",
    "file_path": "kornia\\kornia\\onnx\\module.py",
    "ast_data": "FunctionDef name:load arg:model_name arguments arg Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "iterable_not_string",
    "source_code": "def iterable_not_string(obj: object) -> bool:\n    return isinstance(obj, abc.Iterable) and (not isinstance(obj, str))",
    "docstring": "Check if the object is an iterable but not a string. Parameters ---------- obj : The object to check. Returns ------- is_iter_not_string : bool Whether is a non-string iterable. Examples -------- >>> iterable_not_string([1, 2, 3]) True >>> iterable_not_string(\"foo\") False >>> iterable_not_string(1) False",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\dtypes\\inference.py",
    "ast_data": "FunctionDef name:iterable_not_string arg:obj arguments arg Return return:yes BoolOp Call Call"
  },
  {
    "library": "pandas",
    "name": "_tuplify",
    "source_code": "def _tuplify(ndim: int, loc: Hashable) -> tuple[Hashable | slice, ...]:\n    _tup: list[Hashable | slice]\n    _tup = [slice(None, None) for _ in range(ndim)]\n    _tup[0] = loc\n    return tuple(_tup)",
    "docstring": "Given an indexer for the first dimension, create an equivalent tuple for indexing over all dimensions. Parameters ---------- ndim : int loc : object Returns ------- tuple",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\indexing.py",
    "ast_data": "FunctionDef name:_tuplify arg:ndim arg:loc arguments arg arg Assign Call Call Assign Return return:yes Call"
  },
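A stand-alone copy of the helper shows the expansion: the first axis gets the label, every other axis a full slice.

```python
# Self-contained copy of `_tuplify` without the pandas type aliases.
def tuplify(ndim, loc):
    tup = [slice(None, None) for _ in range(ndim)]
    tup[0] = loc
    return tuple(tup)

print(tuplify(3, 'a'))
# ('a', slice(None, None, None), slice(None, None, None))
```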
  {
    "library": "cryptography",
    "name": "key_size",
    "source_code": "@property\n@abc.abstractmethod\ndef key_size(self) -> int:\n    pass",
    "docstring": "Bit size of a secret scalar for the curve.",
    "type": "method",
    "file_path": "cryptography\\src\\cryptography\\hazmat\\primitives\\asymmetric\\ec.py",
    "ast_data": "FunctionDef name:key_size arg:self arguments arg"
  },
  {
    "library": "matplotlib",
    "name": "_cbar_cla",
    "source_code": "def _cbar_cla(self):\n    for x in self._interactive_funcs:\n        delattr(self.ax, x)\n    del self.ax.cla\n    self.ax.cla()",
    "docstring": "Function to clear the interactive colorbar state.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\colorbar.py",
    "ast_data": "FunctionDef name:_cbar_cla arg:self arguments arg For Call Call"
  },
  {
    "library": "matplotlib",
    "name": "get_offset_transform",
    "source_code": "def get_offset_transform(self):\n    if self._offset_transform is None:\n        self._offset_transform = transforms.IdentityTransform()\n    elif not isinstance(self._offset_transform, transforms.Transform) and hasattr(self._offset_transform, '_as_mpl_transform'):\n        self._offset_transform = self._offset_transform._as_mpl_transform(self.axes)\n    return self._offset_transform",
    "docstring": "Return the instance used by this artist offset.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\collections.py",
    "ast_data": "FunctionDef name:get_offset_transform arg:self arguments arg If Compare Assign Call If BoolOp Call Call Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "UnauthenticatedError",
    "source_code": "@tf_export('errors.UnauthenticatedError')\nclass UnauthenticatedError(OpError):\n\n    def __init__(self, node_def, op, message, *args):\n        super(UnauthenticatedError, self).__init__(node_def, op, message, UNAUTHENTICATED, *args)",
    "docstring": "Raised when the request does not have valid authentication credentials. This exception is not currently used.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\errors_impl.py",
    "ast_data": "ClassDef name:UnauthenticatedError FunctionDef name:__init__ arg:self arg:node_def arg:op arg:message arguments arg arg arg arg arg Call Call Call"
  },
  {
    "library": "sphinx",
    "name": "_toctree_copy",
    "source_code": "def _toctree_copy(node: ET, depth: int, maxdepth: int, collapse: bool, tags: Tags) -> ET:\n    keep_bullet_list_sub_nodes = depth <= 1 or ((depth <= maxdepth or maxdepth <= 0) and (not collapse or 'iscurrent' in node))\n    copy = node.copy()\n    for subnode in node.children:\n        if isinstance(subnode, addnodes.compact_paragraph | nodes.list_item):\n            copy.append(_toctree_copy(subnode, depth, maxdepth, collapse, tags))\n        elif isinstance(subnode, nodes.bullet_list):\n            if keep_bullet_list_sub_nodes:\n                copy.append(_toctree_copy(subnode, depth + 1, maxdepth, collapse, tags))\n        elif isinstance(subnode, addnodes.toctree):\n            copy.append(subnode.copy())\n        elif isinstance(subnode, addnodes.only):\n            if _only_node_keep_children(subnode, tags):\n                for child in subnode.children:\n                    copy.append(_toctree_copy(child, depth, maxdepth, collapse, tags))\n        elif isinstance(subnode, nodes.reference | nodes.title):\n            sub_node_copy = subnode.copy()\n            sub_node_copy.children = [child.deepcopy() for child in subnode.children]\n            for child in sub_node_copy.children:\n                child.parent = sub_node_copy\n            copy.append(sub_node_copy)\n        else:\n            msg = f'Unexpected node type {subnode.__class__.__name__!r}!'\n            raise ValueError(msg)\n    return copy",
    "docstring": "Utility: Cut and deep-copy a TOC at a specified depth.",
    "type": "function",
    "file_path": "sphinx\\sphinx\\environment\\adapters\\toctree.py",
    "ast_data": "FunctionDef name:_toctree_copy arg:node arg:depth arg:maxdepth arg:collapse arg:tags arguments arg arg arg arg arg Assign BoolOp Compare BoolOp BoolOp Compare Compare BoolOp Compare Assign Call For If Call Call Call If Call If Call Call If Call Call Call If Call If Call For Call Call If Call Assign Call Assign Call For Assign Call Assign Raise Call Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "fit_predict",
    "source_code": "def fit_predict(self, X, y=None):\n    return super().fit_predict(X, y)",
    "docstring": "Fit and return the result of each sample's clustering assignment. In addition to fitting, this method also return the result of the clustering assignment for each sample in the training set. Parameters ---------- X : array-like of shape (n_samples, n_features) or (n_samples, n_samples) Training instances to cluster, or distances between instances if ``. y : Ignored Not used, present here for API consistency by convention. Returns ------- labels : ndarray of shape (n_samples,) Cluster labels.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\cluster\\_agglomerative.py",
    "ast_data": "FunctionDef name:fit_predict arg:self arg:X arg:y arguments arg arg arg Return return:yes Call Call"
  },
  {
    "library": "kornia",
    "name": "Shear",
    "source_code": "class Shear(Module):\n\n    def __init__(self, shear: Tensor, mode: str='bilinear', padding_mode: str='zeros', align_corners: bool=True) -> None:\n        super().__init__()\n        self.shear: Tensor = shear\n        self.mode: str = mode\n        self.padding_mode: str = padding_mode\n        self.align_corners: bool = align_corners\n\n    def forward(self, input: Tensor) -> Tensor:\n        return shear(input, self.shear, self.mode, self.padding_mode, self.align_corners)",
    "docstring": "Shear the tensor. Args: shear: tensor containing the angle to shear in the x and y direction. The tensor must have a shape of (B, 2), where B is batch size, last dimension contains shx shy. mode: interpolation mode to calculate output values ``. align_corners: interpolation flag. Returns: The skewed tensor with the same shape as the input. Example: >>> img = torch.rand(1, 3, 4, 4) >>> shear_factor = torch.tensor([[0.5, 0.0]]) >>> out = Shear(shear_factor)(img) >>> print(out.shape) torch.Size([1, 3, 4, 4])",
    "type": "class",
    "file_path": "kornia\\kornia\\geometry\\transform\\affwarp.py",
    "ast_data": "ClassDef name:Shear FunctionDef name:__init__ arg:self arg:shear arg:mode arg:padding_mode arg:align_corners arguments arg arg arg arg arg Call Call FunctionDef name:forward arg:self arg:input arguments arg arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "IsCondMerge",
    "source_code": "def IsCondMerge(op):\n    if not IsMerge(op):\n        return False\n    if not op.inputs:\n        return False\n    is_cond_merge = True\n    for i in op.inputs:\n        ctxt = GetOutputContext(i.op)\n        is_cond_merge = is_cond_merge and ctxt is not None and ctxt.IsCondContext()\n    return is_cond_merge",
    "docstring": "Return true if is the Merge for a conditional.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\control_flow_util.py",
    "ast_data": "FunctionDef name:IsCondMerge arg:op arguments arg If Call Return return:yes If Return return:yes Assign For Assign Call Assign BoolOp Compare Call Return return:yes"
  },
  {
    "library": "pandas",
    "name": "PandasExtensionDtype",
    "source_code": "class PandasExtensionDtype(ExtensionDtype):\n    type: Any\n    kind: Any\n    subdtype = None\n    str: str_type\n    num = 100\n    shape: tuple[int, ...] = ()\n    itemsize = 8\n    base: DtypeObj | None = None\n    isbuiltin = 0\n    isnative = 0\n    _cache_dtypes: dict[str_type, PandasExtensionDtype] = {}\n\n    def __repr__(self) -> str_type:\n        return str(self)\n\n    def __hash__(self) -> int:\n        raise NotImplementedError('sub-classes should implement an __hash__ method')\n\n    def __getstate__(self) -> dict[str_type, Any]:\n        return {k: getattr(self, k, None) for k in self._metadata}\n\n    @classmethod\n    def reset_cache(cls) -> None:\n        cls._cache_dtypes = {}",
    "docstring": "A np.dtype duck-typed class, suitable for holding a custom dtype. THIS IS NOT A REAL NUMPY DTYPE",
    "type": "class",
    "file_path": "pandas\\pandas\\core\\dtypes\\dtypes.py",
    "ast_data": "ClassDef name:PandasExtensionDtype Assign Assign Assign Assign Assign FunctionDef name:__repr__ arg:self arguments arg Return return:yes Call FunctionDef name:__hash__ arg:self arguments arg Raise Call FunctionDef name:__getstate__ arg:self arguments arg Return return:yes Call FunctionDef name:reset_cache arg:cls arguments arg Assign"
  },
  {
    "library": "pytorch",
    "name": "fetch_model",
    "source_code": "def fetch_model(model_path, device, sparse_dlrm=False):\n    if zipfile.is_zipfile(model_path):\n        with zipfile.ZipFile(model_path, 'r', zipfile.ZIP_DEFLATED) as zip_ref:\n            zip_ref.extractall(os.path.dirname(model_path))\n            unzip_path = model_path.replace('.zip', '.ckpt')\n    else:\n        unzip_path = model_path\n    model = get_dlrm_model(sparse_dlrm=sparse_dlrm)\n    model.load_state_dict(torch.load(unzip_path, map_location=device))\n    model = model.to(device)\n    model.eval()\n    if zipfile.is_zipfile(model_path):\n        os.remove(unzip_path)\n    return model",
    "docstring": "This function unzips the zipped model checkpoint (if zipped) and returns a model object Args: model_path (str) path pointing to the zipped/raw model checkpoint file that was dumped in evaluate disk savings device (torch.device) device to which model needs to be loaded to",
    "type": "function",
    "file_path": "pytorch\\torch\\ao\\pruning\\_experimental\\data_sparsifier\\benchmarks\\dlrm_utils.py",
    "ast_data": "FunctionDef name:fetch_model arg:model_path arg:device arg:sparse_dlrm arguments arg arg arg If Call With Call Call Call Assign Call Assign Assign Call Call Call Assign Call Call If Call Call Return return:yes"
  },
  {
    "library": "scipy",
    "name": "_cpu_count_user",
    "source_code": "def _cpu_count_user(os_cpu_count):\n    cpu_count_affinity = _cpu_count_affinity(os_cpu_count)\n    cpu_count_cgroup = _cpu_count_cgroup(os_cpu_count)\n    cpu_count_loky = int(os.environ.get('LOKY_MAX_CPU_COUNT', os_cpu_count))\n    return min(cpu_count_affinity, cpu_count_cgroup, cpu_count_loky)",
    "docstring": "Number of user defined available CPUs",
    "type": "function",
    "file_path": "scipy\\.spin\\cmds.py",
    "ast_data": "FunctionDef name:_cpu_count_user arg:os_cpu_count arguments arg Assign Call Assign Call Assign Call Call Return return:yes Call"
  },
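The `_cpu_count_user` helper above takes the minimum of three independent limits: CPU affinity, cgroup quota, and the `LOKY_MAX_CPU_COUNT` environment variable. A minimal standalone sketch of just the env-var clamp (the affinity and cgroup helpers are private, so `os.cpu_count()` stands in for them here; `user_cpu_count` is our illustrative name):

```python
# Sketch of the LOKY_MAX_CPU_COUNT clamp from _cpu_count_user, with
# os.cpu_count() standing in for the private affinity/cgroup helpers.
import os

def user_cpu_count() -> int:
    os_cpu_count = os.cpu_count() or 1
    cpu_count_loky = int(os.environ.get("LOKY_MAX_CPU_COUNT", os_cpu_count))
    return min(os_cpu_count, cpu_count_loky)

os.environ["LOKY_MAX_CPU_COUNT"] = "2"
print(user_cpu_count())  # at most 2, regardless of the machine size
```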
  {
    "library": "django",
    "name": "Expression",
    "source_code": "@deconstructible\nclass Expression(BaseExpression, Combinable):\n\n    @classproperty\n    @functools.lru_cache(maxsize=128)\n    def _constructor_signature(cls):\n        return inspect.signature(cls.__init__)\n\n    @classmethod\n    def _identity(cls, value):\n        if isinstance(value, tuple):\n            return tuple(map(cls._identity, value))\n        if isinstance(value, dict):\n            return tuple(((key, cls._identity(val)) for key, val in value.items()))\n        if isinstance(value, fields.Field):\n            if value.name and value.model:\n                return (value.model._meta.label, value.name)\n            return type(value)\n        return make_hashable(value)\n\n    @cached_property\n    def identity(self):\n        args, kwargs = self._constructor_args\n        signature = self._constructor_signature.bind_partial(self, *args, **kwargs)\n        signature.apply_defaults()\n        arguments = iter(signature.arguments.items())\n        next(arguments)\n        identity = [self.__class__]\n        for arg, value in arguments:\n            value = self._identity(value)\n            identity.append((arg, value))\n        return tuple(identity)\n\n    def __eq__(self, other):\n        if not isinstance(other, Expression):\n            return NotImplemented\n        return other.identity == self.identity\n\n    def __hash__(self):\n        return hash(self.identity)",
    "docstring": "An expression that can be combined with other expressions.",
    "type": "class",
    "file_path": "django\\django\\db\\models\\expressions.py",
    "ast_data": "ClassDef name:Expression FunctionDef name:_constructor_signature arg:cls arguments arg Return return:yes Call Call FunctionDef name:_identity arg:cls arg:value arguments arg arg If Call Return return:yes Call Call If Call Return return:yes Call Call Call If Call If BoolOp Return return:yes Return return:yes Call Return return:yes Call FunctionDef name:identity arg:self arguments arg Assign Assign Call Call Assign Call Call Call Assign For Assign Call Call Return return:yes Call FunctionDef name:__eq__ arg:self arg:other arguments arg arg If Call Return return:yes Return return:yes Compare FunctionDef name:__hash__ arg:self arguments arg Return return:yes Call"
  },
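The `Expression` class above derives `__eq__`/`__hash__` from a structural `identity` tuple built by binding the saved constructor arguments against the `__init__` signature, so two expressions constructed with equivalent arguments compare equal. A simplified, framework-free sketch of that idea (class names here are ours, not Django's):

```python
# Standalone sketch of identity-based equality: equality and hashing
# derive from (class, normalized constructor arguments).
import inspect

class Identifiable:
    def __init__(self, *args, **kwargs):
        # Capture the constructor arguments, like Django's _constructor_args.
        self._constructor_args = (args, kwargs)

    @property
    def identity(self):
        sig = inspect.signature(type(self).__init__)
        args, kwargs = self._constructor_args
        bound = sig.bind_partial(self, *args, **kwargs)
        bound.apply_defaults()  # normalize defaults so Point(1) == Point(1, y=0)
        items = tuple(list(bound.arguments.items())[1:])  # skip `self`
        return (type(self), items)

    def __eq__(self, other):
        if not isinstance(other, Identifiable):
            return NotImplemented
        return self.identity == other.identity

    def __hash__(self):
        return hash(self.identity)

class Point(Identifiable):
    def __init__(self, x, y=0):
        super().__init__(x, y=y)
        self.x, self.y = x, y

print(Point(1) == Point(1, y=0))  # True: defaults are applied before comparing
```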
  {
    "library": "matplotlib",
    "name": "__init__",
    "source_code": "def __init__(self, data, comment='', with_margin=False, parent=None):\n    super().__init__(parent)\n    self.data = copy.deepcopy(data)\n    self.widgets = []\n    self.formlayout = QtWidgets.QFormLayout(self)\n    if not with_margin:\n        self.formlayout.setContentsMargins(0, 0, 0, 0)\n    if comment:\n        self.formlayout.addRow(QtWidgets.QLabel(comment))\n        self.formlayout.addRow(QtWidgets.QLabel(' '))",
    "docstring": "Parameters ---------- data : list of (label, value) pairs The data to be edited in the form. comment : str, optional with_margin : bool, default: False If False, the form elements reach to the border of the widget. This is the desired behavior if the FormWidget is used as a widget alongside with other widgets such as a QComboBox, which also do not have a margin around them. However, a margin can be desired if the FormWidget is the only widget within a container, e.g. a tab in a QTabWidget. parent : QWidget or None The parent widget.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\backends\\qt_editor\\_formlayout.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:data arg:comment arg:with_margin arg:parent arguments arg arg arg arg arg Call Call Assign Call Assign Assign Call If Call If Call Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, scaffold=None, master='', config=None, max_wait_secs=30 * 60):\n    self._scaffold = scaffold or Scaffold()\n    self._session_manager = None\n    self._master = master\n    self._config = config\n    self._max_wait_secs = max_wait_secs",
    "docstring": "Initializes a worker session creator. Args: scaffold: A used for gathering or building supportive ops. If not specified a default one is created. It's used to finalize the graph. master: representation of the TensorFlow master to use. config: proto used to configure the session. max_wait_secs: Maximum time to wait for the session to become available.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\training\\monitored_session.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:scaffold arg:master arg:config arg:max_wait_secs arguments arg arg arg arg arg Assign BoolOp Call Assign Assign Assign Assign"
  },
  {
    "library": "scrapy",
    "name": "_process_spidermw_output",
    "source_code": "@deferred_f_from_coro_f\nasync def _process_spidermw_output(self, output: Any, response: Response) -> None:\n    if isinstance(output, Request):\n        assert self.crawler.engine is not None\n        self.crawler.engine.crawl(request=output)\n        return\n    if output is not None:\n        await self.start_itemproc_async(output, response=response)",
    "docstring": "Process each Request/Item (given in the output parameter) returned from the given spider. Items are sent to the item pipelines, requests are scheduled.",
    "type": "method",
    "file_path": "scrapy\\scrapy\\core\\scraper.py",
    "ast_data": "AsyncFunctionDef name:_process_spidermw_output arg:self arg:output arg:response arguments arg arg arg If Call Compare Call Return return:no If Compare Call"
  },
  {
    "library": "authlib",
    "name": "create_temporary_credential",
    "source_code": "def create_temporary_credential(self, request):\n    raise NotImplementedError()",
    "docstring": "Generate and save a temporary credential into database or cache. A temporary credential is used for exchanging token credential. This method should be re-implemented:: def create_temporary_credential(self, request): oauth_token = generate_token(36) oauth_token_secret = generate_token(48) temporary_credential = TemporaryCredential( oauth_token=oauth_token, oauth_token_secret=oauth_token_secret, client_id=request.client_id, redirect_uri=request.redirect_uri, ) # if the credential has a save method temporary_credential.save() return temporary_credential :param request: OAuth1Request instance :return: TemporaryCredential instance",
    "type": "method",
    "file_path": "authlib\\authlib\\oauth1\\rfc5849\\authorization_server.py",
    "ast_data": "FunctionDef name:create_temporary_credential arg:self arg:request arguments arg arg Raise Call"
  },
  {
    "library": "tensorflow",
    "name": "__validate__",
    "source_code": "def __validate__(self):\n    pass",
    "docstring": "Perform post-construction validation.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\extension_type.py",
    "ast_data": "FunctionDef name:__validate__ arg:self arguments arg"
  },
  {
    "library": "pytorch",
    "name": "breakpoint",
    "source_code": "@staticmethod\ndef breakpoint():\n\n    def inner(inner_ctx):\n        ctx = inner_ctx.parent()\n        builtins.breakpoint()\n    comptime(inner)",
    "docstring": "Like pdb breakpoint(), but drop into pdb whenever this line of code is compiled by dynamo. Use it by putting this in your model code:: from torch._dynamo.comptime import comptime comptime.breakpoint() And then, inside pdb, you can access 'ctx' to query things about the compilation context:: (Pdb) !ctx.print_bt() (Pdb) !ctx.print_locals() (Pdb) p ctx.get_local(\"attention\").as_fake()",
    "type": "method",
    "file_path": "pytorch\\torch\\_dynamo\\comptime.py",
    "ast_data": "FunctionDef name:breakpoint arguments FunctionDef name:inner arg:inner_ctx arguments arg Assign Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "major_minor_change",
    "source_code": "def major_minor_change(old_version, new_version):\n    major_mismatch = old_version.major != new_version.major\n    minor_mismatch = old_version.minor != new_version.minor\n    if major_mismatch or minor_mismatch:\n        return True\n    return False",
    "docstring": "Check if a major or minor change occurred.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\tools\\ci_build\\update_version.py",
    "ast_data": "FunctionDef name:major_minor_change arg:old_version arg:new_version arguments arg arg Assign Compare Assign Compare If BoolOp Return return:yes Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, show_memory: bool=False) -> None:\n    self._show_memory = show_memory\n    self._events = []\n    self._metadata = []",
    "docstring": "Constructs a new Chrome Trace formatter.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\client\\timeline.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:show_memory arguments arg arg Assign Assign Assign"
  },
  {
    "library": "seaborn",
    "name": "fig",
    "source_code": "@property\ndef fig(self):\n    return self._figure",
    "docstring": "DEPRECATED: prefer the property.",
    "type": "method",
    "file_path": "seaborn\\seaborn\\axisgrid.py",
    "ast_data": "FunctionDef name:fig arg:self arguments arg Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "fit_transform",
    "source_code": "def fit_transform(self, X, y=None, **fit_params):\n    if _routing_enabled():\n        transform_params = self.get_metadata_routing().consumes(method='transform', params=fit_params.keys())\n        if transform_params:\n            warnings.warn(f\"This object ({self.__class__.__name__}) has a `transform` method which consumes metadata, but `fit_transform` does not forward metadata to `transform`. Please implement a custom `fit_transform` method to forward metadata to `transform` as well. Alternatively, you can explicitly do `set_transform_request`and set all values to `False` to disable metadata routed to `transform`, if that's an option.\", UserWarning)\n    if y is None:\n        return self.fit(X, **fit_params).transform(X)\n    else:\n        return self.fit(X, y, **fit_params).transform(X)",
    "docstring": "Fit to data, then transform it. Fits transformer to and with optional parameters and returns a transformed version of . Parameters ---------- X : array-like of shape (n_samples, n_features) Input samples. y : array-like of shape (n_samples,) or (n_samples, n_outputs), default=None Target values (None for unsupervised transformations). **fit_params : dict Additional fit parameters. Returns ------- X_new : ndarray array of shape (n_samples, n_features_new) Transformed array.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\base.py",
    "ast_data": "FunctionDef name:fit_transform arg:self arg:X arg:y arguments arg arg arg arg If Call Assign Call Call Call If Call If Compare Return return:yes Call Call Return return:yes Call Call"
  },
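For the default implementation above, `fit_transform` is just `fit(...).transform(...)` (with `y` forwarded when given); a quick check with a scaler:

```python
# fit_transform equals fit(...).transform(...) for the default
# TransformerMixin implementation shown above.
import numpy as np
from sklearn.preprocessing import StandardScaler

X = np.array([[0.0, 1.0], [2.0, 3.0], [4.0, 5.0]])
a = StandardScaler().fit_transform(X)
b = StandardScaler().fit(X).transform(X)
assert np.allclose(a, b)
print(a.mean(axis=0))  # ~[0, 0]: each column standardized
```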
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "@deprecation.deprecated(None, 'Queue-based input pipelines have been replaced by `tf.data`. Use `tf.data.Dataset.map(tf.read_file)`.')\ndef __init__(self, name=None):\n    rr = gen_io_ops.whole_file_reader_v2(name=name)\n    super(WholeFileReader, self).__init__(rr, supports_serialize=True)",
    "docstring": "Create a WholeFileReader. Args: name: A name for the operation (optional).",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\io_ops.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:name arguments arg arg Assign Call Call Call Call"
  },
  {
    "library": "pandas",
    "name": "resample",
    "source_code": "@final\n@doc(klass=_shared_doc_kwargs['klass'])\ndef resample(self, rule, closed: Literal['right', 'left'] | None=None, label: Literal['right', 'left'] | None=None, convention: Literal['start', 'end', 's', 'e'] | lib.NoDefault=lib.no_default, on: Level | None=None, level: Level | None=None, origin: str | TimestampConvertibleTypes='start_day', offset: TimedeltaConvertibleTypes | None=None, group_keys: bool=False) -> Resampler:\n    from pandas.core.resample import get_resampler\n    if convention is not lib.no_default:\n        warnings.warn(f\"The 'convention' keyword in {type(self).__name__}.resample is deprecated and will be removed in a future version. Explicitly cast PeriodIndex to DatetimeIndex before resampling instead.\", FutureWarning, stacklevel=find_stack_level())\n    else:\n        convention = 'start'\n    return get_resampler(cast('Series | DataFrame', self), freq=rule, label=label, closed=closed, convention=convention, key=on, level=level, origin=origin, offset=offset, group_keys=group_keys)",
    "docstring": "Resample time-series data. Convenience method for frequency conversion and resampling of time series. The object must have a datetime-like index (, , or ), or the caller must pass the label of a datetime-like series/index to the `PeriodIndexruleleveloriginoriginoriginoriginoriginpandas 1.5.0 Release notes ~pandas.core.Resampleruser guide this link onleveloffsetstart_dayend_day` to take the ceiling midnight of the largest Timestamp as the end of the bins and drop the bins not containing data: >>> ts.resample(\"17min\", origin=\"end_day\").sum() 2000-10-01 23:38:00 3 2000-10-01 23:55:00 15 2000-10-02 00:12:00 45 2000-10-02 00:29:00 45 Freq: 17min, dtype: int64",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\generic.py",
    "ast_data": "FunctionDef name:resample arg:self arg:rule arg:closed arg:label arg:convention arg:on arg:level arg:origin arg:offset arg:group_keys arguments arg arg arg arg arg arg arg arg arg arg If Compare Call Call Call Assign Return return:yes Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "get_default_build_root",
    "source_code": "def get_default_build_root() -> str:\n    return os.path.realpath(torch._appdirs.user_cache_dir(appname='torch_extensions'))",
    "docstring": "Return the path to the root folder under which extensions will built. For each extension module built, there will be one folder underneath the folder returned by this function. For example, if ``. This directory is **user-specific** so that multiple users on the same machine won't meet permission issues.",
    "type": "function",
    "file_path": "pytorch\\torch\\utils\\cpp_extension.py",
    "ast_data": "FunctionDef name:get_default_build_root arguments Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "regression_signature_def",
    "source_code": "@tf_export(v1=['saved_model.regression_signature_def', 'saved_model.signature_def_utils.regression_signature_def'])\n@deprecation.deprecated_endpoints('saved_model.signature_def_utils.regression_signature_def')\ndef regression_signature_def(examples, predictions):\n    if examples is None:\n        raise ValueError('Regression `examples` cannot be None.')\n    if not isinstance(examples, tensor_lib.Tensor):\n        raise ValueError(f'Expected regression `examples` to be of type Tensor. Found `examples` of type {type(examples)}.')\n    if predictions is None:\n        raise ValueError('Regression `predictions` cannot be None.')\n    input_tensor_info = utils.build_tensor_info(examples)\n    if input_tensor_info.dtype != types_pb2.DT_STRING:\n        raise ValueError(f'Regression input tensors must be of type string. Found tensors with type {input_tensor_info.dtype}.')\n    signature_inputs = {signature_constants.REGRESS_INPUTS: input_tensor_info}\n    output_tensor_info = utils.build_tensor_info(predictions)\n    if output_tensor_info.dtype != types_pb2.DT_FLOAT:\n        raise ValueError(f'Regression output tensors must be of type float. Found tensors with type {output_tensor_info.dtype}.')\n    signature_outputs = {signature_constants.REGRESS_OUTPUTS: output_tensor_info}\n    signature_def = build_signature_def(signature_inputs, signature_outputs, signature_constants.REGRESS_METHOD_NAME)\n    return signature_def",
    "docstring": "Creates regression signature from given examples and predictions. This function produces signatures intended for use with the TensorFlow Serving Regress API (tensorflow_serving/apis/prediction_service.proto), and so constrains the input and output types to those allowed by TensorFlow Serving. Args: examples: A string , expected to accept serialized tf.Examples. predictions: A float . Returns: A regression-flavored signature_def. Raises: ValueError: If examples is .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\saved_model\\signature_def_utils_impl.py",
    "ast_data": "FunctionDef name:regression_signature_def arg:examples arg:predictions arguments arg arg If Compare Raise Call If Call Raise Call Call If Compare Raise Call Assign Call If Compare Raise Call Assign Assign Call If Compare Raise Call Assign Assign Call Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "is_self_or_submodule",
    "source_code": "def is_self_or_submodule(name_descendant: str, name_ancestor: str) -> bool:\n    return name_descendant == name_ancestor or name_ancestor + '.' in name_descendant",
    "docstring": "check if name_descendant is a submodule of name_ancestor, or if they are the same",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\_tools\\ilp_utils.py",
    "ast_data": "FunctionDef name:is_self_or_submodule arg:name_descendant arg:name_ancestor arguments arg arg Return return:yes BoolOp Compare Compare"
  },
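Note that the test above is a substring check on the dotted prefix, not `startswith`; copied verbatim for a quick demonstration:

```python
# The dotted-prefix check in isolation: "model.layer1" is a descendant
# of "model" because "model." occurs in it; "modelling" is not.
def is_self_or_submodule(name_descendant: str, name_ancestor: str) -> bool:
    return name_descendant == name_ancestor or name_ancestor + "." in name_descendant

print(is_self_or_submodule("model.layer1", "model"))  # True
print(is_self_or_submodule("model", "model"))         # True
print(is_self_or_submodule("modelling", "model"))     # False: no "model." substring
```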
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, session_creator, hooks, should_recover, stop_grace_period_secs=120):\n    self._graph_was_finalized = ops.get_default_graph().finalized\n    self._hooks = hooks or []\n    for h in self._hooks:\n        h.begin()\n    worker_context = distribute_coordinator_context.get_current_worker_context()\n    if not session_creator and worker_context:\n        session_creator = worker_context.session_creator()\n    self._coordinated_creator = self._CoordinatedSessionCreator(session_creator=session_creator or ChiefSessionCreator(), hooks=self._hooks, stop_grace_period_secs=stop_grace_period_secs)\n    if should_recover:\n        self._sess = _RecoverableSession(self._coordinated_creator)\n    else:\n        self._sess = self._coordinated_creator.create_session()",
    "docstring": "Sets up a Monitored or Hooked Session. Args: session_creator: A factory object to create session. Typically a or a . hooks: An iterable of AbortedErrorUnavailableErrorclose()` has been called.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\training\\monitored_session.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:session_creator arg:hooks arg:should_recover arg:stop_grace_period_secs arguments arg arg arg arg arg Assign Call Assign BoolOp For Call Assign Call If BoolOp Assign Call Assign Call BoolOp Call If Assign Call Assign Call"
  },
  {
    "library": "scipy",
    "name": "scoreatpercentile",
    "source_code": "def scoreatpercentile(data, per, limit=(), alphap=0.4, betap=0.4):\n    if per < 0 or per > 100.0:\n        raise ValueError(f'The percentile should be between 0. and 100. ! (got {per})')\n    return mquantiles(data, prob=[per / 100.0], alphap=alphap, betap=betap, limit=limit, axis=0).squeeze()",
    "docstring": "Calculate the score at the given 'per' percentile of the sequence a. For example, the score at per=50 is the median. This function is a shortcut to mquantile",
    "type": "function",
    "file_path": "scipy\\scipy\\stats\\_mstats_basic.py",
    "ast_data": "FunctionDef name:scoreatpercentile arg:data arg:per arg:limit arg:alphap arg:betap arguments arg arg arg arg arg If BoolOp Compare Compare Raise Call Return return:yes Call Call"
  },
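As the docstring says, `per=50` recovers the median; a short usage example (the default `alphap=betap=0.4` plotting positions give 5.5 for 1..10):

```python
# scoreatpercentile at per=50 is the median, per the docstring above.
import numpy as np
from scipy.stats.mstats import scoreatpercentile

data = np.arange(1, 11)             # 1..10
print(scoreatpercentile(data, 50))  # 5.5, the median
```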
  {
    "library": "tensorflow",
    "name": "_gather_implementation",
    "source_code": "def _gather_implementation(self, per_replica_value, destinations, axis, options):\n    raise NotImplementedError('_gather method must be implemented in descendants.')",
    "docstring": "Implementation of method of . Overriding this method is useful for subclass implementers. Args: per_replica_value: a , or a like object. destinations: a , a , a alike object, or a device string. It specifies the devices to gather to. To perform an all-gather, pass the same to and . Note that if it's a , the value is gathered to the devices of that variable, this method doesn't update the variable. axis: specifies the dimension to gather along within each replica's tensor. options: a . See for details. Returns: A or . Raises: ValueError: if per_replica_value can't be converted to a or if destinations is not a string, or .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\cross_device_ops.py",
    "ast_data": "FunctionDef name:_gather_implementation arg:self arg:per_replica_value arg:destinations arg:axis arg:options arguments arg arg arg arg arg Raise Call"
  },
  {
    "library": "pytorch",
    "name": "WeightedRandomSampler",
    "source_code": "class WeightedRandomSampler(Sampler[int]):\n    weights: torch.Tensor\n    num_samples: int\n    replacement: bool\n\n    def __init__(self, weights: Sequence[float], num_samples: int, replacement: bool=True, generator=None) -> None:\n        if not isinstance(num_samples, int) or isinstance(num_samples, bool) or num_samples <= 0:\n            raise ValueError(f'num_samples should be a positive integer value, but got num_samples={num_samples}')\n        if not isinstance(replacement, bool):\n            raise ValueError(f'replacement should be a boolean value, but got replacement={replacement}')\n        weights_tensor = torch.as_tensor(weights, dtype=torch.double)\n        if len(weights_tensor.shape) != 1:\n            raise ValueError(f'weights should be a 1d sequence but given weights have shape {tuple(weights_tensor.shape)}')\n        self.weights = weights_tensor\n        self.num_samples = num_samples\n        self.replacement = replacement\n        self.generator = generator\n\n    def __iter__(self) -> Iterator[int]:\n        rand_tensor = torch.multinomial(self.weights, self.num_samples, self.replacement, generator=self.generator)\n        yield from iter(rand_tensor.tolist())\n\n    def __len__(self) -> int:\n        return self.num_samples",
    "docstring": "Samples elements from ``, samples are drawn with replacement. If not, they are drawn without replacement, which means that when a sample index is drawn for a row, it cannot be drawn again for that row. generator (Generator): Generator used in sampling. Example: >>> # xdoctest: +IGNORE_WANT(\"non-deterministic\") >>> list(WeightedRandomSampler([0.1, 0.9, 0.4, 0.7, 3.0, 0.6], 5, replacement=True)) [4, 4, 1, 4, 5] >>> list(WeightedRandomSampler([0.9, 0.4, 0.05, 0.2, 0.3, 0.1], 5, replacement=False)) [0, 1, 4, 3, 2]",
    "type": "class",
    "file_path": "pytorch\\torch\\utils\\data\\sampler.py",
    "ast_data": "ClassDef name:WeightedRandomSampler FunctionDef name:__init__ arg:self arg:weights arg:num_samples arg:replacement arg:generator arguments arg arg arg arg arg If BoolOp Call Call Compare Raise Call If Call Raise Call Assign Call If Compare Call Raise Call Call Assign Assign Assign Assign FunctionDef name:__iter__ arg:self arguments arg Assign Call Call Call FunctionDef name:__len__ arg:self arguments arg Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "_blit",
    "source_code": "def _blit(argsid):\n    photoimage, data, offsets, bbox, comp_rule = _blit_args.pop(argsid)\n    if not photoimage.tk.call('info', 'commands', photoimage):\n        return\n    _tkagg.blit(photoimage.tk.interpaddr(), str(photoimage), data, comp_rule, offsets, bbox)",
    "docstring": "Thin wrapper to blit called via tkapp.call. *argsid* is a unique string identifier to fetch the correct arguments from the `` dict, since arguments cannot be passed directly.",
    "type": "function",
    "file_path": "matplotlib\\lib\\matplotlib\\backends\\_backend_tk.py",
    "ast_data": "FunctionDef name:_blit arg:argsid arguments arg Assign Call If Call Return return:no Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "_init",
    "source_code": "def _init():\n    global _lib\n    if _lib is not None or not is_built():\n        return\n    from torch._decomp.decompositions import native_group_norm_backward\n    from torch._refs import native_group_norm\n    _lib = _Library('aten', 'IMPL')\n    _lib.impl('native_group_norm', native_group_norm, 'MPS')\n    _lib.impl('native_group_norm_backward', native_group_norm_backward, 'MPS')",
    "docstring": "Register prims as implementation of var_mean and group_norm.",
    "type": "function",
    "file_path": "pytorch\\torch\\backends\\mps\\__init__.py",
    "ast_data": "FunctionDef name:_init arguments If BoolOp Compare Call Return return:no Assign Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "_register",
    "source_code": "def _register(self, target: TorchOp, onnx_decomposition: OnnxDecompMeta) -> None:\n    target_or_name: str | TorchOp\n    if isinstance(target, torch._ops.OpOverload):\n        target_or_name = target.name()\n    else:\n        target_or_name = target\n    if onnx_decomposition.is_custom:\n        self.functions.setdefault(target_or_name, []).insert(0, onnx_decomposition)\n    else:\n        self.functions.setdefault(target_or_name, []).append(onnx_decomposition)",
    "docstring": "Registers a OnnxDecompMeta to an operator. Args: target: The PyTorch node callable target. onnx_decomposition: The OnnxDecompMeta to register.",
    "type": "method",
    "file_path": "pytorch\\torch\\onnx\\_internal\\exporter\\_registration.py",
    "ast_data": "FunctionDef name:_register arg:self arg:target arg:onnx_decomposition arguments arg arg arg If Call Assign Call Assign If Call Call Call Call"
  },
  {
    "library": "scikit-learn",
    "name": "inverse_transform",
    "source_code": "def inverse_transform(self, X):\n    if issparse(X):\n        X = X.tocsc()\n        it = self.inverse_transform(np.diff(X.indptr).reshape(1, -1))\n        col_nonzeros = it.ravel()\n        indptr = np.concatenate([[0], np.cumsum(col_nonzeros)])\n        Xt = csc_matrix((X.data, X.indices, indptr), shape=(X.shape[0], len(indptr) - 1), dtype=X.dtype)\n        return Xt\n    support = self.get_support()\n    X = check_array(X, dtype=None)\n    if support.sum() != X.shape[1]:\n        raise ValueError('X has a different shape than during fitting.')\n    if X.ndim == 1:\n        X = X[None, :]\n    Xt = np.zeros((X.shape[0], support.size), dtype=X.dtype)\n    Xt[:, support] = X\n    return Xt",
    "docstring": "Reverse the transformation operation. Parameters ---------- X : array of shape [n_samples, n_selected_features] The input samples. Returns ------- X_original : array of shape [n_samples, n_original_features] with columns of zeros inserted where features would have been removed by :meth:.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\feature_selection\\_base.py",
    "ast_data": "FunctionDef name:inverse_transform arg:self arg:X arguments arg arg If Call Assign Call Assign Call Call Call Assign Call Assign Call Call Assign Call Call Return return:yes Assign Call Assign Call If Compare Call Raise Call If Compare Assign Assign Call Assign Return return:yes"
  },
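The dense branch above writes the selected columns back into a zero matrix of the original width; demonstrated with `VarianceThreshold`, which inherits this base-class method:

```python
# inverse_transform re-inserts zero columns where the selector dropped
# features, as described in the docstring above.
import numpy as np
from sklearn.feature_selection import VarianceThreshold

X = np.array([[1.0, 7.0, 0.0],
              [2.0, 7.0, 0.0],
              [3.0, 7.0, 0.0]])
sel = VarianceThreshold()   # drops the two constant (zero-variance) columns
Xt = sel.fit_transform(X)   # shape (3, 1): only column 0 survives
print(sel.inverse_transform(Xt))
# [[1. 0. 0.]
#  [2. 0. 0.]
#  [3. 0. 0.]]
```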
  {
    "library": "scikit-learn",
    "name": "check_get_feature_names_out_error",
    "source_code": "@ignore_warnings(category=FutureWarning)\ndef check_get_feature_names_out_error(name, estimator_orig):\n    estimator = clone(estimator_orig)\n    err_msg = f'Estimator {name} should have raised a NotFitted error when fit is called before get_feature_names_out'\n    with raises(NotFittedError, err_msg=err_msg):\n        estimator.get_feature_names_out()",
    "docstring": "Check the error raised by get_feature_names_out when called before fit. Unfitted estimators with get_feature_names_out should raise a NotFittedError.",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\utils\\estimator_checks.py",
    "ast_data": "FunctionDef name:check_get_feature_names_out_error arg:name arg:estimator_orig arguments arg arg Assign Call Assign With Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "_lazy_load_hook",
    "source_code": "def _lazy_load_hook(self: _LazyProtocol, state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs):\n    for name, param in itertools.chain(self._parameters.items(), self._buffers.items()):\n        key = prefix + name\n        if key in state_dict and param is not None:\n            input_param = state_dict[key]\n            if is_lazy(param):\n                if not is_lazy(input_param):\n                    with torch.no_grad():\n                        param.materialize(input_param.shape)",
    "docstring": "load_state_dict pre-hook function for lazy buffers and parameters. The purpose of this hook is to adjust the current state and/or `` for the details of the hook specification.",
    "type": "method",
    "file_path": "pytorch\\torch\\nn\\modules\\lazy.py",
    "ast_data": "FunctionDef name:_lazy_load_hook arg:self arg:state_dict arg:prefix arg:local_metadata arg:strict arg:missing_keys arg:unexpected_keys arg:error_msgs arguments arg arg arg arg arg arg arg arg For Call Call Call Assign If BoolOp Compare Compare Assign If Call If Call With Call Call"
  },
  {
    "library": "authlib",
    "name": "create_authorization_response",
    "source_code": "def create_authorization_response(self, request, grant_user=None):\n    request = self.create_oauth1_request(request)\n    self.validate_authorization_request(request)\n    temporary_credentials = request.credential\n    redirect_uri = temporary_credentials.get_redirect_uri()\n    if not redirect_uri or redirect_uri == 'oob':\n        client_id = temporary_credentials.get_client_id()\n        client = self.get_client_by_id(client_id)\n        redirect_uri = client.get_default_redirect_uri()\n    if grant_user is None:\n        error = AccessDeniedError()\n        location = add_params_to_uri(redirect_uri, error.get_body())\n        return self.handle_response(302, '', [('Location', location)])\n    request.user = grant_user\n    verifier = self.create_authorization_verifier(request)\n    params = [('oauth_token', request.token), ('oauth_verifier', verifier)]\n    location = add_params_to_uri(redirect_uri, params)\n    return self.handle_response(302, '', [('Location', location)])",
    "docstring": "Validate authorization request and create authorization response. Assume the endpoint for authorization request is `` the client redirects Jane's user-agent to the server's Resource Owner Authorization endpoint to obtain Jane's approval for accessing her private photos:: The server requests Jane to sign in using her username and password and if successful, asks her to approve granting 'printer.example.com' access to her private photos. Jane approves the request and her user-agent is redirected to the callback URI provided by the client in the previous request (line breaks are for display purposes only):: oauth_token=hh5s93j4hdidpola&oauth_verifier=hfdp7dh39dks9884 :param request: OAuth1Request instance. :param grant_user: if granted, pass the grant user, otherwise None. :returns: (status_code, body, headers)",
    "type": "method",
    "file_path": "authlib\\authlib\\oauth1\\rfc5849\\authorization_server.py",
    "ast_data": "FunctionDef name:create_authorization_response arg:self arg:request arg:grant_user arguments arg arg arg Assign Call Call Assign Assign Call If BoolOp Compare Assign Call Assign Call Assign Call If Compare Assign Call Assign Call Call Return return:yes Call Assign Assign Call Assign Assign Call Return return:yes Call"
  },
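The success path above reduces to appending `oauth_token` and `oauth_verifier` to the callback URI and returning a 302. A standalone sketch of the URI construction (a stand-in for authlib's real `add_params_to_uri`, shown with the RFC 5849 example values from the docstring):

```python
# Minimal stand-in for add_params_to_uri: merge extra query parameters
# into a URI while preserving any existing query string.
from urllib.parse import urlencode, urlparse, urlunparse, parse_qsl

def add_params_to_uri(uri: str, params) -> str:
    parts = urlparse(uri)
    query = parse_qsl(parts.query)
    query.extend(params)
    return urlunparse(parts._replace(query=urlencode(query)))

location = add_params_to_uri(
    "http://printer.example.com/ready",
    [("oauth_token", "hh5s93j4hdidpola"), ("oauth_verifier", "hfdp7dh39dks9884")],
)
print(location)
# http://printer.example.com/ready?oauth_token=hh5s93j4hdidpola&oauth_verifier=hfdp7dh39dks9884
```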
  {
    "library": "pandas",
    "name": "meta",
    "source_code": "@property\ndef meta(self):\n    return getattr(self.queryables.get(self.lhs), 'meta', None)",
    "docstring": "the meta of my field",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\computation\\pytables.py",
    "ast_data": "FunctionDef name:meta arg:self arguments arg Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_maybe_build_distributed_table",
    "source_code": "def _maybe_build_distributed_table(self):\n    with self._distributed_table_creation_lock:\n        if not self._distributed_table:\n\n            def create_copy():\n                new_table = self._wrapped_creator()\n                ret = new_table.resource_handle\n                return ret\n            self._distributed_table = self._coordinator._create_per_worker_resources(create_copy)",
    "docstring": "Create table objects and resources on each worker if hasn't been created.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\ps_values.py",
    "ast_data": "FunctionDef name:_maybe_build_distributed_table arg:self arguments arg With If FunctionDef name:create_copy arguments Assign Call Assign Return return:yes Assign Call"
  },
  {
    "library": "tensorflow",
    "name": "get_all_registered_kernels",
    "source_code": "def get_all_registered_kernels():\n    buf = c_api.TF_GetAllRegisteredKernels()\n    data = c_api.TF_GetBuffer(buf)\n    kernel_list = kernel_def_pb2.KernelList()\n    kernel_list.ParseFromString(compat.as_bytes(data))\n    return kernel_list",
    "docstring": "Returns a KernelList proto of all registered kernels.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\kernels.py",
    "ast_data": "FunctionDef name:get_all_registered_kernels arguments Assign Call Assign Call Assign Call Call Call Return return:yes"
  },
  {
    "library": "scipy",
    "name": "_argcheck",
    "source_code": "def _argcheck(self, *args):\n    cond = 1\n    for arg in args:\n        cond = logical_and(cond, asarray(arg) > 0)\n    return cond",
    "docstring": "Default check for correct values on args and keywords. Returns condition array of 1's where arguments are correct and 0's where they are not.",
    "type": "method",
    "file_path": "scipy\\scipy\\stats\\_distn_infrastructure.py",
    "ast_data": "FunctionDef name:_argcheck arg:self arguments arg arg Assign For Assign Call Compare Call Return return:yes"
  },
  {
    "library": "pandas",
    "name": "_gotitem",
    "source_code": "def _gotitem(self, key, ndim: int, subset=None):\n    if ndim == 2:\n        if subset is None:\n            subset = self.obj\n        return DataFrameGroupBy(subset, self.keys, level=self.level, grouper=self._grouper, exclusions=self.exclusions, selection=key, as_index=self.as_index, sort=self.sort, group_keys=self.group_keys, observed=self.observed, dropna=self.dropna)\n    elif ndim == 1:\n        if subset is None:\n            subset = self.obj[key]\n        return SeriesGroupBy(subset, self.keys, level=self.level, grouper=self._grouper, exclusions=self.exclusions, selection=key, as_index=self.as_index, sort=self.sort, group_keys=self.group_keys, observed=self.observed, dropna=self.dropna)\n    raise AssertionError('invalid ndim for _gotitem')",
    "docstring": "sub-classes to define return a sliced object Parameters ---------- key : string / list of selections ndim : {1, 2} requested ndim of result subset : object, default None subset to act on",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\groupby\\generic.py",
    "ast_data": "FunctionDef name:_gotitem arg:self arg:key arg:ndim arg:subset arguments arg arg arg arg If Compare If Compare Assign Return return:yes Call If Compare If Compare Assign Return return:yes Call Raise Call"
  },
  {
    "library": "tensorflow",
    "name": "_swap_axis",
    "source_code": "def _swap_axis(input_tensor, dim_index, last_index, name=None):\n    return array_ops.transpose(input_tensor, array_ops.concat([math_ops.range(dim_index), [last_index], math_ops.range(dim_index + 1, last_index), [dim_index]], 0), name=name)",
    "docstring": "Swaps logits's dim_index and last_index.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\nn_ops.py",
    "ast_data": "FunctionDef name:_swap_axis arg:input_tensor arg:dim_index arg:last_index arg:name arguments arg arg arg arg Return return:yes Call Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "stop",
    "source_code": "def stop():\n    torch._C._mps_profilerStopTrace()",
    "docstring": "Stops generating OS Signpost tracing from MPS backend.",
    "type": "function",
    "file_path": "pytorch\\torch\\mps\\profiler.py",
    "ast_data": "FunctionDef name:stop arguments Call"
  },
  {
    "library": "tensorflow",
    "name": "_predict_step_fn",
    "source_code": "def _predict_step_fn(inputs):\n    distribute_lib.get_replica_context().merge_call(_build_model, args=(model, mode, inputs))\n    _, outputs, updates, _ = _per_replica_execution_function(dist_utils.get_distributed_model(model, mode), mode)\n    with ops.control_dependencies([updates]):\n        return [array_ops.identity(out) for out in outputs]",
    "docstring": "A fn that returns output of single prediction step.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\training_distributed_v1.py",
    "ast_data": "FunctionDef name:_predict_step_fn arg:inputs arguments arg Call Call Assign Call Call With Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "scatter_nd_update",
    "source_code": "def scatter_nd_update(self, indices, updates, name=None):\n    return gen_state_ops.scatter_nd_update(self._variable, indices, updates, use_locking=True, name=name)",
    "docstring": "Applies sparse assignment to individual values or slices in a Variable. is a with rank and is a of rank . must be integer tensor, containing indices into . It must be shape where . The innermost dimension of (with length ) corresponds to indices into elements (if ) or slices (if ) along the th dimension of . is of rank with shape: For example, say we want to add 4 scattered elements to a rank-1 tensor to 8 elements. In Python, that update would look like this: The resulting update to ref would look like this: [1, 11, 3, 10, 9, 6, 7, 12] See for more details about how to make updates to slices. Args: indices: The indices to be used in the operation. updates: The values to be used in the operation. name: the name of the operation. Returns: A that will hold the new value of this variable after the scattered assignment has completed.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\ref_variable.py",
    "ast_data": "FunctionDef name:scatter_nd_update arg:self arg:indices arg:updates arg:name arguments arg arg arg arg Return return:yes Call"
  },
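A NumPy mirror of the rank-1 example in the docstring, to make the scatter semantics concrete:

```python
# Element updates scattered into an 8-element vector, matching the
# [1, 11, 3, 10, 9, 6, 7, 12] result shown in the docstring above.
import numpy as np

ref = np.array([1, 2, 3, 4, 5, 6, 7, 8])
indices = np.array([[4], [3], [1], [7]])  # innermost dim indexes elements
updates = np.array([9, 10, 11, 12])
ref[indices[:, 0]] = updates
print(ref)  # [ 1 11  3 10  9  6  7 12]
```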
  {
    "library": "kornia",
    "name": "RTDETRConfig",
    "source_code": "@dataclass\nclass RTDETRConfig:\n    model_type: RTDETRModelType | str | int\n    num_classes: int\n    input_size: int = 640\n    checkpoint: Optional[str] = None\n    neck_hidden_dim: Optional[int] = None\n    neck_dim_feedforward: Optional[int] = None\n    neck_expansion: Optional[float] = None\n    head_hidden_dim: int = 256\n    head_num_queries: int = 300\n    head_num_decoder_layers: Optional[int] = None\n    confidence_threshold: float = 0.3\n\n    @staticmethod\n    def from_name(model_name: str, num_classes: int=80) -> RTDETRConfig:\n        if model_name == 'rtdetr_r18vd':\n            config = RTDETRConfig(RTDETRModelType.resnet18d, num_classes, input_size=640)\n        elif model_name == 'rtdetr_r34vd':\n            config = RTDETRConfig(RTDETRModelType.resnet34d, num_classes, input_size=640)\n        elif model_name == 'rtdetr_r50vd_m':\n            config = RTDETRConfig(RTDETRModelType.resnet50d_m, num_classes, input_size=640)\n        elif model_name == 'rtdetr_r50vd':\n            config = RTDETRConfig(RTDETRModelType.resnet50d, num_classes, input_size=640)\n        elif model_name == 'rtdetr_r101vd':\n            config = RTDETRConfig(RTDETRModelType.resnet101d, num_classes, input_size=640)\n        else:\n            raise ValueError\n        return config",
    "docstring": "Configuration to construct RT-DETR model. Args: model_type: model variant. Available models are - ResNetD-18: `RTDETRModelType.resnet18dRTDETRModelType.resnet34dRTDETRModelType.resnet50dRTDETRModelType.resnet101dRTDETRModelType.hgnetv2_lRTDETRModelType.hgnetv2_x` num_classes: number of classes. checkpoint: URL or local path of model weights. neck_hidden_dim: hidden dim for neck. neck_dim_feedforward: feed-forward network dim for neck. neck_expansion: expansion ratio for neck. head_hidden_dim: hidden dim for head. head_num_queries: number of queries for Deformable DETR transformer decoder. head_num_decoder_layers: number of decoder layers for Deformable DETR transformer decoder.",
    "type": "class",
    "file_path": "kornia\\kornia\\contrib\\models\\rt_detr\\model.py",
    "ast_data": "ClassDef name:RTDETRConfig FunctionDef name:from_name arg:model_name arg:num_classes arguments arg arg If Compare Assign Call If Compare Assign Call If Compare Assign Call If Compare Assign Call If Compare Assign Call Raise Return return:yes"
  },
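Typical use goes through the `from_name` factory shown above; a short sketch, assuming the import path implied by the entry's `file_path` field:

```python
# Building a config via the from_name factory; the recognized names map
# onto the model_type/input_size fields of the dataclass.
from kornia.contrib.models.rt_detr.model import RTDETRConfig

config = RTDETRConfig.from_name("rtdetr_r50vd", num_classes=3)
print(config.model_type, config.input_size)  # RTDETRModelType.resnet50d 640
```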
  {
    "library": "scikit-learn",
    "name": "hstack",
    "source_code": "def hstack(self, Xs):\n    pass",
    "docstring": "Stack containers horizontally (column-wise). Parameters ---------- Xs : list of containers List of containers to stack. Returns ------- stacked_Xs : container Stacked containers.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\utils\\_set_output.py",
    "ast_data": "FunctionDef name:hstack arg:self arg:Xs arguments arg arg"
  },
  {
    "library": "pytorch",
    "name": "call_tracker",
    "source_code": "@torch.jit.unused\ndef call_tracker(self):\n    pass",
    "docstring": "Interface for tracking iteration process in Python mode. Tracking the iteration process is disabled in TorchScript mode. In fact, one should specify tracker=None when JIT compiling functions using lobpcg.",
    "type": "method",
    "file_path": "pytorch\\torch\\_lobpcg.py",
    "ast_data": "FunctionDef name:call_tracker arg:self arguments arg"
  },
  {
    "library": "tensorflow",
    "name": "_tf_not",
    "source_code": "def _tf_not(a):\n    return gen_math_ops.logical_not(a)",
    "docstring": "Implementation of the \"not_\" operator for TensorFlow.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\autograph\\operators\\logical.py",
    "ast_data": "FunctionDef name:_tf_not arg:a arguments arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_wrap_call_and_conditional_losses",
    "source_code": "def _wrap_call_and_conditional_losses(layer):\n    layer_call = _get_layer_call_method(layer)\n\n    def call_and_return_conditional_losses(*args, **kwargs):\n        call_output = layer_call(*args, **kwargs)\n        if version_utils.is_v1_layer_or_model(layer):\n            conditional_losses = layer.get_losses_for(_filtered_inputs([args, kwargs]))\n        else:\n            conditional_losses = [l for l in layer.losses if not hasattr(l, '_unconditional_loss')]\n        return (call_output, conditional_losses)\n    return _create_call_fn_decorator(layer, call_and_return_conditional_losses)",
    "docstring": "Wraps call function that returns a tuple of (outputs, losses). The losses returned are conditional on the inputs passed to the call function. Unconditional losses (e.g. weight regularizeration) are wrapped separately. Args: layer: a Keras layer object Returns: python call function that returns outputs and conditional losses -- excludes activity regularizer",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\saving\\saved_model\\save_impl.py",
    "ast_data": "FunctionDef name:_wrap_call_and_conditional_losses arg:layer arguments arg Assign Call FunctionDef name:call_and_return_conditional_losses arguments arg arg Assign Call If Call Assign Call Call Assign Call Return return:yes Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "set_label_props",
    "source_code": "def set_label_props(self, props):\n    _api.check_isinstance(dict, props=props)\n    props = _expand_text_props(props)\n    for text, prop in zip(self.labels, props):\n        text.update(prop)",
    "docstring": "Set properties of the labels. .. versionadded:: 3.7 Parameters ---------- props : dict Dictionary of properties to be used for the labels.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\widgets.py",
    "ast_data": "FunctionDef name:set_label_props arg:self arg:props arguments arg arg Call Assign Call For Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_padding_value_to_tensor",
    "source_code": "def _padding_value_to_tensor(value, output_type):\n    value = ops.convert_to_tensor(value, name='padding_value')\n    if not value.shape.is_compatible_with(tensor_shape.TensorShape([])):\n        raise ValueError(f'Invalid `padding_values`. `padding_values` values should be scalars, but got {value.shape}.')\n    if value.dtype != output_type:\n        raise TypeError(f'Invalid `padding_values`. `padding_values` values type {value.dtype} does not match type {output_type} of the corresponding input component.')\n    return value",
    "docstring": "Converts the padding value to a tensor. Args: value: The padding value. output_type: Its expected dtype. Returns: A scalar . Raises: ValueError: if the padding value is not a scalar. TypeError: if the padding value's type does not match .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\data\\ops\\padded_batch_op.py",
    "ast_data": "FunctionDef name:_padding_value_to_tensor arg:value arg:output_type arguments arg arg Assign Call If Call Call Raise Call If Compare Raise Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, node_def, op, message, *args):\n    super(AlreadyExistsError, self).__init__(node_def, op, message, ALREADY_EXISTS, *args)",
    "docstring": "Creates an .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\errors_impl.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:node_def arg:op arg:message arguments arg arg arg arg arg Call Call"
  },
  {
    "library": "django",
    "name": "W",
    "source_code": "def W(self):\n    return self.data.isocalendar().week",
    "docstring": "ISO-8601 week number of year, weeks starting on Monday",
    "type": "method",
    "file_path": "django\\django\\utils\\dateformat.py",
    "ast_data": "FunctionDef name:W arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "supports_serialize",
    "source_code": "@property\ndef supports_serialize(self):\n    return self._supports_serialize",
    "docstring": "Whether the Reader implementation can serialize its state.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\io_ops.py",
    "ast_data": "FunctionDef name:supports_serialize arg:self arguments arg Return return:yes"
  },
  {
    "library": "pandas",
    "name": "is_unique",
    "source_code": "@property\ndef is_unique(self) -> bool:\n    return self.nunique(dropna=False) == len(self)",
    "docstring": "Return True if values in the object are unique. Returns ------- bool See Also -------- Series.unique : Return unique values of Series object. Series.drop_duplicates : Return Series with duplicate values removed. Series.duplicated : Indicate duplicate Series values. Examples -------- >>> s = pd.Series([1, 2, 3]) >>> s.is_unique True >>> s = pd.Series([1, 2, 3, 1]) >>> s.is_unique False",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\base.py",
    "ast_data": "FunctionDef name:is_unique arg:self arguments arg Return return:yes Compare Call Call"
  },
  {
    "library": "pytorch",
    "name": "is_general_tensor_value_op",
    "source_code": "def is_general_tensor_value_op(self) -> bool:\n    return False",
    "docstring": "Returns True if the operator works for both floating point and quantized input, and does some computation based on the input Tensor, or the ops that only re-arranges the Tensor values or query some metadata about the Tensor so we need to insert observer/fake_quant for the output of the operator (same observer instance as input) since the distribution of values is different for input and output Tensors (for HistogramObserver) while they share the same quantization parameters Example operator: avgpool2d, reshape, transpose, maxpool2d Example observed operator: observer_0 - avgpool2d - observer_0 (same observer instance as input)",
    "type": "method",
    "file_path": "pytorch\\torch\\ao\\quantization\\fx\\quantize_handler.py",
    "ast_data": "FunctionDef name:is_general_tensor_value_op arg:self arguments arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "quantize_per_tensor_tensor2",
    "source_code": "@impl(quantized_decomposed_lib, 'quantize_per_tensor.tensor2', 'CompositeExplicitAutograd')\ndef quantize_per_tensor_tensor2(input: torch.Tensor, scale: torch.Tensor, zero_point: torch.Tensor, quant_min: torch.Tensor, quant_max: torch.Tensor, dtype: torch.dtype) -> torch.Tensor:\n    assert zero_point.numel() == 1, f'Expecting zero_point tensor to be one element, but received : {zero_point.numel()}'\n    assert scale.numel() == 1, f'Expecting scale tensor to be one element, but received : {scale.numel()}'\n    return quantize_per_tensor(input, scale.item(), zero_point.item(), quant_min.item(), quant_max.item(), dtype)",
    "docstring": "Affine quantization for the Tensor using the same quantization parameters to map from floating point to quantized values Same as but scale and zero_point are Scalar Tensor instead of scalar values",
    "type": "function",
    "file_path": "pytorch\\torch\\ao\\quantization\\fx\\_decomposed.py",
    "ast_data": "FunctionDef name:quantize_per_tensor_tensor2 arg:input arg:scale arg:zero_point arg:quant_min arg:quant_max arg:dtype arguments arg arg arg arg arg arg Compare Call Call Compare Call Call Return return:yes Call Call Call Call Call Call"
  },
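The scalar-tensor wrapper above ultimately applies the standard affine mapping `q = clamp(round(x / scale) + zero_point, quant_min, quant_max)`. A minimal sketch of that formula (ours, not the library kernel):

```python
# The affine quantization formula written out directly.
import torch

def quantize_per_tensor_sketch(x, scale, zero_point, quant_min, quant_max, dtype):
    q = torch.round(x / scale) + zero_point
    return torch.clamp(q, quant_min, quant_max).to(dtype)

x = torch.tensor([-1.0, 0.0, 0.5, 1.0])
print(quantize_per_tensor_sketch(x, 0.01, 0, -128, 127, torch.int8))
# tensor([-100,    0,   50,  100], dtype=torch.int8)
```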
  {
    "library": "scrapy",
    "name": "crawl",
    "source_code": "def crawl(self, request: Request) -> None:\n    if self.spider is None:\n        raise RuntimeError(f'No open spider to crawl: {request}')\n    self._schedule_request(request)\n    self._slot.nextcall.schedule()",
    "docstring": "Inject the request into the spider downloader pipeline",
    "type": "method",
    "file_path": "scrapy\\scrapy\\core\\engine.py",
    "ast_data": "FunctionDef name:crawl arg:self arg:request arguments arg arg If Compare Raise Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_clone_dataset",
    "source_code": "def _clone_dataset(dataset):\n    variant_tensor_ops = traverse.obtain_all_variant_tensor_ops(dataset)\n    remap_dict = _clone_helper(dataset._variant_tensor.op, variant_tensor_ops)\n    new_variant_tensor = remap_dict[dataset._variant_tensor.op].outputs[0]\n    return dataset_ops._VariantDataset(new_variant_tensor, dataset.element_spec)",
    "docstring": "Returns a cloned version of .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\input_ops.py",
    "ast_data": "FunctionDef name:_clone_dataset arg:dataset arguments arg Assign Call Assign Call Assign Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "locate_label",
    "source_code": "def locate_label(self, linecontour, labelwidth):\n    ctr_size = len(linecontour)\n    n_blocks = int(np.ceil(ctr_size / labelwidth)) if labelwidth > 1 else 1\n    block_size = ctr_size if n_blocks == 1 else int(labelwidth)\n    xx = np.resize(linecontour[:, 0], (n_blocks, block_size))\n    yy = np.resize(linecontour[:, 1], (n_blocks, block_size))\n    yfirst = yy[:, :1]\n    ylast = yy[:, -1:]\n    xfirst = xx[:, :1]\n    xlast = xx[:, -1:]\n    s = (yfirst - yy) * (xlast - xfirst) - (xfirst - xx) * (ylast - yfirst)\n    l = np.hypot(xlast - xfirst, ylast - yfirst)\n    with np.errstate(divide='ignore', invalid='ignore'):\n        distances = (abs(s) / l).sum(axis=-1)\n    hbsize = block_size // 2\n    adist = np.argsort(distances)\n    for idx in np.append(adist, adist[0]):\n        x, y = (xx[idx, hbsize], yy[idx, hbsize])\n        if not self.too_close(x, y, labelwidth):\n            break\n    return (x, y, (idx * block_size + hbsize) % ctr_size)",
    "docstring": "Find good place to draw a label (relatively flat part of the contour).",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\contour.py",
    "ast_data": "FunctionDef name:locate_label arg:self arg:linecontour arg:labelwidth arguments arg arg arg Assign Call Assign Compare Call Call Assign Compare Call Assign Call Assign Call Assign Assign Assign Assign Assign Assign Call With Call Assign Call Call Assign Assign Call For Call Assign If Call Return return:yes"
  },
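A worked sketch of the flatness metric locate_label scores each block with: |s| / l is the perpendicular distance from each point to the chord joining the block's endpoints (a 2-D cross product divided by the chord length), so flat stretches accumulate a small summed distance. The three-point block here is illustrative.

```python
import numpy as np

pts = np.array([[0.0, 0.0], [1.0, 0.1], [2.0, 0.0]])  # a nearly flat block
first, last = pts[0], pts[-1]
# same cross-product form as the source:
#   (yfirst - yy)(xlast - xfirst) - (xfirst - xx)(ylast - yfirst)
s = (first[1] - pts[:, 1]) * (last[0] - first[0]) \
    - (first[0] - pts[:, 0]) * (last[1] - first[1])
l = np.hypot(last[0] - first[0], last[1] - first[1])
flatness = (np.abs(s) / l).sum()  # small value => good label spot
print(flatness)  # 0.1: only the middle point sits off the chord
```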
  {
    "library": "seaborn",
    "name": "EdgeWidth",
    "source_code": "class EdgeWidth(IntervalProperty):\n\n    @property\n    def default_range(self) -> tuple[float, float]:\n        base = mpl.rcParams['patch.linewidth']\n        return (base * 0.5, base * 2)",
    "docstring": "Thickness of the edges on a patch mark, in points.",
    "type": "class",
    "file_path": "seaborn\\seaborn\\_core\\properties.py",
    "ast_data": "ClassDef name:EdgeWidth FunctionDef name:default_range arg:self arguments arg Assign Return return:yes"
  },
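A small sketch of how the default interval tracks the rcParam; with matplotlib's stock patch.linewidth of 1.0, the range works out to (0.5, 2.0).

```python
import matplotlib as mpl

base = mpl.rcParams["patch.linewidth"]  # 1.0 with default rcParams
default_range = (base * 0.5, base * 2)
print(default_range)  # (0.5, 2.0)
```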
  {
    "library": "django",
    "name": "max_in_list_size",
    "source_code": "def max_in_list_size(self):\n    return None",
    "docstring": "Return the maximum number of items that can be passed in a single 'IN' list condition, or None if the backend does not impose a limit.",
    "type": "method",
    "file_path": "django\\django\\db\\backends\\base\\operations.py",
    "ast_data": "FunctionDef name:max_in_list_size arg:self arguments arg Return return:no"
  },
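A hedged sketch of how a caller might chunk values against this limit; `chunked_in_lists` and the 900-item cap are illustrative, not Django internals (Django applies the same idea inside its In lookup when a backend reports a limit).

```python
def chunked_in_lists(values, max_size):
    """Yield IN-list chunks no longer than the backend's limit."""
    values = list(values)
    if max_size is None:  # backend imposes no limit
        yield values
        return
    for i in range(0, len(values), max_size):
        yield values[i:i + max_size]

for chunk in chunked_in_lists(range(2500), max_size=900):
    ...  # e.g. OR together Model.objects.filter(pk__in=chunk) queries
```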
  {
    "library": "tensorflow",
    "name": "create_slot_variable_position",
    "source_code": "def create_slot_variable_position(self, optimizer_object: Any, variable: base.Trackable, slot_variable_id: str, slot_name: str, reshard_callback: Optional[checkpoint_adapter.ReshardCallback]=None):\n    slot_variable_position = CheckpointPosition(checkpoint=self.checkpoint, proto_id=slot_variable_id)\n    if reshard_callback is not None:\n        slot_variable_position.update_resharding_callback(reshard_callback)\n        slot_variable = optimizer_object._create_or_restore_slot_variable(slot_variable_position=slot_variable_position, variable=variable, slot_name=slot_name, slot_variable_shape=variable.shape)\n    else:\n        slot_variable = optimizer_object._create_or_restore_slot_variable(slot_variable_position=slot_variable_position, variable=variable, slot_name=slot_name)\n    if slot_variable is not None and slot_variable_position.bind_object(slot_variable):\n        return (slot_variable_position, slot_variable)\n    else:\n        return (None, None)",
    "docstring": "Generates CheckpointPosition for a slot variable. Args: optimizer_object: Optimizer that owns the slot variable. variable: Variable associated with the slot variable. slot_variable_id: ID of the slot variable. slot_name: Name of the slot variable. reshard_callback: A callback object for resharding value from checkpoint at restore. Returns: If there is a slot variable in the that has not been bound to the checkpoint, this function returns a tuple of ( new for the slot variable, the slot variable itself).",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\checkpoint\\restore.py",
    "ast_data": "FunctionDef name:create_slot_variable_position arg:self arg:optimizer_object arg:variable arg:slot_variable_id arg:slot_name arg:reshard_callback arguments arg arg arg arg arg arg Assign Call If Compare Call Assign Call Assign Call If BoolOp Compare Call Return return:yes Return return:no"
  },
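A hedged sketch of the shape of this method, with hypothetical stand-ins for the optimizer hooks: when a reshard callback is present the variable's shape travels along with the restore call, and the result is either a fully bound pair or (None, None).

```python
from typing import Any, Callable, Optional, Tuple

def restore_slot(create_or_restore: Callable[..., Any],
                 bind_object: Callable[[Any], bool],
                 position: Any,
                 variable_shape: Any = None,
                 reshard_callback: Optional[Callable] = None) -> Tuple[Any, Any]:
    kwargs = {}
    if reshard_callback is not None:
        # resharding rewrites the checkpointed value's layout, so the
        # target shape has to be forwarded explicitly
        kwargs["slot_variable_shape"] = variable_shape
    slot_variable = create_or_restore(**kwargs)
    # callers get both values or neither
    if slot_variable is not None and bind_object(slot_variable):
        return (position, slot_variable)
    return (None, None)
```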
  {
    "library": "pandas",
    "name": "_partial_date_slice",
    "source_code": "@final\ndef _partial_date_slice(self, reso: Resolution, parsed: datetime) -> slice | npt.NDArray[np.intp]:\n    if not self._can_partial_date_slice(reso):\n        raise ValueError\n    t1, t2 = self._parsed_string_to_bounds(reso, parsed)\n    vals = self._data._ndarray\n    unbox = self._data._unbox\n    if self.is_monotonic_increasing:\n        if len(self) and (t1 < self[0] and t2 < self[0] or (t1 > self[-1] and t2 > self[-1])):\n            raise KeyError\n        left = vals.searchsorted(unbox(t1), side='left')\n        right = vals.searchsorted(unbox(t2), side='right')\n        return slice(left, right)\n    else:\n        lhs_mask = vals >= unbox(t1)\n        rhs_mask = vals <= unbox(t2)\n        return (lhs_mask & rhs_mask).nonzero()[0]",
    "docstring": "Parameters ---------- reso : Resolution parsed : datetime Returns ------- slice or ndarray[intp]",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\indexes\\datetimelike.py",
    "ast_data": "FunctionDef name:_partial_date_slice arg:self arg:reso arg:parsed arguments arg arg arg If Call Raise Assign Call Assign Assign If If BoolOp Call BoolOp BoolOp Compare Compare BoolOp Compare Compare Raise Assign Call Call Assign Call Call Return return:yes Call Assign Compare Call Assign Compare Call Return return:yes Call"
  }
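A worked sketch of the two lookup strategies the method switches between: on a monotonic array, two searchsorted calls yield a contiguous slice; the unsorted fallback builds a boolean mask and returns the scattered positions. Plain integers stand in for the unboxed datetime bounds.

```python
import numpy as np

vals = np.array([10, 20, 30, 40, 50])        # monotonic: use searchsorted
t1, t2 = 20, 40
left = vals.searchsorted(t1, side="left")
right = vals.searchsorted(t2, side="right")
print(slice(int(left), int(right)))          # slice(1, 4) -> 20, 30, 40

shuffled = np.array([30, 10, 50, 20, 40])    # unsorted: mask fallback
mask = (shuffled >= t1) & (shuffled <= t2)
print(mask.nonzero()[0])                     # [0 3 4]
```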
]